//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the machinery used to declare and "use" Analysis Passes.
// This file is automatically #included by Pass.h, so:
//
//           NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
//
// Instead, #include Pass.h
//
//===----------------------------------------------------------------------===//

#if !defined(LLVM_PASS_H) || defined(LLVM_PASSANALYSISSUPPORT_H)
#error "Do not include <PassAnalysisSupport.h>; include <Pass.h> instead"
#endif

#ifndef LLVM_PASSANALYSISSUPPORT_H
#define LLVM_PASSANALYSISSUPPORT_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <tuple>
#include <utility>
#include <vector>

namespace llvm {

class Function;
class Pass;
class PMDataManager;
class StringRef;

//===----------------------------------------------------------------------===//
/// Represent the analysis usage information of a pass.  This tracks analyses
/// that the pass REQUIRES (must be available when the pass runs), REQUIRES
/// TRANSITIVE (must be available throughout the lifetime of the pass), and
/// analyses that the pass PRESERVES (the pass does not invalidate the results
/// of these analyses).  This information is provided by a pass to the Pass
/// infrastructure through the getAnalysisUsage virtual function.
///
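/// A minimal sketch of how a legacy-PM pass typically fills this in (the pass
/// and analysis names are illustrative):
/// \code
///   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<DominatorTreeWrapperPass>();  // must be available first
///     AU.addPreserved<DominatorTreeWrapperPass>(); // kept valid by MyPass
///     AU.setPreservesCFG();                        // MyPass leaves the CFG alone
///   }
/// \endcode
///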
class AnalysisUsage {
public:
  using VectorType = SmallVectorImpl<AnalysisID>;

private:
  /// Sets of analyses required and preserved by a pass
  // TODO: It's not clear that SmallVector is an appropriate data structure for
  // this use case.  The sizes were picked to minimize wasted space, but are
  // otherwise fairly meaningless.
  SmallVector<AnalysisID, 8> Required;
  SmallVector<AnalysisID, 2> RequiredTransitive;
  SmallVector<AnalysisID, 2> Preserved;
  SmallVector<AnalysisID, 0> Used;
  bool PreservesAll = false;

  void pushUnique(VectorType &Set, AnalysisID ID) {
    if (!llvm::is_contained(Set, ID))
      Set.push_back(ID);
  }

public:
  AnalysisUsage() = default;

  ///@{
  /// Add the specified ID to the required set of the usage info for a pass.
  AnalysisUsage &addRequiredID(const void *ID);
  AnalysisUsage &addRequiredID(char &ID);
  template<class PassClass>
  AnalysisUsage &addRequired() {
    return addRequiredID(PassClass::ID);
  }

  AnalysisUsage &addRequiredTransitiveID(char &ID);
  template<class PassClass>
  AnalysisUsage &addRequiredTransitive() {
    return addRequiredTransitiveID(PassClass::ID);
  }
  ///@}

  ///@{
  /// Add the specified ID to the set of analyses preserved by this pass.
  AnalysisUsage &addPreservedID(const void *ID) {
    pushUnique(Preserved, ID);
    return *this;
  }
  AnalysisUsage &addPreservedID(char &ID) {
    pushUnique(Preserved, &ID);
    return *this;
  }
  /// Add the specified Pass class to the set of analyses preserved by this pass.
  template<class PassClass>
  AnalysisUsage &addPreserved() {
    pushUnique(Preserved, &PassClass::ID);
    return *this;
  }
  ///@}

  ///@{
  /// Add the specified ID to the set of analyses used by this pass if it is
  /// available.
  AnalysisUsage &addUsedIfAvailableID(const void *ID) {
    pushUnique(Used, ID);
    return *this;
  }
  AnalysisUsage &addUsedIfAvailableID(char &ID) {
    pushUnique(Used, &ID);
    return *this;
  }
  /// Add the specified Pass class to the set of analyses used by this pass if
  /// it is available.
  template<class PassClass>
  AnalysisUsage &addUsedIfAvailable() {
    pushUnique(Used, &PassClass::ID);
    return *this;
  }
  ///@}

  /// Add the Pass with the specified argument string to the set of analyses
  /// preserved by this pass. If no such Pass exists, do nothing. This can be
  /// useful when a pass is trivially preserved, but may not be linked in. Be
  /// careful about spelling!
  AnalysisUsage &addPreserved(StringRef Arg);

  /// Set by analyses that do not transform their input at all
  void setPreservesAll() { PreservesAll = true; }

  /// Determine whether a pass said it does not transform its input at all
  bool getPreservesAll() const { return PreservesAll; }

  /// This function should be called by the pass iff it does not:
  ///
  ///  1. Add or remove basic blocks from the function.
  ///  2. Modify terminator instructions in any way.
  ///
  /// This function annotates the AnalysisUsage info object to say that analyses
  /// that only depend on the CFG are preserved by this pass.
  void setPreservesCFG();

  const VectorType &getRequiredSet() const { return Required; }
  const VectorType &getRequiredTransitiveSet() const {
    return RequiredTransitive;
  }
  const VectorType &getPreservedSet() const { return Preserved; }
  const VectorType &getUsedSet() const { return Used; }
};

//===----------------------------------------------------------------------===//
/// AnalysisResolver - Simple interface used by Pass objects to pull all
/// analysis information out of the pass manager that is responsible for
/// managing the pass.
///
class AnalysisResolver {
public:
  AnalysisResolver() = delete;
  explicit AnalysisResolver(PMDataManager &P) : PM(P) {}

  PMDataManager &getPMDataManager() { return PM; }

  /// Find the pass that implements PI.
  Pass *findImplPass(AnalysisID PI) {
    Pass *ResultPass = nullptr;
    for (const auto &AnalysisImpl : AnalysisImpls) {
      if (AnalysisImpl.first == PI) {
        ResultPass = AnalysisImpl.second;
        break;
      }
    }
    return ResultPass;
  }

  /// Find the pass that implements PI, initializing it for Function F.
  std::tuple<Pass *, bool> findImplPass(Pass *P, AnalysisID PI, Function &F);

  void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
    if (findImplPass(PI) == P)
      return;
    AnalysisImpls.push_back(std::make_pair(PI, P));
  }

  /// Clear cache that is used to connect a pass to the analysis (PassInfo).
  void clearAnalysisImpls() {
    AnalysisImpls.clear();
  }

  /// Return analysis result or null if it doesn't exist.
  Pass *getAnalysisIfAvailable(AnalysisID ID) const;

private:
  /// This keeps track of which passes implement the interfaces that are
  /// required by the current pass (to implement getAnalysis()).
  std::vector<std::pair<AnalysisID, Pass *>> AnalysisImpls;

  /// PassManager that is used to resolve analysis info
  PMDataManager &PM;
};

/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
/// get analysis information that might be around, for example to update it.
/// This is different from getAnalysis in that it can fail (if the analysis
/// results haven't been computed), so should only be used if you can handle
/// the case when the analysis is not available.  This method is often used by
/// transformation APIs to update analysis results for a pass automatically as
/// the transform is performed.
template<typename AnalysisType>
AnalysisType *Pass::getAnalysisIfAvailable() const {
  assert(Resolver && "Pass not resident in a PassManager object!");

  const void *PI = &AnalysisType::ID;

  Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI);
  if (!ResultPass) return nullptr;

  // Because the AnalysisType may not be a subclass of pass (for
  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
  // adjust the return pointer (because the class may multiply inherit, once
  // from pass, once from AnalysisType).
  return (AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
}
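
// A hedged usage sketch: probe for an analysis without requiring it (the
// wrapper pass named here is illustrative).
//
//   if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
//     // Runs only when some earlier pass computed the dominator tree.
//     DTWP->getDomTree().print(errs());
//   }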

/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
/// to the analysis information that they claim to use by overriding the
/// getAnalysisUsage function.
template<typename AnalysisType>
AnalysisType &Pass::getAnalysis() const {
  assert(Resolver && "Pass has not been inserted into a PassManager object!");
  return getAnalysisID<AnalysisType>(&AnalysisType::ID);
}
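
// A usage sketch, inside runOnFunction of a pass whose getAnalysisUsage
// addRequired'd the analysis (names illustrative); getAnalysisID asserts if
// the analysis was never required:
//
//   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();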

template<typename AnalysisType>
AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
  assert(PI && "getAnalysis for unregistered pass!");
  assert(Resolver && "Pass has not been inserted into a PassManager object!");
  // PI *must* appear in AnalysisImpls.  Because the number of passes used
  // should be a small number, we just do a linear search over a (dense)
  // vector.
  Pass *ResultPass = Resolver->findImplPass(PI);
  assert(ResultPass &&
         "getAnalysis*() called on an analysis that was not "
         "'required' by pass!");

  // Because the AnalysisType may not be a subclass of pass (for
  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
  // adjust the return pointer (because the class may multiply inherit, once
  // from pass, once from AnalysisType).
  return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
}

/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
/// to the analysis information that they claim to use by overriding the
/// getAnalysisUsage function. If resolving the dependencies triggers an IR
/// transformation (e.g. because the analysis requires BreakCriticalEdges) and
/// Changed is non-null, *Changed is updated.
template <typename AnalysisType>
AnalysisType &Pass::getAnalysis(Function &F, bool *Changed) {
  assert(Resolver && "Pass has not been inserted into a PassManager object!");

  return getAnalysisID<AnalysisType>(&AnalysisType::ID, F, Changed);
}

template <typename AnalysisType>
AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F, bool *Changed) {
  assert(PI && "getAnalysis for unregistered pass!");
  assert(Resolver && "Pass has not been inserted into a PassManager object!");
  // PI *must* appear in AnalysisImpls.  Because the number of passes used
  // should be a small number, we just do a linear search over a (dense)
  // vector.
  Pass *ResultPass;
  bool LocalChanged;
  std::tie(ResultPass, LocalChanged) = Resolver->findImplPass(this, PI, F);

  assert(ResultPass && "Unable to find requested analysis info");
  if (Changed)
    *Changed |= LocalChanged;
  else
    assert(!LocalChanged &&
           "A pass triggered a code update but the update status is lost");

  // Because the AnalysisType may not be a subclass of pass (for
  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
  // adjust the return pointer (because the class may multiply inherit, once
  // from pass, once from AnalysisType).
  return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
}

} // end namespace llvm

#endif // LLVM_PASSANALYSISSUPPORT_H
//===-- llvm/Debuginfod/HTTPServer.h - HTTP server library ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the declarations of the HTTPServer and HTTPServerRequest
/// classes, the HTTPResponse and StreamingHTTPResponse structs, and the
/// streamFile function.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFOD_HTTPSERVER_H
#define LLVM_DEBUGINFOD_HTTPSERVER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

#ifdef LLVM_ENABLE_HTTPLIB
// forward declarations
namespace httplib {
class Request;
class Response;
class Server;
} // namespace httplib
#endif

namespace llvm {

struct HTTPResponse;
struct StreamingHTTPResponse;
class HTTPServer;

class HTTPServerError : public ErrorInfo<HTTPServerError, ECError> {
public:
  static char ID;
  HTTPServerError(const Twine &Msg);
  void log(raw_ostream &OS) const override;

private:
  std::string Msg;
};

class HTTPServerRequest {
  friend HTTPServer;

#ifdef LLVM_ENABLE_HTTPLIB
private:
  HTTPServerRequest(const httplib::Request &HTTPLibRequest,
                    httplib::Response &HTTPLibResponse);
  httplib::Response &HTTPLibResponse;
#endif

public:
  std::string UrlPath;
  /// The elements correspond to match groups in the URL path matching regex.
  SmallVector<std::string, 1> UrlPathMatches;

  // TODO bring in HTTP headers

  void setResponse(StreamingHTTPResponse Response);
  void setResponse(HTTPResponse Response);
};

struct HTTPResponse {
  unsigned Code;
  const char *ContentType;
  StringRef Body;
};

typedef std::function<void(HTTPServerRequest &)> HTTPRequestHandler;

/// An HTTPContentProvider is called by the HTTPServer to obtain chunks of the
/// streaming response body. The returned chunk should start at Offset bytes
/// into the body and be Length bytes long.
typedef std::function<StringRef(size_t /*Offset*/, size_t /*Length*/)>
    HTTPContentProvider;

/// Wraps the content provider with HTTP Status code and headers.
struct StreamingHTTPResponse {
  unsigned Code;
  const char *ContentType;
  size_t ContentLength;
  HTTPContentProvider Provider;
  /// Called after the response transfer is complete with the success value of
  /// the transfer.
  std::function<void(bool)> CompletionHandler = [](bool Success) {};
};
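
// A hedged sketch of building a streaming response inside a request handler
// (the body buffer is illustrative):
//
//   static const std::string LargeBody(1024 * 1024, 'x');
//   StreamingHTTPResponse Resp;
//   Resp.Code = 200;
//   Resp.ContentType = "application/octet-stream";
//   Resp.ContentLength = LargeBody.size();
//   Resp.Provider = [](size_t Offset, size_t Length) {
//     return StringRef(LargeBody).substr(Offset, Length); // requested slice
//   };
//   Request.setResponse(Resp);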

/// Sets the response to stream the file at FilePath if it is available, and
/// otherwise sets an HTTP 404 error response.
bool streamFile(HTTPServerRequest &Request, StringRef FilePath);

/// An HTTP server which can listen on a single TCP/IP port for HTTP
/// requests and delegate them to the appropriate registered handler.
class HTTPServer {
#ifdef LLVM_ENABLE_HTTPLIB
  std::unique_ptr<httplib::Server> Server;
  unsigned Port = 0;
#endif
public:
  HTTPServer();
  ~HTTPServer();

  /// Returns true only if LLVM has been compiled with a working HTTPServer.
  static bool isAvailable();

  /// Registers a URL pattern routing rule. When the server is listening, each
  /// request is dispatched to the first registered handler whose UrlPathPattern
  /// matches the UrlPath.
  Error get(StringRef UrlPathPattern, HTTPRequestHandler Handler);

  /// Attempts to assign the requested port and interface, returning an Error
  /// upon failure.
  Error bind(unsigned Port, const char *HostInterface = "0.0.0.0");

  /// Attempts to assign any available port and interface, returning either the
  /// port number or an Error upon failure.
  Expected<unsigned> bind(const char *HostInterface = "0.0.0.0");

  /// Attempts to listen for requests on the bound port. Returns an Error if
  /// called before binding a port.
  Error listen();

  /// If the server is listening, stop and unbind the socket.
  void stop();
};
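
// A minimal end-to-end sketch, assuming LLVM was built with
// LLVM_ENABLE_HTTPLIB (the path pattern and port are illustrative):
//
//   HTTPServer Server;
//   cantFail(Server.get(R"(/hello)", [](HTTPServerRequest &Request) {
//     Request.setResponse({/*Code=*/200, "text/plain", "hello\n"});
//   }));
//   cantFail(Server.bind(8080));
//   cantFail(Server.listen()); // blocks until stop() is called
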
} // end namespace llvm

#endif // LLVM_DEBUGINFOD_HTTPSERVER_H
//===- llvm/Debuginfod/DIFetcher.h - Debug info fetcher ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares a DIFetcher implementation for obtaining debug info from
/// debuginfod.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFOD_DIFETCHER_H
#define LLVM_DEBUGINFOD_DIFETCHER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/Symbolize/DIFetcher.h"

namespace llvm {

class DebuginfodDIFetcher : public symbolize::DIFetcher {
public:
  virtual ~DebuginfodDIFetcher() = default;

  /// Fetches the given Build ID using debuginfod and returns a local path to
  /// the resulting debug binary.
  Optional<std::string> fetchBuildID(ArrayRef<uint8_t> BuildID) const override;
};

} // namespace llvm

#endif // LLVM_DEBUGINFOD_DIFETCHER_H
//===-- llvm/Debuginfod/HTTPClient.h - HTTP client library ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the declarations of the HTTPClient library for issuing
/// HTTP requests and handling the responses.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFOD_HTTPCLIENT_H
#define LLVM_DEBUGINFOD_HTTPCLIENT_H

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"

#include <chrono>

namespace llvm {

enum class HTTPMethod { GET };

/// A stateless description of an outbound HTTP request.
struct HTTPRequest {
  SmallString<128> Url;
  SmallVector<std::string, 0> Headers;
  HTTPMethod Method = HTTPMethod::GET;
  bool FollowRedirects = true;
  HTTPRequest(StringRef Url);
};

bool operator==(const HTTPRequest &A, const HTTPRequest &B);

/// A handler for state updates occurring while an HTTPRequest is performed.
/// Can trigger the client to abort the request by returning an Error from any
/// of its methods.
class HTTPResponseHandler {
public:
  /// Processes an additional chunk of bytes of the HTTP response body.
  virtual Error handleBodyChunk(StringRef BodyChunk) = 0;

protected:
  ~HTTPResponseHandler();
};

/// A reusable client that can perform HTTPRequests through a network socket.
class HTTPClient {
#ifdef LLVM_ENABLE_CURL
  void *Curl = nullptr;
#endif

public:
  HTTPClient();
  ~HTTPClient();

  static bool IsInitialized;

  /// Returns true only if LLVM has been compiled with a working HTTPClient.
  static bool isAvailable();

  /// Must be called at the beginning of a program while it is still
  /// single-threaded.
  static void initialize();

  /// Must be called at the end of a program while it is single-threaded.
  static void cleanup();

  /// Sets the timeout for the entire request, in milliseconds. A zero or
  /// negative value means the request never times out.
  void setTimeout(std::chrono::milliseconds Timeout);

  /// Performs the Request, passing response data to the Handler. Returns all
  /// errors which occur during the request. Aborts if an error is returned by a
  /// Handler method.
  Error perform(const HTTPRequest &Request, HTTPResponseHandler &Handler);

  /// Returns the last received response code or zero if none.
  unsigned responseCode();
};
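
// A minimal client sketch, assuming LLVM was built with LLVM_ENABLE_CURL (the
// handler type and URL are illustrative):
//
//   struct BufferingHandler final : public HTTPResponseHandler {
//     std::string Body;
//     Error handleBodyChunk(StringRef BodyChunk) override {
//       Body.append(BodyChunk.begin(), BodyChunk.end());
//       return Error::success();
//     }
//   };
//
//   HTTPClient::initialize();
//   {
//     HTTPClient Client;
//     BufferingHandler Handler;
//     if (Error E = Client.perform(HTTPRequest("http://localhost:8080/hello"),
//                                  Handler))
//       consumeError(std::move(E));
//   }
//   HTTPClient::cleanup();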

} // end namespace llvm

#endif // LLVM_DEBUGINFOD_HTTPCLIENT_H
//===-- llvm/Debuginfod/Debuginfod.h - Debuginfod client --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains several declarations for the debuginfod client and
/// server. The client functions are getDefaultDebuginfodUrls,
/// getCachedOrDownloadArtifact, and several convenience functions for specific
/// artifact types: getCachedOrDownloadSource, getCachedOrDownloadExecutable,
/// and getCachedOrDownloadDebuginfo. For the server, this file declares the
/// DebuginfodLogEntry and DebuginfodServer structs, as well as the
/// DebuginfodLog and DebuginfodCollection classes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFOD_DEBUGINFOD_H
#define LLVM_DEBUGINFOD_DEBUGINFOD_H

#include "HTTPServer.h"

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/BuildID.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/RWMutex.h"
#include "llvm/Support/Timer.h"

#include <chrono>
#include <condition_variable>
#include <optional>
#include <queue>

namespace llvm {

/// Returns false if a debuginfod lookup can be determined to have no chance of
/// succeeding.
bool canUseDebuginfod();

/// Finds the default array of debuginfod server URLs by checking the
/// DEBUGINFOD_URLS environment variable.
SmallVector<StringRef> getDefaultDebuginfodUrls();

/// Finds a default local file caching directory for the debuginfod client,
/// first checking DEBUGINFOD_CACHE_PATH.
Expected<std::string> getDefaultDebuginfodCacheDirectory();

/// Finds a default timeout for debuginfod HTTP requests. Checks the
/// DEBUGINFOD_TIMEOUT environment variable; the default is 90 seconds (90000 ms).
std::chrono::milliseconds getDefaultDebuginfodTimeout();

/// Fetches a specified source file by searching the default local cache
/// directory and server URLs.
Expected<std::string> getCachedOrDownloadSource(object::BuildIDRef ID,
                                                StringRef SourceFilePath);

/// Fetches an executable by searching the default local cache directory and
/// server URLs.
Expected<std::string> getCachedOrDownloadExecutable(object::BuildIDRef ID);

/// Fetches a debug binary by searching the default local cache directory and
/// server URLs.
Expected<std::string> getCachedOrDownloadDebuginfo(object::BuildIDRef ID);
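
// A client-side sketch (the Build ID bytes are illustrative; DEBUGINFOD_URLS
// must name a reachable server for the download path to succeed):
//
//   object::BuildID ID = {0xde, 0xad, 0xbe, 0xef};
//   if (Expected<std::string> Path = getCachedOrDownloadExecutable(ID))
//     outs() << "fetched to " << *Path << "\n";
//   else
//     logAllUnhandledErrors(Path.takeError(), errs(), "debuginfod: ");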

/// Fetches any debuginfod artifact using the default local cache directory and
/// server URLs.
Expected<std::string> getCachedOrDownloadArtifact(StringRef UniqueKey,
                                                  StringRef UrlPath);

/// Fetches any debuginfod artifact using the specified local cache directory,
/// server URLs, and request timeout (in milliseconds). If the artifact is
/// found, uses the UniqueKey for the local cache file.
Expected<std::string> getCachedOrDownloadArtifact(
    StringRef UniqueKey, StringRef UrlPath, StringRef CacheDirectoryPath,
    ArrayRef<StringRef> DebuginfodUrls, std::chrono::milliseconds Timeout);

class ThreadPool;

struct DebuginfodLogEntry {
  std::string Message;
  DebuginfodLogEntry() = default;
  DebuginfodLogEntry(const Twine &Message);
};

class DebuginfodLog {
  std::mutex QueueMutex;
  std::condition_variable QueueCondition;
  std::queue<DebuginfodLogEntry> LogEntryQueue;

public:
  // Adds a log entry to the end of the queue.
  void push(DebuginfodLogEntry Entry);
  // Adds a log entry to the end of the queue.
  void push(const Twine &Message);
  // Blocks until there are log entries in the queue, then pops and returns the
  // first one.
  DebuginfodLogEntry pop();
};

/// Tracks a collection of debuginfod artifacts on the local filesystem.
class DebuginfodCollection {
  SmallVector<std::string, 1> Paths;
  sys::RWMutex BinariesMutex;
  StringMap<std::string> Binaries;
  sys::RWMutex DebugBinariesMutex;
  StringMap<std::string> DebugBinaries;
  Error findBinaries(StringRef Path);
  Expected<std::optional<std::string>> getDebugBinaryPath(object::BuildIDRef);
  Expected<std::optional<std::string>> getBinaryPath(object::BuildIDRef);
  // If the collection has not been updated within the last MinInterval
  // seconds, calls update() and returns true; otherwise returns false. If
  // update() returns an error, returns the error.
  Expected<bool> updateIfStale();
  DebuginfodLog &Log;
  ThreadPool &Pool;
  Timer UpdateTimer;
  sys::Mutex UpdateMutex;

  // Minimum update interval, in seconds, for on-demand updates triggered when a
  // build-id is not found.
  double MinInterval;

public:
  DebuginfodCollection(ArrayRef<StringRef> Paths, DebuginfodLog &Log,
                       ThreadPool &Pool, double MinInterval);
  Error update();
  Error updateForever(std::chrono::milliseconds Interval);
  Expected<std::string> findDebugBinaryPath(object::BuildIDRef);
  Expected<std::string> findBinaryPath(object::BuildIDRef);
};

struct DebuginfodServer {
  HTTPServer Server;
  DebuginfodLog &Log;
  DebuginfodCollection &Collection;
  DebuginfodServer(DebuginfodLog &Log, DebuginfodCollection &Collection);
};

} // end namespace llvm

#endif // LLVM_DEBUGINFOD_DEBUGINFOD_H
//===- llvm/Debuginfod/BuildIDFetcher.h - Build ID fetcher ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares a Build ID fetcher implementation for obtaining debug
/// info from debuginfod.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFOD_BUILDIDFETCHER_H
#define LLVM_DEBUGINFOD_BUILDIDFETCHER_H

#include "llvm/Object/BuildID.h"
#include <optional>

namespace llvm {

class DebuginfodFetcher : public object::BuildIDFetcher {
public:
  DebuginfodFetcher(std::vector<std::string> DebugFileDirectories)
      : BuildIDFetcher(std::move(DebugFileDirectories)) {}
  virtual ~DebuginfodFetcher() = default;

  /// Fetches the given Build ID using debuginfod and returns a local path to
  /// the resulting file.
  std::optional<std::string> fetch(object::BuildIDRef BuildID) const override;
};
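
// A usage sketch (the directory and Build ID are illustrative):
//
//   DebuginfodFetcher Fetcher({"/usr/lib/debug"});
//   object::BuildID ID = {0xde, 0xad, 0xbe, 0xef};
//   if (std::optional<std::string> Path = Fetcher.fetch(ID))
//     outs() << "debug info at " << *Path << "\n";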

} // namespace llvm

#endif // LLVM_DEBUGINFOD_BUILDIDFETCHER_H
//===- RelocationResolver.h - Resolve object file relocations ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a wrapper around all the different types of relocations
// in different file formats, such that a client can handle them in a unified
// manner by only implementing a minimal number of functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_RELOCATIONRESOLVER_H
#define LLVM_OBJECT_RELOCATIONRESOLVER_H

#include <cstdint>
#include <utility>

namespace llvm {
namespace object {

class ObjectFile;
class RelocationRef;

using SupportsRelocation = bool (*)(uint64_t);
using RelocationResolver = uint64_t (*)(uint64_t Type, uint64_t Offset,
                                        uint64_t S, uint64_t LocData,
                                        int64_t Addend);

std::pair<SupportsRelocation, RelocationResolver>
getRelocationResolver(const ObjectFile &Obj);

uint64_t resolveRelocation(RelocationResolver Resolver, const RelocationRef &R,
                           uint64_t S, uint64_t LocData);
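
// Typical use (a sketch; Obj is an ObjectFile and Reloc a RelocationRef
// obtained from its relocation iterators; SymbolValue and LocData are
// computed by the caller):
//
//   auto [Supports, Resolver] = getRelocationResolver(Obj);
//   if (Supports && Supports(Reloc.getType())) {
//     uint64_t Patched = resolveRelocation(Resolver, Reloc, SymbolValue,
//                                          LocData);
//   }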

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_RELOCATIONRESOLVER_H
//===- StackMapParser.h - StackMap Parsing Support --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_STACKMAPPARSER_H
#define LLVM_OBJECT_STACKMAPPARSER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace llvm {

/// A parser for the latest stackmap format.  At the moment, latest=V3.
template <support::endianness Endianness>
class StackMapParser {
public:
  template <typename AccessorT>
  class AccessorIterator {
  public:
    AccessorIterator(AccessorT A) : A(A) {}

    AccessorIterator& operator++() { A = A.next(); return *this; }
    AccessorIterator operator++(int) {
      auto tmp = *this;
      ++*this;
      return tmp;
    }

    bool operator==(const AccessorIterator &Other) const {
      return A.P == Other.A.P;
    }

    bool operator!=(const AccessorIterator &Other) const {
      return !(*this == Other);
    }

    AccessorT& operator*() { return A; }
    AccessorT* operator->() { return &A; }

  private:
    AccessorT A;
  };

  /// Accessor for function records.
  class FunctionAccessor {
    friend class StackMapParser;

  public:
    /// Get the function address.
    uint64_t getFunctionAddress() const {
      return read<uint64_t>(P);
    }

    /// Get the function's stack size.
    uint64_t getStackSize() const {
      return read<uint64_t>(P + sizeof(uint64_t));
    }

    /// Get the number of callsite records.
    uint64_t getRecordCount() const {
      return read<uint64_t>(P + (2 * sizeof(uint64_t)));
    }

  private:
    FunctionAccessor(const uint8_t *P) : P(P) {}

    const static int FunctionAccessorSize = 3 * sizeof(uint64_t);

    FunctionAccessor next() const {
      return FunctionAccessor(P + FunctionAccessorSize);
    }

    const uint8_t *P;
  };

  /// Accessor for constants.
  class ConstantAccessor {
    friend class StackMapParser;

  public:
    /// Return the value of this constant.
    uint64_t getValue() const { return read<uint64_t>(P); }

  private:
    ConstantAccessor(const uint8_t *P) : P(P) {}

    const static int ConstantAccessorSize = sizeof(uint64_t);

    ConstantAccessor next() const {
      return ConstantAccessor(P + ConstantAccessorSize);
    }

    const uint8_t *P;
  };

  enum class LocationKind : uint8_t {
    Register = 1, Direct = 2, Indirect = 3, Constant = 4, ConstantIndex = 5
  };

  /// Accessor for location records.
  class LocationAccessor {
    friend class StackMapParser;
    friend class RecordAccessor;

  public:
    /// Get the Kind for this location.
    LocationKind getKind() const {
      return LocationKind(P[KindOffset]);
    }

    /// Get the Size for this location.
    unsigned getSizeInBytes() const {
      return read<uint16_t>(P + SizeOffset);
    }

    /// Get the Dwarf register number for this location.
    uint16_t getDwarfRegNum() const {
      return read<uint16_t>(P + DwarfRegNumOffset);
    }

    /// Get the small-constant for this location. (Kind must be Constant).
    uint32_t getSmallConstant() const {
      assert(getKind() == LocationKind::Constant && "Not a small constant.");
      return read<uint32_t>(P + SmallConstantOffset);
    }

    /// Get the constant-index for this location. (Kind must be ConstantIndex).
    uint32_t getConstantIndex() const {
      assert(getKind() == LocationKind::ConstantIndex &&
             "Not a constant-index.");
      return read<uint32_t>(P + SmallConstantOffset);
    }

    /// Get the offset for this location. (Kind must be Direct or Indirect).
    int32_t getOffset() const {
      assert((getKind() == LocationKind::Direct ||
              getKind() == LocationKind::Indirect) &&
             "Not direct or indirect.");
      return read<int32_t>(P + SmallConstantOffset);
    }

  private:
    LocationAccessor(const uint8_t *P) : P(P) {}

    LocationAccessor next() const {
      return LocationAccessor(P + LocationAccessorSize);
    }

    static const int KindOffset = 0;
    static const int SizeOffset = KindOffset + sizeof(uint16_t);
    static const int DwarfRegNumOffset = SizeOffset + sizeof(uint16_t);
    static const int SmallConstantOffset = DwarfRegNumOffset + sizeof(uint32_t);
    static const int LocationAccessorSize = sizeof(uint64_t) + sizeof(uint32_t);

    const uint8_t *P;
  };

  /// Accessor for stackmap live-out fields.
  class LiveOutAccessor {
    friend class StackMapParser;
    friend class RecordAccessor;

  public:
    /// Get the Dwarf register number for this live-out.
    uint16_t getDwarfRegNum() const {
      return read<uint16_t>(P + DwarfRegNumOffset);
    }

    /// Get the size in bytes of live [sub]register.
    unsigned getSizeInBytes() const {
      return read<uint8_t>(P + SizeOffset);
    }

  private:
    LiveOutAccessor(const uint8_t *P) : P(P) {}

    LiveOutAccessor next() const {
      return LiveOutAccessor(P + LiveOutAccessorSize);
    }

    static const int DwarfRegNumOffset = 0;
    static const int SizeOffset =
      DwarfRegNumOffset + sizeof(uint16_t) + sizeof(uint8_t);
    static const int LiveOutAccessorSize = sizeof(uint32_t);

    const uint8_t *P;
  };

  /// Accessor for stackmap records.
  class RecordAccessor {
    friend class StackMapParser;

  public:
    using location_iterator = AccessorIterator<LocationAccessor>;
    using liveout_iterator = AccessorIterator<LiveOutAccessor>;

    /// Get the patchpoint/stackmap ID for this record.
    uint64_t getID() const {
      return read<uint64_t>(P + PatchpointIDOffset);
    }

    /// Get the instruction offset (from the start of the containing function)
    /// for this record.
    uint32_t getInstructionOffset() const {
      return read<uint32_t>(P + InstructionOffsetOffset);
    }

    /// Get the number of locations contained in this record.
    uint16_t getNumLocations() const {
      return read<uint16_t>(P + NumLocationsOffset);
    }

    /// Get the location with the given index.
    LocationAccessor getLocation(unsigned LocationIndex) const {
      unsigned LocationOffset =
        LocationListOffset + LocationIndex * LocationSize;
      return LocationAccessor(P + LocationOffset);
    }

    /// Begin iterator for locations.
    location_iterator location_begin() const {
      return location_iterator(getLocation(0));
    }

    /// End iterator for locations.
    location_iterator location_end() const {
      return location_iterator(getLocation(getNumLocations()));
    }

    /// Iterator range for locations.
    iterator_range<location_iterator> locations() const {
      return make_range(location_begin(), location_end());
    }

    /// Get the number of liveouts contained in this record.
    uint16_t getNumLiveOuts() const {
      return read<uint16_t>(P + getNumLiveOutsOffset());
    }

    /// Get the live-out with the given index.
    LiveOutAccessor getLiveOut(unsigned LiveOutIndex) const {
      unsigned LiveOutOffset =
        getNumLiveOutsOffset() + sizeof(uint16_t) + LiveOutIndex * LiveOutSize;
      return LiveOutAccessor(P + LiveOutOffset);
    }

    /// Begin iterator for live-outs.
    liveout_iterator liveouts_begin() const {
      return liveout_iterator(getLiveOut(0));
    }

    /// End iterator for live-outs.
    liveout_iterator liveouts_end() const {
      return liveout_iterator(getLiveOut(getNumLiveOuts()));
    }

    /// Iterator range for live-outs.
    iterator_range<liveout_iterator> liveouts() const {
      return make_range(liveouts_begin(), liveouts_end());
    }

  private:
    RecordAccessor(const uint8_t *P) : P(P) {}

    unsigned getNumLiveOutsOffset() const {
      unsigned LocOffset =
          ((LocationListOffset + LocationSize * getNumLocations()) + 7) & ~0x7;
      return LocOffset + sizeof(uint16_t);
    }

    unsigned getSizeInBytes() const {
      unsigned RecordSize =
        getNumLiveOutsOffset() + sizeof(uint16_t) + getNumLiveOuts() * LiveOutSize;
      return (RecordSize + 7) & ~0x7;
    }

    RecordAccessor next() const {
      return RecordAccessor(P + getSizeInBytes());
    }

    static const unsigned PatchpointIDOffset = 0;
    static const unsigned InstructionOffsetOffset =
      PatchpointIDOffset + sizeof(uint64_t);
    static const unsigned NumLocationsOffset =
      InstructionOffsetOffset + sizeof(uint32_t) + sizeof(uint16_t);
    static const unsigned LocationListOffset =
      NumLocationsOffset + sizeof(uint16_t);
    static const unsigned LocationSize = sizeof(uint64_t) + sizeof(uint32_t);
    static const unsigned LiveOutSize = sizeof(uint32_t);

    const uint8_t *P;
  };

  /// Construct a parser for a version-3 stackmap. StackMap data will be read
  /// from the given array.
  StackMapParser(ArrayRef<uint8_t> StackMapSection)
      : StackMapSection(StackMapSection) {
    ConstantsListOffset = FunctionListOffset + getNumFunctions() * FunctionSize;

    assert(StackMapSection[0] == 3 &&
           "StackMapParser can only parse version 3 stackmaps");

    unsigned CurrentRecordOffset =
      ConstantsListOffset + getNumConstants() * ConstantSize;

    for (unsigned I = 0, E = getNumRecords(); I != E; ++I) {
      StackMapRecordOffsets.push_back(CurrentRecordOffset);
      CurrentRecordOffset +=
        RecordAccessor(&StackMapSection[CurrentRecordOffset]).getSizeInBytes();
    }
  }

  /// Validates the header of the specified stack map section.
  static Error validateHeader(ArrayRef<uint8_t> StackMapSection) {
    // See the comment for StackMaps::emitStackmapHeader().
    if (StackMapSection.size() < 16)
      return object::createError(
          "the stack map section size (" + Twine(StackMapSection.size()) +
          ") is less than the minimum possible size of its header (16)");

    unsigned Version = StackMapSection[0];
    if (Version != 3)
      return object::createError(
          "the version (" + Twine(Version) +
          ") of the stack map section is unsupported, the "
          "supported version is 3");
    return Error::success();
  }
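
  // A hedged usage sketch (Contents is assumed to hold the raw bytes of a
  // little-endian stackmaps section):
  //
  //   if (Error E = StackMapParser<support::little>::validateHeader(Contents))
  //     report_fatal_error(std::move(E));
  //   StackMapParser<support::little> Parser(Contents);
  //   for (auto &R : Parser.records())
  //     outs() << "record " << R.getID() << " has " << R.getNumLocations()
  //            << " locations\n";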

  using function_iterator = AccessorIterator<FunctionAccessor>;
  using constant_iterator = AccessorIterator<ConstantAccessor>;
  using record_iterator = AccessorIterator<RecordAccessor>;

  /// Get the version number of this stackmap. (Always returns 3).
  unsigned getVersion() const { return 3; }

  /// Get the number of functions in the stack map.
  uint32_t getNumFunctions() const {
    return read<uint32_t>(&StackMapSection[NumFunctionsOffset]);
  }

  /// Get the number of large constants in the stack map.
  uint32_t getNumConstants() const {
    return read<uint32_t>(&StackMapSection[NumConstantsOffset]);
  }

  /// Get the number of stackmap records in the stackmap.
  uint32_t getNumRecords() const {
    return read<uint32_t>(&StackMapSection[NumRecordsOffset]);
  }

  /// Return a FunctionAccessor for the given function index.
  FunctionAccessor getFunction(unsigned FunctionIndex) const {
    return FunctionAccessor(StackMapSection.data() +
                            getFunctionOffset(FunctionIndex));
  }

  /// Begin iterator for functions.
  function_iterator functions_begin() const {
    return function_iterator(getFunction(0));
  }

  /// End iterator for functions.
  function_iterator functions_end() const {
    return function_iterator(
             FunctionAccessor(StackMapSection.data() +
                              getFunctionOffset(getNumFunctions())));
  }

  /// Iterator range for functions.
  iterator_range<function_iterator> functions() const {
    return make_range(functions_begin(), functions_end());
  }

  /// Return the large constant at the given index.
  ConstantAccessor getConstant(unsigned ConstantIndex) const {
    return ConstantAccessor(StackMapSection.data() +
                            getConstantOffset(ConstantIndex));
  }

  /// Begin iterator for constants.
  constant_iterator constants_begin() const {
    return constant_iterator(getConstant(0));
  }

  /// End iterator for constants.
  constant_iterator constants_end() const {
    return constant_iterator(
             ConstantAccessor(StackMapSection.data() +
                              getConstantOffset(getNumConstants())));
  }

  /// Iterator range for constants.
  iterator_range<constant_iterator> constants() const {
    return make_range(constants_begin(), constants_end());
  }

  /// Return a RecordAccessor for the given record index.
  RecordAccessor getRecord(unsigned RecordIndex) const {
    std::size_t RecordOffset = StackMapRecordOffsets[RecordIndex];
    return RecordAccessor(StackMapSection.data() + RecordOffset);
  }

  /// Begin iterator for records.
  record_iterator records_begin() const {
    if (getNumRecords() == 0)
      return record_iterator(RecordAccessor(nullptr));
    return record_iterator(getRecord(0));
  }

  /// End iterator for records.
  record_iterator records_end() const {
    // Records need to be handled specially, since we cache the start addresses
    // for them: We can't just compute the 1-past-the-end address, we have to
    // look at the last record and use the 'next' method.
    if (getNumRecords() == 0)
      return record_iterator(RecordAccessor(nullptr));
    return record_iterator(getRecord(getNumRecords() - 1).next());
  }

  /// Iterator range for records.
  iterator_range<record_iterator> records() const {
    return make_range(records_begin(), records_end());
  }

private:
  template <typename T>
  static T read(const uint8_t *P) {
    return support::endian::read<T, Endianness, 1>(P);
  }

  static const unsigned HeaderOffset = 0;
  static const unsigned NumFunctionsOffset = HeaderOffset + sizeof(uint32_t);
  static const unsigned NumConstantsOffset = NumFunctionsOffset + sizeof(uint32_t);
  static const unsigned NumRecordsOffset = NumConstantsOffset + sizeof(uint32_t);
  static const unsigned FunctionListOffset = NumRecordsOffset + sizeof(uint32_t);

  static const unsigned FunctionSize = 3 * sizeof(uint64_t);
  static const unsigned ConstantSize = sizeof(uint64_t);

  std::size_t getFunctionOffset(unsigned FunctionIndex) const {
    return FunctionListOffset + FunctionIndex * FunctionSize;
  }

  std::size_t getConstantOffset(unsigned ConstantIndex) const {
    return ConstantsListOffset + ConstantIndex * ConstantSize;
  }

  ArrayRef<uint8_t> StackMapSection;
  unsigned ConstantsListOffset;
  std::vector<unsigned> StackMapRecordOffsets;
};

} // end namespace llvm

#endif // LLVM_OBJECT_STACKMAPPARSER_H
//===- ModuleSymbolTable.h - symbol table for in-memory IR ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class represents a symbol table built from in-memory IR. It provides
// access to GlobalValues and should only be used if such access is required
// (e.g. in the LTO implementation).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_MODULESYMBOLTABLE_H
#define LLVM_OBJECT_MODULESYMBOLTABLE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class GlobalValue;
class Module;

class ModuleSymbolTable {
public:
  using AsmSymbol = std::pair<std::string, uint32_t>;
  using Symbol = PointerUnion<GlobalValue *, AsmSymbol *>;

private:
  Module *FirstMod = nullptr;

  SpecificBumpPtrAllocator<AsmSymbol> AsmSymbols;
  std::vector<Symbol> SymTab;
  Mangler Mang;

public:
  ArrayRef<Symbol> symbols() const { return SymTab; }
  void addModule(Module *M);

  void printSymbolName(raw_ostream &OS, Symbol S) const;
  uint32_t getSymbolFlags(Symbol S) const;

  /// Parse inline ASM and collect the symbols that are defined or referenced in
  /// the current module.
  ///
  /// For each found symbol, call \p AsmSymbol with the name of the symbol found
  /// and the associated flags.
  static void CollectAsmSymbols(
      const Module &M,
      function_ref<void(StringRef, object::BasicSymbolRef::Flags)> AsmSymbol);

  /// Parse inline ASM and collect the symvers directives that are defined in
  /// the current module.
  ///
  /// For each found symbol, call \p AsmSymver with the name of the symbol and
  /// its alias.
  static void
  CollectAsmSymvers(const Module &M,
                    function_ref<void(StringRef, StringRef)> AsmSymver);
};
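
// A usage sketch: enumerating the symbols of an in-memory Module M (M must
// outlive the symbol table):
//
//   ModuleSymbolTable SymTab;
//   SymTab.addModule(&M);
//   for (ModuleSymbolTable::Symbol S : SymTab.symbols()) {
//     SymTab.printSymbolName(outs(), S);
//     outs() << " flags=" << SymTab.getSymbolFlags(S) << "\n";
//   }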

} // end namespace llvm

#endif // LLVM_OBJECT_MODULESYMBOLTABLE_H
//===- Minidump.h - Minidump object file implementation ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_MINIDUMP_H
#define LLVM_OBJECT_MINIDUMP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/BinaryFormat/Minidump.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace object {

/// A class providing access to the contents of a minidump file.
class MinidumpFile : public Binary {
public:
  /// Construct a new MinidumpFile object from the given memory buffer. Returns
  /// an error if this file cannot be identified as a minidump file, or if its
  /// contents are badly corrupted (i.e. we cannot read the stream directory).
  static Expected<std::unique_ptr<MinidumpFile>> create(MemoryBufferRef Source);

  static bool classof(const Binary *B) { return B->isMinidump(); }

  /// Returns the contents of the minidump header.
  const minidump::Header &header() const { return Header; }

  /// Returns the list of streams (stream directory entries) in this file.
  ArrayRef<minidump::Directory> streams() const { return Streams; }

  /// Returns the raw contents of the stream given by the directory entry.
  ArrayRef<uint8_t> getRawStream(const minidump::Directory &Stream) const {
    return getData().slice(Stream.Location.RVA, Stream.Location.DataSize);
  }

  /// Returns the raw contents of the stream of the given type, or std::nullopt
  /// if the file does not contain a stream of this type.
  std::optional<ArrayRef<uint8_t>>
  getRawStream(minidump::StreamType Type) const;

  /// Returns the raw contents of an object given by the LocationDescriptor. An
  /// error is returned if the descriptor points outside of the minidump file.
  Expected<ArrayRef<uint8_t>>
  getRawData(minidump::LocationDescriptor Desc) const {
    return getDataSlice(getData(), Desc.RVA, Desc.DataSize);
  }

  /// Returns the minidump string at the given offset. An error is returned if
  /// we fail to parse the string, or the string is invalid UTF16.
  Expected<std::string> getString(size_t Offset) const;

  /// Returns the contents of the SystemInfo stream, cast to the appropriate
  /// type. An error is returned if the file does not contain this stream, or
  /// the stream is smaller than the size of the SystemInfo structure. The
  /// internal consistency of the stream is not checked in any way.
  Expected<const minidump::SystemInfo &> getSystemInfo() const {
    return getStream<minidump::SystemInfo>(minidump::StreamType::SystemInfo);
  }

  /// Returns the module list embedded in the ModuleList stream. An error is
  /// returned if the file does not contain this stream, or if the stream is
  /// not large enough to contain the number of modules declared in the stream
  /// header. The consistency of the Module entries themselves is not checked in
  /// any way.
  Expected<ArrayRef<minidump::Module>> getModuleList() const {
    return getListStream<minidump::Module>(minidump::StreamType::ModuleList);
  }

  /// Returns the thread list embedded in the ThreadList stream. An error is
  /// returned if the file does not contain this stream, or if the stream is
  /// not large enough to contain the number of threads declared in the stream
  /// header. The consistency of the Thread entries themselves is not checked in
  /// any way.
  Expected<ArrayRef<minidump::Thread>> getThreadList() const {
    return getListStream<minidump::Thread>(minidump::StreamType::ThreadList);
  }

  /// Returns the contents of the Exception stream.  An error is returned if the
  /// file does not contain this stream, or the stream is smaller than the size
  /// of the ExceptionStream structure.  The internal consistency of the stream
  /// is not checked in any way.
  Expected<const minidump::ExceptionStream &> getExceptionStream() const {
    return getStream<minidump::ExceptionStream>(
        minidump::StreamType::Exception);
  }

  /// Returns the list of descriptors embedded in the MemoryList stream. The
  /// descriptors provide the content of interesting regions of memory at the
  /// time the minidump was taken. An error is returned if the file does not
  /// contain this stream, or if the stream is not large enough to contain the
  /// number of memory descriptors declared in the stream header. The
  /// consistency of the MemoryDescriptor entries themselves is not checked in
  /// any way.
  Expected<ArrayRef<minidump::MemoryDescriptor>> getMemoryList() const {
    return getListStream<minidump::MemoryDescriptor>(
        minidump::StreamType::MemoryList);
  }

  class MemoryInfoIterator
      : public iterator_facade_base<MemoryInfoIterator,
                                    std::forward_iterator_tag,
                                    minidump::MemoryInfo> {
  public:
    MemoryInfoIterator(ArrayRef<uint8_t> Storage, size_t Stride)
        : Storage(Storage), Stride(Stride) {
      assert(Storage.size() % Stride == 0);
    }

    bool operator==(const MemoryInfoIterator &R) const {
      return Storage.size() == R.Storage.size();
    }

    const minidump::MemoryInfo &operator*() const {
      assert(Storage.size() >= sizeof(minidump::MemoryInfo));
      return *reinterpret_cast<const minidump::MemoryInfo *>(Storage.data());
    }

    MemoryInfoIterator &operator++() {
      Storage = Storage.drop_front(Stride);
      return *this;
    }

  private:
    ArrayRef<uint8_t> Storage;
    size_t Stride;
  };

  /// Returns the list of descriptors embedded in the MemoryInfoList stream. The
  /// descriptors provide properties (e.g. permissions) of interesting regions
  /// of memory at the time the minidump was taken. An error is returned if the
  /// file does not contain this stream, or if the stream is not large enough to
  /// contain the number of memory descriptors declared in the stream header.
  /// The consistency of the MemoryInfoList entries themselves is not checked
  /// in any way.
  Expected<iterator_range<MemoryInfoIterator>> getMemoryInfoList() const;

private:
  static Error createError(StringRef Str) {
    return make_error<GenericBinaryError>(Str, object_error::parse_failed);
  }

  static Error createEOFError() {
    return make_error<GenericBinaryError>("Unexpected EOF",
                                          object_error::unexpected_eof);
  }

  /// Return a slice of the given data array, with bounds checking.
  static Expected<ArrayRef<uint8_t>> getDataSlice(ArrayRef<uint8_t> Data,
                                                  size_t Offset, size_t Size);

  /// Return the slice of the given data array as an array of objects of the
  /// given type. The function checks that the input array is large enough to
  /// contain the correct number of objects of the given type.
  template <typename T>
  static Expected<ArrayRef<T>> getDataSliceAs(ArrayRef<uint8_t> Data,
                                              size_t Offset, size_t Count);

  MinidumpFile(MemoryBufferRef Source, const minidump::Header &Header,
               ArrayRef<minidump::Directory> Streams,
               DenseMap<minidump::StreamType, std::size_t> StreamMap)
      : Binary(ID_Minidump, Source), Header(Header), Streams(Streams),
        StreamMap(std::move(StreamMap)) {}

  ArrayRef<uint8_t> getData() const {
    return arrayRefFromStringRef(Data.getBuffer());
  }

  /// Return the stream of the given type, cast to the appropriate type. Checks
  /// that the stream is large enough to hold an object of this type.
  template <typename T>
  Expected<const T &> getStream(minidump::StreamType Stream) const;

  /// Return the contents of a stream which contains a list of fixed-size items,
  /// prefixed by the list size.
  template <typename T>
  Expected<ArrayRef<T>> getListStream(minidump::StreamType Stream) const;

  const minidump::Header &Header;
  ArrayRef<minidump::Directory> Streams;
  DenseMap<minidump::StreamType, std::size_t> StreamMap;
};

template <typename T>
Expected<const T &> MinidumpFile::getStream(minidump::StreamType Type) const {
  if (std::optional<ArrayRef<uint8_t>> Stream = getRawStream(Type)) {
    if (Stream->size() >= sizeof(T))
      return *reinterpret_cast<const T *>(Stream->data());
    return createEOFError();
  }
  return createError("No such stream");
}

template <typename T>
Expected<ArrayRef<T>> MinidumpFile::getDataSliceAs(ArrayRef<uint8_t> Data,
                                                   size_t Offset,
                                                   size_t Count) {
  // Check for overflow.
  if (Count > std::numeric_limits<size_t>::max() / sizeof(T))
    return createEOFError();
  Expected<ArrayRef<uint8_t>> Slice =
      getDataSlice(Data, Offset, sizeof(T) * Count);
  if (!Slice)
    return Slice.takeError();
  return ArrayRef<T>(reinterpret_cast<const T *>(Slice->data()), Count);
}
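
// A usage sketch (the file path is illustrative):
//
//   auto BufOrErr = MemoryBuffer::getFile("core.dmp");
//   if (!BufOrErr)
//     report_fatal_error(Twine(BufOrErr.getError().message()));
//   auto FileOrErr = MinidumpFile::create((*BufOrErr)->getMemBufferRef());
//   if (!FileOrErr)
//     report_fatal_error(FileOrErr.takeError());
//   for (const minidump::Directory &D : (*FileOrErr)->streams())
//     outs() << "stream of " << D.Location.DataSize << " bytes\n";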

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_MINIDUMP_H
//===- TapiFile.h - Text-based Dynamic Library Stub -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the TapiFile interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_TAPIFILE_H
#define LLVM_OBJECT_TAPIFILE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/TextAPI/Architecture.h"

namespace llvm {

class raw_ostream;

namespace MachO {

class InterfaceFile;

}

namespace object {

class TapiFile : public SymbolicFile {
public:
  TapiFile(MemoryBufferRef Source, const MachO::InterfaceFile &Interface,
           MachO::Architecture Arch);
  ~TapiFile() override;

  void moveSymbolNext(DataRefImpl &DRI) const override;

  Error printSymbolName(raw_ostream &OS, DataRefImpl DRI) const override;

  Expected<uint32_t> getSymbolFlags(DataRefImpl DRI) const override;

  basic_symbol_iterator symbol_begin() const override;

  basic_symbol_iterator symbol_end() const override;

  Expected<SymbolRef::Type> getSymbolType(DataRefImpl DRI) const;

  static bool classof(const Binary *v) { return v->isTapiFile(); }

  bool is64Bit() const override { return MachO::is64Bit(Arch); }

private:
  struct Symbol {
    StringRef Prefix;
    StringRef Name;
    uint32_t Flags;
    SymbolRef::Type Type;

    constexpr Symbol(StringRef Prefix, StringRef Name, uint32_t Flags,
                     SymbolRef::Type Type)
        : Prefix(Prefix), Name(Name), Flags(Flags), Type(Type) {}
  };

  std::vector<Symbol> Symbols;
  MachO::Architecture Arch;
};

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_TAPIFILE_H
//===- ELFTypes.h - Endian specific types for ELF ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_ELFTYPES_H
#define LLVM_OBJECT_ELFTYPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <type_traits>

namespace llvm {
namespace object {

using support::endianness;

template <class ELFT> struct Elf_Ehdr_Impl;
template <class ELFT> struct Elf_Shdr_Impl;
template <class ELFT> struct Elf_Sym_Impl;
template <class ELFT> struct Elf_Dyn_Impl;
template <class ELFT> struct Elf_Phdr_Impl;
template <class ELFT, bool isRela> struct Elf_Rel_Impl;
template <class ELFT> struct Elf_Verdef_Impl;
template <class ELFT> struct Elf_Verdaux_Impl;
template <class ELFT> struct Elf_Verneed_Impl;
template <class ELFT> struct Elf_Vernaux_Impl;
template <class ELFT> struct Elf_Versym_Impl;
template <class ELFT> struct Elf_Hash_Impl;
template <class ELFT> struct Elf_GnuHash_Impl;
template <class ELFT> struct Elf_Chdr_Impl;
template <class ELFT> struct Elf_Nhdr_Impl;
template <class ELFT> class Elf_Note_Impl;
template <class ELFT> class Elf_Note_Iterator_Impl;
template <class ELFT> struct Elf_CGProfile_Impl;

template <endianness E, bool Is64> struct ELFType {
private:
  template <typename Ty>
  using packed = support::detail::packed_endian_specific_integral<Ty, E, 1>;

public:
  static const endianness TargetEndianness = E;
  static const bool Is64Bits = Is64;

  using uint = std::conditional_t<Is64, uint64_t, uint32_t>;
  using Ehdr = Elf_Ehdr_Impl<ELFType<E, Is64>>;
  using Shdr = Elf_Shdr_Impl<ELFType<E, Is64>>;
  using Sym = Elf_Sym_Impl<ELFType<E, Is64>>;
  using Dyn = Elf_Dyn_Impl<ELFType<E, Is64>>;
  using Phdr = Elf_Phdr_Impl<ELFType<E, Is64>>;
  using Rel = Elf_Rel_Impl<ELFType<E, Is64>, false>;
  using Rela = Elf_Rel_Impl<ELFType<E, Is64>, true>;
  using Relr = packed<uint>;
  using Verdef = Elf_Verdef_Impl<ELFType<E, Is64>>;
  using Verdaux = Elf_Verdaux_Impl<ELFType<E, Is64>>;
  using Verneed = Elf_Verneed_Impl<ELFType<E, Is64>>;
  using Vernaux = Elf_Vernaux_Impl<ELFType<E, Is64>>;
  using Versym = Elf_Versym_Impl<ELFType<E, Is64>>;
  using Hash = Elf_Hash_Impl<ELFType<E, Is64>>;
  using GnuHash = Elf_GnuHash_Impl<ELFType<E, Is64>>;
  using Chdr = Elf_Chdr_Impl<ELFType<E, Is64>>;
  using Nhdr = Elf_Nhdr_Impl<ELFType<E, Is64>>;
  using Note = Elf_Note_Impl<ELFType<E, Is64>>;
  using NoteIterator = Elf_Note_Iterator_Impl<ELFType<E, Is64>>;
  using CGProfile = Elf_CGProfile_Impl<ELFType<E, Is64>>;
  using DynRange = ArrayRef<Dyn>;
  using ShdrRange = ArrayRef<Shdr>;
  using SymRange = ArrayRef<Sym>;
  using RelRange = ArrayRef<Rel>;
  using RelaRange = ArrayRef<Rela>;
  using RelrRange = ArrayRef<Relr>;
  using PhdrRange = ArrayRef<Phdr>;

  using Half = packed<uint16_t>;
  using Word = packed<uint32_t>;
  using Sword = packed<int32_t>;
  using Xword = packed<uint64_t>;
  using Sxword = packed<int64_t>;
  using Addr = packed<uint>;
  using Off = packed<uint>;
};

using ELF32LE = ELFType<support::little, false>;
using ELF32BE = ELFType<support::big, false>;
using ELF64LE = ELFType<support::little, true>;
using ELF64BE = ELFType<support::big, true>;

// The packed field types above use an alignment of 1 (unaligned access),
// since ELF files inside archives are only guaranteed 2-byte alignment,
// which is less than these multi-byte fields would otherwise require.

// I really don't like doing this, but the alternative is copypasta.
#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)                                       \
  using Elf_Addr = typename ELFT::Addr;                                        \
  using Elf_Off = typename ELFT::Off;                                          \
  using Elf_Half = typename ELFT::Half;                                        \
  using Elf_Word = typename ELFT::Word;                                        \
  using Elf_Sword = typename ELFT::Sword;                                      \
  using Elf_Xword = typename ELFT::Xword;                                      \
  using Elf_Sxword = typename ELFT::Sxword;                                    \
  using uintX_t = typename ELFT::uint;                                         \
  using Elf_Ehdr = typename ELFT::Ehdr;                                        \
  using Elf_Shdr = typename ELFT::Shdr;                                        \
  using Elf_Sym = typename ELFT::Sym;                                          \
  using Elf_Dyn = typename ELFT::Dyn;                                          \
  using Elf_Phdr = typename ELFT::Phdr;                                        \
  using Elf_Rel = typename ELFT::Rel;                                          \
  using Elf_Rela = typename ELFT::Rela;                                        \
  using Elf_Relr = typename ELFT::Relr;                                        \
  using Elf_Verdef = typename ELFT::Verdef;                                    \
  using Elf_Verdaux = typename ELFT::Verdaux;                                  \
  using Elf_Verneed = typename ELFT::Verneed;                                  \
  using Elf_Vernaux = typename ELFT::Vernaux;                                  \
  using Elf_Versym = typename ELFT::Versym;                                    \
  using Elf_Hash = typename ELFT::Hash;                                        \
  using Elf_GnuHash = typename ELFT::GnuHash;                                  \
  using Elf_Chdr = typename ELFT::Chdr;                                        \
  using Elf_Nhdr = typename ELFT::Nhdr;                                        \
  using Elf_Note = typename ELFT::Note;                                        \
  using Elf_Note_Iterator = typename ELFT::NoteIterator;                       \
  using Elf_CGProfile = typename ELFT::CGProfile;                              \
  using Elf_Dyn_Range = typename ELFT::DynRange;                               \
  using Elf_Shdr_Range = typename ELFT::ShdrRange;                             \
  using Elf_Sym_Range = typename ELFT::SymRange;                               \
  using Elf_Rel_Range = typename ELFT::RelRange;                               \
  using Elf_Rela_Range = typename ELFT::RelaRange;                             \
  using Elf_Relr_Range = typename ELFT::RelrRange;                             \
  using Elf_Phdr_Range = typename ELFT::PhdrRange;

#define LLVM_ELF_COMMA ,
#define LLVM_ELF_IMPORT_TYPES(E, W)                                            \
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFType<E LLVM_ELF_COMMA W>)

// Section header.
template <class ELFT> struct Elf_Shdr_Base;

template <endianness TargetEndianness>
struct Elf_Shdr_Base<ELFType<TargetEndianness, false>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  Elf_Word sh_name;      // Section name (index into string table)
  Elf_Word sh_type;      // Section type (SHT_*)
  Elf_Word sh_flags;     // Section flags (SHF_*)
  Elf_Addr sh_addr;      // Address where section is to be loaded
  Elf_Off sh_offset;     // File offset of section data, in bytes
  Elf_Word sh_size;      // Size of section, in bytes
  Elf_Word sh_link;      // Section type-specific header table index link
  Elf_Word sh_info;      // Section type-specific extra information
  Elf_Word sh_addralign; // Section address alignment
  Elf_Word sh_entsize;   // Size of records contained within the section
};

template <endianness TargetEndianness>
struct Elf_Shdr_Base<ELFType<TargetEndianness, true>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  Elf_Word sh_name;       // Section name (index into string table)
  Elf_Word sh_type;       // Section type (SHT_*)
  Elf_Xword sh_flags;     // Section flags (SHF_*)
  Elf_Addr sh_addr;       // Address where section is to be loaded
  Elf_Off sh_offset;      // File offset of section data, in bytes
  Elf_Xword sh_size;      // Size of section, in bytes
  Elf_Word sh_link;       // Section type-specific header table index link
  Elf_Word sh_info;       // Section type-specific extra information
  Elf_Xword sh_addralign; // Section address alignment
  Elf_Xword sh_entsize;   // Size of records contained within the section
};

template <class ELFT>
struct Elf_Shdr_Impl : Elf_Shdr_Base<ELFT> {
  using Elf_Shdr_Base<ELFT>::sh_entsize;
  using Elf_Shdr_Base<ELFT>::sh_size;

  /// Get the number of fixed-size entries this section contains, or 0 if
  /// sh_entsize is zero.
  unsigned getEntityCount() const {
    if (sh_entsize == 0)
      return 0;
    return sh_size / sh_entsize;
  }
};

template <class ELFT> struct Elf_Sym_Base;

template <endianness TargetEndianness>
struct Elf_Sym_Base<ELFType<TargetEndianness, false>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  Elf_Word st_name;       // Symbol name (index into string table)
  Elf_Addr st_value;      // Value or address associated with the symbol
  Elf_Word st_size;       // Size of the symbol
  unsigned char st_info;  // Symbol's type and binding attributes
  unsigned char st_other; // Visibility (STV_*) in the low 2 bits; the rest
                          // is processor-specific
  Elf_Half st_shndx;      // Which section (header table index) it's defined in
};

template <endianness TargetEndianness>
struct Elf_Sym_Base<ELFType<TargetEndianness, true>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  Elf_Word st_name;       // Symbol name (index into string table)
  unsigned char st_info;  // Symbol's type and binding attributes
  unsigned char st_other; // Visibility (STV_*) in the low 2 bits; the rest
                          // is processor-specific
  Elf_Half st_shndx;      // Which section (header table index) it's defined in
  Elf_Addr st_value;      // Value or address associated with the symbol
  Elf_Xword st_size;      // Size of the symbol
};

template <class ELFT>
struct Elf_Sym_Impl : Elf_Sym_Base<ELFT> {
  using Elf_Sym_Base<ELFT>::st_info;
  using Elf_Sym_Base<ELFT>::st_shndx;
  using Elf_Sym_Base<ELFT>::st_other;
  using Elf_Sym_Base<ELFT>::st_value;

  // These accessors and mutators correspond to the ELF32_ST_BIND,
  // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
  unsigned char getBinding() const { return st_info >> 4; }
  unsigned char getType() const { return st_info & 0x0f; }
  uint64_t getValue() const { return st_value; }
  void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
  void setType(unsigned char t) { setBindingAndType(getBinding(), t); }

  void setBindingAndType(unsigned char b, unsigned char t) {
    st_info = (b << 4) + (t & 0x0f);
  }
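
  // Worked example (illustrative): a global function symbol packs
  // STB_GLOBAL (1) and STT_FUNC (2) as st_info == (1 << 4) | 2 == 0x12,
  // so getBinding() yields 1 and getType() yields 2.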

  /// Access to the STV_xxx flag stored in the first two bits of st_other.
  /// STV_DEFAULT: 0
  /// STV_INTERNAL: 1
  /// STV_HIDDEN: 2
  /// STV_PROTECTED: 3
  unsigned char getVisibility() const { return st_other & 0x3; }
  void setVisibility(unsigned char v) {
    assert(v < 4 && "Invalid value for visibility");
    st_other = (st_other & ~0x3) | v;
  }

  bool isAbsolute() const { return st_shndx == ELF::SHN_ABS; }

  bool isCommon() const {
    return getType() == ELF::STT_COMMON || st_shndx == ELF::SHN_COMMON;
  }

  bool isDefined() const { return !isUndefined(); }

  bool isProcessorSpecific() const {
    return st_shndx >= ELF::SHN_LOPROC && st_shndx <= ELF::SHN_HIPROC;
  }

  bool isOSSpecific() const {
    return st_shndx >= ELF::SHN_LOOS && st_shndx <= ELF::SHN_HIOS;
  }

  bool isReserved() const {
    // ELF::SHN_HIRESERVE is 0xffff so st_shndx <= ELF::SHN_HIRESERVE is always
    // true and some compilers warn about it.
    return st_shndx >= ELF::SHN_LORESERVE;
  }

  bool isUndefined() const { return st_shndx == ELF::SHN_UNDEF; }

  bool isExternal() const {
    return getBinding() != ELF::STB_LOCAL;
  }

  Expected<StringRef> getName(StringRef StrTab) const;
};

template <class ELFT>
Expected<StringRef> Elf_Sym_Impl<ELFT>::getName(StringRef StrTab) const {
  uint32_t Offset = this->st_name;
  if (Offset >= StrTab.size())
    return createStringError(object_error::parse_failed,
                             "st_name (0x%" PRIx32
                             ") is past the end of the string table"
                             " of size 0x%zx",
                             Offset, StrTab.size());
  return StringRef(StrTab.data() + Offset);
}
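
// Illustrative use of getName() (a sketch; assumes Sym is an Elf_Sym and
// StrTab holds the contents of the associated string table section):
//   Expected<StringRef> NameOrErr = Sym.getName(StrTab);
//   if (!NameOrErr)
//     return NameOrErr.takeError();
//   StringRef Name = *NameOrErr;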

/// Elf_Versym: This is the structure of entries in the SHT_GNU_versym section
/// (.gnu.version). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Versym_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Half vs_index; // Version index with flags (e.g. VERSYM_HIDDEN)
};

/// Elf_Verdef: This is the structure of entries in the SHT_GNU_verdef section
/// (.gnu.version_d). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verdef_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Half vd_version; // Version of this structure (e.g. VER_DEF_CURRENT)
  Elf_Half vd_flags;   // Bitwise flags (VER_DEF_*)
  Elf_Half vd_ndx;     // Version index, used in .gnu.version entries
  Elf_Half vd_cnt;     // Number of Verdaux entries
  Elf_Word vd_hash;    // Hash of name
  Elf_Word vd_aux;     // Offset to the first Verdaux entry (in bytes)
  Elf_Word vd_next;    // Offset to the next Verdef entry (in bytes)

  /// Get the first Verdaux entry for this Verdef.
  const Elf_Verdaux *getAux() const {
    return reinterpret_cast<const Elf_Verdaux *>((const char *)this + vd_aux);
  }
};

/// Elf_Verdaux: This is the structure of auxiliary data in the SHT_GNU_verdef
/// section (.gnu.version_d). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verdaux_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Word vda_name; // Version name (offset in string table)
  Elf_Word vda_next; // Offset to next Verdaux entry (in bytes)
};

/// Elf_Verneed: This is the structure of entries in the SHT_GNU_verneed
/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verneed_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Half vn_version; // Version of this structure (e.g. VER_NEED_CURRENT)
  Elf_Half vn_cnt;     // Number of associated Vernaux entries
  Elf_Word vn_file;    // Library name (string table offset)
  Elf_Word vn_aux;     // Offset to first Vernaux entry (in bytes)
  Elf_Word vn_next;    // Offset to next Verneed entry (in bytes)
};

/// Elf_Vernaux: This is the structure of auxiliary data in SHT_GNU_verneed
/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Vernaux_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Word vna_hash;  // Hash of dependency name
  Elf_Half vna_flags; // Bitwise Flags (VER_FLAG_*)
  Elf_Half vna_other; // Version index, used in .gnu.version entries
  Elf_Word vna_name;  // Dependency name
  Elf_Word vna_next;  // Offset to next Vernaux entry (in bytes)
};

/// Elf_Dyn_Base: This structure matches the form of entries in the dynamic
///               table section (.dynamic).
template <class ELFT> struct Elf_Dyn_Base;

template <endianness TargetEndianness>
struct Elf_Dyn_Base<ELFType<TargetEndianness, false>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  Elf_Sword d_tag;
  union {
    Elf_Word d_val;
    Elf_Addr d_ptr;
  } d_un;
};

template <endianness TargetEndianness>
struct Elf_Dyn_Base<ELFType<TargetEndianness, true>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  Elf_Sxword d_tag;
  union {
    Elf_Xword d_val;
    Elf_Addr d_ptr;
  } d_un;
};

/// Elf_Dyn_Impl: This inherits from Elf_Dyn_Base, adding getters.
template <class ELFT>
struct Elf_Dyn_Impl : Elf_Dyn_Base<ELFT> {
  using Elf_Dyn_Base<ELFT>::d_tag;
  using Elf_Dyn_Base<ELFT>::d_un;
  using intX_t = std::conditional_t<ELFT::Is64Bits, int64_t, int32_t>;
  using uintX_t = std::conditional_t<ELFT::Is64Bits, uint64_t, uint32_t>;
  intX_t getTag() const { return d_tag; }
  uintX_t getVal() const { return d_un.d_val; }
  uintX_t getPtr() const { return d_un.d_ptr; }
};
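
// Illustrative scan of a dynamic table (a sketch; assumes Dyns is an
// Elf_Dyn_Range for this ELFT):
//   uint64_t StrTabAddr = 0;
//   for (const auto &D : Dyns) {
//     if (D.getTag() == ELF::DT_NULL)
//       break;                    // DT_NULL terminates the table.
//     if (D.getTag() == ELF::DT_STRTAB)
//       StrTabAddr = D.getPtr();  // DT_STRTAB holds an address (d_ptr).
//   }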

template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  static const bool IsRela = false;
  Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
  Elf_Word r_info;   // Symbol table index and type of relocation to apply

  uint32_t getRInfo(bool isMips64EL) const {
    assert(!isMips64EL);
    return r_info;
  }
  void setRInfo(uint32_t R, bool IsMips64EL) {
    assert(!IsMips64EL);
    r_info = R;
  }

  // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
  // and ELF32_R_INFO macros defined in the ELF specification:
  uint32_t getSymbol(bool isMips64EL) const {
    return this->getRInfo(isMips64EL) >> 8;
  }
  unsigned char getType(bool isMips64EL) const {
    return (unsigned char)(this->getRInfo(isMips64EL) & 0x0ff);
  }
  void setSymbol(uint32_t s, bool IsMips64EL) {
    setSymbolAndType(s, getType(IsMips64EL), IsMips64EL);
  }
  void setType(unsigned char t, bool IsMips64EL) {
    setSymbolAndType(getSymbol(IsMips64EL), t, IsMips64EL);
  }
  void setSymbolAndType(uint32_t s, unsigned char t, bool IsMips64EL) {
    this->setRInfo((s << 8) + t, IsMips64EL);
  }
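
  // Worked example (illustrative): symbol index 5 with relocation type
  // R_386_32 (1) encodes as r_info == (5 << 8) + 1 == 0x501, so
  // getSymbol() yields 5 and getType() yields 1.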
};

template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, true>
    : public Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  static const bool IsRela = true;
  Elf_Sword r_addend; // Compute value for relocatable field by adding this
};

template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  static const bool IsRela = false;
  Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
  Elf_Xword r_info;  // Symbol table index and type of relocation to apply

  uint64_t getRInfo(bool isMips64EL) const {
    uint64_t t = r_info;
    if (!isMips64EL)
      return t;
    // Mips64 little endian has a "special" encoding of r_info: instead of one
    // 64-bit little-endian number, it is a 32-bit little-endian number
    // followed by a 32-bit big-endian number.
    return (t << 32) | ((t >> 8) & 0xff000000) | ((t >> 24) & 0x00ff0000) |
           ((t >> 40) & 0x0000ff00) | ((t >> 56) & 0x000000ff);
  }
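
  // Worked example (illustrative): for the on-disk byte sequence
  // 14 00 00 00 00 00 00 2e, the little-endian read yields
  // r_info == 0x2e00000000000014, and getRInfo(true) rearranges it into the
  // canonical 0x000000140000002e: symbol index 0x14, relocation type 0x2e.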

  void setRInfo(uint64_t R, bool IsMips64EL) {
    if (IsMips64EL)
      r_info = (R >> 32) | ((R & 0xff000000) << 8) | ((R & 0x00ff0000) << 24) |
               ((R & 0x0000ff00) << 40) | ((R & 0x000000ff) << 56);
    else
      r_info = R;
  }

  // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
  // and ELF64_R_INFO macros defined in the ELF specification:
  uint32_t getSymbol(bool isMips64EL) const {
    return (uint32_t)(this->getRInfo(isMips64EL) >> 32);
  }
  uint32_t getType(bool isMips64EL) const {
    return (uint32_t)(this->getRInfo(isMips64EL) & 0xffffffffL);
  }
  void setSymbol(uint32_t s, bool IsMips64EL) {
    setSymbolAndType(s, getType(IsMips64EL), IsMips64EL);
  }
  void setType(uint32_t t, bool IsMips64EL) {
    setSymbolAndType(getSymbol(IsMips64EL), t, IsMips64EL);
  }
  void setSymbolAndType(uint32_t s, uint32_t t, bool IsMips64EL) {
    this->setRInfo(((uint64_t)s << 32) + (t & 0xffffffffL), IsMips64EL);
  }
};

template <endianness TargetEndianness>
struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, true>
    : public Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  static const bool IsRela = true;
  Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
};

template <class ELFT>
struct Elf_Ehdr_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
  Elf_Half e_type;                       // Type of file (see ET_*)
  Elf_Half e_machine;   // Required architecture for this file (see EM_*)
  Elf_Word e_version;   // Must be equal to 1
  Elf_Addr e_entry;     // Address to jump to in order to start program
  Elf_Off e_phoff;      // Program header table's file offset, in bytes
  Elf_Off e_shoff;      // Section header table's file offset, in bytes
  Elf_Word e_flags;     // Processor-specific flags
  Elf_Half e_ehsize;    // Size of ELF header, in bytes
  Elf_Half e_phentsize; // Size of an entry in the program header table
  Elf_Half e_phnum;     // Number of entries in the program header table
  Elf_Half e_shentsize; // Size of an entry in the section header table
  Elf_Half e_shnum;     // Number of entries in the section header table
  Elf_Half e_shstrndx;  // Section header table index of section name
                        // string table

  bool checkMagic() const {
    return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
  }
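
  // ELF::ElfMagic is "\177ELF", so checkMagic() tests the first four
  // identification bytes; e.g. e_ident beginning 7f 45 4c 46 passes.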

  unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
  unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
};

template <endianness TargetEndianness>
struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  Elf_Word p_type;   // Type of segment
  Elf_Off p_offset;  // FileOffset where segment is located, in bytes
  Elf_Addr p_vaddr;  // Virtual Address of beginning of segment
  Elf_Addr p_paddr;  // Physical address of beginning of segment (OS-specific)
  Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
  Elf_Word p_memsz;  // Num. of bytes in mem image of segment (may be zero)
  Elf_Word p_flags;  // Segment flags
  Elf_Word p_align;  // Segment alignment constraint
};

template <endianness TargetEndianness>
struct Elf_Phdr_Impl<ELFType<TargetEndianness, true>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  Elf_Word p_type;    // Type of segment
  Elf_Word p_flags;   // Segment flags
  Elf_Off p_offset;   // FileOffset where segment is located, in bytes
  Elf_Addr p_vaddr;   // Virtual Address of beginning of segment
  Elf_Addr p_paddr;   // Physical address of beginning of segment (OS-specific)
  Elf_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
  Elf_Xword p_memsz;  // Num. of bytes in mem image of segment (may be zero)
  Elf_Xword p_align;  // Segment alignment constraint
};

// ELFT needed for endianness.
template <class ELFT>
struct Elf_Hash_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Word nbucket;
  Elf_Word nchain;

  ArrayRef<Elf_Word> buckets() const {
    return ArrayRef<Elf_Word>(&nbucket + 2, &nbucket + 2 + nbucket);
  }

  ArrayRef<Elf_Word> chains() const {
    return ArrayRef<Elf_Word>(&nbucket + 2 + nbucket,
                              &nbucket + 2 + nbucket + nchain);
  }
};
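
// Illustrative SysV hash lookup (a sketch; assumes Hash is an Elf_Hash and a
// hashSysV() helper computing the standard ELF symbol hash, such as the one
// in llvm/Object/ELF.h):
//   for (Elf_Word I = Hash.buckets()[hashSysV(Name) % Hash.nbucket];
//        I != ELF::STN_UNDEF; I = Hash.chains()[I])
//     /* compare dynamic symbol table entry I against Name */;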

// .gnu.hash section
template <class ELFT>
struct Elf_GnuHash_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Word nbuckets;
  Elf_Word symndx;
  Elf_Word maskwords;
  Elf_Word shift2;

  ArrayRef<Elf_Off> filter() const {
    return ArrayRef<Elf_Off>(reinterpret_cast<const Elf_Off *>(&shift2 + 1),
                             maskwords);
  }

  ArrayRef<Elf_Word> buckets() const {
    return ArrayRef<Elf_Word>(
        reinterpret_cast<const Elf_Word *>(filter().end()), nbuckets);
  }

  ArrayRef<Elf_Word> values(unsigned DynamicSymCount) const {
    assert(DynamicSymCount >= symndx);
    return ArrayRef<Elf_Word>(buckets().end(), DynamicSymCount - symndx);
  }
};
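
// In-memory layout of a SHT_GNU_HASH section (illustrative):
//   [nbuckets][symndx][maskwords][shift2]  header (the fields above)
//   [maskwords x Elf_Off]                  Bloom filter      -> filter()
//   [nbuckets x Elf_Word]                  hash buckets      -> buckets()
//   [DynSymCount - symndx x Elf_Word]      hash values       -> values()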

// Compressed section headers.
// http://www.sco.com/developers/gabi/latest/ch4.sheader.html#compression_header
template <endianness TargetEndianness>
struct Elf_Chdr_Impl<ELFType<TargetEndianness, false>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  Elf_Word ch_type;
  Elf_Word ch_size;
  Elf_Word ch_addralign;
};

template <endianness TargetEndianness>
struct Elf_Chdr_Impl<ELFType<TargetEndianness, true>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  Elf_Word ch_type;
  Elf_Word ch_reserved;
  Elf_Xword ch_size;
  Elf_Xword ch_addralign;
};

/// Note header
template <class ELFT>
struct Elf_Nhdr_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Word n_namesz;
  Elf_Word n_descsz;
  Elf_Word n_type;

  /// Get the size of the note, including name, descriptor, and padding. Both
  /// the start and the end of the descriptor are aligned by the section
  /// alignment. In practice many 64-bit systems deviate from the generic ABI by
  /// using sh_addralign=4.
  size_t getSize(size_t Align) const {
    return alignToPowerOf2(sizeof(*this) + n_namesz, Align) +
           alignToPowerOf2(n_descsz, Align);
  }
};
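
// Worked example (illustrative): a note named "GNU" (n_namesz == 4,
// including the NUL) with a 4-byte descriptor and Align == 4 occupies
// alignToPowerOf2(12 + 4, 4) + alignToPowerOf2(4, 4) == 16 + 4 == 20 bytes,
// where 12 is the size of the three Elf_Word header fields.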

/// An ELF note.
///
/// Wraps a note header, providing methods for accessing the name and
/// descriptor safely.
template <class ELFT>
class Elf_Note_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  const Elf_Nhdr_Impl<ELFT> &Nhdr;

  template <class NoteIteratorELFT> friend class Elf_Note_Iterator_Impl;

public:
  Elf_Note_Impl(const Elf_Nhdr_Impl<ELFT> &Nhdr) : Nhdr(Nhdr) {}

  /// Get the note's name, excluding the terminating null byte.
  StringRef getName() const {
    if (!Nhdr.n_namesz)
      return StringRef();
    return StringRef(reinterpret_cast<const char *>(&Nhdr) + sizeof(Nhdr),
                     Nhdr.n_namesz - 1);
  }

  /// Get the note's descriptor.
  ArrayRef<uint8_t> getDesc(size_t Align) const {
    if (!Nhdr.n_descsz)
      return ArrayRef<uint8_t>();
    return ArrayRef<uint8_t>(
        reinterpret_cast<const uint8_t *>(&Nhdr) +
            alignToPowerOf2(sizeof(Nhdr) + Nhdr.n_namesz, Align),
        Nhdr.n_descsz);
  }

  /// Get the note's descriptor as StringRef
  StringRef getDescAsStringRef(size_t Align) const {
    ArrayRef<uint8_t> Desc = getDesc(Align);
    return StringRef(reinterpret_cast<const char *>(Desc.data()), Desc.size());
  }

  /// Get the note's type.
  Elf_Word getType() const { return Nhdr.n_type; }
};

template <class ELFT> class Elf_Note_Iterator_Impl {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = Elf_Note_Impl<ELFT>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

private:
  // Nhdr being a nullptr marks the end of iteration.
  const Elf_Nhdr_Impl<ELFT> *Nhdr = nullptr;
  size_t RemainingSize = 0u;
  size_t Align = 0;
  Error *Err = nullptr;

  template <class ELFFileELFT> friend class ELFFile;

  // Stop iteration and indicate an overflow.
  void stopWithOverflowError() {
    Nhdr = nullptr;
    *Err = make_error<StringError>("ELF note overflows container",
                                   object_error::parse_failed);
  }

  // Advance Nhdr by NoteSize bytes, starting from NhdrPos.
  //
  // Assumes NoteSize <= RemainingSize. Ensures Nhdr->getSize() <= RemainingSize
  // upon returning. Handles stopping iteration when reaching the end of the
  // container, either cleanly or with an overflow error.
  void advanceNhdr(const uint8_t *NhdrPos, size_t NoteSize) {
    RemainingSize -= NoteSize;
    if (RemainingSize == 0u) {
      // Ensure that if the iterator walks to the end, the error is checked
      // afterwards.
      *Err = Error::success();
      Nhdr = nullptr;
    } else if (sizeof(*Nhdr) > RemainingSize)
      stopWithOverflowError();
    else {
      Nhdr = reinterpret_cast<const Elf_Nhdr_Impl<ELFT> *>(NhdrPos + NoteSize);
      if (Nhdr->getSize(Align) > RemainingSize)
        stopWithOverflowError();
      else
        *Err = Error::success();
    }
  }

  Elf_Note_Iterator_Impl() = default;
  explicit Elf_Note_Iterator_Impl(Error &Err) : Err(&Err) {}
  Elf_Note_Iterator_Impl(const uint8_t *Start, size_t Size, size_t Align,
                         Error &Err)
      : RemainingSize(Size), Align(Align), Err(&Err) {
    consumeError(std::move(Err));
    assert(Start && "ELF note iterator starting at NULL");
    advanceNhdr(Start, 0u);
  }

public:
  Elf_Note_Iterator_Impl &operator++() {
    assert(Nhdr && "incremented ELF note end iterator");
    const uint8_t *NhdrPos = reinterpret_cast<const uint8_t *>(Nhdr);
    size_t NoteSize = Nhdr->getSize(Align);
    advanceNhdr(NhdrPos, NoteSize);
    return *this;
  }
  bool operator==(Elf_Note_Iterator_Impl Other) const {
    if (!Nhdr && Other.Err)
      (void)(bool)(*Other.Err);
    if (!Other.Nhdr && Err)
      (void)(bool)(*Err);
    return Nhdr == Other.Nhdr;
  }
  bool operator!=(Elf_Note_Iterator_Impl Other) const {
    return !(*this == Other);
  }
  Elf_Note_Impl<ELFT> operator*() const {
    assert(Nhdr && "dereferenced ELF note end iterator");
    return Elf_Note_Impl<ELFT>(*Nhdr);
  }
};
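
// Illustrative iteration over notes (a sketch; iterators are normally
// obtained via ELFFile<ELFT>::notes(), since the constructors above are
// private):
//   Error Err = Error::success();
//   for (auto Note : File.notes(Phdr, Err)) {
//     StringRef Name = Note.getName();
//     ArrayRef<uint8_t> Desc = Note.getDesc(Phdr.p_align);
//   }
//   if (Err)
//     return std::move(Err); // Err must be checked after iteration ends.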

template <class ELFT> struct Elf_CGProfile_Impl {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Xword cgp_weight;
};

// MIPS .reginfo section
template <class ELFT>
struct Elf_Mips_RegInfo;

template <support::endianness TargetEndianness>
struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
  Elf_Word ri_gprmask;     // bit-mask of used general registers
  Elf_Word ri_cprmask[4];  // bit-mask of used co-processor registers
  Elf_Addr ri_gp_value;    // gp register value
};

template <support::endianness TargetEndianness>
struct Elf_Mips_RegInfo<ELFType<TargetEndianness, true>> {
  LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
  Elf_Word ri_gprmask;     // bit-mask of used general registers
  Elf_Word ri_pad;         // unused padding field
  Elf_Word ri_cprmask[4];  // bit-mask of used co-processor registers
  Elf_Addr ri_gp_value;    // gp register value
};

// .MIPS.options section
template <class ELFT> struct Elf_Mips_Options {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  uint8_t kind;     // Determines interpretation of variable part of descriptor
  uint8_t size;     // Byte size of descriptor, including this header
  Elf_Half section; // Section header index of section affected,
                    // or 0 for global options
  Elf_Word info;    // Kind-specific information

  Elf_Mips_RegInfo<ELFT> &getRegInfo() {
    assert(kind == ELF::ODK_REGINFO);
    return *reinterpret_cast<Elf_Mips_RegInfo<ELFT> *>(
        (uint8_t *)this + sizeof(Elf_Mips_Options));
  }
  const Elf_Mips_RegInfo<ELFT> &getRegInfo() const {
    return const_cast<Elf_Mips_Options *>(this)->getRegInfo();
  }
};

// .MIPS.abiflags section content
template <class ELFT> struct Elf_Mips_ABIFlags {
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
  Elf_Half version;  // Version of the structure
  uint8_t isa_level; // ISA level: 1-5, 32, and 64
  uint8_t isa_rev;   // ISA revision (0 for MIPS I - MIPS V)
  uint8_t gpr_size;  // General purpose registers size
  uint8_t cpr1_size; // Co-processor 1 registers size
  uint8_t cpr2_size; // Co-processor 2 registers size
  uint8_t fp_abi;    // Floating-point ABI flag
  Elf_Word isa_ext;  // Processor-specific extension
  Elf_Word ases;     // ASEs flags
  Elf_Word flags1;   // General flags
  Elf_Word flags2;   // General flags
};

// Struct representing the BBAddrMap for one function.
struct BBAddrMap {
  uint64_t Addr; // Function address
  // Struct representing the BBAddrMap information for one basic block.
  struct BBEntry {
    struct Metadata {
      bool HasReturn : 1;         // If this block ends with a return (or tail
                                  // call).
      bool HasTailCall : 1;       // If this block ends with a tail call.
      bool IsEHPad : 1;           // If this is an exception handling block.
      bool CanFallThrough : 1;    // If this block can fall through to its next.
      bool HasIndirectBranch : 1; // If this block ends with an indirect branch
                                  // (branch via a register).

      bool operator==(const Metadata &Other) const {
        return HasReturn == Other.HasReturn &&
               HasTailCall == Other.HasTailCall && IsEHPad == Other.IsEHPad &&
               CanFallThrough == Other.CanFallThrough &&
               HasIndirectBranch == Other.HasIndirectBranch;
      }

      // Encodes this struct as a uint32_t value.
      uint32_t encode() const {
        return static_cast<uint32_t>(HasReturn) |
               (static_cast<uint32_t>(HasTailCall) << 1) |
               (static_cast<uint32_t>(IsEHPad) << 2) |
               (static_cast<uint32_t>(CanFallThrough) << 3) |
               (static_cast<uint32_t>(HasIndirectBranch) << 4);
      }

      // Decodes and returns a Metadata struct from a uint32_t value.
      static Expected<Metadata> decode(uint32_t V) {
        Metadata MD{/*HasReturn=*/static_cast<bool>(V & 1),
                    /*HasTailCall=*/static_cast<bool>(V & (1 << 1)),
                    /*IsEHPad=*/static_cast<bool>(V & (1 << 2)),
                    /*CanFallThrough=*/static_cast<bool>(V & (1 << 3)),
                    /*HasIndirectBranch=*/static_cast<bool>(V & (1 << 4))};
        if (MD.encode() != V)
          return createStringError(
              std::error_code(), "invalid encoding for BBEntry::Metadata: 0x%x",
              V);
        return MD;
      }
    };
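
    // Worked example (illustrative): HasReturn and CanFallThrough set (bits
    // 0 and 3) encode to 0x9, and decode(0x9) round-trips; decode(0x20)
    // fails because bit 5 is not a defined metadata field.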

    uint32_t ID;     // Unique ID of this basic block.
    uint32_t Offset; // Offset of basic block relative to function start.
    uint32_t Size;   // Size of the basic block.
    Metadata MD;     // Metadata for this basic block.

    BBEntry(uint32_t ID, uint32_t Offset, uint32_t Size, Metadata MD)
        : ID(ID), Offset(Offset), Size(Size), MD(MD) {}

    bool operator==(const BBEntry &Other) const {
      return ID == Other.ID && Offset == Other.Offset && Size == Other.Size &&
             MD == Other.MD;
    }

    bool hasReturn() const { return MD.HasReturn; }
    bool hasTailCall() const { return MD.HasTailCall; }
    bool isEHPad() const { return MD.IsEHPad; }
    bool canFallThrough() const { return MD.CanFallThrough; }
  };
  std::vector<BBEntry> BBEntries; // Basic block entries for this function.

  // Equality operator for unit testing.
  bool operator==(const BBAddrMap &Other) const {
    return Addr == Other.Addr && std::equal(BBEntries.begin(), BBEntries.end(),
                                            Other.BBEntries.begin());
  }
};

} // end namespace object.
} // end namespace llvm.

#endif // LLVM_OBJECT_ELFTYPES_H
//===- Archive.h - ar archive file format -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ar archive file format class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_ARCHIVE_H
#define LLVM_OBJECT_ARCHIVE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/fallible_iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cassert>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

namespace llvm {
namespace object {

const char ArchiveMagic[] = "!<arch>\n";
const char ThinArchiveMagic[] = "!<thin>\n";
const char BigArchiveMagic[] = "<bigaf>\n";

class Archive;

class AbstractArchiveMemberHeader {
protected:
  AbstractArchiveMemberHeader(const Archive *Parent) : Parent(Parent) {}

public:
  friend class Archive;
  virtual std::unique_ptr<AbstractArchiveMemberHeader> clone() const = 0;
  virtual ~AbstractArchiveMemberHeader() = default;

  /// Get the name without looking up long names.
  virtual Expected<StringRef> getRawName() const = 0;
  virtual StringRef getRawAccessMode() const = 0;
  virtual StringRef getRawLastModified() const = 0;
  virtual StringRef getRawUID() const = 0;
  virtual StringRef getRawGID() const = 0;

  /// Get the name, looking up long names if necessary.
  virtual Expected<StringRef> getName(uint64_t Size) const = 0;
  virtual Expected<uint64_t> getSize() const = 0;
  virtual uint64_t getOffset() const = 0;

  /// Get next file member location.
  virtual Expected<const char *> getNextChildLoc() const = 0;
  virtual Expected<bool> isThin() const = 0;

  Expected<sys::fs::perms> getAccessMode() const;
  Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const;
  Expected<unsigned> getUID() const;
  Expected<unsigned> getGID() const;

  /// Returns the size in bytes of the format-defined member header of the
  /// concrete archive type.
  virtual uint64_t getSizeOf() const = 0;

  const Archive *Parent;
};

template <typename T>
class CommonArchiveMemberHeader : public AbstractArchiveMemberHeader {
public:
  CommonArchiveMemberHeader(const Archive *Parent, const T *RawHeaderPtr)
      : AbstractArchiveMemberHeader(Parent), ArMemHdr(RawHeaderPtr) {}
  StringRef getRawAccessMode() const override;
  StringRef getRawLastModified() const override;
  StringRef getRawUID() const override;
  StringRef getRawGID() const override;

  uint64_t getOffset() const override;
  uint64_t getSizeOf() const override { return sizeof(T); }

  T const *ArMemHdr;
};

struct UnixArMemHdrType {
  char Name[16];
  char LastModified[12];
  char UID[6];
  char GID[6];
  char AccessMode[8];
  char Size[10]; ///< Size of data, not including header or padding.
  char Terminator[2];
};

class ArchiveMemberHeader : public CommonArchiveMemberHeader<UnixArMemHdrType> {
public:
  ArchiveMemberHeader(const Archive *Parent, const char *RawHeaderPtr,
                      uint64_t Size, Error *Err);

  std::unique_ptr<AbstractArchiveMemberHeader> clone() const override {
    return std::make_unique<ArchiveMemberHeader>(*this);
  }

  Expected<StringRef> getRawName() const override;

  Expected<StringRef> getName(uint64_t Size) const override;
  Expected<uint64_t> getSize() const override;
  Expected<const char *> getNextChildLoc() const override;
  Expected<bool> isThin() const override;
};

// File Member Header
struct BigArMemHdrType {
  char Size[20];       // File member size in decimal
  char NextOffset[20]; // Next member offset in decimal
  char PrevOffset[20]; // Previous member offset in decimal
  char LastModified[12];
  char UID[12];
  char GID[12];
  char AccessMode[12];
  char NameLen[4]; // File member name length in decimal
  union {
    char Name[2]; // Start of member name
    char Terminator[2];
  };
};

// Define file member header of AIX big archive.
class BigArchiveMemberHeader
    : public CommonArchiveMemberHeader<BigArMemHdrType> {

public:
  BigArchiveMemberHeader(Archive const *Parent, const char *RawHeaderPtr,
                         uint64_t Size, Error *Err);
  std::unique_ptr<AbstractArchiveMemberHeader> clone() const override {
    return std::make_unique<BigArchiveMemberHeader>(*this);
  }

  Expected<StringRef> getRawName() const override;
  Expected<uint64_t> getRawNameSize() const;

  Expected<StringRef> getName(uint64_t Size) const override;
  Expected<uint64_t> getSize() const override;
  Expected<const char *> getNextChildLoc() const override;
  Expected<uint64_t> getNextOffset() const;
  Expected<bool> isThin() const override { return false; }
};

class Archive : public Binary {
  virtual void anchor();

public:
  class Child {
    friend Archive;
    friend AbstractArchiveMemberHeader;

    const Archive *Parent;
    std::unique_ptr<AbstractArchiveMemberHeader> Header;
    /// Includes header but not padding byte.
    StringRef Data;
    /// Offset from Data to the start of the file.
    uint16_t StartOfFile;

    Expected<bool> isThinMember() const;

  public:
    Child(const Archive *Parent, const char *Start, Error *Err);
    Child(const Archive *Parent, StringRef Data, uint16_t StartOfFile);

    Child(const Child &C)
        : Parent(C.Parent), Data(C.Data), StartOfFile(C.StartOfFile) {
      if (C.Header)
        Header = C.Header->clone();
    }

    Child(Child &&C) {
      Parent = std::move(C.Parent);
      Header = std::move(C.Header);
      Data = C.Data;
      StartOfFile = C.StartOfFile;
    }

    Child &operator=(Child &&C) noexcept {
      if (&C == this)
        return *this;

      Parent = std::move(C.Parent);
      Header = std::move(C.Header);
      Data = C.Data;
      StartOfFile = C.StartOfFile;

      return *this;
    }

    Child &operator=(const Child &C) {
      if (&C == this)
        return *this;

      Parent = C.Parent;
      if (C.Header)
        Header = C.Header->clone();
      Data = C.Data;
      StartOfFile = C.StartOfFile;

      return *this;
    }

    bool operator==(const Child &other) const {
      assert(!Parent || !other.Parent || Parent == other.Parent);
      return Data.begin() == other.Data.begin();
    }

    const Archive *getParent() const { return Parent; }
    Expected<Child> getNext() const;

    Expected<StringRef> getName() const;
    Expected<std::string> getFullName() const;
    Expected<StringRef> getRawName() const { return Header->getRawName(); }

    Expected<sys::TimePoint<std::chrono::seconds>> getLastModified() const {
      return Header->getLastModified();
    }

    StringRef getRawLastModified() const {
      return Header->getRawLastModified();
    }

    Expected<unsigned> getUID() const { return Header->getUID(); }
    Expected<unsigned> getGID() const { return Header->getGID(); }

    Expected<sys::fs::perms> getAccessMode() const {
      return Header->getAccessMode();
    }

    /// \return the size of the archive member without the header or padding.
    Expected<uint64_t> getSize() const;
    /// \return the size in the archive header for this member.
    Expected<uint64_t> getRawSize() const;

    Expected<StringRef> getBuffer() const;
    uint64_t getChildOffset() const;
    uint64_t getDataOffset() const { return getChildOffset() + StartOfFile; }

    Expected<MemoryBufferRef> getMemoryBufferRef() const;

    Expected<std::unique_ptr<Binary>>
    getAsBinary(LLVMContext *Context = nullptr) const;
  };

  class ChildFallibleIterator {
    Child C;

  public:
    ChildFallibleIterator() : C(Child(nullptr, nullptr, nullptr)) {}
    ChildFallibleIterator(const Child &C) : C(C) {}

    const Child *operator->() const { return &C; }
    const Child &operator*() const { return C; }

    bool operator==(const ChildFallibleIterator &other) const {
      // Ignore errors here: if an error occurred during increment, then the
      // iterator will have been set to child_end(), and the following
      // comparison should do the right thing.
      return C == other.C;
    }

    bool operator!=(const ChildFallibleIterator &other) const {
      return !(*this == other);
    }

    Error inc() {
      auto NextChild = C.getNext();
      if (!NextChild)
        return NextChild.takeError();
      C = std::move(*NextChild);
      return Error::success();
    }
  };

  using child_iterator = fallible_iterator<ChildFallibleIterator>;

  class Symbol {
    const Archive *Parent;
    uint32_t SymbolIndex;
    uint32_t StringIndex; // Extra index to the string.

  public:
    Symbol(const Archive *p, uint32_t symi, uint32_t stri)
        : Parent(p), SymbolIndex(symi), StringIndex(stri) {}

    bool operator==(const Symbol &other) const {
      return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
    }

    StringRef getName() const;
    Expected<Child> getMember() const;
    Symbol getNext() const;
    bool isECSymbol() const;
  };

  class symbol_iterator {
    Symbol symbol;

  public:
    symbol_iterator(const Symbol &s) : symbol(s) {}

    const Symbol *operator->() const { return &symbol; }
    const Symbol &operator*() const { return symbol; }

    bool operator==(const symbol_iterator &other) const {
      return symbol == other.symbol;
    }

    bool operator!=(const symbol_iterator &other) const {
      return !(*this == other);
    }

    symbol_iterator &operator++() { // Preincrement
      symbol = symbol.getNext();
      return *this;
    }
  };

  Archive(MemoryBufferRef Source, Error &Err);
  static Expected<std::unique_ptr<Archive>> create(MemoryBufferRef Source);

  /// Size field is 10 decimal digits long
  static const uint64_t MaxMemberSize = 9999999999;

  enum Kind { K_GNU, K_GNU64, K_BSD, K_DARWIN, K_DARWIN64, K_COFF, K_AIXBIG };

  Kind kind() const { return (Kind)Format; }
  bool isThin() const { return IsThin; }
  static object::Archive::Kind getDefaultKindForHost();

  child_iterator child_begin(Error &Err, bool SkipInternal = true) const;
  child_iterator child_end() const;
  iterator_range<child_iterator> children(Error &Err,
                                          bool SkipInternal = true) const {
    return make_range(child_begin(Err, SkipInternal), child_end());
  }
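
  // Illustrative iteration over members (a sketch; assumes A is a
  // successfully created Archive):
  //   Error Err = Error::success();
  //   for (const Archive::Child &C : A.children(Err)) {
  //     Expected<StringRef> NameOrErr = C.getName();
  //     if (!NameOrErr)
  //       return NameOrErr.takeError();
  //   }
  //   if (Err)
  //     return std::move(Err); // Err must be checked after the loop.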

  symbol_iterator symbol_begin() const;
  symbol_iterator symbol_end() const;
  iterator_range<symbol_iterator> symbols() const {
    return make_range(symbol_begin(), symbol_end());
  }

  Expected<iterator_range<symbol_iterator>> ec_symbols() const;

  static bool classof(Binary const *v) { return v->isArchive(); }

  // Check if a symbol is in the archive.
  Expected<std::optional<Child>> findSym(StringRef name) const;

  virtual bool isEmpty() const;
  bool hasSymbolTable() const;
  StringRef getSymbolTable() const { return SymbolTable; }
  StringRef getStringTable() const { return StringTable; }
  uint32_t getNumberOfSymbols() const;
  uint32_t getNumberOfECSymbols() const;
  virtual uint64_t getFirstChildOffset() const { return getArchiveMagicLen(); }

  std::vector<std::unique_ptr<MemoryBuffer>> takeThinBuffers() {
    return std::move(ThinBuffers);
  }

  std::unique_ptr<AbstractArchiveMemberHeader>
  createArchiveMemberHeader(const char *RawHeaderPtr, uint64_t Size,
                            Error *Err) const;

protected:
  uint64_t getArchiveMagicLen() const;
  void setFirstRegular(const Child &C);

  StringRef SymbolTable;
  StringRef ECSymbolTable;
  StringRef StringTable;

private:
  StringRef FirstRegularData;
  uint16_t FirstRegularStartOfFile = -1;

  unsigned Format : 3;
  unsigned IsThin : 1;
  mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers;
};

class BigArchive : public Archive {
public:
  /// Fixed-Length Header.
  struct FixLenHdr {
    char Magic[sizeof(BigArchiveMagic) - 1]; ///< Big archive magic string.
    char MemOffset[20];                      ///< Offset to member table.
    char GlobSymOffset[20];                  ///< Offset to global symbol table.
    char GlobSym64Offset[20]; ///< Offset to global symbol table for 64-bit
                              ///< objects.
    char FirstChildOffset[20]; ///< Offset to first archive member.
    char LastChildOffset[20];  ///< Offset to last archive member.
    char FreeOffset[20];       ///< Offset to first mem on free list.
  };

  const FixLenHdr *ArFixLenHdr;
  uint64_t FirstChildOffset = 0;
  uint64_t LastChildOffset = 0;
  std::string MergedGlobalSymtabBuf;

public:
  BigArchive(MemoryBufferRef Source, Error &Err);
  uint64_t getFirstChildOffset() const override { return FirstChildOffset; }
  uint64_t getLastChildOffset() const { return LastChildOffset; }
  bool isEmpty() const override { return getFirstChildOffset() == 0; }
};

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_ARCHIVE_H
//===-- WindowsResource.h ---------------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file declares the .res file class.  .res files are intermediate
// products of the typical resource-compilation process on Windows.  This
// process is as follows:
//
// .rc file(s) ---(rc.exe)---> .res file(s) ---(cvtres.exe)---> COFF file
//
// .rc files are human-readable scripts that list all resources a program uses.
//
// They are compiled into .res files, which are a list of the resources in
// binary form.
//
// Finally the data stored in the .res is compiled into a COFF file, where it
// is organized in a directory tree structure for optimized access by the
// program during runtime.
//
// Ref: msdn.microsoft.com/en-us/library/windows/desktop/ms648007(v=vs.85).aspx
//
//===---------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_WINDOWSRESOURCE_H
#define LLVM_OBJECT_WINDOWSRESOURCE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

#include <map>

namespace llvm {

class raw_ostream;
class ScopedPrinter;

namespace object {

class WindowsResource;
class ResourceSectionRef;
struct coff_resource_dir_table;

const size_t WIN_RES_MAGIC_SIZE = 16;
const size_t WIN_RES_NULL_ENTRY_SIZE = 16;
const uint32_t WIN_RES_HEADER_ALIGNMENT = 4;
const uint32_t WIN_RES_DATA_ALIGNMENT = 4;
const uint16_t WIN_RES_PURE_MOVEABLE = 0x0030;

struct WinResHeaderPrefix {
  support::ulittle32_t DataSize;
  support::ulittle32_t HeaderSize;
};

// Type and Name may each be either an integer ID or a string.  This struct is
// only used in the case where they are both IDs.
struct WinResIDs {
  uint16_t TypeFlag;
  support::ulittle16_t TypeID;
  uint16_t NameFlag;
  support::ulittle16_t NameID;

  void setType(uint16_t ID) {
    TypeFlag = 0xffff;
    TypeID = ID;
  }

  void setName(uint16_t ID) {
    NameFlag = 0xffff;
    NameID = ID;
  }
};

struct WinResHeaderSuffix {
  support::ulittle32_t DataVersion;
  support::ulittle16_t MemoryFlags;
  support::ulittle16_t Language;
  support::ulittle32_t Version;
  support::ulittle32_t Characteristics;
};
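
// On disk, each .res entry is laid out as follows (illustrative):
//   WinResHeaderPrefix | Type (ID or null-terminated UTF-16 string)
//   | Name (ID or string) | padding to a 4-byte boundary
//   | WinResHeaderSuffix | DataSize bytes of data | padding to 4 bytes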

class EmptyResError : public GenericBinaryError {
public:
  EmptyResError(Twine Msg, object_error ECOverride)
      : GenericBinaryError(Msg, ECOverride) {}
};

class ResourceEntryRef {
public:
  Error moveNext(bool &End);
  bool checkTypeString() const { return IsStringType; }
  ArrayRef<UTF16> getTypeString() const { return Type; }
  uint16_t getTypeID() const { return TypeID; }
  bool checkNameString() const { return IsStringName; }
  ArrayRef<UTF16> getNameString() const { return Name; }
  uint16_t getNameID() const { return NameID; }
  uint16_t getDataVersion() const { return Suffix->DataVersion; }
  uint16_t getLanguage() const { return Suffix->Language; }
  uint16_t getMemoryFlags() const { return Suffix->MemoryFlags; }
  uint16_t getMajorVersion() const { return Suffix->Version >> 16; }
  uint16_t getMinorVersion() const { return Suffix->Version; }
  uint32_t getCharacteristics() const { return Suffix->Characteristics; }
  ArrayRef<uint8_t> getData() const { return Data; }

private:
  friend class WindowsResource;

  ResourceEntryRef(BinaryStreamRef Ref, const WindowsResource *Owner);
  Error loadNext();

  static Expected<ResourceEntryRef> create(BinaryStreamRef Ref,
                                           const WindowsResource *Owner);

  BinaryStreamReader Reader;
  const WindowsResource *Owner;
  bool IsStringType;
  ArrayRef<UTF16> Type;
  uint16_t TypeID;
  bool IsStringName;
  ArrayRef<UTF16> Name;
  uint16_t NameID;
  const WinResHeaderSuffix *Suffix = nullptr;
  ArrayRef<uint8_t> Data;
};

class WindowsResource : public Binary {
public:
  Expected<ResourceEntryRef> getHeadEntry();

  static bool classof(const Binary *V) { return V->isWinRes(); }

  static Expected<std::unique_ptr<WindowsResource>>
  createWindowsResource(MemoryBufferRef Source);

private:
  friend class ResourceEntryRef;

  WindowsResource(MemoryBufferRef Source);

  BinaryByteStream BBS;
};

class WindowsResourceParser {
public:
  class TreeNode;
  WindowsResourceParser(bool MinGW = false);
  Error parse(WindowsResource *WR, std::vector<std::string> &Duplicates);
  Error parse(ResourceSectionRef &RSR, StringRef Filename,
              std::vector<std::string> &Duplicates);
  void cleanUpManifests(std::vector<std::string> &Duplicates);
  void printTree(raw_ostream &OS) const;
  const TreeNode &getTree() const { return Root; }
  ArrayRef<std::vector<uint8_t>> getData() const { return Data; }
  ArrayRef<std::vector<UTF16>> getStringTable() const { return StringTable; }

  class TreeNode {
  public:
    template <typename T>
    using Children = std::map<T, std::unique_ptr<TreeNode>>;

    void print(ScopedPrinter &Writer, StringRef Name) const;
    uint32_t getTreeSize() const;
    uint32_t getStringIndex() const { return StringIndex; }
    uint32_t getDataIndex() const { return DataIndex; }
    uint16_t getMajorVersion() const { return MajorVersion; }
    uint16_t getMinorVersion() const { return MinorVersion; }
    uint32_t getCharacteristics() const { return Characteristics; }
    bool checkIsDataNode() const { return IsDataNode; }
    const Children<uint32_t> &getIDChildren() const { return IDChildren; }
    const Children<std::string> &getStringChildren() const {
      return StringChildren;
    }

  private:
    friend class WindowsResourceParser;

    // Index is the StringTable vector index for this node's name.
    static std::unique_ptr<TreeNode> createStringNode(uint32_t Index);
    static std::unique_ptr<TreeNode> createIDNode();
    // DataIndex is the Data vector index that the data node points at.
    static std::unique_ptr<TreeNode> createDataNode(uint16_t MajorVersion,
                                                    uint16_t MinorVersion,
                                                    uint32_t Characteristics,
                                                    uint32_t Origin,
                                                    uint32_t DataIndex);

    explicit TreeNode(uint32_t StringIndex);
    TreeNode(uint16_t MajorVersion, uint16_t MinorVersion,
             uint32_t Characteristics, uint32_t Origin, uint32_t DataIndex);

    bool addEntry(const ResourceEntryRef &Entry, uint32_t Origin,
                  std::vector<std::vector<uint8_t>> &Data,
                  std::vector<std::vector<UTF16>> &StringTable,
                  TreeNode *&Result);
    TreeNode &addTypeNode(const ResourceEntryRef &Entry,
                          std::vector<std::vector<UTF16>> &StringTable);
    TreeNode &addNameNode(const ResourceEntryRef &Entry,
                          std::vector<std::vector<UTF16>> &StringTable);
    bool addLanguageNode(const ResourceEntryRef &Entry, uint32_t Origin,
                         std::vector<std::vector<uint8_t>> &Data,
                         TreeNode *&Result);
    bool addDataChild(uint32_t ID, uint16_t MajorVersion, uint16_t MinorVersion,
                      uint32_t Characteristics, uint32_t Origin,
                      uint32_t DataIndex, TreeNode *&Result);
    TreeNode &addIDChild(uint32_t ID);
    TreeNode &addNameChild(ArrayRef<UTF16> NameRef,
                           std::vector<std::vector<UTF16>> &StringTable);
    void shiftDataIndexDown(uint32_t Index);

    bool IsDataNode = false;
    uint32_t StringIndex;
    uint32_t DataIndex;
    Children<uint32_t> IDChildren;
    Children<std::string> StringChildren;
    uint16_t MajorVersion = 0;
    uint16_t MinorVersion = 0;
    uint32_t Characteristics = 0;

    // The .res file that defined this TreeNode, for diagnostics.
    // Index into InputFilenames.
    uint32_t Origin;
  };

  struct StringOrID {
    bool IsString;
    ArrayRef<UTF16> String;
    uint32_t ID = ~0u;

    StringOrID(uint32_t ID) : IsString(false), ID(ID) {}
    StringOrID(ArrayRef<UTF16> String) : IsString(true), String(String) {}
  };

private:
  Error addChildren(TreeNode &Node, ResourceSectionRef &RSR,
                    const coff_resource_dir_table &Table, uint32_t Origin,
                    std::vector<StringOrID> &Context,
                    std::vector<std::string> &Duplicates);
  bool shouldIgnoreDuplicate(const ResourceEntryRef &Entry) const;
  bool shouldIgnoreDuplicate(const std::vector<StringOrID> &Context) const;

  TreeNode Root;
  std::vector<std::vector<uint8_t>> Data;
  std::vector<std::vector<UTF16>> StringTable;

  std::vector<std::string> InputFilenames;

  bool MinGW;
};

Expected<std::unique_ptr<MemoryBuffer>>
writeWindowsResourceCOFF(llvm::COFF::MachineTypes MachineType,
                         const WindowsResourceParser &Parser,
                         uint32_t TimeDateStamp);

void printResourceTypeName(uint16_t TypeID, raw_ostream &OS);
} // namespace object
} // namespace llvm

#endif
//===--- Offloading.h - Utilities for handling offloading code  -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the binary format used for bundling device metadata with
// an associated device image. The data can then be stored inside a host object
// file to create a fat binary and read by the linker. This is intended to be a
// thin wrapper around the image itself. If this format becomes sufficiently
// complex it should be moved to a standard binary format like msgpack or ELF.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_OFFLOADBINARY_H
#define LLVM_OBJECT_OFFLOADBINARY_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

namespace llvm {

namespace object {

/// The producer of the associated offloading image.
enum OffloadKind : uint16_t {
  OFK_None = 0,
  OFK_OpenMP,
  OFK_Cuda,
  OFK_HIP,
  OFK_LAST,
};

/// The type of contents the offloading image contains.
enum ImageKind : uint16_t {
  IMG_None = 0,
  IMG_Object,
  IMG_Bitcode,
  IMG_Cubin,
  IMG_Fatbinary,
  IMG_PTX,
  IMG_LAST,
};

/// A simple binary serialization of an offloading file. We use this format to
/// embed the offloading image into the host executable so it can be extracted
/// and used by the linker.
///
/// Multiple offloading entries may be stored in the same section by the time
/// the linker sees them, so we mark this information with a header. The
/// version is used to detect ABI stability and the size is used to find other
/// offloading entries that may exist in the same section. All offsets are
/// given as absolute byte offsets from the beginning of the file.
class OffloadBinary : public Binary {
public:
  using string_iterator = MapVector<StringRef, StringRef>::const_iterator;
  using string_iterator_range = iterator_range<string_iterator>;

  /// The current version of the binary format, used for backwards compatibility.
  static const uint32_t Version = 1;

  /// The offloading metadata that will be serialized to a memory buffer.
  struct OffloadingImage {
    ImageKind TheImageKind;
    OffloadKind TheOffloadKind;
    uint32_t Flags;
    MapVector<StringRef, StringRef> StringData;
    std::unique_ptr<MemoryBuffer> Image;
  };

  /// Attempt to parse the offloading binary stored in \p Data.
  static Expected<std::unique_ptr<OffloadBinary>> create(MemoryBufferRef);

  /// Serialize the contents of \p File to a binary buffer to be read later.
  static std::unique_ptr<MemoryBuffer> write(const OffloadingImage &);

  static uint64_t getAlignment() { return 8; }

  ImageKind getImageKind() const { return TheEntry->TheImageKind; }
  OffloadKind getOffloadKind() const { return TheEntry->TheOffloadKind; }
  uint32_t getVersion() const { return TheHeader->Version; }
  uint32_t getFlags() const { return TheEntry->Flags; }
  uint64_t getSize() const { return TheHeader->Size; }

  StringRef getTriple() const { return getString("triple"); }
  StringRef getArch() const { return getString("arch"); }
  StringRef getImage() const {
    return StringRef(&Buffer[TheEntry->ImageOffset], TheEntry->ImageSize);
  }

  // Returns an iterator range over all the key-value pairs in the binary.
  string_iterator_range strings() const {
    return string_iterator_range(StringData.begin(), StringData.end());
  }

  StringRef getString(StringRef Key) const { return StringData.lookup(Key); }

  static bool classof(const Binary *V) { return V->isOffloadFile(); }

  struct Header {
    uint8_t Magic[4] = {0x10, 0xFF, 0x10, 0xAD}; // 0x10FF10AD magic bytes.
    uint32_t Version = OffloadBinary::Version;   // Version identifier.
    uint64_t Size;        // Size in bytes of this entire binary.
    uint64_t EntryOffset; // Offset of the metadata entry in bytes.
    uint64_t EntrySize;   // Size of the metadata entry in bytes.
  };

  struct Entry {
    ImageKind TheImageKind;     // The kind of the image stored.
    OffloadKind TheOffloadKind; // The producer of this image.
    uint32_t Flags;             // Additional flags associated with the image.
    uint64_t StringOffset;      // Offset in bytes to the string map.
    uint64_t NumStrings;        // Number of entries in the string map.
    uint64_t ImageOffset;       // Offset in bytes of the actual binary image.
    uint64_t ImageSize;         // Size in bytes of the binary image.
  };

  struct StringEntry {
    uint64_t KeyOffset;
    uint64_t ValueOffset;
  };

private:
  OffloadBinary(MemoryBufferRef Source, const Header *TheHeader,
                const Entry *TheEntry)
      : Binary(Binary::ID_Offload, Source), Buffer(Source.getBufferStart()),
        TheHeader(TheHeader), TheEntry(TheEntry) {
    const StringEntry *StringMapBegin =
        reinterpret_cast<const StringEntry *>(&Buffer[TheEntry->StringOffset]);
    for (uint64_t I = 0, E = TheEntry->NumStrings; I != E; ++I) {
      StringRef Key = &Buffer[StringMapBegin[I].KeyOffset];
      StringData[Key] = &Buffer[StringMapBegin[I].ValueOffset];
    }
  }

  OffloadBinary(const OffloadBinary &Other) = delete;

  /// Map from metadata keys to their corresponding string values in the binary.
  MapVector<StringRef, StringRef> StringData;
  /// Raw pointer to the MemoryBufferRef for convenience.
  const char *Buffer;
  /// Location of the header within the binary.
  const Header *TheHeader;
  /// Location of the metadata entries within the binary.
  const Entry *TheEntry;
};
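// Editor's sketch (illustrative, not part of the original header): a
// serialize/parse round trip. The triple and arch values are placeholders
// and `DeviceCode` stands in for an actual device image buffer.
#if 0
inline Error roundTripExample(std::unique_ptr<MemoryBuffer> DeviceCode) {
  OffloadBinary::OffloadingImage Img;
  Img.TheImageKind = IMG_Cubin;
  Img.TheOffloadKind = OFK_Cuda;
  Img.Flags = 0;
  Img.StringData["triple"] = "nvptx64-nvidia-cuda";
  Img.StringData["arch"] = "sm_70";
  Img.Image = std::move(DeviceCode);

  std::unique_ptr<MemoryBuffer> Serialized = OffloadBinary::write(Img);
  Expected<std::unique_ptr<OffloadBinary>> BinOrErr =
      OffloadBinary::create(Serialized->getMemBufferRef());
  if (!BinOrErr)
    return BinOrErr.takeError();
  // (*BinOrErr)->getTriple() == "nvptx64-nvidia-cuda" at this point.
  return Error::success();
}
#endif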

/// A class to contain the binary information for a single OffloadBinary that
/// owns its memory.
class OffloadFile : public OwningBinary<OffloadBinary> {
public:
  using TargetID = std::pair<StringRef, StringRef>;

  OffloadFile(std::unique_ptr<OffloadBinary> Binary,
              std::unique_ptr<MemoryBuffer> Buffer)
      : OwningBinary<OffloadBinary>(std::move(Binary), std::move(Buffer)) {}

  /// We use the Triple and Architecture pair to group linker inputs together.
  /// This conversion function lets us use these inputs in a hash-map.
  operator TargetID() const {
    return std::make_pair(getBinary()->getTriple(), getBinary()->getArch());
  }
};

/// Extracts embedded device offloading code from a memory \p Buffer to a list
/// of \p Binaries.
Error extractOffloadBinaries(MemoryBufferRef Buffer,
                             SmallVectorImpl<OffloadFile> &Binaries);
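// Editor's sketch of the intended usage, assuming `Buf` holds a host object
// with embedded offload sections: extract every entry and bucket it by
// (triple, arch) via the TargetID conversion above. The function name is
// hypothetical.
#if 0
inline Error groupByTargetExample(MemoryBufferRef Buf) {
  SmallVector<OffloadFile> Binaries;
  if (Error E = extractOffloadBinaries(Buf, Binaries))
    return E;
  MapVector<OffloadFile::TargetID, SmallVector<const OffloadBinary *>> Groups;
  for (OffloadFile &File : Binaries)
    Groups[File].push_back(File.getBinary());
  return Error::success();
}
#endif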

/// Convert a string \p Name to an image kind.
ImageKind getImageKind(StringRef Name);

/// Convert an image kind to its string representation.
StringRef getImageKindName(ImageKind Name);

/// Convert a string \p Name to an offload kind.
OffloadKind getOffloadKind(StringRef Name);

/// Convert an offload kind to its string representation.
StringRef getOffloadKindName(OffloadKind Name);

} // namespace object

} // namespace llvm
#endif
//===--- COFFModuleDefinition.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Windows-specific.
// A parser for the module-definition file (.def file).
// Parsed results are returned in a COFFModuleDefinition struct.
//
// The format of module-definition files is described in this document:
// https://msdn.microsoft.com/en-us/library/28d6s79h.aspx
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_COFFMODULEDEFINITION_H
#define LLVM_OBJECT_COFFMODULEDEFINITION_H

#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFFImportFile.h"

namespace llvm {
namespace object {

struct COFFModuleDefinition {
  std::vector<COFFShortExport> Exports;
  std::string OutputFile;
  std::string ImportName;
  uint64_t ImageBase = 0;
  uint64_t StackReserve = 0;
  uint64_t StackCommit = 0;
  uint64_t HeapReserve = 0;
  uint64_t HeapCommit = 0;
  uint32_t MajorImageVersion = 0;
  uint32_t MinorImageVersion = 0;
  uint32_t MajorOSVersion = 0;
  uint32_t MinorOSVersion = 0;
};

Expected<COFFModuleDefinition>
parseCOFFModuleDefinition(MemoryBufferRef MB, COFF::MachineTypes Machine,
                          bool MingwDef = false, bool AddUnderscores = true);
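// Editor's sketch (not part of the original header): parsing a .def file for
// an x86-64 image with the default underscore handling.
#if 0
inline Expected<COFFModuleDefinition> parseDefExample(MemoryBufferRef MB) {
  return parseCOFFModuleDefinition(MB, COFF::IMAGE_FILE_MACHINE_AMD64);
}
#endif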

} // End namespace object.
} // End namespace llvm.

#endif
//===-- FaultMapParser.h - Parser for the "FaultMaps" section ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_FAULTMAPPARSER_H
#define LLVM_OBJECT_FAULTMAPPARSER_H

#include "llvm/Support/Endian.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class raw_ostream;

/// A parser for the __llvm_faultmaps section generated by the FaultMaps class
/// declared in llvm/CodeGen/FaultMaps.h.  This parser is version locked with
/// the __llvm_faultmaps section generated by the version of LLVM that
/// includes it.  No guarantees are made with respect to forward or backward
/// compatibility.
class FaultMapParser {
  using FaultMapVersionType = uint8_t;
  using Reserved0Type = uint8_t;
  using Reserved1Type = uint16_t;
  using NumFunctionsType = uint32_t;

  static const size_t FaultMapVersionOffset = 0;
  static const size_t Reserved0Offset =
      FaultMapVersionOffset + sizeof(FaultMapVersionType);
  static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
  static const size_t NumFunctionsOffset =
      Reserved1Offset + sizeof(Reserved1Type);
  static const size_t FunctionInfosOffset =
      NumFunctionsOffset + sizeof(NumFunctionsType);

  const uint8_t *P;
  const uint8_t *E;

  template <typename T> static T read(const uint8_t *P, const uint8_t *E) {
    assert(P + sizeof(T) <= E && "out of bounds read!");
    return support::endian::read<T, support::little, 1>(P);
  }

public:
  enum FaultKind {
    FaultingLoad = 1,
    FaultingLoadStore,
    FaultingStore,
    FaultKindMax
  };

  class FunctionFaultInfoAccessor {
    using FaultKindType = uint32_t;
    using FaultingPCOffsetType = uint32_t;
    using HandlerPCOffsetType = uint32_t;

    static const size_t FaultKindOffset = 0;
    static const size_t FaultingPCOffsetOffset =
        FaultKindOffset + sizeof(FaultKindType);
    static const size_t HandlerPCOffsetOffset =
        FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);

    const uint8_t *P;
    const uint8_t *E;

  public:
    static const size_t Size =
        HandlerPCOffsetOffset + sizeof(HandlerPCOffsetType);

    explicit FunctionFaultInfoAccessor(const uint8_t *P, const uint8_t *E)
        : P(P), E(E) {}

    FaultKindType getFaultKind() const {
      return read<FaultKindType>(P + FaultKindOffset, E);
    }

    FaultingPCOffsetType getFaultingPCOffset() const {
      return read<FaultingPCOffsetType>(P + FaultingPCOffsetOffset, E);
    }

    HandlerPCOffsetType getHandlerPCOffset() const {
      return read<HandlerPCOffsetType>(P + HandlerPCOffsetOffset, E);
    }
  };

  class FunctionInfoAccessor {
    using FunctionAddrType = uint64_t;
    using NumFaultingPCsType = uint32_t;
    using ReservedType = uint32_t;

    static const size_t FunctionAddrOffset = 0;
    static const size_t NumFaultingPCsOffset =
        FunctionAddrOffset + sizeof(FunctionAddrType);
    static const size_t ReservedOffset =
        NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
    static const size_t FunctionFaultInfosOffset =
        ReservedOffset + sizeof(ReservedType);
    static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;

    const uint8_t *P = nullptr;
    const uint8_t *E = nullptr;

  public:
    FunctionInfoAccessor() = default;

    explicit FunctionInfoAccessor(const uint8_t *P, const uint8_t *E)
        : P(P), E(E) {}

    FunctionAddrType getFunctionAddr() const {
      return read<FunctionAddrType>(P + FunctionAddrOffset, E);
    }

    NumFaultingPCsType getNumFaultingPCs() const {
      return read<NumFaultingPCsType>(P + NumFaultingPCsOffset, E);
    }

    FunctionFaultInfoAccessor getFunctionFaultInfoAt(uint32_t Index) const {
      assert(Index < getNumFaultingPCs() && "index out of bounds!");
      const uint8_t *Begin = P + FunctionFaultInfosOffset +
                             FunctionFaultInfoAccessor::Size * Index;
      return FunctionFaultInfoAccessor(Begin, E);
    }

    FunctionInfoAccessor getNextFunctionInfo() const {
      size_t MySize = FunctionInfoHeaderSize +
                      getNumFaultingPCs() * FunctionFaultInfoAccessor::Size;

      const uint8_t *Begin = P + MySize;
      assert(Begin < E && "out of bounds!");
      return FunctionInfoAccessor(Begin, E);
    }
  };

  explicit FaultMapParser(const uint8_t *Begin, const uint8_t *End)
      : P(Begin), E(End) {}

  FaultMapVersionType getFaultMapVersion() const {
    auto Version = read<FaultMapVersionType>(P + FaultMapVersionOffset, E);
    assert(Version == 1 && "only version 1 supported!");
    return Version;
  }

  NumFunctionsType getNumFunctions() const {
    return read<NumFunctionsType>(P + NumFunctionsOffset, E);
  }

  FunctionInfoAccessor getFirstFunctionInfo() const {
    const uint8_t *Begin = P + FunctionInfosOffset;
    return FunctionInfoAccessor(Begin, E);
  }
};
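// Editor's sketch (illustrative only): walking every function record and its
// fault entries, assuming `Begin`/`End` delimit the raw bytes of an
// __llvm_faultmaps section.
#if 0
inline void walkFaultMapExample(const uint8_t *Begin, const uint8_t *End) {
  FaultMapParser Parser(Begin, End);
  FaultMapParser::FunctionInfoAccessor FI = Parser.getFirstFunctionInfo();
  for (uint32_t F = 0, NF = Parser.getNumFunctions(); F != NF; ++F) {
    for (uint32_t I = 0, E = FI.getNumFaultingPCs(); I != E; ++I) {
      FaultMapParser::FunctionFaultInfoAccessor FFI =
          FI.getFunctionFaultInfoAt(I);
      (void)FFI.getFaultingPCOffset(); // PC of the faulting instruction.
      (void)FFI.getHandlerPCOffset(); // PC of its handler.
    }
    if (F + 1 != NF) // getNextFunctionInfo asserts on running past the end.
      FI = FI.getNextFunctionInfo();
  }
}
#endif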

raw_ostream &operator<<(raw_ostream &OS,
                        const FaultMapParser::FunctionFaultInfoAccessor &);

raw_ostream &operator<<(raw_ostream &OS,
                        const FaultMapParser::FunctionInfoAccessor &);

raw_ostream &operator<<(raw_ostream &OS, const FaultMapParser &);

} // namespace llvm

#endif
//===- SymbolicFile.h - Interface that only provides symbols ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SymbolicFile interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_SYMBOLICFILE_H
#define LLVM_OBJECT_SYMBOLICFILE_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <cinttypes>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>

namespace llvm {

class LLVMContext;
class raw_ostream;

namespace object {

union DataRefImpl {
  // This entire union should probably be a
  // char[max(8, sizeof(uintptr_t))] and require the impl to cast.
  struct {
    uint32_t a, b;
  } d;
  uintptr_t p;

  DataRefImpl() { std::memset(this, 0, sizeof(DataRefImpl)); }
};

template <typename OStream>
OStream& operator<<(OStream &OS, const DataRefImpl &D) {
  OS << "(" << format("0x%08" PRIxPTR, D.p) << " (" << format("0x%08x", D.d.a)
     << ", " << format("0x%08x", D.d.b) << "))";
  return OS;
}

inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) {
  // Check for bitwise identity. This is the only legal way to compare a
  // union without knowing which member is in use.
  return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
}

inline bool operator!=(const DataRefImpl &a, const DataRefImpl &b) {
  return !operator==(a, b);
}

inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) {
  // Check for bitwise identity. This is the only legal way to compare a
  // union without knowing which member is in use.
  return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
}

template <class content_type> class content_iterator {
  content_type Current;

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = content_type;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  content_iterator(content_type symb) : Current(std::move(symb)) {}

  const content_type *operator->() const { return &Current; }

  const content_type &operator*() const { return Current; }

  bool operator==(const content_iterator &other) const {
    return Current == other.Current;
  }

  bool operator!=(const content_iterator &other) const {
    return !(*this == other);
  }

  content_iterator &operator++() { // preincrement
    Current.moveNext();
    return *this;
  }
};

class SymbolicFile;

/// This is a value type class that represents a single symbol in the list of
/// symbols in the object file.
class BasicSymbolRef {
  DataRefImpl SymbolPimpl;
  const SymbolicFile *OwningObject = nullptr;

public:
  enum Flags : unsigned {
    SF_None = 0,
    SF_Undefined = 1U << 0,      // Symbol is defined in another object file
    SF_Global = 1U << 1,         // Global symbol
    SF_Weak = 1U << 2,           // Weak symbol
    SF_Absolute = 1U << 3,       // Absolute symbol
    SF_Common = 1U << 4,         // Symbol has common linkage
    SF_Indirect = 1U << 5,       // Symbol is an alias to another symbol
    SF_Exported = 1U << 6,       // Symbol is visible to other DSOs
    SF_FormatSpecific = 1U << 7, // Specific to the object file format
                                 // (e.g. section symbols)
    SF_Thumb = 1U << 8,          // Thumb symbol in a 32-bit ARM binary
    SF_Hidden = 1U << 9,         // Symbol has hidden visibility
    SF_Const = 1U << 10,         // Symbol value is constant
    SF_Executable = 1U << 11,    // Symbol points to an executable section
                                 // (IR only)
  };

  BasicSymbolRef() = default;
  BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner);

  bool operator==(const BasicSymbolRef &Other) const;
  bool operator<(const BasicSymbolRef &Other) const;

  void moveNext();

  Error printName(raw_ostream &OS) const;

  /// Get symbol flags (bitwise OR of SymbolRef::Flags)
  Expected<uint32_t> getFlags() const;

  DataRefImpl getRawDataRefImpl() const;
  const SymbolicFile *getObject() const;
};

using basic_symbol_iterator = content_iterator<BasicSymbolRef>;

class SymbolicFile : public Binary {
public:
  SymbolicFile(unsigned int Type, MemoryBufferRef Source);
  ~SymbolicFile() override;

  // virtual interface.
  virtual void moveSymbolNext(DataRefImpl &Symb) const = 0;

  virtual Error printSymbolName(raw_ostream &OS, DataRefImpl Symb) const = 0;

  virtual Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const = 0;

  virtual basic_symbol_iterator symbol_begin() const = 0;

  virtual basic_symbol_iterator symbol_end() const = 0;

  virtual bool is64Bit() const = 0;

  // convenience wrappers.
  using basic_symbol_iterator_range = iterator_range<basic_symbol_iterator>;
  basic_symbol_iterator_range symbols() const {
    return basic_symbol_iterator_range(symbol_begin(), symbol_end());
  }

  // construction aux.
  static Expected<std::unique_ptr<SymbolicFile>>
  createSymbolicFile(MemoryBufferRef Object, llvm::file_magic Type,
                     LLVMContext *Context, bool InitContent = true);

  static Expected<std::unique_ptr<SymbolicFile>>
  createSymbolicFile(MemoryBufferRef Object) {
    return createSymbolicFile(Object, llvm::file_magic::unknown, nullptr);
  }

  static bool classof(const Binary *v) {
    return v->isSymbolic();
  }

  static bool isSymbolicFile(file_magic Type, const LLVMContext *Context);
};
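// Editor's sketch (illustrative only): autodetect and print every symbol in
// a symbolic file. Assumes `Obj` references a buffer containing bitcode or a
// native object, and that raw_ostream is fully included at the point of use.
#if 0
inline Error printAllSymbolsExample(MemoryBufferRef Obj, raw_ostream &OS) {
  Expected<std::unique_ptr<SymbolicFile>> FileOrErr =
      SymbolicFile::createSymbolicFile(Obj);
  if (!FileOrErr)
    return FileOrErr.takeError();
  for (const BasicSymbolRef &Sym : (*FileOrErr)->symbols()) {
    if (Error E = Sym.printName(OS))
      return E;
    OS << "\n";
  }
  return Error::success();
}
#endif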

inline BasicSymbolRef::BasicSymbolRef(DataRefImpl SymbolP,
                                      const SymbolicFile *Owner)
    : SymbolPimpl(SymbolP), OwningObject(Owner) {}

inline bool BasicSymbolRef::operator==(const BasicSymbolRef &Other) const {
  return SymbolPimpl == Other.SymbolPimpl;
}

inline bool BasicSymbolRef::operator<(const BasicSymbolRef &Other) const {
  return SymbolPimpl < Other.SymbolPimpl;
}

inline void BasicSymbolRef::moveNext() {
  return OwningObject->moveSymbolNext(SymbolPimpl);
}

inline Error BasicSymbolRef::printName(raw_ostream &OS) const {
  return OwningObject->printSymbolName(OS, SymbolPimpl);
}

inline Expected<uint32_t> BasicSymbolRef::getFlags() const {
  return OwningObject->getSymbolFlags(SymbolPimpl);
}

inline DataRefImpl BasicSymbolRef::getRawDataRefImpl() const {
  return SymbolPimpl;
}

inline const SymbolicFile *BasicSymbolRef::getObject() const {
  return OwningObject;
}

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_SYMBOLICFILE_H
//===- Error.h - system_error extensions for Object -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This declares a new error_category for the Object library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_ERROR_H
#define LLVM_OBJECT_ERROR_H

#include "llvm/Support/Error.h"
#include <system_error>

namespace llvm {

class Twine;

namespace object {

const std::error_category &object_category();

enum class object_error {
  // Error code 0 is absent. Use std::error_code() instead.
  arch_not_found = 1,
  invalid_file_type,
  parse_failed,
  unexpected_eof,
  string_table_non_null_end,
  invalid_section_index,
  bitcode_section_not_found,
  invalid_symbol_index,
  section_stripped,
};

inline std::error_code make_error_code(object_error e) {
  return std::error_code(static_cast<int>(e), object_category());
}

/// Base class for all errors indicating malformed binary files.
///
/// Having a subclass for all malformed binary files allows archive-walking
/// code to skip malformed files without having to understand every possible
/// way that a binary file might be malformed.
///
/// Currently inherits from ECError for easy interoperability with
/// std::error_code, but this will be removed in the future.
class BinaryError : public ErrorInfo<BinaryError, ECError> {
  void anchor() override;
public:
  static char ID;
  BinaryError() {
    // Default to parse_failed, can be overridden with setErrorCode.
    setErrorCode(make_error_code(object_error::parse_failed));
  }
};

/// Generic binary error.
///
/// For errors that don't require their own specific sub-error (most errors)
/// this class can be used to describe the error via a string message.
class GenericBinaryError : public ErrorInfo<GenericBinaryError, BinaryError> {
public:
  static char ID;
  GenericBinaryError(const Twine &Msg);
  GenericBinaryError(const Twine &Msg, object_error ECOverride);
  const std::string &getMessage() const { return Msg; }
  void log(raw_ostream &OS) const override;
private:
  std::string Msg;
};

/// isNotObjectErrorInvalidFileType() is used when looping through the children
/// of an archive after calling getAsBinary() on a child and it returns an
/// llvm::Error. In cases where we want to loop through the children and ignore
/// the non-objects in the archive, this is used to test the error to see if an
/// error() function needs to be called on the llvm::Error.
Error isNotObjectErrorInvalidFileType(llvm::Error Err);

inline Error createError(const Twine &Err) {
  return make_error<StringError>(Err, object_error::parse_failed);
}
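// Editor's sketch: producing and consuming an Object-category error.
// createError yields a StringError carrying object_error::parse_failed.
#if 0
inline void errorExample() {
  Error E = createError("truncated section header");
  handleAllErrors(std::move(E), [](const StringError &SE) {
    // SE.convertToErrorCode() == object_error::parse_failed.
  });
}
#endif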

} // end namespace object.

} // end namespace llvm.

namespace std {
template <>
struct is_error_code_enum<llvm::object::object_error> : std::true_type {};
}

#endif
//===- llvm/Object/BuildID.h - Build ID -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares a library for handling Build IDs and using them to find
/// debug info.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_OBJECT_BUILDID_H
#define LLVM_DEBUGINFO_OBJECT_BUILDID_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {
namespace object {

/// A build ID in binary form.
typedef SmallVector<uint8_t, 10> BuildID;

/// A reference to a BuildID in binary form.
typedef ArrayRef<uint8_t> BuildIDRef;

class ObjectFile;

/// Parses a build ID from a hex string.
BuildID parseBuildID(StringRef Str);

/// Returns the build ID, if any, contained in the given object file.
BuildIDRef getBuildID(const ObjectFile *Obj);

/// BuildIDFetcher searches local cache directories for debug info.
class BuildIDFetcher {
public:
  BuildIDFetcher(std::vector<std::string> DebugFileDirectories)
      : DebugFileDirectories(std::move(DebugFileDirectories)) {}
  virtual ~BuildIDFetcher() = default;

  /// Returns the path to the debug file with the given build ID.
  virtual std::optional<std::string> fetch(BuildIDRef BuildID) const;

private:
  const std::vector<std::string> DebugFileDirectories;
};
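// Editor's sketch: parse a hex build ID and search for the matching debug
// file. The ID string and directory path are placeholders for illustration.
#if 0
inline std::optional<std::string> fetchDebugInfoExample() {
  BuildID ID = parseBuildID("a0b1c2d3e4f5");
  BuildIDFetcher Fetcher({"/usr/lib/debug"});
  return Fetcher.fetch(ID);
}
#endif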

} // namespace object
} // namespace llvm

#endif // LLVM_DEBUGINFO_OBJECT_BUILDID_H
//===- Binary.h - A generic binary file -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the Binary class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_BINARY_H
#define LLVM_OBJECT_BINARY_H

#include "llvm-c/Types.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TargetParser/Triple.h"
#include <memory>
#include <utility>

namespace llvm {

class LLVMContext;
class StringRef;

namespace object {

class Binary {
private:
  unsigned int TypeID;

protected:
  MemoryBufferRef Data;

  Binary(unsigned int Type, MemoryBufferRef Source);

  enum {
    ID_Archive,
    ID_MachOUniversalBinary,
    ID_COFFImportFile,
    ID_IR,            // LLVM IR
    ID_TapiUniversal, // Text-based Dynamic Library Stub file.
    ID_TapiFile,      // Text-based Dynamic Library Stub file.

    ID_Minidump,

    ID_WinRes, // Windows resource (.res) file.

    ID_Offload, // Offloading binary file.

    // Object and children.
    ID_StartObjects,
    ID_COFF,

    ID_XCOFF32, // AIX XCOFF 32-bit
    ID_XCOFF64, // AIX XCOFF 64-bit

    ID_ELF32L, // ELF 32-bit, little endian
    ID_ELF32B, // ELF 32-bit, big endian
    ID_ELF64L, // ELF 64-bit, little endian
    ID_ELF64B, // ELF 64-bit, big endian

    ID_MachO32L, // MachO 32-bit, little endian
    ID_MachO32B, // MachO 32-bit, big endian
    ID_MachO64L, // MachO 64-bit, little endian
    ID_MachO64B, // MachO 64-bit, big endian

    ID_GOFF,
    ID_Wasm,

    ID_EndObjects
  };

  static inline unsigned int getELFType(bool isLE, bool is64Bits) {
    if (isLE)
      return is64Bits ? ID_ELF64L : ID_ELF32L;
    else
      return is64Bits ? ID_ELF64B : ID_ELF32B;
  }

  static unsigned int getMachOType(bool isLE, bool is64Bits) {
    if (isLE)
      return is64Bits ? ID_MachO64L : ID_MachO32L;
    else
      return is64Bits ? ID_MachO64B : ID_MachO32B;
  }

public:
  Binary() = delete;
  Binary(const Binary &other) = delete;
  virtual ~Binary();

  virtual Error initContent() { return Error::success(); };

  StringRef getData() const;
  StringRef getFileName() const;
  MemoryBufferRef getMemoryBufferRef() const;

  // Cast methods.
  unsigned int getType() const { return TypeID; }

  // Convenience methods
  bool isObject() const {
    return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
  }

  bool isSymbolic() const {
    return isIR() || isObject() || isCOFFImportFile() || isTapiFile();
  }

  bool isArchive() const { return TypeID == ID_Archive; }

  bool isMachOUniversalBinary() const {
    return TypeID == ID_MachOUniversalBinary;
  }

  bool isTapiUniversal() const { return TypeID == ID_TapiUniversal; }

  bool isELF() const {
    return TypeID >= ID_ELF32L && TypeID <= ID_ELF64B;
  }

  bool isMachO() const {
    return TypeID >= ID_MachO32L && TypeID <= ID_MachO64B;
  }

  bool isCOFF() const {
    return TypeID == ID_COFF;
  }

  bool isXCOFF() const { return TypeID == ID_XCOFF32 || TypeID == ID_XCOFF64; }

  bool isWasm() const { return TypeID == ID_Wasm; }

  bool isOffloadFile() const { return TypeID == ID_Offload; }

  bool isCOFFImportFile() const {
    return TypeID == ID_COFFImportFile;
  }

  bool isIR() const {
    return TypeID == ID_IR;
  }

  bool isGOFF() const { return TypeID == ID_GOFF; }

  bool isMinidump() const { return TypeID == ID_Minidump; }

  bool isTapiFile() const { return TypeID == ID_TapiFile; }

  bool isLittleEndian() const {
    return !(TypeID == ID_ELF32B || TypeID == ID_ELF64B ||
             TypeID == ID_MachO32B || TypeID == ID_MachO64B ||
             TypeID == ID_XCOFF32 || TypeID == ID_XCOFF64);
  }

  bool isWinRes() const { return TypeID == ID_WinRes; }

  Triple::ObjectFormatType getTripleObjectFormat() const {
    if (isCOFF())
      return Triple::COFF;
    if (isMachO())
      return Triple::MachO;
    if (isELF())
      return Triple::ELF;
    if (isGOFF())
      return Triple::GOFF;
    return Triple::UnknownObjectFormat;
  }

  static Error checkOffset(MemoryBufferRef M, uintptr_t Addr,
                           const uint64_t Size) {
    if (Addr + Size < Addr || Addr + Size < Size ||
        Addr + Size > reinterpret_cast<uintptr_t>(M.getBufferEnd()) ||
        Addr < reinterpret_cast<uintptr_t>(M.getBufferStart())) {
      return errorCodeToError(object_error::unexpected_eof);
    }
    return Error::success();
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Binary, LLVMBinaryRef)

/// Create a Binary from Source, autodetecting the file type.
///
/// @param Source The data to create the Binary from.
Expected<std::unique_ptr<Binary>> createBinary(MemoryBufferRef Source,
                                               LLVMContext *Context = nullptr,
                                               bool InitContent = true);
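// Editor's sketch: autodetect the format of an in-memory buffer and query
// its broad category via the convenience predicates above. The function
// name is hypothetical.
#if 0
inline Expected<bool> isObjectExample(MemoryBufferRef Buf) {
  Expected<std::unique_ptr<Binary>> BinOrErr = createBinary(Buf);
  if (!BinOrErr)
    return BinOrErr.takeError();
  return (*BinOrErr)->isObject();
}
#endif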

template <typename T> class OwningBinary {
  std::unique_ptr<T> Bin;
  std::unique_ptr<MemoryBuffer> Buf;

public:
  OwningBinary();
  OwningBinary(std::unique_ptr<T> Bin, std::unique_ptr<MemoryBuffer> Buf);
  OwningBinary(OwningBinary<T>&& Other);
  OwningBinary<T> &operator=(OwningBinary<T> &&Other);

  std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>> takeBinary();

  T* getBinary();
  const T* getBinary() const;
};

template <typename T>
OwningBinary<T>::OwningBinary(std::unique_ptr<T> Bin,
                              std::unique_ptr<MemoryBuffer> Buf)
    : Bin(std::move(Bin)), Buf(std::move(Buf)) {}

template <typename T> OwningBinary<T>::OwningBinary() = default;

template <typename T>
OwningBinary<T>::OwningBinary(OwningBinary &&Other)
    : Bin(std::move(Other.Bin)), Buf(std::move(Other.Buf)) {}

template <typename T>
OwningBinary<T> &OwningBinary<T>::operator=(OwningBinary &&Other) {
  Bin = std::move(Other.Bin);
  Buf = std::move(Other.Buf);
  return *this;
}

template <typename T>
std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>>
OwningBinary<T>::takeBinary() {
  return std::make_pair(std::move(Bin), std::move(Buf));
}

template <typename T> T* OwningBinary<T>::getBinary() {
  return Bin.get();
}

template <typename T> const T* OwningBinary<T>::getBinary() const {
  return Bin.get();
}

Expected<OwningBinary<Binary>> createBinary(StringRef Path,
                                            LLVMContext *Context = nullptr,
                                            bool InitContent = true);
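// Editor's sketch: open a file from disk, then take ownership of both the
// parsed Binary and the MemoryBuffer that backs it.
#if 0
inline Error takeOwnershipExample(StringRef Path) {
  Expected<OwningBinary<Binary>> BinOrErr = createBinary(Path);
  if (!BinOrErr)
    return BinOrErr.takeError();
  auto [Bin, Buf] = BinOrErr->takeBinary();
  // `Bin` points into `Buf`; keep both alive together to avoid dangling.
  return Error::success();
}
#endif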

} // end namespace object

} // end namespace llvm

#endif // LLVM_OBJECT_BINARY_H
//===- DXContainer.h - DXContainer file implementation ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the DXContainer class, which provides access to the
// contents of DXContainer files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_DXCONTAINER_H
#define LLVM_OBJECT_DXCONTAINER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/DXContainer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/TargetParser/Triple.h"
#include <variant>

namespace llvm {
namespace object {

namespace DirectX {
class PSVRuntimeInfo {

  // This class provides a view into the underlying resource array. The
  // resource data is little-endian encoded and may not be properly aligned
  // for direct reads, so the dereference operator creates a copy of the data
  // and byte swaps it as appropriate.
  struct ResourceArray {
    StringRef Data;
    uint32_t Stride; // size of each element in the list.

    ResourceArray() = default;
    ResourceArray(StringRef D, size_t S) : Data(D), Stride(S) {}

    using value_type = dxbc::PSV::v2::ResourceBindInfo;
    static constexpr uint32_t MaxStride() {
      return static_cast<uint32_t>(sizeof(value_type));
    }

    struct iterator {
      StringRef Data;
      uint32_t Stride; // size of each element in the list.
      const char *Current;

      iterator(const ResourceArray &A, const char *C)
          : Data(A.Data), Stride(A.Stride), Current(C) {}
      iterator(const iterator &) = default;

      value_type operator*() {
        // Explicitly zero the structure so that unused fields are zeroed. It is
        // up to the user to know if the fields are used by verifying the PSV
        // version.
        value_type Val = {{0, 0, 0, 0}, 0, 0};
        if (Current >= Data.end())
          return Val;
        memcpy(static_cast<void *>(&Val), Current,
               std::min(Stride, MaxStride()));
        if (sys::IsBigEndianHost)
          Val.swapBytes();
        return Val;
      }

      iterator operator++() {
        if (Current < Data.end())
          Current += Stride;
        return *this;
      }

      iterator operator++(int) {
        iterator Tmp = *this;
        ++*this;
        return Tmp;
      }

      iterator operator--() {
        if (Current > Data.begin())
          Current -= Stride;
        return *this;
      }

      iterator operator--(int) {
        iterator Tmp = *this;
        --*this;
        return Tmp;
      }

      bool operator==(const iterator I) { return I.Current == Current; }
      bool operator!=(const iterator I) { return !(*this == I); }
    };

    iterator begin() const { return iterator(*this, Data.begin()); }

    iterator end() const { return iterator(*this, Data.end()); }

    size_t size() const { return Data.size() / Stride; }
  };

  StringRef Data;
  uint32_t Size;
  using InfoStruct =
      std::variant<std::monostate, dxbc::PSV::v0::RuntimeInfo,
                   dxbc::PSV::v1::RuntimeInfo, dxbc::PSV::v2::RuntimeInfo>;
  InfoStruct BasicInfo;
  ResourceArray Resources;

public:
  PSVRuntimeInfo(StringRef D) : Data(D), Size(0) {}

  // Parsing depends on the shader kind
  Error parse(uint16_t ShaderKind);

  uint32_t getSize() const { return Size; }
  uint32_t getResourceCount() const { return Resources.size(); }
  ResourceArray getResources() const { return Resources; }

  uint32_t getVersion() const {
    return Size >= sizeof(dxbc::PSV::v2::RuntimeInfo)
               ? 2
               : (Size >= sizeof(dxbc::PSV::v1::RuntimeInfo) ? 1 : 0);
  }

  uint32_t getResourceStride() const { return Resources.Stride; }

  const InfoStruct &getInfo() const { return BasicInfo; }
};
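// Editor's sketch: interpret versioned PSV runtime info after parsing. The
// shader kind value is assumed to come from the surrounding container.
#if 0
inline void psvExample(DirectX::PSVRuntimeInfo &Info, uint16_t ShaderKind) {
  if (Error E = Info.parse(ShaderKind)) {
    consumeError(std::move(E));
    return;
  }
  if (Info.getVersion() >= 2)
    (void)Info.getResources(); // v2 resource bind info is available.
}
#endif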

} // namespace DirectX

class DXContainer {
public:
  using DXILData = std::pair<dxbc::ProgramHeader, const char *>;

private:
  DXContainer(MemoryBufferRef O);

  MemoryBufferRef Data;
  dxbc::Header Header;
  SmallVector<uint32_t, 4> PartOffsets;
  std::optional<DXILData> DXIL;
  std::optional<uint64_t> ShaderFlags;
  std::optional<dxbc::ShaderHash> Hash;
  std::optional<DirectX::PSVRuntimeInfo> PSVInfo;

  Error parseHeader();
  Error parsePartOffsets();
  Error parseDXILHeader(StringRef Part);
  Error parseShaderFlags(StringRef Part);
  Error parseHash(StringRef Part);
  Error parsePSVInfo(StringRef Part);
  friend class PartIterator;

public:
  // The PartIterator is a wrapper around the iterator for the PartOffsets
  // member of the DXContainer. It contains a reference to the container and
  // the current iterator value, as well as storage for a parsed part header.
  class PartIterator {
    const DXContainer &Container;
    SmallVectorImpl<uint32_t>::const_iterator OffsetIt;
    struct PartData {
      dxbc::PartHeader Part;
      uint32_t Offset;
      StringRef Data;
    } IteratorState;

    friend class DXContainer;

    PartIterator(const DXContainer &C,
                 SmallVectorImpl<uint32_t>::const_iterator It)
        : Container(C), OffsetIt(It) {
      if (OffsetIt == Container.PartOffsets.end())
        updateIteratorImpl(Container.PartOffsets.back());
      else
        updateIterator();
    }

    // Updates the iterator's state data. This results in copying the part
    // header into the iterator and handling any required byte swapping. This
    // is called when the iterator is constructed or incremented.
    void updateIterator() {
      if (OffsetIt != Container.PartOffsets.end())
        updateIteratorImpl(*OffsetIt);
    }

    // Implementation for updating the iterator state based on a specified
    // offset.
    void updateIteratorImpl(const uint32_t Offset);

  public:
    PartIterator &operator++() {
      if (OffsetIt == Container.PartOffsets.end())
        return *this;
      ++OffsetIt;
      updateIterator();
      return *this;
    }

    PartIterator operator++(int) {
      PartIterator Tmp = *this;
      ++(*this);
      return Tmp;
    }

    bool operator==(const PartIterator &RHS) const {
      return OffsetIt == RHS.OffsetIt;
    }

    bool operator!=(const PartIterator &RHS) const {
      return OffsetIt != RHS.OffsetIt;
    }

    const PartData &operator*() { return IteratorState; }
    const PartData *operator->() { return &IteratorState; }
  };

  PartIterator begin() const {
    return PartIterator(*this, PartOffsets.begin());
  }

  PartIterator end() const { return PartIterator(*this, PartOffsets.end()); }

  StringRef getData() const { return Data.getBuffer(); }
  static Expected<DXContainer> create(MemoryBufferRef Object);

  const dxbc::Header &getHeader() const { return Header; }

  const std::optional<DXILData> &getDXIL() const { return DXIL; }

  std::optional<uint64_t> getShaderFlags() const { return ShaderFlags; }

  std::optional<dxbc::ShaderHash> getShaderHash() const { return Hash; }

  const std::optional<DirectX::PSVRuntimeInfo> &getPSVInfo() const {
    return PSVInfo;
  };
};
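// Editor's sketch: parse a container and walk its parts via PartIterator.
// The function name is hypothetical.
#if 0
inline Error walkPartsExample(MemoryBufferRef Buf) {
  Expected<DXContainer> ContainerOrErr = DXContainer::create(Buf);
  if (!ContainerOrErr)
    return ContainerOrErr.takeError();
  for (const auto &Part : *ContainerOrErr) {
    (void)Part.Offset; // Offset of this part within the container.
    (void)Part.Data;   // Raw bytes of the part.
  }
  return Error::success();
}
#endif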

} // namespace object
} // namespace llvm

#endif // LLVM_OBJECT_DXCONTAINER_H
//===- ArchiveWriter.h - ar archive file format writer ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares the writeArchive function for writing an archive file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_ARCHIVEWRITER_H
#define LLVM_OBJECT_ARCHIVEWRITER_H

#include "llvm/Object/Archive.h"

namespace llvm {

struct NewArchiveMember {
  std::unique_ptr<MemoryBuffer> Buf;
  StringRef MemberName;
  sys::TimePoint<std::chrono::seconds> ModTime;
  unsigned UID = 0, GID = 0, Perms = 0644;

  NewArchiveMember() = default;
  NewArchiveMember(MemoryBufferRef BufRef);

  // Detect the archive format from the object or bitcode file. This is used
  // to infer the archive format when creating or editing archives if one
  // isn't explicitly set.
  object::Archive::Kind detectKindFromObject() const;

  static Expected<NewArchiveMember>
  getOldMember(const object::Archive::Child &OldMember, bool Deterministic);

  static Expected<NewArchiveMember> getFile(StringRef FileName,
                                            bool Deterministic);
};

Expected<std::string> computeArchiveRelativePath(StringRef From, StringRef To);

Error writeArchive(StringRef ArcName, ArrayRef<NewArchiveMember> NewMembers,
                   bool WriteSymtab, object::Archive::Kind Kind,
                   bool Deterministic, bool Thin,
                   std::unique_ptr<MemoryBuffer> OldArchiveBuf = nullptr,
                   bool IsEC = false);

// writeArchiveToBuffer is similar to writeArchive but returns the Archive in a
// buffer instead of writing it out to a file.
Expected<std::unique_ptr<MemoryBuffer>>
writeArchiveToBuffer(ArrayRef<NewArchiveMember> NewMembers, bool WriteSymtab,
                     object::Archive::Kind Kind, bool Deterministic, bool Thin);
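// Editor's sketch: build a two-member GNU-format archive from files on disk.
// The member file names and output path are placeholders.
#if 0
inline Error writeArchiveExample() {
  std::vector<NewArchiveMember> Members;
  for (StringRef Name : {"a.o", "b.o"}) {
    Expected<NewArchiveMember> M =
        NewArchiveMember::getFile(Name, /*Deterministic=*/true);
    if (!M)
      return M.takeError();
    Members.push_back(std::move(*M));
  }
  return writeArchive("lib.a", Members, /*WriteSymtab=*/true,
                      object::Archive::K_GNU, /*Deterministic=*/true,
                      /*Thin=*/false);
}
#endif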
} // namespace llvm

#endif
//===- IRObjectFile.h - LLVM IR object file implementation ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the IRObjectFile class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_IROBJECTFILE_H
#define LLVM_OBJECT_IROBJECTFILE_H

#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Object/IRSymtab.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Object/SymbolicFile.h"

namespace llvm {
class Module;

namespace object {
class ObjectFile;

class IRObjectFile : public SymbolicFile {
  std::vector<std::unique_ptr<Module>> Mods;
  ModuleSymbolTable SymTab;
  IRObjectFile(MemoryBufferRef Object,
               std::vector<std::unique_ptr<Module>> Mods);

public:
  ~IRObjectFile() override;
  void moveSymbolNext(DataRefImpl &Symb) const override;
  Error printSymbolName(raw_ostream &OS, DataRefImpl Symb) const override;
  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;
  basic_symbol_iterator symbol_begin() const override;
  basic_symbol_iterator symbol_end() const override;
  bool is64Bit() const override {
    return Triple(getTargetTriple()).isArch64Bit();
  }
  StringRef getTargetTriple() const;

  static bool classof(const Binary *v) {
    return v->isIR();
  }

  using module_iterator =
      pointee_iterator<std::vector<std::unique_ptr<Module>>::const_iterator,
                       const Module>;

  module_iterator module_begin() const { return module_iterator(Mods.begin()); }
  module_iterator module_end() const { return module_iterator(Mods.end()); }

  iterator_range<module_iterator> modules() const {
    return make_range(module_begin(), module_end());
  }

  /// Finds and returns bitcode embedded in the given object file, or an
  /// error if not found.
  static Expected<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);

  /// Finds and returns bitcode in the given memory buffer (which may be
  /// either a bitcode file or a native object file with embedded bitcode),
  /// or an error if not found.
  static Expected<MemoryBufferRef>
  findBitcodeInMemBuffer(MemoryBufferRef Object);

  static Expected<std::unique_ptr<IRObjectFile>> create(MemoryBufferRef Object,
                                                        LLVMContext &Context);
};
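// Editor's sketch: locate bitcode in a buffer (plain bitcode or embedded in
// a native object) and build an IRObjectFile from it. The function name is
// hypothetical.
#if 0
inline Error irObjectExample(MemoryBufferRef Buf, LLVMContext &Ctx) {
  Expected<MemoryBufferRef> BCOrErr =
      IRObjectFile::findBitcodeInMemBuffer(Buf);
  if (!BCOrErr)
    return BCOrErr.takeError();
  Expected<std::unique_ptr<IRObjectFile>> IROrErr =
      IRObjectFile::create(*BCOrErr, Ctx);
  if (!IROrErr)
    return IROrErr.takeError();
  for (const Module &M : (*IROrErr)->modules())
    (void)M; // Each lazily-loaded module backing the symbol table.
  return Error::success();
}
#endif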

/// The contents of a bitcode file and its irsymtab. Any underlying data
/// for the irsymtab are owned by Symtab and Strtab.
struct IRSymtabFile {
  std::vector<BitcodeModule> Mods;
  SmallVector<char, 0> Symtab, Strtab;
  irsymtab::Reader TheReader;
};

/// Reads a bitcode file, creating its irsymtab if necessary.
Expected<IRSymtabFile> readIRSymtab(MemoryBufferRef MBRef);

} // namespace object

} // namespace llvm

#endif
//===- ObjectFile.h - File format independent object file -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares a file format independent ObjectFile class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_OBJECTFILE_H
#define LLVM_OBJECT_OBJECTFILE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Magic.h"
#include "llvm/BinaryFormat/Swift.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <cstdint>
#include <memory>

namespace llvm {

class SubtargetFeatures;

namespace object {

class COFFObjectFile;
class MachOObjectFile;
class ObjectFile;
class SectionRef;
class SymbolRef;
class symbol_iterator;
class WasmObjectFile;

using section_iterator = content_iterator<SectionRef>;

typedef std::function<bool(const SectionRef &)> SectionFilterPredicate;
/// This is a value type class that represents a single relocation in the list
/// of relocations in the object file.
class RelocationRef {
  DataRefImpl RelocationPimpl;
  const ObjectFile *OwningObject = nullptr;

public:
  RelocationRef() = default;
  RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);

  bool operator==(const RelocationRef &Other) const;

  void moveNext();

  uint64_t getOffset() const;
  symbol_iterator getSymbol() const;
  uint64_t getType() const;

  /// Get a string that represents the type of this relocation.
  ///
  /// This is for display purposes only.
  void getTypeName(SmallVectorImpl<char> &Result) const;

  DataRefImpl getRawDataRefImpl() const;
  const ObjectFile *getObject() const;
};

using relocation_iterator = content_iterator<RelocationRef>;

/// This is a value type class that represents a single section in the list of
/// sections in the object file.
class SectionRef {
  friend class SymbolRef;

  DataRefImpl SectionPimpl;
  const ObjectFile *OwningObject = nullptr;

public:
  SectionRef() = default;
  SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);

  bool operator==(const SectionRef &Other) const;
  bool operator!=(const SectionRef &Other) const;
  bool operator<(const SectionRef &Other) const;

  void moveNext();

  Expected<StringRef> getName() const;
  uint64_t getAddress() const;
  uint64_t getIndex() const;
  uint64_t getSize() const;
  Expected<StringRef> getContents() const;

  /// Get the alignment of this section.
  Align getAlignment() const;

  bool isCompressed() const;
  /// Whether this section contains instructions.
  bool isText() const;
  /// Whether this section contains data, not instructions.
  bool isData() const;
  /// Whether this section contains BSS uninitialized data.
  bool isBSS() const;
  bool isVirtual() const;
  bool isBitcode() const;
  bool isStripped() const;

  /// Whether this section will be placed in the text segment, according to the
  /// Berkeley size format. This is true if the section is allocatable, and
  /// contains either code or readonly data.
  bool isBerkeleyText() const;
  /// Whether this section will be placed in the data segment, according to the
  /// Berkeley size format. This is true if the section is allocatable and
  /// contains data (e.g. PROGBITS), but is not text.
  bool isBerkeleyData() const;

  /// Whether this section is a debug section.
  bool isDebugSection() const;

  bool containsSymbol(SymbolRef S) const;

  relocation_iterator relocation_begin() const;
  relocation_iterator relocation_end() const;
  iterator_range<relocation_iterator> relocations() const {
    return make_range(relocation_begin(), relocation_end());
  }

  /// Returns the related section if this section contains relocations. The
  /// returned section may or may not have applied its relocations.
  Expected<section_iterator> getRelocatedSection() const;

  DataRefImpl getRawDataRefImpl() const;
  const ObjectFile *getObject() const;
};

struct SectionedAddress {
  const static uint64_t UndefSection = UINT64_MAX;

  uint64_t Address = 0;
  uint64_t SectionIndex = UndefSection;
};

inline bool operator<(const SectionedAddress &LHS,
                      const SectionedAddress &RHS) {
  return std::tie(LHS.SectionIndex, LHS.Address) <
         std::tie(RHS.SectionIndex, RHS.Address);
}

inline bool operator==(const SectionedAddress &LHS,
                       const SectionedAddress &RHS) {
  return std::tie(LHS.SectionIndex, LHS.Address) ==
         std::tie(RHS.SectionIndex, RHS.Address);
}

raw_ostream &operator<<(raw_ostream &OS, const SectionedAddress &Addr);

/// This is a value type class that represents a single symbol in the list of
/// symbols in the object file.
class SymbolRef : public BasicSymbolRef {
  friend class SectionRef;

public:
  enum Type {
    ST_Unknown, // Type not specified
    ST_Other,
    ST_Data,
    ST_Debug,
    ST_File,
    ST_Function,
  };

  SymbolRef() = default;
  SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
  SymbolRef(const BasicSymbolRef &B) : BasicSymbolRef(B) {
    assert(isa<ObjectFile>(BasicSymbolRef::getObject()));
  }

  Expected<StringRef> getName() const;
  /// Returns the symbol virtual address (i.e. address at which it will be
  /// mapped).
  Expected<uint64_t> getAddress() const;

  /// Return the value of the symbol depending on the object this can be an
  /// offset or a virtual address.
  Expected<uint64_t> getValue() const;

  /// Get the alignment of this symbol as the actual value (not log 2).
  uint32_t getAlignment() const;
  uint64_t getCommonSize() const;
  Expected<SymbolRef::Type> getType() const;

  /// Get section this symbol is defined in reference to. Result is
  /// end_sections() if it is undefined or is an absolute symbol.
  Expected<section_iterator> getSection() const;

  const ObjectFile *getObject() const;
};

class symbol_iterator : public basic_symbol_iterator {
public:
  symbol_iterator(SymbolRef Sym) : basic_symbol_iterator(Sym) {}
  symbol_iterator(const basic_symbol_iterator &B)
      : basic_symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
                                        cast<ObjectFile>(B->getObject()))) {}

  const SymbolRef *operator->() const {
    const BasicSymbolRef &P = basic_symbol_iterator::operator *();
    return static_cast<const SymbolRef*>(&P);
  }

  const SymbolRef &operator*() const {
    const BasicSymbolRef &P = basic_symbol_iterator::operator *();
    return static_cast<const SymbolRef&>(P);
  }
};

/// This class is the base class for all object file types. Concrete instances
/// of this object are created by createObjectFile, which figures out which type
/// to create.
class ObjectFile : public SymbolicFile {
  virtual void anchor();

protected:
  ObjectFile(unsigned int Type, MemoryBufferRef Source);

  const uint8_t *base() const {
    return reinterpret_cast<const uint8_t *>(Data.getBufferStart());
  }

  // These functions are for SymbolRef to call internally. The main goal of
  // this is to allow SymbolRef::SymbolPimpl to point directly to the symbol
  // entry in the memory mapped object file. SymbolPimpl cannot contain any
  // virtual functions because then it could not point into the memory mapped
  // file.
  //
  // Implementations assume that the DataRefImpl is valid and has not been
  // modified externally. It's UB otherwise.
  friend class SymbolRef;

  virtual Expected<StringRef> getSymbolName(DataRefImpl Symb) const = 0;
  Error printSymbolName(raw_ostream &OS, DataRefImpl Symb) const override;
  virtual Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const = 0;
  virtual uint64_t getSymbolValueImpl(DataRefImpl Symb) const = 0;
  virtual uint32_t getSymbolAlignment(DataRefImpl Symb) const;
  virtual uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const = 0;
  virtual Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const = 0;
  virtual Expected<section_iterator>
  getSymbolSection(DataRefImpl Symb) const = 0;

  // Same as above for SectionRef.
  friend class SectionRef;

  virtual void moveSectionNext(DataRefImpl &Sec) const = 0;
  virtual Expected<StringRef> getSectionName(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionIndex(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
  virtual Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionAlignment(DataRefImpl Sec) const = 0;
  virtual bool isSectionCompressed(DataRefImpl Sec) const = 0;
  virtual bool isSectionText(DataRefImpl Sec) const = 0;
  virtual bool isSectionData(DataRefImpl Sec) const = 0;
  virtual bool isSectionBSS(DataRefImpl Sec) const = 0;
  // A section is 'virtual' if its contents aren't present in the object image.
  virtual bool isSectionVirtual(DataRefImpl Sec) const = 0;
  virtual bool isSectionBitcode(DataRefImpl Sec) const;
  virtual bool isSectionStripped(DataRefImpl Sec) const;
  virtual bool isBerkeleyText(DataRefImpl Sec) const;
  virtual bool isBerkeleyData(DataRefImpl Sec) const;
  virtual bool isDebugSection(DataRefImpl Sec) const;
  virtual relocation_iterator section_rel_begin(DataRefImpl Sec) const = 0;
  virtual relocation_iterator section_rel_end(DataRefImpl Sec) const = 0;
  virtual Expected<section_iterator> getRelocatedSection(DataRefImpl Sec) const;

  // Same as above for RelocationRef.
  friend class RelocationRef;
  virtual void moveRelocationNext(DataRefImpl &Rel) const = 0;
  virtual uint64_t getRelocationOffset(DataRefImpl Rel) const = 0;
  virtual symbol_iterator getRelocationSymbol(DataRefImpl Rel) const = 0;
  virtual uint64_t getRelocationType(DataRefImpl Rel) const = 0;
  virtual void getRelocationTypeName(DataRefImpl Rel,
                                     SmallVectorImpl<char> &Result) const = 0;

  virtual llvm::binaryformat::Swift5ReflectionSectionKind
  mapReflectionSectionNameToEnumValue(StringRef SectionName) const {
    return llvm::binaryformat::Swift5ReflectionSectionKind::unknown;
  };

  Expected<uint64_t> getSymbolValue(DataRefImpl Symb) const;

public:
  ObjectFile() = delete;
  ObjectFile(const ObjectFile &other) = delete;

  uint64_t getCommonSymbolSize(DataRefImpl Symb) const {
    Expected<uint32_t> SymbolFlagsOrErr = getSymbolFlags(Symb);
    if (!SymbolFlagsOrErr)
      // TODO: Actually report errors helpfully.
      report_fatal_error(SymbolFlagsOrErr.takeError());
    assert(*SymbolFlagsOrErr & SymbolRef::SF_Common);
    return getCommonSymbolSizeImpl(Symb);
  }

  virtual std::vector<SectionRef> dynamic_relocation_sections() const {
    return std::vector<SectionRef>();
  }

  using symbol_iterator_range = iterator_range<symbol_iterator>;
  symbol_iterator_range symbols() const {
    return symbol_iterator_range(symbol_begin(), symbol_end());
  }

  virtual section_iterator section_begin() const = 0;
  virtual section_iterator section_end() const = 0;

  using section_iterator_range = iterator_range<section_iterator>;
  section_iterator_range sections() const {
    return section_iterator_range(section_begin(), section_end());
  }

  virtual bool hasDebugInfo() const;

  /// The number of bytes used to represent an address in this object file
  /// format.
  virtual uint8_t getBytesInAddress() const = 0;

  virtual StringRef getFileFormatName() const = 0;
  virtual Triple::ArchType getArch() const = 0;
  virtual Expected<SubtargetFeatures> getFeatures() const = 0;
  virtual std::optional<StringRef> tryGetCPUName() const {
    return std::nullopt;
  };
  virtual void setARMSubArch(Triple &TheTriple) const { }
  virtual Expected<uint64_t> getStartAddress() const {
    return errorCodeToError(object_error::parse_failed);
  };

  /// Create a triple from the data in this object file.
  Triple makeTriple() const;

  /// Maps a debug section name to a standard DWARF section name.
  virtual StringRef mapDebugSectionName(StringRef Name) const { return Name; }

  /// True if this is a relocatable object (.o/.obj).
  virtual bool isRelocatableObject() const = 0;

  /// True if the reflection section can be stripped by the linker.
  bool isReflectionSectionStrippable(
      llvm::binaryformat::Swift5ReflectionSectionKind ReflectionSectionKind)
      const;

  /// Create ObjectFile from path.
  /// @param ObjectPath The path to the object file. ObjectPath.isObject must
  ///        return true.
  /// @returns Pointer to ObjectFile subclass to handle this type of object.
  static Expected<OwningBinary<ObjectFile>>
  createObjectFile(StringRef ObjectPath);

  static Expected<std::unique_ptr<ObjectFile>>
  createObjectFile(MemoryBufferRef Object, llvm::file_magic Type,
                   bool InitContent = true);
  static Expected<std::unique_ptr<ObjectFile>>
  createObjectFile(MemoryBufferRef Object) {
    return createObjectFile(Object, llvm::file_magic::unknown);
  }

  static bool classof(const Binary *v) {
    return v->isObject();
  }

  static Expected<std::unique_ptr<COFFObjectFile>>
  createCOFFObjectFile(MemoryBufferRef Object);

  static Expected<std::unique_ptr<ObjectFile>>
  createXCOFFObjectFile(MemoryBufferRef Object, unsigned FileType);

  static Expected<std::unique_ptr<ObjectFile>>
  createELFObjectFile(MemoryBufferRef Object, bool InitContent = true);

  static Expected<std::unique_ptr<MachOObjectFile>>
  createMachOObjectFile(MemoryBufferRef Object,
                        uint32_t UniversalCputype = 0,
                        uint32_t UniversalIndex = 0);

  static Expected<std::unique_ptr<ObjectFile>>
  createGOFFObjectFile(MemoryBufferRef Object);

  static Expected<std::unique_ptr<WasmObjectFile>>
  createWasmObjectFile(MemoryBufferRef Object);
};
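
// Illustrative usage (sketch): load an object from disk and walk its
// sections. Assumes `Path` names a valid object file and `consume` is a
// caller-provided hypothetical helper.
//
//   Expected<OwningBinary<ObjectFile>> BinOrErr =
//       ObjectFile::createObjectFile(Path);
//   if (!BinOrErr)
//     return BinOrErr.takeError();
//   const ObjectFile &Obj = *BinOrErr->getBinary();
//   for (const SectionRef &Sec : Obj.sections())
//     if (Expected<StringRef> Name = Sec.getName())
//       consume(*Name, Sec.getSize());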

/// A filtered iterator for SectionRefs that skips sections based on a given
/// predicate.
class SectionFilterIterator {
public:
  SectionFilterIterator(SectionFilterPredicate Pred,
                        const section_iterator &Begin,
                        const section_iterator &End)
      : Predicate(std::move(Pred)), Iterator(Begin), End(End) {
    scanPredicate();
  }
  const SectionRef &operator*() const { return *Iterator; }
  SectionFilterIterator &operator++() {
    ++Iterator;
    scanPredicate();
    return *this;
  }
  bool operator!=(const SectionFilterIterator &Other) const {
    return Iterator != Other.Iterator;
  }

private:
  void scanPredicate() {
    while (Iterator != End && !Predicate(*Iterator)) {
      ++Iterator;
    }
  }
  SectionFilterPredicate Predicate;
  section_iterator Iterator;
  section_iterator End;
};

/// Creates an iterator range of SectionFilterIterators for a given Object and
/// predicate.
class SectionFilter {
public:
  SectionFilter(SectionFilterPredicate Pred, const ObjectFile &Obj)
      : Predicate(std::move(Pred)), Object(Obj) {}
  SectionFilterIterator begin() {
    return SectionFilterIterator(Predicate, Object.section_begin(),
                                 Object.section_end());
  }
  SectionFilterIterator end() {
    return SectionFilterIterator(Predicate, Object.section_end(),
                                 Object.section_end());
  }

private:
  SectionFilterPredicate Predicate;
  const ObjectFile &Object;
};
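
// Example (sketch): keep only the text sections of an ObjectFile `Obj`,
// assuming SectionFilterPredicate is callable with a `const SectionRef &`.
//
//   SectionFilter TextOnly([](const SectionRef &S) { return S.isText(); },
//                          Obj);
//   for (const SectionRef &Sec : TextOnly) {
//     // Only sections for which the predicate returned true are visited.
//   }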

// Inline function definitions.
inline SymbolRef::SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner)
    : BasicSymbolRef(SymbolP, Owner) {}

inline Expected<StringRef> SymbolRef::getName() const {
  return getObject()->getSymbolName(getRawDataRefImpl());
}

inline Expected<uint64_t> SymbolRef::getAddress() const {
  return getObject()->getSymbolAddress(getRawDataRefImpl());
}

inline Expected<uint64_t> SymbolRef::getValue() const {
  return getObject()->getSymbolValue(getRawDataRefImpl());
}

inline uint32_t SymbolRef::getAlignment() const {
  return getObject()->getSymbolAlignment(getRawDataRefImpl());
}

inline uint64_t SymbolRef::getCommonSize() const {
  return getObject()->getCommonSymbolSize(getRawDataRefImpl());
}

inline Expected<section_iterator> SymbolRef::getSection() const {
  return getObject()->getSymbolSection(getRawDataRefImpl());
}

inline Expected<SymbolRef::Type> SymbolRef::getType() const {
  return getObject()->getSymbolType(getRawDataRefImpl());
}

inline const ObjectFile *SymbolRef::getObject() const {
  const SymbolicFile *O = BasicSymbolRef::getObject();
  return cast<ObjectFile>(O);
}

/// SectionRef
inline SectionRef::SectionRef(DataRefImpl SectionP, const ObjectFile *Owner)
    : SectionPimpl(SectionP), OwningObject(Owner) {}

inline bool SectionRef::operator==(const SectionRef &Other) const {
  return OwningObject == Other.OwningObject &&
         SectionPimpl == Other.SectionPimpl;
}

inline bool SectionRef::operator!=(const SectionRef &Other) const {
  return !(*this == Other);
}

inline bool SectionRef::operator<(const SectionRef &Other) const {
  assert(OwningObject == Other.OwningObject);
  return SectionPimpl < Other.SectionPimpl;
}

inline void SectionRef::moveNext() {
  return OwningObject->moveSectionNext(SectionPimpl);
}

inline Expected<StringRef> SectionRef::getName() const {
  return OwningObject->getSectionName(SectionPimpl);
}

inline uint64_t SectionRef::getAddress() const {
  return OwningObject->getSectionAddress(SectionPimpl);
}

inline uint64_t SectionRef::getIndex() const {
  return OwningObject->getSectionIndex(SectionPimpl);
}

inline uint64_t SectionRef::getSize() const {
  return OwningObject->getSectionSize(SectionPimpl);
}

inline Expected<StringRef> SectionRef::getContents() const {
  Expected<ArrayRef<uint8_t>> Res =
      OwningObject->getSectionContents(SectionPimpl);
  if (!Res)
    return Res.takeError();
  return StringRef(reinterpret_cast<const char *>(Res->data()), Res->size());
}

inline Align SectionRef::getAlignment() const {
  return MaybeAlign(OwningObject->getSectionAlignment(SectionPimpl))
      .valueOrOne();
}

inline bool SectionRef::isCompressed() const {
  return OwningObject->isSectionCompressed(SectionPimpl);
}

inline bool SectionRef::isText() const {
  return OwningObject->isSectionText(SectionPimpl);
}

inline bool SectionRef::isData() const {
  return OwningObject->isSectionData(SectionPimpl);
}

inline bool SectionRef::isBSS() const {
  return OwningObject->isSectionBSS(SectionPimpl);
}

inline bool SectionRef::isVirtual() const {
  return OwningObject->isSectionVirtual(SectionPimpl);
}

inline bool SectionRef::isBitcode() const {
  return OwningObject->isSectionBitcode(SectionPimpl);
}

inline bool SectionRef::isStripped() const {
  return OwningObject->isSectionStripped(SectionPimpl);
}

inline bool SectionRef::isBerkeleyText() const {
  return OwningObject->isBerkeleyText(SectionPimpl);
}

inline bool SectionRef::isBerkeleyData() const {
  return OwningObject->isBerkeleyData(SectionPimpl);
}

inline bool SectionRef::isDebugSection() const {
  return OwningObject->isDebugSection(SectionPimpl);
}

inline relocation_iterator SectionRef::relocation_begin() const {
  return OwningObject->section_rel_begin(SectionPimpl);
}

inline relocation_iterator SectionRef::relocation_end() const {
  return OwningObject->section_rel_end(SectionPimpl);
}

inline Expected<section_iterator> SectionRef::getRelocatedSection() const {
  return OwningObject->getRelocatedSection(SectionPimpl);
}

inline DataRefImpl SectionRef::getRawDataRefImpl() const {
  return SectionPimpl;
}

inline const ObjectFile *SectionRef::getObject() const {
  return OwningObject;
}

/// RelocationRef
inline RelocationRef::RelocationRef(DataRefImpl RelocationP,
                                    const ObjectFile *Owner)
    : RelocationPimpl(RelocationP), OwningObject(Owner) {}

inline bool RelocationRef::operator==(const RelocationRef &Other) const {
  return RelocationPimpl == Other.RelocationPimpl;
}

inline void RelocationRef::moveNext() {
  return OwningObject->moveRelocationNext(RelocationPimpl);
}

inline uint64_t RelocationRef::getOffset() const {
  return OwningObject->getRelocationOffset(RelocationPimpl);
}

inline symbol_iterator RelocationRef::getSymbol() const {
  return OwningObject->getRelocationSymbol(RelocationPimpl);
}

inline uint64_t RelocationRef::getType() const {
  return OwningObject->getRelocationType(RelocationPimpl);
}

inline void RelocationRef::getTypeName(SmallVectorImpl<char> &Result) const {
  return OwningObject->getRelocationTypeName(RelocationPimpl, Result);
}

inline DataRefImpl RelocationRef::getRawDataRefImpl() const {
  return RelocationPimpl;
}

inline const ObjectFile *RelocationRef::getObject() const {
  return OwningObject;
}

} // end namespace object

template <> struct DenseMapInfo<object::SectionRef> {
  static bool isEqual(const object::SectionRef &A,
                      const object::SectionRef &B) {
    return A == B;
  }
  static object::SectionRef getEmptyKey() {
    return object::SectionRef({}, nullptr);
  }
  static object::SectionRef getTombstoneKey() {
    object::DataRefImpl TS;
    TS.p = (uintptr_t)-1;
    return object::SectionRef(TS, nullptr);
  }
  static unsigned getHashValue(const object::SectionRef &Sec) {
    object::DataRefImpl Raw = Sec.getRawDataRefImpl();
    return hash_combine(Raw.p, Raw.d.a, Raw.d.b);
  }
};

} // end namespace llvm

#endif // LLVM_OBJECT_OBJECTFILE_H

//===- IRSymtab.h - data definitions for IR symbol tables -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains data definitions and a reader and builder for a symbol
// table for LLVM IR. Its purpose is to allow linkers and other consumers of
// bitcode files to efficiently read the symbol table for symbol resolution
// purposes without needing to construct a module in memory.
//
// As with most object files, the symbol table has two parts: the symbol table
// itself and a string table that is referenced by the symbol table.
//
// A symbol table corresponds to a single bitcode file, which may consist of
// multiple modules, so symbol tables may likewise contain symbols for multiple
// modules.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_IRSYMTAB_H
#define LLVM_OBJECT_IRSYMTAB_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>
#include <vector>

namespace llvm {

struct BitcodeFileContents;
class StringTableBuilder;

namespace irsymtab {

namespace storage {

// The data structures in this namespace define the low-level serialization
// format. Clients that just want to read a symbol table should use the
// irsymtab::Reader class.

using Word = support::ulittle32_t;

/// A reference to a string in the string table.
struct Str {
  Word Offset, Size;

  StringRef get(StringRef Strtab) const {
    return {Strtab.data() + Offset, Size};
  }
};

/// A reference to a range of objects in the symbol table.
template <typename T> struct Range {
  Word Offset, Size;

  ArrayRef<T> get(StringRef Symtab) const {
    return {reinterpret_cast<const T *>(Symtab.data() + Offset), Size};
  }
};
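
// Worked example (sketch): Str and Range are (offset, size) views into a flat
// blob. With Strtab == StringRef("\0main\0", 6), a Str with Offset == 1 and
// Size == 4 resolves to "main":
//
//   storage::Str S;
//   S.Offset = 1;
//   S.Size = 4;
//   StringRef Name = S.get(Strtab); // "main"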

/// Describes the range of a particular module's symbols within the symbol
/// table.
struct Module {
  Word Begin, End;

  /// The index of the first Uncommon for this Module.
  Word UncBegin;
};

/// This is equivalent to an IR comdat.
struct Comdat {
  Str Name;

  // llvm::Comdat::SelectionKind
  Word SelectionKind;
};

/// Contains the information needed by linkers for symbol resolution, as well as
/// by the LTO implementation itself.
struct Symbol {
  /// The mangled symbol name.
  Str Name;

  /// The unmangled symbol name, or the empty string if this is not an IR
  /// symbol.
  Str IRName;

  /// The index into Header::Comdats, or -1 if not a comdat member.
  Word ComdatIndex;

  Word Flags;
  enum FlagBits {
    FB_visibility, // 2 bits
    FB_has_uncommon = FB_visibility + 2,
    FB_undefined,
    FB_weak,
    FB_common,
    FB_indirect,
    FB_used,
    FB_tls,
    FB_may_omit,
    FB_global,
    FB_format_specific,
    FB_unnamed_addr,
    FB_executable,
  };
};

/// This data structure contains rarely used symbol fields and is optionally
/// referenced by a Symbol.
struct Uncommon {
  Word CommonSize, CommonAlign;

  /// COFF-specific: the name of the symbol that a weak external resolves to
  /// if not defined.
  Str COFFWeakExternFallbackName;

  /// Specified section name, if any.
  Str SectionName;
};

struct Header {
  /// Version number of the symtab format. This number should be incremented
  /// when the format changes, but it does not need to be incremented if a
  /// change to LLVM would cause it to create a different symbol table.
  Word Version;
  enum { kCurrentVersion = 3 };

  /// The producer's version string (LLVM_VERSION_STRING " " LLVM_REVISION).
  /// Consumers should rebuild the symbol table from IR if the producer's
  /// version does not match the consumer's version due to potential differences
  /// in symbol table format, symbol enumeration order and so on.
  Str Producer;

  Range<Module> Modules;
  Range<Comdat> Comdats;
  Range<Symbol> Symbols;
  Range<Uncommon> Uncommons;

  Str TargetTriple, SourceFileName;

  /// COFF-specific: linker directives.
  Str COFFLinkerOpts;

  /// Dependent library specifiers.
  Range<Str> DependentLibraries;
};

} // end namespace storage

/// Fills in Symtab and StrtabBuilder with a valid symbol and string table for
/// Mods.
Error build(ArrayRef<Module *> Mods, SmallVector<char, 0> &Symtab,
            StringTableBuilder &StrtabBuilder, BumpPtrAllocator &Alloc);

/// This represents a symbol that has been read from a storage::Symbol and
/// possibly a storage::Uncommon.
struct Symbol {
  // Copied from storage::Symbol.
  StringRef Name, IRName;
  int ComdatIndex;
  uint32_t Flags;

  // Copied from storage::Uncommon.
  uint32_t CommonSize, CommonAlign;
  StringRef COFFWeakExternFallbackName;
  StringRef SectionName;

  /// Returns the mangled symbol name.
  StringRef getName() const { return Name; }

  /// Returns the unmangled symbol name, or the empty string if this is not an
  /// IR symbol.
  StringRef getIRName() const { return IRName; }

  /// Returns the index into the comdat table (see Reader::getComdatTable()), or
  /// -1 if not a comdat member.
  int getComdatIndex() const { return ComdatIndex; }

  using S = storage::Symbol;

  GlobalValue::VisibilityTypes getVisibility() const {
    return GlobalValue::VisibilityTypes((Flags >> S::FB_visibility) & 3);
  }

  bool isUndefined() const { return (Flags >> S::FB_undefined) & 1; }
  bool isWeak() const { return (Flags >> S::FB_weak) & 1; }
  bool isCommon() const { return (Flags >> S::FB_common) & 1; }
  bool isIndirect() const { return (Flags >> S::FB_indirect) & 1; }
  bool isUsed() const { return (Flags >> S::FB_used) & 1; }
  bool isTLS() const { return (Flags >> S::FB_tls) & 1; }

  bool canBeOmittedFromSymbolTable() const {
    return (Flags >> S::FB_may_omit) & 1;
  }

  bool isGlobal() const { return (Flags >> S::FB_global) & 1; }
  bool isFormatSpecific() const { return (Flags >> S::FB_format_specific) & 1; }
  bool isUnnamedAddr() const { return (Flags >> S::FB_unnamed_addr) & 1; }
  bool isExecutable() const { return (Flags >> S::FB_executable) & 1; }

  uint64_t getCommonSize() const {
    assert(isCommon());
    return CommonSize;
  }

  uint32_t getCommonAlignment() const {
    assert(isCommon());
    return CommonAlign;
  }

  /// COFF-specific: for weak externals, returns the name of the symbol that is
  /// used as a fallback if the weak external remains undefined.
  StringRef getCOFFWeakExternalFallback() const {
    assert(isWeak() && isIndirect());
    return COFFWeakExternFallbackName;
  }

  StringRef getSectionName() const { return SectionName; }
};
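
// Example (sketch): the predicates above each decode one bit of Flags
// relative to storage::Symbol::FlagBits, so for a Symbol `Sym`:
//
//   bool Weak = Sym.isWeak();
//   // ...is equivalent to:
//   bool SameWeak = (Sym.Flags >> storage::Symbol::FB_weak) & 1;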

/// This class can be used to read a Symtab and Strtab produced by
/// irsymtab::build.
class Reader {
  StringRef Symtab, Strtab;

  ArrayRef<storage::Module> Modules;
  ArrayRef<storage::Comdat> Comdats;
  ArrayRef<storage::Symbol> Symbols;
  ArrayRef<storage::Uncommon> Uncommons;
  ArrayRef<storage::Str> DependentLibraries;

  StringRef str(storage::Str S) const { return S.get(Strtab); }

  template <typename T> ArrayRef<T> range(storage::Range<T> R) const {
    return R.get(Symtab);
  }

  const storage::Header &header() const {
    return *reinterpret_cast<const storage::Header *>(Symtab.data());
  }

public:
  class SymbolRef;

  Reader() = default;
  Reader(StringRef Symtab, StringRef Strtab) : Symtab(Symtab), Strtab(Strtab) {
    Modules = range(header().Modules);
    Comdats = range(header().Comdats);
    Symbols = range(header().Symbols);
    Uncommons = range(header().Uncommons);
    DependentLibraries = range(header().DependentLibraries);
  }

  using symbol_range = iterator_range<object::content_iterator<SymbolRef>>;

  /// Returns the symbol table for the entire bitcode file.
  /// The symbols enumerated by this method are ephemeral, but they can be
  /// copied into an irsymtab::Symbol object.
  symbol_range symbols() const;

  size_t getNumModules() const { return Modules.size(); }

  /// Returns a slice of the symbol table for the I'th module in the file.
  /// The symbols enumerated by this method are ephemeral, but they can be
  /// copied into an irsymtab::Symbol object.
  symbol_range module_symbols(unsigned I) const;

  StringRef getTargetTriple() const { return str(header().TargetTriple); }

  /// Returns the source file path specified at compile time.
  StringRef getSourceFileName() const { return str(header().SourceFileName); }

  /// Returns a table with all the comdats used by this file.
  std::vector<std::pair<StringRef, llvm::Comdat::SelectionKind>>
  getComdatTable() const {
    std::vector<std::pair<StringRef, llvm::Comdat::SelectionKind>> ComdatTable;
    ComdatTable.reserve(Comdats.size());
    for (auto C : Comdats)
      ComdatTable.push_back({str(C.Name), llvm::Comdat::SelectionKind(
                                              uint32_t(C.SelectionKind))});
    return ComdatTable;
  }

  /// COFF-specific: returns linker options specified in the input file.
  StringRef getCOFFLinkerOpts() const { return str(header().COFFLinkerOpts); }

  /// Returns dependent library specifiers.
  std::vector<StringRef> getDependentLibraries() const {
    std::vector<StringRef> Specifiers;
    Specifiers.reserve(DependentLibraries.size());
    for (auto S : DependentLibraries) {
      Specifiers.push_back(str(S));
    }
    return Specifiers;
  }
};
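
// Illustrative usage (sketch): enumerate per-module symbols with a Reader `R`
// constructed from valid Symtab/Strtab blobs; `handle` is a hypothetical
// callback.
//
//   for (unsigned I = 0, E = R.getNumModules(); I != E; ++I)
//     for (const irsymtab::Reader::SymbolRef &Sym : R.module_symbols(I))
//       if (!Sym.isFormatSpecific())
//         handle(Sym.getName());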

/// Ephemeral symbols produced by Reader::symbols() and
/// Reader::module_symbols().
class Reader::SymbolRef : public Symbol {
  const storage::Symbol *SymI, *SymE;
  const storage::Uncommon *UncI;
  const Reader *R;

  void read() {
    if (SymI == SymE)
      return;

    Name = R->str(SymI->Name);
    IRName = R->str(SymI->IRName);
    ComdatIndex = SymI->ComdatIndex;
    Flags = SymI->Flags;

    if (Flags & (1 << storage::Symbol::FB_has_uncommon)) {
      CommonSize = UncI->CommonSize;
      CommonAlign = UncI->CommonAlign;
      COFFWeakExternFallbackName = R->str(UncI->COFFWeakExternFallbackName);
      SectionName = R->str(UncI->SectionName);
    } else
      // Reset this field so it can be queried unconditionally for all symbols.
      SectionName = "";
  }

public:
  SymbolRef(const storage::Symbol *SymI, const storage::Symbol *SymE,
            const storage::Uncommon *UncI, const Reader *R)
      : SymI(SymI), SymE(SymE), UncI(UncI), R(R) {
    read();
  }

  void moveNext() {
    ++SymI;
    if (Flags & (1 << storage::Symbol::FB_has_uncommon))
      ++UncI;
    read();
  }

  bool operator==(const SymbolRef &Other) const { return SymI == Other.SymI; }
};

inline Reader::symbol_range Reader::symbols() const {
  return {SymbolRef(Symbols.begin(), Symbols.end(), Uncommons.begin(), this),
          SymbolRef(Symbols.end(), Symbols.end(), nullptr, this)};
}

inline Reader::symbol_range Reader::module_symbols(unsigned I) const {
  const storage::Module &M = Modules[I];
  const storage::Symbol *MBegin = Symbols.begin() + M.Begin,
                        *MEnd = Symbols.begin() + M.End;
  return {SymbolRef(MBegin, MEnd, Uncommons.begin() + M.UncBegin, this),
          SymbolRef(MEnd, MEnd, nullptr, this)};
}

/// The contents of the irsymtab in a bitcode file. Any underlying data for the
/// irsymtab are owned by Symtab and Strtab.
struct FileContents {
  SmallVector<char, 0> Symtab, Strtab;
  Reader TheReader;
};

/// Reads the contents of a bitcode file, creating its irsymtab if necessary.
Expected<FileContents> readBitcode(const BitcodeFileContents &BFC);
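
// Illustrative usage (sketch; assumes `BFC` is a parsed BitcodeFileContents):
//
//   Expected<irsymtab::FileContents> FC = irsymtab::readBitcode(BFC);
//   if (!FC)
//     return FC.takeError();
//   for (const irsymtab::Reader::SymbolRef &Sym : FC->TheReader.symbols())
//     resolve(Sym); // `resolve` is a hypothetical callback.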

} // end namespace irsymtab
} // end namespace llvm

#endif // LLVM_OBJECT_IRSYMTAB_H

//===- Wasm.h - Wasm object file implementation -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the WasmObjectFile class, which implements the ObjectFile
// interface for Wasm files.
//
// See: https://github.com/WebAssembly/design/blob/main/BinaryEncoding.md
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_WASM_H
#define LLVM_OBJECT_WASM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCSymbolWasm.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstddef>
#include <cstdint>
#include <vector>

namespace llvm {
namespace object {

class WasmSymbol {
public:
  WasmSymbol(const wasm::WasmSymbolInfo &Info,
             const wasm::WasmGlobalType *GlobalType,
             const wasm::WasmTableType *TableType,
             const wasm::WasmSignature *Signature)
      : Info(Info), GlobalType(GlobalType), TableType(TableType),
        Signature(Signature) {}

  const wasm::WasmSymbolInfo &Info;
  const wasm::WasmGlobalType *GlobalType;
  const wasm::WasmTableType *TableType;
  const wasm::WasmSignature *Signature;

  bool isTypeFunction() const {
    return Info.Kind == wasm::WASM_SYMBOL_TYPE_FUNCTION;
  }

  bool isTypeTable() const { return Info.Kind == wasm::WASM_SYMBOL_TYPE_TABLE; }

  bool isTypeData() const { return Info.Kind == wasm::WASM_SYMBOL_TYPE_DATA; }

  bool isTypeGlobal() const {
    return Info.Kind == wasm::WASM_SYMBOL_TYPE_GLOBAL;
  }

  bool isTypeSection() const {
    return Info.Kind == wasm::WASM_SYMBOL_TYPE_SECTION;
  }

  bool isTypeTag() const { return Info.Kind == wasm::WASM_SYMBOL_TYPE_TAG; }

  bool isDefined() const { return !isUndefined(); }

  bool isUndefined() const {
    return (Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) != 0;
  }

  bool isBindingWeak() const {
    return getBinding() == wasm::WASM_SYMBOL_BINDING_WEAK;
  }

  bool isBindingGlobal() const {
    return getBinding() == wasm::WASM_SYMBOL_BINDING_GLOBAL;
  }

  bool isBindingLocal() const {
    return getBinding() == wasm::WASM_SYMBOL_BINDING_LOCAL;
  }

  unsigned getBinding() const {
    return Info.Flags & wasm::WASM_SYMBOL_BINDING_MASK;
  }

  bool isHidden() const {
    return getVisibility() == wasm::WASM_SYMBOL_VISIBILITY_HIDDEN;
  }

  unsigned getVisibility() const {
    return Info.Flags & wasm::WASM_SYMBOL_VISIBILITY_MASK;
  }

  void print(raw_ostream &Out) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const;
#endif
};
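
// Example (sketch): binding and visibility are bit fields of Info.Flags, so a
// defined, exportable symbol can be detected as:
//
//   if (Sym.isDefined() && Sym.isBindingGlobal() && !Sym.isHidden())
//     exportSymbol(Sym); // `exportSymbol` is a hypothetical callback.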

struct WasmSection {
  WasmSection() = default;

  uint32_t Type = 0;
  uint32_t Offset = 0;       // Offset within the file
  StringRef Name;            // Section name (User-defined sections only)
  uint32_t Comdat = UINT32_MAX; // From the "comdat info" section
  ArrayRef<uint8_t> Content;
  std::vector<wasm::WasmRelocation> Relocations;
  // Length of the LEB encoding of the section header's size field
  std::optional<uint8_t> HeaderSecSizeEncodingLen;
};

struct WasmSegment {
  uint32_t SectionOffset;
  wasm::WasmDataSegment Data;
};

class WasmObjectFile : public ObjectFile {
public:
  WasmObjectFile(MemoryBufferRef Object, Error &Err);

  const wasm::WasmObjectHeader &getHeader() const;
  const WasmSymbol &getWasmSymbol(const DataRefImpl &Symb) const;
  const WasmSymbol &getWasmSymbol(const SymbolRef &Symbol) const;
  const WasmSection &getWasmSection(const SectionRef &Section) const;
  const wasm::WasmRelocation &getWasmRelocation(const RelocationRef &Ref) const;

  static bool classof(const Binary *v) { return v->isWasm(); }

  const wasm::WasmDylinkInfo &dylinkInfo() const { return DylinkInfo; }
  const wasm::WasmProducerInfo &getProducerInfo() const { return ProducerInfo; }
  ArrayRef<wasm::WasmFeatureEntry> getTargetFeatures() const {
    return TargetFeatures;
  }
  ArrayRef<wasm::WasmSignature> types() const { return Signatures; }
  ArrayRef<wasm::WasmImport> imports() const { return Imports; }
  ArrayRef<wasm::WasmTable> tables() const { return Tables; }
  ArrayRef<wasm::WasmLimits> memories() const { return Memories; }
  ArrayRef<wasm::WasmGlobal> globals() const { return Globals; }
  ArrayRef<wasm::WasmTag> tags() const { return Tags; }
  ArrayRef<wasm::WasmExport> exports() const { return Exports; }
  ArrayRef<WasmSymbol> syms() const { return Symbols; }
  const wasm::WasmLinkingData &linkingData() const { return LinkingData; }
  uint32_t getNumberOfSymbols() const { return Symbols.size(); }
  ArrayRef<wasm::WasmElemSegment> elements() const { return ElemSegments; }
  ArrayRef<WasmSegment> dataSegments() const { return DataSegments; }
  ArrayRef<wasm::WasmFunction> functions() const { return Functions; }
  ArrayRef<wasm::WasmDebugName> debugNames() const { return DebugNames; }
  uint32_t startFunction() const { return StartFunction; }
  uint32_t getNumImportedGlobals() const { return NumImportedGlobals; }
  uint32_t getNumImportedTables() const { return NumImportedTables; }
  uint32_t getNumImportedFunctions() const { return NumImportedFunctions; }
  uint32_t getNumImportedTags() const { return NumImportedTags; }
  uint32_t getNumSections() const { return Sections.size(); }
  void moveSymbolNext(DataRefImpl &Symb) const override;

  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;

  basic_symbol_iterator symbol_begin() const override;

  basic_symbol_iterator symbol_end() const override;
  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;

  bool is64Bit() const override { return false; }

  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
  uint64_t getWasmSymbolValue(const WasmSymbol &Sym) const;
  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
  uint32_t getSymbolSectionId(SymbolRef Sym) const;

  // Overrides from SectionRef.
  void moveSectionNext(DataRefImpl &Sec) const override;
  Expected<StringRef> getSectionName(DataRefImpl Sec) const override;
  uint64_t getSectionAddress(DataRefImpl Sec) const override;
  uint64_t getSectionIndex(DataRefImpl Sec) const override;
  uint64_t getSectionSize(DataRefImpl Sec) const override;
  Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const override;
  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
  bool isSectionCompressed(DataRefImpl Sec) const override;
  bool isSectionText(DataRefImpl Sec) const override;
  bool isSectionData(DataRefImpl Sec) const override;
  bool isSectionBSS(DataRefImpl Sec) const override;
  bool isSectionVirtual(DataRefImpl Sec) const override;
  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
  relocation_iterator section_rel_end(DataRefImpl Sec) const override;

  // Overrides from RelocationRef.
  void moveRelocationNext(DataRefImpl &Rel) const override;
  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
  uint64_t getRelocationType(DataRefImpl Rel) const override;
  void getRelocationTypeName(DataRefImpl Rel,
                             SmallVectorImpl<char> &Result) const override;

  section_iterator section_begin() const override;
  section_iterator section_end() const override;
  uint8_t getBytesInAddress() const override;
  StringRef getFileFormatName() const override;
  Triple::ArchType getArch() const override;
  Expected<SubtargetFeatures> getFeatures() const override;
  bool isRelocatableObject() const override;
  bool isSharedObject() const;

  struct ReadContext {
    const uint8_t *Start;
    const uint8_t *Ptr;
    const uint8_t *End;
  };

private:
  bool isValidFunctionIndex(uint32_t Index) const;
  bool isDefinedFunctionIndex(uint32_t Index) const;
  bool isValidGlobalIndex(uint32_t Index) const;
  bool isValidTableNumber(uint32_t Index) const;
  bool isDefinedGlobalIndex(uint32_t Index) const;
  bool isDefinedTableNumber(uint32_t Index) const;
  bool isValidTagIndex(uint32_t Index) const;
  bool isDefinedTagIndex(uint32_t Index) const;
  bool isValidFunctionSymbol(uint32_t Index) const;
  bool isValidTableSymbol(uint32_t Index) const;
  bool isValidGlobalSymbol(uint32_t Index) const;
  bool isValidTagSymbol(uint32_t Index) const;
  bool isValidDataSymbol(uint32_t Index) const;
  bool isValidSectionSymbol(uint32_t Index) const;
  wasm::WasmFunction &getDefinedFunction(uint32_t Index);
  const wasm::WasmFunction &getDefinedFunction(uint32_t Index) const;
  wasm::WasmGlobal &getDefinedGlobal(uint32_t Index);
  wasm::WasmTag &getDefinedTag(uint32_t Index);

  const WasmSection &getWasmSection(DataRefImpl Ref) const;
  const wasm::WasmRelocation &getWasmRelocation(DataRefImpl Ref) const;
  uint32_t getSymbolSectionIdImpl(const WasmSymbol &Symb) const;

  Error parseSection(WasmSection &Sec);
  Error parseCustomSection(WasmSection &Sec, ReadContext &Ctx);

  // Standard section types
  Error parseTypeSection(ReadContext &Ctx);
  Error parseImportSection(ReadContext &Ctx);
  Error parseFunctionSection(ReadContext &Ctx);
  Error parseTableSection(ReadContext &Ctx);
  Error parseMemorySection(ReadContext &Ctx);
  Error parseTagSection(ReadContext &Ctx);
  Error parseGlobalSection(ReadContext &Ctx);
  Error parseExportSection(ReadContext &Ctx);
  Error parseStartSection(ReadContext &Ctx);
  Error parseElemSection(ReadContext &Ctx);
  Error parseCodeSection(ReadContext &Ctx);
  Error parseDataSection(ReadContext &Ctx);
  Error parseDataCountSection(ReadContext &Ctx);

  // Custom section types
  Error parseDylinkSection(ReadContext &Ctx);
  Error parseDylink0Section(ReadContext &Ctx);
  Error parseNameSection(ReadContext &Ctx);
  Error parseLinkingSection(ReadContext &Ctx);
  Error parseLinkingSectionSymtab(ReadContext &Ctx);
  Error parseLinkingSectionComdat(ReadContext &Ctx);
  Error parseProducersSection(ReadContext &Ctx);
  Error parseTargetFeaturesSection(ReadContext &Ctx);
  Error parseRelocSection(StringRef Name, ReadContext &Ctx);

  wasm::WasmObjectHeader Header;
  std::vector<WasmSection> Sections;
  wasm::WasmDylinkInfo DylinkInfo;
  wasm::WasmProducerInfo ProducerInfo;
  std::vector<wasm::WasmFeatureEntry> TargetFeatures;
  std::vector<wasm::WasmSignature> Signatures;
  std::vector<wasm::WasmTable> Tables;
  std::vector<wasm::WasmLimits> Memories;
  std::vector<wasm::WasmGlobal> Globals;
  std::vector<wasm::WasmTag> Tags;
  std::vector<wasm::WasmImport> Imports;
  std::vector<wasm::WasmExport> Exports;
  std::vector<wasm::WasmElemSegment> ElemSegments;
  std::vector<WasmSegment> DataSegments;
  std::optional<size_t> DataCount;
  std::vector<wasm::WasmFunction> Functions;
  std::vector<WasmSymbol> Symbols;
  std::vector<wasm::WasmDebugName> DebugNames;
  uint32_t StartFunction = -1; // -1 (UINT32_MAX) means no start function.
  bool HasLinkingSection = false;
  bool HasDylinkSection = false;
  bool HasMemory64 = false;
  wasm::WasmLinkingData LinkingData;
  uint32_t NumImportedGlobals = 0;
  uint32_t NumImportedTables = 0;
  uint32_t NumImportedFunctions = 0;
  uint32_t NumImportedTags = 0;
  uint32_t CodeSection = 0;
  uint32_t DataSection = 0;
  uint32_t TagSection = 0;
  uint32_t GlobalSection = 0;
  uint32_t TableSection = 0;
};
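
// Illustrative construction (sketch): the constructor reports parse failures
// through its Error out-parameter. Assumes `Buffer` is a MemoryBufferRef over
// wasm bytes and `recordImport` is a hypothetical callback.
//
//   Error Err = Error::success();
//   WasmObjectFile Obj(Buffer, Err);
//   if (Err)
//     return std::move(Err);
//   for (const wasm::WasmImport &Imp : Obj.imports())
//     recordImport(Imp);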

class WasmSectionOrderChecker {
public:
  // We define orders for all core wasm sections and known custom sections.
  enum : int {
    // Sentinel, must be zero
    WASM_SEC_ORDER_NONE = 0,

    // Core sections
    WASM_SEC_ORDER_TYPE,
    WASM_SEC_ORDER_IMPORT,
    WASM_SEC_ORDER_FUNCTION,
    WASM_SEC_ORDER_TABLE,
    WASM_SEC_ORDER_MEMORY,
    WASM_SEC_ORDER_TAG,
    WASM_SEC_ORDER_GLOBAL,
    WASM_SEC_ORDER_EXPORT,
    WASM_SEC_ORDER_START,
    WASM_SEC_ORDER_ELEM,
    WASM_SEC_ORDER_DATACOUNT,
    WASM_SEC_ORDER_CODE,
    WASM_SEC_ORDER_DATA,

    // Custom sections
    // "dylink" should be the very first section in the module
    WASM_SEC_ORDER_DYLINK,
    // "linking" section requires DATA section in order to validate data symbols
    WASM_SEC_ORDER_LINKING,
    // Must come after "linking" section in order to validate reloc indexes.
    WASM_SEC_ORDER_RELOC,
    // "name" section must appear after DATA. Comes after "linking" to allow
    // symbol table to set default function name.
    WASM_SEC_ORDER_NAME,
    // "producers" section must appear after "name" section.
    WASM_SEC_ORDER_PRODUCERS,
    // "target_features" section must appear after producers section
    WASM_SEC_ORDER_TARGET_FEATURES,

    // Must be last
    WASM_NUM_SEC_ORDERS
  };

  // Sections that may or may not be present, but cannot be predecessors
  static int DisallowedPredecessors[WASM_NUM_SEC_ORDERS][WASM_NUM_SEC_ORDERS];

  bool isValidSectionOrder(unsigned ID, StringRef CustomSectionName = "");

private:
  bool Seen[WASM_NUM_SEC_ORDERS] = {}; // Sections that have been seen already

  // Returns -1 for unknown sections.
  int getSectionOrder(unsigned ID, StringRef CustomSectionName = "");
};
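
// Example (sketch): the checker is stateful; feed it section IDs in file
// order and reject on the first violation. `SectionsInOrder` and `makeError`
// are hypothetical.
//
//   WasmSectionOrderChecker Checker;
//   for (const WasmSection &Sec : SectionsInOrder)
//     if (!Checker.isValidSectionOrder(Sec.Type, Sec.Name))
//       return makeError("out of order section");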

} // end namespace object

inline raw_ostream &operator<<(raw_ostream &OS, const object::WasmSymbol &Sym) {
  Sym.print(OS);
  return OS;
}

} // end namespace llvm

#endif // LLVM_OBJECT_WASM_H

//===-- Decompressor.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_DECOMPRESSOR_H
#define LLVM_OBJECT_DECOMPRESSOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace object {

/// Decompressor handles decompression of compressed sections.
class Decompressor {
public:
  /// Create a decompressor object.
  /// @param Name        Section name.
  /// @param Data        Section content.
  /// @param IsLE        True if Data is in little-endian form.
  /// @param Is64Bit     True if the object file is 64-bit.
  static Expected<Decompressor> create(StringRef Name, StringRef Data,
                                       bool IsLE, bool Is64Bit);

  /// Resize the buffer and uncompress section data into it.
  /// @param Out         Destination buffer.
  template <class T> Error resizeAndDecompress(T &Out) {
    Out.resize(DecompressedSize);
    return decompress({(uint8_t *)Out.data(), (size_t)DecompressedSize});
  }

  /// Uncompress section data to raw buffer provided.
  Error decompress(MutableArrayRef<uint8_t> Output);

  /// Return memory buffer size required for decompression.
  uint64_t getDecompressedSize() { return DecompressedSize; }

private:
  Decompressor(StringRef Data);

  Error consumeCompressedHeader(bool Is64Bit, bool IsLittleEndian);

  StringRef SectionData;
  uint64_t DecompressedSize;
  DebugCompressionType CompressionType = DebugCompressionType::None;
};
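
// Illustrative usage (sketch): decompress a section's contents into a
// SmallString. `Name`, `Contents`, `IsLittleEndian` and `Is64Bit` come from
// the containing object file.
//
//   Expected<Decompressor> D =
//       Decompressor::create(Name, Contents, IsLittleEndian, Is64Bit);
//   if (!D)
//     return D.takeError();
//   SmallString<0> Out;
//   if (Error E = D->resizeAndDecompress(Out))
//     return std::move(E);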

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_DECOMPRESSOR_H

//===- MachOUniversalWriter.h - MachO universal binary writer----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares the Slice class and writeUniversalBinary function for writing a
// MachO universal binary file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_MACHOUNIVERSALWRITER_H
#define LLVM_OBJECT_MACHOUNIVERSALWRITER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <string>

namespace llvm {
class LLVMContext;

namespace object {
class Archive;
class Binary;
class IRObjectFile;
class MachOObjectFile;

class Slice {
  const Binary *B;
  uint32_t CPUType;
  uint32_t CPUSubType;
  std::string ArchName;

  // P2Alignment field stores slice alignment values from universal
  // binaries. This is also needed to order the slices so the total
  // file size can be calculated before creating the output buffer.
  uint32_t P2Alignment;

  Slice(const IRObjectFile &IRO, uint32_t CPUType, uint32_t CPUSubType,
        std::string ArchName, uint32_t Align);

public:
  explicit Slice(const MachOObjectFile &O);

  Slice(const MachOObjectFile &O, uint32_t Align);

  /// This constructor takes pre-specified \p CPUType, \p CPUSubType,
  /// \p ArchName, and \p Align instead of inferring them from the archive
  /// members.
  Slice(const Archive &A, uint32_t CPUType, uint32_t CPUSubType,
        std::string ArchName, uint32_t Align);

  static Expected<Slice> create(const Archive &A,
                                LLVMContext *LLVMCtx = nullptr);

  static Expected<Slice> create(const IRObjectFile &IRO, uint32_t Align);

  void setP2Alignment(uint32_t Align) { P2Alignment = Align; }

  const Binary *getBinary() const { return B; }

  uint32_t getCPUType() const { return CPUType; }

  uint32_t getCPUSubType() const { return CPUSubType; }

  uint32_t getP2Alignment() const { return P2Alignment; }

  uint64_t getCPUID() const {
    return static_cast<uint64_t>(CPUType) << 32 | CPUSubType;
  }

  std::string getArchString() const {
    if (!ArchName.empty())
      return ArchName;
    return ("unknown(" + Twine(CPUType) + "," +
            Twine(CPUSubType & ~MachO::CPU_SUBTYPE_MASK) + ")")
        .str();
  }

  friend bool operator<(const Slice &Lhs, const Slice &Rhs) {
    if (Lhs.CPUType == Rhs.CPUType)
      return Lhs.CPUSubType < Rhs.CPUSubType;
    // Force the arm64 family to follow all other slices, for
    // compatibility with cctools lipo.
    if (Lhs.CPUType == MachO::CPU_TYPE_ARM64)
      return false;
    if (Rhs.CPUType == MachO::CPU_TYPE_ARM64)
      return true;
    // Sort by alignment to minimize file size
    return Lhs.P2Alignment < Rhs.P2Alignment;
  }
};

Error writeUniversalBinary(ArrayRef<Slice> Slices, StringRef OutputFileName);

Error writeUniversalBinaryToStream(ArrayRef<Slice> Slices, raw_ostream &Out);
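
// Illustrative usage (sketch): build one Slice per parsed MachOObjectFile and
// write a fat binary. Assumes `Objs` is a range of MachOObjectFile objects.
//
//   SmallVector<Slice, 2> Slices;
//   for (const MachOObjectFile &O : Objs)
//     Slices.emplace_back(O);
//   if (Error E = writeUniversalBinary(Slices, "out.fat"))
//     return std::move(E);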

} // end namespace object

} // end namespace llvm

#endif // LLVM_OBJECT_MACHOUNIVERSALWRITER_H

//===- GOFFObjectFile.h - GOFF object file implementation -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the GOFFObjectFile class.
// Record classes and derivatives are also declared and implemented.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_GOFFOBJECTFILE_H
#define LLVM_OBJECT_GOFFOBJECTFILE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/BinaryFormat/GOFF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ConvertEBCDIC.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"

namespace llvm {

namespace object {

class GOFFObjectFile : public ObjectFile {
  IndexedMap<const uint8_t *> EsdPtrs; // Indexed by EsdId.

  mutable DenseMap<uint32_t, std::pair<size_t, std::unique_ptr<char[]>>>
      EsdNamesCache;

  typedef DataRefImpl SectionEntryImpl;
  // (EDID, 0)               code, r/o data section
  // (EDID,PRID)             r/w data section
  SmallVector<SectionEntryImpl, 256> SectionList;
  mutable DenseMap<uint32_t, std::string> SectionDataCache;

public:
  Expected<StringRef> getSymbolName(SymbolRef Symbol) const;

  GOFFObjectFile(MemoryBufferRef Object, Error &Err);
  static inline bool classof(const Binary *V) { return V->isGOFF(); }
  section_iterator section_begin() const override;
  section_iterator section_end() const override;

  uint8_t getBytesInAddress() const override { return 8; }

  StringRef getFileFormatName() const override { return "GOFF-SystemZ"; }

  Triple::ArchType getArch() const override { return Triple::systemz; }

  Expected<SubtargetFeatures> getFeatures() const override {
    return SubtargetFeatures();
  }

  bool isRelocatableObject() const override { return true; }

  void moveSymbolNext(DataRefImpl &Symb) const override;
  basic_symbol_iterator symbol_begin() const override;
  basic_symbol_iterator symbol_end() const override;

  bool is64Bit() const override {
    return true;
  }

private:
  // SymbolRef.
  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;
  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;

  const uint8_t *getSymbolEsdRecord(DataRefImpl Symb) const;
  bool isSymbolUnresolved(DataRefImpl Symb) const;
  bool isSymbolIndirect(DataRefImpl Symb) const;

  // SectionRef.
  void moveSectionNext(DataRefImpl &Sec) const override {}
  Expected<StringRef> getSectionName(DataRefImpl Sec) const override {
    return StringRef();
  }
  uint64_t getSectionAddress(DataRefImpl Sec) const override { return 0; }
  uint64_t getSectionSize(DataRefImpl Sec) const override { return 0; }
  Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const override {
    return ArrayRef<uint8_t>();
  }
  uint64_t getSectionIndex(DataRefImpl Sec) const override { return 0; }
  uint64_t getSectionAlignment(DataRefImpl Sec) const override { return 0; }
  bool isSectionCompressed(DataRefImpl Sec) const override { return false; }
  bool isSectionText(DataRefImpl Sec) const override { return false; }
  bool isSectionData(DataRefImpl Sec) const override { return false; }
  bool isSectionBSS(DataRefImpl Sec) const override { return false; }
  bool isSectionVirtual(DataRefImpl Sec) const override { return false; }
  relocation_iterator section_rel_begin(DataRefImpl Sec) const override {
    return relocation_iterator(RelocationRef(Sec, this));
  }
  relocation_iterator section_rel_end(DataRefImpl Sec) const override {
    return relocation_iterator(RelocationRef(Sec, this));
  }

  const uint8_t *getSectionEdEsdRecord(DataRefImpl &Sec) const;
  const uint8_t *getSectionPrEsdRecord(DataRefImpl &Sec) const;
  const uint8_t *getSectionEdEsdRecord(uint32_t SectionIndex) const;
  const uint8_t *getSectionPrEsdRecord(uint32_t SectionIndex) const;

  // RelocationRef.
  void moveRelocationNext(DataRefImpl &Rel) const override {}
  uint64_t getRelocationOffset(DataRefImpl Rel) const override { return 0; }
  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override {
    DataRefImpl Temp;
    return basic_symbol_iterator(SymbolRef(Temp, this));
  }
  uint64_t getRelocationType(DataRefImpl Rel) const override { return 0; }
  void getRelocationTypeName(DataRefImpl Rel,
                             SmallVectorImpl<char> &Result) const override {}
};

} // namespace object

} // namespace llvm

#endif

//===- CVDebugRecord.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_CVDEBUGRECORD_H
#define LLVM_OBJECT_CVDEBUGRECORD_H

#include "llvm/Support/Endian.h"

namespace llvm {
namespace OMF {
struct Signature {
  enum ID : uint32_t {
    PDB70 = 0x53445352, // RSDS
    PDB20 = 0x3031424e, // NB10
    CV50 = 0x3131424e,  // NB11
    CV41 = 0x3930424e,  // NB09
  };

  support::ulittle32_t CVSignature;
  support::ulittle32_t Offset;
};
}

namespace codeview {
struct PDB70DebugInfo {
  support::ulittle32_t CVSignature;
  uint8_t Signature[16];
  support::ulittle32_t Age;
  // char PDBFileName[];
};

struct PDB20DebugInfo {
  support::ulittle32_t CVSignature;
  support::ulittle32_t Offset;
  support::ulittle32_t Signature;
  support::ulittle32_t Age;
  // char PDBFileName[];
};

union DebugInfo {
  struct OMF::Signature Signature;
  struct PDB20DebugInfo PDB20;
  struct PDB70DebugInfo PDB70;
};
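
// Example (sketch): a debug directory payload is disambiguated by its leading
// 32-bit signature before a union member is read. `Data` points at the raw
// record and `useGuidAndAge` is hypothetical.
//
//   const auto *DI = reinterpret_cast<const codeview::DebugInfo *>(Data);
//   if (DI->Signature.CVSignature == OMF::Signature::PDB70)
//     useGuidAndAge(DI->PDB70.Signature, DI->PDB70.Age);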
}
}

#endif

//===- GOFF.h - GOFF object file implementation -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the GOFFObjectFile class.
// Record classes and derivatives are also declared and implemented.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_GOFF_H
#define LLVM_OBJECT_GOFF_H

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/GOFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace object {

/// \brief Represents a GOFF physical record.
///
/// Specifies protected member functions to manipulate the record. These should
/// be called from derived classes to change values as that record specifies.
class Record {
public:
  static Error getContinuousData(const uint8_t *Record, uint16_t DataLength,
                                 int DataIndex, SmallString<256> &CompleteData);

  static bool isContinued(const uint8_t *Record) {
    uint8_t IsContinued;
    getBits(Record, 1, 7, 1, IsContinued);
    return IsContinued;
  }

  static bool isContinuation(const uint8_t *Record) {
    uint8_t IsContinuation;
    getBits(Record, 1, 6, 1, IsContinuation);
    return IsContinuation;
  }

protected:
  /// \brief Get bit field of specified byte.
  ///
  /// Used to pack bit fields into one byte. Fields are packed left to right.
  /// Bit index zero is the most significant bit of the byte.
  ///
  /// \param Bytes pointer to the record's bytes.
  /// \param ByteIndex index of the byte the field is in.
  /// \param BitIndex index of the first bit of the field.
  /// \param Length length of the bit field.
  /// \param Value [out] receives the extracted bit field value.
  static void getBits(const uint8_t *Bytes, uint8_t ByteIndex, uint8_t BitIndex,
                      uint8_t Length, uint8_t &Value) {
    assert(ByteIndex < GOFF::RecordLength && "Byte index out of bounds!");
    assert(BitIndex < 8 && "Bit index out of bounds!");
    assert(Length + BitIndex <= 8 && "Bit length too long!");

    get<uint8_t>(Bytes, ByteIndex, Value);
    Value = (Value >> (8 - BitIndex - Length)) & ((1 << Length) - 1);
  }

  template <class T>
  static void get(const uint8_t *Bytes, uint8_t ByteIndex, T &Value) {
    assert(ByteIndex + sizeof(T) <= GOFF::RecordLength &&
           "Byte index out of bounds!");
    Value = support::endian::read<T, support::big, support::unaligned>(
        &Bytes[ByteIndex]);
  }
};
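
// Worked example (sketch of the bit math above): isContinued() reads byte 1,
// bit 7, length 1. With Bytes[1] == 0x03, the shift amount is
// 8 - 7 - 1 == 0 and the mask is 0b1, so the extracted value is
// 0x03 & 0b1 == 1, i.e. the record is continued.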

class HDRRecord : public Record {
public:
  static Error getData(const uint8_t *Record, SmallString<256> &CompleteData);

  static uint16_t getPropertyModuleLength(const uint8_t *Record) {
    uint16_t Length;
    get<uint16_t>(Record, 52, Length);
    return Length;
  }
};

class ESDRecord : public Record {
public:
  /// \brief Number of bytes for name; any more must go in continuation.
  /// This is the number of bytes that can fit into the data field of an ESD
  /// record.
  static const uint8_t ESDMaxUncontinuedNameLength = 8;

  /// \brief Maximum name length for ESD records and continuations.
  /// This is the number of bytes that can fit into the data field of an ESD
  /// record AND following continuations. This is limited fundamentally by the
  /// 16 bit SIGNED length field.
  static const uint16_t MaxNameLength = 32 * 1024;

public:
  static Error getData(const uint8_t *Record, SmallString<256> &CompleteData);

  // ESD Get routines.
  static void getSymbolType(const uint8_t *Record,
                            GOFF::ESDSymbolType &SymbolType) {
    uint8_t Value;
    get<uint8_t>(Record, 3, Value);
    SymbolType = (GOFF::ESDSymbolType)Value;
  }

  static void getEsdId(const uint8_t *Record, uint32_t &EsdId) {
    get<uint32_t>(Record, 4, EsdId);
  }

  static void getParentEsdId(const uint8_t *Record, uint32_t &EsdId) {
    get<uint32_t>(Record, 8, EsdId);
  }

  static void getOffset(const uint8_t *Record, uint32_t &Offset) {
    get<uint32_t>(Record, 16, Offset);
  }

  static void getLength(const uint8_t *Record, uint32_t &Length) {
    get<uint32_t>(Record, 24, Length);
  }

  static void getNameSpaceId(const uint8_t *Record, GOFF::ESDNameSpaceId &Id) {
    uint8_t Value;
    get<uint8_t>(Record, 40, Value);
    Id = (GOFF::ESDNameSpaceId)Value;
  }

  static void getFillBytePresent(const uint8_t *Record, bool &Present) {
    uint8_t Value;
    getBits(Record, 41, 0, 1, Value);
    Present = (bool)Value;
  }

  static void getNameMangled(const uint8_t *Record, bool &Mangled) {
    uint8_t Value;
    getBits(Record, 41, 1, 1, Value);
    Mangled = (bool)Value;
  }

  static void getRenamable(const uint8_t *Record, bool &Renamable) {
    uint8_t Value;
    getBits(Record, 41, 2, 1, Value);
    Renamable = (bool)Value;
  }

  static void getRemovable(const uint8_t *Record, bool &Removable) {
    uint8_t Value;
    getBits(Record, 41, 3, 1, Value);
    Removable = (bool)Value;
  }

  static void getFillByteValue(const uint8_t *Record, uint8_t &Fill) {
    get<uint8_t>(Record, 42, Fill);
  }

  static void getAdaEsdId(const uint8_t *Record, uint32_t &EsdId) {
    get<uint32_t>(Record, 44, EsdId);
  }

  static void getSortPriority(const uint8_t *Record, uint32_t &Priority) {
    get<uint32_t>(Record, 48, Priority);
  }

  static void getAmode(const uint8_t *Record, GOFF::ESDAmode &Amode) {
    uint8_t Value;
    get<uint8_t>(Record, 60, Value);
    Amode = (GOFF::ESDAmode)Value;
  }

  static void getRmode(const uint8_t *Record, GOFF::ESDRmode &Rmode) {
    uint8_t Value;
    get<uint8_t>(Record, 61, Value);
    Rmode = (GOFF::ESDRmode)Value;
  }

  static void getTextStyle(const uint8_t *Record, GOFF::ESDTextStyle &Style) {
    uint8_t Value;
    getBits(Record, 62, 0, 4, Value);
    Style = (GOFF::ESDTextStyle)Value;
  }

  static void getBindingAlgorithm(const uint8_t *Record,
                                  GOFF::ESDBindingAlgorithm &Algorithm) {
    uint8_t Value;
    getBits(Record, 62, 4, 4, Value);
    Algorithm = (GOFF::ESDBindingAlgorithm)Value;
  }

  static void getTaskingBehavior(const uint8_t *Record,
                                 GOFF::ESDTaskingBehavior &TaskingBehavior) {
    uint8_t Value;
    getBits(Record, 63, 0, 3, Value);
    TaskingBehavior = (GOFF::ESDTaskingBehavior)Value;
  }

  static void getReadOnly(const uint8_t *Record, bool &ReadOnly) {
    uint8_t Value;
    getBits(Record, 63, 4, 1, Value);
    ReadOnly = (bool)Value;
  }

  static void getExecutable(const uint8_t *Record,
                            GOFF::ESDExecutable &Executable) {
    uint8_t Value;
    getBits(Record, 63, 5, 3, Value);
    Executable = (GOFF::ESDExecutable)Value;
  }

  static void getDuplicateSeverity(const uint8_t *Record,
                                   GOFF::ESDDuplicateSymbolSeverity &DSS) {
    uint8_t Value;
    getBits(Record, 64, 2, 2, Value);
    DSS = (GOFF::ESDDuplicateSymbolSeverity)Value;
  }

  static void getBindingStrength(const uint8_t *Record,
                                 GOFF::ESDBindingStrength &Strength) {
    uint8_t Value;
    getBits(Record, 64, 4, 4, Value);
    Strength = (GOFF::ESDBindingStrength)Value;
  }

  static void getLoadingBehavior(const uint8_t *Record,
                                 GOFF::ESDLoadingBehavior &Behavior) {
    uint8_t Value;
    getBits(Record, 65, 0, 2, Value);
    Behavior = (GOFF::ESDLoadingBehavior)Value;
  }

  static void getIndirectReference(const uint8_t *Record, bool &Indirect) {
    uint8_t Value;
    getBits(Record, 65, 3, 1, Value);
    Indirect = (bool)Value;
  }

  static void getBindingScope(const uint8_t *Record,
                              GOFF::ESDBindingScope &Scope) {
    uint8_t Value;
    getBits(Record, 65, 4, 4, Value);
    Scope = (GOFF::ESDBindingScope)Value;
  }

  static void getLinkageType(const uint8_t *Record,
                             GOFF::ESDLinkageType &Type) {
    uint8_t Value;
    getBits(Record, 66, 2, 1, Value);
    Type = (GOFF::ESDLinkageType)Value;
  }

  static void getAlignment(const uint8_t *Record,
                           GOFF::ESDAlignment &Alignment) {
    uint8_t Value;
    getBits(Record, 66, 3, 5, Value);
    Alignment = (GOFF::ESDAlignment)Value;
  }

  static uint16_t getNameLength(const uint8_t *Record) {
    uint16_t Length;
    get<uint16_t>(Record, 70, Length);
    return Length;
  }
};

class ENDRecord : public Record {
public:
  static Error getData(const uint8_t *Record, SmallString<256> &CompleteData);

  static uint16_t getNameLength(const uint8_t *Record) {
    uint16_t Length;
    get<uint16_t>(Record, 24, Length);
    return Length;
  }
};

} // end namespace object
} // end namespace llvm

#endif

//===- COFF.h - COFF object file implementation -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the COFFObjectFile class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_COFF_H
#define LLVM_OBJECT_COFF_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/CVDebugRecord.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <system_error>

namespace llvm {

template <typename T> class ArrayRef;

namespace object {

class BaseRelocRef;
class DelayImportDirectoryEntryRef;
class ExportDirectoryEntryRef;
class ImportDirectoryEntryRef;
class ImportedSymbolRef;
class ResourceSectionRef;

using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>;
using delay_import_directory_iterator =
    content_iterator<DelayImportDirectoryEntryRef>;
using export_directory_iterator = content_iterator<ExportDirectoryEntryRef>;
using imported_symbol_iterator = content_iterator<ImportedSymbolRef>;
using base_reloc_iterator = content_iterator<BaseRelocRef>;

/// The DOS compatible header at the front of all PE/COFF executables.
struct dos_header {
  char                 Magic[2];
  support::ulittle16_t UsedBytesInTheLastPage;
  support::ulittle16_t FileSizeInPages;
  support::ulittle16_t NumberOfRelocationItems;
  support::ulittle16_t HeaderSizeInParagraphs;
  support::ulittle16_t MinimumExtraParagraphs;
  support::ulittle16_t MaximumExtraParagraphs;
  support::ulittle16_t InitialRelativeSS;
  support::ulittle16_t InitialSP;
  support::ulittle16_t Checksum;
  support::ulittle16_t InitialIP;
  support::ulittle16_t InitialRelativeCS;
  support::ulittle16_t AddressOfRelocationTable;
  support::ulittle16_t OverlayNumber;
  support::ulittle16_t Reserved[4];
  support::ulittle16_t OEMid;
  support::ulittle16_t OEMinfo;
  support::ulittle16_t Reserved2[10];
  support::ulittle32_t AddressOfNewExeHeader;
};
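
// Illustrative sketch (not part of the original header), assuming Buf points
// at the start of a PE/COFF image: check the DOS stub's "MZ" magic and make
// sure the "PE\0\0" signature that AddressOfNewExeHeader points to fits in
// the buffer.
inline bool exampleHasPlausibleDOSHeader(const uint8_t *Buf, size_t Size) {
  if (Size < sizeof(dos_header))
    return false;
  const auto *DH = reinterpret_cast<const dos_header *>(Buf);
  if (DH->Magic[0] != 'M' || DH->Magic[1] != 'Z')
    return false;
  // The 4-byte PE signature must lie within the buffer.
  return DH->AddressOfNewExeHeader + 4 <= Size;
}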

struct coff_file_header {
  support::ulittle16_t Machine;
  support::ulittle16_t NumberOfSections;
  support::ulittle32_t TimeDateStamp;
  support::ulittle32_t PointerToSymbolTable;
  support::ulittle32_t NumberOfSymbols;
  support::ulittle16_t SizeOfOptionalHeader;
  support::ulittle16_t Characteristics;

  bool isImportLibrary() const { return NumberOfSections == 0xffff; }
};

struct coff_bigobj_file_header {
  support::ulittle16_t Sig1;
  support::ulittle16_t Sig2;
  support::ulittle16_t Version;
  support::ulittle16_t Machine;
  support::ulittle32_t TimeDateStamp;
  uint8_t              UUID[16];
  support::ulittle32_t unused1;
  support::ulittle32_t unused2;
  support::ulittle32_t unused3;
  support::ulittle32_t unused4;
  support::ulittle32_t NumberOfSections;
  support::ulittle32_t PointerToSymbolTable;
  support::ulittle32_t NumberOfSymbols;
};

/// The 32-bit PE header that follows the COFF header.
struct pe32_header {
  support::ulittle16_t Magic;
  uint8_t MajorLinkerVersion;
  uint8_t MinorLinkerVersion;
  support::ulittle32_t SizeOfCode;
  support::ulittle32_t SizeOfInitializedData;
  support::ulittle32_t SizeOfUninitializedData;
  support::ulittle32_t AddressOfEntryPoint;
  support::ulittle32_t BaseOfCode;
  support::ulittle32_t BaseOfData;
  support::ulittle32_t ImageBase;
  support::ulittle32_t SectionAlignment;
  support::ulittle32_t FileAlignment;
  support::ulittle16_t MajorOperatingSystemVersion;
  support::ulittle16_t MinorOperatingSystemVersion;
  support::ulittle16_t MajorImageVersion;
  support::ulittle16_t MinorImageVersion;
  support::ulittle16_t MajorSubsystemVersion;
  support::ulittle16_t MinorSubsystemVersion;
  support::ulittle32_t Win32VersionValue;
  support::ulittle32_t SizeOfImage;
  support::ulittle32_t SizeOfHeaders;
  support::ulittle32_t CheckSum;
  support::ulittle16_t Subsystem;
  // FIXME: This should be DllCharacteristics.
  support::ulittle16_t DLLCharacteristics;
  support::ulittle32_t SizeOfStackReserve;
  support::ulittle32_t SizeOfStackCommit;
  support::ulittle32_t SizeOfHeapReserve;
  support::ulittle32_t SizeOfHeapCommit;
  support::ulittle32_t LoaderFlags;
  // FIXME: This should be NumberOfRvaAndSizes.
  support::ulittle32_t NumberOfRvaAndSize;
};

/// The 64-bit PE header that follows the COFF header.
struct pe32plus_header {
  support::ulittle16_t Magic;
  uint8_t MajorLinkerVersion;
  uint8_t MinorLinkerVersion;
  support::ulittle32_t SizeOfCode;
  support::ulittle32_t SizeOfInitializedData;
  support::ulittle32_t SizeOfUninitializedData;
  support::ulittle32_t AddressOfEntryPoint;
  support::ulittle32_t BaseOfCode;
  support::ulittle64_t ImageBase;
  support::ulittle32_t SectionAlignment;
  support::ulittle32_t FileAlignment;
  support::ulittle16_t MajorOperatingSystemVersion;
  support::ulittle16_t MinorOperatingSystemVersion;
  support::ulittle16_t MajorImageVersion;
  support::ulittle16_t MinorImageVersion;
  support::ulittle16_t MajorSubsystemVersion;
  support::ulittle16_t MinorSubsystemVersion;
  support::ulittle32_t Win32VersionValue;
  support::ulittle32_t SizeOfImage;
  support::ulittle32_t SizeOfHeaders;
  support::ulittle32_t CheckSum;
  support::ulittle16_t Subsystem;
  support::ulittle16_t DLLCharacteristics;
  support::ulittle64_t SizeOfStackReserve;
  support::ulittle64_t SizeOfStackCommit;
  support::ulittle64_t SizeOfHeapReserve;
  support::ulittle64_t SizeOfHeapCommit;
  support::ulittle32_t LoaderFlags;
  support::ulittle32_t NumberOfRvaAndSize;
};

struct data_directory {
  support::ulittle32_t RelativeVirtualAddress;
  support::ulittle32_t Size;
};

struct debug_directory {
  support::ulittle32_t Characteristics;
  support::ulittle32_t TimeDateStamp;
  support::ulittle16_t MajorVersion;
  support::ulittle16_t MinorVersion;
  support::ulittle32_t Type;
  support::ulittle32_t SizeOfData;
  support::ulittle32_t AddressOfRawData;
  support::ulittle32_t PointerToRawData;
};

template <typename IntTy>
struct import_lookup_table_entry {
  IntTy Data;

  bool isOrdinal() const { return Data < 0; }

  uint16_t getOrdinal() const {
    assert(isOrdinal() && "ILT entry is not an ordinal!");
    return Data & 0xFFFF;
  }

  uint32_t getHintNameRVA() const {
    assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
    return Data & 0xFFFFFFFF;
  }
};

using import_lookup_table_entry32 =
    import_lookup_table_entry<support::little32_t>;
using import_lookup_table_entry64 =
    import_lookup_table_entry<support::little64_t>;
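
// Illustrative sketch (not part of the original header): an import lookup
// table entry is an ordinal import when its high bit is set (hence the
// signed Data < 0 test above); otherwise it holds the RVA of a hint/name
// pair.
inline uint32_t exampleDecodeILTEntry(const import_lookup_table_entry32 &E,
                                      bool &IsOrdinal) {
  IsOrdinal = E.isOrdinal();
  // Only the low 16 bits matter for ordinals; name imports yield an RVA.
  return IsOrdinal ? E.getOrdinal() : E.getHintNameRVA();
}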

struct delay_import_directory_table_entry {
  // dumpbin reports this field as "Characteristics" instead of "Attributes".
  support::ulittle32_t Attributes;
  support::ulittle32_t Name;
  support::ulittle32_t ModuleHandle;
  support::ulittle32_t DelayImportAddressTable;
  support::ulittle32_t DelayImportNameTable;
  support::ulittle32_t BoundDelayImportTable;
  support::ulittle32_t UnloadDelayImportTable;
  support::ulittle32_t TimeStamp;
};

struct export_directory_table_entry {
  support::ulittle32_t ExportFlags;
  support::ulittle32_t TimeDateStamp;
  support::ulittle16_t MajorVersion;
  support::ulittle16_t MinorVersion;
  support::ulittle32_t NameRVA;
  support::ulittle32_t OrdinalBase;
  support::ulittle32_t AddressTableEntries;
  support::ulittle32_t NumberOfNamePointers;
  support::ulittle32_t ExportAddressTableRVA;
  support::ulittle32_t NamePointerRVA;
  support::ulittle32_t OrdinalTableRVA;
};

union export_address_table_entry {
  support::ulittle32_t ExportRVA;
  support::ulittle32_t ForwarderRVA;
};

using export_name_pointer_table_entry = support::ulittle32_t;
using export_ordinal_table_entry = support::ulittle16_t;

struct StringTableOffset {
  support::ulittle32_t Zeroes;
  support::ulittle32_t Offset;
};

template <typename SectionNumberType>
struct coff_symbol {
  union {
    char ShortName[COFF::NameSize];
    StringTableOffset Offset;
  } Name;

  support::ulittle32_t Value;
  SectionNumberType SectionNumber;

  support::ulittle16_t Type;

  uint8_t StorageClass;
  uint8_t NumberOfAuxSymbols;
};

using coff_symbol16 = coff_symbol<support::ulittle16_t>;
using coff_symbol32 = coff_symbol<support::ulittle32_t>;

// Contains only the fields common to coff_symbol16 and coff_symbol32.
struct coff_symbol_generic {
  union {
    char ShortName[COFF::NameSize];
    StringTableOffset Offset;
  } Name;
  support::ulittle32_t Value;
};

struct coff_aux_section_definition;
struct coff_aux_weak_external;

class COFFSymbolRef {
public:
  COFFSymbolRef() = default;
  COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS) {}
  COFFSymbolRef(const coff_symbol32 *CS) : CS32(CS) {}

  const void *getRawPtr() const {
    return CS16 ? static_cast<const void *>(CS16) : CS32;
  }

  const coff_symbol_generic *getGeneric() const {
    if (CS16)
      return reinterpret_cast<const coff_symbol_generic *>(CS16);
    return reinterpret_cast<const coff_symbol_generic *>(CS32);
  }

  friend bool operator<(COFFSymbolRef A, COFFSymbolRef B) {
    return A.getRawPtr() < B.getRawPtr();
  }

  bool isBigObj() const {
    if (CS16)
      return false;
    if (CS32)
      return true;
    llvm_unreachable("COFFSymbolRef points to nothing!");
  }

  const char *getShortName() const {
    return CS16 ? CS16->Name.ShortName : CS32->Name.ShortName;
  }

  const StringTableOffset &getStringTableOffset() const {
    assert(isSet() && "COFFSymbolRef points to nothing!");
    return CS16 ? CS16->Name.Offset : CS32->Name.Offset;
  }

  uint32_t getValue() const {
    assert(isSet() && "COFFSymbolRef points to nothing!");
    return CS16 ? CS16->Value : CS32->Value;
  }

  int32_t getSectionNumber() const {
    assert(isSet() && "COFFSymbolRef points to nothing!");
    if (CS16) {
      // Reserved sections are returned as negative numbers.
      if (CS16->SectionNumber <= COFF::MaxNumberOfSections16)
        return CS16->SectionNumber;
      return static_cast<int16_t>(CS16->SectionNumber);
    }
    return static_cast<int32_t>(CS32->SectionNumber);
  }

  uint16_t getType() const {
    assert(isSet() && "COFFSymbolRef points to nothing!");
    return CS16 ? CS16->Type : CS32->Type;
  }

  uint8_t getStorageClass() const {
    assert(isSet() && "COFFSymbolRef points to nothing!");
    return CS16 ? CS16->StorageClass : CS32->StorageClass;
  }

  uint8_t getNumberOfAuxSymbols() const {
    assert(isSet() && "COFFSymbolRef points to nothing!");
    return CS16 ? CS16->NumberOfAuxSymbols : CS32->NumberOfAuxSymbols;
  }

  uint8_t getBaseType() const { return getType() & 0x0F; }

  uint8_t getComplexType() const {
    return (getType() & 0xF0) >> COFF::SCT_COMPLEX_TYPE_SHIFT;
  }

  template <typename T> const T *getAux() const {
    return CS16 ? reinterpret_cast<const T *>(CS16 + 1)
                : reinterpret_cast<const T *>(CS32 + 1);
  }

  const coff_aux_section_definition *getSectionDefinition() const {
    if (!getNumberOfAuxSymbols() ||
        getStorageClass() != COFF::IMAGE_SYM_CLASS_STATIC)
      return nullptr;
    return getAux<coff_aux_section_definition>();
  }

  const coff_aux_weak_external *getWeakExternal() const {
    if (!getNumberOfAuxSymbols() ||
        getStorageClass() != COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL)
      return nullptr;
    return getAux<coff_aux_weak_external>();
  }

  bool isAbsolute() const {
    return getSectionNumber() == -1;
  }

  bool isExternal() const {
    return getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL;
  }

  bool isCommon() const {
    return (isExternal() || isSection()) &&
           getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED && getValue() != 0;
  }

  bool isUndefined() const {
    return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
           getValue() == 0;
  }

  bool isWeakExternal() const {
    return getStorageClass() == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
  }

  bool isFunctionDefinition() const {
    return isExternal() && getBaseType() == COFF::IMAGE_SYM_TYPE_NULL &&
           getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION &&
           !COFF::isReservedSectionNumber(getSectionNumber());
  }

  bool isFunctionLineInfo() const {
    return getStorageClass() == COFF::IMAGE_SYM_CLASS_FUNCTION;
  }

  bool isAnyUndefined() const {
    return isUndefined() || isWeakExternal();
  }

  bool isFileRecord() const {
    return getStorageClass() == COFF::IMAGE_SYM_CLASS_FILE;
  }

  bool isSection() const {
    return getStorageClass() == COFF::IMAGE_SYM_CLASS_SECTION;
  }

  bool isSectionDefinition() const {
    // C++/CLI creates external ABS symbols for non-const appdomain globals.
    // These are also followed by an auxiliary section definition.
    bool isAppdomainGlobal =
        getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
        getSectionNumber() == COFF::IMAGE_SYM_ABSOLUTE;
    bool isOrdinarySection = getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC;
    if (!getNumberOfAuxSymbols())
      return false;
    return isAppdomainGlobal || isOrdinarySection;
  }

  bool isCLRToken() const {
    return getStorageClass() == COFF::IMAGE_SYM_CLASS_CLR_TOKEN;
  }

private:
  bool isSet() const { return CS16 || CS32; }

  const coff_symbol16 *CS16 = nullptr;
  const coff_symbol32 *CS32 = nullptr;
};
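
// Illustrative sketch (not part of the original header): the predicates
// above combine storage class, section number, and value. For instance, a
// symbol in IMAGE_SYM_UNDEFINED is undefined when Value == 0 and common
// (uninitialized data of size Value) when Value != 0.
inline const char *exampleClassifySymbol(COFFSymbolRef Sym) {
  if (Sym.isUndefined())
    return "undefined";
  if (Sym.isCommon())
    return "common";
  if (Sym.isWeakExternal())
    return "weak external";
  if (Sym.isFunctionDefinition())
    return "function";
  return "other";
}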

struct coff_section {
  char Name[COFF::NameSize];
  support::ulittle32_t VirtualSize;
  support::ulittle32_t VirtualAddress;
  support::ulittle32_t SizeOfRawData;
  support::ulittle32_t PointerToRawData;
  support::ulittle32_t PointerToRelocations;
  support::ulittle32_t PointerToLinenumbers;
  support::ulittle16_t NumberOfRelocations;
  support::ulittle16_t NumberOfLinenumbers;
  support::ulittle32_t Characteristics;

  // Returns true if the actual number of relocations is stored in the
  // VirtualAddress field of the first relocation table entry.
  bool hasExtendedRelocations() const {
    return (Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) &&
           NumberOfRelocations == UINT16_MAX;
  }

  uint32_t getAlignment() const {
    // The IMAGE_SCN_TYPE_NO_PAD bit is a legacy way of getting to
    // IMAGE_SCN_ALIGN_1BYTES.
    if (Characteristics & COFF::IMAGE_SCN_TYPE_NO_PAD)
      return 1;

    // Bits 20-23 encode the section alignment as log2(alignment) + 1; a
    // value of 0 means use the default alignment of 16.
    uint32_t Shift = (Characteristics >> 20) & 0xF;
    if (Shift > 0)
      return 1U << (Shift - 1);
    return 16;
  }
};
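
// Illustrative sketch (not part of the original header): the alignment
// encoding stores log2(alignment) + 1 in bits 20-23, so
// IMAGE_SCN_ALIGN_16BYTES (0x00500000) decodes as 1 << (5 - 1) == 16.
inline bool exampleSectionAlignmentEncoding() {
  coff_section Sec{};
  Sec.Characteristics = COFF::IMAGE_SCN_ALIGN_16BYTES;
  return Sec.getAlignment() == 16;
}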

struct coff_relocation {
  support::ulittle32_t VirtualAddress;
  support::ulittle32_t SymbolTableIndex;
  support::ulittle16_t Type;
};

struct coff_aux_function_definition {
  support::ulittle32_t TagIndex;
  support::ulittle32_t TotalSize;
  support::ulittle32_t PointerToLinenumber;
  support::ulittle32_t PointerToNextFunction;
  char Unused1[2];
};

static_assert(sizeof(coff_aux_function_definition) == 18,
              "auxiliary entry must be 18 bytes");

struct coff_aux_bf_and_ef_symbol {
  char Unused1[4];
  support::ulittle16_t Linenumber;
  char Unused2[6];
  support::ulittle32_t PointerToNextFunction;
  char Unused3[2];
};

static_assert(sizeof(coff_aux_bf_and_ef_symbol) == 18,
              "auxiliary entry must be 18 bytes");

struct coff_aux_weak_external {
  support::ulittle32_t TagIndex;
  support::ulittle32_t Characteristics;
  char Unused1[10];
};

static_assert(sizeof(coff_aux_weak_external) == 18,
              "auxiliary entry must be 18 bytes");

struct coff_aux_section_definition {
  support::ulittle32_t Length;
  support::ulittle16_t NumberOfRelocations;
  support::ulittle16_t NumberOfLinenumbers;
  support::ulittle32_t CheckSum;
  support::ulittle16_t NumberLowPart;
  uint8_t              Selection;
  uint8_t              Unused;
  support::ulittle16_t NumberHighPart;
  int32_t getNumber(bool IsBigObj) const {
    uint32_t Number = static_cast<uint32_t>(NumberLowPart);
    if (IsBigObj)
      Number |= static_cast<uint32_t>(NumberHighPart) << 16;
    return static_cast<int32_t>(Number);
  }
};

static_assert(sizeof(coff_aux_section_definition) == 18,
              "auxiliary entry must be 18 bytes");

struct coff_aux_clr_token {
  uint8_t              AuxType;
  uint8_t              Reserved;
  support::ulittle32_t SymbolTableIndex;
  char                 MBZ[12];
};

static_assert(sizeof(coff_aux_clr_token) == 18,
              "auxiliary entry must be 18 bytes");

struct coff_import_header {
  support::ulittle16_t Sig1;
  support::ulittle16_t Sig2;
  support::ulittle16_t Version;
  support::ulittle16_t Machine;
  support::ulittle32_t TimeDateStamp;
  support::ulittle32_t SizeOfData;
  support::ulittle16_t OrdinalHint;
  support::ulittle16_t TypeInfo;

  int getType() const { return TypeInfo & 0x3; }
  int getNameType() const { return (TypeInfo >> 2) & 0x7; }
};
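
// Illustrative sketch (not part of the original header): TypeInfo packs the
// import type (code, data, or const) in bits 0-1 and the name type (ordinal,
// name, noprefix, or undecorate) in bits 2-4; getType() and getNameType()
// unpack them.
inline bool exampleImportHeaderTypeInfo() {
  coff_import_header H{};
  H.TypeInfo = (3 << 2) | 1; // Name type 3, import type 1.
  return H.getType() == 1 && H.getNameType() == 3;
}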

struct coff_import_directory_table_entry {
  support::ulittle32_t ImportLookupTableRVA;
  support::ulittle32_t TimeDateStamp;
  support::ulittle32_t ForwarderChain;
  support::ulittle32_t NameRVA;
  support::ulittle32_t ImportAddressTableRVA;

  bool isNull() const {
    return ImportLookupTableRVA == 0 && TimeDateStamp == 0 &&
           ForwarderChain == 0 && NameRVA == 0 && ImportAddressTableRVA == 0;
  }
};

template <typename IntTy>
struct coff_tls_directory {
  IntTy StartAddressOfRawData;
  IntTy EndAddressOfRawData;
  IntTy AddressOfIndex;
  IntTy AddressOfCallBacks;
  support::ulittle32_t SizeOfZeroFill;
  support::ulittle32_t Characteristics;

  uint32_t getAlignment() const {
    // Bits 20-23 contain the section alignment, encoded as in coff_section.
    uint32_t Shift = (Characteristics & COFF::IMAGE_SCN_ALIGN_MASK) >> 20;
    if (Shift > 0)
      return 1U << (Shift - 1);
    return 0;
  }

  void setAlignment(uint32_t Align) {
    uint32_t AlignBits = 0;
    if (Align) {
      assert(llvm::isPowerOf2_32(Align) && "alignment is not a power of 2");
      assert(llvm::Log2_32(Align) <= 13 && "alignment requested is too large");
      AlignBits = (llvm::Log2_32(Align) + 1) << 20;
    }
    Characteristics =
        (Characteristics & ~COFF::IMAGE_SCN_ALIGN_MASK) | AlignBits;
  }
};

using coff_tls_directory32 = coff_tls_directory<support::little32_t>;
using coff_tls_directory64 = coff_tls_directory<support::little64_t>;
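
// Illustrative sketch (not part of the original header): the TLS directory
// reuses the section-characteristics alignment encoding, so a value set via
// setAlignment() round-trips through getAlignment().
inline bool exampleTLSAlignmentRoundTrip() {
  coff_tls_directory32 Dir{};
  Dir.setAlignment(16); // Stores (log2(16) + 1) << 20 in Characteristics.
  return Dir.getAlignment() == 16;
}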

enum class frame_type : uint16_t { Fpo = 0, Trap = 1, Tss = 2, NonFpo = 3 };

struct coff_load_config_code_integrity {
  support::ulittle16_t Flags;
  support::ulittle16_t Catalog;
  support::ulittle32_t CatalogOffset;
  support::ulittle32_t Reserved;
};

/// 32-bit load config (IMAGE_LOAD_CONFIG_DIRECTORY32)
struct coff_load_configuration32 {
  support::ulittle32_t Size;
  support::ulittle32_t TimeDateStamp;
  support::ulittle16_t MajorVersion;
  support::ulittle16_t MinorVersion;
  support::ulittle32_t GlobalFlagsClear;
  support::ulittle32_t GlobalFlagsSet;
  support::ulittle32_t CriticalSectionDefaultTimeout;
  support::ulittle32_t DeCommitFreeBlockThreshold;
  support::ulittle32_t DeCommitTotalFreeThreshold;
  support::ulittle32_t LockPrefixTable;
  support::ulittle32_t MaximumAllocationSize;
  support::ulittle32_t VirtualMemoryThreshold;
  support::ulittle32_t ProcessAffinityMask;
  support::ulittle32_t ProcessHeapFlags;
  support::ulittle16_t CSDVersion;
  support::ulittle16_t DependentLoadFlags;
  support::ulittle32_t EditList;
  support::ulittle32_t SecurityCookie;
  support::ulittle32_t SEHandlerTable;
  support::ulittle32_t SEHandlerCount;

  // Added in MSVC 2015 for /guard:cf.
  support::ulittle32_t GuardCFCheckFunction;
  support::ulittle32_t GuardCFCheckDispatch;
  support::ulittle32_t GuardCFFunctionTable;
  support::ulittle32_t GuardCFFunctionCount;
  support::ulittle32_t GuardFlags; // coff_guard_flags

  // Added in MSVC 2017
  coff_load_config_code_integrity CodeIntegrity;
  support::ulittle32_t GuardAddressTakenIatEntryTable;
  support::ulittle32_t GuardAddressTakenIatEntryCount;
  support::ulittle32_t GuardLongJumpTargetTable;
  support::ulittle32_t GuardLongJumpTargetCount;
  support::ulittle32_t DynamicValueRelocTable;
  support::ulittle32_t CHPEMetadataPointer;
  support::ulittle32_t GuardRFFailureRoutine;
  support::ulittle32_t GuardRFFailureRoutineFunctionPointer;
  support::ulittle32_t DynamicValueRelocTableOffset;
  support::ulittle16_t DynamicValueRelocTableSection;
  support::ulittle16_t Reserved2;
  support::ulittle32_t GuardRFVerifyStackPointerFunctionPointer;
  support::ulittle32_t HotPatchTableOffset;

  // Added in MSVC 2019
  support::ulittle32_t Reserved3;
  support::ulittle32_t EnclaveConfigurationPointer;
  support::ulittle32_t VolatileMetadataPointer;
  support::ulittle32_t GuardEHContinuationTable;
  support::ulittle32_t GuardEHContinuationCount;
  support::ulittle32_t GuardXFGCheckFunctionPointer;
  support::ulittle32_t GuardXFGDispatchFunctionPointer;
  support::ulittle32_t GuardXFGTableDispatchFunctionPointer;
  support::ulittle32_t CastGuardOsDeterminedFailureMode;
};

/// 64-bit load config (IMAGE_LOAD_CONFIG_DIRECTORY64)
struct coff_load_configuration64 {
  support::ulittle32_t Size;
  support::ulittle32_t TimeDateStamp;
  support::ulittle16_t MajorVersion;
  support::ulittle16_t MinorVersion;
  support::ulittle32_t GlobalFlagsClear;
  support::ulittle32_t GlobalFlagsSet;
  support::ulittle32_t CriticalSectionDefaultTimeout;
  support::ulittle64_t DeCommitFreeBlockThreshold;
  support::ulittle64_t DeCommitTotalFreeThreshold;
  support::ulittle64_t LockPrefixTable;
  support::ulittle64_t MaximumAllocationSize;
  support::ulittle64_t VirtualMemoryThreshold;
  support::ulittle64_t ProcessAffinityMask;
  support::ulittle32_t ProcessHeapFlags;
  support::ulittle16_t CSDVersion;
  support::ulittle16_t DependentLoadFlags;
  support::ulittle64_t EditList;
  support::ulittle64_t SecurityCookie;
  support::ulittle64_t SEHandlerTable;
  support::ulittle64_t SEHandlerCount;

  // Added in MSVC 2015 for /guard:cf.
  support::ulittle64_t GuardCFCheckFunction;
  support::ulittle64_t GuardCFCheckDispatch;
  support::ulittle64_t GuardCFFunctionTable;
  support::ulittle64_t GuardCFFunctionCount;
  support::ulittle32_t GuardFlags;

  // Added in MSVC 2017
  coff_load_config_code_integrity CodeIntegrity;
  support::ulittle64_t GuardAddressTakenIatEntryTable;
  support::ulittle64_t GuardAddressTakenIatEntryCount;
  support::ulittle64_t GuardLongJumpTargetTable;
  support::ulittle64_t GuardLongJumpTargetCount;
  support::ulittle64_t DynamicValueRelocTable;
  support::ulittle64_t CHPEMetadataPointer;
  support::ulittle64_t GuardRFFailureRoutine;
  support::ulittle64_t GuardRFFailureRoutineFunctionPointer;
  support::ulittle32_t DynamicValueRelocTableOffset;
  support::ulittle16_t DynamicValueRelocTableSection;
  support::ulittle16_t Reserved2;
  support::ulittle64_t GuardRFVerifyStackPointerFunctionPointer;
  support::ulittle32_t HotPatchTableOffset;

  // Added in MSVC 2019
  support::ulittle32_t Reserved3;
  support::ulittle64_t EnclaveConfigurationPointer;
  support::ulittle64_t VolatileMetadataPointer;
  support::ulittle64_t GuardEHContinuationTable;
  support::ulittle64_t GuardEHContinuationCount;
  support::ulittle64_t GuardXFGCheckFunctionPointer;
  support::ulittle64_t GuardXFGDispatchFunctionPointer;
  support::ulittle64_t GuardXFGTableDispatchFunctionPointer;
  support::ulittle64_t CastGuardOsDeterminedFailureMode;
};

struct chpe_metadata {
  support::ulittle32_t Version;
  support::ulittle32_t CodeMap;
  support::ulittle32_t CodeMapCount;
  support::ulittle32_t CodeRangesToEntryPoints;
  support::ulittle32_t RedirectionMetadata;
  support::ulittle32_t __os_arm64x_dispatch_call_no_redirect;
  support::ulittle32_t __os_arm64x_dispatch_ret;
  support::ulittle32_t __os_arm64x_dispatch_call;
  support::ulittle32_t __os_arm64x_dispatch_icall;
  support::ulittle32_t __os_arm64x_dispatch_icall_cfg;
  support::ulittle32_t AlternateEntryPoint;
  support::ulittle32_t AuxiliaryIAT;
  support::ulittle32_t CodeRangesToEntryPointsCount;
  support::ulittle32_t RedirectionMetadataCount;
  support::ulittle32_t GetX64InformationFunctionPointer;
  support::ulittle32_t SetX64InformationFunctionPointer;
  support::ulittle32_t ExtraRFETable;
  support::ulittle32_t ExtraRFETableSize;
  support::ulittle32_t __os_arm64x_dispatch_fptr;
  support::ulittle32_t AuxiliaryIATCopy;
};

struct chpe_range_entry {
  support::ulittle32_t StartOffset;
  support::ulittle32_t Length;
};

enum chpe_range_type { CHPE_RANGE_ARM64, CHPE_RANGE_ARM64EC, CHPE_RANGE_AMD64 };

struct chpe_code_range_entry {
  support::ulittle32_t StartRva;
  support::ulittle32_t EndRva;
  support::ulittle32_t EntryPoint;
};

struct chpe_redirection_entry {
  support::ulittle32_t Source;
  support::ulittle32_t Destination;
};

struct coff_runtime_function_x64 {
  support::ulittle32_t BeginAddress;
  support::ulittle32_t EndAddress;
  support::ulittle32_t UnwindInformation;
};

struct coff_base_reloc_block_header {
  support::ulittle32_t PageRVA;
  support::ulittle32_t BlockSize;
};

struct coff_base_reloc_block_entry {
  support::ulittle16_t Data;

  int getType() const { return Data >> 12; }
  int getOffset() const { return Data & ((1 << 12) - 1); }
};
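
// Illustrative sketch (not part of the original header): base relocations
// are grouped per page; each 16-bit entry packs the relocation type into the
// top 4 bits and a 12-bit offset from the block's PageRVA into the rest.
inline uint32_t exampleBaseRelocTargetRVA(
    const coff_base_reloc_block_header &Block,
    const coff_base_reloc_block_entry &Entry) {
  // The relocated location is the page base plus the entry's offset.
  return Block.PageRVA + Entry.getOffset();
}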

struct coff_resource_dir_entry {
  union {
    support::ulittle32_t NameOffset;
    support::ulittle32_t ID;
    uint32_t getNameOffset() const {
      return maskTrailingOnes<uint32_t>(31) & NameOffset;
    }
    // Even though the PE/COFF spec doesn't mention this, the high bit of a name
    // offset is set.
    void setNameOffset(uint32_t Offset) { NameOffset = Offset | (1 << 31); }
  } Identifier;
  union {
    support::ulittle32_t DataEntryOffset;
    support::ulittle32_t SubdirOffset;

    bool isSubDir() const { return SubdirOffset >> 31; }
    uint32_t value() const {
      return maskTrailingOnes<uint32_t>(31) & SubdirOffset;
    }

  } Offset;
};
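
// Illustrative sketch (not part of the original header): both unions above
// use the high bit as a discriminator. Identifier holds a string-name offset
// when the high bit is set (which getNameOffset() masks off) and a numeric
// ID otherwise; Offset points at a subdirectory table when its high bit is
// set and at a data entry otherwise.
inline bool exampleIsNamedSubdirEntry(const coff_resource_dir_entry &E) {
  return (E.Identifier.NameOffset >> 31) != 0 && E.Offset.isSubDir();
}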

struct coff_resource_data_entry {
  support::ulittle32_t DataRVA;
  support::ulittle32_t DataSize;
  support::ulittle32_t Codepage;
  support::ulittle32_t Reserved;
};

struct coff_resource_dir_table {
  support::ulittle32_t Characteristics;
  support::ulittle32_t TimeDateStamp;
  support::ulittle16_t MajorVersion;
  support::ulittle16_t MinorVersion;
  support::ulittle16_t NumberOfNameEntries;
  support::ulittle16_t NumberOfIDEntries;
};

struct debug_h_header {
  support::ulittle32_t Magic;
  support::ulittle16_t Version;
  support::ulittle16_t HashAlgorithm;
};

class COFFObjectFile : public ObjectFile {
private:
  COFFObjectFile(MemoryBufferRef Object);

  friend class ImportDirectoryEntryRef;
  friend class ExportDirectoryEntryRef;
  const coff_file_header *COFFHeader;
  const coff_bigobj_file_header *COFFBigObjHeader;
  const pe32_header *PE32Header;
  const pe32plus_header *PE32PlusHeader;
  const data_directory *DataDirectory;
  const coff_section *SectionTable;
  const coff_symbol16 *SymbolTable16;
  const coff_symbol32 *SymbolTable32;
  const char *StringTable;
  uint32_t StringTableSize;
  const coff_import_directory_table_entry *ImportDirectory;
  const delay_import_directory_table_entry *DelayImportDirectory;
  uint32_t NumberOfDelayImportDirectory;
  const export_directory_table_entry *ExportDirectory;
  const coff_base_reloc_block_header *BaseRelocHeader;
  const coff_base_reloc_block_header *BaseRelocEnd;
  const debug_directory *DebugDirectoryBegin;
  const debug_directory *DebugDirectoryEnd;
  const coff_tls_directory32 *TLSDirectory32;
  const coff_tls_directory64 *TLSDirectory64;
  // Either coff_load_configuration32 or coff_load_configuration64.
  const void *LoadConfig = nullptr;
  const chpe_metadata *CHPEMetadata = nullptr;

  Expected<StringRef> getString(uint32_t offset) const;

  template <typename coff_symbol_type>
  const coff_symbol_type *toSymb(DataRefImpl Symb) const;
  const coff_section *toSec(DataRefImpl Sec) const;
  const coff_relocation *toRel(DataRefImpl Rel) const;

  // Finish initializing the object and return success or an error.
  Error initialize();

  Error initSymbolTablePtr();
  Error initImportTablePtr();
  Error initDelayImportTablePtr();
  Error initExportTablePtr();
  Error initBaseRelocPtr();
  Error initDebugDirectoryPtr();
  Error initTLSDirectoryPtr();
  Error initLoadConfigPtr();

public:
  static Expected<std::unique_ptr<COFFObjectFile>>
  create(MemoryBufferRef Object);

  uintptr_t getSymbolTable() const {
    if (SymbolTable16)
      return reinterpret_cast<uintptr_t>(SymbolTable16);
    if (SymbolTable32)
      return reinterpret_cast<uintptr_t>(SymbolTable32);
    return uintptr_t(0);
  }

  uint16_t getMachine() const {
    if (COFFHeader) {
      if (CHPEMetadata) {
        switch (COFFHeader->Machine) {
        case COFF::IMAGE_FILE_MACHINE_AMD64:
          return COFF::IMAGE_FILE_MACHINE_ARM64EC;
        case COFF::IMAGE_FILE_MACHINE_ARM64:
          return COFF::IMAGE_FILE_MACHINE_ARM64X;
        }
      }
      return COFFHeader->Machine;
    }
    if (COFFBigObjHeader)
      return COFFBigObjHeader->Machine;
    llvm_unreachable("no COFF header!");
  }

  uint16_t getSizeOfOptionalHeader() const {
    if (COFFHeader)
      return COFFHeader->isImportLibrary() ? 0
                                           : COFFHeader->SizeOfOptionalHeader;
    // bigobj doesn't have this field.
    if (COFFBigObjHeader)
      return 0;
    llvm_unreachable("no COFF header!");
  }

  uint16_t getCharacteristics() const {
    if (COFFHeader)
      return COFFHeader->isImportLibrary() ? 0 : COFFHeader->Characteristics;
    // bigobj doesn't have characteristics to speak of;
    // editbin will silently lie to you if you attempt to set any.
    if (COFFBigObjHeader)
      return 0;
    llvm_unreachable("no COFF header!");
  }

  uint32_t getTimeDateStamp() const {
    if (COFFHeader)
      return COFFHeader->TimeDateStamp;
    if (COFFBigObjHeader)
      return COFFBigObjHeader->TimeDateStamp;
    llvm_unreachable("no COFF header!");
  }

  uint32_t getNumberOfSections() const {
    if (COFFHeader)
      return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSections;
    if (COFFBigObjHeader)
      return COFFBigObjHeader->NumberOfSections;
    llvm_unreachable("no COFF header!");
  }

  uint32_t getPointerToSymbolTable() const {
    if (COFFHeader)
      return COFFHeader->isImportLibrary() ? 0
                                           : COFFHeader->PointerToSymbolTable;
    if (COFFBigObjHeader)
      return COFFBigObjHeader->PointerToSymbolTable;
    llvm_unreachable("no COFF header!");
  }

  uint32_t getRawNumberOfSymbols() const {
    if (COFFHeader)
      return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols;
    if (COFFBigObjHeader)
      return COFFBigObjHeader->NumberOfSymbols;
    llvm_unreachable("no COFF header!");
  }

  uint32_t getNumberOfSymbols() const {
    if (!SymbolTable16 && !SymbolTable32)
      return 0;
    return getRawNumberOfSymbols();
  }

  uint32_t getStringTableSize() const { return StringTableSize; }

  const export_directory_table_entry *getExportTable() const {
    return ExportDirectory;
  }

  const coff_load_configuration32 *getLoadConfig32() const {
    assert(!is64());
    return reinterpret_cast<const coff_load_configuration32 *>(LoadConfig);
  }

  const coff_load_configuration64 *getLoadConfig64() const {
    assert(is64());
    return reinterpret_cast<const coff_load_configuration64 *>(LoadConfig);
  }

  const chpe_metadata *getCHPEMetadata() const { return CHPEMetadata; }

  StringRef getRelocationTypeName(uint16_t Type) const;

protected:
  void moveSymbolNext(DataRefImpl &Symb) const override;
  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;
  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
  void moveSectionNext(DataRefImpl &Sec) const override;
  Expected<StringRef> getSectionName(DataRefImpl Sec) const override;
  uint64_t getSectionAddress(DataRefImpl Sec) const override;
  uint64_t getSectionIndex(DataRefImpl Sec) const override;
  uint64_t getSectionSize(DataRefImpl Sec) const override;
  Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const override;
  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
  bool isSectionCompressed(DataRefImpl Sec) const override;
  bool isSectionText(DataRefImpl Sec) const override;
  bool isSectionData(DataRefImpl Sec) const override;
  bool isSectionBSS(DataRefImpl Sec) const override;
  bool isSectionVirtual(DataRefImpl Sec) const override;
  bool isDebugSection(DataRefImpl Sec) const override;
  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
  relocation_iterator section_rel_end(DataRefImpl Sec) const override;

  void moveRelocationNext(DataRefImpl &Rel) const override;
  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
  uint64_t getRelocationType(DataRefImpl Rel) const override;
  void getRelocationTypeName(DataRefImpl Rel,
                             SmallVectorImpl<char> &Result) const override;

public:
  basic_symbol_iterator symbol_begin() const override;
  basic_symbol_iterator symbol_end() const override;
  section_iterator section_begin() const override;
  section_iterator section_end() const override;

  bool is64Bit() const override { return false; }

  const coff_section *getCOFFSection(const SectionRef &Section) const;
  COFFSymbolRef getCOFFSymbol(const DataRefImpl &Ref) const;
  COFFSymbolRef getCOFFSymbol(const SymbolRef &Symbol) const;
  const coff_relocation *getCOFFRelocation(const RelocationRef &Reloc) const;
  unsigned getSectionID(SectionRef Sec) const;
  unsigned getSymbolSectionID(SymbolRef Sym) const;

  uint8_t getBytesInAddress() const override;
  StringRef getFileFormatName() const override;
  Triple::ArchType getArch() const override;
  Expected<uint64_t> getStartAddress() const override;
  Expected<SubtargetFeatures> getFeatures() const override {
    return SubtargetFeatures();
  }

  import_directory_iterator import_directory_begin() const;
  import_directory_iterator import_directory_end() const;
  delay_import_directory_iterator delay_import_directory_begin() const;
  delay_import_directory_iterator delay_import_directory_end() const;
  export_directory_iterator export_directory_begin() const;
  export_directory_iterator export_directory_end() const;
  base_reloc_iterator base_reloc_begin() const;
  base_reloc_iterator base_reloc_end() const;
  const debug_directory *debug_directory_begin() const {
    return DebugDirectoryBegin;
  }
  const debug_directory *debug_directory_end() const {
    return DebugDirectoryEnd;
  }

  iterator_range<import_directory_iterator> import_directories() const;
  iterator_range<delay_import_directory_iterator>
      delay_import_directories() const;
  iterator_range<export_directory_iterator> export_directories() const;
  iterator_range<base_reloc_iterator> base_relocs() const;
  iterator_range<const debug_directory *> debug_directories() const {
    return make_range(debug_directory_begin(), debug_directory_end());
  }

  const coff_tls_directory32 *getTLSDirectory32() const {
    return TLSDirectory32;
  }
  const coff_tls_directory64 *getTLSDirectory64() const {
    return TLSDirectory64;
  }

  const dos_header *getDOSHeader() const {
    if (!PE32Header && !PE32PlusHeader)
      return nullptr;
    return reinterpret_cast<const dos_header *>(base());
  }

  const coff_file_header *getCOFFHeader() const { return COFFHeader; }
  const coff_bigobj_file_header *getCOFFBigObjHeader() const {
    return COFFBigObjHeader;
  }
  const pe32_header *getPE32Header() const { return PE32Header; }
  const pe32plus_header *getPE32PlusHeader() const { return PE32PlusHeader; }

  const data_directory *getDataDirectory(uint32_t index) const;
  Expected<const coff_section *> getSection(int32_t index) const;

  Expected<COFFSymbolRef> getSymbol(uint32_t index) const {
    if (index >= getNumberOfSymbols())
      return errorCodeToError(object_error::parse_failed);
    if (SymbolTable16)
      return COFFSymbolRef(SymbolTable16 + index);
    if (SymbolTable32)
      return COFFSymbolRef(SymbolTable32 + index);
    return errorCodeToError(object_error::parse_failed);
  }

  template <typename T>
  Error getAuxSymbol(uint32_t index, const T *&Res) const {
    Expected<COFFSymbolRef> S = getSymbol(index);
    if (Error E = S.takeError())
      return E;
    Res = reinterpret_cast<const T *>(S->getRawPtr());
    return Error::success();
  }

  Expected<StringRef> getSymbolName(COFFSymbolRef Symbol) const;
  Expected<StringRef> getSymbolName(const coff_symbol_generic *Symbol) const;

  ArrayRef<uint8_t> getSymbolAuxData(COFFSymbolRef Symbol) const;

  uint32_t getSymbolIndex(COFFSymbolRef Symbol) const;

  size_t getSymbolTableEntrySize() const {
    if (COFFHeader)
      return sizeof(coff_symbol16);
    if (COFFBigObjHeader)
      return sizeof(coff_symbol32);
    llvm_unreachable("null symbol table pointer!");
  }

  ArrayRef<coff_relocation> getRelocations(const coff_section *Sec) const;

  Expected<StringRef> getSectionName(const coff_section *Sec) const;
  uint64_t getSectionSize(const coff_section *Sec) const;
  Error getSectionContents(const coff_section *Sec,
                           ArrayRef<uint8_t> &Res) const;

  uint64_t getImageBase() const;
  Error getVaPtr(uint64_t VA, uintptr_t &Res) const;
  Error getRvaPtr(uint32_t Rva, uintptr_t &Res,
                  const char *ErrorContext = nullptr) const;

  /// Given an RVA base and size, returns a valid array of bytes or an error
  /// code if the RVA and size are not contained completely within a valid
  /// section.
  Error getRvaAndSizeAsBytes(uint32_t RVA, uint32_t Size,
                             ArrayRef<uint8_t> &Contents,
                             const char *ErrorContext = nullptr) const;

  Error getHintName(uint32_t Rva, uint16_t &Hint, StringRef &Name) const;

  /// Get PDB information out of a codeview debug directory entry.
  Error getDebugPDBInfo(const debug_directory *DebugDir,
                        const codeview::DebugInfo *&Info,
                        StringRef &PDBFileName) const;

  /// Get PDB information from an executable. If the information is not present,
  /// Info will be set to nullptr and PDBFileName will be empty. An error is
  /// returned only on corrupt object files. Convenience accessor that can be
  /// used if the debug directory is not already handy.
  Error getDebugPDBInfo(const codeview::DebugInfo *&Info,
                        StringRef &PDBFileName) const;

  bool isRelocatableObject() const override;
  bool is64() const { return PE32PlusHeader; }

  StringRef mapDebugSectionName(StringRef Name) const override;

  static bool classof(const Binary *v) { return v->isCOFF(); }
};
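
// Illustrative sketch (not part of the original header): minimal use of the
// create() factory above; error handling is reduced to consumeError for
// brevity.
inline void exampleWalkCOFF(MemoryBufferRef Buffer) {
  auto ObjOrErr = COFFObjectFile::create(Buffer);
  if (!ObjOrErr) {
    consumeError(ObjOrErr.takeError());
    return;
  }
  COFFObjectFile &Obj = **ObjOrErr;
  // Header queries work for both regular COFF and bigobj files.
  (void)Obj.getMachine();
  (void)Obj.getNumberOfSections();
  for (const SectionRef &Sec : Obj.sections())
    (void)Obj.getCOFFSection(Sec);
}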

// The iterator for the import directory table.
class ImportDirectoryEntryRef {
public:
  ImportDirectoryEntryRef() = default;
  ImportDirectoryEntryRef(const coff_import_directory_table_entry *Table,
                          uint32_t I, const COFFObjectFile *Owner)
      : ImportTable(Table), Index(I), OwningObject(Owner) {}

  bool operator==(const ImportDirectoryEntryRef &Other) const;
  void moveNext();

  imported_symbol_iterator imported_symbol_begin() const;
  imported_symbol_iterator imported_symbol_end() const;
  iterator_range<imported_symbol_iterator> imported_symbols() const;

  imported_symbol_iterator lookup_table_begin() const;
  imported_symbol_iterator lookup_table_end() const;
  iterator_range<imported_symbol_iterator> lookup_table_symbols() const;

  Error getName(StringRef &Result) const;
  Error getImportLookupTableRVA(uint32_t &Result) const;
  Error getImportAddressTableRVA(uint32_t &Result) const;

  Error
  getImportTableEntry(const coff_import_directory_table_entry *&Result) const;

private:
  const coff_import_directory_table_entry *ImportTable;
  uint32_t Index;
  const COFFObjectFile *OwningObject = nullptr;
};

class DelayImportDirectoryEntryRef {
public:
  DelayImportDirectoryEntryRef() = default;
  DelayImportDirectoryEntryRef(const delay_import_directory_table_entry *T,
                               uint32_t I, const COFFObjectFile *Owner)
      : Table(T), Index(I), OwningObject(Owner) {}

  bool operator==(const DelayImportDirectoryEntryRef &Other) const;
  void moveNext();

  imported_symbol_iterator imported_symbol_begin() const;
  imported_symbol_iterator imported_symbol_end() const;
  iterator_range<imported_symbol_iterator> imported_symbols() const;

  Error getName(StringRef &Result) const;
  Error getDelayImportTable(
      const delay_import_directory_table_entry *&Result) const;
  Error getImportAddress(int AddrIndex, uint64_t &Result) const;

private:
  const delay_import_directory_table_entry *Table;
  uint32_t Index;
  const COFFObjectFile *OwningObject = nullptr;
};

// The iterator for the export directory table entry.
class ExportDirectoryEntryRef {
public:
  ExportDirectoryEntryRef() = default;
  ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I,
                          const COFFObjectFile *Owner)
      : ExportTable(Table), Index(I), OwningObject(Owner) {}

  bool operator==(const ExportDirectoryEntryRef &Other) const;
  void moveNext();

  Error getDllName(StringRef &Result) const;
  Error getOrdinalBase(uint32_t &Result) const;
  Error getOrdinal(uint32_t &Result) const;
  Error getExportRVA(uint32_t &Result) const;
  Error getSymbolName(StringRef &Result) const;

  Error isForwarder(bool &Result) const;
  Error getForwardTo(StringRef &Result) const;

private:
  const export_directory_table_entry *ExportTable;
  uint32_t Index;
  const COFFObjectFile *OwningObject = nullptr;
};

class ImportedSymbolRef {
public:
  ImportedSymbolRef() = default;
  ImportedSymbolRef(const import_lookup_table_entry32 *Entry, uint32_t I,
                    const COFFObjectFile *Owner)
      : Entry32(Entry), Entry64(nullptr), Index(I), OwningObject(Owner) {}
  ImportedSymbolRef(const import_lookup_table_entry64 *Entry, uint32_t I,
                    const COFFObjectFile *Owner)
      : Entry32(nullptr), Entry64(Entry), Index(I), OwningObject(Owner) {}

  bool operator==(const ImportedSymbolRef &Other) const;
  void moveNext();

  Error getSymbolName(StringRef &Result) const;
  Error isOrdinal(bool &Result) const;
  Error getOrdinal(uint16_t &Result) const;
  Error getHintNameRVA(uint32_t &Result) const;

private:
  const import_lookup_table_entry32 *Entry32;
  const import_lookup_table_entry64 *Entry64;
  uint32_t Index;
  const COFFObjectFile *OwningObject = nullptr;
};

class BaseRelocRef {
public:
  BaseRelocRef() = default;
  BaseRelocRef(const coff_base_reloc_block_header *Header,
               const COFFObjectFile *Owner)
      : Header(Header), Index(0) {}

  bool operator==(const BaseRelocRef &Other) const;
  void moveNext();

  Error getType(uint8_t &Type) const;
  Error getRVA(uint32_t &Result) const;

private:
  const coff_base_reloc_block_header *Header;
  uint32_t Index;
};

class ResourceSectionRef {
public:
  ResourceSectionRef() = default;
  explicit ResourceSectionRef(StringRef Ref) : BBS(Ref, support::little) {}

  Error load(const COFFObjectFile *O);
  Error load(const COFFObjectFile *O, const SectionRef &S);

  Expected<ArrayRef<UTF16>>
  getEntryNameString(const coff_resource_dir_entry &Entry);
  Expected<const coff_resource_dir_table &>
  getEntrySubDir(const coff_resource_dir_entry &Entry);
  Expected<const coff_resource_data_entry &>
  getEntryData(const coff_resource_dir_entry &Entry);
  Expected<const coff_resource_dir_table &> getBaseTable();
  Expected<const coff_resource_dir_entry &>
  getTableEntry(const coff_resource_dir_table &Table, uint32_t Index);

  Expected<StringRef> getContents(const coff_resource_data_entry &Entry);

private:
  BinaryByteStream BBS;

  SectionRef Section;
  const COFFObjectFile *Obj = nullptr;

  std::vector<const coff_relocation *> Relocs;

  Expected<const coff_resource_dir_table &> getTableAtOffset(uint32_t Offset);
  Expected<const coff_resource_dir_entry &>
  getTableEntryAtOffset(uint32_t Offset);
  Expected<const coff_resource_data_entry &>
  getDataEntryAtOffset(uint32_t Offset);
  Expected<ArrayRef<UTF16>> getDirStringAtOffset(uint32_t Offset);
};

// Corresponds to the `_FPO_DATA` structure in the PE/COFF spec.
struct FpoData {
  support::ulittle32_t Offset; // ulOffStart: Offset 1st byte of function code
  support::ulittle32_t Size;   // cbProcSize: # bytes in function
  support::ulittle32_t NumLocals; // cdwLocals: # bytes in locals/4
  support::ulittle16_t NumParams; // cdwParams: # bytes in params/4
  support::ulittle16_t Attributes;

  // cbProlog: # bytes in prolog
  int getPrologSize() const { return Attributes & 0xF; }

  // cbRegs: # regs saved
  int getNumSavedRegs() const { return (Attributes >> 8) & 0x7; }

  // fHasSEH: true if SEH is used in the function
  bool hasSEH() const { return (Attributes >> 9) & 1; }

  // fUseBP: true if EBP has been allocated
  bool useBP() const { return (Attributes >> 10) & 1; }

  // cbFrame: frame pointer
  frame_type getFP() const { return static_cast<frame_type>(Attributes >> 14); }
};
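
// Illustrative sketch (not part of the original header): the accessors above
// decode the packed Attributes word, and frame_type distinguishes FPO, trap,
// TSS, and non-FPO frames.
inline bool exampleFpoUsesFramePointer(const FpoData &F) {
  // Frames that allocate EBP and non-FPO frames both rely on a frame
  // pointer.
  return F.useBP() || F.getFP() == frame_type::NonFpo;
}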

class SectionStrippedError
    : public ErrorInfo<SectionStrippedError, BinaryError> {
public:
  SectionStrippedError() { setErrorCode(object_error::section_stripped); }
};

} // end namespace object

} // end namespace llvm

#endif // LLVM_OBJECT_COFF_H
//===- WindowsMachineFlag.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functions for implementing the /machine: flag.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_WINDOWSMACHINEFLAG_H
#define LLVM_OBJECT_WINDOWSMACHINEFLAG_H

namespace llvm {

class StringRef;
namespace COFF {
enum MachineTypes : unsigned;
}

// Returns a user-readable string for ARMNT, ARM64, AMD64, I386.
// Other MachineTypes values must not be passed in.
StringRef machineToStr(COFF::MachineTypes MT);

// Maps /machine: arguments to a MachineTypes value.
// Only returns ARMNT, ARM64, AMD64, I386, or IMAGE_FILE_MACHINE_UNKNOWN.
COFF::MachineTypes getMachineType(StringRef S);
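
// Illustrative sketch (not part of the original header): typical round trip
// for a linker's /machine: option. The exact accepted spellings live in the
// implementation; "x64" is assumed here for illustration.
//
//   COFF::MachineTypes MT = getMachineType("x64");
//   if (MT != COFF::IMAGE_FILE_MACHINE_UNKNOWN)
//     StringRef Name = machineToStr(MT);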

}

#endif
//===- XCOFFObjectFile.h - XCOFF object file implementation -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the XCOFFObjectFile class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_XCOFFOBJECTFILE_H
#define LLVM_OBJECT_XCOFFOBJECTFILE_H

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Endian.h"
#include <limits>

namespace llvm {
namespace object {

struct XCOFFFileHeader32 {
  support::ubig16_t Magic;
  support::ubig16_t NumberOfSections;

  // Unix time value; a value of 0 indicates no timestamp.
  // Negative values are reserved.
  support::big32_t TimeStamp;

  support::ubig32_t SymbolTableOffset; // File offset to symbol table.
  support::big32_t NumberOfSymTableEntries;
  support::ubig16_t AuxHeaderSize;
  support::ubig16_t Flags;
};

struct XCOFFFileHeader64 {
  support::ubig16_t Magic;
  support::ubig16_t NumberOfSections;

  // Unix time value; a value of 0 indicates no timestamp.
  // Negative values are reserved.
  support::big32_t TimeStamp;

  support::ubig64_t SymbolTableOffset; // File offset to symbol table.
  support::ubig16_t AuxHeaderSize;
  support::ubig16_t Flags;
  support::ubig32_t NumberOfSymTableEntries;
};

template <typename T> struct XCOFFAuxiliaryHeader {
  static constexpr uint8_t AuxiHeaderFlagMask = 0xF0;
  static constexpr uint8_t AuxiHeaderTDataAlignmentMask = 0x0F;

public:
  uint8_t getFlag() const {
    return static_cast<const T *>(this)->FlagAndTDataAlignment &
           AuxiHeaderFlagMask;
  }

  uint8_t getTDataAlignment() const {
    return static_cast<const T *>(this)->FlagAndTDataAlignment &
           AuxiHeaderTDataAlignmentMask;
  }

  uint16_t getVersion() const { return static_cast<const T *>(this)->Version; }
};
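
// Illustrative sketch (not part of the original header): the CRTP base above
// lets both auxiliary-header layouts share accessors; FlagAndTDataAlignment
// keeps the flag in the high nibble and the .tdata alignment in the low one.
//
//   uint8_t Flag  = Hdr.getFlag();           // FlagAndTDataAlignment & 0xF0.
//   uint8_t Align = Hdr.getTDataAlignment(); // FlagAndTDataAlignment & 0x0F.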

struct XCOFFAuxiliaryHeader32 : XCOFFAuxiliaryHeader<XCOFFAuxiliaryHeader32> {
  support::ubig16_t
      AuxMagic; ///< If the value of the o_vstamp field is greater than 1, the
                ///< o_mflags field is reserved for future use and it should
                ///< contain 0. Otherwise, this field is not used.
  support::ubig16_t
      Version; ///< The valid values are 1 and 2. When the o_vstamp field is 2
               ///< in an XCOFF32 file, the new interpretation of the n_type
               ///< field in the symbol table entry is used.
  support::ubig32_t TextSize;
  support::ubig32_t InitDataSize;
  support::ubig32_t BssDataSize;
  support::ubig32_t EntryPointAddr;
  support::ubig32_t TextStartAddr;
  support::ubig32_t DataStartAddr;
  support::ubig32_t TOCAnchorAddr;
  support::ubig16_t SecNumOfEntryPoint;
  support::ubig16_t SecNumOfText;
  support::ubig16_t SecNumOfData;
  support::ubig16_t SecNumOfTOC;
  support::ubig16_t SecNumOfLoader;
  support::ubig16_t SecNumOfBSS;
  support::ubig16_t MaxAlignOfText;
  support::ubig16_t MaxAlignOfData;
  support::ubig16_t ModuleType;
  uint8_t CpuFlag;
  uint8_t CpuType;
  support::ubig32_t MaxStackSize; ///< If the value is 0, the system default
                                  ///< maximum stack size is used.
  support::ubig32_t MaxDataSize;  ///< If the value is 0, the system default
                                  ///< maximum data size is used.
  support::ubig32_t
      ReservedForDebugger; ///< This field should contain 0. When a loaded
                           ///< program is being debugged, the memory image of
                           ///< this field may be modified by a debugger to
                           ///< insert a trap instruction.
  uint8_t TextPageSize;  ///< Specifies the size of pages for the exec text. The
                         ///< default value is 0 (system-selected page size).
  uint8_t DataPageSize;  ///< Specifies the size of pages for the exec data. The
                         ///< default value is 0 (system-selected page size).
  uint8_t StackPageSize; ///< Specifies the size of pages for the stack. The
                         ///< default value is 0 (system-selected page size).
  uint8_t FlagAndTDataAlignment;
  support::ubig16_t SecNumOfTData;
  support::ubig16_t SecNumOfTBSS;
};

struct XCOFFAuxiliaryHeader64 : XCOFFAuxiliaryHeader<XCOFFAuxiliaryHeader64> {
  support::ubig16_t AuxMagic;
  support::ubig16_t Version;
  support::ubig32_t ReservedForDebugger;
  support::ubig64_t TextStartAddr;
  support::ubig64_t DataStartAddr;
  support::ubig64_t TOCAnchorAddr;
  support::ubig16_t SecNumOfEntryPoint;
  support::ubig16_t SecNumOfText;
  support::ubig16_t SecNumOfData;
  support::ubig16_t SecNumOfTOC;
  support::ubig16_t SecNumOfLoader;
  support::ubig16_t SecNumOfBSS;
  support::ubig16_t MaxAlignOfText;
  support::ubig16_t MaxAlignOfData;
  support::ubig16_t ModuleType;
  uint8_t CpuFlag;
  uint8_t CpuType;
  uint8_t TextPageSize;
  uint8_t DataPageSize;
  uint8_t StackPageSize;
  uint8_t FlagAndTDataAlignment;
  support::ubig64_t TextSize;
  support::ubig64_t InitDataSize;
  support::ubig64_t BssDataSize;
  support::ubig64_t EntryPointAddr;
  support::ubig64_t MaxStackSize;
  support::ubig64_t MaxDataSize;
  support::ubig16_t SecNumOfTData;
  support::ubig16_t SecNumOfTBSS;
  support::ubig16_t XCOFF64Flag;
};

template <typename T> struct XCOFFSectionHeader {
  // Least significant 3 bits are reserved.
  static constexpr unsigned SectionFlagsReservedMask = 0x7;

  // The low-order 16 bits of the section flags denote the section type.
  static constexpr unsigned SectionFlagsTypeMask = 0xffffu;

public:
  StringRef getName() const;
  uint16_t getSectionType() const;
  bool isReservedSectionType() const;
};

// Explicit extern template declarations.
struct XCOFFSectionHeader32;
struct XCOFFSectionHeader64;
extern template struct XCOFFSectionHeader<XCOFFSectionHeader32>;
extern template struct XCOFFSectionHeader<XCOFFSectionHeader64>;

struct XCOFFSectionHeader32 : XCOFFSectionHeader<XCOFFSectionHeader32> {
  char Name[XCOFF::NameSize];
  support::ubig32_t PhysicalAddress;
  support::ubig32_t VirtualAddress;
  support::ubig32_t SectionSize;
  support::ubig32_t FileOffsetToRawData;
  support::ubig32_t FileOffsetToRelocationInfo;
  support::ubig32_t FileOffsetToLineNumberInfo;
  support::ubig16_t NumberOfRelocations;
  support::ubig16_t NumberOfLineNumbers;
  support::big32_t Flags;
};

struct XCOFFSectionHeader64 : XCOFFSectionHeader<XCOFFSectionHeader64> {
  char Name[XCOFF::NameSize];
  support::ubig64_t PhysicalAddress;
  support::ubig64_t VirtualAddress;
  support::ubig64_t SectionSize;
  support::big64_t FileOffsetToRawData;
  support::big64_t FileOffsetToRelocationInfo;
  support::big64_t FileOffsetToLineNumberInfo;
  support::ubig32_t NumberOfRelocations;
  support::ubig32_t NumberOfLineNumbers;
  support::big32_t Flags;
  char Padding[4];
};

struct LoaderSectionHeader32;
struct LoaderSectionHeader64;
struct LoaderSectionSymbolEntry32 {
  struct NameOffsetInStrTbl {
    support::big32_t IsNameInStrTbl; // Zero indicates name in string table.
    support::ubig32_t Offset;
  };

  char SymbolName[XCOFF::NameSize];
  support::ubig32_t Value; // The virtual address of the symbol.
  support::big16_t SectionNumber;
  uint8_t SymbolType;
  XCOFF::StorageClass StorageClass;
  support::ubig32_t ImportFileID;
  support::ubig32_t ParameterTypeCheck;

  Expected<StringRef>
  getSymbolName(const LoaderSectionHeader32 *LoaderSecHeader) const;
};

struct LoaderSectionSymbolEntry64 {
  support::ubig64_t Value; // The virtual address of the symbol.
  support::ubig32_t Offset;
  support::big16_t SectionNumber;
  uint8_t SymbolType;
  XCOFF::StorageClass StorageClass;
  support::ubig32_t ImportFileID;
  support::ubig32_t ParameterTypeCheck;

  Expected<StringRef>
  getSymbolName(const LoaderSectionHeader64 *LoaderSecHeader) const;
};

struct LoaderSectionRelocationEntry32 {
  support::ubig32_t VirtualAddr;
  support::big32_t SymbolIndex;
  support::ubig16_t Type;
  support::big16_t SectionNum;
};

struct LoaderSectionRelocationEntry64 {
  support::ubig64_t VirtualAddr;
  support::ubig16_t Type;
  support::big16_t SectionNum;
  support::big32_t SymbolIndex;
};

struct LoaderSectionHeader32 {
  support::ubig32_t Version;
  support::ubig32_t NumberOfSymTabEnt;
  support::ubig32_t NumberOfRelTabEnt;
  support::ubig32_t LengthOfImpidStrTbl;
  support::ubig32_t NumberOfImpid;
  support::big32_t OffsetToImpid;
  support::ubig32_t LengthOfStrTbl;
  support::big32_t OffsetToStrTbl;

  uint64_t getOffsetToSymTbl() const {
    return NumberOfSymTabEnt == 0 ? 0 : sizeof(LoaderSectionHeader32);
  }

  uint64_t getOffsetToRelEnt() const {
    // The relocation table immediately follows the symbol table.
    return NumberOfRelTabEnt == 0
               ? 0
               : sizeof(LoaderSectionHeader32) +
                     sizeof(LoaderSectionSymbolEntry32) * NumberOfSymTabEnt;
  }
};
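
// Illustrative sketch (not part of the original header): in the 32-bit
// loader section the tables are laid out back to back, so the relocation
// table starts right after the header plus the whole symbol table, which is
// exactly what getOffsetToRelEnt() computes when both tables are present.
inline uint64_t exampleLoader32RelocOffset(const LoaderSectionHeader32 &H) {
  return sizeof(LoaderSectionHeader32) +
         sizeof(LoaderSectionSymbolEntry32) * uint64_t(H.NumberOfSymTabEnt);
}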

struct LoaderSectionHeader64 {
  support::ubig32_t Version;
  support::ubig32_t NumberOfSymTabEnt;
  support::ubig32_t NumberOfRelTabEnt;
  support::ubig32_t LengthOfImpidStrTbl;
  support::ubig32_t NumberOfImpid;
  support::ubig32_t LengthOfStrTbl;
  support::big64_t OffsetToImpid;
  support::big64_t OffsetToStrTbl;
  support::big64_t OffsetToSymTbl;
  support::big64_t OffsetToRelEnt;

  uint64_t getOffsetToSymTbl() const { return OffsetToSymTbl; }
  uint64_t getOffsetToRelEnt() const { return OffsetToRelEnt; }
};

template <typename AddressType> struct ExceptionSectionEntry {
  union {
    support::ubig32_t SymbolIdx;
    AddressType TrapInstAddr;
  };
  uint8_t LangId;
  uint8_t Reason;

  uint32_t getSymbolIndex() const {
    assert(Reason == 0 && "Get symbol table index of the function only when "
                          "the e_reason field is 0.");
    return SymbolIdx;
  }

  uint64_t getTrapInstAddr() const {
    assert(Reason != 0 && "Zero is not a valid trap exception reason code.");
    return TrapInstAddr;
  }
  uint8_t getLangID() const { return LangId; }
  uint8_t getReason() const { return Reason; }
};

typedef ExceptionSectionEntry<support::ubig32_t> ExceptionSectionEntry32;
typedef ExceptionSectionEntry<support::ubig64_t> ExceptionSectionEntry64;

// Explicit extern template declarations.
extern template struct ExceptionSectionEntry<support::ubig32_t>;
extern template struct ExceptionSectionEntry<support::ubig64_t>;
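
// Illustrative sketch (not part of the original header): the union in
// ExceptionSectionEntry is discriminated by Reason, which the asserts in its
// accessors enforce.
inline uint64_t exampleExceptionEntryValue(const ExceptionSectionEntry32 &E) {
  // Reason == 0 names a function symbol; any other reason records the
  // address of the trapping instruction.
  return E.getReason() == 0 ? E.getSymbolIndex() : E.getTrapInstAddr();
}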

struct XCOFFStringTable {
  uint32_t Size;
  const char *Data;
};

struct XCOFFCsectAuxEnt32 {
  support::ubig32_t SectionOrLength;
  support::ubig32_t ParameterHashIndex;
  support::ubig16_t TypeChkSectNum;
  uint8_t SymbolAlignmentAndType;
  XCOFF::StorageMappingClass StorageMappingClass;
  support::ubig32_t StabInfoIndex;
  support::ubig16_t StabSectNum;
};

struct XCOFFCsectAuxEnt64 {
  support::ubig32_t SectionOrLengthLowByte;
  support::ubig32_t ParameterHashIndex;
  support::ubig16_t TypeChkSectNum;
  uint8_t SymbolAlignmentAndType;
  XCOFF::StorageMappingClass StorageMappingClass;
  support::ubig32_t SectionOrLengthHighByte;
  uint8_t Pad;
  XCOFF::SymbolAuxType AuxType;
};

class XCOFFCsectAuxRef {
public:
  static constexpr uint8_t SymbolTypeMask = 0x07;
  static constexpr uint8_t SymbolAlignmentMask = 0xF8;
  static constexpr size_t SymbolAlignmentBitOffset = 3;

  XCOFFCsectAuxRef(const XCOFFCsectAuxEnt32 *Entry32) : Entry32(Entry32) {}
  XCOFFCsectAuxRef(const XCOFFCsectAuxEnt64 *Entry64) : Entry64(Entry64) {}

  // For getSectionOrLength(),
  // If the symbol type is XTY_SD or XTY_CM, the csect length.
  // If the symbol type is XTY_LD, the symbol table
  // index of the containing csect.
  // If the symbol type is XTY_ER, 0.
  uint64_t getSectionOrLength() const {
    return Entry32 ? getSectionOrLength32() : getSectionOrLength64();
  }

  uint32_t getSectionOrLength32() const {
    assert(Entry32 && "32-bit interface called on 64-bit object file.");
    return Entry32->SectionOrLength;
  }

  uint64_t getSectionOrLength64() const {
    assert(Entry64 && "64-bit interface called on 32-bit object file.");
    return (static_cast<uint64_t>(Entry64->SectionOrLengthHighByte) << 32) |
           Entry64->SectionOrLengthLowByte;
  }

#define GETVALUE(X) Entry32 ? Entry32->X : Entry64->X

  uint32_t getParameterHashIndex() const {
    return GETVALUE(ParameterHashIndex);
  }

  uint16_t getTypeChkSectNum() const { return GETVALUE(TypeChkSectNum); }

  XCOFF::StorageMappingClass getStorageMappingClass() const {
    return GETVALUE(StorageMappingClass);
  }

  uintptr_t getEntryAddress() const {
    return Entry32 ? reinterpret_cast<uintptr_t>(Entry32)
                   : reinterpret_cast<uintptr_t>(Entry64);
  }

  uint16_t getAlignmentLog2() const {
    return (getSymbolAlignmentAndType() & SymbolAlignmentMask) >>
           SymbolAlignmentBitOffset;
  }

  uint8_t getSymbolType() const {
    return getSymbolAlignmentAndType() & SymbolTypeMask;
  }

  bool isLabel() const { return getSymbolType() == XCOFF::XTY_LD; }

  uint32_t getStabInfoIndex32() const {
    assert(Entry32 && "32-bit interface called on 64-bit object file.");
    return Entry32->StabInfoIndex;
  }

  uint16_t getStabSectNum32() const {
    assert(Entry32 && "32-bit interface called on 64-bit object file.");
    return Entry32->StabSectNum;
  }

  XCOFF::SymbolAuxType getAuxType64() const {
    assert(Entry64 && "64-bit interface called on 32-bit object file.");
    return Entry64->AuxType;
  }

private:
  uint8_t getSymbolAlignmentAndType() const {
    return GETVALUE(SymbolAlignmentAndType);
  }

#undef GETVALUE

  const XCOFFCsectAuxEnt32 *Entry32 = nullptr;
  const XCOFFCsectAuxEnt64 *Entry64 = nullptr;
};
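
// A short sketch of interpreting getSectionOrLength(), with a hypothetical
// `AuxRef`: the returned value's meaning depends on the symbol type, so
// dispatch on getSymbolType() first.
//
//   uint64_t getCsectLengthOrZero(const XCOFFCsectAuxRef &AuxRef) {
//     uint8_t Type = AuxRef.getSymbolType();
//     if (Type == XCOFF::XTY_SD || Type == XCOFF::XTY_CM)
//       return AuxRef.getSectionOrLength(); // The csect length.
//     return 0; // XTY_LD: containing csect index; XTY_ER: always 0.
//   }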

struct XCOFFFileAuxEnt {
  typedef struct {
    support::big32_t Magic; // Zero indicates name in string table.
    support::ubig32_t Offset;
    char NamePad[XCOFF::FileNamePadSize];
  } NameInStrTblType;
  union {
    char Name[XCOFF::NameSize + XCOFF::FileNamePadSize];
    NameInStrTblType NameInStrTbl;
  };
  XCOFF::CFileStringType Type;
  uint8_t ReservedZeros[2];
  XCOFF::SymbolAuxType AuxType; // 64-bit XCOFF file only.
};

struct XCOFFSectAuxEntForStat {
  support::ubig32_t SectionLength;
  support::ubig16_t NumberOfRelocEnt;
  support::ubig16_t NumberOfLineNum;
  uint8_t Pad[10];
}; // 32-bit XCOFF file only.

struct XCOFFFunctionAuxEnt32 {
  support::ubig32_t OffsetToExceptionTbl;
  support::ubig32_t SizeOfFunction;
  support::ubig32_t PtrToLineNum;
  support::big32_t SymIdxOfNextBeyond;
  uint8_t Pad[2];
};

struct XCOFFFunctionAuxEnt64 {
  support::ubig64_t PtrToLineNum;
  support::ubig32_t SizeOfFunction;
  support::big32_t SymIdxOfNextBeyond;
  uint8_t Pad;
  XCOFF::SymbolAuxType AuxType; // Contains _AUX_FCN; Type of auxiliary entry
};

struct XCOFFExceptionAuxEnt {
  support::ubig64_t OffsetToExceptionTbl;
  support::ubig32_t SizeOfFunction;
  support::big32_t SymIdxOfNextBeyond;
  uint8_t Pad;
  XCOFF::SymbolAuxType AuxType; // Contains _AUX_EXCEPT; Type of auxiliary entry
};

struct XCOFFBlockAuxEnt32 {
  uint8_t ReservedZeros1[2];
  support::ubig16_t LineNumHi;
  support::ubig16_t LineNumLo;
  uint8_t ReservedZeros2[12];
};

struct XCOFFBlockAuxEnt64 {
  support::ubig32_t LineNum;
  uint8_t Pad[13];
  XCOFF::SymbolAuxType AuxType; // Contains _AUX_SYM; Type of auxiliary entry
};

struct XCOFFSectAuxEntForDWARF32 {
  support::ubig32_t LengthOfSectionPortion;
  uint8_t Pad1[4];
  support::ubig32_t NumberOfRelocEnt;
  uint8_t Pad2[6];
};

struct XCOFFSectAuxEntForDWARF64 {
  support::ubig64_t LengthOfSectionPortion;
  support::ubig64_t NumberOfRelocEnt;
  uint8_t Pad;
  XCOFF::SymbolAuxType AuxType; // Contains _AUX_SECT; Type of auxiliary entry
};

template <typename AddressType> struct XCOFFRelocation {
public:
  AddressType VirtualAddress;
  support::ubig32_t SymbolIndex;

  // Packed field, see XR_* masks for details of packing.
  uint8_t Info;

  XCOFF::RelocationType Type;

public:
  bool isRelocationSigned() const;
  bool isFixupIndicated() const;

  // Returns the number of bits being relocated.
  uint8_t getRelocatedLength() const;
};

extern template struct XCOFFRelocation<llvm::support::ubig32_t>;
extern template struct XCOFFRelocation<llvm::support::ubig64_t>;

struct XCOFFRelocation32 : XCOFFRelocation<llvm::support::ubig32_t> {};
struct XCOFFRelocation64 : XCOFFRelocation<llvm::support::ubig64_t> {};

class XCOFFSymbolRef;

class XCOFFObjectFile : public ObjectFile {
private:
  const void *FileHeader = nullptr;
  const void *AuxiliaryHeader = nullptr;
  const void *SectionHeaderTable = nullptr;

  const void *SymbolTblPtr = nullptr;
  XCOFFStringTable StringTable = {0, nullptr};

  const XCOFFSectionHeader32 *sectionHeaderTable32() const;
  const XCOFFSectionHeader64 *sectionHeaderTable64() const;
  template <typename T> const T *sectionHeaderTable() const;

  size_t getFileHeaderSize() const;
  size_t getSectionHeaderSize() const;

  const XCOFFSectionHeader32 *toSection32(DataRefImpl Ref) const;
  const XCOFFSectionHeader64 *toSection64(DataRefImpl Ref) const;
  uintptr_t getSectionHeaderTableAddress() const;
  uintptr_t getEndOfSymbolTableAddress() const;

  DataRefImpl getSectionByType(XCOFF::SectionTypeFlags SectType) const;
  uint64_t getSectionFileOffsetToRawData(DataRefImpl Sec) const;

  // This returns a pointer to the start of the storage for the name field of
  // the 32-bit or 64-bit SectionHeader struct. This string is *not* necessarily
  // null-terminated.
  const char *getSectionNameInternal(DataRefImpl Sec) const;

  static bool isReservedSectionNumber(int16_t SectionNumber);

  // Constructor and "create" factory function. The constructor is only a thin
  // wrapper around the base constructor. The "create" function fills out the
  // XCOFF-specific information and performs the error checking along the way.
  XCOFFObjectFile(unsigned Type, MemoryBufferRef Object);
  static Expected<std::unique_ptr<XCOFFObjectFile>> create(unsigned Type,
                                                           MemoryBufferRef MBR);

  // Helper for parsing the StringTable. Returns an 'Error' if parsing failed,
  // or an XCOFFStringTable if parsing succeeded.
  static Expected<XCOFFStringTable> parseStringTable(const XCOFFObjectFile *Obj,
                                                     uint64_t Offset);

  // Make a friend so it can call the private 'create' function.
  friend Expected<std::unique_ptr<ObjectFile>>
  ObjectFile::createXCOFFObjectFile(MemoryBufferRef Object, unsigned FileType);

  void checkSectionAddress(uintptr_t Addr, uintptr_t TableAddr) const;

public:
  static constexpr uint64_t InvalidRelocOffset =
      std::numeric_limits<uint64_t>::max();

  // Interface inherited from base classes.
  void moveSymbolNext(DataRefImpl &Symb) const override;
  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;
  basic_symbol_iterator symbol_begin() const override;
  basic_symbol_iterator symbol_end() const override;
  bool is64Bit() const override;
  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;

  void moveSectionNext(DataRefImpl &Sec) const override;
  Expected<StringRef> getSectionName(DataRefImpl Sec) const override;
  uint64_t getSectionAddress(DataRefImpl Sec) const override;
  uint64_t getSectionIndex(DataRefImpl Sec) const override;
  uint64_t getSectionSize(DataRefImpl Sec) const override;
  Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const override;
  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
  bool isSectionCompressed(DataRefImpl Sec) const override;
  bool isSectionText(DataRefImpl Sec) const override;
  bool isSectionData(DataRefImpl Sec) const override;
  bool isSectionBSS(DataRefImpl Sec) const override;
  bool isDebugSection(DataRefImpl Sec) const override;

  bool isSectionVirtual(DataRefImpl Sec) const override;
  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
  relocation_iterator section_rel_end(DataRefImpl Sec) const override;

  void moveRelocationNext(DataRefImpl &Rel) const override;

  /// \returns the relocation offset with the base address of the containing
  /// section as zero, or InvalidRelocOffset on errors (such as a relocation
  /// that does not refer to an address in any section).
  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
  uint64_t getRelocationType(DataRefImpl Rel) const override;
  void getRelocationTypeName(DataRefImpl Rel,
                             SmallVectorImpl<char> &Result) const override;

  section_iterator section_begin() const override;
  section_iterator section_end() const override;
  uint8_t getBytesInAddress() const override;
  StringRef getFileFormatName() const override;
  Triple::ArchType getArch() const override;
  Expected<SubtargetFeatures> getFeatures() const override;
  Expected<uint64_t> getStartAddress() const override;
  StringRef mapDebugSectionName(StringRef Name) const override;
  bool isRelocatableObject() const override;

  // Below here is the non-inherited interface.

  Expected<StringRef> getRawData(const char *Start, uint64_t Size,
                                 StringRef Name) const;

  const XCOFFAuxiliaryHeader32 *auxiliaryHeader32() const;
  const XCOFFAuxiliaryHeader64 *auxiliaryHeader64() const;

  const void *getPointerToSymbolTable() const { return SymbolTblPtr; }

  Expected<StringRef> getSymbolSectionName(XCOFFSymbolRef Ref) const;
  unsigned getSymbolSectionID(SymbolRef Sym) const;
  XCOFFSymbolRef toSymbolRef(DataRefImpl Ref) const;

  // File header related interfaces.
  const XCOFFFileHeader32 *fileHeader32() const;
  const XCOFFFileHeader64 *fileHeader64() const;
  uint16_t getMagic() const;
  uint16_t getNumberOfSections() const;
  int32_t getTimeStamp() const;

  // Symbol table offset and entry count are handled differently between
  // XCOFF32 and XCOFF64.
  uint32_t getSymbolTableOffset32() const;
  uint64_t getSymbolTableOffset64() const;

  // Note that this value is signed and might be negative. Negative values are
  // reserved for future use.
  int32_t getRawNumberOfSymbolTableEntries32() const;

  // The sanitized value appropriate to use as an index into the symbol table.
  uint32_t getLogicalNumberOfSymbolTableEntries32() const;

  uint32_t getNumberOfSymbolTableEntries64() const;

  // Return getLogicalNumberOfSymbolTableEntries32 or
  // getNumberOfSymbolTableEntries64 depending on the object mode.
  uint32_t getNumberOfSymbolTableEntries() const;

  uint32_t getSymbolIndex(uintptr_t SymEntPtr) const;
  uint64_t getSymbolSize(DataRefImpl Symb) const;
  uintptr_t getSymbolByIndex(uint32_t Idx) const {
    return reinterpret_cast<uintptr_t>(SymbolTblPtr) +
           XCOFF::SymbolTableEntrySize * Idx;
  }
  uintptr_t getSymbolEntryAddressByIndex(uint32_t SymbolTableIndex) const;
  Expected<StringRef> getSymbolNameByIndex(uint32_t SymbolTableIndex) const;

  Expected<StringRef> getCFileName(const XCOFFFileAuxEnt *CFileEntPtr) const;
  uint16_t getOptionalHeaderSize() const;
  uint16_t getFlags() const;

  // Section header table related interfaces.
  ArrayRef<XCOFFSectionHeader32> sections32() const;
  ArrayRef<XCOFFSectionHeader64> sections64() const;

  int32_t getSectionFlags(DataRefImpl Sec) const;
  Expected<DataRefImpl> getSectionByNum(int16_t Num) const;

  Expected<uintptr_t>
  getSectionFileOffsetToRawData(XCOFF::SectionTypeFlags SectType) const;

  void checkSymbolEntryPointer(uintptr_t SymbolEntPtr) const;

  // Relocation-related interfaces.
  template <typename T>
  Expected<uint32_t>
  getNumberOfRelocationEntries(const XCOFFSectionHeader<T> &Sec) const;

  template <typename Shdr, typename Reloc>
  Expected<ArrayRef<Reloc>> relocations(const Shdr &Sec) const;

  // Loader section related interfaces.
  Expected<StringRef> getImportFileTable() const;

  // Exception-related interface.
  template <typename ExceptEnt>
  Expected<ArrayRef<ExceptEnt>> getExceptionEntries() const;

  // Returns the string table entry at the given offset.
  Expected<StringRef> getStringTableEntry(uint32_t Offset) const;

  // Returns the string table.
  StringRef getStringTable() const;

  const XCOFF::SymbolAuxType *getSymbolAuxType(uintptr_t AuxEntryAddress) const;

  static uintptr_t getAdvancedSymbolEntryAddress(uintptr_t CurrentAddress,
                                                 uint32_t Distance);

  static bool classof(const Binary *B) { return B->isXCOFF(); }

  std::optional<StringRef> tryGetCPUName() const override;
}; // XCOFFObjectFile

typedef struct {
  uint8_t LanguageId;
  uint8_t CpuTypeId;
} CFileLanguageIdAndTypeIdType;

struct XCOFFSymbolEntry32 {
  typedef struct {
    support::big32_t Magic; // Zero indicates name in string table.
    support::ubig32_t Offset;
  } NameInStrTblType;

  union {
    char SymbolName[XCOFF::NameSize];
    NameInStrTblType NameInStrTbl;
  };

  support::ubig32_t Value; // Symbol value; storage class-dependent.
  support::big16_t SectionNumber;

  union {
    support::ubig16_t SymbolType;
    CFileLanguageIdAndTypeIdType CFileLanguageIdAndTypeId;
  };

  XCOFF::StorageClass StorageClass;
  uint8_t NumberOfAuxEntries;
};

struct XCOFFSymbolEntry64 {
  support::ubig64_t Value; // Symbol value; storage class-dependent.
  support::ubig32_t Offset;
  support::big16_t SectionNumber;

  union {
    support::ubig16_t SymbolType;
    CFileLanguageIdAndTypeIdType CFileLanguageIdAndTypeId;
  };

  XCOFF::StorageClass StorageClass;
  uint8_t NumberOfAuxEntries;
};

class XCOFFSymbolRef {
public:
  enum { NAME_IN_STR_TBL_MAGIC = 0x0 };

  XCOFFSymbolRef(DataRefImpl SymEntDataRef,
                 const XCOFFObjectFile *OwningObjectPtr)
      : OwningObjectPtr(OwningObjectPtr) {
    assert(OwningObjectPtr && "OwningObjectPtr cannot be nullptr!");
    assert(SymEntDataRef.p != 0 &&
           "Symbol table entry pointer cannot be nullptr!");

    if (OwningObjectPtr->is64Bit())
      Entry64 = reinterpret_cast<const XCOFFSymbolEntry64 *>(SymEntDataRef.p);
    else
      Entry32 = reinterpret_cast<const XCOFFSymbolEntry32 *>(SymEntDataRef.p);
  }

  const XCOFFSymbolEntry32 *getSymbol32() { return Entry32; }
  const XCOFFSymbolEntry64 *getSymbol64() { return Entry64; }

  uint64_t getValue() const { return Entry32 ? getValue32() : getValue64(); }

  uint32_t getValue32() const { return Entry32->Value; }

  uint64_t getValue64() const { return Entry64->Value; }

#define GETVALUE(X) Entry32 ? Entry32->X : Entry64->X

  int16_t getSectionNumber() const { return GETVALUE(SectionNumber); }

  uint16_t getSymbolType() const { return GETVALUE(SymbolType); }

  uint8_t getLanguageIdForCFile() const {
    assert(getStorageClass() == XCOFF::C_FILE &&
           "This interface is for C_FILE only.");
    return GETVALUE(CFileLanguageIdAndTypeId.LanguageId);
  }

  uint8_t getCPUTypeIdForCFile() const {
    assert(getStorageClass() == XCOFF::C_FILE &&
           "This interface is for C_FILE only.");
    return GETVALUE(CFileLanguageIdAndTypeId.CpuTypeId);
  }

  XCOFF::StorageClass getStorageClass() const { return GETVALUE(StorageClass); }

  uint8_t getNumberOfAuxEntries() const { return GETVALUE(NumberOfAuxEntries); }

#undef GETVALUE

  uintptr_t getEntryAddress() const {
    return Entry32 ? reinterpret_cast<uintptr_t>(Entry32)
                   : reinterpret_cast<uintptr_t>(Entry64);
  }

  Expected<StringRef> getName() const;
  bool isFunction() const;
  bool isCsectSymbol() const;
  Expected<XCOFFCsectAuxRef> getXCOFFCsectAuxRef() const;

private:
  const XCOFFObjectFile *OwningObjectPtr;
  const XCOFFSymbolEntry32 *Entry32 = nullptr;
  const XCOFFSymbolEntry64 *Entry64 = nullptr;
};
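
// A minimal sketch of typical use, assuming `Sym` is a valid XCOFFSymbolRef:
//
//   Expected<StringRef> NameOrErr = Sym.getName();
//   if (!NameOrErr)
//     return NameOrErr.takeError();
//   if (Sym.isCsectSymbol()) {
//     Expected<XCOFFCsectAuxRef> CsectOrErr = Sym.getXCOFFCsectAuxRef();
//     if (!CsectOrErr)
//       return CsectOrErr.takeError();
//     uint16_t AlignLog2 = CsectOrErr->getAlignmentLog2();
//     (void)AlignLog2;
//   }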

class TBVectorExt {
  uint16_t Data;
  SmallString<32> VecParmsInfo;

  TBVectorExt(StringRef TBvectorStrRef, Error &Err);

public:
  static Expected<TBVectorExt> create(StringRef TBvectorStrRef);
  uint8_t getNumberOfVRSaved() const;
  bool isVRSavedOnStack() const;
  bool hasVarArgs() const;
  uint8_t getNumberOfVectorParms() const;
  bool hasVMXInstruction() const;
  SmallString<32> getVectorParmsInfo() const { return VecParmsInfo; }
};

/// This class provides methods to extract traceback table data from a buffer.
/// The various accessors may reference the buffer provided via the constructor.
class XCOFFTracebackTable {
  const uint8_t *const TBPtr;
  bool Is64BitObj;
  std::optional<SmallString<32>> ParmsType;
  std::optional<uint32_t> TraceBackTableOffset;
  std::optional<uint32_t> HandlerMask;
  std::optional<uint32_t> NumOfCtlAnchors;
  std::optional<SmallVector<uint32_t, 8>> ControlledStorageInfoDisp;
  std::optional<StringRef> FunctionName;
  std::optional<uint8_t> AllocaRegister;
  std::optional<TBVectorExt> VecExt;
  std::optional<uint8_t> ExtensionTable;
  std::optional<uint64_t> EhInfoDisp;

  XCOFFTracebackTable(const uint8_t *Ptr, uint64_t &Size, Error &Err,
                      bool Is64Bit = false);

public:
  /// Parse an XCOFF Traceback Table from \a Ptr with \a Size bytes.
  /// Returns an XCOFFTracebackTable upon successful parsing, otherwise an
  /// Error is returned.
  ///
  /// \param[in] Ptr
  ///   A pointer that points just past the initial 4 bytes of zeros at the
  ///   beginning of an XCOFF Traceback Table.
  ///
  /// \param[in, out] Size
  ///    A reference to the length of the XCOFF Traceback Table.
  ///    If the XCOFF Traceback Table is not parsed successfully or there are
  ///    extra bytes that are not recognized, \a Size will be updated to be the
  ///    size up to the end of the last successfully parsed field of the table.
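  ///
  /// A minimal usage sketch, with hypothetical `Data` and `Len` describing
  /// the bytes that follow the leading word of zeros:
  /// \code
  ///   uint64_t Size = Len;
  ///   Expected<XCOFFTracebackTable> TTOrErr =
  ///       XCOFFTracebackTable::create(Data, Size);
  ///   if (!TTOrErr)
  ///     return TTOrErr.takeError();
  ///   if (TTOrErr->isFuncNamePresent())
  ///     outs() << *TTOrErr->getFunctionName() << "\n";
  /// \endcode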
  static Expected<XCOFFTracebackTable>
  create(const uint8_t *Ptr, uint64_t &Size, bool Is64Bits = false);
  uint8_t getVersion() const;
  uint8_t getLanguageID() const;

  bool isGlobalLinkage() const;
  bool isOutOfLineEpilogOrPrologue() const;
  bool hasTraceBackTableOffset() const;
  bool isInternalProcedure() const;
  bool hasControlledStorage() const;
  bool isTOCless() const;
  bool isFloatingPointPresent() const;
  bool isFloatingPointOperationLogOrAbortEnabled() const;

  bool isInterruptHandler() const;
  bool isFuncNamePresent() const;
  bool isAllocaUsed() const;
  uint8_t getOnConditionDirective() const;
  bool isCRSaved() const;
  bool isLRSaved() const;

  bool isBackChainStored() const;
  bool isFixup() const;
  uint8_t getNumOfFPRsSaved() const;

  bool hasVectorInfo() const;
  bool hasExtensionTable() const;
  uint8_t getNumOfGPRsSaved() const;

  uint8_t getNumberOfFixedParms() const;

  uint8_t getNumberOfFPParms() const;
  bool hasParmsOnStack() const;

  const std::optional<SmallString<32>> &getParmsType() const {
    return ParmsType;
  }
  const std::optional<uint32_t> &getTraceBackTableOffset() const {
    return TraceBackTableOffset;
  }
  const std::optional<uint32_t> &getHandlerMask() const { return HandlerMask; }
  const std::optional<uint32_t> &getNumOfCtlAnchors() {
    return NumOfCtlAnchors;
  }
  const std::optional<SmallVector<uint32_t, 8>> &
  getControlledStorageInfoDisp() {
    return ControlledStorageInfoDisp;
  }
  const std::optional<StringRef> &getFunctionName() const {
    return FunctionName;
  }
  const std::optional<uint8_t> &getAllocaRegister() const {
    return AllocaRegister;
  }
  const std::optional<TBVectorExt> &getVectorExt() const { return VecExt; }
  const std::optional<uint8_t> &getExtensionTable() const {
    return ExtensionTable;
  }
  const std::optional<uint64_t> &getEhInfoDisp() const { return EhInfoDisp; }
};

bool doesXCOFFTracebackTableBegin(ArrayRef<uint8_t> Bytes);
} // namespace object
} // namespace llvm

#endif // LLVM_OBJECT_XCOFFOBJECTFILE_H
//===- ELF.h - ELF object file implementation -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ELFFile template class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_ELF_H
#define LLVM_OBJECT_ELF_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {
namespace object {

struct VerdAux {
  unsigned Offset;
  std::string Name;
};

struct VerDef {
  unsigned Offset;
  unsigned Version;
  unsigned Flags;
  unsigned Ndx;
  unsigned Cnt;
  unsigned Hash;
  std::string Name;
  std::vector<VerdAux> AuxV;
};

struct VernAux {
  unsigned Hash;
  unsigned Flags;
  unsigned Other;
  unsigned Offset;
  std::string Name;
};

struct VerNeed {
  unsigned Version;
  unsigned Cnt;
  unsigned Offset;
  std::string File;
  std::vector<VernAux> AuxV;
};

struct VersionEntry {
  std::string Name;
  bool IsVerDef;
};

StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type);
uint32_t getELFRelativeRelocationType(uint32_t Machine);
StringRef getELFSectionTypeName(uint32_t Machine, uint32_t Type);

// Subclasses of ELFFile may need this for template instantiation.
inline std::pair<unsigned char, unsigned char>
getElfArchType(StringRef Object) {
  if (Object.size() < ELF::EI_NIDENT)
    return std::make_pair((uint8_t)ELF::ELFCLASSNONE,
                          (uint8_t)ELF::ELFDATANONE);
  return std::make_pair((uint8_t)Object[ELF::EI_CLASS],
                        (uint8_t)Object[ELF::EI_DATA]);
}
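
// For example, a caller can dispatch on the file class and data encoding of a
// raw buffer like so (a sketch; `Buffer` is a hypothetical StringRef over the
// whole file):
//
//   auto [Class, Data] = getElfArchType(Buffer);
//   bool Is64 = Class == ELF::ELFCLASS64;
//   bool IsLE = Data == ELF::ELFDATA2LSB;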

enum PPCInstrMasks : uint64_t {
  PADDI_R12_NO_DISP = 0x0610000039800000,
  ADDIS_R12_TO_R2_NO_DISP = 0x3D820000,
  ADDI_R12_TO_R2_NO_DISP = 0x39820000,
  ADDI_R12_TO_R12_NO_DISP = 0x398C0000,
  PLD_R12_NO_DISP = 0x04100000E5800000,
  MTCTR_R12 = 0x7D8903A6,
  BCTR = 0x4E800420,
};

template <class ELFT> class ELFFile;

template <class T> struct DataRegion {
  // This constructor is used when we know the start and the size of a data
  // region. We assume that Arr does not go past the end of the file.
  DataRegion(ArrayRef<T> Arr) : First(Arr.data()), Size(Arr.size()) {}

  // Sometimes we only know the start of a data region. We still don't want to
  // read past the end of the file, so we provide the end of a buffer.
  DataRegion(const T *Data, const uint8_t *BufferEnd)
      : First(Data), BufEnd(BufferEnd) {}

  Expected<T> operator[](uint64_t N) {
    assert(Size || BufEnd);
    if (Size) {
      if (N >= *Size)
        return createError(
            "the index is greater than or equal to the number of entries (" +
            Twine(*Size) + ")");
    } else {
      const uint8_t *EntryStart = (const uint8_t *)First + N * sizeof(T);
      if (EntryStart + sizeof(T) > BufEnd)
        return createError("can't read past the end of the file");
    }
    return *(First + N);
  }

  const T *First;
  std::optional<uint64_t> Size;
  const uint8_t *BufEnd = nullptr;
};
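
// A short sketch of the fallible indexing above, assuming `ShndxTable` is a
// DataRegion<ELF64LE::Word> built from an SHT_SYMTAB_SHNDX section and
// `SymIndex` is the symbol's index:
//
//   Expected<ELF64LE::Word> ValOrErr = ShndxTable[SymIndex];
//   if (!ValOrErr)
//     return ValOrErr.takeError();
//   uint32_t ExtendedIndex = *ValOrErr;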

template <class ELFT>
std::string getSecIndexForError(const ELFFile<ELFT> &Obj,
                                const typename ELFT::Shdr &Sec) {
  auto TableOrErr = Obj.sections();
  if (TableOrErr)
    return "[index " + std::to_string(&Sec - &TableOrErr->front()) + "]";
  // To make this helper more convenient for error reporting, we drop the
  // error. But really it should never be triggered. Before this point, our
  // code should have called 'sections()' and reported a proper error on
  // failure.
  llvm::consumeError(TableOrErr.takeError());
  return "[unknown index]";
}

template <class ELFT>
static std::string describe(const ELFFile<ELFT> &Obj,
                            const typename ELFT::Shdr &Sec) {
  unsigned SecNdx = &Sec - &cantFail(Obj.sections()).front();
  return (object::getELFSectionTypeName(Obj.getHeader().e_machine,
                                        Sec.sh_type) +
          " section with index " + Twine(SecNdx))
      .str();
}

template <class ELFT>
std::string getPhdrIndexForError(const ELFFile<ELFT> &Obj,
                                 const typename ELFT::Phdr &Phdr) {
  auto Headers = Obj.program_headers();
  if (Headers)
    return ("[index " + Twine(&Phdr - &Headers->front()) + "]").str();
  // See comment in the getSecIndexForError() above.
  llvm::consumeError(Headers.takeError());
  return "[unknown index]";
}

static inline Error defaultWarningHandler(const Twine &Msg) {
  return createError(Msg);
}

template <class ELFT>
class ELFFile {
public:
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  // This is a callback that can be passed to a number of functions.
  // It can be used to ignore non-critical errors (warnings), which is
  // useful for dumpers, like llvm-readobj.
  // It accepts a warning message string and returns success when the
  // warning should be ignored, or an error otherwise.
  using WarningHandler = llvm::function_ref<Error(const Twine &Msg)>;

  const uint8_t *base() const { return Buf.bytes_begin(); }
  const uint8_t *end() const { return base() + getBufSize(); }

  size_t getBufSize() const { return Buf.size(); }

private:
  StringRef Buf;
  std::vector<Elf_Shdr> FakeSections;
  SmallString<0> FakeSectionStrings;

  ELFFile(StringRef Object);

public:
  const Elf_Ehdr &getHeader() const {
    return *reinterpret_cast<const Elf_Ehdr *>(base());
  }

  template <typename T>
  Expected<const T *> getEntry(uint32_t Section, uint32_t Entry) const;
  template <typename T>
  Expected<const T *> getEntry(const Elf_Shdr &Section, uint32_t Entry) const;

  Expected<std::vector<VerDef>>
  getVersionDefinitions(const Elf_Shdr &Sec) const;
  Expected<std::vector<VerNeed>> getVersionDependencies(
      const Elf_Shdr &Sec,
      WarningHandler WarnHandler = &defaultWarningHandler) const;
  Expected<StringRef> getSymbolVersionByIndex(
      uint32_t SymbolVersionIndex, bool &IsDefault,
      SmallVector<std::optional<VersionEntry>, 0> &VersionMap,
      std::optional<bool> IsSymHidden) const;

  Expected<StringRef>
  getStringTable(const Elf_Shdr &Section,
                 WarningHandler WarnHandler = &defaultWarningHandler) const;
  Expected<StringRef> getStringTableForSymtab(const Elf_Shdr &Section) const;
  Expected<StringRef> getStringTableForSymtab(const Elf_Shdr &Section,
                                              Elf_Shdr_Range Sections) const;
  Expected<StringRef> getLinkAsStrtab(const typename ELFT::Shdr &Sec) const;

  Expected<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section) const;
  Expected<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section,
                                             Elf_Shdr_Range Sections) const;

  Expected<uint64_t> getDynSymtabSize() const;

  StringRef getRelocationTypeName(uint32_t Type) const;
  void getRelocationTypeName(uint32_t Type,
                             SmallVectorImpl<char> &Result) const;
  uint32_t getRelativeRelocationType() const;

  std::string getDynamicTagAsString(unsigned Arch, uint64_t Type) const;
  std::string getDynamicTagAsString(uint64_t Type) const;

  /// Get the symbol for a given relocation.
  Expected<const Elf_Sym *> getRelocationSymbol(const Elf_Rel &Rel,
                                                const Elf_Shdr *SymTab) const;

  Expected<SmallVector<std::optional<VersionEntry>, 0>>
  loadVersionMap(const Elf_Shdr *VerNeedSec, const Elf_Shdr *VerDefSec) const;

  static Expected<ELFFile> create(StringRef Object);

  bool isLE() const {
    return getHeader().getDataEncoding() == ELF::ELFDATA2LSB;
  }

  bool isMipsELF64() const {
    return getHeader().e_machine == ELF::EM_MIPS &&
           getHeader().getFileClass() == ELF::ELFCLASS64;
  }

  bool isMips64EL() const { return isMipsELF64() && isLE(); }

  Expected<Elf_Shdr_Range> sections() const;

  Expected<Elf_Dyn_Range> dynamicEntries() const;

  Expected<const uint8_t *>
  toMappedAddr(uint64_t VAddr,
               WarningHandler WarnHandler = &defaultWarningHandler) const;

  Expected<Elf_Sym_Range> symbols(const Elf_Shdr *Sec) const {
    if (!Sec)
      return ArrayRef<Elf_Sym>(nullptr, nullptr);
    return getSectionContentsAsArray<Elf_Sym>(*Sec);
  }

  Expected<Elf_Rela_Range> relas(const Elf_Shdr &Sec) const {
    return getSectionContentsAsArray<Elf_Rela>(Sec);
  }

  Expected<Elf_Rel_Range> rels(const Elf_Shdr &Sec) const {
    return getSectionContentsAsArray<Elf_Rel>(Sec);
  }

  Expected<Elf_Relr_Range> relrs(const Elf_Shdr &Sec) const {
    return getSectionContentsAsArray<Elf_Relr>(Sec);
  }

  std::vector<Elf_Rel> decode_relrs(Elf_Relr_Range relrs) const;

  Expected<std::vector<Elf_Rela>> android_relas(const Elf_Shdr &Sec) const;

  /// Iterate over program header table.
  Expected<Elf_Phdr_Range> program_headers() const {
    if (getHeader().e_phnum && getHeader().e_phentsize != sizeof(Elf_Phdr))
      return createError("invalid e_phentsize: " +
                         Twine(getHeader().e_phentsize));

    uint64_t HeadersSize =
        (uint64_t)getHeader().e_phnum * getHeader().e_phentsize;
    uint64_t PhOff = getHeader().e_phoff;
    if (PhOff + HeadersSize < PhOff || PhOff + HeadersSize > getBufSize())
      return createError("program headers are longer than binary of size " +
                         Twine(getBufSize()) + ": e_phoff = 0x" +
                         Twine::utohexstr(getHeader().e_phoff) +
                         ", e_phnum = " + Twine(getHeader().e_phnum) +
                         ", e_phentsize = " + Twine(getHeader().e_phentsize));

    auto *Begin = reinterpret_cast<const Elf_Phdr *>(base() + PhOff);
    return ArrayRef(Begin, Begin + getHeader().e_phnum);
  }

  /// Get an iterator over notes in a program header.
  ///
  /// The program header must be of type \c PT_NOTE.
  ///
  /// \param Phdr the program header to iterate over.
  /// \param Err [out] an error to support fallible iteration, which should
  ///  be checked after iteration ends.
  Elf_Note_Iterator notes_begin(const Elf_Phdr &Phdr, Error &Err) const {
    assert(Phdr.p_type == ELF::PT_NOTE && "Phdr is not of type PT_NOTE");
    ErrorAsOutParameter ErrAsOutParam(&Err);
    if (Phdr.p_offset + Phdr.p_filesz > getBufSize()) {
      Err =
          createError("invalid offset (0x" + Twine::utohexstr(Phdr.p_offset) +
                      ") or size (0x" + Twine::utohexstr(Phdr.p_filesz) + ")");
      return Elf_Note_Iterator(Err);
    }
    // Allow 4, 8, and (for Linux core dumps) 0.
    // TODO: Disallow 1 after all tests are fixed.
    if (Phdr.p_align != 0 && Phdr.p_align != 1 && Phdr.p_align != 4 &&
        Phdr.p_align != 8) {
      Err =
          createError("alignment (" + Twine(Phdr.p_align) + ") is not 4 or 8");
      return Elf_Note_Iterator(Err);
    }
    return Elf_Note_Iterator(base() + Phdr.p_offset, Phdr.p_filesz,
                             std::max<size_t>(Phdr.p_align, 4), Err);
  }

  /// Get an iterator over notes in a section.
  ///
  /// The section must be of type \c SHT_NOTE.
  ///
  /// \param Shdr the section to iterate over.
  /// \param Err [out] an error to support fallible iteration, which should
  ///  be checked after iteration ends.
  Elf_Note_Iterator notes_begin(const Elf_Shdr &Shdr, Error &Err) const {
    assert(Shdr.sh_type == ELF::SHT_NOTE && "Shdr is not of type SHT_NOTE");
    ErrorAsOutParameter ErrAsOutParam(&Err);
    if (Shdr.sh_offset + Shdr.sh_size > getBufSize()) {
      Err =
          createError("invalid offset (0x" + Twine::utohexstr(Shdr.sh_offset) +
                      ") or size (0x" + Twine::utohexstr(Shdr.sh_size) + ")");
      return Elf_Note_Iterator(Err);
    }
    // TODO: Allow just 4 and 8 after all tests are fixed.
    if (Shdr.sh_addralign != 0 && Shdr.sh_addralign != 1 &&
        Shdr.sh_addralign != 4 && Shdr.sh_addralign != 8) {
      Err = createError("alignment (" + Twine(Shdr.sh_addralign) +
                        ") is not 4 or 8");
      return Elf_Note_Iterator(Err);
    }
    return Elf_Note_Iterator(base() + Shdr.sh_offset, Shdr.sh_size,
                             std::max<size_t>(Shdr.sh_addralign, 4), Err);
  }

  /// Get the end iterator for notes.
  Elf_Note_Iterator notes_end() const {
    return Elf_Note_Iterator();
  }

  /// Get an iterator range over notes of a program header.
  ///
  /// The program header must be of type \c PT_NOTE.
  ///
  /// \param Phdr the program header to iterate over.
  /// \param Err [out] an error to support fallible iteration, which should
  ///  be checked after iteration ends.
  iterator_range<Elf_Note_Iterator> notes(const Elf_Phdr &Phdr,
                                          Error &Err) const {
    return make_range(notes_begin(Phdr, Err), notes_end());
  }

  /// Get an iterator range over notes of a section.
  ///
  /// The section must be of type \c SHT_NOTE.
  ///
  /// \param Shdr the section to iterate over.
  /// \param Err [out] an error to support fallible iteration, which should
  ///  be checked after iteration ends.
  iterator_range<Elf_Note_Iterator> notes(const Elf_Shdr &Shdr,
                                          Error &Err) const {
    return make_range(notes_begin(Shdr, Err), notes_end());
  }
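
  // A sketch of the fallible-iteration pattern these ranges require, with a
  // hypothetical PT_NOTE program header `Phdr`:
  //
  //   Error Err = Error::success();
  //   for (const Elf_Note &Note : notes(Phdr, Err))
  //     outs() << Note.getName() << "\n";
  //   if (Err)
  //     return std::move(Err); // Must be checked even if iteration succeeded.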

  Expected<StringRef> getSectionStringTable(
      Elf_Shdr_Range Sections,
      WarningHandler WarnHandler = &defaultWarningHandler) const;
  Expected<uint32_t> getSectionIndex(const Elf_Sym &Sym, Elf_Sym_Range Syms,
                                     DataRegion<Elf_Word> ShndxTable) const;
  Expected<const Elf_Shdr *> getSection(const Elf_Sym &Sym,
                                        const Elf_Shdr *SymTab,
                                        DataRegion<Elf_Word> ShndxTable) const;
  Expected<const Elf_Shdr *> getSection(const Elf_Sym &Sym,
                                        Elf_Sym_Range Symtab,
                                        DataRegion<Elf_Word> ShndxTable) const;
  Expected<const Elf_Shdr *> getSection(uint32_t Index) const;

  Expected<const Elf_Sym *> getSymbol(const Elf_Shdr *Sec,
                                      uint32_t Index) const;

  Expected<StringRef>
  getSectionName(const Elf_Shdr &Section,
                 WarningHandler WarnHandler = &defaultWarningHandler) const;
  Expected<StringRef> getSectionName(const Elf_Shdr &Section,
                                     StringRef DotShstrtab) const;
  template <typename T>
  Expected<ArrayRef<T>> getSectionContentsAsArray(const Elf_Shdr &Sec) const;
  Expected<ArrayRef<uint8_t>> getSectionContents(const Elf_Shdr &Sec) const;
  Expected<ArrayRef<uint8_t>> getSegmentContents(const Elf_Phdr &Phdr) const;

  /// Returns a vector of BBAddrMap structs corresponding to each function
  /// within the text section that the SHT_LLVM_BB_ADDR_MAP section \p Sec
  /// is associated with. If the current ELFFile is relocatable, a corresponding
  /// \p RelaSec must be passed in as an argument.
  Expected<std::vector<BBAddrMap>>
  decodeBBAddrMap(const Elf_Shdr &Sec, const Elf_Shdr *RelaSec = nullptr) const;

  /// Returns a map from every section matching \p IsMatch to its relocation
  /// section, or \p nullptr if it has no relocation section. This function
  /// returns an error if any of the \p IsMatch calls fail or if it fails to
  /// retrieve the content section of any relocation section.
  Expected<MapVector<const Elf_Shdr *, const Elf_Shdr *>>
  getSectionAndRelocations(
      std::function<Expected<bool>(const Elf_Shdr &)> IsMatch) const;

  void createFakeSections();
};

using ELF32LEFile = ELFFile<ELF32LE>;
using ELF64LEFile = ELFFile<ELF64LE>;
using ELF32BEFile = ELFFile<ELF32BE>;
using ELF64BEFile = ELFFile<ELF64BE>;

template <class ELFT>
inline Expected<const typename ELFT::Shdr *>
getSection(typename ELFT::ShdrRange Sections, uint32_t Index) {
  if (Index >= Sections.size())
    return createError("invalid section index: " + Twine(Index));
  return &Sections[Index];
}

template <class ELFT>
inline Expected<uint32_t>
getExtendedSymbolTableIndex(const typename ELFT::Sym &Sym, unsigned SymIndex,
                            DataRegion<typename ELFT::Word> ShndxTable) {
  assert(Sym.st_shndx == ELF::SHN_XINDEX);
  if (!ShndxTable.First)
    return createError(
        "found an extended symbol index (" + Twine(SymIndex) +
        "), but unable to locate the extended symbol index table");

  Expected<typename ELFT::Word> TableOrErr = ShndxTable[SymIndex];
  if (!TableOrErr)
    return createError("unable to read an extended symbol table at index " +
                       Twine(SymIndex) + ": " +
                       toString(TableOrErr.takeError()));
  return *TableOrErr;
}

template <class ELFT>
Expected<uint32_t>
ELFFile<ELFT>::getSectionIndex(const Elf_Sym &Sym, Elf_Sym_Range Syms,
                               DataRegion<Elf_Word> ShndxTable) const {
  uint32_t Index = Sym.st_shndx;
  if (Index == ELF::SHN_XINDEX) {
    Expected<uint32_t> ErrorOrIndex =
        getExtendedSymbolTableIndex<ELFT>(Sym, &Sym - Syms.begin(), ShndxTable);
    if (!ErrorOrIndex)
      return ErrorOrIndex.takeError();
    return *ErrorOrIndex;
  }
  if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE)
    return 0;
  return Index;
}

template <class ELFT>
Expected<const typename ELFT::Shdr *>
ELFFile<ELFT>::getSection(const Elf_Sym &Sym, const Elf_Shdr *SymTab,
                          DataRegion<Elf_Word> ShndxTable) const {
  auto SymsOrErr = symbols(SymTab);
  if (!SymsOrErr)
    return SymsOrErr.takeError();
  return getSection(Sym, *SymsOrErr, ShndxTable);
}

template <class ELFT>
Expected<const typename ELFT::Shdr *>
ELFFile<ELFT>::getSection(const Elf_Sym &Sym, Elf_Sym_Range Symbols,
                          DataRegion<Elf_Word> ShndxTable) const {
  auto IndexOrErr = getSectionIndex(Sym, Symbols, ShndxTable);
  if (!IndexOrErr)
    return IndexOrErr.takeError();
  uint32_t Index = *IndexOrErr;
  if (Index == 0)
    return nullptr;
  return getSection(Index);
}

template <class ELFT>
Expected<const typename ELFT::Sym *>
ELFFile<ELFT>::getSymbol(const Elf_Shdr *Sec, uint32_t Index) const {
  auto SymsOrErr = symbols(Sec);
  if (!SymsOrErr)
    return SymsOrErr.takeError();

  Elf_Sym_Range Symbols = *SymsOrErr;
  if (Index >= Symbols.size())
    return createError("unable to get symbol from section " +
                       getSecIndexForError(*this, *Sec) +
                       ": invalid symbol index (" + Twine(Index) + ")");
  return &Symbols[Index];
}

template <class ELFT>
template <typename T>
Expected<ArrayRef<T>>
ELFFile<ELFT>::getSectionContentsAsArray(const Elf_Shdr &Sec) const {
  if (Sec.sh_entsize != sizeof(T) && sizeof(T) != 1)
    return createError("section " + getSecIndexForError(*this, Sec) +
                       " has invalid sh_entsize: expected " + Twine(sizeof(T)) +
                       ", but got " + Twine(Sec.sh_entsize));

  uintX_t Offset = Sec.sh_offset;
  uintX_t Size = Sec.sh_size;

  if (Size % sizeof(T))
    return createError("section " + getSecIndexForError(*this, Sec) +
                       " has an invalid sh_size (" + Twine(Size) +
                       ") which is not a multiple of its sh_entsize (" +
                       Twine(Sec.sh_entsize) + ")");
  if (std::numeric_limits<uintX_t>::max() - Offset < Size)
    return createError("section " + getSecIndexForError(*this, Sec) +
                       " has a sh_offset (0x" + Twine::utohexstr(Offset) +
                       ") + sh_size (0x" + Twine::utohexstr(Size) +
                       ") that cannot be represented");
  if (Offset + Size > Buf.size())
    return createError("section " + getSecIndexForError(*this, Sec) +
                       " has a sh_offset (0x" + Twine::utohexstr(Offset) +
                       ") + sh_size (0x" + Twine::utohexstr(Size) +
                       ") that is greater than the file size (0x" +
                       Twine::utohexstr(Buf.size()) + ")");

  if (Offset % alignof(T))
    // TODO: this error is untested.
    return createError("unaligned data");

  const T *Start = reinterpret_cast<const T *>(base() + Offset);
  return ArrayRef(Start, Size / sizeof(T));
}
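
// For example, a 64-bit little-endian symbol table section can be read as
// typed entries (a sketch; `Obj` and `SymTabSec` are hypothetical):
//
//   Expected<ArrayRef<ELF64LE::Sym>> SymsOrErr =
//       Obj.getSectionContentsAsArray<ELF64LE::Sym>(SymTabSec);
//   if (!SymsOrErr)
//     return SymsOrErr.takeError();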

template <class ELFT>
Expected<ArrayRef<uint8_t>>
ELFFile<ELFT>::getSegmentContents(const Elf_Phdr &Phdr) const {
  uintX_t Offset = Phdr.p_offset;
  uintX_t Size = Phdr.p_filesz;

  if (std::numeric_limits<uintX_t>::max() - Offset < Size)
    return createError("program header " + getPhdrIndexForError(*this, Phdr) +
                       " has a p_offset (0x" + Twine::utohexstr(Offset) +
                       ") + p_filesz (0x" + Twine::utohexstr(Size) +
                       ") that cannot be represented");
  if (Offset + Size > Buf.size())
    return createError("program header  " + getPhdrIndexForError(*this, Phdr) +
                       " has a p_offset (0x" + Twine::utohexstr(Offset) +
                       ") + p_filesz (0x" + Twine::utohexstr(Size) +
                       ") that is greater than the file size (0x" +
                       Twine::utohexstr(Buf.size()) + ")");
  return ArrayRef(base() + Offset, Size);
}

template <class ELFT>
Expected<ArrayRef<uint8_t>>
ELFFile<ELFT>::getSectionContents(const Elf_Shdr &Sec) const {
  return getSectionContentsAsArray<uint8_t>(Sec);
}

template <class ELFT>
StringRef ELFFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
  return getELFRelocationTypeName(getHeader().e_machine, Type);
}

template <class ELFT>
void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
                                          SmallVectorImpl<char> &Result) const {
  if (!isMipsELF64()) {
    StringRef Name = getRelocationTypeName(Type);
    Result.append(Name.begin(), Name.end());
  } else {
    // The Mips N64 ABI allows up to three operations to be specified per
    // relocation record. Unfortunately there's no easy way to test for the
    // presence of N64 ELFs as they have no special flag that identifies them
    // as being N64. We can safely assume at the moment that all Mips
    // ELFCLASS64 ELFs are N64. New Mips64 ABIs should provide enough
    // information to disambiguate between old vs new ABIs.
    uint8_t Type1 = (Type >> 0) & 0xFF;
    uint8_t Type2 = (Type >> 8) & 0xFF;
    uint8_t Type3 = (Type >> 16) & 0xFF;

    // Concat all three relocation type names.
    StringRef Name = getRelocationTypeName(Type1);
    Result.append(Name.begin(), Name.end());

    Name = getRelocationTypeName(Type2);
    Result.append(1, '/');
    Result.append(Name.begin(), Name.end());

    Name = getRelocationTypeName(Type3);
    Result.append(1, '/');
    Result.append(Name.begin(), Name.end());
  }
}

template <class ELFT>
uint32_t ELFFile<ELFT>::getRelativeRelocationType() const {
  return getELFRelativeRelocationType(getHeader().e_machine);
}

template <class ELFT>
Expected<SmallVector<std::optional<VersionEntry>, 0>>
ELFFile<ELFT>::loadVersionMap(const Elf_Shdr *VerNeedSec,
                              const Elf_Shdr *VerDefSec) const {
  SmallVector<std::optional<VersionEntry>, 0> VersionMap;

  // The first two version indexes are reserved.
  // Index 0 is VER_NDX_LOCAL, index 1 is VER_NDX_GLOBAL.
  VersionMap.push_back(VersionEntry());
  VersionMap.push_back(VersionEntry());

  auto InsertEntry = [&](unsigned N, StringRef Version, bool IsVerdef) {
    if (N >= VersionMap.size())
      VersionMap.resize(N + 1);
    VersionMap[N] = {std::string(Version), IsVerdef};
  };

  if (VerDefSec) {
    Expected<std::vector<VerDef>> Defs = getVersionDefinitions(*VerDefSec);
    if (!Defs)
      return Defs.takeError();
    for (const VerDef &Def : *Defs)
      InsertEntry(Def.Ndx & ELF::VERSYM_VERSION, Def.Name, true);
  }

  if (VerNeedSec) {
    Expected<std::vector<VerNeed>> Deps = getVersionDependencies(*VerNeedSec);
    if (!Deps)
      return Deps.takeError();
    for (const VerNeed &Dep : *Deps)
      for (const VernAux &Aux : Dep.AuxV)
        InsertEntry(Aux.Other & ELF::VERSYM_VERSION, Aux.Name, false);
  }

  return VersionMap;
}
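
// A sketch of resolving a symbol's version string with the map built above
// (hypothetical `Obj`, `VerNeedSec`, `VerDefSec`, and versym value `VerIdx`):
//
//   auto MapOrErr = Obj.loadVersionMap(VerNeedSec, VerDefSec);
//   if (!MapOrErr)
//     return MapOrErr.takeError();
//   bool IsDefault = false;
//   Expected<StringRef> VerOrErr = Obj.getSymbolVersionByIndex(
//       VerIdx, IsDefault, *MapOrErr, /*IsSymHidden=*/std::nullopt);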

template <class ELFT>
Expected<const typename ELFT::Sym *>
ELFFile<ELFT>::getRelocationSymbol(const Elf_Rel &Rel,
                                   const Elf_Shdr *SymTab) const {
  uint32_t Index = Rel.getSymbol(isMips64EL());
  if (Index == 0)
    return nullptr;
  return getEntry<Elf_Sym>(*SymTab, Index);
}

template <class ELFT>
Expected<StringRef>
ELFFile<ELFT>::getSectionStringTable(Elf_Shdr_Range Sections,
                                     WarningHandler WarnHandler) const {
  uint32_t Index = getHeader().e_shstrndx;
  if (Index == ELF::SHN_XINDEX) {
    // If the section name string table section index is greater than
    // or equal to SHN_LORESERVE, then the actual index of the section name
    // string table section is contained in the sh_link field of the section
    // header at index 0.
    if (Sections.empty())
      return createError(
          "e_shstrndx == SHN_XINDEX, but the section header table is empty");

    Index = Sections[0].sh_link;
  }

  // There is no section name string table. Return FakeSectionStrings which
  // is non-empty if we have created fake sections.
  if (!Index)
    return FakeSectionStrings;

  if (Index >= Sections.size())
    return createError("section header string table index " + Twine(Index) +
                       " does not exist");
  return getStringTable(Sections[Index], WarnHandler);
}

/// This function finds the number of dynamic symbols using a GNU hash table.
///
/// @param Table The GNU hash table for .dynsym.
/// @param BufEnd The end of the file buffer; bounds the chain walk.
template <class ELFT>
static Expected<uint64_t>
getDynSymtabSizeFromGnuHash(const typename ELFT::GnuHash &Table,
                            const void *BufEnd) {
  using Elf_Word = typename ELFT::Word;
  if (Table.nbuckets == 0)
    return Table.symndx + 1;
  uint64_t LastSymIdx = 0;
  // Find the index of the first symbol in the last chain.
  for (Elf_Word Val : Table.buckets())
    LastSymIdx = std::max(LastSymIdx, (uint64_t)Val);
  const Elf_Word *It =
      reinterpret_cast<const Elf_Word *>(Table.values(LastSymIdx).end());
  // Locate the end of the chain to find the last symbol index.
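  // The low bit of a GNU hash chain value is a stop bit: it is set only on
  // the last element of a chain, so keep advancing while it is clear.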
  while (It < BufEnd && (*It & 1) == 0) {
    ++LastSymIdx;
    ++It;
  }
  if (It >= BufEnd) {
    return createStringError(
        object_error::parse_failed,
        "no terminator found for GNU hash section before buffer end");
  }
  return LastSymIdx + 1;
}

/// This function determines the number of dynamic symbols. It reads section
/// headers first. If section headers are not available, the number of
/// symbols will be inferred by parsing dynamic hash tables.
template <class ELFT>
Expected<uint64_t> ELFFile<ELFT>::getDynSymtabSize() const {
  // Read .dynsym section header first if available.
  Expected<Elf_Shdr_Range> SectionsOrError = sections();
  if (!SectionsOrError)
    return SectionsOrError.takeError();
  for (const Elf_Shdr &Sec : *SectionsOrError) {
    if (Sec.sh_type == ELF::SHT_DYNSYM) {
      if (Sec.sh_size % Sec.sh_entsize != 0) {
        return createStringError(object_error::parse_failed,
                                 "SHT_DYNSYM section has sh_size (" +
                                     Twine(Sec.sh_size) + ") % sh_entsize (" +
                                     Twine(Sec.sh_entsize) + ") that is not 0");
      }
      return Sec.sh_size / Sec.sh_entsize;
    }
  }

  if (!SectionsOrError->empty()) {
    // Section headers are available but .dynsym header is not found.
    // Return 0 as .dynsym does not exist.
    return 0;
  }

  // Section headers do not exist. Fall back to inferring the upper bound of
  // .dynsym from .gnu.hash and .hash.
  Expected<Elf_Dyn_Range> DynTable = dynamicEntries();
  if (!DynTable)
    return DynTable.takeError();
  std::optional<uint64_t> ElfHash;
  std::optional<uint64_t> ElfGnuHash;
  for (const Elf_Dyn &Entry : *DynTable) {
    switch (Entry.d_tag) {
    case ELF::DT_HASH:
      ElfHash = Entry.d_un.d_ptr;
      break;
    case ELF::DT_GNU_HASH:
      ElfGnuHash = Entry.d_un.d_ptr;
      break;
    }
  }
  if (ElfGnuHash) {
    Expected<const uint8_t *> TablePtr = toMappedAddr(*ElfGnuHash);
    if (!TablePtr)
      return TablePtr.takeError();
    const Elf_GnuHash *Table =
        reinterpret_cast<const Elf_GnuHash *>(TablePtr.get());
    return getDynSymtabSizeFromGnuHash<ELFT>(*Table, this->Buf.bytes_end());
  }

  // Search SYSV hash table to try to find the upper bound of dynsym.
  if (ElfHash) {
    Expected<const uint8_t *> TablePtr = toMappedAddr(*ElfHash);
    if (!TablePtr)
      return TablePtr.takeError();
    const Elf_Hash *Table = reinterpret_cast<const Elf_Hash *>(TablePtr.get());
    return Table->nchain;
  }
  return 0;
}

template <class ELFT> ELFFile<ELFT>::ELFFile(StringRef Object) : Buf(Object) {}

template <class ELFT>
Expected<ELFFile<ELFT>> ELFFile<ELFT>::create(StringRef Object) {
  if (sizeof(Elf_Ehdr) > Object.size())
    return createError("invalid buffer: the size (" + Twine(Object.size()) +
                       ") is smaller than an ELF header (" +
                       Twine(sizeof(Elf_Ehdr)) + ")");
  return ELFFile(Object);
}
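
// A minimal sketch of opening a 64-bit little-endian image from raw bytes,
// where `Contents` is a hypothetical StringRef over the whole file:
//
//   Expected<ELF64LEFile> FileOrErr = ELF64LEFile::create(Contents);
//   if (!FileOrErr)
//     return FileOrErr.takeError();
//   Expected<ELF64LEFile::Elf_Shdr_Range> SectionsOrErr =
//       FileOrErr->sections();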

/// Used by llvm-objdump -d (which needs sections for disassembly) to
/// disassemble objects without a section header table (e.g. ET_CORE objects
/// analyzed by linux perf or ET_EXEC with llvm-strip --strip-sections).
template <class ELFT> void ELFFile<ELFT>::createFakeSections() {
  if (!FakeSections.empty())
    return;
  auto PhdrsOrErr = program_headers();
  if (!PhdrsOrErr)
    return;

  FakeSectionStrings += '\0';
  for (auto [Idx, Phdr] : llvm::enumerate(*PhdrsOrErr)) {
    if (Phdr.p_type != ELF::PT_LOAD || !(Phdr.p_flags & ELF::PF_X))
      continue;
    Elf_Shdr FakeShdr = {};
    FakeShdr.sh_type = ELF::SHT_PROGBITS;
    FakeShdr.sh_flags = ELF::SHF_ALLOC | ELF::SHF_EXECINSTR;
    FakeShdr.sh_addr = Phdr.p_vaddr;
    FakeShdr.sh_size = Phdr.p_memsz;
    FakeShdr.sh_offset = Phdr.p_offset;
    // Create a section name based on the p_type and index.
    FakeShdr.sh_name = FakeSectionStrings.size();
    FakeSectionStrings += ("PT_LOAD#" + Twine(Idx)).str();
    FakeSectionStrings += '\0';
    FakeSections.push_back(FakeShdr);
  }
}

template <class ELFT>
Expected<typename ELFT::ShdrRange> ELFFile<ELFT>::sections() const {
  const uintX_t SectionTableOffset = getHeader().e_shoff;
  if (SectionTableOffset == 0) {
    if (!FakeSections.empty())
      return ArrayRef(FakeSections.data(), FakeSections.size());
    return ArrayRef<Elf_Shdr>();
  }

  if (getHeader().e_shentsize != sizeof(Elf_Shdr))
    return createError("invalid e_shentsize in ELF header: " +
                       Twine(getHeader().e_shentsize));

  const uint64_t FileSize = Buf.size();
  if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize ||
      SectionTableOffset + (uintX_t)sizeof(Elf_Shdr) < SectionTableOffset)
    return createError(
        "section header table goes past the end of the file: e_shoff = 0x" +
        Twine::utohexstr(SectionTableOffset));

  // Invalid address alignment of section headers
  if (SectionTableOffset & (alignof(Elf_Shdr) - 1))
    // TODO: this error is untested.
    return createError("invalid alignment of section headers");

  const Elf_Shdr *First =
      reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);

  uintX_t NumSections = getHeader().e_shnum;
  if (NumSections == 0)
    NumSections = First->sh_size;

  if (NumSections > UINT64_MAX / sizeof(Elf_Shdr))
    return createError("invalid number of sections specified in the NULL "
                       "section's sh_size field (" +
                       Twine(NumSections) + ")");

  const uint64_t SectionTableSize = NumSections * sizeof(Elf_Shdr);
  if (SectionTableOffset + SectionTableSize < SectionTableOffset)
    return createError(
        "invalid section header table offset (e_shoff = 0x" +
        Twine::utohexstr(SectionTableOffset) +
        ") or invalid number of sections specified in the first section "
        "header's sh_size field (0x" +
        Twine::utohexstr(NumSections) + ")");

  // Section table goes past end of file!
  if (SectionTableOffset + SectionTableSize > FileSize)
    return createError("section table goes past the end of file");
  return ArrayRef(First, NumSections);
}

template <class ELFT>
template <typename T>
Expected<const T *> ELFFile<ELFT>::getEntry(uint32_t Section,
                                            uint32_t Entry) const {
  auto SecOrErr = getSection(Section);
  if (!SecOrErr)
    return SecOrErr.takeError();
  return getEntry<T>(**SecOrErr, Entry);
}

template <class ELFT>
template <typename T>
Expected<const T *> ELFFile<ELFT>::getEntry(const Elf_Shdr &Section,
                                            uint32_t Entry) const {
  Expected<ArrayRef<T>> EntriesOrErr = getSectionContentsAsArray<T>(Section);
  if (!EntriesOrErr)
    return EntriesOrErr.takeError();

  ArrayRef<T> Arr = *EntriesOrErr;
  if (Entry >= Arr.size())
    return createError(
        "can't read an entry at 0x" +
        Twine::utohexstr(Entry * static_cast<uint64_t>(sizeof(T))) +
        ": it goes past the end of the section (0x" +
        Twine::utohexstr(Section.sh_size) + ")");
  return &Arr[Entry];
}

template <class ELFT>
Expected<StringRef> ELFFile<ELFT>::getSymbolVersionByIndex(
    uint32_t SymbolVersionIndex, bool &IsDefault,
    SmallVector<std::optional<VersionEntry>, 0> &VersionMap,
    std::optional<bool> IsSymHidden) const {
  size_t VersionIndex = SymbolVersionIndex & llvm::ELF::VERSYM_VERSION;

  // Special markers for unversioned symbols.
  if (VersionIndex == llvm::ELF::VER_NDX_LOCAL ||
      VersionIndex == llvm::ELF::VER_NDX_GLOBAL) {
    IsDefault = false;
    return "";
  }

  // Lookup this symbol in the version table.
  if (VersionIndex >= VersionMap.size() || !VersionMap[VersionIndex])
    return createError("SHT_GNU_versym section refers to a version index " +
                       Twine(VersionIndex) + " which is missing");

  const VersionEntry &Entry = *VersionMap[VersionIndex];
  // A default version (@@) is only available for defined symbols.
  if (!Entry.IsVerDef || IsSymHidden.value_or(false))
    IsDefault = false;
  else
    IsDefault = !(SymbolVersionIndex & llvm::ELF::VERSYM_HIDDEN);
  return Entry.Name.c_str();
}

template <class ELFT>
Expected<std::vector<VerDef>>
ELFFile<ELFT>::getVersionDefinitions(const Elf_Shdr &Sec) const {
  Expected<StringRef> StrTabOrErr = getLinkAsStrtab(Sec);
  if (!StrTabOrErr)
    return StrTabOrErr.takeError();

  Expected<ArrayRef<uint8_t>> ContentsOrErr = getSectionContents(Sec);
  if (!ContentsOrErr)
    return createError("cannot read content of " + describe(*this, Sec) + ": " +
                       toString(ContentsOrErr.takeError()));

  const uint8_t *Start = ContentsOrErr->data();
  const uint8_t *End = Start + ContentsOrErr->size();

  auto ExtractNextAux = [&](const uint8_t *&VerdauxBuf,
                            unsigned VerDefNdx) -> Expected<VerdAux> {
    if (VerdauxBuf + sizeof(Elf_Verdaux) > End)
      return createError("invalid " + describe(*this, Sec) +
                         ": version definition " + Twine(VerDefNdx) +
                         " refers to an auxiliary entry that goes past the end "
                         "of the section");

    auto *Verdaux = reinterpret_cast<const Elf_Verdaux *>(VerdauxBuf);
    VerdauxBuf += Verdaux->vda_next;

    VerdAux Aux;
    Aux.Offset = VerdauxBuf - Start;
    if (Verdaux->vda_name <= StrTabOrErr->size())
      Aux.Name = std::string(StrTabOrErr->drop_front(Verdaux->vda_name));
    else
      Aux.Name = ("<invalid vda_name: " + Twine(Verdaux->vda_name) + ">").str();
    return Aux;
  };

  std::vector<VerDef> Ret;
  const uint8_t *VerdefBuf = Start;
  for (unsigned I = 1; I <= /*VerDefsNum=*/Sec.sh_info; ++I) {
    if (VerdefBuf + sizeof(Elf_Verdef) > End)
      return createError("invalid " + describe(*this, Sec) +
                         ": version definition " + Twine(I) +
                         " goes past the end of the section");

    if (reinterpret_cast<uintptr_t>(VerdefBuf) % sizeof(uint32_t) != 0)
      return createError(
          "invalid " + describe(*this, Sec) +
          ": found a misaligned version definition entry at offset 0x" +
          Twine::utohexstr(VerdefBuf - Start));

    unsigned Version = *reinterpret_cast<const Elf_Half *>(VerdefBuf);
    if (Version != 1)
      return createError("unable to dump " + describe(*this, Sec) +
                         ": version " + Twine(Version) +
                         " is not yet supported");

    const Elf_Verdef *D = reinterpret_cast<const Elf_Verdef *>(VerdefBuf);
    VerDef &VD = *Ret.emplace(Ret.end());
    VD.Offset = VerdefBuf - Start;
    VD.Version = D->vd_version;
    VD.Flags = D->vd_flags;
    VD.Ndx = D->vd_ndx;
    VD.Cnt = D->vd_cnt;
    VD.Hash = D->vd_hash;

    const uint8_t *VerdauxBuf = VerdefBuf + D->vd_aux;
    for (unsigned J = 0; J < D->vd_cnt; ++J) {
      if (reinterpret_cast<uintptr_t>(VerdauxBuf) % sizeof(uint32_t) != 0)
        return createError("invalid " + describe(*this, Sec) +
                           ": found a misaligned auxiliary entry at offset 0x" +
                           Twine::utohexstr(VerdauxBuf - Start));

      Expected<VerdAux> AuxOrErr = ExtractNextAux(VerdauxBuf, I);
      if (!AuxOrErr)
        return AuxOrErr.takeError();

      if (J == 0)
        VD.Name = AuxOrErr->Name;
      else
        VD.AuxV.push_back(*AuxOrErr);
    }

    VerdefBuf += D->vd_next;
  }

  return Ret;
}

template <class ELFT>
Expected<std::vector<VerNeed>>
ELFFile<ELFT>::getVersionDependencies(const Elf_Shdr &Sec,
                                      WarningHandler WarnHandler) const {
  StringRef StrTab;
  Expected<StringRef> StrTabOrErr = getLinkAsStrtab(Sec);
  if (!StrTabOrErr) {
    if (Error E = WarnHandler(toString(StrTabOrErr.takeError())))
      return std::move(E);
  } else {
    StrTab = *StrTabOrErr;
  }

  Expected<ArrayRef<uint8_t>> ContentsOrErr = getSectionContents(Sec);
  if (!ContentsOrErr)
    return createError("cannot read content of " + describe(*this, Sec) + ": " +
                       toString(ContentsOrErr.takeError()));

  const uint8_t *Start = ContentsOrErr->data();
  const uint8_t *End = Start + ContentsOrErr->size();
  const uint8_t *VerneedBuf = Start;

  std::vector<VerNeed> Ret;
  for (unsigned I = 1; I <= /*VerneedNum=*/Sec.sh_info; ++I) {
    if (VerneedBuf + sizeof(Elf_Verneed) > End)
      return createError("invalid " + describe(*this, Sec) +
                         ": version dependency " + Twine(I) +
                         " goes past the end of the section");

    if (reinterpret_cast<uintptr_t>(VerneedBuf) % sizeof(uint32_t) != 0)
      return createError(
          "invalid " + describe(*this, Sec) +
          ": found a misaligned version dependency entry at offset 0x" +
          Twine::utohexstr(VerneedBuf - Start));

    unsigned Version = *reinterpret_cast<const Elf_Half *>(VerneedBuf);
    if (Version != 1)
      return createError("unable to dump " + describe(*this, Sec) +
                         ": version " + Twine(Version) +
                         " is not yet supported");

    const Elf_Verneed *Verneed =
        reinterpret_cast<const Elf_Verneed *>(VerneedBuf);

    VerNeed &VN = *Ret.emplace(Ret.end());
    VN.Version = Verneed->vn_version;
    VN.Cnt = Verneed->vn_cnt;
    VN.Offset = VerneedBuf - Start;

    if (Verneed->vn_file < StrTab.size())
      VN.File = std::string(StrTab.data() + Verneed->vn_file);
    else
      VN.File = ("<corrupt vn_file: " + Twine(Verneed->vn_file) + ">").str();

    const uint8_t *VernauxBuf = VerneedBuf + Verneed->vn_aux;
    for (unsigned J = 0; J < Verneed->vn_cnt; ++J) {
      if (reinterpret_cast<uintptr_t>(VernauxBuf) % sizeof(uint32_t) != 0)
        return createError("invalid " + describe(*this, Sec) +
                           ": found a misaligned auxiliary entry at offset 0x" +
                           Twine::utohexstr(VernauxBuf - Start));

      if (VernauxBuf + sizeof(Elf_Vernaux) > End)
        return createError(
            "invalid " + describe(*this, Sec) + ": version dependency " +
            Twine(I) +
            " refers to an auxiliary entry that goes past the end "
            "of the section");

      const Elf_Vernaux *Vernaux =
          reinterpret_cast<const Elf_Vernaux *>(VernauxBuf);

      VernAux &Aux = *VN.AuxV.emplace(VN.AuxV.end());
      Aux.Hash = Vernaux->vna_hash;
      Aux.Flags = Vernaux->vna_flags;
      Aux.Other = Vernaux->vna_other;
      Aux.Offset = VernauxBuf - Start;
      if (StrTab.size() <= Vernaux->vna_name)
        Aux.Name = "<corrupt>";
      else
        Aux.Name = std::string(StrTab.drop_front(Vernaux->vna_name));

      VernauxBuf += Vernaux->vna_next;
    }
    VerneedBuf += Verneed->vn_next;
  }
  return Ret;
}

template <class ELFT>
Expected<const typename ELFT::Shdr *>
ELFFile<ELFT>::getSection(uint32_t Index) const {
  auto TableOrErr = sections();
  if (!TableOrErr)
    return TableOrErr.takeError();
  return object::getSection<ELFT>(*TableOrErr, Index);
}

template <class ELFT>
Expected<StringRef>
ELFFile<ELFT>::getStringTable(const Elf_Shdr &Section,
                              WarningHandler WarnHandler) const {
  if (Section.sh_type != ELF::SHT_STRTAB)
    if (Error E = WarnHandler("invalid sh_type for string table section " +
                              getSecIndexForError(*this, Section) +
                              ": expected SHT_STRTAB, but got " +
                              object::getELFSectionTypeName(
                                  getHeader().e_machine, Section.sh_type)))
      return std::move(E);

  auto V = getSectionContentsAsArray<char>(Section);
  if (!V)
    return V.takeError();
  ArrayRef<char> Data = *V;
  if (Data.empty())
    return createError("SHT_STRTAB string table section " +
                       getSecIndexForError(*this, Section) + " is empty");
  if (Data.back() != '\0')
    return createError("SHT_STRTAB string table section " +
                       getSecIndexForError(*this, Section) +
                       " is non-null terminated");
  return StringRef(Data.begin(), Data.size());
}

template <class ELFT>
Expected<ArrayRef<typename ELFT::Word>>
ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section) const {
  auto SectionsOrErr = sections();
  if (!SectionsOrErr)
    return SectionsOrErr.takeError();
  return getSHNDXTable(Section, *SectionsOrErr);
}

template <class ELFT>
Expected<ArrayRef<typename ELFT::Word>>
ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section,
                             Elf_Shdr_Range Sections) const {
  assert(Section.sh_type == ELF::SHT_SYMTAB_SHNDX);
  auto VOrErr = getSectionContentsAsArray<Elf_Word>(Section);
  if (!VOrErr)
    return VOrErr.takeError();
  ArrayRef<Elf_Word> V = *VOrErr;
  auto SymTableOrErr = object::getSection<ELFT>(Sections, Section.sh_link);
  if (!SymTableOrErr)
    return SymTableOrErr.takeError();
  const Elf_Shdr &SymTable = **SymTableOrErr;
  if (SymTable.sh_type != ELF::SHT_SYMTAB &&
      SymTable.sh_type != ELF::SHT_DYNSYM)
    return createError(
        "SHT_SYMTAB_SHNDX section is linked with " +
        object::getELFSectionTypeName(getHeader().e_machine, SymTable.sh_type) +
        " section (expected SHT_SYMTAB/SHT_DYNSYM)");

  uint64_t Syms = SymTable.sh_size / sizeof(Elf_Sym);
  if (V.size() != Syms)
    return createError("SHT_SYMTAB_SHNDX has " + Twine(V.size()) +
                       " entries, but the symbol table associated has " +
                       Twine(Syms));

  return V;
}

template <class ELFT>
Expected<StringRef>
ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec) const {
  auto SectionsOrErr = sections();
  if (!SectionsOrErr)
    return SectionsOrErr.takeError();
  return getStringTableForSymtab(Sec, *SectionsOrErr);
}

template <class ELFT>
Expected<StringRef>
ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec,
                                       Elf_Shdr_Range Sections) const {

  if (Sec.sh_type != ELF::SHT_SYMTAB && Sec.sh_type != ELF::SHT_DYNSYM)
    return createError(
        "invalid sh_type for symbol table, expected SHT_SYMTAB or SHT_DYNSYM");
  Expected<const Elf_Shdr *> SectionOrErr =
      object::getSection<ELFT>(Sections, Sec.sh_link);
  if (!SectionOrErr)
    return SectionOrErr.takeError();
  return getStringTable(**SectionOrErr);
}

template <class ELFT>
Expected<StringRef>
ELFFile<ELFT>::getLinkAsStrtab(const typename ELFT::Shdr &Sec) const {
  Expected<const typename ELFT::Shdr *> StrTabSecOrErr =
      getSection(Sec.sh_link);
  if (!StrTabSecOrErr)
    return createError("invalid section linked to " + describe(*this, Sec) +
                       ": " + toString(StrTabSecOrErr.takeError()));

  Expected<StringRef> StrTabOrErr = getStringTable(**StrTabSecOrErr);
  if (!StrTabOrErr)
    return createError("invalid string table linked to " +
                       describe(*this, Sec) + ": " +
                       toString(StrTabOrErr.takeError()));
  return *StrTabOrErr;
}

template <class ELFT>
Expected<StringRef>
ELFFile<ELFT>::getSectionName(const Elf_Shdr &Section,
                              WarningHandler WarnHandler) const {
  auto SectionsOrErr = sections();
  if (!SectionsOrErr)
    return SectionsOrErr.takeError();
  auto Table = getSectionStringTable(*SectionsOrErr, WarnHandler);
  if (!Table)
    return Table.takeError();
  return getSectionName(Section, *Table);
}

template <class ELFT>
Expected<StringRef> ELFFile<ELFT>::getSectionName(const Elf_Shdr &Section,
                                                  StringRef DotShstrtab) const {
  uint32_t Offset = Section.sh_name;
  if (Offset == 0)
    return StringRef();
  if (Offset >= DotShstrtab.size())
    return createError("a section " + getSecIndexForError(*this, Section) +
                       " has an invalid sh_name (0x" +
                       Twine::utohexstr(Offset) +
                       ") offset which goes past the end of the "
                       "section name string table");
  return StringRef(DotShstrtab.data() + Offset);
}

/// This function returns the hash value for a symbol in the .dynsym section.
/// The name of the API remains consistent with the one specified in libelf.
/// REF : http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
inline uint32_t hashSysV(StringRef SymbolName) {
  uint32_t H = 0;
  for (uint8_t C : SymbolName) {
    H = (H << 4) + C;
    H ^= (H >> 24) & 0xf0;
  }
  return H & 0x0fffffff;
}

/// This function returns the hash value for a symbol in the .dynsym section
/// for the GNU hash table. The implementation is defined in the GNU hash ABI.
/// REF : https://sourceware.org/git/?p=binutils-gdb.git;a=blob;f=bfd/elf.c#l222
inline uint32_t hashGnu(StringRef Name) {
  uint32_t H = 5381;
  for (uint8_t C : Name)
    H = (H << 5) + H + C;
  return H;
}
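
// A minimal usage sketch (illustrative only, not part of the original
// header). Both functions are pure, so small inputs can be checked by hand:
// hashGnu of the empty string is just its DJB2 seed, and a single 'a' (0x61)
// passes through hashSysV unchanged.
//
//   uint32_t H0 = hashGnu("");    // == 5381
//   uint32_t H1 = hashSysV("a");  // == (0 << 4) + 0x61 == 0x61
//   uint32_t H2 = hashGnu("a");   // == 5381 * 33 + 0x61 == 177670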

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_ELF_H
//===- SymbolSize.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_SYMBOLSIZE_H
#define LLVM_OBJECT_SYMBOLSIZE_H

#include "llvm/Object/ObjectFile.h"

namespace llvm {
namespace object {

struct SymEntry {
  symbol_iterator I;
  uint64_t Address;
  unsigned Number;
  unsigned SectionID;
};

int compareAddress(const SymEntry *A, const SymEntry *B);

std::vector<std::pair<SymbolRef, uint64_t>>
computeSymbolSizes(const ObjectFile &O);
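
// A minimal usage sketch (illustrative only; assumes an ObjectFile `Obj` and
// llvm/Support/raw_ostream.h for outs()):
//
//   for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(Obj)) {
//     if (Expected<StringRef> NameOrErr = P.first.getName())
//       outs() << *NameOrErr << " " << P.second << "\n";
//     else
//       consumeError(NameOrErr.takeError());
//   }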

} // end namespace object
} // end namespace llvm

#endif
//===- ELFObjectFile.h - ELF object file implementation ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ELFObjectFile template class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_ELFOBJECTFILE_H
#define LLVM_OBJECT_ELFOBJECTFILE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ELF.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ELFAttributeParser.h"
#include "llvm/Support/ELFAttributes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <cstdint>

namespace llvm {

template <typename T> class SmallVectorImpl;

namespace object {

constexpr int NumElfSymbolTypes = 16;
extern const llvm::EnumEntry<unsigned> ElfSymbolTypes[NumElfSymbolTypes];

class elf_symbol_iterator;

struct ELFPltEntry {
  StringRef Section;
  std::optional<DataRefImpl> Symbol;
  uint64_t Address;
};

class ELFObjectFileBase : public ObjectFile {
  friend class ELFRelocationRef;
  friend class ELFSectionRef;
  friend class ELFSymbolRef;

  SubtargetFeatures getMIPSFeatures() const;
  SubtargetFeatures getARMFeatures() const;
  Expected<SubtargetFeatures> getRISCVFeatures() const;
  SubtargetFeatures getLoongArchFeatures() const;

  StringRef getAMDGPUCPUName() const;

protected:
  ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source);

  virtual uint64_t getSymbolSize(DataRefImpl Symb) const = 0;
  virtual uint8_t getSymbolBinding(DataRefImpl Symb) const = 0;
  virtual uint8_t getSymbolOther(DataRefImpl Symb) const = 0;
  virtual uint8_t getSymbolELFType(DataRefImpl Symb) const = 0;

  virtual uint32_t getSectionType(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionFlags(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionOffset(DataRefImpl Sec) const = 0;

  virtual Expected<int64_t> getRelocationAddend(DataRefImpl Rel) const = 0;
  virtual Error getBuildAttributes(ELFAttributeParser &Attributes) const = 0;

public:
  using elf_symbol_iterator_range = iterator_range<elf_symbol_iterator>;

  virtual elf_symbol_iterator_range getDynamicSymbolIterators() const = 0;

  /// Returns platform-specific object flags, if any.
  virtual unsigned getPlatformFlags() const = 0;

  elf_symbol_iterator_range symbols() const;

  static bool classof(const Binary *v) { return v->isELF(); }

  Expected<SubtargetFeatures> getFeatures() const override;

  std::optional<StringRef> tryGetCPUName() const override;

  void setARMSubArch(Triple &TheTriple) const override;

  virtual uint16_t getEType() const = 0;

  virtual uint16_t getEMachine() const = 0;

  std::vector<ELFPltEntry> getPltEntries() const;

  /// Returns a vector containing a symbol version for each dynamic symbol.
  /// Returns an empty vector if version sections do not exist.
  Expected<std::vector<VersionEntry>> readDynsymVersions() const;

  /// Returns a vector of all BB address maps in the object file. When
  /// `TextSectionIndex` is specified, only returns the BB address maps
  /// corresponding to the section with that index.
  Expected<std::vector<BBAddrMap>>
  readBBAddrMap(std::optional<unsigned> TextSectionIndex = std::nullopt) const;
};
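
// A minimal usage sketch (illustrative only; assumes an already constructed
// ELFObjectFileBase `Obj`, the VersionEntry fields Name/IsVerDef from
// llvm/Object/ELFTypes.h, and llvm/Support/raw_ostream.h for outs()):
//
//   void dumpDynsymVersions(const ELFObjectFileBase &Obj) {
//     Expected<std::vector<VersionEntry>> VersOrErr = Obj.readDynsymVersions();
//     if (!VersOrErr) {
//       consumeError(VersOrErr.takeError());
//       return;
//     }
//     for (const VersionEntry &VE : *VersOrErr)
//       outs() << VE.Name << (VE.IsVerDef ? " [verdef]\n" : " [verneed]\n");
//   }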

class ELFSectionRef : public SectionRef {
public:
  ELFSectionRef(const SectionRef &B) : SectionRef(B) {
    assert(isa<ELFObjectFileBase>(SectionRef::getObject()));
  }

  const ELFObjectFileBase *getObject() const {
    return cast<ELFObjectFileBase>(SectionRef::getObject());
  }

  uint32_t getType() const {
    return getObject()->getSectionType(getRawDataRefImpl());
  }

  uint64_t getFlags() const {
    return getObject()->getSectionFlags(getRawDataRefImpl());
  }

  uint64_t getOffset() const {
    return getObject()->getSectionOffset(getRawDataRefImpl());
  }
};

class elf_section_iterator : public section_iterator {
public:
  elf_section_iterator(const section_iterator &B) : section_iterator(B) {
    assert(isa<ELFObjectFileBase>(B->getObject()));
  }

  const ELFSectionRef *operator->() const {
    return static_cast<const ELFSectionRef *>(section_iterator::operator->());
  }

  const ELFSectionRef &operator*() const {
    return static_cast<const ELFSectionRef &>(section_iterator::operator*());
  }
};

class ELFSymbolRef : public SymbolRef {
public:
  ELFSymbolRef(const SymbolRef &B) : SymbolRef(B) {
    assert(isa<ELFObjectFileBase>(SymbolRef::getObject()));
  }

  const ELFObjectFileBase *getObject() const {
    return cast<ELFObjectFileBase>(BasicSymbolRef::getObject());
  }

  uint64_t getSize() const {
    return getObject()->getSymbolSize(getRawDataRefImpl());
  }

  uint8_t getBinding() const {
    return getObject()->getSymbolBinding(getRawDataRefImpl());
  }

  uint8_t getOther() const {
    return getObject()->getSymbolOther(getRawDataRefImpl());
  }

  uint8_t getELFType() const {
    return getObject()->getSymbolELFType(getRawDataRefImpl());
  }

  StringRef getELFTypeName() const {
    uint8_t Type = getELFType();
    for (const auto &EE : ElfSymbolTypes) {
      if (EE.Value == Type) {
        return EE.AltName;
      }
    }
    return "";
  }
};

class elf_symbol_iterator : public symbol_iterator {
public:
  elf_symbol_iterator(const basic_symbol_iterator &B)
      : symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
                                  cast<ELFObjectFileBase>(B->getObject()))) {}

  const ELFSymbolRef *operator->() const {
    return static_cast<const ELFSymbolRef *>(symbol_iterator::operator->());
  }

  const ELFSymbolRef &operator*() const {
    return static_cast<const ELFSymbolRef &>(symbol_iterator::operator*());
  }
};

class ELFRelocationRef : public RelocationRef {
public:
  ELFRelocationRef(const RelocationRef &B) : RelocationRef(B) {
    assert(isa<ELFObjectFileBase>(RelocationRef::getObject()));
  }

  const ELFObjectFileBase *getObject() const {
    return cast<ELFObjectFileBase>(RelocationRef::getObject());
  }

  Expected<int64_t> getAddend() const {
    return getObject()->getRelocationAddend(getRawDataRefImpl());
  }
};

class elf_relocation_iterator : public relocation_iterator {
public:
  elf_relocation_iterator(const relocation_iterator &B)
      : relocation_iterator(RelocationRef(
            B->getRawDataRefImpl(), cast<ELFObjectFileBase>(B->getObject()))) {}

  const ELFRelocationRef *operator->() const {
    return static_cast<const ELFRelocationRef *>(
        relocation_iterator::operator->());
  }

  const ELFRelocationRef &operator*() const {
    return static_cast<const ELFRelocationRef &>(
        relocation_iterator::operator*());
  }
};

inline ELFObjectFileBase::elf_symbol_iterator_range
ELFObjectFileBase::symbols() const {
  return elf_symbol_iterator_range(symbol_begin(), symbol_end());
}

template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
  uint16_t getEMachine() const override;
  uint16_t getEType() const override;
  uint64_t getSymbolSize(DataRefImpl Sym) const override;

public:
  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

  SectionRef toSectionRef(const Elf_Shdr *Sec) const {
    return SectionRef(toDRI(Sec), this);
  }

  ELFSymbolRef toSymbolRef(const Elf_Shdr *SymTable, unsigned SymbolNum) const {
    return ELFSymbolRef({toDRI(SymTable, SymbolNum), this});
  }

  bool IsContentValid() const { return ContentValid; }

private:
  ELFObjectFile(MemoryBufferRef Object, ELFFile<ELFT> EF,
                const Elf_Shdr *DotDynSymSec, const Elf_Shdr *DotSymtabSec,
                const Elf_Shdr *DotSymtabShndxSec);

  bool ContentValid = false;

protected:
  ELFFile<ELFT> EF;

  const Elf_Shdr *DotDynSymSec = nullptr; // Dynamic symbol table section.
  const Elf_Shdr *DotSymtabSec = nullptr; // Symbol table section.
  const Elf_Shdr *DotSymtabShndxSec = nullptr; // SHT_SYMTAB_SHNDX section.

  Error initContent() override;

  void moveSymbolNext(DataRefImpl &Symb) const override;
  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;
  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;
  uint8_t getSymbolBinding(DataRefImpl Symb) const override;
  uint8_t getSymbolOther(DataRefImpl Symb) const override;
  uint8_t getSymbolELFType(DataRefImpl Symb) const override;
  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
  Expected<section_iterator> getSymbolSection(const Elf_Sym *Symb,
                                              const Elf_Shdr *SymTab) const;
  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;

  void moveSectionNext(DataRefImpl &Sec) const override;
  Expected<StringRef> getSectionName(DataRefImpl Sec) const override;
  uint64_t getSectionAddress(DataRefImpl Sec) const override;
  uint64_t getSectionIndex(DataRefImpl Sec) const override;
  uint64_t getSectionSize(DataRefImpl Sec) const override;
  Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const override;
  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
  bool isSectionCompressed(DataRefImpl Sec) const override;
  bool isSectionText(DataRefImpl Sec) const override;
  bool isSectionData(DataRefImpl Sec) const override;
  bool isSectionBSS(DataRefImpl Sec) const override;
  bool isSectionVirtual(DataRefImpl Sec) const override;
  bool isBerkeleyText(DataRefImpl Sec) const override;
  bool isBerkeleyData(DataRefImpl Sec) const override;
  bool isDebugSection(DataRefImpl Sec) const override;
  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
  relocation_iterator section_rel_end(DataRefImpl Sec) const override;
  std::vector<SectionRef> dynamic_relocation_sections() const override;
  Expected<section_iterator>
  getRelocatedSection(DataRefImpl Sec) const override;

  void moveRelocationNext(DataRefImpl &Rel) const override;
  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
  uint64_t getRelocationType(DataRefImpl Rel) const override;
  void getRelocationTypeName(DataRefImpl Rel,
                             SmallVectorImpl<char> &Result) const override;

  uint32_t getSectionType(DataRefImpl Sec) const override;
  uint64_t getSectionFlags(DataRefImpl Sec) const override;
  uint64_t getSectionOffset(DataRefImpl Sec) const override;
  StringRef getRelocationTypeName(uint32_t Type) const;

  DataRefImpl toDRI(const Elf_Shdr *SymTable, unsigned SymbolNum) const {
    DataRefImpl DRI;
    if (!SymTable) {
      DRI.d.a = 0;
      DRI.d.b = 0;
      return DRI;
    }
    assert(SymTable->sh_type == ELF::SHT_SYMTAB ||
           SymTable->sh_type == ELF::SHT_DYNSYM);

    auto SectionsOrErr = EF.sections();
    if (!SectionsOrErr) {
      DRI.d.a = 0;
      DRI.d.b = 0;
      return DRI;
    }
    uintptr_t SHT = reinterpret_cast<uintptr_t>((*SectionsOrErr).begin());
    unsigned SymTableIndex =
        (reinterpret_cast<uintptr_t>(SymTable) - SHT) / sizeof(Elf_Shdr);

    DRI.d.a = SymTableIndex;
    DRI.d.b = SymbolNum;
    return DRI;
  }

  const Elf_Shdr *toELFShdrIter(DataRefImpl Sec) const {
    return reinterpret_cast<const Elf_Shdr *>(Sec.p);
  }

  DataRefImpl toDRI(const Elf_Shdr *Sec) const {
    DataRefImpl DRI;
    DRI.p = reinterpret_cast<uintptr_t>(Sec);
    return DRI;
  }

  DataRefImpl toDRI(const Elf_Dyn *Dyn) const {
    DataRefImpl DRI;
    DRI.p = reinterpret_cast<uintptr_t>(Dyn);
    return DRI;
  }

  bool isExportedToOtherDSO(const Elf_Sym *ESym) const {
    unsigned char Binding = ESym->getBinding();
    unsigned char Visibility = ESym->getVisibility();

    // A symbol is exported if its binding is either GLOBAL or WEAK, and its
    // visibility is either DEFAULT or PROTECTED. All other symbols are not
    // exported.
    return (
        (Binding == ELF::STB_GLOBAL || Binding == ELF::STB_WEAK ||
         Binding == ELF::STB_GNU_UNIQUE) &&
        (Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_PROTECTED));
  }

  Error getBuildAttributes(ELFAttributeParser &Attributes) const override {
    auto SectionsOrErr = EF.sections();
    if (!SectionsOrErr)
      return SectionsOrErr.takeError();

    for (const Elf_Shdr &Sec : *SectionsOrErr) {
      if (Sec.sh_type == ELF::SHT_ARM_ATTRIBUTES ||
          Sec.sh_type == ELF::SHT_RISCV_ATTRIBUTES) {
        auto ErrorOrContents = EF.getSectionContents(Sec);
        if (!ErrorOrContents)
          return ErrorOrContents.takeError();

        auto Contents = ErrorOrContents.get();
        // Guard against an empty section before reading the version byte.
        if (Contents.size() <= 1 || Contents[0] != ELFAttrs::Format_Version)
          return Error::success();

        if (Error E = Attributes.parse(Contents, ELFT::TargetEndianness))
          return E;
        break;
      }
    }
    return Error::success();
  }

  // This flag is used for classof, to distinguish ELFObjectFile from
  // its subclass. If more subclasses are created, this flag will
  // have to become an enum.
  bool isDyldELFObject = false;

public:
  ELFObjectFile(ELFObjectFile<ELFT> &&Other);
  static Expected<ELFObjectFile<ELFT>> create(MemoryBufferRef Object,
                                              bool InitContent = true);

  const Elf_Rel *getRel(DataRefImpl Rel) const;
  const Elf_Rela *getRela(DataRefImpl Rela) const;

  Expected<const Elf_Sym *> getSymbol(DataRefImpl Sym) const {
    return EF.template getEntry<Elf_Sym>(Sym.d.a, Sym.d.b);
  }

  /// Get the relocation section that contains \a Rel.
  const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
    auto RelSecOrErr = EF.getSection(Rel.d.a);
    if (!RelSecOrErr)
      report_fatal_error(
          Twine(errorToErrorCode(RelSecOrErr.takeError()).message()));
    return *RelSecOrErr;
  }

  const Elf_Shdr *getSection(DataRefImpl Sec) const {
    return reinterpret_cast<const Elf_Shdr *>(Sec.p);
  }

  basic_symbol_iterator symbol_begin() const override;
  basic_symbol_iterator symbol_end() const override;

  bool is64Bit() const override { return getBytesInAddress() == 8; }

  elf_symbol_iterator dynamic_symbol_begin() const;
  elf_symbol_iterator dynamic_symbol_end() const;

  section_iterator section_begin() const override;
  section_iterator section_end() const override;

  Expected<int64_t> getRelocationAddend(DataRefImpl Rel) const override;

  uint8_t getBytesInAddress() const override;
  StringRef getFileFormatName() const override;
  Triple::ArchType getArch() const override;
  Expected<uint64_t> getStartAddress() const override;

  unsigned getPlatformFlags() const override { return EF.getHeader().e_flags; }

  const ELFFile<ELFT> &getELFFile() const { return EF; }

  bool isDyldType() const { return isDyldELFObject; }
  static bool classof(const Binary *v) {
    return v->getType() == getELFType(ELFT::TargetEndianness == support::little,
                                      ELFT::Is64Bits);
  }

  elf_symbol_iterator_range getDynamicSymbolIterators() const override;

  bool isRelocatableObject() const override;

  void createFakeSections() { EF.createFakeSections(); }
};

using ELF32LEObjectFile = ELFObjectFile<ELF32LE>;
using ELF64LEObjectFile = ELFObjectFile<ELF64LE>;
using ELF32BEObjectFile = ELFObjectFile<ELF32BE>;
using ELF64BEObjectFile = ELFObjectFile<ELF64BE>;

template <class ELFT>
void ELFObjectFile<ELFT>::moveSymbolNext(DataRefImpl &Sym) const {
  ++Sym.d.b;
}

template <class ELFT> Error ELFObjectFile<ELFT>::initContent() {
  auto SectionsOrErr = EF.sections();
  if (!SectionsOrErr)
    return SectionsOrErr.takeError();

  for (const Elf_Shdr &Sec : *SectionsOrErr) {
    switch (Sec.sh_type) {
    case ELF::SHT_DYNSYM: {
      if (!DotDynSymSec)
        DotDynSymSec = &Sec;
      break;
    }
    case ELF::SHT_SYMTAB: {
      if (!DotSymtabSec)
        DotSymtabSec = &Sec;
      break;
    }
    case ELF::SHT_SYMTAB_SHNDX: {
      if (!DotSymtabShndxSec)
        DotSymtabShndxSec = &Sec;
      break;
    }
    }
  }

  ContentValid = true;
  return Error::success();
}

template <class ELFT>
Expected<StringRef> ELFObjectFile<ELFT>::getSymbolName(DataRefImpl Sym) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Sym);
  if (!SymOrErr)
    return SymOrErr.takeError();
  auto SymTabOrErr = EF.getSection(Sym.d.a);
  if (!SymTabOrErr)
    return SymTabOrErr.takeError();
  const Elf_Shdr *SymTableSec = *SymTabOrErr;
  auto StrTabOrErr = EF.getSection(SymTableSec->sh_link);
  if (!StrTabOrErr)
    return StrTabOrErr.takeError();
  const Elf_Shdr *StringTableSec = *StrTabOrErr;
  auto SymStrTabOrErr = EF.getStringTable(*StringTableSec);
  if (!SymStrTabOrErr)
    return SymStrTabOrErr.takeError();
  Expected<StringRef> Name = (*SymOrErr)->getName(*SymStrTabOrErr);
  if (Name && !Name->empty())
    return Name;

  // If the symbol name is empty, use the section name.
  if ((*SymOrErr)->getType() == ELF::STT_SECTION) {
    Expected<section_iterator> SecOrErr = getSymbolSection(Sym);
    if (SecOrErr)
      return (*SecOrErr)->getName();
    return SecOrErr.takeError();
  }
  return Name;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionFlags(DataRefImpl Sec) const {
  return getSection(Sec)->sh_flags;
}

template <class ELFT>
uint32_t ELFObjectFile<ELFT>::getSectionType(DataRefImpl Sec) const {
  return getSection(Sec)->sh_type;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionOffset(DataRefImpl Sec) const {
  return getSection(Sec)->sh_offset;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSymbolValueImpl(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    report_fatal_error(SymOrErr.takeError());

  uint64_t Ret = (*SymOrErr)->st_value;
  if ((*SymOrErr)->st_shndx == ELF::SHN_ABS)
    return Ret;

  const Elf_Ehdr &Header = EF.getHeader();
  // Clear the ARM/Thumb or microMIPS indicator flag.
  if ((Header.e_machine == ELF::EM_ARM || Header.e_machine == ELF::EM_MIPS) &&
      (*SymOrErr)->getType() == ELF::STT_FUNC)
    Ret &= ~1;

  return Ret;
}

template <class ELFT>
Expected<uint64_t>
ELFObjectFile<ELFT>::getSymbolAddress(DataRefImpl Symb) const {
  Expected<uint64_t> SymbolValueOrErr = getSymbolValue(Symb);
  if (!SymbolValueOrErr)
    // TODO: Test this error.
    return SymbolValueOrErr.takeError();

  uint64_t Result = *SymbolValueOrErr;
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    return SymOrErr.takeError();

  switch ((*SymOrErr)->st_shndx) {
  case ELF::SHN_COMMON:
  case ELF::SHN_UNDEF:
  case ELF::SHN_ABS:
    return Result;
  }

  auto SymTabOrErr = EF.getSection(Symb.d.a);
  if (!SymTabOrErr)
    return SymTabOrErr.takeError();

  if (EF.getHeader().e_type == ELF::ET_REL) {
    ArrayRef<Elf_Word> ShndxTable;
    if (DotSymtabShndxSec) {
      // TODO: Test this error.
      if (Expected<ArrayRef<Elf_Word>> ShndxTableOrErr =
              EF.getSHNDXTable(*DotSymtabShndxSec))
        ShndxTable = *ShndxTableOrErr;
      else
        return ShndxTableOrErr.takeError();
    }

    Expected<const Elf_Shdr *> SectionOrErr =
        EF.getSection(**SymOrErr, *SymTabOrErr, ShndxTable);
    if (!SectionOrErr)
      return SectionOrErr.takeError();
    const Elf_Shdr *Section = *SectionOrErr;
    if (Section)
      Result += Section->sh_addr;
  }

  return Result;
}

template <class ELFT>
uint32_t ELFObjectFile<ELFT>::getSymbolAlignment(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    report_fatal_error(SymOrErr.takeError());
  if ((*SymOrErr)->st_shndx == ELF::SHN_COMMON)
    return (*SymOrErr)->st_value;
  return 0;
}

template <class ELFT>
uint16_t ELFObjectFile<ELFT>::getEMachine() const {
  return EF.getHeader().e_machine;
}

template <class ELFT> uint16_t ELFObjectFile<ELFT>::getEType() const {
  return EF.getHeader().e_type;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSymbolSize(DataRefImpl Sym) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Sym);
  if (!SymOrErr)
    report_fatal_error(SymOrErr.takeError());
  return (*SymOrErr)->st_size;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
  return getSymbolSize(Symb);
}

template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getSymbolBinding(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    report_fatal_error(SymOrErr.takeError());
  return (*SymOrErr)->getBinding();
}

template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getSymbolOther(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    report_fatal_error(SymOrErr.takeError());
  return (*SymOrErr)->st_other;
}

template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getSymbolELFType(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    report_fatal_error(SymOrErr.takeError());
  return (*SymOrErr)->getType();
}

template <class ELFT>
Expected<SymbolRef::Type>
ELFObjectFile<ELFT>::getSymbolType(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    return SymOrErr.takeError();

  switch ((*SymOrErr)->getType()) {
  case ELF::STT_NOTYPE:
    return SymbolRef::ST_Unknown;
  case ELF::STT_SECTION:
    return SymbolRef::ST_Debug;
  case ELF::STT_FILE:
    return SymbolRef::ST_File;
  case ELF::STT_FUNC:
    return SymbolRef::ST_Function;
  case ELF::STT_OBJECT:
  case ELF::STT_COMMON:
    return SymbolRef::ST_Data;
  case ELF::STT_TLS:
  default:
    return SymbolRef::ST_Other;
  }
}

template <class ELFT>
Expected<uint32_t> ELFObjectFile<ELFT>::getSymbolFlags(DataRefImpl Sym) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Sym);
  if (!SymOrErr)
    return SymOrErr.takeError();

  const Elf_Sym *ESym = *SymOrErr;
  uint32_t Result = SymbolRef::SF_None;

  if (ESym->getBinding() != ELF::STB_LOCAL)
    Result |= SymbolRef::SF_Global;

  if (ESym->getBinding() == ELF::STB_WEAK)
    Result |= SymbolRef::SF_Weak;

  if (ESym->st_shndx == ELF::SHN_ABS)
    Result |= SymbolRef::SF_Absolute;

  if (ESym->getType() == ELF::STT_FILE || ESym->getType() == ELF::STT_SECTION)
    Result |= SymbolRef::SF_FormatSpecific;

  if (Expected<typename ELFT::SymRange> SymbolsOrErr =
          EF.symbols(DotSymtabSec)) {
    // Set the SF_FormatSpecific flag for the 0-index null symbol.
    if (ESym == SymbolsOrErr->begin())
      Result |= SymbolRef::SF_FormatSpecific;
  } else
    // TODO: Test this error.
    return SymbolsOrErr.takeError();

  if (Expected<typename ELFT::SymRange> SymbolsOrErr =
          EF.symbols(DotDynSymSec)) {
    // Set the SF_FormatSpecific flag for the 0-index null symbol.
    if (ESym == SymbolsOrErr->begin())
      Result |= SymbolRef::SF_FormatSpecific;
  } else
    // TODO: Test this error.
    return SymbolsOrErr.takeError();

  if (EF.getHeader().e_machine == ELF::EM_AARCH64) {
    if (Expected<StringRef> NameOrErr = getSymbolName(Sym)) {
      StringRef Name = *NameOrErr;
      if (Name.startswith("$d") || Name.startswith("$x"))
        Result |= SymbolRef::SF_FormatSpecific;
    } else {
      // TODO: Actually report errors helpfully.
      consumeError(NameOrErr.takeError());
    }
  } else if (EF.getHeader().e_machine == ELF::EM_ARM) {
    if (Expected<StringRef> NameOrErr = getSymbolName(Sym)) {
      StringRef Name = *NameOrErr;
      // TODO Investigate why empty name symbols need to be marked.
      if (Name.empty() || Name.startswith("$d") || Name.startswith("$t") ||
          Name.startswith("$a"))
        Result |= SymbolRef::SF_FormatSpecific;
    } else {
      // TODO: Actually report errors helpfully.
      consumeError(NameOrErr.takeError());
    }
    if (ESym->getType() == ELF::STT_FUNC && (ESym->st_value & 1) == 1)
      Result |= SymbolRef::SF_Thumb;
  } else if (EF.getHeader().e_machine == ELF::EM_RISCV) {
    if (Expected<StringRef> NameOrErr = getSymbolName(Sym)) {
      // Mark empty name symbols used for label differences.
      if (NameOrErr->empty())
        Result |= SymbolRef::SF_FormatSpecific;
    } else {
      // TODO: Actually report errors helpfully.
      consumeError(NameOrErr.takeError());
    }
  }

  if (ESym->st_shndx == ELF::SHN_UNDEF)
    Result |= SymbolRef::SF_Undefined;

  if (ESym->getType() == ELF::STT_COMMON || ESym->st_shndx == ELF::SHN_COMMON)
    Result |= SymbolRef::SF_Common;

  if (isExportedToOtherDSO(ESym))
    Result |= SymbolRef::SF_Exported;

  if (ESym->getType() == ELF::STT_GNU_IFUNC)
    Result |= SymbolRef::SF_Indirect;

  if (ESym->getVisibility() == ELF::STV_HIDDEN)
    Result |= SymbolRef::SF_Hidden;

  return Result;
}

template <class ELFT>
Expected<section_iterator>
ELFObjectFile<ELFT>::getSymbolSection(const Elf_Sym *ESym,
                                      const Elf_Shdr *SymTab) const {
  ArrayRef<Elf_Word> ShndxTable;
  if (DotSymtabShndxSec) {
    // TODO: Test this error.
    Expected<ArrayRef<Elf_Word>> ShndxTableOrErr =
        EF.getSHNDXTable(*DotSymtabShndxSec);
    if (!ShndxTableOrErr)
      return ShndxTableOrErr.takeError();
    ShndxTable = *ShndxTableOrErr;
  }

  auto ESecOrErr = EF.getSection(*ESym, SymTab, ShndxTable);
  if (!ESecOrErr)
    return ESecOrErr.takeError();

  const Elf_Shdr *ESec = *ESecOrErr;
  if (!ESec)
    return section_end();

  DataRefImpl Sec;
  Sec.p = reinterpret_cast<intptr_t>(ESec);
  return section_iterator(SectionRef(Sec, this));
}

template <class ELFT>
Expected<section_iterator>
ELFObjectFile<ELFT>::getSymbolSection(DataRefImpl Symb) const {
  Expected<const Elf_Sym *> SymOrErr = getSymbol(Symb);
  if (!SymOrErr)
    return SymOrErr.takeError();

  auto SymTabOrErr = EF.getSection(Symb.d.a);
  if (!SymTabOrErr)
    return SymTabOrErr.takeError();
  return getSymbolSection(*SymOrErr, *SymTabOrErr);
}

template <class ELFT>
void ELFObjectFile<ELFT>::moveSectionNext(DataRefImpl &Sec) const {
  const Elf_Shdr *ESec = getSection(Sec);
  Sec = toDRI(++ESec);
}

template <class ELFT>
Expected<StringRef> ELFObjectFile<ELFT>::getSectionName(DataRefImpl Sec) const {
  return EF.getSectionName(*getSection(Sec));
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionAddress(DataRefImpl Sec) const {
  return getSection(Sec)->sh_addr;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionIndex(DataRefImpl Sec) const {
  auto SectionsOrErr = EF.sections();
  handleAllErrors(std::move(SectionsOrErr.takeError()),
                  [](const ErrorInfoBase &) {
                    llvm_unreachable("unable to get section index");
                  });
  const Elf_Shdr *First = SectionsOrErr->begin();
  return getSection(Sec) - First;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionSize(DataRefImpl Sec) const {
  return getSection(Sec)->sh_size;
}

template <class ELFT>
Expected<ArrayRef<uint8_t>>
ELFObjectFile<ELFT>::getSectionContents(DataRefImpl Sec) const {
  const Elf_Shdr *EShdr = getSection(Sec);
  if (EShdr->sh_type == ELF::SHT_NOBITS)
    return ArrayRef((const uint8_t *)base(), (size_t)0);
  if (Error E =
          checkOffset(getMemoryBufferRef(),
                      (uintptr_t)base() + EShdr->sh_offset, EShdr->sh_size))
    return std::move(E);
  return ArrayRef((const uint8_t *)base() + EShdr->sh_offset, EShdr->sh_size);
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getSectionAlignment(DataRefImpl Sec) const {
  return getSection(Sec)->sh_addralign;
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionCompressed(DataRefImpl Sec) const {
  return getSection(Sec)->sh_flags & ELF::SHF_COMPRESSED;
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionText(DataRefImpl Sec) const {
  return getSection(Sec)->sh_flags & ELF::SHF_EXECINSTR;
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionData(DataRefImpl Sec) const {
  const Elf_Shdr *EShdr = getSection(Sec);
  return EShdr->sh_type == ELF::SHT_PROGBITS &&
         EShdr->sh_flags & ELF::SHF_ALLOC &&
         !(EShdr->sh_flags & ELF::SHF_EXECINSTR);
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionBSS(DataRefImpl Sec) const {
  const Elf_Shdr *EShdr = getSection(Sec);
  return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
         EShdr->sh_type == ELF::SHT_NOBITS;
}

template <class ELFT>
std::vector<SectionRef>
ELFObjectFile<ELFT>::dynamic_relocation_sections() const {
  std::vector<SectionRef> Res;
  std::vector<uintptr_t> Offsets;

  auto SectionsOrErr = EF.sections();
  if (!SectionsOrErr)
    return Res;

  for (const Elf_Shdr &Sec : *SectionsOrErr) {
    if (Sec.sh_type != ELF::SHT_DYNAMIC)
      continue;
    Elf_Dyn *Dynamic =
        reinterpret_cast<Elf_Dyn *>((uintptr_t)base() + Sec.sh_offset);
    for (; Dynamic->d_tag != ELF::DT_NULL; Dynamic++) {
      if (Dynamic->d_tag == ELF::DT_REL || Dynamic->d_tag == ELF::DT_RELA ||
          Dynamic->d_tag == ELF::DT_JMPREL) {
        Offsets.push_back(Dynamic->d_un.d_val);
      }
    }
  }
  for (const Elf_Shdr &Sec : *SectionsOrErr) {
    if (is_contained(Offsets, Sec.sh_addr))
      Res.emplace_back(toDRI(&Sec), this);
  }
  return Res;
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isSectionVirtual(DataRefImpl Sec) const {
  return getSection(Sec)->sh_type == ELF::SHT_NOBITS;
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isBerkeleyText(DataRefImpl Sec) const {
  return getSection(Sec)->sh_flags & ELF::SHF_ALLOC &&
         (getSection(Sec)->sh_flags & ELF::SHF_EXECINSTR ||
          !(getSection(Sec)->sh_flags & ELF::SHF_WRITE));
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isBerkeleyData(DataRefImpl Sec) const {
  const Elf_Shdr *EShdr = getSection(Sec);
  return !isBerkeleyText(Sec) && EShdr->sh_type != ELF::SHT_NOBITS &&
         EShdr->sh_flags & ELF::SHF_ALLOC;
}

template <class ELFT>
bool ELFObjectFile<ELFT>::isDebugSection(DataRefImpl Sec) const {
  Expected<StringRef> SectionNameOrErr = getSectionName(Sec);
  if (!SectionNameOrErr) {
    // TODO: Report the error message properly.
    consumeError(SectionNameOrErr.takeError());
    return false;
  }
  StringRef SectionName = SectionNameOrErr.get();
  return SectionName.startswith(".debug") ||
         SectionName.startswith(".zdebug") || SectionName == ".gdb_index";
}

template <class ELFT>
relocation_iterator
ELFObjectFile<ELFT>::section_rel_begin(DataRefImpl Sec) const {
  DataRefImpl RelData;
  auto SectionsOrErr = EF.sections();
  if (!SectionsOrErr)
    return relocation_iterator(RelocationRef());
  uintptr_t SHT = reinterpret_cast<uintptr_t>((*SectionsOrErr).begin());
  RelData.d.a = (Sec.p - SHT) / EF.getHeader().e_shentsize;
  RelData.d.b = 0;
  return relocation_iterator(RelocationRef(RelData, this));
}

template <class ELFT>
relocation_iterator
ELFObjectFile<ELFT>::section_rel_end(DataRefImpl Sec) const {
  const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
  relocation_iterator Begin = section_rel_begin(Sec);
  if (S->sh_type != ELF::SHT_RELA && S->sh_type != ELF::SHT_REL)
    return Begin;
  DataRefImpl RelData = Begin->getRawDataRefImpl();
  const Elf_Shdr *RelSec = getRelSection(RelData);

  // Error check sh_link here so that getRelocationSymbol can just use it.
  auto SymSecOrErr = EF.getSection(RelSec->sh_link);
  if (!SymSecOrErr)
    report_fatal_error(
        Twine(errorToErrorCode(SymSecOrErr.takeError()).message()));

  RelData.d.b += S->sh_size / S->sh_entsize;
  return relocation_iterator(RelocationRef(RelData, this));
}

template <class ELFT>
Expected<section_iterator>
ELFObjectFile<ELFT>::getRelocatedSection(DataRefImpl Sec) const {
  const Elf_Shdr *EShdr = getSection(Sec);
  uintX_t Type = EShdr->sh_type;
  if (Type != ELF::SHT_REL && Type != ELF::SHT_RELA)
    return section_end();

  Expected<const Elf_Shdr *> SecOrErr = EF.getSection(EShdr->sh_info);
  if (!SecOrErr)
    return SecOrErr.takeError();
  return section_iterator(SectionRef(toDRI(*SecOrErr), this));
}

// Relocations
template <class ELFT>
void ELFObjectFile<ELFT>::moveRelocationNext(DataRefImpl &Rel) const {
  ++Rel.d.b;
}

template <class ELFT>
symbol_iterator
ELFObjectFile<ELFT>::getRelocationSymbol(DataRefImpl Rel) const {
  uint32_t symbolIdx;
  const Elf_Shdr *sec = getRelSection(Rel);
  if (sec->sh_type == ELF::SHT_REL)
    symbolIdx = getRel(Rel)->getSymbol(EF.isMips64EL());
  else
    symbolIdx = getRela(Rel)->getSymbol(EF.isMips64EL());
  if (!symbolIdx)
    return symbol_end();

  // FIXME: error check symbolIdx
  DataRefImpl SymbolData;
  SymbolData.d.a = sec->sh_link;
  SymbolData.d.b = symbolIdx;
  return symbol_iterator(SymbolRef(SymbolData, this));
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getRelocationOffset(DataRefImpl Rel) const {
  const Elf_Shdr *sec = getRelSection(Rel);
  if (sec->sh_type == ELF::SHT_REL)
    return getRel(Rel)->r_offset;

  return getRela(Rel)->r_offset;
}

template <class ELFT>
uint64_t ELFObjectFile<ELFT>::getRelocationType(DataRefImpl Rel) const {
  const Elf_Shdr *sec = getRelSection(Rel);
  if (sec->sh_type == ELF::SHT_REL)
    return getRel(Rel)->getType(EF.isMips64EL());
  else
    return getRela(Rel)->getType(EF.isMips64EL());
}

template <class ELFT>
StringRef ELFObjectFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
  return getELFRelocationTypeName(EF.getHeader().e_machine, Type);
}

template <class ELFT>
void ELFObjectFile<ELFT>::getRelocationTypeName(
    DataRefImpl Rel, SmallVectorImpl<char> &Result) const {
  uint32_t type = getRelocationType(Rel);
  EF.getRelocationTypeName(type, Result);
}

template <class ELFT>
Expected<int64_t>
ELFObjectFile<ELFT>::getRelocationAddend(DataRefImpl Rel) const {
  if (getRelSection(Rel)->sh_type != ELF::SHT_RELA)
    return createError("Section is not SHT_RELA");
  return (int64_t)getRela(Rel)->r_addend;
}

template <class ELFT>
const typename ELFObjectFile<ELFT>::Elf_Rel *
ELFObjectFile<ELFT>::getRel(DataRefImpl Rel) const {
  assert(getRelSection(Rel)->sh_type == ELF::SHT_REL);
  auto Ret = EF.template getEntry<Elf_Rel>(Rel.d.a, Rel.d.b);
  if (!Ret)
    report_fatal_error(Twine(errorToErrorCode(Ret.takeError()).message()));
  return *Ret;
}

template <class ELFT>
const typename ELFObjectFile<ELFT>::Elf_Rela *
ELFObjectFile<ELFT>::getRela(DataRefImpl Rela) const {
  assert(getRelSection(Rela)->sh_type == ELF::SHT_RELA);
  auto Ret = EF.template getEntry<Elf_Rela>(Rela.d.a, Rela.d.b);
  if (!Ret)
    report_fatal_error(Twine(errorToErrorCode(Ret.takeError()).message()));
  return *Ret;
}

template <class ELFT>
Expected<ELFObjectFile<ELFT>>
ELFObjectFile<ELFT>::create(MemoryBufferRef Object, bool InitContent) {
  auto EFOrErr = ELFFile<ELFT>::create(Object.getBuffer());
  if (Error E = EFOrErr.takeError())
    return std::move(E);

  ELFObjectFile<ELFT> Obj = {Object, std::move(*EFOrErr), nullptr, nullptr,
                             nullptr};
  if (InitContent)
    if (Error E = Obj.initContent())
      return std::move(E);
  return std::move(Obj);
}
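
// A minimal usage sketch (illustrative only; `Buffer` is a hypothetical
// MemoryBufferRef over an ELF image):
//
//   Expected<ELFObjectFile<ELF64LE>> ObjOrErr =
//       ELFObjectFile<ELF64LE>::create(Buffer);
//   if (!ObjOrErr)
//     consumeError(ObjOrErr.takeError()); // or handle the error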

template <class ELFT>
ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, ELFFile<ELFT> EF,
                                   const Elf_Shdr *DotDynSymSec,
                                   const Elf_Shdr *DotSymtabSec,
                                   const Elf_Shdr *DotSymtabShndx)
    : ELFObjectFileBase(
          getELFType(ELFT::TargetEndianness == support::little, ELFT::Is64Bits),
          Object),
      EF(EF), DotDynSymSec(DotDynSymSec), DotSymtabSec(DotSymtabSec),
      DotSymtabShndxSec(DotSymtabShndx) {}

template <class ELFT>
ELFObjectFile<ELFT>::ELFObjectFile(ELFObjectFile<ELFT> &&Other)
    : ELFObjectFile(Other.Data, Other.EF, Other.DotDynSymSec,
                    Other.DotSymtabSec, Other.DotSymtabShndxSec) {}

template <class ELFT>
basic_symbol_iterator ELFObjectFile<ELFT>::symbol_begin() const {
  DataRefImpl Sym =
      toDRI(DotSymtabSec,
            DotSymtabSec && DotSymtabSec->sh_size >= sizeof(Elf_Sym) ? 1 : 0);
  return basic_symbol_iterator(SymbolRef(Sym, this));
}

template <class ELFT>
basic_symbol_iterator ELFObjectFile<ELFT>::symbol_end() const {
  const Elf_Shdr *SymTab = DotSymtabSec;
  if (!SymTab)
    return symbol_begin();
  DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
  return basic_symbol_iterator(SymbolRef(Sym, this));
}

template <class ELFT>
elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_begin() const {
  if (!DotDynSymSec || DotDynSymSec->sh_size < sizeof(Elf_Sym))
    // Ignore errors here where the dynsym is empty or sh_size is less than
    // the size of one symbol. These should be handled elsewhere.
    return symbol_iterator(SymbolRef(toDRI(DotDynSymSec, 0), this));
  // Skip 0-index NULL symbol.
  return symbol_iterator(SymbolRef(toDRI(DotDynSymSec, 1), this));
}

template <class ELFT>
elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_end() const {
  const Elf_Shdr *SymTab = DotDynSymSec;
  if (!SymTab)
    return dynamic_symbol_begin();
  DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
  return basic_symbol_iterator(SymbolRef(Sym, this));
}

template <class ELFT>
section_iterator ELFObjectFile<ELFT>::section_begin() const {
  auto SectionsOrErr = EF.sections();
  if (!SectionsOrErr)
    return section_iterator(SectionRef());
  return section_iterator(SectionRef(toDRI((*SectionsOrErr).begin()), this));
}

template <class ELFT>
section_iterator ELFObjectFile<ELFT>::section_end() const {
  auto SectionsOrErr = EF.sections();
  if (!SectionsOrErr)
    return section_iterator(SectionRef());
  return section_iterator(SectionRef(toDRI((*SectionsOrErr).end()), this));
}

template <class ELFT>
uint8_t ELFObjectFile<ELFT>::getBytesInAddress() const {
  return ELFT::Is64Bits ? 8 : 4;
}

template <class ELFT>
StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
  constexpr bool IsLittleEndian = ELFT::TargetEndianness == support::little;
  switch (EF.getHeader().e_ident[ELF::EI_CLASS]) {
  case ELF::ELFCLASS32:
    switch (EF.getHeader().e_machine) {
    case ELF::EM_68K:
      return "elf32-m68k";
    case ELF::EM_386:
      return "elf32-i386";
    case ELF::EM_IAMCU:
      return "elf32-iamcu";
    case ELF::EM_X86_64:
      return "elf32-x86-64";
    case ELF::EM_ARM:
      return (IsLittleEndian ? "elf32-littlearm" : "elf32-bigarm");
    case ELF::EM_AVR:
      return "elf32-avr";
    case ELF::EM_HEXAGON:
      return "elf32-hexagon";
    case ELF::EM_LANAI:
      return "elf32-lanai";
    case ELF::EM_MIPS:
      return "elf32-mips";
    case ELF::EM_MSP430:
      return "elf32-msp430";
    case ELF::EM_PPC:
      return (IsLittleEndian ? "elf32-powerpcle" : "elf32-powerpc");
    case ELF::EM_RISCV:
      return "elf32-littleriscv";
    case ELF::EM_CSKY:
      return "elf32-csky";
    case ELF::EM_SPARC:
    case ELF::EM_SPARC32PLUS:
      return "elf32-sparc";
    case ELF::EM_AMDGPU:
      return "elf32-amdgpu";
    case ELF::EM_LOONGARCH:
      return "elf32-loongarch";
    case ELF::EM_XTENSA:
      return "elf32-xtensa";
    default:
      return "elf32-unknown";
    }
  case ELF::ELFCLASS64:
    switch (EF.getHeader().e_machine) {
    case ELF::EM_386:
      return "elf64-i386";
    case ELF::EM_X86_64:
      return "elf64-x86-64";
    case ELF::EM_AARCH64:
      return (IsLittleEndian ? "elf64-littleaarch64" : "elf64-bigaarch64");
    case ELF::EM_PPC64:
      return (IsLittleEndian ? "elf64-powerpcle" : "elf64-powerpc");
    case ELF::EM_RISCV:
      return "elf64-littleriscv";
    case ELF::EM_S390:
      return "elf64-s390";
    case ELF::EM_SPARCV9:
      return "elf64-sparc";
    case ELF::EM_MIPS:
      return "elf64-mips";
    case ELF::EM_AMDGPU:
      return "elf64-amdgpu";
    case ELF::EM_BPF:
      return "elf64-bpf";
    case ELF::EM_VE:
      return "elf64-ve";
    case ELF::EM_LOONGARCH:
      return "elf64-loongarch";
    default:
      return "elf64-unknown";
    }
  default:
    // FIXME: Proper error handling.
    report_fatal_error("Invalid ELFCLASS!");
  }
}

template <class ELFT> Triple::ArchType ELFObjectFile<ELFT>::getArch() const {
  bool IsLittleEndian = ELFT::TargetEndianness == support::little;
  switch (EF.getHeader().e_machine) {
  case ELF::EM_68K:
    return Triple::m68k;
  case ELF::EM_386:
  case ELF::EM_IAMCU:
    return Triple::x86;
  case ELF::EM_X86_64:
    return Triple::x86_64;
  case ELF::EM_AARCH64:
    return IsLittleEndian ? Triple::aarch64 : Triple::aarch64_be;
  case ELF::EM_ARM:
    return Triple::arm;
  case ELF::EM_AVR:
    return Triple::avr;
  case ELF::EM_HEXAGON:
    return Triple::hexagon;
  case ELF::EM_LANAI:
    return Triple::lanai;
  case ELF::EM_MIPS:
    switch (EF.getHeader().e_ident[ELF::EI_CLASS]) {
    case ELF::ELFCLASS32:
      return IsLittleEndian ? Triple::mipsel : Triple::mips;
    case ELF::ELFCLASS64:
      return IsLittleEndian ? Triple::mips64el : Triple::mips64;
    default:
      report_fatal_error("Invalid ELFCLASS!");
    }
  case ELF::EM_MSP430:
    return Triple::msp430;
  case ELF::EM_PPC:
    return IsLittleEndian ? Triple::ppcle : Triple::ppc;
  case ELF::EM_PPC64:
    return IsLittleEndian ? Triple::ppc64le : Triple::ppc64;
  case ELF::EM_RISCV:
    switch (EF.getHeader().e_ident[ELF::EI_CLASS]) {
    case ELF::ELFCLASS32:
      return Triple::riscv32;
    case ELF::ELFCLASS64:
      return Triple::riscv64;
    default:
      report_fatal_error("Invalid ELFCLASS!");
    }
  case ELF::EM_S390:
    return Triple::systemz;

  case ELF::EM_SPARC:
  case ELF::EM_SPARC32PLUS:
    return IsLittleEndian ? Triple::sparcel : Triple::sparc;
  case ELF::EM_SPARCV9:
    return Triple::sparcv9;

  case ELF::EM_AMDGPU: {
    if (!IsLittleEndian)
      return Triple::UnknownArch;

    unsigned MACH = EF.getHeader().e_flags & ELF::EF_AMDGPU_MACH;
    if (MACH >= ELF::EF_AMDGPU_MACH_R600_FIRST &&
        MACH <= ELF::EF_AMDGPU_MACH_R600_LAST)
      return Triple::r600;
    if (MACH >= ELF::EF_AMDGPU_MACH_AMDGCN_FIRST &&
        MACH <= ELF::EF_AMDGPU_MACH_AMDGCN_LAST)
      return Triple::amdgcn;

    return Triple::UnknownArch;
  }

  case ELF::EM_BPF:
    return IsLittleEndian ? Triple::bpfel : Triple::bpfeb;

  case ELF::EM_VE:
    return Triple::ve;
  case ELF::EM_CSKY:
    return Triple::csky;

  case ELF::EM_LOONGARCH:
    switch (EF.getHeader().e_ident[ELF::EI_CLASS]) {
    case ELF::ELFCLASS32:
      return Triple::loongarch32;
    case ELF::ELFCLASS64:
      return Triple::loongarch64;
    default:
      report_fatal_error("Invalid ELFCLASS!");
    }

  case ELF::EM_XTENSA:
    return Triple::xtensa;

  default:
    return Triple::UnknownArch;
  }
}

template <class ELFT>
Expected<uint64_t> ELFObjectFile<ELFT>::getStartAddress() const {
  return EF.getHeader().e_entry;
}

template <class ELFT>
ELFObjectFileBase::elf_symbol_iterator_range
ELFObjectFile<ELFT>::getDynamicSymbolIterators() const {
  return make_range(dynamic_symbol_begin(), dynamic_symbol_end());
}

template <class ELFT> bool ELFObjectFile<ELFT>::isRelocatableObject() const {
  return EF.getHeader().e_type == ELF::ET_REL;
}

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_ELFOBJECTFILE_H
//===- MachOUniversal.h - Mach-O universal binaries -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares Mach-O fat/universal binaries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_MACHOUNIVERSAL_H
#define LLVM_OBJECT_MACHOUNIVERSAL_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/MachO.h"
#include "llvm/TargetParser/Triple.h"

namespace llvm {
class StringRef;
class LLVMContext;

namespace object {
class Archive;
class IRObjectFile;

class MachOUniversalBinary : public Binary {
  virtual void anchor();

  uint32_t Magic;
  uint32_t NumberOfObjects;
public:
  static constexpr uint32_t MaxSectionAlignment = 15; /* 2**15 or 0x8000 */

  class ObjectForArch {
    const MachOUniversalBinary *Parent;
    /// Index of object in the universal binary.
    uint32_t Index;
    /// Descriptor of the object.
    MachO::fat_arch Header;
    MachO::fat_arch_64 Header64;

  public:
    ObjectForArch(const MachOUniversalBinary *Parent, uint32_t Index);

    void clear() {
      Parent = nullptr;
      Index = 0;
    }

    bool operator==(const ObjectForArch &Other) const {
      return (Parent == Other.Parent) && (Index == Other.Index);
    }

    ObjectForArch getNext() const { return ObjectForArch(Parent, Index + 1); }
    uint32_t getCPUType() const {
      if (Parent->getMagic() == MachO::FAT_MAGIC)
        return Header.cputype;
      else // Parent->getMagic() == MachO::FAT_MAGIC_64
        return Header64.cputype;
    }
    uint32_t getCPUSubType() const {
      if (Parent->getMagic() == MachO::FAT_MAGIC)
        return Header.cpusubtype;
      else // Parent->getMagic() == MachO::FAT_MAGIC_64
        return Header64.cpusubtype;
    }
    uint64_t getOffset() const {
      if (Parent->getMagic() == MachO::FAT_MAGIC)
        return Header.offset;
      else // Parent->getMagic() == MachO::FAT_MAGIC_64
        return Header64.offset;
    }
    uint64_t getSize() const {
      if (Parent->getMagic() == MachO::FAT_MAGIC)
        return Header.size;
      else // Parent->getMagic() == MachO::FAT_MAGIC_64
        return Header64.size;
    }
    uint32_t getAlign() const {
      if (Parent->getMagic() == MachO::FAT_MAGIC)
        return Header.align;
      else // Parent->getMagic() == MachO::FAT_MAGIC_64
        return Header64.align;
    }
    uint32_t getReserved() const {
      if (Parent->getMagic() == MachO::FAT_MAGIC)
        return 0;
      else // Parent->getMagic() == MachO::FAT_MAGIC_64
        return Header64.reserved;
    }
    Triple getTriple() const {
      return MachOObjectFile::getArchTriple(getCPUType(), getCPUSubType());
    }
    std::string getArchFlagName() const {
      const char *McpuDefault, *ArchFlag;
      MachOObjectFile::getArchTriple(getCPUType(), getCPUSubType(),
                                     &McpuDefault, &ArchFlag);
      return ArchFlag ? ArchFlag : std::string();
    }

    Expected<std::unique_ptr<MachOObjectFile>> getAsObjectFile() const;
    Expected<std::unique_ptr<IRObjectFile>>
    getAsIRObject(LLVMContext &Ctx) const;

    Expected<std::unique_ptr<Archive>> getAsArchive() const;
  };

  class object_iterator {
    ObjectForArch Obj;
  public:
    object_iterator(const ObjectForArch &Obj) : Obj(Obj) {}
    const ObjectForArch *operator->() const { return &Obj; }
    const ObjectForArch &operator*() const { return Obj; }

    bool operator==(const object_iterator &Other) const {
      return Obj == Other.Obj;
    }
    bool operator!=(const object_iterator &Other) const {
      return !(*this == Other);
    }

    object_iterator& operator++() {  // Preincrement
      Obj = Obj.getNext();
      return *this;
    }
  };

  MachOUniversalBinary(MemoryBufferRef Source, Error &Err);
  static Expected<std::unique_ptr<MachOUniversalBinary>>
  create(MemoryBufferRef Source);

  object_iterator begin_objects() const {
    return ObjectForArch(this, 0);
  }
  object_iterator end_objects() const {
    return ObjectForArch(nullptr, 0);
  }

  iterator_range<object_iterator> objects() const {
    return make_range(begin_objects(), end_objects());
  }

  uint32_t getMagic() const { return Magic; }
  uint32_t getNumberOfObjects() const { return NumberOfObjects; }

  // Cast methods.
  static bool classof(Binary const *V) {
    return V->isMachOUniversalBinary();
  }

  Expected<ObjectForArch>
  getObjectForArch(StringRef ArchName) const;

  Expected<std::unique_ptr<MachOObjectFile>>
  getMachOObjectForArch(StringRef ArchName) const;

  Expected<std::unique_ptr<IRObjectFile>>
  getIRObjectForArch(StringRef ArchName, LLVMContext &Ctx) const;

  Expected<std::unique_ptr<Archive>>
  getArchiveForArch(StringRef ArchName) const;
};
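
// Usage sketch (illustrative, not part of the original header): walking the
// slices of a fat binary. "MB" is a hypothetical MemoryBufferRef holding the
// universal file; outs() comes from llvm/Support/raw_ostream.h.
//
//   Expected<std::unique_ptr<MachOUniversalBinary>> BinOrErr =
//       MachOUniversalBinary::create(MB);
//   if (BinOrErr)
//     for (const MachOUniversalBinary::ObjectForArch &O :
//          (*BinOrErr)->objects())
//       outs() << O.getArchFlagName() << " offset=" << O.getOffset()
//              << " size=" << O.getSize() << "\n";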

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_MACHOUNIVERSAL_H

//===- COFFImportFile.h - COFF short import file implementation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// COFF short import file is a special kind of file which contains
// only symbol names for DLL-exported symbols. This class implements
// exporting of Symbols to create libraries and a SymbolicFile
// interface for the file type.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_COFFIMPORTFILE_H
#define LLVM_OBJECT_COFFIMPORTFILE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace object {

class COFFImportFile : public SymbolicFile {
public:
  COFFImportFile(MemoryBufferRef Source)
      : SymbolicFile(ID_COFFImportFile, Source) {}

  static bool classof(Binary const *V) { return V->isCOFFImportFile(); }

  void moveSymbolNext(DataRefImpl &Symb) const override { ++Symb.p; }

  Error printSymbolName(raw_ostream &OS, DataRefImpl Symb) const override {
    if (Symb.p == 0)
      OS << "__imp_";
    OS << StringRef(Data.getBufferStart() + sizeof(coff_import_header));
    return Error::success();
  }

  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override {
    return SymbolRef::SF_Global;
  }

  basic_symbol_iterator symbol_begin() const override {
    return BasicSymbolRef(DataRefImpl(), this);
  }

  basic_symbol_iterator symbol_end() const override {
    DataRefImpl Symb;
    Symb.p = isData() ? 1 : 2;
    return BasicSymbolRef(Symb, this);
  }

  bool is64Bit() const override { return false; }

  const coff_import_header *getCOFFImportHeader() const {
    return reinterpret_cast<const object::coff_import_header *>(
        Data.getBufferStart());
  }

private:
  bool isData() const {
    return getCOFFImportHeader()->getType() == COFF::IMPORT_DATA;
  }
};
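
// Usage sketch (illustrative, not part of the original header): a code-kind
// import file exposes two names, the "__imp_" pointer and the thunk (see
// symbol_begin()/symbol_end() above). "Import" is a hypothetical parsed
// COFFImportFile.
//
//   for (const BasicSymbolRef &Sym : Import.symbols()) {
//     if (Error E = Sym.printName(outs()))  // prints "__imp_foo", then "foo"
//       consumeError(std::move(E));
//     outs() << "\n";
//   }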

struct COFFShortExport {
  /// The name of the export as specified in the .def file or on the command
  /// line, i.e. "foo" in "/EXPORT:foo", and "bar" in "/EXPORT:foo=bar". This
  /// may lack mangling, such as underscore prefixing and stdcall suffixing.
  std::string Name;

  /// The external, exported name. Only non-empty when export renaming is in
  /// effect, i.e. "foo" in "/EXPORT:foo=bar".
  std::string ExtName;

  /// The real, mangled symbol name from the object file. Given
  /// "/export:foo=bar", this could be "_bar@8" if bar is stdcall.
  std::string SymbolName;

  /// Creates a weak alias. This is the name of the weak aliasee. In a .def
  /// file, this is "baz" in "EXPORTS\nfoo = bar == baz".
  std::string AliasTarget;

  uint16_t Ordinal = 0;
  bool Noname = false;
  bool Data = false;
  bool Private = false;
  bool Constant = false;

  friend bool operator==(const COFFShortExport &L, const COFFShortExport &R) {
    return L.Name == R.Name && L.ExtName == R.ExtName &&
            L.Ordinal == R.Ordinal && L.Noname == R.Noname &&
            L.Data == R.Data && L.Private == R.Private;
  }

  friend bool operator!=(const COFFShortExport &L, const COFFShortExport &R) {
    return !(L == R);
  }
};

Error writeImportLibrary(StringRef ImportName, StringRef Path,
                         ArrayRef<COFFShortExport> Exports,
                         COFF::MachineTypes Machine, bool MinGW);
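
// Usage sketch (illustrative, not part of the original header): emitting an
// import library for the renamed export "/EXPORT:foo=bar", following the
// field comments above. "mydll.dll" and "mydll.lib" are hypothetical names.
//
//   COFFShortExport E;
//   E.Name = "bar";           // "bar" in "/EXPORT:foo=bar"
//   E.ExtName = "foo";        // external name; renaming is in effect
//   E.SymbolName = "_bar@8";  // mangled symbol from the object file
//   Error Err = writeImportLibrary("mydll.dll", "mydll.lib", {E},
//                                  COFF::IMAGE_FILE_MACHINE_I386,
//                                  /*MinGW=*/false);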

} // namespace object
} // namespace llvm

#endif // LLVM_OBJECT_COFFIMPORTFILE_H

//===-- TapiUniversal.h - Text-based Dynamic Library Stub -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the TapiUniversal interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_TAPIUNIVERSAL_H
#define LLVM_OBJECT_TAPIUNIVERSAL_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/TextAPI/Architecture.h"
#include "llvm/TextAPI/InterfaceFile.h"

namespace llvm {
namespace object {

class TapiFile;

class TapiUniversal : public Binary {
public:
  class ObjectForArch {
    const TapiUniversal *Parent;
    int Index;

  public:
    ObjectForArch(const TapiUniversal *Parent, int Index)
        : Parent(Parent), Index(Index) {}

    ObjectForArch getNext() const { return ObjectForArch(Parent, Index + 1); }

    bool operator==(const ObjectForArch &Other) const {
      return (Parent == Other.Parent) && (Index == Other.Index);
    }

    uint32_t getCPUType() const {
      auto Result =
          MachO::getCPUTypeFromArchitecture(Parent->Libraries[Index].Arch);
      return Result.first;
    }

    uint32_t getCPUSubType() const {
      auto Result =
          MachO::getCPUTypeFromArchitecture(Parent->Libraries[Index].Arch);
      return Result.second;
    }

    StringRef getArchFlagName() const {
      return MachO::getArchitectureName(Parent->Libraries[Index].Arch);
    }

    std::string getInstallName() const {
      return std::string(Parent->Libraries[Index].InstallName);
    }

    bool isTopLevelLib() const {
      return Parent->ParsedFile->getInstallName() == getInstallName();
    }

    Expected<std::unique_ptr<TapiFile>> getAsObjectFile() const;
  };

  class object_iterator {
    ObjectForArch Obj;

  public:
    object_iterator(const ObjectForArch &Obj) : Obj(Obj) {}
    const ObjectForArch *operator->() const { return &Obj; }
    const ObjectForArch &operator*() const { return Obj; }

    bool operator==(const object_iterator &Other) const {
      return Obj == Other.Obj;
    }
    bool operator!=(const object_iterator &Other) const {
      return !(*this == Other);
    }

    object_iterator &operator++() { // Preincrement
      Obj = Obj.getNext();
      return *this;
    }
  };

  TapiUniversal(MemoryBufferRef Source, Error &Err);
  static Expected<std::unique_ptr<TapiUniversal>>
  create(MemoryBufferRef Source);
  ~TapiUniversal() override;

  object_iterator begin_objects() const { return ObjectForArch(this, 0); }
  object_iterator end_objects() const {
    return ObjectForArch(this, Libraries.size());
  }

  iterator_range<object_iterator> objects() const {
    return make_range(begin_objects(), end_objects());
  }

  const MachO::InterfaceFile &getInterfaceFile() { return *ParsedFile; }

  uint32_t getNumberOfObjects() const { return Libraries.size(); }

  static bool classof(const Binary *v) { return v->isTapiUniversal(); }

private:
  struct Library {
    StringRef InstallName;
    MachO::Architecture Arch;
  };

  std::unique_ptr<MachO::InterfaceFile> ParsedFile;
  std::vector<Library> Libraries;
};
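
// Usage sketch (illustrative, not part of the original header): enumerating
// the libraries recorded in a .tbd document. "MB" is a hypothetical
// MemoryBufferRef for the file.
//
//   Expected<std::unique_ptr<TapiUniversal>> TUOrErr =
//       TapiUniversal::create(MB);
//   if (TUOrErr)
//     for (const TapiUniversal::ObjectForArch &O : (*TUOrErr)->objects())
//       outs() << O.getArchFlagName() << ": " << O.getInstallName() << "\n";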

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_TAPIUNIVERSAL_H

//===- MachO.h - MachO object file implementation ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MachOObjectFile class, which implements the ObjectFile
// interface for MachO files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECT_MACHO_H
#define LLVM_OBJECT_MACHO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/BinaryFormat/Swift.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"
#include <cstdint>
#include <memory>
#include <string>
#include <system_error>

namespace llvm {
namespace object {

/// DiceRef - This is a value type class that represents a single
/// data-in-code entry in the data-in-code table of a Mach-O object file.
class DiceRef {
  DataRefImpl DicePimpl;
  const ObjectFile *OwningObject = nullptr;

public:
  DiceRef() = default;
  DiceRef(DataRefImpl DiceP, const ObjectFile *Owner);

  bool operator==(const DiceRef &Other) const;
  bool operator<(const DiceRef &Other) const;

  void moveNext();

  std::error_code getOffset(uint32_t &Result) const;
  std::error_code getLength(uint16_t &Result) const;
  std::error_code getKind(uint16_t &Result) const;

  DataRefImpl getRawDataRefImpl() const;
  const ObjectFile *getObjectFile() const;
};
using dice_iterator = content_iterator<DiceRef>;

/// ExportEntry encapsulates the current-state-of-the-walk used when doing a
/// non-recursive walk of the trie data structure.  This allows you to iterate
/// across all exported symbols using:
///      Error Err = Error::success();
///      for (const llvm::object::ExportEntry &AnExport : Obj->exports(&Err)) {
///      }
///      if (Err) { report error ...
class ExportEntry {
public:
  ExportEntry(Error *Err, const MachOObjectFile *O, ArrayRef<uint8_t> Trie);

  StringRef name() const;
  uint64_t flags() const;
  uint64_t address() const;
  uint64_t other() const;
  StringRef otherName() const;
  uint32_t nodeOffset() const;

  bool operator==(const ExportEntry &) const;

  void moveNext();

private:
  friend class MachOObjectFile;

  void moveToFirst();
  void moveToEnd();
  uint64_t readULEB128(const uint8_t *&p, const char **error);
  void pushDownUntilBottom();
  void pushNode(uint64_t Offset);

  // Represents a node in the mach-o exports trie.
  struct NodeState {
    NodeState(const uint8_t *Ptr);

    const uint8_t *Start;
    const uint8_t *Current;
    uint64_t Flags = 0;
    uint64_t Address = 0;
    uint64_t Other = 0;
    const char *ImportName = nullptr;
    unsigned ChildCount = 0;
    unsigned NextChildIndex = 0;
    unsigned ParentStringLength = 0;
    bool IsExportNode = false;
  };
  using NodeList = SmallVector<NodeState, 16>;
  using node_iterator = NodeList::const_iterator;

  Error *E;
  const MachOObjectFile *O;
  ArrayRef<uint8_t> Trie;
  SmallString<256> CumulativeString;
  NodeList Stack;
  bool Done = false;

  iterator_range<node_iterator> nodes() const {
    return make_range(Stack.begin(), Stack.end());
  }
};
using export_iterator = content_iterator<ExportEntry>;

// Segment info so SegIndex/SegOffset pairs in a Mach-O Bind or Rebase entry
// can be checked and translated.  Only the SegIndex/SegOffset pairs from
// checked entries are to be used with the segmentName(), sectionName() and
// address() methods below.
class BindRebaseSegInfo {
public:
  BindRebaseSegInfo(const MachOObjectFile *Obj);

  // Used to check a Mach-O Bind or Rebase entry for errors when iterating.
  const char* checkSegAndOffsets(int32_t SegIndex, uint64_t SegOffset,
                                 uint8_t PointerSize, uint32_t Count=1,
                                 uint32_t Skip=0);
  // Used with valid SegIndex/SegOffset values from checked entries.
  StringRef segmentName(int32_t SegIndex);
  StringRef sectionName(int32_t SegIndex, uint64_t SegOffset);
  uint64_t address(uint32_t SegIndex, uint64_t SegOffset);

private:
  struct SectionInfo {
    uint64_t Address;
    uint64_t Size;
    StringRef SectionName;
    StringRef SegmentName;
    uint64_t OffsetInSegment;
    uint64_t SegmentStartAddress;
    int32_t SegmentIndex;
  };
  const SectionInfo &findSection(int32_t SegIndex, uint64_t SegOffset);

  SmallVector<SectionInfo, 32> Sections;
  int32_t MaxSegIndex;
};

/// MachORebaseEntry encapsulates the current state in the decompression of
/// rebasing opcodes. This allows you to iterate through the compressed table of
/// rebasing using:
///    Error Err = Error::success();
///    for (const llvm::object::MachORebaseEntry &Entry : Obj->rebaseTable(&Err)) {
///    }
///    if (Err) { report error ...
class MachORebaseEntry {
public:
  MachORebaseEntry(Error *Err, const MachOObjectFile *O,
                   ArrayRef<uint8_t> opcodes, bool is64Bit);

  int32_t segmentIndex() const;
  uint64_t segmentOffset() const;
  StringRef typeName() const;
  StringRef segmentName() const;
  StringRef sectionName() const;
  uint64_t address() const;

  bool operator==(const MachORebaseEntry &) const;

  void moveNext();

private:
  friend class MachOObjectFile;

  void moveToFirst();
  void moveToEnd();
  uint64_t readULEB128(const char **error);

  Error *E;
  const MachOObjectFile *O;
  ArrayRef<uint8_t> Opcodes;
  const uint8_t *Ptr;
  uint64_t SegmentOffset = 0;
  int32_t SegmentIndex = -1;
  uint64_t RemainingLoopCount = 0;
  uint64_t AdvanceAmount = 0;
  uint8_t  RebaseType = 0;
  uint8_t  PointerSize;
  bool     Done = false;
};
using rebase_iterator = content_iterator<MachORebaseEntry>;

/// MachOBindEntry encapsulates the current state in the decompression of
/// binding opcodes. This allows you to iterate through the compressed table of
/// bindings using:
///    Error Err = Error::success();
///    for (const llvm::object::MachOBindEntry &Entry : Obj->bindTable(&Err)) {
///    }
///    if (Err) { report error ...
class MachOBindEntry {
public:
  enum class Kind { Regular, Lazy, Weak };

  MachOBindEntry(Error *Err, const MachOObjectFile *O,
                 ArrayRef<uint8_t> Opcodes, bool is64Bit, MachOBindEntry::Kind);

  int32_t segmentIndex() const;
  uint64_t segmentOffset() const;
  StringRef typeName() const;
  StringRef symbolName() const;
  uint32_t flags() const;
  int64_t addend() const;
  int ordinal() const;

  StringRef segmentName() const;
  StringRef sectionName() const;
  uint64_t address() const;

  bool operator==(const MachOBindEntry &) const;

  void moveNext();

private:
  friend class MachOObjectFile;

  void moveToFirst();
  void moveToEnd();
  uint64_t readULEB128(const char **error);
  int64_t readSLEB128(const char **error);

  Error *E;
  const MachOObjectFile *O;
  ArrayRef<uint8_t> Opcodes;
  const uint8_t *Ptr;
  uint64_t SegmentOffset = 0;
  int32_t  SegmentIndex = -1;
  StringRef SymbolName;
  bool     LibraryOrdinalSet = false;
  int      Ordinal = 0;
  uint32_t Flags = 0;
  int64_t  Addend = 0;
  uint64_t RemainingLoopCount = 0;
  uint64_t AdvanceAmount = 0;
  uint8_t  BindType = 0;
  uint8_t  PointerSize;
  Kind     TableKind;
  bool     Done = false;
};
using bind_iterator = content_iterator<MachOBindEntry>;

/// ChainedFixupTarget holds all the information about an external symbol
/// necessary to bind this binary to that symbol. These values are referenced
/// indirectly by chained fixup binds. This structure captures values from all
/// import and symbol formats.
///
/// Be aware there are two notions of weak here:
///   WeakImport == true
///     The associated bind may be set to 0 if this symbol is missing from its
///     parent library. This is called a "weak import."
///   LibOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP
///     This symbol may be coalesced with other libraries vending the same
///     symbol. E.g., C++'s "operator new". This is called a "weak bind."
struct ChainedFixupTarget {
public:
  ChainedFixupTarget(int LibOrdinal, uint32_t NameOffset, StringRef Symbol,
                     uint64_t Addend, bool WeakImport)
      : LibOrdinal(LibOrdinal), NameOffset(NameOffset), SymbolName(Symbol),
        Addend(Addend), WeakImport(WeakImport) {}

  int libOrdinal() { return LibOrdinal; }
  uint32_t nameOffset() { return NameOffset; }
  StringRef symbolName() { return SymbolName; }
  uint64_t addend() { return Addend; }
  bool weakImport() { return WeakImport; }
  bool weakBind() {
    return LibOrdinal == MachO::BIND_SPECIAL_DYLIB_WEAK_LOOKUP;
  }

private:
  int LibOrdinal;
  uint32_t NameOffset;
  StringRef SymbolName;
  uint64_t Addend;
  bool WeakImport;
};
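
// Illustrative sketch (not part of the original header) of the two "weak"
// notions documented above; the symbol and values are hypothetical.
//
//   ChainedFixupTarget T(/*LibOrdinal=*/MachO::BIND_SPECIAL_DYLIB_WEAK_LOOKUP,
//                        /*NameOffset=*/0, /*Symbol=*/"__Znwm",
//                        /*Addend=*/0, /*WeakImport=*/false);
//   bool CoalescedLookup = T.weakBind();  // true: may coalesce across libraries
//   bool MayBeMissing = T.weakImport();   // false: bind may not resolve to 0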

struct ChainedFixupsSegment {
  ChainedFixupsSegment(uint8_t SegIdx, uint32_t Offset,
                       const MachO::dyld_chained_starts_in_segment &Header,
                       std::vector<uint16_t> &&PageStarts)
      : SegIdx(SegIdx), Offset(Offset), Header(Header),
        PageStarts(std::move(PageStarts)) {}

  uint32_t SegIdx;
  uint32_t Offset; // dyld_chained_starts_in_image::seg_info_offset[SegIdx]
  MachO::dyld_chained_starts_in_segment Header;
  std::vector<uint16_t> PageStarts; // page_start[] entries, host endianness
};

/// MachOAbstractFixupEntry is an abstract class representing a fixup in a
/// MH_DYLDLINK file. Fixups generally represent rebases and binds. Binds also
/// subdivide into additional subtypes (weak, lazy, reexport).
///
/// The two concrete subclasses of MachOAbstractFixupEntry are:
///
///   MachORebaseBindEntry   - for dyld opcode-based tables, including threaded-
///                            rebase, where rebases are mixed in with other
///                            bind opcodes.
///   MachOChainedFixupEntry - for pointer chains embedded in data pages.
class MachOAbstractFixupEntry {
public:
  MachOAbstractFixupEntry(Error *Err, const MachOObjectFile *O);

  int32_t segmentIndex() const;
  uint64_t segmentOffset() const;
  uint64_t segmentAddress() const;
  StringRef segmentName() const;
  StringRef sectionName() const;
  StringRef typeName() const;
  StringRef symbolName() const;
  uint32_t flags() const;
  int64_t addend() const;
  int ordinal() const;

  /// \return the location of this fixup as a VM Address. For the VM
  /// Address this fixup is pointing to, use pointerValue().
  uint64_t address() const;

  /// \return the VM Address pointed to by this fixup. Use
  /// pointerValue() to compare against other VM Addresses, such as
  /// section addresses or segment vmaddrs.
  uint64_t pointerValue() const { return PointerValue; }

  /// \return the raw "on-disk" representation of the fixup. For
  /// Threaded rebases and Chained pointers these values are generally
  /// encoded into various different pointer formats. This value is
  /// exposed in API for tools that want to display and annotate the
  /// raw bits.
  uint64_t rawValue() const { return RawValue; }

  void moveNext();

protected:
  Error *E;
  const MachOObjectFile *O;
  uint64_t SegmentOffset = 0;
  int32_t SegmentIndex = -1;
  StringRef SymbolName;
  int32_t Ordinal = 0;
  uint32_t Flags = 0;
  int64_t Addend = 0;
  uint64_t PointerValue = 0;
  uint64_t RawValue = 0;
  bool Done = false;

  void moveToFirst();
  void moveToEnd();

  /// \return the vm address of the start of __TEXT segment.
  uint64_t textAddress() const { return TextAddress; }

private:
  uint64_t TextAddress;
};

class MachOChainedFixupEntry : public MachOAbstractFixupEntry {
public:
  enum class FixupKind { Bind, Rebase };

  MachOChainedFixupEntry(Error *Err, const MachOObjectFile *O, bool Parse);

  bool operator==(const MachOChainedFixupEntry &) const;

  bool isBind() const { return Kind == FixupKind::Bind; }
  bool isRebase() const { return Kind == FixupKind::Rebase; }

  void moveNext();
  void moveToFirst();
  void moveToEnd();

private:
  void findNextPageWithFixups();

  std::vector<ChainedFixupTarget> FixupTargets;
  std::vector<ChainedFixupsSegment> Segments;
  ArrayRef<uint8_t> SegmentData;
  FixupKind Kind;
  uint32_t InfoSegIndex = 0; // Index into Segments
  uint32_t PageIndex = 0;    // Index into Segments[InfoSegIdx].PageStarts
  uint32_t PageOffset = 0;   // Page offset of the current fixup
};
using fixup_iterator = content_iterator<MachOChainedFixupEntry>;

class MachOObjectFile : public ObjectFile {
public:
  struct LoadCommandInfo {
    const char *Ptr;      // Where in memory the load command is.
    MachO::load_command C; // The command itself.
  };
  using LoadCommandList = SmallVector<LoadCommandInfo, 4>;
  using load_command_iterator = LoadCommandList::const_iterator;

  static Expected<std::unique_ptr<MachOObjectFile>>
  create(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
         uint32_t UniversalCputype = 0, uint32_t UniversalIndex = 0);

  static bool isMachOPairedReloc(uint64_t RelocType, uint64_t Arch);

  void moveSymbolNext(DataRefImpl &Symb) const override;

  uint64_t getNValue(DataRefImpl Sym) const;
  Expected<StringRef> getSymbolName(DataRefImpl Symb) const override;

  // MachO specific.
  Error checkSymbolTable() const;

  std::error_code getIndirectName(DataRefImpl Symb, StringRef &Res) const;
  unsigned getSectionType(SectionRef Sec) const;

  Expected<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
  uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
  uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
  Expected<SymbolRef::Type> getSymbolType(DataRefImpl Symb) const override;
  Expected<uint32_t> getSymbolFlags(DataRefImpl Symb) const override;
  Expected<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
  unsigned getSymbolSectionID(SymbolRef Symb) const;
  unsigned getSectionID(SectionRef Sec) const;

  void moveSectionNext(DataRefImpl &Sec) const override;
  Expected<StringRef> getSectionName(DataRefImpl Sec) const override;
  uint64_t getSectionAddress(DataRefImpl Sec) const override;
  uint64_t getSectionIndex(DataRefImpl Sec) const override;
  uint64_t getSectionSize(DataRefImpl Sec) const override;
  ArrayRef<uint8_t> getSectionContents(uint32_t Offset, uint64_t Size) const;
  Expected<ArrayRef<uint8_t>>
  getSectionContents(DataRefImpl Sec) const override;
  uint64_t getSectionAlignment(DataRefImpl Sec) const override;
  Expected<SectionRef> getSection(unsigned SectionIndex) const;
  Expected<SectionRef> getSection(StringRef SectionName) const;
  bool isSectionCompressed(DataRefImpl Sec) const override;
  bool isSectionText(DataRefImpl Sec) const override;
  bool isSectionData(DataRefImpl Sec) const override;
  bool isSectionBSS(DataRefImpl Sec) const override;
  bool isSectionVirtual(DataRefImpl Sec) const override;
  bool isSectionBitcode(DataRefImpl Sec) const override;
  bool isDebugSection(DataRefImpl Sec) const override;

  /// Return the raw contents of an entire segment.
  ArrayRef<uint8_t> getSegmentContents(StringRef SegmentName) const;
  ArrayRef<uint8_t> getSegmentContents(size_t SegmentIndex) const;

  /// When dsymutil generates the companion file, it strips all unnecessary
  /// sections (e.g. everything in the __TEXT segment) by omitting their body
  /// and setting the offset in their corresponding load command to zero.
  ///
  /// While the load command itself is valid, reading the section corresponds
  /// to reading the number of bytes specified in the load command, starting
  /// from offset 0 (i.e. the Mach-O header at the beginning of the file).
  bool isSectionStripped(DataRefImpl Sec) const override;

  relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
  relocation_iterator section_rel_end(DataRefImpl Sec) const override;

  relocation_iterator extrel_begin() const;
  relocation_iterator extrel_end() const;
  iterator_range<relocation_iterator> external_relocations() const {
    return make_range(extrel_begin(), extrel_end());
  }

  relocation_iterator locrel_begin() const;
  relocation_iterator locrel_end() const;

  void moveRelocationNext(DataRefImpl &Rel) const override;
  uint64_t getRelocationOffset(DataRefImpl Rel) const override;
  symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
  section_iterator getRelocationSection(DataRefImpl Rel) const;
  uint64_t getRelocationType(DataRefImpl Rel) const override;
  void getRelocationTypeName(DataRefImpl Rel,
                             SmallVectorImpl<char> &Result) const override;
  uint8_t getRelocationLength(DataRefImpl Rel) const;

  // MachO specific.
  std::error_code getLibraryShortNameByIndex(unsigned Index, StringRef &) const;
  uint32_t getLibraryCount() const;

  section_iterator getRelocationRelocatedSection(relocation_iterator Rel) const;

  // TODO: Would be useful to have an iterator-based version
  // of the load command interface too.

  basic_symbol_iterator symbol_begin() const override;
  basic_symbol_iterator symbol_end() const override;

  bool is64Bit() const override;

  // MachO specific.
  symbol_iterator getSymbolByIndex(unsigned Index) const;
  uint64_t getSymbolIndex(DataRefImpl Symb) const;

  section_iterator section_begin() const override;
  section_iterator section_end() const override;

  uint8_t getBytesInAddress() const override;

  StringRef getFileFormatName() const override;
  Triple::ArchType getArch() const override;
  Expected<SubtargetFeatures> getFeatures() const override {
    return SubtargetFeatures();
  }
  Triple getArchTriple(const char **McpuDefault = nullptr) const;

  relocation_iterator section_rel_begin(unsigned Index) const;
  relocation_iterator section_rel_end(unsigned Index) const;

  dice_iterator begin_dices() const;
  dice_iterator end_dices() const;

  load_command_iterator begin_load_commands() const;
  load_command_iterator end_load_commands() const;
  iterator_range<load_command_iterator> load_commands() const;

  /// For use iterating over all exported symbols.
  iterator_range<export_iterator> exports(Error &Err) const;

  /// For use examining a trie not in a MachOObjectFile.
  static iterator_range<export_iterator> exports(Error &Err,
                                                 ArrayRef<uint8_t> Trie,
                                                 const MachOObjectFile *O =
                                                                      nullptr);

  /// For use iterating over all rebase table entries.
  iterator_range<rebase_iterator> rebaseTable(Error &Err);

  /// For use examining rebase opcodes in a MachOObjectFile.
  static iterator_range<rebase_iterator> rebaseTable(Error &Err,
                                                     MachOObjectFile *O,
                                                     ArrayRef<uint8_t> Opcodes,
                                                     bool is64);

  /// For use iterating over all bind table entries.
  iterator_range<bind_iterator> bindTable(Error &Err);

  /// For iterating over all chained fixups.
  iterator_range<fixup_iterator> fixupTable(Error &Err);

  /// For use iterating over all lazy bind table entries.
  iterator_range<bind_iterator> lazyBindTable(Error &Err);

  /// For use iterating over all weak bind table entries.
  iterator_range<bind_iterator> weakBindTable(Error &Err);

  /// For use examining bind opcodes in a MachOObjectFile.
  static iterator_range<bind_iterator> bindTable(Error &Err,
                                                 MachOObjectFile *O,
                                                 ArrayRef<uint8_t> Opcodes,
                                                 bool is64,
                                                 MachOBindEntry::Kind);

  // Given a SegIndex, SegOffset, and PointerSize, verify a valid section exists
  // that fully contains a pointer at that location. Multiple fixups in a bind
  // (such as with the BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB opcode) can
  // be tested via the Count and Skip parameters.
  //
  // This is used by MachOBindEntry::moveNext() to validate a MachOBindEntry.
  const char *BindEntryCheckSegAndOffsets(int32_t SegIndex, uint64_t SegOffset,
                                         uint8_t PointerSize, uint32_t Count=1,
                                          uint32_t Skip=0) const {
    return BindRebaseSectionTable->checkSegAndOffsets(SegIndex, SegOffset,
                                                     PointerSize, Count, Skip);
  }

  // Given a SegIndex, SegOffset, and PointerSize, verify a valid section exists
  // that fully contains a pointer at that location. Multiple fixups in a rebase
  // (such as with the REBASE_OPCODE_DO_*_TIMES* opcodes) can be tested via the
  // Count and Skip parameters.
  //
  // This is used by MachORebaseEntry::moveNext() to validate a MachORebaseEntry
  const char *RebaseEntryCheckSegAndOffsets(int32_t SegIndex,
                                            uint64_t SegOffset,
                                            uint8_t PointerSize,
                                            uint32_t Count=1,
                                            uint32_t Skip=0) const {
    return BindRebaseSectionTable->checkSegAndOffsets(SegIndex, SegOffset,
                                                      PointerSize, Count, Skip);
  }

  /// For use with the SegIndex of a checked Mach-O Bind or Rebase entry to
  /// get the segment name.
  StringRef BindRebaseSegmentName(int32_t SegIndex) const {
    return BindRebaseSectionTable->segmentName(SegIndex);
  }

  /// For use with a SegIndex,SegOffset pair from a checked Mach-O Bind or
  /// Rebase entry to get the section name.
  StringRef BindRebaseSectionName(uint32_t SegIndex, uint64_t SegOffset) const {
    return BindRebaseSectionTable->sectionName(SegIndex, SegOffset);
  }

  /// For use with a SegIndex,SegOffset pair from a checked Mach-O Bind or
  /// Rebase entry to get the address.
  uint64_t BindRebaseAddress(uint32_t SegIndex, uint64_t SegOffset) const {
    return BindRebaseSectionTable->address(SegIndex, SegOffset);
  }

  // In a Mach-O file, sections carry a segment name. This matters for .o
  // files, which have a single segment: the field specifies which segment a
  // section should be placed in within the final linked object.
  StringRef getSectionFinalSegmentName(DataRefImpl Sec) const;

  // Names are stored as 16 bytes. These methods return the raw 16 bytes without
  // interpreting them as a C string.
  ArrayRef<char> getSectionRawName(DataRefImpl Sec) const;
  ArrayRef<char> getSectionRawFinalSegmentName(DataRefImpl Sec) const;

  // MachO specific Info about relocations.
  bool isRelocationScattered(const MachO::any_relocation_info &RE) const;
  unsigned getPlainRelocationSymbolNum(
                                    const MachO::any_relocation_info &RE) const;
  bool getPlainRelocationExternal(const MachO::any_relocation_info &RE) const;
  bool getScatteredRelocationScattered(
                                    const MachO::any_relocation_info &RE) const;
  uint32_t getScatteredRelocationValue(
                                    const MachO::any_relocation_info &RE) const;
  uint32_t getScatteredRelocationType(
                                    const MachO::any_relocation_info &RE) const;
  unsigned getAnyRelocationAddress(const MachO::any_relocation_info &RE) const;
  unsigned getAnyRelocationPCRel(const MachO::any_relocation_info &RE) const;
  unsigned getAnyRelocationLength(const MachO::any_relocation_info &RE) const;
  unsigned getAnyRelocationType(const MachO::any_relocation_info &RE) const;
  SectionRef getAnyRelocationSection(const MachO::any_relocation_info &RE) const;

  // MachO specific structures.
  MachO::section getSection(DataRefImpl DRI) const;
  MachO::section_64 getSection64(DataRefImpl DRI) const;
  MachO::section getSection(const LoadCommandInfo &L, unsigned Index) const;
  MachO::section_64 getSection64(const LoadCommandInfo &L,unsigned Index) const;
  MachO::nlist getSymbolTableEntry(DataRefImpl DRI) const;
  MachO::nlist_64 getSymbol64TableEntry(DataRefImpl DRI) const;

  MachO::linkedit_data_command
  getLinkeditDataLoadCommand(const LoadCommandInfo &L) const;
  MachO::segment_command
  getSegmentLoadCommand(const LoadCommandInfo &L) const;
  MachO::segment_command_64
  getSegment64LoadCommand(const LoadCommandInfo &L) const;
  MachO::linker_option_command
  getLinkerOptionLoadCommand(const LoadCommandInfo &L) const;
  MachO::version_min_command
  getVersionMinLoadCommand(const LoadCommandInfo &L) const;
  MachO::note_command
  getNoteLoadCommand(const LoadCommandInfo &L) const;
  MachO::build_version_command
  getBuildVersionLoadCommand(const LoadCommandInfo &L) const;
  MachO::build_tool_version
  getBuildToolVersion(unsigned index) const;
  MachO::dylib_command
  getDylibIDLoadCommand(const LoadCommandInfo &L) const;
  MachO::dyld_info_command
  getDyldInfoLoadCommand(const LoadCommandInfo &L) const;
  MachO::dylinker_command
  getDylinkerCommand(const LoadCommandInfo &L) const;
  MachO::uuid_command
  getUuidCommand(const LoadCommandInfo &L) const;
  MachO::rpath_command
  getRpathCommand(const LoadCommandInfo &L) const;
  MachO::source_version_command
  getSourceVersionCommand(const LoadCommandInfo &L) const;
  MachO::entry_point_command
  getEntryPointCommand(const LoadCommandInfo &L) const;
  MachO::encryption_info_command
  getEncryptionInfoCommand(const LoadCommandInfo &L) const;
  MachO::encryption_info_command_64
  getEncryptionInfoCommand64(const LoadCommandInfo &L) const;
  MachO::sub_framework_command
  getSubFrameworkCommand(const LoadCommandInfo &L) const;
  MachO::sub_umbrella_command
  getSubUmbrellaCommand(const LoadCommandInfo &L) const;
  MachO::sub_library_command
  getSubLibraryCommand(const LoadCommandInfo &L) const;
  MachO::sub_client_command
  getSubClientCommand(const LoadCommandInfo &L) const;
  MachO::routines_command
  getRoutinesCommand(const LoadCommandInfo &L) const;
  MachO::routines_command_64
  getRoutinesCommand64(const LoadCommandInfo &L) const;
  MachO::thread_command
  getThreadCommand(const LoadCommandInfo &L) const;

  MachO::any_relocation_info getRelocation(DataRefImpl Rel) const;
  MachO::data_in_code_entry getDice(DataRefImpl Rel) const;
  const MachO::mach_header &getHeader() const;
  const MachO::mach_header_64 &getHeader64() const;
  uint32_t
  getIndirectSymbolTableEntry(const MachO::dysymtab_command &DLC,
                              unsigned Index) const;
  MachO::data_in_code_entry getDataInCodeTableEntry(uint32_t DataOffset,
                                                    unsigned Index) const;
  MachO::symtab_command getSymtabLoadCommand() const;
  MachO::dysymtab_command getDysymtabLoadCommand() const;
  MachO::linkedit_data_command getDataInCodeLoadCommand() const;
  MachO::linkedit_data_command getLinkOptHintsLoadCommand() const;
  ArrayRef<uint8_t> getDyldInfoRebaseOpcodes() const;
  ArrayRef<uint8_t> getDyldInfoBindOpcodes() const;
  ArrayRef<uint8_t> getDyldInfoWeakBindOpcodes() const;
  ArrayRef<uint8_t> getDyldInfoLazyBindOpcodes() const;
  ArrayRef<uint8_t> getDyldInfoExportsTrie() const;

  /// If the optional is std::nullopt, no header was found, but the object was
  /// well-formed.
  Expected<std::optional<MachO::dyld_chained_fixups_header>>
  getChainedFixupsHeader() const;
  Expected<std::vector<ChainedFixupTarget>> getDyldChainedFixupTargets() const;

  // Note: This is a limited, temporary API, which will be removed when Apple
  // upstreams their implementation. Please do not rely on this.
  Expected<std::optional<MachO::linkedit_data_command>>
  getChainedFixupsLoadCommand() const;
  // Returns the number of segments listed in dyld_chained_starts_in_image, and
  // a ChainedFixupsSegment for each segment that has fixups.
  Expected<std::pair<size_t, std::vector<ChainedFixupsSegment>>>
  getChainedFixupsSegments() const;
  ArrayRef<uint8_t> getDyldExportsTrie() const;

  SmallVector<uint64_t> getFunctionStarts() const;
  ArrayRef<uint8_t> getUuid() const;

  StringRef getStringTableData() const;

  void ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const;

  static StringRef guessLibraryShortName(StringRef Name, bool &isFramework,
                                         StringRef &Suffix);

  static Triple::ArchType getArch(uint32_t CPUType, uint32_t CPUSubType);
  static Triple getArchTriple(uint32_t CPUType, uint32_t CPUSubType,
                              const char **McpuDefault = nullptr,
                              const char **ArchFlag = nullptr);
  static bool isValidArch(StringRef ArchFlag);
  static ArrayRef<StringRef> getValidArchs();
  static Triple getHostArch();

  bool isRelocatableObject() const override;

  StringRef mapDebugSectionName(StringRef Name) const override;

  llvm::binaryformat::Swift5ReflectionSectionKind
  mapReflectionSectionNameToEnumValue(StringRef SectionName) const override;

  bool hasPageZeroSegment() const { return HasPageZeroSegment; }

  static bool classof(const Binary *v) {
    return v->isMachO();
  }

  static uint32_t
  getVersionMinMajor(MachO::version_min_command &C, bool SDK) {
    uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
    return (VersionOrSDK >> 16) & 0xffff;
  }

  static uint32_t
  getVersionMinMinor(MachO::version_min_command &C, bool SDK) {
    uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
    return (VersionOrSDK >> 8) & 0xff;
  }

  static uint32_t
  getVersionMinUpdate(MachO::version_min_command &C, bool SDK) {
    uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
    return VersionOrSDK & 0xff;
  }

  static std::string getBuildPlatform(uint32_t platform) {
    switch (platform) {
    case MachO::PLATFORM_MACOS: return "macos";
    case MachO::PLATFORM_IOS: return "ios";
    case MachO::PLATFORM_TVOS: return "tvos";
    case MachO::PLATFORM_WATCHOS: return "watchos";
    case MachO::PLATFORM_BRIDGEOS: return "bridgeos";
    case MachO::PLATFORM_MACCATALYST: return "macCatalyst";
    case MachO::PLATFORM_IOSSIMULATOR: return "iossimulator";
    case MachO::PLATFORM_TVOSSIMULATOR: return "tvossimulator";
    case MachO::PLATFORM_WATCHOSSIMULATOR: return "watchossimulator";
    case MachO::PLATFORM_DRIVERKIT: return "driverkit";
    default:
      std::string ret;
      raw_string_ostream ss(ret);
      ss << format_hex(platform, 8, true);
      return ss.str();
    }
  }

  static std::string getBuildTool(uint32_t tools) {
    switch (tools) {
    case MachO::TOOL_CLANG: return "clang";
    case MachO::TOOL_SWIFT: return "swift";
    case MachO::TOOL_LD: return "ld";
    case MachO::TOOL_LLD:
      return "lld";
    default:
      std::string ret;
      raw_string_ostream ss(ret);
      ss << format_hex(tools, 8, true);
      return ss.str();
    }
  }

  static std::string getVersionString(uint32_t version) {
    uint32_t major = (version >> 16) & 0xffff;
    uint32_t minor = (version >> 8) & 0xff;
    uint32_t update = version & 0xff;

    SmallString<32> Version;
    Version = utostr(major) + "." + utostr(minor);
    if (update != 0)
      Version += "." + utostr(update);
    return std::string(Version.str());
  }
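
  // Worked example (illustrative): versions are packed as xxxx.yy.zz, with
  // the major number in the high 16 bits and minor/update in the low bytes:
  //   getVersionString(0x000D0102) == "13.1.2"  // 0x000D=13, 0x01=1, 0x02=2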

  /// If the input path is a .dSYM bundle (as created by the dsymutil tool),
  /// return the paths to the object files found in the bundle, otherwise return
  /// an empty vector. If the path appears to be a .dSYM bundle but no objects
  /// were found or there was a filesystem error, then return an error.
  static Expected<std::vector<std::string>>
  findDsymObjectMembers(StringRef Path);

private:
  MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
                  Error &Err, uint32_t UniversalCputype = 0,
                  uint32_t UniversalIndex = 0);

  uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;

  union {
    MachO::mach_header_64 Header64;
    MachO::mach_header Header;
  };
  using SectionList = SmallVector<const char*, 1>;
  SectionList Sections;
  using LibraryList = SmallVector<const char*, 1>;
  LibraryList Libraries;
  LoadCommandList LoadCommands;
  using LibraryShortName = SmallVector<StringRef, 1>;
  using BuildToolList = SmallVector<const char*, 1>;
  BuildToolList BuildTools;
  mutable LibraryShortName LibrariesShortNames;
  std::unique_ptr<BindRebaseSegInfo> BindRebaseSectionTable;
  const char *SymtabLoadCmd = nullptr;
  const char *DysymtabLoadCmd = nullptr;
  const char *DataInCodeLoadCmd = nullptr;
  const char *LinkOptHintsLoadCmd = nullptr;
  const char *DyldInfoLoadCmd = nullptr;
  const char *FuncStartsLoadCmd = nullptr;
  const char *DyldChainedFixupsLoadCmd = nullptr;
  const char *DyldExportsTrieLoadCmd = nullptr;
  const char *UuidLoadCmd = nullptr;
  bool HasPageZeroSegment = false;
};

/// DiceRef
inline DiceRef::DiceRef(DataRefImpl DiceP, const ObjectFile *Owner)
  : DicePimpl(DiceP) , OwningObject(Owner) {}

inline bool DiceRef::operator==(const DiceRef &Other) const {
  return DicePimpl == Other.DicePimpl;
}

inline bool DiceRef::operator<(const DiceRef &Other) const {
  return DicePimpl < Other.DicePimpl;
}

inline void DiceRef::moveNext() {
  const MachO::data_in_code_entry *P =
    reinterpret_cast<const MachO::data_in_code_entry *>(DicePimpl.p);
  DicePimpl.p = reinterpret_cast<uintptr_t>(P + 1);
}

// Since a Mach-O data in code reference, a DiceRef, can only be created when
// the OwningObject ObjectFile is a MachOObjectFile a static_cast<> is used for
// the methods that get the values of the fields of the reference.

inline std::error_code DiceRef::getOffset(uint32_t &Result) const {
  const MachOObjectFile *MachOOF =
    static_cast<const MachOObjectFile *>(OwningObject);
  MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
  Result = Dice.offset;
  return std::error_code();
}

inline std::error_code DiceRef::getLength(uint16_t &Result) const {
  const MachOObjectFile *MachOOF =
    static_cast<const MachOObjectFile *>(OwningObject);
  MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
  Result = Dice.length;
  return std::error_code();
}

inline std::error_code DiceRef::getKind(uint16_t &Result) const {
  const MachOObjectFile *MachOOF =
    static_cast<const MachOObjectFile *>(OwningObject);
  MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
  Result = Dice.kind;
  return std::error_code();
}

inline DataRefImpl DiceRef::getRawDataRefImpl() const {
  return DicePimpl;
}

inline const ObjectFile *DiceRef::getObjectFile() const {
  return OwningObject;
}
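
// Usage sketch (illustrative, not part of the original header): walking the
// data-in-code table of a hypothetical MachOObjectFile "MachO".
//
//   for (dice_iterator I = MachO.begin_dices(), E = MachO.end_dices();
//        I != E; ++I) {
//     uint32_t Offset;
//     uint16_t Length, Kind;
//     if (!I->getOffset(Offset) && !I->getLength(Length) && !I->getKind(Kind))
//       outs() << Offset << " " << Length << " " << Kind << "\n";
//   }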

} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_MACHO_H

//===-- ResourceScriptTokenList.h -------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This is a part of llvm-rc tokens header. It lists all the possible tokens
// that might occur in a correct .rc script.
//
//===---------------------------------------------------------------------===//


// Long tokens. They might consist of more than one character.
TOKEN(Invalid)      // Invalid token. Should not occur in a valid script.
TOKEN(Int)          // Integer (decimal, octal or hexadecimal).
TOKEN(String)       // String value.
TOKEN(Identifier)   // Script identifier (resource name or type).

// Short tokens. They usually consist of exactly one character.
// The definitions are of the form SHORT_TOKEN(TokenName, TokenChar).
// TokenChar is the one-character token representation occurring in the correct
// .rc scripts.
SHORT_TOKEN(BlockBegin, '{')   // Start of the script block; can also be BEGIN.
SHORT_TOKEN(BlockEnd, '}')     // End of the block; can also be END.
SHORT_TOKEN(Comma, ',')        // Comma - resource arguments separator.
SHORT_TOKEN(Plus, '+')         // Addition operator.
SHORT_TOKEN(Minus, '-')        // Subtraction operator.
SHORT_TOKEN(Pipe, '|')         // Bitwise-OR operator.
SHORT_TOKEN(Amp, '&')          // Bitwise-AND operator.
SHORT_TOKEN(Tilde, '~')        // Bitwise-NOT operator.
SHORT_TOKEN(LeftParen, '(')    // Left parenthesis in the script expressions.
SHORT_TOKEN(RightParen, ')')   // Right parenthesis.
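
// Consumption sketch (illustrative): consumers define the two macros, include
// this list, then undefine them; this is exactly how ResourceScriptToken.h
// builds the RCToken::Kind enumeration.
//
//   #define TOKEN(Name) Name,
//   #define SHORT_TOKEN(Name, Ch) Name,
//   #include "ResourceScriptTokenList.h"
//   #undef TOKEN
//   #undef SHORT_TOKEN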

//===-- ResourceProcessor.h -------------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//

#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H
#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

#include <memory>
#include <vector>


namespace llvm {

class WindowsResourceProcessor {
public:
  using PathType = SmallVector<char, 64>;

  WindowsResourceProcessor() {}

  void addDefine(StringRef Key, StringRef Value = StringRef()) {
    PreprocessorDefines.emplace_back(Key, Value);
  }
  void addInclude(const PathType &IncludePath) {
    IncludeList.push_back(IncludePath);
  }
  void setVerbose(bool Verbose) { IsVerbose = Verbose; }
  void setNullAtEnd(bool NullAtEnd) { AppendNull = NullAtEnd; }

  Error process(StringRef InputData,
    std::unique_ptr<raw_fd_ostream> OutputStream);

private:
  StringRef InputData;
  std::vector<PathType> IncludeList;
  std::vector<std::pair<StringRef, StringRef>> PreprocessorDefines;
  bool IsVerbose = false;
  bool AppendNull = false;
};
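
// Usage sketch (illustrative, not part of the original header): "Script" is
// a hypothetical buffer of .rc source and "OS" a unique_ptr<raw_fd_ostream>
// opened elsewhere.
//
//   WindowsResourceProcessor P;
//   P.addDefine("MY_MACRO", "1");  // hypothetical preprocessor define
//   P.setVerbose(true);
//   if (Error E = P.process(Script, std::move(OS)))
//     consumeError(std::move(E));  // or report the failure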

} // namespace llvm

#endif // LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_PROCESSOR_H

//===-- ResourceScriptToken.h -----------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This declares the .rc script tokens.
// The list of available tokens is located at ResourceScriptTokenList.h.
//
// Ref: msdn.microsoft.com/en-us/library/windows/desktop/aa380599(v=vs.85).aspx
//
//===---------------------------------------------------------------------===//

#ifndef LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H
#define LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

// A definition of a single resource script token. Each token has its kind
// (declared in ResourceScriptTokenList) and holds a value - a reference
// representation of the token.
// RCToken does not claim ownership of its value. The memory buffer containing
// the token value must be kept alive in a safe place and must not be freed
// or reallocated for as long as the token is in use.
class RCToken {
public:
  enum class Kind {
#define TOKEN(Name) Name,
#define SHORT_TOKEN(Name, Ch) Name,
#include "ResourceScriptTokenList.h"
#undef TOKEN
#undef SHORT_TOKEN
  };

  RCToken(RCToken::Kind RCTokenKind, StringRef Value);

  // Get an integer value of the integer token.
  uint32_t intValue() const;
  bool isLongInt() const;

  StringRef value() const;
  Kind kind() const;

  // Check if a token describes a binary operator.
  bool isBinaryOp() const;

private:
  Kind TokenKind;
  StringRef TokenValue;
};

} // namespace llvm

#endif // LLVM_INCLUDE_LLVM_SUPPORT_WINDOWS_RESOURCE_SCRIPTTOKEN_H

//===- SymbolRemappingReader.h - Read symbol remapping file -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions needed for reading and applying symbol
// remapping files.
//
// Support is provided only for the Itanium C++ name mangling scheme for now.
//
// NOTE: If you are making changes to this file format, please remember
//       to document them in the Clang documentation at
//       tools/clang/docs/UsersManual.rst.
//
// File format
// -----------
//
// The symbol remappings are written as an ASCII text file. Blank lines and
// lines starting with a # are ignored. All other lines specify a kind of
// mangled name fragment, along with two fragments of that kind that should
// be treated as equivalent, separated by spaces.
//
// See http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling for a
// description of the Itanium name mangling scheme.
//
// The accepted fragment kinds are:
//
//  * name  A <name>, such as 6foobar or St3__1
//  * type  A <type>, such as Ss or N4llvm9StringRefE
//  * encoding  An <encoding> (a complete mangling without the leading _Z)
//
// For example:
//
// # Ignore int / long differences to treat symbols from 32-bit and 64-bit
// # builds with differing size_t / ptrdiff_t / intptr_t as equivalent.
// type i l
// type j m
//
// # Ignore differences between libc++ and libstdc++, and between libstdc++'s
// # C++98 and C++11 ABIs.
// name 3std St3__1
// name 3std St7__cxx11
//
// # Remap a function overload to a specialization of a template (including
// # any local symbols declared within it).
// encoding N2NS1fEi N2NS1fIiEEvT_
//
// # Substitutions must be remapped separately from namespace 'std' for now.
// name Sa NSt3__19allocatorE
// name Sb NSt3__112basic_stringE
// type Ss NSt3__112basic_stringIcSt11char_traitsIcESaE
// # ...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_SYMBOLREMAPPINGREADER_H
#define LLVM_PROFILEDATA_SYMBOLREMAPPINGREADER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ProfileData/ItaniumManglingCanonicalizer.h"
#include "llvm/Support/Error.h"

namespace llvm {

class MemoryBuffer;

class SymbolRemappingParseError : public ErrorInfo<SymbolRemappingParseError> {
public:
  SymbolRemappingParseError(StringRef File, int64_t Line, const Twine &Message)
      : File(File), Line(Line), Message(Message.str()) {}

  void log(llvm::raw_ostream &OS) const override {
    OS << File << ':' << Line << ": " << Message;
  }
  std::error_code convertToErrorCode() const override {
    return llvm::inconvertibleErrorCode();
  }

  StringRef getFileName() const { return File; }
  int64_t getLineNum() const { return Line; }
  StringRef getMessage() const { return Message; }

  static char ID;

private:
  std::string File;
  int64_t Line;
  std::string Message;
};

/// Reader for symbol remapping files.
///
/// Remaps the symbol names in profile data to match those in the program
/// according to a set of rules specified in a given file.
class SymbolRemappingReader {
public:
  /// Read remappings from the given buffer, which must live as long as
  /// the remapper.
  Error read(MemoryBuffer &B);

  /// A Key represents an equivalence class of symbol names.
  using Key = uintptr_t;

  /// Construct a key for the given symbol, or return an existing one if an
  /// equivalent name has already been inserted. The symbol name must live
  /// as long as the remapper.
  ///
  /// The result will be Key() if the name cannot be remapped (typically
  /// because it is not a valid mangled name).
  Key insert(StringRef FunctionName) {
    return Canonicalizer.canonicalize(FunctionName);
  }

  /// Map the given symbol name into the key for the corresponding equivalence
  /// class.
  ///
  /// The result will typically be Key() if no equivalent symbol has been
  /// inserted, but this is not guaranteed: a Key different from all keys ever
  /// returned by \c insert may be returned instead.
  Key lookup(StringRef FunctionName) {
    return Canonicalizer.lookup(FunctionName);
  }

private:
  ItaniumManglingCanonicalizer Canonicalizer;
};
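
// Usage sketch (illustrative, not part of the original header): with the rule
// "type i l" from the file-format example above, f(int) and f(long) land in
// the same equivalence class. "RemapBuf" is a hypothetical MemoryBuffer that
// outlives the reader.
//
//   SymbolRemappingReader Reader;
//   if (Error E = Reader.read(*RemapBuf))
//     consumeError(std::move(E));  // a SymbolRemappingParseError
//   SymbolRemappingReader::Key K1 = Reader.insert("_Z1fi");  // f(int)
//   SymbolRemappingReader::Key K2 = Reader.lookup("_Z1fl");  // f(long)
//   bool Same = K1 && K1 == K2;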

} // end namespace llvm

#endif // LLVM_PROFILEDATA_SYMBOLREMAPPINGREADER_H

//===- InstrProfCorrelator.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file defines InstrProfCorrelator used to generate PGO profiles from
// raw profile data and debug info.
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
#define LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/YAMLTraits.h"
#include <optional>
#include <vector>

namespace llvm {
class DWARFContext;
class DWARFDie;
namespace object {
class ObjectFile;
}

/// InstrProfCorrelator - A base class used to correlate raw instrumentation
/// data with the functions it belongs to.
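///
/// A minimal usage sketch (assuming "a.out" is a binary built with debug info
/// for profile correlation; error handling elided):
/// \code
///   auto CorrelatorOrErr = InstrProfCorrelator::get("a.out");
///   if (!CorrelatorOrErr)
///     ... handle CorrelatorOrErr.takeError() ...
///   if (Error E = (*CorrelatorOrErr)->correlateProfileData())
///     ... handle E ...
/// \endcode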
class InstrProfCorrelator {
public:
  static llvm::Expected<std::unique_ptr<InstrProfCorrelator>>
  get(StringRef DebugInfoFilename);

  /// Construct a ProfileData vector used to correlate raw instrumentation
  /// data with the functions it belongs to.
  virtual Error correlateProfileData() = 0;

  /// Process debug info and dump the correlation data.
  virtual Error dumpYaml(raw_ostream &OS) = 0;

  /// Return the number of ProfileData elements.
  std::optional<size_t> getDataSize() const;

  /// Return a pointer to the names string that this class constructs.
  const char *getNamesPointer() const { return Names.c_str(); }

  /// Return the number of bytes in the names string.
  size_t getNamesSize() const { return Names.size(); }

  /// Return the size of the counters section in bytes.
  uint64_t getCountersSectionSize() const {
    return Ctx->CountersSectionEnd - Ctx->CountersSectionStart;
  }

  static const char *FunctionNameAttributeName;
  static const char *CFGHashAttributeName;
  static const char *NumCountersAttributeName;

  enum InstrProfCorrelatorKind { CK_32Bit, CK_64Bit };
  InstrProfCorrelatorKind getKind() const { return Kind; }
  virtual ~InstrProfCorrelator() = default;

protected:
  struct Context {
    static llvm::Expected<std::unique_ptr<Context>>
    get(std::unique_ptr<MemoryBuffer> Buffer, const object::ObjectFile &Obj);
    std::unique_ptr<MemoryBuffer> Buffer;
    /// The address range of the __llvm_prf_cnts section.
    uint64_t CountersSectionStart;
    uint64_t CountersSectionEnd;
    /// True if target and host have different endian orders.
    bool ShouldSwapBytes;
  };
  const std::unique_ptr<Context> Ctx;

  InstrProfCorrelator(InstrProfCorrelatorKind K, std::unique_ptr<Context> Ctx)
      : Ctx(std::move(Ctx)), Kind(K) {}

  std::string Names;
  std::vector<std::string> NamesVec;

  struct Probe {
    std::string FunctionName;
    std::optional<std::string> LinkageName;
    yaml::Hex64 CFGHash;
    yaml::Hex64 CounterOffset;
    uint32_t NumCounters;
    std::optional<std::string> FilePath;
    std::optional<int> LineNumber;
  };

  struct CorrelationData {
    std::vector<Probe> Probes;
  };

  friend struct yaml::MappingTraits<Probe>;
  friend struct yaml::SequenceElementTraits<Probe>;
  friend struct yaml::MappingTraits<CorrelationData>;

private:
  static llvm::Expected<std::unique_ptr<InstrProfCorrelator>>
  get(std::unique_ptr<MemoryBuffer> Buffer);

  const InstrProfCorrelatorKind Kind;
};

/// InstrProfCorrelatorImpl - A child of InstrProfCorrelator with a template
/// pointer type so that the ProfileData vector can be materialized.
template <class IntPtrT>
class InstrProfCorrelatorImpl : public InstrProfCorrelator {
public:
  InstrProfCorrelatorImpl(std::unique_ptr<InstrProfCorrelator::Context> Ctx);
  static bool classof(const InstrProfCorrelator *C);

  /// Return a pointer to the underlying ProfileData vector that this class
  /// constructs.
  const RawInstrProf::ProfileData<IntPtrT> *getDataPointer() const {
    return Data.empty() ? nullptr : Data.data();
  }

  /// Return the number of ProfileData elements.
  size_t getDataSize() const { return Data.size(); }

  static llvm::Expected<std::unique_ptr<InstrProfCorrelatorImpl<IntPtrT>>>
  get(std::unique_ptr<InstrProfCorrelator::Context> Ctx,
      const object::ObjectFile &Obj);

protected:
  std::vector<RawInstrProf::ProfileData<IntPtrT>> Data;

  Error correlateProfileData() override;
  virtual void correlateProfileDataImpl(
      InstrProfCorrelator::CorrelationData *Data = nullptr) = 0;

  Error dumpYaml(raw_ostream &OS) override;

  void addProbe(StringRef FunctionName, uint64_t CFGHash, IntPtrT CounterOffset,
                IntPtrT FunctionPtr, uint32_t NumCounters);

private:
  InstrProfCorrelatorImpl(InstrProfCorrelatorKind Kind,
                          std::unique_ptr<InstrProfCorrelator::Context> Ctx)
      : InstrProfCorrelator(Kind, std::move(Ctx)) {}
  llvm::DenseSet<IntPtrT> CounterOffsets;

  // Byte-swap the value if necessary.
  template <class T> T maybeSwap(T Value) const {
    return Ctx->ShouldSwapBytes ? sys::getSwappedBytes(Value) : Value;
  }
};

/// DwarfInstrProfCorrelator - A child of InstrProfCorrelatorImpl that takes
/// DWARF debug info as input to correlate profiles.
template <class IntPtrT>
class DwarfInstrProfCorrelator : public InstrProfCorrelatorImpl<IntPtrT> {
public:
  DwarfInstrProfCorrelator(std::unique_ptr<DWARFContext> DICtx,
                           std::unique_ptr<InstrProfCorrelator::Context> Ctx)
      : InstrProfCorrelatorImpl<IntPtrT>(std::move(Ctx)),
        DICtx(std::move(DICtx)) {}

private:
  std::unique_ptr<DWARFContext> DICtx;

  /// Return the address of the object that the provided DIE symbolizes.
  std::optional<uint64_t> getLocation(const DWARFDie &Die) const;

  /// Returns true if the provided DIE symbolizes an instrumentation probe
  /// symbol.
  static bool isDIEOfProbe(const DWARFDie &Die);

  /// Iterate over DWARF DIEs to find those that symbolize instrumentation
  /// probes and construct the ProfileData vector and Names string.
  ///
  /// Here is some example DWARF for an instrumentation probe we are looking
  /// for:
  /// \code
  ///   DW_TAG_subprogram
  ///   DW_AT_low_pc	(0x0000000000000000)
  ///   DW_AT_high_pc	(0x0000000000000014)
  ///   DW_AT_name	("foo")
  ///     DW_TAG_variable
  ///       DW_AT_name	("__profc_foo")
  ///       DW_AT_location	(DW_OP_addr 0x0)
  ///       DW_TAG_LLVM_annotation
  ///         DW_AT_name	("Function Name")
  ///         DW_AT_const_value	("foo")
  ///       DW_TAG_LLVM_annotation
  ///         DW_AT_name	("CFG Hash")
  ///         DW_AT_const_value	(12345678)
  ///       DW_TAG_LLVM_annotation
  ///         DW_AT_name	("Num Counters")
  ///         DW_AT_const_value	(2)
  ///       NULL
  ///     NULL
  /// \endcode
  void correlateProfileDataImpl(
      InstrProfCorrelator::CorrelationData *Data = nullptr) override;
};

} // end namespace llvm

#endif // LLVM_PROFILEDATA_INSTRPROFCORRELATOR_H
/*===-- MIBEntryDef.inc - MemProf profiling runtime macros -*- C++ -*-======== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/
/*
 * This file defines the macros for the memprof profiling data structures.
 * E.g., to define the memprof MemInfoBlock struct:
 *
 * struct MemInfoBlock {
 * #define MIBEntryDef(NameTag, Name, Type) Type Name;
 * #include "MIBEntryDef.inc"
 * #undef MIBEntryDef
 * };
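 *
 * Another sketch: the same macro can expand into a per-field printer
 * (PrintMIB, MIB, and OS are hypothetical names, not part of this file):
 *
 * void PrintMIB(const MemInfoBlock &MIB, llvm::raw_ostream &OS) {
 * #define MIBEntryDef(NameTag, Name, Type) OS << #Name << ": " << MIB.Name << "\n";
 * #include "MIBEntryDef.inc"
 * #undef MIBEntryDef
 * }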
 *
 * This file has two identical copies. The primary copy lives in LLVM and
 * the other one sits in the compiler-rt/include/profile directory. To make
 * changes in this file, first modify the primary copy and then copy it over
 * to compiler-rt. Testing of any change in this file can start only after
 * the two copies are synced up.
 *
\*===----------------------------------------------------------------------===*/
#ifndef MIBEntryDef
#define MIBEntryDef(NameTag, Name, Type)
#endif

MIBEntryDef(AllocCount = 1, AllocCount, uint32_t)
MIBEntryDef(TotalAccessCount = 2, TotalAccessCount, uint64_t)
MIBEntryDef(MinAccessCount = 3, MinAccessCount, uint64_t)
MIBEntryDef(MaxAccessCount = 4, MaxAccessCount, uint64_t)
MIBEntryDef(TotalSize = 5, TotalSize, uint64_t)
MIBEntryDef(MinSize = 6, MinSize, uint32_t)
MIBEntryDef(MaxSize = 7, MaxSize, uint32_t)
MIBEntryDef(AllocTimestamp = 8, AllocTimestamp, uint32_t)
MIBEntryDef(DeallocTimestamp = 9, DeallocTimestamp, uint32_t)
MIBEntryDef(TotalLifetime = 10, TotalLifetime, uint64_t)
MIBEntryDef(MinLifetime = 11, MinLifetime, uint32_t)
MIBEntryDef(MaxLifetime = 12, MaxLifetime, uint32_t)
MIBEntryDef(AllocCpuId = 13, AllocCpuId, uint32_t)
MIBEntryDef(DeallocCpuId = 14, DeallocCpuId, uint32_t)
MIBEntryDef(NumMigratedCpu = 15, NumMigratedCpu, uint32_t)
MIBEntryDef(NumLifetimeOverlaps = 16, NumLifetimeOverlaps, uint32_t)
MIBEntryDef(NumSameAllocCpu = 17, NumSameAllocCpu, uint32_t)
MIBEntryDef(NumSameDeallocCpu = 18, NumSameDeallocCpu, uint32_t)
MIBEntryDef(DataTypeId = 19, DataTypeId, uint64_t)
MIBEntryDef(TotalAccessDensity = 20, TotalAccessDensity, uint64_t)
MIBEntryDef(MinAccessDensity = 21, MinAccessDensity, uint32_t)
MIBEntryDef(MaxAccessDensity = 22, MaxAccessDensity, uint32_t)
MIBEntryDef(TotalLifetimeAccessDensity = 23, TotalLifetimeAccessDensity, uint64_t)
MIBEntryDef(MinLifetimeAccessDensity = 24, MinLifetimeAccessDensity, uint32_t)
MIBEntryDef(MaxLifetimeAccessDensity = 25, MaxLifetimeAccessDensity, uint32_t)
//===- InstrProfReader.h - Instrumented profiling readers -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading profiling data for instrumentation
// based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_INSTRPROFREADER_H
#define LLVM_PROFILEDATA_INSTRPROFREADER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/Object/BuildID.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/InstrProfCorrelator.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/SwapByteOrder.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

class InstrProfReader;

namespace vfs {
class FileSystem;
} // namespace vfs

/// A file format agnostic iterator over profiling data.
template <class record_type = NamedInstrProfRecord,
          class reader_type = InstrProfReader>
class InstrProfIterator {
public:
  using iterator_category = std::input_iterator_tag;
  using value_type = record_type;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

private:
  reader_type *Reader = nullptr;
  value_type Record;

  void increment() {
    if (Error E = Reader->readNextRecord(Record)) {
      // Handle errors in the reader.
      InstrProfError::take(std::move(E));
      *this = InstrProfIterator();
    }
  }

public:
  InstrProfIterator() = default;
  InstrProfIterator(reader_type *Reader) : Reader(Reader) { increment(); }

  InstrProfIterator &operator++() {
    increment();
    return *this;
  }
  bool operator==(const InstrProfIterator &RHS) const {
    return Reader == RHS.Reader;
  }
  bool operator!=(const InstrProfIterator &RHS) const {
    return Reader != RHS.Reader;
  }
  value_type &operator*() { return Record; }
  value_type *operator->() { return &Record; }
};

/// Base class and interface for reading profiling data of any known instrprof
/// format. Provides an iterator over NamedInstrProfRecords.
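///
/// A minimal read loop (a sketch; error handling elided, and \c Path names an
/// existing profile file):
/// \code
///   auto ReaderOrErr = InstrProfReader::create(Path, *vfs::getRealFileSystem());
///   if (!ReaderOrErr)
///     ... handle ReaderOrErr.takeError() ...
///   for (const NamedInstrProfRecord &Record : **ReaderOrErr)
///     ... use Record.Name, Record.Hash, Record.Counts ...
/// \endcode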
class InstrProfReader {
  instrprof_error LastError = instrprof_error::success;
  std::string LastErrorMsg;

public:
  InstrProfReader() = default;
  virtual ~InstrProfReader() = default;

  /// Read the header. Required before reading the first record.
  virtual Error readHeader() = 0;

  /// Read a single record.
  virtual Error readNextRecord(NamedInstrProfRecord &Record) = 0;

  /// Read a list of binary ids.
  virtual Error readBinaryIds(std::vector<llvm::object::BuildID> &BinaryIds) {
    return success();
  }

  /// Print binary ids.
  virtual Error printBinaryIds(raw_ostream &OS) { return success(); }

  /// Iterator over profile data.
  InstrProfIterator<> begin() { return InstrProfIterator<>(this); }
  InstrProfIterator<> end() { return InstrProfIterator<>(); }

  /// Return the profile version.
  virtual uint64_t getVersion() const = 0;

  virtual bool isIRLevelProfile() const = 0;

  virtual bool hasCSIRLevelProfile() const = 0;

  virtual bool instrEntryBBEnabled() const = 0;

  /// Return true if we must provide debug info to create PGO profiles.
  virtual bool useDebugInfoCorrelate() const { return false; }

  /// Return true if the profile has single-byte counters representing
  /// coverage.
  virtual bool hasSingleByteCoverage() const = 0;

  /// Return true if the profile only instruments function entries.
  virtual bool functionEntryOnly() const = 0;

  /// Return true if profile includes a memory profile.
  virtual bool hasMemoryProfile() const = 0;

  /// Return true if this has a temporal profile.
  virtual bool hasTemporalProfile() const = 0;

  /// Returns a BitsetEnum describing the attributes of the profile. To check
  /// individual attributes, prefer using the helpers above.
  virtual InstrProfKind getProfileKind() const = 0;

  /// Return the PGO symtab. There are three different readers: the Raw,
  /// Text, and Indexed profile readers. The first two are used only by the
  /// llvm-profdata tool, while the indexed profile reader is also used by
  /// the llvm-cov tool and the compiler (backend or frontend). Since
  /// creating the PGO symtab can incur significant runtime and memory
  /// overhead (as it touches data for the whole program), the
  /// InstrProfSymtab for the indexed profile reader should be created on
  /// demand, and it is recommended to be used only for dumping purposes
  /// with llvm-profdata, not with the compiler.
  virtual InstrProfSymtab &getSymtab() = 0;

  /// Compute the sum of counts and return in Sum.
  void accumulateCounts(CountSumOrPercent &Sum, bool IsCS);

protected:
  std::unique_ptr<InstrProfSymtab> Symtab;
  /// A list of temporal profile traces.
  SmallVector<TemporalProfTraceTy> TemporalProfTraces;
  /// The total number of temporal profile traces seen.
  uint64_t TemporalProfTraceStreamSize = 0;

  /// Set the current error and return it.
  Error error(instrprof_error Err, const std::string &ErrMsg = "") {
    LastError = Err;
    LastErrorMsg = ErrMsg;
    if (Err == instrprof_error::success)
      return Error::success();
    return make_error<InstrProfError>(Err, ErrMsg);
  }

  Error error(Error &&E) {
    handleAllErrors(std::move(E), [&](const InstrProfError &IPE) {
      LastError = IPE.get();
      LastErrorMsg = IPE.getMessage();
    });
    return make_error<InstrProfError>(LastError, LastErrorMsg);
  }

  /// Clear the current error and return a successful one.
  Error success() { return error(instrprof_error::success); }

public:
  /// Return true if the reader has finished reading the profile data.
  bool isEOF() { return LastError == instrprof_error::eof; }

  /// Return true if the reader encountered an error reading profiling data.
  bool hasError() { return LastError != instrprof_error::success && !isEOF(); }

  /// Get the current error.
  Error getError() {
    if (hasError())
      return make_error<InstrProfError>(LastError, LastErrorMsg);
    return Error::success();
  }

  /// Factory method to create an appropriately typed reader for the given
  /// instrprof file.
  static Expected<std::unique_ptr<InstrProfReader>>
  create(const Twine &Path, vfs::FileSystem &FS,
         const InstrProfCorrelator *Correlator = nullptr);

  static Expected<std::unique_ptr<InstrProfReader>>
  create(std::unique_ptr<MemoryBuffer> Buffer,
         const InstrProfCorrelator *Correlator = nullptr);

  /// \param Weight For raw profiles, use this as the temporal profile trace
  ///               weight.
  /// \returns a list of temporal profile traces.
  virtual SmallVector<TemporalProfTraceTy> &
  getTemporalProfTraces(std::optional<uint64_t> Weight = {}) {
    // For non-raw profiles we ignore the input weight and instead use the
    // weights already in the traces.
    return TemporalProfTraces;
  }
  /// \returns the total number of temporal profile traces seen.
  uint64_t getTemporalProfTraceStreamSize() {
    return TemporalProfTraceStreamSize;
  }
};

/// Reader for the simple text based instrprof format.
///
/// This format is a simple text format that's suitable for test data. Records
/// are separated by one or more blank lines, and record fields are separated by
/// new lines.
///
/// Each record consists of a function name, a function hash, a number of
/// counters, and then each counter value, in that order.
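///
/// For example, a single record might look like this (a sketch; '#' lines are
/// comments):
/// \code
///   # Func Name:
///   main
///   # Func Hash:
///   784007059655560962
///   # Num Counters:
///   2
///   # Counter Values:
///   100
///   1
/// \endcode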
class TextInstrProfReader : public InstrProfReader {
private:
  /// The profile data file contents.
  std::unique_ptr<MemoryBuffer> DataBuffer;
  /// Iterator over the profile data.
  line_iterator Line;
  /// The attributes of the current profile.
  InstrProfKind ProfileKind = InstrProfKind::Unknown;

  Error readValueProfileData(InstrProfRecord &Record);

  Error readTemporalProfTraceData();

public:
  TextInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer_)
      : DataBuffer(std::move(DataBuffer_)), Line(*DataBuffer, true, '#') {}
  TextInstrProfReader(const TextInstrProfReader &) = delete;
  TextInstrProfReader &operator=(const TextInstrProfReader &) = delete;

  /// Return true if the given buffer is in text instrprof format.
  static bool hasFormat(const MemoryBuffer &Buffer);

  // Text format does not have a version, so return 0.
  uint64_t getVersion() const override { return 0; }

  bool isIRLevelProfile() const override {
    return static_cast<bool>(ProfileKind & InstrProfKind::IRInstrumentation);
  }

  bool hasCSIRLevelProfile() const override {
    return static_cast<bool>(ProfileKind & InstrProfKind::ContextSensitive);
  }

  bool instrEntryBBEnabled() const override {
    return static_cast<bool>(ProfileKind &
                             InstrProfKind::FunctionEntryInstrumentation);
  }

  bool hasSingleByteCoverage() const override {
    return static_cast<bool>(ProfileKind & InstrProfKind::SingleByteCoverage);
  }

  bool functionEntryOnly() const override {
    return static_cast<bool>(ProfileKind & InstrProfKind::FunctionEntryOnly);
  }

  bool hasMemoryProfile() const override {
    // TODO: Add support for text format memory profiles.
    return false;
  }

  bool hasTemporalProfile() const override {
    return static_cast<bool>(ProfileKind & InstrProfKind::TemporalProfile);
  }

  InstrProfKind getProfileKind() const override { return ProfileKind; }

  /// Read the header.
  Error readHeader() override;

  /// Read a single record.
  Error readNextRecord(NamedInstrProfRecord &Record) override;

  InstrProfSymtab &getSymtab() override {
    assert(Symtab);
    return *Symtab;
  }
};

/// Reader for the raw instrprof binary format from runtime.
///
/// This format is a raw memory dump of the instrumentation-based profiling data
/// from the runtime.  It has no index.
///
/// Templated on the unsigned type whose size matches pointers on the platform
/// that wrote the profile.
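///
/// For example, a profile produced on a 64-bit target is read with
/// RawInstrProfReader64 (an alias for RawInstrProfReader<uint64_t> defined
/// below), and a 32-bit profile with RawInstrProfReader32.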
template <class IntPtrT>
class RawInstrProfReader : public InstrProfReader {
private:
  /// The profile data file contents.
  std::unique_ptr<MemoryBuffer> DataBuffer;
  /// If available, this holds the ProfileData array used to correlate raw
  /// instrumentation data with its functions.
  const InstrProfCorrelatorImpl<IntPtrT> *Correlator;
  /// A list of timestamps paired with a function name reference.
  std::vector<std::pair<uint64_t, uint64_t>> TemporalProfTimestamps;
  bool ShouldSwapBytes;
  // The value of the version field of the raw profile data header. The lower
  // 56 bits specify the format version and the most significant 8 bits
  // specify the variant types of the profile.
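  // For example, assuming VARIANT_MASK_IR_PROF is bit 56, a value of
  // ((0x1ULL << 56) | 8) denotes raw format version 8 with the
  // IR-instrumentation variant bit set; the two parts can be split as
  //   uint64_t FormatVersion = Version & ~VARIANT_MASKS_ALL; // low 56 bits
  //   uint64_t VariantBits = Version & VARIANT_MASKS_ALL;    // high 8 bits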
  uint64_t Version;
  uint64_t CountersDelta;
  uint64_t NamesDelta;
  const RawInstrProf::ProfileData<IntPtrT> *Data;
  const RawInstrProf::ProfileData<IntPtrT> *DataEnd;
  const char *CountersStart;
  const char *CountersEnd;
  const char *NamesStart;
  const char *NamesEnd;
  // After all value profile data has been read, this pointer points to the
  // header of the next profile data (if one exists).
  const uint8_t *ValueDataStart;
  uint32_t ValueKindLast;
  uint32_t CurValueDataSize;

  /// Total size of binary ids.
  uint64_t BinaryIdsSize{0};
  /// Start address of binary id length and data pairs.
  const uint8_t *BinaryIdsStart;

public:
  RawInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer,
                     const InstrProfCorrelator *Correlator)
      : DataBuffer(std::move(DataBuffer)),
        Correlator(dyn_cast_or_null<const InstrProfCorrelatorImpl<IntPtrT>>(
            Correlator)) {}
  RawInstrProfReader(const RawInstrProfReader &) = delete;
  RawInstrProfReader &operator=(const RawInstrProfReader &) = delete;

  static bool hasFormat(const MemoryBuffer &DataBuffer);
  Error readHeader() override;
  Error readNextRecord(NamedInstrProfRecord &Record) override;
  Error readBinaryIds(std::vector<llvm::object::BuildID> &BinaryIds) override;
  Error printBinaryIds(raw_ostream &OS) override;

  uint64_t getVersion() const override { return Version; }

  bool isIRLevelProfile() const override {
    return (Version & VARIANT_MASK_IR_PROF) != 0;
  }

  bool hasCSIRLevelProfile() const override {
    return (Version & VARIANT_MASK_CSIR_PROF) != 0;
  }

  bool instrEntryBBEnabled() const override {
    return (Version & VARIANT_MASK_INSTR_ENTRY) != 0;
  }

  bool useDebugInfoCorrelate() const override {
    return (Version & VARIANT_MASK_DBG_CORRELATE) != 0;
  }

  bool hasSingleByteCoverage() const override {
    return (Version & VARIANT_MASK_BYTE_COVERAGE) != 0;
  }

  bool functionEntryOnly() const override {
    return (Version & VARIANT_MASK_FUNCTION_ENTRY_ONLY) != 0;
  }

  bool hasMemoryProfile() const override {
    // Memory profiles have a separate raw format, so this should never be set.
    assert(!(Version & VARIANT_MASK_MEMPROF));
    return false;
  }

  bool hasTemporalProfile() const override {
    return (Version & VARIANT_MASK_TEMPORAL_PROF) != 0;
  }

  /// Returns a BitsetEnum describing the attributes of the raw instr profile.
  InstrProfKind getProfileKind() const override;

  InstrProfSymtab &getSymtab() override {
    assert(Symtab);
    return *Symtab;
  }

  SmallVector<TemporalProfTraceTy> &
  getTemporalProfTraces(std::optional<uint64_t> Weight = {}) override;

private:
  Error createSymtab(InstrProfSymtab &Symtab);
  Error readNextHeader(const char *CurrentPos);
  Error readHeader(const RawInstrProf::Header &Header);

  template <class IntT> IntT swap(IntT Int) const {
    return ShouldSwapBytes ? sys::getSwappedBytes(Int) : Int;
  }

  support::endianness getDataEndianness() const {
    support::endianness HostEndian = getHostEndianness();
    if (!ShouldSwapBytes)
      return HostEndian;
    if (HostEndian == support::little)
      return support::big;
    else
      return support::little;
  }

  inline uint8_t getNumPaddingBytes(uint64_t SizeInBytes) {
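    // Round SizeInBytes up to an 8-byte boundary and return the padding
    // needed, e.g. SizeInBytes = 13 yields 3, and an exact multiple of 8
    // yields 0.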
    return 7 & (sizeof(uint64_t) - SizeInBytes % sizeof(uint64_t));
  }

  Error readName(NamedInstrProfRecord &Record);
  Error readFuncHash(NamedInstrProfRecord &Record);
  Error readRawCounts(InstrProfRecord &Record);
  Error readValueProfilingData(InstrProfRecord &Record);
  bool atEnd() const { return Data == DataEnd; }

  void advanceData() {
    // `CountersDelta` is a constant zero when using debug info correlation.
    if (!Correlator) {
      // The initial CountersDelta is the in-memory address difference between
      // the data and counts sections:
      // start(__llvm_prf_cnts) - start(__llvm_prf_data)
      // As we advance to the next record, we maintain the correct CountersDelta
      // with respect to the next record.
      CountersDelta -= sizeof(*Data);
    }
    Data++;
    ValueDataStart += CurValueDataSize;
  }

  const char *getNextHeaderPos() const {
    assert(atEnd());
    return (const char *)ValueDataStart;
  }

  StringRef getName(uint64_t NameRef) const {
    return Symtab->getFuncName(swap(NameRef));
  }

  int getCounterTypeSize() const {
    return hasSingleByteCoverage() ? sizeof(uint8_t) : sizeof(uint64_t);
  }
};

using RawInstrProfReader32 = RawInstrProfReader<uint32_t>;
using RawInstrProfReader64 = RawInstrProfReader<uint64_t>;

namespace IndexedInstrProf {

enum class HashT : uint32_t;

} // end namespace IndexedInstrProf

/// Trait for lookups into the on-disk hash table for the binary instrprof
/// format.
class InstrProfLookupTrait {
  std::vector<NamedInstrProfRecord> DataBuffer;
  IndexedInstrProf::HashT HashType;
  unsigned FormatVersion;
  // Endianness of the input value profile data.
  // It is little-endian by default, but can be changed
  // for testing purposes.
  support::endianness ValueProfDataEndianness = support::little;

public:
  InstrProfLookupTrait(IndexedInstrProf::HashT HashType, unsigned FormatVersion)
      : HashType(HashType), FormatVersion(FormatVersion) {}

  using data_type = ArrayRef<NamedInstrProfRecord>;

  using internal_key_type = StringRef;
  using external_key_type = StringRef;
  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  static bool EqualKey(StringRef A, StringRef B) { return A == B; }
  static StringRef GetInternalKey(StringRef K) { return K; }
  static StringRef GetExternalKey(StringRef K) { return K; }

  hash_value_type ComputeHash(StringRef K);

  static std::pair<offset_type, offset_type>
  ReadKeyDataLength(const unsigned char *&D) {
    using namespace support;

    offset_type KeyLen = endian::readNext<offset_type, little, unaligned>(D);
    offset_type DataLen = endian::readNext<offset_type, little, unaligned>(D);
    return std::make_pair(KeyLen, DataLen);
  }

  StringRef ReadKey(const unsigned char *D, offset_type N) {
    return StringRef((const char *)D, N);
  }

  bool readValueProfilingData(const unsigned char *&D,
                              const unsigned char *const End);
  data_type ReadData(StringRef K, const unsigned char *D, offset_type N);

  // Used for testing purposes only.
  void setValueProfDataEndianness(support::endianness Endianness) {
    ValueProfDataEndianness = Endianness;
  }
};

struct InstrProfReaderIndexBase {
  virtual ~InstrProfReaderIndexBase() = default;

  // Read all the profile records with the same key pointed to by the current
  // iterator.
  virtual Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) = 0;

  // Read all the profile records with the key equal to FuncName.
  virtual Error getRecords(StringRef FuncName,
                           ArrayRef<NamedInstrProfRecord> &Data) = 0;
  virtual void advanceToNextKey() = 0;
  virtual bool atEnd() const = 0;
  virtual void setValueProfDataEndianness(support::endianness Endianness) = 0;
  virtual uint64_t getVersion() const = 0;
  virtual bool isIRLevelProfile() const = 0;
  virtual bool hasCSIRLevelProfile() const = 0;
  virtual bool instrEntryBBEnabled() const = 0;
  virtual bool hasSingleByteCoverage() const = 0;
  virtual bool functionEntryOnly() const = 0;
  virtual bool hasMemoryProfile() const = 0;
  virtual bool hasTemporalProfile() const = 0;
  virtual InstrProfKind getProfileKind() const = 0;
  virtual Error populateSymtab(InstrProfSymtab &) = 0;
};

using OnDiskHashTableImplV3 =
    OnDiskIterableChainedHashTable<InstrProfLookupTrait>;

using MemProfRecordHashTable =
    OnDiskIterableChainedHashTable<memprof::RecordLookupTrait>;
using MemProfFrameHashTable =
    OnDiskIterableChainedHashTable<memprof::FrameLookupTrait>;

template <typename HashTableImpl>
class InstrProfReaderItaniumRemapper;

template <typename HashTableImpl>
class InstrProfReaderIndex : public InstrProfReaderIndexBase {
private:
  std::unique_ptr<HashTableImpl> HashTable;
  typename HashTableImpl::data_iterator RecordIterator;
  uint64_t FormatVersion;

  friend class InstrProfReaderItaniumRemapper<HashTableImpl>;

public:
  InstrProfReaderIndex(const unsigned char *Buckets,
                       const unsigned char *const Payload,
                       const unsigned char *const Base,
                       IndexedInstrProf::HashT HashType, uint64_t Version);
  ~InstrProfReaderIndex() override = default;

  Error getRecords(ArrayRef<NamedInstrProfRecord> &Data) override;
  Error getRecords(StringRef FuncName,
                   ArrayRef<NamedInstrProfRecord> &Data) override;
  void advanceToNextKey() override { RecordIterator++; }

  bool atEnd() const override {
    return RecordIterator == HashTable->data_end();
  }

  void setValueProfDataEndianness(support::endianness Endianness) override {
    HashTable->getInfoObj().setValueProfDataEndianness(Endianness);
  }

  uint64_t getVersion() const override { return GET_VERSION(FormatVersion); }

  bool isIRLevelProfile() const override {
    return (FormatVersion & VARIANT_MASK_IR_PROF) != 0;
  }

  bool hasCSIRLevelProfile() const override {
    return (FormatVersion & VARIANT_MASK_CSIR_PROF) != 0;
  }

  bool instrEntryBBEnabled() const override {
    return (FormatVersion & VARIANT_MASK_INSTR_ENTRY) != 0;
  }

  bool hasSingleByteCoverage() const override {
    return (FormatVersion & VARIANT_MASK_BYTE_COVERAGE) != 0;
  }

  bool functionEntryOnly() const override {
    return (FormatVersion & VARIANT_MASK_FUNCTION_ENTRY_ONLY) != 0;
  }

  bool hasMemoryProfile() const override {
    return (FormatVersion & VARIANT_MASK_MEMPROF) != 0;
  }

  bool hasTemporalProfile() const override {
    return (FormatVersion & VARIANT_MASK_TEMPORAL_PROF) != 0;
  }

  InstrProfKind getProfileKind() const override;

  Error populateSymtab(InstrProfSymtab &Symtab) override {
    return Symtab.create(HashTable->keys());
  }
};

/// Name matcher supporting fuzzy matching of symbol names to names in profiles.
class InstrProfReaderRemapper {
public:
  virtual ~InstrProfReaderRemapper() = default;
  virtual Error populateRemappings() { return Error::success(); }
  virtual Error getRecords(StringRef FuncName,
                           ArrayRef<NamedInstrProfRecord> &Data) = 0;
};

/// Reader for the indexed binary instrprof format.
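///
/// A lookup sketch (error handling elided; \c Path, \c FS, \c FuncName, and
/// \c FuncHash are caller-provided):
/// \code
///   auto ReaderOrErr = IndexedInstrProfReader::create(Path, FS);
///   if (!ReaderOrErr)
///     ... handle ReaderOrErr.takeError() ...
///   std::vector<uint64_t> Counts;
///   if (Error E =
///           (*ReaderOrErr)->getFunctionCounts(FuncName, FuncHash, Counts))
///     ... handle E ...
/// \endcode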
class IndexedInstrProfReader : public InstrProfReader {
private:
  /// The profile data file contents.
  std::unique_ptr<MemoryBuffer> DataBuffer;
  /// The profile remapping file contents.
  std::unique_ptr<MemoryBuffer> RemappingBuffer;
  /// The index into the profile data.
  std::unique_ptr<InstrProfReaderIndexBase> Index;
  /// The remapper to apply to profile names.
  std::unique_ptr<InstrProfReaderRemapper> Remapper;
  /// Profile summary data.
  std::unique_ptr<ProfileSummary> Summary;
  /// Context sensitive profile summary data.
  std::unique_ptr<ProfileSummary> CS_Summary;
  /// MemProf profile schema (if available).
  memprof::MemProfSchema Schema;
  /// MemProf record profile data on-disk indexed via llvm::md5(FunctionName).
  std::unique_ptr<MemProfRecordHashTable> MemProfRecordTable;
  /// MemProf frame profile data on-disk indexed via frame id.
  std::unique_ptr<MemProfFrameHashTable> MemProfFrameTable;
  /// Total size of binary ids.
  uint64_t BinaryIdsSize{0};
  /// Start address of binary id length and data pairs.
  const uint8_t *BinaryIdsStart = nullptr;

  // Index to the current record in the record array.
  unsigned RecordIndex;

  // Read the profile summary. Return a pointer to one byte past the end of
  // the summary data if it exists, or the input \c Cur otherwise.
  // \c UseCS indicates whether to use the context-sensitive profile summary.
  const unsigned char *readSummary(IndexedInstrProf::ProfVersion Version,
                                   const unsigned char *Cur, bool UseCS);

public:
  IndexedInstrProfReader(
      std::unique_ptr<MemoryBuffer> DataBuffer,
      std::unique_ptr<MemoryBuffer> RemappingBuffer = nullptr)
      : DataBuffer(std::move(DataBuffer)),
        RemappingBuffer(std::move(RemappingBuffer)), RecordIndex(0) {}
  IndexedInstrProfReader(const IndexedInstrProfReader &) = delete;
  IndexedInstrProfReader &operator=(const IndexedInstrProfReader &) = delete;

  /// Return the profile version.
  uint64_t getVersion() const override { return Index->getVersion(); }
  bool isIRLevelProfile() const override { return Index->isIRLevelProfile(); }
  bool hasCSIRLevelProfile() const override {
    return Index->hasCSIRLevelProfile();
  }

  bool instrEntryBBEnabled() const override {
    return Index->instrEntryBBEnabled();
  }

  bool hasSingleByteCoverage() const override {
    return Index->hasSingleByteCoverage();
  }

  bool functionEntryOnly() const override { return Index->functionEntryOnly(); }

  bool hasMemoryProfile() const override { return Index->hasMemoryProfile(); }

  bool hasTemporalProfile() const override {
    return Index->hasTemporalProfile();
  }

  /// Returns a BitsetEnum describing the attributes of the indexed instr
  /// profile.
  InstrProfKind getProfileKind() const override {
    return Index->getProfileKind();
  }

  /// Return true if the given buffer is in an indexed instrprof format.
  static bool hasFormat(const MemoryBuffer &DataBuffer);

  /// Read the file header.
  Error readHeader() override;
  /// Read a single record.
  Error readNextRecord(NamedInstrProfRecord &Record) override;

  /// Return the NamedInstrProfRecord associated with FuncName and FuncHash.
  /// When a hash_mismatch error is returned and MismatchedFuncSum is not
  /// nullptr, the sum of all counters in the mismatched function is written
  /// to *MismatchedFuncSum. If there are multiple instances of mismatched
  /// functions, the maximum sum is written.
  Expected<InstrProfRecord>
  getInstrProfRecord(StringRef FuncName, uint64_t FuncHash,
                     uint64_t *MismatchedFuncSum = nullptr);

  /// Return the memprof record for the function identified by
  /// llvm::md5(Name).
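  ///
  /// A lookup sketch: getMemProfRecord(llvm::MD5Hash(FuncName)), assuming the
  /// profile was written with the same MD5-based function name hashing.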
  Expected<memprof::MemProfRecord> getMemProfRecord(uint64_t FuncNameHash);

  /// Fill Counts with the profile data for the given function name.
  Error getFunctionCounts(StringRef FuncName, uint64_t FuncHash,
                          std::vector<uint64_t> &Counts);

  /// Return the maximum of all known function counts.
  /// \c UseCS indicates whether to use the context-sensitive count.
  uint64_t getMaximumFunctionCount(bool UseCS) {
    if (UseCS) {
      assert(CS_Summary && "No context sensitive profile summary");
      return CS_Summary->getMaxFunctionCount();
    } else {
      assert(Summary && "No profile summary");
      return Summary->getMaxFunctionCount();
    }
  }

  /// Factory method to create an indexed reader.
  static Expected<std::unique_ptr<IndexedInstrProfReader>>
  create(const Twine &Path, vfs::FileSystem &FS,
         const Twine &RemappingPath = "");

  static Expected<std::unique_ptr<IndexedInstrProfReader>>
  create(std::unique_ptr<MemoryBuffer> Buffer,
         std::unique_ptr<MemoryBuffer> RemappingBuffer = nullptr);

  // Used for testing purposes only.
  void setValueProfDataEndianness(support::endianness Endianness) {
    Index->setValueProfDataEndianness(Endianness);
  }

  // See description in the base class. This interface is designed
  // to be used by llvm-profdata (for dumping). Avoid using this when
  // the client is the compiler.
  InstrProfSymtab &getSymtab() override;

  /// Return the profile summary.
  /// \c UseCS indicates whether to use the context-sensitive summary.
  ProfileSummary &getSummary(bool UseCS) {
    if (UseCS) {
      assert(CS_Summary && "No context sensitive summary");
      return *CS_Summary;
    } else {
      assert(Summary && "No profile summary");
      return *Summary;
    }
  }

  Error readBinaryIds(std::vector<llvm::object::BuildID> &BinaryIds) override;
  Error printBinaryIds(raw_ostream &OS) override;
};

} // end namespace llvm

#endif // LLVM_PROFILEDATA_INSTRPROFREADER_H
/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/
/*
 * This is the main file that defines all the data structures, signatures, and
 * constant literals that are shared across the profiling runtime library,
 * compiler (instrumentation), and host tools (reader/writer). The entities
 * defined in this file affect the profile runtime ABI, the raw profile format,
 * or both.
 *
 * The file has two identical copies. The primary copy lives in LLVM and
 * the other one sits in the compiler-rt/lib/profile directory. To make changes
 * in this file, first modify the primary copy and copy it over to compiler-rt.
 * Testing of any change in this file can start only after the two copies are
 * synced up.
 *
 * The first part of the file includes macros that define types, names, and
 * initializers for the member fields of the core data structures. The field
 * declarations for one structure are enabled by defining the field activation
 * macro associated with that structure. Only one field activation record
 * can be defined at a time and the remaining definitions will be filtered out
 * by the preprocessor.
 *
 * Examples of how the template is used to instantiate structure definitions:
 * 1. To declare a structure:
 *
 * struct ProfData {
 * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
 *    Type Name;
 * #include "llvm/ProfileData/InstrProfData.inc"
 * };
 *
 * 2. To construct LLVM type arrays for the struct type:
 *
 * Type *DataTypes[] = {
 * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
 *   LLVMType,
 * #include "llvm/ProfileData/InstrProfData.inc"
 * };
 *
 * 3. To construct a constant array for the initializers:
 * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
 *   Initializer,
 * Constant *ConstantVals[] = {
 * #include "llvm/ProfileData/InstrProfData.inc"
 * };
 *
 *
 * The second part of the file includes definitions of all other entities that
 * are related to the runtime ABI and format. When no field activation macro is
 * defined, this file can be included to introduce the definitions.
 *
\*===----------------------------------------------------------------------===*/

/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
 * the compiler runtime. */
#ifndef INSTR_PROF_VISIBILITY
#define INSTR_PROF_VISIBILITY
#endif

/* INSTR_PROF_DATA start. */
/* Definition of member fields of the per-function control structure. */
#ifndef INSTR_PROF_DATA
#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
#else
#define INSTR_PROF_DATA_DEFINED
#endif
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
                IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
                Inc->getHash()->getZExtValue()))
INSTR_PROF_DATA(const IntPtrT, IntPtrTy, CounterPtr, RelativeCounterPtr)
/* This is used to map function pointers for the indirect call targets to
 * function name hashes during the conversion from raw to merged profile
 * data.
 */
INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
                FunctionAddr)
INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
                ValuesPtrExpr)
INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
                ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
                ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
#undef INSTR_PROF_DATA
/* INSTR_PROF_DATA end. */


/* This is an internal data structure used by the value profiler. It
 * is defined here so that the serialization code can be shared by
 * LLVM and used in unit tests.
 *
 * typedef struct ValueProfNode {
 *   // InstrProfValueData VData;
 *   uint64_t Value;
 *   uint64_t Count;
 *   struct ValueProfNode *Next;
 * } ValueProfNode;
 */
/* INSTR_PROF_VALUE_NODE start. */
#ifndef INSTR_PROF_VALUE_NODE
#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
#else
#define INSTR_PROF_DATA_DEFINED
#endif
INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
#undef INSTR_PROF_VALUE_NODE
/* INSTR_PROF_VALUE_NODE end. */

/* INSTR_PROF_RAW_HEADER  start */
/* Definition of member fields of the raw profile header data structure. */
#ifndef INSTR_PROF_RAW_HEADER
#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
#else
#define INSTR_PROF_DATA_DEFINED
#endif
INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
INSTR_PROF_RAW_HEADER(uint64_t, BinaryIdsSize, __llvm_write_binary_ids(NULL))
/* FIXME: A more accurate name is NumData */
INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
/* FIXME: A more accurate name is NumCounters */
INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta,
                      (uintptr_t)CountersBegin - (uintptr_t)DataBegin)
INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
#undef INSTR_PROF_RAW_HEADER
/* INSTR_PROF_RAW_HEADER  end */

/* VALUE_PROF_FUNC_PARAM start */
/* Definition of parameter types of the runtime API used to do value profiling
 * for a given value site.
 */
#ifndef VALUE_PROF_FUNC_PARAM
#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
#define INSTR_PROF_COMMA
#else
#define INSTR_PROF_DATA_DEFINED
#define INSTR_PROF_COMMA ,
#endif
VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
                      INSTR_PROF_COMMA
VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
#undef VALUE_PROF_FUNC_PARAM
#undef INSTR_PROF_COMMA
/* VALUE_PROF_FUNC_PARAM end */

/* VALUE_PROF_KIND start */
#ifndef VALUE_PROF_KIND
#define VALUE_PROF_KIND(Enumerator, Value, Descr)
#else
#define INSTR_PROF_DATA_DEFINED
#endif
/* For indirect function call value profiling, the addresses of the target
 * functions are profiled by the instrumented code. The target addresses are
 * written in the raw profile data and converted to target function name's MD5
 * hash by the profile reader during deserialization.  Typically, this happens
 * when the raw profile data is read during profile merging.
 *
 * For this remapping the ProfData is used.  ProfData contains both the function
 * name hash and the function address.
 */
VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
/* For memory intrinsic functions size profiling. */
VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
/* These two kinds must be the last to be
 * declared. This is to make sure the string
 * array created with the template can be
 * indexed with the kind value.
 */
VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")

#undef VALUE_PROF_KIND
/* VALUE_PROF_KIND end */

#undef COVMAP_V2_OR_V3
#ifdef COVMAP_V2
#define COVMAP_V2_OR_V3
#endif
#ifdef COVMAP_V3
#define COVMAP_V2_OR_V3
#endif

/* COVMAP_FUNC_RECORD start */
/* Definition of member fields of the function record structure in coverage
 * map.
 */
#ifndef COVMAP_FUNC_RECORD
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
#else
#define INSTR_PROF_DATA_DEFINED
#endif
#ifdef COVMAP_V1
COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
                   NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
                   llvm::Type::getInt8PtrTy(Ctx)))
COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
                   NameValue.size()))
#endif
#ifdef COVMAP_V2_OR_V3
COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
                   llvm::ConstantInt::get( \
                     llvm::Type::getInt64Ty(Ctx), NameHash))
#endif
COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
                   llvm::ConstantInt::get( \
                     llvm::Type::getInt32Ty(Ctx), CoverageMapping.size()))
COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
                   llvm::ConstantInt::get( \
                     llvm::Type::getInt64Ty(Ctx), FuncHash))
#ifdef COVMAP_V3
COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \
                   llvm::ConstantInt::get( \
                     llvm::Type::getInt64Ty(Ctx), FilenamesRef))
COVMAP_FUNC_RECORD(const char, \
                   llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \
                                        CoverageMapping.size()), \
                   CoverageMapping,
                   llvm::ConstantDataArray::getRaw( \
                     CoverageMapping, CoverageMapping.size(), \
                     llvm::Type::getInt8Ty(Ctx)))
#endif
#undef COVMAP_FUNC_RECORD
/* COVMAP_FUNC_RECORD end.  */

/* COVMAP_HEADER start */
/* Definition of member fields of coverage map header.
 */
#ifndef COVMAP_HEADER
#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
#else
#define INSTR_PROF_DATA_DEFINED
#endif
COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
              llvm::ConstantInt::get(Int32Ty, NRecords))
COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
              llvm::ConstantInt::get(Int32Ty, FilenamesSize))
COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
              llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
COVMAP_HEADER(uint32_t, Int32Ty, Version, \
              llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
#undef COVMAP_HEADER
/* COVMAP_HEADER end.  */


#ifdef INSTR_PROF_SECT_ENTRY
#define INSTR_PROF_DATA_DEFINED
INSTR_PROF_SECT_ENTRY(IPSK_data, \
                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
                      INSTR_PROF_DATA_COFF, "__DATA,")
INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
                      INSTR_PROF_CNTS_COFF, "__DATA,")
INSTR_PROF_SECT_ENTRY(IPSK_name, \
                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
                      INSTR_PROF_NAME_COFF, "__DATA,")
INSTR_PROF_SECT_ENTRY(IPSK_vals, \
                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
                      INSTR_PROF_VALS_COFF, "__DATA,")
INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
                      INSTR_PROF_VNODES_COFF, "__DATA,")
INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
                      INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
INSTR_PROF_SECT_ENTRY(IPSK_covfun, \
                      INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \
                      INSTR_PROF_COVFUN_COFF, "__LLVM_COV,")
INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")

#undef INSTR_PROF_SECT_ENTRY
#endif


#ifdef INSTR_PROF_VALUE_PROF_DATA
#define INSTR_PROF_DATA_DEFINED

#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
/*!
 * This is the header of the data structure that defines the on-disk
 * layout of the value profile data of a particular kind for one function.
 */
typedef struct ValueProfRecord {
  /* The kind of the value profile record. */
  uint32_t Kind;
  /*
   * The number of value profile sites. It is guaranteed to be non-zero;
   * otherwise the record for this kind won't be emitted.
   */
  uint32_t NumValueSites;
  /*
   * The first element of the array that stores the number of profiled
   * values for each value site. The size of the array is NumValueSites.
   * Since NumValueSites is greater than zero, there is at least one
   * element in the array.
   */
  uint8_t SiteCountArray[1];

  /*
   * The fake declaration is for documentation purposes only.
   * Align the start of the next field to an 8 byte boundary.
  uint8_t Padding[X];
   */

  /* The array of value profile data. The size of the array is the sum
   * of all elements in SiteCountArray[].
  InstrProfValueData ValueData[];
   */

#ifdef __cplusplus
  /*!
   * Return the number of value sites.
   */
  uint32_t getNumValueSites() const { return NumValueSites; }
  /*!
   * Read data from this record and save it to Record.
   */
  void deserializeTo(InstrProfRecord &Record,
                     InstrProfSymtab *SymTab);
  /*
   * In-place byte swap:
   * Do a byte swap for this instance. \c Old is the original order before
   * the swap, and \c New is the new byte order.
   */
  void swapBytes(support::endianness Old, support::endianness New);
#endif
} ValueProfRecord;

/*!
 * Per-function header/control data structure for value profiling
 * data in indexed format.
 */
typedef struct ValueProfData {
  /*
   * Total size in bytes including this field. It must be a multiple
   * of sizeof(uint64_t).
   */
  uint32_t TotalSize;
  /*
   * The number of value profile kinds that have value profile data.
   * In this implementation, a value profile kind is considered to
   * have profile data if the number of value profile sites for the
   * kind is not zero. More aggressively, the implementation can
   * choose to check the actual data value: if none of the value sites
   * has any profiled values, the kind can be skipped.
   */
  uint32_t NumValueKinds;

  /*
   * What follows is a sequence of variable-length records. The prefix/header
   * of each record is defined by the ValueProfRecord type. The number of
   * records is NumValueKinds.
   * ValueProfRecord Record_1;
   * ...
   * ValueProfRecord Record_N;
   */

#if __cplusplus
  /*!
   * Return the total size in bytes of the on-disk value profile data
   * given the data stored in Record.
   */
  static uint32_t getSize(const InstrProfRecord &Record);
  /*!
   * Return a pointer to \c ValueProfData instance ready to be streamed.
   */
  static std::unique_ptr<ValueProfData>
  serializeFrom(const InstrProfRecord &Record);
  /*!
   * Check the integrity of the record.
   */
  Error checkIntegrity();
  /*!
   * Return a pointer to a \c ValueProfData instance ready to be read.
   * All data in the instance are properly byte swapped. The input
   * data is assumed to be in little endian order.
   */
  static Expected<std::unique_ptr<ValueProfData>>
  getValueProfData(const unsigned char *SrcBuffer,
                   const unsigned char *const SrcBufferEnd,
                   support::endianness SrcDataEndianness);
  /*!
   * Swap byte order from \c Endianness order to host byte order.
   */
  void swapBytesToHost(support::endianness Endianness);
  /*!
   * Swap byte order from host byte order to \c Endianness order.
   */
  void swapBytesFromHost(support::endianness Endianness);
  /*!
   * Return the total size of this \c ValueProfData.
   */
  uint32_t getSize() const { return TotalSize; }
  /*!
   * Read data from this data and save it to \c Record.
   */
  void deserializeTo(InstrProfRecord &Record,
                     InstrProfSymtab *SymTab);
  void operator delete(void *ptr) { ::operator delete(ptr); }
#endif
} ValueProfData;

/*
 * The closure is designed to abstract away two types of value profile data:
 * - InstrProfRecord, which is the primary data structure used to
 *   represent profile data in host tools (reader, writer, and profile-use)
 * - the value profile runtime data structure suitable to be used by the C
 *   runtime library.
 *
 * Both sources of data need to serialize to disk/memory-buffer in a common
 * format: ValueProfData. The abstraction allows compiler-rt's raw profile
 * writer to share the same format and code with the indexed profile writer.
 *
 * For documentation of the member methods below, refer to the corresponding
 * methods in class InstrProfRecord.
 */
typedef struct ValueProfRecordClosure {
  const void *Record;
  uint32_t (*GetNumValueKinds)(const void *Record);
  uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
  uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
  uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);

  /*
   * After extracting the value profile data from the value profile record,
   * this method is used to map the in-memory value to the on-disk value. If
   * the method is null, the value will be written out untranslated.
   */
  uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
  void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
                          uint32_t S);
  ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
} ValueProfRecordClosure;
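
/* A sketch of the serialization flow driven by the closure (the callback
 * wiring is whatever the caller supplies; passing NULL as DstData requests
 * allocation via the closure's AllocValueProfData method):
 *
 *   ValueProfRecordClosure Closure = ...;
 *   ValueProfData *VPD = serializeValueProfDataFrom(&Closure, NULL);
 *   // VPD->TotalSize bytes are ready to be written out.
 */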

INSTR_PROF_VISIBILITY ValueProfRecord *
getFirstValueProfRecord(ValueProfData *VPD);
INSTR_PROF_VISIBILITY ValueProfRecord *
getValueProfRecordNext(ValueProfRecord *VPR);
INSTR_PROF_VISIBILITY InstrProfValueData *
getValueProfRecordValueData(ValueProfRecord *VPR);
INSTR_PROF_VISIBILITY uint32_t
getValueProfRecordHeaderSize(uint32_t NumValueSites);

#undef INSTR_PROF_VALUE_PROF_DATA
#endif  /* INSTR_PROF_VALUE_PROF_DATA */


#ifdef INSTR_PROF_COMMON_API_IMPL
#define INSTR_PROF_DATA_DEFINED
#ifdef __cplusplus
#define INSTR_PROF_INLINE inline
#define INSTR_PROF_NULLPTR nullptr
#else
#define INSTR_PROF_INLINE
#define INSTR_PROF_NULLPTR NULL
#endif

#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif

/*!
 * Return the \c ValueProfRecord header size including the
 * padding bytes.
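 * For example, with NumValueSites = 5 the unpadded size is
 * offsetof(ValueProfRecord, SiteCountArray) + 5 = 13 bytes, which is
 * rounded up to 16.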
 */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
  uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
                  sizeof(uint8_t) * NumValueSites;
  /* Round the size to multiple of 8 bytes. */
  Size = (Size + 7) & ~7;
  return Size;
}

/*!
 * Return the total size of the value profile record including the
 * header and the value data.
 */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
uint32_t getValueProfRecordSize(uint32_t NumValueSites,
                                uint32_t NumValueData) {
  return getValueProfRecordHeaderSize(NumValueSites) +
         sizeof(InstrProfValueData) * NumValueData;
}

/*!
 * Return the pointer to the start of value data array.
 */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
  return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
                                                   This->NumValueSites));
}

/*!
 * Return the total number of value data for \c This record.
 */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
  uint32_t NumValueData = 0;
  uint32_t I;
  for (I = 0; I < This->NumValueSites; I++)
    NumValueData += This->SiteCountArray[I];
  return NumValueData;
}

/*!
 * Return the \c ValueProfRecord that follows \c This one.
 */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
  uint32_t NumValueData = getValueProfRecordNumValueData(This);
  return (ValueProfRecord *)((char *)This +
                             getValueProfRecordSize(This->NumValueSites,
                                                    NumValueData));
}

/*!
 * Return the first \c ValueProfRecord instance.
 */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
  return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
}
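
/*!
 * Illustrative sketch (not part of this file): walking every record in a
 * ValueProfData blob. NumValueKinds is a field of ValueProfData, and each
 * record's value data array holds getValueProfRecordNumValueData(VR) entries.
 * \code
 *   void visitAllRecords(ValueProfData *VPD) {
 *     ValueProfRecord *VR = getFirstValueProfRecord(VPD);
 *     uint32_t K;
 *     for (K = 0; K < VPD->NumValueKinds; K++) {
 *       InstrProfValueData *VD = getValueProfRecordValueData(VR);
 *       (void)VD;  // process VR->NumValueSites sites here
 *       VR = getValueProfRecordNext(VR);
 *     }
 *   }
 * \endcode
 */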

/* Closure based interfaces.  */

/*!
 * Return the total size in bytes of the on-disk value profile data
 * given the data stored in Record.
 */
INSTR_PROF_VISIBILITY uint32_t
getValueProfDataSize(ValueProfRecordClosure *Closure) {
  uint32_t Kind;
  uint32_t TotalSize = sizeof(ValueProfData);
  const void *Record = Closure->Record;

  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
    uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
    if (!NumValueSites)
      continue;
    TotalSize += getValueProfRecordSize(NumValueSites,
                                        Closure->GetNumValueData(Record, Kind));
  }
  return TotalSize;
}

/*!
 * Extract value profile data of a function for the profile kind \c ValueKind
 * from the \c Closure and serialize the data into \c This record instance.
 */
INSTR_PROF_VISIBILITY void
serializeValueProfRecordFrom(ValueProfRecord *This,
                             ValueProfRecordClosure *Closure,
                             uint32_t ValueKind, uint32_t NumValueSites) {
  uint32_t S;
  const void *Record = Closure->Record;
  This->Kind = ValueKind;
  This->NumValueSites = NumValueSites;
  InstrProfValueData *DstVD = getValueProfRecordValueData(This);

  for (S = 0; S < NumValueSites; S++) {
    uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
    This->SiteCountArray[S] = ND;
    Closure->GetValueForSite(Record, DstVD, ValueKind, S);
    DstVD += ND;
  }
}

/*!
 * Extract value profile data of a function from the \c Closure and
 * serialize the data into \c DstData if it is not NULL, or otherwise into
 * heap memory allocated by the \c Closure's allocator method. If \c DstData
 * is not null, the caller is expected to have set the TotalSize in
 * \c DstData.
 */
INSTR_PROF_VISIBILITY ValueProfData *
serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
                           ValueProfData *DstData) {
  uint32_t Kind;
  uint32_t TotalSize =
      DstData ? DstData->TotalSize : getValueProfDataSize(Closure);

  ValueProfData *VPD =
      DstData ? DstData : Closure->AllocValueProfData(TotalSize);

  VPD->TotalSize = TotalSize;
  VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
  ValueProfRecord *VR = getFirstValueProfRecord(VPD);
  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
    uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
    if (!NumValueSites)
      continue;
    serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
    VR = getValueProfRecordNext(VR);
  }
  return VPD;
}
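
/*!
 * Illustrative sketch (not part of this file): given a populated closure
 * \c C (see the sketch near the typedef above), letting the closure allocate
 * the output buffer. Passing a non-NULL \c DstData instead skips both the
 * size computation and the allocation.
 * \code
 *   ValueProfData *VPD = serializeValueProfDataFrom(&C, INSTR_PROF_NULLPTR);
 *   // VPD->TotalSize bytes are now in on-disk format.
 * \endcode
 */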

#undef INSTR_PROF_COMMON_API_IMPL
#endif /* INSTR_PROF_COMMON_API_IMPL */

/*============================================================================*/

#ifndef INSTR_PROF_DATA_DEFINED

#ifndef INSTR_PROF_DATA_INC
#define INSTR_PROF_DATA_INC

/* Helper macros.  */
#define INSTR_PROF_SIMPLE_QUOTE(x) #x
#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)

/* Magic number to detect file format and endianness.
 * Use 255 at one end, since no UTF-8 file can use that character.  Avoid 0,
 * so that utilities, like strings, don't grab it as a string.  129 is also
 * invalid UTF-8, and high enough to be interesting.
 * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
 * for 32-bit platforms.
 */
#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
        (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
        (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129

/* FIXME: Please remedy the fixme in the header before bumping the version. */
/* Raw profile format version (start from 1). */
#define INSTR_PROF_RAW_VERSION 8
/* Indexed profile format version (start from 1). */
#define INSTR_PROF_INDEX_VERSION 10
/* Coverage mapping format version (start from 0). */
#define INSTR_PROF_COVMAP_VERSION 5

/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
 * version for other variants of the profile. We set the lowest bit of the
 * upper 8 bits (i.e. bit 56) to 1 to indicate that this is an IR-level
 * instrumentation generated profile, and to 0 for a Clang FE generated
 * profile.
 * 1 in bit 57 indicates there are context-sensitive records in the profile.
 * The 58th bit indicates whether function entry blocks were instrumented.
 * The 59th bit indicates whether to use debug info to correlate profiles.
 * The 60th bit indicates single byte coverage instrumentation.
 * The 61st bit indicates function entry instrumentation only.
 * The 62nd bit indicates whether memory profile information is present.
 * The 63rd bit indicates if this is a temporal profile.
 */
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
#define VARIANT_MASK_MEMPROF (0x1ULL << 62)
#define VARIANT_MASK_TEMPORAL_PROF (0x1ULL << 63)
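
/* Illustrative sketch (not part of this file): splitting a raw version word
 * into the format version and its variant flags.
 * \code
 *   static uint64_t formatVersion(uint64_t Version) {
 *     return GET_VERSION(Version);
 *   }
 *   static int isIRLevelProfile(uint64_t Version) {
 *     return (Version & VARIANT_MASK_IR_PROF) != 0;
 *   }
 *   static int hasCSRecords(uint64_t Version) {
 *     return (Version & VARIANT_MASK_CSIR_PROF) != 0;
 *   }
 * \endcode
 */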
#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias
#define INSTR_PROF_PROFILE_SET_TIMESTAMP __llvm_profile_set_timestamp

/* The variable that holds the name of the profile data
 * specified via the command line. */
#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename

/* section name strings common to all targets other
   than WIN32 */
#define INSTR_PROF_DATA_COMMON __llvm_prf_data
#define INSTR_PROF_NAME_COMMON __llvm_prf_names
#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
#define INSTR_PROF_COVFUN_COMMON __llvm_covfun
#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
/* Windows section names. Because these section names contain dollar characters,
 * they must be quoted.
 */
#define INSTR_PROF_DATA_COFF ".lprfd$M"
#define INSTR_PROF_NAME_COFF ".lprfn$M"
#define INSTR_PROF_CNTS_COFF ".lprfc$M"
#define INSTR_PROF_VALS_COFF ".lprfv$M"
#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
#define INSTR_PROF_COVFUN_COFF ".lcovfun$M"
#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"

#ifdef _WIN32
/* Runtime section names and name strings.  */
#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
/* Array of pointers. Each pointer points to a list
 * of value nodes associated with one value site.
 */
#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
/* Value profile nodes section. */
#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF
#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
#else
/* Runtime section names and name strings.  */
#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
/* Array of pointers. Each pointer points to a list
 * of value nodes associated with one value site.
 */
#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
/* Value profile nodes section. */
#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON)
/* Order file instrumentation. */
#define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
#endif

#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR                                   \
  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR                               \
  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)

/* Macros to define the start/stop section symbol for a given
 * section on Linux. For instance,
 * INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON) will
 * expand to __start___llvm_prf_data. (The unquoted *_COMMON token must be
 * used here, since the quoted *_SECT_NAME string cannot be token-pasted.)
 */
#define INSTR_PROF_SECT_START(Sect) \
        INSTR_PROF_CONCAT(__start_,Sect)
#define INSTR_PROF_SECT_STOP(Sect) \
        INSTR_PROF_CONCAT(__stop_,Sect)
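
/* Illustrative sketch (not part of this file): on ELF targets the linker
 * synthesizes __start_/__stop_ symbols for each retained section, so a
 * runtime can walk a section by declaring its bounds. The char element type
 * below is a placeholder; the runtime uses the real per-section entry type.
 * \code
 *   extern char INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON)[];
 *   extern char INSTR_PROF_SECT_STOP(INSTR_PROF_DATA_COMMON)[];
 * \endcode
 */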

/* Value Profiling API linkage name.  */
#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
#define INSTR_PROF_VALUE_PROF_FUNC_STR \
        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC __llvm_profile_instrument_memop
#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR                                   \
  INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_MEMOP_FUNC)

/* InstrProfile per-function control data alignment.  */
#define INSTR_PROF_DATA_ALIGNMENT 8

/* The data structure that represents a tracked value by the
 * value profiler.
 */
typedef struct InstrProfValueData {
  /* Profiled value. */
  uint64_t Value;
  /* Number of times the value appears in the training run. */
  uint64_t Count;
} InstrProfValueData;

#endif /* INSTR_PROF_DATA_INC */

#ifndef INSTR_ORDER_FILE_INC
/* The maximum number of functions: 128*1024 (the buffer size will be 128*4 KB). */
#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
#define INSTR_ORDER_FILE_BUFFER_BITS 17
#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
#endif /* INSTR_ORDER_FILE_INC */
#else
#undef INSTR_PROF_DATA_DEFINED
#endif

#undef COVMAP_V2_OR_V3

#ifdef INSTR_PROF_VALUE_PROF_MEMOP_API

#ifdef __cplusplus
#define INSTR_PROF_INLINE inline
#else
#define INSTR_PROF_INLINE
#endif

/* The value range buckets (22 buckets) for the memop size value profiling look
 * like:
 *
 *   [0, 0]
 *   [1, 1]
 *   [2, 2]
 *   [3, 3]
 *   [4, 4]
 *   [5, 5]
 *   [6, 6]
 *   [7, 7]
 *   [8, 8]
 *   [9, 15]
 *   [16, 16]
 *   [17, 31]
 *   [32, 32]
 *   [33, 63]
 *   [64, 64]
 *   [65, 127]
 *   [128, 128]
 *   [129, 255]
 *   [256, 256]
 *   [257, 511]
 *   [512, 512]
 *   [513, UINT64_MAX]
 *
 * Each range has a 'representative value', which is the lower end value of
 * the range and is what gets stored in the runtime profile data records and
 * the VP metadata. For example, it's 2 for [2, 2] and 65 for [65, 127].
 */
#define INSTR_PROF_NUM_BUCKETS 22

/*
 * Clz and Popcount. This code was copied from
 * compiler-rt/lib/fuzzer/{FuzzerBuiltins.h,FuzzerBuiltinsMsvc.h} and
 * llvm/include/llvm/Support/MathExtras.h.
 */
#if defined(_MSC_VER) && !defined(__clang__)

#include <intrin.h>
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
int InstProfClzll(unsigned long long X) {
  unsigned long LeadZeroIdx = 0;
#if !defined(_M_ARM64) && !defined(_M_X64)
  // Scan the high 32 bits.
  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X >> 32)))
    return (int)(63 - (LeadZeroIdx + 32)); // Create a bit offset from the MSB.
  // Scan the low 32 bits.
  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X)))
    return (int)(63 - LeadZeroIdx);
#else
  if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx;
#endif
  return 64;
}
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
int InstProfPopcountll(unsigned long long X) {
  // This code originates from https://reviews.llvm.org/rG30626254510f.
  unsigned long long v = X;
  v = v - ((v >> 1) & 0x5555555555555555ULL);
  v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
  return (int)((unsigned long long)(v * 0x0101010101010101ULL) >> 56);
}

#else

INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
int InstProfClzll(unsigned long long X) { return __builtin_clzll(X); }
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
int InstProfPopcountll(unsigned long long X) { return __builtin_popcountll(X); }

#endif  /* defined(_MSC_VER) && !defined(__clang__) */

/* Map an (observed) memop size value to the representative value of its range.
 * For example, 5 -> 5, 22 -> 17, 99 -> 65, 256 -> 256, 1001 -> 513. */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE uint64_t
InstrProfGetRangeRepValue(uint64_t Value) {
  if (Value <= 8)
    // The first ranges are individually tracked. Use the value as is.
    return Value;
  else if (Value >= 513)
    // The last range is mapped to its lowest value.
    return 513;
  else if (InstProfPopcountll(Value) == 1)
    // If it's a power of two, use it as is.
    return Value;
  else
    // Otherwise, map to the previous power of two plus 1.
    return (UINT64_C(1) << (64 - InstProfClzll(Value) - 1)) + 1;
}

/* Return true if the range that an (observed) memop size value belongs to has
 * only a single value in the range.  For example, 0 -> true, 8 -> true, 10 ->
 * false, 64 -> true, 100 -> false, 513 -> false. */
INSTR_PROF_VISIBILITY INSTR_PROF_INLINE unsigned
InstrProfIsSingleValRange(uint64_t Value) {
  if (Value <= 8)
    // The first ranges are individually tracked.
    return 1;
  else if (InstProfPopcountll(Value) == 1)
    // If it's a power of two, there's only one value.
    return 1;
  else
    // Otherwise, there's more than one value in the range.
    return 0;
}
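
/* Illustrative checks (not part of this file) combining the two helpers,
 * assuming <assert.h> is available:
 * \code
 *   assert(InstrProfGetRangeRepValue(99) == 65);
 *   assert(InstrProfGetRangeRepValue(1001) == 513);
 *   assert(InstrProfIsSingleValRange(64));
 *   assert(!InstrProfIsSingleValRange(100));
 * \endcode
 */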

#endif /* INSTR_PROF_VALUE_PROF_MEMOP_API */
//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header provides the interface to read and write coverage files that
// use 'gcov' format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_GCOV_H
#define LLVM_PROFILEDATA_GCOV_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>

namespace llvm {

class GCOVFunction;
class GCOVBlock;

namespace GCOV {

enum GCOVVersion { V304, V407, V408, V800, V900, V1200 };

/// A struct for passing gcov options between functions.
struct Options {
  Options(bool A, bool B, bool C, bool F, bool P, bool U, bool I, bool L,
          bool M, bool N, bool R, bool T, bool X, std::string SourcePrefix)
      : AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F),
        PreservePaths(P), UncondBranch(U), Intermediate(I), LongFileNames(L),
        Demangle(M), NoOutput(N), RelativeOnly(R), UseStdout(T),
        HashFilenames(X), SourcePrefix(std::move(SourcePrefix)) {}

  bool AllBlocks;
  bool BranchInfo;
  bool BranchCount;
  bool FuncCoverage;
  bool PreservePaths;
  bool UncondBranch;
  bool Intermediate;
  bool LongFileNames;
  bool Demangle;
  bool NoOutput;
  bool RelativeOnly;
  bool UseStdout;
  bool HashFilenames;
  std::string SourcePrefix;
};

} // end namespace GCOV

/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific
/// read operations.
class GCOVBuffer {
public:
  GCOVBuffer(MemoryBuffer *B) : Buffer(B) {}
  ~GCOVBuffer() { consumeError(cursor.takeError()); }

  /// readGCNOFormat - Check that a valid GCNO signature starts the buffer.
  bool readGCNOFormat() {
    StringRef buf = Buffer->getBuffer();
    StringRef magic = buf.substr(0, 4);
    if (magic == "gcno") {
      de = DataExtractor(buf.substr(4), false, 0);
    } else if (magic == "oncg") {
      de = DataExtractor(buf.substr(4), true, 0);
    } else {
      errs() << "unexpected magic: " << magic << "\n";
      return false;
    }
    return true;
  }

  /// readGCDAFormat - Check that a valid GCDA signature starts the buffer.
  bool readGCDAFormat() {
    StringRef buf = Buffer->getBuffer();
    StringRef magic = buf.substr(0, 4);
    if (magic == "gcda") {
      de = DataExtractor(buf.substr(4), false, 0);
    } else if (magic == "adcg") {
      de = DataExtractor(buf.substr(4), true, 0);
    } else {
      return false;
    }
    return true;
  }

  /// readGCOVVersion - Read GCOV version.
  bool readGCOVVersion(GCOV::GCOVVersion &version) {
    std::string str(de.getBytes(cursor, 4));
    if (str.size() != 4)
      return false;
    if (de.isLittleEndian())
      std::reverse(str.begin(), str.end());
    int ver = str[0] >= 'A'
                  ? (str[0] - 'A') * 100 + (str[1] - '0') * 10 + str[2] - '0'
                  : (str[0] - '0') * 10 + str[2] - '0';
    if (ver >= 120) {
      this->version = version = GCOV::V1200;
      return true;
    } else if (ver >= 90) {
      // PR gcov-profile/84846, r269678
      this->version = version = GCOV::V900;
      return true;
    } else if (ver >= 80) {
      // PR gcov-profile/48463
      this->version = version = GCOV::V800;
      return true;
    } else if (ver >= 48) {
      // r189778: the exit block moved from the last to the second.
      this->version = version = GCOV::V408;
      return true;
    } else if (ver >= 47) {
      // r173147: split checksum into cfg checksum and line checksum.
      this->version = version = GCOV::V407;
      return true;
    } else if (ver >= 34) {
      this->version = version = GCOV::V304;
      return true;
    }
    errs() << "unexpected version: " << str << "\n";
    return false;
  }

  uint32_t getWord() { return de.getU32(cursor); }
  StringRef getString() {
    uint32_t len;
    if (!readInt(len) || len == 0)
      return {};
    return de.getBytes(cursor, len * 4).split('\0').first;
  }

  bool readInt(uint32_t &Val) {
    if (cursor.tell() + 4 > de.size()) {
      Val = 0;
      errs() << "unexpected end of memory buffer: " << cursor.tell() << "\n";
      return false;
    }
    Val = de.getU32(cursor);
    return true;
  }

  bool readInt64(uint64_t &Val) {
    uint32_t Lo, Hi;
    if (!readInt(Lo) || !readInt(Hi))
      return false;
    Val = ((uint64_t)Hi << 32) | Lo;
    return true;
  }

  bool readString(StringRef &str) {
    uint32_t len;
    if (!readInt(len) || len == 0)
      return false;
    if (version >= GCOV::V1200)
      str = de.getBytes(cursor, len).drop_back();
    else
      str = de.getBytes(cursor, len * 4).split('\0').first;
    return bool(cursor);
  }

  DataExtractor de{ArrayRef<uint8_t>{}, false, 0};
  DataExtractor::Cursor cursor{0};

private:
  MemoryBuffer *Buffer;
  GCOV::GCOVVersion version{};
};

/// GCOVFile - Collects coverage information for one pair of coverage files
/// (.gcno and .gcda).
class GCOVFile {
public:
  GCOVFile() = default;

  bool readGCNO(GCOVBuffer &Buffer);
  bool readGCDA(GCOVBuffer &Buffer);
  GCOV::GCOVVersion getVersion() const { return version; }
  void print(raw_ostream &OS) const;
  void dump() const;

  std::vector<std::string> filenames;
  StringMap<unsigned> filenameToIdx;

public:
  bool GCNOInitialized = false;
  GCOV::GCOVVersion version{};
  uint32_t checksum = 0;
  StringRef cwd;
  SmallVector<std::unique_ptr<GCOVFunction>, 16> functions;
  std::map<uint32_t, GCOVFunction *> identToFunction;
  uint32_t runCount = 0;
  uint32_t programCount = 0;

  using iterator = pointee_iterator<
      SmallVectorImpl<std::unique_ptr<GCOVFunction>>::const_iterator>;
  iterator begin() const { return iterator(functions.begin()); }
  iterator end() const { return iterator(functions.end()); }

private:
  unsigned addNormalizedPathToMap(StringRef filename);
};
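
/// Illustrative sketch (not part of this header): the typical read sequence,
/// assuming \c Buf points to a loaded .gcno MemoryBuffer.
/// \code
///   GCOVBuffer GB(Buf);
///   GCOVFile GF;
///   if (GB.readGCNOFormat() && GF.readGCNO(GB))
///     for (const GCOVFunction &F : GF)
///       F.print(llvm::errs());
/// \endcode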

struct GCOVArc {
  GCOVArc(GCOVBlock &src, GCOVBlock &dst, uint32_t flags)
      : src(src), dst(dst), flags(flags) {}
  bool onTree() const;

  GCOVBlock &src;
  GCOVBlock &dst;
  uint32_t flags;
  uint64_t count = 0;
  uint64_t cycleCount = 0;
};

/// GCOVFunction - Collects function information.
class GCOVFunction {
public:
  using BlockIterator = pointee_iterator<
      SmallVectorImpl<std::unique_ptr<GCOVBlock>>::const_iterator>;

  GCOVFunction(GCOVFile &file) : file(file) {}

  StringRef getName(bool demangle) const;
  StringRef getFilename() const;
  uint64_t getEntryCount() const;
  GCOVBlock &getExitBlock() const;

  iterator_range<BlockIterator> blocksRange() const {
    return make_range(blocks.begin(), blocks.end());
  }

  uint64_t propagateCounts(const GCOVBlock &v, GCOVArc *pred);
  void print(raw_ostream &OS) const;
  void dump() const;

  GCOVFile &file;
  uint32_t ident = 0;
  uint32_t linenoChecksum;
  uint32_t cfgChecksum = 0;
  uint32_t startLine = 0;
  uint32_t startColumn = 0;
  uint32_t endLine = 0;
  uint32_t endColumn = 0;
  uint8_t artificial = 0;
  StringRef Name;
  mutable SmallString<0> demangled;
  unsigned srcIdx;
  SmallVector<std::unique_ptr<GCOVBlock>, 0> blocks;
  SmallVector<std::unique_ptr<GCOVArc>, 0> arcs, treeArcs;
  DenseSet<const GCOVBlock *> visited;
};

/// GCOVBlock - Collects block information.
class GCOVBlock {
public:
  using EdgeIterator = SmallVectorImpl<GCOVArc *>::const_iterator;
  using BlockVector = SmallVector<const GCOVBlock *, 1>;
  using BlockVectorLists = SmallVector<BlockVector, 4>;
  using Edges = SmallVector<GCOVArc *, 4>;

  GCOVBlock(uint32_t N) : number(N) {}

  void addLine(uint32_t N) { lines.push_back(N); }
  uint32_t getLastLine() const { return lines.back(); }
  uint64_t getCount() const { return count; }

  void addSrcEdge(GCOVArc *Edge) { pred.push_back(Edge); }

  void addDstEdge(GCOVArc *Edge) { succ.push_back(Edge); }

  iterator_range<EdgeIterator> srcs() const {
    return make_range(pred.begin(), pred.end());
  }

  iterator_range<EdgeIterator> dsts() const {
    return make_range(succ.begin(), succ.end());
  }

  void print(raw_ostream &OS) const;
  void dump() const;

  static uint64_t
  augmentOneCycle(GCOVBlock *src,
                  std::vector<std::pair<GCOVBlock *, size_t>> &stack);
  static uint64_t getCyclesCount(const BlockVector &blocks);
  static uint64_t getLineCount(const BlockVector &Blocks);

public:
  uint32_t number;
  uint64_t count = 0;
  SmallVector<GCOVArc *, 2> pred;
  SmallVector<GCOVArc *, 2> succ;
  SmallVector<uint32_t, 4> lines;
  bool traversable = false;
  GCOVArc *incoming = nullptr;
};

void gcovOneInput(const GCOV::Options &options, StringRef filename,
                  StringRef gcno, StringRef gcda, GCOVFile &file);

} // end namespace llvm

#endif // LLVM_PROFILEDATA_GCOV_H
//===- InstrProfWriter.h - Instrumented profiling writer --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing profiling data for instrumentation
// based PGO and coverage.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_INSTRPROFWRITER_H
#define LLVM_PROFILEDATA_INSTRPROFWRITER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Object/BuildID.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
#include <random>

namespace llvm {

class InstrProfRecordWriterTrait;
class ProfOStream;
class MemoryBuffer;
class raw_fd_ostream;

/// Writer for instrumentation based profile data.
class InstrProfWriter {
public:
  using ProfilingData = SmallDenseMap<uint64_t, InstrProfRecord>;

private:
  bool Sparse;
  StringMap<ProfilingData> FunctionData;
  /// The maximum length of a single temporal profile trace.
  uint64_t MaxTemporalProfTraceLength;
  /// The maximum number of stored temporal profile traces.
  uint64_t TemporalProfTraceReservoirSize;
  /// The total number of temporal profile traces seen.
  uint64_t TemporalProfTraceStreamSize = 0;
  /// The list of temporal profile traces.
  SmallVector<TemporalProfTraceTy> TemporalProfTraces;
  std::mt19937 RNG;

  // A map to hold memprof data per function. The lower 64 bits obtained from
  // the md5 hash of the function name are used to index into the map.
  llvm::MapVector<GlobalValue::GUID, memprof::IndexedMemProfRecord>
      MemProfRecordData;
  // A map to hold frame id to frame mappings. The mappings are used to
  // convert IndexedMemProfRecord to MemProfRecords with frame information
  // inline.
  llvm::MapVector<memprof::FrameId, memprof::Frame> MemProfFrameData;

  // List of binary ids.
  std::vector<llvm::object::BuildID> BinaryIds;

  // An enum describing the attributes of the profile.
  InstrProfKind ProfileKind = InstrProfKind::Unknown;
  // Use raw pointer here for the incomplete type object.
  InstrProfRecordWriterTrait *InfoObj;

public:
  InstrProfWriter(bool Sparse = false,
                  uint64_t TemporalProfTraceReservoirSize = 0,
                  uint64_t MaxTemporalProfTraceLength = 0);
  ~InstrProfWriter();

  StringMap<ProfilingData> &getProfileData() { return FunctionData; }

  /// Add function counts for the given function. If there are already counts
  /// for this function and the hash and number of counts match, each counter is
  /// summed. Optionally scale counts by \p Weight.
  void addRecord(NamedInstrProfRecord &&I, uint64_t Weight,
                 function_ref<void(Error)> Warn);
  void addRecord(NamedInstrProfRecord &&I, function_ref<void(Error)> Warn) {
    addRecord(std::move(I), 1, Warn);
  }

  /// Add \p SrcTraces using reservoir sampling where \p SrcStreamSize is the
  /// total number of temporal profiling traces the source has seen.
  void addTemporalProfileTraces(SmallVectorImpl<TemporalProfTraceTy> &SrcTraces,
                                uint64_t SrcStreamSize);

  /// Add a memprof record for a function identified by its \p Id.
  void addMemProfRecord(const GlobalValue::GUID Id,
                        const memprof::IndexedMemProfRecord &Record);

  /// Add a memprof frame identified by the hash of the contents of the frame in
  /// \p FrameId.
  bool addMemProfFrame(const memprof::FrameId, const memprof::Frame &F,
                       function_ref<void(Error)> Warn);

  // Add a binary id to the binary ids list.
  void addBinaryIds(ArrayRef<llvm::object::BuildID> BIs);

  /// Merge existing function counts from the given writer.
  void mergeRecordsFromWriter(InstrProfWriter &&IPW,
                              function_ref<void(Error)> Warn);

  /// Write the profile to \c OS
  Error write(raw_fd_ostream &OS);

  /// Write the profile to a string output stream \c OS
  Error write(raw_string_ostream &OS);

  /// Write the profile in text format to \c OS
  Error writeText(raw_fd_ostream &OS);

  /// Write temporal profile trace data to the header in text format to \c OS
  void writeTextTemporalProfTraceData(raw_fd_ostream &OS,
                                      InstrProfSymtab &Symtab);

  Error validateRecord(const InstrProfRecord &Func);

  /// Write \c Record in text format to \c OS
  static void writeRecordInText(StringRef Name, uint64_t Hash,
                                const InstrProfRecord &Counters,
                                InstrProfSymtab &Symtab, raw_fd_ostream &OS);

  /// Write the profile, returning the raw data. For testing.
  std::unique_ptr<MemoryBuffer> writeBuffer();

  /// Update the attributes of the current profile from the attributes
  /// specified. An error is returned if IR and FE profiles are mixed.
  Error mergeProfileKind(const InstrProfKind Other) {
    // If the kind is unset, this is the first profile we are merging so just
    // set it to the given type.
    if (ProfileKind == InstrProfKind::Unknown) {
      ProfileKind = Other;
      return Error::success();
    }

    // Returns true if merging should fail, assuming A and B are incompatible.
    auto testIncompatible = [&](InstrProfKind A, InstrProfKind B) {
      return (static_cast<bool>(ProfileKind & A) &&
              static_cast<bool>(Other & B)) ||
             (static_cast<bool>(ProfileKind & B) &&
              static_cast<bool>(Other & A));
    };

    // Check if the profiles are incompatible. Clang frontend profiles can't be
    // merged with other profile types.
    if (static_cast<bool>(
            (ProfileKind & InstrProfKind::FrontendInstrumentation) ^
            (Other & InstrProfKind::FrontendInstrumentation))) {
      return make_error<InstrProfError>(instrprof_error::unsupported_version);
    }
    if (testIncompatible(InstrProfKind::FunctionEntryOnly,
                         InstrProfKind::FunctionEntryInstrumentation)) {
      return make_error<InstrProfError>(
          instrprof_error::unsupported_version,
          "cannot merge FunctionEntryOnly profiles and BB profiles together");
    }

    // Now we update the profile type with the bits that are set.
    ProfileKind |= Other;
    return Error::success();
  }

  InstrProfKind getProfileKind() const { return ProfileKind; }

  // Internal interface for testing purpose only.
  void setValueProfDataEndianness(support::endianness Endianness);
  void setOutputSparse(bool Sparse);
  // Compute the overlap between this object and Other. The program-level
  // result is stored in Overlap and the function-level result in
  // FuncLevelOverlap.
  void overlapRecord(NamedInstrProfRecord &&Other, OverlapStats &Overlap,
                     OverlapStats &FuncLevelOverlap,
                     const OverlapFuncFilters &FuncFilter);

private:
  void addRecord(StringRef Name, uint64_t Hash, InstrProfRecord &&I,
                 uint64_t Weight, function_ref<void(Error)> Warn);
  bool shouldEncodeData(const ProfilingData &PD);
  /// Add \p Trace using reservoir sampling.
  void addTemporalProfileTrace(TemporalProfTraceTy Trace);

  Error writeImpl(ProfOStream &OS);
};
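
/// Illustrative sketch (not part of this header): building and writing a
/// small profile. The function name, hash, and counter values are made up.
/// \code
///   InstrProfWriter Writer;
///   Writer.addRecord(NamedInstrProfRecord("main", /*Hash=*/0x1234, {1, 2, 3}),
///                    [](Error E) { consumeError(std::move(E)); });
///   std::unique_ptr<MemoryBuffer> Buf = Writer.writeBuffer();
/// \endcode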

} // end namespace llvm

#endif // LLVM_PROFILEDATA_INSTRPROFWRITER_H
#ifndef LLVM_PROFILEDATA_MEMPROF_H_
#define LLVM_PROFILEDATA_MEMPROF_H_

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"

#include <cstdint>
#include <optional>

namespace llvm {
namespace memprof {

enum class Meta : uint64_t {
  Start = 0,
#define MIBEntryDef(NameTag, Name, Type) NameTag,
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
  Size
};

using MemProfSchema = llvm::SmallVector<Meta, static_cast<int>(Meta::Size)>;

// Holds the actual MemInfoBlock data with all fields. Contents may be read or
// written partially by providing an appropriate schema to the serialize and
// deserialize methods.
struct PortableMemInfoBlock {
  PortableMemInfoBlock() = default;
  explicit PortableMemInfoBlock(const MemInfoBlock &Block) {
#define MIBEntryDef(NameTag, Name, Type) Name = Block.Name;
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
  }

  PortableMemInfoBlock(const MemProfSchema &Schema, const unsigned char *Ptr) {
    deserialize(Schema, Ptr);
  }

  // Read the contents of \p Ptr based on the \p Schema to populate the
  // MemInfoBlock member.
  void deserialize(const MemProfSchema &Schema, const unsigned char *Ptr) {
    using namespace support;

    for (const Meta Id : Schema) {
      switch (Id) {
#define MIBEntryDef(NameTag, Name, Type)                                       \
  case Meta::Name: {                                                           \
    Name = endian::readNext<Type, little, unaligned>(Ptr);                     \
  } break;
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
      default:
        llvm_unreachable("Unknown meta type id, is the profile collected from "
                         "a newer version of the runtime?");
      }
    }
  }

  // Write the contents of the MemInfoBlock based on the \p Schema provided to
  // the raw_ostream \p OS.
  void serialize(const MemProfSchema &Schema, raw_ostream &OS) const {
    using namespace support;

    endian::Writer LE(OS, little);
    for (const Meta Id : Schema) {
      switch (Id) {
#define MIBEntryDef(NameTag, Name, Type)                                       \
  case Meta::Name: {                                                           \
    LE.write<Type>(Name);                                                      \
  } break;
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
      default:
        llvm_unreachable("Unknown meta type id, invalid input?");
      }
    }
  }

  // Print out the contents of the MemInfoBlock in YAML format.
  void printYAML(raw_ostream &OS) const {
    OS << "      MemInfoBlock:\n";
#define MIBEntryDef(NameTag, Name, Type)                                       \
  OS << "        " << #Name << ": " << Name << "\n";
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
  }

  // Define getters for each type which can be called by analyses.
#define MIBEntryDef(NameTag, Name, Type)                                       \
  Type get##Name() const { return Name; }
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef

  void clear() { *this = PortableMemInfoBlock(); }

  // Returns the full schema currently in use.
  static MemProfSchema getSchema() {
    MemProfSchema List;
#define MIBEntryDef(NameTag, Name, Type) List.push_back(Meta::Name);
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
    return List;
  }

  bool operator==(const PortableMemInfoBlock &Other) const {
#define MIBEntryDef(NameTag, Name, Type)                                       \
  if (Other.get##Name() != get##Name())                                        \
    return false;
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
    return true;
  }

  bool operator!=(const PortableMemInfoBlock &Other) const {
    return !operator==(Other);
  }

  static constexpr size_t serializedSize() {
    size_t Result = 0;
#define MIBEntryDef(NameTag, Name, Type) Result += sizeof(Type);
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
    return Result;
  }

private:
#define MIBEntryDef(NameTag, Name, Type) Type Name = Type();
#include "llvm/ProfileData/MIBEntryDef.inc"
#undef MIBEntryDef
};
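
// Illustrative sketch (not part of this header): a serialize/deserialize
// round trip over the full schema.
// \code
//   PortableMemInfoBlock Block;
//   std::string Bytes;
//   llvm::raw_string_ostream OS(Bytes);
//   Block.serialize(PortableMemInfoBlock::getSchema(), OS);
//   OS.flush();
//   PortableMemInfoBlock Copy(
//       PortableMemInfoBlock::getSchema(),
//       reinterpret_cast<const unsigned char *>(Bytes.data()));
//   assert(Copy == Block);
// \endcode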

// A type representing the id generated by hashing the contents of the Frame.
using FrameId = uint64_t;
// Describes a call frame for a dynamic allocation context. The contents of
// the frame are populated by symbolizing the stack depot call frame from the
// compiler runtime.
struct Frame {
  // A uuid (uint64_t) identifying the function. It is obtained by taking the
  // lower 64 bits of llvm::md5(FunctionName).
  GlobalValue::GUID Function;
  // The symbol name for the function. Only populated in the Frame by the reader
  // if requested during initialization. This field should not be serialized.
  std::optional<std::string> SymbolName;
  // The source line offset of the call from the beginning of the parent
  // function.
  uint32_t LineOffset;
  // The source column number of the call to help distinguish multiple calls
  // on the same line.
  uint32_t Column;
  // Whether the current frame is inlined.
  bool IsInlineFrame;

  Frame(const Frame &Other) {
    Function = Other.Function;
    SymbolName = Other.SymbolName;
    LineOffset = Other.LineOffset;
    Column = Other.Column;
    IsInlineFrame = Other.IsInlineFrame;
  }

  Frame(uint64_t Hash, uint32_t Off, uint32_t Col, bool Inline)
      : Function(Hash), LineOffset(Off), Column(Col), IsInlineFrame(Inline) {}

  bool operator==(const Frame &Other) const {
    // Ignore the SymbolName field to avoid a string compare. Comparing the
    // function hash serves the same purpose.
    return Other.Function == Function && Other.LineOffset == LineOffset &&
           Other.Column == Column && Other.IsInlineFrame == IsInlineFrame;
  }

  Frame &operator=(const Frame &Other) {
    Function = Other.Function;
    SymbolName = Other.SymbolName;
    LineOffset = Other.LineOffset;
    Column = Other.Column;
    IsInlineFrame = Other.IsInlineFrame;
    return *this;
  }

  bool operator!=(const Frame &Other) const { return !operator==(Other); }

  // Write the contents of the frame to the ostream \p OS.
  void serialize(raw_ostream &OS) const {
    using namespace support;

    endian::Writer LE(OS, little);

    // If the type of the GlobalValue::GUID changes, then we need to update
    // the reader and the writer.
    static_assert(std::is_same<GlobalValue::GUID, uint64_t>::value,
                  "Expect GUID to be uint64_t.");
    LE.write<uint64_t>(Function);

    LE.write<uint32_t>(LineOffset);
    LE.write<uint32_t>(Column);
    LE.write<bool>(IsInlineFrame);
  }

  // Read a frame from char data which has been serialized as little endian.
  static Frame deserialize(const unsigned char *Ptr) {
    using namespace support;

    const uint64_t F = endian::readNext<uint64_t, little, unaligned>(Ptr);
    const uint32_t L = endian::readNext<uint32_t, little, unaligned>(Ptr);
    const uint32_t C = endian::readNext<uint32_t, little, unaligned>(Ptr);
    const bool I = endian::readNext<bool, little, unaligned>(Ptr);
    return Frame(/*Function=*/F, /*LineOffset=*/L, /*Column=*/C,
                 /*IsInlineFrame=*/I);
  }

  // Returns the size of the frame information.
  static constexpr size_t serializedSize() {
    return sizeof(Frame::Function) + sizeof(Frame::LineOffset) +
           sizeof(Frame::Column) + sizeof(Frame::IsInlineFrame);
  }

  // Print the frame information in YAML format.
  void printYAML(raw_ostream &OS) const {
    OS << "      -\n"
       << "        Function: " << Function << "\n"
       << "        SymbolName: " << SymbolName.value_or("<None>") << "\n"
       << "        LineOffset: " << LineOffset << "\n"
       << "        Column: " << Column << "\n"
       << "        Inline: " << IsInlineFrame << "\n";
  }

  // Return a hash value based on the contents of the frame. Here we don't use
  // hashing from llvm ADT since we are going to persist the hash id; the hash
  // combine algorithm in ADT uses a new randomized seed each time.
  inline FrameId hash() const {
    auto HashCombine = [](auto Value, size_t Seed) {
      std::hash<decltype(Value)> Hasher;
      // The constant used below is the 64 bit representation of the fractional
      // part of the golden ratio. Used here for the randomness in their bit
      // pattern.
      return Hasher(Value) + 0x9e3779b97f4a7c15 + (Seed << 6) + (Seed >> 2);
    };

    size_t Result = 0;
    Result ^= HashCombine(Function, Result);
    Result ^= HashCombine(LineOffset, Result);
    Result ^= HashCombine(Column, Result);
    Result ^= HashCombine(IsInlineFrame, Result);
    return static_cast<FrameId>(Result);
  }
};
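
// Illustrative sketch (not part of this header): frames serialize and hash
// deterministically, so a round trip preserves both contents and FrameId.
// \code
//   Frame F(/*Function=*/0x1234, /*Off=*/2, /*Col=*/5, /*Inline=*/false);
//   std::string Bytes;
//   llvm::raw_string_ostream OS(Bytes);
//   F.serialize(OS);
//   OS.flush();
//   Frame G = Frame::deserialize(
//       reinterpret_cast<const unsigned char *>(Bytes.data()));
//   assert(G == F && G.hash() == F.hash());
// \endcode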

// Holds allocation information in a space efficient format where frames are
// represented using unique identifiers.
struct IndexedAllocationInfo {
  // The dynamic calling context for the allocation in bottom-up (leaf-to-root)
  // order. Frame contents are stored out-of-line.
  llvm::SmallVector<FrameId> CallStack;
  // The statistics obtained from the runtime for the allocation.
  PortableMemInfoBlock Info;

  IndexedAllocationInfo() = default;
  IndexedAllocationInfo(ArrayRef<FrameId> CS, const MemInfoBlock &MB)
      : CallStack(CS.begin(), CS.end()), Info(MB) {}

  // Returns the size in bytes when this allocation info struct is serialized.
  size_t serializedSize() const {
    return sizeof(uint64_t) + // The number of frames to serialize.
           sizeof(FrameId) * CallStack.size() +    // The callstack frame ids.
           PortableMemInfoBlock::serializedSize(); // The size of the payload.
  }

  bool operator==(const IndexedAllocationInfo &Other) const {
    if (Other.Info != Info)
      return false;

    if (Other.CallStack.size() != CallStack.size())
      return false;

    for (size_t J = 0; J < Other.CallStack.size(); J++) {
      if (Other.CallStack[J] != CallStack[J])
        return false;
    }
    return true;
  }

  bool operator!=(const IndexedAllocationInfo &Other) const {
    return !operator==(Other);
  }
};

// Holds allocation information with frame contents inline. The type should
// be used for temporary in-memory instances.
struct AllocationInfo {
  // Same as IndexedAllocationInfo::CallStack with the frame contents inline.
  llvm::SmallVector<Frame> CallStack;
  // Same as IndexedAllocationInfo::Info;
  PortableMemInfoBlock Info;

  AllocationInfo() = default;
  AllocationInfo(
      const IndexedAllocationInfo &IndexedAI,
      llvm::function_ref<const Frame(const FrameId)> IdToFrameCallback) {
    for (const FrameId &Id : IndexedAI.CallStack) {
      CallStack.push_back(IdToFrameCallback(Id));
    }
    Info = IndexedAI.Info;
  }

  void printYAML(raw_ostream &OS) const {
    OS << "    -\n";
    OS << "      Callstack:\n";
    // TODO: Print out the frame on one line to make deep callstacks easier to
    // read, once we have a test to check that valid YAML is generated.
    for (const Frame &F : CallStack) {
      F.printYAML(OS);
    }
    Info.printYAML(OS);
  }
};

// Holds the memprof profile information for a function. The internal
// representation stores frame ids for efficiency. This representation should
// be used in the profile conversion and manipulation tools.
struct IndexedMemProfRecord {
  // Memory allocation sites in this function for which we have memory
  // profiling data.
  llvm::SmallVector<IndexedAllocationInfo> AllocSites;
  // Holds call sites in this function which are part of some memory
  // allocation context. We store this as a list of locations, each with its
  // list of inline locations in bottom-up order, i.e. from leaf to root. The
  // inline location list may include additional entries, users should pick
  // the last entry in the list with the same function GUID.
  llvm::SmallVector<llvm::SmallVector<FrameId>> CallSites;

  void clear() {
    AllocSites.clear();
    CallSites.clear();
  }

  void merge(const IndexedMemProfRecord &Other) {
    // TODO: Filter out duplicates which may occur if multiple memprof
    // profiles are merged together using llvm-profdata.
    AllocSites.append(Other.AllocSites);
    CallSites.append(Other.CallSites);
  }

  size_t serializedSize() const {
    size_t Result = sizeof(GlobalValue::GUID);
    for (const IndexedAllocationInfo &N : AllocSites)
      Result += N.serializedSize();

    // The number of callsites we have information for.
    Result += sizeof(uint64_t);
    for (const auto &Frames : CallSites) {
      // The number of frame ids to serialize.
      Result += sizeof(uint64_t);
      Result += Frames.size() * sizeof(FrameId);
    }
    return Result;
  }

  bool operator==(const IndexedMemProfRecord &Other) const {
    if (Other.AllocSites.size() != AllocSites.size())
      return false;

    if (Other.CallSites.size() != CallSites.size())
      return false;

    for (size_t I = 0; I < AllocSites.size(); I++) {
      if (AllocSites[I] != Other.AllocSites[I])
        return false;
    }

    for (size_t I = 0; I < CallSites.size(); I++) {
      if (CallSites[I] != Other.CallSites[I])
        return false;
    }
    return true;
  }

  // Serializes the memprof records in \p Records to the ostream \p OS based
  // on the schema provided in \p Schema.
  void serialize(const MemProfSchema &Schema, raw_ostream &OS);

  // Deserializes memprof records from the Buffer.
  static IndexedMemProfRecord deserialize(const MemProfSchema &Schema,
                                          const unsigned char *Buffer);

  // Returns the GUID for the function name after canonicalization. For
  // memprof, we remove any .llvm suffix added by LTO. MemProfRecords are
  // mapped to functions using this GUID.
  static GlobalValue::GUID getGUID(const StringRef FunctionName);
};

// Holds the memprof profile information for a function. The internal
// representation stores frame contents inline. This representation should
// be used for a small number of temporary, in-memory instances.
struct MemProfRecord {
  // Same as IndexedMemProfRecord::AllocSites with frame contents inline.
  llvm::SmallVector<AllocationInfo> AllocSites;
  // Same as IndexedMemProfRecord::CallSites with frame contents inline.
  llvm::SmallVector<llvm::SmallVector<Frame>> CallSites;

  MemProfRecord() = default;
  MemProfRecord(
      const IndexedMemProfRecord &Record,
      llvm::function_ref<const Frame(const FrameId Id)> IdToFrameCallback) {
    for (const IndexedAllocationInfo &IndexedAI : Record.AllocSites) {
      AllocSites.emplace_back(IndexedAI, IdToFrameCallback);
    }
    for (const ArrayRef<FrameId> Site : Record.CallSites) {
      llvm::SmallVector<Frame> Frames;
      for (const FrameId Id : Site) {
        Frames.push_back(IdToFrameCallback(Id));
      }
      CallSites.push_back(Frames);
    }
  }

  // Prints out the contents of the memprof record in YAML.
  void print(llvm::raw_ostream &OS) const {
    if (!AllocSites.empty()) {
      OS << "    AllocSites:\n";
      for (const AllocationInfo &N : AllocSites)
        N.printYAML(OS);
    }

    if (!CallSites.empty()) {
      OS << "    CallSites:\n";
      for (const llvm::SmallVector<Frame> &Frames : CallSites) {
        for (const Frame &F : Frames) {
          OS << "    -\n";
          F.printYAML(OS);
        }
      }
    }
  }
};

// Reads a memprof schema from a buffer. All entries in the buffer are
// interpreted as uint64_t. The first entry in the buffer denotes the number of
// ids in the schema. Subsequent entries are integers which map to
// memprof::Meta enum class entries. After the schema is read successfully, the
// pointer is advanced to just past the schema contents.
Expected<MemProfSchema> readMemProfSchema(const unsigned char *&Buffer);

// Trait for reading IndexedMemProfRecord data from the on-disk hash table.
class RecordLookupTrait {
public:
  using data_type = const IndexedMemProfRecord &;
  using internal_key_type = uint64_t;
  using external_key_type = uint64_t;
  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  RecordLookupTrait() = delete;
  RecordLookupTrait(const MemProfSchema &S) : Schema(S) {}

  static bool EqualKey(uint64_t A, uint64_t B) { return A == B; }
  static uint64_t GetInternalKey(uint64_t K) { return K; }
  static uint64_t GetExternalKey(uint64_t K) { return K; }

  hash_value_type ComputeHash(uint64_t K) { return K; }

  static std::pair<offset_type, offset_type>
  ReadKeyDataLength(const unsigned char *&D) {
    using namespace support;

    offset_type KeyLen = endian::readNext<offset_type, little, unaligned>(D);
    offset_type DataLen = endian::readNext<offset_type, little, unaligned>(D);
    return std::make_pair(KeyLen, DataLen);
  }

  uint64_t ReadKey(const unsigned char *D, offset_type /*Unused*/) {
    using namespace support;
    return endian::readNext<external_key_type, little, unaligned>(D);
  }

  data_type ReadData(uint64_t K, const unsigned char *D,
                     offset_type /*Unused*/) {
    Record = IndexedMemProfRecord::deserialize(Schema, D);
    return Record;
  }

private:
  // Holds the memprof schema used to deserialize records.
  MemProfSchema Schema;
  // Holds the records from one function deserialized from the indexed format.
  IndexedMemProfRecord Record;
};

// Trait for writing IndexedMemProfRecord data to the on-disk hash table.
class RecordWriterTrait {
public:
  using key_type = uint64_t;
  using key_type_ref = uint64_t;

  using data_type = IndexedMemProfRecord;
  using data_type_ref = IndexedMemProfRecord &;

  using hash_value_type = uint64_t;
  using offset_type = uint64_t;

  // Pointer to the memprof schema to use for the generator. Unlike the
  // reader, the writer trait must be default-constructible with no
  // parameters, so the schema is a public member that must be initialized by
  // the user.
  MemProfSchema *Schema = nullptr;

  RecordWriterTrait() = default;

  static hash_value_type ComputeHash(key_type_ref K) { return K; }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;

    endian::Writer LE(Out, little);
    offset_type N = sizeof(K);
    LE.write<offset_type>(N);
    offset_type M = V.serializedSize();
    LE.write<offset_type>(M);
    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type /*Unused*/) {
    using namespace support;
    endian::Writer LE(Out, little);
    LE.write<uint64_t>(K);
  }

  void EmitData(raw_ostream &Out, key_type_ref /*Unused*/, data_type_ref V,
                offset_type /*Unused*/) {
    assert(Schema != nullptr && "MemProf schema is not initialized!");
    V.serialize(*Schema, Out);
  }
};

// Trait for writing frame mappings to the on-disk hash table.
class FrameWriterTrait {
public:
  using key_type = FrameId;
  using key_type_ref = FrameId;

  using data_type = Frame;
  using data_type_ref = Frame &;

  using hash_value_type = FrameId;
  using offset_type = uint64_t;

  static hash_value_type ComputeHash(key_type_ref K) { return K; }

  static std::pair<offset_type, offset_type>
  EmitKeyDataLength(raw_ostream &Out, key_type_ref K, data_type_ref V) {
    using namespace support;
    endian::Writer LE(Out, little);
    offset_type N = sizeof(K);
    LE.write<offset_type>(N);
    offset_type M = V.serializedSize();
    LE.write<offset_type>(M);
    return std::make_pair(N, M);
  }

  void EmitKey(raw_ostream &Out, key_type_ref K, offset_type /*Unused*/) {
    using namespace support;
    endian::Writer LE(Out, little);
    LE.write<key_type>(K);
  }

  void EmitData(raw_ostream &Out, key_type_ref /*Unused*/, data_type_ref V,
                offset_type /*Unused*/) {
    V.serialize(Out);
  }
};

// Trait for reading frame mappings from the on-disk hash table.
class FrameLookupTrait {
public:
  using data_type = const Frame;
  using internal_key_type = FrameId;
  using external_key_type = FrameId;
  using hash_value_type = FrameId;
  using offset_type = uint64_t;

  static bool EqualKey(internal_key_type A, internal_key_type B) {
    return A == B;
  }
  static uint64_t GetInternalKey(internal_key_type K) { return K; }
  static uint64_t GetExternalKey(external_key_type K) { return K; }

  hash_value_type ComputeHash(internal_key_type K) { return K; }

  static std::pair<offset_type, offset_type>
  ReadKeyDataLength(const unsigned char *&D) {
    using namespace support;

    offset_type KeyLen = endian::readNext<offset_type, little, unaligned>(D);
    offset_type DataLen = endian::readNext<offset_type, little, unaligned>(D);
    return std::make_pair(KeyLen, DataLen);
  }

  uint64_t ReadKey(const unsigned char *D, offset_type /*Unused*/) {
    using namespace support;
    return endian::readNext<external_key_type, little, unaligned>(D);
  }

  data_type ReadData(uint64_t K, const unsigned char *D,
                     offset_type /*Unused*/) {
    return Frame::deserialize(D);
  }
};
} // namespace memprof
} // namespace llvm

#endif // LLVM_PROFILEDATA_MEMPROF_H_
//===- CoverageMappingReader.h - Code coverage mapping reader ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading coverage mapping data for
// instrumentation based coverage.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGREADER_H
#define LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGREADER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <vector>

namespace llvm {
namespace coverage {

class CoverageMappingReader;

/// Coverage mapping information for a single function.
struct CoverageMappingRecord {
  StringRef FunctionName;
  uint64_t FunctionHash;
  ArrayRef<StringRef> Filenames;
  ArrayRef<CounterExpression> Expressions;
  ArrayRef<CounterMappingRegion> MappingRegions;
};

/// A file format agnostic iterator over coverage mapping data.
class CoverageMappingIterator {
  CoverageMappingReader *Reader;
  CoverageMappingRecord Record;
  coveragemap_error ReadErr;

  void increment();

public:
  using iterator_category = std::input_iterator_tag;
  using value_type = CoverageMappingRecord;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  CoverageMappingIterator()
      : Reader(nullptr), ReadErr(coveragemap_error::success) {}

  CoverageMappingIterator(CoverageMappingReader *Reader)
      : Reader(Reader), ReadErr(coveragemap_error::success) {
    increment();
  }

  ~CoverageMappingIterator() {
    if (ReadErr != coveragemap_error::success)
      llvm_unreachable("Unexpected error in coverage mapping iterator");
  }

  CoverageMappingIterator &operator++() {
    increment();
    return *this;
  }
  bool operator==(const CoverageMappingIterator &RHS) const {
    return Reader == RHS.Reader;
  }
  bool operator!=(const CoverageMappingIterator &RHS) const {
    return Reader != RHS.Reader;
  }
  Expected<CoverageMappingRecord &> operator*() {
    if (ReadErr != coveragemap_error::success) {
      auto E = make_error<CoverageMapError>(ReadErr);
      ReadErr = coveragemap_error::success;
      return std::move(E);
    }
    return Record;
  }
  Expected<CoverageMappingRecord *> operator->() {
    if (ReadErr != coveragemap_error::success) {
      auto E = make_error<CoverageMapError>(ReadErr);
      ReadErr = coveragemap_error::success;
      return std::move(E);
    }
    return &Record;
  }
};

class CoverageMappingReader {
public:
  virtual ~CoverageMappingReader() = default;

  virtual Error readNextRecord(CoverageMappingRecord &Record) = 0;
  CoverageMappingIterator begin() { return CoverageMappingIterator(this); }
  CoverageMappingIterator end() { return CoverageMappingIterator(); }
};
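
/// Illustrative sketch (not part of this header): draining a reader through
/// its iterator interface, assuming \c Reader points to a concrete
/// CoverageMappingReader. Each dereference yields an
/// Expected<CoverageMappingRecord &>.
/// \code
///   for (auto RecordOrErr : *Reader) {
///     if (!RecordOrErr) {
///       consumeError(RecordOrErr.takeError());
///       break;
///     }
///     const CoverageMappingRecord &Record = *RecordOrErr;
///     (void)Record.FunctionName;
///   }
/// \endcode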

/// Base class for the raw coverage mapping and filenames data readers.
class RawCoverageReader {
protected:
  StringRef Data;

  RawCoverageReader(StringRef Data) : Data(Data) {}

  Error readULEB128(uint64_t &Result);
  Error readIntMax(uint64_t &Result, uint64_t MaxPlus1);
  Error readSize(uint64_t &Result);
  Error readString(StringRef &Result);
};

/// Checks if the given coverage mapping data is exported for
/// an unused function.
class RawCoverageMappingDummyChecker : public RawCoverageReader {
public:
  RawCoverageMappingDummyChecker(StringRef MappingData)
      : RawCoverageReader(MappingData) {}

  Expected<bool> isDummy();
};

/// Reader for the raw coverage mapping data.
class RawCoverageMappingReader : public RawCoverageReader {
  ArrayRef<std::string> &TranslationUnitFilenames;
  std::vector<StringRef> &Filenames;
  std::vector<CounterExpression> &Expressions;
  std::vector<CounterMappingRegion> &MappingRegions;

public:
  RawCoverageMappingReader(StringRef MappingData,
                           ArrayRef<std::string> &TranslationUnitFilenames,
                           std::vector<StringRef> &Filenames,
                           std::vector<CounterExpression> &Expressions,
                           std::vector<CounterMappingRegion> &MappingRegions)
      : RawCoverageReader(MappingData),
        TranslationUnitFilenames(TranslationUnitFilenames),
        Filenames(Filenames), Expressions(Expressions),
        MappingRegions(MappingRegions) {}
  RawCoverageMappingReader(const RawCoverageMappingReader &) = delete;
  RawCoverageMappingReader &
  operator=(const RawCoverageMappingReader &) = delete;

  Error read();

private:
  Error decodeCounter(unsigned Value, Counter &C);
  Error readCounter(Counter &C);
  Error
  readMappingRegionsSubArray(std::vector<CounterMappingRegion> &MappingRegions,
                             unsigned InferredFileID, size_t NumFileIDs);
};

/// Reader for the coverage mapping data that is emitted by the
/// frontend and stored in an object file.
class BinaryCoverageReader : public CoverageMappingReader {
public:
  struct ProfileMappingRecord {
    CovMapVersion Version;
    StringRef FunctionName;
    uint64_t FunctionHash;
    StringRef CoverageMapping;
    size_t FilenamesBegin;
    size_t FilenamesSize;

    ProfileMappingRecord(CovMapVersion Version, StringRef FunctionName,
                         uint64_t FunctionHash, StringRef CoverageMapping,
                         size_t FilenamesBegin, size_t FilenamesSize)
        : Version(Version), FunctionName(FunctionName),
          FunctionHash(FunctionHash), CoverageMapping(CoverageMapping),
          FilenamesBegin(FilenamesBegin), FilenamesSize(FilenamesSize) {}
  };

  using FuncRecordsStorage = std::unique_ptr<MemoryBuffer>;

private:
  std::vector<std::string> Filenames;
  std::vector<ProfileMappingRecord> MappingRecords;
  InstrProfSymtab ProfileNames;
  size_t CurrentRecord = 0;
  std::vector<StringRef> FunctionsFilenames;
  std::vector<CounterExpression> Expressions;
  std::vector<CounterMappingRegion> MappingRegions;

  // Used to tie the lifetimes of coverage function records to the lifetime of
  // this BinaryCoverageReader instance. Needed to support the format change in
  // D69471, which can split up function records into multiple sections on ELF.
  FuncRecordsStorage FuncRecords;

  BinaryCoverageReader(FuncRecordsStorage &&FuncRecords)
      : FuncRecords(std::move(FuncRecords)) {}

public:
  BinaryCoverageReader(const BinaryCoverageReader &) = delete;
  BinaryCoverageReader &operator=(const BinaryCoverageReader &) = delete;

  static Expected<std::vector<std::unique_ptr<BinaryCoverageReader>>>
  create(MemoryBufferRef ObjectBuffer, StringRef Arch,
         SmallVectorImpl<std::unique_ptr<MemoryBuffer>> &ObjectFileBuffers,
         StringRef CompilationDir = "",
         SmallVectorImpl<object::BuildIDRef> *BinaryIDs = nullptr);

  static Expected<std::unique_ptr<BinaryCoverageReader>>
  createCoverageReaderFromBuffer(StringRef Coverage,
                                 FuncRecordsStorage &&FuncRecords,
                                 InstrProfSymtab &&ProfileNames,
                                 uint8_t BytesInAddress,
                                 support::endianness Endian,
                                 StringRef CompilationDir = "");

  Error readNextRecord(CoverageMappingRecord &Record) override;
};
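
// A usage sketch, assuming `ObjectBuffer` is a std::unique_ptr<MemoryBuffer>
// holding the binary. create() may yield several readers, e.g. one per slice
// of a universal binary:
//
// \code
//   SmallVector<std::unique_ptr<MemoryBuffer>, 4> ObjectFileBuffers;
//   auto ReadersOrErr = BinaryCoverageReader::create(
//       ObjectBuffer->getMemBufferRef(), /*Arch=*/"", ObjectFileBuffers);
//   if (!ReadersOrErr)
//     return ReadersOrErr.takeError();
// \endcode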

/// Reader for the raw coverage filenames.
class RawCoverageFilenamesReader : public RawCoverageReader {
  std::vector<std::string> &Filenames;
  StringRef CompilationDir;

  // Read an uncompressed sequence of filenames.
  Error readUncompressed(CovMapVersion Version, uint64_t NumFilenames);

public:
  RawCoverageFilenamesReader(StringRef Data,
                             std::vector<std::string> &Filenames,
                             StringRef CompilationDir = "")
      : RawCoverageReader(Data), Filenames(Filenames),
        CompilationDir(CompilationDir) {}
  RawCoverageFilenamesReader(const RawCoverageFilenamesReader &) = delete;
  RawCoverageFilenamesReader &
  operator=(const RawCoverageFilenamesReader &) = delete;

  Error read(CovMapVersion Version);
};
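
// A minimal decoding sketch, assuming `Data` holds the encoded filenames
// section contents:
//
// \code
//   std::vector<std::string> Filenames;
//   RawCoverageFilenamesReader FilenamesReader(Data, Filenames);
//   if (Error E = FilenamesReader.read(CovMapVersion::CurrentVersion))
//     return E;
// \endcode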

} // end namespace coverage
} // end namespace llvm

#endif // LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGREADER_H

//===- CoverageMapping.h - Code coverage mapping support --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Code coverage mapping data is generated by clang and read by
// llvm-cov to show code coverage statistics for a file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPING_H
#define LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Object/BuildID.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <system_error>
#include <tuple>
#include <utility>
#include <vector>

namespace llvm {

class IndexedInstrProfReader;

namespace object {
class BuildIDFetcher;
} // namespace object

namespace vfs {
class FileSystem;
} // namespace vfs

namespace coverage {

class CoverageMappingReader;
struct CoverageMappingRecord;

enum class coveragemap_error {
  success = 0,
  eof,
  no_data_found,
  unsupported_version,
  truncated,
  malformed,
  decompression_failed,
  invalid_or_missing_arch_specifier
};

const std::error_category &coveragemap_category();

inline std::error_code make_error_code(coveragemap_error E) {
  return std::error_code(static_cast<int>(E), coveragemap_category());
}

class CoverageMapError : public ErrorInfo<CoverageMapError> {
public:
  CoverageMapError(coveragemap_error Err) : Err(Err) {
    assert(Err != coveragemap_error::success && "Not an error");
  }

  std::string message() const override;

  void log(raw_ostream &OS) const override { OS << message(); }

  std::error_code convertToErrorCode() const override {
    return make_error_code(Err);
  }

  coveragemap_error get() const { return Err; }

  static char ID;

private:
  coveragemap_error Err;
};

/// A Counter is an abstract value that describes how to compute the
/// execution count for a region of code using the collected profile count data.
struct Counter {
  /// The CounterExpression kind (Add or Subtract) is encoded in bit 0 next to
  /// the CounterKind. This means CounterKind has to leave bit 0 free.
  enum CounterKind { Zero, CounterValueReference, Expression };
  static const unsigned EncodingTagBits = 2;
  static const unsigned EncodingTagMask = 0x3;
  static const unsigned EncodingCounterTagAndExpansionRegionTagBits =
      EncodingTagBits + 1;

private:
  CounterKind Kind = Zero;
  unsigned ID = 0;

  Counter(CounterKind Kind, unsigned ID) : Kind(Kind), ID(ID) {}

public:
  Counter() = default;

  CounterKind getKind() const { return Kind; }

  bool isZero() const { return Kind == Zero; }

  bool isExpression() const { return Kind == Expression; }

  unsigned getCounterID() const { return ID; }

  unsigned getExpressionID() const { return ID; }

  friend bool operator==(const Counter &LHS, const Counter &RHS) {
    return LHS.Kind == RHS.Kind && LHS.ID == RHS.ID;
  }

  friend bool operator!=(const Counter &LHS, const Counter &RHS) {
    return !(LHS == RHS);
  }

  friend bool operator<(const Counter &LHS, const Counter &RHS) {
    return std::tie(LHS.Kind, LHS.ID) < std::tie(RHS.Kind, RHS.ID);
  }

  /// Return the counter that represents the number zero.
  static Counter getZero() { return Counter(); }

  /// Return the counter that corresponds to a specific profile counter.
  static Counter getCounter(unsigned CounterId) {
    return Counter(CounterValueReference, CounterId);
  }

  /// Return the counter that corresponds to a specific addition counter
  /// expression.
  static Counter getExpression(unsigned ExpressionId) {
    return Counter(Expression, ExpressionId);
  }
};

/// A Counter expression is a value that represents an arithmetic operation
/// with two counters.
struct CounterExpression {
  enum ExprKind { Subtract, Add };
  ExprKind Kind;
  Counter LHS, RHS;

  CounterExpression(ExprKind Kind, Counter LHS, Counter RHS)
      : Kind(Kind), LHS(LHS), RHS(RHS) {}
};

/// A Counter expression builder is used to construct the counter expressions.
/// It avoids unnecessary duplication and simplifies algebraic expressions.
class CounterExpressionBuilder {
  /// A list of all the counter expressions
  std::vector<CounterExpression> Expressions;

  /// A lookup table for the index of a given expression.
  DenseMap<CounterExpression, unsigned> ExpressionIndices;

  /// Return the counter which corresponds to the given expression.
  ///
  /// If the given expression is already stored in the builder, a counter
  /// that references that expression is returned. Otherwise, the given
  /// expression is added to the builder's collection of expressions.
  Counter get(const CounterExpression &E);

  /// Represents a term in a counter expression tree.
  struct Term {
    unsigned CounterID;
    int Factor;

    Term(unsigned CounterID, int Factor)
        : CounterID(CounterID), Factor(Factor) {}
  };

  /// Gather the terms of the expression tree for processing.
  ///
  /// This collects each addition and subtraction referenced by the counter into
  /// a sequence that can be sorted and combined to build a simplified counter
  /// expression.
  void extractTerms(Counter C, int Sign, SmallVectorImpl<Term> &Terms);

  /// Simplifies the given expression tree
  /// by getting rid of algebraically redundant operations.
  Counter simplify(Counter ExpressionTree);

public:
  ArrayRef<CounterExpression> getExpressions() const { return Expressions; }

  /// Return a counter that represents the expression that adds LHS and RHS.
  Counter add(Counter LHS, Counter RHS, bool Simplify = true);

  /// Return a counter that represents the expression that subtracts RHS from
  /// LHS.
  Counter subtract(Counter LHS, Counter RHS, bool Simplify = true);
};
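
// For example, the expression (c0 + c1) - c2 could be built as follows; with
// Simplify left at its default of true, algebraically redundant terms are
// folded away:
//
// \code
//   CounterExpressionBuilder Builder;
//   Counter Sum = Builder.add(Counter::getCounter(0), Counter::getCounter(1));
//   Counter Diff = Builder.subtract(Sum, Counter::getCounter(2));
// \endcode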

using LineColPair = std::pair<unsigned, unsigned>;

/// A Counter mapping region associates a source range with a specific counter.
struct CounterMappingRegion {
  enum RegionKind {
    /// A CodeRegion associates some code with a counter
    CodeRegion,

    /// An ExpansionRegion represents a file expansion region that associates
    /// a source range with the expansion of a virtual source file, such as
    /// for a macro instantiation or #include file.
    ExpansionRegion,

    /// A SkippedRegion represents a source range with code that was skipped
    /// by a preprocessor or similar means.
    SkippedRegion,

    /// A GapRegion is like a CodeRegion, but its count is only set as the
    /// line execution count when it's the only region in the line.
    GapRegion,

    /// A BranchRegion represents leaf-level boolean expressions and is
    /// associated with two counters, each representing the number of times the
    /// expression evaluates to true or false.
    BranchRegion
  };

  /// Primary Counter that is also used for Branch Regions (TrueCount).
  Counter Count;

  /// Secondary Counter used for Branch Regions (FalseCount).
  Counter FalseCount;

  unsigned FileID, ExpandedFileID;
  unsigned LineStart, ColumnStart, LineEnd, ColumnEnd;
  RegionKind Kind;

  CounterMappingRegion(Counter Count, unsigned FileID, unsigned ExpandedFileID,
                       unsigned LineStart, unsigned ColumnStart,
                       unsigned LineEnd, unsigned ColumnEnd, RegionKind Kind)
      : Count(Count), FileID(FileID), ExpandedFileID(ExpandedFileID),
        LineStart(LineStart), ColumnStart(ColumnStart), LineEnd(LineEnd),
        ColumnEnd(ColumnEnd), Kind(Kind) {}

  CounterMappingRegion(Counter Count, Counter FalseCount, unsigned FileID,
                       unsigned ExpandedFileID, unsigned LineStart,
                       unsigned ColumnStart, unsigned LineEnd,
                       unsigned ColumnEnd, RegionKind Kind)
      : Count(Count), FalseCount(FalseCount), FileID(FileID),
        ExpandedFileID(ExpandedFileID), LineStart(LineStart),
        ColumnStart(ColumnStart), LineEnd(LineEnd), ColumnEnd(ColumnEnd),
        Kind(Kind) {}

  static CounterMappingRegion
  makeRegion(Counter Count, unsigned FileID, unsigned LineStart,
             unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
    return CounterMappingRegion(Count, FileID, 0, LineStart, ColumnStart,
                                LineEnd, ColumnEnd, CodeRegion);
  }

  static CounterMappingRegion
  makeExpansion(unsigned FileID, unsigned ExpandedFileID, unsigned LineStart,
                unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
    return CounterMappingRegion(Counter(), FileID, ExpandedFileID, LineStart,
                                ColumnStart, LineEnd, ColumnEnd,
                                ExpansionRegion);
  }

  static CounterMappingRegion
  makeSkipped(unsigned FileID, unsigned LineStart, unsigned ColumnStart,
              unsigned LineEnd, unsigned ColumnEnd) {
    return CounterMappingRegion(Counter(), FileID, 0, LineStart, ColumnStart,
                                LineEnd, ColumnEnd, SkippedRegion);
  }

  static CounterMappingRegion
  makeGapRegion(Counter Count, unsigned FileID, unsigned LineStart,
                unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
    return CounterMappingRegion(Count, FileID, 0, LineStart, ColumnStart,
                                LineEnd, (1U << 31) | ColumnEnd, GapRegion);
  }

  static CounterMappingRegion
  makeBranchRegion(Counter Count, Counter FalseCount, unsigned FileID,
                   unsigned LineStart, unsigned ColumnStart, unsigned LineEnd,
                   unsigned ColumnEnd) {
    return CounterMappingRegion(Count, FalseCount, FileID, 0, LineStart,
                                ColumnStart, LineEnd, ColumnEnd, BranchRegion);
  }

  inline LineColPair startLoc() const {
    return LineColPair(LineStart, ColumnStart);
  }

  inline LineColPair endLoc() const { return LineColPair(LineEnd, ColumnEnd); }
};
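
// For illustration, a plain code region attributing counter 0 to file 0 over
// lines 1:1 through 9:2, and a branch region with separate true/false
// counters, could be built with the factories above:
//
// \code
//   auto Code = CounterMappingRegion::makeRegion(Counter::getCounter(0),
//                                                /*FileID=*/0, 1, 1, 9, 2);
//   auto Branch = CounterMappingRegion::makeBranchRegion(
//       Counter::getCounter(1), Counter::getCounter(2), /*FileID=*/0,
//       /*LineStart=*/3, /*ColumnStart=*/7, /*LineEnd=*/3, /*ColumnEnd=*/12);
// \endcode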

/// Associates a source range with an execution count.
struct CountedRegion : public CounterMappingRegion {
  uint64_t ExecutionCount;
  uint64_t FalseExecutionCount;
  bool Folded;

  CountedRegion(const CounterMappingRegion &R, uint64_t ExecutionCount)
      : CounterMappingRegion(R), ExecutionCount(ExecutionCount),
        FalseExecutionCount(0), Folded(false) {}

  CountedRegion(const CounterMappingRegion &R, uint64_t ExecutionCount,
                uint64_t FalseExecutionCount)
      : CounterMappingRegion(R), ExecutionCount(ExecutionCount),
        FalseExecutionCount(FalseExecutionCount), Folded(false) {}
};

/// A Counter mapping context is used to connect the counters, expressions
/// and the obtained counter values.
class CounterMappingContext {
  ArrayRef<CounterExpression> Expressions;
  ArrayRef<uint64_t> CounterValues;

public:
  CounterMappingContext(ArrayRef<CounterExpression> Expressions,
                        ArrayRef<uint64_t> CounterValues = std::nullopt)
      : Expressions(Expressions), CounterValues(CounterValues) {}

  void setCounts(ArrayRef<uint64_t> Counts) { CounterValues = Counts; }

  void dump(const Counter &C, raw_ostream &OS) const;
  void dump(const Counter &C) const { dump(C, dbgs()); }

  /// Return the number of times that a region of code associated with this
  /// counter was executed.
  Expected<int64_t> evaluate(const Counter &C) const;

  unsigned getMaxCounterID(const Counter &C) const;
};
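
// An evaluation sketch, assuming `Expressions` and `CounterValues` come from
// a decoded mapping and its matching profile record:
//
// \code
//   CounterMappingContext Ctx(Expressions, CounterValues);
//   Expected<int64_t> ExecCount = Ctx.evaluate(Region.Count);
//   if (!ExecCount)
//     return ExecCount.takeError();
// \endcode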

/// Code coverage information for a single function.
struct FunctionRecord {
  /// Raw function name.
  std::string Name;
  /// Mapping from FileID (i.e. vector index) to filename. Used to support
  /// macro expansions within a function in which the macro and function are
  /// defined in separate files.
  ///
  /// TODO: Uniquing filenames across all function records may be a performance
  /// optimization.
  std::vector<std::string> Filenames;
  /// Regions in the function along with their counts.
  std::vector<CountedRegion> CountedRegions;
  /// Branch Regions in the function along with their counts.
  std::vector<CountedRegion> CountedBranchRegions;
  /// The number of times this function was executed.
  uint64_t ExecutionCount = 0;

  FunctionRecord(StringRef Name, ArrayRef<StringRef> Filenames)
      : Name(Name), Filenames(Filenames.begin(), Filenames.end()) {}

  FunctionRecord(FunctionRecord &&FR) = default;
  FunctionRecord &operator=(FunctionRecord &&) = default;

  void pushRegion(CounterMappingRegion Region, uint64_t Count,
                  uint64_t FalseCount) {
    if (Region.Kind == CounterMappingRegion::BranchRegion) {
      CountedBranchRegions.emplace_back(Region, Count, FalseCount);
      // If both counters are hard-coded to zero, then this region represents a
      // constant-folded branch.
      if (Region.Count.isZero() && Region.FalseCount.isZero())
        CountedBranchRegions.back().Folded = true;
      return;
    }
    if (CountedRegions.empty())
      ExecutionCount = Count;
    CountedRegions.emplace_back(Region, Count, FalseCount);
  }
};

/// Iterator over Functions, optionally filtered to a single file.
class FunctionRecordIterator
    : public iterator_facade_base<FunctionRecordIterator,
                                  std::forward_iterator_tag, FunctionRecord> {
  ArrayRef<FunctionRecord> Records;
  ArrayRef<FunctionRecord>::iterator Current;
  StringRef Filename;

  /// Skip records whose primary file is not \c Filename.
  void skipOtherFiles();

public:
  FunctionRecordIterator(ArrayRef<FunctionRecord> Records_,
                         StringRef Filename = "")
      : Records(Records_), Current(Records.begin()), Filename(Filename) {
    skipOtherFiles();
  }

  FunctionRecordIterator() : Current(Records.begin()) {}

  bool operator==(const FunctionRecordIterator &RHS) const {
    return Current == RHS.Current && Filename == RHS.Filename;
  }

  const FunctionRecord &operator*() const { return *Current; }

  FunctionRecordIterator &operator++() {
    assert(Current != Records.end() && "incremented past end");
    ++Current;
    skipOtherFiles();
    return *this;
  }
};

/// Coverage information for a macro expansion or #included file.
///
/// When covered code has pieces that can be expanded for more detail, such as a
/// preprocessor macro use and its definition, these are represented as
/// expansions whose coverage can be looked up independently.
struct ExpansionRecord {
  /// The abstract file this expansion covers.
  unsigned FileID;
  /// The region that expands to this record.
  const CountedRegion &Region;
  /// Coverage for the expansion.
  const FunctionRecord &Function;

  ExpansionRecord(const CountedRegion &Region,
                  const FunctionRecord &Function)
      : FileID(Region.ExpandedFileID), Region(Region), Function(Function) {}
};

/// The execution count information starting at a point in a file.
///
/// A sequence of CoverageSegments gives execution counts for a file in format
/// that's simple to iterate through for processing.
struct CoverageSegment {
  /// The line where this segment begins.
  unsigned Line;
  /// The column where this segment begins.
  unsigned Col;
  /// The execution count, or zero if no count was recorded.
  uint64_t Count;
  /// When false, the segment was uninstrumented or skipped.
  bool HasCount;
  /// Whether this enters a new region or returns to a previous count.
  bool IsRegionEntry;
  /// Whether this enters a gap region.
  bool IsGapRegion;

  CoverageSegment(unsigned Line, unsigned Col, bool IsRegionEntry)
      : Line(Line), Col(Col), Count(0), HasCount(false),
        IsRegionEntry(IsRegionEntry), IsGapRegion(false) {}

  CoverageSegment(unsigned Line, unsigned Col, uint64_t Count,
                  bool IsRegionEntry, bool IsGapRegion = false,
                  bool IsBranchRegion = false)
      : Line(Line), Col(Col), Count(Count), HasCount(true),
        IsRegionEntry(IsRegionEntry), IsGapRegion(IsGapRegion) {}

  friend bool operator==(const CoverageSegment &L, const CoverageSegment &R) {
    return std::tie(L.Line, L.Col, L.Count, L.HasCount, L.IsRegionEntry,
                    L.IsGapRegion) == std::tie(R.Line, R.Col, R.Count,
                                               R.HasCount, R.IsRegionEntry,
                                               R.IsGapRegion);
  }
};

/// An instantiation group contains a \c FunctionRecord list, such that each
/// record corresponds to a distinct instantiation of the same function.
///
/// Note that it's possible for a function to have more than one instantiation
/// (consider C++ template specializations or static inline functions).
class InstantiationGroup {
  friend class CoverageMapping;

  unsigned Line;
  unsigned Col;
  std::vector<const FunctionRecord *> Instantiations;

  InstantiationGroup(unsigned Line, unsigned Col,
                     std::vector<const FunctionRecord *> Instantiations)
      : Line(Line), Col(Col), Instantiations(std::move(Instantiations)) {}

public:
  InstantiationGroup(const InstantiationGroup &) = delete;
  InstantiationGroup(InstantiationGroup &&) = default;

  /// Get the number of instantiations in this group.
  size_t size() const { return Instantiations.size(); }

  /// Get the line where the common function was defined.
  unsigned getLine() const { return Line; }

  /// Get the column where the common function was defined.
  unsigned getColumn() const { return Col; }

  /// Check if the instantiations in this group have a common mangled name.
  bool hasName() const {
    for (unsigned I = 1, E = Instantiations.size(); I < E; ++I)
      if (Instantiations[I]->Name != Instantiations[0]->Name)
        return false;
    return true;
  }

  /// Get the common mangled name for instantiations in this group.
  StringRef getName() const {
    assert(hasName() && "Instantiations don't have a shared name");
    return Instantiations[0]->Name;
  }

  /// Get the total execution count of all instantiations in this group.
  uint64_t getTotalExecutionCount() const {
    uint64_t Count = 0;
    for (const FunctionRecord *F : Instantiations)
      Count += F->ExecutionCount;
    return Count;
  }

  /// Get the instantiations in this group.
  ArrayRef<const FunctionRecord *> getInstantiations() const {
    return Instantiations;
  }
};

/// Coverage information to be processed or displayed.
///
/// This represents the coverage of an entire file, expansion, or function. It
/// provides a sequence of CoverageSegments to iterate through, as well as the
/// list of expansions that can be further processed.
class CoverageData {
  friend class CoverageMapping;

  std::string Filename;
  std::vector<CoverageSegment> Segments;
  std::vector<ExpansionRecord> Expansions;
  std::vector<CountedRegion> BranchRegions;

public:
  CoverageData() = default;

  CoverageData(StringRef Filename) : Filename(Filename) {}

  /// Get the name of the file this data covers.
  StringRef getFilename() const { return Filename; }

  /// Get an iterator over the coverage segments for this object. The segments
  /// are guaranteed to be uniqued and sorted by location.
  std::vector<CoverageSegment>::const_iterator begin() const {
    return Segments.begin();
  }

  std::vector<CoverageSegment>::const_iterator end() const {
    return Segments.end();
  }

  bool empty() const { return Segments.empty(); }

  /// Expansions that can be further processed.
  ArrayRef<ExpansionRecord> getExpansions() const { return Expansions; }

  /// Branches that can be further processed.
  ArrayRef<CountedRegion> getBranches() const { return BranchRegions; }
};

/// The mapping of profile information to coverage data.
///
/// This is the main interface to get coverage information, using a profile to
/// fill out execution counts.
class CoverageMapping {
  DenseMap<size_t, DenseSet<size_t>> RecordProvenance;
  std::vector<FunctionRecord> Functions;
  DenseMap<size_t, SmallVector<unsigned, 0>> FilenameHash2RecordIndices;
  std::vector<std::pair<std::string, uint64_t>> FuncHashMismatches;

  CoverageMapping() = default;

  // Load coverage records from readers.
  static Error loadFromReaders(
      ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders,
      IndexedInstrProfReader &ProfileReader, CoverageMapping &Coverage);

  // Load coverage records from file.
  static Error
  loadFromFile(StringRef Filename, StringRef Arch, StringRef CompilationDir,
               IndexedInstrProfReader &ProfileReader, CoverageMapping &Coverage,
               bool &DataFound,
               SmallVectorImpl<object::BuildID> *FoundBinaryIDs = nullptr);

  /// Add a function record corresponding to \p Record.
  Error loadFunctionRecord(const CoverageMappingRecord &Record,
                           IndexedInstrProfReader &ProfileReader);

  /// Look up the indices for function records which are at least partially
  /// defined in the specified file. This is guaranteed to return a superset of
  /// such records: extra records not in the file may be included if there is
  /// a hash collision on the filename. Clients must be robust to collisions.
  ArrayRef<unsigned>
  getImpreciseRecordIndicesForFilename(StringRef Filename) const;

public:
  CoverageMapping(const CoverageMapping &) = delete;
  CoverageMapping &operator=(const CoverageMapping &) = delete;

  /// Load the coverage mapping using the given readers.
  static Expected<std::unique_ptr<CoverageMapping>>
  load(ArrayRef<std::unique_ptr<CoverageMappingReader>> CoverageReaders,
       IndexedInstrProfReader &ProfileReader);

  /// Load the coverage mapping from the given object files and profile. If
  /// \p Arches is non-empty, it must specify an architecture for each object.
  /// Ignores non-instrumented object files unless all are not instrumented.
  static Expected<std::unique_ptr<CoverageMapping>>
  load(ArrayRef<StringRef> ObjectFilenames, StringRef ProfileFilename,
       vfs::FileSystem &FS, ArrayRef<StringRef> Arches = std::nullopt,
       StringRef CompilationDir = "",
       const object::BuildIDFetcher *BIDFetcher = nullptr,
       bool CheckBinaryIDs = false);

  /// The number of functions that couldn't have their profiles mapped.
  ///
  /// This is a count of functions whose profile is out of date or otherwise
  /// can't be associated with any coverage information.
  unsigned getMismatchedCount() const { return FuncHashMismatches.size(); }

  /// A hash mismatch occurs when a profile record for a symbol does not have
  /// the same hash as a coverage mapping record for the same symbol. This
  /// returns a list of hash mismatches, where each mismatch is a pair of the
  /// symbol name and its coverage mapping hash.
  ArrayRef<std::pair<std::string, uint64_t>> getHashMismatches() const {
    return FuncHashMismatches;
  }

  /// Returns a lexicographically sorted, unique list of files that are
  /// covered.
  std::vector<StringRef> getUniqueSourceFiles() const;

  /// Get the coverage for a particular file.
  ///
  /// The given filename must be the name as recorded in the coverage
  /// information. That is, only names returned from getUniqueSourceFiles will
  /// yield a result.
  CoverageData getCoverageForFile(StringRef Filename) const;

  /// Get the coverage for a particular function.
  CoverageData getCoverageForFunction(const FunctionRecord &Function) const;

  /// Get the coverage for an expansion within a coverage set.
  CoverageData getCoverageForExpansion(const ExpansionRecord &Expansion) const;

  /// Gets all of the functions covered by this profile.
  iterator_range<FunctionRecordIterator> getCoveredFunctions() const {
    return make_range(FunctionRecordIterator(Functions),
                      FunctionRecordIterator());
  }

  /// Gets all of the functions in a particular file.
  iterator_range<FunctionRecordIterator>
  getCoveredFunctions(StringRef Filename) const {
    return make_range(FunctionRecordIterator(Functions, Filename),
                      FunctionRecordIterator());
  }

  /// Get the list of function instantiation groups in a particular file.
  ///
  /// Every instantiation group in a program is attributed to exactly one file:
  /// the file in which the definition for the common function begins.
  std::vector<InstantiationGroup>
  getInstantiationGroups(StringRef Filename) const;
};
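
// A high-level usage sketch, assuming vfs::getRealFileSystem() from
// llvm/Support/VirtualFileSystem.h and placeholder file names: load coverage
// from a binary plus an indexed profile, then walk the per-file segments.
//
// \code
//   auto FS = vfs::getRealFileSystem();
//   auto CoverageOrErr =
//       CoverageMapping::load({"a.out"}, "default.profdata", *FS);
//   if (!CoverageOrErr)
//     return CoverageOrErr.takeError();
//   const CoverageMapping &CM = **CoverageOrErr;
//   for (StringRef File : CM.getUniqueSourceFiles()) {
//     CoverageData Data = CM.getCoverageForFile(File);
//     for (const CoverageSegment &Seg : Data)
//       (void)Seg; // inspect Seg.Line, Seg.Col, Seg.Count, ...
//   }
// \endcode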

/// Coverage statistics for a single line.
class LineCoverageStats {
  uint64_t ExecutionCount;
  bool HasMultipleRegions;
  bool Mapped;
  unsigned Line;
  ArrayRef<const CoverageSegment *> LineSegments;
  const CoverageSegment *WrappedSegment;

  friend class LineCoverageIterator;
  LineCoverageStats() = default;

public:
  LineCoverageStats(ArrayRef<const CoverageSegment *> LineSegments,
                    const CoverageSegment *WrappedSegment, unsigned Line);

  uint64_t getExecutionCount() const { return ExecutionCount; }

  bool hasMultipleRegions() const { return HasMultipleRegions; }

  bool isMapped() const { return Mapped; }

  unsigned getLine() const { return Line; }

  ArrayRef<const CoverageSegment *> getLineSegments() const {
    return LineSegments;
  }

  const CoverageSegment *getWrappedSegment() const { return WrappedSegment; }
};

/// An iterator over the \c LineCoverageStats objects for lines described by
/// a \c CoverageData instance.
class LineCoverageIterator
    : public iterator_facade_base<LineCoverageIterator,
                                  std::forward_iterator_tag,
                                  const LineCoverageStats> {
public:
  LineCoverageIterator(const CoverageData &CD)
      : LineCoverageIterator(CD, CD.begin()->Line) {}

  LineCoverageIterator(const CoverageData &CD, unsigned Line)
      : CD(CD), WrappedSegment(nullptr), Next(CD.begin()), Ended(false),
        Line(Line) {
    this->operator++();
  }

  bool operator==(const LineCoverageIterator &R) const {
    return &CD == &R.CD && Next == R.Next && Ended == R.Ended;
  }

  const LineCoverageStats &operator*() const { return Stats; }

  LineCoverageIterator &operator++();

  LineCoverageIterator getEnd() const {
    auto EndIt = *this;
    EndIt.Next = CD.end();
    EndIt.Ended = true;
    return EndIt;
  }

private:
  const CoverageData &CD;
  const CoverageSegment *WrappedSegment;
  std::vector<CoverageSegment>::const_iterator Next;
  bool Ended;
  unsigned Line;
  SmallVector<const CoverageSegment *, 4> Segments;
  LineCoverageStats Stats;
};

/// Get a \c LineCoverageIterator range for the lines described by \p CD.
static inline iterator_range<LineCoverageIterator>
getLineCoverageStats(const coverage::CoverageData &CD) {
  auto Begin = LineCoverageIterator(CD);
  auto End = Begin.getEnd();
  return make_range(Begin, End);
}
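
// For example, mapped per-line counts for a CoverageData object `Data` could
// be printed as:
//
// \code
//   for (const LineCoverageStats &LCS : getLineCoverageStats(Data))
//     if (LCS.isMapped())
//       errs() << LCS.getLine() << ": " << LCS.getExecutionCount() << "\n";
// \endcode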

// Coverage mapping data (V2) has the following layout:
// IPSK_covmap:
//   [CoverageMapFileHeader]
//   [ArrayStart]
//    [CovMapFunctionRecordV2]
//    [CovMapFunctionRecordV2]
//    ...
//   [ArrayEnd]
//   [Encoded Filenames and Region Mapping Data]
//
// Coverage mapping data (V3) has the following layout:
// IPSK_covmap:
//   [CoverageMapFileHeader]
//   [Encoded Filenames]
// IPSK_covfun:
//   [ArrayStart]
//     odr_name_1: [CovMapFunctionRecordV3]
//     odr_name_2: [CovMapFunctionRecordV3]
//     ...
//   [ArrayEnd]
//
// Both versions of the coverage mapping format encode the same information,
// but the V3 format does so more compactly by taking advantage of linkonce_odr
// semantics (it allows exactly 1 function record per name reference).

/// This namespace defines accessors shared by different versions of coverage
/// mapping records.
namespace accessors {

/// Return the structural hash associated with the function.
template <class FuncRecordTy, support::endianness Endian>
uint64_t getFuncHash(const FuncRecordTy *Record) {
  return support::endian::byte_swap<uint64_t, Endian>(Record->FuncHash);
}

/// Return the coverage map data size for the function.
template <class FuncRecordTy, support::endianness Endian>
uint64_t getDataSize(const FuncRecordTy *Record) {
  return support::endian::byte_swap<uint32_t, Endian>(Record->DataSize);
}

/// Return the function lookup key. The value is considered opaque.
template <class FuncRecordTy, support::endianness Endian>
uint64_t getFuncNameRef(const FuncRecordTy *Record) {
  return support::endian::byte_swap<uint64_t, Endian>(Record->NameRef);
}

/// Return the PGO name of the function. Used for formats in which the name is
/// a hash.
template <class FuncRecordTy, support::endianness Endian>
Error getFuncNameViaRef(const FuncRecordTy *Record,
                        InstrProfSymtab &ProfileNames, StringRef &FuncName) {
  uint64_t NameRef = getFuncNameRef<FuncRecordTy, Endian>(Record);
  FuncName = ProfileNames.getFuncName(NameRef);
  return Error::success();
}

/// Read coverage mapping out-of-line, from \p MappingBuf. This is used when the
/// coverage mapping is attached to the file header, instead of to the function
/// record.
template <class FuncRecordTy, support::endianness Endian>
StringRef getCoverageMappingOutOfLine(const FuncRecordTy *Record,
                                      const char *MappingBuf) {
  return {MappingBuf, size_t(getDataSize<FuncRecordTy, Endian>(Record))};
}

/// Advance to the next out-of-line coverage mapping and its associated
/// function record.
template <class FuncRecordTy, support::endianness Endian>
std::pair<const char *, const FuncRecordTy *>
advanceByOneOutOfLine(const FuncRecordTy *Record, const char *MappingBuf) {
  return {MappingBuf + getDataSize<FuncRecordTy, Endian>(Record), Record + 1};
}

} // end namespace accessors

LLVM_PACKED_START
template <class IntPtrT>
struct CovMapFunctionRecordV1 {
  using ThisT = CovMapFunctionRecordV1<IntPtrT>;

#define COVMAP_V1
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
#include "llvm/ProfileData/InstrProfData.inc"
#undef COVMAP_V1
  CovMapFunctionRecordV1() = delete;

  template <support::endianness Endian> uint64_t getFuncHash() const {
    return accessors::getFuncHash<ThisT, Endian>(this);
  }

  template <support::endianness Endian> uint64_t getDataSize() const {
    return accessors::getDataSize<ThisT, Endian>(this);
  }

  /// Return the function lookup key. The value is considered opaque.
  template <support::endianness Endian> IntPtrT getFuncNameRef() const {
    return support::endian::byte_swap<IntPtrT, Endian>(NamePtr);
  }

  /// Return the PGO name of the function.
  template <support::endianness Endian>
  Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const {
    IntPtrT NameRef = getFuncNameRef<Endian>();
    uint32_t NameS = support::endian::byte_swap<uint32_t, Endian>(NameSize);
    FuncName = ProfileNames.getFuncName(NameRef, NameS);
    if (NameS && FuncName.empty())
      return make_error<CoverageMapError>(coveragemap_error::malformed);
    return Error::success();
  }

  template <support::endianness Endian>
  std::pair<const char *, const ThisT *>
  advanceByOne(const char *MappingBuf) const {
    return accessors::advanceByOneOutOfLine<ThisT, Endian>(this, MappingBuf);
  }

  template <support::endianness Endian> uint64_t getFilenamesRef() const {
    llvm_unreachable("V1 function format does not contain a filenames ref");
  }

  template <support::endianness Endian>
  StringRef getCoverageMapping(const char *MappingBuf) const {
    return accessors::getCoverageMappingOutOfLine<ThisT, Endian>(this,
                                                                 MappingBuf);
  }
};

struct CovMapFunctionRecordV2 {
  using ThisT = CovMapFunctionRecordV2;

#define COVMAP_V2
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
#include "llvm/ProfileData/InstrProfData.inc"
#undef COVMAP_V2
  CovMapFunctionRecordV2() = delete;

  template <support::endianness Endian> uint64_t getFuncHash() const {
    return accessors::getFuncHash<ThisT, Endian>(this);
  }

  template <support::endianness Endian> uint64_t getDataSize() const {
    return accessors::getDataSize<ThisT, Endian>(this);
  }

  template <support::endianness Endian> uint64_t getFuncNameRef() const {
    return accessors::getFuncNameRef<ThisT, Endian>(this);
  }

  template <support::endianness Endian>
  Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const {
    return accessors::getFuncNameViaRef<ThisT, Endian>(this, ProfileNames,
                                                       FuncName);
  }

  template <support::endianness Endian>
  std::pair<const char *, const ThisT *>
  advanceByOne(const char *MappingBuf) const {
    return accessors::advanceByOneOutOfLine<ThisT, Endian>(this, MappingBuf);
  }

  template <support::endianness Endian> uint64_t getFilenamesRef() const {
    llvm_unreachable("V2 function format does not contain a filenames ref");
  }

  template <support::endianness Endian>
  StringRef getCoverageMapping(const char *MappingBuf) const {
    return accessors::getCoverageMappingOutOfLine<ThisT, Endian>(this,
                                                                 MappingBuf);
  }
};

struct CovMapFunctionRecordV3 {
  using ThisT = CovMapFunctionRecordV3;

#define COVMAP_V3
#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
#include "llvm/ProfileData/InstrProfData.inc"
#undef COVMAP_V3
  CovMapFunctionRecordV3() = delete;

  template <support::endianness Endian> uint64_t getFuncHash() const {
    return accessors::getFuncHash<ThisT, Endian>(this);
  }

  template <support::endianness Endian> uint64_t getDataSize() const {
    return accessors::getDataSize<ThisT, Endian>(this);
  }

  template <support::endianness Endian> uint64_t getFuncNameRef() const {
    return accessors::getFuncNameRef<ThisT, Endian>(this);
  }

  template <support::endianness Endian>
  Error getFuncName(InstrProfSymtab &ProfileNames, StringRef &FuncName) const {
    return accessors::getFuncNameViaRef<ThisT, Endian>(this, ProfileNames,
                                                       FuncName);
  }

  /// Get the filename set reference.
  template <support::endianness Endian> uint64_t getFilenamesRef() const {
    return support::endian::byte_swap<uint64_t, Endian>(FilenamesRef);
  }

  /// Read the inline coverage mapping. Ignore the buffer parameter, it is for
  /// out-of-line coverage mapping data only.
  template <support::endianness Endian>
  StringRef getCoverageMapping(const char *) const {
    return StringRef(&CoverageMapping, getDataSize<Endian>());
  }

  // Advance to the next inline coverage mapping and its associated function
  // record. Ignore the out-of-line coverage mapping buffer.
  template <support::endianness Endian>
  std::pair<const char *, const CovMapFunctionRecordV3 *>
  advanceByOne(const char *) const {
    assert(isAddrAligned(Align(8), this) && "Function record not aligned");
    const char *Next = ((const char *)this) + sizeof(CovMapFunctionRecordV3) -
                       sizeof(char) + getDataSize<Endian>();
    // Each function record has an alignment of 8, so we need to adjust
    // alignment before reading the next record.
    Next += offsetToAlignedAddr(Next, Align(8));
    return {nullptr, reinterpret_cast<const CovMapFunctionRecordV3 *>(Next)};
  }
};

// Per module coverage mapping data header, i.e. CoverageMapFileHeader
// documented above.
struct CovMapHeader {
#define COVMAP_HEADER(Type, LLVMType, Name, Init) Type Name;
#include "llvm/ProfileData/InstrProfData.inc"
  template <support::endianness Endian> uint32_t getNRecords() const {
    return support::endian::byte_swap<uint32_t, Endian>(NRecords);
  }

  template <support::endianness Endian> uint32_t getFilenamesSize() const {
    return support::endian::byte_swap<uint32_t, Endian>(FilenamesSize);
  }

  template <support::endianness Endian> uint32_t getCoverageSize() const {
    return support::endian::byte_swap<uint32_t, Endian>(CoverageSize);
  }

  template <support::endianness Endian> uint32_t getVersion() const {
    return support::endian::byte_swap<uint32_t, Endian>(Version);
  }
};

LLVM_PACKED_END

enum CovMapVersion {
  Version1 = 0,
  // Function's name reference from CovMapFuncRecord is changed from raw
  // name string pointer to MD5 to support name section compression. Name
  // section is also compressed.
  Version2 = 1,
  // A new interpretation of the columnEnd field is added in order to mark
  // regions as gap areas.
  Version3 = 2,
  // Function records are named, uniqued, and moved to a dedicated section.
  Version4 = 3,
  // Branch regions referring to two counters are added
  Version5 = 4,
  // Compilation directory is stored separately and combined with relative
  // filenames to produce an absolute file path.
  Version6 = 5,
  // The current version is Version6.
  CurrentVersion = INSTR_PROF_COVMAP_VERSION
};

template <int CovMapVersion, class IntPtrT> struct CovMapTraits {
  using CovMapFuncRecordType = CovMapFunctionRecordV3;
  using NameRefType = uint64_t;
};

template <class IntPtrT> struct CovMapTraits<CovMapVersion::Version3, IntPtrT> {
  using CovMapFuncRecordType = CovMapFunctionRecordV2;
  using NameRefType = uint64_t;
};

template <class IntPtrT> struct CovMapTraits<CovMapVersion::Version2, IntPtrT> {
  using CovMapFuncRecordType = CovMapFunctionRecordV2;
  using NameRefType = uint64_t;
};

template <class IntPtrT> struct CovMapTraits<CovMapVersion::Version1, IntPtrT> {
  using CovMapFuncRecordType = CovMapFunctionRecordV1<IntPtrT>;
  using NameRefType = IntPtrT;
};

} // end namespace coverage

/// Provide DenseMapInfo for CounterExpression
template<> struct DenseMapInfo<coverage::CounterExpression> {
  static inline coverage::CounterExpression getEmptyKey() {
    using namespace coverage;

    return CounterExpression(CounterExpression::ExprKind::Subtract,
                             Counter::getCounter(~0U),
                             Counter::getCounter(~0U));
  }

  static inline coverage::CounterExpression getTombstoneKey() {
    using namespace coverage;

    return CounterExpression(CounterExpression::ExprKind::Add,
                             Counter::getCounter(~0U),
                             Counter::getCounter(~0U));
  }

  static unsigned getHashValue(const coverage::CounterExpression &V) {
    return static_cast<unsigned>(
        hash_combine(V.Kind, V.LHS.getKind(), V.LHS.getCounterID(),
                     V.RHS.getKind(), V.RHS.getCounterID()));
  }

  static bool isEqual(const coverage::CounterExpression &LHS,
                      const coverage::CounterExpression &RHS) {
    return LHS.Kind == RHS.Kind && LHS.LHS == RHS.LHS && LHS.RHS == RHS.RHS;
  }
};

} // end namespace llvm

#endif // LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPING_H

//===- CoverageMappingWriter.h - Code coverage mapping writer ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing coverage mapping data for
// instrumentation based coverage.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGWRITER_H
#define LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGWRITER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"

namespace llvm {

class raw_ostream;

namespace coverage {

/// Writer of the filenames section for the instrumentation
/// based code coverage.
class CoverageFilenamesSectionWriter {
  ArrayRef<std::string> Filenames;

public:
  CoverageFilenamesSectionWriter(ArrayRef<std::string> Filenames);

  /// Write encoded filenames to the given output stream. If \p Compress is
  /// true, attempt to compress the filenames.
  void write(raw_ostream &OS, bool Compress = true);
};

/// Writer for instrumentation based coverage mapping data.
class CoverageMappingWriter {
  ArrayRef<unsigned> VirtualFileMapping;
  ArrayRef<CounterExpression> Expressions;
  MutableArrayRef<CounterMappingRegion> MappingRegions;

public:
  CoverageMappingWriter(ArrayRef<unsigned> VirtualFileMapping,
                        ArrayRef<CounterExpression> Expressions,
                        MutableArrayRef<CounterMappingRegion> MappingRegions)
      : VirtualFileMapping(VirtualFileMapping), Expressions(Expressions),
        MappingRegions(MappingRegions) {}

  /// Write encoded coverage mapping data to the given output stream.
  void write(raw_ostream &OS);
};
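
// A minimal encoding sketch; the inputs are assumed to come from a frontend's
// coverage lowering, and raw_string_ostream requires
// llvm/Support/raw_ostream.h. The regions are taken by MutableArrayRef
// because the writer may reorder them in place:
//
// \code
//   CoverageMappingWriter Writer(VirtualFileMapping, Expressions, Regions);
//   std::string Encoded;
//   raw_string_ostream OS(Encoded);
//   Writer.write(OS);
// \endcode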

} // end namespace coverage

} // end namespace llvm

#endif // LLVM_PROFILEDATA_COVERAGE_COVERAGEMAPPINGWRITER_H

//===- ProfileCommon.h - Common profiling APIs. -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains data structures and functions common to both instrumented
// and sample profiling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_PROFILECOMMON_H
#define LLVM_PROFILEDATA_PROFILECOMMON_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <vector>

namespace llvm {

namespace sampleprof {

class FunctionSamples;

} // end namespace sampleprof

class ProfileSummaryBuilder {
private:
  /// We keep track of the number of times a count (block count or samples)
  /// appears in the profile. The map is kept sorted in the descending order of
  /// counts.
  std::map<uint64_t, uint32_t, std::greater<uint64_t>> CountFrequencies;
  std::vector<uint32_t> DetailedSummaryCutoffs;

protected:
  SummaryEntryVector DetailedSummary;
  uint64_t TotalCount = 0;
  uint64_t MaxCount = 0;
  uint64_t MaxFunctionCount = 0;
  uint32_t NumCounts = 0;
  uint32_t NumFunctions = 0;

  ProfileSummaryBuilder(std::vector<uint32_t> Cutoffs)
      : DetailedSummaryCutoffs(std::move(Cutoffs)) {}
  ~ProfileSummaryBuilder() = default;

  inline void addCount(uint64_t Count);
  void computeDetailedSummary();

public:
  /// A vector of useful cutoff values for detailed summary.
  static const ArrayRef<uint32_t> DefaultCutoffs;

  /// Find the summary entry for a desired percentile of counts.
  static const ProfileSummaryEntry &
  getEntryForPercentile(const SummaryEntryVector &DS, uint64_t Percentile);
  static uint64_t getHotCountThreshold(const SummaryEntryVector &DS);
  static uint64_t getColdCountThreshold(const SummaryEntryVector &DS);
};

class InstrProfSummaryBuilder final : public ProfileSummaryBuilder {
  uint64_t MaxInternalBlockCount = 0;

  inline void addEntryCount(uint64_t Count);
  inline void addInternalCount(uint64_t Count);

public:
  InstrProfSummaryBuilder(std::vector<uint32_t> Cutoffs)
      : ProfileSummaryBuilder(std::move(Cutoffs)) {}

  void addRecord(const InstrProfRecord &);
  std::unique_ptr<ProfileSummary> getSummary();
};

class SampleProfileSummaryBuilder final : public ProfileSummaryBuilder {
public:
  SampleProfileSummaryBuilder(std::vector<uint32_t> Cutoffs)
      : ProfileSummaryBuilder(std::move(Cutoffs)) {}

  void addRecord(const sampleprof::FunctionSamples &FS,
                 bool isCallsiteSample = false);
  std::unique_ptr<ProfileSummary>
  computeSummaryForProfiles(const sampleprof::SampleProfileMap &Profiles);
  std::unique_ptr<ProfileSummary> getSummary();
};

/// This is called when a count is seen in the profile.
void ProfileSummaryBuilder::addCount(uint64_t Count) {
  TotalCount += Count;
  if (Count > MaxCount)
    MaxCount = Count;
  NumCounts++;
  CountFrequencies[Count]++;
}
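
// A usage sketch for the instrumented-profile flavor; `Records` is a
// hypothetical sequence of InstrProfRecord values:
//
// \code
//   InstrProfSummaryBuilder Builder(
//       std::vector<uint32_t>(ProfileSummaryBuilder::DefaultCutoffs.begin(),
//                             ProfileSummaryBuilder::DefaultCutoffs.end()));
//   for (const InstrProfRecord &R : Records)
//     Builder.addRecord(R);
//   std::unique_ptr<ProfileSummary> Summary = Builder.getSummary();
// \endcode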

} // end namespace llvm

#endif // LLVM_PROFILEDATA_PROFILECOMMON_H

//===- SampleProfReader.h - Read LLVM sample profile data -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions needed for reading sample profiles.
//
// NOTE: If you are making changes to this file format, please remember
//       to document them in the Clang documentation at
//       tools/clang/docs/UsersManual.rst.
//
// Text format
// -----------
//
// Sample profiles are written as ASCII text. The file is divided into
// sections, which correspond to each of the functions executed at runtime.
// Each section has the following format
//
//     function1:total_samples:total_head_samples
//      offset1[.discriminator]: number_of_samples [fn1:num fn2:num ... ]
//      offset2[.discriminator]: number_of_samples [fn3:num fn4:num ... ]
//      ...
//      offsetN[.discriminator]: number_of_samples [fn5:num fn6:num ... ]
//      offsetA[.discriminator]: fnA:num_of_total_samples
//       offsetA1[.discriminator]: number_of_samples [fn7:num fn8:num ... ]
//       ...
//      !CFGChecksum: num
//      !Attribute: flags
//
// This is a nested tree in which the indentation represents the nesting level
// of the inline stack. There are no blank lines in the file, and the spacing
// within a single line is fixed. Additional spaces will result in an error
// while reading the file.
//
// Any line starting with the '#' character is completely ignored.
//
// Inlined calls are represented with indentation. The Inline stack is a
// stack of source locations in which the top of the stack represents the
// leaf function, and the bottom of the stack represents the actual
// symbol to which the instruction belongs.
//
// Function names must be mangled in order for the profile loader to
// match them in the current translation unit. The two numbers in the
// function header specify how many total samples were accumulated in the
// function (first number), and the total number of samples accumulated
// in the prologue of the function (second number). This head sample
// count provides an indicator of how frequently the function is invoked.
//
// There are three types of lines in the function body.
//
// * Sampled line represents the profile information of a source location.
// * Callsite line represents the profile information of a callsite.
// * Metadata line represents extra metadata of the function.
//
// Each sampled line may contain several items. Some are optional (marked
// below):
//
// a. Source line offset. This number represents the line number
//    in the function where the sample was collected. The line number is
//    always relative to the line where symbol of the function is
//    defined. So, if the function has its header at line 280, the offset
//    13 is at line 293 in the file.
//
//    Note that this offset should never be a negative number. This could
//    happen in cases like macros. The debug machinery will register the
//    line number at the point of macro expansion. So, if the macro was
//    expanded in a line before the start of the function, the profile
//    converter should emit a 0 as the offset (this means that the optimizers
//    will not be able to associate a meaningful weight to the instructions
//    in the macro).
//
// b. [OPTIONAL] Discriminator. This is used if the sampled program
//    was compiled with DWARF discriminator support
//    (http://wiki.dwarfstd.org/index.php?title=Path_Discriminators).
//    DWARF discriminators are unsigned integer values that allow the
//    compiler to distinguish between multiple execution paths on the
//    same source line location.
//
//    For example, consider the line of code ``if (cond) foo(); else bar();``.
//    If the predicate ``cond`` is true 80% of the time, then the edge
//    into function ``foo`` should be considered to be taken most of the
//    time. But both calls to ``foo`` and ``bar`` are at the same source
//    line, so a sample count at that line is not sufficient. The
//    compiler needs to know which part of that line is taken more
//    frequently.
//
//    This is what discriminators provide. In this case, the calls to
//    ``foo`` and ``bar`` will be at the same line, but will have
//    different discriminator values. This allows the compiler to correctly
//    set edge weights into ``foo`` and ``bar``.
//
// c. Number of samples. This is an integer quantity representing the
//    number of samples collected by the profiler at this source
//    location.
//
// d. [OPTIONAL] Potential call targets and samples. If present, this
//    line contains a call instruction. This models both direct and
//    indirect calls; each potential target is listed together with the
//    number of samples it received. For example,
//
//      130: 7  foo:3  bar:2  baz:7
//
//    The above means that at relative line offset 130 there is a call
//    instruction that calls one of ``foo()``, ``bar()`` and ``baz()``,
//    with ``baz()`` being the relatively more frequently called target.
//
// Each callsite line may contain several items. Some are optional.
//
// a. Source line offset. This number represents the line number of the
//    callsite that is inlined in the profiled binary.
//
// b. [OPTIONAL] Discriminator. Same as the discriminator for sampled line.
//
// c. Number of samples. This is an integer quantity representing the
//    total number of samples collected for the inlined instance at this
//    callsite
//
// Metadata lines can occur only with a single level of indentation, and they
// carry extra information for the top-level function. Furthermore, metadata
// can only occur after all the body samples and callsite samples.
// Each metadata line carries a particular type of metadata, marked by a
// leading tag that starts with '!'. We process each metadata line
// independently, hence each metadata line has to form an independent piece
// of information that does not require cross-line reference.
// We support the following types of metadata:
//
// a. CFG Checksum (a.k.a. function hash):
//   !CFGChecksum: 12345
// b. Attribute (see ContextAttributeMask):
//   !Attribute: 1
//
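// For illustration, a small, entirely hypothetical profile in this format,
// where bar() is inlined into foo() at line offset 7:
//
//     foo:10000:200
//      1: 1000
//      2.1: 2000
//      7: bar:5000
//       1: 5000
//      !CFGChecksum: 563022570642068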
//
// Binary format
// -------------
//
// This is a more compact encoding. Numbers are encoded as ULEB128 values
// and all strings are encoded in a name table. The file is organized in
// the following sections:
//
// MAGIC (uint64_t)
//    File identifier computed by function SPMagic() (0x5350524f463432ff)
//
// VERSION (uint32_t)
//    File format version number computed by SPVersion()
//
// SUMMARY
//    TOTAL_COUNT (uint64_t)
//        Total number of samples in the profile.
//    MAX_COUNT (uint64_t)
//        Maximum value of samples on a line.
//    MAX_FUNCTION_COUNT (uint64_t)
//        Maximum number of samples at function entry (head samples).
//    NUM_COUNTS (uint64_t)
//        Number of lines with samples.
//    NUM_FUNCTIONS (uint64_t)
//        Number of functions with samples.
//    NUM_DETAILED_SUMMARY_ENTRIES (size_t)
//        Number of entries in detailed summary
//    DETAILED_SUMMARY
//        A list of detailed summary entry. Each entry consists of
//        CUTOFF (uint32_t)
//            Required percentile of total sample count expressed as a fraction
//            multiplied by 1000000.
//        MIN_COUNT (uint64_t)
//            The minimum number of samples required to reach the target
//            CUTOFF.
//        NUM_COUNTS (uint64_t)
//            Number of samples to get to the desired percentile.
//
// NAME TABLE
//    SIZE (uint64_t)
//        Number of entries in the name table.
//    NAMES
//        A NUL-separated list of SIZE strings.
//
// FUNCTION BODY (one for each uninlined function body present in the profile)
//    HEAD_SAMPLES (uint64_t) [only for top-level functions]
//        Total number of samples collected at the head (prologue) of the
//        function.
//        NOTE: This field should only be present for top-level functions
//              (i.e., not inlined into any caller). Inlined function calls
//              have no prologue, so they don't need this.
//    NAME_IDX (uint64_t)
//        Index into the name table indicating the function name.
//    SAMPLES (uint64_t)
//        Total number of samples collected in this function.
//    NRECS (uint32_t)
//        Total number of sampling records in this function's profile.
//    BODY RECORDS
//        A list of NRECS entries. Each entry contains:
//          OFFSET (uint32_t)
//            Line offset from the start of the function.
//          DISCRIMINATOR (uint32_t)
//            Discriminator value (see description of discriminators
//            in the text format documentation above).
//          SAMPLES (uint64_t)
//            Number of samples collected at this location.
//          NUM_CALLS (uint32_t)
//            Number of non-inlined function calls made at this location. In the
//            case of direct calls, this number will always be 1. For indirect
//            calls (virtual functions and function pointers) this will
//            represent all the actual functions called at runtime.
//          CALL_TARGETS
//            A list of NUM_CALLS entries for each called function:
//               NAME_IDX (uint64_t)
//                  Index into the name table with the callee name.
//               SAMPLES (uint64_t)
//                  Number of samples collected at the call site.
//    NUM_INLINED_FUNCTIONS (uint32_t)
//      Number of callees inlined into this function.
//    INLINED FUNCTION RECORDS
//      A list of NUM_INLINED_FUNCTIONS entries describing each of the inlined
//      callees.
//        OFFSET (uint32_t)
//          Line offset from the start of the function.
//        DISCRIMINATOR (uint32_t)
//          Discriminator value (see description of discriminators
//          in the text format documentation above).
//        FUNCTION BODY
//          A FUNCTION BODY entry describing the inlined function.
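//
// For reference, ULEB128 encodes 7 bits per byte, using the high bit of each
// byte as a continuation flag; e.g. the value 624485 is encoded as the three
// bytes 0xE5 0x8E 0x26.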
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_SAMPLEPROFREADER_H
#define LLVM_PROFILEDATA_SAMPLEPROFREADER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/GCOV.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SymbolRemappingReader.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Discriminator.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstdint>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <system_error>
#include <unordered_set>
#include <vector>

namespace llvm {

class raw_ostream;
class Twine;

namespace vfs {
class FileSystem;
} // namespace vfs

namespace sampleprof {

class SampleProfileReader;

/// SampleProfileReaderItaniumRemapper remaps the profile data from a
/// sample profile data reader, by applying a provided set of equivalences
/// between components of the symbol names in the profile.
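///
/// A minimal usage sketch (the remap file name is illustrative; in practice
/// the reader usually creates and owns the remapper internally):
///
/// \code
///   auto RemapperOrErr = SampleProfileReaderItaniumRemapper::create(
///       std::string("profile.remap"), FS, Reader, Ctx);
///   if (RemapperOrErr)
///     (*RemapperOrErr)->applyRemapping(Ctx);
/// \endcode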
class SampleProfileReaderItaniumRemapper {
public:
  SampleProfileReaderItaniumRemapper(std::unique_ptr<MemoryBuffer> B,
                                     std::unique_ptr<SymbolRemappingReader> SRR,
                                     SampleProfileReader &R)
      : Buffer(std::move(B)), Remappings(std::move(SRR)), Reader(R) {
    assert(Remappings && "Remappings cannot be nullptr");
  }

  /// Create a remapper from the given remapping file. The remapper will
  /// be used for the profile read in by Reader.
  static ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
  create(const std::string Filename, vfs::FileSystem &FS,
         SampleProfileReader &Reader, LLVMContext &C);

  /// Create a remapper from the given Buffer. The remapper will
  /// be used for the profile read in by Reader.
  static ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
  create(std::unique_ptr<MemoryBuffer> &B, SampleProfileReader &Reader,
         LLVMContext &C);

  /// Apply remappings to the profile read by Reader.
  void applyRemapping(LLVMContext &Ctx);

  bool hasApplied() { return RemappingApplied; }

  /// Insert function name into remapper.
  void insert(StringRef FunctionName) { Remappings->insert(FunctionName); }

  /// Query whether there is an equivalent in the remapper which has been
  /// inserted.
  bool exist(StringRef FunctionName) {
    return Remappings->lookup(FunctionName);
  }

  /// Return the equivalent name in the profile for \p FunctionName if
  /// it exists.
  std::optional<StringRef> lookUpNameInProfile(StringRef FunctionName);

private:
  // The buffer holding the content read from remapping file.
  std::unique_ptr<MemoryBuffer> Buffer;
  std::unique_ptr<SymbolRemappingReader> Remappings;
  // Map remapping key to the name in the profile. By looking up the
  // key in the remapper, a given new name can be mapped to the
  // canonical name using the NameMap.
  DenseMap<SymbolRemappingReader::Key, StringRef> NameMap;
  // The Reader the remapper is servicing.
  SampleProfileReader &Reader;
  // Indicate whether remapping has been applied to the profile read
  // by Reader -- by calling applyRemapping.
  bool RemappingApplied = false;
};

/// Sample-based profile reader.
///
/// Each profile contains sample counts for all the functions
/// executed. Inside each function, statements are annotated with the
/// collected samples on all the instructions associated with that
/// statement.
///
/// For this to produce meaningful data, the program needs to be
/// compiled with some debug information (at minimum, line numbers:
/// -gline-tables-only). Otherwise, it will be impossible to match IR
/// instructions to the line numbers collected by the profiler.
///
/// From the profile file, we are interested in collecting the
/// following information:
///
/// * A list of functions included in the profile (mangled names).
///
/// * For each function F:
///   1. The total number of samples collected in F.
///
///   2. The samples collected at each line in F. To provide some
///      protection against source code shuffling, line numbers should
///      be relative to the start of the function.
///
/// The reader supports two file formats: text and binary. The text format
/// is useful for debugging and testing, while the binary format is more
/// compact and I/O efficient. The two formats can be used interchangeably.
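///
/// A minimal reading sketch (the profile file name is illustrative):
///
/// \code
///   LLVMContext Ctx;
///   auto FS = vfs::getRealFileSystem();
///   auto ReaderOrErr = SampleProfileReader::create("perf.prof", Ctx, *FS);
///   if (std::error_code EC = ReaderOrErr.getError())
///     errs() << "cannot open profile: " << EC.message() << "\n";
///   else if (std::error_code EC = (*ReaderOrErr)->read())
///     errs() << "cannot read profile: " << EC.message() << "\n";
///   else
///     (*ReaderOrErr)->dump();
/// \endcode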
class SampleProfileReader {
public:
  SampleProfileReader(std::unique_ptr<MemoryBuffer> B, LLVMContext &C,
                      SampleProfileFormat Format = SPF_None)
      : Profiles(0), Ctx(C), Buffer(std::move(B)), Format(Format) {}

  virtual ~SampleProfileReader() = default;

  /// Read and validate the file header.
  virtual std::error_code readHeader() = 0;

  /// Set the bits for FS discriminators. Parameter P specifies the sequence
  /// number; P == i is for the i-th round of adding FS discriminators.
  /// P == 0 is for using base discriminators.
  void setDiscriminatorMaskedBitFrom(FSDiscriminatorPass P) {
    MaskedBitFrom = getFSPassBitEnd(P);
  }

  /// Get the bitmask for the discriminators: For FS profiles, return the bit
  /// mask for this pass. For non-FS profiles, return (unsigned) -1.
  uint32_t getDiscriminatorMask() const {
    if (!ProfileIsFS)
      return 0xFFFFFFFF;
    assert((MaskedBitFrom != 0) && "MaskedBitFrom is not set properly");
    return getN1Bits(MaskedBitFrom);
  }

  /// The interface to read sample profiles from the associated file.
  std::error_code read() {
    if (std::error_code EC = readImpl())
      return EC;
    if (Remapper)
      Remapper->applyRemapping(Ctx);
    FunctionSamples::UseMD5 = useMD5();
    return sampleprof_error::success;
  }

  /// The implementation to read sample profiles from the associated file.
  virtual std::error_code readImpl() = 0;

  /// Print the profile for \p FContext on stream \p OS.
  void dumpFunctionProfile(SampleContext FContext, raw_ostream &OS = dbgs());

  /// Collect functions with definitions in Module M. For readers which
  /// support loading function profiles on demand, return true when the
  /// reader has been given a module. Always return false for readers
  /// which don't support loading function profiles on demand.
  virtual bool collectFuncsFromModule() { return false; }

  /// Print all the profiles on stream \p OS.
  void dump(raw_ostream &OS = dbgs());

  /// Print all the profiles on stream \p OS in the JSON format.
  void dumpJson(raw_ostream &OS = dbgs());

  /// Return the samples collected for function \p F.
  FunctionSamples *getSamplesFor(const Function &F) {
    // The function name may have been updated by adding suffix. Call
    // a helper to (optionally) strip off suffixes so that we can
    // match against the original function name in the profile.
    StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
    return getSamplesFor(CanonName);
  }

  /// Return the samples collected for function \p F, creating an empty
  /// FunctionSamples if it doesn't exist.
  FunctionSamples *getOrCreateSamplesFor(const Function &F) {
    std::string FGUID;
    StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
    CanonName = getRepInFormat(CanonName, useMD5(), FGUID);
    auto It = Profiles.find(CanonName);
    if (It != Profiles.end())
      return &It->second;
    if (!FGUID.empty()) {
      assert(useMD5() && "New name should only be generated for md5 profile");
      CanonName = *MD5NameBuffer.insert(FGUID).first;
    }
    return &Profiles[CanonName];
  }

  /// Return the samples collected for function \p F.
  virtual FunctionSamples *getSamplesFor(StringRef Fname) {
    std::string FGUID;
    Fname = getRepInFormat(Fname, useMD5(), FGUID);
    auto It = Profiles.find(Fname);
    if (It != Profiles.end())
      return &It->second;

    if (Remapper) {
      if (auto NameInProfile = Remapper->lookUpNameInProfile(Fname)) {
        auto It = Profiles.find(*NameInProfile);
        if (It != Profiles.end())
          return &It->second;
      }
    }
    return nullptr;
  }

  /// Return all the profiles.
  SampleProfileMap &getProfiles() { return Profiles; }

  /// Report a parse error message.
  void reportError(int64_t LineNumber, const Twine &Msg) const {
    Ctx.diagnose(DiagnosticInfoSampleProfile(Buffer->getBufferIdentifier(),
                                             LineNumber, Msg));
  }

  /// Create a sample profile reader appropriate to the file format.
  /// Also create an underlying remapper if RemapFilename is not empty.
  /// Parameter P specifies the FSDiscriminatorPass.
  static ErrorOr<std::unique_ptr<SampleProfileReader>>
  create(const std::string Filename, LLVMContext &C, vfs::FileSystem &FS,
         FSDiscriminatorPass P = FSDiscriminatorPass::Base,
         const std::string RemapFilename = "");

  /// Create a sample profile reader from the supplied memory buffer.
  /// Also create an underlying remapper if RemapFilename is not empty.
  /// Parameter P specifies the FSDiscriminatorPass.
  static ErrorOr<std::unique_ptr<SampleProfileReader>>
  create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C, vfs::FileSystem &FS,
         FSDiscriminatorPass P = FSDiscriminatorPass::Base,
         const std::string RemapFilename = "");

  /// Return the profile summary.
  ProfileSummary &getSummary() const { return *(Summary.get()); }

  MemoryBuffer *getBuffer() const { return Buffer.get(); }

  /// \brief Return the profile format.
  SampleProfileFormat getFormat() const { return Format; }

  /// Whether input profile is based on pseudo probes.
  bool profileIsProbeBased() const { return ProfileIsProbeBased; }

  /// Whether input profile is fully context-sensitive.
  bool profileIsCS() const { return ProfileIsCS; }

  /// Whether input profile contains ShouldBeInlined contexts.
  bool profileIsPreInlined() const { return ProfileIsPreInlined; }

  /// Whether input profile is flow-sensitive.
  bool profileIsFS() const { return ProfileIsFS; }

  virtual std::unique_ptr<ProfileSymbolList> getProfileSymbolList() {
    return nullptr;
  };

  /// It includes all the names that have samples either in an outline
  /// instance or an inline instance.
  virtual std::vector<StringRef> *getNameTable() { return nullptr; }
  virtual bool dumpSectionInfo(raw_ostream &OS = dbgs()) { return false; };

  /// Return whether names in the profile are all MD5 numbers.
  bool useMD5() const { return ProfileIsMD5; }

  /// Force the profile to use MD5 in Sample contexts, even if function names
  /// are present.
  virtual void setProfileUseMD5() { ProfileIsMD5 = true; }

  /// Don't read profiles without context if the flag is set. This is only
  /// meaningful for the ExtBinary format.
  virtual void setSkipFlatProf(bool Skip) {}
  /// Return whether any name in the profile contains ".__uniq." suffix.
  virtual bool hasUniqSuffix() { return false; }

  SampleProfileReaderItaniumRemapper *getRemapper() { return Remapper.get(); }

  void setModule(const Module *Mod) { M = Mod; }

protected:
  /// Map every function to its associated profile.
  ///
  /// The profile of every function executed at runtime is collected
  /// in the structure FunctionSamples. This maps function objects
  /// to their corresponding profiles.
  SampleProfileMap Profiles;

  /// LLVM context used to emit diagnostics.
  LLVMContext &Ctx;

  /// Memory buffer holding the profile file.
  std::unique_ptr<MemoryBuffer> Buffer;

  /// Extra name buffer holding names created on demand.
  /// This should only be needed for md5 profiles.
  std::unordered_set<std::string> MD5NameBuffer;

  /// Profile summary information.
  std::unique_ptr<ProfileSummary> Summary;

  /// Take ownership of the summary of this reader.
  static std::unique_ptr<ProfileSummary>
  takeSummary(SampleProfileReader &Reader) {
    return std::move(Reader.Summary);
  }

  /// Compute summary for this profile.
  void computeSummary();

  std::unique_ptr<SampleProfileReaderItaniumRemapper> Remapper;

  /// \brief Whether samples are collected based on pseudo probes.
  bool ProfileIsProbeBased = false;

  /// Whether function profiles are context-sensitive flat profiles.
  bool ProfileIsCS = false;

  /// Whether function profile contains ShouldBeInlined contexts.
  bool ProfileIsPreInlined = false;

  /// Number of context-sensitive profiles.
  uint32_t CSProfileCount = 0;

  /// Whether the function profiles use FS discriminators.
  bool ProfileIsFS = false;

  /// \brief The format of the sample profile.
  SampleProfileFormat Format = SPF_None;

  /// \brief The current module being compiled if SampleProfileReader
  /// is used by the compiler. If SampleProfileReader is used by other
  /// tools which are not compilers, M is usually nullptr.
  const Module *M = nullptr;

  /// Zero out the discriminator bits higher than bit MaskedBitFrom (0 based).
  /// The default is to keep all the bits.
  uint32_t MaskedBitFrom = 31;

  /// Whether the profile uses MD5 for Sample Contexts and function names. This
  /// can be one-way overridden by the user to force the use of MD5.
  bool ProfileIsMD5 = false;
};

class SampleProfileReaderText : public SampleProfileReader {
public:
  SampleProfileReaderText(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
      : SampleProfileReader(std::move(B), C, SPF_Text) {}

  /// Read and validate the file header.
  std::error_code readHeader() override { return sampleprof_error::success; }

  /// Read sample profiles from the associated file.
  std::error_code readImpl() override;

  /// Return true if \p Buffer is in the format supported by this class.
  static bool hasFormat(const MemoryBuffer &Buffer);

  /// Text format sample profile does not support MD5 for now.
  void setProfileUseMD5() override {}

private:
  /// CSNameTable is used to save full context vectors. This serves as an
  /// underlying immutable buffer for all clients.
  std::list<SampleContextFrameVector> CSNameTable;
};

class SampleProfileReaderBinary : public SampleProfileReader {
public:
  SampleProfileReaderBinary(std::unique_ptr<MemoryBuffer> B, LLVMContext &C,
                            SampleProfileFormat Format = SPF_None)
      : SampleProfileReader(std::move(B), C, Format) {}

  /// Read and validate the file header.
  std::error_code readHeader() override;

  /// Read sample profiles from the associated file.
  std::error_code readImpl() override;

  /// It includes all the names that have samples either in an outline
  /// instance or an inline instance.
  std::vector<StringRef> *getNameTable() override { return &NameTable; }

protected:
  /// Read a numeric value of type T from the profile.
  ///
  /// If an error occurs during decoding, a diagnostic message is emitted and
  /// EC is set.
  ///
  /// \returns the read value.
  template <typename T> ErrorOr<T> readNumber();

  /// Read a numeric value of type T from the profile. The value is stored
  /// in its raw form, without ULEB128 encoding.
  template <typename T> ErrorOr<T> readUnencodedNumber();

  /// Read a string from the profile.
  ///
  /// If an error occurs during decoding, a diagnostic message is emitted and
  /// EC is set.
  ///
  /// \returns the read value.
  ErrorOr<StringRef> readString();

  /// Read the string index and check whether it overflows the table.
  template <typename T> inline ErrorOr<size_t> readStringIndex(T &Table);

  /// Read the next function profile instance.
  std::error_code readFuncProfile(const uint8_t *Start);

  /// Read the contents of the given profile instance.
  std::error_code readProfile(FunctionSamples &FProfile);

  /// Read the contents of Magic number and Version number.
  std::error_code readMagicIdent();

  /// Read profile summary.
  std::error_code readSummary();

  /// Read the whole name table.
  std::error_code readNameTable();

  /// Read a string indirectly via the name table.
  ErrorOr<StringRef> readStringFromTable();

  /// Read a context indirectly via the CSNameTable.
  ErrorOr<SampleContextFrames> readContextFromTable();

  /// Read a context indirectly via the CSNameTable if the profile has context,
  /// otherwise same as readStringFromTable.
  ErrorOr<SampleContext> readSampleContextFromTable();

  /// Points to the current location in the buffer.
  const uint8_t *Data = nullptr;

  /// Points to the end of the buffer.
  const uint8_t *End = nullptr;

  /// Function name table.
  std::vector<StringRef> NameTable;

  /// If MD5 is used in NameTable section, the section saves uint64_t data.
  /// The uint64_t data has to be converted to a string and then the string
  /// will be used to initialize StringRef in NameTable.
  /// Note NameTable contains StringRef so it needs another buffer to own
  /// the string data. MD5StringBuf serves as the string buffer that is
  /// referenced by NameTable (vector of StringRef). We make sure
  /// the lifetime of MD5StringBuf is not shorter than that of NameTable.
  std::vector<std::string> MD5StringBuf;

  /// The starting address of NameTable containing fixed length MD5.
  const uint8_t *MD5NameMemStart = nullptr;

  /// CSNameTable is used to save full context vectors. It is the backing buffer
  /// for SampleContextFrames.
  std::vector<SampleContextFrameVector> CSNameTable;

private:
  std::error_code readSummaryEntry(std::vector<ProfileSummaryEntry> &Entries);
  virtual std::error_code verifySPMagic(uint64_t Magic) = 0;
};

class SampleProfileReaderRawBinary : public SampleProfileReaderBinary {
private:
  std::error_code verifySPMagic(uint64_t Magic) override;

public:
  SampleProfileReaderRawBinary(std::unique_ptr<MemoryBuffer> B, LLVMContext &C,
                               SampleProfileFormat Format = SPF_Binary)
      : SampleProfileReaderBinary(std::move(B), C, Format) {}

  /// \brief Return true if \p Buffer is in the format supported by this class.
  static bool hasFormat(const MemoryBuffer &Buffer);
};

/// SampleProfileReaderExtBinaryBase/SampleProfileWriterExtBinaryBase define
/// the basic structure of the extensible binary format.
/// The format is organized in sections, except for the magic and version
/// number at the beginning. There is a section table before all the sections,
/// and each entry in the table describes the entry type, start, size and
/// attributes. The format within each section is defined by the section
/// itself.
///
/// It is easy to add a new section while maintaining the backward
/// compatibility of the profile. Nothing extra needs to be done. If we want
/// to extend an existing section, like adding cache miss information in
/// addition to the sample count in the profile body, we can add a new section
/// with the extension and retire the existing section, and we could choose
/// to keep the parser of the old section if we want the reader to be able
/// to read profiles in both the new and the old format.
///
/// SampleProfileReaderExtBinary/SampleProfileWriterExtBinary define the
/// commonly used sections of a profile in extensible binary format. It is
/// possible to define other types of profiles inheriting from
/// SampleProfileReaderExtBinaryBase/SampleProfileWriterExtBinaryBase.
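///
/// A subclass sketch (class name and behavior are hypothetical) that accepts
/// any magic value and skips the payload of custom sections:
///
/// \code
///   class MyProfileReader : public SampleProfileReaderExtBinaryBase {
///   public:
///     using SampleProfileReaderExtBinaryBase::SampleProfileReaderExtBinaryBase;
///
///   protected:
///     std::error_code verifySPMagic(uint64_t Magic) override {
///       return sampleprof_error::success;
///     }
///     std::error_code readCustomSection(const SecHdrTableEntry &Entry) override {
///       Data = End; // Skip the payload of unrecognized sections.
///       return sampleprof_error::success;
///     }
///   };
/// \endcode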
class SampleProfileReaderExtBinaryBase : public SampleProfileReaderBinary {
private:
  std::error_code decompressSection(const uint8_t *SecStart,
                                    const uint64_t SecSize,
                                    const uint8_t *&DecompressBuf,
                                    uint64_t &DecompressBufSize);

  BumpPtrAllocator Allocator;

protected:
  std::vector<SecHdrTableEntry> SecHdrTable;
  std::error_code readSecHdrTableEntry(uint64_t Idx);
  std::error_code readSecHdrTable();

  std::error_code readFuncMetadata(bool ProfileHasAttribute);
  std::error_code readFuncMetadata(bool ProfileHasAttribute,
                                   FunctionSamples *FProfile);
  std::error_code readFuncOffsetTable();
  std::error_code readFuncProfiles();
  std::error_code readNameTableSec(bool IsMD5, bool FixedLengthMD5);
  std::error_code readCSNameTableSec();
  std::error_code readProfileSymbolList();

  std::error_code readHeader() override;
  std::error_code verifySPMagic(uint64_t Magic) override = 0;
  virtual std::error_code readOneSection(const uint8_t *Start, uint64_t Size,
                                         const SecHdrTableEntry &Entry);
  // placeholder for subclasses to dispatch their own section readers.
  virtual std::error_code readCustomSection(const SecHdrTableEntry &Entry) = 0;

  /// Determine which container readFuncOffsetTable() should populate, the list
  /// FuncOffsetList or the map FuncOffsetTable.
  bool useFuncOffsetList() const;

  std::unique_ptr<ProfileSymbolList> ProfSymList;

  /// The table mapping from function context to the offset of its
  /// FunctionSamples relative to the file start.
  /// At most one of FuncOffsetTable and FuncOffsetList is populated.
  DenseMap<SampleContext, uint64_t> FuncOffsetTable;

  /// The list version of FuncOffsetTable. This is used if every entry is
  /// being accessed.
  std::vector<std::pair<SampleContext, uint64_t>> FuncOffsetList;

  /// The set containing the functions to use when compiling a module.
  DenseSet<StringRef> FuncsToUse;

  /// If SkipFlatProf is true, skip the sections with
  /// SecFlagFlat flag.
  bool SkipFlatProf = false;

public:
  SampleProfileReaderExtBinaryBase(std::unique_ptr<MemoryBuffer> B,
                                   LLVMContext &C, SampleProfileFormat Format)
      : SampleProfileReaderBinary(std::move(B), C, Format) {}

  /// Read sample profiles in extensible format from the associated file.
  std::error_code readImpl() override;

  /// Get the total size of all \p Type sections.
  uint64_t getSectionSize(SecType Type);
  /// Get the total size of header and all sections.
  uint64_t getFileSize();
  bool dumpSectionInfo(raw_ostream &OS = dbgs()) override;

  /// Collect functions with definitions in Module M. Return true if
  /// the reader has been given a module.
  bool collectFuncsFromModule() override;

  std::unique_ptr<ProfileSymbolList> getProfileSymbolList() override {
    return std::move(ProfSymList);
  };

  void setSkipFlatProf(bool Skip) override { SkipFlatProf = Skip; }
};

class SampleProfileReaderExtBinary : public SampleProfileReaderExtBinaryBase {
private:
  std::error_code verifySPMagic(uint64_t Magic) override;
  std::error_code readCustomSection(const SecHdrTableEntry &Entry) override {
    // Update the data reader pointer to the end of the section.
    Data = End;
    return sampleprof_error::success;
  };

public:
  SampleProfileReaderExtBinary(std::unique_ptr<MemoryBuffer> B, LLVMContext &C,
                               SampleProfileFormat Format = SPF_Ext_Binary)
      : SampleProfileReaderExtBinaryBase(std::move(B), C, Format) {}

  /// \brief Return true if \p Buffer is in the format supported by this class.
  static bool hasFormat(const MemoryBuffer &Buffer);
};

using InlineCallStack = SmallVector<FunctionSamples *, 10>;

// Supported histogram types in GCC.  Currently, we only need support for
// call target histograms.
enum HistType {
  HIST_TYPE_INTERVAL,
  HIST_TYPE_POW2,
  HIST_TYPE_SINGLE_VALUE,
  HIST_TYPE_CONST_DELTA,
  HIST_TYPE_INDIR_CALL,
  HIST_TYPE_AVERAGE,
  HIST_TYPE_IOR,
  HIST_TYPE_INDIR_CALL_TOPN
};

class SampleProfileReaderGCC : public SampleProfileReader {
public:
  SampleProfileReaderGCC(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
      : SampleProfileReader(std::move(B), C, SPF_GCC),
        GcovBuffer(Buffer.get()) {}

  /// Read and validate the file header.
  std::error_code readHeader() override;

  /// Read sample profiles from the associated file.
  std::error_code readImpl() override;

  /// Return true if \p Buffer is in the format supported by this class.
  static bool hasFormat(const MemoryBuffer &Buffer);

protected:
  std::error_code readNameTable();
  std::error_code readOneFunctionProfile(const InlineCallStack &InlineStack,
                                         bool Update, uint32_t Offset);
  std::error_code readFunctionProfiles();
  std::error_code skipNextWord();
  template <typename T> ErrorOr<T> readNumber();
  ErrorOr<StringRef> readString();

  /// Read the section tag and check that it's the same as \p Expected.
  std::error_code readSectionTag(uint32_t Expected);

  /// GCOV buffer containing the profile.
  GCOVBuffer GcovBuffer;

  /// Function names in this profile.
  std::vector<std::string> Names;

  /// GCOV tags used to separate sections in the profile file.
  static const uint32_t GCOVTagAFDOFileNames = 0xaa000000;
  static const uint32_t GCOVTagAFDOFunction = 0xac000000;
};

} // end namespace sampleprof

} // end namespace llvm

#endif // LLVM_PROFILEDATA_SAMPLEPROFREADER_H
//===- SampleProfWriter.h - Write LLVM sample profile data ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions needed for writing sample profiles.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
#define LLVM_PROFILEDATA_SAMPLEPROFWRITER_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <set>
#include <system_error>

namespace llvm {
namespace sampleprof {

enum SectionLayout {
  DefaultLayout,
  // The layout splits the profile with context information from the profile
  // without context information. When ThinLTO is enabled, the ThinLTO postlink
  // phase only has to load the profile with context information and can skip
  // the other part.
  CtxSplitLayout,
  NumOfLayout,
};

/// When writing a profile with a size limit, the user may want to use a
/// different strategy to reduce the function count than dropping the
/// functions with the fewest samples first. In this case a class implementing
/// the same interface should be provided to
/// SampleProfileWriter::writeWithSizeLimit().
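///
/// For illustration, a custom strategy (the class name and the Writer,
/// Profiles and Limit variables are hypothetical) that erases an arbitrary
/// entry per iteration could look like:
///
/// \code
///   class DropAnyFunctionStrategy : public FunctionPruningStrategy {
///   public:
///     using FunctionPruningStrategy::FunctionPruningStrategy;
///     void Erase(size_t CurrentOutputSize) override {
///       // At least one entry must be erased on every call.
///       if (!ProfileMap.empty())
///         ProfileMap.erase(ProfileMap.begin());
///     }
///   };
///   Writer->writeWithSizeLimit<DropAnyFunctionStrategy>(Profiles, Limit);
/// \endcode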
class FunctionPruningStrategy {
protected:
  SampleProfileMap &ProfileMap;
  size_t OutputSizeLimit;

public:
  /// \p ProfileMap A reference to the original profile map. It will be modified
  /// by Erase().
  /// \p OutputSizeLimit Size limit in bytes of the output profile. This is
  /// necessary to estimate how many functions to remove.
  FunctionPruningStrategy(SampleProfileMap &ProfileMap, size_t OutputSizeLimit)
      : ProfileMap(ProfileMap), OutputSizeLimit(OutputSizeLimit) {}

  virtual ~FunctionPruningStrategy() = default;

  /// SampleProfileWriter::writeWithSizeLimit() calls this after every write
  /// iteration if the output size still exceeds the limit. This function
  /// should erase some functions from the profile map so that the writer tries
  /// to write the profile again with fewer functions. At least 1 entry from the
  /// profile map must be erased.
  ///
  /// \p CurrentOutputSize Number of bytes in the output if current profile map
  /// is written.
  virtual void Erase(size_t CurrentOutputSize) = 0;
};

class DefaultFunctionPruningStrategy : public FunctionPruningStrategy {
  std::vector<NameFunctionSamples> SortedFunctions;

public:
  DefaultFunctionPruningStrategy(SampleProfileMap &ProfileMap,
                                 size_t OutputSizeLimit);

  /// In this default implementation, functions with the fewest samples are
  /// dropped first. Since the exact size of the output cannot be easily
  /// calculated due to compression, we use a heuristic to remove as many
  /// functions as necessary but not too many, aiming to minimize the number
  /// of write iterations.
  /// Empirically, functions with larger total sample count contain linearly
  /// more sample entries, meaning it takes linearly more space to write them.
  /// The cumulative length is therefore quadratic if all functions are sorted
  /// by total sample count.
  /// TODO: Find better heuristic.
  void Erase(size_t CurrentOutputSize) override;
};

/// Sample-based profile writer. Base class.
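///
/// A minimal writing sketch (the output file name and the Profiles map are
/// illustrative):
///
/// \code
///   auto WriterOrErr =
///       SampleProfileWriter::create(StringRef("out.prof"), SPF_Ext_Binary);
///   if (std::error_code EC = WriterOrErr.getError())
///     errs() << "cannot create writer: " << EC.message() << "\n";
///   else if (std::error_code EC = (*WriterOrErr)->write(Profiles))
///     errs() << "cannot write profile: " << EC.message() << "\n";
/// \endcode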
class SampleProfileWriter {
public:
  virtual ~SampleProfileWriter() = default;

  /// Write sample profiles in \p S.
  ///
  /// \returns status code of the file update operation.
  virtual std::error_code writeSample(const FunctionSamples &S) = 0;

  /// Write all the sample profiles in the given map of samples.
  ///
  /// \returns status code of the file update operation.
  virtual std::error_code write(const SampleProfileMap &ProfileMap);

  /// Write sample profiles up to given size limit, using the pruning strategy
  /// to drop some functions if necessary.
  ///
  /// \returns status code of the file update operation.
  template <typename FunctionPruningStrategy = DefaultFunctionPruningStrategy>
  std::error_code writeWithSizeLimit(SampleProfileMap &ProfileMap,
                                     size_t OutputSizeLimit) {
    FunctionPruningStrategy Strategy(ProfileMap, OutputSizeLimit);
    return writeWithSizeLimitInternal(ProfileMap, OutputSizeLimit, &Strategy);
  }

  raw_ostream &getOutputStream() { return *OutputStream; }

  /// Profile writer factory.
  ///
  /// Create a new file writer based on the value of \p Format.
  static ErrorOr<std::unique_ptr<SampleProfileWriter>>
  create(StringRef Filename, SampleProfileFormat Format);

  /// Create a new stream writer based on the value of \p Format.
  /// For testing.
  static ErrorOr<std::unique_ptr<SampleProfileWriter>>
  create(std::unique_ptr<raw_ostream> &OS, SampleProfileFormat Format);

  virtual void setProfileSymbolList(ProfileSymbolList *PSL) {}
  virtual void setToCompressAllSections() {}
  virtual void setUseMD5() {}
  virtual void setPartialProfile() {}
  virtual void resetSecLayout(SectionLayout SL) {}

protected:
  SampleProfileWriter(std::unique_ptr<raw_ostream> &OS)
      : OutputStream(std::move(OS)) {}

  /// Write a file header for the profile file.
  virtual std::error_code writeHeader(const SampleProfileMap &ProfileMap) = 0;

  // Write function profiles to the profile file.
  virtual std::error_code writeFuncProfiles(const SampleProfileMap &ProfileMap);

  std::error_code writeWithSizeLimitInternal(SampleProfileMap &ProfileMap,
                                             size_t OutputSizeLimit,
                                             FunctionPruningStrategy *Strategy);

  /// For writeWithSizeLimit in text mode, each newline takes 1 additional byte
  /// on Windows when actually written to the file, but not written to a memory
  /// buffer. This needs to be accounted for when rewriting the profile.
  size_t LineCount;

  /// Output stream where to emit the profile to.
  std::unique_ptr<raw_ostream> OutputStream;

  /// Profile summary.
  std::unique_ptr<ProfileSummary> Summary;

  /// Compute summary for this profile.
  void computeSummary(const SampleProfileMap &ProfileMap);

  /// Profile format.
  SampleProfileFormat Format = SPF_None;
};

/// Sample-based profile writer (text format).
class SampleProfileWriterText : public SampleProfileWriter {
public:
  std::error_code writeSample(const FunctionSamples &S) override;

protected:
  SampleProfileWriterText(std::unique_ptr<raw_ostream> &OS)
      : SampleProfileWriter(OS), Indent(0) {}

  std::error_code writeHeader(const SampleProfileMap &ProfileMap) override {
    LineCount = 0;
    return sampleprof_error::success;
  }

private:
  /// Indent level to use when writing.
  ///
  /// This is used when printing inlined callees.
  unsigned Indent;

  friend ErrorOr<std::unique_ptr<SampleProfileWriter>>
  SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS,
                              SampleProfileFormat Format);
};

/// Sample-based profile writer (binary format).
class SampleProfileWriterBinary : public SampleProfileWriter {
public:
  SampleProfileWriterBinary(std::unique_ptr<raw_ostream> &OS)
      : SampleProfileWriter(OS) {}

  std::error_code writeSample(const FunctionSamples &S) override;

protected:
  virtual MapVector<StringRef, uint32_t> &getNameTable() { return NameTable; }
  virtual std::error_code writeMagicIdent(SampleProfileFormat Format);
  virtual std::error_code writeNameTable();
  std::error_code writeHeader(const SampleProfileMap &ProfileMap) override;
  std::error_code writeSummary();
  virtual std::error_code writeContextIdx(const SampleContext &Context);
  std::error_code writeNameIdx(StringRef FName);
  std::error_code writeBody(const FunctionSamples &S);
  inline void stablizeNameTable(MapVector<StringRef, uint32_t> &NameTable,
                                std::set<StringRef> &V);

  MapVector<StringRef, uint32_t> NameTable;

  void addName(StringRef FName);
  virtual void addContext(const SampleContext &Context);
  void addNames(const FunctionSamples &S);

private:
  friend ErrorOr<std::unique_ptr<SampleProfileWriter>>
  SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS,
                              SampleProfileFormat Format);
};

class SampleProfileWriterRawBinary : public SampleProfileWriterBinary {
  using SampleProfileWriterBinary::SampleProfileWriterBinary;
};

const std::array<SmallVector<SecHdrTableEntry, 8>, NumOfLayout>
    ExtBinaryHdrLayoutTable = {
        // Note that SecFuncOffsetTable section is written after SecLBRProfile
        // in the profile, but is put before SecLBRProfile in SectionHdrLayout.
        // This is because the sample reader follows the order in
        // SectionHdrLayout to read each section. To read function profiles on
        // demand, the sample reader needs to get the offset of each function
        // profile first.
        //
        // DefaultLayout
        SmallVector<SecHdrTableEntry, 8>({{SecProfSummary, 0, 0, 0, 0},
                                          {SecNameTable, 0, 0, 0, 0},
                                          {SecCSNameTable, 0, 0, 0, 0},
                                          {SecFuncOffsetTable, 0, 0, 0, 0},
                                          {SecLBRProfile, 0, 0, 0, 0},
                                          {SecProfileSymbolList, 0, 0, 0, 0},
                                          {SecFuncMetadata, 0, 0, 0, 0}}),
        // CtxSplitLayout
        SmallVector<SecHdrTableEntry, 8>({{SecProfSummary, 0, 0, 0, 0},
                                          {SecNameTable, 0, 0, 0, 0},
                                          // profile with context
                                          // for next two sections
                                          {SecFuncOffsetTable, 0, 0, 0, 0},
                                          {SecLBRProfile, 0, 0, 0, 0},
                                          // profile without context
                                          // for next two sections
                                          {SecFuncOffsetTable, 0, 0, 0, 0},
                                          {SecLBRProfile, 0, 0, 0, 0},
                                          {SecProfileSymbolList, 0, 0, 0, 0},
                                          {SecFuncMetadata, 0, 0, 0, 0}}),
};

class SampleProfileWriterExtBinaryBase : public SampleProfileWriterBinary {
  using SampleProfileWriterBinary::SampleProfileWriterBinary;
public:
  std::error_code write(const SampleProfileMap &ProfileMap) override;

  void setToCompressAllSections() override;
  void setToCompressSection(SecType Type);
  std::error_code writeSample(const FunctionSamples &S) override;

  // Set to use MD5 to represent strings in the NameTable.
  void setUseMD5() override {
    UseMD5 = true;
    addSectionFlag(SecNameTable, SecNameTableFlags::SecFlagMD5Name);
    // MD5 will be stored as plain uint64_t instead of variable-length
    // quantity format in NameTable section.
    addSectionFlag(SecNameTable, SecNameTableFlags::SecFlagFixedLengthMD5);
  }

  // Set the profile to be partial. It means the profile is for
  // common/shared code. The common profile is usually merged from
  // profiles collected from running other targets.
  void setPartialProfile() override {
    addSectionFlag(SecProfSummary, SecProfSummaryFlags::SecFlagPartial);
  }

  void setProfileSymbolList(ProfileSymbolList *PSL) override {
    ProfSymList = PSL;
  };

  void resetSecLayout(SectionLayout SL) override {
    verifySecLayout(SL);
#ifndef NDEBUG
    // Make sure resetSecLayout is called before any flag setting.
    for (auto &Entry : SectionHdrLayout) {
      assert(Entry.Flags == 0 &&
             "resetSecLayout has to be called before any flag setting");
    }
#endif
    SecLayout = SL;
    SectionHdrLayout = ExtBinaryHdrLayoutTable[SL];
  }

protected:
  uint64_t markSectionStart(SecType Type, uint32_t LayoutIdx);
  std::error_code addNewSection(SecType Sec, uint32_t LayoutIdx,
                                uint64_t SectionStart);
  template <class SecFlagType>
  void addSectionFlag(SecType Type, SecFlagType Flag) {
    for (auto &Entry : SectionHdrLayout) {
      if (Entry.Type == Type)
        addSecFlag(Entry, Flag);
    }
  }
  template <class SecFlagType>
  void addSectionFlag(uint32_t SectionIdx, SecFlagType Flag) {
    addSecFlag(SectionHdrLayout[SectionIdx], Flag);
  }

  void addContext(const SampleContext &Context) override;

  // placeholder for subclasses to dispatch their own section writers.
  virtual std::error_code writeCustomSection(SecType Type) = 0;
  // Verify the SecLayout is supported by the format.
  virtual void verifySecLayout(SectionLayout SL) = 0;

  // specify the order to write sections.
  virtual std::error_code writeSections(const SampleProfileMap &ProfileMap) = 0;

  // Dispatch section writer for each section. \p LayoutIdx is the sequence
  // number indicating where the section is located in SectionHdrLayout.
  virtual std::error_code writeOneSection(SecType Type, uint32_t LayoutIdx,
                                          const SampleProfileMap &ProfileMap);

  // Helper function to write name table.
  std::error_code writeNameTable() override;
  std::error_code writeContextIdx(const SampleContext &Context) override;
  std::error_code writeCSNameIdx(const SampleContext &Context);
  std::error_code writeCSNameTableSection();

  std::error_code writeFuncMetadata(const SampleProfileMap &Profiles);
  std::error_code writeFuncMetadata(const FunctionSamples &Profile);

  // Functions to write various kinds of sections.
  std::error_code writeNameTableSection(const SampleProfileMap &ProfileMap);
  std::error_code writeFuncOffsetTable();
  std::error_code writeProfileSymbolListSection();

  SectionLayout SecLayout = DefaultLayout;
  // Specify the order of sections in the section header table. Note
  // the order of sections in SecHdrTable may be different from the
  // order in SectionHdrLayout. The sample reader will follow the order
  // in SectionHdrLayout to read each section.
  SmallVector<SecHdrTableEntry, 8> SectionHdrLayout =
      ExtBinaryHdrLayoutTable[DefaultLayout];

  // Save the start of SecLBRProfile so we can compute the offset from the
  // start of SecLBRProfile to each function's profile, which is kept in
  // FuncOffsetTable.
  uint64_t SecLBRProfileStart = 0;

private:
  void allocSecHdrTable();
  std::error_code writeSecHdrTable();
  std::error_code writeHeader(const SampleProfileMap &ProfileMap) override;
  std::error_code compressAndOutput();

  // We will swap the raw_ostream held by LocalBufStream and that
  // held by OutputStream if we try to add a section which needs
  // compression. After the swap, all the data written to output
  // will be temporarily buffered into the underlying raw_string_ostream
  // originally held by LocalBufStream. After the data writing for the
  // section is completed, compress the data in the local buffer,
  // swap the raw_ostream back and write the compressed data to the
  // real output.
  std::unique_ptr<raw_ostream> LocalBufStream;
  // The location where the output stream starts.
  uint64_t FileStart;
  // The location in the output stream where the SecHdrTable should be
  // written to.
  uint64_t SecHdrTableOffset;
  // The table contains SecHdrTableEntry entries in order of how they are
  // populated in the writer. It may be different from the order in
  // SectionHdrLayout which specifies the sequence in which sections will
  // be read.
  std::vector<SecHdrTableEntry> SecHdrTable;

  // FuncOffsetTable maps function context to its profile offset in
  // SecLBRProfile section. It is used to load function profile on demand.
  MapVector<SampleContext, uint64_t> FuncOffsetTable;
  // Whether to use MD5 to represent string.
  bool UseMD5 = false;

  /// CSNameTable maps function context to its offset in SecCSNameTable section.
  /// The offset will be used everywhere where the context is referenced.
  MapVector<SampleContext, uint32_t> CSNameTable;

  ProfileSymbolList *ProfSymList = nullptr;
};

class SampleProfileWriterExtBinary : public SampleProfileWriterExtBinaryBase {
public:
  SampleProfileWriterExtBinary(std::unique_ptr<raw_ostream> &OS)
      : SampleProfileWriterExtBinaryBase(OS) {}

private:
  std::error_code writeDefaultLayout(const SampleProfileMap &ProfileMap);
  std::error_code writeCtxSplitLayout(const SampleProfileMap &ProfileMap);

  std::error_code writeSections(const SampleProfileMap &ProfileMap) override;

  std::error_code writeCustomSection(SecType Type) override {
    return sampleprof_error::success;
  };

  void verifySecLayout(SectionLayout SL) override {
    assert((SL == DefaultLayout || SL == CtxSplitLayout) &&
           "Unsupported layout");
  }
};

} // end namespace sampleprof
} // end namespace llvm

#endif // LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
#ifndef MEMPROF_DATA_INC
#define MEMPROF_DATA_INC
/*===-- MemProfData.inc - MemProf profiling runtime structures -*- C++ -*-=== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/
/*
 * This is the main file that defines all the data structures, signatures,
 * and constant literals that are shared across the profiling runtime library
 * and host tools (reader/writer).
 *
 * This file has two identical copies. The primary copy lives in LLVM and
 * the other one sits in the compiler-rt/include/profile directory. To make
 * changes in this file, first modify the primary copy and copy it over to
 * compiler-rt.
 * Testing of any change in this file can start only after the two copies are
 * synced up.
 *
\*===----------------------------------------------------------------------===*/
#include <string.h>

#ifdef _MSC_VER
#define PACKED(...) __pragma(pack(push,1)) __VA_ARGS__ __pragma(pack(pop))
#else
#define PACKED(...) __VA_ARGS__ __attribute__((__packed__))
#endif

// A 64-bit magic number to uniquely identify the raw binary memprof profile file.
#define MEMPROF_RAW_MAGIC_64                                                                        \
  ((uint64_t)255 << 56 | (uint64_t)'m' << 48 | (uint64_t)'p' << 40 | (uint64_t)'r' << 32 |          \
   (uint64_t)'o' << 24 | (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
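
// Spelled out, this is the byte sequence 0xff 'm' 'p' 'r' 'o' 'f' 'r' 0x81,
// i.e. the 64-bit constant 0xff6d70726f667281.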

// The version number of the raw binary format.
#define MEMPROF_RAW_VERSION 3ULL

#define MEMPROF_BUILDID_MAX_SIZE 32ULL

namespace llvm {
namespace memprof {
// A struct describing the header used for the raw binary memprof profile format.
PACKED(struct Header {
  uint64_t Magic;
  uint64_t Version;
  uint64_t TotalSize;
  uint64_t SegmentOffset;
  uint64_t MIBOffset;
  uint64_t StackOffset;
});

// A struct containing the information necessary to describe a /proc/maps
// segment entry for a particular binary/library identified by its build id.
PACKED(struct SegmentEntry {
  uint64_t Start;
  uint64_t End;
  uint64_t Offset;
  uint64_t BuildIdSize;
  uint8_t BuildId[MEMPROF_BUILDID_MAX_SIZE] = {0};

  // This constructor is only used in tests, so it doesn't set the BuildId.
  SegmentEntry(uint64_t S, uint64_t E, uint64_t O)
      : Start(S), End(E), Offset(O), BuildIdSize(0) {}

  SegmentEntry(const SegmentEntry& S) {
    Start = S.Start;
    End = S.End;
    Offset = S.Offset;
    BuildIdSize = S.BuildIdSize;
    memcpy(BuildId, S.BuildId, S.BuildIdSize);
  }

  SegmentEntry& operator=(const SegmentEntry& S) {
    Start = S.Start;
    End = S.End;
    Offset = S.Offset;
    BuildIdSize = S.BuildIdSize;
    memcpy(BuildId, S.BuildId, S.BuildIdSize);
    return *this;
  }

  bool operator==(const SegmentEntry& S) const {
    return Start == S.Start && End == S.End && Offset == S.Offset &&
           BuildIdSize == S.BuildIdSize &&
           memcmp(BuildId, S.BuildId, S.BuildIdSize) == 0;
  }
});

// Packed struct definition for MSVC. We can't use the PACKED macro defined in
// MemProfData.inc since it would mean we are embedding a directive (the
// #include for MIBEntryDef) into the macros which is undefined behaviour.
#ifdef _MSC_VER
__pragma(pack(push,1))
#endif

// A struct representing the heap allocation characteristics of a particular
// runtime context. This struct is shared between the compiler-rt runtime and
// the raw profile reader. The indexed format uses a separate, self-describing
// backwards compatible format.
struct MemInfoBlock {

#define MIBEntryDef(NameTag, Name, Type) Type Name;
#include "MIBEntryDef.inc"
#undef MIBEntryDef

bool operator==(const MemInfoBlock& Other) const {
  bool IsEqual = true;
#define MIBEntryDef(NameTag, Name, Type) \
  IsEqual = (IsEqual && Name == Other.Name);
#include "MIBEntryDef.inc"
#undef MIBEntryDef
  return IsEqual;
}

MemInfoBlock() {
#define MIBEntryDef(NameTag, Name, Type) Name = Type();
#include "MIBEntryDef.inc"
#undef MIBEntryDef
}

MemInfoBlock(uint32_t Size, uint64_t AccessCount, uint32_t AllocTs,
             uint32_t DeallocTs, uint32_t AllocCpu, uint32_t DeallocCpu)
    : MemInfoBlock() {
  AllocCount = 1U;
  TotalAccessCount = AccessCount;
  MinAccessCount = AccessCount;
  MaxAccessCount = AccessCount;
  TotalSize = Size;
  MinSize = Size;
  MaxSize = Size;
  AllocTimestamp = AllocTs;
  DeallocTimestamp = DeallocTs;
  TotalLifetime = DeallocTimestamp - AllocTimestamp;
  MinLifetime = TotalLifetime;
  MaxLifetime = TotalLifetime;
  // Access density is accesses per byte. Multiply by 100 to include the
  // fractional part.
  TotalAccessDensity = AccessCount * 100 / Size;
  MinAccessDensity = TotalAccessDensity;
  MaxAccessDensity = TotalAccessDensity;
  // Lifetime access density is the access density per second of lifetime.
  // Multiply by 1000 to convert the denominator lifetime to seconds (using a
  // minimum lifetime of 1ms to avoid divide by 0). Do the multiplication
  // first to reduce truncations to 0.
  TotalLifetimeAccessDensity =
      TotalAccessDensity * 1000 / (TotalLifetime ? TotalLifetime : 1);
  MinLifetimeAccessDensity = TotalLifetimeAccessDensity;
  MaxLifetimeAccessDensity = TotalLifetimeAccessDensity;
  AllocCpuId = AllocCpu;
  DeallocCpuId = DeallocCpu;
  NumMigratedCpu = AllocCpuId != DeallocCpuId;
}
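
// For example (illustrative numbers): Size = 8, AccessCount = 4 and a
// lifetime of 5 (i.e. DeallocTs - AllocTs = 5ms) give
// TotalAccessDensity = 4 * 100 / 8 = 50 and
// TotalLifetimeAccessDensity = 50 * 1000 / 5 = 10000.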

void Merge(const MemInfoBlock &newMIB) {
  AllocCount += newMIB.AllocCount;

  TotalAccessCount += newMIB.TotalAccessCount;
  MinAccessCount = newMIB.MinAccessCount < MinAccessCount ? newMIB.MinAccessCount : MinAccessCount;
  MaxAccessCount = newMIB.MaxAccessCount > MaxAccessCount ? newMIB.MaxAccessCount : MaxAccessCount;

  TotalSize += newMIB.TotalSize;
  MinSize = newMIB.MinSize < MinSize ? newMIB.MinSize : MinSize;
  MaxSize = newMIB.MaxSize > MaxSize ? newMIB.MaxSize : MaxSize;

  TotalLifetime += newMIB.TotalLifetime;
  MinLifetime = newMIB.MinLifetime < MinLifetime ? newMIB.MinLifetime : MinLifetime;
  MaxLifetime = newMIB.MaxLifetime > MaxLifetime ? newMIB.MaxLifetime : MaxLifetime;

  TotalAccessDensity += newMIB.TotalAccessDensity;
  MinAccessDensity = newMIB.MinAccessDensity < MinAccessDensity
                         ? newMIB.MinAccessDensity
                         : MinAccessDensity;
  MaxAccessDensity = newMIB.MaxAccessDensity > MaxAccessDensity
                         ? newMIB.MaxAccessDensity
                         : MaxAccessDensity;

  TotalLifetimeAccessDensity += newMIB.TotalLifetimeAccessDensity;
  MinLifetimeAccessDensity =
      newMIB.MinLifetimeAccessDensity < MinLifetimeAccessDensity
          ? newMIB.MinLifetimeAccessDensity
          : MinLifetimeAccessDensity;
  MaxLifetimeAccessDensity =
      newMIB.MaxLifetimeAccessDensity > MaxLifetimeAccessDensity
          ? newMIB.MaxLifetimeAccessDensity
          : MaxLifetimeAccessDensity;

  // We know newMIB was deallocated later, so we just need to check whether it
  // was allocated before the last one was deallocated.
  NumLifetimeOverlaps += newMIB.AllocTimestamp < DeallocTimestamp;
  AllocTimestamp = newMIB.AllocTimestamp;
  DeallocTimestamp = newMIB.DeallocTimestamp;

  NumSameAllocCpu += AllocCpuId == newMIB.AllocCpuId;
  NumSameDeallocCpu += DeallocCpuId == newMIB.DeallocCpuId;
  AllocCpuId = newMIB.AllocCpuId;
  DeallocCpuId = newMIB.DeallocCpuId;
}

#ifdef _MSC_VER
} __pragma(pack(pop));
#else
} __attribute__((__packed__));
#endif

} // namespace memprof
} // namespace llvm

#endif
//===- SampleProf.h - Sampling profiling format support ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains common definitions used in the reading and writing of
// sample profile data.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_SAMPLEPROF_H
#define LLVM_PROFILEDATA_SAMPLEPROF_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cstdint>
#include <list>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <system_error>
#include <unordered_map>
#include <utility>

namespace llvm {

class DILocation;
class raw_ostream;

const std::error_category &sampleprof_category();

enum class sampleprof_error {
  success = 0,
  bad_magic,
  unsupported_version,
  too_large,
  truncated,
  malformed,
  unrecognized_format,
  unsupported_writing_format,
  truncated_name_table,
  not_implemented,
  counter_overflow,
  ostream_seek_unsupported,
  uncompress_failed,
  zlib_unavailable,
  hash_mismatch
};

inline std::error_code make_error_code(sampleprof_error E) {
  return std::error_code(static_cast<int>(E), sampleprof_category());
}

inline sampleprof_error MergeResult(sampleprof_error &Accumulator,
                                    sampleprof_error Result) {
  // Prefer first error encountered as later errors may be secondary effects of
  // the initial problem.
  if (Accumulator == sampleprof_error::success &&
      Result != sampleprof_error::success)
    Accumulator = Result;
  return Accumulator;
}

} // end namespace llvm

namespace std {

template <>
struct is_error_code_enum<llvm::sampleprof_error> : std::true_type {};

} // end namespace std

namespace llvm {
namespace sampleprof {

enum SampleProfileFormat {
  SPF_None = 0,
  SPF_Text = 0x1,
  SPF_Compact_Binary = 0x2, // Deprecated
  SPF_GCC = 0x3,
  SPF_Ext_Binary = 0x4,
  SPF_Binary = 0xff
};

enum SampleProfileLayout {
  SPL_None = 0,
  SPL_Nest = 0x1,
  SPL_Flat = 0x2,
};

static inline uint64_t SPMagic(SampleProfileFormat Format = SPF_Binary) {
  return uint64_t('S') << (64 - 8) | uint64_t('P') << (64 - 16) |
         uint64_t('R') << (64 - 24) | uint64_t('O') << (64 - 32) |
         uint64_t('F') << (64 - 40) | uint64_t('4') << (64 - 48) |
         uint64_t('2') << (64 - 56) | uint64_t(Format);
}
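
// For example, SPMagic(SPF_Ext_Binary) evaluates to 0x5350524f46343204:
// the bytes "SPROF42" followed by the format value 0x04.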

/// Get the proper representation of a string according to whether the
/// current Format uses MD5 to represent the string.
static inline StringRef getRepInFormat(StringRef Name, bool UseMD5,
                                       std::string &GUIDBuf) {
  if (Name.empty() || !UseMD5)
    return Name;
  GUIDBuf = std::to_string(Function::getGUID(Name));
  return GUIDBuf;
}
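
// For example (illustrative name), with UseMD5 == true, "foo" is represented
// by the decimal string of Function::getGUID("foo"), stored in GUIDBuf.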

static inline uint64_t SPVersion() { return 103; }

// Section Type used by SampleProfileExtBinaryBaseReader and
// SampleProfileExtBinaryBaseWriter. Never change the existing
// value of enum. Only append new ones.
enum SecType {
  SecInValid = 0,
  SecProfSummary = 1,
  SecNameTable = 2,
  SecProfileSymbolList = 3,
  SecFuncOffsetTable = 4,
  SecFuncMetadata = 5,
  SecCSNameTable = 6,
  // marker for the first type of profile.
  SecFuncProfileFirst = 32,
  SecLBRProfile = SecFuncProfileFirst
};

static inline std::string getSecName(SecType Type) {
  switch ((int)Type) { // Avoid -Wcovered-switch-default
  case SecInValid:
    return "InvalidSection";
  case SecProfSummary:
    return "ProfileSummarySection";
  case SecNameTable:
    return "NameTableSection";
  case SecProfileSymbolList:
    return "ProfileSymbolListSection";
  case SecFuncOffsetTable:
    return "FuncOffsetTableSection";
  case SecFuncMetadata:
    return "FunctionMetadata";
  case SecCSNameTable:
    return "CSNameTableSection";
  case SecLBRProfile:
    return "LBRProfileSection";
  default:
    return "UnknownSection";
  }
}

// Entry type of section header table used by SampleProfileExtBinaryBaseReader
// and SampleProfileExtBinaryBaseWriter.
struct SecHdrTableEntry {
  SecType Type;
  uint64_t Flags;
  uint64_t Offset;
  uint64_t Size;
  // The index indicating the location of the current entry in
  // SectionHdrLayout table.
  uint64_t LayoutIndex;
};

// Flags common to all sections are defined here. In SecHdrTableEntry::Flags,
// common flags are saved in the lower 32 bits and section specific flags
// are saved in the higher 32 bits.
enum class SecCommonFlags : uint32_t {
  SecFlagInValid = 0,
  SecFlagCompress = (1 << 0),
  // Indicate the section contains only profile without context.
  SecFlagFlat = (1 << 1)
};

// Section specific flags are defined here.
// !!!Note: Every time a new enum class is created here, please add
// a new check in verifySecFlag.
enum class SecNameTableFlags : uint32_t {
  SecFlagInValid = 0,
  SecFlagMD5Name = (1 << 0),
  // Store MD5 in fixed length instead of ULEB128 so NameTable can be
  // accessed like an array.
  SecFlagFixedLengthMD5 = (1 << 1),
  // Profile contains ".__uniq." suffix names. The compiler shouldn't strip
  // the suffix during profile matching when it sees this flag.
  SecFlagUniqSuffix = (1 << 2)
};
enum class SecProfSummaryFlags : uint32_t {
  SecFlagInValid = 0,
  /// SecFlagPartial means the profile is for common/shared code.
  /// The common profile is usually merged from profiles collected
  /// from running other targets.
  SecFlagPartial = (1 << 0),
  /// SecFlagFullContext means this is a context-sensitive flat profile for
  /// CSSPGO.
  SecFlagFullContext = (1 << 1),
  /// SecFlagFSDiscriminator means this profile uses flow-sensitive
  /// discriminators.
  SecFlagFSDiscriminator = (1 << 2),
  /// SecFlagIsPreInlined means this profile contains ShouldBeInlined
  /// contexts, i.e., it was computed by the CS preinliner.
  SecFlagIsPreInlined = (1 << 4),
};

enum class SecFuncMetadataFlags : uint32_t {
  SecFlagInvalid = 0,
  SecFlagIsProbeBased = (1 << 0),
  SecFlagHasAttribute = (1 << 1),
};

enum class SecFuncOffsetFlags : uint32_t {
  SecFlagInvalid = 0,
  // Store function offsets in the order of contexts. The order ensures that
  // the callee contexts of a given context are laid out next to it.
  SecFlagOrdered = (1 << 0),
};

// Verify section specific flag is used for the correct section.
template <class SecFlagType>
static inline void verifySecFlag(SecType Type, SecFlagType Flag) {
  // No verification is needed for common flags.
  if (std::is_same<SecCommonFlags, SecFlagType>())
    return;

  // Verification starts here for section specific flag.
  bool IsFlagLegal = false;
  switch (Type) {
  case SecNameTable:
    IsFlagLegal = std::is_same<SecNameTableFlags, SecFlagType>();
    break;
  case SecProfSummary:
    IsFlagLegal = std::is_same<SecProfSummaryFlags, SecFlagType>();
    break;
  case SecFuncMetadata:
    IsFlagLegal = std::is_same<SecFuncMetadataFlags, SecFlagType>();
    break;
  default:
  case SecFuncOffsetTable:
    IsFlagLegal = std::is_same<SecFuncOffsetFlags, SecFlagType>();
    break;
  }
  if (!IsFlagLegal)
    llvm_unreachable("Misuse of a flag in an incompatible section");
}

template <class SecFlagType>
static inline void addSecFlag(SecHdrTableEntry &Entry, SecFlagType Flag) {
  verifySecFlag(Entry.Type, Flag);
  auto FVal = static_cast<uint64_t>(Flag);
  bool IsCommon = std::is_same<SecCommonFlags, SecFlagType>();
  Entry.Flags |= IsCommon ? FVal : (FVal << 32);
}

template <class SecFlagType>
static inline void removeSecFlag(SecHdrTableEntry &Entry, SecFlagType Flag) {
  verifySecFlag(Entry.Type, Flag);
  auto FVal = static_cast<uint64_t>(Flag);
  bool IsCommon = std::is_same<SecCommonFlags, SecFlagType>();
  Entry.Flags &= ~(IsCommon ? FVal : (FVal << 32));
}

template <class SecFlagType>
static inline bool hasSecFlag(const SecHdrTableEntry &Entry, SecFlagType Flag) {
  verifySecFlag(Entry.Type, Flag);
  auto FVal = static_cast<uint64_t>(Flag);
  bool IsCommon = std::is_same<SecCommonFlags, SecFlagType>();
  return Entry.Flags & (IsCommon ? FVal : (FVal << 32));
}
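
// Illustrative sketch of the flag layout described above: common flags live
// in the lower 32 bits, section-specific flags in the upper 32 bits.
//
//   SecHdrTableEntry Entry = {SecNameTable, /*Flags=*/0, /*Offset=*/0,
//                             /*Size=*/0, /*LayoutIndex=*/0};
//   addSecFlag(Entry, SecCommonFlags::SecFlagCompress);   // sets bit 0
//   addSecFlag(Entry, SecNameTableFlags::SecFlagMD5Name); // sets bit 32
//   assert(hasSecFlag(Entry, SecNameTableFlags::SecFlagMD5Name));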

/// Represents the relative location of an instruction.
///
/// Instruction locations are specified by the line offset from the
/// beginning of the function (marked by the line where the function
/// header is) and the discriminator value within that line.
///
/// The discriminator value is useful to distinguish instructions
/// that are on the same line but belong to different basic blocks
/// (e.g., the two post-increment instructions in "if (p) x++; else y++;").
struct LineLocation {
  LineLocation(uint32_t L, uint32_t D) : LineOffset(L), Discriminator(D) {}

  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator<(const LineLocation &O) const {
    return LineOffset < O.LineOffset ||
           (LineOffset == O.LineOffset && Discriminator < O.Discriminator);
  }

  bool operator==(const LineLocation &O) const {
    return LineOffset == O.LineOffset && Discriminator == O.Discriminator;
  }

  bool operator!=(const LineLocation &O) const {
    return LineOffset != O.LineOffset || Discriminator != O.Discriminator;
  }

  uint32_t LineOffset;
  uint32_t Discriminator;
};

struct LineLocationHash {
  uint64_t operator()(const LineLocation &Loc) const {
    return std::hash<std::uint64_t>{}((((uint64_t)Loc.LineOffset) << 32) |
                                      Loc.Discriminator);
  }
};

raw_ostream &operator<<(raw_ostream &OS, const LineLocation &Loc);

/// Representation of a single sample record.
///
/// A sample record is represented by a positive integer value, which
/// indicates how frequently the associated line location was executed.
///
/// Additionally, if the associated location contains a function call,
/// the record will hold a list of all the possible called targets. For
/// direct calls, this will be the exact function being invoked. For
/// indirect calls (function pointers, virtual table dispatch), this
/// will be a list of one or more functions.
class SampleRecord {
public:
  using CallTarget = std::pair<StringRef, uint64_t>;
  struct CallTargetComparator {
    bool operator()(const CallTarget &LHS, const CallTarget &RHS) const {
      if (LHS.second != RHS.second)
        return LHS.second > RHS.second;

      return LHS.first < RHS.first;
    }
  };

  using SortedCallTargetSet = std::set<CallTarget, CallTargetComparator>;
  using CallTargetMap = StringMap<uint64_t>;
  SampleRecord() = default;

  /// Increment the number of samples for this record by \p S.
  /// Optionally scale sample count \p S by \p Weight.
  ///
  /// Sample counts accumulate using saturating arithmetic, to avoid wrapping
  /// around unsigned integers.
  sampleprof_error addSamples(uint64_t S, uint64_t Weight = 1) {
    bool Overflowed;
    NumSamples = SaturatingMultiplyAdd(S, Weight, NumSamples, &Overflowed);
    return Overflowed ? sampleprof_error::counter_overflow
                      : sampleprof_error::success;
  }

  /// Decrease the number of samples for this record by \p S. Return the
  /// amount of samples actually decreased.
  uint64_t removeSamples(uint64_t S) {
    if (S > NumSamples)
      S = NumSamples;
    NumSamples -= S;
    return S;
  }

  /// Add called function \p F with samples \p S.
  /// Optionally scale sample count \p S by \p Weight.
  ///
  /// Sample counts accumulate using saturating arithmetic, to avoid wrapping
  /// around unsigned integers.
  sampleprof_error addCalledTarget(StringRef F, uint64_t S,
                                   uint64_t Weight = 1) {
    uint64_t &TargetSamples = CallTargets[F];
    bool Overflowed;
    TargetSamples =
        SaturatingMultiplyAdd(S, Weight, TargetSamples, &Overflowed);
    return Overflowed ? sampleprof_error::counter_overflow
                      : sampleprof_error::success;
  }

  /// Remove called function from the call target map. Return the target sample
  /// count of the called function.
  uint64_t removeCalledTarget(StringRef F) {
    uint64_t Count = 0;
    auto I = CallTargets.find(F);
    if (I != CallTargets.end()) {
      Count = I->second;
      CallTargets.erase(I);
    }
    return Count;
  }

  /// Return true if this sample record contains function calls.
  bool hasCalls() const { return !CallTargets.empty(); }

  uint64_t getSamples() const { return NumSamples; }
  const CallTargetMap &getCallTargets() const { return CallTargets; }
  const SortedCallTargetSet getSortedCallTargets() const {
    return SortCallTargets(CallTargets);
  }

  uint64_t getCallTargetSum() const {
    uint64_t Sum = 0;
    for (const auto &I : CallTargets)
      Sum += I.second;
    return Sum;
  }

  /// Sort call targets in descending order of call frequency.
  static const SortedCallTargetSet SortCallTargets(const CallTargetMap &Targets) {
    SortedCallTargetSet SortedTargets;
    for (const auto &[Target, Frequency] : Targets) {
      SortedTargets.emplace(Target, Frequency);
    }
    return SortedTargets;
  }

  /// Prorate call targets by a distribution factor.
  static const CallTargetMap adjustCallTargets(const CallTargetMap &Targets,
                                               float DistributionFactor) {
    CallTargetMap AdjustedTargets;
    for (const auto &[Target, Frequency] : Targets) {
      AdjustedTargets[Target] = Frequency * DistributionFactor;
    }
    return AdjustedTargets;
  }

  /// Merge the samples in \p Other into this record.
  /// Optionally scale sample counts by \p Weight.
  sampleprof_error merge(const SampleRecord &Other, uint64_t Weight = 1);
  void print(raw_ostream &OS, unsigned Indent) const;
  void dump() const;

  bool operator==(const SampleRecord &Other) const {
    return NumSamples == Other.NumSamples && CallTargets == Other.CallTargets;
  }

  bool operator!=(const SampleRecord &Other) const {
    return !(*this == Other);
  }

private:
  uint64_t NumSamples = 0;
  CallTargetMap CallTargets;
};
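
// Illustrative sketch (hypothetical names and counts): recording samples for
// an indirect call site with two observed targets, then reading them back in
// descending order of frequency.
//
//   SampleRecord Rec;
//   Rec.addSamples(100);
//   Rec.addCalledTarget("_Z3foov", 60);
//   Rec.addCalledTarget("_Z3barv", 40);
//   for (const auto &[Target, Count] : Rec.getSortedCallTargets())
//     ; // visits ("_Z3foov", 60) before ("_Z3barv", 40)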

raw_ostream &operator<<(raw_ostream &OS, const SampleRecord &Sample);

// State of context associated with FunctionSamples
enum ContextStateMask {
  UnknownContext = 0x0,   // Profile without context
  RawContext = 0x1,       // Full context profile from input profile
  SyntheticContext = 0x2, // Synthetic context created for context promotion
  InlinedContext = 0x4,   // Profile for context that is inlined into caller
  MergedContext = 0x8     // Profile for context merged into base profile
};

// Attribute of context associated with FunctionSamples
enum ContextAttributeMask {
  ContextNone = 0x0,
  ContextWasInlined = 0x1,      // Leaf of context was inlined in previous build
  ContextShouldBeInlined = 0x2, // Leaf of context should be inlined
  ContextDuplicatedIntoBase =
      0x4, // Leaf of context is duplicated into the base profile
};

// Represents a context frame with function name and line location
struct SampleContextFrame {
  StringRef FuncName;
  LineLocation Location;

  SampleContextFrame() : Location(0, 0) {}

  SampleContextFrame(StringRef FuncName, LineLocation Location)
      : FuncName(FuncName), Location(Location) {}

  bool operator==(const SampleContextFrame &That) const {
    return Location == That.Location && FuncName == That.FuncName;
  }

  bool operator!=(const SampleContextFrame &That) const {
    return !(*this == That);
  }

  std::string toString(bool OutputLineLocation) const {
    std::ostringstream OContextStr;
    OContextStr << FuncName.str();
    if (OutputLineLocation) {
      OContextStr << ":" << Location.LineOffset;
      if (Location.Discriminator)
        OContextStr << "." << Location.Discriminator;
    }
    return OContextStr.str();
  }
};

static inline hash_code hash_value(const SampleContextFrame &arg) {
  return hash_combine(arg.FuncName, arg.Location.LineOffset,
                      arg.Location.Discriminator);
}

using SampleContextFrameVector = SmallVector<SampleContextFrame, 1>;
using SampleContextFrames = ArrayRef<SampleContextFrame>;

struct SampleContextFrameHash {
  uint64_t operator()(const SampleContextFrameVector &S) const {
    return hash_combine_range(S.begin(), S.end());
  }
};

// Sample context for FunctionSamples. It consists of the calling context,
// the function name and the context state. Internally a sample context is
// represented using ArrayRef, which is also the input for constructing a
// `SampleContext`. It can accept and represent both a full context string
// and a context-less function name.
// For a CS profile, a full context vector can look like:
//    `main:3 _Z5funcAi:1 _Z8funcLeafi`
// For a base CS profile without calling context, the context vector should only
// contain the leaf frame name.
// For a non-CS profile, the context vector should be empty.
class SampleContext {
public:
  SampleContext() : State(UnknownContext), Attributes(ContextNone) {}

  SampleContext(StringRef Name)
      : Name(Name), State(UnknownContext), Attributes(ContextNone) {}

  SampleContext(SampleContextFrames Context,
                ContextStateMask CState = RawContext)
      : Attributes(ContextNone) {
    assert(!Context.empty() && "Context is empty");
    setContext(Context, CState);
  }

  // Given a context string, decode and populate internal state such as the
  // function name, the calling context and the context state. Example of
  // input `ContextStr`: `[main:3 @ _Z5funcAi:1 @ _Z8funcLeafi]`
  SampleContext(StringRef ContextStr,
                std::list<SampleContextFrameVector> &CSNameTable,
                ContextStateMask CState = RawContext)
      : Attributes(ContextNone) {
    assert(!ContextStr.empty());
    // Note that `[]` wrapped input indicates a full context string, otherwise
    // it's treated as context-less function name only.
    bool HasContext = ContextStr.startswith("[");
    if (!HasContext) {
      State = UnknownContext;
      Name = ContextStr;
    } else {
      CSNameTable.emplace_back();
      SampleContextFrameVector &Context = CSNameTable.back();
      createCtxVectorFromStr(ContextStr, Context);
      setContext(Context, CState);
    }
  }

  /// Create a context vector from a given context string and save it in
  /// `Context`.
  static void createCtxVectorFromStr(StringRef ContextStr,
                                     SampleContextFrameVector &Context) {
    // Remove encapsulating '[' and ']' if any
    ContextStr = ContextStr.substr(1, ContextStr.size() - 2);
    StringRef ContextRemain = ContextStr;
    StringRef ChildContext;
    StringRef CalleeName;
    while (!ContextRemain.empty()) {
      auto ContextSplit = ContextRemain.split(" @ ");
      ChildContext = ContextSplit.first;
      ContextRemain = ContextSplit.second;
      LineLocation CallSiteLoc(0, 0);
      decodeContextString(ChildContext, CalleeName, CallSiteLoc);
      Context.emplace_back(CalleeName, CallSiteLoc);
    }
  }

  // Decode context string for a frame to get function name and location.
  // `ContextStr` is in the form of `FuncName:StartLine.Discriminator`.
  static void decodeContextString(StringRef ContextStr, StringRef &FName,
                                  LineLocation &LineLoc) {
    // Get function name
    auto EntrySplit = ContextStr.split(':');
    FName = EntrySplit.first;

    LineLoc = {0, 0};
    if (!EntrySplit.second.empty()) {
      // Get the line offset. Use a signed int for getAsInteger so the string
      // will be parsed as signed.
      int LineOffset = 0;
      auto LocSplit = EntrySplit.second.split('.');
      LocSplit.first.getAsInteger(10, LineOffset);
      LineLoc.LineOffset = LineOffset;

      // Get discriminator
      if (!LocSplit.second.empty())
        LocSplit.second.getAsInteger(10, LineLoc.Discriminator);
    }
  }
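
  // Illustrative sketch: decoding the example context string from above.
  //
  //   SampleContextFrameVector Frames;
  //   SampleContext::createCtxVectorFromStr(
  //       "[main:3 @ _Z5funcAi:1 @ _Z8funcLeafi]", Frames);
  //   // Frames holds {"main", {3, 0}}, {"_Z5funcAi", {1, 0}},
  //   //              {"_Z8funcLeafi", {0, 0}}.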

  operator SampleContextFrames() const { return FullContext; }
  bool hasAttribute(ContextAttributeMask A) { return Attributes & (uint32_t)A; }
  void setAttribute(ContextAttributeMask A) { Attributes |= (uint32_t)A; }
  uint32_t getAllAttributes() { return Attributes; }
  void setAllAttributes(uint32_t A) { Attributes = A; }
  bool hasState(ContextStateMask S) { return State & (uint32_t)S; }
  void setState(ContextStateMask S) { State |= (uint32_t)S; }
  void clearState(ContextStateMask S) { State &= (uint32_t)~S; }
  bool hasContext() const { return State != UnknownContext; }
  bool isBaseContext() const { return FullContext.size() == 1; }
  StringRef getName() const { return Name; }
  SampleContextFrames getContextFrames() const { return FullContext; }

  static std::string getContextString(SampleContextFrames Context,
                                      bool IncludeLeafLineLocation = false) {
    std::ostringstream OContextStr;
    for (uint32_t I = 0; I < Context.size(); I++) {
      if (OContextStr.str().size()) {
        OContextStr << " @ ";
      }
      OContextStr << Context[I].toString(I != Context.size() - 1 ||
                                         IncludeLeafLineLocation);
    }
    return OContextStr.str();
  }

  std::string toString() const {
    if (!hasContext())
      return Name.str();
    return getContextString(FullContext, false);
  }

  uint64_t getHashCode() const {
    return hasContext() ? hash_value(getContextFrames())
                        : hash_value(getName());
  }

  /// Set the name of the function and clear the current context.
  void setName(StringRef FunctionName) {
    Name = FunctionName;
    FullContext = SampleContextFrames();
    State = UnknownContext;
  }

  void setContext(SampleContextFrames Context,
                  ContextStateMask CState = RawContext) {
    assert(CState != UnknownContext);
    FullContext = Context;
    Name = Context.back().FuncName;
    State = CState;
  }

  bool operator==(const SampleContext &That) const {
    return State == That.State && Name == That.Name &&
           FullContext == That.FullContext;
  }

  bool operator!=(const SampleContext &That) const { return !(*this == That); }

  bool operator<(const SampleContext &That) const {
    if (State != That.State)
      return State < That.State;

    if (!hasContext()) {
      return Name < That.Name;
    }

    uint64_t I = 0;
    while (I < std::min(FullContext.size(), That.FullContext.size())) {
      auto &Context1 = FullContext[I];
      auto &Context2 = That.FullContext[I];
      auto V = Context1.FuncName.compare(Context2.FuncName);
      if (V)
        return V < 0;
      if (Context1.Location != Context2.Location)
        return Context1.Location < Context2.Location;
      I++;
    }

    return FullContext.size() < That.FullContext.size();
  }

  struct Hash {
    uint64_t operator()(const SampleContext &Context) const {
      return Context.getHashCode();
    }
  };

  bool IsPrefixOf(const SampleContext &That) const {
    auto ThisContext = FullContext;
    auto ThatContext = That.FullContext;
    if (ThatContext.size() < ThisContext.size())
      return false;
    ThatContext = ThatContext.take_front(ThisContext.size());
    // Compare Leaf frame first
    if (ThisContext.back().FuncName != ThatContext.back().FuncName)
      return false;
    // Compare leading context
    return ThisContext.drop_back() == ThatContext.drop_back();
  }

private:
  /// Mangled name of the function.
  StringRef Name;
  // Full context including calling context and leaf function name
  SampleContextFrames FullContext;
  // State of the associated sample profile
  uint32_t State;
  // Attribute of the associated sample profile
  uint32_t Attributes;
};

static inline hash_code hash_value(const SampleContext &arg) {
  return arg.hasContext() ? hash_value(arg.getContextFrames())
                          : hash_value(arg.getName());
}
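
// Illustrative sketch (names from the example above): a context-less profile
// keys on the function name, while a CS profile keys on its frame vector.
//
//   SampleContext FlatCtx("_Z8funcLeafi");
//   assert(!FlatCtx.hasContext());
//
//   SampleContextFrameVector Frames;
//   SampleContext::createCtxVectorFromStr("[main:3 @ _Z8funcLeafi]", Frames);
//   SampleContext CSCtx(Frames); // Frames must outlive CSCtx (ArrayRef).
//   assert(CSCtx.hasContext() && CSCtx.getName() == "_Z8funcLeafi");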

class FunctionSamples;
class SampleProfileReaderItaniumRemapper;

using BodySampleMap = std::map<LineLocation, SampleRecord>;
// NOTE: Using a StringMap here makes parsed profiles consume around 17% more
// memory, which is *very* significant for large profiles.
using FunctionSamplesMap = std::map<std::string, FunctionSamples, std::less<>>;
using CallsiteSampleMap = std::map<LineLocation, FunctionSamplesMap>;
using LocToLocMap =
    std::unordered_map<LineLocation, LineLocation, LineLocationHash>;

/// Representation of the samples collected for a function.
///
/// This data structure contains all the collected samples for the body
/// of a function. Each sample corresponds to a LineLocation instance
/// within the body of the function.
class FunctionSamples {
public:
  FunctionSamples() = default;

  void print(raw_ostream &OS = dbgs(), unsigned Indent = 0) const;
  void dump() const;

  sampleprof_error addTotalSamples(uint64_t Num, uint64_t Weight = 1) {
    bool Overflowed;
    TotalSamples =
        SaturatingMultiplyAdd(Num, Weight, TotalSamples, &Overflowed);
    return Overflowed ? sampleprof_error::counter_overflow
                      : sampleprof_error::success;
  }

  void removeTotalSamples(uint64_t Num) {
    if (TotalSamples < Num)
      TotalSamples = 0;
    else
      TotalSamples -= Num;
  }

  void setTotalSamples(uint64_t Num) { TotalSamples = Num; }

  void setHeadSamples(uint64_t Num) { TotalHeadSamples = Num; }

  sampleprof_error addHeadSamples(uint64_t Num, uint64_t Weight = 1) {
    bool Overflowed;
    TotalHeadSamples =
        SaturatingMultiplyAdd(Num, Weight, TotalHeadSamples, &Overflowed);
    return Overflowed ? sampleprof_error::counter_overflow
                      : sampleprof_error::success;
  }

  sampleprof_error addBodySamples(uint32_t LineOffset, uint32_t Discriminator,
                                  uint64_t Num, uint64_t Weight = 1) {
    return BodySamples[LineLocation(LineOffset, Discriminator)].addSamples(
        Num, Weight);
  }

  sampleprof_error addCalledTargetSamples(uint32_t LineOffset,
                                          uint32_t Discriminator,
                                          StringRef FName, uint64_t Num,
                                          uint64_t Weight = 1) {
    return BodySamples[LineLocation(LineOffset, Discriminator)].addCalledTarget(
        FName, Num, Weight);
  }

  sampleprof_error addSampleRecord(LineLocation Location,
                                   const SampleRecord &SampleRecord,
                                   uint64_t Weight = 1) {
    return BodySamples[Location].merge(SampleRecord, Weight);
  }

  // Remove a call target and decrease the body sample correspondingly. Return
  // the number of body samples actually decreased.
  uint64_t removeCalledTargetAndBodySample(uint32_t LineOffset,
                                           uint32_t Discriminator,
                                           StringRef FName) {
    uint64_t Count = 0;
    auto I = BodySamples.find(LineLocation(LineOffset, Discriminator));
    if (I != BodySamples.end()) {
      Count = I->second.removeCalledTarget(FName);
      Count = I->second.removeSamples(Count);
      if (!I->second.getSamples())
        BodySamples.erase(I);
    }
    return Count;
  }

  // Remove all call site samples for inlinees. This is needed when flattening
  // a nested profile.
  void removeAllCallsiteSamples() {
    CallsiteSamples.clear();
  }

  // Accumulate all call target samples to update the body samples.
  void updateCallsiteSamples() {
    for (auto &I : BodySamples) {
      uint64_t TargetSamples = I.second.getCallTargetSum();
      // It's possible that the body sample count is greater than the call
      // target sum. E.g., if some call targets are external, they won't be
      // considered valid call targets, but the body sample count, which is
      // derived from LBR ranges, can still include them.
      if (TargetSamples > I.second.getSamples())
        I.second.addSamples(TargetSamples - I.second.getSamples());
    }
  }

  // Accumulate all body samples to set total samples.
  void updateTotalSamples() {
    setTotalSamples(0);
    for (const auto &I : BodySamples)
      addTotalSamples(I.second.getSamples());

    for (auto &I : CallsiteSamples) {
      for (auto &CS : I.second) {
        CS.second.updateTotalSamples();
        addTotalSamples(CS.second.getTotalSamples());
      }
    }
  }

  // Set current context and all callee contexts to be synthetic.
  void SetContextSynthetic() {
    Context.setState(SyntheticContext);
    for (auto &I : CallsiteSamples) {
      for (auto &CS : I.second) {
        CS.second.SetContextSynthetic();
      }
    }
  }

  // Query the stale profile matching results and remap the location.
  const LineLocation &mapIRLocToProfileLoc(const LineLocation &IRLoc) const {
    // There is no remapping if the profile is not stale or the matching gives
    // the same location.
    if (!IRToProfileLocationMap)
      return IRLoc;
    const auto &ProfileLoc = IRToProfileLocationMap->find(IRLoc);
    if (ProfileLoc != IRToProfileLocationMap->end())
      return ProfileLoc->second;
    else
      return IRLoc;
  }

  /// Return the number of samples collected at the given location.
  /// Each location is specified by \p LineOffset and \p Discriminator.
  /// If the location is not found in profile, return error.
  ErrorOr<uint64_t> findSamplesAt(uint32_t LineOffset,
                                  uint32_t Discriminator) const {
    const auto &ret = BodySamples.find(
        mapIRLocToProfileLoc(LineLocation(LineOffset, Discriminator)));
    if (ret == BodySamples.end())
      return std::error_code();
    return ret->second.getSamples();
  }
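
  // Illustrative sketch (hypothetical counts): body samples are keyed by
  // (line offset, discriminator) relative to the function start.
  //
  //   FunctionSamples FS;
  //   FS.addBodySamples(/*LineOffset=*/2, /*Discriminator=*/0, /*Num=*/42);
  //   ErrorOr<uint64_t> N = FS.findSamplesAt(2, 0); // 42
  //   ErrorOr<uint64_t> M = FS.findSamplesAt(3, 0); // error: not in profile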

  /// Returns the call target map collected at a given location.
  /// Each location is specified by \p LineOffset and \p Discriminator.
  /// If the location is not found in profile, return error.
  ErrorOr<SampleRecord::CallTargetMap>
  findCallTargetMapAt(uint32_t LineOffset, uint32_t Discriminator) const {
    const auto &ret = BodySamples.find(
        mapIRLocToProfileLoc(LineLocation(LineOffset, Discriminator)));
    if (ret == BodySamples.end())
      return std::error_code();
    return ret->second.getCallTargets();
  }

  /// Returns the call target map collected at a given location specified by \p
  /// CallSite. If the location is not found in profile, return error.
  ErrorOr<SampleRecord::CallTargetMap>
  findCallTargetMapAt(const LineLocation &CallSite) const {
    const auto &Ret = BodySamples.find(mapIRLocToProfileLoc(CallSite));
    if (Ret == BodySamples.end())
      return std::error_code();
    return Ret->second.getCallTargets();
  }

  /// Return the function samples at the given callsite location.
  FunctionSamplesMap &functionSamplesAt(const LineLocation &Loc) {
    return CallsiteSamples[mapIRLocToProfileLoc(Loc)];
  }

  /// Returns the FunctionSamplesMap at the given \p Loc.
  const FunctionSamplesMap *
  findFunctionSamplesMapAt(const LineLocation &Loc) const {
    auto iter = CallsiteSamples.find(mapIRLocToProfileLoc(Loc));
    if (iter == CallsiteSamples.end())
      return nullptr;
    return &iter->second;
  }

  /// Returns a pointer to FunctionSamples at the given callsite location
  /// \p Loc with callee \p CalleeName. If no callsite can be found, relax
  /// the restriction to return the FunctionSamples at callsite location
  /// \p Loc with the maximum total sample count. If \p Remapper is not
  /// nullptr, use \p Remapper to find FunctionSamples with equivalent name
  /// as \p CalleeName.
  const FunctionSamples *
  findFunctionSamplesAt(const LineLocation &Loc, StringRef CalleeName,
                        SampleProfileReaderItaniumRemapper *Remapper) const;

  bool empty() const { return TotalSamples == 0; }

  /// Return the total number of samples collected inside the function.
  uint64_t getTotalSamples() const { return TotalSamples; }

  /// For top-level functions, return the total number of branch samples that
  /// have the function as the branch target (or 0 otherwise). This is the raw
  /// data fetched from the profile. It should be equivalent to the samples of
  /// the first instruction of the symbol. But since this info is taken
  /// directly from the raw profile, without consulting potentially inaccurate
  /// debug info, it is more accurate and is preferred for standalone symbols.
  uint64_t getHeadSamples() const { return TotalHeadSamples; }

  /// Return an estimate of the sample count of the function entry basic block.
  /// The function can be either a standalone symbol or an inlined function.
  /// For Context-Sensitive profiles, this will prefer returning the head
  /// samples (i.e. getHeadSamples()), if non-zero. Otherwise it estimates from
  /// the function body's samples or callsite samples.
  uint64_t getHeadSamplesEstimate() const {
    if (FunctionSamples::ProfileIsCS && getHeadSamples()) {
      // For CS profile, if we already have more accurate head samples
      // counted by branch sample from caller, use them as entry samples.
      return getHeadSamples();
    }
    uint64_t Count = 0;
    // Use either BodySamples or CallsiteSamples, whichever has the smaller
    // lineno.
    if (!BodySamples.empty() &&
        (CallsiteSamples.empty() ||
         BodySamples.begin()->first < CallsiteSamples.begin()->first))
      Count = BodySamples.begin()->second.getSamples();
    else if (!CallsiteSamples.empty()) {
      // An indirect callsite may be promoted to several inlined direct calls.
      // We need to get the sum of them.
      for (const auto &N_FS : CallsiteSamples.begin()->second)
        Count += N_FS.second.getHeadSamplesEstimate();
    }
    // Return at least 1 if the total sample count is not 0.
    return Count ? Count : TotalSamples > 0;
  }

  /// Return all the samples collected in the body of the function.
  const BodySampleMap &getBodySamples() const { return BodySamples; }

  /// Return all the callsite samples collected in the body of the function.
  const CallsiteSampleMap &getCallsiteSamples() const {
    return CallsiteSamples;
  }

  /// Return the maximum of sample counts in a function body. When SkipCallSite
  /// is false, which is the default, the return count includes samples in the
  /// inlined functions. When SkipCallSite is true, the return count only
  /// considers the body samples.
  uint64_t getMaxCountInside(bool SkipCallSite = false) const {
    uint64_t MaxCount = 0;
    for (const auto &L : getBodySamples())
      MaxCount = std::max(MaxCount, L.second.getSamples());
    if (SkipCallSite)
      return MaxCount;
    for (const auto &C : getCallsiteSamples())
      for (const FunctionSamplesMap::value_type &F : C.second)
        MaxCount = std::max(MaxCount, F.second.getMaxCountInside());
    return MaxCount;
  }

  /// Merge the samples in \p Other into this one.
  /// Optionally scale samples by \p Weight.
  sampleprof_error merge(const FunctionSamples &Other, uint64_t Weight = 1) {
    sampleprof_error Result = sampleprof_error::success;
    if (!GUIDToFuncNameMap)
      GUIDToFuncNameMap = Other.GUIDToFuncNameMap;
    if (Context.getName().empty())
      Context = Other.getContext();
    if (FunctionHash == 0) {
      // Set the function hash code for the target profile.
      FunctionHash = Other.getFunctionHash();
    } else if (FunctionHash != Other.getFunctionHash()) {
      // Two profiles coming with different valid hash codes indicate
      // either:
      // 1. They are same-named static functions from different compilation
      // units (without using -unique-internal-linkage-names), or
      // 2. They are really the same function but from different compilations.
      // Let's bail out in either case for now, which means one profile is
      // dropped.
      return sampleprof_error::hash_mismatch;
    }

    MergeResult(Result, addTotalSamples(Other.getTotalSamples(), Weight));
    MergeResult(Result, addHeadSamples(Other.getHeadSamples(), Weight));
    for (const auto &I : Other.getBodySamples()) {
      const LineLocation &Loc = I.first;
      const SampleRecord &Rec = I.second;
      MergeResult(Result, BodySamples[Loc].merge(Rec, Weight));
    }
    for (const auto &I : Other.getCallsiteSamples()) {
      const LineLocation &Loc = I.first;
      FunctionSamplesMap &FSMap = functionSamplesAt(Loc);
      for (const auto &Rec : I.second)
        MergeResult(Result, FSMap[Rec.first].merge(Rec.second, Weight));
    }
    return Result;
  }
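
  // Illustrative sketch (hypothetical counts): merging accumulates totals;
  // with both hashes unset (0) no mismatch is possible.
  //
  //   FunctionSamples A, B;
  //   A.addTotalSamples(100);
  //   B.addTotalSamples(50);
  //   sampleprof_error E = A.merge(B); // A's total is now 150
  //   assert(E == sampleprof_error::success);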

  /// Recursively traverse all children; if the total sample count of the
  /// corresponding function is no less than \p Threshold, add its
  /// corresponding GUID to \p S. Also traverse the BodySamples to add hot
  /// CallTargets' GUIDs to \p S.
  void findInlinedFunctions(DenseSet<GlobalValue::GUID> &S,
                            const StringMap<Function *> &SymbolMap,
                            uint64_t Threshold) const {
    if (TotalSamples <= Threshold)
      return;
    auto isDeclaration = [](const Function *F) {
      return !F || F->isDeclaration();
    };
    if (isDeclaration(SymbolMap.lookup(getFuncName()))) {
      // Add to the import list only when it's defined out of module.
      S.insert(getGUID(getName()));
    }
    // Import hot CallTargets, which may not be available in IR because full
    // profile annotation cannot be done until backend compilation in ThinLTO.
    for (const auto &BS : BodySamples)
      for (const auto &TS : BS.second.getCallTargets())
        if (TS.getValue() > Threshold) {
          const Function *Callee = SymbolMap.lookup(getFuncName(TS.getKey()));
          if (isDeclaration(Callee))
            S.insert(getGUID(TS.getKey()));
        }
    for (const auto &CS : CallsiteSamples)
      for (const auto &NameFS : CS.second)
        NameFS.second.findInlinedFunctions(S, SymbolMap, Threshold);
  }

  /// Set the name of the function.
  void setName(StringRef FunctionName) { Context.setName(FunctionName); }

  /// Return the function name.
  StringRef getName() const { return Context.getName(); }

  /// Return the original function name.
  StringRef getFuncName() const { return getFuncName(getName()); }

  void setFunctionHash(uint64_t Hash) { FunctionHash = Hash; }

  uint64_t getFunctionHash() const { return FunctionHash; }

  void setIRToProfileLocationMap(const LocToLocMap *LTLM) {
    assert(IRToProfileLocationMap == nullptr && "this should be set only once");
    IRToProfileLocationMap = LTLM;
  }

  /// Return the canonical name for a function, taking into account
  /// suffix elision policy attributes.
  static StringRef getCanonicalFnName(const Function &F) {
    auto AttrName = "sample-profile-suffix-elision-policy";
    auto Attr = F.getFnAttribute(AttrName).getValueAsString();
    return getCanonicalFnName(F.getName(), Attr);
  }

  /// Name suffixes which canonicalization should handle to avoid
  /// profile mismatch.
  static constexpr const char *LLVMSuffix = ".llvm.";
  static constexpr const char *PartSuffix = ".part.";
  static constexpr const char *UniqSuffix = ".__uniq.";

  static StringRef getCanonicalFnName(StringRef FnName,
                                      StringRef Attr = "selected") {
    // Note the sequence of the suffixes in the knownSuffixes array matters.
    // If suffix "A" is appended after the suffix "B", "A" should be in front
    // of "B" in knownSuffixes.
    const char *knownSuffixes[] = {LLVMSuffix, PartSuffix, UniqSuffix};
    if (Attr == "" || Attr == "all") {
      return FnName.split('.').first;
    } else if (Attr == "selected") {
      StringRef Cand(FnName);
      for (const auto &Suf : knownSuffixes) {
        StringRef Suffix(Suf);
        // If the profile contains ".__uniq." suffix, don't strip the
        // suffix for names in the IR.
        if (Suffix == UniqSuffix && FunctionSamples::HasUniqSuffix)
          continue;
        auto It = Cand.rfind(Suffix);
        if (It == StringRef::npos)
          continue;
        auto Dit = Cand.rfind('.');
        if (Dit == It + Suffix.size() - 1)
          Cand = Cand.substr(0, It);
      }
      return Cand;
    } else if (Attr == "none") {
      return FnName;
    } else {
      assert(false && "internal error: unknown suffix elision policy");
    }
    return FnName;
  }
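
  // Illustrative sketch (hypothetical names): with the default "selected"
  // policy, a known suffix whose payload ends the name is stripped.
  //
  //   getCanonicalFnName("_ZL3foov.llvm.1487840559")       == "_ZL3foov"
  //   getCanonicalFnName("_ZL3foov.part.1", "none")        == "_ZL3foov.part.1"
  //   getCanonicalFnName("_ZL3foov.part.1.llvm.99", "all") == "_ZL3foov"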

  /// Translate \p Name into its original name.
  /// When profile doesn't use MD5, \p Name needs no translation.
  /// When profile uses MD5, \p Name in current FunctionSamples
  /// is actually GUID of the original function name. getFuncName will
  /// translate \p Name in current FunctionSamples into its original name
  /// by looking up in the function map GUIDToFuncNameMap.
  /// If the original name doesn't exist in the map, return empty StringRef.
  StringRef getFuncName(StringRef Name) const {
    if (!UseMD5)
      return Name;

    assert(GUIDToFuncNameMap && "GUIDToFuncNameMap needs to be populated first");
    return GUIDToFuncNameMap->lookup(std::stoull(Name.data()));
  }

  /// Returns the line offset to the start line of the subprogram.
  /// We assume that a single function will not exceed 65535 LOC.
  static unsigned getOffset(const DILocation *DIL);

  /// Returns a unique call site identifier for a given debug location of a
  /// call instruction. This is a wrapper over two scenarios, the probe-based
  /// profile and the regular profile, to hide implementation details from the
  /// sample loader and the context tracker.
  static LineLocation getCallSiteIdentifier(const DILocation *DIL,
                                            bool ProfileIsFS = false);

  /// Returns a unique hash code for a combination of a callsite location and
  /// the callee function name.
  static uint64_t getCallSiteHash(StringRef CalleeName,
                                  const LineLocation &Callsite);

  /// Get the FunctionSamples of the inline instance where DIL originates
  /// from.
  ///
  /// The FunctionSamples of the instruction (Machine or IR) associated with
  /// \p DIL is the inlined instance from which that instruction comes.
  /// We traverse the inline stack of that instruction, and match it with the
  /// tree nodes in the profile.
  ///
  /// \returns the FunctionSamples pointer to the inlined instance.
  /// If \p Remapper is not nullptr, it will be used to find matching
  /// FunctionSamples with not exactly the same but equivalent name.
  const FunctionSamples *findFunctionSamples(
      const DILocation *DIL,
      SampleProfileReaderItaniumRemapper *Remapper = nullptr) const;

  static bool ProfileIsProbeBased;

  static bool ProfileIsCS;

  static bool ProfileIsPreInlined;

  SampleContext &getContext() const { return Context; }

  void setContext(const SampleContext &FContext) { Context = FContext; }

  /// Whether the profile uses MD5 to represent strings.
  static bool UseMD5;

  /// Whether the profile contains any ".__uniq." suffix in a name.
  static bool HasUniqSuffix;

  /// If this profile uses flow sensitive discriminators.
  static bool ProfileIsFS;

  /// GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  /// all the function symbols defined or declared in current module.
  DenseMap<uint64_t, StringRef> *GUIDToFuncNameMap = nullptr;

  // Assume the input \p Name is a name coming from FunctionSamples itself.
  // If UseMD5 is true, the name is already a GUID and we
  // don't want to return the GUID of a GUID.
  static uint64_t getGUID(StringRef Name) {
    return UseMD5 ? std::stoull(Name.data()) : Function::getGUID(Name);
  }

  // Find all the names in the current FunctionSamples including names in
  // all the inline instances and names of call targets.
  void findAllNames(DenseSet<StringRef> &NameSet) const;

  bool operator==(const FunctionSamples &Other) const {
    return (GUIDToFuncNameMap == Other.GUIDToFuncNameMap ||
            (GUIDToFuncNameMap && Other.GUIDToFuncNameMap &&
             *GUIDToFuncNameMap == *Other.GUIDToFuncNameMap)) &&
           FunctionHash == Other.FunctionHash && Context == Other.Context &&
           TotalSamples == Other.TotalSamples &&
           TotalHeadSamples == Other.TotalHeadSamples &&
           BodySamples == Other.BodySamples &&
           CallsiteSamples == Other.CallsiteSamples;
  }

  bool operator!=(const FunctionSamples &Other) const {
    return !(*this == Other);
  }

private:
  /// CFG hash value for the function.
  uint64_t FunctionHash = 0;

  /// Calling context for function profile
  mutable SampleContext Context;

  /// Total number of samples collected inside this function.
  ///
  /// Samples are cumulative, they include all the samples collected
  /// inside this function and all its inlined callees.
  uint64_t TotalSamples = 0;

  /// Total number of samples collected at the head of the function.
  /// This is an approximation of the number of calls made to this function
  /// at runtime.
  uint64_t TotalHeadSamples = 0;

  /// Map instruction locations to collected samples.
  ///
  /// Each entry in this map contains the number of samples
  /// collected at the corresponding line offset. All line locations
  /// are an offset from the start of the function.
  BodySampleMap BodySamples;

  /// Map call sites to collected samples for the called function.
  ///
  /// Each entry in this map corresponds to all the samples
  /// collected for the inlined function call at the given
  /// location. For example, given:
  ///
  ///     void foo() {
  ///  1    bar();
  ///  ...
  ///  8    baz();
  ///     }
  ///
  /// If the bar() and baz() calls were inlined inside foo(), this
  /// map will contain two entries.  One for all the samples collected
  /// in the call to bar() at line offset 1, the other for all the samples
  /// collected in the call to baz() at line offset 8.
  CallsiteSampleMap CallsiteSamples;

  /// IR to profile location map generated by stale profile matching.
  ///
  /// Each entry is a mapping from the location on current build to the matched
  /// location in the "stale" profile. For example:
  ///   Profiled source code:
  ///      void foo() {
  ///   1    bar();
  ///      }
  ///
  ///   Current source code:
  ///      void foo() {
  ///   1    // Code change
  ///   2    bar();
  ///      }
  /// Supposing the stale profile matching algorithm generated the mapping [2 ->
  /// 1], the profile query using the location of bar on the IR which is 2 will
  /// be remapped to 1 and find the location of bar in the profile.
  const LocToLocMap *IRToProfileLocationMap = nullptr;
};

raw_ostream &operator<<(raw_ostream &OS, const FunctionSamples &FS);

using SampleProfileMap =
    std::unordered_map<SampleContext, FunctionSamples, SampleContext::Hash>;

using NameFunctionSamples = std::pair<SampleContext, const FunctionSamples *>;

void sortFuncProfiles(const SampleProfileMap &ProfileMap,
                      std::vector<NameFunctionSamples> &SortedProfiles);

/// Sort a LocationT->SampleT map by LocationT.
///
/// It produces a sorted list of <LocationT, SampleT> records by ascending
/// order of LocationT.
template <class LocationT, class SampleT> class SampleSorter {
public:
  using SamplesWithLoc = std::pair<const LocationT, SampleT>;
  using SamplesWithLocList = SmallVector<const SamplesWithLoc *, 20>;

  SampleSorter(const std::map<LocationT, SampleT> &Samples) {
    for (const auto &I : Samples)
      V.push_back(&I);
    llvm::stable_sort(V, [](const SamplesWithLoc *A, const SamplesWithLoc *B) {
      return A->first < B->first;
    });
  }

  const SamplesWithLocList &get() const { return V; }

private:
  SamplesWithLocList V;
};
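
// Illustrative sketch (hypothetical counts): ordering body samples by
// location for deterministic iteration.
//
//   BodySampleMap Samples;
//   Samples[LineLocation(2, 0)].addSamples(7);
//   Samples[LineLocation(1, 0)].addSamples(3);
//   SampleSorter<LineLocation, SampleRecord> Sorted(Samples);
//   // Sorted.get() visits the {1,0} record before the {2,0} record.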

/// SampleContextTrimmer implements helper functions to trim and merge cold
/// context profiles. It also supports context profile canonicalization to
/// make sure ProfileMap's key is consistent with FunctionSamples' name and
/// context.
class SampleContextTrimmer {
public:
  SampleContextTrimmer(SampleProfileMap &Profiles) : ProfileMap(Profiles) {}
  // Trim and merge cold context profiles when requested. TrimBaseProfileOnly
  // is only effective when TrimColdContext is true. On top of
  // TrimColdContext, TrimBaseProfileOnly can be used to specify whether to
  // trim all cold profiles or only cold base profiles. Trimming base profiles
  // only is mainly to honor the preinliner decision. Note that when
  // MergeColdContext is true, the preinliner decision is not honored anyway,
  // so TrimBaseProfileOnly will be ignored.
  void trimAndMergeColdContextProfiles(uint64_t ColdCountThreshold,
                                       bool TrimColdContext,
                                       bool MergeColdContext,
                                       uint32_t ColdContextFrameLength,
                                       bool TrimBaseProfileOnly);
  // Canonicalize context profile name and attributes.
  void canonicalizeContextProfiles();

private:
  SampleProfileMap &ProfileMap;
};

/// Helper class for profile conversion.
///
/// It supports full context-sensitive profile to nested profile conversion,
/// nested profile to flatten profile conversion, etc.
class ProfileConverter {
public:
  ProfileConverter(SampleProfileMap &Profiles);
  // Convert a full context-sensitive flat sample profile into a nested sample
  // profile.
  void convertCSProfiles();
  struct FrameNode {
    FrameNode(StringRef FName = StringRef(),
              FunctionSamples *FSamples = nullptr,
              LineLocation CallLoc = {0, 0})
        : FuncName(FName), FuncSamples(FSamples), CallSiteLoc(CallLoc) {}

    // Map line+discriminator location to child frame
    std::map<uint64_t, FrameNode> AllChildFrames;
    // Function name for current frame
    StringRef FuncName;
    // Function Samples for current frame
    FunctionSamples *FuncSamples;
    // Callsite location in parent context
    LineLocation CallSiteLoc;

    FrameNode *getOrCreateChildFrame(const LineLocation &CallSite,
                                     StringRef CalleeName);
  };

  static void flattenProfile(SampleProfileMap &ProfileMap,
                             bool ProfileIsCS = false) {
    SampleProfileMap TmpProfiles;
    flattenProfile(ProfileMap, TmpProfiles, ProfileIsCS);
    ProfileMap = std::move(TmpProfiles);
  }

  static void flattenProfile(const SampleProfileMap &InputProfiles,
                             SampleProfileMap &OutputProfiles,
                             bool ProfileIsCS = false) {
    if (ProfileIsCS) {
      for (const auto &I : InputProfiles)
        OutputProfiles[I.second.getName()].merge(I.second);
      // Retain the profile name and clear the full context for each function
      // profile.
      for (auto &I : OutputProfiles)
        I.second.setContext(SampleContext(I.first));
    } else {
      for (const auto &I : InputProfiles)
        flattenNestedProfile(OutputProfiles, I.second);
    }
  }

private:
  static void flattenNestedProfile(SampleProfileMap &OutputProfiles,
                                   const FunctionSamples &FS) {
    // To retain the context, checksum, attributes of the original profile, make
    // a copy of it if no profile is found.
    SampleContext &Context = FS.getContext();
    auto Ret = OutputProfiles.try_emplace(Context, FS);
    FunctionSamples &Profile = Ret.first->second;
    if (Ret.second) {
      // Clear nested inlinees' samples for the flattened copy. These inlinees
      // will have their own top-level entries after flattening.
      Profile.removeAllCallsiteSamples();
      // We recompute TotalSamples later, so here set to zero.
      Profile.setTotalSamples(0);
    } else {
      for (const auto &[LineLocation, SampleRecord] : FS.getBodySamples()) {
        Profile.addSampleRecord(LineLocation, SampleRecord);
      }
    }

    assert(Profile.getCallsiteSamples().empty() &&
           "There should be no inlinees' profiles after flattening.");

    // TotalSamples might not be equal to the sum of all samples from
    // BodySamples and CallsiteSamples. So here we use "TotalSamples =
    // Original_TotalSamples - All_of_Callsite_TotalSamples +
    // All_of_Callsite_HeadSamples" to compute the new TotalSamples.
    uint64_t TotalSamples = FS.getTotalSamples();

    for (const auto &I : FS.getCallsiteSamples()) {
      for (const auto &Callee : I.second) {
        const auto &CalleeProfile = Callee.second;
        // Add body sample.
        Profile.addBodySamples(I.first.LineOffset, I.first.Discriminator,
                               CalleeProfile.getHeadSamplesEstimate());
        // Add callsite sample.
        Profile.addCalledTargetSamples(
            I.first.LineOffset, I.first.Discriminator, CalleeProfile.getName(),
            CalleeProfile.getHeadSamplesEstimate());
        // Update total samples.
        TotalSamples = TotalSamples >= CalleeProfile.getTotalSamples()
                           ? TotalSamples - CalleeProfile.getTotalSamples()
                           : 0;
        TotalSamples += CalleeProfile.getHeadSamplesEstimate();
        // Recursively convert callee profile.
        flattenNestedProfile(OutputProfiles, CalleeProfile);
      }
    }
    Profile.addTotalSamples(TotalSamples);

    Profile.setHeadSamples(Profile.getHeadSamplesEstimate());
  }

  // Nest all children profiles into the profile of Node.
  void convertCSProfiles(FrameNode &Node);
  FrameNode *getOrCreateContextPath(const SampleContext &Context);

  SampleProfileMap &ProfileMap;
  FrameNode RootFrame;
};
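
// Illustrative sketch: flattening a profile map in place so that every
// inlinee gets its own top-level entry (Profiles is assumed to have been
// populated by a reader).
//
//   SampleProfileMap Profiles;
//   ProfileConverter::flattenProfile(Profiles, /*ProfileIsCS=*/false);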

/// ProfileSymbolList records the list of function symbols that show up
/// in the binary used to generate the profile. It is useful for
/// discriminating between a function that is so cold it does not show up
/// in the profile and a function that was newly added.
class ProfileSymbolList {
public:
  /// copy indicates whether we need to copy the underlying memory
  /// for the input Name.
  void add(StringRef Name, bool copy = false) {
    if (!copy) {
      Syms.insert(Name);
      return;
    }
    Syms.insert(Name.copy(Allocator));
  }

  bool contains(StringRef Name) { return Syms.count(Name); }

  void merge(const ProfileSymbolList &List) {
    for (auto Sym : List.Syms)
      add(Sym, true);
  }

  unsigned size() { return Syms.size(); }

  void setToCompress(bool TC) { ToCompress = TC; }
  bool toCompress() { return ToCompress; }

  std::error_code read(const uint8_t *Data, uint64_t ListSize);
  std::error_code write(raw_ostream &OS);
  void dump(raw_ostream &OS = dbgs()) const;

private:
  // Determine whether or not to compress the symbol list when
  // writing it into the profile. The variable is unused when the symbol
  // list is read from an existing profile.
  bool ToCompress = false;
  DenseSet<StringRef> Syms;
  BumpPtrAllocator Allocator;
};
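
// Illustrative sketch (hypothetical symbol): the list distinguishes a
// function that is cold but present in the profiled binary from one that
// did not exist at all.
//
//   ProfileSymbolList List;
//   List.add("_Z8coldFuncv", /*copy=*/true); // copies into the allocator
//   assert(List.contains("_Z8coldFuncv"));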

} // end namespace sampleprof

using namespace sampleprof;
// Provide DenseMapInfo for SampleContext.
template <> struct DenseMapInfo<SampleContext> {
  static inline SampleContext getEmptyKey() { return SampleContext(); }

  static inline SampleContext getTombstoneKey() { return SampleContext("@"); }

  static unsigned getHashValue(const SampleContext &Val) {
    return Val.getHashCode();
  }

  static bool isEqual(const SampleContext &LHS, const SampleContext &RHS) {
    return LHS == RHS;
  }
};

// Prepend "__uniq" before the hash for tools like profilers to understand
// that this symbol is of internal linkage type.  The "__uniq" is the
// pre-determined prefix that is used to tell tools that this symbol was
// created with -funique-internal-linakge-symbols and the tools can strip or
// keep the prefix as needed.
inline std::string getUniqueInternalLinkagePostfix(const StringRef &FName) {
  llvm::MD5 Md5;
  Md5.update(FName);
  llvm::MD5::MD5Result R;
  Md5.final(R);
  SmallString<32> Str;
  llvm::MD5::stringifyResult(R, Str);
  // Convert the MD5 hash to decimal. Demangler suffixes can contain either
  // numbers or characters, but not both.
  llvm::APInt IntHash(128, Str.str(), 16);
  return toString(IntHash, /* Radix = */ 10, /* Signed = */ false)
      .insert(0, FunctionSamples::UniqSuffix);
}
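
// Illustrative sketch (hypothetical name): the returned postfix is
// ".__uniq." followed by the decimal rendering of the 128-bit MD5 of the
// given string.
//
//   std::string Postfix = getUniqueInternalLinkagePostfix("foo.cpp");
//   // Postfix == ".__uniq." + decimal MD5 of "foo.cpp", ready to be
//   // appended to an internal-linkage symbol name.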

} // end namespace llvm

#endif // LLVM_PROFILEDATA_SAMPLEPROF_H
//===- InstrProf.h - Instrumented profiling format support ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Instrumentation-based profiling data is generated by instrumented
// binaries through library functions in compiler-rt, and read by the clang
// frontend to feed PGO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_INSTRPROF_H
#define LLVM_PROFILEDATA_INSTRPROF_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProfData.inc"
#include "llvm/Support/BalancedPartitioning.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <list>
#include <memory>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

namespace llvm {

class Function;
class GlobalVariable;
struct InstrProfRecord;
class InstrProfSymtab;
class Instruction;
class MDNode;
class Module;

enum InstrProfSectKind {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix) Kind,
#include "llvm/ProfileData/InstrProfData.inc"
};

/// Return the max count value. We reserve a few large values for special use.
inline uint64_t getInstrMaxCountValue() {
  return std::numeric_limits<uint64_t>::max() - 2;
}

/// Return the name of the profile section corresponding to \p IPSK.
///
/// The name of the section depends on the object format type \p OF. If
/// \p AddSegmentInfo is true, a segment prefix and additional linker hints may
/// be added to the section name (this is the default).
std::string getInstrProfSectionName(InstrProfSectKind IPSK,
                                    Triple::ObjectFormatType OF,
                                    bool AddSegmentInfo = true);

/// Return the name of the profile runtime entry point that does value
/// profiling for a given site.
inline StringRef getInstrProfValueProfFuncName() {
  return INSTR_PROF_VALUE_PROF_FUNC_STR;
}

/// Return the name of the profile runtime entry point that does memop size
/// value profiling.
inline StringRef getInstrProfValueProfMemOpFuncName() {
  return INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR;
}

/// Return the name prefix of variables containing instrumented function names.
inline StringRef getInstrProfNameVarPrefix() { return "__profn_"; }

/// Return the name prefix of variables containing per-function control data.
inline StringRef getInstrProfDataVarPrefix() { return "__profd_"; }

/// Return the name prefix of profile counter variables.
inline StringRef getInstrProfCountersVarPrefix() { return "__profc_"; }

/// Return the name prefix of value profile variables.
inline StringRef getInstrProfValuesVarPrefix() { return "__profvp_"; }

/// Return the name of value profile node array variables:
inline StringRef getInstrProfVNodesVarName() { return "__llvm_prf_vnodes"; }

/// Return the name of the variable holding the strings (possibly compressed)
/// of all functions' PGO names.
inline StringRef getInstrProfNamesVarName() {
  return "__llvm_prf_nm";
}

/// Return the name of a coverage mapping variable (internal linkage)
/// for each instrumented source module. Such variables are allocated
/// in the __llvm_covmap section.
inline StringRef getCoverageMappingVarName() {
  return "__llvm_coverage_mapping";
}

/// Return the name of the internal variable recording the array
/// of PGO name vars referenced by the coverage mapping. The owning
/// functions of those names are not emitted by the frontend (e.g., unused
/// inline functions).
inline StringRef getCoverageUnusedNamesVarName() {
  return "__llvm_coverage_names";
}

/// Return the name of the function that registers all the per-function
/// control data at program startup time by calling __llvm_register_function.
/// This function has internal linkage and is called by the
/// __llvm_profile_init runtime method. This function is not generated for
/// these platforms: Darwin, Linux, and FreeBSD.
inline StringRef getInstrProfRegFuncsName() {
  return "__llvm_profile_register_functions";
}

/// Return the name of the runtime interface that registers per-function control
/// data for one instrumented function.
inline StringRef getInstrProfRegFuncName() {
  return "__llvm_profile_register_function";
}

/// Return the name of the runtime interface that registers the PGO name strings.
inline StringRef getInstrProfNamesRegFuncName() {
  return "__llvm_profile_register_names_function";
}

/// Return the name of the runtime initialization method that is generated by
/// the compiler. The function calls __llvm_profile_register_functions and
/// __llvm_profile_override_default_filename functions if needed. This function
/// has internal linkage and is invoked at startup time via init_array.
inline StringRef getInstrProfInitFuncName() { return "__llvm_profile_init"; }

/// Return the name of the hook variable defined in profile runtime library.
/// A reference to the variable causes the linker to link in the runtime
/// initialization module (which defines the hook variable).
inline StringRef getInstrProfRuntimeHookVarName() {
  return INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_RUNTIME_VAR);
}

/// Return the name of the compiler generated function that references the
/// runtime hook variable. The function is a weak global.
inline StringRef getInstrProfRuntimeHookVarUseFuncName() {
  return "__llvm_profile_runtime_user";
}

inline StringRef getInstrProfCounterBiasVarName() {
  return INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_COUNTER_BIAS_VAR);
}

/// Return the marker used to separate PGO names during serialization.
inline StringRef getInstrProfNameSeparator() { return "\01"; }

/// Return the modified name for function \c F suitable to be
/// used as the key for profile lookup. Variable \c InLTO indicates if this
/// is called in LTO optimization passes.
std::string getPGOFuncName(const Function &F, bool InLTO = false,
                           uint64_t Version = INSTR_PROF_INDEX_VERSION);

/// Return the modified name for a function suitable to be
/// used as the key for profile lookup. The function's original
/// name is \c RawFuncName and has linkage of type \c Linkage.
/// The function is defined in module \c FileName.
std::string getPGOFuncName(StringRef RawFuncName,
                           GlobalValue::LinkageTypes Linkage,
                           StringRef FileName,
                           uint64_t Version = INSTR_PROF_INDEX_VERSION);
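
// A usage sketch of the interface above (values are hypothetical, not from
// this header): for a function with local (internal) linkage, the profile
// lookup key is prefixed with the owning file name.
//
// \code
//   std::string Key = getPGOFuncName("foo", GlobalValue::InternalLinkage,
//                                    "bar.cpp");
//   // Key is "bar.cpp:foo"; for an external-linkage function it stays "foo".
// \endcode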

/// Return the name of the global variable used to store a function
/// name in PGO instrumentation. \c FuncName is the name of the function
/// returned by the \c getPGOFuncName call.
std::string getPGOFuncNameVarName(StringRef FuncName,
                                  GlobalValue::LinkageTypes Linkage);

/// Create and return the global variable for the function name used in PGO
/// instrumentation. \c PGOFuncName is the name of the function returned
/// by the \c getPGOFuncName call.
GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName);

/// Create and return the global variable for the function name used in PGO
/// instrumentation. \c PGOFuncName is the name of the function returned
/// by the \c getPGOFuncName call, \c M is the owning module,
/// and \c Linkage is the linkage of the instrumented function.
GlobalVariable *createPGOFuncNameVar(Module &M,
                                     GlobalValue::LinkageTypes Linkage,
                                     StringRef PGOFuncName);

/// Return the initializer, as a string, of the PGO name var \c NameVar.
StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar);

/// Given a PGO function name, remove the filename prefix and return
/// the original (static) function name.
StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName,
                                   StringRef FileName = "<unknown>");

/// Given a vector of strings (function PGO names) \c NameStrs, the
/// method generates a combined string \c Result that is ready to be
/// serialized. The \c Result string is composed of three fields:
/// the first field is the length of the uncompressed strings, and
/// the second field is the length of the zlib-compressed string.
/// Both fields are encoded in ULEB128. If \c doCompression is false,
/// the third field is the uncompressed strings; otherwise it is the
/// compressed string. When string compression is off, the
/// second field will have value zero.
Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs,
                                bool doCompression, std::string &Result);

/// Produce a \c Result string with the same format described above. The
/// input is a vector of PGO function name variables that are referenced.
Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
                                std::string &Result, bool doCompression = true);

/// \c NameStrings is a string composed of one or more sub-strings encoded in
/// the format described above. The sub-strings are separated by 0 or more zero
/// bytes. This method decodes the string and populates the \c Symtab.
Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab);
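
// A round-trip sketch for the two interfaces above (illustrative only; the
// names are hypothetical and error handling is elided):
//
// \code
//   std::string Payload;
//   if (Error E = collectPGOFuncNameStrings({"main", "bar.cpp:foo"},
//                                           /*doCompression=*/false, Payload))
//     return E; // handle the error
//   InstrProfSymtab Symtab;
//   if (Error E = readPGOFuncNameStrings(Payload, Symtab))
//     return E; // handle the error
// \endcode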

/// Check if INSTR_PROF_RAW_VERSION_VAR is defined. This global is only
/// set during IR PGO compilation.
bool isIRPGOFlagSet(const Module *M);

/// Check if we can safely rename this Comdat function. Instances of the same
/// comdat function may have different control flow and thus cannot share the
/// same counter variable.
bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken = false);

enum InstrProfValueKind : uint32_t {
#define VALUE_PROF_KIND(Enumerator, Value, Descr) Enumerator = Value,
#include "llvm/ProfileData/InstrProfData.inc"
};

/// Get the value profile data for value site \p SiteIndx from \p InstrProfR
/// and annotate the instruction \p Inst with the value profile meta data.
/// Annotate up to \p MaxMDCount (default 3) records per value site.
void annotateValueSite(Module &M, Instruction &Inst,
                       const InstrProfRecord &InstrProfR,
                       InstrProfValueKind ValueKind, uint32_t SiteIndx,
                       uint32_t MaxMDCount = 3);

/// Same as the above interface but using an ArrayRef, as well as \p Sum.
void annotateValueSite(Module &M, Instruction &Inst,
                       ArrayRef<InstrProfValueData> VDs, uint64_t Sum,
                       InstrProfValueKind ValueKind, uint32_t MaxMDCount);

/// Extract the value profile data from \p Inst, which is annotated with
/// value profile meta data. Return false if there is no value data annotated;
/// otherwise return true.
bool getValueProfDataFromInst(const Instruction &Inst,
                              InstrProfValueKind ValueKind,
                              uint32_t MaxNumValueData,
                              InstrProfValueData ValueData[],
                              uint32_t &ActualNumValueData, uint64_t &TotalC,
                              bool GetNoICPValue = false);
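
// A sketch of reading back annotated data (illustrative; Inst is assumed to
// carry indirect-call value profile metadata and use() is a placeholder):
//
// \code
//   InstrProfValueData VD[4];
//   uint32_t NumVD = 0;
//   uint64_t TotalCount = 0;
//   if (getValueProfDataFromInst(Inst, IPVK_IndirectCallTarget,
//                                /*MaxNumValueData=*/4, VD, NumVD, TotalCount))
//     for (uint32_t I = 0; I < NumVD; ++I)
//       use(VD[I].Value, VD[I].Count);
// \endcode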

inline StringRef getPGOFuncNameMetadataName() { return "PGOFuncName"; }

/// Return the PGOFuncName meta data associated with a function.
MDNode *getPGOFuncNameMetadata(const Function &F);

/// Create the PGOFuncName meta data if PGOFuncName is different from the
/// function's raw name. This should only apply to internal linkage functions
/// declared by users.
void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName);

/// Check if we can use Comdat for profile variables. This will eliminate
/// the duplicated profile variables for Comdat functions.
bool needsComdatForCounter(const Function &F, const Module &M);

/// An enum describing the attributes of an instrumented profile.
enum class InstrProfKind {
  Unknown = 0x0,
  // A frontend clang profile, incompatible with other attrs.
  FrontendInstrumentation = 0x1,
  // An IR-level profile (default when -fprofile-generate is used).
  IRInstrumentation = 0x2,
  // A profile with entry basic block instrumentation.
  FunctionEntryInstrumentation = 0x4,
  // A context sensitive IR-level profile.
  ContextSensitive = 0x8,
  // Use single byte probes for coverage.
  SingleByteCoverage = 0x10,
  // Only instrument the function entry basic block.
  FunctionEntryOnly = 0x20,
  // A memory profile collected using -fmemory-profile.
  MemProf = 0x40,
  // A temporal profile.
  TemporalProfile = 0x80,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/TemporalProfile)
};
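
// InstrProfKind is a bitmask enum, so kinds compose and test with the usual
// bitwise operators. A minimal sketch:
//
// \code
//   InstrProfKind K = InstrProfKind::IRInstrumentation |
//                     InstrProfKind::FunctionEntryInstrumentation;
//   bool IsIR = (K & InstrProfKind::IRInstrumentation) ==
//               InstrProfKind::IRInstrumentation;
// \endcode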

const std::error_category &instrprof_category();

enum class instrprof_error {
  success = 0,
  eof,
  unrecognized_format,
  bad_magic,
  bad_header,
  unsupported_version,
  unsupported_hash_type,
  too_large,
  truncated,
  malformed,
  missing_debug_info_for_correlation,
  unexpected_debug_info_for_correlation,
  unable_to_correlate_profile,
  unknown_function,
  invalid_prof,
  hash_mismatch,
  count_mismatch,
  counter_overflow,
  value_site_count_mismatch,
  compress_failed,
  uncompress_failed,
  empty_raw_profile,
  zlib_unavailable,
  raw_profile_version_mismatch
};

/// An ordered list of functions identified by their NameRef found in
/// INSTR_PROF_DATA
struct TemporalProfTraceTy {
  std::vector<uint64_t> FunctionNameRefs;
  uint64_t Weight;
  TemporalProfTraceTy(std::initializer_list<uint64_t> Trace = {},
                      uint64_t Weight = 1)
      : FunctionNameRefs(Trace), Weight(Weight) {}

  /// Use a set of temporal profile traces to create a list of balanced
  /// partitioning function nodes used by BalancedPartitioning to generate a
  /// function order that reduces page faults during startup.
  static std::vector<BPFunctionNode>
  createBPFunctionNodes(ArrayRef<TemporalProfTraceTy> Traces);
};

inline std::error_code make_error_code(instrprof_error E) {
  return std::error_code(static_cast<int>(E), instrprof_category());
}

class InstrProfError : public ErrorInfo<InstrProfError> {
public:
  InstrProfError(instrprof_error Err, const Twine &ErrStr = Twine())
      : Err(Err), Msg(ErrStr.str()) {
    assert(Err != instrprof_error::success && "Not an error");
  }

  std::string message() const override;

  void log(raw_ostream &OS) const override { OS << message(); }

  std::error_code convertToErrorCode() const override {
    return make_error_code(Err);
  }

  instrprof_error get() const { return Err; }
  const std::string &getMessage() const { return Msg; }

  /// Consume an Error and return the raw enum value contained within it, and
  /// the optional error message. The Error must either be a success value, or
  /// contain a single InstrProfError.
  static std::pair<instrprof_error, std::string> take(Error E) {
    auto Err = instrprof_error::success;
    std::string Msg = "";
    handleAllErrors(std::move(E), [&Err, &Msg](const InstrProfError &IPE) {
      assert(Err == instrprof_error::success && "Multiple errors encountered");
      Err = IPE.get();
      Msg = IPE.getMessage();
    });
    return {Err, Msg};
  }

  static char ID;

private:
  instrprof_error Err;
  std::string Msg;
};
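
// A consumption sketch for InstrProfError::take (illustrative; E is assumed
// to be an Error produced by one of the profile APIs above):
//
// \code
//   auto [ErrCode, Msg] = InstrProfError::take(std::move(E));
//   if (ErrCode != instrprof_error::success)
//     errs() << "profile error: " << Msg << "\n";
// \endcode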

namespace object {

class SectionRef;

} // end namespace object

namespace IndexedInstrProf {

uint64_t ComputeHash(StringRef K);

} // end namespace IndexedInstrProf

/// A symbol table used for function PGO name look-up with keys
/// (such as pointers, md5hash values) to the function. A function's
/// PGO name or the name's md5hash is used in retrieving the profile
/// data of the function. See the \c getPGOFuncName() method for details
/// on how the PGO name is formed.
class InstrProfSymtab {
public:
  using AddrHashMap = std::vector<std::pair<uint64_t, uint64_t>>;

private:
  StringRef Data;
  uint64_t Address = 0;
  // Unique name strings.
  StringSet<> NameTab;
  // A map from MD5 keys to function name strings.
  std::vector<std::pair<uint64_t, StringRef>> MD5NameMap;
  // A map from MD5 keys to function definitions. We only populate this map
  // when building the Symtab from a Module.
  std::vector<std::pair<uint64_t, Function *>> MD5FuncMap;
  // A map from function runtime address to function name MD5 hash.
  // This map is only populated and used by raw instr profile reader.
  AddrHashMap AddrToMD5Map;
  bool Sorted = false;

  static StringRef getExternalSymbol() {
    return "** External Symbol **";
  }

  // If the symtab is created by a series of calls to \c addFuncName, \c
  // finalizeSymtab needs to be called before looking up function names.
  // This is required because the underlying map is a vector (for space
  // efficiency) which needs to be sorted.
  inline void finalizeSymtab();

public:
  InstrProfSymtab() = default;

  /// Create InstrProfSymtab from an object file section which
  /// contains function PGO names. The section may contain raw
  /// string data or string data in compressed form. This method
  /// only initializes the symtab with a reference to the data and
  /// the section base address. Decompression is delayed until
  /// the data is used. See also the \c create(StringRef) method.
  Error create(object::SectionRef &Section);

  /// This interface is used by the reader of the CoverageMapping test
  /// format.
  inline Error create(StringRef D, uint64_t BaseAddr);

  /// \c NameStrings is a string composed of one or more sub-strings
  /// encoded in the format described in \c collectPGOFuncNameStrings.
  /// This method is a wrapper around the \c readPGOFuncNameStrings method.
  inline Error create(StringRef NameStrings);

  /// A wrapper interface to populate the PGO symtab with function
  /// decls from module \c M. This interface is used by transformation
  /// passes such as indirect function call promotion. Variable \c InLTO
  /// indicates if this is called from LTO optimization passes.
  Error create(Module &M, bool InLTO = false);

  /// Create InstrProfSymtab from a set of names iterable over
  /// \p IterRange. This interface is used by IndexedProfReader.
  template <typename NameIterRange>
  Error create(const NameIterRange &IterRange);

  /// Update the symtab by adding \p FuncName to the table. This interface
  /// is used by the raw and text profile readers.
  Error addFuncName(StringRef FuncName) {
    if (FuncName.empty())
      return make_error<InstrProfError>(instrprof_error::malformed,
                                        "function name is empty");
    auto Ins = NameTab.insert(FuncName);
    if (Ins.second) {
      MD5NameMap.push_back(std::make_pair(
          IndexedInstrProf::ComputeHash(FuncName), Ins.first->getKey()));
      Sorted = false;
    }
    return Error::success();
  }

  /// Map a function address to its name's MD5 hash. This interface
  /// is only used by the raw profiler reader.
  void mapAddress(uint64_t Addr, uint64_t MD5Val) {
    AddrToMD5Map.push_back(std::make_pair(Addr, MD5Val));
  }

  /// Return a function's hash, or 0 if the function is not in this SymTab.
  uint64_t getFunctionHashFromAddress(uint64_t Address);

  /// Return a function's PGO name from the function name's symbol
  /// address in the object file. If an error occurs, return
  /// an empty string.
  StringRef getFuncName(uint64_t FuncNameAddress, size_t NameSize);

  /// Return a function's PGO name from the name's md5 hash value.
  /// If not found, return an empty string.
  inline StringRef getFuncName(uint64_t FuncMD5Hash);

  /// Just like getFuncName, except that it will return a non-empty StringRef
  /// if the function is external to this symbol table. All such cases
  /// will be represented using the same StringRef value.
  inline StringRef getFuncNameOrExternalSymbol(uint64_t FuncMD5Hash);

  /// True if Symbol is the value used to represent external symbols.
  static bool isExternalSymbol(const StringRef &Symbol) {
    return Symbol == InstrProfSymtab::getExternalSymbol();
  }

  /// Return the function from the name's md5 hash. Return nullptr if not found.
  inline Function *getFunction(uint64_t FuncMD5Hash);

  /// Return the function's original assembly name by stripping off
  /// the prefix attached (to symbols with private linkage). For
  /// global functions, it returns the same string as getFuncName.
  inline StringRef getOrigFuncName(uint64_t FuncMD5Hash);

  /// Return the name section data.
  inline StringRef getNameData() const { return Data; }

  /// Dump the symbols in this table.
  void dumpNames(raw_ostream &OS) const;
};

Error InstrProfSymtab::create(StringRef D, uint64_t BaseAddr) {
  Data = D;
  Address = BaseAddr;
  return Error::success();
}

Error InstrProfSymtab::create(StringRef NameStrings) {
  return readPGOFuncNameStrings(NameStrings, *this);
}

template <typename NameIterRange>
Error InstrProfSymtab::create(const NameIterRange &IterRange) {
  for (auto Name : IterRange)
    if (Error E = addFuncName(Name))
      return E;

  finalizeSymtab();
  return Error::success();
}

void InstrProfSymtab::finalizeSymtab() {
  if (Sorted)
    return;
  llvm::sort(MD5NameMap, less_first());
  llvm::sort(MD5FuncMap, less_first());
  llvm::sort(AddrToMD5Map, less_first());
  AddrToMD5Map.erase(std::unique(AddrToMD5Map.begin(), AddrToMD5Map.end()),
                     AddrToMD5Map.end());
  Sorted = true;
}

StringRef InstrProfSymtab::getFuncNameOrExternalSymbol(uint64_t FuncMD5Hash) {
  StringRef ret = getFuncName(FuncMD5Hash);
  if (ret.empty())
    return InstrProfSymtab::getExternalSymbol();
  return ret;
}

StringRef InstrProfSymtab::getFuncName(uint64_t FuncMD5Hash) {
  finalizeSymtab();
  auto Result = llvm::lower_bound(MD5NameMap, FuncMD5Hash,
                                  [](const std::pair<uint64_t, StringRef> &LHS,
                                     uint64_t RHS) { return LHS.first < RHS; });
  if (Result != MD5NameMap.end() && Result->first == FuncMD5Hash)
    return Result->second;
  return StringRef();
}

Function* InstrProfSymtab::getFunction(uint64_t FuncMD5Hash) {
  finalizeSymtab();
  auto Result = llvm::lower_bound(MD5FuncMap, FuncMD5Hash,
                                  [](const std::pair<uint64_t, Function *> &LHS,
                                     uint64_t RHS) { return LHS.first < RHS; });
  if (Result != MD5FuncMap.end() && Result->first == FuncMD5Hash)
    return Result->second;
  return nullptr;
}

// See also the getPGOFuncName implementation. These two need to be
// kept in sync.
StringRef InstrProfSymtab::getOrigFuncName(uint64_t FuncMD5Hash) {
  StringRef PGOName = getFuncName(FuncMD5Hash);
  size_t S = PGOName.find_first_of(':');
  if (S == StringRef::npos)
    return PGOName;
  return PGOName.drop_front(S + 1);
}

// Stores either the sums of profile count values or those sums as a
// percentage of the total count values.
struct CountSumOrPercent {
  uint64_t NumEntries;
  double CountSum;
  double ValueCounts[IPVK_Last - IPVK_First + 1];
  CountSumOrPercent() : NumEntries(0), CountSum(0.0f), ValueCounts() {}
  void reset() {
    NumEntries = 0;
    CountSum = 0.0f;
    for (double &VC : ValueCounts)
      VC = 0.0f;
  }
};

// Function level or program level overlap information.
struct OverlapStats {
  enum OverlapStatsLevel { ProgramLevel, FunctionLevel };
  // Sum of the total count values for the base profile.
  CountSumOrPercent Base;
  // Sum of the total count values for the test profile.
  CountSumOrPercent Test;
  // Overlap score. Should be in the range [0.0, 1.0].
  CountSumOrPercent Overlap;
  CountSumOrPercent Mismatch;
  CountSumOrPercent Unique;
  OverlapStatsLevel Level;
  const std::string *BaseFilename;
  const std::string *TestFilename;
  StringRef FuncName;
  uint64_t FuncHash;
  bool Valid;

  OverlapStats(OverlapStatsLevel L = ProgramLevel)
      : Level(L), BaseFilename(nullptr), TestFilename(nullptr), FuncHash(0),
        Valid(false) {}

  void dump(raw_fd_ostream &OS) const;

  void setFuncInfo(StringRef Name, uint64_t Hash) {
    FuncName = Name;
    FuncHash = Hash;
  }

  Error accumulateCounts(const std::string &BaseFilename,
                         const std::string &TestFilename, bool IsCS);
  void addOneMismatch(const CountSumOrPercent &MismatchFunc);
  void addOneUnique(const CountSumOrPercent &UniqueFunc);

  static inline double score(uint64_t Val1, uint64_t Val2, double Sum1,
                             double Sum2) {
    if (Sum1 < 1.0f || Sum2 < 1.0f)
      return 0.0f;
    return std::min(Val1 / Sum1, Val2 / Sum2);
  }
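
  // A worked example of score() (hypothetical counts): with Val1 = 30,
  // Val2 = 60, Sum1 = 100.0, and Sum2 = 100.0, the result is
  // min(30 / 100.0, 60 / 100.0) = 0.3.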
};

// This is used to filter the functions whose overlap information
// is to be output.
struct OverlapFuncFilters {
  uint64_t ValueCutoff;
  const std::string NameFilter;
};

struct InstrProfValueSiteRecord {
  /// Value profiling data pairs at a given value site.
  std::list<InstrProfValueData> ValueData;

  InstrProfValueSiteRecord() { ValueData.clear(); }
  template <class InputIterator>
  InstrProfValueSiteRecord(InputIterator F, InputIterator L)
      : ValueData(F, L) {}

  /// Sort ValueData ascending by Value
  void sortByTargetValues() {
    ValueData.sort(
        [](const InstrProfValueData &left, const InstrProfValueData &right) {
          return left.Value < right.Value;
        });
  }
  /// Sort ValueData descending by Count.
  inline void sortByCount();

  /// Merge data from another InstrProfValueSiteRecord
  /// Optionally scale merged counts by \p Weight.
  void merge(InstrProfValueSiteRecord &Input, uint64_t Weight,
             function_ref<void(instrprof_error)> Warn);
  /// Scale up value profile data counts by N (Numerator) / D (Denominator).
  void scale(uint64_t N, uint64_t D, function_ref<void(instrprof_error)> Warn);

  /// Compute the overlap between this record and the Input record.
  void overlap(InstrProfValueSiteRecord &Input, uint32_t ValueKind,
               OverlapStats &Overlap, OverlapStats &FuncLevelOverlap);
};

/// Profiling information for a single function.
struct InstrProfRecord {
  std::vector<uint64_t> Counts;

  InstrProfRecord() = default;
  InstrProfRecord(std::vector<uint64_t> Counts) : Counts(std::move(Counts)) {}
  InstrProfRecord(InstrProfRecord &&) = default;
  InstrProfRecord(const InstrProfRecord &RHS)
      : Counts(RHS.Counts),
        ValueData(RHS.ValueData
                      ? std::make_unique<ValueProfData>(*RHS.ValueData)
                      : nullptr) {}
  InstrProfRecord &operator=(InstrProfRecord &&) = default;
  InstrProfRecord &operator=(const InstrProfRecord &RHS) {
    Counts = RHS.Counts;
    if (!RHS.ValueData) {
      ValueData = nullptr;
      return *this;
    }
    if (!ValueData)
      ValueData = std::make_unique<ValueProfData>(*RHS.ValueData);
    else
      *ValueData = *RHS.ValueData;
    return *this;
  }

  /// Return the number of value profile kinds with non-zero number
  /// of profile sites.
  inline uint32_t getNumValueKinds() const;
  /// Return the number of instrumented sites for ValueKind.
  inline uint32_t getNumValueSites(uint32_t ValueKind) const;

  /// Return the total number of ValueData for ValueKind.
  inline uint32_t getNumValueData(uint32_t ValueKind) const;

  /// Return the number of value data collected for ValueKind at profiling
  /// site: Site.
  inline uint32_t getNumValueDataForSite(uint32_t ValueKind,
                                         uint32_t Site) const;

  /// Return the array of profiled values at \p Site. If \p TotalC
  /// is not null, the total count of all target values at this site
  /// will be stored in \c *TotalC.
  inline std::unique_ptr<InstrProfValueData[]>
  getValueForSite(uint32_t ValueKind, uint32_t Site,
                  uint64_t *TotalC = nullptr) const;

  /// Get the target value/counts of kind \p ValueKind collected at site
  /// \p Site and store the result in array \p Dest. Return the total
  /// counts of all target values at this site.
  inline uint64_t getValueForSite(InstrProfValueData Dest[], uint32_t ValueKind,
                                  uint32_t Site) const;

  /// Reserve space for NumValueSites sites.
  inline void reserveSites(uint32_t ValueKind, uint32_t NumValueSites);

  /// Add ValueData for ValueKind at value Site.
  void addValueData(uint32_t ValueKind, uint32_t Site,
                    InstrProfValueData *VData, uint32_t N,
                    InstrProfSymtab *SymTab);

  /// Merge the counts in \p Other into this one.
  /// Optionally scale merged counts by \p Weight.
  void merge(InstrProfRecord &Other, uint64_t Weight,
             function_ref<void(instrprof_error)> Warn);

  /// Scale up profile counts (including value profile data) by
  /// a factor of (N / D).
  void scale(uint64_t N, uint64_t D, function_ref<void(instrprof_error)> Warn);

  /// Sort value profile data (per site) by count.
  void sortValueData() {
    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
      for (auto &SR : getValueSitesForKind(Kind))
        SR.sortByCount();
  }

  /// Clear value data entries and edge counters.
  void Clear() {
    Counts.clear();
    clearValueData();
  }

  /// Clear value data entries
  void clearValueData() { ValueData = nullptr; }

  /// Compute the sums of all counts and store in Sum.
  void accumulateCounts(CountSumOrPercent &Sum) const;

  /// Compute the overlap between this InstrProfRecord and Other.
  void overlap(InstrProfRecord &Other, OverlapStats &Overlap,
               OverlapStats &FuncLevelOverlap, uint64_t ValueCutoff);

  /// Compute the overlap of value profile counts.
  void overlapValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
                            OverlapStats &Overlap,
                            OverlapStats &FuncLevelOverlap);

  enum CountPseudoKind {
    NotPseudo = 0,
    PseudoHot,
    PseudoWarm,
  };
  enum PseudoCountVal {
    HotFunctionVal = -1,
    WarmFunctionVal = -2,
  };
  CountPseudoKind getCountPseudoKind() const {
    uint64_t FirstCount = Counts[0];
    if (FirstCount == (uint64_t)HotFunctionVal)
      return PseudoHot;
    if (FirstCount == (uint64_t)WarmFunctionVal)
      return PseudoWarm;
    return NotPseudo;
  }
  void setPseudoCount(CountPseudoKind Kind) {
    if (Kind == PseudoHot)
      Counts[0] = (uint64_t)HotFunctionVal;
    else if (Kind == PseudoWarm)
      Counts[0] = (uint64_t)WarmFunctionVal;
  }

private:
  struct ValueProfData {
    std::vector<InstrProfValueSiteRecord> IndirectCallSites;
    std::vector<InstrProfValueSiteRecord> MemOPSizes;
  };
  std::unique_ptr<ValueProfData> ValueData;

  MutableArrayRef<InstrProfValueSiteRecord>
  getValueSitesForKind(uint32_t ValueKind) {
    // Cast to /add/ const (should be an implicit_cast, ideally, if that's ever
    // implemented in LLVM) to call the const overload of this function, then
    // cast away the constness from the result.
    auto AR = const_cast<const InstrProfRecord *>(this)->getValueSitesForKind(
        ValueKind);
    return MutableArrayRef(
        const_cast<InstrProfValueSiteRecord *>(AR.data()), AR.size());
  }
  ArrayRef<InstrProfValueSiteRecord>
  getValueSitesForKind(uint32_t ValueKind) const {
    if (!ValueData)
      return std::nullopt;
    switch (ValueKind) {
    case IPVK_IndirectCallTarget:
      return ValueData->IndirectCallSites;
    case IPVK_MemOPSize:
      return ValueData->MemOPSizes;
    default:
      llvm_unreachable("Unknown value kind!");
    }
  }

  std::vector<InstrProfValueSiteRecord> &
  getOrCreateValueSitesForKind(uint32_t ValueKind) {
    if (!ValueData)
      ValueData = std::make_unique<ValueProfData>();
    switch (ValueKind) {
    case IPVK_IndirectCallTarget:
      return ValueData->IndirectCallSites;
    case IPVK_MemOPSize:
      return ValueData->MemOPSizes;
    default:
      llvm_unreachable("Unknown value kind!");
    }
  }

  // Map indirect call target name hash to name string.
  uint64_t remapValue(uint64_t Value, uint32_t ValueKind,
                      InstrProfSymtab *SymTab);

  // Merge Value Profile data from Src record to this record for ValueKind.
  // Scale merged value counts by \p Weight.
  void mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
                          uint64_t Weight,
                          function_ref<void(instrprof_error)> Warn);

  // Scale up value profile data count by N (Numerator) / D (Denominator).
  void scaleValueProfData(uint32_t ValueKind, uint64_t N, uint64_t D,
                          function_ref<void(instrprof_error)> Warn);
};

struct NamedInstrProfRecord : InstrProfRecord {
  StringRef Name;
  uint64_t Hash;

  // We reserve this bit as the flag for context sensitive profile record.
  static const int CS_FLAG_IN_FUNC_HASH = 60;

  NamedInstrProfRecord() = default;
  NamedInstrProfRecord(StringRef Name, uint64_t Hash,
                       std::vector<uint64_t> Counts)
      : InstrProfRecord(std::move(Counts)), Name(Name), Hash(Hash) {}

  static bool hasCSFlagInHash(uint64_t FuncHash) {
    return ((FuncHash >> CS_FLAG_IN_FUNC_HASH) & 1);
  }
  static void setCSFlagInHash(uint64_t &FuncHash) {
    FuncHash |= ((uint64_t)1 << CS_FLAG_IN_FUNC_HASH);
  }
};

uint32_t InstrProfRecord::getNumValueKinds() const {
  uint32_t NumValueKinds = 0;
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    NumValueKinds += !(getValueSitesForKind(Kind).empty());
  return NumValueKinds;
}

uint32_t InstrProfRecord::getNumValueData(uint32_t ValueKind) const {
  uint32_t N = 0;
  for (const auto &SR : getValueSitesForKind(ValueKind))
    N += SR.ValueData.size();
  return N;
}

uint32_t InstrProfRecord::getNumValueSites(uint32_t ValueKind) const {
  return getValueSitesForKind(ValueKind).size();
}

uint32_t InstrProfRecord::getNumValueDataForSite(uint32_t ValueKind,
                                                 uint32_t Site) const {
  return getValueSitesForKind(ValueKind)[Site].ValueData.size();
}

std::unique_ptr<InstrProfValueData[]>
InstrProfRecord::getValueForSite(uint32_t ValueKind, uint32_t Site,
                                 uint64_t *TotalC) const {
  uint64_t Dummy = 0;
  uint64_t &TotalCount = (TotalC == nullptr ? Dummy : *TotalC);
  uint32_t N = getNumValueDataForSite(ValueKind, Site);
  if (N == 0) {
    TotalCount = 0;
    return std::unique_ptr<InstrProfValueData[]>(nullptr);
  }

  auto VD = std::make_unique<InstrProfValueData[]>(N);
  TotalCount = getValueForSite(VD.get(), ValueKind, Site);

  return VD;
}

uint64_t InstrProfRecord::getValueForSite(InstrProfValueData Dest[],
                                          uint32_t ValueKind,
                                          uint32_t Site) const {
  uint32_t I = 0;
  uint64_t TotalCount = 0;
  for (auto V : getValueSitesForKind(ValueKind)[Site].ValueData) {
    Dest[I].Value = V.Value;
    Dest[I].Count = V.Count;
    TotalCount = SaturatingAdd(TotalCount, V.Count);
    I++;
  }
  return TotalCount;
}
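
// A sketch of walking per-site value data on a record (Record is assumed to
// be a populated InstrProfRecord; use() is a placeholder):
//
// \code
//   uint32_t NumSites = Record.getNumValueSites(IPVK_IndirectCallTarget);
//   for (uint32_t S = 0; S < NumSites; ++S) {
//     uint64_t Total = 0;
//     auto VD = Record.getValueForSite(IPVK_IndirectCallTarget, S, &Total);
//     uint32_t N = Record.getNumValueDataForSite(IPVK_IndirectCallTarget, S);
//     for (uint32_t I = 0; I < N; ++I)
//       use(VD[I].Value, VD[I].Count, Total);
//   }
// \endcode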

void InstrProfRecord::reserveSites(uint32_t ValueKind, uint32_t NumValueSites) {
  if (!NumValueSites)
    return;
  getOrCreateValueSitesForKind(ValueKind).reserve(NumValueSites);
}

inline support::endianness getHostEndianness() {
  return sys::IsLittleEndianHost ? support::little : support::big;
}

// Include definitions for value profile data
#define INSTR_PROF_VALUE_PROF_DATA
#include "llvm/ProfileData/InstrProfData.inc"

void InstrProfValueSiteRecord::sortByCount() {
  ValueData.sort(
      [](const InstrProfValueData &left, const InstrProfValueData &right) {
        return left.Count > right.Count;
      });
  // Now truncate
  size_t max_s = INSTR_PROF_MAX_NUM_VAL_PER_SITE;
  if (ValueData.size() > max_s)
    ValueData.resize(max_s);
}

namespace IndexedInstrProf {

enum class HashT : uint32_t {
  MD5,
  Last = MD5
};

inline uint64_t ComputeHash(HashT Type, StringRef K) {
  switch (Type) {
  case HashT::MD5:
    return MD5Hash(K);
  }
  llvm_unreachable("Unhandled hash type");
}

const uint64_t Magic = 0x8169666f72706cff; // "\xfflprofi\x81"

enum ProfVersion {
  // Version 1 is the first version. In this version, the value of
  // a key/value pair can only include profile data of a single function.
  // Due to this restriction, the number of block counters for a given
  // function is not recorded but derived from the length of the value.
  Version1 = 1,
  // The version 2 format supports recording profile data of multiple
  // functions which share the same key in one value field. To support this,
  // the number of block counters is recorded as a uint64_t field right after
  // the function structural hash.
  Version2 = 2,
  // Version 3 supports value profile data. The value profile data is expected
  // to follow the block counter profile data.
  Version3 = 3,
  // In this version, profile summary data \c IndexedInstrProf::Summary is
  // stored after the profile header.
  Version4 = 4,
  // In this version, the frontend PGO stable hash algorithm defaults to V2.
  Version5 = 5,
  // In this version, the frontend PGO stable hash algorithm was fixed and
  // may produce hashes different from Version5.
  Version6 = 6,
  // An additional counter is added around logical operators.
  Version7 = 7,
  // An additional (optional) memory profile type is added.
  Version8 = 8,
  // Binary ids are added.
  Version9 = 9,
  // An additional (optional) temporal profile traces section is added.
  Version10 = 10,
  // The current version is 10.
  CurrentVersion = INSTR_PROF_INDEX_VERSION
};
const uint64_t Version = ProfVersion::CurrentVersion;

const HashT HashType = HashT::MD5;

inline uint64_t ComputeHash(StringRef K) { return ComputeHash(HashType, K); }

// This structure defines the file header of the LLVM profile
// data file in indexed format.
struct Header {
  uint64_t Magic;
  uint64_t Version;
  uint64_t Unused; // Unused since version 4.
  uint64_t HashType;
  uint64_t HashOffset;
  uint64_t MemProfOffset;
  uint64_t BinaryIdOffset;
  uint64_t TemporalProfTracesOffset;
  // New fields should only be added at the end to ensure that the size
  // computation is correct. The methods below need to be updated to ensure that
  // the new field is read correctly.

  // Reads a header struct from the buffer.
  static Expected<Header> readFromBuffer(const unsigned char *Buffer);

  // Returns the size of the header in bytes for all valid fields based on the
  // version. I.e., an older version header will return a smaller size.
  size_t size() const;

  // Returns the format version in little endian. The header retains the
  // version in the native endianness of the compiler runtime.
  uint64_t formatVersion() const;
};
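
// A header-reading sketch (Buffer is assumed to point at the start of an
// indexed profile; error handling elided):
//
// \code
//   Expected<Header> H = Header::readFromBuffer(Buffer);
//   if (!H)
//     return H.takeError();
//   uint64_t FormatVersion = H->formatVersion();
// \endcode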

// Profile summary data recorded in the profile data file in indexed
// format. It was introduced in version 4. The summary data follows
// right after the profile file header.
struct Summary {
  struct Entry {
    uint64_t Cutoff; ///< The required percentile of total execution count.
    uint64_t
        MinBlockCount;  ///< The minimum execution count for this percentile.
    uint64_t NumBlocks; ///< Number of blocks >= the minimum execution count.
  };
  // The field kind enumerator to assigned value mapping should remain
  // unchanged when a new kind is added or an old kind gets deleted in
  // the future.
  enum SummaryFieldKind {
    /// The total number of functions instrumented.
    TotalNumFunctions = 0,
    /// Total number of instrumented blocks/edges.
    TotalNumBlocks = 1,
    /// The maximal execution count among all functions.
    /// This field does not exist for profile data from IR based
    /// instrumentation.
    MaxFunctionCount = 2,
    /// Max block count of the program.
    MaxBlockCount = 3,
    /// Max internal block count of the program (excluding entry blocks).
    MaxInternalBlockCount = 4,
    /// The sum of all instrumented block counts.
    TotalBlockCount = 5,
    NumKinds = TotalBlockCount + 1
  };

  // The number of summary fields following the summary header.
  uint64_t NumSummaryFields;
  // The number of Cutoff Entries (Summary::Entry) following summary fields.
  uint64_t NumCutoffEntries;

  Summary() = delete;
  Summary(uint32_t Size) { memset(this, 0, Size); }

  void operator delete(void *ptr) { ::operator delete(ptr); }

  static uint32_t getSize(uint32_t NumSumFields, uint32_t NumCutoffEntries) {
    return sizeof(Summary) + NumCutoffEntries * sizeof(Entry) +
           NumSumFields * sizeof(uint64_t);
  }

  const uint64_t *getSummaryDataBase() const {
    return reinterpret_cast<const uint64_t *>(this + 1);
  }

  uint64_t *getSummaryDataBase() {
    return reinterpret_cast<uint64_t *>(this + 1);
  }

  const Entry *getCutoffEntryBase() const {
    return reinterpret_cast<const Entry *>(
        &getSummaryDataBase()[NumSummaryFields]);
  }

  Entry *getCutoffEntryBase() {
    return reinterpret_cast<Entry *>(&getSummaryDataBase()[NumSummaryFields]);
  }

  uint64_t get(SummaryFieldKind K) const {
    return getSummaryDataBase()[K];
  }

  void set(SummaryFieldKind K, uint64_t V) {
    getSummaryDataBase()[K] = V;
  }

  const Entry &getEntry(uint32_t I) const { return getCutoffEntryBase()[I]; }

  void setEntry(uint32_t I, const ProfileSummaryEntry &E) {
    Entry &ER = getCutoffEntryBase()[I];
    ER.Cutoff = E.Cutoff;
    ER.MinBlockCount = E.MinCount;
    ER.NumBlocks = E.NumCounts;
  }
};

inline std::unique_ptr<Summary> allocSummary(uint32_t TotalSize) {
  return std::unique_ptr<Summary>(new (::operator new(TotalSize))
                                      Summary(TotalSize));
}
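
// An allocation/use sketch for the trailing-array layout above (field counts
// are illustrative):
//
// \code
//   uint32_t Size = Summary::getSize(Summary::NumKinds, /*NumCutoffEntries=*/2);
//   std::unique_ptr<Summary> S = allocSummary(Size); // zero-initialized
//   S->NumSummaryFields = Summary::NumKinds;
//   S->NumCutoffEntries = 2;
//   S->set(Summary::TotalNumFunctions, 42);
//   uint64_t N = S->get(Summary::TotalNumFunctions); // 42
// \endcode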

} // end namespace IndexedInstrProf

namespace RawInstrProf {

// Version 1: First version
// Version 2: Added value profile data section. Per-function control data
// struct has more fields to describe value profile information.
// Version 3: Compressed name section support. Function PGO name reference
// from control data struct is changed from raw pointer to Name's MD5 value.
// Version 4: ValueDataBegin and ValueDataSizes fields are removed from the
// raw header.
// Version 5: Bit 60 of FuncHash is reserved for the flag for the context
// sensitive records.
// Version 6: Added binary id.
// Version 7: Reorder binary id and include version in signature.
// Version 8: Use relative counter pointer.
const uint64_t Version = INSTR_PROF_RAW_VERSION;

template <class IntPtrT> inline uint64_t getMagic();
template <> inline uint64_t getMagic<uint64_t>() {
  return INSTR_PROF_RAW_MAGIC_64;
}

template <> inline uint64_t getMagic<uint32_t>() {
  return INSTR_PROF_RAW_MAGIC_32;
}

// Per-function profile data header/control structure.
// The definition should match the structure defined in
// compiler-rt/lib/profile/InstrProfiling.h.
// It should also match the synthesized type in
// Transforms/Instrumentation/InstrProfiling.cpp:getOrCreateRegionCounters.
template <class IntPtrT> struct alignas(8) ProfileData {
  #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Type Name;
  #include "llvm/ProfileData/InstrProfData.inc"
};

// File header structure of the LLVM profile data in raw format.
// The definition should match the header referenced in
// compiler-rt/lib/profile/InstrProfilingFile.c and
// InstrProfilingBuffer.c.
struct Header {
#define INSTR_PROF_RAW_HEADER(Type, Name, Init) const Type Name;
#include "llvm/ProfileData/InstrProfData.inc"
};

} // end namespace RawInstrProf

// Create the variable for the profile file name.
void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput);

// Whether to compress function names in profile records, and filenames in
// code coverage mappings. Used by the Instrumentation library and unit tests.
extern cl::opt<bool> DoInstrProfNameCompression;

} // end namespace llvm
#endif // LLVM_PROFILEDATA_INSTRPROF_H
#ifndef LLVM_PROFILEDATA_RAWMEMPROFREADER_H_
#define LLVM_PROFILEDATA_RAWMEMPROFREADER_H_
//===- MemProfReader.h - Instrumented memory profiling reader ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for reading MemProf profiling data.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/ProfileData/MemProf.h"
#include "llvm/ProfileData/MemProfData.inc"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"

#include <cstddef>

namespace llvm {
namespace memprof {

// Map from id (recorded from sanitizer stack depot) to virtual addresses for
// each program counter address in the callstack.
using CallStackMap = llvm::DenseMap<uint64_t, llvm::SmallVector<uint64_t>>;

class RawMemProfReader {
public:
  RawMemProfReader(const RawMemProfReader &) = delete;
  RawMemProfReader &operator=(const RawMemProfReader &) = delete;

  // Prints the contents of the profile in YAML format.
  void printYAML(raw_ostream &OS);

  // Return true if the \p DataBuffer starts with magic bytes indicating it is
  // a raw binary memprof profile.
  static bool hasFormat(const MemoryBuffer &DataBuffer);
  // Return true if the file at \p Path starts with magic bytes indicating it is
  // a raw binary memprof profile.
  static bool hasFormat(const StringRef Path);

  // Create a RawMemProfReader after sanity checking the contents of the file at
  // \p Path or the \p Buffer. The binary from which the profile has been
  // collected is specified via a path in \p ProfiledBinary.
  static Expected<std::unique_ptr<RawMemProfReader>>
  create(const Twine &Path, StringRef ProfiledBinary, bool KeepName = false);
  static Expected<std::unique_ptr<RawMemProfReader>>
  create(std::unique_ptr<MemoryBuffer> Buffer, StringRef ProfiledBinary,
         bool KeepName = false);

  // Returns a list of build ids recorded in the segment information.
  static std::vector<std::string> peekBuildIds(MemoryBuffer *DataBuffer);

  using GuidMemProfRecordPair = std::pair<GlobalValue::GUID, MemProfRecord>;
  using Iterator = InstrProfIterator<GuidMemProfRecordPair, RawMemProfReader>;
  Iterator end() { return Iterator(); }
  Iterator begin() {
    Iter = FunctionProfileData.begin();
    return Iterator(this);
  }

  Error readNextRecord(GuidMemProfRecordPair &GuidRecord);
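
  // A usage sketch (paths are hypothetical; error handling elided):
  //
  // \code
  //   auto ReaderOr = RawMemProfReader::create("memprof.profraw",
  //                                            "/path/to/profiled/binary");
  //   if (!ReaderOr)
  //     return ReaderOr.takeError();
  //   for (const auto &[GUID, Record] : **ReaderOr)
  //     use(GUID, Record); // use() is a placeholder
  // \endcode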

  // The RawMemProfReader only holds memory profile information.
  InstrProfKind getProfileKind() const { return InstrProfKind::MemProf; }

  // Constructor for unittests only.
  RawMemProfReader(std::unique_ptr<llvm::symbolize::SymbolizableModule> Sym,
                   llvm::SmallVectorImpl<SegmentEntry> &Seg,
                   llvm::MapVector<uint64_t, MemInfoBlock> &Prof,
                   CallStackMap &SM, bool KeepName = false)
      : Symbolizer(std::move(Sym)), SegmentInfo(Seg.begin(), Seg.end()),
        CallstackProfileData(Prof), StackMap(SM), KeepSymbolName(KeepName) {
    // We don't call initialize here since there is no raw profile to read. The
    // test should pass in the raw profile as structured data.

    // If there is an error here then the mock symbolizer has not been
    // initialized properly.
    if (Error E = symbolizeAndFilterStackFrames())
      report_fatal_error(std::move(E));
    if (Error E = mapRawProfileToRecords())
      report_fatal_error(std::move(E));
  }

  // Return a const reference to the internal Id to Frame mappings.
  const llvm::DenseMap<FrameId, Frame> &getFrameMapping() const {
    return IdToFrame;
  }

  // Return a const reference to the internal function profile data.
  const llvm::MapVector<GlobalValue::GUID, IndexedMemProfRecord> &
  getProfileData() const {
    return FunctionProfileData;
  }

private:
  RawMemProfReader(object::OwningBinary<object::Binary> &&Bin, bool KeepName)
      : Binary(std::move(Bin)), KeepSymbolName(KeepName) {}
  // Initializes the RawMemProfReader with the contents in `DataBuffer`.
  Error initialize(std::unique_ptr<MemoryBuffer> DataBuffer);
  // Read and parse the contents of the `DataBuffer` as a binary format profile.
  Error readRawProfile(std::unique_ptr<MemoryBuffer> DataBuffer);
  // Initialize the segment mapping information for symbolization.
  Error setupForSymbolization();
  // Symbolize and cache all the virtual addresses we encounter in the
  // callstacks from the raw profile. Also prune callstack frames which we can't
  // symbolize or those that belong to the runtime. For profile entries where
  // the entire callstack is pruned, we drop the entry from the profile.
  Error symbolizeAndFilterStackFrames();
  // Construct memprof records for each function and store them in the
  // `FunctionProfileData` map. A function may have allocation profile data or
  // callsite data or both.
  Error mapRawProfileToRecords();

  // A helper method to extract the frame from the IdToFrame map.
  const Frame &idToFrame(const FrameId Id) const {
    auto It = IdToFrame.find(Id);
    assert(It != IdToFrame.end() && "Id not found in map.");
    return It->getSecond();
  }

  object::SectionedAddress getModuleOffset(uint64_t VirtualAddress);

  // The profiled binary.
  object::OwningBinary<object::Binary> Binary;
  // A symbolizer to translate virtual addresses to code locations.
  std::unique_ptr<llvm::symbolize::SymbolizableModule> Symbolizer;
  // The preferred load address of the executable segment.
  uint64_t PreferredTextSegmentAddress = 0;
  // The base address of the text segment in the process during profiling.
  uint64_t ProfiledTextSegmentStart = 0;
  // The limit address of the text segment in the process during profiling.
  uint64_t ProfiledTextSegmentEnd = 0;

  // The memory mapped segment information for all executable segments in the
  // profiled binary (filtered from the raw profile using the build id).
  llvm::SmallVector<SegmentEntry, 2> SegmentInfo;

  // A map from callstack id (same as key in CallStackMap below) to the heap
  // information recorded for that allocation context.
  llvm::MapVector<uint64_t, MemInfoBlock> CallstackProfileData;
  CallStackMap StackMap;

  // Cached symbolization from PC to Frame.
  llvm::DenseMap<uint64_t, llvm::SmallVector<FrameId>> SymbolizedFrame;
  llvm::DenseMap<FrameId, Frame> IdToFrame;

  llvm::MapVector<GlobalValue::GUID, IndexedMemProfRecord> FunctionProfileData;
  llvm::MapVector<GlobalValue::GUID, IndexedMemProfRecord>::iterator Iter;

  // Whether to keep the symbol name for each frame after hashing.
  bool KeepSymbolName = false;
  // A mapping of the hash to symbol name, only used if KeepSymbolName is true.
  llvm::DenseMap<uint64_t, std::string> GuidToSymbolName;
};
} // namespace memprof
} // namespace llvm

#endif // LLVM_PROFILEDATA_RAWMEMPROFREADER_H_
//===--- ItaniumManglingCanonicalizer.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a class for computing equivalence classes of mangled names
// given a set of equivalences between name fragments.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PROFILEDATA_ITANIUMMANGLINGCANONICALIZER_H
#define LLVM_PROFILEDATA_ITANIUMMANGLINGCANONICALIZER_H

#include <cstdint>

namespace llvm {

class StringRef;

/// Canonicalizer for mangled names.
///
/// This class allows specifying a list of "equivalent" manglings. For example,
/// you can specify that Ss is equivalent to
///   NSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE
/// and then manglings that refer to libstdc++'s 'std::string' will be
/// considered equivalent to manglings that are the same except that they refer
/// to libc++'s 'std::string'.
///
/// This can be used when data (e.g., profiling data) is available for a
/// version of a program built in a different configuration, with
/// correspondingly different manglings.
class ItaniumManglingCanonicalizer {
public:
  ItaniumManglingCanonicalizer();
  ItaniumManglingCanonicalizer(const ItaniumManglingCanonicalizer &) = delete;
  void operator=(const ItaniumManglingCanonicalizer &) = delete;
  ~ItaniumManglingCanonicalizer();

  enum class EquivalenceError {
    Success,

    /// Both the equivalent manglings have already been used as components of
    /// some other mangling we've looked at. It's too late to add this
    /// equivalence.
    ManglingAlreadyUsed,

    /// The first equivalent mangling is invalid.
    InvalidFirstMangling,

    /// The second equivalent mangling is invalid.
    InvalidSecondMangling,
  };

  enum class FragmentKind {
    /// The mangling fragment is a <name> (or a predefined <substitution>).
    Name,
    /// The mangling fragment is a <type>.
    Type,
    /// The mangling fragment is an <encoding>.
    Encoding,
  };

  /// Add an equivalence between \p First and \p Second. Both manglings must
  /// live at least as long as the canonicalizer.
  EquivalenceError addEquivalence(FragmentKind Kind, StringRef First,
                                  StringRef Second);

  using Key = uintptr_t;

  /// Form a canonical key for the specified mangling. The key will be the
  /// same for all equivalent manglings, and different for any two
  /// non-equivalent manglings, but is otherwise unspecified.
  ///
  /// Returns Key() if (and only if) the mangling is not a valid Itanium C++
  /// ABI mangling.
  ///
  /// The string denoted by Mangling must live as long as the canonicalizer.
  Key canonicalize(StringRef Mangling);

  /// Find a canonical key for the specified mangling, if one has already been
  /// formed. Otherwise returns Key().
  Key lookup(StringRef Mangling);

private:
  struct Impl;
  Impl *P;
};
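
// A usage sketch (the manglings are illustrative; both strings outlive the
// canonicalizer since they are literals):
//
// \code
//   ItaniumManglingCanonicalizer C;
//   C.addEquivalence(
//       ItaniumManglingCanonicalizer::FragmentKind::Name, "Ss",
//       "NSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE");
//   auto K1 = C.canonicalize("_Z1fSs");
//   auto K2 = C.canonicalize(
//       "_Z1fNSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE");
//   // K1 == K2: the two manglings differ only by the equivalent fragments.
// \endcode
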
} // namespace llvm

#endif // LLVM_PROFILEDATA_ITANIUMMANGLINGCANONICALIZER_H
//===- llvm/Pass.h - Base class for Passes ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a base class that indicates that a specified class is a
// transformation pass implementation.
//
// Passes are designed this way so that it is possible to run passes in a
// cache-friendly and organizationally optimal order without having to
// specify it at the front end.  This allows arbitrary passes to be strung
// together and have them executed as efficiently as possible.
//
// Passes should extend one of the classes below, depending on the guarantees
// that it can make about what will be modified as it is run.  For example,
// most global optimizations should derive from FunctionPass, because they do
// not add or delete functions; they operate on the internals of the function.
//
// Note that this file #includes PassSupport.h and PassAnalysisSupport.h (at the
// bottom), so the APIs exposed by these files are also automatically available
// to all users of this file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASS_H
#define LLVM_PASS_H

#ifdef EXPENSIVE_CHECKS
#include <cstdint>
#endif
#include <string>

namespace llvm {

class AnalysisResolver;
class AnalysisUsage;
class Function;
class ImmutablePass;
class Module;
class PassInfo;
class PMDataManager;
class PMStack;
class raw_ostream;
class StringRef;

// AnalysisID - Use the PassInfo to identify a pass...
using AnalysisID = const void *;

/// Different types of internal pass managers. External pass managers
/// (PassManager and FunctionPassManager) are not represented here.
/// Ordering of pass manager types is important here.
enum PassManagerType {
  PMT_Unknown = 0,
  PMT_ModulePassManager = 1, ///< MPPassManager
  PMT_CallGraphPassManager,  ///< CGPassManager
  PMT_FunctionPassManager,   ///< FPPassManager
  PMT_LoopPassManager,       ///< LPPassManager
  PMT_RegionPassManager,     ///< RGPassManager
  PMT_Last
};

// Different types of passes.
enum PassKind {
  PT_Region,
  PT_Loop,
  PT_Function,
  PT_CallGraphSCC,
  PT_Module,
  PT_PassManager
};

/// This enumerates the LLVM full LTO or ThinLTO optimization phases.
enum class ThinOrFullLTOPhase {
  /// No LTO/ThinLTO behavior needed.
  None,
  /// ThinLTO prelink (summary) phase.
  ThinLTOPreLink,
  /// ThinLTO postlink (backend compile) phase.
  ThinLTOPostLink,
  /// Full LTO prelink phase.
  FullLTOPreLink,
  /// Full LTO postlink (backend compile) phase.
  FullLTOPostLink
};

//===----------------------------------------------------------------------===//
/// Pass interface - Implemented by all 'passes'.  Subclass this if you are an
/// interprocedural optimization or you do not fit into any of the more
/// constrained passes described below.
///
class Pass {
  AnalysisResolver *Resolver = nullptr;  // Used to resolve analysis
  const void *PassID;
  PassKind Kind;

public:
  explicit Pass(PassKind K, char &pid) : PassID(&pid), Kind(K) {}
  Pass(const Pass &) = delete;
  Pass &operator=(const Pass &) = delete;
  virtual ~Pass();

  PassKind getPassKind() const { return Kind; }

  /// getPassName - Return a nice clean name for a pass.  This is usually
  /// implemented in terms of the name that is registered by one of the
  /// Registration templates, but can be overridden directly.
  virtual StringRef getPassName() const;

  /// getPassID - Return the PassID number that corresponds to this pass.
  AnalysisID getPassID() const {
    return PassID;
  }

  /// doInitialization - Virtual method overridden by subclasses to do
  /// any necessary initialization before any pass is run.
  virtual bool doInitialization(Module &)  { return false; }

  /// doFinalization - Virtual method overridden by subclasses to do any
  /// necessary clean up after all passes have run.
  virtual bool doFinalization(Module &) { return false; }

  /// print - Print out the internal state of the pass.  This is called by
  /// Analyze to print out the contents of an analysis.  Otherwise it is not
  /// necessary to implement this method.  Beware that the module pointer MAY
  /// be null.  This automatically forwards to a virtual function that does
  /// not provide the Module* so that, if the analysis doesn't need it, it can
  /// just be ignored.
  virtual void print(raw_ostream &OS, const Module *M) const;

  void dump() const; // dump - Print to stderr.

  /// createPrinterPass - Get a Pass appropriate to print the IR this
  /// pass operates on (Module, Function or MachineFunction).
  virtual Pass *createPrinterPass(raw_ostream &OS,
                                  const std::string &Banner) const = 0;

  /// Each pass is responsible for assigning a pass manager to itself.
  /// PMS is the stack of available pass managers.
  virtual void assignPassManager(PMStack &,
                                 PassManagerType) {}

  /// Check if available pass managers are suitable for this pass or not.
  virtual void preparePassManager(PMStack &);

  ///  Return what kind of Pass Manager can manage this pass.
  virtual PassManagerType getPotentialPassManagerType() const;

  // Access AnalysisResolver
  void setResolver(AnalysisResolver *AR);
  AnalysisResolver *getResolver() const { return Resolver; }

  /// getAnalysisUsage - This function should be overridden by passes that
  /// need analysis information to do their job.  If a pass specifies that it
  /// uses a particular analysis result via this function, it can then use the
  /// getAnalysis<AnalysisType>() function, below.
  virtual void getAnalysisUsage(AnalysisUsage &) const;
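
  // A typical override sketch (a hypothetical pass, assuming the legacy pass
  // manager; FunctionPass is declared later in this header, and
  // DominatorTreeWrapperPass comes from llvm/IR/Dominators.h):
  //
  // \code
  //   struct MyPass : FunctionPass {
  //     static char ID;
  //     MyPass() : FunctionPass(ID) {}
  //     void getAnalysisUsage(AnalysisUsage &AU) const override {
  //       AU.addRequired<DominatorTreeWrapperPass>();
  //       AU.setPreservesAll();
  //     }
  //     bool runOnFunction(Function &F) override {
  //       auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  //       (void)DT; // consult the dominator tree here
  //       return false; // nothing was modified
  //     }
  //   };
  //   char MyPass::ID = 0;
  // \endcode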

  /// releaseMemory() - This member can be implemented by a pass if it wants to
  /// be able to release its memory when it is no longer needed.  The default
  /// behavior of passes is to hold onto memory for the entire duration of their
  /// lifetime (which is the entire compile time).  For pipelined passes, this
  /// is not a big deal because that memory gets recycled every time the pass is
  /// invoked on another program unit.  For IP passes, it is more important to
  /// free memory when it is unused.
  ///
  /// Optionally implement this function to release pass memory when it is no
  /// longer used.
  virtual void releaseMemory();

  /// getAdjustedAnalysisPointer - This method is used when a pass implements
  /// an analysis interface through multiple inheritance.  If needed, it should
  /// override this to adjust the this pointer as needed for the specified pass
  /// info.
  virtual void *getAdjustedAnalysisPointer(AnalysisID ID);
  virtual ImmutablePass *getAsImmutablePass();
  virtual PMDataManager *getAsPMDataManager();

  /// verifyAnalysis() - This member can be implemented by an analysis pass to
  /// check the state of analysis information.
  virtual void verifyAnalysis() const;

  // dumpPassStructure - Implement the -debug-passes=PassStructure option
  virtual void dumpPassStructure(unsigned Offset = 0);

  // lookupPassInfo - Return the pass info object for the specified pass class,
  // or null if it is not known.
  static const PassInfo *lookupPassInfo(const void *TI);

  // lookupPassInfo - Return the pass info object for the pass with the given
  // argument string, or null if it is not known.
  static const PassInfo *lookupPassInfo(StringRef Arg);

  // createPass - Create an object for the specified pass class,
  // or null if it is not known.
  static Pass *createPass(AnalysisID ID);

  /// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
  /// get analysis information that might be around, for example to update it.
  /// This is different than getAnalysis in that it can fail (if the analysis
  /// results haven't been computed), so should only be used if you can handle
  /// the case when the analysis is not available.  This method is often used by
  /// transformation APIs to update analysis results for a pass automatically as
  /// the transform is performed.
  template<typename AnalysisType> AnalysisType *
    getAnalysisIfAvailable() const; // Defined in PassAnalysisSupport.h
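
  // A hedged sketch of the guard-then-update idiom (the analysis named here
  // is illustrative, not mandated by this header):
  //
  //   if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
  //     ; // it is now safe to update the available dominator tree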

  /// mustPreserveAnalysisID - This method serves the same function as
  /// getAnalysisIfAvailable, but works if you just have an AnalysisID.  This
  /// obviously cannot give you a properly typed instance of the class if you
  /// don't have the class name available (use getAnalysisIfAvailable if you
  /// do), but it can tell you if you need to preserve the pass at least.
  bool mustPreserveAnalysisID(char &AID) const;

  /// getAnalysis<AnalysisType>() - This function is used by subclasses to get
  /// to the analysis information that they claim to use by overriding the
  /// getAnalysisUsage function.
  template<typename AnalysisType>
  AnalysisType &getAnalysis() const; // Defined in PassAnalysisSupport.h
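
  // A minimal sketch of declaring and consuming an analysis from a legacy
  // pass (DominatorTreeWrapperPass lives in llvm/IR/Dominators.h and is used
  // purely as an example):
  //
  //   struct MyPass : FunctionPass {
  //     static char ID;
  //     MyPass() : FunctionPass(ID) {}
  //     void getAnalysisUsage(AnalysisUsage &AU) const override {
  //       AU.addRequired<DominatorTreeWrapperPass>();
  //     }
  //     bool runOnFunction(Function &F) override {
  //       auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  //       (void)DT; // ... query the dominator tree ...
  //       return false;
  //     }
  //   };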

  template <typename AnalysisType>
  AnalysisType &
  getAnalysis(Function &F,
              bool *Changed = nullptr); // Defined in PassAnalysisSupport.h

  template<typename AnalysisType>
  AnalysisType &getAnalysisID(AnalysisID PI) const;

  template <typename AnalysisType>
  AnalysisType &getAnalysisID(AnalysisID PI, Function &F,
                              bool *Changed = nullptr);

#ifdef EXPENSIVE_CHECKS
  /// Hash a module in order to detect when a module (or more specific) pass has
  /// modified it.
  uint64_t structuralHash(Module &M) const;

  /// Hash a function in order to detect when a function (or more specific) pass
  /// has modified it.
  virtual uint64_t structuralHash(Function &F) const;
#endif
};

//===----------------------------------------------------------------------===//
/// ModulePass class - This class is used to implement unstructured
/// interprocedural optimizations and analyses.  ModulePasses may do anything
/// they want to the program.
///
class ModulePass : public Pass {
public:
  explicit ModulePass(char &pid) : Pass(PT_Module, pid) {}

  // Force out-of-line virtual method.
  ~ModulePass() override;

  /// createPrinterPass - Get a module printer pass.
  Pass *createPrinterPass(raw_ostream &OS,
                          const std::string &Banner) const override;

  /// runOnModule - Virtual method overridden by subclasses to process the module
  /// being operated on.
  virtual bool runOnModule(Module &M) = 0;

  void assignPassManager(PMStack &PMS, PassManagerType T) override;

  ///  Return what kind of Pass Manager can manage this pass.
  PassManagerType getPotentialPassManagerType() const override;

protected:
  /// Optional passes call this function to check whether the pass should be
  /// skipped. This is the case when optimization bisect is over the limit.
  bool skipModule(Module &M) const;
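
  // The usual guard at the top of runOnModule, sketched (MyModulePass is
  // illustrative):
  //
  //   bool MyModulePass::runOnModule(Module &M) {
  //     if (skipModule(M))
  //       return false;
  //     // ... transform M ...
  //   }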
};

//===----------------------------------------------------------------------===//
/// ImmutablePass class - This class is used to provide information that does
/// not need to be run.  This is useful for things like target information and
/// "basic" versions of AnalysisGroups.
///
class ImmutablePass : public ModulePass {
public:
  explicit ImmutablePass(char &pid) : ModulePass(pid) {}

  // Force out-of-line virtual method.
  ~ImmutablePass() override;

  /// initializePass - This method may be overridden by immutable passes to
  /// allow them to perform various initialization actions they require.  This
  /// is primarily because an ImmutablePass can "require" another
  /// ImmutablePass, and if it does, the overridden version of initializePass
  /// may get access to these passes with getAnalysis<>.
  virtual void initializePass();

  ImmutablePass *getAsImmutablePass() override { return this; }

  /// ImmutablePasses are never run.
  bool runOnModule(Module &) override { return false; }
};

//===----------------------------------------------------------------------===//
/// FunctionPass class - This class is used to implement most global
/// optimizations.  Optimizations should subclass this class if they meet the
/// following constraints:
///
///  1. Optimizations are organized globally, i.e., a function at a time
///  2. Optimizing a function does not cause the addition or removal of any
///     functions in the module
///
class FunctionPass : public Pass {
public:
  explicit FunctionPass(char &pid) : Pass(PT_Function, pid) {}

  /// createPrinterPass - Get a function printer pass.
  Pass *createPrinterPass(raw_ostream &OS,
                          const std::string &Banner) const override;

  /// runOnFunction - Virtual method overridden by subclasses to do the
  /// per-function processing of the pass.
  virtual bool runOnFunction(Function &F) = 0;

  void assignPassManager(PMStack &PMS, PassManagerType T) override;

  ///  Return what kind of Pass Manager can manage this pass.
  PassManagerType getPotentialPassManagerType() const override;

protected:
  /// Optional passes call this function to check whether the pass should be
  /// skipped. This is the case when Attribute::OptimizeNone is set or when
  /// optimization bisect is over the limit.
  bool skipFunction(const Function &F) const;
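
  // The usual guard at the top of runOnFunction, sketched (MyFunctionPass is
  // illustrative):
  //
  //   bool MyFunctionPass::runOnFunction(Function &F) {
  //     if (skipFunction(F))
  //       return false;
  //     // ... transform F ...
  //   }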
};

/// If the user specifies the -time-passes argument on an LLVM tool command line
/// then the value of this boolean will be true, otherwise false.
/// This is the storage for the -time-passes option.
extern bool TimePassesIsEnabled;
/// If TimePassesPerRun is true, there is one line of report for each pass
/// invocation.
/// If TimePassesPerRun is false, there is only one line of report for each
/// pass (even if there is more than one pass object).
/// (For new pass manager only)
extern bool TimePassesPerRun;

} // end namespace llvm

// Include support files that contain important APIs commonly used by Passes,
// but that we want to separate out to make it easier to read the header files.
#include "llvm/PassAnalysisSupport.h"
#include "llvm/PassSupport.h"

#endif // LLVM_PASS_H

//===--- UnicodeCharRanges.h - Types and functions for character ranges ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_UNICODECHARRANGES_H
#define LLVM_SUPPORT_UNICODECHARRANGES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

#define DEBUG_TYPE "unicode"

namespace llvm {
namespace sys {

/// Represents a closed range of Unicode code points [Lower, Upper].
struct UnicodeCharRange {
  uint32_t Lower;
  uint32_t Upper;
};

inline bool operator<(uint32_t Value, UnicodeCharRange Range) {
  return Value < Range.Lower;
}
inline bool operator<(UnicodeCharRange Range, uint32_t Value) {
  return Range.Upper < Value;
}

/// Holds a reference to an ordered array of UnicodeCharRange and allows
/// to quickly check if a code point is contained in the set represented by this
/// array.
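///
/// A minimal usage sketch (the ranges below are illustrative; they must be
/// ordered and non-overlapping):
/// \code
///   static const UnicodeCharRange LatinLetters[] = {{0x0041, 0x005A},
///                                                   {0x0061, 0x007A}};
///   static const UnicodeCharSet LatinLetterSet(LatinLetters);
///   bool IsLetter = LatinLetterSet.contains(0x0061); // true
/// \endcode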
class UnicodeCharSet {
public:
  typedef ArrayRef<UnicodeCharRange> CharRanges;

  /// Constructs a UnicodeCharSet instance from an array of
  /// UnicodeCharRanges.
  ///
  /// The array pointed to by \p Ranges should have a lifetime at least as long
  /// as the UnicodeCharSet instance, and should not change. The array is
  /// validated by the constructor, so it makes sense to create as few
  /// UnicodeCharSet instances per array of ranges as possible.
#ifdef NDEBUG

  // FIXME: This could use constexpr + static_assert. This way we
  // may get rid of NDEBUG in this header. Unfortunately there are some
  // problems to get this working with MSVC 2013. Change this when
  // the support for MSVC 2013 is dropped.
  constexpr UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {}
#else
  UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {
    assert(rangesAreValid());
  }
#endif

  /// Returns true if the character set contains the Unicode code point
  /// \p C.
  bool contains(uint32_t C) const {
    return std::binary_search(Ranges.begin(), Ranges.end(), C);
  }

private:
  /// Returns true if each of the ranges is a proper closed range
  /// [min, max], and if the ranges themselves are ordered and non-overlapping.
  bool rangesAreValid() const {
    uint32_t Prev = 0;
    for (CharRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
         I != E; ++I) {
      if (I != Ranges.begin() && Prev >= I->Lower) {
        LLVM_DEBUG(dbgs() << "Upper bound 0x");
        LLVM_DEBUG(dbgs().write_hex(Prev));
        LLVM_DEBUG(dbgs() << " should be less than succeeding lower bound 0x");
        LLVM_DEBUG(dbgs().write_hex(I->Lower) << "\n");
        return false;
      }
      if (I->Upper < I->Lower) {
        LLVM_DEBUG(dbgs() << "Upper bound 0x");
        LLVM_DEBUG(dbgs().write_hex(I->Upper));
        LLVM_DEBUG(dbgs() << " should not be less than lower bound 0x");
        LLVM_DEBUG(dbgs().write_hex(I->Lower) << "\n");
        return false;
      }
      Prev = I->Upper;
    }

    return true;
  }

  const CharRanges Ranges;
};

} // namespace sys
} // namespace llvm

#undef DEBUG_TYPE // "unicode"

#endif // LLVM_SUPPORT_UNICODECHARRANGES_H

//===-- MSP430Attributes.h - MSP430 Attributes ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===-----------------------------------------------------------------------===//
///
/// \file
/// This file contains enumerations for MSP430 ELF build attributes as
/// defined in the MSP430 ELF psABI specification.
///
/// MSP430 ELF psABI specification
///
/// https://www.ti.com/lit/pdf/slaa534
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_MSP430ATTRIBUTES_H
#define LLVM_SUPPORT_MSP430ATTRIBUTES_H

#include "llvm/Support/ELFAttributes.h"

namespace llvm {
namespace MSP430Attrs {

const TagNameMap &getMSP430AttributeTags();

enum AttrType : unsigned {
  // Attribute types in ELF/.MSP430.attributes.
  TagISA = 4,
  TagCodeModel = 6,
  TagDataModel = 8,
  TagEnumSize = 10
};

enum ISA { ISAMSP430 = 1, ISAMSP430X = 2 };
enum CodeModel { CMSmall = 1, CMLarge = 2 };
enum DataModel { DMSmall = 1, DMLarge = 2, DMRestricted = 3 };
enum EnumSize { ESSmall = 1, ESInteger = 2, ESDontCare = 3 };

} // namespace MSP430Attrs
} // namespace llvm

#endif

#ifndef LLVM_SUPPORT_LOCALE_H
#define LLVM_SUPPORT_LOCALE_H

namespace llvm {
class StringRef;

namespace sys {
namespace locale {

int columnWidth(StringRef s);
bool isPrint(int c);

} // namespace locale
} // namespace sys
} // namespace llvm

#endif // LLVM_SUPPORT_LOCALE_H

//===- llvm/Support/Program.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::Program class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PROGRAM_H
#define LLVM_SUPPORT_PROGRAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
#include <chrono>
#include <optional>
#include <system_error>

namespace llvm {
class BitVector;
namespace sys {

  /// This is the OS-specific separator for PATH like environment variables:
  /// a colon on Unix or a semicolon on Windows.
#if defined(LLVM_ON_UNIX)
  const char EnvPathSeparator = ':';
#elif defined (_WIN32)
  const char EnvPathSeparator = ';';
#endif

#if defined(_WIN32)
  typedef unsigned long procid_t; // Must match the type of DWORD on Windows.
  typedef void *process_t;        // Must match the type of HANDLE on Windows.
#else
  typedef ::pid_t procid_t;
  typedef procid_t process_t;
#endif

  /// This struct encapsulates information about a process.
  struct ProcessInfo {
    enum : procid_t { InvalidPid = 0 };

    procid_t Pid;      ///< The process identifier.
    process_t Process; ///< Platform-dependent process object.

    /// The return code, set after execution.
    int ReturnCode;

    ProcessInfo();
  };

  /// This struct encapsulates information about a process execution.
  struct ProcessStatistics {
    std::chrono::microseconds TotalTime;
    std::chrono::microseconds UserTime;
    uint64_t PeakMemory = 0; ///< Maximum resident set size in KiB.
  };

  /// Find the first executable file \p Name in \p Paths.
  ///
  /// This does not perform hashing as a shell would but instead stats each PATH
  /// entry individually so should generally be avoided. Core LLVM library
  /// functions and options should instead require fully specified paths.
  ///
  /// \param Name name of the executable to find. If it contains any system
  ///   path separators, it will be returned as is.
  /// \param Paths optional list of paths to search for \p Name. If empty it
  ///   will use the system PATH environment instead.
  ///
  /// \returns The fully qualified path to the first \p Name in \p Paths if it
  ///   exists. \p Name if \p Name has slashes in it. Otherwise an error.
  ErrorOr<std::string>
  findProgramByName(StringRef Name, ArrayRef<StringRef> Paths = {});

  // These functions change the specified standard stream (stdin or stdout) mode
  // based on the Flags. They return errc::success if the specified stream was
  // changed. Otherwise, a platform dependent error is returned.
  std::error_code ChangeStdinMode(fs::OpenFlags Flags);
  std::error_code ChangeStdoutMode(fs::OpenFlags Flags);

  // These functions change the specified standard stream (stdin or stdout) to
  // binary mode. They return errc::success if the specified stream
  // was changed. Otherwise a platform dependent error is returned.
  std::error_code ChangeStdinToBinary();
  std::error_code ChangeStdoutToBinary();

  /// This function executes the program using the arguments provided.  The
  /// invoked program will inherit the stdin, stdout, and stderr file
  /// descriptors, the environment and other configuration settings of the
  /// invoking program.
  /// This function waits for the program to finish, so should be avoided in
  /// library functions that aren't expected to block. Consider using
  /// ExecuteNoWait() instead.
  /// \returns an integer result code indicating the status of the program.
  /// A zero or positive value indicates the result code of the program.
  /// -1 indicates failure to execute
  /// -2 indicates a crash during execution or timeout
  int ExecuteAndWait(
      StringRef Program, ///< Path of the program to be executed. It is
      ///< presumed this is the result of the findProgramByName method.
      ArrayRef<StringRef> Args, ///< An array of strings that are passed to the
      ///< program.  The first element should be the name of the program.
      ///< The array should **not** be terminated by an empty StringRef.
      std::optional<ArrayRef<StringRef>> Env =
          std::nullopt, ///< An optional vector of
      ///< strings to use for the program's environment. If not provided, the
      ///< current program's environment will be used.  If specified, the
      ///< vector should **not** be terminated by an empty StringRef.
      ArrayRef<std::optional<StringRef>> Redirects = {}, ///<
      ///< An array of optional paths. Should have a size of zero or three.
      ///< If the array is empty, no redirections are performed.
      ///< Otherwise, the inferior process's stdin(0), stdout(1), and stderr(2)
      ///< will be redirected to the corresponding paths, if the optional path
      ///< is present (not \c std::nullopt).
      ///< When an empty path is passed in, the corresponding file descriptor
      ///< will be disconnected (ie, /dev/null'd) in a portable way.
      unsigned SecondsToWait = 0, ///< If non-zero, this specifies the amount
      ///< of time to wait for the child process to exit. If the time
      ///< expires, the child is killed and this call returns. If zero,
      ///< this function will wait until the child finishes or forever if
      ///< it doesn't.
      unsigned MemoryLimit = 0, ///< If non-zero, this specifies the maximum
      ///< amount of memory that can be allocated by the process. If memory
      ///< usage exceeds this limit, the child is killed and this call returns.
      ///< If zero, no memory limit is imposed.
      std::string *ErrMsg = nullptr, ///< If non-null, provides a pointer to a
      ///< string instance in which error messages will be returned. If the
      ///< string is non-empty upon return, an error occurred while invoking the
      ///< program.
      bool *ExecutionFailed = nullptr,
      std::optional<ProcessStatistics> *ProcStat = nullptr, ///< If non-null,
      ///< provides a pointer to a structure in which process execution
      ///< statistics will be stored.
      BitVector *AffinityMask = nullptr ///< CPUs or processors the new
                                        ///< program shall run on.
  );
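
  // A hedged sketch of the find-then-run pattern (the program name and
  // arguments are illustrative):
  //
  //   if (ErrorOr<std::string> Path = findProgramByName("clang")) {
  //     StringRef Args[] = {*Path, "--version"};
  //     std::string ErrMsg;
  //     int RC = ExecuteAndWait(*Path, Args, /*Env=*/std::nullopt,
  //                             /*Redirects=*/{}, /*SecondsToWait=*/0,
  //                             /*MemoryLimit=*/0, &ErrMsg);
  //     // RC < 0 means the program could not be executed or it crashed.
  //   }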

  /// Similar to ExecuteAndWait, but returns immediately.
  /// \returns The \see ProcessInfo of the newly launched process.
  /// \note On Microsoft Windows systems, users will need to either call
  /// \see Wait until the process has finished executing or call the Win32
  /// CloseHandle() API on ProcessInfo.ProcessHandle to avoid memory leaks.
  ProcessInfo ExecuteNoWait(StringRef Program, ArrayRef<StringRef> Args,
                            std::optional<ArrayRef<StringRef>> Env,
                            ArrayRef<std::optional<StringRef>> Redirects = {},
                            unsigned MemoryLimit = 0,
                            std::string *ErrMsg = nullptr,
                            bool *ExecutionFailed = nullptr,
                            BitVector *AffinityMask = nullptr);

  /// Return true if the given arguments fit within system-specific
  /// argument length limits.
  bool commandLineFitsWithinSystemLimits(StringRef Program,
                                         ArrayRef<StringRef> Args);

  /// Return true if the given arguments fit within system-specific
  /// argument length limits.
  bool commandLineFitsWithinSystemLimits(StringRef Program,
                                         ArrayRef<const char *> Args);

  /// File encoding options when writing contents that a non-UTF8 tool will
  /// read (on Windows systems). For UNIX, we always use UTF-8.
  enum WindowsEncodingMethod {
    /// UTF-8 is the LLVM native encoding, being the same as "do not perform
    /// encoding conversion".
    WEM_UTF8,
    WEM_CurrentCodePage,
    WEM_UTF16
  };

  /// Saves the UTF8-encoded \p contents string into the file \p FileName
  /// using a specific encoding.
  ///
  /// This write file function adds the possibility to choose which encoding
  /// to use when writing a text file. On Windows, this is important when
  /// writing files with internationalization support with an encoding that is
  /// different from the one used in LLVM (UTF-8). We use this when writing
  /// response files, since GCC tools on MinGW only understand legacy code
  /// pages, and VisualStudio tools only understand UTF-16.
  /// For UNIX, using different encodings is silently ignored, since all tools
  /// work well with UTF-8.
  /// This function assumes that you only use UTF-8 *text* data and will convert
  /// it to your desired encoding before writing to the file.
  ///
  /// FIXME: We use EM_CurrentCodePage to write response files for GNU tools in
  /// a MinGW/MinGW-w64 environment, which has serious flaws but currently is
  /// our best shot to make gcc/ld understand international characters. This
  /// should be changed as soon as binutils fix this to support UTF16 on mingw.
  ///
  /// \returns non-zero error_code if failed
  std::error_code
  writeFileWithEncoding(StringRef FileName, StringRef Contents,
                        WindowsEncodingMethod Encoding = WEM_UTF8);
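
  // For example (a sketch; the file name and contents are illustrative):
  //
  //   std::error_code EC =
  //       writeFileWithEncoding("response.rsp", Contents, WEM_UTF16);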

  /// This function waits for the process specified by \p PI to finish.
  /// \returns A \see ProcessInfo struct with Pid set to:
  /// \li The process id of the child process if the child process has changed
  /// state.
  /// \li 0 if the child process has not changed state.
  /// \note Users of this function should always check the ReturnCode member of
  /// the \see ProcessInfo returned from this function.
  ProcessInfo
  Wait(const ProcessInfo &PI, ///< The child process that should be waited on.
       std::optional<unsigned> SecondsToWait, ///< If std::nullopt, waits until
       ///< the child has terminated.
       ///< If a value, this specifies the amount of time to wait for the child
       ///< process. If the time expires and \p Polling is false, the child is
       ///< killed and this function returns. If the time expires and \p
       ///< Polling is true, the child is resumed.
       ///<
       ///< If zero, this function will perform a non-blocking
       ///< wait on the child process.
       std::string *ErrMsg = nullptr, ///< If non-null, provides a pointer to a
       ///< string instance in which error messages will be returned. If the
       ///< string is non-empty upon return, an error occurred while invoking
       ///< the program.
       std::optional<ProcessStatistics> *ProcStat =
           nullptr, ///< If non-null, provides
       ///< a pointer to a structure in which process execution statistics will
       ///< be stored.

       bool Polling = false ///< If true, do not kill the process on timeout.
  );
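
  // Sketch: pair ExecuteNoWait with Wait for non-blocking launches (Path and
  // Args are placeholders):
  //
  //   ProcessInfo PI = ExecuteNoWait(Path, Args, /*Env=*/std::nullopt);
  //   // ... do other work ...
  //   ProcessInfo Res = Wait(PI, /*SecondsToWait=*/std::nullopt);
  //   // Res.ReturnCode holds the child's exit status.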

  /// Print a command argument, and optionally quote it.
  void printArg(llvm::raw_ostream &OS, StringRef Arg, bool Quote);

#if defined(_WIN32)
  /// Given a list of command line arguments, quote and escape them as
  /// necessary to build a single flat command line appropriate for calling
  /// CreateProcess on Windows.
  ErrorOr<std::wstring> flattenWindowsCommandLine(ArrayRef<StringRef> Args);
#endif
  } // namespace sys
} // namespace llvm

#endif

/*===- llvm/Support/Solaris/sys/regset.h ------------------------*- C++ -*-===*
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===----------------------------------------------------------------------===*
 *
 * This file works around excessive name space pollution from the system header
 * on Solaris hosts.
 *
 *===----------------------------------------------------------------------===*/

#ifndef LLVM_SUPPORT_SOLARIS_SYS_REGSET_H
#define LLVM_SUPPORT_SOLARIS_SYS_REGSET_H

#include_next <sys/regset.h>

#undef CS
#undef DS
#undef ES
#undef FS
#undef GS
#undef SS
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#undef UESP
#undef EFL
#undef ERR
#undef TRAPNO

#endif

//===- TypeName.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TYPENAME_H
#define LLVM_SUPPORT_TYPENAME_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

/// We provide a function which tries to compute the (demangled) name of a type
/// statically.
///
/// This routine may fail on some platforms or for particularly unusual types.
/// Do not use it for anything other than logging and debugging aids. It isn't
/// portable or dependable in any real sense.
///
/// The returned StringRef will point into a static storage duration string.
/// However, it may not be null terminated and may be some strangely aligned
/// inner substring of a larger string.
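///
/// For example (output is compiler-dependent; this is only a sketch):
/// \code
///   struct Widget {};
///   llvm::outs() << getTypeName<Widget>() << '\n'; // e.g. prints "Widget"
/// \endcode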
template <typename DesiredTypeName>
inline StringRef getTypeName() {
#if defined(__clang__) || defined(__GNUC__)
  StringRef Name = __PRETTY_FUNCTION__;

  StringRef Key = "DesiredTypeName = ";
  Name = Name.substr(Name.find(Key));
  assert(!Name.empty() && "Unable to find the template parameter!");
  Name = Name.drop_front(Key.size());

  assert(Name.endswith("]") && "Name doesn't end in the substitution key!");
  return Name.drop_back(1);
#elif defined(_MSC_VER)
  StringRef Name = __FUNCSIG__;

  StringRef Key = "getTypeName<";
  Name = Name.substr(Name.find(Key));
  assert(!Name.empty() && "Unable to find the function name!");
  Name = Name.drop_front(Key.size());

  for (StringRef Prefix : {"class ", "struct ", "union ", "enum "})
    if (Name.startswith(Prefix)) {
      Name = Name.drop_front(Prefix.size());
      break;
    }

  auto AnglePos = Name.rfind('>');
  assert(AnglePos != StringRef::npos && "Unable to find the closing '>'!");
  return Name.substr(0, AnglePos);
#else
  // No known technique for statically extracting a type name on this compiler.
  // We return a string that is unlikely to look like any type in LLVM.
  return "UNKNOWN_TYPE";
#endif
}

} // namespace llvm

#endif

//===- SwapByteOrder.h - Generic and optimized byte swaps -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares generic and optimized functions to swap the byte order of
// an integral type.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SWAPBYTEORDER_H
#define LLVM_SUPPORT_SWAPBYTEORDER_H

#include "llvm/ADT/bit.h"
#include <cstddef>
#include <cstdint>
#include <type_traits>

#if defined(__linux__) || defined(__GNU__) || defined(__HAIKU__) ||            \
    defined(__Fuchsia__) || defined(__EMSCRIPTEN__)
#include <endian.h>
#elif defined(_AIX)
#include <sys/machine.h>
#elif defined(__sun)
/* Solaris provides _BIG_ENDIAN/_LITTLE_ENDIAN selector in sys/types.h */
#include <sys/types.h>
#define BIG_ENDIAN 4321
#define LITTLE_ENDIAN 1234
#if defined(_BIG_ENDIAN)
#define BYTE_ORDER BIG_ENDIAN
#else
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#elif defined(__MVS__)
#define BIG_ENDIAN 4321
#define LITTLE_ENDIAN 1234
#define BYTE_ORDER BIG_ENDIAN
#else
#if !defined(BYTE_ORDER) && !defined(_WIN32)
#include <machine/endian.h>
#endif
#endif

namespace llvm {

namespace sys {

#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
constexpr bool IsBigEndianHost = true;
#else
constexpr bool IsBigEndianHost = false;
#endif

constexpr bool IsLittleEndianHost = !IsBigEndianHost;

inline unsigned char      getSwappedBytes(unsigned char      C) { return llvm::byteswap(C); }
inline   signed char      getSwappedBytes( signed  char      C) { return llvm::byteswap(C); }
inline          char      getSwappedBytes(         char      C) { return llvm::byteswap(C); }

inline unsigned short     getSwappedBytes(unsigned short     C) { return llvm::byteswap(C); }
inline   signed short     getSwappedBytes(  signed short     C) { return llvm::byteswap(C); }

inline unsigned int       getSwappedBytes(unsigned int       C) { return llvm::byteswap(C); }
inline   signed int       getSwappedBytes(  signed int       C) { return llvm::byteswap(C); }

inline unsigned long      getSwappedBytes(unsigned long      C) { return llvm::byteswap(C); }
inline   signed long      getSwappedBytes(  signed long      C) { return llvm::byteswap(C); }

inline unsigned long long getSwappedBytes(unsigned long long C) { return llvm::byteswap(C); }
inline   signed long long getSwappedBytes(  signed long long C) { return llvm::byteswap(C); }

inline float getSwappedBytes(float C) {
  union {
    uint32_t i;
    float f;
  } in, out;
  in.f = C;
  out.i = llvm::byteswap(in.i);
  return out.f;
}

inline double getSwappedBytes(double C) {
  union {
    uint64_t i;
    double d;
  } in, out;
  in.d = C;
  out.i = llvm::byteswap(in.i);
  return out.d;
}

template <typename T>
inline std::enable_if_t<std::is_enum_v<T>, T> getSwappedBytes(T C) {
  return static_cast<T>(
      llvm::byteswap(static_cast<std::underlying_type_t<T>>(C)));
}

template<typename T>
inline void swapByteOrder(T &Value) {
  Value = getSwappedBytes(Value);
}
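
// A quick illustration of these helpers (values are examples):
//
//   uint32_t X = 0x11223344;
//   uint32_t Y = getSwappedBytes(X); // Y == 0x44332211
//   swapByteOrder(X);                // now X == 0x44332211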

} // end namespace sys
} // end namespace llvm

#endif

//===-- llvm/Support/TargetParser.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of `llvm/TargetParser/TargetParser.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/TargetParser.h"

//===- SMTAPI.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines a generic SMT Solver API, which serves as the base class
//  for every SMT-solver-specific class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SMTAPI_H
#define LLVM_SUPPORT_SMTAPI_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <optional>

namespace llvm {

/// Generic base class for SMT sorts
class SMTSort {
public:
  SMTSort() = default;
  virtual ~SMTSort() = default;

  /// Returns true if the sort is a bitvector, calls isBitvectorSortImpl().
  virtual bool isBitvectorSort() const { return isBitvectorSortImpl(); }

  /// Returns true if the sort is a floating-point, calls isFloatSortImpl().
  virtual bool isFloatSort() const { return isFloatSortImpl(); }

  /// Returns true if the sort is a boolean, calls isBooleanSortImpl().
  virtual bool isBooleanSort() const { return isBooleanSortImpl(); }

  /// Returns the bitvector size, fails if the sort is not a bitvector
  /// Calls getBitvectorSortSizeImpl().
  virtual unsigned getBitvectorSortSize() const {
    assert(isBitvectorSort() && "Not a bitvector sort!");
    unsigned Size = getBitvectorSortSizeImpl();
    assert(Size && "Size is zero!");
    return Size;
  };

  /// Returns the floating-point size, fails if the sort is not a floating-point
  /// Calls getFloatSortSizeImpl().
  virtual unsigned getFloatSortSize() const {
    assert(isFloatSort() && "Not a floating-point sort!");
    unsigned Size = getFloatSortSizeImpl();
    assert(Size && "Size is zero!");
    return Size;
  };

  virtual void Profile(llvm::FoldingSetNodeID &ID) const = 0;

  bool operator<(const SMTSort &Other) const {
    llvm::FoldingSetNodeID ID1, ID2;
    Profile(ID1);
    Other.Profile(ID2);
    return ID1 < ID2;
  }

  friend bool operator==(SMTSort const &LHS, SMTSort const &RHS) {
    return LHS.equal_to(RHS);
  }

  virtual void print(raw_ostream &OS) const = 0;

  LLVM_DUMP_METHOD void dump() const;

protected:
  /// Query the SMT solver and returns true if two sorts are equal (same kind
  /// and bit width). This does not check if the two sorts are the same objects.
  virtual bool equal_to(SMTSort const &other) const = 0;

  /// Query the SMT solver and checks if a sort is bitvector.
  virtual bool isBitvectorSortImpl() const = 0;

  /// Query the SMT solver and checks if a sort is floating-point.
  virtual bool isFloatSortImpl() const = 0;

  /// Query the SMT solver and checks if a sort is boolean.
  virtual bool isBooleanSortImpl() const = 0;

  /// Query the SMT solver and returns the sort bit width.
  virtual unsigned getBitvectorSortSizeImpl() const = 0;

  /// Query the SMT solver and returns the sort bit width.
  virtual unsigned getFloatSortSizeImpl() const = 0;
};

/// Shared pointer for SMTSorts, used by SMTSolver API.
using SMTSortRef = const SMTSort *;

/// Generic base class for SMT exprs
class SMTExpr {
public:
  SMTExpr() = default;
  virtual ~SMTExpr() = default;

  bool operator<(const SMTExpr &Other) const {
    llvm::FoldingSetNodeID ID1, ID2;
    Profile(ID1);
    Other.Profile(ID2);
    return ID1 < ID2;
  }

  virtual void Profile(llvm::FoldingSetNodeID &ID) const = 0;

  friend bool operator==(SMTExpr const &LHS, SMTExpr const &RHS) {
    return LHS.equal_to(RHS);
  }

  virtual void print(raw_ostream &OS) const = 0;

  LLVM_DUMP_METHOD void dump() const;

protected:
  /// Query the SMT solver and returns true if two exprs are equal (same kind
  /// and bit width). This does not check if the two exprs are the same objects.
  virtual bool equal_to(SMTExpr const &other) const = 0;
};

/// Shared pointer for SMTExprs, used by SMTSolver API.
using SMTExprRef = const SMTExpr *;

/// Generic base class for SMT Solvers
///
/// This class is responsible for wrapping all sorts and expression generation,
/// through the mk* methods. It also provides methods to create SMT expressions
/// straight from clang's AST, through the from* methods.
class SMTSolver {
public:
  SMTSolver() = default;
  virtual ~SMTSolver() = default;

  LLVM_DUMP_METHOD void dump() const;

  // Returns an appropriate floating-point sort for the given bitwidth.
  SMTSortRef getFloatSort(unsigned BitWidth) {
    switch (BitWidth) {
    case 16:
      return getFloat16Sort();
    case 32:
      return getFloat32Sort();
    case 64:
      return getFloat64Sort();
    case 128:
      return getFloat128Sort();
    default:;
    }
    llvm_unreachable("Unsupported floating-point bitwidth!");
  }

  // Returns a boolean sort.
  virtual SMTSortRef getBoolSort() = 0;

  // Returns an appropriate bitvector sort for the given bitwidth.
  virtual SMTSortRef getBitvectorSort(const unsigned BitWidth) = 0;

  // Returns a floating-point sort of width 16
  virtual SMTSortRef getFloat16Sort() = 0;

  // Returns a floating-point sort of width 32
  virtual SMTSortRef getFloat32Sort() = 0;

  // Returns a floating-point sort of width 64
  virtual SMTSortRef getFloat64Sort() = 0;

  // Returns a floating-point sort of width 128
  virtual SMTSortRef getFloat128Sort() = 0;

  // Returns an appropriate sort for the given AST.
  virtual SMTSortRef getSort(const SMTExprRef &AST) = 0;

  /// Given a constraint, adds it to the solver
  virtual void addConstraint(const SMTExprRef &Exp) const = 0;

  /// Creates a bitvector addition operation
  virtual SMTExprRef mkBVAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector subtraction operation
  virtual SMTExprRef mkBVSub(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector multiplication operation
  virtual SMTExprRef mkBVMul(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector signed modulus operation
  virtual SMTExprRef mkBVSRem(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector unsigned modulus operation
  virtual SMTExprRef mkBVURem(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector signed division operation
  virtual SMTExprRef mkBVSDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector unsigned division operation
  virtual SMTExprRef mkBVUDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector logical shift left operation
  virtual SMTExprRef mkBVShl(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector arithmetic shift right operation
  virtual SMTExprRef mkBVAshr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector logical shift right operation
  virtual SMTExprRef mkBVLshr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector negation operation
  virtual SMTExprRef mkBVNeg(const SMTExprRef &Exp) = 0;

  /// Creates a bitvector not operation
  virtual SMTExprRef mkBVNot(const SMTExprRef &Exp) = 0;

  /// Creates a bitvector xor operation
  virtual SMTExprRef mkBVXor(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector or operation
  virtual SMTExprRef mkBVOr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector and operation
  virtual SMTExprRef mkBVAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector unsigned less-than operation
  virtual SMTExprRef mkBVUlt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector signed less-than operation
  virtual SMTExprRef mkBVSlt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector unsigned greater-than operation
  virtual SMTExprRef mkBVUgt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector signed greater-than operation
  virtual SMTExprRef mkBVSgt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector unsigned less-equal-than operation
  virtual SMTExprRef mkBVUle(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector signed less-equal-than operation
  virtual SMTExprRef mkBVSle(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector unsigned greater-equal-than operation
  virtual SMTExprRef mkBVUge(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a bitvector signed greater-equal-than operation
  virtual SMTExprRef mkBVSge(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a boolean not operation
  virtual SMTExprRef mkNot(const SMTExprRef &Exp) = 0;

  /// Creates a boolean equality operation
  virtual SMTExprRef mkEqual(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a boolean and operation
  virtual SMTExprRef mkAnd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a boolean or operation
  virtual SMTExprRef mkOr(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a boolean ite operation
  virtual SMTExprRef mkIte(const SMTExprRef &Cond, const SMTExprRef &T,
                           const SMTExprRef &F) = 0;

  /// Creates a bitvector sign extension operation
  virtual SMTExprRef mkBVSignExt(unsigned i, const SMTExprRef &Exp) = 0;

  /// Creates a bitvector zero extension operation
  virtual SMTExprRef mkBVZeroExt(unsigned i, const SMTExprRef &Exp) = 0;

  /// Creates a bitvector extract operation
  virtual SMTExprRef mkBVExtract(unsigned High, unsigned Low,
                                 const SMTExprRef &Exp) = 0;

  /// Creates a bitvector concat operation
  virtual SMTExprRef mkBVConcat(const SMTExprRef &LHS,
                                const SMTExprRef &RHS) = 0;

  /// Creates a predicate that checks for overflow in a bitvector addition
  /// operation
  virtual SMTExprRef mkBVAddNoOverflow(const SMTExprRef &LHS,
                                       const SMTExprRef &RHS,
                                       bool isSigned) = 0;

  /// Creates a predicate that checks for underflow in a signed bitvector
  /// addition operation
  virtual SMTExprRef mkBVAddNoUnderflow(const SMTExprRef &LHS,
                                        const SMTExprRef &RHS) = 0;

  /// Creates a predicate that checks for overflow in a signed bitvector
  /// subtraction operation
  virtual SMTExprRef mkBVSubNoOverflow(const SMTExprRef &LHS,
                                       const SMTExprRef &RHS) = 0;

  /// Creates a predicate that checks for underflow in a bitvector subtraction
  /// operation
  virtual SMTExprRef mkBVSubNoUnderflow(const SMTExprRef &LHS,
                                        const SMTExprRef &RHS,
                                        bool isSigned) = 0;

  /// Creates a predicate that checks for overflow in a signed bitvector
  /// division/modulus operation
  virtual SMTExprRef mkBVSDivNoOverflow(const SMTExprRef &LHS,
                                        const SMTExprRef &RHS) = 0;

  /// Creates a predicate that checks for overflow in a bitvector negation
  /// operation
  virtual SMTExprRef mkBVNegNoOverflow(const SMTExprRef &Exp) = 0;

  /// Creates a predicate that checks for overflow in a bitvector multiplication
  /// operation
  virtual SMTExprRef mkBVMulNoOverflow(const SMTExprRef &LHS,
                                       const SMTExprRef &RHS,
                                       bool isSigned) = 0;

  /// Creates a predicate that checks for underflow in a signed bitvector
  /// multiplication operation
  virtual SMTExprRef mkBVMulNoUnderflow(const SMTExprRef &LHS,
                                        const SMTExprRef &RHS) = 0;

  /// Creates a floating-point negation operation
  virtual SMTExprRef mkFPNeg(const SMTExprRef &Exp) = 0;

  /// Creates a floating-point isInfinite operation
  virtual SMTExprRef mkFPIsInfinite(const SMTExprRef &Exp) = 0;

  /// Creates a floating-point isNaN operation
  virtual SMTExprRef mkFPIsNaN(const SMTExprRef &Exp) = 0;

  /// Creates a floating-point isNormal operation
  virtual SMTExprRef mkFPIsNormal(const SMTExprRef &Exp) = 0;

  /// Creates a floating-point isZero operation
  virtual SMTExprRef mkFPIsZero(const SMTExprRef &Exp) = 0;

  /// Creates a floating-point multiplication operation
  virtual SMTExprRef mkFPMul(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point division operation
  virtual SMTExprRef mkFPDiv(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point remainder operation
  virtual SMTExprRef mkFPRem(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point addition operation
  virtual SMTExprRef mkFPAdd(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point subtraction operation
  virtual SMTExprRef mkFPSub(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point less-than operation
  virtual SMTExprRef mkFPLt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point greater-than operation
  virtual SMTExprRef mkFPGt(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point less-than-or-equal operation
  virtual SMTExprRef mkFPLe(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point greater-than-or-equal operation
  virtual SMTExprRef mkFPGe(const SMTExprRef &LHS, const SMTExprRef &RHS) = 0;

  /// Creates a floating-point equality operation
  virtual SMTExprRef mkFPEqual(const SMTExprRef &LHS,
                               const SMTExprRef &RHS) = 0;

  /// Creates a floating-point conversion from floating-point to floating-point
  /// operation
  virtual SMTExprRef mkFPtoFP(const SMTExprRef &From, const SMTSortRef &To) = 0;

  /// Creates a floating-point conversion from signed bitvector to
  /// floating-point operation
  virtual SMTExprRef mkSBVtoFP(const SMTExprRef &From,
                               const SMTSortRef &To) = 0;

  /// Creates a floating-point conversion from unsigned bitvector to
  /// floating-point operation
  virtual SMTExprRef mkUBVtoFP(const SMTExprRef &From,
                               const SMTSortRef &To) = 0;

  /// Creates a floating-point conversion from floating-point to signed
  /// bitvector operation
  virtual SMTExprRef mkFPtoSBV(const SMTExprRef &From, unsigned ToWidth) = 0;

  /// Creates a floating-point conversion from floating-point to unsigned
  /// bitvector operation
  virtual SMTExprRef mkFPtoUBV(const SMTExprRef &From, unsigned ToWidth) = 0;

  /// Creates a new symbol, given a name and a sort
  virtual SMTExprRef mkSymbol(const char *Name, SMTSortRef Sort) = 0;

  // Returns an appropriate floating-point rounding mode.
  virtual SMTExprRef getFloatRoundingMode() = 0;

  // If a model is available, returns the value of a given bitvector symbol
  virtual llvm::APSInt getBitvector(const SMTExprRef &Exp, unsigned BitWidth,
                                    bool isUnsigned) = 0;

  // If a model is available, returns the value of a given boolean symbol
  virtual bool getBoolean(const SMTExprRef &Exp) = 0;

  /// Constructs an SMTExprRef from a boolean.
  virtual SMTExprRef mkBoolean(const bool b) = 0;

  /// Constructs an SMTExprRef from a finite APFloat.
  virtual SMTExprRef mkFloat(const llvm::APFloat Float) = 0;

  /// Constructs an SMTExprRef from an APSInt and its bit width
  virtual SMTExprRef mkBitvector(const llvm::APSInt Int, unsigned BitWidth) = 0;

  /// Given an expression, extract the value of this operand in the model.
  virtual bool getInterpretation(const SMTExprRef &Exp, llvm::APSInt &Int) = 0;

  /// Given an expression extract the value of this operand in the model.
  virtual bool getInterpretation(const SMTExprRef &Exp,
                                 llvm::APFloat &Float) = 0;

  /// Check if the constraints are satisfiable
  virtual std::optional<bool> check() const = 0;

  /// Push the current solver state
  virtual void push() = 0;

  /// Pop the previous solver state
  virtual void pop(unsigned NumStates = 1) = 0;

  /// Reset the solver and remove all constraints.
  virtual void reset() = 0;

  /// Checks if the solver supports floating-point arithmetic.
  virtual bool isFPSupported() = 0;

  virtual void print(raw_ostream &OS) const = 0;
};

/// Shared pointer for SMTSolvers.
using SMTSolverRef = std::shared_ptr<SMTSolver>;

/// Convenience method to create a Z3Solver object.
SMTSolverRef CreateZ3Solver();
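
// A hedged end-to-end sketch (requires an LLVM build with Z3 enabled; the
// symbol name and constants are illustrative):
//
//   SMTSolverRef Solver = CreateZ3Solver();
//   SMTSortRef BV32 = Solver->getBitvectorSort(32);
//   SMTExprRef X = Solver->mkSymbol("x", BV32);
//   llvm::APSInt FortyTwo(llvm::APInt(32, 42), /*isUnsigned=*/true);
//   Solver->addConstraint(Solver->mkEqual(X, Solver->mkBitvector(FortyTwo, 32)));
//   std::optional<bool> Sat = Solver->check(); // true if satisfiable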

} // namespace llvm

#endif

//===--- raw_ostream.h - Raw output stream ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the raw_ostream class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RAW_OSTREAM_H
#define LLVM_SUPPORT_RAW_OSTREAM_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <string>
#include <string_view>
#include <system_error>
#include <type_traits>

namespace llvm {

class Duration;
class formatv_object_base;
class format_object_base;
class FormattedString;
class FormattedNumber;
class FormattedBytes;
template <class T> class [[nodiscard]] Expected;

namespace sys {
namespace fs {
enum FileAccess : unsigned;
enum OpenFlags : unsigned;
enum CreationDisposition : unsigned;
class FileLocker;
} // end namespace fs
} // end namespace sys

/// This class implements an extremely fast bulk output stream that can *only*
/// output to a stream.  It does not support seeking, reopening, rewinding, line
/// buffered disciplines etc. It is a simple buffer that outputs
/// a chunk at a time.
class raw_ostream {
public:
  // Class kinds to support LLVM-style RTTI.
  enum class OStreamKind {
    OK_OStream,
    OK_FDStream,
  };

private:
  OStreamKind Kind;

  /// The buffer is handled in such a way that the buffer is
  /// uninitialized, unbuffered, or out of space when OutBufCur >=
  /// OutBufEnd. Thus a single comparison suffices to determine if we
  /// need to take the slow path to write a single character.
  ///
  /// The buffer is in one of three states:
  ///  1. Unbuffered (BufferMode == Unbuffered)
  ///  2. Uninitialized (BufferMode != Unbuffered && OutBufStart == 0).
  ///  3. Buffered (BufferMode != Unbuffered && OutBufStart != 0 &&
  ///               OutBufEnd - OutBufStart >= 1).
  ///
  /// If buffered, then the raw_ostream owns the buffer if (BufferMode ==
  /// InternalBuffer); otherwise the buffer has been set via SetBuffer and is
  /// managed by the subclass.
  ///
  /// If a subclass installs an external buffer using SetBuffer then it can wait
  /// for a \see write_impl() call to handle the data which has been put into
  /// this buffer.
  char *OutBufStart, *OutBufEnd, *OutBufCur;
  bool ColorEnabled = false;

  /// Optional stream this stream is tied to. If this stream is written to, the
  /// tied-to stream will be flushed first.
  raw_ostream *TiedStream = nullptr;

  enum class BufferKind {
    Unbuffered = 0,
    InternalBuffer,
    ExternalBuffer
  } BufferMode;

public:
  // color order matches ANSI escape sequence, don't change
  enum class Colors {
    BLACK = 0,
    RED,
    GREEN,
    YELLOW,
    BLUE,
    MAGENTA,
    CYAN,
    WHITE,
    SAVEDCOLOR,
    RESET,
  };

  static constexpr Colors BLACK = Colors::BLACK;
  static constexpr Colors RED = Colors::RED;
  static constexpr Colors GREEN = Colors::GREEN;
  static constexpr Colors YELLOW = Colors::YELLOW;
  static constexpr Colors BLUE = Colors::BLUE;
  static constexpr Colors MAGENTA = Colors::MAGENTA;
  static constexpr Colors CYAN = Colors::CYAN;
  static constexpr Colors WHITE = Colors::WHITE;
  static constexpr Colors SAVEDCOLOR = Colors::SAVEDCOLOR;
  static constexpr Colors RESET = Colors::RESET;

  explicit raw_ostream(bool unbuffered = false,
                       OStreamKind K = OStreamKind::OK_OStream)
      : Kind(K), BufferMode(unbuffered ? BufferKind::Unbuffered
                                       : BufferKind::InternalBuffer) {
    // Start out ready to flush.
    OutBufStart = OutBufEnd = OutBufCur = nullptr;
  }

  raw_ostream(const raw_ostream &) = delete;
  void operator=(const raw_ostream &) = delete;

  virtual ~raw_ostream();

  /// tell - Return the current offset within the file.
  uint64_t tell() const { return current_pos() + GetNumBytesInBuffer(); }

  OStreamKind get_kind() const { return Kind; }

  //===--------------------------------------------------------------------===//
  // Configuration Interface
  //===--------------------------------------------------------------------===//

  /// If possible, pre-allocate \p ExtraSize bytes for stream data.
  /// This extends internal buffers by an additional ExtraSize bytes, so that
  /// the stream can hold at least tell() + ExtraSize bytes without
  /// re-allocation. reserveExtraSpace() does not change the size/data of the
  /// stream.
  virtual void reserveExtraSpace(uint64_t ExtraSize) {}

  /// Set the stream to be buffered, with an automatically determined buffer
  /// size.
  void SetBuffered();

  /// Set the stream to be buffered, using the specified buffer size.
  void SetBufferSize(size_t Size) {
    flush();
    SetBufferAndMode(new char[Size], Size, BufferKind::InternalBuffer);
  }

  size_t GetBufferSize() const {
    // If we're supposed to be buffered but haven't actually gotten around
    // to allocating the buffer yet, return the value that would be used.
    if (BufferMode != BufferKind::Unbuffered && OutBufStart == nullptr)
      return preferred_buffer_size();

    // Otherwise just return the size of the allocated buffer.
    return OutBufEnd - OutBufStart;
  }

  /// Set the stream to be unbuffered. When unbuffered, the stream will flush
  /// after every write. This routine will also flush the buffer immediately
  /// when the stream is being set to unbuffered.
  void SetUnbuffered() {
    flush();
    SetBufferAndMode(nullptr, 0, BufferKind::Unbuffered);
  }
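
  // For example, a bulk writer may want a larger buffer (size illustrative):
  //
  //   OS.SetBufferSize(1 << 20); // buffer ~1 MiB of output before flushing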

  size_t GetNumBytesInBuffer() const {
    return OutBufCur - OutBufStart;
  }

  //===--------------------------------------------------------------------===//
  // Data Output Interface
  //===--------------------------------------------------------------------===//

  void flush() {
    if (OutBufCur != OutBufStart)
      flush_nonempty();
  }

  raw_ostream &operator<<(char C) {
    if (OutBufCur >= OutBufEnd)
      return write(C);
    *OutBufCur++ = C;
    return *this;
  }

  raw_ostream &operator<<(unsigned char C) {
    if (OutBufCur >= OutBufEnd)
      return write(C);
    *OutBufCur++ = C;
    return *this;
  }

  raw_ostream &operator<<(signed char C) {
    if (OutBufCur >= OutBufEnd)
      return write(C);
    *OutBufCur++ = C;
    return *this;
  }

  raw_ostream &operator<<(StringRef Str) {
    // Inline fast path, particularly for strings with a known length.
    size_t Size = Str.size();

    // Make sure we can use the fast path.
    if (Size > (size_t)(OutBufEnd - OutBufCur))
      return write(Str.data(), Size);

    if (Size) {
      memcpy(OutBufCur, Str.data(), Size);
      OutBufCur += Size;
    }
    return *this;
  }

#if defined(__cpp_char8_t)
  // When using `char8_t *`, integers or pointers are written to the ostream
  // instead of UTF-8 code as one might expect. This might lead to unexpected
  // behavior, especially as `u8""` literals are of type `const char8_t *`
  // instead of `const char *` from C++20 onwards. Thus we disallow using them
  // with raw_ostreams.
  // If you have u8"" literals to stream, you can rewrite them as ordinary
  // literals with escape sequences,
  // e.g.  replace `u8"\u00a0"` by `"\xc2\xa0"`,
  // or use `reinterpret_cast`:
  // e.g. replace `u8"\u00a0"` by `reinterpret_cast<const char *>(u8"\u00a0")`
  raw_ostream &operator<<(const char8_t *Str) = delete;
#endif

  raw_ostream &operator<<(const char *Str) {
    // Inline fast path, particularly for constant strings where a sufficiently
    // smart compiler will simplify strlen.

    return this->operator<<(StringRef(Str));
  }

  raw_ostream &operator<<(const std::string &Str) {
    // Avoid the fast path, it would only increase code size for a marginal win.
    return write(Str.data(), Str.length());
  }

  raw_ostream &operator<<(const std::string_view &Str) {
    return write(Str.data(), Str.length());
  }

  raw_ostream &operator<<(const SmallVectorImpl<char> &Str) {
    return write(Str.data(), Str.size());
  }

  raw_ostream &operator<<(unsigned long N);
  raw_ostream &operator<<(long N);
  raw_ostream &operator<<(unsigned long long N);
  raw_ostream &operator<<(long long N);
  raw_ostream &operator<<(const void *P);

  raw_ostream &operator<<(unsigned int N) {
    return this->operator<<(static_cast<unsigned long>(N));
  }

  raw_ostream &operator<<(int N) {
    return this->operator<<(static_cast<long>(N));
  }

  raw_ostream &operator<<(double N);

  /// Output \p N in hexadecimal, without any prefix or padding.
  raw_ostream &write_hex(unsigned long long N);

  // Change the foreground color of text.
  raw_ostream &operator<<(Colors C);

  /// Output a formatted UUID with dash separators.
  using uuid_t = uint8_t[16];
  raw_ostream &write_uuid(const uuid_t UUID);

  /// Output \p Str, turning '\\', '\t', '\n', '"', and anything that doesn't
  /// satisfy llvm::isPrint into an escape sequence.
  raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false);

  raw_ostream &write(unsigned char C);
  raw_ostream &write(const char *Ptr, size_t Size);

  // Formatted output, see the format() function in Support/Format.h.
  raw_ostream &operator<<(const format_object_base &Fmt);

  // Formatted output, see the leftJustify() function in Support/Format.h.
  raw_ostream &operator<<(const FormattedString &);

  // Formatted output, see the formatHex() function in Support/Format.h.
  raw_ostream &operator<<(const FormattedNumber &);

  // Formatted output, see the formatv() function in Support/FormatVariadic.h.
  raw_ostream &operator<<(const formatv_object_base &);

  // Formatted output, see the format_bytes() function in Support/Format.h.
  raw_ostream &operator<<(const FormattedBytes &);
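
  // For example, combined with the Support/Format.h helpers (a sketch; Value
  // and Key are placeholders):
  //
  //   OS << format("0x%08x", Value) << '\n';
  //   OS << formatv("{0}: {1}", Key, Value) << '\n';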

  /// indent - Insert 'NumSpaces' spaces.
  raw_ostream &indent(unsigned NumSpaces);

  /// write_zeros - Insert 'NumZeros' nulls.
  raw_ostream &write_zeros(unsigned NumZeros);

  /// Changes the foreground color of text that will be output from this point
  /// forward.
  /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
  /// change only the bold attribute, and keep colors untouched
  /// @param Bold bold/brighter text, default false
  /// @param BG if true change the background, default: change foreground
  /// @returns itself so it can be used within << invocations
  virtual raw_ostream &changeColor(enum Colors Color, bool Bold = false,
                                   bool BG = false);

  /// Resets the colors to terminal defaults. Call this when you are done
  /// outputting colored text, or before program exit.
  virtual raw_ostream &resetColor();

  /// Reverses the foreground and background colors.
  virtual raw_ostream &reverseColor();

  /// This function determines if this stream is connected to a "tty" or
  /// "console" window. That is, the output would be displayed to the user
  /// rather than being put on a pipe or stored in a file.
  virtual bool is_displayed() const { return false; }

  /// This function determines if this stream is displayed and supports colors.
  /// The result is unaffected by calls to enable_color().
  virtual bool has_colors() const { return is_displayed(); }

  // Enable or disable colors. Once enable_colors(false) is called,
  // changeColor() has no effect until enable_colors(true) is called.
  virtual void enable_colors(bool enable) { ColorEnabled = enable; }

  bool colors_enabled() const { return ColorEnabled; }
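  // A minimal usage sketch (the stream and message are illustrative, not
  // part of this API): enable colors explicitly, emit a colored prefix,
  // then reset before continuing.
  //
  //   raw_ostream &OS = errs();
  //   OS.enable_colors(true);
  //   OS.changeColor(raw_ostream::RED, /*Bold=*/true) << "error: ";
  //   OS.resetColor() << "something went wrong\n";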

  /// Tie this stream to the specified stream. Replaces any existing tied-to
  /// stream. Specifying a nullptr unties the stream.
  void tie(raw_ostream *TieTo) { TiedStream = TieTo; }

  //===--------------------------------------------------------------------===//
  // Subclass Interface
  //===--------------------------------------------------------------------===//

private:
  /// This is the piece of the class that is implemented by subclasses. This
  /// writes the \p Size bytes starting at \p Ptr to the underlying stream.
  ///
  /// This function is guaranteed to only be called at a point at which it is
  /// safe for the subclass to install a new buffer via SetBuffer.
  ///
  /// \param Ptr The start of the data to be written. For buffered streams this
  /// is guaranteed to be the start of the buffer.
  ///
  /// \param Size The number of bytes to be written.
  ///
  /// \invariant { Size > 0 }
  virtual void write_impl(const char *Ptr, size_t Size) = 0;

  /// Return the current position within the stream, not counting the bytes
  /// currently in the buffer.
  virtual uint64_t current_pos() const = 0;

protected:
  /// Use the provided buffer as the raw_ostream buffer. This is intended for
  /// use only by subclasses which can arrange for the output to go directly
  /// into the desired output buffer, instead of being copied on each flush.
  void SetBuffer(char *BufferStart, size_t Size) {
    SetBufferAndMode(BufferStart, Size, BufferKind::ExternalBuffer);
  }

  /// Return an efficient buffer size for the underlying output mechanism.
  virtual size_t preferred_buffer_size() const;

  /// Return the beginning of the current stream buffer, or 0 if the stream is
  /// unbuffered.
  const char *getBufferStart() const { return OutBufStart; }

  //===--------------------------------------------------------------------===//
  // Private Interface
  //===--------------------------------------------------------------------===//
private:
  /// Install the given buffer and mode.
  void SetBufferAndMode(char *BufferStart, size_t Size, BufferKind Mode);

  /// Flush the current buffer, which is known to be non-empty. This outputs the
  /// currently buffered data and resets the buffer to empty.
  void flush_nonempty();

  /// Copy data into the buffer. Size must not be greater than the number of
  /// unused bytes in the buffer.
  void copy_to_buffer(const char *Ptr, size_t Size);

  /// Compute whether colors should be used and do the necessary work such as
  /// flushing. The result is affected by calls to enable_color().
  bool prepare_colors();

  /// Flush the tied-to stream (if present) and then write the required data.
  void flush_tied_then_write(const char *Ptr, size_t Size);

  virtual void anchor();
};

/// Call the appropriate insertion operator, given an rvalue reference to a
/// raw_ostream object and return a stream of the same type as the argument.
template <typename OStream, typename T>
std::enable_if_t<!std::is_reference_v<OStream> &&
                     std::is_base_of_v<raw_ostream, OStream>,
                 OStream &&>
operator<<(OStream &&OS, const T &Value) {
  OS << Value;
  return std::move(OS);
}
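
// For example, this allows streaming into a temporary stream object (a
// sketch; `Msg` is illustrative):
//
//   std::string Msg;
//   raw_string_ostream(Msg) << "x = " << 42; // Msg now holds "x = 42"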

/// An abstract base class for stream implementations that also support a
/// pwrite operation. This is useful for code that can mostly stream out data,
/// but needs to patch in a header that needs to know the output size.
class raw_pwrite_stream : public raw_ostream {
  virtual void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) = 0;
  void anchor() override;

public:
  explicit raw_pwrite_stream(bool Unbuffered = false,
                             OStreamKind K = OStreamKind::OK_OStream)
      : raw_ostream(Unbuffered, K) {}
  void pwrite(const char *Ptr, size_t Size, uint64_t Offset) {
#ifndef NDEBUG
    uint64_t Pos = tell();
    // /dev/null always reports a pos of 0, so we cannot perform this check
    // in that case.
    if (Pos)
      assert(Size + Offset <= Pos && "We don't support extending the stream");
#endif
    pwrite_impl(Ptr, Size, Offset);
  }
};
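
// A typical pattern (a sketch; emitBody() is a hypothetical body writer):
// reserve space for a size field, stream the body, then patch the real size
// in at the recorded offset.
//
//   uint64_t SizeOffset = OS.tell();
//   OS.write_zeros(4);      // placeholder for a 4-byte size field
//   emitBody(OS);
//   uint32_t Size = OS.tell() - SizeOffset - 4;
//   OS.pwrite(reinterpret_cast<const char *>(&Size), 4, SizeOffset);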

//===----------------------------------------------------------------------===//
// File Output Streams
//===----------------------------------------------------------------------===//

/// A raw_ostream that writes to a file descriptor.
///
class raw_fd_ostream : public raw_pwrite_stream {
  int FD;
  bool ShouldClose;
  bool SupportsSeeking = false;
  bool IsRegularFile = false;
  mutable std::optional<bool> HasColors;

#ifdef _WIN32
  /// True if this fd refers to a Windows console device. Mintty and other
  /// terminal emulators are TTYs, but they are not consoles.
  bool IsWindowsConsole = false;
#endif

  std::error_code EC;

  uint64_t pos = 0;

  /// See raw_ostream::write_impl.
  void write_impl(const char *Ptr, size_t Size) override;

  void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;

  /// Return the current position within the stream, not counting the bytes
  /// currently in the buffer.
  uint64_t current_pos() const override { return pos; }

  /// Determine an efficient buffer size.
  size_t preferred_buffer_size() const override;

  void anchor() override;

protected:
  /// Set the flag indicating that an output error has been encountered.
  void error_detected(std::error_code EC) { this->EC = EC; }

  /// Return the file descriptor.
  int get_fd() const { return FD; }

  // Update the file position by increasing \p Delta.
  void inc_pos(uint64_t Delta) { pos += Delta; }

public:
  /// Open the specified file for writing. If an error occurs, information
  /// about the error is put into EC, and the stream should be immediately
  /// destroyed.
  /// \p Flags allows optional flags to control how the file will be opened.
  ///
  /// As a special case, if Filename is "-", then the stream will use
  /// STDOUT_FILENO instead of opening a file. This will not close the stdout
  /// descriptor.
  raw_fd_ostream(StringRef Filename, std::error_code &EC);
  raw_fd_ostream(StringRef Filename, std::error_code &EC,
                 sys::fs::CreationDisposition Disp);
  raw_fd_ostream(StringRef Filename, std::error_code &EC,
                 sys::fs::FileAccess Access);
  raw_fd_ostream(StringRef Filename, std::error_code &EC,
                 sys::fs::OpenFlags Flags);
  raw_fd_ostream(StringRef Filename, std::error_code &EC,
                 sys::fs::CreationDisposition Disp, sys::fs::FileAccess Access,
                 sys::fs::OpenFlags Flags);

  /// FD is the file descriptor that this writes to.  If ShouldClose is true,
  /// this closes the file when the stream is destroyed. If FD is for stdout or
  /// stderr, it will not be closed.
  raw_fd_ostream(int fd, bool shouldClose, bool unbuffered = false,
                 OStreamKind K = OStreamKind::OK_OStream);

  ~raw_fd_ostream() override;

  /// Manually flush the stream and close the file. Note that this does not call
  /// fsync.
  void close();

  bool supportsSeeking() const { return SupportsSeeking; }

  bool isRegularFile() const { return IsRegularFile; }

  /// Flushes the stream and repositions the underlying file descriptor position
  /// to the offset specified from the beginning of the file.
  uint64_t seek(uint64_t off);

  bool is_displayed() const override;

  bool has_colors() const override;

  std::error_code error() const { return EC; }

  /// Return the value of the flag in this raw_fd_ostream indicating whether an
  /// output error has been encountered.
  /// This doesn't implicitly flush any pending output.  Also, it doesn't
  /// guarantee to detect all errors unless the stream has been closed.
  bool has_error() const { return bool(EC); }

  /// Set the flag read by has_error() to false. If the error flag is set at the
  /// time when this raw_ostream's destructor is called, report_fatal_error is
  /// called to report the error. Use clear_error() after handling the error to
  /// avoid this behavior.
  ///
  ///   "Errors should never pass silently.
  ///    Unless explicitly silenced."
  ///      - from The Zen of Python, by Tim Peters
  ///
  void clear_error() { EC = std::error_code(); }

  /// Locks the underlying file.
  ///
  /// @returns RAII object that releases the lock upon leaving the scope, if the
  ///          locking was successful. Otherwise returns corresponding
  ///          error code.
  ///
  /// The function blocks the current thread until the lock becomes available
  /// or an error occurs.
  ///
  /// Possible use of this function may be as follows:
  ///
  ///   @code{.cpp}
  ///   if (auto L = stream.lock()) {
  ///     // ... do actions that require the file to be locked.
  ///   } else {
  ///     handleAllErrors(std::move(L.takeError()), [&](ErrorInfoBase &EIB) {
  ///       // ... handle lock error.
  ///     });
  ///   }
  ///   @endcode
  [[nodiscard]] Expected<sys::fs::FileLocker> lock();

  /// Tries to lock the underlying file within the specified period.
  ///
  /// @returns RAII object that releases the lock upon leaving the scope, if the
  ///          locking was successful. Otherwise returns corresponding
  ///          error code.
  ///
  /// Usage is the same as for @ref lock.
  [[nodiscard]] Expected<sys::fs::FileLocker>
  tryLockFor(Duration const &Timeout);
};
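
// Canonical open-and-check usage (a sketch; the file name is illustrative):
//
//   std::error_code EC;
//   raw_fd_ostream OS("out.txt", EC, sys::fs::OF_Text);
//   if (EC) {
//     errs() << "cannot open out.txt: " << EC.message() << '\n';
//     return;
//   }
//   OS << "hello\n";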

/// This returns a reference to a raw_fd_ostream for standard output. Use it
/// like: outs() << "foo" << "bar";
raw_fd_ostream &outs();

/// This returns a reference to a raw_ostream for standard error.
/// Use it like: errs() << "foo" << "bar";
/// By default, the stream is tied to stdout so that stdout is flushed before
/// stderr is written, ensuring the error messages appear in their expected
/// place.
raw_fd_ostream &errs();

/// This returns a reference to a raw_ostream which simply discards output.
raw_ostream &nulls();

//===----------------------------------------------------------------------===//
// File Streams
//===----------------------------------------------------------------------===//

/// A raw_ostream of a file for reading/writing/seeking.
///
class raw_fd_stream : public raw_fd_ostream {
public:
  /// Open the specified file for reading/writing/seeking. If an error occurs,
  /// information about the error is put into EC, and the stream should be
  /// immediately destroyed.
  raw_fd_stream(StringRef Filename, std::error_code &EC);

  /// This reads \p Size bytes into a buffer pointed to by \p Ptr.
  ///
  /// \param Ptr The start of the buffer to hold data to be read.
  ///
  /// \param Size The number of bytes to be read.
  ///
  /// On success, the number of bytes read is returned, and the file position is
  /// advanced by this number. On error, -1 is returned; use error() to get
  /// the error code.
  ssize_t read(char *Ptr, size_t Size);

  /// Check whether \p OS points to a raw_fd_stream.
  static bool classof(const raw_ostream *OS);
};
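
// Usage sketch (the file name is illustrative): write, seek back, then read.
//
//   std::error_code EC;
//   raw_fd_stream S("scratch.bin", EC);
//   if (!EC) {
//     S << "abc";
//     S.seek(0);                            // flushes, then repositions
//     char Buf[3];
//     ssize_t N = S.read(Buf, sizeof(Buf)); // -1 on error; see error()
//   }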

//===----------------------------------------------------------------------===//
// Output Stream Adaptors
//===----------------------------------------------------------------------===//

/// A raw_ostream that writes to an std::string.  This is a simple adaptor
/// class. This class does not encounter output errors.
/// raw_string_ostream operates without a buffer, delegating all memory
/// management to the std::string. Thus the std::string is always up-to-date,
/// may be used directly and there is no need to call flush().
class raw_string_ostream : public raw_ostream {
  std::string &OS;

  /// See raw_ostream::write_impl.
  void write_impl(const char *Ptr, size_t Size) override;

  /// Return the current position within the stream, not counting the bytes
  /// currently in the buffer.
  uint64_t current_pos() const override { return OS.size(); }

public:
  explicit raw_string_ostream(std::string &O) : OS(O) {
    SetUnbuffered();
  }

  /// Returns the string's reference. In most cases it is better to simply use
  /// the underlying std::string directly.
  /// TODO: Consider removing this API.
  std::string &str() { return OS; }

  void reserveExtraSpace(uint64_t ExtraSize) override {
    OS.reserve(tell() + ExtraSize);
  }
};
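
// Usage sketch (`Buf` is illustrative): the string is always up-to-date, so
// no flush() call is needed.
//
//   std::string Buf;
//   raw_string_ostream OS(Buf);
//   OS << "value = " << 42;
//   // Buf now contains "value = 42" and may be used directly.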

/// A raw_ostream that writes to a SmallVector or SmallString.  This is a
/// simple adaptor class. This class does not encounter output errors.
/// raw_svector_ostream operates without a buffer, delegating all memory
/// management to the SmallString. Thus the SmallString is always up-to-date,
/// may be used directly and there is no need to call flush().
class raw_svector_ostream : public raw_pwrite_stream {
  SmallVectorImpl<char> &OS;

  /// See raw_ostream::write_impl.
  void write_impl(const char *Ptr, size_t Size) override;

  void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;

  /// Return the current position within the stream.
  uint64_t current_pos() const override;

public:
  /// Construct a new raw_svector_ostream.
  ///
  /// \param O The vector to write to; this should generally have at least 128
  /// bytes free to avoid any extraneous memory overhead.
  explicit raw_svector_ostream(SmallVectorImpl<char> &O) : OS(O) {
    SetUnbuffered();
  }

  ~raw_svector_ostream() override = default;

  void flush() = delete;

  /// Return a StringRef for the vector contents.
  StringRef str() const { return StringRef(OS.data(), OS.size()); }

  void reserveExtraSpace(uint64_t ExtraSize) override {
    OS.reserve(tell() + ExtraSize);
  }
};
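
// Usage sketch with a SmallString (the capacity is illustrative):
//
//   SmallString<128> Buf;
//   raw_svector_ostream OS(Buf);
//   OS << "id" << 7;
//   StringRef S = OS.str(); // "id7"; no flush needed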

/// A raw_ostream that discards all output.
class raw_null_ostream : public raw_pwrite_stream {
  /// See raw_ostream::write_impl.
  void write_impl(const char *Ptr, size_t size) override;
  void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;

  /// Return the current position within the stream, not counting the bytes
  /// currently in the buffer.
  uint64_t current_pos() const override;

public:
  explicit raw_null_ostream() = default;
  ~raw_null_ostream() override;
};

class buffer_ostream : public raw_svector_ostream {
  raw_ostream &OS;
  SmallVector<char, 0> Buffer;

  void anchor() override;

public:
  buffer_ostream(raw_ostream &OS) : raw_svector_ostream(Buffer), OS(OS) {}
  ~buffer_ostream() override { OS << str(); }
};

class buffer_unique_ostream : public raw_svector_ostream {
  std::unique_ptr<raw_ostream> OS;
  SmallVector<char, 0> Buffer;

  void anchor() override;

public:
  buffer_unique_ostream(std::unique_ptr<raw_ostream> OS)
      : raw_svector_ostream(Buffer), OS(std::move(OS)) {
    // Turn off buffering on OS, which we now own, to avoid allocating a buffer
    // when the destructor writes only to be immediately flushed again.
    this->OS->SetUnbuffered();
  }
  ~buffer_unique_ostream() override { *OS << str(); }
};

class Error;

/// This helper creates an output stream and then passes it to \p Write.
/// The stream created is based on the specified \p OutputFileName:
/// llvm::outs for "-", raw_null_ostream for "/dev/null", and raw_fd_ostream
/// for other names. For raw_fd_ostream instances, the stream writes to
/// a temporary file. The final output file is atomically replaced with the
/// temporary file after the \p Write function is finished.
Error writeToOutput(StringRef OutputFileName,
                    std::function<Error(raw_ostream &)> Write);
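
// Usage sketch (the output name is illustrative):
//
//   Error E = writeToOutput("result.bin", [](raw_ostream &OS) -> Error {
//     OS << "payload";
//     return Error::success();
//   });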

raw_ostream &operator<<(raw_ostream &OS, std::nullopt_t);

template <typename T, typename = decltype(std::declval<raw_ostream &>()
                                          << std::declval<const T &>())>
raw_ostream &operator<<(raw_ostream &OS, const std::optional<T> &O) {
  if (O)
    OS << *O;
  else
    OS << std::nullopt;
  return OS;
}

} // end namespace llvm

#endif // LLVM_SUPPORT_RAW_OSTREAM_H

//===--- JSON.h - JSON values, parsing and serialization -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
///
/// \file
/// This file supports working with JSON data.
///
/// It comprises:
///
/// - classes which hold dynamically-typed parsed JSON structures
///   These are value types that can be composed, inspected, and modified.
///   See json::Value, and the related types json::Object and json::Array.
///
/// - functions to parse JSON text into Values, and to serialize Values to text.
///   See parse(), operator<<, and format_provider.
///
/// - a convention and helpers for mapping between json::Value and user-defined
///   types. See fromJSON(), ObjectMapper, and the class comment on Value.
///
/// - an output API json::OStream which can emit JSON without materializing
///   all structures as json::Value.
///
/// Typically, JSON data would be read from an external source, parsed into
/// a Value, and then converted into some native data structure before doing
/// real work on it. (And vice versa when writing).
///
/// Other serialization mechanisms you may consider:
///
/// - YAML is also text-based, and more human-readable than JSON. It's a more
///   complex format and data model, and YAML parsers aren't ubiquitous.
///   YAMLParser.h is a streaming parser suitable for parsing large documents
///   (including JSON, as YAML is a superset). It can be awkward to use
///   directly. YAML I/O (YAMLTraits.h) provides data mapping that is more
///   declarative than the toJSON/fromJSON conventions here.
///
/// - LLVM bitstream is a space- and CPU- efficient binary format. Typically it
///   encodes LLVM IR ("bitcode"), but it can be a container for other data.
///   Low-level reader/writer libraries are in Bitstream/Bitstream*.h
///
//===---------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_JSON_H
#define LLVM_SUPPORT_JSON_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include <cmath>
#include <map>

namespace llvm {
namespace json {

// === String encodings ===
//
// JSON strings are character sequences (not byte sequences like std::string).
// We need to know the encoding, and for simplicity only support UTF-8.
//
//   - When parsing, invalid UTF-8 is a syntax error like any other
//
//   - When creating Values from strings, callers must ensure they are UTF-8.
//        with asserts on, invalid UTF-8 will crash the program
//        with asserts off, we'll substitute the replacement character (U+FFFD)
//     Callers can use json::isUTF8() and json::fixUTF8() for validation.
//
//   - When retrieving strings from Values (e.g. asString()), the result will
//     always be valid UTF-8.

template <typename T>
constexpr bool is_uint_64_bit_v =
    std::is_integral_v<T> && std::is_unsigned_v<T> &&
    sizeof(T) == sizeof(uint64_t);

/// Returns true if \p S is valid UTF-8, which is required for use as JSON.
/// If it returns false, \p ErrOffset is set to a byte offset near the first
/// error.
bool isUTF8(llvm::StringRef S, size_t *ErrOffset = nullptr);
/// Replaces invalid UTF-8 sequences in \p S with the replacement character
/// (U+FFFD). The returned string is valid UTF-8.
/// This is much slower than isUTF8, so test that first.
std::string fixUTF8(llvm::StringRef S);
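
// Validation sketch (`S` is an illustrative StringRef): check with isUTF8
// first and repair only on failure, since fixUTF8 is much slower.
//
//   std::string Owned = S.str();
//   if (LLVM_UNLIKELY(!json::isUTF8(Owned)))
//     Owned = json::fixUTF8(Owned);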

class Array;
class ObjectKey;
class Value;
template <typename T> Value toJSON(const std::optional<T> &Opt);

/// An Object is a JSON object, which maps strings to heterogeneous JSON
/// values.
/// It simulates DenseMap<ObjectKey, Value>. ObjectKey is a maybe-owned string.
class Object {
  using Storage = DenseMap<ObjectKey, Value, llvm::DenseMapInfo<StringRef>>;
  Storage M;

public:
  using key_type = ObjectKey;
  using mapped_type = Value;
  using value_type = Storage::value_type;
  using iterator = Storage::iterator;
  using const_iterator = Storage::const_iterator;

  Object() = default;
  // KV is a trivial key-value struct for list-initialization.
  // (using std::pair forces extra copies).
  struct KV;
  explicit Object(std::initializer_list<KV> Properties);

  iterator begin() { return M.begin(); }
  const_iterator begin() const { return M.begin(); }
  iterator end() { return M.end(); }
  const_iterator end() const { return M.end(); }

  bool empty() const { return M.empty(); }
  size_t size() const { return M.size(); }

  void clear() { M.clear(); }
  std::pair<iterator, bool> insert(KV E);
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const ObjectKey &K, Ts &&... Args) {
    return M.try_emplace(K, std::forward<Ts>(Args)...);
  }
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(ObjectKey &&K, Ts &&... Args) {
    return M.try_emplace(std::move(K), std::forward<Ts>(Args)...);
  }
  bool erase(StringRef K);
  void erase(iterator I) { M.erase(I); }

  iterator find(StringRef K) { return M.find_as(K); }
  const_iterator find(StringRef K) const { return M.find_as(K); }
  // operator[] acts as if Value was default-constructible as null.
  Value &operator[](const ObjectKey &K);
  Value &operator[](ObjectKey &&K);
  // Look up a property, returning nullptr if it doesn't exist.
  Value *get(StringRef K);
  const Value *get(StringRef K) const;
  // Typed accessors return std::nullopt/nullptr if
  //   - the property doesn't exist
  //   - or it has the wrong type
  std::optional<std::nullptr_t> getNull(StringRef K) const;
  std::optional<bool> getBoolean(StringRef K) const;
  std::optional<double> getNumber(StringRef K) const;
  std::optional<int64_t> getInteger(StringRef K) const;
  std::optional<llvm::StringRef> getString(StringRef K) const;
  const json::Object *getObject(StringRef K) const;
  json::Object *getObject(StringRef K);
  const json::Array *getArray(StringRef K) const;
  json::Array *getArray(StringRef K);
};
bool operator==(const Object &LHS, const Object &RHS);
inline bool operator!=(const Object &LHS, const Object &RHS) {
  return !(LHS == RHS);
}
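
// Usage sketch (keys and values are illustrative): list-initialize, then use
// the typed accessors, which yield std::nullopt/nullptr on a missing key or
// a type mismatch.
//
//   json::Object O{{"name", "widget"}, {"size", 42}};
//   if (std::optional<int64_t> Size = O.getInteger("size"))
//     process(*Size); // process() is a hypothetical consumer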

/// An Array is a JSON array, which contains heterogeneous JSON values.
/// It simulates std::vector<Value>.
class Array {
  std::vector<Value> V;

public:
  using value_type = Value;
  using iterator = std::vector<Value>::iterator;
  using const_iterator = std::vector<Value>::const_iterator;

  Array() = default;
  explicit Array(std::initializer_list<Value> Elements);
  template <typename Collection> explicit Array(const Collection &C) {
    for (const auto &V : C)
      emplace_back(V);
  }

  Value &operator[](size_t I);
  const Value &operator[](size_t I) const;
  Value &front();
  const Value &front() const;
  Value &back();
  const Value &back() const;
  Value *data();
  const Value *data() const;

  iterator begin();
  const_iterator begin() const;
  iterator end();
  const_iterator end() const;

  bool empty() const;
  size_t size() const;
  void reserve(size_t S);

  void clear();
  void push_back(const Value &E);
  void push_back(Value &&E);
  template <typename... Args> void emplace_back(Args &&...A);
  void pop_back();
  iterator insert(const_iterator P, const Value &E);
  iterator insert(const_iterator P, Value &&E);
  template <typename It> iterator insert(const_iterator P, It A, It Z);
  template <typename... Args> iterator emplace(const_iterator P, Args &&...A);

  friend bool operator==(const Array &L, const Array &R);
};
inline bool operator!=(const Array &L, const Array &R) { return !(L == R); }

/// A Value is a JSON value of unknown type.
/// They can be copied, but should generally be moved.
///
/// === Composing values ===
///
/// You can implicitly construct Values from:
///   - strings: std::string, SmallString, formatv, StringRef, char*
///              (char*, and StringRef are references, not copies!)
///   - numbers
///   - booleans
///   - null: nullptr
///   - arrays: {"foo", 42.0, false}
///   - serializable things: types with toJSON(const T&)->Value, found by ADL
///
/// They can also be constructed from object/array helpers:
///   - json::Object is a type like map<ObjectKey, Value>
///   - json::Array is a type like vector<Value>
/// These can be list-initialized, or used to build up collections in a loop.
/// json::Array(Collection) converts all items in a collection to Values.
///
/// === Inspecting values ===
///
/// Each Value is one of the JSON kinds:
///   null    (nullptr_t)
///   boolean (bool)
///   number  (double, int64 or uint64)
///   string  (StringRef)
///   array   (json::Array)
///   object  (json::Object)
///
/// The kind can be queried directly, or implicitly via the typed accessors:
///   if (std::optional<StringRef> S = E.getAsString())
///     assert(E.kind() == Value::String);
///
/// Array and Object also have typed indexing accessors for easy traversal:
///   Expected<Value> E = parse(R"( {"options": {"font": "sans-serif"}} )");
///   if (Object* O = E->getAsObject())
///     if (Object* Opts = O->getObject("options"))
///       if (std::optional<StringRef> Font = Opts->getString("font"))
///         assert(Opts->get("font")->kind() == Value::String);
///
/// === Converting JSON values to C++ types ===
///
/// The convention is to have a deserializer function findable via ADL:
///     fromJSON(const json::Value&, T&, Path) -> bool
///
/// The return value indicates overall success, and Path is used for precise
/// error reporting. (The Path::Root passed in at the top level fromJSON call
/// captures any nested error and can render it in context).
/// If conversion fails, fromJSON calls Path::report() and immediately returns.
/// This ensures that the first fatal error survives.
///
/// Deserializers are provided for:
///   - bool
///   - int and int64_t
///   - double
///   - std::string
///   - vector<T>, where T is deserializable
///   - map<string, T>, where T is deserializable
///   - std::optional<T>, where T is deserializable
/// ObjectMapper can help writing fromJSON() functions for object types.
///
/// For conversion in the other direction, the serializer function is:
///    toJSON(const T&) -> json::Value
/// If this exists, then it also allows constructing Value from T, and can
/// be used to serialize vector<T>, map<string, T>, and std::optional<T>.
///
/// === Serialization ===
///
/// Values can be serialized to JSON:
///   1) raw_ostream << Value                    // Basic formatting.
///   2) raw_ostream << formatv("{0}", Value)    // Basic formatting.
///   3) raw_ostream << formatv("{0:2}", Value)  // Pretty-print with indent 2.
///
/// And parsed:
///   Expected<Value> E = json::parse("[1, 2, null]");
///   assert(E && E->kind() == Value::Array);
class Value {
public:
  enum Kind {
    Null,
    Boolean,
    /// Number values can store both int64s and doubles at full precision,
    /// depending on what they were constructed/parsed from.
    Number,
    String,
    Array,
    Object,
  };

  // It would be nice to have Value() be null. But that would make {} null too.
  Value(const Value &M) { copyFrom(M); }
  Value(Value &&M) { moveFrom(std::move(M)); }
  Value(std::initializer_list<Value> Elements);
  Value(json::Array &&Elements) : Type(T_Array) {
    create<json::Array>(std::move(Elements));
  }
  template <typename Elt>
  Value(const std::vector<Elt> &C) : Value(json::Array(C)) {}
  Value(json::Object &&Properties) : Type(T_Object) {
    create<json::Object>(std::move(Properties));
  }
  template <typename Elt>
  Value(const std::map<std::string, Elt> &C) : Value(json::Object(C)) {}
  // Strings: types with value semantics. Must be valid UTF-8.
  Value(std::string V) : Type(T_String) {
    if (LLVM_UNLIKELY(!isUTF8(V))) {
      assert(false && "Invalid UTF-8 in value used as JSON");
      V = fixUTF8(std::move(V));
    }
    create<std::string>(std::move(V));
  }
  Value(const llvm::SmallVectorImpl<char> &V)
      : Value(std::string(V.begin(), V.end())) {}
  Value(const llvm::formatv_object_base &V) : Value(V.str()) {}
  // Strings: types with reference semantics. Must be valid UTF-8.
  Value(StringRef V) : Type(T_StringRef) {
    create<llvm::StringRef>(V);
    if (LLVM_UNLIKELY(!isUTF8(V))) {
      assert(false && "Invalid UTF-8 in value used as JSON");
      *this = Value(fixUTF8(V));
    }
  }
  Value(const char *V) : Value(StringRef(V)) {}
  Value(std::nullptr_t) : Type(T_Null) {}
  // Boolean (disallow implicit conversions).
  // (The last template parameter is a dummy to keep templates distinct.)
  template <typename T, typename = std::enable_if_t<std::is_same_v<T, bool>>,
            bool = false>
  Value(T B) : Type(T_Boolean) {
    create<bool>(B);
  }

  // Unsigned 64-bit integers.
  template <typename T, typename = std::enable_if_t<is_uint_64_bit_v<T>>>
  Value(T V) : Type(T_UINT64) {
    create<uint64_t>(uint64_t{V});
  }

  // Integers (except boolean and uint64_t).
  // Must be non-narrowing convertible to int64_t.
  template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>,
            typename = std::enable_if_t<!std::is_same_v<T, bool>>,
            typename = std::enable_if_t<!is_uint_64_bit_v<T>>>
  Value(T I) : Type(T_Integer) {
    create<int64_t>(int64_t{I});
  }
  // Floating point. Must be non-narrowing convertible to double.
  template <typename T,
            typename = std::enable_if_t<std::is_floating_point_v<T>>,
            double * = nullptr>
  Value(T D) : Type(T_Double) {
    create<double>(double{D});
  }
  // Serializable types: with a toJSON(const T&)->Value function, found by ADL.
  template <typename T,
            typename = std::enable_if_t<
                std::is_same_v<Value, decltype(toJSON(*(const T *)nullptr))>>,
            Value * = nullptr>
  Value(const T &V) : Value(toJSON(V)) {}

  Value &operator=(const Value &M) {
    destroy();
    copyFrom(M);
    return *this;
  }
  Value &operator=(Value &&M) {
    destroy();
    moveFrom(std::move(M));
    return *this;
  }
  ~Value() { destroy(); }

  Kind kind() const {
    switch (Type) {
    case T_Null:
      return Null;
    case T_Boolean:
      return Boolean;
    case T_Double:
    case T_Integer:
    case T_UINT64:
      return Number;
    case T_String:
    case T_StringRef:
      return String;
    case T_Object:
      return Object;
    case T_Array:
      return Array;
    }
    llvm_unreachable("Unknown kind");
  }

  // Typed accessors return std::nullopt/nullptr if the Value is not of this
  // type.
  std::optional<std::nullptr_t> getAsNull() const {
    if (LLVM_LIKELY(Type == T_Null))
      return nullptr;
    return std::nullopt;
  }
  std::optional<bool> getAsBoolean() const {
    if (LLVM_LIKELY(Type == T_Boolean))
      return as<bool>();
    return std::nullopt;
  }
  std::optional<double> getAsNumber() const {
    if (LLVM_LIKELY(Type == T_Double))
      return as<double>();
    if (LLVM_LIKELY(Type == T_Integer))
      return as<int64_t>();
    if (LLVM_LIKELY(Type == T_UINT64))
      return as<uint64_t>();
    return std::nullopt;
  }
  // Succeeds if the Value is a Number, and exactly representable as int64_t.
  std::optional<int64_t> getAsInteger() const {
    if (LLVM_LIKELY(Type == T_Integer))
      return as<int64_t>();
    if (LLVM_LIKELY(Type == T_UINT64)) {
      uint64_t U = as<uint64_t>();
      if (LLVM_LIKELY(U <= uint64_t(std::numeric_limits<int64_t>::max()))) {
        return U;
      }
    }
    if (LLVM_LIKELY(Type == T_Double)) {
      double D = as<double>();
      if (LLVM_LIKELY(std::modf(D, &D) == 0.0 &&
                      D >= double(std::numeric_limits<int64_t>::min()) &&
                      D <= double(std::numeric_limits<int64_t>::max())))
        return D;
    }
    return std::nullopt;
  }
  std::optional<uint64_t> getAsUINT64() const {
    if (Type == T_UINT64)
      return as<uint64_t>();
    else if (Type == T_Integer) {
      int64_t N = as<int64_t>();
      if (N >= 0)
        return as<uint64_t>();
    }
    return std::nullopt;
  }
  std::optional<llvm::StringRef> getAsString() const {
    if (Type == T_String)
      return llvm::StringRef(as<std::string>());
    if (LLVM_LIKELY(Type == T_StringRef))
      return as<llvm::StringRef>();
    return std::nullopt;
  }
  const json::Object *getAsObject() const {
    return LLVM_LIKELY(Type == T_Object) ? &as<json::Object>() : nullptr;
  }
  json::Object *getAsObject() {
    return LLVM_LIKELY(Type == T_Object) ? &as<json::Object>() : nullptr;
  }
  const json::Array *getAsArray() const {
    return LLVM_LIKELY(Type == T_Array) ? &as<json::Array>() : nullptr;
  }
  json::Array *getAsArray() {
    return LLVM_LIKELY(Type == T_Array) ? &as<json::Array>() : nullptr;
  }

private:
  void destroy();
  void copyFrom(const Value &M);
  // We allow moving from *const* Values, by marking all members as mutable!
  // This hack is needed to support initializer-list syntax efficiently.
  // (std::initializer_list<T> is a container of const T).
  void moveFrom(const Value &&M);
  friend class Array;
  friend class Object;

  template <typename T, typename... U> void create(U &&... V) {
    new (reinterpret_cast<T *>(&Union)) T(std::forward<U>(V)...);
  }
  template <typename T> T &as() const {
    // Using this two-step static_cast via void * instead of reinterpret_cast
    // silences a -Wstrict-aliasing false positive from GCC6 and earlier.
    void *Storage = static_cast<void *>(&Union);
    return *static_cast<T *>(Storage);
  }

  friend class OStream;

  enum ValueType : char16_t {
    T_Null,
    T_Boolean,
    T_Double,
    T_Integer,
    T_UINT64,
    T_StringRef,
    T_String,
    T_Object,
    T_Array,
  };
  // All members mutable, see moveFrom().
  mutable ValueType Type;
  mutable llvm::AlignedCharArrayUnion<bool, double, int64_t, uint64_t,
                                      llvm::StringRef, std::string, json::Array,
                                      json::Object>
      Union;
  friend bool operator==(const Value &, const Value &);
};

bool operator==(const Value &, const Value &);
inline bool operator!=(const Value &L, const Value &R) { return !(L == R); }
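
// Composition and inspection sketch (the contents are illustrative):
//
//   json::Value V = json::Object{
//       {"enabled", true},
//       {"ports", json::Array{80, 443}},
//   };
//   if (const json::Object *O = V.getAsObject())
//     if (const json::Array *Ports = O->getArray("ports"))
//       std::optional<int64_t> First = (*Ports)[0].getAsInteger();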

// Array Methods
inline Value &Array::operator[](size_t I) { return V[I]; }
inline const Value &Array::operator[](size_t I) const { return V[I]; }
inline Value &Array::front() { return V.front(); }
inline const Value &Array::front() const { return V.front(); }
inline Value &Array::back() { return V.back(); }
inline const Value &Array::back() const { return V.back(); }
inline Value *Array::data() { return V.data(); }
inline const Value *Array::data() const { return V.data(); }

inline typename Array::iterator Array::begin() { return V.begin(); }
inline typename Array::const_iterator Array::begin() const { return V.begin(); }
inline typename Array::iterator Array::end() { return V.end(); }
inline typename Array::const_iterator Array::end() const { return V.end(); }

inline bool Array::empty() const { return V.empty(); }
inline size_t Array::size() const { return V.size(); }
inline void Array::reserve(size_t S) { V.reserve(S); }

inline void Array::clear() { V.clear(); }
inline void Array::push_back(const Value &E) { V.push_back(E); }
inline void Array::push_back(Value &&E) { V.push_back(std::move(E)); }
template <typename... Args> inline void Array::emplace_back(Args &&...A) {
  V.emplace_back(std::forward<Args>(A)...);
}
inline void Array::pop_back() { V.pop_back(); }
inline typename Array::iterator Array::insert(const_iterator P, const Value &E) {
  return V.insert(P, E);
}
inline typename Array::iterator Array::insert(const_iterator P, Value &&E) {
  return V.insert(P, std::move(E));
}
template <typename It>
inline typename Array::iterator Array::insert(const_iterator P, It A, It Z) {
  return V.insert(P, A, Z);
}
template <typename... Args>
inline typename Array::iterator Array::emplace(const_iterator P, Args &&...A) {
  return V.emplace(P, std::forward<Args>(A)...);
}
inline bool operator==(const Array &L, const Array &R) { return L.V == R.V; }

/// ObjectKey is used to capture keys in Object. Like Value but:
///   - only strings are allowed
///   - it's optimized for the string literal case (Owned == nullptr)
/// Like Value, strings must be UTF-8. See isUTF8 documentation for details.
class ObjectKey {
public:
  ObjectKey(const char *S) : ObjectKey(StringRef(S)) {}
  ObjectKey(std::string S) : Owned(new std::string(std::move(S))) {
    if (LLVM_UNLIKELY(!isUTF8(*Owned))) {
      assert(false && "Invalid UTF-8 in value used as JSON");
      *Owned = fixUTF8(std::move(*Owned));
    }
    Data = *Owned;
  }
  ObjectKey(llvm::StringRef S) : Data(S) {
    if (LLVM_UNLIKELY(!isUTF8(Data))) {
      assert(false && "Invalid UTF-8 in value used as JSON");
      *this = ObjectKey(fixUTF8(S));
    }
  }
  ObjectKey(const llvm::SmallVectorImpl<char> &V)
      : ObjectKey(std::string(V.begin(), V.end())) {}
  ObjectKey(const llvm::formatv_object_base &V) : ObjectKey(V.str()) {}

  ObjectKey(const ObjectKey &C) { *this = C; }
  ObjectKey(ObjectKey &&C) : ObjectKey(static_cast<const ObjectKey &&>(C)) {}
  ObjectKey &operator=(const ObjectKey &C) {
    if (C.Owned) {
      Owned.reset(new std::string(*C.Owned));
      Data = *Owned;
    } else {
      Data = C.Data;
    }
    return *this;
  }
  ObjectKey &operator=(ObjectKey &&) = default;

  operator llvm::StringRef() const { return Data; }
  std::string str() const { return Data.str(); }

private:
  // FIXME: this is unnecessarily large (3 pointers). Pointer + length + owned
  // could be 2 pointers at most.
  std::unique_ptr<std::string> Owned;
  llvm::StringRef Data;
};

inline bool operator==(const ObjectKey &L, const ObjectKey &R) {
  return llvm::StringRef(L) == llvm::StringRef(R);
}
inline bool operator!=(const ObjectKey &L, const ObjectKey &R) {
  return !(L == R);
}
inline bool operator<(const ObjectKey &L, const ObjectKey &R) {
  return StringRef(L) < StringRef(R);
}

struct Object::KV {
  ObjectKey K;
  Value V;
};

inline Object::Object(std::initializer_list<KV> Properties) {
  for (const auto &P : Properties) {
    auto R = try_emplace(P.K, nullptr);
    if (R.second)
      R.first->getSecond().moveFrom(std::move(P.V));
  }
}
inline std::pair<Object::iterator, bool> Object::insert(KV E) {
  return try_emplace(std::move(E.K), std::move(E.V));
}
inline bool Object::erase(StringRef K) {
  return M.erase(ObjectKey(K));
}

/// A "cursor" marking a position within a Value.
/// The Value is a tree, and this is the path from the root to the current node.
/// This is used to associate errors with particular subobjects.
class Path {
public:
  class Root;

  /// Records that the value at the current path is invalid.
  /// Message is e.g. "expected number" and becomes part of the final error.
  /// This overwrites any previously written error message in the root.
  void report(llvm::StringLiteral Message);

  /// The root may be treated as a Path.
  Path(Root &R) : Parent(nullptr), Seg(&R) {}
  /// Derives a path for an array element: this[Index]
  Path index(unsigned Index) const { return Path(this, Segment(Index)); }
  /// Derives a path for an object field: this.Field
  Path field(StringRef Field) const { return Path(this, Segment(Field)); }

private:
  /// One element in a JSON path: an object field (.foo) or array index [27].
  /// Exception: the root Path encodes a pointer to the Path::Root.
  class Segment {
    uintptr_t Pointer;
    unsigned Offset;

  public:
    Segment() = default;
    Segment(Root *R) : Pointer(reinterpret_cast<uintptr_t>(R)) {}
    Segment(llvm::StringRef Field)
        : Pointer(reinterpret_cast<uintptr_t>(Field.data())),
          Offset(static_cast<unsigned>(Field.size())) {}
    Segment(unsigned Index) : Pointer(0), Offset(Index) {}

    bool isField() const { return Pointer != 0; }
    StringRef field() const {
      return StringRef(reinterpret_cast<const char *>(Pointer), Offset);
    }
    unsigned index() const { return Offset; }
    Root *root() const { return reinterpret_cast<Root *>(Pointer); }
  };

  const Path *Parent;
  Segment Seg;

  Path(const Path *Parent, Segment S) : Parent(Parent), Seg(S) {}
};

/// The root is the trivial Path to the root value.
/// It also stores the latest reported error and the path where it occurred.
class Path::Root {
  llvm::StringRef Name;
  llvm::StringLiteral ErrorMessage;
  std::vector<Path::Segment> ErrorPath; // Only valid in error state. Reversed.

  friend void Path::report(llvm::StringLiteral Message);

public:
  Root(llvm::StringRef Name = "") : Name(Name), ErrorMessage("") {}
  // No copy/move allowed as there are incoming pointers.
  Root(Root &&) = delete;
  Root &operator=(Root &&) = delete;
  Root(const Root &) = delete;
  Root &operator=(const Root &) = delete;

  /// Returns the last error reported, or else a generic error.
  Error getError() const;
  /// Print the root value with the error shown inline as a comment.
  /// Unrelated parts of the value are elided for brevity, e.g.
  ///   {
  ///      "id": 42,
  ///      "name": /* expected string */ null,
  ///      "properties": { ... }
  ///   }
  void printErrorContext(const Value &, llvm::raw_ostream &) const;
};

// Standard deserializers are provided for primitive types.
// See comments on Value.
inline bool fromJSON(const Value &E, std::string &Out, Path P) {
  if (auto S = E.getAsString()) {
    Out = std::string(*S);
    return true;
  }
  P.report("expected string");
  return false;
}
inline bool fromJSON(const Value &E, int &Out, Path P) {
  if (auto S = E.getAsInteger()) {
    Out = *S;
    return true;
  }
  P.report("expected integer");
  return false;
}
inline bool fromJSON(const Value &E, int64_t &Out, Path P) {
  if (auto S = E.getAsInteger()) {
    Out = *S;
    return true;
  }
  P.report("expected integer");
  return false;
}
inline bool fromJSON(const Value &E, double &Out, Path P) {
  if (auto S = E.getAsNumber()) {
    Out = *S;
    return true;
  }
  P.report("expected number");
  return false;
}
inline bool fromJSON(const Value &E, bool &Out, Path P) {
  if (auto S = E.getAsBoolean()) {
    Out = *S;
    return true;
  }
  P.report("expected boolean");
  return false;
}
inline bool fromJSON(const Value &E, uint64_t &Out, Path P) {
  if (auto S = E.getAsUINT64()) {
    Out = *S;
    return true;
  }
  P.report("expected uint64_t");
  return false;
}
inline bool fromJSON(const Value &E, std::nullptr_t &Out, Path P) {
  if (auto S = E.getAsNull()) {
    Out = *S;
    return true;
  }
  P.report("expected null");
  return false;
}
template <typename T>
bool fromJSON(const Value &E, std::optional<T> &Out, Path P) {
  if (E.getAsNull()) {
    Out = std::nullopt;
    return true;
  }
  T Result = {};
  if (!fromJSON(E, Result, P))
    return false;
  Out = std::move(Result);
  return true;
}
template <typename T>
bool fromJSON(const Value &E, std::vector<T> &Out, Path P) {
  if (auto *A = E.getAsArray()) {
    Out.clear();
    Out.resize(A->size());
    for (size_t I = 0; I < A->size(); ++I)
      if (!fromJSON((*A)[I], Out[I], P.index(I)))
        return false;
    return true;
  }
  P.report("expected array");
  return false;
}
template <typename T>
bool fromJSON(const Value &E, std::map<std::string, T> &Out, Path P) {
  if (auto *O = E.getAsObject()) {
    Out.clear();
    for (const auto &KV : *O)
      if (!fromJSON(KV.second, Out[std::string(llvm::StringRef(KV.first))],
                    P.field(KV.first)))
        return false;
    return true;
  }
  P.report("expected object");
  return false;
}

// Allow serialization of std::optional<T> for supported T.
template <typename T> Value toJSON(const std::optional<T> &Opt) {
  return Opt ? Value(*Opt) : Value(nullptr);
}

/// Helper for mapping JSON objects onto protocol structs.
///
/// Example:
/// \code
///   bool fromJSON(const Value &E, MyStruct &R, Path P) {
///     ObjectMapper O(E, P);
///     // When returning false, error details were already reported.
///     return O && O.map("mandatory_field", R.MandatoryField) &&
///         O.mapOptional("optional_field", R.OptionalField);
///   }
/// \endcode
class ObjectMapper {
public:
  /// If O is not an object, this mapper is invalid and an error is reported.
  ObjectMapper(const Value &E, Path P) : O(E.getAsObject()), P(P) {
    if (!O)
      P.report("expected object");
  }

  /// True if the expression is an object.
  /// Must be checked before calling map().
  operator bool() const { return O; }

  /// Maps a property to a field.
  /// If the property is missing or invalid, reports an error.
  template <typename T> bool map(StringLiteral Prop, T &Out) {
    assert(*this && "Must check this is an object before calling map()");
    if (const Value *E = O->get(Prop))
      return fromJSON(*E, Out, P.field(Prop));
    P.field(Prop).report("missing value");
    return false;
  }

  /// Maps a property to a field, if it exists.
  /// If the property exists and is invalid, reports an error.
  /// (Optional requires special handling, because missing keys are OK).
  template <typename T> bool map(StringLiteral Prop, std::optional<T> &Out) {
    assert(*this && "Must check this is an object before calling map()");
    if (const Value *E = O->get(Prop))
      return fromJSON(*E, Out, P.field(Prop));
    Out = std::nullopt;
    return true;
  }

  /// Maps a property to a field, if it exists.
  /// If the property exists and is invalid, reports an error.
  /// If the property does not exist, Out is unchanged.
  template <typename T> bool mapOptional(StringLiteral Prop, T &Out) {
    assert(*this && "Must check this is an object before calling map()");
    if (const Value *E = O->get(Prop))
      return fromJSON(*E, Out, P.field(Prop));
    return true;
  }

private:
  const Object *O;
  Path P;
};

/// Parses the provided JSON source, or returns a ParseError.
/// The returned Value is self-contained and owns its strings (they do not refer
/// to the original source).
llvm::Expected<Value> parse(llvm::StringRef JSON);

class ParseError : public llvm::ErrorInfo<ParseError> {
  const char *Msg;
  unsigned Line, Column, Offset;

public:
  static char ID;
  ParseError(const char *Msg, unsigned Line, unsigned Column, unsigned Offset)
      : Msg(Msg), Line(Line), Column(Column), Offset(Offset) {}
  void log(llvm::raw_ostream &OS) const override {
    OS << llvm::formatv("[{0}:{1}, byte={2}]: {3}", Line, Column, Offset, Msg);
  }
  std::error_code convertToErrorCode() const override {
    return llvm::inconvertibleErrorCode();
  }
};

/// Version of parse() that converts the parsed value to the type T.
/// RootName describes the root object and is used in error messages.
template <typename T>
Expected<T> parse(const llvm::StringRef &JSON, const char *RootName = "") {
  auto V = parse(JSON);
  if (!V)
    return V.takeError();
  Path::Root R(RootName);
  T Result;
  if (fromJSON(*V, Result, R))
    return std::move(Result);
  return R.getError();
}
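
// For example (a sketch; Config is a hypothetical type with a fromJSON
// deserializer, and Text is an illustrative StringRef):
//
//   Expected<Config> C = json::parse<Config>(Text, "config");
//   if (!C)
//     return C.takeError();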

/// json::OStream allows writing well-formed JSON without materializing
/// all structures as json::Value ahead of time.
/// It's faster, lower-level, and less safe than OS << json::Value.
/// It also allows emitting more constructs, such as comments.
///
/// Only one "top-level" object can be written to a stream.
/// Simplest usage involves passing lambdas (Blocks) to fill in containers:
///
///   json::OStream J(OS);
///   J.array([&]{
///     for (const Event &E : Events)
///       J.object([&] {
///         J.attribute("timestamp", int64_t(E.Time));
///         J.attributeArray("participants", [&] {
///           for (const Participant &P : E.Participants)
///             J.value(P.toString());
///         });
///       });
///   });
///
/// This would produce JSON like:
///
///   [
///     {
///       "timestamp": 19287398741,
///       "participants": [
///         "King Kong",
///         "Miley Cyrus",
///         "Cleopatra"
///       ]
///     },
///     ...
///   ]
///
/// The lower level begin/end methods (arrayBegin()) are more flexible but
/// care must be taken to pair them correctly:
///
///   json::OStream J(OS);
///   J.arrayBegin();
///   for (const Event &E : Events) {
///     J.objectBegin();
///     J.attribute("timestamp", int64_t(E.Time));
///     J.attributeBegin("participants");
///     for (const Participant &P : E.Participants)
///       J.value(P.toString());
///     J.attributeEnd();
///     J.objectEnd();
///   }
///   J.arrayEnd();
///
/// If the call sequence isn't valid JSON, asserts will fire in debug mode.
/// This can be mismatched begin()/end() pairs, trying to emit attributes inside
/// an array, and so on.
/// With asserts disabled, this is undefined behavior.
class OStream {
public:
  using Block = llvm::function_ref<void()>;
  // If IndentSize is nonzero, output is pretty-printed.
  explicit OStream(llvm::raw_ostream &OS, unsigned IndentSize = 0)
      : OS(OS), IndentSize(IndentSize) {
    Stack.emplace_back();
  }
  ~OStream() {
    assert(Stack.size() == 1 && "Unmatched begin()/end()");
    assert(Stack.back().Ctx == Singleton);
    assert(Stack.back().HasValue && "Did not write top-level value");
  }

  /// Flushes the underlying ostream. OStream does not buffer internally.
  void flush() { OS.flush(); }

  // High level functions to output a value.
  // Valid at top-level (exactly once), in an attribute value (exactly once),
  // or in an array (any number of times).

  /// Emit a self-contained value (number, string, vector<string> etc).
  void value(const Value &V);
  /// Emit an array whose elements are emitted in the provided Block.
  void array(Block Contents) {
    arrayBegin();
    Contents();
    arrayEnd();
  }
  /// Emit an object whose elements are emitted in the provided Block.
  void object(Block Contents) {
    objectBegin();
    Contents();
    objectEnd();
  }
  /// Emit an externally-serialized value.
  /// The caller must write exactly one valid JSON value to the provided stream.
  /// No validation or formatting of this value occurs.
  void rawValue(llvm::function_ref<void(raw_ostream &)> Contents) {
    rawValueBegin();
    Contents(OS);
    rawValueEnd();
  }
  void rawValue(llvm::StringRef Contents) {
    rawValue([&](raw_ostream &OS) { OS << Contents; });
  }
  /// Emit a JavaScript comment associated with the next printed value.
  /// The string must be valid until the next attribute or value is emitted.
  /// Comments are not part of standard JSON, and many parsers reject them!
  void comment(llvm::StringRef);

  // High level functions to output object attributes.
  // Valid only within an object (any number of times).

  /// Emit an attribute whose value is self-contained (number, vector<int> etc).
  void attribute(llvm::StringRef Key, const Value& Contents) {
    attributeImpl(Key, [&] { value(Contents); });
  }
  /// Emit an attribute whose value is an array with elements from the Block.
  void attributeArray(llvm::StringRef Key, Block Contents) {
    attributeImpl(Key, [&] { array(Contents); });
  }
  /// Emit an attribute whose value is an object with attributes from the Block.
  void attributeObject(llvm::StringRef Key, Block Contents) {
    attributeImpl(Key, [&] { object(Contents); });
  }

  // Low-level begin/end functions to output arrays, objects, and attributes.
  // Must be correctly paired. Allowed contexts are as above.

  void arrayBegin();
  void arrayEnd();
  void objectBegin();
  void objectEnd();
  void attributeBegin(llvm::StringRef Key);
  void attributeEnd();
  raw_ostream &rawValueBegin();
  void rawValueEnd();

private:
  void attributeImpl(llvm::StringRef Key, Block Contents) {
    attributeBegin(Key);
    Contents();
    attributeEnd();
  }

  void valueBegin();
  void flushComment();
  void newline();

  enum Context {
    Singleton, // Top level, or object attribute.
    Array,
    Object,
    RawValue, // External code writing a value to OS directly.
  };
  struct State {
    Context Ctx = Singleton;
    bool HasValue = false;
  };
  llvm::SmallVector<State, 16> Stack; // Never empty.
  llvm::StringRef PendingComment;
  llvm::raw_ostream &OS;
  unsigned IndentSize;
  unsigned Indent = 0;
};

/// Serializes this Value to JSON, writing it to the provided stream.
/// The formatting is compact (no extra whitespace) and deterministic.
/// For pretty-printing, use the formatv() format_provider below.
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Value &V) {
  OStream(OS).value(V);
  return OS;
}
} // namespace json

/// Allow printing json::Value with formatv().
/// The default style is basic/compact formatting, like operator<<.
/// A format string like formatv("{0:2}", Value) pretty-prints with indent 2.
template <> struct format_provider<llvm::json::Value> {
  static void format(const llvm::json::Value &, raw_ostream &, StringRef);
};
} // namespace llvm

#endif

//===--- MemoryBuffer.h - Memory Buffer Interface ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the MemoryBuffer interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MEMORYBUFFER_H
#define LLVM_SUPPORT_MEMORYBUFFER_H

#include "llvm-c/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <cstddef>
#include <cstdint>
#include <memory>

namespace llvm {
namespace sys {
namespace fs {
// Duplicated from FileSystem.h to avoid a dependency.
#if defined(_WIN32)
// A Win32 HANDLE is a typedef of void*
using file_t = void *;
#else
using file_t = int;
#endif
} // namespace fs
} // namespace sys

/// This interface provides simple read-only access to a block of memory, and
/// provides simple methods for reading files and standard input into a memory
/// buffer.  In addition to basic access to the characters in the file, this
/// interface guarantees you can read one character past the end of the file,
/// and that this character will read as '\0'.
///
/// The '\0' guarantee is needed to support an optimization -- it's intended to
/// be more efficient for clients which are reading all the data to stop
/// reading when they encounter a '\0' than to continually check the file
/// position to see if it has reached the end of the file.
class MemoryBuffer {
  const char *BufferStart; // Start of the buffer.
  const char *BufferEnd;   // End of the buffer.

protected:
  MemoryBuffer() = default;

  void init(const char *BufStart, const char *BufEnd,
            bool RequiresNullTerminator);

public:
  MemoryBuffer(const MemoryBuffer &) = delete;
  MemoryBuffer &operator=(const MemoryBuffer &) = delete;
  virtual ~MemoryBuffer();

  const char *getBufferStart() const { return BufferStart; }
  const char *getBufferEnd() const   { return BufferEnd; }
  size_t getBufferSize() const { return BufferEnd-BufferStart; }

  StringRef getBuffer() const {
    return StringRef(BufferStart, getBufferSize());
  }

  /// Return an identifier for this buffer, typically the filename it was read
  /// from.
  virtual StringRef getBufferIdentifier() const { return "Unknown buffer"; }

  /// For read-only MemoryBuffer_MMap, mark the buffer as unused in the near
  /// future so that the kernel can free resources associated with it. Further
  /// access is supported but may be expensive. This calls
  /// madvise(MADV_DONTNEED) on read-only file mappings on *NIX systems. This
  /// function should not be called on a writable buffer.
  virtual void dontNeedIfMmap() {}

  /// Open the specified file as a MemoryBuffer, returning a new MemoryBuffer
  /// if successful, otherwise returning null.
  ///
  /// \param IsText Set to true to indicate that the file should be read in
  /// text mode.
  ///
  /// \param IsVolatile Set to true to indicate that the contents of the file
  /// can change outside the user's control, e.g. when libclang tries to parse
  /// while the user is editing/updating the file or if the file is on an NFS
  /// mount.
  ///
  /// \param Alignment Set to indicate that the buffer should be aligned to at
  /// least the specified alignment.
  static ErrorOr<std::unique_ptr<MemoryBuffer>>
  getFile(const Twine &Filename, bool IsText = false,
          bool RequiresNullTerminator = true, bool IsVolatile = false,
          std::optional<Align> Alignment = std::nullopt);

  /// Read all of the specified file into a MemoryBuffer as a stream
  /// (i.e. until EOF is reached). This is useful for special files that
  /// look like a regular file but have 0 size (e.g. /proc/cpuinfo on Linux).
  static ErrorOr<std::unique_ptr<MemoryBuffer>>
  getFileAsStream(const Twine &Filename);

  /// Given an already-open file descriptor, map some slice of it into a
  /// MemoryBuffer. The slice is specified by an \p Offset and \p MapSize.
  /// Since this is in the middle of a file, the buffer is not null terminated.
  static ErrorOr<std::unique_ptr<MemoryBuffer>>
  getOpenFileSlice(sys::fs::file_t FD, const Twine &Filename, uint64_t MapSize,
                   int64_t Offset, bool IsVolatile = false,
                   std::optional<Align> Alignment = std::nullopt);

  /// Given an already-open file descriptor, read the file and return a
  /// MemoryBuffer.
  ///
  /// \param IsVolatile Set to true to indicate that the contents of the file
  /// can change outside the user's control, e.g. when libclang tries to parse
  /// while the user is editing/updating the file or if the file is on an NFS
  /// mount.
  ///
  /// \param Alignment Set to indicate that the buffer should be aligned to at
  /// least the specified alignment.
  static ErrorOr<std::unique_ptr<MemoryBuffer>>
  getOpenFile(sys::fs::file_t FD, const Twine &Filename, uint64_t FileSize,
              bool RequiresNullTerminator = true, bool IsVolatile = false,
              std::optional<Align> Alignment = std::nullopt);

  /// Open the specified memory range as a MemoryBuffer. Note that InputData
  /// must be null terminated if RequiresNullTerminator is true.
  static std::unique_ptr<MemoryBuffer>
  getMemBuffer(StringRef InputData, StringRef BufferName = "",
               bool RequiresNullTerminator = true);

  static std::unique_ptr<MemoryBuffer>
  getMemBuffer(MemoryBufferRef Ref, bool RequiresNullTerminator = true);

  /// Open the specified memory range as a MemoryBuffer, copying the contents
  /// and taking ownership of it. InputData does not have to be null terminated.
  static std::unique_ptr<MemoryBuffer>
  getMemBufferCopy(StringRef InputData, const Twine &BufferName = "");

  /// Read all of stdin into a file buffer, and return it.
  static ErrorOr<std::unique_ptr<MemoryBuffer>> getSTDIN();

  /// Open the specified file as a MemoryBuffer, or open stdin if the Filename
  /// is "-".
  static ErrorOr<std::unique_ptr<MemoryBuffer>>
  getFileOrSTDIN(const Twine &Filename, bool IsText = false,
                 bool RequiresNullTerminator = true,
                 std::optional<Align> Alignment = std::nullopt);

  /// Map a subrange of the specified file as a MemoryBuffer.
  static ErrorOr<std::unique_ptr<MemoryBuffer>>
  getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset,
               bool IsVolatile = false,
               std::optional<Align> Alignment = std::nullopt);

  //===--------------------------------------------------------------------===//
  // Provided for performance analysis.
  //===--------------------------------------------------------------------===//

  /// The kind of memory backing used to support the MemoryBuffer.
  enum BufferKind {
    MemoryBuffer_Malloc,
    MemoryBuffer_MMap
  };

  /// Return information on the memory mechanism used to support the
  /// MemoryBuffer.
  virtual BufferKind getBufferKind() const = 0;

  MemoryBufferRef getMemBufferRef() const;
};
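
// Illustrative usage sketch (not part of the original header): reading a file
// through the factory API above. The path "input.ll" is a hypothetical example.
//
//   llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> BufOrErr =
//       llvm::MemoryBuffer::getFile("input.ll");
//   if (std::error_code EC = BufOrErr.getError()) {
//     // Handle the failure, e.g. report EC.message().
//   } else {
//     llvm::StringRef Contents = (*BufOrErr)->getBuffer();
//     // Contents stays valid for the lifetime of *BufOrErr and is followed
//     // by a '\0', since RequiresNullTerminator defaulted to true.
//   }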

/// This class is an extension of MemoryBuffer, which allows copy-on-write
/// access to the underlying contents.  It only supports creation methods that
/// are guaranteed to produce a writable buffer.  For example, mapping a file
/// read-only is not supported.
class WritableMemoryBuffer : public MemoryBuffer {
protected:
  WritableMemoryBuffer() = default;

public:
  using MemoryBuffer::getBuffer;
  using MemoryBuffer::getBufferEnd;
  using MemoryBuffer::getBufferStart;

  // const_cast is well-defined here, because the underlying buffer is
  // guaranteed to have been initialized with a mutable buffer.
  char *getBufferStart() {
    return const_cast<char *>(MemoryBuffer::getBufferStart());
  }
  char *getBufferEnd() {
    return const_cast<char *>(MemoryBuffer::getBufferEnd());
  }
  MutableArrayRef<char> getBuffer() {
    return {getBufferStart(), getBufferEnd()};
  }

  static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
  getFile(const Twine &Filename, bool IsVolatile = false,
          std::optional<Align> Alignment = std::nullopt);

  /// Map a subrange of the specified file as a WritableMemoryBuffer.
  static ErrorOr<std::unique_ptr<WritableMemoryBuffer>>
  getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset,
               bool IsVolatile = false,
               std::optional<Align> Alignment = std::nullopt);

  /// Allocate a new MemoryBuffer of the specified size that is not initialized.
  /// Note that the caller should initialize the memory allocated by this
  /// method. The memory is owned by the MemoryBuffer object.
  ///
  /// \param Alignment Set to indicate that the buffer should be aligned to at
  /// least the specified alignment.
  static std::unique_ptr<WritableMemoryBuffer>
  getNewUninitMemBuffer(size_t Size, const Twine &BufferName = "",
                        std::optional<Align> Alignment = std::nullopt);

  /// Allocate a new zero-initialized MemoryBuffer of the specified size. Note
  /// that the caller need not initialize the memory allocated by this method.
  /// The memory is owned by the MemoryBuffer object.
  static std::unique_ptr<WritableMemoryBuffer>
  getNewMemBuffer(size_t Size, const Twine &BufferName = "");

private:
  // Hide these base class factory functions so one can't write
  //   WritableMemoryBuffer::getXXX()
  // and be surprised to get a read-only buffer.
  using MemoryBuffer::getFileAsStream;
  using MemoryBuffer::getFileOrSTDIN;
  using MemoryBuffer::getMemBuffer;
  using MemoryBuffer::getMemBufferCopy;
  using MemoryBuffer::getOpenFile;
  using MemoryBuffer::getOpenFileSlice;
  using MemoryBuffer::getSTDIN;
};
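
// Illustrative usage sketch (not part of the original header): allocating a
// writable, uninitialized buffer and filling it in place.
//
//   std::unique_ptr<llvm::WritableMemoryBuffer> Buf =
//       llvm::WritableMemoryBuffer::getNewUninitMemBuffer(256, "scratch");
//   if (Buf) // may be null on allocation failure
//     std::memset(Buf->getBufferStart(), 0, Buf->getBufferSize());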

/// This class is an extension of MemoryBuffer, which allows write access to
/// the underlying contents and committing those changes to the original source.
/// It only supports creation methods that are guaranteed to produce a writable
/// buffer.  For example, mapping a file read-only is not supported.
class WriteThroughMemoryBuffer : public MemoryBuffer {
protected:
  WriteThroughMemoryBuffer() = default;

public:
  using MemoryBuffer::getBuffer;
  using MemoryBuffer::getBufferEnd;
  using MemoryBuffer::getBufferStart;

  // const_cast is well-defined here, because the underlying buffer is
  // guaranteed to have been initialized with a mutable buffer.
  char *getBufferStart() {
    return const_cast<char *>(MemoryBuffer::getBufferStart());
  }
  char *getBufferEnd() {
    return const_cast<char *>(MemoryBuffer::getBufferEnd());
  }
  MutableArrayRef<char> getBuffer() {
    return {getBufferStart(), getBufferEnd()};
  }

  static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
  getFile(const Twine &Filename, int64_t FileSize = -1);

  /// Map a subrange of the specified file as a WriteThroughMemoryBuffer.
  static ErrorOr<std::unique_ptr<WriteThroughMemoryBuffer>>
  getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset);

private:
  // Hide these base class factory functions so one can't write
  //   WriteThroughMemoryBuffer::getXXX()
  // and be surprised to get a read-only buffer.
  using MemoryBuffer::getFileAsStream;
  using MemoryBuffer::getFileOrSTDIN;
  using MemoryBuffer::getMemBuffer;
  using MemoryBuffer::getMemBufferCopy;
  using MemoryBuffer::getOpenFile;
  using MemoryBuffer::getOpenFileSlice;
  using MemoryBuffer::getSTDIN;
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MemoryBuffer, LLVMMemoryBufferRef)

} // end namespace llvm

#endif // LLVM_SUPPORT_MEMORYBUFFER_H

// File: Support/MemoryBufferRef.h
//===- MemoryBufferRef.h - Memory Buffer Reference --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the MemoryBufferRef interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MEMORYBUFFERREF_H
#define LLVM_SUPPORT_MEMORYBUFFERREF_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

class MemoryBuffer;

class MemoryBufferRef {
  StringRef Buffer;
  StringRef Identifier;

public:
  MemoryBufferRef() = default;
  MemoryBufferRef(const MemoryBuffer &Buffer);
  MemoryBufferRef(StringRef Buffer, StringRef Identifier)
      : Buffer(Buffer), Identifier(Identifier) {}

  StringRef getBuffer() const { return Buffer; }
  StringRef getBufferIdentifier() const { return Identifier; }

  const char *getBufferStart() const { return Buffer.begin(); }
  const char *getBufferEnd() const { return Buffer.end(); }
  size_t getBufferSize() const { return Buffer.size(); }

  /// Check pointer identity (not value) of identifier and data.
  friend bool operator==(const MemoryBufferRef &LHS,
                         const MemoryBufferRef &RHS) {
    return LHS.Buffer.begin() == RHS.Buffer.begin() &&
           LHS.Buffer.end() == RHS.Buffer.end() &&
           LHS.Identifier.begin() == RHS.Identifier.begin() &&
           LHS.Identifier.end() == RHS.Identifier.end();
  }

  friend bool operator!=(const MemoryBufferRef &LHS,
                         const MemoryBufferRef &RHS) {
    return !(LHS == RHS);
  }
};
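
// Illustrative sketch (not part of the original header): equality compares
// pointer identity, not contents.
//
//   llvm::StringRef Data = "payload";
//   llvm::MemoryBufferRef A(Data, "id"), B(Data, "id");
//   assert(A == B);                   // same underlying pointers
//   std::string Copy = Data.str();
//   llvm::MemoryBufferRef C(Copy, "id");
//   assert(A != C);                   // equal bytes, different storage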

} // namespace llvm

#endif // LLVM_SUPPORT_MEMORYBUFFERREF_H

// File: Support/RISCVTargetParser.def
#ifndef PROC_ALIAS
#define PROC_ALIAS(NAME, RV32, RV64)
#endif

PROC_ALIAS("generic", "generic-rv32", "generic-rv64")
PROC_ALIAS("rocket", "rocket-rv32", "rocket-rv64")
PROC_ALIAS("sifive-7-series", "sifive-7-rv32", "sifive-7-rv64")

#undef PROC_ALIAS

#ifndef PROC
#define PROC(ENUM, NAME, FEATURES, DEFAULT_MARCH)
#endif

PROC(INVALID, {"invalid"}, FK_INVALID, {""})
PROC(GENERIC_RV32, {"generic-rv32"}, FK_NONE, {""})
PROC(GENERIC_RV64, {"generic-rv64"}, FK_64BIT, {""})
PROC(ROCKET_RV32, {"rocket-rv32"}, FK_NONE, {""})
PROC(ROCKET_RV64, {"rocket-rv64"}, FK_64BIT, {""})
PROC(SIFIVE_732, {"sifive-7-rv32"}, FK_NONE, {""})
PROC(SIFIVE_764, {"sifive-7-rv64"}, FK_64BIT, {""})
PROC(SIFIVE_E20, {"sifive-e20"}, FK_NONE, {"rv32imc"})
PROC(SIFIVE_E21, {"sifive-e21"}, FK_NONE, {"rv32imac"})
PROC(SIFIVE_E24, {"sifive-e24"}, FK_NONE, {"rv32imafc"})
PROC(SIFIVE_E31, {"sifive-e31"}, FK_NONE, {"rv32imac"})
PROC(SIFIVE_E34, {"sifive-e34"}, FK_NONE, {"rv32imafc"})
PROC(SIFIVE_E76, {"sifive-e76"}, FK_NONE, {"rv32imafc"})
PROC(SIFIVE_S21, {"sifive-s21"}, FK_64BIT, {"rv64imac"})
PROC(SIFIVE_S51, {"sifive-s51"}, FK_64BIT, {"rv64imac"})
PROC(SIFIVE_S54, {"sifive-s54"}, FK_64BIT, {"rv64gc"})
PROC(SIFIVE_S76, {"sifive-s76"}, FK_64BIT, {"rv64gc"})
PROC(SIFIVE_U54, {"sifive-u54"}, FK_64BIT, {"rv64gc"})
PROC(SIFIVE_U74, {"sifive-u74"}, FK_64BIT, {"rv64gc"})

#undef PROC
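
// Illustrative consumer sketch (an assumption about typical use of this .def
// file, not part of it): define PROC before inclusion to expand the table,
// e.g. into a list of CPU names. PROC_ALIAS defaults to an empty expansion,
// and the .def undefines PROC itself after use.
//
//   #define PROC(ENUM, NAME, FEATURES, DEFAULT_MARCH) NAME,
//   static const char *RISCVCPUNames[] = {
//   #include "llvm/Support/RISCVTargetParser.def"
//   };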

// File: Support/Atomic.h
//===- llvm/Support/Atomic.h - Atomic Operations -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys atomic operations.
//
// DO NOT USE IN NEW CODE!
//
// New code should always rely on the std::atomic facilities in C++11.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ATOMIC_H
#define LLVM_SUPPORT_ATOMIC_H

#include "llvm/Support/DataTypes.h"

// Windows will at times define MemoryFence.
#ifdef MemoryFence
#undef MemoryFence
#endif

namespace llvm {
  namespace sys {
    void MemoryFence();

#ifdef _MSC_VER
    typedef long cas_flag;
#else
    typedef uint32_t cas_flag;
#endif
    cas_flag CompareAndSwap(volatile cas_flag* ptr,
                            cas_flag new_value,
                            cas_flag old_value);
  }
}
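
// Illustrative modern replacement (a sketch following the "use std::atomic"
// guidance above; not part of this header):
//
//   #include <atomic>
//
//   std::atomic<uint32_t> Flag{0};
//   uint32_t Expected = 0;
//   Flag.compare_exchange_strong(Expected, 1);            // ~ CompareAndSwap
//   std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ sys::MemoryFence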

#endif

// File: Support/ARMBuildAttributes.h
//===-- ARMBuildAttributes.h - ARM Build Attributes -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains enumerations and support routines for ARM build attributes
// as defined in the ARM ABI addenda document (ABI release 2.08).
//
// ELF for the ARM Architecture r2.09 - November 30, 2012
//
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
#define LLVM_SUPPORT_ARMBUILDATTRIBUTES_H

#include "llvm/Support/ELFAttributes.h"

namespace llvm {
namespace ARMBuildAttrs {

const TagNameMap &getARMAttributeTags();

enum SpecialAttr {
  // This is for the .cpu asm attr. It translates into one or more
  // AttrType (below) entries in the .ARM.attributes section in the ELF.
  SEL_CPU
};

enum AttrType : unsigned {
  // Rest correspond to ELF/.ARM.attributes
  File = 1,
  CPU_raw_name = 4,
  CPU_name = 5,
  CPU_arch = 6,
  CPU_arch_profile = 7,
  ARM_ISA_use = 8,
  THUMB_ISA_use = 9,
  FP_arch = 10,
  WMMX_arch = 11,
  Advanced_SIMD_arch = 12,
  PCS_config = 13,
  ABI_PCS_R9_use = 14,
  ABI_PCS_RW_data = 15,
  ABI_PCS_RO_data = 16,
  ABI_PCS_GOT_use = 17,
  ABI_PCS_wchar_t = 18,
  ABI_FP_rounding = 19,
  ABI_FP_denormal = 20,
  ABI_FP_exceptions = 21,
  ABI_FP_user_exceptions = 22,
  ABI_FP_number_model = 23,
  ABI_align_needed = 24,
  ABI_align_preserved = 25,
  ABI_enum_size = 26,
  ABI_HardFP_use = 27,
  ABI_VFP_args = 28,
  ABI_WMMX_args = 29,
  ABI_optimization_goals = 30,
  ABI_FP_optimization_goals = 31,
  compatibility = 32,
  CPU_unaligned_access = 34,
  FP_HP_extension = 36,
  ABI_FP_16bit_format = 38,
  MPextension_use = 42, // recoded from 70 (ABI r2.08)
  DIV_use = 44,
  DSP_extension = 46,
  MVE_arch = 48,
  PAC_extension = 50,
  BTI_extension = 52,
  also_compatible_with = 65,
  conformance = 67,
  Virtualization_use = 68,
  BTI_use = 74,
  PACRET_use = 76,

  /// Legacy Tags
  Section = 2,               // deprecated (ABI r2.09)
  Symbol = 3,                // deprecated (ABI r2.09)
  ABI_align8_needed = 24,    // renamed to ABI_align_needed (ABI r2.09)
  ABI_align8_preserved = 25, // renamed to ABI_align_preserved (ABI r2.09)
  nodefaults = 64,           // deprecated (ABI r2.09)
  T2EE_use = 66,             // deprecated (ABI r2.09)
  MPextension_use_old = 70   // recoded to MPextension_use (ABI r2.08)
};

// Legal Values for CPU_arch, (=6), uleb128
enum CPUArch {
  Pre_v4 = 0,
  v4 = 1,           // e.g. SA110
  v4T = 2,          // e.g. ARM7TDMI
  v5T = 3,          // e.g. ARM9TDMI
  v5TE = 4,         // e.g. ARM946E_S
  v5TEJ = 5,        // e.g. ARM926EJ_S
  v6 = 6,           // e.g. ARM1136J_S
  v6KZ = 7,         // e.g. ARM1176JZ_S
  v6T2 = 8,         // e.g. ARM1156T2_S
  v6K = 9,          // e.g. ARM1176JZ_S
  v7 = 10,          // e.g. Cortex A8, Cortex M3
  v6_M = 11,        // e.g. Cortex M1
  v6S_M = 12,       // v6_M with the System extensions
  v7E_M = 13,       // v7_M with DSP extensions
  v8_A = 14,        // v8_A AArch32
  v8_R = 15,        // e.g. Cortex R52
  v8_M_Base = 16,   // v8_M_Base AArch32
  v8_M_Main = 17,   // v8_M_Main AArch32
  v8_1_M_Main = 21, // v8_1_M_Main AArch32
  v9_A = 22,        // v9_A AArch32
};

enum CPUArchProfile {               // (=7), uleb128
  Not_Applicable          = 0,      // pre v7, or cross-profile code
  ApplicationProfile      = (0x41), // 'A' (e.g. for Cortex A8)
  RealTimeProfile         = (0x52), // 'R' (e.g. for Cortex R4)
  MicroControllerProfile  = (0x4D), // 'M' (e.g. for Cortex M3)
  SystemProfile           = (0x53)  // 'S' Application or real-time profile
};
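
// Illustrative sketch (an assumption about typical use, not part of this
// header): interpreting a parsed Tag_CPU_arch_profile value.
//
//   bool isMicroControllerProfile(uint64_t Profile) {
//     return Profile == llvm::ARMBuildAttrs::MicroControllerProfile;
//   }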

// The following have a lot of common use cases
enum {
  Not_Allowed = 0,
  Allowed = 1,

  // Tag_ARM_ISA_use (=8), uleb128

  // Tag_THUMB_ISA_use, (=9), uleb128
  AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
  AllowThumbDerived = 3, // Thumb allowed, derived from arch/profile

  // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
  AllowFPv2  = 2,     // v2 FP ISA permitted (implies use of the v1 FP ISA)
  AllowFPv3A = 3,     // v3 FP ISA permitted (implies use of the v2 FP ISA)
  AllowFPv3B = 4,     // v3 FP ISA permitted, but only D0-D15, S0-S31
  AllowFPv4A = 5,     // v4 FP ISA permitted (implies use of v3 FP ISA)
  AllowFPv4B = 6,     // v4 FP ISA was permitted, but only D0-D15, S0-S31
  AllowFPARMv8A = 7,  // Use of the ARM v8-A FP ISA was permitted
  AllowFPARMv8B = 8,  // Use of the ARM v8-A FP ISA was permitted, but only
                      // D0-D15, S0-S31

  // Tag_WMMX_arch, (=11), uleb128
  AllowWMMXv1 = 1,  // The user permitted this entity to use WMMX v1
  AllowWMMXv2 = 2,  // The user permitted this entity to use WMMX v2

  // Tag_Advanced_SIMD_arch, (=12), uleb128
  AllowNeon = 1,      // SIMDv1 was permitted
  AllowNeon2 = 2,     // SIMDv2 was permitted (Half-precision FP, MAC operations)
  AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted
  AllowNeonARMv8_1a = 4,// ARM v8.1-A SIMD was permitted (RDMA)

  // Tag_MVE_arch, (=48), uleb128
  AllowMVEInteger = 1, // integer-only MVE was permitted
  AllowMVEIntegerAndFloat = 2, // both integer and floating point MVE were permitted

  // Tag_ABI_PCS_R9_use, (=14), uleb128
  R9IsGPR = 0,        // R9 used as v6 (just another callee-saved register)
  R9IsSB = 1,         // R9 used as a global static base register
  R9IsTLSPointer = 2, // R9 used as a thread local storage pointer
  R9Reserved = 3,     // R9 not used by code associated with attributed entity

  // Tag_ABI_PCS_RW_data, (=15), uleb128
  AddressRWPCRel = 1, // Address RW static data PC-relative
  AddressRWSBRel = 2, // Address RW static data SB-relative
  AddressRWNone = 3, // No RW static data permitted

  // Tag_ABI_PCS_RO_data, (=16), uleb128
  AddressROPCRel = 1, // Address RO static data PC-relative
  AddressRONone = 2, // No RO static data permitted

  // Tag_ABI_PCS_GOT_use, (=17), uleb128
  AddressDirect = 1, // Address imported data directly
  AddressGOT = 2, // Address imported data indirectly (via GOT)

  // Tag_ABI_PCS_wchar_t, (=18), uleb128
  WCharProhibited = 0,  // wchar_t is not used
  WCharWidth2Bytes = 2, // sizeof(wchar_t) == 2
  WCharWidth4Bytes = 4, // sizeof(wchar_t) == 4

  // Tag_ABI_align_needed, (=24), uleb128
  Align8Byte = 1,
  Align4Byte = 2,
  AlignReserved = 3,

  // Tag_ABI_align_preserved, (=25), uleb128
  AlignNotPreserved = 0,
  AlignPreserve8Byte = 1,
  AlignPreserveAll = 2,

  // Tag_ABI_FP_denormal, (=20), uleb128
  PositiveZero = 0,
  IEEEDenormals = 1,
  PreserveFPSign = 2, // sign when flushed-to-zero is preserved

  // Tag_ABI_FP_number_model, (=23), uleb128
  AllowIEEENormal = 1,
  AllowRTABI = 2,  // numbers, infinities, and one quiet NaN (see [RTABI])
  AllowIEEE754 = 3, // this code to use all the IEEE 754-defined FP encodings

  // Tag_ABI_enum_size, (=26), uleb128
  EnumProhibited = 0, // The user prohibited the use of enums when building
                      // this entity.
  EnumSmallest = 1,   // Enum is smallest container big enough to hold all
                      // values.
  Enum32Bit = 2,      // Enum is at least 32 bits.
  Enum32BitABI = 3,   // Every enumeration visible across an ABI-complying
                      // interface contains a value needing 32 bits to encode
                      // it; other enums can be containerized.

  // Tag_ABI_HardFP_use, (=27), uleb128
  HardFPImplied = 0,          // FP use should be implied by Tag_FP_arch
  HardFPSinglePrecision = 1,  // Single-precision only

  // Tag_ABI_VFP_args, (=28), uleb128
  BaseAAPCS = 0,
  HardFPAAPCS = 1,
  ToolChainFPPCS = 2,
  CompatibleFPAAPCS = 3,

  // Tag_FP_HP_extension, (=36), uleb128
  AllowHPFP = 1, // Allow use of Half Precision FP

  // Tag_ABI_FP_16bit_format, (=38), uleb128
  FP16FormatIEEE = 1,
  FP16VFP3 = 2,

  // Tag_MPextension_use, (=42), uleb128
  AllowMP = 1, // Allow use of MP extensions

  // Tag_DIV_use, (=44), uleb128
  // Note: AllowDIVExt must be emitted if and only if the permission to use
  // hardware divide cannot be conveyed using AllowDIVIfExists or DisallowDIV
  AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no
                        // info exists.
  DisallowDIV = 1,      // Hardware divide explicitly disallowed.
  AllowDIVExt = 2,      // Allow hardware divide as optional architecture
                        // extension above the base arch specified by
                        // Tag_CPU_arch and Tag_CPU_arch_profile.

  // Tag_Virtualization_use, (=68), uleb128
  AllowTZ = 1,
  AllowVirtualization = 2,
  AllowTZVirtualization = 3,

  // Tag_PAC_extension, (=50), uleb128
  DisallowPAC = 0,
  AllowPACInNOPSpace = 1,
  AllowPAC = 2,

  // Tag_BTI_extension, (=52), uleb128
  DisallowBTI = 0,
  AllowBTIInNOPSpace = 1,
  AllowBTI = 2,

  // Tag_BTI_use, (=74), uleb128
  BTINotUsed = 0,
  BTIUsed = 1,

  // Tag_PACRET_use, (=76), uleb128
  PACRETNotUsed = 0,
  PACRETUsed = 1
};

} // namespace ARMBuildAttrs
} // namespace llvm

#endif

// File: Support/LICENSE.TXT
LLVM System Interface Library
-------------------------------------------------------------------------------

Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

// File: Support/circular_raw_ostream.h
//===-- llvm/Support/circular_raw_ostream.h - Buffered streams --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains raw_ostream implementations for streams to do circular
// buffering of their output.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H
#define LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H

#include "llvm/Support/raw_ostream.h"

namespace llvm {
  /// circular_raw_ostream - A raw_ostream which *can* save its data
  /// to a circular buffer, or can pass it through directly to an
  /// underlying stream if constructed with a buffer size of zero.
  ///
  class circular_raw_ostream : public raw_ostream {
  public:
    /// TAKE_OWNERSHIP - Tell this stream that it owns the underlying
    /// stream and is responsible for cleanup, memory management
    /// issues, etc.
    ///
    static constexpr bool TAKE_OWNERSHIP = true;

    /// REFERENCE_ONLY - Tell this stream it should not manage the
    /// held stream.
    ///
    static constexpr bool REFERENCE_ONLY = false;

  private:
    /// TheStream - The real stream we output to. We set it to be
    /// unbuffered, since we're already doing our own buffering.
    ///
    raw_ostream *TheStream = nullptr;

    /// OwnsStream - Are we responsible for managing the underlying
    /// stream?
    ///
    bool OwnsStream;

    /// BufferSize - The size of the buffer in bytes.
    ///
    size_t BufferSize;

    /// BufferArray - The actual buffer storage.
    ///
    char *BufferArray = nullptr;

    /// Cur - Pointer to the current output point in BufferArray.
    ///
    char *Cur;

    /// Filled - Indicate whether the buffer has been completely
    /// filled.  This helps avoid garbage output.
    ///
    bool Filled = false;

    /// Banner - A pointer to a banner to print before dumping the
    /// log.
    ///
    const char *Banner;

    /// flushBuffer - Dump the contents of the buffer to Stream.
    ///
    void flushBuffer() {
      if (Filled)
        // Write the older portion of the buffer.
        TheStream->write(Cur, BufferArray + BufferSize - Cur);
      // Write the newer portion of the buffer.
      TheStream->write(BufferArray, Cur - BufferArray);
      Cur = BufferArray;
      Filled = false;
    }

    void write_impl(const char *Ptr, size_t Size) override;

    /// current_pos - Return the current position within the stream,
    /// not counting the bytes currently in the buffer.
    ///
    uint64_t current_pos() const override {
      // This has the same effect as calling TheStream.current_pos(),
      // but that interface is private.
      return TheStream->tell() - TheStream->GetNumBytesInBuffer();
    }

  public:
    /// circular_raw_ostream - Construct an optionally
    /// circular-buffered stream, handing it an underlying stream to
    /// do the "real" output.
    ///
    /// As a side effect, if BuffSize is nonzero, the given Stream is
    /// set to be Unbuffered.  This is because circular_raw_ostream
    /// does its own buffering, so it doesn't want another layer of
    /// buffering to be happening underneath it.
    ///
    /// "Owns" tells the circular_raw_ostream whether it is
    /// responsible for managing the held stream, doing memory
    /// management of it, etc.
    ///
    circular_raw_ostream(raw_ostream &Stream, const char *Header,
                         size_t BuffSize = 0, bool Owns = REFERENCE_ONLY)
        : raw_ostream(/*unbuffered*/ true), OwnsStream(Owns),
          BufferSize(BuffSize), Banner(Header) {
      if (BufferSize != 0)
        BufferArray = new char[BufferSize];
      Cur = BufferArray;
      setStream(Stream, Owns);
    }

    ~circular_raw_ostream() override {
      flush();
      flushBufferWithBanner();
      releaseStream();
      delete[] BufferArray;
    }

    bool is_displayed() const override {
      return TheStream->is_displayed();
    }

    /// setStream - Tell the circular_raw_ostream to output a
    /// different stream.  "Owns" tells circular_raw_ostream whether
    /// it should take responsibility for managing the underlying
    /// stream.
    ///
    void setStream(raw_ostream &Stream, bool Owns = REFERENCE_ONLY) {
      releaseStream();
      TheStream = &Stream;
      OwnsStream = Owns;
    }

    /// flushBufferWithBanner - Force output of the buffer along with
    /// a small header.
    ///
    void flushBufferWithBanner();

  private:
    /// releaseStream - Delete the held stream if this object owns it;
    /// otherwise leave the stream untouched.
    ///
    void releaseStream() {
      if (!TheStream)
        return;
      if (OwnsStream)
        delete TheStream;
    }
  };
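
  // Illustrative usage sketch (not part of the original header): keep only the
  // last 4 KiB of output and dump it, with a banner, when the stream dies.
  //
  //   llvm::circular_raw_ostream Log(
  //       llvm::errs(), "=== tail of log ===\n", 4096,
  //       llvm::circular_raw_ostream::REFERENCE_ONLY);
  //   Log << "noisy trace line\n"; // buffered circularly, not printed yet
  //   // ~circular_raw_ostream() flushes the buffer, preceded by the banner.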
} // end llvm namespace

#endif

// File: Support/TrailingObjects.h
//===--- TrailingObjects.h - Variable-length classes ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header defines support for implementing classes that have
/// some trailing object (or arrays of objects) appended to them. The
/// main purpose is to make it obvious where this idiom is being used,
/// and to make the usage more idiomatic and more difficult to get
/// wrong.
///
/// The TrailingObject template abstracts away the reinterpret_cast,
/// pointer arithmetic, and size calculations used for the allocation
/// and access of appended arrays of objects, and takes care that they
/// are all allocated at their required alignment. Additionally, it
/// ensures that the base type is final -- deriving from a class that
/// expects data appended immediately after it is typically not safe.
///
/// Users are expected to derive from this template, and provide
/// numTrailingObjects implementations for each trailing type except
/// the last, e.g. like this sample:
///
/// \code
/// class VarLengthObj : private TrailingObjects<VarLengthObj, int, double> {
///   friend TrailingObjects;
///
///   unsigned NumInts, NumDoubles;
///   size_t numTrailingObjects(OverloadToken<int>) const { return NumInts; }
///  };
/// \endcode
///
/// You can access the appended arrays via 'getTrailingObjects', and
/// determine the size needed for allocation via
/// 'additionalSizeToAlloc' and 'totalSizeToAlloc'.
///
/// All the methods implemented by this class are intended for use
/// by the implementation of the class, not as part of its interface
/// (thus, private inheritance is suggested).
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TRAILINGOBJECTS_H
#define LLVM_SUPPORT_TRAILINGOBJECTS_H

#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
#include <new>
#include <type_traits>

namespace llvm {

namespace trailing_objects_internal {
/// Helper template to calculate the max alignment requirement for a set of
/// objects.
template <typename First, typename... Rest> class AlignmentCalcHelper {
private:
  enum {
    FirstAlignment = alignof(First),
    RestAlignment = AlignmentCalcHelper<Rest...>::Alignment,
  };

public:
  enum {
    Alignment = FirstAlignment > RestAlignment ? FirstAlignment : RestAlignment
  };
};

template <typename First> class AlignmentCalcHelper<First> {
public:
  enum { Alignment = alignof(First) };
};

/// The base class for TrailingObjects* classes.
class TrailingObjectsBase {
protected:
  /// OverloadToken's purpose is to allow specifying function overloads
  /// for different types, without actually taking the types as
  /// parameters. (Necessary because member function templates cannot
  /// be specialized, so overloads must be used instead of
  /// specialization.)
  template <typename T> struct OverloadToken {};
};

// Just a little helper for transforming a type pack into the same
// number of a different type. e.g.:
//   ExtractSecondType<Foo..., int>::type
template <typename Ty1, typename Ty2> struct ExtractSecondType {
  typedef Ty2 type;
};

// TrailingObjectsImpl is somewhat complicated, because it is a
// recursively inheriting template, in order to handle the template
// varargs. Each level of inheritance picks off a single trailing type
// then recurses on the rest. The "Align", "BaseTy", and
// "TopTrailingObj" arguments are passed through unchanged through the
// recursion. "PrevTy" is, at each level, the type handled by the
// level right above it.

template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
          typename... MoreTys>
class TrailingObjectsImpl {
  // The main template definition is never used -- the two
  // specializations cover all possibilities.
};

template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
          typename NextTy, typename... MoreTys>
class TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
                          MoreTys...>
    : public TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy,
                                 MoreTys...> {

  typedef TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy, MoreTys...>
      ParentType;

  struct RequiresRealignment {
    static const bool value = alignof(PrevTy) < alignof(NextTy);
  };

  static constexpr bool requiresRealignment() {
    return RequiresRealignment::value;
  }

protected:
  // Ensure the inherited getTrailingObjectsImpl is not hidden.
  using ParentType::getTrailingObjectsImpl;

  // These two functions are helper functions for
  // TrailingObjects::getTrailingObjects. They recurse to the left --
  // the result for each type in the list of trailing types depends on
  // the result of calling the function on the type to the
  // left. However, the function for the type to the left is
  // implemented by a *subclass* of this class, so we invoke it via
  // the TopTrailingObj, which is, via the
  // curiously-recurring-template-pattern, the most-derived type in
  // this recursion, and thus, contains all the overloads.
  static const NextTy *
  getTrailingObjectsImpl(const BaseTy *Obj,
                         TrailingObjectsBase::OverloadToken<NextTy>) {
    auto *Ptr = TopTrailingObj::getTrailingObjectsImpl(
                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>()) +
                TopTrailingObj::callNumTrailingObjects(
                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>());

    if (requiresRealignment())
      return reinterpret_cast<const NextTy *>(
          alignAddr(Ptr, Align::Of<NextTy>()));
    else
      return reinterpret_cast<const NextTy *>(Ptr);
  }

  static NextTy *
  getTrailingObjectsImpl(BaseTy *Obj,
                         TrailingObjectsBase::OverloadToken<NextTy>) {
    auto *Ptr = TopTrailingObj::getTrailingObjectsImpl(
                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>()) +
                TopTrailingObj::callNumTrailingObjects(
                    Obj, TrailingObjectsBase::OverloadToken<PrevTy>());

    if (requiresRealignment())
      return reinterpret_cast<NextTy *>(alignAddr(Ptr, Align::Of<NextTy>()));
    else
      return reinterpret_cast<NextTy *>(Ptr);
  }

  // Helper function for TrailingObjects::additionalSizeToAlloc: this
  // function recurses to superclasses, each of which requires one
  // fewer size_t argument, and adds its own size.
  static constexpr size_t additionalSizeToAllocImpl(
      size_t SizeSoFar, size_t Count1,
      typename ExtractSecondType<MoreTys, size_t>::type... MoreCounts) {
    return ParentType::additionalSizeToAllocImpl(
        (requiresRealignment() ? llvm::alignTo<alignof(NextTy)>(SizeSoFar)
                               : SizeSoFar) +
            sizeof(NextTy) * Count1,
        MoreCounts...);
  }
};

// The base case of the TrailingObjectsImpl inheritance recursion,
// when there's no more trailing types.
template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy>
class alignas(Align) TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy>
    : public TrailingObjectsBase {
protected:
  // This is a dummy method, only here so the "using" doesn't fail --
  // it will never be called, because this function recurses backwards
  // up the inheritance chain to subclasses.
  static void getTrailingObjectsImpl();

  static constexpr size_t additionalSizeToAllocImpl(size_t SizeSoFar) {
    return SizeSoFar;
  }

  template <bool CheckAlignment> static void verifyTrailingObjectsAlignment() {}
};

} // end namespace trailing_objects_internal

// Finally, the main type defined in this file, the one intended for users...

/// See the file comment for details on the usage of the
/// TrailingObjects type.
template <typename BaseTy, typename... TrailingTys>
class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl<
                            trailing_objects_internal::AlignmentCalcHelper<
                                TrailingTys...>::Alignment,
                            BaseTy, TrailingObjects<BaseTy, TrailingTys...>,
                            BaseTy, TrailingTys...> {

  template <int A, typename B, typename T, typename P, typename... M>
  friend class trailing_objects_internal::TrailingObjectsImpl;

  template <typename... Tys> class Foo {};

  typedef trailing_objects_internal::TrailingObjectsImpl<
      trailing_objects_internal::AlignmentCalcHelper<TrailingTys...>::Alignment,
      BaseTy, TrailingObjects<BaseTy, TrailingTys...>, BaseTy, TrailingTys...>
      ParentType;
  using TrailingObjectsBase = trailing_objects_internal::TrailingObjectsBase;

  using ParentType::getTrailingObjectsImpl;

  // This function contains only a static_assert verifying that BaseTy is
  // final. The static_assert must be in a function, and not at class level,
  // because BaseTy isn't complete at class instantiation time, but
  // will be by the time this function is instantiated.
  static void verifyTrailingObjectsAssertions() {
    static_assert(std::is_final<BaseTy>(), "BaseTy must be final.");
  }

  // These two methods are the base of the recursion for this method.
  static const BaseTy *
  getTrailingObjectsImpl(const BaseTy *Obj,
                         TrailingObjectsBase::OverloadToken<BaseTy>) {
    return Obj;
  }

  static BaseTy *
  getTrailingObjectsImpl(BaseTy *Obj,
                         TrailingObjectsBase::OverloadToken<BaseTy>) {
    return Obj;
  }

  // callNumTrailingObjects simply calls numTrailingObjects on the
  // provided Obj -- except when the type being queried is BaseTy
  // itself. There is always only one of the base object, so that case
  // is handled here. (An additional benefit of indirecting through
  // this function is that consumers only say "friend
  // TrailingObjects", and thus, only this class itself can call the
  // numTrailingObjects function.)
  static size_t
  callNumTrailingObjects(const BaseTy *Obj,
                         TrailingObjectsBase::OverloadToken<BaseTy>) {
    return 1;
  }

  template <typename T>
  static size_t callNumTrailingObjects(const BaseTy *Obj,
                                       TrailingObjectsBase::OverloadToken<T>) {
    return Obj->numTrailingObjects(TrailingObjectsBase::OverloadToken<T>());
  }

public:
  // Make this (privately inherited) member public.
#ifndef _MSC_VER
  using ParentType::OverloadToken;
#else
  // An MSVC bug prevents the above from working (last tested at CL version
  // 19.28). "Class5" in TrailingObjectsTest.cpp tests the problematic case.
  template <typename T>
  using OverloadToken = typename ParentType::template OverloadToken<T>;
#endif

  /// Returns a pointer to the trailing object array of the given type
  /// (which must be one of those specified in the class template). The
  /// array may have zero or more elements in it.
  template <typename T> const T *getTrailingObjects() const {
    verifyTrailingObjectsAssertions();
    // Forwards to an impl function with overloads, since member
    // function templates can't be specialized.
    return this->getTrailingObjectsImpl(
        static_cast<const BaseTy *>(this),
        TrailingObjectsBase::OverloadToken<T>());
  }

  /// Returns a pointer to the trailing object array of the given type
  /// (which must be one of those specified in the class template). The
  /// array may have zero or more elements in it.
  template <typename T> T *getTrailingObjects() {
    verifyTrailingObjectsAssertions();
    // Forwards to an impl function with overloads, since member
    // function templates can't be specialized.
    return this->getTrailingObjectsImpl(
        static_cast<BaseTy *>(this), TrailingObjectsBase::OverloadToken<T>());
  }

  /// Returns the size of the trailing data, if an object were
  /// allocated with the given counts (The counts are in the same order
  /// as the template arguments). This does not include the size of the
  /// base object.  The template arguments must be the same as those
  /// used in the class; they are supplied here redundantly only so
  /// that it's clear what the counts are counting in callers.
  template <typename... Tys>
  static constexpr std::enable_if_t<
      std::is_same_v<Foo<TrailingTys...>, Foo<Tys...>>, size_t>
  additionalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
                        TrailingTys, size_t>::type... Counts) {
    return ParentType::additionalSizeToAllocImpl(0, Counts...);
  }

  /// Returns the total size of an object if it were allocated with the
  /// given trailing object counts. This is the same as
  /// additionalSizeToAlloc, except it *does* include the size of the base
  /// object.
  template <typename... Tys>
  static constexpr std::enable_if_t<
      std::is_same_v<Foo<TrailingTys...>, Foo<Tys...>>, size_t>
  totalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
                   TrailingTys, size_t>::type... Counts) {
    return sizeof(BaseTy) + ParentType::additionalSizeToAllocImpl(0, Counts...);
  }

  TrailingObjects() = default;
  TrailingObjects(const TrailingObjects &) = delete;
  TrailingObjects(TrailingObjects &&) = delete;
  TrailingObjects &operator=(const TrailingObjects &) = delete;
  TrailingObjects &operator=(TrailingObjects &&) = delete;

  /// A type where its ::with_counts template member has a ::type member
  /// suitable for use as uninitialized storage for an object with the given
  /// trailing object counts. The template arguments are similar to those
  /// of additionalSizeToAlloc.
  ///
  /// Use with FixedSizeStorageOwner, e.g.:
  ///
  /// \code{.cpp}
  ///
  /// MyObj::FixedSizeStorage<void *>::with_counts<1u>::type myStackObjStorage;
  /// MyObj::FixedSizeStorageOwner
  ///     myStackObjOwner(new ((void *)&myStackObjStorage) MyObj);
  /// MyObj *const myStackObjPtr = myStackObjOwner.get();
  ///
  /// \endcode
  template <typename... Tys> struct FixedSizeStorage {
    template <size_t... Counts> struct with_counts {
      enum { Size = totalSizeToAlloc<Tys...>(Counts...) };
      struct type {
        alignas(BaseTy) char buffer[Size];
      };
    };
  };

  /// A type that acts as the owner for an object placed into fixed storage.
  class FixedSizeStorageOwner {
  public:
    FixedSizeStorageOwner(BaseTy *p) : p(p) {}
    ~FixedSizeStorageOwner() {
      assert(p && "FixedSizeStorageOwner owns null?");
      p->~BaseTy();
    }

    BaseTy *get() { return p; }
    const BaseTy *get() const { return p; }

  private:
    FixedSizeStorageOwner(const FixedSizeStorageOwner &) = delete;
    FixedSizeStorageOwner(FixedSizeStorageOwner &&) = delete;
    FixedSizeStorageOwner &operator=(const FixedSizeStorageOwner &) = delete;
    FixedSizeStorageOwner &operator=(FixedSizeStorageOwner &&) = delete;

    BaseTy *const p;
  };
};
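
// Illustrative allocation sketch (not part of the original header), extending
// the VarLengthObj example from the file comment with a hypothetical factory.
// Because TrailingObjects is inherited privately, the factory must be a member
// of VarLengthObj itself:
//
//   static VarLengthObj *create(unsigned NumInts, unsigned NumDoubles) {
//     void *Mem =
//         ::operator new(totalSizeToAlloc<int, double>(NumInts, NumDoubles));
//     return new (Mem) VarLengthObj(NumInts, NumDoubles);
//   }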

} // end namespace llvm

#endif

// File: Support/ErrorHandling.h
//===- llvm/Support/ErrorHandling.h - Fatal error handling ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an API used to indicate fatal error conditions.  Non-fatal
// errors (most of them) should be handled through LLVMContext.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERRORHANDLING_H
#define LLVM_SUPPORT_ERRORHANDLING_H

#include "llvm/Support/Compiler.h"

namespace llvm {
  class StringRef;
  class Twine;

  /// An error handler callback.
  typedef void (*fatal_error_handler_t)(void *user_data,
                                        const char *reason,
                                        bool gen_crash_diag);

  /// install_fatal_error_handler - Installs a new error handler to be used
  /// whenever a serious (non-recoverable) error is encountered by LLVM.
  ///
  /// If no error handler is installed the default is to print the error message
  /// to stderr, and call exit(1).  If an error handler is installed then it is
  /// the handler's responsibility to log the message, it will no longer be
  /// printed to stderr.  If the error handler returns, then exit(1) will be
  /// called.
  ///
  /// It is dangerous to naively use an error handler which throws an exception.
  /// Even though some applications desire to gracefully recover from arbitrary
  /// faults, blindly throwing exceptions through unfamiliar code isn't a way to
  /// achieve this.
  ///
  /// \param user_data - An argument which will be passed to the install error
  /// handler.
  void install_fatal_error_handler(fatal_error_handler_t handler,
                                   void *user_data = nullptr);

  /// Restores default error handling behaviour.
  void remove_fatal_error_handler();

  /// ScopedFatalErrorHandler - This is a simple helper class which just
  /// calls install_fatal_error_handler in its constructor and
  /// remove_fatal_error_handler in its destructor.
  struct ScopedFatalErrorHandler {
    explicit ScopedFatalErrorHandler(fatal_error_handler_t handler,
                                     void *user_data = nullptr) {
      install_fatal_error_handler(handler, user_data);
    }

    ~ScopedFatalErrorHandler() { remove_fatal_error_handler(); }
  };
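
  // Illustrative usage sketch (not part of the original header): a handler
  // matching fatal_error_handler_t, installed for the current scope.
  //
  //   static void logAndDie(void *UserData, const char *Reason,
  //                         bool GenCrashDiag) {
  //     fprintf(stderr, "LLVM fatal error: %s\n", Reason);
  //   }
  //   ...
  //   llvm::ScopedFatalErrorHandler Guard(logAndDie);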

/// Reports a serious error, calling any installed error handler. These
/// functions are intended to be used for error conditions which are outside
/// the control of the compiler (I/O errors, invalid user input, etc.)
///
/// If no error handler is installed the default is to print the message to
/// standard error, followed by a newline.
/// After the error handler is called, this function calls abort(); it
/// does not return.
/// NOTE: The std::string variant was removed to avoid a <string> dependency.
[[noreturn]] void report_fatal_error(const char *reason,
                                     bool gen_crash_diag = true);
[[noreturn]] void report_fatal_error(StringRef reason,
                                     bool gen_crash_diag = true);
[[noreturn]] void report_fatal_error(const Twine &reason,
                                     bool gen_crash_diag = true);

/// Installs a new bad alloc error handler that should be used whenever a
/// bad alloc error, e.g. failing malloc/calloc, is encountered by LLVM.
///
/// The user can install a bad alloc handler, in order to define the behavior
/// in case of failing allocations, e.g. throwing an exception. Note that this
/// handler must not trigger any additional allocations itself.
///
/// If no error handler is installed the default is to print the error message
/// to stderr, and call exit(1).  If an error handler is installed then it is
/// the handler's responsibility to log the message, it will no longer be
/// printed to stderr.  If the error handler returns, then exit(1) will be
/// called.
///
///
/// \param user_data - An argument which will be passed to the installed error
/// handler.
void install_bad_alloc_error_handler(fatal_error_handler_t handler,
                                     void *user_data = nullptr);

/// Restores default bad alloc error handling behavior.
void remove_bad_alloc_error_handler();

void install_out_of_memory_new_handler();

/// Reports a bad alloc error, calling any user defined bad alloc
/// error handler. In contrast to the generic 'report_fatal_error'
/// functions, this function might not terminate the process, e.g. if the
/// user-defined error handler throws an exception; but it won't return.
///
/// Note: When throwing an exception in the bad alloc handler, make sure that
/// the following unwind succeeds, e.g. do not trigger additional allocations
/// in the unwind chain.
///
/// If no error handler is installed (default), throws a bad_alloc exception
/// if LLVM is compiled with exception support. Otherwise prints the error
/// to standard error and calls abort().
[[noreturn]] void report_bad_alloc_error(const char *Reason,
                                         bool GenCrashDiag = true);

/// This function calls abort(), and prints the optional message to stderr.
/// Use the llvm_unreachable macro (that adds location info), instead of
/// calling this function directly.
[[noreturn]] void
llvm_unreachable_internal(const char *msg = nullptr, const char *file = nullptr,
                          unsigned line = 0);
}

/// Marks that the current location is not supposed to be reachable.
/// In !NDEBUG builds, prints the message and location info to stderr.
/// In NDEBUG builds, if the platform does not support a builtin unreachable
/// then we call an internal LLVM runtime function. Otherwise the behavior is
/// controlled by the CMake flag
///   -DLLVM_UNREACHABLE_OPTIMIZE
/// * When "ON" (default) llvm_unreachable() becomes an optimizer hint
///   that the current location is not supposed to be reachable: the hint
///   turns such code path into undefined behavior.  On compilers that don't
///   support such hints, prints a reduced message instead and aborts the
///   program.
/// * When "OFF", a builtin_trap is emitted instead of an
///   optimizer hint or printing a reduced message.
///
/// Use this instead of assert(0). It conveys intent more clearly, suppresses
/// diagnostics for unreachable code paths, and allows compilers to omit
/// unnecessary code.
#ifndef NDEBUG
#define llvm_unreachable(msg) \
  ::llvm::llvm_unreachable_internal(msg, __FILE__, __LINE__)
#elif !defined(LLVM_BUILTIN_UNREACHABLE)
#define llvm_unreachable(msg) ::llvm::llvm_unreachable_internal()
#elif LLVM_UNREACHABLE_OPTIMIZE
#define llvm_unreachable(msg) LLVM_BUILTIN_UNREACHABLE
#else
#define llvm_unreachable(msg)                                                  \
  do {                                                                         \
    LLVM_BUILTIN_TRAP;                                                         \
    LLVM_BUILTIN_UNREACHABLE;                                                  \
  } while (false)
#endif
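
// Illustrative usage sketch (not part of the original header): marking a
// switch's fall-through as impossible.
//
//   int classify(int Kind) {
//     switch (Kind) {
//     case 0: return 1;
//     case 1: return 2;
//     }
//     llvm_unreachable("unknown Kind");
//   }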

#endif

// File: Support/LowLevelTypeImpl.h
//== llvm/Support/LowLevelTypeImpl.h --------------------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// For a type attached to a MachineInstr, we only care about 2 details: total
/// size and the number of vector lanes (if any). Accordingly, there are 4
/// possible valid type-kinds:
///
///    * `sN` for scalars and aggregates
///    * `<N x sM>` for vectors, which must have at least 2 elements.
///    * `pN` for pointers
///    * `<N x pM>` for vectors of pointers
///
/// Other information required for correct selection is expected to be carried
/// by the opcode, or non-type flags. For example the distinction between G_ADD
/// and G_FADD for int/float or fast-math flags.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
#define LLVM_SUPPORT_LOWLEVELTYPEIMPL_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MachineValueType.h"
#include <cassert>

namespace llvm {

class Type;
class raw_ostream;

class LLT {
public:
  /// Get a low-level scalar or aggregate "bag of bits".
  static constexpr LLT scalar(unsigned SizeInBits) {
    return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
               ElementCount::getFixed(0), SizeInBits,
               /*AddressSpace=*/0};
  }

  /// Get a low-level pointer in the given address space.
  static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
    assert(SizeInBits > 0 && "invalid pointer size");
    return LLT{/*isPointer=*/true, /*isVector=*/false, /*isScalar=*/false,
               ElementCount::getFixed(0), SizeInBits, AddressSpace};
  }

  /// Get a low-level vector of some number of elements and element width.
  static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
    assert(!EC.isScalar() && "invalid number of vector elements");
    return LLT{/*isPointer=*/false, /*isVector=*/true, /*isScalar=*/false,
               EC, ScalarSizeInBits, /*AddressSpace=*/0};
  }

  /// Get a low-level vector of some number of elements and element type.
  static constexpr LLT vector(ElementCount EC, LLT ScalarTy) {
    assert(!EC.isScalar() && "invalid number of vector elements");
    assert(!ScalarTy.isVector() && "invalid vector element type");
    return LLT{ScalarTy.isPointer(),
               /*isVector=*/true,
               /*isScalar=*/false,
               EC,
               ScalarTy.getSizeInBits().getFixedValue(),
               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
  }

  /// Get a low-level fixed-width vector of some number of elements and element
  /// width.
  static constexpr LLT fixed_vector(unsigned NumElements,
                                    unsigned ScalarSizeInBits) {
    return vector(ElementCount::getFixed(NumElements), ScalarSizeInBits);
  }

  /// Get a low-level fixed-width vector of some number of elements and element
  /// type.
  static constexpr LLT fixed_vector(unsigned NumElements, LLT ScalarTy) {
    return vector(ElementCount::getFixed(NumElements), ScalarTy);
  }

  /// Get a low-level scalable vector of some number of elements and element
  /// width.
  static constexpr LLT scalable_vector(unsigned MinNumElements,
                                       unsigned ScalarSizeInBits) {
    return vector(ElementCount::getScalable(MinNumElements), ScalarSizeInBits);
  }

  /// Get a low-level scalable vector of some number of elements and element
  /// type.
  static constexpr LLT scalable_vector(unsigned MinNumElements, LLT ScalarTy) {
    return vector(ElementCount::getScalable(MinNumElements), ScalarTy);
  }

  static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy) {
    return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
  }

  static constexpr LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
    assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
           "Not enough bits in LLT to represent size");
    return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
  }
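
  // Illustrative sketch (not part of the original header): the factories
  // above, shown with the textual notation from the file comment.
  //
  //   llvm::LLT S32     = llvm::LLT::scalar(32);             // s32
  //   llvm::LLT P0      = llvm::LLT::pointer(0, 64);         // p0 (64-bit)
  //   llvm::LLT V4S32   = llvm::LLT::fixed_vector(4, S32);   // <4 x s32>
  //   llvm::LLT NXV2S64 = llvm::LLT::scalable_vector(2, 64); // <vscale x 2 x s64>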

  explicit constexpr LLT(bool isPointer, bool isVector, bool isScalar,
                         ElementCount EC, uint64_t SizeInBits,
                         unsigned AddressSpace)
      : LLT() {
    init(isPointer, isVector, isScalar, EC, SizeInBits, AddressSpace);
  }
  explicit constexpr LLT()
      : IsScalar(false), IsPointer(false), IsVector(false), RawData(0) {}

  explicit LLT(MVT VT);

  constexpr bool isValid() const { return IsScalar || RawData != 0; }

  constexpr bool isScalar() const { return IsScalar; }

  constexpr bool isPointer() const {
    return isValid() && IsPointer && !IsVector;
  }

  constexpr bool isVector() const { return isValid() && IsVector; }

  /// Returns the number of elements in a vector LLT. Must only be called on
  /// vector types.
  constexpr uint16_t getNumElements() const {
    if (isScalable())
      llvm::reportInvalidSizeRequest(
          "Possible incorrect use of LLT::getNumElements() for "
          "scalable vector. Scalable flag may be dropped, use "
          "LLT::getElementCount() instead");
    return getElementCount().getKnownMinValue();
  }

  /// Returns true if the LLT is a scalable vector. Must only be called on
  /// vector types.
  constexpr bool isScalable() const {
    assert(isVector() && "Expected a vector type");
    return IsPointer ? getFieldValue(PointerVectorScalableFieldInfo)
                     : getFieldValue(VectorScalableFieldInfo);
  }

  constexpr ElementCount getElementCount() const {
    assert(IsVector && "cannot get number of elements on scalar/aggregate");
    return ElementCount::get(IsPointer
                                 ? getFieldValue(PointerVectorElementsFieldInfo)
                                 : getFieldValue(VectorElementsFieldInfo),
                             isScalable());
  }

  /// Returns the total size of the type. Must only be called on sized types.
  constexpr TypeSize getSizeInBits() const {
    if (isPointer() || isScalar())
      return TypeSize::Fixed(getScalarSizeInBits());
    auto EC = getElementCount();
    return TypeSize(getScalarSizeInBits() * EC.getKnownMinValue(),
                    EC.isScalable());
  }

  /// Returns the total size of the type in bytes, i.e. number of whole bytes
  /// needed to represent the size in bits. Must only be called on sized types.
  constexpr TypeSize getSizeInBytes() const {
    TypeSize BaseSize = getSizeInBits();
    return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
  }
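
  // Worked example (hedged): LLT::scalable_vector(2, 32).getSizeInBits() is a
  // scalable TypeSize with a known minimum of 64 bits, and getSizeInBytes()
  // rounds the bit size up to whole bytes, here a known minimum of 8 bytes.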

  constexpr LLT getScalarType() const {
    return isVector() ? getElementType() : *this;
  }

  /// If this type is a vector, return a vector with the same number of elements
  /// but the new element type. Otherwise, return the new element type.
  constexpr LLT changeElementType(LLT NewEltTy) const {
    return isVector() ? LLT::vector(getElementCount(), NewEltTy) : NewEltTy;
  }

  /// If this type is a vector, return a vector with the same number of elements
  /// but the new element size. Otherwise, return a scalar with the new size.
  /// Invalid for pointer types; for pointer types, use changeElementType.
  constexpr LLT changeElementSize(unsigned NewEltSize) const {
    assert(!getScalarType().isPointer() &&
           "invalid to directly change element size for pointers");
    return isVector() ? LLT::vector(getElementCount(), NewEltSize)
                      : LLT::scalar(NewEltSize);
  }

  /// Return a vector or scalar with the same element type and the new element
  /// count.
  constexpr LLT changeElementCount(ElementCount EC) const {
    return LLT::scalarOrVector(EC, getScalarType());
  }

  /// Return a type that is \p Factor times smaller. Reduces the number of
  /// elements if this is a vector, or the bitwidth for scalar/pointers. Does
  /// not attempt to handle cases that aren't evenly divisible.
  constexpr LLT divide(int Factor) const {
    assert(Factor != 1);
    assert((!isScalar() || getScalarSizeInBits() != 0) &&
           "cannot divide scalar of size zero");
    if (isVector()) {
      assert(getElementCount().isKnownMultipleOf(Factor));
      return scalarOrVector(getElementCount().divideCoefficientBy(Factor),
                            getElementType());
    }

    assert(getScalarSizeInBits() % Factor == 0);
    return scalar(getScalarSizeInBits() / Factor);
  }

  /// Produce a vector type that is \p Factor times bigger, preserving the
  /// element type. For a scalar or pointer, this will produce a new vector with
  /// \p Factor elements.
  constexpr LLT multiplyElements(int Factor) const {
    if (isVector()) {
      return scalarOrVector(getElementCount().multiplyCoefficientBy(Factor),
                            getElementType());
    }

    return fixed_vector(Factor, *this);
  }
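
  // Worked examples (hedged):
  //   LLT::fixed_vector(4, 32).divide(2)  == <2 x s32>  (element count halves)
  //   LLT::scalar(32).multiplyElements(4) == <4 x s32>  (scalar becomes vector)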

  constexpr bool isByteSized() const {
    return getSizeInBits().isKnownMultipleOf(8);
  }

  constexpr unsigned getScalarSizeInBits() const {
    if (IsScalar)
      return getFieldValue(ScalarSizeFieldInfo);
    if (IsVector) {
      if (!IsPointer)
        return getFieldValue(VectorSizeFieldInfo);
      else
        return getFieldValue(PointerVectorSizeFieldInfo);
    } else if (IsPointer)
      return getFieldValue(PointerSizeFieldInfo);
    else
      llvm_unreachable("unexpected LLT");
  }

  constexpr unsigned getAddressSpace() const {
    assert(RawData != 0 && "Invalid Type");
    assert(IsPointer && "cannot get address space of non-pointer type");
    if (!IsVector)
      return getFieldValue(PointerAddressSpaceFieldInfo);
    else
      return getFieldValue(PointerVectorAddressSpaceFieldInfo);
  }

  /// Returns the vector's element type. Only valid for vector types.
  constexpr LLT getElementType() const {
    assert(isVector() && "cannot get element type of scalar/aggregate");
    if (IsPointer)
      return pointer(getAddressSpace(), getScalarSizeInBits());
    else
      return scalar(getScalarSizeInBits());
  }

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const {
    print(dbgs());
    dbgs() << '\n';
  }
#endif

  constexpr bool operator==(const LLT &RHS) const {
    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
           IsScalar == RHS.IsScalar && RHS.RawData == RawData;
  }

  constexpr bool operator!=(const LLT &RHS) const { return !(*this == RHS); }

  friend struct DenseMapInfo<LLT>;
  friend class GISelInstProfileBuilder;

private:
  /// LLT is packed into 64 bits as follows:
  /// isScalar : 1
  /// isPointer : 1
  /// isVector  : 1
  /// with 61 bits remaining for Kind-specific data, packed in bitfields
  /// as described below. As there isn't a simple portable way to pack bits
  /// into bitfields, here the different fields in the packed structure are
  /// described in static const *FieldInfo variables. Each of these variables
  /// is a 2-element array, with the first element describing the bitfield size
  /// and the second element describing the bitfield offset.
  typedef int BitFieldInfo[2];
  ///
  /// This is how the bitfields are packed per Kind:
  /// * Invalid:
  ///   gets encoded as RawData == 0, as that is an invalid encoding, since for
  ///   valid encodings, SizeInBits/SizeOfElement must be larger than 0.
  /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
  ///   SizeInBits: 32;
  static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 0};
  /// * Pointer (isPointer == 1 && isVector == 0):
  ///   SizeInBits: 16;
  ///   AddressSpace: 24;
  static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 0};
  static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
      24, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
  static_assert((PointerAddressSpaceFieldInfo[0] +
                 PointerAddressSpaceFieldInfo[1]) <= 61,
                "Insufficient bits to encode all data");
  /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
  ///   NumElements: 16;
  ///   SizeOfElement: 32;
  ///   Scalable: 1;
  static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 0};
  static const constexpr BitFieldInfo VectorSizeFieldInfo{
      32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
  static const constexpr BitFieldInfo VectorScalableFieldInfo{
      1, VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]};
  static_assert((VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]) <= 61,
                "Insufficient bits to encode all data");
  /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
  ///   NumElements: 16;
  ///   SizeOfElement: 16;
  ///   AddressSpace: 24;
  ///   Scalable: 1;
  static const constexpr BitFieldInfo PointerVectorElementsFieldInfo{16, 0};
  static const constexpr BitFieldInfo PointerVectorSizeFieldInfo{
      16,
      PointerVectorElementsFieldInfo[1] + PointerVectorElementsFieldInfo[0]};
  static const constexpr BitFieldInfo PointerVectorAddressSpaceFieldInfo{
      24, PointerVectorSizeFieldInfo[1] + PointerVectorSizeFieldInfo[0]};
  static const constexpr BitFieldInfo PointerVectorScalableFieldInfo{
      1, PointerVectorAddressSpaceFieldInfo[0] +
             PointerVectorAddressSpaceFieldInfo[1]};
  static_assert((PointerVectorAddressSpaceFieldInfo[0] +
                 PointerVectorAddressSpaceFieldInfo[1]) <= 61,
                "Insufficient bits to encode all data");

  uint64_t IsScalar : 1;
  uint64_t IsPointer : 1;
  uint64_t IsVector : 1;
  uint64_t RawData : 61;

  static constexpr uint64_t getMask(const BitFieldInfo FieldInfo) {
    const int FieldSizeInBits = FieldInfo[0];
    return (((uint64_t)1) << FieldSizeInBits) - 1;
  }
  static constexpr uint64_t maskAndShift(uint64_t Val, uint64_t Mask,
                                         uint8_t Shift) {
    assert(Val <= Mask && "Value too large for field");
    return (Val & Mask) << Shift;
  }
  static constexpr uint64_t maskAndShift(uint64_t Val,
                                         const BitFieldInfo FieldInfo) {
    return maskAndShift(Val, getMask(FieldInfo), FieldInfo[1]);
  }

  constexpr uint64_t getFieldValue(const BitFieldInfo FieldInfo) const {
    return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
  }
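
  // Worked example (hedged): a 64-bit pointer in address space 1 packs its
  // RawData as
  //   maskAndShift(64, PointerSizeFieldInfo)            // bits [0, 16)
  //     | maskAndShift(1, PointerAddressSpaceFieldInfo) // bits [16, 40)
  // i.e. RawData == 64 | (1 << 16) == 0x10040.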

  constexpr void init(bool IsPointer, bool IsVector, bool IsScalar,
                      ElementCount EC, uint64_t SizeInBits,
                      unsigned AddressSpace) {
    assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
           "Not enough bits in LLT to represent size");
    this->IsPointer = IsPointer;
    this->IsVector = IsVector;
    this->IsScalar = IsScalar;
    if (IsScalar)
      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
    else if (IsVector) {
      assert(EC.isVector() && "invalid number of vector elements");
      if (!IsPointer)
        RawData =
            maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
            maskAndShift(SizeInBits, VectorSizeFieldInfo) |
            maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
      else
        RawData =
            maskAndShift(EC.getKnownMinValue(),
                         PointerVectorElementsFieldInfo) |
            maskAndShift(SizeInBits, PointerVectorSizeFieldInfo) |
            maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo) |
            maskAndShift(EC.isScalable() ? 1 : 0,
                         PointerVectorScalableFieldInfo);
    } else if (IsPointer)
      RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
                maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
    else
      llvm_unreachable("unexpected LLT configuration");
  }

public:
  constexpr uint64_t getUniqueRAWLLTData() const {
    return ((uint64_t)RawData) << 3 | ((uint64_t)IsScalar) << 2 |
           ((uint64_t)IsPointer) << 1 | ((uint64_t)IsVector);
  }
};

inline raw_ostream& operator<<(raw_ostream &OS, const LLT &Ty) {
  Ty.print(OS);
  return OS;
}

template<> struct DenseMapInfo<LLT> {
  static inline LLT getEmptyKey() {
    LLT Invalid;
    Invalid.IsPointer = true;
    return Invalid;
  }
  static inline LLT getTombstoneKey() {
    LLT Invalid;
    Invalid.IsVector = true;
    return Invalid;
  }
  static inline unsigned getHashValue(const LLT &Ty) {
    uint64_t Val = Ty.getUniqueRAWLLTData();
    return DenseMapInfo<uint64_t>::getHashValue(Val);
  }
  static bool isEqual(const LLT &LHS, const LLT &RHS) {
    return LHS == RHS;
  }
};
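
// A hedged usage sketch: with the DenseMapInfo<LLT> specialization above, LLT
// can be used directly as a DenseMap key:
//   llvm::DenseMap<LLT, unsigned> NumRegsForType; // NumRegsForType: illustrative
//   NumRegsForType[LLT::scalar(32)] = 1;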

} // end namespace llvm

#endif // LLVM_SUPPORT_LOWLEVELTYPEIMPL_H
//===- llvm/Support/Valgrind.h - Communication with Valgrind ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods for communicating with a valgrind instance this program is running
// under.  These are all no-ops unless LLVM was configured on a system with the
// valgrind headers installed and valgrind is controlling this process.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_VALGRIND_H
#define LLVM_SUPPORT_VALGRIND_H

#include <cstddef>

namespace llvm {
namespace sys {
  // True if Valgrind is controlling this process.
  bool RunningOnValgrind();

  // Discard valgrind's translation of code in the range [Addr .. Addr + Len).
  // Otherwise valgrind may continue to execute the old version of the code.
  void ValgrindDiscardTranslations(const void *Addr, size_t Len);
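
  // A hedged usage sketch: a JIT that patches code in place would discard the
  // stale translations afterwards (CodeAddr/NewBytes/Len are hypothetical):
  //   std::memcpy(CodeAddr, NewBytes, Len);
  //   llvm::sys::ValgrindDiscardTranslations(CodeAddr, Len); // no-op off Valgrind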
} // namespace sys
} // end namespace llvm

#endif // LLVM_SUPPORT_VALGRIND_H
//====- SHA256.h - SHA256 implementation ---*- C++ -* ======//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/*
 *  The SHA-256 Secure Hash Standard was published by NIST in 2002.
 *
 *  http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
 *
 *   The implementation is based on nacl's sha256 implementation [0] and LLVM's
 *  pre-existing SHA1 code [1].
 *
 *   [0] https://hyperelliptic.org/nacl/nacl-20110221.tar.bz2 (public domain
 *       code)
 *   [1] llvm/lib/Support/SHA1.{h,cpp}
 */
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SHA256_H
#define LLVM_SUPPORT_SHA256_H

#include <array>
#include <cstdint>

namespace llvm {

template <typename T> class ArrayRef;
class StringRef;

class SHA256 {
public:
  explicit SHA256() { init(); }

  /// Reinitialize the internal state
  void init();

  /// Digest more data.
  void update(ArrayRef<uint8_t> Data);

  /// Digest more data.
  void update(StringRef Str);

  /// Return the current raw 256-bit SHA256 hash for the digested
  /// data since the last call to init(). This call will add data to the
  /// internal state and as such is not suited for getting an intermediate
  /// result (see result()).
  std::array<uint8_t, 32> final();

  /// Return the current raw 256-bit SHA256 hash for the digested
  /// data since the last call to init(). This is suitable for getting the
  /// SHA256 at any time without invalidating the internal state so that more
  /// calls can be made into update.
  std::array<uint8_t, 32> result();

  /// Returns a raw 256-bit SHA256 hash for the given data.
  static std::array<uint8_t, 32> hash(ArrayRef<uint8_t> Data);
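
  // A hedged usage sketch of the incremental API above:
  //   llvm::SHA256 Hasher;
  //   Hasher.update(StringRef("hello "));
  //   Hasher.update(StringRef("world"));
  //   std::array<uint8_t, 32> Interim = Hasher.result(); // state kept intact
  //   std::array<uint8_t, 32> Digest  = Hasher.final();  // finalizes the state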

private:
  /// Define some constants.
  /// "static constexpr" would be cleaner but MSVC does not support it yet.
  enum { BLOCK_LENGTH = 64 };
  enum { HASH_LENGTH = 32 };

  // Internal State
  struct {
    union {
      uint8_t C[BLOCK_LENGTH];
      uint32_t L[BLOCK_LENGTH / 4];
    } Buffer;
    uint32_t State[HASH_LENGTH / 4];
    uint32_t ByteCount;
    uint8_t BufferOffset;
  } InternalState;

  // Helper
  void writebyte(uint8_t data);
  void hashBlock();
  void addUncounted(uint8_t data);
  void pad();

  void final(std::array<uint32_t, HASH_LENGTH / 4> &HashResult);
};

} // namespace llvm

#endif // LLVM_SUPPORT_SHA256_H
//===-- llvm/Support/Alignment.h - Useful alignment functions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains types to represent alignments.
// They are instrumented to guarantee some invariants are preserved and prevent
// invalid manipulations.
//
// - Align represents an alignment in bytes, it is always set and always a valid
// power of two, its minimum value is 1 which means no alignment requirements.
//
// - MaybeAlign is an optional type, it may be undefined or set. When it's set
// you can get the underlying Align type by using the getValue() method.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALIGNMENT_H_
#define LLVM_SUPPORT_ALIGNMENT_H_

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <optional>
#ifndef NDEBUG
#include <string>
#endif // NDEBUG

namespace llvm {

#define ALIGN_CHECK_ISPOSITIVE(decl)                                           \
  assert(decl > 0 && (#decl " should be defined"))

/// This struct is a compact representation of a valid (non-zero power of two)
/// alignment.
/// It is suitable for use as static global constants.
struct Align {
private:
  uint8_t ShiftValue = 0; /// The log2 of the required alignment.
                          /// ShiftValue is less than 64 by construction.

  friend struct MaybeAlign;
  friend unsigned Log2(Align);
  friend bool operator==(Align Lhs, Align Rhs);
  friend bool operator!=(Align Lhs, Align Rhs);
  friend bool operator<=(Align Lhs, Align Rhs);
  friend bool operator>=(Align Lhs, Align Rhs);
  friend bool operator<(Align Lhs, Align Rhs);
  friend bool operator>(Align Lhs, Align Rhs);
  friend unsigned encode(struct MaybeAlign A);
  friend struct MaybeAlign decodeMaybeAlign(unsigned Value);

  /// A trivial type to allow construction of constexpr Align.
  /// This is currently needed to workaround a bug in GCC 5.3 which prevents
  /// definition of constexpr assign operators.
  /// https://stackoverflow.com/questions/46756288/explicitly-defaulted-function-cannot-be-declared-as-constexpr-because-the-implic
  /// FIXME: Remove this, make all assign operators constexpr and introduce user
  /// defined literals when we don't have to support GCC 5.3 anymore.
  /// https://llvm.org/docs/GettingStarted.html#getting-a-modern-host-c-toolchain
  struct LogValue {
    uint8_t Log;
  };

public:
  /// Default is byte-aligned.
  constexpr Align() = default;
  /// Do not perform checks in case of copy/move construct/assign, because the
  /// checks have been performed when building `Other`.
  constexpr Align(const Align &Other) = default;
  constexpr Align(Align &&Other) = default;
  Align &operator=(const Align &Other) = default;
  Align &operator=(Align &&Other) = default;

  explicit Align(uint64_t Value) {
    assert(Value > 0 && "Value must not be 0");
    assert(llvm::isPowerOf2_64(Value) && "Alignment is not a power of 2");
    ShiftValue = Log2_64(Value);
    assert(ShiftValue < 64 && "Broken invariant");
  }

  /// This is a hole in the type system and should not be abused.
  /// Needed to interact with C for instance.
  uint64_t value() const { return uint64_t(1) << ShiftValue; }

  // Returns the previous alignment.
  Align previous() const {
    assert(ShiftValue != 0 && "Undefined operation");
    Align Out;
    Out.ShiftValue = ShiftValue - 1;
    return Out;
  }

  /// Allow constructions of constexpr Align.
  template <size_t kValue> constexpr static Align Constant() {
    return LogValue{static_cast<uint8_t>(CTLog2<kValue>())};
  }

  /// Allow constructions of constexpr Align from types.
  /// Compile time equivalent to Align(alignof(T)).
  template <typename T> constexpr static Align Of() {
    return Constant<std::alignment_of_v<T>>();
  }

  /// Constexpr constructor from LogValue type.
  constexpr Align(LogValue CA) : ShiftValue(CA.Log) {}
};

/// Treats the value 0 as a 1, so Align is always at least 1.
inline Align assumeAligned(uint64_t Value) {
  return Value ? Align(Value) : Align();
}

/// This struct is a compact representation of a valid (power of two) or
/// undefined (0) alignment.
struct MaybeAlign : public std::optional<Align> {
private:
  using UP = std::optional<Align>;

public:
  /// Default is undefined.
  MaybeAlign() = default;
  /// Do not perform checks in case of copy/move construct/assign, because the
  /// checks have been performed when building `Other`.
  MaybeAlign(const MaybeAlign &Other) = default;
  MaybeAlign &operator=(const MaybeAlign &Other) = default;
  MaybeAlign(MaybeAlign &&Other) = default;
  MaybeAlign &operator=(MaybeAlign &&Other) = default;

  constexpr MaybeAlign(std::nullopt_t None) : UP(None) {}
  constexpr MaybeAlign(Align Value) : UP(Value) {}
  explicit MaybeAlign(uint64_t Value) {
    assert((Value == 0 || llvm::isPowerOf2_64(Value)) &&
           "Alignment is neither 0 nor a power of 2");
    if (Value)
      emplace(Value);
  }

  /// For convenience, returns a valid alignment or 1 if undefined.
  Align valueOrOne() const { return value_or(Align()); }
};

/// Checks that SizeInBytes is a multiple of the alignment.
inline bool isAligned(Align Lhs, uint64_t SizeInBytes) {
  return SizeInBytes % Lhs.value() == 0;
}
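
// Worked example: isAligned(Align(4), 12) is true and isAligned(Align(8), 12)
// is false, since 12 is a multiple of 4 but not of 8.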

/// Checks that Addr is a multiple of the alignment.
inline bool isAddrAligned(Align Lhs, const void *Addr) {
  return isAligned(Lhs, reinterpret_cast<uintptr_t>(Addr));
}

/// Returns a multiple of A needed to store `Size` bytes.
inline uint64_t alignTo(uint64_t Size, Align A) {
  const uint64_t Value = A.value();
  // The following line is equivalent to `(Size + Value - 1) / Value * Value`.

  // The division followed by a multiplication can be thought of as a right
  // shift followed by a left shift which zeros out the extra bits produced in
  // the bump; `~(Value - 1)` is a mask where all those bits being zeroed out
  // are just zero.

  // Most compilers can generate this code but the pattern may be missed when
  // multiple functions get inlined.
  return (Size + Value - 1) & ~(Value - 1U);
}
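
// Worked example: alignTo(13, Align(8)) evaluates (13 + 7) & ~7 == 16.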

/// If non-zero \p Skew is specified, the return value will be a minimal integer
/// that is greater than or equal to \p Size and equal to \p A * N + \p Skew for
/// some integer N. If \p Skew is larger than \p A, its value is adjusted to '\p
/// Skew mod \p A'.
///
/// Examples:
/// \code
///   alignTo(5, Align(8), 7) = 7
///   alignTo(17, Align(8), 1) = 17
///   alignTo(~0LL, Align(8), 3) = 3
/// \endcode
inline uint64_t alignTo(uint64_t Size, Align A, uint64_t Skew) {
  const uint64_t Value = A.value();
  Skew %= Value;
  return alignTo(Size - Skew, A) + Skew;
}

/// Aligns `Addr` to `Alignment` bytes, rounding up.
inline uintptr_t alignAddr(const void *Addr, Align Alignment) {
  uintptr_t ArithAddr = reinterpret_cast<uintptr_t>(Addr);
  assert(static_cast<uintptr_t>(ArithAddr + Alignment.value() - 1) >=
             ArithAddr &&
         "Overflow");
  return alignTo(ArithAddr, Alignment);
}

/// Returns the offset to the next integer (mod 2**64) that is greater than
/// or equal to \p Value and is a multiple of \p Align.
inline uint64_t offsetToAlignment(uint64_t Value, Align Alignment) {
  return alignTo(Value, Alignment) - Value;
}

/// Returns the necessary adjustment for aligning `Addr` to `Alignment`
/// bytes, rounding up.
inline uint64_t offsetToAlignedAddr(const void *Addr, Align Alignment) {
  return offsetToAlignment(reinterpret_cast<uintptr_t>(Addr), Alignment);
}

/// Returns the log2 of the alignment.
inline unsigned Log2(Align A) { return A.ShiftValue; }

/// Returns the alignment that satisfies both alignments.
/// Same semantic as MinAlign.
inline Align commonAlignment(Align A, uint64_t Offset) {
  return Align(MinAlign(A.value(), Offset));
}

/// Returns a representation of the alignment that encodes undefined as 0.
inline unsigned encode(MaybeAlign A) { return A ? A->ShiftValue + 1 : 0; }

/// Dual operation of the encode function above.
inline MaybeAlign decodeMaybeAlign(unsigned Value) {
  if (Value == 0)
    return MaybeAlign();
  Align Out;
  Out.ShiftValue = Value - 1;
  return Out;
}

/// Returns a representation of the alignment, the encoded value is positive by
/// definition.
inline unsigned encode(Align A) { return encode(MaybeAlign(A)); }
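
// Worked example: encode(Align(8)) == Log2(8) + 1 == 4, decodeMaybeAlign(4)
// round-trips back to Align(8), and decodeMaybeAlign(0) is an empty MaybeAlign.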

/// Comparisons between Align and scalars. Rhs must be positive.
inline bool operator==(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() == Rhs;
}
inline bool operator!=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() != Rhs;
}
inline bool operator<=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() <= Rhs;
}
inline bool operator>=(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() >= Rhs;
}
inline bool operator<(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() < Rhs;
}
inline bool operator>(Align Lhs, uint64_t Rhs) {
  ALIGN_CHECK_ISPOSITIVE(Rhs);
  return Lhs.value() > Rhs;
}

/// Comparisons operators between Align.
inline bool operator==(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue == Rhs.ShiftValue;
}
inline bool operator!=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue != Rhs.ShiftValue;
}
inline bool operator<=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue <= Rhs.ShiftValue;
}
inline bool operator>=(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue >= Rhs.ShiftValue;
}
inline bool operator<(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue < Rhs.ShiftValue;
}
inline bool operator>(Align Lhs, Align Rhs) {
  return Lhs.ShiftValue > Rhs.ShiftValue;
}

// Don't allow relational comparisons with MaybeAlign.
bool operator<=(Align Lhs, MaybeAlign Rhs) = delete;
bool operator>=(Align Lhs, MaybeAlign Rhs) = delete;
bool operator<(Align Lhs, MaybeAlign Rhs) = delete;
bool operator>(Align Lhs, MaybeAlign Rhs) = delete;

bool operator<=(MaybeAlign Lhs, Align Rhs) = delete;
bool operator>=(MaybeAlign Lhs, Align Rhs) = delete;
bool operator<(MaybeAlign Lhs, Align Rhs) = delete;
bool operator>(MaybeAlign Lhs, Align Rhs) = delete;

bool operator<=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator>=(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator<(MaybeAlign Lhs, MaybeAlign Rhs) = delete;
bool operator>(MaybeAlign Lhs, MaybeAlign Rhs) = delete;

// Allow equality comparisons between Align and MaybeAlign.
inline bool operator==(MaybeAlign Lhs, Align Rhs) { return Lhs && *Lhs == Rhs; }
inline bool operator!=(MaybeAlign Lhs, Align Rhs) { return !(Lhs == Rhs); }
inline bool operator==(Align Lhs, MaybeAlign Rhs) { return Rhs == Lhs; }
inline bool operator!=(Align Lhs, MaybeAlign Rhs) { return !(Rhs == Lhs); }
// Allow equality comparisons with MaybeAlign.
inline bool operator==(MaybeAlign Lhs, MaybeAlign Rhs) {
  return (Lhs && Rhs && (*Lhs == *Rhs)) || (!Lhs && !Rhs);
}
inline bool operator!=(MaybeAlign Lhs, MaybeAlign Rhs) { return !(Lhs == Rhs); }
// Allow equality comparisons with std::nullopt.
inline bool operator==(MaybeAlign Lhs, std::nullopt_t) { return !bool(Lhs); }
inline bool operator!=(MaybeAlign Lhs, std::nullopt_t) { return bool(Lhs); }
inline bool operator==(std::nullopt_t, MaybeAlign Rhs) { return !bool(Rhs); }
inline bool operator!=(std::nullopt_t, MaybeAlign Rhs) { return bool(Rhs); }

#ifndef NDEBUG
// For usage in LLVM_DEBUG macros.
inline std::string DebugStr(const Align &A) {
  return std::to_string(A.value());
}
// For usage in LLVM_DEBUG macros.
inline std::string DebugStr(const MaybeAlign &MA) {
  if (MA)
    return std::to_string(MA->value());
  return "None";
}
#endif // NDEBUG

#undef ALIGN_CHECK_ISPOSITIVE

} // namespace llvm

#endif // LLVM_SUPPORT_ALIGNMENT_H_
//===--- CrashRecoveryContext.h - Crash Recovery ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
#define LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H

#include "llvm/ADT/STLFunctionalExtras.h"

namespace llvm {
class CrashRecoveryContextCleanup;

/// Crash recovery helper object.
///
/// This class implements support for running operations in a safe context so
/// that crashes (memory errors, stack overflow, assertion violations) can be
/// detected and control restored to the crashing thread. Crash detection is
/// purely "best effort", the exact set of failures which can be recovered from
/// is platform dependent.
///
/// Clients make use of this code by first calling
/// CrashRecoveryContext::Enable(), and then executing unsafe operations via a
/// CrashRecoveryContext object. For example:
///
/// \code
///    void actual_work(void *);
///
///    void foo() {
///      CrashRecoveryContext CRC;
///
///      if (!CRC.RunSafely(actual_work, 0)) {
///         ... a crash was detected, report error to user ...
///      }
///
///      ... no crash was detected ...
///    }
/// \endcode
///
/// To assist recovery, the class allows specifying a set of actions that will
/// be executed in any case, whether a crash occurs or not. These actions may
/// be used to reclaim resources in the case of a crash.
class CrashRecoveryContext {
  void *Impl = nullptr;
  CrashRecoveryContextCleanup *head = nullptr;

public:
  CrashRecoveryContext();
  ~CrashRecoveryContext();

  /// Register cleanup handler, which is used when the recovery context is
  /// finished.
  /// The recovery context owns the handler.
  void registerCleanup(CrashRecoveryContextCleanup *cleanup);

  void unregisterCleanup(CrashRecoveryContextCleanup *cleanup);

  /// Enable crash recovery.
  static void Enable();

  /// Disable crash recovery.
  static void Disable();

  /// Return the active context, if the code is currently executing in a
  /// thread which is in a protected context.
  static CrashRecoveryContext *GetCurrent();

  /// Return true if the current thread is recovering from a crash.
  static bool isRecoveringFromCrash();

  /// Execute the provided callback function (with the given arguments) in
  /// a protected context.
  ///
  /// \return True if the function completed successfully, and false if the
  /// function crashed (or HandleCrash was called explicitly). Clients should
  /// make as few assumptions as possible about the program state when
  /// RunSafely has returned false.
  bool RunSafely(function_ref<void()> Fn);
  bool RunSafely(void (*Fn)(void*), void *UserData) {
    return RunSafely([&]() { Fn(UserData); });
  }

  /// Execute the provided callback function (with the given arguments) in
  /// a protected context which is run in another thread (optionally with a
  /// requested stack size).
  ///
  /// See RunSafely().
  ///
  /// On Darwin, if PRIO_DARWIN_BG is set on the calling thread, it will be
  /// propagated to the new thread as well.
  bool RunSafelyOnThread(function_ref<void()>, unsigned RequestedStackSize = 0);
  bool RunSafelyOnThread(void (*Fn)(void*), void *UserData,
                         unsigned RequestedStackSize = 0) {
    return RunSafelyOnThread([&]() { Fn(UserData); }, RequestedStackSize);
  }

  /// Explicitly trigger a crash recovery in the current process, and
  /// return failure from RunSafely(). This function does not return.
  [[noreturn]] void HandleExit(int RetCode);

  /// Return true if RetCode indicates that a signal or an exception occurred.
  static bool isCrash(int RetCode);

  /// Re-throw a signal or an exception that was caught once by a
  /// CrashRecoveryContext.
  static bool throwIfCrash(int RetCode);

  /// In case of a crash, this is the crash identifier.
  int RetCode = 0;

  /// Selects whether handling of failures should be done in the same way as
  /// for regular crashes. When this is active, a crash will print the
  /// callstack, clean up any temporary files, and create a coredump/minidump.
  bool DumpStackAndCleanupOnFailure = false;
};
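
// A hedged usage sketch of the function_ref overload (doRiskyWork is
// hypothetical):
//   llvm::CrashRecoveryContext::Enable();
//   llvm::CrashRecoveryContext CRC;
//   if (!CRC.RunSafely([&] { doRiskyWork(); }) &&
//       llvm::CrashRecoveryContext::isCrash(CRC.RetCode))
//     llvm::errs() << "crashed, RetCode = " << CRC.RetCode << "\n";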

/// Abstract base class of cleanup handlers.
///
/// Derived classes override the method recoverResources, which does the
/// actual work of resource recovery.
///
/// Cleanup handlers are stored in a doubly linked list, which is owned and
/// managed by a crash recovery context.
class CrashRecoveryContextCleanup {
protected:
  CrashRecoveryContext *context = nullptr;
  CrashRecoveryContextCleanup(CrashRecoveryContext *context)
      : context(context) {}

public:
  bool cleanupFired = false;

  virtual ~CrashRecoveryContextCleanup();
  virtual void recoverResources() = 0;

  CrashRecoveryContext *getContext() const {
    return context;
  }

private:
  friend class CrashRecoveryContext;
  CrashRecoveryContextCleanup *prev = nullptr, *next = nullptr;
};

/// Base class of cleanup handler that controls recovery of resources of the
/// given type.
///
/// \tparam Derived Class that uses this class as a base.
/// \tparam T Type of controlled resource.
///
/// This class serves as a base for its template parameter as implied by
/// Curiously Recurring Template Pattern.
///
/// This class factors out creation of a cleanup handler. The latter requires
/// knowledge of the current recovery context, which is provided by this class.
template<typename Derived, typename T>
class CrashRecoveryContextCleanupBase : public CrashRecoveryContextCleanup {
protected:
  T *resource;
  CrashRecoveryContextCleanupBase(CrashRecoveryContext *context, T *resource)
      : CrashRecoveryContextCleanup(context), resource(resource) {}

public:
  /// Creates cleanup handler.
  /// \param x Pointer to the resource recovered by this handler.
  /// \return New handler or null if the method was called outside a recovery
  ///         context.
  static Derived *create(T *x) {
    if (x) {
      if (CrashRecoveryContext *context = CrashRecoveryContext::GetCurrent())
        return new Derived(context, x);
    }
    return nullptr;
  }
};

/// Cleanup handler that reclaims resource by calling destructor on it.
template <typename T>
class CrashRecoveryContextDestructorCleanup : public
  CrashRecoveryContextCleanupBase<CrashRecoveryContextDestructorCleanup<T>, T> {
public:
  CrashRecoveryContextDestructorCleanup(CrashRecoveryContext *context,
                                        T *resource)
      : CrashRecoveryContextCleanupBase<
            CrashRecoveryContextDestructorCleanup<T>, T>(context, resource) {}

  void recoverResources() override {
    this->resource->~T();
  }
};

/// Cleanup handler that reclaims resource by calling 'delete' on it.
template <typename T>
class CrashRecoveryContextDeleteCleanup : public
  CrashRecoveryContextCleanupBase<CrashRecoveryContextDeleteCleanup<T>, T> {
public:
  CrashRecoveryContextDeleteCleanup(CrashRecoveryContext *context, T *resource)
    : CrashRecoveryContextCleanupBase<
        CrashRecoveryContextDeleteCleanup<T>, T>(context, resource) {}

  void recoverResources() override { delete this->resource; }
};

/// Cleanup handler that reclaims resource by calling its method 'Release'.
template <typename T>
class CrashRecoveryContextReleaseRefCleanup : public
  CrashRecoveryContextCleanupBase<CrashRecoveryContextReleaseRefCleanup<T>, T> {
public:
  CrashRecoveryContextReleaseRefCleanup(CrashRecoveryContext *context,
                                        T *resource)
    : CrashRecoveryContextCleanupBase<CrashRecoveryContextReleaseRefCleanup<T>,
          T>(context, resource) {}

  void recoverResources() override { this->resource->Release(); }
};

/// Helper class for managing resource cleanups.
///
/// \tparam T Type of resource being reclaimed.
/// \tparam Cleanup Class that defines how the resource is reclaimed.
///
/// Clients create objects of this type in the code executed in a crash recovery
/// context to ensure that the resource will be reclaimed even in the case of
/// crash. For example:
///
/// \code
///    void actual_work(void *) {
///      ...
///      std::unique_ptr<Resource> R(new Resource());
///      CrashRecoveryContextCleanupRegistrar D(R.get());
///      ...
///    }
///
///    void foo() {
///      CrashRecoveryContext CRC;
///
///      if (!CRC.RunSafely(actual_work, 0)) {
///         ... a crash was detected, report error to user ...
///      }
///    }
/// \endcode
///
/// If the code of `actual_work` in the example above does not crash, the
/// destructor of CrashRecoveryContextCleanupRegistrar removes cleanup code from
/// the current CrashRecoveryContext and the resource is reclaimed by the
/// destructor of std::unique_ptr. If a crash happens, destructors are not
/// called and the resource is reclaimed by the cleanup object registered in
/// the recovery context by the constructor of
/// CrashRecoveryContextCleanupRegistrar.
template <typename T, typename Cleanup = CrashRecoveryContextDeleteCleanup<T> >
class CrashRecoveryContextCleanupRegistrar {
  CrashRecoveryContextCleanup *cleanup;

public:
  CrashRecoveryContextCleanupRegistrar(T *x)
    : cleanup(Cleanup::create(x)) {
    if (cleanup)
      cleanup->getContext()->registerCleanup(cleanup);
  }

  ~CrashRecoveryContextCleanupRegistrar() { unregister(); }

  void unregister() {
    if (cleanup && !cleanup->cleanupFired)
      cleanup->getContext()->unregisterCleanup(cleanup);
    cleanup = nullptr;
  }
};
} // end namespace llvm

#endif // LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
//==-- llvm/Support/CheckedArithmetic.h - Safe arithmetical operations *- C++ //
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains generic functions for operating on integers which
// give the indication on whether the operation has overflown.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CHECKEDARITHMETIC_H
#define LLVM_SUPPORT_CHECKEDARITHMETIC_H

#include "llvm/ADT/APInt.h"

#include <optional>
#include <type_traits>

namespace {

/// Utility function to apply a given method of \c APInt \p F to \p LHS and
/// \p RHS.
/// \return Empty optional if the operation overflows, or result otherwise.
template <typename T, typename F>
std::enable_if_t<std::is_integral_v<T> && sizeof(T) * 8 <= 64, std::optional<T>>
checkedOp(T LHS, T RHS, F Op, bool Signed = true) {
  llvm::APInt ALHS(sizeof(T) * 8, LHS, Signed);
  llvm::APInt ARHS(sizeof(T) * 8, RHS, Signed);
  bool Overflow;
  llvm::APInt Out = (ALHS.*Op)(ARHS, Overflow);
  if (Overflow)
    return std::nullopt;
  return Signed ? Out.getSExtValue() : Out.getZExtValue();
}
} // end anonymous namespace

namespace llvm {

/// Add two signed integers \p LHS and \p RHS.
/// \return Optional of sum if no signed overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, std::optional<T>> checkedAdd(T LHS,
                                                                   T RHS) {
  return checkedOp(LHS, RHS, &llvm::APInt::sadd_ov);
}
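
// Example (hedged): overflow collapses to std::nullopt.
//   auto A = llvm::checkedAdd<int64_t>(INT64_MAX, 1); // std::nullopt
//   auto B = llvm::checkedAdd<int64_t>(2, 3);         // contains 5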

/// Subtract two signed integers \p LHS and \p RHS.
/// \return Optional of difference if no signed overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, std::optional<T>> checkedSub(T LHS,
                                                                   T RHS) {
  return checkedOp(LHS, RHS, &llvm::APInt::ssub_ov);
}

/// Multiply two signed integers \p LHS and \p RHS.
/// \return Optional of product if no signed overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, std::optional<T>> checkedMul(T LHS,
                                                                   T RHS) {
  return checkedOp(LHS, RHS, &llvm::APInt::smul_ov);
}

/// Multiply A and B, and add C to the resulting product.
/// \return Optional of result if no signed overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, std::optional<T>> checkedMulAdd(T A, T B,
                                                                      T C) {
  if (auto Product = checkedMul(A, B))
    return checkedAdd(*Product, C);
  return std::nullopt;
}

/// Add two unsigned integers \p LHS and \p RHS.
/// \return Optional of sum if no unsigned overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, std::optional<T>>
checkedAddUnsigned(T LHS, T RHS) {
  return checkedOp(LHS, RHS, &llvm::APInt::uadd_ov, /*Signed=*/false);
}

/// Multiply two unsigned integers \p LHS and \p RHS.
/// \return Optional of product if no unsigned overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, std::optional<T>>
checkedMulUnsigned(T LHS, T RHS) {
  return checkedOp(LHS, RHS, &llvm::APInt::umul_ov, /*Signed=*/false);
}

/// Multiply unsigned integers A and B, and add C to the resulting product.
/// \return Optional of result if no unsigned overflow occurred,
/// \c std::nullopt otherwise.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, std::optional<T>>
checkedMulAddUnsigned(T A, T B, T C) {
  if (auto Product = checkedMulUnsigned(A, B))
    return checkedAddUnsigned(*Product, C);
  return std::nullopt;
}

} // End llvm namespace

#endif
/*===--- ConvertUTF.h - Universal Character Names conversions ---------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *==------------------------------------------------------------------------==*/
/*
 * Copyright © 1991-2015 Unicode, Inc. All rights reserved.
 * Distributed under the Terms of Use in
 * http://www.unicode.org/copyright.html.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of the Unicode data files and any associated documentation
 * (the "Data Files") or Unicode software and any associated documentation
 * (the "Software") to deal in the Data Files or Software
 * without restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, and/or sell copies of
 * the Data Files or Software, and to permit persons to whom the Data Files
 * or Software are furnished to do so, provided that
 * (a) this copyright and permission notice appear with all copies
 * of the Data Files or Software,
 * (b) this copyright and permission notice appear in associated
 * documentation, and
 * (c) there is clear notice in each modified Data File or in the Software
 * as well as in the documentation associated with the Data File(s) or
 * Software that the data or software has been modified.
 *
 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
 * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
 * WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT OF THIRD PARTY RIGHTS.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
 * NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
 * DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
 *
 * Except as contained in this notice, the name of a copyright holder
 * shall not be used in advertising or otherwise to promote the sale,
 * use or other dealings in these Data Files or Software without prior
 * written authorization of the copyright holder.
 */

/* ---------------------------------------------------------------------

    Conversions between UTF32, UTF-16, and UTF-8.  Header file.

    Several functions are included here, forming a complete set of
    conversions between the three formats.  UTF-7 is not included
    here, but is handled in a separate source file.

    Each of these routines takes pointers to input buffers and output
    buffers.  The input buffers are const.

    Each routine converts the text between *sourceStart and sourceEnd,
    putting the result into the buffer between *targetStart and
    targetEnd. Note: the end pointers are *after* the last item: e.g.
    *(sourceEnd - 1) is the last item.

    The return result indicates whether the conversion was successful,
    and if not, whether the problem was in the source or target buffers.
    (Only the first encountered problem is indicated.)

    After the conversion, *sourceStart and *targetStart are both
    updated to point to the end of the last text successfully converted in
    the respective buffers.

    Input parameters:
        sourceStart - pointer to a pointer to the source buffer.
                The contents of this are modified on return so that
                it points at the next thing to be converted.
        targetStart - similarly, pointer to pointer to the target buffer.
        sourceEnd, targetEnd - respectively pointers to the ends of the
                two buffers, for overflow checking only.

    These conversion functions take a ConversionFlags argument. When this
    flag is set to strict, both irregular sequences and isolated surrogates
    will cause an error.  When the flag is set to lenient, both irregular
    sequences and isolated surrogates are converted.

    Whether the flag is strict or lenient, all illegal sequences will cause
    an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
    or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
    must check for illegal sequences.

    When the flag is set to lenient, characters over 0x10FFFF are converted
    to the replacement character; otherwise (when the flag is set to strict)
    they constitute an error.

    Output parameters:
        The value "sourceIllegal" is returned from some routines if the input
        sequence is malformed.  When "sourceIllegal" is returned, the source
        value will point to the illegal value that caused the problem. E.g.,
        in UTF-8 when a sequence is malformed, it points to the start of the
        malformed sequence.

    Author: Mark E. Davis, 1994.
    Rev History: Rick McGowan, fixes & updates May 2001.
         Fixes & updates, Sept 2001.

------------------------------------------------------------------------ */

#ifndef LLVM_SUPPORT_CONVERTUTF_H
#define LLVM_SUPPORT_CONVERTUTF_H

#include <cstddef>
#include <string>

#if defined(_WIN32)
#include <system_error>
#endif

// Wrap everything in namespace llvm so that programs can link with llvm and
// their own version of the unicode libraries.

namespace llvm {

/* ---------------------------------------------------------------------
    The following 4 definitions are compiler-specific.
    The C standard does not guarantee that wchar_t has at least
    16 bits, so wchar_t is no less portable than unsigned short!
    All should be unsigned values to avoid sign extension during
    bit mask & shift operations.
------------------------------------------------------------------------ */

typedef unsigned int    UTF32;  /* at least 32 bits */
typedef unsigned short  UTF16;  /* at least 16 bits */
typedef unsigned char   UTF8;   /* typically 8 bits */
typedef unsigned char   Boolean; /* 0 or 1 */

/* Some fundamental constants */
#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
#define UNI_MAX_BMP (UTF32)0x0000FFFF
#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF

#define UNI_MAX_UTF8_BYTES_PER_CODE_POINT 4

#define UNI_UTF16_BYTE_ORDER_MARK_NATIVE  0xFEFF
#define UNI_UTF16_BYTE_ORDER_MARK_SWAPPED 0xFFFE

#define UNI_UTF32_BYTE_ORDER_MARK_NATIVE 0x0000FEFF
#define UNI_UTF32_BYTE_ORDER_MARK_SWAPPED 0xFFFE0000

typedef enum {
  conversionOK,           /* conversion successful */
  sourceExhausted,        /* partial character in source, but hit end */
  targetExhausted,        /* insuff. room in target for conversion */
  sourceIllegal           /* source sequence is illegal/malformed */
} ConversionResult;

typedef enum {
  strictConversion = 0,
  lenientConversion
} ConversionFlags;

ConversionResult ConvertUTF8toUTF16 (
  const UTF8** sourceStart, const UTF8* sourceEnd,
  UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
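
/**
 * A hedged usage sketch of the cursor-advancing protocol described above:
 *
 *   const UTF8 Src[] = {0xE2, 0x82, 0xAC};   // U+20AC (EURO SIGN) in UTF-8
 *   const UTF8 *SrcPtr = Src;
 *   UTF16 Dst[2], *DstPtr = Dst;
 *   ConversionResult R = ConvertUTF8toUTF16(&SrcPtr, Src + 3,
 *                                           &DstPtr, Dst + 2,
 *                                           strictConversion);
 *   // On success, R == conversionOK, SrcPtr == Src + 3, DstPtr == Dst + 1.
 */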

/**
 * Convert a partial UTF8 sequence to UTF32.  If the sequence ends in an
 * incomplete code unit sequence, returns \c sourceExhausted.
 */
ConversionResult ConvertUTF8toUTF32Partial(
  const UTF8** sourceStart, const UTF8* sourceEnd,
  UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);

/**
 * Convert a UTF8 sequence to UTF32.  If the sequence ends in an
 * incomplete code unit sequence, returns \c sourceIllegal.
 */
ConversionResult ConvertUTF8toUTF32(
  const UTF8** sourceStart, const UTF8* sourceEnd,
  UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);

ConversionResult ConvertUTF16toUTF8 (
  const UTF16** sourceStart, const UTF16* sourceEnd,
  UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);

ConversionResult ConvertUTF32toUTF8 (
  const UTF32** sourceStart, const UTF32* sourceEnd,
  UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);

ConversionResult ConvertUTF16toUTF32 (
  const UTF16** sourceStart, const UTF16* sourceEnd,
  UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);

ConversionResult ConvertUTF32toUTF16 (
  const UTF32** sourceStart, const UTF32* sourceEnd,
  UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);

Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);

Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);

unsigned getUTF8SequenceSize(const UTF8 *source, const UTF8 *sourceEnd);

unsigned getNumBytesForUTF8(UTF8 firstByte);

/*************************************************************************/
/* Below are LLVM-specific wrappers of the functions above. */

template <typename T> class ArrayRef;
template <typename T> class SmallVectorImpl;
class StringRef;

/**
 * Convert a UTF8 StringRef to UTF8, UTF16, or UTF32 depending on
 * WideCharWidth. The converted data is written to ResultPtr, which needs to
 * point to at least WideCharWidth * (Source.Size() + 1) bytes. On success,
 * ResultPtr will point one after the end of the copied string. On failure,
 * ResultPtr will not be changed, and ErrorPtr will be set to the location of
 * the first character which could not be converted.
 * \return true on success.
 */
bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
                       char *&ResultPtr, const UTF8 *&ErrorPtr);

/**
* Converts a UTF-8 StringRef to a std::wstring.
* \return true on success.
*/
bool ConvertUTF8toWide(llvm::StringRef Source, std::wstring &Result);

/**
* Converts a UTF-8 C-string to a std::wstring.
* \return true on success.
*/
bool ConvertUTF8toWide(const char *Source, std::wstring &Result);

/**
* Converts a std::wstring to a UTF-8 encoded std::string.
* \return true on success.
*/
bool convertWideToUTF8(const std::wstring &Source, std::string &Result);


/**
 * Convert a Unicode code point to a UTF8 sequence.
 *
 * \param Source a Unicode code point.
 * \param [in,out] ResultPtr pointer to the output buffer, needs to be at least
 * \c UNI_MAX_UTF8_BYTES_PER_CODE_POINT bytes.  On success \c ResultPtr is
 * updated one past end of the converted sequence.
 *
 * \returns true on success.
 */
bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
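
/**
 * A hedged usage sketch: encoding U+20AC (EURO SIGN) into a stack buffer.
 *
 *   char Buf[UNI_MAX_UTF8_BYTES_PER_CODE_POINT];
 *   char *Ptr = Buf;
 *   bool OK = ConvertCodePointToUTF8(0x20AC, Ptr);
 *   // On success Ptr - Buf == 3, and Buf holds 0xE2 0x82 0xAC.
 */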

/**
 * Convert the first UTF8 sequence in the given source buffer to a UTF32
 * code point.
 *
 * \param [in,out] source A pointer to the source buffer. If the conversion
 * succeeds, this pointer will be updated to point to the byte just past the
 * end of the converted sequence.
 * \param sourceEnd A pointer just past the end of the source buffer.
 * \param [out] target The converted code point.
 * \param flags Whether the conversion is strict or lenient.
 *
 * \returns conversionOK on success
 *
 * \sa ConvertUTF8toUTF32
 */
inline ConversionResult convertUTF8Sequence(const UTF8 **source,
                                            const UTF8 *sourceEnd,
                                            UTF32 *target,
                                            ConversionFlags flags) {
  if (*source == sourceEnd)
    return sourceExhausted;
  unsigned size = getNumBytesForUTF8(**source);
  if ((ptrdiff_t)size > sourceEnd - *source)
    return sourceExhausted;
  return ConvertUTF8toUTF32(source, *source + size, &target, target + 1, flags);
}

/**
 * Returns true if a blob of text starts with a UTF-16 big or little endian byte
 * order mark.
 */
bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);

/**
 * Converts a stream of raw bytes assumed to be UTF16 into a UTF8 std::string.
 *
 * \param [in] SrcBytes A buffer of what is assumed to be UTF-16 encoded text.
 * \param [out] Out Converted UTF-8 is stored here on success.
 * \returns true on success
 */
bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);

/**
* Converts a UTF16 string into a UTF8 std::string.
*
* \param [in] Src A buffer of UTF-16 encoded text.
* \param [out] Out Converted UTF-8 is stored here on success.
* \returns true on success
*/
bool convertUTF16ToUTF8String(ArrayRef<UTF16> Src, std::string &Out);

/**
 * Converts a stream of raw bytes assumed to be UTF32 into a UTF8 std::string.
 *
 * \param [in] SrcBytes A buffer of what is assumed to be UTF-32 encoded text.
 * \param [out] Out Converted UTF-8 is stored here on success.
 * \returns true on success
 */
bool convertUTF32ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);

/**
 * Converts a UTF32 string into a UTF8 std::string.
 *
 * \param [in] Src A buffer of UTF-32 encoded text.
 * \param [out] Out Converted UTF-8 is stored here on success.
 * \returns true on success
 */
bool convertUTF32ToUTF8String(ArrayRef<UTF32> Src, std::string &Out);

/**
 * Converts a UTF-8 string into a UTF-16 string with native endianness.
 *
 * \returns true on success
 */
bool convertUTF8ToUTF16String(StringRef SrcUTF8,
                              SmallVectorImpl<UTF16> &DstUTF16);

#if defined(_WIN32)
namespace sys {
namespace windows {
std::error_code UTF8ToUTF16(StringRef utf8, SmallVectorImpl<wchar_t> &utf16);
/// Convert to UTF16 from the current code page used in the system
std::error_code CurCPToUTF16(StringRef utf8, SmallVectorImpl<wchar_t> &utf16);
std::error_code UTF16ToUTF8(const wchar_t *utf16, size_t utf16_len,
                            SmallVectorImpl<char> &utf8);
/// Convert from UTF16 to the current code page used in the system
std::error_code UTF16ToCurCP(const wchar_t *utf16, size_t utf16_len,
                             SmallVectorImpl<char> &utf8);
} // namespace windows
} // namespace sys
#endif

} /* end namespace llvm */

#endif
//===- llvm/Support/Error.h - Recoverable error handling --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an API used to report recoverable errors.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERROR_H
#define LLVM_SUPPORT_ERROR_H

#include "llvm-c/Error.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <system_error>
#include <type_traits>
#include <utility>
#include <vector>

namespace llvm {

class ErrorSuccess;

/// Base class for error info classes. Do not extend this directly: Extend
/// the ErrorInfo template subclass instead.
class ErrorInfoBase {
public:
  virtual ~ErrorInfoBase() = default;

  /// Print an error message to an output stream.
  virtual void log(raw_ostream &OS) const = 0;

  /// Return the error message as a string.
  virtual std::string message() const {
    std::string Msg;
    raw_string_ostream OS(Msg);
    log(OS);
    return OS.str();
  }

  /// Convert this error to a std::error_code.
  ///
  /// This is a temporary crutch to enable interaction with code still
  /// using std::error_code. It will be removed in the future.
  virtual std::error_code convertToErrorCode() const = 0;

  // Returns the class ID for this type.
  static const void *classID() { return &ID; }

  // Returns the class ID for the dynamic type of this ErrorInfoBase instance.
  virtual const void *dynamicClassID() const = 0;

  // Check whether this instance is a subclass of the class identified by
  // ClassID.
  virtual bool isA(const void *const ClassID) const {
    return ClassID == classID();
  }

  // Check whether this instance is a subclass of ErrorInfoT.
  template <typename ErrorInfoT> bool isA() const {
    return isA(ErrorInfoT::classID());
  }

private:
  virtual void anchor();

  static char ID;
};
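
// A hedged sketch of a custom error type; ErrorInfo<T> and
// inconvertibleErrorCode() are declared further down in this header.
//   class MyError : public llvm::ErrorInfo<MyError> {
//   public:
//     static char ID;
//     void log(llvm::raw_ostream &OS) const override { OS << "my error"; }
//     std::error_code convertToErrorCode() const override {
//       return llvm::inconvertibleErrorCode();
//     }
//   };
//   char MyError::ID; // definition goes in a .cpp file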

/// Lightweight error class with error context and mandatory checking.
///
/// Instances of this class wrap an ErrorInfoBase pointer. Failure states
/// are represented by setting the pointer to an ErrorInfoBase subclass
/// instance containing information describing the failure. Success is
/// represented by a null pointer value.
///
/// Instances of Error also contain a 'Checked' flag, which must be set
/// before the destructor is called, otherwise the destructor will trigger a
/// runtime error. This enforces at runtime the requirement that all Error
/// instances be checked or returned to the caller.
///
/// There are two ways to set the checked flag, depending on what state the
/// Error instance is in. For Error instances indicating success, it
/// is sufficient to invoke the boolean conversion operator. E.g.:
///
///   @code{.cpp}
///   Error foo(<...>);
///
///   if (auto E = foo(<...>))
///     return E; // <- Return E if it is in the error state.
///   // We have verified that E was in the success state. It can now be safely
///   // destroyed.
///   @endcode
///
/// A success value *cannot* be dropped. For example, just calling 'foo(<...>)'
/// without testing the return value will raise a runtime error, even if foo
/// returns success.
///
/// For Error instances representing failure, you must use either the
/// handleErrors or handleAllErrors function with a typed handler. E.g.:
///
///   @code{.cpp}
///   class MyErrorInfo : public ErrorInfo<MyErrorInfo> {
///     // Custom error info.
///   };
///
///   Error foo(<...>) { return make_error<MyErrorInfo>(...); }
///
///   auto E = foo(<...>); // <- foo returns failure with MyErrorInfo.
///   auto NewE =
///     handleErrors(E,
///       [](const MyErrorInfo &M) {
///         // Deal with the error.
///       },
///       [](std::unique_ptr<OtherError> M) -> Error {
///         if (canHandle(*M)) {
///           // handle error.
///           return Error::success();
///         }
///         // Couldn't handle this error instance. Pass it up the stack.
///         return Error(std::move(M));
///       });
///   // Note - we must check or return NewE in case any of the handlers
///   // returned a new error.
///   @endcode
///
/// The handleAllErrors function is identical to handleErrors, except
/// that it has a void return type, and requires all errors to be handled and
/// no new errors be returned. It prevents errors (assuming they can all be
/// handled) from having to be bubbled all the way to the top-level.
///
/// *All* Error instances must be checked before destruction, even if
/// they're moved-assigned or constructed from Success values that have already
/// been checked. This enforces checking through all levels of the call stack.
class [[nodiscard]] Error {
  // ErrorList needs to be able to yank ErrorInfoBase pointers out of Errors
  // to add to the error list. It can't rely on handleErrors for this, since
  // handleErrors does not support ErrorList handlers.
  friend class ErrorList;

  // handleErrors needs to be able to set the Checked flag.
  template <typename... HandlerTs>
  friend Error handleErrors(Error E, HandlerTs &&... Handlers);

  // Expected<T> needs to be able to steal the payload when constructed from an
  // error.
  template <typename T> friend class Expected;

  // wrap needs to be able to steal the payload.
  friend LLVMErrorRef wrap(Error);

protected:
  /// Create a success value. Prefer using 'Error::success()' for readability
  Error() {
    setPtr(nullptr);
    setChecked(false);
  }

public:
  /// Create a success value.
  static ErrorSuccess success();

  // Errors are not copy-constructible.
  Error(const Error &Other) = delete;

  /// Move-construct an error value. The newly constructed error is considered
  /// unchecked, even if the source error had been checked. The original error
  /// becomes a checked Success value, regardless of its original state.
  Error(Error &&Other) {
    setChecked(true);
    *this = std::move(Other);
  }

  /// Create an error value. Prefer using the 'make_error' function, but
  /// this constructor can be useful when "re-throwing" errors from handlers.
  Error(std::unique_ptr<ErrorInfoBase> Payload) {
    setPtr(Payload.release());
    setChecked(false);
  }

  // Errors are not copy-assignable.
  Error &operator=(const Error &Other) = delete;

  /// Move-assign an error value. The current error must represent success:
  /// you cannot overwrite an unhandled error. The current error is then
  /// considered unchecked. The source error becomes a checked success value,
  /// regardless of its original state.
  Error &operator=(Error &&Other) {
    // Don't allow overwriting of unchecked values.
    assertIsChecked();
    setPtr(Other.getPtr());

    // This Error is unchecked, even if the source error was checked.
    setChecked(false);

    // Null out Other's payload and set its checked bit.
    Other.setPtr(nullptr);
    Other.setChecked(true);

    return *this;
  }

  /// Destroy a Error. Fails with a call to abort() if the error is
  /// unchecked.
  ~Error() {
    assertIsChecked();
    delete getPtr();
  }

  /// Bool conversion. Returns true if this Error is in a failure state,
  /// and false if it is in a success state. If the error is in a success state
  /// it will be considered checked.
  explicit operator bool() {
    setChecked(getPtr() == nullptr);
    return getPtr() != nullptr;
  }

  /// Check whether one error is a subclass of another.
  template <typename ErrT> bool isA() const {
    return getPtr() && getPtr()->isA(ErrT::classID());
  }

  /// Returns the dynamic class id of this error, or null if this is a success
  /// value.
  const void* dynamicClassID() const {
    if (!getPtr())
      return nullptr;
    return getPtr()->dynamicClassID();
  }

private:
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // assertIsChecked() happens very frequently, but under normal circumstances
  // is supposed to be a no-op, so we want it to be inlined. The debug prints
  // in the failure path would make fatalUncheckedError too large for inlining,
  // so it is defined out of line to keep the hot path small.
  [[noreturn]] void fatalUncheckedError() const;
#endif

  void assertIsChecked() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    if (LLVM_UNLIKELY(!getChecked() || getPtr()))
      fatalUncheckedError();
#endif
  }

  ErrorInfoBase *getPtr() const {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    return reinterpret_cast<ErrorInfoBase*>(
             reinterpret_cast<uintptr_t>(Payload) &
             ~static_cast<uintptr_t>(0x1));
#else
    return Payload;
#endif
  }

  void setPtr(ErrorInfoBase *EI) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Payload = reinterpret_cast<ErrorInfoBase*>(
                (reinterpret_cast<uintptr_t>(EI) &
                 ~static_cast<uintptr_t>(0x1)) |
                (reinterpret_cast<uintptr_t>(Payload) & 0x1));
#else
    Payload = EI;
#endif
  }

  bool getChecked() const {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    return (reinterpret_cast<uintptr_t>(Payload) & 0x1) == 0;
#else
    return true;
#endif
  }

  void setChecked(bool V) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Payload = reinterpret_cast<ErrorInfoBase*>(
                (reinterpret_cast<uintptr_t>(Payload) &
                  ~static_cast<uintptr_t>(0x1)) |
                  (V ? 0 : 1));
#endif
  }

  std::unique_ptr<ErrorInfoBase> takePayload() {
    std::unique_ptr<ErrorInfoBase> Tmp(getPtr());
    setPtr(nullptr);
    setChecked(true);
    return Tmp;
  }

  friend raw_ostream &operator<<(raw_ostream &OS, const Error &E) {
    if (auto *P = E.getPtr())
      P->log(OS);
    else
      OS << "success";
    return OS;
  }

  ErrorInfoBase *Payload = nullptr;
};

/// Subclass of Error for the sole purpose of identifying the success path in
/// the type system. This allows catching invalid conversions to Expected<T> at
/// compile time.
class ErrorSuccess final : public Error {};

inline ErrorSuccess Error::success() { return ErrorSuccess(); }

/// Make an Error instance representing failure using the given error info
/// type.
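///
/// For example (a sketch; MyErrorInfo is a hypothetical ErrorInfo subclass
/// with a constructor taking a StringRef):
///
///   @code{.cpp}
///   Error openThing(StringRef Name) {
///     return make_error<MyErrorInfo>(Name);
///   }
///   @endcode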
template <typename ErrT, typename... ArgTs> Error make_error(ArgTs &&... Args) {
  return Error(std::make_unique<ErrT>(std::forward<ArgTs>(Args)...));
}

/// Base class for user error types. Users should declare their error types
/// like:
///
/// class MyError : public ErrorInfo<MyError> {
///   ....
/// };
///
/// This class provides implementations of the ErrorInfoBase::dynamicClassID
/// and isA methods, which are used by the Error RTTI system.
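///
/// A minimal concrete subclass (sketch) must also define the static ID
/// member used by classID, and the two pure virtual methods inherited from
/// ErrorInfoBase:
///
///   @code{.cpp}
///   class MyError : public ErrorInfo<MyError> {
///   public:
///     static char ID;
///     void log(raw_ostream &OS) const override { OS << "MyError"; }
///     std::error_code convertToErrorCode() const override {
///       return inconvertibleErrorCode();
///     }
///   };
///   char MyError::ID; // The definition lives in a .cpp file.
///   @endcode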
template <typename ThisErrT, typename ParentErrT = ErrorInfoBase>
class ErrorInfo : public ParentErrT {
public:
  using ParentErrT::ParentErrT; // inherit constructors

  static const void *classID() { return &ThisErrT::ID; }

  const void *dynamicClassID() const override { return &ThisErrT::ID; }

  bool isA(const void *const ClassID) const override {
    return ClassID == classID() || ParentErrT::isA(ClassID);
  }
};

/// Special ErrorInfo subclass representing a list of ErrorInfos.
/// Instances of this class are constructed by joinErrors.
class ErrorList final : public ErrorInfo<ErrorList> {
  // handleErrors needs to be able to iterate the payload list of an
  // ErrorList.
  template <typename... HandlerTs>
  friend Error handleErrors(Error E, HandlerTs &&... Handlers);

  // joinErrors is implemented in terms of join.
  friend Error joinErrors(Error, Error);

public:
  void log(raw_ostream &OS) const override {
    OS << "Multiple errors:\n";
    for (const auto &ErrPayload : Payloads) {
      ErrPayload->log(OS);
      OS << "\n";
    }
  }

  std::error_code convertToErrorCode() const override;

  // Used by ErrorInfo::classID.
  static char ID;

private:
  ErrorList(std::unique_ptr<ErrorInfoBase> Payload1,
            std::unique_ptr<ErrorInfoBase> Payload2) {
    assert(!Payload1->isA<ErrorList>() && !Payload2->isA<ErrorList>() &&
           "ErrorList constructor payloads should be singleton errors");
    Payloads.push_back(std::move(Payload1));
    Payloads.push_back(std::move(Payload2));
  }

  static Error join(Error E1, Error E2) {
    if (!E1)
      return E2;
    if (!E2)
      return E1;
    if (E1.isA<ErrorList>()) {
      auto &E1List = static_cast<ErrorList &>(*E1.getPtr());
      if (E2.isA<ErrorList>()) {
        auto E2Payload = E2.takePayload();
        auto &E2List = static_cast<ErrorList &>(*E2Payload);
        for (auto &Payload : E2List.Payloads)
          E1List.Payloads.push_back(std::move(Payload));
      } else
        E1List.Payloads.push_back(E2.takePayload());

      return E1;
    }
    if (E2.isA<ErrorList>()) {
      auto &E2List = static_cast<ErrorList &>(*E2.getPtr());
      E2List.Payloads.insert(E2List.Payloads.begin(), E1.takePayload());
      return E2;
    }
    return Error(std::unique_ptr<ErrorList>(
        new ErrorList(E1.takePayload(), E2.takePayload())));
  }

  std::vector<std::unique_ptr<ErrorInfoBase>> Payloads;
};

/// Concatenate errors. The resulting Error is unchecked, and contains the
/// ErrorInfo(s), if any, contained in E1, followed by the
/// ErrorInfo(s), if any, contained in E2.
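///
/// A sketch of accumulating errors across several steps (stepOne and stepTwo
/// are hypothetical fallible functions):
///
///   @code{.cpp}
///   Error Accumulated = Error::success();
///   Accumulated = joinErrors(std::move(Accumulated), stepOne());
///   Accumulated = joinErrors(std::move(Accumulated), stepTwo());
///   return Accumulated; // Holds zero, one, or two error payloads.
///   @endcode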
inline Error joinErrors(Error E1, Error E2) {
  return ErrorList::join(std::move(E1), std::move(E2));
}

/// Tagged union holding either a T or a Error.
///
/// This class parallels ErrorOr, but replaces error_code with Error. Since
/// Error cannot be copied, this class replaces getError() with
/// takeError(). It also adds a bool errorIsA<ErrT>() method for testing the
/// error class type.
///
/// Example usage of 'Expected<T>' as a function return type:
///
///   @code{.cpp}
///     Expected<int> myDivide(int A, int B) {
///       if (B == 0) {
///         // return an Error
///         return createStringError(inconvertibleErrorCode(),
///                                  "B must not be zero!");
///       }
///       // return an integer
///       return A / B;
///     }
///   @endcode
///
///   Checking the result of a function returning 'Expected<T>':
///   @code{.cpp}
///     if (auto E = Result.takeError()) {
///       // We must consume the error. Typically one of:
///       // - return the error to our caller
///       // - toString(), when logging
///       // - consumeError(), to silently swallow the error
///       // - handleErrors(), to distinguish error types
///       errs() << "Problem with division " << toString(std::move(E)) << "\n";
///       return;
///     }
///     // use the result
///     outs() << "The answer is " << *Result << "\n";
///   @endcode
///
///  For unit-testing a function returning an 'Expected<T>', see the
///  'EXPECT_THAT_EXPECTED' macros in llvm/Testing/Support/Error.h.
template <class T> class [[nodiscard]] Expected {
  template <class T1> friend class ExpectedAsOutParameter;
  template <class OtherT> friend class Expected;

  static constexpr bool isRef = std::is_reference_v<T>;

  using wrap = std::reference_wrapper<std::remove_reference_t<T>>;

  using error_type = std::unique_ptr<ErrorInfoBase>;

public:
  using storage_type = std::conditional_t<isRef, wrap, T>;
  using value_type = T;

private:
  using reference = std::remove_reference_t<T> &;
  using const_reference = const std::remove_reference_t<T> &;
  using pointer = std::remove_reference_t<T> *;
  using const_pointer = const std::remove_reference_t<T> *;

public:
  /// Create an Expected<T> error value from the given Error.
  Expected(Error Err)
      : HasError(true)
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
        // Expected is unchecked upon construction in Debug builds.
        , Unchecked(true)
#endif
  {
    assert(Err && "Cannot create Expected<T> from Error success value.");
    new (getErrorStorage()) error_type(Err.takePayload());
  }

  /// Forbid implicit conversion from Error::success(). This avoids having
  /// Expected<T> foo() { return Error::success(); } which would otherwise
  /// compile but trigger the assertion above.
  Expected(ErrorSuccess) = delete;

  /// Create an Expected<T> success value from the given OtherT value, which
  /// must be convertible to T.
  template <typename OtherT>
  Expected(OtherT &&Val,
           std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr)
      : HasError(false)
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
        // Expected is unchecked upon construction in Debug builds.
        ,
        Unchecked(true)
#endif
  {
    new (getStorage()) storage_type(std::forward<OtherT>(Val));
  }

  /// Move construct an Expected<T> value.
  Expected(Expected &&Other) { moveConstruct(std::move(Other)); }

  /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
  /// must be convertible to T.
  template <class OtherT>
  Expected(Expected<OtherT> &&Other,
           std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr) {
    moveConstruct(std::move(Other));
  }

  /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
  /// isn't convertible to T.
  template <class OtherT>
  explicit Expected(
      Expected<OtherT> &&Other,
      std::enable_if_t<!std::is_convertible_v<OtherT, T>> * = nullptr) {
    moveConstruct(std::move(Other));
  }

  /// Move-assign from another Expected<T>.
  Expected &operator=(Expected &&Other) {
    moveAssign(std::move(Other));
    return *this;
  }

  /// Destroy an Expected<T>.
  ~Expected() {
    assertIsChecked();
    if (!HasError)
      getStorage()->~storage_type();
    else
      getErrorStorage()->~error_type();
  }

  /// Return false if there is an error.
  explicit operator bool() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Unchecked = HasError;
#endif
    return !HasError;
  }

  /// Returns a reference to the stored T value.
  reference get() {
    assertIsChecked();
    return *getStorage();
  }

  /// Returns a const reference to the stored T value.
  const_reference get() const {
    assertIsChecked();
    return const_cast<Expected<T> *>(this)->get();
  }

  /// Returns \a takeError() after moving the held T (if any) into \p Value.
  template <class OtherT>
  Error moveInto(
      OtherT &Value,
      std::enable_if_t<std::is_assignable_v<OtherT &, T &&>> * = nullptr) && {
    if (*this)
      Value = std::move(get());
    return takeError();
  }

  /// Check that this Expected<T> is an error of type ErrT.
  template <typename ErrT> bool errorIsA() const {
    return HasError && (*getErrorStorage())->template isA<ErrT>();
  }

  /// Take ownership of the stored error.
  /// After calling this the Expected<T> is in an indeterminate state that can
  /// only be safely destructed. No further calls (besides the destructor) should
  /// be made on the Expected<T> value.
  Error takeError() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Unchecked = false;
#endif
    return HasError ? Error(std::move(*getErrorStorage())) : Error::success();
  }

  /// Returns a pointer to the stored T value.
  pointer operator->() {
    assertIsChecked();
    return toPointer(getStorage());
  }

  /// Returns a const pointer to the stored T value.
  const_pointer operator->() const {
    assertIsChecked();
    return toPointer(getStorage());
  }

  /// Returns a reference to the stored T value.
  reference operator*() {
    assertIsChecked();
    return *getStorage();
  }

  /// Returns a const reference to the stored T value.
  const_reference operator*() const {
    assertIsChecked();
    return *getStorage();
  }

private:
  template <class T1>
  static bool compareThisIfSameType(const T1 &a, const T1 &b) {
    return &a == &b;
  }

  template <class T1, class T2>
  static bool compareThisIfSameType(const T1 &, const T2 &) {
    return false;
  }

  template <class OtherT> void moveConstruct(Expected<OtherT> &&Other) {
    HasError = Other.HasError;
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Unchecked = true;
    Other.Unchecked = false;
#endif

    if (!HasError)
      new (getStorage()) storage_type(std::move(*Other.getStorage()));
    else
      new (getErrorStorage()) error_type(std::move(*Other.getErrorStorage()));
  }

  template <class OtherT> void moveAssign(Expected<OtherT> &&Other) {
    assertIsChecked();

    if (compareThisIfSameType(*this, Other))
      return;

    this->~Expected();
    new (this) Expected(std::move(Other));
  }

  pointer toPointer(pointer Val) { return Val; }

  const_pointer toPointer(const_pointer Val) const { return Val; }

  pointer toPointer(wrap *Val) { return &Val->get(); }

  const_pointer toPointer(const wrap *Val) const { return &Val->get(); }

  storage_type *getStorage() {
    assert(!HasError && "Cannot get value when an error exists!");
    return reinterpret_cast<storage_type *>(&TStorage);
  }

  const storage_type *getStorage() const {
    assert(!HasError && "Cannot get value when an error exists!");
    return reinterpret_cast<const storage_type *>(&TStorage);
  }

  error_type *getErrorStorage() {
    assert(HasError && "Cannot get error when a value exists!");
    return reinterpret_cast<error_type *>(&ErrorStorage);
  }

  const error_type *getErrorStorage() const {
    assert(HasError && "Cannot get error when a value exists!");
    return reinterpret_cast<const error_type *>(&ErrorStorage);
  }

  // Used by ExpectedAsOutParameter to reset the checked flag.
  void setUnchecked() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Unchecked = true;
#endif
  }

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  [[noreturn]] LLVM_ATTRIBUTE_NOINLINE void fatalUncheckedExpected() const {
    dbgs() << "Expected<T> must be checked before access or destruction.\n";
    if (HasError) {
      dbgs() << "Unchecked Expected<T> contained error:\n";
      (*getErrorStorage())->log(dbgs());
    } else
      dbgs() << "Expected<T> value was in success state. (Note: Expected<T> "
                "values in success mode must still be checked prior to being "
                "destroyed).\n";
    abort();
  }
#endif

  void assertIsChecked() const {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    if (LLVM_UNLIKELY(Unchecked))
      fatalUncheckedExpected();
#endif
  }

  union {
    AlignedCharArrayUnion<storage_type> TStorage;
    AlignedCharArrayUnion<error_type> ErrorStorage;
  };
  bool HasError : 1;
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool Unchecked : 1;
#endif
};

/// Report a serious error, calling any installed error handler. See
/// ErrorHandling.h.
[[noreturn]] void report_fatal_error(Error Err, bool gen_crash_diag = true);

/// Report a fatal error if Err is a failure value.
///
/// This function can be used to wrap calls to fallible functions ONLY when it
/// is known that the Error will always be a success value. E.g.
///
///   @code{.cpp}
///   // foo only attempts the fallible operation if DoFallibleOperation is
///   // true. If DoFallibleOperation is false then foo always returns
///   // Error::success().
///   Error foo(bool DoFallibleOperation);
///
///   cantFail(foo(false));
///   @endcode
inline void cantFail(Error Err, const char *Msg = nullptr) {
  if (Err) {
    if (!Msg)
      Msg = "Failure value returned from cantFail wrapped call";
#ifndef NDEBUG
    std::string Str;
    raw_string_ostream OS(Str);
    OS << Msg << "\n" << Err;
    Msg = OS.str().c_str();
#endif
    llvm_unreachable(Msg);
  }
}

/// Report a fatal error if ValOrErr is a failure value, otherwise unwraps and
/// returns the contained value.
///
/// This function can be used to wrap calls to fallible functions ONLY when it
/// is known that the Error will always be a success value. E.g.
///
///   @code{.cpp}
///   // foo only attempts the fallible operation if DoFallibleOperation is
///   // true. If DoFallibleOperation is false then foo always returns an int.
///   Expected<int> foo(bool DoFallibleOperation);
///
///   int X = cantFail(foo(false));
///   @endcode
template <typename T>
T cantFail(Expected<T> ValOrErr, const char *Msg = nullptr) {
  if (ValOrErr)
    return std::move(*ValOrErr);
  else {
    if (!Msg)
      Msg = "Failure value returned from cantFail wrapped call";
#ifndef NDEBUG
    std::string Str;
    raw_string_ostream OS(Str);
    auto E = ValOrErr.takeError();
    OS << Msg << "\n" << E;
    Msg = OS.str().c_str();
#endif
    llvm_unreachable(Msg);
  }
}

/// Report a fatal error if ValOrErr is a failure value, otherwise unwraps and
/// returns the contained reference.
///
/// This function can be used to wrap calls to fallible functions ONLY when it
/// is known that the Error will always be a success value. E.g.
///
///   @code{.cpp}
///   // foo only attempts the fallible operation if DoFallibleOperation is
///   // true. If DoFallibleOperation is false then foo always returns a Bar&.
///   Expected<Bar&> foo(bool DoFallibleOperation);
///
///   Bar &X = cantFail(foo(false));
///   @endcode
template <typename T>
T& cantFail(Expected<T&> ValOrErr, const char *Msg = nullptr) {
  if (ValOrErr)
    return *ValOrErr;
  else {
    if (!Msg)
      Msg = "Failure value returned from cantFail wrapped call";
#ifndef NDEBUG
    std::string Str;
    raw_string_ostream OS(Str);
    auto E = ValOrErr.takeError();
    OS << Msg << "\n" << E;
    Msg = OS.str().c_str();
#endif
    llvm_unreachable(Msg);
  }
}

/// Helper for testing applicability of, and applying, handlers for
/// ErrorInfo types.
template <typename HandlerT>
class ErrorHandlerTraits
    : public ErrorHandlerTraits<
          decltype(&std::remove_reference_t<HandlerT>::operator())> {};

// Specialization for functions of the form 'Error (ErrT &)'.
template <typename ErrT> class ErrorHandlerTraits<Error (&)(ErrT &)> {
public:
  static bool appliesTo(const ErrorInfoBase &E) {
    return E.template isA<ErrT>();
  }

  template <typename HandlerT>
  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
    assert(appliesTo(*E) && "Applying incorrect handler");
    return H(static_cast<ErrT &>(*E));
  }
};

// Specialization for functions of the form 'void (ErrT &)'.
template <typename ErrT> class ErrorHandlerTraits<void (&)(ErrT &)> {
public:
  static bool appliesTo(const ErrorInfoBase &E) {
    return E.template isA<ErrT>();
  }

  template <typename HandlerT>
  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
    assert(appliesTo(*E) && "Applying incorrect handler");
    H(static_cast<ErrT &>(*E));
    return Error::success();
  }
};

/// Specialization for functions of the form 'Error (std::unique_ptr<ErrT>)'.
template <typename ErrT>
class ErrorHandlerTraits<Error (&)(std::unique_ptr<ErrT>)> {
public:
  static bool appliesTo(const ErrorInfoBase &E) {
    return E.template isA<ErrT>();
  }

  template <typename HandlerT>
  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
    assert(appliesTo(*E) && "Applying incorrect handler");
    std::unique_ptr<ErrT> SubE(static_cast<ErrT *>(E.release()));
    return H(std::move(SubE));
  }
};

/// Specialization for functions of the form 'void (std::unique_ptr<ErrT>)'.
template <typename ErrT>
class ErrorHandlerTraits<void (&)(std::unique_ptr<ErrT>)> {
public:
  static bool appliesTo(const ErrorInfoBase &E) {
    return E.template isA<ErrT>();
  }

  template <typename HandlerT>
  static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) {
    assert(appliesTo(*E) && "Applying incorrect handler");
    std::unique_ptr<ErrT> SubE(static_cast<ErrT *>(E.release()));
    H(std::move(SubE));
    return Error::success();
  }
};

// Specialization for member functions of the form 'RetT (ErrT &)'.
template <typename C, typename RetT, typename ErrT>
class ErrorHandlerTraits<RetT (C::*)(ErrT &)>
    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};

// Specialization for member functions of the form 'RetT (ErrT &) const'.
template <typename C, typename RetT, typename ErrT>
class ErrorHandlerTraits<RetT (C::*)(ErrT &) const>
    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};

// Specialization for member functions of the form 'RetT (const ErrT&)'.
template <typename C, typename RetT, typename ErrT>
class ErrorHandlerTraits<RetT (C::*)(const ErrT &)>
    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};

// Specialization for member functions of the form 'RetT (const ErrT&) const'.
template <typename C, typename RetT, typename ErrT>
class ErrorHandlerTraits<RetT (C::*)(const ErrT &) const>
    : public ErrorHandlerTraits<RetT (&)(ErrT &)> {};

/// Specialization for member functions of the form
/// 'RetT (std::unique_ptr<ErrT>)'.
template <typename C, typename RetT, typename ErrT>
class ErrorHandlerTraits<RetT (C::*)(std::unique_ptr<ErrT>)>
    : public ErrorHandlerTraits<RetT (&)(std::unique_ptr<ErrT>)> {};

/// Specialization for member functions of the form
/// 'RetT (std::unique_ptr<ErrT>) const'.
template <typename C, typename RetT, typename ErrT>
class ErrorHandlerTraits<RetT (C::*)(std::unique_ptr<ErrT>) const>
    : public ErrorHandlerTraits<RetT (&)(std::unique_ptr<ErrT>)> {};

inline Error handleErrorImpl(std::unique_ptr<ErrorInfoBase> Payload) {
  return Error(std::move(Payload));
}

template <typename HandlerT, typename... HandlerTs>
Error handleErrorImpl(std::unique_ptr<ErrorInfoBase> Payload,
                      HandlerT &&Handler, HandlerTs &&... Handlers) {
  if (ErrorHandlerTraits<HandlerT>::appliesTo(*Payload))
    return ErrorHandlerTraits<HandlerT>::apply(std::forward<HandlerT>(Handler),
                                               std::move(Payload));
  return handleErrorImpl(std::move(Payload),
                         std::forward<HandlerTs>(Handlers)...);
}

/// Pass the ErrorInfo(s) contained in E to their respective handlers. Any
/// unhandled errors (or Errors returned by handlers) are re-concatenated and
/// returned.
/// Because this function returns an error, its result must also be checked
/// or returned. If you intend to handle all errors use handleAllErrors
/// (which returns void, and will abort() on unhandled errors) instead.
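///
/// A sketch (loadFile is a hypothetical fallible call and BadFileFormat a
/// hypothetical ErrorInfo subclass):
///
///   @code{.cpp}
///   Error Remaining = handleErrors(
///       loadFile(Path),
///       [](const BadFileFormat &B) {
///         // Recovered here; this error is consumed.
///       });
///   if (Remaining)
///     return Remaining; // Other error kinds flow through unhandled.
///   @endcode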
template <typename... HandlerTs>
Error handleErrors(Error E, HandlerTs &&... Hs) {
  if (!E)
    return Error::success();

  std::unique_ptr<ErrorInfoBase> Payload = E.takePayload();

  if (Payload->isA<ErrorList>()) {
    ErrorList &List = static_cast<ErrorList &>(*Payload);
    Error R;
    for (auto &P : List.Payloads)
      R = ErrorList::join(
          std::move(R),
          handleErrorImpl(std::move(P), std::forward<HandlerTs>(Hs)...));
    return R;
  }

  return handleErrorImpl(std::move(Payload), std::forward<HandlerTs>(Hs)...);
}

/// Behaves the same as handleErrors, except that by contract all errors
/// *must* be handled by the given handlers (i.e. there must be no remaining
/// errors after running the handlers, or llvm_unreachable is called).
template <typename... HandlerTs>
void handleAllErrors(Error E, HandlerTs &&... Handlers) {
  cantFail(handleErrors(std::move(E), std::forward<HandlerTs>(Handlers)...));
}

/// Check that E is a non-error, then drop it.
/// If E is an error, llvm_unreachable will be called.
inline void handleAllErrors(Error E) {
  cantFail(std::move(E));
}

/// Handle any errors (if present) in an Expected<T>, then try a recovery path.
///
/// If the incoming value is a success value it is returned unmodified. If it
/// is a failure value then the contained error is passed to handleErrors.
/// If handleErrors is able to handle the error then the RecoveryPath functor
/// is called to supply the final result. If handleErrors is not able to
/// handle all errors then the unhandled errors are returned.
///
/// This utility enables the following pattern:
///
///   @code{.cpp}
///   enum FooStrategy { Aggressive, Conservative };
///   Expected<Foo> foo(FooStrategy S);
///
///   auto ResultOrErr =
///     handleExpected(
///       foo(Aggressive),
///       []() { return foo(Conservative); },
///       [](AggressiveStrategyError&) {
///         // Implicitly consume this - we'll recover by using a conservative
///         // strategy.
///       });
///
///   @endcode
template <typename T, typename RecoveryFtor, typename... HandlerTs>
Expected<T> handleExpected(Expected<T> ValOrErr, RecoveryFtor &&RecoveryPath,
                           HandlerTs &&... Handlers) {
  if (ValOrErr)
    return ValOrErr;

  if (auto Err = handleErrors(ValOrErr.takeError(),
                              std::forward<HandlerTs>(Handlers)...))
    return std::move(Err);

  return RecoveryPath();
}

/// Log all errors (if any) in E to OS. If there are any errors, ErrorBanner
/// will be printed before the first one is logged. A newline will be printed
/// after each error.
///
/// This function is compatible with the helpers from Support/WithColor.h. You
/// can pass any of them as the OS. Please consider using them instead of
/// including 'error: ' in the ErrorBanner.
///
/// This is useful in the base level of your program to allow clean termination
/// (allowing clean deallocation of resources, etc.), while reporting error
/// information to the user.
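///
/// A sketch (initAll is a hypothetical fallible setup function):
///
///   @code{.cpp}
///   if (Error E = initAll()) {
///     logAllUnhandledErrors(std::move(E), errs(), "mytool: ");
///     return 1;
///   }
///   @endcode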
void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner = {});

/// Write all error messages (if any) in E to a string. The newline character
/// is used to separate error messages.
std::string toString(Error E);

/// Consume a Error without doing anything. This method should be used
/// only where an error can be considered a reasonable and expected return
/// value.
///
/// Uses of this method are potentially indicative of design problems: If it's
/// legitimate to do nothing while processing an "error", the error-producer
/// might be more clearly refactored to return an std::optional<T>.
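///
/// A sketch (tryOptionalCleanup is a hypothetical fallible call whose
/// failure is expected and deliberately ignored):
///
///   @code{.cpp}
///   consumeError(tryOptionalCleanup());
///   @endcode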
inline void consumeError(Error Err) {
  handleAllErrors(std::move(Err), [](const ErrorInfoBase &) {});
}

/// Convert an Expected to an Optional without doing anything. This method
/// should be used only where an error can be considered a reasonable and
/// expected return value.
///
/// Uses of this method are potentially indicative of problems: perhaps the
/// error should be propagated further, or the error-producer should just
/// return an Optional in the first place.
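///
/// A sketch (parseInt is a hypothetical Expected<int>-returning parser):
///
///   @code{.cpp}
///   std::optional<int> MaybeN = expectedToOptional(parseInt(Str));
///   int N = MaybeN.value_or(0);
///   @endcode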
template <typename T> std::optional<T> expectedToOptional(Expected<T> &&E) {
  if (E)
    return std::move(*E);
  consumeError(E.takeError());
  return std::nullopt;
}

/// Identical to expectedToOptional: convert an Expected<T> to a
/// std::optional<T>, consuming any contained error.
template <typename T> std::optional<T> expectedToStdOptional(Expected<T> &&E) {
  if (E)
    return std::move(*E);
  consumeError(E.takeError());
  return std::nullopt;
}

/// Helper for converting an Error to a bool.
///
/// This method returns true if Err is in an error state, or false if it is
/// in a success state.  Puts Err in a checked state in both cases (unlike
/// Error::operator bool(), which only does this for success states).
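///
/// A sketch (tryRemove is a hypothetical Error-returning call where only the
/// success/failure bit matters to the caller):
///
///   @code{.cpp}
///   bool Failed = errorToBool(tryRemove(Path));
///   @endcode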
inline bool errorToBool(Error Err) {
  bool IsError = static_cast<bool>(Err);
  if (IsError)
    consumeError(std::move(Err));
  return IsError;
}

/// Helper for Errors used as out-parameters.
///
/// This helper is for use with the Error-as-out-parameter idiom, where an error
/// is passed to a function or method by reference, rather than being returned.
/// In such cases it is helpful to set the checked bit on entry to the function
/// so that the error can be written to (unchecked Errors abort on assignment)
/// and clear the checked bit on exit so that clients cannot accidentally forget
/// to check the result. This helper performs these actions automatically using
/// RAII:
///
///   @code{.cpp}
///   Result foo(Error &Err) {
///     ErrorAsOutParameter ErrAsOutParam(&Err); // 'Checked' flag set
///     // <body of foo>
///     // <- 'Checked' flag auto-cleared when ErrAsOutParam is destructed.
///   }
///   @endcode
///
/// ErrorAsOutParameter takes an Error* rather than Error& so that it can be
/// used with optional Errors (Error pointers that are allowed to be null). If
/// ErrorAsOutParameter took an Error reference, an instance would have to be
/// created inside every condition that verified that Error was non-null. By
/// taking an Error pointer we can just create one instance at the top of the
/// function.
class ErrorAsOutParameter {
public:
  ErrorAsOutParameter(Error *Err) : Err(Err) {
    // Raise the checked bit if Err is success.
    if (Err)
      (void)!!*Err;
  }

  ~ErrorAsOutParameter() {
    // Clear the checked bit.
    if (Err && !*Err)
      *Err = Error::success();
  }

private:
  Error *Err;
};

/// Helper for Expected<T>s used as out-parameters.
///
/// See ErrorAsOutParameter.
template <typename T>
class ExpectedAsOutParameter {
public:
  ExpectedAsOutParameter(Expected<T> *ValOrErr)
    : ValOrErr(ValOrErr) {
    if (ValOrErr)
      (void)!!*ValOrErr;
  }

  ~ExpectedAsOutParameter() {
    if (ValOrErr)
      ValOrErr->setUnchecked();
  }

private:
  Expected<T> *ValOrErr;
};

/// This class wraps a std::error_code in a Error.
///
/// This is useful if you're writing an interface that returns a Error
/// (or Expected) and you want to call code that still returns
/// std::error_codes.
class ECError : public ErrorInfo<ECError> {
  friend Error errorCodeToError(std::error_code);

  void anchor() override;

public:
  void setErrorCode(std::error_code EC) { this->EC = EC; }
  std::error_code convertToErrorCode() const override { return EC; }
  void log(raw_ostream &OS) const override { OS << EC.message(); }

  // Used by ErrorInfo::classID.
  static char ID;

protected:
  ECError() = default;
  ECError(std::error_code EC) : EC(EC) {}

  std::error_code EC;
};

/// The value returned by this function can be returned from convertToErrorCode
/// for Error values where no sensible translation to std::error_code exists.
/// It should only be used in this situation, and should never be used where a
/// sensible conversion to std::error_code is available, as attempts to convert
/// to/from this error will result in a fatal error. (i.e. it is a programmatic
/// error to try to convert such a value).
std::error_code inconvertibleErrorCode();

/// Helper for converting an std::error_code to a Error.
Error errorCodeToError(std::error_code EC);

/// Helper for converting an ECError to a std::error_code.
///
/// This method requires that Err be Error() or an ECError, otherwise it
/// will trigger a call to abort().
std::error_code errorToErrorCode(Error Err);

/// Convert an ErrorOr<T> to an Expected<T>.
template <typename T> Expected<T> errorOrToExpected(ErrorOr<T> &&EO) {
  if (auto EC = EO.getError())
    return errorCodeToError(EC);
  return std::move(*EO);
}

/// Convert an Expected<T> to an ErrorOr<T>.
template <typename T> ErrorOr<T> expectedToErrorOr(Expected<T> &&E) {
  if (auto Err = E.takeError())
    return errorToErrorCode(std::move(Err));
  return std::move(*E);
}

/// This class wraps a string in an Error.
///
/// StringError is useful in cases where the client is not expected to be able
/// to consume the specific error message programmatically (for example, if the
/// error message is to be presented to the user).
///
/// StringError can also be used when additional information is to be printed
/// along with an error_code message. Depending on the constructor called, this
/// class can either display:
///    1. the error_code message (ECError behavior)
///    2. a string
///    3. the error_code message and a string
///
/// These behaviors are useful when subtyping is required; for example, when a
/// specific library needs an explicit error type. In the example below,
/// PDBError is derived from StringError:
///
///   @code{.cpp}
///   Expected<int> foo() {
///      return llvm::make_error<PDBError>(pdb_error_code::dia_failed_loading,
///                                        "Additional information");
///   }
///   @endcode
///
class StringError : public ErrorInfo<StringError> {
public:
  static char ID;

  // Prints EC + S and converts to EC
  StringError(std::error_code EC, const Twine &S = Twine());

  // Prints S and converts to EC
  StringError(const Twine &S, std::error_code EC);

  void log(raw_ostream &OS) const override;
  std::error_code convertToErrorCode() const override;

  const std::string &getMessage() const { return Msg; }

private:
  std::string Msg;
  std::error_code EC;
  const bool PrintMsgOnly = false;
};

/// Create a formatted StringError object.
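///
/// A typical use might look like (sketch; the message and error code are
/// illustrative):
///
///   @code{.cpp}
///   return createStringError(std::errc::invalid_argument,
///                            "unsupported opcode: %d", Opcode);
///   @endcode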
template <typename... Ts>
inline Error createStringError(std::error_code EC, char const *Fmt,
                               const Ts &... Vals) {
  std::string Buffer;
  raw_string_ostream Stream(Buffer);
  Stream << format(Fmt, Vals...);
  return make_error<StringError>(Stream.str(), EC);
}

Error createStringError(std::error_code EC, char const *Msg);

inline Error createStringError(std::error_code EC, const Twine &S) {
  return createStringError(EC, S.str().c_str());
}

template <typename... Ts>
inline Error createStringError(std::errc EC, char const *Fmt,
                               const Ts &... Vals) {
  return createStringError(std::make_error_code(EC), Fmt, Vals...);
}

/// This class wraps a filename and another Error.
///
/// In some cases, an error needs to be accompanied by a 'source' file name in
/// order to show more detailed information to the user.
class FileError final : public ErrorInfo<FileError> {

  friend Error createFileError(const Twine &, Error);
  friend Error createFileError(const Twine &, size_t, Error);

public:
  void log(raw_ostream &OS) const override {
    assert(Err && "Trying to log after takeError().");
    OS << "'" << FileName << "': ";
    if (Line)
      OS << "line " << *Line << ": ";
    Err->log(OS);
  }

  std::string messageWithoutFileInfo() const {
    std::string Msg;
    raw_string_ostream OS(Msg);
    Err->log(OS);
    return OS.str();
  }

  StringRef getFileName() const { return FileName; }

  Error takeError() { return Error(std::move(Err)); }

  std::error_code convertToErrorCode() const override;

  // Used by ErrorInfo::classID.
  static char ID;

private:
  FileError(const Twine &F, std::optional<size_t> LineNum,
            std::unique_ptr<ErrorInfoBase> E) {
    assert(E && "Cannot create FileError from Error success value.");
    FileName = F.str();
    Err = std::move(E);
    Line = std::move(LineNum);
  }

  static Error build(const Twine &F, std::optional<size_t> Line, Error E) {
    std::unique_ptr<ErrorInfoBase> Payload;
    handleAllErrors(std::move(E),
                    [&](std::unique_ptr<ErrorInfoBase> EIB) -> Error {
                      Payload = std::move(EIB);
                      return Error::success();
                    });
    return Error(
        std::unique_ptr<FileError>(new FileError(F, Line, std::move(Payload))));
  }

  std::string FileName;
  std::optional<size_t> Line;
  std::unique_ptr<ErrorInfoBase> Err;
};

/// Concatenate a source file path and/or name with an Error. The resulting
/// Error is unchecked.
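///
/// A sketch (parseBuffer is a hypothetical fallible call):
///
///   @code{.cpp}
///   if (Error E = parseBuffer(Buf))
///     return createFileError(Path, std::move(E));
///   @endcode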
inline Error createFileError(const Twine &F, Error E) {
  return FileError::build(F, std::optional<size_t>(), std::move(E));
}

/// Concatenate a source file path and/or name with line number and an Error.
/// The resulting Error is unchecked.
inline Error createFileError(const Twine &F, size_t Line, Error E) {
  return FileError::build(F, std::optional<size_t>(Line), std::move(E));
}

/// Concatenate a source file path and/or name with a std::error_code 
/// to form an Error object.
inline Error createFileError(const Twine &F, std::error_code EC) {
  return createFileError(F, errorCodeToError(EC));
}

/// Concatenate a source file path and/or name with line number and
/// std::error_code to form an Error object.
inline Error createFileError(const Twine &F, size_t Line, std::error_code EC) {
  return createFileError(F, Line, errorCodeToError(EC));
}

Error createFileError(const Twine &F, ErrorSuccess) = delete;

/// Helper for check-and-exit error handling.
///
/// For tool use only. NOT FOR USE IN LIBRARY CODE.
///
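/// A typical tool-level usage (sketch; openInput is a hypothetical
/// Expected-returning call):
///
///   @code{.cpp}
///   ExitOnError ExitOnErr("mytool: ");
///   auto Input = ExitOnErr(openInput(Path));
///   @endcode
///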
class ExitOnError {
public:
  /// Create an error on exit helper.
  ExitOnError(std::string Banner = "", int DefaultErrorExitCode = 1)
      : Banner(std::move(Banner)),
        GetExitCode([=](const Error &) { return DefaultErrorExitCode; }) {}

  /// Set the banner string for any errors caught by operator().
  void setBanner(std::string Banner) { this->Banner = std::move(Banner); }

  /// Set the exit-code mapper function.
  void setExitCodeMapper(std::function<int(const Error &)> GetExitCode) {
    this->GetExitCode = std::move(GetExitCode);
  }

  /// Check Err. If it's in a failure state log the error(s) and exit.
  void operator()(Error Err) const { checkError(std::move(Err)); }

  /// Check E. If it's in a success state then return the contained value. If
  /// it's in a failure state log the error(s) and exit.
  template <typename T> T operator()(Expected<T> &&E) const {
    checkError(E.takeError());
    return std::move(*E);
  }

  /// Check E. If it's in a success state then return the contained reference. If
  /// it's in a failure state log the error(s) and exit.
  template <typename T> T& operator()(Expected<T&> &&E) const {
    checkError(E.takeError());
    return *E;
  }

private:
  void checkError(Error Err) const {
    if (Err) {
      int ExitCode = GetExitCode(Err);
      logAllUnhandledErrors(std::move(Err), errs(), Banner);
      exit(ExitCode);
    }
  }

  std::string Banner;
  std::function<int(const Error &)> GetExitCode;
};

/// Conversion from Error to LLVMErrorRef for C error bindings.
inline LLVMErrorRef wrap(Error Err) {
  return reinterpret_cast<LLVMErrorRef>(Err.takePayload().release());
}

/// Conversion from LLVMErrorRef to Error for C error bindings.
inline Error unwrap(LLVMErrorRef ErrRef) {
  return Error(std::unique_ptr<ErrorInfoBase>(
      reinterpret_cast<ErrorInfoBase *>(ErrRef)));
}

} // end namespace llvm

#endif // LLVM_SUPPORT_ERROR_H
//===- llvm/Support/Casting.h - Allow flexible, checked, casts --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(),
// cast_if_present<X>(), and dyn_cast_if_present<X>() templates.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CASTING_H
#define LLVM_SUPPORT_CASTING_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <memory>
#include <optional>
#include <type_traits>

namespace llvm {

//===----------------------------------------------------------------------===//
// simplify_type
//===----------------------------------------------------------------------===//

/// Define a template that can be specialized by smart pointers to reflect the
/// fact that they are automatically dereferenced, and are not involved with the
/// template selection process...  the default implementation is a noop.
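///
/// A specialization for a hypothetical smart pointer might look like this
/// sketch:
///
///   @code{.cpp}
///   template <typename T> struct simplify_type<MySmartPtr<T>> {
///     using SimpleType = T *;
///     static SimpleType getSimplifiedValue(MySmartPtr<T> &P) {
///       return P.get();
///     }
///   };
///   @endcode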
// TODO: rename this and/or replace it with other cast traits.
template <typename From> struct simplify_type {
  using SimpleType = From; // The real type this represents...

  // An accessor to get the real value...
  static SimpleType &getSimplifiedValue(From &Val) { return Val; }
};

template <typename From> struct simplify_type<const From> {
  using NonConstSimpleType = typename simplify_type<From>::SimpleType;
  using SimpleType = typename add_const_past_pointer<NonConstSimpleType>::type;
  using RetType =
      typename add_lvalue_reference_if_not_pointer<SimpleType>::type;

  static RetType getSimplifiedValue(const From &Val) {
    return simplify_type<From>::getSimplifiedValue(const_cast<From &>(Val));
  }
};

// TODO: add this namespace once everyone is switched to using the new
//       interface.
// namespace detail {

//===----------------------------------------------------------------------===//
// isa_impl
//===----------------------------------------------------------------------===//

// The core of the implementation of isa<X> is here; To and From should be
// the names of classes.  This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
template <typename To, typename From, typename Enabler = void> struct isa_impl {
  static inline bool doit(const From &Val) { return To::classof(&Val); }
};

// Always allow upcasts, and perform no dynamic check for them.
template <typename To, typename From>
struct isa_impl<To, From, std::enable_if_t<std::is_base_of_v<To, From>>> {
  static inline bool doit(const From &) { return true; }
};

template <typename To, typename From> struct isa_impl_cl {
  static inline bool doit(const From &Val) {
    return isa_impl<To, From>::doit(Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From> {
  static inline bool doit(const From &Val) {
    return isa_impl<To, From>::doit(Val);
  }
};

template <typename To, typename From>
struct isa_impl_cl<To, const std::unique_ptr<From>> {
  static inline bool doit(const std::unique_ptr<From> &Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl_cl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, From *> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, From *const> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From> struct isa_impl_cl<To, const From *> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From>
struct isa_impl_cl<To, const From *const> {
  static inline bool doit(const From *Val) {
    assert(Val && "isa<> used on a null pointer");
    return isa_impl<To, From>::doit(*Val);
  }
};

template <typename To, typename From, typename SimpleFrom>
struct isa_impl_wrap {
  // When From != SimplifiedType, we can simplify the type some more by using
  // the simplify_type template.
  static bool doit(const From &Val) {
    return isa_impl_wrap<To, SimpleFrom,
                         typename simplify_type<SimpleFrom>::SimpleType>::
        doit(simplify_type<const From>::getSimplifiedValue(Val));
  }
};

template <typename To, typename FromTy>
struct isa_impl_wrap<To, FromTy, FromTy> {
  // When From == SimpleType, we are as simple as we are going to get.
  static bool doit(const FromTy &Val) {
    return isa_impl_cl<To, FromTy>::doit(Val);
  }
};

//===----------------------------------------------------------------------===//
// cast_retty + cast_retty_impl
//===----------------------------------------------------------------------===//

template <class To, class From> struct cast_retty;

// Calculate what type the 'cast' function should return, based on a requested
// type of To and a source type of From.
template <class To, class From> struct cast_retty_impl {
  using ret_type = To &; // Normal case, return Ty&
};
template <class To, class From> struct cast_retty_impl<To, const From> {
  using ret_type = const To &; // Normal case, return Ty&
};

template <class To, class From> struct cast_retty_impl<To, From *> {
  using ret_type = To *; // Pointer arg case, return Ty*
};

template <class To, class From> struct cast_retty_impl<To, const From *> {
  using ret_type = const To *; // Constant pointer arg case, return const Ty*
};

template <class To, class From> struct cast_retty_impl<To, const From *const> {
  using ret_type = const To *; // Constant pointer arg case, return const Ty*
};

template <class To, class From>
struct cast_retty_impl<To, std::unique_ptr<From>> {
private:
  using PointerType = typename cast_retty_impl<To, From *>::ret_type;
  using ResultType = std::remove_pointer_t<PointerType>;

public:
  using ret_type = std::unique_ptr<ResultType>;
};

template <class To, class From, class SimpleFrom> struct cast_retty_wrap {
  // When the simplified type and the from type are not the same, use the type
  // simplifier to reduce the type, then reuse cast_retty_impl to get the
  // resultant type.
  using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
};

template <class To, class FromTy> struct cast_retty_wrap<To, FromTy, FromTy> {
  // When the simplified type is equal to the from type, use it directly.
  using ret_type = typename cast_retty_impl<To, FromTy>::ret_type;
};

template <class To, class From> struct cast_retty {
  using ret_type = typename cast_retty_wrap<
      To, From, typename simplify_type<From>::SimpleType>::ret_type;
};

//===----------------------------------------------------------------------===//
// cast_convert_val
//===----------------------------------------------------------------------===//

// Ensure the non-simple values are converted using the simplify_type template
// that may be specialized by smart pointers...
//
template <class To, class From, class SimpleFrom> struct cast_convert_val {
  // This is not a simple type, use the template to simplify it...
  static typename cast_retty<To, From>::ret_type doit(const From &Val) {
    return cast_convert_val<To, SimpleFrom,
                            typename simplify_type<SimpleFrom>::SimpleType>::
        doit(simplify_type<From>::getSimplifiedValue(const_cast<From &>(Val)));
  }
};

template <class To, class FromTy> struct cast_convert_val<To, FromTy, FromTy> {
  // If it's a reference, switch to a pointer to do the cast and then deref it.
  static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
    return *(std::remove_reference_t<typename cast_retty<To, FromTy>::ret_type>
                 *)&const_cast<FromTy &>(Val);
  }
};

template <class To, class FromTy>
struct cast_convert_val<To, FromTy *, FromTy *> {
  // If it's a pointer, we can use c-style casting directly.
  static typename cast_retty<To, FromTy *>::ret_type doit(const FromTy *Val) {
    return (typename cast_retty<To, FromTy *>::ret_type) const_cast<FromTy *>(
        Val);
  }
};

//===----------------------------------------------------------------------===//
// is_simple_type
//===----------------------------------------------------------------------===//

template <class X> struct is_simple_type {
  static const bool value =
      std::is_same_v<X, typename simplify_type<X>::SimpleType>;
};

// } // namespace detail

//===----------------------------------------------------------------------===//
// CastIsPossible
//===----------------------------------------------------------------------===//

/// This struct provides a way to check if a given cast is possible. It provides
/// a static function called isPossible that is used to check if a cast can be
/// performed. It should be overridden like this:
///
/// template<> struct CastIsPossible<foo, bar> {
///   static inline bool isPossible(const bar &b) {
///     return b.isFoo();
///   }
/// };
template <typename To, typename From, typename Enable = void>
struct CastIsPossible {
  static inline bool isPossible(const From &f) {
    return isa_impl_wrap<
        To, const From,
        typename simplify_type<const From>::SimpleType>::doit(f);
  }
};

// Needed for optional unwrapping. This could be implemented with isa_impl, but
// we want to implement things in the new method and move old implementations
// over. In fact, some of the isa_impl templates should be moved over to
// CastIsPossible.
template <typename To, typename From>
struct CastIsPossible<To, std::optional<From>> {
  static inline bool isPossible(const std::optional<From> &f) {
    assert(f && "CastIsPossible::isPossible called on a nullopt!");
    return isa_impl_wrap<
        To, const From,
        typename simplify_type<const From>::SimpleType>::doit(*f);
  }
};

/// Upcasting (from derived to base) and casting from a type to itself should
/// always be possible.
template <typename To, typename From>
struct CastIsPossible<To, From, std::enable_if_t<std::is_base_of_v<To, From>>> {
  static inline bool isPossible(const From &f) { return true; }
};

//===----------------------------------------------------------------------===//
// Cast traits
//===----------------------------------------------------------------------===//

/// All of these cast traits are meant to be implementations for useful casts
/// that users may want to use that are outside the standard behavior. An
/// example of how to use a special cast called `CastTrait` is:
///
/// template<> struct CastInfo<foo, bar> : public CastTrait<foo, bar> {};
///
/// Essentially, if your use case falls directly into one of the use cases
/// supported by a given cast trait, simply inherit your special CastInfo
/// directly from one of these to avoid having to reimplement the boilerplate
/// `isPossible/castFailed/doCast/doCastIfPossible`. A cast trait can also
/// provide a subset of those functions.

/// This cast trait just provides castFailed for the specified `To` type to make
/// CastInfo specializations more declarative. In order to use this, the target
/// result type must be `To` and `To` must be constructible from `nullptr`.
template <typename To> struct NullableValueCastFailed {
  static To castFailed() { return To(nullptr); }
};

/// This cast trait just provides the default implementation of doCastIfPossible
/// to make CastInfo specializations more declarative. The `Derived` template
/// parameter *must* be provided for forwarding castFailed and doCast.
template <typename To, typename From, typename Derived>
struct DefaultDoCastIfPossible {
  static To doCastIfPossible(From f) {
    if (!Derived::isPossible(f))
      return Derived::castFailed();
    return Derived::doCast(f);
  }
};

namespace detail {
/// A helper to derive the type to use with `Self` for cast traits, when the
/// provided CRTP derived type is allowed to be void.
template <typename OptionalDerived, typename Default>
using SelfType = std::conditional_t<std::is_same_v<OptionalDerived, void>,
                                    Default, OptionalDerived>;
} // namespace detail

/// This cast trait provides casting for the specific case of casting to a
/// value-typed object from a pointer-typed object. Note that `To` must be
/// nullable/constructible from a pointer to `From` to use this cast.
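///
/// Wiring it up might look like this sketch (foo and bar are hypothetical,
/// with foo constructible from bar *):
///
///   @code{.cpp}
///   template <>
///   struct CastInfo<foo, bar *> : public ValueFromPointerCast<foo, bar> {};
///   @endcode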
template <typename To, typename From, typename Derived = void>
struct ValueFromPointerCast
    : public CastIsPossible<To, From *>,
      public NullableValueCastFailed<To>,
      public DefaultDoCastIfPossible<
          To, From *,
          detail::SelfType<Derived, ValueFromPointerCast<To, From>>> {
  static inline To doCast(From *f) { return To(f); }
};

/// This cast trait provides std::unique_ptr casting. It has the semantics of
/// moving the contents of the input unique_ptr into the output unique_ptr
/// during the cast. It's also a good example of how to implement a move-only
/// cast.
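///
/// With a CastInfo wired to this trait, usage might look like (sketch;
/// Base/Derived are hypothetical and Derived provides classof):
///
///   @code{.cpp}
///   std::unique_ptr<Base> B = makeBase();
///   std::unique_ptr<Derived> D = cast<Derived>(std::move(B));
///   @endcode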
template <typename To, typename From, typename Derived = void>
struct UniquePtrCast : public CastIsPossible<To, From *> {
  using Self = detail::SelfType<Derived, UniquePtrCast<To, From>>;
  using CastResultType = std::unique_ptr<
      std::remove_reference_t<typename cast_retty<To, From>::ret_type>>;

  static inline CastResultType doCast(std::unique_ptr<From> &&f) {
    return CastResultType((typename CastResultType::element_type *)f.release());
  }

  static inline CastResultType castFailed() { return CastResultType(nullptr); }

  static inline CastResultType doCastIfPossible(std::unique_ptr<From> &&f) {
    if (!Self::isPossible(f))
      return castFailed();
    return doCast(f);
  }
};

/// This cast trait provides std::optional<T> casting. This means that if you
/// have a value type, you can cast it to another value type and have dyn_cast
/// return an std::optional<T>.
template <typename To, typename From, typename Derived = void>
struct OptionalValueCast
    : public CastIsPossible<To, From>,
      public DefaultDoCastIfPossible<
          std::optional<To>, From,
          detail::SelfType<Derived, OptionalValueCast<To, From>>> {
  static inline std::optional<To> castFailed() { return std::optional<To>{}; }

  static inline std::optional<To> doCast(const From &f) { return To(f); }
};

/// Provides a cast trait that strips `const` from types to make it easier to
/// implement a const-version of a non-const cast. It just removes boilerplate
/// and reduces the amount of code you as the user need to implement. You can
/// use it like this:
///
/// template<> struct CastInfo<foo, bar> {
///   ...verbose implementation...
/// };
///
/// template<> struct CastInfo<foo, const bar> : public
///        ConstStrippingForwardingCast<foo, const bar, CastInfo<foo, bar>> {};
///
template <typename To, typename From, typename ForwardTo>
struct ConstStrippingForwardingCast {
  // Remove the pointer if it exists, then we can get rid of consts/volatiles.
  using DecayedFrom = std::remove_cv_t<std::remove_pointer_t<From>>;
  // Now if it's a pointer, add it back. Otherwise, we want a ref.
  using NonConstFrom =
      std::conditional_t<std::is_pointer_v<From>, DecayedFrom *, DecayedFrom &>;

  static inline bool isPossible(const From &f) {
    return ForwardTo::isPossible(const_cast<NonConstFrom>(f));
  }

  static inline decltype(auto) castFailed() { return ForwardTo::castFailed(); }

  static inline decltype(auto) doCast(const From &f) {
    return ForwardTo::doCast(const_cast<NonConstFrom>(f));
  }

  static inline decltype(auto) doCastIfPossible(const From &f) {
    return ForwardTo::doCastIfPossible(const_cast<NonConstFrom>(f));
  }
};

/// Provides a cast trait that uses a defined pointer-to-pointer cast as a base
/// for reference-to-reference casts. Note that it does not provide castFailed
/// and doCastIfPossible because a pointer-to-pointer cast would likely just
/// return `nullptr`, which could cause a nullptr dereference. You can use it
/// like this:
///
///   template <> struct CastInfo<foo, bar *> { ... verbose implementation... };
///
///   template <>
///   struct CastInfo<foo, bar>
///       : public ForwardToPointerCast<foo, bar, CastInfo<foo, bar *>> {};
///
template <typename To, typename From, typename ForwardTo>
struct ForwardToPointerCast {
  static inline bool isPossible(const From &f) {
    return ForwardTo::isPossible(&f);
  }

  static inline decltype(auto) doCast(const From &f) {
    return *ForwardTo::doCast(&f);
  }
};

//===----------------------------------------------------------------------===//
// CastInfo
//===----------------------------------------------------------------------===//

/// This struct provides a method for customizing the way a cast is performed.
/// It inherits from CastIsPossible, to support the case of declaring many
/// CastIsPossible specializations without having to specialize the full
/// CastInfo.
///
/// In order to specialize different behaviors, specify different functions in
/// your CastInfo specialization.
/// For isa<> customization, provide:
///
///   `static bool isPossible(const From &f)`
///
/// For cast<> customization, provide:
///
///  `static To doCast(const From &f)`
///
/// For dyn_cast<> and the *_if_present<> variants' customization, provide:
///
///  `static To castFailed()` and `static To doCastIfPossible(const From &f)`
///
/// Your specialization might look something like this:
///
///  template<> struct CastInfo<foo, bar> : public CastIsPossible<foo, bar> {
///    static inline foo doCast(const bar &b) {
///      return foo(const_cast<bar &>(b));
///    }
///    static inline foo castFailed() { return foo(); }
///    static inline foo doCastIfPossible(const bar &b) {
///      if (!CastInfo<foo, bar>::isPossible(b))
///        return castFailed();
///      return doCast(b);
///    }
///  };

// The default implementations of CastInfo don't use cast traits for now because
// we need to specify types all over the place due to the current expected
// casting behavior and the way cast_retty works. New use cases can and should
// take advantage of the cast traits whenever possible!

template <typename To, typename From, typename Enable = void>
struct CastInfo : public CastIsPossible<To, From> {
  using Self = CastInfo<To, From, Enable>;

  using CastReturnType = typename cast_retty<To, From>::ret_type;

  static inline CastReturnType doCast(const From &f) {
    return cast_convert_val<
        To, From,
        typename simplify_type<From>::SimpleType>::doit(const_cast<From &>(f));
  }

  // This assumes that you can construct the cast return type from `nullptr`.
  // This is largely to support legacy use cases - if you don't want this
  // behavior you should specialize CastInfo for your use case.
  static inline CastReturnType castFailed() { return CastReturnType(nullptr); }

  static inline CastReturnType doCastIfPossible(const From &f) {
    if (!Self::isPossible(f))
      return castFailed();
    return doCast(f);
  }
};

/// This struct provides an overload for CastInfo where From has simplify_type
/// defined. This simply forwards to the appropriate CastInfo with the
/// simplified type/value, so you don't have to implement both.
template <typename To, typename From>
struct CastInfo<To, From, std::enable_if_t<!is_simple_type<From>::value>> {
  using Self = CastInfo<To, From>;
  using SimpleFrom = typename simplify_type<From>::SimpleType;
  using SimplifiedSelf = CastInfo<To, SimpleFrom>;

  static inline bool isPossible(From &f) {
    return SimplifiedSelf::isPossible(
        simplify_type<From>::getSimplifiedValue(f));
  }

  static inline decltype(auto) doCast(From &f) {
    return SimplifiedSelf::doCast(simplify_type<From>::getSimplifiedValue(f));
  }

  static inline decltype(auto) castFailed() {
    return SimplifiedSelf::castFailed();
  }

  static inline decltype(auto) doCastIfPossible(From &f) {
    return SimplifiedSelf::doCastIfPossible(
        simplify_type<From>::getSimplifiedValue(f));
  }
};

//===----------------------------------------------------------------------===//
// Pre-specialized CastInfo
//===----------------------------------------------------------------------===//

/// Provide a CastInfo specialized for std::unique_ptr.
template <typename To, typename From>
struct CastInfo<To, std::unique_ptr<From>> : public UniquePtrCast<To, From> {};

/// Provide a CastInfo specialized for std::optional<From>. It's assumed that if
/// the input is std::optional<From> that the output can be std::optional<To>.
/// If that's not the case, specialize CastInfo for your use case.
template <typename To, typename From>
struct CastInfo<To, std::optional<From>> : public OptionalValueCast<To, From> {
};
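
// Example (editor's sketch, hypothetical value types `Base`/`Derived` with a
// suitable CastInfo between them): a cast over std::optional yields another
// optional; use dyn_cast_if_present when the input may be empty:
//
//   std::optional<Base> MaybeB = getBase();
//   std::optional<Derived> MaybeD = dyn_cast_if_present<Derived>(MaybeB);
//   // MaybeD is engaged only if MaybeB was engaged and the cast succeeded.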

/// isa<X> - Return true if the parameter to the template is an instance of one
/// of the template type arguments.  Used like this:
///
///  if (isa<Type>(myVal)) { ... }
///  if (isa<Type0, Type1, Type2>(myVal)) { ... }
template <typename To, typename From>
[[nodiscard]] inline bool isa(const From &Val) {
  return CastInfo<To, const From>::isPossible(Val);
}

template <typename First, typename Second, typename... Rest, typename From>
[[nodiscard]] inline bool isa(const From &Val) {
  return isa<First>(Val) || isa<Second, Rest...>(Val);
}

/// cast<X> - Return the argument parameter cast to the specified type.  This
/// casting operator asserts that the type is correct, so it does not return
/// null on failure.  It does not allow a null argument (use cast_if_present for
/// that). It is typically used like this:
///
///  cast<Instruction>(myVal)->getParent()

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) cast(const From &Val) {
  assert(isa<To>(Val) && "cast<Ty>() argument of incompatible type!");
  return CastInfo<To, const From>::doCast(Val);
}

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) cast(From &Val) {
  assert(isa<To>(Val) && "cast<Ty>() argument of incompatible type!");
  return CastInfo<To, From>::doCast(Val);
}

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) cast(From *Val) {
  assert(isa<To>(Val) && "cast<Ty>() argument of incompatible type!");
  return CastInfo<To, From *>::doCast(Val);
}

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) cast(std::unique_ptr<From> &&Val) {
  assert(isa<To>(Val) && "cast<Ty>() argument of incompatible type!");
  return CastInfo<To, std::unique_ptr<From>>::doCast(std::move(Val));
}

//===----------------------------------------------------------------------===//
// ValueIsPresent
//===----------------------------------------------------------------------===//

template <typename T>
constexpr bool IsNullable =
    std::is_pointer_v<T> || std::is_constructible_v<T, std::nullptr_t>;

/// ValueIsPresent provides a way to check if a value is, well, present. For
/// pointers, this is the equivalent of checking against nullptr; for optionals,
/// it is the equivalent of checking has_value(). It also provides a method for
/// unwrapping a value (think calling .value() on an optional).

// Generic values can't *not* be present.
template <typename T, typename Enable = void> struct ValueIsPresent {
  using UnwrappedType = T;
  static inline bool isPresent(const T &t) { return true; }
  static inline decltype(auto) unwrapValue(T &t) { return t; }
};

// Optional provides its own way to check if something is present.
template <typename T> struct ValueIsPresent<std::optional<T>> {
  using UnwrappedType = T;
  static inline bool isPresent(const std::optional<T> &t) {
    return t.has_value();
  }
  static inline decltype(auto) unwrapValue(std::optional<T> &t) { return *t; }
};

// If something is "nullable" then we just compare it to nullptr to see if it
// exists.
template <typename T>
struct ValueIsPresent<T, std::enable_if_t<IsNullable<T>>> {
  using UnwrappedType = T;
  static inline bool isPresent(const T &t) { return t != T(nullptr); }
  static inline decltype(auto) unwrapValue(T &t) { return t; }
};
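
// Example (editor's note): how the three specializations behave:
//
//   int *P = nullptr;
//   ValueIsPresent<int *>::isPresent(P);              // false: P == nullptr
//   std::optional<int> O;
//   ValueIsPresent<std::optional<int>>::isPresent(O); // false: no value
//   int I = 42;
//   ValueIsPresent<int>::isPresent(I);                // true: generic values
//                                                     // are always present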

namespace detail {
// Convenience function we can use to check if a value is present. Because of
// simplify_type, we have to call it on the simplified type for now.
template <typename T> inline bool isPresent(const T &t) {
  return ValueIsPresent<typename simplify_type<T>::SimpleType>::isPresent(
      simplify_type<T>::getSimplifiedValue(const_cast<T &>(t)));
}

// Convenience function we can use to unwrap a value.
template <typename T> inline decltype(auto) unwrapValue(T &t) {
  return ValueIsPresent<T>::unwrapValue(t);
}
} // namespace detail

/// dyn_cast<X> - Return the argument parameter cast to the specified type. This
/// casting operator returns null if the argument is of the wrong type, so it
/// can be used to test for a type as well as cast if successful. The value
/// passed in must be present, if not, use dyn_cast_if_present. This should be
/// used in the context of an if statement like this:
///
///  if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) dyn_cast(const From &Val) {
  assert(detail::isPresent(Val) && "dyn_cast on a non-existent value");
  return CastInfo<To, const From>::doCastIfPossible(Val);
}

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) dyn_cast(From &Val) {
  assert(detail::isPresent(Val) && "dyn_cast on a non-existent value");
  return CastInfo<To, From>::doCastIfPossible(Val);
}

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) dyn_cast(From *Val) {
  assert(detail::isPresent(Val) && "dyn_cast on a non-existent value");
  return CastInfo<To, From *>::doCastIfPossible(Val);
}

template <typename To, typename From>
[[nodiscard]] inline decltype(auto) dyn_cast(std::unique_ptr<From> &&Val) {
  assert(detail::isPresent(Val) && "dyn_cast on a non-existent value");
  return CastInfo<To, std::unique_ptr<From>>::doCastIfPossible(
      std::forward<std::unique_ptr<From> &&>(Val));
}

/// isa_and_present<X> - Functionally identical to isa, except that a null value
/// is accepted.
template <typename... X, class Y>
[[nodiscard]] inline bool isa_and_present(const Y &Val) {
  if (!detail::isPresent(Val))
    return false;
  return isa<X...>(Val);
}

template <typename... X, class Y>
[[nodiscard]] inline bool isa_and_nonnull(const Y &Val) {
  return isa_and_present<X...>(Val);
}

/// cast_if_present<X> - Functionally identical to cast, except that a null
/// value is accepted.
template <class X, class Y>
[[nodiscard]] inline auto cast_if_present(const Y &Val) {
  if (!detail::isPresent(Val))
    return CastInfo<X, const Y>::castFailed();
  assert(isa<X>(Val) && "cast_if_present<Ty>() argument of incompatible type!");
  return cast<X>(detail::unwrapValue(Val));
}

template <class X, class Y> [[nodiscard]] inline auto cast_if_present(Y &Val) {
  if (!detail::isPresent(Val))
    return CastInfo<X, Y>::castFailed();
  assert(isa<X>(Val) && "cast_if_present<Ty>() argument of incompatible type!");
  return cast<X>(detail::unwrapValue(Val));
}

template <class X, class Y> [[nodiscard]] inline auto cast_if_present(Y *Val) {
  if (!detail::isPresent(Val))
    return CastInfo<X, Y *>::castFailed();
  assert(isa<X>(Val) && "cast_if_present<Ty>() argument of incompatible type!");
  return cast<X>(detail::unwrapValue(Val));
}

template <class X, class Y>
[[nodiscard]] inline auto cast_if_present(std::unique_ptr<Y> &&Val) {
  if (!detail::isPresent(Val))
    return UniquePtrCast<X, Y>::castFailed();
  return UniquePtrCast<X, Y>::doCast(std::move(Val));
}

// Provide a forwarding from cast_or_null to cast_if_present for current
// users. This is deprecated and will be removed in a future patch, use
// cast_if_present instead.
template <class X, class Y> auto cast_or_null(const Y &Val) {
  return cast_if_present<X>(Val);
}

template <class X, class Y> auto cast_or_null(Y &Val) {
  return cast_if_present<X>(Val);
}

template <class X, class Y> auto cast_or_null(Y *Val) {
  return cast_if_present<X>(Val);
}

template <class X, class Y> auto cast_or_null(std::unique_ptr<Y> &&Val) {
  return cast_if_present<X>(std::move(Val));
}

/// dyn_cast_if_present<X> - Functionally identical to dyn_cast, except that a
/// null (or none in the case of optionals) value is accepted.
template <class X, class Y> auto dyn_cast_if_present(const Y &Val) {
  if (!detail::isPresent(Val))
    return CastInfo<X, const Y>::castFailed();
  return CastInfo<X, const Y>::doCastIfPossible(detail::unwrapValue(Val));
}

template <class X, class Y> auto dyn_cast_if_present(Y &Val) {
  if (!detail::isPresent(Val))
    return CastInfo<X, Y>::castFailed();
  return CastInfo<X, Y>::doCastIfPossible(detail::unwrapValue(Val));
}

template <class X, class Y> auto dyn_cast_if_present(Y *Val) {
  if (!detail::isPresent(Val))
    return CastInfo<X, Y *>::castFailed();
  return CastInfo<X, Y *>::doCastIfPossible(detail::unwrapValue(Val));
}
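
// Example (editor's sketch, assuming the usual LLVM Value/Instruction
// hierarchy):
//
//   Value *V = mayReturnNull();
//   if (auto *I = dyn_cast_if_present<Instruction>(V)) {
//     // Reached only when V was non-null *and* an Instruction.
//   }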

// Forwards to dyn_cast_if_present to avoid breaking current users. This is
// deprecated and will be removed in a future patch, use
// dyn_cast_if_present instead.
template <class X, class Y> auto dyn_cast_or_null(const Y &Val) {
  return dyn_cast_if_present<X>(Val);
}

template <class X, class Y> auto dyn_cast_or_null(Y &Val) {
  return dyn_cast_if_present<X>(Val);
}

template <class X, class Y> auto dyn_cast_or_null(Y *Val) {
  return dyn_cast_if_present<X>(Val);
}

/// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
/// taking ownership of the input pointer iff isa<X>(Val) is true.  If the
/// cast is successful, Val refers to nullptr on exit and the cast value
/// is returned.  If the cast is unsuccessful, the function returns nullptr
/// and Val is unchanged.
template <class X, class Y>
[[nodiscard]] inline typename CastInfo<X, std::unique_ptr<Y>>::CastResultType
unique_dyn_cast(std::unique_ptr<Y> &Val) {
  if (!isa<X>(Val))
    return nullptr;
  return cast<X>(std::move(Val));
}

template <class X, class Y>
[[nodiscard]] inline auto unique_dyn_cast(std::unique_ptr<Y> &&Val) {
  return unique_dyn_cast<X, Y>(Val);
}

// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast,
// except that a null value is accepted.
template <class X, class Y>
[[nodiscard]] inline typename CastInfo<X, std::unique_ptr<Y>>::CastResultType
unique_dyn_cast_or_null(std::unique_ptr<Y> &Val) {
  if (!Val)
    return nullptr;
  return unique_dyn_cast<X, Y>(Val);
}

template <class X, class Y>
[[nodiscard]] inline auto unique_dyn_cast_or_null(std::unique_ptr<Y> &&Val) {
  return unique_dyn_cast_or_null<X, Y>(Val);
}
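
// Example (editor's sketch, hypothetical Base/Derived class hierarchy):
//
//   std::unique_ptr<Base> B = makeObject();
//   if (auto D = unique_dyn_cast<Derived>(B)) {
//     // Success: D owns the object and B is now null.
//   } else {
//     // Failure: D is null and B still owns the object.
//   }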

} // end namespace llvm

#endif // LLVM_SUPPORT_CASTING_H

//===-- llvm/Support/ARMTargetParserCommon.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/ARMTargetParserCommon.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/ARMTargetParserCommon.h"

//===---- llvm/Support/Discriminator.h -- Discriminator Utils ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the constants and utility functions for discriminators.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DISCRIMINATOR_H
#define LLVM_SUPPORT_DISCRIMINATOR_H

#include "llvm/Support/Error.h"
#include <assert.h>

// Utility functions for encoding / decoding discriminators.
/// With a given unsigned int \p U, use up to 13 bits to represent it.
/// old_bit 1~5  --> new_bit 1~5
/// old_bit 6~12 --> new_bit 7~13
/// new_bit_6 is 0 if higher bits (7~13) are all 0
static inline unsigned getPrefixEncodingFromUnsigned(unsigned U) {
  U &= 0xfff;
  return U > 0x1f ? (((U & 0xfe0) << 1) | (U & 0x1f) | 0x20) : U;
}
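
// Worked example (editor's note): U = 0x25 needs more than 5 bits, so
//   ((0x25 & 0xfe0) << 1) | (0x25 & 0x1f) | 0x20
//     == (0x20 << 1) | 0x05 | 0x20 == 0x65,
// i.e. old bits 6~12 shift up to 7~13 and bit 6 (0x20) marks their presence.
// Any U <= 0x1f is returned unchanged, leaving bit 6 clear.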

/// The inverse transformation of getPrefixEncodingFromUnsigned.
static inline unsigned getUnsignedFromPrefixEncoding(unsigned U) {
  if (U & 1)
    return 0;
  U >>= 1;
  return (U & 0x20) ? (((U >> 1) & 0xfe0) | (U & 0x1f)) : (U & 0x1f);
}

/// Returns the next component stored in discriminator.
static inline unsigned getNextComponentInDiscriminator(unsigned D) {
  if ((D & 1) == 0)
    return D >> ((D & 0x40) ? 14 : 7);
  else
    return D >> 1;
}

static inline unsigned encodeComponent(unsigned C) {
  return (C == 0) ? 1U : (getPrefixEncodingFromUnsigned(C) << 1);
}

static inline unsigned encodingBits(unsigned C) {
  return (C == 0) ? 1 : (C > 0x1f ? 14 : 7);
}
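
// Worked example (editor's note): components round-trip through the encoding:
//   encodeComponent(5) == getPrefixEncodingFromUnsigned(5) << 1 == 10
//   getUnsignedFromPrefixEncoding(10) == 5  // low bit clear: real payload
//   encodeComponent(0) == 1                 // low bit set: empty component
//   encodingBits(5) == 7                    // 5 fits the short (7-bit) form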

// Some constants used in FS Discriminators.
//
namespace llvm {
namespace sampleprof {
enum FSDiscriminatorPass {
  Base = 0,
  Pass0 = 0,
  Pass1 = 1,
  Pass2 = 2,
  Pass3 = 3,
  Pass4 = 4,
  PassLast = 4,
};
} // namespace sampleprof

// The number of bits reserved for the base discriminator. The base
// discriminator starts from bit 0.
static const unsigned BaseDiscriminatorBitWidth = 8;

// The number of bits reserved for each FS discriminator pass.
static const unsigned FSDiscriminatorBitWidth = 6;

// Return the number of FS passes, excluding the pass that adds the base
// discriminators. Note that the total number of discriminator bits, i.e.
//   BaseDiscriminatorBitWidth + FSDiscriminatorBitWidth * getNumFSPasses()
// needs to fit in an unsigned int.
static inline unsigned getNumFSPasses() {
  return static_cast<unsigned>(sampleprof::FSDiscriminatorPass::PassLast);
}

// Return the ending bit for FSPass P.
static inline unsigned getFSPassBitEnd(sampleprof::FSDiscriminatorPass P) {
  unsigned I = static_cast<unsigned>(P);
  assert(I <= getNumFSPasses() && "Invalid FS discriminator pass number.");
  return BaseDiscriminatorBitWidth + I * FSDiscriminatorBitWidth - 1;
}

// Return the beginning bit for FSPass P.
static inline unsigned getFSPassBitBegin(sampleprof::FSDiscriminatorPass P) {
  if (P == sampleprof::FSDiscriminatorPass::Base)
    return 0;
  unsigned I = static_cast<unsigned>(P);
  assert(I <= getNumFSPasses() && "Invalid FS discriminator pass number.");
  return getFSPassBitEnd(static_cast<sampleprof::FSDiscriminatorPass>(I - 1)) +
         1;
}

// Return the beginning bit for the last FSPass.
static inline int getLastFSPassBitBegin() {
  return getFSPassBitBegin(
      static_cast<sampleprof::FSDiscriminatorPass>(getNumFSPasses()));
}

// Return the ending bit for the last FSPass.
static inline unsigned getLastFSPassBitEnd() {
  return getFSPassBitEnd(
      static_cast<sampleprof::FSDiscriminatorPass>(getNumFSPasses()));
}

// Return the beginning bit for the base (first) FSPass.
static inline unsigned getBaseFSBitBegin() { return 0; }

// Return the ending bit for the base (first) FSPass.
static inline unsigned getBaseFSBitEnd() {
  return BaseDiscriminatorBitWidth - 1;
}
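
// Worked layout (editor's note) with BaseDiscriminatorBitWidth == 8 and
// FSDiscriminatorBitWidth == 6:
//   Base (Pass0): bits 0..7     Pass1: bits 8..13    Pass2: bits 14..19
//   Pass3: bits 20..25          Pass4: bits 26..31
// e.g. getFSPassBitBegin(Pass2) == 14, getFSPassBitEnd(Pass2) == 19, and the
// total of 8 + 6 * 4 == 32 bits exactly fills an unsigned int.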

// Set bits in the range [0 .. N] to 1. Used in FS Discriminators.
static inline unsigned getN1Bits(int N) {
  // Work around a g++ bug that folds "(1U << (N + 1)) - 1" to 0 when N == 31.
  if (N == 31)
    return 0xFFFFFFFF;
  assert((N < 32) && "N is invalid");
  return (1U << (N + 1)) - 1;
}

} // namespace llvm

#endif /* LLVM_SUPPORT_DISCRIMINATOR_H */

//===- llvm/Support/KnownBits.h - Stores known zeros/ones -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a class for representing known zeros and ones used by
// computeKnownBits.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_KNOWNBITS_H
#define LLVM_SUPPORT_KNOWNBITS_H

#include "llvm/ADT/APInt.h"
#include <optional>

namespace llvm {

// Struct for tracking the known zeros and ones of a value.
struct KnownBits {
  APInt Zero;
  APInt One;

private:
  // Internal constructor for creating a KnownBits from two APInts.
  KnownBits(APInt Zero, APInt One)
      : Zero(std::move(Zero)), One(std::move(One)) {}

public:
  // Default construct Zero and One.
  KnownBits() = default;

  /// Create a known bits object of BitWidth bits initialized to unknown.
  KnownBits(unsigned BitWidth) : Zero(BitWidth, 0), One(BitWidth, 0) {}

  /// Get the bit width of this value.
  unsigned getBitWidth() const {
    assert(Zero.getBitWidth() == One.getBitWidth() &&
           "Zero and One should have the same width!");
    return Zero.getBitWidth();
  }

  /// Returns true if there is conflicting information.
  bool hasConflict() const { return Zero.intersects(One); }

  /// Returns true if we know the value of all bits.
  bool isConstant() const {
    assert(!hasConflict() && "KnownBits conflict!");
    return Zero.popcount() + One.popcount() == getBitWidth();
  }

  /// Returns the value when all bits have a known value. This just returns One
  /// with a protective assertion.
  const APInt &getConstant() const {
    assert(isConstant() && "Can only get value when all bits are known");
    return One;
  }

  /// Returns true if we don't know any bits.
  bool isUnknown() const { return Zero.isZero() && One.isZero(); }

  /// Resets the known state of all bits.
  void resetAll() {
    Zero.clearAllBits();
    One.clearAllBits();
  }

  /// Returns true if value is all zero.
  bool isZero() const {
    assert(!hasConflict() && "KnownBits conflict!");
    return Zero.isAllOnes();
  }

  /// Returns true if value is all one bits.
  bool isAllOnes() const {
    assert(!hasConflict() && "KnownBits conflict!");
    return One.isAllOnes();
  }

  /// Make all bits known to be zero and discard any previous information.
  void setAllZero() {
    Zero.setAllBits();
    One.clearAllBits();
  }

  /// Make all bits known to be one and discard any previous information.
  void setAllOnes() {
    Zero.clearAllBits();
    One.setAllBits();
  }

  /// Returns true if this value is known to be negative.
  bool isNegative() const { return One.isSignBitSet(); }

  /// Returns true if this value is known to be non-negative.
  bool isNonNegative() const { return Zero.isSignBitSet(); }

  /// Returns true if this value is known to be non-zero.
  bool isNonZero() const { return !One.isZero(); }

  /// Returns true if this value is known to be positive.
  bool isStrictlyPositive() const {
    return Zero.isSignBitSet() && !One.isZero();
  }

  /// Make this value negative.
  void makeNegative() {
    One.setSignBit();
  }

  /// Make this value non-negative.
  void makeNonNegative() {
    Zero.setSignBit();
  }

  /// Return the minimal unsigned value possible given these KnownBits.
  APInt getMinValue() const {
    // Assume that all bits that aren't known-ones are zeros.
    return One;
  }

  /// Return the minimal signed value possible given these KnownBits.
  APInt getSignedMinValue() const {
    // Assume that all bits that aren't known-ones are zeros.
    APInt Min = One;
    // Sign bit is unknown.
    if (Zero.isSignBitClear())
      Min.setSignBit();
    return Min;
  }

  /// Return the maximal unsigned value possible given these KnownBits.
  APInt getMaxValue() const {
    // Assume that all bits that aren't known-zeros are ones.
    return ~Zero;
  }

  /// Return the maximal signed value possible given these KnownBits.
  APInt getSignedMaxValue() const {
    // Assume that all bits that aren't known-zeros are ones.
    APInt Max = ~Zero;
    // Sign bit is unknown.
    if (One.isSignBitClear())
      Max.clearSignBit();
    return Max;
  }
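
  // Worked example (editor's note): a 4-bit value known to match 0b??10
  // (bit 1 known one, bit 0 known zero, high bits unknown) is stored as
  //   Zero == 0b0001, One == 0b0010
  // so getMinValue() == 0b0010 == 2 and getMaxValue() == 0b1110 == 14.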

  /// Return known bits for a truncation of the value we're tracking.
  KnownBits trunc(unsigned BitWidth) const {
    return KnownBits(Zero.trunc(BitWidth), One.trunc(BitWidth));
  }

  /// Return known bits for an "any" extension of the value we're tracking,
  /// where we don't know anything about the extended bits.
  KnownBits anyext(unsigned BitWidth) const {
    return KnownBits(Zero.zext(BitWidth), One.zext(BitWidth));
  }

  /// Return known bits for a zero extension of the value we're tracking.
  KnownBits zext(unsigned BitWidth) const {
    unsigned OldBitWidth = getBitWidth();
    APInt NewZero = Zero.zext(BitWidth);
    NewZero.setBitsFrom(OldBitWidth);
    return KnownBits(NewZero, One.zext(BitWidth));
  }

  /// Return known bits for a sign extension of the value we're tracking.
  KnownBits sext(unsigned BitWidth) const {
    return KnownBits(Zero.sext(BitWidth), One.sext(BitWidth));
  }

  /// Return known bits for an "any" extension or truncation of the value we're
  /// tracking.
  KnownBits anyextOrTrunc(unsigned BitWidth) const {
    if (BitWidth > getBitWidth())
      return anyext(BitWidth);
    if (BitWidth < getBitWidth())
      return trunc(BitWidth);
    return *this;
  }

  /// Return known bits for a zero extension or truncation of the value we're
  /// tracking.
  KnownBits zextOrTrunc(unsigned BitWidth) const {
    if (BitWidth > getBitWidth())
      return zext(BitWidth);
    if (BitWidth < getBitWidth())
      return trunc(BitWidth);
    return *this;
  }

  /// Return known bits for a sign extension or truncation of the value we're
  /// tracking.
  KnownBits sextOrTrunc(unsigned BitWidth) const {
    if (BitWidth > getBitWidth())
      return sext(BitWidth);
    if (BitWidth < getBitWidth())
      return trunc(BitWidth);
    return *this;
  }

  /// Return known bits for an in-register sign extension of the value we're
  /// tracking.
  KnownBits sextInReg(unsigned SrcBitWidth) const;

  /// Insert the bits from a smaller known bits starting at bitPosition.
  void insertBits(const KnownBits &SubBits, unsigned BitPosition) {
    Zero.insertBits(SubBits.Zero, BitPosition);
    One.insertBits(SubBits.One, BitPosition);
  }

  /// Return a subset of the known bits from [bitPosition,bitPosition+numBits).
  KnownBits extractBits(unsigned NumBits, unsigned BitPosition) const {
    return KnownBits(Zero.extractBits(NumBits, BitPosition),
                     One.extractBits(NumBits, BitPosition));
  }

  /// Concatenate the bits from \p Lo onto the bottom of *this.  This is
  /// equivalent to:
  ///   (this->zext(NewWidth) << Lo.getBitWidth()) | Lo.zext(NewWidth)
  KnownBits concat(const KnownBits &Lo) const {
    return KnownBits(Zero.concat(Lo.Zero), One.concat(Lo.One));
  }

  /// Return KnownBits based on this, but updated given that the underlying
  /// value is known to be greater than or equal to Val.
  KnownBits makeGE(const APInt &Val) const;

  /// Returns the minimum number of trailing zero bits.
  unsigned countMinTrailingZeros() const { return Zero.countr_one(); }

  /// Returns the minimum number of trailing one bits.
  unsigned countMinTrailingOnes() const { return One.countr_one(); }

  /// Returns the minimum number of leading zero bits.
  unsigned countMinLeadingZeros() const { return Zero.countl_one(); }

  /// Returns the minimum number of leading one bits.
  unsigned countMinLeadingOnes() const { return One.countl_one(); }

  /// Returns the number of times the sign bit is replicated into the other
  /// bits.
  unsigned countMinSignBits() const {
    if (isNonNegative())
      return countMinLeadingZeros();
    if (isNegative())
      return countMinLeadingOnes();
    // Every value has at least 1 sign bit.
    return 1;
  }

  /// Returns the maximum number of bits needed to represent all possible
  /// signed values with these known bits. This is the inverse of the minimum
  /// number of known sign bits. Examples for bitwidth 5:
  /// 110?? --> 4
  /// 0000? --> 2
  unsigned countMaxSignificantBits() const {
    return getBitWidth() - countMinSignBits() + 1;
  }

  /// Returns the maximum number of trailing zero bits possible.
  unsigned countMaxTrailingZeros() const { return One.countr_zero(); }

  /// Returns the maximum number of trailing one bits possible.
  unsigned countMaxTrailingOnes() const { return Zero.countr_zero(); }

  /// Returns the maximum number of leading zero bits possible.
  unsigned countMaxLeadingZeros() const { return One.countl_zero(); }

  /// Returns the maximum number of leading one bits possible.
  unsigned countMaxLeadingOnes() const { return Zero.countl_zero(); }

  /// Returns the number of bits known to be one.
  unsigned countMinPopulation() const { return One.popcount(); }

  /// Returns the maximum number of bits that could be one.
  unsigned countMaxPopulation() const {
    return getBitWidth() - Zero.popcount();
  }

  /// Returns the maximum number of bits needed to represent all possible
  /// unsigned values with these known bits. This is the inverse of the
  /// minimum number of leading zeros.
  unsigned countMaxActiveBits() const {
    return getBitWidth() - countMinLeadingZeros();
  }

  /// Create known bits from a known constant.
  static KnownBits makeConstant(const APInt &C) {
    return KnownBits(~C, C);
  }

  /// Returns KnownBits information that is known to be true for both this and
  /// RHS.
  ///
  /// When an operation is known to return one of its operands, this can be used
  /// to combine information about the known bits of the operands to get the
  /// information that must be true about the result.
  KnownBits intersectWith(const KnownBits &RHS) const {
    return KnownBits(Zero & RHS.Zero, One & RHS.One);
  }

  /// Returns KnownBits information that is known to be true for either this or
  /// RHS or both.
  ///
  /// This can be used to combine different sources of information about the
  /// known bits of a single value, e.g. information about the low bits and the
  /// high bits of the result of a multiplication.
  KnownBits unionWith(const KnownBits &RHS) const {
    return KnownBits(Zero | RHS.Zero, One | RHS.One);
  }
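
  // Example (editor's note): for two consistent facts about the same 3-bit
  // value, LHS == 0b1?0 (Zero=0b001, One=0b100) and RHS == 0b110 (Zero=0b001,
  // One=0b110):
  //   LHS.intersectWith(RHS) == 0b1?0  // only what both sides know
  //   LHS.unionWith(RHS)     == 0b110  // everything either side knows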

  /// Compute known bits common to LHS and RHS.
  LLVM_DEPRECATED("use intersectWith instead", "intersectWith")
  static KnownBits commonBits(const KnownBits &LHS, const KnownBits &RHS) {
    return LHS.intersectWith(RHS);
  }

  /// Return true if LHS and RHS have no common bits set.
  static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS) {
    return (LHS.Zero | RHS.Zero).isAllOnes();
  }

  /// Compute known bits resulting from adding LHS, RHS and a 1-bit Carry.
  static KnownBits computeForAddCarry(
      const KnownBits &LHS, const KnownBits &RHS, const KnownBits &Carry);

  /// Compute known bits resulting from adding LHS and RHS.
  static KnownBits computeForAddSub(bool Add, bool NSW, const KnownBits &LHS,
                                    KnownBits RHS);

  /// Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
  static KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
  static KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
  static KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
  static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits resulting from multiplying LHS and RHS.
  static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS,
                       bool NoUndefSelfMultiply = false);

  /// Compute known bits from sign-extended multiply-hi.
  static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits from zero-extended multiply-hi.
  static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for sdiv(LHS, RHS).
  static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS,
                        bool Exact = false);

  /// Compute known bits for udiv(LHS, RHS).
  static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS,
                        bool Exact = false);

  /// Compute known bits for urem(LHS, RHS).
  static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for srem(LHS, RHS).
  static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for umax(LHS, RHS).
  static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for umin(LHS, RHS).
  static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for smax(LHS, RHS).
  static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for smin(LHS, RHS).
  static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS);

  /// Compute known bits for shl(LHS, RHS).
  /// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
  static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS,
                       bool NUW = false, bool NSW = false,
                       bool ShAmtNonZero = false);

  /// Compute known bits for lshr(LHS, RHS).
  /// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
  static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS,
                        bool ShAmtNonZero = false);

  /// Compute known bits for ashr(LHS, RHS).
  /// NOTE: RHS (shift amount) bitwidth doesn't need to be the same as LHS.
  static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS,
                        bool ShAmtNonZero = false);

  /// Determine if these known bits always give the same ICMP_EQ result.
  static std::optional<bool> eq(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_NE result.
  static std::optional<bool> ne(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_UGT result.
  static std::optional<bool> ugt(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_UGE result.
  static std::optional<bool> uge(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_ULT result.
  static std::optional<bool> ult(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_ULE result.
  static std::optional<bool> ule(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_SGT result.
  static std::optional<bool> sgt(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_SGE result.
  static std::optional<bool> sge(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_SLT result.
  static std::optional<bool> slt(const KnownBits &LHS, const KnownBits &RHS);

  /// Determine if these known bits always give the same ICMP_SLE result.
  static std::optional<bool> sle(const KnownBits &LHS, const KnownBits &RHS);

  /// Update known bits based on ANDing with RHS.
  KnownBits &operator&=(const KnownBits &RHS);

  /// Update known bits based on ORing with RHS.
  KnownBits &operator|=(const KnownBits &RHS);

  /// Update known bits based on XORing with RHS.
  KnownBits &operator^=(const KnownBits &RHS);

  /// Compute known bits for the absolute value.
  KnownBits abs(bool IntMinIsPoison = false) const;

  KnownBits byteSwap() const {
    return KnownBits(Zero.byteSwap(), One.byteSwap());
  }

  KnownBits reverseBits() const {
    return KnownBits(Zero.reverseBits(), One.reverseBits());
  }

  /// Compute known bits for X & -X, which has only the lowest set bit of X set.
  /// The name comes from the X86 BMI instruction.
  KnownBits blsi() const;

  /// Compute known bits for X ^ (X - 1), which has all bits up to and including
  /// the lowest set bit of X set. The name comes from the X86 BMI instruction.
  KnownBits blsmsk() const;

  bool operator==(const KnownBits &Other) const {
    return Zero == Other.Zero && One == Other.One;
  }

  bool operator!=(const KnownBits &Other) const { return !(*this == Other); }

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  // Internal helper for getting the initial KnownBits for an `srem` or `urem`
  // operation with the low-bits set.
  static KnownBits remGetLowBits(const KnownBits &LHS, const KnownBits &RHS);
};

inline KnownBits operator&(KnownBits LHS, const KnownBits &RHS) {
  LHS &= RHS;
  return LHS;
}

inline KnownBits operator&(const KnownBits &LHS, KnownBits &&RHS) {
  RHS &= LHS;
  return std::move(RHS);
}

inline KnownBits operator|(KnownBits LHS, const KnownBits &RHS) {
  LHS |= RHS;
  return LHS;
}

inline KnownBits operator|(const KnownBits &LHS, KnownBits &&RHS) {
  RHS |= LHS;
  return std::move(RHS);
}

inline KnownBits operator^(KnownBits LHS, const KnownBits &RHS) {
  LHS ^= RHS;
  return LHS;
}

inline KnownBits operator^(const KnownBits &LHS, KnownBits &&RHS) {
  RHS ^= LHS;
  return std::move(RHS);
}

inline raw_ostream &operator<<(raw_ostream &OS, const KnownBits &Known) {
  Known.print(OS);
  return OS;
}

} // end namespace llvm

#endif

//===- Endian.h - Utilities for IO with endian specific data ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares generic functions to read and write endian specific data.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ENDIAN_H
#define LLVM_SUPPORT_ENDIAN_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/SwapByteOrder.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>

namespace llvm {
namespace support {

enum endianness {big, little, native};

// These are named values for common alignments.
enum {aligned = 0, unaligned = 1};

namespace detail {

/// ::value is either alignment, or alignof(T) if alignment is 0.
template<class T, int alignment>
struct PickAlignment {
 enum { value = alignment == 0 ? alignof(T) : alignment };
};

} // end namespace detail

namespace endian {

constexpr endianness system_endianness() {
  return sys::IsBigEndianHost ? big : little;
}

template <typename value_type>
inline value_type byte_swap(value_type value, endianness endian) {
  if ((endian != native) && (endian != system_endianness()))
    sys::swapByteOrder(value);
  return value;
}

/// Swap the bytes of value to match the given endianness.
template<typename value_type, endianness endian>
inline value_type byte_swap(value_type value) {
  return byte_swap(value, endian);
}

/// Read a value of a particular endianness from memory.
template <typename value_type, std::size_t alignment>
inline value_type read(const void *memory, endianness endian) {
  value_type ret;

  memcpy(&ret,
         LLVM_ASSUME_ALIGNED(
             memory, (detail::PickAlignment<value_type, alignment>::value)),
         sizeof(value_type));
  return byte_swap<value_type>(ret, endian);
}

template<typename value_type,
         endianness endian,
         std::size_t alignment>
inline value_type read(const void *memory) {
  return read<value_type, alignment>(memory, endian);
}

/// Read a value of a particular endianness from a buffer, and increment the
/// buffer past that value.
template <typename value_type, std::size_t alignment, typename CharT>
inline value_type readNext(const CharT *&memory, endianness endian) {
  value_type ret = read<value_type, alignment>(memory, endian);
  memory += sizeof(value_type);
  return ret;
}

template<typename value_type, endianness endian, std::size_t alignment,
         typename CharT>
inline value_type readNext(const CharT *&memory) {
  return readNext<value_type, alignment, CharT>(memory, endian);
}

/// Write a value to memory with a particular endianness.
template <typename value_type, std::size_t alignment>
inline void write(void *memory, value_type value, endianness endian) {
  value = byte_swap<value_type>(value, endian);
  memcpy(LLVM_ASSUME_ALIGNED(
             memory, (detail::PickAlignment<value_type, alignment>::value)),
         &value, sizeof(value_type));
}

template<typename value_type,
         endianness endian,
         std::size_t alignment>
inline void write(void *memory, value_type value) {
  write<value_type, alignment>(memory, value, endian);
}

template <typename value_type>
using make_unsigned_t = std::make_unsigned_t<value_type>;

/// Read a value of a particular endianness from memory, for a location
/// that starts at the given bit offset within the first byte.
template <typename value_type, endianness endian, std::size_t alignment>
inline value_type readAtBitAlignment(const void *memory, uint64_t startBit) {
  assert(startBit < 8);
  if (startBit == 0)
    return read<value_type, endian, alignment>(memory);
  else {
    // Read two values and compose the result from them.
    value_type val[2];
    memcpy(&val[0],
           LLVM_ASSUME_ALIGNED(
               memory, (detail::PickAlignment<value_type, alignment>::value)),
           sizeof(value_type) * 2);
    val[0] = byte_swap<value_type, endian>(val[0]);
    val[1] = byte_swap<value_type, endian>(val[1]);

    // Shift bits from the lower value into place.
    make_unsigned_t<value_type> lowerVal = val[0] >> startBit;
    // Mask off upper bits after right shift in case of signed type.
    make_unsigned_t<value_type> numBitsFirstVal =
        (sizeof(value_type) * 8) - startBit;
    lowerVal &= ((make_unsigned_t<value_type>)1 << numBitsFirstVal) - 1;

    // Get the bits from the upper value.
    make_unsigned_t<value_type> upperVal =
        val[1] & (((make_unsigned_t<value_type>)1 << startBit) - 1);
    // Shift them in to place.
    upperVal <<= numBitsFirstVal;

    return lowerVal | upperVal;
  }
}

/// Write a value to memory with a particular endianness, for a location
/// that starts at the given bit offset within the first byte.
template <typename value_type, endianness endian, std::size_t alignment>
inline void writeAtBitAlignment(void *memory, value_type value,
                                uint64_t startBit) {
  assert(startBit < 8);
  if (startBit == 0)
    write<value_type, endian, alignment>(memory, value);
  else {
    // Read two values and shift the result into them.
    value_type val[2];
    memcpy(&val[0],
           LLVM_ASSUME_ALIGNED(
               memory, (detail::PickAlignment<value_type, alignment>::value)),
           sizeof(value_type) * 2);
    val[0] = byte_swap<value_type, endian>(val[0]);
    val[1] = byte_swap<value_type, endian>(val[1]);

    // Mask off any existing bits in the upper part of the lower value that
    // we want to replace.
    val[0] &= ((make_unsigned_t<value_type>)1 << startBit) - 1;
    make_unsigned_t<value_type> numBitsFirstVal =
        (sizeof(value_type) * 8) - startBit;
    make_unsigned_t<value_type> lowerVal = value;
    if (startBit > 0) {
      // Mask off the upper bits in the new value that are not going to go into
      // the lower value. This avoids a left shift of a negative value, which
      // is undefined behavior.
      lowerVal &= (((make_unsigned_t<value_type>)1 << numBitsFirstVal) - 1);
      // Now shift the new bits into place
      lowerVal <<= startBit;
    }
    val[0] |= lowerVal;

    // Mask off any existing bits in the lower part of the upper value that
    // we want to replace.
    val[1] &= ~(((make_unsigned_t<value_type>)1 << startBit) - 1);
    // Next shift the bits that go into the upper value into position.
    make_unsigned_t<value_type> upperVal = value >> numBitsFirstVal;
    // Mask off upper bits after right shift in case of signed type.
    upperVal &= ((make_unsigned_t<value_type>)1 << startBit) - 1;
    val[1] |= upperVal;

    // Finally, rewrite values.
    val[0] = byte_swap<value_type, endian>(val[0]);
    val[1] = byte_swap<value_type, endian>(val[1]);
    memcpy(LLVM_ASSUME_ALIGNED(
               memory, (detail::PickAlignment<value_type, alignment>::value)),
           &val[0], sizeof(value_type) * 2);
  }
}

} // end namespace endian

namespace detail {

template <typename ValueType, endianness Endian, std::size_t Alignment,
          std::size_t ALIGN = PickAlignment<ValueType, Alignment>::value>
struct packed_endian_specific_integral {
  using value_type = ValueType;
  static constexpr endianness endian = Endian;
  static constexpr std::size_t alignment = Alignment;

  packed_endian_specific_integral() = default;

  explicit packed_endian_specific_integral(value_type val) { *this = val; }

  operator value_type() const {
    return endian::read<value_type, endian, alignment>(
      (const void*)Value.buffer);
  }

  void operator=(value_type newValue) {
    endian::write<value_type, endian, alignment>(
      (void*)Value.buffer, newValue);
  }

  packed_endian_specific_integral &operator+=(value_type newValue) {
    *this = *this + newValue;
    return *this;
  }

  packed_endian_specific_integral &operator-=(value_type newValue) {
    *this = *this - newValue;
    return *this;
  }

  packed_endian_specific_integral &operator|=(value_type newValue) {
    *this = *this | newValue;
    return *this;
  }

  packed_endian_specific_integral &operator&=(value_type newValue) {
    *this = *this & newValue;
    return *this;
  }

private:
  struct {
    alignas(ALIGN) char buffer[sizeof(value_type)];
  } Value;

public:
  struct ref {
    explicit ref(void *Ptr) : Ptr(Ptr) {}

    operator value_type() const {
      return endian::read<value_type, endian, alignment>(Ptr);
    }

    void operator=(value_type NewValue) {
      endian::write<value_type, endian, alignment>(Ptr, NewValue);
    }

  private:
    void *Ptr;
  };
};

} // end namespace detail

using ulittle16_t =
    detail::packed_endian_specific_integral<uint16_t, little, unaligned>;
using ulittle32_t =
    detail::packed_endian_specific_integral<uint32_t, little, unaligned>;
using ulittle64_t =
    detail::packed_endian_specific_integral<uint64_t, little, unaligned>;

using little16_t =
    detail::packed_endian_specific_integral<int16_t, little, unaligned>;
using little32_t =
    detail::packed_endian_specific_integral<int32_t, little, unaligned>;
using little64_t =
    detail::packed_endian_specific_integral<int64_t, little, unaligned>;

using aligned_ulittle16_t =
    detail::packed_endian_specific_integral<uint16_t, little, aligned>;
using aligned_ulittle32_t =
    detail::packed_endian_specific_integral<uint32_t, little, aligned>;
using aligned_ulittle64_t =
    detail::packed_endian_specific_integral<uint64_t, little, aligned>;

using aligned_little16_t =
    detail::packed_endian_specific_integral<int16_t, little, aligned>;
using aligned_little32_t =
    detail::packed_endian_specific_integral<int32_t, little, aligned>;
using aligned_little64_t =
    detail::packed_endian_specific_integral<int64_t, little, aligned>;

using ubig16_t =
    detail::packed_endian_specific_integral<uint16_t, big, unaligned>;
using ubig32_t =
    detail::packed_endian_specific_integral<uint32_t, big, unaligned>;
using ubig64_t =
    detail::packed_endian_specific_integral<uint64_t, big, unaligned>;

using big16_t =
    detail::packed_endian_specific_integral<int16_t, big, unaligned>;
using big32_t =
    detail::packed_endian_specific_integral<int32_t, big, unaligned>;
using big64_t =
    detail::packed_endian_specific_integral<int64_t, big, unaligned>;

using aligned_ubig16_t =
    detail::packed_endian_specific_integral<uint16_t, big, aligned>;
using aligned_ubig32_t =
    detail::packed_endian_specific_integral<uint32_t, big, aligned>;
using aligned_ubig64_t =
    detail::packed_endian_specific_integral<uint64_t, big, aligned>;

using aligned_big16_t =
    detail::packed_endian_specific_integral<int16_t, big, aligned>;
using aligned_big32_t =
    detail::packed_endian_specific_integral<int32_t, big, aligned>;
using aligned_big64_t =
    detail::packed_endian_specific_integral<int64_t, big, aligned>;

using unaligned_uint16_t =
    detail::packed_endian_specific_integral<uint16_t, native, unaligned>;
using unaligned_uint32_t =
    detail::packed_endian_specific_integral<uint32_t, native, unaligned>;
using unaligned_uint64_t =
    detail::packed_endian_specific_integral<uint64_t, native, unaligned>;

using unaligned_int16_t =
    detail::packed_endian_specific_integral<int16_t, native, unaligned>;
using unaligned_int32_t =
    detail::packed_endian_specific_integral<int32_t, native, unaligned>;
using unaligned_int64_t =
    detail::packed_endian_specific_integral<int64_t, native, unaligned>;

template <typename T>
using little_t = detail::packed_endian_specific_integral<T, little, unaligned>;
template <typename T>
using big_t = detail::packed_endian_specific_integral<T, big, unaligned>;

template <typename T>
using aligned_little_t =
    detail::packed_endian_specific_integral<T, little, aligned>;
template <typename T>
using aligned_big_t = detail::packed_endian_specific_integral<T, big, aligned>;
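
// Example (editor's sketch): an on-disk record whose fields are stored
// little-endian regardless of host byte order; reads and writes through the
// fields byte-swap transparently on big-endian hosts:
//
//   struct FileHeader {
//     ulittle32_t Magic;
//     ulittle16_t Major;
//     ulittle16_t Minor;
//   };
//   const auto *H = reinterpret_cast<const FileHeader *>(Buf);
//   uint32_t M = H->Magic; // swapped if needed; unaligned access is safe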

namespace endian {

template <typename T> inline T read(const void *P, endianness E) {
  return read<T, unaligned>(P, E);
}

template <typename T, endianness E> inline T read(const void *P) {
  return *(const detail::packed_endian_specific_integral<T, E, unaligned> *)P;
}

inline uint16_t read16(const void *P, endianness E) {
  return read<uint16_t>(P, E);
}
inline uint32_t read32(const void *P, endianness E) {
  return read<uint32_t>(P, E);
}
inline uint64_t read64(const void *P, endianness E) {
  return read<uint64_t>(P, E);
}

template <endianness E> inline uint16_t read16(const void *P) {
  return read<uint16_t, E>(P);
}
template <endianness E> inline uint32_t read32(const void *P) {
  return read<uint32_t, E>(P);
}
template <endianness E> inline uint64_t read64(const void *P) {
  return read<uint64_t, E>(P);
}

inline uint16_t read16le(const void *P) { return read16<little>(P); }
inline uint32_t read32le(const void *P) { return read32<little>(P); }
inline uint64_t read64le(const void *P) { return read64<little>(P); }
inline uint16_t read16be(const void *P) { return read16<big>(P); }
inline uint32_t read32be(const void *P) { return read32<big>(P); }
inline uint64_t read64be(const void *P) { return read64<big>(P); }
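
// Example (editor's sketch): decoding a little-endian length prefix from a
// raw byte buffer:
//
//   const unsigned char Buf[] = {0x2a, 0x00, 0x00, 0x00};
//   uint32_t Len = read32le(Buf);  // 42 on any host
//   const unsigned char *P = Buf;
//   uint32_t Same = readNext<uint32_t, little, unaligned>(P); // advances P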

template <typename T> inline void write(void *P, T V, endianness E) {
  write<T, unaligned>(P, V, E);
}

template <typename T, endianness E> inline void write(void *P, T V) {
  *(detail::packed_endian_specific_integral<T, E, unaligned> *)P = V;
}

inline void write16(void *P, uint16_t V, endianness E) {
  write<uint16_t>(P, V, E);
}
inline void write32(void *P, uint32_t V, endianness E) {
  write<uint32_t>(P, V, E);
}
inline void write64(void *P, uint64_t V, endianness E) {
  write<uint64_t>(P, V, E);
}

template <endianness E> inline void write16(void *P, uint16_t V) {
  write<uint16_t, E>(P, V);
}
template <endianness E> inline void write32(void *P, uint32_t V) {
  write<uint32_t, E>(P, V);
}
template <endianness E> inline void write64(void *P, uint64_t V) {
  write<uint64_t, E>(P, V);
}

inline void write16le(void *P, uint16_t V) { write16<little>(P, V); }
inline void write32le(void *P, uint32_t V) { write32<little>(P, V); }
inline void write64le(void *P, uint64_t V) { write64<little>(P, V); }
inline void write16be(void *P, uint16_t V) { write16<big>(P, V); }
inline void write32be(void *P, uint32_t V) { write32<big>(P, V); }
inline void write64be(void *P, uint64_t V) { write64<big>(P, V); }

} // end namespace endian

} // end namespace support
} // end namespace llvm

#endif // LLVM_SUPPORT_ENDIAN_H

//===--- ItaniumManglingCanonicalizer.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a class for computing equivalence classes of mangled names
// given a set of equivalences between name fragments.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
#define LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H

#include <cstdint>

namespace llvm {

class StringRef;

/// Canonicalizer for mangled names.
///
/// This class allows specifying a list of "equivalent" manglings. For example,
/// you can specify that Ss is equivalent to
///   NSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocatorIcEEEE
/// and then manglings that refer to libstdc++'s 'std::string' will be
/// considered equivalent to manglings that are the same except that they refer
/// to libc++'s 'std::string'.
///
/// This can be used when data (eg, profiling data) is available for a version
/// of a program built in a different configuration, with correspondingly
/// different manglings.
class ItaniumManglingCanonicalizer {
public:
  ItaniumManglingCanonicalizer();
  ItaniumManglingCanonicalizer(const ItaniumManglingCanonicalizer &) = delete;
  void operator=(const ItaniumManglingCanonicalizer &) = delete;
  ~ItaniumManglingCanonicalizer();

  enum class EquivalenceError {
    Success,

    /// Both the equivalent manglings have already been used as components of
    /// some other mangling we've looked at. It's too late to add this
    /// equivalence.
    ManglingAlreadyUsed,

    /// The first equivalent mangling is invalid.
    InvalidFirstMangling,

    /// The second equivalent mangling is invalid.
    InvalidSecondMangling,
  };

  enum class FragmentKind {
    /// The mangling fragment is a <name> (or a predefined <substitution>).
    Name,
    /// The mangling fragment is a <type>.
    Type,
    /// The mangling fragment is an <encoding>.
    Encoding,
  };

  /// Add an equivalence between \p First and \p Second. Both manglings must
  /// live at least as long as the canonicalizer.
  EquivalenceError addEquivalence(FragmentKind Kind, StringRef First,
                                  StringRef Second);

  using Key = uintptr_t;

  /// Form a canonical key for the specified mangling. The key will be the
  /// same for all equivalent manglings, and different for any two
  /// non-equivalent manglings, but is otherwise unspecified.
  ///
  /// Returns Key() if (and only if) the mangling is not a valid Itanium C++
  /// ABI mangling.
  ///
  /// The string denoted by Mangling must live as long as the canonicalizer.
  Key canonicalize(StringRef Mangling);

  /// Find a canonical key for the specified mangling, if one has already been
  /// formed. Otherwise returns Key().
  Key lookup(StringRef Mangling);

private:
  struct Impl;
  Impl *P;
};
} // namespace llvm

#endif // LLVM_SUPPORT_ITANIUMMANGLINGCANONICALIZER_H
#ifndef LLVM_SUPPORT_REVERSEITERATION_H
#define LLVM_SUPPORT_REVERSEITERATION_H

#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/PointerLikeTypeTraits.h"

namespace llvm {

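/// Whether containers keyed by pointer-like values of type \p T should be
/// iterated in reverse. This only ever returns true in ABI-breaking-checks
/// builds configured with LLVM_ENABLE_REVERSE_ITERATION, where reversing the
/// otherwise unspecified iteration order of pointer-keyed containers helps
/// flush out code that accidentally depends on that order.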
template<class T = void *>
bool shouldReverseIterate() {
#if LLVM_ENABLE_REVERSE_ITERATION
  return detail::IsPointerLike<T>::value;
#else
  return false;
#endif
}

} // namespace llvm
#endif
//==- llvm/Support/Recycler.h - Recycling Allocator --------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Recycler class template.  See the doxygen comment for
// Recycler for more details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RECYCLER_H
#define LLVM_SUPPORT_RECYCLER_H

#include "llvm/ADT/ilist.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>

namespace llvm {

/// PrintRecyclerStats - Helper for RecyclingAllocator for
/// printing statistics.
///
void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);

/// Recycler - This class manages a linked-list of deallocated nodes
/// and facilitates reusing deallocated memory in place of allocating
/// new memory.
///
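/// Example (an illustrative sketch; MyNode stands for any type managed by
/// the recycler):
/// \code
///   BumpPtrAllocator Allocator;
///   Recycler<MyNode> R;
///   MyNode *N = R.Allocate(Allocator); // reuses a freed node when possible
///   R.Deallocate(Allocator, N);        // returns N to the free list
///   R.clear(Allocator);                // must be empty before destruction
/// \endcode
///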
template <class T, size_t Size = sizeof(T), size_t Align = alignof(T)>
class Recycler {
  struct FreeNode {
    FreeNode *Next;
  };

  /// List of nodes that have deleted contents and are not in active use.
  FreeNode *FreeList = nullptr;

  FreeNode *pop_val() {
    auto *Val = FreeList;
    __asan_unpoison_memory_region(Val, Size);
    FreeList = FreeList->Next;
    __msan_allocated_memory(Val, Size);
    return Val;
  }

  void push(FreeNode *N) {
    N->Next = FreeList;
    FreeList = N;
    __asan_poison_memory_region(N, Size);
  }

public:
  ~Recycler() {
    // If this fails, either the caller has lost track of some allocation,
    // or the caller isn't tracking allocations and should just call
    // clear() before deleting the Recycler.
    assert(!FreeList && "Non-empty recycler deleted!");
  }

  /// clear - Release all the tracked allocations to the allocator. The
  /// recycler must be free of any tracked allocations before being
  /// deleted; calling clear is one way to ensure this.
  template<class AllocatorType>
  void clear(AllocatorType &Allocator) {
    while (FreeList) {
      T *t = reinterpret_cast<T *>(pop_val());
      Allocator.Deallocate(t);
    }
  }

  /// Special case for BumpPtrAllocator which has an empty Deallocate()
  /// function.
  ///
  /// There is no need to traverse the free list, pulling all the objects into
  /// cache.
  void clear(BumpPtrAllocator &) { FreeList = nullptr; }

  template<class SubClass, class AllocatorType>
  SubClass *Allocate(AllocatorType &Allocator) {
    static_assert(alignof(SubClass) <= Align,
                  "Recycler allocation alignment is less than object align!");
    static_assert(sizeof(SubClass) <= Size,
                  "Recycler allocation size is less than object size!");
    return FreeList ? reinterpret_cast<SubClass *>(pop_val())
                    : static_cast<SubClass *>(Allocator.Allocate(Size, Align));
  }

  template<class AllocatorType>
  T *Allocate(AllocatorType &Allocator) {
    return Allocate<T>(Allocator);
  }

  template<class SubClass, class AllocatorType>
  void Deallocate(AllocatorType & /*Allocator*/, SubClass* Element) {
    push(reinterpret_cast<FreeNode *>(Element));
  }

  void PrintStats();
};

template <class T, size_t Size, size_t Align>
void Recycler<T, Size, Align>::PrintStats() {
  size_t S = 0;
  for (auto *I = FreeList; I; I = I->Next)
    ++S;
  PrintRecyclerStats(Size, Align, S);
}

}

#endif
//===-- SaveAndRestore.h - Utility  -------------------------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file provides utility classes that use RAII to save and restore
/// values.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SAVEANDRESTORE_H
#define LLVM_SUPPORT_SAVEANDRESTORE_H

#include <utility>

namespace llvm {

/// A utility class that uses RAII to save and restore the value of a variable.
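///
/// Example (an illustrative sketch):
/// \code
///   bool Flag = false;
///   {
///     SaveAndRestore S(Flag, true); // Flag == true inside this scope
///   }                               // destructor restores Flag to false
/// \endcode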
template <typename T> struct SaveAndRestore {
  SaveAndRestore(T &X) : X(X), OldValue(X) {}
  SaveAndRestore(T &X, const T &NewValue) : X(X), OldValue(X) { X = NewValue; }
  SaveAndRestore(T &X, T &&NewValue) : X(X), OldValue(std::move(X)) {
    X = std::move(NewValue);
  }
  ~SaveAndRestore() { X = std::move(OldValue); }
  const T &get() { return OldValue; }

private:
  T &X;
  T OldValue;
};

// User-defined CTAD guides.
template <typename T> SaveAndRestore(T &) -> SaveAndRestore<T>;
template <typename T> SaveAndRestore(T &, const T &) -> SaveAndRestore<T>;
template <typename T> SaveAndRestore(T &, T &&) -> SaveAndRestore<T>;

} // namespace llvm

#endif
//===- SystemUtils.h - Utilities to do low-level system stuff ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains functions used to do a variety of low-level, often
// system-specific, tasks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SYSTEMUTILS_H
#define LLVM_SUPPORT_SYSTEMUTILS_H

namespace llvm {
class raw_ostream;

/// Determine if the raw_ostream provided is connected to a terminal. If so,
/// generate a warning message to errs() advising against display of bitcode
/// and return true. Otherwise just return false.
bool CheckBitcodeOutputToConsole(
    raw_ostream &stream_to_check ///< The stream to be checked
);
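
// Typical use in a tool that may emit bitcode (an illustrative sketch; real
// call sites vary):
//
//   if (CheckBitcodeOutputToConsole(outs()))
//     return 1; // refuse to dump raw bitcode to a terminal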

} // namespace llvm

#endif
//===- BinaryStreamWriter.h - Writes objects to a BinaryStream ---*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYSTREAMWRITER_H
#define LLVM_SUPPORT_BINARYSTREAMWRITER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <type_traits>
#include <utility>

namespace llvm {

/// Provides write-only access to a subclass of `WritableBinaryStream`.
/// Provides bounds checking and helpers for writing certain common data types
/// such as null-terminated strings, integers in various flavors of endianness,
/// etc.  Can be subclassed to provide writing of custom datatypes, although no
/// methods are overridable.
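///
/// Example (an illustrative sketch; errors unwrapped with cantFail for
/// brevity):
/// \code
///   uint8_t Data[16] = {};
///   BinaryStreamWriter Writer(MutableArrayRef<uint8_t>(Data),
///                             llvm::support::little);
///   cantFail(Writer.writeInteger<uint32_t>(42));
///   cantFail(Writer.writeCString("hi")); // writes 'h', 'i', '\0'
/// \endcode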
class BinaryStreamWriter {
public:
  BinaryStreamWriter() = default;
  explicit BinaryStreamWriter(WritableBinaryStreamRef Ref);
  explicit BinaryStreamWriter(WritableBinaryStream &Stream);
  explicit BinaryStreamWriter(MutableArrayRef<uint8_t> Data,
                              llvm::support::endianness Endian);

  BinaryStreamWriter(const BinaryStreamWriter &Other) = default;

  BinaryStreamWriter &operator=(const BinaryStreamWriter &Other) = default;

  virtual ~BinaryStreamWriter() = default;

  /// Write the bytes specified in \p Buffer to the underlying stream.
  /// On success, updates the offset so that subsequent writes will occur
  /// at the next unwritten position.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeBytes(ArrayRef<uint8_t> Buffer);

  /// Write the integer \p Value to the underlying stream in the
  /// specified endianness.  On success, updates the offset so that
  /// subsequent writes occur at the next unwritten position.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  template <typename T> Error writeInteger(T Value) {
    static_assert(std::is_integral_v<T>,
                  "Cannot call writeInteger with non-integral value!");
    uint8_t Buffer[sizeof(T)];
    llvm::support::endian::write<T, llvm::support::unaligned>(
        Buffer, Value, Stream.getEndian());
    return writeBytes(Buffer);
  }

  /// Similar to writeInteger, but writes the enum \p Num as its underlying
  /// integral type.
  template <typename T> Error writeEnum(T Num) {
    static_assert(std::is_enum<T>::value,
                  "Cannot call writeEnum with non-Enum type");

    using U = std::underlying_type_t<T>;
    return writeInteger<U>(static_cast<U>(Num));
  }

  /// Write the unsigned integer Value to the underlying stream using ULEB128
  /// encoding.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeULEB128(uint64_t Value);

  /// Write the signed integer Value to the underlying stream using SLEB128
  /// encoding.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeSLEB128(int64_t Value);
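
  // For example (illustrative): writeULEB128(300) emits the two bytes
  // 0xAC 0x02, since 300 is encoded 7 bits at a time, least-significant
  // group first, with the high bit of each byte marking continuation.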

  /// Write the string \p Str to the underlying stream followed by a null
  /// terminator.  On success, updates the offset so that subsequent writes
  /// occur at the next unwritten position.  \p Str need not be null terminated
  /// on input.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeCString(StringRef Str);

  /// Write the string \p Str to the underlying stream without a null
  /// terminator.  On success, updates the offset so that subsequent writes
  /// occur at the next unwritten position.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeFixedString(StringRef Str);

  /// Efficiently reads all data from \p Ref, and writes it to this stream.
  /// This operation will not invoke any copies of the source data, regardless
  /// of the source stream's implementation.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeStreamRef(BinaryStreamRef Ref);

  /// Efficiently reads \p Size bytes from \p Ref, and writes them to this
  /// stream.
  /// This operation will not invoke any copies of the source data, regardless
  /// of the source stream's implementation.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  Error writeStreamRef(BinaryStreamRef Ref, uint64_t Size);

  /// Writes the object \p Obj to the underlying stream, as if by using memcpy.
  /// It is up to the caller to ensure that type of \p Obj can be safely copied
  /// in this fashion, as no checks are made to ensure that this is safe.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  template <typename T> Error writeObject(const T &Obj) {
    static_assert(!std::is_pointer<T>::value,
                  "writeObject should not be used with pointers, to write "
                  "the pointed-to value dereference the pointer before calling "
                  "writeObject");
    return writeBytes(
        ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(&Obj), sizeof(T)));
  }

  /// Writes an array of objects of type T to the underlying stream, as if by
  /// using memcpy.  It is up to the caller to ensure that the elements of
  /// \p Array can be safely copied in this fashion, as no checks are made to
  /// ensure that this is safe.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  template <typename T> Error writeArray(ArrayRef<T> Array) {
    if (Array.empty())
      return Error::success();
    if (Array.size() > UINT32_MAX / sizeof(T))
      return make_error<BinaryStreamError>(
          stream_error_code::invalid_array_size);

    return writeBytes(
        ArrayRef<uint8_t>(reinterpret_cast<const uint8_t *>(Array.data()),
                          Array.size() * sizeof(T)));
  }

  /// Writes all data from the array \p Array to the underlying stream.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  template <typename T, typename U>
  Error writeArray(VarStreamArray<T, U> Array) {
    return writeStreamRef(Array.getUnderlyingStream());
  }

  /// Writes all elements from the array \p Array to the underlying stream.
  ///
  /// \returns a success error code if the data was successfully written,
  /// otherwise returns an appropriate error code.
  template <typename T> Error writeArray(FixedStreamArray<T> Array) {
    return writeStreamRef(Array.getUnderlyingStream());
  }

  /// Splits the Writer into two Writers at a given offset.
  std::pair<BinaryStreamWriter, BinaryStreamWriter> split(uint64_t Off) const;

  void setOffset(uint64_t Off) { Offset = Off; }
  uint64_t getOffset() const { return Offset; }
  uint64_t getLength() const { return Stream.getLength(); }
  uint64_t bytesRemaining() const { return getLength() - getOffset(); }
  Error padToAlignment(uint32_t Align);

protected:
  WritableBinaryStreamRef Stream;
  uint64_t Offset = 0;
};

} // end namespace llvm

#endif // LLVM_SUPPORT_BINARYSTREAMWRITER_H
//===-- X86DisassemblerDecoderCommon.h - Disassembler decoder ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is part of the X86 Disassembler.
// It contains common definitions used by both the disassembler and the table
//  generator.
// Documentation for the disassembler can be found in X86Disassembler.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_X86DISASSEMBLERDECODERCOMMON_H
#define LLVM_SUPPORT_X86DISASSEMBLERDECODERCOMMON_H

#include "llvm/Support/DataTypes.h"

namespace llvm {
namespace X86Disassembler {

#define INSTRUCTIONS_SYM  x86DisassemblerInstrSpecifiers
#define CONTEXTS_SYM      x86DisassemblerContexts
#define ONEBYTE_SYM       x86DisassemblerOneByteOpcodes
#define TWOBYTE_SYM       x86DisassemblerTwoByteOpcodes
#define THREEBYTE38_SYM   x86DisassemblerThreeByte38Opcodes
#define THREEBYTE3A_SYM   x86DisassemblerThreeByte3AOpcodes
#define XOP8_MAP_SYM      x86DisassemblerXOP8Opcodes
#define XOP9_MAP_SYM      x86DisassemblerXOP9Opcodes
#define XOPA_MAP_SYM      x86DisassemblerXOPAOpcodes
#define THREEDNOW_MAP_SYM x86Disassembler3DNowOpcodes
#define MAP5_SYM          x86DisassemblerMap5Opcodes
#define MAP6_SYM          x86DisassemblerMap6Opcodes

#define INSTRUCTIONS_STR  "x86DisassemblerInstrSpecifiers"
#define CONTEXTS_STR      "x86DisassemblerContexts"
#define ONEBYTE_STR       "x86DisassemblerOneByteOpcodes"
#define TWOBYTE_STR       "x86DisassemblerTwoByteOpcodes"
#define THREEBYTE38_STR   "x86DisassemblerThreeByte38Opcodes"
#define THREEBYTE3A_STR   "x86DisassemblerThreeByte3AOpcodes"
#define XOP8_MAP_STR      "x86DisassemblerXOP8Opcodes"
#define XOP9_MAP_STR      "x86DisassemblerXOP9Opcodes"
#define XOPA_MAP_STR      "x86DisassemblerXOPAOpcodes"
#define THREEDNOW_MAP_STR "x86Disassembler3DNowOpcodes"
#define MAP5_STR          "x86DisassemblerMap5Opcodes"
#define MAP6_STR          "x86DisassemblerMap6Opcodes"

// Attributes of an instruction that must be known before the opcode can be
// processed correctly.  Most of these indicate the presence of particular
// prefixes, but ATTR_64BIT is simply an attribute of the decoding context.
enum attributeBits {
  ATTR_NONE   = 0x00,
  ATTR_64BIT  = 0x1 << 0,
  ATTR_XS     = 0x1 << 1,
  ATTR_XD     = 0x1 << 2,
  ATTR_REXW   = 0x1 << 3,
  ATTR_OPSIZE = 0x1 << 4,
  ATTR_ADSIZE = 0x1 << 5,
  ATTR_VEX    = 0x1 << 6,
  ATTR_VEXL   = 0x1 << 7,
  ATTR_EVEX   = 0x1 << 8,
  ATTR_EVEXL2 = 0x1 << 9,
  ATTR_EVEXK  = 0x1 << 10,
  ATTR_EVEXKZ = 0x1 << 11,
  ATTR_EVEXB  = 0x1 << 12,
  ATTR_max    = 0x1 << 13,
};

// Combinations of the above attributes that are relevant to instruction
// decode. Although other combinations are possible, they can be reduced to
// these without affecting the ultimately decoded instruction.

//           Class name           Rank  Rationale for rank assignment
#define INSTRUCTION_CONTEXTS                                                   \
  ENUM_ENTRY(IC,                    0,  "says nothing about the instruction")  \
  ENUM_ENTRY(IC_64BIT,              1,  "says the instruction applies in "     \
                                        "64-bit mode but no more")             \
  ENUM_ENTRY(IC_OPSIZE,             3,  "requires an OPSIZE prefix, so "       \
                                        "operands change width")               \
  ENUM_ENTRY(IC_ADSIZE,             3,  "requires an ADSIZE prefix, so "       \
                                        "operands change width")               \
  ENUM_ENTRY(IC_OPSIZE_ADSIZE,      4,  "requires ADSIZE and OPSIZE prefixes") \
  ENUM_ENTRY(IC_XD,                 2,  "may say something about the opcode "  \
                                        "but not the operands")                \
  ENUM_ENTRY(IC_XS,                 2,  "may say something about the opcode "  \
                                        "but not the operands")                \
  ENUM_ENTRY(IC_XD_OPSIZE,          3,  "requires an OPSIZE prefix, so "       \
                                        "operands change width")               \
  ENUM_ENTRY(IC_XS_OPSIZE,          3,  "requires an OPSIZE prefix, so "       \
                                        "operands change width")               \
  ENUM_ENTRY(IC_XD_ADSIZE,          3,  "requires an ADSIZE prefix, so "       \
                                        "operands change width")               \
  ENUM_ENTRY(IC_XS_ADSIZE,          3,  "requires an ADSIZE prefix, so "       \
                                        "operands change width")               \
  ENUM_ENTRY(IC_64BIT_REXW,         5,  "requires a REX.W prefix, so operands "\
                                        "change width; overrides IC_OPSIZE")   \
  ENUM_ENTRY(IC_64BIT_REXW_ADSIZE,  6,  "requires a REX.W prefix and 0x67 "    \
                                        "prefix")                              \
  ENUM_ENTRY(IC_64BIT_OPSIZE,       3,  "Just as meaningful as IC_OPSIZE")     \
  ENUM_ENTRY(IC_64BIT_ADSIZE,       3,  "Just as meaningful as IC_ADSIZE")     \
  ENUM_ENTRY(IC_64BIT_OPSIZE_ADSIZE, 4, "Just as meaningful as IC_OPSIZE/"     \
                                        "IC_ADSIZE")                           \
  ENUM_ENTRY(IC_64BIT_XD,           6,  "XD instructions are SSE; REX.W is "   \
                                        "secondary")                           \
  ENUM_ENTRY(IC_64BIT_XS,           6,  "Just as meaningful as IC_64BIT_XD")   \
  ENUM_ENTRY(IC_64BIT_XD_OPSIZE,    3,  "Just as meaningful as IC_XD_OPSIZE")  \
  ENUM_ENTRY(IC_64BIT_XS_OPSIZE,    3,  "Just as meaningful as IC_XS_OPSIZE")  \
  ENUM_ENTRY(IC_64BIT_XD_ADSIZE,    3,  "Just as meaningful as IC_XD_ADSIZE")  \
  ENUM_ENTRY(IC_64BIT_XS_ADSIZE,    3,  "Just as meaningful as IC_XS_ADSIZE")  \
  ENUM_ENTRY(IC_64BIT_REXW_XS,      7,  "OPSIZE could mean a different "       \
                                        "opcode")                              \
  ENUM_ENTRY(IC_64BIT_REXW_XD,      7,  "Just as meaningful as "               \
                                        "IC_64BIT_REXW_XS")                    \
  ENUM_ENTRY(IC_64BIT_REXW_OPSIZE,  8,  "The Dynamic Duo!  Prefer over all "   \
                                        "else because this changes most "      \
                                        "operands' meaning")                   \
  ENUM_ENTRY(IC_VEX,                1,  "requires a VEX prefix")               \
  ENUM_ENTRY(IC_VEX_XS,             2,  "requires VEX and the XS prefix")      \
  ENUM_ENTRY(IC_VEX_XD,             2,  "requires VEX and the XD prefix")      \
  ENUM_ENTRY(IC_VEX_OPSIZE,         2,  "requires VEX and the OpSize prefix")  \
  ENUM_ENTRY(IC_VEX_W,              3,  "requires VEX and the W prefix")       \
  ENUM_ENTRY(IC_VEX_W_XS,           4,  "requires VEX, W, and XS prefix")      \
  ENUM_ENTRY(IC_VEX_W_XD,           4,  "requires VEX, W, and XD prefix")      \
  ENUM_ENTRY(IC_VEX_W_OPSIZE,       4,  "requires VEX, W, and OpSize")         \
  ENUM_ENTRY(IC_VEX_L,              3,  "requires VEX and the L prefix")       \
  ENUM_ENTRY(IC_VEX_L_XS,           4,  "requires VEX and the L and XS prefix")\
  ENUM_ENTRY(IC_VEX_L_XD,           4,  "requires VEX and the L and XD prefix")\
  ENUM_ENTRY(IC_VEX_L_OPSIZE,       4,  "requires VEX, L, and OpSize")         \
  ENUM_ENTRY(IC_VEX_L_W,            4,  "requires VEX, L and W")               \
  ENUM_ENTRY(IC_VEX_L_W_XS,         5,  "requires VEX, L, W and XS prefix")    \
  ENUM_ENTRY(IC_VEX_L_W_XD,         5,  "requires VEX, L, W and XD prefix")    \
  ENUM_ENTRY(IC_VEX_L_W_OPSIZE,     5,  "requires VEX, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX,               1,  "requires an EVEX prefix")             \
  ENUM_ENTRY(IC_EVEX_XS,            2,  "requires EVEX and the XS prefix")     \
  ENUM_ENTRY(IC_EVEX_XD,            2,  "requires EVEX and the XD prefix")     \
  ENUM_ENTRY(IC_EVEX_OPSIZE,        2,  "requires EVEX and the OpSize prefix") \
  ENUM_ENTRY(IC_EVEX_W,             3,  "requires EVEX and the W prefix")      \
  ENUM_ENTRY(IC_EVEX_W_XS,          4,  "requires EVEX, W, and XS prefix")     \
  ENUM_ENTRY(IC_EVEX_W_XD,          4,  "requires EVEX, W, and XD prefix")     \
  ENUM_ENTRY(IC_EVEX_W_OPSIZE,      4,  "requires EVEX, W, and OpSize")        \
  ENUM_ENTRY(IC_EVEX_L,             3,  "requires EVEX and the L prefix")       \
  ENUM_ENTRY(IC_EVEX_L_XS,          4,  "requires EVEX and the L and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L_XD,          4,  "requires EVEX and the L and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L_OPSIZE,      4,  "requires EVEX, L, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L_W,           3,  "requires EVEX, L and W")               \
  ENUM_ENTRY(IC_EVEX_L_W_XS,        4,  "requires EVEX, L, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_XD,        4,  "requires EVEX, L, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE,    4,  "requires EVEX, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_L2,            3,  "requires EVEX and the L2 prefix")       \
  ENUM_ENTRY(IC_EVEX_L2_XS,         4,  "requires EVEX and the L2 and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L2_XD,         4,  "requires EVEX and the L2 and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L2_OPSIZE,     4,  "requires EVEX, L2, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L2_W,          3,  "requires EVEX, L2 and W")               \
  ENUM_ENTRY(IC_EVEX_L2_W_XS,       4,  "requires EVEX, L2, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_XD,       4,  "requires EVEX, L2, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE,   4,  "requires EVEX, L2, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_K,             1,  "requires an EVEX_K prefix")             \
  ENUM_ENTRY(IC_EVEX_XS_K,          2,  "requires EVEX_K and the XS prefix")     \
  ENUM_ENTRY(IC_EVEX_XD_K,          2,  "requires EVEX_K and the XD prefix")     \
  ENUM_ENTRY(IC_EVEX_OPSIZE_K,      2,  "requires EVEX_K and the OpSize prefix") \
  ENUM_ENTRY(IC_EVEX_W_K,           3,  "requires EVEX_K and the W prefix")      \
  ENUM_ENTRY(IC_EVEX_W_XS_K,        4,  "requires EVEX_K, W, and XS prefix")     \
  ENUM_ENTRY(IC_EVEX_W_XD_K,        4,  "requires EVEX_K, W, and XD prefix")     \
  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K,    4,  "requires EVEX_K, W, and OpSize")        \
  ENUM_ENTRY(IC_EVEX_L_K,           3,  "requires EVEX_K and the L prefix")       \
  ENUM_ENTRY(IC_EVEX_L_XS_K,        4,  "requires EVEX_K and the L and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L_XD_K,        4,  "requires EVEX_K and the L and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K,    4,  "requires EVEX_K, L, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L_W_K,         3,  "requires EVEX_K, L and W")               \
  ENUM_ENTRY(IC_EVEX_L_W_XS_K,      4,  "requires EVEX_K, L, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_XD_K,      4,  "requires EVEX_K, L, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K,  4,  "requires EVEX_K, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_L2_K,          3,  "requires EVEX_K and the L2 prefix")       \
  ENUM_ENTRY(IC_EVEX_L2_XS_K,       4,  "requires EVEX_K and the L2 and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L2_XD_K,       4,  "requires EVEX_K and the L2 and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K,   4,  "requires EVEX_K, L2, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L2_W_K,        3,  "requires EVEX_K, L2 and W")               \
  ENUM_ENTRY(IC_EVEX_L2_W_XS_K,     4,  "requires EVEX_K, L2, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_XD_K,     4,  "requires EVEX_K, L2, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K, 4,  "requires EVEX_K, L2, W and OpSize")     \
  ENUM_ENTRY(IC_EVEX_B,             1,  "requires an EVEX_B prefix")             \
  ENUM_ENTRY(IC_EVEX_XS_B,          2,  "requires EVEX_B and the XS prefix")     \
  ENUM_ENTRY(IC_EVEX_XD_B,          2,  "requires EVEX_B and the XD prefix")     \
  ENUM_ENTRY(IC_EVEX_OPSIZE_B,      2,  "requires EVEX_B and the OpSize prefix") \
  ENUM_ENTRY(IC_EVEX_W_B,           3,  "requires EVEX_B and the W prefix")      \
  ENUM_ENTRY(IC_EVEX_W_XS_B,        4,  "requires EVEX_B, W, and XS prefix")     \
  ENUM_ENTRY(IC_EVEX_W_XD_B,        4,  "requires EVEX_B, W, and XD prefix")     \
  ENUM_ENTRY(IC_EVEX_W_OPSIZE_B,    4,  "requires EVEX_B, W, and OpSize")        \
  ENUM_ENTRY(IC_EVEX_L_B,           3,  "requires EVEX_B and the L prefix")       \
  ENUM_ENTRY(IC_EVEX_L_XS_B,        4,  "requires EVEX_B and the L and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L_XD_B,        4,  "requires EVEX_B and the L and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L_OPSIZE_B,    4,  "requires EVEX_B, L, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L_W_B,         3,  "requires EVEX_B, L and W")               \
  ENUM_ENTRY(IC_EVEX_L_W_XS_B,      4,  "requires EVEX_B, L, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_XD_B,      4,  "requires EVEX_B, L, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_B,  4,  "requires EVEX_B, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_L2_B,          3,  "requires EVEX_B and the L2 prefix")       \
  ENUM_ENTRY(IC_EVEX_L2_XS_B,       4,  "requires EVEX_B and the L2 and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L2_XD_B,       4,  "requires EVEX_B and the L2 and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_B,   4,  "requires EVEX_B, L2, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L2_W_B,        3,  "requires EVEX_B, L2 and W")               \
  ENUM_ENTRY(IC_EVEX_L2_W_XS_B,     4,  "requires EVEX_B, L2, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_XD_B,     4,  "requires EVEX_B, L2, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_B, 4,  "requires EVEX_B, L2, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_K_B,           1,  "requires EVEX_B and EVEX_K prefix")             \
  ENUM_ENTRY(IC_EVEX_XS_K_B,        2,  "requires EVEX_B, EVEX_K and the XS prefix")     \
  ENUM_ENTRY(IC_EVEX_XD_K_B,        2,  "requires EVEX_B, EVEX_K and the XD prefix")     \
  ENUM_ENTRY(IC_EVEX_OPSIZE_K_B,    2,  "requires EVEX_B, EVEX_K and the OpSize prefix") \
  ENUM_ENTRY(IC_EVEX_W_K_B,         3,  "requires EVEX_B, EVEX_K and the W prefix")      \
  ENUM_ENTRY(IC_EVEX_W_XS_K_B,      4,  "requires EVEX_B, EVEX_K, W, and XS prefix")     \
  ENUM_ENTRY(IC_EVEX_W_XD_K_B,      4,  "requires EVEX_B, EVEX_K, W, and XD prefix")     \
  ENUM_ENTRY(IC_EVEX_W_OPSIZE_K_B,  4,  "requires EVEX_B, EVEX_K, W, and OpSize")        \
  ENUM_ENTRY(IC_EVEX_L_K_B,         3,  "requires EVEX_B, EVEX_K and the L prefix")       \
  ENUM_ENTRY(IC_EVEX_L_XS_K_B,      4,  "requires EVEX_B, EVEX_K and the L and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L_XD_K_B,      4,  "requires EVEX_B, EVEX_K and the L and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L_OPSIZE_K_B,  4,  "requires EVEX_B, EVEX_K, L, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L_W_K_B,       3,  "requires EVEX_B, EVEX_K, L and W")               \
  ENUM_ENTRY(IC_EVEX_L_W_XS_K_B,    4,  "requires EVEX_B, EVEX_K, L, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_XD_K_B,    4,  "requires EVEX_B, EVEX_K, L, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_K_B,4,  "requires EVEX_B, EVEX_K, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_L2_K_B,        3,  "requires EVEX_B, EVEX_K and the L2 prefix")       \
  ENUM_ENTRY(IC_EVEX_L2_XS_K_B,     4,  "requires EVEX_B, EVEX_K and the L2 and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L2_XD_K_B,     4,  "requires EVEX_B, EVEX_K and the L2 and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_K_B, 4,  "requires EVEX_B, EVEX_K, L2, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L2_W_K_B,      3,  "requires EVEX_B, EVEX_K, L2 and W")               \
  ENUM_ENTRY(IC_EVEX_L2_W_XS_K_B,   4,  "requires EVEX_B, EVEX_K, L2, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_XD_K_B,   4,  "requires EVEX_B, EVEX_K, L2, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_K_B,4,  "requires EVEX_B, EVEX_K, L2, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_KZ_B,           1,  "requires EVEX_B and EVEX_KZ prefix")             \
  ENUM_ENTRY(IC_EVEX_XS_KZ_B,        2,  "requires EVEX_B, EVEX_KZ and the XS prefix")     \
  ENUM_ENTRY(IC_EVEX_XD_KZ_B,        2,  "requires EVEX_B, EVEX_KZ and the XD prefix")     \
  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ_B,    2,  "requires EVEX_B, EVEX_KZ and the OpSize prefix") \
  ENUM_ENTRY(IC_EVEX_W_KZ_B,         3,  "requires EVEX_B, EVEX_KZ and the W prefix")      \
  ENUM_ENTRY(IC_EVEX_W_XS_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, W, and XS prefix")     \
  ENUM_ENTRY(IC_EVEX_W_XD_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, W, and XD prefix")     \
  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ_B,  4,  "requires EVEX_B, EVEX_KZ, W, and OpSize")        \
  ENUM_ENTRY(IC_EVEX_L_KZ_B,           3,  "requires EVEX_B, EVEX_KZ and the L prefix")       \
  ENUM_ENTRY(IC_EVEX_L_XS_KZ_B,        4,  "requires EVEX_B, EVEX_KZ and the L and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L_XD_KZ_B,        4,  "requires EVEX_B, EVEX_KZ and the L and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ_B,    4,  "requires EVEX_B, EVEX_KZ, L, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L_W_KZ_B,         3,  "requires EVEX_B, EVEX_KZ, L and W")               \
  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, L, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ_B,      4,  "requires EVEX_B, EVEX_KZ, L, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ_B,  4,  "requires EVEX_B, EVEX_KZ, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_L2_KZ_B,          3,  "requires EVEX_B, EVEX_KZ and the L2 prefix")       \
  ENUM_ENTRY(IC_EVEX_L2_XS_KZ_B,       4,  "requires EVEX_B, EVEX_KZ and the L2 and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L2_XD_KZ_B,       4,  "requires EVEX_B, EVEX_KZ and the L2 and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ_B,   4,  "requires EVEX_B, EVEX_KZ, L2, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L2_W_KZ_B,        3,  "requires EVEX_B, EVEX_KZ, L2 and W")               \
  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ_B,     4,  "requires EVEX_B, EVEX_KZ, L2, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ_B,     4,  "requires EVEX_B, EVEX_KZ, L2, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ_B, 4,  "requires EVEX_B, EVEX_KZ, L2, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_KZ,             1,  "requires an EVEX_KZ prefix")             \
  ENUM_ENTRY(IC_EVEX_XS_KZ,          2,  "requires EVEX_KZ and the XS prefix")     \
  ENUM_ENTRY(IC_EVEX_XD_KZ,          2,  "requires EVEX_KZ and the XD prefix")     \
  ENUM_ENTRY(IC_EVEX_OPSIZE_KZ,      2,  "requires EVEX_KZ and the OpSize prefix") \
  ENUM_ENTRY(IC_EVEX_W_KZ,           3,  "requires EVEX_KZ and the W prefix")      \
  ENUM_ENTRY(IC_EVEX_W_XS_KZ,        4,  "requires EVEX_KZ, W, and XS prefix")     \
  ENUM_ENTRY(IC_EVEX_W_XD_KZ,        4,  "requires EVEX_KZ, W, and XD prefix")     \
  ENUM_ENTRY(IC_EVEX_W_OPSIZE_KZ,    4,  "requires EVEX_KZ, W, and OpSize")        \
  ENUM_ENTRY(IC_EVEX_L_KZ,           3,  "requires EVEX_KZ and the L prefix")       \
  ENUM_ENTRY(IC_EVEX_L_XS_KZ,        4,  "requires EVEX_KZ and the L and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L_XD_KZ,        4,  "requires EVEX_KZ and the L and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L_OPSIZE_KZ,    4,  "requires EVEX_KZ, L, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L_W_KZ,         3,  "requires EVEX_KZ, L and W")               \
  ENUM_ENTRY(IC_EVEX_L_W_XS_KZ,      4,  "requires EVEX_KZ, L, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_XD_KZ,      4,  "requires EVEX_KZ, L, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L_W_OPSIZE_KZ,  4,  "requires EVEX_KZ, L, W and OpSize")       \
  ENUM_ENTRY(IC_EVEX_L2_KZ,          3,  "requires EVEX_KZ and the L2 prefix")       \
  ENUM_ENTRY(IC_EVEX_L2_XS_KZ,       4,  "requires EVEX_KZ and the L2 and XS prefix")\
  ENUM_ENTRY(IC_EVEX_L2_XD_KZ,       4,  "requires EVEX_KZ and the L2 and XD prefix")\
  ENUM_ENTRY(IC_EVEX_L2_OPSIZE_KZ,   4,  "requires EVEX_KZ, L2, and OpSize")         \
  ENUM_ENTRY(IC_EVEX_L2_W_KZ,        3,  "requires EVEX_KZ, L2 and W")               \
  ENUM_ENTRY(IC_EVEX_L2_W_XS_KZ,     4,  "requires EVEX_KZ, L2, W and XS prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_XD_KZ,     4,  "requires EVEX_KZ, L2, W and XD prefix")    \
  ENUM_ENTRY(IC_EVEX_L2_W_OPSIZE_KZ, 4,  "requires EVEX_KZ, L2, W and OpSize")

#define ENUM_ENTRY(n, r, d) n,
enum InstructionContext {
  INSTRUCTION_CONTEXTS
  IC_max
};
#undef ENUM_ENTRY
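
// The same X-macro can be re-expanded by consumers to build tables parallel
// to the enum. For example (an illustrative sketch), stringified names:
//
//   #define ENUM_ENTRY(n, r, d) #n,
//   static const char *InstructionContextNames[] = { INSTRUCTION_CONTEXTS };
//   #undef ENUM_ENTRY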

// Opcode types, which determine which decode table to use, both in the Intel
// manual and also for the decoder.
enum OpcodeType {
  ONEBYTE       = 0,
  TWOBYTE       = 1,
  THREEBYTE_38  = 2,
  THREEBYTE_3A  = 3,
  XOP8_MAP      = 4,
  XOP9_MAP      = 5,
  XOPA_MAP      = 6,
  THREEDNOW_MAP = 7,
  MAP5          = 8,
  MAP6          = 9
};

// The following structs are used for the hierarchical decode table.  After
// determining the instruction's class (i.e., which IC_* constant applies to
// it), the decoder reads the opcode.  Some instructions require specific
// values of the ModR/M byte, so the ModR/M byte indexes into the final table.
//
// If a ModR/M byte is not required, "required" is left unset, and the values
// for each instructionID are identical.
typedef uint16_t InstrUID;

// ModRMDecisionType - describes the type of ModR/M decision, allowing the
// consumer to determine the number of entries in it.
//
// MODRM_ONEENTRY - No matter what the value of the ModR/M byte is, the decoded
//                  instruction is the same.
// MODRM_SPLITRM  - If the ModR/M byte is between 0x00 and 0xbf, the opcode
//                  corresponds to one instruction; otherwise, it corresponds to
//                  a different instruction.
// MODRM_SPLITMISC- If the ModR/M byte is between 0x00 and 0xbf, the ModR/M
//                  byte divided by 8 is used to select the instruction;
//                  otherwise, each value of the ModR/M byte could correspond
//                  to a different instruction.
// MODRM_SPLITREG - The ModR/M byte divided by 8 is used to select the
//                  instruction. This corresponds to instructions that use the
//                  reg field as an opcode extension.
// MODRM_FULL     - Potentially, each value of the ModR/M byte could correspond
//                  to a different instruction.
#define MODRMTYPES            \
  ENUM_ENTRY(MODRM_ONEENTRY)  \
  ENUM_ENTRY(MODRM_SPLITRM)   \
  ENUM_ENTRY(MODRM_SPLITMISC)  \
  ENUM_ENTRY(MODRM_SPLITREG)  \
  ENUM_ENTRY(MODRM_FULL)

#define ENUM_ENTRY(n) n,
enum ModRMDecisionType {
  MODRMTYPES
  MODRM_max
};
#undef ENUM_ENTRY

#define CASE_ENCODING_RM     \
    case ENCODING_RM:        \
    case ENCODING_RM_CD2:    \
    case ENCODING_RM_CD4:    \
    case ENCODING_RM_CD8:    \
    case ENCODING_RM_CD16:   \
    case ENCODING_RM_CD32:   \
    case ENCODING_RM_CD64

#define CASE_ENCODING_VSIB   \
    case ENCODING_VSIB:      \
    case ENCODING_VSIB_CD2:  \
    case ENCODING_VSIB_CD4:  \
    case ENCODING_VSIB_CD8:  \
    case ENCODING_VSIB_CD16: \
    case ENCODING_VSIB_CD32: \
    case ENCODING_VSIB_CD64

// Physical encodings of instruction operands.
#define ENCODINGS                                                              \
  ENUM_ENTRY(ENCODING_NONE,   "")                                              \
  ENUM_ENTRY(ENCODING_REG,    "Register operand in ModR/M byte.")              \
  ENUM_ENTRY(ENCODING_RM,     "R/M operand in ModR/M byte.")                   \
  ENUM_ENTRY(ENCODING_RM_CD2, "R/M operand with CDisp scaling of 2")           \
  ENUM_ENTRY(ENCODING_RM_CD4, "R/M operand with CDisp scaling of 4")           \
  ENUM_ENTRY(ENCODING_RM_CD8, "R/M operand with CDisp scaling of 8")           \
  ENUM_ENTRY(ENCODING_RM_CD16,"R/M operand with CDisp scaling of 16")          \
  ENUM_ENTRY(ENCODING_RM_CD32,"R/M operand with CDisp scaling of 32")          \
  ENUM_ENTRY(ENCODING_RM_CD64,"R/M operand with CDisp scaling of 64")          \
  ENUM_ENTRY(ENCODING_SIB,      "Force SIB operand in ModR/M byte.")           \
  ENUM_ENTRY(ENCODING_VSIB,     "VSIB operand in ModR/M byte.")                \
  ENUM_ENTRY(ENCODING_VSIB_CD2, "VSIB operand with CDisp scaling of 2")        \
  ENUM_ENTRY(ENCODING_VSIB_CD4, "VSIB operand with CDisp scaling of 4")        \
  ENUM_ENTRY(ENCODING_VSIB_CD8, "VSIB operand with CDisp scaling of 8")        \
  ENUM_ENTRY(ENCODING_VSIB_CD16,"VSIB operand with CDisp scaling of 16")       \
  ENUM_ENTRY(ENCODING_VSIB_CD32,"VSIB operand with CDisp scaling of 32")       \
  ENUM_ENTRY(ENCODING_VSIB_CD64,"VSIB operand with CDisp scaling of 64")       \
  ENUM_ENTRY(ENCODING_VVVV,   "Register operand in VEX.vvvv byte.")            \
  ENUM_ENTRY(ENCODING_WRITEMASK, "Register operand in EVEX.aaa byte.")         \
  ENUM_ENTRY(ENCODING_IB,     "1-byte immediate")                              \
  ENUM_ENTRY(ENCODING_IW,     "2-byte")                                        \
  ENUM_ENTRY(ENCODING_ID,     "4-byte")                                        \
  ENUM_ENTRY(ENCODING_IO,     "8-byte")                                        \
  ENUM_ENTRY(ENCODING_RB,     "(AL..DIL, R8B..R15B) Register code added to "   \
                              "the opcode byte")                               \
  ENUM_ENTRY(ENCODING_RW,     "(AX..DI, R8W..R15W)")                           \
  ENUM_ENTRY(ENCODING_RD,     "(EAX..EDI, R8D..R15D)")                         \
  ENUM_ENTRY(ENCODING_RO,     "(RAX..RDI, R8..R15)")                           \
  ENUM_ENTRY(ENCODING_FP,     "Position on floating-point stack in ModR/M "    \
                              "byte.")                                         \
                                                                               \
  ENUM_ENTRY(ENCODING_Iv,     "Immediate of operand size")                     \
  ENUM_ENTRY(ENCODING_Ia,     "Immediate of address size")                     \
  ENUM_ENTRY(ENCODING_IRC,    "Immediate for static rounding control")         \
  ENUM_ENTRY(ENCODING_Rv,     "Register code of operand size added to the "    \
                              "opcode byte")                                   \
  ENUM_ENTRY(ENCODING_CC,     "Condition code encoded in opcode")              \
  ENUM_ENTRY(ENCODING_DUP,    "Duplicate of another operand; ID is encoded "   \
                              "in type")                                       \
  ENUM_ENTRY(ENCODING_SI,     "Source index; encoded in OpSize/Adsize prefix") \
  ENUM_ENTRY(ENCODING_DI,     "Destination index; encoded in prefixes")

#define ENUM_ENTRY(n, d) n,
enum OperandEncoding {
  ENCODINGS
  ENCODING_max
};
#undef ENUM_ENTRY

// Semantic interpretations of instruction operands.
#define TYPES                                                                  \
  ENUM_ENTRY(TYPE_NONE,       "")                                              \
  ENUM_ENTRY(TYPE_REL,        "immediate address")                             \
  ENUM_ENTRY(TYPE_R8,         "1-byte register operand")                       \
  ENUM_ENTRY(TYPE_R16,        "2-byte")                                        \
  ENUM_ENTRY(TYPE_R32,        "4-byte")                                        \
  ENUM_ENTRY(TYPE_R64,        "8-byte")                                        \
  ENUM_ENTRY(TYPE_IMM,        "immediate operand")                             \
  ENUM_ENTRY(TYPE_UIMM8,      "1-byte unsigned immediate operand")             \
  ENUM_ENTRY(TYPE_M,          "Memory operand")                                \
  ENUM_ENTRY(TYPE_MSIB,       "Memory operand force sib encoding")             \
  ENUM_ENTRY(TYPE_MVSIBX,     "Memory operand using XMM index")                \
  ENUM_ENTRY(TYPE_MVSIBY,     "Memory operand using YMM index")                \
  ENUM_ENTRY(TYPE_MVSIBZ,     "Memory operand using ZMM index")                \
  ENUM_ENTRY(TYPE_SRCIDX,     "memory at source index")                        \
  ENUM_ENTRY(TYPE_DSTIDX,     "memory at destination index")                   \
  ENUM_ENTRY(TYPE_MOFFS,      "memory offset (relative to segment base)")      \
  ENUM_ENTRY(TYPE_ST,         "Position on the floating-point stack")          \
  ENUM_ENTRY(TYPE_MM64,       "8-byte MMX register")                           \
  ENUM_ENTRY(TYPE_XMM,        "16-byte")                                       \
  ENUM_ENTRY(TYPE_YMM,        "32-byte")                                       \
  ENUM_ENTRY(TYPE_ZMM,        "64-byte")                                       \
  ENUM_ENTRY(TYPE_VK,         "mask register")                                 \
  ENUM_ENTRY(TYPE_VK_PAIR,    "mask register pair")                            \
  ENUM_ENTRY(TYPE_TMM,        "tile")                                          \
  ENUM_ENTRY(TYPE_SEGMENTREG, "Segment register operand")                      \
  ENUM_ENTRY(TYPE_DEBUGREG,   "Debug register operand")                        \
  ENUM_ENTRY(TYPE_CONTROLREG, "Control register operand")                      \
  ENUM_ENTRY(TYPE_BNDR,       "MPX bounds register")                           \
                                                                               \
  ENUM_ENTRY(TYPE_Rv,         "Register operand of operand size")              \
  ENUM_ENTRY(TYPE_RELv,       "Immediate address of operand size")             \
  ENUM_ENTRY(TYPE_DUP0,       "Duplicate of operand 0")                        \
  ENUM_ENTRY(TYPE_DUP1,       "operand 1")                                     \
  ENUM_ENTRY(TYPE_DUP2,       "operand 2")                                     \
  ENUM_ENTRY(TYPE_DUP3,       "operand 3")                                     \
  ENUM_ENTRY(TYPE_DUP4,       "operand 4")                                     \

#define ENUM_ENTRY(n, d) n,
enum OperandType {
  TYPES
  TYPE_max
};
#undef ENUM_ENTRY

/// The specification for how to extract and interpret one operand.
struct OperandSpecifier {
  uint8_t encoding;
  uint8_t type;
};

static const unsigned X86_MAX_OPERANDS = 6;

/// Decoding mode for the Intel disassembler.  16-bit, 32-bit, and 64-bit mode
/// are supported, and represent real mode, IA-32e, and IA-32e in 64-bit mode,
/// respectively.
enum DisassemblerMode {
  MODE_16BIT,
  MODE_32BIT,
  MODE_64BIT
};

} // namespace X86Disassembler
} // namespace llvm

#endif
//===- BalancedPartitioning.h ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements BalancedPartitioning, a recursive balanced graph
// partitioning algorithm.
//
// The algorithm is used to find an ordering of FunctionNodes while optimizing
// a specified objective. The algorithm uses recursive bisection; it starts
// with a collection of unordered FunctionNodes and tries to split them into
// two sets (buckets) of equal cardinality. Each bisection step is comprised of
// iterations that greedily swap the FunctionNodes between the two buckets while
// there is an improvement of the objective. Once the process converges, the
// problem is divided into two sub-problems of half the size, which are
// recursively applied for the two buckets. The final ordering of the
// FunctionNodes is obtained by concatenating the two (recursively computed)
// orderings.
//
// In order to speed up the computation, we limit the depth of the recursive
// tree by a specified constant (SplitDepth) and apply at most a constant
// number of greedy iterations per split (IterationsPerSplit). The worst-case
// time complexity of the implementation is bounded by O(M*log^2 N), where
// N is the number of FunctionNodes and M is the number of
// FunctionNode-UtilityNode edges (assuming that any collection of D
// FunctionNodes contains O(D) UtilityNodes). Notice that the two different
// recursive sub-problems are independent and thus can be efficiently processed
// in parallel.
//
// Reference:
//   * Optimizing Function Layout for Mobile Applications,
//     https://arxiv.org/abs/2211.09285
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BALANCED_PARTITIONING_H
#define LLVM_SUPPORT_BALANCED_PARTITIONING_H

#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/ArrayRef.h"

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <random>
#include <vector>

namespace llvm {

class ThreadPool;
/// A function with a set of utility nodes; it is beneficial to order two
/// functions close together if they have similar utility nodes.
class BPFunctionNode {
  friend class BalancedPartitioning;

public:
  using IDT = uint64_t;
  using UtilityNodeT = uint32_t;

  /// \param UtilityNodes the set of utility nodes (must be unique'd)
  BPFunctionNode(IDT Id, ArrayRef<UtilityNodeT> UtilityNodes)
      : Id(Id), UtilityNodes(UtilityNodes) {}

  /// The ID of this node
  IDT Id;

  void dump(raw_ostream &OS) const;

protected:
  /// The list of utility nodes associated with this node
  SmallVector<UtilityNodeT, 4> UtilityNodes;
  /// The bucket assigned by balanced partitioning
  std::optional<unsigned> Bucket;
  /// The index of this node in the input order of the FunctionNodes
  uint64_t InputOrderIndex = 0;

  friend class BPFunctionNodeTest_Basic_Test;
  friend class BalancedPartitioningTest_Basic_Test;
  friend class BalancedPartitioningTest_Large_Test;
};

/// Algorithm parameters; default values are tuned on real-world binaries
struct BalancedPartitioningConfig {
  /// The depth of the recursive bisection
  unsigned SplitDepth = 18;
  /// The maximum number of bp iterations per split
  unsigned IterationsPerSplit = 40;
  /// The probability for a vertex to skip a move from its current bucket to
  /// another bucket; it often helps to escape from a local optimum
  float SkipProbability = 0.1f;
  /// Recursive subtasks up to the given depth are added to the queue and
  /// distributed among threads by ThreadPool; all subsequent calls are executed
  /// on the same thread
  unsigned TaskSplitDepth = 9;
};

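/// Recursive balanced graph partitioner; see the file comment above for the
/// algorithm.
///
/// Example (an illustrative sketch):
/// \code
///   BalancedPartitioningConfig Config;
///   BalancedPartitioning BP(Config);
///   std::vector<BPFunctionNode> Nodes;
///   Nodes.push_back(BPFunctionNode(/*Id=*/0, {1, 2}));
///   Nodes.push_back(BPFunctionNode(/*Id=*/1, {2, 3}));
///   BP.run(Nodes); // assigns a Bucket to each node and orders Nodes by it
/// \endcode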
class BalancedPartitioning {
public:
  BalancedPartitioning(const BalancedPartitioningConfig &Config);

  /// Run recursive graph partitioning that optimizes a given objective.
  void run(std::vector<BPFunctionNode> &Nodes) const;

private:
  struct UtilitySignature;
  using SignaturesT = SmallVector<UtilitySignature, 4>;
  using FunctionNodeRange =
      iterator_range<std::vector<BPFunctionNode>::iterator>;

  /// A special ThreadPool that allows for spawning new tasks after blocking on
  /// wait(). BalancedPartitioning recursively spawns new threads inside other
  /// threads, so we need to track how many active threads could spawn more
  /// threads.
  struct BPThreadPool {
    ThreadPool &TheThreadPool;
    std::mutex mtx;
    std::condition_variable cv;
    /// The number of threads that could spawn more threads
    std::atomic<int> NumActiveThreads = 0;
    /// Only true when all threads are done spawning new threads
    bool IsFinishedSpawning = false;
    /// Asynchronous submission of the task to the pool
    template <typename Func> void async(Func &&F);
    /// Blocking wait for all threads to complete. Unlike ThreadPool, it is
    /// acceptable for other threads to add more tasks while blocking on this
    /// call.
    void wait();
    BPThreadPool(ThreadPool &TheThreadPool) : TheThreadPool(TheThreadPool) {}
  };

  /// Run a recursive bisection of a given list of FunctionNodes
  /// \param RecDepth the current depth of recursion
  /// \param RootBucket the initial bucket of the FunctionNodes
  /// \param Offset the assigned buckets are the range [Offset, Offset +
  /// Nodes.size()]
  void bisect(const FunctionNodeRange Nodes, unsigned RecDepth,
              unsigned RootBucket, unsigned Offset,
              std::optional<BPThreadPool> &TP) const;

  /// Run bisection iterations
  void runIterations(const FunctionNodeRange Nodes, unsigned RecDepth,
                     unsigned LeftBucket, unsigned RightBucket,
                     std::mt19937 &RNG) const;

  /// Run a bisection iteration to improve the optimization goal
  /// \returns the total number of moved FunctionNodes
  unsigned runIteration(const FunctionNodeRange Nodes, unsigned LeftBucket,
                        unsigned RightBucket, SignaturesT &Signatures,
                        std::mt19937 &RNG) const;

  /// Try to move \p N from one bucket to another
  /// \returns true iff \p N is moved
  bool moveFunctionNode(BPFunctionNode &N, unsigned LeftBucket,
                        unsigned RightBucket, SignaturesT &Signatures,
                        std::mt19937 &RNG) const;

  /// Split all the FunctionNodes into 2 buckets, StartBucket and StartBucket +
  /// 1. The method is used for an initial assignment before a bisection step.
  void split(const FunctionNodeRange Nodes, unsigned StartBucket) const;

  /// Compute the uniform log-gap cost, assuming a utility node has \p X
  /// FunctionNodes in the left bucket and \p Y FunctionNodes in the right one.
  float logCost(unsigned X, unsigned Y) const;

  float log2Cached(unsigned i) const;

  const BalancedPartitioningConfig &Config;

  /// Precomputed values of log2(x). Table size is small enough to fit in cache.
  static constexpr unsigned LOG_CACHE_SIZE = 16384;
  float Log2Cache[LOG_CACHE_SIZE];

  /// The signature of a particular utility node used for the bisection step,
  /// i.e., the number of \p FunctionNodes in each of the two buckets
  struct UtilitySignature {
    /// The number of \p FunctionNodes in the left bucket
    unsigned LeftCount = 0;
    /// The number of \p FunctionNodes in the right bucket
    unsigned RightCount = 0;
    /// The cached gain of moving a \p FunctionNode from the left bucket to the
    /// right bucket
    float CachedGainLR;
    /// The cached gain of moving a \p FunctionNode from the right bucket to the
    /// left bucket
    float CachedGainRL;
    /// Whether \p CachedGainLR and \p CachedGainRL are valid
    bool CachedGainIsValid = false;
  };

protected:
  /// Compute the move gain for uniform log-gap cost
  static float moveGain(const BPFunctionNode &N, bool FromLeftToRight,
                        const SignaturesT &Signatures);
  friend class BalancedPartitioningTest_MoveGain_Test;
};

} // end namespace llvm

#endif // LLVM_SUPPORT_BALANCED_PARTITIONING_H
//===-- llvm/Support/LoongArchTargetParser.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/LoongArchTargetParser.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/LoongArchTargetParser.h"
//===- AARCH64TargetParser.def - AARCH64 target parsing defines ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides defines to build up the AARCH64 target parser's logic.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!
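
// Consumers define AARCH64_ARCH before including this file in order to expand
// the table below into code or data. For example (an illustrative sketch):
//
//   #define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) NAME,
//   static const char *AArch64ArchNames[] = {
//   #include "llvm/Support/AArch64TargetParser.def"
//   };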

#ifndef AARCH64_ARCH
#define AARCH64_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
#endif
AARCH64_ARCH("invalid", INVALID, "", "",
             ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE)
AARCH64_ARCH("armv8-a", ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
             FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD))
AARCH64_ARCH("armv8.1-a", ARMV8_1A, "8.1-A", "v8.1a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_RDM))
AARCH64_ARCH("armv8.2-a", ARMV8_2A, "8.2-A", "v8.2a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM))
AARCH64_ARCH("armv8.3-a", ARMV8_3A, "8.3-A", "v8.3a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC))
AARCH64_ARCH("armv8.4-a", ARMV8_4A, "8.4-A", "v8.4a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD))
AARCH64_ARCH("armv8.5-a", ARMV8_5A, "8.5-A", "v8.5a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD))
AARCH64_ARCH("armv8.6-a", ARMV8_6A, "8.6-A", "v8.6a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC  | AArch64::AEK_FP   |
              AArch64::AEK_SIMD | AArch64::AEK_RAS  | AArch64::AEK_LSE     |
              AArch64::AEK_RDM  | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SM4  | AArch64::AEK_SHA3 | AArch64::AEK_BF16    |
              AArch64::AEK_SHA2 | AArch64::AEK_AES  | AArch64::AEK_I8MM))
AARCH64_ARCH("armv8.7-a", ARMV8_7A, "8.7-A", "v8.7a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
              AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
AARCH64_ARCH("armv8.8-a", ARMV8_8A, "8.8-A", "v8.8a",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SM4 | AArch64::AEK_SHA3 | AArch64::AEK_BF16 |
              AArch64::AEK_SHA2 | AArch64::AEK_AES | AArch64::AEK_I8MM))
AARCH64_ARCH("armv9-a",   ARMV9A, "9-A", "v9a",
             ARMBuildAttrs::CPUArch::v8_A, FK_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SVE2))
AARCH64_ARCH("armv9.1-a", ARMV9_1A, "9.1-A", "v9.1a",
             ARMBuildAttrs::CPUArch::v8_A, FK_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SVE2))
AARCH64_ARCH("armv9.2-a", ARMV9_2A, "9.2-A", "v9.2a",
             ARMBuildAttrs::CPUArch::v8_A, FK_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SVE2))
AARCH64_ARCH("armv9.3-a", ARMV9_3A, "9.3-A", "v9.3",
             ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC | AArch64::AEK_FP |
              AArch64::AEK_SIMD | AArch64::AEK_RAS | AArch64::AEK_LSE |
              AArch64::AEK_RDM | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
              AArch64::AEK_SVE2))
// For v8-R, we do not enable crypto and align with GCC, which enables a more
// minimal set of optional architecture extensions.
AARCH64_ARCH("armv8-r", ARMV8R, "8-R", "v8r",
             ARMBuildAttrs::CPUArch::v8_R, FK_CRYPTO_NEON_FP_ARMV8,
             (AArch64::AEK_CRC     | AArch64::AEK_RDM  | AArch64::AEK_SSBS |
              AArch64::AEK_DOTPROD | AArch64::AEK_FP   | AArch64::AEK_SIMD |
              AArch64::AEK_FP16    | AArch64::AEK_FP16FML | AArch64::AEK_RAS |
              AArch64::AEK_RCPC    | AArch64::AEK_SB))
#undef AARCH64_ARCH

#ifndef AARCH64_ARCH_EXT_NAME
#define AARCH64_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
// FIXME: This would be nicer were it tablegen
AARCH64_ARCH_EXT_NAME("invalid",      AArch64::AEK_INVALID,     nullptr,         nullptr)
AARCH64_ARCH_EXT_NAME("none",         AArch64::AEK_NONE,        nullptr,         nullptr)
AARCH64_ARCH_EXT_NAME("crc",          AArch64::AEK_CRC,         "+crc",          "-crc")
AARCH64_ARCH_EXT_NAME("lse",          AArch64::AEK_LSE,         "+lse",          "-lse")
AARCH64_ARCH_EXT_NAME("rdm",          AArch64::AEK_RDM,         "+rdm",          "-rdm")
AARCH64_ARCH_EXT_NAME("crypto",       AArch64::AEK_CRYPTO,      "+crypto",       "-crypto")
AARCH64_ARCH_EXT_NAME("sm4",          AArch64::AEK_SM4,         "+sm4",          "-sm4")
AARCH64_ARCH_EXT_NAME("sha3",         AArch64::AEK_SHA3,        "+sha3",         "-sha3")
AARCH64_ARCH_EXT_NAME("sha2",         AArch64::AEK_SHA2,        "+sha2",         "-sha2")
AARCH64_ARCH_EXT_NAME("aes",          AArch64::AEK_AES,         "+aes",          "-aes")
AARCH64_ARCH_EXT_NAME("dotprod",      AArch64::AEK_DOTPROD,     "+dotprod",      "-dotprod")
AARCH64_ARCH_EXT_NAME("fp",           AArch64::AEK_FP,          "+fp-armv8",     "-fp-armv8")
AARCH64_ARCH_EXT_NAME("simd",         AArch64::AEK_SIMD,        "+neon",         "-neon")
AARCH64_ARCH_EXT_NAME("fp16",         AArch64::AEK_FP16,        "+fullfp16",     "-fullfp16")
AARCH64_ARCH_EXT_NAME("fp16fml",      AArch64::AEK_FP16FML,     "+fp16fml",      "-fp16fml")
AARCH64_ARCH_EXT_NAME("profile",      AArch64::AEK_PROFILE,     "+spe",          "-spe")
AARCH64_ARCH_EXT_NAME("ras",          AArch64::AEK_RAS,         "+ras",          "-ras")
AARCH64_ARCH_EXT_NAME("sve",          AArch64::AEK_SVE,         "+sve",          "-sve")
AARCH64_ARCH_EXT_NAME("sve2",         AArch64::AEK_SVE2,        "+sve2",         "-sve2")
AARCH64_ARCH_EXT_NAME("sve2-aes",     AArch64::AEK_SVE2AES,     "+sve2-aes",     "-sve2-aes")
AARCH64_ARCH_EXT_NAME("sve2-sm4",     AArch64::AEK_SVE2SM4,     "+sve2-sm4",     "-sve2-sm4")
AARCH64_ARCH_EXT_NAME("sve2-sha3",    AArch64::AEK_SVE2SHA3,    "+sve2-sha3",    "-sve2-sha3")
AARCH64_ARCH_EXT_NAME("sve2-bitperm", AArch64::AEK_SVE2BITPERM, "+sve2-bitperm", "-sve2-bitperm")
AARCH64_ARCH_EXT_NAME("rcpc",         AArch64::AEK_RCPC,        "+rcpc",         "-rcpc")
AARCH64_ARCH_EXT_NAME("rng",          AArch64::AEK_RAND,        "+rand",         "-rand")
AARCH64_ARCH_EXT_NAME("memtag",       AArch64::AEK_MTE,         "+mte",          "-mte")
AARCH64_ARCH_EXT_NAME("ssbs",         AArch64::AEK_SSBS,        "+ssbs",         "-ssbs")
AARCH64_ARCH_EXT_NAME("sb",           AArch64::AEK_SB,          "+sb",           "-sb")
AARCH64_ARCH_EXT_NAME("predres",      AArch64::AEK_PREDRES,     "+predres",      "-predres")
AARCH64_ARCH_EXT_NAME("bf16",         AArch64::AEK_BF16,        "+bf16",         "-bf16")
AARCH64_ARCH_EXT_NAME("i8mm",         AArch64::AEK_I8MM,        "+i8mm",         "-i8mm")
AARCH64_ARCH_EXT_NAME("f32mm",        AArch64::AEK_F32MM,       "+f32mm",        "-f32mm")
AARCH64_ARCH_EXT_NAME("f64mm",        AArch64::AEK_F64MM,       "+f64mm",        "-f64mm")
AARCH64_ARCH_EXT_NAME("tme",          AArch64::AEK_TME,         "+tme",          "-tme")
AARCH64_ARCH_EXT_NAME("ls64",         AArch64::AEK_LS64,        "+ls64",         "-ls64")
AARCH64_ARCH_EXT_NAME("brbe",         AArch64::AEK_BRBE,        "+brbe",         "-brbe")
AARCH64_ARCH_EXT_NAME("pauth",        AArch64::AEK_PAUTH,       "+pauth",        "-pauth")
AARCH64_ARCH_EXT_NAME("flagm",        AArch64::AEK_FLAGM,       "+flagm",        "-flagm")
AARCH64_ARCH_EXT_NAME("sme",          AArch64::AEK_SME,         "+sme",          "-sme")
AARCH64_ARCH_EXT_NAME("sme-f64",      AArch64::AEK_SMEF64,      "+sme-f64",      "-sme-f64")
AARCH64_ARCH_EXT_NAME("sme-i64",      AArch64::AEK_SMEI64,      "+sme-i64",      "-sme-i64")
AARCH64_ARCH_EXT_NAME("hbc",          AArch64::AEK_HBC,         "+hbc",          "-hbc")
AARCH64_ARCH_EXT_NAME("mops",         AArch64::AEK_MOPS,        "+mops",         "-mops")
AARCH64_ARCH_EXT_NAME("pmuv3",        AArch64::AEK_PERFMON,     "+perfmon",      "-perfmon")
#undef AARCH64_ARCH_EXT_NAME

#ifndef AARCH64_CPU_NAME
#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
#endif
AARCH64_CPU_NAME("cortex-a34", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a35", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a53", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a55", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC))
AARCH64_CPU_NAME("cortex-a510", ARMV9A, FK_NEON_FP_ARMV8, false,
                 (AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_SB |
                  AArch64::AEK_PAUTH | AArch64::AEK_MTE | AArch64::AEK_SSBS |
                  AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM |
                  AArch64::AEK_FP16FML))
AARCH64_CPU_NAME("cortex-a57", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a65", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD | AArch64::AEK_FP16 |
                  AArch64::AEK_RCPC | AArch64::AEK_SSBS))
AARCH64_CPU_NAME("cortex-a65ae", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD | AArch64::AEK_FP16 |
                  AArch64::AEK_RCPC | AArch64::AEK_SSBS))
AARCH64_CPU_NAME("cortex-a72", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a73", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a75", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC))
AARCH64_CPU_NAME("cortex-a76", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS))
AARCH64_CPU_NAME("cortex-a76ae", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS))
AARCH64_CPU_NAME("cortex-a77", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_RCPC | AArch64::AEK_DOTPROD |
                  AArch64::AEK_SSBS))
AARCH64_CPU_NAME("cortex-a78", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("cortex-a78c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS | AArch64::AEK_PROFILE | AArch64::AEK_FLAGM |
                  AArch64::AEK_PAUTH | AArch64::AEK_FP16FML))
AARCH64_CPU_NAME("cortex-a710", ARMV9A, FK_NEON_FP_ARMV8, false,
                 (AArch64::AEK_MTE | AArch64::AEK_PAUTH | AArch64::AEK_FLAGM |
                  AArch64::AEK_SB | AArch64::AEK_I8MM | AArch64::AEK_FP16FML |
                  AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM |
                  AArch64::AEK_BF16))
AARCH64_CPU_NAME("cortex-r82", ARMV8R, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_LSE))
AARCH64_CPU_NAME("cortex-x1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("cortex-x1c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_DOTPROD | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS | AArch64::AEK_PAUTH | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("cortex-x2", ARMV9A, FK_NEON_FP_ARMV8, false,
                 (AArch64::AEK_MTE | AArch64::AEK_BF16 | AArch64::AEK_I8MM |
                  AArch64::AEK_PAUTH | AArch64::AEK_SSBS | AArch64::AEK_SB |
                  AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM |
                  AArch64::AEK_FP16FML))
AARCH64_CPU_NAME("neoverse-e1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD | AArch64::AEK_FP16 |
                  AArch64::AEK_RCPC | AArch64::AEK_SSBS))
AARCH64_CPU_NAME("neoverse-n1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD | AArch64::AEK_FP16 |
                  AArch64::AEK_PROFILE | AArch64::AEK_RCPC |
                  AArch64::AEK_SSBS))
AARCH64_CPU_NAME("neoverse-n2", ARMV8_5A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_BF16 | AArch64::AEK_DOTPROD | AArch64::AEK_FP16 |
                  AArch64::AEK_I8MM | AArch64::AEK_MTE |
                  AArch64::AEK_SB | AArch64::AEK_SSBS |
                  AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM))
AARCH64_CPU_NAME("neoverse-512tvb", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_SVE | AArch64::AEK_SSBS |
                  AArch64::AEK_FP16 | AArch64::AEK_BF16 |
                  AArch64::AEK_DOTPROD | AArch64::AEK_PROFILE |
                  AArch64::AEK_RAND | AArch64::AEK_FP16FML | AArch64::AEK_I8MM))
AARCH64_CPU_NAME("neoverse-v1", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_SVE | AArch64::AEK_SSBS |
                  AArch64::AEK_FP16 | AArch64::AEK_BF16 |
                  AArch64::AEK_DOTPROD | AArch64::AEK_PROFILE |
                  AArch64::AEK_RAND | AArch64::AEK_FP16FML | AArch64::AEK_I8MM))
AARCH64_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_NONE))
AARCH64_CPU_NAME("apple-a7", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_NONE))
AARCH64_CPU_NAME("apple-a8", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_NONE))
AARCH64_CPU_NAME("apple-a9", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_NONE))
AARCH64_CPU_NAME("apple-a10", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC | AArch64::AEK_RDM))
AARCH64_CPU_NAME("apple-a11", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16))
AARCH64_CPU_NAME("apple-a12", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16))
AARCH64_CPU_NAME("apple-a13", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3))
AARCH64_CPU_NAME("apple-a14", ARMV8_5A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3))
AARCH64_CPU_NAME("apple-m1", ARMV8_5A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3))
AARCH64_CPU_NAME("apple-s4", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16))
AARCH64_CPU_NAME("apple-s5", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16))
AARCH64_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD | AArch64::AEK_FP16))
AARCH64_CPU_NAME("exynos-m5", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD | AArch64::AEK_FP16))
AARCH64_CPU_NAME("falkor", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC | AArch64::AEK_RDM))
AARCH64_CPU_NAME("saphira", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderx2t99", ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx3t110", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderxt88", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderxt81", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderxt83", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_CRC))
AARCH64_CPU_NAME("tsv110", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_DOTPROD |
                  AArch64::AEK_FP16 | AArch64::AEK_FP16FML |
                  AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("a64fx", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_SVE))
AARCH64_CPU_NAME("carmel", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 AArch64::AEK_FP16)
AARCH64_CPU_NAME("ampere1", ARMV8_6A, FK_CRYPTO_NEON_FP_ARMV8, false,
                 (AArch64::AEK_FP16 | AArch64::AEK_SB | AArch64::AEK_SSBS))
// Invalid CPU
AARCH64_CPU_NAME("invalid", INVALID, FK_INVALID, true, AArch64::AEK_INVALID)
#undef AARCH64_CPU_NAME
//==- BLAKE3.h - BLAKE3 C++ wrapper for LLVM ---------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a C++ wrapper of the BLAKE3 C interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BLAKE3_H
#define LLVM_SUPPORT_BLAKE3_H

#include "llvm-c/blake3.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"

namespace llvm {

/// The constant \p LLVM_BLAKE3_OUT_LEN provides the default output length,
/// 32 bytes, which is recommended for most callers.
///
/// Outputs shorter than the default length of 32 bytes (256 bits) provide
/// less security. An N-bit BLAKE3 output is intended to provide N bits of
/// first and second preimage resistance and N/2 bits of collision
/// resistance, for any N up to 256. Longer outputs don't provide any
/// additional security.
///
/// Shorter BLAKE3 outputs are prefixes of longer ones. Explicitly
/// requesting a short output is equivalent to truncating the default-length
/// output.
template <size_t NumBytes = LLVM_BLAKE3_OUT_LEN>
using BLAKE3Result = std::array<uint8_t, NumBytes>;

/// A class that wraps the BLAKE3 algorithm.
class BLAKE3 {
public:
  BLAKE3() { init(); }

  /// Reinitialize the internal state
  void init() { llvm_blake3_hasher_init(&Hasher); }

  /// Digest more data.
  void update(ArrayRef<uint8_t> Data) {
    llvm_blake3_hasher_update(&Hasher, Data.data(), Data.size());
  }

  /// Digest more data.
  void update(StringRef Str) {
    llvm_blake3_hasher_update(&Hasher, Str.data(), Str.size());
  }

  /// Finalize the hasher and put the result in \p Result.
  /// This doesn't modify the hasher itself, and it's possible to finalize again
  /// after adding more input.
  template <size_t NumBytes = LLVM_BLAKE3_OUT_LEN>
  void final(BLAKE3Result<NumBytes> &Result) {
    llvm_blake3_hasher_finalize(&Hasher, Result.data(), Result.size());
  }

  /// Finalize the hasher and return an output of any length, given in bytes.
  /// This doesn't modify the hasher itself, and it's possible to finalize again
  /// after adding more input.
  template <size_t NumBytes = LLVM_BLAKE3_OUT_LEN>
  BLAKE3Result<NumBytes> final() {
    BLAKE3Result<NumBytes> Result;
    llvm_blake3_hasher_finalize(&Hasher, Result.data(), Result.size());
    return Result;
  }

  /// Return the current output for the digested data since the last call to
  /// init().
  ///
  /// Other hash functions distinguish between \p result() and \p final(), with
  /// \p result() allowing more calls into \p update(), but there's no
  /// difference for the BLAKE3 hash function.
  template <size_t NumBytes = LLVM_BLAKE3_OUT_LEN>
  BLAKE3Result<NumBytes> result() {
    return final<NumBytes>();
  }

  /// Returns a BLAKE3 hash for the given data.
  template <size_t NumBytes = LLVM_BLAKE3_OUT_LEN>
  static BLAKE3Result<NumBytes> hash(ArrayRef<uint8_t> Data) {
    BLAKE3 Hasher;
    Hasher.update(Data);
    return Hasher.final<NumBytes>();
  }

private:
  llvm_blake3_hasher Hasher;
};
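
// Illustrative usage sketch (not part of the API): hash a string, then take
// both the default 32-byte digest and a truncated 16-byte prefix of it.
//
//   BLAKE3 Hasher;
//   Hasher.update("some data");
//   BLAKE3Result<> Full = Hasher.final();         // 32 bytes
//   BLAKE3Result<16> Short = Hasher.final<16>();  // prefix of Full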

/// Like \p BLAKE3 but using a class-level template parameter for specifying the
/// hash size of the \p final() and \p result() functions.
///
/// This is useful for using BLAKE3 as the hasher type for \p HashBuilder with
/// non-default hash sizes.
template <size_t NumBytes> class TruncatedBLAKE3 : public BLAKE3 {
public:
  /// Finalize the hasher and put the result in \p Result.
  /// This doesn't modify the hasher itself, and it's possible to finalize again
  /// after adding more input.
  void final(BLAKE3Result<NumBytes> &Result) { return BLAKE3::final(Result); }

  /// Finalize the hasher and return an output of any length, given in bytes.
  /// This doesn't modify the hasher itself, and it's possible to finalize again
  /// after adding more input.
  BLAKE3Result<NumBytes> final() { return BLAKE3::final<NumBytes>(); }

  /// Return the current output for the digested data since the last call to
  /// init().
  ///
  /// Other hash functions distinguish between \p result() and \p final(), with
  /// \p result() allowing more calls into \p update(), but there's no
  /// difference for the BLAKE3 hash function.
  BLAKE3Result<NumBytes> result() { return BLAKE3::result<NumBytes>(); }
};

} // namespace llvm

#endif
//===- FormatVariadicDetails.h - Helpers for FormatVariadic.h ----*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATVARIADICDETAILS_H
#define LLVM_SUPPORT_FORMATVARIADICDETAILS_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"

#include <type_traits>

namespace llvm {
template <typename T, typename Enable = void> struct format_provider {};
class Error;

namespace detail {
class format_adapter {
  virtual void anchor();

protected:
  virtual ~format_adapter() = default;

public:
  virtual void format(raw_ostream &S, StringRef Options) = 0;
};

template <typename T> class provider_format_adapter : public format_adapter {
  T Item;

public:
  explicit provider_format_adapter(T &&Item) : Item(std::forward<T>(Item)) {}

  void format(llvm::raw_ostream &S, StringRef Options) override {
    format_provider<std::decay_t<T>>::format(Item, S, Options);
  }
};

template <typename T>
class stream_operator_format_adapter : public format_adapter {
  T Item;

public:
  explicit stream_operator_format_adapter(T &&Item)
      : Item(std::forward<T>(Item)) {}

  void format(llvm::raw_ostream &S, StringRef) override { S << Item; }
};

template <typename T> class missing_format_adapter;

// Test if format_provider<T> is defined on T and contains a member function
// with the signature:
//   static void format(const T&, raw_stream &, StringRef);
//
template <class T> class has_FormatProvider {
public:
  using Decayed = std::decay_t<T>;
  typedef void (*Signature_format)(const Decayed &, llvm::raw_ostream &,
                                   StringRef);

  template <typename U>
  static char test(SameType<Signature_format, &U::format> *);

  template <typename U> static double test(...);

  static bool const value =
      (sizeof(test<llvm::format_provider<Decayed>>(nullptr)) == 1);
};
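
// For reference, a user-defined type opts into the format_provider path by
// specializing the template with a matching signature. Illustrative sketch
// ('Point' is a hypothetical type):
//
//   struct Point { int X, Y; };
//   namespace llvm {
//   template <> struct format_provider<Point> {
//     static void format(const Point &P, raw_ostream &OS, StringRef Style) {
//       OS << '(' << P.X << ", " << P.Y << ')';
//     }
//   };
//   } // namespace llvm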

// Test if raw_ostream& << T -> raw_ostream& is findable via ADL.
template <class T> class has_StreamOperator {
public:
  using ConstRefT = const std::decay_t<T> &;

  template <typename U>
  static char test(std::enable_if_t<
                   std::is_same_v<decltype(std::declval<llvm::raw_ostream &>()
                                           << std::declval<U>()),
                                  llvm::raw_ostream &>,
                   int *>);

  template <typename U> static double test(...);

  static bool const value = (sizeof(test<ConstRefT>(nullptr)) == 1);
};

// Simple template that decides whether a type T should use the member-function
// based format() invocation.
template <typename T>
struct uses_format_member
    : public std::integral_constant<
          bool, std::is_base_of_v<format_adapter, std::remove_reference_t<T>>> {
};

// Simple template that decides whether a type T should use the format_provider
// based format() invocation.  The member function takes priority, so this test
// will only be true if there is not ALSO a format member.
template <typename T>
struct uses_format_provider
    : public std::integral_constant<
          bool, !uses_format_member<T>::value && has_FormatProvider<T>::value> {
};

// Simple template that decides whether a type T should use the operator<<
// based format() invocation.  This takes last priority.
template <typename T>
struct uses_stream_operator
    : public std::integral_constant<bool, !uses_format_member<T>::value &&
                                              !uses_format_provider<T>::value &&
                                              has_StreamOperator<T>::value> {};

// Simple template that decides whether a type T has neither a member-function
// nor format_provider based implementation that it can use.  Mostly used so
// that the compiler emits a nice diagnostic when a type with no format
// implementation is encountered.
template <typename T>
struct uses_missing_provider
    : public std::integral_constant<bool, !uses_format_member<T>::value &&
                                              !uses_format_provider<T>::value &&
                                              !uses_stream_operator<T>::value> {
};

template <typename T>
std::enable_if_t<uses_format_member<T>::value, T>
build_format_adapter(T &&Item) {
  return std::forward<T>(Item);
}

template <typename T>
std::enable_if_t<uses_format_provider<T>::value, provider_format_adapter<T>>
build_format_adapter(T &&Item) {
  return provider_format_adapter<T>(std::forward<T>(Item));
}

template <typename T>
std::enable_if_t<uses_stream_operator<T>::value,
                 stream_operator_format_adapter<T>>
build_format_adapter(T &&Item) {
  // If the caller passed an Error by value, then stream_operator_format_adapter
  // would be responsible for consuming it.
  // Make the caller opt into this by calling fmt_consume().
  static_assert(
      !std::is_same_v<llvm::Error, std::remove_cv_t<T>>,
      "llvm::Error-by-value must be wrapped in fmt_consume() for formatv");
  return stream_operator_format_adapter<T>(std::forward<T>(Item));
}

template <typename T>
std::enable_if_t<uses_missing_provider<T>::value, missing_format_adapter<T>>
build_format_adapter(T &&) {
  return missing_format_adapter<T>();
}
}
}

#endif
//===--- ARMEHABI.h - ARM Exception Handling ABI ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the constants for the ARM unwind opcodes and exception
// handling table entry kinds.
//
// The enumerations and constants in this file reflect the ARM EHABI
// Specification as published by ARM.
//
// Exception Handling ABI for the ARM Architecture r2.09 - November 30, 2012
//
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARMEHABI_H
#define LLVM_SUPPORT_ARMEHABI_H

namespace llvm {
namespace ARM {
namespace EHABI {
  /// ARM exception handling table entry kinds
  enum EHTEntryKind {
    EHT_GENERIC = 0x00,
    EHT_COMPACT = 0x80
  };

  enum {
    /// Special entry for a function that can never be unwound
    EXIDX_CANTUNWIND = 0x1
  };

  /// ARM-defined frame unwinding opcodes
  enum UnwindOpcodes {
    // Format: 00xxxxxx
    // Purpose: vsp = vsp + ((x << 2) + 4)
    UNWIND_OPCODE_INC_VSP = 0x00,

    // Format: 01xxxxxx
    // Purpose: vsp = vsp - ((x << 2) + 4)
    UNWIND_OPCODE_DEC_VSP = 0x40,

    // Format: 10000000 00000000
    // Purpose: refuse to unwind
    UNWIND_OPCODE_REFUSE = 0x8000,

    // Format: 1000xxxx xxxxxxxx
    // Purpose: pop r[15:12], r[11:4]
    // Constraint: x != 0
    UNWIND_OPCODE_POP_REG_MASK_R4 = 0x8000,

    // Format: 1001xxxx
    // Purpose: vsp = r[x]
    // Constraint: x != 13 && x != 15
    UNWIND_OPCODE_SET_VSP = 0x90,

    // Format: 10100xxx
    // Purpose: pop r[(4+x):4]
    UNWIND_OPCODE_POP_REG_RANGE_R4 = 0xa0,

    // Format: 10101xxx
    // Purpose: pop r14, r[(4+x):4]
    UNWIND_OPCODE_POP_REG_RANGE_R4_R14 = 0xa8,

    // Format: 10110000
    // Purpose: finish
    UNWIND_OPCODE_FINISH = 0xb0,

    // Format: 10110100
    // Purpose: Pop Return Address Authentication Code
    UNWIND_OPCODE_POP_RA_AUTH_CODE = 0xb4,

    // Format: 10110001 0000xxxx
    // Purpose: pop r[3:0]
    // Constraint: x != 0
    UNWIND_OPCODE_POP_REG_MASK = 0xb100,

    // Format: 10110010 x(uleb128)
    // Purpose: vsp = vsp + ((x << 2) + 0x204)
    UNWIND_OPCODE_INC_VSP_ULEB128 = 0xb2,

    // Format: 10110011 xxxxyyyy
    // Purpose: pop d[(x+y):x]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX = 0xb300,

    // Format: 10111xxx
    // Purpose: pop d[(8+x):8]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX_D8 = 0xb8,

    // Format: 11000xxx
    // Purpose: pop wR[(10+x):10]
    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE_WR10 = 0xc0,

    // Format: 11000110 xxxxyyyy
    // Purpose: pop wR[(x+y):x]
    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE = 0xc600,

    // Format: 11000111 0000xxxx
    // Purpose: pop wCGR[3:0]
    // Constraint: x != 0
    UNWIND_OPCODE_POP_WIRELESS_MMX_REG_MASK = 0xc700,

    // Format: 11001000 xxxxyyyy
    // Purpose: pop d[(16+x+y):(16+x)]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 = 0xc800,

    // Format: 11001001 xxxxyyyy
    // Purpose: pop d[(x+y):x]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD = 0xc900,

    // Format: 11010xxx
    // Purpose: pop d[(8+x):8]
    UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D8 = 0xd0
  };
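
  // Worked example (illustrative): for an UNWIND_OPCODE_INC_VSP byte 0x3f,
  // the operand is x = 0x3f (the low six bits), so the opcode adjusts
  // vsp = vsp + ((0x3f << 2) + 4) = vsp + 256 bytes.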

  /// ARM-defined Personality Routine Index
  enum PersonalityRoutineIndex {
    // To make the exception handling table more compact, ARM defines several
    // personality routines in EHABI.  There are currently 3 different
    // personality routines in ARM EHABI.  At most 16 pre-defined personality
    // routines are possible.
    AEABI_UNWIND_CPP_PR0 = 0,
    AEABI_UNWIND_CPP_PR1 = 1,
    AEABI_UNWIND_CPP_PR2 = 2,

    NUM_PERSONALITY_INDEX
  };
}
}
}

#endif
//===- llvm/Support/Parallel.h - Parallel algorithms ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PARALLEL_H
#define LLVM_SUPPORT_PARALLEL_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Threading.h"

#include <algorithm>
#include <condition_variable>
#include <functional>
#include <mutex>

namespace llvm {

namespace parallel {

// Strategy for the default executor used by the parallel routines provided by
// this file. It defaults to using all hardware threads and should be
// initialized before the first use of parallel routines.
extern ThreadPoolStrategy strategy;

#if LLVM_ENABLE_THREADS
#define GET_THREAD_INDEX_IMPL                                                  \
  if (parallel::strategy.ThreadsRequested == 1)                                \
    return 0;                                                                  \
  assert((threadIndex != UINT_MAX) &&                                          \
         "getThreadIndex() must be called from a thread created by "           \
         "ThreadPoolExecutor");                                                \
  return threadIndex;

#ifdef _WIN32
// Direct access to thread_local variables from a different DLL isn't
// possible with Windows Native TLS.
unsigned getThreadIndex();
#else
// Don't access this directly, use the getThreadIndex wrapper.
extern thread_local unsigned threadIndex;

inline unsigned getThreadIndex() { GET_THREAD_INDEX_IMPL; }
#endif

size_t getThreadCount();
#else
inline unsigned getThreadIndex() { return 0; }
inline size_t getThreadCount() { return 1; }
#endif

namespace detail {
class Latch {
  uint32_t Count;
  mutable std::mutex Mutex;
  mutable std::condition_variable Cond;

public:
  explicit Latch(uint32_t Count = 0) : Count(Count) {}
  ~Latch() {
    // Ensure at least that sync() was called.
    assert(Count == 0);
  }

  void inc() {
    std::lock_guard<std::mutex> lock(Mutex);
    ++Count;
  }

  void dec() {
    std::lock_guard<std::mutex> lock(Mutex);
    if (--Count == 0)
      Cond.notify_all();
  }

  void sync() const {
    std::unique_lock<std::mutex> lock(Mutex);
    Cond.wait(lock, [&] { return Count == 0; });
  }
};
} // namespace detail

class TaskGroup {
  detail::Latch L;
  bool Parallel;

public:
  TaskGroup();
  ~TaskGroup();

  // Spawns a task but does not wait for it to finish.
  // Tasks marked with \p Sequential will be executed
  // exactly in the order in which they were spawned.
  // Note: Sequential tasks may be executed on different
  // threads, but strictly in sequential order.
  void spawn(std::function<void()> f, bool Sequential = false);

  void sync() const { L.sync(); }

  bool isParallel() const { return Parallel; }
};
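
// Illustrative usage sketch (not part of the API): spawn two independent
// tasks and block until both complete.
//
//   parallel::TaskGroup TG;
//   TG.spawn([] { /* work A */ });
//   TG.spawn([] { /* work B */ });
//   TG.sync();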

namespace detail {

#if LLVM_ENABLE_THREADS
const ptrdiff_t MinParallelSize = 1024;

/// Inclusive median.
template <class RandomAccessIterator, class Comparator>
RandomAccessIterator medianOf3(RandomAccessIterator Start,
                               RandomAccessIterator End,
                               const Comparator &Comp) {
  RandomAccessIterator Mid = Start + (std::distance(Start, End) / 2);
  return Comp(*Start, *(End - 1))
             ? (Comp(*Mid, *(End - 1)) ? (Comp(*Start, *Mid) ? Mid : Start)
                                       : End - 1)
             : (Comp(*Mid, *Start) ? (Comp(*(End - 1), *Mid) ? Mid : End - 1)
                                   : Start);
}

template <class RandomAccessIterator, class Comparator>
void parallel_quick_sort(RandomAccessIterator Start, RandomAccessIterator End,
                         const Comparator &Comp, TaskGroup &TG, size_t Depth) {
  // Do a sequential sort for small inputs.
  if (std::distance(Start, End) < detail::MinParallelSize || Depth == 0) {
    llvm::sort(Start, End, Comp);
    return;
  }

  // Partition.
  auto Pivot = medianOf3(Start, End, Comp);
  // Move Pivot to End.
  std::swap(*(End - 1), *Pivot);
  Pivot = std::partition(Start, End - 1, [&Comp, End](decltype(*Start) V) {
    return Comp(V, *(End - 1));
  });
  // Move Pivot to middle of partition.
  std::swap(*Pivot, *(End - 1));

  // Recurse.
  TG.spawn([=, &Comp, &TG] {
    parallel_quick_sort(Start, Pivot, Comp, TG, Depth - 1);
  });
  parallel_quick_sort(Pivot + 1, End, Comp, TG, Depth - 1);
}

template <class RandomAccessIterator, class Comparator>
void parallel_sort(RandomAccessIterator Start, RandomAccessIterator End,
                   const Comparator &Comp) {
  TaskGroup TG;
  parallel_quick_sort(Start, End, Comp, TG,
                      llvm::Log2_64(std::distance(Start, End)) + 1);
}

// TaskGroup has a relatively high overhead, so we want to reduce
// the number of spawn() calls. We'll create up to 1024 tasks here.
// (Note that 1024 is an arbitrary number. This code probably needs
// improving to take the number of available cores into account.)
enum { MaxTasksPerGroup = 1024 };

template <class IterTy, class ResultTy, class ReduceFuncTy,
          class TransformFuncTy>
ResultTy parallel_transform_reduce(IterTy Begin, IterTy End, ResultTy Init,
                                   ReduceFuncTy Reduce,
                                   TransformFuncTy Transform) {
  // Limit the number of tasks to MaxTasksPerGroup to limit job scheduling
  // overhead on large inputs.
  size_t NumInputs = std::distance(Begin, End);
  if (NumInputs == 0)
    return std::move(Init);
  size_t NumTasks = std::min(static_cast<size_t>(MaxTasksPerGroup), NumInputs);
  std::vector<ResultTy> Results(NumTasks, Init);
  {
    // Each task processes either TaskSize or TaskSize+1 inputs. Any inputs
    // remaining after dividing them equally amongst tasks are distributed as
    // one extra input over the first tasks.
    TaskGroup TG;
    size_t TaskSize = NumInputs / NumTasks;
    size_t RemainingInputs = NumInputs % NumTasks;
    IterTy TBegin = Begin;
    for (size_t TaskId = 0; TaskId < NumTasks; ++TaskId) {
      IterTy TEnd = TBegin + TaskSize + (TaskId < RemainingInputs ? 1 : 0);
      TG.spawn([=, &Transform, &Reduce, &Results] {
        // Reduce the result of transformation eagerly within each task.
        ResultTy R = Init;
        for (IterTy It = TBegin; It != TEnd; ++It)
          R = Reduce(R, Transform(*It));
        Results[TaskId] = R;
      });
      TBegin = TEnd;
    }
    assert(TBegin == End);
  }

  // Do a final reduction. There are at most 1024 tasks, so this only adds
  // constant single-threaded overhead for large inputs. Hopefully most
  // reductions are cheaper than the transformation.
  ResultTy FinalResult = std::move(Results.front());
  for (ResultTy &PartialResult :
       MutableArrayRef(Results.data() + 1, Results.size() - 1))
    FinalResult = Reduce(FinalResult, std::move(PartialResult));
  return std::move(FinalResult);
}

#endif

} // namespace detail
} // namespace parallel

template <class RandomAccessIterator,
          class Comparator = std::less<
              typename std::iterator_traits<RandomAccessIterator>::value_type>>
void parallelSort(RandomAccessIterator Start, RandomAccessIterator End,
                  const Comparator &Comp = Comparator()) {
#if LLVM_ENABLE_THREADS
  if (parallel::strategy.ThreadsRequested != 1) {
    parallel::detail::parallel_sort(Start, End, Comp);
    return;
  }
#endif
  llvm::sort(Start, End, Comp);
}

void parallelFor(size_t Begin, size_t End, function_ref<void(size_t)> Fn);

template <class IterTy, class FuncTy>
void parallelForEach(IterTy Begin, IterTy End, FuncTy Fn) {
  parallelFor(0, End - Begin, [&](size_t I) { Fn(Begin[I]); });
}

template <class IterTy, class ResultTy, class ReduceFuncTy,
          class TransformFuncTy>
ResultTy parallelTransformReduce(IterTy Begin, IterTy End, ResultTy Init,
                                 ReduceFuncTy Reduce,
                                 TransformFuncTy Transform) {
#if LLVM_ENABLE_THREADS
  if (parallel::strategy.ThreadsRequested != 1) {
    return parallel::detail::parallel_transform_reduce(Begin, End, Init, Reduce,
                                                       Transform);
  }
#endif
  for (IterTy I = Begin; I != End; ++I)
    Init = Reduce(std::move(Init), Transform(*I));
  return std::move(Init);
}
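
// Illustrative usage sketch ('Strs' is a hypothetical input): sum the lengths
// of a sequence of strings in parallel. The initial value must be copyable.
//
//   std::vector<std::string> Strs = ...;
//   size_t Total = parallelTransformReduce(
//       Strs.begin(), Strs.end(), size_t(0),
//       [](size_t A, size_t B) { return A + B; },
//       [](const std::string &S) { return S.size(); });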

// Range wrappers.
template <class RangeTy,
          class Comparator = std::less<decltype(*std::begin(RangeTy()))>>
void parallelSort(RangeTy &&R, const Comparator &Comp = Comparator()) {
  parallelSort(std::begin(R), std::end(R), Comp);
}

template <class RangeTy, class FuncTy>
void parallelForEach(RangeTy &&R, FuncTy Fn) {
  parallelForEach(std::begin(R), std::end(R), Fn);
}

template <class RangeTy, class ResultTy, class ReduceFuncTy,
          class TransformFuncTy>
ResultTy parallelTransformReduce(RangeTy &&R, ResultTy Init,
                                 ReduceFuncTy Reduce,
                                 TransformFuncTy Transform) {
  return parallelTransformReduce(std::begin(R), std::end(R), Init, Reduce,
                                 Transform);
}

// Parallel for-each, but with error handling.
template <class RangeTy, class FuncTy>
Error parallelForEachError(RangeTy &&R, FuncTy Fn) {
  // The transform_reduce algorithm requires that the initial value be copyable.
  // Error objects are uncopyable. We only need to copy initial success values,
  // so work around this mismatch via the C API. The C API represents success
  // values with a null pointer. The joinErrors discards null values and joins
  // multiple errors into an ErrorList.
  return unwrap(parallelTransformReduce(
      std::begin(R), std::end(R), wrap(Error::success()),
      [](LLVMErrorRef Lhs, LLVMErrorRef Rhs) {
        return wrap(joinErrors(unwrap(Lhs), unwrap(Rhs)));
      },
      [&Fn](auto &&V) { return wrap(Fn(V)); }));
}
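
// Illustrative usage sketch ('Paths' and 'validate' are hypothetical): run a
// fallible check over each element and join any failures into one Error.
//
//   Error E = parallelForEachError(Paths, [](StringRef P) -> Error {
//     return validate(P);
//   });
//   if (E)
//     ... report or consume E ...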

} // namespace llvm

#endif // LLVM_SUPPORT_PARALLEL_H
//===-- llvm/Support/ARMTargetParser.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/ARMTargetParser.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/ARMTargetParser.h"
//===- llvm/Support/Errno.h - Portable+convenient errno handling -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares some portable and convenient functions to deal with errno.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERRNO_H
#define LLVM_SUPPORT_ERRNO_H

#include <cerrno>
#include <string>

namespace llvm {
namespace sys {

/// Returns a string representation of the errno value, using whatever
/// thread-safe variant of strerror() is available.  Be sure to call this
/// immediately after the function that set errno, or errno may have been
/// overwritten by an intervening call.
std::string StrError();

/// Like the no-argument version above, but uses \p errnum instead of errno.
std::string StrError(int errnum);

template <typename FailT, typename Fun, typename... Args>
inline decltype(auto) RetryAfterSignal(const FailT &Fail, const Fun &F,
                                       const Args &... As) {
  decltype(F(As...)) Res;
  do {
    errno = 0;
    Res = F(As...);
  } while (Res == Fail && errno == EINTR);
  return Res;
}
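
// Illustrative usage sketch ('FD', 'Buf', and 'Size' are hypothetical): retry
// a read(2) that may be interrupted by a signal, where -1 is the failure
// value that makes EINTR worth checking.
//
//   ssize_t NumRead = sys::RetryAfterSignal(-1, ::read, FD, Buf, Size);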

}  // namespace sys
}  // namespace llvm

#endif // LLVM_SUPPORT_ERRNO_H
//===- llvm/Support/Memory.h - Memory Support -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::Memory class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MEMORY_H
#define LLVM_SUPPORT_MEMORY_H

#include "llvm/Support/DataTypes.h"
#include <system_error>

namespace llvm {

// Forward declare raw_ostream: it is used for debug dumping below.
class raw_ostream;

namespace sys {

  /// This class encapsulates the notion of a memory block which has an address
  /// and a size. It is used by the Memory class (a friend) as the result of
  /// various memory allocation operations.
  /// @see Memory
  /// Memory block abstraction.
  class MemoryBlock {
  public:
    MemoryBlock() : Address(nullptr), AllocatedSize(0) {}
    MemoryBlock(void *addr, size_t allocatedSize)
        : Address(addr), AllocatedSize(allocatedSize) {}
    void *base() const { return Address; }
    /// The size as it was allocated. This is always greater or equal to the
    /// size that was originally requested.
    size_t allocatedSize() const { return AllocatedSize; }

  private:
    void *Address;    ///< Address of first byte of memory area
    size_t AllocatedSize; ///< Size, in bytes of the memory area
    unsigned Flags = 0;
    friend class Memory;
  };

  /// This class provides various memory handling functions that manipulate
  /// MemoryBlock instances.
  /// @since 1.4
  /// An abstraction for memory operations.
  class Memory {
  public:
    enum ProtectionFlags {
      MF_READ = 0x1000000,
      MF_WRITE = 0x2000000,
      MF_EXEC = 0x4000000,
      MF_RWE_MASK = 0x7000000,

      /// The \p MF_HUGE_HINT flag is used to indicate that the request for
      /// a memory block should be satisfied with large pages if possible.
      /// This is only a hint and small pages will be used as fallback.
      ///
      /// The presence or absence of this flag in the returned memory block
      /// is (at least currently) *not* a reliable indicator that the memory
      /// block will use or will not use large pages. On some systems a request
      /// without this flag can be backed by large pages without this flag being
      /// set, and on some other systems a request with this flag can fall back
      /// to small pages without this flag being cleared.
      MF_HUGE_HINT = 0x0000001
    };

    /// This method allocates a block of memory that is suitable for loading
    /// dynamically generated code (e.g. JIT). An attempt to allocate
    /// \p NumBytes bytes of virtual memory is made.
    /// \p NearBlock may point to an existing allocation in which case
    /// an attempt is made to allocate more memory near the existing block.
    /// The actual allocated address is not guaranteed to be near the requested
    /// address.
    /// \p Flags is used to set the initial protection flags for the block
    /// of the memory.
    /// \p EC [out] returns an object describing any error that occurs.
    ///
    /// This method may allocate more than the number of bytes requested.  The
    /// actual number of bytes allocated is indicated in the returned
    /// MemoryBlock.
    ///
    /// The start of the allocated block must be aligned with the
    /// system allocation granularity (64K on Windows, page size on Linux).
    /// If the address following \p NearBlock is not so aligned, it will be
    /// rounded up to the next allocation granularity boundary.
    ///
    /// \r a non-null MemoryBlock if the function was successful,
    /// otherwise a null MemoryBlock with \p EC describing the error.
    ///
    /// Allocate mapped memory.
    static MemoryBlock allocateMappedMemory(size_t NumBytes,
                                            const MemoryBlock *const NearBlock,
                                            unsigned Flags,
                                            std::error_code &EC);

    /// This method releases a block of memory that was allocated with the
    /// allocateMappedMemory method. It should not be used to release any
    /// memory block allocated any other way.
    /// \p Block describes the memory to be released.
    ///
    /// \r error_success if the function was successful, or an error_code
    /// describing the failure if an error occurred.
    ///
    /// Release mapped memory.
    static std::error_code releaseMappedMemory(MemoryBlock &Block);

    /// This method sets the protection flags for a block of memory to the
    /// state specified by \p Flags.  The behavior is not specified if the
    /// memory was not allocated using the allocateMappedMemory method.
    /// \p Block describes the memory block to be protected.
    /// \p Flags specifies the new protection state to be assigned to the block.
    ///
    /// If \p Flags is MF_WRITE, the actual behavior varies
    /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the
    /// target architecture (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
    ///
    /// \r error_success if the function was successful, or an error_code
    /// describing the failure if an error occurred.
    ///
    /// Set memory protection state.
    static std::error_code protectMappedMemory(const MemoryBlock &Block,
                                               unsigned Flags);

    /// InvalidateInstructionCache - Before the JIT can run a block of code
    /// that has been emitted it must invalidate the instruction cache on some
    /// platforms.
    static void InvalidateInstructionCache(const void *Addr, size_t Len);
  };
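
  // Illustrative usage sketch (not part of the API): allocate a writable
  // block, emit code into it, then flip it to executable before running it.
  //
  //   std::error_code EC;
  //   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
  //       4096, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  //   if (!EC) {
  //     // ... copy generated code into MB.base() ...
  //     sys::Memory::protectMappedMemory(MB, sys::Memory::MF_READ |
  //                                              sys::Memory::MF_EXEC);
  //     sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
  //     sys::Memory::releaseMappedMemory(MB);
  //   }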

  /// Owning version of MemoryBlock.
  class OwningMemoryBlock {
  public:
    OwningMemoryBlock() = default;
    explicit OwningMemoryBlock(MemoryBlock M) : M(M) {}
    OwningMemoryBlock(OwningMemoryBlock &&Other) {
      M = Other.M;
      Other.M = MemoryBlock();
    }
    OwningMemoryBlock& operator=(OwningMemoryBlock &&Other) {
      M = Other.M;
      Other.M = MemoryBlock();
      return *this;
    }
    ~OwningMemoryBlock() {
      if (M.base())
        Memory::releaseMappedMemory(M);
    }
    void *base() const { return M.base(); }
    /// The size as it was allocated. This is always greater or equal to the
    /// size that was originally requested.
    size_t allocatedSize() const { return M.allocatedSize(); }
    MemoryBlock getMemoryBlock() const { return M; }
    std::error_code release() {
      std::error_code EC;
      if (M.base()) {
        EC = Memory::releaseMappedMemory(M);
        M = MemoryBlock();
      }
      return EC;
    }
  private:
    MemoryBlock M;
  };

#ifndef NDEBUG
  /// Debugging output for Memory::ProtectionFlags.
  raw_ostream &operator<<(raw_ostream &OS, const Memory::ProtectionFlags &PF);

  /// Debugging output for MemoryBlock.
  raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB);
#endif // ifndef NDEBUG
  }    // end namespace sys
  }    // end namespace llvm

#endif
//===--- MSVCErrorWorkarounds.h - Enable future<Error> in MSVC --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MSVC's promise/future implementation requires types to be default
// constructible, so this header provides analogues of Error and Expected
// that are default constructed in a safely destructible state.
//
// FIXME: Kill off this header and migrate all users to Error/Expected once we
//        move to MSVC versions that support non-default-constructible types.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MSVCERRORWORKAROUNDS_H
#define LLVM_SUPPORT_MSVCERRORWORKAROUNDS_H

#include "llvm/Support/Error.h"

namespace llvm {

// A default-constructible llvm::Error that is suitable for use with MSVC's
// std::future implementation, which requires default constructible types.
class MSVCPError : public Error {
public:
  MSVCPError() { (void)!!*this; }

  MSVCPError(MSVCPError &&Other) : Error(std::move(Other)) {}

  MSVCPError &operator=(MSVCPError Other) {
    Error::operator=(std::move(Other));
    return *this;
  }

  MSVCPError(Error Err) : Error(std::move(Err)) {}
};

// A default-constructible llvm::Expected that is suitable for use with MSVC's
// std::future implementation, which requires default constructible types.
template <typename T> class MSVCPExpected : public Expected<T> {
public:
  MSVCPExpected()
      : Expected<T>(make_error<StringError>("", inconvertibleErrorCode())) {
    consumeError(this->takeError());
  }

  MSVCPExpected(MSVCPExpected &&Other) : Expected<T>(std::move(Other)) {}

  MSVCPExpected &operator=(MSVCPExpected &&Other) {
    Expected<T>::operator=(std::move(Other));
    return *this;
  }

  MSVCPExpected(Error Err) : Expected<T>(std::move(Err)) {}

  template <typename OtherT>
  MSVCPExpected(
      OtherT &&Val,
      std::enable_if_t<std::is_convertible<OtherT, T>::value> * = nullptr)
      : Expected<T>(std::move(Val)) {}

  template <class OtherT>
  MSVCPExpected(
      Expected<OtherT> &&Other,
      std::enable_if_t<std::is_convertible<OtherT, T>::value> * = nullptr)
      : Expected<T>(std::move(Other)) {}

  template <class OtherT>
  explicit MSVCPExpected(
      Expected<OtherT> &&Other,
      std::enable_if_t<!std::is_convertible<OtherT, T>::value> * = nullptr)
      : Expected<T>(std::move(Other)) {}
};
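
// Illustrative usage sketch (not part of the API): because MSVCPExpected is
// default constructible, it can be carried through MSVC's std::promise.
//
//   std::promise<MSVCPExpected<int>> P;
//   std::future<MSVCPExpected<int>> F = P.get_future();
//   P.set_value(MSVCPExpected<int>(42));
//   MSVCPExpected<int> R = F.get();
//   if (R)
//     ... use *R ...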

} // end namespace llvm

#endif // LLVM_SUPPORT_MSVCERRORWORKAROUNDS_H
/* -*- C++ -*-
 * This code is derived from (original license follows):
 *
 * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
 * MD5 Message-Digest Algorithm (RFC 1321).
 *
 * Homepage:
 * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
 *
 * Author:
 * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
 *
 * This software was written by Alexander Peslyak in 2001.  No copyright is
 * claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the
 * public domain is deemed null and void, then the software is
 * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 *
 * See md5.c for more information.
 */

#ifndef LLVM_SUPPORT_MD5_H
#define LLVM_SUPPORT_MD5_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Endian.h"
#include <array>
#include <cstdint>

namespace llvm {

template <unsigned N> class SmallString;
template <typename T> class ArrayRef;

class MD5 {
public:
  struct MD5Result : public std::array<uint8_t, 16> {
    SmallString<32> digest() const;

    uint64_t low() const {
      // Our MD5 implementation returns the result in little endian, so the low
      // word is first.
      using namespace support;
      return endian::read<uint64_t, little, unaligned>(data());
    }

    uint64_t high() const {
      using namespace support;
      return endian::read<uint64_t, little, unaligned>(data() + 8);
    }
    std::pair<uint64_t, uint64_t> words() const {
      using namespace support;
      return std::make_pair(high(), low());
    }
  };

  MD5();

  /// Updates the hash for the byte stream provided.
  void update(ArrayRef<uint8_t> Data);

  /// Updates the hash for the StringRef provided.
  void update(StringRef Str);

  /// Finishes off the hash and puts the result in result.
  void final(MD5Result &Result);

  /// Finishes off the hash, and returns the 16-byte hash data.
  MD5Result final();

  /// Finishes off the hash, and returns the 16-byte hash data.
  /// This is suitable for getting the MD5 at any time without invalidating the
  /// internal state, so that more calls can be made into `update`.
  MD5Result result();

  /// Translates the bytes in \p Res to a hex string that is
  /// deposited into \p Str. The result will be of length 32.
  static void stringifyResult(MD5Result &Result, SmallVectorImpl<char> &Str);

  /// Computes the hash for the given bytes.
  static MD5Result hash(ArrayRef<uint8_t> Data);

private:
  // Any 32-bit or wider unsigned integer data type will do.
  typedef uint32_t MD5_u32plus;

  // Internal State
  struct {
    MD5_u32plus a = 0x67452301;
    MD5_u32plus b = 0xefcdab89;
    MD5_u32plus c = 0x98badcfe;
    MD5_u32plus d = 0x10325476;
    MD5_u32plus hi = 0;
    MD5_u32plus lo = 0;
    uint8_t buffer[64];
    MD5_u32plus block[16];
  } InternalState;

  const uint8_t *body(ArrayRef<uint8_t> Data);
};
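
// Illustrative usage sketch (not part of the API): hash a string and render
// the 128-bit result as a 32-character hex digest.
//
//   MD5 Hash;
//   Hash.update("some data");
//   MD5::MD5Result Result = Hash.final();
//   SmallString<32> Hex = Result.digest();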

/// Helper to compute and return lower 64 bits of the given string's MD5 hash.
inline uint64_t MD5Hash(StringRef Str) {
  using namespace support;

  MD5 Hash;
  Hash.update(Str);
  MD5::MD5Result Result;
  Hash.final(Result);
  // Return the least significant word.
  return Result.low();
}
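
// Example usage (illustrative sketch): hashing incrementally with the MD5
// class, and with the one-shot MD5Hash helper above on the same input.
//
//   MD5 Hash;
//   Hash.update("hello ");
//   Hash.update("world");
//   MD5::MD5Result Result = Hash.final();
//   SmallString<32> Hex = Result.digest();  // 32-character hex string
//
//   uint64_t Low = MD5Hash("hello world");  // low 64 bits of the same digest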

} // end namespace llvm

#endif // LLVM_SUPPORT_MD5_H
//===- TypeSize.h - Wrapper around type sizes -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a struct that can be used to query the size of IR types
// which may be scalable vectors. It provides convenience operators so that
// it can be used in much the same way as a single scalar value.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TYPESIZE_H
#define LLVM_SUPPORT_TYPESIZE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <type_traits>

namespace llvm {

/// Reports a diagnostic message to indicate that an invalid size request has
/// been made on a scalable vector. This function may not return.
void reportInvalidSizeRequest(const char *Msg);

/// StackOffset holds a fixed and a scalable offset in bytes.
class StackOffset {
  int64_t Fixed = 0;
  int64_t Scalable = 0;

  StackOffset(int64_t Fixed, int64_t Scalable)
      : Fixed(Fixed), Scalable(Scalable) {}

public:
  StackOffset() = default;
  static StackOffset getFixed(int64_t Fixed) { return {Fixed, 0}; }
  static StackOffset getScalable(int64_t Scalable) { return {0, Scalable}; }
  static StackOffset get(int64_t Fixed, int64_t Scalable) {
    return {Fixed, Scalable};
  }

  /// Returns the fixed component of the stack.
  int64_t getFixed() const { return Fixed; }

  /// Returns the scalable component of the stack.
  int64_t getScalable() const { return Scalable; }

  // Arithmetic operations.
  StackOffset operator+(const StackOffset &RHS) const {
    return {Fixed + RHS.Fixed, Scalable + RHS.Scalable};
  }
  StackOffset operator-(const StackOffset &RHS) const {
    return {Fixed - RHS.Fixed, Scalable - RHS.Scalable};
  }
  StackOffset &operator+=(const StackOffset &RHS) {
    Fixed += RHS.Fixed;
    Scalable += RHS.Scalable;
    return *this;
  }
  StackOffset &operator-=(const StackOffset &RHS) {
    Fixed -= RHS.Fixed;
    Scalable -= RHS.Scalable;
    return *this;
  }
  StackOffset operator-() const { return {-Fixed, -Scalable}; }

  // Equality comparisons.
  bool operator==(const StackOffset &RHS) const {
    return Fixed == RHS.Fixed && Scalable == RHS.Scalable;
  }
  bool operator!=(const StackOffset &RHS) const {
    return Fixed != RHS.Fixed || Scalable != RHS.Scalable;
  }

  // The bool operator returns true iff any of the components is non zero.
  explicit operator bool() const { return Fixed != 0 || Scalable != 0; }
};
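
// Example (illustrative): an offset of 16 fixed bytes plus 8 scalable bytes,
// i.e. 16 + 8 * vscale bytes at runtime.
//
//   StackOffset Off = StackOffset::get(/*Fixed=*/16, /*Scalable=*/8);
//   Off += StackOffset::getFixed(4); // now 20 + 8 * vscale
//   if (Off)                         // true: a component is non-zero
//     ...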

namespace details {

// Base class for ElementCount and TypeSize below.
template <typename LeafTy, typename ValueTy> class FixedOrScalableQuantity {
public:
  using ScalarTy = ValueTy;

protected:
  ScalarTy Quantity = 0;
  bool Scalable = false;

  constexpr FixedOrScalableQuantity() = default;
  constexpr FixedOrScalableQuantity(ScalarTy Quantity, bool Scalable)
      : Quantity(Quantity), Scalable(Scalable) {}

  friend constexpr LeafTy &operator+=(LeafTy &LHS, const LeafTy &RHS) {
    assert(LHS.Scalable == RHS.Scalable && "Incompatible types");
    LHS.Quantity += RHS.Quantity;
    return LHS;
  }

  friend constexpr LeafTy &operator-=(LeafTy &LHS, const LeafTy &RHS) {
    assert(LHS.Scalable == RHS.Scalable && "Incompatible types");
    LHS.Quantity -= RHS.Quantity;
    return LHS;
  }

  friend constexpr LeafTy &operator*=(LeafTy &LHS, ScalarTy RHS) {
    LHS.Quantity *= RHS;
    return LHS;
  }

  friend constexpr LeafTy operator+(const LeafTy &LHS, const LeafTy &RHS) {
    LeafTy Copy = LHS;
    return Copy += RHS;
  }

  friend constexpr LeafTy operator-(const LeafTy &LHS, const LeafTy &RHS) {
    LeafTy Copy = LHS;
    return Copy -= RHS;
  }

  friend constexpr LeafTy operator*(const LeafTy &LHS, ScalarTy RHS) {
    LeafTy Copy = LHS;
    return Copy *= RHS;
  }

  template <typename U = ScalarTy>
  friend constexpr std::enable_if_t<std::is_signed_v<U>, LeafTy>
  operator-(const LeafTy &LHS) {
    LeafTy Copy = LHS;
    return Copy *= -1;
  }

public:
  constexpr bool operator==(const FixedOrScalableQuantity &RHS) const {
    return Quantity == RHS.Quantity && Scalable == RHS.Scalable;
  }

  constexpr bool operator!=(const FixedOrScalableQuantity &RHS) const {
    return Quantity != RHS.Quantity || Scalable != RHS.Scalable;
  }

  constexpr bool isZero() const { return Quantity == 0; }

  constexpr bool isNonZero() const { return Quantity != 0; }

  explicit operator bool() const { return isNonZero(); }

  /// Add \p RHS to the underlying quantity.
  constexpr LeafTy getWithIncrement(ScalarTy RHS) const {
    return LeafTy::get(Quantity + RHS, Scalable);
  }

  /// Returns the minimum value this quantity can represent.
  constexpr ScalarTy getKnownMinValue() const { return Quantity; }

  /// Returns whether the quantity is scaled by a runtime quantity (vscale).
  constexpr bool isScalable() const { return Scalable; }

  /// A return value of true indicates we know at compile time that the number
  /// of elements (vscale * Min) is definitely even. However, returning false
  /// does not guarantee that the total number of elements is odd.
  constexpr bool isKnownEven() const { return (getKnownMinValue() & 0x1) == 0; }

  /// This function tells the caller whether the element count is known at
  /// compile time to be a multiple of the scalar value RHS.
  constexpr bool isKnownMultipleOf(ScalarTy RHS) const {
    return getKnownMinValue() % RHS == 0;
  }

  // Return the minimum value with the assumption that the count is exact.
  // Use in places where a scalable count doesn't make sense (e.g. non-vector
  // types, or vectors in backends which don't support scalable vectors).
  constexpr ScalarTy getFixedValue() const {
    assert(!isScalable() &&
           "Request for a fixed element count on a scalable object");
    return getKnownMinValue();
  }

  // For some cases, quantity ordering between scalable and fixed quantity types
  // cannot be determined at compile time, so such comparisons aren't allowed.
  //
  // e.g. <vscale x 2 x i16> could be bigger than <4 x i32> with a runtime
  // vscale >= 5, equal sized with a vscale of 4, and smaller with
  // a vscale <= 3.
  //
  // All the functions below make use of the fact vscale is always >= 1, which
  // means that <vscale x 4 x i32> is guaranteed to be >= <4 x i32>, etc.

  static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS,
                                  const FixedOrScalableQuantity &RHS) {
    if (!LHS.isScalable() || RHS.isScalable())
      return LHS.getKnownMinValue() < RHS.getKnownMinValue();
    return false;
  }

  static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS,
                                  const FixedOrScalableQuantity &RHS) {
    if (LHS.isScalable() || !RHS.isScalable())
      return LHS.getKnownMinValue() > RHS.getKnownMinValue();
    return false;
  }

  static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS,
                                  const FixedOrScalableQuantity &RHS) {
    if (!LHS.isScalable() || RHS.isScalable())
      return LHS.getKnownMinValue() <= RHS.getKnownMinValue();
    return false;
  }

  static constexpr bool isKnownGE(const FixedOrScalableQuantity &LHS,
                                  const FixedOrScalableQuantity &RHS) {
    if (LHS.isScalable() || !RHS.isScalable())
      return LHS.getKnownMinValue() >= RHS.getKnownMinValue();
    return false;
  }
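
  // Example (illustrative): since vscale >= 1, a scalable quantity of N is
  // known to be >= a fixed N, but not known to be strictly greater:
  //   isKnownLE(TypeSize::getFixed(16), TypeSize::getScalable(16)); // true
  //   isKnownLT(TypeSize::getFixed(16), TypeSize::getScalable(16)); // false,
  //   because vscale == 1 would make the two sizes equal.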

  /// We do not provide the '/' operator here because division for polynomial
  /// types does not work in the same way as for normal integer types. We can
  /// only divide the minimum value (or coefficient) by RHS, which is not the
  /// same as
  ///   (Min * Vscale) / RHS
  /// The caller is recommended to use this function in combination with
  /// isKnownMultipleOf(RHS), which lets the caller know if it's possible to
  /// perform a lossless divide by RHS.
  constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const {
    return LeafTy::get(getKnownMinValue() / RHS, isScalable());
  }
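
  // Example (illustrative): halving <vscale x 8 x i1> losslessly.
  //   ElementCount EC = ElementCount::getScalable(8);
  //   if (EC.isKnownMultipleOf(2))
  //     EC = EC.divideCoefficientBy(2); // vscale x 4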

  constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const {
    return LeafTy::get(getKnownMinValue() * RHS, isScalable());
  }

  constexpr LeafTy coefficientNextPowerOf2() const {
    return LeafTy::get(
        static_cast<ScalarTy>(llvm::NextPowerOf2(getKnownMinValue())),
        isScalable());
  }

  /// Returns true if there exists a value X where RHS.multiplyCoefficientBy(X)
  /// will result in a value whose quantity matches our own.
  constexpr bool
  hasKnownScalarFactor(const FixedOrScalableQuantity &RHS) const {
    return isScalable() == RHS.isScalable() &&
           getKnownMinValue() % RHS.getKnownMinValue() == 0;
  }

  /// Returns a value X where RHS.multiplyCoefficientBy(X) will result in a
  /// value whose quantity matches our own.
  constexpr ScalarTy
  getKnownScalarFactor(const FixedOrScalableQuantity &RHS) const {
    assert(hasKnownScalarFactor(RHS) && "Expected RHS to be a known factor!");
    return getKnownMinValue() / RHS.getKnownMinValue();
  }

  /// Printing function.
  void print(raw_ostream &OS) const {
    if (isScalable())
      OS << "vscale x ";
    OS << getKnownMinValue();
  }
};

} // namespace details

// Stores the number of elements for a type and whether this type is fixed
// (N-Elements) or scalable (e.g., SVE).
//  - ElementCount::getFixed(1) : A scalar value.
//  - ElementCount::getFixed(2) : A vector type holding 2 values.
//  - ElementCount::getScalable(4) : A scalable vector type holding 4 values.
class ElementCount
    : public details::FixedOrScalableQuantity<ElementCount, unsigned> {
  constexpr ElementCount(ScalarTy MinVal, bool Scalable)
      : FixedOrScalableQuantity(MinVal, Scalable) {}

  constexpr ElementCount(
      const FixedOrScalableQuantity<ElementCount, unsigned> &V)
      : FixedOrScalableQuantity(V) {}

public:
  constexpr ElementCount() : FixedOrScalableQuantity() {}

  static constexpr ElementCount getFixed(ScalarTy MinVal) {
    return ElementCount(MinVal, false);
  }
  static constexpr ElementCount getScalable(ScalarTy MinVal) {
    return ElementCount(MinVal, true);
  }
  static constexpr ElementCount get(ScalarTy MinVal, bool Scalable) {
    return ElementCount(MinVal, Scalable);
  }

  /// Exactly one element.
  constexpr bool isScalar() const {
    return !isScalable() && getKnownMinValue() == 1;
  }
  /// One or more elements.
  constexpr bool isVector() const {
    return (isScalable() && getKnownMinValue() != 0) || getKnownMinValue() > 1;
  }
};
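
// Example usage (illustrative):
//   ElementCount::getFixed(1).isScalar();            // true
//   ElementCount::getFixed(4).isVector();            // true: <4 x ...>
//   ElementCount EC = ElementCount::getScalable(4);  // <vscale x 4 x ...>
//   EC.getKnownMinValue(); // 4; the actual element count is 4 * vscale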

// Stores the size of a type. If the type is of fixed size, it will represent
// the exact size. If the type is a scalable vector, it will represent the known
// minimum size.
class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
  TypeSize(const FixedOrScalableQuantity<TypeSize, uint64_t> &V)
      : FixedOrScalableQuantity(V) {}

public:
  constexpr TypeSize(ScalarTy Quantity, bool Scalable)
      : FixedOrScalableQuantity(Quantity, Scalable) {}

  static constexpr TypeSize getFixed(ScalarTy ExactSize) {
    return TypeSize(ExactSize, false);
  }
  static constexpr TypeSize getScalable(ScalarTy MinimumSize) {
    return TypeSize(MinimumSize, true);
  }
  static constexpr TypeSize get(ScalarTy Quantity, bool Scalable) {
    return TypeSize(Quantity, Scalable);
  }
  static constexpr TypeSize Fixed(ScalarTy ExactSize) {
    return TypeSize(ExactSize, false);
  }
  static constexpr TypeSize Scalable(ScalarTy MinimumSize) {
    return TypeSize(MinimumSize, true);
  }

  LLVM_DEPRECATED("Use getFixedValue() instead", "getFixedValue")
  constexpr ScalarTy getFixedSize() const { return getFixedValue(); }
  
  LLVM_DEPRECATED("Use getKnownMinValue() instead", "getKnownMinValue")
  constexpr ScalarTy getKnownMinSize() const { return getKnownMinValue(); }

  // All code for this class below this point is needed because of the
  // temporary implicit conversion to uint64_t. The operator overloads are
  // needed because otherwise the conversion of the parent class
  // FixedOrScalableQuantity -> TypeSize is ambiguous.
  // TODO: Remove the implicit conversion.

  // Casts to a uint64_t if this is a fixed-width size.
  //
  // This interface is deprecated and will be removed in a future version
  // of LLVM in favour of upgrading uses that rely on this implicit conversion
  // to uint64_t. Calls to functions that return a TypeSize should use the
  // proper interfaces to TypeSize.
  // In practice this is mostly calls to MVT/EVT::getSizeInBits().
  //
  // To determine how to upgrade the code:
  //
  //   if (<algorithm works for both scalable and fixed-width vectors>)
  //     use getKnownMinValue()
  //   else if (<algorithm works only for fixed-width vectors>) {
  //     if <algorithm can be adapted for both scalable and fixed-width vectors>
  //       update the algorithm and use getKnownMinValue()
  //     else
  //       bail out early for scalable vectors and use getFixedValue()
  //   }
  operator ScalarTy() const;

  // Additional operators needed to avoid ambiguous parses
  // because of the implicit conversion hack.
  friend constexpr TypeSize operator*(const TypeSize &LHS, const int RHS) {
    return LHS * (ScalarTy)RHS;
  }
  friend constexpr TypeSize operator*(const TypeSize &LHS, const unsigned RHS) {
    return LHS * (ScalarTy)RHS;
  }
  friend constexpr TypeSize operator*(const TypeSize &LHS, const int64_t RHS) {
    return LHS * (ScalarTy)RHS;
  }
  friend constexpr TypeSize operator*(const int LHS, const TypeSize &RHS) {
    return RHS * LHS;
  }
  friend constexpr TypeSize operator*(const unsigned LHS, const TypeSize &RHS) {
    return RHS * LHS;
  }
  friend constexpr TypeSize operator*(const int64_t LHS, const TypeSize &RHS) {
    return RHS * LHS;
  }
  friend constexpr TypeSize operator*(const uint64_t LHS, const TypeSize &RHS) {
    return RHS * LHS;
  }
};
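
// Example usage (illustrative): prefer the explicit accessors over the
// deprecated implicit conversion to uint64_t.
//   TypeSize Fixed = TypeSize::getFixed(64);
//   TypeSize Scalable = TypeSize::getScalable(64);
//   Fixed.getFixedValue();       // 64
//   Scalable.getKnownMinValue(); // 64; the real size is 64 * vscale
//   Scalable.getFixedValue();    // asserts: the size is not fixed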

//===----------------------------------------------------------------------===//
// Utilities
//===----------------------------------------------------------------------===//

/// Returns a TypeSize with a known minimum size that is the next integer
/// (mod 2**64) that is greater than or equal to \p Quantity and is a multiple
/// of \p Align. \p Align must be non-zero.
///
/// Similar to the alignTo functions in MathExtras.h
inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
  assert(Align != 0u && "Align must be non-zero");
  return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
          Size.isScalable()};
}
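
// Example: alignTo(TypeSize::getFixed(10), 8) yields a fixed size of 16,
// while alignTo(TypeSize::getScalable(10), 8) yields a scalable size whose
// known minimum is 16 (i.e. 16 * vscale overall).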

/// Stream operator function for `FixedOrScalableQuantity`.
template <typename LeafTy, typename ScalarTy>
inline raw_ostream &
operator<<(raw_ostream &OS,
           const details::FixedOrScalableQuantity<LeafTy, ScalarTy> &PS) {
  PS.print(OS);
  return OS;
}

template <> struct DenseMapInfo<ElementCount, void> {
  static inline ElementCount getEmptyKey() {
    return ElementCount::getScalable(~0U);
  }
  static inline ElementCount getTombstoneKey() {
    return ElementCount::getFixed(~0U - 1);
  }
  static unsigned getHashValue(const ElementCount &EltCnt) {
    unsigned HashVal = EltCnt.getKnownMinValue() * 37U;
    if (EltCnt.isScalable())
      return (HashVal - 1U);

    return HashVal;
  }
  static bool isEqual(const ElementCount &LHS, const ElementCount &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_SUPPORT_TYPESIZE_H
//===- GenericLoopInfoImpl.h - Generic Loop Info Implementation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of GenericLoopInfo. It should only be
// included in files that explicitly instantiate a GenericLoopInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GENERICLOOPINFOIMPL_H
#define LLVM_SUPPORT_GENERICLOOPINFOIMPL_H

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Support/GenericLoopInfo.h"

namespace llvm {

//===----------------------------------------------------------------------===//
// APIs for simple analysis of the loop. See header notes.

/// getExitingBlocks - Return all blocks inside the loop that have successors
/// outside of the loop.  These are the blocks _inside of the current loop_
/// which branch out.  The returned list is always unique.
///
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitingBlocks(
    SmallVectorImpl<BlockT *> &ExitingBlocks) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  for (const auto BB : blocks())
    for (auto *Succ : children<BlockT *>(BB))
      if (!contains(Succ)) {
        // Not in current loop? It must be an exit block.
        ExitingBlocks.push_back(BB);
        break;
      }
}

/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  auto notInLoop = [&](BlockT *BB) { return !contains(BB); };
  auto isExitBlock = [&](BlockT *BB, bool AllowRepeats) -> BlockT * {
    assert(!AllowRepeats && "Unexpected parameter value.");
    // Child not in current loop?  It must be an exit block.
    return any_of(children<BlockT *>(BB), notInLoop) ? BB : nullptr;
  };

  return find_singleton<BlockT>(blocks(), isExitBlock);
}

/// getExitBlocks - Return all of the successor blocks of this loop.  These
/// are the blocks _outside of the current loop_ which are branched to.
///
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitBlocks(
    SmallVectorImpl<BlockT *> &ExitBlocks) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  for (const auto BB : blocks())
    for (auto *Succ : children<BlockT *>(BB))
      if (!contains(Succ))
        // Not in current loop? It must be an exit block.
        ExitBlocks.push_back(Succ);
}

/// getExitBlockHelper - Helper for getExitBlock and getUniqueExitBlock: if the
/// loop has exactly one (or, when \p Unique is set, exactly one unique) exit
/// block, return it; otherwise return null.
template <class BlockT, class LoopT>
std::pair<BlockT *, bool> getExitBlockHelper(const LoopBase<BlockT, LoopT> *L,
                                             bool Unique) {
  assert(!L->isInvalid() && "Loop not in a valid state!");
  auto notInLoop = [&](BlockT *BB,
                       bool AllowRepeats) -> std::pair<BlockT *, bool> {
    assert(AllowRepeats == Unique && "Unexpected parameter value.");
    return {!L->contains(BB) ? BB : nullptr, false};
  };
  auto singleExitBlock = [&](BlockT *BB,
                             bool AllowRepeats) -> std::pair<BlockT *, bool> {
    assert(AllowRepeats == Unique && "Unexpected parameter value.");
    return find_singleton_nested<BlockT>(children<BlockT *>(BB), notInLoop,
                                         AllowRepeats);
  };
  return find_singleton_nested<BlockT>(L->blocks(), singleExitBlock, Unique);
}

template <class BlockT, class LoopT>
bool LoopBase<BlockT, LoopT>::hasNoExitBlocks() const {
  auto RC = getExitBlockHelper(this, false);
  if (RC.second)
    // found multiple exit blocks
    return false;
  // return true if there is no exit block
  return !RC.first;
}

/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
  return getExitBlockHelper(this, false).first;
}

template <class BlockT, class LoopT>
bool LoopBase<BlockT, LoopT>::hasDedicatedExits() const {
  // Each predecessor of each exit block of a normal loop is contained
  // within the loop.
  SmallVector<BlockT *, 4> UniqueExitBlocks;
  getUniqueExitBlocks(UniqueExitBlocks);
  for (BlockT *EB : UniqueExitBlocks)
    for (BlockT *Predecessor : children<Inverse<BlockT *>>(EB))
      if (!contains(Predecessor))
        return false;
  // All the requirements are met.
  return true;
}

// Helper function to get unique loop exits. Pred is a predicate pointing to
// BasicBlocks in a loop which should be considered to find loop exits.
template <class BlockT, class LoopT, typename PredicateT>
void getUniqueExitBlocksHelper(const LoopT *L,
                               SmallVectorImpl<BlockT *> &ExitBlocks,
                               PredicateT Pred) {
  assert(!L->isInvalid() && "Loop not in a valid state!");
  SmallPtrSet<BlockT *, 32> Visited;
  auto Filtered = make_filter_range(L->blocks(), Pred);
  for (BlockT *BB : Filtered)
    for (BlockT *Successor : children<BlockT *>(BB))
      if (!L->contains(Successor))
        if (Visited.insert(Successor).second)
          ExitBlocks.push_back(Successor);
}

template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getUniqueExitBlocks(
    SmallVectorImpl<BlockT *> &ExitBlocks) const {
  getUniqueExitBlocksHelper(this, ExitBlocks,
                            [](const BlockT *BB) { return true; });
}

template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getUniqueNonLatchExitBlocks(
    SmallVectorImpl<BlockT *> &ExitBlocks) const {
  const BlockT *Latch = getLoopLatch();
  assert(Latch && "Latch block must exist");
  getUniqueExitBlocksHelper(this, ExitBlocks,
                            [Latch](const BlockT *BB) { return BB != Latch; });
}

template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getUniqueExitBlock() const {
  return getExitBlockHelper(this, true).first;
}

/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitEdges(
    SmallVectorImpl<Edge> &ExitEdges) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  for (const auto BB : blocks())
    for (auto *Succ : children<BlockT *>(BB))
      if (!contains(Succ))
        // Not in current loop? It must be an exit block.
        ExitEdges.emplace_back(BB, Succ);
}

namespace detail {
template <class BlockT>
using has_hoist_check = decltype(&BlockT::isLegalToHoistInto);

template <class BlockT>
using detect_has_hoist_check = llvm::is_detected<has_hoist_check, BlockT>;

/// SFINAE functions that dispatch to the isLegalToHoistInto member function or
/// return false, if it doesn't exist.
template <class BlockT> bool isLegalToHoistInto(BlockT *Block) {
  if constexpr (detect_has_hoist_check<BlockT>::value)
    return Block->isLegalToHoistInto();
  return false;
}
} // namespace detail

/// getLoopPreheader - If there is a preheader for this loop, return it.  A
/// loop has a preheader if there is only one edge to the header of the loop
/// from outside of the loop and it is legal to hoist instructions into the
/// predecessor. If this is the case, the block branching to the header of the
/// loop is the preheader node.
///
/// This method returns null if there is no preheader for the loop.
///
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  // Keep track of nodes outside the loop branching to the header...
  BlockT *Out = getLoopPredecessor();
  if (!Out)
    return nullptr;

  // Make sure we are allowed to hoist instructions into the predecessor.
  if (!detail::isLegalToHoistInto(Out))
    return nullptr;

  // Make sure there is only one exit out of the preheader.
  typedef GraphTraits<BlockT *> BlockTraits;
  typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
  ++SI;
  if (SI != BlockTraits::child_end(Out))
    return nullptr; // Multiple exits from the block, must not be a preheader.

  // The predecessor has exactly one successor, so it is a preheader.
  return Out;
}

/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  // Keep track of nodes outside the loop branching to the header...
  BlockT *Out = nullptr;

  // Loop over the predecessors of the header node...
  BlockT *Header = getHeader();
  for (const auto Pred : children<Inverse<BlockT *>>(Header)) {
    if (!contains(Pred)) { // If the block is not in the loop...
      if (Out && Out != Pred)
        return nullptr; // Multiple predecessors outside the loop
      Out = Pred;
    }
  }

  return Out;
}

/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  BlockT *Header = getHeader();
  BlockT *Latch = nullptr;
  for (const auto Pred : children<Inverse<BlockT *>>(Header)) {
    if (contains(Pred)) {
      if (Latch)
        return nullptr;
      Latch = Pred;
    }
  }

  return Latch;
}
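
// Example (illustrative, using the standard llvm::Loop instantiation of this
// template):
//   if (BasicBlock *Preheader = L->getLoopPreheader())
//     ...; // unique out-of-loop predecessor with a single successor
//   if (BasicBlock *Latch = L->getLoopLatch())
//     ...; // unique in-loop block branching back to the header
//   SmallVector<BasicBlock *, 8> ExitingBlocks;
//   L->getExitingBlocks(ExitingBlocks);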

//===----------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
//

/// addBasicBlockToLoop - This method is used by other analyses to update loop
/// information.  NewBB is set to be a new member of the current loop.
/// Because of this, it is added as a member of all parent loops, and is
/// recorded in the specified LoopInfo object as belonging to this loop.  It
/// is not valid to replace the loop header with this method.
///
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::addBasicBlockToLoop(
    BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
  assert(!isInvalid() && "Loop not in a valid state!");
#ifndef NDEBUG
  if (!Blocks.empty()) {
    auto SameHeader = LIB[getHeader()];
    assert(contains(SameHeader) && getHeader() == SameHeader->getHeader() &&
           "Incorrect LI specified for this loop!");
  }
#endif
  assert(NewBB && "Cannot add a null basic block to the loop!");
  assert(!LIB[NewBB] && "BasicBlock already in the loop!");

  LoopT *L = static_cast<LoopT *>(this);

  // Add the loop mapping to the LoopInfo object...
  LIB.BBMap[NewBB] = L;

  // Add the basic block to this loop and all parent loops...
  while (L) {
    L->addBlockEntry(NewBB);
    L = L->getParentLoop();
  }
}

/// replaceChildLoopWith - This is used when splitting loops up.  It replaces
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::replaceChildLoopWith(LoopT *OldChild,
                                                   LoopT *NewChild) {
  assert(!isInvalid() && "Loop not in a valid state!");
  assert(OldChild->ParentLoop == this && "This loop is already broken!");
  assert(!NewChild->ParentLoop && "NewChild already has a parent!");
  typename std::vector<LoopT *>::iterator I = find(SubLoops, OldChild);
  assert(I != SubLoops.end() && "OldChild not in loop!");
  *I = NewChild;
  OldChild->ParentLoop = nullptr;
  NewChild->ParentLoop = static_cast<LoopT *>(this);
}

/// verifyLoop - Verify loop structure
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoop() const {
  assert(!isInvalid() && "Loop not in a valid state!");
#ifndef NDEBUG
  assert(!Blocks.empty() && "Loop header is missing");

  // Setup for using a depth-first iterator to visit every block in the loop.
  SmallVector<BlockT *, 8> ExitBBs;
  getExitBlocks(ExitBBs);
  df_iterator_default_set<BlockT *> VisitSet;
  VisitSet.insert(ExitBBs.begin(), ExitBBs.end());

  // Keep track of the BBs visited.
  SmallPtrSet<BlockT *, 8> VisitedBBs;

  // Check the individual blocks.
  for (BlockT *BB : depth_first_ext(getHeader(), VisitSet)) {
    assert(std::any_of(GraphTraits<BlockT *>::child_begin(BB),
                       GraphTraits<BlockT *>::child_end(BB),
                       [&](BlockT *B) { return contains(B); }) &&
           "Loop block has no in-loop successors!");

    assert(std::any_of(GraphTraits<Inverse<BlockT *>>::child_begin(BB),
                       GraphTraits<Inverse<BlockT *>>::child_end(BB),
                       [&](BlockT *B) { return contains(B); }) &&
           "Loop block has no in-loop predecessors!");

    SmallVector<BlockT *, 2> OutsideLoopPreds;
    for (BlockT *B :
         llvm::make_range(GraphTraits<Inverse<BlockT *>>::child_begin(BB),
                          GraphTraits<Inverse<BlockT *>>::child_end(BB)))
      if (!contains(B))
        OutsideLoopPreds.push_back(B);

    if (BB == getHeader()) {
      assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
    } else if (!OutsideLoopPreds.empty()) {
      // A non-header loop shouldn't be reachable from outside the loop,
      // though it is permitted if the predecessor is not itself actually
      // reachable.
      BlockT *EntryBB = &BB->getParent()->front();
      for (BlockT *CB : depth_first(EntryBB))
        for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
          assert(CB != OutsideLoopPreds[i] &&
                 "Loop has multiple entry points!");
    }
    assert(BB != &getHeader()->getParent()->front() &&
           "Loop contains function entry block!");

    VisitedBBs.insert(BB);
  }

  if (VisitedBBs.size() != getNumBlocks()) {
    dbgs() << "The following blocks are unreachable in the loop: ";
    for (auto *BB : Blocks) {
      if (!VisitedBBs.count(BB)) {
        dbgs() << *BB << "\n";
      }
    }
    assert(false && "Unreachable block in loop");
  }

  // Check the subloops.
  for (iterator I = begin(), E = end(); I != E; ++I)
    // Each block in each subloop should be contained within this loop.
    for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
         BI != BE; ++BI) {
      assert(contains(*BI) &&
             "Loop does not contain all the blocks of a subloop!");
    }

  // Check the parent loop pointer.
  if (ParentLoop) {
    assert(is_contained(ParentLoop->getSubLoops(), this) &&
           "Loop is not a subloop of its parent!");
  }
#endif
}

/// verifyLoop - Verify loop structure of this loop and all nested loops.
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoopNest(
    DenseSet<const LoopT *> *Loops) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  Loops->insert(static_cast<const LoopT *>(this));
  // Verify this loop.
  verifyLoop();
  // Verify the subloops.
  for (iterator I = begin(), E = end(); I != E; ++I)
    (*I)->verifyLoopNest(Loops);
}

template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, bool Verbose,
                                    bool PrintNested, unsigned Depth) const {
  OS.indent(Depth * 2);
  if (static_cast<const LoopT *>(this)->isAnnotatedParallel())
    OS << "Parallel ";
  OS << "Loop at depth " << getLoopDepth() << " containing: ";

  BlockT *H = getHeader();
  for (unsigned i = 0; i < getBlocks().size(); ++i) {
    BlockT *BB = getBlocks()[i];
    if (!Verbose) {
      if (i)
        OS << ",";
      BB->printAsOperand(OS, false);
    } else
      OS << "\n";

    if (BB == H)
      OS << "<header>";
    if (isLoopLatch(BB))
      OS << "<latch>";
    if (isLoopExiting(BB))
      OS << "<exiting>";
    if (Verbose)
      BB->print(OS);
  }

  if (PrintNested) {
    OS << "\n";

    for (iterator I = begin(), E = end(); I != E; ++I)
      (*I)->print(OS, /*Verbose*/ false, PrintNested, Depth + 2);
  }
}

//===----------------------------------------------------------------------===//
/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
/// result does not depend on use list (block predecessor) order.
///

/// Discover a subloop with the specified backedges such that: All blocks within
/// this loop are mapped to this loop or a subloop. And all subloops within this
/// loop have their parent loop set to this loop or a subloop.
template <class BlockT, class LoopT>
static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT *> Backedges,
                                  LoopInfoBase<BlockT, LoopT> *LI,
                                  const DomTreeBase<BlockT> &DomTree) {
  typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;

  unsigned NumBlocks = 0;
  unsigned NumSubloops = 0;

  // Perform a backward CFG traversal using a worklist.
  std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
  while (!ReverseCFGWorklist.empty()) {
    BlockT *PredBB = ReverseCFGWorklist.back();
    ReverseCFGWorklist.pop_back();

    LoopT *Subloop = LI->getLoopFor(PredBB);
    if (!Subloop) {
      if (!DomTree.isReachableFromEntry(PredBB))
        continue;

      // This is an undiscovered block. Map it to the current loop.
      LI->changeLoopFor(PredBB, L);
      ++NumBlocks;
      if (PredBB == L->getHeader())
        continue;
      // Push all block predecessors on the worklist.
      ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
                                InvBlockTraits::child_begin(PredBB),
                                InvBlockTraits::child_end(PredBB));
    } else {
      // This is a discovered block. Find its outermost discovered loop.
      Subloop = Subloop->getOutermostLoop();

      // If it is already discovered to be a subloop of this loop, continue.
      if (Subloop == L)
        continue;

      // Discover a subloop of this loop.
      Subloop->setParentLoop(L);
      ++NumSubloops;
      NumBlocks += Subloop->getBlocksVector().capacity();
      PredBB = Subloop->getHeader();
      // Continue traversal along predecessors that are not loop-back edges from
      // within this subloop tree itself. Note that a predecessor may directly
      // reach another subloop that is not yet discovered to be a subloop of
      // this loop, which we must traverse.
      for (const auto Pred : children<Inverse<BlockT *>>(PredBB)) {
        if (LI->getLoopFor(Pred) != Subloop)
          ReverseCFGWorklist.push_back(Pred);
      }
    }
  }
  L->getSubLoopsVector().reserve(NumSubloops);
  L->reserveBlocks(NumBlocks);
}

/// Populate all loop data in a stable order during a single forward DFS.
template <class BlockT, class LoopT> class PopulateLoopsDFS {
  typedef GraphTraits<BlockT *> BlockTraits;
  typedef typename BlockTraits::ChildIteratorType SuccIterTy;

  LoopInfoBase<BlockT, LoopT> *LI;

public:
  PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li) : LI(li) {}

  void traverse(BlockT *EntryBlock);

protected:
  void insertIntoLoop(BlockT *Block);
};

/// Top-level driver for the forward DFS within the loop.
template <class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
  for (BlockT *BB : post_order(EntryBlock))
    insertIntoLoop(BB);
}

/// Add a single Block to its ancestor loops in PostOrder. If the block is a
/// subloop header, add the subloop to its parent in PostOrder, then reverse the
/// Block and Subloop vectors of the now complete subloop to achieve RPO.
template <class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
  LoopT *Subloop = LI->getLoopFor(Block);
  if (Subloop && Block == Subloop->getHeader()) {
    // We reach this point once per subloop after processing all the blocks in
    // the subloop.
    if (!Subloop->isOutermost())
      Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
    else
      LI->addTopLevelLoop(Subloop);

    // For convenience, Blocks and Subloops are inserted in postorder. Reverse
    // the lists, except for the loop header, which is always at the beginning.
    Subloop->reverseBlock(1);
    std::reverse(Subloop->getSubLoopsVector().begin(),
                 Subloop->getSubLoopsVector().end());

    Subloop = Subloop->getParentLoop();
  }
  for (; Subloop; Subloop = Subloop->getParentLoop())
    Subloop->addBlockEntry(Block);
}

/// Analyze LoopInfo discovers loops during a postorder DominatorTree traversal
/// interleaved with backward CFG traversals within each subloop
/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
/// this part of the algorithm is linear in the number of CFG edges. Subloop and
/// Block vectors are then populated during a single forward CFG traversal
/// (PopulateLoopDFS).
///
/// During the two CFG traversals each block is seen three times:
/// 1) Discovered and mapped by a reverse CFG traversal.
/// 2) Visited during a forward DFS CFG traversal.
/// 3) Reverse-inserted in the loop in postorder following forward DFS.
///
/// The Block vectors are inclusive, so step 3 requires loop-depth number of
/// insertions per block.
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::analyze(const DomTreeBase<BlockT> &DomTree) {
  // Postorder traversal of the dominator tree.
  const DomTreeNodeBase<BlockT> *DomRoot = DomTree.getRootNode();
  for (auto DomNode : post_order(DomRoot)) {

    BlockT *Header = DomNode->getBlock();
    SmallVector<BlockT *, 4> Backedges;

    // Check each predecessor of the potential loop header.
    for (const auto Backedge : children<Inverse<BlockT *>>(Header)) {
      // If Header dominates predBB, this is a new loop. Collect the backedges.
      if (DomTree.dominates(Header, Backedge) &&
          DomTree.isReachableFromEntry(Backedge)) {
        Backedges.push_back(Backedge);
      }
    }
    // Perform a backward CFG traversal to discover and map blocks in this loop.
    if (!Backedges.empty()) {
      LoopT *L = AllocateLoop(Header);
      discoverAndMapSubloop(L, ArrayRef<BlockT *>(Backedges), this, DomTree);
    }
  }
  // Perform a single forward CFG traversal to populate block and subloop
  // vectors for all loops.
  PopulateLoopsDFS<BlockT, LoopT> DFS(this);
  DFS.traverse(DomRoot->getBlock());
}

template <class BlockT, class LoopT>
SmallVector<LoopT *, 4>
LoopInfoBase<BlockT, LoopT>::getLoopsInPreorder() const {
  SmallVector<LoopT *, 4> PreOrderLoops, PreOrderWorklist;
  // The outer-most loop actually goes into the result in the same relative
  // order as we walk it. But LoopInfo stores the top level loops in reverse
  // program order so for here we reverse it to get forward program order.
  // FIXME: If we change the order of LoopInfo we will want to remove the
  // reverse here.
  for (LoopT *RootL : reverse(*this)) {
    auto PreOrderLoopsInRootL = RootL->getLoopsInPreorder();
    PreOrderLoops.append(PreOrderLoopsInRootL.begin(),
                         PreOrderLoopsInRootL.end());
  }

  return PreOrderLoops;
}

template <class BlockT, class LoopT>
SmallVector<LoopT *, 4>
LoopInfoBase<BlockT, LoopT>::getLoopsInReverseSiblingPreorder() const {
  SmallVector<LoopT *, 4> PreOrderLoops, PreOrderWorklist;
  // The outer-most loop actually goes into the result in the same relative
  // order as we walk it. LoopInfo stores the top level loops in reverse
  // program order so we walk in order here.
  // FIXME: If we change the order of LoopInfo we will want to add a reverse
  // here.
  for (LoopT *RootL : *this) {
    assert(PreOrderWorklist.empty() &&
           "Must start with an empty preorder walk worklist.");
    PreOrderWorklist.push_back(RootL);
    do {
      LoopT *L = PreOrderWorklist.pop_back_val();
      // Sub-loops are stored in forward program order, but we process the
      // worklist backwards, so we can just append them in order.
      PreOrderWorklist.append(L->begin(), L->end());
      PreOrderLoops.push_back(L);
    } while (!PreOrderWorklist.empty());
  }

  return PreOrderLoops;
}

// Debugging
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
  for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
    TopLevelLoops[i]->print(OS);
#if 0
  for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
         E = BBMap.end(); I != E; ++I)
    OS << "BB '" << I->first->getName() << "' level = "
       << I->second->getLoopDepth() << "\n";
#endif
}

template <typename T>
bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
  llvm::sort(BB1);
  llvm::sort(BB2);
  return BB1 == BB2;
}

template <class BlockT, class LoopT>
void addInnerLoopsToHeadersMap(DenseMap<BlockT *, const LoopT *> &LoopHeaders,
                               const LoopInfoBase<BlockT, LoopT> &LI,
                               const LoopT &L) {
  LoopHeaders[L.getHeader()] = &L;
  for (LoopT *SL : L)
    addInnerLoopsToHeadersMap(LoopHeaders, LI, *SL);
}

#ifndef NDEBUG
template <class BlockT, class LoopT>
static void compareLoops(const LoopT *L, const LoopT *OtherL,
                         DenseMap<BlockT *, const LoopT *> &OtherLoopHeaders) {
  BlockT *H = L->getHeader();
  BlockT *OtherH = OtherL->getHeader();
  assert(H == OtherH &&
         "Mismatched headers even though found in the same map entry!");

  assert(L->getLoopDepth() == OtherL->getLoopDepth() &&
         "Mismatched loop depth!");
  const LoopT *ParentL = L, *OtherParentL = OtherL;
  do {
    assert(ParentL->getHeader() == OtherParentL->getHeader() &&
           "Mismatched parent loop headers!");
    ParentL = ParentL->getParentLoop();
    OtherParentL = OtherParentL->getParentLoop();
  } while (ParentL);

  for (const LoopT *SubL : *L) {
    BlockT *SubH = SubL->getHeader();
    const LoopT *OtherSubL = OtherLoopHeaders.lookup(SubH);
    assert(OtherSubL && "Inner loop is missing in computed loop info!");
    OtherLoopHeaders.erase(SubH);
    compareLoops(SubL, OtherSubL, OtherLoopHeaders);
  }

  std::vector<BlockT *> BBs = L->getBlocks();
  std::vector<BlockT *> OtherBBs = OtherL->getBlocks();
  assert(compareVectors(BBs, OtherBBs) &&
         "Mismatched basic blocks in the loops!");

  const SmallPtrSetImpl<const BlockT *> &BlocksSet = L->getBlocksSet();
  const SmallPtrSetImpl<const BlockT *> &OtherBlocksSet =
      OtherL->getBlocksSet();
  assert(BlocksSet.size() == OtherBlocksSet.size() &&
         llvm::set_is_subset(BlocksSet, OtherBlocksSet) &&
         "Mismatched basic blocks in BlocksSets!");
}
#endif

template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::verify(
    const DomTreeBase<BlockT> &DomTree) const {
  DenseSet<const LoopT *> Loops;
  for (iterator I = begin(), E = end(); I != E; ++I) {
    assert((*I)->isOutermost() && "Top-level loop has a parent!");
    (*I)->verifyLoopNest(&Loops);
  }

// Verify that blocks are mapped to valid loops.
#ifndef NDEBUG
  for (auto &Entry : BBMap) {
    const BlockT *BB = Entry.first;
    LoopT *L = Entry.second;
    assert(Loops.count(L) && "orphaned loop");
    assert(L->contains(BB) && "orphaned block");
    for (LoopT *ChildLoop : *L)
      assert(!ChildLoop->contains(BB) &&
             "BBMap should point to the innermost loop containing BB");
  }

  // Recompute LoopInfo to verify loops structure.
  LoopInfoBase<BlockT, LoopT> OtherLI;
  OtherLI.analyze(DomTree);

  // Build a map we can use to move from our LI to the computed one. This
  // allows us to ignore the particular order in any layer of the loop forest
  // while still comparing the structure.
  DenseMap<BlockT *, const LoopT *> OtherLoopHeaders;
  for (LoopT *L : OtherLI)
    addInnerLoopsToHeadersMap(OtherLoopHeaders, OtherLI, *L);

  // Walk the top level loops and ensure there is a corresponding top-level
  // loop in the computed version and then recursively compare those loop
  // nests.
  for (LoopT *L : *this) {
    BlockT *Header = L->getHeader();
    const LoopT *OtherL = OtherLoopHeaders.lookup(Header);
    assert(OtherL && "Top level loop is missing in computed loop info!");
    // Now that we've matched this loop, erase its header from the map.
    OtherLoopHeaders.erase(Header);
    // And recursively compare these loops.
    compareLoops(L, OtherL, OtherLoopHeaders);
  }

  // Any remaining entries in the map are loops which were found when computing
  // a fresh LoopInfo but not present in the current one.
  if (!OtherLoopHeaders.empty()) {
    for (const auto &HeaderAndLoop : OtherLoopHeaders)
      dbgs() << "Found new loop: " << *HeaderAndLoop.second << "\n";
    llvm_unreachable("Found new loops when recomputing LoopInfo!");
  }
#endif
}

} // namespace llvm

#endif // LLVM_SUPPORT_GENERICLOOPINFOIMPL_H
//==- llvm/Support/RecyclingAllocator.h - Recycling Allocator ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RecyclingAllocator class.  See the doxygen comment for
// RecyclingAllocator for more details on the implementation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RECYCLINGALLOCATOR_H
#define LLVM_SUPPORT_RECYCLINGALLOCATOR_H

#include "llvm/Support/Recycler.h"

namespace llvm {

/// RecyclingAllocator - This class wraps an Allocator, adding the
/// functionality of recycling deleted objects.
///
template <class AllocatorType, class T, size_t Size = sizeof(T),
          size_t Align = alignof(T)>
class RecyclingAllocator {
private:
  /// Base - Implementation details.
  ///
  Recycler<T, Size, Align> Base;

  /// Allocator - The wrapped allocator.
  ///
  AllocatorType Allocator;

public:
  ~RecyclingAllocator() { Base.clear(Allocator); }

  /// Allocate - Return a pointer to storage for an object of type
  /// SubClass. The storage may be either newly allocated or recycled.
  ///
  template<class SubClass>
  SubClass *Allocate() { return Base.template Allocate<SubClass>(Allocator); }

  T *Allocate() { return Base.Allocate(Allocator); }

  /// Deallocate - Release storage for the pointed-to object. The
  /// storage will be kept track of and may be recycled.
  ///
  template<class SubClass>
  void Deallocate(SubClass* E) { return Base.Deallocate(Allocator, E); }

  void PrintStats() {
    Allocator.PrintStats();
    Base.PrintStats();
  }
};
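
// Example usage (illustrative; 'Node' is a hypothetical object type, and
// BumpPtrAllocator requires llvm/Support/Allocator.h):
//   RecyclingAllocator<BumpPtrAllocator, Node> Allocator;
//   Node *N = Allocator.Allocate();
//   ...
//   Allocator.Deallocate(N); // storage is retained for later recycling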

} // end namespace llvm

template<class AllocatorType, class T, size_t Size, size_t Align>
inline void *operator new(size_t size,
                          llvm::RecyclingAllocator<AllocatorType,
                                                   T, Size, Align> &Allocator) {
  assert(size <= Size && "allocation size exceeded");
  return Allocator.Allocate();
}

template<class AllocatorType, class T, size_t Size, size_t Align>
inline void operator delete(void *E,
                            llvm::RecyclingAllocator<AllocatorType,
                                                     T, Size, Align> &A) {
  A.Deallocate(E);
}

#endif // LLVM_SUPPORT_RECYCLINGALLOCATOR_H
//===-- llvm/Support/DataTypes.h - Define fixed size types ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Due to layering constraints (Support depends on llvm-c) this is a thin
// wrapper around the implementation that lives in llvm-c, though most clients
// can/should think of this as being provided by Support for simplicity (not
// many clients are aware of their dependency on llvm-c).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DATATYPES_H
#define LLVM_SUPPORT_DATATYPES_H

#include "llvm-c/DataTypes.h"

#endif // LLVM_SUPPORT_DATATYPES_H
//===- llvm/Support/CommandLine.h - Command line handler --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class implements a command line argument processor that is useful when
// creating a tool.  It provides a simple, minimalistic interface that is easily
// extensible and supports nonlocal (library) command line options.
//
// Note that rather than trying to figure out what this code does, you should
// read the library documentation located in docs/CommandLine.html or look at
// the many example usages in tools/*/*.cpp
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_COMMANDLINE_H
#define LLVM_SUPPORT_COMMANDLINE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <climits>
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <string>
#include <type_traits>
#include <vector>

namespace llvm {

namespace vfs {
class FileSystem;
}

class StringSaver;

/// This namespace contains all of the command line option processing machinery.
/// It is intentionally a short name to make qualified usage concise.
namespace cl {

//===----------------------------------------------------------------------===//
// Command line option processing entry point.
//
// Returns true on success. Otherwise, this will print the error message to
// stderr and exit if \p Errs is not set (nullptr by default), or print the
// error message to \p Errs and return false if \p Errs is provided.
//
// If EnvVar is not nullptr, command-line options are also parsed from the
// environment variable named by EnvVar.  Precedence is given to occurrences
// from argv.  This precedence is currently implemented by parsing argv after
// the environment variable, so it is only implemented correctly for options
// that give precedence to later occurrences.  If your program supports options
// that give precedence to earlier occurrences, you will need to extend this
// function to support it correctly.
bool ParseCommandLineOptions(int argc, const char *const *argv,
                             StringRef Overview = "",
                             raw_ostream *Errs = nullptr,
                             const char *EnvVar = nullptr,
                             bool LongOptionsUseDoubleDash = false);
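
// Example (illustrative sketch of a typical tool entry point):
//   static cl::opt<std::string> InputFilename(cl::Positional,
//                                             cl::desc("<input file>"),
//                                             cl::init("-"));
//   int main(int argc, char **argv) {
//     cl::ParseCommandLineOptions(argc, argv, "my tool\n");
//     ...
//   }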

// Function pointer type for printing version information.
using VersionPrinterTy = std::function<void(raw_ostream &)>;

///===---------------------------------------------------------------------===//
/// Override the default (LLVM specific) version printer used to print out the
/// version when --version is given on the command line. This allows other
/// systems using the CommandLine utilities to print their own version string.
void SetVersionPrinter(VersionPrinterTy func);

///===---------------------------------------------------------------------===//
/// Add an extra printer to use in addition to the default one. This can be
/// called multiple times, and each time it adds a new function to the list
/// which will be called after the basic LLVM version printing is complete.
/// Each can then add additional information specific to the tool.
void AddExtraVersionPrinter(VersionPrinterTy func);

// Print option values.
// With -print-options print the difference between option values and defaults.
// With -print-all-options print all option values.
// (Currently not perfect, but best-effort.)
void PrintOptionValues();

// Forward declaration - AddLiteralOption needs to be up here to make gcc happy.
class Option;

/// Adds a new option for parsing and provides the option it refers to.
///
/// \param O pointer to the option
/// \param Name the string name for the option to handle during parsing
///
/// Literal options are used by some parsers to register special option values.
/// This is how the PassNameParser registers pass names for opt.
void AddLiteralOption(Option &O, StringRef Name);

//===----------------------------------------------------------------------===//
// Flags permitted to be passed to command line arguments
//

enum NumOccurrencesFlag { // Flags for the number of occurrences allowed
  Optional = 0x00,        // Zero or One occurrence
  ZeroOrMore = 0x01,      // Zero or more occurrences allowed
  Required = 0x02,        // One occurrence required
  OneOrMore = 0x03,       // One or more occurrences required

  // Indicates that this option is fed anything that follows the last positional
  // argument required by the application (it is an error if there are zero
  // positional arguments, and a ConsumeAfter option is used).
  // Thus, for example, all arguments to LLI are processed until a filename is
  // found.  Once a filename is found, all of the succeeding arguments are
  // passed, unprocessed, to the ConsumeAfter option.
  //
  ConsumeAfter = 0x04
};

enum ValueExpected { // Is a value required for the option?
  // zero reserved for the unspecified value
  ValueOptional = 0x01,  // The value can appear... or not
  ValueRequired = 0x02,  // The value is required to appear!
  ValueDisallowed = 0x03 // A value may not be specified (for flags)
};

enum OptionHidden {   // Control whether -help shows this option
  NotHidden = 0x00,   // Option included in -help & -help-hidden
  Hidden = 0x01,      // -help doesn't, but -help-hidden does
  ReallyHidden = 0x02 // Neither -help nor -help-hidden show this arg
};

// This controls special features that the option might have that cause it to be
// parsed differently...
//
// Prefix - This option allows arguments that are otherwise unrecognized to be
// matched by options that are a prefix of the actual value.  This is useful for
// cases like a linker, where options are typically of the form '-lfoo' or
// '-L../../include' where -l or -L are the actual flags.  When prefix is
// enabled, and used, the value for the flag comes from the suffix of the
// argument.
//
// AlwaysPrefix - Only allow the behavior enabled by the Prefix flag and reject
// the Option=Value form.
//

enum FormattingFlags {
  NormalFormatting = 0x00, // Nothing special
  Positional = 0x01,       // Is a positional argument, no '-' required
  Prefix = 0x02,           // Can this option directly prefix its value?
  AlwaysPrefix = 0x03      // Can this option only directly prefix its value?
};

enum MiscFlags {             // Miscellaneous flags to adjust argument
  CommaSeparated = 0x01,     // Should this cl::list split between commas?
  PositionalEatsArgs = 0x02, // Should this positional cl::list eat -args?
  Sink = 0x04,               // Should this cl::list eat all unknown options?

  // Can this option group with other options?
  // If this is enabled, multiple letter options are allowed to bunch together
  // with only a single hyphen for the whole group.  This allows emulation
  // of the behavior that ls uses for example: ls -la === ls -l -a
  Grouping = 0x08,

  // Default option
  DefaultOption = 0x10
};
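
// Example (illustrative): combining the flags above when declaring options:
//   static cl::opt<bool> Force("force", cl::Hidden,
//                              cl::desc("Overwrite the output file"));
//   static cl::list<std::string> Inputs(cl::Positional, cl::OneOrMore,
//                                       cl::desc("<input files>"));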

//===----------------------------------------------------------------------===//
//
class OptionCategory {
private:
  StringRef const Name;
  StringRef const Description;

  void registerCategory();

public:
  OptionCategory(StringRef const Name,
                 StringRef const Description = "")
      : Name(Name), Description(Description) {
    registerCategory();
  }

  StringRef getName() const { return Name; }
  StringRef getDescription() const { return Description; }
};

// The general Option Category (used as default category).
OptionCategory &getGeneralCategory();
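
// A sketch of grouping options under a category so that -help output is
// organized (the category and option names are hypothetical):
//
//   static cl::OptionCategory StageCat("Stage Options",
//                                      "Options controlling pipeline stages");
//   static cl::opt<bool> RunEarly("run-early",
//                                 cl::desc("Run the early stages only"),
//                                 cl::cat(StageCat));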

//===----------------------------------------------------------------------===//
//
class SubCommand {
private:
  StringRef Name;
  StringRef Description;

protected:
  void registerSubCommand();
  void unregisterSubCommand();

public:
  SubCommand(StringRef Name, StringRef Description = "")
      : Name(Name), Description(Description) {
        registerSubCommand();
  }
  SubCommand() = default;

  // Get the special subcommand representing no subcommand.
  static SubCommand &getTopLevel();

  // Get the special subcommand that can be used to put an option into all
  // subcommands.
  static SubCommand &getAll();

  void reset();

  explicit operator bool() const;

  StringRef getName() const { return Name; }
  StringRef getDescription() const { return Description; }

  SmallVector<Option *, 4> PositionalOpts;
  SmallVector<Option *, 4> SinkOpts;
  StringMap<Option *> OptionsMap;

  Option *ConsumeAfterOpt = nullptr; // The ConsumeAfter option if it exists.
};

// A special subcommand representing no subcommand
extern ManagedStatic<SubCommand> TopLevelSubCommand;

// A special subcommand that can be used to put an option into all subcommands.
extern ManagedStatic<SubCommand> AllSubCommands;
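
// A sketch of declaring a subcommand and attaching an option to it
// (hypothetical names):
//
//   static cl::SubCommand BuildCmd("build", "Build the project");
//   static cl::opt<bool> Verbose("verbose", cl::desc("Verbose build output"),
//                                cl::sub(BuildCmd));
//
// After cl::ParseCommandLineOptions(), `if (BuildCmd)` tests whether the
// subcommand was invoked on the command line.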

//===----------------------------------------------------------------------===//
//
class Option {
  friend class alias;

  // Overridden by subclasses to handle the value passed into an argument. Should
  // return true if there was an error processing the argument and the program
  // should exit.
  //
  virtual bool handleOccurrence(unsigned pos, StringRef ArgName,
                                StringRef Arg) = 0;

  virtual enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueOptional;
  }

  // Out of line virtual function to provide home for the class.
  virtual void anchor();

  uint16_t NumOccurrences; // The number of times specified
  // Occurrences, HiddenFlag, and Formatting are all enum types, but they are
  // stored as unsigned bitfields to avoid problems with signed enums in
  // bitfields.
  uint16_t Occurrences : 3; // enum NumOccurrencesFlag
  // not using the enum type for 'Value' because zero is an implementation
  // detail representing the non-value
  uint16_t Value : 2;
  uint16_t HiddenFlag : 2; // enum OptionHidden
  uint16_t Formatting : 2; // enum FormattingFlags
  uint16_t Misc : 5;
  uint16_t FullyInitialized : 1; // Has addArgument been called?
  uint16_t Position;             // Position of last occurrence of the option
  uint16_t AdditionalVals;       // Greater than 0 for multi-valued option.

public:
  StringRef ArgStr;   // The argument string itself (ex: "help", "o")
  StringRef HelpStr;  // The descriptive text message for -help
  StringRef ValueStr; // String describing what the value of this option is
  SmallVector<OptionCategory *, 1>
      Categories;                    // The Categories this option belongs to
  SmallPtrSet<SubCommand *, 1> Subs; // The subcommands this option belongs to.

  inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
    return (enum NumOccurrencesFlag)Occurrences;
  }

  inline enum ValueExpected getValueExpectedFlag() const {
    return Value ? ((enum ValueExpected)Value) : getValueExpectedFlagDefault();
  }

  inline enum OptionHidden getOptionHiddenFlag() const {
    return (enum OptionHidden)HiddenFlag;
  }

  inline enum FormattingFlags getFormattingFlag() const {
    return (enum FormattingFlags)Formatting;
  }

  inline unsigned getMiscFlags() const { return Misc; }
  inline unsigned getPosition() const { return Position; }
  inline unsigned getNumAdditionalVals() const { return AdditionalVals; }

  // Return true if the ArgStr is not empty.
  bool hasArgStr() const { return !ArgStr.empty(); }
  bool isPositional() const { return getFormattingFlag() == cl::Positional; }
  bool isSink() const { return getMiscFlags() & cl::Sink; }
  bool isDefaultOption() const { return getMiscFlags() & cl::DefaultOption; }

  bool isConsumeAfter() const {
    return getNumOccurrencesFlag() == cl::ConsumeAfter;
  }

  bool isInAllSubCommands() const {
    return Subs.contains(&SubCommand::getAll());
  }

  //--------------------------------------------------
  // Accessor functions set by OptionModifiers
  //
  void setArgStr(StringRef S);
  void setDescription(StringRef S) { HelpStr = S; }
  void setValueStr(StringRef S) { ValueStr = S; }
  void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) { Occurrences = Val; }
  void setValueExpectedFlag(enum ValueExpected Val) { Value = Val; }
  void setHiddenFlag(enum OptionHidden Val) { HiddenFlag = Val; }
  void setFormattingFlag(enum FormattingFlags V) { Formatting = V; }
  void setMiscFlag(enum MiscFlags M) { Misc |= M; }
  void setPosition(unsigned pos) { Position = pos; }
  void addCategory(OptionCategory &C);
  void addSubCommand(SubCommand &S) { Subs.insert(&S); }

protected:
  explicit Option(enum NumOccurrencesFlag OccurrencesFlag,
                  enum OptionHidden Hidden)
      : NumOccurrences(0), Occurrences(OccurrencesFlag), Value(0),
        HiddenFlag(Hidden), Formatting(NormalFormatting), Misc(0),
        FullyInitialized(false), Position(0), AdditionalVals(0) {
    Categories.push_back(&getGeneralCategory());
  }

  inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }

public:
  virtual ~Option() = default;

  // Register this argument with the commandline system.
  //
  void addArgument();

  /// Unregisters this option from the CommandLine system.
  ///
  /// This option must have been the last option registered.
  /// For testing purposes only.
  void removeArgument();

  // Return the width of the option tag for printing...
  virtual size_t getOptionWidth() const = 0;

  // Print out information about this option. The to-be-maintained width is
  // specified.
  //
  virtual void printOptionInfo(size_t GlobalWidth) const = 0;

  virtual void printOptionValue(size_t GlobalWidth, bool Force) const = 0;

  virtual void setDefault() = 0;

  // Prints the help string for an option.
  //
  // This maintains the Indent for multi-line descriptions.
  // FirstLineIndentedBy is the count of chars of the first line
  //      i.e. the one containing the --<option name>.
  static void printHelpStr(StringRef HelpStr, size_t Indent,
                           size_t FirstLineIndentedBy);

  // Prints the help string for an enum value.
  //
  // This maintains the Indent for multi-line descriptions.
  // FirstLineIndentedBy is the count of chars of the first line
  //      i.e. the one containing the =<value>.
  static void printEnumValHelpStr(StringRef HelpStr, size_t Indent,
                                  size_t FirstLineIndentedBy);

  virtual void getExtraOptionNames(SmallVectorImpl<StringRef> &) {}

  // Wrapper around handleOccurrence that enforces Flags.
  //
  virtual bool addOccurrence(unsigned pos, StringRef ArgName, StringRef Value,
                             bool MultiArg = false);

  // Prints option name followed by message.  Always returns true.
  bool error(const Twine &Message, StringRef ArgName = StringRef(),
             raw_ostream &Errs = llvm::errs());
  bool error(const Twine &Message, raw_ostream &Errs) {
    return error(Message, StringRef(), Errs);
  }

  inline int getNumOccurrences() const { return NumOccurrences; }
  void reset();
};

//===----------------------------------------------------------------------===//
// Command line option modifiers that can be used to modify the behavior of
// command line option parsers...
//

// Modifier to set the description shown in the -help output...
struct desc {
  StringRef Desc;

  desc(StringRef Str) : Desc(Str) {}

  void apply(Option &O) const { O.setDescription(Desc); }
};

// Modifier to set the value description shown in the -help output...
struct value_desc {
  StringRef Desc;

  value_desc(StringRef Str) : Desc(Str) {}

  void apply(Option &O) const { O.setValueStr(Desc); }
};

// Specify a default (initial) value for the command line argument, if the
// default constructor for the argument type does not give you what you want.
// This is only valid on "opt" arguments, not on "list" arguments.
template <class Ty> struct initializer {
  const Ty &Init;
  initializer(const Ty &Val) : Init(Val) {}

  template <class Opt> void apply(Opt &O) const { O.setInitialValue(Init); }
};

template <class Ty> struct list_initializer {
  ArrayRef<Ty> Inits;
  list_initializer(ArrayRef<Ty> Vals) : Inits(Vals) {}

  template <class Opt> void apply(Opt &O) const { O.setInitialValues(Inits); }
};

template <class Ty> initializer<Ty> init(const Ty &Val) {
  return initializer<Ty>(Val);
}

template <class Ty>
list_initializer<Ty> list_init(ArrayRef<Ty> Vals) {
  return list_initializer<Ty>(Vals);
}
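
// A sketch combining the modifiers above (the option is hypothetical):
//
//   static cl::opt<unsigned> Threads("threads",
//                                    cl::desc("Number of worker threads"),
//                                    cl::value_desc("N"), cl::init(1u));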

// Allow the user to specify which external variable they want to store the
// results of the command line argument processing into, if they don't want to
// store it in the option itself.
template <class Ty> struct LocationClass {
  Ty &Loc;

  LocationClass(Ty &L) : Loc(L) {}

  template <class Opt> void apply(Opt &O) const { O.setLocation(O, Loc); }
};

template <class Ty> LocationClass<Ty> location(Ty &L) {
  return LocationClass<Ty>(L);
}
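
// A sketch of external storage: the parsed value lands in a plain global
// rather than inside the option object (hypothetical names).  Note the
// ExternalStorage template argument must be true:
//
//   static bool EnableFoo;
//   static cl::opt<bool, true> EnableFooOpt("enable-foo",
//                                           cl::location(EnableFoo),
//                                           cl::desc("Enable foo"));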

// Specify the Option category for the command line argument to belong to.
struct cat {
  OptionCategory &Category;

  cat(OptionCategory &c) : Category(c) {}

  template <class Opt> void apply(Opt &O) const { O.addCategory(Category); }
};

// Specify the subcommand that this option belongs to.
struct sub {
  SubCommand &Sub;

  sub(SubCommand &S) : Sub(S) {}

  template <class Opt> void apply(Opt &O) const { O.addSubCommand(Sub); }
};

// Specify a callback function to be called when an option is seen.
// Can be used to set other options automatically.
template <typename R, typename Ty> struct cb {
  std::function<R(Ty)> CB;

  cb(std::function<R(Ty)> CB) : CB(CB) {}

  template <typename Opt> void apply(Opt &O) const { O.setCallback(CB); }
};

namespace detail {
template <typename F>
struct callback_traits : public callback_traits<decltype(&F::operator())> {};

template <typename R, typename C, typename... Args>
struct callback_traits<R (C::*)(Args...) const> {
  using result_type = R;
  using arg_type = std::tuple_element_t<0, std::tuple<Args...>>;
  static_assert(sizeof...(Args) == 1,
                "callback function must have one and only one parameter");
  static_assert(std::is_same_v<result_type, void>,
                "callback return type must be void");
  static_assert(std::is_lvalue_reference_v<arg_type> &&
                    std::is_const_v<std::remove_reference_t<arg_type>>,
                "callback arg_type must be a const lvalue reference");
};
} // namespace detail

template <typename F>
cb<typename detail::callback_traits<F>::result_type,
   typename detail::callback_traits<F>::arg_type>
callback(F CB) {
  using result_type = typename detail::callback_traits<F>::result_type;
  using arg_type = typename detail::callback_traits<F>::arg_type;
  return cb<result_type, arg_type>(CB);
}
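
// A sketch of cl::callback; setting one option from another is the typical
// use (option names are hypothetical).  The lambda must take a single const
// lvalue reference, per the static_asserts above:
//
//   static cl::opt<bool> Debug("debug", cl::desc("Enable debug output"));
//   static cl::opt<bool> All("all", cl::desc("Enable everything"),
//                            cl::callback([](const bool &V) {
//                              if (V)
//                                Debug = true;
//                            }));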

//===----------------------------------------------------------------------===//

// Support value comparison outside the template.
struct GenericOptionValue {
  virtual bool compare(const GenericOptionValue &V) const = 0;

protected:
  GenericOptionValue() = default;
  GenericOptionValue(const GenericOptionValue&) = default;
  GenericOptionValue &operator=(const GenericOptionValue &) = default;
  ~GenericOptionValue() = default;

private:
  virtual void anchor();
};

template <class DataType> struct OptionValue;

// The default value safely does nothing. Option value printing is only
// best-effort.
template <class DataType, bool isClass>
struct OptionValueBase : public GenericOptionValue {
  // Temporary storage for argument passing.
  using WrapperType = OptionValue<DataType>;

  bool hasValue() const { return false; }

  const DataType &getValue() const { llvm_unreachable("no default value"); }

  // Some options may take their value from a different data type.
  template <class DT> void setValue(const DT & /*V*/) {}

  bool compare(const DataType & /*V*/) const { return false; }

  bool compare(const GenericOptionValue & /*V*/) const override {
    return false;
  }

protected:
  ~OptionValueBase() = default;
};

// Simple copy of the option value.
template <class DataType> class OptionValueCopy : public GenericOptionValue {
  DataType Value;
  bool Valid = false;

protected:
  OptionValueCopy(const OptionValueCopy&) = default;
  OptionValueCopy &operator=(const OptionValueCopy &) = default;
  ~OptionValueCopy() = default;

public:
  OptionValueCopy() = default;

  bool hasValue() const { return Valid; }

  const DataType &getValue() const {
    assert(Valid && "invalid option value");
    return Value;
  }

  void setValue(const DataType &V) {
    Valid = true;
    Value = V;
  }

  bool compare(const DataType &V) const { return Valid && (Value != V); }

  bool compare(const GenericOptionValue &V) const override {
    const OptionValueCopy<DataType> &VC =
        static_cast<const OptionValueCopy<DataType> &>(V);
    if (!VC.hasValue())
      return false;
    return compare(VC.getValue());
  }
};

// Non-class option values.
template <class DataType>
struct OptionValueBase<DataType, false> : OptionValueCopy<DataType> {
  using WrapperType = DataType;

protected:
  OptionValueBase() = default;
  OptionValueBase(const OptionValueBase&) = default;
  OptionValueBase &operator=(const OptionValueBase &) = default;
  ~OptionValueBase() = default;
};

// Top-level option class.
template <class DataType>
struct OptionValue final
    : OptionValueBase<DataType, std::is_class_v<DataType>> {
  OptionValue() = default;

  OptionValue(const DataType &V) { this->setValue(V); }

  // Some options may take their value from a different data type.
  template <class DT> OptionValue<DataType> &operator=(const DT &V) {
    this->setValue(V);
    return *this;
  }
};

// Other safe-to-copy-by-value common option types.
enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };
template <>
struct OptionValue<cl::boolOrDefault> final
    : OptionValueCopy<cl::boolOrDefault> {
  using WrapperType = cl::boolOrDefault;

  OptionValue() = default;

  OptionValue(const cl::boolOrDefault &V) { this->setValue(V); }

  OptionValue<cl::boolOrDefault> &operator=(const cl::boolOrDefault &V) {
    setValue(V);
    return *this;
  }

private:
  void anchor() override;
};

template <>
struct OptionValue<std::string> final : OptionValueCopy<std::string> {
  using WrapperType = StringRef;

  OptionValue() = default;

  OptionValue(const std::string &V) { this->setValue(V); }

  OptionValue<std::string> &operator=(const std::string &V) {
    setValue(V);
    return *this;
  }

private:
  void anchor() override;
};

//===----------------------------------------------------------------------===//
// Enum valued command line option
//

// This represents a single enum value, using "int" as the underlying type.
struct OptionEnumValue {
  StringRef Name;
  int Value;
  StringRef Description;
};

#define clEnumVal(ENUMVAL, DESC)                                               \
  llvm::cl::OptionEnumValue { #ENUMVAL, int(ENUMVAL), DESC }
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)                                    \
  llvm::cl::OptionEnumValue { FLAGNAME, int(ENUMVAL), DESC }

// For custom data types, allow specifying a group of values together as the
// values that go into the mapping that the option handler uses.
//
class ValuesClass {
  // Use a vector instead of a map, because the lists should be short,
  // the overhead is less, and most importantly, it keeps them in the order
  // inserted so we can print our option out nicely.
  SmallVector<OptionEnumValue, 4> Values;

public:
  ValuesClass(std::initializer_list<OptionEnumValue> Options)
      : Values(Options) {}

  template <class Opt> void apply(Opt &O) const {
    for (const auto &Value : Values)
      O.getParser().addLiteralOption(Value.Name, Value.Value,
                                     Value.Description);
  }
};

/// Helper to build a ValuesClass by forwarding a variable number of arguments
/// as an initializer list to the ValuesClass constructor.
template <typename... OptsTy> ValuesClass values(OptsTy... Options) {
  return ValuesClass({Options...});
}
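
// A sketch of an enum-valued option built with these helpers (the enum and
// option are hypothetical):
//
//   enum OptLevel { O0, O1, O2 };
//   static cl::opt<OptLevel> Level(
//       "level", cl::desc("Optimization level:"), cl::init(O0),
//       cl::values(clEnumVal(O0, "No optimization"),
//                  clEnumVal(O1, "Basic optimization"),
//                  clEnumValN(O2, "fast", "Aggressive optimization")));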

//===----------------------------------------------------------------------===//
// Parameterizable parser for different data types. By default, known data
// types (string, int, bool) have specialized parsers that do what you would
// expect. The default parser, used for data types that are not built-in, uses
// a mapping table to map specific options to values, which is used, among
// other things, to handle enum types.

//--------------------------------------------------
// This class holds all the non-generic code that we do not need replicated for
// every instance of the generic parser.  This also allows us to put stuff into
// CommandLine.cpp
//
class generic_parser_base {
protected:
  class GenericOptionInfo {
  public:
    GenericOptionInfo(StringRef name, StringRef helpStr)
        : Name(name), HelpStr(helpStr) {}
    StringRef Name;
    StringRef HelpStr;
  };

public:
  generic_parser_base(Option &O) : Owner(O) {}

  // Base classes should have a virtual destructor.
  virtual ~generic_parser_base() = default;

  // Virtual function implemented by generic subclass to indicate how many
  // entries are in Values.
  //
  virtual unsigned getNumOptions() const = 0;

  // Return option name N.
  virtual StringRef getOption(unsigned N) const = 0;

  // Return description N
  virtual StringRef getDescription(unsigned N) const = 0;

  // Return the width of the option tag for printing...
  virtual size_t getOptionWidth(const Option &O) const;

  virtual const GenericOptionValue &getOptionValue(unsigned N) const = 0;

  // Print out information about this option. The to-be-maintained width is
  // specified.
  //
  virtual void printOptionInfo(const Option &O, size_t GlobalWidth) const;

  void printGenericOptionDiff(const Option &O, const GenericOptionValue &V,
                              const GenericOptionValue &Default,
                              size_t GlobalWidth) const;

  // Print the value of an option and its default.
  //
  // Template definition ensures that the option and default have the same
  // DataType (via the same AnyOptionValue).
  template <class AnyOptionValue>
  void printOptionDiff(const Option &O, const AnyOptionValue &V,
                       const AnyOptionValue &Default,
                       size_t GlobalWidth) const {
    printGenericOptionDiff(O, V, Default, GlobalWidth);
  }

  void initialize() {}

  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) {
    // If there has been no argstr specified, that means that we need to add an
    // argument for every possible option.  This ensures that our options are
    // vectored to us.
    if (!Owner.hasArgStr())
      for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
        OptionNames.push_back(getOption(i));
  }

  enum ValueExpected getValueExpectedFlagDefault() const {
    // If there is an ArgStr specified, then we are of the form:
    //
    //    -opt=O2   or   -opt O2  or  -optO2
    //
    // In which case, the value is required.  Otherwise if an arg str has not
    // been specified, we are of the form:
    //
    //    -O2 or O2 or -la (where -l and -a are separate options)
    //
    // If this is the case, we cannot allow a value.
    //
    if (Owner.hasArgStr())
      return ValueRequired;
    else
      return ValueDisallowed;
  }

  // Return the option number corresponding to the specified
  // argument string.  If the option is not found, getNumOptions() is returned.
  //
  unsigned findOption(StringRef Name);

protected:
  Option &Owner;
};

// Default parser implementation - This implementation depends on having a
// mapping of recognized options to values of some sort.  In addition to this,
// each entry in the mapping also tracks a help message that is printed with the
// command line option for -help.  Because this is a simple mapping parser, the
// data type can be any type that lacks a specialized built-in parser.
//
template <class DataType> class parser : public generic_parser_base {
protected:
  class OptionInfo : public GenericOptionInfo {
  public:
    OptionInfo(StringRef name, DataType v, StringRef helpStr)
        : GenericOptionInfo(name, helpStr), V(v) {}

    OptionValue<DataType> V;
  };
  SmallVector<OptionInfo, 8> Values;

public:
  parser(Option &O) : generic_parser_base(O) {}

  using parser_data_type = DataType;

  // Implement virtual functions needed by generic_parser_base
  unsigned getNumOptions() const override { return unsigned(Values.size()); }
  StringRef getOption(unsigned N) const override { return Values[N].Name; }
  StringRef getDescription(unsigned N) const override {
    return Values[N].HelpStr;
  }

  // Return the value of option name N.
  const GenericOptionValue &getOptionValue(unsigned N) const override {
    return Values[N].V;
  }

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, DataType &V) {
    StringRef ArgVal;
    if (Owner.hasArgStr())
      ArgVal = Arg;
    else
      ArgVal = ArgName;

    for (size_t i = 0, e = Values.size(); i != e; ++i)
      if (Values[i].Name == ArgVal) {
        V = Values[i].V.getValue();
        return false;
      }

    return O.error("Cannot find option named '" + ArgVal + "'!");
  }

  /// Add an entry to the mapping table.
  ///
  template <class DT>
  void addLiteralOption(StringRef Name, const DT &V, StringRef HelpStr) {
    assert(findOption(Name) == Values.size() && "Option already exists!");
    OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
    Values.push_back(X);
    AddLiteralOption(Owner, Name);
  }

  /// Remove the specified option.
  ///
  void removeLiteralOption(StringRef Name) {
    unsigned N = findOption(Name);
    assert(N != Values.size() && "Option not found!");
    Values.erase(Values.begin() + N);
  }
};

//--------------------------------------------------
// Super class of parsers to provide boilerplate code
//
class basic_parser_impl { // non-template implementation of basic_parser<T>
public:
  basic_parser_impl(Option &) {}

  virtual ~basic_parser_impl() = default;

  enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueRequired;
  }

  void getExtraOptionNames(SmallVectorImpl<StringRef> &) {}

  void initialize() {}

  // Return the width of the option tag for printing...
  size_t getOptionWidth(const Option &O) const;

  // Print out information about this option. The to-be-maintained width is
  // specified.
  //
  void printOptionInfo(const Option &O, size_t GlobalWidth) const;

  // Print a placeholder for options that don't yet support printOptionDiff().
  void printOptionNoValue(const Option &O, size_t GlobalWidth) const;

  // Overload in subclass to provide a better default value.
  virtual StringRef getValueName() const { return "value"; }

  // An out-of-line virtual method to provide a 'home' for this class.
  virtual void anchor();

protected:
  // A helper for basic_parser::printOptionDiff.
  void printOptionName(const Option &O, size_t GlobalWidth) const;
};

// The real basic parser is just a template wrapper that provides a typedef for
// the provided data type.
//
template <class DataType> class basic_parser : public basic_parser_impl {
public:
  using parser_data_type = DataType;
  using OptVal = OptionValue<DataType>;

  basic_parser(Option &O) : basic_parser_impl(O) {}
};

//--------------------------------------------------

extern template class basic_parser<bool>;

template <> class parser<bool> : public basic_parser<bool> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, bool &Val);

  void initialize() {}

  enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueOptional;
  }

  // Do not print =<value> at all.
  StringRef getValueName() const override { return StringRef(); }

  void printOptionDiff(const Option &O, bool V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<boolOrDefault>;

template <> class parser<boolOrDefault> : public basic_parser<boolOrDefault> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, boolOrDefault &Val);

  enum ValueExpected getValueExpectedFlagDefault() const {
    return ValueOptional;
  }

  // Do not print =<value> at all.
  StringRef getValueName() const override { return StringRef(); }

  void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<int>;

template <> class parser<int> : public basic_parser<int> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, int &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "int"; }

  void printOptionDiff(const Option &O, int V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<long>;

template <> class parser<long> final : public basic_parser<long> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, long &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "long"; }

  void printOptionDiff(const Option &O, long V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<long long>;

template <> class parser<long long> : public basic_parser<long long> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, long long &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "long"; }

  void printOptionDiff(const Option &O, long long V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<unsigned>;

template <> class parser<unsigned> : public basic_parser<unsigned> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "uint"; }

  void printOptionDiff(const Option &O, unsigned V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<unsigned long>;

template <>
class parser<unsigned long> final : public basic_parser<unsigned long> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned long &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "ulong"; }

  void printOptionDiff(const Option &O, unsigned long V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<unsigned long long>;

template <>
class parser<unsigned long long> : public basic_parser<unsigned long long> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg,
             unsigned long long &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "ulong"; }

  void printOptionDiff(const Option &O, unsigned long long V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<double>;

template <> class parser<double> : public basic_parser<double> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, double &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "number"; }

  void printOptionDiff(const Option &O, double V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<float>;

template <> class parser<float> : public basic_parser<float> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &O, StringRef ArgName, StringRef Arg, float &Val);

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "number"; }

  void printOptionDiff(const Option &O, float V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<std::string>;

template <> class parser<std::string> : public basic_parser<std::string> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &, StringRef, StringRef Arg, std::string &Value) {
    Value = Arg.str();
    return false;
  }

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "string"; }

  void printOptionDiff(const Option &O, StringRef V, const OptVal &Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------

extern template class basic_parser<char>;

template <> class parser<char> : public basic_parser<char> {
public:
  parser(Option &O) : basic_parser(O) {}

  // Return true on error.
  bool parse(Option &, StringRef, StringRef Arg, char &Value) {
    Value = Arg[0];
    return false;
  }

  // Overload in subclass to provide a better default value.
  StringRef getValueName() const override { return "char"; }

  void printOptionDiff(const Option &O, char V, OptVal Default,
                       size_t GlobalWidth) const;

  // An out-of-line virtual method to provide a 'home' for this class.
  void anchor() override;
};

//--------------------------------------------------
// This collection of wrappers is the intermediary between class opt and class
// parser to handle all the template nastiness.

// This overloaded function is selected by the generic parser.
template <class ParserClass, class DT>
void printOptionDiff(const Option &O, const generic_parser_base &P, const DT &V,
                     const OptionValue<DT> &Default, size_t GlobalWidth) {
  OptionValue<DT> OV = V;
  P.printOptionDiff(O, OV, Default, GlobalWidth);
}

// This is instantiated for basic parsers when the parsed value has a different
// type than the option value. e.g. HelpPrinter.
template <class ParserDT, class ValDT> struct OptionDiffPrinter {
  void print(const Option &O, const parser<ParserDT> &P, const ValDT & /*V*/,
             const OptionValue<ValDT> & /*Default*/, size_t GlobalWidth) {
    P.printOptionNoValue(O, GlobalWidth);
  }
};

// This is instantiated for basic parsers when the parsed value has the same
// type as the option value.
template <class DT> struct OptionDiffPrinter<DT, DT> {
  void print(const Option &O, const parser<DT> &P, const DT &V,
             const OptionValue<DT> &Default, size_t GlobalWidth) {
    P.printOptionDiff(O, V, Default, GlobalWidth);
  }
};

// This overloaded function is selected by the basic parser, which may parse a
// different type than the option type.
template <class ParserClass, class ValDT>
void printOptionDiff(
    const Option &O,
    const basic_parser<typename ParserClass::parser_data_type> &P,
    const ValDT &V, const OptionValue<ValDT> &Default, size_t GlobalWidth) {

  OptionDiffPrinter<typename ParserClass::parser_data_type, ValDT> printer;
  printer.print(O, static_cast<const ParserClass &>(P), V, Default,
                GlobalWidth);
}

//===----------------------------------------------------------------------===//
// This class is used because we must use partial specialization to handle
// literal string arguments specially (const char* does not correctly respond to
// the apply method). Because the syntax to use this is a pain, we have the
// 'apply' method below to handle the nastiness...
//
template <class Mod> struct applicator {
  template <class Opt> static void opt(const Mod &M, Opt &O) { M.apply(O); }
};

// Handle const char* as a special case...
template <unsigned n> struct applicator<char[n]> {
  template <class Opt> static void opt(StringRef Str, Opt &O) {
    O.setArgStr(Str);
  }
};
template <unsigned n> struct applicator<const char[n]> {
  template <class Opt> static void opt(StringRef Str, Opt &O) {
    O.setArgStr(Str);
  }
};
template <> struct applicator<StringRef> {
  template <class Opt> static void opt(StringRef Str, Opt &O) {
    O.setArgStr(Str);
  }
};

template <> struct applicator<NumOccurrencesFlag> {
  static void opt(NumOccurrencesFlag N, Option &O) {
    O.setNumOccurrencesFlag(N);
  }
};

template <> struct applicator<ValueExpected> {
  static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); }
};

template <> struct applicator<OptionHidden> {
  static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); }
};

template <> struct applicator<FormattingFlags> {
  static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); }
};

template <> struct applicator<MiscFlags> {
  static void opt(MiscFlags MF, Option &O) {
    assert((MF != Grouping || O.ArgStr.size() == 1) &&
           "cl::Grouping can only apply to single character Options.");
    O.setMiscFlag(MF);
  }
};

// Apply modifiers to an option in a type safe way.
template <class Opt, class Mod, class... Mods>
void apply(Opt *O, const Mod &M, const Mods &... Ms) {
  applicator<Mod>::opt(M, *O);
  apply(O, Ms...);
}

template <class Opt, class Mod> void apply(Opt *O, const Mod &M) {
  applicator<Mod>::opt(M, *O);
}

//===----------------------------------------------------------------------===//
// Default storage class definition: external storage.  This implementation
// assumes the user will specify a variable to store the data into with the
// cl::location(x) modifier.
//
template <class DataType, bool ExternalStorage, bool isClass>
class opt_storage {
  DataType *Location = nullptr; // Where to store the object...
  OptionValue<DataType> Default;

  void check_location() const {
    assert(Location && "cl::location(...) not specified for a command "
                       "line option with external storage, "
                       "or cl::init specified before cl::location()!!");
  }

public:
  opt_storage() = default;

  bool setLocation(Option &O, DataType &L) {
    if (Location)
      return O.error("cl::location(x) specified more than once!");
    Location = &L;
    Default = L;
    return false;
  }

  template <class T> void setValue(const T &V, bool initial = false) {
    check_location();
    *Location = V;
    if (initial)
      Default = V;
  }

  DataType &getValue() {
    check_location();
    return *Location;
  }
  const DataType &getValue() const {
    check_location();
    return *Location;
  }

  operator DataType() const { return this->getValue(); }

  const OptionValue<DataType> &getDefault() const { return Default; }
};

// Define how to hold a class type object, such as a string.  Since we can
// inherit from a class, we do so.  This makes us exactly compatible with the
// object in all cases that it is used.
//
template <class DataType>
class opt_storage<DataType, false, true> : public DataType {
public:
  OptionValue<DataType> Default;

  template <class T> void setValue(const T &V, bool initial = false) {
    DataType::operator=(V);
    if (initial)
      Default = V;
  }

  DataType &getValue() { return *this; }
  const DataType &getValue() const { return *this; }

  const OptionValue<DataType> &getDefault() const { return Default; }
};

// Define a partial specialization to handle things we cannot inherit from.  In
// this case, we store an instance through containment, and overload operators
// to get at the value.
//
template <class DataType> class opt_storage<DataType, false, false> {
public:
  DataType Value;
  OptionValue<DataType> Default;

  // Make sure we initialize the value with the default constructor for the
  // type.
  opt_storage() : Value(DataType()), Default() {}

  template <class T> void setValue(const T &V, bool initial = false) {
    Value = V;
    if (initial)
      Default = V;
  }
  DataType &getValue() { return Value; }
  DataType getValue() const { return Value; }

  const OptionValue<DataType> &getDefault() const { return Default; }

  operator DataType() const { return getValue(); }

  // If the datatype is a pointer, support -> on it.
  DataType operator->() const { return Value; }
};

//===----------------------------------------------------------------------===//
// A scalar command line option.
//
template <class DataType, bool ExternalStorage = false,
          class ParserClass = parser<DataType>>
class opt
    : public Option,
      public opt_storage<DataType, ExternalStorage, std::is_class_v<DataType>> {
  ParserClass Parser;

  bool handleOccurrence(unsigned pos, StringRef ArgName,
                        StringRef Arg) override {
    typename ParserClass::parser_data_type Val =
        typename ParserClass::parser_data_type();
    if (Parser.parse(*this, ArgName, Arg, Val))
      return true; // Parse error!
    this->setValue(Val);
    this->setPosition(pos);
    Callback(Val);
    return false;
  }

  enum ValueExpected getValueExpectedFlagDefault() const override {
    return Parser.getValueExpectedFlagDefault();
  }

  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
    return Parser.getExtraOptionNames(OptionNames);
  }

  // Forward printing stuff to the parser...
  size_t getOptionWidth() const override {
    return Parser.getOptionWidth(*this);
  }

  void printOptionInfo(size_t GlobalWidth) const override {
    Parser.printOptionInfo(*this, GlobalWidth);
  }

  void printOptionValue(size_t GlobalWidth, bool Force) const override {
    if (Force || this->getDefault().compare(this->getValue())) {
      cl::printOptionDiff<ParserClass>(*this, Parser, this->getValue(),
                                       this->getDefault(), GlobalWidth);
    }
  }

  template <class T, class = std::enable_if_t<std::is_assignable_v<T &, T>>>
  void setDefaultImpl() {
    const OptionValue<DataType> &V = this->getDefault();
    if (V.hasValue())
      this->setValue(V.getValue());
    else
      this->setValue(T());
  }

  template <class T, class = std::enable_if_t<!std::is_assignable_v<T &, T>>>
  void setDefaultImpl(...) {}

  void setDefault() override { setDefaultImpl<DataType>(); }

  void done() {
    addArgument();
    Parser.initialize();
  }

public:
  // Command line options should not be copyable
  opt(const opt &) = delete;
  opt &operator=(const opt &) = delete;

  // setInitialValue - Used by the cl::init modifier...
  void setInitialValue(const DataType &V) { this->setValue(V, true); }

  ParserClass &getParser() { return Parser; }

  template <class T> DataType &operator=(const T &Val) {
    this->setValue(Val);
    Callback(Val);
    return this->getValue();
  }

  template <class... Mods>
  explicit opt(const Mods &... Ms)
      : Option(llvm::cl::Optional, NotHidden), Parser(*this) {
    apply(this, Ms...);
    done();
  }

  void setCallback(
      std::function<void(const typename ParserClass::parser_data_type &)> CB) {
    Callback = CB;
  }

  std::function<void(const typename ParserClass::parser_data_type &)> Callback =
      [](const typename ParserClass::parser_data_type &) {};
};

extern template class opt<unsigned>;
extern template class opt<int>;
extern template class opt<std::string>;
extern template class opt<char>;
extern template class opt<bool>;
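
// A sketch of typical scalar option declarations; values are populated when
// the tool calls cl::ParseCommandLineOptions (names are hypothetical):
//
//   static cl::opt<std::string> InputFilename(cl::Positional,
//                                             cl::desc("<input file>"),
//                                             cl::init("-"));
//   static cl::opt<bool> Quiet("quiet", cl::desc("Suppress output"));
//
//   int main(int argc, char **argv) {
//     cl::ParseCommandLineOptions(argc, argv, "my tool\n");
//     // Use InputFilename and Quiet like ordinary values here.
//   }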

//===----------------------------------------------------------------------===//
// Default storage class definition: external storage.  This implementation
// assumes the user will specify a variable to store the data into with the
// cl::location(x) modifier.
//
template <class DataType, class StorageClass> class list_storage {
  StorageClass *Location = nullptr; // Where to store the object...
  std::vector<OptionValue<DataType>> Default;
  bool DefaultAssigned = false;

public:
  list_storage() = default;

  void clear() {}

  bool setLocation(Option &O, StorageClass &L) {
    if (Location)
      return O.error("cl::location(x) specified more than once!");
    Location = &L;
    return false;
  }

  template <class T> void addValue(const T &V, bool initial = false) {
    assert(Location != nullptr &&
           "cl::location(...) not specified for a command "
           "line option with external storage!");
    Location->push_back(V);
    if (initial)
      Default.push_back(V);
  }

  const std::vector<OptionValue<DataType>> &getDefault() const {
    return Default;
  }

  void assignDefault() { DefaultAssigned = true; }
  void overwriteDefault() { DefaultAssigned = false; }
  bool isDefaultAssigned() { return DefaultAssigned; }
};

// Define how to hold a class type object, such as a string.
// Originally this code inherited from std::vector. In transitioning to a new
// API for command line options we should change this. The new implementation
// of this list_storage specialization implements the minimum subset of the
// std::vector API required for all the current clients.
//
// FIXME: Reduce this API to a more narrow subset of std::vector
//
template <class DataType> class list_storage<DataType, bool> {
  std::vector<DataType> Storage;
  std::vector<OptionValue<DataType>> Default;
  bool DefaultAssigned = false;

public:
  using iterator = typename std::vector<DataType>::iterator;

  iterator begin() { return Storage.begin(); }
  iterator end() { return Storage.end(); }

  using const_iterator = typename std::vector<DataType>::const_iterator;

  const_iterator begin() const { return Storage.begin(); }
  const_iterator end() const { return Storage.end(); }

  using size_type = typename std::vector<DataType>::size_type;

  size_type size() const { return Storage.size(); }

  bool empty() const { return Storage.empty(); }

  void push_back(const DataType &value) { Storage.push_back(value); }
  void push_back(DataType &&value) { Storage.push_back(std::move(value)); }

  using reference = typename std::vector<DataType>::reference;
  using const_reference = typename std::vector<DataType>::const_reference;

  reference operator[](size_type pos) { return Storage[pos]; }
  const_reference operator[](size_type pos) const { return Storage[pos]; }

  void clear() {
    Storage.clear();
  }

  iterator erase(const_iterator pos) { return Storage.erase(pos); }
  iterator erase(const_iterator first, const_iterator last) {
    return Storage.erase(first, last);
  }

  iterator erase(iterator pos) { return Storage.erase(pos); }
  iterator erase(iterator first, iterator last) {
    return Storage.erase(first, last);
  }

  iterator insert(const_iterator pos, const DataType &value) {
    return Storage.insert(pos, value);
  }
  iterator insert(const_iterator pos, DataType &&value) {
    return Storage.insert(pos, std::move(value));
  }

  iterator insert(iterator pos, const DataType &value) {
    return Storage.insert(pos, value);
  }
  iterator insert(iterator pos, DataType &&value) {
    return Storage.insert(pos, std::move(value));
  }

  reference front() { return Storage.front(); }
  const_reference front() const { return Storage.front(); }

  operator std::vector<DataType> &() { return Storage; }
  operator ArrayRef<DataType>() const { return Storage; }
  std::vector<DataType> *operator&() { return &Storage; }
  const std::vector<DataType> *operator&() const { return &Storage; }

  template <class T> void addValue(const T &V, bool initial = false) {
    Storage.push_back(V);
    if (initial)
      Default.push_back(OptionValue<DataType>(V));
  }

  const std::vector<OptionValue<DataType>> &getDefault() const {
    return Default;
  }

  void assignDefault() { DefaultAssigned = true; }
  void overwriteDefault() { DefaultAssigned = false; }
  bool isDefaultAssigned() { return DefaultAssigned; }
};

//===----------------------------------------------------------------------===//
// A list of command line options.
//
template <class DataType, class StorageClass = bool,
          class ParserClass = parser<DataType>>
class list : public Option, public list_storage<DataType, StorageClass> {
  std::vector<unsigned> Positions;
  ParserClass Parser;

  enum ValueExpected getValueExpectedFlagDefault() const override {
    return Parser.getValueExpectedFlagDefault();
  }

  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
    return Parser.getExtraOptionNames(OptionNames);
  }

  bool handleOccurrence(unsigned pos, StringRef ArgName,
                        StringRef Arg) override {
    typename ParserClass::parser_data_type Val =
        typename ParserClass::parser_data_type();
    if (list_storage<DataType, StorageClass>::isDefaultAssigned()) {
      clear();
      list_storage<DataType, StorageClass>::overwriteDefault();
    }
    if (Parser.parse(*this, ArgName, Arg, Val))
      return true; // Parse Error!
    list_storage<DataType, StorageClass>::addValue(Val);
    setPosition(pos);
    Positions.push_back(pos);
    Callback(Val);
    return false;
  }

  // Forward printing stuff to the parser...
  size_t getOptionWidth() const override {
    return Parser.getOptionWidth(*this);
  }

  void printOptionInfo(size_t GlobalWidth) const override {
    Parser.printOptionInfo(*this, GlobalWidth);
  }

  // Unimplemented: list options don't currently store their default value.
  void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
  }

  void setDefault() override {
    Positions.clear();
    list_storage<DataType, StorageClass>::clear();
    for (auto &Val : list_storage<DataType, StorageClass>::getDefault())
      list_storage<DataType, StorageClass>::addValue(Val.getValue());
  }

  void done() {
    addArgument();
    Parser.initialize();
  }

public:
  // Command line options should not be copyable
  list(const list &) = delete;
  list &operator=(const list &) = delete;

  ParserClass &getParser() { return Parser; }

  unsigned getPosition(unsigned optnum) const {
    assert(optnum < this->size() && "Invalid option index");
    return Positions[optnum];
  }

  void clear() {
    Positions.clear();
    list_storage<DataType, StorageClass>::clear();
  }

  // setInitialValues - Used by the cl::list_init modifier...
  void setInitialValues(ArrayRef<DataType> Vs) {
    assert(!(list_storage<DataType, StorageClass>::isDefaultAssigned()) &&
           "Cannot have two default values");
    list_storage<DataType, StorageClass>::assignDefault();
    for (auto &Val : Vs)
      list_storage<DataType, StorageClass>::addValue(Val, true);
  }

  void setNumAdditionalVals(unsigned n) { Option::setNumAdditionalVals(n); }

  template <class... Mods>
  explicit list(const Mods &... Ms)
      : Option(ZeroOrMore, NotHidden), Parser(*this) {
    apply(this, Ms...);
    done();
  }

  void setCallback(
      std::function<void(const typename ParserClass::parser_data_type &)> CB) {
    Callback = CB;
  }

  std::function<void(const typename ParserClass::parser_data_type &)> Callback =
      [](const typename ParserClass::parser_data_type &) {};
};

// Modifier to set the number of additional values.
struct multi_val {
  unsigned AdditionalVals;
  explicit multi_val(unsigned N) : AdditionalVals(N) {}

  template <typename D, typename S, typename P>
  void apply(list<D, S, P> &L) const {
    L.setNumAdditionalVals(AdditionalVals);
  }
};
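
// A sketch of cl::list with cl::multi_val: each occurrence of the flag
// consumes the declared number of values (the option is hypothetical):
//
//   // -point <x> <y>: every -point takes exactly two values.
//   static cl::list<int> Points("point", cl::desc("Specify points"),
//                               cl::multi_val(2));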

//===----------------------------------------------------------------------===//
// Default storage class definition: external storage.  This implementation
// assumes the user will specify a variable to store the data into with the
// cl::location(x) modifier.
//
template <class DataType, class StorageClass> class bits_storage {
  unsigned *Location = nullptr; // Where to store the bits...

  template <class T> static unsigned Bit(const T &V) {
    unsigned BitPos = static_cast<unsigned>(V);
    assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
           "enum exceeds width of bit vector!");
    return 1 << BitPos;
  }

public:
  bits_storage() = default;

  bool setLocation(Option &O, unsigned &L) {
    if (Location)
      return O.error("cl::location(x) specified more than once!");
    Location = &L;
    return false;
  }

  template <class T> void addValue(const T &V) {
    assert(Location != nullptr &&
           "cl::location(...) not specified for a command "
           "line option with external storage!");
    *Location |= Bit(V);
  }

  unsigned getBits() { return *Location; }

  void clear() {
    if (Location)
      *Location = 0;
  }

  template <class T> bool isSet(const T &V) {
    return (*Location & Bit(V)) != 0;
  }
};

// Define how to hold bits.  Since we can inherit from a class, we do so.
// This makes us exactly compatible with the bits in all cases that it is used.
//
template <class DataType> class bits_storage<DataType, bool> {
  unsigned Bits{0}; // Where to store the bits...

  template <class T> static unsigned Bit(const T &V) {
    unsigned BitPos = static_cast<unsigned>(V);
    assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
           "enum exceeds width of bit vector!");
    return 1 << BitPos;
  }

public:
  template <class T> void addValue(const T &V) { Bits |= Bit(V); }

  unsigned getBits() { return Bits; }

  void clear() { Bits = 0; }

  template <class T> bool isSet(const T &V) { return (Bits & Bit(V)) != 0; }
};

//===----------------------------------------------------------------------===//
// A bit vector of command options.
//
template <class DataType, class Storage = bool,
          class ParserClass = parser<DataType>>
class bits : public Option, public bits_storage<DataType, Storage> {
  std::vector<unsigned> Positions;
  ParserClass Parser;

  enum ValueExpected getValueExpectedFlagDefault() const override {
    return Parser.getValueExpectedFlagDefault();
  }

  void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
    return Parser.getExtraOptionNames(OptionNames);
  }

  bool handleOccurrence(unsigned pos, StringRef ArgName,
                        StringRef Arg) override {
    typename ParserClass::parser_data_type Val =
        typename ParserClass::parser_data_type();
    if (Parser.parse(*this, ArgName, Arg, Val))
      return true; // Parse Error!
    this->addValue(Val);
    setPosition(pos);
    Positions.push_back(pos);
    Callback(Val);
    return false;
  }

  // Forward printing stuff to the parser...
  size_t getOptionWidth() const override {
    return Parser.getOptionWidth(*this);
  }

  void printOptionInfo(size_t GlobalWidth) const override {
    Parser.printOptionInfo(*this, GlobalWidth);
  }

  // Unimplemented: bits options don't currently store their default values.
  void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
  }

  void setDefault() override { bits_storage<DataType, Storage>::clear(); }

  void done() {
    addArgument();
    Parser.initialize();
  }

public:
  // Command line options should not be copyable
  bits(const bits &) = delete;
  bits &operator=(const bits &) = delete;

  ParserClass &getParser() { return Parser; }

  unsigned getPosition(unsigned optnum) const {
    assert(optnum < this->size() && "Invalid option index");
    return Positions[optnum];
  }

  template <class... Mods>
  explicit bits(const Mods &... Ms)
      : Option(ZeroOrMore, NotHidden), Parser(*this) {
    apply(this, Ms...);
    done();
  }

  void setCallback(
      std::function<void(const typename ParserClass::parser_data_type &)> CB) {
    Callback = CB;
  }

  std::function<void(const typename ParserClass::parser_data_type &)> Callback =
      [](const typename ParserClass::parser_data_type &) {};
};
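
// A sketch of cl::bits: each recognized value sets one bit in a shared mask
// (the enum and option are hypothetical):
//
//   enum DebugComponent { LexerC, ParserC, CodegenC };
//   static cl::bits<DebugComponent> DebugBits(
//       cl::desc("Components to debug:"),
//       cl::values(clEnumVal(LexerC, "Debug the lexer"),
//                  clEnumVal(ParserC, "Debug the parser"),
//                  clEnumVal(CodegenC, "Debug code generation")));
//
// Query the result with DebugBits.isSet(ParserC) or DebugBits.getBits().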

//===----------------------------------------------------------------------===//
// Aliased command line option (alias this name to a preexisting name)
//

class alias : public Option {
  Option *AliasFor;

  bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
                        StringRef Arg) override {
    return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
  }

  bool addOccurrence(unsigned pos, StringRef /*ArgName*/, StringRef Value,
                     bool MultiArg = false) override {
    return AliasFor->addOccurrence(pos, AliasFor->ArgStr, Value, MultiArg);
  }

  // Handle printing stuff...
  size_t getOptionWidth() const override;
  void printOptionInfo(size_t GlobalWidth) const override;

  // Aliases do not need to print their values.
  void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
  }

  void setDefault() override { AliasFor->setDefault(); }

  ValueExpected getValueExpectedFlagDefault() const override {
    return AliasFor->getValueExpectedFlag();
  }

  void done() {
    if (!hasArgStr())
      error("cl::alias must have argument name specified!");
    if (!AliasFor)
      error("cl::alias must have an cl::aliasopt(option) specified!");
    if (!Subs.empty())
      error("cl::alias must not have cl::sub(), aliased option's cl::sub() will be used!");
    Subs = AliasFor->Subs;
    Categories = AliasFor->Categories;
    addArgument();
  }

public:
  // Command line options should not be copyable
  alias(const alias &) = delete;
  alias &operator=(const alias &) = delete;

  void setAliasFor(Option &O) {
    if (AliasFor)
      error("cl::alias must only have one cl::aliasopt(...) specified!");
    AliasFor = &O;
  }

  template <class... Mods>
  explicit alias(const Mods &... Ms)
      : Option(Optional, Hidden), AliasFor(nullptr) {
    apply(this, Ms...);
    done();
  }
};

// Modifier to set the option an alias aliases.
struct aliasopt {
  Option &Opt;

  explicit aliasopt(Option &O) : Opt(O) {}

  void apply(alias &A) const { A.setAliasFor(Opt); }
};
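
// A sketch of aliasing an existing option (hypothetical names):
//
//   static cl::opt<bool> Verbose("verbose", cl::desc("Verbose output"));
//   static cl::alias VerboseA("v", cl::desc("Alias for -verbose"),
//                             cl::aliasopt(Verbose));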

// Provide additional help at the end of the normal help output. All
// occurrences of cl::extrahelp will be accumulated and printed to stderr at
// the end of the regular help, just before exit is called.
struct extrahelp {
  StringRef morehelp;

  explicit extrahelp(StringRef help);
};

void PrintVersionMessage();

/// This function just prints the help message, exactly the same way as if the
/// -help or -help-hidden option had been given on the command line.
///
/// \param Hidden if true will print hidden options
/// \param Categorized if true print options in categories
void PrintHelpMessage(bool Hidden = false, bool Categorized = false);

//===----------------------------------------------------------------------===//
// Public interface for accessing registered options.
//

/// Use this to get a StringMap of all registered named options
/// (e.g. -help).
///
/// \return A reference to the StringMap used by the cl APIs to parse options.
///
/// Access to unnamed (i.e. positional) arguments is not provided because
/// it is expected that the client already has access to these.
///
/// Typical usage:
/// \code
/// int main(int argc, char *argv[]) {
///   StringMap<llvm::cl::Option *> &opts = llvm::cl::getRegisteredOptions();
///   assert(opts.count("help") == 1);
///   opts["help"]->setDescription("Show alphabetical help information");
///   // More code
///   llvm::cl::ParseCommandLineOptions(argc, argv);
///   // More code
/// }
/// \endcode
///
/// This interface is useful for modifying options in libraries that are out of
/// the control of the client. The options should be modified before calling
/// llvm::cl::ParseCommandLineOptions().
///
/// Hopefully this API can be deprecated soon. Any situation where options need
/// to be modified by tools or libraries should be handled by sane APIs rather
/// than just handing around a global list.
StringMap<Option *> &
getRegisteredOptions(SubCommand &Sub = SubCommand::getTopLevel());

/// Use this to get all registered SubCommands from the provided parser.
///
/// \return A range of all SubCommand pointers registered with the parser.
///
/// Typical usage:
/// \code
/// int main(int argc, char *argv[]) {
///   llvm::cl::ParseCommandLineOptions(argc, argv);
///   for (auto* S : llvm::cl::getRegisteredSubcommands()) {
///     if (*S) {
///       std::cout << "Executing subcommand: " << S->getName() << std::endl;
///       // Execute some function based on the name...
///     }
///   }
/// }
/// \endcode
///
/// This interface is useful for defining subcommands in libraries and
/// the dispatch from a single point (like in the main function).
iterator_range<typename SmallPtrSet<SubCommand *, 4>::iterator>
getRegisteredSubcommands();

//===----------------------------------------------------------------------===//
// Standalone command line processing utilities.
//

/// Tokenizes a command line that can contain escapes and quotes.
///
/// The quoting rules match those used by GCC and other tools that use
/// libiberty's buildargv() or expandargv() utilities, and do not match bash.
/// They differ from buildargv() in the treatment of backslashes that do not
/// escape a special character, to make it possible to accept most Windows
/// file paths.
///
/// \param [in] Source The string to be split on whitespace with quotes.
/// \param [in] Saver Delegates back to the caller for saving parsed strings.
/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
/// lines and end of the response file to be marked with a nullptr string.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
                            SmallVectorImpl<const char *> &NewArgv,
                            bool MarkEOLs = false);
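
// A usage sketch (the input string is illustrative): the Saver, backed by a
// BumpPtrAllocator, owns any unescaped copies of the tokens.
//
//   llvm::BumpPtrAllocator Alloc;
//   llvm::StringSaver Saver(Alloc);
//   llvm::SmallVector<const char *, 8> Argv;
//   llvm::cl::TokenizeGNUCommandLine("-o \"a b.o\" input.c", Saver, Argv);
//   // Argv now holds: "-o", "a b.o", "input.c"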

/// Tokenizes a string of Windows command line arguments, which may contain
/// quotes and escaped quotes.
///
/// See MSDN docs for CommandLineToArgvW for information on the quoting rules.
/// http://msdn.microsoft.com/en-us/library/windows/desktop/17w5ykft(v=vs.85).aspx
///
/// For handling a full Windows command line including the executable name at
/// the start, see TokenizeWindowsCommandLineFull below.
///
/// \param [in] Source The string to be split on whitespace with quotes.
/// \param [in] Saver Delegates back to the caller for saving parsed strings.
/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
/// lines and end of the response file to be marked with a nullptr string.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
void TokenizeWindowsCommandLine(StringRef Source, StringSaver &Saver,
                                SmallVectorImpl<const char *> &NewArgv,
                                bool MarkEOLs = false);
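
// A usage sketch of the Windows quoting rules: backslashes that do not
// precede a double quote are taken literally.
//
//   llvm::BumpPtrAllocator Alloc;
//   llvm::StringSaver Saver(Alloc);
//   llvm::SmallVector<const char *, 8> Argv;
//   llvm::cl::TokenizeWindowsCommandLine("/out:\"a b.obj\" C:\\src\\x.c",
//                                        Saver, Argv);
//   // Argv now holds: "/out:a b.obj", "C:\src\x.c"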

/// Tokenizes a Windows command line while attempting to avoid copies. If no
/// quoting or escaping was used, this produces substrings of the original
/// string. If a token requires unquoting, it will be allocated with the
/// StringSaver.
void TokenizeWindowsCommandLineNoCopy(StringRef Source, StringSaver &Saver,
                                      SmallVectorImpl<StringRef> &NewArgv);

/// Tokenizes a Windows full command line, including command name at the start.
///
/// This uses the same syntax rules as TokenizeWindowsCommandLine for all but
/// the first token. But the first token is expected to be parsed as the
/// executable file name in the way CreateProcess would do it, rather than the
/// way the C library startup code would do it: CreateProcess does not consider
/// that \ is ever an escape character (because " is not a valid filename char,
/// hence there's never a need to escape it to be used literally).
///
/// Parameters are the same as for TokenizeWindowsCommandLine. In particular,
/// if you set MarkEOLs = true, then the first word of every line will be
/// parsed using the special rules for command names, making this function
/// suitable for parsing a file full of commands to execute.
void TokenizeWindowsCommandLineFull(StringRef Source, StringSaver &Saver,
                                    SmallVectorImpl<const char *> &NewArgv,
                                    bool MarkEOLs = false);

/// String tokenization function type.  Should be compatible with either
/// Windows or Unix command line tokenizers.
using TokenizerCallback = void (*)(StringRef Source, StringSaver &Saver,
                                   SmallVectorImpl<const char *> &NewArgv,
                                   bool MarkEOLs);

/// Tokenizes the content of a configuration file.
///
/// \param [in] Source The string representing content of config file.
/// \param [in] Saver Delegates back to the caller for saving parsed strings.
/// \param [out] NewArgv All parsed strings are appended to NewArgv.
/// \param [in] MarkEOLs Added for compatibility with TokenizerCallback.
///
/// It works like TokenizeGNUCommandLine, with the added ability to skip
/// comment lines.
///
void tokenizeConfigFile(StringRef Source, StringSaver &Saver,
                        SmallVectorImpl<const char *> &NewArgv,
                        bool MarkEOLs = false);

/// Contains options that control response file expansion.
class ExpansionContext {
  /// Provides persistent storage for parsed strings.
  StringSaver Saver;

  /// Tokenization strategy. Typically Unix or Windows.
  TokenizerCallback Tokenizer;

  /// File system used for all file access when running the expansion.
  vfs::FileSystem *FS;

  /// Path used to resolve relative rsp files. If empty, the file system
  /// current directory is used instead.
  StringRef CurrentDir;

  /// Directories used for search of config files.
  ArrayRef<StringRef> SearchDirs;

  /// True if names of nested response files must be resolved relative to the
  /// including file.
  bool RelativeNames = false;

  /// If true, mark end of lines and the end of the response file with nullptrs
  /// in the Argv vector.
  bool MarkEOLs = false;

  /// True while the body of a config file is being expanded.
  bool InConfigFile = false;

  llvm::Error expandResponseFile(StringRef FName,
                                 SmallVectorImpl<const char *> &NewArgv);

public:
  ExpansionContext(BumpPtrAllocator &A, TokenizerCallback T);

  ExpansionContext &setMarkEOLs(bool X) {
    MarkEOLs = X;
    return *this;
  }

  ExpansionContext &setRelativeNames(bool X) {
    RelativeNames = X;
    return *this;
  }

  ExpansionContext &setCurrentDir(StringRef X) {
    CurrentDir = X;
    return *this;
  }

  ExpansionContext &setSearchDirs(ArrayRef<StringRef> X) {
    SearchDirs = X;
    return *this;
  }

  ExpansionContext &setVFS(vfs::FileSystem *X) {
    FS = X;
    return *this;
  }

  /// Looks for the specified configuration file.
  ///
  /// \param[in]  FileName Name of the file to search for.
  /// \param[out] FilePath File absolute path, if it was found.
  /// \return True if file was found.
  ///
  /// If the specified file name contains a directory separator, it is searched
  /// for by its absolute path. Otherwise the file is looked for sequentially
  /// in the directories specified by the SearchDirs field.
  bool findConfigFile(StringRef FileName, SmallVectorImpl<char> &FilePath);

  /// Reads command line options from the given configuration file.
  ///
  /// \param [in] CfgFile Path to configuration file.
  /// \param [out] Argv Array to which the read options are added.
  /// \return Error::success() if the file was successfully read; a descriptive
  /// error otherwise.
  ///
  /// It reads the content of the specified file, tokenizes it, and expands
  /// "@file" commands, resolving file names in them relative to the directory
  /// where CfgFile resides. It also expands "<CFGDIR>" to the base path of the
  /// current config file.
  Error readConfigFile(StringRef CfgFile, SmallVectorImpl<const char *> &Argv);

  /// Recursively expands "@file" constructs in the provided array of arguments.
  Error expandResponseFiles(SmallVectorImpl<const char *> &Argv);
};
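
// A usage sketch (error handling elided): expand "@file" arguments in place
// using the GNU tokenizer, resolving nested response files relative to the
// file that references them.
//
//   llvm::BumpPtrAllocator Alloc;
//   llvm::cl::ExpansionContext ECtx(Alloc, llvm::cl::TokenizeGNUCommandLine);
//   ECtx.setRelativeNames(true);
//   llvm::SmallVector<const char *, 16> Argv(argv, argv + argc);
//   if (llvm::Error Err = ECtx.expandResponseFiles(Argv))
//     llvm::errs() << llvm::toString(std::move(Err)) << '\n';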

/// A convenience helper which concatenates the options specified by the
/// environment variable EnvVar and command line options, then expands
/// response files recursively.
/// \return true if all @files were expanded successfully or there were none.
bool expandResponseFiles(int Argc, const char *const *Argv, const char *EnvVar,
                         SmallVectorImpl<const char *> &NewArgv);

/// A convenience helper covering the typical use case: expand response files
/// in Argv in place using the given tokenizer.
bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
                         SmallVectorImpl<const char *> &Argv);

/// A convenience helper which concatenates the options specified by the
/// environment variable EnvVar and command line options, then expands response
/// files recursively. The tokenizer is a predefined GNU or Windows one.
/// \return true if all @files were expanded successfully or there were none.
bool expandResponseFiles(int Argc, const char *const *Argv, const char *EnvVar,
                         StringSaver &Saver,
                         SmallVectorImpl<const char *> &NewArgv);
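
// A usage sketch (TOOL_FLAGS is a hypothetical environment variable):
//
//   llvm::BumpPtrAllocator Alloc;
//   llvm::StringSaver Saver(Alloc);
//   llvm::SmallVector<const char *, 32> NewArgv;
//   if (!llvm::cl::expandResponseFiles(argc, argv, "TOOL_FLAGS", Saver,
//                                      NewArgv))
//     /* report the expansion failure */;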

/// Mark all options not part of this category as cl::ReallyHidden.
///
/// \param Category the category of options to keep displaying
///
/// Some tools (like clang-format) like to be able to hide all options that are
/// not specific to the tool. This function allows a tool to specify a single
/// option category to display in the -help output.
void HideUnrelatedOptions(cl::OptionCategory &Category,
                          SubCommand &Sub = SubCommand::getTopLevel());

/// Mark all options not part of the categories as cl::ReallyHidden.
///
/// \param Categories the categories of options to keep displaying.
///
/// Some tools (like clang-format) like to be able to hide all options that are
/// not specific to the tool. This function allows a tool to specify the set of
/// option categories to display in the -help output.
void HideUnrelatedOptions(ArrayRef<const cl::OptionCategory *> Categories,
                          SubCommand &Sub = SubCommand::getTopLevel());
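
// A usage sketch (the clang-format-style pattern; MyToolCategory is a
// hypothetical category): hide every option outside the tool's own category
// before parsing, so -help stays focused.
//
//   static llvm::cl::OptionCategory MyToolCategory("my-tool options");
//   ...
//   llvm::cl::HideUnrelatedOptions(MyToolCategory);
//   llvm::cl::ParseCommandLineOptions(argc, argv);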

/// Reset all command line options to a state that looks as if they have
/// never appeared on the command line.  This is useful for being able to parse
/// a command line multiple times (especially useful for writing tests).
void ResetAllOptionOccurrences();
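
// A usage sketch for tests that parse several command lines in one process
// (TestCases is hypothetical):
//
//   for (const auto &Args : TestCases) {
//     llvm::cl::ResetAllOptionOccurrences();
//     llvm::cl::ParseCommandLineOptions(static_cast<int>(Args.size()),
//                                       Args.data());
//   }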

/// Reset the command line parser back to its initial state.  This removes all
/// options, categories, and subcommands and returns the parser to a state
/// where no options are supported.
void ResetCommandLineParser();

/// Parses `Arg` into the option handler `Handler`.
bool ProvidePositionalOption(Option *Handler, StringRef Arg, int i);

} // end namespace cl

} // end namespace llvm

#endif // LLVM_SUPPORT_COMMANDLINE_H
//===- llvm/Support/LEB128.h - [SU]LEB128 utility functions -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares some utility functions for encoding SLEB128 and
// ULEB128 values.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_LEB128_H
#define LLVM_SUPPORT_LEB128_H

#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// Utility function to encode a SLEB128 value to an output stream. Returns
/// the length in bytes of the encoded value.
inline unsigned encodeSLEB128(int64_t Value, raw_ostream &OS,
                              unsigned PadTo = 0) {
  bool More;
  unsigned Count = 0;
  do {
    uint8_t Byte = Value & 0x7f;
    // NOTE: this assumes that this signed shift is an arithmetic right shift.
    Value >>= 7;
    More = !((((Value == 0) && ((Byte & 0x40) == 0)) ||
              ((Value == -1) && ((Byte & 0x40) != 0))));
    Count++;
    if (More || Count < PadTo)
      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
    OS << char(Byte);
  } while (More);

  // Pad with 0x80 and emit a terminating byte at the end.
  if (Count < PadTo) {
    uint8_t PadValue = Value < 0 ? 0x7f : 0x00;
    for (; Count < PadTo - 1; ++Count)
      OS << char(PadValue | 0x80);
    OS << char(PadValue);
    Count++;
  }
  return Count;
}
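
// Worked example: encodeSLEB128(-123456, OS) emits 0xC0 0xBB 0x78. The loop
// stops once the remaining Value is all sign bits (-1 here) and bit 0x40 of
// the last emitted byte already reproduces that sign on decode.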

/// Utility function to encode a SLEB128 value to a buffer. Returns
/// the length in bytes of the encoded value.
inline unsigned encodeSLEB128(int64_t Value, uint8_t *p, unsigned PadTo = 0) {
  uint8_t *orig_p = p;
  unsigned Count = 0;
  bool More;
  do {
    uint8_t Byte = Value & 0x7f;
    // NOTE: this assumes that this signed shift is an arithmetic right shift.
    Value >>= 7;
    More = !((((Value == 0) && ((Byte & 0x40) == 0)) ||
              ((Value == -1) && ((Byte & 0x40) != 0))));
    Count++;
    if (More || Count < PadTo)
      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
    *p++ = Byte;
  } while (More);

  // Pad with 0x80 and emit a terminating byte at the end.
  if (Count < PadTo) {
    uint8_t PadValue = Value < 0 ? 0x7f : 0x00;
    for (; Count < PadTo - 1; ++Count)
      *p++ = (PadValue | 0x80);
    *p++ = PadValue;
  }
  return (unsigned)(p - orig_p);
}

/// Utility function to encode a ULEB128 value to an output stream. Returns
/// the length in bytes of the encoded value.
inline unsigned encodeULEB128(uint64_t Value, raw_ostream &OS,
                              unsigned PadTo = 0) {
  unsigned Count = 0;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    Count++;
    if (Value != 0 || Count < PadTo)
      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
    OS << char(Byte);
  } while (Value != 0);

  // Pad with 0x80 and emit a null byte at the end.
  if (Count < PadTo) {
    for (; Count < PadTo - 1; ++Count)
      OS << '\x80';
    OS << '\x00';
    Count++;
  }
  return Count;
}

/// Utility function to encode a ULEB128 value to a buffer. Returns
/// the length in bytes of the encoded value.
inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
                              unsigned PadTo = 0) {
  uint8_t *orig_p = p;
  unsigned Count = 0;
  do {
    uint8_t Byte = Value & 0x7f;
    Value >>= 7;
    Count++;
    if (Value != 0 || Count < PadTo)
      Byte |= 0x80; // Mark this byte to show that more bytes will follow.
    *p++ = Byte;
  } while (Value != 0);

  // Pad with 0x80 and emit a null byte at the end.
  if (Count < PadTo) {
    for (; Count < PadTo - 1; ++Count)
      *p++ = '\x80';
    *p++ = '\x00';
  }

  return (unsigned)(p - orig_p);
}
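
// Worked example (the classic DWARF value): 624485 == 0x98765 encodes to the
// three bytes 0xE5 0x8E 0x26.
//
//   uint8_t Buf[10];
//   unsigned Len = llvm::encodeULEB128(624485, Buf); // Len == 3
//   // Buf[0..2] == {0xE5, 0x8E, 0x26}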

/// Utility function to decode a ULEB128 value.
inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = nullptr,
                              const uint8_t *end = nullptr,
                              const char **error = nullptr) {
  const uint8_t *orig_p = p;
  uint64_t Value = 0;
  unsigned Shift = 0;
  if (error)
    *error = nullptr;
  do {
    if (p == end) {
      if (error)
        *error = "malformed uleb128, extends past end";
      if (n)
        *n = (unsigned)(p - orig_p);
      return 0;
    }
    uint64_t Slice = *p & 0x7f;
    if ((Shift >= 64 && Slice != 0) || Slice << Shift >> Shift != Slice) {
      if (error)
        *error = "uleb128 too big for uint64";
      if (n)
        *n = (unsigned)(p - orig_p);
      return 0;
    }
    Value += Slice << Shift;
    Shift += 7;
  } while (*p++ >= 128);
  if (n)
    *n = (unsigned)(p - orig_p);
  return Value;
}

/// Utility function to decode a SLEB128 value.
inline int64_t decodeSLEB128(const uint8_t *p, unsigned *n = nullptr,
                             const uint8_t *end = nullptr,
                             const char **error = nullptr) {
  const uint8_t *orig_p = p;
  int64_t Value = 0;
  unsigned Shift = 0;
  uint8_t Byte;
  if (error)
    *error = nullptr;
  do {
    if (p == end) {
      if (error)
        *error = "malformed sleb128, extends past end";
      if (n)
        *n = (unsigned)(p - orig_p);
      return 0;
    }
    Byte = *p;
    uint64_t Slice = Byte & 0x7f;
    if ((Shift >= 64 && Slice != (Value < 0 ? 0x7f : 0x00)) ||
        (Shift == 63 && Slice != 0 && Slice != 0x7f)) {
      if (error)
        *error = "sleb128 too big for int64";
      if (n)
        *n = (unsigned)(p - orig_p);
      return 0;
    }
    Value |= Slice << Shift;
    Shift += 7;
    ++p;
  } while (Byte >= 128);
  // Sign extend negative numbers if needed.
  if (Shift < 64 && (Byte & 0x40))
    Value |= UINT64_MAX << Shift;
  if (n)
    *n = (unsigned)(p - orig_p);
  return Value;
}
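
// A usage sketch: decode with bounds checking. 0x7E is the one-byte SLEB128
// encoding of -2.
//
//   const uint8_t Bytes[] = {0x7E};
//   unsigned Size;
//   const char *Err = nullptr;
//   int64_t V = llvm::decodeSLEB128(Bytes, &Size, Bytes + sizeof(Bytes), &Err);
//   // V == -2, Size == 1, Err == nullptr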

/// Utility function to get the size of the ULEB128-encoded value.
extern unsigned getULEB128Size(uint64_t Value);

/// Utility function to get the size of the SLEB128-encoded value.
extern unsigned getSLEB128Size(int64_t Value);

} // namespace llvm

#endif // LLVM_SUPPORT_LEB128_H
//===- GenericLoopInfo - Generic Loop Info for graphs -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LoopInfoBase class that is used to identify natural
// loops and determine the loop depth of various nodes in a generic graph of
// blocks.  A natural loop has exactly one entry-point, which is called the
// header. Note that natural loops may actually be several loops that share the
// same header node.
//
// This analysis calculates the nesting structure of loops in a function.  For
// each natural loop identified, this analysis identifies natural loops
// contained entirely within the loop and the basic blocks that make up the
// loop.
//
// It can calculate on the fly various bits of information, for example:
//
//  * whether there is a preheader for the loop
//  * the number of back edges to the header
//  * whether or not a particular block branches out of the loop
//  * the successor blocks of the loop
//  * the loop depth
//  * etc...
//
// Note that this analysis specifically identifies *Loops* not cycles or SCCs
// in the graph.  There can be strongly connected components in the graph which
// this analysis will not recognize and that will not be represented by a Loop
// instance.  In particular, a Loop might be inside such a non-loop SCC, or a
// non-loop SCC might contain a sub-SCC which is a Loop.
//
// For an overview of terminology used in this API (and thus all of our loop
// analyses or transforms), see docs/LoopTerminology.rst.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GENERICLOOPINFO_H
#define LLVM_SUPPORT_GENERICLOOPINFO_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/GenericDomTree.h"

namespace llvm {

template <class N, class M> class LoopInfoBase;
template <class N, class M> class LoopBase;

//===----------------------------------------------------------------------===//
/// Instances of this class are used to represent loops that are detected in the
/// flow graph.
///
template <class BlockT, class LoopT> class LoopBase {
  LoopT *ParentLoop;
  // Loops contained entirely within this one.
  std::vector<LoopT *> SubLoops;

  // The list of blocks in this loop. First entry is the header node.
  std::vector<BlockT *> Blocks;

  SmallPtrSet<const BlockT *, 8> DenseBlockSet;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  /// Indicator that this loop is no longer a valid loop.
  bool IsInvalid = false;
#endif

  LoopBase(const LoopBase<BlockT, LoopT> &) = delete;
  const LoopBase<BlockT, LoopT> &
  operator=(const LoopBase<BlockT, LoopT> &) = delete;

public:
  /// Return the nesting level of this loop.  An outer-most loop has depth 1,
  /// for consistency with loop depth values used for basic blocks, where depth
  /// 0 is used for blocks not inside any loops.
  unsigned getLoopDepth() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    unsigned D = 1;
    for (const LoopT *CurLoop = ParentLoop; CurLoop;
         CurLoop = CurLoop->ParentLoop)
      ++D;
    return D;
  }
  BlockT *getHeader() const { return getBlocks().front(); }
  /// Return the parent loop if it exists or nullptr for top
  /// level loops.
  ///
  /// A loop is either top-level in a function (that is, it is not
  /// contained in any other loop) or it is entirely enclosed in
  /// some other loop.
  /// If a loop is top-level, it has no parent, otherwise its
  /// parent is the innermost loop in which it is enclosed.
  LoopT *getParentLoop() const { return ParentLoop; }

  /// Get the outermost loop in which this loop is contained.
  /// This may be the loop itself, if it already is the outermost loop.
  const LoopT *getOutermostLoop() const {
    const LoopT *L = static_cast<const LoopT *>(this);
    while (L->ParentLoop)
      L = L->ParentLoop;
    return L;
  }

  LoopT *getOutermostLoop() {
    LoopT *L = static_cast<LoopT *>(this);
    while (L->ParentLoop)
      L = L->ParentLoop;
    return L;
  }

  /// This is a raw interface for bypassing addChildLoop.
  void setParentLoop(LoopT *L) {
    assert(!isInvalid() && "Loop not in a valid state!");
    ParentLoop = L;
  }

  /// Return true if the specified loop is contained within this loop.
  bool contains(const LoopT *L) const {
    assert(!isInvalid() && "Loop not in a valid state!");
    if (L == this)
      return true;
    if (!L)
      return false;
    return contains(L->getParentLoop());
  }

  /// Return true if the specified basic block is in this loop.
  bool contains(const BlockT *BB) const {
    assert(!isInvalid() && "Loop not in a valid state!");
    return DenseBlockSet.count(BB);
  }

  /// Return true if the specified instruction is in this loop.
  template <class InstT> bool contains(const InstT *Inst) const {
    return contains(Inst->getParent());
  }

  /// Return the loops contained entirely within this loop.
  const std::vector<LoopT *> &getSubLoops() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    return SubLoops;
  }
  std::vector<LoopT *> &getSubLoopsVector() {
    assert(!isInvalid() && "Loop not in a valid state!");
    return SubLoops;
  }
  typedef typename std::vector<LoopT *>::const_iterator iterator;
  typedef
      typename std::vector<LoopT *>::const_reverse_iterator reverse_iterator;
  iterator begin() const { return getSubLoops().begin(); }
  iterator end() const { return getSubLoops().end(); }
  reverse_iterator rbegin() const { return getSubLoops().rbegin(); }
  reverse_iterator rend() const { return getSubLoops().rend(); }

  // LoopInfo does not detect irreducible control flow, just natural
  // loops. That is, it is possible that there is cyclic control
  // flow within the "innermost loop" or around the "outermost
  // loop".

  /// Return true if the loop does not contain any (natural) loops.
  bool isInnermost() const { return getSubLoops().empty(); }
  /// Return true if the loop does not have a parent (natural) loop
  /// (i.e. it is outermost, which is the same as top-level).
  bool isOutermost() const { return getParentLoop() == nullptr; }

  /// Get a list of the basic blocks which make up this loop.
  ArrayRef<BlockT *> getBlocks() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    return Blocks;
  }
  typedef typename ArrayRef<BlockT *>::const_iterator block_iterator;
  block_iterator block_begin() const { return getBlocks().begin(); }
  block_iterator block_end() const { return getBlocks().end(); }
  inline iterator_range<block_iterator> blocks() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    return make_range(block_begin(), block_end());
  }

  /// Get the number of blocks in this loop in constant time.
  unsigned getNumBlocks() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    return Blocks.size();
  }

  /// Return a direct, mutable handle to the blocks vector so that we can
  /// mutate it efficiently with techniques like `std::remove`.
  std::vector<BlockT *> &getBlocksVector() {
    assert(!isInvalid() && "Loop not in a valid state!");
    return Blocks;
  }
  /// Return a direct, mutable handle to the blocks set so that we can
  /// mutate it efficiently.
  SmallPtrSetImpl<const BlockT *> &getBlocksSet() {
    assert(!isInvalid() && "Loop not in a valid state!");
    return DenseBlockSet;
  }

  /// Return a direct, immutable handle to the blocks set.
  const SmallPtrSetImpl<const BlockT *> &getBlocksSet() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    return DenseBlockSet;
  }

  /// Return true if this loop is no longer valid.  The only valid use of this
  /// helper is "assert(L.isInvalid())" or equivalent, since IsInvalid is set to
  /// true by the destructor.  In other words, if this accessor returns true,
  /// the caller has already triggered UB by calling this accessor; and so it
  /// can only be called in a context where a return value of true indicates a
  /// programmer error.
  bool isInvalid() const {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    return IsInvalid;
#else
    return false;
#endif
  }

  /// True if the terminator in the block can branch to another block that is
  /// outside of the current loop. \p BB must be inside the loop.
  bool isLoopExiting(const BlockT *BB) const {
    assert(!isInvalid() && "Loop not in a valid state!");
    assert(contains(BB) && "Exiting block must be part of the loop");
    for (const auto *Succ : children<const BlockT *>(BB)) {
      if (!contains(Succ))
        return true;
    }
    return false;
  }

  /// Returns true if \p BB is a loop-latch.
  /// A latch block is a block that contains a branch back to the header.
  /// This function is useful when there are multiple latches in a loop
  /// because getLoopLatch() will return nullptr in that case.
  bool isLoopLatch(const BlockT *BB) const {
    assert(!isInvalid() && "Loop not in a valid state!");
    assert(contains(BB) && "block does not belong to the loop");

    BlockT *Header = getHeader();
    auto PredBegin = GraphTraits<Inverse<BlockT *>>::child_begin(Header);
    auto PredEnd = GraphTraits<Inverse<BlockT *>>::child_end(Header);
    return std::find(PredBegin, PredEnd, BB) != PredEnd;
  }

  /// Calculate the number of back edges to the loop header.
  unsigned getNumBackEdges() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    unsigned NumBackEdges = 0;
    BlockT *H = getHeader();

    for (const auto Pred : children<Inverse<BlockT *>>(H))
      if (contains(Pred))
        ++NumBackEdges;

    return NumBackEdges;
  }

  //===--------------------------------------------------------------------===//
  // APIs for simple analysis of the loop.
  //
  // Note that all of these methods can fail on general loops (ie, there may not
  // be a preheader, etc).  For best success, the loop simplification and
  // induction variable canonicalization pass should be used to normalize loops
  // for easy analysis.  These methods assume canonical loops.

  /// Return all blocks inside the loop that have successors outside of the
  /// loop. These are the blocks _inside of the current loop_ which branch out.
  /// The returned list is always unique.
  void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const;

  /// If getExitingBlocks would return exactly one block, return that block.
  /// Otherwise return null.
  BlockT *getExitingBlock() const;

  /// Return all of the successor blocks of this loop. These are the blocks
  /// _outside of the current loop_ which are branched to.
  void getExitBlocks(SmallVectorImpl<BlockT *> &ExitBlocks) const;

  /// If getExitBlocks would return exactly one block, return that block.
  /// Otherwise return null.
  BlockT *getExitBlock() const;

  /// Return true if no exit block for the loop has a predecessor that is
  /// outside the loop.
  bool hasDedicatedExits() const;

  /// Return all unique successor blocks of this loop.
  /// These are the blocks _outside of the current loop_ which are branched to.
  void getUniqueExitBlocks(SmallVectorImpl<BlockT *> &ExitBlocks) const;

  /// Return all unique successor blocks of this loop, except that successors
  /// reached only from the latch block are not considered. If an exit block
  /// reached from the latch also has a non-latch predecessor inside the loop,
  /// it will still be added to ExitBlocks.
  /// These are the blocks _outside of the current loop_ which are branched to.
  void getUniqueNonLatchExitBlocks(SmallVectorImpl<BlockT *> &ExitBlocks) const;

  /// If getUniqueExitBlocks would return exactly one block, return that block.
  /// Otherwise return null.
  BlockT *getUniqueExitBlock() const;

  /// Return true if this loop does not have any exit blocks.
  bool hasNoExitBlocks() const;

  /// Edge type.
  typedef std::pair<BlockT *, BlockT *> Edge;

  /// Return all pairs of (_inside_block_,_outside_block_).
  void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const;

  /// If there is a preheader for this loop, return it. A loop has a preheader
  /// if there is only one edge to the header of the loop from outside of the
  /// loop. If this is the case, the block branching to the header of the loop
  /// is the preheader node.
  ///
  /// This method returns null if there is no preheader for the loop.
  BlockT *getLoopPreheader() const;

  /// If the given loop's header has exactly one unique predecessor outside the
  /// loop, return it. Otherwise return null.
  /// This is less strict than the loop "preheader" concept, which requires
  /// the predecessor to have exactly one successor.
  BlockT *getLoopPredecessor() const;

  /// If there is a single latch block for this loop, return it.
  /// A latch block is a block that contains a branch back to the header.
  BlockT *getLoopLatch() const;

  /// Return all loop latch blocks of this loop. A latch block is a block that
  /// contains a branch back to the header.
  void getLoopLatches(SmallVectorImpl<BlockT *> &LoopLatches) const {
    assert(!isInvalid() && "Loop not in a valid state!");
    BlockT *H = getHeader();
    for (const auto Pred : children<Inverse<BlockT *>>(H))
      if (contains(Pred))
        LoopLatches.push_back(Pred);
  }

  /// Return all inner loops in the loop nest rooted by the loop in preorder,
  /// with siblings in forward program order.
  template <class Type>
  static void getInnerLoopsInPreorder(const LoopT &L,
                                      SmallVectorImpl<Type> &PreOrderLoops) {
    SmallVector<LoopT *, 4> PreOrderWorklist;
    PreOrderWorklist.append(L.rbegin(), L.rend());

    while (!PreOrderWorklist.empty()) {
      LoopT *L = PreOrderWorklist.pop_back_val();
      // Sub-loops are stored in forward program order, but we process the
      // worklist backwards, so append them in reverse order.
      PreOrderWorklist.append(L->rbegin(), L->rend());
      PreOrderLoops.push_back(L);
    }
  }

  /// Return all loops in the loop nest rooted by the loop in preorder, with
  /// siblings in forward program order.
  SmallVector<const LoopT *, 4> getLoopsInPreorder() const {
    SmallVector<const LoopT *, 4> PreOrderLoops;
    const LoopT *CurLoop = static_cast<const LoopT *>(this);
    PreOrderLoops.push_back(CurLoop);
    getInnerLoopsInPreorder(*CurLoop, PreOrderLoops);
    return PreOrderLoops;
  }
  SmallVector<LoopT *, 4> getLoopsInPreorder() {
    SmallVector<LoopT *, 4> PreOrderLoops;
    LoopT *CurLoop = static_cast<LoopT *>(this);
    PreOrderLoops.push_back(CurLoop);
    getInnerLoopsInPreorder(*CurLoop, PreOrderLoops);
    return PreOrderLoops;
  }
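
  // A usage sketch (Outermost and visit() are hypothetical names): walk a
  // whole loop nest from its outermost loop, with parents visited before
  // children.
  //
  //   for (const LoopT *L : Outermost->getLoopsInPreorder())
  //     visit(L);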

  //===--------------------------------------------------------------------===//
  // APIs for updating loop information after changing the CFG
  //

  /// This method is used by other analyses to update loop information.
  /// NewBB is set to be a new member of the current loop.
  /// Because of this, it is added as a member of all parent loops, and is added
  /// to the specified LoopInfo object as belonging to the current loop.  It
  /// is not valid to replace the loop header with this method.
  void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LI);

  /// This is used when splitting loops up. It replaces the OldChild entry in
  /// our children list with NewChild, and updates the parent pointer of
  /// OldChild to be null and the NewChild to be this loop.
  /// This updates the loop depth of the new child.
  void replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild);

  /// Add the specified loop to be a child of this loop.
  /// This updates the loop depth of the new child.
  void addChildLoop(LoopT *NewChild) {
    assert(!isInvalid() && "Loop not in a valid state!");
    assert(!NewChild->ParentLoop && "NewChild already has a parent!");
    NewChild->ParentLoop = static_cast<LoopT *>(this);
    SubLoops.push_back(NewChild);
  }

  /// This removes the specified child from being a subloop of this loop. The
  /// loop is not deleted, as it will presumably be inserted into another loop.
  LoopT *removeChildLoop(iterator I) {
    assert(!isInvalid() && "Loop not in a valid state!");
    assert(I != SubLoops.end() && "Cannot remove end iterator!");
    LoopT *Child = *I;
    assert(Child->ParentLoop == this && "Child is not a child of this loop!");
    SubLoops.erase(SubLoops.begin() + (I - begin()));
    Child->ParentLoop = nullptr;
    return Child;
  }

  /// This removes the specified child from being a subloop of this loop. The
  /// loop is not deleted, as it will presumably be inserted into another loop.
  LoopT *removeChildLoop(LoopT *Child) {
    return removeChildLoop(llvm::find(*this, Child));
  }

  /// This adds a basic block directly to the basic block list.
  /// This should only be used by transformations that create new loops.  Other
  /// transformations should use addBasicBlockToLoop.
  void addBlockEntry(BlockT *BB) {
    assert(!isInvalid() && "Loop not in a valid state!");
    Blocks.push_back(BB);
    DenseBlockSet.insert(BB);
  }

  /// Interface to reverse Blocks[from, end of loop] in this loop.
  void reverseBlock(unsigned from) {
    assert(!isInvalid() && "Loop not in a valid state!");
    std::reverse(Blocks.begin() + from, Blocks.end());
  }

  /// Interface to do reserve() for Blocks.
  void reserveBlocks(unsigned size) {
    assert(!isInvalid() && "Loop not in a valid state!");
    Blocks.reserve(size);
  }

  /// This method is used to move BB (which must be part of this loop) to be the
  /// loop header of the loop (the block that dominates all others).
  void moveToHeader(BlockT *BB) {
    assert(!isInvalid() && "Loop not in a valid state!");
    if (Blocks[0] == BB)
      return;
    for (unsigned i = 0;; ++i) {
      assert(i != Blocks.size() && "Loop does not contain BB!");
      if (Blocks[i] == BB) {
        Blocks[i] = Blocks[0];
        Blocks[0] = BB;
        return;
      }
    }
  }

  /// This removes the specified basic block from the current loop, updating the
  /// Blocks as appropriate. This does not update the mapping in the LoopInfo
  /// class.
  void removeBlockFromLoop(BlockT *BB) {
    assert(!isInvalid() && "Loop not in a valid state!");
    auto I = find(Blocks, BB);
    assert(I != Blocks.end() && "N is not in this list!");
    Blocks.erase(I);

    DenseBlockSet.erase(BB);
  }

  /// Verify loop structure
  void verifyLoop() const;

  /// Verify loop structure of this loop and all nested loops.
  void verifyLoopNest(DenseSet<const LoopT *> *Loops) const;

  /// Returns true if the loop is annotated parallel.
  ///
  /// Derived classes can override this method using static template
  /// polymorphism.
  bool isAnnotatedParallel() const { return false; }

  /// Print loop with all the BBs inside it.
  void print(raw_ostream &OS, bool Verbose = false, bool PrintNested = true,
             unsigned Depth = 0) const;

protected:
  friend class LoopInfoBase<BlockT, LoopT>;

  /// This creates an empty loop.
  LoopBase() : ParentLoop(nullptr) {}

  explicit LoopBase(BlockT *BB) : ParentLoop(nullptr) {
    Blocks.push_back(BB);
    DenseBlockSet.insert(BB);
  }

  // Since loop passes like SCEV are allowed to key analysis results off of
  // `Loop` pointers, we cannot re-use pointers within a loop pass manager.
  // This means loop passes should not be `delete`-ing `Loop` objects directly
  // (and risk a later `Loop` allocation re-using the address of a previous one)
  // but should be using LoopInfo::markAsRemoved, which keeps the `Loop`
  // pointer around until the end of the lifetime of the `LoopInfo` object.
  //
  // To make it easier to follow this rule, we mark the destructor as
  // non-public.
  ~LoopBase() {
    for (auto *SubLoop : SubLoops)
      SubLoop->~LoopT();

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    IsInvalid = true;
#endif
    SubLoops.clear();
    Blocks.clear();
    DenseBlockSet.clear();
    ParentLoop = nullptr;
  }
};

template <class BlockT, class LoopT>
raw_ostream &operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
  Loop.print(OS);
  return OS;
}

//===----------------------------------------------------------------------===//
/// This class builds and contains all of the top-level loop
/// structures in the specified function.
///

template <class BlockT, class LoopT> class LoopInfoBase {
  // BBMap - Mapping of basic blocks to the innermost loop they occur in.
  DenseMap<const BlockT *, LoopT *> BBMap;
  std::vector<LoopT *> TopLevelLoops;
  BumpPtrAllocator LoopAllocator;

  friend class LoopBase<BlockT, LoopT>;
  friend class LoopInfo;

  void operator=(const LoopInfoBase &) = delete;
  LoopInfoBase(const LoopInfoBase &) = delete;

public:
  LoopInfoBase() = default;
  ~LoopInfoBase() { releaseMemory(); }

  LoopInfoBase(LoopInfoBase &&Arg)
      : BBMap(std::move(Arg.BBMap)),
        TopLevelLoops(std::move(Arg.TopLevelLoops)),
        LoopAllocator(std::move(Arg.LoopAllocator)) {
    // We have to clear the argument's top-level loops, as we've taken ownership.
    Arg.TopLevelLoops.clear();
  }
  LoopInfoBase &operator=(LoopInfoBase &&RHS) {
    BBMap = std::move(RHS.BBMap);

    for (auto *L : TopLevelLoops)
      L->~LoopT();

    TopLevelLoops = std::move(RHS.TopLevelLoops);
    LoopAllocator = std::move(RHS.LoopAllocator);
    RHS.TopLevelLoops.clear();
    return *this;
  }

  void releaseMemory() {
    BBMap.clear();

    for (auto *L : TopLevelLoops)
      L->~LoopT();
    TopLevelLoops.clear();
    LoopAllocator.Reset();
  }

  template <typename... ArgsTy> LoopT *AllocateLoop(ArgsTy &&...Args) {
    LoopT *Storage = LoopAllocator.Allocate<LoopT>();
    return new (Storage) LoopT(std::forward<ArgsTy>(Args)...);
  }

  /// iterator/begin/end - The interface to the top-level loops in the current
  /// function.
  ///
  typedef typename std::vector<LoopT *>::const_iterator iterator;
  typedef
      typename std::vector<LoopT *>::const_reverse_iterator reverse_iterator;
  iterator begin() const { return TopLevelLoops.begin(); }
  iterator end() const { return TopLevelLoops.end(); }
  reverse_iterator rbegin() const { return TopLevelLoops.rbegin(); }
  reverse_iterator rend() const { return TopLevelLoops.rend(); }
  bool empty() const { return TopLevelLoops.empty(); }

  /// Return all of the loops in the function in preorder across the loop
  /// nests, with siblings in forward program order.
  ///
  /// Note that because loops form a forest of trees, preorder is equivalent to
  /// reverse postorder.
  SmallVector<LoopT *, 4> getLoopsInPreorder() const;

  /// Return all of the loops in the function in preorder across the loop
  /// nests, with siblings in *reverse* program order.
  ///
  /// Note that because loops form a forest of trees, preorder is equivalent to
  /// reverse postorder.
  ///
  /// Also note that this is *not* a reverse preorder. Only the siblings are in
  /// reverse program order.
  SmallVector<LoopT *, 4> getLoopsInReverseSiblingPreorder() const;

  /// Return the innermost loop that BB lives in. If a basic block is in no
  /// loop (for example the entry node), null is returned.
  LoopT *getLoopFor(const BlockT *BB) const { return BBMap.lookup(BB); }

  /// Same as getLoopFor.
  const LoopT *operator[](const BlockT *BB) const { return getLoopFor(BB); }

  /// Return the loop nesting level of the specified block. A depth of 0 means
  /// the block is not inside any loop.
  unsigned getLoopDepth(const BlockT *BB) const {
    const LoopT *L = getLoopFor(BB);
    return L ? L->getLoopDepth() : 0;
  }

  /// True if the block is a loop header node.
  bool isLoopHeader(const BlockT *BB) const {
    const LoopT *L = getLoopFor(BB);
    return L && L->getHeader() == BB;
  }
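
  // A usage sketch against the LLVM IR instantiation, where LoopInfo/Loop are
  // the concrete LoopInfoBase/LoopBase types and BB is a BasicBlock*:
  //
  //   unsigned Depth = LI.getLoopDepth(BB); // 0 => BB is in no loop
  //   if (LI.isLoopHeader(BB))
  //     /* BB starts the innermost loop returned by LI.getLoopFor(BB) */;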

  /// Return the top-level loops.
  const std::vector<LoopT *> &getTopLevelLoops() const { return TopLevelLoops; }

  /// Return the top-level loops.
  std::vector<LoopT *> &getTopLevelLoopsVector() { return TopLevelLoops; }

  /// This removes the specified top-level loop from this loop info object.
  /// The loop is not deleted, as it will presumably be inserted into
  /// another loop.
  LoopT *removeLoop(iterator I) {
    assert(I != end() && "Cannot remove end iterator!");
    LoopT *L = *I;
    assert(L->isOutermost() && "Not a top-level loop!");
    TopLevelLoops.erase(TopLevelLoops.begin() + (I - begin()));
    return L;
  }

  /// Change the top-level loop that contains BB to the specified loop.
  /// This should be used by transformations that restructure the loop hierarchy
  /// tree.
  void changeLoopFor(BlockT *BB, LoopT *L) {
    if (!L) {
      BBMap.erase(BB);
      return;
    }
    BBMap[BB] = L;
  }

  /// Replace the specified loop in the top-level loops list with the indicated
  /// loop.
  void changeTopLevelLoop(LoopT *OldLoop, LoopT *NewLoop) {
    auto I = find(TopLevelLoops, OldLoop);
    assert(I != TopLevelLoops.end() && "Old loop not at top level!");
    *I = NewLoop;
    assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
           "Loops already embedded into a subloop!");
  }

  /// This adds the specified loop to the collection of top-level loops.
  void addTopLevelLoop(LoopT *New) {
    assert(New->isOutermost() && "Loop already in subloop!");
    TopLevelLoops.push_back(New);
  }

  /// This method completely removes BB from all data structures,
  /// including all of the Loop objects it is nested in and our mapping from
  /// BasicBlocks to loops.
  void removeBlock(BlockT *BB) {
    auto I = BBMap.find(BB);
    if (I != BBMap.end()) {
      for (LoopT *L = I->second; L; L = L->getParentLoop())
        L->removeBlockFromLoop(BB);

      BBMap.erase(I);
    }
  }

  // Internals

  static bool isNotAlreadyContainedIn(const LoopT *SubLoop,
                                      const LoopT *ParentLoop) {
    if (!SubLoop)
      return true;
    if (SubLoop == ParentLoop)
      return false;
    return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
  }

  /// Create the loop forest using a stable algorithm.
  void analyze(const DominatorTreeBase<BlockT, false> &DomTree);

  // Debugging
  void print(raw_ostream &OS) const;

  void verify(const DominatorTreeBase<BlockT, false> &DomTree) const;

  /// Destroy a loop that has been removed from the `LoopInfo` nest.
  ///
  /// This runs the destructor of the loop object making it invalid to
  /// reference afterward. The memory is retained so that the *pointer* to the
  /// loop remains valid.
  ///
  /// The caller is responsible for removing this loop from the loop nest and
  /// otherwise disconnecting it from the broader `LoopInfo` data structures.
  /// Callers that don't naturally handle this themselves should probably call
  /// `erase' instead.
  void destroy(LoopT *L) {
    L->~LoopT();

    // Since LoopAllocator is a BumpPtrAllocator, this Deallocate only poisons
    // \c L, but the pointer remains valid for non-dereferencing uses.
    LoopAllocator.Deallocate(L);
  }
};

} // namespace llvm

#endif // LLVM_SUPPORT_GENERICLOOPINFO_H
//===- ARMTargetParser.def - ARM target parsing defines ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides defines to build up the ARM target parser's logic.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

#ifndef ARM_FPU
#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION)
#endif
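
// How these rows are consumed (a sketch of the usual X-macro pattern; the
// real consumers live in the ARM target parser sources): a client defines
// ARM_FPU to expand each row and then includes this file. Macros the client
// leaves undefined default to empty expansions via the #ifndef blocks, and
// each macro is #undef'd again at the end of its block.
//
//   #define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) NAME,
//   static const char *FPUNames[] = {
//   #include "ARMTargetParser.def"
//   };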
ARM_FPU("invalid", FK_INVALID, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("none", FK_NONE, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfp", FK_VFP, FPUVersion::VFPV2, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv2", FK_VFPV2, FPUVersion::VFPV2, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv3", FK_VFPV3, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv3-fp16", FK_VFPV3_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv3-d16", FK_VFPV3_D16, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("vfpv3-d16-fp16", FK_VFPV3_D16_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("vfpv3xd", FK_VFPV3XD, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("vfpv3xd-fp16", FK_VFPV3XD_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("vfpv4", FK_VFPV4, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv4-d16", FK_VFPV4_D16, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("fpv4-sp-d16", FK_FPV4_SP_D16, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("fpv5-d16", FK_FPV5_D16, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("fpv5-sp-d16", FK_FPV5_SP_D16, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("fp-armv8", FK_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("fp-armv8-fullfp16-d16", FK_FP_ARMV8_FULLFP16_D16, FPUVersion::VFPV5_FULLFP16, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("fp-armv8-fullfp16-sp-d16", FK_FP_ARMV8_FULLFP16_SP_D16, FPUVersion::VFPV5_FULLFP16, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("neon", FK_NEON, FPUVersion::VFPV3, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("neon-fp16", FK_NEON_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("neon-vfpv4", FK_NEON_VFPV4, FPUVersion::VFPV4, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("neon-fp-armv8", FK_NEON_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("crypto-neon-fp-armv8", FK_CRYPTO_NEON_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::Crypto,
        FPURestriction::None)
ARM_FPU("softvfp", FK_SOFTVFP, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
#undef ARM_FPU

#ifndef ARM_ARCH
#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
#endif
ARM_ARCH("invalid", INVALID, "", "",
          ARMBuildAttrs::CPUArch::Pre_v4, FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv2", ARMV2, "2", "v2", ARMBuildAttrs::CPUArch::Pre_v4,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv2a", ARMV2A, "2A", "v2a", ARMBuildAttrs::CPUArch::Pre_v4,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv3", ARMV3, "3", "v3", ARMBuildAttrs::CPUArch::Pre_v4,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv3m", ARMV3M, "3M", "v3m", ARMBuildAttrs::CPUArch::Pre_v4,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv4", ARMV4, "4", "v4", ARMBuildAttrs::CPUArch::v4,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv4t", ARMV4T, "4T", "v4t", ARMBuildAttrs::CPUArch::v4T,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv5t", ARMV5T, "5T", "v5", ARMBuildAttrs::CPUArch::v5T,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv5te", ARMV5TE, "5TE", "v5e", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_DSP)
ARM_ARCH("armv5tej", ARMV5TEJ, "5TEJ", "v5e", ARMBuildAttrs::CPUArch::v5TEJ,
          FK_NONE, ARM::AEK_DSP)
ARM_ARCH("armv6", ARMV6, "6", "v6", ARMBuildAttrs::CPUArch::v6,
          FK_VFPV2, ARM::AEK_DSP)
ARM_ARCH("armv6k", ARMV6K, "6K", "v6k", ARMBuildAttrs::CPUArch::v6K,
          FK_VFPV2, ARM::AEK_DSP)
ARM_ARCH("armv6t2", ARMV6T2, "6T2", "v6t2", ARMBuildAttrs::CPUArch::v6T2,
          FK_NONE, ARM::AEK_DSP)
ARM_ARCH("armv6kz", ARMV6KZ, "6KZ", "v6kz", ARMBuildAttrs::CPUArch::v6KZ,
          FK_VFPV2, (ARM::AEK_SEC | ARM::AEK_DSP))
ARM_ARCH("armv6-m", ARMV6M, "6-M", "v6m", ARMBuildAttrs::CPUArch::v6_M,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv7-a", ARMV7A, "7-A", "v7", ARMBuildAttrs::CPUArch::v7,
          FK_NEON, ARM::AEK_DSP)
ARM_ARCH("armv7ve", ARMV7VE, "7VE", "v7ve", ARMBuildAttrs::CPUArch::v7,
          FK_NEON, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT |
          ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv7-r", ARMV7R, "7-R", "v7r", ARMBuildAttrs::CPUArch::v7,
          FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv7-m", ARMV7M, "7-M", "v7m", ARMBuildAttrs::CPUArch::v7,
          FK_NONE, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv7e-m", ARMV7EM, "7E-M", "v7em", ARMBuildAttrs::CPUArch::v7E_M,
          FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv8-a", ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
         FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.1-a", ARMV8_1A, "8.1-A", "v8.1a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.2-a", ARMV8_2A, "8.2-A", "v8.2a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
ARM_ARCH("armv8.3-a", ARMV8_3A, "8.3-A", "v8.3a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
ARM_ARCH("armv8.4-a", ARMV8_4A, "8.4-A", "v8.4a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD))
ARM_ARCH("armv8.5-a", ARMV8_5A, "8.5-A", "v8.5a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD))
ARM_ARCH("armv8.6-a", ARMV8_6A, "8.6-A", "v8.6a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC        | ARM::AEK_MP   | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP  | ARM::AEK_CRC  | ARM::AEK_RAS |
          ARM::AEK_DOTPROD    | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8.7-a", ARMV8_7A, "8.7-A", "v8.7a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC        | ARM::AEK_MP   | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP  | ARM::AEK_CRC  | ARM::AEK_RAS |
          ARM::AEK_DOTPROD    | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8.8-a", ARMV8_8A, "8.8-A", "v8.8a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_SHA2 | ARM::AEK_AES |
          ARM::AEK_I8MM))
ARM_ARCH("armv9-a", ARMV9A, "9-A", "v9a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD))
ARM_ARCH("armv9.1-a", ARMV9_1A, "9.1-A", "v9.1a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv9.2-a", ARMV9_2A, "9.2-A", "v9.2a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv9.3-a", ARMV9_3A, "9.3-A", "v9.3a",
         ARMBuildAttrs::CPUArch::v9_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8-r", ARMV8R, "8-R", "v8r", ARMBuildAttrs::CPUArch::v8_R,
          FK_NEON_FP_ARMV8,
          (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB |
           ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8-m.base", ARMV8MBaseline, "8-M.Baseline", "v8m.base",
          ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv8-m.main", ARMV8MMainline, "8-M.Mainline", "v8m.main",
          ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv8.1-m.main", ARMV8_1MMainline, "8.1-M.Mainline", "v8.1m.main",
          ARMBuildAttrs::CPUArch::v8_1_M_Main, FK_FP_ARMV8_FULLFP16_SP_D16, ARM::AEK_HWDIVTHUMB | ARM::AEK_RAS | ARM::AEK_LOB)
// Non-standard Arch names.
ARM_ARCH("iwmmxt", IWMMXT, "iwmmxt", "", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("iwmmxt2", IWMMXT2, "iwmmxt2", "", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("xscale", XSCALE, "xscale", "v5e", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv7s", ARMV7S, "7-S", "v7s", ARMBuildAttrs::CPUArch::v7,
          FK_NEON_VFPV4, ARM::AEK_DSP)
ARM_ARCH("armv7k", ARMV7K, "7-K", "v7k", ARMBuildAttrs::CPUArch::v7,
          FK_NONE, ARM::AEK_DSP)
#undef ARM_ARCH

#ifndef ARM_ARCH_EXT_NAME
#define ARM_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
// FIXME: This would be nicer were it tablegen
ARM_ARCH_EXT_NAME("invalid",  ARM::AEK_INVALID,  nullptr,  nullptr)
ARM_ARCH_EXT_NAME("none",     ARM::AEK_NONE,     nullptr,  nullptr)
ARM_ARCH_EXT_NAME("crc",      ARM::AEK_CRC,      "+crc",   "-crc")
ARM_ARCH_EXT_NAME("crypto",   ARM::AEK_CRYPTO,   "+crypto","-crypto")
ARM_ARCH_EXT_NAME("sha2",     ARM::AEK_SHA2,     "+sha2",  "-sha2")
ARM_ARCH_EXT_NAME("aes",      ARM::AEK_AES,      "+aes",   "-aes")
ARM_ARCH_EXT_NAME("dotprod",  ARM::AEK_DOTPROD,  "+dotprod","-dotprod")
ARM_ARCH_EXT_NAME("dsp",      ARM::AEK_DSP,      "+dsp",   "-dsp")
ARM_ARCH_EXT_NAME("fp",       ARM::AEK_FP,       nullptr,  nullptr)
ARM_ARCH_EXT_NAME("fp.dp",    ARM::AEK_FP_DP,    nullptr,  nullptr)
ARM_ARCH_EXT_NAME("mve",     (ARM::AEK_DSP | ARM::AEK_SIMD), "+mve", "-mve")
ARM_ARCH_EXT_NAME("mve.fp",  (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP), "+mve.fp", "-mve.fp")
ARM_ARCH_EXT_NAME("idiv",     (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB), nullptr, nullptr)
ARM_ARCH_EXT_NAME("mp",       ARM::AEK_MP,       nullptr,  nullptr)
ARM_ARCH_EXT_NAME("simd",     ARM::AEK_SIMD,     nullptr,  nullptr)
ARM_ARCH_EXT_NAME("sec",      ARM::AEK_SEC,      nullptr,  nullptr)
ARM_ARCH_EXT_NAME("virt",     ARM::AEK_VIRT,     nullptr,  nullptr)
ARM_ARCH_EXT_NAME("fp16",     ARM::AEK_FP16,     "+fullfp16",  "-fullfp16")
ARM_ARCH_EXT_NAME("ras",      ARM::AEK_RAS,      "+ras", "-ras")
ARM_ARCH_EXT_NAME("os",       ARM::AEK_OS,       nullptr,  nullptr)
ARM_ARCH_EXT_NAME("iwmmxt",   ARM::AEK_IWMMXT,   nullptr,  nullptr)
ARM_ARCH_EXT_NAME("iwmmxt2",  ARM::AEK_IWMMXT2,  nullptr,  nullptr)
ARM_ARCH_EXT_NAME("maverick", ARM::AEK_MAVERICK, nullptr,  nullptr)
ARM_ARCH_EXT_NAME("xscale",   ARM::AEK_XSCALE,   nullptr,  nullptr)
ARM_ARCH_EXT_NAME("fp16fml",  ARM::AEK_FP16FML,  "+fp16fml", "-fp16fml")
ARM_ARCH_EXT_NAME("bf16",     ARM::AEK_BF16,     "+bf16",    "-bf16")
ARM_ARCH_EXT_NAME("sb",       ARM::AEK_SB,       "+sb",      "-sb")
ARM_ARCH_EXT_NAME("i8mm",     ARM::AEK_I8MM,     "+i8mm",    "-i8mm")
ARM_ARCH_EXT_NAME("lob",      ARM::AEK_LOB,      "+lob",   "-lob")
ARM_ARCH_EXT_NAME("cdecp0",   ARM::AEK_CDECP0,   "+cdecp0",  "-cdecp0")
ARM_ARCH_EXT_NAME("cdecp1",   ARM::AEK_CDECP1,   "+cdecp1",  "-cdecp1")
ARM_ARCH_EXT_NAME("cdecp2",   ARM::AEK_CDECP2,   "+cdecp2",  "-cdecp2")
ARM_ARCH_EXT_NAME("cdecp3",   ARM::AEK_CDECP3,   "+cdecp3",  "-cdecp3")
ARM_ARCH_EXT_NAME("cdecp4",   ARM::AEK_CDECP4,   "+cdecp4",  "-cdecp4")
ARM_ARCH_EXT_NAME("cdecp5",   ARM::AEK_CDECP5,   "+cdecp5",  "-cdecp5")
ARM_ARCH_EXT_NAME("cdecp6",   ARM::AEK_CDECP6,   "+cdecp6",  "-cdecp6")
ARM_ARCH_EXT_NAME("cdecp7",   ARM::AEK_CDECP7,   "+cdecp7",  "-cdecp7")
ARM_ARCH_EXT_NAME("pacbti",   ARM::AEK_PACBTI,   "+pacbti",  "-pacbti")
#undef ARM_ARCH_EXT_NAME

#ifndef ARM_HW_DIV_NAME
#define ARM_HW_DIV_NAME(NAME, ID)
#endif
ARM_HW_DIV_NAME("invalid", ARM::AEK_INVALID)
ARM_HW_DIV_NAME("none", ARM::AEK_NONE)
ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIVTHUMB)
ARM_HW_DIV_NAME("arm", ARM::AEK_HWDIVARM)
ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
#undef ARM_HW_DIV_NAME

#ifndef ARM_CPU_NAME
#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
#endif
ARM_CPU_NAME("arm8", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm810", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm", ARMV4, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm110", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm1100", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm1110", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm7tdmi", ARMV4T, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm7tdmi-s", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm710t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm720t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm9", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm9tdmi", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm920", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm920t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm922t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm940t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("ep9312", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm10tdmi", ARMV5T, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1020t", ARMV5T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm9e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm946e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm966e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm968e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm10e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1020e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1022e", ARMV5TE, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm926ej-s", ARMV5TEJ, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136j-s", ARMV6, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136jf-s", ARMV6, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("mpcore", ARMV6K, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("mpcorenovfp", ARMV6K, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1176jz-s", ARMV6KZ, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1176jzf-s", ARMV6KZ, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1156t2-s", ARMV6T2, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1156t2f-s", ARMV6T2, FK_VFPV2, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m0", ARMV6M, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m0plus", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m1", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("sc000", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-a5", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP))
ARM_CPU_NAME("cortex-a7", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a8", ARMV7A, FK_NEON, false, ARM::AEK_SEC)
ARM_CPU_NAME("cortex-a9", ARMV7A, FK_NEON_FP16, false, (ARM::AEK_SEC | ARM::AEK_MP))
ARM_CPU_NAME("cortex-a12", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a15", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a17", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("krait", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-r4", ARMV7R, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-r4f", ARMV7R, FK_VFPV3_D16, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-r5", ARMV7R, FK_VFPV3_D16, false,
             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r7", ARMV7R, FK_VFPV3_D16_FP16, false,
             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r8", ARMV7R, FK_VFPV3_D16_FP16, false,
             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r52", ARMV8R, FK_NEON_FP_ARMV8, true, ARM::AEK_NONE)
ARM_CPU_NAME("sc300", ARMV7M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m3", ARMV7M, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m4", ARMV7EM, FK_FPV4_SP_D16, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m7", ARMV7EM, FK_FPV5_D16, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m23", ARMV8MBaseline, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m33", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
ARM_CPU_NAME("cortex-m35p", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
ARM_CPU_NAME("cortex-m55", ARMV8_1MMainline, FK_FP_ARMV8_FULLFP16_D16, false,
             (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP | ARM::AEK_FP16))
ARM_CPU_NAME("cortex-m85", ARMV8_1MMainline, FK_FP_ARMV8_FULLFP16_D16, false,
             (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP | ARM::AEK_FP16 |
              ARM::AEK_RAS | ARM::AEK_PACBTI))
ARM_CPU_NAME("cortex-a32", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a35", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a53", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a55", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a57", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a72", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a73", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a75", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a76", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a76ae", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a77", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a78", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a78c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a710", ARMV9A, FK_NEON_FP_ARMV8, false,
             (ARM::AEK_DOTPROD | ARM::AEK_FP16FML | ARM::AEK_BF16 | ARM::AEK_SB |
              ARM::AEK_I8MM))
ARM_CPU_NAME("cortex-x1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-x1c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("neoverse-n1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("neoverse-n2", ARMV8_5A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_BF16 | ARM::AEK_DOTPROD | ARM::AEK_I8MM | ARM::AEK_RAS |
              ARM::AEK_SB))
ARM_CPU_NAME("neoverse-v1", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_RAS | ARM::AEK_FP16 | ARM::AEK_BF16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("exynos-m5", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
// Non-standard Arch names.
ARM_CPU_NAME("iwmmxt", IWMMXT, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("xscale", XSCALE, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("swift", ARMV7S, FK_NEON_VFPV4, true,
             (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
// Invalid CPU
ARM_CPU_NAME("invalid", INVALID, FK_INVALID, true, ARM::AEK_INVALID)
#undef ARM_CPU_NAME
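
// Example: clients consume these X-macro tables by defining the macro of
// interest and then including this file. A minimal sketch (OS is an existing
// raw_ostream; the include path is an assumption and differs across LLVM
// versions):
//
//   #define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)    \
//     OS << NAME << (IS_DEFAULT ? " (default)" : "") << '\n';
//   #include "llvm/Support/ARMTargetParser.def"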

//==- llvm/Support/RandomNumberGenerator.h - RNG for diversity ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an abstraction for deterministic random number
// generation (RNG).  Note that the current implementation is not
// cryptographically secure as it uses the C++11 <random> facilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RANDOMNUMBERGENERATOR_H_
#define LLVM_SUPPORT_RANDOMNUMBERGENERATOR_H_

#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h" // Needed for uint64_t on Windows.
#include <random>
#include <system_error>

namespace llvm {
class StringRef;

/// A random number generator.
///
/// Instances of this class should not be shared across threads. The
/// seed should be set by passing the -rng-seed=<uint64> option. Use
/// Module::createRNG to create a new RNG instance for use with that
/// module.
class RandomNumberGenerator {

  // 64-bit Mersenne Twister by Matsumoto and Nishimura, 2000
  // http://en.cppreference.com/w/cpp/numeric/random/mersenne_twister_engine
  // This RNG is deterministically portable across C++11
  // implementations.
  using generator_type = std::mt19937_64;

public:
  using result_type = generator_type::result_type;

  /// Returns a random number in the range [min(), max()].
  result_type operator()();

  static constexpr result_type min() { return generator_type::min(); }
  static constexpr result_type max() { return generator_type::max(); }

private:
  /// Seeds and salts the underlying RNG engine.
  ///
  /// This constructor should not be used directly. Instead use
  /// Module::createRNG to create a new RNG salted with the Module ID.
  RandomNumberGenerator(StringRef Salt);

  generator_type Generator;

  // Noncopyable.
  RandomNumberGenerator(const RandomNumberGenerator &other) = delete;
  RandomNumberGenerator &operator=(const RandomNumberGenerator &other) = delete;

  friend class Module;
};
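
// Example: the intended construction path goes through Module::createRNG,
// which salts the engine with the module ID (a sketch; M is an existing
// Module from llvm/IR/Module.h and "my-pass" is an arbitrary salt string):
//
//   std::unique_ptr<RandomNumberGenerator> RNG = M.createRNG("my-pass");
//   uint64_t Value = (*RNG)();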

// Fill the given buffer with the requested number of random bytes.
std::error_code getRandomBytes(void *Buffer, size_t Size);
} // end namespace llvm

#endif

//===-- llvm/Support/Compression.h ---Compression----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains basic functions for compression/decompression.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_COMPRESSION_H
#define LLVM_SUPPORT_COMPRESSION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {
template <typename T> class SmallVectorImpl;
class Error;

// None indicates no compression. The other members are a subset of
// compression::Format, which is used for compressed debug sections in some
// object file formats (e.g. ELF). This is a separate class as we may add new
// compression::Format members for non-debugging purposes.
enum class DebugCompressionType {
  None, ///< No compression
  Zlib, ///< zlib
  Zstd, ///< Zstandard
};

namespace compression {
namespace zlib {

constexpr int NoCompression = 0;
constexpr int BestSpeedCompression = 1;
constexpr int DefaultCompression = 6;
constexpr int BestSizeCompression = 9;

bool isAvailable();

void compress(ArrayRef<uint8_t> Input,
              SmallVectorImpl<uint8_t> &CompressedBuffer,
              int Level = DefaultCompression);

Error decompress(ArrayRef<uint8_t> Input, uint8_t *Output,
                 size_t &UncompressedSize);

Error decompress(ArrayRef<uint8_t> Input, SmallVectorImpl<uint8_t> &Output,
                 size_t UncompressedSize);

} // End of namespace zlib

namespace zstd {

constexpr int NoCompression = -5;
constexpr int BestSpeedCompression = 1;
constexpr int DefaultCompression = 5;
constexpr int BestSizeCompression = 12;

bool isAvailable();

void compress(ArrayRef<uint8_t> Input,
              SmallVectorImpl<uint8_t> &CompressedBuffer,
              int Level = DefaultCompression);

Error decompress(ArrayRef<uint8_t> Input, uint8_t *Output,
                 size_t &UncompressedSize);

Error decompress(ArrayRef<uint8_t> Input, SmallVectorImpl<uint8_t> &Output,
                 size_t UncompressedSize);

} // End of namespace zstd

enum class Format {
  Zlib,
  Zstd,
};

inline Format formatFor(DebugCompressionType Type) {
  switch (Type) {
  case DebugCompressionType::None:
    llvm_unreachable("not a compression type");
  case DebugCompressionType::Zlib:
    return Format::Zlib;
  case DebugCompressionType::Zstd:
    return Format::Zstd;
  }
  llvm_unreachable("");
}

struct Params {
  constexpr Params(Format F)
      : format(F), level(F == Format::Zlib ? zlib::DefaultCompression
                                           : zstd::DefaultCompression) {}
  Params(DebugCompressionType Type) : Params(formatFor(Type)) {}

  Format format;
  int level;
  // This may support multi-threading for zstd in the future. Note that
  // different threads may produce different output, so be careful if certain
  // output determinism is desired.
};

// Return nullptr if LLVM was built with support (LLVM_ENABLE_ZLIB,
// LLVM_ENABLE_ZSTD) for the specified compression format; otherwise
// return a string literal describing the reason.
const char *getReasonIfUnsupported(Format F);

// Compress Input using the format and level specified by P; P.level defaults
// to the format's *::DefaultCompression.
void compress(Params P, ArrayRef<uint8_t> Input,
              SmallVectorImpl<uint8_t> &Output);

// Decompress Input. The uncompressed size must be available.
Error decompress(DebugCompressionType T, ArrayRef<uint8_t> Input,
                 uint8_t *Output, size_t UncompressedSize);
Error decompress(Format F, ArrayRef<uint8_t> Input,
                 SmallVectorImpl<uint8_t> &Output, size_t UncompressedSize);
Error decompress(DebugCompressionType T, ArrayRef<uint8_t> Input,
                 SmallVectorImpl<uint8_t> &Output, size_t UncompressedSize);
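
// Example: a zlib round trip through the format-generic entry points
// (a sketch; assumes an LLVM build with LLVM_ENABLE_ZLIB, and Original is an
// existing ArrayRef<uint8_t>):
//
//   SmallVector<uint8_t, 0> Compressed, Decompressed;
//   compress(Params(Format::Zlib), Original, Compressed);
//   if (Error E = decompress(Format::Zlib, Compressed, Decompressed,
//                            Original.size()))
//     consumeError(std::move(E)); // or propagate the error to the caller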

} // End of namespace compression

} // End of namespace llvm

#endif

//==- llvm/Support/ArrayRecycler.h - Recycling of Arrays ---------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ArrayRecycler class template which can recycle small
// arrays allocated from one of the allocators in Allocator.h
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARRAYRECYCLER_H
#define LLVM_SUPPORT_ARRAYRECYCLER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {

/// Recycle small arrays allocated from a BumpPtrAllocator.
///
/// Arrays are allocated in a small number of fixed sizes. For each supported
/// array size, the ArrayRecycler keeps a free list of available arrays.
///
template <class T, size_t Align = alignof(T)> class ArrayRecycler {
  // The free list for a given array size is a simple singly linked list.
  // We can't use iplist or Recycler here since those classes can't be copied.
  struct FreeList {
    FreeList *Next;
  };

  static_assert(Align >= alignof(FreeList), "Object underaligned");
  static_assert(sizeof(T) >= sizeof(FreeList), "Objects are too small");

  // Keep a free list for each array size.
  SmallVector<FreeList*, 8> Bucket;

  // Remove an entry from the free list in Bucket[Idx] and return it.
  // Return NULL if no entries are available.
  T *pop(unsigned Idx) {
    if (Idx >= Bucket.size())
      return nullptr;
    FreeList *Entry = Bucket[Idx];
    if (!Entry)
      return nullptr;
    __asan_unpoison_memory_region(Entry, Capacity::get(Idx).getSize());
    Bucket[Idx] = Entry->Next;
    __msan_allocated_memory(Entry, Capacity::get(Idx).getSize());
    return reinterpret_cast<T*>(Entry);
  }

  // Add an entry to the free list at Bucket[Idx].
  void push(unsigned Idx, T *Ptr) {
    assert(Ptr && "Cannot recycle NULL pointer");
    FreeList *Entry = reinterpret_cast<FreeList*>(Ptr);
    if (Idx >= Bucket.size())
      Bucket.resize(size_t(Idx) + 1);
    Entry->Next = Bucket[Idx];
    Bucket[Idx] = Entry;
    __asan_poison_memory_region(Ptr, Capacity::get(Idx).getSize());
  }

public:
  /// The size of an allocated array is represented by a Capacity instance.
  ///
  /// This class is much smaller than a size_t, and it provides methods to work
  /// with the set of legal array capacities.
  class Capacity {
    uint8_t Index;
    explicit Capacity(uint8_t idx) : Index(idx) {}

  public:
    Capacity() : Index(0) {}

    /// Get the capacity of an array that can hold at least N elements.
    static Capacity get(size_t N) {
      return Capacity(N ? Log2_64_Ceil(N) : 0);
    }

    /// Get the number of elements in an array with this capacity.
    size_t getSize() const { return size_t(1u) << Index; }

    /// Get the bucket number for this capacity.
    unsigned getBucket() const { return Index; }

    /// Get the next larger capacity. Large capacities grow exponentially, so
    /// this function can be used to reallocate incrementally growing vectors
    /// in amortized linear time.
    Capacity getNext() const { return Capacity(Index + 1); }
  };

  ~ArrayRecycler() {
    // The client should always call clear() so recycled arrays can be returned
    // to the allocator.
    assert(Bucket.empty() && "Non-empty ArrayRecycler deleted!");
  }

  /// Release all the tracked allocations to the allocator. The recycler must
  /// be free of any tracked allocations before being deleted.
  template<class AllocatorType>
  void clear(AllocatorType &Allocator) {
    for (; !Bucket.empty(); Bucket.pop_back())
      while (T *Ptr = pop(Bucket.size() - 1))
        Allocator.Deallocate(Ptr);
  }

  /// Special case for BumpPtrAllocator which has an empty Deallocate()
  /// function.
  ///
  /// There is no need to traverse the free lists, pulling all the objects into
  /// cache.
  void clear(BumpPtrAllocator&) {
    Bucket.clear();
  }

  /// Allocate an array of at least the requested capacity.
  ///
  /// Return an existing recycled array, or allocate one from Allocator if
  /// none are available for recycling.
  ///
  template<class AllocatorType>
  T *allocate(Capacity Cap, AllocatorType &Allocator) {
    // Try to recycle an existing array.
    if (T *Ptr = pop(Cap.getBucket()))
      return Ptr;
    // Nope, get more memory.
    return static_cast<T*>(Allocator.Allocate(sizeof(T)*Cap.getSize(), Align));
  }

  /// Deallocate an array with the specified Capacity.
  ///
  /// Cap must be the same capacity that was given to allocate().
  ///
  void deallocate(Capacity Cap, T *Ptr) {
    push(Cap.getBucket(), Ptr);
  }
};
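
// Example: a typical pairing with a BumpPtrAllocator (a sketch; Node is a
// placeholder type that satisfies the size/alignment static_asserts above):
//
//   struct Node { uint64_t Payload; };
//   BumpPtrAllocator Alloc;
//   ArrayRecycler<Node> Recycler;
//
//   auto Cap = ArrayRecycler<Node>::Capacity::get(8);
//   Node *A = Recycler.allocate(Cap, Alloc);
//   Recycler.deallocate(Cap, A);             // A joins the free list.
//   Node *B = Recycler.allocate(Cap, Alloc); // Recycles A's memory.
//   Recycler.deallocate(Cap, B);
//   Recycler.clear(Alloc);                   // Required before destruction.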

} // end llvm namespace

#endif

//===- BinaryStreamReader.h - Reads objects from a binary stream *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYSTREAMREADER_H
#define LLVM_SUPPORT_BINARYSTREAMREADER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <type_traits>

namespace llvm {

/// Provides read only access to a subclass of `BinaryStream`.  Provides
/// bounds checking and helpers for reading certain common data types such as
/// null-terminated strings, integers in various flavors of endianness, etc.
/// Can be subclassed to provide reading of custom datatypes, although no
/// methods are overridable.
class BinaryStreamReader {
public:
  BinaryStreamReader() = default;
  explicit BinaryStreamReader(BinaryStreamRef Ref);
  explicit BinaryStreamReader(BinaryStream &Stream);
  explicit BinaryStreamReader(ArrayRef<uint8_t> Data,
                              llvm::support::endianness Endian);
  explicit BinaryStreamReader(StringRef Data, llvm::support::endianness Endian);

  BinaryStreamReader(const BinaryStreamReader &Other) = default;

  BinaryStreamReader &operator=(const BinaryStreamReader &Other) = default;

  virtual ~BinaryStreamReader() = default;

  /// Read as much as possible from the underlying string at the current offset
  /// without invoking a copy, and set \p Buffer to the resulting data slice.
  /// Updates the stream's offset to point after the newly read data.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readLongestContiguousChunk(ArrayRef<uint8_t> &Buffer);

  /// Read \p Size bytes from the underlying stream at the current offset and
  /// and set \p Buffer to the resulting data slice.  Whether a copy occurs
  /// depends on the implementation of the underlying stream.  Updates the
  /// stream's offset to point after the newly read data.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readBytes(ArrayRef<uint8_t> &Buffer, uint32_t Size);

  /// Read an integer of the specified endianness into \p Dest and update the
  /// stream's offset.  The data is always copied from the stream's underlying
  /// buffer into \p Dest. Updates the stream's offset to point after the newly
  /// read data.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  template <typename T> Error readInteger(T &Dest) {
    static_assert(std::is_integral_v<T>,
                  "Cannot call readInteger with non-integral value!");

    ArrayRef<uint8_t> Bytes;
    if (auto EC = readBytes(Bytes, sizeof(T)))
      return EC;

    Dest = llvm::support::endian::read<T, llvm::support::unaligned>(
        Bytes.data(), Stream.getEndian());
    return Error::success();
  }

  /// Similar to readInteger.
  template <typename T> Error readEnum(T &Dest) {
    static_assert(std::is_enum<T>::value,
                  "Cannot call readEnum with non-enum value!");
    std::underlying_type_t<T> N;
    if (auto EC = readInteger(N))
      return EC;
    Dest = static_cast<T>(N);
    return Error::success();
  }

  /// Read an unsigned LEB128 encoded value.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readULEB128(uint64_t &Dest);

  /// Read a signed LEB128 encoded value.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readSLEB128(int64_t &Dest);

  /// Read a null terminated string from \p Dest.  Whether a copy occurs depends
  /// on the implementation of the underlying stream.  Updates the stream's
  /// offset to point after the newly read data.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readCString(StringRef &Dest);

  /// Similar to readCString, however read a null-terminated UTF16 string
  /// instead.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readWideString(ArrayRef<UTF16> &Dest);

  /// Read a \p Length byte string into \p Dest.  Whether a copy occurs depends
  /// on the implementation of the underlying stream.  Updates the stream's
  /// offset to point after the newly read data.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readFixedString(StringRef &Dest, uint32_t Length);

  /// Read the entire remainder of the underlying stream into \p Ref.  This is
  /// equivalent to calling getUnderlyingStream().slice(Offset).  Updates the
  /// stream's offset to point to the end of the stream.  Never causes a copy.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readStreamRef(BinaryStreamRef &Ref);

  /// Read \p Length bytes from the underlying stream into \p Ref.  This is
  /// equivalent to calling getUnderlyingStream().slice(Offset, Length).
  /// Updates the stream's offset to point after the newly read object.  Never
  /// causes a copy.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readStreamRef(BinaryStreamRef &Ref, uint32_t Length);

  /// Read \p Length bytes from the underlying stream into \p Ref.  This is
  /// equivalent to calling getUnderlyingStream().slice(Offset, Length).
  /// Updates the stream's offset to point after the newly read object.  Never
  /// causes a copy.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  Error readSubstream(BinarySubstreamRef &Ref, uint32_t Length);

  /// Get a pointer to an object of type T from the underlying stream, as if by
  /// memcpy, and store the result into \p Dest.  It is up to the caller to
  /// ensure that objects of type T can be safely treated in this manner.
  /// Updates the stream's offset to point after the newly read object.  Whether
  /// a copy occurs depends upon the implementation of the underlying
  /// stream.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  template <typename T> Error readObject(const T *&Dest) {
    ArrayRef<uint8_t> Buffer;
    if (auto EC = readBytes(Buffer, sizeof(T)))
      return EC;
    Dest = reinterpret_cast<const T *>(Buffer.data());
    return Error::success();
  }

  /// Get a reference to a \p NumElements element array of objects of type T
  /// from the underlying stream as if by memcpy, and store the resulting array
  /// slice into \p array.  It is up to the caller to ensure that objects of
  /// type T can be safely treated in this manner.  Updates the stream's offset
  /// to point after the newly read object.  Whether a copy occurs depends upon
  /// the implementation of the underlying stream.
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  template <typename T>
  Error readArray(ArrayRef<T> &Array, uint32_t NumElements) {
    ArrayRef<uint8_t> Bytes;
    if (NumElements == 0) {
      Array = ArrayRef<T>();
      return Error::success();
    }

    if (NumElements > UINT32_MAX / sizeof(T))
      return make_error<BinaryStreamError>(
          stream_error_code::invalid_array_size);

    if (auto EC = readBytes(Bytes, NumElements * sizeof(T)))
      return EC;

    assert(isAddrAligned(Align::Of<T>(), Bytes.data()) &&
           "Reading at invalid alignment!");

    Array = ArrayRef<T>(reinterpret_cast<const T *>(Bytes.data()), NumElements);
    return Error::success();
  }

  /// Read a VarStreamArray of size \p Size bytes and store the result into
  /// \p Array.  Updates the stream's offset to point after the newly read
  /// array.  Never causes a copy (although iterating the elements of the
  /// VarStreamArray may, depending upon the implementation of the underlying
  /// stream).
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  template <typename T, typename U>
  Error readArray(VarStreamArray<T, U> &Array, uint32_t Size,
                  uint32_t Skew = 0) {
    BinaryStreamRef S;
    if (auto EC = readStreamRef(S, Size))
      return EC;
    Array.setUnderlyingStream(S, Skew);
    return Error::success();
  }

  /// Read a FixedStreamArray of \p NumItems elements and store the result into
  /// \p Array.  Updates the stream's offset to point after the newly read
  /// array.  Never causes a copy (although iterating the elements of the
  /// FixedStreamArray may, depending upon the implementation of the underlying
  /// stream).
  ///
  /// \returns a success error code if the data was successfully read, otherwise
  /// returns an appropriate error code.
  template <typename T>
  Error readArray(FixedStreamArray<T> &Array, uint32_t NumItems) {
    if (NumItems == 0) {
      Array = FixedStreamArray<T>();
      return Error::success();
    }

    if (NumItems > UINT32_MAX / sizeof(T))
      return make_error<BinaryStreamError>(
          stream_error_code::invalid_array_size);

    BinaryStreamRef View;
    if (auto EC = readStreamRef(View, NumItems * sizeof(T)))
      return EC;

    Array = FixedStreamArray<T>(View);
    return Error::success();
  }

  bool empty() const { return bytesRemaining() == 0; }
  void setOffset(uint64_t Off) { Offset = Off; }
  uint64_t getOffset() const { return Offset; }
  uint64_t getLength() const { return Stream.getLength(); }
  uint64_t bytesRemaining() const { return getLength() - getOffset(); }

  /// Advance the stream's offset by \p Amount bytes.
  ///
  /// \returns a success error code if at least \p Amount bytes remain in the
  /// stream, otherwise returns an appropriate error code.
  Error skip(uint64_t Amount);

  /// Examine the next byte of the underlying stream without advancing the
  /// stream's offset.  If the stream is empty the behavior is undefined.
  ///
  /// \returns the next byte in the stream.
  uint8_t peek() const;

  Error padToAlignment(uint32_t Align);

  std::pair<BinaryStreamReader, BinaryStreamReader>
  split(uint64_t Offset) const;

private:
  BinaryStreamRef Stream;
  uint64_t Offset = 0;
};
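
// Example: reading two little-endian integers from a raw buffer (a sketch;
// error handling is abbreviated with consumeError):
//
//   uint8_t Bytes[] = {1, 0, 2, 0, 0, 0};
//   BinaryStreamReader Reader(ArrayRef<uint8_t>(Bytes),
//                             llvm::support::little);
//   uint16_t A;
//   uint32_t B;
//   if (Error E = Reader.readInteger(A)) // A == 1 on success.
//     consumeError(std::move(E));
//   if (Error E = Reader.readInteger(B)) // B == 2 on success.
//     consumeError(std::move(E));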
} // namespace llvm

#endif // LLVM_SUPPORT_BINARYSTREAMREADER_H

//===- AutoConvert.h - Auto conversion between ASCII/EBCDIC -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains functions used for auto conversion between
// ASCII/EBCDIC codepages specific to z/OS.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AUTOCONVERT_H
#define LLVM_SUPPORT_AUTOCONVERT_H

#ifdef __MVS__
#define CCSID_IBM_1047 1047
#define CCSID_UTF_8 1208
#include <system_error>

namespace llvm {

/// \brief Disable the z/OS enhanced ASCII auto-conversion for the file
/// descriptor.
std::error_code disableAutoConversion(int FD);

/// \brief Query the z/OS enhanced ASCII auto-conversion status of a file
/// descriptor and force the conversion if the file is not tagged with a
/// codepage.
std::error_code enableAutoConversion(int FD);

/// \brief Set the tag information for a file descriptor.
std::error_code setFileTag(int FD, int CCSID, bool Text);
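
// Example: tagging a descriptor as UTF-8 text so auto-conversion can apply
// (a sketch; FD is a file descriptor obtained from a prior open call):
//
//   if (std::error_code EC = setFileTag(FD, CCSID_UTF_8, /*Text=*/true))
//     return EC;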

} // namespace llvm

#endif // __MVS__

#endif // LLVM_SUPPORT_AUTOCONVERT_H

//===-- llvm/Support/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the sys::DynamicLibrary class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DYNAMICLIBRARY_H
#define LLVM_SUPPORT_DYNAMICLIBRARY_H

#include <string>

namespace llvm {

class StringRef;

namespace sys {

/// This class provides a portable interface to dynamic libraries which also
/// might be known as shared libraries, shared objects, dynamic shared
/// objects, or dynamic link libraries. Regardless of the terminology or the
/// operating system interface, this class provides a portable interface that
/// allows dynamic libraries to be loaded and searched for externally
/// defined symbols. This is typically used to provide "plug-in" support.
/// It also allows for symbols to be defined which don't live in any library,
/// but rather in the main program itself, which is useful on Windows where
/// the main executable cannot be searched.
class DynamicLibrary {
  // Placeholder whose address represents an invalid library.
  // We use this instead of NULL or a pointer-int pair because the OS library
  // might define 0 or 1 to be "special" handles, such as "search all".
  static char Invalid;

  // Opaque data used to interface with OS-specific dynamic library handling.
  void *Data;

public:
  explicit DynamicLibrary(void *data = &Invalid) : Data(data) {}

  /// Return the OS specific handle value.
  void *getOSSpecificHandle() const { return Data; }

  /// Returns true if the object refers to a valid library.
  bool isValid() const { return Data != &Invalid; }

  /// Searches through the library for the symbol \p symbolName. If it is
  /// found, the address of that symbol is returned. If not, NULL is returned.
  /// Note that NULL will also be returned if the library failed to load.
  /// Use isValid() to distinguish these cases if it is important.
  /// Note that this will \e not search symbols explicitly registered by
  /// AddSymbol().
  void *getAddressOfSymbol(const char *symbolName);

  /// This function permanently loads the dynamic library at the given path
  /// using the library load operation from the host operating system. The
  /// library instance will only be closed when global destructors run, and
  /// there is no guarantee when the library will be unloaded.
  ///
  /// This returns a valid DynamicLibrary instance on success and an invalid
  /// instance on failure (see isValid()). \p *errMsg will only be modified if
  /// the library fails to load.
  ///
  /// It is safe to call this function multiple times for the same library.
  /// Open a dynamic library permanently.
  static DynamicLibrary getPermanentLibrary(const char *filename,
                                            std::string *errMsg = nullptr);

  /// Registers an externally loaded library. The library will be unloaded
  /// when the program terminates.
  ///
  /// It is safe to call this function multiple times for the same library,
  /// though ownership is only taken if there was no error.
  static DynamicLibrary addPermanentLibrary(void *handle,
                                            std::string *errMsg = nullptr);

  /// This function permanently loads the dynamic library at the given path.
  /// Use this instead of getPermanentLibrary() when you won't need to get
  /// symbols from the library itself.
  ///
  /// It is safe to call this function multiple times for the same library.
  static bool LoadLibraryPermanently(const char *Filename,
                                     std::string *ErrMsg = nullptr) {
    return !getPermanentLibrary(Filename, ErrMsg).isValid();
  }

  /// This function loads the dynamic library at the given path, using the
  /// library load operation from the host operating system. The library
  /// instance will be closed when closeLibrary is called or global destructors
  /// are run, but there is no guarantee when the library will be unloaded.
  ///
  /// This returns a valid DynamicLibrary instance on success and an invalid
  /// instance on failure (see isValid()). \p *Err will only be modified if the
  /// library fails to load.
  ///
  /// It is safe to call this function multiple times for the same library.
  static DynamicLibrary getLibrary(const char *FileName,
                                   std::string *Err = nullptr);

  /// This function closes the dynamic library at the given path, using the
  /// library close operation of the host operating system, and there is no
  /// guarantee if or when this will cause the library to be unloaded.
  ///
  /// This function should be called only if the library was loaded using the
  /// getLibrary() function.
  static void closeLibrary(DynamicLibrary &Lib);

  enum SearchOrdering {
    /// SO_Linker - Search as a call to dlsym(dlopen(NULL)) would when
    /// DynamicLibrary::getPermanentLibrary(NULL) has been called or
    /// search the list of explicitly loaded symbols if not.
    SO_Linker,
    /// SO_LoadedFirst - Search all loaded libraries, then as SO_Linker would.
    SO_LoadedFirst,
    /// SO_LoadedLast - Search as SO_Linker would, then loaded libraries.
    /// Only useful to search if libraries with RTLD_LOCAL have been added.
    SO_LoadedLast,
    /// SO_LoadOrder - Or this in to search libraries in the order loaded.
    /// The default behaviour is to search loaded libraries in reverse.
    SO_LoadOrder = 4
  };
  static SearchOrdering SearchOrder; // = SO_Linker

  /// This function will search through all previously loaded dynamic
  /// libraries for the symbol \p symbolName. If it is found, the address of
  /// that symbol is returned. If not, null is returned. Note that this will
  /// search permanently loaded libraries (getPermanentLibrary()) as well
  /// as explicitly registered symbols (AddSymbol()).
  /// Search through libraries for the address of a symbol.
  static void *SearchForAddressOfSymbol(const char *symbolName);

  /// Convenience function for C++ophiles.
  static void *SearchForAddressOfSymbol(const std::string &symbolName) {
    return SearchForAddressOfSymbol(symbolName.c_str());
  }

  /// This functions permanently adds the symbol \p symbolName with the
  /// value \p symbolValue.  These symbols are searched before any
  /// libraries.
  /// Add searchable symbol/value pair.
  static void AddSymbol(StringRef symbolName, void *symbolValue);

  class HandleSet;
};
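
// Example: loading a plug-in and resolving an entry point (a sketch;
// "plugin.so" and "plugin_init" are placeholder names):
//
//   std::string Err;
//   DynamicLibrary Lib = DynamicLibrary::getPermanentLibrary("plugin.so",
//                                                            &Err);
//   if (!Lib.isValid())
//     errs() << "cannot load plug-in: " << Err << '\n';
//   else if (void *Sym = Lib.getAddressOfSymbol("plugin_init"))
//     reinterpret_cast<void (*)()>(Sym)();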

} // End sys namespace
} // End llvm namespace

#endif

//===-- llvm/Support/Signposts.h - Interval debug annotations ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Some OS's provide profilers that allow applications to provide custom
/// annotations to the profiler. For example, on Xcode 10 and later 'signposts'
/// can be emitted by the application and these will be rendered to the Points
/// of Interest track on the instruments timeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SIGNPOSTS_H
#define LLVM_SUPPORT_SIGNPOSTS_H

#include <memory>

namespace llvm {
class SignpostEmitterImpl;
class StringRef;

/// Manages the emission of signposts into the recording method supported by
/// the OS.
class SignpostEmitter {
  std::unique_ptr<SignpostEmitterImpl> Impl;

public:
  SignpostEmitter();
  ~SignpostEmitter();

  bool isEnabled() const;

  /// Begin a signposted interval for a given object.
  void startInterval(const void *O, StringRef Name);
  /// End a signposted interval for a given object.
  void endInterval(const void *O, StringRef Name);
};
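
// Example: bracketing a phase with a signposted interval (a sketch; Obj is
// any object identifying the interval and "MyPhase" is an arbitrary label):
//
//   SignpostEmitter Signposts;
//   Signposts.startInterval(&Obj, "MyPhase");
//   // ... the work being profiled ...
//   Signposts.endInterval(&Obj, "MyPhase");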

} // end namespace llvm

#endif // LLVM_SUPPORT_SIGNPOSTS_H

//===------ PGOOptions.h -- PGO option tunables ----------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Define option tunables for PGO.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PGOOPTIONS_H
#define LLVM_SUPPORT_PGOOPTIONS_H

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/Error.h"

namespace llvm {

namespace vfs {
class FileSystem;
} // namespace vfs

/// A struct capturing PGO tunables.
struct PGOOptions {
  enum PGOAction { NoAction, IRInstr, IRUse, SampleUse };
  enum CSPGOAction { NoCSAction, CSIRInstr, CSIRUse };
  PGOOptions(std::string ProfileFile, std::string CSProfileGenFile,
             std::string ProfileRemappingFile, std::string MemoryProfile,
             IntrusiveRefCntPtr<vfs::FileSystem> FS,
             PGOAction Action = NoAction, CSPGOAction CSAction = NoCSAction,
             bool DebugInfoForProfiling = false,
             bool PseudoProbeForProfiling = false);
  PGOOptions(const PGOOptions &);
  ~PGOOptions();
  PGOOptions &operator=(const PGOOptions &);

  std::string ProfileFile;
  std::string CSProfileGenFile;
  std::string ProfileRemappingFile;
  std::string MemoryProfile;
  PGOAction Action;
  CSPGOAction CSAction;
  bool DebugInfoForProfiling;
  bool PseudoProbeForProfiling;
  IntrusiveRefCntPtr<vfs::FileSystem> FS;
};
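
// Example: requesting IR-level PGO instrumentation (a sketch;
// vfs::getRealFileSystem() is declared in llvm/Support/VirtualFileSystem.h):
//
//   PGOOptions Opts("default.profraw", /*CSProfileGenFile=*/"",
//                   /*ProfileRemappingFile=*/"", /*MemoryProfile=*/"",
//                   vfs::getRealFileSystem(), PGOOptions::IRInstr);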
} // namespace llvm

#endif

//===- Format.h - Efficient printf-style formatting for streams -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the format() function, which can be used with other
// LLVM subsystems to provide printf-style formatting.  This gives all the power
// and risk of printf.  This can be used like this (with raw_ostreams as an
// example):
//
//    OS << "mynumber: " << format("%4.5f", 1234.412) << '\n';
//
// Or if you prefer:
//
//  OS << format("mynumber: %4.5f\n", 1234.412);
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMAT_H
#define LLVM_SUPPORT_FORMAT_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <cstdio>
#include <optional>
#include <tuple>
#include <utility>

namespace llvm {

/// This is a helper class used for handling formatted output.  It is the
/// abstract base class of a templated derived class.
class format_object_base {
protected:
  const char *Fmt;
  ~format_object_base() = default; // Disallow polymorphic deletion.
  format_object_base(const format_object_base &) = default;
  virtual void home(); // Out of line virtual method.

  /// Call snprintf() for this object, on the given buffer and size.
  virtual int snprint(char *Buffer, unsigned BufferSize) const = 0;

public:
  format_object_base(const char *fmt) : Fmt(fmt) {}

  /// Format the object into the specified buffer.  On success, this returns
  /// the length of the formatted string.  If the buffer is too small, this
  /// returns a length to retry with, which will be larger than BufferSize.
  unsigned print(char *Buffer, unsigned BufferSize) const {
    assert(BufferSize && "Invalid buffer size!");

    // Print the string, leaving room for the terminating null.
    int N = snprint(Buffer, BufferSize);

    // VC++ and old GlibC return negative on overflow, just double the size.
    if (N < 0)
      return BufferSize * 2;

    // Other implementations yield number of bytes needed, not including the
    // final '\0'.
    if (unsigned(N) >= BufferSize)
      return N + 1;

    // Otherwise N is the length of output (not including the final '\0').
    return N;
  }
};

/// These are templated helper classes used by the format function that
/// capture the object to be formatted and the format string. When actually
/// printed, this synthesizes the string into a temporary buffer provided and
/// returns whether or not it is big enough.

// Helper to validate that format() parameters are scalars or pointers.
template <typename... Args> struct validate_format_parameters;
template <typename Arg, typename... Args>
struct validate_format_parameters<Arg, Args...> {
  static_assert(std::is_scalar_v<Arg>,
                "format can't be used with non fundamental / non pointer type");
  validate_format_parameters() { validate_format_parameters<Args...>(); }
};
template <> struct validate_format_parameters<> {};

template <typename... Ts>
class format_object final : public format_object_base {
  std::tuple<Ts...> Vals;

  template <std::size_t... Is>
  int snprint_tuple(char *Buffer, unsigned BufferSize,
                    std::index_sequence<Is...>) const {
#ifdef _MSC_VER
    return _snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
#else
    return snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
#endif
  }

public:
  format_object(const char *fmt, const Ts &... vals)
      : format_object_base(fmt), Vals(vals...) {
    validate_format_parameters<Ts...>();
  }

  int snprint(char *Buffer, unsigned BufferSize) const override {
    return snprint_tuple(Buffer, BufferSize, std::index_sequence_for<Ts...>());
  }
};

/// These are helper functions used to produce formatted output.  They use
/// template type deduction to construct the appropriate instance of the
/// format_object class to simplify their construction.
///
/// This is typically used like:
/// \code
///   OS << format("%0.4f", myfloat) << '\n';
/// \endcode

template <typename... Ts>
inline format_object<Ts...> format(const char *Fmt, const Ts &... Vals) {
  return format_object<Ts...>(Fmt, Vals...);
}

/// This is a helper class for left_justify, right_justify, and center_justify.
class FormattedString {
public:
  enum Justification { JustifyNone, JustifyLeft, JustifyRight, JustifyCenter };
  FormattedString(StringRef S, unsigned W, Justification J)
      : Str(S), Width(W), Justify(J) {}

private:
  StringRef Str;
  unsigned Width;
  Justification Justify;
  friend class raw_ostream;
};

/// left_justify - append spaces after string so total output is
/// \p Width characters.  If \p Str is larger than \p Width, full string
/// is written with no padding.
inline FormattedString left_justify(StringRef Str, unsigned Width) {
  return FormattedString(Str, Width, FormattedString::JustifyLeft);
}

/// right_justify - add spaces before string so total output is
/// \p Width characters.  If \p Str is larger than \p Width, full string
/// is written with no padding.
inline FormattedString right_justify(StringRef Str, unsigned Width) {
  return FormattedString(Str, Width, FormattedString::JustifyRight);
}

/// center_justify - add spaces before and after string so total output is
/// \p Width characters.  If \p Str is larger than \p Width, full string
/// is written with no padding.
inline FormattedString center_justify(StringRef Str, unsigned Width) {
  return FormattedString(Str, Width, FormattedString::JustifyCenter);
}

/// This is a helper class used for format_hex() and format_decimal().
class FormattedNumber {
  uint64_t HexValue;
  int64_t DecValue;
  unsigned Width;
  bool Hex;
  bool Upper;
  bool HexPrefix;
  friend class raw_ostream;

public:
  FormattedNumber(uint64_t HV, int64_t DV, unsigned W, bool H, bool U,
                  bool Prefix)
      : HexValue(HV), DecValue(DV), Width(W), Hex(H), Upper(U),
        HexPrefix(Prefix) {}
};

/// format_hex - Output \p N as a fixed width hexadecimal. If number will not
/// fit in width, full number is still printed.  Examples:
///   OS << format_hex(255, 4)              => 0xff
///   OS << format_hex(255, 4, true)        => 0xFF
///   OS << format_hex(255, 6)              => 0x00ff
///   OS << format_hex(255, 2)              => 0xff
inline FormattedNumber format_hex(uint64_t N, unsigned Width,
                                  bool Upper = false) {
  assert(Width <= 18 && "hex width must be <= 18");
  return FormattedNumber(N, 0, Width, true, Upper, true);
}

/// format_hex_no_prefix - Output \p N as a fixed width hexadecimal. Does not
/// prepend '0x' to the output string.  If number will not fit in width,
/// full number is still printed.  Examples:
///   OS << format_hex_no_prefix(255, 2)              => ff
///   OS << format_hex_no_prefix(255, 2, true)        => FF
///   OS << format_hex_no_prefix(255, 4)              => 00ff
///   OS << format_hex_no_prefix(255, 1)              => ff
inline FormattedNumber format_hex_no_prefix(uint64_t N, unsigned Width,
                                            bool Upper = false) {
  assert(Width <= 16 && "hex width must be <= 16");
  return FormattedNumber(N, 0, Width, true, Upper, false);
}

/// format_decimal - Output \p N as a right justified, fixed-width decimal. If
/// number will not fit in width, full number is still printed.  Examples:
///   OS << format_decimal(0, 5)     => "    0"
///   OS << format_decimal(255, 5)   => "  255"
///   OS << format_decimal(-1, 3)    => " -1"
///   OS << format_decimal(12345, 3) => "12345"
inline FormattedNumber format_decimal(int64_t N, unsigned Width) {
  return FormattedNumber(0, N, Width, false, false, false);
}

class FormattedBytes {
  ArrayRef<uint8_t> Bytes;

  // If not std::nullopt, display offsets for each line relative to starting
  // value.
  std::optional<uint64_t> FirstByteOffset;
  uint32_t IndentLevel;  // Number of characters to indent each line.
  uint32_t NumPerLine;   // Number of bytes to show per line.
  uint8_t ByteGroupSize; // How many hex bytes are grouped without spaces
  bool Upper;            // Show offset and hex bytes as upper case.
  bool ASCII;            // Show the ASCII bytes for the hex bytes to the right.
  friend class raw_ostream;

public:
  FormattedBytes(ArrayRef<uint8_t> B, uint32_t IL, std::optional<uint64_t> O,
                 uint32_t NPL, uint8_t BGS, bool U, bool A)
      : Bytes(B), FirstByteOffset(O), IndentLevel(IL), NumPerLine(NPL),
        ByteGroupSize(BGS), Upper(U), ASCII(A) {

    if (ByteGroupSize > NumPerLine)
      ByteGroupSize = NumPerLine;
  }
};

inline FormattedBytes
format_bytes(ArrayRef<uint8_t> Bytes,
             std::optional<uint64_t> FirstByteOffset = std::nullopt,
             uint32_t NumPerLine = 16, uint8_t ByteGroupSize = 4,
             uint32_t IndentLevel = 0, bool Upper = false) {
  return FormattedBytes(Bytes, IndentLevel, FirstByteOffset, NumPerLine,
                        ByteGroupSize, Upper, false);
}

inline FormattedBytes
format_bytes_with_ascii(ArrayRef<uint8_t> Bytes,
                        std::optional<uint64_t> FirstByteOffset = std::nullopt,
                        uint32_t NumPerLine = 16, uint8_t ByteGroupSize = 4,
                        uint32_t IndentLevel = 0, bool Upper = false) {
  return FormattedBytes(Bytes, IndentLevel, FirstByteOffset, NumPerLine,
                        ByteGroupSize, Upper, true);
}
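
/// Example uses of the byte formatters (a sketch):
/// \code
///   uint8_t Data[] = {'L', 'L', 'V', 'M'};
///   outs() << format_bytes(Data, /*FirstByteOffset=*/0x1000) << '\n';
///   outs() << format_bytes_with_ascii(Data) << '\n';
/// \endcode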

} // end namespace llvm

#endif

//===-- llvm/Support/ManagedStatic.h - Static Global wrapper ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ManagedStatic class and the llvm_shutdown() function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MANAGEDSTATIC_H
#define LLVM_SUPPORT_MANAGEDSTATIC_H

#include <atomic>
#include <cstddef>

namespace llvm {

/// object_creator - Helper method for ManagedStatic.
template <class C> struct object_creator {
  static void *call() { return new C(); }
};

/// object_deleter - Helper method for ManagedStatic.
///
template <typename T> struct object_deleter {
  static void call(void *Ptr) { delete (T *)Ptr; }
};
template <typename T, size_t N> struct object_deleter<T[N]> {
  static void call(void *Ptr) { delete[](T *)Ptr; }
};

// ManagedStatic must be initialized to zero, and it must *not* have a dynamic
// initializer because managed statics are often created while running other
// dynamic initializers. In standard C++11, the best way to accomplish this is
// with a constexpr default constructor. However, different versions of the
// Visual C++ compiler have had bugs where, even though the constructor may be
// constexpr, a dynamic initializer may be emitted depending on optimization
// settings. For the affected versions of MSVC, use the old linker
// initialization pattern of not providing a constructor and leaving the fields
// uninitialized. See http://llvm.org/PR41367 for details.
#if !defined(_MSC_VER) || (_MSC_VER >= 1925) || defined(__clang__)
#define LLVM_USE_CONSTEXPR_CTOR
#endif

/// ManagedStaticBase - Common base class for ManagedStatic instances.
class ManagedStaticBase {
protected:
#ifdef LLVM_USE_CONSTEXPR_CTOR
  mutable std::atomic<void *> Ptr{};
  mutable void (*DeleterFn)(void *) = nullptr;
  mutable const ManagedStaticBase *Next = nullptr;
#else
  // This should only be used as a static variable, which guarantees that this
  // will be zero initialized.
  mutable std::atomic<void *> Ptr;
  mutable void (*DeleterFn)(void *);
  mutable const ManagedStaticBase *Next;
#endif

  void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const;

public:
#ifdef LLVM_USE_CONSTEXPR_CTOR
  constexpr ManagedStaticBase() = default;
#endif

  /// isConstructed - Return true if this object has already been constructed.
  bool isConstructed() const { return Ptr != nullptr; }

  void destroy() const;
};

/// ManagedStatic - This transparently changes the behavior of global statics
/// to be lazily constructed on demand (good for reducing the startup time of
/// dynamic libraries that link in LLVM components) and makes destruction
/// explicit through the llvm_shutdown() function call.
///
template <class C, class Creator = object_creator<C>,
          class Deleter = object_deleter<C>>
class ManagedStatic : public ManagedStaticBase {
public:
  // Accessors.
  C &operator*() {
    void *Tmp = Ptr.load(std::memory_order_acquire);
    if (!Tmp)
      RegisterManagedStatic(Creator::call, Deleter::call);

    return *static_cast<C *>(Ptr.load(std::memory_order_relaxed));
  }

  C *operator->() { return &**this; }

  const C &operator*() const {
    void *Tmp = Ptr.load(std::memory_order_acquire);
    if (!Tmp)
      RegisterManagedStatic(Creator::call, Deleter::call);

    return *static_cast<C *>(Ptr.load(std::memory_order_relaxed));
  }

  const C *operator->() const { return &**this; }

  // Extract the instance, leaving the ManagedStatic uninitialized. The
  // user is then responsible for the lifetime of the returned instance.
  C *claim() {
    return static_cast<C *>(Ptr.exchange(nullptr));
  }
};
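
// Example (an illustrative sketch): a lazily constructed global. `MyRegistry`
// is a hypothetical type introduced here for illustration; the pattern is the
// intended use of ManagedStatic.
//
// \code
//   static llvm::ManagedStatic<MyRegistry> TheRegistry;
//
//   MyRegistry &getRegistry() {
//     // The MyRegistry instance is constructed on the first dereference.
//     return *TheRegistry;
//   }
// \endcode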

/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
void llvm_shutdown();

/// llvm_shutdown_obj - This is a simple helper class that calls
/// llvm_shutdown() when it is destroyed.
struct llvm_shutdown_obj {
  llvm_shutdown_obj() = default;
  ~llvm_shutdown_obj() { llvm_shutdown(); }
};
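
// Example (sketch): constructing an llvm_shutdown_obj at the top of main()
// guarantees that llvm_shutdown() runs, and all ManagedStatic instances are
// destroyed, on every return path.
//
// \code
//   int main(int argc, char **argv) {
//     llvm::llvm_shutdown_obj ShutdownGuard; // Calls llvm_shutdown() on exit.
//     // ... use LLVM ...
//     return 0;
//   }
// \endcode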

} // end namespace llvm

#endif // LLVM_SUPPORT_MANAGEDSTATIC_H
//===- llvm/Support/Signals.h - Signal Handling support ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some helpful functions for dealing with the possibility of
// unix signals occurring while your program is running.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SIGNALS_H
#define LLVM_SUPPORT_SIGNALS_H

#include <cstdint>
#include <string>

namespace llvm {
class StringRef;
class raw_ostream;

namespace sys {

  /// This function runs all the registered interrupt handlers, including the
  /// removal of files registered by RemoveFileOnSignal.
  void RunInterruptHandlers();

  /// This function registers signal handlers to ensure that the named file is
  /// removed if a fatal signal is delivered.
  bool RemoveFileOnSignal(StringRef Filename, std::string* ErrMsg = nullptr);

  /// This function removes a file from the list of files to be removed on
  /// signal delivery.
  void DontRemoveFileOnSignal(StringRef Filename);

  /// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
  /// process, print a stack trace and then exit.
  /// \param Argv0 the current binary name, used to find the symbolizer
  ///        relative to the current binary before searching $PATH; can be
  ///        StringRef(), in which case we will only search $PATH.
  /// \param DisableCrashReporting if \c true, disable the normal crash
  ///        reporting mechanisms on the underlying operating system.
  void PrintStackTraceOnErrorSignal(StringRef Argv0,
                                    bool DisableCrashReporting = false);
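
  // Example (sketch): typical setup at tool startup so that a crash produces
  // a symbolized backtrace.
  //
  // \code
  //   int main(int argc, char **argv) {
  //     llvm::sys::PrintStackTraceOnErrorSignal(argv[0]);
  //     // ... tool logic ...
  //   }
  // \endcode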

  /// Disable all system dialog boxes that appear when the process crashes.
  void DisableSystemDialogsOnCrash();

  /// Print the stack trace using the given \c raw_ostream object.
  /// \param Depth the number of stack frames to print. If zero (the
  ///        default), the entire call stack is printed.
  void PrintStackTrace(raw_ostream &OS, int Depth = 0);

  /// Run all registered signal handlers.
  void RunSignalHandlers();

  using SignalHandlerCallback = void (*)(void *);

  /// Add a function to be called when an abort/kill signal is delivered to the
  /// process. The handler can have a cookie passed to it to identify what
  /// instance of the handler it is.
  void AddSignalHandler(SignalHandlerCallback FnPtr, void *Cookie);

  /// This function registers a function to be called when the user "interrupts"
  /// the program (typically by pressing ctrl-c).  When the user interrupts the
  /// program, the specified interrupt function is called instead of the program
  /// being killed, and the interrupt function is automatically disabled.
  ///
  /// Note that interrupt functions are not allowed to call any non-reentrant
  /// functions.  A null interrupt function pointer disables the currently
  /// installed function.  Note also that the handler may be executed on a
  /// different thread on some platforms.
  void SetInterruptFunction(void (*IF)());

  /// Registers a function to be called when an "info" signal is delivered to
  /// the process.
  ///
  /// On POSIX systems, this will be SIGUSR1; on systems that have it, SIGINFO
  /// will also be used (typically ctrl-t).
  ///
  /// Note that signal handlers are not allowed to call any non-reentrant
  /// functions.  A null function pointer disables the currently installed
  /// function.  Note also that the handler may be executed on a different
  /// thread on some platforms.
  void SetInfoSignalFunction(void (*Handler)());

  /// Registers a function to be called in a "one-shot" manner when a pipe
  /// signal is delivered to the process (i.e., on a failed write to a pipe).
  /// After the pipe signal is handled once, the handler is unregistered.
  ///
  /// The LLVM signal handling code will not install any handler for the pipe
  /// signal unless one is provided with this API (see \ref
  /// DefaultOneShotPipeSignalHandler). This handler must be provided before
  /// any other LLVM signal handlers are installed: the \ref InitLLVM
  /// constructor has a flag that can simplify this setup.
  ///
  /// Note that the handler is not allowed to call any non-reentrant
  /// functions.  A null handler pointer disables the currently installed
  /// function.  Note also that the handler may be executed on a
  /// different thread on some platforms.
  void SetOneShotPipeSignalFunction(void (*Handler)());

  /// On Unix systems and Windows, this function exits with an "IO error" exit
  /// code.
  void DefaultOneShotPipeSignalHandler();
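
  // Example (sketch): install the default one-shot pipe handler so a failed
  // write to a closed pipe exits with an IO-error code instead of terminating
  // the process via SIGPIPE.
  //
  // \code
  //   llvm::sys::SetOneShotPipeSignalFunction(
  //       llvm::sys::DefaultOneShotPipeSignalHandler);
  // \endcode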

#ifdef _WIN32
  /// Windows does not support signals, so this handler must be called manually.
  void CallOneShotPipeSignalHandler();
#endif

  /// This function does the following:
  /// - clean up any temporary files registered with RemoveFileOnSignal()
  /// - dump the callstack from the exception context
  /// - call any relevant interrupt/signal handlers
  /// - create a core/mini dump of the exception context whenever possible
  /// Context is a system-specific failure context: it is the signal type on
  /// Unix; the ExceptionContext on Windows.
  void CleanupOnSignal(uintptr_t Context);

  void unregisterHandlers();
} // End sys namespace
} // End llvm namespace

#endif
//===-- RISCVAttributes.h - RISCV Attributes --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains enumerations for RISCV attributes as defined in the
// RISC-V ELF psABI specification:
//
// https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_RISCVATTRIBUTES_H
#define LLVM_SUPPORT_RISCVATTRIBUTES_H

#include "llvm/Support/ELFAttributes.h"

namespace llvm {
namespace RISCVAttrs {

const TagNameMap &getRISCVAttributeTags();

enum AttrType : unsigned {
  // Attribute types in ELF/.riscv.attributes.
  STACK_ALIGN = 4,
  ARCH = 5,
  UNALIGNED_ACCESS = 6,
  PRIV_SPEC = 8,
  PRIV_SPEC_MINOR = 10,
  PRIV_SPEC_REVISION = 12,
};

enum StackAlign { ALIGN_4 = 4, ALIGN_16 = 16 };

enum { NOT_ALLOWED = 0, ALLOWED = 1 };

} // namespace RISCVAttrs
} // namespace llvm

#endif
//===-- TrigramIndex.h - a heuristic for SpecialCaseList --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// TrigramIndex implements a heuristic for SpecialCaseList that filters out
// ~99% of incoming queries when all regular expressions in the
// SpecialCaseList are simple wildcards with '*' and '.'. If the rules are
// more complicated, the check is defeated and all queries are passed on to
// full regex matching.
//
// The basic idea is that in order for a wildcard to match a query, the query
// needs to have all trigrams which occur in the wildcard. We create a trigram
// index (trigram -> list of rules with it) and then count trigrams in the query
// for each rule. If the count for one of the rules reaches the expected value,
// the check passes the query to a regex. If none of the rules got enough
// trigrams, the check reports that the query is definitely not matched by any
// of the rules, and no regex matching is needed.
// A similar idea was used in Google Code Search as described in the blog post:
// https://swtch.com/~rsc/regexp/regexp4.html
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TRIGRAMINDEX_H
#define LLVM_SUPPORT_TRIGRAMINDEX_H

#include "llvm/ADT/SmallVector.h"
#include <string>
#include <unordered_map>
#include <vector>

namespace llvm {
class StringRef;

class TrigramIndex {
 public:
  /// Inserts a new Regex into the index.
  void insert(const std::string &Regex);

  /// Returns true if the special case list definitely does not have a line
  /// that matches the query. Returns false if it is not sure.
  bool isDefinitelyOut(StringRef Query) const;

  /// Returns true iff the heuristic is defeated and not useful.
  /// In this case isDefinitelyOut always returns false.
  bool isDefeated() { return Defeated; }
 private:
  // If true, the rules are too complicated for the check to work, and full
  // regex matching is needed for every rule.
  bool Defeated = false;
  // The minimum number of trigrams which should match for a rule to have a
  // chance to match the query. The number of elements equals the number of
  // regex rules in the SpecialCaseList.
  std::vector<unsigned> Counts;
  // Index holds a list of rule indices for each trigram. The same indices
  // are used in Counts to store per-rule limits.
  // If a trigram is too common (>4 rules with it), we stop tracking it,
  // which increases the probability for a need to match using regex, but
  // decreases the costs in the regular case.
  std::unordered_map<unsigned, SmallVector<size_t, 4>> Index{256};
};
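
// Usage sketch (illustrative; the rule strings are hypothetical): build an
// index over simple wildcard rules, then pre-filter queries before running
// full regex matching. If the rules are too complicated, the index reports
// isDefeated() and isDefinitelyOut() always returns false.
//
// \code
//   llvm::TrigramIndex Idx;
//   Idx.insert("*/third_party/*");
//   Idx.insert("*.generated.cc");
//   if (Idx.isDefinitelyOut("lib/Support/Signals.cpp")) {
//     // Definitely no rule matches; regex evaluation can be skipped.
//   }
// \endcode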

}  // namespace llvm

#endif  // LLVM_SUPPORT_TRIGRAMINDEX_H
//===- Support/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the set of machine-level target-independent types which
// legal values in the code generator use.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MACHINEVALUETYPE_H
#define LLVM_SUPPORT_MACHINEVALUETYPE_H

#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>

namespace llvm {

  class Type;

  /// Machine Value Type. Every type that is supported natively by some
  /// processor targeted by LLVM occurs here. This means that any legal value
  /// type can be represented by an MVT.
  class MVT {
  public:
    enum SimpleValueType : uint8_t {
      // clang-format off

      // Simple value types that aren't explicitly part of this enumeration
      // are considered extended value types.
      INVALID_SIMPLE_VALUE_TYPE = 0,

      // If you change this numbering, you must change the values in
      // ValueTypes.td as well!
      Other          =   1,   // This is a non-standard value
      i1             =   2,   // This is a 1 bit integer value
      i2             =   3,   // This is a 2 bit integer value
      i4             =   4,   // This is a 4 bit integer value
      i8             =   5,   // This is an 8 bit integer value
      i16            =   6,   // This is a 16 bit integer value
      i32            =   7,   // This is a 32 bit integer value
      i64            =   8,   // This is a 64 bit integer value
      i128           =   9,   // This is a 128 bit integer value

      FIRST_INTEGER_VALUETYPE = i1,
      LAST_INTEGER_VALUETYPE  = i128,

      bf16           =  10,   // This is a 16 bit brain floating point value
      f16            =  11,   // This is a 16 bit floating point value
      f32            =  12,   // This is a 32 bit floating point value
      f64            =  13,   // This is a 64 bit floating point value
      f80            =  14,   // This is an 80 bit floating point value
      f128           =  15,   // This is a 128 bit floating point value
      ppcf128        =  16,   // This is a PPC 128-bit floating point value

      FIRST_FP_VALUETYPE = bf16,
      LAST_FP_VALUETYPE  = ppcf128,

      v1i1           =  17,   //    1 x i1
      v2i1           =  18,   //    2 x i1
      v4i1           =  19,   //    4 x i1
      v8i1           =  20,   //    8 x i1
      v16i1          =  21,   //   16 x i1
      v32i1          =  22,   //   32 x i1
      v64i1          =  23,   //   64 x i1
      v128i1         =  24,   //  128 x i1
      v256i1         =  25,   //  256 x i1
      v512i1         =  26,   //  512 x i1
      v1024i1        =  27,   // 1024 x i1
      v2048i1        =  28,   // 2048 x i1

      v128i2         =  29,   //  128 x i2
      v256i2         =  30,   //  256 x i2

      v64i4          =  31,   //   64 x i4
      v128i4         =  32,   //  128 x i4

      v1i8           =  33,   //    1 x i8
      v2i8           =  34,   //    2 x i8
      v4i8           =  35,   //    4 x i8
      v8i8           =  36,   //    8 x i8
      v16i8          =  37,   //   16 x i8
      v32i8          =  38,   //   32 x i8
      v64i8          =  39,   //   64 x i8
      v128i8         =  40,   //  128 x i8
      v256i8         =  41,   //  256 x i8
      v512i8         =  42,   //  512 x i8
      v1024i8        =  43,   // 1024 x i8

      v1i16          =  44,   //   1 x i16
      v2i16          =  45,   //   2 x i16
      v3i16          =  46,   //   3 x i16
      v4i16          =  47,   //   4 x i16
      v8i16          =  48,   //   8 x i16
      v16i16         =  49,   //  16 x i16
      v32i16         =  50,   //  32 x i16
      v64i16         =  51,   //  64 x i16
      v128i16        =  52,   // 128 x i16
      v256i16        =  53,   // 256 x i16
      v512i16        =  54,   // 512 x i16

      v1i32          =  55,   //    1 x i32
      v2i32          =  56,   //    2 x i32
      v3i32          =  57,   //    3 x i32
      v4i32          =  58,   //    4 x i32
      v5i32          =  59,   //    5 x i32
      v6i32          =  60,   //    6 x i32
      v7i32          =  61,   //    7 x i32
      v8i32          =  62,   //    8 x i32
      v9i32          =  63,   //    9 x i32
      v10i32         =  64,   //   10 x i32
      v11i32         =  65,   //   11 x i32
      v12i32         =  66,   //   12 x i32
      v16i32         =  67,   //   16 x i32
      v32i32         =  68,   //   32 x i32
      v64i32         =  69,   //   64 x i32
      v128i32        =  70,   //  128 x i32
      v256i32        =  71,   //  256 x i32
      v512i32        =  72,   //  512 x i32
      v1024i32       =  73,   // 1024 x i32
      v2048i32       =  74,   // 2048 x i32

      v1i64          =  75,   //   1 x i64
      v2i64          =  76,   //   2 x i64
      v3i64          =  77,   //   3 x i64
      v4i64          =  78,   //   4 x i64
      v8i64          =  79,   //   8 x i64
      v16i64         =  80,   //  16 x i64
      v32i64         =  81,   //  32 x i64
      v64i64         =  82,   //  64 x i64
      v128i64        =  83,   // 128 x i64
      v256i64        =  84,   // 256 x i64

      v1i128         =  85,   //  1 x i128

      FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
      LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128,

      v1f16          =  86,   //    1 x f16
      v2f16          =  87,   //    2 x f16
      v3f16          =  88,   //    3 x f16
      v4f16          =  89,   //    4 x f16
      v8f16          =  90,   //    8 x f16
      v16f16         =  91,   //   16 x f16
      v32f16         =  92,   //   32 x f16
      v64f16         =  93,   //   64 x f16
      v128f16        =  94,   //  128 x f16
      v256f16        =  95,   //  256 x f16
      v512f16        =  96,   //  512 x f16

      v2bf16         =  97,   //    2 x bf16
      v3bf16         =  98,   //    3 x bf16
      v4bf16         =  99,   //    4 x bf16
      v8bf16         = 100,   //    8 x bf16
      v16bf16        = 101,   //   16 x bf16
      v32bf16        = 102,   //   32 x bf16
      v64bf16        = 103,   //   64 x bf16
      v128bf16       = 104,   //  128 x bf16

      v1f32          = 105,   //    1 x f32
      v2f32          = 106,   //    2 x f32
      v3f32          = 107,   //    3 x f32
      v4f32          = 108,   //    4 x f32
      v5f32          = 109,   //    5 x f32
      v6f32          = 110,   //    6 x f32
      v7f32          = 111,   //    7 x f32
      v8f32          = 112,   //    8 x f32
      v9f32          = 113,   //    9 x f32
      v10f32         = 114,   //   10 x f32
      v11f32         = 115,   //   11 x f32
      v12f32         = 116,   //   12 x f32
      v16f32         = 117,   //   16 x f32

      v32f32         = 118,   //   32 x f32
      v64f32         = 119,   //   64 x f32
      v128f32        = 120,   //  128 x f32
      v256f32        = 121,   //  256 x f32
      v512f32        = 122,   //  512 x f32
      v1024f32       = 123,   // 1024 x f32
      v2048f32       = 124,   // 2048 x f32

      v1f64          = 125,   //    1 x f64
      v2f64          = 126,   //    2 x f64
      v3f64          = 127,   //    3 x f64
      v4f64          = 128,   //    4 x f64
      v8f64          = 129,   //    8 x f64
      v16f64         = 130,   //   16 x f64
      v32f64         = 131,   //   32 x f64
      v64f64         = 132,   //   64 x f64
      v128f64        = 133,   //  128 x f64
      v256f64        = 134,   //  256 x f64

      FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v1f16,
      LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v256f64,

      FIRST_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
      LAST_FIXEDLEN_VECTOR_VALUETYPE = v256f64,

      nxv1i1         = 135,   // n x  1 x i1
      nxv2i1         = 136,   // n x  2 x i1
      nxv4i1         = 137,   // n x  4 x i1
      nxv8i1         = 138,   // n x  8 x i1
      nxv16i1        = 139,   // n x 16 x i1
      nxv32i1        = 140,   // n x 32 x i1
      nxv64i1        = 141,   // n x 64 x i1

      nxv1i8         = 142,   // n x  1 x i8
      nxv2i8         = 143,   // n x  2 x i8
      nxv4i8         = 144,   // n x  4 x i8
      nxv8i8         = 145,   // n x  8 x i8
      nxv16i8        = 146,   // n x 16 x i8
      nxv32i8        = 147,   // n x 32 x i8
      nxv64i8        = 148,   // n x 64 x i8

      nxv1i16        = 149,  // n x  1 x i16
      nxv2i16        = 150,  // n x  2 x i16
      nxv4i16        = 151,  // n x  4 x i16
      nxv8i16        = 152,  // n x  8 x i16
      nxv16i16       = 153,  // n x 16 x i16
      nxv32i16       = 154,  // n x 32 x i16

      nxv1i32        = 155,  // n x  1 x i32
      nxv2i32        = 156,  // n x  2 x i32
      nxv4i32        = 157,  // n x  4 x i32
      nxv8i32        = 158,  // n x  8 x i32
      nxv16i32       = 159,  // n x 16 x i32
      nxv32i32       = 160,  // n x 32 x i32

      nxv1i64        = 161,  // n x  1 x i64
      nxv2i64        = 162,  // n x  2 x i64
      nxv4i64        = 163,  // n x  4 x i64
      nxv8i64        = 164,  // n x  8 x i64
      nxv16i64       = 165,  // n x 16 x i64
      nxv32i64       = 166,  // n x 32 x i64

      FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
      LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv32i64,

      nxv1f16        = 167,  // n x  1 x f16
      nxv2f16        = 168,  // n x  2 x f16
      nxv4f16        = 169,  // n x  4 x f16
      nxv8f16        = 170,  // n x  8 x f16
      nxv16f16       = 171,  // n x 16 x f16
      nxv32f16       = 172,  // n x 32 x f16

      nxv1bf16       = 173,  // n x  1 x bf16
      nxv2bf16       = 174,  // n x  2 x bf16
      nxv4bf16       = 175,  // n x  4 x bf16
      nxv8bf16       = 176,  // n x  8 x bf16
      nxv16bf16      = 177,  // n x 16 x bf16
      nxv32bf16      = 178,  // n x 32 x bf16

      nxv1f32        = 179,  // n x  1 x f32
      nxv2f32        = 180,  // n x  2 x f32
      nxv4f32        = 181,  // n x  4 x f32
      nxv8f32        = 182,  // n x  8 x f32
      nxv16f32       = 183,  // n x 16 x f32

      nxv1f64        = 184,  // n x  1 x f64
      nxv2f64        = 185,  // n x  2 x f64
      nxv4f64        = 186,  // n x  4 x f64
      nxv8f64        = 187,  // n x  8 x f64

      FIRST_FP_SCALABLE_VECTOR_VALUETYPE = nxv1f16,
      LAST_FP_SCALABLE_VECTOR_VALUETYPE = nxv8f64,

      FIRST_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
      LAST_SCALABLE_VECTOR_VALUETYPE = nxv8f64,

      FIRST_VECTOR_VALUETYPE = v1i1,
      LAST_VECTOR_VALUETYPE  = nxv8f64,

      x86mmx         = 188,    // This is an X86 MMX value

      Glue           = 189,    // This glues nodes together during pre-RA sched

      isVoid         = 190,    // This has no value

      Untyped        = 191,    // This value takes a register, but has
                               // unspecified type.  The register class
                               // will be determined by the opcode.

      funcref        = 192,    // WebAssembly's funcref type
      externref      = 193,    // WebAssembly's externref type
      x86amx         = 194,    // This is an X86 AMX value
      i64x8          = 195,    // 8 Consecutive GPRs (AArch64)

      FIRST_VALUETYPE =  1,    // This is always the beginning of the list.
      LAST_VALUETYPE = i64x8,  // This always remains at the end of the list.
      VALUETYPE_SIZE = LAST_VALUETYPE + 1,

      // This is the current maximum for LAST_VALUETYPE.
      // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit
      // vectors. This value must be a multiple of 32.
      MAX_ALLOWED_VALUETYPE = 224,

      // A value of type llvm::TokenTy
      token          = 248,

      // This is MDNode or MDString.
      Metadata       = 249,

      // An int value the size of the pointer of the current
      // target to any address space. This must only be used internally by
      // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
      iPTRAny        = 250,

      // A vector with any length and element size. This is used
      // for intrinsics that have overloadings based on vector types.
      // This is only for tblgen's consumption!
      vAny           = 251,

      // Any floating-point or vector floating-point value. This is used
      // for intrinsics that have overloadings based on floating-point types.
      // This is only for tblgen's consumption!
      fAny           = 252,

      // An integer or vector integer value of any bit width. This is
      // used for intrinsics that have overloadings based on integer bit widths.
      // This is only for tblgen's consumption!
      iAny           = 253,

      // An int value the size of the pointer of the current
      // target.  This should only be used internally by tblgen!
      iPTR           = 254,

      // Any type. This is used for intrinsics that have overloadings.
      // This is only for tblgen's consumption!
      Any            = 255

      // clang-format on
    };

    SimpleValueType SimpleTy = INVALID_SIMPLE_VALUE_TYPE;

    constexpr MVT() = default;
    constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}

    bool operator>(const MVT& S)  const { return SimpleTy >  S.SimpleTy; }
    bool operator<(const MVT& S)  const { return SimpleTy <  S.SimpleTy; }
    bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
    bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
    bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
    bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }

    /// Return true if this is a valid simple valuetype.
    bool isValid() const {
      return (SimpleTy >= MVT::FIRST_VALUETYPE &&
              SimpleTy <= MVT::LAST_VALUETYPE);
    }

    /// Return true if this is a FP or a vector FP type.
    bool isFloatingPoint() const {
      return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
               SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE));
    }

    /// Return true if this is an integer or a vector integer type.
    bool isInteger() const {
      return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
               SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE));
    }

    /// Return true if this is an integer, not including vectors.
    bool isScalarInteger() const {
      return (SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
              SimpleTy <= MVT::LAST_INTEGER_VALUETYPE);
    }

    /// Return true if this is a vector value type.
    bool isVector() const {
      return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
              SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
    }

    /// Return true if this is a vector value type where the
    /// runtime length is machine-dependent.
    bool isScalableVector() const {
      return (SimpleTy >= MVT::FIRST_SCALABLE_VECTOR_VALUETYPE &&
              SimpleTy <= MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
    }

    bool isFixedLengthVector() const {
      return (SimpleTy >= MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE &&
              SimpleTy <= MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
    }

    /// Return true if this is a 16-bit vector type.
    bool is16BitVector() const {
      return (SimpleTy == MVT::v2i8  || SimpleTy == MVT::v1i16 ||
              SimpleTy == MVT::v16i1 || SimpleTy == MVT::v1f16);
    }

    /// Return true if this is a 32-bit vector type.
    bool is32BitVector() const {
      return (SimpleTy == MVT::v32i1 || SimpleTy == MVT::v4i8   ||
              SimpleTy == MVT::v2i16 || SimpleTy == MVT::v1i32  ||
              SimpleTy == MVT::v2f16 || SimpleTy == MVT::v2bf16 ||
              SimpleTy == MVT::v1f32);
    }

    /// Return true if this is a 64-bit vector type.
    bool is64BitVector() const {
      return (SimpleTy == MVT::v64i1  || SimpleTy == MVT::v8i8  ||
              SimpleTy == MVT::v4i16  || SimpleTy == MVT::v2i32 ||
              SimpleTy == MVT::v1i64  || SimpleTy == MVT::v4f16 ||
              SimpleTy == MVT::v4bf16 || SimpleTy == MVT::v2f32 ||
              SimpleTy == MVT::v1f64);
    }

    /// Return true if this is a 128-bit vector type.
    bool is128BitVector() const {
      return (SimpleTy == MVT::v128i1 || SimpleTy == MVT::v16i8  ||
              SimpleTy == MVT::v8i16  || SimpleTy == MVT::v4i32  ||
              SimpleTy == MVT::v2i64  || SimpleTy == MVT::v1i128 ||
              SimpleTy == MVT::v8f16  || SimpleTy == MVT::v8bf16 ||
              SimpleTy == MVT::v4f32  || SimpleTy == MVT::v2f64);
    }

    /// Return true if this is a 256-bit vector type.
    bool is256BitVector() const {
      return (SimpleTy == MVT::v16f16 || SimpleTy == MVT::v16bf16 ||
              SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
              SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
              SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64 ||
              SimpleTy == MVT::v256i1 || SimpleTy == MVT::v128i2 ||
              SimpleTy == MVT::v64i4);
    }

    /// Return true if this is a 512-bit vector type.
    bool is512BitVector() const {
      return (SimpleTy == MVT::v32f16 || SimpleTy == MVT::v32bf16 ||
              SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 ||
              SimpleTy == MVT::v512i1 || SimpleTy == MVT::v256i2 ||
              SimpleTy == MVT::v128i4 || SimpleTy == MVT::v64i8 ||
              SimpleTy == MVT::v32i16 || SimpleTy == MVT::v16i32 ||
              SimpleTy == MVT::v8i64);
    }

    /// Return true if this is a 1024-bit vector type.
    bool is1024BitVector() const {
      return (SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 ||
              SimpleTy == MVT::v64i16  || SimpleTy == MVT::v32i32 ||
              SimpleTy == MVT::v16i64  || SimpleTy == MVT::v64f16 ||
              SimpleTy == MVT::v32f32  || SimpleTy == MVT::v16f64 ||
              SimpleTy == MVT::v64bf16);
    }

    /// Return true if this is a 2048-bit vector type.
    bool is2048BitVector() const {
      return (SimpleTy == MVT::v256i8  || SimpleTy == MVT::v128i16 ||
              SimpleTy == MVT::v64i32  || SimpleTy == MVT::v32i64  ||
              SimpleTy == MVT::v128f16 || SimpleTy == MVT::v64f32  ||
              SimpleTy == MVT::v32f64  || SimpleTy == MVT::v128bf16 ||
              SimpleTy == MVT::v2048i1);
    }

    /// Return true if this is an overloaded type for TableGen.
    bool isOverloaded() const {
      return (SimpleTy == MVT::Any || SimpleTy == MVT::iAny ||
              SimpleTy == MVT::fAny || SimpleTy == MVT::vAny ||
              SimpleTy == MVT::iPTRAny);
    }

    /// Return a vector with the same number of elements as this vector, but
    /// with the element type converted to an integer type with the same
    /// bitwidth.
    MVT changeVectorElementTypeToInteger() const {
      MVT EltTy = getVectorElementType();
      MVT IntTy = MVT::getIntegerVT(EltTy.getSizeInBits());
      MVT VecTy = MVT::getVectorVT(IntTy, getVectorElementCount());
      assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
             "Simple vector VT not representable by simple integer vector VT!");
      return VecTy;
    }
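
    // For example (sketch): MVT::v4f32.changeVectorElementTypeToInteger()
    // yields MVT::v4i32, and MVT::nxv2f64 yields MVT::nxv2i64; element count
    // and scalability are preserved, only the element type changes.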

    /// Return a VT for a vector type whose attributes match this vector,
    /// except for the element type, which is chosen by the caller.
    MVT changeVectorElementType(MVT EltVT) const {
      MVT VecTy = MVT::getVectorVT(EltVT, getVectorElementCount());
      assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
             "Simple vector VT not representable by a simple vector VT!");
      return VecTy;
    }

    /// Return the type converted to an equivalently sized integer or vector
    /// with integer element type. Similar to changeVectorElementTypeToInteger,
    /// but also handles scalars.
    MVT changeTypeToInteger() {
      if (isVector())
        return changeVectorElementTypeToInteger();
      return MVT::getIntegerVT(getSizeInBits());
    }

    /// Return a VT for a vector type with the same element type but
    /// half the number of elements.
    MVT getHalfNumVectorElementsVT() const {
      MVT EltVT = getVectorElementType();
      auto EltCnt = getVectorElementCount();
      assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
      return getVectorVT(EltVT, EltCnt.divideCoefficientBy(2));
    }

    /// Returns true if the vector's minimum number of elements is a power of 2.
    bool isPow2VectorType() const {
      unsigned NElts = getVectorMinNumElements();
      return !(NElts & (NElts - 1));
    }

    /// Widens the length of the given vector MVT up to the nearest power of 2
    /// and returns that type.
    MVT getPow2VectorType() const {
      if (isPow2VectorType())
        return *this;

      ElementCount NElts = getVectorElementCount();
      unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
      NElts = ElementCount::get(NewMinCount, NElts.isScalable());
      return MVT::getVectorVT(getVectorElementType(), NElts);
    }
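
    // For example (sketch): MVT::v3i32 is not a power-of-2 vector, so
    // getPow2VectorType() widens it to MVT::v4i32. MVT::v4f32 already has a
    // power-of-2 element count and is returned unchanged.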

    /// If this is a vector, return the element type, otherwise return this.
    MVT getScalarType() const {
      return isVector() ? getVectorElementType() : *this;
    }

    MVT getVectorElementType() const {
      // clang-format off
      switch (SimpleTy) {
      default:
        llvm_unreachable("Not a vector MVT!");
      case v1i1:
      case v2i1:
      case v4i1:
      case v8i1:
      case v16i1:
      case v32i1:
      case v64i1:
      case v128i1:
      case v256i1:
      case v512i1:
      case v1024i1:
      case v2048i1:
      case nxv1i1:
      case nxv2i1:
      case nxv4i1:
      case nxv8i1:
      case nxv16i1:
      case nxv32i1:
      case nxv64i1: return i1;
      case v128i2:
      case v256i2: return i2;
      case v64i4:
      case v128i4: return i4;
      case v1i8:
      case v2i8:
      case v4i8:
      case v8i8:
      case v16i8:
      case v32i8:
      case v64i8:
      case v128i8:
      case v256i8:
      case v512i8:
      case v1024i8:
      case nxv1i8:
      case nxv2i8:
      case nxv4i8:
      case nxv8i8:
      case nxv16i8:
      case nxv32i8:
      case nxv64i8: return i8;
      case v1i16:
      case v2i16:
      case v3i16:
      case v4i16:
      case v8i16:
      case v16i16:
      case v32i16:
      case v64i16:
      case v128i16:
      case v256i16:
      case v512i16:
      case nxv1i16:
      case nxv2i16:
      case nxv4i16:
      case nxv8i16:
      case nxv16i16:
      case nxv32i16: return i16;
      case v1i32:
      case v2i32:
      case v3i32:
      case v4i32:
      case v5i32:
      case v6i32:
      case v7i32:
      case v8i32:
      case v9i32:
      case v10i32:
      case v11i32:
      case v12i32:
      case v16i32:
      case v32i32:
      case v64i32:
      case v128i32:
      case v256i32:
      case v512i32:
      case v1024i32:
      case v2048i32:
      case nxv1i32:
      case nxv2i32:
      case nxv4i32:
      case nxv8i32:
      case nxv16i32:
      case nxv32i32: return i32;
      case v1i64:
      case v2i64:
      case v3i64:
      case v4i64:
      case v8i64:
      case v16i64:
      case v32i64:
      case v64i64:
      case v128i64:
      case v256i64:
      case nxv1i64:
      case nxv2i64:
      case nxv4i64:
      case nxv8i64:
      case nxv16i64:
      case nxv32i64: return i64;
      case v1i128: return i128;
      case v1f16:
      case v2f16:
      case v3f16:
      case v4f16:
      case v8f16:
      case v16f16:
      case v32f16:
      case v64f16:
      case v128f16:
      case v256f16:
      case v512f16:
      case nxv1f16:
      case nxv2f16:
      case nxv4f16:
      case nxv8f16:
      case nxv16f16:
      case nxv32f16: return f16;
      case v2bf16:
      case v3bf16:
      case v4bf16:
      case v8bf16:
      case v16bf16:
      case v32bf16:
      case v64bf16:
      case v128bf16:
      case nxv1bf16:
      case nxv2bf16:
      case nxv4bf16:
      case nxv8bf16:
      case nxv16bf16:
      case nxv32bf16: return bf16;
      case v1f32:
      case v2f32:
      case v3f32:
      case v4f32:
      case v5f32:
      case v6f32:
      case v7f32:
      case v8f32:
      case v9f32:
      case v10f32:
      case v11f32:
      case v12f32:
      case v16f32:
      case v32f32:
      case v64f32:
      case v128f32:
      case v256f32:
      case v512f32:
      case v1024f32:
      case v2048f32:
      case nxv1f32:
      case nxv2f32:
      case nxv4f32:
      case nxv8f32:
      case nxv16f32: return f32;
      case v1f64:
      case v2f64:
      case v3f64:
      case v4f64:
      case v8f64:
      case v16f64:
      case v32f64:
      case v64f64:
      case v128f64:
      case v256f64:
      case nxv1f64:
      case nxv2f64:
      case nxv4f64:
      case nxv8f64: return f64;
      }
      // clang-format on
    }

    /// Given a vector type, return the minimum number of elements it contains.
    unsigned getVectorMinNumElements() const {
      switch (SimpleTy) {
      default:
        llvm_unreachable("Not a vector MVT!");
      case v2048i1:
      case v2048i32:
      case v2048f32: return 2048;
      case v1024i1:
      case v1024i8:
      case v1024i32:
      case v1024f32: return 1024;
      case v512i1:
      case v512i8:
      case v512i16:
      case v512i32:
      case v512f16:
      case v512f32: return 512;
      case v256i1:
      case v256i2:
      case v256i8:
      case v256i16:
      case v256f16:
      case v256i32:
      case v256i64:
      case v256f32:
      case v256f64: return 256;
      case v128i1:
      case v128i2:
      case v128i4:
      case v128i8:
      case v128i16:
      case v128i32:
      case v128i64:
      case v128f16:
      case v128bf16:
      case v128f32:
      case v128f64: return 128;
      case v64i1:
      case v64i4:
      case v64i8:
      case v64i16:
      case v64i32:
      case v64i64:
      case v64f16:
      case v64bf16:
      case v64f32:
      case v64f64:
      case nxv64i1:
      case nxv64i8: return 64;
      case v32i1:
      case v32i8:
      case v32i16:
      case v32i32:
      case v32i64:
      case v32f16:
      case v32bf16:
      case v32f32:
      case v32f64:
      case nxv32i1:
      case nxv32i8:
      case nxv32i16:
      case nxv32i32:
      case nxv32i64:
      case nxv32f16:
      case nxv32bf16: return 32;
      case v16i1:
      case v16i8:
      case v16i16:
      case v16i32:
      case v16i64:
      case v16f16:
      case v16bf16:
      case v16f32:
      case v16f64:
      case nxv16i1:
      case nxv16i8:
      case nxv16i16:
      case nxv16i32:
      case nxv16i64:
      case nxv16f16:
      case nxv16bf16:
      case nxv16f32: return 16;
      case v12i32:
      case v12f32: return 12;
      case v11i32:
      case v11f32: return 11;
      case v10i32:
      case v10f32: return 10;
      case v9i32:
      case v9f32: return 9;
      case v8i1:
      case v8i8:
      case v8i16:
      case v8i32:
      case v8i64:
      case v8f16:
      case v8bf16:
      case v8f32:
      case v8f64:
      case nxv8i1:
      case nxv8i8:
      case nxv8i16:
      case nxv8i32:
      case nxv8i64:
      case nxv8f16:
      case nxv8bf16:
      case nxv8f32:
      case nxv8f64: return 8;
      case v7i32:
      case v7f32: return 7;
      case v6i32:
      case v6f32: return 6;
      case v5i32:
      case v5f32: return 5;
      case v4i1:
      case v4i8:
      case v4i16:
      case v4i32:
      case v4i64:
      case v4f16:
      case v4bf16:
      case v4f32:
      case v4f64:
      case nxv4i1:
      case nxv4i8:
      case nxv4i16:
      case nxv4i32:
      case nxv4i64:
      case nxv4f16:
      case nxv4bf16:
      case nxv4f32:
      case nxv4f64: return 4;
      case v3i16:
      case v3i32:
      case v3i64:
      case v3f16:
      case v3bf16:
      case v3f32:
      case v3f64: return 3;
      case v2i1:
      case v2i8:
      case v2i16:
      case v2i32:
      case v2i64:
      case v2f16:
      case v2bf16:
      case v2f32:
      case v2f64:
      case nxv2i1:
      case nxv2i8:
      case nxv2i16:
      case nxv2i32:
      case nxv2i64:
      case nxv2f16:
      case nxv2bf16:
      case nxv2f32:
      case nxv2f64: return 2;
      case v1i1:
      case v1i8:
      case v1i16:
      case v1i32:
      case v1i64:
      case v1i128:
      case v1f16:
      case v1f32:
      case v1f64:
      case nxv1i1:
      case nxv1i8:
      case nxv1i16:
      case nxv1i32:
      case nxv1i64:
      case nxv1f16:
      case nxv1bf16:
      case nxv1f32:
      case nxv1f64: return 1;
      }
    }

    ElementCount getVectorElementCount() const {
      return ElementCount::get(getVectorMinNumElements(), isScalableVector());
    }

    unsigned getVectorNumElements() const {
      if (isScalableVector())
        llvm::reportInvalidSizeRequest(
            "Possible incorrect use of MVT::getVectorNumElements() for "
            "scalable vector. Scalable flag may be dropped, use "
            "MVT::getVectorElementCount() instead");
      return getVectorMinNumElements();
    }
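
    // For example (sketch): MVT::v4i32.getVectorElementCount() is a fixed
    // count of 4, while MVT::nxv4i32.getVectorElementCount() is a scalable
    // count of 4 (4 * vscale elements at runtime). Calling
    // getVectorNumElements() on the scalable type triggers the invalid size
    // request diagnostic above before returning the minimum count.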

    /// Returns the size of the specified MVT in bits.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getSizeInBits() const {
      switch (SimpleTy) {
      default:
        llvm_unreachable("getSizeInBits called on extended MVT.");
      case Other:
        llvm_unreachable("Value type is non-standard value, Other.");
      case iPTR:
        llvm_unreachable("Value type size is target-dependent. Ask TLI.");
      case iPTRAny:
      case iAny:
      case fAny:
      case vAny:
      case Any:
        llvm_unreachable("Value type is overloaded.");
      case token:
        llvm_unreachable("Token type is a sentinel that cannot be used "
                         "in codegen and has no size");
      case Metadata:
        llvm_unreachable("Value type is metadata.");
      case i1:
      case v1i1: return TypeSize::Fixed(1);
      case nxv1i1: return TypeSize::Scalable(1);
      case i2:
      case v2i1: return TypeSize::Fixed(2);
      case nxv2i1: return TypeSize::Scalable(2);
      case i4:
      case v4i1: return TypeSize::Fixed(4);
      case nxv4i1: return TypeSize::Scalable(4);
      case i8  :
      case v1i8:
      case v8i1: return TypeSize::Fixed(8);
      case nxv1i8:
      case nxv8i1: return TypeSize::Scalable(8);
      case i16 :
      case f16:
      case bf16:
      case v16i1:
      case v2i8:
      case v1i16:
      case v1f16: return TypeSize::Fixed(16);
      case nxv16i1:
      case nxv2i8:
      case nxv1i16:
      case nxv1bf16:
      case nxv1f16: return TypeSize::Scalable(16);
      case f32 :
      case i32 :
      case v32i1:
      case v4i8:
      case v2i16:
      case v2f16:
      case v2bf16:
      case v1f32:
      case v1i32: return TypeSize::Fixed(32);
      case nxv32i1:
      case nxv4i8:
      case nxv2i16:
      case nxv1i32:
      case nxv2f16:
      case nxv2bf16:
      case nxv1f32: return TypeSize::Scalable(32);
      case v3i16:
      case v3f16:
      case v3bf16: return TypeSize::Fixed(48);
      case x86mmx:
      case f64 :
      case i64 :
      case v64i1:
      case v8i8:
      case v4i16:
      case v2i32:
      case v1i64:
      case v4f16:
      case v4bf16:
      case v2f32:
      case v1f64: return TypeSize::Fixed(64);
      case nxv64i1:
      case nxv8i8:
      case nxv4i16:
      case nxv2i32:
      case nxv1i64:
      case nxv4f16:
      case nxv4bf16:
      case nxv2f32:
      case nxv1f64: return TypeSize::Scalable(64);
      case f80 :  return TypeSize::Fixed(80);
      case v3i32:
      case v3f32: return TypeSize::Fixed(96);
      case f128:
      case ppcf128:
      case i128:
      case v128i1:
      case v16i8:
      case v8i16:
      case v4i32:
      case v2i64:
      case v1i128:
      case v8f16:
      case v8bf16:
      case v4f32:
      case v2f64: return TypeSize::Fixed(128);
      case nxv16i8:
      case nxv8i16:
      case nxv4i32:
      case nxv2i64:
      case nxv8f16:
      case nxv8bf16:
      case nxv4f32:
      case nxv2f64: return TypeSize::Scalable(128);
      case v5i32:
      case v5f32: return TypeSize::Fixed(160);
      case v6i32:
      case v3i64:
      case v6f32:
      case v3f64: return TypeSize::Fixed(192);
      case v7i32:
      case v7f32: return TypeSize::Fixed(224);
      case v256i1:
      case v128i2:
      case v64i4:
      case v32i8:
      case v16i16:
      case v8i32:
      case v4i64:
      case v16f16:
      case v16bf16:
      case v8f32:
      case v4f64: return TypeSize::Fixed(256);
      case nxv32i8:
      case nxv16i16:
      case nxv8i32:
      case nxv4i64:
      case nxv16f16:
      case nxv16bf16:
      case nxv8f32:
      case nxv4f64: return TypeSize::Scalable(256);
      case v9i32:
      case v9f32: return TypeSize::Fixed(288);
      case v10i32:
      case v10f32: return TypeSize::Fixed(320);
      case v11i32:
      case v11f32: return TypeSize::Fixed(352);
      case v12i32:
      case v12f32: return TypeSize::Fixed(384);
      case i64x8:
      case v512i1:
      case v256i2:
      case v128i4:
      case v64i8:
      case v32i16:
      case v16i32:
      case v8i64:
      case v32f16:
      case v32bf16:
      case v16f32:
      case v8f64: return TypeSize::Fixed(512);
      case nxv64i8:
      case nxv32i16:
      case nxv16i32:
      case nxv8i64:
      case nxv32f16:
      case nxv32bf16:
      case nxv16f32:
      case nxv8f64: return TypeSize::Scalable(512);
      case v1024i1:
      case v128i8:
      case v64i16:
      case v32i32:
      case v16i64:
      case v64f16:
      case v64bf16:
      case v32f32:
      case v16f64: return TypeSize::Fixed(1024);
      case nxv32i32:
      case nxv16i64: return TypeSize::Scalable(1024);
      case v2048i1:
      case v256i8:
      case v128i16:
      case v64i32:
      case v32i64:
      case v128f16:
      case v128bf16:
      case v64f32:
      case v32f64: return TypeSize::Fixed(2048);
      case nxv32i64: return TypeSize::Scalable(2048);
      case v512i8:
      case v256i16:
      case v128i32:
      case v64i64:
      case v256f16:
      case v128f32:
      case v64f64:  return TypeSize::Fixed(4096);
      case v1024i8:
      case v512i16:
      case v256i32:
      case v128i64:
      case v512f16:
      case v256f32:
      case x86amx:
      case v128f64:  return TypeSize::Fixed(8192);
      case v512i32:
      case v256i64:
      case v512f32:
      case v256f64:  return TypeSize::Fixed(16384);
      case v1024i32:
      case v1024f32:  return TypeSize::Fixed(32768);
      case v2048i32:
      case v2048f32:  return TypeSize::Fixed(65536);
      case funcref:
      case externref: return TypeSize::Fixed(0); // opaque type
      }
    }

    /// Return the size of the specified fixed width value type in bits. The
    /// function will assert if the type is scalable.
    uint64_t getFixedSizeInBits() const {
      return getSizeInBits().getFixedValue();
    }

    uint64_t getScalarSizeInBits() const {
      return getScalarType().getSizeInBits().getFixedValue();
    }

    /// Return the number of bytes overwritten by a store of the specified value
    /// type.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getStoreSize() const {
      TypeSize BaseSize = getSizeInBits();
      return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
    }
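
    // For example (sketch): MVT::i1 occupies a single bit but its store size
    // is 1 byte; MVT::v3i32 is 96 bits, so its store size is 12 bytes; for
    // MVT::nxv4i32 the result is a scalable 16 bytes (16 * vscale bytes at
    // runtime).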

    /// Return the number of bytes overwritten by a store of this value type or
    /// this value type's element type in the case of a vector.
    uint64_t getScalarStoreSize() const {
      return getScalarType().getStoreSize().getFixedValue();
    }

    /// Return the number of bits overwritten by a store of the specified value
    /// type.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getStoreSizeInBits() const {
      return getStoreSize() * 8;
    }

    /// Returns true if the number of bits for the type is a multiple of an
    /// 8-bit byte.
    bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }

    /// Return true if we know at compile time this has more bits than VT.
    bool knownBitsGT(MVT VT) const {
      return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has at least as many bits
    /// as VT.
    bool knownBitsGE(MVT VT) const {
      return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has fewer bits than VT.
    bool knownBitsLT(MVT VT) const {
      return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has at most as many bits
    /// as VT.
    bool knownBitsLE(MVT VT) const {
      return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if this has more bits than VT.
    bool bitsGT(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsGT(VT);
    }

    /// Return true if this has at least as many bits as VT.
    bool bitsGE(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsGE(VT);
    }

    /// Return true if this has fewer bits than VT.
    bool bitsLT(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsLT(VT);
    }

    /// Return true if this has no more bits than VT.
    bool bitsLE(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsLE(VT);
    }

    static MVT getFloatingPointVT(unsigned BitWidth) {
      switch (BitWidth) {
      default:
        llvm_unreachable("Bad bit width!");
      case 16:
        return MVT::f16;
      case 32:
        return MVT::f32;
      case 64:
        return MVT::f64;
      case 80:
        return MVT::f80;
      case 128:
        return MVT::f128;
      }
    }
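
    // For example (sketch): getFloatingPointVT(32) is MVT::f32. Unlike
    // getIntegerVT() below, an unsupported width (e.g. 24) is unreachable
    // rather than INVALID_SIMPLE_VALUE_TYPE, so callers must pass a width
    // that names a simple FP type.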

    static MVT getIntegerVT(unsigned BitWidth) {
      switch (BitWidth) {
      default:
        return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
      case 1:
        return MVT::i1;
      case 2:
        return MVT::i2;
      case 4:
        return MVT::i4;
      case 8:
        return MVT::i8;
      case 16:
        return MVT::i16;
      case 32:
        return MVT::i32;
      case 64:
        return MVT::i64;
      case 128:
        return MVT::i128;
      }
    }
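
    // For example (sketch): getIntegerVT(32) is MVT::i32, while a width with
    // no simple type, such as getIntegerVT(24), yields
    // INVALID_SIMPLE_VALUE_TYPE; check isValid() on the result before use.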

    static MVT getVectorVT(MVT VT, unsigned NumElements) {
      // clang-format off
      switch (VT.SimpleTy) {
      default:
        break;
      case MVT::i1:
        if (NumElements == 1)    return MVT::v1i1;
        if (NumElements == 2)    return MVT::v2i1;
        if (NumElements == 4)    return MVT::v4i1;
        if (NumElements == 8)    return MVT::v8i1;
        if (NumElements == 16)   return MVT::v16i1;
        if (NumElements == 32)   return MVT::v32i1;
        if (NumElements == 64)   return MVT::v64i1;
        if (NumElements == 128)  return MVT::v128i1;
        if (NumElements == 256)  return MVT::v256i1;
        if (NumElements == 512)  return MVT::v512i1;
        if (NumElements == 1024) return MVT::v1024i1;
        if (NumElements == 2048) return MVT::v2048i1;
        break;
      case MVT::i2:
        if (NumElements == 128) return MVT::v128i2;
        if (NumElements == 256) return MVT::v256i2;
        break;
      case MVT::i4:
        if (NumElements == 64)  return MVT::v64i4;
        if (NumElements == 128) return MVT::v128i4;
        break;
      case MVT::i8:
        if (NumElements == 1)   return MVT::v1i8;
        if (NumElements == 2)   return MVT::v2i8;
        if (NumElements == 4)   return MVT::v4i8;
        if (NumElements == 8)   return MVT::v8i8;
        if (NumElements == 16)  return MVT::v16i8;
        if (NumElements == 32)  return MVT::v32i8;
        if (NumElements == 64)  return MVT::v64i8;
        if (NumElements == 128) return MVT::v128i8;
        if (NumElements == 256) return MVT::v256i8;
        if (NumElements == 512) return MVT::v512i8;
        if (NumElements == 1024) return MVT::v1024i8;
        break;
      case MVT::i16:
        if (NumElements == 1)   return MVT::v1i16;
        if (NumElements == 2)   return MVT::v2i16;
        if (NumElements == 3)   return MVT::v3i16;
        if (NumElements == 4)   return MVT::v4i16;
        if (NumElements == 8)   return MVT::v8i16;
        if (NumElements == 16)  return MVT::v16i16;
        if (NumElements == 32)  return MVT::v32i16;
        if (NumElements == 64)  return MVT::v64i16;
        if (NumElements == 128) return MVT::v128i16;
        if (NumElements == 256) return MVT::v256i16;
        if (NumElements == 512) return MVT::v512i16;
        break;
      case MVT::i32:
        if (NumElements == 1)    return MVT::v1i32;
        if (NumElements == 2)    return MVT::v2i32;
        if (NumElements == 3)    return MVT::v3i32;
        if (NumElements == 4)    return MVT::v4i32;
        if (NumElements == 5)    return MVT::v5i32;
        if (NumElements == 6)    return MVT::v6i32;
        if (NumElements == 7)    return MVT::v7i32;
        if (NumElements == 8)    return MVT::v8i32;
        if (NumElements == 9)    return MVT::v9i32;
        if (NumElements == 10)   return MVT::v10i32;
        if (NumElements == 11)   return MVT::v11i32;
        if (NumElements == 12)   return MVT::v12i32;
        if (NumElements == 16)   return MVT::v16i32;
        if (NumElements == 32)   return MVT::v32i32;
        if (NumElements == 64)   return MVT::v64i32;
        if (NumElements == 128)  return MVT::v128i32;
        if (NumElements == 256)  return MVT::v256i32;
        if (NumElements == 512)  return MVT::v512i32;
        if (NumElements == 1024) return MVT::v1024i32;
        if (NumElements == 2048) return MVT::v2048i32;
        break;
      case MVT::i64:
        if (NumElements == 1)  return MVT::v1i64;
        if (NumElements == 2)  return MVT::v2i64;
        if (NumElements == 3)  return MVT::v3i64;
        if (NumElements == 4)  return MVT::v4i64;
        if (NumElements == 8)  return MVT::v8i64;
        if (NumElements == 16) return MVT::v16i64;
        if (NumElements == 32) return MVT::v32i64;
        if (NumElements == 64) return MVT::v64i64;
        if (NumElements == 128) return MVT::v128i64;
        if (NumElements == 256) return MVT::v256i64;
        break;
      case MVT::i128:
        if (NumElements == 1)  return MVT::v1i128;
        break;
      case MVT::f16:
        if (NumElements == 1)   return MVT::v1f16;
        if (NumElements == 2)   return MVT::v2f16;
        if (NumElements == 3)   return MVT::v3f16;
        if (NumElements == 4)   return MVT::v4f16;
        if (NumElements == 8)   return MVT::v8f16;
        if (NumElements == 16)  return MVT::v16f16;
        if (NumElements == 32)  return MVT::v32f16;
        if (NumElements == 64)  return MVT::v64f16;
        if (NumElements == 128) return MVT::v128f16;
        if (NumElements == 256) return MVT::v256f16;
        if (NumElements == 512) return MVT::v512f16;
        break;
      case MVT::bf16:
        if (NumElements == 2)   return MVT::v2bf16;
        if (NumElements == 3)   return MVT::v3bf16;
        if (NumElements == 4)   return MVT::v4bf16;
        if (NumElements == 8)   return MVT::v8bf16;
        if (NumElements == 16)  return MVT::v16bf16;
        if (NumElements == 32)  return MVT::v32bf16;
        if (NumElements == 64)  return MVT::v64bf16;
        if (NumElements == 128) return MVT::v128bf16;
        break;
      case MVT::f32:
        if (NumElements == 1)    return MVT::v1f32;
        if (NumElements == 2)    return MVT::v2f32;
        if (NumElements == 3)    return MVT::v3f32;
        if (NumElements == 4)    return MVT::v4f32;
        if (NumElements == 5)    return MVT::v5f32;
        if (NumElements == 6)    return MVT::v6f32;
        if (NumElements == 7)    return MVT::v7f32;
        if (NumElements == 8)    return MVT::v8f32;
        if (NumElements == 9)    return MVT::v9f32;
        if (NumElements == 10)   return MVT::v10f32;
        if (NumElements == 11)   return MVT::v11f32;
        if (NumElements == 12)   return MVT::v12f32;
        if (NumElements == 16)   return MVT::v16f32;
        if (NumElements == 32)   return MVT::v32f32;
        if (NumElements == 64)   return MVT::v64f32;
        if (NumElements == 128)  return MVT::v128f32;
        if (NumElements == 256)  return MVT::v256f32;
        if (NumElements == 512)  return MVT::v512f32;
        if (NumElements == 1024) return MVT::v1024f32;
        if (NumElements == 2048) return MVT::v2048f32;
        break;
      case MVT::f64:
        if (NumElements == 1)  return MVT::v1f64;
        if (NumElements == 2)  return MVT::v2f64;
        if (NumElements == 3)  return MVT::v3f64;
        if (NumElements == 4)  return MVT::v4f64;
        if (NumElements == 8)  return MVT::v8f64;
        if (NumElements == 16) return MVT::v16f64;
        if (NumElements == 32) return MVT::v32f64;
        if (NumElements == 64) return MVT::v64f64;
        if (NumElements == 128) return MVT::v128f64;
        if (NumElements == 256) return MVT::v256f64;
        break;
      }
      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
      // clang-format on
    }

    static MVT getScalableVectorVT(MVT VT, unsigned NumElements) {
      switch (VT.SimpleTy) {
        default:
          break;
        case MVT::i1:
          if (NumElements == 1)  return MVT::nxv1i1;
          if (NumElements == 2)  return MVT::nxv2i1;
          if (NumElements == 4)  return MVT::nxv4i1;
          if (NumElements == 8)  return MVT::nxv8i1;
          if (NumElements == 16) return MVT::nxv16i1;
          if (NumElements == 32) return MVT::nxv32i1;
          if (NumElements == 64) return MVT::nxv64i1;
          break;
        case MVT::i8:
          if (NumElements == 1)  return MVT::nxv1i8;
          if (NumElements == 2)  return MVT::nxv2i8;
          if (NumElements == 4)  return MVT::nxv4i8;
          if (NumElements == 8)  return MVT::nxv8i8;
          if (NumElements == 16) return MVT::nxv16i8;
          if (NumElements == 32) return MVT::nxv32i8;
          if (NumElements == 64) return MVT::nxv64i8;
          break;
        case MVT::i16:
          if (NumElements == 1)  return MVT::nxv1i16;
          if (NumElements == 2)  return MVT::nxv2i16;
          if (NumElements == 4)  return MVT::nxv4i16;
          if (NumElements == 8)  return MVT::nxv8i16;
          if (NumElements == 16) return MVT::nxv16i16;
          if (NumElements == 32) return MVT::nxv32i16;
          break;
        case MVT::i32:
          if (NumElements == 1)  return MVT::nxv1i32;
          if (NumElements == 2)  return MVT::nxv2i32;
          if (NumElements == 4)  return MVT::nxv4i32;
          if (NumElements == 8)  return MVT::nxv8i32;
          if (NumElements == 16) return MVT::nxv16i32;
          if (NumElements == 32) return MVT::nxv32i32;
          break;
        case MVT::i64:
          if (NumElements == 1)  return MVT::nxv1i64;
          if (NumElements == 2)  return MVT::nxv2i64;
          if (NumElements == 4)  return MVT::nxv4i64;
          if (NumElements == 8)  return MVT::nxv8i64;
          if (NumElements == 16) return MVT::nxv16i64;
          if (NumElements == 32) return MVT::nxv32i64;
          break;
        case MVT::f16:
          if (NumElements == 1)  return MVT::nxv1f16;
          if (NumElements == 2)  return MVT::nxv2f16;
          if (NumElements == 4)  return MVT::nxv4f16;
          if (NumElements == 8)  return MVT::nxv8f16;
          if (NumElements == 16) return MVT::nxv16f16;
          if (NumElements == 32) return MVT::nxv32f16;
          break;
        case MVT::bf16:
          if (NumElements == 1)  return MVT::nxv1bf16;
          if (NumElements == 2)  return MVT::nxv2bf16;
          if (NumElements == 4)  return MVT::nxv4bf16;
          if (NumElements == 8)  return MVT::nxv8bf16;
          if (NumElements == 16) return MVT::nxv16bf16;
          if (NumElements == 32) return MVT::nxv32bf16;
          break;
        case MVT::f32:
          if (NumElements == 1)  return MVT::nxv1f32;
          if (NumElements == 2)  return MVT::nxv2f32;
          if (NumElements == 4)  return MVT::nxv4f32;
          if (NumElements == 8)  return MVT::nxv8f32;
          if (NumElements == 16) return MVT::nxv16f32;
          break;
        case MVT::f64:
          if (NumElements == 1)  return MVT::nxv1f64;
          if (NumElements == 2)  return MVT::nxv2f64;
          if (NumElements == 4)  return MVT::nxv4f64;
          if (NumElements == 8)  return MVT::nxv8f64;
          break;
      }
      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
    }

    static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) {
      if (IsScalable)
        return getScalableVectorVT(VT, NumElements);
      return getVectorVT(VT, NumElements);
    }

    static MVT getVectorVT(MVT VT, ElementCount EC) {
      if (EC.isScalable())
        return getScalableVectorVT(VT, EC.getKnownMinValue());
      return getVectorVT(VT, EC.getKnownMinValue());
    }
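
    // Example (an illustrative sketch, not part of the original header): the
    // ElementCount overload above dispatches to the fixed-width or scalable
    // factory, with ElementCount as defined in llvm/Support/TypeSize.h.
    //
    //   MVT F = MVT::getVectorVT(MVT::i32, ElementCount::getFixed(4));
    //   MVT S = MVT::getVectorVT(MVT::i32, ElementCount::getScalable(4));
    //   // F == MVT::v4i32, S == MVT::nxv4i32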

    /// Return the value type corresponding to the specified type.  This returns
    /// all pointers as iPTR.  If HandleUnknown is true, unknown types are
    /// returned as Other, otherwise they are invalid.
    static MVT getVT(Type *Ty, bool HandleUnknown = false);

  public:
    /// SimpleValueType Iteration
    /// @{
    static auto all_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto integer_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_INTEGER_VALUETYPE,
                                MVT::LAST_INTEGER_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fp_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FP_VALUETYPE, MVT::LAST_FP_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_VECTOR_VALUETYPE,
                                MVT::LAST_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fixedlen_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE,
                                MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto scalable_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_SCALABLE_VECTOR_VALUETYPE,
                                MVT::LAST_SCALABLE_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto integer_fixedlen_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
                                MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fp_fixedlen_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE,
                                MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto integer_scalable_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
                                MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fp_scalable_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE,
                                MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }
    /// @}
  };

} // end namespace llvm

#endif // LLVM_SUPPORT_MACHINEVALUETYPE_H

//===- llvm/Support/ThreadLocal.h - Thread Local Data ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::ThreadLocal class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_THREADLOCAL_H
#define LLVM_SUPPORT_THREADLOCAL_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Threading.h"
#include <cassert>

namespace llvm {
  namespace sys {
    // ThreadLocalImpl - Common base class of all ThreadLocal instantiations.
    // YOU SHOULD NEVER USE THIS DIRECTLY.
    class ThreadLocalImpl {
      typedef uint64_t ThreadLocalDataTy;
      /// Platform-specific thread local data.
      ///
      /// This is embedded in the class and we avoid malloc'ing/free'ing it,
      /// to make this class more safe for use along with CrashRecoveryContext.
      union {
        char data[sizeof(ThreadLocalDataTy)];
        ThreadLocalDataTy align_data;
      };
    public:
      ThreadLocalImpl();
      virtual ~ThreadLocalImpl();
      void setInstance(const void* d);
      void *getInstance();
      void removeInstance();
    };

    /// ThreadLocal - A class used to abstract thread-local storage.  It holds,
    /// for each thread, a pointer to a single object of type T.
    template<class T>
    class ThreadLocal : public ThreadLocalImpl {
    public:
      ThreadLocal() : ThreadLocalImpl() { }

      /// get - Fetches a pointer to the object associated with the current
      /// thread.  If no object has yet been associated, it returns NULL.
      T* get() { return static_cast<T*>(getInstance()); }

      /// set - Associates a pointer to an object with the current thread.
      void set(T* d) { setInstance(d); }

      /// erase - Removes the pointer associated with the current thread.
      void erase() { removeInstance(); }
    };
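
    // Example usage (an illustrative sketch, not from the original header; the
    // pointed-to object's lifetime is the caller's responsibility, since
    // ThreadLocal stores only a raw pointer):
    //
    //   static sys::ThreadLocal<unsigned> Depth;
    //   void enter() {
    //     if (!Depth.get())
    //       Depth.set(new unsigned(0)); // never freed here, for brevity
    //     ++*Depth.get();
    //   }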
  } // end namespace sys
} // end namespace llvm

#endif // LLVM_SUPPORT_THREADLOCAL_H

//===-- llvm/Support/Compiler.h - Compiler abstraction support --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several macros, based on the current compiler.  This allows
// use of compiler-specific features in a way that remains portable. This header
// can be included from either C or C++.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_COMPILER_H
#define LLVM_SUPPORT_COMPILER_H

#include "llvm/Config/llvm-config.h"

#include <stddef.h>

#if defined(_MSC_VER)
#include <sal.h>
#endif

#ifndef __has_feature
# define __has_feature(x) 0
#endif

#ifndef __has_extension
# define __has_extension(x) 0
#endif

#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

#ifndef __has_include
# define __has_include(x) 0
#endif

// Only use __has_cpp_attribute in C++ mode. GCC defines __has_cpp_attribute in
// C mode, but the :: in __has_cpp_attribute(scoped::attribute) is invalid.
#ifndef LLVM_HAS_CPP_ATTRIBUTE
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define LLVM_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define LLVM_HAS_CPP_ATTRIBUTE(x) 0
#endif
#endif

/// \macro LLVM_GNUC_PREREQ
/// Extend the default __GNUC_PREREQ even if glibc's features.h isn't
/// available.
#ifndef LLVM_GNUC_PREREQ
# if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#  define LLVM_GNUC_PREREQ(maj, min, patch) \
    ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
     ((maj) << 20) + ((min) << 10) + (patch))
# elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#  define LLVM_GNUC_PREREQ(maj, min, patch) \
    ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
# else
#  define LLVM_GNUC_PREREQ(maj, min, patch) 0
# endif
#endif
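
// For example, LLVM_GNUC_PREREQ(9, 2, 0) is true when compiling with GCC 9.2.0
// or newer: each (major, minor, patch) triple is packed into a single integer
// so one comparison suffices.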

/// \macro LLVM_MSC_PREREQ
/// Is the compiler MSVC of at least the specified version?
/// The common \param version values to check for are:
/// * 1910: VS2017, version 15.1 & 15.2
/// * 1911: VS2017, version 15.3 & 15.4
/// * 1912: VS2017, version 15.5
/// * 1913: VS2017, version 15.6
/// * 1914: VS2017, version 15.7
/// * 1915: VS2017, version 15.8
/// * 1916: VS2017, version 15.9
/// * 1920: VS2019, version 16.0
/// * 1921: VS2019, version 16.1
/// * 1922: VS2019, version 16.2
/// * 1923: VS2019, version 16.3
/// * 1924: VS2019, version 16.4
/// * 1925: VS2019, version 16.5
/// * 1926: VS2019, version 16.6
/// * 1927: VS2019, version 16.7
/// * 1928: VS2019, version 16.8 + 16.9
/// * 1929: VS2019, version 16.10 + 16.11
/// * 1930: VS2022, version 17.0
#ifdef _MSC_VER
#define LLVM_MSC_PREREQ(version) (_MSC_VER >= (version))

// We require at least VS 2019.
#if !defined(LLVM_FORCE_USE_OLD_TOOLCHAIN)
#if !LLVM_MSC_PREREQ(1920)
#error LLVM requires at least VS 2019.
#endif
#endif

#else
#define LLVM_MSC_PREREQ(version) 0
#endif

/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
/// into a shared library, then the class should be private to the library and
/// not accessible from outside it.  Can also be used to mark variables and
/// functions, making them private to any shared library they are linked into.
/// On PE/COFF targets, library visibility is the default, so this isn't needed.
///
/// LLVM_EXTERNAL_VISIBILITY - classes, functions, and variables marked with
/// this attribute will be made public and visible outside of any shared library
/// they are linked into.

#if LLVM_HAS_CPP_ATTRIBUTE(gnu::visibility)
#define LLVM_ATTRIBUTE_VISIBILITY_HIDDEN [[gnu::visibility("hidden")]]
#define LLVM_ATTRIBUTE_VISIBILITY_DEFAULT [[gnu::visibility("default")]]
#elif __has_attribute(visibility)
#define LLVM_ATTRIBUTE_VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
#define LLVM_ATTRIBUTE_VISIBILITY_DEFAULT __attribute__((visibility("default")))
#else
#define LLVM_ATTRIBUTE_VISIBILITY_HIDDEN
#define LLVM_ATTRIBUTE_VISIBILITY_DEFAULT
#endif


#if (!(defined(_WIN32) || defined(__CYGWIN__)) ||                              \
     (defined(__MINGW32__) && defined(__clang__)))
#define LLVM_LIBRARY_VISIBILITY LLVM_ATTRIBUTE_VISIBILITY_HIDDEN
#if defined(LLVM_BUILD_LLVM_DYLIB) || defined(LLVM_BUILD_SHARED_LIBS)
#define LLVM_EXTERNAL_VISIBILITY LLVM_ATTRIBUTE_VISIBILITY_DEFAULT
#else
#define LLVM_EXTERNAL_VISIBILITY
#endif
#else
#define LLVM_LIBRARY_VISIBILITY
#define LLVM_EXTERNAL_VISIBILITY
#endif

#if defined(__GNUC__)
#define LLVM_PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
#else
#define LLVM_PREFETCH(addr, rw, locality)
#endif

#if __has_attribute(used)
#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
#else
#define LLVM_ATTRIBUTE_USED
#endif

#if defined(__clang__)
#define LLVM_DEPRECATED(MSG, FIX) __attribute__((deprecated(MSG, FIX)))
#else
#define LLVM_DEPRECATED(MSG, FIX) [[deprecated(MSG)]]
#endif

// Indicate that a non-static, non-const C++ member function reinitializes
// the entire object to a known state, independent of the previous state of
// the object.
//
// The clang-tidy check bugprone-use-after-move recognizes this attribute as a
// marker that a moved-from object has left the indeterminate state and can be
// reused.
#if LLVM_HAS_CPP_ATTRIBUTE(clang::reinitializes)
#define LLVM_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
#else
#define LLVM_ATTRIBUTE_REINITIALIZES
#endif

// Some compilers warn about unused functions. When a function is sometimes
// used or not depending on build settings (e.g. a function only called from
// within "assert"), this attribute can be used to suppress such warnings.
//
// However, it shouldn't be used for unused *variables*, as those have a much
// more portable solution:
//   (void)unused_var_name;
// Prefer cast-to-void wherever it is sufficient.
#if __has_attribute(unused)
#define LLVM_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define LLVM_ATTRIBUTE_UNUSED
#endif

// FIXME: Provide this for PE/COFF targets.
#if __has_attribute(weak) && !defined(__MINGW32__) && !defined(__CYGWIN__) &&  \
    !defined(_WIN32)
#define LLVM_ATTRIBUTE_WEAK __attribute__((__weak__))
#else
#define LLVM_ATTRIBUTE_WEAK
#endif

// Prior to clang 3.2, clang did not accept any spelling of
// __has_attribute(const), so assume it is supported.
#if defined(__clang__) || defined(__GNUC__)
// aka 'CONST' but following LLVM Conventions.
#define LLVM_READNONE __attribute__((__const__))
#else
#define LLVM_READNONE
#endif

#if __has_attribute(pure) || defined(__GNUC__)
// aka 'PURE' but following LLVM Conventions.
#define LLVM_READONLY __attribute__((__pure__))
#else
#define LLVM_READONLY
#endif

#if __has_attribute(minsize)
#define LLVM_ATTRIBUTE_MINSIZE __attribute__((minsize))
#else
#define LLVM_ATTRIBUTE_MINSIZE
#endif

#if __has_builtin(__builtin_expect) || defined(__GNUC__)
#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
#else
#define LLVM_LIKELY(EXPR) (EXPR)
#define LLVM_UNLIKELY(EXPR) (EXPR)
#endif
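
// Example (an illustrative sketch; Buffer and reportAllocationFailure are
// hypothetical): mark a branch that is almost never taken, so the compiler
// lays out the fast path first.
//
//   if (LLVM_UNLIKELY(Buffer == nullptr))
//     return reportAllocationFailure();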

/// LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so,
/// mark a method "not for inlining".
#if __has_attribute(noinline)
#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
#else
#define LLVM_ATTRIBUTE_NOINLINE
#endif

/// LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do
/// so, mark a method "always inline" because it is performance sensitive.
#if __has_attribute(always_inline)
#define LLVM_ATTRIBUTE_ALWAYS_INLINE inline __attribute__((always_inline))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
#else
#define LLVM_ATTRIBUTE_ALWAYS_INLINE inline
#endif

/// LLVM_ATTRIBUTE_NO_DEBUG - On compilers where we have a directive to do
/// so, mark a method "no debug" because debug info makes the debugger
/// experience worse.
#if __has_attribute(nodebug)
#define LLVM_ATTRIBUTE_NODEBUG __attribute__((nodebug))
#else
#define LLVM_ATTRIBUTE_NODEBUG
#endif

#if __has_attribute(returns_nonnull)
#define LLVM_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_RETURNS_NONNULL _Ret_notnull_
#else
#define LLVM_ATTRIBUTE_RETURNS_NONNULL
#endif

/// \macro LLVM_ATTRIBUTE_RETURNS_NOALIAS Used to mark a function as returning a
/// pointer that does not alias any other valid pointer.
#ifdef __GNUC__
#define LLVM_ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
#elif defined(_MSC_VER)
#define LLVM_ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
#else
#define LLVM_ATTRIBUTE_RETURNS_NOALIAS
#endif

/// LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
#if defined(__cplusplus) && __cplusplus > 201402L && LLVM_HAS_CPP_ATTRIBUTE(fallthrough)
#define LLVM_FALLTHROUGH [[fallthrough]]
#elif LLVM_HAS_CPP_ATTRIBUTE(gnu::fallthrough)
#define LLVM_FALLTHROUGH [[gnu::fallthrough]]
#elif __has_attribute(fallthrough)
#define LLVM_FALLTHROUGH __attribute__((fallthrough))
#elif LLVM_HAS_CPP_ATTRIBUTE(clang::fallthrough)
#define LLVM_FALLTHROUGH [[clang::fallthrough]]
#else
#define LLVM_FALLTHROUGH
#endif

/// LLVM_REQUIRE_CONSTANT_INITIALIZATION - Apply this to globals to ensure that
/// they are constant initialized.
#if LLVM_HAS_CPP_ATTRIBUTE(clang::require_constant_initialization)
#define LLVM_REQUIRE_CONSTANT_INITIALIZATION                                   \
  [[clang::require_constant_initialization]]
#else
#define LLVM_REQUIRE_CONSTANT_INITIALIZATION
#endif

/// LLVM_GSL_OWNER - Apply this to owning classes like SmallVector to enable
/// lifetime warnings.
#if LLVM_HAS_CPP_ATTRIBUTE(gsl::Owner)
#define LLVM_GSL_OWNER [[gsl::Owner]]
#else
#define LLVM_GSL_OWNER
#endif

/// LLVM_GSL_POINTER - Apply this to non-owning classes like
/// StringRef to enable lifetime warnings.
#if LLVM_HAS_CPP_ATTRIBUTE(gsl::Pointer)
#define LLVM_GSL_POINTER [[gsl::Pointer]]
#else
#define LLVM_GSL_POINTER
#endif

/// LLVM_EXTENSION - Support compilers where we have a keyword to suppress
/// pedantic diagnostics.
#ifdef __GNUC__
#define LLVM_EXTENSION __extension__
#else
#define LLVM_EXTENSION
#endif

/// LLVM_BUILTIN_UNREACHABLE - On compilers which support it, expands
/// to an expression which states that it is undefined behavior for the
/// compiler to reach this point.  Otherwise is not defined.
///
/// '#else' is intentionally left out so that other macro logic (e.g.,
/// LLVM_ASSUME_ALIGNED and llvm_unreachable()) can detect whether
/// LLVM_BUILTIN_UNREACHABLE has a definition.
#if __has_builtin(__builtin_unreachable) || defined(__GNUC__)
# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
# define LLVM_BUILTIN_UNREACHABLE __assume(false)
#endif

/// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression
/// which causes the program to exit abnormally.
#if __has_builtin(__builtin_trap) || defined(__GNUC__)
# define LLVM_BUILTIN_TRAP __builtin_trap()
#elif defined(_MSC_VER)
// The __debugbreak intrinsic is supported by MSVC, does not require forward
// declarations involving platform-specific typedefs (unlike RaiseException),
// results in a call to vectored exception handlers, and encodes to a short
// instruction that still causes the trapping behavior we want.
# define LLVM_BUILTIN_TRAP __debugbreak()
#else
# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0
#endif

/// LLVM_BUILTIN_DEBUGTRAP - On compilers which support it, expands to
/// an expression which causes the program to break while running
/// under a debugger.
#if __has_builtin(__builtin_debugtrap)
# define LLVM_BUILTIN_DEBUGTRAP __builtin_debugtrap()
#elif defined(_MSC_VER)
// The __debugbreak intrinsic is supported by MSVC and breaks while
// running under the debugger, and also supports invoking a debugger
// when the OS is configured appropriately.
# define LLVM_BUILTIN_DEBUGTRAP __debugbreak()
#else
// Just continue execution when built with compilers that have no
// support. This is a debugging aid and not intended to force the
// program to abort if encountered.
# define LLVM_BUILTIN_DEBUGTRAP
#endif

/// \macro LLVM_ASSUME_ALIGNED
/// Returns a pointer with an assumed alignment.
#if __has_builtin(__builtin_assume_aligned) || defined(__GNUC__)
# define LLVM_ASSUME_ALIGNED(p, a) __builtin_assume_aligned(p, a)
#elif defined(LLVM_BUILTIN_UNREACHABLE)
# define LLVM_ASSUME_ALIGNED(p, a) \
           (((uintptr_t(p) % (a)) == 0) ? (p) : (LLVM_BUILTIN_UNREACHABLE, (p)))
#else
# define LLVM_ASSUME_ALIGNED(p, a) (p)
#endif
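
// Example (an illustrative sketch; Ptr is a hypothetical input): the builtin
// form returns void *, so the result is cast back to the pointee type.
//
//   auto *Aligned = static_cast<float *>(LLVM_ASSUME_ALIGNED(Ptr, 16));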

/// \macro LLVM_PACKED
/// Used to specify a packed structure.
/// LLVM_PACKED(
///    struct A {
///      int i;
///      int j;
///      int k;
///      long long l;
///   });
///
/// LLVM_PACKED_START
/// struct B {
///   int i;
///   int j;
///   int k;
///   long long l;
/// };
/// LLVM_PACKED_END
#ifdef _MSC_VER
# define LLVM_PACKED(d) __pragma(pack(push, 1)) d __pragma(pack(pop))
# define LLVM_PACKED_START __pragma(pack(push, 1))
# define LLVM_PACKED_END   __pragma(pack(pop))
#else
# define LLVM_PACKED(d) d __attribute__((packed))
# define LLVM_PACKED_START _Pragma("pack(push, 1)")
# define LLVM_PACKED_END   _Pragma("pack(pop)")
#endif

/// \macro LLVM_MEMORY_SANITIZER_BUILD
/// Whether LLVM itself is built with MemorySanitizer instrumentation.
#if __has_feature(memory_sanitizer)
# define LLVM_MEMORY_SANITIZER_BUILD 1
# include <sanitizer/msan_interface.h>
# define LLVM_NO_SANITIZE_MEMORY_ATTRIBUTE __attribute__((no_sanitize_memory))
#else
# define LLVM_MEMORY_SANITIZER_BUILD 0
# define __msan_allocated_memory(p, size)
# define __msan_unpoison(p, size)
# define LLVM_NO_SANITIZE_MEMORY_ATTRIBUTE
#endif

/// \macro LLVM_ADDRESS_SANITIZER_BUILD
/// Whether LLVM itself is built with AddressSanitizer instrumentation.
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
# define LLVM_ADDRESS_SANITIZER_BUILD 1
#if __has_include(<sanitizer/asan_interface.h>)
# include <sanitizer/asan_interface.h>
#else
// These declarations exist to support ASan with MSVC. If MSVC eventually ships
// asan_interface.h in their headers, then we can remove this.
#ifdef __cplusplus
extern "C" {
#endif
void __asan_poison_memory_region(void const volatile *addr, size_t size);
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#ifdef __cplusplus
} // extern "C"
#endif
#endif
#else
# define LLVM_ADDRESS_SANITIZER_BUILD 0
# define __asan_poison_memory_region(p, size)
# define __asan_unpoison_memory_region(p, size)
#endif

/// \macro LLVM_HWADDRESS_SANITIZER_BUILD
/// Whether LLVM itself is built with HWAddressSanitizer instrumentation.
#if __has_feature(hwaddress_sanitizer)
#define LLVM_HWADDRESS_SANITIZER_BUILD 1
#else
#define LLVM_HWADDRESS_SANITIZER_BUILD 0
#endif

/// \macro LLVM_THREAD_SANITIZER_BUILD
/// Whether LLVM itself is built with ThreadSanitizer instrumentation.
#if __has_feature(thread_sanitizer) || defined(__SANITIZE_THREAD__)
# define LLVM_THREAD_SANITIZER_BUILD 1
#else
# define LLVM_THREAD_SANITIZER_BUILD 0
#endif

#if LLVM_THREAD_SANITIZER_BUILD
// Thread Sanitizer is a tool that finds races in code.
// See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations .
// tsan detects these exact functions by name.
#ifdef __cplusplus
extern "C" {
#endif
void AnnotateHappensAfter(const char *file, int line, const volatile void *cv);
void AnnotateHappensBefore(const char *file, int line, const volatile void *cv);
void AnnotateIgnoreWritesBegin(const char *file, int line);
void AnnotateIgnoreWritesEnd(const char *file, int line);
#ifdef __cplusplus
}
#endif

// This marker is used to define a happens-before arc. The race detector will
// infer an arc from the begin to the end when they share the same pointer
// argument.
# define TsanHappensBefore(cv) AnnotateHappensBefore(__FILE__, __LINE__, cv)

// This marker defines the destination of a happens-before arc.
# define TsanHappensAfter(cv) AnnotateHappensAfter(__FILE__, __LINE__, cv)

// Ignore any races on writes between here and the next TsanIgnoreWritesEnd.
# define TsanIgnoreWritesBegin() AnnotateIgnoreWritesBegin(__FILE__, __LINE__)

// Resume checking for racy writes.
# define TsanIgnoreWritesEnd() AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
#else
# define TsanHappensBefore(cv)
# define TsanHappensAfter(cv)
# define TsanIgnoreWritesBegin()
# define TsanIgnoreWritesEnd()
#endif
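
// Example (an illustrative sketch; Data and Flag are hypothetical shared
// variables, and a real implementation would use atomics for the flag): the
// matching annotations define a happens-before arc on &Flag, so TSan does not
// report the read of Data.
//
//   // Producer thread:
//   Data = 42;
//   TsanHappensBefore(&Flag);
//   Flag = true;
//
//   // Consumer thread:
//   while (!Flag) { /* spin */ }
//   TsanHappensAfter(&Flag);
//   consume(Data);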

/// \macro LLVM_NO_SANITIZE
/// Disable a particular sanitizer for a function.
#if __has_attribute(no_sanitize)
#define LLVM_NO_SANITIZE(KIND) __attribute__((no_sanitize(KIND)))
#else
#define LLVM_NO_SANITIZE(KIND)
#endif

/// Mark debug helper function definitions like dump() that should not be
/// stripped from debug builds.
/// Note that you should also surround dump() functions with
/// `#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)` so they always get
/// stripped in release builds.
// FIXME: Move this to a private config.h as it's not usable in public headers.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED
#else
#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE
#endif

/// \macro LLVM_PRETTY_FUNCTION
/// Gets a user-friendly looking function signature for the current scope
/// using the best available method on each platform.  The exact format of the
/// resulting string is implementation specific and non-portable, so this should
/// only be used, for example, for logging or diagnostics.
#if defined(_MSC_VER)
#define LLVM_PRETTY_FUNCTION __FUNCSIG__
#elif defined(__GNUC__) || defined(__clang__)
#define LLVM_PRETTY_FUNCTION __PRETTY_FUNCTION__
#else
#define LLVM_PRETTY_FUNCTION __func__
#endif

/// \macro LLVM_THREAD_LOCAL
/// A thread-local storage specifier which can be used with globals,
/// extern globals, and static globals.
///
/// This is essentially an extremely restricted analog to C++11's thread_local
/// support. It uses thread_local if available, falling back on gcc __thread
/// if not. __thread doesn't support many of the C++11 thread_local's
/// features. You should only use this for PODs that you can statically
/// initialize to some constant value. In almost all circumstances this is most
/// appropriate for use with a pointer, integer, or small aggregation of
/// pointers and integers.
#if LLVM_ENABLE_THREADS
#if __has_feature(cxx_thread_local) || defined(_MSC_VER)
#define LLVM_THREAD_LOCAL thread_local
#else
// Clang, GCC, and other compatible compilers used __thread prior to C++11 and
// we only need the restricted functionality that it provides.
#define LLVM_THREAD_LOCAL __thread
#endif
#else // !LLVM_ENABLE_THREADS
// If threading is disabled entirely, this compiles to nothing and you get
// a normal global variable.
#define LLVM_THREAD_LOCAL
#endif
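
// Example (an illustrative sketch): a POD with a constant initializer, as the
// restrictions above require.
//
//   static LLVM_THREAD_LOCAL unsigned RecursionDepth = 0;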

/// \macro LLVM_ENABLE_EXCEPTIONS
/// Whether LLVM is built with exception support.
#if __has_feature(cxx_exceptions)
#define LLVM_ENABLE_EXCEPTIONS 1
#elif defined(__GNUC__) && defined(__EXCEPTIONS)
#define LLVM_ENABLE_EXCEPTIONS 1
#elif defined(_MSC_VER) && defined(_CPPUNWIND)
#define LLVM_ENABLE_EXCEPTIONS 1
#endif

/// \macro LLVM_NO_PROFILE_INSTRUMENT_FUNCTION
/// Disable the profile instrument for a function.
#if __has_attribute(no_profile_instrument_function)
#define LLVM_NO_PROFILE_INSTRUMENT_FUNCTION                                    \
  __attribute__((no_profile_instrument_function))
#else
#define LLVM_NO_PROFILE_INSTRUMENT_FUNCTION
#endif

#endif // LLVM_SUPPORT_COMPILER_H

//===- llvm/Support/type_traits.h - Simplified type traits ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides useful additions to the standard type_traits library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
#define LLVM_SUPPORT_TYPE_TRAITS_H

#include "llvm/Support/Compiler.h"
#include <type_traits>
#include <utility>

namespace llvm {


/// Metafunction that determines whether the given type is either an
/// integral type or an enumeration type, including enum classes.
///
/// Note that this accepts potentially more integral types than is_integral
/// because it is based on being implicitly convertible to an integral type.
/// Also note that enum classes aren't implicitly convertible to integral
/// types; the value may therefore need to be explicitly converted before
/// being used.
template <typename T> class is_integral_or_enum {
  using UnderlyingT = std::remove_reference_t<T>;

public:
  static const bool value =
      !std::is_class_v<UnderlyingT> && // Filter conversion operators.
      !std::is_pointer_v<UnderlyingT> &&
      !std::is_floating_point_v<UnderlyingT> &&
      (std::is_enum_v<UnderlyingT> ||
       std::is_convertible_v<UnderlyingT, unsigned long long>);
};
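
// Illustrative checks (a sketch added for exposition, not part of the
// original interface):
//
//   enum class Color { Red };
//   static_assert(llvm::is_integral_or_enum<Color>::value, "enum classes");
//   static_assert(llvm::is_integral_or_enum<long>::value, "integral types");
//   static_assert(!llvm::is_integral_or_enum<float>::value, "but not floats");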

/// If T is a pointer, just return it. If it is not, return T&.
template<typename T, typename Enable = void>
struct add_lvalue_reference_if_not_pointer { using type = T &; };

template <typename T>
struct add_lvalue_reference_if_not_pointer<
    T, std::enable_if_t<std::is_pointer_v<T>>> {
  using type = T;
};

/// If T is a pointer to X, return a pointer to const X. If it is not,
/// return const T.
template<typename T, typename Enable = void>
struct add_const_past_pointer { using type = const T; };

template <typename T>
struct add_const_past_pointer<T, std::enable_if_t<std::is_pointer_v<T>>> {
  using type = const std::remove_pointer_t<T> *;
};
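
// Illustrative mapping (a sketch added for exposition): pointers gain a const
// pointee, everything else gains top-level const.
//
//   static_assert(std::is_same_v<llvm::add_const_past_pointer<int *>::type,
//                                const int *>, "pointer case");
//   static_assert(std::is_same_v<llvm::add_const_past_pointer<int>::type,
//                                const int>, "non-pointer case");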

template <typename T, typename Enable = void>
struct const_pointer_or_const_ref {
  using type = const T &;
};
template <typename T>
struct const_pointer_or_const_ref<T, std::enable_if_t<std::is_pointer_v<T>>> {
  using type = typename add_const_past_pointer<T>::type;
};

namespace detail {
template<class T>
union trivial_helper {
    T t;
};

} // end namespace detail

template <typename T>
struct is_copy_assignable {
  template<class F>
    static auto get(F*) -> decltype(std::declval<F &>() = std::declval<const F &>(), std::true_type{});
    static std::false_type get(...);
    static constexpr bool value = decltype(get((T*)nullptr))::value;
};

template <typename T>
struct is_move_assignable {
  template<class F>
    static auto get(F*) -> decltype(std::declval<F &>() = std::declval<F &&>(), std::true_type{});
    static std::false_type get(...);
    static constexpr bool value = decltype(get((T*)nullptr))::value;
};
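
// Illustrative checks (a sketch added for exposition):
//
//   static_assert(llvm::is_copy_assignable<int>::value, "assignable");
//   static_assert(!llvm::is_copy_assignable<const int>::value, "const is not");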

} // end namespace llvm

#endif // LLVM_SUPPORT_TYPE_TRAITS_H

//===--- MipsABIFlags.h - MIPS ABI flags ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the constants for the ABI flags structure contained
// in the .MIPS.abiflags section.
//
// https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MIPSABIFLAGS_H
#define LLVM_SUPPORT_MIPSABIFLAGS_H

namespace llvm {
namespace Mips {

// Values for the xxx_size bytes of an ABI flags structure.
enum AFL_REG {
  AFL_REG_NONE = 0x00, // No registers
  AFL_REG_32 = 0x01,   // 32-bit registers
  AFL_REG_64 = 0x02,   // 64-bit registers
  AFL_REG_128 = 0x03   // 128-bit registers
};

// Masks for the ases word of an ABI flags structure.
enum AFL_ASE {
  AFL_ASE_DSP = 0x00000001,       // DSP ASE
  AFL_ASE_DSPR2 = 0x00000002,     // DSP R2 ASE
  AFL_ASE_EVA = 0x00000004,       // Enhanced VA Scheme
  AFL_ASE_MCU = 0x00000008,       // MCU (MicroController) ASE
  AFL_ASE_MDMX = 0x00000010,      // MDMX ASE
  AFL_ASE_MIPS3D = 0x00000020,    // MIPS-3D ASE
  AFL_ASE_MT = 0x00000040,        // MT ASE
  AFL_ASE_SMARTMIPS = 0x00000080, // SmartMIPS ASE
  AFL_ASE_VIRT = 0x00000100,      // VZ ASE
  AFL_ASE_MSA = 0x00000200,       // MSA ASE
  AFL_ASE_MIPS16 = 0x00000400,    // MIPS16 ASE
  AFL_ASE_MICROMIPS = 0x00000800, // MICROMIPS ASE
  AFL_ASE_XPA = 0x00001000,       // XPA ASE
  AFL_ASE_CRC = 0x00008000,       // CRC ASE
  AFL_ASE_GINV = 0x00020000       // GINV ASE
};
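
// Example (an illustrative sketch; AsesWord is a hypothetical value read from
// the .MIPS.abiflags section): the ases word is a bitmask, so individual ASEs
// are tested with bitwise AND.
//
//   bool HasMSA = (AsesWord & llvm::Mips::AFL_ASE_MSA) != 0;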

// Values for the isa_ext word of an ABI flags structure.
enum AFL_EXT {
  AFL_EXT_NONE = 0,         // None
  AFL_EXT_XLR = 1,          // RMI Xlr instruction
  AFL_EXT_OCTEON2 = 2,      // Cavium Networks Octeon2
  AFL_EXT_OCTEONP = 3,      // Cavium Networks OcteonP
  AFL_EXT_LOONGSON_3A = 4,  // Loongson 3A
  AFL_EXT_OCTEON = 5,       // Cavium Networks Octeon
  AFL_EXT_5900 = 6,         // MIPS R5900 instruction
  AFL_EXT_4650 = 7,         // MIPS R4650 instruction
  AFL_EXT_4010 = 8,         // LSI R4010 instruction
  AFL_EXT_4100 = 9,         // NEC VR4100 instruction
  AFL_EXT_3900 = 10,        // Toshiba R3900 instruction
  AFL_EXT_10000 = 11,       // MIPS R10000 instruction
  AFL_EXT_SB1 = 12,         // Broadcom SB-1 instruction
  AFL_EXT_4111 = 13,        // NEC VR4111/VR4181 instruction
  AFL_EXT_4120 = 14,        // NEC VR4120 instruction
  AFL_EXT_5400 = 15,        // NEC VR5400 instruction
  AFL_EXT_5500 = 16,        // NEC VR5500 instruction
  AFL_EXT_LOONGSON_2E = 17, // ST Microelectronics Loongson 2E
  AFL_EXT_LOONGSON_2F = 18, // ST Microelectronics Loongson 2F
  AFL_EXT_OCTEON3 = 19      // Cavium Networks Octeon3
};

// Values for the flags1 word of an ABI flags structure.
enum AFL_FLAGS1 { AFL_FLAGS1_ODDSPREG = 1 };

// MIPS object attribute tags
enum {
  Tag_GNU_MIPS_ABI_FP = 4,  // Floating-point ABI used by this object file
  Tag_GNU_MIPS_ABI_MSA = 8, // MSA ABI used by this object file
};

// Values for the fp_abi word of an ABI flags structure
// and for the Tag_GNU_MIPS_ABI_FP attribute tag.
enum Val_GNU_MIPS_ABI_FP {
  Val_GNU_MIPS_ABI_FP_ANY = 0,    // not tagged
  Val_GNU_MIPS_ABI_FP_DOUBLE = 1, // hard float / -mdouble-float
  Val_GNU_MIPS_ABI_FP_SINGLE = 2, // hard float / -msingle-float
  Val_GNU_MIPS_ABI_FP_SOFT = 3,   // soft float
  Val_GNU_MIPS_ABI_FP_OLD_64 = 4, // -mips32r2 -mfp64
  Val_GNU_MIPS_ABI_FP_XX = 5,     // -mfpxx
  Val_GNU_MIPS_ABI_FP_64 = 6,     // -mips32r2 -mfp64
  Val_GNU_MIPS_ABI_FP_64A = 7     // -mips32r2 -mfp64 -mno-odd-spreg
};

// Values for the Tag_GNU_MIPS_ABI_MSA attribute tag.
enum Val_GNU_MIPS_ABI_MSA {
  Val_GNU_MIPS_ABI_MSA_ANY = 0, // not tagged
  Val_GNU_MIPS_ABI_MSA_128 = 1  // 128-bit MSA
};
} // end namespace Mips
} // end namespace llvm

#endif // LLVM_SUPPORT_MIPSABIFLAGS_H

//===--- OnDiskHashTable.h - On-Disk Hash Table Implementation --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Defines facilities for reading and writing on-disk hash tables.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_ONDISKHASHTABLE_H
#define LLVM_SUPPORT_ONDISKHASHTABLE_H

#include "llvm/Support/Alignment.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdlib>

namespace llvm {

/// Generates an on disk hash table.
///
/// This needs an \c Info that handles storing values into the hash table's
/// payload and computes the hash for a given key. This should provide the
/// following interface:
///
/// \code
/// class ExampleInfo {
/// public:
///   typedef ExampleKey key_type;   // Must be copy constructible
///   typedef ExampleKey &key_type_ref;
///   typedef ExampleData data_type; // Must be copy constructible
///   typedef ExampleData &data_type_ref;
///   typedef uint32_t hash_value_type; // The type the hash function returns.
///   typedef uint32_t offset_type; // The type for offsets into the table.
///
///   /// Calculate the hash for Key
///   static hash_value_type ComputeHash(key_type_ref Key);
///   /// Return the lengths, in bytes, of the given Key/Data pair.
///   static std::pair<offset_type, offset_type>
///   EmitKeyDataLength(raw_ostream &Out, key_type_ref Key, data_type_ref Data);
///   /// Write Key to Out.  KeyLen is the length from EmitKeyDataLength.
///   static void EmitKey(raw_ostream &Out, key_type_ref Key,
///                       offset_type KeyLen);
///   /// Write Data to Out.  DataLen is the length from EmitKeyDataLength.
///   static void EmitData(raw_ostream &Out, key_type_ref Key,
///                        data_type_ref Data, offset_type DataLen);
///   /// Determine if two keys are equal. Optional, only needed by contains.
///   static bool EqualKey(key_type_ref Key1, key_type_ref Key2);
/// };
/// \endcode
template <typename Info> class OnDiskChainedHashTableGenerator {
  /// A single item in the hash table.
  class Item {
  public:
    typename Info::key_type Key;
    typename Info::data_type Data;
    Item *Next;
    const typename Info::hash_value_type Hash;

    Item(typename Info::key_type_ref Key, typename Info::data_type_ref Data,
         Info &InfoObj)
        : Key(Key), Data(Data), Next(nullptr), Hash(InfoObj.ComputeHash(Key)) {}
  };

  typedef typename Info::offset_type offset_type;
  offset_type NumBuckets;
  offset_type NumEntries;
  llvm::SpecificBumpPtrAllocator<Item> BA;

  /// A linked list of values in a particular hash bucket.
  struct Bucket {
    offset_type Off;
    unsigned Length;
    Item *Head;
  };

  Bucket *Buckets;

private:
  /// Insert an item into the appropriate hash bucket.
  void insert(Bucket *Buckets, size_t Size, Item *E) {
    Bucket &B = Buckets[E->Hash & (Size - 1)];
    E->Next = B.Head;
    ++B.Length;
    B.Head = E;
  }

  /// Resize the hash table, moving the old entries into the new buckets.
  void resize(size_t NewSize) {
    Bucket *NewBuckets = static_cast<Bucket *>(
        safe_calloc(NewSize, sizeof(Bucket)));
    // Populate NewBuckets with the old entries.
    for (size_t I = 0; I < NumBuckets; ++I)
      for (Item *E = Buckets[I].Head; E;) {
        Item *N = E->Next;
        E->Next = nullptr;
        insert(NewBuckets, NewSize, E);
        E = N;
      }

    free(Buckets);
    NumBuckets = NewSize;
    Buckets = NewBuckets;
  }

public:
  /// Insert an entry into the table.
  void insert(typename Info::key_type_ref Key,
              typename Info::data_type_ref Data) {
    Info InfoObj;
    insert(Key, Data, InfoObj);
  }

  /// Insert an entry into the table.
  ///
  /// Uses the provided Info instead of a stack allocated one.
  void insert(typename Info::key_type_ref Key,
              typename Info::data_type_ref Data, Info &InfoObj) {
    ++NumEntries;
    if (4 * NumEntries >= 3 * NumBuckets)
      resize(NumBuckets * 2);
    insert(Buckets, NumBuckets, new (BA.Allocate()) Item(Key, Data, InfoObj));
  }

  /// Determine whether an entry has been inserted.
  bool contains(typename Info::key_type_ref Key, Info &InfoObj) {
    unsigned Hash = InfoObj.ComputeHash(Key);
    for (Item *I = Buckets[Hash & (NumBuckets - 1)].Head; I; I = I->Next)
      if (I->Hash == Hash && InfoObj.EqualKey(I->Key, Key))
        return true;
    return false;
  }

  /// Emit the table to Out, which must not be at offset 0.
  offset_type Emit(raw_ostream &Out) {
    Info InfoObj;
    return Emit(Out, InfoObj);
  }

  /// Emit the table to Out, which must not be at offset 0.
  ///
  /// Uses the provided Info instead of a stack allocated one.
  offset_type Emit(raw_ostream &Out, Info &InfoObj) {
    using namespace llvm::support;
    endian::Writer LE(Out, little);

    // Now we're done adding entries, resize the bucket list if it's
    // significantly too large. (This only happens if the number of
    // entries is small and we're within our initial allocation of
    // 64 buckets.) We aim for an occupancy ratio in [3/8, 3/4).
    //
    // As a special case, if there are two or fewer entries, just
    // form a single bucket. A linear scan is fine in that case, and
    // this is very common in C++ class lookup tables. This also
    // guarantees we produce at least one bucket for an empty table.
    //
    // FIXME: Try computing a perfect hash function at this point.
    unsigned TargetNumBuckets =
        NumEntries <= 2 ? 1 : llvm::bit_ceil(NumEntries * 4 / 3 + 1);
    if (TargetNumBuckets != NumBuckets)
      resize(TargetNumBuckets);

    // Emit the payload of the table.
    for (offset_type I = 0; I < NumBuckets; ++I) {
      Bucket &B = Buckets[I];
      if (!B.Head)
        continue;

      // Store the offset for the data of this bucket.
      B.Off = Out.tell();
      assert(B.Off && "Cannot write a bucket at offset 0. Please add padding.");

      // Write out the number of items in the bucket.
      LE.write<uint16_t>(B.Length);
      assert(B.Length != 0 && "Bucket has a head but zero length?");

      // Write out the entries in the bucket.
      for (Item *I = B.Head; I; I = I->Next) {
        LE.write<typename Info::hash_value_type>(I->Hash);
        const std::pair<offset_type, offset_type> &Len =
            InfoObj.EmitKeyDataLength(Out, I->Key, I->Data);
#ifdef NDEBUG
        InfoObj.EmitKey(Out, I->Key, Len.first);
        InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
#else
        // In asserts mode, check that the lengths the user reported match the
        // bytes they actually wrote.
        uint64_t KeyStart = Out.tell();
        InfoObj.EmitKey(Out, I->Key, Len.first);
        uint64_t DataStart = Out.tell();
        InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
        uint64_t End = Out.tell();
        assert(offset_type(DataStart - KeyStart) == Len.first &&
               "key length does not match bytes written");
        assert(offset_type(End - DataStart) == Len.second &&
               "data length does not match bytes written");
#endif
      }
    }

    // Pad with zeros so that we can start the hashtable at an aligned address.
    offset_type TableOff = Out.tell();
    uint64_t N = offsetToAlignment(TableOff, Align(alignof(offset_type)));
    TableOff += N;
    while (N--)
      LE.write<uint8_t>(0);

    // Emit the hashtable itself.
    LE.write<offset_type>(NumBuckets);
    LE.write<offset_type>(NumEntries);
    for (offset_type I = 0; I < NumBuckets; ++I)
      LE.write<offset_type>(Buckets[I].Off);

    return TableOff;
  }

  OnDiskChainedHashTableGenerator() {
    NumEntries = 0;
    NumBuckets = 64;
    // Note that we do not need to run the constructors of the individual
    // Bucket objects since 'calloc' returns bytes that are all 0.
    Buckets = static_cast<Bucket *>(safe_calloc(NumBuckets, sizeof(Bucket)));
  }

  ~OnDiskChainedHashTableGenerator() { std::free(Buckets); }
};
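
// Example usage (an illustrative sketch; ExampleInfo and the key/data values
// are hypothetical, following the interface documented above):
//
//   std::string Storage;
//   llvm::raw_string_ostream Out(Storage);
//   Out << '\0'; // pad so that no bucket can land at offset 0
//   llvm::OnDiskChainedHashTableGenerator<ExampleInfo> Generator;
//   Generator.insert(Key1, Data1);
//   Generator.insert(Key2, Data2);
//   uint32_t TableOffset = Generator.Emit(Out);
//   // Persist Storage; TableOffset is where a reader finds the bucket array.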

/// Provides lookup on an on disk hash table.
///
/// This needs an \c Info that handles reading values from the hash table's
/// payload and computes the hash for a given key. This should provide the
/// following interface:
///
/// \code
/// class ExampleLookupInfo {
/// public:
///   typedef ExampleData data_type;
///   typedef ExampleInternalKey internal_key_type; // The stored key type.
///   typedef ExampleKey external_key_type; // The type to pass to find().
///   typedef uint32_t hash_value_type; // The type the hash function returns.
///   typedef uint32_t offset_type; // The type for offsets into the table.
///
///   /// Compare two keys for equality.
///   static bool EqualKey(internal_key_type &Key1, internal_key_type &Key2);
///   /// Calculate the hash for the given key.
///   static hash_value_type ComputeHash(internal_key_type &IKey);
///   /// Translate from the semantic type of a key in the hash table to the
///   /// type that is actually stored and used for hashing and comparisons.
///   /// The internal and external types are often the same, in which case this
///   /// can simply return the passed in value.
///   static const internal_key_type &GetInternalKey(external_key_type &EKey);
///   /// Read the key and data length from Buffer, leaving it pointing at the
///   /// following byte.
///   static std::pair<offset_type, offset_type>
///   ReadKeyDataLength(const unsigned char *&Buffer);
///   /// Read the key from Buffer, given the KeyLen as reported from
///   /// ReadKeyDataLength.
///   const internal_key_type &ReadKey(const unsigned char *Buffer,
///                                    offset_type KeyLen);
///   /// Read the data for Key from Buffer, given the DataLen as reported from
///   /// ReadKeyDataLength.
///   data_type ReadData(StringRef Key, const unsigned char *Buffer,
///                      offset_type DataLen);
/// };
/// \endcode
template <typename Info> class OnDiskChainedHashTable {
  const typename Info::offset_type NumBuckets;
  const typename Info::offset_type NumEntries;
  const unsigned char *const Buckets;
  const unsigned char *const Base;
  Info InfoObj;

public:
  typedef Info InfoType;
  typedef typename Info::internal_key_type internal_key_type;
  typedef typename Info::external_key_type external_key_type;
  typedef typename Info::data_type data_type;
  typedef typename Info::hash_value_type hash_value_type;
  typedef typename Info::offset_type offset_type;

  OnDiskChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
                         const unsigned char *Buckets,
                         const unsigned char *Base,
                         const Info &InfoObj = Info())
      : NumBuckets(NumBuckets), NumEntries(NumEntries), Buckets(Buckets),
        Base(Base), InfoObj(InfoObj) {
    assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
           "'buckets' must have a 4-byte alignment");
  }

  /// Read the number of buckets and the number of entries from a hash table
  /// produced by OnDiskChainedHashTableGenerator::Emit, and advance the
  /// Buckets pointer past them.
  static std::pair<offset_type, offset_type>
  readNumBucketsAndEntries(const unsigned char *&Buckets) {
    assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
           "buckets should be 4-byte aligned.");
    using namespace llvm::support;
    offset_type NumBuckets =
        endian::readNext<offset_type, little, aligned>(Buckets);
    offset_type NumEntries =
        endian::readNext<offset_type, little, aligned>(Buckets);
    return std::make_pair(NumBuckets, NumEntries);
  }

  offset_type getNumBuckets() const { return NumBuckets; }
  offset_type getNumEntries() const { return NumEntries; }
  const unsigned char *getBase() const { return Base; }
  const unsigned char *getBuckets() const { return Buckets; }

  bool isEmpty() const { return NumEntries == 0; }

  class iterator {
    internal_key_type Key;
    const unsigned char *const Data;
    const offset_type Len;
    Info *InfoObj;

  public:
    iterator() : Key(), Data(nullptr), Len(0), InfoObj(nullptr) {}
    iterator(const internal_key_type K, const unsigned char *D, offset_type L,
             Info *InfoObj)
        : Key(K), Data(D), Len(L), InfoObj(InfoObj) {}

    data_type operator*() const { return InfoObj->ReadData(Key, Data, Len); }

    const unsigned char *getDataPtr() const { return Data; }
    offset_type getDataLen() const { return Len; }

    bool operator==(const iterator &X) const { return X.Data == Data; }
    bool operator!=(const iterator &X) const { return X.Data != Data; }
  };

  /// Look up the stored data for a particular key.
  iterator find(const external_key_type &EKey, Info *InfoPtr = nullptr) {
    const internal_key_type &IKey = InfoObj.GetInternalKey(EKey);
    hash_value_type KeyHash = InfoObj.ComputeHash(IKey);
    return find_hashed(IKey, KeyHash, InfoPtr);
  }

  /// Look up the stored data for a particular key with a known hash.
  iterator find_hashed(const internal_key_type &IKey, hash_value_type KeyHash,
                       Info *InfoPtr = nullptr) {
    using namespace llvm::support;

    if (!InfoPtr)
      InfoPtr = &InfoObj;

    // Each bucket is just an offset into the hash table file.
    offset_type Idx = KeyHash & (NumBuckets - 1);
    const unsigned char *Bucket = Buckets + sizeof(offset_type) * Idx;

    offset_type Offset = endian::readNext<offset_type, little, aligned>(Bucket);
    if (Offset == 0)
      return iterator(); // Empty bucket.
    const unsigned char *Items = Base + Offset;

    // 'Items' starts with a 16-bit unsigned integer representing the
    // number of items in this bucket.
    unsigned Len = endian::readNext<uint16_t, little, unaligned>(Items);

    for (unsigned i = 0; i < Len; ++i) {
      // Read the hash.
      hash_value_type ItemHash =
          endian::readNext<hash_value_type, little, unaligned>(Items);

      // Determine the length of the key and the data.
      const std::pair<offset_type, offset_type> &L =
          Info::ReadKeyDataLength(Items);
      offset_type ItemLen = L.first + L.second;

      // Compare the hashes.  If they are not the same, skip the entry entirely.
      if (ItemHash != KeyHash) {
        Items += ItemLen;
        continue;
      }

      // Read the key.
      const internal_key_type &X =
          InfoPtr->ReadKey((const unsigned char *const)Items, L.first);

      // If the key doesn't match just skip reading the value.
      if (!InfoPtr->EqualKey(X, IKey)) {
        Items += ItemLen;
        continue;
      }

      // The key matches!
      return iterator(X, Items + L.first, L.second, InfoPtr);
    }

    return iterator();
  }

  iterator end() const { return iterator(); }

  Info &getInfoObj() { return InfoObj; }

  /// Create the hash table.
  ///
  /// \param Buckets is the beginning of the hash table itself, which follows
  /// the payload of the entire structure. This is the value returned by
  /// OnDiskChainedHashTableGenerator::Emit.
  ///
  /// \param Base is the point from which all offsets into the structure are
  /// based. This is offset 0 in the stream that was used when Emitting the
  /// table.
  static OnDiskChainedHashTable *Create(const unsigned char *Buckets,
                                        const unsigned char *const Base,
                                        const Info &InfoObj = Info()) {
    assert(Buckets > Base);
    auto NumBucketsAndEntries = readNumBucketsAndEntries(Buckets);
    return new OnDiskChainedHashTable<Info>(NumBucketsAndEntries.first,
                                            NumBucketsAndEntries.second,
                                            Buckets, Base, InfoObj);
  }
};
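
// Example lookup (an illustrative sketch continuing the one above; Base is a
// hypothetical const unsigned char * at the first byte of the persisted data,
// and TableOffset came from Emit):
//
//   using Table = llvm::OnDiskChainedHashTable<ExampleLookupInfo>;
//   std::unique_ptr<Table> T(Table::Create(Base + TableOffset, Base));
//   auto It = T->find(SomeKey); // SomeKey is hypothetical
//   if (It != T->end()) {
//     ExampleData D = *It; // deserialized via Info::ReadData
//   }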

/// Provides lookup and iteration over an on disk hash table.
///
/// \copydetails llvm::OnDiskChainedHashTable
template <typename Info>
class OnDiskIterableChainedHashTable : public OnDiskChainedHashTable<Info> {
  const unsigned char *Payload;

public:
  typedef OnDiskChainedHashTable<Info>          base_type;
  typedef typename base_type::internal_key_type internal_key_type;
  typedef typename base_type::external_key_type external_key_type;
  typedef typename base_type::data_type         data_type;
  typedef typename base_type::hash_value_type   hash_value_type;
  typedef typename base_type::offset_type       offset_type;

private:
  /// Iterates over all of the keys in the table.
  class iterator_base {
    const unsigned char *Ptr;
    offset_type NumItemsInBucketLeft;
    offset_type NumEntriesLeft;

  public:
    typedef external_key_type value_type;

    iterator_base(const unsigned char *const Ptr, offset_type NumEntries)
        : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries) {}
    iterator_base()
        : Ptr(nullptr), NumItemsInBucketLeft(0), NumEntriesLeft(0) {}

    friend bool operator==(const iterator_base &X, const iterator_base &Y) {
      return X.NumEntriesLeft == Y.NumEntriesLeft;
    }
    friend bool operator!=(const iterator_base &X, const iterator_base &Y) {
      return X.NumEntriesLeft != Y.NumEntriesLeft;
    }

    /// Move to the next item.
    void advance() {
      using namespace llvm::support;
      if (!NumItemsInBucketLeft) {
        // 'Items' starts with a 16-bit unsigned integer representing the
        // number of items in this bucket.
        NumItemsInBucketLeft =
            endian::readNext<uint16_t, little, unaligned>(Ptr);
      }
      Ptr += sizeof(hash_value_type); // Skip the hash.
      // Determine the length of the key and the data.
      const std::pair<offset_type, offset_type> &L =
          Info::ReadKeyDataLength(Ptr);
      Ptr += L.first + L.second;
      assert(NumItemsInBucketLeft);
      --NumItemsInBucketLeft;
      assert(NumEntriesLeft);
      --NumEntriesLeft;
    }

    /// Get the start of the item as written by the trait (after the hash and
    /// immediately before the key and value length).
    const unsigned char *getItem() const {
      return Ptr + (NumItemsInBucketLeft ? 0 : 2) + sizeof(hash_value_type);
    }
  };

public:
  OnDiskIterableChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
                                 const unsigned char *Buckets,
                                 const unsigned char *Payload,
                                 const unsigned char *Base,
                                 const Info &InfoObj = Info())
      : base_type(NumBuckets, NumEntries, Buckets, Base, InfoObj),
        Payload(Payload) {}

  /// Iterates over all of the keys in the table.
  class key_iterator : public iterator_base {
    Info *InfoObj;

  public:
    typedef external_key_type value_type;

    key_iterator(const unsigned char *const Ptr, offset_type NumEntries,
                 Info *InfoObj)
        : iterator_base(Ptr, NumEntries), InfoObj(InfoObj) {}
    key_iterator() : iterator_base(), InfoObj() {}

    key_iterator &operator++() {
      this->advance();
      return *this;
    }
    key_iterator operator++(int) { // Postincrement
      key_iterator tmp = *this;
      ++*this;
      return tmp;
    }

    internal_key_type getInternalKey() const {
      auto *LocalPtr = this->getItem();

      // Determine the length of the key and the data.
      auto L = Info::ReadKeyDataLength(LocalPtr);

      // Read the key.
      return InfoObj->ReadKey(LocalPtr, L.first);
    }

    value_type operator*() const {
      return InfoObj->GetExternalKey(getInternalKey());
    }
  };

  key_iterator key_begin() {
    return key_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
  }
  key_iterator key_end() { return key_iterator(); }

  iterator_range<key_iterator> keys() {
    return make_range(key_begin(), key_end());
  }

  /// Iterates over all the entries in the table, returning the data.
  class data_iterator : public iterator_base {
    Info *InfoObj;

  public:
    typedef data_type value_type;

    data_iterator(const unsigned char *const Ptr, offset_type NumEntries,
                  Info *InfoObj)
        : iterator_base(Ptr, NumEntries), InfoObj(InfoObj) {}
    data_iterator() : iterator_base(), InfoObj() {}

    data_iterator &operator++() { // Preincrement
      this->advance();
      return *this;
    }
    data_iterator operator++(int) { // Postincrement
      data_iterator tmp = *this;
      ++*this;
      return tmp;
    }

    value_type operator*() const {
      auto *LocalPtr = this->getItem();

      // Determine the length of the key and the data.
      auto L = Info::ReadKeyDataLength(LocalPtr);

      // Read the key.
      const internal_key_type &Key = InfoObj->ReadKey(LocalPtr, L.first);
      return InfoObj->ReadData(Key, LocalPtr + L.first, L.second);
    }
  };

  data_iterator data_begin() {
    return data_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
  }
  data_iterator data_end() { return data_iterator(); }

  iterator_range<data_iterator> data() {
    return make_range(data_begin(), data_end());
  }

  /// Create the hash table.
  ///
  /// \param Buckets is the beginning of the hash table itself, which follows
  /// the payload of the entire structure. This is the value returned by
  /// OnDiskHashTableGenerator::Emit.
  ///
  /// \param Payload is the beginning of the data contained in the table.  This
  /// is Base plus any padding or header data that was stored, i.e., the offset
  /// that the stream was at when calling Emit.
  ///
  /// \param Base is the point from which all offsets into the structure are
  /// based. This is offset 0 in the stream that was used when Emitting the
  /// table.
  static OnDiskIterableChainedHashTable *
  Create(const unsigned char *Buckets, const unsigned char *const Payload,
         const unsigned char *const Base, const Info &InfoObj = Info()) {
    assert(Buckets > Base);
    auto NumBucketsAndEntries =
        OnDiskIterableChainedHashTable<Info>::readNumBucketsAndEntries(Buckets);
    return new OnDiskIterableChainedHashTable<Info>(
        NumBucketsAndEntries.first, NumBucketsAndEntries.second,
        Buckets, Payload, Base, InfoObj);
  }
};
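
// A minimal usage sketch (helper name hypothetical; `Info` is any trait
// satisfying the OnDiskChainedHashTable requirements): walk the data iterator
// to visit every record. The count it produces matches getNumEntries(), but
// the loop demonstrates the iteration protocol.
template <typename Info>
size_t countOnDiskTableEntries(OnDiskIterableChainedHashTable<Info> &Table) {
  size_t N = 0;
  for (auto I = Table.data_begin(), E = Table.data_end(); I != E; ++I)
    ++N; // each increment decodes one key/data record from the payload
  return N;
}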

} // end namespace llvm

#endif

//===-- llvm/Support/X86TargetParser.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/X86TargetParser.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/X86TargetParser.h"

//===-- llvm/Support/CRC.h - Cyclic Redundancy Check-------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains implementations of CRC functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CRC_H
#define LLVM_SUPPORT_CRC_H

#include "llvm/Support/DataTypes.h"

namespace llvm {
template <typename T> class ArrayRef;

// Compute the CRC-32 of Data.
uint32_t crc32(ArrayRef<uint8_t> Data);

// Compute the running CRC-32 of Data, with CRC being the previous value of the
// checksum.
uint32_t crc32(uint32_t CRC, ArrayRef<uint8_t> Data);

// Class for computing the JamCRC.
//
// We will use the "Rocksoft^tm Model CRC Algorithm" to describe the properties
// of this CRC:
//   Width  : 32
//   Poly   : 04C11DB7
//   Init   : FFFFFFFF
//   RefIn  : True
//   RefOut : True
//   XorOut : 00000000
//   Check  : 340BC6D9 (result of CRC for "123456789")
//
// In other words, this is the same as CRC-32, except that XorOut is 0 instead
// of FFFFFFFF.
//
// N.B.  We permit flexibility of the "Init" value.  Some consumers of this need
//       it to be zero.
class JamCRC {
public:
  JamCRC(uint32_t Init = 0xFFFFFFFFU) : CRC(Init) {}

  // Update the CRC calculation with Data.
  void update(ArrayRef<uint8_t> Data);

  uint32_t getCRC() const { return CRC; }

private:
  uint32_t CRC;
};
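
// Usage sketch (names hypothetical; requires a complete ArrayRef at the point
// of use). With the default Init of 0xFFFFFFFF, JamCRC differs from CRC-32
// only in XorOut, so the two results are bitwise complements:
//
//   uint32_t C = crc32(FirstChunk); // one-shot CRC of the first chunk
//   C = crc32(C, SecondChunk);      // continue the running checksum
//
//   JamCRC J;
//   J.update(Bytes);
//   assert(crc32(Bytes) == ~J.getCRC()); // CRC-32 adds XorOut = 0xFFFFFFFF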

} // end namespace llvm

#endif

//===-- llvm/Support/TaskQueue.h - A TaskQueue implementation ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a crude C++11 based task queue.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TASKQUEUE_H
#define LLVM_SUPPORT_TASKQUEUE_H

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/thread.h"

#include <atomic>
#include <cassert>
#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <utility>

namespace llvm {
/// TaskQueue executes serialized work on a user-defined Thread Pool.  It
/// guarantees that if task B is enqueued after task A, task B begins after
/// task A completes and there is no overlap between the two.
class TaskQueue {
  // Because we can't rely on lambda init-captures to move move-only local
  // variables into the closure, we create the promise inside an explicit
  // callable struct. We want to do as much of the wrapping as possible in the
  // type-specialized domain (before type erasure) and only then erase the
  // result into a std::function.
  template <typename Callable> struct Task {
    using ResultTy = std::invoke_result_t<Callable>;
    explicit Task(Callable C, TaskQueue &Parent)
        : C(std::move(C)), P(std::make_shared<std::promise<ResultTy>>()),
          Parent(&Parent) {}

    template<typename T>
    void invokeCallbackAndSetPromise(T*) {
      P->set_value(C());
    }

    void invokeCallbackAndSetPromise(void*) {
      C();
      P->set_value();
    }

    void operator()() noexcept {
      ResultTy *Dummy = nullptr;
      invokeCallbackAndSetPromise(Dummy);
      Parent->completeTask();
    }

    Callable C;
    std::shared_ptr<std::promise<ResultTy>> P;
    TaskQueue *Parent;
  };

public:
  /// Construct a task queue with no work.
  TaskQueue(ThreadPool &Scheduler) : Scheduler(Scheduler) { (void)Scheduler; }

  /// Blocking destructor: the queue will wait for all work to complete.
  ~TaskQueue() {
    Scheduler.wait();
    assert(Tasks.empty());
  }

  /// Asynchronous submission of a task to the queue. The returned future can be
  /// used to wait for the task (and all previous tasks that have not yet
  /// completed) to finish.
  template <typename Callable>
  std::future<std::invoke_result_t<Callable>> async(Callable &&C) {
#if !LLVM_ENABLE_THREADS
    static_assert(false,
                  "TaskQueue requires building with LLVM_ENABLE_THREADS!");
#endif
    Task<Callable> T{std::move(C), *this};
    using ResultTy = std::invoke_result_t<Callable>;
    std::future<ResultTy> F = T.P->get_future();
    {
      std::lock_guard<std::mutex> Lock(QueueLock);
      // If there's already a task in flight, just queue this one up.  If
      // there is not a task in flight, bypass the queue and schedule this
      // task immediately.
      if (IsTaskInFlight)
        Tasks.push_back(std::move(T));
      else {
        Scheduler.async(std::move(T));
        IsTaskInFlight = true;
      }
    }
    return F;
  }

private:
  void completeTask() {
    // We just completed a task.  If there are no more tasks in the queue,
    // update IsTaskInFlight to false and stop doing work.  Otherwise
    // schedule the next task (while not holding the lock).
    std::function<void()> Continuation;
    {
      std::lock_guard<std::mutex> Lock(QueueLock);
      if (Tasks.empty()) {
        IsTaskInFlight = false;
        return;
      }

      Continuation = std::move(Tasks.front());
      Tasks.pop_front();
    }
    Scheduler.async(std::move(Continuation));
  }

  /// The thread pool on which to run the work.
  ThreadPool &Scheduler;

  /// State which indicates whether the queue is currently processing any
  /// work.
  bool IsTaskInFlight = false;

  /// Mutex for synchronizing access to the Tasks queue.
  std::mutex QueueLock;

  /// Tasks waiting for execution in the queue.
  std::deque<std::function<void()>> Tasks;
};
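
// A minimal usage sketch (function name hypothetical; assumes
// LLVM_ENABLE_THREADS): because the queue serializes tasks, the second lambda
// is guaranteed to observe the side effects of the first.
inline int runTwoSerializedTasks(ThreadPool &Pool) {
  TaskQueue Queue(Pool);
  int Shared = 0;
  std::future<void> F1 = Queue.async([&Shared] { Shared = 1; });
  std::future<int> F2 = Queue.async([&Shared] { return Shared + 1; });
  F1.wait();
  return F2.get(); // always 2: F2 ran strictly after F1 completed
}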
} // namespace llvm

#endif // LLVM_SUPPORT_TASKQUEUE_H

//===- ARMAttributeParser.h - ARM Attribute Information Printer -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARMATTRIBUTEPARSER_H
#define LLVM_SUPPORT_ARMATTRIBUTEPARSER_H

#include "ARMBuildAttributes.h"
#include "ELFAttributeParser.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

namespace llvm {

class ScopedPrinter;

class ARMAttributeParser : public ELFAttributeParser {
  struct DisplayHandler {
    ARMBuildAttrs::AttrType attribute;
    Error (ARMAttributeParser::*routine)(ARMBuildAttrs::AttrType);
  };
  static const DisplayHandler displayRoutines[];

  Error handler(uint64_t tag, bool &handled) override;

  Error stringAttribute(ARMBuildAttrs::AttrType tag);

  Error CPU_arch(ARMBuildAttrs::AttrType tag);
  Error CPU_arch_profile(ARMBuildAttrs::AttrType tag);
  Error ARM_ISA_use(ARMBuildAttrs::AttrType tag);
  Error THUMB_ISA_use(ARMBuildAttrs::AttrType tag);
  Error FP_arch(ARMBuildAttrs::AttrType tag);
  Error WMMX_arch(ARMBuildAttrs::AttrType tag);
  Error Advanced_SIMD_arch(ARMBuildAttrs::AttrType tag);
  Error MVE_arch(ARMBuildAttrs::AttrType tag);
  Error PCS_config(ARMBuildAttrs::AttrType tag);
  Error ABI_PCS_R9_use(ARMBuildAttrs::AttrType tag);
  Error ABI_PCS_RW_data(ARMBuildAttrs::AttrType tag);
  Error ABI_PCS_RO_data(ARMBuildAttrs::AttrType tag);
  Error ABI_PCS_GOT_use(ARMBuildAttrs::AttrType tag);
  Error ABI_PCS_wchar_t(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_rounding(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_denormal(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_exceptions(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_user_exceptions(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_number_model(ARMBuildAttrs::AttrType tag);
  Error ABI_align_needed(ARMBuildAttrs::AttrType tag);
  Error ABI_align_preserved(ARMBuildAttrs::AttrType tag);
  Error ABI_enum_size(ARMBuildAttrs::AttrType tag);
  Error ABI_HardFP_use(ARMBuildAttrs::AttrType tag);
  Error ABI_VFP_args(ARMBuildAttrs::AttrType tag);
  Error ABI_WMMX_args(ARMBuildAttrs::AttrType tag);
  Error ABI_optimization_goals(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_optimization_goals(ARMBuildAttrs::AttrType tag);
  Error compatibility(ARMBuildAttrs::AttrType tag);
  Error CPU_unaligned_access(ARMBuildAttrs::AttrType tag);
  Error FP_HP_extension(ARMBuildAttrs::AttrType tag);
  Error ABI_FP_16bit_format(ARMBuildAttrs::AttrType tag);
  Error MPextension_use(ARMBuildAttrs::AttrType tag);
  Error DIV_use(ARMBuildAttrs::AttrType tag);
  Error DSP_extension(ARMBuildAttrs::AttrType tag);
  Error T2EE_use(ARMBuildAttrs::AttrType tag);
  Error Virtualization_use(ARMBuildAttrs::AttrType tag);
  Error PAC_extension(ARMBuildAttrs::AttrType tag);
  Error BTI_extension(ARMBuildAttrs::AttrType tag);
  Error PACRET_use(ARMBuildAttrs::AttrType tag);
  Error BTI_use(ARMBuildAttrs::AttrType tag);
  Error nodefaults(ARMBuildAttrs::AttrType tag);
  Error also_compatible_with(ARMBuildAttrs::AttrType tag);

public:
  ARMAttributeParser(ScopedPrinter *sw)
      : ELFAttributeParser(sw, ARMBuildAttrs::getARMAttributeTags(), "aeabi") {}
  ARMAttributeParser()
      : ELFAttributeParser(ARMBuildAttrs::getARMAttributeTags(), "aeabi") {}
};
} // namespace llvm

#endif

//===---- CSKYAttributes.h - CSKY Attributes --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains enumerations for CSKY attributes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_CSKYATTRIBUTES_H
#define LLVM_SUPPORT_CSKYATTRIBUTES_H

#include "llvm/Support/ELFAttributes.h"

namespace llvm {
namespace CSKYAttrs {

const TagNameMap &getCSKYAttributeTags();

enum AttrType {
  CSKY_ARCH_NAME = 4,
  CSKY_CPU_NAME = 5,
  CSKY_ISA_FLAGS = 6,
  CSKY_ISA_EXT_FLAGS = 7,
  CSKY_DSP_VERSION = 8,
  CSKY_VDSP_VERSION = 9,
  CSKY_FPU_VERSION = 16,
  CSKY_FPU_ABI = 17,
  CSKY_FPU_ROUNDING = 18,
  CSKY_FPU_DENORMAL = 19,
  CSKY_FPU_EXCEPTION = 20,
  CSKY_FPU_NUMBER_MODULE = 21,
  CSKY_FPU_HARDFP = 22
};

enum ISA_FLAGS {
  V2_ISA_E1 = 1 << 1,
  V2_ISA_1E2 = 1 << 2,
  V2_ISA_2E3 = 1 << 3,
  V2_ISA_3E7 = 1 << 4,
  V2_ISA_7E10 = 1 << 5,
  V2_ISA_3E3R1 = 1 << 6,
  V2_ISA_3E3R2 = 1 << 7,
  V2_ISA_10E60 = 1 << 8,
  V2_ISA_3E3R3 = 1 << 9,
  ISA_TRUST = 1 << 11,
  ISA_CACHE = 1 << 12,
  ISA_NVIC = 1 << 13,
  ISA_CP = 1 << 14,
  ISA_MP = 1 << 15,
  ISA_MP_1E2 = 1 << 16,
  ISA_JAVA = 1 << 17,
  ISA_MAC = 1 << 18,
  ISA_MAC_DSP = 1 << 19,
  ISA_DSP = 1 << 20,
  ISA_DSP_1E2 = 1 << 21,
  ISA_DSP_ENHANCE = 1 << 22,
  ISA_DSP_SILAN = 1 << 23,
  ISA_VDSP = 1 << 24,
  ISA_VDSP_2 = 1 << 25,
  ISA_VDSP_2E3 = 1 << 26,
  V2_ISA_DSPE60 = 1 << 27,
  ISA_VDSP_2E60F = 1 << 28
};

enum ISA_EXT_FLAGS {
  ISA_FLOAT_E1 = 1 << 0,
  ISA_FLOAT_1E2 = 1 << 1,
  ISA_FLOAT_1E3 = 1 << 2,
  ISA_FLOAT_3E4 = 1 << 3,
  ISA_FLOAT_7E60 = 1 << 4
};

enum { NONE = 0, NEEDED = 1 };

enum DSP_VERSION { DSP_VERSION_EXTENSION = 1, DSP_VERSION_2 = 2 };

enum VDSP_VERSION { VDSP_VERSION_1 = 1, VDSP_VERSION_2 = 2 };

enum FPU_VERSION { FPU_VERSION_1 = 1, FPU_VERSION_2 = 2, FPU_VERSION_3 = 3 };

enum FPU_ABI { FPU_ABI_SOFT = 1, FPU_ABI_SOFTFP = 2, FPU_ABI_HARD = 3 };

enum FPU_HARDFP {
  FPU_HARDFP_HALF = 1,
  FPU_HARDFP_SINGLE = 2,
  FPU_HARDFP_DOUBLE = 4
};

} // namespace CSKYAttrs
} // namespace llvm

#endif

//===--- ModRef.h - Memory effect modelling ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Definitions of ModRefInfo and MemoryEffects, which are used to
// describe the memory effects of instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MODREF_H
#define LLVM_SUPPORT_MODREF_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// Flags indicating whether a memory access modifies or references memory.
///
/// This is no access at all, a modification, a reference, or both
/// a modification and a reference.
enum class ModRefInfo : uint8_t {
  /// The access neither references nor modifies the value stored in memory.
  NoModRef = 0,
  /// The access may reference the value stored in memory.
  Ref = 1,
  /// The access may modify the value stored in memory.
  Mod = 2,
  /// The access may reference and may modify the value stored in memory.
  ModRef = Ref | Mod,
  LLVM_MARK_AS_BITMASK_ENUM(ModRef),
};

[[nodiscard]] inline bool isNoModRef(const ModRefInfo MRI) {
  return MRI == ModRefInfo::NoModRef;
}
[[nodiscard]] inline bool isModOrRefSet(const ModRefInfo MRI) {
  return MRI != ModRefInfo::NoModRef;
}
[[nodiscard]] inline bool isModAndRefSet(const ModRefInfo MRI) {
  return MRI == ModRefInfo::ModRef;
}
[[nodiscard]] inline bool isModSet(const ModRefInfo MRI) {
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod);
}
[[nodiscard]] inline bool isRefSet(const ModRefInfo MRI) {
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref);
}
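
// Worked check (a sketch, not in the original header): because ModRef is the
// union Ref | Mod, isModSet and isRefSet both hold for ModRefInfo::ModRef.
static_assert((static_cast<int>(ModRefInfo::ModRef) &
               static_cast<int>(ModRefInfo::Mod)) != 0 &&
                  (static_cast<int>(ModRefInfo::ModRef) &
                   static_cast<int>(ModRefInfo::Ref)) != 0,
              "ModRefInfo::ModRef must include both the Mod and Ref bits");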

/// Debug print ModRefInfo.
raw_ostream &operator<<(raw_ostream &OS, ModRefInfo MR);

/// The locations at which a function might access memory.
enum class IRMemLocation {
  /// Access to memory via argument pointers.
  ArgMem = 0,
  /// Memory that is inaccessible via LLVM IR.
  InaccessibleMem = 1,
  /// Any other memory.
  Other = 2,

  /// Helpers to iterate all locations in the MemoryEffectsBase class.
  First = ArgMem,
  Last = Other,
};

template <typename LocationEnum> class MemoryEffectsBase {
public:
  using Location = LocationEnum;

private:
  uint32_t Data = 0;

  static constexpr uint32_t BitsPerLoc = 2;
  static constexpr uint32_t LocMask = (1 << BitsPerLoc) - 1;

  static uint32_t getLocationPos(Location Loc) {
    return (uint32_t)Loc * BitsPerLoc;
  }

  MemoryEffectsBase(uint32_t Data) : Data(Data) {}

  void setModRef(Location Loc, ModRefInfo MR) {
    Data &= ~(LocMask << getLocationPos(Loc));
    Data |= static_cast<uint32_t>(MR) << getLocationPos(Loc);
  }

public:
  /// Returns iterator over all supported location kinds.
  static auto locations() {
    return enum_seq_inclusive(Location::First, Location::Last,
                              force_iteration_on_noniterable_enum);
  }

  /// Create MemoryEffectsBase that can access only the given location with the
  /// given ModRefInfo.
  MemoryEffectsBase(Location Loc, ModRefInfo MR) { setModRef(Loc, MR); }

  /// Create MemoryEffectsBase that can access any location with the given
  /// ModRefInfo.
  explicit MemoryEffectsBase(ModRefInfo MR) {
    for (Location Loc : locations())
      setModRef(Loc, MR);
  }

  /// Create MemoryEffectsBase that can read and write any memory.
  static MemoryEffectsBase unknown() {
    return MemoryEffectsBase(ModRefInfo::ModRef);
  }

  /// Create MemoryEffectsBase that cannot read or write any memory.
  static MemoryEffectsBase none() {
    return MemoryEffectsBase(ModRefInfo::NoModRef);
  }

  /// Create MemoryEffectsBase that can read any memory.
  static MemoryEffectsBase readOnly() {
    return MemoryEffectsBase(ModRefInfo::Ref);
  }

  /// Create MemoryEffectsBase that can write any memory.
  static MemoryEffectsBase writeOnly() {
    return MemoryEffectsBase(ModRefInfo::Mod);
  }

  /// Create MemoryEffectsBase that can only access argument memory.
  static MemoryEffectsBase argMemOnly(ModRefInfo MR = ModRefInfo::ModRef) {
    return MemoryEffectsBase(Location::ArgMem, MR);
  }

  /// Create MemoryEffectsBase that can only access inaccessible memory.
  static MemoryEffectsBase
  inaccessibleMemOnly(ModRefInfo MR = ModRefInfo::ModRef) {
    return MemoryEffectsBase(Location::InaccessibleMem, MR);
  }

  /// Create MemoryEffectsBase that can only access inaccessible or argument
  /// memory.
  static MemoryEffectsBase
  inaccessibleOrArgMemOnly(ModRefInfo MR = ModRefInfo::ModRef) {
    MemoryEffectsBase FRMB = none();
    FRMB.setModRef(Location::ArgMem, MR);
    FRMB.setModRef(Location::InaccessibleMem, MR);
    return FRMB;
  }

  /// Create MemoryEffectsBase from an encoded integer value (used by memory
  /// attribute).
  static MemoryEffectsBase createFromIntValue(uint32_t Data) {
    return MemoryEffectsBase(Data);
  }

  /// Convert MemoryEffectsBase into an encoded integer value (used by memory
  /// attribute).
  uint32_t toIntValue() const {
    return Data;
  }

  /// Get ModRefInfo for the given Location.
  ModRefInfo getModRef(Location Loc) const {
    return ModRefInfo((Data >> getLocationPos(Loc)) & LocMask);
  }

  /// Get new MemoryEffectsBase with modified ModRefInfo for Loc.
  MemoryEffectsBase getWithModRef(Location Loc, ModRefInfo MR) const {
    MemoryEffectsBase ME = *this;
    ME.setModRef(Loc, MR);
    return ME;
  }

  /// Get new MemoryEffectsBase with NoModRef on the given Loc.
  MemoryEffectsBase getWithoutLoc(Location Loc) const {
    MemoryEffectsBase ME = *this;
    ME.setModRef(Loc, ModRefInfo::NoModRef);
    return ME;
  }

  /// Get ModRefInfo for any location.
  ModRefInfo getModRef() const {
    ModRefInfo MR = ModRefInfo::NoModRef;
    for (Location Loc : locations())
      MR |= getModRef(Loc);
    return MR;
  }

  /// Whether this function accesses no memory.
  bool doesNotAccessMemory() const { return Data == 0; }

  /// Whether this function only (at most) reads memory.
  bool onlyReadsMemory() const { return !isModSet(getModRef()); }

  /// Whether this function only (at most) writes memory.
  bool onlyWritesMemory() const { return !isRefSet(getModRef()); }

  /// Whether this function only (at most) accesses argument memory.
  bool onlyAccessesArgPointees() const {
    return getWithoutLoc(Location::ArgMem).doesNotAccessMemory();
  }

  /// Whether this function may access argument memory.
  bool doesAccessArgPointees() const {
    return isModOrRefSet(getModRef(Location::ArgMem));
  }

  /// Whether this function only (at most) accesses inaccessible memory.
  bool onlyAccessesInaccessibleMem() const {
    return getWithoutLoc(Location::InaccessibleMem).doesNotAccessMemory();
  }

  /// Whether this function only (at most) accesses argument and inaccessible
  /// memory.
  bool onlyAccessesInaccessibleOrArgMem() const {
    return getWithoutLoc(Location::InaccessibleMem)
        .getWithoutLoc(Location::ArgMem)
        .doesNotAccessMemory();
  }

  /// Intersect with other MemoryEffectsBase.
  MemoryEffectsBase operator&(MemoryEffectsBase Other) const {
    return MemoryEffectsBase(Data & Other.Data);
  }

  /// Intersect (in-place) with other MemoryEffectsBase.
  MemoryEffectsBase &operator&=(MemoryEffectsBase Other) {
    Data &= Other.Data;
    return *this;
  }

  /// Union with other MemoryEffectsBase.
  MemoryEffectsBase operator|(MemoryEffectsBase Other) const {
    return MemoryEffectsBase(Data | Other.Data);
  }

  /// Union (in-place) with other MemoryEffectsBase.
  MemoryEffectsBase &operator|=(MemoryEffectsBase Other) {
    Data |= Other.Data;
    return *this;
  }

  /// Subtract other MemoryEffectsBase.
  MemoryEffectsBase operator-(MemoryEffectsBase Other) const {
    return MemoryEffectsBase(Data & ~Other.Data);
  }

  /// Subtract (in-place) with other MemoryEffectsBase.
  MemoryEffectsBase &operator-=(MemoryEffectsBase Other) {
    Data &= ~Other.Data;
    return *this;
  }

  /// Check whether this is the same as other MemoryEffectsBase.
  bool operator==(MemoryEffectsBase Other) const { return Data == Other.Data; }

  /// Check whether this is different from other MemoryEffectsBase.
  bool operator!=(MemoryEffectsBase Other) const { return !operator==(Other); }
};

/// Summary of how a function affects memory in the program.
///
/// Loads from constant globals are not considered memory accesses for this
/// interface. Also, functions may freely modify stack space local to their
/// invocation without having to report it through these interfaces.
using MemoryEffects = MemoryEffectsBase<IRMemLocation>;
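
// Illustrative sketch (not part of the original header; function name
// hypothetical): a function that reads argument memory and writes
// inaccessible memory is neither read-only nor write-only overall, yet it
// still never touches "Other" memory.
inline bool sketchReadsArgsWritesInaccessible() {
  MemoryEffects ME = MemoryEffects::argMemOnly(ModRefInfo::Ref) |
                     MemoryEffects::inaccessibleMemOnly(ModRefInfo::Mod);
  return !ME.onlyReadsMemory() && !ME.onlyWritesMemory() &&
         ME.onlyAccessesInaccessibleOrArgMem(); // evaluates to true
}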

/// Debug print MemoryEffects.
raw_ostream &operator<<(raw_ostream &OS, MemoryEffects RMRB);

// Legacy alias.
using FunctionModRefBehavior = MemoryEffects;

} // namespace llvm

#endif

//===-- RISCVAttributeParser.h - RISCV Attribute Parser ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RISCVATTRIBUTEPARSER_H
#define LLVM_SUPPORT_RISCVATTRIBUTEPARSER_H

#include "llvm/Support/ELFAttributeParser.h"
#include "llvm/Support/RISCVAttributes.h"

namespace llvm {
class RISCVAttributeParser : public ELFAttributeParser {
  struct DisplayHandler {
    RISCVAttrs::AttrType attribute;
    Error (RISCVAttributeParser::*routine)(unsigned);
  };
  static const DisplayHandler displayRoutines[];

  Error handler(uint64_t tag, bool &handled) override;

  Error unalignedAccess(unsigned tag);
  Error stackAlign(unsigned tag);

public:
  RISCVAttributeParser(ScopedPrinter *sw)
      : ELFAttributeParser(sw, RISCVAttrs::getRISCVAttributeTags(), "riscv") {}
  RISCVAttributeParser()
      : ELFAttributeParser(RISCVAttrs::getRISCVAttributeTags(), "riscv") {}
};
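
// Usage sketch (a sketch only: SectionBytes is hypothetical, and parse() is
// assumed to be the ArrayRef-based entry point inherited from
// ELFAttributeParser):
//
//   ScopedPrinter SW(outs());
//   RISCVAttributeParser Parser(&SW);
//   if (Error E = Parser.parse(SectionBytes, support::little))
//     consumeError(std::move(E)); // or report the failure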

} // namespace llvm

#endif

//===--- LockFileManager.h - File-level locking utility ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_LOCKFILEMANAGER_H
#define LLVM_SUPPORT_LOCKFILEMANAGER_H

#include "llvm/ADT/SmallString.h"
#include <optional>
#include <system_error>
#include <utility> // for std::pair

namespace llvm {
class StringRef;

/// Class that manages the creation of a lock file to aid
/// implicit coordination between different processes.
///
/// The implicit coordination works by creating a ".lock" file alongside
/// the file that we're coordinating for, using the atomicity of the file
/// system to ensure that only a single process can create that ".lock" file.
/// When the lock file is removed, the owning process has finished the
/// operation.
class LockFileManager {
public:
  /// Describes the state of a lock file.
  enum LockFileState {
    /// The lock file has been created and is owned by this instance
    /// of the object.
    LFS_Owned,
    /// The lock file already exists and is owned by some other
    /// instance.
    LFS_Shared,
    /// An error occurred while trying to create or find the lock
    /// file.
    LFS_Error
  };

  /// Describes the result of waiting for the owner to release the lock.
  enum WaitForUnlockResult {
    /// The lock was released successfully.
    Res_Success,
    /// Owner died while holding the lock.
    Res_OwnerDied,
    /// Reached timeout while waiting for the owner to release the lock.
    Res_Timeout
  };

private:
  SmallString<128> FileName;
  SmallString<128> LockFileName;
  SmallString<128> UniqueLockFileName;

  std::optional<std::pair<std::string, int>> Owner;
  std::error_code ErrorCode;
  std::string ErrorDiagMsg;

  LockFileManager(const LockFileManager &) = delete;
  LockFileManager &operator=(const LockFileManager &) = delete;

  static std::optional<std::pair<std::string, int>>
  readLockFile(StringRef LockFileName);

  static bool processStillExecuting(StringRef Hostname, int PID);

public:

  LockFileManager(StringRef FileName);
  ~LockFileManager();

  /// Determine the state of the lock file.
  LockFileState getState() const;

  operator LockFileState() const { return getState(); }

  /// For a shared lock, wait until the owner releases the lock.
  /// Total timeout for the file to appear is ~1.5 minutes.
  /// \param MaxSeconds the maximum total wait time in seconds.
  WaitForUnlockResult waitForUnlock(const unsigned MaxSeconds = 90);

  /// Remove the lock file.  This may delete a different lock file than
  /// the one previously read if there is a race.
  std::error_code unsafeRemoveLockFile();

  /// Get error message, or "" if there is no error.
  std::string getErrorMessage() const;

  /// Set error and error message
  void setError(const std::error_code &EC, StringRef ErrorMsg = "") {
    ErrorCode = EC;
    ErrorDiagMsg = ErrorMsg.str();
  }
};
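
// Coordination sketch (file name hypothetical): attempt to own the lock,
// otherwise wait for the current owner and re-check the produced file.
//
//   while (true) {
//     LockFileManager Lock("output.o");
//     switch (Lock.getState()) {
//     case LockFileManager::LFS_Owned:
//       // ... produce "output.o"; the lock is released when Lock is destroyed.
//       return;
//     case LockFileManager::LFS_Shared:
//       Lock.waitForUnlock(); // another process holds the lock
//       continue;             // loop to re-validate the result
//     case LockFileManager::LFS_Error:
//       return; // report Lock.getErrorMessage()
//     }
//   }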

} // end namespace llvm

#endif // LLVM_SUPPORT_LOCKFILEMANAGER_H

//===-- Regex.h - Regular Expression matcher implementation -*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a POSIX regular expression matcher.  Both Basic and
// Extended POSIX regular expressions (ERE) are supported.  EREs were extended
// to support backreferences in matches.
// This implementation also supports matching strings with embedded NUL chars.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_REGEX_H
#define LLVM_SUPPORT_REGEX_H

#include "llvm/ADT/BitmaskEnum.h"
#include <string>

struct llvm_regex;

namespace llvm {
  class StringRef;
  template<typename T> class SmallVectorImpl;

  class Regex {
  public:
    enum RegexFlags : unsigned {
      NoFlags = 0,
      /// Compile for matching that ignores upper/lower case distinctions.
      IgnoreCase = 1,
      /// Compile for newline-sensitive matching. With this flag '[^' bracket
      /// expressions and '.' never match newline. A ^ anchor matches the
      /// null string after any newline in the string in addition to its normal
      /// function, and the $ anchor matches the null string before any
      /// newline in the string in addition to its normal function.
      Newline = 2,
      /// By default, the POSIX extended regular expression (ERE) syntax is
      /// assumed. Pass this flag to turn on basic regular expressions (BRE)
      /// instead.
      BasicRegex = 4,

      LLVM_MARK_AS_BITMASK_ENUM(BasicRegex)
    };

    Regex();
    /// Compiles the given regular expression \p Regex.
    ///
    /// \param Regex - the referenced string is not needed after this
    /// constructor finishes; only its compiled form is kept.
    Regex(StringRef Regex, RegexFlags Flags = NoFlags);
    Regex(StringRef Regex, unsigned Flags);
    Regex(const Regex &) = delete;
    Regex &operator=(Regex regex) {
      std::swap(preg, regex.preg);
      std::swap(error, regex.error);
      return *this;
    }
    Regex(Regex &&regex);
    ~Regex();

    /// isValid - returns the error encountered during regex compilation, if
    /// any.
    bool isValid(std::string &Error) const;
    bool isValid() const { return !error; }

    /// getNumMatches - In a valid regex, return the number of parenthesized
    /// matches it contains.  The number filled in by match will include this
    /// many entries plus one for the whole regex (as element 0).
    unsigned getNumMatches() const;

    /// matches - Match the regex against a given \p String.
    ///
    /// \param Matches - If given, on a successful match this will be filled in
    /// with references to the matched group expressions (inside \p String),
    /// the first group is always the entire pattern.
    ///
    /// \param Error - If non-null, any errors in the matching will be recorded
    /// as a non-empty string. If there is no error, it will be an empty string.
    ///
    /// This returns true on a successful match.
    bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = nullptr,
               std::string *Error = nullptr) const;

    /// sub - Return the result of replacing the first match of the regex in
    /// \p String with the \p Repl string. Backreferences like "\0" in the
    /// replacement string are replaced with the appropriate match substring.
    ///
    /// Note that the replacement string has backslash escaping performed on
    /// it. Invalid backreferences are ignored (replaced by empty strings).
    ///
    /// \param Error If non-null, any errors in the substitution (invalid
    /// backreferences, trailing backslashes) will be recorded as a non-empty
    /// string. If there is no error, it will be an empty string.
    std::string sub(StringRef Repl, StringRef String,
                    std::string *Error = nullptr) const;

    /// If this function returns true, ^Str$ is an extended regular
    /// expression that matches Str and only Str.
    static bool isLiteralERE(StringRef Str);

    /// Turn String into a regex by escaping its special characters.
    static std::string escape(StringRef String);

  private:
    struct llvm_regex *preg;
    int error;
  };
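
// Usage sketch (requires complete StringRef and SmallVector definitions at
// the point of use; the pattern below is illustrative):
//
//   Regex R("^([a-z]+)-([0-9]+)$");
//   SmallVector<StringRef, 3> Groups;
//   if (R.match("abc-42", &Groups))
//     ; // Groups[0] == "abc-42", Groups[1] == "abc", Groups[2] == "42"
//
//   // sub() replaces the first match; "\1" refers to the first group:
//   //   R.sub("\\2-\\1", "abc-42") == "42-abc"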
} // end namespace llvm

#endif // LLVM_SUPPORT_REGEX_H

//===- VersionTuple.h - Version Number Handling -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Defines the llvm::VersionTuple class, which represents a version in
/// the form major[.minor[.subminor]].
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_VERSIONTUPLE_H
#define LLVM_SUPPORT_VERSIONTUPLE_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/Support/Endian.h"
#include <optional>
#include <string>
#include <tuple>

namespace llvm {
template <typename HasherT, support::endianness Endianness>
class HashBuilderImpl;
class raw_ostream;
class StringRef;

/// Represents a version number in the form major[.minor[.subminor[.build]]].
class VersionTuple {
  unsigned Major : 32;

  unsigned Minor : 31;
  unsigned HasMinor : 1;

  unsigned Subminor : 31;
  unsigned HasSubminor : 1;

  unsigned Build : 31;
  unsigned HasBuild : 1;

public:
  constexpr VersionTuple()
      : Major(0), Minor(0), HasMinor(false), Subminor(0), HasSubminor(false),
        Build(0), HasBuild(false) {}

  explicit constexpr VersionTuple(unsigned Major)
      : Major(Major), Minor(0), HasMinor(false), Subminor(0),
        HasSubminor(false), Build(0), HasBuild(false) {}

  explicit constexpr VersionTuple(unsigned Major, unsigned Minor)
      : Major(Major), Minor(Minor), HasMinor(true), Subminor(0),
        HasSubminor(false), Build(0), HasBuild(false) {}

  explicit constexpr VersionTuple(unsigned Major, unsigned Minor,
                                  unsigned Subminor)
      : Major(Major), Minor(Minor), HasMinor(true), Subminor(Subminor),
        HasSubminor(true), Build(0), HasBuild(false) {}

  explicit constexpr VersionTuple(unsigned Major, unsigned Minor,
                                  unsigned Subminor, unsigned Build)
      : Major(Major), Minor(Minor), HasMinor(true), Subminor(Subminor),
        HasSubminor(true), Build(Build), HasBuild(true) {}

  /// Determine whether this version information is empty
  /// (e.g., all version components are zero).
  bool empty() const {
    return Major == 0 && Minor == 0 && Subminor == 0 && Build == 0;
  }

  /// Retrieve the major version number.
  unsigned getMajor() const { return Major; }

  /// Retrieve the minor version number, if provided.
  std::optional<unsigned> getMinor() const {
    if (!HasMinor)
      return std::nullopt;
    return Minor;
  }

  /// Retrieve the subminor version number, if provided.
  std::optional<unsigned> getSubminor() const {
    if (!HasSubminor)
      return std::nullopt;
    return Subminor;
  }

  /// Retrieve the build version number, if provided.
  std::optional<unsigned> getBuild() const {
    if (!HasBuild)
      return std::nullopt;
    return Build;
  }

  /// Return a version tuple that contains only the first 3 version components.
  VersionTuple withoutBuild() const {
    if (HasBuild)
      return VersionTuple(Major, Minor, Subminor);
    return *this;
  }

  /// Return a version tuple that contains a different major version but
  /// everything else is the same.
  VersionTuple withMajorReplaced(unsigned NewMajor) const {
    return VersionTuple(NewMajor, Minor, Subminor, Build);
  }

  /// Return a version tuple that contains only components that are non-zero.
  VersionTuple normalize() const {
    VersionTuple Result = *this;
    if (Result.Build == 0) {
      Result.HasBuild = false;
      if (Result.Subminor == 0) {
        Result.HasSubminor = false;
        if (Result.Minor == 0)
          Result.HasMinor = false;
      }
    }
    return Result;
  }

  /// Determine if two version numbers are equivalent. If not
  /// provided, minor and subminor version numbers are considered to be zero.
  friend bool operator==(const VersionTuple &X, const VersionTuple &Y) {
    return X.Major == Y.Major && X.Minor == Y.Minor &&
           X.Subminor == Y.Subminor && X.Build == Y.Build;
  }

  /// Determine if two version numbers are not equivalent.
  ///
  /// If not provided, minor and subminor version numbers are considered to be
  /// zero.
  friend bool operator!=(const VersionTuple &X, const VersionTuple &Y) {
    return !(X == Y);
  }

  /// Determine whether one version number precedes another.
  ///
  /// If not provided, minor and subminor version numbers are considered to be
  /// zero.
  friend bool operator<(const VersionTuple &X, const VersionTuple &Y) {
    return std::tie(X.Major, X.Minor, X.Subminor, X.Build) <
           std::tie(Y.Major, Y.Minor, Y.Subminor, Y.Build);
  }

  /// Determine whether one version number follows another.
  ///
  /// If not provided, minor and subminor version numbers are considered to be
  /// zero.
  friend bool operator>(const VersionTuple &X, const VersionTuple &Y) {
    return Y < X;
  }

  /// Determine whether one version number precedes or is
  /// equivalent to another.
  ///
  /// If not provided, minor and subminor version numbers are considered to be
  /// zero.
  friend bool operator<=(const VersionTuple &X, const VersionTuple &Y) {
    return !(Y < X);
  }

  /// Determine whether one version number follows or is
  /// equivalent to another.
  ///
  /// If not provided, minor and subminor version numbers are considered to be
  /// zero.
  friend bool operator>=(const VersionTuple &X, const VersionTuple &Y) {
    return !(X < Y);
  }

  friend hash_code hash_value(const VersionTuple &VT) {
    return hash_combine(VT.Major, VT.Minor, VT.Subminor, VT.Build);
  }

  template <typename HasherT, llvm::support::endianness Endianness>
  friend void addHash(HashBuilderImpl<HasherT, Endianness> &HBuilder,
                      const VersionTuple &VT) {
    HBuilder.add(VT.Major, VT.Minor, VT.Subminor, VT.Build);
  }

  /// Retrieve a string representation of the version number.
  std::string getAsString() const;

  /// Try to parse the given string as a version number.
  /// \returns \c true if the string does not match the regular expression
  ///   [0-9]+(\.[0-9]+){0,3}
  bool tryParse(StringRef string);
};
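
// Usage sketch (requires a complete StringRef at the point of use); note that
// tryParse() returns true on *failure*, per the documentation above:
//
//   VersionTuple V;
//   if (!V.tryParse("10.2.1"))
//     assert(V == VersionTuple(10, 2, 1) && !V.getBuild());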

/// Print a version number.
raw_ostream &operator<<(raw_ostream &Out, const VersionTuple &V);

// Provide DenseMapInfo for version tuples.
template <> struct DenseMapInfo<VersionTuple> {
  static inline VersionTuple getEmptyKey() { return VersionTuple(0x7FFFFFFF); }
  static inline VersionTuple getTombstoneKey() {
    return VersionTuple(0x7FFFFFFE);
  }
  static unsigned getHashValue(const VersionTuple &Value) {
    unsigned Result = Value.getMajor();
    if (auto Minor = Value.getMinor())
      Result = detail::combineHashValue(Result, *Minor);
    if (auto Subminor = Value.getSubminor())
      Result = detail::combineHashValue(Result, *Subminor);
    if (auto Build = Value.getBuild())
      Result = detail::combineHashValue(Result, *Build);

    return Result;
  }

  static bool isEqual(const VersionTuple &LHS, const VersionTuple &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
#endif // LLVM_SUPPORT_VERSIONTUPLE_H

//=== Registry.h - Linker-supported plugin registries -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines a registry template for discovering pluggable modules.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_REGISTRY_H
#define LLVM_SUPPORT_REGISTRY_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DynamicLibrary.h"
#include <memory>

namespace llvm {
  /// A simple registry entry which provides only a name, description, and
  /// no-argument constructor.
  template <typename T>
  class SimpleRegistryEntry {
    StringRef Name, Desc;
    std::unique_ptr<T> (*Ctor)();

  public:
    SimpleRegistryEntry(StringRef N, StringRef D, std::unique_ptr<T> (*C)())
        : Name(N), Desc(D), Ctor(C) {}

    StringRef getName() const { return Name; }
    StringRef getDesc() const { return Desc; }
    std::unique_ptr<T> instantiate() const { return Ctor(); }
  };

  /// A global registry used in conjunction with static constructors to make
  /// pluggable components (like targets or garbage collectors) "just work" when
  /// linked with an executable.
  template <typename T>
  class Registry {
  public:
    typedef T type;
    typedef SimpleRegistryEntry<T> entry;

    class node;
    class iterator;

  private:
    Registry() = delete;

    friend class node;
    static node *Head, *Tail;

  public:
    /// Node in linked list of entries.
    ///
    class node {
      friend class iterator;
      friend Registry<T>;

      node *Next;
      const entry& Val;

    public:
      node(const entry &V) : Next(nullptr), Val(V) {}
    };

    /// Add a node to the Registry: this is the interface between the plugin and
    /// the executable.
    ///
    /// This function is exported by the executable and called by the plugin to
    /// add a node to the executable's registry. Therefore it's not defined here
    /// to avoid it being instantiated in the plugin and is instead defined in
    /// the executable (see LLVM_INSTANTIATE_REGISTRY below).
    static void add_node(node *N);

    /// Iterators for registry entries.
    ///
    class iterator
        : public llvm::iterator_facade_base<iterator, std::forward_iterator_tag,
                                            const entry> {
      const node *Cur;

    public:
      explicit iterator(const node *N) : Cur(N) {}

      bool operator==(const iterator &That) const { return Cur == That.Cur; }
      iterator &operator++() { Cur = Cur->Next; return *this; }
      const entry &operator*() const { return Cur->Val; }
    };

    // begin is not defined here in order to avoid usage of an undefined static
    // data member, instead it's instantiated by LLVM_INSTANTIATE_REGISTRY.
    static iterator begin();
    static iterator end()   { return iterator(nullptr); }

    static iterator_range<iterator> entries() {
      return make_range(begin(), end());
    }

    /// A static registration template. Use like such:
    ///
    ///   Registry<Collector>::Add<FancyGC>
    ///   X("fancy-gc", "Newfangled garbage collector.");
    ///
    /// Use of this template requires that:
    ///
    ///  1. The registered subclass has a default constructor.
    template <typename V>
    class Add {
      entry Entry;
      node Node;

      static std::unique_ptr<T> CtorFn() { return std::make_unique<V>(); }

    public:
      Add(StringRef Name, StringRef Desc)
          : Entry(Name, Desc, CtorFn), Node(Entry) {
        add_node(&Node);
      }
    };
  };
} // end namespace llvm

/// Instantiate a registry class.
///
/// This provides template definitions of add_node, begin, and the Head and Tail
/// pointers, then explicitly instantiates them. We could explicitly specialize
/// them, instead of the two-step process of define then instantiate, but
/// strictly speaking that's not allowed by the C++ standard (we would need to
/// have explicit specialization declarations in all translation units where the
/// specialization is used) so we don't.
#define LLVM_INSTANTIATE_REGISTRY(REGISTRY_CLASS) \
  namespace llvm { \
  template<typename T> typename Registry<T>::node *Registry<T>::Head = nullptr;\
  template<typename T> typename Registry<T>::node *Registry<T>::Tail = nullptr;\
  template<typename T> \
  void Registry<T>::add_node(typename Registry<T>::node *N) { \
    if (Tail) \
      Tail->Next = N; \
    else \
      Head = N; \
    Tail = N; \
  } \
  template<typename T> typename Registry<T>::iterator Registry<T>::begin() { \
    return iterator(Head); \
  } \
  template REGISTRY_CLASS::node *Registry<REGISTRY_CLASS::type>::Head; \
  template REGISTRY_CLASS::node *Registry<REGISTRY_CLASS::type>::Tail; \
  template \
  void Registry<REGISTRY_CLASS::type>::add_node(REGISTRY_CLASS::node*); \
  template REGISTRY_CLASS::iterator Registry<REGISTRY_CLASS::type>::begin(); \
  }
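
// Typical pattern (names hypothetical): declare a registry for a plugin
// interface, instantiate it in exactly one translation unit of the hosting
// binary, and register implementations from plugins.
//
//   struct Collector { virtual ~Collector() = default; };
//   using CollectorRegistry = llvm::Registry<Collector>;
//
//   // In one .cpp file of the executable:
//   //   LLVM_INSTANTIATE_REGISTRY(CollectorRegistry)
//   // In the plugin:
//   //   static CollectorRegistry::Add<FancyGC>
//   //       X("fancy-gc", "Newfangled garbage collector.");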

#endif // LLVM_SUPPORT_REGISTRY_H

//===-- llvm/Support/AArch64TargetParser.h ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/AArch64TargetParser.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/AArch64TargetParser.h"

//===-- SpecialCaseList.h - special case list for sanitizers ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// This is a utility class used to parse user-provided text files with
// "special case lists" for code sanitizers. Such files are used to
// define an "ABI list" for DataFlowSanitizer and allow/exclusion lists for
// sanitizers like AddressSanitizer or UndefinedBehaviorSanitizer.
//
// Empty lines and lines starting with "#" are ignored. Sections are defined
// using a '[section_name]' header and can be used to specify sanitizers the
// entries below it apply to. Section names are regular expressions, and
// entries without a section header match all sections (i.e., an '[*]' header
// is assumed).
// The remaining lines should have the form:
//   prefix:wildcard_expression[=category]
// If category is not specified, it is assumed to be empty string.
// Definitions of "prefix" and "category" are sanitizer-specific. For example,
// sanitizer exclusion support prefixes "src", "mainfile", "fun" and "global".
// Wildcard expressions define, respectively, source files, main files,
// functions or globals which shouldn't be instrumented.
// Examples of categories:
//   "functional": used in DFSan to list functions with pure functional
//                 semantics.
//   "init": used in ASan exclusion list to disable initialization-order bugs
//           detection for certain globals or source files.
// Full special case list file example:
// ---
// [address]
// # Excluded items:
// fun:*_ZN4base6subtle*
// global:*global_with_bad_access_or_initialization*
// global:*global_with_initialization_issues*=init
// type:*Namespace::ClassName*=init
// src:file_with_tricky_code.cc
// src:ignore-global-initializers-issues.cc=init
// mainfile:main_file.cc
//
// [dataflow]
// # Functions with pure functional semantics:
// fun:cos=functional
// fun:sin=functional
// ---
// Note that the wild card is in fact an llvm::Regex, but * is automatically
// replaced with .*
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SPECIALCASELIST_H
#define LLVM_SUPPORT_SPECIALCASELIST_H

#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Regex.h"
#include <memory>
#include <string>
#include <vector>

namespace llvm {
class MemoryBuffer;
class StringRef;

namespace vfs {
class FileSystem;
}

class SpecialCaseList {
public:
  /// Parses the special case list entries from files. On failure, returns
  /// 0 and writes an error message to string.
  static std::unique_ptr<SpecialCaseList>
  create(const std::vector<std::string> &Paths, llvm::vfs::FileSystem &FS,
         std::string &Error);
  /// Parses the special case list from a memory buffer. On failure, returns
  /// 0 and writes an error message to string.
  static std::unique_ptr<SpecialCaseList> create(const MemoryBuffer *MB,
                                                 std::string &Error);
  /// Parses the special case list entries from files. On failure, reports a
  /// fatal error.
  static std::unique_ptr<SpecialCaseList>
  createOrDie(const std::vector<std::string> &Paths, llvm::vfs::FileSystem &FS);

  ~SpecialCaseList();

  /// Returns true, if special case list contains a line
  /// \code
  ///   @Prefix:<E>=@Category
  /// \endcode
  /// where @Query satisfies wildcard expression <E> in a given @Section.
  bool inSection(StringRef Section, StringRef Prefix, StringRef Query,
                 StringRef Category = StringRef()) const;

  /// Returns the line number corresponding to the special case list entry if
  /// the special case list contains a line
  /// \code
  ///   @Prefix:<E>=@Category
  /// \endcode
  /// where @Query satisfies wildcard expression <E> in a given @Section.
  /// Returns zero if there is no exclusion entry corresponding to this
  /// expression.
  unsigned inSectionBlame(StringRef Section, StringRef Prefix, StringRef Query,
                          StringRef Category = StringRef()) const;

protected:
  // Implementations of the create*() functions that can also be used by derived
  // classes.
  bool createInternal(const std::vector<std::string> &Paths,
                      vfs::FileSystem &VFS, std::string &Error);
  bool createInternal(const MemoryBuffer *MB, std::string &Error);

  SpecialCaseList() = default;
  SpecialCaseList(SpecialCaseList const &) = delete;
  SpecialCaseList &operator=(SpecialCaseList const &) = delete;

  /// Represents a set of regular expressions.  Regular expressions which are
  /// "literal" (i.e. no regex metacharacters) are stored in Strings.  The
  /// reason for doing so is efficiency; StringMap is much faster at matching
  /// literal strings than Regex.
  class Matcher {
  public:
    bool insert(std::string Regexp, unsigned LineNumber, std::string &REError);
    // Returns the line number in the source file that this query matches to.
    // Returns zero if no match is found.
    unsigned match(StringRef Query) const;

  private:
    StringMap<unsigned> Strings;
    std::vector<std::pair<std::unique_ptr<Regex>, unsigned>> RegExes;
  };

  using SectionEntries = StringMap<StringMap<Matcher>>;

  struct Section {
    Section(std::unique_ptr<Matcher> M) : SectionMatcher(std::move(M)) {}

    std::unique_ptr<Matcher> SectionMatcher;
    SectionEntries Entries;
  };

  std::vector<Section> Sections;

  /// Parses just-constructed SpecialCaseList entries from a memory buffer.
  bool parse(const MemoryBuffer *MB, StringMap<size_t> &SectionsMap,
             std::string &Error);

  // Helper method for derived classes to search by Prefix, Query, and Category
  // once they have already resolved a section entry.
  unsigned inSectionBlame(const SectionEntries &Entries, StringRef Prefix,
                          StringRef Query, StringRef Category) const;
};
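
// Usage sketch (file path hypothetical; getRealFileSystem() is declared in
// llvm/Support/VirtualFileSystem.h):
//
//   std::string Err;
//   auto SCL = SpecialCaseList::create({"ignorelist.txt"},
//                                      *vfs::getRealFileSystem(), Err);
//   if (SCL && SCL->inSection("address", "fun", "_ZN4base6subtle3FooEv"))
//     ; // matched: skip instrumenting this function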

}  // namespace llvm

#endif // LLVM_SUPPORT_SPECIALCASELIST_H

//===- raw_os_ostream.h - std::ostream adaptor for raw_ostream --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the raw_os_ostream class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RAW_OS_OSTREAM_H
#define LLVM_SUPPORT_RAW_OS_OSTREAM_H

#include "llvm/Support/raw_ostream.h"
#include <iosfwd>

namespace llvm {

/// raw_os_ostream - A raw_ostream that writes to an std::ostream.  This is a
/// simple adaptor class.  It does not check for output errors; clients should
/// use the underlying stream to detect errors.
class raw_os_ostream : public raw_ostream {
  std::ostream &OS;

  /// write_impl - See raw_ostream::write_impl.
  void write_impl(const char *Ptr, size_t Size) override;

  /// current_pos - Return the current position within the stream, not
  /// counting the bytes currently in the buffer.
  uint64_t current_pos() const override;

public:
  raw_os_ostream(std::ostream &O) : OS(O) {}
  ~raw_os_ostream() override;
};

} // end llvm namespace

#endif

//===-- llvm/Support/Timer.h - Interval Timing Support ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TIMER_H
#define LLVM_SUPPORT_TIMER_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

class TimerGroup;
class raw_ostream;

class TimeRecord {
  double WallTime = 0.0;             ///< Wall clock time elapsed in seconds.
  double UserTime = 0.0;             ///< User time elapsed.
  double SystemTime = 0.0;           ///< System time elapsed.
  ssize_t MemUsed = 0;               ///< Memory allocated (in bytes).
  uint64_t InstructionsExecuted = 0; ///< Number of instructions executed
public:
  TimeRecord() = default;

  /// Get the current time and memory usage.  If Start is true we get the memory
  /// usage before the time, otherwise we get time before memory usage.  This
  /// matters if the time to get the memory usage is significant and shouldn't
  /// be counted as part of a duration.
  static TimeRecord getCurrentTime(bool Start = true);

  double getProcessTime() const { return UserTime + SystemTime; }
  double getUserTime() const { return UserTime; }
  double getSystemTime() const { return SystemTime; }
  double getWallTime() const { return WallTime; }
  ssize_t getMemUsed() const { return MemUsed; }
  uint64_t getInstructionsExecuted() const { return InstructionsExecuted; }

  bool operator<(const TimeRecord &T) const {
    // Sort by wall time elapsed, as it is the only really accurate measure.
    return WallTime < T.WallTime;
  }

  void operator+=(const TimeRecord &RHS) {
    WallTime += RHS.WallTime;
    UserTime += RHS.UserTime;
    SystemTime += RHS.SystemTime;
    MemUsed += RHS.MemUsed;
    InstructionsExecuted += RHS.InstructionsExecuted;
  }
  void operator-=(const TimeRecord &RHS) {
    WallTime -= RHS.WallTime;
    UserTime -= RHS.UserTime;
    SystemTime -= RHS.SystemTime;
    MemUsed -= RHS.MemUsed;
    InstructionsExecuted -= RHS.InstructionsExecuted;
  }

  /// Print the current time record to \p OS, with a breakdown showing
  /// contributions to the \p Total time record.
  void print(const TimeRecord &Total, raw_ostream &OS) const;
};
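
// Example (a minimal sketch; doWork() is a hypothetical workload): a duration
// can be measured by differencing two records, which is what Timer does
// internally:
//
//   TimeRecord Start = TimeRecord::getCurrentTime(/*Start=*/true);
//   doWork();
//   TimeRecord End = TimeRecord::getCurrentTime(/*Start=*/false);
//   End -= Start;                     // End now holds the elapsed deltas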

/// This class is used to track the amount of time spent between invocations of
/// its startTimer()/stopTimer() methods.  Given appropriate OS support it can
/// also keep track of the RSS of the program at various points.  By default,
/// the Timer prints the amount of time it has captured to standard error when
/// the last timer in the default group is destroyed; if the Timer was placed
/// in an explicit TimerGroup, the report is instead printed when that group is
/// destroyed.  Timers that are never started print nothing.
class Timer {
  TimeRecord Time;          ///< The total time captured.
  TimeRecord StartTime;     ///< The time startTimer() was last called.
  std::string Name;         ///< The name of this time variable.
  std::string Description;  ///< Description of this time variable.
  bool Running = false;     ///< Is the timer currently running?
  bool Triggered = false;   ///< Has the timer ever been triggered?
  TimerGroup *TG = nullptr; ///< The TimerGroup this Timer is in.

  Timer **Prev = nullptr;   ///< Pointer to \p Next of previous timer in group.
  Timer *Next = nullptr;    ///< Next timer in the group.
public:
  explicit Timer(StringRef TimerName, StringRef TimerDescription) {
    init(TimerName, TimerDescription);
  }
  Timer(StringRef TimerName, StringRef TimerDescription, TimerGroup &tg) {
    init(TimerName, TimerDescription, tg);
  }
  Timer(const Timer &RHS) {
    assert(!RHS.TG && "Can only copy uninitialized timers");
  }
  const Timer &operator=(const Timer &T) {
    assert(!TG && !T.TG && "Can only assign uninit timers");
    return *this;
  }
  ~Timer();

  /// Create an uninitialized timer; the client must call 'init' before use.
  explicit Timer() = default;
  void init(StringRef TimerName, StringRef TimerDescription);
  void init(StringRef TimerName, StringRef TimerDescription, TimerGroup &tg);

  const std::string &getName() const { return Name; }
  const std::string &getDescription() const { return Description; }
  bool isInitialized() const { return TG != nullptr; }

  /// Check if the timer is currently running.
  bool isRunning() const { return Running; }

  /// Check if startTimer() has ever been called on this timer.
  bool hasTriggered() const { return Triggered; }

  /// Start the timer running.  Time between calls to startTimer/stopTimer is
  /// counted by the Timer class.  Note that these calls must be correctly
  /// paired.
  void startTimer();

  /// Stop the timer.
  void stopTimer();

  /// Clear the timer state.
  void clear();

  /// Return the duration for which this timer has been running.
  TimeRecord getTotalTime() const { return Time; }

private:
  friend class TimerGroup;
};

/// The TimeRegion class is used as a helper class to call the startTimer() and
/// stopTimer() methods of the Timer class.  When the object is constructed, it
/// starts the timer specified as its argument.  When it is destroyed, it stops
/// the relevant timer.  This makes it easy to time a region of code.
class TimeRegion {
  Timer *T;
  TimeRegion(const TimeRegion &) = delete;

public:
  explicit TimeRegion(Timer &t) : T(&t) {
    T->startTimer();
  }
  explicit TimeRegion(Timer *t) : T(t) {
    if (T) T->startTimer();
  }
  ~TimeRegion() {
    if (T) T->stopTimer();
  }
};
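
// Example (a minimal sketch; the names are illustrative): time a scope with a
// Timer in an explicit group, then print the report.  The group is declared
// first so it outlives its timer:
//
//   TimerGroup Group("mygroup", "My timing report");
//   Timer T("mytimer", "My timed work", Group);
//   {
//     TimeRegion R(T);                // startTimer() here...
//     doWork();                       // ...stopTimer() at end of scope
//   }
//   Group.print(llvm::errs());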

/// This class is basically a combination of TimeRegion and Timer.  It allows
/// you to declare a new timer, AND specify the region to time, all in one
/// statement.  All timers with the same name are merged.  This is primarily
/// used for debugging and for hunting performance problems.
struct NamedRegionTimer : public TimeRegion {
  explicit NamedRegionTimer(StringRef Name, StringRef Description,
                            StringRef GroupName,
                            StringRef GroupDescription, bool Enabled = true);
};
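
// Example (a minimal sketch): declare and start a merged timer in one
// statement; the last argument can be any bool flag (TimePassesIsEnabled is
// typical):
//
//   {
//     NamedRegionTimer NRT("isel", "Instruction Selection", "codegen",
//                          "Code Generation", TimePassesIsEnabled);
//     doWork();                       // hypothetical workload
//   }                                 // timing stops when NRT is destroyed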

/// The TimerGroup class is used to group together related timers into a single
/// report that is printed when the TimerGroup is destroyed.  It is illegal to
/// destroy a TimerGroup object before all of the Timers in it are gone.  A
/// TimerGroup can be specified for a newly created timer in its constructor.
class TimerGroup {
  struct PrintRecord {
    TimeRecord Time;
    std::string Name;
    std::string Description;

    PrintRecord(const PrintRecord &Other) = default;
    PrintRecord &operator=(const PrintRecord &Other) = default;
    PrintRecord(const TimeRecord &Time, const std::string &Name,
                const std::string &Description)
      : Time(Time), Name(Name), Description(Description) {}

    bool operator <(const PrintRecord &Other) const {
      return Time < Other.Time;
    }
  };
  std::string Name;
  std::string Description;
  Timer *FirstTimer = nullptr; ///< First timer in the group.
  std::vector<PrintRecord> TimersToPrint;

  TimerGroup **Prev; ///< Pointer to Next field of previous timergroup in list.
  TimerGroup *Next;  ///< Pointer to next timergroup in list.
  TimerGroup(const TimerGroup &TG) = delete;
  void operator=(const TimerGroup &TG) = delete;

public:
  explicit TimerGroup(StringRef Name, StringRef Description);

  explicit TimerGroup(StringRef Name, StringRef Description,
                      const StringMap<TimeRecord> &Records);

  ~TimerGroup();

  void setName(StringRef NewName, StringRef NewDescription) {
    Name.assign(NewName.begin(), NewName.end());
    Description.assign(NewDescription.begin(), NewDescription.end());
  }

  /// Print any started timers in this group, optionally resetting timers after
  /// printing them.
  void print(raw_ostream &OS, bool ResetAfterPrint = false);

  /// Clear all timers in this group.
  void clear();

  /// This static method prints all timers.
  static void printAll(raw_ostream &OS);

  /// Clear out all timers. This is mostly used to disable automatic
  /// printing on shutdown, when timers have already been printed explicitly
  /// using \c printAll or \c printJSONValues.
  static void clearAll();

  const char *printJSONValues(raw_ostream &OS, const char *delim);

  /// Prints all timers as JSON key/value pairs.
  static const char *printAllJSONValues(raw_ostream &OS, const char *delim);

  /// Ensure global objects required for statistics printing are initialized.
  /// This function is used by the Statistic code to ensure correct order of
  /// global constructors and destructors.
  static void constructForStatistics();

  /// Stop managing the default group internally and let the caller manage the
  /// group's lifetime instead.
  static std::unique_ptr<TimerGroup> aquireDefaultGroup();

private:
  friend class Timer;
  friend void PrintStatisticsJSON(raw_ostream &OS);
  void addTimer(Timer &T);
  void removeTimer(Timer &T);
  void prepareToPrintList(bool reset_time = false);
  void PrintQueuedTimers(raw_ostream &OS);
  void printJSONValue(raw_ostream &OS, const PrintRecord &R,
                      const char *suffix, double Value);
};

} // end namespace llvm

#endif
//===-- ELFAttributes.h - ELF Attributes ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ELFATTRIBUTES_H
#define LLVM_SUPPORT_ELFATTRIBUTES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <optional>

namespace llvm {

struct TagNameItem {
  unsigned attr;
  StringRef tagName;
};

using TagNameMap = ArrayRef<TagNameItem>;

namespace ELFAttrs {

enum AttrType : unsigned { File = 1, Section = 2, Symbol = 3 };

StringRef attrTypeAsString(unsigned attr, TagNameMap tagNameMap,
                           bool hasTagPrefix = true);
std::optional<unsigned> attrTypeFromString(StringRef tag, TagNameMap tagNameMap);
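
// Example (an illustrative sketch; the one-entry tag table is hypothetical):
// round-trip an attribute number through its name:
//
//   static const TagNameItem Tags[] = {{1, "Tag_File"}};
//   TagNameMap Map(Tags);
//   StringRef Name = attrTypeAsString(1, Map);                   // "Tag_File"
//   std::optional<unsigned> Num = attrTypeFromString(Name, Map); // 1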

// Magic numbers for ELF attributes.
enum AttrMagic { Format_Version = 0x41 };

} // namespace ELFAttrs
} // namespace llvm
#endif
//===- CSKYTargetParser.def - CSKY target parsing defines -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides defines to build up the CSKY target parser's logic.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!
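
// Example (an illustrative consumer, not part of this file): a client defines
// the macros of interest before including this file, and the corresponding
// table below expands in place.  The unused macros default to nothing via the
// #ifndef blocks.  E.g., to collect all FPU names:
//
//   #define CSKY_FPU(NAME, KIND, VERSION) NAME,
//   static const char *FPUNames[] = {
//   #include "llvm/Support/CSKYTargetParser.def"
//   };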

#ifndef CSKY_FPU
#define CSKY_FPU(NAME, KIND, VERSION)
#endif
CSKY_FPU("invalid", FK_INVALID, FPUVersion::NONE)
CSKY_FPU("auto", FK_AUTO, FPUVersion::FPV2)
CSKY_FPU("fpv2", FK_FPV2, FPUVersion::FPV2)
CSKY_FPU("fpv2_divd", FK_FPV2_DIVD, FPUVersion::FPV2)
CSKY_FPU("fpv2_sf", FK_FPV2_SF, FPUVersion::FPV2)
CSKY_FPU("fpv3", FK_FPV3, FPUVersion::FPV3)
CSKY_FPU("fpv3_hf", FK_FPV3_HF, FPUVersion::FPV3)
CSKY_FPU("fpv3_hsf", FK_FPV3_HSF, FPUVersion::FPV3)
CSKY_FPU("fpv3_sdf", FK_FPV3_SDF, FPUVersion::FPV3)

#undef CSKY_FPU

#ifndef CSKY_ARCH
#define CSKY_ARCH(NAME, ID, ARCH_BASE_EXT)
#endif
CSKY_ARCH("invalid", INVALID, CSKY::AEK_INVALID)
CSKY_ARCH("ck801", CK801, CSKY::MAEK_E1 | CSKY::AEK_TRUST)
CSKY_ARCH("ck802", CK802, CSKY::MAEK_E2 | CSKY::AEK_TRUST | CSKY::AEK_NVIC)
CSKY_ARCH("ck803", CK803,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV)
CSKY_ARCH("ck803s", CK803S,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV)
CSKY_ARCH("ck804", CK804,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV | CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_ARCH("ck805", CK805,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV | CSKY::AEK_HIGHREG | CSKY::MAEK_3E3R2 |
              CSKY::AEK_3E3R3 | CSKY::AEK_VDSPV2 | CSKY::AEK_VDSP2E3)
CSKY_ARCH("ck807", CK807,
          CSKY::MAEK_3E7 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 | CSKY::AEK_TRUST |
              CSKY::AEK_HWDIV | CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 |
              CSKY::AEK_DSPE60 | CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP |
              CSKY::AEK_NVIC | CSKY::AEK_CACHE)
CSKY_ARCH("ck810", CK810,
          CSKY::MAEK_7E10 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 | CSKY::AEK_TRUST |
              CSKY::AEK_HWDIV | CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 |
              CSKY::AEK_DSPE60 | CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP |
              CSKY::AEK_NVIC | CSKY::AEK_CACHE)
CSKY_ARCH("ck810v", CK810V,
          CSKY::MAEK_7E10 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 | CSKY::AEK_TRUST |
              CSKY::AEK_HWDIV | CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 |
              CSKY::AEK_DSPE60 | CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP |
              CSKY::AEK_NVIC | CSKY::AEK_CACHE | CSKY::AEK_VDSPV1)
CSKY_ARCH("ck860", CK860,
          CSKY::MAEK_10E60 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 |
              CSKY::AEK_TRUST | CSKY::AEK_HWDIV | CSKY::AEK_DSPE60 |
              CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP | CSKY::AEK_NVIC |
              CSKY::AEK_CACHE | CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_ARCH("ck860v", CK860V,
          CSKY::MAEK_10E60 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 |
              CSKY::AEK_TRUST | CSKY::AEK_HWDIV | CSKY::AEK_DSPE60 |
              CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP | CSKY::AEK_NVIC |
              CSKY::AEK_CACHE | CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 |
              CSKY::AEK_VDSPV2 | CSKY::AEK_VDSP2E60F)
#undef CSKY_ARCH

#ifndef CSKY_ARCH_EXT_NAME
#define CSKY_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
CSKY_ARCH_EXT_NAME("invalid", CSKY::AEK_INVALID, nullptr, nullptr)
CSKY_ARCH_EXT_NAME("none", CSKY::AEK_NONE, nullptr, nullptr)
CSKY_ARCH_EXT_NAME("fpuv2_sf", CSKY::AEK_FPUV2SF, "+fpuv2_sf", "-fpuv2_sf")
CSKY_ARCH_EXT_NAME("fpuv2_df", CSKY::AEK_FPUV2DF, "+fpuv2_df", "-fpuv2_df")
CSKY_ARCH_EXT_NAME("fdivdu", CSKY::AEK_FDIVDU, "+fdivdu", "-fdivdu")
CSKY_ARCH_EXT_NAME("fpuv3_hi", CSKY::AEK_FPUV3HI, "+fpuv3_hi", "-fpuv3_hi")
CSKY_ARCH_EXT_NAME("fpuv3_hf", CSKY::AEK_FPUV3HF, "+fpuv3_hf", "-fpuv3_hf")
CSKY_ARCH_EXT_NAME("fpuv3_sf", CSKY::AEK_FPUV3SF, "+fpuv3_sf", "-fpuv3_sf")
CSKY_ARCH_EXT_NAME("fpuv3_df", CSKY::AEK_FPUV3DF, "+fpuv3_df", "-fpuv3_df")
CSKY_ARCH_EXT_NAME("floate1", CSKY::AEK_FLOATE1, "+floate1", "-floate1")
CSKY_ARCH_EXT_NAME("float1e2", CSKY::AEK_FLOAT1E2, "+float1e2", "-float1e2")
CSKY_ARCH_EXT_NAME("float1e3", CSKY::AEK_FLOAT1E3, "+float1e3", "-float1e3")
CSKY_ARCH_EXT_NAME("float3e4", CSKY::AEK_FLOAT3E4, "+float3e4", "-float3e4")
CSKY_ARCH_EXT_NAME("float7e60", CSKY::AEK_FLOAT7E60, "+float7e60", "-float7e60")
CSKY_ARCH_EXT_NAME("hwdiv", CSKY::AEK_HWDIV, "+hwdiv", "-hwdiv")
CSKY_ARCH_EXT_NAME("multiple_stld", CSKY::AEK_STLD, "+multiple_stld",
                   "-multiple_stld")
CSKY_ARCH_EXT_NAME("pushpop", CSKY::AEK_PUSHPOP, "+pushpop", "-pushpop")
CSKY_ARCH_EXT_NAME("edsp", CSKY::AEK_EDSP, "+edsp", "-edsp")
CSKY_ARCH_EXT_NAME("dsp1e2", CSKY::AEK_DSP1E2, "+dsp1e2", "-dsp1e2")
CSKY_ARCH_EXT_NAME("dspe60", CSKY::AEK_DSPE60, "+dspe60", "-dspe60")
CSKY_ARCH_EXT_NAME("dspv2", CSKY::AEK_DSPV2, "+dspv2", "-dspv2")
CSKY_ARCH_EXT_NAME("dsp_silan", CSKY::AEK_DSPSILAN, "+dsp_silan", "-dsp_silan")
CSKY_ARCH_EXT_NAME("elrw", CSKY::AEK_ELRW, "+elrw", "-elrw")
CSKY_ARCH_EXT_NAME("trust", CSKY::AEK_TRUST, "+trust", "-trust")
CSKY_ARCH_EXT_NAME("java", CSKY::AEK_JAVA, "+java", "-java")
CSKY_ARCH_EXT_NAME("cache", CSKY::AEK_CACHE, "+cache", "-cache")
CSKY_ARCH_EXT_NAME("nvic", CSKY::AEK_NVIC, "+nvic", "-nvic")
CSKY_ARCH_EXT_NAME("doloop", CSKY::AEK_DOLOOP, "+doloop", "-doloop")
CSKY_ARCH_EXT_NAME("high-registers", CSKY::AEK_HIGHREG, "+high-registers",
                   "-high-registers")
CSKY_ARCH_EXT_NAME("smart", CSKY::AEK_SMART, "+smart", "-smart")
CSKY_ARCH_EXT_NAME("vdsp2e3", CSKY::AEK_VDSP2E3, "+vdsp2e3", "-vdsp2e3")
CSKY_ARCH_EXT_NAME("vdsp2e60f", CSKY::AEK_VDSP2E60F, "+vdsp2e60f", "-vdsp2e60f")
CSKY_ARCH_EXT_NAME("vdspv2", CSKY::AEK_VDSPV2, "+vdspv2", "-vdspv2")
CSKY_ARCH_EXT_NAME("hard-tp", CSKY::AEK_HARDTP, "+hard-tp", "-hard-tp")
CSKY_ARCH_EXT_NAME("soft-tp", CSKY::AEK_SOFTTP, "+soft-tp", "-soft-tp")
CSKY_ARCH_EXT_NAME("istack", CSKY::AEK_ISTACK, "+istack", "-istack")
CSKY_ARCH_EXT_NAME("constpool", CSKY::AEK_CONSTPOOL, "+constpool", "-constpool")
CSKY_ARCH_EXT_NAME("stack-size", CSKY::AEK_STACKSIZE, "+stack-size",
                   "-stack-size")
CSKY_ARCH_EXT_NAME("ccrt", CSKY::AEK_CCRT, "+ccrt", "-ccrt")
CSKY_ARCH_EXT_NAME("vdspv1", CSKY::AEK_VDSPV1, "+vdspv1", "-vdspv1")

CSKY_ARCH_EXT_NAME("e1", CSKY::AEK_E1, "+e1", "-e1")
CSKY_ARCH_EXT_NAME("e2", CSKY::AEK_E2, "+e2", "-e2")
CSKY_ARCH_EXT_NAME("2e3", CSKY::AEK_2E3, "+2e3", "-2e3")
CSKY_ARCH_EXT_NAME("mp", CSKY::AEK_MP, "+mp", "-mp")
CSKY_ARCH_EXT_NAME("3e3r1", CSKY::AEK_3E3R1, "+3e3r1", "-3e3r1")
CSKY_ARCH_EXT_NAME("3e3r2", CSKY::AEK_3E3R2, "+3e3r2", "-3e3r2")
CSKY_ARCH_EXT_NAME("3e3r3", CSKY::AEK_3E3R3, "+3e3r3", "-3e3r3")
CSKY_ARCH_EXT_NAME("3e7", CSKY::AEK_3E7, "+3e7", "-3e7")
CSKY_ARCH_EXT_NAME("mp1e2", CSKY::AEK_MP1E2, "+mp1e2", "-mp1e2")
CSKY_ARCH_EXT_NAME("7e10", CSKY::AEK_7E10, "+7e10", "-7e10")
CSKY_ARCH_EXT_NAME("10e60", CSKY::AEK_10E60, "+10e60", "-10e60")

#undef CSKY_ARCH_EXT_NAME

#ifndef CSKY_CPU_NAME
#define CSKY_CPU_NAME(NAME, ARCH_ID, DEFAULT_EXT)
#endif

CSKY_CPU_NAME("ck801", CK801, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck801t", CK801, CSKY::AEK_NONE)
CSKY_CPU_NAME("e801", CK801, CSKY::AEK_NONE)

CSKY_CPU_NAME("ck802", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck802t", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck802j", CK802, CSKY::AEK_JAVA)
CSKY_CPU_NAME("e802", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("e802t", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("s802", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("s802t", CK802, CSKY::AEK_NONE)

CSKY_CPU_NAME("ck803", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803h", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803t", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803ht", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803f", CK803,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fh", CK803,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803e", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803eh", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803et", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803eht", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803ef", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803efh", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803ft", CK803,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803eft", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803efht", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803r1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803r2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803r3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803hr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803hr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803hr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803tr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803tr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803tr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803htr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803htr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803htr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803fr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fhr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fhr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fhr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803er1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803er2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803er3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803etr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803etr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803etr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehtr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehtr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehtr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ftr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803ftr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803ftr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803eftr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803eftr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803eftr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhtr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhtr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhtr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("s803", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("s803t", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("e803", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("e803t", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)

CSKY_CPU_NAME("ck803s", CK803S, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803st", CK803S, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803se", CK803S,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803sf", CK803S,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803sef", CK803S,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803seft", CK803S,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)

CSKY_CPU_NAME("ck804", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804h", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804t", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804ht", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804f", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck804fh", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck804e", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804eh", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804et", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804eht", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804ef", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804efh", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804ft", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck804eft", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804efht", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804d", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804dt", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804f", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("e804ft", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("e804df", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804dft", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)

CSKY_CPU_NAME("ck805", CK805, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck805e", CK805,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("ck805f", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck805t", CK805, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck805ef", CK805,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck805et", CK805,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("ck805ft", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck805eft", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("i805", CK805, CSKY::AEK_NONE)
CSKY_CPU_NAME("i805f", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)

CSKY_CPU_NAME("ck807", CK807, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck807e", CK807,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck807f", CK807,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)
CSKY_CPU_NAME("ck807ef", CK807,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)
CSKY_CPU_NAME("c807", CK807, CSKY::AEK_NONE)
CSKY_CPU_NAME("c807f", CK807,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)
CSKY_CPU_NAME("r807", CK807, CSKY::AEK_NONE)
CSKY_CPU_NAME("r807f", CK807,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)

CSKY_CPU_NAME("ck810e", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810et", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810ef", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810eft", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810f", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810t", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810ft", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810t", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)

CSKY_CPU_NAME("ck810v", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810ev", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810tv", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810etv", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("c810v", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810fv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810efv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810ftv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810tv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810eftv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)

CSKY_CPU_NAME("ck860", CK860, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck860f", CK860,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)
CSKY_CPU_NAME("c860", CK860,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)

CSKY_CPU_NAME("ck860v", CK860V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck860fv", CK860V,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)
CSKY_CPU_NAME("c860v", CK860V,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)
// Invalid CPU
CSKY_CPU_NAME("invalid", INVALID, CSKY::AEK_INVALID)
#undef CSKY_CPU_NAME
//===-- llvm/Support/X86TargetParser.def ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/X86TargetParser.def`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/X86TargetParser.def"
//===- llvm/Support/ScaledNumber.h - Support for scaled numbers -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains functions (and a class) useful for working with scaled
// numbers -- in particular, pairs of integers where one represents digits and
// another represents a scale.  The functions are helpers and live in the
// namespace ScaledNumbers.  The class ScaledNumber is useful for modelling
// certain cost metrics that need simple, integer-like semantics that are easy
// to reason about.
//
// These might remind you of soft-floats.  If you want one of those, you're in
// the wrong place.  Look at include/llvm/ADT/APFloat.h instead.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SCALEDNUMBER_H
#define LLVM_SUPPORT_SCALEDNUMBER_H

#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {
namespace ScaledNumbers {

/// Maximum scale; same as APFloat for easy debug printing.
const int32_t MaxScale = 16383;

/// Minimum scale; same as APFloat for easy debug printing.
const int32_t MinScale = -16382;

/// Get the width of a number.
template <class DigitsT> inline int getWidth() { return sizeof(DigitsT) * 8; }

/// Conditionally round up a scaled number.
///
/// Given \c Digits and \c Scale, round up iff \c ShouldRound is \c true.
/// Always returns \c Scale unless there's an overflow, in which case it
/// returns \c 1+Scale.
///
/// \pre adding 1 to \c Scale will not overflow INT16_MAX.
template <class DigitsT>
inline std::pair<DigitsT, int16_t> getRounded(DigitsT Digits, int16_t Scale,
                                              bool ShouldRound) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  if (ShouldRound)
    if (!++Digits)
      // Overflow.
      return std::make_pair(DigitsT(1) << (getWidth<DigitsT>() - 1), Scale + 1);
  return std::make_pair(Digits, Scale);
}

/// Convenience helper for 32-bit rounding.
inline std::pair<uint32_t, int16_t> getRounded32(uint32_t Digits, int16_t Scale,
                                                 bool ShouldRound) {
  return getRounded(Digits, Scale, ShouldRound);
}

/// Convenience helper for 64-bit rounding.
inline std::pair<uint64_t, int16_t> getRounded64(uint64_t Digits, int16_t Scale,
                                                 bool ShouldRound) {
  return getRounded(Digits, Scale, ShouldRound);
}
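
// Example: rounding either increments the digits or, on wrap-around, falls
// back to the top bit and bumps the scale:
//
//   getRounded32(5, 0, /*ShouldRound=*/true)          == {6, 0}
//   getRounded32(UINT32_MAX, 0, /*ShouldRound=*/true) == {0x80000000, 1}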

/// Adjust a 64-bit scaled number down to the appropriate width.
///
/// \pre Adding 64 to \c Scale will not overflow INT16_MAX.
template <class DigitsT>
inline std::pair<DigitsT, int16_t> getAdjusted(uint64_t Digits,
                                               int16_t Scale = 0) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  const int Width = getWidth<DigitsT>();
  if (Width == 64 || Digits <= std::numeric_limits<DigitsT>::max())
    return std::make_pair(Digits, Scale);

  // Shift right and round.
  int Shift = llvm::bit_width(Digits) - Width;
  return getRounded<DigitsT>(Digits >> Shift, Scale + Shift,
                             Digits & (UINT64_C(1) << (Shift - 1)));
}

/// Convenience helper for adjusting to 32 bits.
inline std::pair<uint32_t, int16_t> getAdjusted32(uint64_t Digits,
                                                  int16_t Scale = 0) {
  return getAdjusted<uint32_t>(Digits, Scale);
}

/// Convenience helper for adjusting to 64 bits.
inline std::pair<uint64_t, int16_t> getAdjusted64(uint64_t Digits,
                                                  int16_t Scale = 0) {
  return getAdjusted<uint64_t>(Digits, Scale);
}
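
// Example: a 64-bit value too wide for 32-bit digits is shifted down and the
// shift is folded into the scale:
//
//   getAdjusted32(UINT64_C(1) << 32) == {0x80000000, 1}  // 2^32 == 2^31 * 2^1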

/// Multiply two 64-bit integers to create a 64-bit scaled number.
///
/// Implemented with four 64-bit integer multiplies.
std::pair<uint64_t, int16_t> multiply64(uint64_t LHS, uint64_t RHS);

/// Multiply two 32-bit integers to create a 32-bit scaled number.
///
/// Implemented with one 64-bit integer multiply.
template <class DigitsT>
inline std::pair<DigitsT, int16_t> getProduct(DigitsT LHS, DigitsT RHS) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  if (getWidth<DigitsT>() <= 32 || (LHS <= UINT32_MAX && RHS <= UINT32_MAX))
    return getAdjusted<DigitsT>(uint64_t(LHS) * RHS);

  return multiply64(LHS, RHS);
}

/// Convenience helper for 32-bit product.
inline std::pair<uint32_t, int16_t> getProduct32(uint32_t LHS, uint32_t RHS) {
  return getProduct(LHS, RHS);
}

/// Convenience helper for 64-bit product.
inline std::pair<uint64_t, int16_t> getProduct64(uint64_t LHS, uint64_t RHS) {
  return getProduct(LHS, RHS);
}
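
// Example: products wider than the digit type are renormalized through
// getAdjusted(), so only rounded-off low bits are lost:
//
//   getProduct32(0x10000, 0x10000) == {0x80000000, 1}   // 2^16 * 2^16 == 2^32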

/// Divide two 64-bit integers to create a 64-bit scaled number.
///
/// Implemented with long division.
///
/// \pre \c Dividend and \c Divisor are non-zero.
std::pair<uint64_t, int16_t> divide64(uint64_t Dividend, uint64_t Divisor);

/// Divide two 32-bit integers to create a 32-bit scaled number.
///
/// Implemented with one 64-bit integer divide/remainder pair.
///
/// \pre \c Dividend and \c Divisor are non-zero.
std::pair<uint32_t, int16_t> divide32(uint32_t Dividend, uint32_t Divisor);

/// Divide two 32-bit numbers to create a 32-bit scaled number.
///
/// Implemented with one 64-bit integer divide/remainder pair.
///
/// Returns \c (DigitsT_MAX, MaxScale) for divide-by-zero (0 for 0/0).
template <class DigitsT>
std::pair<DigitsT, int16_t> getQuotient(DigitsT Dividend, DigitsT Divisor) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
  static_assert(sizeof(DigitsT) == 4 || sizeof(DigitsT) == 8,
                "expected 32-bit or 64-bit digits");

  // Check for zero.
  if (!Dividend)
    return std::make_pair(0, 0);
  if (!Divisor)
    return std::make_pair(std::numeric_limits<DigitsT>::max(), MaxScale);

  if (getWidth<DigitsT>() == 64)
    return divide64(Dividend, Divisor);
  return divide32(Dividend, Divisor);
}

/// Convenience helper for 32-bit quotient.
inline std::pair<uint32_t, int16_t> getQuotient32(uint32_t Dividend,
                                                  uint32_t Divisor) {
  return getQuotient(Dividend, Divisor);
}

/// Convenience helper for 64-bit quotient.
inline std::pair<uint64_t, int16_t> getQuotient64(uint64_t Dividend,
                                                  uint64_t Divisor) {
  return getQuotient(Dividend, Divisor);
}
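
// Example: the zero and divide-by-zero conventions documented above:
//
//   getQuotient32(0, 7) == {0, 0}                       // 0/N == 0
//   getQuotient32(7, 0) == {UINT32_MAX, MaxScale}       // N/0 saturates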

/// Implementation of getLg() and friends.
///
/// Returns the rounded lg of \c Digits*2^Scale and an int specifying whether
/// this was rounded up (1), down (-1), or exact (0).
///
/// Returns \c INT32_MIN when \c Digits is zero.
template <class DigitsT>
inline std::pair<int32_t, int> getLgImpl(DigitsT Digits, int16_t Scale) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  if (!Digits)
    return std::make_pair(INT32_MIN, 0);

  // Get the floor of the lg of Digits.
  static_assert(sizeof(Digits) <= sizeof(uint64_t));
  int32_t LocalFloor = llvm::Log2_64(Digits);

  // Get the actual floor.
  int32_t Floor = Scale + LocalFloor;
  if (Digits == UINT64_C(1) << LocalFloor)
    return std::make_pair(Floor, 0);

  // Round based on the next digit.
  assert(LocalFloor >= 1);
  bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
  return std::make_pair(Floor + Round, Round ? 1 : -1);
}

/// Get the lg (rounded) of a scaled number.
///
/// Get the lg of \c Digits*2^Scale.
///
/// Returns \c INT32_MIN when \c Digits is zero.
template <class DigitsT> int32_t getLg(DigitsT Digits, int16_t Scale) {
  return getLgImpl(Digits, Scale).first;
}

/// Get the lg floor of a scaled number.
///
/// Get the floor of the lg of \c Digits*2^Scale.
///
/// Returns \c INT32_MIN when \c Digits is zero.
template <class DigitsT> int32_t getLgFloor(DigitsT Digits, int16_t Scale) {
  auto Lg = getLgImpl(Digits, Scale);
  return Lg.first - (Lg.second > 0);
}

/// Get the lg ceiling of a scaled number.
///
/// Get the ceiling of the lg of \c Digits*2^Scale.
///
/// Returns \c INT32_MIN when \c Digits is zero.
template <class DigitsT> int32_t getLgCeiling(DigitsT Digits, int16_t Scale) {
  auto Lg = getLgImpl(Digits, Scale);
  return Lg.first + (Lg.second < 0);
}
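
// Example: the three lg flavours on a non-power-of-two (lg 3 is about 1.58):
//
//   getLg(uint32_t(3), 0)        == 2                   // rounded to nearest
//   getLgFloor(uint32_t(3), 0)   == 1
//   getLgCeiling(uint32_t(3), 0) == 2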

/// Implementation for comparing scaled numbers.
///
/// Compare two 64-bit numbers with different scales.  Given that the scale of
/// \c R is higher than that of \c L by \c ScaleDiff, compare them.  Return -1,
/// 1, and 0 for less than, greater than, and equal, respectively.
///
/// \pre 0 <= ScaleDiff < 64.
int compareImpl(uint64_t L, uint64_t R, int ScaleDiff);

/// Compare two scaled numbers.
///
/// Compare two scaled numbers.  Returns 0 for equal, -1 for less than, and 1
/// for greater than.
template <class DigitsT>
int compare(DigitsT LDigits, int16_t LScale, DigitsT RDigits, int16_t RScale) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  // Check for zero.
  if (!LDigits)
    return RDigits ? -1 : 0;
  if (!RDigits)
    return 1;

  // Compare the lg floors first.  When they match, the scale difference passed
  // to compareImpl is guaranteed to be below 64.
  int32_t lgL = getLgFloor(LDigits, LScale), lgR = getLgFloor(RDigits, RScale);
  if (lgL != lgR)
    return lgL < lgR ? -1 : 1;

  // Compare digits.
  if (LScale < RScale)
    return compareImpl(LDigits, RDigits, RScale - LScale);

  return -compareImpl(RDigits, LDigits, LScale - RScale);
}
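
// Example: equal values with different representations compare equal:
//
//   compare(uint32_t(4), 0, uint32_t(1), 2) == 0        // 4*2^0 == 1*2^2
//   compare(uint32_t(1), 0, uint32_t(1), 2) == -1       // 1 < 4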

/// Match scales of two numbers.
///
/// Given two scaled numbers, match up their scales.  Change the digits and
/// scales in place.  Shift the digits as necessary to form equivalent numbers,
/// losing precision only when necessary.
///
/// If the output value of \c LDigits (\c RDigits) is \c 0, the output value of
/// \c LScale (\c RScale) is unspecified.
///
/// As a convenience, returns the matching scale.  If the output value of one
/// number is zero, returns the scale of the other.  If both are zero, which
/// scale is returned is unspecified.
template <class DigitsT>
int16_t matchScales(DigitsT &LDigits, int16_t &LScale, DigitsT &RDigits,
                    int16_t &RScale) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  if (LScale < RScale)
    // Swap arguments.
    return matchScales(RDigits, RScale, LDigits, LScale);
  if (!LDigits)
    return RScale;
  if (!RDigits || LScale == RScale)
    return LScale;

  // Now LScale > RScale.  Get the difference.
  int32_t ScaleDiff = int32_t(LScale) - RScale;
  if (ScaleDiff >= 2 * getWidth<DigitsT>()) {
    // Don't bother shifting.  RDigits will get zero-ed out anyway.
    RDigits = 0;
    return LScale;
  }

  // Shift LDigits left as much as possible, then shift RDigits right.
  int32_t ShiftL = std::min<int32_t>(llvm::countl_zero(LDigits), ScaleDiff);
  assert(ShiftL < getWidth<DigitsT>() && "can't shift more than width");

  int32_t ShiftR = ScaleDiff - ShiftL;
  if (ShiftR >= getWidth<DigitsT>()) {
    // Don't bother shifting.  RDigits will get zero-ed out anyway.
    RDigits = 0;
    return LScale;
  }

  LDigits <<= ShiftL;
  RDigits >>= ShiftR;

  LScale -= ShiftL;
  RScale += ShiftR;
  assert(LScale == RScale && "scales should match");
  return LScale;
}
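
// Example: matching (1, 4) against (8, 0) shifts the left digits up rather
// than dropping bits on the right:
//
//   uint32_t LDigits = 1, RDigits = 8;
//   int16_t LScale = 4, RScale = 0;
//   matchScales(LDigits, LScale, RDigits, RScale);
//   // Now LDigits == 16, RDigits == 8, and LScale == RScale == 0.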

/// Get the sum of two scaled numbers.
///
/// Get the sum of two scaled numbers with as much precision as possible.
///
/// \pre Adding 1 to \c LScale (or \c RScale) will not overflow INT16_MAX.
template <class DigitsT>
std::pair<DigitsT, int16_t> getSum(DigitsT LDigits, int16_t LScale,
                                   DigitsT RDigits, int16_t RScale) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  // Check inputs up front.  This is only relevant if addition overflows, but
  // testing here should catch more bugs.
  assert(LScale < INT16_MAX && "scale too large");
  assert(RScale < INT16_MAX && "scale too large");

  // Normalize digits to match scales.
  int16_t Scale = matchScales(LDigits, LScale, RDigits, RScale);

  // Compute sum.
  DigitsT Sum = LDigits + RDigits;
  if (Sum >= RDigits)
    return std::make_pair(Sum, Scale);

  // Adjust sum after arithmetic overflow.
  DigitsT HighBit = DigitsT(1) << (getWidth<DigitsT>() - 1);
  return std::make_pair(HighBit | Sum >> 1, Scale + 1);
}

/// Convenience helper for 32-bit sum.
inline std::pair<uint32_t, int16_t> getSum32(uint32_t LDigits, int16_t LScale,
                                             uint32_t RDigits, int16_t RScale) {
  return getSum(LDigits, LScale, RDigits, RScale);
}

/// Convenience helper for 64-bit sum.
inline std::pair<uint64_t, int16_t> getSum64(uint64_t LDigits, int16_t LScale,
                                             uint64_t RDigits, int16_t RScale) {
  return getSum(LDigits, LScale, RDigits, RScale);
}
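
// Example: a sum that overflows the digit type keeps the high bit and bumps
// the scale, losing only the lowest bit of precision:
//
//   getSum32(0x80000000, 0, 0x80000000, 0) == {0x80000000, 1}  // 2^31 + 2^31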

/// Get the difference of two scaled numbers.
///
/// Get LHS minus RHS with as much precision as possible.
///
/// Returns \c (0, 0) if the RHS is larger than the LHS.
template <class DigitsT>
std::pair<DigitsT, int16_t> getDifference(DigitsT LDigits, int16_t LScale,
                                          DigitsT RDigits, int16_t RScale) {
  static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");

  // Normalize digits to match scales.
  const DigitsT SavedRDigits = RDigits;
  const int16_t SavedRScale = RScale;
  matchScales(LDigits, LScale, RDigits, RScale);

  // Compute difference.
  if (LDigits <= RDigits)
    return std::make_pair(0, 0);
  if (RDigits || !SavedRDigits)
    return std::make_pair(LDigits - RDigits, LScale);

  // Check if RDigits just barely lost its last bit.  E.g., for 32-bit:
  //
  //   1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
  const auto RLgFloor = getLgFloor(SavedRDigits, SavedRScale);
  if (!compare(LDigits, LScale, DigitsT(1), RLgFloor + getWidth<DigitsT>()))
    return std::make_pair(std::numeric_limits<DigitsT>::max(), RLgFloor);

  return std::make_pair(LDigits, LScale);
}

/// Convenience helper for 32-bit difference.
inline std::pair<uint32_t, int16_t> getDifference32(uint32_t LDigits,
                                                    int16_t LScale,
                                                    uint32_t RDigits,
                                                    int16_t RScale) {
  return getDifference(LDigits, LScale, RDigits, RScale);
}

/// Convenience helper for 64-bit difference.
inline std::pair<uint64_t, int16_t> getDifference64(uint64_t LDigits,
                                                    int16_t LScale,
                                                    uint64_t RDigits,
                                                    int16_t RScale) {
  return getDifference(LDigits, LScale, RDigits, RScale);
}
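
// Example: saturation at zero, and the "barely lost bit" case from above:
//
//   getDifference32(4, 0, 8, 0)  == {0, 0}              // RHS > LHS saturates
//   getDifference32(1, 32, 1, 0) == {0xffffffff, 0}     // 2^32 - 2^0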

} // end namespace ScaledNumbers
} // end namespace llvm

namespace llvm {

class raw_ostream;
class ScaledNumberBase {
public:
  static constexpr int DefaultPrecision = 10;

  static void dump(uint64_t D, int16_t E, int Width);
  static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
                            unsigned Precision);
  static std::string toString(uint64_t D, int16_t E, int Width,
                              unsigned Precision);
  static int countLeadingZeros32(uint32_t N) { return llvm::countl_zero(N); }
  static int countLeadingZeros64(uint64_t N) { return llvm::countl_zero(N); }
  static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }

  static std::pair<uint64_t, bool> splitSigned(int64_t N) {
    if (N >= 0)
      return std::make_pair(N, false);
    uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
    return std::make_pair(Unsigned, true);
  }
  static int64_t joinSigned(uint64_t U, bool IsNeg) {
    if (U > uint64_t(INT64_MAX))
      return IsNeg ? INT64_MIN : INT64_MAX;
    return IsNeg ? -int64_t(U) : int64_t(U);
  }
};

/// Simple representation of a scaled number.
///
/// ScaledNumber is a number represented by digits and a scale.  It uses simple
/// saturation arithmetic and every operation is well-defined for every value.
/// It's somewhat similar in behaviour to a soft-float, but is *not* a
/// replacement for one.  If you're doing numerics, look at \a APFloat instead.
/// Nevertheless, we've found these semantics useful for modelling certain cost
/// metrics.
///
/// The number is split into a signed scale and unsigned digits.  The number
/// represented is \c getDigits()*2^getScale().  In this way, the digits are
/// much like the mantissa in the x87 long double, but there is no canonical
/// form so the same number can be represented by many bit representations.
///
/// ScaledNumber is templated on the underlying integer type for digits, which
/// is expected to be unsigned.
///
/// Unlike APFloat, ScaledNumber does not model architecture floating point
/// behaviour -- while this might make it a little faster and easier to reason
/// about, it certainly makes it more dangerous for general numerics.
///
/// ScaledNumber is totally ordered.  However, there is no canonical form, so
/// there are multiple representations of most scalars.  E.g.:
///
///     ScaledNumber(8u, 0) == ScaledNumber(4u, 1)
///     ScaledNumber(4u, 1) == ScaledNumber(2u, 2)
///     ScaledNumber(2u, 2) == ScaledNumber(1u, 3)
///
/// ScaledNumber implements most arithmetic operations.  Precision is kept
/// where possible.  Uses simple saturation arithmetic, so that operations
/// saturate to 0.0 or getLargest() rather than under or overflowing.  It has
/// some extra arithmetic for unit inversion.  0.0/0.0 is defined to be 0.0.
/// Any other division by 0.0 is defined to be getLargest().
///
/// As a convenience for modifying the exponent, left and right shifting are
/// both implemented, and both interpret negative shifts as positive shifts in
/// the opposite direction.
///
/// Scales are limited to the range accepted by x87 long double.  This makes
/// it trivial to add functionality to convert to APFloat (this is already
/// relied on for the implementation of printing).
///
/// Possible (and conflicting) future directions:
///
///  1. Turn this into a wrapper around \a APFloat.
///  2. Share the algorithm implementations with \a APFloat.
///  3. Allow \a ScaledNumber to represent a signed number.
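///
/// A minimal usage sketch (the values follow the equalities above):
///
///     ScaledNumber<uint32_t> Eight(8, 0);      // 8 * 2^0
///     ScaledNumber<uint32_t> AlsoEight(1, 3);  // 1 * 2^3, same value
///     assert(Eight == AlsoEight);
///     AlsoEight >>= 1;                         // value is now 4
///     assert(AlsoEight < Eight);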
template <class DigitsT> class ScaledNumber : ScaledNumberBase {
public:
  static_assert(!std::numeric_limits<DigitsT>::is_signed,
                "only unsigned floats supported");

  typedef DigitsT DigitsType;

private:
  typedef std::numeric_limits<DigitsType> DigitsLimits;

  static constexpr int Width = sizeof(DigitsType) * 8;
  static_assert(Width <= 64, "invalid integer width for digits");

private:
  DigitsType Digits = 0;
  int16_t Scale = 0;

public:
  ScaledNumber() = default;

  constexpr ScaledNumber(DigitsType Digits, int16_t Scale)
      : Digits(Digits), Scale(Scale) {}

private:
  ScaledNumber(const std::pair<DigitsT, int16_t> &X)
      : Digits(X.first), Scale(X.second) {}

public:
  static ScaledNumber getZero() { return ScaledNumber(0, 0); }
  static ScaledNumber getOne() { return ScaledNumber(1, 0); }
  static ScaledNumber getLargest() {
    return ScaledNumber(DigitsLimits::max(), ScaledNumbers::MaxScale);
  }
  static ScaledNumber get(uint64_t N) { return adjustToWidth(N, 0); }
  static ScaledNumber getInverse(uint64_t N) {
    return get(N).invert();
  }
  static ScaledNumber getFraction(DigitsType N, DigitsType D) {
    return getQuotient(N, D);
  }

  int16_t getScale() const { return Scale; }
  DigitsType getDigits() const { return Digits; }

  /// Convert to the given integer type.
  ///
  /// Convert to \c IntT using simple saturating arithmetic, truncating if
  /// necessary.
  template <class IntT> IntT toInt() const;

  bool isZero() const { return !Digits; }
  bool isLargest() const { return *this == getLargest(); }
  bool isOne() const {
    if (Scale > 0 || Scale <= -Width)
      return false;
    return Digits == DigitsType(1) << -Scale;
  }

  /// The log base 2, rounded.
  ///
  /// Get the lg of the scalar.  lg 0 is defined to be INT32_MIN.
  int32_t lg() const { return ScaledNumbers::getLg(Digits, Scale); }

  /// The log base 2, rounded towards INT32_MIN.
  ///
  /// Get the lg floor.  lg 0 is defined to be INT32_MIN.
  int32_t lgFloor() const { return ScaledNumbers::getLgFloor(Digits, Scale); }

  /// The log base 2, rounded towards INT32_MAX.
  ///
  /// Get the lg ceiling.  lg 0 is defined to be INT32_MIN.
  int32_t lgCeiling() const {
    return ScaledNumbers::getLgCeiling(Digits, Scale);
  }

  bool operator==(const ScaledNumber &X) const { return compare(X) == 0; }
  bool operator<(const ScaledNumber &X) const { return compare(X) < 0; }
  bool operator!=(const ScaledNumber &X) const { return compare(X) != 0; }
  bool operator>(const ScaledNumber &X) const { return compare(X) > 0; }
  bool operator<=(const ScaledNumber &X) const { return compare(X) <= 0; }
  bool operator>=(const ScaledNumber &X) const { return compare(X) >= 0; }

  bool operator!() const { return isZero(); }

  /// Convert to a decimal representation in a string.
  ///
  /// Convert to a string.  Uses scientific notation for very large/small
  /// numbers.  Scientific notation is used roughly for numbers outside of the
  /// range 2^-64 through 2^64.
  ///
  /// \c Precision indicates the number of decimal digits of precision to use;
  /// 0 requests the maximum available.
  ///
  /// As a special case to make debugging easier, if the number is small enough
  /// to convert without scientific notation and has more than \c Precision
  /// digits before the decimal place, it's printed accurately to the first
  /// digit past zero.  E.g., assuming 10 digits of precision:
  ///
  ///     98765432198.7654... => 98765432198.8
  ///      8765432198.7654... =>  8765432198.8
  ///       765432198.7654... =>   765432198.8
  ///        65432198.7654... =>    65432198.77
  ///         5432198.7654... =>     5432198.765
  std::string toString(unsigned Precision = DefaultPrecision) {
    return ScaledNumberBase::toString(Digits, Scale, Width, Precision);
  }

  /// Print a decimal representation.
  ///
  /// Print a string.  See toString for documentation.
  raw_ostream &print(raw_ostream &OS,
                     unsigned Precision = DefaultPrecision) const {
    return ScaledNumberBase::print(OS, Digits, Scale, Width, Precision);
  }
  void dump() const { return ScaledNumberBase::dump(Digits, Scale, Width); }

  ScaledNumber &operator+=(const ScaledNumber &X) {
    std::tie(Digits, Scale) =
        ScaledNumbers::getSum(Digits, Scale, X.Digits, X.Scale);
    // Check for exponent past MaxScale.
    if (Scale > ScaledNumbers::MaxScale)
      *this = getLargest();
    return *this;
  }
  ScaledNumber &operator-=(const ScaledNumber &X) {
    std::tie(Digits, Scale) =
        ScaledNumbers::getDifference(Digits, Scale, X.Digits, X.Scale);
    return *this;
  }
  ScaledNumber &operator*=(const ScaledNumber &X);
  ScaledNumber &operator/=(const ScaledNumber &X);
  ScaledNumber &operator<<=(int16_t Shift) {
    shiftLeft(Shift);
    return *this;
  }
  ScaledNumber &operator>>=(int16_t Shift) {
    shiftRight(Shift);
    return *this;
  }

private:
  void shiftLeft(int32_t Shift);
  void shiftRight(int32_t Shift);

  /// Adjust two floats to have matching exponents.
  ///
  /// Adjust \c this and \c X to have matching exponents.  Returns the new \c X
  /// by value.  Does nothing if \a isZero() for either.
  ///
  /// The value that compares smaller will lose precision, and possibly become
  /// \a isZero().
  ScaledNumber matchScales(ScaledNumber X) {
    ScaledNumbers::matchScales(Digits, Scale, X.Digits, X.Scale);
    return X;
  }

public:
  /// Scale a large number accurately.
  ///
  /// Scale N (multiply it by this).  Uses full precision multiplication, even
  /// if Width is smaller than 64, so information is not lost.
  uint64_t scale(uint64_t N) const;
  uint64_t scaleByInverse(uint64_t N) const {
    // TODO: implement directly, rather than relying on inverse.  Inverse is
    // expensive.
    return inverse().scale(N);
  }
  int64_t scale(int64_t N) const {
    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
    return joinSigned(scale(Unsigned.first), Unsigned.second);
  }
  int64_t scaleByInverse(int64_t N) const {
    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
    return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);
  }

  int compare(const ScaledNumber &X) const {
    return ScaledNumbers::compare(Digits, Scale, X.Digits, X.Scale);
  }
  int compareTo(uint64_t N) const {
    return ScaledNumbers::compare<uint64_t>(Digits, Scale, N, 0);
  }
  int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }

  ScaledNumber &invert() { return *this = ScaledNumber::get(1) / *this; }
  ScaledNumber inverse() const { return ScaledNumber(*this).invert(); }

private:
  static ScaledNumber getProduct(DigitsType LHS, DigitsType RHS) {
    return ScaledNumbers::getProduct(LHS, RHS);
  }
  static ScaledNumber getQuotient(DigitsType Dividend, DigitsType Divisor) {
    return ScaledNumbers::getQuotient(Dividend, Divisor);
  }

  static int countLeadingZerosWidth(DigitsType Digits) {
    if (Width == 64)
      return countLeadingZeros64(Digits);
    if (Width == 32)
      return countLeadingZeros32(Digits);
    return countLeadingZeros32(Digits) + Width - 32;
  }

  /// Adjust a number to width, rounding up if necessary.
  ///
  /// Should only be called for \c Shift close to zero.
  ///
  /// \pre Shift >= MinScale && Shift + 64 <= MaxScale.
  static ScaledNumber adjustToWidth(uint64_t N, int32_t Shift) {
    assert(Shift >= ScaledNumbers::MinScale && "Shift should be close to 0");
    assert(Shift <= ScaledNumbers::MaxScale - 64 &&
           "Shift should be close to 0");
    auto Adjusted = ScaledNumbers::getAdjusted<DigitsT>(N, Shift);
    return Adjusted;
  }

  static ScaledNumber getRounded(ScaledNumber P, bool Round) {
    // Saturate.
    if (P.isLargest())
      return P;

    return ScaledNumbers::getRounded(P.Digits, P.Scale, Round);
  }
};
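
// Illustrative sketch (not part of this header): the semantics documented
// above.  A value is Digits * 2^Scale, so distinct (Digits, Scale) pairs can
// denote the same scalar, arithmetic saturates rather than overflowing, and
// negative shifts flip direction.
//
//   ScaledNumber<uint64_t> A(8, 0), B(1, 3);      // both represent 8.0
//   assert(A == B);                               // no canonical form needed
//   auto Zero = ScaledNumber<uint64_t>::getZero();
//   assert((Zero / Zero).isZero());               // 0.0 / 0.0 == 0.0
//   assert((A / Zero).isLargest());               // x / 0.0 saturates
//   assert((A << -1) == (A >> 1));                // negative shift reverses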

#define SCALED_NUMBER_BOP(op, base)                                            \
  template <class DigitsT>                                                     \
  ScaledNumber<DigitsT> operator op(const ScaledNumber<DigitsT> &L,            \
                                    const ScaledNumber<DigitsT> &R) {          \
    return ScaledNumber<DigitsT>(L) base R;                                    \
  }
SCALED_NUMBER_BOP(+, += )
SCALED_NUMBER_BOP(-, -= )
SCALED_NUMBER_BOP(*, *= )
SCALED_NUMBER_BOP(/, /= )
#undef SCALED_NUMBER_BOP

template <class DigitsT>
ScaledNumber<DigitsT> operator<<(const ScaledNumber<DigitsT> &L,
                                 int16_t Shift) {
  return ScaledNumber<DigitsT>(L) <<= Shift;
}

template <class DigitsT>
ScaledNumber<DigitsT> operator>>(const ScaledNumber<DigitsT> &L,
                                 int16_t Shift) {
  return ScaledNumber<DigitsT>(L) >>= Shift;
}

template <class DigitsT>
raw_ostream &operator<<(raw_ostream &OS, const ScaledNumber<DigitsT> &X) {
  return X.print(OS, 10);
}

#define SCALED_NUMBER_COMPARE_TO_TYPE(op, T1, T2)                              \
  template <class DigitsT>                                                     \
  bool operator op(const ScaledNumber<DigitsT> &L, T1 R) {                     \
    return L.compareTo(T2(R)) op 0;                                            \
  }                                                                            \
  template <class DigitsT>                                                     \
  bool operator op(T1 L, const ScaledNumber<DigitsT> &R) {                     \
    return 0 op R.compareTo(T2(L));                                            \
  }
#define SCALED_NUMBER_COMPARE_TO(op)                                           \
  SCALED_NUMBER_COMPARE_TO_TYPE(op, uint64_t, uint64_t)                        \
  SCALED_NUMBER_COMPARE_TO_TYPE(op, uint32_t, uint64_t)                        \
  SCALED_NUMBER_COMPARE_TO_TYPE(op, int64_t, int64_t)                          \
  SCALED_NUMBER_COMPARE_TO_TYPE(op, int32_t, int64_t)
SCALED_NUMBER_COMPARE_TO(< )
SCALED_NUMBER_COMPARE_TO(> )
SCALED_NUMBER_COMPARE_TO(== )
SCALED_NUMBER_COMPARE_TO(!= )
SCALED_NUMBER_COMPARE_TO(<= )
SCALED_NUMBER_COMPARE_TO(>= )
#undef SCALED_NUMBER_COMPARE_TO
#undef SCALED_NUMBER_COMPARE_TO_TYPE

template <class DigitsT>
uint64_t ScaledNumber<DigitsT>::scale(uint64_t N) const {
  if (Width == 64 || N <= DigitsLimits::max())
    return (get(N) * *this).template toInt<uint64_t>();

  // Defer to the 64-bit version.
  return ScaledNumber<uint64_t>(Digits, Scale).scale(N);
}

template <class DigitsT>
template <class IntT>
IntT ScaledNumber<DigitsT>::toInt() const {
  typedef std::numeric_limits<IntT> Limits;
  if (*this < 1)
    return 0;
  if (*this >= Limits::max())
    return Limits::max();

  IntT N = Digits;
  if (Scale > 0) {
    assert(size_t(Scale) < sizeof(IntT) * 8);
    return N << Scale;
  }
  if (Scale < 0) {
    assert(size_t(-Scale) < sizeof(IntT) * 8);
    return N >> -Scale;
  }
  return N;
}

template <class DigitsT>
ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
operator*=(const ScaledNumber &X) {
  if (isZero())
    return *this;
  if (X.isZero())
    return *this = X;

  // Save the exponents.
  int32_t Scales = int32_t(Scale) + int32_t(X.Scale);

  // Get the raw product.
  *this = getProduct(Digits, X.Digits);

  // Combine with exponents.
  return *this <<= Scales;
}
template <class DigitsT>
ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
operator/=(const ScaledNumber &X) {
  if (isZero())
    return *this;
  if (X.isZero())
    return *this = getLargest();

  // Save the exponents.
  int32_t Scales = int32_t(Scale) - int32_t(X.Scale);

  // Get the raw quotient.
  *this = getQuotient(Digits, X.Digits);

  // Combine with exponents.
  return *this <<= Scales;
}
template <class DigitsT> void ScaledNumber<DigitsT>::shiftLeft(int32_t Shift) {
  if (!Shift || isZero())
    return;
  assert(Shift != INT32_MIN);
  if (Shift < 0) {
    shiftRight(-Shift);
    return;
  }

  // Shift as much as we can in the exponent.
  int32_t ScaleShift = std::min(Shift, ScaledNumbers::MaxScale - Scale);
  Scale += ScaleShift;
  if (ScaleShift == Shift)
    return;

  // Check this late, since it's rare.
  if (isLargest())
    return;

  // Shift the digits themselves.
  Shift -= ScaleShift;
  if (Shift > countLeadingZerosWidth(Digits)) {
    // Saturate.
    *this = getLargest();
    return;
  }

  Digits <<= Shift;
}

template <class DigitsT> void ScaledNumber<DigitsT>::shiftRight(int32_t Shift) {
  if (!Shift || isZero())
    return;
  assert(Shift != INT32_MIN);
  if (Shift < 0) {
    shiftLeft(-Shift);
    return;
  }

  // Shift as much as we can in the exponent.
  int32_t ScaleShift = std::min(Shift, Scale - ScaledNumbers::MinScale);
  Scale -= ScaleShift;
  if (ScaleShift == Shift)
    return;

  // Shift the digits themselves.
  Shift -= ScaleShift;
  if (Shift >= Width) {
    // Saturate.
    *this = getZero();
    return;
  }

  Digits >>= Shift;
}

} // end namespace llvm

#endif // LLVM_SUPPORT_SCALEDNUMBER_H
//===- FormatCommon.h - Formatters for common LLVM types --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATCOMMON_H
#define LLVM_SUPPORT_FORMATCOMMON_H

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/FormatVariadicDetails.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
enum class AlignStyle { Left, Center, Right };

struct FmtAlign {
  detail::format_adapter &Adapter;
  AlignStyle Where;
  size_t Amount;
  char Fill;

  FmtAlign(detail::format_adapter &Adapter, AlignStyle Where, size_t Amount,
           char Fill = ' ')
      : Adapter(Adapter), Where(Where), Amount(Amount), Fill(Fill) {}

  void format(raw_ostream &S, StringRef Options) {
    // If we don't need to align, we can format straight into the underlying
    // stream.  Otherwise we have to go through an intermediate stream first
    // in order to calculate how long the output is so we can align it.
    // TODO: Make the format method return the number of bytes written, that
    // way we can also skip the intermediate stream for left-aligned output.
    if (Amount == 0) {
      Adapter.format(S, Options);
      return;
    }
    SmallString<64> Item;
    raw_svector_ostream Stream(Item);

    Adapter.format(Stream, Options);
    if (Amount <= Item.size()) {
      S << Item;
      return;
    }

    size_t PadAmount = Amount - Item.size();
    switch (Where) {
    case AlignStyle::Left:
      S << Item;
      fill(S, PadAmount);
      break;
    case AlignStyle::Center: {
      size_t X = PadAmount / 2;
      fill(S, X);
      S << Item;
      fill(S, PadAmount - X);
      break;
    }
    default:
      fill(S, PadAmount);
      S << Item;
      break;
    }
  }

private:
  void fill(llvm::raw_ostream &S, uint32_t Count) {
    for (uint32_t I = 0; I < Count; ++I)
      S << Fill;
  }
};
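
// Illustrative sketch: FmtAlign is normally driven by llvm::formatv (from
// FormatVariadic.h) through its "{index,[fill]align width}" layout
// specifiers; the exact strings below assume that syntax.
//
//   formatv("{0,-10}", "hi").str();  // "hi        "  (left-aligned)
//   formatv("{0,=10}", "hi").str();  // "    hi    "  (centered)
//   formatv("{0,10}",  "hi").str();  // "        hi"  (right, the default)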
}

#endif
//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H

#include "llvm/ADT/bit.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>

namespace llvm {

/// Mathematical constants.
namespace numbers {
// TODO: Track C++20 std::numbers.
// TODO: Favor using the hexadecimal FP constants (requires C++17).
constexpr double e          = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113
                 egamma     = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620
                 ln2        = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162
                 ln10       = 2.3025850929940456840, // (0x1.26bb1bbb55516P+1) https://oeis.org/A002392
                 log2e      = 1.4426950408889634074, // (0x1.71547652b82feP+0)
                 log10e     = .43429448190325182765, // (0x1.bcb7b1526e50eP-2)
                 pi         = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796
                 inv_pi     = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541
                 sqrtpi     = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161
                 inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197
                 sqrt2      = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193
                 inv_sqrt2  = .70710678118654752440, // (0x1.6a09e667f3bcdP-1)
                 sqrt3      = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194
                 inv_sqrt3  = .57735026918962576451, // (0x1.279a74590331cP-1)
                 phi        = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622
constexpr float ef          = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113
                egammaf     = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620
                ln2f        = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162
                ln10f       = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392
                log2ef      = 1.44269504F, // (0x1.715476P+0)
                log10ef     = .434294482F, // (0x1.bcb7b2P-2)
                pif         = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796
                inv_pif     = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541
                sqrtpif     = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161
                inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197
                sqrt2f      = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193
                inv_sqrt2f  = .707106781F, // (0x1.6a09e6P-1)
                sqrt3f      = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194
                inv_sqrt3f  = .577350269F, // (0x1.279a74P-1)
                phif        = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622
} // namespace numbers

/// Create a bitmask with the N right-most bits set to 1, and all other
/// bits set to 0.  Only unsigned types are allowed.
template <typename T> T maskTrailingOnes(unsigned N) {
  static_assert(std::is_unsigned_v<T>, "Invalid type!");
  const unsigned Bits = CHAR_BIT * sizeof(T);
  assert(N <= Bits && "Invalid bit index");
  return N == 0 ? 0 : (T(-1) >> (Bits - N));
}

/// Create a bitmask with the N left-most bits set to 1, and all other
/// bits set to 0.  Only unsigned types are allowed.
template <typename T> T maskLeadingOnes(unsigned N) {
  return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N right-most bits set to 0, and all other
/// bits set to 1.  Only unsigned types are allowed.
template <typename T> T maskTrailingZeros(unsigned N) {
  return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// Create a bitmask with the N left-most bits set to 0, and all other
/// bits set to 1.  Only unsigned types are allowed.
template <typename T> T maskLeadingZeros(unsigned N) {
  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
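
// Illustrative values for the four mask helpers above (uint8_t for brevity):
//
//   maskTrailingOnes<uint8_t>(3)  == 0b00000111
//   maskLeadingOnes<uint8_t>(3)   == 0b11100000
//   maskTrailingZeros<uint8_t>(3) == 0b11111000
//   maskLeadingZeros<uint8_t>(3)  == 0b00011111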

/// Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// Reverse the bits in \p Val.
template <typename T> T reverseBits(T Val) {
#if __has_builtin(__builtin_bitreverse8)
  if constexpr (std::is_same_v<T, uint8_t>)
    return __builtin_bitreverse8(Val);
#endif
#if __has_builtin(__builtin_bitreverse16)
  if constexpr (std::is_same_v<T, uint16_t>)
    return __builtin_bitreverse16(Val);
#endif
#if __has_builtin(__builtin_bitreverse32)
  if constexpr (std::is_same_v<T, uint32_t>)
    return __builtin_bitreverse32(Val);
#endif
#if __has_builtin(__builtin_bitreverse64)
  if constexpr (std::is_same_v<T, uint64_t>)
    return __builtin_bitreverse64(Val);
#endif

  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}
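
// Example: reverseBits mirrors the bit pattern across the value's full width.
//
//   reverseBits<uint8_t>(0b00000001) == 0b10000000
//   reverseBits<uint16_t>(0x0001)    == 0x8000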

// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
  return static_cast<uint32_t>(Value >> 32);
}

/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
  return static_cast<uint32_t>(Value);
}

/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
  return ((uint64_t)High << 32) | (uint64_t)Low;
}
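
// Example round trip through the _32/_64 helpers above:
//
//   uint64_t V = Make_64(0xDEADBEEF, 0xFEEDFACE);
//   assert(Hi_32(V) == 0xDEADBEEF && Lo_32(V) == 0xFEEDFACE);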

/// Checks if an integer fits into the given bit width.
template <unsigned N> constexpr inline bool isInt(int64_t x) {
  if constexpr (N == 8)
    return static_cast<int8_t>(x) == x;
  if constexpr (N == 16)
    return static_cast<int16_t>(x) == x;
  if constexpr (N == 32)
    return static_cast<int32_t>(x) == x;
  if constexpr (N < 64)
    return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1));
  (void)x; // MSVC v19.25 warns that x is unused.
  return true;
}

/// Checks if a signed integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedInt(int64_t x) {
  static_assert(
      N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number.");
  static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide.");
  return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}

/// Checks if an unsigned integer fits into the given bit width.
template <unsigned N> constexpr inline bool isUInt(uint64_t x) {
  static_assert(N > 0, "isUInt<0> doesn't make sense");
  if constexpr (N == 8)
    return static_cast<uint8_t>(x) == x;
  if constexpr (N == 16)
    return static_cast<uint16_t>(x) == x;
  if constexpr (N == 32)
    return static_cast<uint32_t>(x) == x;
  if constexpr (N < 64)
    return x < (UINT64_C(1) << (N));
  (void)x; // MSVC v19.25 warns that x is unused.
  return true;
}

/// Checks if an unsigned integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedUInt(uint64_t x) {
  static_assert(
      N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)");
  static_assert(N + S <= 64,
                "isShiftedUInt<N, S> with N + S > 64 is too wide.");
  // Per the two static_asserts above, S must be strictly less than 64.  So
  // 1 << S is not undefined behavior.
  return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
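
// Examples for the static width checks above:
//
//   isInt<8>(127);            // true:  fits in int8_t
//   isInt<8>(128);            // false: int8_t tops out at 127
//   isUInt<8>(255);           // true
//   isShiftedUInt<4, 2>(60);  // true:  60 == 0b1111 << 2
//   isShiftedUInt<4, 2>(61);  // false: low S bits are not zero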

/// Gets the maximum value for a N-bit unsigned integer.
inline uint64_t maxUIntN(uint64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // uint64_t(1) << 64 is undefined behavior, so we can't do
  //   (uint64_t(1) << N) - 1
  // without checking first that N != 64.  But this works and doesn't have a
  // branch.
  return UINT64_MAX >> (64 - N);
}

/// Gets the minimum value for a N-bit signed integer.
inline int64_t minIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  return UINT64_C(1) + ~(UINT64_C(1) << (N - 1));
}

/// Gets the maximum value for a N-bit signed integer.
inline int64_t maxIntN(int64_t N) {
  assert(N > 0 && N <= 64 && "integer width out of range");

  // This relies on two's complement wraparound when N == 64, so we convert to
  // int64_t only at the very end to avoid UB.
  return (UINT64_C(1) << (N - 1)) - 1;
}

/// Checks if an unsigned integer fits into the given (dynamic) bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
  return N >= 64 || x <= maxUIntN(N);
}

/// Checks if a signed integer fits into the given (dynamic) bit width.
inline bool isIntN(unsigned N, int64_t x) {
  return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}
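
// Examples for the dynamic-width queries above:
//
//   maxUIntN(8) == 255 && minIntN(8) == -128 && maxIntN(8) == 127
//   isUIntN(8, 256);   // false: 256 needs 9 bits
//   isIntN(8, -128);   // true:  exactly the 8-bit minimum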

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (32 bit version).
/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// Return true if the argument is a non-empty sequence of ones starting at the
/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
  return Value && ((Value + 1) & Value) == 0;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
  return Value && isMask_32((Value - 1) | Value);
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
  return Value && isMask_64((Value - 1) | Value);
}

/// Return true if the argument is a power of two > 0.
/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
  return llvm::has_single_bit(Value);
}

/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
  return llvm::has_single_bit(Value);
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
/// If true, \p MaskIdx will specify the index of the lowest set bit and \p
/// MaskLen is updated to specify the length of the mask, else neither is
/// updated.
inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx,
                             unsigned &MaskLen) {
  if (!isShiftedMask_32(Value))
    return false;
  MaskIdx = llvm::countr_zero(Value);
  MaskLen = llvm::popcount(Value);
  return true;
}

/// Return true if the argument contains a non-empty sequence of ones with the
/// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index
/// of the lowest set bit and \p MaskLen is updated to specify the length of the
/// mask, else neither is updated.
inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx,
                             unsigned &MaskLen) {
  if (!isShiftedMask_64(Value))
    return false;
  MaskIdx = llvm::countr_zero(Value);
  MaskLen = llvm::popcount(Value);
  return true;
}
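
// Example: decomposing a shifted mask into position and length.
//
//   unsigned Idx, Len;
//   if (isShiftedMask_32(0x0000FF00u, Idx, Len)) {
//     // Idx == 8 (index of the lowest set bit), Len == 8 (number of ones).
//   }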

/// Compile time Log2.
/// Valid only for positive powers of two.
template <size_t kValue> constexpr inline size_t CTLog2() {
  static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue),
                "Value is not a valid power of 2");
  return 1 + CTLog2<kValue / 2>();
}

template <> constexpr inline size_t CTLog2<1>() { return 0; }
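
// Example: CTLog2 is usable in constant expressions.
//
//   static_assert(CTLog2<64>() == 6, "log2(64) == 6");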

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  return 31 - llvm::countl_zero(Value);
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return 63 - llvm::countl_zero(Value);
}

/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  return 32 - llvm::countl_zero(Value - 1);
}

/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - llvm::countl_zero(Value - 1);
}

/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // The largest power of 2 that divides both A and B.
  //
  // Replace "-Value" by "1+~Value" in the following commented code to avoid
  // MSVC warning C4146
  //    return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}

/// Returns the next power of two (in 64-bits) that is strictly greater than A.
/// Returns zero on overflow.
constexpr inline uint64_t NextPowerOf2(uint64_t A) {
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}

/// Returns the power of two which is greater than or equal to the given value.
/// Essentially, it is a ceil operation across the domain of powers of two.
inline uint64_t PowerOf2Ceil(uint64_t A) {
  if (!A)
    return 0;
  return NextPowerOf2(A - 1);
}
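
// Examples distinguishing the two power-of-two rounding helpers:
//
//   NextPowerOf2(4) == 8   // strictly greater
//   PowerOf2Ceil(4) == 4   // greater or equal
//   PowerOf2Ceil(5) == 8
//   PowerOf2Ceil(0) == 0   // explicit zero case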

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
///
/// Examples:
/// \code
///   alignTo(5, 8) = 8
///   alignTo(17, 8) = 24
///   alignTo(~0LL, 8) = 0
///   alignTo(321, 255) = 510
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align) {
  assert(Align != 0u && "Align can't be 0.");
  return (Value + Align - 1) / Align * Align;
}

inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) {
  assert(Align != 0 && (Align & (Align - 1)) == 0 &&
         "Align must be a power of 2");
  return (Value + Align - 1) & -Align;
}

/// If non-zero \p Skew is specified, the return value will be a minimal integer
/// that is greater than or equal to \p Value and equal to \p Align * N +
/// \p Skew for some integer N. If \p Skew is larger than \p Align, its value
/// is adjusted to '\p Skew mod \p Align'. \p Align must be non-zero.
///
/// Examples:
/// \code
///   alignTo(5, 8, 7) = 7
///   alignTo(17, 8, 1) = 17
///   alignTo(~0LL, 8, 3) = 3
///   alignTo(321, 255, 42) = 552
/// \endcode
inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return alignTo(Value - Skew, Align) + Skew;
}

/// Returns the next integer (mod 2**64) that is greater than or equal to
/// \p Value and is a multiple of \c Align. \c Align must be non-zero.
template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) {
  static_assert(Align != 0u, "Align must be non-zero");
  return (Value + Align - 1) / Align * Align;
}

/// Returns the integer ceil(Numerator / Denominator).
inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) {
  return alignTo(Numerator, Denominator) / Denominator;
}

/// Returns the integer nearest(Numerator / Denominator).
inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) {
  return (Numerator + (Denominator / 2)) / Denominator;
}
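
// Examples of the rounding division helpers:
//
//   divideCeil(10, 3)    == 4
//   divideNearest(10, 3) == 3   // 10/3 = 3.33... rounds down
//   divideNearest(11, 3) == 4   // 11/3 = 3.67... rounds up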

/// Returns the largest uint64_t that is less than or equal to \p Value and is
/// \p Skew mod \p Align. \p Align must be non-zero.
inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) {
  assert(Align != 0u && "Align can't be 0.");
  Skew %= Align;
  return (Value - Skew) / Align * Align + Skew;
}
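
// Examples pairing alignTo and alignDown, with and without a skew:
//
//   alignTo(17, 8)      == 24
//   alignDown(17, 8)    == 16
//   alignTo(17, 8, 1)   == 17   // 17 == 8 * 2 + 1 already
//   alignDown(17, 8, 1) == 17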

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 32, "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 32-bit integer.
/// Requires 0 < B <= 32.
inline int32_t SignExtend32(uint32_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 32 && "Bit width out of range.");
  return int32_t(X << (32 - B)) >> (32 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) {
  static_assert(B > 0, "Bit width can't be 0.");
  static_assert(B <= 64, "Bit width out of range.");
  return int64_t(x << (64 - B)) >> (64 - B);
}

/// Sign-extend the number in the bottom B bits of X to a 64-bit integer.
/// Requires 0 < B <= 64.
inline int64_t SignExtend64(uint64_t X, unsigned B) {
  assert(B > 0 && "Bit width can't be 0.");
  assert(B <= 64 && "Bit width out of range.");
  return int64_t(X << (64 - B)) >> (64 - B);
}
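
// Example: interpreting the low B bits of a value as a signed quantity.
//
//   SignExtend32<4>(0xF)  == -1     // 0b1111 is -1 in 4-bit two's complement
//   SignExtend32<4>(0x7)  == 7      // sign bit clear, value unchanged
//   SignExtend64(0x80, 8) == -128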

/// Subtract two unsigned integers, X and Y, of type T and return the absolute
/// value of the result.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T> AbsoluteDifference(T X, T Y) {
  return X > Y ? (X - Y) : (Y - X);
}

/// Add two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T>
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  // Hacker's Delight, p. 29
  T Z = X + Y;
  Overflowed = (Z < X || Z < Y);
  if (Overflowed)
    return std::numeric_limits<T>::max();
  else
    return Z;
}

/// Add multiple unsigned integers of type T.  Clamp the result to the
/// maximum representable value of T on overflow.
template <class T, class... Ts>
std::enable_if_t<std::is_unsigned_v<T>, T> SaturatingAdd(T X, T Y, T Z,
                                                         Ts... Args) {
  bool Overflowed = false;
  T XY = SaturatingAdd(X, Y, &Overflowed);
  if (Overflowed)
    return SaturatingAdd(std::numeric_limits<T>::max(), T(1), Args...);
  return SaturatingAdd(XY, Z, Args...);
}

/// Multiply two unsigned integers, X and Y, of type T.  Clamp the result to the
/// maximum representable value of T on overflow.  ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T>
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}
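
// Example: saturating arithmetic clamps to the type's maximum instead of
// wrapping, and optionally reports that it did so.
//
//   bool Ov;
//   SaturatingAdd<uint8_t>(200, 100, &Ov);     // 255, Ov == true
//   SaturatingMultiply<uint8_t>(16, 16, &Ov);  // 255, Ov == true
//   SaturatingMultiply<uint8_t>(15, 15, &Ov);  // 225, Ov == false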

/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
std::enable_if_t<std::is_unsigned_v<T>, T>
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  T Product = SaturatingMultiply(X, Y, &Overflowed);
  if (Overflowed)
    return Product;

  return SaturatingAdd(A, Product, &Overflowed);
}

/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
extern const float huge_valf;

/// Add two signed integers, computing the two's complement truncated result,
/// returning true if overflow occurred.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, T> AddOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_add_overflow)
  return __builtin_add_overflow(X, Y, &Result);
#else
  // Perform the unsigned addition.
  using U = std::make_unsigned_t<T>;
  const U UX = static_cast<U>(X);
  const U UY = static_cast<U>(Y);
  const U UResult = UX + UY;

  // Convert to signed.
  Result = static_cast<T>(UResult);

  // Adding two positive numbers should result in a positive number.
  if (X > 0 && Y > 0)
    return Result <= 0;
  // Adding two negatives should result in a negative number.
  if (X < 0 && Y < 0)
    return Result >= 0;
  return false;
#endif
}

/// Subtract two signed integers, computing the two's complement truncated
/// result, returning true if an overflow occurred.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, T> SubOverflow(T X, T Y, T &Result) {
#if __has_builtin(__builtin_sub_overflow)
  return __builtin_sub_overflow(X, Y, &Result);
#else
  // Perform the unsigned subtraction.
  using U = std::make_unsigned_t<T>;
  const U UX = static_cast<U>(X);
  const U UY = static_cast<U>(Y);
  const U UResult = UX - UY;

  // Convert to signed.
  Result = static_cast<T>(UResult);

  // Subtracting a positive number from a negative results in a negative number.
  if (X <= 0 && Y > 0)
    return Result >= 0;
  // Subtracting a negative number from a positive results in a positive number.
  if (X >= 0 && Y < 0)
    return Result <= 0;
  return false;
#endif
}

/// Multiply two signed integers, computing the two's complement truncated
/// result, returning true if an overflow occurred.
template <typename T>
std::enable_if_t<std::is_signed_v<T>, T> MulOverflow(T X, T Y, T &Result) {
  // Perform the unsigned multiplication on absolute values.
  using U = std::make_unsigned_t<T>;
  const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X);
  const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y);
  const U UResult = UX * UY;

  // Convert to signed.
  const bool IsNegative = (X < 0) ^ (Y < 0);
  Result = IsNegative ? (0 - UResult) : UResult;

  // If any of the args was 0, result is 0 and no overflow occurs.
  if (UX == 0 || UY == 0)
    return false;

  // UX and UY are in [1, 2^n], where n is the number of digits.
  // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for
  // positive) divided by an argument compares to the other.
  if (IsNegative)
    return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY;
  else
    return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY;
}
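
// Example: the *Overflow helpers always store the truncated two's complement
// result and return whether it overflowed.
//
//   int8_t R;
//   if (AddOverflow<int8_t>(100, 100, R)) {
//     // Overflowed: R holds the wrapped value -56.
//   }
//   if (!MulOverflow<int8_t>(10, 12, R)) {
//     // No overflow: R == 120.
//   }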

} // End llvm namespace

#endif
//===- AllocatorBase.h - Simple memory allocation abstraction ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines MallocAllocator. MallocAllocator conforms to the LLVM
/// "Allocator" concept which consists of an Allocate method accepting a size
/// and alignment, and a Deallocate accepting a pointer and size. Further, the
/// LLVM "Allocator" concept has overloads of Allocate and Deallocate for
/// setting size and alignment based on the final type. These overloads are
/// typically provided by a base class template \c AllocatorBase.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATORBASE_H
#define LLVM_SUPPORT_ALLOCATORBASE_H

#ifdef _MSC_VER
#define LLVM_ALLOCATORHOLDER_EMPTYBASE __declspec(empty_bases)
#else
#define LLVM_ALLOCATORHOLDER_EMPTYBASE
#endif // _MSC_VER

#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemAlloc.h"
#include <type_traits>

namespace llvm {

/// CRTP base class providing obvious overloads for the core \c
/// Allocate() methods of LLVM-style allocators.
///
/// This base class both documents the full public interface exposed by all
/// LLVM-style allocators, and redirects all of the overloads to a single core
/// set of methods which the derived class must define.
template <typename DerivedT> class AllocatorBase {
public:
  /// Allocate \a Size bytes of \a Alignment aligned memory. This method
  /// must be implemented by \c DerivedT.
  void *Allocate(size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                      &AllocatorBase::Allocate) !=
                      static_cast<void *(DerivedT::*)(size_t, size_t)>(
                          &DerivedT::Allocate),
                  "Class derives from AllocatorBase without implementing the "
                  "core Allocate(size_t, size_t) overload!");
#endif
    return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
  }

  /// Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size, size_t Alignment) {
#ifdef __clang__
    static_assert(
        static_cast<void (AllocatorBase::*)(const void *, size_t, size_t)>(
            &AllocatorBase::Deallocate) !=
            static_cast<void (DerivedT::*)(const void *, size_t, size_t)>(
                &DerivedT::Deallocate),
        "Class derives from AllocatorBase without implementing the "
        "core Deallocate(void *) overload!");
#endif
    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size, Alignment);
  }

  // The rest of these methods are helpers that redirect to one of the above
  // core methods.

  /// Allocate space for a sequence of objects without constructing them.
  template <typename T> T *Allocate(size_t Num = 1) {
    return static_cast<T *>(Allocate(Num * sizeof(T), alignof(T)));
  }

  /// Deallocate space for a sequence of objects without constructing them.
  template <typename T>
  std::enable_if_t<!std::is_same_v<std::remove_cv_t<T>, void>, void>
  Deallocate(T *Ptr, size_t Num = 1) {
    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T), alignof(T));
  }
};

class MallocAllocator : public AllocatorBase<MallocAllocator> {
public:
  void Reset() {}

  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, size_t Alignment) {
    return allocate_buffer(Size, Alignment);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t Size, size_t Alignment) {
    deallocate_buffer(const_cast<void *>(Ptr), Size, Alignment);
  }

  // Pull in base class overloads.
  using AllocatorBase<MallocAllocator>::Deallocate;

  void PrintStats() const {}
};
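
// Illustrative sketch (not part of this header): the typed convenience
// overloads inherited from AllocatorBase compute size and alignment from T.
//
//   MallocAllocator Alloc;
//   int *Arr = Alloc.Allocate<int>(16);  // 16 * sizeof(int), alignof(int)
//   Alloc.Deallocate(Arr, 16);           // size/alignment recomputed from T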

namespace detail {

template <typename Alloc> class AllocatorHolder : Alloc {
public:
  AllocatorHolder() = default;
  AllocatorHolder(const Alloc &A) : Alloc(A) {}
  AllocatorHolder(Alloc &&A) : Alloc(static_cast<Alloc &&>(A)) {}
  Alloc &getAllocator() { return *this; }
  const Alloc &getAllocator() const { return *this; }
};

template <typename Alloc> class AllocatorHolder<Alloc &> {
  Alloc &A;

public:
  AllocatorHolder(Alloc &A) : A(A) {}
  Alloc &getAllocator() { return A; }
  const Alloc &getAllocator() const { return A; }
};

} // namespace detail

} // namespace llvm

#endif // LLVM_SUPPORT_ALLOCATORBASE_H
//===- VirtualFileSystem.h - Virtual File System Layer ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Defines the virtual file system interface vfs::FileSystem.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_VIRTUALFILESYSTEM_H
#define LLVM_SUPPORT_VIRTUALFILESYSTEM_H

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include <cassert>
#include <cstdint>
#include <ctime>
#include <memory>
#include <optional>
#include <stack>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

namespace llvm {

class MemoryBuffer;
class MemoryBufferRef;
class Twine;

namespace vfs {

/// The result of a \p status operation.
class Status {
  std::string Name;
  llvm::sys::fs::UniqueID UID;
  llvm::sys::TimePoint<> MTime;
  uint32_t User;
  uint32_t Group;
  uint64_t Size;
  llvm::sys::fs::file_type Type = llvm::sys::fs::file_type::status_error;
  llvm::sys::fs::perms Perms;

public:
  // FIXME: remove when files support multiple names
  bool IsVFSMapped = false;

  /// Whether this entity has an external path different from the virtual path,
  /// and the external path is exposed by leaking it through the abstraction.
  /// For example, a RedirectingFileSystem will set this for paths where
  /// UseExternalName is true.
  ///
  /// FIXME: Currently the external path is exposed by replacing the virtual
  /// path in this Status object. Instead, we should leave the path in the
  /// Status intact (matching the requested virtual path) - see
  /// FileManager::getFileRef for how we plan to fix this.
  bool ExposesExternalVFSPath = false;

  Status() = default;
  Status(const llvm::sys::fs::file_status &Status);
  Status(const Twine &Name, llvm::sys::fs::UniqueID UID,
         llvm::sys::TimePoint<> MTime, uint32_t User, uint32_t Group,
         uint64_t Size, llvm::sys::fs::file_type Type,
         llvm::sys::fs::perms Perms);

  /// Get a copy of a Status with a different size.
  static Status copyWithNewSize(const Status &In, uint64_t NewSize);
  /// Get a copy of a Status with a different name.
  static Status copyWithNewName(const Status &In, const Twine &NewName);
  static Status copyWithNewName(const llvm::sys::fs::file_status &In,
                                const Twine &NewName);

  /// Returns the name that should be used for this file or directory.
  StringRef getName() const { return Name; }

  /// @name Status interface from llvm::sys::fs
  /// @{
  llvm::sys::fs::file_type getType() const { return Type; }
  llvm::sys::fs::perms getPermissions() const { return Perms; }
  llvm::sys::TimePoint<> getLastModificationTime() const { return MTime; }
  llvm::sys::fs::UniqueID getUniqueID() const { return UID; }
  uint32_t getUser() const { return User; }
  uint32_t getGroup() const { return Group; }
  uint64_t getSize() const { return Size; }
  /// @}
  /// @name Status queries
  /// These are static queries in llvm::sys::fs.
  /// @{
  bool equivalent(const Status &Other) const;
  bool isDirectory() const;
  bool isRegularFile() const;
  bool isOther() const;
  bool isSymlink() const;
  bool isStatusKnown() const;
  bool exists() const;
  /// @}
};

/// Represents an open file.
class File {
public:
  /// Destroy the file after closing it (if open).
  /// Sub-classes should generally call close() inside their destructors.  We
  /// cannot do that from the base class, since close is virtual.
  virtual ~File();

  /// Get the status of the file.
  virtual llvm::ErrorOr<Status> status() = 0;

  /// Get the name of the file
  virtual llvm::ErrorOr<std::string> getName() {
    if (auto Status = status())
      return Status->getName().str();
    else
      return Status.getError();
  }

  /// Get the contents of the file as a \p MemoryBuffer.
  virtual llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
  getBuffer(const Twine &Name, int64_t FileSize = -1,
            bool RequiresNullTerminator = true, bool IsVolatile = false) = 0;

  /// Closes the file.
  virtual std::error_code close() = 0;

  // Get the same file with a different path.
  static ErrorOr<std::unique_ptr<File>>
  getWithPath(ErrorOr<std::unique_ptr<File>> Result, const Twine &P);

protected:
  // Set the file's underlying path.
  virtual void setPath(const Twine &Path) {}
};

/// A member of a directory, yielded by a directory_iterator.
/// Only information available on most platforms is included.
class directory_entry {
  std::string Path;
  llvm::sys::fs::file_type Type = llvm::sys::fs::file_type::type_unknown;

public:
  directory_entry() = default;
  directory_entry(std::string Path, llvm::sys::fs::file_type Type)
      : Path(std::move(Path)), Type(Type) {}

  llvm::StringRef path() const { return Path; }
  llvm::sys::fs::file_type type() const { return Type; }
};

namespace detail {

/// An interface for virtual file systems to provide an iterator over the
/// (non-recursive) contents of a directory.
struct DirIterImpl {
  virtual ~DirIterImpl();

  /// Sets \c CurrentEntry to the next entry in the directory on success,
  /// to directory_entry() at end, or returns a system-defined \c error_code.
  virtual std::error_code increment() = 0;

  directory_entry CurrentEntry;
};

} // namespace detail

/// An input iterator over the entries in a virtual path, similar to
/// llvm::sys::fs::directory_iterator.
class directory_iterator {
  std::shared_ptr<detail::DirIterImpl> Impl; // Input iterator semantics on copy

public:
  directory_iterator(std::shared_ptr<detail::DirIterImpl> I)
      : Impl(std::move(I)) {
    assert(Impl.get() != nullptr && "requires non-null implementation");
    if (Impl->CurrentEntry.path().empty())
      Impl.reset(); // Normalize the end iterator to Impl == nullptr.
  }

  /// Construct an 'end' iterator.
  directory_iterator() = default;

  /// Equivalent to operator++, with an error code.
  directory_iterator &increment(std::error_code &EC) {
    assert(Impl && "attempting to increment past end");
    EC = Impl->increment();
    if (Impl->CurrentEntry.path().empty())
      Impl.reset(); // Normalize the end iterator to Impl == nullptr.
    return *this;
  }

  const directory_entry &operator*() const { return Impl->CurrentEntry; }
  const directory_entry *operator->() const { return &Impl->CurrentEntry; }

  bool operator==(const directory_iterator &RHS) const {
    if (Impl && RHS.Impl)
      return Impl->CurrentEntry.path() == RHS.Impl->CurrentEntry.path();
    return !Impl && !RHS.Impl;
  }
  bool operator!=(const directory_iterator &RHS) const {
    return !(*this == RHS);
  }
};
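
// Illustrative iteration sketch using the error-code increment protocol
// above (assumes FS is some vfs::FileSystem, declared below):
//
//   std::error_code EC;
//   for (vfs::directory_iterator I = FS->dir_begin("/dir", EC), E;
//        !EC && I != E; I.increment(EC))
//     llvm::errs() << I->path() << "\n";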

class FileSystem;

namespace detail {

/// Keeps state for the recursive_directory_iterator.
struct RecDirIterState {
  std::stack<directory_iterator, std::vector<directory_iterator>> Stack;
  bool HasNoPushRequest = false;
};

} // end namespace detail

/// An input iterator over the recursive contents of a virtual path,
/// similar to llvm::sys::fs::recursive_directory_iterator.
class recursive_directory_iterator {
  FileSystem *FS;
  std::shared_ptr<detail::RecDirIterState>
      State; // Input iterator semantics on copy.

public:
  recursive_directory_iterator(FileSystem &FS, const Twine &Path,
                               std::error_code &EC);

  /// Construct an 'end' iterator.
  recursive_directory_iterator() = default;

  /// Equivalent to operator++, with an error code.
  recursive_directory_iterator &increment(std::error_code &EC);

  const directory_entry &operator*() const { return *State->Stack.top(); }
  const directory_entry *operator->() const { return &*State->Stack.top(); }

  bool operator==(const recursive_directory_iterator &Other) const {
    return State == Other.State; // identity
  }
  bool operator!=(const recursive_directory_iterator &RHS) const {
    return !(*this == RHS);
  }

  /// Gets the current level. Starting path is at level 0.
  int level() const {
    assert(!State->Stack.empty() &&
           "Cannot get level without any iteration state");
    return State->Stack.size() - 1;
  }

  void no_push() { State->HasNoPushRequest = true; }
};

/// The virtual file system interface.
class FileSystem : public llvm::ThreadSafeRefCountedBase<FileSystem> {
public:
  virtual ~FileSystem();

  /// Get the status of the entry at \p Path, if one exists.
  virtual llvm::ErrorOr<Status> status(const Twine &Path) = 0;

  /// Get a \p File object for the file at \p Path, if one exists.
  virtual llvm::ErrorOr<std::unique_ptr<File>>
  openFileForRead(const Twine &Path) = 0;

  /// This is a convenience method that opens a file, gets its content and then
  /// closes the file.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
  getBufferForFile(const Twine &Name, int64_t FileSize = -1,
                   bool RequiresNullTerminator = true, bool IsVolatile = false);

  /// Get a directory_iterator for \p Dir.
  /// \note The 'end' iterator is directory_iterator().
  virtual directory_iterator dir_begin(const Twine &Dir,
                                       std::error_code &EC) = 0;

  /// Set the working directory. This will affect all following operations on
  /// this file system and may propagate down for nested file systems.
  virtual std::error_code setCurrentWorkingDirectory(const Twine &Path) = 0;

  /// Get the working directory of this file system.
  virtual llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const = 0;

  /// Gets the real path of \p Path, e.g. collapses all . and .. patterns and
  /// resolves symlinks. For the real file system, this uses
  /// `llvm::sys::fs::real_path`. This returns errc::operation_not_permitted
  /// if not implemented by subclass.
  virtual std::error_code getRealPath(const Twine &Path,
                                      SmallVectorImpl<char> &Output) const;

  /// Check whether a file exists. Provided for convenience.
  bool exists(const Twine &Path);

  /// Is the file mounted on a local filesystem?
  virtual std::error_code isLocal(const Twine &Path, bool &Result);

  /// Make \a Path an absolute path.
  ///
  /// Makes \a Path absolute using the current directory if it is not already.
  /// An empty \a Path will result in the current directory.
  ///
  /// /absolute/path   => /absolute/path
  /// relative/../path => <current-directory>/relative/../path
  ///
  /// \param Path A path that is modified to be an absolute path.
  /// \returns success if \a path has been made absolute, otherwise a
  ///          platform-specific error_code.
  virtual std::error_code makeAbsolute(SmallVectorImpl<char> &Path) const;

  enum class PrintType { Summary, Contents, RecursiveContents };
  void print(raw_ostream &OS, PrintType Type = PrintType::Contents,
             unsigned IndentLevel = 0) const {
    printImpl(OS, Type, IndentLevel);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const;
#endif

protected:
  virtual void printImpl(raw_ostream &OS, PrintType Type,
                         unsigned IndentLevel) const {
    printIndent(OS, IndentLevel);
    OS << "FileSystem\n";
  }

  void printIndent(raw_ostream &OS, unsigned IndentLevel) const {
    for (unsigned i = 0; i < IndentLevel; ++i)
      OS << "  ";
  }
};

/// Gets a \p vfs::FileSystem for the 'real' file system, as seen by
/// the operating system.
/// The working directory is linked to the process's working directory.
/// (This is usually thread-hostile).
IntrusiveRefCntPtr<FileSystem> getRealFileSystem();

/// Create a \p vfs::FileSystem for the 'real' file system, as seen by
/// the operating system.
/// It has its own working directory, independent of (but initially equal to)
/// that of the process.
std::unique_ptr<FileSystem> createPhysicalFileSystem();
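
// Illustrative sketch: code written against the vfs interface works with the
// real file system or any virtual layer ('process' is a placeholder):
//
//   IntrusiveRefCntPtr<vfs::FileSystem> FS = vfs::getRealFileSystem();
//   if (llvm::ErrorOr<vfs::Status> S = FS->status("/tmp/a.txt"))
//     if (S->isRegularFile())
//       if (auto Buf = FS->getBufferForFile("/tmp/a.txt"))
//         process((*Buf)->getBuffer());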

/// A file system that allows overlaying one \p FileSystem on top
/// of another.
///
/// Consists of a stack of >=1 \p FileSystem objects, which are treated as being
/// one merged file system. When there is a directory that exists in more than
/// one file system, the \p OverlayFileSystem contains a directory containing
/// the union of their contents.  The attributes (permissions, etc.) of the
/// top-most (most recently added) directory are used.  When there is a file
/// that exists in more than one file system, the file in the top-most file
/// system overrides the other(s).
class OverlayFileSystem : public FileSystem {
  using FileSystemList = SmallVector<IntrusiveRefCntPtr<FileSystem>, 1>;

  /// The stack of file systems, implemented as a list in order of
  /// their addition.
  FileSystemList FSList;

public:
  OverlayFileSystem(IntrusiveRefCntPtr<FileSystem> Base);

  /// Pushes a file system on top of the stack.
  void pushOverlay(IntrusiveRefCntPtr<FileSystem> FS);

  llvm::ErrorOr<Status> status(const Twine &Path) override;
  llvm::ErrorOr<std::unique_ptr<File>>
  openFileForRead(const Twine &Path) override;
  directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;
  llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;
  std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
  std::error_code isLocal(const Twine &Path, bool &Result) override;
  std::error_code getRealPath(const Twine &Path,
                              SmallVectorImpl<char> &Output) const override;

  using iterator = FileSystemList::reverse_iterator;
  using const_iterator = FileSystemList::const_reverse_iterator;
  using reverse_iterator = FileSystemList::iterator;
  using const_reverse_iterator = FileSystemList::const_iterator;
  using range = iterator_range<iterator>;
  using const_range = iterator_range<const_iterator>;

  /// Get an iterator pointing to the most recently added file system.
  iterator overlays_begin() { return FSList.rbegin(); }
  const_iterator overlays_begin() const { return FSList.rbegin(); }

  /// Get an iterator pointing one-past the least recently added file system.
  iterator overlays_end() { return FSList.rend(); }
  const_iterator overlays_end() const { return FSList.rend(); }

  /// Get an iterator pointing to the least recently added file system.
  reverse_iterator overlays_rbegin() { return FSList.begin(); }
  const_reverse_iterator overlays_rbegin() const { return FSList.begin(); }

  /// Get an iterator pointing one-past the most recently added file system.
  reverse_iterator overlays_rend() { return FSList.end(); }
  const_reverse_iterator overlays_rend() const { return FSList.end(); }

  range overlays_range() { return llvm::reverse(FSList); }
  const_range overlays_range() const { return llvm::reverse(FSList); }

protected:
  void printImpl(raw_ostream &OS, PrintType Type,
                 unsigned IndentLevel) const override;
};

/// By default, this delegates all calls to the underlying file system. This
/// is useful when derived file systems want to override some calls and still
/// proxy other calls.
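///
/// A sketch of a derived file system (illustrative; "LoggingFS" is a
/// hypothetical name, not an upstream class):
/// \code
///   class LoggingFS : public ProxyFileSystem {
///   public:
///     using ProxyFileSystem::ProxyFileSystem;
///     ErrorOr<Status> status(const Twine &Path) override {
///       // Add behavior here, then delegate to the underlying file system.
///       return ProxyFileSystem::status(Path);
///     }
///     // Every other operation is proxied unchanged.
///   };
/// \endcode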
class ProxyFileSystem : public FileSystem {
public:
  explicit ProxyFileSystem(IntrusiveRefCntPtr<FileSystem> FS)
      : FS(std::move(FS)) {}

  llvm::ErrorOr<Status> status(const Twine &Path) override {
    return FS->status(Path);
  }
  llvm::ErrorOr<std::unique_ptr<File>>
  openFileForRead(const Twine &Path) override {
    return FS->openFileForRead(Path);
  }
  directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override {
    return FS->dir_begin(Dir, EC);
  }
  llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
    return FS->getCurrentWorkingDirectory();
  }
  std::error_code setCurrentWorkingDirectory(const Twine &Path) override {
    return FS->setCurrentWorkingDirectory(Path);
  }
  std::error_code getRealPath(const Twine &Path,
                              SmallVectorImpl<char> &Output) const override {
    return FS->getRealPath(Path, Output);
  }
  std::error_code isLocal(const Twine &Path, bool &Result) override {
    return FS->isLocal(Path, Result);
  }

protected:
  FileSystem &getUnderlyingFS() const { return *FS; }

private:
  IntrusiveRefCntPtr<FileSystem> FS;

  virtual void anchor();
};

namespace detail {

class InMemoryDirectory;
class InMemoryNode;

struct NewInMemoryNodeInfo {
  llvm::sys::fs::UniqueID DirUID;
  StringRef Path;
  StringRef Name;
  time_t ModificationTime;
  std::unique_ptr<llvm::MemoryBuffer> Buffer;
  uint32_t User;
  uint32_t Group;
  llvm::sys::fs::file_type Type;
  llvm::sys::fs::perms Perms;

  Status makeStatus() const;
};

class NamedNodeOrError {
  ErrorOr<std::pair<llvm::SmallString<128>, const detail::InMemoryNode *>>
      Value;

public:
  NamedNodeOrError(llvm::SmallString<128> Name,
                   const detail::InMemoryNode *Node)
      : Value(std::make_pair(Name, Node)) {}
  NamedNodeOrError(std::error_code EC) : Value(EC) {}
  NamedNodeOrError(llvm::errc EC) : Value(EC) {}

  StringRef getName() const { return (*Value).first; }
  explicit operator bool() const { return static_cast<bool>(Value); }
  operator std::error_code() const { return Value.getError(); }
  std::error_code getError() const { return Value.getError(); }
  const detail::InMemoryNode *operator*() const { return (*Value).second; }
};

} // namespace detail

/// An in-memory file system.
class InMemoryFileSystem : public FileSystem {
  std::unique_ptr<detail::InMemoryDirectory> Root;
  std::string WorkingDirectory;
  bool UseNormalizedPaths = true;

  using MakeNodeFn = llvm::function_ref<std::unique_ptr<detail::InMemoryNode>(
      detail::NewInMemoryNodeInfo)>;

  /// Create node with \p MakeNode and add it into this filesystem at \p Path.
  bool addFile(const Twine &Path, time_t ModificationTime,
               std::unique_ptr<llvm::MemoryBuffer> Buffer,
               std::optional<uint32_t> User, std::optional<uint32_t> Group,
               std::optional<llvm::sys::fs::file_type> Type,
               std::optional<llvm::sys::fs::perms> Perms, MakeNodeFn MakeNode);

  /// Looks up the in-memory node for the path \p P.
  /// If \p FollowFinalSymlink is true, the returned node is guaranteed to
  /// not be a symlink and its path may differ from \p P.
  detail::NamedNodeOrError lookupNode(const Twine &P, bool FollowFinalSymlink,
                                      size_t SymlinkDepth = 0) const;

  class DirIterator;

public:
  explicit InMemoryFileSystem(bool UseNormalizedPaths = true);
  ~InMemoryFileSystem() override;

  /// Add a file (containing a buffer) or a directory to the VFS at the
  /// given path. The VFS owns the buffer. If present, User, Group, Type,
  /// and Perms apply to the newly-created file or directory.
  /// \return true if the file or directory was successfully added,
  /// false if the file or directory already exists in the file system with
  /// different contents.
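  ///
  /// A minimal usage sketch (illustrative):
  /// \code
  ///   InMemoryFileSystem FS;
  ///   bool Added = FS.addFile("/dir/file.txt", /*ModificationTime=*/0,
  ///                           MemoryBuffer::getMemBuffer("contents"));
  ///   assert(Added && "path already exists with different contents");
  /// \endcode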
  bool addFile(const Twine &Path, time_t ModificationTime,
               std::unique_ptr<llvm::MemoryBuffer> Buffer,
               std::optional<uint32_t> User = std::nullopt,
               std::optional<uint32_t> Group = std::nullopt,
               std::optional<llvm::sys::fs::file_type> Type = std::nullopt,
               std::optional<llvm::sys::fs::perms> Perms = std::nullopt);

  /// Add a hard link to a file.
  ///
  /// Hard links here are not intended to be fully equivalent to those in a
  /// classical filesystem: the hard link and the file share the same buffer
  /// and status (and thus have the same UniqueID). Because of this there is
  /// no way to distinguish between the link and the file after the link has
  /// been added.
  ///
  /// The \p Target path must be an existing file or a hardlink. The
  /// \p NewLink file must not have been added before. The \p Target
  /// path must not be a directory. The \p NewLink node is added as a hard
  /// link which points to the resolved file of \p Target node.
  /// \return true if the above condition is satisfied and hardlink was
  /// successfully created, false otherwise.
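  ///
  /// A minimal usage sketch (illustrative):
  /// \code
  ///   InMemoryFileSystem FS;
  ///   FS.addFile("/a", /*ModificationTime=*/0,
  ///              MemoryBuffer::getMemBuffer("x"));
  ///   FS.addHardLink(/*NewLink=*/"/b", /*Target=*/"/a");
  ///   // "/a" and "/b" now share one buffer and one UniqueID.
  /// \endcode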
  bool addHardLink(const Twine &NewLink, const Twine &Target);

  /// Arbitrary max depth to search through symlinks. We can get into problems
  /// if a link links to a link that links back to the link, for example.
  static constexpr size_t MaxSymlinkDepth = 16;

  /// Add a symbolic link. Unlike a hard link, \p Target doesn't need to
  /// refer to a file (or to refer to anything at all). Also, an in-memory
  /// directory for \p Target isn't automatically created.
  bool
  addSymbolicLink(const Twine &NewLink, const Twine &Target,
                  time_t ModificationTime,
                  std::optional<uint32_t> User = std::nullopt,
                  std::optional<uint32_t> Group = std::nullopt,
                  std::optional<llvm::sys::fs::perms> Perms = std::nullopt);

  /// Add a buffer to the VFS with a path. The VFS does not own the buffer.
  /// If present, User, Group, Type and Perms apply to the newly-created file
  /// or directory.
  /// \return true if the file or directory was successfully added,
  /// false if the file or directory already exists in the file system with
  /// different contents.
  bool addFileNoOwn(const Twine &Path, time_t ModificationTime,
                    const llvm::MemoryBufferRef &Buffer,
                    std::optional<uint32_t> User = std::nullopt,
                    std::optional<uint32_t> Group = std::nullopt,
                    std::optional<llvm::sys::fs::file_type> Type = std::nullopt,
                    std::optional<llvm::sys::fs::perms> Perms = std::nullopt);

  std::string toString() const;

  /// Return true if this file system normalizes . and .. in paths.
  bool useNormalizedPaths() const { return UseNormalizedPaths; }

  llvm::ErrorOr<Status> status(const Twine &Path) override;
  llvm::ErrorOr<std::unique_ptr<File>>
  openFileForRead(const Twine &Path) override;
  directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;

  llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override {
    return WorkingDirectory;
  }
  /// Canonicalizes \p Path by combining it with the current working
  /// directory and normalizing the path (e.g. removing dot components). If
  /// the current working directory is not set, this returns
  /// errc::operation_not_permitted.
  ///
  /// This doesn't resolve symlinks, as they are not supported by the
  /// in-memory file system.
  std::error_code getRealPath(const Twine &Path,
                              SmallVectorImpl<char> &Output) const override;
  std::error_code isLocal(const Twine &Path, bool &Result) override;
  std::error_code setCurrentWorkingDirectory(const Twine &Path) override;

protected:
  void printImpl(raw_ostream &OS, PrintType Type,
                 unsigned IndentLevel) const override;
};

/// Get a globally unique ID for a virtual file or directory.
llvm::sys::fs::UniqueID getNextVirtualUniqueID();

/// Gets a \p FileSystem for a virtual file system described in YAML
/// format.
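///
/// A minimal usage sketch (illustrative; error handling elided). Passing a
/// null DiagHandler falls back to SourceMgr's default diagnostics:
/// \code
///   auto Buffer = MemoryBuffer::getFile("overlay.yaml");
///   if (!Buffer)
///     return;
///   std::unique_ptr<FileSystem> VFS = getVFSFromYAML(
///       std::move(*Buffer), /*DiagHandler=*/nullptr, "overlay.yaml");
/// \endcode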
std::unique_ptr<FileSystem>
getVFSFromYAML(std::unique_ptr<llvm::MemoryBuffer> Buffer,
               llvm::SourceMgr::DiagHandlerTy DiagHandler,
               StringRef YAMLFilePath, void *DiagContext = nullptr,
               IntrusiveRefCntPtr<FileSystem> ExternalFS = getRealFileSystem());

struct YAMLVFSEntry {
  template <typename T1, typename T2>
  YAMLVFSEntry(T1 &&VPath, T2 &&RPath, bool IsDirectory = false)
      : VPath(std::forward<T1>(VPath)), RPath(std::forward<T2>(RPath)),
        IsDirectory(IsDirectory) {}
  std::string VPath;
  std::string RPath;
  bool IsDirectory = false;
};

class RedirectingFSDirIterImpl;
class RedirectingFileSystemParser;

/// A virtual file system parsed from a YAML file.
///
/// Currently, this class allows creating virtual files and directories. Virtual
/// files map to existing external files in \c ExternalFS, and virtual
/// directories may either map to existing directories in \c ExternalFS or list
/// their contents in the form of other virtual directories and/or files.
///
/// The basic structure of the parsed file is:
/// \verbatim
/// {
///   'version': <version number>,
///   <optional configuration>
///   'roots': [
///              <directory entries>
///            ]
/// }
/// \endverbatim
///
/// The roots may be absolute or relative. If relative, they will be made
/// absolute against either the current working directory or the directory
/// where the Overlay YAML file is located, depending on the 'root-relative'
/// configuration.
///
/// All configuration options are optional.
///   'case-sensitive': <boolean, default=(true for Posix, false for Windows)>
///   'use-external-names': <boolean, default=true>
///   'root-relative': <string, one of 'cwd' or 'overlay-dir', default='cwd'>
///   'overlay-relative': <boolean, default=false>
///   'fallthrough': <boolean, default=true, deprecated - use 'redirecting-with'
///                   instead>
///   'redirecting-with': <string, one of 'fallthrough', 'fallback', or
///                        'redirect-only', default='fallthrough'>
///
/// To clarify, the 'root-relative' option will prepend the current working
/// directory, or the overlay directory, to the 'roots->name' field only if
/// 'roots->name' is a relative path. On the other hand, when 'overlay-relative'
/// is set to 'true', external paths will always be prepended with the overlay
/// directory, even if external paths are not relative paths. The
/// 'root-relative' option has no interaction with the 'overlay-relative'
/// option.
///
/// Virtual directories that list their contents are represented as
/// \verbatim
/// {
///   'type': 'directory',
///   'name': <string>,
///   'contents': [ <file or directory entries> ]
/// }
/// \endverbatim
///
/// The default attributes for such virtual directories are:
/// \verbatim
/// MTime = now() when created
/// Perms = 0777
/// User = Group = 0
/// Size = 0
/// UniqueID = unspecified unique value
/// \endverbatim
///
/// When a path prefix matches such a directory, the next component in the path
/// is matched against the entries in the 'contents' array.
///
/// Re-mapped directories, on the other hand, are represented as
/// \verbatim
/// {
///   'type': 'directory-remap',
///   'name': <string>,
///   'use-external-name': <boolean>, # Optional
///   'external-contents': <path to external directory>
/// }
/// \endverbatim
///
/// and inherit their attributes from the external directory. When a path
/// prefix matches such an entry, the unmatched components are appended to the
/// 'external-contents' path, and the resulting path is looked up in the
/// external file system instead.
///
/// Re-mapped files are represented as
/// \verbatim
/// {
///   'type': 'file',
///   'name': <string>,
///   'use-external-name': <boolean>, # Optional
///   'external-contents': <path to external file>
/// }
/// \endverbatim
///
/// Their attributes and file contents are determined by looking up the file at
/// their 'external-contents' path in the external file system.
///
/// For 'file', 'directory' and 'directory-remap' entries the 'name' field may
/// contain multiple path components (e.g. /path/to/file). However, any
/// directory in such a path that contains more than one child must be uniquely
/// represented by a 'directory' entry.
///
/// When the 'use-external-name' field is set, calls to \a vfs::File::status()
/// give the external (remapped) filesystem name instead of the name the file
/// was accessed by. This is an intentional leak through the \a
/// RedirectingFileSystem abstraction layer. It enables clients to discover
/// (and use) the external file location when communicating with users or tools
/// that don't use the same VFS overlay.
///
/// FIXME: 'use-external-name' causes behaviour that's inconsistent with how
/// "real" filesystems behave. Maybe there should be a separate channel for
/// this information.
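///
/// A complete minimal overlay file, for illustration (the external path
/// '/real/a.h' is a placeholder):
/// \verbatim
/// {
///   'version': 0,
///   'roots': [
///     { 'type': 'directory', 'name': '/virtual',
///       'contents': [
///         { 'type': 'file', 'name': 'a.h',
///           'external-contents': '/real/a.h' }
///       ]
///     }
///   ]
/// }
/// \endverbatim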
class RedirectingFileSystem : public vfs::FileSystem {
public:
  enum EntryKind { EK_Directory, EK_DirectoryRemap, EK_File };
  enum NameKind { NK_NotSet, NK_External, NK_Virtual };

  /// The type of redirection to perform.
  enum class RedirectKind {
    /// Look up the redirected path first (i.e. the one specified in
    /// 'external-contents') and if that fails, "fallthrough" to a lookup of
    /// the originally provided path.
    Fallthrough,
    /// Look up the provided path first and, if that fails, "fallback" to a
    /// lookup of the redirected path.
    Fallback,
    /// Only look up the redirected path; do not look up the originally
    /// provided path.
    RedirectOnly
  };

  /// The type of relative path used by Roots.
  enum class RootRelativeKind {
    /// The roots are relative to the current working directory.
    CWD,
    /// The roots are relative to the directory where the Overlay YAML file
    /// is located.
    OverlayDir
  };

  /// A single file or directory in the VFS.
  class Entry {
    EntryKind Kind;
    std::string Name;

  public:
    Entry(EntryKind K, StringRef Name) : Kind(K), Name(Name) {}
    virtual ~Entry() = default;

    StringRef getName() const { return Name; }
    EntryKind getKind() const { return Kind; }
  };

  /// A directory in the vfs with explicitly specified contents.
  class DirectoryEntry : public Entry {
    std::vector<std::unique_ptr<Entry>> Contents;
    Status S;

  public:
    /// Constructs a directory entry with explicitly specified contents.
    DirectoryEntry(StringRef Name, std::vector<std::unique_ptr<Entry>> Contents,
                   Status S)
        : Entry(EK_Directory, Name), Contents(std::move(Contents)),
          S(std::move(S)) {}

    /// Constructs an empty directory entry.
    DirectoryEntry(StringRef Name, Status S)
        : Entry(EK_Directory, Name), S(std::move(S)) {}

    Status getStatus() { return S; }

    void addContent(std::unique_ptr<Entry> Content) {
      Contents.push_back(std::move(Content));
    }

    Entry *getLastContent() const { return Contents.back().get(); }

    using iterator = decltype(Contents)::iterator;

    iterator contents_begin() { return Contents.begin(); }
    iterator contents_end() { return Contents.end(); }

    static bool classof(const Entry *E) { return E->getKind() == EK_Directory; }
  };

  /// A file or directory in the vfs that is mapped to a file or directory in
  /// the external filesystem.
  class RemapEntry : public Entry {
    std::string ExternalContentsPath;
    NameKind UseName;

  protected:
    RemapEntry(EntryKind K, StringRef Name, StringRef ExternalContentsPath,
               NameKind UseName)
        : Entry(K, Name), ExternalContentsPath(ExternalContentsPath),
          UseName(UseName) {}

  public:
    StringRef getExternalContentsPath() const { return ExternalContentsPath; }

    /// Whether to use the external path as the name for this file or directory.
    bool useExternalName(bool GlobalUseExternalName) const {
      return UseName == NK_NotSet ? GlobalUseExternalName
                                  : (UseName == NK_External);
    }

    NameKind getUseName() const { return UseName; }

    static bool classof(const Entry *E) {
      switch (E->getKind()) {
      case EK_DirectoryRemap:
        [[fallthrough]];
      case EK_File:
        return true;
      case EK_Directory:
        return false;
      }
      llvm_unreachable("invalid entry kind");
    }
  };

  /// A directory in the vfs that maps to a directory in the external file
  /// system.
  class DirectoryRemapEntry : public RemapEntry {
  public:
    DirectoryRemapEntry(StringRef Name, StringRef ExternalContentsPath,
                        NameKind UseName)
        : RemapEntry(EK_DirectoryRemap, Name, ExternalContentsPath, UseName) {}

    static bool classof(const Entry *E) {
      return E->getKind() == EK_DirectoryRemap;
    }
  };

  /// A file in the vfs that maps to a file in the external file system.
  class FileEntry : public RemapEntry {
  public:
    FileEntry(StringRef Name, StringRef ExternalContentsPath, NameKind UseName)
        : RemapEntry(EK_File, Name, ExternalContentsPath, UseName) {}

    static bool classof(const Entry *E) { return E->getKind() == EK_File; }
  };

  /// Represents the result of a path lookup into the RedirectingFileSystem.
  struct LookupResult {
    /// Chain of parent directory entries for \c E.
    llvm::SmallVector<Entry *, 32> Parents;

    /// The entry the looked-up path corresponds to.
    Entry *E;

  private:
    /// When the found Entry is a DirectoryRemapEntry, stores the path in the
    /// external file system that the looked-up path in the virtual file system
    /// corresponds to.
    std::optional<std::string> ExternalRedirect;

  public:
    LookupResult(Entry *E, sys::path::const_iterator Start,
                 sys::path::const_iterator End);

    /// If the found Entry maps the input path to a path in the external
    /// file system (i.e. it is a FileEntry or DirectoryRemapEntry), returns
    /// that path.
    std::optional<StringRef> getExternalRedirect() const {
      if (isa<DirectoryRemapEntry>(E))
        return StringRef(*ExternalRedirect);
      if (auto *FE = dyn_cast<FileEntry>(E))
        return FE->getExternalContentsPath();
      return std::nullopt;
    }

    /// Get the (canonical) path of the found entry. This uses the as-written
    /// path components from the VFS specification.
    void getPath(llvm::SmallVectorImpl<char> &Path) const;
  };

private:
  friend class RedirectingFSDirIterImpl;
  friend class RedirectingFileSystemParser;

  /// Canonicalize \p Path by removing ".", "..", and "./" components. This
  /// is a VFS request, so symlinks in the path components are not resolved;
  /// the path is only canonicalized in order to perform the correct entry
  /// search.
  std::error_code makeCanonical(SmallVectorImpl<char> &Path) const;

  /// Get the File status, or error, from the underlying external file system.
  /// This returns the status with the originally requested name, while looking
  /// up the entry using the canonical path.
  ErrorOr<Status> getExternalStatus(const Twine &CanonicalPath,
                                    const Twine &OriginalPath) const;

  /// Make \a Path an absolute path.
  ///
  /// Makes \a Path absolute using the \a WorkingDir if it is not already.
  ///
  /// /absolute/path   => /absolute/path
  /// relative/../path => <WorkingDir>/relative/../path
  ///
  /// \param WorkingDir  A path that will be used as the base Dir if \a Path
  ///                    is not already absolute.
  /// \param Path A path that is modified to be an absolute path.
  /// \returns success if \a path has been made absolute, otherwise a
  ///          platform-specific error_code.
  std::error_code makeAbsolute(StringRef WorkingDir,
                               SmallVectorImpl<char> &Path) const;

  // In a RedirectingFileSystem, keys can be specified in Posix or Windows
  // style (or even a mixture of both), so this comparison helper allows
  // slashes (representing a root) to match backslashes (and vice versa).  Note
  // that, other than the root, path components should not contain slashes or
  // backslashes.
  bool pathComponentMatches(llvm::StringRef lhs, llvm::StringRef rhs) const {
    if ((CaseSensitive ? lhs.equals(rhs) : lhs.equals_insensitive(rhs)))
      return true;
    return (lhs == "/" && rhs == "\\") || (lhs == "\\" && rhs == "/");
  }

  /// The root(s) of the virtual file system.
  std::vector<std::unique_ptr<Entry>> Roots;

  /// The current working directory of the file system.
  std::string WorkingDirectory;

  /// The file system to use for external references.
  IntrusiveRefCntPtr<FileSystem> ExternalFS;

  /// This represents the directory path where the YAML file is located.
  /// This will be prefixed to each 'external-contents' if IsRelativeOverlay
  /// is set. This will also be prefixed to each 'roots->name' if RootRelative
  /// is set to RootRelativeKind::OverlayDir and the path is relative.
  std::string OverlayFileDir;

  /// @name Configuration
  /// @{

  /// Whether to perform case-sensitive comparisons.
  ///
  /// Currently, case-insensitive matching only works correctly with ASCII.
  bool CaseSensitive = is_style_posix(sys::path::Style::native);

  /// IsRelativeOverlay marks whether the OverlayFileDir path must
  /// be prefixed in every 'external-contents' when reading from YAML files.
  bool IsRelativeOverlay = false;

  /// Whether to use the value of 'external-contents' for the
  /// names of files.  This global value is overridable on a per-file basis.
  bool UseExternalNames = true;

  /// Determines the lookups to perform, as well as their order. See
  /// \c RedirectKind for details.
  RedirectKind Redirection = RedirectKind::Fallthrough;

  /// Determine the prefix directory if the roots are relative paths. See
  /// \c RootRelativeKind for details.
  RootRelativeKind RootRelative = RootRelativeKind::CWD;
  /// @}

  RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS);

  /// Looks up the path <tt>[Start, End)</tt> in \p From, possibly recursing
  /// into the contents of \p From if it is a directory. Returns a LookupResult
  /// giving the matched entry and, if that entry is a FileEntry or
  /// DirectoryRemapEntry, the path it redirects to in the external file system.
  ErrorOr<LookupResult>
  lookupPathImpl(llvm::sys::path::const_iterator Start,
                 llvm::sys::path::const_iterator End, Entry *From,
                 llvm::SmallVectorImpl<Entry *> &Entries) const;

  /// Get the status for a path with the provided \c LookupResult.
  ErrorOr<Status> status(const Twine &CanonicalPath, const Twine &OriginalPath,
                         const LookupResult &Result);

public:
  /// Looks up \p Path in \c Roots and returns a LookupResult giving the
  /// matched entry and, if the entry was a FileEntry or DirectoryRemapEntry,
  /// the path it redirects to in the external file system.
  ErrorOr<LookupResult> lookupPath(StringRef Path) const;

  /// Parses \p Buffer, which is expected to be in YAML format and
  /// returns a virtual file system representing its contents.
  static std::unique_ptr<RedirectingFileSystem>
  create(std::unique_ptr<MemoryBuffer> Buffer,
         SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
         void *DiagContext, IntrusiveRefCntPtr<FileSystem> ExternalFS);

  /// Redirect each of the remapped files from first to second.
  static std::unique_ptr<RedirectingFileSystem>
  create(ArrayRef<std::pair<std::string, std::string>> RemappedFiles,
         bool UseExternalNames, FileSystem &ExternalFS);

  ErrorOr<Status> status(const Twine &Path) override;
  ErrorOr<std::unique_ptr<File>> openFileForRead(const Twine &Path) override;

  std::error_code getRealPath(const Twine &Path,
                              SmallVectorImpl<char> &Output) const override;

  llvm::ErrorOr<std::string> getCurrentWorkingDirectory() const override;

  std::error_code setCurrentWorkingDirectory(const Twine &Path) override;

  std::error_code isLocal(const Twine &Path, bool &Result) override;

  std::error_code makeAbsolute(SmallVectorImpl<char> &Path) const override;

  directory_iterator dir_begin(const Twine &Dir, std::error_code &EC) override;

  void setOverlayFileDir(StringRef PrefixDir);

  StringRef getOverlayFileDir() const;

  /// Sets the redirection kind to \c Fallthrough if true, or \c RedirectOnly
  /// otherwise. Will be removed in the future; use \c setRedirection instead.
  void setFallthrough(bool Fallthrough);

  void setRedirection(RedirectingFileSystem::RedirectKind Kind);

  std::vector<llvm::StringRef> getRoots() const;

  void printEntry(raw_ostream &OS, Entry *E, unsigned IndentLevel = 0) const;

protected:
  void printImpl(raw_ostream &OS, PrintType Type,
                 unsigned IndentLevel) const override;
};

/// Collect all pairs of <virtual path, real path> entries from the
/// \p YAMLFilePath. This is used by the module dependency collector to forward
/// the entries into the reproducer output VFS YAML file.
void collectVFSFromYAML(
    std::unique_ptr<llvm::MemoryBuffer> Buffer,
    llvm::SourceMgr::DiagHandlerTy DiagHandler, StringRef YAMLFilePath,
    SmallVectorImpl<YAMLVFSEntry> &CollectedEntries,
    void *DiagContext = nullptr,
    IntrusiveRefCntPtr<FileSystem> ExternalFS = getRealFileSystem());

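/// Writer for VFS overlay YAML files. A minimal usage sketch (illustrative,
/// not part of the upstream header):
/// \code
///   YAMLVFSWriter Writer;
///   Writer.addFileMapping("/virtual/a.h", "/real/a.h");
///   Writer.addDirectoryMapping("/virtual/dir", "/real/dir");
///   Writer.write(llvm::outs());
/// \endcode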
class YAMLVFSWriter {
  std::vector<YAMLVFSEntry> Mappings;
  std::optional<bool> IsCaseSensitive;
  std::optional<bool> IsOverlayRelative;
  std::optional<bool> UseExternalNames;
  std::string OverlayDir;

  void addEntry(StringRef VirtualPath, StringRef RealPath, bool IsDirectory);

public:
  YAMLVFSWriter() = default;

  void addFileMapping(StringRef VirtualPath, StringRef RealPath);
  void addDirectoryMapping(StringRef VirtualPath, StringRef RealPath);

  void setCaseSensitivity(bool CaseSensitive) {
    IsCaseSensitive = CaseSensitive;
  }

  void setUseExternalNames(bool UseExtNames) { UseExternalNames = UseExtNames; }

  void setOverlayDir(StringRef OverlayDirectory) {
    IsOverlayRelative = true;
    OverlayDir.assign(OverlayDirectory.str());
  }

  const std::vector<YAMLVFSEntry> &getMappings() const { return Mappings; }

  void write(llvm::raw_ostream &OS);
};

} // namespace vfs
} // namespace llvm

#endif // LLVM_SUPPORT_VIRTUALFILESYSTEM_H
//===--- Printable.h - Print function helpers -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the Printable struct.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PRINTABLE_H
#define LLVM_SUPPORT_PRINTABLE_H

#include <functional>
#include <utility>

namespace llvm {

class raw_ostream;

/// Simple wrapper around std::function<void(raw_ostream&)>.
/// This class is useful to construct print helpers for raw_ostream.
///
/// Example:
///     Printable printRegister(unsigned Register) {
///       return Printable([Register](raw_ostream &OS) {
///         OS << getRegisterName(Register);
///       });
///     }
///     ... OS << printRegister(Register); ...
///
/// Implementation note: Ideally this would just be a typedef, but doing so
/// leads to operator << being ambiguous, as std::function has matching
/// constructors in some STL versions. I have seen the problem on gcc 4.6
/// libstdc++ and the Microsoft STL.
class Printable {
public:
  std::function<void(raw_ostream &OS)> Print;
  Printable(std::function<void(raw_ostream &OS)> Print)
      : Print(std::move(Print)) {}
};

inline raw_ostream &operator<<(raw_ostream &OS, const Printable &P) {
  P.Print(OS);
  return OS;
}

} // namespace llvm

#endif
//===-- llvm/Support/thread.h - Wrapper for <thread> ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header is a wrapper for <thread> that works around problems with the
// MSVC headers when exceptions are disabled. It also provides llvm::thread,
// which is either a typedef of std::thread or a replacement that calls the
// function synchronously depending on the value of LLVM_ENABLE_THREADS.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_THREAD_H
#define LLVM_SUPPORT_THREAD_H

#include "llvm/Config/llvm-config.h"
#include <optional>

#ifdef _WIN32
typedef unsigned long DWORD;
typedef void *PVOID;
typedef PVOID HANDLE;
#endif

#if LLVM_ENABLE_THREADS

#include <thread>

namespace llvm {

#if LLVM_ON_UNIX || _WIN32

/// LLVM thread following the std::thread interface, with an added constructor
/// to specify the stack size.
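///
/// A minimal usage sketch (illustrative). The stack size is passed as an
/// explicit std::optional so the stack-size overload is selected:
/// \code
///   llvm::thread T(std::optional<unsigned>(8 * 1024 * 1024),
///                  [] { /* work that may need a large stack */ });
///   T.join();
/// \endcode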
class thread {
  template <typename CalleeTuple> static void GenericThreadProxy(void *Ptr) {
    std::unique_ptr<CalleeTuple> Callee(static_cast<CalleeTuple *>(Ptr));
    std::apply(
        [](auto &&F, auto &&...Args) {
          std::forward<decltype(F)>(F)(std::forward<decltype(Args)>(Args)...);
        },
        *Callee);
  }

public:
#if LLVM_ON_UNIX
  using native_handle_type = pthread_t;
  using id = pthread_t;
  using start_routine_type = void *(*)(void *);

  template <typename CalleeTuple> static void *ThreadProxy(void *Ptr) {
    GenericThreadProxy<CalleeTuple>(Ptr);
    return nullptr;
  }
#elif _WIN32
  using native_handle_type = HANDLE;
  using id = DWORD;
  using start_routine_type = unsigned(__stdcall *)(void *);

  template <typename CalleeTuple>
  static unsigned __stdcall ThreadProxy(void *Ptr) {
    GenericThreadProxy<CalleeTuple>(Ptr);
    return 0;
  }
#endif

  static const std::optional<unsigned> DefaultStackSize;

  thread() : Thread(native_handle_type()) {}
  thread(thread &&Other) noexcept
      : Thread(std::exchange(Other.Thread, native_handle_type())) {}

  template <class Function, class... Args>
  explicit thread(Function &&f, Args &&...args)
      : thread(DefaultStackSize, f, args...) {}

  template <class Function, class... Args>
  explicit thread(std::optional<unsigned> StackSizeInBytes, Function &&f,
                  Args &&...args);
  thread(const thread &) = delete;

  ~thread() {
    if (joinable())
      std::terminate();
  }

  thread &operator=(thread &&Other) noexcept {
    if (joinable())
      std::terminate();
    Thread = std::exchange(Other.Thread, native_handle_type());
    return *this;
  }

  bool joinable() const noexcept { return Thread != native_handle_type(); }

  inline id get_id() const noexcept;

  native_handle_type native_handle() const noexcept { return Thread; }

  static unsigned hardware_concurrency() {
    return std::thread::hardware_concurrency();
  };

  inline void join();
  inline void detach();

  void swap(llvm::thread &Other) noexcept { std::swap(Thread, Other.Thread); }

private:
  native_handle_type Thread;
};

thread::native_handle_type
llvm_execute_on_thread_impl(thread::start_routine_type ThreadFunc, void *Arg,
                            std::optional<unsigned> StackSizeInBytes);
void llvm_thread_join_impl(thread::native_handle_type Thread);
void llvm_thread_detach_impl(thread::native_handle_type Thread);
thread::id llvm_thread_get_id_impl(thread::native_handle_type Thread);
thread::id llvm_thread_get_current_id_impl();

template <class Function, class... Args>
thread::thread(std::optional<unsigned> StackSizeInBytes, Function &&f,
               Args &&...args) {
  typedef std::tuple<std::decay_t<Function>, std::decay_t<Args>...> CalleeTuple;
  std::unique_ptr<CalleeTuple> Callee(
      new CalleeTuple(std::forward<Function>(f), std::forward<Args>(args)...));

  Thread = llvm_execute_on_thread_impl(ThreadProxy<CalleeTuple>, Callee.get(),
                                       StackSizeInBytes);
  if (Thread != native_handle_type())
    Callee.release();
}

thread::id thread::get_id() const noexcept {
  return llvm_thread_get_id_impl(Thread);
}

void thread::join() {
  llvm_thread_join_impl(Thread);
  Thread = native_handle_type();
}

void thread::detach() {
  llvm_thread_detach_impl(Thread);
  Thread = native_handle_type();
}

namespace this_thread {
inline thread::id get_id() { return llvm_thread_get_current_id_impl(); }
} // namespace this_thread

#else // !LLVM_ON_UNIX && !_WIN32

/// An std::thread-backed implementation of the llvm::thread interface that
/// ignores the stack size request.
class thread {
public:
  using native_handle_type = std::thread::native_handle_type;
  using id = std::thread::id;

  thread() : Thread(std::thread()) {}
  thread(thread &&Other) noexcept
      : Thread(std::exchange(Other.Thread, std::thread())) {}

  template <class Function, class... Args>
  explicit thread(std::optional<unsigned> StackSizeInBytes, Function &&f,
                  Args &&...args)
      : Thread(std::forward<Function>(f), std::forward<Args>(args)...) {}

  template <class Function, class... Args>
  explicit thread(Function &&f, Args &&...args) : Thread(f, args...) {}

  thread(const thread &) = delete;

  ~thread() {}

  thread &operator=(thread &&Other) noexcept {
    Thread = std::exchange(Other.Thread, std::thread());
    return *this;
  }

  bool joinable() const noexcept { return Thread.joinable(); }

  id get_id() const noexcept { return Thread.get_id(); }

  native_handle_type native_handle() noexcept { return Thread.native_handle(); }

  static unsigned hardware_concurrency() {
    return std::thread::hardware_concurrency();
  };

  inline void join() { Thread.join(); }
  inline void detach() { Thread.detach(); }

  void swap(llvm::thread &Other) noexcept { std::swap(Thread, Other.Thread); }

private:
  std::thread Thread;
};

namespace this_thread {
  inline thread::id get_id() { return std::this_thread::get_id(); }
}

#endif // LLVM_ON_UNIX || _WIN32

} // namespace llvm

#else // !LLVM_ENABLE_THREADS

#include <utility>

namespace llvm {

struct thread {
  thread() {}
  thread(thread &&other) {}
  template <class Function, class... Args>
  explicit thread(std::optional<unsigned> StackSizeInBytes, Function &&f,
                  Args &&...args) {
    f(std::forward<Args>(args)...);
  }
  template <class Function, class... Args>
  explicit thread(Function &&f, Args &&...args) {
    f(std::forward<Args>(args)...);
  }
  thread(const thread &) = delete;

  void detach() {
    report_fatal_error("Detaching from a thread does not make sense with no "
                       "threading support");
  }
  void join() {}
  static unsigned hardware_concurrency() { return 1; };
};

} // namespace llvm

#endif // LLVM_ENABLE_THREADS

#endif // LLVM_SUPPORT_THREAD_H
//==- raw_sha1_ostream.h - raw_ostream that computes SHA1       --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the raw_sha1_ostream class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RAW_SHA1_OSTREAM_H
#define LLVM_SUPPORT_RAW_SHA1_OSTREAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/SHA1.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// A raw_ostream that hashes its content using the SHA-1 algorithm.
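///
/// A minimal usage sketch (illustrative):
/// \code
///   raw_sha1_ostream OS;
///   OS << "some bytes";
///   std::array<uint8_t, 20> Digest = OS.sha1(); // flushes, then hashes
/// \endcode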
class raw_sha1_ostream : public raw_ostream {
  SHA1 State;

  /// See raw_ostream::write_impl.
  void write_impl(const char *Ptr, size_t Size) override {
    State.update(ArrayRef<uint8_t>((const uint8_t *)Ptr, Size));
  }

public:
  /// Return the current SHA1 hash for the content of the stream.
  std::array<uint8_t, 20> sha1() {
    flush();
    return State.result();
  }

  /// Reset the internal state to start over from scratch.
  void resetHash() { State.init(); }

  uint64_t current_pos() const override { return 0; }
};

} // end llvm namespace

#endif
//===-- llvm/Support/PluginLoader.h - Plugin Loader for Tools ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A tool can #include this file to get a -load option that allows the user to
// load arbitrary shared objects into the tool's address space.  Note that this
// header can only be included by a program ONCE, so it should never be used by
// library authors.
//
//===----------------------------------------------------------------------===//
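//
// A minimal usage sketch from a tool's main() (illustrative):
//
//   #include "llvm/Support/PluginLoader.h" // declares the -load option
//
//   int main(int argc, char **argv) {
//     llvm::cl::ParseCommandLineOptions(argc, argv);
//     // Any shared objects passed via -load have now been loaded.
//   }
//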

#ifndef LLVM_SUPPORT_PLUGINLOADER_H
#define LLVM_SUPPORT_PLUGINLOADER_H

#ifndef DONT_GET_PLUGIN_LOADER_OPTION
#include "llvm/Support/CommandLine.h"
#endif

#include <string>

namespace llvm {
  struct PluginLoader {
    void operator=(const std::string &Filename);
    static unsigned getNumPlugins();
    static std::string& getPlugin(unsigned num);
  };

#ifndef DONT_GET_PLUGIN_LOADER_OPTION
  // This causes operator= above to be invoked for every -load option.
  static cl::opt<PluginLoader, false, cl::parser<std::string>>
      LoadOpt("load", cl::value_desc("pluginfilename"),
              cl::desc("Load the specified plugin"));
#endif
}

#endif
//===- llvm/Support/COM.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Provides a library for accessing COM functionality of the Host OS.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_COM_H
#define LLVM_SUPPORT_COM_H

namespace llvm {
namespace sys {

enum class COMThreadingMode { SingleThreaded, MultiThreaded };

class InitializeCOMRAII {
public:
  explicit InitializeCOMRAII(COMThreadingMode Threading,
                             bool SpeedOverMemory = false);
  ~InitializeCOMRAII();

private:
  InitializeCOMRAII(const InitializeCOMRAII &) = delete;
  void operator=(const InitializeCOMRAII &) = delete;
};
}
}

#endif
//===- InstructionCost.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines an InstructionCost class that is used when calculating
/// the cost of an instruction, or a group of instructions. In addition to a
/// numeric value representing the cost the class also contains a state that
/// can be used to encode particular properties, such as a cost being invalid.
/// Operations on InstructionCost implement saturation arithmetic, so that
/// accumulating costs on large cost-values don't overflow.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_INSTRUCTIONCOST_H
#define LLVM_SUPPORT_INSTRUCTIONCOST_H

#include "llvm/Support/MathExtras.h"
#include <limits>
#include <optional>

namespace llvm {

class raw_ostream;

class InstructionCost {
public:
  using CostType = int64_t;

  /// CostState describes the state of a cost.
  enum CostState {
    Valid,  ///< The cost value represents a valid cost, even when the
            ///< cost-value is large.
    Invalid ///< Invalid indicates there is no way to represent the cost as a
            ///< numeric value. This state exists to represent a possible
            ///< issue, e.g. if the cost-model knows the operation cannot be
            ///< expanded into a valid code-sequence by the code-generator.
            ///< While some passes may assert that the calculated cost must
            ///< be valid, it is up to individual passes how to interpret an
            ///< Invalid cost. For example, a transformation pass could choose
            ///< not to perform a transformation if the resulting cost would
            ///< end up Invalid. Because some passes may assert a cost is
            ///< Valid, it is not recommended to use Invalid costs to model
            ///< 'Unknown'.
            ///< Note that Invalid is semantically different from a (very)
            ///< high, but valid cost, which intentionally indicates no issue,
            ///< but rather a strong preference not to select a certain
            ///< operation.
  };

private:
  CostType Value = 0;
  CostState State = Valid;

  void propagateState(const InstructionCost &RHS) {
    if (RHS.State == Invalid)
      State = Invalid;
  }

  static CostType getMaxValue() { return std::numeric_limits<CostType>::max(); }
  static CostType getMinValue() { return std::numeric_limits<CostType>::min(); }

public:
  // A default constructed InstructionCost is a valid zero cost
  InstructionCost() = default;

  InstructionCost(CostState) = delete;
  InstructionCost(CostType Val) : Value(Val), State(Valid) {}

  static InstructionCost getMax() { return getMaxValue(); }
  static InstructionCost getMin() { return getMinValue(); }
  static InstructionCost getInvalid(CostType Val = 0) {
    InstructionCost Tmp(Val);
    Tmp.setInvalid();
    return Tmp;
  }

  bool isValid() const { return State == Valid; }
  void setValid() { State = Valid; }
  void setInvalid() { State = Invalid; }
  CostState getState() const { return State; }

  /// This function is intended to be used as sparingly as possible, since the
  /// class provides the full range of operator support required for arithmetic
  /// and comparisons.
  std::optional<CostType> getValue() const {
    if (isValid())
      return Value;
    return std::nullopt;
  }

  /// For all of the arithmetic operators provided here any invalid state is
  /// perpetuated and cannot be removed. Once a cost becomes invalid it stays
  /// invalid, and it also inherits any invalid state from the RHS.
  /// Arithmetic on the actual values is implemented with saturation,
  /// to avoid overflow when using more extreme cost values.
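  ///
  /// An illustrative sketch of both properties:
  /// \code
  ///   InstructionCost A = InstructionCost::getMax();
  ///   A += 1;                                  // saturates at getMax()
  ///   InstructionCost B = A + InstructionCost::getInvalid();
  ///   assert(!B.isValid());                    // invalid state is sticky
  /// \endcode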

  InstructionCost &operator+=(const InstructionCost &RHS) {
    propagateState(RHS);

    // Saturating addition.
    InstructionCost::CostType Result;
    if (AddOverflow(Value, RHS.Value, Result))
      Result = RHS.Value > 0 ? getMaxValue() : getMinValue();

    Value = Result;
    return *this;
  }

  InstructionCost &operator+=(const CostType RHS) {
    InstructionCost RHS2(RHS);
    *this += RHS2;
    return *this;
  }

  InstructionCost &operator-=(const InstructionCost &RHS) {
    propagateState(RHS);

    // Saturating subtract.
    InstructionCost::CostType Result;
    if (SubOverflow(Value, RHS.Value, Result))
      Result = RHS.Value > 0 ? getMinValue() : getMaxValue();
    Value = Result;
    return *this;
  }

  InstructionCost &operator-=(const CostType RHS) {
    InstructionCost RHS2(RHS);
    *this -= RHS2;
    return *this;
  }

  InstructionCost &operator*=(const InstructionCost &RHS) {
    propagateState(RHS);

    // Saturating multiply.
    InstructionCost::CostType Result;
    if (MulOverflow(Value, RHS.Value, Result)) {
      if ((Value > 0 && RHS.Value > 0) || (Value < 0 && RHS.Value < 0))
        Result = getMaxValue();
      else
        Result = getMinValue();
    }

    Value = Result;
    return *this;
  }

  InstructionCost &operator*=(const CostType RHS) {
    InstructionCost RHS2(RHS);
    *this *= RHS2;
    return *this;
  }

  InstructionCost &operator/=(const InstructionCost &RHS) {
    propagateState(RHS);
    Value /= RHS.Value;
    return *this;
  }

  InstructionCost &operator/=(const CostType RHS) {
    InstructionCost RHS2(RHS);
    *this /= RHS2;
    return *this;
  }

  InstructionCost &operator++() {
    *this += 1;
    return *this;
  }

  InstructionCost operator++(int) {
    InstructionCost Copy = *this;
    ++*this;
    return Copy;
  }

  InstructionCost &operator--() {
    *this -= 1;
    return *this;
  }

  InstructionCost operator--(int) {
    InstructionCost Copy = *this;
    --*this;
    return Copy;
  }

  /// For the comparison operators we have chosen to use lexicographical
  /// ordering where valid costs are always considered to be less than invalid
  /// costs. This avoids having to add asserts to the comparison operators that
  /// the states are valid and users can test for validity of the cost
  /// explicitly.
  bool operator<(const InstructionCost &RHS) const {
    if (State != RHS.State)
      return State < RHS.State;
    return Value < RHS.Value;
  }

  // Implement in terms of operator< to ensure that the two comparisons stay in
  // sync
  bool operator==(const InstructionCost &RHS) const {
    return !(*this < RHS) && !(RHS < *this);
  }

  bool operator!=(const InstructionCost &RHS) const { return !(*this == RHS); }

  bool operator==(const CostType RHS) const {
    InstructionCost RHS2(RHS);
    return *this == RHS2;
  }

  bool operator!=(const CostType RHS) const { return !(*this == RHS); }

  bool operator>(const InstructionCost &RHS) const { return RHS < *this; }

  bool operator<=(const InstructionCost &RHS) const { return !(RHS < *this); }

  bool operator>=(const InstructionCost &RHS) const { return !(*this < RHS); }

  bool operator<(const CostType RHS) const {
    InstructionCost RHS2(RHS);
    return *this < RHS2;
  }

  bool operator>(const CostType RHS) const {
    InstructionCost RHS2(RHS);
    return *this > RHS2;
  }

  bool operator<=(const CostType RHS) const {
    InstructionCost RHS2(RHS);
    return *this <= RHS2;
  }

  bool operator>=(const CostType RHS) const {
    InstructionCost RHS2(RHS);
    return *this >= RHS2;
  }

  void print(raw_ostream &OS) const;

  template <class Function>
  auto map(const Function &F) const -> InstructionCost {
    if (isValid())
      return F(Value);
    return getInvalid();
  }
};

inline InstructionCost operator+(const InstructionCost &LHS,
                                 const InstructionCost &RHS) {
  InstructionCost LHS2(LHS);
  LHS2 += RHS;
  return LHS2;
}

inline InstructionCost operator-(const InstructionCost &LHS,
                                 const InstructionCost &RHS) {
  InstructionCost LHS2(LHS);
  LHS2 -= RHS;
  return LHS2;
}

inline InstructionCost operator*(const InstructionCost &LHS,
                                 const InstructionCost &RHS) {
  InstructionCost LHS2(LHS);
  LHS2 *= RHS;
  return LHS2;
}

inline InstructionCost operator/(const InstructionCost &LHS,
                                 const InstructionCost &RHS) {
  InstructionCost LHS2(LHS);
  LHS2 /= RHS;
  return LHS2;
}

inline raw_ostream &operator<<(raw_ostream &OS, const InstructionCost &V) {
  V.print(OS);
  return OS;
}

} // namespace llvm

#endif
//===- llvm/Support/FileSystem.h - File System OS Concept -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::fs namespace. It is designed after
// TR2/boost filesystem (v3), but modified to remove exception handling and the
// path class.
//
// All functions return an error_code and their actual work via the last out
// argument. The out argument is defined if and only if errc::success is
// returned. A function may return any error code in the generic or system
// category. However, they shall be equivalent to any error conditions listed
// in each function's respective documentation if the condition applies. [ note:
// this does not guarantee that error_code will be in the set of explicitly
// listed codes, but it does guarantee that if any of the explicitly listed
// errors occur, the correct error_code will be used ]. All functions may
// return errc::not_enough_memory if there is not enough memory to complete the
// operation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FILESYSTEM_H
#define LLVM_SUPPORT_FILESYSTEM_H

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem/UniqueID.h"
#include "llvm/Support/MD5.h"
#include <cassert>
#include <cstdint>
#include <ctime>
#include <memory>
#include <stack>
#include <string>
#include <system_error>
#include <vector>

#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

namespace llvm {
namespace sys {
namespace fs {

#if defined(_WIN32)
// A Win32 HANDLE is a typedef of void*
using file_t = void *;
#else
using file_t = int;
#endif

extern const file_t kInvalidFile;

/// An enumeration for the file system's view of the type.
enum class file_type {
  status_error,
  file_not_found,
  regular_file,
  directory_file,
  symlink_file,
  block_file,
  character_file,
  fifo_file,
  socket_file,
  type_unknown
};

/// space_info - Disk space information for a volume: total capacity, free
/// space, and space available to the calling process, all in bytes.
struct space_info {
  uint64_t capacity;
  uint64_t free;
  uint64_t available;
};

enum perms {
  no_perms = 0,
  owner_read = 0400,
  owner_write = 0200,
  owner_exe = 0100,
  owner_all = owner_read | owner_write | owner_exe,
  group_read = 040,
  group_write = 020,
  group_exe = 010,
  group_all = group_read | group_write | group_exe,
  others_read = 04,
  others_write = 02,
  others_exe = 01,
  others_all = others_read | others_write | others_exe,
  all_read = owner_read | group_read | others_read,
  all_write = owner_write | group_write | others_write,
  all_exe = owner_exe | group_exe | others_exe,
  all_all = owner_all | group_all | others_all,
  set_uid_on_exe = 04000,
  set_gid_on_exe = 02000,
  sticky_bit = 01000,
  all_perms = all_all | set_uid_on_exe | set_gid_on_exe | sticky_bit,
  perms_not_known = 0xFFFF
};

// Helper functions so that you can use & and | to manipulate perms bits:
inline perms operator|(perms l, perms r) {
  return static_cast<perms>(static_cast<unsigned short>(l) |
                            static_cast<unsigned short>(r));
}
inline perms operator&(perms l, perms r) {
  return static_cast<perms>(static_cast<unsigned short>(l) &
                            static_cast<unsigned short>(r));
}
inline perms &operator|=(perms &l, perms r) {
  l = l | r;
  return l;
}
inline perms &operator&=(perms &l, perms r) {
  l = l & r;
  return l;
}
inline perms operator~(perms x) {
  // Avoid UB by explicitly truncating the (unsigned) ~ result.
  return static_cast<perms>(
      static_cast<unsigned short>(~static_cast<unsigned short>(x)));
}

/// Represents the result of a call to directory_iterator::status(). This is a
/// subset of the information returned by a regular sys::fs::status() call, and
/// represents the information provided by Windows FileFirstFile/FindNextFile.
class basic_file_status {
protected:
  #if defined(LLVM_ON_UNIX)
  time_t fs_st_atime = 0;
  time_t fs_st_mtime = 0;
  uint32_t fs_st_atime_nsec = 0;
  uint32_t fs_st_mtime_nsec = 0;
  uid_t fs_st_uid = 0;
  gid_t fs_st_gid = 0;
  off_t fs_st_size = 0;
  #elif defined (_WIN32)
  uint32_t LastAccessedTimeHigh = 0;
  uint32_t LastAccessedTimeLow = 0;
  uint32_t LastWriteTimeHigh = 0;
  uint32_t LastWriteTimeLow = 0;
  uint32_t FileSizeHigh = 0;
  uint32_t FileSizeLow = 0;
  #endif
  file_type Type = file_type::status_error;
  perms Perms = perms_not_known;

public:
  basic_file_status() = default;

  explicit basic_file_status(file_type Type) : Type(Type) {}

  #if defined(LLVM_ON_UNIX)
  basic_file_status(file_type Type, perms Perms, time_t ATime,
                    uint32_t ATimeNSec, time_t MTime, uint32_t MTimeNSec,
                    uid_t UID, gid_t GID, off_t Size)
      : fs_st_atime(ATime), fs_st_mtime(MTime),
        fs_st_atime_nsec(ATimeNSec), fs_st_mtime_nsec(MTimeNSec),
        fs_st_uid(UID), fs_st_gid(GID),
        fs_st_size(Size), Type(Type), Perms(Perms) {}
#elif defined(_WIN32)
  basic_file_status(file_type Type, perms Perms, uint32_t LastAccessTimeHigh,
                    uint32_t LastAccessTimeLow, uint32_t LastWriteTimeHigh,
                    uint32_t LastWriteTimeLow, uint32_t FileSizeHigh,
                    uint32_t FileSizeLow)
      : LastAccessedTimeHigh(LastAccessTimeHigh),
        LastAccessedTimeLow(LastAccessTimeLow),
        LastWriteTimeHigh(LastWriteTimeHigh),
        LastWriteTimeLow(LastWriteTimeLow), FileSizeHigh(FileSizeHigh),
        FileSizeLow(FileSizeLow), Type(Type), Perms(Perms) {}
  #endif

  // getters
  file_type type() const { return Type; }
  perms permissions() const { return Perms; }

  /// The file access time as reported from the underlying file system.
  ///
  /// Also see comments on \c getLastModificationTime() related to the precision
  /// of the returned value.
  TimePoint<> getLastAccessedTime() const;

  /// The file modification time as reported from the underlying file system.
  ///
  /// The returned value allows for nanosecond precision but the actual
  /// resolution is an implementation detail of the underlying file system.
  /// There is no guarantee for what kind of resolution you can expect, the
  /// resolution can differ across platforms and even across mountpoints on the
  /// same machine.
  TimePoint<> getLastModificationTime() const;

  #if defined(LLVM_ON_UNIX)
  uint32_t getUser() const { return fs_st_uid; }
  uint32_t getGroup() const { return fs_st_gid; }
  uint64_t getSize() const { return fs_st_size; }
  #elif defined (_WIN32)
  uint32_t getUser() const {
    return 9999; // Not applicable to Windows, so...
  }

  uint32_t getGroup() const {
    return 9999; // Not applicable to Windows, so...
  }

  uint64_t getSize() const {
    return (uint64_t(FileSizeHigh) << 32) + FileSizeLow;
  }
  #endif

  // setters
  void type(file_type v) { Type = v; }
  void permissions(perms p) { Perms = p; }
};

/// Represents the result of a call to sys::fs::status().
class file_status : public basic_file_status {
  friend bool equivalent(file_status A, file_status B);

  #if defined(LLVM_ON_UNIX)
  dev_t fs_st_dev = 0;
  nlink_t fs_st_nlinks = 0;
  ino_t fs_st_ino = 0;
  #elif defined (_WIN32)
  uint32_t NumLinks = 0;
  uint32_t VolumeSerialNumber = 0;
  uint32_t FileIndexHigh = 0;
  uint32_t FileIndexLow = 0;
  #endif

public:
  file_status() = default;

  explicit file_status(file_type Type) : basic_file_status(Type) {}

  #if defined(LLVM_ON_UNIX)
  file_status(file_type Type, perms Perms, dev_t Dev, nlink_t Links, ino_t Ino,
              time_t ATime, uint32_t ATimeNSec,
              time_t MTime, uint32_t MTimeNSec,
              uid_t UID, gid_t GID, off_t Size)
      : basic_file_status(Type, Perms, ATime, ATimeNSec, MTime, MTimeNSec,
                          UID, GID, Size),
        fs_st_dev(Dev), fs_st_nlinks(Links), fs_st_ino(Ino) {}
  #elif defined(_WIN32)
  file_status(file_type Type, perms Perms, uint32_t LinkCount,
              uint32_t LastAccessTimeHigh, uint32_t LastAccessTimeLow,
              uint32_t LastWriteTimeHigh, uint32_t LastWriteTimeLow,
              uint32_t VolumeSerialNumber, uint32_t FileSizeHigh,
              uint32_t FileSizeLow, uint32_t FileIndexHigh,
              uint32_t FileIndexLow)
      : basic_file_status(Type, Perms, LastAccessTimeHigh, LastAccessTimeLow,
                          LastWriteTimeHigh, LastWriteTimeLow, FileSizeHigh,
                          FileSizeLow),
        NumLinks(LinkCount), VolumeSerialNumber(VolumeSerialNumber),
        FileIndexHigh(FileIndexHigh), FileIndexLow(FileIndexLow) {}
  #endif

  UniqueID getUniqueID() const;
  uint32_t getLinkCount() const;
};

/// @}
/// @name Physical Operators
/// @{

/// Make \a path an absolute path.
///
/// Makes \a path absolute using the \a current_directory if it is not already.
/// An empty \a path will result in the \a current_directory.
///
/// /absolute/path   => /absolute/path
/// relative/../path => <current-directory>/relative/../path
///
/// @param path A path that is modified to be an absolute path.
void make_absolute(const Twine &current_directory, SmallVectorImpl<char> &path);

/// Make \a path an absolute path.
///
/// Makes \a path absolute using the current directory if it is not already. An
/// empty \a path will result in the current directory.
///
/// /absolute/path   => /absolute/path
/// relative/../path => <current-directory>/relative/../path
///
/// @param path A path that is modified to be an absolute path.
/// @returns errc::success if \a path has been made absolute, otherwise a
///          platform-specific error_code.
std::error_code make_absolute(SmallVectorImpl<char> &path);

/// Create all the non-existent directories in path.
///
/// @param path Directories to create.
/// @returns errc::success if is_directory(path), otherwise a platform
///          specific error_code. If IgnoreExisting is false, also returns
///          error if the directory already existed.
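///
/// For illustration (not part of the contract), a typical call might be:
/// \code
///   if (std::error_code EC = sys::fs::create_directories("a/b/c"))
///     errs() << "cannot create a/b/c: " << EC.message() << '\n';
/// \endcode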
std::error_code create_directories(const Twine &path,
                                   bool IgnoreExisting = true,
                                   perms Perms = owner_all | group_all);

/// Create the directory in path.
///
/// @param path Directory to create.
/// @returns errc::success if is_directory(path), otherwise a platform
///          specific error_code. If IgnoreExisting is false, also returns
///          error if the directory already existed.
std::error_code create_directory(const Twine &path, bool IgnoreExisting = true,
                                 perms Perms = owner_all | group_all);

/// Create a link from \a from to \a to.
///
/// The link may be a soft or a hard link, depending on the platform. The
/// caller may not assume which one. Currently on Windows it creates a hard
/// link, since soft links require extra privileges. On Unix, it creates a
/// soft link, since hard links don't work on SMB file systems.
///
/// @param to The path to hard link to.
/// @param from The path to hard link from. This is created.
/// @returns errc::success if the link was created, otherwise a platform
/// specific error_code.
std::error_code create_link(const Twine &to, const Twine &from);

/// Create a hard link from \a from to \a to, or return an error.
///
/// @param to The path to hard link to.
/// @param from The path to hard link from. This is created.
/// @returns errc::success if the link was created, otherwise a platform
/// specific error_code.
std::error_code create_hard_link(const Twine &to, const Twine &from);

/// Collapse all . and .. patterns, resolve all symlinks, and optionally
///        expand ~ expressions to the user's home directory.
///
/// @param path The path to resolve.
/// @param output The location to store the resolved path.
/// @param expand_tilde If true, resolves ~ expressions to the user's home
///                     directory.
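///
/// An illustrative call (the path is a made-up example):
/// \code
///   SmallString<256> Real;
///   if (std::error_code EC = sys::fs::real_path("~/project/../data", Real,
///                                               /*expand_tilde=*/true))
///     return EC;
///   // Real now holds the canonical absolute path.
/// \endcode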
std::error_code real_path(const Twine &path, SmallVectorImpl<char> &output,
                          bool expand_tilde = false);

/// Expands ~ expressions to the user's home directory. On Unix, ~user
/// expressions are resolved to the named user's home directory as well.
///
/// @param path The path to resolve.
/// @param output The location to store the resolved path.
void expand_tilde(const Twine &path, SmallVectorImpl<char> &output);

/// Get the current path.
///
/// @param result Holds the current path on return.
/// @returns errc::success if the current path has been stored in result,
///          otherwise a platform-specific error_code.
std::error_code current_path(SmallVectorImpl<char> &result);

/// Set the current path.
///
/// @param path The path to set.
/// @returns errc::success if the current path was successfully set,
///          otherwise a platform-specific error_code.
std::error_code set_current_path(const Twine &path);

/// Remove path. Equivalent to POSIX remove().
///
/// @param path Input path.
/// @returns errc::success if path has been removed or didn't exist, otherwise a
///          platform-specific error code. If IgnoreNonExisting is false, also
///          returns error if the file didn't exist.
std::error_code remove(const Twine &path, bool IgnoreNonExisting = true);

/// Recursively delete a directory.
///
/// @param path Input path.
/// @returns errc::success if path has been removed or didn't exist, otherwise a
///          platform-specific error code.
std::error_code remove_directories(const Twine &path, bool IgnoreErrors = true);

/// Rename \a from to \a to.
///
/// Files are renamed as if by POSIX rename(), except that on Windows there may
/// be a short interval of time during which the destination file does not
/// exist.
///
/// @param from The path to rename from.
/// @param to The path to rename to. This is created.
std::error_code rename(const Twine &from, const Twine &to);

/// Copy the contents of \a From to \a To.
///
/// @param From The path to copy from.
/// @param To The path to copy to. This is created.
std::error_code copy_file(const Twine &From, const Twine &To);

/// Copy the contents of \a From to \a To.
///
/// @param From The path to copy from.
/// @param ToFD The open file descriptor of the destination file.
std::error_code copy_file(const Twine &From, int ToFD);

/// Resize path to size. File is resized as if by POSIX truncate().
///
/// @param FD Input file descriptor.
/// @param Size Size to resize to.
/// @returns errc::success if \a path has been resized to \a size, otherwise a
///          platform-specific error_code.
std::error_code resize_file(int FD, uint64_t Size);

/// Resize \p FD to \p Size before mapping \a mapped_file_region::readwrite. On
/// non-Windows, this calls \a resize_file(). On Windows, this is a no-op,
/// since the subsequent mapping (via \c CreateFileMapping) automatically
/// extends the file.
inline std::error_code resize_file_before_mapping_readwrite(int FD,
                                                            uint64_t Size) {
#ifdef _WIN32
  (void)FD;
  (void)Size;
  return std::error_code();
#else
  return resize_file(FD, Size);
#endif
}

/// Compute an MD5 hash of a file's contents.
///
/// @param FD Input file descriptor.
/// @returns An MD5Result with the hash computed, if successful, otherwise a
///          std::error_code.
ErrorOr<MD5::MD5Result> md5_contents(int FD);

/// Version of md5_contents that doesn't require an open file descriptor.
ErrorOr<MD5::MD5Result> md5_contents(const Twine &Path);

/// @}
/// @name Physical Observers
/// @{

/// Does file exist?
///
/// @param status A basic_file_status previously returned from stat.
/// @returns True if the file represented by status exists, false if it does
///          not.
bool exists(const basic_file_status &status);

enum class AccessMode { Exist, Write, Execute };

/// Can the file be accessed?
///
/// @param Path Input path.
/// @returns errc::success if the path can be accessed, otherwise a
///          platform-specific error_code.
std::error_code access(const Twine &Path, AccessMode Mode);

/// Does file exist?
///
/// @param Path Input path.
/// @returns True if it exists, false otherwise.
inline bool exists(const Twine &Path) {
  return !access(Path, AccessMode::Exist);
}

/// Can we execute this file?
///
/// @param Path Input path.
/// @returns True if we can execute it, false otherwise.
bool can_execute(const Twine &Path);

/// Can we write this file?
///
/// @param Path Input path.
/// @returns True if we can write to it, false otherwise.
inline bool can_write(const Twine &Path) {
  return !access(Path, AccessMode::Write);
}

/// Do file_status's represent the same thing?
///
/// @param A Input file_status.
/// @param B Input file_status.
///
/// assert(status_known(A) || status_known(B));
///
/// @returns True if A and B both represent the same file system entity, false
///          otherwise.
bool equivalent(file_status A, file_status B);

/// Do paths represent the same thing?
///
/// assert(status_known(A) || status_known(B));
///
/// @param A Input path A.
/// @param B Input path B.
/// @param result Set to true if stat(A) and stat(B) have the same device and
///               inode (or equivalent).
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code equivalent(const Twine &A, const Twine &B, bool &result);

/// Simpler version of equivalent for clients that don't need to
///        differentiate between an error and false.
inline bool equivalent(const Twine &A, const Twine &B) {
  bool result;
  return !equivalent(A, B, result) && result;
}

/// Is the file mounted on a local filesystem?
///
/// @param path Input path.
/// @param result Set to true if \a path is on fixed media such as a hard disk,
///               false if it is not.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform specific error_code.
std::error_code is_local(const Twine &path, bool &result);

/// Version of is_local accepting an open file descriptor.
std::error_code is_local(int FD, bool &result);

/// Simpler version of is_local for clients that don't need to
///        differentiate between an error and false.
inline bool is_local(const Twine &Path) {
  bool Result;
  return !is_local(Path, Result) && Result;
}

/// Simpler version of is_local accepting an open file descriptor for
///        clients that don't need to differentiate between an error and false.
inline bool is_local(int FD) {
  bool Result;
  return !is_local(FD, Result) && Result;
}

/// Get the type of a file.
///
/// @param Path The path to get the type of.
/// @param Follow For symbolic links, indicates whether to return the file type
///               of the link itself, or of the target.
/// @returns A value from the file_type enumeration indicating the type of file.
file_type get_file_type(const Twine &Path, bool Follow = true);

/// Does status represent a directory?
///
/// @param status A basic_file_status previously returned from status.
/// @returns status.type() == file_type::directory_file.
bool is_directory(const basic_file_status &status);

/// Is path a directory?
///
/// @param path Input path.
/// @param result Set to true if \a path is a directory (after following
///               symlinks), false if it is not. Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code is_directory(const Twine &path, bool &result);

/// Simpler version of is_directory for clients that don't need to
///        differentiate between an error and false.
inline bool is_directory(const Twine &Path) {
  bool Result;
  return !is_directory(Path, Result) && Result;
}

/// Does status represent a regular file?
///
/// @param status A basic_file_status previously returned from status.
/// @returns status_known(status) && status.type() == file_type::regular_file.
bool is_regular_file(const basic_file_status &status);

/// Is path a regular file?
///
/// @param path Input path.
/// @param result Set to true if \a path is a regular file (after following
///               symlinks), false if it is not. Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code is_regular_file(const Twine &path, bool &result);

/// Simpler version of is_regular_file for clients that don't need to
///        differentiate between an error and false.
inline bool is_regular_file(const Twine &Path) {
  bool Result;
  if (is_regular_file(Path, Result))
    return false;
  return Result;
}

/// Does status represent a symlink file?
///
/// @param status A basic_file_status previously returned from status.
/// @returns status_known(status) && status.type() == file_type::symlink_file.
bool is_symlink_file(const basic_file_status &status);

/// Is path a symlink file?
///
/// @param path Input path.
/// @param result Set to true if \a path is a symlink file, false if it is not.
///               Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code is_symlink_file(const Twine &path, bool &result);

/// Simpler version of is_symlink_file for clients that don't need to
///        differentiate between an error and false.
inline bool is_symlink_file(const Twine &Path) {
  bool Result;
  if (is_symlink_file(Path, Result))
    return false;
  return Result;
}

/// Does this status represent something that exists but is not a
///        directory or regular file?
///
/// @param status A basic_file_status previously returned from status.
/// @returns exists(s) && !is_regular_file(s) && !is_directory(s)
bool is_other(const basic_file_status &status);

/// Is path something that exists but is not a directory,
///        regular file, or symlink?
///
/// @param path Input path.
/// @param result Set to true if \a path exists, but is not a directory, regular
///               file, or a symlink, false if it does not. Undefined otherwise.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code is_other(const Twine &path, bool &result);

/// Get file status as if by POSIX stat().
///
/// @param path Input path.
/// @param result Set to the file status.
/// @param follow When true, follows symlinks.  Otherwise, the symlink itself is
///               statted.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
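///
/// For illustration, a typical query of a file's size might look like:
/// \code
///   sys::fs::file_status Status;
///   if (std::error_code EC = sys::fs::status("/etc/hosts", Status))
///     errs() << "stat failed: " << EC.message() << '\n';
///   else
///     errs() << "size: " << Status.getSize() << '\n';
/// \endcode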
std::error_code status(const Twine &path, file_status &result,
                       bool follow = true);

/// A version for when a file descriptor is already available.
std::error_code status(int FD, file_status &Result);

#ifdef _WIN32
/// A version for when a file descriptor is already available.
std::error_code status(file_t FD, file_status &Result);
#endif

/// Get file creation mode mask of the process.
///
/// @returns Mask reported by umask(2)
/// @note There is no umask on Windows. This function always returns 0 on
///       Windows. It does not return an error_code because umask(2) never
///       fails. It is not thread safe.
unsigned getUmask();

/// Set file permissions.
///
/// @param Path File to set permissions on.
/// @param Permissions New file permissions.
/// @returns errc::success if the permissions were successfully set, otherwise
///          a platform-specific error_code.
/// @note On Windows, all permissions except *_write are ignored. Using any of
///       owner_write, group_write, or all_write will make the file writable.
///       Otherwise, the file will be marked as read-only.
std::error_code setPermissions(const Twine &Path, perms Permissions);

/// Version of setPermissions accepting a file descriptor.
/// TODO Delete the path based overload once we implement the FD based overload
/// on Windows.
std::error_code setPermissions(int FD, perms Permissions);

/// Get file permissions.
///
/// @param Path File to get permissions from.
/// @returns the permissions if they were successfully retrieved, otherwise a
///          platform-specific error_code.
/// @note On Windows, if the file does not have the FILE_ATTRIBUTE_READONLY
///       attribute, all_all will be returned. Otherwise, all_read | all_exe
///       will be returned.
ErrorOr<perms> getPermissions(const Twine &Path);

/// Get file size.
///
/// @param Path Input path.
/// @param Result Set to the size of the file in \a Path.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
inline std::error_code file_size(const Twine &Path, uint64_t &Result) {
  file_status Status;
  std::error_code EC = status(Path, Status);
  if (EC)
    return EC;
  Result = Status.getSize();
  return std::error_code();
}

/// Set the file modification and access time.
///
/// @returns errc::success if the file times were successfully set, otherwise a
///          platform-specific error_code or errc::function_not_supported on
///          platforms where the functionality isn't available.
std::error_code setLastAccessAndModificationTime(int FD, TimePoint<> AccessTime,
                                                 TimePoint<> ModificationTime);

/// Simpler version that sets both file modification and access time to the same
/// time.
inline std::error_code setLastAccessAndModificationTime(int FD,
                                                        TimePoint<> Time) {
  return setLastAccessAndModificationTime(FD, Time, Time);
}

/// Is status available?
///
/// @param s Input file status.
/// @returns True if status() != status_error.
bool status_known(const basic_file_status &s);

/// Is status available?
///
/// @param path Input path.
/// @param result Set to true if status() != status_error.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code status_known(const Twine &path, bool &result);

enum CreationDisposition : unsigned {
  /// CD_CreateAlways - When opening a file:
  ///   * If it already exists, truncate it.
  ///   * If it does not already exist, create a new file.
  CD_CreateAlways = 0,

  /// CD_CreateNew - When opening a file:
  ///   * If it already exists, fail.
  ///   * If it does not already exist, create a new file.
  CD_CreateNew = 1,

  /// CD_OpenExisting - When opening a file:
  ///   * If it already exists, open the file with the offset set to 0.
  ///   * If it does not already exist, fail.
  CD_OpenExisting = 2,

  /// CD_OpenAlways - When opening a file:
  ///   * If it already exists, open the file with the offset set to 0.
  ///   * If it does not already exist, create a new file.
  CD_OpenAlways = 3,
};

enum FileAccess : unsigned {
  FA_Read = 1,
  FA_Write = 2,
};

enum OpenFlags : unsigned {
  OF_None = 0,

  /// The file should be opened in text mode on platforms like z/OS that make
  /// this distinction.
  OF_Text = 1,

  /// The file should use carriage return + line feed ('\r\n') line endings.
  /// This flag should only be used with OF_Text. Only makes a difference on
  /// Windows.
  OF_CRLF = 2,

  /// The file should be opened in text mode and use carriage return + line
  /// feed ('\r\n') line endings. This flag has the same functionality as
  /// OF_Text on z/OS but additionally enables CRLF translation on Windows.
  OF_TextWithCRLF = OF_Text | OF_CRLF,

  /// The file should be opened in append mode.
  OF_Append = 4,

  /// The returned handle can be used for deleting the file. Only makes a
  /// difference on Windows.
  OF_Delete = 8,

  /// When a child process is launched, this file should remain open in the
  /// child process.
  OF_ChildInherit = 16,

  /// Force the file's atime to be updated on access. Only makes a difference
  /// on Windows.
  OF_UpdateAtime = 32,
};

/// Generate a potentially unique file name, without creating the file.
///
/// Generates a unique path suitable for a temporary file but does not
/// open or create the file. The name is based on \a Model with '%'
/// replaced by a random char in [0-9a-f]. If \a MakeAbsolute is true
/// then the system's temp directory is prepended first. If \a MakeAbsolute
/// is false the current directory will be used instead.
///
/// This function does not check if the file exists. If you want to be sure
/// that the file does not yet exist, you should use enough '%' characters
/// in your model to ensure this. Each '%' gives 4 bits of entropy so you can
/// use 32 of them to get 128 bits of entropy.
///
/// Example: clang-%%-%%-%%-%%-%%.s => clang-a0-b1-c2-d3-e4.s
///
/// @param Model Name to base unique path off of.
/// @param ResultPath Set to the file's path.
/// @param MakeAbsolute Whether to use the system temp directory.
void createUniquePath(const Twine &Model, SmallVectorImpl<char> &ResultPath,
                      bool MakeAbsolute);

/// Create a uniquely named file.
///
/// Generates a unique path suitable for a temporary file and then opens it as a
/// file. The name is based on \a Model with '%' replaced by a random char in
/// [0-9a-f]. If \a Model is not an absolute path, the temporary file will be
/// created in the current directory.
///
/// Example: clang-%%-%%-%%-%%-%%.s => clang-a0-b1-c2-d3-e4.s
///
/// This is an atomic operation. Either the file is created and opened, or the
/// file system is left untouched.
///
/// The intended use is for files that are to be kept, possibly after
/// renaming them. For example, when running 'clang -c foo.o', the file can
/// be first created as foo-abc123.o and then renamed.
///
/// @param Model Name to base unique path off of.
/// @param ResultFD Set to the opened file's file descriptor.
/// @param ResultPath Set to the opened file's absolute path.
/// @param Flags Flags used when opening the file.
/// @param Mode Access permissions to use if the file is created.
/// @returns errc::success if Result{FD,Path} have been successfully set,
///          otherwise a platform-specific error_code.
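///
/// An illustrative use (the model name is a made-up example):
/// \code
///   int FD;
///   SmallString<128> Path;
///   if (std::error_code EC =
///           sys::fs::createUniqueFile("output-%%%%%%.o", FD, Path))
///     return EC;
///   // Path holds the name actually chosen; FD refers to the new file.
/// \endcode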
std::error_code createUniqueFile(const Twine &Model, int &ResultFD,
                                 SmallVectorImpl<char> &ResultPath,
                                 OpenFlags Flags = OF_None,
                                 unsigned Mode = all_read | all_write);

/// Simpler version for clients that don't want an open file. An empty
/// file will still be created.
std::error_code createUniqueFile(const Twine &Model,
                                 SmallVectorImpl<char> &ResultPath,
                                 unsigned Mode = all_read | all_write);

/// Represents a temporary file.
///
/// The temporary file must be eventually discarded or given a final name and
/// kept.
///
/// The destructor doesn't implicitly discard because there is no way to
/// properly handle errors in a destructor.
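///
/// A typical lifecycle, sketched (file names are made-up examples):
/// \code
///   Expected<sys::fs::TempFile> T = sys::fs::TempFile::create("foo-%%%%%%.o");
///   if (!T)
///     return T.takeError();
///   // ... write through T->FD ...
///   if (Error E = T->keep("foo.o")) // or T->discard() to delete it
///     return E;
/// \endcode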
class TempFile {
  bool Done = false;
  TempFile(StringRef Name, int FD);

public:
  /// This creates a temporary file with createUniqueFile and schedules it for
  /// deletion with sys::RemoveFileOnSignal.
  static Expected<TempFile> create(const Twine &Model,
                                   unsigned Mode = all_read | all_write,
                                   OpenFlags ExtraFlags = OF_None);
  TempFile(TempFile &&Other);
  TempFile &operator=(TempFile &&Other);

  // Name of the temporary file.
  std::string TmpName;

  // The open file descriptor.
  int FD = -1;

#ifdef _WIN32
  // Whether we need to manually remove the file on close.
  bool RemoveOnClose = false;
#endif

  // Keep this with the given name.
  Error keep(const Twine &Name);

  // Keep this with the temporary name.
  Error keep();

  // Delete the file.
  Error discard();

  // This checks that keep or delete was called.
  ~TempFile();
};

/// Create a file in the system temporary directory.
///
/// The filename is of the form prefix-random_chars.suffix. Since the directory
/// is not known to the caller, Prefix and Suffix cannot have path separators.
/// The files are created with mode 0600.
///
/// This should be used for things like a temporary .s that is removed after
/// running the assembler.
std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
                                    int &ResultFD,
                                    SmallVectorImpl<char> &ResultPath,
                                    OpenFlags Flags = OF_None);

/// Simpler version for clients that don't want an open file. An empty
/// file will still be created.
std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
                                    SmallVectorImpl<char> &ResultPath,
                                    OpenFlags Flags = OF_None);

std::error_code createUniqueDirectory(const Twine &Prefix,
                                      SmallVectorImpl<char> &ResultPath);

/// Get a unique name not currently existing in the filesystem. Subject
/// to race conditions; prefer to use createUniqueFile instead.
///
/// Similar to createUniqueFile, but instead of creating a file it only
/// checks whether it exists. This function is subject to race conditions; if
/// you want to use the returned name to actually create a file, use
/// createUniqueFile instead.
std::error_code getPotentiallyUniqueFileName(const Twine &Model,
                                             SmallVectorImpl<char> &ResultPath);

/// Get a unique temporary file name not currently existing in the
/// filesystem. Subject to race conditions; prefer to use createTemporaryFile
/// instead.
///
/// Similar to createTemporaryFile, but instead of creating a file it only
/// checks whether it exists. This function is subject to race conditions; if
/// you want to use the returned name to actually create a file, use
/// createTemporaryFile instead.
std::error_code
getPotentiallyUniqueTempFileName(const Twine &Prefix, StringRef Suffix,
                                 SmallVectorImpl<char> &ResultPath);

inline OpenFlags operator|(OpenFlags A, OpenFlags B) {
  return OpenFlags(unsigned(A) | unsigned(B));
}

inline OpenFlags &operator|=(OpenFlags &A, OpenFlags B) {
  A = A | B;
  return A;
}

inline FileAccess operator|(FileAccess A, FileAccess B) {
  return FileAccess(unsigned(A) | unsigned(B));
}

inline FileAccess &operator|=(FileAccess &A, FileAccess B) {
  A = A | B;
  return A;
}

/// @brief Opens a file with the specified creation disposition, access mode,
/// and flags and returns a file descriptor.
///
/// The caller is responsible for closing the file descriptor once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param ResultFD If the file could be opened successfully, its descriptor
///                 is stored in this location. Otherwise, this is set to -1.
/// @param Disp Value specifying the existing-file behavior.
/// @param Access Value specifying whether to open the file in read, write, or
///               read-write mode.
/// @param Flags Additional flags.
/// @param Mode The access permissions of the file, represented in octal.
/// @returns errc::success if \a Name has been opened, otherwise a
///          platform-specific error_code.
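///
/// For example, to append to a log file, creating it if necessary (an
/// illustrative sketch):
/// \code
///   int FD;
///   if (std::error_code EC =
///           sys::fs::openFile("app.log", FD, sys::fs::CD_OpenAlways,
///                             sys::fs::FA_Write, sys::fs::OF_Append))
///     return EC;
///   // ... write via FD, then close it when finished.
/// \endcode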
std::error_code openFile(const Twine &Name, int &ResultFD,
                         CreationDisposition Disp, FileAccess Access,
                         OpenFlags Flags, unsigned Mode = 0666);

/// @brief Opens a file with the specified creation disposition, access mode,
/// and flags and returns a platform-specific file object.
///
/// The caller is responsible for closing the file object once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param Disp Value specifying the existing-file behavior.
/// @param Access Value specifying whether to open the file in read, write, or
///               read-write mode.
/// @param Flags Additional flags.
/// @param Mode The access permissions of the file, represented in octal.
/// @returns errc::success if \a Name has been opened, otherwise a
///          platform-specific error_code.
Expected<file_t> openNativeFile(const Twine &Name, CreationDisposition Disp,
                                FileAccess Access, OpenFlags Flags,
                                unsigned Mode = 0666);

/// Converts from a Posix file descriptor number to a native file handle.
/// On Windows, this retrieves the underlying handle. On non-Windows, this is a
/// no-op.
file_t convertFDToNativeFile(int FD);

#ifndef _WIN32
inline file_t convertFDToNativeFile(int FD) { return FD; }
#endif

/// Return an open handle to standard in. On Unix, this is typically FD 0.
/// Returns kInvalidFile when the stream is closed.
file_t getStdinHandle();

/// Return an open handle to standard out. On Unix, this is typically FD 1.
/// Returns kInvalidFile when the stream is closed.
file_t getStdoutHandle();

/// Return an open handle to standard error. On Unix, this is typically FD 2.
/// Returns kInvalidFile when the stream is closed.
file_t getStderrHandle();

/// Reads \p Buf.size() bytes from \p FileHandle into \p Buf. Returns the number
/// of bytes actually read. On Unix, this is equivalent to `return ::read(FD,
/// Buf.data(), Buf.size())`, with error reporting. Returns 0 when reaching EOF.
///
/// @param FileHandle File to read from.
/// @param Buf Buffer to read into.
/// @returns The number of bytes read, or error.
Expected<size_t> readNativeFile(file_t FileHandle, MutableArrayRef<char> Buf);

/// Default chunk size for \a readNativeFileToEOF().
enum : size_t { DefaultReadChunkSize = 4 * 4096 };

/// Reads from \p FileHandle until EOF, appending to \p Buffer in chunks of
/// size \p ChunkSize.
///
/// This calls \a readNativeFile() in a loop. On Error, previous chunks that
/// were read successfully are left in \p Buffer and returned.
///
/// Note: For reading the final chunk at EOF, \p Buffer's capacity needs extra
/// storage of \p ChunkSize.
///
/// \param FileHandle File to read from.
/// \param Buffer Where to put the file content.
/// \param ChunkSize Size of chunks.
/// \returns The error if EOF was not found.
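///
/// An illustrative sketch reading a whole file through the native API:
/// \code
///   Expected<sys::fs::file_t> F = sys::fs::openNativeFileForRead("in.txt");
///   if (!F)
///     return F.takeError();
///   SmallVector<char, 0> Buffer;
///   Error E = sys::fs::readNativeFileToEOF(*F, Buffer);
///   (void)sys::fs::closeFile(*F);
///   if (E)
///     return E;
/// \endcode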
Error readNativeFileToEOF(file_t FileHandle, SmallVectorImpl<char> &Buffer,
                          ssize_t ChunkSize = DefaultReadChunkSize);

/// Reads \p Buf.size() bytes from \p FileHandle at offset \p Offset into \p
/// Buf. If 'pread' is available, this will use that; otherwise it will use
/// 'lseek'. Returns the number of bytes actually read. Returns 0 when reaching
/// EOF.
///
/// @param FileHandle File to read from.
/// @param Buf Buffer to read into.
/// @param Offset Offset into the file at which the read should occur.
/// @returns The number of bytes read, or error.
Expected<size_t> readNativeFileSlice(file_t FileHandle,
                                     MutableArrayRef<char> Buf,
                                     uint64_t Offset);

/// @brief Opens the file with the given name in a write-only or read-write
/// mode, returning its open file descriptor. If the file does not exist, it
/// is created.
///
/// The caller is responsible for closing the file descriptor once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param ResultFD If the file could be opened successfully, its descriptor
///                 is stored in this location. Otherwise, this is set to -1.
/// @param Flags Additional flags used to determine whether the file should be
///              opened in, for example, read-write or in write-only mode.
/// @param Mode The access permissions of the file, represented in octal.
/// @returns errc::success if \a Name has been opened, otherwise a
///          platform-specific error_code.
inline std::error_code
openFileForWrite(const Twine &Name, int &ResultFD,
                 CreationDisposition Disp = CD_CreateAlways,
                 OpenFlags Flags = OF_None, unsigned Mode = 0666) {
  return openFile(Name, ResultFD, Disp, FA_Write, Flags, Mode);
}

/// @brief Opens the file with the given name in a write-only or read-write
/// mode, returning its open file descriptor. If the file does not exist, it
/// is created.
///
/// The caller is responsible for closing the file once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param Flags Additional flags used to determine whether the file should be
///              opened in, for example, read-write or in write-only mode.
/// @param Mode The access permissions of the file, represented in octal.
/// @returns a platform-specific file descriptor if \a Name has been opened,
///          otherwise an error object.
inline Expected<file_t> openNativeFileForWrite(const Twine &Name,
                                               CreationDisposition Disp,
                                               OpenFlags Flags,
                                               unsigned Mode = 0666) {
  return openNativeFile(Name, Disp, FA_Write, Flags, Mode);
}

/// @brief Opens the file with the given name in a write-only or read-write
/// mode, returning its open file descriptor. If the file does not exist, it
/// is created.
///
/// The caller is responsible for closing the file descriptor once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param ResultFD If the file could be opened successfully, its descriptor
///                 is stored in this location. Otherwise, this is set to -1.
/// @param Flags Additional flags used to determine whether the file should be
///              opened in, for example, read-write or in write-only mode.
/// @param Mode The access permissions of the file, represented in octal.
/// @returns errc::success if \a Name has been opened, otherwise a
///          platform-specific error_code.
inline std::error_code openFileForReadWrite(const Twine &Name, int &ResultFD,
                                            CreationDisposition Disp,
                                            OpenFlags Flags,
                                            unsigned Mode = 0666) {
  return openFile(Name, ResultFD, Disp, FA_Write | FA_Read, Flags, Mode);
}

/// @brief Opens the file with the given name in a write-only or read-write
/// mode, returning its open file descriptor. If the file does not exist, it
/// is created.
///
/// The caller is responsible for closing the file once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param Flags Additional flags used to determine whether the file should be
///              opened in, for example, read-write or in write-only mode.
/// @param Mode The access permissions of the file, represented in octal.
/// @returns a platform-specific file descriptor if \a Name has been opened,
///          otherwise an error object.
inline Expected<file_t> openNativeFileForReadWrite(const Twine &Name,
                                                   CreationDisposition Disp,
                                                   OpenFlags Flags,
                                                   unsigned Mode = 0666) {
  return openNativeFile(Name, Disp, FA_Write | FA_Read, Flags, Mode);
}

/// @brief Opens the file with the given name in a read-only mode, returning
/// its open file descriptor.
///
/// The caller is responsible for closing the file descriptor once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param ResultFD If the file could be opened successfully, its descriptor
///                 is stored in this location. Otherwise, this is set to -1.
/// @param RealPath If nonnull, extra work is done to determine the real path
///                 of the opened file, and that path is stored in this
///                 location.
/// @returns errc::success if \a Name has been opened, otherwise a
///          platform-specific error_code.
std::error_code openFileForRead(const Twine &Name, int &ResultFD,
                                OpenFlags Flags = OF_None,
                                SmallVectorImpl<char> *RealPath = nullptr);

/// @brief Opens the file with the given name in a read-only mode, returning
/// its open file descriptor.
///
/// The caller is responsible for closing the file once they are
/// finished with it.
///
/// @param Name The path of the file to open, relative or absolute.
/// @param RealPath If nonnull, extra work is done to determine the real path
///                 of the opened file, and that path is stored in this
///                 location.
/// @returns a platform-specific file descriptor if \a Name has been opened,
///          otherwise an error object.
Expected<file_t>
openNativeFileForRead(const Twine &Name, OpenFlags Flags = OF_None,
                      SmallVectorImpl<char> *RealPath = nullptr);

/// Try to lock the file within the specified timeout.
///
/// This function implements advisory locking on the entire file. If it
/// returns <em>errc::success</em>, the file is locked by the calling process.
/// Until the process unlocks the file by calling \a unlockFile, all attempts
/// to lock the same file will fail/block. The process that locked the file
/// may assume that no other process reads or writes this file, provided that
/// all processes lock the file prior to accessing its content.
///
/// @param FD      The descriptor representing the file to lock.
/// @param Timeout Time in milliseconds that the process should wait before
///                reporting lock failure. A value of zero means the lock is
///                attempted only once.
/// @returns errc::success if the lock is successfully obtained,
/// errc::no_lock_available if the file cannot be locked, or a
/// platform-specific error_code otherwise.
///
/// @note Care should be taken when using this function in a multithreaded
/// context, as it may not prevent other threads in the same process from
/// obtaining a lock on the same file, even if they are using a different file
/// descriptor.
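///
/// An illustrative use with a 500ms timeout (FD is an open descriptor):
/// \code
///   if (std::error_code EC =
///           sys::fs::tryLockFile(FD, std::chrono::milliseconds(500))) {
///     // Lock not acquired; EC says why.
///   } else {
///     // ... exclusive access to the file ...
///     (void)sys::fs::unlockFile(FD);
///   }
/// \endcode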
std::error_code
tryLockFile(int FD,
            std::chrono::milliseconds Timeout = std::chrono::milliseconds(0));

/// Lock the file.
///
/// This function behaves like @ref tryLockFile, but it waits indefinitely
/// until the lock is acquired.
std::error_code lockFile(int FD);

/// Unlock the file.
///
/// @param FD The descriptor representing the file to unlock.
/// @returns errc::success if the lock is successfully released, otherwise a
/// platform-specific error_code.
std::error_code unlockFile(int FD);

/// @brief Close the file object. This should be used instead of ::close for
/// portability. On error, the caller should assume the file is closed, as is
/// the case for Process::SafelyCloseFileDescriptor.
///
/// @param F On input, this is the file to close.  On output, the file is
/// set to kInvalidFile.
///
/// @returns An error code if closing the file failed. Typically, an error here
/// means that the filesystem may have failed to perform some buffered writes.
std::error_code closeFile(file_t &F);

#ifdef LLVM_ON_UNIX
/// @brief Change ownership of a file.
///
/// @param Owner The owner of the file to change to.
/// @param Group The group of the file to change to.
/// @returns errc::success if successfully updated file ownership, otherwise an
///          error code is returned.
std::error_code changeFileOwnership(int FD, uint32_t Owner, uint32_t Group);
#endif

/// RAII class that facilitates file locking.
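///
/// The constructor is private; instances are handed out by friends such as
/// raw_fd_ostream (e.g. its lock()/tryLockFor() members in current LLVM).
/// An illustrative sketch:
/// \code
///   Expected<sys::fs::FileLocker> L = OS.lock(); // OS is a raw_fd_ostream
///   if (L)
///     OS << "written while holding the lock\n";
///   else
///     consumeError(L.takeError());
///   // The lock is released when L goes out of scope.
/// \endcode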
class FileLocker {
  int FD; ///< Locked file handle.
  FileLocker(int FD) : FD(FD) {}
  friend class llvm::raw_fd_ostream;

public:
  FileLocker(const FileLocker &L) = delete;
  FileLocker(FileLocker &&L) : FD(L.FD) { L.FD = -1; }
  ~FileLocker() {
    if (FD != -1)
      unlockFile(FD);
  }
  FileLocker &operator=(FileLocker &&L) {
    FD = L.FD;
    L.FD = -1;
    return *this;
  }
  FileLocker &operator=(const FileLocker &L) = delete;
  std::error_code unlock() {
    if (FD != -1) {
      std::error_code Result = unlockFile(FD);
      FD = -1;
      return Result;
    }
    return std::error_code();
  }
};

std::error_code getUniqueID(const Twine Path, UniqueID &Result);

/// Get disk space usage information.
///
/// Note: Users must be careful to avoid "Time Of Check, Time Of Use" bugs.
/// Note: Windows reports results according to the quota allocated to the user.
///
/// @param Path Input path.
/// @returns a space_info structure filled with the capacity, free, and
/// available space on the device \a Path is on. A platform specific error_code
/// is returned on error.
ErrorOr<space_info> disk_space(const Twine &Path);

/// This class represents a memory mapped file. It is based on
/// boost::iostreams::mapped_file.
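///
/// An illustrative read-only mapping of an entire file (file name made up):
/// \code
///   uint64_t Size;
///   if (std::error_code EC = sys::fs::file_size("data.bin", Size))
///     return EC;
///   Expected<sys::fs::file_t> F = sys::fs::openNativeFileForRead("data.bin");
///   if (!F)
///     return errorToErrorCode(F.takeError());
///   std::error_code EC;
///   sys::fs::mapped_file_region Map(*F, sys::fs::mapped_file_region::readonly,
///                                   Size, /*offset=*/0, EC);
///   if (!EC)
///     StringRef Data(Map.const_data(), Map.size());
/// \endcode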
class mapped_file_region {
public:
  enum mapmode {
    readonly,  ///< May only access the map via const_data(), as read-only.
    readwrite, ///< May access the map via data() and modify it; changes are
               ///< written back to the file.
    priv       ///< May modify the map via data(), but changes are lost on
               ///< destruction.
  };

private:
  /// Platform-specific mapping state.
  size_t Size = 0;
  void *Mapping = nullptr;
#ifdef _WIN32
  sys::fs::file_t FileHandle = nullptr;
#endif
  mapmode Mode = readonly;

  void copyFrom(const mapped_file_region &Copied) {
    Size = Copied.Size;
    Mapping = Copied.Mapping;
#ifdef _WIN32
    FileHandle = Copied.FileHandle;
#endif
    Mode = Copied.Mode;
  }

  void moveFromImpl(mapped_file_region &Moved) {
    copyFrom(Moved);
    Moved.copyFrom(mapped_file_region());
  }

  void unmapImpl();
  void dontNeedImpl();

  std::error_code init(sys::fs::file_t FD, uint64_t Offset, mapmode Mode);

public:
  mapped_file_region() = default;
  mapped_file_region(mapped_file_region &&Moved) { moveFromImpl(Moved); }
  mapped_file_region &operator=(mapped_file_region &&Moved) {
    unmap();
    moveFromImpl(Moved);
    return *this;
  }

  mapped_file_region(const mapped_file_region &) = delete;
  mapped_file_region &operator=(const mapped_file_region &) = delete;

  /// \param fd An open file descriptor to map. Does not take ownership of fd.
  mapped_file_region(sys::fs::file_t fd, mapmode mode, size_t length, uint64_t offset,
                     std::error_code &ec);

  ~mapped_file_region() { unmapImpl(); }

  /// Check if this is a valid mapping.
  explicit operator bool() const { return Mapping; }

  /// Unmap.
  void unmap() {
    unmapImpl();
    copyFrom(mapped_file_region());
  }
  void dontNeed() { dontNeedImpl(); }

  size_t size() const;
  char *data() const;

  /// Get a const view of the data. Modifying this memory has undefined
  /// behavior.
  const char *const_data() const;

  /// \returns The minimum alignment that mapping offsets must satisfy.
  static int alignment();
};

/// Return the path to the main executable, given the value of argv[0] from
/// program startup and the address of main itself. In extremis, this function
/// may fail and return an empty path.
std::string getMainExecutable(const char *argv0, void *MainExecAddr);

/// @}
/// @name Iterators
/// @{

/// directory_entry - A single entry in a directory.
class directory_entry {
  // FIXME: different platforms make different information available "for free"
  // when traversing a directory. The design of this class wraps most of the
  // information in basic_file_status, so on platforms where we can't populate
  // that whole structure, callers end up paying for a stat().
  // std::filesystem::directory_entry may be a better model.
  std::string Path;
  file_type Type = file_type::type_unknown; // Most platforms can provide this.
  bool FollowSymlinks = true;               // Affects the behavior of status().
  basic_file_status Status;                 // If available.

public:
  explicit directory_entry(const Twine &Path, bool FollowSymlinks = true,
                           file_type Type = file_type::type_unknown,
                           basic_file_status Status = basic_file_status())
      : Path(Path.str()), Type(Type), FollowSymlinks(FollowSymlinks),
        Status(Status) {}

  directory_entry() = default;

  void replace_filename(const Twine &Filename, file_type Type,
                        basic_file_status Status = basic_file_status());

  const std::string &path() const { return Path; }
  // Get basic information about entry file (a subset of fs::status()).
  // On most platforms this is a stat() call.
  // On windows the information was already retrieved from the directory.
  ErrorOr<basic_file_status> status() const;
  // Get the type of this file.
  // On most platforms (Linux/Mac/Windows/BSD), this was already retrieved.
  // On some platforms (e.g. Solaris) this is a stat() call.
  file_type type() const {
    if (Type != file_type::type_unknown)
      return Type;
    auto S = status();
    return S ? S->type() : file_type::type_unknown;
  }

  bool operator==(const directory_entry& RHS) const { return Path == RHS.Path; }
  bool operator!=(const directory_entry& RHS) const { return !(*this == RHS); }
  bool operator< (const directory_entry& RHS) const;
  bool operator<=(const directory_entry& RHS) const;
  bool operator> (const directory_entry& RHS) const;
  bool operator>=(const directory_entry& RHS) const;
};

namespace detail {

  struct DirIterState;

  std::error_code directory_iterator_construct(DirIterState &, StringRef, bool);
  std::error_code directory_iterator_increment(DirIterState &);
  std::error_code directory_iterator_destruct(DirIterState &);

  /// Keeps state for the directory_iterator.
  struct DirIterState {
    ~DirIterState() {
      directory_iterator_destruct(*this);
    }

    intptr_t IterationHandle = 0;
    directory_entry CurrentEntry;
  };

} // end namespace detail

/// directory_iterator - Iterates through the entries in path. There is no
/// operator++ because we need an error_code. If it's really needed we can make
/// it call report_fatal_error on error.
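///
/// The customary loop, for illustration (Dir names an existing directory):
/// \code
///   std::error_code EC;
///   for (sys::fs::directory_iterator I(Dir, EC), E; I != E && !EC;
///        I.increment(EC))
///     outs() << I->path() << '\n';
///   if (EC)
///     errs() << "error while iterating: " << EC.message() << '\n';
/// \endcode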
class directory_iterator {
  std::shared_ptr<detail::DirIterState> State;
  bool FollowSymlinks = true;

public:
  explicit directory_iterator(const Twine &path, std::error_code &ec,
                              bool follow_symlinks = true)
      : FollowSymlinks(follow_symlinks) {
    State = std::make_shared<detail::DirIterState>();
    SmallString<128> path_storage;
    ec = detail::directory_iterator_construct(
        *State, path.toStringRef(path_storage), FollowSymlinks);
  }

  explicit directory_iterator(const directory_entry &de, std::error_code &ec,
                              bool follow_symlinks = true)
      : FollowSymlinks(follow_symlinks) {
    State = std::make_shared<detail::DirIterState>();
    ec = detail::directory_iterator_construct(
        *State, de.path(), FollowSymlinks);
  }

  /// Construct end iterator.
  directory_iterator() = default;

  // No operator++ because we need error_code.
  directory_iterator &increment(std::error_code &ec) {
    ec = directory_iterator_increment(*State);
    return *this;
  }

  const directory_entry &operator*() const { return State->CurrentEntry; }
  const directory_entry *operator->() const { return &State->CurrentEntry; }

  bool operator==(const directory_iterator &RHS) const {
    if (State == RHS.State)
      return true;
    if (!RHS.State)
      return State->CurrentEntry == directory_entry();
    if (!State)
      return RHS.State->CurrentEntry == directory_entry();
    return State->CurrentEntry == RHS.State->CurrentEntry;
  }

  bool operator!=(const directory_iterator &RHS) const {
    return !(*this == RHS);
  }
};

namespace detail {

  /// Keeps state for the recursive_directory_iterator.
  struct RecDirIterState {
    std::stack<directory_iterator, std::vector<directory_iterator>> Stack;
    uint16_t Level = 0;
    bool HasNoPushRequest = false;
  };

} // end namespace detail

/// recursive_directory_iterator - Same as directory_iterator except that it
/// recurses down into child directories.
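///
/// An illustrative walk printing every regular file under Dir:
/// \code
///   std::error_code EC;
///   for (sys::fs::recursive_directory_iterator I(Dir, EC), E;
///        I != E && !EC; I.increment(EC))
///     if (I->type() == sys::fs::file_type::regular_file)
///       outs() << I->path() << '\n';
/// \endcode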
class recursive_directory_iterator {
  std::shared_ptr<detail::RecDirIterState> State;
  bool Follow;

public:
  recursive_directory_iterator() = default;
  explicit recursive_directory_iterator(const Twine &path, std::error_code &ec,
                                        bool follow_symlinks = true)
      : State(std::make_shared<detail::RecDirIterState>()),
        Follow(follow_symlinks) {
    State->Stack.push(directory_iterator(path, ec, Follow));
    if (State->Stack.top() == directory_iterator())
      State.reset();
  }

  // No operator++ because we need error_code.
  recursive_directory_iterator &increment(std::error_code &ec) {
    const directory_iterator end_itr = {};

    if (State->HasNoPushRequest)
      State->HasNoPushRequest = false;
    else {
      file_type type = State->Stack.top()->type();
      if (type == file_type::symlink_file && Follow) {
        // Resolve the symlink: is it a directory to recurse into?
        ErrorOr<basic_file_status> status = State->Stack.top()->status();
        if (status)
          type = status->type();
        // Otherwise broken symlink, and we'll continue.
      }
      if (type == file_type::directory_file) {
        State->Stack.push(directory_iterator(*State->Stack.top(), ec, Follow));
        if (State->Stack.top() != end_itr) {
          ++State->Level;
          return *this;
        }
        State->Stack.pop();
      }
    }

    while (!State->Stack.empty()
           && State->Stack.top().increment(ec) == end_itr) {
      State->Stack.pop();
      --State->Level;
    }

    // Check if we are done. If so, create an end iterator.
    if (State->Stack.empty())
      State.reset();

    return *this;
  }

  const directory_entry &operator*() const { return *State->Stack.top(); }
  const directory_entry *operator->() const { return &*State->Stack.top(); }

  // observers
  /// Gets the current level. Starting path is at level 0.
  int level() const { return State->Level; }

  /// Returns true if no_push has been called for this directory_entry.
  bool no_push_request() const { return State->HasNoPushRequest; }

  // modifiers
  /// Goes up one level if Level > 0.
  void pop() {
    assert(State && "Cannot pop an end iterator!");
    assert(State->Level > 0 && "Cannot pop an iterator with level < 1");

    const directory_iterator end_itr = {};
    std::error_code ec;
    do {
      if (ec)
        report_fatal_error("Error incrementing directory iterator.");
      State->Stack.pop();
      --State->Level;
    } while (!State->Stack.empty()
             && State->Stack.top().increment(ec) == end_itr);

    // Check if we are done. If so, create an end iterator.
    if (State->Stack.empty())
      State.reset();
  }

  /// Does not go down into the current directory_entry.
  void no_push() { State->HasNoPushRequest = true; }

  bool operator==(const recursive_directory_iterator &RHS) const {
    return State == RHS.State;
  }

  bool operator!=(const recursive_directory_iterator &RHS) const {
    return !(*this == RHS);
  }
};

/// @}

} // end namespace fs
} // end namespace sys
} // end namespace llvm

#endif // LLVM_SUPPORT_FILESYSTEM_H
//===-- llvm/Support/ExitCodes.h - Exit codes for exit()  -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains definitions of exit codes for the exit() function. They
/// are either provided by sysexits.h, if it is supported, or defined here
/// otherwise.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_EXITCODES_H
#define LLVM_SUPPORT_EXITCODES_H

#include "llvm/Config/llvm-config.h"

#if HAVE_SYSEXITS_H
#include <sysexits.h>
#elif __MVS__ || defined(_WIN32)
// <sysexits.h> does not exist on z/OS and Windows. The only value used in LLVM
// is EX_IOERR, which is used to signal a special error condition (broken pipe).
// Define the macro with its usual value from BSD systems, which is chosen to
// not clash with more standard exit codes like 1.
#define EX_IOERR 74
#elif LLVM_ON_UNIX
#error Exit code EX_IOERR not available
#endif

#endif
//===- llvm/Support/DivisionByConstantInfo.h ---------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file implements support for optimizing divisions by a constant.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DIVISIONBYCONSTANTINFO_H
#define LLVM_SUPPORT_DIVISIONBYCONSTANTINFO_H

#include "llvm/ADT/APInt.h"

namespace llvm {

/// Magic data for optimising signed division by a constant.
struct SignedDivisionByConstantInfo {
  static SignedDivisionByConstantInfo get(const APInt &D);
  APInt Magic;          ///< magic number
  unsigned ShiftAmount; ///< shift amount
};

/// Magic data for optimising unsigned division by a constant.
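///
/// For illustration, a client lowering an unsigned 32-bit division by 19
/// might query the magic data like this (the expansion sketched in the
/// comments follows the classic multiply-high scheme and is indicative,
/// not normative):
/// \code
///   UnsignedDivisionByConstantInfo Info =
///       UnsignedDivisionByConstantInfo::get(APInt(32, 19));
///   // Roughly: q = umulhi(x >> Info.PreShift, Info.Magic);
///   //          if (Info.IsAdd) q = ((x - q) >> 1) + q;
///   //          q >>= Info.PostShift;
/// \endcode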
struct UnsignedDivisionByConstantInfo {
  static UnsignedDivisionByConstantInfo
  get(const APInt &D, unsigned LeadingZeros = 0,
      bool AllowEvenDivisorOptimization = true);
  APInt Magic;          ///< magic number
  bool IsAdd;           ///< add indicator
  unsigned PostShift;   ///< post-shift amount
  unsigned PreShift;    ///< pre-shift amount
};

} // namespace llvm

#endif
//===- llvm/Support/ELFAttributeParser.h - ELF Attribute Parser -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ELFATTRIBUTEPARSER_H
#define LLVM_SUPPORT_ELFATTRIBUTEPARSER_H

#include "ELFAttributes.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

#include <optional>
#include <unordered_map>

namespace llvm {
class StringRef;
class ScopedPrinter;

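/// Common logic for parsing ELF build attribute sections. Subclasses supply
/// the vendor name, a tag-to-name map, and a handler for vendor-specific
/// tags.
///
/// A minimal subclass might look like this (the tag value and vendor name
/// are hypothetical):
/// \code
/// class MyAttributeParser : public ELFAttributeParser {
///   Error handler(uint64_t Tag, bool &Handled) override {
///     Handled = false;
///     if (Tag == 4) { // hypothetical integer-valued tag
///       if (Error E = integerAttribute(Tag))
///         return E;
///       Handled = true;
///     }
///     return Error::success();
///   }
///
/// public:
///   MyAttributeParser(TagNameMap Map) : ELFAttributeParser(Map, "myvendor") {}
/// };
/// \endcode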
class ELFAttributeParser {
  StringRef vendor;
  std::unordered_map<unsigned, unsigned> attributes;
  std::unordered_map<unsigned, StringRef> attributesStr;

  virtual Error handler(uint64_t tag, bool &handled) = 0;

protected:
  ScopedPrinter *sw;
  TagNameMap tagToStringMap;
  DataExtractor de{ArrayRef<uint8_t>{}, true, 0};
  DataExtractor::Cursor cursor{0};

  void printAttribute(unsigned tag, unsigned value, StringRef valueDesc);

  Error parseStringAttribute(const char *name, unsigned tag,
                             ArrayRef<const char *> strings);
  Error parseAttributeList(uint32_t length);
  void parseIndexList(SmallVectorImpl<uint8_t> &indexList);
  Error parseSubsection(uint32_t length);

  void setAttributeString(unsigned tag, StringRef value) {
    attributesStr.emplace(tag, value);
  }

public:
  virtual ~ELFAttributeParser() { static_cast<void>(!cursor.takeError()); }
  Error integerAttribute(unsigned tag);
  Error stringAttribute(unsigned tag);

  ELFAttributeParser(ScopedPrinter *sw, TagNameMap tagNameMap, StringRef vendor)
      : vendor(vendor), sw(sw), tagToStringMap(tagNameMap) {}

  ELFAttributeParser(TagNameMap tagNameMap, StringRef vendor)
      : vendor(vendor), sw(nullptr), tagToStringMap(tagNameMap) {}

  Error parse(ArrayRef<uint8_t> section, support::endianness endian);

  std::optional<unsigned> getAttributeValue(unsigned tag) const {
    auto I = attributes.find(tag);
    if (I == attributes.end())
      return std::nullopt;
    return I->second;
  }
  std::optional<StringRef> getAttributeString(unsigned tag) const {
    auto I = attributesStr.find(tag);
    if (I == attributesStr.end())
      return std::nullopt;
    return I->second;
  }
};

} // namespace llvm
#endif
//===-- X86FoldTablesUtils.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_X86FOLDTABLESUTILS_H
#define LLVM_SUPPORT_X86FOLDTABLESUTILS_H

namespace llvm {
enum {
  // Select which memory operand is being unfolded.
  // (stored in bits 0 - 2)
  TB_INDEX_0    = 0,
  TB_INDEX_1    = 1,
  TB_INDEX_2    = 2,
  TB_INDEX_3    = 3,
  TB_INDEX_4    = 4,
  TB_INDEX_MASK = 0x7,

  // Do not insert the reverse map (MemOp -> RegOp) into the table.
  // This may be needed because there is a many -> one mapping.
  TB_NO_REVERSE   = 1 << 3,

  // Do not insert the forward map (RegOp -> MemOp) into the table.
  // This is needed for Native Client, which prohibits branch
  // instructions from using a memory operand.
  TB_NO_FORWARD   = 1 << 4,

  TB_FOLDED_LOAD  = 1 << 5,
  TB_FOLDED_STORE = 1 << 6,
  TB_FOLDED_BCAST = 1 << 7,

  // Minimum alignment required for load/store.
  // Used for RegOp->MemOp conversion. Encoded as Log2(Align)
  // (stored in bits 8 - 10)
  TB_ALIGN_SHIFT = 8,
  TB_ALIGN_1     =   0 << TB_ALIGN_SHIFT,
  TB_ALIGN_16    =   4 << TB_ALIGN_SHIFT,
  TB_ALIGN_32    =   5 << TB_ALIGN_SHIFT,
  TB_ALIGN_64    =   6 << TB_ALIGN_SHIFT,
  TB_ALIGN_MASK  = 0x7 << TB_ALIGN_SHIFT,

  // Broadcast type.
  // (stored in bits 11 - 12)
  TB_BCAST_TYPE_SHIFT = TB_ALIGN_SHIFT + 3,
  TB_BCAST_D    =   0 << TB_BCAST_TYPE_SHIFT,
  TB_BCAST_Q    =   1 << TB_BCAST_TYPE_SHIFT,
  TB_BCAST_SS   =   2 << TB_BCAST_TYPE_SHIFT,
  TB_BCAST_SD   =   3 << TB_BCAST_TYPE_SHIFT,
  TB_BCAST_MASK = 0x3 << TB_BCAST_TYPE_SHIFT,

  // Unused bits 13-15
};
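
// A minimal decoding sketch (hypothetical helpers): recover the unfolded
// memory-operand index and the minimum alignment from a fold-table flags word
// built from the constants above.
inline unsigned getFoldedIndex(unsigned Flags) { return Flags & TB_INDEX_MASK; }
inline unsigned getMinFoldAlign(unsigned Flags) {
  // The field stores Log2(Align): a stored 4 decodes to 16-byte alignment.
  return 1u << ((Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT);
}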
} // namespace llvm
#endif // LLVM_SUPPORT_X86FOLDTABLESUTILS_H
//===- llvm/Support/GraphWriter.h - Write graph to a .dot file --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a simple interface that can be used to print out generic
// LLVM graphs to ".dot" files.  "dot" is a tool that is part of the AT&T
// graphviz package (http://www.research.att.com/sw/tools/graphviz/) which can
// be used to turn the files output by this interface into a variety of
// different graphics formats.
//
// Graphs do not need to implement any interface past what is already required
// by the GraphTraits template, but they can choose to implement specializations
// of the DOTGraphTraits template if they want to customize the graphs output in
// any way.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GRAPHWRITER_H
#define LLVM_SUPPORT_GRAPHWRITER_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <iterator>
#include <string>
#include <type_traits>
#include <vector>

namespace llvm {

namespace DOT {  // Private functions...

std::string EscapeString(const std::string &Label);

/// Get a color string for this node number. Simply round-robin selects
/// from a reasonable number of colors.
StringRef getColorString(unsigned NodeNumber);

} // end namespace DOT

namespace GraphProgram {

enum Name {
  DOT,
  FDP,
  NEATO,
  TWOPI,
  CIRCO
};

} // end namespace GraphProgram

bool DisplayGraph(StringRef Filename, bool wait = true,
                  GraphProgram::Name program = GraphProgram::DOT);

template<typename GraphType>
class GraphWriter {
  raw_ostream &O;
  const GraphType &G;
  bool RenderUsingHTML = false;

  using DOTTraits = DOTGraphTraits<GraphType>;
  using GTraits = GraphTraits<GraphType>;
  using NodeRef = typename GTraits::NodeRef;
  using node_iterator = typename GTraits::nodes_iterator;
  using child_iterator = typename GTraits::ChildIteratorType;
  DOTTraits DTraits;

  static_assert(std::is_pointer_v<NodeRef>,
                "FIXME: Currently GraphWriter requires the NodeRef type to be "
                "a pointer.\nThe pointer usage should be moved to "
                "DOTGraphTraits, and removed from GraphWriter itself.");

  // Writes the edge labels of the node to O and returns true if there are any
  // edge labels not equal to the empty string "".
  bool getEdgeSourceLabels(raw_ostream &O, NodeRef Node) {
    child_iterator EI = GTraits::child_begin(Node);
    child_iterator EE = GTraits::child_end(Node);
    bool hasEdgeSourceLabels = false;

    if (RenderUsingHTML)
      O << "</tr><tr>";

    for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i) {
      std::string label = DTraits.getEdgeSourceLabel(Node, EI);

      if (label.empty())
        continue;

      hasEdgeSourceLabels = true;

      if (RenderUsingHTML)
        O << "<td colspan=\"1\" port=\"s" << i << "\">" << label << "</td>";
      else {
        if (i)
          O << "|";

        O << "<s" << i << ">" << DOT::EscapeString(label);
      }
    }

    if (EI != EE && hasEdgeSourceLabels) {
      if (RenderUsingHTML)
        O << "<td colspan=\"1\" port=\"s64\">truncated...</td>";
      else
        O << "|<s64>truncated...";
    }

    return hasEdgeSourceLabels;
  }

public:
  GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
    DTraits = DOTTraits(SN);
    RenderUsingHTML = DTraits.renderNodesUsingHTML();
  }

  void writeGraph(const std::string &Title = "") {
    // Output the header for the graph...
    writeHeader(Title);

    // Emit all of the nodes in the graph...
    writeNodes();

    // Output any customizations on the graph
    DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, *this);

    // Output the end of the graph
    writeFooter();
  }

  void writeHeader(const std::string &Title) {
    std::string GraphName(DTraits.getGraphName(G));

    if (!Title.empty())
      O << "digraph \"" << DOT::EscapeString(Title) << "\" {\n";
    else if (!GraphName.empty())
      O << "digraph \"" << DOT::EscapeString(GraphName) << "\" {\n";
    else
      O << "digraph unnamed {\n";

    if (DTraits.renderGraphFromBottomUp())
      O << "\trankdir=\"BT\";\n";

    if (!Title.empty())
      O << "\tlabel=\"" << DOT::EscapeString(Title) << "\";\n";
    else if (!GraphName.empty())
      O << "\tlabel=\"" << DOT::EscapeString(GraphName) << "\";\n";
    O << DTraits.getGraphProperties(G);
    O << "\n";
  }

  void writeFooter() {
    // Finish off the graph
    O << "}\n";
  }

  void writeNodes() {
    // Loop over the graph, printing it out...
    for (const auto Node : nodes<GraphType>(G))
      if (!isNodeHidden(Node))
        writeNode(Node);
  }

  bool isNodeHidden(NodeRef Node) { return DTraits.isNodeHidden(Node, G); }

  void writeNode(NodeRef Node) {
    std::string NodeAttributes = DTraits.getNodeAttributes(Node, G);

    O << "\tNode" << static_cast<const void *>(Node) << " [shape=";
    if (RenderUsingHTML)
      O << "none,";
    else
      O << "record,";

    if (!NodeAttributes.empty()) O << NodeAttributes << ",";
    O << "label=";

    if (RenderUsingHTML) {
      // Count the number of edges out of the node to determine how
      // many columns to span (max 64).
      unsigned ColSpan = 0;
      child_iterator EI = GTraits::child_begin(Node);
      child_iterator EE = GTraits::child_end(Node);
      for (; EI != EE && ColSpan != 64; ++EI, ++ColSpan)
        ;
      if (ColSpan == 0)
        ColSpan = 1;
      // Include truncated messages when counting.
      if (EI != EE)
        ++ColSpan;
      O << "<<table border=\"0\" cellborder=\"1\" cellspacing=\"0\""
        << " cellpadding=\"0\"><tr><td align=\"text\" colspan=\"" << ColSpan
        << "\">";
    } else
      O << "\"{";

    if (!DTraits.renderGraphFromBottomUp()) {
      if (RenderUsingHTML)
        O << DTraits.getNodeLabel(Node, G) << "</td>";
      else
        O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));

      // If we should include the address of the node in the label, do so now.
      std::string Id = DTraits.getNodeIdentifierLabel(Node, G);
      if (!Id.empty())
        O << "|" << DOT::EscapeString(Id);

      std::string NodeDesc = DTraits.getNodeDescription(Node, G);
      if (!NodeDesc.empty())
        O << "|" << DOT::EscapeString(NodeDesc);
    }

    std::string edgeSourceLabels;
    raw_string_ostream EdgeSourceLabels(edgeSourceLabels);
    bool hasEdgeSourceLabels = getEdgeSourceLabels(EdgeSourceLabels, Node);

    if (hasEdgeSourceLabels) {
      if (!DTraits.renderGraphFromBottomUp())
        if (!RenderUsingHTML)
          O << "|";

      if (RenderUsingHTML)
        O << EdgeSourceLabels.str();
      else
        O << "{" << EdgeSourceLabels.str() << "}";

      if (DTraits.renderGraphFromBottomUp())
        if (!RenderUsingHTML)
          O << "|";
    }

    if (DTraits.renderGraphFromBottomUp()) {
      if (RenderUsingHTML)
        O << DTraits.getNodeLabel(Node, G);
      else
        O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));

      // If we should include the address of the node in the label, do so now.
      std::string Id = DTraits.getNodeIdentifierLabel(Node, G);
      if (!Id.empty())
        O << "|" << DOT::EscapeString(Id);

      std::string NodeDesc = DTraits.getNodeDescription(Node, G);
      if (!NodeDesc.empty())
        O << "|" << DOT::EscapeString(NodeDesc);
    }

    if (DTraits.hasEdgeDestLabels()) {
      O << "|{";

      unsigned i = 0, e = DTraits.numEdgeDestLabels(Node);
      for (; i != e && i != 64; ++i) {
        if (i) O << "|";
        O << "<d" << i << ">"
          << DOT::EscapeString(DTraits.getEdgeDestLabel(Node, i));
      }

      if (i != e)
        O << "|<d64>truncated...";
      O << "}";
    }

    if (RenderUsingHTML)
      O << "</tr></table>>";
    else
      O << "}\"";
    O << "];\n"; // Finish printing the "node" line

    // Output all of the edges now
    child_iterator EI = GTraits::child_begin(Node);
    child_iterator EE = GTraits::child_end(Node);
    for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i)
      if (!DTraits.isNodeHidden(*EI, G))
        writeEdge(Node, i, EI);
    for (; EI != EE; ++EI)
      if (!DTraits.isNodeHidden(*EI, G))
        writeEdge(Node, 64, EI);
  }

  void writeEdge(NodeRef Node, unsigned edgeidx, child_iterator EI) {
    if (NodeRef TargetNode = *EI) {
      int DestPort = -1;
      if (DTraits.edgeTargetsEdgeSource(Node, EI)) {
        child_iterator TargetIt = DTraits.getEdgeTarget(Node, EI);

        // Figure out which edge this targets...
        unsigned Offset =
          (unsigned)std::distance(GTraits::child_begin(TargetNode), TargetIt);
        DestPort = static_cast<int>(Offset);
      }

      if (DTraits.getEdgeSourceLabel(Node, EI).empty())
        edgeidx = -1;

      emitEdge(static_cast<const void*>(Node), edgeidx,
               static_cast<const void*>(TargetNode), DestPort,
               DTraits.getEdgeAttributes(Node, EI, G));
    }
  }

  /// emitSimpleNode - Outputs a simple (non-record) node
  void emitSimpleNode(const void *ID, const std::string &Attr,
                   const std::string &Label, unsigned NumEdgeSources = 0,
                   const std::vector<std::string> *EdgeSourceLabels = nullptr) {
    O << "\tNode" << ID << "[ ";
    if (!Attr.empty())
      O << Attr << ",";
    O << " label =\"";
    if (NumEdgeSources) O << "{";
    O << DOT::EscapeString(Label);
    if (NumEdgeSources) {
      O << "|{";

      for (unsigned i = 0; i != NumEdgeSources; ++i) {
        if (i) O << "|";
        O << "<s" << i << ">";
        if (EdgeSourceLabels) O << DOT::EscapeString((*EdgeSourceLabels)[i]);
      }
      O << "}}";
    }
    O << "\"];\n";
  }

  /// emitEdge - Output an edge from a simple node into the graph...
  void emitEdge(const void *SrcNodeID, int SrcNodePort,
                const void *DestNodeID, int DestNodePort,
                const std::string &Attrs) {
    if (SrcNodePort  > 64) return;             // Emanating from truncated part?
    if (DestNodePort > 64) DestNodePort = 64;  // Targeting the truncated part?

    O << "\tNode" << SrcNodeID;
    if (SrcNodePort >= 0)
      O << ":s" << SrcNodePort;
    O << " -> Node" << DestNodeID;
    if (DestNodePort >= 0 && DTraits.hasEdgeDestLabels())
      O << ":d" << DestNodePort;

    if (!Attrs.empty())
      O << "[" << Attrs << "]";
    O << ";\n";
  }

  /// getOStream - Get the raw output stream into the graph file. Useful to
  /// write fancy things using addCustomGraphFeatures().
  raw_ostream &getOStream() {
    return O;
  }
};

template<typename GraphType>
raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
                        bool ShortNames = false,
                        const Twine &Title = "") {
  // Start the graph emission process...
  GraphWriter<GraphType> W(O, G, ShortNames);

  // Emit the graph.
  W.writeGraph(Title.str());

  return O;
}
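
// A minimal sketch of hooking a custom graph into WriteGraph. ExampleNode and
// ExampleGraph are hypothetical types; once GraphTraits is specialized for the
// graph, the default DOTGraphTraits supplies empty labels and WriteGraph
// handles the rest.
struct ExampleNode {
  std::vector<ExampleNode *> Succs;
};
struct ExampleGraph {
  std::vector<ExampleNode *> Nodes; // Nodes.front() is the entry node.
};

template <> struct GraphTraits<const ExampleGraph *> {
  using NodeRef = ExampleNode *;
  using ChildIteratorType = std::vector<ExampleNode *>::const_iterator;
  using nodes_iterator = std::vector<ExampleNode *>::const_iterator;

  static NodeRef getEntryNode(const ExampleGraph *G) {
    return G->Nodes.front();
  }
  static ChildIteratorType child_begin(NodeRef N) { return N->Succs.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
  static nodes_iterator nodes_begin(const ExampleGraph *G) {
    return G->Nodes.begin();
  }
  static nodes_iterator nodes_end(const ExampleGraph *G) {
    return G->Nodes.end();
  }
};
// With the specialization in place, emitting DOT reduces to:
//   const ExampleGraph *GP = &G;
//   WriteGraph(outs(), GP);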

std::string createGraphFilename(const Twine &Name, int &FD);

/// Writes the graph into the provided @c Filename.
/// If @c Filename is empty, generates a random one.
/// \return The resulting filename, or an empty string if writing
/// failed.
template <typename GraphType>
std::string WriteGraph(const GraphType &G, const Twine &Name,
                       bool ShortNames = false,
                       const Twine &Title = "",
                       std::string Filename = "") {
  int FD;
  if (Filename.empty()) {
    Filename = createGraphFilename(Name.str(), FD);
  } else {
    std::error_code EC = sys::fs::openFileForWrite(
        Filename, FD, sys::fs::CD_CreateAlways, sys::fs::OF_Text);

    // Writing over an existing file is not considered an error.
    if (EC == std::errc::file_exists) {
      errs() << "file exists, overwriting" << "\n";
    } else if (EC) {
      errs() << "error writing into file" << "\n";
      return "";
    } else {
      errs() << "writing to the newly created file " << Filename << "\n";
    }
  }
  raw_fd_ostream O(FD, /*shouldClose=*/ true);

  if (FD == -1) {
    errs() << "error opening file '" << Filename << "' for writing!\n";
    return "";
  }

  llvm::WriteGraph(O, G, ShortNames, Title);
  errs() << " done. \n";

  return Filename;
}

/// DumpDotGraph - Just dump a dot graph to the user-provided file name.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <typename GraphType>
LLVM_DUMP_METHOD void
dumpDotGraphToFile(const GraphType &G, const Twine &FileName,
                   const Twine &Title, bool ShortNames = false,
                   const Twine &Name = "") {
  llvm::WriteGraph(G, Name, ShortNames, Title, FileName.str());
}
#endif

/// ViewGraph - Emit a dot graph, run 'dot', run gv on the postscript file,
/// then cleanup.  For use from the debugger.
///
template<typename GraphType>
void ViewGraph(const GraphType &G, const Twine &Name,
               bool ShortNames = false, const Twine &Title = "",
               GraphProgram::Name Program = GraphProgram::DOT) {
  std::string Filename = llvm::WriteGraph(G, Name, ShortNames, Title);

  if (Filename.empty())
    return;

  DisplayGraph(Filename, false, Program);
}
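
// From a debugger, the same works as a one-liner; for the hypothetical
// ExampleGraph above:
//   ViewGraph(GP, "example-graph");
// writes the file, runs the selected layout program, and opens the viewer.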

} // end namespace llvm

#endif // LLVM_SUPPORT_GRAPHWRITER_H
//===-- llvm/Support/FormattedStream.h - Formatted streams ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains raw_ostream implementations for streams to do
// things like pretty-print comments.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATTEDSTREAM_H
#define LLVM_SUPPORT_FORMATTEDSTREAM_H

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>

namespace llvm {

/// formatted_raw_ostream - A raw_ostream that wraps another one and keeps track
/// of line and column position, allowing padding out to specific column
/// boundaries and querying the number of lines written to the stream. This
/// assumes that the contents of the stream are valid UTF-8 encoded text. This
/// doesn't attempt to handle everything Unicode can do (combining characters,
/// right-to-left markers, etc), but should cover the cases likely to appear in
/// source code or diagnostic messages.
class formatted_raw_ostream : public raw_ostream {
  /// TheStream - The real stream we output to. We set it to be
  /// unbuffered, since we're already doing our own buffering.
  ///
  raw_ostream *TheStream;

  /// Position - The current output column and line of the data that's
  /// been flushed and the portion of the buffer that's been
  /// scanned.  The line and column scheme is zero-based.
  ///
  std::pair<unsigned, unsigned> Position;

  /// Scanned - This points to one past the last character in the
  /// buffer we've scanned.
  ///
  const char *Scanned;

  /// PartialUTF8Char - Either empty or a prefix of a UTF-8 code unit sequence
  /// for a Unicode scalar value which should be prepended to the buffer for the
  /// next call to ComputePosition. This is needed when the buffer is flushed
  /// when it ends part-way through the UTF-8 encoding of a Unicode scalar
  /// value, so that we can compute the display width of the character once we
  /// have the rest of it.
  SmallString<4> PartialUTF8Char;

  void write_impl(const char *Ptr, size_t Size) override;

  /// current_pos - Return the current position within the stream,
  /// not counting the bytes currently in the buffer.
  uint64_t current_pos() const override {
    // Our current position in the stream is all the contents which have been
    // written to the underlying stream (*not* the current position of the
    // underlying stream).
    return TheStream->tell();
  }

  /// ComputePosition - Examine the given output buffer and figure out the new
  /// position after output. This is safe to call multiple times on the same
  /// buffer, as it records the most recently scanned character and resumes from
  /// there when the buffer has not been flushed.
  void ComputePosition(const char *Ptr, size_t size);

  /// UpdatePosition - scan the characters in [Ptr, Ptr+Size), and update the
  /// line and column numbers. Unlike ComputePosition, this must be called
  /// exactly once on each region of the buffer.
  void UpdatePosition(const char *Ptr, size_t Size);

  void setStream(raw_ostream &Stream) {
    releaseStream();

    TheStream = &Stream;

    // This formatted_raw_ostream inherits from raw_ostream, so it'll do its
    // own buffering, and it doesn't need or want TheStream to do another
    // layer of buffering underneath. Resize the buffer to what TheStream
    // had been using, and tell TheStream not to do its own buffering.
    if (size_t BufferSize = TheStream->GetBufferSize())
      SetBufferSize(BufferSize);
    else
      SetUnbuffered();
    TheStream->SetUnbuffered();

    Scanned = nullptr;
  }

public:
  /// formatted_raw_ostream - Wrap the given stream, tracking the line and
  /// column position of the data written to it.
  ///
  /// As a side effect, the given Stream is set to be Unbuffered.
  /// This is because formatted_raw_ostream does its own buffering,
  /// so it doesn't want another layer of buffering to be happening
  /// underneath it.
  ///
  formatted_raw_ostream(raw_ostream &Stream)
      : TheStream(nullptr), Position(0, 0) {
    setStream(Stream);
  }
  explicit formatted_raw_ostream() : TheStream(nullptr), Position(0, 0) {
    Scanned = nullptr;
  }

  ~formatted_raw_ostream() override {
    flush();
    releaseStream();
  }

  /// PadToColumn - Align the output to some column number.  If the current
  /// column is already equal to or more than NewCol, PadToColumn inserts one
  /// space.
  ///
  /// \param NewCol - The column to move to.
  formatted_raw_ostream &PadToColumn(unsigned NewCol);

  unsigned getColumn() {
    // Calculate current position, taking buffer contents into account.
    ComputePosition(getBufferStart(), GetNumBytesInBuffer());
    return Position.first;
  }

  unsigned getLine() {
    // Calculate current position, taking buffer contents into account.
    ComputePosition(getBufferStart(), GetNumBytesInBuffer());
    return Position.second;
  }

  raw_ostream &resetColor() override {
    TheStream->resetColor();
    return *this;
  }

  raw_ostream &reverseColor() override {
    TheStream->reverseColor();
    return *this;
  }

  raw_ostream &changeColor(enum Colors Color, bool Bold, bool BG) override {
    TheStream->changeColor(Color, Bold, BG);
    return *this;
  }

  bool is_displayed() const override {
    return TheStream->is_displayed();
  }

private:
  void releaseStream() {
    // Transfer the buffer settings from this raw_ostream back to the underlying
    // stream.
    if (!TheStream)
      return;
    if (size_t BufferSize = GetBufferSize())
      TheStream->SetBufferSize(BufferSize);
    else
      TheStream->SetUnbuffered();
  }
};
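
// A minimal usage sketch (the column choice is arbitrary): pad each line of
// output to a fixed column before appending a trailing comment.
inline void writeLineWithComment(formatted_raw_ostream &FOS, StringRef Text,
                                 StringRef Comment) {
  FOS << "  " << Text;
  FOS.PadToColumn(40); // Inserts at least one space if already at/past col 40.
  FOS << "; " << Comment << '\n';
}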

/// fouts() - This returns a reference to a formatted_raw_ostream for
/// standard output.  Use it like: fouts() << "foo" << "bar";
formatted_raw_ostream &fouts();

/// ferrs() - This returns a reference to a formatted_raw_ostream for
/// standard error.  Use it like: ferrs() << "foo" << "bar";
formatted_raw_ostream &ferrs();

/// fdbgs() - This returns a reference to a formatted_raw_ostream for
/// debug output.  Use it like: fdbgs() << "foo" << "bar";
formatted_raw_ostream &fdbgs();

} // end llvm namespace


#endif
/*
   xxHash - Extremely Fast Hash algorithm
   Header File
   Copyright (C) 2012-2016, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - xxHash source repository : https://github.com/Cyan4973/xxHash
*/

/* Based on revision d2df04efcbef7d7f6886d345861e5dfda4edacc1; removed
 * everything but a simple interface for computing XXH64. */

#ifndef LLVM_SUPPORT_XXHASH_H
#define LLVM_SUPPORT_XXHASH_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"

namespace llvm {
uint64_t xxHash64(llvm::StringRef Data);
uint64_t xxHash64(llvm::ArrayRef<uint8_t> Data);

uint64_t xxh3_64bits(ArrayRef<uint8_t> data);
inline uint64_t xxh3_64bits(StringRef data) {
  return xxh3_64bits(ArrayRef(data.bytes_begin(), data.size()));
}
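
// Usage sketch: both overloads hash raw bytes, so the following agree:
//   uint64_t H1 = xxh3_64bits(StringRef("abc"));
//   uint64_t H2 = xxh3_64bits(ArrayRef<uint8_t>{'a', 'b', 'c'});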
}

#endif
//===- llvm/Support/Unicode.h - Unicode character properties  -*- C++ -*-=====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines functions that allow querying certain properties of Unicode
// characters.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_UNICODE_H
#define LLVM_SUPPORT_UNICODE_H

#include "llvm/ADT/SmallString.h"
#include <optional>
#include <string>

namespace llvm {
class StringRef;

namespace sys {
namespace unicode {

enum ColumnWidthErrors {
  ErrorInvalidUTF8 = -2,
  ErrorNonPrintableCharacter = -1
};

/// Determines if a character is likely to be displayed correctly on the
/// terminal. The exact answer would depend on the specific terminal, so we
/// define semantics suitable for the generic case of a terminal capable of
/// outputting Unicode characters.
///
/// Printable codepoints are those in the categories L, M, N, P, S and Zs
/// \return true if the character is considered printable.
bool isPrintable(int UCS);

// Formatting codepoints are codepoints in the Cf category.
bool isFormatting(int UCS);

/// Gets the number of positions the UTF8-encoded \p Text is likely to occupy
/// when output on a terminal ("character width"). This depends on the
/// implementation of the terminal, and there's no standard definition of
/// character width.
///
/// The implementation defines it in a way that is expected to be compatible
/// with a generic Unicode-capable terminal.
///
/// \return Character width:
///   * ErrorNonPrintableCharacter (-1) if \p Text contains non-printable
///     characters (as identified by isPrintable);
///   * 0 for each non-spacing and enclosing combining mark;
///   * 2 for each CJK character excluding halfwidth forms;
///   * 1 for each of the remaining characters.
int columnWidthUTF8(StringRef Text);
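
// For example (assuming a generic Unicode-capable terminal):
//   columnWidthUTF8("abc") == 3              // three narrow characters
//   columnWidthUTF8("\xE4\xB8\xAD") == 2     // one CJK character (U+4E2D)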

/// Fold the input Unicode character according to the simple Unicode
/// case-folding rules.
int foldCharSimple(int C);

/// Maps the name or the alias of a Unicode character to its associated
/// codepoints.
/// The names and aliases are derived from UnicodeData.txt and NameAliases.txt
/// For compatibility with the semantics of named character escape sequences in
/// C++, this mapping does an exact match sensitive to casing and spacing.
/// \return The codepoint of the corresponding character, if any.
std::optional<char32_t> nameToCodepointStrict(StringRef Name);

struct LooseMatchingResult {
  char32_t CodePoint;
  SmallString<64> Name;
};

std::optional<LooseMatchingResult> nameToCodepointLooseMatching(StringRef Name);

struct MatchForCodepointName {
  std::string Name;
  uint32_t Distance = 0;
  char32_t Value = 0;
};

SmallVector<MatchForCodepointName>
nearestMatchesForCodepointName(StringRef Pattern, std::size_t MaxMatchesCount);

} // namespace unicode
} // namespace sys
} // namespace llvm

#endif
//===- llvm/Support/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::Mutex class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MUTEX_H
#define LLVM_SUPPORT_MUTEX_H

#include "llvm/Support/Threading.h"
#include <cassert>
#include <mutex>

namespace llvm
{
  namespace sys
  {
    /// SmartMutex - A mutex with a compile time constant parameter that
    /// indicates whether this mutex should become a no-op when we're not
    /// running in multithreaded mode.
    template<bool mt_only>
    class SmartMutex {
      std::recursive_mutex impl;
      unsigned acquired = 0;

    public:
      bool lock() {
        if (!mt_only || llvm_is_multithreaded()) {
          impl.lock();
          return true;
        }
        // Single-threaded debugging code.  This would be racy in
        // multithreaded mode, but provides some basic checks in single
        // threaded mode.
        ++acquired;
        return true;
      }

      bool unlock() {
        if (!mt_only || llvm_is_multithreaded()) {
          impl.unlock();
          return true;
        }
        // Single-threaded debugging code.  This would be racy in
        // multithreaded mode, but provides some basic checks in single
        // threaded mode.
        assert(acquired && "Lock not acquired before release!");
        --acquired;
        return true;
      }

      bool try_lock() {
        if (!mt_only || llvm_is_multithreaded())
          return impl.try_lock();
        return true;
      }
    };

    /// Mutex - A standard, always enforced mutex.
    typedef SmartMutex<false> Mutex;

    template <bool mt_only>
    using SmartScopedLock = std::lock_guard<SmartMutex<mt_only>>;

    typedef SmartScopedLock<false> ScopedLock;
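
    // A minimal usage sketch: guard shared state with a scoped lock. With
    // mt_only = true the mutex degrades to a cheap counter check when LLVM
    // runs single-threaded.
    //
    //   static SmartMutex<true> SharedStateLock;
    //   void touchSharedState() {
    //     SmartScopedLock<true> Guard(SharedStateLock);
    //     // ... mutate shared state ...
    //   }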
  }
}

#endif
//===-- OptimizedStructLayout.h - Struct layout algorithm ---------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file provides an interface for laying out a sequence of fields
/// as a struct in a way that attempts to minimizes the total space
/// requirements of the struct while still satisfying the layout
/// requirements of the individual fields.  The resulting layout may be
/// substantially more compact than simply laying out the fields in their
/// original order.
///
/// Fields may be pre-assigned fixed offsets.  They may also be given sizes
/// that are not multiples of their alignments.  There is currently no
/// way to describe that a field has interior padding that other fields may
/// be allocated into.
///
/// This algorithm does not claim to be "optimal" for several reasons:
///
/// - First, it does not guarantee that the result is minimal in size.
///   There is no known efficient algorithm to achieve minimality for
///   unrestricted inputs.  Nonetheless, this algorithm should usually
///   produce layouts that are quite compact in practice.
///
/// - Second, there are other ways that a struct layout could be optimized
///   besides space usage, such as locality.  This layout may have a mixed
///   impact on locality: less overall memory may be used, but adjacent
///   fields in the original array may be moved further from one another.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_OPTIMIZEDSTRUCTLAYOUT_H
#define LLVM_SUPPORT_OPTIMIZEDSTRUCTLAYOUT_H

#include "llvm/Support/Alignment.h"
#include "llvm/ADT/ArrayRef.h"
#include <utility>

namespace llvm {

/// A field in a structure.
struct OptimizedStructLayoutField {
  /// A special value for Offset indicating that the field can be moved
  /// anywhere.
  static constexpr uint64_t FlexibleOffset = ~(uint64_t)0;

  OptimizedStructLayoutField(const void *Id, uint64_t Size, Align Alignment,
                             uint64_t FixedOffset = FlexibleOffset)
      : Offset(FixedOffset), Size(Size), Id(Id), Alignment(Alignment) {
    assert(Size > 0 && "adding an empty field to the layout");
  }

  /// The offset of this field in the final layout.  If this is
  /// initialized to FlexibleOffset, layout will overwrite it with
  /// the assigned offset of the field.
  uint64_t Offset;

  /// The required size of this field in bytes.  Does not have to be
  /// a multiple of Alignment.  Must be non-zero.
  uint64_t Size;

  /// An opaque value which uniquely identifies this field.
  const void *Id;

  /// Private scratch space for the algorithm.  The implementation
  /// must treat this as uninitialized memory on entry.
  void *Scratch;

  /// The required alignment of this field.
  Align Alignment;

  /// Return true if this field has been assigned a fixed offset.
  /// After layout, this will be true of all the fields.
  bool hasFixedOffset() const {
    return (Offset != FlexibleOffset);
  }

  /// Given that this field has a fixed offset, return the offset
  /// of the first byte following it.
  uint64_t getEndOffset() const {
    assert(hasFixedOffset());
    return Offset + Size;
  }
};

/// Compute a layout for a struct containing the given fields, making a
/// best-effort attempt to minimize the amount of space required.
///
/// Two features are supported which require a more careful solution
/// than the well-known "sort by decreasing alignment" solution:
///
/// - Fields may be assigned a fixed offset in the layout.  If there are
///   gaps among the fixed-offset fields, the algorithm may attempt
///   to allocate flexible-offset fields into those gaps.  If that's
///   undesirable, the caller should "block out" those gaps by e.g.
///   just creating a single fixed-offset field that represents the
///   entire "header".
///
/// - The size of a field is not required to be a multiple of, or even
///   greater than, the field's required alignment.  The only constraint
///   on fields is that they must not be zero-sized.
///
/// To simplify the implementation, any fixed-offset fields in the
/// layout must appear at the start of the field array, and they must
/// be ordered by increasing offset.
///
/// The algorithm will produce a guaranteed-minimal layout with no
/// interior padding in the following "C-style" case:
///
/// - every field's size is a multiple of its required alignment and
/// - either no fields have initially fixed offsets, or the fixed-offset
///   fields have no interior padding and end at an offset that is at
///   least as aligned as all the flexible-offset fields.
///
/// Otherwise, while the algorithm will make a best-effort attempt to
/// avoid padding, it cannot guarantee a minimal layout, as there is
/// no known efficient algorithm for doing so.
///
/// The layout produced by this algorithm may not be stable across LLVM
/// releases.  Do not use this anywhere where ABI stability is required.
///
/// Flexible-offset fields with the same size and alignment will be ordered
/// the same way they were in the initial array.  Otherwise the current
/// algorithm makes no effort to preserve the initial order of
/// flexible-offset fields.
///
/// On return, all fields will have been assigned a fixed offset, and the
/// array will be sorted in order of ascending offsets.  Note that this
/// means that the fixed-offset fields may no longer form a strict prefix
/// if there's any padding before they end.
///
/// The return value is the total size of the struct and its required
/// alignment.  Note that the total size is not rounded up to a multiple
/// of the required alignment; clients which require this can do so easily.
std::pair<uint64_t, Align> performOptimizedStructLayout(
                        MutableArrayRef<OptimizedStructLayoutField> Fields);
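
// A minimal usage sketch (A, B, and C are hypothetical identity tags): build
// the field array, run the layout, and read the assigned offsets back out of
// the (now offset-sorted) array.
inline std::pair<uint64_t, Align> exampleOptimizedLayout() {
  static const char A = 0, B = 0, C = 0; // Used only as opaque field Ids.
  OptimizedStructLayoutField Fields[] = {
      {&A, /*Size=*/1, Align(1)},
      {&B, /*Size=*/8, Align(8)},
      {&C, /*Size=*/4, Align(4)},
  };
  return performOptimizedStructLayout(Fields);
}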

} // namespace llvm

#endif
#undef LLVM_REVISION
#undef LLVM_REPOSITORY
//===- PerThreadBumpPtrAllocator.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PERTHREADBUMPPTRALLOCATOR_H
#define LLVM_SUPPORT_PERTHREADBUMPPTRALLOCATOR_H

#include "llvm/Support/Allocator.h"
#include "llvm/Support/Parallel.h"

namespace llvm {
namespace parallel {

/// PerThreadAllocator is used in conjunction with ThreadPoolExecutor to allow
/// per-thread allocations. It wraps a possibly thread-unsafe allocator,
/// e.g. BumpPtrAllocator. PerThreadAllocator must be used only with the main
/// thread or with threads created by ThreadPoolExecutor, as it uses getThreadIndex,
/// which is set by ThreadPoolExecutor. To work properly, ThreadPoolExecutor
/// should be initialized before PerThreadAllocator is created.
/// TODO: The same approach might be implemented for ThreadPool.

template <typename AllocatorTy>
class PerThreadAllocator
    : public AllocatorBase<PerThreadAllocator<AllocatorTy>> {
public:
  PerThreadAllocator()
      : NumOfAllocators(parallel::getThreadCount()),
        Allocators(std::make_unique<AllocatorTy[]>(NumOfAllocators)) {}

  /// \defgroup Methods that may be called asynchronously:
  ///
  /// @{

  using AllocatorBase<PerThreadAllocator<AllocatorTy>>::Allocate;

  using AllocatorBase<PerThreadAllocator<AllocatorTy>>::Deallocate;

  /// Allocate \a Size bytes of \a Alignment aligned memory.
  void *Allocate(size_t Size, size_t Alignment) {
    assert(getThreadIndex() < NumOfAllocators);
    return Allocators[getThreadIndex()].Allocate(Size, Alignment);
  }

  /// Deallocate \a Ptr to \a Size bytes of memory allocated by this
  /// allocator.
  void Deallocate(const void *Ptr, size_t Size, size_t Alignment) {
    assert(getThreadIndex() < NumOfAllocators);
    return Allocators[getThreadIndex()].Deallocate(Ptr, Size, Alignment);
  }

  /// Return allocator corresponding to the current thread.
  AllocatorTy &getThreadLocalAllocator() {
    assert(getThreadIndex() < NumOfAllocators);
    return Allocators[getThreadIndex()];
  }

  /// Return the number of used allocators.
  size_t getNumberOfAllocators() const { return NumOfAllocators; }
  /// @}

  /// \defgroup Methods that must not be called asynchronously:
  ///
  /// @{

  /// Reset state of allocators.
  void Reset() {
    for (size_t Idx = 0; Idx < getNumberOfAllocators(); Idx++)
      Allocators[Idx].Reset();
  }

  /// Return total memory size used by all allocators.
  size_t getTotalMemory() const {
    size_t TotalMemory = 0;

    for (size_t Idx = 0; Idx < getNumberOfAllocators(); Idx++)
      TotalMemory += Allocators[Idx].getTotalMemory();

    return TotalMemory;
  }

  /// Return allocated size by all allocators.
  size_t getBytesAllocated() const {
    size_t BytesAllocated = 0;

    for (size_t Idx = 0; Idx < getNumberOfAllocators(); Idx++)
      BytesAllocated += Allocators[Idx].getBytesAllocated();

    return BytesAllocated;
  }

  /// Set red zone for all allocators.
  void setRedZoneSize(size_t NewSize) {
    for (size_t Idx = 0; Idx < getNumberOfAllocators(); Idx++)
      Allocators[Idx].setRedZoneSize(NewSize);
  }

  /// Print statistic for each allocator.
  void PrintStats() const {
    for (size_t Idx = 0; Idx < getNumberOfAllocators(); Idx++) {
      errs() << "\n Allocator " << Idx << "\n";
      Allocators[Idx].PrintStats();
    }
  }
  /// @}

protected:
  size_t NumOfAllocators;
  std::unique_ptr<AllocatorTy[]> Allocators;
};

using PerThreadBumpPtrAllocator = PerThreadAllocator<BumpPtrAllocator>;
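
// A minimal usage sketch: allocate from per-thread arenas inside tasks running
// on the ThreadPoolExecutor (assumed to be initialized first), then reset all
// arenas from a single thread once the parallel phase is done.
//
//   PerThreadBumpPtrAllocator Alloc;
//   parallelFor(0, N, [&](size_t i) {
//     void *P = Alloc.Allocate(/*Size=*/64, /*Alignment=*/8);
//     // ... use P ...
//   });
//   Alloc.Reset();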

} // end namespace parallel
} // end namespace llvm

#endif // LLVM_SUPPORT_PERTHREADBUMPPTRALLOCATOR_H
//===- GenericDomTreeConstruction.h - Dominator Calculation ------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Generic dominator tree construction - this file provides routines to
/// construct immediate dominator information for a flow-graph based on the
/// Semi-NCA algorithm described in this dissertation:
///
///   [1] Linear-Time Algorithms for Dominators and Related Problems
///   Loukas Georgiadis, Princeton University, November 2005, pp. 21-23:
///   ftp://ftp.cs.princeton.edu/reports/2005/737.pdf
///
/// The Semi-NCA algorithm runs in O(n^2) worst-case time but is usually
/// slightly faster than Simple Lengauer-Tarjan in practice.
///
/// O(n^2) worst cases happen when the computation of nearest common ancestors
/// requires O(n) average time, which is very unlikely in the real world. If this
/// ever turns out to be an issue, consider implementing a hybrid algorithm
/// that uses SLT to perform full constructions and SemiNCA for incremental
/// updates.
///
/// The file uses the Depth Based Search algorithm to perform incremental
/// updates (insertion and deletions). The implemented algorithm is based on
/// this publication:
///
///   [2] An Experimental Study of Dynamic Dominators
///   Loukas Georgiadis, et al., April 12 2016, pp. 5-7, 9-10:
///   https://arxiv.org/pdf/1604.02711.pdf
///
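/// For context, this machinery is normally driven through the public
/// dominator-tree entry points rather than invoked directly. A sketch
/// (assuming an llvm::Function &F and basic blocks A and B):
///
/// \code
///   llvm::DominatorTree DT;
///   DT.recalculate(F);                                      // Full Semi-NCA.
///   DT.applyUpdates({{llvm::DominatorTree::Insert, A, B}}); // Incremental.
/// \endcode
///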
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
#define LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTree.h"
#include <optional>
#include <queue>

#define DEBUG_TYPE "dom-tree-builder"

namespace llvm {
namespace DomTreeBuilder {

template <typename DomTreeT>
struct SemiNCAInfo {
  using NodePtr = typename DomTreeT::NodePtr;
  using NodeT = typename DomTreeT::NodeType;
  using TreeNodePtr = DomTreeNodeBase<NodeT> *;
  using RootsT = decltype(DomTreeT::Roots);
  static constexpr bool IsPostDom = DomTreeT::IsPostDominator;
  using GraphDiffT = GraphDiff<NodePtr, IsPostDom>;

  // Information record used by Semi-NCA during tree construction.
  struct InfoRec {
    unsigned DFSNum = 0;
    unsigned Parent = 0;
    unsigned Semi = 0;
    NodePtr Label = nullptr;
    NodePtr IDom = nullptr;
    SmallVector<NodePtr, 2> ReverseChildren;
  };

  // Number to node mapping is 1-based. Initialize the mapping to start with
  // a dummy element.
  std::vector<NodePtr> NumToNode = {nullptr};
  DenseMap<NodePtr, InfoRec> NodeToInfo;

  using UpdateT = typename DomTreeT::UpdateType;
  using UpdateKind = typename DomTreeT::UpdateKind;
  struct BatchUpdateInfo {
    // Note: Updates inside PreViewCFG are already legalized.
    BatchUpdateInfo(GraphDiffT &PreViewCFG, GraphDiffT *PostViewCFG = nullptr)
        : PreViewCFG(PreViewCFG), PostViewCFG(PostViewCFG),
          NumLegalized(PreViewCFG.getNumLegalizedUpdates()) {}

    // Remembers if the whole tree was recalculated at some point during the
    // current batch update.
    bool IsRecalculated = false;
    GraphDiffT &PreViewCFG;
    GraphDiffT *PostViewCFG;
    const size_t NumLegalized;
  };

  BatchUpdateInfo *BatchUpdates;
  using BatchUpdatePtr = BatchUpdateInfo *;

  // If BUI is a nullptr, then there's no batch update in progress.
  SemiNCAInfo(BatchUpdatePtr BUI) : BatchUpdates(BUI) {}

  void clear() {
    NumToNode = {nullptr}; // Restore to initial state with a dummy start node.
    NodeToInfo.clear();
    // Don't reset the pointer to BatchUpdateInfo here -- if there's an update
    // in progress, we need this information to continue it.
  }

  template <bool Inversed>
  static SmallVector<NodePtr, 8> getChildren(NodePtr N, BatchUpdatePtr BUI) {
    if (BUI)
      return BUI->PreViewCFG.template getChildren<Inversed>(N);
    return getChildren<Inversed>(N);
  }

  template <bool Inversed>
  static SmallVector<NodePtr, 8> getChildren(NodePtr N) {
    using DirectedNodeT =
        std::conditional_t<Inversed, Inverse<NodePtr>, NodePtr>;
    auto R = children<DirectedNodeT>(N);
    SmallVector<NodePtr, 8> Res(detail::reverse_if<!Inversed>(R));

    // Remove nullptr children for clang.
    llvm::erase_value(Res, nullptr);
    return Res;
  }

  NodePtr getIDom(NodePtr BB) const {
    auto InfoIt = NodeToInfo.find(BB);
    if (InfoIt == NodeToInfo.end()) return nullptr;

    return InfoIt->second.IDom;
  }

  TreeNodePtr getNodeForBlock(NodePtr BB, DomTreeT &DT) {
    if (TreeNodePtr Node = DT.getNode(BB)) return Node;

    // Haven't calculated this node yet?  Get or calculate the node for the
    // immediate dominator.
    NodePtr IDom = getIDom(BB);

    assert(IDom || DT.DomTreeNodes[nullptr]);
    TreeNodePtr IDomNode = getNodeForBlock(IDom, DT);

    // Add a new tree node for this NodeT, and link it as a child of
    // IDomNode
    return DT.createChild(BB, IDomNode);
  }

  static bool AlwaysDescend(NodePtr, NodePtr) { return true; }

  struct BlockNamePrinter {
    NodePtr N;

    BlockNamePrinter(NodePtr Block) : N(Block) {}
    BlockNamePrinter(TreeNodePtr TN) : N(TN ? TN->getBlock() : nullptr) {}

    friend raw_ostream &operator<<(raw_ostream &O, const BlockNamePrinter &BP) {
      if (!BP.N)
        O << "nullptr";
      else
        BP.N->printAsOperand(O, false);

      return O;
    }
  };

  using NodeOrderMap = DenseMap<NodePtr, unsigned>;

  // Custom DFS implementation which can skip nodes based on a provided
  // predicate. It also collects ReverseChildren so that we don't have to spend
  // time getting predecessors in SemiNCA.
  //
  // If IsReverse is set to true, the DFS walk will be performed backwards
  // relative to IsPostDom -- using reverse edges for dominators and forward
  // edges for postdominators.
  //
  // If SuccOrder is specified, the DFS traverses the children in that order;
  // otherwise, the order is implied by the results of getChildren().
  template <bool IsReverse = false, typename DescendCondition>
  unsigned runDFS(NodePtr V, unsigned LastNum, DescendCondition Condition,
                  unsigned AttachToNum,
                  const NodeOrderMap *SuccOrder = nullptr) {
    assert(V);
    SmallVector<NodePtr, 64> WorkList = {V};
    if (NodeToInfo.count(V) != 0) NodeToInfo[V].Parent = AttachToNum;

    while (!WorkList.empty()) {
      const NodePtr BB = WorkList.pop_back_val();
      auto &BBInfo = NodeToInfo[BB];

      // Visited nodes always have positive DFS numbers.
      if (BBInfo.DFSNum != 0) continue;
      BBInfo.DFSNum = BBInfo.Semi = ++LastNum;
      BBInfo.Label = BB;
      NumToNode.push_back(BB);

      constexpr bool Direction = IsReverse != IsPostDom;  // XOR.
      auto Successors = getChildren<Direction>(BB, BatchUpdates);
      if (SuccOrder && Successors.size() > 1)
        llvm::sort(
            Successors.begin(), Successors.end(), [=](NodePtr A, NodePtr B) {
              return SuccOrder->find(A)->second < SuccOrder->find(B)->second;
            });

      for (const NodePtr Succ : Successors) {
        const auto SIT = NodeToInfo.find(Succ);
        // Don't visit nodes more than once but remember to collect
        // ReverseChildren.
        if (SIT != NodeToInfo.end() && SIT->second.DFSNum != 0) {
          if (Succ != BB) SIT->second.ReverseChildren.push_back(BB);
          continue;
        }

        if (!Condition(BB, Succ)) continue;

        // It's fine to add Succ to the map, because we know that it will be
        // visited later.
        auto &SuccInfo = NodeToInfo[Succ];
        WorkList.push_back(Succ);
        SuccInfo.Parent = LastNum;
        SuccInfo.ReverseChildren.push_back(BB);
      }
    }

    return LastNum;
  }

  // V is a predecessor of W. eval() returns V if V < W, otherwise the minimum
  // of sdom(U), where U > W and there is a virtual forest path from U to V. The
  // virtual forest consists of linked edges of processed vertices.
  //
  // We can follow Parent pointers (virtual forest edges) to determine the
  // ancestor U with minimum sdom(U). But it is slow and thus we employ the path
  // compression technique to speed up to O(m*log(n)). Theoretically the virtual
  // forest can be organized as balanced trees to achieve almost linear
  // O(m*alpha(m,n)) running time. But it requires two auxiliary arrays (Size
  // and Child) and is unlikely to be faster than the simple implementation.
  //
  // For each vertex V, its Label points to the vertex with the minimal sdom(U)
  // (Semi) in its path from V (included) to NodeToInfo[V].Parent (excluded).
  NodePtr eval(NodePtr V, unsigned LastLinked,
               SmallVectorImpl<InfoRec *> &Stack) {
    InfoRec *VInfo = &NodeToInfo[V];
    if (VInfo->Parent < LastLinked)
      return VInfo->Label;

    // Store ancestors except the last (root of a virtual tree) into a stack.
    assert(Stack.empty());
    do {
      Stack.push_back(VInfo);
      VInfo = &NodeToInfo[NumToNode[VInfo->Parent]];
    } while (VInfo->Parent >= LastLinked);

    // Path compression. Point each vertex's Parent to the root and update its
    // Label if any of its ancestors (PInfo->Label) has a smaller Semi.
    const InfoRec *PInfo = VInfo;
    const InfoRec *PLabelInfo = &NodeToInfo[PInfo->Label];
    do {
      VInfo = Stack.pop_back_val();
      VInfo->Parent = PInfo->Parent;
      const InfoRec *VLabelInfo = &NodeToInfo[VInfo->Label];
      if (PLabelInfo->Semi < VLabelInfo->Semi)
        VInfo->Label = PInfo->Label;
      else
        PLabelInfo = VLabelInfo;
      PInfo = VInfo;
    } while (!Stack.empty());
    return VInfo->Label;
  }

  // This function requires DFS to be run before calling it.
  void runSemiNCA(DomTreeT &DT, const unsigned MinLevel = 0) {
    const unsigned NextDFSNum(NumToNode.size());
    // Initialize IDoms to spanning tree parents.
    for (unsigned i = 1; i < NextDFSNum; ++i) {
      const NodePtr V = NumToNode[i];
      auto &VInfo = NodeToInfo[V];
      VInfo.IDom = NumToNode[VInfo.Parent];
    }

    // Step #1: Calculate the semidominators of all vertices.
    SmallVector<InfoRec *, 32> EvalStack;
    for (unsigned i = NextDFSNum - 1; i >= 2; --i) {
      NodePtr W = NumToNode[i];
      auto &WInfo = NodeToInfo[W];

      // Initialize the semi dominator to point to the parent node.
      WInfo.Semi = WInfo.Parent;
      for (const auto &N : WInfo.ReverseChildren) {
        if (NodeToInfo.count(N) == 0)  // Skip unreachable predecessors.
          continue;

        const TreeNodePtr TN = DT.getNode(N);
        // Skip predecessors whose level is above the subtree we are processing.
        if (TN && TN->getLevel() < MinLevel)
          continue;

        unsigned SemiU = NodeToInfo[eval(N, i + 1, EvalStack)].Semi;
        if (SemiU < WInfo.Semi) WInfo.Semi = SemiU;
      }
    }

    // Step #2: Explicitly define the immediate dominator of each vertex.
    //          IDom[i] = NCA(SDom[i], SpanningTreeParent(i)).
    // Note that the parents were stored in IDoms and later got invalidated
    // during path compression in Eval.
    for (unsigned i = 2; i < NextDFSNum; ++i) {
      const NodePtr W = NumToNode[i];
      auto &WInfo = NodeToInfo[W];
      const unsigned SDomNum = NodeToInfo[NumToNode[WInfo.Semi]].DFSNum;
      NodePtr WIDomCandidate = WInfo.IDom;
      while (NodeToInfo[WIDomCandidate].DFSNum > SDomNum)
        WIDomCandidate = NodeToInfo[WIDomCandidate].IDom;

      WInfo.IDom = WIDomCandidate;
    }
  }

  // PostDominatorTree always has a virtual root that represents a virtual CFG
  // node serving as a single exit from the function. All the other exits (CFG
  // nodes with terminators) and nodes in infinite loops are logically connected
  // to this virtual CFG exit node.
  // This function maps a nullptr CFG node to the virtual root tree node.
  void addVirtualRoot() {
    assert(IsPostDom && "Only postdominators have a virtual root");
    assert(NumToNode.size() == 1 && "SNCAInfo must be freshly constructed");

    auto &BBInfo = NodeToInfo[nullptr];
    BBInfo.DFSNum = BBInfo.Semi = 1;
    BBInfo.Label = nullptr;

    NumToNode.push_back(nullptr);  // NumToNode[1] = nullptr;
  }

  // For postdominators, nodes with no forward successors are trivial roots that
  // are always selected as tree roots. Roots with forward successors correspond
  // to CFG nodes within infinite loops.
  static bool HasForwardSuccessors(const NodePtr N, BatchUpdatePtr BUI) {
    assert(N && "N must be a valid node");
    return !getChildren<false>(N, BUI).empty();
  }

  static NodePtr GetEntryNode(const DomTreeT &DT) {
    assert(DT.Parent && "Parent not set");
    return GraphTraits<typename DomTreeT::ParentPtr>::getEntryNode(DT.Parent);
  }

  // Finds all roots without relying on the set of roots already stored in the
  // tree.
  // We define roots to be some non-redundant set of the CFG nodes from which
  // (reverse) DFS walks can reach every node in the CFG.
  static RootsT FindRoots(const DomTreeT &DT, BatchUpdatePtr BUI) {
    assert(DT.Parent && "Parent pointer is not set");
    RootsT Roots;

    // For dominators, function entry CFG node is always a tree root node.
    if (!IsPostDom) {
      Roots.push_back(GetEntryNode(DT));
      return Roots;
    }

    SemiNCAInfo SNCA(BUI);

    // PostDominatorTree always has a virtual root.
    SNCA.addVirtualRoot();
    unsigned Num = 1;

    LLVM_DEBUG(dbgs() << "\t\tLooking for trivial roots\n");

    // Step #1: Find all the trivial roots that will definitely remain tree
    // roots.
    unsigned Total = 0;
    // It may happen that there are some new nodes in the CFG as a result of
    // the ongoing batch update, but we cannot really pretend that they don't
    // exist -- we won't see any outgoing or incoming edges to them, so it's
    // fine to discover them here, as they would end up appearing in the CFG at
    // some point anyway.
    for (const NodePtr N : nodes(DT.Parent)) {
      ++Total;
      // If it has no *successors*, it is definitely a root.
      if (!HasForwardSuccessors(N, BUI)) {
        Roots.push_back(N);
        // Run DFS not to walk this part of CFG later.
        Num = SNCA.runDFS(N, Num, AlwaysDescend, 1);
        LLVM_DEBUG(dbgs() << "Found a new trivial root: " << BlockNamePrinter(N)
                          << "\n");
        LLVM_DEBUG(dbgs() << "Last visited node: "
                          << BlockNamePrinter(SNCA.NumToNode[Num]) << "\n");
      }
    }

    LLVM_DEBUG(dbgs() << "\t\tLooking for non-trivial roots\n");

    // Step #2: Find all non-trivial root candidates. Those are CFG nodes that
    // are reverse-unreachable and were not visited by previous DFS walks (i.e. CFG
    // nodes in infinite loops).
    bool HasNonTrivialRoots = false;
    // Accounting for the virtual exit, see if we had any reverse-unreachable
    // nodes.
    if (Total + 1 != Num) {
      HasNonTrivialRoots = true;

      // SuccOrder is the order of blocks in the function. It is needed to make
      // the calculation of the FurthestAway node and the whole PostDomTree
      // immune to swap successors transformation (e.g. canonicalizing branch
      // predicates). SuccOrder is initialized lazily only for successors of
      // reverse unreachable nodes.
      std::optional<NodeOrderMap> SuccOrder;
      auto InitSuccOrderOnce = [&]() {
        SuccOrder = NodeOrderMap();
        for (const auto Node : nodes(DT.Parent))
          if (SNCA.NodeToInfo.count(Node) == 0)
            for (const auto Succ : getChildren<false>(Node, SNCA.BatchUpdates))
              SuccOrder->try_emplace(Succ, 0);

        // Add mapping for all entries of SuccOrder.
        unsigned NodeNum = 0;
        for (const auto Node : nodes(DT.Parent)) {
          ++NodeNum;
          auto Order = SuccOrder->find(Node);
          if (Order != SuccOrder->end()) {
            assert(Order->second == 0);
            Order->second = NodeNum;
          }
        }
      };

      // Make another DFS pass over all other nodes to find the
      // reverse-unreachable blocks, and find the furthest paths we'll be able
      // to make.
      // Note that this looks N^2, but it's really 2N worst case, if every node
      // is unreachable. This is because we are still going to only visit each
      // unreachable node once; we may just visit it in two directions,
      // depending on how lucky we get.
      for (const NodePtr I : nodes(DT.Parent)) {
        if (SNCA.NodeToInfo.count(I) == 0) {
          LLVM_DEBUG(dbgs()
                     << "\t\t\tVisiting node " << BlockNamePrinter(I) << "\n");
          // Find the furthest away we can get by following successors, then
          // follow them in reverse.  This gives us some reasonable answer about
          // the post-dom tree inside any infinite loop. In particular, it
          // guarantees we get to the farthest away point along *some*
          // path. This also matches GCC's behavior.
          // If we really wanted a totally complete picture of dominance inside
          // this infinite loop, we could do it with SCC-like algorithms to find
          // the lowest and highest points in the infinite loop.  In theory, it
          // would be nice to give the canonical backedge for the loop, but it's
          // expensive and does not always lead to a minimal set of roots.
          LLVM_DEBUG(dbgs() << "\t\t\tRunning forward DFS\n");

          if (!SuccOrder)
            InitSuccOrderOnce();
          assert(SuccOrder);

          const unsigned NewNum =
              SNCA.runDFS<true>(I, Num, AlwaysDescend, Num, &*SuccOrder);
          const NodePtr FurthestAway = SNCA.NumToNode[NewNum];
          LLVM_DEBUG(dbgs() << "\t\t\tFound a new furthest away node "
                            << "(non-trivial root): "
                            << BlockNamePrinter(FurthestAway) << "\n");
          Roots.push_back(FurthestAway);
          LLVM_DEBUG(dbgs() << "\t\t\tPrev DFSNum: " << Num << ", new DFSNum: "
                            << NewNum << "\n\t\t\tRemoving DFS info\n");
          for (unsigned i = NewNum; i > Num; --i) {
            const NodePtr N = SNCA.NumToNode[i];
            LLVM_DEBUG(dbgs() << "\t\t\t\tRemoving DFS info for "
                              << BlockNamePrinter(N) << "\n");
            SNCA.NodeToInfo.erase(N);
            SNCA.NumToNode.pop_back();
          }
          const unsigned PrevNum = Num;
          LLVM_DEBUG(dbgs() << "\t\t\tRunning reverse DFS\n");
          Num = SNCA.runDFS(FurthestAway, Num, AlwaysDescend, 1);
          for (unsigned i = PrevNum + 1; i <= Num; ++i)
            LLVM_DEBUG(dbgs() << "\t\t\t\tfound node "
                              << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");
        }
      }
    }

    LLVM_DEBUG(dbgs() << "Total: " << Total << ", Num: " << Num << "\n");
    LLVM_DEBUG(dbgs() << "Discovered CFG nodes:\n");
    LLVM_DEBUG(for (size_t i = 0; i <= Num; ++i) dbgs()
               << i << ": " << BlockNamePrinter(SNCA.NumToNode[i]) << "\n");

    assert((Total + 1 == Num) && "Everything should have been visited");

    // Step #3: If we found some non-trivial roots, make them non-redundant.
    if (HasNonTrivialRoots) RemoveRedundantRoots(DT, BUI, Roots);

    LLVM_DEBUG(dbgs() << "Found roots: ");
    LLVM_DEBUG(for (auto *Root
                    : Roots) dbgs()
               << BlockNamePrinter(Root) << " ");
    LLVM_DEBUG(dbgs() << "\n");

    return Roots;
  }
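
  // Illustrative example (not part of the algorithm): for a CFG such as
  //   entry -> a,  a -> b,  b -> a
  // (an infinite loop with no exit), no node reaches the virtual exit, so
  // Step #1 finds no trivial roots. Step #2 then runs a forward DFS from an
  // unvisited node, picks the furthest-away node inside the loop as a
  // non-trivial root, and numbers the remaining nodes by walking backwards
  // from it.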

  // This function only makes sense for postdominators.
  // We define roots to be some set of CFG nodes where (reverse) DFS walks have
  // to start in order to visit all the CFG nodes (including the
  // reverse-unreachable ones).
  // When the search for non-trivial roots is done it may happen that some of
  // the non-trivial roots are reverse-reachable from other non-trivial roots,
  // which makes them redundant. This function removes them from the set of
  // input roots.
  static void RemoveRedundantRoots(const DomTreeT &DT, BatchUpdatePtr BUI,
                                   RootsT &Roots) {
    assert(IsPostDom && "This function is for postdominators only");
    LLVM_DEBUG(dbgs() << "Removing redundant roots\n");

    SemiNCAInfo SNCA(BUI);

    for (unsigned i = 0; i < Roots.size(); ++i) {
      auto &Root = Roots[i];
      // Trivial roots are always non-redundant.
      if (!HasForwardSuccessors(Root, BUI)) continue;
      LLVM_DEBUG(dbgs() << "\tChecking if " << BlockNamePrinter(Root)
                        << " remains a root\n");
      SNCA.clear();
      // Do a forward walk looking for the other roots.
      const unsigned Num = SNCA.runDFS<true>(Root, 0, AlwaysDescend, 0);
      // Skip the start node and begin from the second one (note that DFS uses
      // 1-based indexing).
      for (unsigned x = 2; x <= Num; ++x) {
        const NodePtr N = SNCA.NumToNode[x];
        // If we found another root in a (forward) DFS walk, remove the current
        // root from the set of roots, as it is reverse-reachable from the other
        // one.
        if (llvm::is_contained(Roots, N)) {
          LLVM_DEBUG(dbgs() << "\tForward DFS walk found another root "
                            << BlockNamePrinter(N) << "\n\tRemoving root "
                            << BlockNamePrinter(Root) << "\n");
          std::swap(Root, Roots.back());
          Roots.pop_back();

          // Root at the back takes the current root's place.
          // Start the next loop iteration with the same index.
          --i;
          break;
        }
      }
    }
  }
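
  // Illustrative example (hypothetical CFG): if FindRoots picked candidate
  // roots A and B from two separate walks, and a forward DFS from A reaches B,
  // then A is reverse-reachable from B and gets dropped here; only B remains
  // a root.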

  template <typename DescendCondition>
  void doFullDFSWalk(const DomTreeT &DT, DescendCondition DC) {
    if (!IsPostDom) {
      assert(DT.Roots.size() == 1 && "Dominators should have a single root");
      runDFS(DT.Roots[0], 0, DC, 0);
      return;
    }

    addVirtualRoot();
    unsigned Num = 1;
    for (const NodePtr Root : DT.Roots) Num = runDFS(Root, Num, DC, 0);
  }

  static void CalculateFromScratch(DomTreeT &DT, BatchUpdatePtr BUI) {
    auto *Parent = DT.Parent;
    DT.reset();
    DT.Parent = Parent;
    // If the update is using the actual CFG, BUI is null. If it's using a view,
    // BUI is non-null and the PreViewCFG is used. When calculating from
    // scratch, make the PreViewCFG equal to the PostViewCFG, so Post is used.
    BatchUpdatePtr PostViewBUI = nullptr;
    if (BUI && BUI->PostViewCFG) {
      BUI->PreViewCFG = *BUI->PostViewCFG;
      PostViewBUI = BUI;
    }
    // This is rebuilding the whole tree, not incrementally, but PostViewBUI is
    // used in case the caller needs a DT update with a CFGView.
    SemiNCAInfo SNCA(PostViewBUI);

    // Step #0: Number blocks in depth-first order and initialize variables used
    // in later stages of the algorithm.
    DT.Roots = FindRoots(DT, PostViewBUI);
    SNCA.doFullDFSWalk(DT, AlwaysDescend);

    SNCA.runSemiNCA(DT);
    if (BUI) {
      BUI->IsRecalculated = true;
      LLVM_DEBUG(
          dbgs() << "DomTree recalculated, skipping future batch updates\n");
    }

    if (DT.Roots.empty()) return;

    // Add a node for the root. If the tree is a PostDominatorTree it will be
    // the virtual exit (denoted by (BasicBlock *) nullptr) which postdominates
    // all real exits (including multiple exit blocks, infinite loops).
    NodePtr Root = IsPostDom ? nullptr : DT.Roots[0];

    DT.RootNode = DT.createNode(Root);
    SNCA.attachNewSubtree(DT, DT.RootNode);
  }

  void attachNewSubtree(DomTreeT& DT, const TreeNodePtr AttachTo) {
    // Attach the first unreachable block to AttachTo.
    NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
    // Loop over all of the discovered blocks in the function...
    for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
      NodePtr W = NumToNode[i];

      // Don't replace this with 'count'; the insertion side effect is important
      if (DT.DomTreeNodes[W]) continue;  // Haven't calculated this node yet?

      NodePtr ImmDom = getIDom(W);

      // Get or calculate the node for the immediate dominator.
      TreeNodePtr IDomNode = getNodeForBlock(ImmDom, DT);

      // Add a new tree node for this BasicBlock, and link it as a child of
      // IDomNode.
      DT.createChild(W, IDomNode);
    }
  }

  void reattachExistingSubtree(DomTreeT &DT, const TreeNodePtr AttachTo) {
    NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
    for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
      const NodePtr N = NumToNode[i];
      const TreeNodePtr TN = DT.getNode(N);
      assert(TN);
      const TreeNodePtr NewIDom = DT.getNode(NodeToInfo[N].IDom);
      TN->setIDom(NewIDom);
    }
  }

  // Helper struct used during edge insertions.
  struct InsertionInfo {
    struct Compare {
      bool operator()(TreeNodePtr LHS, TreeNodePtr RHS) const {
        return LHS->getLevel() < RHS->getLevel();
      }
    };

    // Bucket queue of tree nodes ordered by descending level. For simplicity,
    // we use a priority_queue here.
    std::priority_queue<TreeNodePtr, SmallVector<TreeNodePtr, 8>,
                        Compare>
        Bucket;
    SmallDenseSet<TreeNodePtr, 8> Visited;
    SmallVector<TreeNodePtr, 8> Affected;
#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
    SmallVector<TreeNodePtr, 8> VisitedUnaffected;
#endif
  };

  static void InsertEdge(DomTreeT &DT, const BatchUpdatePtr BUI,
                         const NodePtr From, const NodePtr To) {
    assert((From || IsPostDom) &&
           "From has to be a valid CFG node or a virtual root");
    assert(To && "Cannot be a nullptr");
    LLVM_DEBUG(dbgs() << "Inserting edge " << BlockNamePrinter(From) << " -> "
                      << BlockNamePrinter(To) << "\n");
    TreeNodePtr FromTN = DT.getNode(From);

    if (!FromTN) {
      // Ignore edges from unreachable nodes for (forward) dominators.
      if (!IsPostDom) return;

      // The unreachable node becomes a new root -- a tree node for it.
      TreeNodePtr VirtualRoot = DT.getNode(nullptr);
      FromTN = DT.createChild(From, VirtualRoot);
      DT.Roots.push_back(From);
    }

    DT.DFSInfoValid = false;

    const TreeNodePtr ToTN = DT.getNode(To);
    if (!ToTN)
      InsertUnreachable(DT, BUI, FromTN, To);
    else
      InsertReachable(DT, BUI, FromTN, ToTN);
  }

  // Determines if some existing root becomes reverse-reachable after the
  // insertion. Rebuilds the whole tree if that situation happens.
  static bool UpdateRootsBeforeInsertion(DomTreeT &DT, const BatchUpdatePtr BUI,
                                         const TreeNodePtr From,
                                         const TreeNodePtr To) {
    assert(IsPostDom && "This function is only for postdominators");
    // Destination node is not attached to the virtual root, so it cannot be a
    // root.
    if (!DT.isVirtualRoot(To->getIDom())) return false;

    if (!llvm::is_contained(DT.Roots, To->getBlock()))
      return false;  // To is not a root, nothing to update.

    LLVM_DEBUG(dbgs() << "\t\tAfter the insertion, " << BlockNamePrinter(To)
                      << " is no longer a root\n\t\tRebuilding the tree!!!\n");

    CalculateFromScratch(DT, BUI);
    return true;
  }

  static bool isPermutation(const SmallVectorImpl<NodePtr> &A,
                            const SmallVectorImpl<NodePtr> &B) {
    if (A.size() != B.size())
      return false;
    SmallPtrSet<NodePtr, 4> Set(A.begin(), A.end());
    for (NodePtr N : B)
      if (Set.count(N) == 0)
        return false;
    return true;
  }

  // Updates the set of roots after insertion or deletion. This ensures that
  // the roots are the same after a series of updates as they would be if the
  // tree were built from scratch.
  static void UpdateRootsAfterUpdate(DomTreeT &DT, const BatchUpdatePtr BUI) {
    assert(IsPostDom && "This function is only for postdominators");

    // The tree has only trivial roots -- nothing to update.
    if (llvm::none_of(DT.Roots, [BUI](const NodePtr N) {
          return HasForwardSuccessors(N, BUI);
        }))
      return;

    // Recalculate the set of roots.
    RootsT Roots = FindRoots(DT, BUI);
    if (!isPermutation(DT.Roots, Roots)) {
      // The roots chosen in the CFG have changed. This is because the
      // incremental algorithm does not really know or use the set of roots and
      // can make a different (implicit) decision about which node within an
      // infinite loop becomes a root.

      LLVM_DEBUG(dbgs() << "Roots are different in updated trees\n"
                        << "The entire tree needs to be rebuilt\n");
      // It may be possible to update the tree without recalculating it, but
      // we do not know yet how to do it, and it happens rarely in practice.
      CalculateFromScratch(DT, BUI);
    }
  }

  // Handles insertion to a node already in the dominator tree.
  static void InsertReachable(DomTreeT &DT, const BatchUpdatePtr BUI,
                              const TreeNodePtr From, const TreeNodePtr To) {
    LLVM_DEBUG(dbgs() << "\tReachable " << BlockNamePrinter(From->getBlock())
                      << " -> " << BlockNamePrinter(To->getBlock()) << "\n");
    if (IsPostDom && UpdateRootsBeforeInsertion(DT, BUI, From, To)) return;
    // DT.findNCD expects both pointers to be valid. When From is a virtual
    // root, then its CFG block pointer is a nullptr, so we have to 'compute'
    // the NCD manually.
    const NodePtr NCDBlock =
        (From->getBlock() && To->getBlock())
            ? DT.findNearestCommonDominator(From->getBlock(), To->getBlock())
            : nullptr;
    assert(NCDBlock || DT.isPostDominator());
    const TreeNodePtr NCD = DT.getNode(NCDBlock);
    assert(NCD);

    LLVM_DEBUG(dbgs() << "\t\tNCA == " << BlockNamePrinter(NCD) << "\n");
    const unsigned NCDLevel = NCD->getLevel();

    // Based on Lemma 2.5 from [2], after insertion of (From,To), v is affected
    // iff depth(NCD)+1 < depth(v) and there exists a path P from To to v such
    // that every node w on P satisfies depth(v) <= depth(w).
    //
    // This reduces to a widest path problem (maximizing the depth of the
    // minimum vertex in the path) which can be solved by a modified version of
    // Dijkstra with a bucket queue (named depth-based search in [2]).

    // To is in the path, so depth(NCD)+1 < depth(v) <= depth(To). Nothing
    // affected if this does not hold.
    if (NCDLevel + 1 >= To->getLevel())
      return;
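
    // For instance (numbers illustrative): with depth(NCD) = 1 and
    // depth(To) = 2, the check above fires (2 >= 2) and nothing is affected;
    // with depth(To) = 4, only vertices v with 2 < depth(v) <= 4 can be
    // affected and are searched for below.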

    InsertionInfo II;
    SmallVector<TreeNodePtr, 8> UnaffectedOnCurrentLevel;
    II.Bucket.push(To);
    II.Visited.insert(To);

    while (!II.Bucket.empty()) {
      TreeNodePtr TN = II.Bucket.top();
      II.Bucket.pop();
      II.Affected.push_back(TN);

      const unsigned CurrentLevel = TN->getLevel();
      LLVM_DEBUG(dbgs() << "Mark " << BlockNamePrinter(TN) <<
                 "as affected, CurrentLevel " << CurrentLevel << "\n");

      assert(TN->getBlock() && II.Visited.count(TN) && "Preconditions!");

      while (true) {
        // Unlike regular Dijkstra, we have an inner loop to expand more
        // vertices. The first iteration is for the (affected) vertex popped
        // from II.Bucket and the rest are for vertices in
        // UnaffectedOnCurrentLevel, which may eventually expand to affected
        // vertices.
        //
        // Invariant: there is an optimal path from `To` to TN with the minimum
        // depth being CurrentLevel.
        for (const NodePtr Succ : getChildren<IsPostDom>(TN->getBlock(), BUI)) {
          const TreeNodePtr SuccTN = DT.getNode(Succ);
          assert(SuccTN &&
                 "Unreachable successor found at reachable insertion");
          const unsigned SuccLevel = SuccTN->getLevel();

          LLVM_DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ)
                            << ", level = " << SuccLevel << "\n");

          // There is an optimal path from `To` to Succ with the minimum depth
          // being min(CurrentLevel, SuccLevel).
          //
          // If depth(NCD)+1 < depth(Succ) is not satisfied, Succ is unaffected
          // and no affected vertex may be reached by a path passing through it.
          // Stop here. Also, Succ may be visited by other predecessors but the
          // first visit has the optimal path. Stop if Succ has been visited.
          if (SuccLevel <= NCDLevel + 1 || !II.Visited.insert(SuccTN).second)
            continue;

          if (SuccLevel > CurrentLevel) {
            // Succ is unaffected but it may (transitively) expand to affected
            // vertices. Store it in UnaffectedOnCurrentLevel.
            LLVM_DEBUG(dbgs() << "\t\tMarking visited not affected "
                              << BlockNamePrinter(Succ) << "\n");
            UnaffectedOnCurrentLevel.push_back(SuccTN);
#ifndef NDEBUG
            II.VisitedUnaffected.push_back(SuccTN);
#endif
          } else {
            // The condition is satisfied (Succ is affected). Add Succ to the
            // bucket queue.
            LLVM_DEBUG(dbgs() << "\t\tAdd " << BlockNamePrinter(Succ)
                              << " to a Bucket\n");
            II.Bucket.push(SuccTN);
          }
        }

        if (UnaffectedOnCurrentLevel.empty())
          break;
        TN = UnaffectedOnCurrentLevel.pop_back_val();
        LLVM_DEBUG(dbgs() << " Next: " << BlockNamePrinter(TN) << "\n");
      }
    }

    // Finish by updating immediate dominators and levels.
    UpdateInsertion(DT, BUI, NCD, II);
  }

  // Updates immediate dominators and levels after insertion.
  static void UpdateInsertion(DomTreeT &DT, const BatchUpdatePtr BUI,
                              const TreeNodePtr NCD, InsertionInfo &II) {
    LLVM_DEBUG(dbgs() << "Updating NCD = " << BlockNamePrinter(NCD) << "\n");

    for (const TreeNodePtr TN : II.Affected) {
      LLVM_DEBUG(dbgs() << "\tIDom(" << BlockNamePrinter(TN)
                        << ") = " << BlockNamePrinter(NCD) << "\n");
      TN->setIDom(NCD);
    }

#if defined(LLVM_ENABLE_ABI_BREAKING_CHECKS) && !defined(NDEBUG)
    for (const TreeNodePtr TN : II.VisitedUnaffected)
      assert(TN->getLevel() == TN->getIDom()->getLevel() + 1 &&
             "TN should have been updated by an affected ancestor");
#endif

    if (IsPostDom) UpdateRootsAfterUpdate(DT, BUI);
  }

  // Handles insertion to previously unreachable nodes.
  static void InsertUnreachable(DomTreeT &DT, const BatchUpdatePtr BUI,
                                const TreeNodePtr From, const NodePtr To) {
    LLVM_DEBUG(dbgs() << "Inserting " << BlockNamePrinter(From)
                      << " -> (unreachable) " << BlockNamePrinter(To) << "\n");

    // Collect discovered edges to already reachable nodes.
    SmallVector<std::pair<NodePtr, TreeNodePtr>, 8> DiscoveredEdgesToReachable;
    // Discover and connect nodes that became reachable with the insertion.
    ComputeUnreachableDominators(DT, BUI, To, From, DiscoveredEdgesToReachable);

    LLVM_DEBUG(dbgs() << "Inserted " << BlockNamePrinter(From)
                      << " -> (prev unreachable) " << BlockNamePrinter(To)
                      << "\n");

    // Use the discovered edges and insert the discovered connecting (incoming)
    // edges.
    for (const auto &Edge : DiscoveredEdgesToReachable) {
      LLVM_DEBUG(dbgs() << "\tInserting discovered connecting edge "
                        << BlockNamePrinter(Edge.first) << " -> "
                        << BlockNamePrinter(Edge.second) << "\n");
      InsertReachable(DT, BUI, DT.getNode(Edge.first), Edge.second);
    }
  }

  // Connects nodes that become reachable with an insertion.
  static void ComputeUnreachableDominators(
      DomTreeT &DT, const BatchUpdatePtr BUI, const NodePtr Root,
      const TreeNodePtr Incoming,
      SmallVectorImpl<std::pair<NodePtr, TreeNodePtr>>
          &DiscoveredConnectingEdges) {
    assert(!DT.getNode(Root) && "Root must not be reachable");

    // Visit only previously unreachable nodes.
    auto UnreachableDescender = [&DT, &DiscoveredConnectingEdges](NodePtr From,
                                                                  NodePtr To) {
      const TreeNodePtr ToTN = DT.getNode(To);
      if (!ToTN) return true;

      DiscoveredConnectingEdges.push_back({From, ToTN});
      return false;
    };

    SemiNCAInfo SNCA(BUI);
    SNCA.runDFS(Root, 0, UnreachableDescender, 0);
    SNCA.runSemiNCA(DT);
    SNCA.attachNewSubtree(DT, Incoming);

    LLVM_DEBUG(dbgs() << "After adding unreachable nodes\n");
  }

  static void DeleteEdge(DomTreeT &DT, const BatchUpdatePtr BUI,
                         const NodePtr From, const NodePtr To) {
    assert(From && To && "Cannot disconnect nullptrs");
    LLVM_DEBUG(dbgs() << "Deleting edge " << BlockNamePrinter(From) << " -> "
                      << BlockNamePrinter(To) << "\n");

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
    // Ensure that the edge was in fact deleted from the CFG before informing
    // the DomTree about it.
    // The check is O(N), so run it only in debug configuration.
    auto IsSuccessor = [BUI](const NodePtr SuccCandidate, const NodePtr Of) {
      auto Successors = getChildren<IsPostDom>(Of, BUI);
      return llvm::is_contained(Successors, SuccCandidate);
    };
    (void)IsSuccessor;
    assert(!IsSuccessor(To, From) && "Deleted edge still exists in the CFG!");
#endif

    const TreeNodePtr FromTN = DT.getNode(From);
    // Deletion in an unreachable subtree -- nothing to do.
    if (!FromTN) return;

    const TreeNodePtr ToTN = DT.getNode(To);
    if (!ToTN) {
      LLVM_DEBUG(
          dbgs() << "\tTo (" << BlockNamePrinter(To)
                 << ") already unreachable -- there is no edge to delete\n");
      return;
    }

    const NodePtr NCDBlock = DT.findNearestCommonDominator(From, To);
    const TreeNodePtr NCD = DT.getNode(NCDBlock);

    // If To dominates From -- nothing to do.
    if (ToTN != NCD) {
      DT.DFSInfoValid = false;

      const TreeNodePtr ToIDom = ToTN->getIDom();
      LLVM_DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
                        << BlockNamePrinter(ToIDom) << "\n");

      // To remains reachable after deletion.
      // (Based on the caption under Figure 4. from [2].)
      if (FromTN != ToIDom || HasProperSupport(DT, BUI, ToTN))
        DeleteReachable(DT, BUI, FromTN, ToTN);
      else
        DeleteUnreachable(DT, BUI, ToTN);
    }

    if (IsPostDom) UpdateRootsAfterUpdate(DT, BUI);
  }

  // Handles deletions that leave destination nodes reachable.
  static void DeleteReachable(DomTreeT &DT, const BatchUpdatePtr BUI,
                              const TreeNodePtr FromTN,
                              const TreeNodePtr ToTN) {
    LLVM_DEBUG(dbgs() << "Deleting reachable " << BlockNamePrinter(FromTN)
                      << " -> " << BlockNamePrinter(ToTN) << "\n");
    LLVM_DEBUG(dbgs() << "\tRebuilding subtree\n");

    // Find the top of the subtree that needs to be rebuilt.
    // (Based on Lemma 2.6 from [2].)
    const NodePtr ToIDom =
        DT.findNearestCommonDominator(FromTN->getBlock(), ToTN->getBlock());
    assert(ToIDom || DT.isPostDominator());
    const TreeNodePtr ToIDomTN = DT.getNode(ToIDom);
    assert(ToIDomTN);
    const TreeNodePtr PrevIDomSubTree = ToIDomTN->getIDom();
    // Top of the subtree to rebuild is the root node. Rebuild the tree from
    // scratch.
    if (!PrevIDomSubTree) {
      LLVM_DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
      CalculateFromScratch(DT, BUI);
      return;
    }

    // Only visit nodes in the subtree starting at To.
    const unsigned Level = ToIDomTN->getLevel();
    auto DescendBelow = [Level, &DT](NodePtr, NodePtr To) {
      return DT.getNode(To)->getLevel() > Level;
    };

    LLVM_DEBUG(dbgs() << "\tTop of subtree: " << BlockNamePrinter(ToIDomTN)
                      << "\n");

    SemiNCAInfo SNCA(BUI);
    SNCA.runDFS(ToIDom, 0, DescendBelow, 0);
    LLVM_DEBUG(dbgs() << "\tRunning Semi-NCA\n");
    SNCA.runSemiNCA(DT, Level);
    SNCA.reattachExistingSubtree(DT, PrevIDomSubTree);
  }

  // Checks if a node has proper support, as defined on page 3 and later
  // explained on page 7 of [2].
  static bool HasProperSupport(DomTreeT &DT, const BatchUpdatePtr BUI,
                               const TreeNodePtr TN) {
    LLVM_DEBUG(dbgs() << "IsReachableFromIDom " << BlockNamePrinter(TN)
                      << "\n");
    auto TNB = TN->getBlock();
    for (const NodePtr Pred : getChildren<!IsPostDom>(TNB, BUI)) {
      LLVM_DEBUG(dbgs() << "\tPred " << BlockNamePrinter(Pred) << "\n");
      if (!DT.getNode(Pred)) continue;

      const NodePtr Support = DT.findNearestCommonDominator(TNB, Pred);
      LLVM_DEBUG(dbgs() << "\tSupport " << BlockNamePrinter(Support) << "\n");
      if (Support != TNB) {
        LLVM_DEBUG(dbgs() << "\t" << BlockNamePrinter(TN)
                          << " is reachable from support "
                          << BlockNamePrinter(Support) << "\n");
        return true;
      }
    }

    return false;
  }

  // Handles deletions that make the destination node unreachable.
  // (Based on Lemma 2.7 from [2].)
  static void DeleteUnreachable(DomTreeT &DT, const BatchUpdatePtr BUI,
                                const TreeNodePtr ToTN) {
    LLVM_DEBUG(dbgs() << "Deleting unreachable subtree "
                      << BlockNamePrinter(ToTN) << "\n");
    assert(ToTN);
    assert(ToTN->getBlock());

    if (IsPostDom) {
      // Deletion makes a region reverse-unreachable and creates a new root.
      // Simulate that by inserting an edge from the virtual root to ToTN and
      // adding it as a new root.
      LLVM_DEBUG(dbgs() << "\tDeletion made a region reverse-unreachable\n");
      LLVM_DEBUG(dbgs() << "\tAdding new root " << BlockNamePrinter(ToTN)
                        << "\n");
      DT.Roots.push_back(ToTN->getBlock());
      InsertReachable(DT, BUI, DT.getNode(nullptr), ToTN);
      return;
    }

    SmallVector<NodePtr, 16> AffectedQueue;
    const unsigned Level = ToTN->getLevel();

    // Traverse destination node's descendants with greater level in the tree
    // and collect visited nodes.
    auto DescendAndCollect = [Level, &AffectedQueue, &DT](NodePtr, NodePtr To) {
      const TreeNodePtr TN = DT.getNode(To);
      assert(TN);
      if (TN->getLevel() > Level) return true;
      if (!llvm::is_contained(AffectedQueue, To))
        AffectedQueue.push_back(To);

      return false;
    };

    SemiNCAInfo SNCA(BUI);
    unsigned LastDFSNum =
        SNCA.runDFS(ToTN->getBlock(), 0, DescendAndCollect, 0);

    TreeNodePtr MinNode = ToTN;

    // Identify the top of the subtree to rebuild by finding the NCD of all
    // the affected nodes.
    for (const NodePtr N : AffectedQueue) {
      const TreeNodePtr TN = DT.getNode(N);
      const NodePtr NCDBlock =
          DT.findNearestCommonDominator(TN->getBlock(), ToTN->getBlock());
      assert(NCDBlock || DT.isPostDominator());
      const TreeNodePtr NCD = DT.getNode(NCDBlock);
      assert(NCD);

      LLVM_DEBUG(dbgs() << "Processing affected node " << BlockNamePrinter(TN)
                        << " with NCD = " << BlockNamePrinter(NCD)
                        << ", MinNode =" << BlockNamePrinter(MinNode) << "\n");
      if (NCD != TN && NCD->getLevel() < MinNode->getLevel()) MinNode = NCD;
    }

    // Root reached, rebuild the whole tree from scratch.
    if (!MinNode->getIDom()) {
      LLVM_DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
      CalculateFromScratch(DT, BUI);
      return;
    }

    // Erase the unreachable subtree in reverse preorder to process all children
    // before deleting their parent.
    for (unsigned i = LastDFSNum; i > 0; --i) {
      const NodePtr N = SNCA.NumToNode[i];
      const TreeNodePtr TN = DT.getNode(N);
      LLVM_DEBUG(dbgs() << "Erasing node " << BlockNamePrinter(TN) << "\n");

      EraseNode(DT, TN);
    }

    // The affected subtree starts at the To node -- there's no extra work to do.
    if (MinNode == ToTN) return;

    LLVM_DEBUG(dbgs() << "DeleteUnreachable: running DFS with MinNode = "
                      << BlockNamePrinter(MinNode) << "\n");
    const unsigned MinLevel = MinNode->getLevel();
    const TreeNodePtr PrevIDom = MinNode->getIDom();
    assert(PrevIDom);
    SNCA.clear();

    // Identify nodes that remain in the affected subtree.
    auto DescendBelow = [MinLevel, &DT](NodePtr, NodePtr To) {
      const TreeNodePtr ToTN = DT.getNode(To);
      return ToTN && ToTN->getLevel() > MinLevel;
    };
    SNCA.runDFS(MinNode->getBlock(), 0, DescendBelow, 0);

    LLVM_DEBUG(dbgs() << "Previous IDom(MinNode) = "
                      << BlockNamePrinter(PrevIDom) << "\nRunning Semi-NCA\n");

    // Rebuild the remaining part of affected subtree.
    SNCA.runSemiNCA(DT, MinLevel);
    SNCA.reattachExistingSubtree(DT, PrevIDom);
  }

  // Removes leaf tree nodes from the dominator tree.
  static void EraseNode(DomTreeT &DT, const TreeNodePtr TN) {
    assert(TN);
    assert(TN->getNumChildren() == 0 && "Not a tree leaf");

    const TreeNodePtr IDom = TN->getIDom();
    assert(IDom);

    auto ChIt = llvm::find(IDom->Children, TN);
    assert(ChIt != IDom->Children.end());
    std::swap(*ChIt, IDom->Children.back());
    IDom->Children.pop_back();

    DT.DomTreeNodes.erase(TN->getBlock());
  }

  //~~
  //===--------------------- DomTree Batch Updater --------------------------===
  //~~

  static void ApplyUpdates(DomTreeT &DT, GraphDiffT &PreViewCFG,
                           GraphDiffT *PostViewCFG) {
    // Note: the PostViewCFG is only used when computing from scratch. Its data
    // should already be included in the PreViewCFG for incremental updates.
    const size_t NumUpdates = PreViewCFG.getNumLegalizedUpdates();
    if (NumUpdates == 0)
      return;

    // Take the fast path for a single update and avoid running the batch update
    // machinery.
    if (NumUpdates == 1) {
      UpdateT Update = PreViewCFG.popUpdateForIncrementalUpdates();
      if (!PostViewCFG) {
        if (Update.getKind() == UpdateKind::Insert)
          InsertEdge(DT, /*BUI=*/nullptr, Update.getFrom(), Update.getTo());
        else
          DeleteEdge(DT, /*BUI=*/nullptr, Update.getFrom(), Update.getTo());
      } else {
        BatchUpdateInfo BUI(*PostViewCFG, PostViewCFG);
        if (Update.getKind() == UpdateKind::Insert)
          InsertEdge(DT, &BUI, Update.getFrom(), Update.getTo());
        else
          DeleteEdge(DT, &BUI, Update.getFrom(), Update.getTo());
      }
      return;
    }

    BatchUpdateInfo BUI(PreViewCFG, PostViewCFG);
    // Recalculate the DominatorTree when the number of updates
    // exceeds a threshold, which usually makes direct updating slower than
    // recalculation. We select this threshold proportional to the
    // size of the DominatorTree. The constant is selected
    // by choosing the one with acceptable performance on some real-world
    // inputs.

    // Make unittests of the incremental algorithm work
    if (DT.DomTreeNodes.size() <= 100) {
      if (BUI.NumLegalized > DT.DomTreeNodes.size())
        CalculateFromScratch(DT, &BUI);
    } else if (BUI.NumLegalized > DT.DomTreeNodes.size() / 40)
      CalculateFromScratch(DT, &BUI);

    // If the DominatorTree was recalculated at some point, stop the batch
    // updates. Full recalculations ignore batch updates and look at the actual
    // CFG.
    for (size_t i = 0; i < BUI.NumLegalized && !BUI.IsRecalculated; ++i)
      ApplyNextUpdate(DT, BUI);
  }
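
  // Worked example of the threshold above (sizes illustrative): a tree with
  // 4000 nodes is recalculated from scratch once more than 4000 / 40 = 100
  // legalized updates are queued, while a small tree with 80 nodes is only
  // recalculated when the update count exceeds the node count itself.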

  static void ApplyNextUpdate(DomTreeT &DT, BatchUpdateInfo &BUI) {
    // Popping the next update will move the PreViewCFG to the next snapshot.
    UpdateT CurrentUpdate = BUI.PreViewCFG.popUpdateForIncrementalUpdates();
#if 0
    // FIXME: The LLVM_DEBUG macro only plays well with a modular
    // build of LLVM when the header is marked as textual, but doing
    // so causes redefinition errors.
    LLVM_DEBUG(dbgs() << "Applying update: ");
    LLVM_DEBUG(CurrentUpdate.dump(); dbgs() << "\n");
#endif

    if (CurrentUpdate.getKind() == UpdateKind::Insert)
      InsertEdge(DT, &BUI, CurrentUpdate.getFrom(), CurrentUpdate.getTo());
    else
      DeleteEdge(DT, &BUI, CurrentUpdate.getFrom(), CurrentUpdate.getTo());
  }

  //~~
  //===--------------- DomTree correctness verification ---------------------===
  //~~

  // Check if the tree has correct roots. A DominatorTree always has a single
  // root which is the function's entry node. A PostDominatorTree can have
  // multiple roots - one for each node with no successors and for infinite
  // loops.
  // Running time: O(N).
  bool verifyRoots(const DomTreeT &DT) {
    if (!DT.Parent && !DT.Roots.empty()) {
      errs() << "Tree has no parent but has roots!\n";
      errs().flush();
      return false;
    }

    if (!IsPostDom) {
      if (DT.Roots.empty()) {
        errs() << "Tree doesn't have a root!\n";
        errs().flush();
        return false;
      }

      if (DT.getRoot() != GetEntryNode(DT)) {
        errs() << "Tree's root is not its parent's entry node!\n";
        errs().flush();
        return false;
      }
    }

    RootsT ComputedRoots = FindRoots(DT, nullptr);
    if (!isPermutation(DT.Roots, ComputedRoots)) {
      errs() << "Tree has different roots than freshly computed ones!\n";
      errs() << "\tPDT roots: ";
      for (const NodePtr N : DT.Roots) errs() << BlockNamePrinter(N) << ", ";
      errs() << "\n\tComputed roots: ";
      for (const NodePtr N : ComputedRoots)
        errs() << BlockNamePrinter(N) << ", ";
      errs() << "\n";
      errs().flush();
      return false;
    }

    return true;
  }

  // Checks if the tree contains all reachable nodes in the input graph.
  // Running time: O(N).
  bool verifyReachability(const DomTreeT &DT) {
    clear();
    doFullDFSWalk(DT, AlwaysDescend);

    for (auto &NodeToTN : DT.DomTreeNodes) {
      const TreeNodePtr TN = NodeToTN.second.get();
      const NodePtr BB = TN->getBlock();

      // Virtual root has a corresponding virtual CFG node.
      if (DT.isVirtualRoot(TN)) continue;

      if (NodeToInfo.count(BB) == 0) {
        errs() << "DomTree node " << BlockNamePrinter(BB)
               << " not found by DFS walk!\n";
        errs().flush();

        return false;
      }
    }

    for (const NodePtr N : NumToNode) {
      if (N && !DT.getNode(N)) {
        errs() << "CFG node " << BlockNamePrinter(N)
               << " not found in the DomTree!\n";
        errs().flush();

        return false;
      }
    }

    return true;
  }

  // Check if for every parent with a level L in the tree all of its children
  // have level L + 1.
  // Running time: O(N).
  static bool VerifyLevels(const DomTreeT &DT) {
    for (auto &NodeToTN : DT.DomTreeNodes) {
      const TreeNodePtr TN = NodeToTN.second.get();
      const NodePtr BB = TN->getBlock();
      if (!BB) continue;

      const TreeNodePtr IDom = TN->getIDom();
      if (!IDom && TN->getLevel() != 0) {
        errs() << "Node without an IDom " << BlockNamePrinter(BB)
               << " has a nonzero level " << TN->getLevel() << "!\n";
        errs().flush();

        return false;
      }

      if (IDom && TN->getLevel() != IDom->getLevel() + 1) {
        errs() << "Node " << BlockNamePrinter(BB) << " has level "
               << TN->getLevel() << " while its IDom "
               << BlockNamePrinter(IDom->getBlock()) << " has level "
               << IDom->getLevel() << "!\n";
        errs().flush();

        return false;
      }
    }

    return true;
  }

  // Check if the computed DFS numbers are correct. Note that DFS info may not
  // be valid, and when that is the case, we don't verify the numbers.
  // Running time: O(N log(N)).
  static bool VerifyDFSNumbers(const DomTreeT &DT) {
    if (!DT.DFSInfoValid || !DT.Parent)
      return true;

    const NodePtr RootBB = IsPostDom ? nullptr : *DT.root_begin();
    const TreeNodePtr Root = DT.getNode(RootBB);

    auto PrintNodeAndDFSNums = [](const TreeNodePtr TN) {
      errs() << BlockNamePrinter(TN) << " {" << TN->getDFSNumIn() << ", "
             << TN->getDFSNumOut() << '}';
    };

    // Verify the root's DFS In number. Although DFS numbering would also work
    // if we started from some other value, we assume 0-based numbering.
    if (Root->getDFSNumIn() != 0) {
      errs() << "DFSIn number for the tree root is not:\n\t";
      PrintNodeAndDFSNums(Root);
      errs() << '\n';
      errs().flush();
      return false;
    }

    // For each tree node verify if children's DFS numbers cover their parent's
    // DFS numbers with no gaps.
    for (const auto &NodeToTN : DT.DomTreeNodes) {
      const TreeNodePtr Node = NodeToTN.second.get();

      // Handle tree leaves.
      if (Node->isLeaf()) {
        if (Node->getDFSNumIn() + 1 != Node->getDFSNumOut()) {
          errs() << "Tree leaf should have DFSOut = DFSIn + 1:\n\t";
          PrintNodeAndDFSNums(Node);
          errs() << '\n';
          errs().flush();
          return false;
        }

        continue;
      }

      // Make a copy and sort it such that it is possible to check if there are
      // no gaps between DFS numbers of adjacent children.
      SmallVector<TreeNodePtr, 8> Children(Node->begin(), Node->end());
      llvm::sort(Children, [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
        return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
      });

      auto PrintChildrenError = [Node, &Children, PrintNodeAndDFSNums](
          const TreeNodePtr FirstCh, const TreeNodePtr SecondCh) {
        assert(FirstCh);

        errs() << "Incorrect DFS numbers for:\n\tParent ";
        PrintNodeAndDFSNums(Node);

        errs() << "\n\tChild ";
        PrintNodeAndDFSNums(FirstCh);

        if (SecondCh) {
          errs() << "\n\tSecond child ";
          PrintNodeAndDFSNums(SecondCh);
        }

        errs() << "\nAll children: ";
        for (const TreeNodePtr Ch : Children) {
          PrintNodeAndDFSNums(Ch);
          errs() << ", ";
        }

        errs() << '\n';
        errs().flush();
      };

      if (Children.front()->getDFSNumIn() != Node->getDFSNumIn() + 1) {
        PrintChildrenError(Children.front(), nullptr);
        return false;
      }

      if (Children.back()->getDFSNumOut() + 1 != Node->getDFSNumOut()) {
        PrintChildrenError(Children.back(), nullptr);
        return false;
      }

      for (size_t i = 0, e = Children.size() - 1; i != e; ++i) {
        if (Children[i]->getDFSNumOut() + 1 != Children[i + 1]->getDFSNumIn()) {
          PrintChildrenError(Children[i], Children[i + 1]);
          return false;
        }
      }
    }

    return true;
  }

  // The below routines verify the correctness of the dominator tree relative to
  // the CFG it's coming from.  A tree is a dominator tree iff it has two
  // properties, called the parent property and the sibling property.  Tarjan
  // and Lengauer prove (but don't explicitly name) the properties as part of
  // the proofs in their 1972 paper, but the proofs are mostly part of proving
  // things about semidominators and idoms, and some of them are simply asserted
  // based on even earlier papers (see, e.g., lemma 2).  Some papers refer to
  // these properties as "valid" and "co-valid".  See, e.g., "Dominators,
  // directed bipolar orders, and independent spanning trees" by Loukas
  // Georgiadis and Robert E. Tarjan, as well as "Dominator Tree Verification
  // and Vertex-Disjoint Paths" by the same authors.

  // A very simple and direct explanation of these properties can be found in
  // "An Experimental Study of Dynamic Dominators", found at
  // https://arxiv.org/abs/1604.02711

  // The easiest way to think of the parent property is that it's a requirement
  // of being a dominator.  Let's just take immediate dominators.  For PARENT to
  // be an immediate dominator of CHILD, all paths in the CFG must go through
  // PARENT before they hit CHILD.  This implies that if you were to cut PARENT
  // out of the CFG, CHILD should no longer be reachable.  If it still is, you
  // now have a path from ENTRY to CHILD that goes around PARENT and still
  // reaches CHILD, which by definition means PARENT can't be a dominator of
  // CHILD (let alone an immediate one).

  // The sibling property is similar.  It says that for each pair of sibling
  // nodes in the dominator tree (LEFT and RIGHT), they must not dominate each
  // other.  If sibling LEFT dominated sibling RIGHT, it means there are no
  // paths in the CFG from sibling LEFT to sibling RIGHT that do not go through
  // LEFT, and thus, LEFT is really an ancestor (in the dominator tree) of
  // RIGHT, not a sibling.

  // It is possible to verify the parent and sibling properties in linear time,
  // but the algorithms are complex. Instead, we do it in a straightforward
  // N^2 and N^3 way below, using direct path reachability.
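
  // As a small concrete example (hypothetical CFG): with edges ENTRY -> A,
  // A -> B, and ENTRY -> B, node A does not dominate B (the ENTRY -> B edge
  // bypasses A), so A and B must be siblings under ENTRY in the dominator
  // tree; making B a child of A would violate both properties.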

  // Checks if the tree has the parent property: for all edges from V to W in
  // the input graph such that V is reachable, the parent of W in the tree is
  // an ancestor of V in the tree.
  // Running time: O(N^2).
  //
  // This means that if a node gets disconnected from the graph, then all of
  // the nodes it dominated previously will now become unreachable.
  bool verifyParentProperty(const DomTreeT &DT) {
    for (auto &NodeToTN : DT.DomTreeNodes) {
      const TreeNodePtr TN = NodeToTN.second.get();
      const NodePtr BB = TN->getBlock();
      if (!BB || TN->isLeaf())
        continue;

      LLVM_DEBUG(dbgs() << "Verifying parent property of node "
                        << BlockNamePrinter(TN) << "\n");
      clear();
      doFullDFSWalk(DT, [BB](NodePtr From, NodePtr To) {
        return From != BB && To != BB;
      });

      for (TreeNodePtr Child : TN->children())
        if (NodeToInfo.count(Child->getBlock()) != 0) {
          errs() << "Child " << BlockNamePrinter(Child)
                 << " reachable after its parent " << BlockNamePrinter(BB)
                 << " is removed!\n";
          errs().flush();

          return false;
        }
    }

    return true;
  }

  // Checks if the tree has the sibling property: for all sibling nodes V and
  // W in the tree, V does not dominate W.
  // Running time: O(N^3).
  //
  // This means that if a node gets disconnected from the graph, then all of its
  // siblings will still be reachable.
  bool verifySiblingProperty(const DomTreeT &DT) {
    for (auto &NodeToTN : DT.DomTreeNodes) {
      const TreeNodePtr TN = NodeToTN.second.get();
      const NodePtr BB = TN->getBlock();
      if (!BB || TN->isLeaf())
        continue;

      for (const TreeNodePtr N : TN->children()) {
        clear();
        NodePtr BBN = N->getBlock();
        doFullDFSWalk(DT, [BBN](NodePtr From, NodePtr To) {
          return From != BBN && To != BBN;
        });

        for (const TreeNodePtr S : TN->children()) {
          if (S == N) continue;

          if (NodeToInfo.count(S->getBlock()) == 0) {
            errs() << "Node " << BlockNamePrinter(S)
                   << " not reachable when its sibling " << BlockNamePrinter(N)
                   << " is removed!\n";
            errs().flush();

            return false;
          }
        }
      }
    }

    return true;
  }

  // Check if the given tree is the same as a freshly computed one for the same
  // Parent.
  // Running time: O(N^2), but faster in practice (same as tree construction).
  //
  // Note that this does not check that the tree construction algorithm is
  // correct and should only be used for fast (but possibly unsound)
  // verification.
  static bool IsSameAsFreshTree(const DomTreeT &DT) {
    DomTreeT FreshTree;
    FreshTree.recalculate(*DT.Parent);
    const bool Different = DT.compare(FreshTree);

    if (Different) {
      errs() << (DT.isPostDominator() ? "Post" : "")
             << "DominatorTree is different than a freshly computed one!\n"
             << "\tCurrent:\n";
      DT.print(errs());
      errs() << "\n\tFreshly computed tree:\n";
      FreshTree.print(errs());
      errs().flush();
    }

    return !Different;
  }
};

template <class DomTreeT>
void Calculate(DomTreeT &DT) {
  SemiNCAInfo<DomTreeT>::CalculateFromScratch(DT, nullptr);
}

template <typename DomTreeT>
void CalculateWithUpdates(DomTreeT &DT,
                          ArrayRef<typename DomTreeT::UpdateType> Updates) {
  // FIXME: Updated to use the PreViewCFG and behave the same as before. This
  // behavior is, however, incorrect; it actually needs the PostViewCFG.
  GraphDiff<typename DomTreeT::NodePtr, DomTreeT::IsPostDominator> PreViewCFG(
      Updates, /*ReverseApplyUpdates=*/true);
  typename SemiNCAInfo<DomTreeT>::BatchUpdateInfo BUI(PreViewCFG);
  SemiNCAInfo<DomTreeT>::CalculateFromScratch(DT, &BUI);
}

template <class DomTreeT>
void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
                typename DomTreeT::NodePtr To) {
  if (DT.isPostDominator()) std::swap(From, To);
  SemiNCAInfo<DomTreeT>::InsertEdge(DT, nullptr, From, To);
}

template <class DomTreeT>
void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
                typename DomTreeT::NodePtr To) {
  if (DT.isPostDominator()) std::swap(From, To);
  SemiNCAInfo<DomTreeT>::DeleteEdge(DT, nullptr, From, To);
}
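
// Usage sketch (illustrative): clients normally go through the tree's own
// update API rather than calling these helpers directly, e.g. after the CFG
// edge A -> B has already been added:
//
//   DT.insertEdge(A, B);  // forwards to DomTreeBuilder::InsertEdge
//
// Note that for post-dominators the helpers above swap the edge endpoints
// before delegating to SemiNCAInfo.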

template <class DomTreeT>
void ApplyUpdates(DomTreeT &DT,
                  GraphDiff<typename DomTreeT::NodePtr,
                            DomTreeT::IsPostDominator> &PreViewCFG,
                  GraphDiff<typename DomTreeT::NodePtr,
                            DomTreeT::IsPostDominator> *PostViewCFG) {
  SemiNCAInfo<DomTreeT>::ApplyUpdates(DT, PreViewCFG, PostViewCFG);
}

template <class DomTreeT>
bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL) {
  SemiNCAInfo<DomTreeT> SNCA(nullptr);

  // The simplest check is to compare against a freshly computed tree. This
  // will also usefully print the old and new trees if they are different.
  if (!SNCA.IsSameAsFreshTree(DT))
    return false;

  // Common checks to verify the properties of the tree. O(N log N) at worst.
  if (!SNCA.verifyRoots(DT) || !SNCA.verifyReachability(DT) ||
      !SNCA.VerifyLevels(DT) || !SNCA.VerifyDFSNumbers(DT))
    return false;

  // Extra checks depending on VerificationLevel. Up to O(N^3).
  if (VL == DomTreeT::VerificationLevel::Basic ||
      VL == DomTreeT::VerificationLevel::Full)
    if (!SNCA.verifyParentProperty(DT))
      return false;
  if (VL == DomTreeT::VerificationLevel::Full)
    if (!SNCA.verifySiblingProperty(DT))
      return false;

  return true;
}
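
// Usage sketch (illustrative): this is what the tree's verify() method runs,
// so a typical debugging check looks like
//
//   assert(DT.verify(DominatorTree::VerificationLevel::Full));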

}  // namespace DomTreeBuilder
}  // namespace llvm

#undef DEBUG_TYPE

#endif
//===- BinaryStreamError.h - Error extensions for Binary Streams *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYSTREAMERROR_H
#define LLVM_SUPPORT_BINARYSTREAMERROR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

#include <string>

namespace llvm {
enum class stream_error_code {
  unspecified,
  stream_too_short,
  invalid_array_size,
  invalid_offset,
  filesystem_error
};

/// Base class for errors originating when parsing raw PDB files
class BinaryStreamError : public ErrorInfo<BinaryStreamError> {
public:
  static char ID;
  explicit BinaryStreamError(stream_error_code C);
  explicit BinaryStreamError(StringRef Context);
  BinaryStreamError(stream_error_code C, StringRef Context);

  void log(raw_ostream &OS) const override;
  std::error_code convertToErrorCode() const override;

  StringRef getErrorMessage() const;

  stream_error_code getErrorCode() const { return Code; }

private:
  std::string ErrMsg;
  stream_error_code Code;
};
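
// Usage sketch (illustrative): producers of binary streams typically return
// this error when a read would run past the available data, e.g.
//
//   if (Offset + Size > Stream.getLength())
//     return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
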
} // namespace llvm

#endif // LLVM_SUPPORT_BINARYSTREAMERROR_H
//===- MemAlloc.h - Memory allocation functions -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines counterparts of C library allocation functions defined in
/// the namespace 'std'. The new allocation functions crash on allocation
/// failure instead of returning null pointer.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MEMALLOC_H
#define LLVM_SUPPORT_MEMALLOC_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdlib>

namespace llvm {

LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_malloc(size_t Sz) {
  void *Result = std::malloc(Sz);
  if (Result == nullptr) {
    // It is implementation-defined whether allocation occurs if the space
    // requested is zero (ISO/IEC 9899:2018 7.22.3). Retry, requesting
    // non-zero, if the space requested was zero.
    if (Sz == 0)
      return safe_malloc(1);
    report_bad_alloc_error("Allocation failed");
  }
  return Result;
}

LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_calloc(size_t Count,
                                                        size_t Sz) {
  void *Result = std::calloc(Count, Sz);
  if (Result == nullptr) {
    // It is implementation-defined whether allocation occurs if the space
    // requested is zero (ISO/IEC 9899:2018 7.22.3). Retry, requesting
    // non-zero, if the space requested was zero.
    if (Count == 0 || Sz == 0)
      return safe_malloc(1);
    report_bad_alloc_error("Allocation failed");
  }
  return Result;
}

LLVM_ATTRIBUTE_RETURNS_NONNULL inline void *safe_realloc(void *Ptr, size_t Sz) {
  void *Result = std::realloc(Ptr, Sz);
  if (Result == nullptr) {
    // It is implementation-defined whether allocation occurs if the space
    // requested is zero (ISO/IEC 9899:2018 7.22.3). Retry, requesting
    // non-zero, if the space requested was zero.
    if (Sz == 0)
      return safe_malloc(1);
    report_bad_alloc_error("Allocation failed");
  }
  return Result;
}
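
// Minimal usage sketch (illustrative): these are drop-in replacements for the
// C allocation functions, so the usual malloc/realloc/free pairing applies,
// but a null-pointer check is never needed:
//
//   void *Buf = llvm::safe_malloc(128);
//   Buf = llvm::safe_realloc(Buf, 256);
//   std::free(Buf);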

/// Allocate a buffer of memory with the given size and alignment.
///
/// When the compiler supports aligned operator new, this will use it to
/// handle even over-aligned allocations.
///
/// However, this doesn't make any attempt to leverage the fancier techniques
/// like posix_memalign due to portability. It is mostly intended to allow
/// compatibility with platforms that, after aligned allocation was added, use
/// reduced default alignment.
LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
allocate_buffer(size_t Size, size_t Alignment);

/// Deallocate a buffer of memory with the given size and alignment.
///
/// If supported, this will use the sized delete operator. Also if supported,
/// this will pass the alignment to the delete operator.
///
/// The pointer must have been allocated with the corresponding new operator,
/// most likely using the above helper.
void deallocate_buffer(void *Ptr, size_t Size, size_t Alignment);
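
// Illustrative pairing of the two helpers above (the size and alignment
// values are examples and must match between the two calls):
//
//   void *P = llvm::allocate_buffer(/*Size=*/64, /*Alignment=*/32);
//   // ... use P ...
//   llvm::deallocate_buffer(P, /*Size=*/64, /*Alignment=*/32);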

} // namespace llvm
#endif
//== llvm/Support/CodeGenCoverage.h ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file provides rule coverage tracking for tablegen-erated CodeGen.
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CODEGENCOVERAGE_H
#define LLVM_SUPPORT_CODEGENCOVERAGE_H

#include "llvm/ADT/BitVector.h"

namespace llvm {
class MemoryBuffer;

class CodeGenCoverage {
protected:
  BitVector RuleCoverage;

public:
  using const_covered_iterator = BitVector::const_set_bits_iterator;

  CodeGenCoverage();

  void setCovered(uint64_t RuleID);
  bool isCovered(uint64_t RuleID) const;
  iterator_range<const_covered_iterator> covered() const;

  bool parse(MemoryBuffer &Buffer, StringRef BackendName);
  bool emit(StringRef FilePrefix, StringRef BackendName) const;
  void reset();
};
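
// Minimal usage sketch (illustrative; rule IDs come from the tablegen-erated
// instruction selector):
//
//   CodeGenCoverage Covered;
//   Covered.setCovered(42);
//   if (Covered.isCovered(42))
//     ; // rule 42 fired at least once
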
} // namespace llvm

#endif // LLVM_SUPPORT_CODEGENCOVERAGE_H
//===- BinaryStreamArray.h - Array backed by an arbitrary stream *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Lightweight arrays that are backed by an arbitrary BinaryStream.  This file
/// provides two different array implementations.
///
///     VarStreamArray - Arrays of variable length records.  The user specifies
///       an Extractor type that can extract a record from a given offset and
///       return the number of bytes consumed by the record.
///
///     FixedStreamArray - Arrays of fixed length records.  This is similar in
///       spirit to ArrayRef<T>, but since it is backed by a BinaryStream, the
///       elements of the array need not be laid out in contiguous memory.
///

#ifndef LLVM_SUPPORT_BINARYSTREAMARRAY_H
#define LLVM_SUPPORT_BINARYSTREAMARRAY_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>

namespace llvm {

/// VarStreamArrayExtractor is intended to be specialized to provide customized
/// extraction logic.  On input it receives a BinaryStreamRef pointing to the
/// beginning of the next record, but where the length of the record is not yet
/// known.  Upon completion, it should return an appropriate Error instance if
/// a record could not be extracted, or if one could be extracted it should
/// return success and set Len to the number of bytes this record occupied in
/// the underlying stream, and it should fill out the fields of the value type
/// Item appropriately to represent the current record.
///
/// You can specialize this template for your own custom value types to avoid
/// having to specify a second template argument to VarStreamArray (documented
/// below).
template <typename T> struct VarStreamArrayExtractor {
  // Method intentionally deleted.  You must provide an explicit specialization
  // with the following method implemented.
  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
                   T &Item) const = delete;
};
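
// A sketch of such a specialization (MyRecord and its decoding logic are
// hypothetical):
//
//   template <> struct VarStreamArrayExtractor<MyRecord> {
//     Error operator()(BinaryStreamRef Stream, uint32_t &Len,
//                      MyRecord &Item) const {
//       // Decode Item from the start of Stream and set Len to the number of
//       // bytes the record occupies, or return an Error on failure.
//       return Error::success();
//     }
//   };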

/// VarStreamArray represents an array of variable length records backed by a
/// stream.  This could be a contiguous sequence of bytes in memory, it could
/// be a file on disk, or it could be a PDB stream where bytes are stored as
/// discontiguous blocks in a file.  Usually it is desirable to treat arrays
/// as contiguous blocks of memory, but doing so with large PDB files, for
/// example, could mean allocating huge amounts of memory just to allow
/// re-ordering of stream data to be contiguous before iterating over it.  By
/// abstracting this out, we need not duplicate this memory, and we can
/// iterate over arrays in arbitrarily formatted streams.  Elements are parsed
/// lazily on iteration, so there is no upfront cost associated with building
/// or copying a VarStreamArray, no matter how large it may be.
///
/// You create a VarStreamArray by specifying a ValueType and an Extractor type.
/// If you do not specify an Extractor type, you are expected to specialize
/// VarStreamArrayExtractor<T> for your ValueType.
///
/// By default an Extractor is default constructed in the class, but in some
/// cases you might find it useful for an Extractor to maintain state across
/// extractions.  In this case you can provide your own Extractor through a
/// secondary constructor.  The following examples show various ways of
/// creating a VarStreamArray.
///
///       // Will use VarStreamArrayExtractor<MyType> as the extractor.
///       VarStreamArray<MyType> MyTypeArray;
///
///       // Will use a default-constructed MyExtractor as the extractor.
///       VarStreamArray<MyType, MyExtractor> MyTypeArray2;
///
///       // Will use the specific instance of MyExtractor provided.
///       // MyExtractor need not be default-constructible in this case.
///       MyExtractor E(SomeContext);
///       VarStreamArray<MyType, MyExtractor> MyTypeArray3(E);
///

template <typename ValueType, typename Extractor> class VarStreamArrayIterator;

template <typename ValueType,
          typename Extractor = VarStreamArrayExtractor<ValueType>>
class VarStreamArray {
  friend class VarStreamArrayIterator<ValueType, Extractor>;

public:
  typedef VarStreamArrayIterator<ValueType, Extractor> Iterator;

  VarStreamArray() = default;

  explicit VarStreamArray(const Extractor &E) : E(E) {}

  explicit VarStreamArray(BinaryStreamRef Stream, uint32_t Skew = 0)
      : Stream(Stream), Skew(Skew) {}

  VarStreamArray(BinaryStreamRef Stream, const Extractor &E, uint32_t Skew = 0)
      : Stream(Stream), E(E), Skew(Skew) {}

  Iterator begin(bool *HadError = nullptr) const {
    return Iterator(*this, E, Skew, HadError);
  }

  bool valid() const { return Stream.valid(); }

  bool isOffsetValid(uint32_t Offset) const { return at(Offset) != end(); }

  uint32_t skew() const { return Skew; }
  Iterator end() const { return Iterator(E); }

  bool empty() const { return Stream.getLength() == 0; }

  VarStreamArray<ValueType, Extractor> substream(uint32_t Begin,
                                                 uint32_t End) const {
    assert(Begin >= Skew);
    // We should never cut off the beginning of the stream since it might be
    // skewed, meaning the initial bytes are important.
    BinaryStreamRef NewStream = Stream.slice(0, End);
    return {NewStream, E, Begin};
  }

  /// given an offset into the array's underlying stream, return an
  /// iterator to the record at that offset.  This is considered unsafe
  /// since the behavior is undefined if \p Offset does not refer to the
  /// beginning of a valid record.
  Iterator at(uint32_t Offset) const {
    return Iterator(*this, E, Offset, nullptr);
  }

  const Extractor &getExtractor() const { return E; }
  Extractor &getExtractor() { return E; }

  BinaryStreamRef getUnderlyingStream() const { return Stream; }
  void setUnderlyingStream(BinaryStreamRef NewStream, uint32_t NewSkew = 0) {
    Stream = NewStream;
    Skew = NewSkew;
  }

  void drop_front() { Skew += begin()->length(); }

private:
  BinaryStreamRef Stream;
  Extractor E;
  uint32_t Skew = 0;
};

template <typename ValueType, typename Extractor>
class VarStreamArrayIterator
    : public iterator_facade_base<VarStreamArrayIterator<ValueType, Extractor>,
                                  std::forward_iterator_tag, const ValueType> {
  typedef VarStreamArrayIterator<ValueType, Extractor> IterType;
  typedef VarStreamArray<ValueType, Extractor> ArrayType;

public:
  VarStreamArrayIterator(const ArrayType &Array, const Extractor &E,
                         uint32_t Offset, bool *HadError)
      : IterRef(Array.Stream.drop_front(Offset)), Extract(E),
        Array(&Array), AbsOffset(Offset), HadError(HadError) {
    if (IterRef.getLength() == 0)
      moveToEnd();
    else {
      auto EC = Extract(IterRef, ThisLen, ThisValue);
      if (EC) {
        consumeError(std::move(EC));
        markError();
      }
    }
  }

  VarStreamArrayIterator() = default;
  explicit VarStreamArrayIterator(const Extractor &E) : Extract(E) {}
  ~VarStreamArrayIterator() = default;

  bool operator==(const IterType &R) const {
    if (Array && R.Array) {
      // Both have a valid array; make sure they're the same.
      assert(Array == R.Array);
      return IterRef == R.IterRef;
    }

    // Both iterators are at the end.
    if (!Array && !R.Array)
      return true;

    // One is not at the end and one is.
    return false;
  }

  const ValueType &operator*() const {
    assert(Array && !HasError);
    return ThisValue;
  }

  IterType &operator+=(unsigned N) {
    for (unsigned I = 0; I < N; ++I) {
      // We are done with the current record, discard it so that we are
      // positioned at the next record.
      AbsOffset += ThisLen;
      IterRef = IterRef.drop_front(ThisLen);
      if (IterRef.getLength() == 0) {
        // There is nothing after the current record, we must make this an end
        // iterator.
        moveToEnd();
      } else {
        // There is some data after the current record.
        auto EC = Extract(IterRef, ThisLen, ThisValue);
        if (EC) {
          consumeError(std::move(EC));
          markError();
        } else if (ThisLen == 0) {
          // An empty record? Make this an end iterator.
          moveToEnd();
        }
      }
    }
    return *this;
  }

  uint32_t offset() const { return AbsOffset; }
  uint32_t getRecordLength() const { return ThisLen; }

private:
  void moveToEnd() {
    Array = nullptr;
    ThisLen = 0;
  }
  void markError() {
    moveToEnd();
    HasError = true;
    if (HadError != nullptr)
      *HadError = true;
  }

  ValueType ThisValue;
  BinaryStreamRef IterRef;
  Extractor Extract;
  const ArrayType *Array{nullptr};
  uint32_t ThisLen{0};
  uint32_t AbsOffset{0};
  bool HasError{false};
  bool *HadError{nullptr};
};

template <typename T> class FixedStreamArrayIterator;

/// FixedStreamArray is similar to VarStreamArray, except with each record
/// having a fixed-length.  As with VarStreamArray, there is no upfront
/// cost associated with building or copying a FixedStreamArray, as the
/// memory for each element is not read from the backing stream until that
/// element is iterated.
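///
/// A minimal usage sketch (the element type and stream here are illustrative;
/// sizeof(T) must evenly divide the stream length):
///
///       FixedStreamArray<support::ulittle32_t> Numbers(Stream);
///       for (uint32_t N : Numbers)
///         consume(N);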
template <typename T> class FixedStreamArray {
  friend class FixedStreamArrayIterator<T>;

public:
  typedef FixedStreamArrayIterator<T> Iterator;

  FixedStreamArray() = default;
  explicit FixedStreamArray(BinaryStreamRef Stream) : Stream(Stream) {
    assert(Stream.getLength() % sizeof(T) == 0);
  }

  bool operator==(const FixedStreamArray<T> &Other) const {
    return Stream == Other.Stream;
  }

  bool operator!=(const FixedStreamArray<T> &Other) const {
    return !(*this == Other);
  }

  FixedStreamArray(const FixedStreamArray &) = default;
  FixedStreamArray &operator=(const FixedStreamArray &) = default;

  const T &operator[](uint32_t Index) const {
    assert(Index < size());
    uint32_t Off = Index * sizeof(T);
    ArrayRef<uint8_t> Data;
    if (auto EC = Stream.readBytes(Off, sizeof(T), Data)) {
      assert(false && "Unexpected failure reading from stream");
      // This should never happen since we asserted that the stream length was
      // an exact multiple of the element size.
      consumeError(std::move(EC));
    }
    assert(isAddrAligned(Align::Of<T>(), Data.data()));
    return *reinterpret_cast<const T *>(Data.data());
  }

  uint32_t size() const { return Stream.getLength() / sizeof(T); }

  bool empty() const { return size() == 0; }

  FixedStreamArrayIterator<T> begin() const {
    return FixedStreamArrayIterator<T>(*this, 0);
  }

  FixedStreamArrayIterator<T> end() const {
    return FixedStreamArrayIterator<T>(*this, size());
  }

  const T &front() const { return *begin(); }
  const T &back() const {
    FixedStreamArrayIterator<T> I = end();
    return *(--I);
  }

  BinaryStreamRef getUnderlyingStream() const { return Stream; }

private:
  BinaryStreamRef Stream;
};

template <typename T>
class FixedStreamArrayIterator
    : public iterator_facade_base<FixedStreamArrayIterator<T>,
                                  std::random_access_iterator_tag, const T> {

public:
  FixedStreamArrayIterator(const FixedStreamArray<T> &Array, uint32_t Index)
      : Array(Array), Index(Index) {}

  FixedStreamArrayIterator(const FixedStreamArrayIterator<T> &Other)
      : Array(Other.Array), Index(Other.Index) {}
  FixedStreamArrayIterator<T> &
  operator=(const FixedStreamArrayIterator<T> &Other) {
    Array = Other.Array;
    Index = Other.Index;
    return *this;
  }

  const T &operator*() const { return Array[Index]; }
  const T &operator*() { return Array[Index]; }

  bool operator==(const FixedStreamArrayIterator<T> &R) const {
    assert(Array == R.Array);
    return (Index == R.Index) && (Array == R.Array);
  }

  FixedStreamArrayIterator<T> &operator+=(std::ptrdiff_t N) {
    Index += N;
    return *this;
  }

  FixedStreamArrayIterator<T> &operator-=(std::ptrdiff_t N) {
    assert(std::ptrdiff_t(Index) >= N);
    Index -= N;
    return *this;
  }

  std::ptrdiff_t operator-(const FixedStreamArrayIterator<T> &R) const {
    assert(Array == R.Array);
    assert(Index >= R.Index);
    return Index - R.Index;
  }

  bool operator<(const FixedStreamArrayIterator<T> &RHS) const {
    assert(Array == RHS.Array);
    return Index < RHS.Index;
  }

private:
  FixedStreamArray<T> Array;
  uint32_t Index;
};

} // namespace llvm

#endif // LLVM_SUPPORT_BINARYSTREAMARRAY_H
//===- Allocator.h - Simple memory allocation abstraction -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the BumpPtrAllocator interface. BumpPtrAllocator conforms
/// to the LLVM "Allocator" concept and is similar to MallocAllocator, but
/// objects cannot be deallocated. Their lifetime is tied to the lifetime of the
/// allocator.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALLOCATOR_H
#define LLVM_SUPPORT_ALLOCATOR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

namespace llvm {

namespace detail {

// We call out to an external function to actually print the message as the
// printing code uses Allocator.h in its implementation.
void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                size_t TotalMemory);

} // end namespace detail

/// Allocate memory in an ever growing pool, as if by bump-pointer.
///
/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
/// memory rather than relying on a boundless contiguous heap. However, it has
/// bump-pointer semantics in that it is a monotonically growing pool of memory
/// where every allocation is found by merely allocating the next N bytes in
/// the slab, or the next N bytes in the next slab.
///
/// Note that this also has a threshold for forcing allocations above a certain
/// size into their own slab.
///
/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
/// object, which wraps malloc, to allocate memory, but it can be changed to
/// use a custom allocator.
///
/// The GrowthDelay specifies after how many allocated slabs the allocator
/// increases the size of the slabs.
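///
/// A minimal usage sketch (names here are illustrative):
///
///     BumpPtrAllocator Alloc;
///     int *P = Alloc.Allocate<int>(16); // uninitialized space for 16 ints
///     // ... use P; everything is freed when Alloc is destroyed or Reset().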
template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
          size_t SizeThreshold = SlabSize, size_t GrowthDelay = 128>
class BumpPtrAllocatorImpl
    : public AllocatorBase<BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay>>,
      private detail::AllocatorHolder<AllocatorT> {
  using AllocTy = detail::AllocatorHolder<AllocatorT>;

public:
  static_assert(SizeThreshold <= SlabSize,
                "The SizeThreshold must be at most the SlabSize to ensure "
                "that objects larger than a slab go into their own memory "
                "allocation.");
  static_assert(GrowthDelay > 0,
                "GrowthDelay must be at least 1, which already increases the "
                "slab size after each allocated slab.");

  BumpPtrAllocatorImpl() = default;

  template <typename T>
  BumpPtrAllocatorImpl(T &&Allocator)
      : AllocTy(std::forward<T>(Allocator)) {}

  // Manually implement a move constructor as we must clear the old allocator's
  // slabs as a matter of correctness.
  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
      : AllocTy(std::move(Old.getAllocator())), CurPtr(Old.CurPtr),
        End(Old.End), Slabs(std::move(Old.Slabs)),
        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
        BytesAllocated(Old.BytesAllocated), RedZoneSize(Old.RedZoneSize) {
    Old.CurPtr = Old.End = nullptr;
    Old.BytesAllocated = 0;
    Old.Slabs.clear();
    Old.CustomSizedSlabs.clear();
  }

  ~BumpPtrAllocatorImpl() {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();
  }

  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
    DeallocateSlabs(Slabs.begin(), Slabs.end());
    DeallocateCustomSizedSlabs();

    CurPtr = RHS.CurPtr;
    End = RHS.End;
    BytesAllocated = RHS.BytesAllocated;
    RedZoneSize = RHS.RedZoneSize;
    Slabs = std::move(RHS.Slabs);
    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
    AllocTy::operator=(std::move(RHS.getAllocator()));

    RHS.CurPtr = RHS.End = nullptr;
    RHS.BytesAllocated = 0;
    RHS.Slabs.clear();
    RHS.CustomSizedSlabs.clear();
    return *this;
  }

  /// Deallocate all but the current slab and reset the current pointer
  /// to the beginning of it, freeing all memory allocated so far.
  void Reset() {
    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
    DeallocateCustomSizedSlabs();
    CustomSizedSlabs.clear();

    if (Slabs.empty())
      return;

    // Reset the state.
    BytesAllocated = 0;
    CurPtr = (char *)Slabs.front();
    End = CurPtr + SlabSize;

    __asan_poison_memory_region(*Slabs.begin(), computeSlabSize(0));
    DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
    Slabs.erase(std::next(Slabs.begin()), Slabs.end());
  }

  /// Allocate space at the specified alignment.
  // This method is *not* marked noalias, because
  // SpecificBumpPtrAllocator::DestroyAll() loops over all allocations, and
  // that loop is not based on the Allocate() return value.
  //
  // Allocate(0, N) is valid, it returns a non-null pointer (which should not
  // be dereferenced).
  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, Align Alignment) {
    // Keep track of how many bytes we've allocated.
    BytesAllocated += Size;

    size_t Adjustment = offsetToAlignedAddr(CurPtr, Alignment);
    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");

    size_t SizeToAllocate = Size;
#if LLVM_ADDRESS_SANITIZER_BUILD
    // Add trailing bytes as a "red zone" under ASan.
    SizeToAllocate += RedZoneSize;
#endif

    // Check if we have enough space.
    if (Adjustment + SizeToAllocate <= size_t(End - CurPtr)
        // We can't return nullptr even for a zero-sized allocation!
        && CurPtr != nullptr) {
      char *AlignedPtr = CurPtr + Adjustment;
      CurPtr = AlignedPtr + SizeToAllocate;
      // Update the allocation point of this memory block in MemorySanitizer.
      // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
      __msan_allocated_memory(AlignedPtr, Size);
      // Similarly, tell ASan about this space.
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // If Size is really big, allocate a separate slab for it.
    size_t PaddedSize = SizeToAllocate + Alignment.value() - 1;
    if (PaddedSize > SizeThreshold) {
      void *NewSlab =
          this->getAllocator().Allocate(PaddedSize, alignof(std::max_align_t));
      // We own the new slab and don't want anyone reading anything other than
      // pieces returned from this method.  So poison the whole slab.
      __asan_poison_memory_region(NewSlab, PaddedSize);
      CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));

      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
      char *AlignedPtr = (char*)AlignedAddr;
      __msan_allocated_memory(AlignedPtr, Size);
      __asan_unpoison_memory_region(AlignedPtr, Size);
      return AlignedPtr;
    }

    // Otherwise, start a new slab and try again.
    StartNewSlab();
    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
    assert(AlignedAddr + SizeToAllocate <= (uintptr_t)End &&
           "Unable to allocate memory!");
    char *AlignedPtr = (char*)AlignedAddr;
    CurPtr = AlignedPtr + SizeToAllocate;
    __msan_allocated_memory(AlignedPtr, Size);
    __asan_unpoison_memory_region(AlignedPtr, Size);
    return AlignedPtr;
  }

  inline LLVM_ATTRIBUTE_RETURNS_NONNULL void *
  Allocate(size_t Size, size_t Alignment) {
    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
    return Allocate(Size, Align(Alignment));
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;

  // Bump pointer allocators are expected to never free their storage, and
  // clients expect pointers to remain valid for non-dereferencing uses even
  // after deallocation.
  void Deallocate(const void *Ptr, size_t Size, size_t /*Alignment*/) {
    __asan_poison_memory_region(Ptr, Size);
  }

  // Pull in base class overloads.
  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;

  size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }

  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  /// The returned value is negative iff the object is inside a custom-size
  /// slab.
  /// Returns an empty optional if the pointer is not found in the allocator.
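  /// For example, under this scheme a pointer to byte 16 of the first regular
  /// slab maps to 16, while a pointer to byte 16 of the first custom-sized
  /// slab maps to -1 - 16 = -17.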
  std::optional<int64_t> identifyObject(const void *Ptr) {
    const char *P = static_cast<const char *>(Ptr);
    int64_t InSlabIdx = 0;
    for (size_t Idx = 0, E = Slabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(Slabs[Idx]);
      if (P >= S && P < S + computeSlabSize(Idx))
        return InSlabIdx + static_cast<int64_t>(P - S);
      InSlabIdx += static_cast<int64_t>(computeSlabSize(Idx));
    }

    // Use negative index to denote custom sized slabs.
    int64_t InCustomSizedSlabIdx = -1;
    for (size_t Idx = 0, E = CustomSizedSlabs.size(); Idx < E; Idx++) {
      const char *S = static_cast<const char *>(CustomSizedSlabs[Idx].first);
      size_t Size = CustomSizedSlabs[Idx].second;
      if (P >= S && P < S + Size)
        return InCustomSizedSlabIdx - static_cast<int64_t>(P - S);
      InCustomSizedSlabIdx -= static_cast<int64_t>(Size);
    }
    return std::nullopt;
  }

  /// A wrapper around identifyObject that additionally asserts that
  /// the object is indeed within the allocator.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator.
  int64_t identifyKnownObject(const void *Ptr) {
    std::optional<int64_t> Out = identifyObject(Ptr);
    assert(Out && "Wrong allocator used");
    return *Out;
  }

  /// A wrapper around identifyKnownObject. Accepts type information
  /// about the object and produces a smaller identifier by relying on
  /// the alignment information. Note that sub-classes may have different
  /// alignment, so the base-most class should be passed as the template
  /// parameter in order to obtain correct results. For that reason automatic
  /// template parameter deduction is disabled.
  /// \return An index uniquely and reproducibly identifying
  /// an input pointer \p Ptr in the given allocator. This identifier is
  /// different from the ones produced by identifyObject and
  /// identifyKnownObject.
  template <typename T>
  int64_t identifyKnownAlignedObject(const void *Ptr) {
    int64_t Out = identifyKnownObject(Ptr);
    assert(Out % alignof(T) == 0 && "Wrong alignment information");
    return Out / alignof(T);
  }

  size_t getTotalMemory() const {
    size_t TotalMemory = 0;
    for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
      TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
    for (const auto &PtrAndSize : CustomSizedSlabs)
      TotalMemory += PtrAndSize.second;
    return TotalMemory;
  }

  size_t getBytesAllocated() const { return BytesAllocated; }

  void setRedZoneSize(size_t NewSize) {
    RedZoneSize = NewSize;
  }

  void PrintStats() const {
    detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
                                       getTotalMemory());
  }

private:
  /// The current pointer into the current slab.
  ///
  /// This points to the next free byte in the slab.
  char *CurPtr = nullptr;

  /// The end of the current slab.
  char *End = nullptr;

  /// The slabs allocated so far.
  SmallVector<void *, 4> Slabs;

  /// Custom-sized slabs allocated for too-large allocation requests.
  SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;

  /// How many bytes we've allocated.
  ///
  /// Used so that we can compute how much space was wasted.
  size_t BytesAllocated = 0;

  /// The number of bytes to put between allocations when running under
  /// a sanitizer.
  size_t RedZoneSize = 1;

  static size_t computeSlabSize(unsigned SlabIdx) {
    // Scale the actual allocated slab size based on the number of slabs
    // allocated. Every GrowthDelay slabs allocated, we double
    // the allocated size to reduce allocation frequency, but saturate at
    // multiplying the slab size by 2^30.
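    // For example, with the default SlabSize = 4096 and GrowthDelay = 128,
    // slabs 0-127 are 4096 bytes each and slabs 128-255 are 8192 bytes each.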
    return SlabSize *
           ((size_t)1 << std::min<size_t>(30, SlabIdx / GrowthDelay));
  }

  /// Allocate a new slab and move the bump pointers over into the new
  /// slab, modifying CurPtr and End.
  void StartNewSlab() {
    size_t AllocatedSlabSize = computeSlabSize(Slabs.size());

    void *NewSlab = this->getAllocator().Allocate(AllocatedSlabSize,
                                                  alignof(std::max_align_t));
    // We own the new slab and don't want anyone reading anything other than
    // pieces returned from this method.  So poison the whole slab.
    __asan_poison_memory_region(NewSlab, AllocatedSlabSize);

    Slabs.push_back(NewSlab);
    CurPtr = (char *)(NewSlab);
    End = ((char *)NewSlab) + AllocatedSlabSize;
  }

  /// Deallocate a sequence of slabs.
  void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
                       SmallVectorImpl<void *>::iterator E) {
    for (; I != E; ++I) {
      size_t AllocatedSlabSize =
          computeSlabSize(std::distance(Slabs.begin(), I));
      this->getAllocator().Deallocate(*I, AllocatedSlabSize,
                                      alignof(std::max_align_t));
    }
  }

  /// Deallocate all memory for custom sized slabs.
  void DeallocateCustomSizedSlabs() {
    for (auto &PtrAndSize : CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      this->getAllocator().Deallocate(Ptr, Size, alignof(std::max_align_t));
    }
  }

  template <typename T> friend class SpecificBumpPtrAllocator;
};

/// The standard BumpPtrAllocator which just uses the default template
/// parameters.
typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;

/// A BumpPtrAllocator that allows only elements of a specific type to be
/// allocated.
///
/// This allows calling the destructor in DestroyAll() and when the allocator is
/// destroyed.
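///
/// A usage sketch (MyType is illustrative); objects are constructed with
/// placement new and destroyed by DestroyAll() or the destructor:
///
///     SpecificBumpPtrAllocator<MyType> Alloc;
///     MyType *Obj = new (Alloc.Allocate()) MyType(/*args*/);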
template <typename T> class SpecificBumpPtrAllocator {
  BumpPtrAllocator Allocator;

public:
  SpecificBumpPtrAllocator() {
    // Because SpecificBumpPtrAllocator walks the memory to call destructors,
    // it can't have red zones between allocations.
    Allocator.setRedZoneSize(0);
  }
  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
      : Allocator(std::move(Old.Allocator)) {}
  ~SpecificBumpPtrAllocator() { DestroyAll(); }

  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
    Allocator = std::move(RHS.Allocator);
    return *this;
  }

  /// Call the destructor of each allocated object, then deallocate all but the
  /// current slab, resetting the current pointer to the beginning of it and
  /// freeing all memory allocated so far.
  void DestroyAll() {
    auto DestroyElements = [](char *Begin, char *End) {
      assert(Begin == (char *)alignAddr(Begin, Align::Of<T>()));
      for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
        reinterpret_cast<T *>(Ptr)->~T();
    };

    for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
         ++I) {
      size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
          std::distance(Allocator.Slabs.begin(), I));
      char *Begin = (char *)alignAddr(*I, Align::Of<T>());
      char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                               : (char *)*I + AllocatedSlabSize;

      DestroyElements(Begin, End);
    }

    for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
      void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
      DestroyElements((char *)alignAddr(Ptr, Align::Of<T>()),
                      (char *)Ptr + Size);
    }

    Allocator.Reset();
  }

  /// Allocate space for an array of objects without constructing them.
  T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};

} // end namespace llvm

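// A placement-new sketch over a BumpPtrAllocator (MyType is illustrative);
// such objects must not be deleted individually, since the allocator owns
// the memory:
//
//     BumpPtrAllocator Alloc;
//     MyType *Obj = new (Alloc) MyType(/*args*/);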
template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void *
operator new(size_t Size,
             llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold,
                                        GrowthDelay> &Allocator) {
  return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
                                           alignof(std::max_align_t)));
}

template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold,
          size_t GrowthDelay>
void operator delete(void *,
                     llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
                                                SizeThreshold, GrowthDelay> &) {
}

#endif // LLVM_SUPPORT_ALLOCATOR_H
//===- LLVMDriver.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_LLVMDRIVER_H
#define LLVM_SUPPORT_LLVMDRIVER_H

#include "llvm/ADT/SmallVector.h"

namespace llvm {

struct ToolContext {
  const char *Path;
  const char *PrependArg;
  // PrependArg will be added unconditionally by the llvm-driver, but
  // NeedsPrependArg will be false if Path is adequate to reinvoke the tool.
  // This is useful if realpath is ever called on Path, in which case it will
  // point to the llvm-driver executable, where PrependArg will be needed to
  // invoke the correct tool.
  bool NeedsPrependArg;
};

} // namespace llvm

#endif
//===- SMLoc.h - Source location for use with diagnostics -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SMLoc class.  This class encapsulates a location in
// source code for use in diagnostics.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SMLOC_H
#define LLVM_SUPPORT_SMLOC_H

#include <cassert>
#include <optional>

namespace llvm {

/// Represents a location in source code.
class SMLoc {
  const char *Ptr = nullptr;

public:
  constexpr SMLoc() = default;

  constexpr bool isValid() const { return Ptr != nullptr; }

  constexpr bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; }
  constexpr bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; }

  constexpr const char *getPointer() const { return Ptr; }

  static SMLoc getFromPointer(const char *Ptr) {
    SMLoc L;
    L.Ptr = Ptr;
    return L;
  }
};

/// Represents a range in source code.
///
/// SMRange is implemented using a half-open range, as is the convention in C++.
/// In the string "abc", the range [1,3) represents the substring "bc", and the
/// range [2,2) represents an empty range between the characters "b" and "c".
class SMRange {
public:
  SMLoc Start, End;

  SMRange() = default;
  SMRange(std::nullopt_t) {}
  SMRange(SMLoc St, SMLoc En) : Start(St), End(En) {
    assert(Start.isValid() == End.isValid() &&
           "Start and End should either both be valid or both be invalid!");
  }

  bool isValid() const { return Start.isValid(); }
};

} // end namespace llvm

#endif // LLVM_SUPPORT_SMLOC_H
//=- CachePruning.h - Helper to manage the pruning of a cache dir -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements pruning of a directory intended for cache storage, using
// various policies.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CACHEPRUNING_H
#define LLVM_SUPPORT_CACHEPRUNING_H

#include "llvm/Support/MemoryBuffer.h"
#include <chrono>
#include <optional>

namespace llvm {

template <typename T> class Expected;
class StringRef;

/// Policy for the pruneCache() function. A default constructed
/// CachePruningPolicy provides a reasonable default policy.
struct CachePruningPolicy {
  /// The pruning interval. This is intended to be used to avoid scanning the
  /// directory too often. It does not impact the decision of which file to
  /// prune. A value of 0 forces the scan to occur. A value of std::nullopt
  /// disables pruning.
  std::optional<std::chrono::seconds> Interval = std::chrono::seconds(1200);

  /// The expiration for a file. When a file hasn't been accessed for Expiration
  /// seconds, it is removed from the cache. A value of 0 disables the
  /// expiration-based pruning.
  std::chrono::seconds Expiration = std::chrono::hours(7 * 24); // 1w

  /// The maximum size for the cache directory, in terms of percentage of the
  /// available space on the disk. Set to 100 to indicate no limit, or to 50 to
  /// indicate that the cache size will not exceed half of the available disk
  /// space. A value over 100 will be reduced to 100. A value of 0 disables the
  /// percentage size-based pruning.
  unsigned MaxSizePercentageOfAvailableSpace = 75;

  /// The maximum size for the cache directory in bytes. A value over the amount
  /// of available space on the disk will be reduced to the amount of available
  /// space. A value of 0 disables the absolute size-based pruning.
  uint64_t MaxSizeBytes = 0;

  /// The maximum number of files in the cache directory. A value of 0 disables
  /// the number of files based pruning.
  ///
  /// This defaults to 1000000 because with that many files there are
  /// diminishing returns on the effectiveness of the cache. Some systems limit
  /// the total number of files, and some also limit the number of files per
  /// directory. For example, on Linux ext4 with the default settings (4096-byte
  /// blocks, large_dir disabled), the per-directory entry limit is roughly
  /// 508*510*floor(4096/(40+8)) ~= 20M for an average filename length of 40.
  uint64_t MaxSizeFiles = 1000000;
};

/// Parse the given string as a cache pruning policy. Defaults are taken from a
/// default constructed CachePruningPolicy object.
/// For example: "prune_interval=30s:prune_after=24h:cache_size=50%"
/// which means a pruning interval of 30 seconds, expiration time of 24 hours
/// and maximum cache size of 50% of available disk space.
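///
/// A call sketch (error handling elided):
///
///     Expected<CachePruningPolicy> P =
///         parseCachePruningPolicy("prune_interval=30s:cache_size=50%");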
Expected<CachePruningPolicy> parseCachePruningPolicy(StringRef PolicyStr);

/// Perform pruning using the supplied policy; returns true if pruning
/// occurred, i.e. if Policy.Interval had expired.
///
/// Check whether cache pruning happens using the supplied policy, adds a
/// ThinLTO warning if cache_size_bytes or cache_size_files is too small for the
/// current link job. The warning recommends the user to consider adjusting
/// --thinlto-cache-policy.
///
/// As a safeguard against data loss if the user specifies the wrong directory
/// as their cache directory, this function will ignore files not matching the
/// pattern "llvmcache-*".
bool pruneCache(StringRef Path, CachePruningPolicy Policy,
                const std::vector<std::unique_ptr<MemoryBuffer>> &Files = {});
} // namespace llvm

#endif
//===- llvm/Support/YAMLTraits.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_YAMLTRAITS_H
#define LLVM_SUPPORT_YAMLTRAITS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <map>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <system_error>
#include <type_traits>
#include <vector>

namespace llvm {

class VersionTuple;

namespace yaml {

enum class NodeKind : uint8_t {
  Scalar,
  Map,
  Sequence,
};

struct EmptyContext {};

/// This class should be specialized by any type that needs to be converted
/// to/from a YAML mapping.  For example:
///
///     struct MappingTraits<MyStruct> {
///       static void mapping(IO &io, MyStruct &s) {
///         io.mapRequired("name", s.name);
///         io.mapRequired("size", s.size);
///         io.mapOptional("age",  s.age);
///       }
///     };
template<class T>
struct MappingTraits {
  // Must provide:
  // static void mapping(IO &io, T &fields);
  // Optionally may provide:
  // static std::string validate(IO &io, T &fields);
  // static void enumInput(IO &io, T &value);
  //
  // The optional flow flag will cause generated YAML to use a flow mapping
  // (e.g. { a: 0, b: 1 }):
  // static const bool flow = true;
};

/// This class is similar to MappingTraits<T> but allows you to pass in
/// additional context for each map operation.  For example:
///
///     struct MappingContextTraits<MyStruct, MyContext> {
///       static void mapping(IO &io, MyStruct &s, MyContext &c) {
///         io.mapRequired("name", s.name);
///         io.mapRequired("size", s.size);
///         io.mapOptional("age",  s.age);
///         ++c.TimesMapped;
///       }
///     };
template <class T, class Context> struct MappingContextTraits {
  // Must provide:
  // static void mapping(IO &io, T &fields, Context &Ctx);
  // Optionally may provide:
  // static std::string validate(IO &io, T &fields, Context &Ctx);
  //
  // The optional flow flag will cause generated YAML to use a flow mapping
  // (e.g. { a: 0, b: 1 }):
  // static const bool flow = true;
};

/// This class should be specialized by any integral type that converts
/// to/from a YAML scalar where there is a one-to-one mapping between
/// in-memory values and a string in YAML.  For example:
///
///     struct ScalarEnumerationTraits<Colors> {
///         static void enumeration(IO &io, Colors &value) {
///           io.enumCase(value, "red",   cRed);
///           io.enumCase(value, "blue",  cBlue);
///           io.enumCase(value, "green", cGreen);
///         }
///       };
template <typename T, typename Enable = void> struct ScalarEnumerationTraits {
  // Must provide:
  // static void enumeration(IO &io, T &value);
};

/// This class should be specialized by any integer type that is a union
/// of bit values and the YAML representation is a flow sequence of
/// strings.  For example:
///
///      struct ScalarBitSetTraits<MyFlags> {
///        static void bitset(IO &io, MyFlags &value) {
///          io.bitSetCase(value, "big",   flagBig);
///          io.bitSetCase(value, "flat",  flagFlat);
///          io.bitSetCase(value, "round", flagRound);
///        }
///      };
template <typename T, typename Enable = void> struct ScalarBitSetTraits {
  // Must provide:
  // static void bitset(IO &io, T &value);
};

/// Describe which type of quotes should be used when quoting is necessary.
/// Some non-printable characters need to be double-quoted, while some others
/// are fine with single-quoting, and some don't need any quoting.
enum class QuotingType { None, Single, Double };

/// This class should be specialized by any type that requires custom
/// conversion to/from a YAML scalar.  For example:
///
///    template<>
///    struct ScalarTraits<MyType> {
///      static void output(const MyType &val, void*, llvm::raw_ostream &out) {
///        // stream out custom formatting
///        out << llvm::format("%x", val);
///      }
///      static StringRef input(StringRef scalar, void*, MyType &value) {
///        // parse scalar and set `value`
///        // return empty string on success, or error string
///        return StringRef();
///      }
///      static QuotingType mustQuote(StringRef) { return QuotingType::Single; }
///    };
template <typename T, typename Enable = void> struct ScalarTraits {
  // Must provide:
  //
  // Function to write the value as a string:
  // static void output(const T &value, void *ctxt, llvm::raw_ostream &out);
  //
  // Function to convert a string to a value.  Returns the empty
  // StringRef on success or an error string if string is malformed:
  // static StringRef input(StringRef scalar, void *ctxt, T &value);
  //
  // Function to determine if the value should be quoted.
  // static QuotingType mustQuote(StringRef);
};

/// This class should be specialized by any type that requires custom
/// conversion to/from a YAML literal block scalar. For example:
///
///    template <>
///    struct BlockScalarTraits<MyType> {
///      static void output(const MyType &Value, void*, llvm::raw_ostream &Out)
///      {
///        // stream out custom formatting
///        Out << Value;
///      }
///      static StringRef input(StringRef Scalar, void*, MyType &Value) {
///        // parse scalar and set `value`
///        // return empty string on success, or error string
///        return StringRef();
///      }
///    };
template <typename T>
struct BlockScalarTraits {
  // Must provide:
  //
  // Function to write the value as a string:
  // static void output(const T &Value, void *ctx, llvm::raw_ostream &Out);
  //
  // Function to convert a string to a value.  Returns the empty
  // StringRef on success or an error string if string is malformed:
  // static StringRef input(StringRef Scalar, void *ctxt, T &Value);
  //
  // Optional:
  // static StringRef inputTag(T &Val, std::string Tag)
  // static void outputTag(const T &Val, raw_ostream &Out)
};

/// This class should be specialized by any type that requires custom
/// conversion to/from a YAML scalar with optional tags. For example:
///
///    template <>
///    struct TaggedScalarTraits<MyType> {
///      static void output(const MyType &Value, void*, llvm::raw_ostream
///      &ScalarOut, llvm::raw_ostream &TagOut)
///      {
///        // stream out custom formatting, including the optional tag
///        ScalarOut << Value;
///      }
///      static StringRef input(StringRef Scalar, StringRef Tag, void*, MyType
///      &Value) {
///        // parse scalar and set `value`
///        // return empty string on success, or error string
///        return StringRef();
///      }
///      static QuotingType mustQuote(const MyType &Value, StringRef) {
///        return QuotingType::Single;
///      }
///    };
template <typename T> struct TaggedScalarTraits {
  // Must provide:
  //
  // Function to write the value and tag as strings:
  // static void output(const T &Value, void *ctx, llvm::raw_ostream &ScalarOut,
  // llvm::raw_ostream &TagOut);
  //
  // Function to convert a string to a value.  Returns the empty
  // StringRef on success or an error string if string is malformed:
  // static StringRef input(StringRef Scalar, StringRef Tag, void *ctxt, T
  // &Value);
  //
  // Function to determine if the value should be quoted.
  // static QuotingType mustQuote(const T &Value, StringRef Scalar);
};

/// This class should be specialized by any type that needs to be converted
/// to/from a YAML sequence.  For example:
///
///    template<>
///    struct SequenceTraits<MyContainer> {
///      static size_t size(IO &io, MyContainer &seq) {
///        return seq.size();
///      }
///      static MyType& element(IO &, MyContainer &seq, size_t index) {
///        if ( index >= seq.size() )
///          seq.resize(index+1);
///        return seq[index];
///      }
///    };
template<typename T, typename EnableIf = void>
struct SequenceTraits {
  // Must provide:
  // static size_t size(IO &io, T &seq);
  // static T::value_type& element(IO &io, T &seq, size_t index);
  //
  // The following is optional and will cause generated YAML to use
  // a flow sequence (e.g. [a,b,c]).
  // static const bool flow = true;
};

/// This class should be specialized by any type for which vectors of that
/// type need to be converted to/from a YAML sequence.
template<typename T, typename EnableIf = void>
struct SequenceElementTraits {
  // Must provide:
  // static const bool flow;
};

/// This class should be specialized by any type that needs to be converted
/// to/from a list of YAML documents.
template<typename T>
struct DocumentListTraits {
  // Must provide:
  // static size_t size(IO &io, T &seq);
  // static T::value_type& element(IO &io, T &seq, size_t index);
};

/// This class should be specialized by any type that needs to be converted
/// to/from a YAML mapping in the case where the names of the keys are not known
/// in advance, e.g. a string map.
template <typename T>
struct CustomMappingTraits {
  // static void inputOne(IO &io, StringRef key, T &elem);
  // static void output(IO &io, T &elem);
};

/// This class should be specialized by any type that can be represented as
/// a scalar, map, or sequence, decided dynamically. For example:
///
///    typedef std::unique_ptr<MyBase> MyPoly;
///
///    template<>
///    struct PolymorphicTraits<MyPoly> {
///      static NodeKind getKind(const MyPoly &poly) {
///        return poly->getKind();
///      }
///      static MyScalar& getAsScalar(MyPoly &poly) {
///        if (!poly || !isa<MyScalar>(poly))
///          poly.reset(new MyScalar());
///        return *cast<MyScalar>(poly.get());
///      }
///      // ...
///    };
template <typename T> struct PolymorphicTraits {
  // Must provide:
  // static NodeKind getKind(const T &poly);
  // static scalar_type &getAsScalar(T &poly);
  // static map_type &getAsMap(T &poly);
  // static sequence_type &getAsSequence(T &poly);
};

// Only used for better diagnostics of missing traits
template <typename T>
struct MissingTrait;

// Test if ScalarEnumerationTraits<T> is defined on type T.
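// The detection idiom used here (and in the other has_* traits below): the
// char overload of test() is viable only when &U::enumeration matches the
// expected signature; otherwise the variadic double overload is selected, so
// comparing sizeof(test<...>(nullptr)) against 1 detects the trait.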
template <class T>
struct has_ScalarEnumerationTraits
{
  using Signature_enumeration = void (*)(class IO&, T&);

  template <typename U>
  static char test(SameType<Signature_enumeration, &U::enumeration>*);

  template <typename U>
  static double test(...);

  static bool const value =
    (sizeof(test<ScalarEnumerationTraits<T>>(nullptr)) == 1);
};

// Test if ScalarBitSetTraits<T> is defined on type T.
template <class T>
struct has_ScalarBitSetTraits
{
  using Signature_bitset = void (*)(class IO&, T&);

  template <typename U>
  static char test(SameType<Signature_bitset, &U::bitset>*);

  template <typename U>
  static double test(...);

  static bool const value = (sizeof(test<ScalarBitSetTraits<T>>(nullptr)) == 1);
};

// Test if ScalarTraits<T> is defined on type T.
template <class T>
struct has_ScalarTraits
{
  using Signature_input = StringRef (*)(StringRef, void*, T&);
  using Signature_output = void (*)(const T&, void*, raw_ostream&);
  using Signature_mustQuote = QuotingType (*)(StringRef);

  template <typename U>
  static char test(SameType<Signature_input, &U::input> *,
                   SameType<Signature_output, &U::output> *,
                   SameType<Signature_mustQuote, &U::mustQuote> *);

  template <typename U>
  static double test(...);

  static bool const value =
      (sizeof(test<ScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
};

// Test if BlockScalarTraits<T> is defined on type T.
template <class T>
struct has_BlockScalarTraits
{
  using Signature_input = StringRef (*)(StringRef, void *, T &);
  using Signature_output = void (*)(const T &, void *, raw_ostream &);

  template <typename U>
  static char test(SameType<Signature_input, &U::input> *,
                   SameType<Signature_output, &U::output> *);

  template <typename U>
  static double test(...);

  static bool const value =
      (sizeof(test<BlockScalarTraits<T>>(nullptr, nullptr)) == 1);
};

// Test if TaggedScalarTraits<T> is defined on type T.
template <class T> struct has_TaggedScalarTraits {
  using Signature_input = StringRef (*)(StringRef, StringRef, void *, T &);
  using Signature_output = void (*)(const T &, void *, raw_ostream &,
                                    raw_ostream &);
  using Signature_mustQuote = QuotingType (*)(const T &, StringRef);

  template <typename U>
  static char test(SameType<Signature_input, &U::input> *,
                   SameType<Signature_output, &U::output> *,
                   SameType<Signature_mustQuote, &U::mustQuote> *);

  template <typename U> static double test(...);

  static bool const value =
      (sizeof(test<TaggedScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
};

// Test if MappingContextTraits<T> is defined on type T.
template <class T, class Context> struct has_MappingTraits {
  using Signature_mapping = void (*)(class IO &, T &, Context &);

  template <typename U>
  static char test(SameType<Signature_mapping, &U::mapping>*);

  template <typename U>
  static double test(...);

  static bool const value =
      (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
};

// Test if MappingTraits<T> is defined on type T.
template <class T> struct has_MappingTraits<T, EmptyContext> {
  using Signature_mapping = void (*)(class IO &, T &);

  template <typename U>
  static char test(SameType<Signature_mapping, &U::mapping> *);

  template <typename U> static double test(...);

  static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
};

// Test if MappingContextTraits<T>::validate() is defined on type T.
template <class T, class Context> struct has_MappingValidateTraits {
  using Signature_validate = std::string (*)(class IO &, T &, Context &);

  template <typename U>
  static char test(SameType<Signature_validate, &U::validate>*);

  template <typename U>
  static double test(...);

  static bool const value =
      (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
};

// Test if MappingTraits<T>::validate() is defined on type T.
template <class T> struct has_MappingValidateTraits<T, EmptyContext> {
  using Signature_validate = std::string (*)(class IO &, T &);

  template <typename U>
  static char test(SameType<Signature_validate, &U::validate> *);

  template <typename U> static double test(...);

  static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
};

// Test if MappingContextTraits<T>::enumInput() is defined on type T.
template <class T, class Context> struct has_MappingEnumInputTraits {
  using Signature_validate = void (*)(class IO &, T &);

  template <typename U>
  static char test(SameType<Signature_validate, &U::enumInput> *);

  template <typename U> static double test(...);

  static bool const value =
      (sizeof(test<MappingContextTraits<T, Context>>(nullptr)) == 1);
};

// Test if MappingTraits<T>::enumInput() is defined on type T.
template <class T> struct has_MappingEnumInputTraits<T, EmptyContext> {
  using Signature_validate = void (*)(class IO &, T &);

  template <typename U>
  static char test(SameType<Signature_validate, &U::enumInput> *);

  template <typename U> static double test(...);

  static bool const value = (sizeof(test<MappingTraits<T>>(nullptr)) == 1);
};

// Test if SequenceTraits<T> is defined on type T.
template <class T>
struct has_SequenceMethodTraits
{
  using Signature_size = size_t (*)(class IO&, T&);

  template <typename U>
  static char test(SameType<Signature_size, &U::size>*);

  template <typename U>
  static double test(...);

  static bool const value =  (sizeof(test<SequenceTraits<T>>(nullptr)) == 1);
};

// Test if CustomMappingTraits<T> is defined on type T.
template <class T>
struct has_CustomMappingTraits
{
  using Signature_input = void (*)(IO &io, StringRef key, T &v);

  template <typename U>
  static char test(SameType<Signature_input, &U::inputOne>*);

  template <typename U>
  static double test(...);

  static bool const value =
      (sizeof(test<CustomMappingTraits<T>>(nullptr)) == 1);
};

// has_FlowTraits<int> would cause an error with some compilers because
// it subclasses int.  This wrapper instantiates the real has_FlowTraits
// only if the template type is a class.
template <typename T, bool Enabled = std::is_class_v<T>> class has_FlowTraits {
public:
   static const bool value = false;
};

// Some older gcc compilers don't support straightforward tests for members,
// so instead test for the ambiguity caused by the base and derived classes
// both defining the member.
template <class T>
struct has_FlowTraits<T, true>
{
  struct Fallback { bool flow; };
  struct Derived : T, Fallback { };

  template<typename C>
  static char (&f(SameType<bool Fallback::*, &C::flow>*))[1];

  template<typename C>
  static char (&f(...))[2];

  static bool const value = sizeof(f<Derived>(nullptr)) == 2;
};

// Test if SequenceTraits<T> is defined on type T
template<typename T>
struct has_SequenceTraits : public std::integral_constant<bool,
                                      has_SequenceMethodTraits<T>::value > { };

// Test if DocumentListTraits<T> is defined on type T
template <class T>
struct has_DocumentListTraits
{
  using Signature_size = size_t (*)(class IO &, T &);

  template <typename U>
  static char test(SameType<Signature_size, &U::size>*);

  template <typename U>
  static double test(...);

  static bool const value = (sizeof(test<DocumentListTraits<T>>(nullptr))==1);
};

template <class T> struct has_PolymorphicTraits {
  using Signature_getKind = NodeKind (*)(const T &);

  template <typename U>
  static char test(SameType<Signature_getKind, &U::getKind> *);

  template <typename U> static double test(...);

  static bool const value = (sizeof(test<PolymorphicTraits<T>>(nullptr)) == 1);
};

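// Returns true if S parses as a YAML 1.2 number: a decimal integer, a float
// with optional fraction and exponent, a "0o" octal or "0x" hex literal, or
// one of the .inf/.nan spellings (e.g. "-1.5e+3", "0x1F", ".inf"); lone signs
// such as "+" and misplaced dots are rejected.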
inline bool isNumeric(StringRef S) {
  const auto skipDigits = [](StringRef Input) {
    return Input.ltrim("0123456789");
  };

  // Ensure the calls below to S.front() (and to S.drop_front().front() when
  // S.front() is [+-]) are safe.
  if (S.empty() || S.equals("+") || S.equals("-"))
    return false;

  if (S.equals(".nan") || S.equals(".NaN") || S.equals(".NAN"))
    return true;

  // Infinity and decimal numbers can be prefixed with sign.
  StringRef Tail = (S.front() == '-' || S.front() == '+') ? S.drop_front() : S;

  // Check for infinity first, because checking for hex and oct numbers is more
  // expensive.
  if (Tail.equals(".inf") || Tail.equals(".Inf") || Tail.equals(".INF"))
    return true;

  // Section 10.3.2 Tag Resolution
  // YAML 1.2 Specification prohibits Base 8 and Base 16 numbers prefixed with
  // [-+], so S should be used instead of Tail.
  if (S.startswith("0o"))
    return S.size() > 2 &&
           S.drop_front(2).find_first_not_of("01234567") == StringRef::npos;

  if (S.startswith("0x"))
    return S.size() > 2 && S.drop_front(2).find_first_not_of(
                               "0123456789abcdefABCDEF") == StringRef::npos;

  // Parse float: [-+]? (\. [0-9]+ | [0-9]+ (\. [0-9]* )?) ([eE] [-+]? [0-9]+)?
  S = Tail;

  // Handle the case where the number starts with '.' and hence needs at least
  // one digit after the dot (as opposed to a number that has digits before the
  // dot), but doesn't have one.
  if (S.startswith(".") &&
      (S.equals(".") ||
       (S.size() > 1 && std::strchr("0123456789", S[1]) == nullptr)))
    return false;

  if (S.startswith("E") || S.startswith("e"))
    return false;

  enum ParseState {
    Default,
    FoundDot,
    FoundExponent,
  };
  ParseState State = Default;

  S = skipDigits(S);

  // Accept decimal integer.
  if (S.empty())
    return true;

  if (S.front() == '.') {
    State = FoundDot;
    S = S.drop_front();
  } else if (S.front() == 'e' || S.front() == 'E') {
    State = FoundExponent;
    S = S.drop_front();
  } else {
    return false;
  }

  if (State == FoundDot) {
    S = skipDigits(S);
    if (S.empty())
      return true;

    if (S.front() == 'e' || S.front() == 'E') {
      State = FoundExponent;
      S = S.drop_front();
    } else {
      return false;
    }
  }

  assert(State == FoundExponent && "Should have found exponent at this point.");
  if (S.empty())
    return false;

  if (S.front() == '+' || S.front() == '-') {
    S = S.drop_front();
    if (S.empty())
      return false;
  }

  return skipDigits(S).empty();
}

inline bool isNull(StringRef S) {
  return S.equals("null") || S.equals("Null") || S.equals("NULL") ||
         S.equals("~");
}

inline bool isBool(StringRef S) {
  // FIXME: using parseBool is causing multiple tests to fail.
  return S.equals("true") || S.equals("True") || S.equals("TRUE") ||
         S.equals("false") || S.equals("False") || S.equals("FALSE");
}

// 5.1. Character Set
// The allowed character range explicitly excludes the C0 control block #x0-#x1F
// (except for TAB #x9, LF #xA, and CR #xD which are allowed), DEL #x7F, the C1
// control block #x80-#x9F (except for NEL #x85 which is allowed), the surrogate
// block #xD800-#xDFFF, #xFFFE, and #xFFFF.
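// For example, needsQuotes("true") and needsQuotes("-1") return
// QuotingType::Single (bool-like and numeric scalars), while a string
// containing an embedded newline returns QuotingType::Double.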
inline QuotingType needsQuotes(StringRef S) {
  if (S.empty())
    return QuotingType::Single;

  QuotingType MaxQuotingNeeded = QuotingType::None;
  if (isSpace(static_cast<unsigned char>(S.front())) ||
      isSpace(static_cast<unsigned char>(S.back())))
    MaxQuotingNeeded = QuotingType::Single;
  if (isNull(S))
    MaxQuotingNeeded = QuotingType::Single;
  if (isBool(S))
    MaxQuotingNeeded = QuotingType::Single;
  if (isNumeric(S))
    MaxQuotingNeeded = QuotingType::Single;

  // 7.3.3 Plain Style
  // Plain scalars must not begin with most indicators, as this would cause
  // ambiguity with other YAML constructs.
  if (std::strchr(R"(-?:\,[]{}#&*!|>'"%@`)", S[0]) != nullptr)
    MaxQuotingNeeded = QuotingType::Single;

  for (unsigned char C : S) {
    // Alphanum is safe.
    if (isAlnum(C))
      continue;

    switch (C) {
    // Safe scalar characters.
    case '_':
    case '-':
    case '^':
    case '.':
    case ',':
    case ' ':
    // TAB (0x9) is allowed in unquoted strings.
    case 0x9:
      continue;
    // LF (0xA) and CR (0xD) may delimit values and so require at least single
    // quotes. The LLVM YAML parser cannot handle single-quoted multiline
    // strings, so use double quoting to produce valid YAML.
    case 0xA:
    case 0xD:
      return QuotingType::Double;
    // DEL (0x7F) is excluded from the allowed character range.
    case 0x7F:
      return QuotingType::Double;
    // Forward slash is allowed to be unquoted, but we quote it anyway.  We have
    // many tests that use FileCheck against YAML output, and this output often
    // contains paths.  If we quote backslashes but not forward slashes then
    // paths will come out either quoted or unquoted depending on which platform
    // the test is run on, making FileCheck comparisons difficult.
    case '/':
    default: {
      // C0 control block (0x0 - 0x1F) is excluded from the allowed character
      // range.
      if (C <= 0x1F)
        return QuotingType::Double;

      // Always double quote UTF-8.
      if ((C & 0x80) != 0)
        return QuotingType::Double;

      // The character is not safe, at least simple quoting needed.
      MaxQuotingNeeded = QuotingType::Single;
    }
    }
  }

  return MaxQuotingNeeded;
}

template <typename T, typename Context>
struct missingTraits
    : public std::integral_constant<bool,
                                    !has_ScalarEnumerationTraits<T>::value &&
                                        !has_ScalarBitSetTraits<T>::value &&
                                        !has_ScalarTraits<T>::value &&
                                        !has_BlockScalarTraits<T>::value &&
                                        !has_TaggedScalarTraits<T>::value &&
                                        !has_MappingTraits<T, Context>::value &&
                                        !has_SequenceTraits<T>::value &&
                                        !has_CustomMappingTraits<T>::value &&
                                        !has_DocumentListTraits<T>::value &&
                                        !has_PolymorphicTraits<T>::value> {};

template <typename T, typename Context>
struct validatedMappingTraits
    : public std::integral_constant<
          bool, has_MappingTraits<T, Context>::value &&
                    has_MappingValidateTraits<T, Context>::value> {};

template <typename T, typename Context>
struct unvalidatedMappingTraits
    : public std::integral_constant<
          bool, has_MappingTraits<T, Context>::value &&
                    !has_MappingValidateTraits<T, Context>::value> {};

// Base class for Input and Output.
class IO {
public:
  IO(void *Ctxt = nullptr);
  virtual ~IO();

  virtual bool outputting() const = 0;

  virtual unsigned beginSequence() = 0;
  virtual bool preflightElement(unsigned, void *&) = 0;
  virtual void postflightElement(void*) = 0;
  virtual void endSequence() = 0;
  virtual bool canElideEmptySequence() = 0;

  virtual unsigned beginFlowSequence() = 0;
  virtual bool preflightFlowElement(unsigned, void *&) = 0;
  virtual void postflightFlowElement(void*) = 0;
  virtual void endFlowSequence() = 0;

  virtual bool mapTag(StringRef Tag, bool Default=false) = 0;
  virtual void beginMapping() = 0;
  virtual void endMapping() = 0;
  virtual bool preflightKey(const char*, bool, bool, bool &, void *&) = 0;
  virtual void postflightKey(void*) = 0;
  virtual std::vector<StringRef> keys() = 0;

  virtual void beginFlowMapping() = 0;
  virtual void endFlowMapping() = 0;

  virtual void beginEnumScalar() = 0;
  virtual bool matchEnumScalar(const char*, bool) = 0;
  virtual bool matchEnumFallback() = 0;
  virtual void endEnumScalar() = 0;

  virtual bool beginBitSetScalar(bool &) = 0;
  virtual bool bitSetMatch(const char*, bool) = 0;
  virtual void endBitSetScalar() = 0;

  virtual void scalarString(StringRef &, QuotingType) = 0;
  virtual void blockScalarString(StringRef &) = 0;
  virtual void scalarTag(std::string &) = 0;

  virtual NodeKind getNodeKind() = 0;

  virtual void setError(const Twine &) = 0;
  virtual void setAllowUnknownKeys(bool Allow);

  template <typename T>
  void enumCase(T &Val, const char* Str, const T ConstVal) {
    if ( matchEnumScalar(Str, outputting() && Val == ConstVal) ) {
      Val = ConstVal;
    }
  }

  // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF
  template <typename T>
  void enumCase(T &Val, const char* Str, const uint32_t ConstVal) {
    if ( matchEnumScalar(Str, outputting() && Val == static_cast<T>(ConstVal)) ) {
      Val = ConstVal;
    }
  }

  template <typename FBT, typename T>
  void enumFallback(T &Val) {
    if (matchEnumFallback()) {
      EmptyContext Context;
      // FIXME: Force integral conversion to allow strong typedefs to convert.
      FBT Res = static_cast<typename FBT::BaseType>(Val);
      yamlize(*this, Res, true, Context);
      Val = static_cast<T>(static_cast<typename FBT::BaseType>(Res));
    }
  }

  template <typename T>
  void bitSetCase(T &Val, const char* Str, const T ConstVal) {
    if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
      Val = static_cast<T>(Val | ConstVal);
    }
  }

  // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF
  template <typename T>
  void bitSetCase(T &Val, const char* Str, const uint32_t ConstVal) {
    if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
      Val = static_cast<T>(Val | ConstVal);
    }
  }

  template <typename T>
  void maskedBitSetCase(T &Val, const char *Str, T ConstVal, T Mask) {
    if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
      Val = Val | ConstVal;
  }

  template <typename T>
  void maskedBitSetCase(T &Val, const char *Str, uint32_t ConstVal,
                        uint32_t Mask) {
    if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
      Val = Val | ConstVal;
  }

  void *getContext() const;
  void setContext(void *);

  template <typename T> void mapRequired(const char *Key, T &Val) {
    EmptyContext Ctx;
    this->processKey(Key, Val, true, Ctx);
  }

  template <typename T, typename Context>
  void mapRequired(const char *Key, T &Val, Context &Ctx) {
    this->processKey(Key, Val, true, Ctx);
  }

  template <typename T> void mapOptional(const char *Key, T &Val) {
    EmptyContext Ctx;
    mapOptionalWithContext(Key, Val, Ctx);
  }

  template <typename T, typename DefaultT>
  void mapOptional(const char *Key, T &Val, const DefaultT &Default) {
    EmptyContext Ctx;
    mapOptionalWithContext(Key, Val, Default, Ctx);
  }

  template <typename T, typename Context>
  std::enable_if_t<has_SequenceTraits<T>::value, void>
  mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) {
    // omit key/value instead of outputting empty sequence
    if (this->canElideEmptySequence() && !(Val.begin() != Val.end()))
      return;
    this->processKey(Key, Val, false, Ctx);
  }

  template <typename T, typename Context>
  void mapOptionalWithContext(const char *Key, std::optional<T> &Val,
                              Context &Ctx) {
    this->processKeyWithDefault(Key, Val, std::optional<T>(),
                                /*Required=*/false, Ctx);
  }

  template <typename T, typename Context>
  std::enable_if_t<!has_SequenceTraits<T>::value, void>
  mapOptionalWithContext(const char *Key, T &Val, Context &Ctx) {
    this->processKey(Key, Val, false, Ctx);
  }

  template <typename T, typename Context, typename DefaultT>
  void mapOptionalWithContext(const char *Key, T &Val, const DefaultT &Default,
                              Context &Ctx) {
    static_assert(std::is_convertible<DefaultT, T>::value,
                  "Default type must be implicitly convertible to value type!");
    this->processKeyWithDefault(Key, Val, static_cast<const T &>(Default),
                                false, Ctx);
  }

private:
  template <typename T, typename Context>
  void processKeyWithDefault(const char *Key, std::optional<T> &Val,
                             const std::optional<T> &DefaultValue,
                             bool Required, Context &Ctx);

  template <typename T, typename Context>
  void processKeyWithDefault(const char *Key, T &Val, const T &DefaultValue,
                             bool Required, Context &Ctx) {
    void *SaveInfo;
    bool UseDefault;
    const bool sameAsDefault = outputting() && Val == DefaultValue;
    if ( this->preflightKey(Key, Required, sameAsDefault, UseDefault,
                                                                  SaveInfo) ) {
      yamlize(*this, Val, Required, Ctx);
      this->postflightKey(SaveInfo);
    }
    else {
      if ( UseDefault )
        Val = DefaultValue;
    }
  }

  template <typename T, typename Context>
  void processKey(const char *Key, T &Val, bool Required, Context &Ctx) {
    void *SaveInfo;
    bool UseDefault;
    if ( this->preflightKey(Key, Required, false, UseDefault, SaveInfo) ) {
      yamlize(*this, Val, Required, Ctx);
      this->postflightKey(SaveInfo);
    }
  }

private:
  void *Ctxt;
};

namespace detail {

template <typename T, typename Context>
void doMapping(IO &io, T &Val, Context &Ctx) {
  MappingContextTraits<T, Context>::mapping(io, Val, Ctx);
}

template <typename T> void doMapping(IO &io, T &Val, EmptyContext &Ctx) {
  MappingTraits<T>::mapping(io, Val);
}

} // end namespace detail

template <typename T>
std::enable_if_t<has_ScalarEnumerationTraits<T>::value, void>
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
  io.beginEnumScalar();
  ScalarEnumerationTraits<T>::enumeration(io, Val);
  io.endEnumScalar();
}

template <typename T>
std::enable_if_t<has_ScalarBitSetTraits<T>::value, void>
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
  bool DoClear;
  if ( io.beginBitSetScalar(DoClear) ) {
    if ( DoClear )
      Val = T();
    ScalarBitSetTraits<T>::bitset(io, Val);
    io.endBitSetScalar();
  }
}

template <typename T>
std::enable_if_t<has_ScalarTraits<T>::value, void> yamlize(IO &io, T &Val, bool,
                                                           EmptyContext &Ctx) {
  if ( io.outputting() ) {
    SmallString<128> Storage;
    raw_svector_ostream Buffer(Storage);
    ScalarTraits<T>::output(Val, io.getContext(), Buffer);
    StringRef Str = Buffer.str();
    io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
  }
  else {
    StringRef Str;
    io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
    StringRef Result = ScalarTraits<T>::input(Str, io.getContext(), Val);
    if ( !Result.empty() ) {
      io.setError(Twine(Result));
    }
  }
}

template <typename T>
std::enable_if_t<has_BlockScalarTraits<T>::value, void>
yamlize(IO &YamlIO, T &Val, bool, EmptyContext &Ctx) {
  if (YamlIO.outputting()) {
    std::string Storage;
    raw_string_ostream Buffer(Storage);
    BlockScalarTraits<T>::output(Val, YamlIO.getContext(), Buffer);
    StringRef Str = Buffer.str();
    YamlIO.blockScalarString(Str);
  } else {
    StringRef Str;
    YamlIO.blockScalarString(Str);
    StringRef Result =
        BlockScalarTraits<T>::input(Str, YamlIO.getContext(), Val);
    if (!Result.empty())
      YamlIO.setError(Twine(Result));
  }
}

template <typename T>
std::enable_if_t<has_TaggedScalarTraits<T>::value, void>
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
  if (io.outputting()) {
    std::string ScalarStorage, TagStorage;
    raw_string_ostream ScalarBuffer(ScalarStorage), TagBuffer(TagStorage);
    TaggedScalarTraits<T>::output(Val, io.getContext(), ScalarBuffer,
                                  TagBuffer);
    io.scalarTag(TagBuffer.str());
    StringRef ScalarStr = ScalarBuffer.str();
    io.scalarString(ScalarStr,
                    TaggedScalarTraits<T>::mustQuote(Val, ScalarStr));
  } else {
    std::string Tag;
    io.scalarTag(Tag);
    StringRef Str;
    io.scalarString(Str, QuotingType::None);
    StringRef Result =
        TaggedScalarTraits<T>::input(Str, Tag, io.getContext(), Val);
    if (!Result.empty()) {
      io.setError(Twine(Result));
    }
  }
}

template <typename T, typename Context>
std::enable_if_t<validatedMappingTraits<T, Context>::value, void>
yamlize(IO &io, T &Val, bool, Context &Ctx) {
  if (has_FlowTraits<MappingTraits<T>>::value)
    io.beginFlowMapping();
  else
    io.beginMapping();
  if (io.outputting()) {
    std::string Err = MappingTraits<T>::validate(io, Val);
    if (!Err.empty()) {
      errs() << Err << "\n";
      assert(Err.empty() && "invalid struct trying to be written as yaml");
    }
  }
  detail::doMapping(io, Val, Ctx);
  if (!io.outputting()) {
    std::string Err = MappingTraits<T>::validate(io, Val);
    if (!Err.empty())
      io.setError(Err);
  }
  if (has_FlowTraits<MappingTraits<T>>::value)
    io.endFlowMapping();
  else
    io.endMapping();
}

template <typename T, typename Context>
std::enable_if_t<!has_MappingEnumInputTraits<T, Context>::value, bool>
yamlizeMappingEnumInput(IO &io, T &Val) {
  return false;
}

template <typename T, typename Context>
std::enable_if_t<has_MappingEnumInputTraits<T, Context>::value, bool>
yamlizeMappingEnumInput(IO &io, T &Val) {
  if (io.outputting())
    return false;

  io.beginEnumScalar();
  MappingTraits<T>::enumInput(io, Val);
  bool Matched = !io.matchEnumFallback();
  io.endEnumScalar();
  return Matched;
}

template <typename T, typename Context>
std::enable_if_t<unvalidatedMappingTraits<T, Context>::value, void>
yamlize(IO &io, T &Val, bool, Context &Ctx) {
  if (yamlizeMappingEnumInput<T, Context>(io, Val))
    return;
  if (has_FlowTraits<MappingTraits<T>>::value) {
    io.beginFlowMapping();
    detail::doMapping(io, Val, Ctx);
    io.endFlowMapping();
  } else {
    io.beginMapping();
    detail::doMapping(io, Val, Ctx);
    io.endMapping();
  }
}

template <typename T>
std::enable_if_t<has_CustomMappingTraits<T>::value, void>
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
  if ( io.outputting() ) {
    io.beginMapping();
    CustomMappingTraits<T>::output(io, Val);
    io.endMapping();
  } else {
    io.beginMapping();
    for (StringRef key : io.keys())
      CustomMappingTraits<T>::inputOne(io, key, Val);
    io.endMapping();
  }
}

template <typename T>
std::enable_if_t<has_PolymorphicTraits<T>::value, void>
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
  switch (io.outputting() ? PolymorphicTraits<T>::getKind(Val)
                          : io.getNodeKind()) {
  case NodeKind::Scalar:
    return yamlize(io, PolymorphicTraits<T>::getAsScalar(Val), true, Ctx);
  case NodeKind::Map:
    return yamlize(io, PolymorphicTraits<T>::getAsMap(Val), true, Ctx);
  case NodeKind::Sequence:
    return yamlize(io, PolymorphicTraits<T>::getAsSequence(Val), true, Ctx);
  }
}

template <typename T>
std::enable_if_t<missingTraits<T, EmptyContext>::value, void>
yamlize(IO &io, T &Val, bool, EmptyContext &Ctx) {
  char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
}

template <typename T, typename Context>
std::enable_if_t<has_SequenceTraits<T>::value, void>
yamlize(IO &io, T &Seq, bool, Context &Ctx) {
  if ( has_FlowTraits< SequenceTraits<T>>::value ) {
    unsigned incnt = io.beginFlowSequence();
    unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
    for(unsigned i=0; i < count; ++i) {
      void *SaveInfo;
      if ( io.preflightFlowElement(i, SaveInfo) ) {
        yamlize(io, SequenceTraits<T>::element(io, Seq, i), true, Ctx);
        io.postflightFlowElement(SaveInfo);
      }
    }
    io.endFlowSequence();
  }
  else {
    unsigned incnt = io.beginSequence();
    unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
    for(unsigned i=0; i < count; ++i) {
      void *SaveInfo;
      if ( io.preflightElement(i, SaveInfo) ) {
        yamlize(io, SequenceTraits<T>::element(io, Seq, i), true, Ctx);
        io.postflightElement(SaveInfo);
      }
    }
    io.endSequence();
  }
}

template<>
struct ScalarTraits<bool> {
  static void output(const bool &, void* , raw_ostream &);
  static StringRef input(StringRef, void *, bool &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<StringRef> {
  static void output(const StringRef &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, StringRef &);
  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};

template<>
struct ScalarTraits<std::string> {
  static void output(const std::string &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, std::string &);
  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};

template<>
struct ScalarTraits<uint8_t> {
  static void output(const uint8_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, uint8_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<uint16_t> {
  static void output(const uint16_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, uint16_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<uint32_t> {
  static void output(const uint32_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, uint32_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<uint64_t> {
  static void output(const uint64_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, uint64_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<int8_t> {
  static void output(const int8_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, int8_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<int16_t> {
  static void output(const int16_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, int16_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<int32_t> {
  static void output(const int32_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, int32_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<int64_t> {
  static void output(const int64_t &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, int64_t &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<float> {
  static void output(const float &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, float &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<double> {
  static void output(const double &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, double &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

// For endian types, we use existing scalar Traits class for the underlying
// type.  This way endian aware types are supported whenever the traits are
// defined for the underlying type.
template <typename value_type, support::endianness endian, size_t alignment>
struct ScalarTraits<support::detail::packed_endian_specific_integral<
                        value_type, endian, alignment>,
                    std::enable_if_t<has_ScalarTraits<value_type>::value>> {
  using endian_type =
      support::detail::packed_endian_specific_integral<value_type, endian,
                                                       alignment>;

  static void output(const endian_type &E, void *Ctx, raw_ostream &Stream) {
    ScalarTraits<value_type>::output(static_cast<value_type>(E), Ctx, Stream);
  }

  static StringRef input(StringRef Str, void *Ctx, endian_type &E) {
    value_type V;
    auto R = ScalarTraits<value_type>::input(Str, Ctx, V);
    E = static_cast<endian_type>(V);
    return R;
  }

  static QuotingType mustQuote(StringRef Str) {
    return ScalarTraits<value_type>::mustQuote(Str);
  }
};

template <typename value_type, support::endianness endian, size_t alignment>
struct ScalarEnumerationTraits<
    support::detail::packed_endian_specific_integral<value_type, endian,
                                                     alignment>,
    std::enable_if_t<has_ScalarEnumerationTraits<value_type>::value>> {
  using endian_type =
      support::detail::packed_endian_specific_integral<value_type, endian,
                                                       alignment>;

  static void enumeration(IO &io, endian_type &E) {
    value_type V = E;
    ScalarEnumerationTraits<value_type>::enumeration(io, V);
    E = V;
  }
};

template <typename value_type, support::endianness endian, size_t alignment>
struct ScalarBitSetTraits<
    support::detail::packed_endian_specific_integral<value_type, endian,
                                                     alignment>,
    std::enable_if_t<has_ScalarBitSetTraits<value_type>::value>> {
  using endian_type =
      support::detail::packed_endian_specific_integral<value_type, endian,
                                                       alignment>;
  static void bitset(IO &io, endian_type &E) {
    value_type V = E;
    ScalarBitSetTraits<value_type>::bitset(io, V);
    E = V;
  }
};

// Utility for use within MappingTraits<>::mapping() method
// to [de]normalize an object for use with YAML conversion.
template <typename TNorm, typename TFinal>
struct MappingNormalization {
  MappingNormalization(IO &i_o, TFinal &Obj)
      : io(i_o), BufPtr(nullptr), Result(Obj) {
    if ( io.outputting() ) {
      BufPtr = new (&Buffer) TNorm(io, Obj);
    }
    else {
      BufPtr = new (&Buffer) TNorm(io);
    }
  }

  ~MappingNormalization() {
    if ( ! io.outputting() ) {
      Result = BufPtr->denormalize(io);
    }
    BufPtr->~TNorm();
  }

  TNorm* operator->() { return BufPtr; }

private:
  using Storage = AlignedCharArrayUnion<TNorm>;

  Storage       Buffer;
  IO           &io;
  TNorm        *BufPtr;
  TFinal       &Result;
};
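
// Example use inside a MappingTraits<>::mapping() method (an illustrative
// sketch: the Polar/NormalizedPolar types and key names are hypothetical,
// and <cmath> is assumed):
//
//   struct Polar { double Distance; double Angle; };
//
//   template <> struct llvm::yaml::MappingTraits<Polar> {
//     struct NormalizedPolar {
//       NormalizedPolar(IO &io) : X(0), Y(0) {}
//       NormalizedPolar(IO &io, Polar &P)
//           : X(P.Distance * std::cos(P.Angle)),
//             Y(P.Distance * std::sin(P.Angle)) {}
//       Polar denormalize(IO &io) {
//         return {std::hypot(X, Y), std::atan2(Y, X)};
//       }
//       double X, Y;
//     };
//     static void mapping(IO &io, Polar &P) {
//       // Keys are read/written on the normalized form; denormalize()
//       // converts back to Polar when parsing.
//       MappingNormalization<NormalizedPolar, Polar> Keys(io, P);
//       io.mapRequired("x", Keys->X);
//       io.mapRequired("y", Keys->Y);
//     }
//   };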

// Utility for use within MappingTraits<>::mapping() method
// to [de]normalize an object for use with YAML conversion.
template <typename TNorm, typename TFinal>
struct MappingNormalizationHeap {
  MappingNormalizationHeap(IO &i_o, TFinal &Obj, BumpPtrAllocator *allocator)
    : io(i_o), Result(Obj) {
    if ( io.outputting() ) {
      BufPtr = new (&Buffer) TNorm(io, Obj);
    }
    else if (allocator) {
      BufPtr = allocator->Allocate<TNorm>();
      new (BufPtr) TNorm(io);
    } else {
      BufPtr = new TNorm(io);
    }
  }

  ~MappingNormalizationHeap() {
    if ( io.outputting() ) {
      BufPtr->~TNorm();
    }
    else {
      Result = BufPtr->denormalize(io);
    }
  }

  TNorm* operator->() { return BufPtr; }

private:
  using Storage = AlignedCharArrayUnion<TNorm>;

  Storage       Buffer;
  IO           &io;
  TNorm        *BufPtr = nullptr;
  TFinal       &Result;
};

///
/// The Input class is used to parse a yaml document into in-memory structs
/// and vectors.
///
/// It works by using YAMLParser to do a syntax parse of the entire yaml
/// document, then the Input class builds a graph of HNodes which wraps
/// each yaml Node.  The extra layer is buffering.  The low level yaml
/// parser only lets you look at each node once.  The buffering layer lets
/// you search and iterate multiple times.  This is necessary because
/// the mapRequired() method calls may not be in the same order
/// as the keys in the document.
///
class Input : public IO {
public:
  // Construct a yaml Input object from a StringRef and optional
  // user-data. The DiagHandler can be specified to provide
  // alternative error reporting.
  Input(StringRef InputContent,
        void *Ctxt = nullptr,
        SourceMgr::DiagHandlerTy DiagHandler = nullptr,
        void *DiagHandlerCtxt = nullptr);
  Input(MemoryBufferRef Input,
        void *Ctxt = nullptr,
        SourceMgr::DiagHandlerTy DiagHandler = nullptr,
        void *DiagHandlerCtxt = nullptr);
  ~Input() override;

  // Check if there was a syntax or semantic error during parsing.
  std::error_code error();

private:
  bool outputting() const override;
  bool mapTag(StringRef, bool) override;
  void beginMapping() override;
  void endMapping() override;
  bool preflightKey(const char *, bool, bool, bool &, void *&) override;
  void postflightKey(void *) override;
  std::vector<StringRef> keys() override;
  void beginFlowMapping() override;
  void endFlowMapping() override;
  unsigned beginSequence() override;
  void endSequence() override;
  bool preflightElement(unsigned index, void *&) override;
  void postflightElement(void *) override;
  unsigned beginFlowSequence() override;
  bool preflightFlowElement(unsigned , void *&) override;
  void postflightFlowElement(void *) override;
  void endFlowSequence() override;
  void beginEnumScalar() override;
  bool matchEnumScalar(const char*, bool) override;
  bool matchEnumFallback() override;
  void endEnumScalar() override;
  bool beginBitSetScalar(bool &) override;
  bool bitSetMatch(const char *, bool ) override;
  void endBitSetScalar() override;
  void scalarString(StringRef &, QuotingType) override;
  void blockScalarString(StringRef &) override;
  void scalarTag(std::string &) override;
  NodeKind getNodeKind() override;
  void setError(const Twine &message) override;
  bool canElideEmptySequence() override;

  class HNode {
    virtual void anchor();

  public:
    HNode(Node *n) : _node(n) { }
    virtual ~HNode() = default;

    static bool classof(const HNode *) { return true; }

    Node *_node;
  };

  class EmptyHNode : public HNode {
    void anchor() override;

  public:
    EmptyHNode(Node *n) : HNode(n) { }

    static bool classof(const HNode *n) { return NullNode::classof(n->_node); }

    static bool classof(const EmptyHNode *) { return true; }
  };

  class ScalarHNode : public HNode {
    void anchor() override;

  public:
    ScalarHNode(Node *n, StringRef s) : HNode(n), _value(s) { }

    StringRef value() const { return _value; }

    static bool classof(const HNode *n) {
      return ScalarNode::classof(n->_node) ||
             BlockScalarNode::classof(n->_node);
    }

    static bool classof(const ScalarHNode *) { return true; }

  protected:
    StringRef _value;
  };

  class MapHNode : public HNode {
    void anchor() override;

  public:
    MapHNode(Node *n) : HNode(n) { }

    static bool classof(const HNode *n) {
      return MappingNode::classof(n->_node);
    }

    static bool classof(const MapHNode *) { return true; }

    using NameToNodeAndLoc =
        StringMap<std::pair<std::unique_ptr<HNode>, SMRange>>;

    NameToNodeAndLoc Mapping;
    SmallVector<std::string, 6> ValidKeys;
  };

  class SequenceHNode : public HNode {
    void anchor() override;

  public:
    SequenceHNode(Node *n) : HNode(n) { }

    static bool classof(const HNode *n) {
      return SequenceNode::classof(n->_node);
    }

    static bool classof(const SequenceHNode *) { return true; }

    std::vector<std::unique_ptr<HNode>> Entries;
  };

  std::unique_ptr<Input::HNode> createHNodes(Node *node);
  void setError(HNode *hnode, const Twine &message);
  void setError(Node *node, const Twine &message);
  void setError(const SMRange &Range, const Twine &message);

  void reportWarning(HNode *hnode, const Twine &message);
  void reportWarning(Node *hnode, const Twine &message);
  void reportWarning(const SMRange &Range, const Twine &message);

public:
  // These are only used by operator>>. They could be private
  // if those templated things could be made friends.
  bool setCurrentDocument();
  bool nextDocument();

  /// Returns the current node that's being parsed by the YAML Parser.
  const Node *getCurrentNode() const;

  void setAllowUnknownKeys(bool Allow) override;

private:
  SourceMgr                           SrcMgr; // must be before Strm
  std::unique_ptr<llvm::yaml::Stream> Strm;
  std::unique_ptr<HNode>              TopNode;
  std::error_code                     EC;
  BumpPtrAllocator                    StringAllocator;
  document_iterator                   DocIterator;
  llvm::BitVector                     BitValuesUsed;
  HNode *CurrentNode = nullptr;
  bool                                ScalarMatchFound = false;
  bool AllowUnknownKeys = false;
};
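
// Minimal reading example (an illustrative sketch; the Doc struct and its
// keys are hypothetical):
//
//   struct Doc { std::string Name; int Count = 0; };
//
//   template <> struct llvm::yaml::MappingTraits<Doc> {
//     static void mapping(IO &io, Doc &D) {
//       io.mapRequired("name", D.Name);
//       io.mapOptional("count", D.Count, 0);
//     }
//   };
//
//   Doc D;
//   llvm::yaml::Input yin("{ name: widget, count: 3 }");
//   yin >> D;
//   if (std::error_code EC = yin.error())
//     ; // handle the syntax or semantic error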

///
/// The Output class is used to generate a yaml document from in-memory structs
/// and vectors.
///
class Output : public IO {
public:
  Output(raw_ostream &, void *Ctxt = nullptr, int WrapColumn = 70);
  ~Output() override;

  /// Set whether or not to output optional values which are equal
  /// to the default value.  By default, when outputting if you attempt
  /// to write a value that is equal to the default, the value gets ignored.
  /// Sometimes, it is useful to be able to see these in the resulting YAML
  /// anyway.
  void setWriteDefaultValues(bool Write) { WriteDefaultValues = Write; }

  bool outputting() const override;
  bool mapTag(StringRef, bool) override;
  void beginMapping() override;
  void endMapping() override;
  bool preflightKey(const char *key, bool, bool, bool &, void *&) override;
  void postflightKey(void *) override;
  std::vector<StringRef> keys() override;
  void beginFlowMapping() override;
  void endFlowMapping() override;
  unsigned beginSequence() override;
  void endSequence() override;
  bool preflightElement(unsigned, void *&) override;
  void postflightElement(void *) override;
  unsigned beginFlowSequence() override;
  bool preflightFlowElement(unsigned, void *&) override;
  void postflightFlowElement(void *) override;
  void endFlowSequence() override;
  void beginEnumScalar() override;
  bool matchEnumScalar(const char*, bool) override;
  bool matchEnumFallback() override;
  void endEnumScalar() override;
  bool beginBitSetScalar(bool &) override;
  bool bitSetMatch(const char *, bool ) override;
  void endBitSetScalar() override;
  void scalarString(StringRef &, QuotingType) override;
  void blockScalarString(StringRef &) override;
  void scalarTag(std::string &) override;
  NodeKind getNodeKind() override;
  void setError(const Twine &message) override;
  bool canElideEmptySequence() override;

  // These are only used by operator<<. They could be private
  // if that templated operator could be made a friend.
  void beginDocuments();
  bool preflightDocument(unsigned);
  void postflightDocument();
  void endDocuments();

private:
  void output(StringRef s);
  void outputUpToEndOfLine(StringRef s);
  void newLineCheck(bool EmptySequence = false);
  void outputNewLine();
  void paddedKey(StringRef key);
  void flowKey(StringRef Key);

  enum InState {
    inSeqFirstElement,
    inSeqOtherElement,
    inFlowSeqFirstElement,
    inFlowSeqOtherElement,
    inMapFirstKey,
    inMapOtherKey,
    inFlowMapFirstKey,
    inFlowMapOtherKey
  };

  static bool inSeqAnyElement(InState State);
  static bool inFlowSeqAnyElement(InState State);
  static bool inMapAnyKey(InState State);
  static bool inFlowMapAnyKey(InState State);

  raw_ostream &Out;
  int WrapColumn;
  SmallVector<InState, 8> StateStack;
  int Column = 0;
  int ColumnAtFlowStart = 0;
  int ColumnAtMapFlowStart = 0;
  bool NeedBitValueComma = false;
  bool NeedFlowSequenceComma = false;
  bool EnumerationMatchFound = false;
  bool WriteDefaultValues = false;
  StringRef Padding;
  StringRef PaddingBeforeContainer;
};
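
// Minimal writing example (an illustrative sketch, reusing the hypothetical
// Doc mapping shown above for Input):
//
//   Doc D{"widget", 3};
//   llvm::yaml::Output yout(llvm::outs());
//   yout << D;   // roughly: "---\nname: widget\ncount: 3\n...\n"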

template <typename T, typename Context>
void IO::processKeyWithDefault(const char *Key, std::optional<T> &Val,
                               const std::optional<T> &DefaultValue,
                               bool Required, Context &Ctx) {
  assert(!DefaultValue && "std::optional<T> shouldn't have a value!");
  void *SaveInfo;
  bool UseDefault = true;
  const bool sameAsDefault = outputting() && !Val;
  if (!outputting() && !Val)
    Val = T();
  if (Val &&
      this->preflightKey(Key, Required, sameAsDefault, UseDefault, SaveInfo)) {

    // When reading an std::optional<X> key from a YAML description, we allow
    // the special "<none>" value, which can be used to specify that no value
    // was requested, i.e. the DefaultValue will be assigned. The DefaultValue
    // is usually std::nullopt.
    bool IsNone = false;
    if (!outputting())
      if (const auto *Node =
              dyn_cast<ScalarNode>(((Input *)this)->getCurrentNode()))
        // We use rtrim to ignore possible white spaces that might exist when a
        // comment is present on the same line.
        IsNone = Node->getRawValue().rtrim(' ') == "<none>";

    if (IsNone)
      Val = DefaultValue;
    else
      yamlize(*this, *Val, Required, Ctx);
    this->postflightKey(SaveInfo);
  } else {
    if (UseDefault)
      Val = DefaultValue;
  }
}

/// YAML I/O does conversion based on types. But often native data types
/// are just a typedef of built-in integral types (e.g. int).  But the C++
/// type matching system sees through the typedef, so all such typedefed types
/// look like a built-in type. This will cause the generic YAML I/O conversion
/// to be used. To provide better control over the YAML conversion, you can
/// use this macro instead of typedef.  It will create a class with one field
/// and automatic conversion operators to and from the base type.
/// Based on BOOST_STRONG_TYPEDEF
#define LLVM_YAML_STRONG_TYPEDEF(_base, _type)                                 \
    struct _type {                                                             \
        _type() = default;                                                     \
        _type(const _base v) : value(v) {}                                     \
        _type(const _type &v) = default;                                       \
        _type &operator=(const _type &rhs) = default;                          \
        _type &operator=(const _base &rhs) { value = rhs; return *this; }      \
        operator const _base & () const { return value; }                      \
        bool operator==(const _type &rhs) const { return value == rhs.value; } \
        bool operator==(const _base &rhs) const { return value == rhs; }       \
        bool operator<(const _type &rhs) const { return value < rhs.value; }   \
        _base value;                                                           \
        using BaseType = _base;                                                \
    };

///
/// Use these types instead of uintXX_t in any mapping to have
/// its yaml output formatted as hexadecimal.
///
LLVM_YAML_STRONG_TYPEDEF(uint8_t, Hex8)
LLVM_YAML_STRONG_TYPEDEF(uint16_t, Hex16)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, Hex32)
LLVM_YAML_STRONG_TYPEDEF(uint64_t, Hex64)
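
// For example, declaring a mapped field as Hex32 rather than uint32_t makes
// it round-trip in hexadecimal (an illustrative sketch; "addr" is a
// hypothetical key inside some mapping() method):
//
//   llvm::yaml::Hex32 Addr = 0xDEADBEEF;
//   io.mapRequired("addr", Addr);   // emits: addr: 0xDEADBEEF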

template<>
struct ScalarTraits<Hex8> {
  static void output(const Hex8 &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, Hex8 &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<Hex16> {
  static void output(const Hex16 &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, Hex16 &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<Hex32> {
  static void output(const Hex32 &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, Hex32 &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template<>
struct ScalarTraits<Hex64> {
  static void output(const Hex64 &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, Hex64 &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template <> struct ScalarTraits<VersionTuple> {
  static void output(const VersionTuple &Value, void *, llvm::raw_ostream &Out);
  static StringRef input(StringRef, void *, VersionTuple &);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

// Define non-member operator>> so that Input can stream in a document list.
template <typename T>
inline std::enable_if_t<has_DocumentListTraits<T>::value, Input &>
operator>>(Input &yin, T &docList) {
  int i = 0;
  EmptyContext Ctx;
  while ( yin.setCurrentDocument() ) {
    yamlize(yin, DocumentListTraits<T>::element(yin, docList, i), true, Ctx);
    if ( yin.error() )
      return yin;
    yin.nextDocument();
    ++i;
  }
  return yin;
}

// Define non-member operator>> so that Input can stream in a map as a document.
template <typename T>
inline std::enable_if_t<has_MappingTraits<T, EmptyContext>::value, Input &>
operator>>(Input &yin, T &docMap) {
  EmptyContext Ctx;
  yin.setCurrentDocument();
  yamlize(yin, docMap, true, Ctx);
  return yin;
}

// Define non-member operator>> so that Input can stream in a sequence as
// a document.
template <typename T>
inline std::enable_if_t<has_SequenceTraits<T>::value, Input &>
operator>>(Input &yin, T &docSeq) {
  EmptyContext Ctx;
  if (yin.setCurrentDocument())
    yamlize(yin, docSeq, true, Ctx);
  return yin;
}

// Define non-member operator>> so that Input can stream in a block scalar.
template <typename T>
inline std::enable_if_t<has_BlockScalarTraits<T>::value, Input &>
operator>>(Input &In, T &Val) {
  EmptyContext Ctx;
  if (In.setCurrentDocument())
    yamlize(In, Val, true, Ctx);
  return In;
}

// Define non-member operator>> so that Input can stream in a string map.
template <typename T>
inline std::enable_if_t<has_CustomMappingTraits<T>::value, Input &>
operator>>(Input &In, T &Val) {
  EmptyContext Ctx;
  if (In.setCurrentDocument())
    yamlize(In, Val, true, Ctx);
  return In;
}

// Define non-member operator>> so that Input can stream in a polymorphic type.
template <typename T>
inline std::enable_if_t<has_PolymorphicTraits<T>::value, Input &>
operator>>(Input &In, T &Val) {
  EmptyContext Ctx;
  if (In.setCurrentDocument())
    yamlize(In, Val, true, Ctx);
  return In;
}

// Provide better error message about types missing a trait specialization
template <typename T>
inline std::enable_if_t<missingTraits<T, EmptyContext>::value, Input &>
operator>>(Input &yin, T &docSeq) {
  char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
  return yin;
}

// Define non-member operator<< so that Output can stream out document list.
template <typename T>
inline std::enable_if_t<has_DocumentListTraits<T>::value, Output &>
operator<<(Output &yout, T &docList) {
  EmptyContext Ctx;
  yout.beginDocuments();
  const size_t count = DocumentListTraits<T>::size(yout, docList);
  for(size_t i=0; i < count; ++i) {
    if ( yout.preflightDocument(i) ) {
      yamlize(yout, DocumentListTraits<T>::element(yout, docList, i), true,
              Ctx);
      yout.postflightDocument();
    }
  }
  yout.endDocuments();
  return yout;
}

// Define non-member operator<< so that Output can stream out a map.
template <typename T>
inline std::enable_if_t<has_MappingTraits<T, EmptyContext>::value, Output &>
operator<<(Output &yout, T &map) {
  EmptyContext Ctx;
  yout.beginDocuments();
  if ( yout.preflightDocument(0) ) {
    yamlize(yout, map, true, Ctx);
    yout.postflightDocument();
  }
  yout.endDocuments();
  return yout;
}

// Define non-member operator<< so that Output can stream out a sequence.
template <typename T>
inline std::enable_if_t<has_SequenceTraits<T>::value, Output &>
operator<<(Output &yout, T &seq) {
  EmptyContext Ctx;
  yout.beginDocuments();
  if ( yout.preflightDocument(0) ) {
    yamlize(yout, seq, true, Ctx);
    yout.postflightDocument();
  }
  yout.endDocuments();
  return yout;
}

// Define non-member operator<< so that Output can stream out a block scalar.
template <typename T>
inline std::enable_if_t<has_BlockScalarTraits<T>::value, Output &>
operator<<(Output &Out, T &Val) {
  EmptyContext Ctx;
  Out.beginDocuments();
  if (Out.preflightDocument(0)) {
    yamlize(Out, Val, true, Ctx);
    Out.postflightDocument();
  }
  Out.endDocuments();
  return Out;
}

// Define non-member operator<< so that Output can stream out a string map.
template <typename T>
inline std::enable_if_t<has_CustomMappingTraits<T>::value, Output &>
operator<<(Output &Out, T &Val) {
  EmptyContext Ctx;
  Out.beginDocuments();
  if (Out.preflightDocument(0)) {
    yamlize(Out, Val, true, Ctx);
    Out.postflightDocument();
  }
  Out.endDocuments();
  return Out;
}

// Define non-member operator<< so that Output can stream out a polymorphic
// type.
template <typename T>
inline std::enable_if_t<has_PolymorphicTraits<T>::value, Output &>
operator<<(Output &Out, T &Val) {
  EmptyContext Ctx;
  Out.beginDocuments();
  if (Out.preflightDocument(0)) {
    // FIXME: The parser does not support explicit documents terminated with a
    // plain scalar; the end-marker is included as part of the scalar token.
    assert(PolymorphicTraits<T>::getKind(Val) != NodeKind::Scalar &&
           "plain scalar documents are not supported");
    yamlize(Out, Val, true, Ctx);
    Out.postflightDocument();
  }
  Out.endDocuments();
  return Out;
}

// Provide better error message about types missing a trait specialization
template <typename T>
inline std::enable_if_t<missingTraits<T, EmptyContext>::value, Output &>
operator<<(Output &yout, T &seq) {
  char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
  return yout;
}

template <bool B> struct IsFlowSequenceBase {};
template <> struct IsFlowSequenceBase<true> { static const bool flow = true; };

template <typename T, typename U = void>
struct IsResizable : std::false_type {};

template <typename T>
struct IsResizable<T, std::void_t<decltype(std::declval<T>().resize(0))>>
    : public std::true_type {};

template <typename T, bool B> struct IsResizableBase {
  using type = typename T::value_type;

  static type &element(IO &io, T &seq, size_t index) {
    if (index >= seq.size())
      seq.resize(index + 1);
    return seq[index];
  }
};

template <typename T> struct IsResizableBase<T, false> {
  using type = typename T::value_type;

  static type &element(IO &io, T &seq, size_t index) {
    if (index >= seq.size()) {
      io.setError(Twine("value sequence extends beyond static size (") +
                  Twine(seq.size()) + ")");
      return seq[0];
    }
    return seq[index];
  }
};

template <typename T, bool Flow>
struct SequenceTraitsImpl
    : IsFlowSequenceBase<Flow>, IsResizableBase<T, IsResizable<T>::value> {
  static size_t size(IO &io, T &seq) { return seq.size(); }
};

// Simple helper to check an expression can be used as a bool-valued template
// argument.
template <bool> struct CheckIsBool { static const bool value = true; };

// If T has SequenceElementTraits, then vector<T> and SmallVector<T, N> have
// SequenceTraits that do the obvious thing.
template <typename T>
struct SequenceTraits<
    std::vector<T>,
    std::enable_if_t<CheckIsBool<SequenceElementTraits<T>::flow>::value>>
    : SequenceTraitsImpl<std::vector<T>, SequenceElementTraits<T>::flow> {};
template <typename T, unsigned N>
struct SequenceTraits<
    SmallVector<T, N>,
    std::enable_if_t<CheckIsBool<SequenceElementTraits<T>::flow>::value>>
    : SequenceTraitsImpl<SmallVector<T, N>, SequenceElementTraits<T>::flow> {};
template <typename T>
struct SequenceTraits<
    SmallVectorImpl<T>,
    std::enable_if_t<CheckIsBool<SequenceElementTraits<T>::flow>::value>>
    : SequenceTraitsImpl<SmallVectorImpl<T>, SequenceElementTraits<T>::flow> {};
template <typename T>
struct SequenceTraits<
    MutableArrayRef<T>,
    std::enable_if_t<CheckIsBool<SequenceElementTraits<T>::flow>::value>>
    : SequenceTraitsImpl<MutableArrayRef<T>, SequenceElementTraits<T>::flow> {};

// Sequences of fundamental types use flow formatting.
template <typename T>
struct SequenceElementTraits<T, std::enable_if_t<std::is_fundamental_v<T>>> {
  static const bool flow = true;
};

// Sequences of strings use block formatting.
template<> struct SequenceElementTraits<std::string> {
  static const bool flow = false;
};
template<> struct SequenceElementTraits<StringRef> {
  static const bool flow = false;
};
template<> struct SequenceElementTraits<std::pair<std::string, std::string>> {
  static const bool flow = false;
};

/// Implementation of CustomMappingTraits for std::map<std::string, T>.
template <typename T> struct StdMapStringCustomMappingTraitsImpl {
  using map_type = std::map<std::string, T>;

  static void inputOne(IO &io, StringRef key, map_type &v) {
    io.mapRequired(key.str().c_str(), v[std::string(key)]);
  }

  static void output(IO &io, map_type &v) {
    for (auto &p : v)
      io.mapRequired(p.first.c_str(), p.second);
  }
};

} // end namespace yaml
} // end namespace llvm

#define LLVM_YAML_IS_SEQUENCE_VECTOR_IMPL(TYPE, FLOW)                          \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  static_assert(                                                               \
      !std::is_fundamental_v<TYPE> && !std::is_same_v<TYPE, std::string> &&    \
          !std::is_same_v<TYPE, llvm::StringRef>,                              \
      "only use LLVM_YAML_IS_SEQUENCE_VECTOR for types you control");          \
  template <> struct SequenceElementTraits<TYPE> {                             \
    static const bool flow = FLOW;                                             \
  };                                                                           \
  }                                                                            \
  }

/// Utility for declaring that a std::vector of a particular type
/// should be considered a YAML sequence.
#define LLVM_YAML_IS_SEQUENCE_VECTOR(type)                                     \
  LLVM_YAML_IS_SEQUENCE_VECTOR_IMPL(type, false)

/// Utility for declaring that a std::vector of a particular type
/// should be considered a YAML flow sequence.
#define LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(type)                                \
  LLVM_YAML_IS_SEQUENCE_VECTOR_IMPL(type, true)
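
// Example (an illustrative sketch; MyStruct is a hypothetical type with its
// own MappingTraits specialization):
//
//   LLVM_YAML_IS_SEQUENCE_VECTOR(MyStruct)
//
//   std::vector<MyStruct> Seq;
//   yin >> Seq;   // parses a YAML block sequence of MyStruct mappings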

#define LLVM_YAML_DECLARE_MAPPING_TRAITS(Type)                                 \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  template <> struct MappingTraits<Type> {                                     \
    static void mapping(IO &IO, Type &Obj);                                    \
  };                                                                           \
  }                                                                            \
  }

#define LLVM_YAML_DECLARE_ENUM_TRAITS(Type)                                    \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  template <> struct ScalarEnumerationTraits<Type> {                           \
    static void enumeration(IO &io, Type &Value);                              \
  };                                                                           \
  }                                                                            \
  }

#define LLVM_YAML_DECLARE_BITSET_TRAITS(Type)                                  \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  template <> struct ScalarBitSetTraits<Type> {                                \
    static void bitset(IO &IO, Type &Options);                                 \
  };                                                                           \
  }                                                                            \
  }

#define LLVM_YAML_DECLARE_SCALAR_TRAITS(Type, MustQuote)                       \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  template <> struct ScalarTraits<Type> {                                      \
    static void output(const Type &Value, void *ctx, raw_ostream &Out);        \
    static StringRef input(StringRef Scalar, void *ctxt, Type &Value);         \
    static QuotingType mustQuote(StringRef) { return MustQuote; }              \
  };                                                                           \
  }                                                                            \
  }
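
// Typical use of the declaration macros: declare in a header, then define
// the declared methods in one .cpp file (an illustrative sketch; MyEnum and
// its enumerators are hypothetical):
//
//   LLVM_YAML_DECLARE_ENUM_TRAITS(MyEnum)
//
//   // In a single .cpp file:
//   void llvm::yaml::ScalarEnumerationTraits<MyEnum>::enumeration(
//       IO &io, MyEnum &Value) {
//     io.enumCase(Value, "first", MyEnum::First);
//     io.enumCase(Value, "second", MyEnum::Second);
//   }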

/// Utility for declaring that a std::vector of a particular type
/// should be considered a YAML document list.
#define LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(_type)                               \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  template <unsigned N>                                                        \
  struct DocumentListTraits<SmallVector<_type, N>>                             \
      : public SequenceTraitsImpl<SmallVector<_type, N>, false> {};            \
  template <>                                                                  \
  struct DocumentListTraits<std::vector<_type>>                                \
      : public SequenceTraitsImpl<std::vector<_type>, false> {};               \
  }                                                                            \
  }
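
// Example (an illustrative sketch; MyDoc is a hypothetical mapped type):
//
//   LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(MyDoc)
//
//   std::vector<MyDoc> Docs;
//   yin >> Docs;   // one vector element per "---" document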

/// Utility for declaring that std::map<std::string, _type> should be considered
/// a YAML map.
#define LLVM_YAML_IS_STRING_MAP(_type)                                         \
  namespace llvm {                                                             \
  namespace yaml {                                                             \
  template <>                                                                  \
  struct CustomMappingTraits<std::map<std::string, _type>>                     \
      : public StdMapStringCustomMappingTraitsImpl<_type> {};                  \
  }                                                                            \
  }
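
// Example (an illustrative sketch): after this declaration, the map
// round-trips as a YAML mapping keyed by the map's keys:
//
//   LLVM_YAML_IS_STRING_MAP(int)
//
//   std::map<std::string, int> Counts;
//   yin >> Counts;   // e.g. "{ apples: 2, pears: 5 }"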

LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::Hex64)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::Hex32)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::Hex16)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::Hex8)

#endif // LLVM_SUPPORT_YAMLTRAITS_H

Support/GenericIteratedDominanceFrontier.h
//===- IteratedDominanceFrontier.h - Calculate IDF --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Compute iterated dominance frontiers using a linear time algorithm.
///
/// The algorithm used here is based on:
///
///   Sreedhar and Gao. A linear time algorithm for placing phi-nodes.
///   In Proceedings of the 22nd ACM SIGPLAN-SIGACT Symposium on Principles of
///   Programming Languages
///   POPL '95. ACM, New York, NY, 62-73.
///
/// It has been modified to not explicitly use the DJ graph data structure and
/// to directly compute pruned SSA using per-variable liveness information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GENERICITERATEDDOMINANCEFRONTIER_H
#define LLVM_SUPPORT_GENERICITERATEDDOMINANCEFRONTIER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/GenericDomTree.h"
#include <queue>

namespace llvm {

namespace IDFCalculatorDetail {

/// Generic utility class used for getting the children of a basic block.
/// May be specialized if, for example, one would like to avoid returning
/// null-pointer successors.
template <class NodeTy, bool IsPostDom> struct ChildrenGetterTy {
  using NodeRef = typename GraphTraits<NodeTy *>::NodeRef;
  using ChildrenTy = SmallVector<NodeRef, 8>;

  ChildrenTy get(const NodeRef &N);
};

} // end of namespace IDFCalculatorDetail

/// Determine the iterated dominance frontier, given a set of defining
/// blocks, and optionally, a set of live-in blocks.
///
/// In turn, the results can be used to place phi nodes.
///
/// This algorithm is a linear time computation of Iterated Dominance Frontiers,
/// pruned using the live-in set.
/// By default, liveness is not used to prune the IDF computation.
/// The template parameters should be of a CFG block type.
template <class NodeTy, bool IsPostDom> class IDFCalculatorBase {
public:
  using OrderedNodeTy =
      std::conditional_t<IsPostDom, Inverse<NodeTy *>, NodeTy *>;
  using ChildrenGetterTy =
      IDFCalculatorDetail::ChildrenGetterTy<NodeTy, IsPostDom>;

  IDFCalculatorBase(DominatorTreeBase<NodeTy, IsPostDom> &DT) : DT(DT) {}

  IDFCalculatorBase(DominatorTreeBase<NodeTy, IsPostDom> &DT,
                    const ChildrenGetterTy &C)
      : DT(DT), ChildrenGetter(C) {}

  /// Give the IDF calculator the set of blocks in which the value is
  /// defined.  This is equivalent to the set of starting blocks it should be
  /// calculating the IDF for (though later gets pruned based on liveness).
  ///
  /// Note: This set *must* live for the entire lifetime of the IDF calculator.
  void setDefiningBlocks(const SmallPtrSetImpl<NodeTy *> &Blocks) {
    DefBlocks = &Blocks;
  }

  /// Give the IDF calculator the set of blocks in which the value is
  /// live on entry to the block.  This is used to prune the IDF calculation to
  /// not include blocks where any phi insertion would be dead.
  ///
  /// Note: This set *must* live for the entire lifetime of the IDF calculator.
  void setLiveInBlocks(const SmallPtrSetImpl<NodeTy *> &Blocks) {
    LiveInBlocks = &Blocks;
    useLiveIn = true;
  }

  /// Reset the live-in block set to be empty, and tell the IDF
  /// calculator to not use liveness anymore.
  void resetLiveInBlocks() {
    LiveInBlocks = nullptr;
    useLiveIn = false;
  }

  /// Calculate iterated dominance frontiers
  ///
  /// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
  /// the file-level comment.  It performs DF->IDF pruning using the live-in
  /// set, to avoid computing the IDF for blocks where an inserted PHI node
  /// would be dead.
  void calculate(SmallVectorImpl<NodeTy *> &IDFBlocks);

private:
  DominatorTreeBase<NodeTy, IsPostDom> &DT;
  ChildrenGetterTy ChildrenGetter;
  bool useLiveIn = false;
  const SmallPtrSetImpl<NodeTy *> *LiveInBlocks;
  const SmallPtrSetImpl<NodeTy *> *DefBlocks;
};
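
// Minimal usage sketch, assuming an LLVM IR CFG (BasicBlock/DominatorTree
// from the IR headers; concrete wrappers such as IDFCalculator live in
// llvm/Analysis/IteratedDominanceFrontier.h):
//
//   DominatorTree DT(F);
//   IDFCalculatorBase<BasicBlock, /*IsPostDom=*/false> IDF(DT);
//   IDF.setDefiningBlocks(DefBlocks);   // SmallPtrSet<BasicBlock *, N>
//   IDF.setLiveInBlocks(LiveIn);        // optional: prune dead phi sites
//   SmallVector<BasicBlock *, 32> PHIBlocks;
//   IDF.calculate(PHIBlocks);           // blocks needing phi nodes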

//===----------------------------------------------------------------------===//
// Implementation.
//===----------------------------------------------------------------------===//

namespace IDFCalculatorDetail {

template <class NodeTy, bool IsPostDom>
typename ChildrenGetterTy<NodeTy, IsPostDom>::ChildrenTy
ChildrenGetterTy<NodeTy, IsPostDom>::get(const NodeRef &N) {
  using OrderedNodeTy =
      typename IDFCalculatorBase<NodeTy, IsPostDom>::OrderedNodeTy;

  auto Children = children<OrderedNodeTy>(N);
  return {Children.begin(), Children.end()};
}

} // end of namespace IDFCalculatorDetail

template <class NodeTy, bool IsPostDom>
void IDFCalculatorBase<NodeTy, IsPostDom>::calculate(
    SmallVectorImpl<NodeTy *> &IDFBlocks) {
  // Use a priority queue keyed on dominator tree level so that inserted nodes
  // are handled from the bottom of the dominator tree upwards. We also augment
  // the level with a DFS number to ensure that the blocks are ordered in a
  // deterministic way.
  using DomTreeNodePair =
      std::pair<DomTreeNodeBase<NodeTy> *, std::pair<unsigned, unsigned>>;
  using IDFPriorityQueue =
      std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
                          less_second>;

  IDFPriorityQueue PQ;

  DT.updateDFSNumbers();

  SmallVector<DomTreeNodeBase<NodeTy> *, 32> Worklist;
  SmallPtrSet<DomTreeNodeBase<NodeTy> *, 32> VisitedPQ;
  SmallPtrSet<DomTreeNodeBase<NodeTy> *, 32> VisitedWorklist;

  for (NodeTy *BB : *DefBlocks)
    if (DomTreeNodeBase<NodeTy> *Node = DT.getNode(BB)) {
      PQ.push({Node, std::make_pair(Node->getLevel(), Node->getDFSNumIn())});
      VisitedWorklist.insert(Node);
    }

  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNodeBase<NodeTy> *Root = RootPair.first;
    unsigned RootLevel = RootPair.second.first;

    // Walk all dominator tree children of Root, inspecting their CFG edges with
    // targets elsewhere on the dominator tree. Only targets whose level is at
    // most Root's level are added to the iterated dominance frontier of the
    // definition set.

    assert(Worklist.empty());
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNodeBase<NodeTy> *Node = Worklist.pop_back_val();
      NodeTy *BB = Node->getBlock();
      // Succ is the successor in the direction we are calculating IDF, so it is
      // successor for IDF, and predecessor for Reverse IDF.
      auto DoWork = [&](NodeTy *Succ) {
        DomTreeNodeBase<NodeTy> *SuccNode = DT.getNode(Succ);

        const unsigned SuccLevel = SuccNode->getLevel();
        if (SuccLevel > RootLevel)
          return;

        if (!VisitedPQ.insert(SuccNode).second)
          return;

        NodeTy *SuccBB = SuccNode->getBlock();
        if (useLiveIn && !LiveInBlocks->count(SuccBB))
          return;

        IDFBlocks.emplace_back(SuccBB);
        if (!DefBlocks->count(SuccBB))
          PQ.push(std::make_pair(
              SuccNode, std::make_pair(SuccLevel, SuccNode->getDFSNumIn())));
      };

      for (auto *Succ : ChildrenGetter.get(BB))
        DoWork(Succ);

      for (auto DomChild : *Node) {
        if (VisitedWorklist.insert(DomChild).second)
          Worklist.push_back(DomChild);
      }
    }
  }
}

} // end of namespace llvm

#endif

Support/DataExtractor.h
//===-- DataExtractor.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DATAEXTRACTOR_H
#define LLVM_SUPPORT_DATAEXTRACTOR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"

namespace llvm {

/// An auxiliary type to facilitate extraction of 3-byte entities.
struct Uint24 {
  uint8_t Bytes[3];
  Uint24(uint8_t U) {
    Bytes[0] = Bytes[1] = Bytes[2] = U;
  }
  Uint24(uint8_t U0, uint8_t U1, uint8_t U2) {
    Bytes[0] = U0; Bytes[1] = U1; Bytes[2] = U2;
  }
  uint32_t getAsUint32(bool IsLittleEndian) const {
    int LoIx = IsLittleEndian ? 0 : 2;
    return Bytes[LoIx] + (Bytes[1] << 8) + (Bytes[2-LoIx] << 16);
  }
};

using uint24_t = Uint24;
static_assert(sizeof(uint24_t) == 3, "sizeof(uint24_t) != 3");

/// Needed by swapByteOrder().
inline uint24_t getSwappedBytes(uint24_t C) {
  return uint24_t(C.Bytes[2], C.Bytes[1], C.Bytes[0]);
}
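
// For example:
//
//   Uint24 V(0x01, 0x02, 0x03);
//   V.getAsUint32(/*IsLittleEndian=*/true);   // 0x030201
//   V.getAsUint32(/*IsLittleEndian=*/false);  // 0x010203
//   getSwappedBytes(V).getAsUint32(true);     // 0x010203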

class DataExtractor {
  StringRef Data;
  uint8_t IsLittleEndian;
  uint8_t AddressSize;
public:
  /// A class representing a position in a DataExtractor, as well as any error
  /// encountered during extraction. It enables one to extract a sequence of
  /// values without error-checking and then checking for errors in bulk at the
  /// end. The class holds an Error object, so failing to check the result of
  /// the parse will result in a runtime error. The error flag is sticky and
  /// will cause all subsequent extraction functions to fail without even
  /// attempting to parse and without updating the Cursor offset. After clearing
  /// the error flag, one can again use the Cursor object for parsing.
  class Cursor {
    uint64_t Offset;
    Error Err;

    friend class DataExtractor;

  public:
    /// Construct a cursor for extraction from the given offset.
    explicit Cursor(uint64_t Offset) : Offset(Offset), Err(Error::success()) {}

    /// Checks whether the cursor is valid (i.e. no errors were encountered). In
    /// case of errors, this does not clear the error flag -- one must call
    /// takeError() instead.
    explicit operator bool() { return !Err; }

    /// Return the current position of this Cursor. In the error state this is
    /// the position of the Cursor before the first error was encountered.
    uint64_t tell() const { return Offset; }

    /// Set the cursor to the new offset. This does not impact the error state.
    void seek(uint64_t NewOffSet) { Offset = NewOffSet; }

    /// Return error contained inside this Cursor, if any. Clears the internal
    /// Cursor state.
    Error takeError() { return std::move(Err); }
  };
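
  // A minimal usage sketch (illustrative only): extract a sequence of values
  // and check for errors once at the end, as the Cursor API intends.
  //
  //   DataExtractor DE(Data, /*IsLittleEndian=*/true, /*AddressSize=*/8);
  //   DataExtractor::Cursor C(0);
  //   uint32_t First = DE.getU32(C);
  //   uint64_t Second = DE.getULEB128(C);
  //   if (!C)
  //     return C.takeError(); // The Error must be consumed.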

  /// Construct with a buffer that is owned by the caller.
  ///
  /// This constructor allows us to use data that is owned by the
  /// caller. The data must stay around as long as this object is
  /// valid.
  DataExtractor(StringRef Data, bool IsLittleEndian, uint8_t AddressSize)
    : Data(Data), IsLittleEndian(IsLittleEndian), AddressSize(AddressSize) {}
  DataExtractor(ArrayRef<uint8_t> Data, bool IsLittleEndian,
                uint8_t AddressSize)
      : Data(StringRef(reinterpret_cast<const char *>(Data.data()),
                       Data.size())),
        IsLittleEndian(IsLittleEndian), AddressSize(AddressSize) {}

  /// Get the data pointed to by this extractor.
  StringRef getData() const { return Data; }
  /// Get the endianness for this extractor.
  bool isLittleEndian() const { return IsLittleEndian; }
  /// Get the address size for this extractor.
  uint8_t getAddressSize() const { return AddressSize; }
  /// Set the address size for this extractor.
  void setAddressSize(uint8_t Size) { AddressSize = Size; }

  /// Extract a C string from \a *offset_ptr.
  ///
  /// Returns a pointer to a C String from the data at the offset
  /// pointed to by \a offset_ptr. A variable length NULL terminated C
  /// string will be extracted and the \a offset_ptr will be
  /// updated with the offset of the byte that follows the NULL
  /// terminator byte.
  ///
  /// @param[in,out] OffsetPtr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     A pointer to the C string value in the data. If the offset
  ///     pointed to by \a offset_ptr is out of bounds, or if the
  ///     offset plus the length of the C string is out of bounds,
  ///     NULL will be returned.
  const char *getCStr(uint64_t *OffsetPtr, Error *Err = nullptr) const {
    return getCStrRef(OffsetPtr, Err).data();
  }

  /// Extract a C string from the location given by the cursor. In case of an
  /// extraction error, or if the cursor is already in an error state, a
  /// nullptr is returned.
  const char *getCStr(Cursor &C) const { return getCStrRef(C).data(); }

  /// Extract a C string from \a *offset_ptr.
  ///
  /// Returns a StringRef for the C String from the data at the offset
  /// pointed to by \a offset_ptr. A variable length NULL terminated C
  /// string will be extracted and the \a offset_ptr will be
  /// updated with the offset of the byte that follows the NULL
  /// terminator byte.
  ///
  /// \param[in,out] OffsetPtr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// \return
  ///     A StringRef for the C string value in the data. If the offset
  ///     pointed to by \a offset_ptr is out of bounds, or if the
  ///     offset plus the length of the C string is out of bounds,
  ///     a default-initialized StringRef will be returned.
  StringRef getCStrRef(uint64_t *OffsetPtr, Error *Err = nullptr) const;

  /// Extract a C string (as a StringRef) from the location given by the cursor.
  /// In case of an extraction error, or if the cursor is already in an error
  /// state, a default-initialized StringRef is returned.
  StringRef getCStrRef(Cursor &C) const {
    return getCStrRef(&C.Offset, &C.Err);
  }

  /// Extract a fixed length string from \a *OffsetPtr and consume \a Length
  /// bytes.
  ///
  /// Returns a StringRef for the string from the data at the offset
  /// pointed to by \a OffsetPtr. A fixed length C string will be extracted
  /// and the \a OffsetPtr will be advanced by \a Length bytes.
  ///
  /// \param[in,out] OffsetPtr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// \param[in] Length
  ///     The length of the fixed length string to extract. If there are not
  ///     enough bytes in the data to extract the full string, the offset will
  ///     be left unmodified.
  ///
  /// \param[in] TrimChars
  ///     A set of characters to trim from the end of the string. Fixed length
  ///     strings are commonly NULL terminated by one or more zero bytes;
  ///     some producers instead pad the end of the string with spaces. A
  ///     good default is to trim only the NULL characters.
  ///
  /// \return
  ///     A StringRef for the C string value in the data. If the offset
  ///     pointed to by \a OffsetPtr is out of bounds, or if the
  ///     offset plus the length of the C string is out of bounds,
  ///     a default-initialized StringRef will be returned.
  StringRef getFixedLengthString(uint64_t *OffsetPtr, uint64_t Length,
                                 StringRef TrimChars = {"\0", 1}) const;

  /// Extract a fixed number of bytes from the specified offset.
  ///
  /// Returns a StringRef for the bytes from the data at the offset
  /// pointed to by \a OffsetPtr. A fixed length C string will be extracted
  /// and the \a OffsetPtr will be advanced by \a Length bytes.
  ///
  /// \param[in,out] OffsetPtr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// \param[in] Length
  ///     The number of bytes to extract. If there are not enough bytes in the
  ///     data to extract all of the bytes, the offset will be left unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// \return
  ///     A StringRef for the extracted bytes. If the offset pointed to by
  ///     \a OffsetPtr is out of bounds, or if the offset plus the length
  ///     is out of bounds, a default-initialized StringRef will be returned.
  StringRef getBytes(uint64_t *OffsetPtr, uint64_t Length,
                     Error *Err = nullptr) const;

  /// Extract a fixed number of bytes from the location given by the cursor. In
  /// case of an extraction error, or if the cursor is already in an error
  /// state, a default-initialized StringRef is returned.
  StringRef getBytes(Cursor &C, uint64_t Length) {
    return getBytes(&C.Offset, Length, &C.Err);
  }

  /// Extract an unsigned integer of size \a byte_size from \a
  /// *offset_ptr.
  ///
  /// Extract a single unsigned integer value and update the offset
  /// pointed to by \a offset_ptr. The size of the extracted integer
  /// is specified by the \a byte_size argument. \a byte_size should
  /// have a value greater than or equal to one and less than or equal
  /// to eight since the return value is 64 bits wide. Any
  /// \a byte_size values less than 1 or greater than 8 will result in
  /// nothing being extracted, and zero being returned.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in] byte_size
  ///     The size in bytes of the integer to extract.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The unsigned integer value that was extracted, or zero on
  ///     failure.
  uint64_t getUnsigned(uint64_t *offset_ptr, uint32_t byte_size,
                       Error *Err = nullptr) const;

  /// Extract an unsigned integer of the given size from the location given by
  /// the cursor. In case of an extraction error, or if the cursor is already in
  /// an error state, zero is returned.
  uint64_t getUnsigned(Cursor &C, uint32_t Size) const {
    return getUnsigned(&C.Offset, Size, &C.Err);
  }

  /// Extract a signed integer of size \a byte_size from \a *offset_ptr.
  ///
  /// Extract a single signed integer value (sign extending if required)
  /// and update the offset pointed to by \a offset_ptr. The size of
  /// the extracted integer is specified by the \a byte_size argument.
  /// \a byte_size should have a value greater than or equal to one
  /// and less than or equal to eight since the return value is 64
  /// bits wide. Any \a byte_size values less than 1 or greater than
  /// 8 will result in nothing being extracted, and zero being returned.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in] size
  ///     The size in bytes of the integer to extract.
  ///
  /// @return
  ///     The sign extended signed integer value that was extracted,
  ///     or zero on failure.
  int64_t getSigned(uint64_t *offset_ptr, uint32_t size) const;

  //------------------------------------------------------------------
  /// Extract a pointer from \a *offset_ptr.
  ///
  /// Extract a single pointer from the data and update the offset
  /// pointed to by \a offset_ptr. The size of the extracted pointer
  /// is \a getAddressSize(), so the address size has to be
  /// set correctly prior to extracting any pointer values.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @return
  ///     The extracted pointer value as a 64-bit integer.
  uint64_t getAddress(uint64_t *offset_ptr) const {
    return getUnsigned(offset_ptr, AddressSize);
  }

  /// Extract a pointer-sized unsigned integer from the location given by the
  /// cursor. In case of an extraction error, or if the cursor is already in
  /// an error state, zero is returned.
  uint64_t getAddress(Cursor &C) const { return getUnsigned(C, AddressSize); }

  /// Extract a uint8_t value from \a *offset_ptr.
  ///
  /// Extract a single uint8_t from the binary data at the offset
  /// pointed to by \a offset_ptr, and advance the offset on success.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted uint8_t value.
  uint8_t getU8(uint64_t *offset_ptr, Error *Err = nullptr) const;

  /// Extract a single uint8_t value from the location given by the cursor. In
  /// case of an extraction error, or if the cursor is already in an error
  /// state, zero is returned.
  uint8_t getU8(Cursor &C) const { return getU8(&C.Offset, &C.Err); }

  /// Extract \a count uint8_t values from \a *offset_ptr.
  ///
  /// Extract \a count uint8_t values from the binary data at the
  /// offset pointed to by \a offset_ptr, and advance the offset on
  /// success. The extracted values are copied into \a dst.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[out] dst
  ///     A buffer to copy \a count uint8_t values into. \a dst must
  ///     be large enough to hold all requested data.
  ///
  /// @param[in] count
  ///     The number of uint8_t values to extract.
  ///
  /// @return
  ///     \a dst if all values were properly extracted and copied,
  ///     NULL otherwise.
  uint8_t *getU8(uint64_t *offset_ptr, uint8_t *dst, uint32_t count) const;

  /// Extract \a Count uint8_t values from the location given by the cursor and
  /// store them into the destination buffer. In case of an extraction error, or
  /// if the cursor is already in an error state, a nullptr is returned and the
  /// destination buffer is left unchanged.
  uint8_t *getU8(Cursor &C, uint8_t *Dst, uint32_t Count) const;

  /// Extract \a Count uint8_t values from the location given by the cursor and
  /// store them into the destination vector. The vector is resized to fit the
  /// extracted data. In case of an extraction error, or if the cursor is
  /// already in an error state, the destination vector is left unchanged and
  /// the cursor is placed into an error state.
  void getU8(Cursor &C, SmallVectorImpl<uint8_t> &Dst, uint32_t Count) const {
    if (isValidOffsetForDataOfSize(C.Offset, Count))
      Dst.resize(Count);

    // This relies on the fact that getU8 will not attempt to write to the
    // buffer if isValidOffsetForDataOfSize(C.Offset, Count) is false.
    getU8(C, Dst.data(), Count);
  }

  //------------------------------------------------------------------
  /// Extract a uint16_t value from \a *offset_ptr.
  ///
  /// Extract a single uint16_t from the binary data at the offset
  /// pointed to by \a offset_ptr, and update the offset on success.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted uint16_t value.
  //------------------------------------------------------------------
  uint16_t getU16(uint64_t *offset_ptr, Error *Err = nullptr) const;

  /// Extract a single uint16_t value from the location given by the cursor. In
  /// case of an extraction error, or if the cursor is already in an error
  /// state, zero is returned.
  uint16_t getU16(Cursor &C) const { return getU16(&C.Offset, &C.Err); }

  /// Extract \a count uint16_t values from \a *offset_ptr.
  ///
  /// Extract \a count uint16_t values from the binary data at the
  /// offset pointed to by \a offset_ptr, and advance the offset on
  /// success. The extracted values are copied into \a dst.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[out] dst
  ///     A buffer to copy \a count uint16_t values into. \a dst must
  ///     be large enough to hold all requested data.
  ///
  /// @param[in] count
  ///     The number of uint16_t values to extract.
  ///
  /// @return
  ///     \a dst if all values were properly extracted and copied,
  ///     NULL otherwise.
  uint16_t *getU16(uint64_t *offset_ptr, uint16_t *dst, uint32_t count) const;

  /// Extract a 24-bit unsigned value from \a *offset_ptr and return it
  /// in a uint32_t.
  ///
  /// Extract 3 bytes from the binary data at the offset pointed to by
  /// \a offset_ptr, construct a uint32_t from them and update the offset
  /// on success.
  ///
  /// @param[in,out] OffsetPtr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the 3 bytes if the value is extracted correctly. If the offset
  ///     is out of bounds or there are not enough bytes to extract this value,
  ///     the offset will be left unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted 24-bit value represented in a uint32_t.
  uint32_t getU24(uint64_t *OffsetPtr, Error *Err = nullptr) const;

  /// Extract a single 24-bit unsigned value from the location given by the
  /// cursor. In case of an extraction error, or if the cursor is already in an
  /// error state, zero is returned.
  uint32_t getU24(Cursor &C) const { return getU24(&C.Offset, &C.Err); }

  /// Extract a uint32_t value from \a *offset_ptr.
  ///
  /// Extract a single uint32_t from the binary data at the offset
  /// pointed to by \a offset_ptr, and update the offset on success.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted uint32_t value.
  uint32_t getU32(uint64_t *offset_ptr, Error *Err = nullptr) const;

  /// Extract a single uint32_t value from the location given by the cursor. In
  /// case of an extraction error, or if the cursor is already in an error
  /// state, zero is returned.
  uint32_t getU32(Cursor &C) const { return getU32(&C.Offset, &C.Err); }

  /// Extract \a count uint32_t values from \a *offset_ptr.
  ///
  /// Extract \a count uint32_t values from the binary data at the
  /// offset pointed to by \a offset_ptr, and advance the offset on
  /// success. The extracted values are copied into \a dst.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[out] dst
  ///     A buffer to copy \a count uint32_t values into. \a dst must
  ///     be large enough to hold all requested data.
  ///
  /// @param[in] count
  ///     The number of uint32_t values to extract.
  ///
  /// @return
  ///     \a dst if all values were properly extracted and copied,
  ///     NULL otherwise.
  uint32_t *getU32(uint64_t *offset_ptr, uint32_t *dst, uint32_t count) const;

  /// Extract a uint64_t value from \a *offset_ptr.
  ///
  /// Extract a single uint64_t from the binary data at the offset
  /// pointed to by \a offset_ptr, and update the offset on success.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted uint64_t value.
  uint64_t getU64(uint64_t *offset_ptr, Error *Err = nullptr) const;

  /// Extract a single uint64_t value from the location given by the cursor. In
  /// case of an extraction error, or if the cursor is already in an error
  /// state, zero is returned.
  uint64_t getU64(Cursor &C) const { return getU64(&C.Offset, &C.Err); }

  /// Extract \a count uint64_t values from \a *offset_ptr.
  ///
  /// Extract \a count uint64_t values from the binary data at the
  /// offset pointed to by \a offset_ptr, and advance the offset on
  /// success. The extracted values are copied into \a dst.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[out] dst
  ///     A buffer to copy \a count uint64_t values into. \a dst must
  ///     be large enough to hold all requested data.
  ///
  /// @param[in] count
  ///     The number of uint64_t values to extract.
  ///
  /// @return
  ///     \a dst if all values were properly extracted and copied,
  ///     NULL otherwise.
  uint64_t *getU64(uint64_t *offset_ptr, uint64_t *dst, uint32_t count) const;

  /// Extract a signed LEB128 value from \a *offset_ptr.
  ///
  /// Extracts a signed LEB128 number from this object's data
  /// starting at the offset pointed to by \a offset_ptr. The offset
  /// pointed to by \a offset_ptr will be updated with the offset of
  /// the byte following the last extracted byte.
  ///
  /// @param[in,out] OffsetPtr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted signed integer value.
  int64_t getSLEB128(uint64_t *OffsetPtr, Error *Err = nullptr) const;

  /// Extract a signed LEB128 value from the location given by the cursor.
  /// In case of an extraction error, or if the cursor is already in an error
  /// state, zero is returned.
  int64_t getSLEB128(Cursor &C) const { return getSLEB128(&C.Offset, &C.Err); }

  /// Extract an unsigned LEB128 value from \a *offset_ptr.
  ///
  /// Extracts an unsigned LEB128 number from this object's data
  /// starting at the offset pointed to by \a offset_ptr. The offset
  /// pointed to by \a offset_ptr will be updated with the offset of
  /// the byte following the last extracted byte.
  ///
  /// @param[in,out] offset_ptr
  ///     A pointer to an offset within the data that will be advanced
  ///     by the appropriate number of bytes if the value is extracted
  ///     correctly. If the offset is out of bounds or there are not
  ///     enough bytes to extract this value, the offset will be left
  ///     unmodified.
  ///
  /// @param[in,out] Err
  ///     A pointer to an Error object. Upon return the Error object is set to
  ///     indicate the result (success/failure) of the function. If the Error
  ///     object is already set when calling this function, no extraction is
  ///     performed.
  ///
  /// @return
  ///     The extracted unsigned integer value.
  uint64_t getULEB128(uint64_t *offset_ptr, llvm::Error *Err = nullptr) const;

  /// Extract an unsigned LEB128 value from the location given by the cursor.
  /// In case of an extraction error, or if the cursor is already in an error
  /// state, zero is returned.
  uint64_t getULEB128(Cursor &C) const { return getULEB128(&C.Offset, &C.Err); }

  /// Advance the Cursor position by the given number of bytes. No-op if the
  /// cursor is in an error state.
  void skip(Cursor &C, uint64_t Length) const;

  /// Return true iff the cursor is at the end of the buffer, regardless of the
  /// error state of the cursor. The only way both eof and error states can be
  /// true is if one attempts a read while the cursor is at the very end of the
  /// data buffer.
  bool eof(const Cursor &C) const { return size() == C.Offset; }

  /// Test the validity of \a offset.
  ///
  /// @return
  ///     \b true if \a offset is a valid offset into the data in this
  ///     object, \b false otherwise.
  bool isValidOffset(uint64_t offset) const { return size() > offset; }

  /// Test the availability of \a length bytes of data from \a offset.
  ///
  /// @return
  ///     \b true if \a offset is a valid offset and there are \a
  ///     length bytes available at that offset, \b false otherwise.
  bool isValidOffsetForDataOfSize(uint64_t offset, uint64_t length) const {
    return offset + length >= offset && isValidOffset(offset + length - 1);
  }

  /// Test the availability of enough bytes of data for a pointer from
  /// \a offset. The size of a pointer is \a getAddressSize().
  ///
  /// @return
  ///     \b true if \a offset is a valid offset and there are enough
  ///     bytes for a pointer available at that offset, \b false
  ///     otherwise.
  bool isValidOffsetForAddress(uint64_t offset) const {
    return isValidOffsetForDataOfSize(offset, AddressSize);
  }

  /// Return the number of bytes in the underlying buffer.
  size_t size() const { return Data.size(); }

protected:
  // Make it possible for subclasses to access these fields without making them
  // public.
  static uint64_t &getOffset(Cursor &C) { return C.Offset; }
  static Error &getError(Cursor &C) { return C.Err; }

private:
  /// If it is possible to read \a Size bytes at offset \a Offset, returns \b
  /// true. Otherwise, returns \b false. If \a E is not nullptr, also sets the
  /// error object to indicate an error.
  bool prepareRead(uint64_t Offset, uint64_t Size, Error *E) const;

  template <typename T> T getU(uint64_t *OffsetPtr, Error *Err) const;
  template <typename T>
  T *getUs(uint64_t *OffsetPtr, T *Dst, uint32_t Count, Error *Err) const;
};

} // namespace llvm

#endif
//===- SmallVectorMemoryBuffer.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares a wrapper class to hold the memory into which an
// object will be generated.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SMALLVECTORMEMORYBUFFER_H
#define LLVM_SUPPORT_SMALLVECTORMEMORYBUFFER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// SmallVector-backed MemoryBuffer instance.
///
/// This class enables efficient construction of MemoryBuffers from SmallVector
/// instances. This is useful for MCJIT and Orc, where object files are streamed
/// into SmallVectors, then inspected using ObjectFile (which takes a
/// MemoryBuffer).
class SmallVectorMemoryBuffer : public MemoryBuffer {
public:
  /// Construct a SmallVectorMemoryBuffer from the given SmallVector r-value.
  SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV,
                          bool RequiresNullTerminator = true)
      : SmallVectorMemoryBuffer(std::move(SV), "<in-memory object>",
                                RequiresNullTerminator) {}

  /// Construct a named SmallVectorMemoryBuffer from the given SmallVector
  /// r-value and StringRef.
  SmallVectorMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name,
                          bool RequiresNullTerminator = true)
      : SV(std::move(SV)), BufferName(std::string(Name)) {
    if (RequiresNullTerminator) {
      this->SV.push_back('\0');
      this->SV.pop_back();
    }
    init(this->SV.begin(), this->SV.end(), false);
  }

  // Key function.
  ~SmallVectorMemoryBuffer() override;

  StringRef getBufferIdentifier() const override { return BufferName; }

  BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }

private:
  SmallVector<char, 0> SV;
  std::string BufferName;
};
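
// A minimal usage sketch (illustrative only): stream bytes into a
// SmallVector, then wrap it for APIs that expect a MemoryBuffer.
//
//   SmallVector<char, 0> ObjBuffer;
//   raw_svector_ostream OS(ObjBuffer);
//   OS << "...object bytes...";
//   std::unique_ptr<MemoryBuffer> MB =
//       std::make_unique<SmallVectorMemoryBuffer>(std::move(ObjBuffer),
//                                                 "<in-memory object>");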

} // namespace llvm

#endif
//===- WithColor.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_WITHCOLOR_H
#define LLVM_SUPPORT_WITHCOLOR_H

#include "llvm/Support/raw_ostream.h"

namespace llvm {

class Error;
class StringRef;

namespace cl {
class OptionCategory;
}

extern cl::OptionCategory &getColorCategory();

// Symbolic names for various syntax elements.
enum class HighlightColor {
  Address,
  String,
  Tag,
  Attribute,
  Enumerator,
  Macro,
  Error,
  Warning,
  Note,
  Remark
};

enum class ColorMode {
  /// Determine whether to use color based on the command line argument and the
  /// raw_ostream.
  Auto,
  /// Enable colors. Because raw_ostream is the one implementing colors, this
  /// has no effect if the stream does not support colors or has colors
  /// disabled.
  Enable,
  /// Disable colors.
  Disable,
};

/// An RAII object that temporarily switches an output stream to a specific
/// color.
class WithColor {
public:
  using AutoDetectFunctionType = bool (*)(const raw_ostream &OS);

  /// To be used like this: WithColor(OS, HighlightColor::String) << "text";
  /// @param OS The output stream
  /// @param S Symbolic name for syntax element to color
  /// @param Mode Enable, disable or compute whether to use colors.
  WithColor(raw_ostream &OS, HighlightColor S,
            ColorMode Mode = ColorMode::Auto);
  /// To be used like this: WithColor(OS, raw_ostream::BLACK) << "text";
  /// @param OS The output stream
  /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
  /// change only the bold attribute, and keep colors untouched
  /// @param Bold Bold/brighter text, default false
  /// @param BG If true, change the background, default: change foreground
  /// @param Mode Enable, disable or compute whether to use colors.
  WithColor(raw_ostream &OS,
            raw_ostream::Colors Color = raw_ostream::SAVEDCOLOR,
            bool Bold = false, bool BG = false,
            ColorMode Mode = ColorMode::Auto)
      : OS(OS), Mode(Mode) {
    changeColor(Color, Bold, BG);
  }
  ~WithColor();

  raw_ostream &get() { return OS; }
  operator raw_ostream &() { return OS; }
  template <typename T> WithColor &operator<<(T &O) {
    OS << O;
    return *this;
  }
  template <typename T> WithColor &operator<<(const T &O) {
    OS << O;
    return *this;
  }

  /// Convenience method for printing "error: " to stderr.
  static raw_ostream &error();
  /// Convenience method for printing "warning: " to stderr.
  static raw_ostream &warning();
  /// Convenience method for printing "note: " to stderr.
  static raw_ostream &note();
  /// Convenience method for printing "remark: " to stderr.
  static raw_ostream &remark();

  /// Convenience method for printing "error: " to the given stream.
  static raw_ostream &error(raw_ostream &OS, StringRef Prefix = "",
                            bool DisableColors = false);
  /// Convenience method for printing "warning: " to the given stream.
  static raw_ostream &warning(raw_ostream &OS, StringRef Prefix = "",
                              bool DisableColors = false);
  /// Convenience method for printing "note: " to the given stream.
  static raw_ostream &note(raw_ostream &OS, StringRef Prefix = "",
                           bool DisableColors = false);
  /// Convenience method for printing "remark: " to the given stream.
  static raw_ostream &remark(raw_ostream &OS, StringRef Prefix = "",
                             bool DisableColors = false);

  /// Determine whether colors are displayed.
  bool colorsEnabled();

  /// Change the color of text that will be output from this point forward.
  /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
  /// change only the bold attribute, and keep colors untouched
  /// @param Bold Bold/brighter text, default false
  /// @param BG If true, change the background, default: change foreground
  WithColor &changeColor(raw_ostream::Colors Color, bool Bold = false,
                         bool BG = false);

  /// Reset the colors to terminal defaults. Call this when you are done
  /// outputting colored text, or before program exit.
  WithColor &resetColor();

  /// Implement default handling for Error.
  /// Print "error: " to stderr.
  static void defaultErrorHandler(Error Err);

  /// Implement default handling for Warning.
  /// Print "warning: " to stderr.
  static void defaultWarningHandler(Error Warning);

  /// Retrieve the default color auto detection function.
  static AutoDetectFunctionType defaultAutoDetectFunction();

  /// Change the global auto detection function.
  static void
  setAutoDetectFunction(AutoDetectFunctionType NewAutoDetectFunction);

private:
  raw_ostream &OS;
  ColorMode Mode;

  static AutoDetectFunctionType AutoDetectFunction;
};
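
// Minimal usage sketches (illustrative only):
//
//   WithColor::error() << "could not open file\n";
//   WithColor(errs(), HighlightColor::Warning) << "something dubious\n";
//   WithColor(outs(), raw_ostream::GREEN, /*Bold=*/true) << "ok\n";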

} // end namespace llvm

#endif // LLVM_SUPPORT_WITHCOLOR_H
//===- FormatVariadic.h - Efficient type-safe string formatting --*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the formatv() function which can be used with other LLVM
// subsystems to provide printf-like formatting, but with improved safety and
// flexibility.  The result of `formatv` is an object which can be streamed to
// a raw_ostream or converted to a std::string or llvm::SmallString.
//
//   // Convert to std::string.
//   std::string S = formatv("{0} {1}", 1234.412, "test").str();
//
//   // Convert to llvm::SmallString
//   SmallString<8> S = formatv("{0} {1}", 1234.412, "test").sstr<8>();
//
//   // Stream to an existing raw_ostream.
//   OS << formatv("{0} {1}", 1234.412, "test");
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATVARIADIC_H
#define LLVM_SUPPORT_FORMATVARIADIC_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatCommon.h"
#include "llvm/Support/FormatProviders.h"
#include "llvm/Support/FormatVariadicDetails.h"
#include "llvm/Support/raw_ostream.h"
#include <array>
#include <cstddef>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {

enum class ReplacementType { Empty, Format, Literal };

struct ReplacementItem {
  ReplacementItem() = default;
  explicit ReplacementItem(StringRef Literal)
      : Type(ReplacementType::Literal), Spec(Literal) {}
  ReplacementItem(StringRef Spec, size_t Index, size_t Align, AlignStyle Where,
                  char Pad, StringRef Options)
      : Type(ReplacementType::Format), Spec(Spec), Index(Index), Align(Align),
        Where(Where), Pad(Pad), Options(Options) {}

  ReplacementType Type = ReplacementType::Empty;
  StringRef Spec;
  size_t Index = 0;
  size_t Align = 0;
  AlignStyle Where = AlignStyle::Right;
  char Pad = 0;
  StringRef Options;
};

class formatv_object_base {
protected:
  StringRef Fmt;
  ArrayRef<detail::format_adapter *> Adapters;

  static bool consumeFieldLayout(StringRef &Spec, AlignStyle &Where,
                                 size_t &Align, char &Pad);

  static std::pair<ReplacementItem, StringRef>
  splitLiteralAndReplacement(StringRef Fmt);

  formatv_object_base(StringRef Fmt,
                      ArrayRef<detail::format_adapter *> Adapters)
      : Fmt(Fmt), Adapters(Adapters) {}

  formatv_object_base(formatv_object_base const &rhs) = delete;
  formatv_object_base(formatv_object_base &&rhs) = default;

public:
  void format(raw_ostream &S) const {
    for (auto &R : parseFormatString(Fmt)) {
      if (R.Type == ReplacementType::Empty)
        continue;
      if (R.Type == ReplacementType::Literal) {
        S << R.Spec;
        continue;
      }
      if (R.Index >= Adapters.size()) {
        S << R.Spec;
        continue;
      }

      auto *W = Adapters[R.Index];

      FmtAlign Align(*W, R.Where, R.Align, R.Pad);
      Align.format(S, R.Options);
    }
  }
  static SmallVector<ReplacementItem, 2> parseFormatString(StringRef Fmt);

  static std::optional<ReplacementItem> parseReplacementItem(StringRef Spec);

  std::string str() const {
    std::string Result;
    raw_string_ostream Stream(Result);
    Stream << *this;
    Stream.flush();
    return Result;
  }

  template <unsigned N> SmallString<N> sstr() const {
    SmallString<N> Result;
    raw_svector_ostream Stream(Result);
    Stream << *this;
    return Result;
  }

  template <unsigned N> operator SmallString<N>() const { return sstr<N>(); }

  operator std::string() const { return str(); }
};

template <typename Tuple> class formatv_object : public formatv_object_base {
  // Storage for the parameter adapters.  Since the base class erases the type
  // of the parameters, we have to own the storage for the parameters here, and
  // have the base class store type-erased pointers into this tuple.
  Tuple Parameters;
  std::array<detail::format_adapter *, std::tuple_size<Tuple>::value>
      ParameterPointers;

  // The parameters are stored in a std::tuple, which does not provide runtime
  // indexing capabilities.  In order to enable runtime indexing, we use this
  // structure to put the parameters into a std::array.  Since the parameters
  // are not all the same type, we use some type-erasure by wrapping the
  // parameters in a template class that derives from a non-template superclass.
  // Essentially, we are converting a std::tuple<Derived<Ts...>> to a
  // std::array<Base*>.
  struct create_adapters {
    template <typename... Ts>
    std::array<detail::format_adapter *, std::tuple_size<Tuple>::value>
    operator()(Ts &... Items) {
      return {{&Items...}};
    }
  };

public:
  formatv_object(StringRef Fmt, Tuple &&Params)
      : formatv_object_base(Fmt, ParameterPointers),
        Parameters(std::move(Params)) {
    ParameterPointers = std::apply(create_adapters(), Parameters);
  }

  formatv_object(formatv_object const &rhs) = delete;

  formatv_object(formatv_object &&rhs)
      : formatv_object_base(std::move(rhs)),
        Parameters(std::move(rhs.Parameters)) {
    ParameterPointers = std::apply(create_adapters(), Parameters);
    Adapters = ParameterPointers;
  }
};

// Format text given a format string and replacement parameters.
//
// ===General Description===
//
// Formats textual output.  `Fmt` is a string consisting of one or more
// replacement sequences with the following grammar:
//
// rep_field ::= "{" index ["," layout] [":" format] "}"
// index     ::= <non-negative integer>
// layout    ::= [[[char]loc]width]
// format    ::= <any string not containing "{" or "}">
// char      ::= <any character except "{" or "}">
// loc       ::= "-" | "=" | "+"
// width     ::= <positive integer>
//
// index   - A non-negative integer specifying the index of the item in the
//           parameter pack to print.  Any other value is invalid.
// layout  - A string controlling how the field is laid out within the available
//           space.
// format  - A type-dependent string used to provide additional options to
//           the formatting operation.  Refer to the documentation of the
//           various individual format providers for per-type options.
// char    - The padding character.  Defaults to ' ' (space).  Only valid if
//           `loc` is also specified.
// loc     - Where to print the formatted text within the field.  Only valid if
//           `width` is also specified.
//           '-' : The field is left aligned within the available space.
//           '=' : The field is centered within the available space.
//           '+' : The field is right aligned within the available space (this
//                 is the default).
// width   - The width of the field within which to print the formatted text.
//           If this is less than the required length then the `char` and `loc`
//           fields are ignored, and the field is printed with no leading or
//           trailing padding.  If this is greater than the required length,
//           then the text is output according to the value of `loc`, and padded
//           as appropriate on the left and/or right by `char`.
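//
//           For example, given the grammar above:
//             formatv("{0,10}",  "hi") => "        hi"  (right aligned; default)
//             formatv("{0,-10}", "hi") => "hi        "  (left aligned)
//             formatv("{0,*=8}", "hi") => "***hi***"    (centered, '*' padding)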
//
// ===Special Characters===
//
// The characters '{' and '}' are reserved and cannot appear anywhere within a
// replacement sequence.  Outside of a replacement sequence, in order to print
// a literal '{' it must be doubled as "{{".
//
// ===Parameter Indexing===
//
// `index` specifies the index of the parameter in the parameter pack to format
// into the output.  Note that it is possible to refer to the same parameter
// index multiple times in a given format string.  This makes it possible to
// output the same value multiple times without passing it multiple times to the
// function. For example:
//
//   formatv("{0} {1} {0}", "a", "bb")
//
// would yield the string "a bb a".  This can be convenient when it is expensive
// to compute the value of the parameter, and you would otherwise have had to
// save it to a temporary.
//
// ===Formatter Search===
//
// For a given parameter of type T, the following steps are executed in order
// until a match is found:
//
//   1. If the parameter is of class type, and inherits from format_adapter,
//      Then format() is invoked on it to produce the formatted output.  The
//      implementation should write the formatted text into `Stream`.
//   2. If there is a suitable template specialization of format_provider<>
//      for type T containing a method whose signature is:
//      void format(const T &Obj, raw_ostream &Stream, StringRef Options)
//      Then this method is invoked as described in Step 1.
//   3. If an appropriate operator<< for raw_ostream exists, it will be used.
//      For this to work, (raw_ostream& << const T&) must return raw_ostream&.
//
// If a match cannot be found through either of the above methods, a compiler
// error is generated.
//
// ===Invalid Format String Handling===
//
// In the case of a format string which does not match the grammar described
// above, the output is undefined.  With asserts enabled, LLVM will trigger an
// assertion.  Otherwise, it will try to do something reasonable, but in general
// the details of what that is are undefined.
//
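// ===Example: Custom format_provider===
//
// A minimal sketch of step 2 above; `Point` is a hypothetical user type, not
// part of LLVM:
//
//   struct Point { int X, Y; };
//   namespace llvm {
//   template <> struct format_provider<Point> {
//     static void format(const Point &P, raw_ostream &Stream,
//                        StringRef Options) {
//       Stream << '(' << P.X << ", " << P.Y << ')';
//     }
//   };
//   } // namespace llvm
//
//   // formatv("{0}", Point{1, 2}) yields "(1, 2)".
//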
template <typename... Ts>
inline auto formatv(const char *Fmt, Ts &&... Vals) -> formatv_object<decltype(
    std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...))> {
  using ParamTuple = decltype(
      std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...));
  return formatv_object<ParamTuple>(
      Fmt,
      std::make_tuple(detail::build_format_adapter(std::forward<Ts>(Vals))...));
}

} // end namespace llvm

#endif // LLVM_SUPPORT_FORMATVARIADIC_H
//===--- Capacity.h - Generic computation of ADT memory use -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the capacity function that computes the amount of
// memory used by an ADT.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CAPACITY_H
#define LLVM_SUPPORT_CAPACITY_H

#include <cstddef>

namespace llvm {

template <typename T>
static inline size_t capacity_in_bytes(const T &x) {
  // This default definition of capacity should work for things like std::vector
  // and friends.  More specialized versions will work for others.
  return x.capacity() * sizeof(typename T::value_type);
}
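
// For example, for a std::vector<int> V with V.capacity() == 100, this returns
// 100 * sizeof(int); allocator bookkeeping overhead is not accounted for.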

} // end namespace llvm

#endif

//===- WindowsSupport.h - Common Windows Include File -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines things specific to Windows implementations.  In addition to
// providing some helpers for working with win32 APIs, this header wraps
// <windows.h> with some portability macros.  Always include WindowsSupport.h
// instead of including <windows.h> directly.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//=== WARNING: Implementation here must contain only generic Win32 code that
//===          is guaranteed to work on *all* Win32 variants.
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_WINDOWSSUPPORT_H
#define LLVM_SUPPORT_WINDOWSSUPPORT_H

// mingw-w64 tends to define it as 0x0502 in its headers.
#undef _WIN32_WINNT
#undef _WIN32_IE

// Require at least Windows 7 API.
#define _WIN32_WINNT 0x0601
#define _WIN32_IE    0x0800 // MinGW at it again. FIXME: verify if still needed.
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h" // Get build system configuration settings
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Chrono.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <string>
#include <system_error>
#include <windows.h>

// Must be included after windows.h
#include <wincrypt.h>

namespace llvm {

/// Determines if the program is running on Windows 8 or newer. This
/// reimplements one of the helpers in the Windows 8.1 SDK, which are intended
/// to supersede raw calls to GetVersionEx. Old SDKs, Cygwin, and MinGW don't
/// yet have VersionHelpers.h, so we have our own helper.
bool RunningWindows8OrGreater();

/// Determines if the program is running on Windows 11 or Windows Server 2022.
bool RunningWindows11OrGreater();

/// Returns the Windows version as Major.Minor.0.BuildNumber. Uses
/// RtlGetVersion or GetVersionEx under the hood depending on what is available.
/// GetVersionEx is deprecated, but this API exposes the build number which can
/// be useful for working around certain kernel bugs.
llvm::VersionTuple GetWindowsOSVersion();

bool MakeErrMsg(std::string *ErrMsg, const std::string &prefix);

// Include GetLastError() in a fatal error message.
[[noreturn]] inline void ReportLastErrorFatal(const char *Msg) {
  std::string ErrMsg;
  MakeErrMsg(&ErrMsg, Msg);
  llvm::report_fatal_error(Twine(ErrMsg));
}

template <typename HandleTraits>
class ScopedHandle {
  typedef typename HandleTraits::handle_type handle_type;
  handle_type Handle;

  ScopedHandle(const ScopedHandle &other) = delete;
  void operator=(const ScopedHandle &other) = delete;
public:
  ScopedHandle()
    : Handle(HandleTraits::GetInvalid()) {}

  explicit ScopedHandle(handle_type h)
    : Handle(h) {}

  ~ScopedHandle() {
    if (HandleTraits::IsValid(Handle))
      HandleTraits::Close(Handle);
  }

  handle_type take() {
    handle_type t = Handle;
    Handle = HandleTraits::GetInvalid();
    return t;
  }

  ScopedHandle &operator=(handle_type h) {
    if (HandleTraits::IsValid(Handle))
      HandleTraits::Close(Handle);
    Handle = h;
    return *this;
  }

  // True if Handle is valid.
  explicit operator bool() const {
    return HandleTraits::IsValid(Handle) ? true : false;
  }

  operator handle_type() const {
    return Handle;
  }
};

struct CommonHandleTraits {
  typedef HANDLE handle_type;

  static handle_type GetInvalid() {
    return INVALID_HANDLE_VALUE;
  }

  static void Close(handle_type h) {
    ::CloseHandle(h);
  }

  static bool IsValid(handle_type h) {
    return h != GetInvalid();
  }
};

struct JobHandleTraits : CommonHandleTraits {
  static handle_type GetInvalid() {
    return NULL;
  }
};

struct CryptContextTraits : CommonHandleTraits {
  typedef HCRYPTPROV handle_type;

  static handle_type GetInvalid() {
    return 0;
  }

  static void Close(handle_type h) {
    ::CryptReleaseContext(h, 0);
  }

  static bool IsValid(handle_type h) {
    return h != GetInvalid();
  }
};

struct RegTraits : CommonHandleTraits {
  typedef HKEY handle_type;

  static handle_type GetInvalid() {
    return NULL;
  }

  static void Close(handle_type h) {
    ::RegCloseKey(h);
  }

  static bool IsValid(handle_type h) {
    return h != GetInvalid();
  }
};

struct FindHandleTraits : CommonHandleTraits {
  static void Close(handle_type h) {
    ::FindClose(h);
  }
};

struct FileHandleTraits : CommonHandleTraits {};

typedef ScopedHandle<CommonHandleTraits> ScopedCommonHandle;
typedef ScopedHandle<FileHandleTraits>   ScopedFileHandle;
typedef ScopedHandle<CryptContextTraits> ScopedCryptContext;
typedef ScopedHandle<RegTraits>          ScopedRegHandle;
typedef ScopedHandle<FindHandleTraits>   ScopedFindHandle;
typedef ScopedHandle<JobHandleTraits>    ScopedJobHandle;
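
// A minimal usage sketch (illustrative only): the wrapped handle is closed
// automatically when the ScopedFileHandle goes out of scope.
//
//   ScopedFileHandle File(::CreateFileW(L"C:\\test.bin", GENERIC_READ,
//                                       FILE_SHARE_READ, nullptr,
//                                       OPEN_EXISTING, 0, nullptr));
//   if (!File)
//     ReportLastErrorFatal("cannot open test.bin");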

template <class T>
class SmallVectorImpl;

template <class T>
typename SmallVectorImpl<T>::const_pointer
c_str(SmallVectorImpl<T> &str) {
  str.push_back(0);
  str.pop_back();
  return str.data();
}

namespace sys {

inline std::chrono::nanoseconds toDuration(FILETIME Time) {
  ULARGE_INTEGER TimeInteger;
  TimeInteger.LowPart = Time.dwLowDateTime;
  TimeInteger.HighPart = Time.dwHighDateTime;

  // FILETIME's are # of 100 nanosecond ticks (1/10th of a microsecond)
  return std::chrono::nanoseconds(100 * TimeInteger.QuadPart);
}

inline TimePoint<> toTimePoint(FILETIME Time) {
  ULARGE_INTEGER TimeInteger;
  TimeInteger.LowPart = Time.dwLowDateTime;
  TimeInteger.HighPart = Time.dwHighDateTime;

  // Adjust for different epoch
  TimeInteger.QuadPart -= 11644473600ll * 10000000;

  // FILETIME's are # of 100 nanosecond ticks (1/10th of a microsecond)
  return TimePoint<>(std::chrono::nanoseconds(100 * TimeInteger.QuadPart));
}

inline FILETIME toFILETIME(TimePoint<> TP) {
  ULARGE_INTEGER TimeInteger;
  TimeInteger.QuadPart = TP.time_since_epoch().count() / 100;
  TimeInteger.QuadPart += 11644473600ll * 10000000;

  FILETIME Time;
  Time.dwLowDateTime = TimeInteger.LowPart;
  Time.dwHighDateTime = TimeInteger.HighPart;
  return Time;
}
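
// Note: FILETIME counts 100-nanosecond ticks since 1601-01-01, whereas
// TimePoint<> counts from the Unix epoch (1970-01-01). The constant
// 11644473600 above is the number of seconds between those two epochs, so
// toTimePoint(toFILETIME(TP)) round-trips TP up to 100ns granularity.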

namespace windows {
// Returns command line arguments. Unlike arguments given to main(),
// this function guarantees that the returned arguments are encoded in
// UTF-8 regardless of the current code page setting.
std::error_code GetCommandLineArguments(SmallVectorImpl<const char *> &Args,
                                        BumpPtrAllocator &Alloc);

/// Convert UTF-8 path to a suitable UTF-16 path for use with the Win32 Unicode
/// File API.
std::error_code widenPath(const Twine &Path8, SmallVectorImpl<wchar_t> &Path16,
                          size_t MaxPathLen = MAX_PATH);

} // end namespace windows
} // end namespace sys
} // end namespace llvm

#endif
//===- InitLLVM.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_INITLLVM_H
#define LLVM_SUPPORT_INITLLVM_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/PrettyStackTrace.h"
#include <optional>

// The main() functions in typical LLVM tools start with InitLLVM which does
// the following one-time initializations:
//
//  1. Setting up a signal handler so that a pretty stack trace is printed out
//     if the process crashes. A signal handler that exits when a write to a
//     broken pipe fails may optionally be installed; this is on by default.
//
//  2. Setting up the global new-handler, which is called when a memory
//     allocation attempt fails.
//
//  3. If running on Windows, obtaining command line arguments using a
//     multibyte character-aware API and converting them to UTF-8, so that
//     command line arguments can be assumed to be UTF-8 encoded on any
//     platform.
//
// InitLLVM calls llvm_shutdown() on destruction, which cleans up
// ManagedStatic objects.
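//
// A typical entry point then looks like this (illustrative sketch):
//
//   int main(int argc, char **argv) {
//     llvm::InitLLVM X(argc, argv);
//     // argv is guaranteed to be UTF-8 encoded from this point on.
//     // ... tool-specific logic ...
//     return 0;
//   }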
namespace llvm {
class InitLLVM {
public:
  InitLLVM(int &Argc, const char **&Argv,
           bool InstallPipeSignalExitHandler = true);
  InitLLVM(int &Argc, char **&Argv, bool InstallPipeSignalExitHandler = true)
      : InitLLVM(Argc, const_cast<const char **&>(Argv),
                 InstallPipeSignalExitHandler) {}

  ~InitLLVM();

private:
  BumpPtrAllocator Alloc;
  SmallVector<const char *, 0> Args;
  std::optional<PrettyStackTraceProgram> StackPrinter;
};
} // namespace llvm

#endif
//===--- AMDGPUMetadata.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// AMDGPU metadata definitions and in-memory representations.
///
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AMDGPUMETADATA_H
#define LLVM_SUPPORT_AMDGPUMETADATA_H

#include "llvm/ADT/StringRef.h"
#include <cstdint>
#include <string>
#include <system_error>
#include <vector>

namespace llvm {
namespace AMDGPU {

//===----------------------------------------------------------------------===//
// HSA metadata.
//===----------------------------------------------------------------------===//
namespace HSAMD {

/// HSA metadata major version for code object V2.
constexpr uint32_t VersionMajorV2 = 1;
/// HSA metadata minor version for code object V2.
constexpr uint32_t VersionMinorV2 = 0;

/// HSA metadata major version for code object V3.
constexpr uint32_t VersionMajorV3 = 1;
/// HSA metadata minor version for code object V3.
constexpr uint32_t VersionMinorV3 = 0;

/// HSA metadata major version for code object V4.
constexpr uint32_t VersionMajorV4 = 1;
/// HSA metadata minor version for code object V4.
constexpr uint32_t VersionMinorV4 = 1;

/// HSA metadata major version for code object V5.
constexpr uint32_t VersionMajorV5 = 1;
/// HSA metadata minor version for code object V5.
constexpr uint32_t VersionMinorV5 = 2;

/// HSA metadata beginning assembler directive.
constexpr char AssemblerDirectiveBegin[] = ".amd_amdgpu_hsa_metadata";
/// HSA metadata ending assembler directive.
constexpr char AssemblerDirectiveEnd[] = ".end_amd_amdgpu_hsa_metadata";

/// Access qualifiers.
enum class AccessQualifier : uint8_t {
  Default   = 0,
  ReadOnly  = 1,
  WriteOnly = 2,
  ReadWrite = 3,
  Unknown   = 0xff
};

/// Address space qualifiers.
enum class AddressSpaceQualifier : uint8_t {
  Private  = 0,
  Global   = 1,
  Constant = 2,
  Local    = 3,
  Generic  = 4,
  Region   = 5,
  Unknown  = 0xff
};

/// Value kinds.
enum class ValueKind : uint8_t {
  ByValue                = 0,
  GlobalBuffer           = 1,
  DynamicSharedPointer   = 2,
  Sampler                = 3,
  Image                  = 4,
  Pipe                   = 5,
  Queue                  = 6,
  HiddenGlobalOffsetX    = 7,
  HiddenGlobalOffsetY    = 8,
  HiddenGlobalOffsetZ    = 9,
  HiddenNone             = 10,
  HiddenPrintfBuffer     = 11,
  HiddenDefaultQueue     = 12,
  HiddenCompletionAction = 13,
  HiddenMultiGridSyncArg = 14,
  HiddenHostcallBuffer   = 15,
  Unknown                = 0xff
};

/// Value types. This is deprecated and only remains for compatibility parsing
/// of old metadata.
enum class ValueType : uint8_t {
  Struct  = 0,
  I8      = 1,
  U8      = 2,
  I16     = 3,
  U16     = 4,
  F16     = 5,
  I32     = 6,
  U32     = 7,
  F32     = 8,
  I64     = 9,
  U64     = 10,
  F64     = 11,
  Unknown = 0xff
};

//===----------------------------------------------------------------------===//
// Kernel Metadata.
//===----------------------------------------------------------------------===//
namespace Kernel {

//===----------------------------------------------------------------------===//
// Kernel Attributes Metadata.
//===----------------------------------------------------------------------===//
namespace Attrs {

namespace Key {
/// Key for Kernel::Attr::Metadata::mReqdWorkGroupSize.
constexpr char ReqdWorkGroupSize[] = "ReqdWorkGroupSize";
/// Key for Kernel::Attr::Metadata::mWorkGroupSizeHint.
constexpr char WorkGroupSizeHint[] = "WorkGroupSizeHint";
/// Key for Kernel::Attr::Metadata::mVecTypeHint.
constexpr char VecTypeHint[] = "VecTypeHint";
/// Key for Kernel::Attr::Metadata::mRuntimeHandle.
constexpr char RuntimeHandle[] = "RuntimeHandle";
} // end namespace Key

/// In-memory representation of kernel attributes metadata.
struct Metadata final {
  /// 'reqd_work_group_size' attribute. Optional.
  std::vector<uint32_t> mReqdWorkGroupSize = std::vector<uint32_t>();
  /// 'work_group_size_hint' attribute. Optional.
  std::vector<uint32_t> mWorkGroupSizeHint = std::vector<uint32_t>();
  /// 'vec_type_hint' attribute. Optional.
  std::string mVecTypeHint = std::string();
  /// External symbol created by runtime to store the kernel address
  /// for enqueued blocks.
  std::string mRuntimeHandle = std::string();

  /// Default constructor.
  Metadata() = default;

  /// \returns True if kernel attributes metadata is empty, false otherwise.
  bool empty() const {
    return !notEmpty();
  }

  /// \returns True if kernel attributes metadata is not empty, false otherwise.
  bool notEmpty() const {
    return !mReqdWorkGroupSize.empty() || !mWorkGroupSizeHint.empty() ||
           !mVecTypeHint.empty() || !mRuntimeHandle.empty();
  }
};

} // end namespace Attrs

//===----------------------------------------------------------------------===//
// Kernel Argument Metadata.
//===----------------------------------------------------------------------===//
namespace Arg {

namespace Key {
/// Key for Kernel::Arg::Metadata::mName.
constexpr char Name[] = "Name";
/// Key for Kernel::Arg::Metadata::mTypeName.
constexpr char TypeName[] = "TypeName";
/// Key for Kernel::Arg::Metadata::mSize.
constexpr char Size[] = "Size";
/// Key for Kernel::Arg::Metadata::mOffset.
constexpr char Offset[] = "Offset";
/// Key for Kernel::Arg::Metadata::mAlign.
constexpr char Align[] = "Align";
/// Key for Kernel::Arg::Metadata::mValueKind.
constexpr char ValueKind[] = "ValueKind";
/// Key for Kernel::Arg::Metadata::mValueType. (deprecated)
constexpr char ValueType[] = "ValueType";
/// Key for Kernel::Arg::Metadata::mPointeeAlign.
constexpr char PointeeAlign[] = "PointeeAlign";
/// Key for Kernel::Arg::Metadata::mAddrSpaceQual.
constexpr char AddrSpaceQual[] = "AddrSpaceQual";
/// Key for Kernel::Arg::Metadata::mAccQual.
constexpr char AccQual[] = "AccQual";
/// Key for Kernel::Arg::Metadata::mActualAccQual.
constexpr char ActualAccQual[] = "ActualAccQual";
/// Key for Kernel::Arg::Metadata::mIsConst.
constexpr char IsConst[] = "IsConst";
/// Key for Kernel::Arg::Metadata::mIsRestrict.
constexpr char IsRestrict[] = "IsRestrict";
/// Key for Kernel::Arg::Metadata::mIsVolatile.
constexpr char IsVolatile[] = "IsVolatile";
/// Key for Kernel::Arg::Metadata::mIsPipe.
constexpr char IsPipe[] = "IsPipe";
} // end namespace Key

/// In-memory representation of kernel argument metadata.
struct Metadata final {
  /// Name. Optional.
  std::string mName = std::string();
  /// Type name. Optional.
  std::string mTypeName = std::string();
  /// Size in bytes. Required.
  uint32_t mSize = 0;
  /// Offset in bytes. Required for code object v3, unused for code object v2.
  uint32_t mOffset = 0;
  /// Alignment in bytes. Required.
  uint32_t mAlign = 0;
  /// Value kind. Required.
  ValueKind mValueKind = ValueKind::Unknown;
  /// Pointee alignment in bytes. Optional.
  uint32_t mPointeeAlign = 0;
  /// Address space qualifier. Optional.
  AddressSpaceQualifier mAddrSpaceQual = AddressSpaceQualifier::Unknown;
  /// Access qualifier. Optional.
  AccessQualifier mAccQual = AccessQualifier::Unknown;
  /// Actual access qualifier. Optional.
  AccessQualifier mActualAccQual = AccessQualifier::Unknown;
  /// True if 'const' qualifier is specified. Optional.
  bool mIsConst = false;
  /// True if 'restrict' qualifier is specified. Optional.
  bool mIsRestrict = false;
  /// True if 'volatile' qualifier is specified. Optional.
  bool mIsVolatile = false;
  /// True if 'pipe' qualifier is specified. Optional.
  bool mIsPipe = false;

  /// Default constructor.
  Metadata() = default;
};

} // end namespace Arg

//===----------------------------------------------------------------------===//
// Kernel Code Properties Metadata.
//===----------------------------------------------------------------------===//
namespace CodeProps {

namespace Key {
/// Key for Kernel::CodeProps::Metadata::mKernargSegmentSize.
constexpr char KernargSegmentSize[] = "KernargSegmentSize";
/// Key for Kernel::CodeProps::Metadata::mGroupSegmentFixedSize.
constexpr char GroupSegmentFixedSize[] = "GroupSegmentFixedSize";
/// Key for Kernel::CodeProps::Metadata::mPrivateSegmentFixedSize.
constexpr char PrivateSegmentFixedSize[] = "PrivateSegmentFixedSize";
/// Key for Kernel::CodeProps::Metadata::mKernargSegmentAlign.
constexpr char KernargSegmentAlign[] = "KernargSegmentAlign";
/// Key for Kernel::CodeProps::Metadata::mWavefrontSize.
constexpr char WavefrontSize[] = "WavefrontSize";
/// Key for Kernel::CodeProps::Metadata::mNumSGPRs.
constexpr char NumSGPRs[] = "NumSGPRs";
/// Key for Kernel::CodeProps::Metadata::mNumVGPRs.
constexpr char NumVGPRs[] = "NumVGPRs";
/// Key for Kernel::CodeProps::Metadata::mMaxFlatWorkGroupSize.
constexpr char MaxFlatWorkGroupSize[] = "MaxFlatWorkGroupSize";
/// Key for Kernel::CodeProps::Metadata::mIsDynamicCallStack.
constexpr char IsDynamicCallStack[] = "IsDynamicCallStack";
/// Key for Kernel::CodeProps::Metadata::mIsXNACKEnabled.
constexpr char IsXNACKEnabled[] = "IsXNACKEnabled";
/// Key for Kernel::CodeProps::Metadata::mNumSpilledSGPRs.
constexpr char NumSpilledSGPRs[] = "NumSpilledSGPRs";
/// Key for Kernel::CodeProps::Metadata::mNumSpilledVGPRs.
constexpr char NumSpilledVGPRs[] = "NumSpilledVGPRs";
} // end namespace Key

/// In-memory representation of kernel code properties metadata.
struct Metadata final {
  /// Size in bytes of the kernarg segment memory. Kernarg segment memory
  /// holds the values of the arguments to the kernel. Required.
  uint64_t mKernargSegmentSize = 0;
  /// Size in bytes of the group segment memory required by a workgroup.
  /// This value does not include any dynamically allocated group segment memory
  /// that may be added when the kernel is dispatched. Required.
  uint32_t mGroupSegmentFixedSize = 0;
  /// Size in bytes of the private segment memory required by a workitem.
  /// Private segment memory includes arg, spill and private segments. Required.
  uint32_t mPrivateSegmentFixedSize = 0;
  /// Maximum byte alignment of variables used by the kernel in the
  /// kernarg memory segment. Required.
  uint32_t mKernargSegmentAlign = 0;
  /// Wavefront size. Required.
  uint32_t mWavefrontSize = 0;
  /// Total number of SGPRs used by a wavefront. Optional.
  uint16_t mNumSGPRs = 0;
  /// Total number of VGPRs used by a workitem. Optional.
  uint16_t mNumVGPRs = 0;
  /// Maximum flat work-group size supported by the kernel. Optional.
  uint32_t mMaxFlatWorkGroupSize = 0;
  /// True if the generated machine code is using a dynamically sized
  /// call stack. Optional.
  bool mIsDynamicCallStack = false;
  /// True if the generated machine code is capable of supporting XNACK.
  /// Optional.
  bool mIsXNACKEnabled = false;
  /// Number of SGPRs spilled by a wavefront. Optional.
  uint16_t mNumSpilledSGPRs = 0;
  /// Number of VGPRs spilled by a workitem. Optional.
  uint16_t mNumSpilledVGPRs = 0;

  /// Default constructor.
  Metadata() = default;

  /// \returns True if kernel code properties metadata is empty, false
  /// otherwise.
  bool empty() const {
    return !notEmpty();
  }

  /// \returns True if kernel code properties metadata is not empty, false
  /// otherwise. Code properties metadata always carries its required fields
  /// (e.g. the kernarg segment size), so it is never considered empty.
  bool notEmpty() const {
    return true;
  }
};

} // end namespace CodeProps

//===----------------------------------------------------------------------===//
// Kernel Debug Properties Metadata.
//===----------------------------------------------------------------------===//
namespace DebugProps {

namespace Key {
/// Key for Kernel::DebugProps::Metadata::mDebuggerABIVersion.
constexpr char DebuggerABIVersion[] = "DebuggerABIVersion";
/// Key for Kernel::DebugProps::Metadata::mReservedNumVGPRs.
constexpr char ReservedNumVGPRs[] = "ReservedNumVGPRs";
/// Key for Kernel::DebugProps::Metadata::mReservedFirstVGPR.
constexpr char ReservedFirstVGPR[] = "ReservedFirstVGPR";
/// Key for Kernel::DebugProps::Metadata::mPrivateSegmentBufferSGPR.
constexpr char PrivateSegmentBufferSGPR[] = "PrivateSegmentBufferSGPR";
/// Key for
///     Kernel::DebugProps::Metadata::mWavefrontPrivateSegmentOffsetSGPR.
constexpr char WavefrontPrivateSegmentOffsetSGPR[] =
    "WavefrontPrivateSegmentOffsetSGPR";
} // end namespace Key

/// In-memory representation of kernel debug properties metadata.
struct Metadata final {
  /// Debugger ABI version. Optional.
  std::vector<uint32_t> mDebuggerABIVersion = std::vector<uint32_t>();
  /// Consecutive number of VGPRs reserved for debugger use. Must be 0 if
  /// mDebuggerABIVersion is not set. Optional.
  uint16_t mReservedNumVGPRs = 0;
  /// First fixed VGPR reserved. Must be uint16_t(-1) if
  /// mDebuggerABIVersion is not set or mReservedFirstVGPR is 0. Optional.
  uint16_t mReservedFirstVGPR = uint16_t(-1);
  /// Fixed SGPR of the first of 4 SGPRs used to hold the scratch V# used
  /// for the entire kernel execution. Must be uint16_t(-1) if
  /// mDebuggerABIVersion is not set or SGPR not used or not known. Optional.
  uint16_t mPrivateSegmentBufferSGPR = uint16_t(-1);
  /// Fixed SGPR used to hold the wave scratch offset for the entire
  /// kernel execution. Must be uint16_t(-1) if mDebuggerABIVersion is not set
  /// or SGPR is not used or not known. Optional.
  uint16_t mWavefrontPrivateSegmentOffsetSGPR = uint16_t(-1);

  /// Default constructor.
  Metadata() = default;

  /// \returns True if kernel debug properties metadata is empty, false
  /// otherwise.
  bool empty() const {
    return !notEmpty();
  }

  /// \returns True if kernel debug properties metadata is not empty, false
  /// otherwise.
  bool notEmpty() const {
    return !mDebuggerABIVersion.empty();
  }
};

} // end namespace DebugProps

namespace Key {
/// Key for Kernel::Metadata::mName.
constexpr char Name[] = "Name";
/// Key for Kernel::Metadata::mSymbolName.
constexpr char SymbolName[] = "SymbolName";
/// Key for Kernel::Metadata::mLanguage.
constexpr char Language[] = "Language";
/// Key for Kernel::Metadata::mLanguageVersion.
constexpr char LanguageVersion[] = "LanguageVersion";
/// Key for Kernel::Metadata::mAttrs.
constexpr char Attrs[] = "Attrs";
/// Key for Kernel::Metadata::mArgs.
constexpr char Args[] = "Args";
/// Key for Kernel::Metadata::mCodeProps.
constexpr char CodeProps[] = "CodeProps";
/// Key for Kernel::Metadata::mDebugProps.
constexpr char DebugProps[] = "DebugProps";
} // end namespace Key

/// In-memory representation of kernel metadata.
struct Metadata final {
  /// Kernel source name. Required.
  std::string mName = std::string();
  /// Kernel descriptor name. Required.
  std::string mSymbolName = std::string();
  /// Language. Optional.
  std::string mLanguage = std::string();
  /// Language version. Optional.
  std::vector<uint32_t> mLanguageVersion = std::vector<uint32_t>();
  /// Attributes metadata. Optional.
  Attrs::Metadata mAttrs = Attrs::Metadata();
  /// Arguments metadata. Optional.
  std::vector<Arg::Metadata> mArgs = std::vector<Arg::Metadata>();
  /// Code properties metadata. Optional.
  CodeProps::Metadata mCodeProps = CodeProps::Metadata();
  /// Debug properties metadata. Optional.
  DebugProps::Metadata mDebugProps = DebugProps::Metadata();

  /// Default constructor.
  Metadata() = default;
};

} // end namespace Kernel

namespace Key {
/// Key for HSA::Metadata::mVersion.
constexpr char Version[] = "Version";
/// Key for HSA::Metadata::mPrintf.
constexpr char Printf[] = "Printf";
/// Key for HSA::Metadata::mKernels.
constexpr char Kernels[] = "Kernels";
} // end namespace Key

/// In-memory representation of HSA metadata.
struct Metadata final {
  /// HSA metadata version. Required.
  std::vector<uint32_t> mVersion = std::vector<uint32_t>();
  /// Printf metadata. Optional.
  std::vector<std::string> mPrintf = std::vector<std::string>();
  /// Kernels metadata. Required.
  std::vector<Kernel::Metadata> mKernels = std::vector<Kernel::Metadata>();

  /// Default constructor.
  Metadata() = default;
};

/// Converts \p String to \p HSAMetadata.
std::error_code fromString(StringRef String, Metadata &HSAMetadata);

/// Converts \p HSAMetadata to \p String.
std::error_code toString(Metadata HSAMetadata, std::string &String);
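
// Example (an illustrative sketch; YamlString stands for HSA metadata in its
// textual form, e.g. as emitted between the assembler directives above):
//
//   HSAMD::Metadata HSAMetadata;
//   if (std::error_code EC = HSAMD::fromString(YamlString, HSAMetadata))
//     return EC;
//   for (const Kernel::Metadata &Kern : HSAMetadata.mKernels)
//     ; // inspect Kern.mName, Kern.mArgs, ...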

//===----------------------------------------------------------------------===//
// HSA metadata for v3 code object.
//===----------------------------------------------------------------------===//
namespace V3 {
/// HSA metadata major version.
constexpr uint32_t VersionMajor = 1;
/// HSA metadata minor version.
constexpr uint32_t VersionMinor = 0;

/// HSA metadata beginning assembler directive.
constexpr char AssemblerDirectiveBegin[] = ".amdgpu_metadata";
/// HSA metadata ending assembler directive.
constexpr char AssemblerDirectiveEnd[] = ".end_amdgpu_metadata";
} // end namespace V3

} // end namespace HSAMD

//===----------------------------------------------------------------------===//
// PAL metadata.
//===----------------------------------------------------------------------===//
namespace PALMD {

/// PAL metadata (old linear format) assembler directive.
constexpr char AssemblerDirective[] = ".amd_amdgpu_pal_metadata";

/// PAL metadata (new MsgPack format) beginning assembler directive.
constexpr char AssemblerDirectiveBegin[] = ".amdgpu_pal_metadata";

/// PAL metadata (new MsgPack format) ending assembler directive.
constexpr char AssemblerDirectiveEnd[] = ".end_amdgpu_pal_metadata";

/// PAL metadata keys.
enum Key : uint32_t {
  R_2E12_COMPUTE_PGM_RSRC1 = 0x2e12,
  R_2D4A_SPI_SHADER_PGM_RSRC1_LS = 0x2d4a,
  R_2D0A_SPI_SHADER_PGM_RSRC1_HS = 0x2d0a,
  R_2CCA_SPI_SHADER_PGM_RSRC1_ES = 0x2cca,
  R_2C8A_SPI_SHADER_PGM_RSRC1_GS = 0x2c8a,
  R_2C4A_SPI_SHADER_PGM_RSRC1_VS = 0x2c4a,
  R_2C0A_SPI_SHADER_PGM_RSRC1_PS = 0x2c0a,
  R_2E00_COMPUTE_DISPATCH_INITIATOR = 0x2e00,
  R_A1B3_SPI_PS_INPUT_ENA = 0xa1b3,
  R_A1B4_SPI_PS_INPUT_ADDR = 0xa1b4,
  R_A1B6_SPI_PS_IN_CONTROL = 0xa1b6,
  R_A2D5_VGT_SHADER_STAGES_EN = 0xa2d5,

  LS_NUM_USED_VGPRS = 0x10000021,
  HS_NUM_USED_VGPRS = 0x10000022,
  ES_NUM_USED_VGPRS = 0x10000023,
  GS_NUM_USED_VGPRS = 0x10000024,
  VS_NUM_USED_VGPRS = 0x10000025,
  PS_NUM_USED_VGPRS = 0x10000026,
  CS_NUM_USED_VGPRS = 0x10000027,

  LS_NUM_USED_SGPRS = 0x10000028,
  HS_NUM_USED_SGPRS = 0x10000029,
  ES_NUM_USED_SGPRS = 0x1000002a,
  GS_NUM_USED_SGPRS = 0x1000002b,
  VS_NUM_USED_SGPRS = 0x1000002c,
  PS_NUM_USED_SGPRS = 0x1000002d,
  CS_NUM_USED_SGPRS = 0x1000002e,

  LS_SCRATCH_SIZE = 0x10000044,
  HS_SCRATCH_SIZE = 0x10000045,
  ES_SCRATCH_SIZE = 0x10000046,
  GS_SCRATCH_SIZE = 0x10000047,
  VS_SCRATCH_SIZE = 0x10000048,
  PS_SCRATCH_SIZE = 0x10000049,
  CS_SCRATCH_SIZE = 0x1000004a
};

} // end namespace PALMD
} // end namespace AMDGPU
} // end namespace llvm

#endif // LLVM_SUPPORT_AMDGPUMETADATA_H
//===- llvm/ADT/SuffixTreeNode.h - Nodes for SuffixTrees --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines nodes for use within a SuffixTree.
//
// Each node has either no children or at least two children, with the root
// being an exception in the empty tree.
//
// Children are represented as a map between unsigned integers and nodes. If
// a node N has a child M on unsigned integer k, then the mapping represented
// by N is a proper prefix of the mapping represented by M. Note that although
// this is similar to a trie, it is somewhat different: each node stores a
// full substring of the full mapping rather than a single character state.
//
// Each internal node contains a pointer to the internal node representing
// the same string, but with the first character chopped off. This is stored
// in \p Link. Each leaf node stores the start index of its respective
// suffix in \p SuffixIdx.
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SUFFIXTREE_NODE_H
#define LLVM_SUPPORT_SUFFIXTREE_NODE_H
#include "llvm/ADT/DenseMap.h"

namespace llvm {

/// A node in a suffix tree which represents a substring or suffix.
struct SuffixTreeNode {
public:
  /// Represents an undefined index in the suffix tree.
  static const unsigned EmptyIdx = -1;
  enum class NodeKind { ST_Leaf, ST_Internal };

private:
  const NodeKind Kind;

  /// The start index of this node's substring in the main string.
  unsigned StartIdx = EmptyIdx;

  /// The length of the string formed by concatenating the edge labels from
  /// the root to this node.
  unsigned ConcatLen = 0;

public:
  // LLVM RTTI boilerplate.
  NodeKind getKind() const { return Kind; }

  /// \return the start index of this node's substring in the entire string.
  unsigned getStartIdx() const;

  /// \returns the end index of this node.
  virtual unsigned getEndIdx() const = 0;

  /// Advance this node's StartIdx by \p Inc.
  void incrementStartIdx(unsigned Inc);

  /// Set the length of the string from the root to this node to \p Len.
  void setConcatLen(unsigned Len);

  /// \returns the length of the string from the root to this node.
  unsigned getConcatLen() const;

  SuffixTreeNode(NodeKind Kind, unsigned StartIdx)
      : Kind(Kind), StartIdx(StartIdx) {}
  virtual ~SuffixTreeNode() = default;
};

// A node with two or more children, or the root.
struct SuffixTreeInternalNode : SuffixTreeNode {
private:
  /// The end index of this node's substring in the main string.
  ///
  /// Unlike a leaf node's end index, which must grow at every step of the
  /// construction algorithm and is therefore stored as a pointer, an internal
  /// node's end index is fixed once the node is created, so it is stored
  /// directly as a value.
  unsigned EndIdx = EmptyIdx;

  /// A pointer to the internal node representing the same sequence with the
  /// first character chopped off.
  ///
  /// This acts as a shortcut in Ukkonen's algorithm. One of the things that
  /// Ukkonen's algorithm does to achieve linear-time construction is
  /// keep track of which node the next insert should be at. This makes each
  /// insert O(1), and there are a total of O(N) inserts. The suffix link
  /// helps with inserting children of internal nodes.
  ///
  /// Say we add a child to an internal node with associated mapping S. The
  /// next insertion must be at the node representing S with its first
  /// character removed.
  /// This is given by the way that we iteratively build the tree in Ukkonen's
  /// algorithm. The main idea is to look at the suffixes of each prefix in the
  /// string, starting with the longest suffix of the prefix, and ending with
  /// the shortest. Therefore, if we keep pointers between such nodes, we can
  /// move to the next insertion point in O(1) time. If we don't, then we'd
  /// have to query from the root, which takes O(N) time. This would make the
  /// construction algorithm O(N^2) rather than O(N).
  SuffixTreeInternalNode *Link = nullptr;

public:
  // LLVM RTTI boilerplate.
  static bool classof(const SuffixTreeNode *N) {
    return N->getKind() == NodeKind::ST_Internal;
  }

  /// \returns true if this node is the root of its owning \p SuffixTree.
  bool isRoot() const;

  /// \returns the end index of this node's substring in the entire string.
  unsigned getEndIdx() const override;

  /// Sets \p Link to \p L. Assumes \p L is not null.
  void setLink(SuffixTreeInternalNode *L);

  /// \returns the pointer to the Link node.
  SuffixTreeInternalNode *getLink() const;

  /// The children of this node.
  ///
  /// A child existing on an unsigned integer implies that from the mapping
  /// represented by the current node, there is a way to reach another
  /// mapping by tacking that character on the end of the current string.
  DenseMap<unsigned, SuffixTreeNode *> Children;

  SuffixTreeInternalNode(unsigned StartIdx, unsigned EndIdx,
                         SuffixTreeInternalNode *Link)
      : SuffixTreeNode(NodeKind::ST_Internal, StartIdx), EndIdx(EndIdx),
        Link(Link) {}

  virtual ~SuffixTreeInternalNode() = default;
};

// A node representing a suffix.
struct SuffixTreeLeafNode : SuffixTreeNode {
private:
  /// The start index of the suffix represented by this leaf.
  unsigned SuffixIdx = EmptyIdx;

  /// The end index of this node's substring in the main string.
  ///
  /// Every leaf node must have its \p EndIdx incremented at the end of every
  /// step in the construction algorithm. To avoid having to update O(N)
  /// nodes individually at the end of every step, the end index is stored
  /// as a pointer.
  unsigned *EndIdx = nullptr;

public:
  // LLVM RTTI boilerplate.
  static bool classof(const SuffixTreeNode *N) {
    return N->getKind() == NodeKind::ST_Leaf;
  }

  /// \returns the end index of this node's substring in the entire string.
  unsigned getEndIdx() const override;

  /// \returns the start index of the suffix represented by this leaf.
  unsigned getSuffixIdx() const;

  /// Sets the start index of the suffix represented by this leaf to \p Idx.
  void setSuffixIdx(unsigned Idx);
  SuffixTreeLeafNode(unsigned StartIdx, unsigned *EndIdx)
      : SuffixTreeNode(NodeKind::ST_Leaf, StartIdx), EndIdx(EndIdx) {}

  virtual ~SuffixTreeLeafNode() = default;
};
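
// The classof hooks above enable LLVM-style RTTI. An illustrative sketch of
// dispatching on the node kind with dyn_cast:
//
//   void visit(SuffixTreeNode *N) {
//     if (auto *IN = dyn_cast<SuffixTreeInternalNode>(N)) {
//       for (auto &KidPair : IN->Children)
//         visit(KidPair.second);
//     } else if (auto *LN = dyn_cast<SuffixTreeLeafNode>(N)) {
//       // LN->getSuffixIdx() is the start of the suffix this leaf represents.
//     }
//   }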
} // namespace llvm
#endif // LLVM_SUPPORT_SUFFIXTREE_NODE_H
//===- llvm/Support/HashBuilder.h - Convenient hashing interface-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interface allowing to conveniently build hashes of
// various data types, without relying on the underlying hasher type to know
// about hashed data types.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_HASHBUILDER_H
#define LLVM_SUPPORT_HASHBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/type_traits.h"

#include <iterator>
#include <optional>
#include <utility>

namespace llvm {

namespace hashbuilder_detail {
/// Trait to indicate whether a type's bits can be hashed directly (after
/// endianness correction).
template <typename U>
struct IsHashableData
    : std::integral_constant<bool, is_integral_or_enum<U>::value> {};

} // namespace hashbuilder_detail

/// Declares the hasher member, and functions forwarding directly to the hasher.
template <typename HasherT> class HashBuilderBase {
public:
  template <typename HasherT_ = HasherT>
  using HashResultTy = decltype(std::declval<HasherT_ &>().final());

  HasherT &getHasher() { return Hasher; }

  /// Forward to `HasherT::update(ArrayRef<uint8_t>)`.
  ///
  /// This may not take the size of `Data` into account.
  /// Users of this function should take care to respect endianness
  /// constraints.
  void update(ArrayRef<uint8_t> Data) { this->getHasher().update(Data); }

  /// Forward to `HasherT::update(ArrayRef<uint8_t>)`.
  ///
  /// This may not take the size of `Data` into account.
  /// Users of this function should take care to respect endianness
  /// constraints.
  void update(StringRef Data) {
    update(
        ArrayRef(reinterpret_cast<const uint8_t *>(Data.data()), Data.size()));
  }

  /// Forward to `HasherT::final()` if available.
  template <typename HasherT_ = HasherT> HashResultTy<HasherT_> final() {
    return this->getHasher().final();
  }

  /// Forward to `HasherT::result()` if available.
  template <typename HasherT_ = HasherT> HashResultTy<HasherT_> result() {
    return this->getHasher().result();
  }

protected:
  explicit HashBuilderBase(HasherT &Hasher) : Hasher(Hasher) {}

  template <typename... ArgTypes>
  explicit HashBuilderBase(ArgTypes &&...Args)
      : OptionalHasher(std::in_place, std::forward<ArgTypes>(Args)...),
        Hasher(*OptionalHasher) {}

private:
  std::optional<HasherT> OptionalHasher;
  HasherT &Hasher;
};

/// Implementation of the `HashBuilder` interface.
///
/// `support::endianness::native` is not supported. `HashBuilder` is
/// expected to canonicalize `support::endianness::native` to one of
/// `support::endianness::big` or `support::endianness::little`.
template <typename HasherT, support::endianness Endianness>
class HashBuilderImpl : public HashBuilderBase<HasherT> {
  static_assert(Endianness != support::endianness::native,
                "HashBuilder should canonicalize endianness");

public:
  explicit HashBuilderImpl(HasherT &Hasher)
      : HashBuilderBase<HasherT>(Hasher) {}
  template <typename... ArgTypes>
  explicit HashBuilderImpl(ArgTypes &&...Args)
      : HashBuilderBase<HasherT>(Args...) {}

  /// Implement hashing for hashable data types, e.g. integral or enum values.
  template <typename T>
  std::enable_if_t<hashbuilder_detail::IsHashableData<T>::value,
                   HashBuilderImpl &>
  add(T Value) {
    return adjustForEndiannessAndAdd(Value);
  }

  /// Support hashing `ArrayRef`.
  ///
  /// `Value.size()` is taken into account to ensure cases like
  /// ```
  /// builder.add({1});
  /// builder.add({2, 3});
  /// ```
  /// and
  /// ```
  /// builder.add({1, 2});
  /// builder.add({3});
  /// ```
  /// do not collide.
  template <typename T> HashBuilderImpl &add(ArrayRef<T> Value) {
    // As of implementation time, simply calling `addRange(Value)` would also go
    // through the `update` fast path. But that would rely on the implementation
    // details of `ArrayRef::begin()` and `ArrayRef::end()`. Explicitly call
    // `update` to guarantee the fast path.
    add(Value.size());
    if (hashbuilder_detail::IsHashableData<T>::value &&
        Endianness == support::endian::system_endianness()) {
      this->update(ArrayRef(reinterpret_cast<const uint8_t *>(Value.begin()),
                            Value.size() * sizeof(T)));
    } else {
      for (auto &V : Value)
        add(V);
    }
    return *this;
  }

  /// Support hashing `StringRef`.
  ///
  /// `Value.size()` is taken into account to ensure cases like
  /// ```
  /// builder.add("a");
  /// builder.add("bc");
  /// ```
  /// and
  /// ```
  /// builder.add("ab");
  /// builder.add("c");
  /// ```
  /// do not collide.
  HashBuilderImpl &add(StringRef Value) {
    // As of implementation time, simply calling `addRange(Value)` would also go
    // through `update`. But that would rely on the implementation of
    // `StringRef::begin()` and `StringRef::end()`. Explicitly call `update` to
    // guarantee the fast path.
    add(Value.size());
    this->update(ArrayRef(reinterpret_cast<const uint8_t *>(Value.begin()),
                          Value.size()));
    return *this;
  }

  template <typename T>
  using HasAddHashT =
      decltype(addHash(std::declval<HashBuilderImpl &>(), std::declval<T &>()));
  /// Implement hashing for user-defined `struct`s.
  ///
  /// Any user-defined `struct` can participate in hashing via `HashBuilder` by
  /// providing an `addHash` templated function.
  ///
  /// ```
  /// template <typename HasherT, support::endianness Endianness>
  /// void addHash(HashBuilder<HasherT, Endianness> &HBuilder,
  ///              const UserDefinedStruct &Value);
  /// ```
  ///
  /// For example:
  /// ```
  /// struct SimpleStruct {
  ///   char c;
  ///   int i;
  /// };
  ///
  /// template <typename HasherT, support::endianness Endianness>
  /// void addHash(HashBuilderImpl<HasherT, Endianness> &HBuilder,
  ///              const SimpleStruct &Value) {
  ///   HBuilder.add(Value.c);
  ///   HBuilder.add(Value.i);
  /// }
  /// ```
  ///
  /// To avoid endianness issues, specializations of `addHash` should
  /// generally rely on existing `add`, `addRange`, and `addRangeElements`
  /// functions. If directly using `update`, an implementation must correctly
  /// handle endianness.
  ///
  /// ```
  /// struct __attribute__ ((packed)) StructWithFastHash {
  ///   int I;
  ///   char C;
  ///
  ///   // If possible, we want to hash both `I` and `C` in a single
  ///   // `update` call for performance concerns.
  ///   template <typename HasherT, support::endianness Endianness>
  ///   friend void addHash(HashBuilderImpl<HasherT, Endianness> &HBuilder,
  ///                       const StructWithFastHash &Value) {
  ///     if (Endianness == support::endian::system_endianness()) {
  ///       HBuilder.update(ArrayRef(
  ///           reinterpret_cast<const uint8_t *>(&Value), sizeof(Value)));
  ///     } else {
  ///       // Rely on existing `add` methods to handle endianness.
  ///       HBuilder.add(Value.I);
  ///       HBuilder.add(Value.C);
  ///     }
  ///   }
  /// };
  /// ```
  ///
  /// To avoid collisions, specialization of `addHash` for variable-size
  /// types must take the size into account.
  ///
  /// For example:
  /// ```
  /// struct CustomContainer {
  /// private:
  ///   size_t Size;
  ///   int Elements[100];
  ///
  /// public:
  ///   CustomContainer(size_t Size) : Size(Size) {
  ///     for (size_t I = 0; I != Size; ++I)
  ///       Elements[I] = I;
  ///   }
  ///   template <typename HasherT, support::endianness Endianness>
  ///   friend void addHash(HashBuilderImpl<HasherT, Endianness> &HBuilder,
  ///                       const CustomContainer &Value) {
  ///     if (Endianness == support::endian::system_endianness()) {
  ///       HBuilder.update(ArrayRef(
  ///           reinterpret_cast<const uint8_t *>(&Value.Size),
  ///           sizeof(Value.Size) + Value.Size * sizeof(Value.Elements[0])));
  ///     } else {
  ///       // `addRange` will take care of encoding the size.
  ///       HBuilder.addRange(&Value.Elements[0], &Value.Elements[0] +
  ///       Value.Size);
  ///     }
  ///   }
  /// };
  /// ```
  template <typename T>
  std::enable_if_t<is_detected<HasAddHashT, T>::value &&
                       !hashbuilder_detail::IsHashableData<T>::value,
                   HashBuilderImpl &>
  add(const T &Value) {
    addHash(*this, Value);
    return *this;
  }

  template <typename T1, typename T2>
  HashBuilderImpl &add(const std::pair<T1, T2> &Value) {
    return add(Value.first, Value.second);
  }

  template <typename... Ts> HashBuilderImpl &add(const std::tuple<Ts...> &Arg) {
    std::apply([this](const auto &...Args) { this->add(Args...); }, Arg);
    return *this;
  }

  /// A convenience variadic helper.
  /// It simply iterates over its arguments, in order.
  /// ```
  /// add(Arg1, Arg2);
  /// ```
  /// is equivalent to
  /// ```
  /// add(Arg1)
  /// add(Arg2)
  /// ```
  template <typename... Ts>
  std::enable_if_t<(sizeof...(Ts) > 1), HashBuilderImpl &>
  add(const Ts &...Args) {
    return (add(Args), ...);
  }

  template <typename ForwardIteratorT>
  HashBuilderImpl &addRange(ForwardIteratorT First, ForwardIteratorT Last) {
    add(std::distance(First, Last));
    return addRangeElements(First, Last);
  }

  template <typename RangeT> HashBuilderImpl &addRange(const RangeT &Range) {
    return addRange(adl_begin(Range), adl_end(Range));
  }

  template <typename ForwardIteratorT>
  HashBuilderImpl &addRangeElements(ForwardIteratorT First,
                                    ForwardIteratorT Last) {
    return addRangeElementsImpl(
        First, Last,
        typename std::iterator_traits<ForwardIteratorT>::iterator_category());
  }

  template <typename RangeT>
  HashBuilderImpl &addRangeElements(const RangeT &Range) {
    return addRangeElements(adl_begin(Range), adl_end(Range));
  }

  template <typename T>
  using HasByteSwapT = decltype(support::endian::byte_swap(
      std::declval<T &>(), support::endianness::little));
  /// Adjust `Value` for the target endianness and add it to the hash.
  template <typename T>
  std::enable_if_t<is_detected<HasByteSwapT, T>::value, HashBuilderImpl &>
  adjustForEndiannessAndAdd(const T &Value) {
    T SwappedValue = support::endian::byte_swap(Value, Endianness);
    this->update(ArrayRef(reinterpret_cast<const uint8_t *>(&SwappedValue),
                          sizeof(SwappedValue)));
    return *this;
  }

private:
  // FIXME: Once available, specialize this function for `contiguous_iterator`s,
  // and use it for `ArrayRef` and `StringRef`.
  template <typename ForwardIteratorT>
  HashBuilderImpl &addRangeElementsImpl(ForwardIteratorT First,
                                        ForwardIteratorT Last,
                                        std::forward_iterator_tag) {
    for (auto It = First; It != Last; ++It)
      add(*It);
    return *this;
  }

  template <typename T>
  std::enable_if_t<hashbuilder_detail::IsHashableData<T>::value &&
                       Endianness == support::endian::system_endianness(),
                   HashBuilderImpl &>
  addRangeElementsImpl(T *First, T *Last, std::forward_iterator_tag) {
    this->update(ArrayRef(reinterpret_cast<const uint8_t *>(First),
                          (Last - First) * sizeof(T)));
    return *this;
  }
};

/// Interface to help hash various types through a hasher type.
///
/// Via provided specializations of `add`, `addRange`, and `addRangeElements`
/// functions, various types (e.g. `ArrayRef`, `StringRef`, etc.) can be hashed
/// without requiring any knowledge of hashed types from the hasher type.
///
/// The only method expected from the templated hasher type `HasherT` is:
/// * void update(ArrayRef<uint8_t> Data)
///
/// Additionally, the following methods will be forwarded to the hasher type:
/// * decltype(std::declval<HasherT &>().final()) final()
/// * decltype(std::declval<HasherT &>().result()) result()
///
/// From a user point of view, the interface provides the following:
/// * `template<typename T> add(const T &Value)`
///   The `add` function implements hashing of various types.
/// * `template <typename ItT> void addRange(ItT First, ItT Last)`
///   The `addRange` function is designed to aid hashing a range of values.
///   It explicitly adds the size of the range in the hash.
/// * `template <typename ItT> void addRangeElements(ItT First, ItT Last)`
///   The `addRangeElements` function is also designed to aid hashing a range of
///   values. In contrast to `addRange`, it **ignores** the size of the range,
///   behaving as if elements were added one at a time with `add`.
///
/// User-defined `struct` types can participate in this interface by providing
/// an `addHash` templated function. See the associated template specialization
/// for details.
///
/// This interface does not impose requirements on the hasher
/// `update(ArrayRef<uint8_t> Data)` method. We want to avoid collisions for
/// variable-size types; for example for
/// ```
/// builder.add({1});
/// builder.add({2, 3});
/// ```
/// and
/// ```
/// builder.add({1, 2});
/// builder.add({3});
/// ```
/// . Thus, specializations of `add` and `addHash` for variable-size types must
/// not assume that the hasher type considers the size as part of the hash; they
/// must explicitly add the size to the hash. See for example specializations
/// for `ArrayRef` and `StringRef`.
///
/// Additionally, since types are eventually forwarded to the hasher's
/// `void update(ArrayRef<uint8_t>)` method, endianness plays a role in the hash
/// computation (for example when computing `add((int)123)`).
/// Specifying a non-`native` `Endianness` template parameter allows computing
/// a stable hash across platforms with different endianness.
template <class HasherT, support::endianness Endianness>
using HashBuilder =
    HashBuilderImpl<HasherT, (Endianness == support::endianness::native
                                  ? support::endian::system_endianness()
                                  : Endianness)>;
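
// An illustrative sketch (assumes llvm/Support/SHA256.h, whose SHA256 hasher
// provides `update(ArrayRef<uint8_t>)` and `final()`):
//
//   HashBuilder<SHA256, support::endianness::little> HBuilder;
//   HBuilder.add(42, StringRef("foo"));        // variadic add
//   HBuilder.addRange(std::vector<int>{1, 2}); // size-prefixed range
//   auto Digest = HBuilder.final();            // e.g. std::array<uint8_t, 32>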

namespace hashbuilder_detail {
class HashCodeHasher {
public:
  HashCodeHasher() : Code(0) {}
  void update(ArrayRef<uint8_t> Data) {
    hash_code DataCode = hash_value(Data);
    Code = hash_combine(Code, DataCode);
  }
  hash_code Code;
};

using HashCodeHashBuilder = HashBuilder<hashbuilder_detail::HashCodeHasher,
                                        support::endianness::native>;
} // namespace hashbuilder_detail

/// Provide a default implementation of `hash_value` when `addHash(const T &)`
/// is supported.
template <typename T>
std::enable_if_t<
    is_detected<hashbuilder_detail::HashCodeHashBuilder::HasAddHashT, T>::value,
    hash_code>
hash_value(const T &Value) {
  hashbuilder_detail::HashCodeHashBuilder HBuilder;
  HBuilder.add(Value);
  return HBuilder.getHasher().Code;
}
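
// With this in place, any type providing an `addHash` friend (like the
// `SimpleStruct` sketch in the documentation above) automatically gets a
// `hash_code`-based `hash_value`. Illustrative use:
//
//   SimpleStruct S{'a', 1};
//   hash_code H = hash_value(S); // via HashCodeHashBuilder
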
} // end namespace llvm

#endif // LLVM_SUPPORT_HASHBUILDER_H
//===--- Duration.h - wrapper around std::chrono::Duration ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  The sole purpose of this file is to avoid the dependency on <chrono> in
//  raw_ostream.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DURATION_H
#define LLVM_SUPPORT_DURATION_H

#include <chrono>

namespace llvm {
class Duration {
  std::chrono::milliseconds Value;

public:
  Duration(std::chrono::milliseconds Value) : Value(Value) {}
  std::chrono::milliseconds getDuration() const { return Value; }
};
} // namespace llvm

#endif
//===--- Base64.h - Base64 Encoder/Decoder ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides generic base64 encoder/decoder.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BASE64_H
#define LLVM_SUPPORT_BASE64_H

#include "llvm/Support/Error.h"
#include <cstdint>
#include <string>
#include <vector>

namespace llvm {

template <class InputBytes> std::string encodeBase64(InputBytes const &Bytes) {
  static const char Table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                              "abcdefghijklmnopqrstuvwxyz"
                              "0123456789+/";
  std::string Buffer;
  Buffer.resize(((Bytes.size() + 2) / 3) * 4);

  size_t i = 0, j = 0;
  for (size_t n = Bytes.size() / 3 * 3; i < n; i += 3, j += 4) {
    uint32_t x = ((unsigned char)Bytes[i] << 16) |
                 ((unsigned char)Bytes[i + 1] << 8) |
                 (unsigned char)Bytes[i + 2];
    Buffer[j + 0] = Table[(x >> 18) & 63];
    Buffer[j + 1] = Table[(x >> 12) & 63];
    Buffer[j + 2] = Table[(x >> 6) & 63];
    Buffer[j + 3] = Table[x & 63];
  }
  if (i + 1 == Bytes.size()) {
    uint32_t x = ((unsigned char)Bytes[i] << 16);
    Buffer[j + 0] = Table[(x >> 18) & 63];
    Buffer[j + 1] = Table[(x >> 12) & 63];
    Buffer[j + 2] = '=';
    Buffer[j + 3] = '=';
  } else if (i + 2 == Bytes.size()) {
    uint32_t x =
        ((unsigned char)Bytes[i] << 16) | ((unsigned char)Bytes[i + 1] << 8);
    Buffer[j + 0] = Table[(x >> 18) & 63];
    Buffer[j + 1] = Table[(x >> 12) & 63];
    Buffer[j + 2] = Table[(x >> 6) & 63];
    Buffer[j + 3] = '=';
  }
  return Buffer;
}
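
// Illustrative expected outputs (RFC 4648 test vectors):
//
//   encodeBase64(StringRef("foo")) == "Zm9v"
//   encodeBase64(StringRef("fo"))  == "Zm8="   // 2 input bytes -> one '='
//   encodeBase64(StringRef("f"))   == "Zg=="   // 1 input byte  -> two '='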

llvm::Error decodeBase64(llvm::StringRef Input, std::vector<char> &Output);

} // end namespace llvm

#endif
//===-- Automaton.h - Support for driving TableGen-produced DFAs ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a class that drives and introspects deterministic
// finite-state automata (DFAs) as generated by TableGen's -gen-automata
// backend.
//
// For a description of how to define an automaton, see
// include/llvm/TableGen/Automaton.td.
//
// One important detail is that these deterministic automata are created from
// (potentially) nondeterministic definitions. Therefore a unique sequence of
// input symbols will produce one path through the DFA but multiple paths
// through the original NFA. An automaton by default only returns "accepted" or
// "not accepted", but frequently we want to analyze what NFA path was taken.
// Finding a path through the NFA states that results in a DFA state can help
// answer *what* the solution to a problem was, not just that there exists a
// solution.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AUTOMATON_H
#define LLVM_SUPPORT_AUTOMATON_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include <deque>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace llvm {

using NfaPath = SmallVector<uint64_t, 4>;

/// Forward define the pair type used by the automata transition info tables.
///
/// Experimental results with large tables have shown a significant (multiple
/// orders of magnitude) parsing speedup by using a custom struct here with a
/// trivial constructor rather than std::pair<uint64_t, uint64_t>.
struct NfaStatePair {
  uint64_t FromDfaState, ToDfaState;

  bool operator<(const NfaStatePair &Other) const {
    return std::make_tuple(FromDfaState, ToDfaState) <
           std::make_tuple(Other.FromDfaState, Other.ToDfaState);
  }
};

namespace internal {
/// The internal class that maintains all possible paths through an NFA based
/// on a path through the DFA.
class NfaTranscriber {
private:
  /// Cached transition table. This is a table of NfaStatePairs that contains
  /// zero-terminated sequences pointed to by DFA transitions.
  ArrayRef<NfaStatePair> TransitionInfo;

  /// A simple linked-list of traversed states that can have a shared tail. The
  /// traversed path is stored in reverse order with the latest state as the
  /// head.
  struct PathSegment {
    uint64_t State;
    PathSegment *Tail;
  };

  /// We allocate segment objects frequently. Allocate them upfront and dispose
  /// at the end of a traversal rather than hammering the system allocator.
  SpecificBumpPtrAllocator<PathSegment> Allocator;

  /// Heads of each tracked path. These are not ordered.
  std::deque<PathSegment *> Heads;

  /// The returned paths. This is populated during getPaths.
  SmallVector<NfaPath, 4> Paths;

  /// Create a new segment and return it.
  PathSegment *makePathSegment(uint64_t State, PathSegment *Tail) {
    PathSegment *P = Allocator.Allocate();
    *P = {State, Tail};
    return P;
  }

  /// Pairs defines a sequence of possible NFA transitions for a single DFA
  /// transition.
  void transition(ArrayRef<NfaStatePair> Pairs) {
    // Iterate over all existing heads. We will mutate the Heads deque during
    // iteration.
    unsigned NumHeads = Heads.size();
    for (unsigned I = 0; I < NumHeads; ++I) {
      PathSegment *Head = Heads[I];
      // The sequence of pairs is sorted. Select the set of pairs that
      // transition from the current head state.
      auto PI = lower_bound(Pairs, NfaStatePair{Head->State, 0ULL});
      auto PE = upper_bound(Pairs, NfaStatePair{Head->State, INT64_MAX});
      // For every transition from the current head state, add a new path
      // segment.
      for (; PI != PE; ++PI)
        if (PI->FromDfaState == Head->State)
          Heads.push_back(makePathSegment(PI->ToDfaState, Head));
    }
    // Now we've iterated over all the initial heads and added new ones,
    // dispose of the original heads.
    Heads.erase(Heads.begin(), std::next(Heads.begin(), NumHeads));
  }

public:
  NfaTranscriber(ArrayRef<NfaStatePair> TransitionInfo)
      : TransitionInfo(TransitionInfo) {
    reset();
  }

  ArrayRef<NfaStatePair> getTransitionInfo() const {
    return TransitionInfo;
  }

  void reset() {
    Paths.clear();
    Heads.clear();
    Allocator.DestroyAll();
    // The initial NFA state is 0.
    Heads.push_back(makePathSegment(0ULL, nullptr));
  }

  void transition(unsigned TransitionInfoIdx) {
    unsigned EndIdx = TransitionInfoIdx;
    while (TransitionInfo[EndIdx].ToDfaState != 0)
      ++EndIdx;
    ArrayRef<NfaStatePair> Pairs(&TransitionInfo[TransitionInfoIdx],
                                 EndIdx - TransitionInfoIdx);
    transition(Pairs);
  }

  ArrayRef<NfaPath> getPaths() {
    Paths.clear();
    for (auto *Head : Heads) {
      NfaPath P;
      while (Head->State != 0) {
        P.push_back(Head->State);
        Head = Head->Tail;
      }
      std::reverse(P.begin(), P.end());
      Paths.push_back(std::move(P));
    }
    return Paths;
  }
};
} // namespace internal

/// A deterministic finite-state automaton. The automaton is defined in
/// TableGen; this object drives an automaton defined by tblgen-emitted tables.
///
/// An automaton accepts a sequence of input tokens ("actions"). This class is
/// templated on the type of these actions.
template <typename ActionT> class Automaton {
  /// Map from {State, Action} to {NewState, TransitionInfoIdx}.
  /// TransitionInfoIdx is used by the DfaTranscriber to analyze the transition.
  /// FIXME: This uses a std::map because ActionT can be a pair type including
  /// an enum. In particular DenseMapInfo<ActionT> must be defined to use
  /// DenseMap here.
  /// This is a shared_ptr to allow very quick copy-construction of Automata;
  /// this state is immutable after construction so this is safe.
  using MapTy = std::map<std::pair<uint64_t, ActionT>, std::pair<uint64_t, unsigned>>;
  std::shared_ptr<MapTy> M;
  /// An optional transcription object. This uses much more state than simply
  /// traversing the DFA for acceptance, so is heap allocated.
  std::shared_ptr<internal::NfaTranscriber> Transcriber;
  /// The initial DFA state is 1.
  uint64_t State = 1;
  /// True if we should transcribe and false if not (even if Transcriber is defined).
  bool Transcribe;

public:
  /// Create an automaton.
  /// \param Transitions The Transitions table as created by TableGen. Note that
  ///                    because the action type differs per automaton, the
  ///                    table type is templated as ArrayRef<InfoT>.
  /// \param TranscriptionTable The TransitionInfo table as created by TableGen.
  ///
  /// Providing the TranscriptionTable argument as non-empty will enable the
  /// use of transcription, which analyzes the possible paths in the original
  /// NFA taken by the DFA. NOTE: This is substantially more work than simply
  /// driving the DFA, so unless you require the getPaths() method, leave this
  /// empty.
  template <typename InfoT>
  Automaton(ArrayRef<InfoT> Transitions,
            ArrayRef<NfaStatePair> TranscriptionTable = {}) {
    if (!TranscriptionTable.empty())
      Transcriber =
          std::make_shared<internal::NfaTranscriber>(TranscriptionTable);
    Transcribe = Transcriber != nullptr;
    M = std::make_shared<MapTy>();
    for (const auto &I : Transitions)
      // Greedily read and cache the transition table.
      M->emplace(std::make_pair(I.FromDfaState, I.Action),
                 std::make_pair(I.ToDfaState, I.InfoIdx));
  }
  Automaton(const Automaton &Other)
      : M(Other.M), State(Other.State), Transcribe(Other.Transcribe) {
    // Transcriber is not thread-safe, so create a new instance on copy.
    if (Other.Transcriber)
      Transcriber = std::make_shared<internal::NfaTranscriber>(
          Other.Transcriber->getTransitionInfo());
  }

  /// Reset the automaton to its initial state.
  void reset() {
    State = 1;
    if (Transcriber)
      Transcriber->reset();
  }

  /// Enable or disable transcription. Transcription is only available if
  /// TranscriptionTable was provided to the constructor.
  void enableTranscription(bool Enable = true) {
    assert(Transcriber &&
           "Transcription is only available if TranscriptionTable was provided "
           "to the Automaton constructor");
    Transcribe = Enable;
  }

  /// Transition the automaton based on input symbol A. Return true if the
  /// automaton transitioned to a valid state, false if the automaton
  /// transitioned to an invalid state.
  ///
  /// If this function returns false, all methods are undefined until reset() is
  /// called.
  bool add(const ActionT &A) {
    auto I = M->find({State, A});
    if (I == M->end())
      return false;
    if (Transcriber && Transcribe)
      Transcriber->transition(I->second.second);
    State = I->second.first;
    return true;
  }

  /// Return true if the automaton can be transitioned based on input symbol A.
  bool canAdd(const ActionT &A) {
    auto I = M->find({State, A});
    return I != M->end();
  }

  /// Obtain a set of possible paths through the input nondeterministic
  /// automaton that could be obtained from the sequence of input actions
  /// presented to this deterministic automaton.
  ArrayRef<NfaPath> getNfaPaths() {
    assert(Transcriber && Transcribe &&
           "Can only obtain NFA paths if transcribing!");
    return Transcriber->getPaths();
  }
};
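
// An illustrative sketch (MyInfo and MyTransitions stand for a
// TableGen-emitted transition-table row type and table; the action type here
// is uint64_t):
//
//   Automaton<uint64_t> A(ArrayRef<MyInfo>(MyTransitions));
//   for (uint64_t Action : Input)
//     if (!A.add(Action))
//       break; // input rejected; call A.reset() before reusing A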

} // namespace llvm

#endif // LLVM_SUPPORT_AUTOMATON_H
//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines a set of templates that efficiently compute a dominator
/// tree over a generic graph. This is used typically in LLVM for fast
/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
/// graph types.
///
/// Unlike the ADT/* graph algorithms, the generic dominator tree has more
/// requirements on the graph's NodeRef. The NodeRef should be a pointer, and
/// either NodeRef->getParent() must return the parent node that is also a
/// pointer, or DomTreeNodeTraits needs to be specialized.
///
/// FIXME: Maybe GenericDomTree needs a TreeTraits, instead of GraphTraits.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
#define LLVM_SUPPORT_GENERICDOMTREE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CFGDiff.h"
#include "llvm/Support/CFGUpdate.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>

namespace llvm {

template <typename NodeT, bool IsPostDom>
class DominatorTreeBase;

namespace DomTreeBuilder {
template <typename DomTreeT>
struct SemiNCAInfo;
}  // namespace DomTreeBuilder

/// Base class for the actual dominator tree node.
template <class NodeT> class DomTreeNodeBase {
  friend class PostDominatorTree;
  friend class DominatorTreeBase<NodeT, false>;
  friend class DominatorTreeBase<NodeT, true>;
  friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
  friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;

  NodeT *TheBB;
  DomTreeNodeBase *IDom;
  unsigned Level;
  SmallVector<DomTreeNodeBase *, 4> Children;
  mutable unsigned DFSNumIn = ~0;
  mutable unsigned DFSNumOut = ~0;

 public:
  DomTreeNodeBase(NodeT *BB, DomTreeNodeBase *iDom)
      : TheBB(BB), IDom(iDom), Level(IDom ? IDom->Level + 1 : 0) {}

  using iterator = typename SmallVector<DomTreeNodeBase *, 4>::iterator;
  using const_iterator =
      typename SmallVector<DomTreeNodeBase *, 4>::const_iterator;

  iterator begin() { return Children.begin(); }
  iterator end() { return Children.end(); }
  const_iterator begin() const { return Children.begin(); }
  const_iterator end() const { return Children.end(); }

  DomTreeNodeBase *const &back() const { return Children.back(); }
  DomTreeNodeBase *&back() { return Children.back(); }

  iterator_range<iterator> children() { return make_range(begin(), end()); }
  iterator_range<const_iterator> children() const {
    return make_range(begin(), end());
  }

  NodeT *getBlock() const { return TheBB; }
  DomTreeNodeBase *getIDom() const { return IDom; }
  unsigned getLevel() const { return Level; }

  std::unique_ptr<DomTreeNodeBase> addChild(
      std::unique_ptr<DomTreeNodeBase> C) {
    Children.push_back(C.get());
    return C;
  }

  bool isLeaf() const { return Children.empty(); }
  size_t getNumChildren() const { return Children.size(); }

  void clearAllChildren() { Children.clear(); }

  bool compare(const DomTreeNodeBase *Other) const {
    if (getNumChildren() != Other->getNumChildren())
      return true;

    if (Level != Other->Level) return true;

    SmallPtrSet<const NodeT *, 4> OtherChildren;
    for (const DomTreeNodeBase *I : *Other) {
      const NodeT *Nd = I->getBlock();
      OtherChildren.insert(Nd);
    }

    for (const DomTreeNodeBase *I : *this) {
      const NodeT *N = I->getBlock();
      if (OtherChildren.count(N) == 0)
        return true;
    }
    return false;
  }

  void setIDom(DomTreeNodeBase *NewIDom) {
    assert(IDom && "No immediate dominator?");
    if (IDom == NewIDom) return;

    auto I = find(IDom->Children, this);
    assert(I != IDom->Children.end() &&
           "Not in immediate dominator children set!");
    // I am no longer your child...
    IDom->Children.erase(I);

    // Switch to new dominator
    IDom = NewIDom;
    IDom->Children.push_back(this);

    UpdateLevel();
  }

  /// getDFSNumIn/getDFSNumOut - These return the DFS visitation order for nodes
  /// in the dominator tree. They are only guaranteed valid if
  /// updateDFSNumbers() has been called.
  unsigned getDFSNumIn() const { return DFSNumIn; }
  unsigned getDFSNumOut() const { return DFSNumOut; }

private:
  // Return true if this node is dominated by other. Use this only if DFS info
  // is valid.
  bool DominatedBy(const DomTreeNodeBase *other) const {
    return this->DFSNumIn >= other->DFSNumIn &&
           this->DFSNumOut <= other->DFSNumOut;
  }

  void UpdateLevel() {
    assert(IDom);
    if (Level == IDom->Level + 1) return;

    SmallVector<DomTreeNodeBase *, 64> WorkStack = {this};

    while (!WorkStack.empty()) {
      DomTreeNodeBase *Current = WorkStack.pop_back_val();
      Current->Level = Current->IDom->Level + 1;

      for (DomTreeNodeBase *C : *Current) {
        assert(C->IDom);
        if (C->Level != C->IDom->Level + 1) WorkStack.push_back(C);
      }
    }
  }
};

template <class NodeT>
raw_ostream &operator<<(raw_ostream &O, const DomTreeNodeBase<NodeT> *Node) {
  if (Node->getBlock())
    Node->getBlock()->printAsOperand(O, false);
  else
    O << " <<exit node>>";

  O << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "} ["
    << Node->getLevel() << "]\n";

  return O;
}

template <class NodeT>
void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
                  unsigned Lev) {
  O.indent(2 * Lev) << "[" << Lev << "] " << N;
  for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
                                                       E = N->end();
       I != E; ++I)
    PrintDomTree<NodeT>(*I, O, Lev + 1);
}

namespace DomTreeBuilder {
// The routines below are provided in a separate header but referenced here.
template <typename DomTreeT>
void Calculate(DomTreeT &DT);

template <typename DomTreeT>
void CalculateWithUpdates(DomTreeT &DT,
                          ArrayRef<typename DomTreeT::UpdateType> Updates);

template <typename DomTreeT>
void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
                typename DomTreeT::NodePtr To);

template <typename DomTreeT>
void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
                typename DomTreeT::NodePtr To);

template <typename DomTreeT>
void ApplyUpdates(DomTreeT &DT,
                  GraphDiff<typename DomTreeT::NodePtr,
                            DomTreeT::IsPostDominator> &PreViewCFG,
                  GraphDiff<typename DomTreeT::NodePtr,
                            DomTreeT::IsPostDominator> *PostViewCFG);

template <typename DomTreeT>
bool Verify(const DomTreeT &DT, typename DomTreeT::VerificationLevel VL);
}  // namespace DomTreeBuilder

/// Default DomTreeNode traits for NodeT. The default implementation assumes a
/// Function-like NodeT. Can be specialized to support different node types.
template <typename NodeT> struct DomTreeNodeTraits {
  using NodeType = NodeT;
  using NodePtr = NodeT *;
  using ParentPtr = decltype(std::declval<NodePtr>()->getParent());
  static_assert(std::is_pointer_v<ParentPtr>,
                "Currently NodeT's parent must be a pointer type");
  using ParentType = std::remove_pointer_t<ParentPtr>;

  static NodeT *getEntryNode(ParentPtr Parent) { return &Parent->front(); }
  static ParentPtr getParent(NodePtr BB) { return BB->getParent(); }
};
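
/// Example: a minimal sketch of specializing DomTreeNodeTraits for a
/// hypothetical node type whose parent is not reachable via getParent()
/// (MyNode and MyGraph are illustrative names, not part of LLVM):
/// \code
///   struct MyGraph;
///   struct MyNode;
///   template <> struct DomTreeNodeTraits<MyNode> {
///     using NodeType = MyNode;
///     using NodePtr = MyNode *;
///     using ParentPtr = MyGraph *;
///     using ParentType = MyGraph;
///     static MyNode *getEntryNode(MyGraph *G);  // user-provided
///     static MyGraph *getParent(MyNode *N);     // user-provided
///   };
/// \endcode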

/// Core dominator tree base class.
///
/// This class is a generic template over graph nodes. It is instantiated for
/// various graphs in the LLVM IR or in the code generator.
template <typename NodeT, bool IsPostDom>
class DominatorTreeBase {
 public:
  static_assert(std::is_pointer_v<typename GraphTraits<NodeT *>::NodeRef>,
                "Currently DominatorTreeBase supports only pointer nodes");
  using NodeTrait = DomTreeNodeTraits<NodeT>;
  using NodeType = typename NodeTrait::NodeType;
  using NodePtr = typename NodeTrait::NodePtr;
  using ParentPtr = typename NodeTrait::ParentPtr;
  static_assert(std::is_pointer_v<ParentPtr>,
                "Currently NodeT's parent must be a pointer type");
  using ParentType = std::remove_pointer_t<ParentPtr>;
  static constexpr bool IsPostDominator = IsPostDom;

  using UpdateType = cfg::Update<NodePtr>;
  using UpdateKind = cfg::UpdateKind;
  static constexpr UpdateKind Insert = UpdateKind::Insert;
  static constexpr UpdateKind Delete = UpdateKind::Delete;

  enum class VerificationLevel { Fast, Basic, Full };

protected:
  // Dominators always have a single root, postdominators can have more.
  SmallVector<NodeT *, IsPostDom ? 4 : 1> Roots;

  using DomTreeNodeMapType =
     DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
  DomTreeNodeMapType DomTreeNodes;
  DomTreeNodeBase<NodeT> *RootNode = nullptr;
  ParentPtr Parent = nullptr;

  mutable bool DFSInfoValid = false;
  mutable unsigned int SlowQueries = 0;

  friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;

 public:
  DominatorTreeBase() = default;

  DominatorTreeBase(DominatorTreeBase &&Arg)
      : Roots(std::move(Arg.Roots)),
        DomTreeNodes(std::move(Arg.DomTreeNodes)),
        RootNode(Arg.RootNode),
        Parent(Arg.Parent),
        DFSInfoValid(Arg.DFSInfoValid),
        SlowQueries(Arg.SlowQueries) {
    Arg.wipe();
  }

  DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
    Roots = std::move(RHS.Roots);
    DomTreeNodes = std::move(RHS.DomTreeNodes);
    RootNode = RHS.RootNode;
    Parent = RHS.Parent;
    DFSInfoValid = RHS.DFSInfoValid;
    SlowQueries = RHS.SlowQueries;
    RHS.wipe();
    return *this;
  }

  DominatorTreeBase(const DominatorTreeBase &) = delete;
  DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;

  /// Iteration over roots.
  ///
  /// This may include multiple blocks if we are computing post dominators.
  /// For forward dominators, this will always be a single block (the entry
  /// block).
  using root_iterator = typename SmallVectorImpl<NodeT *>::iterator;
  using const_root_iterator = typename SmallVectorImpl<NodeT *>::const_iterator;

  root_iterator root_begin() { return Roots.begin(); }
  const_root_iterator root_begin() const { return Roots.begin(); }
  root_iterator root_end() { return Roots.end(); }
  const_root_iterator root_end() const { return Roots.end(); }

  size_t root_size() const { return Roots.size(); }

  iterator_range<root_iterator> roots() {
    return make_range(root_begin(), root_end());
  }
  iterator_range<const_root_iterator> roots() const {
    return make_range(root_begin(), root_end());
  }

  /// isPostDominator - Returns true if this analysis is based on
  /// postdominators.
  bool isPostDominator() const { return IsPostDominator; }

  /// compare - Return false if the other dominator tree base matches this
  /// dominator tree base. Otherwise return true.
  bool compare(const DominatorTreeBase &Other) const {
    if (Parent != Other.Parent) return true;

    if (Roots.size() != Other.Roots.size())
      return true;

    if (!std::is_permutation(Roots.begin(), Roots.end(), Other.Roots.begin()))
      return true;

    const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
    if (DomTreeNodes.size() != OtherDomTreeNodes.size())
      return true;

    for (const auto &DomTreeNode : DomTreeNodes) {
      NodeT *BB = DomTreeNode.first;
      typename DomTreeNodeMapType::const_iterator OI =
          OtherDomTreeNodes.find(BB);
      if (OI == OtherDomTreeNodes.end())
        return true;

      DomTreeNodeBase<NodeT> &MyNd = *DomTreeNode.second;
      DomTreeNodeBase<NodeT> &OtherNd = *OI->second;

      if (MyNd.compare(&OtherNd))
        return true;
    }

    return false;
  }

  /// getNode - return the (Post)DominatorTree node for the specified basic
  /// block.  This is the same as using operator[] on this class.  The result
  /// may (but is not required to) be null for a statically unreachable block
  /// (forward-unreachable for dominators, backward-unreachable for postdoms).
  DomTreeNodeBase<NodeT> *getNode(const NodeT *BB) const {
    auto I = DomTreeNodes.find(BB);
    if (I != DomTreeNodes.end())
      return I->second.get();
    return nullptr;
  }

  /// See getNode.
  DomTreeNodeBase<NodeT> *operator[](const NodeT *BB) const {
    return getNode(BB);
  }

  /// getRootNode - This returns the entry node for the CFG of the function.  If
  /// this tree represents the post-dominance relations for a function, however,
  /// this root may be a node with the block == NULL.  This is the case when
  /// there are multiple exit nodes from a particular function.  Consumers of
  /// post-dominance information must be capable of dealing with this
  /// possibility.
  ///
  DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
  const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }

  /// Get all nodes dominated by R, including R itself.
  void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
    Result.clear();
    const DomTreeNodeBase<NodeT> *RN = getNode(R);
    if (!RN)
      return; // If R is unreachable, it will not be present in the DOM tree.
    SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
    WL.push_back(RN);

    while (!WL.empty()) {
      const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
      Result.push_back(N->getBlock());
      WL.append(N->begin(), N->end());
    }
  }

  /// properlyDominates - Returns true iff A dominates B and A != B.
  /// Note that this is not a constant time operation!
  ///
  bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
                         const DomTreeNodeBase<NodeT> *B) const {
    if (!A || !B)
      return false;
    if (A == B)
      return false;
    return dominates(A, B);
  }

  bool properlyDominates(const NodeT *A, const NodeT *B) const;

  /// isReachableFromEntry - Return true if A is dominated by the entry
  /// block of the function containing it.
  bool isReachableFromEntry(const NodeT *A) const {
    assert(!this->isPostDominator() &&
           "This is not implemented for post dominators");
    return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
  }

  bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }

  /// dominates - Returns true iff A dominates B.  Note that this is not a
  /// constant time operation!
  ///
  bool dominates(const DomTreeNodeBase<NodeT> *A,
                 const DomTreeNodeBase<NodeT> *B) const {
    // A node trivially dominates itself.
    if (B == A)
      return true;

    // An unreachable node is dominated by anything.
    if (!isReachableFromEntry(B))
      return true;

    // And dominates nothing.
    if (!isReachableFromEntry(A))
      return false;

    if (B->getIDom() == A) return true;

    if (A->getIDom() == B) return false;

    // A can only dominate B if it is higher in the tree.
    if (A->getLevel() >= B->getLevel()) return false;

    // Compare the result of the tree walk and the dfs numbers, if expensive
    // checks are enabled.
#ifdef EXPENSIVE_CHECKS
    assert((!DFSInfoValid ||
            (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
           "Tree walk disagrees with dfs numbers!");
#endif

    if (DFSInfoValid)
      return B->DominatedBy(A);

    // If we end up with too many slow queries, just update the
    // DFS numbers on the theory that we are going to keep querying.
    SlowQueries++;
    if (SlowQueries > 32) {
      updateDFSNumbers();
      return B->DominatedBy(A);
    }

    return dominatedBySlowTreeWalk(A, B);
  }

  bool dominates(const NodeT *A, const NodeT *B) const;
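
  /// Example usage (an illustrative sketch; DT is a tree computed over the
  /// containing function, and A and B are its nodes):
  /// \code
  ///   if (DT.dominates(A, B)) {
  ///     // Every path from the entry to B passes through A (A == B counts).
  ///   }
  /// \endcode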

  NodeT *getRoot() const {
    assert(this->Roots.size() == 1 && "Should always have entry node!");
    return this->Roots[0];
  }

  /// Find the nearest common dominator of basic blocks A and B. Both A and B
  /// must have tree nodes.
  NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) const {
    assert(A && B && "Pointers are not valid");
    assert(NodeTrait::getParent(A) == NodeTrait::getParent(B) &&
           "Two blocks are not in same function");

    // If either A or B is the entry block, then it is the nearest common
    // dominator (for forward-dominators).
    if (!isPostDominator()) {
      NodeT &Entry =
          *DomTreeNodeTraits<NodeT>::getEntryNode(NodeTrait::getParent(A));
      if (A == &Entry || B == &Entry)
        return &Entry;
    }

    DomTreeNodeBase<NodeT> *NodeA = getNode(A);
    DomTreeNodeBase<NodeT> *NodeB = getNode(B);
    assert(NodeA && "A must be in the tree");
    assert(NodeB && "B must be in the tree");

    // Use level information to go up the tree until the levels match. Then
    // continue going up til we arrive at the same node.
    while (NodeA != NodeB) {
      if (NodeA->getLevel() < NodeB->getLevel()) std::swap(NodeA, NodeB);

      NodeA = NodeA->IDom;
    }

    return NodeA->getBlock();
  }
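
  /// Example (a sketch; A and B are blocks of the same function that have
  /// tree nodes):
  /// \code
  ///   NodeT *NCD = DT.findNearestCommonDominator(A, B);
  ///   // NCD dominates both A and B; no node deeper in the tree does.
  /// \endcode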

  const NodeT *findNearestCommonDominator(const NodeT *A,
                                          const NodeT *B) const {
    // Cast away the const qualifiers here. This is ok since
    // const is re-introduced on the return type.
    return findNearestCommonDominator(const_cast<NodeT *>(A),
                                      const_cast<NodeT *>(B));
  }

  bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
    return isPostDominator() && !A->getBlock();
  }

  //===--------------------------------------------------------------------===//
  // API to update (Post)DominatorTree information based on modifications to
  // the CFG...

  /// Inform the dominator tree about a sequence of CFG edge insertions and
  /// deletions and perform a batch update on the tree.
  ///
  /// This function should be used when there were multiple CFG updates after
  /// the last dominator tree update. It takes care of performing the updates
  /// in sync with the CFG and optimizes away the redundant operations that
  /// cancel each other.
  /// The function expects the sequence of updates to be balanced, e.g.:
  ///  - {{Insert, A, B}, {Delete, A, B}, {Insert, A, B}} is fine, because
  ///    logically it results in a single insertion.
  ///  - {{Insert, A, B}, {Insert, A, B}} is invalid, because it doesn't make
  ///    sense to insert the same edge twice.
  ///
  /// What's more, the function assumes that it's safe to ask every node in the
  /// CFG about its children and inverse children. This implies that deletions
  /// of CFG edges must not delete the CFG nodes before calling this function.
  ///
  /// The applyUpdates function can reorder the updates and remove redundant
  /// ones internally (as long as it is done in a deterministic fashion). The
  /// batch updater is also able to detect sequences of zero and exactly one
  /// update -- it's optimized to do less work in these cases.
  ///
  /// Note that for postdominators it automatically takes care of applying
  /// updates on reverse edges internally (so there's no need to swap the
  /// From and To pointers when constructing DominatorTree::UpdateType).
  /// The type of updates is the same for DomTreeBase<T> and PostDomTreeBase<T>
  /// with the same template parameter T.
  ///
  /// \param Updates An ordered sequence of updates to perform. The current CFG
  /// and the reverse of these updates provides the pre-view of the CFG.
  ///
  void applyUpdates(ArrayRef<UpdateType> Updates) {
    GraphDiff<NodePtr, IsPostDominator> PreViewCFG(
        Updates, /*ReverseApplyUpdates=*/true);
    DomTreeBuilder::ApplyUpdates(*this, PreViewCFG, nullptr);
  }

  /// \param Updates An ordered sequence of updates to perform. The current CFG
  /// and the reverse of these updates provides the pre-view of the CFG.
  /// \param PostViewUpdates An ordered sequence of update to perform in order
  /// to obtain a post-view of the CFG. The DT will be updated assuming the
  /// obtained PostViewCFG is the desired end state.
  void applyUpdates(ArrayRef<UpdateType> Updates,
                    ArrayRef<UpdateType> PostViewUpdates) {
    if (Updates.empty()) {
      GraphDiff<NodePtr, IsPostDom> PostViewCFG(PostViewUpdates);
      DomTreeBuilder::ApplyUpdates(*this, PostViewCFG, &PostViewCFG);
    } else {
      // PreViewCFG needs to merge Updates and PostViewCFG. The updates in
      // Updates need to be reversed, and match the direction in PostViewCFG.
      // The PostViewCFG is created with updates reversed (equivalent to changes
      // made to the CFG), so the PreViewCFG needs all the updates reverse
      // applied.
      SmallVector<UpdateType> AllUpdates(Updates.begin(), Updates.end());
      append_range(AllUpdates, PostViewUpdates);
      GraphDiff<NodePtr, IsPostDom> PreViewCFG(AllUpdates,
                                               /*ReverseApplyUpdates=*/true);
      GraphDiff<NodePtr, IsPostDom> PostViewCFG(PostViewUpdates);
      DomTreeBuilder::ApplyUpdates(*this, PreViewCFG, &PostViewCFG);
    }
  }
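
  /// Example of a batch update (a sketch; BB0, BB1, and BB2 are hypothetical
  /// CFG nodes, and the CFG must already reflect these edge changes):
  /// \code
  ///   SmallVector<typename DomTreeBase<NodeT>::UpdateType, 2> Updates;
  ///   Updates.push_back({DomTreeBase<NodeT>::Insert, BB0, BB1});
  ///   Updates.push_back({DomTreeBase<NodeT>::Delete, BB0, BB2});
  ///   DT.applyUpdates(Updates);
  /// \endcode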

  /// Inform the dominator tree about a CFG edge insertion and update the tree.
  ///
  /// This function has to be called just before or just after making the update
  /// on the actual CFG. There cannot be any other updates that the dominator
  /// tree doesn't know about.
  ///
  /// Note that for postdominators it automatically takes care of inserting
  /// a reverse edge internally (so there's no need to swap the parameters).
  ///
  void insertEdge(NodeT *From, NodeT *To) {
    assert(From);
    assert(To);
    assert(NodeTrait::getParent(From) == Parent);
    assert(NodeTrait::getParent(To) == Parent);
    DomTreeBuilder::InsertEdge(*this, From, To);
  }

  /// Inform the dominator tree about a CFG edge deletion and update the tree.
  ///
  /// This function has to be called just after making the update on the actual
  /// CFG. In DEBUG mode, an internal function checks that the edge no longer
  /// exists in the CFG. There cannot be any other updates that the
  /// dominator tree doesn't know about.
  ///
  /// Note that for postdominators it automatically takes care of deleting
  /// a reverse edge internally (so there's no need to swap the parameters).
  ///
  void deleteEdge(NodeT *From, NodeT *To) {
    assert(From);
    assert(To);
    assert(NodeTrait::getParent(From) == Parent);
    assert(NodeTrait::getParent(To) == Parent);
    DomTreeBuilder::DeleteEdge(*this, From, To);
  }

  /// Add a new node to the dominator tree information.
  ///
  /// This creates a new node as a child of DomBB dominator node, linking it
  /// into the children list of the immediate dominator.
  ///
  /// \param BB New node in CFG.
  /// \param DomBB CFG node that is dominator for BB.
  /// \returns New dominator tree node that represents new CFG node.
  ///
  DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
    assert(getNode(BB) == nullptr && "Block already in dominator tree!");
    DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
    assert(IDomNode && "No immediate dominator specified for block!");
    DFSInfoValid = false;
    return createChild(BB, IDomNode);
  }

  /// Add a new node to the forward dominator tree and make it a new root.
  ///
  /// \param BB New node in CFG.
  /// \returns New dominator tree node that represents new CFG node.
  ///
  DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
    assert(getNode(BB) == nullptr && "Block already in dominator tree!");
    assert(!this->isPostDominator() &&
           "Cannot change root of post-dominator tree");
    DFSInfoValid = false;
    DomTreeNodeBase<NodeT> *NewNode = createNode(BB);
    if (Roots.empty()) {
      addRoot(BB);
    } else {
      assert(Roots.size() == 1);
      NodeT *OldRoot = Roots.front();
      auto &OldNode = DomTreeNodes[OldRoot];
      OldNode = NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
      OldNode->IDom = NewNode;
      OldNode->UpdateLevel();
      Roots[0] = BB;
    }
    return RootNode = NewNode;
  }

  /// changeImmediateDominator - This method is used to update the dominator
  /// tree information when a node's immediate dominator changes.
  ///
  void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
                                DomTreeNodeBase<NodeT> *NewIDom) {
    assert(N && NewIDom && "Cannot change null node pointers!");
    DFSInfoValid = false;
    N->setIDom(NewIDom);
  }

  void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
    changeImmediateDominator(getNode(BB), getNode(NewBB));
  }

  /// eraseNode - Removes a node from the dominator tree. Block must not
  /// dominate any other blocks. Removes node from its immediate dominator's
  /// children list. Deletes dominator node associated with basic block BB.
  void eraseNode(NodeT *BB) {
    DomTreeNodeBase<NodeT> *Node = getNode(BB);
    assert(Node && "Removing node that isn't in dominator tree.");
    assert(Node->isLeaf() && "Node is not a leaf node.");

    DFSInfoValid = false;

    // Remove node from immediate dominator's children list.
    DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
    if (IDom) {
      const auto I = find(IDom->Children, Node);
      assert(I != IDom->Children.end() &&
             "Not in immediate dominator children set!");
      // I am no longer your child...
      IDom->Children.erase(I);
    }

    DomTreeNodes.erase(BB);

    if (!IsPostDom) return;

    // Remember to update PostDominatorTree roots.
    auto RIt = llvm::find(Roots, BB);
    if (RIt != Roots.end()) {
      std::swap(*RIt, Roots.back());
      Roots.pop_back();
    }
  }

  /// splitBlock - BB is split and now it has one successor. Update dominator
  /// tree to reflect this change.
  void splitBlock(NodeT *NewBB) {
    if (IsPostDominator)
      Split<Inverse<NodeT *>>(NewBB);
    else
      Split<NodeT *>(NewBB);
  }

  /// print - Convert to human readable form
  ///
  void print(raw_ostream &O) const {
    O << "=============================--------------------------------\n";
    if (IsPostDominator)
      O << "Inorder PostDominator Tree: ";
    else
      O << "Inorder Dominator Tree: ";
    if (!DFSInfoValid)
      O << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
    O << "\n";

    // The postdom tree can have a null root if there are no returns.
    if (getRootNode()) PrintDomTree<NodeT>(getRootNode(), O, 1);
    O << "Roots: ";
    for (const NodePtr Block : Roots) {
      Block->printAsOperand(O, false);
      O << " ";
    }
    O << "\n";
  }

public:
  /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
  /// dominator tree in dfs order.
  void updateDFSNumbers() const {
    if (DFSInfoValid) {
      SlowQueries = 0;
      return;
    }

    SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
                          typename DomTreeNodeBase<NodeT>::const_iterator>,
                32> WorkStack;

    const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
    assert((!Parent || ThisRoot) && "Empty constructed DomTree");
    if (!ThisRoot)
      return;

    // Both dominators and postdominators have a single root node. In the
    // case of PostDominatorTree, this node is a virtual root.
    WorkStack.push_back({ThisRoot, ThisRoot->begin()});

    unsigned DFSNum = 0;
    ThisRoot->DFSNumIn = DFSNum++;

    while (!WorkStack.empty()) {
      const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
      const auto ChildIt = WorkStack.back().second;

      // If we visited all of the children of this node, "recurse" back up the
      // stack setting the DFOutNum.
      if (ChildIt == Node->end()) {
        Node->DFSNumOut = DFSNum++;
        WorkStack.pop_back();
      } else {
        // Otherwise, recursively visit this child.
        const DomTreeNodeBase<NodeT> *Child = *ChildIt;
        ++WorkStack.back().second;

        WorkStack.push_back({Child, Child->begin()});
        Child->DFSNumIn = DFSNum++;
      }
    }

    SlowQueries = 0;
    DFSInfoValid = true;
  }
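
  /// After numbering, dominance reduces to interval containment: A dominates
  /// B iff A.DFSNumIn <= B.DFSNumIn and B.DFSNumOut <= A.DFSNumOut. For
  /// example (a sketch), a tree R -> {X, Y}, X -> {Z} is numbered
  ///   R {0,7}  X {1,4}  Z {2,3}  Y {5,6}
  /// so X dominates Z (since [2,3] lies inside [1,4]) but not Y (5 falls
  /// outside [1,4]). This is what DominatedBy() checks once DFSInfoValid is
  /// set.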

  /// recalculate - compute a dominator tree for the given function
  void recalculate(ParentType &Func) {
    Parent = &Func;
    DomTreeBuilder::Calculate(*this);
  }

  void recalculate(ParentType &Func, ArrayRef<UpdateType> Updates) {
    Parent = &Func;
    DomTreeBuilder::CalculateWithUpdates(*this, Updates);
  }

  /// verify - checks if the tree is correct. There are 3 levels of verification:
  ///  - Full --  verifies if the tree is correct by making sure all the
  ///             properties (including the parent and the sibling property)
  ///             hold.
  ///             Takes O(N^3) time.
  ///
  ///  - Basic -- checks if the tree is correct, but compares it to a freshly
  ///             constructed tree instead of checking the sibling property.
  ///             Takes O(N^2) time.
  ///
  ///  - Fast  -- checks basic tree structure and compares it with a freshly
  ///             constructed tree.
  ///             Takes O(N^2) time worst case, but is faster in practice (same
  ///             as tree construction).
  bool verify(VerificationLevel VL = VerificationLevel::Full) const {
    return DomTreeBuilder::Verify(*this, VL);
  }

  void reset() {
    DomTreeNodes.clear();
    Roots.clear();
    RootNode = nullptr;
    Parent = nullptr;
    DFSInfoValid = false;
    SlowQueries = 0;
  }

protected:
  void addRoot(NodeT *BB) { this->Roots.push_back(BB); }

  DomTreeNodeBase<NodeT> *createChild(NodeT *BB, DomTreeNodeBase<NodeT> *IDom) {
    return (DomTreeNodes[BB] = IDom->addChild(
                std::make_unique<DomTreeNodeBase<NodeT>>(BB, IDom)))
        .get();
  }

  DomTreeNodeBase<NodeT> *createNode(NodeT *BB) {
    return (DomTreeNodes[BB] =
                std::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr))
        .get();
  }

  // NewBB is split and now it has one successor. Update dominator tree to
  // reflect this change.
  template <class N>
  void Split(typename GraphTraits<N>::NodeRef NewBB) {
    using GraphT = GraphTraits<N>;
    using NodeRef = typename GraphT::NodeRef;
    assert(std::distance(GraphT::child_begin(NewBB),
                         GraphT::child_end(NewBB)) == 1 &&
           "NewBB should have a single successor!");
    NodeRef NewBBSucc = *GraphT::child_begin(NewBB);

    SmallVector<NodeRef, 4> PredBlocks(children<Inverse<N>>(NewBB));

    assert(!PredBlocks.empty() && "No predblocks?");

    bool NewBBDominatesNewBBSucc = true;
    for (auto *Pred : children<Inverse<N>>(NewBBSucc)) {
      if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
          isReachableFromEntry(Pred)) {
        NewBBDominatesNewBBSucc = false;
        break;
      }
    }

    // Find NewBB's immediate dominator and create new dominator tree node for
    // NewBB.
    NodeT *NewBBIDom = nullptr;
    unsigned i = 0;
    for (i = 0; i < PredBlocks.size(); ++i)
      if (isReachableFromEntry(PredBlocks[i])) {
        NewBBIDom = PredBlocks[i];
        break;
      }

    // It's possible that none of the predecessors of NewBB are reachable;
    // in that case, NewBB itself is unreachable, so nothing needs to be
    // changed.
    if (!NewBBIDom) return;

    for (i = i + 1; i < PredBlocks.size(); ++i) {
      if (isReachableFromEntry(PredBlocks[i]))
        NewBBIDom = findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
    }

    // Create the new dominator tree node... and set the idom of NewBB.
    DomTreeNodeBase<NodeT> *NewBBNode = addNewBlock(NewBB, NewBBIDom);

    // If NewBB strictly dominates other blocks, then it is now the immediate
    // dominator of NewBBSucc.  Update the dominator tree as appropriate.
    if (NewBBDominatesNewBBSucc) {
      DomTreeNodeBase<NodeT> *NewBBSuccNode = getNode(NewBBSucc);
      changeImmediateDominator(NewBBSuccNode, NewBBNode);
    }
  }

 private:
  bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
                               const DomTreeNodeBase<NodeT> *B) const {
    assert(A != B);
    assert(isReachableFromEntry(B));
    assert(isReachableFromEntry(A));

    const unsigned ALevel = A->getLevel();
    const DomTreeNodeBase<NodeT> *IDom;

    // Don't walk nodes above A's subtree. When we reach A's level, we must
    // either find A or be in some other subtree not dominated by A.
    while ((IDom = B->getIDom()) != nullptr && IDom->getLevel() >= ALevel)
      B = IDom;  // Walk up the tree

    return B == A;
  }

  /// Wipe this tree's state without releasing any resources.
  ///
  /// This is essentially a post-move helper only. It leaves the object in an
  /// assignable and destroyable state, but otherwise invalid.
  void wipe() {
    DomTreeNodes.clear();
    RootNode = nullptr;
    Parent = nullptr;
  }
};

template <typename T>
using DomTreeBase = DominatorTreeBase<T, false>;

template <typename T>
using PostDomTreeBase = DominatorTreeBase<T, true>;

// These two functions are declared out of line as a workaround for building
// with old (< r147295) versions of clang because of pr11642.
template <typename NodeT, bool IsPostDom>
bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
                                                    const NodeT *B) const {
  if (A == B)
    return true;

  // Cast away the const qualifiers here. This is ok since
  // this function doesn't actually return the values returned
  // from getNode.
  return dominates(getNode(const_cast<NodeT *>(A)),
                   getNode(const_cast<NodeT *>(B)));
}
template <typename NodeT, bool IsPostDom>
bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
    const NodeT *A, const NodeT *B) const {
  if (A == B)
    return false;

  // Cast away the const qualifiers here. This is ok since
  // this function doesn't actually return the values returned
  // from getNode.
  return dominates(getNode(const_cast<NodeT *>(A)),
                   getNode(const_cast<NodeT *>(B)));
}

} // end namespace llvm

#endif // LLVM_SUPPORT_GENERICDOMTREE_H

//===-- llvm/Support/Host.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of `llvm/TargetParser/Host.h`.
///
//===----------------------------------------------------------------------===//

#ifdef __GNUC__
#pragma GCC warning                                                            \
    "This header is deprecated, please use llvm/TargetParser/Host.h"
#endif
#include "llvm/TargetParser/Host.h"

//===-- DXILOperationCommon.h - DXIL Operation ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exists to share common definitions used by both the DXILOpBuilder
// and the table generator.
// Documentation for DXIL can be found in
// https://github.com/Microsoft/DirectXShaderCompiler/blob/main/docs/DXIL.rst.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DXILOPERATIONCOMMON_H
#define LLVM_SUPPORT_DXILOPERATIONCOMMON_H

#include "llvm/ADT/StringSwitch.h"

namespace llvm {
namespace dxil {

enum class ParameterKind : uint8_t {
  INVALID = 0,
  VOID,
  HALF,
  FLOAT,
  DOUBLE,
  I1,
  I8,
  I16,
  I32,
  I64,
  OVERLOAD,
  CBUFFER_RET,
  RESOURCE_RET,
  DXIL_HANDLE,
};

inline ParameterKind parameterTypeNameToKind(StringRef Name) {
  return StringSwitch<ParameterKind>(Name)
      .Case("void", ParameterKind::VOID)
      .Case("half", ParameterKind::HALF)
      .Case("float", ParameterKind::FLOAT)
      .Case("double", ParameterKind::DOUBLE)
      .Case("i1", ParameterKind::I1)
      .Case("i8", ParameterKind::I8)
      .Case("i16", ParameterKind::I16)
      .Case("i32", ParameterKind::I32)
      .Case("i64", ParameterKind::I64)
      .Case("$o", ParameterKind::OVERLOAD)
      .Case("dx.types.Handle", ParameterKind::DXIL_HANDLE)
      .Case("dx.types.CBufRet", ParameterKind::CBUFFER_RET)
      .Case("dx.types.ResRet", ParameterKind::RESOURCE_RET)
      .Default(ParameterKind::INVALID);
}
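
// Example usage (an illustrative sketch):
// \code
//   ParameterKind K = parameterTypeNameToKind("i32");   // ParameterKind::I32
//   ParameterKind U = parameterTypeNameToKind("vec4");  // ParameterKind::INVALID
// \endcode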

} // namespace dxil
} // namespace llvm

#endif

//===- llvm/Support/PrettyStackTrace.h - Pretty Crash Handling --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PrettyStackTraceEntry class, which is used to make
// crashes give more contextual information about what the program was doing
// when it crashed.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
#define LLVM_SUPPORT_PRETTYSTACKTRACE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"

namespace llvm {
  class raw_ostream;

  /// Enables dumping a "pretty" stack trace when the program crashes.
  ///
  /// \see PrettyStackTraceEntry
  void EnablePrettyStackTrace();

  /// Enables (or disables) dumping a "pretty" stack trace when the user sends
  /// SIGINFO or SIGUSR1 to the current process.
  ///
  /// This is a per-thread decision so that a program can choose to print stack
  /// traces only on a primary thread, or on all threads that use
  /// PrettyStackTraceEntry.
  ///
  /// \see EnablePrettyStackTrace
  /// \see PrettyStackTraceEntry
  void EnablePrettyStackTraceOnSigInfoForThisThread(bool ShouldEnable = true);

  /// Replaces the generic bug report message that is output upon
  /// a crash.
  void setBugReportMsg(const char *Msg);

  /// Get the bug report message that will be output upon a crash.
  const char *getBugReportMsg();

  /// PrettyStackTraceEntry - This class is used to represent a frame of the
  /// "pretty" stack trace that is dumped when a program crashes. You can define
  /// subclasses of this and declare them on the program stack: when they are
  /// constructed and destructed, they will add their symbolic frames to a
  /// virtual stack trace.  This gets dumped out if the program crashes.
  class PrettyStackTraceEntry {
    friend PrettyStackTraceEntry *ReverseStackTrace(PrettyStackTraceEntry *);

    PrettyStackTraceEntry *NextEntry;
    PrettyStackTraceEntry(const PrettyStackTraceEntry &) = delete;
    void operator=(const PrettyStackTraceEntry &) = delete;
  public:
    PrettyStackTraceEntry();
    virtual ~PrettyStackTraceEntry();

    /// print - Emit information about this stack frame to OS.
    virtual void print(raw_ostream &OS) const = 0;

    /// getNextEntry - Return the next entry in the list of frames.
    const PrettyStackTraceEntry *getNextEntry() const { return NextEntry; }
  };

  /// PrettyStackTraceString - This object prints a specified string (which
  /// should not contain newlines) to the stream as the stack trace when a crash
  /// occurs.
  class PrettyStackTraceString : public PrettyStackTraceEntry {
    const char *Str;
  public:
    PrettyStackTraceString(const char *str) : Str(str) {}
    void print(raw_ostream &OS) const override;
  };
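
  /// Example (a sketch): scope a frame around a risky phase so that a crash
  /// inside it includes the message in the pretty stack trace (parseModule()
  /// is a hypothetical function):
  /// \code
  ///   {
  ///     PrettyStackTraceString Frame("while parsing module 'foo'");
  ///     parseModule();
  ///   } // Frame is popped from the virtual stack here.
  /// \endcode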

  /// PrettyStackTraceFormat - This object prints a string (which may use
  /// printf-style formatting but should not contain newlines) to the stream
  /// as the stack trace when a crash occurs.
  class PrettyStackTraceFormat : public PrettyStackTraceEntry {
    llvm::SmallVector<char, 32> Str;
  public:
    PrettyStackTraceFormat(const char *Format, ...);
    void print(raw_ostream &OS) const override;
  };

  /// PrettyStackTraceProgram - This object prints the specified program
  /// arguments to the stream as the stack trace when a crash occurs.
  class PrettyStackTraceProgram : public PrettyStackTraceEntry {
    int ArgC;
    const char *const *ArgV;
  public:
    PrettyStackTraceProgram(int argc, const char * const*argv)
      : ArgC(argc), ArgV(argv) {
      EnablePrettyStackTrace();
    }
    void print(raw_ostream &OS) const override;
  };

  /// Returns the topmost element of the "pretty" stack state.
  const void *SavePrettyStackState();

  /// Restores the topmost element of the "pretty" stack state to State, which
  /// should come from a previous call to SavePrettyStackState().  This is
  /// useful when using a CrashRecoveryContext in code that also uses
  /// PrettyStackTraceEntries, to make sure the stack that's printed if a crash
  /// happens after a crash that's been recovered by CrashRecoveryContext
  /// doesn't have frames on it that were added in code unwound by the
  /// CrashRecoveryContext.
  void RestorePrettyStackState(const void *State);

} // end namespace llvm

#endif

//===- SourceMgr.h - Manager for Source Buffers & Diagnostics ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SMDiagnostic and SourceMgr classes.  This
// provides a simple substrate for diagnostics, #include handling, and other low
// level things for simple parsers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SOURCEMGR_H
#define LLVM_SUPPORT_SOURCEMGR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include <vector>

namespace llvm {

class raw_ostream;
class SMDiagnostic;
class SMFixIt;

/// This owns the files read by a parser, handles include stacks,
/// and handles diagnostic wrangling.
class SourceMgr {
public:
  enum DiagKind {
    DK_Error,
    DK_Warning,
    DK_Remark,
    DK_Note,
  };

  /// Clients that want to handle their own diagnostics in a custom way can
  /// register a function pointer+context as a diagnostic handler.
  /// It gets called each time PrintMessage is invoked.
  using DiagHandlerTy = void (*)(const SMDiagnostic &, void *Context);

private:
  struct SrcBuffer {
    /// The memory buffer for the file.
    std::unique_ptr<MemoryBuffer> Buffer;

    /// Vector of offsets into Buffer at which there are line-endings
    /// (lazily populated). Once populated, the '\n' that marks the end of
    /// line number N from [1..] is at Buffer[OffsetCache[N-1]]. Since
    /// these offsets are in sorted (ascending) order, they can be
    /// binary-searched for the first one after any given offset (eg. an
    /// offset corresponding to a particular SMLoc).
    ///
    /// Since we're storing offsets into relatively small files (often smaller
    /// than 2^8 or 2^16 bytes), we select the offset vector element type
    /// dynamically based on the size of Buffer.
    mutable void *OffsetCache = nullptr;

    /// Look up a given \p Ptr in the buffer, determining which line it came
    /// from.
    unsigned getLineNumber(const char *Ptr) const;
    template <typename T>
    unsigned getLineNumberSpecialized(const char *Ptr) const;

    /// Return a pointer to the first character of the specified line number or
    /// null if the line number is invalid.
    const char *getPointerForLineNumber(unsigned LineNo) const;
    template <typename T>
    const char *getPointerForLineNumberSpecialized(unsigned LineNo) const;

    /// This is the location of the parent include, or null if at the top level.
    SMLoc IncludeLoc;

    SrcBuffer() = default;
    SrcBuffer(SrcBuffer &&);
    SrcBuffer(const SrcBuffer &) = delete;
    SrcBuffer &operator=(const SrcBuffer &) = delete;
    ~SrcBuffer();
  };

  /// This is all of the buffers that we are reading from.
  std::vector<SrcBuffer> Buffers;

  // This is the list of directories we should search for include files in.
  std::vector<std::string> IncludeDirectories;

  DiagHandlerTy DiagHandler = nullptr;
  void *DiagContext = nullptr;

  bool isValidBufferID(unsigned i) const { return i && i <= Buffers.size(); }

public:
  SourceMgr() = default;
  SourceMgr(const SourceMgr &) = delete;
  SourceMgr &operator=(const SourceMgr &) = delete;
  SourceMgr(SourceMgr &&) = default;
  SourceMgr &operator=(SourceMgr &&) = default;
  ~SourceMgr() = default;

  /// Return the include directories of this source manager.
  ArrayRef<std::string> getIncludeDirs() const { return IncludeDirectories; }

  void setIncludeDirs(const std::vector<std::string> &Dirs) {
    IncludeDirectories = Dirs;
  }

  /// Specify a diagnostic handler to be invoked every time PrintMessage is
  /// called. \p Ctx is passed into the handler when it is invoked.
  void setDiagHandler(DiagHandlerTy DH, void *Ctx = nullptr) {
    DiagHandler = DH;
    DiagContext = Ctx;
  }

  DiagHandlerTy getDiagHandler() const { return DiagHandler; }
  void *getDiagContext() const { return DiagContext; }

  const SrcBuffer &getBufferInfo(unsigned i) const {
    assert(isValidBufferID(i));
    return Buffers[i - 1];
  }

  const MemoryBuffer *getMemoryBuffer(unsigned i) const {
    assert(isValidBufferID(i));
    return Buffers[i - 1].Buffer.get();
  }

  unsigned getNumBuffers() const { return Buffers.size(); }

  unsigned getMainFileID() const {
    assert(getNumBuffers());
    return 1;
  }

  SMLoc getParentIncludeLoc(unsigned i) const {
    assert(isValidBufferID(i));
    return Buffers[i - 1].IncludeLoc;
  }

  /// Add a new source buffer to this source manager. This takes ownership of
  /// the memory buffer.
  unsigned AddNewSourceBuffer(std::unique_ptr<MemoryBuffer> F,
                              SMLoc IncludeLoc) {
    SrcBuffer NB;
    NB.Buffer = std::move(F);
    NB.IncludeLoc = IncludeLoc;
    Buffers.push_back(std::move(NB));
    return Buffers.size();
  }
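
  /// Example (a sketch; Path is a hypothetical file name):
  /// \code
  ///   SourceMgr SM;
  ///   if (auto BufOrErr = MemoryBuffer::getFile(Path)) {
  ///     unsigned ID = SM.AddNewSourceBuffer(std::move(*BufOrErr), SMLoc());
  ///     // ID == SM.getMainFileID() for the first buffer added.
  ///   }
  /// \endcode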

  /// Takes the source buffers from the given source manager and appends them to
  /// the current manager. `MainBufferIncludeLoc` is an optional include
  /// location to attach to the main buffer of `SrcMgr` after it gets moved to
  /// the current manager.
  void takeSourceBuffersFrom(SourceMgr &SrcMgr,
                             SMLoc MainBufferIncludeLoc = SMLoc()) {
    if (SrcMgr.Buffers.empty())
      return;

    size_t OldNumBuffers = getNumBuffers();
    std::move(SrcMgr.Buffers.begin(), SrcMgr.Buffers.end(),
              std::back_inserter(Buffers));
    SrcMgr.Buffers.clear();
    Buffers[OldNumBuffers].IncludeLoc = MainBufferIncludeLoc;
  }

  /// Search for a file with the specified name in the current directory or in
  /// one of the IncludeDirs.
  ///
  /// If no file is found, this returns 0, otherwise it returns the buffer ID
  /// of the stacked file. The full path to the included file can be found in
  /// \p IncludedFile.
  unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc,
                          std::string &IncludedFile);

  /// Search for a file with the specified name in the current directory or in
  /// one of the IncludeDirs, and try to open it **without** adding to the
  /// SourceMgr. If the opened file is intended to be added to the source
  /// manager, prefer `AddIncludeFile` instead.
  ///
  /// If no file is found, this returns an Error, otherwise it returns the
  /// buffer of the stacked file. The full path to the included file can be
  /// found in \p IncludedFile.
  ErrorOr<std::unique_ptr<MemoryBuffer>>
  OpenIncludeFile(const std::string &Filename, std::string &IncludedFile);

  /// Return the ID of the buffer containing the specified location.
  ///
  /// 0 is returned if the buffer is not found.
  unsigned FindBufferContainingLoc(SMLoc Loc) const;

  /// Find the line number for the specified location in the specified file.
  /// This is not a fast method.
  unsigned FindLineNumber(SMLoc Loc, unsigned BufferID = 0) const {
    return getLineAndColumn(Loc, BufferID).first;
  }

  /// Find the line and column number for the specified location in the
  /// specified file. This is not a fast method.
  std::pair<unsigned, unsigned> getLineAndColumn(SMLoc Loc,
                                                 unsigned BufferID = 0) const;

  /// Get a string with the \p SMLoc filename and line number
  /// formatted in the standard style.
  std::string getFormattedLocationNoOffset(SMLoc Loc,
                                           bool IncludePath = false) const;

  /// Given a line and column number in a mapped buffer, turn it into an SMLoc.
  /// This will return a null SMLoc if the line/column location is invalid.
  SMLoc FindLocForLineAndColumn(unsigned BufferID, unsigned LineNo,
                                unsigned ColNo);

  /// Emit a message about the specified location with the specified string.
  ///
  /// \param ShowColors Display colored messages if output is a terminal and
  /// the default error handler is used.
  void PrintMessage(raw_ostream &OS, SMLoc Loc, DiagKind Kind, const Twine &Msg,
                    ArrayRef<SMRange> Ranges = {},
                    ArrayRef<SMFixIt> FixIts = {},
                    bool ShowColors = true) const;

  /// Emits a diagnostic to llvm::errs().
  void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
                    ArrayRef<SMRange> Ranges = {},
                    ArrayRef<SMFixIt> FixIts = {},
                    bool ShowColors = true) const;

  /// Emits a manually-constructed diagnostic to the given output stream.
  ///
  /// \param ShowColors Display colored messages if output is a terminal and
  /// the default error handler is used.
  void PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
                    bool ShowColors = true) const;

  /// Return an SMDiagnostic at the specified location with the specified
  /// string.
  ///
  /// \param Msg If non-null, the kind of message (e.g., "error") which is
  /// prefixed to the message.
  SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
                          ArrayRef<SMRange> Ranges = {},
                          ArrayRef<SMFixIt> FixIts = {}) const;

  /// Prints the names of included files and the line of the file they were
  /// included from. A diagnostic handler can use this before printing its
  /// custom formatted message.
  ///
  /// \param IncludeLoc The location of the include.
  /// \param OS the raw_ostream to print on.
  void PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const;
};

/// Represents a single fixit, a replacement of one range of text with another.
class SMFixIt {
  SMRange Range;

  std::string Text;

public:
  SMFixIt(SMRange R, const Twine &Replacement);

  SMFixIt(SMLoc Loc, const Twine &Replacement)
      : SMFixIt(SMRange(Loc, Loc), Replacement) {}

  StringRef getText() const { return Text; }
  SMRange getRange() const { return Range; }

  bool operator<(const SMFixIt &Other) const {
    if (Range.Start.getPointer() != Other.Range.Start.getPointer())
      return Range.Start.getPointer() < Other.Range.Start.getPointer();
    if (Range.End.getPointer() != Other.Range.End.getPointer())
      return Range.End.getPointer() < Other.Range.End.getPointer();
    return Text < Other.Text;
  }
};
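
/// Example (a sketch; SM is a SourceMgr with a registered buffer, and Loc and
/// End are SMLocs inside it):
/// \code
///   SMFixIt Fix(SMRange(Loc, End), "fixed_token");
///   SM.PrintMessage(Loc, SourceMgr::DK_Error, "unexpected token",
///                   /*Ranges=*/{}, /*FixIts=*/{Fix});
/// \endcode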

/// Instances of this class encapsulate one diagnostic report, allowing
/// printing to a raw_ostream as a caret diagnostic.
class SMDiagnostic {
  const SourceMgr *SM = nullptr;
  SMLoc Loc;
  std::string Filename;
  int LineNo = 0;
  int ColumnNo = 0;
  SourceMgr::DiagKind Kind = SourceMgr::DK_Error;
  std::string Message, LineContents;
  std::vector<std::pair<unsigned, unsigned>> Ranges;
  SmallVector<SMFixIt, 4> FixIts;

public:
  // Null diagnostic.
  SMDiagnostic() = default;
  // Diagnostic with no location (e.g. file not found, command line arg error).
  SMDiagnostic(StringRef filename, SourceMgr::DiagKind Knd, StringRef Msg)
      : Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd), Message(Msg) {}

  // Diagnostic with a location.
  SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN, int Line, int Col,
               SourceMgr::DiagKind Kind, StringRef Msg, StringRef LineStr,
               ArrayRef<std::pair<unsigned, unsigned>> Ranges,
               ArrayRef<SMFixIt> FixIts = {});

  const SourceMgr *getSourceMgr() const { return SM; }
  SMLoc getLoc() const { return Loc; }
  StringRef getFilename() const { return Filename; }
  int getLineNo() const { return LineNo; }
  int getColumnNo() const { return ColumnNo; }
  SourceMgr::DiagKind getKind() const { return Kind; }
  StringRef getMessage() const { return Message; }
  StringRef getLineContents() const { return LineContents; }
  ArrayRef<std::pair<unsigned, unsigned>> getRanges() const { return Ranges; }

  void addFixIt(const SMFixIt &Hint) { FixIts.push_back(Hint); }

  ArrayRef<SMFixIt> getFixIts() const { return FixIts; }

  void print(const char *ProgName, raw_ostream &S, bool ShowColors = true,
             bool ShowKindLabel = true) const;
};

} // end namespace llvm

#endif // LLVM_SUPPORT_SOURCEMGR_H

//===- LineIterator.h - Iterator to read a text buffer's lines --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_LINEITERATOR_H
#define LLVM_SUPPORT_LINEITERATOR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <iterator>
#include <optional>

namespace llvm {

class MemoryBuffer;

/// A forward iterator which reads text lines from a buffer.
///
/// This class provides a forward iterator interface for reading one line at
/// a time from a buffer. When default constructed the iterator will be the
/// "end" iterator.
///
/// The iterator is aware of what line number it is currently processing. By
/// default it strips blank lines and, given a comment-starting character,
/// comment lines as well.
///
/// Note that this iterator requires the buffer to be nul terminated.
class line_iterator {
  std::optional<MemoryBufferRef> Buffer;
  char CommentMarker = '\0';
  bool SkipBlanks = true;

  unsigned LineNumber = 1;
  StringRef CurrentLine;

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = StringRef;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  /// Default construct an "end" iterator.
  line_iterator() = default;

  /// Construct a new iterator around an unowned memory buffer.
  explicit line_iterator(const MemoryBufferRef &Buffer, bool SkipBlanks = true,
                         char CommentMarker = '\0');

  /// Construct a new iterator around some memory buffer.
  explicit line_iterator(const MemoryBuffer &Buffer, bool SkipBlanks = true,
                         char CommentMarker = '\0');

  /// Return true if we've reached EOF or are an "end" iterator.
  bool is_at_eof() const { return !Buffer; }

  /// Return true if we're an "end" iterator or have reached EOF.
  bool is_at_end() const { return is_at_eof(); }

  /// Return the current line number. May return any number at EOF.
  int64_t line_number() const { return LineNumber; }

  /// Advance to the next (non-empty, non-comment) line.
  line_iterator &operator++() {
    advance();
    return *this;
  }
  line_iterator operator++(int) {
    line_iterator tmp(*this);
    advance();
    return tmp;
  }

  /// Get the current line as a \c StringRef.
  StringRef operator*() const { return CurrentLine; }
  const StringRef *operator->() const { return &CurrentLine; }

  friend bool operator==(const line_iterator &LHS, const line_iterator &RHS) {
    return LHS.Buffer == RHS.Buffer &&
           LHS.CurrentLine.begin() == RHS.CurrentLine.begin();
  }

  friend bool operator!=(const line_iterator &LHS, const line_iterator &RHS) {
    return !(LHS == RHS);
  }

private:
  /// Advance the iterator to the next line.
  void advance();
};
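
// Example usage (an illustrative sketch; Buf is a loaded MemoryBuffer and
// consume() is a hypothetical callback):
// \code
//   for (line_iterator I(*Buf, /*SkipBlanks=*/true, '#'), E; I != E; ++I)
//     consume(*I, I.line_number());
// \endcode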
}

#endif

//extension handlers
#undef HANDLE_EXTENSION

//===- CFGUpdate.h - Encode a CFG Edge Update. ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a CFG Edge Update: Insert or Delete, and two Nodes as the
// Edge ends.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CFGUPDATE_H
#define LLVM_SUPPORT_CFGUPDATE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace cfg {
enum class UpdateKind : unsigned char { Insert, Delete };

template <typename NodePtr> class Update {
  using NodeKindPair = PointerIntPair<NodePtr, 1, UpdateKind>;
  NodePtr From;
  NodeKindPair ToAndKind;

public:
  Update(UpdateKind Kind, NodePtr From, NodePtr To)
      : From(From), ToAndKind(To, Kind) {}

  UpdateKind getKind() const { return ToAndKind.getInt(); }
  NodePtr getFrom() const { return From; }
  NodePtr getTo() const { return ToAndKind.getPointer(); }
  bool operator==(const Update &RHS) const {
    return From == RHS.From && ToAndKind == RHS.ToAndKind;
  }

  void print(raw_ostream &OS) const {
    OS << (getKind() == UpdateKind::Insert ? "Insert " : "Delete ");
    getFrom()->printAsOperand(OS, false);
    OS << " -> ";
    getTo()->printAsOperand(OS, false);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};

// The LegalizeUpdates function simplifies updates assuming a graph structure.
// This function serves a double purpose:
// a) It removes redundant updates, which makes it easier to reverse-apply
//    them when traversing CFG.
// b) It optimizes away updates that cancel each other out, as the end result
//    is the same.
template <typename NodePtr>
void LegalizeUpdates(ArrayRef<Update<NodePtr>> AllUpdates,
                     SmallVectorImpl<Update<NodePtr>> &Result,
                     bool InverseGraph, bool ReverseResultOrder = false) {
  // Count the total number of insertions of each edge.
  // Each insertion adds 1 and deletion subtracts 1. The end number should be
  // one of {-1 (deletion), 0 (NOP), +1 (insertion)}. Otherwise, the sequence
  // of updates contains multiple updates of the same kind and we assert for
  // that case.
  SmallDenseMap<std::pair<NodePtr, NodePtr>, int, 4> Operations;
  Operations.reserve(AllUpdates.size());

  for (const auto &U : AllUpdates) {
    NodePtr From = U.getFrom();
    NodePtr To = U.getTo();
    if (InverseGraph)
      std::swap(From, To); // Reverse edge for postdominators.

    Operations[{From, To}] += (U.getKind() == UpdateKind::Insert ? 1 : -1);
  }

  Result.clear();
  Result.reserve(Operations.size());
  for (auto &Op : Operations) {
    const int NumInsertions = Op.second;
    assert(std::abs(NumInsertions) <= 1 && "Unbalanced operations!");
    if (NumInsertions == 0)
      continue;
    const UpdateKind UK =
        NumInsertions > 0 ? UpdateKind::Insert : UpdateKind::Delete;
    Result.push_back({UK, Op.first.first, Op.first.second});
  }

  // Make the order consistent by not relying on pointer values within the
  // set. Reuse the old Operations map.
  // In the future, we should sort by something else to minimize the amount
  // of work needed to perform the series of updates.
  for (size_t i = 0, e = AllUpdates.size(); i != e; ++i) {
    const auto &U = AllUpdates[i];
    if (!InverseGraph)
      Operations[{U.getFrom(), U.getTo()}] = int(i);
    else
      Operations[{U.getTo(), U.getFrom()}] = int(i);
  }

  llvm::sort(Result, [&](const Update<NodePtr> &A, const Update<NodePtr> &B) {
    const auto &OpA = Operations[{A.getFrom(), A.getTo()}];
    const auto &OpB = Operations[{B.getFrom(), B.getTo()}];
    return ReverseResultOrder ? OpA < OpB : OpA > OpB;
  });
}
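
// Example usage (a minimal sketch; plain int* stands in for CFG node
// pointers, just to illustrate the cancellation behavior):
//
//   int A, B;
//   Update<int *> Updates[] = {
//       {UpdateKind::Insert, &A, &B}, // Insert A -> B ...
//       {UpdateKind::Delete, &A, &B}, // ... then delete it again.
//   };
//   SmallVector<Update<int *>, 4> Result;
//   LegalizeUpdates<int *>(Updates, Result, /*InverseGraph=*/false);
//   assert(Result.empty() && "insert/delete of the same edge cancel out");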

} // end namespace cfg
} // end namespace llvm

#endif // LLVM_SUPPORT_CFGUPDATE_H
Support/BinaryItemStream.h
//===- BinaryItemStream.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYITEMSTREAM_H
#define LLVM_SUPPORT_BINARYITEMSTREAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/Error.h"
#include <cstddef>
#include <cstdint>

namespace llvm {

template <typename T> struct BinaryItemTraits {
  static size_t length(const T &Item) = delete;
  static ArrayRef<uint8_t> bytes(const T &Item) = delete;
};

/// BinaryItemStream represents a sequence of objects stored in some kind of
/// external container, but which it is useful to view as a stream of
/// contiguous bytes.  An example of this might be a collection of records,
/// where each record is serialized into its own buffer and the buffers are
/// stored in a container.  The buffers are not laid out contiguously in
/// memory, but we may wish to read from or write to the records as if they
/// were.
template <typename T, typename Traits = BinaryItemTraits<T>>
class BinaryItemStream : public BinaryStream {
public:
  explicit BinaryItemStream(llvm::support::endianness Endian)
      : Endian(Endian) {}

  llvm::support::endianness getEndian() const override { return Endian; }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override {
    auto ExpectedIndex = translateOffsetIndex(Offset);
    if (!ExpectedIndex)
      return ExpectedIndex.takeError();
    const auto &Item = Items[*ExpectedIndex];
    if (auto EC = checkOffsetForRead(Offset, Size))
      return EC;
    if (Size > Traits::length(Item))
      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
    Buffer = Traits::bytes(Item).take_front(Size);
    return Error::success();
  }

  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override {
    auto ExpectedIndex = translateOffsetIndex(Offset);
    if (!ExpectedIndex)
      return ExpectedIndex.takeError();
    Buffer = Traits::bytes(Items[*ExpectedIndex]);
    return Error::success();
  }

  void setItems(ArrayRef<T> ItemArray) {
    Items = ItemArray;
    computeItemOffsets();
  }

  uint64_t getLength() override {
    return ItemEndOffsets.empty() ? 0 : ItemEndOffsets.back();
  }

private:
  void computeItemOffsets() {
    ItemEndOffsets.clear();
    ItemEndOffsets.reserve(Items.size());
    uint64_t CurrentOffset = 0;
    for (const auto &Item : Items) {
      uint64_t Len = Traits::length(Item);
      assert(Len > 0 && "no empty items");
      CurrentOffset += Len;
      ItemEndOffsets.push_back(CurrentOffset);
    }
  }

  Expected<uint32_t> translateOffsetIndex(uint64_t Offset) {
    // Make sure the offset is somewhere in our items array.
    if (Offset >= getLength())
      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
    ++Offset;
    auto Iter = llvm::lower_bound(ItemEndOffsets, Offset);
    size_t Idx = std::distance(ItemEndOffsets.begin(), Iter);
    assert(Idx < Items.size() && "binary search for offset failed");
    return Idx;
  }

  llvm::support::endianness Endian;
  ArrayRef<T> Items;

  // Sorted vector of offsets to accelerate lookup.
  std::vector<uint64_t> ItemEndOffsets;
};
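
// Example usage (a minimal sketch; MyRecord and its traits specialization
// are hypothetical, shown only to illustrate the Traits contract):
//
//   struct MyRecord {
//     std::vector<uint8_t> Serialized;
//   };
//   template <> struct BinaryItemTraits<MyRecord> {
//     static size_t length(const MyRecord &R) { return R.Serialized.size(); }
//     static ArrayRef<uint8_t> bytes(const MyRecord &R) { return R.Serialized; }
//   };
//
//   std::vector<MyRecord> Records = /* serialize each record up front */;
//   BinaryItemStream<MyRecord> Stream(llvm::support::little);
//   Stream.setItems(Records);
//   ArrayRef<uint8_t> Chunk;
//   if (Error E = Stream.readBytes(/*Offset=*/0, /*Size=*/4, Chunk))
//     /* handle the error */;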

} // end namespace llvm

#endif // LLVM_SUPPORT_BINARYITEMSTREAM_H
Support/AtomicOrdering.h
//===-- llvm/Support/AtomicOrdering.h ---Atomic Ordering---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Atomic ordering constants.
///
/// These values are used by LLVM to represent atomic ordering for C++11's
/// memory model and more, as detailed in docs/Atomics.rst.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ATOMICORDERING_H
#define LLVM_SUPPORT_ATOMICORDERING_H

#include <cstddef>

namespace llvm {

/// Atomic ordering for C11 / C++11's memory models.
///
/// These values cannot change because they are shared with standard library
/// implementations as well as with other compilers.
enum class AtomicOrderingCABI {
  relaxed = 0,
  consume = 1,
  acquire = 2,
  release = 3,
  acq_rel = 4,
  seq_cst = 5,
};

bool operator<(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
bool operator>(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
bool operator<=(AtomicOrderingCABI, AtomicOrderingCABI) = delete;
bool operator>=(AtomicOrderingCABI, AtomicOrderingCABI) = delete;

// Check that an integral value, which isn't known to fit within the enum's
// range, is a valid AtomicOrderingCABI.
template <typename Int> inline bool isValidAtomicOrderingCABI(Int I) {
  return (Int)AtomicOrderingCABI::relaxed <= I &&
         I <= (Int)AtomicOrderingCABI::seq_cst;
}

/// Atomic ordering for LLVM's memory model.
///
/// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and
/// Unordered, which are both below the C++ orders.
///
/// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
///                                   \-->consume-->acquire--/
enum class AtomicOrdering : unsigned {
  NotAtomic = 0,
  Unordered = 1,
  Monotonic = 2, // Equivalent to C++'s relaxed.
  // Consume = 3,  // Not specified yet.
  Acquire = 4,
  Release = 5,
  AcquireRelease = 6,
  SequentiallyConsistent = 7,
  LAST = SequentiallyConsistent
};

bool operator<(AtomicOrdering, AtomicOrdering) = delete;
bool operator>(AtomicOrdering, AtomicOrdering) = delete;
bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
bool operator>=(AtomicOrdering, AtomicOrdering) = delete;

// Check that an integral value, which isn't known to fit within the enum's
// range, is a valid AtomicOrdering.
template <typename Int> inline bool isValidAtomicOrdering(Int I) {
  return static_cast<Int>(AtomicOrdering::NotAtomic) <= I &&
         I <= static_cast<Int>(AtomicOrdering::SequentiallyConsistent) &&
         I != 3;
}

/// String used by LLVM IR to represent atomic ordering.
inline const char *toIRString(AtomicOrdering ao) {
  static const char *names[8] = {"not_atomic", "unordered", "monotonic",
                                 "consume",    "acquire",   "release",
                                 "acq_rel",    "seq_cst"};
  return names[static_cast<size_t>(ao)];
}

/// Returns true if ao is stronger than other as defined by the AtomicOrdering
/// lattice, which is based on C++'s definition.
inline bool isStrongerThan(AtomicOrdering AO, AtomicOrdering Other) {
  static const bool lookup[8][8] = {
      //               NA     UN     RX     CO     AC     RE     AR     SC
      /* NotAtomic */ {false, false, false, false, false, false, false, false},
      /* Unordered */ { true, false, false, false, false, false, false, false},
      /* relaxed   */ { true,  true, false, false, false, false, false, false},
      /* consume   */ { true,  true,  true, false, false, false, false, false},
      /* acquire   */ { true,  true,  true,  true, false, false, false, false},
      /* release   */ { true,  true,  true, false, false, false, false, false},
      /* acq_rel   */ { true,  true,  true,  true,  true,  true, false, false},
      /* seq_cst   */ { true,  true,  true,  true,  true,  true,  true, false},
  };
  return lookup[static_cast<size_t>(AO)][static_cast<size_t>(Other)];
}

inline bool isAtLeastOrStrongerThan(AtomicOrdering AO, AtomicOrdering Other) {
  static const bool lookup[8][8] = {
      //               NA     UN     RX     CO     AC     RE     AR     SC
      /* NotAtomic */ { true, false, false, false, false, false, false, false},
      /* Unordered */ { true,  true, false, false, false, false, false, false},
      /* relaxed   */ { true,  true,  true, false, false, false, false, false},
      /* consume   */ { true,  true,  true,  true, false, false, false, false},
      /* acquire   */ { true,  true,  true,  true,  true, false, false, false},
      /* release   */ { true,  true,  true, false, false,  true, false, false},
      /* acq_rel   */ { true,  true,  true,  true,  true,  true,  true, false},
      /* seq_cst   */ { true,  true,  true,  true,  true,  true,  true,  true},
  };
  return lookup[static_cast<size_t>(AO)][static_cast<size_t>(Other)];
}

inline bool isStrongerThanUnordered(AtomicOrdering AO) {
  return isStrongerThan(AO, AtomicOrdering::Unordered);
}

inline bool isStrongerThanMonotonic(AtomicOrdering AO) {
  return isStrongerThan(AO, AtomicOrdering::Monotonic);
}

inline bool isAcquireOrStronger(AtomicOrdering AO) {
  return isAtLeastOrStrongerThan(AO, AtomicOrdering::Acquire);
}

inline bool isReleaseOrStronger(AtomicOrdering AO) {
  return isAtLeastOrStrongerThan(AO, AtomicOrdering::Release);
}

/// Return a single atomic ordering that is at least as strong as both the \p AO
/// and \p Other orderings for an atomic operation.
inline AtomicOrdering getMergedAtomicOrdering(AtomicOrdering AO,
                                              AtomicOrdering Other) {
  if ((AO == AtomicOrdering::Acquire && Other == AtomicOrdering::Release) ||
      (AO == AtomicOrdering::Release && Other == AtomicOrdering::Acquire))
    return AtomicOrdering::AcquireRelease;
  return isStrongerThan(AO, Other) ? AO : Other;
}
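
// Worked example: Acquire and Release are incomparable in the lattice above,
// so merging them widens to AcquireRelease; for comparable orderings the
// stronger of the two is returned.
//
//   assert(getMergedAtomicOrdering(AtomicOrdering::Acquire,
//                                  AtomicOrdering::Release) ==
//          AtomicOrdering::AcquireRelease);
//   assert(getMergedAtomicOrdering(AtomicOrdering::Monotonic,
//                                  AtomicOrdering::SequentiallyConsistent) ==
//          AtomicOrdering::SequentiallyConsistent);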

inline AtomicOrderingCABI toCABI(AtomicOrdering AO) {
  static const AtomicOrderingCABI lookup[8] = {
      /* NotAtomic */ AtomicOrderingCABI::relaxed,
      /* Unordered */ AtomicOrderingCABI::relaxed,
      /* relaxed   */ AtomicOrderingCABI::relaxed,
      /* consume   */ AtomicOrderingCABI::consume,
      /* acquire   */ AtomicOrderingCABI::acquire,
      /* release   */ AtomicOrderingCABI::release,
      /* acq_rel   */ AtomicOrderingCABI::acq_rel,
      /* seq_cst   */ AtomicOrderingCABI::seq_cst,
  };
  return lookup[static_cast<size_t>(AO)];
}

} // end namespace llvm

#endif // LLVM_SUPPORT_ATOMICORDERING_H
Support/PointerLikeTypeTraits.h
//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PointerLikeTypeTraits class.  This allows data
// structures to reason about pointers and other things that are pointer sized.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H

#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <type_traits>

namespace llvm {

/// A traits type that is used to handle pointer types and things that are just
/// wrappers for pointers as a uniform entity.
template <typename T> struct PointerLikeTypeTraits;

namespace detail {
/// A tiny meta function to compute the log2 of a compile time constant.
template <size_t N>
struct ConstantLog2
    : std::integral_constant<size_t, ConstantLog2<N / 2>::value + 1> {};
template <> struct ConstantLog2<1> : std::integral_constant<size_t, 0> {};

// Provide a trait to check if T is pointer-like.
template <typename T, typename U = void> struct HasPointerLikeTypeTraits {
  static const bool value = false;
};

// sizeof(T) is valid only for a complete T.
template <typename T>
struct HasPointerLikeTypeTraits<
    T, decltype((sizeof(PointerLikeTypeTraits<T>) + sizeof(T)), void())> {
  static const bool value = true;
};

template <typename T> struct IsPointerLike {
  static const bool value = HasPointerLikeTypeTraits<T>::value;
};

template <typename T> struct IsPointerLike<T *> {
  static const bool value = true;
};
} // namespace detail

// Provide PointerLikeTypeTraits for non-cvr pointers.
template <typename T> struct PointerLikeTypeTraits<T *> {
  static inline void *getAsVoidPointer(T *P) { return P; }
  static inline T *getFromVoidPointer(void *P) { return static_cast<T *>(P); }

  static constexpr int NumLowBitsAvailable =
      detail::ConstantLog2<alignof(T)>::value;
};

template <> struct PointerLikeTypeTraits<void *> {
  static inline void *getAsVoidPointer(void *P) { return P; }
  static inline void *getFromVoidPointer(void *P) { return P; }

  /// Note, we assume here that void* is related to raw malloc'ed memory and
  /// that malloc returns objects at least 4-byte aligned. However, this may be
  /// wrong, or pointers may be from something other than malloc. In this case,
  /// you should specify a real typed pointer or avoid this template.
  ///
  /// All clients should use assertions to do a run-time check to ensure that
  /// this is actually true.
  static constexpr int NumLowBitsAvailable = 2;
};

// Provide PointerLikeTypeTraits for const things.
template <typename T> struct PointerLikeTypeTraits<const T> {
  typedef PointerLikeTypeTraits<T> NonConst;

  static inline const void *getAsVoidPointer(const T P) {
    return NonConst::getAsVoidPointer(P);
  }
  static inline const T getFromVoidPointer(const void *P) {
    return NonConst::getFromVoidPointer(const_cast<void *>(P));
  }
  static constexpr int NumLowBitsAvailable = NonConst::NumLowBitsAvailable;
};

// Provide PointerLikeTypeTraits for const pointers.
template <typename T> struct PointerLikeTypeTraits<const T *> {
  typedef PointerLikeTypeTraits<T *> NonConst;

  static inline const void *getAsVoidPointer(const T *P) {
    return NonConst::getAsVoidPointer(const_cast<T *>(P));
  }
  static inline const T *getFromVoidPointer(const void *P) {
    return NonConst::getFromVoidPointer(const_cast<void *>(P));
  }
  static constexpr int NumLowBitsAvailable = NonConst::NumLowBitsAvailable;
};

// Provide PointerLikeTypeTraits for uintptr_t.
template <> struct PointerLikeTypeTraits<uintptr_t> {
  static inline void *getAsVoidPointer(uintptr_t P) {
    return reinterpret_cast<void *>(P);
  }
  static inline uintptr_t getFromVoidPointer(void *P) {
    return reinterpret_cast<uintptr_t>(P);
  }
  // No bits are available!
  static constexpr int NumLowBitsAvailable = 0;
};
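
// Example (a minimal sketch; Handle is a hypothetical wrapper, shown only to
// illustrate what a custom specialization must provide):
//
//   struct Handle { void *Ptr; };
//   template <> struct PointerLikeTypeTraits<Handle> {
//     static void *getAsVoidPointer(Handle H) { return H.Ptr; }
//     static Handle getFromVoidPointer(void *P) { return Handle{P}; }
//     // Assumes the wrapped pointer is always 8-byte aligned, so the low
//     // three bits are free for tags.
//     static constexpr int NumLowBitsAvailable = 3;
//   };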

/// Provide suitable custom traits struct for function pointers.
///
/// Function pointers can't be directly given these traits as functions can't
/// have their alignment computed with `alignof` and we need different casting.
///
/// To rely on higher alignment for a specialized use, you can provide a
/// customized form of this template explicitly with higher alignment, and
/// potentially use alignment attributes on functions to satisfy that.
template <int Alignment, typename FunctionPointerT>
struct FunctionPointerLikeTypeTraits {
  static constexpr int NumLowBitsAvailable =
      detail::ConstantLog2<Alignment>::value;
  static inline void *getAsVoidPointer(FunctionPointerT P) {
    assert((reinterpret_cast<uintptr_t>(P) &
            ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 &&
           "Alignment not satisfied for an actual function pointer!");
    return reinterpret_cast<void *>(P);
  }
  static inline FunctionPointerT getFromVoidPointer(void *P) {
    return reinterpret_cast<FunctionPointerT>(P);
  }
};

/// Provide a default specialization for function pointers that assumes 4-byte
/// alignment.
///
/// We assume here that functions used with this are always at least 4-byte
/// aligned. This means that, for example, thumb functions and systems with
/// weird unaligned function pointers won't work. But all practical systems
/// we support satisfy this requirement.
template <typename ReturnT, typename... ParamTs>
struct PointerLikeTypeTraits<ReturnT (*)(ParamTs...)>
    : FunctionPointerLikeTypeTraits<4, ReturnT (*)(ParamTs...)> {};

} // end namespace llvm

#endif
Support/ExtensibleRTTI.h
//===-- llvm/Support/ExtensibleRTTI.h - ExtensibleRTTI support --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
//
// Defines an extensible RTTI mechanism designed to work with Casting.h.
//
// Extensible RTTI differs from LLVM's primary RTTI mechanism (see
// llvm.org/docs/HowToSetUpLLVMStyleRTTI.html) by supporting open type
// hierarchies, where new types can be added from outside libraries without
// needing to change existing code. LLVM's primary RTTI mechanism should be
// preferred where possible, but where open hierarchies are needed this system
// can be used.
//
// The RTTIRoot class defines methods for comparing type ids. Implementations
// of these methods can be injected into new classes using the RTTIExtends
// class template.
//
// E.g.
//
//   @code{.cpp}
//   class MyBaseClass : public RTTIExtends<MyBaseClass, RTTIRoot> {
//   public:
//     static char ID;
//     virtual void foo() = 0;
//   };
//
//   class MyDerivedClass1 : public RTTIExtends<MyDerivedClass1, MyBaseClass> {
//   public:
//     static char ID;
//     void foo() override {}
//   };
//
//   class MyDerivedClass2 : public RTTIExtends<MyDerivedClass2, MyBaseClass> {
//   public:
//     static char ID;
//     void foo() override {}
//   };
//
//   char MyBaseClass::ID = 0;
//   char MyDerivedClass1::ID = 0;
//   char MyDerivedClass2::ID = 0;
//
//   void fn() {
//     std::unique_ptr<MyBaseClass> B = std::make_unique<MyDerivedClass1>();
//     llvm::outs() << isa<MyBaseClass>(B) << "\n"; // Outputs "1".
//     llvm::outs() << isa<MyDerivedClass1>(B) << "\n"; // Outputs "1".
//     llvm::outs() << isa<MyDerivedClass2>(B) << "\n"; // Outputs "0".
//   }
//
//   @endcode
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_EXTENSIBLERTTI_H
#define LLVM_SUPPORT_EXTENSIBLERTTI_H

namespace llvm {

/// Base class for the extensible RTTI hierarchy.
///
/// This class defines virtual methods, dynamicClassID and isA, that enable
/// type comparisons.
class RTTIRoot {
public:
  virtual ~RTTIRoot() = default;

  /// Returns the class ID for this type.
  static const void *classID() { return &ID; }

  /// Returns the class ID for the dynamic type of this RTTIRoot instance.
  virtual const void *dynamicClassID() const = 0;

  /// Returns true if this class's ID matches the given class ID.
  virtual bool isA(const void *const ClassID) const {
    return ClassID == classID();
  }

  /// Check whether this instance is a subclass of QueryT.
  template <typename QueryT>
  bool isA() const { return isA(QueryT::classID()); }

private:
  virtual void anchor();

  static char ID;
};

/// Inheritance utility for extensible RTTI.
///
/// Supports single inheritance only: A class can only have one
/// ExtensibleRTTI-parent (i.e. a parent for which the isa<> test will work),
/// though it can have many non-ExtensibleRTTI parents.
///
/// RTTIExtends uses CRTP, so the first template argument to RTTIExtends is the
/// newly introduced type, and the *second* argument is the parent class.
///
/// class MyType : public RTTIExtends<MyType, RTTIRoot> {
/// public:
///   static char ID;
/// };
///
/// class MyDerivedType : public RTTIExtends<MyDerivedType, MyType> {
/// public:
///   static char ID;
/// };
///
template <typename ThisT, typename ParentT>
class RTTIExtends : public ParentT {
public:
  // Inherit constructors from ParentT.
  using ParentT::ParentT;

  static const void *classID() { return &ThisT::ID; }

  const void *dynamicClassID() const override { return &ThisT::ID; }

  bool isA(const void *const ClassID) const override {
    return ClassID == classID() || ParentT::isA(ClassID);
  }

  static bool classof(const RTTIRoot *R) { return R->isA<ThisT>(); }
};
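
// Because RTTIExtends also provides classof, the standard casting machinery
// from llvm/Support/Casting.h works on these hierarchies. A brief sketch:
//
//   void fn(RTTIRoot &R) {
//     if (auto *D = dyn_cast<MyDerivedType>(&R))
//       /* R's dynamic type is MyDerivedType (or derives from it) */;
//   }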

} // end namespace llvm

#endif // LLVM_SUPPORT_EXTENSIBLERTTI_H
Support/DJB.h
//===-- llvm/Support/DJB.h ---DJB Hash --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for the DJ Bernstein hash function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DJB_H
#define LLVM_SUPPORT_DJB_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

/// The Bernstein hash function used by the DWARF accelerator tables.
inline uint32_t djbHash(StringRef Buffer, uint32_t H = 5381) {
  for (unsigned char C : Buffer.bytes())
    H = (H << 5) + H + C;
  return H;
}
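
/// Worked example: djbHash("a") starts from the default seed 5381 and folds
/// in one byte: (5381 << 5) + 5381 + 'a' = 172192 + 5381 + 97 = 177670.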

/// Computes the Bernstein hash after folding the input according to the Dwarf 5
/// standard case folding rules.
uint32_t caseFoldingDjbHash(StringRef Buffer, uint32_t H = 5381);
} // namespace llvm

#endif // LLVM_SUPPORT_DJB_H
Support/DebugCounter.h
//===- llvm/Support/DebugCounter.h - Debug counter support ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides an implementation of debug counters.  Debug
/// counters are a tool that let you narrow down a miscompilation to a specific
/// thing happening.
///
/// To give a use case: imagine you have a very large file and you are trying
/// to understand the minimal transformation that breaks it. Bugpoint and
/// bisection are often helpful here in narrowing it down to a specific pass,
/// but it's still a very large file, and a very complicated pass to try to
/// debug.  That is where debug counting steps in.  You can instrument the pass
/// with a debug counter before it does a certain thing, and depending on the
/// counts, it will either execute that thing or not.  The debug counter itself
/// consists of a skip and a count.  Skip is the number of times shouldExecute
/// needs to be called before it returns true.  Count is the number of times to
/// return true once Skip is 0.  So skip=47, count=2 would skip the first 47
/// executions by returning false from shouldExecute, then execute twice, and
/// then return false again.
/// Note that a counter set to a negative number will always execute.
/// For a concrete example, during predicateinfo creation, the renaming pass
/// replaces each use with a renamed use.
///
/// If I use DEBUG_COUNTER to create a counter called "predicateinfo", with
/// variable name RenameCounter, and then instrument this renaming with a debug
/// counter, like so:
///
/// if (!DebugCounter::shouldExecute(RenameCounter))
///   <continue or return or whatever not executing looks like>
///
/// Now I can, from the command line, make it rename or not rename certain uses
/// by setting the skip and count.
/// So for example
/// bin/opt -debug-counter=predicateinfo-skip=47,predicateinfo-count=1
/// will skip renaming the first 47 uses, then rename one, then skip the rest.
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DEBUGCOUNTER_H
#define LLVM_SUPPORT_DEBUGCOUNTER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/UniqueVector.h"
#include "llvm/Support/Debug.h"
#include <string>

namespace llvm {

class raw_ostream;

class DebugCounter {
public:
  /// Returns a reference to the singleton instance.
  static DebugCounter &instance();

  // Used by the command line option parser to push a new value it parsed.
  void push_back(const std::string &);

  // Register a counter with the specified name.
  //
  // FIXME: Currently, counter registration is required to happen before command
  // line option parsing. The main reason to register counters is to produce a
  // nice list of them on the command line, but I'm not sure this is worth it.
  static unsigned registerCounter(StringRef Name, StringRef Desc) {
    return instance().addCounter(std::string(Name), std::string(Desc));
  }
  inline static bool shouldExecute(unsigned CounterName) {
    if (!isCountingEnabled())
      return true;

    auto &Us = instance();
    auto Result = Us.Counters.find(CounterName);
    if (Result != Us.Counters.end()) {
      auto &CounterInfo = Result->second;
      ++CounterInfo.Count;

      // We skip while Count has not yet exceeded Skip, and execute while
      // Count is still at most Skip + StopAfter.
      // Negative Skip or StopAfter values always execute.
      if (CounterInfo.Skip < 0)
        return true;
      if (CounterInfo.Skip >= CounterInfo.Count)
        return false;
      if (CounterInfo.StopAfter < 0)
        return true;
      return CounterInfo.StopAfter + CounterInfo.Skip >= CounterInfo.Count;
    }
    // Didn't find the counter, should we warn?
    return true;
  }

  // Return true if a given counter had values set (either programmatically or on
  // the command line).  This will return true even if those values are
  // currently in a state where the counter will always execute.
  static bool isCounterSet(unsigned ID) {
    return instance().Counters[ID].IsSet;
  }

  // Return the Count for a counter. This only works for set counters.
  static int64_t getCounterValue(unsigned ID) {
    auto &Us = instance();
    auto Result = Us.Counters.find(ID);
    assert(Result != Us.Counters.end() && "Asking about a non-set counter");
    return Result->second.Count;
  }

  // Set a registered counter to a given Count value.
  static void setCounterValue(unsigned ID, int64_t Count) {
    auto &Us = instance();
    Us.Counters[ID].Count = Count;
  }

  // Dump or print the current counter set into llvm::dbgs().
  LLVM_DUMP_METHOD void dump() const;

  void print(raw_ostream &OS) const;

  // Get the counter ID for a given named counter, or return 0 if none is found.
  unsigned getCounterId(const std::string &Name) const {
    return RegisteredCounters.idFor(Name);
  }

  // Return the number of registered counters.
  unsigned int getNumCounters() const { return RegisteredCounters.size(); }

  // Return the name and description of the counter with the given ID.
  std::pair<std::string, std::string> getCounterInfo(unsigned ID) const {
    return std::make_pair(RegisteredCounters[ID], Counters.lookup(ID).Desc);
  }

  // Iterate through the registered counters
  typedef UniqueVector<std::string> CounterVector;
  CounterVector::const_iterator begin() const {
    return RegisteredCounters.begin();
  }
  CounterVector::const_iterator end() const { return RegisteredCounters.end(); }

  // Force-enables counting all DebugCounters.
  //
  // Since DebugCounters are incompatible with threading (not only do they not
  // make sense, but we'll also see data races), this should only be used in
  // contexts where we're certain we won't spawn threads.
  static void enableAllCounters() { instance().Enabled = true; }

  static bool isCountingEnabled() {
// Compile to nothing when debugging is off
#ifdef NDEBUG
    return false;
#else
    return instance().Enabled;
#endif
  }

private:
  unsigned addCounter(const std::string &Name, const std::string &Desc) {
    unsigned Result = RegisteredCounters.insert(Name);
    Counters[Result] = {};
    Counters[Result].Desc = Desc;
    return Result;
  }
  // Struct to store counter info.
  struct CounterInfo {
    int64_t Count = 0;
    int64_t Skip = 0;
    int64_t StopAfter = -1;
    bool IsSet = false;
    std::string Desc;
  };
  DenseMap<unsigned, CounterInfo> Counters;
  CounterVector RegisteredCounters;

  // Whether we should do DebugCounting at all. DebugCounters aren't
  // thread-safe, so this should always be false in multithreaded scenarios.
  bool Enabled = false;
};

#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)                              \
  static const unsigned VARNAME =                                              \
      DebugCounter::registerCounter(COUNTERNAME, DESC)
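
// Example usage (a minimal sketch; the counter and variable names are
// hypothetical and mirror the predicateinfo example in the file comment):
//
//   DEBUG_COUNTER(RenameCounter, "predicateinfo",
//                 "Controls which uses get renamed");
//
//   void renameUse(...) {
//     if (!DebugCounter::shouldExecute(RenameCounter))
//       return; // Skipped, e.g. via -debug-counter=predicateinfo-skip=47.
//     // ... perform the rename ...
//   }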

} // namespace llvm
#endif
Support/CFGDiff.h
//===- CFGDiff.h - Define a CFG snapshot. -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines specializations of GraphTraits that allows generic
// algorithms to see a different snapshot of a CFG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CFGDIFF_H
#define LLVM_SUPPORT_CFGDIFF_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/CFGUpdate.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
#include <iterator>

// Two booleans are used to define orders in graphs:
// InverseGraph defines when we need to reverse the whole graph and is, as
// such, also equivalent to applying updates in reverse.
// InverseEdge defines whether we want to change the edges' direction. E.g., for
// a non-inversed graph, the children are naturally the successors when
// InverseEdge is false and the predecessors when InverseEdge is true.

namespace llvm {

namespace detail {
template <typename Range>
auto reverse_if_helper(Range &&R, std::integral_constant<bool, false>) {
  return std::forward<Range>(R);
}

template <typename Range>
auto reverse_if_helper(Range &&R, std::integral_constant<bool, true>) {
  return llvm::reverse(std::forward<Range>(R));
}

template <bool B, typename Range> auto reverse_if(Range &&R) {
  return reverse_if_helper(std::forward<Range>(R),
                           std::integral_constant<bool, B>{});
}
} // namespace detail

// GraphDiff defines a CFG snapshot: given a set of Update<NodePtr>, provides
// a getChildren method to get a Node's children based on the additional updates
// in the snapshot. The current diff treats the CFG as a graph rather than a
// multigraph. Added edges are pruned to be unique, and deleted edges will
// remove all existing edges between two blocks.
template <typename NodePtr, bool InverseGraph = false> class GraphDiff {
  struct DeletesInserts {
    SmallVector<NodePtr, 2> DI[2];
  };
  using UpdateMapType = SmallDenseMap<NodePtr, DeletesInserts>;
  UpdateMapType Succ;
  UpdateMapType Pred;

  // By default, it is assumed that, given a CFG and a set of updates, we wish
  // to apply these updates as given. If UpdatedAreReverseApplied is set, the
  // updates will be applied in reverse: deleted edges are considered re-added
  // and inserted edges are considered deleted when returning children.
  bool UpdatedAreReverseApplied;

  // Keep the list of legalized updates for a deterministic order of updates
  // when using a GraphDiff for incremental updates in the DominatorTree.
  // The list is kept in reverse to allow popping from end.
  SmallVector<cfg::Update<NodePtr>, 4> LegalizedUpdates;

  void printMap(raw_ostream &OS, const UpdateMapType &M) const {
    StringRef DIText[2] = {"Delete", "Insert"};
    for (auto Pair : M) {
      for (unsigned IsInsert = 0; IsInsert <= 1; ++IsInsert) {
        OS << DIText[IsInsert] << " edges: \n";
        for (auto Child : Pair.second.DI[IsInsert]) {
          OS << "(";
          Pair.first->printAsOperand(OS, false);
          OS << ", ";
          Child->printAsOperand(OS, false);
          OS << ") ";
        }
      }
    }
    OS << "\n";
  }

public:
  GraphDiff() : UpdatedAreReverseApplied(false) {}
  GraphDiff(ArrayRef<cfg::Update<NodePtr>> Updates,
            bool ReverseApplyUpdates = false) {
    cfg::LegalizeUpdates<NodePtr>(Updates, LegalizedUpdates, InverseGraph);
    for (auto U : LegalizedUpdates) {
      unsigned IsInsert =
          (U.getKind() == cfg::UpdateKind::Insert) == !ReverseApplyUpdates;
      Succ[U.getFrom()].DI[IsInsert].push_back(U.getTo());
      Pred[U.getTo()].DI[IsInsert].push_back(U.getFrom());
    }
    UpdatedAreReverseApplied = ReverseApplyUpdates;
  }

  auto getLegalizedUpdates() const {
    return make_range(LegalizedUpdates.begin(), LegalizedUpdates.end());
  }

  unsigned getNumLegalizedUpdates() const { return LegalizedUpdates.size(); }

  cfg::Update<NodePtr> popUpdateForIncrementalUpdates() {
    assert(!LegalizedUpdates.empty() && "No updates to apply!");
    auto U = LegalizedUpdates.pop_back_val();
    unsigned IsInsert =
        (U.getKind() == cfg::UpdateKind::Insert) == !UpdatedAreReverseApplied;
    auto &SuccDIList = Succ[U.getFrom()];
    auto &SuccList = SuccDIList.DI[IsInsert];
    assert(SuccList.back() == U.getTo());
    SuccList.pop_back();
    if (SuccList.empty() && SuccDIList.DI[!IsInsert].empty())
      Succ.erase(U.getFrom());

    auto &PredDIList = Pred[U.getTo()];
    auto &PredList = PredDIList.DI[IsInsert];
    assert(PredList.back() == U.getFrom());
    PredList.pop_back();
    if (PredList.empty() && PredDIList.DI[!IsInsert].empty())
      Pred.erase(U.getTo());
    return U;
  }

  using VectRet = SmallVector<NodePtr, 8>;
  template <bool InverseEdge> VectRet getChildren(NodePtr N) const {
    using DirectedNodeT =
        std::conditional_t<InverseEdge, Inverse<NodePtr>, NodePtr>;
    auto R = children<DirectedNodeT>(N);
    VectRet Res = VectRet(detail::reverse_if<!InverseEdge>(R));

    // Remove nullptr children for clang.
    llvm::erase_value(Res, nullptr);

    auto &Children = (InverseEdge != InverseGraph) ? Pred : Succ;
    auto It = Children.find(N);
    if (It == Children.end())
      return Res;

    // Remove children present in the CFG but not in the snapshot.
    for (auto *Child : It->second.DI[0])
      llvm::erase_value(Res, Child);

    // Add children present in the snapshot but not in the real CFG.
    auto &AddedChildren = It->second.DI[1];
    llvm::append_range(Res, AddedChildren);

    return Res;
  }

  void print(raw_ostream &OS) const {
    OS << "===== GraphDiff: CFG edge changes to create a CFG snapshot. \n"
          "===== (Note: notion of children/inverse_children depends on "
          "the direction of edges and the graph.)\n";
    OS << "Children to delete/insert:\n\t";
    printMap(OS, Succ);
    OS << "Inverse_children to delete/insert:\n\t";
    printMap(OS, Pred);
    OS << "\n";
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const { print(dbgs()); }
#endif
};
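
// Example usage (a minimal sketch; BB0 and BB1 are hypothetical BasicBlock
// pointers):
//
//   SmallVector<cfg::Update<BasicBlock *>, 2> Updates;
//   Updates.push_back({cfg::UpdateKind::Delete, BB0, BB1});
//   GraphDiff<BasicBlock *> Diff(Updates);
//   // Successors of BB0 in the snapshot: the real CFG successors minus the
//   // deleted edge to BB1.
//   auto Succs = Diff.getChildren</*InverseEdge=*/false>(BB0);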
} // end namespace llvm

#endif // LLVM_SUPPORT_CFGDIFF_H
Support/Debug.h
//===- llvm/Support/Debug.h - Easy way to add debug output ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a handy way of adding debugging information to your
// code, without it being enabled all of the time, and without having to add
// command line options to enable it.
//
// In particular, just wrap your code with the LLVM_DEBUG() macro, and it will
// be enabled automatically if you specify '-debug' on the command-line.
// LLVM_DEBUG() requires the DEBUG_TYPE macro to be defined. Set it to "foo" to
// specify that your debug code belongs to class "foo". Be careful that you only
// do this after including Debug.h and not around any #include of headers.
// Headers should define and undef the macro around the code that needs to use
// the LLVM_DEBUG() macro. Then, on the command line, you can specify
// '-debug-only=foo' to enable JUST the debug information for the foo class.
//
// When compiling without assertions, the -debug-* options and all code in
// LLVM_DEBUG() statements disappears, so it does not affect the runtime of the
// code.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DEBUG_H
#define LLVM_SUPPORT_DEBUG_H

namespace llvm {

class raw_ostream;

#ifndef NDEBUG

/// isCurrentDebugType - Return true if the specified string is the debug type
/// specified on the command line, or if none was specified on the command line
/// with the -debug-only=X option.
///
bool isCurrentDebugType(const char *Type);

/// setCurrentDebugType - Set the current debug type, as if the -debug-only=X
/// option were specified.  Note that DebugFlag also needs to be set to true for
/// debug output to be produced.
///
void setCurrentDebugType(const char *Type);

/// setCurrentDebugTypes - Set the current debug type, as if the
/// -debug-only=X,Y,Z option were specified. Note that DebugFlag
/// also needs to be set to true for debug output to be produced.
///
void setCurrentDebugTypes(const char **Types, unsigned Count);

/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
/// information.  If the '-debug' option is specified on the command line, and if
/// this is a debug build, then the code specified as the option to the macro
/// will be executed.  Otherwise it will not be.  Example:
///
/// DEBUG_WITH_TYPE("bitset", dbgs() << "Bitset contains: " << Bitset << "\n");
///
/// This will emit the debug information if -debug is present, and -debug-only
/// is not specified, or is specified as "bitset".
#define DEBUG_WITH_TYPE(TYPE, X)                                        \
  do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(TYPE)) { X; } \
  } while (false)

#else
#define isCurrentDebugType(X) (false)
#define setCurrentDebugType(X) do { (void)(X); } while (false)
#define setCurrentDebugTypes(X, N) do { (void)(X); (void)(N); } while (false)
#define DEBUG_WITH_TYPE(TYPE, X) do { } while (false)
#endif

/// This boolean is set to true if the '-debug' command line option
/// is specified.  This should probably not be referenced directly, instead, use
/// the DEBUG macro below.
///
extern bool DebugFlag;

/// EnableDebugBuffering - This defaults to false.  If true, the debug
/// stream will install signal handlers to dump any buffered debug
/// output.  It allows clients to selectively allow the debug stream
/// to install signal handlers if they are certain there will be no
/// conflict.
///
extern bool EnableDebugBuffering;

/// dbgs() - This returns a reference to a raw_ostream for debugging
/// messages.  If debugging is disabled it returns errs().  Use it
/// like: dbgs() << "foo" << "bar";
raw_ostream &dbgs();

// LLVM_DEBUG macro - This macro should be used by passes to emit debug
// information.  If the '-debug' option is specified on the command line, and
// if this is a debug build, then the code specified as the option to the
// macro will be executed.  Otherwise it will not be.  Example:
//
// LLVM_DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
//
#define LLVM_DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
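
// Example usage (a brief sketch):
//
//   #define DEBUG_TYPE "foo"
//   ...
//   LLVM_DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
//   ...
//   #undef DEBUG_TYPE
//
// With assertions enabled, running with '-debug' (or '-debug-only=foo')
// prints the message; without assertions the statement compiles away.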

} // end namespace llvm

#endif // LLVM_SUPPORT_DEBUG_H
Support/CodeGen.h
//===-- llvm/Support/CodeGen.h - CodeGen Concepts ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some types which represent code generation concepts. For
// example, relocation model.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CODEGEN_H
#define LLVM_SUPPORT_CODEGEN_H

#include <cstdint>
#include <optional>

namespace llvm {

  // Relocation model types.
  namespace Reloc {
    // Cannot be named PIC due to collision with -DPIC
    enum Model { Static, PIC_, DynamicNoPIC, ROPI, RWPI, ROPI_RWPI };
  }

  // Code model types.
  namespace CodeModel {
    // Sync changes with CodeGenCWrappers.h.
    enum Model { Tiny, Small, Kernel, Medium, Large };
  }

  namespace PICLevel {
    // This is used to map -fpic/-fPIC.
    enum Level { NotPIC=0, SmallPIC=1, BigPIC=2 };
  }

  namespace PIELevel {
    enum Level { Default=0, Small=1, Large=2 };
  }

  // TLS models.
  namespace TLSModel {
    enum Model {
      GeneralDynamic,
      LocalDynamic,
      InitialExec,
      LocalExec
    };
  }

  namespace CodeGenOpt {
  /// Type for the unique integer IDs of code generation optimization levels.
  using IDType = int;
  /// Code generation optimization level.
  enum Level : IDType {
    None = 0,      ///< -O0
    Less = 1,      ///< -O1
    Default = 2,   ///< -O2, -Os
    Aggressive = 3 ///< -O3
  };
  /// Get the \c Level identified by the integer \p ID.
  ///
  /// Returns std::nullopt if \p ID is invalid.
  inline std::optional<Level> getLevel(IDType ID) {
    if (ID < 0 || ID > 3)
      return std::nullopt;
    return static_cast<Level>(ID);
  }
  /// Parse \p C as a single digit integer ID and get matching \c Level.
  ///
  /// Returns std::nullopt if the input is not a valid digit or not a valid ID.
  inline std::optional<Level> parseLevel(char C) {
    if (C < '0')
      return std::nullopt;
    return getLevel(static_cast<IDType>(C - '0'));
  }
  } // namespace CodeGenOpt
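
  // Worked example: parseLevel('2') yields CodeGenOpt::Default and
  // parseLevel('9') yields std::nullopt, so option handling can be as simple
  // as (useLevel is a hypothetical consumer):
  //
  //   if (std::optional<CodeGenOpt::Level> L = CodeGenOpt::parseLevel(C))
  //     useLevel(*L);
  //   else
  //     /* report the invalid level */;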

  /// These enums are meant to be passed into addPassesToEmitFile to indicate
  /// what type of file to emit, and returned by it to indicate what type of
  /// file could actually be made.
  enum CodeGenFileType {
    CGFT_AssemblyFile,
    CGFT_ObjectFile,
    CGFT_Null         // Do not emit any output.
  };

  // Specify what functions should keep the frame pointer.
  enum class FramePointerKind { None, NonLeaf, All };

  // Specify what type of zeroing callee-used registers.
  namespace ZeroCallUsedRegs {
  const unsigned ONLY_USED = 1U << 1;
  const unsigned ONLY_GPR = 1U << 2;
  const unsigned ONLY_ARG = 1U << 3;

  enum class ZeroCallUsedRegsKind : unsigned int {
    // Don't zero any call-used regs.
    Skip = 1U << 0,
    // Only zeros call-used GPRs used in the fn and pass args.
    UsedGPRArg = ONLY_USED | ONLY_GPR | ONLY_ARG,
    // Only zeros call-used GPRs used in the fn.
    UsedGPR = ONLY_USED | ONLY_GPR,
    // Only zeros call-used regs used in the fn and pass args.
    UsedArg = ONLY_USED | ONLY_ARG,
    // Only zeros call-used regs used in the fn.
    Used = ONLY_USED,
    // Zeros all call-used GPRs that pass args.
    AllGPRArg = ONLY_GPR | ONLY_ARG,
    // Zeros all call-used GPRs.
    AllGPR = ONLY_GPR,
    // Zeros all call-used regs that pass args.
    AllArg = ONLY_ARG,
    // Zeros all call-used regs.
    All = 0,
  };
  } // namespace ZeroCallUsedRegs

  enum class UWTableKind {
    None = 0,  ///< No unwind table requested
    Sync = 1,  ///< "Synchronous" unwind tables
    Async = 2, ///< "Asynchronous" unwind tables (instr precise)
    Default = 2,
  };

  enum class FunctionReturnThunksKind : unsigned int {
    Keep = 0,    ///< No function return thunk.
    Extern = 1,  ///< Replace returns with jump to thunk, don't emit thunk.
    Invalid = 2, ///< Not used.
  };

  } // namespace llvm

#endif
Support/BinaryByteStream.h
//===- BinaryByteStream.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
// A BinaryStream which stores data in a single contiguous memory buffer.
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYBYTESTREAM_H
#define LLVM_SUPPORT_BINARYBYTESTREAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileOutputBuffer.h"
#include "llvm/Support/MemoryBuffer.h"
#include <cstdint>
#include <cstring>
#include <memory>

namespace llvm {

/// An implementation of BinaryStream which holds its entire data set
/// in a single contiguous buffer.  BinaryByteStream guarantees that no read
/// operation will ever incur a copy.  Note that BinaryByteStream does not
/// own the underlying buffer.
class BinaryByteStream : public BinaryStream {
public:
  BinaryByteStream() = default;
  BinaryByteStream(ArrayRef<uint8_t> Data, llvm::support::endianness Endian)
      : Endian(Endian), Data(Data) {}
  BinaryByteStream(StringRef Data, llvm::support::endianness Endian)
      : Endian(Endian), Data(Data.bytes_begin(), Data.bytes_end()) {}

  llvm::support::endianness getEndian() const override { return Endian; }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override {
    if (auto EC = checkOffsetForRead(Offset, Size))
      return EC;
    Buffer = Data.slice(Offset, Size);
    return Error::success();
  }

  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override {
    if (auto EC = checkOffsetForRead(Offset, 1))
      return EC;
    Buffer = Data.slice(Offset);
    return Error::success();
  }

  uint64_t getLength() override { return Data.size(); }

  ArrayRef<uint8_t> data() const { return Data; }

  StringRef str() const {
    const char *CharData = reinterpret_cast<const char *>(Data.data());
    return StringRef(CharData, Data.size());
  }

protected:
  llvm::support::endianness Endian;
  ArrayRef<uint8_t> Data;
};
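
// Example usage (a minimal sketch): wrap an existing buffer and read from it
// without copying.
//
//   uint8_t Bytes[] = {1, 2, 3, 4, 5};
//   BinaryByteStream Stream(Bytes, llvm::support::little);
//   ArrayRef<uint8_t> View;
//   if (Error E = Stream.readBytes(/*Offset=*/1, /*Size=*/3, View))
//     /* handle the error */;
//   // View now refers to {2, 3, 4} inside Bytes; nothing was copied.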

/// An implementation of BinaryStream whose data is backed by an llvm
/// MemoryBuffer object.  MemoryBufferByteStream owns the MemoryBuffer in
/// question.  As with BinaryByteStream, reading from a MemoryBufferByteStream
/// will never cause a copy.
class MemoryBufferByteStream : public BinaryByteStream {
public:
  MemoryBufferByteStream(std::unique_ptr<MemoryBuffer> Buffer,
                         llvm::support::endianness Endian)
      : BinaryByteStream(Buffer->getBuffer(), Endian),
        MemBuffer(std::move(Buffer)) {}

  std::unique_ptr<MemoryBuffer> MemBuffer;
};

/// An implementation of BinaryStream which holds its entire data set
/// in a single contiguous buffer.  As with BinaryByteStream, the mutable
/// version also guarantees that no read operation will ever incur a copy,
/// and similarly it does not own the underlying buffer.
class MutableBinaryByteStream : public WritableBinaryStream {
public:
  MutableBinaryByteStream() = default;
  MutableBinaryByteStream(MutableArrayRef<uint8_t> Data,
                          llvm::support::endianness Endian)
      : Data(Data), ImmutableStream(Data, Endian) {}

  llvm::support::endianness getEndian() const override {
    return ImmutableStream.getEndian();
  }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override {
    return ImmutableStream.readBytes(Offset, Size, Buffer);
  }

  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override {
    return ImmutableStream.readLongestContiguousChunk(Offset, Buffer);
  }

  uint64_t getLength() override { return ImmutableStream.getLength(); }

  Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Buffer) override {
    if (Buffer.empty())
      return Error::success();

    if (auto EC = checkOffsetForWrite(Offset, Buffer.size()))
      return EC;

    uint8_t *DataPtr = const_cast<uint8_t *>(Data.data());
    ::memcpy(DataPtr + Offset, Buffer.data(), Buffer.size());
    return Error::success();
  }

  Error commit() override { return Error::success(); }

  MutableArrayRef<uint8_t> data() const { return Data; }

private:
  MutableArrayRef<uint8_t> Data;
  BinaryByteStream ImmutableStream;
};

/// An implementation of WritableBinaryStream which can write at its end
/// causing the underlying data to grow.  This class owns the underlying data.
class AppendingBinaryByteStream : public WritableBinaryStream {
  std::vector<uint8_t> Data;
  llvm::support::endianness Endian = llvm::support::little;

public:
  AppendingBinaryByteStream() = default;
  AppendingBinaryByteStream(llvm::support::endianness Endian)
      : Endian(Endian) {}

  void clear() { Data.clear(); }

  llvm::support::endianness getEndian() const override { return Endian; }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override {
    if (auto EC = checkOffsetForRead(Offset, Size))
      return EC;

    Buffer = ArrayRef(Data).slice(Offset, Size);
    return Error::success();
  }

  void insert(uint64_t Offset, ArrayRef<uint8_t> Bytes) {
    Data.insert(Data.begin() + Offset, Bytes.begin(), Bytes.end());
  }

  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override {
    if (auto EC = checkOffsetForRead(Offset, 1))
      return EC;

    Buffer = ArrayRef(Data).slice(Offset);
    return Error::success();
  }

  uint64_t getLength() override { return Data.size(); }

  Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Buffer) override {
    if (Buffer.empty())
      return Error::success();

    // This is well-defined for any case except where offset is strictly
    // greater than the current length.  If offset is equal to the current
    // length, we can still grow.  If offset is beyond the current length, we
    // would have to decide how to deal with the intermediate uninitialized
    // bytes.  So we punt on that case for simplicity and just say it's an
    // error.
    if (Offset > getLength())
      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);

    uint64_t RequiredSize = Offset + Buffer.size();
    if (RequiredSize > Data.size())
      Data.resize(RequiredSize);

    ::memcpy(Data.data() + Offset, Buffer.data(), Buffer.size());
    return Error::success();
  }

  Error commit() override { return Error::success(); }

  /// Return the properties of this stream.
  BinaryStreamFlags getFlags() const override { return BSF_Write | BSF_Append; }

  MutableArrayRef<uint8_t> data() { return Data; }
};
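
// Example usage (a minimal sketch): writing at the current end grows the
// stream, which is what the BSF_Append flag advertises.
//
//   AppendingBinaryByteStream Stream(llvm::support::little);
//   uint8_t Payload[] = {0xDE, 0xAD};
//   cantFail(Stream.writeBytes(0, Payload));                  // length 0 -> 2
//   cantFail(Stream.writeBytes(Stream.getLength(), Payload)); // length 2 -> 4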

/// An implementation of WritableBinaryStream backed by an llvm
/// FileOutputBuffer.
class FileBufferByteStream : public WritableBinaryStream {
private:
  class StreamImpl : public MutableBinaryByteStream {
  public:
    StreamImpl(std::unique_ptr<FileOutputBuffer> Buffer,
               llvm::support::endianness Endian)
        : MutableBinaryByteStream(
              MutableArrayRef<uint8_t>(Buffer->getBufferStart(),
                                       Buffer->getBufferEnd()),
              Endian),
          FileBuffer(std::move(Buffer)) {}

    Error commit() override {
      if (FileBuffer->commit())
        return make_error<BinaryStreamError>(
            stream_error_code::filesystem_error);
      return Error::success();
    }

    /// Returns a pointer to the start of the buffer.
    uint8_t *getBufferStart() const { return FileBuffer->getBufferStart(); }

    /// Returns a pointer to the end of the buffer.
    uint8_t *getBufferEnd() const { return FileBuffer->getBufferEnd(); }

  private:
    std::unique_ptr<FileOutputBuffer> FileBuffer;
  };

public:
  FileBufferByteStream(std::unique_ptr<FileOutputBuffer> Buffer,
                       llvm::support::endianness Endian)
      : Impl(std::move(Buffer), Endian) {}

  llvm::support::endianness getEndian() const override {
    return Impl.getEndian();
  }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override {
    return Impl.readBytes(Offset, Size, Buffer);
  }

  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override {
    return Impl.readLongestContiguousChunk(Offset, Buffer);
  }

  uint64_t getLength() override { return Impl.getLength(); }

  Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Data) override {
    return Impl.writeBytes(Offset, Data);
  }

  Error commit() override { return Impl.commit(); }

  /// Returns a pointer to the start of the buffer.
  uint8_t *getBufferStart() const { return Impl.getBufferStart(); }

  /// Returns a pointer to the end of the buffer.
  uint8_t *getBufferEnd() const { return Impl.getBufferEnd(); }

private:
  StreamImpl Impl;
};

} // end namespace llvm

#endif // LLVM_SUPPORT_BINARYBYTESTREAM_H
Support/FileCollector.h
//===-- FileCollector.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FILECOLLECTOR_H
#define LLVM_SUPPORT_FILECOLLECTOR_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <mutex>
#include <string>

namespace llvm {
class FileCollectorFileSystem;
class Twine;

class FileCollectorBase {
public:
  FileCollectorBase();
  virtual ~FileCollectorBase();

  void addFile(const Twine &file);
  void addDirectory(const Twine &Dir);

protected:
  bool markAsSeen(StringRef Path) {
    if (Path.empty())
      return false;
    return Seen.insert(Path).second;
  }

  virtual void addFileImpl(StringRef SrcPath) = 0;

  virtual llvm::vfs::directory_iterator
  addDirectoryImpl(const llvm::Twine &Dir,
                   IntrusiveRefCntPtr<vfs::FileSystem> FS,
                   std::error_code &EC) = 0;

  /// Synchronizes access to internal data structures.
  std::mutex Mutex;

  /// Tracks already seen files so they can be skipped.
  StringSet<> Seen;
};

/// Captures file system interaction and generates data to be later replayed
/// with the RedirectingFileSystem.
///
/// For any file that gets accessed we eventually create:
/// - a copy of the file inside Root
/// - a record in RedirectingFileSystem mapping that maps:
///   current real path -> path to the copy in Root
///
/// The intent is that later, when the mapping is used by RedirectingFileSystem,
/// it simulates the state of the FS that we collected.
///
/// We generate file copies and mapping lazily - see writeMapping and copyFiles.
/// We don't try to capture the state of the file at the exact time when it's
/// accessed. Files might get changed, deleted ... we record only the "final"
/// state.
///
/// In order to preserve the relative topology of files we use their real paths
/// as relative paths inside of the Root.
class FileCollector : public FileCollectorBase {
public:
  /// Helper utility that encapsulates the logic for canonicalizing a virtual
  /// path and a path to copy from.
  class PathCanonicalizer {
  public:
    struct PathStorage {
      SmallString<256> CopyFrom;
      SmallString<256> VirtualPath;
    };

    /// Canonicalize a pair of virtual and real paths.
    PathStorage canonicalize(StringRef SrcPath);

  private:
    /// Replace with a (mostly) real path, or don't modify. Resolves symlinks
    /// in the directory, using \a CachedDirs to avoid redundant lookups, but
    /// leaves the filename as a possible symlink.
    void updateWithRealPath(SmallVectorImpl<char> &Path);

    StringMap<std::string> CachedDirs;
  };

  /// \p Root is the directory where collected files will be stored.
  /// \p OverlayRoot is the VFS mapping root.
  /// \p Root directory gets created in copyFiles unless it already exists.
  FileCollector(std::string Root, std::string OverlayRoot);

  /// Write the yaml mapping (for the VFS) to the given file.
  std::error_code writeMapping(StringRef MappingFile);

  /// Copy the files into the root directory.
  ///
  /// When StopOnError is true (the default) we abort as soon as one file
  /// cannot be copied. This is relatively common, for example when a file was
  /// removed after it was added to the mapping.
  std::error_code copyFiles(bool StopOnError = true);

  /// Create a VFS that uses \p Collector to collect files accessed via
  /// \p BaseFS.
  static IntrusiveRefCntPtr<vfs::FileSystem>
  createCollectorVFS(IntrusiveRefCntPtr<vfs::FileSystem> BaseFS,
                     std::shared_ptr<FileCollector> Collector);

private:
  friend FileCollectorFileSystem;

  void addFileToMapping(StringRef VirtualPath, StringRef RealPath) {
    if (sys::fs::is_directory(VirtualPath))
      VFSWriter.addDirectoryMapping(VirtualPath, RealPath);
    else
      VFSWriter.addFileMapping(VirtualPath, RealPath);
  }

protected:
  void addFileImpl(StringRef SrcPath) override;

  llvm::vfs::directory_iterator
  addDirectoryImpl(const llvm::Twine &Dir,
                   IntrusiveRefCntPtr<vfs::FileSystem> FS,
                   std::error_code &EC) override;

  /// The directory where collected files are copied to in copyFiles().
  const std::string Root;

  /// The root directory where the VFS overlay lives.
  const std::string OverlayRoot;

  /// The yaml mapping writer.
  vfs::YAMLVFSWriter VFSWriter;

  /// Helper utility for canonicalizing paths.
  PathCanonicalizer Canonicalizer;
};

} // end namespace llvm

#endif // LLVM_SUPPORT_FILECOLLECTOR_H
//===- llvm/Support/FileUtilities.h - File System Utilities -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a family of utility functions which are useful for doing
// various things with files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FILEUTILITIES_H
#define LLVM_SUPPORT_FILEUTILITIES_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"

#include <system_error>

namespace llvm {

  /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if
  /// the files match, 1 if they are different, and 2 if there is a file error.
  /// This function allows you to specify an absolute and relative FP error that
  /// is allowed to exist.  If you specify a string to fill in for the error
  /// option, it will set the string to an error message if an error occurs, or
  /// if the files are different.
  ///
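  /// Example (an illustrative sketch; the file names are placeholders):
  /// \code
  ///   std::string Error;
  ///   int Res = DiffFilesWithTolerance("expected.txt", "actual.txt",
  ///                                    /*AbsTol=*/1e-9, /*RelTol=*/1e-6,
  ///                                    &Error);
  ///   if (Res != 0)
  ///     errs() << Error << "\n";
  /// \endcode
  ///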
  int DiffFilesWithTolerance(StringRef FileA,
                             StringRef FileB,
                             double AbsTol, double RelTol,
                             std::string *Error = nullptr);


  /// FileRemover - This class is a simple object meant to be stack allocated.
  /// When the object goes out of scope (including via an exception), it
  /// removes the file specified (if deleteIt is true).
  ///
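  /// A typical use (an illustrative sketch; writeOutput is a hypothetical
  /// helper):
  /// \code
  ///   FileRemover Remover("output.tmp");
  ///   if (!writeOutput("output.tmp"))
  ///     return false;            // output.tmp is removed here
  ///   Remover.releaseFile();     // success: keep the file
  ///   return true;
  /// \endcode
  ///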
  class FileRemover {
    SmallString<128> Filename;
    bool DeleteIt;
  public:
    FileRemover() : DeleteIt(false) {}

    explicit FileRemover(const Twine& filename, bool deleteIt = true)
      : DeleteIt(deleteIt) {
      filename.toVector(Filename);
    }

    ~FileRemover() {
      if (DeleteIt) {
        // Ignore problems deleting the file.
        sys::fs::remove(Filename);
      }
    }

    /// setFile - Give ownership of the file to the FileRemover so it will
    /// be removed when the object is destroyed.  If the FileRemover already
    /// had ownership of a file, remove it first.
    void setFile(const Twine& filename, bool deleteIt = true) {
      if (DeleteIt) {
        // Ignore problems deleting the file.
        sys::fs::remove(Filename);
      }

      Filename.clear();
      filename.toVector(Filename);
      DeleteIt = deleteIt;
    }

    /// releaseFile - Take ownership of the file away from the FileRemover so it
    /// will not be removed when the object is destroyed.
    void releaseFile() { DeleteIt = false; }
  };

  /// FilePermissionsApplier helps to copy permissions from an input file to
  /// an output one. It records the status of the input file and can apply
  /// permissions and dates to the output file.
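  ///
  /// Example (an illustrative sketch; the file names are placeholders):
  /// \code
  ///   Expected<FilePermissionsApplier> Applier =
  ///       FilePermissionsApplier::create("input.o");
  ///   if (!Applier)
  ///     return Applier.takeError();
  ///   if (Error E = Applier->apply("output.o", /*CopyDates=*/true))
  ///     return E;
  /// \endcode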
  class FilePermissionsApplier {
  public:
    static Expected<FilePermissionsApplier> create(StringRef InputFilename);

    /// Apply stored permissions to the \p OutputFilename.
    /// Copy LastAccess and ModificationTime if \p CopyDates is true.
    /// Overwrite stored permissions if \p OverwritePermissions is specified.
    Error
    apply(StringRef OutputFilename, bool CopyDates = false,
          std::optional<sys::fs::perms> OverwritePermissions = std::nullopt);

  private:
    FilePermissionsApplier(StringRef InputFilename, sys::fs::file_status Status)
        : InputFilename(InputFilename), InputStatus(Status) {}

    StringRef InputFilename;
    sys::fs::file_status InputStatus;
  };
} // End llvm namespace

#endif
//===- llvm/Support/ErrorOr.h - Error Smart Pointer -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// Provides ErrorOr<T> smart pointer.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERROROR_H
#define LLVM_SUPPORT_ERROROR_H

#include "llvm/Support/AlignOf.h"
#include <cassert>
#include <system_error>
#include <type_traits>
#include <utility>

namespace llvm {

/// Represents either an error or a value T.
///
/// ErrorOr<T> is a pointer-like class that represents the result of an
/// operation. The result is either an error, or a value of type T. This is
/// designed to emulate the usage of returning a pointer where nullptr indicates
/// failure. However, instead of just knowing that the operation failed, we
/// also have an error_code and optional user data that describe why it failed.
///
/// It is used like the following:
/// \code
///   ErrorOr<Buffer> getBuffer();
///
///   auto buffer = getBuffer();
///   if (error_code ec = buffer.getError())
///     return ec;
///   buffer->write("adena");
/// \endcode
///
/// Implicit conversion to bool returns true if there is a usable value. The
/// unary * and -> operators provide pointer like access to the value. Accessing
/// the value when there is an error has undefined behavior.
///
/// When T is a reference type the behavior is slightly different. The reference
/// is held in a std::reference_wrapper<std::remove_reference<T>::type>, and
/// there is special handling to make operator -> work as if T was not a
/// reference.
///
/// T cannot be an rvalue reference.
template<class T>
class ErrorOr {
  template <class OtherT> friend class ErrorOr;

  static constexpr bool isRef = std::is_reference_v<T>;

  using wrap = std::reference_wrapper<std::remove_reference_t<T>>;

public:
  using storage_type = std::conditional_t<isRef, wrap, T>;

private:
  using reference = std::remove_reference_t<T> &;
  using const_reference = const std::remove_reference_t<T> &;
  using pointer = std::remove_reference_t<T> *;
  using const_pointer = const std::remove_reference_t<T> *;

public:
  template <class E>
  ErrorOr(E ErrorCode,
          std::enable_if_t<std::is_error_code_enum<E>::value ||
                               std::is_error_condition_enum<E>::value,
                           void *> = nullptr)
      : HasError(true) {
    new (getErrorStorage()) std::error_code(make_error_code(ErrorCode));
  }

  ErrorOr(std::error_code EC) : HasError(true) {
    new (getErrorStorage()) std::error_code(EC);
  }

  template <class OtherT>
  ErrorOr(OtherT &&Val,
          std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr)
      : HasError(false) {
    new (getStorage()) storage_type(std::forward<OtherT>(Val));
  }

  ErrorOr(const ErrorOr &Other) {
    copyConstruct(Other);
  }

  template <class OtherT>
  ErrorOr(const ErrorOr<OtherT> &Other,
          std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr) {
    copyConstruct(Other);
  }

  template <class OtherT>
  explicit ErrorOr(
      const ErrorOr<OtherT> &Other,
      std::enable_if_t<!std::is_convertible_v<OtherT, const T &>> * = nullptr) {
    copyConstruct(Other);
  }

  ErrorOr(ErrorOr &&Other) {
    moveConstruct(std::move(Other));
  }

  template <class OtherT>
  ErrorOr(ErrorOr<OtherT> &&Other,
          std::enable_if_t<std::is_convertible_v<OtherT, T>> * = nullptr) {
    moveConstruct(std::move(Other));
  }

  // This might eventually need SFINAE, but it's more complex than
  // is_convertible and has not been written yet.
  template <class OtherT>
  explicit ErrorOr(
      ErrorOr<OtherT> &&Other,
      std::enable_if_t<!std::is_convertible_v<OtherT, T>> * = nullptr) {
    moveConstruct(std::move(Other));
  }

  ErrorOr &operator=(const ErrorOr &Other) {
    copyAssign(Other);
    return *this;
  }

  ErrorOr &operator=(ErrorOr &&Other) {
    moveAssign(std::move(Other));
    return *this;
  }

  ~ErrorOr() {
    if (!HasError)
      getStorage()->~storage_type();
  }

  /// Return false if there is an error.
  explicit operator bool() const {
    return !HasError;
  }

  reference get() { return *getStorage(); }
  const_reference get() const { return const_cast<ErrorOr<T> *>(this)->get(); }

  std::error_code getError() const {
    return HasError ? *getErrorStorage() : std::error_code();
  }

  pointer operator ->() {
    return toPointer(getStorage());
  }

  const_pointer operator->() const { return toPointer(getStorage()); }

  reference operator *() {
    return *getStorage();
  }

  const_reference operator*() const { return *getStorage(); }

private:
  template <class OtherT>
  void copyConstruct(const ErrorOr<OtherT> &Other) {
    if (!Other.HasError) {
      // Get the other value.
      HasError = false;
      new (getStorage()) storage_type(*Other.getStorage());
    } else {
      // Get other's error.
      HasError = true;
      new (getErrorStorage()) std::error_code(Other.getError());
    }
  }

  template <class T1>
  static bool compareThisIfSameType(const T1 &a, const T1 &b) {
    return &a == &b;
  }

  template <class T1, class T2>
  static bool compareThisIfSameType(const T1 &a, const T2 &b) {
    return false;
  }

  template <class OtherT>
  void copyAssign(const ErrorOr<OtherT> &Other) {
    if (compareThisIfSameType(*this, Other))
      return;

    this->~ErrorOr();
    new (this) ErrorOr(Other);
  }

  template <class OtherT>
  void moveConstruct(ErrorOr<OtherT> &&Other) {
    if (!Other.HasError) {
      // Get the other value.
      HasError = false;
      new (getStorage()) storage_type(std::move(*Other.getStorage()));
    } else {
      // Get other's error.
      HasError = true;
      new (getErrorStorage()) std::error_code(Other.getError());
    }
  }

  template <class OtherT>
  void moveAssign(ErrorOr<OtherT> &&Other) {
    if (compareThisIfSameType(*this, Other))
      return;

    this->~ErrorOr();
    new (this) ErrorOr(std::move(Other));
  }

  pointer toPointer(pointer Val) {
    return Val;
  }

  const_pointer toPointer(const_pointer Val) const { return Val; }

  pointer toPointer(wrap *Val) {
    return &Val->get();
  }

  const_pointer toPointer(const wrap *Val) const { return &Val->get(); }

  storage_type *getStorage() {
    assert(!HasError && "Cannot get value when an error exists!");
    return reinterpret_cast<storage_type *>(&TStorage);
  }

  const storage_type *getStorage() const {
    assert(!HasError && "Cannot get value when an error exists!");
    return reinterpret_cast<const storage_type *>(&TStorage);
  }

  std::error_code *getErrorStorage() {
    assert(HasError && "Cannot get error when a value exists!");
    return reinterpret_cast<std::error_code *>(&ErrorStorage);
  }

  const std::error_code *getErrorStorage() const {
    return const_cast<ErrorOr<T> *>(this)->getErrorStorage();
  }

  union {
    AlignedCharArrayUnion<storage_type> TStorage;
    AlignedCharArrayUnion<std::error_code> ErrorStorage;
  };
  bool HasError : 1;
};

template <class T, class E>
std::enable_if_t<std::is_error_code_enum<E>::value ||
                     std::is_error_condition_enum<E>::value,
                 bool>
operator==(const ErrorOr<T> &Err, E Code) {
  return Err.getError() == Code;
}

} // end namespace llvm

#endif // LLVM_SUPPORT_ERROROR_H
//===- ToolOutputFile.h - Output files for compiler-like tools --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the ToolOutputFile class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TOOLOUTPUTFILE_H
#define LLVM_SUPPORT_TOOLOUTPUTFILE_H

#include "llvm/Support/raw_ostream.h"
#include <optional>

namespace llvm {

/// This class contains a raw_fd_ostream and adds a few extra features commonly
/// needed for compiler-like tool output files:
///   - The file is automatically deleted if the process is killed.
///   - The file is automatically deleted when the ToolOutputFile
///     object is destroyed unless the client calls keep().
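///
/// A typical use (an illustrative sketch; the file name is a placeholder):
/// \code
///   std::error_code EC;
///   ToolOutputFile Out("result.txt", EC, sys::fs::OF_Text);
///   if (EC)
///     return;
///   Out.os() << "data\n";
///   Out.keep(); // without this, result.txt is deleted on destruction
/// \endcode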
class ToolOutputFile {
  /// This class is declared before the raw_fd_ostream so that it is constructed
  /// before the raw_fd_ostream is constructed and destructed after the
  /// raw_fd_ostream is destructed. It installs cleanups in its constructor and
  /// uninstalls them in its destructor.
  class CleanupInstaller {
  public:
    /// The name of the file.
    std::string Filename;

    /// If true, the file should be kept rather than deleted on destruction.
    bool Keep;

    StringRef getFilename() { return Filename; }
    explicit CleanupInstaller(StringRef Filename);
    ~CleanupInstaller();
  } Installer;

  /// Storage for the stream, if we're owning our own stream. This is
  /// intentionally declared after Installer.
  std::optional<raw_fd_ostream> OSHolder;

  /// The actual stream to use.
  raw_fd_ostream *OS;

public:
  /// This constructor's arguments are passed to raw_fd_ostream's
  /// constructor.
  ToolOutputFile(StringRef Filename, std::error_code &EC,
                 sys::fs::OpenFlags Flags);

  ToolOutputFile(StringRef Filename, int FD);

  /// Return the contained raw_fd_ostream.
  raw_fd_ostream &os() { return *OS; }

  /// Return the filename initialized with.
  StringRef getFilename() { return Installer.getFilename(); }

  /// Indicate that the tool's job with respect to this output file has been
  /// successful and the file should not be deleted.
  void keep() { Installer.Keep = true; }

  const std::string &outputFilename() { return Installer.Filename; }
};

} // end llvm namespace

#endif
//===---- CSKYAttributeParser.h - CSKY Attribute Parser ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CSKYATTRIBUTEPARSER_H
#define LLVM_SUPPORT_CSKYATTRIBUTEPARSER_H

#include "llvm/Support/CSKYAttributes.h"
#include "llvm/Support/ELFAttributeParser.h"

namespace llvm {
class CSKYAttributeParser : public ELFAttributeParser {
  struct DisplayHandler {
    CSKYAttrs::AttrType attribute;
    Error (CSKYAttributeParser::*routine)(unsigned);
  };
  static const DisplayHandler displayRoutines[];

  Error dspVersion(unsigned tag);
  Error vdspVersion(unsigned tag);
  Error fpuVersion(unsigned tag);
  Error fpuABI(unsigned tag);
  Error fpuRounding(unsigned tag);
  Error fpuDenormal(unsigned tag);
  Error fpuException(unsigned tag);
  Error fpuHardFP(unsigned tag);

  Error handler(uint64_t tag, bool &handled) override;

public:
  CSKYAttributeParser(ScopedPrinter *sw)
      : ELFAttributeParser(sw, CSKYAttrs::getCSKYAttributeTags(), "csky") {}
  CSKYAttributeParser()
      : ELFAttributeParser(CSKYAttrs::getCSKYAttributeTags(), "csky") {}
};

} // namespace llvm

#endif
//===- llvm/Support/StringSaver.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_STRINGSAVER_H
#define LLVM_SUPPORT_STRINGSAVER_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"

namespace llvm {

/// Saves strings in the provided stable storage and returns a
/// StringRef with a stable character pointer.
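///
/// Example (an illustrative sketch):
/// \code
///   BumpPtrAllocator Alloc;
///   StringSaver Saver(Alloc);
///   StringRef S = Saver.save(Twine("prefix.") + Twine(42));
///   // S points into Alloc and stays valid for Alloc's lifetime.
/// \endcode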
class StringSaver final {
  BumpPtrAllocator &Alloc;

public:
  StringSaver(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  BumpPtrAllocator &getAllocator() const { return Alloc; }

  // All returned strings are null-terminated: *save(S).end() == 0.
  StringRef save(const char *S) { return save(StringRef(S)); }
  StringRef save(StringRef S);
  StringRef save(const Twine &S) { return save(StringRef(S.str())); }
  StringRef save(const std::string &S) { return save(StringRef(S)); }
};

/// Saves strings in the provided stable storage and returns a StringRef with a
/// stable character pointer. Saving the same string yields the same StringRef.
///
/// Compared to StringSaver, it does more work but avoids saving the same string
/// multiple times.
///
/// Compared to StringPool, it performs fewer allocations but doesn't support
/// refcounting/deletion.
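///
/// Example (an illustrative sketch):
/// \code
///   BumpPtrAllocator Alloc;
///   UniqueStringSaver Saver(Alloc);
///   StringRef A = Saver.save("x");
///   StringRef B = Saver.save(std::string("x"));
///   assert(A.data() == B.data()); // equal strings share storage
/// \endcode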
class UniqueStringSaver final {
  StringSaver Strings;
  llvm::DenseSet<llvm::StringRef> Unique;

public:
  UniqueStringSaver(BumpPtrAllocator &Alloc) : Strings(Alloc) {}

  // All returned strings are null-terminated: *save(S).end() == 0.
  StringRef save(const char *S) { return save(StringRef(S)); }
  StringRef save(StringRef S);
  StringRef save(const Twine &S) { return save(StringRef(S.str())); }
  StringRef save(const std::string &S) { return save(StringRef(S)); }
};

} // namespace llvm
#endif
//===-- llvm/Support/DOTGraphTraits.h - Customize .dot output ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a template class that can be used to customize dot output
// graphs generated by the GraphWriter.h file.  The default implementation
// produces a simple, but not very polished, graph.  By specializing this
// template, you can customize many aspects of the generated graph.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_DOTGRAPHTRAITS_H
#define LLVM_SUPPORT_DOTGRAPHTRAITS_H

#include <string>

namespace llvm {

/// DefaultDOTGraphTraits - This class provides the default implementations of
/// all of the DOTGraphTraits methods.  If a specialization does not need to
/// override all methods here it should inherit so that it can get the default
/// implementations.
///
struct DefaultDOTGraphTraits {
private:
  bool IsSimple;

protected:
  bool isSimple() {
    return IsSimple;
  }

public:
  explicit DefaultDOTGraphTraits(bool simple=false) : IsSimple (simple) {}

  /// getGraphName - Return the label for the graph as a whole.  Printed at the
  /// top of the graph.
  ///
  template<typename GraphType>
  static std::string getGraphName(const GraphType &) { return ""; }

  /// getGraphProperties - Return any custom properties that should be included
  /// in the top level graph structure for dot.
  ///
  template<typename GraphType>
  static std::string getGraphProperties(const GraphType &) {
    return "";
  }

  /// renderGraphFromBottomUp - If this function returns true, the graph is
  /// emitted bottom-up instead of top-down.  This requires graphviz 2.0 to work
  /// though.
  static bool renderGraphFromBottomUp() {
    return false;
  }

  /// isNodeHidden - If the function returns true, the given node is not
  /// displayed in the graph.
  template <typename GraphType>
  static bool isNodeHidden(const void *, const GraphType &) {
    return false;
  }

  // renderNodesUsingHTML - If the function returns true, nodes will be
  // rendered using HTML-like labels, which allow colors, etc., in the nodes
  // and the edge source labels.
  static bool renderNodesUsingHTML() { return false; }

  /// getNodeLabel - Given a node and a pointer to the top level graph, return
  /// the label to print in the node.
  template<typename GraphType>
  std::string getNodeLabel(const void *, const GraphType &) {
    return "";
  }

  // getNodeIdentifierLabel - Returns a string representing the
  // address or other unique identifier of the node. (Only used if
  // non-empty.)
  template <typename GraphType>
  static std::string getNodeIdentifierLabel(const void *, const GraphType &) {
    return "";
  }

  template<typename GraphType>
  static std::string getNodeDescription(const void *, const GraphType &) {
    return "";
  }

  /// If you want to specify custom node attributes, this is the place to do so.
  ///
  template<typename GraphType>
  static std::string getNodeAttributes(const void *,
                                       const GraphType &) {
    return "";
  }

  /// If you want to override the dot attributes printed for a particular edge,
  /// override this method.
  template<typename EdgeIter, typename GraphType>
  static std::string getEdgeAttributes(const void *, EdgeIter,
                                       const GraphType &) {
    return "";
  }

  /// getEdgeSourceLabel - If you want to label the edge source itself,
  /// implement this method.
  template<typename EdgeIter>
  static std::string getEdgeSourceLabel(const void *, EdgeIter) {
    return "";
  }

  /// edgeTargetsEdgeSource - This method returns true if this outgoing edge
  /// should actually target another edge source, not a node.  If this method is
  /// implemented, getEdgeTarget should be implemented.
  template<typename EdgeIter>
  static bool edgeTargetsEdgeSource(const void *, EdgeIter) {
    return false;
  }

  /// getEdgeTarget - If edgeTargetsEdgeSource returns true, this method is
  /// called to determine which outgoing edge of Node is the target of this
  /// edge.
  template<typename EdgeIter>
  static EdgeIter getEdgeTarget(const void *, EdgeIter I) {
    return I;
  }

  /// hasEdgeDestLabels - If this function returns true, the graph is able
  /// to provide labels for edge destinations.
  static bool hasEdgeDestLabels() {
    return false;
  }

  /// numEdgeDestLabels - If hasEdgeDestLabels, this function returns the
  /// number of incoming edge labels the given node has.
  static unsigned numEdgeDestLabels(const void *) {
    return 0;
  }

  /// getEdgeDestLabel - If hasEdgeDestLabels, this function returns the
  /// incoming edge label with the given index in the given node.
  static std::string getEdgeDestLabel(const void *, unsigned) {
    return "";
  }

  /// addCustomGraphFeatures - If a graph is made up of more than just
  /// straight-forward nodes and edges, this is the place to put all of the
  /// custom stuff necessary.  The GraphWriter object, instantiated with your
  /// GraphType is passed in as an argument.  You may call arbitrary methods on
  /// it to add things to the output graph.
  ///
  template<typename GraphType, typename GraphWriter>
  static void addCustomGraphFeatures(const GraphType &, GraphWriter &) {}
};


/// DOTGraphTraits - Template class that can be specialized to customize how
/// graphs are converted to 'dot' graphs.  When specializing, you may inherit
/// from DefaultDOTGraphTraits if you don't need to override everything.
///
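/// A minimal specialization sketch (illustrative; MyGraph is a hypothetical
/// graph type):
/// \code
///   template <>
///   struct DOTGraphTraits<MyGraph *> : public DefaultDOTGraphTraits {
///     DOTGraphTraits(bool IsSimple = false)
///         : DefaultDOTGraphTraits(IsSimple) {}
///     static std::string getGraphName(MyGraph *) { return "my graph"; }
///     std::string getNodeLabel(const void *Node, MyGraph *) {
///       return "node";
///     }
///   };
/// \endcode
///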
template <typename Ty>
struct DOTGraphTraits : public DefaultDOTGraphTraits {
  DOTGraphTraits (bool simple=false) : DefaultDOTGraphTraits (simple) {}
};

} // End llvm namespace

#endif
//===- EndianStream.h - Stream ops with endian specific data ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines utilities for operating on streams that have endian
// specific data.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ENDIANSTREAM_H
#define LLVM_SUPPORT_ENDIANSTREAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace support {

namespace endian {

template <typename value_type>
inline void write(raw_ostream &os, value_type value, endianness endian) {
  value = byte_swap<value_type>(value, endian);
  os.write((const char *)&value, sizeof(value_type));
}

template <>
inline void write<float>(raw_ostream &os, float value, endianness endian) {
  write(os, llvm::bit_cast<uint32_t>(value), endian);
}

template <>
inline void write<double>(raw_ostream &os, double value,
                          endianness endian) {
  write(os, llvm::bit_cast<uint64_t>(value), endian);
}

template <typename value_type>
inline void write(raw_ostream &os, ArrayRef<value_type> vals,
                  endianness endian) {
  for (value_type v : vals)
    write(os, v, endian);
}

template <typename value_type>
inline void write(SmallVectorImpl<char> &Out, value_type V, endianness E) {
  V = byte_swap<value_type>(V, E);
  Out.append((const char *)&V, (const char *)&V + sizeof(value_type));
}

/// Adapter to write values to a stream in a particular byte order.
struct Writer {
  raw_ostream &OS;
  endianness Endian;
  Writer(raw_ostream &OS, endianness Endian) : OS(OS), Endian(Endian) {}
  template <typename value_type> void write(ArrayRef<value_type> Val) {
    endian::write(OS, Val, Endian);
  }
  template <typename value_type> void write(value_type Val) {
    endian::write(OS, Val, Endian);
  }
};
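
// Usage sketch (not part of the upstream header; assumes
// llvm/ADT/SmallString.h is also included): write a little-endian uint32_t
// followed by a float into an in-memory stream.
//
//   SmallString<16> Buf;
//   raw_svector_ostream OS(Buf);
//   Writer W(OS, little);
//   W.write<uint32_t>(0xDEADBEEFu);
//   W.write<float>(1.0f);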

} // end namespace endian

} // end namespace support
} // end namespace llvm

#endif
//===- BranchProbability.h - Branch Probability Wrapper ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Definition of BranchProbability shared by IR and Machine Instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BRANCHPROBABILITY_H
#define LLVM_SUPPORT_BRANCHPROBABILITY_H

#include "llvm/Support/DataTypes.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <numeric>

namespace llvm {

class raw_ostream;

// This class represents Branch Probability as a non-negative fraction that is
// no greater than 1. It uses a fixed-point-like implementation, in which the
// denominator is always a constant value (here we use 1<<31 for maximum
// precision).
class BranchProbability {
  // Numerator
  uint32_t N;

  // Denominator, which is a constant value.
  static constexpr uint32_t D = 1u << 31;
  static constexpr uint32_t UnknownN = UINT32_MAX;

  // Construct a BranchProbability with only numerator assuming the denominator
  // is 1<<31. For internal use only.
  explicit BranchProbability(uint32_t n) : N(n) {}

public:
  BranchProbability() : N(UnknownN) {}
  BranchProbability(uint32_t Numerator, uint32_t Denominator);

  bool isZero() const { return N == 0; }
  bool isUnknown() const { return N == UnknownN; }

  static BranchProbability getZero() { return BranchProbability(0); }
  static BranchProbability getOne() { return BranchProbability(D); }
  static BranchProbability getUnknown() { return BranchProbability(UnknownN); }
  // Create a BranchProbability object with the given numerator and 1<<31
  // as denominator.
  static BranchProbability getRaw(uint32_t N) { return BranchProbability(N); }
  // Create a BranchProbability object from 64-bit integers.
  static BranchProbability getBranchProbability(uint64_t Numerator,
                                                uint64_t Denominator);

  // Normalize the given probabilities so that their sum becomes approximately
  // one.
  template <class ProbabilityIter>
  static void normalizeProbabilities(ProbabilityIter Begin,
                                     ProbabilityIter End);

  uint32_t getNumerator() const { return N; }
  static uint32_t getDenominator() { return D; }

  // Return (1 - Probability).
  BranchProbability getCompl() const { return BranchProbability(D - N); }

  raw_ostream &print(raw_ostream &OS) const;

  void dump() const;

  /// Scale a large integer.
  ///
  /// Scales \c Num.  Guarantees full precision.  Returns the floor of the
  /// result.
  ///
  /// \return \c Num times \c this.
  uint64_t scale(uint64_t Num) const;

  /// Scale a large integer by the inverse.
  ///
  /// Scales \c Num by the inverse of \c this.  Guarantees full precision.
  /// Returns the floor of the result.
  ///
  /// \return \c Num divided by \c this.
  uint64_t scaleByInverse(uint64_t Num) const;

  BranchProbability &operator+=(BranchProbability RHS) {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    // Saturate the result in case of overflow.
    N = (uint64_t(N) + RHS.N > D) ? D : N + RHS.N;
    return *this;
  }

  BranchProbability &operator-=(BranchProbability RHS) {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    // Saturate the result in case of underflow.
    N = N < RHS.N ? 0 : N - RHS.N;
    return *this;
  }

  BranchProbability &operator*=(BranchProbability RHS) {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    N = (static_cast<uint64_t>(N) * RHS.N + D / 2) / D;
    return *this;
  }

  BranchProbability &operator*=(uint32_t RHS) {
    assert(N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    N = (uint64_t(N) * RHS > D) ? D : N * RHS;
    return *this;
  }

  BranchProbability &operator/=(BranchProbability RHS) {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    N = (static_cast<uint64_t>(N) * D + RHS.N / 2) / RHS.N;
    return *this;
  }

  BranchProbability &operator/=(uint32_t RHS) {
    assert(N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    assert(RHS > 0 && "The divisor cannot be zero.");
    N /= RHS;
    return *this;
  }

  BranchProbability operator+(BranchProbability RHS) const {
    BranchProbability Prob(*this);
    Prob += RHS;
    return Prob;
  }

  BranchProbability operator-(BranchProbability RHS) const {
    BranchProbability Prob(*this);
    Prob -= RHS;
    return Prob;
  }

  BranchProbability operator*(BranchProbability RHS) const {
    BranchProbability Prob(*this);
    Prob *= RHS;
    return Prob;
  }

  BranchProbability operator*(uint32_t RHS) const {
    BranchProbability Prob(*this);
    Prob *= RHS;
    return Prob;
  }

  BranchProbability operator/(BranchProbability RHS) const {
    BranchProbability Prob(*this);
    Prob /= RHS;
    return Prob;
  }

  BranchProbability operator/(uint32_t RHS) const {
    BranchProbability Prob(*this);
    Prob /= RHS;
    return Prob;
  }

  bool operator==(BranchProbability RHS) const { return N == RHS.N; }
  bool operator!=(BranchProbability RHS) const { return !(*this == RHS); }

  bool operator<(BranchProbability RHS) const {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in comparisons.");
    return N < RHS.N;
  }

  bool operator>(BranchProbability RHS) const {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in comparisons.");
    return RHS < *this;
  }

  bool operator<=(BranchProbability RHS) const {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in comparisons.");
    return !(RHS < *this);
  }

  bool operator>=(BranchProbability RHS) const {
    assert(N != UnknownN && RHS.N != UnknownN &&
           "Unknown probability cannot participate in comparisons.");
    return !(*this < RHS);
  }
};
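
// Usage sketch (not part of the upstream header): construct a probability of
// 3/8 and scale an integer by it.
//
//   BranchProbability P = BranchProbability::getBranchProbability(3, 8);
//   uint64_t Scaled = P.scale(1000);    // floor(1000 * 3 / 8) == 375
//   BranchProbability Q = P.getCompl(); // 5/8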

inline raw_ostream &operator<<(raw_ostream &OS, BranchProbability Prob) {
  return Prob.print(OS);
}

template <class ProbabilityIter>
void BranchProbability::normalizeProbabilities(ProbabilityIter Begin,
                                               ProbabilityIter End) {
  if (Begin == End)
    return;

  unsigned UnknownProbCount = 0;
  uint64_t Sum = std::accumulate(Begin, End, uint64_t(0),
                                 [&](uint64_t S, const BranchProbability &BP) {
                                   if (!BP.isUnknown())
                                     return S + BP.N;
                                   UnknownProbCount++;
                                   return S;
                                 });

  if (UnknownProbCount > 0) {
    BranchProbability ProbForUnknown = BranchProbability::getZero();
    // If the sum of all known probabilities is less than one, evenly distribute
    // the complement of the sum among the unknown probabilities. Otherwise, set
    // the unknown probabilities to zero and continue to normalize the known
    // probabilities.
    if (Sum < BranchProbability::getDenominator())
      ProbForUnknown = BranchProbability::getRaw(
          (BranchProbability::getDenominator() - Sum) / UnknownProbCount);

    std::replace_if(Begin, End,
                    [](const BranchProbability &BP) { return BP.isUnknown(); },
                    ProbForUnknown);

    if (Sum <= BranchProbability::getDenominator())
      return;
  }

  if (Sum == 0) {
    BranchProbability BP(1, std::distance(Begin, End));
    std::fill(Begin, End, BP);
    return;
  }

  for (auto I = Begin; I != End; ++I)
    I->N = (I->N * uint64_t(D) + Sum / 2) / Sum;
}
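
// Example (not part of the upstream header): one known probability of 1/4 and
// two unknowns; after normalization each unknown receives (1 - 1/4) / 2 = 3/8.
//
//   SmallVector<BranchProbability, 4> Probs = {
//       BranchProbability::getBranchProbability(1, 4),
//       BranchProbability::getUnknown(), BranchProbability::getUnknown()};
//   BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());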

} // end namespace llvm

#endif
//===-- llvm/Support/TargetOpcodes.def - Target Indep Opcodes ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target independent instruction opcodes.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

/// HANDLE_TARGET_OPCODE defines an opcode and its associated enum value.
///
#ifndef HANDLE_TARGET_OPCODE
#define HANDLE_TARGET_OPCODE(OPC, NUM)
#endif

/// HANDLE_TARGET_OPCODE_MARKER defines an alternative identifier for an opcode.
///
#ifndef HANDLE_TARGET_OPCODE_MARKER
#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC)
#endif
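
// Typical consumer pattern (a sketch modeled on how llvm/Support/TargetOpcodes.h
// uses this file): redefine the macros to emit enumerators before including,
// so the no-op defaults above are never used.
//
//   namespace TargetOpcode {
//   enum {
//   #define HANDLE_TARGET_OPCODE(OPC) OPC,
//   #define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
//   #include "llvm/Support/TargetOpcodes.def"
//   };
//   } // end namespace TargetOpcode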

/// Every instruction defined here must also appear in Target.td.
///
HANDLE_TARGET_OPCODE(PHI)
HANDLE_TARGET_OPCODE(INLINEASM)
HANDLE_TARGET_OPCODE(INLINEASM_BR)
HANDLE_TARGET_OPCODE(CFI_INSTRUCTION)
HANDLE_TARGET_OPCODE(EH_LABEL)
HANDLE_TARGET_OPCODE(GC_LABEL)
HANDLE_TARGET_OPCODE(ANNOTATION_LABEL)

/// KILL - This instruction is a noop that is used only to adjust the
/// liveness of registers. This can be useful when dealing with
/// sub-registers.
HANDLE_TARGET_OPCODE(KILL)

/// EXTRACT_SUBREG - This instruction takes two operands: a register
/// that has subregisters, and a subregister index. It returns the
/// extracted subregister value. This is commonly used to implement
/// truncation operations on target architectures which support it.
HANDLE_TARGET_OPCODE(EXTRACT_SUBREG)

/// INSERT_SUBREG - This instruction takes three operands: a register that
/// has subregisters, a register providing an insert value, and a
/// subregister index. It returns the value of the first register with the
/// value of the second register inserted. The first register is often
/// defined by an IMPLICIT_DEF, because it is commonly used to implement
/// anyext operations on target architectures which support it.
HANDLE_TARGET_OPCODE(INSERT_SUBREG)

/// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
HANDLE_TARGET_OPCODE(IMPLICIT_DEF)

/// SUBREG_TO_REG - Assert the value of bits in a super register.
/// The result of this instruction is the value of the second operand inserted
/// into the subregister specified by the third operand. All other bits are
/// assumed to be equal to the bits in the immediate integer constant in the
/// first operand. This instruction just communicates information; no code
/// should be generated.
/// This is typically used after an instruction where the write to a subregister
/// implicitly cleared the bits in the super register.
HANDLE_TARGET_OPCODE(SUBREG_TO_REG)

/// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
/// register-to-register copy into a specific register class. This is only
/// used between instruction selection and MachineInstr creation, before
/// virtual registers have been created for all the instructions, and it's
/// only needed in cases where the register classes implied by the
/// instructions are insufficient. It is emitted as a COPY MachineInstr.
HANDLE_TARGET_OPCODE(COPY_TO_REGCLASS)

/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
HANDLE_TARGET_OPCODE(DBG_VALUE)

/// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic with a variadic
/// list of locations
HANDLE_TARGET_OPCODE(DBG_VALUE_LIST)

/// DBG_INSTR_REF - A mapping of llvm.dbg.value referring to the instruction
/// that defines the value, rather than a virtual register.
HANDLE_TARGET_OPCODE(DBG_INSTR_REF)

/// DBG_PHI - remainder of a PHI, identifies a program point where values
/// merge under control flow.
HANDLE_TARGET_OPCODE(DBG_PHI)

/// DBG_LABEL - a mapping of the llvm.dbg.label intrinsic
HANDLE_TARGET_OPCODE(DBG_LABEL)

/// REG_SEQUENCE - This variadic instruction is used to form a register that
/// represents a consecutive sequence of sub-registers. It's used as a
/// register coalescing / allocation aid and must be eliminated before code
/// emission.
// In SDNode form, the first operand encodes the register class created by
// the REG_SEQUENCE, while each subsequent pair names a vreg + subreg index
// pair.  Once it has been lowered to a MachineInstr, the regclass operand
// is no longer present.
/// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
/// After register coalescing, references of v1024 should be replaced with
/// v1027:3, v1025 with v1027:4, etc.
HANDLE_TARGET_OPCODE(REG_SEQUENCE)

/// COPY - Target-independent register copy. This instruction can also be
/// used to copy between subregisters of virtual registers.
HANDLE_TARGET_OPCODE(COPY)

/// BUNDLE - This instruction represents an instruction bundle. Instructions
/// which immediately follow a BUNDLE instruction which are marked with
/// 'InsideBundle' flag are inside the bundle.
HANDLE_TARGET_OPCODE(BUNDLE)

/// Lifetime markers.
HANDLE_TARGET_OPCODE(LIFETIME_START)
HANDLE_TARGET_OPCODE(LIFETIME_END)

/// Pseudo probe
HANDLE_TARGET_OPCODE(PSEUDO_PROBE)

/// Arithmetic fence.
HANDLE_TARGET_OPCODE(ARITH_FENCE)

/// A Stackmap instruction captures the location of live variables at its
/// position in the instruction stream. It is followed by a shadow of bytes
/// that must lie within the function and not contain another stackmap.
HANDLE_TARGET_OPCODE(STACKMAP)

/// FENTRY_CALL - This is a marker instruction which gets translated into a
/// raw fentry call.
HANDLE_TARGET_OPCODE(FENTRY_CALL)

/// Patchable call instruction - this instruction represents a call to a
/// constant address, followed by a series of NOPs. It is intended to
/// support optimizations for dynamic languages (such as JavaScript) that
/// rewrite calls to runtimes with more efficient code sequences.
/// This also implies a stack map.
HANDLE_TARGET_OPCODE(PATCHPOINT)

/// This pseudo-instruction loads the stack guard value. Targets which need
/// to prevent the stack guard value or address from being spilled to the
/// stack should override TargetLowering::emitLoadStackGuardNode and
/// additionally expand this pseudo after register allocation.
HANDLE_TARGET_OPCODE(LOAD_STACK_GUARD)

/// These are used to support call sites that must have the stack adjusted
/// before the call (e.g. to initialize an argument passed by value).
/// See llvm.call.preallocated.{setup,arg} in the LangRef for more details.
HANDLE_TARGET_OPCODE(PREALLOCATED_SETUP)
HANDLE_TARGET_OPCODE(PREALLOCATED_ARG)

/// Call instruction with associated vm state for deoptimization and list
/// of live pointers for relocation by the garbage collector.  It is
/// intended to support garbage collection with fully precise relocating
/// collectors and deoptimizations in either the callee or caller.
HANDLE_TARGET_OPCODE(STATEPOINT)

/// Instruction that records the offset of a local stack allocation passed to
/// llvm.localescape. It has two arguments: the symbol for the label and the
/// frame index of the local stack allocation.
HANDLE_TARGET_OPCODE(LOCAL_ESCAPE)

/// Wraps a machine instruction which can fault, bundled with associated
/// information on how to handle such a fault.
/// For example, a load instruction that may page fault, bundled with
/// information on how to handle such a page fault.  It is intended to support
/// "zero cost" null checks in managed languages by allowing LLVM to fold
/// comparisons into existing memory operations.
HANDLE_TARGET_OPCODE(FAULTING_OP)

/// Wraps a machine instruction to add patchability constraints.  An
/// instruction wrapped in PATCHABLE_OP has to either have a minimum
/// size or be preceded with a nop of that size.  The first operand is
/// an immediate denoting the minimum size of the instruction, the
/// second operand is an immediate denoting the opcode of the original
/// instruction.  The rest of the operands are the operands of the
/// original instruction.
/// PATCHABLE_OP can be used as second operand to only insert a nop of
/// required size.
HANDLE_TARGET_OPCODE(PATCHABLE_OP)

/// This is a marker instruction which gets translated into a nop sled, useful
/// for inserting instrumentation instructions at runtime.
HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_ENTER)

/// Wraps a return instruction and its operands to enable adding nop sleds
/// either before or after the return. The nop sleds are useful for inserting
/// instrumentation instructions at runtime.
/// The patch here replaces the return instruction.
HANDLE_TARGET_OPCODE(PATCHABLE_RET)

/// This is a marker instruction which gets translated into a nop sled, useful
/// for inserting instrumentation instructions at runtime.
/// The patch here prepends the return instruction.
/// The same thing as in x86_64 is not possible for ARM because it has multiple
/// return instructions. Furthermore, the CPU allows parameterized and even
/// conditional return instructions. In the current ARM implementation we are
/// making use of the fact that currently LLVM doesn't seem to generate
/// conditional return instructions.
/// On ARM, the same instruction can be used for popping multiple registers
/// from the stack and returning (it just pops pc register too), and LLVM
/// generates it sometimes. So we can't insert the sled between this stack
/// adjustment and the return without splitting the original instruction into 2
/// instructions. So on ARM, rather than jumping into the exit trampoline, we
/// call it, it does the tracing, preserves the stack and returns.
HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_EXIT)

/// Wraps a tail call instruction and its operands to enable adding nop sleds
/// either before or after the tail exit. We use this as a disambiguation from
/// PATCHABLE_RET which specifically only works for return instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_TAIL_CALL)

/// Wraps a logging call and its arguments with nop sleds. At runtime, this can
/// be patched to insert instrumentation instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_EVENT_CALL)

/// Wraps a typed logging call and its argument with nop sleds. At runtime, this
/// can be patched to insert instrumentation instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_TYPED_EVENT_CALL)

HANDLE_TARGET_OPCODE(ICALL_BRANCH_FUNNEL)

// This is a fence with the singlethread scope. It represents a compiler memory
// barrier, but does not correspond to any generated instruction.
HANDLE_TARGET_OPCODE(MEMBARRIER)

/// The following generic opcodes are not supposed to appear after ISel.
/// This is something we might want to relax, but for now it is convenient
/// for producing diagnostics.

/// Instructions which should not exist past instruction selection, but do not
/// generate code. These instructions only act as optimization hints.
HANDLE_TARGET_OPCODE(G_ASSERT_SEXT)
HANDLE_TARGET_OPCODE(G_ASSERT_ZEXT)
HANDLE_TARGET_OPCODE(G_ASSERT_ALIGN)
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPTIMIZATION_HINT_START,
                            G_ASSERT_SEXT)
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPTIMIZATION_HINT_END,
                            G_ASSERT_ALIGN)

/// Generic ADD instruction. This is an integer add.
HANDLE_TARGET_OPCODE(G_ADD)
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_START, G_ADD)

/// Generic SUB instruction. This is an integer sub.
HANDLE_TARGET_OPCODE(G_SUB)

// Generic multiply instruction.
HANDLE_TARGET_OPCODE(G_MUL)

// Generic signed division instruction.
HANDLE_TARGET_OPCODE(G_SDIV)

// Generic unsigned division instruction.
HANDLE_TARGET_OPCODE(G_UDIV)

// Generic signed remainder instruction.
HANDLE_TARGET_OPCODE(G_SREM)

// Generic unsigned remainder instruction.
HANDLE_TARGET_OPCODE(G_UREM)

// Generic signed divrem instruction.
HANDLE_TARGET_OPCODE(G_SDIVREM)

// Generic unsigned divrem instruction.
HANDLE_TARGET_OPCODE(G_UDIVREM)

/// Generic bitwise and instruction.
HANDLE_TARGET_OPCODE(G_AND)

/// Generic bitwise or instruction.
HANDLE_TARGET_OPCODE(G_OR)

/// Generic bitwise exclusive-or instruction.
HANDLE_TARGET_OPCODE(G_XOR)


HANDLE_TARGET_OPCODE(G_IMPLICIT_DEF)

/// Generic PHI instruction with types.
HANDLE_TARGET_OPCODE(G_PHI)

/// Generic instruction to materialize the address of an alloca or other
/// stack-based object.
HANDLE_TARGET_OPCODE(G_FRAME_INDEX)

/// Generic reference to global value.
HANDLE_TARGET_OPCODE(G_GLOBAL_VALUE)

/// Generic instruction to materialize the address of an object in the constant
/// pool.
HANDLE_TARGET_OPCODE(G_CONSTANT_POOL)

/// Generic instruction to extract blocks of bits from the register given
/// (typically a sub-register COPY after instruction selection).
HANDLE_TARGET_OPCODE(G_EXTRACT)

HANDLE_TARGET_OPCODE(G_UNMERGE_VALUES)

/// Generic instruction to insert blocks of bits from the registers given into
/// the source.
HANDLE_TARGET_OPCODE(G_INSERT)

/// Generic instruction to paste a variable number of components together into a
/// larger register.
HANDLE_TARGET_OPCODE(G_MERGE_VALUES)

/// Generic instruction to create a vector value from a number of scalar
/// components.
HANDLE_TARGET_OPCODE(G_BUILD_VECTOR)

/// Generic instruction to create a vector value from a number of scalar
/// components, which have types larger than the result vector elt type.
HANDLE_TARGET_OPCODE(G_BUILD_VECTOR_TRUNC)

/// Generic instruction to create a vector by concatenating multiple vectors.
HANDLE_TARGET_OPCODE(G_CONCAT_VECTORS)

/// Generic pointer to int conversion.
HANDLE_TARGET_OPCODE(G_PTRTOINT)

/// Generic int to pointer conversion.
HANDLE_TARGET_OPCODE(G_INTTOPTR)

/// Generic bitcast. The source and destination types must be different, or a
/// COPY is the relevant instruction.
HANDLE_TARGET_OPCODE(G_BITCAST)

/// Generic freeze.
HANDLE_TARGET_OPCODE(G_FREEZE)

/// Constant folding barrier.
HANDLE_TARGET_OPCODE(G_CONSTANT_FOLD_BARRIER)

// INTRINSIC fptrunc_round intrinsic.
HANDLE_TARGET_OPCODE(G_INTRINSIC_FPTRUNC_ROUND)

/// INTRINSIC trunc intrinsic.
HANDLE_TARGET_OPCODE(G_INTRINSIC_TRUNC)

/// INTRINSIC round intrinsic.
HANDLE_TARGET_OPCODE(G_INTRINSIC_ROUND)

/// INTRINSIC round to integer intrinsic.
HANDLE_TARGET_OPCODE(G_INTRINSIC_LRINT)

/// INTRINSIC roundeven intrinsic.
HANDLE_TARGET_OPCODE(G_INTRINSIC_ROUNDEVEN)

/// INTRINSIC readcyclecounter
HANDLE_TARGET_OPCODE(G_READCYCLECOUNTER)

/// Generic load (including anyext load)
HANDLE_TARGET_OPCODE(G_LOAD)

/// Generic signext load
HANDLE_TARGET_OPCODE(G_SEXTLOAD)

/// Generic zeroext load
HANDLE_TARGET_OPCODE(G_ZEXTLOAD)

/// Generic indexed load (including anyext load)
HANDLE_TARGET_OPCODE(G_INDEXED_LOAD)

/// Generic indexed signext load
HANDLE_TARGET_OPCODE(G_INDEXED_SEXTLOAD)

/// Generic indexed zeroext load
HANDLE_TARGET_OPCODE(G_INDEXED_ZEXTLOAD)

/// Generic store.
HANDLE_TARGET_OPCODE(G_STORE)

/// Generic indexed store.
HANDLE_TARGET_OPCODE(G_INDEXED_STORE)

/// Generic atomic cmpxchg with internal success check.
HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG_WITH_SUCCESS)

/// Generic atomic cmpxchg.
HANDLE_TARGET_OPCODE(G_ATOMIC_CMPXCHG)

/// Generic atomicrmw.
HANDLE_TARGET_OPCODE(G_ATOMICRMW_XCHG)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_ADD)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_SUB)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_AND)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_NAND)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_OR)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_XOR)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_MAX)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_MIN)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UMAX)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UMIN)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FADD)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FSUB)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMAX)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_FMIN)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UINC_WRAP)
HANDLE_TARGET_OPCODE(G_ATOMICRMW_UDEC_WRAP)

// Marker for start of Generic AtomicRMW opcodes
HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_START, G_ATOMICRMW_XCHG)

// Marker for end of Generic AtomicRMW opcodes
HANDLE_TARGET_OPCODE_MARKER(GENERIC_ATOMICRMW_OP_END, G_ATOMICRMW_UDEC_WRAP)

// Generic atomic fence
HANDLE_TARGET_OPCODE(G_FENCE)

/// Generic conditional branch instruction.
HANDLE_TARGET_OPCODE(G_BRCOND)

/// Generic indirect branch instruction.
HANDLE_TARGET_OPCODE(G_BRINDIRECT)

/// Begin an invoke region marker.
HANDLE_TARGET_OPCODE(G_INVOKE_REGION_START)

/// Generic intrinsic use (without side effects).
HANDLE_TARGET_OPCODE(G_INTRINSIC)

/// Generic intrinsic use (with side effects).
HANDLE_TARGET_OPCODE(G_INTRINSIC_W_SIDE_EFFECTS)

/// Generic extension allowing rubbish in high bits.
HANDLE_TARGET_OPCODE(G_ANYEXT)

/// Generic instruction to discard the high bits of a register. This differs
/// from (G_EXTRACT val, 0) in its action on vectors: G_TRUNC will truncate
/// each element individually, G_EXTRACT will typically discard the high
/// elements of the vector.
HANDLE_TARGET_OPCODE(G_TRUNC)

/// Generic integer constant.
HANDLE_TARGET_OPCODE(G_CONSTANT)

/// Generic floating constant.
HANDLE_TARGET_OPCODE(G_FCONSTANT)

/// Generic va_start instruction. Stores to its one pointer operand.
HANDLE_TARGET_OPCODE(G_VASTART)

/// Generic va_arg instruction. Reads a value from its one pointer operand.
HANDLE_TARGET_OPCODE(G_VAARG)

// Generic sign extend
HANDLE_TARGET_OPCODE(G_SEXT)
HANDLE_TARGET_OPCODE(G_SEXT_INREG)

// Generic zero extend
HANDLE_TARGET_OPCODE(G_ZEXT)

// Generic left-shift
HANDLE_TARGET_OPCODE(G_SHL)

// Generic logical right-shift
HANDLE_TARGET_OPCODE(G_LSHR)

// Generic arithmetic right-shift
HANDLE_TARGET_OPCODE(G_ASHR)

// Generic funnel left shift
HANDLE_TARGET_OPCODE(G_FSHL)

// Generic funnel right shift
HANDLE_TARGET_OPCODE(G_FSHR)

// Generic right rotate
HANDLE_TARGET_OPCODE(G_ROTR)

// Generic left rotate
HANDLE_TARGET_OPCODE(G_ROTL)

/// Generic integer-based comparison, also applicable to vectors of integers.
HANDLE_TARGET_OPCODE(G_ICMP)

/// Generic floating-point comparison, also applicable to vectors.
HANDLE_TARGET_OPCODE(G_FCMP)

/// Generic select.
HANDLE_TARGET_OPCODE(G_SELECT)

/// Generic unsigned add instruction, consuming the normal operands and
/// producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_UADDO)

/// Generic unsigned add instruction, consuming the normal operands plus a carry
/// flag, and similarly producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_UADDE)

/// Generic unsigned sub instruction, consuming the normal operands and
/// producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_USUBO)

/// Generic unsigned subtract instruction, consuming the normal operands plus a
/// carry flag, and similarly producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_USUBE)

/// Generic signed add instruction, producing the result and a signed overflow
/// flag.
HANDLE_TARGET_OPCODE(G_SADDO)

/// Generic signed add instruction, consuming the normal operands plus a carry
/// flag, and similarly producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_SADDE)

/// Generic signed subtract instruction, producing the result and a signed
/// overflow flag.
HANDLE_TARGET_OPCODE(G_SSUBO)

/// Generic signed sub instruction, consuming the normal operands plus a carry
/// flag, and similarly producing the result and a carry flag.
HANDLE_TARGET_OPCODE(G_SSUBE)

/// Generic unsigned multiply instruction, producing the result and an
/// unsigned overflow flag.
HANDLE_TARGET_OPCODE(G_UMULO)

/// Generic signed multiply instruction, producing the result and a signed
/// overflow flag.
HANDLE_TARGET_OPCODE(G_SMULO)

// Multiply two numbers at twice the incoming bit width (unsigned) and return
// the high half of the result.
HANDLE_TARGET_OPCODE(G_UMULH)

// Multiply two numbers at twice the incoming bit width (signed) and return
// the high half of the result.
HANDLE_TARGET_OPCODE(G_SMULH)

/// Generic saturating unsigned addition.
HANDLE_TARGET_OPCODE(G_UADDSAT)

/// Generic saturating signed addition.
HANDLE_TARGET_OPCODE(G_SADDSAT)

/// Generic saturating unsigned subtraction.
HANDLE_TARGET_OPCODE(G_USUBSAT)

/// Generic saturating signed subtraction.
HANDLE_TARGET_OPCODE(G_SSUBSAT)

/// Generic saturating unsigned left shift.
HANDLE_TARGET_OPCODE(G_USHLSAT)

/// Generic saturating signed left shift.
HANDLE_TARGET_OPCODE(G_SSHLSAT)

// Perform signed fixed point multiplication
HANDLE_TARGET_OPCODE(G_SMULFIX)

// Perform unsigned fixed point multiplication
HANDLE_TARGET_OPCODE(G_UMULFIX)

// Perform signed, saturating fixed point multiplication
HANDLE_TARGET_OPCODE(G_SMULFIXSAT)

// Perform unsigned, saturating fixed point multiplication
HANDLE_TARGET_OPCODE(G_UMULFIXSAT)

// Perform signed fixed point division
HANDLE_TARGET_OPCODE(G_SDIVFIX)

// Perform unsigned fixed point division
HANDLE_TARGET_OPCODE(G_UDIVFIX)

// Perform signed, saturating fixed point division
HANDLE_TARGET_OPCODE(G_SDIVFIXSAT)

// Perform unsigned, saturating fixed point division
HANDLE_TARGET_OPCODE(G_UDIVFIXSAT)

/// Generic FP addition.
HANDLE_TARGET_OPCODE(G_FADD)

/// Generic FP subtraction.
HANDLE_TARGET_OPCODE(G_FSUB)

/// Generic FP multiplication.
HANDLE_TARGET_OPCODE(G_FMUL)

/// Generic FMA multiplication. Behaves like the llvm.fma intrinsic.
HANDLE_TARGET_OPCODE(G_FMA)

/// Generic FP multiply and add. Behaves as separate fmul and fadd.
HANDLE_TARGET_OPCODE(G_FMAD)

/// Generic FP division.
HANDLE_TARGET_OPCODE(G_FDIV)

/// Generic FP remainder.
HANDLE_TARGET_OPCODE(G_FREM)

/// Generic FP exponentiation.
HANDLE_TARGET_OPCODE(G_FPOW)

/// Generic FP exponentiation, with an integer exponent.
HANDLE_TARGET_OPCODE(G_FPOWI)

/// Generic base-e exponential of a value.
HANDLE_TARGET_OPCODE(G_FEXP)

/// Generic base-2 exponential of a value.
HANDLE_TARGET_OPCODE(G_FEXP2)

/// Floating point base-e logarithm of a value.
HANDLE_TARGET_OPCODE(G_FLOG)

/// Floating point base-2 logarithm of a value.
HANDLE_TARGET_OPCODE(G_FLOG2)

/// Floating point base-10 logarithm of a value.
HANDLE_TARGET_OPCODE(G_FLOG10)

/// Floating point x * 2^n
HANDLE_TARGET_OPCODE(G_FLDEXP)

/// Floating point extract fraction and exponent.
HANDLE_TARGET_OPCODE(G_FFREXP)

/// Generic FP negation.
HANDLE_TARGET_OPCODE(G_FNEG)

/// Generic FP extension.
HANDLE_TARGET_OPCODE(G_FPEXT)

/// Generic FP truncation.
HANDLE_TARGET_OPCODE(G_FPTRUNC)

/// Generic float to signed-int conversion
HANDLE_TARGET_OPCODE(G_FPTOSI)

/// Generic float to unsigned-int conversion
HANDLE_TARGET_OPCODE(G_FPTOUI)

/// Generic signed-int to float conversion
HANDLE_TARGET_OPCODE(G_SITOFP)

/// Generic unsigned-int to float conversion
HANDLE_TARGET_OPCODE(G_UITOFP)

/// Generic FP absolute value.
HANDLE_TARGET_OPCODE(G_FABS)

/// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This does
/// not require that X and Y have the same type, just that they are both
/// floating point. X and the result must have the same type.  FCOPYSIGN(f32,
/// f64) is allowed.
HANDLE_TARGET_OPCODE(G_FCOPYSIGN)

/// Generic test for floating-point class.
HANDLE_TARGET_OPCODE(G_IS_FPCLASS)

/// Generic FP canonicalize value.
HANDLE_TARGET_OPCODE(G_FCANONICALIZE)

/// FP min/max matching libm's fmin/fmax
HANDLE_TARGET_OPCODE(G_FMINNUM)
HANDLE_TARGET_OPCODE(G_FMAXNUM)

/// FP min/max matching IEEE-754 2008's minnum/maxnum semantics.
HANDLE_TARGET_OPCODE(G_FMINNUM_IEEE)
HANDLE_TARGET_OPCODE(G_FMAXNUM_IEEE)

/// FP min/max matching IEEE-754 2018 draft semantics.
HANDLE_TARGET_OPCODE(G_FMINIMUM)
HANDLE_TARGET_OPCODE(G_FMAXIMUM)

/// Generic pointer offset
HANDLE_TARGET_OPCODE(G_PTR_ADD)

/// Clear the specified bits in a pointer.
HANDLE_TARGET_OPCODE(G_PTRMASK)

/// Generic signed integer minimum.
HANDLE_TARGET_OPCODE(G_SMIN)

/// Generic signed integer maximum.
HANDLE_TARGET_OPCODE(G_SMAX)

/// Generic unsigned integer minimum.
HANDLE_TARGET_OPCODE(G_UMIN)

/// Generic unsigned integer maximum.
HANDLE_TARGET_OPCODE(G_UMAX)

/// Generic integer absolute value.
HANDLE_TARGET_OPCODE(G_ABS)

HANDLE_TARGET_OPCODE(G_LROUND)
HANDLE_TARGET_OPCODE(G_LLROUND)

/// Generic BRANCH instruction. This is an unconditional branch.
HANDLE_TARGET_OPCODE(G_BR)

/// Generic branch to jump table entry.
HANDLE_TARGET_OPCODE(G_BRJT)

/// Generic insertelement.
HANDLE_TARGET_OPCODE(G_INSERT_VECTOR_ELT)

/// Generic extractelement.
HANDLE_TARGET_OPCODE(G_EXTRACT_VECTOR_ELT)

/// Generic shufflevector.
HANDLE_TARGET_OPCODE(G_SHUFFLE_VECTOR)

/// Generic count trailing zeroes.
HANDLE_TARGET_OPCODE(G_CTTZ)

/// Same as above, undefined for zero inputs.
HANDLE_TARGET_OPCODE(G_CTTZ_ZERO_UNDEF)

/// Generic count leading zeroes.
HANDLE_TARGET_OPCODE(G_CTLZ)

/// Same as above, undefined for zero inputs.
HANDLE_TARGET_OPCODE(G_CTLZ_ZERO_UNDEF)

/// Generic count bits.
HANDLE_TARGET_OPCODE(G_CTPOP)

/// Generic byte swap.
HANDLE_TARGET_OPCODE(G_BSWAP)

/// Generic bit reverse.
HANDLE_TARGET_OPCODE(G_BITREVERSE)

/// Floating point ceil.
HANDLE_TARGET_OPCODE(G_FCEIL)

/// Floating point cosine.
HANDLE_TARGET_OPCODE(G_FCOS)

/// Floating point sine.
HANDLE_TARGET_OPCODE(G_FSIN)

/// Floating point square root.
HANDLE_TARGET_OPCODE(G_FSQRT)

/// Floating point floor.
HANDLE_TARGET_OPCODE(G_FFLOOR)

/// Floating point round to integer, using the current rounding mode (rint).
HANDLE_TARGET_OPCODE(G_FRINT)

/// Floating point round to integer like G_FRINT, but without raising the
/// inexact exception (nearbyint).
HANDLE_TARGET_OPCODE(G_FNEARBYINT)

/// Generic AddressSpaceCast.
HANDLE_TARGET_OPCODE(G_ADDRSPACE_CAST)

/// Generic block address
HANDLE_TARGET_OPCODE(G_BLOCK_ADDR)

/// Generic jump table address
HANDLE_TARGET_OPCODE(G_JUMP_TABLE)

/// Generic dynamic stack allocation.
HANDLE_TARGET_OPCODE(G_DYN_STACKALLOC)

/// Strict floating point instructions.
HANDLE_TARGET_OPCODE(G_STRICT_FADD)
HANDLE_TARGET_OPCODE(G_STRICT_FSUB)
HANDLE_TARGET_OPCODE(G_STRICT_FMUL)
HANDLE_TARGET_OPCODE(G_STRICT_FDIV)
HANDLE_TARGET_OPCODE(G_STRICT_FREM)
HANDLE_TARGET_OPCODE(G_STRICT_FMA)
HANDLE_TARGET_OPCODE(G_STRICT_FSQRT)
HANDLE_TARGET_OPCODE(G_STRICT_FLDEXP)

/// read_register intrinsic
HANDLE_TARGET_OPCODE(G_READ_REGISTER)

/// write_register intrinsic
HANDLE_TARGET_OPCODE(G_WRITE_REGISTER)

/// llvm.memcpy intrinsic
HANDLE_TARGET_OPCODE(G_MEMCPY)

/// llvm.memcpy.inline intrinsic
HANDLE_TARGET_OPCODE(G_MEMCPY_INLINE)

/// llvm.memmove intrinsic
HANDLE_TARGET_OPCODE(G_MEMMOVE)

/// llvm.memset intrinsic
HANDLE_TARGET_OPCODE(G_MEMSET)
HANDLE_TARGET_OPCODE(G_BZERO)

/// Vector reductions
HANDLE_TARGET_OPCODE(G_VECREDUCE_SEQ_FADD)
HANDLE_TARGET_OPCODE(G_VECREDUCE_SEQ_FMUL)
HANDLE_TARGET_OPCODE(G_VECREDUCE_FADD)
HANDLE_TARGET_OPCODE(G_VECREDUCE_FMUL)
HANDLE_TARGET_OPCODE(G_VECREDUCE_FMAX)
HANDLE_TARGET_OPCODE(G_VECREDUCE_FMIN)
HANDLE_TARGET_OPCODE(G_VECREDUCE_ADD)
HANDLE_TARGET_OPCODE(G_VECREDUCE_MUL)
HANDLE_TARGET_OPCODE(G_VECREDUCE_AND)
HANDLE_TARGET_OPCODE(G_VECREDUCE_OR)
HANDLE_TARGET_OPCODE(G_VECREDUCE_XOR)
HANDLE_TARGET_OPCODE(G_VECREDUCE_SMAX)
HANDLE_TARGET_OPCODE(G_VECREDUCE_SMIN)
HANDLE_TARGET_OPCODE(G_VECREDUCE_UMAX)
HANDLE_TARGET_OPCODE(G_VECREDUCE_UMIN)

HANDLE_TARGET_OPCODE(G_SBFX)
HANDLE_TARGET_OPCODE(G_UBFX)

/// Marker for the end of the generic opcodes.
/// This is used to check whether an opcode is in the range of the
/// generic opcodes.
HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPCODE_END, G_UBFX)

/// GENERIC_OP_END - This must be the last enum value in this list.
/// The target-specific post-isel opcode values start here.
HANDLE_TARGET_OPCODE_MARKER(GENERIC_OP_END, PRE_ISEL_GENERIC_OPCODE_END)
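
// Example (editor's sketch, not part of the original .def file): consumers
// define the HANDLE_TARGET_OPCODE macros before including this file.  This is
// roughly how the opcode enumeration is instantiated:
//
// @code
//   enum TargetOpcodeEnum : unsigned {
//   #define HANDLE_TARGET_OPCODE(OPC) OPC,
//   #define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
//   #include "llvm/Support/TargetOpcodes.def"
//   };
// @endcode
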
//===-- llvm/Support/ARMWinEH.h - Windows on ARM EH Constants ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ARMWINEH_H
#define LLVM_SUPPORT_ARMWINEH_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Endian.h"

namespace llvm {
namespace ARM {
namespace WinEH {
enum class RuntimeFunctionFlag {
  RFF_Unpacked,       /// unpacked entry
  RFF_Packed,         /// packed entry
  RFF_PackedFragment, /// packed entry representing a fragment
  RFF_Reserved,       /// reserved
};

enum class ReturnType {
  RT_POP,             /// return via pop {pc} (L flag must be set)
  RT_B,               /// 16-bit branch
  RT_BW,              /// 32-bit branch
  RT_NoEpilogue,      /// no epilogue (fragment)
};

/// RuntimeFunction - An entry in the table of procedure data (.pdata)
///
/// This is ARM specific, but the Function Start RVA, Flag and
/// ExceptionInformationRVA fields work identically for ARM64.
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +---------------------------------------------------------------+
/// |                     Function Start RVA                        |
/// +-------------------+-+-+-+-----+-+---+---------------------+---+
/// |    Stack Adjust   |C|L|R| Reg |H|Ret|   Function Length   |Flg|
/// +-------------------+-+-+-+-----+-+---+---------------------+---+
///
/// Flag : 2-bit field with the following meanings:
///   - 00 = packed unwind data not used; remaining bits point to .xdata record
///   - 01 = packed unwind data
///   - 10 = packed unwind data, function assumed to have no prologue; useful
///          for function fragments that are discontiguous with the start of the
///          function
///   - 11 = reserved
/// Function Length : 11-bit field providing the length of the entire function
///                   in bytes, divided by 2; if the function is greater than
///                   4KB, a full .xdata record must be used instead
/// Ret : 2-bit field indicating how the function returns
///   - 00 = return via pop {pc} (the L bit must be set)
///   - 01 = return via 16-bit branch
///   - 10 = return via 32-bit branch
///   - 11 = no epilogue; useful for function fragments that may only contain a
///          prologue but the epilogue is elsewhere
/// H : 1-bit flag indicating whether the function "homes" the integer parameter
///     registers (r0-r3), allocating 16-bytes on the stack
/// Reg : 3-bit field indicating the index of the last saved non-volatile
///       register.  If the R bit is set to 0, then only integer registers are
///       saved (r4-rN, where N is 4 + Reg).  If the R bit is set to 1, then
///       only floating-point registers are being saved (d8-dN, where N is
///       8 + Reg).  The special case of the R bit being set to 1 and Reg equal
///       to 7 indicates that no registers are saved.
/// R : 1-bit flag indicating whether the non-volatile registers are integer or
///     floating-point.  0 indicates integer, 1 indicates floating-point.  The
///     special case of the R-flag being set and Reg being set to 7 indicates
///     that no non-volatile registers are saved.
/// L : 1-bit flag indicating whether the function saves/restores the link
///     register (LR)
/// C : 1-bit flag indicating whether the function includes extra instructions
///     to setup a frame chain for fast walking.  If this flag is set, r11 is
///     implicitly added to the list of saved non-volatile integer registers.
/// Stack Adjust : 10-bit field indicating the number of bytes of stack that are
///                allocated for this function.  Only values between 0x000 and
///                0x3f3 can be directly encoded.  If the value is 0x3f4 or
///                greater, then the low 4 bits have special meaning as follows:
///                - Bit 0-1
///                  indicate the number of words of adjustment (1-4), minus 1
///                - Bit 2
///                  indicates if the prologue combined adjustment into push
///                - Bit 3
///                  indicates if the epilogue combined adjustment into pop
///
/// RESTRICTIONS:
///   - IF C is SET:
///     + L flag must be set since frame chaining requires r11 and lr
///     + r11 must NOT be included in the set of registers described by Reg
///   - IF Ret is 0:
///     + L flag must be set

// NOTE: RuntimeFunction is meant to be a simple class that provides raw access
// to all fields in the structure.  The accessor methods reflect the names of
// the bitfields that they correspond to.  Although some obvious simplifications
// are possible via merging of methods, it would prevent the use of this class
// to fully inspect the contents of the data structure which is particularly
// useful for scenarios such as llvm-readobj to aid in testing.

class RuntimeFunction {
public:
  const support::ulittle32_t BeginAddress;
  const support::ulittle32_t UnwindData;

  RuntimeFunction(const support::ulittle32_t *Data)
    : BeginAddress(Data[0]), UnwindData(Data[1]) {}

  RuntimeFunction(const support::ulittle32_t BeginAddress,
                  const support::ulittle32_t UnwindData)
    : BeginAddress(BeginAddress), UnwindData(UnwindData) {}

  RuntimeFunctionFlag Flag() const {
    return RuntimeFunctionFlag(UnwindData & 0x3);
  }

  uint32_t ExceptionInformationRVA() const {
    assert(Flag() == RuntimeFunctionFlag::RFF_Unpacked &&
           "unpacked form required for this operation");
    return (UnwindData & ~0x3);
  }

  uint32_t PackedUnwindData() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return (UnwindData & ~0x3);
  }
  uint32_t FunctionLength() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return (((UnwindData & 0x00001ffc) >> 2) << 1);
  }
  ReturnType Ret() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    assert(((UnwindData & 0x00006000) || L()) && "L must be set to 1");
    return ReturnType((UnwindData & 0x00006000) >> 13);
  }
  bool H() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x00008000) >> 15);
  }
  uint8_t Reg() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x00070000) >> 16);
  }
  bool R() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x00080000) >> 19);
  }
  bool L() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x00100000) >> 20);
  }
  bool C() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    assert(((~UnwindData & 0x00200000) || L()) &&
           "L flag must be set, chaining requires r11 and LR");
    assert(((~UnwindData & 0x00200000) || (Reg() < 7) || R()) &&
           "r11 must not be included in Reg; C implies r11");
    return ((UnwindData & 0x00200000) >> 21);
  }
  uint16_t StackAdjust() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0xffc00000) >> 22);
  }
};
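
// Example (editor's sketch, not part of the upstream header): decoding the
// packed fields of a hypothetical .pdata entry.  The raw words below are made
// up for illustration.
//
// @code
//   using namespace llvm::ARM::WinEH;
//   const llvm::support::ulittle32_t Entry[2] = {
//       llvm::support::ulittle32_t(0x00001000),  // Function Start RVA
//       llvm::support::ulittle32_t(0x00500801)}; // packed data, Flg == 01
//   RuntimeFunction RF(Entry);
//   assert(RF.Flag() == RuntimeFunctionFlag::RFF_Packed);
//   uint32_t Len = RF.FunctionLength(); // 0x400 bytes
//   bool SavesLR = RF.L();              // true: LR is saved/restored
//   uint16_t Adj = RF.StackAdjust();    // 1 (directly encoded, in words)
// @endcode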

/// PrologueFolding - pseudo-flag derived from Stack Adjust indicating that the
/// prologue has stack adjustment combined into the push
inline bool PrologueFolding(const RuntimeFunction &RF) {
  return RF.StackAdjust() >= 0x3f4 && (RF.StackAdjust() & 0x4);
}
/// Epilogue - pseudo-flag derived from Stack Adjust indicating that the
/// epilogue has stack adjustment combined into the pop
inline bool EpilogueFolding(const RuntimeFunction &RF) {
  return RF.StackAdjust() >= 0x3f4 && (RF.StackAdjust() & 0x8);
}
/// StackAdjustment - calculated stack adjustment in words.  The stack
/// adjustment should be determined via this function to account for the
/// special encoding used when the value is >= 0x3f4.
inline uint16_t StackAdjustment(const RuntimeFunction &RF) {
  uint16_t Adjustment = RF.StackAdjust();
  if (Adjustment >= 0x3f4)
    return (Adjustment & 0x3) + 1;
  return Adjustment;
}
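
// Worked example (editor's note): a packed StackAdjust field of 0x3f7 is in
// the specially-encoded range, so its low bits are decoded individually.
//
// @code
//   // RF.StackAdjust() == 0x3f7 (>= 0x3f4):
//   //   bits 0-1 == 3 -> StackAdjustment(RF) == 4 words (16 bytes)
//   //   bit  2   == 1 -> PrologueFolding(RF): adjustment folded into the push
//   //   bit  3   == 0 -> !EpilogueFolding(RF): popped separately
//   uint32_t Bytes = StackAdjustment(RF) * 4; // words are 4 bytes
// @endcode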

/// SavedRegisterMask - Utility function to calculate the set of saved general
/// purpose (r0-r15) and VFP (d0-d31) registers.
std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF,
                                                bool Prologue = true);

/// RuntimeFunctionARM64 - An entry in the table of procedure data (.pdata)
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +---------------------------------------------------------------+
/// |                     Function Start RVA                        |
/// +-----------------+---+-+-------+-----+---------------------+---+
/// |    Frame Size   |CR |H| RegI  |RegF |   Function Length   |Flg|
/// +-----------------+---+-+-------+-----+---------------------+---+
///
/// See https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
/// for the full reference for this struct.

class RuntimeFunctionARM64 {
public:
  const support::ulittle32_t BeginAddress;
  const support::ulittle32_t UnwindData;

  RuntimeFunctionARM64(const support::ulittle32_t *Data)
      : BeginAddress(Data[0]), UnwindData(Data[1]) {}

  RuntimeFunctionARM64(const support::ulittle32_t BeginAddress,
                       const support::ulittle32_t UnwindData)
      : BeginAddress(BeginAddress), UnwindData(UnwindData) {}

  RuntimeFunctionFlag Flag() const {
    return RuntimeFunctionFlag(UnwindData & 0x3);
  }

  uint32_t ExceptionInformationRVA() const {
    assert(Flag() == RuntimeFunctionFlag::RFF_Unpacked &&
           "unpacked form required for this operation");
    return (UnwindData & ~0x3);
  }

  uint32_t PackedUnwindData() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return (UnwindData & ~0x3);
  }
  uint32_t FunctionLength() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return (((UnwindData & 0x00001ffc) >> 2) << 2);
  }
  uint8_t RegF() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x0000e000) >> 13);
  }
  uint8_t RegI() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x000f0000) >> 16);
  }
  bool H() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x00100000) >> 20);
  }
  uint8_t CR() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0x600000) >> 21);
  }
  uint16_t FrameSize() const {
    assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
            Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
           "packed form required for this operation");
    return ((UnwindData & 0xff800000) >> 23);
  }
};

/// ExceptionDataRecord - An entry in the table of exception data (.xdata)
///
/// The format on ARM is:
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +-------+---------+-+-+-+---+-----------------------------------+
/// | C Wrd | Epi Cnt |F|E|X|Ver|         Function Length           |
/// +-------+--------+'-'-'-'---'---+-------------------------------+
/// |    Reserved    |Ex. Code Words|   (Extended Epilogue Count)   |
/// +-------+--------+--------------+-------------------------------+
///
/// The format on ARM64 is:
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +---------+---------+-+-+---+-----------------------------------+
/// |  C Wrd  | Epi Cnt |E|X|Ver|         Function Length           |
/// +---------+------+--'-'-'---'---+-------------------------------+
/// |    Reserved    |Ex. Code Words|   (Extended Epilogue Count)   |
/// +-------+--------+--------------+-------------------------------+
///
/// Function Length : 18-bit field indicating the total length of the function
///                   in bytes divided by 2.  If a function is larger than
///                   512KB, then multiple pdata and xdata records must be used.
/// Vers : 2-bit field describing the version of the remaining structure.  Only
///        version 0 is currently defined (values 1-3 are not permitted).
/// X : 1-bit field indicating the presence of exception data
/// E : 1-bit field indicating that the single epilogue is packed into the
///     header
/// F : 1-bit field indicating that the record describes a function fragment
///     (implies that no prologue is present, and prologue processing should be
///     skipped) (ARM only)
/// Epilogue Count : 5-bit field that differs in meaning based on the E field.
///
///                  If E is set, then this field specifies the index of the
///                  first unwind code describing the (only) epilogue.
///
///                  Otherwise, this field indicates the number of exception
///                  scopes.  If more than 31 scopes exist, then this field and
///                  the Code Words field must both be set to 0 to indicate that
///                  an extension word is required.
/// Code Words : 4-bit (5-bit on ARM64) field that specifies the number of
///              32-bit words needed to contain all the unwind codes.  If more
///              than 15 words (31 words on ARM64) are required, then this field
///              and the Epilogue Count field must both be set to 0 to indicate
///              that an extension word is required.
/// Extended Epilogue Count, Extended Code Words :
///                          Valid only if Epilog Count and Code Words are both
///                          set to 0.  Provides an 8-bit extended code word
///                          count and 16-bits for epilogue count
///
/// The epilogue scope format on ARM is:
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +----------------+------+---+-----------------------------------+
/// |  Ep Start Idx  | Cond |Res|       Epilogue Start Offset       |
/// +----------------+------+---+-----------------------------------+
///
/// The epilogue scope format on ARM64 is:
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +-------------------+-------+-----------------------------------+
/// |  Ep Start Idx     |  Res  |   Epilogue Start Offset           |
/// +-------------------+-------+-----------------------------------+
///
/// If the E bit is unset in the header, the header is followed by a series of
/// epilogue scopes, which are sorted by their offset.
///
/// Epilogue Start Offset: 18-bit field encoding the offset of the epilogue
///                        relative to the start of the function, in bytes
///                        divided by two
/// Res : 2-bit field (4 bits on ARM64) reserved for future expansion; must be
///       set to 0
/// Condition : (ARM only) 4-bit field providing the condition under which the
///             epilogue is executed.  Unconditional epilogues should set this
///             field to 0xe. Epilogues must be entirely conditional or
///             unconditional, and in Thumb-2 mode.  The epilogue begins with
///             the first instruction after the IT opcode.
/// Epilogue Start Index : 8-bit field (10 bits on ARM64) indicating the byte
///                        index of the first unwind code describing the
///                        epilogue
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +---------------+---------------+---------------+---------------+
/// | Unwind Code 3 | Unwind Code 2 | Unwind Code 1 | Unwind Code 0 |
/// +---------------+---------------+---------------+---------------+
///
/// The epilogue scopes are followed by the byte code describing the unwinding.
/// It is padded up to word alignment.  Bytes are stored in little-endian
/// order.
///
///  3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
///  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
/// +---------------------------------------------------------------+
/// |           Exception Handler RVA (requires X = 1)              |
/// +---------------------------------------------------------------+
/// |  (possibly followed by data required for exception handler)   |
/// +---------------------------------------------------------------+
///
/// If the X bit is set in the header, the unwind byte code is followed by the
/// exception handler information.  This consists of one Exception Handler RVA,
/// which is the address of the exception handler, followed immediately by the
/// variable-length data associated with the exception handler.
///

struct EpilogueScope {
  const support::ulittle32_t ES;

  EpilogueScope(const support::ulittle32_t Data) : ES(Data) {}
  // Same for both ARM and AArch64.
  uint32_t EpilogueStartOffset() const {
    return (ES & 0x0003ffff);
  }

  // Different implementations for ARM and AArch64.
  uint8_t ResARM() const {
    return ((ES & 0x000c0000) >> 18);
  }

  uint8_t ResAArch64() const {
    return ((ES & 0x003c0000) >> 18); // Res occupies bits 18-21 on AArch64.
  }

  // Condition is only applicable to ARM.
  uint8_t Condition() const {
    return ((ES & 0x00f00000) >> 20);
  }

  // Different implementations for ARM and AArch64.
  uint8_t EpilogueStartIndexARM() const {
    return ((ES & 0xff000000) >> 24);
  }

  uint16_t EpilogueStartIndexAArch64() const {
    return ((ES & 0xffc00000) >> 22);
  }
};

struct ExceptionDataRecord;
inline size_t HeaderWords(const ExceptionDataRecord &XR);

struct ExceptionDataRecord {
  const support::ulittle32_t *Data;
  bool isAArch64;

  ExceptionDataRecord(const support::ulittle32_t *Data, bool isAArch64) :
    Data(Data), isAArch64(isAArch64) {}

  uint32_t FunctionLength() const {
    return (Data[0] & 0x0003ffff);
  }

  uint32_t FunctionLengthInBytesARM() const {
    return FunctionLength() << 1;
  }

  uint32_t FunctionLengthInBytesAArch64() const {
    return FunctionLength() << 2;
  }

  uint8_t Vers() const {
    return (Data[0] & 0x000C0000) >> 18;
  }

  bool X() const {
    return ((Data[0] & 0x00100000) >> 20);
  }

  bool E() const {
    return ((Data[0] & 0x00200000) >> 21);
  }

  bool F() const {
    assert(!isAArch64 && "Fragments are only supported on ARMv7 WinEH");
    return ((Data[0] & 0x00400000) >> 22);
  }

  uint16_t EpilogueCount() const {
    if (HeaderWords(*this) == 1) {
      if (isAArch64)
        return (Data[0] & 0x07C00000) >> 22;
      return (Data[0] & 0x0f800000) >> 23;
    }
    return Data[1] & 0x0000ffff;
  }

  uint8_t CodeWords() const {
    if (HeaderWords(*this) == 1) {
      if (isAArch64)
        return (Data[0] & 0xf8000000) >> 27;
      return (Data[0] & 0xf0000000) >> 28;
    }
    return (Data[1] & 0x00ff0000) >> 16;
  }

  ArrayRef<support::ulittle32_t> EpilogueScopes() const {
    assert(E() == 0 && "epilogue scopes are only present when the E bit is 0");
    size_t Offset = HeaderWords(*this);
    return ArrayRef(&Data[Offset], EpilogueCount());
  }

  ArrayRef<uint8_t> UnwindByteCode() const {
    const size_t Offset =
        HeaderWords(*this) + (E() ? 0 : EpilogueCount());
    const uint8_t *ByteCode =
      reinterpret_cast<const uint8_t *>(&Data[Offset]);
    return ArrayRef(ByteCode, CodeWords() * sizeof(uint32_t));
  }

  uint32_t ExceptionHandlerRVA() const {
    assert(X() && "Exception Handler RVA is only valid if the X bit is set");
    return Data[HeaderWords(*this) + (E() ? 0 : EpilogueCount()) + CodeWords()];
  }

  uint32_t ExceptionHandlerParameter() const {
    assert(X() && "Exception Handler RVA is only valid if the X bit is set");
    return Data[HeaderWords(*this) + (E() ? 0 : EpilogueCount()) + CodeWords() +
                1];
  }
};

inline size_t HeaderWords(const ExceptionDataRecord &XR) {
  if (XR.isAArch64)
    return (XR.Data[0] & 0xffc00000) ? 1 : 2;
  return (XR.Data[0] & 0xff800000) ? 1 : 2;
}
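
// Example (editor's sketch, not part of the upstream header): walking a
// mapped .xdata record for ARM.  XData is assumed to point at the record.
//
// @code
//   ExceptionDataRecord XR(XData, /*isAArch64=*/false);
//   uint32_t FuncLen = XR.FunctionLengthInBytesARM();
//   if (!XR.E()) { // E clear: explicit epilogue scopes follow the header
//     for (const llvm::support::ulittle32_t &Raw : XR.EpilogueScopes()) {
//       EpilogueScope ES(Raw);
//       uint32_t Offset = ES.EpilogueStartOffset() << 1; // bytes on ARM
//       (void)Offset;
//     }
//   }
//   llvm::ArrayRef<uint8_t> Codes = XR.UnwindByteCode();
//   uint32_t HandlerRVA = XR.X() ? XR.ExceptionHandlerRVA() : 0;
// @endcode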
}
}
}

#endif
//===- llvm/Support/Path.h - Path Operating System Concept ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::path namespace. It is designed after
// TR2/boost filesystem (v3), but modified to remove exception handling and the
// path class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PATH_H
#define LLVM_SUPPORT_PATH_H

#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/DataTypes.h"
#include <iterator>

namespace llvm {
namespace sys {
namespace path {

enum class Style {
  native,
  posix,
  windows_slash,
  windows_backslash,
  windows = windows_backslash, // deprecated
};

/// Check if \p S uses POSIX path rules.
constexpr bool is_style_posix(Style S) {
  if (S == Style::posix)
    return true;
  if (S != Style::native)
    return false;
#if defined(_WIN32)
  return false;
#else
  return true;
#endif
}

/// Check if \p S uses Windows path rules.
constexpr bool is_style_windows(Style S) { return !is_style_posix(S); }

/// @name Lexical Component Iterator
/// @{

/// Path iterator.
///
/// This is an input iterator that iterates over the individual components in
/// \a path. The traversal order is as follows:
/// * The root-name element, if present.
/// * The root-directory element, if present.
/// * Each successive filename element, if present.
/// * Dot, if one or more trailing non-root slash characters are present.
/// Traversing backwards is possible with \a reverse_iterator
///
/// Iteration examples. Each component is separated by ',':
/// @code
///   /          => /
///   /foo       => /,foo
///   foo/       => foo,.
///   /foo/bar   => /,foo,bar
///   ../        => ..,.
///   C:\foo\bar => C:,\,foo,bar
/// @endcode
class const_iterator
    : public iterator_facade_base<const_iterator, std::input_iterator_tag,
                                  const StringRef> {
  StringRef Path;          ///< The entire path.
  StringRef Component;     ///< The current component. Not necessarily in Path.
  size_t    Position = 0;  ///< The iterator's current position within Path.
  Style S = Style::native; ///< The path style to use.

  // An end iterator has Position = Path.size() + 1.
  friend const_iterator begin(StringRef path, Style style);
  friend const_iterator end(StringRef path);

public:
  reference operator*() const { return Component; }
  const_iterator &operator++();    // preincrement
  bool operator==(const const_iterator &RHS) const;

  /// Difference in bytes between this and RHS.
  ptrdiff_t operator-(const const_iterator &RHS) const;
};

/// Reverse path iterator.
///
/// This is an input iterator that iterates over the individual components in
/// \a path in reverse order. The traversal order is exactly reversed from that
/// of \a const_iterator
class reverse_iterator
    : public iterator_facade_base<reverse_iterator, std::input_iterator_tag,
                                  const StringRef> {
  StringRef Path;          ///< The entire path.
  StringRef Component;     ///< The current component. Not necessarily in Path.
  size_t    Position = 0;  ///< The iterator's current position within Path.
  Style S = Style::native; ///< The path style to use.

  friend reverse_iterator rbegin(StringRef path, Style style);
  friend reverse_iterator rend(StringRef path);

public:
  reference operator*() const { return Component; }
  reverse_iterator &operator++();    // preincrement
  bool operator==(const reverse_iterator &RHS) const;

  /// Difference in bytes between this and RHS.
  ptrdiff_t operator-(const reverse_iterator &RHS) const;
};

/// Get begin iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized with the first component of \a path.
const_iterator begin(StringRef path, Style style = Style::native);

/// Get end iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized to the end of \a path.
const_iterator end(StringRef path);

/// Get reverse begin iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized with the first reverse component of \a path.
reverse_iterator rbegin(StringRef path, Style style = Style::native);

/// Get reverse end iterator over \a path.
/// @param path Input path.
/// @returns Iterator initialized to the reverse end of \a path.
reverse_iterator rend(StringRef path);
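
// Example (editor's sketch): iterating over the components of a path.
//
// @code
//   llvm::StringRef P = "/foo/bar/baz.txt";
//   for (auto I = llvm::sys::path::begin(P), E = llvm::sys::path::end(P);
//        I != E; ++I)
//     llvm::outs() << *I << '\n'; // prints "/", "foo", "bar", "baz.txt"
// @endcode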

/// @}
/// @name Lexical Modifiers
/// @{

/// Remove the last component from \a path unless it is the root dir.
///
/// Similar to the POSIX "dirname" utility.
///
/// @code
///   directory/filename.cpp => directory/
///   directory/             => directory
///   filename.cpp           => <empty>
///   /                      => /
/// @endcode
///
/// @param path A path that is modified to not have a file component.
void remove_filename(SmallVectorImpl<char> &path, Style style = Style::native);

/// Replace the file extension of \a path with \a extension.
///
/// @code
///   ./filename.cpp => ./filename.extension
///   ./filename     => ./filename.extension
///   ./             => ./.extension
/// @endcode
///
/// @param path A path that has its extension replaced with \a extension.
/// @param extension The extension to be added. It may be empty. It may also
///                  optionally start with a '.', if it does not, one will be
///                  prepended.
void replace_extension(SmallVectorImpl<char> &path, const Twine &extension,
                       Style style = Style::native);

/// Replace matching path prefix with another path.
///
/// @code
///   /foo, /old, /new => /foo
///   /old, /old, /new => /new
///   /old, /old/, /new => /old
///   /old/foo, /old, /new => /new/foo
///   /old/foo, /old/, /new => /new/foo
///   /old/foo, /old/, /new/ => /new/foo
///   /oldfoo, /old, /new => /oldfoo
///   /foo, <empty>, /new => /new/foo
///   /foo, <empty>, new => new/foo
///   /old/foo, /old, <empty> => /foo
/// @endcode
///
/// @param Path If \a Path starts with \a OldPrefix modify to instead
///        start with \a NewPrefix.
/// @param OldPrefix The path prefix to strip from \a Path.
/// @param NewPrefix The path prefix to replace \a OldPrefix with.
/// @param style The style used to match the prefix. Exact match using
/// Posix style, case/separator insensitive match for Windows style.
/// @result true if \a Path begins with OldPrefix
bool replace_path_prefix(SmallVectorImpl<char> &Path, StringRef OldPrefix,
                         StringRef NewPrefix,
                         Style style = Style::native);

/// Remove redundant leading "./" pieces and consecutive separators.
///
/// @param path Input path.
/// @result The cleaned-up \a path.
StringRef remove_leading_dotslash(StringRef path, Style style = Style::native);

/// In-place remove any './' and optionally '../' components from a path.
///
/// @param path processed path
/// @param remove_dot_dot specify if '../' (except for leading "../") should be
/// removed
/// @result True if path was changed
bool remove_dots(SmallVectorImpl<char> &path, bool remove_dot_dot = false,
                 Style style = Style::native);
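
// Example (editor's sketch): cleaning up a path in place.
//
// @code
//   llvm::SmallString<128> P("/a/./b/../c");
//   llvm::sys::path::remove_dots(P, /*remove_dot_dot=*/true); // P == "/a/c"
//   llvm::StringRef Q =
//       llvm::sys::path::remove_leading_dotslash("./x/y");    // Q == "x/y"
// @endcode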

/// Append to path.
///
/// @code
///   /foo  + bar/f => /foo/bar/f
///   /foo/ + bar/f => /foo/bar/f
///   foo   + bar/f => foo/bar/f
/// @endcode
///
/// @param path Set to \a path + \a component.
/// @param a The component to be appended to \a path.
void append(SmallVectorImpl<char> &path, const Twine &a,
                                         const Twine &b = "",
                                         const Twine &c = "",
                                         const Twine &d = "");

void append(SmallVectorImpl<char> &path, Style style, const Twine &a,
            const Twine &b = "", const Twine &c = "", const Twine &d = "");

/// Append to path.
///
/// @code
///   /foo  + [bar,f] => /foo/bar/f
///   /foo/ + [bar,f] => /foo/bar/f
///   foo   + [bar,f] => foo/bar/f
/// @endcode
///
/// @param path Set to \a path + [\a begin, \a end).
/// @param begin Start of components to append.
/// @param end One past the end of components to append.
void append(SmallVectorImpl<char> &path, const_iterator begin,
            const_iterator end, Style style = Style::native);

/// @}
/// @name Transforms (or some other better name)
/// @{

/// Convert path to the native form. This is used to give paths to users and
/// operating system calls in the platform's normal way. For example, on Windows
/// all '/' are converted to '\'. On Unix, it converts all '\' to '/'.
///
/// @param path A path that is transformed to native format.
/// @param result Holds the result of the transformation.
void native(const Twine &path, SmallVectorImpl<char> &result,
            Style style = Style::native);

/// Convert path to the native form in place. This is used to give paths to
/// users and operating system calls in the platform's normal way. For example,
/// on Windows all '/' are converted to '\'.
///
/// @param path A path that is transformed to native format.
void native(SmallVectorImpl<char> &path, Style style = Style::native);

/// For Windows path styles, convert path to use the preferred path separators.
/// For other styles, do nothing.
///
/// @param path A path that is transformed to preferred format.
inline void make_preferred(SmallVectorImpl<char> &path,
                           Style style = Style::native) {
  if (!is_style_windows(style))
    return;
  native(path, style);
}

/// Replaces backslashes with slashes if Windows.
///
/// @param path processed path
/// @result The result of replacing backslashes with forward slashes if Windows.
/// On Unix, this function is a no-op because backslashes are valid path
/// characters.
std::string convert_to_slash(StringRef path, Style style = Style::native);

/// @}
/// @name Lexical Observers
/// @{

/// Get root name.
///
/// @code
///   //net/hello => //net
///   c:/hello    => c: (on Windows, on other platforms nothing)
///   /hello      => <empty>
/// @endcode
///
/// @param path Input path.
/// @result The root name of \a path if it has one, otherwise "".
StringRef root_name(StringRef path, Style style = Style::native);

/// Get root directory.
///
/// @code
///   /goo/hello => /
///   c:/hello   => /
///   d/file.txt => <empty>
/// @endcode
///
/// @param path Input path.
/// @result The root directory of \a path if it has one, otherwise
///               "".
StringRef root_directory(StringRef path, Style style = Style::native);

/// Get root path.
///
/// Equivalent to root_name + root_directory.
///
/// @param path Input path.
/// @result The root path of \a path if it has one, otherwise "".
StringRef root_path(StringRef path, Style style = Style::native);

/// Get relative path.
///
/// @code
///   C:\hello\world => hello\world
///   foo/bar        => foo/bar
///   /foo/bar       => foo/bar
/// @endcode
///
/// @param path Input path.
/// @result The path starting after root_path if one exists, otherwise "".
StringRef relative_path(StringRef path, Style style = Style::native);

/// Get parent path.
///
/// @code
///   /          => <empty>
///   /foo       => /
///   foo/../bar => foo/..
/// @endcode
///
/// @param path Input path.
/// @result The parent path of \a path if one exists, otherwise "".
StringRef parent_path(StringRef path, Style style = Style::native);

/// Get filename.
///
/// @code
///   /foo.txt    => foo.txt
///   .          => .
///   ..         => ..
///   /          => /
/// @endcode
///
/// @param path Input path.
/// @result The filename part of \a path. This is defined as the last component
///         of \a path. Similar to the POSIX "basename" utility.
StringRef filename(StringRef path, Style style = Style::native);

/// Get stem.
///
/// If filename contains a dot but not solely one or two dots, result is the
/// substring of filename ending at (but not including) the last dot. Otherwise
/// it is filename.
///
/// @code
///   /foo/bar.txt => bar
///   /foo/bar     => bar
///   /foo/.txt    => <empty>
///   /foo/.       => .
///   /foo/..      => ..
/// @endcode
///
/// @param path Input path.
/// @result The stem of \a path.
StringRef stem(StringRef path, Style style = Style::native);

/// Get extension.
///
/// If filename contains a dot but not solely one or two dots, result is the
/// substring of filename starting at (and including) the last dot, and ending
/// at the end of \a path. Otherwise "".
///
/// @code
///   /foo/bar.txt => .txt
///   /foo/bar     => <empty>
///   /foo/.txt    => .txt
/// @endcode
///
/// @param path Input path.
/// @result The extension of \a path.
StringRef extension(StringRef path, Style style = Style::native);
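
// Example (editor's sketch): decomposing a path with the lexical observers.
//
// @code
//   llvm::StringRef P = "/home/user/archive.tar.gz";
//   llvm::sys::path::parent_path(P); // "/home/user"
//   llvm::sys::path::filename(P);    // "archive.tar.gz"
//   llvm::sys::path::stem(P);        // "archive.tar"
//   llvm::sys::path::extension(P);   // ".gz"
// @endcode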

/// Check whether the given char is a path separator on the host OS.
///
/// @param value a character
/// @result true if \a value is a path separator character on the host OS
bool is_separator(char value, Style style = Style::native);

/// Return the preferred separator for this platform.
///
/// @result StringRef of the preferred separator, null-terminated.
StringRef get_separator(Style style = Style::native);

/// Get the typical temporary directory for the system, e.g.,
/// "/var/tmp" or "C:/TEMP"
///
/// @param erasedOnReboot Whether to favor a path that is erased on reboot
/// rather than one that potentially persists longer. This parameter will be
/// ignored if the user or system has set the typical environment variable
/// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory.
///
/// @param result Holds the resulting path name.
void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);

/// Get the user's home directory.
///
/// @param result Holds the resulting path name.
/// @result True if a home directory is set, false otherwise.
bool home_directory(SmallVectorImpl<char> &result);

/// Get the directory where packages should read user-specific configurations.
/// e.g. $XDG_CONFIG_HOME.
///
/// @param result Holds the resulting path name.
/// @result True if the appropriate path was determined; it need not exist.
bool user_config_directory(SmallVectorImpl<char> &result);

/// Get the directory where installed packages should put their
/// machine-local cache, e.g. $XDG_CACHE_HOME.
///
/// @param result Holds the resulting path name.
/// @result True if the appropriate path was determined; it need not exist.
bool cache_directory(SmallVectorImpl<char> &result);

/// Has root name?
///
/// root_name != ""
///
/// @param path Input path.
/// @result True if the path has a root name, false otherwise.
bool has_root_name(const Twine &path, Style style = Style::native);

/// Has root directory?
///
/// root_directory != ""
///
/// @param path Input path.
/// @result True if the path has a root directory, false otherwise.
bool has_root_directory(const Twine &path, Style style = Style::native);

/// Has root path?
///
/// root_path != ""
///
/// @param path Input path.
/// @result True if the path has a root path, false otherwise.
bool has_root_path(const Twine &path, Style style = Style::native);

/// Has relative path?
///
/// relative_path != ""
///
/// @param path Input path.
/// @result True if the path has a relative path, false otherwise.
bool has_relative_path(const Twine &path, Style style = Style::native);

/// Has parent path?
///
/// parent_path != ""
///
/// @param path Input path.
/// @result True if the path has a parent path, false otherwise.
bool has_parent_path(const Twine &path, Style style = Style::native);

/// Has filename?
///
/// filename != ""
///
/// @param path Input path.
/// @result True if the path has a filename, false otherwise.
bool has_filename(const Twine &path, Style style = Style::native);

/// Has stem?
///
/// stem != ""
///
/// @param path Input path.
/// @result True if the path has a stem, false otherwise.
bool has_stem(const Twine &path, Style style = Style::native);

/// Has extension?
///
/// extension != ""
///
/// @param path Input path.
/// @result True if the path has a extension, false otherwise.
bool has_extension(const Twine &path, Style style = Style::native);

/// Is path absolute?
///
/// According to cppreference.com, C++17 states: "An absolute path is a path
/// that unambiguously identifies the location of a file without reference to
/// an additional starting location."
///
/// In other words, the rules are:
/// 1) POSIX style paths with nonempty root directory are absolute.
/// 2) Windows style paths with nonempty root name and root directory are
///    absolute.
/// 3) No other paths are absolute.
///
/// \see has_root_name
/// \see has_root_directory
///
/// @param path Input path.
/// @result True if the path is absolute, false if it is not.
bool is_absolute(const Twine &path, Style style = Style::native);

/// Is path absolute using GNU rules?
///
/// GNU rules are:
/// 1) Paths starting with a path separator are absolute.
/// 2) Windows style paths are also absolute if they start with a character
///    followed by ':'.
/// 3) No other paths are absolute.
///
/// On Windows style the path "C:\Users\Default" has "C:" as root name and "\"
/// as root directory.
///
/// Hence "C:" on Windows is absolute under GNU rules and not absolute under
/// C++17 because it has no root directory. Likewise "/" and "\" on Windows are
/// absolute under GNU and are not absolute under C++17 due to empty root name.
///
/// \see has_root_name
/// \see has_root_directory
///
/// @param path Input path.
/// @param style The style of \p path (e.g. Windows or POSIX). "native" style
/// means to derive the style from the host.
/// @result True if the path is absolute following GNU rules, false if it is
/// not.
bool is_absolute_gnu(const Twine &path, Style style = Style::native);
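
// Example (editor's sketch): C++17 rules versus GNU rules on Windows-style
// paths.
//
// @code
//   using llvm::sys::path::Style;
//   llvm::sys::path::is_absolute("C:", Style::windows);     // false
//   llvm::sys::path::is_absolute_gnu("C:", Style::windows); // true
//   llvm::sys::path::is_absolute("/", Style::windows);      // false
//   llvm::sys::path::is_absolute_gnu("/", Style::windows);  // true
// @endcode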

/// Is path relative?
///
/// @param path Input path.
/// @result True if the path is relative, false if it is not.
bool is_relative(const Twine &path, Style style = Style::native);

} // end namespace path
} // end namespace sys
} // end namespace llvm

#endif
//===- SymbolRemappingReader.h - Read symbol remapping file -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions needed for reading and applying symbol
// remapping files.
//
// Support is provided only for the Itanium C++ name mangling scheme for now.
//
// NOTE: If you are making changes to this file format, please remember
//       to document them in the Clang documentation at
//       tools/clang/docs/UsersManual.rst.
//
// File format
// -----------
//
// The symbol remappings are written as an ASCII text file. Blank lines and
// lines starting with a # are ignored. All other lines specify a kind of
// mangled name fragment, along with two fragments of that kind that should
// be treated as equivalent, separated by spaces.
//
// See http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling for a
// description of the Itanium name mangling scheme.
//
// The accepted fragment kinds are:
//
//  * name  A <name>, such as 6foobar or St3__1
//  * type  A <type>, such as Ss or N4llvm9StringRefE
//  * encoding  An <encoding> (a complete mangling without the leading _Z)
//
// For example:
//
// # Ignore int / long differences to treat symbols from 32-bit and 64-bit
// # builds with differing size_t / ptrdiff_t / intptr_t as equivalent.
// type i l
// type j m
//
// # Ignore differences between libc++ and libstdc++, and between libstdc++'s
// # C++98 and C++11 ABIs.
// name 3std St3__1
// name 3std St7__cxx11
//
// # Remap a function overload to a specialization of a template (including
// # any local symbols declared within it).
// encoding N2NS1fEi N2NS1fIiEEvT_
//
// # Substitutions must be remapped separately from namespace 'std' for now.
// name Sa NSt3__19allocatorE
// name Sb NSt3__112basic_stringE
// type Ss NSt3__112basic_stringIcSt11char_traitsIcESaE
// # ...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SYMBOLREMAPPINGREADER_H
#define LLVM_SUPPORT_SYMBOLREMAPPINGREADER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ItaniumManglingCanonicalizer.h"

namespace llvm {

class MemoryBuffer;

class SymbolRemappingParseError : public ErrorInfo<SymbolRemappingParseError> {
public:
  SymbolRemappingParseError(StringRef File, int64_t Line, const Twine &Message)
      : File(File), Line(Line), Message(Message.str()) {}

  void log(llvm::raw_ostream &OS) const override {
    OS << File << ':' << Line << ": " << Message;
  }
  std::error_code convertToErrorCode() const override {
    return llvm::inconvertibleErrorCode();
  }

  StringRef getFileName() const { return File; }
  int64_t getLineNum() const { return Line; }
  StringRef getMessage() const { return Message; }

  static char ID;

private:
  std::string File;
  int64_t Line;
  std::string Message;
};

/// Reader for symbol remapping files.
///
/// Remaps the symbol names in profile data to match those in the program
/// according to a set of rules specified in a given file.
class SymbolRemappingReader {
public:
  /// Read remappings from the given buffer, which must live as long as
  /// the remapper.
  Error read(MemoryBuffer &B);

  /// A Key represents an equivalence class of symbol names.
  using Key = uintptr_t;

  /// Construct a key for the given symbol, or return an existing one if an
  /// equivalent name has already been inserted. The symbol name must live
  /// as long as the remapper.
  ///
  /// The result will be Key() if the name cannot be remapped (typically
  /// because it is not a valid mangled name).
  Key insert(StringRef FunctionName) {
    return Canonicalizer.canonicalize(FunctionName);
  }

  /// Map the given symbol name into the key for the corresponding equivalence
  /// class.
  ///
  /// The result will typically be Key() if no equivalent symbol has been
  /// inserted, but this is not guaranteed: a Key different from all keys ever
  /// returned by \c insert may be returned instead.
  Key lookup(StringRef FunctionName) {
    return Canonicalizer.lookup(FunctionName);
  }

private:
  ItaniumManglingCanonicalizer Canonicalizer;
};
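
// Example (editor's sketch, not part of the upstream header): applying a
// remapping.  The mangled names below are hypothetical; the rule treats the
// namespaces 'std' and 'std::__1' as equivalent, as documented above.
//
// @code
//   // Buf must outlive Remapper (see read()).  Needs llvm/Support/MemoryBuffer.h.
//   auto Buf = llvm::MemoryBuffer::getMemBuffer("name 3std St3__1\n");
//   llvm::SymbolRemappingReader Remapper;
//   if (llvm::Error E = Remapper.read(*Buf))
//     return; // real code would report the error
//   auto K1 = Remapper.insert("_ZN3std3fooEv");   // std::foo()
//   auto K2 = Remapper.lookup("_ZNSt3__13fooEv"); // std::__1::foo()
//   // Keys compare equal iff the names are equivalent under the rules.
//   bool Same = K1 != llvm::SymbolRemappingReader::Key() && K1 == K2;
// @endcode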

} // end namespace llvm

#endif // LLVM_SUPPORT_SYMBOLREMAPPINGREADER_H
//===- llvm/Support/Chrono.h - Utilities for Timing Manipulation-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CHRONO_H
#define LLVM_SUPPORT_CHRONO_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/FormatProviders.h"

#include <chrono>
#include <ctime>
#include <ratio>

namespace llvm {

class raw_ostream;

namespace sys {

/// A time point on the system clock. This is provided for two reasons:
/// - to insulate us against subtle differences in behavior caused by
///   differences in system clock precision (which is implementation-defined
///   and differs between platforms).
/// - to shorten the type name
/// The default precision is nanoseconds. If you need a specific precision
/// specify it explicitly. If unsure, use the default. If you need a time point
/// on a clock other than the system_clock, use std::chrono directly.
template <typename D = std::chrono::nanoseconds>
using TimePoint = std::chrono::time_point<std::chrono::system_clock, D>;

/// Convert a TimePoint to std::time_t
inline std::time_t toTimeT(TimePoint<> TP) {
  using namespace std::chrono;
  return system_clock::to_time_t(
      time_point_cast<system_clock::time_point::duration>(TP));
}

/// Convert a std::time_t to a TimePoint
inline TimePoint<std::chrono::seconds>
toTimePoint(std::time_t T) {
  using namespace std::chrono;
  return time_point_cast<seconds>(system_clock::from_time_t(T));
}

/// Convert a std::time_t + nanoseconds to a TimePoint
inline TimePoint<>
toTimePoint(std::time_t T, uint32_t nsec) {
  using namespace std::chrono;
  return time_point_cast<nanoseconds>(system_clock::from_time_t(T))
    + nanoseconds(nsec);
}
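
// Example (editor's sketch): round-tripping between TimePoint and time_t.
//
// @code
//   llvm::sys::TimePoint<> Now = std::chrono::system_clock::now();
//   std::time_t T = toTimeT(Now);             // whole seconds since the epoch
//   llvm::sys::TimePoint<std::chrono::seconds> S = toTimePoint(T);
//   // S equals Now truncated to seconds; if the leftover nanoseconds were
//   // kept alongside T, toTimePoint(T, nsec) reconstructs the full value.
// @endcode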

} // namespace sys

raw_ostream &operator<<(raw_ostream &OS, sys::TimePoint<> TP);

/// Format provider for TimePoint<>
///
/// The options string is a strftime format string, with extensions:
///   - %L is millis: 000-999
///   - %f is micros: 000000-999999
///   - %N is nanos: 000000000 - 999999999
///
/// If no options are given, the default format is "%Y-%m-%d %H:%M:%S.%N".
template <>
struct format_provider<sys::TimePoint<>> {
  static void format(const sys::TimePoint<> &TP, llvm::raw_ostream &OS,
                     StringRef Style);
};

namespace detail {
template <typename Period> struct unit { static const char value[]; };
template <typename Period> const char unit<Period>::value[] = "";

template <> struct unit<std::ratio<3600>> { static const char value[]; };
template <> struct unit<std::ratio<60>> { static const char value[]; };
template <> struct unit<std::ratio<1>> { static const char value[]; };
template <> struct unit<std::milli> { static const char value[]; };
template <> struct unit<std::micro> { static const char value[]; };
template <> struct unit<std::nano> { static const char value[]; };
} // namespace detail

/// Implementation of format_provider<T> for duration types.
///
/// The options string of a duration type has the grammar:
///
///   duration_options  ::= [unit][show_unit [number_options]]
///   unit              ::= `h`|`m`|`s`|`ms|`us`|`ns`
///   show_unit         ::= `+` | `-`
///   number_options    ::= options string for an integral or floating point type
///
///   Examples
///   =================================
///   |  options  | Input | Output    |
///   =================================
///   | ""        | 1s    | 1 s       |
///   | "ms"      | 1s    | 1000 ms   |
///   | "ms-"     | 1s    | 1000      |
///   | "ms-n"    | 1s    | 1,000     |
///   | ""        | 1.0s  | 1.00 s    |
///   =================================
///
///  If the unit of the duration type is not one of the units specified above,
///  it is still possible to format it, provided you explicitly request a
///  display unit or you request that the unit is not displayed.

template <typename Rep, typename Period>
struct format_provider<std::chrono::duration<Rep, Period>> {
private:
  typedef std::chrono::duration<Rep, Period> Dur;
  typedef std::conditional_t<std::chrono::treat_as_floating_point<Rep>::value,
                             double, intmax_t>
      InternalRep;

  template <typename AsPeriod> static InternalRep getAs(const Dur &D) {
    using namespace std::chrono;
    return duration_cast<duration<InternalRep, AsPeriod>>(D).count();
  }

  static std::pair<InternalRep, StringRef> consumeUnit(StringRef &Style,
                                                        const Dur &D) {
    using namespace std::chrono;
    if (Style.consume_front("ns"))
      return {getAs<std::nano>(D), "ns"};
    if (Style.consume_front("us"))
      return {getAs<std::micro>(D), "us"};
    if (Style.consume_front("ms"))
      return {getAs<std::milli>(D), "ms"};
    if (Style.consume_front("s"))
      return {getAs<std::ratio<1>>(D), "s"};
    if (Style.consume_front("m"))
      return {getAs<std::ratio<60>>(D), "m"};
    if (Style.consume_front("h"))
      return {getAs<std::ratio<3600>>(D), "h"};
    return {D.count(), detail::unit<Period>::value};
  }

  static bool consumeShowUnit(StringRef &Style) {
    if (Style.empty())
      return true;
    if (Style.consume_front("-"))
      return false;
    if (Style.consume_front("+"))
      return true;
    assert(0 && "Unrecognised duration format");
    return true;
  }

public:
  static void format(const Dur &D, llvm::raw_ostream &Stream, StringRef Style) {
    InternalRep count;
    StringRef unit;
    std::tie(count, unit) = consumeUnit(Style, D);
    bool show_unit = consumeShowUnit(Style);

    format_provider<InternalRep>::format(count, Stream, Style);

    if (show_unit) {
      assert(!unit.empty());
      Stream << " " << unit;
    }
  }
};
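
// A minimal usage sketch (illustrative): the duration styles from the table
// above as seen through llvm::formatv (assumes llvm/Support/FormatVariadic.h).
//
// \code
//   std::chrono::seconds D(1);
//   llvm::outs() << llvm::formatv("{0}", D);      // "1 s"
//   llvm::outs() << llvm::formatv("{0:ms}", D);   // "1000 ms"
//   llvm::outs() << llvm::formatv("{0:ms-n}", D); // "1,000"
// \endcode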

} // namespace llvm

#endif // LLVM_SUPPORT_CHRONO_H
//===- RWMutex.h - Reader/Writer Mutual Exclusion Lock ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::RWMutex class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RWMUTEX_H
#define LLVM_SUPPORT_RWMUTEX_H

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Threading.h"
#include <cassert>
#include <mutex>
#include <shared_mutex>

// std::shared_timed_mutex is only available on macOS 10.12 and later.
#if defined(__APPLE__) && defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__)
#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200
#define LLVM_USE_RW_MUTEX_IMPL
#endif
#endif

namespace llvm {
namespace sys {

#if defined(LLVM_USE_RW_MUTEX_IMPL)
/// Platform agnostic RWMutex class.
class RWMutexImpl {
  /// @name Constructors
  /// @{
public:
  /// Default constructor. Initializes the lock but doesn't acquire it.
  explicit RWMutexImpl();

  /// @}
  /// @name Do Not Implement
  /// @{
  RWMutexImpl(const RWMutexImpl &original) = delete;
  RWMutexImpl &operator=(const RWMutexImpl &) = delete;
  /// @}

  /// Destructor. Releases and removes the lock.
  ~RWMutexImpl();

  /// @}
  /// @name Methods
  /// @{
public:
  /// Unconditionally acquire the lock in reader mode. If the lock is held
  /// by a writer, this method will wait until it can acquire the lock.
  /// @returns false if any kind of error occurs, true otherwise.
  bool lock_shared();

  /// Release the lock in reader mode.
  /// @returns false if any kind of error occurs, true otherwise.
  bool unlock_shared();

  /// Unconditionally acquire the lock in writer mode. If the lock is held
  /// by any readers or another writer, this method will wait until it can
  /// acquire the lock.
  /// @returns false if any kind of error occurs, true otherwise.
  bool lock();

  /// Release the lock in writer mode.
  /// @returns false if any kind of error occurs, true otherwise.
  bool unlock();

  /// @}
  /// @name Platform Dependent Data
  /// @{
private:
#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
  void *data_ = nullptr; ///< We don't know what the data will be
#endif
};
#endif

/// SmartRWMutex - An R/W mutex with a compile-time constant parameter that
/// indicates whether this mutex should become a no-op when we're not
/// running in multithreaded mode.
template <bool mt_only> class SmartRWMutex {
#if !defined(LLVM_USE_RW_MUTEX_IMPL)
  std::shared_mutex impl;
#else
  RWMutexImpl impl;
#endif
  unsigned readers = 0;
  unsigned writers = 0;

public:
  bool lock_shared() {
    if (!mt_only || llvm_is_multithreaded()) {
      impl.lock_shared();
      return true;
    }

    // Single-threaded debugging code.  This would be racy in multithreaded
    // mode, but provides basic checks in single-threaded mode.
    ++readers;
    return true;
  }

  bool unlock_shared() {
    if (!mt_only || llvm_is_multithreaded()) {
      impl.unlock_shared();
      return true;
    }

    // Single-threaded debugging code.  This would be racy in multithreaded
    // mode, but provides basic checks in single-threaded mode.
    assert(readers > 0 && "Reader lock not acquired before release!");
    --readers;
    return true;
  }

  bool lock() {
    if (!mt_only || llvm_is_multithreaded()) {
      impl.lock();
      return true;
    }

    // Single-threaded debugging code.  This would be racy in multithreaded
    // mode, but provides basic checks in single-threaded mode.
    assert(writers == 0 && "Writer lock already acquired!");
    ++writers;
    return true;
  }

  bool unlock() {
    if (!mt_only || llvm_is_multithreaded()) {
      impl.unlock();
      return true;
    }

    // Single-threaded debugging code.  This would be racy in multithreaded
    // mode, but provides basic checks in single-threaded mode.
    assert(writers == 1 && "Writer lock not acquired before release!");
    --writers;
    return true;
  }
};

typedef SmartRWMutex<false> RWMutex;

/// ScopedReader - RAII acquisition of a reader lock
#if !defined(LLVM_USE_RW_MUTEX_IMPL)
template <bool mt_only>
using SmartScopedReader = const std::shared_lock<SmartRWMutex<mt_only>>;
#else
template <bool mt_only> struct SmartScopedReader {
  SmartRWMutex<mt_only> &mutex;

  explicit SmartScopedReader(SmartRWMutex<mt_only> &m) : mutex(m) {
    mutex.lock_shared();
  }

  ~SmartScopedReader() { mutex.unlock_shared(); }
};
#endif
typedef SmartScopedReader<false> ScopedReader;

/// ScopedWriter - RAII acquisition of a writer lock
#if !defined(LLVM_USE_RW_MUTEX_IMPL)
template <bool mt_only>
using SmartScopedWriter = std::lock_guard<SmartRWMutex<mt_only>>;
#else
template <bool mt_only> struct SmartScopedWriter {
  SmartRWMutex<mt_only> &mutex;

  explicit SmartScopedWriter(SmartRWMutex<mt_only> &m) : mutex(m) {
    mutex.lock();
  }

  ~SmartScopedWriter() { mutex.unlock(); }
};
#endif
typedef SmartScopedWriter<false> ScopedWriter;
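
// A minimal usage sketch (illustrative, not part of the original header):
// guarding shared state with the RAII helpers declared above.
//
// \code
//   static llvm::sys::RWMutex TableLock;
//   static std::map<int, int> Table; // hypothetical shared state
//
//   int lookup(int K) {
//     llvm::sys::ScopedReader Guard(TableLock); // shared (reader) lock
//     auto It = Table.find(K);
//     return It == Table.end() ? 0 : It->second;
//   }
//
//   void update(int K, int V) {
//     llvm::sys::ScopedWriter Guard(TableLock); // exclusive (writer) lock
//     Table[K] = V;
//   }
// \endcode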

} // end namespace sys
} // end namespace llvm

#endif // LLVM_SUPPORT_RWMUTEX_H
PKhwFZ���66Support/Errc.hnu�[���//===- llvm/Support/Errc.h - Defines the llvm::errc enum --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// While std::error_code works OK on all platforms we use, there are some
// problems with std::errc that can be avoided by using our own
// enumeration:
//
// * std::errc is a namespace in some implementations. That means that ADL
//   doesn't work and it is sometimes necessary to write std::make_error_code
//   or in templates:
//   using std::make_error_code;
//   make_error_code(...);
//
//   with this enum it is safe to always just use make_error_code.
//
// * Some implementations define fewer names than others. This header has
//   the intersection of all the ones we support.
//
// * std::errc is just marked with is_error_condition_enum. This means that
//   common patterns like AnErrorCode == errc::no_such_file_or_directory take
//   4 virtual calls instead of two comparisons.
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ERRC_H
#define LLVM_SUPPORT_ERRC_H

#include <system_error>

namespace llvm {
enum class errc {
  argument_list_too_long = int(std::errc::argument_list_too_long),
  argument_out_of_domain = int(std::errc::argument_out_of_domain),
  bad_address = int(std::errc::bad_address),
  bad_file_descriptor = int(std::errc::bad_file_descriptor),
  broken_pipe = int(std::errc::broken_pipe),
  device_or_resource_busy = int(std::errc::device_or_resource_busy),
  directory_not_empty = int(std::errc::directory_not_empty),
  executable_format_error = int(std::errc::executable_format_error),
  file_exists = int(std::errc::file_exists),
  file_too_large = int(std::errc::file_too_large),
  filename_too_long = int(std::errc::filename_too_long),
  function_not_supported = int(std::errc::function_not_supported),
  illegal_byte_sequence = int(std::errc::illegal_byte_sequence),
  inappropriate_io_control_operation =
      int(std::errc::inappropriate_io_control_operation),
  interrupted = int(std::errc::interrupted),
  invalid_argument = int(std::errc::invalid_argument),
  invalid_seek = int(std::errc::invalid_seek),
  io_error = int(std::errc::io_error),
  is_a_directory = int(std::errc::is_a_directory),
  no_child_process = int(std::errc::no_child_process),
  no_lock_available = int(std::errc::no_lock_available),
  no_space_on_device = int(std::errc::no_space_on_device),
  no_such_device_or_address = int(std::errc::no_such_device_or_address),
  no_such_device = int(std::errc::no_such_device),
  no_such_file_or_directory = int(std::errc::no_such_file_or_directory),
  no_such_process = int(std::errc::no_such_process),
  not_a_directory = int(std::errc::not_a_directory),
  not_enough_memory = int(std::errc::not_enough_memory),
  not_supported = int(std::errc::not_supported),
  operation_not_permitted = int(std::errc::operation_not_permitted),
  permission_denied = int(std::errc::permission_denied),
  read_only_file_system = int(std::errc::read_only_file_system),
  resource_deadlock_would_occur = int(std::errc::resource_deadlock_would_occur),
  resource_unavailable_try_again =
      int(std::errc::resource_unavailable_try_again),
  result_out_of_range = int(std::errc::result_out_of_range),
  too_many_files_open_in_system = int(std::errc::too_many_files_open_in_system),
  too_many_files_open = int(std::errc::too_many_files_open),
  too_many_links = int(std::errc::too_many_links)
};

inline std::error_code make_error_code(errc E) {
  return std::error_code(static_cast<int>(E), std::generic_category());
}
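
// A minimal usage sketch (illustrative): because llvm::errc is marked with
// is_error_code_enum, it converts to std::error_code implicitly and compares
// with plain value checks; handleMissingFile() is a hypothetical stand-in.
//
// \code
//   std::error_code EC = llvm::errc::no_such_file_or_directory;
//   if (EC == llvm::errc::no_such_file_or_directory)
//     handleMissingFile();
// \endcode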
}

namespace std {
template <> struct is_error_code_enum<llvm::errc> : std::true_type {};
}
#endif
//===-- WindowsError.h - Support for mapping windows errors to posix ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_WINDOWSERROR_H
#define LLVM_SUPPORT_WINDOWSERROR_H

#include <system_error>

namespace llvm {
std::error_code mapWindowsError(unsigned EV);
}

#endif
//===-- llvm/Support/ThreadPool.h - A ThreadPool implementation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a crude C++11 based thread pool.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_THREADPOOL_H
#define LLVM_SUPPORT_THREADPOOL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/RWMutex.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/thread.h"

#include <future>

#include <condition_variable>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>
#include <utility>

namespace llvm {

class ThreadPoolTaskGroup;

/// A ThreadPool for asynchronous parallel execution on a defined number of
/// threads.
///
/// The pool keeps a vector of threads alive, waiting on a condition variable
/// for some work to become available.
///
/// It is possible to reuse one thread pool for different groups of tasks
/// by grouping tasks using ThreadPoolTaskGroup. All tasks are processed using
/// the same queue, but it is possible to wait only for a specific group of
/// tasks to finish.
///
/// It is also possible for worker threads to submit new tasks and wait for
/// them. Note that this may result in a deadlock in cases such as when a task
/// (directly or indirectly) tries to wait for its own completion, or when all
/// available threads are used up by tasks waiting for a task that has no thread
/// left to run on (this includes waiting on the returned future). It should be
/// generally safe to wait() for a group as long as groups do not form a cycle.
class ThreadPool {
public:
  /// Construct a pool using the hardware strategy \p S for mapping hardware
  /// execution resources (threads, cores, CPUs)
  /// Defaults to using the maximum execution resources in the system, but
  /// accounting for the affinity mask.
  ThreadPool(ThreadPoolStrategy S = hardware_concurrency());

  /// Blocking destructor: the pool will wait for all the threads to complete.
  ~ThreadPool();

  /// Asynchronous submission of a task to the pool. The returned future can be
  /// used to wait for the task to finish and is *non-blocking* on destruction.
  template <typename Function, typename... Args>
  auto async(Function &&F, Args &&...ArgList) {
    auto Task =
        std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
    return async(std::move(Task));
  }

  /// Overload, task will be in the given task group.
  template <typename Function, typename... Args>
  auto async(ThreadPoolTaskGroup &Group, Function &&F, Args &&...ArgList) {
    auto Task =
        std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
    return async(Group, std::move(Task));
  }

  /// Asynchronous submission of a task to the pool. The returned future can be
  /// used to wait for the task to finish and is *non-blocking* on destruction.
  template <typename Func>
  auto async(Func &&F) -> std::shared_future<decltype(F())> {
    return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)),
                     nullptr);
  }

  template <typename Func>
  auto async(ThreadPoolTaskGroup &Group, Func &&F)
      -> std::shared_future<decltype(F())> {
    return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)),
                     &Group);
  }

  /// Blocking wait for all the threads to complete and the queue to be empty.
  /// It is an error to try to add new tasks while blocking on this call.
  /// Calling wait() from a task would deadlock waiting for itself.
  void wait();

  /// Blocking wait for only all the threads in the given group to complete.
  /// It is possible to wait even inside a task, but waiting (directly or
  /// indirectly) on itself will deadlock. If called from a task running on a
  /// worker thread, the call may process pending tasks while waiting in order
  /// not to waste the thread.
  void wait(ThreadPoolTaskGroup &Group);

  // TODO: Misleading legacy name: this returns the maximum number of worker
  // threads in the pool, not the current number of threads.
  unsigned getThreadCount() const { return MaxThreadCount; }

  /// Returns true if the current thread is a worker thread of this thread pool.
  bool isWorkerThread() const;

private:
  /// Helpers to create a promise and a callable wrapper of \p Task that sets
  /// the result of the promise. Returns the callable and a future to access the
  /// result.
  template <typename ResTy>
  static std::pair<std::function<void()>, std::future<ResTy>>
  createTaskAndFuture(std::function<ResTy()> Task) {
    std::shared_ptr<std::promise<ResTy>> Promise =
        std::make_shared<std::promise<ResTy>>();
    auto F = Promise->get_future();
    return {
        [Promise = std::move(Promise), Task]() { Promise->set_value(Task()); },
        std::move(F)};
  }
  static std::pair<std::function<void()>, std::future<void>>
  createTaskAndFuture(std::function<void()> Task) {
    std::shared_ptr<std::promise<void>> Promise =
        std::make_shared<std::promise<void>>();
    auto F = Promise->get_future();
    return {[Promise = std::move(Promise), Task]() {
              Task();
              Promise->set_value();
            },
            std::move(F)};
  }

  /// Returns true if all tasks in the given group have finished (nullptr means
  /// all tasks regardless of their group). QueueLock must be locked.
  bool workCompletedUnlocked(ThreadPoolTaskGroup *Group) const;

  /// Asynchronous submission of a task to the pool. The returned future can be
  /// used to wait for the task to finish and is *non-blocking* on destruction.
  template <typename ResTy>
  std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task,
                                      ThreadPoolTaskGroup *Group) {

#if LLVM_ENABLE_THREADS
    // Wrap the Task in a std::function<void()> that sets the result of the
    // corresponding future.
    auto R = createTaskAndFuture(Task);

    int requestedThreads;
    {
      // Lock the queue and push the new task
      std::unique_lock<std::mutex> LockGuard(QueueLock);

      // Don't allow enqueueing after disabling the pool
      assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
      Tasks.emplace_back(std::make_pair(std::move(R.first), Group));
      requestedThreads = ActiveThreads + Tasks.size();
    }
    QueueCondition.notify_one();
    grow(requestedThreads);
    return R.second.share();

#else // LLVM_ENABLE_THREADS Disabled

    // Get a Future with launch::deferred execution using std::async
    auto Future = std::async(std::launch::deferred, std::move(Task)).share();
    // Wrap the future so that both ThreadPool::wait() can operate and the
    // returned future can be sync'ed on.
    Tasks.emplace_back(std::make_pair([Future]() { Future.get(); }, Group));
    return Future;
#endif
  }

#if LLVM_ENABLE_THREADS
  // Grow to ensure that we have at least `requested` threads, but do not go
  // over MaxThreadCount.
  void grow(int requested);

  void processTasks(ThreadPoolTaskGroup *WaitingForGroup);
#endif

  /// Threads in flight
  std::vector<llvm::thread> Threads;
  /// Lock protecting access to the Threads vector.
  mutable llvm::sys::RWMutex ThreadsLock;

  /// Tasks waiting for execution in the pool.
  std::deque<std::pair<std::function<void()>, ThreadPoolTaskGroup *>> Tasks;

  /// Locking and signaling for accessing the Tasks queue.
  std::mutex QueueLock;
  std::condition_variable QueueCondition;

  /// Signaling for job completion (all tasks or all tasks in a group).
  std::condition_variable CompletionCondition;

  /// Keep track of the number of threads actually busy.
  unsigned ActiveThreads = 0;
  /// Number of threads active for tasks in the given group; entries are
  /// kept only while the count is non-zero.
  DenseMap<ThreadPoolTaskGroup *, unsigned> ActiveGroups;

#if LLVM_ENABLE_THREADS // avoids warning for unused variable
  /// Signal for the destruction of the pool, asking threads to exit.
  bool EnableFlag = true;
#endif

  const ThreadPoolStrategy Strategy;

  /// Maximum number of threads to potentially grow this pool to.
  const unsigned MaxThreadCount;
};

/// A group of tasks to be run on a thread pool. Thread pool tasks in different
/// groups can run on the same thread pool but can be waited for separately.
/// It is even possible for tasks of one group to submit and wait for tasks
/// of another group, as long as this does not form a loop.
class ThreadPoolTaskGroup {
public:
  /// The ThreadPool argument is the thread pool to forward calls to.
  ThreadPoolTaskGroup(ThreadPool &Pool) : Pool(Pool) {}

  /// Blocking destructor: will wait for all the tasks in the group to complete
  /// by calling ThreadPool::wait().
  ~ThreadPoolTaskGroup() { wait(); }

  /// Calls ThreadPool::async() for this group.
  template <typename Function, typename... Args>
  inline auto async(Function &&F, Args &&...ArgList) {
    return Pool.async(*this, std::forward<Function>(F),
                      std::forward<Args>(ArgList)...);
  }

  /// Calls ThreadPool::wait() for this group.
  void wait() { Pool.wait(*this); }

private:
  ThreadPool &Pool;
};
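
// A minimal usage sketch (illustrative, not part of the original header):
// submitting work to a pool and waiting per group.
//
// \code
//   llvm::ThreadPool Pool(llvm::hardware_concurrency(4));
//   llvm::ThreadPoolTaskGroup Group(Pool);
//
//   std::shared_future<int> Sum = Pool.async([] { return 2 + 2; });
//   Group.async([] { /* a task belonging to Group */ });
//
//   Group.wait();           // waits only for tasks in Group
//   int Result = Sum.get(); // waits for the ungrouped task's result
//   Pool.wait();            // drains everything
// \endcode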

} // namespace llvm

#endif // LLVM_SUPPORT_THREADPOOL_H
//===- llvm/Support/FileSystem/UniqueID.h - UniqueID for files --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is cut out of llvm/Support/FileSystem.h to allow UniqueID to be
// reused without bloating the includes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FILESYSTEM_UNIQUEID_H
#define LLVM_SUPPORT_FILESYSTEM_UNIQUEID_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include <cstdint>
#include <utility>

namespace llvm {
namespace sys {
namespace fs {

class UniqueID {
  uint64_t Device;
  uint64_t File;

public:
  UniqueID() = default;
  UniqueID(uint64_t Device, uint64_t File) : Device(Device), File(File) {}

  bool operator==(const UniqueID &Other) const {
    return Device == Other.Device && File == Other.File;
  }
  bool operator!=(const UniqueID &Other) const { return !(*this == Other); }
  bool operator<(const UniqueID &Other) const {
    // Don't use std::tie since it bloats the compile time of this header.
    if (Device < Other.Device)
      return true;
    if (Other.Device < Device)
      return false;
    return File < Other.File;
  }

  uint64_t getDevice() const { return Device; }
  uint64_t getFile() const { return File; }
};

} // end namespace fs
} // end namespace sys

// Support UniqueIDs as DenseMap keys.
template <> struct DenseMapInfo<llvm::sys::fs::UniqueID> {
  static inline llvm::sys::fs::UniqueID getEmptyKey() {
    auto EmptyKey = DenseMapInfo<std::pair<uint64_t, uint64_t>>::getEmptyKey();
    return {EmptyKey.first, EmptyKey.second};
  }

  static inline llvm::sys::fs::UniqueID getTombstoneKey() {
    auto TombstoneKey =
        DenseMapInfo<std::pair<uint64_t, uint64_t>>::getTombstoneKey();
    return {TombstoneKey.first, TombstoneKey.second};
  }

  static hash_code getHashValue(const llvm::sys::fs::UniqueID &Tag) {
    return hash_value(std::make_pair(Tag.getDevice(), Tag.getFile()));
  }

  static bool isEqual(const llvm::sys::fs::UniqueID &LHS,
                      const llvm::sys::fs::UniqueID &RHS) {
    return LHS == RHS;
  }
};
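
// A minimal usage sketch (illustrative): a UniqueID is typically obtained via
// llvm::sys::fs::getUniqueID() from llvm/Support/FileSystem.h, and the
// DenseMapInfo specialization above lets it key a DenseMap directly.
//
// \code
//   llvm::sys::fs::UniqueID ID;
//   if (std::error_code EC = llvm::sys::fs::getUniqueID("/etc/hosts", ID))
//     return; // handle EC
//
//   llvm::DenseMap<llvm::sys::fs::UniqueID, unsigned> SeenFiles;
//   ++SeenFiles[ID]; // distinct (device, file) pairs get distinct slots
// \endcode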

} // end namespace llvm

#endif // LLVM_SUPPORT_FILESYSTEM_UNIQUEID_H
//===-- llvm/Support/CSKYTargetParser.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/CSKYTargetParser.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/CSKYTargetParser.h"
//===-- RISCVISAInfo.h - RISC-V ISA Information -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_RISCVISAINFO_H
#define LLVM_SUPPORT_RISCVISAINFO_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

#include <map>
#include <string>
#include <vector>

namespace llvm {
struct RISCVExtensionInfo {
  unsigned MajorVersion;
  unsigned MinorVersion;
};

class RISCVISAInfo {
public:
  RISCVISAInfo(const RISCVISAInfo &) = delete;
  RISCVISAInfo &operator=(const RISCVISAInfo &) = delete;

  static bool compareExtension(const std::string &LHS, const std::string &RHS);

  /// Helper class for OrderedExtensionMap.
  struct ExtensionComparator {
    bool operator()(const std::string &LHS, const std::string &RHS) const {
      return compareExtension(LHS, RHS);
    }
  };

  /// OrderedExtensionMap is a std::map specialized to keep entries in the
  /// canonical order of extensions.
  typedef std::map<std::string, RISCVExtensionInfo, ExtensionComparator>
      OrderedExtensionMap;

  RISCVISAInfo(unsigned XLen, OrderedExtensionMap &Exts)
      : XLen(XLen), FLen(0), MinVLen(0), MaxELen(0), MaxELenFp(0), Exts(Exts) {}

  /// Parse RISC-V ISA info from arch string.
  /// If IgnoreUnknown is set, any unrecognised extension names or
  /// extensions with unrecognised versions will be silently dropped, except
  /// for the special case of the base 'i' and 'e' extensions, where the
  /// default version will be used (as ignoring the base is not possible).
  static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
  parseArchString(StringRef Arch, bool EnableExperimentalExtension,
                  bool ExperimentalExtensionVersionCheck = true,
                  bool IgnoreUnknown = false);

  /// Parse RISC-V ISA info from an arch string that is already in normalized
  /// form (as defined in the psABI). Unlike parseArchString, this function
  /// will not error for unrecognized extension names or extension versions.
  static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
  parseNormalizedArchString(StringRef Arch);

  /// Parse RISC-V ISA info from feature vector.
  static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
  parseFeatures(unsigned XLen, const std::vector<std::string> &Features);

  /// Convert RISC-V ISA info to a feature vector.
  void toFeatures(std::vector<StringRef> &Features,
                  llvm::function_ref<StringRef(const Twine &)> StrAlloc,
                  bool AddAllExtensions) const;

  const OrderedExtensionMap &getExtensions() const { return Exts; }

  unsigned getXLen() const { return XLen; }
  unsigned getFLen() const { return FLen; }
  unsigned getMinVLen() const { return MinVLen; }
  unsigned getMaxVLen() const { return 65536; }
  unsigned getMaxELen() const { return MaxELen; }
  unsigned getMaxELenFp() const { return MaxELenFp; }

  bool hasExtension(StringRef Ext) const;
  std::string toString() const;
  std::vector<std::string> toFeatureVector() const;
  StringRef computeDefaultABI() const;

  static bool isSupportedExtensionFeature(StringRef Ext);
  static bool isSupportedExtension(StringRef Ext);
  static bool isSupportedExtension(StringRef Ext, unsigned MajorVersion,
                                   unsigned MinorVersion);
  static llvm::Expected<std::unique_ptr<RISCVISAInfo>>
  postProcessAndChecking(std::unique_ptr<RISCVISAInfo> &&ISAInfo);

private:
  RISCVISAInfo(unsigned XLen)
      : XLen(XLen), FLen(0), MinVLen(0), MaxELen(0), MaxELenFp(0) {}

  unsigned XLen;
  unsigned FLen;
  unsigned MinVLen;
  unsigned MaxELen, MaxELenFp;

  OrderedExtensionMap Exts;

  void addExtension(StringRef ExtName, unsigned MajorVersion,
                    unsigned MinorVersion);

  Error checkDependency();

  void updateImplication();
  void updateCombination();
  void updateFLen();
  void updateMinVLen();
  void updateMaxELen();
};
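
// A minimal usage sketch (illustrative): parsing an arch string and querying
// the result, with Expected-style error handling.
//
// \code
//   auto ParseResult = llvm::RISCVISAInfo::parseArchString(
//       "rv64imafdc", /*EnableExperimentalExtension=*/false);
//   if (!ParseResult) {
//     llvm::logAllUnhandledErrors(ParseResult.takeError(), llvm::errs());
//     return;
//   }
//   unsigned XLen = (*ParseResult)->getXLen();     // 64
//   bool HasF = (*ParseResult)->hasExtension("f"); // true
// \endcode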

} // namespace llvm

#endif
//===- Caching.h - LLVM Local File Cache ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the CachedFileStream and the localCache function, which
// simplifies caching files on the local filesystem in a directory whose
// contents are managed by a CachePruningPolicy.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CACHING_H
#define LLVM_SUPPORT_CACHING_H

#include "llvm/Support/Error.h"

namespace llvm {

class MemoryBuffer;

/// This class wraps an output stream for a file. Most clients should just be
/// able to return an instance of this base class from the stream callback, but
/// if a client needs to perform some action after the stream is written to,
/// that can be done by deriving from this class and overriding the destructor.
class CachedFileStream {
public:
  CachedFileStream(std::unique_ptr<raw_pwrite_stream> OS,
                   std::string OSPath = "")
      : OS(std::move(OS)), ObjectPathName(OSPath) {}
  std::unique_ptr<raw_pwrite_stream> OS;
  std::string ObjectPathName;
  virtual ~CachedFileStream() = default;
};

/// This type defines the callback to add a file that is generated on the fly.
///
/// Stream callbacks must be thread safe.
using AddStreamFn = std::function<Expected<std::unique_ptr<CachedFileStream>>(
    unsigned Task, const Twine &ModuleName)>;

/// This is the type of a file cache. To request an item from the cache, pass a
/// unique string as the Key. For hits, the cached file will be added to the
/// link and this function will return AddStreamFn(). For misses, the cache will
/// return a stream callback which must be called at most once to produce
/// content for the stream. The file stream produced by the stream callback will
/// add the file to the link after the stream is written to. ModuleName is the
/// unique module identifier for the bitcode module the cache is being checked
/// for.
///
/// Clients generally look like this:
///
/// if (AddStreamFn AddStream = Cache(Task, Key, ModuleName))
///   ProduceContent(AddStream);
using FileCache = std::function<Expected<AddStreamFn>(
    unsigned Task, StringRef Key, const Twine &ModuleName)>;

/// This type defines the callback to add a pre-existing file (e.g. in a cache).
///
/// Buffer callbacks must be thread safe.
using AddBufferFn = std::function<void(unsigned Task, const Twine &ModuleName,
                                       std::unique_ptr<MemoryBuffer> MB)>;

/// Create a local file system cache which uses the given cache name, temporary
/// file prefix, cache directory and file callback.  This function does not
/// immediately create the cache directory if it does not yet exist; this is
/// done lazily the first time a file is added.  The cache name appears in error
/// messages for errors during caching. The temporary file prefix is used in the
/// temporary file naming scheme used when writing files atomically.
Expected<FileCache> localCache(
    const Twine &CacheNameRef, const Twine &TempFilePrefixRef,
    const Twine &CacheDirectoryPathRef,
    AddBufferFn AddBuffer = [](size_t Task, const Twine &ModuleName,
                               std::unique_ptr<MemoryBuffer> MB) {});
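
// A minimal usage sketch (illustrative), following the client pattern from
// the FileCache comment above; CacheDir, Task, Key, ModuleName and
// ProduceContent are hypothetical caller-side values.
//
// \code
//   llvm::Expected<llvm::FileCache> CacheOrErr =
//       llvm::localCache("MyCache", "Tmp", CacheDir);
//   if (!CacheOrErr)
//     return CacheOrErr.takeError();
//
//   llvm::Expected<llvm::AddStreamFn> AddStreamOrErr =
//       (*CacheOrErr)(Task, Key, ModuleName);
//   if (!AddStreamOrErr)
//     return AddStreamOrErr.takeError();
//   if (llvm::AddStreamFn &AddStream = *AddStreamOrErr)
//     ProduceContent(AddStream); // cache miss: produce and stream the file
// \endcode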
} // namespace llvm

#endif
//===- llvm/Support/TimeProfiler.h - Hierarchical Time Profiler -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides lightweight and dependency-free machinery to trace execution
// time around arbitrary code. Two API flavors are available.
//
// The primary API uses a RAII object to trigger tracing:
//
// \code
//   {
//     TimeTraceScope scope("my_event_name");
//     ...my code...
//   }
// \endcode
//
// If the code to be profiled does not have a natural lexical scope then
// it is also possible to start and end events with respect to an implicit
// per-thread stack of profiling entries:
//
// \code
//   timeTraceProfilerBegin("my_event_name");
//   ...my code...
//   timeTraceProfilerEnd();  // must be called on all control flow paths
// \endcode
//
// Time profiling entries can be given an arbitrary name and, optionally,
// an arbitrary 'detail' string. The resulting trace will include 'Total'
// entries summing the time spent for each name. Thus, it's best to choose
// names to be fairly generic, and rely on the detail field to capture
// everything else of interest.
//
// To avoid lifetime issues name and detail strings are copied into the event
// entries at their time of creation. Care should be taken to make string
// construction cheap to prevent 'Heisenperf' effects. In particular, the
// 'detail' argument may be a string-returning closure:
//
// \code
//   int n;
//   {
//     TimeTraceScope scope("my_event_name",
//                          [n]() { return (Twine("x=") + Twine(n)).str(); });
//     ...my code...
//   }
// \endcode
// The closure will not be called if tracing is disabled. Otherwise, the
// resulting string will be directly moved into the entry.
//
// The main process should begin with a timeTraceProfilerInitialize, and
// finish with timeTraceProfileWrite and timeTraceProfilerCleanup calls.
// Each new thread should begin with a timeTraceProfilerInitialize, and
// finish with a timeTraceProfilerFinishThread call.
//
// Timestamps come from std::chrono::steady_clock. Note that threads need
// not see the same time from that clock, and the resolution may not be
// the best available.
//
// Currently, there are a number of compatible viewers:
//  - chrome://tracing is the original chromium trace viewer.
//  - http://ui.perfetto.dev is the replacement for the above, under active
//    development by Google as part of the 'Perfetto' project.
//  - https://www.speedscope.app/ has also been reported as an option.
//
// Future work:
//  - Support akin to LLVM_DEBUG for runtime enable/disable of named tracing
//    families for non-debug builds which wish to support optional tracing.
//  - Evaluate the detail closures at profile write time to avoid
//    stringification costs interfering with tracing.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TIMEPROFILER_H
#define LLVM_SUPPORT_TIMEPROFILER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/Error.h"

namespace llvm {

class raw_pwrite_stream;

struct TimeTraceProfiler;
TimeTraceProfiler *getTimeTraceProfilerInstance();

/// Initialize the time trace profiler.
/// This sets up the global \p TimeTraceProfilerInstance
/// variable to be the profiler instance.
void timeTraceProfilerInitialize(unsigned TimeTraceGranularity,
                                 StringRef ProcName);

/// Cleanup the time trace profiler, if it was initialized.
void timeTraceProfilerCleanup();

/// Finish a time trace profiler running on a worker thread.
void timeTraceProfilerFinishThread();

/// Is the time trace profiler enabled, i.e. initialized?
inline bool timeTraceProfilerEnabled() {
  return getTimeTraceProfilerInstance() != nullptr;
}

/// Write profiling data to output stream.
/// Data produced is JSON, in Chrome "Trace Event" format, see
/// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview
void timeTraceProfilerWrite(raw_pwrite_stream &OS);

/// Write profiling data to a file.
/// The function will write to \p PreferredFileName if provided; otherwise it
/// will write to \p FallbackFileName, appending ".time-trace".
/// Returns a StringError indicating a failure if the function is
/// unable to open the file for writing.
Error timeTraceProfilerWrite(StringRef PreferredFileName,
                             StringRef FallbackFileName);

/// Manually begin a time section, with the given \p Name and \p Detail.
/// Profiler copies the string data, so the pointers can be given into
/// temporaries. Time sections can be hierarchical; every Begin must have a
/// matching End, and Begin/End pairs may nest.
void timeTraceProfilerBegin(StringRef Name, StringRef Detail);
void timeTraceProfilerBegin(StringRef Name,
                            llvm::function_ref<std::string()> Detail);

/// Manually end the last time section.
void timeTraceProfilerEnd();

/// The TimeTraceScope is a helper class to call the begin and end functions
/// of the time trace profiler.  When the object is constructed, it begins
/// the section; and when it is destroyed, it stops it. If the time profiler
/// is not initialized, the overhead is a single branch.
struct TimeTraceScope {

  TimeTraceScope() = delete;
  TimeTraceScope(const TimeTraceScope &) = delete;
  TimeTraceScope &operator=(const TimeTraceScope &) = delete;
  TimeTraceScope(TimeTraceScope &&) = delete;
  TimeTraceScope &operator=(TimeTraceScope &&) = delete;

  TimeTraceScope(StringRef Name) {
    if (getTimeTraceProfilerInstance() != nullptr)
      timeTraceProfilerBegin(Name, StringRef(""));
  }
  TimeTraceScope(StringRef Name, StringRef Detail) {
    if (getTimeTraceProfilerInstance() != nullptr)
      timeTraceProfilerBegin(Name, Detail);
  }
  TimeTraceScope(StringRef Name, llvm::function_ref<std::string()> Detail) {
    if (getTimeTraceProfilerInstance() != nullptr)
      timeTraceProfilerBegin(Name, Detail);
  }
  ~TimeTraceScope() {
    if (getTimeTraceProfilerInstance() != nullptr)
      timeTraceProfilerEnd();
  }
};
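
// A minimal end-to-end sketch (illustrative): per-process setup, a traced
// scope, and writing the trace; "out.json" and "mytool" are example values.
//
// \code
//   llvm::timeTraceProfilerInitialize(/*TimeTraceGranularity=*/500, "mytool");
//   {
//     llvm::TimeTraceScope Scope("frontend", "main.cpp");
//     // ...work to be measured...
//   }
//   if (llvm::Error E = llvm::timeTraceProfilerWrite("out.json", "mytool"))
//     llvm::logAllUnhandledErrors(std::move(E), llvm::errs());
//   llvm::timeTraceProfilerCleanup();
// \endcode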

} // end namespace llvm

#endif
//===- llvm/Support/BuryPointer.h - Memory Manipulation/Leak ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BURYPOINTER_H
#define LLVM_SUPPORT_BURYPOINTER_H

#include <memory>

namespace llvm {

// In tools that will exit soon anyway, going through the process of explicitly
// deallocating resources can be unnecessary - better to leak the resources and
// let the OS clean them up when the process ends. Use this function to ensure
// the memory is not misdiagnosed as an unintentional leak by leak detection
// tools (this is achieved by preserving pointers to the object in a globally
// visible array).
void BuryPointer(const void *Ptr);
template <typename T> void BuryPointer(std::unique_ptr<T> Ptr) {
  BuryPointer(Ptr.release());
}
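
// A minimal usage sketch (illustrative): intentionally leaking an object at
// shutdown while keeping leak checkers quiet; Context/makeContext are
// hypothetical.
//
// \code
//   std::unique_ptr<Context> Ctx = makeContext();
//   llvm::BuryPointer(std::move(Ctx)); // ownership released, pointer buried
// \endcode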

} // namespace llvm

#endif
//===- NativeFormatting.h - Low level formatting helpers ---------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_NATIVEFORMATTING_H
#define LLVM_SUPPORT_NATIVEFORMATTING_H

#include <cstdint>
#include <optional>

namespace llvm {
class raw_ostream;
enum class FloatStyle { Exponent, ExponentUpper, Fixed, Percent };
enum class IntegerStyle {
  Integer,
  Number,
};
enum class HexPrintStyle { Upper, Lower, PrefixUpper, PrefixLower };

size_t getDefaultPrecision(FloatStyle Style);

bool isPrefixedHexStyle(HexPrintStyle S);

void write_integer(raw_ostream &S, unsigned int N, size_t MinDigits,
                   IntegerStyle Style);
void write_integer(raw_ostream &S, int N, size_t MinDigits, IntegerStyle Style);
void write_integer(raw_ostream &S, unsigned long N, size_t MinDigits,
                   IntegerStyle Style);
void write_integer(raw_ostream &S, long N, size_t MinDigits,
                   IntegerStyle Style);
void write_integer(raw_ostream &S, unsigned long long N, size_t MinDigits,
                   IntegerStyle Style);
void write_integer(raw_ostream &S, long long N, size_t MinDigits,
                   IntegerStyle Style);

void write_hex(raw_ostream &S, uint64_t N, HexPrintStyle Style,
               std::optional<size_t> Width = std::nullopt);
void write_double(raw_ostream &S, double D, FloatStyle Style,
                  std::optional<size_t> Precision = std::nullopt);
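
// A minimal usage sketch (illustrative): writing numbers straight to a
// raw_ostream with explicit styles (assumes llvm/Support/raw_ostream.h).
//
// \code
//   llvm::write_integer(llvm::outs(), 1234567, /*MinDigits=*/0,
//                       llvm::IntegerStyle::Number);            // "1,234,567"
//   llvm::write_hex(llvm::outs(), 42, llvm::HexPrintStyle::PrefixLower);
//                                                               // "0x2a"
//   llvm::write_double(llvm::outs(), 3.14159, llvm::FloatStyle::Fixed,
//                      /*Precision=*/2);                        // "3.14"
// \endcode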
}

#endif

//===- FileOutputBuffer.h - File Output Buffer ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utility for creating an in-memory buffer that will be written to a file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FILEOUTPUTBUFFER_H
#define LLVM_SUPPORT_FILEOUTPUTBUFFER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"

namespace llvm {
/// FileOutputBuffer - This interface provides a simple way to create an in-memory
/// buffer which will be written to a file. During the lifetime of these
/// objects, the content or existence of the specified file is undefined. That
/// is, creating an OutputBuffer for a file may immediately remove the file.
/// If the FileOutputBuffer is committed, the target file's content will become
/// the buffer content at the time of the commit.  If the FileOutputBuffer is
/// not committed, the file will be deleted in the FileOutputBuffer destructor.
class FileOutputBuffer {
public:
  enum {
    /// Set the 'x' bit on the resulting file.
    F_executable = 1,

    /// Don't use mmap and instead write an in-memory buffer to a file when this
    /// buffer is closed.
    F_no_mmap = 2,
  };

  /// Factory method to create an OutputBuffer object which manages a read/write
  /// buffer of the specified size. When committed, the buffer will be written
  /// to the file at the specified path.
  ///
  /// When F_modify is specified and \p FilePath refers to an existing on-disk
  /// file \p Size may be set to -1, in which case the entire file is used.
  /// Otherwise, the file shrinks or grows as necessary based on the value of
  /// \p Size.  It is an error to specify F_modify and Size=-1 if \p FilePath
  /// does not exist.
  static Expected<std::unique_ptr<FileOutputBuffer>>
  create(StringRef FilePath, size_t Size, unsigned Flags = 0);

  /// Returns a pointer to the start of the buffer.
  virtual uint8_t *getBufferStart() const = 0;

  /// Returns a pointer to the end of the buffer.
  virtual uint8_t *getBufferEnd() const = 0;

  /// Returns size of the buffer.
  virtual size_t getBufferSize() const = 0;

  /// Returns path where file will show up if buffer is committed.
  StringRef getPath() const { return FinalPath; }

  /// Flushes the content of the buffer to its file and deallocates the
  /// buffer.  If commit() is not called before this object's destructor
  /// is called, the file is deleted in the destructor.
  virtual Error commit() = 0;

  /// If this object was previously committed, the destructor just deletes
  /// this object.  If this object was not committed, the destructor
  /// deallocates the buffer and the target file is never written.
  virtual ~FileOutputBuffer() = default;

  /// This removes the temporary file (unless it already was committed)
  /// but keeps the memory mapping alive.
  virtual void discard() {}

protected:
  FileOutputBuffer(StringRef Path) : FinalPath(Path) {}

  std::string FinalPath;
};
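
// A minimal usage sketch (illustrative): create a buffer, fill it, commit it;
// "out.bin" is an example path.
//
// \code
//   auto BufOrErr = llvm::FileOutputBuffer::create("out.bin", /*Size=*/4);
//   if (!BufOrErr)
//     return BufOrErr.takeError();
//   std::unique_ptr<llvm::FileOutputBuffer> &Buf = *BufOrErr;
//   memcpy(Buf->getBufferStart(), "\xDE\xAD\xBE\xEF", 4);
//   if (llvm::Error E = Buf->commit()) // file appears on disk on success
//     return E;
// \endcode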
} // end namespace llvm

#endif
//===- BinaryStreamRef.h - A copyable reference to a stream -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYSTREAMREF_H
#define LLVM_SUPPORT_BINARYSTREAMREF_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
#include <optional>

namespace llvm {

/// Common stuff for mutable and immutable StreamRefs.
template <class RefType, class StreamType> class BinaryStreamRefBase {
protected:
  BinaryStreamRefBase() = default;
  explicit BinaryStreamRefBase(StreamType &BorrowedImpl)
      : BorrowedImpl(&BorrowedImpl), ViewOffset(0) {
    if (!(BorrowedImpl.getFlags() & BSF_Append))
      Length = BorrowedImpl.getLength();
  }

  BinaryStreamRefBase(std::shared_ptr<StreamType> SharedImpl, uint64_t Offset,
                      std::optional<uint64_t> Length)
      : SharedImpl(SharedImpl), BorrowedImpl(SharedImpl.get()),
        ViewOffset(Offset), Length(Length) {}
  BinaryStreamRefBase(StreamType &BorrowedImpl, uint64_t Offset,
                      std::optional<uint64_t> Length)
      : BorrowedImpl(&BorrowedImpl), ViewOffset(Offset), Length(Length) {}
  BinaryStreamRefBase(const BinaryStreamRefBase &Other) = default;
  BinaryStreamRefBase &operator=(const BinaryStreamRefBase &Other) = default;

  BinaryStreamRefBase &operator=(BinaryStreamRefBase &&Other) = default;
  BinaryStreamRefBase(BinaryStreamRefBase &&Other) = default;

public:
  llvm::support::endianness getEndian() const {
    return BorrowedImpl->getEndian();
  }

  uint64_t getLength() const {
    if (Length)
      return *Length;

    return BorrowedImpl ? (BorrowedImpl->getLength() - ViewOffset) : 0;
  }

  /// Return a new BinaryStreamRef with the first \p N elements removed.  If
  /// this BinaryStreamRef is length-tracking, then the resulting one will be
  /// too.
  RefType drop_front(uint64_t N) const {
    if (!BorrowedImpl)
      return RefType();

    N = std::min(N, getLength());
    RefType Result(static_cast<const RefType &>(*this));
    if (N == 0)
      return Result;

    Result.ViewOffset += N;
    if (Result.Length)
      *Result.Length -= N;
    return Result;
  }

  /// Return a new BinaryStreamRef with the last \p N elements removed.  If
  /// this BinaryStreamRef is length-tracking and \p N is greater than 0, then
  /// this BinaryStreamRef will no longer length-track.
  RefType drop_back(uint64_t N) const {
    if (!BorrowedImpl)
      return RefType();

    RefType Result(static_cast<const RefType &>(*this));
    N = std::min(N, getLength());

    if (N == 0)
      return Result;

    // Since we're dropping non-zero bytes from the end, stop length-tracking
    // by setting the length of the resulting StreamRef to an explicit value.
    if (!Result.Length)
      Result.Length = getLength();

    *Result.Length -= N;
    return Result;
  }

  /// Return a new BinaryStreamRef with only the first \p N elements remaining.
  RefType keep_front(uint64_t N) const {
    assert(N <= getLength());
    return drop_back(getLength() - N);
  }

  /// Return a new BinaryStreamRef with only the last \p N elements remaining.
  RefType keep_back(uint64_t N) const {
    assert(N <= getLength());
    return drop_front(getLength() - N);
  }

  /// Return a new BinaryStreamRef with the first and last \p N elements
  /// removed.
  RefType drop_symmetric(uint64_t N) const {
    return drop_front(N).drop_back(N);
  }

  /// Return a new BinaryStreamRef with the first \p Offset elements removed,
  /// and retaining exactly \p Len elements.
  RefType slice(uint64_t Offset, uint64_t Len) const {
    return drop_front(Offset).keep_front(Len);
  }

  bool valid() const { return BorrowedImpl != nullptr; }

  friend bool operator==(const RefType &LHS, const RefType &RHS) {
    if (LHS.BorrowedImpl != RHS.BorrowedImpl)
      return false;
    if (LHS.ViewOffset != RHS.ViewOffset)
      return false;
    if (LHS.Length != RHS.Length)
      return false;
    return true;
  }

protected:
  Error checkOffsetForRead(uint64_t Offset, uint64_t DataSize) const {
    if (Offset > getLength())
      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
    if (getLength() < DataSize + Offset)
      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
    return Error::success();
  }

  std::shared_ptr<StreamType> SharedImpl;
  StreamType *BorrowedImpl = nullptr;
  uint64_t ViewOffset = 0;
  std::optional<uint64_t> Length;
};

/// BinaryStreamRef is to BinaryStream what ArrayRef is to an Array.  It
/// provides copy-semantics and read only access to a "window" of the underlying
/// BinaryStream. Note that BinaryStreamRef is *not* a BinaryStream.  That is to
/// say, it does not inherit and override the methods of BinaryStream.  In
/// general, you should not pass around pointers or references to BinaryStreams
/// and use inheritance to achieve polymorphism.  Instead, you should pass
/// around BinaryStreamRefs by value and achieve polymorphism that way.
class BinaryStreamRef
    : public BinaryStreamRefBase<BinaryStreamRef, BinaryStream> {
  friend BinaryStreamRefBase<BinaryStreamRef, BinaryStream>;
  friend class WritableBinaryStreamRef;
  BinaryStreamRef(std::shared_ptr<BinaryStream> Impl, uint64_t ViewOffset,
                  std::optional<uint64_t> Length)
      : BinaryStreamRefBase(Impl, ViewOffset, Length) {}

public:
  BinaryStreamRef() = default;
  BinaryStreamRef(BinaryStream &Stream);
  BinaryStreamRef(BinaryStream &Stream, uint64_t Offset,
                  std::optional<uint64_t> Length);
  explicit BinaryStreamRef(ArrayRef<uint8_t> Data,
                           llvm::support::endianness Endian);
  explicit BinaryStreamRef(StringRef Data, llvm::support::endianness Endian);

  BinaryStreamRef(const BinaryStreamRef &Other) = default;
  BinaryStreamRef &operator=(const BinaryStreamRef &Other) = default;
  BinaryStreamRef(BinaryStreamRef &&Other) = default;
  BinaryStreamRef &operator=(BinaryStreamRef &&Other) = default;

  // Use BinaryStreamRef.slice() instead.
  BinaryStreamRef(BinaryStreamRef &S, uint64_t Offset,
                  uint64_t Length) = delete;

  /// Given an Offset into this StreamRef and a Size, return a reference to a
  /// buffer owned by the stream.
  ///
  /// \returns a success error code if the entire range of data is within the
  /// bounds of this BinaryStreamRef's view and the implementation could read
  /// the data, and an appropriate error code otherwise.
  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) const;

  /// Given an Offset into this BinaryStreamRef, return a reference to the
  /// largest buffer the stream could support without necessitating a copy.
  ///
  /// \returns a success error code if implementation could read the data,
  /// and an appropriate error code otherwise.
  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) const;
};
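
// A minimal usage sketch (illustrative): viewing a byte buffer through a
// BinaryStreamRef and reading a 4-byte window out of it.
//
// \code
//   uint8_t Bytes[] = {1, 2, 3, 4, 5, 6, 7, 8};
//   llvm::BinaryStreamRef Ref(llvm::ArrayRef<uint8_t>(Bytes),
//                             llvm::support::little);
//
//   llvm::ArrayRef<uint8_t> Chunk;
//   if (llvm::Error E =
//           Ref.drop_front(2).readBytes(/*Offset=*/0, /*Size=*/4, Chunk))
//     return E;
//   // On success, Chunk views bytes {3, 4, 5, 6}.
// \endcode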

struct BinarySubstreamRef {
  uint64_t Offset = 0;        // Offset in the parent stream
  BinaryStreamRef StreamData; // Stream Data

  BinarySubstreamRef slice(uint64_t Off, uint64_t Size) const {
    BinaryStreamRef SubSub = StreamData.slice(Off, Size);
    return {Off + Offset, SubSub};
  }
  BinarySubstreamRef drop_front(uint64_t N) const {
    return slice(N, size() - N);
  }
  BinarySubstreamRef keep_front(uint64_t N) const { return slice(0, N); }

  std::pair<BinarySubstreamRef, BinarySubstreamRef> split(uint64_t Off) const {
    return std::make_pair(keep_front(Off), drop_front(Off));
  }

  uint64_t size() const { return StreamData.getLength(); }
  bool empty() const { return size() == 0; }
};

class WritableBinaryStreamRef
    : public BinaryStreamRefBase<WritableBinaryStreamRef,
                                 WritableBinaryStream> {
  friend BinaryStreamRefBase<WritableBinaryStreamRef, WritableBinaryStream>;
  WritableBinaryStreamRef(std::shared_ptr<WritableBinaryStream> Impl,
                          uint64_t ViewOffset, std::optional<uint64_t> Length)
      : BinaryStreamRefBase(Impl, ViewOffset, Length) {}

  Error checkOffsetForWrite(uint64_t Offset, uint64_t DataSize) const {
    if (!(BorrowedImpl->getFlags() & BSF_Append))
      return checkOffsetForRead(Offset, DataSize);

    if (Offset > getLength())
      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
    return Error::success();
  }

public:
  WritableBinaryStreamRef() = default;
  WritableBinaryStreamRef(WritableBinaryStream &Stream);
  WritableBinaryStreamRef(WritableBinaryStream &Stream, uint64_t Offset,
                          std::optional<uint64_t> Length);
  explicit WritableBinaryStreamRef(MutableArrayRef<uint8_t> Data,
                                   llvm::support::endianness Endian);
  WritableBinaryStreamRef(const WritableBinaryStreamRef &Other) = default;
  WritableBinaryStreamRef &
  operator=(const WritableBinaryStreamRef &Other) = default;

  WritableBinaryStreamRef(WritableBinaryStreamRef &&Other) = default;
  WritableBinaryStreamRef &operator=(WritableBinaryStreamRef &&Other) = default;

  // Use WritableBinaryStreamRef.slice() instead.
  WritableBinaryStreamRef(WritableBinaryStreamRef &S, uint64_t Offset,
                          uint64_t Length) = delete;

  /// Given an Offset into this WritableBinaryStreamRef and some input data,
  /// writes the data to the underlying stream.
  ///
  /// \returns a success error code if the data could fit within the underlying
  /// stream at the specified location and the implementation could write the
  /// data, and an appropriate error code otherwise.
  Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Data) const;

  /// Convert this WritableBinaryStreamRef to a read-only BinaryStreamRef.
  operator BinaryStreamRef() const;

  /// For buffered streams, commits changes to the backing store.
  Error commit();
};

} // end namespace llvm

#endif // LLVM_SUPPORT_BINARYSTREAMREF_H
//===- FormatProviders.h - Formatters for common LLVM types -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements format providers for many common LLVM types, for example
// allowing precision and width specifiers for scalar and string types.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATPROVIDERS_H
#define LLVM_SUPPORT_FORMATPROVIDERS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FormatVariadicDetails.h"
#include "llvm/Support/NativeFormatting.h"

#include <array>
#include <optional>
#include <type_traits>

namespace llvm {
namespace detail {
template <typename T>
struct use_integral_formatter
    : public std::integral_constant<
          bool, is_one_of<T, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
                          int64_t, uint64_t, int, unsigned, long, unsigned long,
                          long long, unsigned long long>::value> {};

template <typename T>
struct use_char_formatter
    : public std::integral_constant<bool, std::is_same_v<T, char>> {};

template <typename T>
struct is_cstring
    : public std::integral_constant<bool,
                                    is_one_of<T, char *, const char *>::value> {
};

template <typename T>
struct use_string_formatter
    : public std::integral_constant<bool,
                                    std::is_convertible_v<T, llvm::StringRef>> {
};

template <typename T>
struct use_pointer_formatter
    : public std::integral_constant<bool, std::is_pointer_v<T> &&
                                              !is_cstring<T>::value> {};

template <typename T>
struct use_double_formatter
    : public std::integral_constant<bool, std::is_floating_point_v<T>> {};

class HelperFunctions {
protected:
  static std::optional<size_t> parseNumericPrecision(StringRef Str) {
    size_t Prec;
    std::optional<size_t> Result;
    if (Str.empty())
      Result = std::nullopt;
    else if (Str.getAsInteger(10, Prec)) {
      assert(false && "Invalid precision specifier");
      Result = std::nullopt;
    } else {
      assert(Prec < 100 && "Precision out of range");
      Result = std::min<size_t>(99u, Prec);
    }
    return Result;
  }

  static bool consumeHexStyle(StringRef &Str, HexPrintStyle &Style) {
    if (!Str.starts_with_insensitive("x"))
      return false;

    if (Str.consume_front("x-"))
      Style = HexPrintStyle::Lower;
    else if (Str.consume_front("X-"))
      Style = HexPrintStyle::Upper;
    else if (Str.consume_front("x+") || Str.consume_front("x"))
      Style = HexPrintStyle::PrefixLower;
    else if (Str.consume_front("X+") || Str.consume_front("X"))
      Style = HexPrintStyle::PrefixUpper;
    return true;
  }

  static size_t consumeNumHexDigits(StringRef &Str, HexPrintStyle Style,
                                    size_t Default) {
    Str.consumeInteger(10, Default);
    if (isPrefixedHexStyle(Style))
      Default += 2;
    return Default;
  }
};
} // end namespace detail

/// Implementation of format_provider<T> for integral arithmetic types.
///
/// The options string of an integral type has the grammar:
///
///   integer_options   :: [style][digits]
///   style             :: <see table below>
///   digits            :: <non-negative integer> 0-99
///
///   ==========================================================================
///   |  style  |     Meaning          |      Example     | Digits Meaning     |
///   --------------------------------------------------------------------------
///   |         |                      |  Input |  Output |                    |
///   ==========================================================================
///   |   x-    | Hex no prefix, lower |   42   |    2a   | Minimum # digits   |
///   |   X-    | Hex no prefix, upper |   42   |    2A   | Minimum # digits   |
///   | x+ / x  | Hex + prefix, lower  |   42   |   0x2a  | Minimum # digits   |
///   | X+ / X  | Hex + prefix, upper  |   42   |   0x2A  | Minimum # digits   |
///   | N / n   | Digit grouped number | 123456 | 123,456 | Ignored            |
///   | D / d   | Integer              | 100000 | 100000  | Ignored            |
///   | (empty) | Same as D / d        |        |         |                    |
///   ==========================================================================
///

template <typename T>
struct format_provider<
    T, std::enable_if_t<detail::use_integral_formatter<T>::value>>
    : public detail::HelperFunctions {
public:
  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
    HexPrintStyle HS;
    size_t Digits = 0;
    if (consumeHexStyle(Style, HS)) {
      Digits = consumeNumHexDigits(Style, HS, 0);
      write_hex(Stream, V, HS, Digits);
      return;
    }

    IntegerStyle IS = IntegerStyle::Integer;
    if (Style.consume_front("N") || Style.consume_front("n"))
      IS = IntegerStyle::Number;
    else if (Style.consume_front("D") || Style.consume_front("d"))
      IS = IntegerStyle::Integer;

    Style.consumeInteger(10, Digits);
    assert(Style.empty() && "Invalid integral format style!");
    write_integer(Stream, V, Digits, IS);
  }
};
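
// Example (an illustrative sketch, not part of this header: it assumes
// formatv() from llvm/Support/FormatVariadic.h, which passes the text after
// ':' in a replacement field to this provider as the Style string):
//
//   #include "llvm/Support/FormatVariadic.h"
//   llvm::formatv("{0:x-4}", 255).str();   // "00ff"
//   llvm::formatv("{0:X+}", 255).str();    // "0xFF"
//   llvm::formatv("{0:N}", 1234567).str(); // "1,234,567"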

/// Implementation of format_provider<T> for integral pointer types.
///
/// The options string of a pointer type has the grammar:
///
///   pointer_options   :: [style][precision]
///   style             :: <see table below>
///   precision         :: <non-negative integer> 0-sizeof(void*)
///
///   ==========================================================================
///   |  style  |     Meaning          |                Example                |
///   --------------------------------------------------------------------------
///   |         |                      |       Input       |      Output       |
///   ==========================================================================
///   |   x-    | Hex no prefix, lower |    0xDEADBEEF     |     deadbeef      |
///   |   X-    | Hex no prefix, upper |    0xDEADBEEF     |     DEADBEEF      |
///   | x+ / x  | Hex + prefix, lower  |    0xDEADBEEF     |    0xdeadbeef     |
///   | X+ / X  | Hex + prefix, upper  |    0xDEADBEEF     |    0xDEADBEEF     |
///   | (empty) | Same as X+ / X       |                   |                   |
///   ==========================================================================
///
/// The default precision is the number of nibbles in a machine word, and in all
/// cases indicates the minimum number of nibbles to print.
template <typename T>
struct format_provider<
    T, std::enable_if_t<detail::use_pointer_formatter<T>::value>>
    : public detail::HelperFunctions {
public:
  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
    HexPrintStyle HS = HexPrintStyle::PrefixUpper;
    consumeHexStyle(Style, HS);
    size_t Digits = consumeNumHexDigits(Style, HS, sizeof(void *) * 2);
    write_hex(Stream, reinterpret_cast<std::uintptr_t>(V), HS, Digits);
  }
};
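
// Example (sketch; assumes formatv() from llvm/Support/FormatVariadic.h and a
// 64-bit host, so the default minimum width is 16 nibbles; the pointer value
// shown is illustrative):
//
//   int Obj = 0;
//   void *P = &Obj;
//   llvm::formatv("{0}", P).str();    // e.g. "0x00007FFEE3A9C44C"
//   llvm::formatv("{0:x-}", P).str(); // e.g. "00007ffee3a9c44c"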

/// Implementation of format_provider<T> for c-style strings and string
/// objects such as std::string and llvm::StringRef.
///
/// The options string of a string type has the grammar:
///
///   string_options :: [length]
///
/// where `length` is an optional integer specifying the maximum number of
/// characters in the string to print.  If `length` is omitted, the string is
/// printed up to the null terminator.

template <typename T>
struct format_provider<
    T, std::enable_if_t<detail::use_string_formatter<T>::value>> {
  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
    size_t N = StringRef::npos;
    if (!Style.empty() && Style.getAsInteger(10, N)) {
      assert(false && "Style is not a valid integer");
    }
    llvm::StringRef S = V;
    Stream << S.substr(0, N);
  }
};
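
// Example (sketch; assumes formatv() from llvm/Support/FormatVariadic.h):
//
//   llvm::formatv("{0:5}", "hello world").str();        // "hello"
//   llvm::formatv("{0}", llvm::StringRef("abc")).str(); // "abc"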

/// Implementation of format_provider<T> for llvm::Twine.
///
/// This follows the same rules as the string formatter.

template <> struct format_provider<Twine> {
  static void format(const Twine &V, llvm::raw_ostream &Stream,
                     StringRef Style) {
    format_provider<std::string>::format(V.str(), Stream, Style);
  }
};

/// Implementation of format_provider<T> for characters.
///
/// The options string of a character type has the grammar:
///
///   char_options :: (empty) | [integer_options]
///
/// If `char_options` is empty, the character is displayed as an ASCII
/// character.  Otherwise, it is treated as an integer options string.
///
template <typename T>
struct format_provider<T,
                       std::enable_if_t<detail::use_char_formatter<T>::value>> {
  static void format(const char &V, llvm::raw_ostream &Stream,
                     StringRef Style) {
    if (Style.empty())
      Stream << V;
    else {
      int X = static_cast<int>(V);
      format_provider<int>::format(X, Stream, Style);
    }
  }
};

/// Implementation of format_provider<T> for type `bool`
///
/// The options string of a boolean type has the grammar:
///
///   bool_options :: "" | "Y" | "y" | "D" | "d" | "T" | "t"
///
///   ==================================
///   |    C    |     Meaning          |
///   ==================================
///   |    Y    |       YES / NO       |
///   |    y    |       yes / no       |
///   |  D / d  |    Integer 0 or 1    |
///   |    T    |     TRUE / FALSE     |
///   |    t    |     true / false     |
///   | (empty) |   Equivalent to 't'  |
///   ==================================
template <> struct format_provider<bool> {
  static void format(const bool &B, llvm::raw_ostream &Stream,
                     StringRef Style) {
    Stream << StringSwitch<const char *>(Style)
                  .Case("Y", B ? "YES" : "NO")
                  .Case("y", B ? "yes" : "no")
                  .CaseLower("D", B ? "1" : "0")
                  .Case("T", B ? "TRUE" : "FALSE")
                  .Cases("t", "", B ? "true" : "false")
                  .Default(B ? "1" : "0");
  }
};
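
// Example (sketch; assumes formatv() from llvm/Support/FormatVariadic.h):
//
//   llvm::formatv("{0:Y}", true).str();  // "YES"
//   llvm::formatv("{0:t}", false).str(); // "false"
//   llvm::formatv("{0:D}", true).str();  // "1"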

/// Implementation of format_provider<T> for floating point types.
///
/// The options string of a floating point type has the format:
///
///   float_options   :: [style][precision]
///   style           :: <see table below>
///   precision       :: <non-negative integer> 0-99
///
///   =====================================================
///   |  style  |     Meaning          |      Example     |
///   -----------------------------------------------------
///   |         |                      |  Input |  Output |
///   =====================================================
///   | P / p   | Percentage           |  0.05  |  5.00%  |
///   | F / f   | Fixed point          |   1.0  |  1.00   |
///   |   E     | Exponential with E   | 100000 | 1.0E+05 |
///   |   e     | Exponential with e   | 100000 | 1.0e+05 |
///   | (empty) | Same as F / f        |        |         |
///   =====================================================
///
/// The default precision is 6 for exponential (E / e) and 2 for everything
/// else.

template <typename T>
struct format_provider<T,
                       std::enable_if_t<detail::use_double_formatter<T>::value>>
    : public detail::HelperFunctions {
  static void format(const T &V, llvm::raw_ostream &Stream, StringRef Style) {
    FloatStyle S;
    if (Style.consume_front("P") || Style.consume_front("p"))
      S = FloatStyle::Percent;
    else if (Style.consume_front("F") || Style.consume_front("f"))
      S = FloatStyle::Fixed;
    else if (Style.consume_front("E"))
      S = FloatStyle::ExponentUpper;
    else if (Style.consume_front("e"))
      S = FloatStyle::Exponent;
    else
      S = FloatStyle::Fixed;

    std::optional<size_t> Precision = parseNumericPrecision(Style);
    if (!Precision)
      Precision = getDefaultPrecision(S);

    write_double(Stream, static_cast<double>(V), S, Precision);
  }
};
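
// Example (sketch; assumes formatv() from llvm/Support/FormatVariadic.h):
//
//   llvm::formatv("{0:P}", 0.05).str();     // "5.00%"  (default precision 2)
//   llvm::formatv("{0:e}", 100000.0).str(); // "1.000000e+05" (precision 6)
//   llvm::formatv("{0:F3}", 1.0).str();     // "1.000"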

namespace detail {
template <typename IterT>
using IterValue = typename std::iterator_traits<IterT>::value_type;

template <typename IterT>
struct range_item_has_provider
    : public std::integral_constant<
          bool, !uses_missing_provider<IterValue<IterT>>::value> {};
} // end namespace detail

/// Implementation of format_provider<T> for ranges.
///
/// This will print an arbitrary range as a delimited sequence of items.
///
/// The options string of a range type has the grammar:
///
///   range_style       ::= [separator] [element_style]
///   separator         ::= "$" delimited_expr
///   element_style     ::= "@" delimited_expr
///   delimited_expr    ::= "[" expr "]" | "(" expr ")" | "<" expr ">"
///   expr              ::= <any string not containing the delimiter>
///
/// where the separator expression is the string to insert between consecutive
/// items in the range and the argument expression is the Style specification to
/// be used when formatting the underlying type.  The default separator, if
/// unspecified, is ", " (a comma followed by a space, matching the default in
/// parseOptions below).  The syntax of the argument expression follows
/// whatever grammar is dictated by the format provider or format adapter used
/// to format the value type.
///
/// Note that attempting to format an `iterator_range<T>` where no format
/// provider can be found for T will result in a compile error.
///

template <typename IterT> class format_provider<llvm::iterator_range<IterT>> {
  using value = typename std::iterator_traits<IterT>::value_type;

  static StringRef consumeOneOption(StringRef &Style, char Indicator,
                                    StringRef Default) {
    if (Style.empty())
      return Default;
    if (Style.front() != Indicator)
      return Default;
    Style = Style.drop_front();
    if (Style.empty()) {
      assert(false && "Invalid range style");
      return Default;
    }

    for (const char *D : std::array<const char *, 3>{"[]", "<>", "()"}) {
      if (Style.front() != D[0])
        continue;
      size_t End = Style.find_first_of(D[1]);
      if (End == StringRef::npos) {
        assert(false && "Missing range option end delimiter!");
        return Default;
      }
      StringRef Result = Style.slice(1, End);
      Style = Style.drop_front(End + 1);
      return Result;
    }
    assert(false && "Invalid range style!");
    return Default;
  }

  static std::pair<StringRef, StringRef> parseOptions(StringRef Style) {
    StringRef Sep = consumeOneOption(Style, '$', ", ");
    StringRef Args = consumeOneOption(Style, '@', "");
    assert(Style.empty() && "Unexpected text in range option string!");
    return std::make_pair(Sep, Args);
  }

public:
  static_assert(detail::range_item_has_provider<IterT>::value,
                "Range value_type does not have a format provider!");
  static void format(const llvm::iterator_range<IterT> &V,
                     llvm::raw_ostream &Stream, StringRef Style) {
    StringRef Sep;
    StringRef ArgStyle;
    std::tie(Sep, ArgStyle) = parseOptions(Style);
    auto Begin = V.begin();
    auto End = V.end();
    if (Begin != End) {
      auto Adapter = detail::build_format_adapter(*Begin);
      Adapter.format(Stream, ArgStyle);
      ++Begin;
    }
    while (Begin != End) {
      Stream << Sep;
      auto Adapter = detail::build_format_adapter(*Begin);
      Adapter.format(Stream, ArgStyle);
      ++Begin;
    }
  }
};
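
// Example (sketch; assumes formatv() from llvm/Support/FormatVariadic.h and
// llvm::make_range from llvm/ADT/iterator_range.h):
//
//   std::vector<int> V = {1, 2, 3};
//   auto R = llvm::make_range(V.begin(), V.end());
//   llvm::formatv("{0}", R).str();              // "1, 2, 3"
//   llvm::formatv("{0:$[ | ]@[x-2]}", R).str(); // "01 | 02 | 03"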
} // end namespace llvm

#endif // LLVM_SUPPORT_FORMATPROVIDERS_H
//===- llvm/Support/BCD.h - Binary-Coded Decimal utility functions -*- C++ -*-//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares some utility functions for encoding/decoding BCD values.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BCD_H
#define LLVM_SUPPORT_BCD_H

#include <assert.h>
#include <cstddef>
#include <cstdint>

namespace llvm {

// Decode a packed BCD value.
// Maximum value of int64_t is 9,223,372,036,854,775,807. These are 18 usable
// decimal digits. Thus BCD numbers of up to 9 bytes can be converted.
// Please note that s390 supports BCD numbers up to a length of 16 bytes.
inline int64_t decodePackedBCD(const uint8_t *Ptr, size_t ByteLen,
                               bool IsSigned = true) {
  assert(ByteLen >= 1 && ByteLen <= 9 && "Invalid BCD number");
  int64_t Value = 0;
  size_t RunLen = ByteLen - static_cast<unsigned>(IsSigned);
  for (size_t I = 0; I < RunLen; ++I) {
    uint8_t DecodedByteValue = ((Ptr[I] >> 4) & 0x0f) * 10 + (Ptr[I] & 0x0f);
    Value = (Value * 100) + DecodedByteValue;
  }
  if (IsSigned) {
    uint8_t DecodedByteValue = (Ptr[ByteLen - 1] >> 4) & 0x0f;
    uint8_t Sign = Ptr[ByteLen - 1] & 0x0f;
    Value = (Value * 10) + DecodedByteValue;
    if (Sign == 0x0d || Sign == 0x0b)
      Value *= -1;
  }
  return Value;
}

template <typename ResultT, typename ValT>
inline ResultT decodePackedBCD(const ValT Val, bool IsSigned = true) {
  return static_cast<ResultT>(decodePackedBCD(
      reinterpret_cast<const uint8_t *>(&Val), sizeof(ValT), IsSigned));
}
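
// Example (sketch): decoding the signed packed-BCD buffer {0x12, 0x3d}. The
// leading byte contributes the digits 1 and 2, the final byte contributes the
// digit 3 plus the sign nibble 0xd (negative), so the result is -123:
//
//   uint8_t Buf[] = {0x12, 0x3d};
//   int64_t V = llvm::decodePackedBCD(Buf, sizeof(Buf)); // V == -123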

} // namespace llvm

#endif // LLVM_SUPPORT_BCD_H
//===-- llvm/Support/Win64EH.h ---Win64 EH Constants-------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains constants and structures used for implementing
// exception handling on Win64 platforms. For more information, see
// http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_WIN64EH_H
#define LLVM_SUPPORT_WIN64EH_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"

namespace llvm {
namespace Win64EH {

/// UnwindOpcodes - Enumeration whose values specify a single operation in
/// the prolog of a function.
enum UnwindOpcodes {
  // The following set of unwind opcodes is for x86_64.  They are documented at
  // https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
  // Some generic values from this set are used for other architectures too.
  UOP_PushNonVol = 0,
  UOP_AllocLarge,
  UOP_AllocSmall,
  UOP_SetFPReg,
  UOP_SaveNonVol,
  UOP_SaveNonVolBig,
  UOP_Epilog,
  UOP_SpareCode,
  UOP_SaveXMM128,
  UOP_SaveXMM128Big,
  UOP_PushMachFrame,
  // The following set of unwind opcodes is for ARM64.  They are documented at
  // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
  UOP_AllocMedium,
  UOP_SaveR19R20X,
  UOP_SaveFPLRX,
  UOP_SaveFPLR,
  UOP_SaveReg,
  UOP_SaveRegX,
  UOP_SaveRegP,
  UOP_SaveRegPX,
  UOP_SaveLRPair,
  UOP_SaveFReg,
  UOP_SaveFRegX,
  UOP_SaveFRegP,
  UOP_SaveFRegPX,
  UOP_SetFP,
  UOP_AddFP,
  UOP_Nop,
  UOP_End,
  UOP_SaveNext,
  UOP_TrapFrame,
  UOP_Context,
  UOP_ClearUnwoundToCall,
  UOP_PACSignLR,
  UOP_SaveAnyRegI,
  UOP_SaveAnyRegIP,
  UOP_SaveAnyRegD,
  UOP_SaveAnyRegDP,
  UOP_SaveAnyRegQ,
  UOP_SaveAnyRegQP,
  UOP_SaveAnyRegIX,
  UOP_SaveAnyRegIPX,
  UOP_SaveAnyRegDX,
  UOP_SaveAnyRegDPX,
  UOP_SaveAnyRegQX,
  UOP_SaveAnyRegQPX,

  // The following set of unwind opcodes is for ARM.  They are documented at
  // https://docs.microsoft.com/en-us/cpp/build/arm-exception-handling

  // Stack allocations use UOP_AllocSmall, UOP_AllocLarge from above, plus
  // the following. AllocSmall, AllocLarge and AllocHuge represent a 16 bit
  // instruction, while the WideAlloc* opcodes represent a 32 bit instruction.
  // Small can represent a stack offset of 0x7f*4 (252) bytes, Medium can
  // represent up to 0x3ff*4 (4092) bytes, Large up to 0xffff*4 (262140) bytes,
  // and Huge up to 0xffffff*4 (67108860) bytes.
  UOP_AllocHuge,
  UOP_WideAllocMedium,
  UOP_WideAllocLarge,
  UOP_WideAllocHuge,

  UOP_WideSaveRegMask,
  UOP_SaveSP,
  UOP_SaveRegsR4R7LR,
  UOP_WideSaveRegsR4R11LR,
  UOP_SaveFRegD8D15,
  UOP_SaveRegMask,
  UOP_SaveLR,
  UOP_SaveFRegD0D15,
  UOP_SaveFRegD16D31,
  // Using UOP_Nop from above
  UOP_WideNop,
  // Using UOP_End from above
  UOP_EndNop,
  UOP_WideEndNop,
  // A custom unspecified opcode, consisting of one or more bytes. This
  // allows producing opcodes in the implementation defined/reserved range.
  UOP_Custom,
};

/// UnwindCode - This union describes a single operation in a function prolog,
/// or part thereof.
union UnwindCode {
  struct {
    uint8_t CodeOffset;
    uint8_t UnwindOpAndOpInfo;
  } u;
  support::ulittle16_t FrameOffset;

  uint8_t getUnwindOp() const {
    return u.UnwindOpAndOpInfo & 0x0F;
  }
  uint8_t getOpInfo() const {
    return (u.UnwindOpAndOpInfo >> 4) & 0x0F;
  }
};
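
// Example (sketch): the low nibble holds the opcode, the high nibble the
// operation-specific info:
//
//   UnwindCode UC;
//   UC.u.CodeOffset = 0;
//   UC.u.UnwindOpAndOpInfo = 0x32;
//   UC.getUnwindOp(); // 0x2 (UOP_AllocSmall on x86_64)
//   UC.getOpInfo();   // 0x3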

enum {
  /// UNW_ExceptionHandler - Specifies that this function has an exception
  /// handler.
  UNW_ExceptionHandler = 0x01,
  /// UNW_TerminateHandler - Specifies that this function has a termination
  /// handler.
  UNW_TerminateHandler = 0x02,
  /// UNW_ChainInfo - Specifies that this UnwindInfo structure is chained to
  /// another one.
  UNW_ChainInfo = 0x04
};

/// RuntimeFunction - An entry in the table of functions with unwind info.
struct RuntimeFunction {
  support::ulittle32_t StartAddress;
  support::ulittle32_t EndAddress;
  support::ulittle32_t UnwindInfoOffset;
};

/// UnwindInfo - An entry in the exception table.
struct UnwindInfo {
  uint8_t VersionAndFlags;
  uint8_t PrologSize;
  uint8_t NumCodes;
  uint8_t FrameRegisterAndOffset;
  UnwindCode UnwindCodes[1];

  uint8_t getVersion() const {
    return VersionAndFlags & 0x07;
  }
  uint8_t getFlags() const {
    return (VersionAndFlags >> 3) & 0x1f;
  }
  uint8_t getFrameRegister() const {
    return FrameRegisterAndOffset & 0x0f;
  }
  uint8_t getFrameOffset() const {
    return (FrameRegisterAndOffset >> 4) & 0x0f;
  }

  // The data after unwindCodes depends on flags.
  // If UNW_ExceptionHandler or UNW_TerminateHandler is set then follows
  // the address of the language-specific exception handler.
  // If UNW_ChainInfo is set then follows a RuntimeFunction which defines
  // the chained unwind info.
  // For more information please see MSDN at:
  // http://msdn.microsoft.com/en-us/library/ddssxxy8.aspx

  /// Return pointer to language specific data part of UnwindInfo.
  void *getLanguageSpecificData() {
    return reinterpret_cast<void *>(&UnwindCodes[(NumCodes+1) & ~1]);
  }

  /// Return pointer to language specific data part of UnwindInfo.
  const void *getLanguageSpecificData() const {
    return reinterpret_cast<const void *>(&UnwindCodes[(NumCodes + 1) & ~1]);
  }

  /// Return image-relative offset of language-specific exception handler.
  uint32_t getLanguageSpecificHandlerOffset() const {
    return *reinterpret_cast<const support::ulittle32_t *>(
               getLanguageSpecificData());
  }

  /// Set image-relative offset of language-specific exception handler.
  void setLanguageSpecificHandlerOffset(uint32_t offset) {
    *reinterpret_cast<support::ulittle32_t *>(getLanguageSpecificData()) =
        offset;
  }

  /// Return pointer to exception-specific data.
  void *getExceptionData() {
    return reinterpret_cast<void *>(reinterpret_cast<uint32_t *>(
                                                  getLanguageSpecificData())+1);
  }

  /// Return pointer to chained unwind info.
  RuntimeFunction *getChainedFunctionEntry() {
    return reinterpret_cast<RuntimeFunction *>(getLanguageSpecificData());
  }

  /// Return pointer to chained unwind info.
  const RuntimeFunction *getChainedFunctionEntry() const {
    return reinterpret_cast<const RuntimeFunction *>(getLanguageSpecificData());
  }
};
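
// Example (sketch): an UnwindInfo whose VersionAndFlags is 0x21 decodes as
// getVersion() == 1 and getFlags() == UNW_ChainInfo (0x04), so the data after
// the unwind codes is a RuntimeFunction reachable via
// getChainedFunctionEntry().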


} // End of namespace Win64EH
} // End of namespace llvm

#endif
//===- TargetSelect.h - Target Selection & Registration ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to make sure that certain classes of targets are
// linked into the main application executable, and initialize them as
// appropriate.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TARGETSELECT_H
#define LLVM_SUPPORT_TARGETSELECT_H

#include "llvm/Config/llvm-config.h"

extern "C" {
  // Declare all of the target-initialization functions that are available.
#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo();
#include "llvm/Config/Targets.def"

#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target();
#include "llvm/Config/Targets.def"

  // Declare all of the target-MC-initialization functions that are available.
#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetMC();
#include "llvm/Config/Targets.def"

  // Declare all of the available assembly printer initialization functions.
#define LLVM_ASM_PRINTER(TargetName) void LLVMInitialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"

  // Declare all of the available assembly parser initialization functions.
#define LLVM_ASM_PARSER(TargetName) void LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def"

  // Declare all of the available disassembler initialization functions.
#define LLVM_DISASSEMBLER(TargetName) \
  void LLVMInitialize##TargetName##Disassembler();
#include "llvm/Config/Disassemblers.def"

// Declare all of the available TargetMCA initialization functions.
#define LLVM_TARGETMCA(TargetName) void LLVMInitialize##TargetName##TargetMCA();
#include "llvm/Config/TargetMCAs.def"
}

namespace llvm {
  /// InitializeAllTargetInfos - The main program should call this function if
  /// it wants access to all available targets that LLVM is configured to
  /// support, to make them available via the TargetRegistry.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline void InitializeAllTargetInfos() {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo();
#include "llvm/Config/Targets.def"
  }

  /// InitializeAllTargets - The main program should call this function if it
  /// wants access to all available target machines that LLVM is configured to
  /// support, to make them available via the TargetRegistry.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline void InitializeAllTargets() {
    // FIXME: Remove this, clients should do it.
    InitializeAllTargetInfos();

#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target();
#include "llvm/Config/Targets.def"
  }

  /// InitializeAllTargetMCs - The main program should call this function if it
  /// wants access to all available target MC that LLVM is configured to
  /// support, to make them available via the TargetRegistry.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline void InitializeAllTargetMCs() {
#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetMC();
#include "llvm/Config/Targets.def"
  }

  /// InitializeAllAsmPrinters - The main program should call this function if
  /// it wants all asm printers that LLVM is configured to support, to make them
  /// available via the TargetRegistry.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline void InitializeAllAsmPrinters() {
#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
  }

  /// InitializeAllAsmParsers - The main program should call this function if it
  /// wants all asm parsers that LLVM is configured to support, to make them
  /// available via the TargetRegistry.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline void InitializeAllAsmParsers() {
#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def"
  }

  /// InitializeAllDisassemblers - The main program should call this function if
  /// it wants all disassemblers that LLVM is configured to support, to make
  /// them available via the TargetRegistry.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline void InitializeAllDisassemblers() {
#define LLVM_DISASSEMBLER(TargetName) LLVMInitialize##TargetName##Disassembler();
#include "llvm/Config/Disassemblers.def"
  }

  /// InitializeNativeTarget - The main program should call this function to
  /// initialize the native target corresponding to the host.  This is useful
  /// for JIT applications to ensure that the target gets linked in correctly.
  ///
  /// It is legal for a client to make multiple calls to this function.
  inline bool InitializeNativeTarget() {
  // If we have a native target, initialize it to ensure it is linked in.
#ifdef LLVM_NATIVE_TARGET
    LLVM_NATIVE_TARGETINFO();
    LLVM_NATIVE_TARGET();
    LLVM_NATIVE_TARGETMC();
    return false;
#else
    return true;
#endif
  }

  /// InitializeNativeTargetAsmPrinter - The main program should call
  /// this function to initialize the native target asm printer.
  inline bool InitializeNativeTargetAsmPrinter() {
  // If we have a native target, initialize the corresponding asm printer.
#ifdef LLVM_NATIVE_ASMPRINTER
    LLVM_NATIVE_ASMPRINTER();
    return false;
#else
    return true;
#endif
  }

  /// InitializeNativeTargetAsmParser - The main program should call
  /// this function to initialize the native target asm parser.
  inline bool InitializeNativeTargetAsmParser() {
  // If we have a native target, initialize the corresponding asm parser.
#ifdef LLVM_NATIVE_ASMPARSER
    LLVM_NATIVE_ASMPARSER();
    return false;
#else
    return true;
#endif
  }

  /// InitializeNativeTargetDisassembler - The main program should call
  /// this function to initialize the native target disassembler.
  inline bool InitializeNativeTargetDisassembler() {
  // If we have a native target, initialize the corresponding disassembler.
#ifdef LLVM_NATIVE_DISASSEMBLER
    LLVM_NATIVE_DISASSEMBLER();
    return false;
#else
    return true;
#endif
  }

  /// InitializeAllTargetMCAs - The main program should call
  /// this function to initialize the target CustomBehaviour and
  /// InstrPostProcess classes.
  inline void InitializeAllTargetMCAs() {
#define LLVM_TARGETMCA(TargetName) LLVMInitialize##TargetName##TargetMCA();
#include "llvm/Config/TargetMCAs.def"
  }
} // end namespace llvm
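
// Example (sketch): typical start-up for a JIT host using only the functions
// declared above; each native-target call returns false when the native
// target is configured into this LLVM build:
//
//   int main() {
//     llvm::InitializeNativeTarget();
//     llvm::InitializeNativeTargetAsmPrinter();
//     llvm::InitializeNativeTargetAsmParser();
//     // ... construct the JIT and compile code here ...
//   }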

#endif
//===- llvm/ADT/SuffixTree.h - Tree for substrings --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// A data structure for fast substring queries.
//
// Suffix trees represent the suffixes of their input strings in their leaves.
// A suffix tree is a type of compressed trie structure where each node
// represents an entire substring rather than a single character. Each leaf
// of the tree is a suffix.
//
// A suffix tree can be seen as a type of state machine where each state is a
// substring of the full string. The tree is structured so that, for a string
// of length N, there are exactly N leaves in the tree. This structure allows
// us to quickly find repeated substrings of the input string.
//
// In this implementation, a "string" is a vector of unsigned integers.
// These integers may result from hashing some data type. A suffix tree can
// contain 1 or many strings, which can then be queried as one large string.
//
// The suffix tree is implemented using Ukkonen's algorithm for linear-time
// suffix tree construction. Ukkonen's algorithm is explained in more detail
// in the paper by Esko Ukkonen, "On-line construction of suffix trees". The
// paper is available at
//
// https://www.cs.helsinki.fi/u/ukkonen/SuffixT1withFigs.pdf
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SUFFIXTREE_H
#define LLVM_SUPPORT_SUFFIXTREE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/SuffixTreeNode.h"

namespace llvm {
class SuffixTree {
public:
  /// Each element is an integer representing an instruction in the module.
  ArrayRef<unsigned> Str;

  /// A repeated substring in the tree.
  struct RepeatedSubstring {
    /// The length of the string.
    unsigned Length;

    /// The start indices of each occurrence.
    SmallVector<unsigned> StartIndices;
  };

private:
  /// Maintains internal nodes in the tree.
  SpecificBumpPtrAllocator<SuffixTreeInternalNode> InternalNodeAllocator;
  /// Maintains leaf nodes in the tree.
  SpecificBumpPtrAllocator<SuffixTreeLeafNode> LeafNodeAllocator;

  /// The root of the suffix tree.
  ///
  /// The root represents the empty string. It is maintained by the
  /// \p NodeAllocator like every other node in the tree.
  SuffixTreeInternalNode *Root = nullptr;

  /// The end index of each leaf in the tree.
  unsigned LeafEndIdx = SuffixTreeNode::EmptyIdx;

  /// Helper struct which keeps track of the next insertion point in
  /// Ukkonen's algorithm.
  struct ActiveState {
    /// The next node to insert at.
    SuffixTreeInternalNode *Node = nullptr;

    /// The index of the first character in the substring currently being added.
    unsigned Idx = SuffixTreeNode::EmptyIdx;

    /// The length of the substring we have to add at the current step.
    unsigned Len = 0;
  };

  /// The point the next insertion will take place at in the
  /// construction algorithm.
  ActiveState Active;

  /// Allocate a leaf node and add it to the tree.
  ///
  /// \param Parent The parent of this node.
  /// \param StartIdx The start index of this node's associated string.
  /// \param Edge The label on the edge leaving \p Parent to this node.
  ///
  /// \returns A pointer to the allocated leaf node.
  SuffixTreeNode *insertLeaf(SuffixTreeInternalNode &Parent, unsigned StartIdx,
                             unsigned Edge);

  /// Allocate an internal node and add it to the tree.
  ///
  /// \param Parent The parent of this node. Only null when allocating the root.
  /// \param StartIdx The start index of this node's associated string.
  /// \param EndIdx The end index of this node's associated string.
  /// \param Edge The label on the edge leaving \p Parent to this node.
  ///
  /// \returns A pointer to the allocated internal node.
  SuffixTreeInternalNode *insertInternalNode(SuffixTreeInternalNode *Parent,
                                             unsigned StartIdx, unsigned EndIdx,
                                             unsigned Edge);

  /// Allocate the root node and add it to the tree.
  ///
  /// \returns A pointer to the root.
  SuffixTreeInternalNode *insertRoot();

  /// Set the suffix indices of the leaves to the start indices of their
  /// respective suffixes.
  void setSuffixIndices();

  /// Construct the suffix tree for the prefix of the input ending at
  /// \p EndIdx.
  ///
  /// Used to construct the full suffix tree iteratively. At the end of each
  /// step, the constructed suffix tree is either a valid suffix tree, or a
  /// suffix tree with implicit suffixes. At the end of the final step, the
  /// suffix tree is a valid tree.
  ///
  /// \param EndIdx The end index of the current prefix in the main string.
  /// \param SuffixesToAdd The number of suffixes that must be added
  /// to complete the suffix tree at the current phase.
  ///
  /// \returns The number of suffixes that have not been added at the end of
  /// this step.
  unsigned extend(unsigned EndIdx, unsigned SuffixesToAdd);

public:
  /// Construct a suffix tree from a sequence of unsigned integers.
  ///
  /// \param Str The string to construct the suffix tree for.
  SuffixTree(const ArrayRef<unsigned> &Str);

  /// Iterator for finding all repeated substrings in the suffix tree.
  struct RepeatedSubstringIterator {
  private:
    /// The current node we're visiting.
    SuffixTreeNode *N = nullptr;

    /// The repeated substring associated with this node.
    RepeatedSubstring RS;

    /// The nodes left to visit.
    SmallVector<SuffixTreeInternalNode *> InternalNodesToVisit;

    /// The minimum length of a repeated substring to find.
    /// Since we're outlining, we want at least two instructions in the range.
    /// FIXME: This may not be true for targets like X86 which support many
    /// instruction lengths.
    const unsigned MinLength = 2;

    /// Move the iterator to the next repeated substring.
    void advance();

  public:
    /// Return the current repeated substring.
    RepeatedSubstring &operator*() { return RS; }

    RepeatedSubstringIterator &operator++() {
      advance();
      return *this;
    }

    RepeatedSubstringIterator operator++(int I) {
      RepeatedSubstringIterator It(*this);
      advance();
      return It;
    }

    bool operator==(const RepeatedSubstringIterator &Other) const {
      return N == Other.N;
    }
    bool operator!=(const RepeatedSubstringIterator &Other) const {
      return !(*this == Other);
    }

    RepeatedSubstringIterator(SuffixTreeInternalNode *N) : N(N) {
      // Do we have a non-null node?
      if (!N)
        return;
      // Yes. At the first step, we need to visit all of N's children.
      // Note: This means that we visit N last.
      InternalNodesToVisit.push_back(N);
      advance();
    }
  };

  typedef RepeatedSubstringIterator iterator;
  iterator begin() { return iterator(Root); }
  iterator end() { return iterator(nullptr); }
};
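
// Example (sketch): for the string {1, 2, 3, 1, 2, 3}, iteration yields the
// repeated substrings of length >= 2, e.g. {1, 2, 3} occurring at indices 0
// and 3:
//
//   std::vector<unsigned> V = {1, 2, 3, 1, 2, 3};
//   llvm::SuffixTree ST(V);
//   for (const llvm::SuffixTree::RepeatedSubstring &RS : ST)
//     ; // inspect RS.Length and RS.StartIndices here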

} // namespace llvm

#endif // LLVM_SUPPORT_SUFFIXTREE_H
//===-------- BlockFrequency.h - Block Frequency Wrapper --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements Block Frequency class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BLOCKFREQUENCY_H
#define LLVM_SUPPORT_BLOCKFREQUENCY_H

#include <cassert>
#include <cstdint>

namespace llvm {

class BranchProbability;

// This class represents Block Frequency as a 64-bit value.
class BlockFrequency {
  uint64_t Frequency;

public:
  BlockFrequency(uint64_t Freq = 0) : Frequency(Freq) { }

  /// Returns the maximum possible frequency, the saturation value.
  static uint64_t getMaxFrequency() { return UINT64_MAX; }

  /// Returns the frequency as a fixpoint number scaled by the entry
  /// frequency.
  uint64_t getFrequency() const { return Frequency; }

  /// Multiplies with a branch probability. The computation will never
  /// overflow.
  BlockFrequency &operator*=(BranchProbability Prob);
  BlockFrequency operator*(BranchProbability Prob) const;

  /// Divide by a non-zero branch probability using saturating
  /// arithmetic.
  BlockFrequency &operator/=(BranchProbability Prob);
  BlockFrequency operator/(BranchProbability Prob) const;

  /// Adds another block frequency using saturating arithmetic.
  BlockFrequency &operator+=(BlockFrequency Freq) {
    uint64_t Before = Freq.Frequency;
    Frequency += Freq.Frequency;

    // If overflow, set frequency to the maximum value.
    if (Frequency < Before)
      Frequency = UINT64_MAX;

    return *this;
  }
  BlockFrequency operator+(BlockFrequency Freq) const {
    BlockFrequency NewFreq(Frequency);
    NewFreq += Freq;
    return NewFreq;
  }

  /// Subtracts another block frequency using saturating arithmetic.
  BlockFrequency &operator-=(BlockFrequency Freq) {
    // If underflow, set frequency to 0.
    if (Frequency <= Freq.Frequency)
      Frequency = 0;
    else
      Frequency -= Freq.Frequency;
    return *this;
  }
  BlockFrequency operator-(BlockFrequency Freq) const {
    BlockFrequency NewFreq(Frequency);
    NewFreq -= Freq;
    return NewFreq;
  }

  /// Shift block frequency to the right by count digits saturating to 1.
  BlockFrequency &operator>>=(const unsigned count) {
    // Frequency can never be 0 by design.
    assert(Frequency != 0);

    // Shift right by count.
    Frequency >>= count;

    // Saturate to 1 if we are 0.
    Frequency |= Frequency == 0;
    return *this;
  }

  bool operator<(BlockFrequency RHS) const {
    return Frequency < RHS.Frequency;
  }

  bool operator<=(BlockFrequency RHS) const {
    return Frequency <= RHS.Frequency;
  }

  bool operator>(BlockFrequency RHS) const {
    return Frequency > RHS.Frequency;
  }

  bool operator>=(BlockFrequency RHS) const {
    return Frequency >= RHS.Frequency;
  }

  bool operator==(BlockFrequency RHS) const {
    return Frequency == RHS.Frequency;
  }
};
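
// Example (sketch) of the saturating arithmetic documented above:
//
//   llvm::BlockFrequency F(UINT64_MAX);
//   F += llvm::BlockFrequency(1); // saturates: F.getFrequency() == UINT64_MAX
//   llvm::BlockFrequency Z(3);
//   Z -= llvm::BlockFrequency(5); // saturates: Z.getFrequency() == 0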

} // namespace llvm

#endif
//===-- llvm/Support/TarWriter.h - Tar archive file creator -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_TARWRITER_H
#define LLVM_SUPPORT_TARWRITER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
class TarWriter {
public:
  static Expected<std::unique_ptr<TarWriter>> create(StringRef OutputPath,
                                                     StringRef BaseDir);

  void append(StringRef Path, StringRef Data);

private:
  TarWriter(int FD, StringRef BaseDir);
  raw_fd_ostream OS;
  std::string BaseDir;
  StringSet<> Files;
};
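
// Example (sketch; the archive name and paths are illustrative):
//
//   if (auto TW = llvm::TarWriter::create("out.tar", "base")) {
//     (*TW)->append("base/hello.txt", "hello\n");
//   } else {
//     llvm::consumeError(TW.takeError()); // or report the llvm::Error
//   }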
} // end namespace llvm

#endif
//===--- AMDHSAKernelDescriptor.h -----------------------------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// AMDHSA kernel descriptor definitions. For more information, visit
/// https://llvm.org/docs/AMDGPUUsage.html#kernel-descriptor
///
/// \warning
/// Any changes to this file should also be audited for corresponding changes
/// needed in both the assembler and disassembler, namely:
/// * AMDGPUAsmPrinter.{cpp,h}
/// * AMDGPUTargetStreamer.{cpp,h}
/// * AMDGPUDisassembler.{cpp,h}
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
#define LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H

#include <cstddef>
#include <cstdint>

// Gets offset of specified member in specified type.
#ifndef offsetof
#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE*)0)->MEMBER)
#endif // offsetof

// Creates enumeration entries used for packing bits into integers. Enumeration
// entries include bit shift amount, bit width, and bit mask.
#ifndef AMDHSA_BITS_ENUM_ENTRY
#define AMDHSA_BITS_ENUM_ENTRY(NAME, SHIFT, WIDTH) \
  NAME ## _SHIFT = (SHIFT),                        \
  NAME ## _WIDTH = (WIDTH),                        \
  NAME = (((1 << (WIDTH)) - 1) << (SHIFT))
#endif // AMDHSA_BITS_ENUM_ENTRY

// Gets bits for specified bit mask from specified source.
#ifndef AMDHSA_BITS_GET
#define AMDHSA_BITS_GET(SRC, MSK) ((SRC & MSK) >> MSK ## _SHIFT)
#endif // AMDHSA_BITS_GET

// Sets bits for specified bit mask in specified destination.
#ifndef AMDHSA_BITS_SET
#define AMDHSA_BITS_SET(DST, MSK, VAL)  \
  DST &= ~MSK;                          \
  DST |= ((VAL << MSK ## _SHIFT) & MSK)
#endif // AMDHSA_BITS_SET
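
// Example (sketch) of the get/set macros, using the
// COMPUTE_PGM_RSRC1_PRIORITY entry defined below (shift 10, width 2):
//
//   uint32_t Rsrc1 = 0;
//   AMDHSA_BITS_SET(Rsrc1, llvm::amdhsa::COMPUTE_PGM_RSRC1_PRIORITY, 2);
//   AMDHSA_BITS_GET(Rsrc1, llvm::amdhsa::COMPUTE_PGM_RSRC1_PRIORITY); // == 2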

namespace llvm {
namespace amdhsa {

// Floating point rounding modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_ROUND_MODE_NEAR_EVEN = 0,
  FLOAT_ROUND_MODE_PLUS_INFINITY = 1,
  FLOAT_ROUND_MODE_MINUS_INFINITY = 2,
  FLOAT_ROUND_MODE_ZERO = 3,
};

// Floating point denorm modes. Must match hardware definition.
enum : uint8_t {
  FLOAT_DENORM_MODE_FLUSH_SRC_DST = 0,
  FLOAT_DENORM_MODE_FLUSH_DST = 1,
  FLOAT_DENORM_MODE_FLUSH_SRC = 2,
  FLOAT_DENORM_MODE_FLUSH_NONE = 3,
};

// System VGPR workitem IDs. Must match hardware definition.
enum : uint8_t {
  SYSTEM_VGPR_WORKITEM_ID_X = 0,
  SYSTEM_VGPR_WORKITEM_ID_X_Y = 1,
  SYSTEM_VGPR_WORKITEM_ID_X_Y_Z = 2,
  SYSTEM_VGPR_WORKITEM_ID_UNDEFINED = 3,
};

// Compute program resource register 1. Must match hardware definition.
#define COMPUTE_PGM_RSRC1(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC1_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC1(GRANULATED_WORKITEM_VGPR_COUNT, 0, 6),
  COMPUTE_PGM_RSRC1(GRANULATED_WAVEFRONT_SGPR_COUNT, 6, 4),
  COMPUTE_PGM_RSRC1(PRIORITY, 10, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_32, 12, 2),
  COMPUTE_PGM_RSRC1(FLOAT_ROUND_MODE_16_64, 14, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_32, 16, 2),
  COMPUTE_PGM_RSRC1(FLOAT_DENORM_MODE_16_64, 18, 2),
  COMPUTE_PGM_RSRC1(PRIV, 20, 1),
  COMPUTE_PGM_RSRC1(ENABLE_DX10_CLAMP, 21, 1),
  COMPUTE_PGM_RSRC1(DEBUG_MODE, 22, 1),
  COMPUTE_PGM_RSRC1(ENABLE_IEEE_MODE, 23, 1),
  COMPUTE_PGM_RSRC1(BULKY, 24, 1),
  COMPUTE_PGM_RSRC1(CDBG_USER, 25, 1),
  COMPUTE_PGM_RSRC1(FP16_OVFL, 26, 1),    // GFX9+
  COMPUTE_PGM_RSRC1(RESERVED0, 27, 2),
  COMPUTE_PGM_RSRC1(WGP_MODE, 29, 1),     // GFX10+
  COMPUTE_PGM_RSRC1(MEM_ORDERED, 30, 1),  // GFX10+
  COMPUTE_PGM_RSRC1(FWD_PROGRESS, 31, 1), // GFX10+
};
#undef COMPUTE_PGM_RSRC1

// Compute program resource register 2. Must match hardware definition.
#define COMPUTE_PGM_RSRC2(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC2_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC2(ENABLE_PRIVATE_SEGMENT, 0, 1),
  COMPUTE_PGM_RSRC2(USER_SGPR_COUNT, 1, 5),
  COMPUTE_PGM_RSRC2(ENABLE_TRAP_HANDLER, 6, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_X, 7, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Y, 8, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_ID_Z, 9, 1),
  COMPUTE_PGM_RSRC2(ENABLE_SGPR_WORKGROUP_INFO, 10, 1),
  COMPUTE_PGM_RSRC2(ENABLE_VGPR_WORKITEM_ID, 11, 2),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_ADDRESS_WATCH, 13, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_MEMORY, 14, 1),
  COMPUTE_PGM_RSRC2(GRANULATED_LDS_SIZE, 15, 9),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, 24, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_FP_DENORMAL_SOURCE, 25, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, 26, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW, 27, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW, 28, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_IEEE_754_FP_INEXACT, 29, 1),
  COMPUTE_PGM_RSRC2(ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO, 30, 1),
  COMPUTE_PGM_RSRC2(RESERVED0, 31, 1),
};
#undef COMPUTE_PGM_RSRC2

// Compute program resource register 3 for GFX90A+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX90A(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX90A_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX90A(ACCUM_OFFSET, 0, 6),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED0, 6, 10),
  COMPUTE_PGM_RSRC3_GFX90A(TG_SPLIT, 16, 1),
  COMPUTE_PGM_RSRC3_GFX90A(RESERVED1, 17, 15),
};
#undef COMPUTE_PGM_RSRC3_GFX90A

// Compute program resource register 3 for GFX10+. Must match hardware
// definition.
#define COMPUTE_PGM_RSRC3_GFX10_PLUS(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(COMPUTE_PGM_RSRC3_GFX10_PLUS_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  COMPUTE_PGM_RSRC3_GFX10_PLUS(SHARED_VGPR_COUNT, 0, 4), // GFX10+
  COMPUTE_PGM_RSRC3_GFX10_PLUS(INST_PREF_SIZE, 4, 6),    // GFX11+
  COMPUTE_PGM_RSRC3_GFX10_PLUS(TRAP_ON_START, 10, 1),    // GFX11+
  COMPUTE_PGM_RSRC3_GFX10_PLUS(TRAP_ON_END, 11, 1),      // GFX11+
  COMPUTE_PGM_RSRC3_GFX10_PLUS(RESERVED0, 12, 19),
  COMPUTE_PGM_RSRC3_GFX10_PLUS(IMAGE_OP, 31, 1),         // GFX11+
};
#undef COMPUTE_PGM_RSRC3_GFX10_PLUS

// Kernel code properties. Must be kept backwards compatible.
#define KERNEL_CODE_PROPERTY(NAME, SHIFT, WIDTH) \
  AMDHSA_BITS_ENUM_ENTRY(KERNEL_CODE_PROPERTY_ ## NAME, SHIFT, WIDTH)
enum : int32_t {
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER, 0, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_PTR, 1, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_QUEUE_PTR, 2, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_KERNARG_SEGMENT_PTR, 3, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_DISPATCH_ID, 4, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_FLAT_SCRATCH_INIT, 5, 1),
  KERNEL_CODE_PROPERTY(ENABLE_SGPR_PRIVATE_SEGMENT_SIZE, 6, 1),
  KERNEL_CODE_PROPERTY(RESERVED0, 7, 3),
  KERNEL_CODE_PROPERTY(ENABLE_WAVEFRONT_SIZE32, 10, 1), // GFX10+
  KERNEL_CODE_PROPERTY(USES_DYNAMIC_STACK, 11, 1),
  KERNEL_CODE_PROPERTY(RESERVED1, 12, 4),
};
#undef KERNEL_CODE_PROPERTY

// Kernel descriptor. Must be kept backwards compatible.
struct kernel_descriptor_t {
  uint32_t group_segment_fixed_size;
  uint32_t private_segment_fixed_size;
  uint32_t kernarg_size;
  uint8_t reserved0[4];
  int64_t kernel_code_entry_byte_offset;
  uint8_t reserved1[20];
  uint32_t compute_pgm_rsrc3; // GFX10+ and GFX90A+
  uint32_t compute_pgm_rsrc1;
  uint32_t compute_pgm_rsrc2;
  uint16_t kernel_code_properties;
  uint8_t reserved2[6];
};

enum : uint32_t {
  GROUP_SEGMENT_FIXED_SIZE_OFFSET = 0,
  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET = 4,
  KERNARG_SIZE_OFFSET = 8,
  RESERVED0_OFFSET = 12,
  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET = 16,
  RESERVED1_OFFSET = 24,
  COMPUTE_PGM_RSRC3_OFFSET = 44,
  COMPUTE_PGM_RSRC1_OFFSET = 48,
  COMPUTE_PGM_RSRC2_OFFSET = 52,
  KERNEL_CODE_PROPERTIES_OFFSET = 56,
  RESERVED2_OFFSET = 58,
};

static_assert(
    sizeof(kernel_descriptor_t) == 64,
    "invalid size for kernel_descriptor_t");
static_assert(offsetof(kernel_descriptor_t, group_segment_fixed_size) ==
                  GROUP_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for group_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, private_segment_fixed_size) ==
                  PRIVATE_SEGMENT_FIXED_SIZE_OFFSET,
              "invalid offset for private_segment_fixed_size");
static_assert(offsetof(kernel_descriptor_t, kernarg_size) ==
                  KERNARG_SIZE_OFFSET,
              "invalid offset for kernarg_size");
static_assert(offsetof(kernel_descriptor_t, reserved0) == RESERVED0_OFFSET,
              "invalid offset for reserved0");
static_assert(offsetof(kernel_descriptor_t, kernel_code_entry_byte_offset) ==
                  KERNEL_CODE_ENTRY_BYTE_OFFSET_OFFSET,
              "invalid offset for kernel_code_entry_byte_offset");
static_assert(offsetof(kernel_descriptor_t, reserved1) == RESERVED1_OFFSET,
              "invalid offset for reserved1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc3) ==
                  COMPUTE_PGM_RSRC3_OFFSET,
              "invalid offset for compute_pgm_rsrc3");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc1) ==
                  COMPUTE_PGM_RSRC1_OFFSET,
              "invalid offset for compute_pgm_rsrc1");
static_assert(offsetof(kernel_descriptor_t, compute_pgm_rsrc2) ==
                  COMPUTE_PGM_RSRC2_OFFSET,
              "invalid offset for compute_pgm_rsrc2");
static_assert(offsetof(kernel_descriptor_t, kernel_code_properties) ==
                  KERNEL_CODE_PROPERTIES_OFFSET,
              "invalid offset for kernel_code_properties");
static_assert(offsetof(kernel_descriptor_t, reserved2) == RESERVED2_OFFSET,
              "invalid offset for reserved2");

} // end namespace amdhsa
} // end namespace llvm

#endif // LLVM_SUPPORT_AMDHSAKERNELDESCRIPTOR_H
//===- YAMLParser.h - Simple YAML parser ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This is a YAML 1.2 parser.
//
//  See http://www.yaml.org/spec/1.2/spec.html for the full standard.
//
//  This currently does not implement the following:
//    * Tag resolution.
//    * UTF-16.
//    * BOMs anywhere other than the first Unicode scalar value in the file.
//
//  The most important class here is Stream. This represents a YAML stream with
//  0, 1, or many documents.
//
//  SourceMgr sm;
//  StringRef input = getInput();
//  yaml::Stream stream(input, sm);
//
//  for (yaml::document_iterator di = stream.begin(), de = stream.end();
//       di != de; ++di) {
//    yaml::Node *n = di->getRoot();
//    if (n) {
//      // Do something with n...
//    } else
//      break;
//  }
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_YAMLPARSER_H
#define LLVM_SUPPORT_YAMLPARSER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <system_error>

namespace llvm {

class MemoryBufferRef;
class raw_ostream;
class Twine;

namespace yaml {

class Document;
class document_iterator;
class Node;
class Scanner;
struct Token;

/// Dump all the tokens in this stream to OS.
/// \returns true if there was an error, false otherwise.
bool dumpTokens(StringRef Input, raw_ostream &);

/// Scans all tokens in input without outputting anything. This is used
///        for benchmarking the tokenizer.
/// \returns true if there was an error, false otherwise.
bool scanTokens(StringRef Input);

/// Escape \a Input for a double quoted scalar; if \p EscapePrintable
/// is true, all UTF8 sequences will be escaped, if \p EscapePrintable is
/// false, those UTF8 sequences encoding printable unicode scalars will not be
/// escaped, but emitted verbatim.
std::string escape(StringRef Input, bool EscapePrintable = true);

/// Parse \p S as a bool according to https://yaml.org/type/bool.html.
std::optional<bool> parseBool(StringRef S);

/// This class represents a YAML stream potentially containing multiple
///        documents.
class Stream {
public:
  /// This keeps a reference to the string referenced by \p Input.
  Stream(StringRef Input, SourceMgr &, bool ShowColors = true,
         std::error_code *EC = nullptr);

  Stream(MemoryBufferRef InputBuffer, SourceMgr &, bool ShowColors = true,
         std::error_code *EC = nullptr);
  ~Stream();

  document_iterator begin();
  document_iterator end();
  void skip();
  bool failed();

  bool validate() {
    skip();
    return !failed();
  }

  void printError(Node *N, const Twine &Msg,
                  SourceMgr::DiagKind Kind = SourceMgr::DK_Error);
  void printError(const SMRange &Range, const Twine &Msg,
                  SourceMgr::DiagKind Kind = SourceMgr::DK_Error);

private:
  friend class Document;

  std::unique_ptr<Scanner> scanner;
  std::unique_ptr<Document> CurrentDoc;
};

/// Abstract base class for all Nodes.
class Node {
  virtual void anchor();

public:
  enum NodeKind {
    NK_Null,
    NK_Scalar,
    NK_BlockScalar,
    NK_KeyValue,
    NK_Mapping,
    NK_Sequence,
    NK_Alias
  };

  Node(unsigned int Type, std::unique_ptr<Document> &, StringRef Anchor,
       StringRef Tag);

  // It's not safe to copy YAML nodes; the document is streamed and the position
  // is part of the state.
  Node(const Node &) = delete;
  void operator=(const Node &) = delete;

  void *operator new(size_t Size, BumpPtrAllocator &Alloc,
                     size_t Alignment = 16) noexcept {
    return Alloc.Allocate(Size, Alignment);
  }

  void operator delete(void *Ptr, BumpPtrAllocator &Alloc,
                       size_t Size) noexcept {
    Alloc.Deallocate(Ptr, Size, 0);
  }

  void operator delete(void *) noexcept = delete;

  /// Get the value of the anchor attached to this node. If it does not
  ///        have one, getAnchor().size() will be 0.
  StringRef getAnchor() const { return Anchor; }

  /// Get the tag as it was written in the document. This does not
  ///   perform tag resolution.
  StringRef getRawTag() const { return Tag; }

  /// Get the verbatim tag for a given Node. This performs tag resolution
  ///   and substitution.
  std::string getVerbatimTag() const;

  SMRange getSourceRange() const { return SourceRange; }
  void setSourceRange(SMRange SR) { SourceRange = SR; }

  // These functions forward to Document and Scanner.
  Token &peekNext();
  Token getNext();
  Node *parseBlockNode();
  BumpPtrAllocator &getAllocator();
  void setError(const Twine &Message, Token &Location) const;
  bool failed() const;

  virtual void skip() {}

  unsigned int getType() const { return TypeID; }

protected:
  std::unique_ptr<Document> &Doc;
  SMRange SourceRange;

  ~Node() = default;

private:
  unsigned int TypeID;
  StringRef Anchor;
  /// The tag as typed in the document.
  StringRef Tag;
};

/// A null value.
///
/// Example:
///   !!null null
class NullNode final : public Node {
  void anchor() override;

public:
  NullNode(std::unique_ptr<Document> &D)
      : Node(NK_Null, D, StringRef(), StringRef()) {}

  static bool classof(const Node *N) { return N->getType() == NK_Null; }
};

/// A scalar node is an opaque datum that can be presented as a
///        series of zero or more Unicode scalar values.
///
/// Example:
///   Adena
class ScalarNode final : public Node {
  void anchor() override;

public:
  ScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
             StringRef Val)
      : Node(NK_Scalar, D, Anchor, Tag), Value(Val) {
    SMLoc Start = SMLoc::getFromPointer(Val.begin());
    SMLoc End = SMLoc::getFromPointer(Val.end());
    SourceRange = SMRange(Start, End);
  }

  // Return Value without any escaping or folding or other fun YAML stuff. This
  // is the exact bytes that are contained in the file (after conversion to
  // utf8).
  StringRef getRawValue() const { return Value; }

  /// Gets the value of this node as a StringRef.
  ///
  /// \param Storage is used to store the content of the returned StringRef if
  ///        it requires any modification from how it appeared in the source.
  ///        This happens with escaped characters and multi-line literals.
  StringRef getValue(SmallVectorImpl<char> &Storage) const;

  static bool classof(const Node *N) {
    return N->getType() == NK_Scalar;
  }

private:
  StringRef Value;

  StringRef unescapeDoubleQuoted(StringRef UnquotedValue,
                                 StringRef::size_type Start,
                                 SmallVectorImpl<char> &Storage) const;
};
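
/// Example: reading a scalar through LLVM RTTI (a minimal sketch; assumes a
/// Node* obtained from a parsed document).
/// \code
///   void printScalar(llvm::yaml::Node *N, llvm::raw_ostream &OS) {
///     if (auto *SN = llvm::dyn_cast<llvm::yaml::ScalarNode>(N)) {
///       llvm::SmallString<32> Storage;
///       OS << SN->getValue(Storage) << "\n";
///     }
///   }
/// \endcode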

/// A block scalar node is an opaque datum that can be presented as a
///        series of zero or more Unicode scalar values.
///
/// Example:
///   |
///     Hello
///     World
class BlockScalarNode final : public Node {
  void anchor() override;

public:
  BlockScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
                  StringRef Value, StringRef RawVal)
      : Node(NK_BlockScalar, D, Anchor, Tag), Value(Value) {
    SMLoc Start = SMLoc::getFromPointer(RawVal.begin());
    SMLoc End = SMLoc::getFromPointer(RawVal.end());
    SourceRange = SMRange(Start, End);
  }

  /// Gets the value of this node as a StringRef.
  StringRef getValue() const { return Value; }

  static bool classof(const Node *N) {
    return N->getType() == NK_BlockScalar;
  }

private:
  StringRef Value;
};

/// A key and value pair. While not technically a Node under the YAML
///        representation graph, it is easier to treat them this way.
///
/// TODO: Consider making this not a child of Node.
///
/// Example:
///   Section: .text
class KeyValueNode final : public Node {
  void anchor() override;

public:
  KeyValueNode(std::unique_ptr<Document> &D)
      : Node(NK_KeyValue, D, StringRef(), StringRef()) {}

  /// Parse and return the key.
  ///
  /// This may be called multiple times.
  ///
  /// \returns The key, or nullptr if failed() == true.
  Node *getKey();

  /// Parse and return the value.
  ///
  /// This may be called multiple times.
  ///
  /// \returns The value, or nullptr if failed() == true.
  Node *getValue();

  void skip() override {
    if (Node *Key = getKey()) {
      Key->skip();
      if (Node *Val = getValue())
        Val->skip();
    }
  }

  static bool classof(const Node *N) {
    return N->getType() == NK_KeyValue;
  }

private:
  Node *Key = nullptr;
  Node *Value = nullptr;
};

/// This is an iterator abstraction over YAML collections shared by both
///        sequences and maps.
///
/// BaseT must have a ValueT* member named CurrentEntry and a member function
/// increment() which must set CurrentEntry to 0 to create an end iterator.
template <class BaseT, class ValueT> class basic_collection_iterator {
public:
  using iterator_category = std::input_iterator_tag;
  using value_type = ValueT;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  basic_collection_iterator() = default;
  basic_collection_iterator(BaseT *B) : Base(B) {}

  ValueT *operator->() const {
    assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
    return Base->CurrentEntry;
  }

  ValueT &operator*() const {
    assert(Base && Base->CurrentEntry &&
           "Attempted to dereference end iterator!");
    return *Base->CurrentEntry;
  }

  operator ValueT *() const {
    assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
    return Base->CurrentEntry;
  }

  /// Note on EqualityComparable:
  ///
  /// The iterator is not re-entrant; it is meant to be used for parsing YAML
  /// on demand. Once iteration has started it can point to only one entry at
  /// a time, hence Base->CurrentEntry and Other.Base->CurrentEntry are equal
  /// iff Base and Other.Base are equal.
  bool operator==(const basic_collection_iterator &Other) const {
    if (Base && (Base == Other.Base)) {
      assert((Base->CurrentEntry == Other.Base->CurrentEntry)
             && "Equal Bases expected to point to equal Entries");
    }

    return Base == Other.Base;
  }

  bool operator!=(const basic_collection_iterator &Other) const {
    return !(Base == Other.Base);
  }

  basic_collection_iterator &operator++() {
    assert(Base && "Attempted to advance iterator past end!");
    Base->increment();
    // Create an end iterator.
    if (!Base->CurrentEntry)
      Base = nullptr;
    return *this;
  }

private:
  BaseT *Base = nullptr;
};

// The following two templates are used for both MappingNode and SequenceNode.
template <class CollectionType>
typename CollectionType::iterator begin(CollectionType &C) {
  assert(C.IsAtBeginning && "You may only iterate over a collection once!");
  C.IsAtBeginning = false;
  typename CollectionType::iterator ret(&C);
  ++ret;
  return ret;
}

template <class CollectionType> void skip(CollectionType &C) {
  // TODO: support skipping from the middle of a parsed collection ;/
  assert((C.IsAtBeginning || C.IsAtEnd) && "Cannot skip mid parse!");
  if (C.IsAtBeginning)
    for (typename CollectionType::iterator i = begin(C), e = C.end(); i != e;
         ++i)
      i->skip();
}

/// Represents a YAML map created from either a block map or a flow map.
///
/// This parses the YAML stream as increment() is called.
///
/// Example:
///   Name: _main
///   Scope: Global
class MappingNode final : public Node {
  void anchor() override;

public:
  enum MappingType {
    MT_Block,
    MT_Flow,
    MT_Inline ///< An inline mapping node is used for "[key: value]".
  };

  MappingNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
              MappingType MT)
      : Node(NK_Mapping, D, Anchor, Tag), Type(MT) {}

  friend class basic_collection_iterator<MappingNode, KeyValueNode>;

  using iterator = basic_collection_iterator<MappingNode, KeyValueNode>;

  template <class T> friend typename T::iterator yaml::begin(T &);
  template <class T> friend void yaml::skip(T &);

  iterator begin() { return yaml::begin(*this); }

  iterator end() { return iterator(); }

  void skip() override { yaml::skip(*this); }

  static bool classof(const Node *N) {
    return N->getType() == NK_Mapping;
  }

private:
  MappingType Type;
  bool IsAtBeginning = true;
  bool IsAtEnd = false;
  KeyValueNode *CurrentEntry = nullptr;

  void increment();
};
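
/// Example: single-pass iteration over a mapping (a sketch; a collection may
/// only be iterated once, as enforced by the assertion in yaml::begin()).
/// \code
///   void visitMap(llvm::yaml::MappingNode *Map) {
///     for (llvm::yaml::KeyValueNode &KV : *Map) {
///       llvm::yaml::Node *Key = KV.getKey();     // Parses the key on demand.
///       llvm::yaml::Node *Value = KV.getValue(); // Parses the value on demand.
///       (void)Key;
///       (void)Value;
///     }
///   }
/// \endcode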

/// Represents a YAML sequence created from either a block sequence or a
///        flow sequence.
///
/// This parses the YAML stream as increment() is called.
///
/// Example:
///   - Hello
///   - World
class SequenceNode final : public Node {
  void anchor() override;

public:
  enum SequenceType {
    ST_Block,
    ST_Flow,
    // Use for:
    //
    // key:
    // - val1
    // - val2
    //
    // As a BlockMappingEntry and BlockEnd are not created in this case.
    ST_Indentless
  };

  SequenceNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
               SequenceType ST)
      : Node(NK_Sequence, D, Anchor, Tag), SeqType(ST) {}

  friend class basic_collection_iterator<SequenceNode, Node>;

  using iterator = basic_collection_iterator<SequenceNode, Node>;

  template <class T> friend typename T::iterator yaml::begin(T &);
  template <class T> friend void yaml::skip(T &);

  void increment();

  iterator begin() { return yaml::begin(*this); }

  iterator end() { return iterator(); }

  void skip() override { yaml::skip(*this); }

  static bool classof(const Node *N) {
    return N->getType() == NK_Sequence;
  }

private:
  SequenceType SeqType;
  bool IsAtBeginning = true;
  bool IsAtEnd = false;
  bool WasPreviousTokenFlowEntry = true; // Start with an imaginary ','.
  Node *CurrentEntry = nullptr;
};

/// Represents an alias to a Node with an anchor.
///
/// Example:
///   *AnchorName
class AliasNode final : public Node {
  void anchor() override;

public:
  AliasNode(std::unique_ptr<Document> &D, StringRef Val)
      : Node(NK_Alias, D, StringRef(), StringRef()), Name(Val) {}

  StringRef getName() const { return Name; }

  static bool classof(const Node *N) { return N->getType() == NK_Alias; }

private:
  StringRef Name;
};

/// A YAML Stream is a sequence of Documents. A document contains a root
///        node.
class Document {
public:
  Document(Stream &ParentStream);

  /// Root for parsing a node. Returns a single node.
  Node *parseBlockNode();

  /// Finish parsing the current document and return true if there are
  ///        more. Return false otherwise.
  bool skip();

  /// Parse and return the root level node.
  Node *getRoot() {
    if (Root)
      return Root;
    return Root = parseBlockNode();
  }

  const std::map<StringRef, StringRef> &getTagMap() const { return TagMap; }

private:
  friend class Node;
  friend class document_iterator;

  /// Stream to read tokens from.
  Stream &stream;

  /// Used to allocate nodes from. All are destroyed without calling their
  ///        destructor when the document is destroyed.
  BumpPtrAllocator NodeAllocator;

  /// The root node. Used to support skipping a partially parsed
  ///        document.
  Node *Root;

  /// Maps tag prefixes to their expansion.
  std::map<StringRef, StringRef> TagMap;

  Token &peekNext();
  Token getNext();
  void setError(const Twine &Message, Token &Location) const;
  bool failed() const;

  /// Parse %BLAH directives and return true if any were encountered.
  bool parseDirectives();

  /// Parse %YAML
  void parseYAMLDirective();

  /// Parse %TAG
  void parseTAGDirective();

  /// Consume the next token and error if it is not \a TK.
  bool expectToken(int TK);
};

/// Iterator abstraction for Documents over a Stream.
class document_iterator {
public:
  document_iterator() = default;
  document_iterator(std::unique_ptr<Document> &D) : Doc(&D) {}

  bool operator==(const document_iterator &Other) const {
    if (isAtEnd() || Other.isAtEnd())
      return isAtEnd() && Other.isAtEnd();

    return Doc == Other.Doc;
  }
  bool operator!=(const document_iterator &Other) const {
    return !(*this == Other);
  }

  document_iterator operator++() {
    assert(Doc && "incrementing iterator past the end.");
    if (!(*Doc)->skip()) {
      Doc->reset(nullptr);
    } else {
      Stream &S = (*Doc)->stream;
      Doc->reset(new Document(S));
    }
    return *this;
  }

  Document &operator*() { return **Doc; }

  std::unique_ptr<Document> &operator->() { return *Doc; }

private:
  bool isAtEnd() const { return !Doc || !*Doc; }

  std::unique_ptr<Document> *Doc = nullptr;
};
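
/// Example: walking every document in a stream (a sketch; yaml::Stream is
/// declared earlier in this header and parses lazily as iteration proceeds).
/// \code
///   llvm::SourceMgr SM;
///   llvm::yaml::Stream YS("---\nName: _main\n...\n", SM);
///   for (llvm::yaml::document_iterator DI = YS.begin(), DE = YS.end();
///        DI != DE; ++DI) {
///     llvm::yaml::Node *Root = DI->getRoot();
///     if (!Root)
///       break; // Parse error.
///   }
/// \endcode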

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_SUPPORT_YAMLPARSER_H
//===- BinaryStream.h - Base interface for a stream of data -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_BINARYSTREAM_H
#define LLVM_SUPPORT_BINARYSTREAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/Support/BinaryStreamError.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {

enum BinaryStreamFlags {
  BSF_None = 0,
  BSF_Write = 1,  // Stream supports writing.
  BSF_Append = 2, // Writing can occur at offset == length.
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ BSF_Append)
};

/// An interface for accessing data in a stream-like format, but which
/// discourages copying.  Instead of specifying a buffer in which to copy
/// data on a read, the API returns an ArrayRef to data owned by the stream's
/// implementation.  Since implementations may not necessarily store data in a
/// single contiguous buffer (or even in memory at all), it may be necessary
/// for an implementation to cache such a buffer so that it can return it.
class BinaryStream {
public:
  virtual ~BinaryStream() = default;

  virtual llvm::support::endianness getEndian() const = 0;

  /// Given an offset into the stream and a number of bytes, attempt to
  /// read the bytes and set the output ArrayRef to point to data owned by the
  /// stream.
  virtual Error readBytes(uint64_t Offset, uint64_t Size,
                          ArrayRef<uint8_t> &Buffer) = 0;

  /// Given an offset into the stream, read as much as possible without
  /// copying any data.
  virtual Error readLongestContiguousChunk(uint64_t Offset,
                                           ArrayRef<uint8_t> &Buffer) = 0;

  /// Return the number of bytes of data in this stream.
  virtual uint64_t getLength() = 0;

  /// Return the properties of this stream.
  virtual BinaryStreamFlags getFlags() const { return BSF_None; }

protected:
  Error checkOffsetForRead(uint64_t Offset, uint64_t DataSize) {
    if (Offset > getLength())
      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
    if (getLength() < DataSize + Offset)
      return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
    return Error::success();
  }
};

/// A BinaryStream which can be read from as well as written to.  Note
/// that writing to a BinaryStream always necessitates copying from the input
/// buffer to the stream's backing store.  Streams are assumed to be buffered
/// so that to be portable it is necessary to call commit() on the stream when
/// all data has been written.
class WritableBinaryStream : public BinaryStream {
public:
  ~WritableBinaryStream() override = default;

  /// Attempt to write the given bytes into the stream at the desired
  /// offset. This will always necessitate a copy.  Cannot shrink or grow the
  /// stream, only writes into existing allocated space.
  virtual Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Data) = 0;

  /// For buffered streams, commits changes to the backing store.
  virtual Error commit() = 0;

  /// Return the properties of this stream.
  BinaryStreamFlags getFlags() const override { return BSF_Write; }

protected:
  Error checkOffsetForWrite(uint64_t Offset, uint64_t DataSize) {
    if (!(getFlags() & BSF_Append))
      return checkOffsetForRead(Offset, DataSize);

    if (Offset > getLength())
      return make_error<BinaryStreamError>(stream_error_code::invalid_offset);
    return Error::success();
  }
};
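
/// Example: a minimal read-only BinaryStream over an in-memory buffer (a
/// sketch; llvm/Support/BinaryByteStream.h provides production versions).
/// \code
///   class InMemoryStream : public llvm::BinaryStream {
///     llvm::ArrayRef<uint8_t> Data;
///     llvm::support::endianness Endian;
///
///   public:
///     InMemoryStream(llvm::ArrayRef<uint8_t> Data, llvm::support::endianness E)
///         : Data(Data), Endian(E) {}
///     llvm::support::endianness getEndian() const override { return Endian; }
///     llvm::Error readBytes(uint64_t Offset, uint64_t Size,
///                           llvm::ArrayRef<uint8_t> &Buffer) override {
///       if (llvm::Error E = checkOffsetForRead(Offset, Size))
///         return E;
///       Buffer = Data.slice(Offset, Size); // No copy; points into Data.
///       return llvm::Error::success();
///     }
///     llvm::Error readLongestContiguousChunk(
///         uint64_t Offset, llvm::ArrayRef<uint8_t> &Buffer) override {
///       if (llvm::Error E = checkOffsetForRead(Offset, 0))
///         return E;
///       Buffer = Data.drop_front(Offset);
///       return llvm::Error::success();
///     }
///     uint64_t getLength() override { return Data.size(); }
///   };
/// \endcode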

} // end namespace llvm

#endif // LLVM_SUPPORT_BINARYSTREAM_H
//===-- llvm/Support/Threading.h - Control multithreading mode --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares helper functions for running LLVM in a multi-threaded
// environment.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_THREADING_H
#define LLVM_SUPPORT_THREADING_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
#include "llvm/Support/Compiler.h"
#include <ciso646> // So we can check the C++ standard lib macros.
#include <optional>

#if defined(_MSC_VER)
// MSVC's call_once implementation has worked since VS 2015, which is the
// minimum supported version as of this writing.
#define LLVM_THREADING_USE_STD_CALL_ONCE 1
#elif defined(LLVM_ON_UNIX) &&                                                 \
    (defined(_LIBCPP_VERSION) ||                                               \
     !(defined(__NetBSD__) || defined(__OpenBSD__) || defined(__powerpc__)))
// std::call_once from libc++ is used on all Unix platforms. Other
// implementations like libstdc++ are known to have problems on NetBSD,
// OpenBSD and PowerPC.
#define LLVM_THREADING_USE_STD_CALL_ONCE 1
#elif defined(LLVM_ON_UNIX) &&                                                 \
    (defined(__powerpc__) && defined(__LITTLE_ENDIAN__))
#define LLVM_THREADING_USE_STD_CALL_ONCE 1
#else
#define LLVM_THREADING_USE_STD_CALL_ONCE 0
#endif

#if LLVM_THREADING_USE_STD_CALL_ONCE
#include <mutex>
#else
#include "llvm/Support/Atomic.h"
#endif

namespace llvm {
class Twine;

/// Returns true if LLVM is compiled with support for multi-threading, and
/// false otherwise.
constexpr bool llvm_is_multithreaded() { return LLVM_ENABLE_THREADS; }

#if LLVM_THREADING_USE_STD_CALL_ONCE

  typedef std::once_flag once_flag;

#else

  enum InitStatus { Uninitialized = 0, Wait = 1, Done = 2 };

  /// The llvm::once_flag structure
  ///
  /// This type is modeled after std::once_flag to use with llvm::call_once.
  /// This structure must be used as an opaque object. It is a struct to force
  /// autoinitialization and behave like std::once_flag.
  struct once_flag {
    volatile sys::cas_flag status = Uninitialized;
  };

#endif

  /// Execute the function specified as a parameter once.
  ///
  /// Typical usage:
  /// \code
  ///   void foo() {...};
  ///   ...
  ///   static once_flag flag;
  ///   call_once(flag, foo);
  /// \endcode
  ///
  /// \param flag Flag used for tracking whether or not this has run.
  /// \param F Function to call once.
  template <typename Function, typename... Args>
  void call_once(once_flag &flag, Function &&F, Args &&... ArgList) {
#if LLVM_THREADING_USE_STD_CALL_ONCE
    std::call_once(flag, std::forward<Function>(F),
                   std::forward<Args>(ArgList)...);
#else
    // For other platforms we use a generic (if brittle) version based on our
    // atomics.
    sys::cas_flag old_val = sys::CompareAndSwap(&flag.status, Wait, Uninitialized);
    if (old_val == Uninitialized) {
      std::forward<Function>(F)(std::forward<Args>(ArgList)...);
      sys::MemoryFence();
      TsanIgnoreWritesBegin();
      TsanHappensBefore(&flag.status);
      flag.status = Done;
      TsanIgnoreWritesEnd();
    } else {
      // Wait until any thread doing the call has finished.
      sys::cas_flag tmp = flag.status;
      sys::MemoryFence();
      while (tmp != Done) {
        tmp = flag.status;
        sys::MemoryFence();
      }
    }
    TsanHappensAfter(&flag.status);
#endif
  }

  /// This tells how a thread pool will be used
  class ThreadPoolStrategy {
  public:
    // The default value (0) means all available threads should be used,
    // taking the affinity mask into account. If set, this value only represents
    // a suggested high bound, the runtime might choose a lower value (not
    // higher).
    unsigned ThreadsRequested = 0;

    // If SMT is active, use hyper threads. If false, there will be only one
    // std::thread per core.
    bool UseHyperThreads = true;

    // If set, will constrain 'ThreadsRequested' to the number of hardware
    // threads, or hardware cores.
    bool Limit = false;

    /// Retrieves the max available threads for the current strategy. This
    /// accounts for affinity masks and takes advantage of all CPU sockets.
    unsigned compute_thread_count() const;

    /// Assign the current thread to an ideal hardware CPU or NUMA node. In a
    /// multi-socket system, this ensures threads are assigned to all CPU
    /// sockets. \p ThreadPoolNum represents a number bounded by [0,
    /// compute_thread_count()).
    void apply_thread_strategy(unsigned ThreadPoolNum) const;

    /// Finds the CPU socket where a thread should go. Returns 'std::nullopt' if
    /// the thread shall remain on the actual CPU socket.
    std::optional<unsigned> compute_cpu_socket(unsigned ThreadPoolNum) const;
  };

  /// Build a strategy from a number of threads as a string provided in \p Num.
  /// When Num is above the max number of threads specified by the \p Default
  /// strategy, we attempt to equally allocate the threads on all CPU sockets.
  /// "0" or an empty string will return the \p Default strategy.
  /// "all" for using all hardware threads.
  std::optional<ThreadPoolStrategy>
  get_threadpool_strategy(StringRef Num, ThreadPoolStrategy Default = {});

  /// Returns a thread strategy for tasks requiring significant memory or other
  /// resources. To be used for workloads where hardware_concurrency() proves to
  /// be less efficient. Avoid this strategy if doing lots of I/O. Currently
  /// based on physical cores, if available for the host system, otherwise falls
  /// back to hardware_concurrency(). Returns 1 when LLVM is configured with
  /// LLVM_ENABLE_THREADS = OFF.
  inline ThreadPoolStrategy
  heavyweight_hardware_concurrency(unsigned ThreadCount = 0) {
    ThreadPoolStrategy S;
    S.UseHyperThreads = false;
    S.ThreadsRequested = ThreadCount;
    return S;
  }

  /// Like heavyweight_hardware_concurrency() above, but builds a strategy
  /// based on the rules described for get_threadpool_strategy().
  /// If \p Num is invalid, returns a default strategy where one thread per
  /// hardware core is used.
  inline ThreadPoolStrategy heavyweight_hardware_concurrency(StringRef Num) {
    std::optional<ThreadPoolStrategy> S =
        get_threadpool_strategy(Num, heavyweight_hardware_concurrency());
    if (S)
      return *S;
    return heavyweight_hardware_concurrency();
  }

  /// Returns a default thread strategy where all available hardware resources
  /// are to be used, except for those initially excluded by an affinity mask.
  /// This function takes affinity into consideration. Returns 1 when LLVM is
  /// configured with LLVM_ENABLE_THREADS=OFF.
  inline ThreadPoolStrategy hardware_concurrency(unsigned ThreadCount = 0) {
    ThreadPoolStrategy S;
    S.ThreadsRequested = ThreadCount;
    return S;
  }

  /// Returns an optimal thread strategy to execute specified amount of tasks.
  /// This strategy should prevent us from creating too many threads if we
  /// occasionally have an unexpectedly small number of tasks.
  inline ThreadPoolStrategy optimal_concurrency(unsigned TaskCount = 0) {
    ThreadPoolStrategy S;
    S.Limit = true;
    S.ThreadsRequested = TaskCount;
    return S;
  }
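
  /// Example: sizing a worker pool for a fixed batch of tasks (a sketch;
  /// NumTasks is a placeholder count, and llvm::ThreadPool in
  /// llvm/Support/ThreadPool.h consumes these strategies).
  /// \code
  ///   ThreadPoolStrategy S = optimal_concurrency(NumTasks);
  ///   unsigned Workers = S.compute_thread_count(); // Bounded by NumTasks.
  /// \endcode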

  /// Return the current thread id, as used in various OS system calls.
  /// Note that not all platforms guarantee that the value returned will be
  /// unique across the entire system, so portable code should not assume
  /// this.
  uint64_t get_threadid();

  /// Get the maximum length of a thread name on this platform.
  /// A value of 0 means there is no limit.
  uint32_t get_max_thread_name_length();

  /// Set the name of the current thread.  Setting a thread's name can
  /// be helpful for enabling useful diagnostics under a debugger or when
  /// logging.  The level of support for setting a thread's name varies
  /// wildly across operating systems, and we only make a best effort to
  /// perform the operation on supported platforms.  No indication of success
  /// or failure is returned.
  void set_thread_name(const Twine &Name);

  /// Get the name of the current thread.  The level of support for
  /// getting a thread's name varies wildly across operating systems, and it
  /// is not even guaranteed that if you can successfully set a thread's name
  /// that you can later get it back.  This function is intended for diagnostic
  /// purposes, and as with setting a thread's name no indication of whether
  /// the operation succeeded or failed is returned.
  void get_thread_name(SmallVectorImpl<char> &Name);

  /// Returns a mask that represents on which hardware thread, core, CPU, NUMA
  /// group, the calling thread can be executed. On Windows, threads cannot
  /// cross CPU sockets boundaries.
  llvm::BitVector get_thread_affinity_mask();

  /// Returns how many physical CPUs or NUMA groups the system has.
  unsigned get_cpus();

  /// Returns how many physical cores the system has (as opposed to the logical
  /// cores returned from thread::hardware_concurrency(), which include
  /// hyperthreads). Returns -1 if unknown for the current host system.
  int get_physical_cores();

  enum class ThreadPriority {
    /// Lower the current thread's priority as much as possible. Can be used
    /// for long-running tasks that are not time critical; more energy-
    /// efficient than Low.
    Background = 0,

    /// Lower the current thread's priority such that it does not affect
    /// foreground tasks significantly. This is a good default for long-
    /// running, latency-insensitive tasks to make sure cpu is not hogged
    /// by this task.
    Low = 1,

    /// Restore the current thread's priority to default scheduling priority.
    Default = 2,
  };
  enum class SetThreadPriorityResult { FAILURE, SUCCESS };
  SetThreadPriorityResult set_thread_priority(ThreadPriority Priority);
}

#endif
//===- llvm/Support/CBindingWrapping.h - C Interface Wrapping ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the wrapping macros for the C interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_CBINDINGWRAPPING_H
#define LLVM_SUPPORT_CBINDINGWRAPPING_H

#include "llvm-c/Types.h"
#include "llvm/Support/Casting.h"

#define DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref)     \
  inline ty *unwrap(ref P) {                            \
    return reinterpret_cast<ty*>(P);                    \
  }                                                     \
                                                        \
  inline ref wrap(const ty *P) {                        \
    return reinterpret_cast<ref>(const_cast<ty*>(P));   \
  }

#define DEFINE_ISA_CONVERSION_FUNCTIONS(ty, ref)        \
  DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref)           \
                                                        \
  template<typename T>                                  \
  inline T *unwrap(ref P) {                             \
    return cast<T>(unwrap(P));                          \
  }

#define DEFINE_STDCXX_CONVERSION_FUNCTIONS(ty, ref)     \
  DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref)           \
                                                        \
  template<typename T>                                  \
  inline T *unwrap(ref P) {                             \
    T *Q = (T*)unwrap(P);                               \
    assert(Q && "Invalid cast!");                       \
    return Q;                                           \
  }
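
// Example: declaring wrap()/unwrap() for a C API handle (a sketch; this
// mirrors how LLVM's own C bindings use these macros):
//
//   DEFINE_SIMPLE_CONVERSION_FUNCTIONS(llvm::Module, LLVMModuleRef)
//
//   LLVMModuleRef Ref = wrap(M);    // Module* -> opaque C handle.
//   llvm::Module *M2 = unwrap(Ref); // Opaque C handle -> Module*.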

#endif
//===--- Watchdog.h - Watchdog timer ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file declares the llvm::sys::Watchdog class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_WATCHDOG_H
#define LLVM_SUPPORT_WATCHDOG_H

#include "llvm/Support/Compiler.h"

namespace llvm {
  namespace sys {

    /// This class provides an abstraction for a timeout around an operation
    /// that must complete in a given amount of time. Failure to complete before
    /// the timeout is an unrecoverable situation and no mechanisms to attempt
    /// to handle it are provided.
    class Watchdog {
    public:
      Watchdog(unsigned int seconds);
      ~Watchdog();
    private:
      // Noncopyable.
      Watchdog(const Watchdog &other) = delete;
      Watchdog &operator=(const Watchdog &other) = delete;
    };
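
    /// Example: guard an operation that must finish within 30 seconds (a
    /// sketch; runPotentiallyHangingParse() is a hypothetical workload).
    /// \code
    ///   {
    ///     llvm::sys::Watchdog W(30);
    ///     runPotentiallyHangingParse();
    ///   } // The watchdog is disarmed when W goes out of scope.
    /// \endcode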
  }
}

#endif
//===-- GlobPattern.h - glob pattern matcher implementation -*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a glob pattern matcher. The glob pattern is the
// rule used by the shell.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_GLOBPATTERN_H
#define LLVM_SUPPORT_GLOBPATTERN_H

#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Error.h"
#include <optional>
#include <vector>

// This class represents a glob pattern. Supported metacharacters
// are "*", "?", "\", "[<chars>]", "[^<chars>]", and "[!<chars>]".
namespace llvm {

template <typename T> class ArrayRef;
class StringRef;

class GlobPattern {
public:
  static Expected<GlobPattern> create(StringRef Pat);
  bool match(StringRef S) const;

  // Returns true for glob pattern "*". Can be used to avoid expensive
  // preparation/acquisition of the input for match().
  bool isTrivialMatchAll() const {
    if (Prefix && Prefix->empty()) {
      assert(!Suffix);
      return true;
    }
    return false;
  }

private:
  bool matchOne(ArrayRef<BitVector> Pat, StringRef S) const;

  // Parsed glob pattern.
  std::vector<BitVector> Tokens;

  // The following members are for optimization.
  std::optional<StringRef> Exact;
  std::optional<StringRef> Prefix;
  std::optional<StringRef> Suffix;
};
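
// Example: compiling and matching a pattern (a sketch; create() returns an
// Expected, so a malformed pattern must be handled as an Error):
//
//   llvm::Expected<llvm::GlobPattern> Pat = llvm::GlobPattern::create("*.cpp");
//   if (!Pat)
//     return Pat.takeError();
//   bool Hit = Pat->match("Parser.cpp"); // true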
}

#endif // LLVM_SUPPORT_GLOBPATTERN_H
//===-- MSP430AttributeParser.h - MSP430 Attribute Parser -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains support routines for parsing MSP430 ELF build attributes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MSP430ATTRIBUTEPARSER_H
#define LLVM_SUPPORT_MSP430ATTRIBUTEPARSER_H

#include "llvm/Support/ELFAttributeParser.h"
#include "llvm/Support/MSP430Attributes.h"

namespace llvm {
class MSP430AttributeParser : public ELFAttributeParser {
  struct DisplayHandler {
    MSP430Attrs::AttrType Attribute;
    Error (MSP430AttributeParser::*Routine)(MSP430Attrs::AttrType);
  };
  static const std::array<DisplayHandler, 4> DisplayRoutines;

  Error parseISA(MSP430Attrs::AttrType Tag);
  Error parseCodeModel(MSP430Attrs::AttrType Tag);
  Error parseDataModel(MSP430Attrs::AttrType Tag);
  Error parseEnumSize(MSP430Attrs::AttrType Tag);

  Error handler(uint64_t Tag, bool &Handled) override;

public:
  MSP430AttributeParser(ScopedPrinter *SW)
      : ELFAttributeParser(SW, MSP430Attrs::getMSP430AttributeTags(),
                           "mspabi") {}
  MSP430AttributeParser()
      : ELFAttributeParser(MSP430Attrs::getMSP430AttributeTags(), "mspabi") {}
};
} // namespace llvm

#endif
//===- llvm/Support/Process.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Provides a library for accessing information about this process and other
/// processes on the operating system. Also provides means of spawning
/// subprocesses for commands. The design of this library is modeled after the
/// proposed design of the Boost.Process library, and is designed specifically to
/// follow the style of standard libraries and potentially become a proposal
/// for a standard library.
///
/// This file declares the llvm::sys::Process class which contains a collection
/// of legacy static interfaces for extracting various information about the
/// current process. The goal is to migrate users of this API over to the new
/// interfaces.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_PROCESS_H
#define LLVM_SUPPORT_PROCESS_H

#include "llvm/Support/Chrono.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Program.h"
#include <optional>
#include <system_error>

namespace llvm {
template <typename T> class ArrayRef;
class StringRef;

namespace sys {


/// A collection of legacy interfaces for querying information about the
/// current executing process.
class Process {
public:
  using Pid = int32_t;

  /// Get the process's identifier.
  static Pid getProcessId();

  /// Get the process's page size.
  /// This may fail if the underlying syscall returns an error. In most cases,
  /// page size information is used for optimization, and this error can be
  /// safely discarded by calling consumeError, and an estimated page size
  /// substituted instead.
  static Expected<unsigned> getPageSize();

  /// Get the process's estimated page size.
  /// This function always succeeds, but if the underlying syscall to determine
  /// the page size fails then this will silently return an estimated page size.
  /// The estimated page size is guaranteed to be a power of 2.
  static unsigned getPageSizeEstimate() {
    if (auto PageSize = getPageSize())
      return *PageSize;
    else {
      consumeError(PageSize.takeError());
      return 4096;
    }
  }

  /// Return process memory usage.
  /// This static function will return the total amount of memory allocated
  /// by the process. This only counts the memory allocated via the malloc,
  /// calloc and realloc functions and includes any "free" holes in the
  /// allocated space.
  static size_t GetMallocUsage();

  /// This static function will set \p user_time to the amount of CPU time
  /// spent in user (non-kernel) mode and \p sys_time to the amount of CPU
  /// time spent in system (kernel) mode.  If the operating system does not
  /// support collection of these metrics, a zero duration will be returned for
  /// both values.
  /// \param elapsed Returns the system_clock::now() giving current time
  /// \param user_time Returns the current amount of user time for the process
  /// \param sys_time Returns the current amount of system time for the process
  static void GetTimeUsage(TimePoint<> &elapsed,
                           std::chrono::nanoseconds &user_time,
                           std::chrono::nanoseconds &sys_time);

  /// This function makes the necessary calls to the operating system to
  /// prevent core files or any other kind of large memory dumps that can
  /// occur when a program fails.
  /// Prevent core file generation.
  static void PreventCoreFiles();

  /// true if PreventCoreFiles has been called, false otherwise.
  static bool AreCoreFilesPrevented();

  // This function returns the environment variable \arg name's value as a
  // UTF-8 string. \arg name is assumed to be in UTF-8 encoding too.
  static std::optional<std::string> GetEnv(StringRef name);

  /// This function searches for an existing file in the list of directories
  /// in a PATH like environment variable, and returns the first file found,
  /// according to the order of the entries in the PATH like environment
  /// variable.  If an ignore list is specified, then any folder which is in
  /// the PATH like environment variable but is also in IgnoreList is not
  /// considered.
  static std::optional<std::string>
  FindInEnvPath(StringRef EnvName, StringRef FileName,
                ArrayRef<std::string> IgnoreList,
                char Separator = EnvPathSeparator);

  static std::optional<std::string>
  FindInEnvPath(StringRef EnvName, StringRef FileName,
                char Separator = EnvPathSeparator);

  // This function ensures that the standard file descriptors (input, output,
  // and error) are properly mapped to a file descriptor before we use any of
  // them.  This should only be called by standalone programs, library
  // components should not call this.
  static std::error_code FixupStandardFileDescriptors();

  // This function safely closes a file descriptor.  It is not safe to retry
  // close(2) when it returns with errno equivalent to EINTR; this is because
  // *nixen cannot agree if the file descriptor is, in fact, closed when this
  // occurs.
  //
  // N.B. Some operating systems, due to thread cancellation, cannot properly
  // guarantee that it will or will not be closed one way or the other!
  static std::error_code SafelyCloseFileDescriptor(int FD);

  /// This function determines if the standard input is connected directly
  /// to a user's input (keyboard probably), rather than coming from a file
  /// or pipe.
  static bool StandardInIsUserInput();

  /// This function determines if the standard output is connected to a
  /// "tty" or "console" window. That is, the output would be displayed to
  /// the user rather than being put on a pipe or stored in a file.
  static bool StandardOutIsDisplayed();

  /// This function determines if the standard error is connected to a
  /// "tty" or "console" window. That is, the output would be displayed to
  /// the user rather than being put on a pipe or stored in a file.
  static bool StandardErrIsDisplayed();

  /// This function determines if the given file descriptor is connected to
  /// a "tty" or "console" window. That is, the output would be displayed to
  /// the user rather than being put on a pipe or stored in a file.
  static bool FileDescriptorIsDisplayed(int fd);

  /// This function determines if the given file descriptor is displayed and
  /// supports colors.
  static bool FileDescriptorHasColors(int fd);

  /// This function determines the number of columns in the window
  /// if standard output is connected to a "tty" or "console"
  /// window. If standard output is not connected to a tty or
  /// console, or if the number of columns cannot be determined,
  /// this routine returns zero.
  static unsigned StandardOutColumns();

  /// This function determines the number of columns in the window
  /// if standard error is connected to a "tty" or "console"
  /// window. If standard error is not connected to a tty or
  /// console, or if the number of columns cannot be determined,
  /// this routine returns zero.
  static unsigned StandardErrColumns();

  /// This function determines whether the terminal connected to standard
  /// output supports colors. If standard output is not connected to a
  /// terminal, this function returns false.
  static bool StandardOutHasColors();

  /// This function determines whether the terminal connected to standard
  /// error supports colors. If standard error is not connected to a
  /// terminal, this function returns false.
  static bool StandardErrHasColors();

  /// Enables or disables whether ANSI escape sequences are used to output
  /// colors. This only has an effect on Windows.
  /// Note: Setting this option is not thread-safe and should only be done
  /// during initialization.
  static void UseANSIEscapeCodes(bool enable);

  /// Whether changing colors requires the output to be flushed.
  /// This is needed on systems that don't support escape sequences for
  /// changing colors.
  static bool ColorNeedsFlush();

  /// This function returns the colorcode escape sequences.
  /// If ColorNeedsFlush() is true then this function will change the colors
  /// and return an empty escape sequence. In that case it is the
  /// responsibility of the client to flush the output stream prior to
  /// calling this function.
  static const char *OutputColor(char c, bool bold, bool bg);

  /// Same as OutputColor, but only enables the bold attribute.
  static const char *OutputBold(bool bg);

  /// This function returns the escape sequence to reverse foreground and
  /// background colors.
  static const char *OutputReverse();

  /// Resets the terminal's colors, or returns an escape sequence to do so.
  static const char *ResetColor();

  /// Get the result of a process wide random number generator. The
  /// generator will be automatically seeded in non-deterministic fashion.
  static unsigned GetRandomNumber();

  /// Equivalent to ::exit(), except when running inside a CrashRecoveryContext.
  /// In that case, the control flow will resume after RunSafely(), like for a
  /// crash, rather than exiting the current process.
  /// Use \arg NoCleanup for calling _exit() instead of exit().
  [[noreturn]] static void Exit(int RetCode, bool NoCleanup = false);

private:
  [[noreturn]] static void ExitNoCleanup(int RetCode);
};
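
/// Example: querying the environment (a sketch; GetEnv returns std::nullopt
/// when the variable is unset).
/// \code
///   if (std::optional<std::string> Path = llvm::sys::Process::GetEnv("PATH"))
///     llvm::outs() << *Path << "\n";
/// \endcode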

}
}

#endif
//==- SHA1.h - SHA1 implementation for LLVM                     --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This code is taken from public domain
// (http://oauth.googlecode.com/svn/code/c/liboauth/src/sha1.c)
// and modified by wrapping it in a C++ interface for LLVM,
// and removing unnecessary code.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SHA1_H
#define LLVM_SUPPORT_SHA1_H

#include <array>
#include <cstdint>

namespace llvm {
template <typename T> class ArrayRef;
class StringRef;

/// A class that wraps the SHA1 algorithm.
class SHA1 {
public:
  SHA1() { init(); }

  /// Reinitialize the internal state
  void init();

  /// Digest more data.
  void update(ArrayRef<uint8_t> Data);

  /// Digest more data.
  void update(StringRef Str);

  /// Return the current raw 160-bit SHA1 for the digested data
  /// since the last call to init(). This call will add data to the internal
  /// state and as such is not suited for getting an intermediate result
  /// (see result()).
  std::array<uint8_t, 20> final();

  /// Return the current raw 160-bit SHA1 for the digested data
  /// since the last call to init(). This is suitable for getting the SHA1 at
  /// any time without invalidating the internal state so that more calls can be
  /// made into update.
  std::array<uint8_t, 20> result();

  /// Returns a raw 160-bit SHA1 hash for the given data.
  static std::array<uint8_t, 20> hash(ArrayRef<uint8_t> Data);

private:
  /// Define some constants.
  /// "static constexpr" would be cleaner but MSVC does not support it yet.
  enum { BLOCK_LENGTH = 64 };
  enum { HASH_LENGTH = 20 };

  // Internal State
  struct {
    union {
      uint8_t C[BLOCK_LENGTH];
      uint32_t L[BLOCK_LENGTH / 4];
    } Buffer;
    uint32_t State[HASH_LENGTH / 4];
    uint32_t ByteCount;
    uint8_t BufferOffset;
  } InternalState;

  // Helper
  void writebyte(uint8_t data);
  void hashBlock();
  void addUncounted(uint8_t data);
  void pad();

  void final(std::array<uint32_t, HASH_LENGTH / 4> &HashResult);
};
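
/// Example: hashing data incrementally (a minimal sketch).
/// \code
///   llvm::SHA1 Hasher;
///   Hasher.update("hello ");
///   Hasher.update("world");
///   std::array<uint8_t, 20> Digest = Hasher.final();
/// \endcode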

} // end llvm namespace

#endif
//===- FormatAdapters.h - Formatters for common LLVM types -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_FORMATADAPTERS_H
#define LLVM_SUPPORT_FORMATADAPTERS_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatCommon.h"
#include "llvm/Support/FormatVariadicDetails.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
template <typename T> class FormatAdapter : public detail::format_adapter {
protected:
  explicit FormatAdapter(T &&Item) : Item(std::forward<T>(Item)) {}

  T Item;
};

namespace detail {
template <typename T> class AlignAdapter final : public FormatAdapter<T> {
  AlignStyle Where;
  size_t Amount;
  char Fill;

public:
  AlignAdapter(T &&Item, AlignStyle Where, size_t Amount, char Fill)
      : FormatAdapter<T>(std::forward<T>(Item)), Where(Where), Amount(Amount),
        Fill(Fill) {}

  void format(llvm::raw_ostream &Stream, StringRef Style) override {
    auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
    FmtAlign(Adapter, Where, Amount, Fill).format(Stream, Style);
  }
};

template <typename T> class PadAdapter final : public FormatAdapter<T> {
  size_t Left;
  size_t Right;

public:
  PadAdapter(T &&Item, size_t Left, size_t Right)
      : FormatAdapter<T>(std::forward<T>(Item)), Left(Left), Right(Right) {}

  void format(llvm::raw_ostream &Stream, StringRef Style) override {
    auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
    Stream.indent(Left);
    Adapter.format(Stream, Style);
    Stream.indent(Right);
  }
};

template <typename T> class RepeatAdapter final : public FormatAdapter<T> {
  size_t Count;

public:
  RepeatAdapter(T &&Item, size_t Count)
      : FormatAdapter<T>(std::forward<T>(Item)), Count(Count) {}

  void format(llvm::raw_ostream &Stream, StringRef Style) override {
    auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
    for (size_t I = 0; I < Count; ++I) {
      Adapter.format(Stream, Style);
    }
  }
};

class ErrorAdapter : public FormatAdapter<Error> {
public:
  ErrorAdapter(Error &&Item) : FormatAdapter(std::move(Item)) {}
  ErrorAdapter(ErrorAdapter &&) = default;
  ~ErrorAdapter() { consumeError(std::move(Item)); }
  void format(llvm::raw_ostream &Stream, StringRef Style) override {
    Stream << Item;
  }
};
}

template <typename T>
detail::AlignAdapter<T> fmt_align(T &&Item, AlignStyle Where, size_t Amount,
                                  char Fill = ' ') {
  return detail::AlignAdapter<T>(std::forward<T>(Item), Where, Amount, Fill);
}

template <typename T>
detail::PadAdapter<T> fmt_pad(T &&Item, size_t Left, size_t Right) {
  return detail::PadAdapter<T>(std::forward<T>(Item), Left, Right);
}

template <typename T>
detail::RepeatAdapter<T> fmt_repeat(T &&Item, size_t Count) {
  return detail::RepeatAdapter<T>(std::forward<T>(Item), Count);
}

// llvm::Error values must be consumed before being destroyed.
// Wrapping an error in fmt_consume explicitly indicates that the formatv_object
// should take ownership and consume it.
inline detail::ErrorAdapter fmt_consume(Error &&Item) {
  return detail::ErrorAdapter(std::move(Item));
}
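
// Example: combining adapters with formatv (a sketch; formatv is declared in
// llvm/Support/FormatVariadic.h):
//
//   llvm::outs() << llvm::formatv(
//       "[{0}] {1}\n", llvm::fmt_align("hi", llvm::AlignStyle::Center, 6),
//       llvm::fmt_repeat('-', 10));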
}

#endif
//===-- ScopedPrinter.h ----------------------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_SCOPEDPRINTER_H
#define LLVM_SUPPORT_SCOPEDPRINTER_H

#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

template <typename T> struct EnumEntry {
  StringRef Name;
  // While Name suffices in most cases, GNU style and LLVM style of ELFDumper
  // sometimes display different strings for the same enum. If initialized
  // appropriately, AltName will hold the string that GNU style emits.
  // Example:
  // "EM_X86_64" string on LLVM style for Elf_Ehdr->e_machine corresponds to
  // "Advanced Micro Devices X86-64" on GNU style
  StringRef AltName;
  T Value;
  constexpr EnumEntry(StringRef N, StringRef A, T V)
      : Name(N), AltName(A), Value(V) {}
  constexpr EnumEntry(StringRef N, T V) : Name(N), AltName(N), Value(V) {}
};
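
// Example: an EnumEntry table carrying GNU-style alternate names (a sketch
// modeled on the ELF dumpers; EM_X86_64 has the ELF machine value 62):
//
//   static const EnumEntry<unsigned> ElfMachineTypes[] = {
//       {"EM_X86_64", "Advanced Micro Devices X86-64", 62},
//   };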

struct HexNumber {
  // To avoid sign-extension we have to explicitly cast to the appropriate
  // unsigned type. The overloads are here so that every type that is implicitly
  // convertible to an integer (including enums and endian helpers) can be used
  // without requiring type traits or call-site changes.
  HexNumber(char Value) : Value(static_cast<unsigned char>(Value)) {}
  HexNumber(signed char Value) : Value(static_cast<unsigned char>(Value)) {}
  HexNumber(signed short Value) : Value(static_cast<unsigned short>(Value)) {}
  HexNumber(signed int Value) : Value(static_cast<unsigned int>(Value)) {}
  HexNumber(signed long Value) : Value(static_cast<unsigned long>(Value)) {}
  HexNumber(signed long long Value)
      : Value(static_cast<unsigned long long>(Value)) {}
  HexNumber(unsigned char Value) : Value(Value) {}
  HexNumber(unsigned short Value) : Value(Value) {}
  HexNumber(unsigned int Value) : Value(Value) {}
  HexNumber(unsigned long Value) : Value(Value) {}
  HexNumber(unsigned long long Value) : Value(Value) {}
  uint64_t Value;
};

struct FlagEntry {
  FlagEntry(StringRef Name, char Value)
      : Name(Name), Value(static_cast<unsigned char>(Value)) {}
  FlagEntry(StringRef Name, signed char Value)
      : Name(Name), Value(static_cast<unsigned char>(Value)) {}
  FlagEntry(StringRef Name, signed short Value)
      : Name(Name), Value(static_cast<unsigned short>(Value)) {}
  FlagEntry(StringRef Name, signed int Value)
      : Name(Name), Value(static_cast<unsigned int>(Value)) {}
  FlagEntry(StringRef Name, signed long Value)
      : Name(Name), Value(static_cast<unsigned long>(Value)) {}
  FlagEntry(StringRef Name, signed long long Value)
      : Name(Name), Value(static_cast<unsigned long long>(Value)) {}
  FlagEntry(StringRef Name, unsigned char Value) : Name(Name), Value(Value) {}
  FlagEntry(StringRef Name, unsigned short Value) : Name(Name), Value(Value) {}
  FlagEntry(StringRef Name, unsigned int Value) : Name(Name), Value(Value) {}
  FlagEntry(StringRef Name, unsigned long Value) : Name(Name), Value(Value) {}
  FlagEntry(StringRef Name, unsigned long long Value)
      : Name(Name), Value(Value) {}
  StringRef Name;
  uint64_t Value;
};

raw_ostream &operator<<(raw_ostream &OS, const HexNumber &Value);

template <class T> std::string to_string(const T &Value) {
  std::string number;
  raw_string_ostream stream(number);
  stream << Value;
  return stream.str();
}

template <typename T, typename TEnum>
std::string enumToString(T Value, ArrayRef<EnumEntry<TEnum>> EnumValues) {
  for (const EnumEntry<TEnum> &EnumItem : EnumValues)
    if (EnumItem.Value == Value)
      return std::string(EnumItem.AltName);
  return utohexstr(Value, true);
}

class ScopedPrinter {
public:
  enum class ScopedPrinterKind {
    Base,
    JSON,
  };

  ScopedPrinter(raw_ostream &OS,
                ScopedPrinterKind Kind = ScopedPrinterKind::Base)
      : OS(OS), Kind(Kind) {}

  ScopedPrinterKind getKind() const { return Kind; }

  static bool classof(const ScopedPrinter *SP) {
    return SP->getKind() == ScopedPrinterKind::Base;
  }

  virtual ~ScopedPrinter() = default;

  void flush() { OS.flush(); }

  void indent(int Levels = 1) { IndentLevel += Levels; }

  void unindent(int Levels = 1) {
    IndentLevel = IndentLevel > Levels ? IndentLevel - Levels : 0;
  }

  void resetIndent() { IndentLevel = 0; }

  int getIndentLevel() { return IndentLevel; }

  void setPrefix(StringRef P) { Prefix = P; }

  void printIndent() {
    OS << Prefix;
    for (int i = 0; i < IndentLevel; ++i)
      OS << "  ";
  }

  template <typename T> HexNumber hex(T Value) { return HexNumber(Value); }

  template <typename T, typename TEnum>
  void printEnum(StringRef Label, T Value,
                 ArrayRef<EnumEntry<TEnum>> EnumValues) {
    StringRef Name;
    bool Found = false;
    for (const auto &EnumItem : EnumValues) {
      if (EnumItem.Value == Value) {
        Name = EnumItem.Name;
        Found = true;
        break;
      }
    }

    if (Found)
      printHex(Label, Name, Value);
    else
      printHex(Label, Value);
  }

  template <typename T, typename TFlag>
  void printFlags(StringRef Label, T Value, ArrayRef<EnumEntry<TFlag>> Flags,
                  TFlag EnumMask1 = {}, TFlag EnumMask2 = {},
                  TFlag EnumMask3 = {}) {
    SmallVector<FlagEntry, 10> SetFlags;

    for (const auto &Flag : Flags) {
      if (Flag.Value == 0)
        continue;

      TFlag EnumMask{};
      if (Flag.Value & EnumMask1)
        EnumMask = EnumMask1;
      else if (Flag.Value & EnumMask2)
        EnumMask = EnumMask2;
      else if (Flag.Value & EnumMask3)
        EnumMask = EnumMask3;
      bool IsEnum = (Flag.Value & EnumMask) != 0;
      if ((!IsEnum && (Value & Flag.Value) == Flag.Value) ||
          (IsEnum && (Value & EnumMask) == Flag.Value)) {
        SetFlags.emplace_back(Flag.Name, Flag.Value);
      }
    }

    llvm::sort(SetFlags, &flagName);
    printFlagsImpl(Label, hex(Value), SetFlags);
  }

  template <typename T> void printFlags(StringRef Label, T Value) {
    SmallVector<HexNumber, 10> SetFlags;
    uint64_t Flag = 1;
    uint64_t Curr = Value;
    while (Curr > 0) {
      if (Curr & 1)
        SetFlags.emplace_back(Flag);
      Curr >>= 1;
      Flag <<= 1;
    }
    printFlagsImpl(Label, hex(Value), SetFlags);
  }

  virtual void printNumber(StringRef Label, char Value) {
    startLine() << Label << ": " << static_cast<int>(Value) << "\n";
  }

  virtual void printNumber(StringRef Label, signed char Value) {
    startLine() << Label << ": " << static_cast<int>(Value) << "\n";
  }

  virtual void printNumber(StringRef Label, unsigned char Value) {
    startLine() << Label << ": " << static_cast<unsigned>(Value) << "\n";
  }

  virtual void printNumber(StringRef Label, short Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, unsigned short Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, int Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, unsigned int Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, long Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, unsigned long Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, long long Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, unsigned long long Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, const APSInt &Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printNumber(StringRef Label, float Value) {
    startLine() << Label << ": " << format("%5.1f", Value) << "\n";
  }

  virtual void printNumber(StringRef Label, double Value) {
    startLine() << Label << ": " << format("%5.1f", Value) << "\n";
  }

  template <typename T>
  void printNumber(StringRef Label, StringRef Str, T Value) {
    printNumberImpl(Label, Str, to_string(Value));
  }

  virtual void printBoolean(StringRef Label, bool Value) {
    startLine() << Label << ": " << (Value ? "Yes" : "No") << '\n';
  }

  template <typename... T> void printVersion(StringRef Label, T... Version) {
    startLine() << Label << ": ";
    printVersionInternal(Version...);
    getOStream() << "\n";
  }

  template <typename T>
  void printList(StringRef Label, const ArrayRef<T> List) {
    SmallVector<std::string, 10> StringList;
    for (const auto &Item : List)
      StringList.emplace_back(to_string(Item));
    printList(Label, StringList);
  }

  virtual void printList(StringRef Label, const ArrayRef<bool> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<std::string> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<uint64_t> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<uint32_t> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<uint16_t> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<uint8_t> List) {
    SmallVector<unsigned> NumberList;
    for (const uint8_t &Item : List)
      NumberList.emplace_back(Item);
    printListImpl(Label, NumberList);
  }

  virtual void printList(StringRef Label, const ArrayRef<int64_t> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<int32_t> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<int16_t> List) {
    printListImpl(Label, List);
  }

  virtual void printList(StringRef Label, const ArrayRef<int8_t> List) {
    SmallVector<int> NumberList;
    for (const int8_t &Item : List)
      NumberList.emplace_back(Item);
    printListImpl(Label, NumberList);
  }

  virtual void printList(StringRef Label, const ArrayRef<APSInt> List) {
    printListImpl(Label, List);
  }

  template <typename T, typename U>
  void printList(StringRef Label, const T &List, const U &Printer) {
    startLine() << Label << ": [";
    ListSeparator LS;
    for (const auto &Item : List) {
      OS << LS;
      Printer(OS, Item);
    }
    OS << "]\n";
  }

  template <typename T> void printHexList(StringRef Label, const T &List) {
    SmallVector<HexNumber> HexList;
    for (const auto &Item : List)
      HexList.emplace_back(Item);
    printHexListImpl(Label, HexList);
  }

  template <typename T> void printHex(StringRef Label, T Value) {
    printHexImpl(Label, hex(Value));
  }

  template <typename T> void printHex(StringRef Label, StringRef Str, T Value) {
    printHexImpl(Label, Str, hex(Value));
  }

  template <typename T>
  void printSymbolOffset(StringRef Label, StringRef Symbol, T Value) {
    printSymbolOffsetImpl(Label, Symbol, hex(Value));
  }

  virtual void printString(StringRef Value) { startLine() << Value << "\n"; }

  virtual void printString(StringRef Label, StringRef Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  void printStringEscaped(StringRef Label, StringRef Value) {
    printStringEscapedImpl(Label, Value);
  }

  void printBinary(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value) {
    printBinaryImpl(Label, Str, Value, false);
  }

  void printBinary(StringRef Label, StringRef Str, ArrayRef<char> Value) {
    auto V =
        ArrayRef(reinterpret_cast<const uint8_t *>(Value.data()), Value.size());
    printBinaryImpl(Label, Str, V, false);
  }

  void printBinary(StringRef Label, ArrayRef<uint8_t> Value) {
    printBinaryImpl(Label, StringRef(), Value, false);
  }

  void printBinary(StringRef Label, ArrayRef<char> Value) {
    auto V =
        ArrayRef(reinterpret_cast<const uint8_t *>(Value.data()), Value.size());
    printBinaryImpl(Label, StringRef(), V, false);
  }

  void printBinary(StringRef Label, StringRef Value) {
    auto V =
        ArrayRef(reinterpret_cast<const uint8_t *>(Value.data()), Value.size());
    printBinaryImpl(Label, StringRef(), V, false);
  }

  void printBinaryBlock(StringRef Label, ArrayRef<uint8_t> Value,
                        uint32_t StartOffset) {
    printBinaryImpl(Label, StringRef(), Value, true, StartOffset);
  }

  void printBinaryBlock(StringRef Label, ArrayRef<uint8_t> Value) {
    printBinaryImpl(Label, StringRef(), Value, true);
  }

  void printBinaryBlock(StringRef Label, StringRef Value) {
    auto V =
        ArrayRef(reinterpret_cast<const uint8_t *>(Value.data()), Value.size());
    printBinaryImpl(Label, StringRef(), V, true);
  }

  template <typename T> void printObject(StringRef Label, const T &Value) {
    printString(Label, to_string(Value));
  }

  virtual void objectBegin() { scopedBegin('{'); }

  virtual void objectBegin(StringRef Label) { scopedBegin(Label, '{'); }

  virtual void objectEnd() { scopedEnd('}'); }

  virtual void arrayBegin() { scopedBegin('['); }

  virtual void arrayBegin(StringRef Label) { scopedBegin(Label, '['); }

  virtual void arrayEnd() { scopedEnd(']'); }

  virtual raw_ostream &startLine() {
    printIndent();
    return OS;
  }

  virtual raw_ostream &getOStream() { return OS; }

private:
  template <typename T> void printVersionInternal(T Value) {
    getOStream() << Value;
  }

  template <typename S, typename T, typename... TArgs>
  void printVersionInternal(S Value, T Value2, TArgs... Args) {
    getOStream() << Value << ".";
    printVersionInternal(Value2, Args...);
  }

  static bool flagName(const FlagEntry &LHS, const FlagEntry &RHS) {
    return LHS.Name < RHS.Name;
  }

  virtual void printBinaryImpl(StringRef Label, StringRef Str,
                               ArrayRef<uint8_t> Value, bool Block,
                               uint32_t StartOffset = 0);

  virtual void printFlagsImpl(StringRef Label, HexNumber Value,
                              ArrayRef<FlagEntry> Flags) {
    startLine() << Label << " [ (" << Value << ")\n";
    for (const auto &Flag : Flags)
      startLine() << "  " << Flag.Name << " (" << hex(Flag.Value) << ")\n";
    startLine() << "]\n";
  }

  virtual void printFlagsImpl(StringRef Label, HexNumber Value,
                              ArrayRef<HexNumber> Flags) {
    startLine() << Label << " [ (" << Value << ")\n";
    for (const auto &Flag : Flags)
      startLine() << "  " << Flag << '\n';
    startLine() << "]\n";
  }

  template <typename T> void printListImpl(StringRef Label, const T List) {
    startLine() << Label << ": [";
    ListSeparator LS;
    for (const auto &Item : List)
      OS << LS << Item;
    OS << "]\n";
  }

  virtual void printHexListImpl(StringRef Label,
                                const ArrayRef<HexNumber> List) {
    startLine() << Label << ": [";
    ListSeparator LS;
    for (const auto &Item : List)
      OS << LS << hex(Item);
    OS << "]\n";
  }

  virtual void printHexImpl(StringRef Label, HexNumber Value) {
    startLine() << Label << ": " << Value << "\n";
  }

  virtual void printHexImpl(StringRef Label, StringRef Str, HexNumber Value) {
    startLine() << Label << ": " << Str << " (" << Value << ")\n";
  }

  virtual void printSymbolOffsetImpl(StringRef Label, StringRef Symbol,
                                     HexNumber Value) {
    startLine() << Label << ": " << Symbol << '+' << Value << '\n';
  }

  virtual void printNumberImpl(StringRef Label, StringRef Str,
                               StringRef Value) {
    startLine() << Label << ": " << Str << " (" << Value << ")\n";
  }

  virtual void printStringEscapedImpl(StringRef Label, StringRef Value) {
    startLine() << Label << ": ";
    OS.write_escaped(Value);
    OS << '\n';
  }

  void scopedBegin(char Symbol) {
    startLine() << Symbol << '\n';
    indent();
  }

  void scopedBegin(StringRef Label, char Symbol) {
    startLine() << Label;
    if (!Label.empty())
      OS << ' ';
    OS << Symbol << '\n';
    indent();
  }

  void scopedEnd(char Symbol) {
    unindent();
    startLine() << Symbol << '\n';
  }

  raw_ostream &OS;
  int IndentLevel = 0;
  StringRef Prefix;
  ScopedPrinterKind Kind;
};

template <>
inline void
ScopedPrinter::printHex<support::ulittle16_t>(StringRef Label,
                                              support::ulittle16_t Value) {
  startLine() << Label << ": " << hex(Value) << "\n";
}
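
// Example: a minimal sketch of direct ScopedPrinter use (assumes the usual
// ScopedPrinter(raw_ostream &) constructor declared earlier in this header):
//
//   ScopedPrinter W(llvm::outs());
//   W.objectBegin("Header");     // prints "Header {" and indents
//   W.printNumber("Version", 2); // prints "Version: 2"
//   W.printHex("Flags", 0x13u);  // prints "Flags: 0x13"
//   W.printBoolean("PIE", true); // prints "PIE: Yes"
//   W.objectEnd();               // unindents and prints "}"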

struct DelimitedScope;

class JSONScopedPrinter : public ScopedPrinter {
private:
  enum class Scope {
    Array,
    Object,
  };

  enum class ScopeKind {
    NoAttribute,
    Attribute,
    NestedAttribute,
  };

  struct ScopeContext {
    Scope Context;
    ScopeKind Kind;
    ScopeContext(Scope Context, ScopeKind Kind = ScopeKind::NoAttribute)
        : Context(Context), Kind(Kind) {}
  };

  SmallVector<ScopeContext, 8> ScopeHistory;
  json::OStream JOS;
  std::unique_ptr<DelimitedScope> OuterScope;

public:
  JSONScopedPrinter(raw_ostream &OS, bool PrettyPrint = false,
                    std::unique_ptr<DelimitedScope> &&OuterScope =
                        std::unique_ptr<DelimitedScope>{});

  static bool classof(const ScopedPrinter *SP) {
    return SP->getKind() == ScopedPrinter::ScopedPrinterKind::JSON;
  }

  void printNumber(StringRef Label, char Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, signed char Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, unsigned char Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, short Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, unsigned short Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, int Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, unsigned int Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, long Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, unsigned long Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, long long Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, unsigned long long Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, float Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, double Value) override {
    JOS.attribute(Label, Value);
  }

  void printNumber(StringRef Label, const APSInt &Value) override {
    JOS.attributeBegin(Label);
    printAPSInt(Value);
    JOS.attributeEnd();
  }

  void printBoolean(StringRef Label, bool Value) override {
    JOS.attribute(Label, Value);
  }

  void printList(StringRef Label, const ArrayRef<bool> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<std::string> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<uint64_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<uint32_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<uint16_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<uint8_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<int64_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<int32_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<int16_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<int8_t> List) override {
    printListImpl(Label, List);
  }

  void printList(StringRef Label, const ArrayRef<APSInt> List) override {
    JOS.attributeArray(Label, [&]() {
      for (const APSInt &Item : List) {
        printAPSInt(Item);
      }
    });
  }

  void printString(StringRef Value) override { JOS.value(Value); }

  void printString(StringRef Label, StringRef Value) override {
    JOS.attribute(Label, Value);
  }

  void objectBegin() override {
    scopedBegin({Scope::Object, ScopeKind::NoAttribute});
  }

  void objectBegin(StringRef Label) override {
    scopedBegin(Label, Scope::Object);
  }

  void objectEnd() override { scopedEnd(); }

  void arrayBegin() override {
    scopedBegin({Scope::Array, ScopeKind::NoAttribute});
  }

  void arrayBegin(StringRef Label) override {
    scopedBegin(Label, Scope::Array);
  }

  void arrayEnd() override { scopedEnd(); }

private:
  // Output HexNumbers as decimals so that they're easier to parse.
  uint64_t hexNumberToInt(HexNumber Hex) { return Hex.Value; }

  void printAPSInt(const APSInt &Value) {
    JOS.rawValueBegin() << Value;
    JOS.rawValueEnd();
  }

  void printFlagsImpl(StringRef Label, HexNumber Value,
                      ArrayRef<FlagEntry> Flags) override {
    JOS.attributeObject(Label, [&]() {
      JOS.attribute("Value", hexNumberToInt(Value));
      JOS.attributeArray("Flags", [&]() {
        for (const FlagEntry &Flag : Flags) {
          JOS.objectBegin();
          JOS.attribute("Name", Flag.Name);
          JOS.attribute("Value", Flag.Value);
          JOS.objectEnd();
        }
      });
    });
  }

  void printFlagsImpl(StringRef Label, HexNumber Value,
                      ArrayRef<HexNumber> Flags) override {
    JOS.attributeObject(Label, [&]() {
      JOS.attribute("Value", hexNumberToInt(Value));
      JOS.attributeArray("Flags", [&]() {
        for (const HexNumber &Flag : Flags) {
          JOS.value(Flag.Value);
        }
      });
    });
  }

  template <typename T> void printListImpl(StringRef Label, const T &List) {
    JOS.attributeArray(Label, [&]() {
      for (const auto &Item : List)
        JOS.value(Item);
    });
  }

  void printHexListImpl(StringRef Label,
                        const ArrayRef<HexNumber> List) override {
    JOS.attributeArray(Label, [&]() {
      for (const HexNumber &Item : List) {
        JOS.value(hexNumberToInt(Item));
      }
    });
  }

  void printHexImpl(StringRef Label, HexNumber Value) override {
    JOS.attribute(Label, hexNumberToInt(Value));
  }

  void printHexImpl(StringRef Label, StringRef Str, HexNumber Value) override {
    JOS.attributeObject(Label, [&]() {
      JOS.attribute("Name", Str);
      JOS.attribute("Value", hexNumberToInt(Value));
    });
  }

  void printSymbolOffsetImpl(StringRef Label, StringRef Symbol,
                             HexNumber Value) override {
    JOS.attributeObject(Label, [&]() {
      JOS.attribute("SymName", Symbol);
      JOS.attribute("Offset", hexNumberToInt(Value));
    });
  }

  void printNumberImpl(StringRef Label, StringRef Str,
                       StringRef Value) override {
    JOS.attributeObject(Label, [&]() {
      JOS.attribute("Name", Str);
      JOS.attributeBegin("Value");
      JOS.rawValueBegin() << Value;
      JOS.rawValueEnd();
      JOS.attributeEnd();
    });
  }

  void printBinaryImpl(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value,
                       bool Block, uint32_t StartOffset = 0) override {
    JOS.attributeObject(Label, [&]() {
      if (!Str.empty())
        JOS.attribute("Value", Str);
      JOS.attribute("Offset", StartOffset);
      JOS.attributeArray("Bytes", [&]() {
        for (uint8_t Val : Value)
          JOS.value(Val);
      });
    });
  }

  void scopedBegin(ScopeContext ScopeCtx) {
    if (ScopeCtx.Context == Scope::Object)
      JOS.objectBegin();
    else if (ScopeCtx.Context == Scope::Array)
      JOS.arrayBegin();
    ScopeHistory.push_back(ScopeCtx);
  }

  void scopedBegin(StringRef Label, Scope Ctx) {
    ScopeKind Kind = ScopeKind::Attribute;
    if (ScopeHistory.empty() || ScopeHistory.back().Context != Scope::Object) {
      JOS.objectBegin();
      Kind = ScopeKind::NestedAttribute;
    }
    JOS.attributeBegin(Label);
    scopedBegin({Ctx, Kind});
  }

  void scopedEnd() {
    ScopeContext ScopeCtx = ScopeHistory.back();
    if (ScopeCtx.Context == Scope::Object)
      JOS.objectEnd();
    else if (ScopeCtx.Context == Scope::Array)
      JOS.arrayEnd();
    if (ScopeCtx.Kind == ScopeKind::Attribute ||
        ScopeCtx.Kind == ScopeKind::NestedAttribute)
      JOS.attributeEnd();
    if (ScopeCtx.Kind == ScopeKind::NestedAttribute)
      JOS.objectEnd();
    ScopeHistory.pop_back();
  }
};

struct DelimitedScope {
  DelimitedScope(ScopedPrinter &W) : W(&W) {}
  DelimitedScope() : W(nullptr) {}
  virtual ~DelimitedScope() = default;
  virtual void setPrinter(ScopedPrinter &W) = 0;
  ScopedPrinter *W;
};

struct DictScope : DelimitedScope {
  explicit DictScope() = default;
  explicit DictScope(ScopedPrinter &W) : DelimitedScope(W) { W.objectBegin(); }

  DictScope(ScopedPrinter &W, StringRef N) : DelimitedScope(W) {
    W.objectBegin(N);
  }

  void setPrinter(ScopedPrinter &W) override {
    this->W = &W;
    W.objectBegin();
  }

  ~DictScope() {
    if (W)
      W->objectEnd();
  }
};

struct ListScope : DelimitedScope {
  explicit ListScope() = default;
  explicit ListScope(ScopedPrinter &W) : DelimitedScope(W) { W.arrayBegin(); }

  ListScope(ScopedPrinter &W, StringRef N) : DelimitedScope(W) {
    W.arrayBegin(N);
  }

  void setPrinter(ScopedPrinter &W) override {
    this->W = &W;
    W.arrayBegin();
  }

  ~ListScope() {
    if (W)
      W->arrayEnd();
  }
};
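
// Example: the RAII scopes above pair begin/end calls automatically, and the
// same client code can emit JSON by swapping in a JSONScopedPrinter. A sketch
// (the JSON printer opens its optional outer scope on construction and closes
// it on destruction; note that in JSON mode hex values are emitted as
// decimals, see hexNumberToInt above):
//
//   JSONScopedPrinter JW(llvm::outs(), /*PrettyPrint=*/true,
//                        std::make_unique<DictScope>());
//   {
//     ListScope Sections(JW, "Sections");
//     DictScope Entry(JW);
//     JW.printString("Name", ".text");
//     JW.printHex("Address", 0x1000u);
//   }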

} // namespace llvm

#endif

//===--- ConvertEBCDIC.h - UTF8/EBCDIC CharSet Conversion -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file provides utility functions for converting between EBCDIC-1047 and
/// UTF-8.
///
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <system_error>

namespace llvm {
namespace ConverterEBCDIC {
std::error_code convertToEBCDIC(StringRef Source,
                                SmallVectorImpl<char> &Result);

void convertToUTF8(StringRef Source, SmallVectorImpl<char> &Result);
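
// Example: a minimal sketch of round-tripping a string (SmallString derives
// from SmallVectorImpl<char>, so it can receive the output directly):
//
//   SmallString<64> Ebcdic, Utf8;
//   if (std::error_code EC =
//           ConverterEBCDIC::convertToEBCDIC("hello", Ebcdic))
//     report_fatal_error("EBCDIC conversion failed");
//   ConverterEBCDIC::convertToUTF8(StringRef(Ebcdic), Utf8);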

} // namespace ConverterEBCDIC
} // namespace llvm

#endif // LLVM_SUPPORT_CONVERTEBCDIC_H

//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AlignedCharArrayUnion class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_ALIGNOF_H
#define LLVM_SUPPORT_ALIGNOF_H

#include <type_traits>

namespace llvm {

/// A suitably aligned and sized character array member which can hold elements
/// of any type.
///
/// This template is equivalent to std::aligned_union_t<1, ...>, but we cannot
/// use it due to a bug in the MSVC x86 compiler:
/// https://github.com/microsoft/STL/issues/1533
/// Using `alignas` here works around the bug.
template <typename T, typename... Ts> struct AlignedCharArrayUnion {
  using AlignedUnion = std::aligned_union_t<1, T, Ts...>;
  alignas(alignof(AlignedUnion)) char buffer[sizeof(AlignedUnion)];
};
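
// For example, a sketch of typical use as uninitialized storage:
//
//   AlignedCharArrayUnion<int, double> Storage;
//   double *D = new (Storage.buffer) double(3.14); // placement new is safe:
//                                                  // buffer is large and
//                                                  // aligned enough for both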

} // end namespace llvm

#endif // LLVM_SUPPORT_ALIGNOF_H

//==-- llvm/FileCheck/FileCheck.h --------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file has some utilities to use FileCheck as an API
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FILECHECK_FILECHECK_H
#define LLVM_FILECHECK_FILECHECK_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/SMLoc.h"
#include <bitset>
#include <memory>
#include <string>
#include <vector>

namespace llvm {
class MemoryBuffer;
class SourceMgr;
template <typename T> class SmallVectorImpl;

/// Contains info about various FileCheck options.
struct FileCheckRequest {
  std::vector<StringRef> CheckPrefixes;
  std::vector<StringRef> CommentPrefixes;
  bool NoCanonicalizeWhiteSpace = false;
  std::vector<StringRef> ImplicitCheckNot;
  std::vector<StringRef> GlobalDefines;
  bool AllowEmptyInput = false;
  bool AllowUnusedPrefixes = false;
  bool MatchFullLines = false;
  bool IgnoreCase = false;
  bool IsDefaultCheckPrefix = false;
  bool EnableVarScope = false;
  bool AllowDeprecatedDagOverlap = false;
  bool Verbose = false;
  bool VerboseVerbose = false;
};

namespace Check {

enum FileCheckKind {
  CheckNone = 0,
  CheckMisspelled,
  CheckPlain,
  CheckNext,
  CheckSame,
  CheckNot,
  CheckDAG,
  CheckLabel,
  CheckEmpty,
  CheckComment,

  /// Indicates the pattern only matches the end of file. This is used for
  /// trailing CHECK-NOTs.
  CheckEOF,

  /// Marks when parsing found a -NOT check combined with another CHECK suffix.
  CheckBadNot,

  /// Marks when parsing found a -COUNT directive with an invalid count value.
  CheckBadCount
};

enum FileCheckKindModifier {
  /// Modifies directive to perform literal match.
  ModifierLiteral = 0,

  // The number of modifiers.
  Size
};

class FileCheckType {
  FileCheckKind Kind;
  int Count; ///< Optional count for some checks.
  /// Modifiers for the check directive.
  std::bitset<FileCheckKindModifier::Size> Modifiers;

public:
  FileCheckType(FileCheckKind Kind = CheckNone) : Kind(Kind), Count(1) {}
  FileCheckType(const FileCheckType &) = default;
  FileCheckType &operator=(const FileCheckType &) = default;

  operator FileCheckKind() const { return Kind; }

  int getCount() const { return Count; }
  FileCheckType &setCount(int C);

  bool isLiteralMatch() const {
    return Modifiers[FileCheckKindModifier::ModifierLiteral];
  }
  FileCheckType &setLiteralMatch(bool Literal = true) {
    Modifiers.set(FileCheckKindModifier::ModifierLiteral, Literal);
    return *this;
  }

  /// \returns a description of \p Prefix.
  std::string getDescription(StringRef Prefix) const;

  /// \returns a description of \p Modifiers.
  std::string getModifiersDescription() const;
};
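
// For illustration: a "CHECK-COUNT-3:" directive would be modeled as
// FileCheckType(CheckPlain).setCount(3), and "CHECK-NEXT{LITERAL}:" as
// FileCheckType(CheckNext).setLiteralMatch().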
} // namespace Check

/// Summary of a FileCheck diagnostic.
struct FileCheckDiag {
  /// What is the FileCheck directive for this diagnostic?
  Check::FileCheckType CheckTy;
  /// Where is the FileCheck directive for this diagnostic?
  SMLoc CheckLoc;
  /// What type of match result does this diagnostic describe?
  ///
  /// A directive's supplied pattern is said to be either expected or excluded
  /// depending on whether the pattern must have or must not have a match in
  /// order for the directive to succeed.  For example, a CHECK directive's
  /// pattern is expected, and a CHECK-NOT directive's pattern is excluded.
  ///
  /// There might be more than one match result for a single pattern.  For
  /// example, there might be several discarded matches
  /// (MatchFoundButDiscarded) before either a good match
  /// (MatchFoundAndExpected) or a failure to match (MatchNoneButExpected),
  /// and there might be a fuzzy match (MatchFuzzy) after the latter.
  enum MatchType {
    /// Indicates a good match for an expected pattern.
    MatchFoundAndExpected,
    /// Indicates a match for an excluded pattern.
    MatchFoundButExcluded,
    /// Indicates a match for an expected pattern, but the match is on the
    /// wrong line.
    MatchFoundButWrongLine,
    /// Indicates a discarded match for an expected pattern.
    MatchFoundButDiscarded,
    /// Indicates an error while processing a match after the match was found
    /// for an expected or excluded pattern.  The error is specified by \c Note,
    /// to which it should be appropriate to prepend "error: " later.  The full
    /// match itself should be recorded in a preceding diagnostic of a different
    /// \c MatchFound match type.
    MatchFoundErrorNote,
    /// Indicates no match for an excluded pattern.
    MatchNoneAndExcluded,
    /// Indicates no match for an expected pattern, but this might follow good
    /// matches when multiple matches are expected for the pattern, or it might
    /// follow discarded matches for the pattern.
    MatchNoneButExpected,
    /// Indicates no match due to an expected or excluded pattern that has
    /// proven to be invalid at match time.  The exact problems are usually
    /// reported in subsequent diagnostics of the same match type but with
    /// \c Note set.
    MatchNoneForInvalidPattern,
    /// Indicates a fuzzy match that serves as a suggestion for the next
    /// intended match for an expected pattern with too few or no good matches.
    MatchFuzzy,
  } MatchTy;
  /// The search range if MatchTy starts with MatchNone, or the match range
  /// otherwise.
  unsigned InputStartLine;
  unsigned InputStartCol;
  unsigned InputEndLine;
  unsigned InputEndCol;
  /// A note to replace the one normally indicated by MatchTy, or the empty
  /// string if none.
  std::string Note;
  FileCheckDiag(const SourceMgr &SM, const Check::FileCheckType &CheckTy,
                SMLoc CheckLoc, MatchType MatchTy, SMRange InputRange,
                StringRef Note = "");
};

class FileCheckPatternContext;
struct FileCheckString;

/// The FileCheck class takes a request and exposes methods that use
/// information from the request.
class FileCheck {
  FileCheckRequest Req;
  std::unique_ptr<FileCheckPatternContext> PatternContext;
  // C++17 TODO: make this a plain std::vector.
  std::unique_ptr<std::vector<FileCheckString>> CheckStrings;

public:
  explicit FileCheck(FileCheckRequest Req);
  ~FileCheck();

  // Combines the check prefixes into a single regex so that we can efficiently
  // scan for any of the set.
  //
  // The semantics are that the longest match wins, which matches the behavior
  // of our regex library.
  Regex buildCheckPrefixRegex();

  /// Reads the check file from \p Buffer and records the expected strings it
  /// contains. Errors are reported against \p SM.
  ///
  /// Only expected strings whose prefix is one of those listed in \p PrefixRE
  /// are recorded. \returns true in case of an error, false otherwise.
  ///
  /// If \p ImpPatBufferIDRange is non-null, the range (inclusive start,
  /// exclusive end) of IDs for source buffers added to \p SM for implicit
  /// patterns is recorded in it.  The range is empty if there are none.
  bool
  readCheckFile(SourceMgr &SM, StringRef Buffer, Regex &PrefixRE,
                std::pair<unsigned, unsigned> *ImpPatBufferIDRange = nullptr);

  bool ValidateCheckPrefixes();

  /// Canonicalizes whitespace in the file. Line endings are replaced with
  /// UNIX-style '\n'.
  StringRef CanonicalizeFile(MemoryBuffer &MB,
                             SmallVectorImpl<char> &OutputBuffer);

  /// Checks the input to FileCheck provided in the \p Buffer against the
  /// expected strings read from the check file and record diagnostics emitted
  /// in \p Diags. Errors are recorded against \p SM.
  ///
  /// \returns false if the input fails to satisfy the checks.
  bool checkInput(SourceMgr &SM, StringRef Buffer,
                  std::vector<FileCheckDiag> *Diags = nullptr);
};
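
// Example: a minimal sketch of driving FileCheck programmatically, loosely
// following the order used by the FileCheck tool itself (CheckFileText and
// InputFileText are placeholder StringRefs for buffers already registered
// with SM; error handling is elided):
//
//   FileCheckRequest Req;
//   Req.CheckPrefixes.push_back("CHECK");
//   FileCheck FC(Req);
//   if (!FC.ValidateCheckPrefixes())
//     return 2;
//   Regex PrefixRE = FC.buildCheckPrefixRegex();
//   SourceMgr SM;
//   if (FC.readCheckFile(SM, CheckFileText, PrefixRE))
//     return 2;                       // true means errors were reported
//   bool Matched = FC.checkInput(SM, InputFileText);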

} // namespace llvm

#endif

//===- DlltoolDriver.h - dlltool.exe-compatible driver ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines an interface to a dlltool.exe-compatible driver.
// Used by llvm-dlltool.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TOOLDRIVERS_LLVM_DLLTOOL_DLLTOOLDRIVER_H
#define LLVM_TOOLDRIVERS_LLVM_DLLTOOL_DLLTOOLDRIVER_H

namespace llvm {
template <typename T> class ArrayRef;

int dlltoolDriverMain(ArrayRef<const char *> ArgsArr);
} // namespace llvm

#endif

//===- llvm-lib/LibDriver.h - lib.exe-compatible driver ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines an interface to a lib.exe-compatible driver that also understands
// bitcode files. Used by llvm-lib and lld-link /lib.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TOOLDRIVERS_LLVM_LIB_LIBDRIVER_H
#define LLVM_TOOLDRIVERS_LLVM_LIB_LIBDRIVER_H

namespace llvm {
template <typename T> class ArrayRef;

int libDriverMain(ArrayRef<const char *> Args);

} // namespace llvm

#endif

//===---- llvm/IRReader/IRReader.h - Reader for LLVM IR files ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines functions for reading LLVM IR. They support both
// Bitcode and Assembly, automatically detecting the input format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IRREADER_IRREADER_H
#define LLVM_IRREADER_IRREADER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include <memory>
#include <optional>

namespace llvm {

class MemoryBuffer;
class MemoryBufferRef;
class Module;
class SMDiagnostic;
class LLVMContext;

/// If the given MemoryBuffer holds a bitcode image, return a Module
/// for it which does lazy deserialization of function bodies.  Otherwise,
/// attempt to parse it as LLVM Assembly and return a fully populated
/// Module. The ShouldLazyLoadMetadata flag is passed down to the bitcode
/// reader to optionally enable lazy metadata loading. This takes ownership
/// of \p Buffer.
std::unique_ptr<Module> getLazyIRModule(std::unique_ptr<MemoryBuffer> Buffer,
                                        SMDiagnostic &Err, LLVMContext &Context,
                                        bool ShouldLazyLoadMetadata = false);

/// If the given file holds a bitcode image, return a Module
/// for it which does lazy deserialization of function bodies.  Otherwise,
/// attempt to parse it as LLVM Assembly and return a fully populated
/// Module. The ShouldLazyLoadMetadata flag is passed down to the bitcode
/// reader to optionally enable lazy metadata loading.
std::unique_ptr<Module>
getLazyIRFileModule(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
                    bool ShouldLazyLoadMetadata = false);

/// If the given MemoryBuffer holds a bitcode image, return a Module
/// for it.  Otherwise, attempt to parse it as LLVM Assembly and return
/// a Module for it.
/// \param Callbacks Optional parser callbacks, e.g. for overriding the
/// datalayout in the LLVM assembly.
std::unique_ptr<Module> parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
                                LLVMContext &Context,
                                ParserCallbacks Callbacks = {});

/// If the given file holds a bitcode image, return a Module for it.
/// Otherwise, attempt to parse it as LLVM Assembly and return a Module
/// for it.
/// \param Callbacks Optional parser callbacks, e.g. for overriding the
/// datalayout in the LLVM assembly.
std::unique_ptr<Module> parseIRFile(StringRef Filename, SMDiagnostic &Err,
                                    LLVMContext &Context,
                                    ParserCallbacks Callbacks = {});
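
// Example: a minimal sketch of loading a module from disk ("input.ll" and
// "mytool" are placeholder names):
//
//   LLVMContext Context;
//   SMDiagnostic Err;
//   std::unique_ptr<Module> M = parseIRFile("input.ll", Err, Context);
//   if (!M) {
//     Err.print("mytool", errs()); // reports the parse or bitcode error
//     return 1;
//   }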
} // namespace llvm

#endif

//===--- TextAPIReader.h - Text API Reader ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_TEXTAPIREADER_H
#define LLVM_TEXTAPI_TEXTAPIREADER_H

#include "llvm/Support/Error.h"

namespace llvm {

class MemoryBufferRef;

namespace MachO {

class InterfaceFile;

class TextAPIReader {
public:
  static Expected<std::unique_ptr<InterfaceFile>>
  get(MemoryBufferRef InputBuffer);

  TextAPIReader() = delete;
};
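
// Example: a minimal sketch of parsing a .tbd buffer (Buffer is a placeholder
// std::unique_ptr<MemoryBuffer> loaded elsewhere):
//
//   Expected<std::unique_ptr<InterfaceFile>> File =
//       TextAPIReader::get(Buffer->getMemBufferRef());
//   if (!File)
//     return File.takeError();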

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_TEXTAPIREADER_H

//===- llvm/TextAPI/InterfaceFile.h - TAPI Interface File -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A generic and abstract interface representation for linkable objects. This
// could be a Mach-O executable, bundle, dylib, or text-based stub file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_INTERFACEFILE_H
#define LLVM_TEXTAPI_INTERFACEFILE_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/PackedVersion.h"
#include "llvm/TextAPI/Platform.h"
#include "llvm/TextAPI/Symbol.h"
#include "llvm/TextAPI/SymbolSet.h"
#include "llvm/TextAPI/Target.h"

namespace llvm {
namespace MachO {

/// Defines a list of Objective-C constraints.
enum class ObjCConstraintType : unsigned {
  /// No constraint.
  None = 0,

  /// Retain/Release.
  Retain_Release = 1,

  /// Retain/Release for Simulator.
  Retain_Release_For_Simulator = 2,

  /// Retain/Release or Garbage Collection.
  Retain_Release_Or_GC = 3,

  /// Garbage Collection.
  GC = 4,
};

// clang-format off

/// Defines the file type this file represents.
enum FileType : unsigned {
  /// Invalid file type.
  Invalid = 0U,

  /// Text-based stub file (.tbd) version 1.0
  TBD_V1  = 1U <<  0,

  /// Text-based stub file (.tbd) version 2.0
  TBD_V2  = 1U <<  1,

  /// Text-based stub file (.tbd) version 3.0
  TBD_V3  = 1U <<  2,

  /// Text-based stub file (.tbd) version 4.0
  TBD_V4  = 1U <<  3,

  /// Text-based stub file (.tbd) version 5.0
  TBD_V5  = 1U <<  4,

  All     = ~0U,

  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All),
};

// clang-format on

/// Reference to an interface file.
class InterfaceFileRef {
public:
  InterfaceFileRef() = default;

  InterfaceFileRef(StringRef InstallName) : InstallName(InstallName) {}

  InterfaceFileRef(StringRef InstallName, TargetList Targets)
      : InstallName(InstallName), Targets(std::move(Targets)) {}

  StringRef getInstallName() const { return InstallName; }

  void addTarget(const Target &Target);
  template <typename RangeT> void addTargets(RangeT &&Targets) {
    for (const auto &Target_ : Targets)
      addTarget(Target(Target_));
  }

  using const_target_iterator = TargetList::const_iterator;
  using const_target_range = llvm::iterator_range<const_target_iterator>;
  const_target_range targets() const { return {Targets}; }

  ArchitectureSet getArchitectures() const {
    return mapToArchitectureSet(Targets);
  }

  PlatformSet getPlatforms() const { return mapToPlatformSet(Targets); }

  bool operator==(const InterfaceFileRef &O) const {
    return std::tie(InstallName, Targets) == std::tie(O.InstallName, O.Targets);
  }

  bool operator!=(const InterfaceFileRef &O) const {
    return std::tie(InstallName, Targets) != std::tie(O.InstallName, O.Targets);
  }

  bool operator<(const InterfaceFileRef &O) const {
    return std::tie(InstallName, Targets) < std::tie(O.InstallName, O.Targets);
  }

private:
  std::string InstallName;
  TargetList Targets;
};

} // end namespace MachO.

namespace MachO {

/// Defines the interface file.
class InterfaceFile {
public:
  InterfaceFile(std::unique_ptr<SymbolSet> &&InputSymbols)
      : SymbolsSet(std::move(InputSymbols)) {}

  InterfaceFile() : SymbolsSet(std::make_unique<SymbolSet>()) {}

  /// Set the path from which this file was generated (if applicable).
  ///
  /// \param Path_ The path to the source file.
  void setPath(StringRef Path_) { Path = std::string(Path_); }

  /// Get the path from which this file was generated (if applicable).
  ///
  /// \return The path to the source file or empty.
  StringRef getPath() const { return Path; }

  /// Set the file type.
  ///
  /// This is used by the YAML writer to identify the specification it should
  /// use for writing the file.
  ///
  /// \param Kind The file type.
  void setFileType(FileType Kind) { FileKind = Kind; }

  /// Get the file type.
  ///
  /// \return The file type.
  FileType getFileType() const { return FileKind; }

  /// Get the architectures.
  ///
  /// \return The applicable architectures.
  ArchitectureSet getArchitectures() const {
    return mapToArchitectureSet(Targets);
  }

  /// Get the platforms.
  ///
  /// \return The applicable platforms.
  PlatformSet getPlatforms() const { return mapToPlatformSet(Targets); }

  /// Add a target.
  ///
  /// \param Target The target to add.
  void addTarget(const Target &Target);

  /// Add targets.
  ///
  /// Adds the subset of llvm::Triple values that is supported by TAPI.
  ///
  /// \param Targets The collection of targets.
  template <typename RangeT> void addTargets(RangeT &&Targets) {
    for (const auto &Target_ : Targets)
      addTarget(Target(Target_));
  }

  using const_target_iterator = TargetList::const_iterator;
  using const_target_range = llvm::iterator_range<const_target_iterator>;
  const_target_range targets() const { return {Targets}; }

  using const_filtered_target_iterator =
      llvm::filter_iterator<const_target_iterator,
                            std::function<bool(const Target &)>>;
  using const_filtered_target_range =
      llvm::iterator_range<const_filtered_target_iterator>;
  const_filtered_target_range targets(ArchitectureSet Archs) const;

  /// Set the install name of the library.
  void setInstallName(StringRef InstallName_) {
    InstallName = std::string(InstallName_);
  }

  /// Get the install name of the library.
  StringRef getInstallName() const { return InstallName; }

  /// Set the current version of the library.
  void setCurrentVersion(PackedVersion Version) { CurrentVersion = Version; }

  /// Get the current version of the library.
  PackedVersion getCurrentVersion() const { return CurrentVersion; }

  /// Set the compatibility version of the library.
  void setCompatibilityVersion(PackedVersion Version) {
    CompatibilityVersion = Version;
  }

  /// Get the compatibility version of the library.
  PackedVersion getCompatibilityVersion() const { return CompatibilityVersion; }

  /// Set the Swift ABI version of the library.
  void setSwiftABIVersion(uint8_t Version) { SwiftABIVersion = Version; }

  /// Get the Swift ABI version of the library.
  uint8_t getSwiftABIVersion() const { return SwiftABIVersion; }

  /// Specify if the library uses two-level namespace (or flat namespace).
  void setTwoLevelNamespace(bool V = true) { IsTwoLevelNamespace = V; }

  /// Check if the library uses two-level namespace.
  bool isTwoLevelNamespace() const { return IsTwoLevelNamespace; }

  /// Specify if the library is application extension safe (or not).
  void setApplicationExtensionSafe(bool V = true) { IsAppExtensionSafe = V; }

  /// Check if the library is application extension safe.
  bool isApplicationExtensionSafe() const { return IsAppExtensionSafe; }

  /// Set the Objective-C constraint.
  void setObjCConstraint(ObjCConstraintType Constraint) {
    ObjcConstraint = Constraint;
  }

  /// Get the Objective-C constraint.
  ObjCConstraintType getObjCConstraint() const { return ObjcConstraint; }

  /// Add a parent umbrella framework.
  /// \param Target_ The target to which \p Parent applies.
  /// \param Parent The name of the parent umbrella framework.
  void addParentUmbrella(const Target &Target_, StringRef Parent);

  /// Get the list of parent umbrella frameworks.
  ///
  /// \return Returns a list of target information and install names of the
  /// parent umbrellas.
  const std::vector<std::pair<Target, std::string>> &umbrellas() const {
    return ParentUmbrellas;
  }

  /// Add an allowable client.
  ///
  /// Mach-O Dynamic libraries have the concept of allowable clients that are
  /// checked during static link time. The name of the application or library
  /// that is being generated needs to match one of the allowable clients or the
  /// linker refuses to link this library.
  ///
  /// \param InstallName The name of the client that is allowed to link this
  /// library.
  /// \param Target The target triple for which this applies.
  void addAllowableClient(StringRef InstallName, const Target &Target);

  /// Get the list of allowable clients.
  ///
  /// \return Returns a list of allowable clients.
  const std::vector<InterfaceFileRef> &allowableClients() const {
    return AllowableClients;
  }

  /// Add a re-exported library.
  ///
  /// \param InstallName The name of the library to re-export.
  /// \param Target The target triple for which this applies.
  void addReexportedLibrary(StringRef InstallName, const Target &Target);

  /// Get the list of re-exported libraries.
  ///
  /// \return Returns a list of re-exported libraries.
  const std::vector<InterfaceFileRef> &reexportedLibraries() const {
    return ReexportedLibraries;
  }

  /// Add a library for inlining to top level library.
  ///
  ///\param Document The library to inline with top level library.
  void addDocument(std::shared_ptr<InterfaceFile> &&Document);

  /// Returns a pointer to the parent document if one exists, or nullptr
  /// otherwise.
  InterfaceFile *getParent() const { return Parent; }

  /// Get the list of inlined libraries.
  ///
  /// \return Returns a list of the inlined frameworks.
  const std::vector<std::shared_ptr<InterfaceFile>> &documents() const {
    return Documents;
  }

  /// Set the runpath search paths.
  /// \param InputTarget The target applicable to runpath search path.
  /// \param RPath The name of runpath.
  void addRPath(const Target &InputTarget, StringRef RPath);

  /// Get the list of runpath search paths.
  ///
  /// \return Returns a list of the rpaths per target.
  const std::vector<std::pair<Target, std::string>> &rpaths() const {
    return RPaths;
  }

  /// Get a symbol if it exists in the file.
  ///
  /// \param Kind The kind of global symbol to look up.
  /// \param Name The name of the symbol.
  std::optional<const Symbol *> getSymbol(SymbolKind Kind,
                                          StringRef Name) const {
    if (auto *Sym = SymbolsSet->findSymbol(Kind, Name))
      return Sym;
    return std::nullopt;
  }

  /// Add a symbol to the symbols list or extend an existing one.
  template <typename RangeT,
            typename ElT = typename std::remove_reference<
                decltype(*std::begin(std::declval<RangeT>()))>::type>
  void addSymbol(SymbolKind Kind, StringRef Name, RangeT &&Targets,
                 SymbolFlags Flags = SymbolFlags::None) {
    SymbolsSet->addGlobal(Kind, Name, Flags, Targets);
  }

  /// Add Symbol with multiple targets.
  ///
  /// \param Kind The kind of global symbol to record.
  /// \param Name The name of the symbol.
  /// \param Targets The list of targets the symbol is defined in.
  /// \param Flags The properties the symbol holds.
  void addSymbol(SymbolKind Kind, StringRef Name, TargetList &&Targets,
                 SymbolFlags Flags = SymbolFlags::None) {
    SymbolsSet->addGlobal(Kind, Name, Flags, Targets);
  }

  /// Add Symbol with single target.
  ///
  /// \param Kind The kind of global symbol to record.
  /// \param Name The name of the symbol.
  /// \param Target The target the symbol is defined in.
  /// \param Flags The properties the symbol holds.
  void addSymbol(SymbolKind Kind, StringRef Name, Target &Target,
                 SymbolFlags Flags = SymbolFlags::None) {
    SymbolsSet->addGlobal(Kind, Name, Flags, Target);
  }

  /// Get size of symbol set.
  /// \return The number of symbols the file holds.
  size_t symbolsCount() const { return SymbolsSet->size(); }

  using const_symbol_range = SymbolSet::const_symbol_range;
  using const_filtered_symbol_range = SymbolSet::const_filtered_symbol_range;

  const_symbol_range symbols() const { return SymbolsSet->symbols(); }
  const_filtered_symbol_range exports() const { return SymbolsSet->exports(); }
  const_filtered_symbol_range reexports() const {
    return SymbolsSet->reexports();
  }
  const_filtered_symbol_range undefineds() const {
    return SymbolsSet->undefineds();
  }

  /// The equality is determined by attributes that impact linking
  /// compatibility. Path and FileKind are irrelevant since they by
  /// themselves should not impact linking.
  /// This is an expensive operation.
  bool operator==(const InterfaceFile &O) const;

  bool operator!=(const InterfaceFile &O) const { return !(*this == O); }

private:
  llvm::BumpPtrAllocator Allocator;
  StringRef copyString(StringRef String) {
    if (String.empty())
      return {};

    void *Ptr = Allocator.Allocate(String.size(), 1);
    memcpy(Ptr, String.data(), String.size());
    return StringRef(reinterpret_cast<const char *>(Ptr), String.size());
  }

  TargetList Targets;
  std::string Path;
  FileType FileKind{FileType::Invalid};
  std::string InstallName;
  PackedVersion CurrentVersion;
  PackedVersion CompatibilityVersion;
  uint8_t SwiftABIVersion{0};
  bool IsTwoLevelNamespace{false};
  bool IsAppExtensionSafe{false};
  ObjCConstraintType ObjcConstraint = ObjCConstraintType::None;
  std::vector<std::pair<Target, std::string>> ParentUmbrellas;
  std::vector<InterfaceFileRef> AllowableClients;
  std::vector<InterfaceFileRef> ReexportedLibraries;
  std::vector<std::shared_ptr<InterfaceFile>> Documents;
  std::vector<std::pair<Target, std::string>> RPaths;
  std::unique_ptr<SymbolSet> SymbolsSet;
  InterfaceFile *Parent = nullptr;
};
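
// Example: a minimal sketch of building an interface file in memory
// (SymbolKind comes from TextAPI/Symbol.h, PLATFORM_MACOS from
// BinaryFormat/MachO.h, and TargetList from TextAPI/Target.h):
//
//   InterfaceFile File;
//   File.setFileType(FileType::TBD_V4);
//   File.setInstallName("/usr/lib/libExample.dylib");
//   Target T(AK_x86_64, PLATFORM_MACOS);
//   File.addTarget(T);
//   File.addSymbol(SymbolKind::GlobalSymbol, "_main", TargetList{T});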

// Keep containers that hold InterfaceFileRefs in sorted order and uniqued.
template <typename C>
typename C::iterator addEntry(C &Container, StringRef InstallName) {
  auto I = partition_point(Container, [=](const InterfaceFileRef &O) {
    return O.getInstallName() < InstallName;
  });
  if (I != Container.end() && I->getInstallName() == InstallName)
    return I;

  return Container.emplace(I, InstallName);
}
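
// For example, addEntry(AllowableClients, "clientA") returns the iterator to
// the existing entry with that install name if present, and otherwise inserts
// a new InterfaceFileRef at its sorted position.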

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_INTERFACEFILE_H

//===- llvm/TextAPI/Architecture.def - Architecture -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef ARCHINFO
#define ARCHINFO(arch)
#endif
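
// Clients stamp out this table by defining ARCHINFO before including the
// file; for example, Architecture.h builds its enum with:
//
//   #define ARCHINFO(Arch, Type, SubType, NumBits) AK_##Arch,
//   #include "llvm/TextAPI/Architecture.def"
//   #undef ARCHINFO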

///
/// X86 architectures sorted by cpu type and sub type id.
///
ARCHINFO(i386, MachO::CPU_TYPE_I386, MachO::CPU_SUBTYPE_I386_ALL, 32)
ARCHINFO(x86_64, MachO::CPU_TYPE_X86_64, MachO::CPU_SUBTYPE_X86_64_ALL, 64)
ARCHINFO(x86_64h, MachO::CPU_TYPE_X86_64, MachO::CPU_SUBTYPE_X86_64_H, 64)


///
/// ARM architectures sorted by cpu sub type id.
///
ARCHINFO(armv4t, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V4T, 32)
ARCHINFO(armv6, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V6, 32)
ARCHINFO(armv5, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V5TEJ, 32)
ARCHINFO(armv7, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V7, 32)
ARCHINFO(armv7s, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V7S, 32)
ARCHINFO(armv7k, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V7K, 32)
ARCHINFO(armv6m, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V6M, 32)
ARCHINFO(armv7m, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V7M, 32)
ARCHINFO(armv7em, MachO::CPU_TYPE_ARM, MachO::CPU_SUBTYPE_ARM_V7EM, 32)


///
/// ARM64 architectures sorted by cpu sub type id.
///
ARCHINFO(arm64, MachO::CPU_TYPE_ARM64, MachO::CPU_SUBTYPE_ARM64_ALL, 64)
ARCHINFO(arm64e, MachO::CPU_TYPE_ARM64, MachO::CPU_SUBTYPE_ARM64E, 64)


///
/// ARM64_32 architectures sorted by cpu sub type id.
///
ARCHINFO(arm64_32, MachO::CPU_TYPE_ARM64_32, MachO::CPU_SUBTYPE_ARM64_32_V8, 32)

//===- llvm/TextAPI/Platform.h - Platform -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the platforms supported by TAPI and related helpers.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TEXTAPI_PLATFORM_H
#define LLVM_TEXTAPI_PLATFORM_H

#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/VersionTuple.h"

namespace llvm {
namespace MachO {

using PlatformSet = SmallSet<PlatformType, 3>;
using PlatformVersionSet = SmallSet<std::pair<PlatformType, VersionTuple>, 3>;

PlatformType mapToPlatformType(PlatformType Platform, bool WantSim);
PlatformType mapToPlatformType(const Triple &Target);
PlatformSet mapToPlatformSet(ArrayRef<Triple> Targets);
StringRef getPlatformName(PlatformType Platform);
PlatformType getPlatformFromName(StringRef Name);
std::string getOSAndEnvironmentName(PlatformType Platform,
                                    std::string Version = "");
VersionTuple mapToSupportedOSVersion(const Triple &Triple);
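
// For example (an illustrative sketch; the exact spellings are defined in
// Platform.cpp):
//
//   PlatformType PT = getPlatformFromName("ios");
//   StringRef Name = getPlatformName(PT); // e.g. "iOS"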

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_PLATFORM_H

//===- llvm/TextAPI/Architecture.h - Architecture ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the architecture enum and helper methods.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_ARCHITECTURE_H
#define LLVM_TEXTAPI_ARCHITECTURE_H

#include <cstdint>
#include <utility>

namespace llvm {
class raw_ostream;
class StringRef;
class Triple;

namespace MachO {

/// Defines the architecture slices that are supported by Text-based Stub files.
enum Architecture : uint8_t {
#define ARCHINFO(Arch, Type, SubType, NumBits) AK_##Arch,
#include "llvm/TextAPI/Architecture.def"
#undef ARCHINFO
  AK_unknown, // this has to go last.
};

/// Convert a CPU Type and Subtype pair to an architecture slice.
Architecture getArchitectureFromCpuType(uint32_t CPUType, uint32_t CPUSubType);

/// Convert a name to an architecture slice.
Architecture getArchitectureFromName(StringRef Name);

/// Convert an architecture slice to a string.
StringRef getArchitectureName(Architecture Arch);

/// Convert an architecture slice to a CPU Type and Subtype pair.
std::pair<uint32_t, uint32_t> getCPUTypeFromArchitecture(Architecture Arch);

/// Convert a target to an architecture slice.
Architecture mapToArchitecture(const llvm::Triple &Target);

/// Check if architecture is 64 bit.
bool is64Bit(Architecture);

raw_ostream &operator<<(raw_ostream &OS, Architecture Arch);

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_ARCHITECTURE_H

//===- llvm/TextAPI/ArchitectureSet.h - ArchitectureSet ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the architecture set.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_ARCHITECTURESET_H
#define LLVM_TEXTAPI_ARCHITECTURESET_H

#include "llvm/TextAPI/Architecture.h"
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <vector>

namespace llvm {
class raw_ostream;

namespace MachO {

class ArchitectureSet {
private:
  using ArchSetType = uint32_t;

  const static ArchSetType EndIndexVal =
      std::numeric_limits<ArchSetType>::max();
  ArchSetType ArchSet{0};

public:
  constexpr ArchitectureSet() = default;
  constexpr ArchitectureSet(ArchSetType Raw) : ArchSet(Raw) {}
  ArchitectureSet(Architecture Arch) : ArchitectureSet() { set(Arch); }
  ArchitectureSet(const std::vector<Architecture> &Archs);

  void set(Architecture Arch) {
    if (Arch == AK_unknown)
      return;
    ArchSet |= 1U << static_cast<int>(Arch);
  }

  void clear(Architecture Arch) { ArchSet &= ~(1U << static_cast<int>(Arch)); }

  bool has(Architecture Arch) const {
    return ArchSet & (1U << static_cast<int>(Arch));
  }

  bool contains(ArchitectureSet Archs) const {
    return (ArchSet & Archs.ArchSet) == Archs.ArchSet;
  }

  size_t count() const;

  bool empty() const { return ArchSet == 0; }

  ArchSetType rawValue() const { return ArchSet; }

  bool hasX86() const {
    return has(AK_i386) || has(AK_x86_64) || has(AK_x86_64h);
  }

  template <typename Ty> class arch_iterator {
  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = Architecture;
    using difference_type = std::size_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    ArchSetType Index;
    Ty *ArchSet;

    void findNextSetBit() {
      if (Index == EndIndexVal)
        return;
      while (++Index < sizeof(Ty) * 8) {
        if (*ArchSet & (1UL << Index))
          return;
      }

      Index = EndIndexVal;
    }

  public:
    arch_iterator(Ty *ArchSet, ArchSetType Index = 0)
        : Index(Index), ArchSet(ArchSet) {
      if (Index != EndIndexVal && !(*ArchSet & (1UL << Index)))
        findNextSetBit();
    }

    Architecture operator*() const { return static_cast<Architecture>(Index); }

    arch_iterator &operator++() {
      findNextSetBit();
      return *this;
    }

    arch_iterator operator++(int) {
      auto tmp = *this;
      findNextSetBit();
      return tmp;
    }

    bool operator==(const arch_iterator &o) const {
      return std::tie(Index, ArchSet) == std::tie(o.Index, o.ArchSet);
    }

    bool operator!=(const arch_iterator &o) const { return !(*this == o); }
  };

  ArchitectureSet operator&(const ArchitectureSet &o) {
    return {ArchSet & o.ArchSet};
  }

  ArchitectureSet operator|(const ArchitectureSet &o) {
    return {ArchSet | o.ArchSet};
  }

  ArchitectureSet &operator|=(const ArchitectureSet &o) {
    ArchSet |= o.ArchSet;
    return *this;
  }

  ArchitectureSet &operator|=(const Architecture &Arch) {
    set(Arch);
    return *this;
  }

  bool operator==(const ArchitectureSet &o) const {
    return ArchSet == o.ArchSet;
  }

  bool operator!=(const ArchitectureSet &o) const {
    return ArchSet != o.ArchSet;
  }

  bool operator<(const ArchitectureSet &o) const { return ArchSet < o.ArchSet; }

  using iterator = arch_iterator<ArchSetType>;
  using const_iterator = arch_iterator<const ArchSetType>;

  iterator begin() { return {&ArchSet}; }
  iterator end() { return {&ArchSet, EndIndexVal}; }

  const_iterator begin() const { return {&ArchSet}; }
  const_iterator end() const { return {&ArchSet, EndIndexVal}; }

  operator std::string() const;
  operator std::vector<Architecture>() const;
  void print(raw_ostream &OS) const;
};

inline ArchitectureSet operator|(const Architecture &lhs,
                                 const Architecture &rhs) {
  return ArchitectureSet(lhs) | ArchitectureSet(rhs);
}
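
// Example: a minimal sketch of set construction and iteration (OS is a
// placeholder raw_ostream&; getArchitectureName is declared in
// Architecture.h):
//
//   ArchitectureSet Archs = AK_x86_64 | AK_arm64;
//   if (Archs.has(AK_arm64))
//     for (Architecture A : Archs)
//       OS << getArchitectureName(A) << " "; // "x86_64 arm64 "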

raw_ostream &operator<<(raw_ostream &OS, ArchitectureSet Set);

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_ARCHITECTURESET_H

//===--- TextAPIWriter.h - Text API Writer ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_TEXTAPIWRITER_H
#define LLVM_TEXTAPI_TEXTAPIWRITER_H

namespace llvm {

class Error;
class raw_ostream;

namespace MachO {

class InterfaceFile;

class TextAPIWriter {
public:
  TextAPIWriter() = delete;

  static Error writeToStream(raw_ostream &OS, const InterfaceFile &File,
                             bool Compact = false);
};
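
// Example: a minimal sketch of writing an InterfaceFile (File is a
// placeholder built elsewhere) back out in its textual form:
//
//   if (Error E = TextAPIWriter::writeToStream(outs(), File))
//     return E;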

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_TEXTAPIWRITER_H

//===- llvm/TextAPI/Target.h - TAPI Target ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_TARGET_H
#define LLVM_TEXTAPI_TARGET_H

#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/TextAPI/Architecture.h"
#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/Platform.h"

namespace llvm {

class Triple;

namespace MachO {

// This is similar to an llvm::Triple, but the triple doesn't carry all the
// information we need. For example, there is no enum value for x86_64h. The
// only way to get that information is to parse the triple string.
class Target {
public:
  Target() = default;
  Target(Architecture Arch, PlatformType Platform,
         VersionTuple MinDeployment = {})
      : Arch(Arch), Platform(Platform), MinDeployment(MinDeployment) {}
  explicit Target(const llvm::Triple &Triple)
      : Arch(mapToArchitecture(Triple)), Platform(mapToPlatformType(Triple)),
        MinDeployment(mapToSupportedOSVersion(Triple)) {}

  static llvm::Expected<Target> create(StringRef Target);

  operator std::string() const;

  Architecture Arch;
  PlatformType Platform;
  VersionTuple MinDeployment;
};

inline bool operator==(const Target &LHS, const Target &RHS) {
  // In most cases the deployment version is not useful to compare.
  return std::tie(LHS.Arch, LHS.Platform) == std::tie(RHS.Arch, RHS.Platform);
}

inline bool operator!=(const Target &LHS, const Target &RHS) {
  return !(LHS == RHS);
}

inline bool operator<(const Target &LHS, const Target &RHS) {
  // In most cases the deployment version is not useful to compare.
  return std::tie(LHS.Arch, LHS.Platform) < std::tie(RHS.Arch, RHS.Platform);
}

inline bool operator==(const Target &LHS, const Architecture &RHS) {
  return LHS.Arch == RHS;
}

inline bool operator!=(const Target &LHS, const Architecture &RHS) {
  return LHS.Arch != RHS;
}

PlatformVersionSet mapToPlatformVersionSet(ArrayRef<Target> Targets);
PlatformSet mapToPlatformSet(ArrayRef<Target> Targets);
ArchitectureSet mapToArchitectureSet(ArrayRef<Target> Targets);

std::string getTargetTripleName(const Target &Targ);

raw_ostream &operator<<(raw_ostream &OS, const Target &Target);

} // namespace MachO
} // namespace llvm

#endif // LLVM_TEXTAPI_TARGET_H
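
// Example (editor's sketch): per the comparison operators above, two Targets
// that differ only in deployment version compare equal. The triple spellings
// are illustrative.
//
//   #include "llvm/TargetParser/Triple.h"
//   #include "llvm/TextAPI/Target.h"
//
//   bool sameSlice() {
//     llvm::MachO::Target A{llvm::Triple("arm64-apple-macos11")};
//     llvm::MachO::Target B{llvm::Triple("arm64-apple-macos13")};
//     return A == B; // true: only Arch and Platform are compared
//   }
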
//===- llvm/TextAPI/PackedVersion.h - PackedVersion -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the Mach-O packed version format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_PACKEDVERSION_H
#define LLVM_TEXTAPI_PACKEDVERSION_H

#include <cstdint>
#include <string>
#include <utility>

namespace llvm {
class raw_ostream;
class StringRef;

namespace MachO {

class PackedVersion {
  uint32_t Version{0};

public:
  constexpr PackedVersion() = default;
  explicit constexpr PackedVersion(uint32_t RawVersion) : Version(RawVersion) {}
  PackedVersion(unsigned Major, unsigned Minor, unsigned Subminor)
      : Version((Major << 16) | ((Minor & 0xff) << 8) | (Subminor & 0xff)) {}

  bool empty() const { return Version == 0; }

  /// Retrieve the major version number.
  unsigned getMajor() const { return Version >> 16; }

  /// Retrieve the minor version number, if provided.
  unsigned getMinor() const { return (Version >> 8) & 0xff; }

  /// Retrieve the subminor version number, if provided.
  unsigned getSubminor() const { return Version & 0xff; }

  bool parse32(StringRef Str);
  std::pair<bool, bool> parse64(StringRef Str);

  bool operator<(const PackedVersion &O) const { return Version < O.Version; }

  bool operator==(const PackedVersion &O) const { return Version == O.Version; }

  bool operator!=(const PackedVersion &O) const { return Version != O.Version; }

  uint32_t rawValue() const { return Version; }

  operator std::string() const;

  void print(raw_ostream &OS) const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const PackedVersion &Version) {
  Version.print(OS);
  return OS;
}

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_PACKEDVERSION_H
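
// Worked example (editor's note): the three-argument constructor above packs
// major.minor.subminor as 0xMMMMmmss, with minor and subminor truncated to one
// byte each by the 0xff masks. So:
//
//   #include <cassert>
//   llvm::MachO::PackedVersion V(10, 14, 6);
//   assert(V.rawValue() == 0x000A0E06u); // (10 << 16) | (14 << 8) | 6
//   assert(V.getMajor() == 10 && V.getMinor() == 14 && V.getSubminor() == 6);
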
//===- llvm/TextAPI/SymbolSet.h - TAPI Symbol Set --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_SYMBOLSET_H
#define LLVM_TEXTAPI_SYMBOLSET_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/TextAPI/Architecture.h"
#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/Symbol.h"
#include <stddef.h>

namespace llvm {

struct SymbolsMapKey {
  MachO::SymbolKind Kind;
  StringRef Name;

  SymbolsMapKey(MachO::SymbolKind Kind, StringRef Name)
      : Kind(Kind), Name(Name) {}
};
template <> struct DenseMapInfo<SymbolsMapKey> {
  static inline SymbolsMapKey getEmptyKey() {
    return SymbolsMapKey(MachO::SymbolKind::GlobalSymbol, StringRef{});
  }

  static inline SymbolsMapKey getTombstoneKey() {
    return SymbolsMapKey(MachO::SymbolKind::ObjectiveCInstanceVariable,
                         StringRef{});
  }

  static unsigned getHashValue(const SymbolsMapKey &Key) {
    return hash_combine(hash_value(Key.Kind), hash_value(Key.Name));
  }

  static bool isEqual(const SymbolsMapKey &LHS, const SymbolsMapKey &RHS) {
    return std::tie(LHS.Kind, LHS.Name) == std::tie(RHS.Kind, RHS.Name);
  }
};

template <typename DerivedT, typename KeyInfoT, typename BucketT>
bool operator==(const DenseMapBase<DerivedT, SymbolsMapKey, MachO::Symbol *,
                                   KeyInfoT, BucketT> &LHS,
                const DenseMapBase<DerivedT, SymbolsMapKey, MachO::Symbol *,
                                   KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;
  for (const auto &KV : LHS) {
    auto I = RHS.find(KV.first);
    if (I == RHS.end() || *I->second != *KV.second)
      return false;
  }
  return true;
}

template <typename DerivedT, typename KeyInfoT, typename BucketT>
bool operator!=(const DenseMapBase<DerivedT, SymbolsMapKey, MachO::Symbol *,
                                   KeyInfoT, BucketT> &LHS,
                const DenseMapBase<DerivedT, SymbolsMapKey, MachO::Symbol *,
                                   KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

namespace MachO {

class SymbolSet {
private:
  llvm::BumpPtrAllocator Allocator;
  StringRef copyString(StringRef String) {
    if (String.empty())
      return {};
    void *Ptr = Allocator.Allocate(String.size(), 1);
    memcpy(Ptr, String.data(), String.size());
    return StringRef(reinterpret_cast<const char *>(Ptr), String.size());
  }

  using SymbolsMapType = llvm::DenseMap<SymbolsMapKey, Symbol *>;
  SymbolsMapType Symbols;

  Symbol *addGlobalImpl(SymbolKind, StringRef Name, SymbolFlags Flags);

public:
  SymbolSet() = default;
  Symbol *addGlobal(SymbolKind Kind, StringRef Name, SymbolFlags Flags,
                    const Target &Targ);
  size_t size() const { return Symbols.size(); }

  template <typename RangeT,
            typename ElT = typename std::remove_reference<
                decltype(*std::begin(std::declval<RangeT>()))>::type>
  Symbol *addGlobal(SymbolKind Kind, StringRef Name, SymbolFlags Flags,
                    RangeT &&Targets) {
    auto *Global = addGlobalImpl(Kind, Name, Flags);
    for (const auto &Targ : Targets)
      Global->addTarget(Targ);
    if (Kind == SymbolKind::ObjectiveCClassEHType)
      addGlobal(SymbolKind::ObjectiveCClass, Name, Flags, Targets);
    return Global;
  }

  const Symbol *findSymbol(SymbolKind Kind, StringRef Name) const;

  struct const_symbol_iterator
      : public iterator_adaptor_base<
            const_symbol_iterator, SymbolsMapType::const_iterator,
            std::forward_iterator_tag, const Symbol *, ptrdiff_t,
            const Symbol *, const Symbol *> {
    const_symbol_iterator() = default;

    template <typename U>
    const_symbol_iterator(U &&u)
        : iterator_adaptor_base(std::forward<U>(u)) {}

    reference operator*() const { return I->second; }
    pointer operator->() const { return I->second; }
  };

  using const_symbol_range = iterator_range<const_symbol_iterator>;

  using const_filtered_symbol_iterator =
      filter_iterator<const_symbol_iterator,
                      std::function<bool(const Symbol *)>>;
  using const_filtered_symbol_range =
      iterator_range<const_filtered_symbol_iterator>;

  // Range that contains all symbols.
  const_symbol_range symbols() const {
    return {Symbols.begin(), Symbols.end()};
  }

  // Range that contains all defined and exported symbols.
  const_filtered_symbol_range exports() const {
    std::function<bool(const Symbol *)> fn = [](const Symbol *Symbol) {
      return !Symbol->isUndefined() && !Symbol->isReexported();
    };
    return make_filter_range(
        make_range<const_symbol_iterator>({Symbols.begin()}, {Symbols.end()}),
        fn);
  }

  // Range that contains all reexported symbols.
  const_filtered_symbol_range reexports() const {
    std::function<bool(const Symbol *)> fn = [](const Symbol *Symbol) {
      return Symbol->isReexported();
    };
    return make_filter_range(
        make_range<const_symbol_iterator>({Symbols.begin()}, {Symbols.end()}),
        fn);
  }

  // Range that contains all undefined and exported symbols.
  const_filtered_symbol_range undefineds() const {
    std::function<bool(const Symbol *)> fn = [](const Symbol *Symbol) {
      return Symbol->isUndefined();
    };
    return make_filter_range(
        make_range<const_symbol_iterator>({Symbols.begin()}, {Symbols.end()}),
        fn);
  }

  bool operator==(const SymbolSet &O) const;

  bool operator!=(const SymbolSet &O) const { return !(*this == O); }

  void *allocate(size_t Size, unsigned Align = 8) {
    return Allocator.Allocate(Size, Align);
  }
};

} // namespace MachO
} // namespace llvm
#endif // LLVM_TEXTAPI_SYMBOLSET_H
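
// Example (editor's sketch): populating a SymbolSet and walking the filtered
// ranges above. The symbol name and flags are illustrative.
//
//   #include "llvm/Support/raw_ostream.h"
//   #include "llvm/TextAPI/SymbolSet.h"
//   using namespace llvm::MachO;
//
//   void collect(SymbolSet &Set, const Target &Targ) {
//     Set.addGlobal(SymbolKind::GlobalSymbol, "_main", SymbolFlags::Text,
//                   Targ);
//     for (const Symbol *Sym : Set.exports()) // defined, non-reexported only
//       llvm::outs() << Sym->getName() << "\n";
//   }
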
//===- llvm/TextAPI/Symbol.h - TAPI Symbol ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TEXTAPI_SYMBOL_H
#define LLVM_TEXTAPI_SYMBOL_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TextAPI/ArchitectureSet.h"
#include "llvm/TextAPI/Target.h"

namespace llvm {
namespace MachO {

// clang-format off

/// Symbol flags.
enum class SymbolFlags : uint8_t {
  /// No flags
  None             = 0,

  /// Thread-local value symbol
  ThreadLocalValue = 1U << 0,

  /// Weak defined symbol
  WeakDefined      = 1U << 1,

  /// Weak referenced symbol
  WeakReferenced   = 1U << 2,

  /// Undefined
  Undefined        = 1U << 3,

  /// Re-exported
  Rexported        = 1U << 4,

  /// Data Segment
  Data             = 1U << 5,

  /// Text Segment
  Text             = 1U << 6,

  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/Text),
};

// clang-format on

enum class SymbolKind : uint8_t {
  GlobalSymbol,
  ObjectiveCClass,
  ObjectiveCClassEHType,
  ObjectiveCInstanceVariable,
};

constexpr StringLiteral ObjC1ClassNamePrefix = ".objc_class_name_";
constexpr StringLiteral ObjC2ClassNamePrefix = "_OBJC_CLASS_$_";
constexpr StringLiteral ObjC2MetaClassNamePrefix = "_OBJC_METACLASS_$_";
constexpr StringLiteral ObjC2EHTypePrefix = "_OBJC_EHTYPE_$_";
constexpr StringLiteral ObjC2IVarPrefix = "_OBJC_IVAR_$_";

using TargetList = SmallVector<Target, 5>;

// Keep containers that hold Targets sorted and uniqued.
template <typename C>
typename C::iterator addEntry(C &Container, const Target &Targ) {
  auto Iter =
      lower_bound(Container, Targ, [](const Target &LHS, const Target &RHS) {
        return LHS < RHS;
      });
  if ((Iter != std::end(Container)) && !(Targ < *Iter))
    return Iter;

  return Container.insert(Iter, Targ);
}
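
// Example (editor's sketch): addEntry keeps a TargetList sorted (by Target's
// operator<) and free of duplicates. The AK_arm64 / PLATFORM_MACOS enumerator
// names are assumptions from Architecture.def and Platform.h.
//
//   #include <cassert>
//   TargetList Targets;
//   addEntry(Targets, Target(AK_arm64, PLATFORM_MACOS));
//   addEntry(Targets, Target(AK_arm64, PLATFORM_MACOS)); // no-op: present
//   assert(Targets.size() == 1);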

class Symbol {
public:
  Symbol(SymbolKind Kind, StringRef Name, TargetList Targets, SymbolFlags Flags)
      : Name(Name), Targets(std::move(Targets)), Kind(Kind), Flags(Flags) {}

  void addTarget(Target InputTarget) { addEntry(Targets, InputTarget); }
  SymbolKind getKind() const { return Kind; }
  StringRef getName() const { return Name; }
  ArchitectureSet getArchitectures() const {
    return mapToArchitectureSet(Targets);
  }
  SymbolFlags getFlags() const { return Flags; }

  bool isWeakDefined() const {
    return (Flags & SymbolFlags::WeakDefined) == SymbolFlags::WeakDefined;
  }

  bool isWeakReferenced() const {
    return (Flags & SymbolFlags::WeakReferenced) == SymbolFlags::WeakReferenced;
  }

  bool isThreadLocalValue() const {
    return (Flags & SymbolFlags::ThreadLocalValue) ==
           SymbolFlags::ThreadLocalValue;
  }

  bool isUndefined() const {
    return (Flags & SymbolFlags::Undefined) == SymbolFlags::Undefined;
  }

  bool isReexported() const {
    return (Flags & SymbolFlags::Rexported) == SymbolFlags::Rexported;
  }

  bool isData() const {
    return (Flags & SymbolFlags::Data) == SymbolFlags::Data;
  }

  bool isText() const {
    return (Flags & SymbolFlags::Text) == SymbolFlags::Text;
  }

  using const_target_iterator = TargetList::const_iterator;
  using const_target_range = llvm::iterator_range<const_target_iterator>;
  const_target_range targets() const { return {Targets}; }

  using const_filtered_target_iterator =
      llvm::filter_iterator<const_target_iterator,
                            std::function<bool(const Target &)>>;
  using const_filtered_target_range =
      llvm::iterator_range<const_filtered_target_iterator>;
  const_filtered_target_range targets(ArchitectureSet architectures) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump(raw_ostream &OS) const;
  void dump() const { dump(llvm::errs()); }
#endif

  bool operator==(const Symbol &O) const;

  bool operator!=(const Symbol &O) const { return !(*this == O); }

  bool operator<(const Symbol &O) const {
    return std::tie(Name, Kind, Targets, Flags) <
           std::tie(O.Name, O.Kind, O.Targets, O.Flags);
  }

private:
  StringRef Name;
  TargetList Targets;
  SymbolKind Kind;
  SymbolFlags Flags;
};

} // end namespace MachO.
} // end namespace llvm.

#endif // LLVM_TEXTAPI_SYMBOL_H
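
// Example (editor's sketch): SymbolFlags is a bitmask enum
// (LLVM_MARK_AS_BITMASK_ENUM above), so flags combine with | and the is*()
// helpers test individual bits.
//
//   #include <cassert>
//   using namespace llvm::MachO;
//   SymbolFlags F = SymbolFlags::WeakDefined | SymbolFlags::Data;
//   Symbol S(SymbolKind::GlobalSymbol, "_x", /*Targets=*/{}, F);
//   assert(S.isWeakDefined() && S.isData() && !S.isUndefined());
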
// <copyright file="Program.cpp" company="Microsoft Corporation">
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
// </copyright>
// <license>
// The MIT License (MIT)
//
// Copyright (C) Microsoft Corporation. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// </license>

#pragma once

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
#endif

// Constants
//
#ifndef E_NOTFOUND
#define E_NOTFOUND HRESULT_FROM_WIN32(ERROR_NOT_FOUND)
#endif

#ifndef E_FILENOTFOUND
#define E_FILENOTFOUND HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)
#endif

// Enumerations
//
/// <summary>
/// The state of an instance.
/// </summary>
enum InstanceState : unsigned {
  /// <summary>
  /// The instance state has not been determined.
  /// </summary>
  eNone = 0,

  /// <summary>
  /// The instance installation path exists.
  /// </summary>
  eLocal = 1,

  /// <summary>
  /// A product is registered to the instance.
  /// </summary>
  eRegistered = 2,

  /// <summary>
  /// No reboot is required for the instance.
  /// </summary>
  eNoRebootRequired = 4,

  /// <summary>
  /// The instance represents a complete install.
  /// </summary>
  eComplete = MAXUINT,
};

// Forward interface declarations
//
#ifndef __ISetupInstance_FWD_DEFINED__
#define __ISetupInstance_FWD_DEFINED__
typedef struct ISetupInstance ISetupInstance;
#endif

#ifndef __ISetupInstance2_FWD_DEFINED__
#define __ISetupInstance2_FWD_DEFINED__
typedef struct ISetupInstance2 ISetupInstance2;
#endif

#ifndef __IEnumSetupInstances_FWD_DEFINED__
#define __IEnumSetupInstances_FWD_DEFINED__
typedef struct IEnumSetupInstances IEnumSetupInstances;
#endif

#ifndef __ISetupConfiguration_FWD_DEFINED__
#define __ISetupConfiguration_FWD_DEFINED__
typedef struct ISetupConfiguration ISetupConfiguration;
#endif

#ifndef __ISetupConfiguration2_FWD_DEFINED__
#define __ISetupConfiguration2_FWD_DEFINED__
typedef struct ISetupConfiguration2 ISetupConfiguration2;
#endif

#ifndef __ISetupPackageReference_FWD_DEFINED__
#define __ISetupPackageReference_FWD_DEFINED__
typedef struct ISetupPackageReference ISetupPackageReference;
#endif

#ifndef __ISetupHelper_FWD_DEFINED__
#define __ISetupHelper_FWD_DEFINED__
typedef struct ISetupHelper ISetupHelper;
#endif

// Forward class declarations
//
#ifndef __SetupConfiguration_FWD_DEFINED__
#define __SetupConfiguration_FWD_DEFINED__

#ifdef __cplusplus
typedef class SetupConfiguration SetupConfiguration;
#endif

#endif

#ifdef __cplusplus
extern "C" {
#endif

// Interface definitions
//
EXTERN_C const IID IID_ISetupInstance;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// Information about an instance of a product.
/// </summary>
struct DECLSPEC_UUID("B41463C3-8866-43B5-BC33-2B0676F7F42E")
    DECLSPEC_NOVTABLE ISetupInstance : public IUnknown {
  /// <summary>
  /// Gets the instance identifier (should match the name of the parent instance
  /// directory).
  /// </summary>
  /// <param name="pbstrInstanceId">The instance identifier.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist.</returns>
  STDMETHOD(GetInstanceId)(_Out_ BSTR *pbstrInstanceId) = 0;

  /// <summary>
  /// Gets the local date and time when the installation was originally
  /// installed.
  /// </summary>
  /// <param name="pInstallDate">The local date and time when the installation
  /// was originally installed.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(GetInstallDate)(_Out_ LPFILETIME pInstallDate) = 0;

  /// <summary>
  /// Gets the unique name of the installation, often indicating the branch and
  /// other information used for telemetry.
  /// </summary>
  /// <param name="pbstrInstallationName">The unique name of the installation,
  /// often indicating the branch and other information used for
  /// telemetry.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(GetInstallationName)(_Out_ BSTR *pbstrInstallationName) = 0;

  /// <summary>
  /// Gets the path to the installation root of the product.
  /// </summary>
  /// <param name="pbstrInstallationPath">The path to the installation root of
  /// the product.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(GetInstallationPath)(_Out_ BSTR *pbstrInstallationPath) = 0;

  /// <summary>
  /// Gets the version of the product installed in this instance.
  /// </summary>
  /// <param name="pbstrInstallationVersion">The version of the product
  /// installed in this instance.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(GetInstallationVersion)(_Out_ BSTR *pbstrInstallationVersion) = 0;

  /// <summary>
  /// Gets the display name (title) of the product installed in this instance.
  /// </summary>
  /// <param name="lcid">The LCID for the display name.</param>
  /// <param name="pbstrDisplayName">The display name (title) of the product
  /// installed in this instance.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(GetDisplayName)(_In_ LCID lcid, _Out_ BSTR *pbstrDisplayName) = 0;

  /// <summary>
  /// Gets the description of the product installed in this instance.
  /// </summary>
  /// <param name="lcid">The LCID for the description.</param>
  /// <param name="pbstrDescription">The description of the product installed in
  /// this instance.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(GetDescription)(_In_ LCID lcid, _Out_ BSTR *pbstrDescription) = 0;

  /// <summary>
  /// Resolves the optional relative path to the root path of the instance.
  /// </summary>
  /// <param name="pwszRelativePath">A relative path within the instance to
  /// resolve, or NULL to get the root path.</param>
  /// <param name="pbstrAbsolutePath">The full path to the optional relative
  /// path within the instance. If the relative path is NULL, the root path will
  /// always terminate in a backslash.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// property is not defined.</returns>
  STDMETHOD(ResolvePath)
  (_In_opt_z_ LPCOLESTR pwszRelativePath, _Out_ BSTR *pbstrAbsolutePath) = 0;
};
#endif

EXTERN_C const IID IID_ISetupInstance2;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// Information about an instance of a product.
/// </summary>
struct DECLSPEC_UUID("89143C9A-05AF-49B0-B717-72E218A2185C")
    DECLSPEC_NOVTABLE ISetupInstance2 : public ISetupInstance {
  /// <summary>
  /// Gets the state of the instance.
  /// </summary>
  /// <param name="pState">The state of the instance.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist.</returns>
  STDMETHOD(GetState)(_Out_ InstanceState *pState) = 0;

  /// <summary>
  /// Gets an array of package references registered to the instance.
  /// </summary>
  /// <param name="ppsaPackages">Pointer to an array of <see
  /// cref="ISetupPackageReference"/>.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// packages property is not defined.</returns>
  STDMETHOD(GetPackages)(_Out_ LPSAFEARRAY *ppsaPackages) = 0;

  /// <summary>
  /// Gets a pointer to the <see cref="ISetupPackageReference"/> that represents
  /// the registered product.
  /// </summary>
  /// <param name="ppPackage">Pointer to an instance of <see
  /// cref="ISetupPackageReference"/>. This may be NULL if <see
  /// cref="GetState"/> does not return <see cref="eComplete"/>.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
  /// packages property is not defined.</returns>
  STDMETHOD(GetProduct)
  (_Outptr_result_maybenull_ ISetupPackageReference **ppPackage) = 0;

  /// <summary>
  /// Gets the relative path to the product application, if available.
  /// </summary>
  /// <param name="pbstrProductPath">The relative path to the product
  /// application, if available.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_FILENOTFOUND if the instance state does not exist.</returns>
  STDMETHOD(GetProductPath)
  (_Outptr_result_maybenull_ BSTR *pbstrProductPath) = 0;
};
#endif

EXTERN_C const IID IID_IEnumSetupInstances;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// An enumerator of installed <see cref="ISetupInstance"/> objects.
/// </summary>
struct DECLSPEC_UUID("6380BCFF-41D3-4B2E-8B2E-BF8A6810C848")
    DECLSPEC_NOVTABLE IEnumSetupInstances : public IUnknown {
  /// <summary>
  /// Retrieves the next set of product instances in the enumeration sequence.
  /// </summary>
  /// <param name="celt">The number of product instances to retrieve.</param>
  /// <param name="rgelt">A pointer to an array of <see
  /// cref="ISetupInstance"/>.</param>
  /// <param name="pceltFetched">A pointer to the number of product instances
  /// retrieved. If celt is 1 this parameter may be NULL.</param>
  /// <returns>S_OK if the requested number of elements was fetched, S_FALSE if
  /// nothing was fetched (at end of enumeration), E_INVALIDARG if celt is
  /// greater than 1 and pceltFetched is NULL, or E_OUTOFMEMORY if an <see
  /// cref="ISetupInstance"/> could not be allocated.</returns>
  STDMETHOD(Next)
  (_In_ ULONG celt, _Out_writes_to_(celt, *pceltFetched) ISetupInstance **rgelt,
   _Out_opt_ _Deref_out_range_(0, celt) ULONG *pceltFetched) = 0;

  /// <summary>
  /// Skips the next set of product instances in the enumeration sequence.
  /// </summary>
  /// <param name="celt">The number of product instances to skip.</param>
  /// <returns>S_OK if the requested number of elements could be skipped;
  /// otherwise, S_FALSE.</returns>
  STDMETHOD(Skip)(_In_ ULONG celt) = 0;

  /// <summary>
  /// Resets the enumeration sequence to the beginning.
  /// </summary>
  /// <returns>Always returns S_OK.</returns>
  STDMETHOD(Reset)(void) = 0;

  /// <summary>
  /// Creates a new enumeration object in the same state as the current
  /// enumeration object: the new object points to the same place in the
  /// enumeration sequence.
  /// </summary>
  /// <param name="ppenum">A pointer to a pointer to a new <see
  /// cref="IEnumSetupInstances"/> interface. If the method fails, this
  /// parameter is undefined.</param>
  /// <returns>S_OK if a clone was returned; otherwise, E_OUTOFMEMORY.</returns>
  STDMETHOD(Clone)(_Deref_out_opt_ IEnumSetupInstances **ppenum) = 0;
};
#endif

EXTERN_C const IID IID_ISetupConfiguration;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// Gets information about product instances set up on the machine.
/// </summary>
struct DECLSPEC_UUID("42843719-DB4C-46C2-8E7C-64F1816EFD5B")
    DECLSPEC_NOVTABLE ISetupConfiguration : public IUnknown {
  /// <summary>
  /// Enumerates all completed product instances installed.
  /// </summary>
  /// <param name="ppEnumInstances">An enumeration of completed, installed
  /// product instances.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(EnumInstances)(_Out_ IEnumSetupInstances **ppEnumInstances) = 0;

  /// <summary>
  /// Gets the instance for the current process path.
  /// </summary>
  /// <param name="ppInstance">The instance for the current process
  /// path.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_NOTFOUND if the instance for the current process path is not
  /// found.</returns>
  STDMETHOD(GetInstanceForCurrentProcess)
  (_Out_ ISetupInstance **ppInstance) = 0;

  /// <summary>
  /// Gets the instance for the given path.
  /// </summary>
  /// <param name="ppInstance">The instance for the given path.</param>
  /// <returns>The instance for the given path, or E_NOTFOUND if not
  /// found.</returns>
  STDMETHOD(GetInstanceForPath)
  (_In_z_ LPCWSTR wzPath, _Out_ ISetupInstance **ppInstance) = 0;
};
#endif

EXTERN_C const IID IID_ISetupConfiguration2;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// Gets information about product instances.
/// </summary>
struct DECLSPEC_UUID("26AAB78C-4A60-49D6-AF3B-3C35BC93365D")
    DECLSPEC_NOVTABLE ISetupConfiguration2 : public ISetupConfiguration {
  /// <summary>
  /// Enumerates all product instances.
  /// </summary>
  /// <param name="ppEnumInstances">An enumeration of all product
  /// instances.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(EnumAllInstances)(_Out_ IEnumSetupInstances **ppEnumInstances) = 0;
};
#endif

EXTERN_C const IID IID_ISetupPackageReference;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// A reference to a package.
/// </summary>
struct DECLSPEC_UUID("da8d8a16-b2b6-4487-a2f1-594ccccd6bf5")
    DECLSPEC_NOVTABLE ISetupPackageReference : public IUnknown {
  /// <summary>
  /// Gets the general package identifier.
  /// </summary>
  /// <param name="pbstrId">The general package identifier.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(GetId)(_Out_ BSTR *pbstrId) = 0;

  /// <summary>
  /// Gets the version of the package.
  /// </summary>
  /// <param name="pbstrVersion">The version of the package.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(GetVersion)(_Out_ BSTR *pbstrVersion) = 0;

  /// <summary>
  /// Gets the target process architecture of the package.
  /// </summary>
  /// <param name="pbstrChip">The target process architecture of the
  /// package.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(GetChip)(_Out_ BSTR *pbstrChip) = 0;

  /// <summary>
  /// Gets the language and optional region identifier.
  /// </summary>
  /// <param name="pbstrLanguage">The language and optional region
  /// identifier.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(GetLanguage)(_Out_ BSTR *pbstrLanguage) = 0;

  /// <summary>
  /// Gets the build branch of the package.
  /// </summary>
  /// <param name="pbstrBranch">The build branch of the package.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(GetBranch)(_Out_ BSTR *pbstrBranch) = 0;

  /// <summary>
  /// Gets the type of the package.
  /// </summary>
  /// <param name="pbstrType">The type of the package.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(GetType)(_Out_ BSTR *pbstrType) = 0;

  /// <summary>
  /// Gets the unique identifier consisting of all defined tokens.
  /// </summary>
  /// <param name="pbstrUniqueId">The unique identifier consisting of all
  /// defined tokens.</param>
  /// <returns>Standard HRESULT indicating success or failure, including
  /// E_UNEXPECTED if no Id was defined (required).</returns>
  STDMETHOD(GetUniqueId)(_Out_ BSTR *pbstrUniqueId) = 0;
};
#endif

EXTERN_C const IID IID_ISetupHelper;

#if defined(__cplusplus) && !defined(CINTERFACE)
/// <summary>
/// Helper functions.
/// </summary>
/// <remarks>
/// You can query for this interface from the <see cref="SetupConfiguration"/>
/// class.
/// </remarks>
struct DECLSPEC_UUID("42b21b78-6192-463e-87bf-d577838f1d5c")
    DECLSPEC_NOVTABLE ISetupHelper : public IUnknown {
  /// <summary>
  /// Parses a dotted quad version string into a 64-bit unsigned integer.
  /// </summary>
  /// <param name="pwszVersion">The dotted quad version string to parse, e.g.
  /// 1.2.3.4.</param>
  /// <param name="pullVersion">A 64-bit unsigned integer representing the
  /// version. You can compare this to other versions.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(ParseVersion)
  (_In_ LPCOLESTR pwszVersion, _Out_ PULONGLONG pullVersion) = 0;

  /// <summary>
  /// Parses a dotted quad version range string into a pair of 64-bit unsigned
  /// integers.
  /// </summary>
  /// <param name="pwszVersionRange">The string containing 1 or 2 dotted quad
  /// version strings to parse, e.g. [1.0,), which means 1.0.0.0 or newer.</param>
  /// <param name="pullMinVersion">A 64-bit unsigned integer representing the
  /// minimum version, which may be 0. You can compare this to other
  /// versions.</param>
  /// <param name="pullMaxVersion">A 64-bit unsigned integer representing the
  /// maximum version, which may be MAXULONGLONG. You can compare this to other
  /// versions.</param>
  /// <returns>Standard HRESULT indicating success or failure.</returns>
  STDMETHOD(ParseVersionRange)
  (_In_ LPCOLESTR pwszVersionRange, _Out_ PULONGLONG pullMinVersion,
   _Out_ PULONGLONG pullMaxVersion) = 0;
};
#endif

// Class declarations
//
EXTERN_C const CLSID CLSID_SetupConfiguration;

#ifdef __cplusplus
/// <summary>
/// This class implements <see cref="ISetupConfiguration"/>, <see
/// cref="ISetupConfiguration2"/>, and <see cref="ISetupHelper"/>.
/// </summary>
class DECLSPEC_UUID("177F0C4A-1CD3-4DE7-A32C-71DBBB9FA36D") SetupConfiguration;
#endif

// Function declarations
//
/// <summary>
/// Gets an <see cref="ISetupConfiguration"/> that provides information about
/// product instances installed on the machine.
/// </summary>
/// <param name="ppConfiguration">The <see cref="ISetupConfiguration"/> that
/// provides information about product instances installed on the
/// machine.</param>
/// <param name="pReserved">Reserved for future use.</param>
/// <returns>Standard HRESULT indicating success or failure.</returns>
STDMETHODIMP GetSetupConfiguration(_Out_ ISetupConfiguration **ppConfiguration,
                                   _Reserved_ LPVOID pReserved);

#ifdef __cplusplus
}
#endif

#ifdef __clang__
#pragma clang diagnostic pop
#endif
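
// Example (editor's sketch): the canonical enumeration loop over installed
// instances. COM initialization and full error handling are elided; this
// assumes CoInitializeEx has already succeeded.
//
//   void enumerateInstances() {
//     ISetupConfiguration *Config = nullptr;
//     if (FAILED(CoCreateInstance(CLSID_SetupConfiguration, nullptr, CLSCTX_ALL,
//                                 IID_ISetupConfiguration, (LPVOID *)&Config)))
//       return;
//     IEnumSetupInstances *Enum = nullptr;
//     if (SUCCEEDED(Config->EnumInstances(&Enum))) {
//       ISetupInstance *Inst = nullptr;
//       ULONG Fetched = 0;
//       while (Enum->Next(1, &Inst, &Fetched) == S_OK && Fetched == 1) {
//         BSTR InstallPath = nullptr;
//         if (SUCCEEDED(Inst->GetInstallationPath(&InstallPath)))
//           SysFreeString(InstallPath); // use the path before freeing it
//         Inst->Release();
//       }
//       Enum->Release();
//     }
//     Config->Release();
//   }
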
//===-- MSVCPaths.h - MSVC path-parsing helpers -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_WINDOWSDRIVER_MSVCPATHS_H
#define LLVM_WINDOWSDRIVER_MSVCPATHS_H

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/TargetParser/Triple.h"
#include <optional>
#include <string>

namespace llvm {

namespace vfs {
class FileSystem;
}

enum class SubDirectoryType {
  Bin,
  Include,
  Lib,
};

enum class ToolsetLayout {
  OlderVS,
  VS2017OrNewer,
  DevDivInternal,
};

// Windows SDKs and VC Toolchains group their contents into subdirectories based
// on the target architecture. This function converts an llvm::Triple::ArchType
// to the corresponding subdirectory name.
const char *archToWindowsSDKArch(llvm::Triple::ArchType Arch);

// Similar to the above function, but for Visual Studios before VS2017.
const char *archToLegacyVCArch(llvm::Triple::ArchType Arch);

// Similar to the above function, but for DevDiv internal builds.
const char *archToDevDivInternalArch(llvm::Triple::ArchType Arch);

bool appendArchToWindowsSDKLibPath(int SDKMajor, llvm::SmallString<128> LibPath,
                                   llvm::Triple::ArchType Arch,
                                   std::string &path);

// Get the path to a specific subdirectory in the current toolchain for
// a given target architecture.
// VS2017 changed the VC toolchain layout, so this should be used instead
// of hardcoding paths.
std::string getSubDirectoryPath(SubDirectoryType Type, ToolsetLayout VSLayout,
                                const std::string &VCToolChainPath,
                                llvm::Triple::ArchType TargetArch,
                                llvm::StringRef SubdirParent = "");

// Check if the Include path of a specified version of Visual Studio contains
// specific header files. If not, they are probably shipped with Universal CRT.
bool useUniversalCRT(ToolsetLayout VSLayout, const std::string &VCToolChainPath,
                     llvm::Triple::ArchType TargetArch,
                     llvm::vfs::FileSystem &VFS);

/// Get Windows SDK installation directory.
bool getWindowsSDKDir(vfs::FileSystem &VFS,
                      std::optional<llvm::StringRef> WinSdkDir,
                      std::optional<llvm::StringRef> WinSdkVersion,
                      std::optional<llvm::StringRef> WinSysRoot,
                      std::string &Path, int &Major,
                      std::string &WindowsSDKIncludeVersion,
                      std::string &WindowsSDKLibVersion);

bool getUniversalCRTSdkDir(vfs::FileSystem &VFS,
                           std::optional<llvm::StringRef> WinSdkDir,
                           std::optional<llvm::StringRef> WinSdkVersion,
                           std::optional<llvm::StringRef> WinSysRoot,
                           std::string &Path, std::string &UCRTVersion);

// Check command line arguments to try and find a toolchain.
bool findVCToolChainViaCommandLine(
    vfs::FileSystem &VFS, std::optional<llvm::StringRef> VCToolsDir,
    std::optional<llvm::StringRef> VCToolsVersion,
    std::optional<llvm::StringRef> WinSysRoot, std::string &Path,
    ToolsetLayout &VSLayout);

// Check various environment variables to try and find a toolchain.
bool findVCToolChainViaEnvironment(vfs::FileSystem &VFS, std::string &Path,
                                   ToolsetLayout &VSLayout);

// Query the Setup Config server for installs, then pick the newest version
// and find its default VC toolchain. If `VCToolsVersion` is specified, that
// version is preferred over the latest version.
//
// This is the preferred way to discover new Visual Studios, as they're no
// longer listed in the registry.
bool
findVCToolChainViaSetupConfig(vfs::FileSystem &VFS,
                              std::optional<llvm::StringRef> VCToolsVersion,
                              std::string &Path, ToolsetLayout &VSLayout);

// Look in the registry for Visual Studio installs, and use that to get
// a toolchain path. VS2017 and newer don't get added to the registry.
// So if we find something here, we know that it's an older version.
bool findVCToolChainViaRegistry(std::string &Path, ToolsetLayout &VSLayout);

} // namespace llvm

#endif
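
// Example (editor's sketch): a discovery cascade mirroring the order of
// preference suggested by the comments above (command line, environment,
// setup config, then registry). Passing std::nullopt means "no explicit
// override was given".
//
//   #include "llvm/Support/VirtualFileSystem.h"
//   #include "llvm/WindowsDriver/MSVCPaths.h"
//
//   bool findToolChain(llvm::vfs::FileSystem &VFS, std::string &Path,
//                      llvm::ToolsetLayout &Layout) {
//     return llvm::findVCToolChainViaCommandLine(VFS, std::nullopt,
//                                                std::nullopt, std::nullopt,
//                                                Path, Layout) ||
//            llvm::findVCToolChainViaEnvironment(VFS, Path, Layout) ||
//            llvm::findVCToolChainViaSetupConfig(VFS, std::nullopt, Path,
//                                                Layout) ||
//            llvm::findVCToolChainViaRegistry(Path, Layout);
//   }
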
//===- llvm/Testing/Support/Error.h ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TESTING_SUPPORT_ERROR_H
#define LLVM_TESTING_SUPPORT_ERROR_H

#include "llvm/Support/Error.h"
#include "llvm/Testing/Support/SupportHelpers.h"

#include "gmock/gmock.h"
#include <ostream>

namespace llvm {
namespace detail {
ErrorHolder TakeError(Error Err);

template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &Exp) {
  return {TakeError(Exp.takeError()), Exp};
}

template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &&Exp) {
  return TakeExpected(Exp);
}

template <typename T>
class ValueMatchesMono
    : public testing::MatcherInterface<const ExpectedHolder<T> &> {
public:
  explicit ValueMatchesMono(const testing::Matcher<T> &Matcher)
      : Matcher(Matcher) {}

  bool MatchAndExplain(const ExpectedHolder<T> &Holder,
                       testing::MatchResultListener *listener) const override {
    if (!Holder.Success())
      return false;

    bool result = Matcher.MatchAndExplain(*Holder.Exp, listener);

    if (result || !listener->IsInterested())
      return result;
    *listener << "(";
    Matcher.DescribeNegationTo(listener->stream());
    *listener << ")";
    return result;
  }

  void DescribeTo(std::ostream *OS) const override {
    *OS << "succeeded with value (";
    Matcher.DescribeTo(OS);
    *OS << ")";
  }

  void DescribeNegationTo(std::ostream *OS) const override {
    *OS << "did not succeed or value (";
    Matcher.DescribeNegationTo(OS);
    *OS << ")";
  }

private:
  testing::Matcher<T> Matcher;
};

template<typename M>
class ValueMatchesPoly {
public:
  explicit ValueMatchesPoly(const M &Matcher) : Matcher(Matcher) {}

  template <typename T>
  operator testing::Matcher<const ExpectedHolder<T> &>() const {
    return MakeMatcher(
        new ValueMatchesMono<T>(testing::SafeMatcherCast<T>(Matcher)));
  }

private:
  M Matcher;
};

template <typename InfoT>
class ErrorMatchesMono : public testing::MatcherInterface<const ErrorHolder &> {
public:
  explicit ErrorMatchesMono(std::optional<testing::Matcher<InfoT &>> Matcher)
      : Matcher(std::move(Matcher)) {}

  bool MatchAndExplain(const ErrorHolder &Holder,
                       testing::MatchResultListener *listener) const override {
    if (Holder.Success())
      return false;

    if (Holder.Infos.size() > 1) {
      *listener << "multiple errors";
      return false;
    }

    auto &Info = *Holder.Infos[0];
    if (!Info.isA<InfoT>()) {
      *listener << "Error was not of given type";
      return false;
    }

    if (!Matcher)
      return true;

    return Matcher->MatchAndExplain(static_cast<InfoT &>(Info), listener);
  }

  void DescribeTo(std::ostream *OS) const override {
    *OS << "failed with Error of given type";
    if (Matcher) {
      *OS << " and the error ";
      Matcher->DescribeTo(OS);
    }
  }

  void DescribeNegationTo(std::ostream *OS) const override {
    *OS << "succeeded or did not fail with the error of given type";
    if (Matcher) {
      *OS << " or the error ";
      Matcher->DescribeNegationTo(OS);
    }
  }

private:
  std::optional<testing::Matcher<InfoT &>> Matcher;
};

class ErrorMessageMatches
    : public testing::MatcherInterface<const ErrorHolder &> {
public:
  explicit ErrorMessageMatches(
      testing::Matcher<std::vector<std::string>> Matcher)
      : Matcher(std::move(Matcher)) {}

  bool MatchAndExplain(const ErrorHolder &Holder,
                       testing::MatchResultListener *listener) const override {
    std::vector<std::string> Messages;
    Messages.reserve(Holder.Infos.size());
    for (const std::shared_ptr<ErrorInfoBase> &Info : Holder.Infos)
      Messages.push_back(Info->message());

    return Matcher.MatchAndExplain(Messages, listener);
  }

  void DescribeTo(std::ostream *OS) const override {
    *OS << "failed with Error whose message ";
    Matcher.DescribeTo(OS);
  }

  void DescribeNegationTo(std::ostream *OS) const override {
    *OS << "failed with an Error whose message ";
    Matcher.DescribeNegationTo(OS);
  }

private:
  testing::Matcher<std::vector<std::string>> Matcher;
};
} // namespace detail

#define EXPECT_THAT_ERROR(Err, Matcher)                                        \
  EXPECT_THAT(llvm::detail::TakeError(Err), Matcher)
#define ASSERT_THAT_ERROR(Err, Matcher)                                        \
  ASSERT_THAT(llvm::detail::TakeError(Err), Matcher)

/// Helper macro for checking the result of an 'Expected<T>'
///
///   @code{.cpp}
///     // function to be tested
///     Expected<int> myDivide(int A, int B);
///
///     TEST(myDivideTests, GoodAndBad) {
///       // test good case
///       // if you only care about success or failure:
///       EXPECT_THAT_EXPECTED(myDivide(10, 5), Succeeded());
///       // if you also care about the value:
///       EXPECT_THAT_EXPECTED(myDivide(10, 5), HasValue(2));
///
///       // test the error case
///       EXPECT_THAT_EXPECTED(myDivide(10, 0), Failed());
///       // also check the error message
///       EXPECT_THAT_EXPECTED(myDivide(10, 0),
///           FailedWithMessage("B must not be zero!"));
///     }
///   @endcode
#define EXPECT_THAT_EXPECTED(Err, Matcher)                                     \
  EXPECT_THAT(llvm::detail::TakeExpected(Err), Matcher)
#define ASSERT_THAT_EXPECTED(Err, Matcher)                                     \
  ASSERT_THAT(llvm::detail::TakeExpected(Err), Matcher)

MATCHER(Succeeded, "") { return arg.Success(); }
MATCHER(Failed, "") { return !arg.Success(); }

template <typename InfoT>
testing::Matcher<const detail::ErrorHolder &> Failed() {
  return MakeMatcher(new detail::ErrorMatchesMono<InfoT>(std::nullopt));
}

template <typename InfoT, typename M>
testing::Matcher<const detail::ErrorHolder &> Failed(M Matcher) {
  return MakeMatcher(new detail::ErrorMatchesMono<InfoT>(
      testing::SafeMatcherCast<InfoT &>(Matcher)));
}

template <typename... M>
testing::Matcher<const detail::ErrorHolder &> FailedWithMessage(M... Matcher) {
  static_assert(sizeof...(M) > 0);
  return MakeMatcher(
      new detail::ErrorMessageMatches(testing::ElementsAre(Matcher...)));
}

template <typename M>
testing::Matcher<const detail::ErrorHolder &> FailedWithMessageArray(M Matcher) {
  return MakeMatcher(new detail::ErrorMessageMatches(Matcher));
}

template <typename M>
detail::ValueMatchesPoly<M> HasValue(M Matcher) {
  return detail::ValueMatchesPoly<M>(Matcher);
}

} // namespace llvm

#endif
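
// Example (editor's sketch): the plain-Error counterpart to the Expected<T>
// example documented above. createStringError and inconvertibleErrorCode come
// from llvm/Support/Error.h.
//
//   #include "llvm/Testing/Support/Error.h"
//
//   TEST(ErrorMatchers, Basics) {
//     EXPECT_THAT_ERROR(llvm::Error::success(), llvm::Succeeded());
//     EXPECT_THAT_ERROR(
//         llvm::createStringError(llvm::inconvertibleErrorCode(), "boom"),
//         llvm::FailedWithMessage("boom"));
//   }
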
//===- Testing/Support/SupportHelpers.h -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
#define LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_os_ostream.h"
#include "gmock/gmock-matchers.h"
#include "gtest/gtest-printers.h"

#include <optional>
#include <string>

namespace llvm {
namespace detail {
struct ErrorHolder {
  std::vector<std::shared_ptr<ErrorInfoBase>> Infos;

  bool Success() const { return Infos.empty(); }
};

template <typename T> struct ExpectedHolder : public ErrorHolder {
  ExpectedHolder(ErrorHolder Err, Expected<T> &Exp)
      : ErrorHolder(std::move(Err)), Exp(Exp) {}

  Expected<T> &Exp;
};

inline void PrintTo(const ErrorHolder &Err, std::ostream *Out) {
  raw_os_ostream OS(*Out);
  OS << (Err.Success() ? "succeeded" : "failed");
  if (!Err.Success()) {
    const char *Delim = "  (";
    for (const auto &Info : Err.Infos) {
      OS << Delim;
      Delim = "; ";
      Info->log(OS);
    }
    OS << ")";
  }
}

template <typename T>
void PrintTo(const ExpectedHolder<T> &Item, std::ostream *Out) {
  if (Item.Success()) {
    *Out << "succeeded with value " << ::testing::PrintToString(*Item.Exp);
  } else {
    PrintTo(static_cast<const ErrorHolder &>(Item), Out);
  }
}

template <class InnerMatcher> class ValueIsMatcher {
public:
  explicit ValueIsMatcher(InnerMatcher ValueMatcher)
      : ValueMatcher(ValueMatcher) {}

  template <class T>
  operator ::testing::Matcher<const std::optional<T> &>() const {
    return ::testing::MakeMatcher(
        new Impl<T>(::testing::SafeMatcherCast<T>(ValueMatcher)));
  }

  template <class T, class O = std::optional<T>>
  class Impl : public ::testing::MatcherInterface<const O &> {
  public:
    explicit Impl(const ::testing::Matcher<T> &ValueMatcher)
        : ValueMatcher(ValueMatcher) {}

    bool MatchAndExplain(const O &Input,
                         testing::MatchResultListener *L) const override {
      return Input && ValueMatcher.MatchAndExplain(*Input, L);
    }

    void DescribeTo(std::ostream *OS) const override {
      *OS << "has a value that ";
      ValueMatcher.DescribeTo(OS);
    }
    void DescribeNegationTo(std::ostream *OS) const override {
      *OS << "does not have a value that ";
      ValueMatcher.DescribeTo(OS);
    }

  private:
    testing::Matcher<T> ValueMatcher;
  };

private:
  InnerMatcher ValueMatcher;
};
} // namespace detail

/// Matches an std::optional<T> with a value that conforms to an inner matcher.
/// To match std::nullopt you could use Eq(std::nullopt).
template <class InnerMatcher>
detail::ValueIsMatcher<InnerMatcher> ValueIs(const InnerMatcher &ValueMatcher) {
  return detail::ValueIsMatcher<InnerMatcher>(ValueMatcher);
}
namespace unittest {

SmallString<128> getInputFileDirectory(const char *Argv0);

/// A RAII object that creates a temporary directory upon initialization and
/// removes it upon destruction.
class TempDir {
  SmallString<128> Path;

public:
  /// Creates a managed temporary directory.
  ///
  /// @param Name The name of the directory to create.
  /// @param Unique If true, the directory will be created using
  ///               llvm::sys::fs::createUniqueDirectory.
  explicit TempDir(StringRef Name, bool Unique = false) {
    std::error_code EC;
    if (Unique) {
      EC = llvm::sys::fs::createUniqueDirectory(Name, Path);
      if (!EC) {
        // Resolve any symlinks in the new directory.
        std::string UnresolvedPath(Path.str());
        EC = llvm::sys::fs::real_path(UnresolvedPath, Path);
      }
    } else {
      Path = Name;
      EC = llvm::sys::fs::create_directory(Path);
    }
    if (EC)
      Path.clear();
    EXPECT_FALSE(EC) << EC.message();
  }

  ~TempDir() {
    if (!Path.empty()) {
      EXPECT_FALSE(llvm::sys::fs::remove_directories(Path.str()));
    }
  }

  TempDir(const TempDir &) = delete;
  TempDir &operator=(const TempDir &) = delete;

  TempDir(TempDir &&) = default;
  TempDir &operator=(TempDir &&) = default;

  /// The path to the temporary directory.
  StringRef path() const { return Path; }

  /// The null-terminated C string pointing to the path.
  const char *c_str() { return Path.c_str(); }

  /// Creates a new path by appending the argument to the path of the managed
  /// directory using the native path separator.
  SmallString<128> path(StringRef component) const {
    SmallString<128> Result(Path);
    SmallString<128> ComponentToAppend(component);
    llvm::sys::path::native(ComponentToAppend);
    llvm::sys::path::append(Result, Twine(ComponentToAppend));
    return Result;
  }
};

/// A RAII object that creates a link upon initialization and
/// removes it upon destruction.
///
/// The link may be a soft or a hard link, depending on the platform.
class TempLink {
  SmallString<128> Path;

public:
  /// Creates a managed link at path Link pointing to Target.
  TempLink(StringRef Target, StringRef Link) {
    Path = Link;
    std::error_code EC = sys::fs::create_link(Target, Link);
    if (EC)
      Path.clear();
    EXPECT_FALSE(EC);
  }
  ~TempLink() {
    if (!Path.empty()) {
      EXPECT_FALSE(llvm::sys::fs::remove(Path.str()));
    }
  }

  TempLink(const TempLink &) = delete;
  TempLink &operator=(const TempLink &) = delete;

  TempLink(TempLink &&) = default;
  TempLink &operator=(TempLink &&) = default;

  /// The path to the link.
  StringRef path() const { return Path; }
};

/// A RAII object that creates a file upon initialization and
/// removes it upon destruction.
class TempFile {
  SmallString<128> Path;

public:
  /// Creates a managed file.
  ///
  /// @param Name The name of the file to create.
  /// @param Suffix The suffix ("extension") to append to the file name.
  /// @param Contents The string to write to the file.
  /// @param Unique If true, the file will be created using
  ///               llvm::sys::fs::createTemporaryFile.
  TempFile(StringRef Name, StringRef Suffix = "", StringRef Contents = "",
           bool Unique = false) {
    std::error_code EC;
    int fd;
    if (Unique) {
      EC = llvm::sys::fs::createTemporaryFile(Name, Suffix, fd, Path);
    } else {
      Path = Name;
      if (!Suffix.empty()) {
        Path.append(".");
        Path.append(Suffix);
      }
      EC = llvm::sys::fs::openFileForWrite(Path, fd);
    }
    EXPECT_FALSE(EC);
    raw_fd_ostream OS(fd, /*shouldClose*/ true);
    OS << Contents;
    OS.flush();
    EXPECT_FALSE(OS.error());
    if (EC || OS.error())
      Path.clear();
  }
  ~TempFile() {
    if (!Path.empty()) {
      EXPECT_FALSE(llvm::sys::fs::remove(Path.str()));
    }
  }

  TempFile(const TempFile &) = delete;
  TempFile &operator=(const TempFile &) = delete;

  TempFile(TempFile &&) = default;
  TempFile &operator=(TempFile &&) = default;

  /// The path to the file.
  StringRef path() const { return Path; }
};

} // namespace unittest
} // namespace llvm

#endif
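
// Example (editor's sketch): the RAII fixtures above in a gtest body; names
// and contents are illustrative.
//
//   #include "llvm/Testing/Support/SupportHelpers.h"
//
//   TEST(MyFeature, ReadsInput) {
//     llvm::unittest::TempDir Dir("my-feature-test", /*Unique=*/true);
//     llvm::unittest::TempFile Input(Dir.path("input").str(), "txt",
//                                    "some contents");
//     // ... exercise code under test against Input.path() ...
//   } // file and directory are removed when the fixtures go out of scope
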
//===--- Annotations.h - Annotated source code for tests ---------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TESTING_SUPPORT_ANNOTATIONS_H
#define LLVM_TESTING_SUPPORT_ANNOTATIONS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <tuple>
#include <vector>

namespace llvm {

class raw_ostream;

/// Annotations lets you mark points and ranges inside source code, for tests:
///
///    Annotations Example(R"cpp(
///       int complete() { x.pri^ }         // ^ indicates a point
///       void err() { [["hello" == 42]]; } // [[this is a range]]
///       $definition^class Foo{};          // points can be named: "definition"
///       $fail[[static_assert(false, "")]] // ranges can be named too: "fail"
///    )cpp");
///
///    StringRef Code = Example.code();             // annotations stripped.
///    std::vector<size_t> PP = Example.points();   // all unnamed points
///    size_t P = Example.point();                  // there must be exactly one
///    llvm::Range R = Example.range("fail");       // find named ranges
///
/// Points/ranges are coordinated into `code()` which is stripped of
/// annotations.
///
/// Ranges may be nested (and points can be inside ranges), but there's no way
/// to define general overlapping ranges.
///
/// FIXME: the choice of the marking syntax makes it impossible to represent
///        some of the C++ and Objective C constructs (including common ones
///        like C++ attributes). We can fix this by:
///          1. introducing an escaping mechanism for the special characters,
///          2. making characters for marking points and ranges configurable,
///          3. changing the syntax to something less commonly used,
///          4. ...
class Annotations {
public:
  /// Two offsets pointing to a continuous substring. End is not included, i.e.
  /// represents a half-open range.
  struct Range {
    size_t Begin = 0;
    size_t End = 0;

    friend bool operator==(const Range &L, const Range &R) {
      return std::tie(L.Begin, L.End) == std::tie(R.Begin, R.End);
    }
    friend bool operator!=(const Range &L, const Range &R) { return !(L == R); }
  };

  /// Parses the annotations from Text. Crashes if it's malformed.
  Annotations(llvm::StringRef Text);

  /// The input text with all annotations stripped.
  /// All points and ranges are relative to this stripped text.
  llvm::StringRef code() const { return Code; }

  /// Returns the position of the point marked by ^ (or $name^) in the text.
  /// Crashes if there isn't exactly one.
  size_t point(llvm::StringRef Name = "") const;
  /// Returns the position of all points marked by ^ (or $name^) in the text.
  /// Order matches the order within the text.
  std::vector<size_t> points(llvm::StringRef Name = "") const;

  /// Returns the location of the range marked by [[ ]] (or $name[[ ]]).
  /// Crashes if there isn't exactly one.
  Range range(llvm::StringRef Name = "") const;
  /// Returns the location of all ranges marked by [[ ]] (or $name[[ ]]).
  /// They are ordered by start position within the text.
  std::vector<Range> ranges(llvm::StringRef Name = "") const;

private:
  std::string Code;
  llvm::StringMap<llvm::SmallVector<size_t, 1>> Points;
  llvm::StringMap<llvm::SmallVector<Range, 1>> Ranges;
};

llvm::raw_ostream &operator<<(llvm::raw_ostream &O,
                              const llvm::Annotations::Range &R);

} // namespace llvm

#endif
//===--- Annotations.h - Annotated source code for tests ---------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TESTING_SUPPORT_ANNOTATIONS_H
#define LLVM_TESTING_SUPPORT_ANNOTATIONS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <tuple>
#include <vector>

namespace llvm {

class raw_ostream;

/// Annotations lets you mark points and ranges inside source code, for tests:
///
///    Annotations Example(R"cpp(
///       int complete() { x.pri^ }         // ^ indicates a point
///       void err() { [["hello" == 42]]; } // [[this is a range]]
///       $definition^class Foo{};          // points can be named: "definition"
///       $(foo)^class Foo{};               // ...or have a payload: "foo"
///       $definition(foo)^class Foo{};     // ...or both
///       $fail(runtime)[[assert(false)]]   // ranges can have names/payloads too
///    )cpp");
///
///    StringRef Code = Example.code();             // annotations stripped.
///    std::vector<size_t> PP = Example.points();   // all unnamed points
///    size_t P = Example.point();                  // there must be exactly one
///    llvm::Range R = Example.range("fail");       // find named ranges
///
/// Points/ranges are coordinated into `code()` which is stripped of
/// annotations.
///
/// Names consist of only alphanumeric characters or '_'.
/// Payloads can contain any character except '(' and ')'.
///
/// Ranges may be nested (and points can be inside ranges), but there's no way
/// to define general overlapping ranges.
///
/// FIXME: the choice of the marking syntax makes it impossible to represent
///        some of the C++ and Objective C constructs (including common ones
///        like C++ attributes). We can fix this by:
///          1. introducing an escaping mechanism for the special characters,
///          2. making characters for marking points and ranges configurable,
///          3. changing the syntax to something less commonly used,
///          4. ...
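///
/// A payload-access sketch (hypothetical test code; names are illustrative):
/// \code
///   Annotations Input("$decl(foo)^int x;");
///   auto [Pos, Payload] = Input.pointWithPayload("decl");
///   // Pos == 0 in Input.code(); Payload == "foo".
/// \endcode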
class Annotations {
public:
  /// Two offsets pointing to a contiguous substring. End is not included, i.e.
  /// represents a half-open range.
  struct Range {
    size_t Begin = 0;
    size_t End = 0;

    friend bool operator==(const Range &L, const Range &R) {
      return std::tie(L.Begin, L.End) == std::tie(R.Begin, R.End);
    }
    friend bool operator!=(const Range &L, const Range &R) { return !(L == R); }
  };

  /// Parses the annotations from Text. Crashes if it's malformed.
  Annotations(llvm::StringRef Text);

  /// The input text with all annotations stripped.
  /// All points and ranges are relative to this stripped text.
  llvm::StringRef code() const { return Code; }

  /// Returns the position of the point marked by ^ (or $name^) in the text.
  /// Crashes if there isn't exactly one.
  size_t point(llvm::StringRef Name = "") const;
  /// Returns the position of the point with \p Name and its payload (if any).
  std::pair<size_t, llvm::StringRef>
  pointWithPayload(llvm::StringRef Name = "") const;
  /// Returns the position of all points marked by ^ (or $name^) in the text.
  /// Order matches the order within the text.
  std::vector<size_t> points(llvm::StringRef Name = "") const;
  /// Returns the positions and payloads (if any) of all points named \p Name.
  std::vector<std::pair<size_t, llvm::StringRef>>
  pointsWithPayload(llvm::StringRef Name = "") const;
  /// Returns the mapping of all names of points marked in the text to their
  /// position. Unnamed points are mapped to the empty string. The positions are
  /// sorted.
  /// FIXME Remove this and expose `All` directly (currently used out-of-tree)
  llvm::StringMap<llvm::SmallVector<size_t, 1>> all_points() const;

  /// Returns the location of the range marked by [[ ]] (or $name[[ ]]).
  /// Crashes if there isn't exactly one.
  Range range(llvm::StringRef Name = "") const;
  /// Returns the location and payload of the range marked by [[ ]]
  /// (or $name(payload)[[ ]]). Crashes if there isn't exactly one.
  std::pair<Range, llvm::StringRef>
  rangeWithPayload(llvm::StringRef Name = "") const;
  /// Returns the location of all ranges marked by [[ ]] (or $name[[ ]]).
  /// They are ordered by start position within the text.
  std::vector<Range> ranges(llvm::StringRef Name = "") const;
  /// Returns the location of all ranges marked by [[ ]]
  /// (or $name(payload)[[ ]]).
  /// They are ordered by start position within the text.
  std::vector<std::pair<Range, llvm::StringRef>>
  rangesWithPayload(llvm::StringRef Name = "") const;
  /// Returns the mapping of all names of ranges marked in the text to their
  /// location. Unnamed ranges are mapped to the empty string. The ranges are
  /// sorted by their start position.
  llvm::StringMap<llvm::SmallVector<Range, 1>> all_ranges() const;

private:
  std::string Code;
  /// Either a Point (only Begin is set) or a Range (both Begin and End).
  struct Annotation {
    size_t Begin;
    size_t End = -1;
    bool isPoint() const { return End == size_t(-1); }
    llvm::StringRef Name;
    llvm::StringRef Payload;
  };
  std::vector<Annotation> All;
  // Values are the indices into All
  llvm::StringMap<llvm::SmallVector<size_t, 1>> Points;
  llvm::StringMap<llvm::SmallVector<size_t, 1>> Ranges;
};

llvm::raw_ostream &operator<<(llvm::raw_ostream &O,
                              const llvm::Annotations::Range &R);

} // namespace llvm

#endif
//===- llvm/Testing/ADT/StringMapEntry.h ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TESTING_ADT_STRINGMAPENTRY_H_
#define LLVM_TESTING_ADT_STRINGMAPENTRY_H_

#include "llvm/ADT/StringMapEntry.h"
#include "gmock/gmock.h"
#include <ostream>
#include <type_traits>

namespace llvm {
namespace detail {

template <typename T, typename = std::void_t<>>
struct CanOutputToOStream : std::false_type {};

template <typename T>
struct CanOutputToOStream<T, std::void_t<decltype(std::declval<std::ostream &>()
                                                  << std::declval<T>())>>
    : std::true_type {};
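
// Detection idiom: the partial specialization above is chosen (via
// std::void_t) only when `std::ostream << T` is well-formed; otherwise the
// false_type primary template applies.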

} // namespace detail

/// Support for printing to std::ostream, for use with e.g. producing more
/// useful error messages with Google Test.
template <typename T>
std::ostream &operator<<(std::ostream &OS, const StringMapEntry<T> &E) {
  OS << "{\"" << E.getKey().data() << "\": ";
  if constexpr (detail::CanOutputToOStream<decltype(E.getValue())>::value) {
    OS << E.getValue();
  } else {
    OS << "non-printable value";
  }
  return OS << "}";
}

namespace detail {

template <typename StringMapEntryT>
class StringMapEntryMatcherImpl
    : public testing::MatcherInterface<StringMapEntryT> {
public:
  using ValueT = typename std::remove_reference_t<StringMapEntryT>::ValueType;

  template <typename KeyMatcherT, typename ValueMatcherT>
  StringMapEntryMatcherImpl(KeyMatcherT KeyMatcherArg,
                            ValueMatcherT ValueMatcherArg)
      : KeyMatcher(
            testing::SafeMatcherCast<const std::string &>(KeyMatcherArg)),
        ValueMatcher(
            testing::SafeMatcherCast<const ValueT &>(ValueMatcherArg)) {}

  void DescribeTo(std::ostream *OS) const override {
    *OS << "has a string key that ";
    KeyMatcher.DescribeTo(OS);
    *OS << ", and has a value that ";
    ValueMatcher.DescribeTo(OS);
  }

  void DescribeNegationTo(std::ostream *OS) const override {
    *OS << "has a string key that ";
    KeyMatcher.DescribeNegationTo(OS);
    *OS << ", or has a value that ";
    ValueMatcher.DescribeNegationTo(OS);
  }

  bool
  MatchAndExplain(StringMapEntryT Entry,
                  testing::MatchResultListener *ResultListener) const override {
    testing::StringMatchResultListener KeyListener;
    if (!KeyMatcher.MatchAndExplain(Entry.getKey().data(), &KeyListener)) {
      *ResultListener << ("which has a string key " +
                          (KeyListener.str().empty() ? "that doesn't match"
                                                     : KeyListener.str()));
      return false;
    }
    testing::StringMatchResultListener ValueListener;
    if (!ValueMatcher.MatchAndExplain(Entry.getValue(), &ValueListener)) {
      *ResultListener << ("which has a value " + (ValueListener.str().empty()
                                                      ? "that doesn't match"
                                                      : ValueListener.str()));
      return false;
    }
    *ResultListener << "which is a match";
    return true;
  }

private:
  const testing::Matcher<const std::string &> KeyMatcher;
  const testing::Matcher<const ValueT &> ValueMatcher;
};

template <typename KeyMatcherT, typename ValueMatcherT>
class StringMapEntryMatcher {
public:
  StringMapEntryMatcher(KeyMatcherT KMArg, ValueMatcherT VMArg)
      : KM(std::move(KMArg)), VM(std::move(VMArg)) {}

  template <typename StringMapEntryT>
  operator testing::Matcher<StringMapEntryT>() const { // NOLINT
    return testing::Matcher<StringMapEntryT>(
        new StringMapEntryMatcherImpl<const StringMapEntryT &>(KM, VM));
  }

private:
  const KeyMatcherT KM;
  const ValueMatcherT VM;
};

} // namespace detail

/// Returns a gMock matcher that matches a `StringMapEntry` whose string key
/// matches `KeyMatcher`, and whose value matches `ValueMatcher`.
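///
/// A minimal sketch (assumes a gTest/gMock test; the map contents are
/// illustrative):
/// \code
///   llvm::StringMap<int> M;
///   M["zero"] = 0;
///   EXPECT_THAT(M, testing::Contains(IsStringMapEntry("zero", 0)));
/// \endcode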
template <typename KeyMatcherT, typename ValueMatcherT>
detail::StringMapEntryMatcher<KeyMatcherT, ValueMatcherT>
IsStringMapEntry(KeyMatcherT KM, ValueMatcherT VM) {
  return detail::StringMapEntryMatcher<KeyMatcherT, ValueMatcherT>(
      std::move(KM), std::move(VM));
}

} // namespace llvm

#endif
//===- llvm/Testing/ADT/StringMap.h ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TESTING_ADT_STRINGMAP_H_
#define LLVM_TESTING_ADT_STRINGMAP_H_

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Testing/ADT/StringMapEntry.h"
#include <ostream>
#include <sstream>

namespace llvm {

/// Support for printing to std::ostream, for use with e.g. producing more
/// useful error messages with Google Test.
template <typename T>
std::ostream &operator<<(std::ostream &OS, const StringMap<T> &M) {
  if (M.empty()) {
    return OS << "{ }";
  }

  std::vector<std::string> Lines;
  for (const auto &E : M) {
    std::ostringstream SS;
    SS << E << ",";
    Lines.push_back(SS.str());
  }
  llvm::sort(Lines);
  Lines.insert(Lines.begin(), "{");
  Lines.insert(Lines.end(), "}");

  return OS << llvm::formatv("{0:$[\n]}",
                             make_range(Lines.begin(), Lines.end()))
                   .str();
}
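
// Illustrative output: a StringMap<int> holding {"a": 1} and {"b": 2} prints
// as
//   {
//   {"a": 1},
//   {"b": 2},
//   }
// with entries sorted by their printed form so test failures are
// deterministic.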

} // namespace llvm

#endif
//===- DWARFDebugArangeSet.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGARANGESET_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGARANGESET_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {

class raw_ostream;
class DWARFDataExtractor;

class DWARFDebugArangeSet {
public:
  struct Header {
    /// The total length of the entries for that set, not including the length
    /// field itself.
    uint64_t Length;
    /// The DWARF format of the set.
    dwarf::DwarfFormat Format;
    /// The offset from the beginning of the .debug_info section of the
    /// compilation unit entry referenced by the table.
    uint64_t CuOffset;
    /// The DWARF version number.
    uint16_t Version;
    /// The size in bytes of an address on the target architecture. For segmented
    /// addressing, this is the size of the offset portion of the address.
    uint8_t AddrSize;
    /// The size in bytes of a segment descriptor on the target architecture.
    /// If the target system uses a flat address space, this value is 0.
    uint8_t SegSize;
  };

  struct Descriptor {
    uint64_t Address;
    uint64_t Length;

    uint64_t getEndAddress() const { return Address + Length; }
    void dump(raw_ostream &OS, uint32_t AddressSize) const;
  };

private:
  using DescriptorColl = std::vector<Descriptor>;
  using desc_iterator_range = iterator_range<DescriptorColl::const_iterator>;

  uint64_t Offset;
  Header HeaderData;
  DescriptorColl ArangeDescriptors;

public:
  DWARFDebugArangeSet() { clear(); }

  void clear();
  Error extract(DWARFDataExtractor data, uint64_t *offset_ptr,
                function_ref<void(Error)> WarningHandler);
  void dump(raw_ostream &OS) const;

  uint64_t getCompileUnitDIEOffset() const { return HeaderData.CuOffset; }

  const Header &getHeader() const { return HeaderData; }

  desc_iterator_range descriptors() const {
    return desc_iterator_range(ArangeDescriptors.begin(),
                               ArangeDescriptors.end());
  }
};
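
// An iteration sketch (assumes a set `Set` successfully extracted from
// .debug_aranges):
//   for (const auto &Desc : Set.descriptors())
//     ; // [Desc.Address, Desc.getEndAddress()) maps to the CU DIE at
//       // Set.getCompileUnitDIEOffset().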

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGARANGESET_H
//===- DWARFListTable.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFLISTTABLE_H
#define LLVM_DEBUGINFO_DWARF_DWARFLISTTABLE_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <map>
#include <vector>

namespace llvm {

/// A base class for DWARF list entries, such as range or location list
/// entries.
struct DWARFListEntryBase {
  /// The offset at which the entry is located in the section.
  uint64_t Offset;
  /// The DWARF encoding (DW_RLE_* or DW_LLE_*).
  uint8_t EntryKind;
  /// The index of the section this entry belongs to.
  uint64_t SectionIndex;
};

/// A base class for lists of entries that are extracted from a particular
/// section, such as range lists or location lists.
template <typename ListEntryType> class DWARFListType {
  using EntryType = ListEntryType;
  using ListEntries = std::vector<EntryType>;

protected:
  ListEntries Entries;

public:
  const ListEntries &getEntries() const { return Entries; }
  bool empty() const { return Entries.empty(); }
  void clear() { Entries.clear(); }
  Error extract(DWARFDataExtractor Data, uint64_t HeaderOffset,
                uint64_t *OffsetPtr, StringRef SectionName,
                StringRef ListStringName);
};

/// A class representing the header of a list table such as the range list
/// table in the .debug_rnglists section.
class DWARFListTableHeader {
  struct Header {
    /// The total length of the entries for this table, not including the length
    /// field itself.
    uint64_t Length = 0;
    /// The DWARF version number.
    uint16_t Version;
    /// The size in bytes of an address on the target architecture. For
    /// segmented addressing, this is the size of the offset portion of the
    /// address.
    uint8_t AddrSize;
    /// The size in bytes of a segment selector on the target architecture.
    /// If the target system uses a flat address space, this value is 0.
    uint8_t SegSize;
    /// The number of offsets that follow the header before the range lists.
    uint32_t OffsetEntryCount;
  };

  Header HeaderData;
  /// The table's format, either DWARF32 or DWARF64.
  dwarf::DwarfFormat Format;
  /// The offset at which the header (and hence the table) is located within
  /// its section.
  uint64_t HeaderOffset;
  /// The name of the section the list is located in.
  StringRef SectionName;
  /// A characterization of the list for dumping purposes, e.g. "range" or
  /// "location".
  StringRef ListTypeString;

public:
  DWARFListTableHeader(StringRef SectionName, StringRef ListTypeString)
      : SectionName(SectionName), ListTypeString(ListTypeString) {}

  void clear() {
    HeaderData = {};
  }
  uint64_t getHeaderOffset() const { return HeaderOffset; }
  uint8_t getAddrSize() const { return HeaderData.AddrSize; }
  uint64_t getLength() const { return HeaderData.Length; }
  uint16_t getVersion() const { return HeaderData.Version; }
  uint32_t getOffsetEntryCount() const { return HeaderData.OffsetEntryCount; }
  StringRef getSectionName() const { return SectionName; }
  StringRef getListTypeString() const { return ListTypeString; }
  dwarf::DwarfFormat getFormat() const { return Format; }

  /// Return the size of the table header including the length but not including
  /// the offsets.
  static uint8_t getHeaderSize(dwarf::DwarfFormat Format) {
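    // DWARF32: 4-byte unit length + 2-byte version + 1-byte address size +
    // 1-byte segment selector size + 4-byte offset entry count = 12 bytes.
    // DWARF64 prefixes the 4-byte 0xffffffff escape and widens the length
    // field to 8 bytes, giving 20 bytes.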
    switch (Format) {
    case dwarf::DwarfFormat::DWARF32:
      return 12;
    case dwarf::DwarfFormat::DWARF64:
      return 20;
    }
    llvm_unreachable("Invalid DWARF format (expected DWARF32 or DWARF64)");
  }

  void dump(DataExtractor Data, raw_ostream &OS,
            DIDumpOptions DumpOpts = {}) const;
  std::optional<uint64_t> getOffsetEntry(DataExtractor Data,
                                         uint32_t Index) const {
    if (Index >= HeaderData.OffsetEntryCount)
      return std::nullopt;

    return getOffsetEntry(Data, getHeaderOffset() + getHeaderSize(Format),
                          Format, Index);
  }

  static std::optional<uint64_t> getOffsetEntry(DataExtractor Data,
                                                uint64_t OffsetTableOffset,
                                                dwarf::DwarfFormat Format,
                                                uint32_t Index) {
    uint8_t OffsetByteSize = Format == dwarf::DWARF64 ? 8 : 4;
    uint64_t Offset = OffsetTableOffset + OffsetByteSize * Index;
    return Data.getUnsigned(&Offset, OffsetByteSize);
  }

  /// Extract the table header and the array of offsets.
  Error extract(DWARFDataExtractor Data, uint64_t *OffsetPtr);

  /// Returns the length of the table, including the length field, or 0 if the
  /// length has not been determined (e.g. because the table has not yet been
  /// parsed, or there was a problem in parsing).
  uint64_t length() const;
};

/// A class representing a table of lists as specified in the DWARF v5
/// standard for location lists and range lists. The table consists of a header
/// followed by an array of offsets into a DWARF section, followed by zero or
/// more list entries. The list entries are kept in a map where the keys are
/// the lists' section offsets.
template <typename DWARFListType> class DWARFListTableBase {
  DWARFListTableHeader Header;
  /// A mapping between file offsets and lists. It is used to find a particular
  /// list based on an offset (obtained from DW_AT_ranges, for example).
  std::map<uint64_t, DWARFListType> ListMap;
  /// This string is displayed as a heading before the list is dumped
  /// (e.g. "ranges:").
  StringRef HeaderString;

protected:
  DWARFListTableBase(StringRef SectionName, StringRef HeaderString,
                     StringRef ListTypeString)
      : Header(SectionName, ListTypeString), HeaderString(HeaderString) {}

public:
  void clear() {
    Header.clear();
    ListMap.clear();
  }
  /// Extract the table header and the array of offsets.
  Error extractHeaderAndOffsets(DWARFDataExtractor Data, uint64_t *OffsetPtr) {
    return Header.extract(Data, OffsetPtr);
  }
  /// Extract an entire table, including all list entries.
  Error extract(DWARFDataExtractor Data, uint64_t *OffsetPtr);
  /// Look up a list based on a given offset. Extract it and enter it into the
  /// list map if necessary.
  Expected<DWARFListType> findList(DWARFDataExtractor Data,
                                   uint64_t Offset) const;

  uint64_t getHeaderOffset() const { return Header.getHeaderOffset(); }
  uint8_t getAddrSize() const { return Header.getAddrSize(); }
  uint32_t getOffsetEntryCount() const { return Header.getOffsetEntryCount(); }
  dwarf::DwarfFormat getFormat() const { return Header.getFormat(); }

  void
  dump(DWARFDataExtractor Data, raw_ostream &OS,
       llvm::function_ref<std::optional<object::SectionedAddress>(uint32_t)>
           LookupPooledAddress,
       DIDumpOptions DumpOpts = {}) const;

  /// Return the contents of the offset entry designated by a given index.
  std::optional<uint64_t> getOffsetEntry(DataExtractor Data,
                                         uint32_t Index) const {
    return Header.getOffsetEntry(Data, Index);
  }
  /// Return the size of the table header including the length but not including
  /// the offsets. This is dependent on the table format, which is unambiguously
  /// derived from parsing the table.
  uint8_t getHeaderSize() const {
    return DWARFListTableHeader::getHeaderSize(getFormat());
  }

  uint64_t length() { return Header.length(); }
};

template <typename DWARFListType>
Error DWARFListTableBase<DWARFListType>::extract(DWARFDataExtractor Data,
                                                 uint64_t *OffsetPtr) {
  clear();
  if (Error E = extractHeaderAndOffsets(Data, OffsetPtr))
    return E;

  Data.setAddressSize(Header.getAddrSize());
  Data = DWARFDataExtractor(Data, getHeaderOffset() + Header.length());
  while (Data.isValidOffset(*OffsetPtr)) {
    DWARFListType CurrentList;
    uint64_t Off = *OffsetPtr;
    if (Error E = CurrentList.extract(Data, getHeaderOffset(), OffsetPtr,
                                      Header.getSectionName(),
                                      Header.getListTypeString()))
      return E;
    ListMap[Off] = CurrentList;
  }

  assert(*OffsetPtr == Data.size() &&
         "mismatch between expected length of table and length "
         "of extracted data");
  return Error::success();
}

template <typename ListEntryType>
Error DWARFListType<ListEntryType>::extract(DWARFDataExtractor Data,
                                            uint64_t HeaderOffset,
                                            uint64_t *OffsetPtr,
                                            StringRef SectionName,
                                            StringRef ListTypeString) {
  if (*OffsetPtr < HeaderOffset || *OffsetPtr >= Data.size())
    return createStringError(errc::invalid_argument,
                       "invalid %s list offset 0x%" PRIx64,
                       ListTypeString.data(), *OffsetPtr);
  Entries.clear();
  while (Data.isValidOffset(*OffsetPtr)) {
    ListEntryType Entry;
    if (Error E = Entry.extract(Data, OffsetPtr))
      return E;
    Entries.push_back(Entry);
    if (Entry.isSentinel())
      return Error::success();
  }
  return createStringError(errc::illegal_byte_sequence,
                     "no end of list marker detected at end of %s table "
                     "starting at offset 0x%" PRIx64,
                     SectionName.data(), HeaderOffset);
}

template <typename DWARFListType>
void DWARFListTableBase<DWARFListType>::dump(
    DWARFDataExtractor Data, raw_ostream &OS,
    llvm::function_ref<std::optional<object::SectionedAddress>(uint32_t)>
        LookupPooledAddress,
    DIDumpOptions DumpOpts) const {
  Header.dump(Data, OS, DumpOpts);
  OS << HeaderString << "\n";

  // Determine the length of the longest encoding string we have in the table,
  // so we can align the output properly. We only need this in verbose mode.
  size_t MaxEncodingStringLength = 0;
  if (DumpOpts.Verbose) {
    for (const auto &List : ListMap)
      for (const auto &Entry : List.second.getEntries())
        MaxEncodingStringLength =
            std::max(MaxEncodingStringLength,
                     dwarf::RangeListEncodingString(Entry.EntryKind).size());
  }

  uint64_t CurrentBase = 0;
  for (const auto &List : ListMap)
    for (const auto &Entry : List.second.getEntries())
      Entry.dump(OS, getAddrSize(), MaxEncodingStringLength, CurrentBase,
                 DumpOpts, LookupPooledAddress);
}

template <typename DWARFListType>
Expected<DWARFListType>
DWARFListTableBase<DWARFListType>::findList(DWARFDataExtractor Data,
                                            uint64_t Offset) const {
  // Extract the list from the section and enter it into the list map.
  DWARFListType List;
  if (Header.length())
    Data = DWARFDataExtractor(Data, getHeaderOffset() + Header.length());
  if (Error E =
          List.extract(Data, Header.length() ? getHeaderOffset() : 0, &Offset,
                       Header.getSectionName(), Header.getListTypeString()))
    return std::move(E);
  return List;
}

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFLISTTABLE_H
//===- DWARFSection.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFSECTION_H
#define LLVM_DEBUGINFO_DWARF_DWARFSECTION_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

struct DWARFSection {
  StringRef Data;
  uint64_t Address = 0;
};

struct SectionName {
  StringRef Name;
  bool IsNameUnique;
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFSECTION_H
//===- DWARFDebugLoc.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/Support/Errc.h"
#include <cstdint>

namespace llvm {
class DWARFUnit;
class MCRegisterInfo;
class raw_ostream;
class DWARFObject;
struct DIDumpOptions;
struct DWARFLocationExpression;
namespace object {
struct SectionedAddress;
}

/// A single location within a location list. Entries are stored in the DWARF5
/// form even if they originally come from a DWARF<=4 location list.
struct DWARFLocationEntry {
  /// The entry kind (DW_LLE_***).
  uint8_t Kind;

  /// The first value of the location entry (if applicable).
  uint64_t Value0;

  /// The second value of the location entry (if applicable).
  uint64_t Value1;

  /// The index of the section this entry is relative to (if applicable).
  uint64_t SectionIndex;

  /// The location expression itself (if applicable).
  SmallVector<uint8_t, 4> Loc;
};

/// An abstract base class for various kinds of location tables (.debug_loc,
/// .debug_loclists, and their dwo variants).
class DWARFLocationTable {
public:
  DWARFLocationTable(DWARFDataExtractor Data) : Data(std::move(Data)) {}
  virtual ~DWARFLocationTable() = default;

  /// Call the user-provided callback for each entry (including the end-of-list
  /// entry) in the location list starting at \p Offset. The callback can return
  /// false to terminate the iteration early. Returns an error if it was unable
  /// to parse the entire location list correctly. Upon successful termination
  /// \p Offset will be updated to point past the end of the list.
  virtual Error visitLocationList(
      uint64_t *Offset,
      function_ref<bool(const DWARFLocationEntry &)> Callback) const = 0;

  /// Dump the location list at the given \p Offset. The function returns true
  /// iff it has successfully reached the end of the list. This means that one
  /// can attempt to parse another list after the current one (\p Offset will be
  /// updated to point past the end of the current list).
  bool dumpLocationList(uint64_t *Offset, raw_ostream &OS,
                        std::optional<object::SectionedAddress> BaseAddr,
                        const DWARFObject &Obj, DWARFUnit *U,
                        DIDumpOptions DumpOpts, unsigned Indent) const;

  Error visitAbsoluteLocationList(
      uint64_t Offset, std::optional<object::SectionedAddress> BaseAddr,
      std::function<std::optional<object::SectionedAddress>(uint32_t)>
          LookupAddr,
      function_ref<bool(Expected<DWARFLocationExpression>)> Callback) const;

  const DWARFDataExtractor &getData() { return Data; }

protected:
  DWARFDataExtractor Data;

  virtual void dumpRawEntry(const DWARFLocationEntry &Entry, raw_ostream &OS,
                            unsigned Indent, DIDumpOptions DumpOpts,
                            const DWARFObject &Obj) const = 0;
};
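
// A minimal iteration sketch (hypothetical table `Table`; error handling
// elided to consumeError):
//   uint64_t Offset = 0;
//   if (Error E = Table.visitLocationList(
//           &Offset, [](const DWARFLocationEntry &Entry) {
//             return true; // visit all entries, incl. DW_LLE_end_of_list
//           }))
//     consumeError(std::move(E));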

class DWARFDebugLoc final : public DWARFLocationTable {
public:
  /// A list of locations that contain one variable.
  struct LocationList {
    /// The beginning offset where this location list is stored in the debug_loc
    /// section.
    uint64_t Offset;
    /// All the locations in which the variable is stored.
    SmallVector<DWARFLocationEntry, 2> Entries;
  };

private:
  using LocationLists = SmallVector<LocationList, 4>;

  /// A list of all the variables in the debug_loc section, each one describing
  /// the locations in which the variable is stored.
  LocationLists Locations;

public:
  DWARFDebugLoc(DWARFDataExtractor Data)
      : DWARFLocationTable(std::move(Data)) {}

  /// Print the location lists found within the debug_loc section.
  void dump(raw_ostream &OS, const DWARFObject &Obj, DIDumpOptions DumpOpts,
            std::optional<uint64_t> Offset) const;

  Error visitLocationList(
      uint64_t *Offset,
      function_ref<bool(const DWARFLocationEntry &)> Callback) const override;

protected:
  void dumpRawEntry(const DWARFLocationEntry &Entry, raw_ostream &OS,
                    unsigned Indent, DIDumpOptions DumpOpts,
                    const DWARFObject &Obj) const override;
};

class DWARFDebugLoclists final : public DWARFLocationTable {
public:
  DWARFDebugLoclists(DWARFDataExtractor Data, uint16_t Version)
      : DWARFLocationTable(std::move(Data)), Version(Version) {}

  Error visitLocationList(
      uint64_t *Offset,
      function_ref<bool(const DWARFLocationEntry &)> Callback) const override;

  /// Dump all location lists within the given range.
  void dumpRange(uint64_t StartOffset, uint64_t Size, raw_ostream &OS,
                 const DWARFObject &Obj, DIDumpOptions DumpOpts);

protected:
  void dumpRawEntry(const DWARFLocationEntry &Entry, raw_ostream &OS,
                    unsigned Indent, DIDumpOptions DumpOpts,
                    const DWARFObject &Obj) const override;

private:
  uint16_t Version;
};

class ResolverError : public ErrorInfo<ResolverError> {
public:
  static char ID;

  ResolverError(uint32_t Index, dwarf::LoclistEntries Kind)
      : Index(Index), Kind(Kind) {}

  void log(raw_ostream &OS) const override;
  std::error_code convertToErrorCode() const override {
    return llvm::errc::invalid_argument;
  }

private:
  uint32_t Index;
  dwarf::LoclistEntries Kind;
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGLOC_H
//===- DWARFCompileUnit.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFCOMPILEUNIT_H
#define LLVM_DEBUGINFO_DWARF_DWARFCOMPILEUNIT_H

#include "llvm/DebugInfo/DWARF/DWARFUnit.h"

namespace llvm {

class DWARFContext;
class DWARFDebugAbbrev;
class raw_ostream;
struct DIDumpOptions;
struct DWARFSection;

class DWARFCompileUnit : public DWARFUnit {
public:
  DWARFCompileUnit(DWARFContext &Context, const DWARFSection &Section,
                   const DWARFUnitHeader &Header, const DWARFDebugAbbrev *DA,
                   const DWARFSection *RS, const DWARFSection *LocSection,
                   StringRef SS, const DWARFSection &SOS,
                   const DWARFSection *AOS, const DWARFSection &LS, bool LE,
                   bool IsDWO, const DWARFUnitVector &UnitVector)
      : DWARFUnit(Context, Section, Header, DA, RS, LocSection, SS, SOS, AOS,
                  LS, LE, IsDWO, UnitVector) {}

  /// VTable anchor.
  ~DWARFCompileUnit() override;
  /// Dump this compile unit to \p OS.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) override;
  /// Enable LLVM-style RTTI.
  static bool classof(const DWARFUnit *U) { return !U->isTypeUnit(); }
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFCOMPILEUNIT_H
//===- DWARFDebugAddr.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGADDR_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGADDR_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {

class raw_ostream;
class DWARFDataExtractor;

/// A class representing an address table as specified in DWARF v5.
/// The table consists of a header followed by an array of address values from
/// .debug_addr section.
class DWARFDebugAddrTable {
  dwarf::DwarfFormat Format;
  uint64_t Offset;
  /// The total length of the entries for this table, not including the length
  /// field itself.
  uint64_t Length = 0;
  /// The DWARF version number.
  uint16_t Version;
  /// The size in bytes of an address on the target architecture. For
  /// segmented addressing, this is the size of the offset portion of the
  /// address.
  uint8_t AddrSize;
  /// The size in bytes of a segment selector on the target architecture.
  /// If the target system uses a flat address space, this value is 0.
  uint8_t SegSize;
  std::vector<uint64_t> Addrs;

  /// Invalidate Length field to stop further processing.
  void invalidateLength() { Length = 0; }

  Error extractAddresses(const DWARFDataExtractor &Data, uint64_t *OffsetPtr,
                         uint64_t EndOffset);

public:

  /// Extract the entire table, including all addresses.
  Error extract(const DWARFDataExtractor &Data, uint64_t *OffsetPtr,
                uint16_t CUVersion, uint8_t CUAddrSize,
                std::function<void(Error)> WarnCallback);

  /// Extract a DWARFv5 address table.
  Error extractV5(const DWARFDataExtractor &Data, uint64_t *OffsetPtr,
                  uint8_t CUAddrSize, std::function<void(Error)> WarnCallback);

  /// Extract a pre-DWARFv5 address table. Such tables do not have a header
  /// and consist only of a series of addresses.
  /// See https://gcc.gnu.org/wiki/DebugFission for details.
  Error extractPreStandard(const DWARFDataExtractor &Data, uint64_t *OffsetPtr,
                           uint16_t CUVersion, uint8_t CUAddrSize);

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts = {}) const;

  /// Return the address based on a given index.
  Expected<uint64_t> getAddrEntry(uint32_t Index) const;

  /// Return the full length of this table, including the length field.
  /// Return std::nullopt if the length cannot be identified reliably.
  std::optional<uint64_t> getFullLength() const;

  /// Return the DWARF format of this table.
  dwarf::DwarfFormat getFormat() const { return Format; }

  /// Return the length of this table.
  uint64_t getLength() const { return Length; }

  /// Return the version of this table.
  uint16_t getVersion() const { return Version; }

  /// Return the address size of this table.
  uint8_t getAddressSize() const { return AddrSize; }

  /// Return the segment selector size of this table.
  uint8_t getSegmentSelectorSize() const { return SegSize; }

  /// Return the parsed addresses of this table.
  ArrayRef<uint64_t> getAddressEntries() const { return Addrs; }
};
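
// An extraction sketch (hypothetical extractor `Data`; a DWARF v5 table with
// 8-byte addresses is assumed):
//   DWARFDebugAddrTable Table;
//   uint64_t Offset = 0;
//   auto Warn = [](Error E) { consumeError(std::move(E)); };
//   if (Error E = Table.extract(Data, &Offset, /*CUVersion=*/5,
//                               /*CUAddrSize=*/8, Warn))
//     consumeError(std::move(E));
//   else if (Expected<uint64_t> Addr = Table.getAddrEntry(0))
//     ; // use *Addr
//   else
//     consumeError(Addr.takeError());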

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGADDR_H
//===- DWARFDebugLine.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGLINE_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGLINE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
#include <cstdint>
#include <map>
#include <string>
#include <vector>

namespace llvm {

class raw_ostream;

class DWARFDebugLine {
public:
  struct FileNameEntry {
    FileNameEntry() = default;

    DWARFFormValue Name;
    uint64_t DirIdx = 0;
    uint64_t ModTime = 0;
    uint64_t Length = 0;
    MD5::MD5Result Checksum;
    DWARFFormValue Source;
  };

  /// Tracks which optional content types are present in a DWARF file name
  /// entry format.
  struct ContentTypeTracker {
    ContentTypeTracker() = default;

    /// Whether filename entries provide a modification timestamp.
    bool HasModTime = false;
    /// Whether filename entries provide a file size.
    bool HasLength = false;
    /// For v5, whether filename entries provide an MD5 checksum.
    bool HasMD5 = false;
    /// For v5, whether filename entries provide source text.
    bool HasSource = false;

    /// Update tracked content types with \p ContentType.
    void trackContentType(dwarf::LineNumberEntryFormat ContentType);
  };

  struct Prologue {
    Prologue();

    /// The size in bytes of the statement information for this compilation unit
    /// (not including the total_length field itself).
    uint64_t TotalLength;
    /// Version, address size (starting in v5), and DWARF32/64 format; these
    /// parameters affect interpretation of forms (used in the directory and
    /// file tables starting with v5).
    dwarf::FormParams FormParams;
    /// The number of bytes following the prologue_length field to the beginning
    /// of the first byte of the statement program itself.
    uint64_t PrologueLength;
    /// In v5, size in bytes of a segment selector.
    uint8_t SegSelectorSize;
    /// The size in bytes of the smallest target machine instruction. Statement
    /// program opcodes that alter the address register first multiply their
    /// operands by this value.
    uint8_t MinInstLength;
    /// The maximum number of individual operations that may be encoded in an
    /// instruction.
    uint8_t MaxOpsPerInst;
    /// The initial value of the is_stmt register.
    uint8_t DefaultIsStmt;
    /// This parameter affects the meaning of the special opcodes. See below.
    int8_t LineBase;
    /// This parameter affects the meaning of the special opcodes. See below.
    uint8_t LineRange;
    /// The number assigned to the first special opcode.
    uint8_t OpcodeBase;
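    /// (For a special opcode, per the DWARF standard and ignoring the VLIW
    /// op-index refinement: adjusted opcode = opcode - opcode_base; address
    /// advance = (adjusted opcode / line_range) * min_inst_length; line
    /// advance = line_base + (adjusted opcode % line_range).)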
    /// This tracks which optional file format content types are present.
    ContentTypeTracker ContentTypes;
    std::vector<uint8_t> StandardOpcodeLengths;
    std::vector<DWARFFormValue> IncludeDirectories;
    std::vector<FileNameEntry> FileNames;

    const dwarf::FormParams getFormParams() const { return FormParams; }
    uint16_t getVersion() const { return FormParams.Version; }
    uint8_t getAddressSize() const { return FormParams.AddrSize; }
    bool isDWARF64() const { return FormParams.Format == dwarf::DWARF64; }

    uint32_t sizeofTotalLength() const { return isDWARF64() ? 12 : 4; }

    uint32_t sizeofPrologueLength() const { return isDWARF64() ? 8 : 4; }

    bool totalLengthIsValid() const;

    /// Length of the prologue in bytes.
    uint64_t getLength() const;

    /// Get DWARF-version aware access to the file name entry at the provided
    /// index.
    const llvm::DWARFDebugLine::FileNameEntry &
    getFileNameEntry(uint64_t Index) const;

    bool hasFileAtIndex(uint64_t FileIndex) const;

    std::optional<uint64_t> getLastValidFileIndex() const;

    bool
    getFileNameByIndex(uint64_t FileIndex, StringRef CompDir,
                       DILineInfoSpecifier::FileLineInfoKind Kind,
                       std::string &Result,
                       sys::path::Style Style = sys::path::Style::native) const;

    void clear();
    void dump(raw_ostream &OS, DIDumpOptions DumpOptions) const;
    Error parse(DWARFDataExtractor Data, uint64_t *OffsetPtr,
                function_ref<void(Error)> RecoverableErrorHandler,
                const DWARFContext &Ctx, const DWARFUnit *U = nullptr);
  };

  /// Standard .debug_line state machine structure.
  struct Row {
    explicit Row(bool DefaultIsStmt = false);

    /// Called after a row is appended to the matrix.
    void postAppend();
    void reset(bool DefaultIsStmt);
    void dump(raw_ostream &OS) const;

    static void dumpTableHeader(raw_ostream &OS, unsigned Indent);

    static bool orderByAddress(const Row &LHS, const Row &RHS) {
      return std::tie(LHS.Address.SectionIndex, LHS.Address.Address) <
             std::tie(RHS.Address.SectionIndex, RHS.Address.Address);
    }

    /// The program-counter value corresponding to a machine instruction
    /// generated by the compiler and the section index pointing to the section
    /// containing this PC. If relocation information is present then section
    /// index is the index of the section which contains above address.
    /// Otherwise this is object::SectionedAddress::Undef value.
    object::SectionedAddress Address;
    /// An unsigned integer indicating a source line number. Lines are numbered
    /// beginning at 1. The compiler may emit the value 0 in cases where an
    /// instruction cannot be attributed to any source line.
    uint32_t Line;
    /// An unsigned integer indicating a column number within a source line.
    /// Columns are numbered beginning at 1. The value 0 is reserved to indicate
    /// that a statement begins at the 'left edge' of the line.
    uint16_t Column;
    /// An unsigned integer indicating the identity of the source file
    /// corresponding to a machine instruction.
    uint16_t File;
    /// An unsigned integer representing the DWARF path discriminator value
    /// for this location.
    uint32_t Discriminator;
    /// An unsigned integer whose value encodes the applicable instruction set
    /// architecture for the current instruction.
    uint8_t Isa;
    /// An unsigned integer representing the index of an operation within a
    /// VLIW instruction. The index of the first operation is 0.
    /// For non-VLIW architectures, this register will always be 0.
    uint8_t OpIndex;
    /// A boolean indicating that the current instruction is the beginning of a
    /// statement.
    uint8_t IsStmt : 1,
        /// A boolean indicating that the current instruction is the
        /// beginning of a basic block.
        BasicBlock : 1,
        /// A boolean indicating that the current address is that of the
        /// first byte after the end of a sequence of target machine
        /// instructions.
        EndSequence : 1,
        /// A boolean indicating that the current address is one (of possibly
        /// many) where execution should be suspended for an entry breakpoint
        /// of a function.
        PrologueEnd : 1,
        /// A boolean indicating that the current address is one (of possibly
        /// many) where execution should be suspended for an exit breakpoint
        /// of a function.
        EpilogueBegin : 1;
  };

  /// Represents a series of contiguous machine instructions. The line table
  /// for each compilation unit may consist of multiple sequences, which are
  /// not guaranteed to be in the order of ascending instruction address.
  struct Sequence {
    Sequence();

    /// Sequence describes instructions at address range [LowPC, HighPC)
    /// and is described by line table rows [FirstRowIndex, LastRowIndex).
    uint64_t LowPC;
    uint64_t HighPC;
    /// If relocation information is present then this is the index of the
    /// section which contains above addresses. Otherwise this is
    /// object::SectionedAddress::Undef value.
    uint64_t SectionIndex;
    unsigned FirstRowIndex;
    unsigned LastRowIndex;
    bool Empty;

    void reset();

    static bool orderByHighPC(const Sequence &LHS, const Sequence &RHS) {
      return std::tie(LHS.SectionIndex, LHS.HighPC) <
             std::tie(RHS.SectionIndex, RHS.HighPC);
    }

    bool isValid() const {
      return !Empty && (LowPC < HighPC) && (FirstRowIndex < LastRowIndex);
    }

    bool containsPC(object::SectionedAddress PC) const {
      return SectionIndex == PC.SectionIndex &&
             (LowPC <= PC.Address && PC.Address < HighPC);
    }
  };

  struct LineTable {
    LineTable();

    /// Represents an invalid row
    const uint32_t UnknownRowIndex = UINT32_MAX;

    void appendRow(const DWARFDebugLine::Row &R) { Rows.push_back(R); }

    void appendSequence(const DWARFDebugLine::Sequence &S) {
      Sequences.push_back(S);
    }

    /// Returns the index of the row with file/line info for a given address,
    /// or UnknownRowIndex if there is no such row.
    uint32_t lookupAddress(object::SectionedAddress Address) const;

    bool lookupAddressRange(object::SectionedAddress Address, uint64_t Size,
                            std::vector<uint32_t> &Result) const;

    bool hasFileAtIndex(uint64_t FileIndex) const {
      return Prologue.hasFileAtIndex(FileIndex);
    }

    std::optional<uint64_t> getLastValidFileIndex() const {
      return Prologue.getLastValidFileIndex();
    }

    /// Extracts filename by its index in filename table in prologue.
    /// In DWARF v4, the files are 1-indexed and the current compilation file
    /// name is not represented in the list. In DWARF v5, the files are
    /// 0-indexed and the primary source file has the index 0.
    /// Returns true on success.
    bool getFileNameByIndex(uint64_t FileIndex, StringRef CompDir,
                            DILineInfoSpecifier::FileLineInfoKind Kind,
                            std::string &Result) const {
      return Prologue.getFileNameByIndex(FileIndex, CompDir, Kind, Result);
    }

    /// Fills the Result argument with the file and line information
    /// corresponding to Address. Returns true on success.
    bool getFileLineInfoForAddress(object::SectionedAddress Address,
                                   const char *CompDir,
                                   DILineInfoSpecifier::FileLineInfoKind Kind,
                                   DILineInfo &Result) const;

    /// Extracts directory name by its Entry in include directories table
    /// in prologue. Returns true on success.
    bool getDirectoryForEntry(const FileNameEntry &Entry,
                              std::string &Directory) const;

    void dump(raw_ostream &OS, DIDumpOptions DumpOptions) const;
    void clear();

    /// Parse prologue and all rows.
    Error parse(DWARFDataExtractor &DebugLineData, uint64_t *OffsetPtr,
                const DWARFContext &Ctx, const DWARFUnit *U,
                function_ref<void(Error)> RecoverableErrorHandler,
                raw_ostream *OS = nullptr, bool Verbose = false);

    using RowVector = std::vector<Row>;
    using RowIter = RowVector::const_iterator;
    using SequenceVector = std::vector<Sequence>;
    using SequenceIter = SequenceVector::const_iterator;

    struct Prologue Prologue;
    RowVector Rows;
    SequenceVector Sequences;

  private:
    uint32_t findRowInSeq(const DWARFDebugLine::Sequence &Seq,
                          object::SectionedAddress Address) const;
    std::optional<StringRef>
    getSourceByIndex(uint64_t FileIndex,
                     DILineInfoSpecifier::FileLineInfoKind Kind) const;

    uint32_t lookupAddressImpl(object::SectionedAddress Address) const;

    bool lookupAddressRangeImpl(object::SectionedAddress Address, uint64_t Size,
                                std::vector<uint32_t> &Result) const;
  };

  const LineTable *getLineTable(uint64_t Offset) const;
  Expected<const LineTable *>
  getOrParseLineTable(DWARFDataExtractor &DebugLineData, uint64_t Offset,
                      const DWARFContext &Ctx, const DWARFUnit *U,
                      function_ref<void(Error)> RecoverableErrorHandler);
  void clearLineTable(uint64_t Offset);

  /// Helper to allow for parsing of an entire .debug_line section in sequence.
  class SectionParser {
  public:
    using LineToUnitMap = std::map<uint64_t, DWARFUnit *>;

    SectionParser(DWARFDataExtractor &Data, const DWARFContext &C,
                  DWARFUnitVector::iterator_range Units);

    /// Get the next line table from the section. Report any issues via the
    /// handlers.
    ///
    /// \param RecoverableErrorHandler - any issues that don't prevent further
    /// parsing of the table will be reported through this handler.
    /// \param UnrecoverableErrorHandler - any issues that prevent further
    /// parsing of the table will be reported through this handler.
    /// \param OS - if not null, the parser will print information about the
    /// table as it parses it.
    /// \param Verbose - if true, the parser will print verbose information when
    /// printing to the output.
    LineTable parseNext(function_ref<void(Error)> RecoverableErrorHandler,
                        function_ref<void(Error)> UnrecoverableErrorHandler,
                        raw_ostream *OS = nullptr, bool Verbose = false);

    /// Skip the current line table and go to the following line table (if
    /// present) immediately.
    ///
    /// \param RecoverableErrorHandler - report any recoverable prologue
    /// parsing issues via this handler.
    /// \param UnrecoverableErrorHandler - report any unrecoverable prologue
    /// parsing issues via this handler.
    void skip(function_ref<void(Error)> RecoverableErrorHandler,
              function_ref<void(Error)> UnrecoverableErrorHandler);

    /// Indicates if the parser has parsed as much as possible.
    ///
    /// \note Certain problems with the line table structure might mean that
    /// parsing stops before the end of the section is reached.
    bool done() const { return Done; }

    /// Get the offset the parser has reached.
    uint64_t getOffset() const { return Offset; }

  private:
    DWARFUnit *prepareToParse(uint64_t Offset);
    void moveToNextTable(uint64_t OldOffset, const Prologue &P);
    bool hasValidVersion(uint64_t Offset);

    LineToUnitMap LineToUnit;

    DWARFDataExtractor &DebugLineData;
    const DWARFContext &Context;
    uint64_t Offset = 0;
    bool Done = false;
  };

private:
  struct ParsingState {
    ParsingState(struct LineTable *LT, uint64_t TableOffset,
                 function_ref<void(Error)> ErrorHandler);

    void resetRowAndSequence();
    void appendRowToMatrix();

    struct AddrOpIndexDelta {
      uint64_t AddrOffset;
      int16_t OpIndexDelta;
    };

    /// Advance the address and op-index by the \p OperationAdvance value.
    /// \returns the amount advanced by.
    AddrOpIndexDelta advanceAddrOpIndex(uint64_t OperationAdvance,
                                        uint8_t Opcode, uint64_t OpcodeOffset);

    struct OpcodeAdvanceResults {
      uint64_t AddrDelta;
      int16_t OpIndexDelta;
      uint8_t AdjustedOpcode;
    };

    /// Advance the address and op-index as required by the specified \p Opcode.
    /// \returns the amount advanced by and the calculated adjusted opcode.
    OpcodeAdvanceResults advanceForOpcode(uint8_t Opcode,
                                          uint64_t OpcodeOffset);

    struct SpecialOpcodeDelta {
      uint64_t Address;
      int32_t Line;
      int16_t OpIndex;
    };

    /// Advance the line, address and op-index as required by the specified
    /// special \p Opcode. \returns the address, op-index and line delta.
    SpecialOpcodeDelta handleSpecialOpcode(uint8_t Opcode,
                                           uint64_t OpcodeOffset);

    /// Line table we're currently parsing.
    struct LineTable *LineTable;
    struct Row Row;
    struct Sequence Sequence;

  private:
    uint64_t LineTableOffset;

    bool ReportAdvanceAddrProblem = true;
    bool ReportBadLineRange = true;
    function_ref<void(Error)> ErrorHandler;
  };

  using LineTableMapTy = std::map<uint64_t, LineTable>;
  using LineTableIter = LineTableMapTy::iterator;
  using LineTableConstIter = LineTableMapTy::const_iterator;

  LineTableMapTy LineTableMap;
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGLINE_H
//===- DWARFDebugMacro.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {

class raw_ostream;
class DwarfStreamer;

class DWARFDebugMacro {
  friend DwarfStreamer;

  /// DWARFv5 section 6.3.1 Macro Information Header.
  enum HeaderFlagMask {
#define HANDLE_MACRO_FLAG(ID, NAME) MACRO_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  };
  struct MacroHeader {
    /// Macro version information number.
    uint16_t Version = 0;

    /// The bits of the flags field are interpreted as a set of flags, some of
    /// which may indicate that additional fields follow. The following flags,
    /// beginning with the least significant bit, are defined:
    /// offset_size_flag:
    ///   If the offset_size_flag is zero, the header is for a 32-bit DWARF
    ///   format macro section and all offsets are 4 bytes long; if it is one,
    ///   the header is for a 64-bit DWARF format macro section and all offsets
    ///   are 8 bytes long.
    /// debug_line_offset_flag:
    ///   If the debug_line_offset_flag is one, the debug_line_offset field (see
    ///   below) is present. If zero, that field is omitted.
    /// opcode_operands_table_flag:
    ///   If the opcode_operands_table_flag is one, the opcode_operands_table
    ///   field (see below) is present. If zero, that field is omitted.
    uint8_t Flags = 0;

    /// debug_line_offset
    ///   An offset in the .debug_line section of the beginning of the line
    ///   number information in the containing compilation unit, encoded as a
    ///   4-byte offset for a 32-bit DWARF format macro section and an 8-byte
    ///   offset for a 64-bit DWARF format macro section.
    uint64_t DebugLineOffset;

    /// Print the macro header from the debug_macro section.
    void dumpMacroHeader(raw_ostream &OS) const;

    /// Parse the debug_macro header.
    Error parseMacroHeader(DWARFDataExtractor Data, uint64_t *Offset);

    /// Get the DWARF format according to the flags.
    dwarf::DwarfFormat getDwarfFormat() const;

    /// Get the size of a reference according to the DWARF format.
    uint8_t getOffsetByteSize() const;
  };
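
  // Flag-decoding sketch (illustrative, not part of the upstream header).
  // Assuming the HANDLE_MACRO_FLAG names generated from Dwarf.def (e.g.
  // MACRO_OFFSET_SIZE = 0x1), the DWARF format follows from the flags:
  //
  //   bool Is64Bit = (Header.Flags & MACRO_OFFSET_SIZE) != 0;
  //   uint8_t OffsetByteSize = Is64Bit ? 8 : 4; // what getOffsetByteSize()
  //                                             // derives via getDwarfFormat()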

  /// A single macro entry within a macro list.
  struct Entry {
    /// The type of the macro entry.
    uint32_t Type;
    union {
      /// The source line where the macro is defined.
      uint64_t Line;
      /// Vendor extension constant value.
      uint64_t ExtConstant;
      /// Macro unit import offset.
      uint64_t ImportOffset;
    };

    union {
      /// The string (name, value) of the macro entry.
      const char *MacroStr;
      /// An unsigned integer indicating the identity of the source file.
      uint64_t File;
      /// Vendor extension string.
      const char *ExtStr;
    };
  };

  struct MacroList {
    // A value of 0 in the `Header.Version` field indicates that we're parsing
    // a macinfo[.dwo] section, which doesn't have a header itself; in that
    // case the other fields in the `Header` are uninitialized.
    MacroHeader Header;
    SmallVector<Entry, 4> Macros;
    uint64_t Offset;

    /// Whether or not this is a .debug_macro section.
    bool IsDebugMacro;
  };

  /// A list of all the macro entries in the debug_macinfo section.
  std::vector<MacroList> MacroLists;

public:
  DWARFDebugMacro() = default;

  /// Print the macro list found within the debug_macinfo/debug_macro section.
  void dump(raw_ostream &OS) const;

  Error parseMacro(DWARFUnitVector::compile_unit_range Units,
                   DataExtractor StringExtractor,
                   DWARFDataExtractor MacroData) {
    return parseImpl(Units, StringExtractor, MacroData, /*IsMacro=*/true);
  }

  Error parseMacinfo(DWARFDataExtractor MacroData) {
    return parseImpl(std::nullopt, std::nullopt, MacroData, /*IsMacro=*/false);
  }

  /// Return true if the section contains no entries.
  bool empty() const { return MacroLists.empty(); }

  bool hasEntryForOffset(uint64_t Offset) const {
    for (const MacroList &List : MacroLists)
      if (Offset == List.Offset)
        return true;

    return false;
  }

private:
  /// Parse the debug_macinfo/debug_macro section accessible via the 'MacroData'
  /// parameter.
  Error parseImpl(std::optional<DWARFUnitVector::compile_unit_range> Units,
                  std::optional<DataExtractor> StringExtractor,
                  DWARFDataExtractor Data, bool IsMacro);
};
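
// Usage sketch (illustrative, not part of the upstream header): parse a
// headerless macinfo contribution and dump it. `MacroData` is assumed to
// cover the section's contents.
inline Error parseAndDumpMacinfoSketch(DWARFDataExtractor MacroData,
                                       raw_ostream &OS) {
  DWARFDebugMacro Macros;
  if (Error E = Macros.parseMacinfo(MacroData)) // no header in .debug_macinfo
    return E;
  if (!Macros.empty())
    Macros.dump(OS); // prints every parsed macro list
  return Error::success();
}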

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
//===- DWARFTypeUnit.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFTYPEUNIT_H
#define LLVM_DEBUGINFO_DWARF_DWARFTYPEUNIT_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include <cstdint>

namespace llvm {

struct DIDumpOptions;
class DWARFContext;
class DWARFDebugAbbrev;
struct DWARFSection;
class raw_ostream;

class DWARFTypeUnit : public DWARFUnit {
public:
  DWARFTypeUnit(DWARFContext &Context, const DWARFSection &Section,
                const DWARFUnitHeader &Header, const DWARFDebugAbbrev *DA,
                const DWARFSection *RS, const DWARFSection *LocSection,
                StringRef SS, const DWARFSection &SOS, const DWARFSection *AOS,
                const DWARFSection &LS, bool LE, bool IsDWO,
                const DWARFUnitVector &UnitVector)
      : DWARFUnit(Context, Section, Header, DA, RS, LocSection, SS, SOS, AOS,
                  LS, LE, IsDWO, UnitVector) {}

  uint64_t getTypeHash() const { return getHeader().getTypeHash(); }
  uint64_t getTypeOffset() const { return getHeader().getTypeOffset(); }

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts = {}) override;
  // Enable LLVM-style RTTI.
  static bool classof(const DWARFUnit *U) { return U->isTypeUnit(); }
};
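
// RTTI sketch (illustrative, not part of the upstream header): the classof()
// hook above enables LLVM-style dyn_cast/isa (from llvm/Support/Casting.h),
// so a generic DWARFUnit can be downcast safely:
//
//   if (auto *TU = llvm::dyn_cast<llvm::DWARFTypeUnit>(U))
//     uint64_t Hash = TU->getTypeHash(); // only meaningful for type units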

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFTYPEUNIT_H
//===- DWARFLocationExpression.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFLOCATIONEXPRESSION_H
#define LLVM_DEBUGINFO_DWARF_DWARFLOCATIONEXPRESSION_H

#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"

namespace llvm {

class raw_ostream;

/// Represents a single DWARF expression, whose value is location-dependent.
/// Typically used in DW_AT_location attributes to describe the location of
/// objects.
struct DWARFLocationExpression {
  /// The address range in which this expression is valid. std::nullopt denotes a
  /// default entry which is valid in addresses not covered by other location
  /// expressions, or everywhere if there are no other expressions.
  std::optional<DWARFAddressRange> Range;

  /// The expression itself.
  SmallVector<uint8_t, 4> Expr;
};
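
// Construction sketch (illustrative, not part of the upstream header): a
// default (range-less) entry holding the single expression byte DW_OP_reg5
// (0x55, an assumption of this sketch), meaning the value lives in register 5:
inline DWARFLocationExpression makeRegLocationSketch() {
  DWARFLocationExpression Loc;
  Loc.Range = std::nullopt; // default entry: applies where nothing else does
  Loc.Expr.push_back(0x55); // DW_OP_reg5
  return Loc;
}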

inline bool operator==(const DWARFLocationExpression &L,
                       const DWARFLocationExpression &R) {
  return L.Range == R.Range && L.Expr == R.Expr;
}

inline bool operator!=(const DWARFLocationExpression &L,
                       const DWARFLocationExpression &R) {
  return !(L == R);
}

raw_ostream &operator<<(raw_ostream &OS, const DWARFLocationExpression &Loc);

/// Represents a set of absolute location expressions.
using DWARFLocationExpressionsVector = std::vector<DWARFLocationExpression>;

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFLOCATIONEXPRESSION_H
//===- DWARFGdbIndex.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFGDBINDEX_H
#define LLVM_DEBUGINFO_DWARF_DWARFGDBINDEX_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cstdint>
#include <utility>

namespace llvm {

class raw_ostream;
class DataExtractor;

class DWARFGdbIndex {
  uint32_t Version;

  uint32_t CuListOffset;
  uint32_t TuListOffset;
  uint32_t AddressAreaOffset;
  uint32_t SymbolTableOffset;
  uint32_t ConstantPoolOffset;

  struct CompUnitEntry {
    uint64_t Offset; /// Offset of a CU in the .debug_info section.
    uint64_t Length; /// Length of that CU.
  };
  SmallVector<CompUnitEntry, 0> CuList;

  struct TypeUnitEntry {
    uint64_t Offset;
    uint64_t TypeOffset;
    uint64_t TypeSignature;
  };
  SmallVector<TypeUnitEntry, 0> TuList;

  struct AddressEntry {
    uint64_t LowAddress;  /// The low address.
    uint64_t HighAddress; /// The high address.
    uint32_t CuIndex;     /// The CU index.
  };
  SmallVector<AddressEntry, 0> AddressArea;

  struct SymTableEntry {
    uint32_t NameOffset; /// Offset of the symbol's name in the constant pool.
    uint32_t VecOffset;  /// Offset of the CU vector in the constant pool.
  };
  SmallVector<SymTableEntry, 0> SymbolTable;

  /// Each value is CU index + attributes.
  SmallVector<std::pair<uint32_t, SmallVector<uint32_t, 0>>, 0>
      ConstantPoolVectors;

  StringRef ConstantPoolStrings;
  uint32_t StringPoolOffset;

  void dumpCUList(raw_ostream &OS) const;
  void dumpTUList(raw_ostream &OS) const;
  void dumpAddressArea(raw_ostream &OS) const;
  void dumpSymbolTable(raw_ostream &OS) const;
  void dumpConstantPool(raw_ostream &OS) const;

  bool parseImpl(DataExtractor Data);

public:
  void dump(raw_ostream &OS);
  void parse(DataExtractor Data);

  bool HasContent = false;
  bool HasError = false;
};
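
// Usage sketch (illustrative, not part of the upstream header): parse() sets
// HasContent/HasError rather than returning a status, so callers check the
// flags before dumping:
//
//   DWARFGdbIndex GdbIndex;
//   GdbIndex.parse(Data);          // Data covers the .gdb_index section
//   if (GdbIndex.HasContent && !GdbIndex.HasError)
//     GdbIndex.dump(OS);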

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFGDBINDEX_H
//===- DWARFFormValue.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFFORMVALUE_H
#define LLVM_DEBUGINFO_DWARF_DWARFFORMVALUE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Support/DataExtractor.h"
#include <cstdint>

namespace llvm {

class DWARFContext;
class DWARFObject;
class DWARFDataExtractor;
class DWARFUnit;
class raw_ostream;

class DWARFFormValue {
public:
  enum FormClass {
    FC_Unknown,
    FC_Address,
    FC_Block,
    FC_Constant,
    FC_String,
    FC_Flag,
    FC_Reference,
    FC_Indirect,
    FC_SectionOffset,
    FC_Exprloc
  };

  struct ValueType {
    ValueType() { uval = 0; }
    ValueType(int64_t V) : sval(V) {}
    ValueType(uint64_t V) : uval(V) {}
    ValueType(const char *V) : cstr(V) {}

    union {
      uint64_t uval;
      int64_t sval;
      const char *cstr;
    };
    const uint8_t *data = nullptr;
    uint64_t SectionIndex; /// Section index for reference forms.
  };

private:
  dwarf::Form Form; /// Form for this value.
  dwarf::DwarfFormat Format =
      dwarf::DWARF32;           /// Remember the DWARF format at extract time.
  ValueType Value;              /// Contains all data for the form.
  const DWARFUnit *U = nullptr; /// Remember the DWARFUnit at extract time.
  const DWARFContext *C = nullptr; /// Context for extract time.

  DWARFFormValue(dwarf::Form F, ValueType V) : Form(F), Value(V) {}

public:
  DWARFFormValue(dwarf::Form F = dwarf::Form(0)) : Form(F) {}

  static DWARFFormValue createFromSValue(dwarf::Form F, int64_t V);
  static DWARFFormValue createFromUValue(dwarf::Form F, uint64_t V);
  static DWARFFormValue createFromPValue(dwarf::Form F, const char *V);
  static DWARFFormValue createFromBlockValue(dwarf::Form F,
                                             ArrayRef<uint8_t> D);
  static DWARFFormValue createFromUnit(dwarf::Form F, const DWARFUnit *Unit,
                                       uint64_t *OffsetPtr);
  static std::optional<object::SectionedAddress>
  getAsSectionedAddress(const ValueType &Val, const dwarf::Form Form,
                        const DWARFUnit *U);

  dwarf::Form getForm() const { return Form; }
  uint64_t getRawUValue() const { return Value.uval; }

  bool isFormClass(FormClass FC) const;
  const DWARFUnit *getUnit() const { return U; }
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts = DIDumpOptions()) const;
  void dumpSectionedAddress(raw_ostream &OS, DIDumpOptions DumpOpts,
                            object::SectionedAddress SA) const;
  void dumpAddress(raw_ostream &OS, uint64_t Address) const;
  static void dumpAddress(raw_ostream &OS, uint8_t AddressSize,
                          uint64_t Address);
  static void dumpAddressSection(const DWARFObject &Obj, raw_ostream &OS,
                                 DIDumpOptions DumpOpts, uint64_t SectionIndex);

  /// Extracts a value in \p Data at offset \p *OffsetPtr. The information
  /// in \p FormParams is needed to interpret some forms. The optional
  /// \p Context and \p Unit allow extracting information if the form refers
  /// to other sections (e.g., .debug_str).
  bool extractValue(const DWARFDataExtractor &Data, uint64_t *OffsetPtr,
                    dwarf::FormParams FormParams,
                    const DWARFContext *Context = nullptr,
                    const DWARFUnit *Unit = nullptr);

  bool extractValue(const DWARFDataExtractor &Data, uint64_t *OffsetPtr,
                    dwarf::FormParams FormParams, const DWARFUnit *U) {
    return extractValue(Data, OffsetPtr, FormParams, nullptr, U);
  }
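
  // Extraction sketch (illustrative): decode one attribute value at *Offset
  // with the unit's form parameters so unit- and section-relative forms
  // resolve correctly (DWARFUnit::getFormParams() is assumed here; Data,
  // Offset and U are placeholders):
  //
  //   DWARFFormValue FV(dwarf::DW_FORM_strp);
  //   if (FV.extractValue(Data, &Offset, U->getFormParams(), U))
  //     ... use the getAs*() accessors below ...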

  /// The getAsFoo functions below return the extracted value as Foo if the
  /// DWARFFormValue's form class is suitable for representing Foo.
  std::optional<uint64_t> getAsReference() const;
  struct UnitOffset {
    DWARFUnit *Unit;
    uint64_t Offset;
  };
  std::optional<UnitOffset> getAsRelativeReference() const;
  std::optional<uint64_t> getAsUnsignedConstant() const;
  std::optional<int64_t> getAsSignedConstant() const;
  Expected<const char *> getAsCString() const;
  std::optional<uint64_t> getAsAddress() const;
  std::optional<object::SectionedAddress> getAsSectionedAddress() const;
  std::optional<uint64_t> getAsSectionOffset() const;
  std::optional<ArrayRef<uint8_t>> getAsBlock() const;
  std::optional<uint64_t> getAsCStringOffset() const;
  std::optional<uint64_t> getAsReferenceUVal() const;
  /// Correctly extract any file paths from a form value.
  ///
  /// These values can come from DW_AT_decl_file or DW_AT_call_file
  /// attributes. We need to use the file index in the correct DWARFUnit's line
  /// table prologue, and each DWARFFormValue has the DWARFUnit the form value
  /// was extracted from.
  ///
  /// \param Kind The kind of path to extract.
  ///
  /// \returns A valid string value on success, or std::nullopt if the form
  /// class is not FC_Constant, or if the file index is not valid.
  std::optional<std::string>
  getAsFile(DILineInfoSpecifier::FileLineInfoKind Kind) const;

  /// Skip a form's value in \p DebugInfoData at the offset specified by
  /// \p OffsetPtr.
  ///
  /// Skips the bytes for the current form and updates the offset.
  ///
  /// \param DebugInfoData The data where we want to skip the value.
  /// \param OffsetPtr A reference to the offset that will be updated.
  /// \param Params DWARF parameters to help interpret forms.
  /// \returns true on success, false if the form was not skipped.
  bool skipValue(DataExtractor DebugInfoData, uint64_t *OffsetPtr,
                 const dwarf::FormParams Params) const {
    return DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, Params);
  }

  /// Skip a form's value in \p DebugInfoData at the offset specified by
  /// \p OffsetPtr.
  ///
  /// Skips the bytes for the specified form and updates the offset.
  ///
  /// \param Form The DW_FORM enumeration that indicates the form to skip.
  /// \param DebugInfoData The data where we want to skip the value.
  /// \param OffsetPtr A reference to the offset that will be updated.
  /// \param FormParams DWARF parameters to help interpret forms.
  /// \returns true on success, false if the form was not skipped.
  static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
                        uint64_t *OffsetPtr,
                        const dwarf::FormParams FormParams);
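
  // Skipping sketch (illustrative): advance past a value without decoding it,
  // e.g. when scanning a DIE for one particular attribute (DebugInfoData,
  // Offset and FormParams are placeholders):
  //
  //   if (!DWARFFormValue::skipValue(dwarf::DW_FORM_block1, DebugInfoData,
  //                                  &Offset, FormParams))
  //     ... malformed or unsupported form ...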

private:
  void dumpString(raw_ostream &OS) const;
};

namespace dwarf {

/// Take an optional DWARFFormValue and try to extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and was a string.
inline std::optional<const char *>
toString(const std::optional<DWARFFormValue> &V) {
  if (!V)
    return std::nullopt;
  Expected<const char*> E = V->getAsCString();
  if (!E) {
    consumeError(E.takeError());
    return std::nullopt;
  }
  return *E;
}

/// Take an optional DWARFFormValue and try to extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and was a string.
inline StringRef toStringRef(const std::optional<DWARFFormValue> &V,
                             StringRef Default = {}) {
  if (!V)
    return Default;
  auto S = V->getAsCString();
  if (!S) {
    consumeError(S.takeError());
    return Default;
  }
  if (!*S)
    return Default;
  return *S;
}

/// Take an optional DWARFFormValue and extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the string value or Default if the V doesn't have a value or the
/// form value's encoding wasn't a string.
inline const char *toString(const std::optional<DWARFFormValue> &V,
                            const char *Default) {
  if (auto E = toString(V))
    return *E;
  return Default;
}
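
// Usage sketch (illustrative, not part of the upstream header): the
// defaulted helpers reduce attribute decoding to one line. A DIE lookup such
// as Die.find(DW_AT_name) (assumed from DWARFDie.h) supplies the optional:
//
//   const char *Name = toString(Die.find(DW_AT_name), "<unknown>");
//   uint64_t Line = toUnsigned(Die.find(DW_AT_decl_line), 0);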

/// Take an optional DWARFFormValue and try to extract an unsigned constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has an unsigned constant form.
inline std::optional<uint64_t>
toUnsigned(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsUnsignedConstant();
  return std::nullopt;
}

/// Take an optional DWARFFormValue and extract an unsigned constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted unsigned value or Default if the V doesn't have a
/// value or the form value's encoding wasn't an unsigned constant form.
inline uint64_t toUnsigned(const std::optional<DWARFFormValue> &V,
                           uint64_t Default) {
  return toUnsigned(V).value_or(Default);
}

/// Take an optional DWARFFormValue and try to extract a reference.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a reference form.
inline std::optional<uint64_t>
toReference(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsReference();
  return std::nullopt;
}

/// Take an optional DWARFFormValue and extract a reference.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted reference value or Default if the V doesn't have a
/// value or the form value's encoding wasn't a reference form.
inline uint64_t toReference(const std::optional<DWARFFormValue> &V,
                            uint64_t Default) {
  return toReference(V).value_or(Default);
}

/// Take an optional DWARFFormValue and try to extract a signed constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a signed constant form.
inline std::optional<int64_t> toSigned(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsSignedConstant();
  return std::nullopt;
}

/// Take an optional DWARFFormValue and extract a signed integer.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted signed integer value or Default if the V doesn't
/// have a value or the form value's encoding wasn't a signed integer form.
inline int64_t toSigned(const std::optional<DWARFFormValue> &V,
                        int64_t Default) {
  return toSigned(V).value_or(Default);
}

/// Take an optional DWARFFormValue and try to extract an address.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has an address form.
inline std::optional<uint64_t>
toAddress(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsAddress();
  return std::nullopt;
}

inline std::optional<object::SectionedAddress>
toSectionedAddress(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsSectionedAddress();
  return std::nullopt;
}

/// Take an optional DWARFFormValue and extract an address.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted address value or Default if the V doesn't have a
/// value or the form value's encoding wasn't an address form.
inline uint64_t toAddress(const std::optional<DWARFFormValue> &V,
                          uint64_t Default) {
  return toAddress(V).value_or(Default);
}

/// Take an optional DWARFFormValue and try to extract a section offset.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a section offset form.
inline std::optional<uint64_t>
toSectionOffset(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsSectionOffset();
  return std::nullopt;
}

/// Take an optional DWARFFormValue and extract a section offset.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted section offset value or Default if the V doesn't
/// have a value or the form value's encoding wasn't a section offset form.
inline uint64_t toSectionOffset(const std::optional<DWARFFormValue> &V,
                                uint64_t Default) {
  return toSectionOffset(V).value_or(Default);
}

/// Take an optional DWARFFormValue and try to extract block data.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a block form.
inline std::optional<ArrayRef<uint8_t>>
toBlock(const std::optional<DWARFFormValue> &V) {
  if (V)
    return V->getAsBlock();
  return std::nullopt;
}

/// Check whether specified \p Form belongs to the \p FC class.
/// \param Form an attribute form.
/// \param FC an attribute form class to check.
/// \param DwarfVersion the version of DWARF debug info keeping the attribute.
/// \returns true if specified \p Form belongs to the \p FC class.
bool doesFormBelongToClass(dwarf::Form Form, DWARFFormValue::FormClass FC,
                           uint16_t DwarfVersion);

} // end namespace dwarf

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFFORMVALUE_H
//===- DWARFDebugPubTable.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGPUBTABLE_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGPUBTABLE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include <cstdint>
#include <vector>

namespace llvm {

class raw_ostream;
class DWARFDataExtractor;
class Error;

/// Represents a structure for holding and parsing .debug_pub* tables.
class DWARFDebugPubTable {
public:
  struct Entry {
    /// Section offset from the beginning of the compilation unit.
    uint64_t SecOffset;

    /// An entry of the various gnu_pub* debug sections.
    dwarf::PubIndexEntryDescriptor Descriptor;

    /// The name of the object as given by the DW_AT_name attribute of the
    /// referenced DIE.
    StringRef Name;
  };

  /// Each table consists of sets of variable length entries. Each set describes
  /// the names of global objects and functions, or global types, respectively,
  /// whose definitions are represented by debugging information entries owned
  /// by a single compilation unit.
  struct Set {
    /// The total length of the entries for that set, not including the length
    /// field itself.
    uint64_t Length;

    /// The DWARF format of the set.
    dwarf::DwarfFormat Format;

    /// This number is specific to the name lookup table and is independent of
    /// the DWARF version number.
    uint16_t Version;

    /// The offset from the beginning of the .debug_info section of the
    /// compilation unit header referenced by the set.
    uint64_t Offset;

    /// The size in bytes of the contents of the .debug_info section generated
    /// to represent that compilation unit.
    uint64_t Size;

    std::vector<Entry> Entries;
  };

private:
  std::vector<Set> Sets;

  /// GNU-style tables contain additional information.
  /// This flag determines whether the section we parse is a debug_gnu* table.
  bool GnuStyle = false;

public:
  DWARFDebugPubTable() = default;

  void extract(DWARFDataExtractor Data, bool GnuStyle,
               function_ref<void(Error)> RecoverableErrorHandler);

  void dump(raw_ostream &OS) const;

  ArrayRef<Set> getData() { return Sets; }
};
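
// Usage sketch (illustrative, not part of the upstream header): extract()
// reports recoverable problems through the supplied handler instead of
// aborting the parse (WithColor::defaultErrorHandler is an assumed example
// handler; Data and OS are placeholders):
//
//   DWARFDebugPubTable PubNames;
//   PubNames.extract(Data, /*GnuStyle=*/false, [](Error E) {
//     WithColor::defaultErrorHandler(std::move(E));
//   });
//   PubNames.dump(OS);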

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGPUBTABLE_H
//===- DWARFDebugRnglists.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGRNGLISTS_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGRNGLISTS_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
#include "llvm/DebugInfo/DWARF/DWARFListTable.h"
#include <cstdint>

namespace llvm {

class Error;
class raw_ostream;
class DWARFUnit;
class DWARFDataExtractor;
struct DIDumpOptions;
namespace object {
struct SectionedAddress;
}

/// A class representing a single range list entry.
struct RangeListEntry : public DWARFListEntryBase {
  /// The values making up the range list entry. Most represent a range with
  /// a start and end address or a start address and a length. Others are
  /// single value base addresses or end-of-list with no values. The unneeded
  /// values are semantically undefined, but initialized to 0.
  uint64_t Value0;
  uint64_t Value1;

  Error extract(DWARFDataExtractor Data, uint64_t *OffsetPtr);
  void
  dump(raw_ostream &OS, uint8_t AddrSize, uint8_t MaxEncodingStringLength,
       uint64_t &CurrentBase, DIDumpOptions DumpOpts,
       llvm::function_ref<std::optional<object::SectionedAddress>(uint32_t)>
           LookupPooledAddress) const;
  bool isSentinel() const { return EntryKind == dwarf::DW_RLE_end_of_list; }
};

/// A class representing a single rangelist.
class DWARFDebugRnglist : public DWARFListType<RangeListEntry> {
public:
  /// Build a DWARFAddressRangesVector from a rangelist.
  DWARFAddressRangesVector getAbsoluteRanges(
      std::optional<object::SectionedAddress> BaseAddr, uint8_t AddressByteSize,
      function_ref<std::optional<object::SectionedAddress>(uint32_t)>
          LookupPooledAddress) const;

  /// Build a DWARFAddressRangesVector from a rangelist.
  DWARFAddressRangesVector
  getAbsoluteRanges(std::optional<object::SectionedAddress> BaseAddr,
                    DWARFUnit &U) const;
};
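
// Usage sketch (illustrative, not part of the upstream header): after a
// rangelist is extracted, the unit-based overload resolves the base address
// and the address pool through the unit (List, BaseAddr and U are
// placeholders):
//
//   DWARFAddressRangesVector Ranges = List.getAbsoluteRanges(BaseAddr, U);
//   for (const DWARFAddressRange &R : Ranges)
//     ... R.LowPC / R.HighPC are now absolute ...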

class DWARFDebugRnglistTable : public DWARFListTableBase<DWARFDebugRnglist> {
public:
  DWARFDebugRnglistTable()
      : DWARFListTableBase(/* SectionName    = */ ".debug_rnglists",
                           /* HeaderString   = */ "ranges:",
                           /* ListTypeString = */ "range") {}
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGRNGLISTS_H
//===- DWARFDebugAranges.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGARANGES_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGARANGES_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include <cstdint>
#include <vector>

namespace llvm {
class DWARFDataExtractor;
class Error;

class DWARFContext;

class DWARFDebugAranges {
public:
  void generate(DWARFContext *CTX);
  uint64_t findAddress(uint64_t Address) const;

private:
  void clear();
  void extract(DWARFDataExtractor DebugArangesData,
               function_ref<void(Error)> RecoverableErrorHandler,
               function_ref<void(Error)> WarningHandler);

  /// Call appendRange multiple times and then call construct.
  void appendRange(uint64_t CUOffset, uint64_t LowPC, uint64_t HighPC);
  void construct();

  struct Range {
    explicit Range(uint64_t LowPC, uint64_t HighPC, uint64_t CUOffset)
      : LowPC(LowPC), Length(HighPC - LowPC), CUOffset(CUOffset) {}

    void setHighPC(uint64_t HighPC) {
      if (HighPC == -1ULL || HighPC <= LowPC)
        Length = 0;
      else
        Length = HighPC - LowPC;
    }

    uint64_t HighPC() const {
      if (Length)
        return LowPC + Length;
      return -1ULL;
    }

    bool operator<(const Range &other) const {
      return LowPC < other.LowPC;
    }

    uint64_t LowPC; /// Start of address range.
    uint64_t Length; /// Length of the address range; HighPC() is LowPC + Length.
    uint64_t CUOffset; /// Offset of the compile unit or die.
  };

  struct RangeEndpoint {
    uint64_t Address;
    uint64_t CUOffset;
    bool IsRangeStart;

    RangeEndpoint(uint64_t Address, uint64_t CUOffset, bool IsRangeStart)
        : Address(Address), CUOffset(CUOffset), IsRangeStart(IsRangeStart) {}

    bool operator<(const RangeEndpoint &Other) const {
      return Address < Other.Address;
    }
  };

  using RangeColl = std::vector<Range>;
  using RangeCollIterator = RangeColl::const_iterator;

  std::vector<RangeEndpoint> Endpoints;
  RangeColl Aranges;
  DenseSet<uint64_t> ParsedCUOffsets;
};
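
// Usage sketch (illustrative, not part of the upstream header): build the
// table once per context, then map addresses to CU offsets. findAddress()
// is assumed to return -1ULL when no range covers Address.
inline uint64_t findCUOffsetSketch(DWARFContext *Ctx, uint64_t Address) {
  DWARFDebugAranges Aranges;
  Aranges.generate(Ctx);               // parses .debug_aranges for the context
  return Aranges.findAddress(Address); // offset of the covering CU
}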

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGARANGES_H
//===- DWARFAddressRange.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFADDRESSRANGE_H
#define LLVM_DEBUGINFO_DWARF_DWARFADDRESSRANGE_H

#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Object/ObjectFile.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <tuple>
#include <vector>

namespace llvm {

class raw_ostream;
class DWARFObject;

struct DWARFAddressRange {
  uint64_t LowPC;
  uint64_t HighPC;
  uint64_t SectionIndex;

  DWARFAddressRange() = default;

  /// Used for unit testing.
  DWARFAddressRange(
      uint64_t LowPC, uint64_t HighPC,
      uint64_t SectionIndex = object::SectionedAddress::UndefSection)
      : LowPC(LowPC), HighPC(HighPC), SectionIndex(SectionIndex) {}

  /// Returns true if LowPC is smaller than or equal to HighPC. This accounts
  /// for dead-stripped ranges.
  bool valid() const { return LowPC <= HighPC; }

  /// Returns true if [LowPC, HighPC) intersects with [RHS.LowPC, RHS.HighPC).
  bool intersects(const DWARFAddressRange &RHS) const {
    assert(valid() && RHS.valid());
    if (SectionIndex != RHS.SectionIndex)
      return false;
    // Empty ranges can't intersect.
    if (LowPC == HighPC || RHS.LowPC == RHS.HighPC)
      return false;
    return LowPC < RHS.HighPC && RHS.LowPC < HighPC;
  }

  /// Union two address ranges if they intersect.
  ///
  /// This function will union two address ranges if they intersect by
  /// modifying this range to be the union of both ranges. If the two ranges
  /// don't intersect this range will be left alone.
  ///
  /// \param RHS Another address range to combine with.
  ///
  /// \returns false if the ranges don't intersect, true if they do and the
  /// ranges were combined.
  bool merge(const DWARFAddressRange &RHS) {
    if (!intersects(RHS))
      return false;
    LowPC = std::min<uint64_t>(LowPC, RHS.LowPC);
    HighPC = std::max<uint64_t>(HighPC, RHS.HighPC);
    return true;
  }

  void dump(raw_ostream &OS, uint32_t AddressSize, DIDumpOptions DumpOpts = {},
            const DWARFObject *Obj = nullptr) const;
};
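
// Usage sketch (illustrative, not part of the upstream header): merge()
// coalesces overlapping ranges in place and leaves disjoint ones untouched.
inline void mergeRangesSketch() {
  DWARFAddressRange A(0x1000, 0x2000);
  DWARFAddressRange B(0x1800, 0x3000);
  if (A.merge(B)) {
    // A is now [0x1000, 0x3000); both inputs shared the default section.
  }
}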

inline bool operator<(const DWARFAddressRange &LHS,
                      const DWARFAddressRange &RHS) {
  return std::tie(LHS.SectionIndex, LHS.LowPC, LHS.HighPC) <
         std::tie(RHS.SectionIndex, RHS.LowPC, RHS.HighPC);
}

inline bool operator==(const DWARFAddressRange &LHS,
                       const DWARFAddressRange &RHS) {
  return std::tie(LHS.SectionIndex, LHS.LowPC, LHS.HighPC) ==
         std::tie(RHS.SectionIndex, RHS.LowPC, RHS.HighPC);
}

raw_ostream &operator<<(raw_ostream &OS, const DWARFAddressRange &R);

/// DWARFAddressRangesVector - represents a set of absolute address ranges.
using DWARFAddressRangesVector = std::vector<DWARFAddressRange>;

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFADDRESSRANGE_H
//===- DWARFDie.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDIE_H
#define LLVM_DEBUGINFO_DWARF_DWARFDIE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
#include "llvm/DebugInfo/DWARF/DWARFAttribute.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
#include "llvm/DebugInfo/DWARF/DWARFLocationExpression.h"
#include <cassert>
#include <cstdint>
#include <iterator>

namespace llvm {

class DWARFUnit;
class raw_ostream;

//===----------------------------------------------------------------------===//
/// Utility class that carries the DWARF compile/type unit and the debug info
/// entry in an object.
///
/// When accessing information from a debug info entry we always need the
/// DWARF compile/type unit in order to extract the info correctly, as some
/// information is relative to the compile/type unit. Prior to this class the
/// DWARFUnit and the DWARFDebugInfoEntry were passed around separately and
/// there was the possibility for error if the wrong DWARFUnit was used to
/// extract a unit relative offset. This class helps to ensure that this
/// doesn't happen and also simplifies the attribute extraction calls by not
/// having to specify the DWARFUnit for each call.
class DWARFDie {
  DWARFUnit *U = nullptr;
  const DWARFDebugInfoEntry *Die = nullptr;

public:
  DWARFDie() = default;
  DWARFDie(DWARFUnit *Unit, const DWARFDebugInfoEntry *D) : U(Unit), Die(D) {}

  bool isValid() const { return U && Die; }
  explicit operator bool() const { return isValid(); }
  const DWARFDebugInfoEntry *getDebugInfoEntry() const { return Die; }
  DWARFUnit *getDwarfUnit() const { return U; }

  /// Get the abbreviation declaration for this DIE.
  ///
  /// \returns the abbreviation declaration or NULL for null tags.
  const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
    assert(isValid() && "must check validity prior to calling");
    return Die->getAbbreviationDeclarationPtr();
  }

  /// Get the absolute offset into the debug info or types section.
  ///
  /// \returns the DIE offset or -1U if invalid.
  uint64_t getOffset() const {
    assert(isValid() && "must check validity prior to calling");
    return Die->getOffset();
  }

  dwarf::Tag getTag() const {
    auto AbbrevDecl = getAbbreviationDeclarationPtr();
    if (AbbrevDecl)
      return AbbrevDecl->getTag();
    return dwarf::DW_TAG_null;
  }

  bool hasChildren() const {
    assert(isValid() && "must check validity prior to calling");
    return Die->hasChildren();
  }

  /// Returns true for a valid DIE that terminates a sibling chain.
  bool isNULL() const { return getAbbreviationDeclarationPtr() == nullptr; }

  /// Returns true if DIE represents a subprogram (not inlined).
  bool isSubprogramDIE() const;

  /// Returns true if DIE represents a subprogram or an inlined subroutine.
  bool isSubroutineDIE() const;

  /// Get the parent of this DIE object.
  ///
  /// \returns a valid DWARFDie instance if this object has a parent or an
  /// invalid DWARFDie instance if it doesn't.
  DWARFDie getParent() const;

  /// Get the sibling of this DIE object.
  ///
  /// \returns a valid DWARFDie instance if this object has a sibling or an
  /// invalid DWARFDie instance if it doesn't.
  DWARFDie getSibling() const;

  /// Get the previous sibling of this DIE object.
  ///
  /// \returns a valid DWARFDie instance if this object has a sibling or an
  /// invalid DWARFDie instance if it doesn't.
  DWARFDie getPreviousSibling() const;

  /// Get the first child of this DIE object.
  ///
  /// \returns a valid DWARFDie instance if this object has children or an
  /// invalid DWARFDie instance if it doesn't.
  DWARFDie getFirstChild() const;

  /// Get the last child of this DIE object.
  ///
  /// \returns a valid null DWARFDie instance if this object has children or an
  /// invalid DWARFDie instance if it doesn't.
  DWARFDie getLastChild() const;

  /// Dump the DIE and all of its attributes to the supplied stream.
  ///
  /// \param OS the stream to use for output.
  /// \param indent the number of characters to indent each line that is output.
  void dump(raw_ostream &OS, unsigned indent = 0,
            DIDumpOptions DumpOpts = DIDumpOptions()) const;

  /// Convenience zero-argument overload for debugging.
  LLVM_DUMP_METHOD void dump() const;

  /// Extract the specified attribute from this DIE.
  ///
  /// Extract an attribute value from this DIE only. This call doesn't look
  /// for the attribute value in any DW_AT_specification or
  /// DW_AT_abstract_origin referenced DIEs.
  ///
  /// \param Attr the attribute to extract.
  /// \returns an optional DWARFFormValue that will have the form value if the
  /// attribute was successfully extracted.
  std::optional<DWARFFormValue> find(dwarf::Attribute Attr) const;

  /// Extract the first value of any attribute in Attrs from this DIE.
  ///
  /// Extract the first attribute that matches from this DIE only. This call
  /// doesn't look for the attribute value in any DW_AT_specification or
  /// DW_AT_abstract_origin referenced DIEs. The attributes will be searched
  /// linearly in the order they are specified within Attrs.
  ///
  /// \param Attrs an array of DWARF attribute to look for.
  /// \returns an optional that has a valid DWARFFormValue for the first
  /// matching attribute in Attrs, or std::nullopt if none of the attributes in
  /// Attrs exist in this DIE.
  std::optional<DWARFFormValue> find(ArrayRef<dwarf::Attribute> Attrs) const;

  /// Extract the first value of any attribute in Attrs from this DIE and
  /// recurse into any DW_AT_specification or DW_AT_abstract_origin referenced
  /// DIEs.
  ///
  /// \param Attrs an array of DWARF attribute to look for.
  /// \returns an optional that has a valid DWARFFormValue for the first
  /// matching attribute in Attrs, or std::nullopt if none of the attributes in
  /// Attrs exist in this DIE or in any DW_AT_specification or
  /// DW_AT_abstract_origin DIEs.
  std::optional<DWARFFormValue>
  findRecursively(ArrayRef<dwarf::Attribute> Attrs) const;

  /// Extract the specified attribute from this DIE as the referenced DIE.
  ///
  /// Regardless of the reference type, return the correct DWARFDie instance if
  /// the attribute exists. The returned DWARFDie object might be from another
  /// DWARFUnit, but that is all encapsulated in the new DWARFDie object.
  ///
  /// Extract an attribute value from this DIE only. This call doesn't look
  /// for the attribute value in any DW_AT_specification or
  /// DW_AT_abstract_origin referenced DIEs.
  ///
  /// \param Attr the attribute to extract.
  /// \returns a valid DWARFDie instance if the attribute exists, or an invalid
  /// DWARFDie object if it doesn't.
  DWARFDie getAttributeValueAsReferencedDie(dwarf::Attribute Attr) const;
  DWARFDie getAttributeValueAsReferencedDie(const DWARFFormValue &V) const;

  DWARFDie resolveTypeUnitReference() const;

  /// Extract the range base attribute from this DIE as absolute section offset.
  ///
  /// This is a utility function that checks for either the DW_AT_rnglists_base
  /// or DW_AT_GNU_ranges_base attribute.
  ///
  /// \returns an optional absolute section offset value for the attribute.
  std::optional<uint64_t> getRangesBaseAttribute() const;
  std::optional<uint64_t> getLocBaseAttribute() const;

  /// Get the DW_AT_high_pc attribute value as an address.
  ///
  /// In DWARF version 4 and later the high PC can be encoded as an offset from
  /// the DW_AT_low_pc. This function takes care of extracting the value as an
  /// address or offset and adds it to the low PC if needed and returns the
  /// value as an optional in case the DIE doesn't have a DW_AT_high_pc
  /// attribute.
  ///
  /// \param LowPC the low PC that might be needed to calculate the high PC.
  /// \returns an optional address value for the attribute.
  std::optional<uint64_t> getHighPC(uint64_t LowPC) const;

  /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
  /// Returns true if both attributes are present.
  bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
                       uint64_t &SectionIndex) const;

  /// Get the address ranges for this DIE.
  ///
  /// Get the hi/low PC range if both attributes are available or extracts the
  /// non-contiguous address ranges from the DW_AT_ranges attribute.
  ///
  /// Extracts the range information from this DIE only. This call doesn't look
  /// for the range in any DW_AT_specification or DW_AT_abstract_origin DIEs.
  ///
  /// \returns an address range vector that might be empty if no address range
  /// information is available.
  Expected<DWARFAddressRangesVector> getAddressRanges() const;

  bool addressRangeContainsAddress(const uint64_t Address) const;

  Expected<DWARFLocationExpressionsVector>
  getLocations(dwarf::Attribute Attr) const;

  /// If a DIE represents a subprogram (or inlined subroutine), returns its
  /// mangled name (or short name, if mangled is missing). This name may be
  /// fetched from specification or abstract origin for this subprogram.
  /// Returns null if no name is found.
  const char *getSubroutineName(DINameKind Kind) const;

  /// Return the DIE name resolving DW_AT_specification or DW_AT_abstract_origin
  /// references if necessary. For the LinkageName case it additionally searches
  /// for ShortName if LinkageName is not found.
  /// Returns null if no name is found.
  const char *getName(DINameKind Kind) const;
  void getFullName(raw_string_ostream &,
                   std::string *OriginalFullName = nullptr) const;

  /// Return the DIE short name resolving DW_AT_specification or
  /// DW_AT_abstract_origin references if necessary. Returns null if no name
  /// is found.
  const char *getShortName() const;

  /// Return the DIE linkage name resolving DW_AT_specification or
  /// DW_AT_abstract_origin references if necessary. Returns null if no name
  /// is found.
  const char *getLinkageName() const;

  /// Returns the declaration line (start line) for a DIE, assuming it specifies
  /// a subprogram. This may be fetched from specification or abstract origin
  /// for this subprogram by resolving DW_AT_specification or
  /// DW_AT_abstract_origin references if necessary.
  uint64_t getDeclLine() const;
  std::string getDeclFile(DILineInfoSpecifier::FileLineInfoKind Kind) const;

  /// Retrieves values of DW_AT_call_file, DW_AT_call_line and DW_AT_call_column
  /// from DIE (or zeroes if they are missing). This function looks for
  /// DW_AT_call attributes in this DIE only; it will not resolve the attribute
  /// values in any DW_AT_specification or DW_AT_abstract_origin DIEs.
  /// \param CallFile filled in with non-zero if successful, zero if there is no
  /// DW_AT_call_file attribute in this DIE.
  /// \param CallLine filled in with non-zero if successful, zero if there is no
  /// DW_AT_call_line attribute in this DIE.
  /// \param CallColumn filled in with non-zero if successful, zero if there is
  /// no DW_AT_call_column attribute in this DIE.
  /// \param CallDiscriminator filled in with non-zero if successful, zero if
  /// there is no DW_AT_GNU_discriminator attribute in this DIE.
  void getCallerFrame(uint32_t &CallFile, uint32_t &CallLine,
                      uint32_t &CallColumn, uint32_t &CallDiscriminator) const;

  class attribute_iterator;

  /// Get an iterator range to all attributes in the current DIE only.
  ///
  /// \returns an iterator range for the attributes of the current DIE.
  iterator_range<attribute_iterator> attributes() const;

  /// Gets the type size (in bytes) for this DIE.
  ///
  /// \param PointerSize the pointer size of the containing CU.
  /// \returns if this is a type DIE, or this DIE contains a DW_AT_type, returns
  /// the size of the type.
  std::optional<uint64_t> getTypeSize(uint64_t PointerSize);

  class iterator;

  iterator begin() const;
  iterator end() const;

  std::reverse_iterator<iterator> rbegin() const;
  std::reverse_iterator<iterator> rend() const;

  iterator_range<iterator> children() const;
};

class DWARFDie::attribute_iterator
    : public iterator_facade_base<attribute_iterator, std::forward_iterator_tag,
                                  const DWARFAttribute> {
  /// The DWARF DIE we are extracting attributes from.
  DWARFDie Die;
  /// The value vended to clients via the operator*() or operator->().
  DWARFAttribute AttrValue;
  /// The attribute index within the abbreviation declaration in Die.
  uint32_t Index;

  friend bool operator==(const attribute_iterator &LHS,
                         const attribute_iterator &RHS);

  /// Update the attribute index and attempt to read the attribute value. If the
  /// attribute is able to be read, update AttrValue and the Index member
  /// variable. If the attribute value is not able to be read, an appropriate
  /// error will be set if the Err member variable is non-NULL and the iterator
  /// will be set to the end value so iteration stops.
  void updateForIndex(const DWARFAbbreviationDeclaration &AbbrDecl, uint32_t I);

public:
  attribute_iterator() = delete;
  explicit attribute_iterator(DWARFDie D, bool End);

  attribute_iterator &operator++();
  attribute_iterator &operator--();
  explicit operator bool() const { return AttrValue.isValid(); }
  const DWARFAttribute &operator*() const { return AttrValue; }
};

inline bool operator==(const DWARFDie::attribute_iterator &LHS,
                       const DWARFDie::attribute_iterator &RHS) {
  return LHS.Index == RHS.Index;
}

inline bool operator!=(const DWARFDie::attribute_iterator &LHS,
                       const DWARFDie::attribute_iterator &RHS) {
  return !(LHS == RHS);
}
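
// Iteration sketch (illustrative, not part of the upstream header):
// attributes() vends each decoded DWARFAttribute of the current DIE only.
inline unsigned countAttributesSketch(const DWARFDie &Die) {
  unsigned N = 0;
  for (const DWARFAttribute &Attr : Die.attributes()) {
    (void)Attr; // Attr.Attr is the attribute, Attr.Value the form value.
    ++N;
  }
  return N;
}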

inline bool operator==(const DWARFDie &LHS, const DWARFDie &RHS) {
  return LHS.getDebugInfoEntry() == RHS.getDebugInfoEntry() &&
         LHS.getDwarfUnit() == RHS.getDwarfUnit();
}

inline bool operator!=(const DWARFDie &LHS, const DWARFDie &RHS) {
  return !(LHS == RHS);
}

inline bool operator<(const DWARFDie &LHS, const DWARFDie &RHS) {
  return LHS.getOffset() < RHS.getOffset();
}

class DWARFDie::iterator
    : public iterator_facade_base<iterator, std::bidirectional_iterator_tag,
                                  const DWARFDie> {
  DWARFDie Die;

  friend std::reverse_iterator<llvm::DWARFDie::iterator>;
  friend bool operator==(const DWARFDie::iterator &LHS,
                         const DWARFDie::iterator &RHS);

public:
  iterator() = default;

  explicit iterator(DWARFDie D) : Die(D) {}

  iterator &operator++() {
    Die = Die.getSibling();
    return *this;
  }

  iterator &operator--() {
    Die = Die.getPreviousSibling();
    return *this;
  }

  const DWARFDie &operator*() const { return Die; }
};

inline bool operator==(const DWARFDie::iterator &LHS,
                       const DWARFDie::iterator &RHS) {
  return LHS.Die == RHS.Die;
}

// These inline functions must follow the DWARFDie::iterator definition above
// as they use functions from that class.
inline DWARFDie::iterator DWARFDie::begin() const {
  return iterator(getFirstChild());
}

inline DWARFDie::iterator DWARFDie::end() const {
  return iterator(getLastChild());
}

inline iterator_range<DWARFDie::iterator> DWARFDie::children() const {
  return make_range(begin(), end());
}
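
// Traversal sketch (illustrative, not part of the upstream header): with the
// begin()/end() pair above, a depth-first walk is a plain range-for.
inline void walkDieTreeSketch(const DWARFDie &Die) {
  for (DWARFDie Child : Die.children())
    walkDieTreeSketch(Child); // children() is an empty range for leaf DIEs
}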

} // end namespace llvm

namespace std {

template <>
class reverse_iterator<llvm::DWARFDie::iterator>
    : public llvm::iterator_facade_base<
          reverse_iterator<llvm::DWARFDie::iterator>,
          bidirectional_iterator_tag, const llvm::DWARFDie> {

private:
  llvm::DWARFDie Die;
  bool AtEnd;

public:
  reverse_iterator(llvm::DWARFDie::iterator It)
      : Die(It.Die), AtEnd(!It.Die.getPreviousSibling()) {
    if (!AtEnd)
      Die = Die.getPreviousSibling();
  }

  llvm::DWARFDie::iterator base() const {
    return llvm::DWARFDie::iterator(AtEnd ? Die : Die.getSibling());
  }

  reverse_iterator<llvm::DWARFDie::iterator> &operator++() {
    assert(!AtEnd && "Incrementing rend");
    llvm::DWARFDie D = Die.getPreviousSibling();
    if (D)
      Die = D;
    else
      AtEnd = true;
    return *this;
  }

  reverse_iterator<llvm::DWARFDie::iterator> &operator--() {
    if (AtEnd) {
      AtEnd = false;
      return *this;
    }
    Die = Die.getSibling();
    assert(!Die.isNULL() && "Decrementing rbegin");
    return *this;
  }

  const llvm::DWARFDie &operator*() const {
    assert(Die.isValid());
    return Die;
  }

  // FIXME: We should be able to specify the equals operator as a friend, but
  //        that causes the compiler to think the operator overload is ambiguous
  //        with the friend declaration and the actual definition as candidates.
  bool equals(const reverse_iterator<llvm::DWARFDie::iterator> &RHS) const {
    return Die == RHS.Die && AtEnd == RHS.AtEnd;
  }
};

} // namespace std

namespace llvm {

inline bool operator==(const std::reverse_iterator<DWARFDie::iterator> &LHS,
                       const std::reverse_iterator<DWARFDie::iterator> &RHS) {
  return LHS.equals(RHS);
}

inline bool operator!=(const std::reverse_iterator<DWARFDie::iterator> &LHS,
                       const std::reverse_iterator<DWARFDie::iterator> &RHS) {
  return !(LHS == RHS);
}

inline std::reverse_iterator<DWARFDie::iterator> DWARFDie::rbegin() const {
  return std::make_reverse_iterator(end());
}

inline std::reverse_iterator<DWARFDie::iterator> DWARFDie::rend() const {
  return std::make_reverse_iterator(begin());
}

void dumpTypeQualifiedName(const DWARFDie &DIE, raw_ostream &OS);
void dumpTypeUnqualifiedName(const DWARFDie &DIE, raw_ostream &OS,
                             std::string *OriginalFullName = nullptr);

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDIE_H
//===- DWARFDebugFrame.h - Parsing of .debug_frame --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGFRAME_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGFRAME_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/DWARF/DWARFExpression.h"
#include "llvm/Support/Error.h"
#include "llvm/TargetParser/Triple.h"
#include <map>
#include <memory>
#include <vector>

namespace llvm {

class raw_ostream;
class DWARFDataExtractor;
class MCRegisterInfo;
struct DIDumpOptions;

namespace dwarf {

constexpr uint32_t InvalidRegisterNumber = UINT32_MAX;

/// A class that represents a location for the Call Frame Address (CFA) or a
/// register. This is decoded from the DWARF Call Frame Information
/// instructions and put into an UnwindRow.
class UnwindLocation {
public:
  enum Location {
    /// Not specified.
    Unspecified,
    /// Register is not available and can't be recovered.
    Undefined,
    /// Register value is in the register, nothing needs to be done to unwind
    /// it:
    ///   reg = reg
    Same,
    /// Register is in or at the CFA plus an offset:
    ///   reg = CFA + offset
    ///   reg = deref(CFA + offset)
    CFAPlusOffset,
    /// Register or CFA is in or at a register plus offset, optionally in
    /// an address space:
    ///   reg = reg + offset [in addrspace]
    ///   reg = deref(reg + offset [in addrspace])
    RegPlusOffset,
    /// Register or CFA value is in or at a value found by evaluating a DWARF
    /// expression:
    ///   reg = eval(dwarf_expr)
    ///   reg = deref(eval(dwarf_expr))
    DWARFExpr,
    /// Value is a constant value contained in "Offset":
    ///   reg = Offset
    Constant,
  };

private:
  Location Kind;   /// The type of the location that describes how to unwind it.
  uint32_t RegNum; /// The register number for Kind == RegPlusOffset.
  int32_t Offset;  /// The offset for Kind == CFAPlusOffset or RegPlusOffset.
  std::optional<uint32_t> AddrSpace;   /// The address space for Kind ==
                                       /// RegPlusOffset for CFA.
  std::optional<DWARFExpression> Expr; /// The DWARF expression for Kind ==
                                       /// DWARFExpression.
  bool Dereference; /// If true, the resulting location must be dereferenced
                    /// after the location value is computed.

  // Constructors are private to force people to use the create static
  // functions.
  UnwindLocation(Location K)
      : Kind(K), RegNum(InvalidRegisterNumber), Offset(0),
        AddrSpace(std::nullopt), Dereference(false) {}

  UnwindLocation(Location K, uint32_t Reg, int32_t Off,
                 std::optional<uint32_t> AS, bool Deref)
      : Kind(K), RegNum(Reg), Offset(Off), AddrSpace(AS), Dereference(Deref) {}

  UnwindLocation(DWARFExpression E, bool Deref)
      : Kind(DWARFExpr), RegNum(InvalidRegisterNumber), Offset(0), Expr(E),
        Dereference(Deref) {}

public:
  /// Create a location whose rule is set to Unspecified. This means the
  /// register value might be in the same register but it wasn't specified in
  /// the unwind opcodes.
  static UnwindLocation createUnspecified();
  /// Create a location where the value is undefined and not available. This can
  /// happen when a register is volatile and can't be recovered.
  static UnwindLocation createUndefined();
  /// Create a location where the value is known to be in the register itself.
  static UnwindLocation createSame();
  /// Create a location that is in (Deref == false) or at (Deref == true) the
  /// CFA plus an offset. Most registers that are spilled onto the stack use
  /// this rule. The rule for the register will use this rule and specify a
  /// unique offset from the CFA with \a Deref set to true. This value will be
  /// relative to a CFA value which is typically defined using the register
  /// plus offset location. \see createRegisterPlusOffset(...) for more
  /// information.
  static UnwindLocation createIsCFAPlusOffset(int32_t Off);
  static UnwindLocation createAtCFAPlusOffset(int32_t Off);
  /// Create a location where the saved value is in (Deref == false) or at
  /// (Deref == true) a register plus an offset and, optionally, in the specified
  /// address space (used mostly for the CFA).
  ///
  /// The CFA is usually defined using this rule by using the stack pointer or
  /// frame pointer as the register, with an offset that accounts for all
  /// spilled registers and all local variables in a function, and Deref ==
  /// false.
  static UnwindLocation
  createIsRegisterPlusOffset(uint32_t Reg, int32_t Off,
                             std::optional<uint32_t> AddrSpace = std::nullopt);
  static UnwindLocation
  createAtRegisterPlusOffset(uint32_t Reg, int32_t Off,
                             std::optional<uint32_t> AddrSpace = std::nullopt);
  /// Create a location whose value is the result of evaluating a DWARF
  /// expression. This allows complex expressions to be evaluated in order to
  /// unwind a register or CFA value.
  static UnwindLocation createIsDWARFExpression(DWARFExpression Expr);
  static UnwindLocation createAtDWARFExpression(DWARFExpression Expr);
  static UnwindLocation createIsConstant(int32_t Value);

  Location getLocation() const { return Kind; }
  uint32_t getRegister() const { return RegNum; }
  int32_t getOffset() const { return Offset; }
  uint32_t getAddressSpace() const {
    assert(Kind == RegPlusOffset && AddrSpace);
    return *AddrSpace;
  }
  int32_t getConstant() const { return Offset; }
  /// Some opcodes will modify the CFA location's register only, so we need
  /// to be able to modify the CFA register when evaluating DWARF Call Frame
  /// Information opcodes.
  void setRegister(uint32_t NewRegNum) { RegNum = NewRegNum; }
  /// Some opcodes will modify the CFA location's offset only, so we need
  /// to be able to modify the CFA offset when evaluating DWARF Call Frame
  /// Information opcodes.
  void setOffset(int32_t NewOffset) { Offset = NewOffset; }
  /// Some opcodes modify a constant value and we need to be able to update
  /// the constant value (DW_CFA_GNU_window_save, which is also known as
  /// DW_CFA_AARCH64_negate_ra_state).
  void setConstant(int32_t Value) { Offset = Value; }

  std::optional<DWARFExpression> getDWARFExpressionBytes() const {
    return Expr;
  }
  /// Dump a location expression as text, emitting register names instead of
  /// raw register numbers when the dump options provide register information.
  ///
  /// \param OS the stream to use for output.
  ///
  /// \param DumpOpts the DWARF dump options that control the output format
  /// and can supply register information used to pretty-print register names.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) const;

  bool operator==(const UnwindLocation &RHS) const;
};

raw_ostream &operator<<(raw_ostream &OS, const UnwindLocation &R);
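
// Illustrative sketch (not part of this header): how the create functions
// above map onto common CFI rules for an x86-64 frame-pointer prologue. The
// DWARF register numbers (6 = RBP, 16 = return address) are assumptions taken
// from the SysV x86-64 ABI, used here only for the example.
//
//   // The CFA is computed from a register value, so use the "Is" variant.
//   UnwindLocation CFA = UnwindLocation::createIsRegisterPlusOffset(6, 16);
//   // The return address was stored in memory at CFA - 8, so use "At"
//   // (Deref == true) to load it.
//   UnwindLocation RA = UnwindLocation::createAtCFAPlusOffset(-8);
//   // The caller's RBP was spilled at CFA - 16.
//   UnwindLocation FP = UnwindLocation::createAtCFAPlusOffset(-16);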

/// A class that can track all registers with locations in a UnwindRow object.
///
/// Register locations use a map where the key is the register number and the
/// value is a UnwindLocation.
///
/// The register maps are put into a class so that all register locations can
/// be copied when parsing the unwind opcodes DW_CFA_remember_state and
/// DW_CFA_restore_state.
class RegisterLocations {
  std::map<uint32_t, UnwindLocation> Locations;

public:
  /// Return the location for the register in \a RegNum if there is a location.
  ///
  /// \param RegNum the register number to find a location for.
  ///
  /// \returns A location if one is available for \a RegNum, or std::nullopt
  /// otherwise.
  std::optional<UnwindLocation> getRegisterLocation(uint32_t RegNum) const {
    auto Pos = Locations.find(RegNum);
    if (Pos == Locations.end())
      return std::nullopt;
    return Pos->second;
  }

  /// Set the location for the register in \a RegNum to \a Location.
  ///
  /// \param RegNum the register number to set the location for.
  ///
  /// \param Location the UnwindLocation that describes how to unwind the value.
  void setRegisterLocation(uint32_t RegNum, const UnwindLocation &Location) {
    Locations.erase(RegNum);
    Locations.insert(std::make_pair(RegNum, Location));
  }

  /// Removes any rule for the register in \a RegNum.
  ///
  /// \param RegNum the register number to remove the location for.
  void removeRegisterLocation(uint32_t RegNum) { Locations.erase(RegNum); }

  /// Dump all registers + locations that are currently defined in this object.
  ///
  /// \param OS the stream to use for output.
  ///
  /// \param DumpOpts the DWARF dump options that control the output format
  /// and can supply register information used to pretty-print register names.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) const;

  /// Returns true if we have any register locations in this object.
  bool hasLocations() const { return !Locations.empty(); }

  size_t size() const { return Locations.size(); }

  bool operator==(const RegisterLocations &RHS) const {
    return Locations == RHS.Locations;
  }
};

raw_ostream &operator<<(raw_ostream &OS, const RegisterLocations &RL);
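
// Illustrative sketch (not part of this header): recording and querying a
// rule in a RegisterLocations map. Register number 16 is an assumed return
// address register for the example.
//
//   RegisterLocations Locs;
//   Locs.setRegisterLocation(16, UnwindLocation::createAtCFAPlusOffset(-8));
//   if (std::optional<UnwindLocation> RA = Locs.getRegisterLocation(16))
//     ; // *RA describes how to recover the saved return address.
//   Locs.removeRegisterLocation(16); // Drop any rule for register 16.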

/// A class that represents a single row in the unwind table that is decoded by
/// parsing the DWARF Call Frame Information opcodes.
///
/// The row consists of an optional address, the rule to unwind the CFA and all
/// rules to unwind any registers. If the address doesn't have a value, this
/// row represents the initial instructions for a CIE. If the address has a
/// value, the UnwindRow represents a row in the UnwindTable for an FDE. The
/// address is the first address for which the CFA location and register rules
/// are valid within a function.
///
/// UnwindRow objects are created by parsing opcodes in the DWARF Call Frame
/// Information; they are lazily populated and pushed onto a stack in the
/// UnwindTable when evaluating this state machine. Accessors are needed for
/// the address, CFA value, and register locations because the opcodes encode
/// a state machine that produces a sorted array of UnwindRow objects
/// (\see UnwindTable).
class UnwindRow {
  /// The address will be valid when parsing the instructions in an FDE. If
  /// invalid, this object represents the initial instructions of a CIE.
  std::optional<uint64_t> Address; ///< Address for row in FDE, invalid for CIE.
  UnwindLocation CFAValue;    ///< How to unwind the Call Frame Address (CFA).
  RegisterLocations RegLocs;  ///< How to unwind all registers in this list.

public:
  UnwindRow() : CFAValue(UnwindLocation::createUnspecified()) {}

  /// Returns true if the address is valid in this object.
  bool hasAddress() const { return Address.has_value(); }

  /// Get the address for this row.
  ///
  /// Clients should only call this function after verifying it has a valid
  /// address with a call to \see hasAddress().
  uint64_t getAddress() const { return *Address; }

  /// Set the address for this UnwindRow.
  ///
  /// The address represents the first address for which the CFAValue and
  /// RegLocs are valid within a function.
  void setAddress(uint64_t Addr) { Address = Addr; }

  /// Offset the address for this UnwindRow.
  ///
  /// The address represents the first address for which the CFAValue and
  /// RegLocs are valid within a function. Clients must ensure that this object
  /// already has an address (\see hasAddress()) prior to calling this
  /// function.
  void slideAddress(uint64_t Offset) { *Address += Offset; }
  UnwindLocation &getCFAValue() { return CFAValue; }
  const UnwindLocation &getCFAValue() const { return CFAValue; }
  RegisterLocations &getRegisterLocations() { return RegLocs; }
  const RegisterLocations &getRegisterLocations() const { return RegLocs; }

  /// Dump the UnwindRow to the stream.
  ///
  /// \param OS the stream to use for output.
  ///
  /// \param DumpOpts the DWARF dump options that control the output format
  /// and can supply register information used to pretty-print register names.
  ///
  /// \param IndentLevel specify the indent level as an integer. The UnwindRow
  /// will be output to the stream preceded by 2 * IndentLevel number of spaces.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts,
            unsigned IndentLevel = 0) const;
};

raw_ostream &operator<<(raw_ostream &OS, const UnwindRow &Row);
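
// Illustrative sketch (not part of this header): populating a row the way the
// CFI state machine might after a DW_CFA_def_cfa rule (CFA = reg 7 + 16) and
// an offset rule placing register 16 at CFA - 8. The register numbers are
// assumptions for the example.
//
//   UnwindRow Row;
//   Row.setAddress(0x1000); // First PC where these rules apply (FDE rows).
//   Row.getCFAValue() = UnwindLocation::createIsRegisterPlusOffset(7, 16);
//   Row.getRegisterLocations().setRegisterLocation(
//       16, UnwindLocation::createAtCFAPlusOffset(-8));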

class CFIProgram;
class CIE;
class FDE;

/// A class that contains all UnwindRow objects for an FDE or a single unwind
/// row for a CIE. To unwind an address, the rows, which are sorted by start
/// address, can be searched to find the UnwindRow with the highest starting
/// address that is less than or equal to the address that is being looked
/// up.
class UnwindTable {
public:
  using RowContainer = std::vector<UnwindRow>;
  using iterator = RowContainer::iterator;
  using const_iterator = RowContainer::const_iterator;

  size_t size() const { return Rows.size(); }
  iterator begin() { return Rows.begin(); }
  const_iterator begin() const { return Rows.begin(); }
  iterator end() { return Rows.end(); }
  const_iterator end() const { return Rows.end(); }
  const UnwindRow &operator[](size_t Index) const {
    assert(Index < size());
    return Rows[Index];
  }

  /// Dump the UnwindTable to the stream.
  ///
  /// \param OS the stream to use for output.
  ///
  /// \param DumpOpts the DWARF dump options that control the output format
  /// and can supply register information used to pretty-print register names.
  ///
  /// \param IndentLevel specify the indent level as an integer. Each UnwindRow
  /// will be output to the stream preceded by 2 * IndentLevel number of spaces.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts,
            unsigned IndentLevel = 0) const;

  /// Create an UnwindTable from a Common Information Entry (CIE).
  ///
  /// \param Cie The Common Information Entry to extract the table from. The
  /// CFIProgram is retrieved from the \a Cie object and used to create the
  /// UnwindTable.
  ///
  /// \returns An error if the DWARF Call Frame Information opcodes have state
  /// machine errors, or a valid UnwindTable otherwise.
  static Expected<UnwindTable> create(const CIE *Cie);

  /// Create an UnwindTable from a Frame Descriptor Entry (FDE).
  ///
  /// \param Fde The Frame Descriptor Entry to extract the table from. The
  /// CFIProgram is retrieved from the \a Fde object and used to create the
  /// UnwindTable.
  ///
  /// \returns An error if the DWARF Call Frame Information opcodes have state
  /// machine errors, or a valid UnwindTable otherwise.
  static Expected<UnwindTable> create(const FDE *Fde);

private:
  RowContainer Rows;
  /// The end address when data is extracted from an FDE. This value will be
  /// invalid when a UnwindTable is extracted from a CIE.
  std::optional<uint64_t> EndAddress;

  /// Parse the information in the CFIProgram and update the CurrRow object
  /// that the state machine describes.
  ///
  /// This is an internal implementation that emulates the state machine
  /// described in the DWARF Call Frame Information opcodes and will push
  /// CurrRow onto the Rows container when needed.
  ///
  /// \param CFIP the CFI program that contains the opcodes from a CIE or FDE.
  ///
  /// \param CurrRow the current row to modify while parsing the state machine.
  ///
  /// \param InitialLocs If non-NULL, we are parsing an FDE and this contains
  /// the initial register locations from the CIE. If NULL, then a CIE's
  /// opcodes are being parsed and this is not needed. This is used for the
  /// DW_CFA_restore and DW_CFA_restore_extended opcodes.
  Error parseRows(const CFIProgram &CFIP, UnwindRow &CurrRow,
                  const RegisterLocations *InitialLocs);
};

raw_ostream &operator<<(raw_ostream &OS, const UnwindTable &Rows);
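
// Illustrative sketch (not part of this header): building a table from an FDE
// and finding the row that covers a PC. `Fde` and `PC` are assumed inputs.
//
//   Expected<UnwindTable> TableOrErr = UnwindTable::create(Fde);
//   if (!TableOrErr)
//     return TableOrErr.takeError(); // Propagate state machine errors.
//   const UnwindRow *Match = nullptr;
//   for (const UnwindRow &Row : *TableOrErr)
//     if (Row.hasAddress() && Row.getAddress() <= PC)
//       Match = &Row; // Rows are sorted, so the last hit is the best match.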

/// Represent a sequence of Call Frame Information instructions that, when read
/// in order, construct a table mapping PC to frame state. This can also be
/// referred to as "CFI rules" in DWARF literature to avoid confusion with
/// computer programs in the broader sense, and in this context each instruction
/// would be a rule to establish the mapping. Refer to pg. 172 in the DWARF5
/// manual, "6.4.1 Structure of Call Frame Information".
class CFIProgram {
public:
  static constexpr size_t MaxOperands = 3;
  typedef SmallVector<uint64_t, MaxOperands> Operands;

  /// An instruction consists of a DWARF CFI opcode and an optional sequence of
  /// operands. If it refers to an expression, then this expression has its own
  /// sequence of operations and operands handled separately by DWARFExpression.
  struct Instruction {
    Instruction(uint8_t Opcode) : Opcode(Opcode) {}

    uint8_t Opcode;
    Operands Ops;
    // Associated DWARF expression in case this instruction refers to one
    std::optional<DWARFExpression> Expression;

    Expected<uint64_t> getOperandAsUnsigned(const CFIProgram &CFIP,
                                            uint32_t OperandIdx) const;

    Expected<int64_t> getOperandAsSigned(const CFIProgram &CFIP,
                                         uint32_t OperandIdx) const;
  };

  using InstrList = std::vector<Instruction>;
  using iterator = InstrList::iterator;
  using const_iterator = InstrList::const_iterator;

  iterator begin() { return Instructions.begin(); }
  const_iterator begin() const { return Instructions.begin(); }
  iterator end() { return Instructions.end(); }
  const_iterator end() const { return Instructions.end(); }

  unsigned size() const { return (unsigned)Instructions.size(); }
  bool empty() const { return Instructions.empty(); }
  uint64_t codeAlign() const { return CodeAlignmentFactor; }
  int64_t dataAlign() const { return DataAlignmentFactor; }
  Triple::ArchType triple() const { return Arch; }

  CFIProgram(uint64_t CodeAlignmentFactor, int64_t DataAlignmentFactor,
             Triple::ArchType Arch)
      : CodeAlignmentFactor(CodeAlignmentFactor),
        DataAlignmentFactor(DataAlignmentFactor),
        Arch(Arch) {}

  /// Parse and store a sequence of CFI instructions from Data,
  /// starting at *Offset and ending at EndOffset. *Offset is updated
  /// to EndOffset upon successful parsing, or indicates the offset
  /// where a problem occurred in case an error is returned.
  Error parse(DWARFDataExtractor Data, uint64_t *Offset, uint64_t EndOffset);

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts,
            unsigned IndentLevel = 1) const;

  void addInstruction(const Instruction &I) { Instructions.push_back(I); }

  /// Get a DWARF CFI call frame string for the given DW_CFA opcode.
  StringRef callFrameString(unsigned Opcode) const;

private:
  std::vector<Instruction> Instructions;
  const uint64_t CodeAlignmentFactor;
  const int64_t DataAlignmentFactor;
  Triple::ArchType Arch;

  /// Convenience method to add a new instruction with the given opcode.
  void addInstruction(uint8_t Opcode) {
    Instructions.push_back(Instruction(Opcode));
  }

  /// Add a new single-operand instruction.
  void addInstruction(uint8_t Opcode, uint64_t Operand1) {
    Instructions.push_back(Instruction(Opcode));
    Instructions.back().Ops.push_back(Operand1);
  }

  /// Add a new instruction that has two operands.
  void addInstruction(uint8_t Opcode, uint64_t Operand1, uint64_t Operand2) {
    Instructions.push_back(Instruction(Opcode));
    Instructions.back().Ops.push_back(Operand1);
    Instructions.back().Ops.push_back(Operand2);
  }

  /// Add a new instruction that has three operands.
  void addInstruction(uint8_t Opcode, uint64_t Operand1, uint64_t Operand2,
                      uint64_t Operand3) {
    Instructions.push_back(Instruction(Opcode));
    Instructions.back().Ops.push_back(Operand1);
    Instructions.back().Ops.push_back(Operand2);
    Instructions.back().Ops.push_back(Operand3);
  }

  /// Types of operands to CFI instructions.
  /// In DWARF, this type is implicitly tied to a CFI instruction opcode and
  /// thus this type doesn't need to be explicitly written to the file (this is
  /// not a DWARF encoding). The relationship of instructions to operand types
  /// can be obtained from getOperandTypes() and is only used to simplify
  /// instruction printing.
  enum OperandType {
    OT_Unset,
    OT_None,
    OT_Address,
    OT_Offset,
    OT_FactoredCodeOffset,
    OT_SignedFactDataOffset,
    OT_UnsignedFactDataOffset,
    OT_Register,
    OT_AddressSpace,
    OT_Expression
  };

  /// Get the OperandType as a "const char *".
  static const char *operandTypeString(OperandType OT);

  /// Retrieve the array describing the types of operands according to the enum
  /// above. This is indexed by opcode.
  static ArrayRef<OperandType[MaxOperands]> getOperandTypes();

  /// Print \p Opcode's operand number \p OperandIdx which has value \p Operand.
  void printOperand(raw_ostream &OS, DIDumpOptions DumpOpts,
                    const Instruction &Instr, unsigned OperandIdx,
                    uint64_t Operand) const;
};
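
// Illustrative sketch (not part of this header): iterating a parsed program
// and printing the name of each opcode. `Program` is an assumed,
// already-parsed CFIProgram.
//
//   for (const CFIProgram::Instruction &Instr : Program)
//     llvm::outs() << Program.callFrameString(Instr.Opcode) << '\n';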

/// An entry in either debug_frame or eh_frame. This entry can be a CIE or an
/// FDE.
class FrameEntry {
public:
  enum FrameKind { FK_CIE, FK_FDE };

  FrameEntry(FrameKind K, bool IsDWARF64, uint64_t Offset, uint64_t Length,
             uint64_t CodeAlign, int64_t DataAlign, Triple::ArchType Arch)
      : Kind(K), IsDWARF64(IsDWARF64), Offset(Offset), Length(Length),
        CFIs(CodeAlign, DataAlign, Arch) {}

  virtual ~FrameEntry() = default;

  FrameKind getKind() const { return Kind; }
  uint64_t getOffset() const { return Offset; }
  uint64_t getLength() const { return Length; }
  const CFIProgram &cfis() const { return CFIs; }
  CFIProgram &cfis() { return CFIs; }

  /// Dump the instructions in this CFI fragment
  virtual void dump(raw_ostream &OS, DIDumpOptions DumpOpts) const = 0;

protected:
  const FrameKind Kind;

  const bool IsDWARF64;

  /// Offset of this entry in the section.
  const uint64_t Offset;

  /// Entry length as specified in DWARF.
  const uint64_t Length;

  CFIProgram CFIs;
};

/// DWARF Common Information Entry (CIE)
class CIE : public FrameEntry {
public:
  // CIEs (and FDEs) are simply container classes, so the only sensible way to
  // create them is by providing the full parsed contents in the constructor.
  CIE(bool IsDWARF64, uint64_t Offset, uint64_t Length, uint8_t Version,
      SmallString<8> Augmentation, uint8_t AddressSize,
      uint8_t SegmentDescriptorSize, uint64_t CodeAlignmentFactor,
      int64_t DataAlignmentFactor, uint64_t ReturnAddressRegister,
      SmallString<8> AugmentationData, uint32_t FDEPointerEncoding,
      uint32_t LSDAPointerEncoding, std::optional<uint64_t> Personality,
      std::optional<uint32_t> PersonalityEnc, Triple::ArchType Arch)
      : FrameEntry(FK_CIE, IsDWARF64, Offset, Length, CodeAlignmentFactor,
                   DataAlignmentFactor, Arch),
        Version(Version), Augmentation(std::move(Augmentation)),
        AddressSize(AddressSize), SegmentDescriptorSize(SegmentDescriptorSize),
        CodeAlignmentFactor(CodeAlignmentFactor),
        DataAlignmentFactor(DataAlignmentFactor),
        ReturnAddressRegister(ReturnAddressRegister),
        AugmentationData(std::move(AugmentationData)),
        FDEPointerEncoding(FDEPointerEncoding),
        LSDAPointerEncoding(LSDAPointerEncoding), Personality(Personality),
        PersonalityEnc(PersonalityEnc) {}

  static bool classof(const FrameEntry *FE) { return FE->getKind() == FK_CIE; }

  StringRef getAugmentationString() const { return Augmentation; }
  uint64_t getCodeAlignmentFactor() const { return CodeAlignmentFactor; }
  int64_t getDataAlignmentFactor() const { return DataAlignmentFactor; }
  uint8_t getVersion() const { return Version; }
  uint64_t getReturnAddressRegister() const { return ReturnAddressRegister; }
  std::optional<uint64_t> getPersonalityAddress() const { return Personality; }
  std::optional<uint32_t> getPersonalityEncoding() const {
    return PersonalityEnc;
  }

  StringRef getAugmentationData() const { return AugmentationData; }

  uint32_t getFDEPointerEncoding() const { return FDEPointerEncoding; }

  uint32_t getLSDAPointerEncoding() const { return LSDAPointerEncoding; }

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) const override;

private:
  /// The following fields are defined in section 6.4.1 of the DWARF standard v4
  const uint8_t Version;
  const SmallString<8> Augmentation;
  const uint8_t AddressSize;
  const uint8_t SegmentDescriptorSize;
  const uint64_t CodeAlignmentFactor;
  const int64_t DataAlignmentFactor;
  const uint64_t ReturnAddressRegister;

  // The following are used when the CIE represents an EH frame entry.
  const SmallString<8> AugmentationData;
  const uint32_t FDEPointerEncoding;
  const uint32_t LSDAPointerEncoding;
  const std::optional<uint64_t> Personality;
  const std::optional<uint32_t> PersonalityEnc;
};

/// DWARF Frame Description Entry (FDE)
class FDE : public FrameEntry {
public:
  FDE(bool IsDWARF64, uint64_t Offset, uint64_t Length, uint64_t CIEPointer,
      uint64_t InitialLocation, uint64_t AddressRange, CIE *Cie,
      std::optional<uint64_t> LSDAAddress, Triple::ArchType Arch)
      : FrameEntry(FK_FDE, IsDWARF64, Offset, Length,
                   Cie ? Cie->getCodeAlignmentFactor() : 0,
                   Cie ? Cie->getDataAlignmentFactor() : 0, Arch),
        CIEPointer(CIEPointer), InitialLocation(InitialLocation),
        AddressRange(AddressRange), LinkedCIE(Cie), LSDAAddress(LSDAAddress) {}

  ~FDE() override = default;

  const CIE *getLinkedCIE() const { return LinkedCIE; }
  uint64_t getCIEPointer() const { return CIEPointer; }
  uint64_t getInitialLocation() const { return InitialLocation; }
  uint64_t getAddressRange() const { return AddressRange; }
  std::optional<uint64_t> getLSDAAddress() const { return LSDAAddress; }

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) const override;

  static bool classof(const FrameEntry *FE) { return FE->getKind() == FK_FDE; }

private:
  /// The following fields are defined in section 6.4.1 of the DWARFv3 standard.
  /// Note that CIE pointers in EH FDEs, unlike DWARF FDEs, contain relative
  /// offsets to the linked CIEs. See the following link for more info:
  /// https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html
  const uint64_t CIEPointer;
  const uint64_t InitialLocation;
  const uint64_t AddressRange;
  const CIE *LinkedCIE;
  const std::optional<uint64_t> LSDAAddress;
};
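
// Illustrative sketch (not part of this header): the classof hooks above let
// FrameEntry pointers be inspected with LLVM-style RTTI. `Entry` is an
// assumed `const FrameEntry *`.
//
//   if (const auto *Fde = llvm::dyn_cast<FDE>(Entry)) {
//     uint64_t Begin = Fde->getInitialLocation();
//     uint64_t End = Begin + Fde->getAddressRange();
//     // [Begin, End) is the range of PCs this FDE describes.
//   } else if (const auto *Cie = llvm::dyn_cast<CIE>(Entry)) {
//     uint64_t RAReg = Cie->getReturnAddressRegister();
//   }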

} // end namespace dwarf

/// A parsed .debug_frame or .eh_frame section
class DWARFDebugFrame {
  const Triple::ArchType Arch;
  // True if this is parsing an eh_frame section.
  const bool IsEH;
  // Not zero for sane pointer values coming out of eh_frame
  const uint64_t EHFrameAddress;

  std::vector<std::unique_ptr<dwarf::FrameEntry>> Entries;
  using iterator = pointee_iterator<decltype(Entries)::const_iterator>;

  /// Return the entry at the given offset or nullptr.
  dwarf::FrameEntry *getEntryAtOffset(uint64_t Offset) const;

public:
  // If IsEH is true, assume it is a .eh_frame section. Otherwise,
  // it is a .debug_frame section. EHFrameAddress should be nonzero for
  // correct parsing of .eh_frame addresses when they use a PC-relative
  // encoding.
  DWARFDebugFrame(Triple::ArchType Arch,
                  bool IsEH = false, uint64_t EHFrameAddress = 0);
  ~DWARFDebugFrame();

  /// Dump the section data into the given stream.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts,
            std::optional<uint64_t> Offset) const;

  /// Parse the section from raw data. \p Data is assumed to contain the whole
  /// frame section contents to be parsed.
  Error parse(DWARFDataExtractor Data);

  /// Return whether the section has any entries.
  bool empty() const { return Entries.empty(); }

  /// DWARF Frame entries accessors
  iterator begin() const { return Entries.begin(); }
  iterator end() const { return Entries.end(); }
  iterator_range<iterator> entries() const {
    return iterator_range<iterator>(Entries.begin(), Entries.end());
  }

  uint64_t getEHFrameAddress() const { return EHFrameAddress; }
};
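
// Illustrative sketch (not part of this header): parsing an .eh_frame section
// and dumping each entry. `Data` (a DWARFDataExtractor over the section
// bytes) and `SectionAddr` (the section's load address) are assumed inputs.
//
//   DWARFDebugFrame EHFrame(llvm::Triple::x86_64, /*IsEH=*/true,
//                           /*EHFrameAddress=*/SectionAddr);
//   if (llvm::Error E = EHFrame.parse(Data))
//     llvm::consumeError(std::move(E)); // A real caller should report this.
//   for (dwarf::FrameEntry &Entry : EHFrame.entries())
//     Entry.dump(llvm::outs(), DIDumpOptions());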

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGFRAME_H

//===- DWARFTypePrinter.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFTYPEPRINTER_H
#define LLVM_DEBUGINFO_DWARF_DWARFTYPEPRINTER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"

#include <string>

namespace llvm {

class raw_ostream;

// FIXME: We should have pretty printers per language. Currently we print
// everything as if it were C++ and fall back to the TAG type name.
struct DWARFTypePrinter {
  raw_ostream &OS;
  bool Word = true;
  bool EndedWithTemplate = false;

  DWARFTypePrinter(raw_ostream &OS) : OS(OS) {}

  /// Dump the name encoded in the type tag.
  void appendTypeTagName(dwarf::Tag T);

  void appendArrayType(const DWARFDie &D);

  DWARFDie skipQualifiers(DWARFDie D);

  bool needsParens(DWARFDie D);

  void appendPointerLikeTypeBefore(DWARFDie D, DWARFDie Inner, StringRef Ptr);

  DWARFDie appendUnqualifiedNameBefore(DWARFDie D,
                                       std::string *OriginalFullName = nullptr);

  void appendUnqualifiedNameAfter(DWARFDie D, DWARFDie Inner,
                                  bool SkipFirstParamIfArtificial = false);
  void appendQualifiedName(DWARFDie D);
  DWARFDie appendQualifiedNameBefore(DWARFDie D);
  bool appendTemplateParameters(DWARFDie D, bool *FirstParameter = nullptr);
  void decomposeConstVolatile(DWARFDie &N, DWARFDie &T, DWARFDie &C,
                              DWARFDie &V);
  void appendConstVolatileQualifierAfter(DWARFDie N);
  void appendConstVolatileQualifierBefore(DWARFDie N);

  /// Recursively append the DIE type name when applicable.
  void appendUnqualifiedName(DWARFDie D,
                             std::string *OriginalFullName = nullptr);

  void appendSubroutineNameAfter(DWARFDie D, DWARFDie Inner,
                                 bool SkipFirstParamIfArtificial, bool Const,
                                 bool Volatile);
  void appendScopes(DWARFDie D);
};
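
// Illustrative sketch (not part of this header): printing the C++-style name
// of a type DIE into a string. `TypeDie` is an assumed DWARFDie referring to
// a type entry.
//
//   std::string Name;
//   llvm::raw_string_ostream OS(Name);
//   DWARFTypePrinter(OS).appendQualifiedName(TypeDie);
//   // Name now holds something like "const ns::Foo *".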

} // namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFTYPEPRINTER_H

//===- DWARFDebugInfoEntry.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGINFOENTRY_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGINFOENTRY_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
#include <cstdint>

namespace llvm {

class DWARFUnit;
class DWARFDataExtractor;

/// DWARFDebugInfoEntry - A DIE with only the minimum required data.
class DWARFDebugInfoEntry {
  /// Offset within the .debug_info of the start of this entry.
  uint64_t Offset = 0;

  /// Index of the parent DIE. UINT32_MAX if there is no parent.
  uint32_t ParentIdx = UINT32_MAX;

  /// Index of the sibling DIE. Zero if there is no sibling.
  uint32_t SiblingIdx = 0;

  const DWARFAbbreviationDeclaration *AbbrevDecl = nullptr;

public:
  DWARFDebugInfoEntry() = default;

  /// Extracts a debug info entry, which is a child of a given unit,
  /// starting at a given offset. If the DIE can't be extracted, returns false
  /// and doesn't change OffsetPtr.
  /// High-performance extraction should use this call.
  bool extractFast(const DWARFUnit &U, uint64_t *OffsetPtr,
                   const DWARFDataExtractor &DebugInfoData, uint64_t UEndOffset,
                   uint32_t ParentIdx);

  uint64_t getOffset() const { return Offset; }

  /// Returns the index of the parent DIE.
  std::optional<uint32_t> getParentIdx() const {
    if (ParentIdx == UINT32_MAX)
      return std::nullopt;

    return ParentIdx;
  }

  /// Returns the index of the sibling DIE.
  std::optional<uint32_t> getSiblingIdx() const {
    if (SiblingIdx == 0)
      return std::nullopt;

    return SiblingIdx;
  }

  /// Set index of sibling.
  void setSiblingIdx(uint32_t Idx) { SiblingIdx = Idx; }

  dwarf::Tag getTag() const {
    return AbbrevDecl ? AbbrevDecl->getTag() : dwarf::DW_TAG_null;
  }

  bool hasChildren() const { return AbbrevDecl && AbbrevDecl->hasChildren(); }

  const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
    return AbbrevDecl;
  }
};
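
// Illustrative sketch (not part of this header): the parent/sibling indices
// allow navigation over a unit's flat DIE array without re-parsing.
// `DieArray` (the unit's vector of DWARFDebugInfoEntry), the starting index
// `I`, and `visit` are assumed for the example.
//
//   // Visit the DIE at index I and all of its following siblings.
//   for (std::optional<uint32_t> Idx = I; Idx;
//        Idx = DieArray[*Idx].getSiblingIdx())
//     visit(DieArray[*Idx]);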

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGINFOENTRY_H

//===- DWARFAbbreviationDeclaration.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFABBREVIATIONDECLARATION_H
#define LLVM_DEBUGINFO_DWARF_DWARFABBREVIATIONDECLARATION_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace llvm {

class DataExtractor;
class DWARFUnit;
class raw_ostream;

class DWARFAbbreviationDeclaration {
public:
  enum class ExtractState { Complete, MoreItems };
  struct AttributeSpec {
    AttributeSpec(dwarf::Attribute A, dwarf::Form F, int64_t Value)
        : Attr(A), Form(F), Value(Value) {
      assert(isImplicitConst());
    }
    AttributeSpec(dwarf::Attribute A, dwarf::Form F,
                  std::optional<uint8_t> ByteSize)
        : Attr(A), Form(F) {
      assert(!isImplicitConst());
      this->ByteSize.HasByteSize = ByteSize.has_value();
      if (this->ByteSize.HasByteSize)
        this->ByteSize.ByteSize = *ByteSize;
    }

    DWARFFormValue getFormValue() const {
      if (Form == dwarf::DW_FORM_implicit_const)
        return DWARFFormValue::createFromSValue(Form, getImplicitConstValue());

      return DWARFFormValue(Form);
    }

    dwarf::Attribute Attr;
    dwarf::Form Form;

  private:
    /// The following field is used for ByteSize for non-implicit_const
    /// attributes and as value for implicit_const ones, indicated by
    /// Form == DW_FORM_implicit_const.
    /// The following cases are distinguished:
    /// * Form != DW_FORM_implicit_const and HasByteSize is true:
    ///     ByteSize contains the fixed size in bytes for the Form in this
    ///     object.
    /// * Form != DW_FORM_implicit_const and HasByteSize is false:
    ///     the byte size of the Form either varies according to the DWARFUnit
    ///     that it is contained in, or the value size varies and must be
    ///     decoded from the debug information in order to determine its size.
    /// * Form == DW_FORM_implicit_const:
    ///     Value contains value for the implicit_const attribute.
    struct ByteSizeStorage {
      bool HasByteSize;
      uint8_t ByteSize;
    };
    union {
      ByteSizeStorage ByteSize;
      int64_t Value;
    };

  public:
    bool isImplicitConst() const {
      return Form == dwarf::DW_FORM_implicit_const;
    }

    int64_t getImplicitConstValue() const {
      assert(isImplicitConst());
      return Value;
    }

    /// Get the fixed byte size of this Form if possible. This function might
    /// use the DWARFUnit to calculate the size of the Form, like for
    /// DW_FORM_addr and DW_FORM_ref_addr, so this isn't just an accessor for
    /// the ByteSize member.
    std::optional<int64_t> getByteSize(const DWARFUnit &U) const;
  };
  using AttributeSpecVector = SmallVector<AttributeSpec, 8>;

  DWARFAbbreviationDeclaration();

  uint32_t getCode() const { return Code; }
  uint8_t getCodeByteSize() const { return CodeByteSize; }
  dwarf::Tag getTag() const { return Tag; }
  bool hasChildren() const { return HasChildren; }

  using attr_iterator_range =
      iterator_range<AttributeSpecVector::const_iterator>;

  attr_iterator_range attributes() const {
    return attr_iterator_range(AttributeSpecs.begin(), AttributeSpecs.end());
  }

  dwarf::Form getFormByIndex(uint32_t idx) const {
    assert(idx < AttributeSpecs.size());
    return AttributeSpecs[idx].Form;
  }

  size_t getNumAttributes() const {
    return AttributeSpecs.size();
  }

  dwarf::Attribute getAttrByIndex(uint32_t idx) const {
    assert(idx < AttributeSpecs.size());
    return AttributeSpecs[idx].Attr;
  }

  bool getAttrIsImplicitConstByIndex(uint32_t idx) const {
    assert(idx < AttributeSpecs.size());
    return AttributeSpecs[idx].isImplicitConst();
  }

  int64_t getAttrImplicitConstValueByIndex(uint32_t idx) const {
    assert(idx < AttributeSpecs.size());
    return AttributeSpecs[idx].getImplicitConstValue();
  }

  /// Get the index of the specified attribute.
  ///
  /// Searches this abbreviation declaration for the index of the specified
  /// attribute.
  ///
  /// \param attr DWARF attribute to search for.
  /// \returns Optional index of the attribute if found, std::nullopt otherwise.
  std::optional<uint32_t> findAttributeIndex(dwarf::Attribute attr) const;

  /// Extract a DWARF form value from a DIE specified by DIE offset.
  ///
  /// Extract an attribute value for a DWARFUnit given the DIE offset and the
  /// attribute.
  ///
  /// \param DIEOffset the DIE offset that points to the ULEB128 abbreviation
  /// code in the .debug_info data.
  /// \param Attr DWARF attribute to search for.
  /// \param U the DWARFUnit that contains the DIE.
  /// \returns Optional DWARF form value if the attribute was extracted.
  std::optional<DWARFFormValue> getAttributeValue(const uint64_t DIEOffset,
                                                  const dwarf::Attribute Attr,
                                                  const DWARFUnit &U) const;

  /// Compute an offset from a DIE specified by DIE offset and attribute index.
  ///
  /// \param AttrIndex an index of DWARF attribute.
  /// \param DIEOffset the DIE offset that points to the ULEB128 abbreviation
  /// code in the .debug_info data.
  /// \param U the DWARFUnit that contains the DIE.
  /// \returns an offset of the attribute.
  uint64_t getAttributeOffsetFromIndex(uint32_t AttrIndex, uint64_t DIEOffset,
                                       const DWARFUnit &U) const;

  /// Extract a DWARF form value from a DIE specified by attribute index and
  /// its offset.
  ///
  /// \param AttrIndex an index of DWARF attribute.
  /// \param Offset offset of the attribute.
  /// \param U the DWARFUnit that contains the DIE.
  /// \returns Optional DWARF form value if the attribute was extracted.
  std::optional<DWARFFormValue>
  getAttributeValueFromOffset(uint32_t AttrIndex, uint64_t Offset,
                              const DWARFUnit &U) const;

  llvm::Expected<ExtractState> extract(DataExtractor Data, uint64_t *OffsetPtr);
  void dump(raw_ostream &OS) const;

  /// Return an optional byte size of all attribute data in this abbreviation
  /// if a constant byte size can be calculated given a DWARFUnit. This allows
  /// DWARF parsing to be faster as many DWARF DIEs have a fixed byte size.
  std::optional<size_t> getFixedAttributesByteSize(const DWARFUnit &U) const;

private:
  void clear();

  /// A helper structure that can quickly determine the size in bytes of an
  /// abbreviation declaration.
  struct FixedSizeInfo {
    /// The fixed byte size for fixed size forms.
    uint16_t NumBytes = 0;
    /// Number of DW_FORM_addr forms in this abbreviation declaration.
    uint8_t NumAddrs = 0;
    /// Number of DW_FORM_ref_addr forms in this abbreviation declaration.
    uint8_t NumRefAddrs = 0;
    /// Number of forms that are 4 bytes in DWARF32 and 8 bytes in DWARF64.
    uint8_t NumDwarfOffsets = 0;

    FixedSizeInfo() = default;

    /// Calculate the fixed size in bytes given a DWARFUnit.
    ///
    /// \param U the DWARFUnit to use when determining the byte size.
    /// \returns the size in bytes for all attribute data in this abbreviation.
    /// The returned size does not include bytes for the ULEB128 abbreviation
    /// code.
    size_t getByteSize(const DWARFUnit &U) const;
  };

  uint32_t Code;
  dwarf::Tag Tag;
  uint8_t CodeByteSize;
  bool HasChildren;
  AttributeSpecVector AttributeSpecs;
  /// If this abbreviation has a fixed byte size then FixedAttributeSize member
  /// variable below will have a value.
  std::optional<FixedSizeInfo> FixedAttributeSize;
};
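
// Illustrative sketch (not part of this header): inspecting an extracted
// abbreviation declaration. `Abbrev` is an assumed, already-extracted
// DWARFAbbreviationDeclaration.
//
//   if (std::optional<uint32_t> Idx =
//           Abbrev.findAttributeIndex(dwarf::DW_AT_name))
//     llvm::outs() << "DW_AT_name is attribute #" << *Idx << " with form "
//                  << (unsigned)Abbrev.getFormByIndex(*Idx) << '\n';
//   for (const auto &Spec : Abbrev.attributes())
//     if (Spec.isImplicitConst())
//       llvm::outs() << "implicit const: " << Spec.getImplicitConstValue()
//                    << '\n';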

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFABBREVIATIONDECLARATION_H

//===- DWARFAcceleratorTable.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFACCELERATORTABLE_H
#define LLVM_DEBUGINFO_DWARF_DWARFACCELERATORTABLE_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include <cstdint>
#include <utility>

namespace llvm {

class raw_ostream;
class ScopedPrinter;

/// The accelerator tables are designed to allow efficient random access
/// (using a symbol name as a key) into debug info by providing an index of the
/// debug info DIEs. This class implements the common functionality of Apple and
/// DWARF 5 accelerator tables.
/// TODO: Generalize the rest of the AppleAcceleratorTable interface and move it
/// to this class.
class DWARFAcceleratorTable {
protected:
  DWARFDataExtractor AccelSection;
  DataExtractor StringSection;

public:
  /// An abstract class representing a single entry in the accelerator tables.
  class Entry {
  protected:
    SmallVector<DWARFFormValue, 3> Values;

    Entry() = default;

    // Make these protected so only (final) subclasses can be copied around.
    Entry(const Entry &) = default;
    Entry(Entry &&) = default;
    Entry &operator=(const Entry &) = default;
    Entry &operator=(Entry &&) = default;
    ~Entry() = default;


  public:
    /// Returns the Offset of the Compilation Unit associated with this
    /// Accelerator Entry or std::nullopt if the Compilation Unit offset is not
    /// recorded in this Accelerator Entry.
    virtual std::optional<uint64_t> getCUOffset() const = 0;

    /// Returns the Tag of the Debug Info Entry associated with this
    /// Accelerator Entry or std::nullopt if the Tag is not recorded in this
    /// Accelerator Entry.
    virtual std::optional<dwarf::Tag> getTag() const = 0;

    /// Returns the raw values of fields in the Accelerator Entry. In general,
    /// these can only be interpreted with the help of the metadata in the
    /// owning Accelerator Table.
    ArrayRef<DWARFFormValue> getValues() const { return Values; }
  };

  DWARFAcceleratorTable(const DWARFDataExtractor &AccelSection,
                        DataExtractor StringSection)
      : AccelSection(AccelSection), StringSection(StringSection) {}
  virtual ~DWARFAcceleratorTable();

  virtual Error extract() = 0;
  virtual void dump(raw_ostream &OS) const = 0;

  DWARFAcceleratorTable(const DWARFAcceleratorTable &) = delete;
  void operator=(const DWARFAcceleratorTable &) = delete;
};

/// This implements the Apple accelerator table format, a precursor of the
/// DWARF 5 accelerator table format.
class AppleAcceleratorTable : public DWARFAcceleratorTable {
  struct Header {
    uint32_t Magic;
    uint16_t Version;
    uint16_t HashFunction;
    uint32_t BucketCount;
    uint32_t HashCount;
    uint32_t HeaderDataLength;

    void dump(ScopedPrinter &W) const;
  };

  struct HeaderData {
    using AtomType = uint16_t;
    using Form = dwarf::Form;

    uint64_t DIEOffsetBase;
    SmallVector<std::pair<AtomType, Form>, 3> Atoms;

    std::optional<uint64_t>
    extractOffset(std::optional<DWARFFormValue> Value) const;
  };

  Header Hdr;
  HeaderData HdrData;
  dwarf::FormParams FormParams;
  uint32_t HashDataEntryLength;
  bool IsValid = false;

  /// Returns true if we should continue scanning for entries or false if we've
  /// reached the last (sentinel) entry or encountered a parsing error.
  bool dumpName(ScopedPrinter &W, SmallVectorImpl<DWARFFormValue> &AtomForms,
                uint64_t *DataOffset) const;

  /// Reads a uint32_t from the accelerator table at Offset, which is
  /// incremented by the number of bytes read.
  std::optional<uint32_t> readU32FromAccel(uint64_t &Offset,
                                           bool UseRelocation = false) const;

  /// Reads a StringRef from the string table at Offset.
  std::optional<StringRef>
  readStringFromStrSection(uint64_t StringSectionOffset) const;

  /// Return the offset into the section where the Buckets begin.
  uint64_t getBucketBase() const { return sizeof(Hdr) + Hdr.HeaderDataLength; }

  /// Return the offset into the section where the I-th bucket is.
  uint64_t getIthBucketBase(uint32_t I) const {
    return getBucketBase() + I * 4;
  }

  /// Return the offset into the section where the hash list begins.
  uint64_t getHashBase() const { return getBucketBase() + getNumBuckets() * 4; }

  /// Return the offset into the section where the I-th hash is.
  uint64_t getIthHashBase(uint32_t I) const { return getHashBase() + I * 4; }

  /// Return the offset into the section where the offset list begins.
  uint64_t getOffsetBase() const { return getHashBase() + getNumHashes() * 4; }

  /// Return the offset into the section where the table entries begin.
  uint64_t getEntriesBase() const {
    return getOffsetBase() + getNumHashes() * 4;
  }

  /// Return the offset into the section where the I-th offset is.
  uint64_t getIthOffsetBase(uint32_t I) const {
    return getOffsetBase() + I * 4;
  }

  /// Returns the index of the bucket where a hypothetical Hash would be.
  uint32_t hashToBucketIdx(uint32_t Hash) const {
    return Hash % getNumBuckets();
  }

  /// Returns true iff a hypothetical Hash would be assigned to the BucketIdx-th
  /// bucket.
  bool wouldHashBeInBucket(uint32_t Hash, uint32_t BucketIdx) const {
    return hashToBucketIdx(Hash) == BucketIdx;
  }

  /// Reads the contents of the I-th bucket, that is, the index in the hash list
  /// where the hashes corresponding to this bucket begin.
  std::optional<uint32_t> readIthBucket(uint32_t I) const {
    uint64_t Offset = getIthBucketBase(I);
    return readU32FromAccel(Offset);
  }

  /// Reads the I-th hash in the hash list.
  std::optional<uint32_t> readIthHash(uint32_t I) const {
    uint64_t Offset = getIthHashBase(I);
    return readU32FromAccel(Offset);
  }

  /// Reads the I-th offset in the offset list.
  std::optional<uint32_t> readIthOffset(uint32_t I) const {
    uint64_t Offset = getIthOffsetBase(I);
    return readU32FromAccel(Offset);
  }

  /// Reads a string offset from the accelerator table at Offset, which is
  /// incremented by the number of bytes read.
  std::optional<uint32_t> readStringOffsetAt(uint64_t &Offset) const {
    return readU32FromAccel(Offset, /*UseRelocation*/ true);
  }

  /// Scans through all Hashes in the BucketIdx-th bucket, attempting to find
  /// HashToFind. If it is found, its index in the list of hashes is returned.
  std::optional<uint32_t> idxOfHashInBucket(uint32_t HashToFind,
                                            uint32_t BucketIdx) const;

public:
  /// Apple-specific implementation of an Accelerator Entry.
  class Entry final : public DWARFAcceleratorTable::Entry {
    const AppleAcceleratorTable &Table;

    Entry(const AppleAcceleratorTable &Table);
    void extract(uint64_t *Offset);

  public:
    std::optional<uint64_t> getCUOffset() const override;

    /// Returns the Section Offset of the Debug Info Entry associated with this
    /// Accelerator Entry or std::nullopt if the DIE offset is not recorded in
    /// this Accelerator Entry. The returned offset is relative to the start of
    /// the Section containing the DIE.
    std::optional<uint64_t> getDIESectionOffset() const;

    std::optional<dwarf::Tag> getTag() const override;

    /// Returns the value of the Atom in this Accelerator Entry, if the Entry
    /// contains such an Atom.
    std::optional<DWARFFormValue> lookup(HeaderData::AtomType Atom) const;

    friend class AppleAcceleratorTable;
    friend class ValueIterator;
  };

  /// An iterator for Entries all having the same string as key.
  class SameNameIterator
      : public iterator_facade_base<SameNameIterator, std::forward_iterator_tag,
                                    Entry> {
    Entry Current;
    uint64_t Offset = 0;

  public:
    /// Construct a new iterator for the entries at \p DataOffset.
    SameNameIterator(const AppleAcceleratorTable &AccelTable,
                     uint64_t DataOffset);

    const Entry &operator*() {
      uint64_t OffsetCopy = Offset;
      Current.extract(&OffsetCopy);
      return Current;
    }
    SameNameIterator &operator++() {
      Offset += Current.Table.getHashDataEntryLength();
      return *this;
    }
    friend bool operator==(const SameNameIterator &A,
                           const SameNameIterator &B) {
      return A.Offset == B.Offset;
    }
  };

  struct EntryWithName {
    EntryWithName(const AppleAcceleratorTable &Table)
        : BaseEntry(Table), StrOffset(0) {}

    std::optional<StringRef> readName() const {
      return BaseEntry.Table.readStringFromStrSection(StrOffset);
    }

    Entry BaseEntry;
    uint32_t StrOffset;
  };

  /// An iterator for all entries in the table.
  class Iterator
      : public iterator_facade_base<Iterator, std::forward_iterator_tag,
                                    EntryWithName> {
    constexpr static auto EndMarker = std::numeric_limits<uint64_t>::max();

    EntryWithName Current;
    uint64_t Offset = EndMarker;
    uint32_t NumEntriesToCome = 0;

    void setToEnd() { Offset = EndMarker; }
    bool isEnd() const { return Offset == EndMarker; }
    const AppleAcceleratorTable &getTable() const {
      return Current.BaseEntry.Table;
    }

    /// Reads the next Entry in the table, populating `Current`.
    /// If not possible (e.g. end of the section), becomes the end iterator.
    void prepareNextEntryOrEnd();

    /// Reads the next string pointer and the entry count for that string,
    /// populating `NumEntriesToCome`.
    /// If not possible (e.g. end of the section), becomes the end iterator.
    /// Assumes `Offset` points to a string reference.
    void prepareNextStringOrEnd();

  public:
    Iterator(const AppleAcceleratorTable &Table, bool SetEnd = false);

    Iterator &operator++() {
      prepareNextEntryOrEnd();
      return *this;
    }
    bool operator==(const Iterator &It) const { return Offset == It.Offset; }
    const EntryWithName &operator*() const {
      assert(!isEnd() && "dereferencing end iterator");
      return Current;
    }
  };

  AppleAcceleratorTable(const DWARFDataExtractor &AccelSection,
                        DataExtractor StringSection)
      : DWARFAcceleratorTable(AccelSection, StringSection) {}

  Error extract() override;
  uint32_t getNumBuckets() const;
  uint32_t getNumHashes() const;
  uint32_t getSizeHdr() const;
  uint32_t getHeaderDataLength() const;

  /// Returns the size of one HashData entry.
  uint32_t getHashDataEntryLength() const { return HashDataEntryLength; }

  /// Return the Atom description, which can be used to interpret the raw values
  /// of the Accelerator Entries in this table.
  ArrayRef<std::pair<HeaderData::AtomType, HeaderData::Form>> getAtomsDesc();

  /// Returns true iff `AtomTy` is one of the atoms available in Entries of this
  /// table.
  bool containsAtomType(HeaderData::AtomType AtomTy) const {
    return is_contained(make_first_range(HdrData.Atoms), AtomTy);
  }

  bool validateForms();

  /// Return information related to the DWARF DIE we're looking for when
  /// performing a lookup by name.
  ///
  /// \param HashDataOffset an offset into the hash data table
  /// \returns <DieOffset, DieTag>
  /// DieOffset is the offset into the .debug_info section for the DIE
  /// related to the input hash data offset.
  /// DieTag is the tag of the DIE
  std::pair<uint64_t, dwarf::Tag> readAtoms(uint64_t *HashDataOffset);
  void dump(raw_ostream &OS) const override;

  /// Look up all entries in the accelerator table matching \c Key.
  iterator_range<SameNameIterator> equal_range(StringRef Key) const;

  /// Look up all entries in the accelerator table.
  auto entries() const {
    return make_range(Iterator(*this), Iterator(*this, /*SetEnd*/ true));
  }
};
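
// Illustrative sketch (not part of this header): looking up a name in an
// Apple accelerator table (e.g. .apple_names). `Table` is an assumed,
// already-extracted AppleAcceleratorTable.
//
//   for (const AppleAcceleratorTable::Entry &E : Table.equal_range("main"))
//     if (std::optional<uint64_t> Off = E.getDIESectionOffset())
//       llvm::outs() << "DIE at .debug_info offset " << *Off << '\n';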

/// The .debug_names section consists of one or more units. Each unit starts
/// with a header, which is followed by a list of compilation units, local and
/// foreign type units.
///
/// These may be followed by an (optional) hash lookup table, which consists of
/// an array of buckets and hashes similar to the Apple tables above. The only
/// difference is that the hashes array is 1-based, and consequently an empty
/// bucket is denoted by 0 and not UINT32_MAX.
///
/// Next is the name table, which consists of an array of names and an array of
/// entry offsets. This is different from the Apple tables, which store names
/// next to the actual entries.
///
/// The structure of the entries is described by an abbreviations table, which
/// comes after the name table. Unlike the Apple tables, which have a uniform
/// entry structure described in the header, each .debug_names entry may have
/// different index attributes (DW_IDX_???) attached to it.
///
/// The last segment consists of a list of entries, which is a 0-terminated
/// list referenced by the name table and interpreted with the help of the
/// abbreviation table.
class DWARFDebugNames : public DWARFAcceleratorTable {
public:
  class NameIndex;
  class NameIterator;
  class ValueIterator;

  /// DWARF v5 Name Index header.
  struct Header {
    uint64_t UnitLength;
    dwarf::DwarfFormat Format;
    uint16_t Version;
    uint32_t CompUnitCount;
    uint32_t LocalTypeUnitCount;
    uint32_t ForeignTypeUnitCount;
    uint32_t BucketCount;
    uint32_t NameCount;
    uint32_t AbbrevTableSize;
    uint32_t AugmentationStringSize;
    SmallString<8> AugmentationString;

    Error extract(const DWARFDataExtractor &AS, uint64_t *Offset);
    void dump(ScopedPrinter &W) const;
  };

  /// Index attribute and its encoding.
  struct AttributeEncoding {
    dwarf::Index Index;
    dwarf::Form Form;

    constexpr AttributeEncoding(dwarf::Index Index, dwarf::Form Form)
        : Index(Index), Form(Form) {}

    friend bool operator==(const AttributeEncoding &LHS,
                           const AttributeEncoding &RHS) {
      return LHS.Index == RHS.Index && LHS.Form == RHS.Form;
    }
  };

  /// Abbreviation describing the encoding of Name Index entries.
  struct Abbrev {
    uint32_t Code;  ///< Abbreviation code
    dwarf::Tag Tag; ///< Dwarf Tag of the described entity.
    std::vector<AttributeEncoding> Attributes; ///< List of index attributes.

    Abbrev(uint32_t Code, dwarf::Tag Tag,
           std::vector<AttributeEncoding> Attributes)
        : Code(Code), Tag(Tag), Attributes(std::move(Attributes)) {}

    void dump(ScopedPrinter &W) const;
  };

  /// DWARF v5-specific implementation of an Accelerator Entry.
  class Entry final : public DWARFAcceleratorTable::Entry {
    const NameIndex *NameIdx;
    const Abbrev *Abbr;

    Entry(const NameIndex &NameIdx, const Abbrev &Abbr);

  public:
    std::optional<uint64_t> getCUOffset() const override;
    std::optional<dwarf::Tag> getTag() const override { return tag(); }

    /// Returns the Index into the Compilation Unit list of the owning Name
    /// Index or std::nullopt if this Accelerator Entry does not have an
    /// associated Compilation Unit. It is up to the user to verify that the
    /// returned Index is valid in the owning NameIndex (or use getCUOffset(),
    /// which will handle that check itself). Note that entries in NameIndexes
    /// which index just a single Compilation Unit are implicitly associated
    /// with that unit, so this function will return 0 even without an explicit
    /// DW_IDX_compile_unit attribute.
    std::optional<uint64_t> getCUIndex() const;

    /// .debug_names-specific getter, which always succeeds (DWARF v5 index
    /// entries always have a tag).
    dwarf::Tag tag() const { return Abbr->Tag; }

    /// Returns the Offset of the DIE within the containing CU or TU.
    std::optional<uint64_t> getDIEUnitOffset() const;

    /// Return the Abbreviation that can be used to interpret the raw values of
    /// this Accelerator Entry.
    const Abbrev &getAbbrev() const { return *Abbr; }

    /// Returns the value of the Index Attribute in this Accelerator Entry, if
    /// the Entry contains such Attribute.
    std::optional<DWARFFormValue> lookup(dwarf::Index Index) const;

    void dump(ScopedPrinter &W) const;

    friend class NameIndex;
    friend class ValueIterator;
  };

  /// Error returned by NameIndex::getEntry to report it has reached the end of
  /// the entry list.
  class SentinelError : public ErrorInfo<SentinelError> {
  public:
    static char ID;

    void log(raw_ostream &OS) const override { OS << "Sentinel"; }
    std::error_code convertToErrorCode() const override;
  };

private:
  /// DenseMapInfo for struct Abbrev.
  struct AbbrevMapInfo {
    static Abbrev getEmptyKey();
    static Abbrev getTombstoneKey();
    static unsigned getHashValue(uint32_t Code) {
      return DenseMapInfo<uint32_t>::getHashValue(Code);
    }
    static unsigned getHashValue(const Abbrev &Abbr) {
      return getHashValue(Abbr.Code);
    }
    static bool isEqual(uint32_t LHS, const Abbrev &RHS) {
      return LHS == RHS.Code;
    }
    static bool isEqual(const Abbrev &LHS, const Abbrev &RHS) {
      return LHS.Code == RHS.Code;
    }
  };

public:
  /// A single entry in the Name Table (DWARF v5 sect. 6.1.1.4.6) of the Name
  /// Index.
  class NameTableEntry {
    DataExtractor StrData;

    uint32_t Index;
    uint64_t StringOffset;
    uint64_t EntryOffset;

  public:
    NameTableEntry(const DataExtractor &StrData, uint32_t Index,
                   uint64_t StringOffset, uint64_t EntryOffset)
        : StrData(StrData), Index(Index), StringOffset(StringOffset),
          EntryOffset(EntryOffset) {}

    /// Return the index of this name in the parent Name Index.
    uint32_t getIndex() const { return Index; }

    /// Returns the offset of the name of the described entities.
    uint64_t getStringOffset() const { return StringOffset; }

    /// Return the string referenced by this name table entry or nullptr if the
    /// string offset is not valid.
    const char *getString() const {
      uint64_t Off = StringOffset;
      return StrData.getCStr(&Off);
    }

    /// Returns the offset of the first Entry in the list.
    uint64_t getEntryOffset() const { return EntryOffset; }
  };

  /// Represents a single accelerator table within the DWARF v5 .debug_names
  /// section.
  class NameIndex {
    DenseSet<Abbrev, AbbrevMapInfo> Abbrevs;
    struct Header Hdr;
    const DWARFDebugNames &Section;

    // Base of the whole unit and of various important tables, as offsets from
    // the start of the section.
    uint64_t Base;
    uint64_t CUsBase;
    uint64_t BucketsBase;
    uint64_t HashesBase;
    uint64_t StringOffsetsBase;
    uint64_t EntryOffsetsBase;
    uint64_t EntriesBase;

    void dumpCUs(ScopedPrinter &W) const;
    void dumpLocalTUs(ScopedPrinter &W) const;
    void dumpForeignTUs(ScopedPrinter &W) const;
    void dumpAbbreviations(ScopedPrinter &W) const;
    bool dumpEntry(ScopedPrinter &W, uint64_t *Offset) const;
    void dumpName(ScopedPrinter &W, const NameTableEntry &NTE,
                  std::optional<uint32_t> Hash) const;
    void dumpBucket(ScopedPrinter &W, uint32_t Bucket) const;

    Expected<AttributeEncoding> extractAttributeEncoding(uint64_t *Offset);

    Expected<std::vector<AttributeEncoding>>
    extractAttributeEncodings(uint64_t *Offset);

    Expected<Abbrev> extractAbbrev(uint64_t *Offset);

  public:
    NameIndex(const DWARFDebugNames &Section, uint64_t Base)
        : Section(Section), Base(Base) {}

    /// Reads offset of compilation unit CU. CU is 0-based.
    uint64_t getCUOffset(uint32_t CU) const;
    uint32_t getCUCount() const { return Hdr.CompUnitCount; }

    /// Reads offset of local type unit TU. TU is 0-based.
    uint64_t getLocalTUOffset(uint32_t TU) const;
    uint32_t getLocalTUCount() const { return Hdr.LocalTypeUnitCount; }

    /// Reads signature of foreign type unit TU. TU is 0-based.
    uint64_t getForeignTUSignature(uint32_t TU) const;
    uint32_t getForeignTUCount() const { return Hdr.ForeignTypeUnitCount; }

    /// Reads an entry in the Bucket Array for the given Bucket. The returned
    /// value is a (1-based) index into the Names, StringOffsets and
    /// EntryOffsets arrays. The input Bucket index is 0-based.
    uint32_t getBucketArrayEntry(uint32_t Bucket) const;
    uint32_t getBucketCount() const { return Hdr.BucketCount; }

    /// Reads an entry in the Hash Array for the given Index. The input Index
    /// is 1-based.
    uint32_t getHashArrayEntry(uint32_t Index) const;

    /// Reads an entry in the Name Table for the given Index. The Name Table
    /// consists of two arrays -- String Offsets and Entry Offsets. The returned
    /// offsets are relative to the starts of respective sections. Input Index
    /// is 1-based.
    NameTableEntry getNameTableEntry(uint32_t Index) const;

    uint32_t getNameCount() const { return Hdr.NameCount; }
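    // A minimal lookup sketch showing how the bucket, hash, and name-table
    // accessors above fit together; it assumes a fully extracted NameIndex.
    // The helper name and the use of llvm::djbHash (the DWARF v5 name-table
    // hash) are illustrative assumptions, not part of this header.
    //
    //   #include "llvm/Support/DJB.h"
    //   std::optional<uint32_t>
    //   findNameSlot(const DWARFDebugNames::NameIndex &NI, StringRef Name) {
    //     uint32_t Hash = djbHash(Name);
    //     uint32_t Bucket = Hash % NI.getBucketCount();
    //     uint32_t Idx = NI.getBucketArrayEntry(Bucket); // 1-based; 0 == empty
    //     if (Idx == 0)
    //       return std::nullopt;
    //     for (; Idx <= NI.getNameCount(); ++Idx) {
    //       uint32_t H = NI.getHashArrayEntry(Idx);
    //       if (H % NI.getBucketCount() != Bucket)
    //         break; // walked past this bucket's chain
    //       if (H == Hash && Name == NI.getNameTableEntry(Idx).getString())
    //         return Idx;
    //     }
    //     return std::nullopt;
    //   }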

    const DenseSet<Abbrev, AbbrevMapInfo> &getAbbrevs() const {
      return Abbrevs;
    }

    Expected<Entry> getEntry(uint64_t *Offset) const;

    /// Look up all entries in this Name Index matching \c Key.
    iterator_range<ValueIterator> equal_range(StringRef Key) const;

    NameIterator begin() const { return NameIterator(this, 1); }
    NameIterator end() const { return NameIterator(this, getNameCount() + 1); }

    Error extract();
    uint64_t getUnitOffset() const { return Base; }
    uint64_t getNextUnitOffset() const {
      return Base + dwarf::getUnitLengthFieldByteSize(Hdr.Format) +
             Hdr.UnitLength;
    }
    void dump(ScopedPrinter &W) const;

    friend class DWARFDebugNames;
  };

  class ValueIterator {
  public:
    using iterator_category = std::input_iterator_tag;
    using value_type = Entry;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    /// The Name Index we are currently iterating through. The implementation
    /// relies on the fact that this can also be used as an iterator into the
    /// "NameIndices" vector in the Accelerator section.
    const NameIndex *CurrentIndex = nullptr;

    /// Whether this is a local iterator (searches in CurrentIndex only) or not
    /// (searches all name indices).
    bool IsLocal;

    std::optional<Entry> CurrentEntry;
    uint64_t DataOffset = 0; ///< Offset into the section.
    std::string Key;         ///< The Key we are searching for.
    std::optional<uint32_t> Hash; ///< Hash of Key, if it has been computed.

    bool getEntryAtCurrentOffset();
    std::optional<uint64_t> findEntryOffsetInCurrentIndex();
    bool findInCurrentIndex();
    void searchFromStartOfCurrentIndex();
    void next();

    /// Set the iterator to the "end" state.
    void setEnd() { *this = ValueIterator(); }

  public:
    /// Create a "begin" iterator for looping over all entries in the
    /// accelerator table matching Key. The iterator will run through all Name
    /// Indexes in the section in sequence.
    ValueIterator(const DWARFDebugNames &AccelTable, StringRef Key);

    /// Create a "begin" iterator for looping over all entries in a specific
    /// Name Index. Other indices in the section will not be visited.
    ValueIterator(const NameIndex &NI, StringRef Key);

    /// End marker.
    ValueIterator() = default;

    const Entry &operator*() const { return *CurrentEntry; }
    ValueIterator &operator++() {
      next();
      return *this;
    }
    ValueIterator operator++(int) {
      ValueIterator I = *this;
      next();
      return I;
    }

    friend bool operator==(const ValueIterator &A, const ValueIterator &B) {
      return A.CurrentIndex == B.CurrentIndex && A.DataOffset == B.DataOffset;
    }
    friend bool operator!=(const ValueIterator &A, const ValueIterator &B) {
      return !(A == B);
    }
  };

  class NameIterator {

    /// The Name Index we are iterating through.
    const NameIndex *CurrentIndex;

    /// The current name in the Name Index.
    uint32_t CurrentName;

    void next() {
      assert(CurrentName <= CurrentIndex->getNameCount());
      ++CurrentName;
    }

  public:
    using iterator_category = std::input_iterator_tag;
    using value_type = NameTableEntry;
    using difference_type = uint32_t;
    using pointer = NameTableEntry *;
    using reference = NameTableEntry; // We return entries by value.

    /// Creates an iterator whose initial position is name CurrentName in
    /// CurrentIndex.
    NameIterator(const NameIndex *CurrentIndex, uint32_t CurrentName)
        : CurrentIndex(CurrentIndex), CurrentName(CurrentName) {}

    NameTableEntry operator*() const {
      return CurrentIndex->getNameTableEntry(CurrentName);
    }
    NameIterator &operator++() {
      next();
      return *this;
    }
    NameIterator operator++(int) {
      NameIterator I = *this;
      next();
      return I;
    }

    friend bool operator==(const NameIterator &A, const NameIterator &B) {
      return A.CurrentIndex == B.CurrentIndex && A.CurrentName == B.CurrentName;
    }
    friend bool operator!=(const NameIterator &A, const NameIterator &B) {
      return !(A == B);
    }
  };
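  // A short usage sketch: NameIndex's begin()/end() return NameIterators, so
  // a range-for visits every NameTableEntry. `NI` stands for an illustrative,
  // already-extracted index; outs() needs llvm/Support/raw_ostream.h.
  //
  //   void printNames(const DWARFDebugNames::NameIndex &NI) {
  //     for (DWARFDebugNames::NameTableEntry NTE : NI)
  //       outs() << NTE.getIndex() << ": " << NTE.getString() << "\n";
  //   }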

private:
  SmallVector<NameIndex, 0> NameIndices;
  DenseMap<uint64_t, const NameIndex *> CUToNameIndex;

public:
  DWARFDebugNames(const DWARFDataExtractor &AccelSection,
                  DataExtractor StringSection)
      : DWARFAcceleratorTable(AccelSection, StringSection) {}

  Error extract() override;
  void dump(raw_ostream &OS) const override;

  /// Look up all entries in the accelerator table matching \c Key.
  iterator_range<ValueIterator> equal_range(StringRef Key) const;

  using const_iterator = SmallVector<NameIndex, 0>::const_iterator;
  const_iterator begin() const { return NameIndices.begin(); }
  const_iterator end() const { return NameIndices.end(); }

  /// Return the Name Index covering the compile unit at CUOffset, or nullptr if
  /// there is no Name Index covering that unit.
  const NameIndex *getCUNameIndex(uint64_t CUOffset);
};
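// A minimal, hedged example of looking up a name across all indices in
// .debug_names via equal_range(). `Ctx` stands for an already-constructed
// DWARFContext and is an assumption of the sketch.
//
//   #include "llvm/DebugInfo/DWARF/DWARFContext.h"
//   #include "llvm/Support/raw_ostream.h"
//   void findMain(DWARFContext &Ctx) {
//     const DWARFDebugNames &Names = Ctx.getDebugNames();
//     for (const DWARFDebugNames::Entry &E : Names.equal_range("main"))
//       if (std::optional<uint64_t> Off = E.getDIEUnitOffset())
//         outs() << "DIE unit offset: " << *Off << "\n";
//   }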

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFACCELERATORTABLE_H
//===- DWARFDataExtractor.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDATAEXTRACTOR_H
#define LLVM_DEBUGINFO_DWARF_DWARFDATAEXTRACTOR_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
#include "llvm/Support/DataExtractor.h"

namespace llvm {
class DWARFObject;

/// A DataExtractor (typically for an in-memory copy of an object-file section)
/// plus a relocation map for that section, if there is one.
class DWARFDataExtractor : public DataExtractor {
  const DWARFObject *Obj = nullptr;
  const DWARFSection *Section = nullptr;

public:
  /// Constructor for the normal case of extracting data from a DWARF section.
  /// The DWARFSection's lifetime must be at least as long as the extractor's.
  DWARFDataExtractor(const DWARFObject &Obj, const DWARFSection &Section,
                     bool IsLittleEndian, uint8_t AddressSize)
      : DataExtractor(Section.Data, IsLittleEndian, AddressSize), Obj(&Obj),
        Section(&Section) {}

  /// Constructor for cases when there are no relocations.
  DWARFDataExtractor(StringRef Data, bool IsLittleEndian, uint8_t AddressSize)
    : DataExtractor(Data, IsLittleEndian, AddressSize) {}
  DWARFDataExtractor(ArrayRef<uint8_t> Data, bool IsLittleEndian,
                     uint8_t AddressSize)
      : DataExtractor(
            StringRef(reinterpret_cast<const char *>(Data.data()), Data.size()),
            IsLittleEndian, AddressSize) {}

  /// Truncating constructor
  DWARFDataExtractor(const DWARFDataExtractor &Other, size_t Length)
      : DataExtractor(Other.getData().substr(0, Length), Other.isLittleEndian(),
                      Other.getAddressSize()),
        Obj(Other.Obj), Section(Other.Section) {}

  /// Extracts the DWARF "initial length" field, which can either be a 32-bit
  /// value smaller than 0xfffffff0, or the value 0xffffffff followed by a
  /// 64-bit length. Returns the actual length, and the DWARF format which is
  /// encoded in the field. In case of errors, it returns {0, DWARF32} and
  /// leaves the offset unchanged.
  std::pair<uint64_t, dwarf::DwarfFormat>
  getInitialLength(uint64_t *Off, Error *Err = nullptr) const;

  std::pair<uint64_t, dwarf::DwarfFormat> getInitialLength(Cursor &C) const {
    return getInitialLength(&getOffset(C), &getError(C));
  }
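  // A hedged sketch of reading an initial-length field and branching on the
  // detected format; `Data` is an illustrative DWARFDataExtractor over some
  // DWARF section.
  //
  //   uint64_t Offset = 0;
  //   auto [Length, Format] = Data.getInitialLength(&Offset);
  //   uint8_t OffsetSize = Format == dwarf::DWARF64 ? 8 : 4;
  //   // On error this returns {0, DWARF32} and leaves Offset unchanged.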

  /// Extracts a value and applies a relocation to the result if
  /// one exists for the given offset.
  uint64_t getRelocatedValue(uint32_t Size, uint64_t *Off,
                             uint64_t *SectionIndex = nullptr,
                             Error *Err = nullptr) const;
  uint64_t getRelocatedValue(Cursor &C, uint32_t Size,
                             uint64_t *SectionIndex = nullptr) const {
    return getRelocatedValue(Size, &getOffset(C), SectionIndex, &getError(C));
  }

  /// Extracts an address-sized value and applies a relocation to the result if
  /// one exists for the given offset.
  uint64_t getRelocatedAddress(uint64_t *Off, uint64_t *SecIx = nullptr) const {
    return getRelocatedValue(getAddressSize(), Off, SecIx);
  }
  uint64_t getRelocatedAddress(Cursor &C, uint64_t *SecIx = nullptr) const {
    return getRelocatedValue(getAddressSize(), &getOffset(C), SecIx,
                             &getError(C));
  }
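  // A short, hedged sketch; `Data` is again illustrative, constructed with an
  // Obj and Section so that relocations can be resolved.
  //
  //   uint64_t Off = 0, SecIdx = 0;
  //   uint64_t Addr = Data.getRelocatedAddress(&Off, &SecIdx);
  //   // Addr has any relocation at Off applied; SecIdx names its section.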

  /// Extracts a DWARF-encoded pointer at \p Offset using \p Encoding. Some
  /// DWARF encodings use a PC-relative adjustment; for those values,
  /// \p AbsPosOffset, which should reflect the absolute address of this
  /// pointer, is used to adjust them.
  std::optional<uint64_t> getEncodedPointer(uint64_t *Offset, uint8_t Encoding,
                                            uint64_t AbsPosOffset = 0) const;
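  // A hedged sketch of decoding a PC-relative pointer. The DW_EH_PE_*
  // constants come from llvm/BinaryFormat/Dwarf.h; `SectionBase` (the
  // absolute address of the section being read) and `use` are illustrative.
  //
  //   uint64_t Off = 0;
  //   if (std::optional<uint64_t> Ptr = Data.getEncodedPointer(
  //           &Off, dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4,
  //           SectionBase + Off))
  //     use(*Ptr); // placeholder for the caller's logic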
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDATAEXTRACTOR_H
//===--- DWARFExpression.h - DWARF Expression handling ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFEXPRESSION_H
#define LLVM_DEBUGINFO_DWARF_DWARFEXPRESSION_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Support/DataExtractor.h"

namespace llvm {
class DWARFUnit;
struct DIDumpOptions;
class MCRegisterInfo;
class raw_ostream;

class DWARFExpression {
public:
  class iterator;

  /// This class represents an Operation in the Expression.
  ///
  /// An Operation can be in an Error state (check with isError()). This
  /// means that it couldn't be decoded successfully, and if that is the
  /// case, all other fields contain undefined values.
  class Operation {
  public:
    /// Size and signedness of expression operations' operands.
    enum Encoding : uint8_t {
      Size1 = 0,
      Size2 = 1,
      Size4 = 2,
      Size8 = 3,
      SizeLEB = 4,
      SizeAddr = 5,
      SizeRefAddr = 6,
      SizeBlock = 7, ///< Preceding operand contains block size
      BaseTypeRef = 8,
      /// The operand is a ULEB128 encoded SubOpcode. This is only valid
      /// for the first operand of an operation.
      SizeSubOpLEB = 9,
      WasmLocationArg = 30,
      SignBit = 0x80,
      SignedSize1 = SignBit | Size1,
      SignedSize2 = SignBit | Size2,
      SignedSize4 = SignBit | Size4,
      SignedSize8 = SignBit | Size8,
      SignedSizeLEB = SignBit | SizeLEB,
    };

    enum DwarfVersion : uint8_t {
      DwarfNA, ///< Serves as a marker for unused entries
      Dwarf2 = 2,
      Dwarf3,
      Dwarf4,
      Dwarf5
    };

    /// Description of the encoding of one expression Op.
    struct Description {
      DwarfVersion Version; ///< Dwarf version where the Op was introduced.
      SmallVector<Encoding> Op; ///< Encoding for Op operands.

      template <typename... Ts>
      Description(DwarfVersion Version, Ts... Op)
          : Version(Version), Op{Op...} {}
      Description() : Description(DwarfNA) {}
      ~Description() = default;
    };

  private:
    friend class DWARFExpression::iterator;
    uint8_t Opcode; ///< The Op Opcode, DW_OP_<something>.
    Description Desc;
    bool Error = false;
    uint64_t EndOffset;
    SmallVector<uint64_t> Operands;
    SmallVector<uint64_t> OperandEndOffsets;

  public:
    const Description &getDescription() const { return Desc; }
    uint8_t getCode() const { return Opcode; }
    std::optional<unsigned> getSubCode() const;
    uint64_t getNumOperands() const { return Operands.size(); }
    ArrayRef<uint64_t> getRawOperands() const { return Operands; }
    uint64_t getRawOperand(unsigned Idx) const { return Operands[Idx]; }
    ArrayRef<uint64_t> getOperandEndOffsets() const {
      return OperandEndOffsets;
    }
    uint64_t getOperandEndOffset(unsigned Idx) const {
      return OperandEndOffsets[Idx];
    }
    uint64_t getEndOffset() const { return EndOffset; }
    bool isError() const { return Error; }
    bool print(raw_ostream &OS, DIDumpOptions DumpOpts,
               const DWARFExpression *Expr, DWARFUnit *U) const;

    /// Verify \p Op. Does not affect the return of \a isError().
    static bool verify(const Operation &Op, DWARFUnit *U);

  private:
    bool extract(DataExtractor Data, uint8_t AddressSize, uint64_t Offset,
                 std::optional<dwarf::DwarfFormat> Format);
  };

  /// An iterator to go through the expression operations.
  class iterator
      : public iterator_facade_base<iterator, std::forward_iterator_tag,
                                    const Operation> {
    friend class DWARFExpression;
    const DWARFExpression *Expr;
    uint64_t Offset;
    Operation Op;
    iterator(const DWARFExpression *Expr, uint64_t Offset)
        : Expr(Expr), Offset(Offset) {
      Op.Error =
          Offset >= Expr->Data.getData().size() ||
          !Op.extract(Expr->Data, Expr->AddressSize, Offset, Expr->Format);
    }

  public:
    iterator &operator++() {
      Offset = Op.isError() ? Expr->Data.getData().size() : Op.EndOffset;
      Op.Error =
          Offset >= Expr->Data.getData().size() ||
          !Op.extract(Expr->Data, Expr->AddressSize, Offset, Expr->Format);
      return *this;
    }

    const Operation &operator*() const { return Op; }

    iterator skipBytes(uint64_t Add) const {
      return iterator(Expr, Op.EndOffset + Add);
    }

    // Comparison operators are provided out of line.
    friend bool operator==(const iterator &, const iterator &);
  };

  DWARFExpression(DataExtractor Data, uint8_t AddressSize,
                  std::optional<dwarf::DwarfFormat> Format = std::nullopt)
      : Data(Data), AddressSize(AddressSize), Format(Format) {
    assert(AddressSize == 8 || AddressSize == 4 || AddressSize == 2);
  }

  iterator begin() const { return iterator(this, 0); }
  iterator end() const { return iterator(this, Data.getData().size()); }

  void print(raw_ostream &OS, DIDumpOptions DumpOpts, DWARFUnit *U,
             bool IsEH = false) const;

  /// Print the expression in a format intended to be compact and useful to a
  /// user, but not perfectly unambiguous, or capable of representing every
  /// valid DWARF expression. Returns true if the expression was successfully
  /// printed.
  bool printCompact(raw_ostream &OS,
                    std::function<StringRef(uint64_t RegNum, bool IsEH)>
                        GetNameForDWARFReg = nullptr);

  bool verify(DWARFUnit *U);

  bool operator==(const DWARFExpression &RHS) const;

  StringRef getData() const { return Data.getData(); }

  static bool prettyPrintRegisterOp(DWARFUnit *U, raw_ostream &OS,
                                    DIDumpOptions DumpOpts, uint8_t Opcode,
                                    const ArrayRef<uint64_t> Operands);

private:
  DataExtractor Data;
  uint8_t AddressSize;
  std::optional<dwarf::DwarfFormat> Format;
};
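// A minimal, hedged sketch of decoding a one-opcode expression with the
// iterator above; 0x9c is DW_OP_call_frame_cfa, and the address size of 8
// is an illustrative choice.
//
//   StringRef Bytes("\x9c", 1);
//   DataExtractor Data(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
//   DWARFExpression Expr(Data, /*AddressSize=*/8);
//   for (const DWARFExpression::Operation &Op : Expr) {
//     if (Op.isError())
//       break;
//     // Op.getCode() == dwarf::DW_OP_call_frame_cfa here.
//   }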

inline bool operator==(const DWARFExpression::iterator &LHS,
                       const DWARFExpression::iterator &RHS) {
  return LHS.Expr == RHS.Expr && LHS.Offset == RHS.Offset;
}
} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFEXPRESSION_H
//===- DWARFUnitIndex.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFUNITINDEX_H
#define LLVM_DEBUGINFO_DWARF_DWARFUNITINDEX_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <cstdint>
#include <memory>

namespace llvm {

class raw_ostream;
class DataExtractor;

/// The enum of section identifiers to be used in internal interfaces.
///
/// Pre-standard implementations of package files defined a number of section
/// identifiers with values that clash with the definitions in the DWARFv5
/// standard.
/// See https://gcc.gnu.org/wiki/DebugFissionDWP and Section 7.3.5.3 in DWARFv5.
///
/// The following identifiers are the same in the proposal and in DWARFv5:
/// - DW_SECT_INFO         = 1 (.debug_info.dwo)
/// - DW_SECT_ABBREV       = 3 (.debug_abbrev.dwo)
/// - DW_SECT_LINE         = 4 (.debug_line.dwo)
/// - DW_SECT_STR_OFFSETS  = 6 (.debug_str_offsets.dwo)
///
/// The following identifiers are defined only in DWARFv5:
/// - DW_SECT_LOCLISTS     = 5 (.debug_loclists.dwo)
/// - DW_SECT_RNGLISTS     = 8 (.debug_rnglists.dwo)
///
/// The following identifiers are defined only in the GNU proposal:
/// - DW_SECT_TYPES        = 2 (.debug_types.dwo)
/// - DW_SECT_LOC          = 5 (.debug_loc.dwo)
/// - DW_SECT_MACINFO      = 7 (.debug_macinfo.dwo)
///
/// DW_SECT_MACRO for the .debug_macro.dwo section is defined in both standards,
/// but with different values, 8 in GNU and 7 in DWARFv5.
///
/// This enum defines constants to represent the identifiers of both sets.
/// For DWARFv5 ones, the values are the same as defined in the standard.
/// For pre-standard ones that correspond to sections being deprecated in
/// DWARFv5, the values are chosen arbitrarily, and a tag "_EXT_" is added to
/// the names.
///
/// The enum is for internal use only. The user should not expect the values
/// to correspond to any input/output constants. Special conversion functions,
/// serializeSectionKind() and deserializeSectionKind(), should be used for
/// the translation.
enum DWARFSectionKind {
  /// Denotes a value read from an index section that does not correspond
  /// to any of the supported standards.
  DW_SECT_EXT_unknown = 0,
#define HANDLE_DW_SECT(ID, NAME) DW_SECT_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_SECT_EXT_TYPES = 2,
  DW_SECT_EXT_LOC = 9,
  DW_SECT_EXT_MACINFO = 10,
};

inline const char *toString(DWARFSectionKind Kind) {
  switch (Kind) {
  case DW_SECT_EXT_unknown:
    return "Unknown DW_SECT value 0";
#define STRINGIZE(X) #X
#define HANDLE_DW_SECT(ID, NAME)                                               \
  case DW_SECT_##NAME:                                                         \
    return "DW_SECT_" STRINGIZE(NAME);
#include "llvm/BinaryFormat/Dwarf.def"
  case DW_SECT_EXT_TYPES:
    return "DW_SECT_TYPES";
  case DW_SECT_EXT_LOC:
    return "DW_SECT_LOC";
  case DW_SECT_EXT_MACINFO:
    return "DW_SECT_MACINFO";
  }
  llvm_unreachable("unknown DWARFSectionKind");
}

/// Convert the internal value for a section kind to an on-disk value.
///
/// The conversion depends on the version of the index section.
/// IndexVersion is expected to be either 2 for pre-standard GNU proposal
/// or 5 for DWARFv5 package file.
uint32_t serializeSectionKind(DWARFSectionKind Kind, unsigned IndexVersion);

/// Convert a value read from an index section to the internal representation.
///
/// The conversion depends on the index section version, which is expected
/// to be either 2 for pre-standard GNU proposal or 5 for DWARFv5 package file.
DWARFSectionKind deserializeSectionKind(uint32_t Value, unsigned IndexVersion);
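// A hedged round-trip sketch of the two conversion functions. Per the table
// above, the pre-standard (version 2) column for .debug_loc.dwo uses on-disk
// value 5; the exact value is the library's concern, so treat the expected
// result here as illustrative.
//
//   uint32_t OnDisk = serializeSectionKind(DW_SECT_EXT_LOC, /*IndexVersion=*/2);
//   DWARFSectionKind Kind = deserializeSectionKind(OnDisk, /*IndexVersion=*/2);
//   assert(Kind == DW_SECT_EXT_LOC && "round trip through the on-disk value");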

class DWARFUnitIndex {
  struct Header {
    uint32_t Version;
    uint32_t NumColumns;
    uint32_t NumUnits;
    uint32_t NumBuckets = 0;

    bool parse(DataExtractor IndexData, uint64_t *OffsetPtr);
    void dump(raw_ostream &OS) const;
  };

public:
  class Entry {
  public:
    class SectionContribution {
    private:
      uint64_t Offset;
      uint64_t Length;

    public:
      SectionContribution() : Offset(0), Length(0) {}
      SectionContribution(uint64_t Offset, uint64_t Length)
          : Offset(Offset), Length(Length) {}

      void setOffset(uint64_t Value) { Offset = Value; }
      void setLength(uint64_t Value) { Length = Value; }
      uint64_t getOffset() const { return Offset; }
      uint64_t getLength() const { return Length; }
      uint32_t getOffset32() const { return (uint32_t)Offset; }
      uint32_t getLength32() const { return (uint32_t)Length; }
    };

  private:
    const DWARFUnitIndex *Index;
    uint64_t Signature;
    std::unique_ptr<SectionContribution[]> Contributions;
    friend class DWARFUnitIndex;

  public:
    const SectionContribution *getContribution(DWARFSectionKind Sec) const;
    const SectionContribution *getContribution() const;
    SectionContribution &getContribution();

    const SectionContribution *getContributions() const {
      return Contributions.get();
    }

    uint64_t getSignature() const { return Signature; }
    bool isValid() { return Index; }
  };

private:
  struct Header Header;

  DWARFSectionKind InfoColumnKind;
  int InfoColumn = -1;
  std::unique_ptr<DWARFSectionKind[]> ColumnKinds;
  // This is a parallel array of section identifiers as they are read from the
  // input file. The mapping from raw values to DWARFSectionKind is not
  // reversible in case of unknown identifiers, so we keep them here.
  std::unique_ptr<uint32_t[]> RawSectionIds;
  std::unique_ptr<Entry[]> Rows;
  mutable std::vector<Entry *> OffsetLookup;

  static StringRef getColumnHeader(DWARFSectionKind DS);

  bool parseImpl(DataExtractor IndexData);

public:
  DWARFUnitIndex(DWARFSectionKind InfoColumnKind)
      : InfoColumnKind(InfoColumnKind) {}

  explicit operator bool() const { return Header.NumBuckets; }

  bool parse(DataExtractor IndexData);
  void dump(raw_ostream &OS) const;

  uint32_t getVersion() const { return Header.Version; }

  const Entry *getFromOffset(uint64_t Offset) const;
  const Entry *getFromHash(uint64_t Offset) const;

  ArrayRef<DWARFSectionKind> getColumnKinds() const {
    return ArrayRef(ColumnKinds.get(), Header.NumColumns);
  }

  ArrayRef<Entry> getRows() const {
    return ArrayRef(Rows.get(), Header.NumBuckets);
  }

  MutableArrayRef<Entry> getMutableRows() {
    return MutableArrayRef(Rows.get(), Header.NumBuckets);
  }
};
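// A hedged usage sketch of parsing an index and looking up a contribution;
// `CUIndexData` (an extractor over .debug_cu_index) and `DwoId` are
// illustrative inputs, and outs() needs llvm/Support/raw_ostream.h.
//
//   DWARFUnitIndex Index(DW_SECT_INFO);
//   if (Index.parse(CUIndexData))
//     if (const DWARFUnitIndex::Entry *E = Index.getFromHash(DwoId))
//       if (const auto *C = E->getContribution(DW_SECT_INFO))
//         outs() << "info at " << C->getOffset() << ", size "
//                << C->getLength() << "\n";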

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFUNITINDEX_H
//===- DWARFUnit.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFUNIT_H
#define LLVM_DEBUGINFO_DWARF_DWARFUNIT_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFLocationExpression.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/Support/DataExtractor.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>

namespace llvm {

class DWARFAbbreviationDeclarationSet;
class DWARFContext;
class DWARFDebugAbbrev;
class DWARFUnit;
class DWARFDebugRangeList;
class DWARFLocationTable;
class DWARFObject;
class raw_ostream;
struct DIDumpOptions;
struct DWARFSection;
namespace dwarflinker_parallel {
class CompileUnit;
}

/// Base class describing the header of any kind of "unit."  Some information
/// is specific to certain unit types.  We separate this class out so we can
/// parse the header before deciding what specific kind of unit to construct.
class DWARFUnitHeader {
  // Offset within section.
  uint64_t Offset = 0;
  // Version, address size, and DWARF format.
  dwarf::FormParams FormParams;
  uint64_t Length = 0;
  uint64_t AbbrOffset = 0;

  // For DWO units only.
  const DWARFUnitIndex::Entry *IndexEntry = nullptr;

  // For type units only.
  uint64_t TypeHash = 0;
  uint64_t TypeOffset = 0;

  // For v5 split or skeleton compile units only.
  std::optional<uint64_t> DWOId;

  // Unit type as parsed, or derived from the section kind.
  uint8_t UnitType = 0;

  // Size as parsed. uint8_t for compactness.
  uint8_t Size = 0;

public:
  /// Parse a unit header from \p debug_info starting at \p offset_ptr.
  /// Note that \p SectionKind is used as a hint to guess the unit type
  /// for DWARF formats prior to DWARFv5. In DWARFv5 the unit type is
  /// explicitly defined in the header and the hint is ignored.
  bool extract(DWARFContext &Context, const DWARFDataExtractor &debug_info,
               uint64_t *offset_ptr, DWARFSectionKind SectionKind);
  // For units in a DWARF package file, remember the index entry and update
  // the abbreviation offset read by extract().
  bool applyIndexEntry(const DWARFUnitIndex::Entry *Entry);
  uint64_t getOffset() const { return Offset; }
  const dwarf::FormParams &getFormParams() const { return FormParams; }
  uint16_t getVersion() const { return FormParams.Version; }
  dwarf::DwarfFormat getFormat() const { return FormParams.Format; }
  uint8_t getAddressByteSize() const { return FormParams.AddrSize; }
  uint8_t getRefAddrByteSize() const { return FormParams.getRefAddrByteSize(); }
  uint8_t getDwarfOffsetByteSize() const {
    return FormParams.getDwarfOffsetByteSize();
  }
  uint64_t getLength() const { return Length; }
  uint64_t getAbbrOffset() const { return AbbrOffset; }
  std::optional<uint64_t> getDWOId() const { return DWOId; }
  void setDWOId(uint64_t Id) {
    assert((!DWOId || *DWOId == Id) && "setting DWOId to a different value");
    DWOId = Id;
  }
  const DWARFUnitIndex::Entry *getIndexEntry() const { return IndexEntry; }
  uint64_t getTypeHash() const { return TypeHash; }
  uint64_t getTypeOffset() const { return TypeOffset; }
  uint8_t getUnitType() const { return UnitType; }
  bool isTypeUnit() const {
    return UnitType == dwarf::DW_UT_type || UnitType == dwarf::DW_UT_split_type;
  }
  uint8_t getSize() const { return Size; }
  uint8_t getUnitLengthFieldByteSize() const {
    return dwarf::getUnitLengthFieldByteSize(FormParams.Format);
  }
  uint64_t getNextUnitOffset() const {
    return Offset + Length + getUnitLengthFieldByteSize();
  }
};

const DWARFUnitIndex &getDWARFUnitIndex(DWARFContext &Context,
                                        DWARFSectionKind Kind);

bool isCompileUnit(const std::unique_ptr<DWARFUnit> &U);

/// Describe a collection of units. Intended to hold all units either from
/// .debug_info and .debug_types, or from .debug_info.dwo and .debug_types.dwo.
class DWARFUnitVector final : public SmallVector<std::unique_ptr<DWARFUnit>, 1> {
  std::function<std::unique_ptr<DWARFUnit>(uint64_t, DWARFSectionKind,
                                           const DWARFSection *,
                                           const DWARFUnitIndex::Entry *)>
      Parser;
  int NumInfoUnits = -1;

public:
  using UnitVector = SmallVectorImpl<std::unique_ptr<DWARFUnit>>;
  using iterator = typename UnitVector::iterator;
  using iterator_range = llvm::iterator_range<typename UnitVector::iterator>;

  using compile_unit_range =
      decltype(make_filter_range(std::declval<iterator_range>(), isCompileUnit));

  DWARFUnit *getUnitForOffset(uint64_t Offset) const;
  DWARFUnit *getUnitForIndexEntry(const DWARFUnitIndex::Entry &E);

  /// Read units from a .debug_info or .debug_types section.  Calls made
  /// before finishedInfoUnits() are assumed to be for .debug_info sections,
  /// calls after finishedInfoUnits() are for .debug_types sections.  Caller
  /// must not mix calls to addUnitsForSection and addUnitsForDWOSection.
  void addUnitsForSection(DWARFContext &C, const DWARFSection &Section,
                          DWARFSectionKind SectionKind);
  /// Read units from a .debug_info.dwo or .debug_types.dwo section.  Calls
  /// made before finishedInfoUnits() are assumed to be for .debug_info.dwo
  /// sections, calls after finishedInfoUnits() are for .debug_types.dwo
  /// sections.  Caller must not mix calls to addUnitsForSection and
  /// addUnitsForDWOSection.
  void addUnitsForDWOSection(DWARFContext &C, const DWARFSection &DWOSection,
                             DWARFSectionKind SectionKind, bool Lazy = false);

  /// Add an existing DWARFUnit to this UnitVector. This is used by the DWARF
  /// verifier to process units separately.
  DWARFUnit *addUnit(std::unique_ptr<DWARFUnit> Unit);

  /// Returns number of all units held by this instance.
  unsigned getNumUnits() const { return size(); }
  /// Returns number of units from all .debug_info[.dwo] sections.
  unsigned getNumInfoUnits() const {
    return NumInfoUnits == -1 ? size() : NumInfoUnits;
  }
  /// Returns number of units from all .debug_types[.dwo] sections.
  unsigned getNumTypesUnits() const { return size() - NumInfoUnits; }
  /// Indicate that parsing .debug_info[.dwo] is done, and remaining units
  /// will be from .debug_types[.dwo].
  void finishedInfoUnits() { NumInfoUnits = size(); }
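  // A hedged ordering sketch for the contract documented above: all
  // .debug_info calls first, then finishedInfoUnits(), then .debug_types.
  // `Ctx`, `InfoSec`, and `TypesSec` are illustrative.
  //
  //   DWARFUnitVector Units;
  //   Units.addUnitsForSection(Ctx, InfoSec, DW_SECT_INFO);       // .debug_info
  //   Units.finishedInfoUnits();
  //   Units.addUnitsForSection(Ctx, TypesSec, DW_SECT_EXT_TYPES); // .debug_types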

private:
  void addUnitsImpl(DWARFContext &Context, const DWARFObject &Obj,
                    const DWARFSection &Section, const DWARFDebugAbbrev *DA,
                    const DWARFSection *RS, const DWARFSection *LocSection,
                    StringRef SS, const DWARFSection &SOS,
                    const DWARFSection *AOS, const DWARFSection &LS, bool LE,
                    bool IsDWO, bool Lazy, DWARFSectionKind SectionKind);
};

/// Represents a unit's contribution to the string offsets table.
struct StrOffsetsContributionDescriptor {
  uint64_t Base = 0;
  /// The contribution size not including the header.
  uint64_t Size = 0;
  /// Format and version.
  dwarf::FormParams FormParams = {0, 0, dwarf::DwarfFormat::DWARF32};

  StrOffsetsContributionDescriptor(uint64_t Base, uint64_t Size,
                                   uint8_t Version, dwarf::DwarfFormat Format)
      : Base(Base), Size(Size), FormParams({Version, 0, Format}) {}
  StrOffsetsContributionDescriptor() = default;

  uint8_t getVersion() const { return FormParams.Version; }
  dwarf::DwarfFormat getFormat() const { return FormParams.Format; }
  uint8_t getDwarfOffsetByteSize() const {
    return FormParams.getDwarfOffsetByteSize();
  }
  /// Determine whether a contribution to the string offsets table is
  /// consistent with the relevant section size and that its length is
  /// a multiple of the size of one of its entries.
  Expected<StrOffsetsContributionDescriptor>
  validateContributionSize(DWARFDataExtractor &DA);
};

class DWARFUnit {
  DWARFContext &Context;
  /// Section containing this DWARFUnit.
  const DWARFSection &InfoSection;

  DWARFUnitHeader Header;
  const DWARFDebugAbbrev *Abbrev;
  const DWARFSection *RangeSection;
  uint64_t RangeSectionBase;
  uint64_t LocSectionBase;

  /// Location table of this unit.
  std::unique_ptr<DWARFLocationTable> LocTable;

  const DWARFSection &LineSection;
  StringRef StringSection;
  const DWARFSection &StringOffsetSection;
  const DWARFSection *AddrOffsetSection;
  DWARFUnit *SU;
  std::optional<uint64_t> AddrOffsetSectionBase;
  bool IsLittleEndian;
  bool IsDWO;
  const DWARFUnitVector &UnitVector;

  /// Start, length, and DWARF format of the unit's contribution to the string
  /// offsets table (DWARF v5).
  std::optional<StrOffsetsContributionDescriptor>
      StringOffsetsTableContribution;

  mutable const DWARFAbbreviationDeclarationSet *Abbrevs;
  std::optional<object::SectionedAddress> BaseAddr;
  /// The compile unit debug information entry items.
  std::vector<DWARFDebugInfoEntry> DieArray;

  /// Map from range's start address to end address and corresponding DIE.
  /// IntervalMap does not support range removal, as a result, we use the
  /// std::map::upper_bound for address range lookup.
  std::map<uint64_t, std::pair<uint64_t, DWARFDie>> AddrDieMap;

  /// Map from the location (interpreted DW_AT_location) of a DW_TAG_variable,
  /// to the end address and the corresponding DIE.
  std::map<uint64_t, std::pair<uint64_t, DWARFDie>> VariableDieMap;
  DenseSet<uint64_t> RootsParsedForVariables;

  using die_iterator_range =
      iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>;

  std::shared_ptr<DWARFUnit> DWO;

protected:
  friend dwarflinker_parallel::CompileUnit;

  /// Return the index of a \p Die entry inside the unit's DIE vector.
  ///
  /// It is illegal to call this method with a DIE that hasn't been
  /// created by this unit. In other words, it's illegal to call this
  /// method on a DIE that isn't accessible by following
  /// children/sibling links starting from this unit's getUnitDIE().
  uint32_t getDIEIndex(const DWARFDebugInfoEntry *Die) const {
    auto First = DieArray.data();
    assert(Die >= First && Die < First + DieArray.size());
    return Die - First;
  }

  /// Return DWARFDebugInfoEntry for the specified index \p Index.
  const DWARFDebugInfoEntry *getDebugInfoEntry(unsigned Index) const {
    assert(Index < DieArray.size());
    return &DieArray[Index];
  }

  const DWARFDebugInfoEntry *
  getParentEntry(const DWARFDebugInfoEntry *Die) const;
  const DWARFDebugInfoEntry *
  getSiblingEntry(const DWARFDebugInfoEntry *Die) const;
  const DWARFDebugInfoEntry *
  getPreviousSiblingEntry(const DWARFDebugInfoEntry *Die) const;
  const DWARFDebugInfoEntry *
  getFirstChildEntry(const DWARFDebugInfoEntry *Die) const;
  const DWARFDebugInfoEntry *
  getLastChildEntry(const DWARFDebugInfoEntry *Die) const;

  const DWARFUnitHeader &getHeader() const { return Header; }

  /// Find the unit's contribution to the string offsets table and determine its
  /// length and form. The given offset is expected to be derived from the unit
  /// DIE's DW_AT_str_offsets_base attribute.
  Expected<std::optional<StrOffsetsContributionDescriptor>>
  determineStringOffsetsTableContribution(DWARFDataExtractor &DA);

  /// Find the unit's contribution to the string offsets table and determine its
  /// length and form. The given offset is expected to be 0 in a dwo file or,
  /// in a dwp file, the start of the unit's contribution to the string offsets
  /// table section (as determined by the index table).
  Expected<std::optional<StrOffsetsContributionDescriptor>>
  determineStringOffsetsTableContributionDWO(DWARFDataExtractor &DA);

public:
  DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
            const DWARFUnitHeader &Header, const DWARFDebugAbbrev *DA,
            const DWARFSection *RS, const DWARFSection *LocSection,
            StringRef SS, const DWARFSection &SOS, const DWARFSection *AOS,
            const DWARFSection &LS, bool LE, bool IsDWO,
            const DWARFUnitVector &UnitVector);

  virtual ~DWARFUnit();

  bool isLittleEndian() const { return IsLittleEndian; }
  bool isDWOUnit() const { return IsDWO; }
  DWARFContext& getContext() const { return Context; }
  const DWARFSection &getInfoSection() const { return InfoSection; }
  uint64_t getOffset() const { return Header.getOffset(); }
  const dwarf::FormParams &getFormParams() const {
    return Header.getFormParams();
  }
  uint16_t getVersion() const { return Header.getVersion(); }
  uint8_t getAddressByteSize() const { return Header.getAddressByteSize(); }
  uint8_t getRefAddrByteSize() const { return Header.getRefAddrByteSize(); }
  uint8_t getDwarfOffsetByteSize() const {
    return Header.getDwarfOffsetByteSize();
  }
  /// Size in bytes of the parsed unit header.
  uint32_t getHeaderSize() const { return Header.getSize(); }
  uint64_t getLength() const { return Header.getLength(); }
  dwarf::DwarfFormat getFormat() const { return Header.getFormat(); }
  uint8_t getUnitType() const { return Header.getUnitType(); }
  bool isTypeUnit() const { return Header.isTypeUnit(); }
  uint64_t getAbbrOffset() const { return Header.getAbbrOffset(); }
  uint64_t getNextUnitOffset() const { return Header.getNextUnitOffset(); }
  const DWARFSection &getLineSection() const { return LineSection; }
  StringRef getStringSection() const { return StringSection; }
  const DWARFSection &getStringOffsetSection() const {
    return StringOffsetSection;
  }

  void setSkeletonUnit(DWARFUnit *SU) { this->SU = SU; }
  // Returns itself if not using Split DWARF, or if the unit is a skeleton unit
  // - otherwise returns the split full unit's corresponding skeleton, if
  // available.
  DWARFUnit *getLinkedUnit() { return IsDWO ? SU : this; }

  void setAddrOffsetSection(const DWARFSection *AOS, uint64_t Base) {
    AddrOffsetSection = AOS;
    AddrOffsetSectionBase = Base;
  }

  std::optional<uint64_t> getAddrOffsetSectionBase() const {
    return AddrOffsetSectionBase;
  }

  /// Returns offset to the indexed address value inside .debug_addr section.
  std::optional<uint64_t> getIndexedAddressOffset(uint64_t Index) {
    if (std::optional<uint64_t> AddrOffsetSectionBase =
            getAddrOffsetSectionBase())
      return *AddrOffsetSectionBase + Index * getAddressByteSize();

    return std::nullopt;
  }

  /// Recursively update address to Die map.
  void updateAddressDieMap(DWARFDie Die);

  /// Recursively update address to variable Die map.
  void updateVariableDieMap(DWARFDie Die);

  void setRangesSection(const DWARFSection *RS, uint64_t Base) {
    RangeSection = RS;
    RangeSectionBase = Base;
  }

  uint64_t getLocSectionBase() const {
    return LocSectionBase;
  }

  std::optional<object::SectionedAddress>
  getAddrOffsetSectionItem(uint32_t Index) const;
  Expected<uint64_t> getStringOffsetSectionItem(uint32_t Index) const;

  DWARFDataExtractor getDebugInfoExtractor() const;

  DataExtractor getStringExtractor() const {
    return DataExtractor(StringSection, false, 0);
  }

  const DWARFLocationTable &getLocationTable() { return *LocTable; }

  /// Extract the range list referenced by this compile unit from the
  /// .debug_ranges section. If the extraction is unsuccessful, an error
  /// is returned. Successful extraction requires that the compile unit
  /// has already been extracted.
  Error extractRangeList(uint64_t RangeListOffset,
                         DWARFDebugRangeList &RangeList) const;
  void clear();

  const std::optional<StrOffsetsContributionDescriptor> &
  getStringOffsetsTableContribution() const {
    return StringOffsetsTableContribution;
  }

  uint8_t getDwarfStringOffsetsByteSize() const {
    assert(StringOffsetsTableContribution);
    return StringOffsetsTableContribution->getDwarfOffsetByteSize();
  }

  uint64_t getStringOffsetsBase() const {
    assert(StringOffsetsTableContribution);
    return StringOffsetsTableContribution->Base;
  }

  uint64_t getAbbreviationsOffset() const { return Header.getAbbrOffset(); }

  const DWARFAbbreviationDeclarationSet *getAbbreviations() const;

  static bool isMatchingUnitTypeAndTag(uint8_t UnitType, dwarf::Tag Tag) {
    switch (UnitType) {
    case dwarf::DW_UT_compile:
      return Tag == dwarf::DW_TAG_compile_unit;
    case dwarf::DW_UT_type:
      return Tag == dwarf::DW_TAG_type_unit;
    case dwarf::DW_UT_partial:
      return Tag == dwarf::DW_TAG_partial_unit;
    case dwarf::DW_UT_skeleton:
      return Tag == dwarf::DW_TAG_skeleton_unit;
    case dwarf::DW_UT_split_compile:
    case dwarf::DW_UT_split_type:
      return dwarf::isUnitType(Tag);
    }
    return false;
  }

  std::optional<object::SectionedAddress> getBaseAddress();

  DWARFDie getUnitDIE(bool ExtractUnitDIEOnly = true) {
    extractDIEsIfNeeded(ExtractUnitDIEOnly);
    if (DieArray.empty())
      return DWARFDie();
    return DWARFDie(this, &DieArray[0]);
  }

  DWARFDie getNonSkeletonUnitDIE(bool ExtractUnitDIEOnly = true,
                                 StringRef DWOAlternativeLocation = {}) {
    parseDWO(DWOAlternativeLocation);
    return DWO ? DWO->getUnitDIE(ExtractUnitDIEOnly)
               : getUnitDIE(ExtractUnitDIEOnly);
  }

  const char *getCompilationDir();
  std::optional<uint64_t> getDWOId() {
    extractDIEsIfNeeded(/*CUDieOnly*/ true);
    return getHeader().getDWOId();
  }
  void setDWOId(uint64_t NewID) { Header.setDWOId(NewID); }

  /// Return a vector of address ranges resulting from a (possibly encoded)
  /// range list starting at a given offset in the appropriate ranges section.
  Expected<DWARFAddressRangesVector> findRnglistFromOffset(uint64_t Offset);

  /// Return a vector of address ranges retrieved from an encoded range
  /// list whose offset is found via a table lookup given an index (DWARF v5
  /// and later).
  Expected<DWARFAddressRangesVector> findRnglistFromIndex(uint32_t Index);

  /// Return a rangelist's offset based on an index. The index designates
  /// an entry in the rangelist table's offset array and is supplied by
  /// DW_FORM_rnglistx.
  std::optional<uint64_t> getRnglistOffset(uint32_t Index);

  std::optional<uint64_t> getLoclistOffset(uint32_t Index);

  Expected<DWARFAddressRangesVector> collectAddressRanges();

  Expected<DWARFLocationExpressionsVector>
  findLoclistFromOffset(uint64_t Offset);

  /// Returns subprogram DIE with address range encompassing the provided
  /// address. The pointer is alive as long as parsed compile unit DIEs are not
  /// cleared.
  DWARFDie getSubroutineForAddress(uint64_t Address);

  /// Returns variable DIE for the address provided. The pointer is alive as
  /// long as parsed compile unit DIEs are not cleared.
  DWARFDie getVariableForAddress(uint64_t Address);

  /// getInlinedChainForAddress - fetches inlined chain for a given address.
  /// Returns empty chain if there is no subprogram containing address. The
  /// chain is valid as long as parsed compile unit DIEs are not cleared.
  void getInlinedChainForAddress(uint64_t Address,
                                 SmallVectorImpl<DWARFDie> &InlinedChain);

  /// Return the DWARFUnitVector containing this unit.
  const DWARFUnitVector &getUnitVector() const { return UnitVector; }

  /// Returns the number of DIEs in the unit. Parses the unit
  /// if necessary.
  unsigned getNumDIEs() {
    extractDIEsIfNeeded(false);
    return DieArray.size();
  }

  /// Return the index of a DIE inside the unit's DIE vector.
  ///
  /// It is illegal to call this method with a DIE that hasn't been
  /// created by this unit. In other words, it's illegal to call this
  /// method on a DIE that isn't accessible by following
  /// children/sibling links starting from this unit's getUnitDIE().
  uint32_t getDIEIndex(const DWARFDie &D) const {
    return getDIEIndex(D.getDebugInfoEntry());
  }

  /// Return the DIE object at the given index \p Index.
  DWARFDie getDIEAtIndex(unsigned Index) {
    return DWARFDie(this, getDebugInfoEntry(Index));
  }

  DWARFDie getParent(const DWARFDebugInfoEntry *Die);
  DWARFDie getSibling(const DWARFDebugInfoEntry *Die);
  DWARFDie getPreviousSibling(const DWARFDebugInfoEntry *Die);
  DWARFDie getFirstChild(const DWARFDebugInfoEntry *Die);
  DWARFDie getLastChild(const DWARFDebugInfoEntry *Die);

  /// Return the DIE object for a given offset \p Offset inside the
  /// unit's DIE vector.
  DWARFDie getDIEForOffset(uint64_t Offset) {
    if (std::optional<uint32_t> DieIdx = getDIEIndexForOffset(Offset))
      return DWARFDie(this, &DieArray[*DieIdx]);

    return DWARFDie();
  }

  /// Return the DIE index for a given offset \p Offset inside the
  /// unit's DIE vector.
  std::optional<uint32_t> getDIEIndexForOffset(uint64_t Offset) {
    extractDIEsIfNeeded(false);
    auto It =
        llvm::partition_point(DieArray, [=](const DWARFDebugInfoEntry &DIE) {
          return DIE.getOffset() < Offset;
        });
    if (It != DieArray.end() && It->getOffset() == Offset)
      return It - DieArray.begin();
    return std::nullopt;
  }

  uint32_t getLineTableOffset() const {
    if (auto IndexEntry = Header.getIndexEntry())
      if (const auto *Contrib = IndexEntry->getContribution(DW_SECT_LINE))
        return Contrib->getOffset32();
    return 0;
  }

  die_iterator_range dies() {
    extractDIEsIfNeeded(false);
    return die_iterator_range(DieArray.begin(), DieArray.end());
  }

  virtual void dump(raw_ostream &OS, DIDumpOptions DumpOpts) = 0;

  Error tryExtractDIEsIfNeeded(bool CUDieOnly);

private:
  /// Size in bytes of the .debug_info data associated with this compile unit.
  size_t getDebugInfoSize() const {
    return Header.getLength() + Header.getUnitLengthFieldByteSize() -
           getHeaderSize();
  }

  /// extractDIEsIfNeeded - Parses a compile unit and indexes its DIEs if it
  /// hasn't already been done.
  void extractDIEsIfNeeded(bool CUDieOnly);

  /// extractDIEsToVector - Appends all parsed DIEs to a vector.
  void extractDIEsToVector(bool AppendCUDie, bool AppendNonCUDIEs,
                           std::vector<DWARFDebugInfoEntry> &DIEs) const;

  /// clearDIEs - Clear parsed DIEs to keep memory usage low.
  void clearDIEs(bool KeepCUDie);

  /// parseDWO - Parses .dwo file for current compile unit. Returns true if
  /// it was actually constructed.
  /// The \p AlternativeLocation specifies an alternative location to get
  /// the DWARF context for the DWO object; this is the case when it has
  /// been moved from its original location.
  bool parseDWO(StringRef AlternativeLocation = {});
};

inline bool isCompileUnit(const std::unique_ptr<DWARFUnit> &U) {
  return !U->isTypeUnit();
}
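// A minimal, hedged sketch of walking the immediate children of a unit's
// root DIE with getUnitDIE() and the DWARFDie navigation API; the function
// name is illustrative, and dump() needs llvm/Support/raw_ostream.h.
//
//   void visitUnit(DWARFUnit &U) {
//     DWARFDie Root = U.getUnitDIE(/*ExtractUnitDIEOnly=*/false);
//     for (DWARFDie Child = Root.getFirstChild(); Child;
//          Child = Child.getSibling())
//       Child.dump(outs());
//   }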

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFUNIT_H
//===- DWARFContext.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H
#define LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFObject.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Error.h"
#include "llvm/TargetParser/Host.h"
#include <cstdint>
#include <memory>

namespace llvm {

class MemoryBuffer;
class AppleAcceleratorTable;
class DWARFCompileUnit;
class DWARFDebugAbbrev;
class DWARFDebugAranges;
class DWARFDebugFrame;
class DWARFDebugLoc;
class DWARFDebugMacro;
class DWARFDebugNames;
class DWARFGdbIndex;
class DWARFTypeUnit;
class DWARFUnitIndex;

/// DWARFContext
/// This data structure is the top-level entity that deals with DWARF debug
/// information parsing. The actual data is supplied through DWARFObj.
class DWARFContext : public DIContext {
  DWARFUnitVector NormalUnits;
  std::optional<DenseMap<uint64_t, DWARFTypeUnit *>> NormalTypeUnits;
  std::unique_ptr<DWARFUnitIndex> CUIndex;
  std::unique_ptr<DWARFGdbIndex> GdbIndex;
  std::unique_ptr<DWARFUnitIndex> TUIndex;
  std::unique_ptr<DWARFDebugAbbrev> Abbrev;
  std::unique_ptr<DWARFDebugLoc> Loc;
  std::unique_ptr<DWARFDebugAranges> Aranges;
  std::unique_ptr<DWARFDebugLine> Line;
  std::unique_ptr<DWARFDebugFrame> DebugFrame;
  std::unique_ptr<DWARFDebugFrame> EHFrame;
  std::unique_ptr<DWARFDebugMacro> Macro;
  std::unique_ptr<DWARFDebugMacro> Macinfo;
  std::unique_ptr<DWARFDebugNames> Names;
  std::unique_ptr<AppleAcceleratorTable> AppleNames;
  std::unique_ptr<AppleAcceleratorTable> AppleTypes;
  std::unique_ptr<AppleAcceleratorTable> AppleNamespaces;
  std::unique_ptr<AppleAcceleratorTable> AppleObjC;

  DWARFUnitVector DWOUnits;
  std::optional<DenseMap<uint64_t, DWARFTypeUnit *>> DWOTypeUnits;
  std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
  std::unique_ptr<DWARFDebugMacro> MacinfoDWO;
  std::unique_ptr<DWARFDebugMacro> MacroDWO;

  /// The maximum DWARF version of all units.
  unsigned MaxVersion = 0;

  struct DWOFile {
    object::OwningBinary<object::ObjectFile> File;
    std::unique_ptr<DWARFContext> Context;
  };
  StringMap<std::weak_ptr<DWOFile>> DWOFiles;
  std::weak_ptr<DWOFile> DWP;
  bool CheckedForDWP = false;
  std::string DWPName;
  std::function<void(Error)> RecoverableErrorHandler =
      WithColor::defaultErrorHandler;
  std::function<void(Error)> WarningHandler = WithColor::defaultWarningHandler;

  /// Read compile units from the debug_info section (if necessary)
  /// and type units from the debug_types sections (if necessary)
  /// and store them in NormalUnits.
  void parseNormalUnits();

  /// Read compile units from the debug_info.dwo section (if necessary)
  /// and type units from the debug_types.dwo section (if necessary)
  /// and store them in DWOUnits.
  /// If \p Lazy is true, set up to parse but don't actually parse them.
  enum { EagerParse = false, LazyParse = true };
  void parseDWOUnits(bool Lazy = false);

  std::unique_ptr<const DWARFObject> DObj;

  /// Helper enum to distinguish between macro[.dwo] and macinfo[.dwo]
  /// section.
  enum MacroSecType {
    MacinfoSection,
    MacinfoDwoSection,
    MacroSection,
    MacroDwoSection
  };

  // When set, parses debug_info.dwo/debug_abbrev.dwo manually and populates
  // the CU Index and TU Index for DWARF5.
  bool ParseCUTUIndexManually = false;

public:
  DWARFContext(std::unique_ptr<const DWARFObject> DObj,
               std::string DWPName = "",
               std::function<void(Error)> RecoverableErrorHandler =
                   WithColor::defaultErrorHandler,
               std::function<void(Error)> WarningHandler =
                   WithColor::defaultWarningHandler);
  ~DWARFContext() override;

  DWARFContext(DWARFContext &) = delete;
  DWARFContext &operator=(DWARFContext &) = delete;

  const DWARFObject &getDWARFObj() const { return *DObj; }

  static bool classof(const DIContext *DICtx) {
    return DICtx->getKind() == CK_DWARF;
  }

  /// Dump a textual representation to \p OS. If any \p DumpOffsets are present,
  /// dump only the record at the specified offset.
  void dump(raw_ostream &OS, DIDumpOptions DumpOpts,
            std::array<std::optional<uint64_t>, DIDT_ID_Count> DumpOffsets);

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) override {
    std::array<std::optional<uint64_t>, DIDT_ID_Count> DumpOffsets;
    dump(OS, DumpOpts, DumpOffsets);
  }

  bool verify(raw_ostream &OS, DIDumpOptions DumpOpts = {}) override;

  using unit_iterator_range = DWARFUnitVector::iterator_range;
  using compile_unit_range = DWARFUnitVector::compile_unit_range;

  /// Get units from .debug_info in this context.
  unit_iterator_range info_section_units() {
    parseNormalUnits();
    return unit_iterator_range(NormalUnits.begin(),
                               NormalUnits.begin() +
                                   NormalUnits.getNumInfoUnits());
  }

  const DWARFUnitVector &getNormalUnitsVector() {
    parseNormalUnits();
    return NormalUnits;
  }

  /// Get units from .debug_types in this context.
  unit_iterator_range types_section_units() {
    parseNormalUnits();
    return unit_iterator_range(
        NormalUnits.begin() + NormalUnits.getNumInfoUnits(), NormalUnits.end());
  }

  /// Get compile units in this context.
  compile_unit_range compile_units() {
    return make_filter_range(info_section_units(), isCompileUnit);
  }
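  // A hedged iteration sketch over the filtered range above; `Ctx` stands for
  // a constructed DWARFContext, and utohexstr comes from
  // llvm/ADT/StringExtras.h (already included by this header).
  //
  //   for (const std::unique_ptr<DWARFUnit> &CU : Ctx.compile_units())
  //     outs() << "CU at 0x" << utohexstr(CU->getOffset()) << "\n";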

  // If you want type_units(), it'll need to be a concat iterator of a filter of
  // TUs in info_section + all the (all type) units in types_section

  /// Get all normal compile/type units in this context.
  unit_iterator_range normal_units() {
    parseNormalUnits();
    return unit_iterator_range(NormalUnits.begin(), NormalUnits.end());
  }

  /// Get units from .debug_info.dwo in the DWO context.
  unit_iterator_range dwo_info_section_units() {
    parseDWOUnits();
    return unit_iterator_range(DWOUnits.begin(),
                               DWOUnits.begin() + DWOUnits.getNumInfoUnits());
  }

  const DWARFUnitVector &getDWOUnitsVector() {
    parseDWOUnits();
    return DWOUnits;
  }

  /// Get units from .debug_types.dwo in the DWO context.
  unit_iterator_range dwo_types_section_units() {
    parseDWOUnits();
    return unit_iterator_range(DWOUnits.begin() + DWOUnits.getNumInfoUnits(),
                               DWOUnits.end());
  }

  /// Get compile units in the DWO context.
  compile_unit_range dwo_compile_units() {
    return make_filter_range(dwo_info_section_units(), isCompileUnit);
  }

  // If you want dwo_type_units(), it'll need to be a concat iterator of a
  // filter of TUs in dwo_info_section + all the (all type) units in
  // dwo_types_section.

  /// Get all units in the DWO context.
  unit_iterator_range dwo_units() {
    parseDWOUnits();
    return unit_iterator_range(DWOUnits.begin(), DWOUnits.end());
  }

  /// Get the number of compile units in this context.
  unsigned getNumCompileUnits() {
    parseNormalUnits();
    return NormalUnits.getNumInfoUnits();
  }

  /// Get the number of type units in this context.
  unsigned getNumTypeUnits() {
    parseNormalUnits();
    return NormalUnits.getNumTypesUnits();
  }

  /// Get the number of compile units in the DWO context.
  unsigned getNumDWOCompileUnits() {
    parseDWOUnits();
    return DWOUnits.getNumInfoUnits();
  }

  /// Get the number of type units in the DWO context.
  unsigned getNumDWOTypeUnits() {
    parseDWOUnits();
    return DWOUnits.getNumTypesUnits();
  }

  /// Get the unit at the specified index.
  DWARFUnit *getUnitAtIndex(unsigned index) {
    parseNormalUnits();
    return NormalUnits[index].get();
  }

  /// Get the unit at the specified index for the DWO units.
  DWARFUnit *getDWOUnitAtIndex(unsigned index) {
    parseDWOUnits();
    return DWOUnits[index].get();
  }

  DWARFCompileUnit *getDWOCompileUnitForHash(uint64_t Hash);
  DWARFTypeUnit *getTypeUnitForHash(uint16_t Version, uint64_t Hash, bool IsDWO);

  /// Return the compile unit that includes an offset (relative to .debug_info).
  DWARFCompileUnit *getCompileUnitForOffset(uint64_t Offset);

  /// Get a DIE given an exact offset.
  DWARFDie getDIEForOffset(uint64_t Offset);

  unsigned getMaxVersion() {
    // Ensure info units have been parsed to discover MaxVersion
    info_section_units();
    return MaxVersion;
  }

  unsigned getMaxDWOVersion() {
    // Ensure DWO info units have been parsed to discover MaxVersion
    dwo_info_section_units();
    return MaxVersion;
  }

  void setMaxVersionIfGreater(unsigned Version) {
    if (Version > MaxVersion)
      MaxVersion = Version;
  }

  const DWARFUnitIndex &getCUIndex();
  DWARFGdbIndex &getGdbIndex();
  const DWARFUnitIndex &getTUIndex();

  /// Get a pointer to the parsed DebugAbbrev object.
  const DWARFDebugAbbrev *getDebugAbbrev();

  /// Get a pointer to the parsed DebugLoc object.
  const DWARFDebugLoc *getDebugLoc();

  /// Get a pointer to the parsed dwo abbreviations object.
  const DWARFDebugAbbrev *getDebugAbbrevDWO();

  /// Get a pointer to the parsed DebugAranges object.
  const DWARFDebugAranges *getDebugAranges();

  /// Get a pointer to the parsed frame information object.
  Expected<const DWARFDebugFrame *> getDebugFrame();

  /// Get a pointer to the parsed eh frame information object.
  Expected<const DWARFDebugFrame *> getEHFrame();

  /// Get a pointer to the parsed DebugMacinfo information object.
  const DWARFDebugMacro *getDebugMacinfo();

  /// Get a pointer to the parsed DebugMacinfoDWO information object.
  const DWARFDebugMacro *getDebugMacinfoDWO();

  /// Get a pointer to the parsed DebugMacro information object.
  const DWARFDebugMacro *getDebugMacro();

  /// Get a pointer to the parsed DebugMacroDWO information object.
  const DWARFDebugMacro *getDebugMacroDWO();

  /// Get a reference to the parsed accelerator table object.
  const DWARFDebugNames &getDebugNames();

  /// Get a reference to the parsed accelerator table object.
  const AppleAcceleratorTable &getAppleNames();

  /// Get a reference to the parsed accelerator table object.
  const AppleAcceleratorTable &getAppleTypes();

  /// Get a reference to the parsed accelerator table object.
  const AppleAcceleratorTable &getAppleNamespaces();

  /// Get a reference to the parsed accelerator table object.
  const AppleAcceleratorTable &getAppleObjC();

  /// Get a pointer to a parsed line table corresponding to a compile unit.
  /// Report any parsing issues as warnings on stderr.
  const DWARFDebugLine::LineTable *getLineTableForUnit(DWARFUnit *U);

  /// Get a pointer to a parsed line table corresponding to a compile unit.
  /// Report any recoverable parsing problems using the handler.
  Expected<const DWARFDebugLine::LineTable *>
  getLineTableForUnit(DWARFUnit *U,
                      function_ref<void(Error)> RecoverableErrorHandler);
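
  // Example (editor's sketch): fetching a unit's line table while routing
  // recoverable parse problems to a handler; `Ctx` and `U` are assumed to
  // already exist.
  //
  //   Expected<const DWARFDebugLine::LineTable *> LT =
  //       Ctx.getLineTableForUnit(U, WithColor::defaultErrorHandler);
  //   if (!LT)
  //     consumeError(LT.takeError());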

  /// Clear the line table object corresponding to a compile unit for memory
  /// management purposes. When the line table is requested again, it will be
  /// re-populated.
  void clearLineTableForUnit(DWARFUnit *U);

  DataExtractor getStringExtractor() const {
    return DataExtractor(DObj->getStrSection(), false, 0);
  }
  DataExtractor getStringDWOExtractor() const {
    return DataExtractor(DObj->getStrDWOSection(), false, 0);
  }
  DataExtractor getLineStringExtractor() const {
    return DataExtractor(DObj->getLineStrSection(), false, 0);
  }

  /// Wraps the returned DIEs for a given address.
  struct DIEsForAddress {
    DWARFCompileUnit *CompileUnit = nullptr;
    DWARFDie FunctionDIE;
    DWARFDie BlockDIE;
    explicit operator bool() const { return CompileUnit != nullptr; }
  };

  /// Get the compilation unit, the function DIE and lexical block DIE for the
  /// given address where applicable.
  /// TODO: change input parameter from "uint64_t Address"
  ///       into "SectionedAddress Address"
  DIEsForAddress getDIEsForAddress(uint64_t Address);
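
  // Example (editor's sketch): resolving a code address `PC` to its enclosing
  // function DIE, assuming a DWARFContext `Ctx`.
  //
  //   if (DWARFContext::DIEsForAddress DIEs = Ctx.getDIEsForAddress(PC))
  //     if (DIEs.FunctionDIE)
  //       DIEs.FunctionDIE.dump();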

  DILineInfo getLineInfoForAddress(
      object::SectionedAddress Address,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
  DILineInfo
  getLineInfoForDataAddress(object::SectionedAddress Address) override;
  DILineInfoTable getLineInfoForAddressRange(
      object::SectionedAddress Address, uint64_t Size,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
  DIInliningInfo getInliningInfoForAddress(
      object::SectionedAddress Address,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;

  std::vector<DILocal>
  getLocalsForAddress(object::SectionedAddress Address) override;

  bool isLittleEndian() const { return DObj->isLittleEndian(); }
  static unsigned getMaxSupportedVersion() { return 5; }
  static bool isSupportedVersion(unsigned version) {
    return version >= 2 && version <= getMaxSupportedVersion();
  }

  static SmallVector<uint8_t, 3> getSupportedAddressSizes() {
    return {2, 4, 8};
  }
  static bool isAddressSizeSupported(unsigned AddressSize) {
    return llvm::is_contained(getSupportedAddressSizes(), AddressSize);
  }
  template <typename... Ts>
  static Error checkAddressSizeSupported(unsigned AddressSize,
                                         std::error_code EC, char const *Fmt,
                                         const Ts &...Vals) {
    if (isAddressSizeSupported(AddressSize))
      return Error::success();
    std::string Buffer;
    raw_string_ostream Stream(Buffer);
    Stream << format(Fmt, Vals...)
           << " has unsupported address size: " << AddressSize
           << " (supported are ";
    ListSeparator LS;
    for (unsigned Size : DWARFContext::getSupportedAddressSizes())
      Stream << LS << Size;
    Stream << ')';
    return make_error<StringError>(Stream.str(), EC);
  }
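
  // Example (editor's sketch, modeled on typical callers): validating a
  // unit's address size while building a contextual error message;
  // `UnitOffset` is a hypothetical variable.
  //
  //   if (Error E = DWARFContext::checkAddressSizeSupported(
  //           AddrSize, errc::invalid_argument,
  //           "DWARF unit at offset 0x%8.8" PRIx64, UnitOffset))
  //     return E;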

  std::shared_ptr<DWARFContext> getDWOContext(StringRef AbsolutePath);

  function_ref<void(Error)> getRecoverableErrorHandler() {
    return RecoverableErrorHandler;
  }

  function_ref<void(Error)> getWarningHandler() { return WarningHandler; }

  enum class ProcessDebugRelocations { Process, Ignore };

  static std::unique_ptr<DWARFContext>
  create(const object::ObjectFile &Obj,
         ProcessDebugRelocations RelocAction = ProcessDebugRelocations::Process,
         const LoadedObjectInfo *L = nullptr, std::string DWPName = "",
         std::function<void(Error)> RecoverableErrorHandler =
             WithColor::defaultErrorHandler,
         std::function<void(Error)> WarningHandler =
             WithColor::defaultWarningHandler);

  static std::unique_ptr<DWARFContext>
  create(const StringMap<std::unique_ptr<MemoryBuffer>> &Sections,
         uint8_t AddrSize, bool isLittleEndian = sys::IsLittleEndianHost,
         std::function<void(Error)> RecoverableErrorHandler =
             WithColor::defaultErrorHandler,
         std::function<void(Error)> WarningHandler =
             WithColor::defaultWarningHandler);

  /// Get address size from CUs.
  /// TODO: refactor compile_units() to make this const.
  uint8_t getCUAddrSize();

  Triple::ArchType getArch() const {
    return getDWARFObj().getFile()->getArch();
  }

  /// Return the compile unit which contains instruction with provided
  /// address.
  /// TODO: change input parameter from "uint64_t Address"
  ///       into "SectionedAddress Address"
  DWARFCompileUnit *getCompileUnitForCodeAddress(uint64_t Address);

  /// Return the compile unit which contains data with the provided address.
  /// Note: This is more expensive than `getCompileUnitForCodeAddress`: if
  /// `Address` isn't found in the CU ranges (which is cheap to check), it
  /// falls back to an expensive O(n) walk of all CUs looking for data that
  /// spans the address.
  /// TODO: change input parameter from "uint64_t Address" into
  ///       "SectionedAddress Address"
  DWARFCompileUnit *getCompileUnitForDataAddress(uint64_t Address);

  /// Returns whether CU/TU units should be populated manually. The TU index is
  /// populated manually only for DWARF v5.
  bool getParseCUTUIndexManually() const { return ParseCUTUIndexManually; }

  /// Sets whether CU/TU units should be populated manually. The TU index is
  /// populated manually only for DWARF v5.
  void setParseCUTUIndexManually(bool PCUTU) { ParseCUTUIndexManually = PCUTU; }

private:
  /// Parse a macro[.dwo] or macinfo[.dwo] section.
  std::unique_ptr<DWARFDebugMacro>
  parseMacroOrMacinfo(MacroSecType SectionType);

  void addLocalsForDie(DWARFCompileUnit *CU, DWARFDie Subprogram, DWARFDie Die,
                       std::vector<DILocal> &Result);
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H
//===- DWARFAttribute.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFATTRIBUTE_H
#define LLVM_DEBUGINFO_DWARF_DWARFATTRIBUTE_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include <cstdint>

namespace llvm {

//===----------------------------------------------------------------------===//
/// Encapsulates a DWARF attribute value and all of the data required to
/// describe the attribute value.
///
/// This class is designed to be used by clients that want to iterate across all
/// attributes in a DWARFDie.
struct DWARFAttribute {
  /// The debug info/types offset for this attribute.
  uint64_t Offset = 0;
  /// The debug info/types section byte size of the data for this attribute.
  uint32_t ByteSize = 0;
  /// The attribute enumeration of this attribute.
  dwarf::Attribute Attr = dwarf::Attribute(0);
  /// The form and value for this attribute.
  DWARFFormValue Value;

  bool isValid() const {
    return Offset != 0 && Attr != dwarf::Attribute(0);
  }

  explicit operator bool() const {
    return isValid();
  }

  /// Identify DWARF attributes that may contain a pointer to a location list.
  static bool mayHaveLocationList(dwarf::Attribute Attr);

  /// Identifies DWARF attributes that may contain a reference to a
  /// DWARF expression.
  static bool mayHaveLocationExpr(dwarf::Attribute Attr);
};
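
// Example (editor's sketch): iterating every attribute of a DWARFDie `Die`
// (DWARFDie::attributes() is assumed to be available via DWARFDie.h).
//
//   for (const DWARFAttribute &A : Die.attributes())
//     if (DWARFAttribute::mayHaveLocationList(A.Attr))
//       ; // A.Value may hold a location-list offset worth inspecting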

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFATTRIBUTE_H
//===- DWARFObject.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFOBJECT_H
#define LLVM_DEBUGINFO_DWARF_DWARFOBJECT_H

#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
#include "llvm/Object/ObjectFile.h"
#include <optional>

namespace llvm {
// This class is responsible for low-level access to the object file. It
// knows how to find the required sections and compute relocated values.
// The default implementations of the get<Section> methods return dummy values.
// This is to allow clients that only need some of those sections to implement
// just the ones they need. We can't make most of these methods unreachable
// because the parser implementation is eager and will call some of them even
// if the result is not used.
class DWARFObject {
  DWARFSection Dummy;

public:
  virtual ~DWARFObject() = default;
  virtual StringRef getFileName() const { llvm_unreachable("unimplemented"); }
  virtual const object::ObjectFile *getFile() const { return nullptr; }
  virtual ArrayRef<SectionName> getSectionNames() const { return {}; }
  virtual bool isLittleEndian() const = 0;
  virtual uint8_t getAddressSize() const { llvm_unreachable("unimplemented"); }
  virtual void
  forEachInfoSections(function_ref<void(const DWARFSection &)> F) const {}
  virtual void
  forEachTypesSections(function_ref<void(const DWARFSection &)> F) const {}
  virtual StringRef getAbbrevSection() const { return ""; }
  virtual const DWARFSection &getLocSection() const { return Dummy; }
  virtual const DWARFSection &getLoclistsSection() const { return Dummy; }
  virtual StringRef getArangesSection() const { return ""; }
  virtual const DWARFSection &getFrameSection() const { return Dummy; }
  virtual const DWARFSection &getEHFrameSection() const { return Dummy; }
  virtual const DWARFSection &getLineSection() const { return Dummy; }
  virtual StringRef getLineStrSection() const { return ""; }
  virtual StringRef getStrSection() const { return ""; }
  virtual const DWARFSection &getRangesSection() const { return Dummy; }
  virtual const DWARFSection &getRnglistsSection() const { return Dummy; }
  virtual const DWARFSection &getMacroSection() const { return Dummy; }
  virtual StringRef getMacroDWOSection() const { return ""; }
  virtual StringRef getMacinfoSection() const { return ""; }
  virtual StringRef getMacinfoDWOSection() const { return ""; }
  virtual const DWARFSection &getPubnamesSection() const { return Dummy; }
  virtual const DWARFSection &getPubtypesSection() const { return Dummy; }
  virtual const DWARFSection &getGnuPubnamesSection() const { return Dummy; }
  virtual const DWARFSection &getGnuPubtypesSection() const { return Dummy; }
  virtual const DWARFSection &getStrOffsetsSection() const { return Dummy; }
  virtual void
  forEachInfoDWOSections(function_ref<void(const DWARFSection &)> F) const {}
  virtual void
  forEachTypesDWOSections(function_ref<void(const DWARFSection &)> F) const {}
  virtual StringRef getAbbrevDWOSection() const { return ""; }
  virtual const DWARFSection &getLineDWOSection() const { return Dummy; }
  virtual const DWARFSection &getLocDWOSection() const { return Dummy; }
  virtual const DWARFSection &getLoclistsDWOSection() const { return Dummy; }
  virtual StringRef getStrDWOSection() const { return ""; }
  virtual const DWARFSection &getStrOffsetsDWOSection() const {
    return Dummy;
  }
  virtual const DWARFSection &getRangesDWOSection() const { return Dummy; }
  virtual const DWARFSection &getRnglistsDWOSection() const { return Dummy; }
  virtual const DWARFSection &getAddrSection() const { return Dummy; }
  virtual const DWARFSection &getAppleNamesSection() const { return Dummy; }
  virtual const DWARFSection &getAppleTypesSection() const { return Dummy; }
  virtual const DWARFSection &getAppleNamespacesSection() const {
    return Dummy;
  }
  virtual const DWARFSection &getNamesSection() const { return Dummy; }
  virtual const DWARFSection &getAppleObjCSection() const { return Dummy; }
  virtual StringRef getCUIndexSection() const { return ""; }
  virtual StringRef getGdbIndexSection() const { return ""; }
  virtual StringRef getTUIndexSection() const { return ""; }
  virtual std::optional<RelocAddrEntry> find(const DWARFSection &Sec,
                                             uint64_t Pos) const = 0;
};
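
// Example (editor's sketch): the smallest concrete DWARFObject. Only the two
// pure-virtual members are overridden; every section accessor keeps its dummy
// default.
//
//   class EmptyDWARFObject : public DWARFObject {
//   public:
//     bool isLittleEndian() const override { return true; }
//     std::optional<RelocAddrEntry> find(const DWARFSection &,
//                                        uint64_t) const override {
//       return std::nullopt;
//     }
//   };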

} // namespace llvm
#endif
//===- DWARFDebugRangeList.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H

#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
#include <cstdint>
#include <vector>

namespace llvm {

class raw_ostream;
class DWARFDataExtractor;
namespace object {
struct SectionedAddress;
}

class DWARFDebugRangeList {
public:
  struct RangeListEntry {
    /// A beginning address offset. This address offset has the size of an
    /// address and is relative to the applicable base address of the
    /// compilation unit referencing this range list. It marks the beginning
    /// of an address range.
    uint64_t StartAddress;
    /// An ending address offset. This address offset again has the size of
    /// an address and is relative to the applicable base address of the
    /// compilation unit referencing this range list. It marks the first
    /// address past the end of the address range. The ending address must
    /// be greater than or equal to the beginning address.
    uint64_t EndAddress;
    /// A section index this range belongs to.
    uint64_t SectionIndex;

    /// The end of any given range list is marked by an end of list entry,
    /// which consists of a 0 for the beginning address offset
    /// and a 0 for the ending address offset.
    bool isEndOfListEntry() const {
      return (StartAddress == 0) && (EndAddress == 0);
    }

    /// A base address selection entry consists of:
    /// 1. The value of the largest representable address offset
    /// (for example, 0xffffffff when the size of an address is 32 bits).
    /// 2. An address, which defines the appropriate base address for
    /// use in interpreting the beginning and ending address offsets of
    /// subsequent entries of the range list.
    bool isBaseAddressSelectionEntry(uint8_t AddressSize) const;
  };

private:
  /// Offset in .debug_ranges section.
  uint64_t Offset;
  uint8_t AddressSize;
  std::vector<RangeListEntry> Entries;

public:
  DWARFDebugRangeList() { clear(); }

  void clear();
  void dump(raw_ostream &OS) const;
  Error extract(const DWARFDataExtractor &data, uint64_t *offset_ptr);
  const std::vector<RangeListEntry> &getEntries() { return Entries; }

  /// getAbsoluteRanges - Returns absolute address ranges defined by this range
  /// list. Has to be passed base address of the compile unit referencing this
  /// range list.
  DWARFAddressRangesVector
  getAbsoluteRanges(std::optional<object::SectionedAddress> BaseAddr) const;
};
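
// Example (editor's sketch): extracting a range list at `Offset` from a
// DWARFDataExtractor `Data` and converting it to absolute ranges; `BaseAddr`
// is the unit's optional base address.
//
//   DWARFDebugRangeList RangeList;
//   if (Error E = RangeList.extract(Data, &Offset))
//     return E;
//   DWARFAddressRangesVector Ranges = RangeList.getAbsoluteRanges(BaseAddr);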

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H
//===- DWARFDebugAbbrev.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGABBREV_H
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGABBREV_H

#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
#include "llvm/Support/DataExtractor.h"
#include <cstdint>
#include <map>
#include <vector>

namespace llvm {

class raw_ostream;

class DWARFAbbreviationDeclarationSet {
  uint64_t Offset;
  /// Code of the first abbreviation, if all abbreviations in the set have
  /// consecutive codes. UINT32_MAX otherwise.
  uint32_t FirstAbbrCode;
  std::vector<DWARFAbbreviationDeclaration> Decls;

  using const_iterator =
      std::vector<DWARFAbbreviationDeclaration>::const_iterator;

public:
  DWARFAbbreviationDeclarationSet();

  uint64_t getOffset() const { return Offset; }
  void dump(raw_ostream &OS) const;
  Error extract(DataExtractor Data, uint64_t *OffsetPtr);

  const DWARFAbbreviationDeclaration *
  getAbbreviationDeclaration(uint32_t AbbrCode) const;

  const_iterator begin() const {
    return Decls.begin();
  }

  const_iterator end() const {
    return Decls.end();
  }

  std::string getCodeRange() const;

  uint32_t getFirstAbbrCode() const { return FirstAbbrCode; }

private:
  void clear();
};

class DWARFDebugAbbrev {
  using DWARFAbbreviationDeclarationSetMap =
      std::map<uint64_t, DWARFAbbreviationDeclarationSet>;

  mutable DWARFAbbreviationDeclarationSetMap AbbrDeclSets;
  mutable DWARFAbbreviationDeclarationSetMap::const_iterator PrevAbbrOffsetPos;
  mutable std::optional<DataExtractor> Data;

public:
  DWARFDebugAbbrev(DataExtractor Data);

  Expected<const DWARFAbbreviationDeclarationSet *>
  getAbbreviationDeclarationSet(uint64_t CUAbbrOffset) const;

  void dump(raw_ostream &OS) const;
  void parse() const;

  DWARFAbbreviationDeclarationSetMap::const_iterator begin() const {
    assert(!Data && "Must call parse before iterating over DWARFDebugAbbrev");
    return AbbrDeclSets.begin();
  }

  DWARFAbbreviationDeclarationSetMap::const_iterator end() const {
    return AbbrDeclSets.end();
  }
};
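
// Example (editor's sketch): eagerly parsing the abbreviation sets and
// walking them; note that begin() asserts parse() has already been called.
//
//   DWARFDebugAbbrev Abbrev(AbbrevData); // AbbrevData: a DataExtractor
//   Abbrev.parse();
//   for (const auto &[Offset, Set] : Abbrev)
//     Set.dump(llvm::outs());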

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFDEBUGABBREV_H
//===- DWARFRelocMap.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFRELOCMAP_H
#define LLVM_DEBUGINFO_DWARF_DWARFRELOCMAP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/RelocationResolver.h"
#include <cstdint>

namespace llvm {

/// RelocAddrEntry contains a relocated value and a section index.
/// The section index is -1LL if the relocation points to an absolute symbol.
struct RelocAddrEntry {
  uint64_t SectionIndex;
  object::RelocationRef Reloc;
  uint64_t SymbolValue;
  std::optional<object::RelocationRef> Reloc2;
  uint64_t SymbolValue2;
  object::RelocationResolver Resolver;
};

/// Instead of applying the relocations to the data we read from disk, we keep
/// a separate mapping table to the side and consult it at the locations in the
/// DWARF where we expect relocated values. This adds a bit of complexity to
/// the DWARF parsing/extraction, with the benefit of not allocating memory for
/// the entire size of the debug info sections.
using RelocAddrMap = DenseMap<uint64_t, RelocAddrEntry>;

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFRELOCMAP_H
//===- DWARFVerifier.h ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DWARF_DWARFVERIFIER_H
#define LLVM_DEBUGINFO_DWARF_DWARFVERIFIER_H

#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
#include "llvm/DebugInfo/DWARF/DWARFAddressRange.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include <cstdint>
#include <map>
#include <set>

namespace llvm {
class raw_ostream;
struct DWARFAddressRange;
class DWARFUnit;
class DWARFUnitVector;
struct DWARFAttribute;
class DWARFContext;
class DWARFDataExtractor;
class DWARFDebugAbbrev;
class DataExtractor;
struct DWARFSection;

/// A class that verifies DWARF debug information given a DWARF Context.
class DWARFVerifier {
public:
  /// A class that keeps the address range information for a single DIE.
  struct DieRangeInfo {
    DWARFDie Die;

    /// Sorted DWARFAddressRanges.
    std::vector<DWARFAddressRange> Ranges;

    /// Sorted children DieRangeInfos.
    std::set<DieRangeInfo> Children;

    DieRangeInfo() = default;
    DieRangeInfo(DWARFDie Die) : Die(Die) {}

    /// Used for unit testing.
    DieRangeInfo(std::vector<DWARFAddressRange> Ranges)
        : Ranges(std::move(Ranges)) {}

    typedef std::set<DieRangeInfo>::const_iterator die_range_info_iterator;

    /// Inserts the address range. If the range overlaps with an existing
    /// range, the range that it overlaps with will be returned and the two
    /// address ranges will be unioned together in "Ranges".
    ///
    /// This is used for finding overlapping ranges in the DW_AT_ranges
    /// attribute of a DIE. It is also used as a set of address ranges that
    /// children address ranges must all be contained in.
    std::optional<DWARFAddressRange> insert(const DWARFAddressRange &R);

    /// Inserts the address range info. If any of its ranges overlaps with a
    /// range in an existing range info, the range info is *not* added and an
    /// iterator to the overlapping range info is returned.
    ///
    /// This is used for finding overlapping children of the same DIE.
    die_range_info_iterator insert(const DieRangeInfo &RI);

    /// Return true if the ranges in this object contain all ranges within RHS.
    bool contains(const DieRangeInfo &RHS) const;

    /// Return true if any range in this object intersects with any range in
    /// RHS.
    bool intersects(const DieRangeInfo &RHS) const;
  };

private:
  raw_ostream &OS;
  DWARFContext &DCtx;
  DIDumpOptions DumpOpts;
  uint32_t NumDebugLineErrors = 0;
  // Used to relax some checks that do not currently work portably
  bool IsObjectFile;
  bool IsMachOObject;
  using ReferenceMap = std::map<uint64_t, std::set<uint64_t>>;

  raw_ostream &error() const;
  raw_ostream &warn() const;
  raw_ostream &note() const;
  raw_ostream &dump(const DWARFDie &Die, unsigned indent = 0) const;

  /// Verifies the abbreviations section.
  ///
  /// This function currently checks that:
  /// - No abbreviation declaration has more than one attribute with the same
  ///   name.
  ///
  /// \param Abbrev Pointer to the abbreviations section we are verifying
  /// Abbrev can be a pointer to either .debug_abbrev or .debug_abbrev.dwo.
  ///
  /// \returns The number of errors that occurred during verification.
  unsigned verifyAbbrevSection(const DWARFDebugAbbrev *Abbrev);

  /// Verifies the header of a unit in a .debug_info or .debug_types section.
  ///
  /// This function currently checks for:
  /// - Unit is in 32-bit DWARF format. The function can be modified to
  /// support 64-bit format.
  /// - The DWARF version is valid
  /// - The unit type is valid (if unit is in version >=5)
  /// - The unit doesn't extend beyond the containing section
  /// - The address size is valid
  /// - The offset in the .debug_abbrev section is valid
  ///
  /// \param DebugInfoData The section data
  /// \param Offset A reference to the offset start of the unit. The offset will
  /// be updated to point to the next unit in the section
  /// \param UnitIndex The index of the unit to be verified
  /// \param UnitType A reference to the type of the unit
  /// \param isUnitDWARF64 A reference to a flag that shows whether the unit is
  /// in 64-bit format.
  ///
  /// \returns true if the header is verified successfully, false otherwise.
  bool verifyUnitHeader(const DWARFDataExtractor DebugInfoData,
                        uint64_t *Offset, unsigned UnitIndex, uint8_t &UnitType,
                        bool &isUnitDWARF64);
  bool verifyName(const DWARFDie &Die);

  /// Verifies the contents of a unit in a .debug_info or .debug_types section.
  ///
  /// This function currently verifies:
  ///  - The debug info attributes.
  ///  - The debug info forms.
  ///  - The presence of a root DIE.
  ///  - That the root DIE is a unit DIE.
  ///  - If a unit type is provided, that the unit DIE matches the unit type.
  ///  - The DIE ranges.
  ///  - That call site entries are only nested within subprograms with a
  ///    DW_AT_call attribute.
  ///
  /// \param Unit      The DWARF Unit to verify.
  ///
  /// \returns The number of errors that occurred during verification.
  unsigned verifyUnitContents(DWARFUnit &Unit,
                              ReferenceMap &UnitLocalReferences,
                              ReferenceMap &CrossUnitReferences);

  /// Verifies the unit headers and contents in a .debug_info or .debug_types
  /// section.
  ///
  /// \param S           The DWARF Section to verify.
  ///
  /// \returns The number of errors that occurred during verification.
  unsigned verifyUnitSection(const DWARFSection &S);
  unsigned verifyUnits(const DWARFUnitVector &Units);

  unsigned verifyIndexes(const DWARFObject &DObj);
  unsigned verifyIndex(StringRef Name, DWARFSectionKind SectionKind,
                       StringRef Index);

  /// Verifies that a call site entry is nested within a subprogram with a
  /// DW_AT_call attribute.
  ///
  /// \returns Number of errors that occurred during verification.
  unsigned verifyDebugInfoCallSite(const DWARFDie &Die);

  /// Verify that all Die ranges are valid.
  ///
  /// This function currently checks for:
  /// - cases in which lowPC >= highPC
  ///
  /// \returns Number of errors that occurred during verification.
  unsigned verifyDieRanges(const DWARFDie &Die, DieRangeInfo &ParentRI);

  /// Verifies the attribute's DWARF attribute and its value.
  ///
  /// This function currently checks for:
  /// - DW_AT_ranges values is a valid .debug_ranges offset
  /// - DW_AT_stmt_list is a valid .debug_line offset
  ///
  /// \param Die          The DWARF DIE that owns the attribute value
  /// \param AttrValue    The DWARF attribute value to check
  ///
  /// \returns NumErrors The number of errors that occurred during verification
  /// of attributes' values in a unit
  unsigned verifyDebugInfoAttribute(const DWARFDie &Die,
                                    DWARFAttribute &AttrValue);

  /// Verifies the attribute's DWARF form.
  ///
  /// This function currently checks for:
  /// - All DW_FORM_ref values that are CU relative have valid CU offsets
  /// - All DW_FORM_ref_addr values have valid section offsets
  /// - All DW_FORM_strp values have valid .debug_str offsets
  ///
  /// \param Die          The DWARF DIE that owns the attribute value
  /// \param AttrValue    The DWARF attribute value to check
  ///
  /// \returns NumErrors The number of errors that occurred during verification
  /// of attributes' forms in a unit
  unsigned verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue,
                               ReferenceMap &UnitLocalReferences,
                               ReferenceMap &CrossUnitReferences);

  /// Verifies all valid references that were found when iterating through
  /// all of the DIE attributes.
  ///
  /// This function will verify that all references point to DIEs whose DIE
  /// offset matches. This helps to ensure that, if a DWARF link phase moved
  /// things around, it didn't create invalid references by failing to
  /// relocate CU-relative and absolute references.
  ///
  /// \returns NumErrors The number of errors that occurred during verification
  /// of references for the .debug_info and .debug_types sections
  unsigned verifyDebugInfoReferences(
      const ReferenceMap &,
      llvm::function_ref<DWARFUnit *(uint64_t)> GetUnitForDieOffset);

  /// Verify the DW_AT_stmt_list encoding and value and ensure that no two
  /// compile units have the same DW_AT_stmt_list value.
  void verifyDebugLineStmtOffsets();

  /// Verify that all of the rows in the line table are valid.
  ///
  /// This function currently checks for:
  /// - addresses within a sequence that decrease in value
  /// - invalid file indexes
  void verifyDebugLineRows();

  /// Verify that an Apple-style accelerator table is valid.
  ///
  /// This function currently checks that:
  /// - The fixed part of the header fits in the section
  /// - The size of the section is as large as what the header describes
  /// - There is at least one atom
  /// - The form for each atom is valid
  /// - The tag for each DIE in the table is valid
  /// - The buckets have a valid index, or they are empty
  /// - Each hashdata offset is valid
  /// - Each DIE is valid
  ///
  /// \param AccelSection pointer to the section containing the acceleration table
  /// \param StrData pointer to the string section
  /// \param SectionName the name of the table we're verifying
  ///
  /// \returns The number of errors that occurred during verification
  unsigned verifyAppleAccelTable(const DWARFSection *AccelSection,
                                 DataExtractor *StrData,
                                 const char *SectionName);

  unsigned verifyDebugNamesCULists(const DWARFDebugNames &AccelTable);
  unsigned verifyNameIndexBuckets(const DWARFDebugNames::NameIndex &NI,
                                  const DataExtractor &StrData);
  unsigned verifyNameIndexAbbrevs(const DWARFDebugNames::NameIndex &NI);
  unsigned verifyNameIndexAttribute(const DWARFDebugNames::NameIndex &NI,
                                    const DWARFDebugNames::Abbrev &Abbr,
                                    DWARFDebugNames::AttributeEncoding AttrEnc);
  unsigned verifyNameIndexEntries(const DWARFDebugNames::NameIndex &NI,
                                  const DWARFDebugNames::NameTableEntry &NTE);
  unsigned verifyNameIndexCompleteness(const DWARFDie &Die,
                                       const DWARFDebugNames::NameIndex &NI);

  /// Verify that the DWARF v5 accelerator table is valid.
  ///
  /// This function currently checks that:
  /// - Headers of individual Name Indices fit into the section and can be
  ///   parsed.
  /// - Abbreviation tables can be parsed and contain valid index attributes
  ///   with correct form encodings.
  /// - The CU lists reference existing compile units.
  /// - The buckets have a valid index, or they are empty.
  /// - All names are reachable via the hash table (they have the correct hash,
  ///   and the hash is in the correct bucket).
  /// - Information in the index entries is complete (all required entries are
  ///   present) and consistent with the debug_info section DIEs.
  ///
  /// \param AccelSection section containing the acceleration table
  /// \param StrData string section
  ///
  /// \returns The number of errors that occurred during verification
  unsigned verifyDebugNames(const DWARFSection &AccelSection,
                            const DataExtractor &StrData);

public:
  DWARFVerifier(raw_ostream &S, DWARFContext &D,
                DIDumpOptions DumpOpts = DIDumpOptions::getForSingleDIE());

  /// Verify the information in any of the following sections, if available:
  /// .debug_abbrev, .debug_abbrev.dwo
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if .debug_abbrev and .debug_abbrev.dwo verify successfully,
  /// false otherwise.
  bool handleDebugAbbrev();

  /// Verify the information in the .debug_info and .debug_types sections.
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if all sections verify successfully, false otherwise.
  bool handleDebugInfo();

  /// Verify the information in the .debug_cu_index section.
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if the .debug_cu_index verifies successfully, false
  /// otherwise.
  bool handleDebugCUIndex();

  /// Verify the information in the .debug_tu_index section.
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if the .debug_tu_index verifies successfully, false
  /// otherwise.
  bool handleDebugTUIndex();

  /// Verify the information in the .debug_line section.
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if the .debug_line verifies successfully, false otherwise.
  bool handleDebugLine();

  /// Verify the information in accelerator tables, if they exist.
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if the existing Apple-style accelerator tables verify
  /// successfully, false otherwise.
  bool handleAccelTables();

  /// Verify the information in the .debug_str_offsets[.dwo].
  ///
  /// Any errors are reported to the stream that this object was
  /// constructed with.
  ///
  /// \returns true if the .debug_str_offsets[.dwo] sections verify
  /// successfully, false otherwise.
  bool handleDebugStrOffsets();
  bool verifyDebugStrOffsets(
      StringRef SectionName, const DWARFSection &Section, StringRef StrData,
      void (DWARFObject::*)(function_ref<void(const DWARFSection &)>) const);
};

static inline bool operator<(const DWARFVerifier::DieRangeInfo &LHS,
                             const DWARFVerifier::DieRangeInfo &RHS) {
  return std::tie(LHS.Ranges, LHS.Die) < std::tie(RHS.Ranges, RHS.Die);
}
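
// Example (editor's sketch): driving a few verifier passes over a
// DWARFContext `DICtx`, with diagnostics written to stderr.
//
//   DWARFVerifier Verifier(llvm::errs(), DICtx);
//   bool Success = Verifier.handleDebugAbbrev();
//   Success &= Verifier.handleDebugInfo();
//   Success &= Verifier.handleDebugLine();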

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DWARF_DWARFVERIFIER_H
//===- MSFCommon.h - Common types and functions for MSF files ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_MSF_MSFCOMMON_H
#define LLVM_DEBUGINFO_MSF_MSFCOMMON_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>
#include <vector>

namespace llvm {
namespace msf {

static const char Magic[] = {'M',  'i',  'c',    'r', 'o', 's',  'o',  'f',
                             't',  ' ',  'C',    '/', 'C', '+',  '+',  ' ',
                             'M',  'S',  'F',    ' ', '7', '.',  '0',  '0',
                             '\r', '\n', '\x1a', 'D', 'S', '\0', '\0', '\0'};

// The superblock is overlaid at the beginning of the file (offset 0).
// It starts with a magic header and is followed by information which
// describes the layout of the file system.
struct SuperBlock {
  char MagicBytes[sizeof(Magic)];
  // The file system is split into a variable number of fixed size elements.
  // These elements are referred to as blocks.  The size of a block may vary
  // from system to system.
  support::ulittle32_t BlockSize;
  // The index of the free block map.
  support::ulittle32_t FreeBlockMapBlock;
  // This contains the number of blocks resident in the file system.  In
  // practice, NumBlocks * BlockSize is equivalent to the size of the MSF
  // file.
  support::ulittle32_t NumBlocks;
  // This contains the number of bytes which make up the directory.
  support::ulittle32_t NumDirectoryBytes;
  // This field's purpose is not yet known.
  support::ulittle32_t Unknown1;
  // This contains the block # of the block map.
  support::ulittle32_t BlockMapAddr;
};

struct MSFLayout {
  MSFLayout() = default;

  uint32_t mainFpmBlock() const {
    assert(SB->FreeBlockMapBlock == 1 || SB->FreeBlockMapBlock == 2);
    return SB->FreeBlockMapBlock;
  }

  uint32_t alternateFpmBlock() const {
    // If mainFpmBlock is 1, this is 2.  If mainFpmBlock is 2, this is 1.
    return 3U - mainFpmBlock();
  }

  const SuperBlock *SB = nullptr;
  BitVector FreePageMap;
  ArrayRef<support::ulittle32_t> DirectoryBlocks;
  ArrayRef<support::ulittle32_t> StreamSizes;
  std::vector<ArrayRef<support::ulittle32_t>> StreamMap;
};

/// Describes the layout of a stream in an MSF layout.  A "stream" here
/// is defined as any logical unit of data which may be arranged inside the MSF
/// file as a sequence of (possibly discontiguous) blocks.  When we want to read
/// from a particular MSF Stream, we fill out a stream layout structure and the
/// reader uses it to determine which blocks in the underlying MSF file contain
/// the data, so that it can be pieced together in the right order.
class MSFStreamLayout {
public:
  uint32_t Length;
  std::vector<support::ulittle32_t> Blocks;
};

/// Determine the layout of the FPM stream, given the MSF layout.  An FPM
/// stream spans 1 or more blocks, each at equally spaced intervals throughout
/// the file.
MSFStreamLayout getFpmStreamLayout(const MSFLayout &Msf,
                                   bool IncludeUnusedFpmData = false,
                                   bool AltFpm = false);

inline bool isValidBlockSize(uint32_t Size) {
  switch (Size) {
  case 512:
  case 1024:
  case 2048:
  case 4096:
  case 8192:
  case 16384:
  case 32768:
    return true;
  }
  return false;
}

/// Given the specified block size, returns the maximum possible file size.
/// Block Size  |  Max File Size
/// <= 4096     |      4GB
///    8192     |      8GB
///   16384     |      12GB
///   32768     |      16GB
/// \p Size - the block size of the MSF
inline uint64_t getMaxFileSizeFromBlockSize(uint32_t Size) {
  switch (Size) {
  case 8192:
    return (uint64_t)UINT32_MAX * 2ULL;
  case 16384:
    return (uint64_t)UINT32_MAX * 3ULL;
  case 32768:
    return (uint64_t)UINT32_MAX * 4ULL;
  default:
    return (uint64_t)UINT32_MAX;
  }
}

// Super Block, Fpm0, Fpm1, and Block Map
inline uint32_t getMinimumBlockCount() { return 4; }

// Super Block, Fpm0, and Fpm1 are reserved.  The Block Map, although required,
// need not be at block 3.
inline uint32_t getFirstUnreservedBlock() { return 3; }

inline uint64_t bytesToBlocks(uint64_t NumBytes, uint64_t BlockSize) {
  return divideCeil(NumBytes, BlockSize);
}

inline uint64_t blockToOffset(uint64_t BlockNumber, uint64_t BlockSize) {
  return BlockNumber * BlockSize;
}

inline uint32_t getFpmIntervalLength(const MSFLayout &L) {
  return L.SB->BlockSize;
}

/// Given an MSF with the specified block size and number of blocks, determine
/// how many pieces the specified Fpm is split into.
/// \p BlockSize - the block size of the MSF
/// \p NumBlocks - the total number of blocks in the MSF
/// \p IncludeUnusedFpmData - When true, this will count every block that is
///    both in the file and matches the form of an FPM block, even if some of
///    those FPM blocks are unused (a single FPM block can describe the
///    allocation status of up to 32,768 blocks, although one appears only
///    every 4,096 blocks).  So there are 8x as many blocks that match the
///    form as there are blocks that are necessary to describe the allocation
///    status of the file.  When this parameter is false, these extraneous
///    trailing blocks are not counted.
inline uint32_t getNumFpmIntervals(uint32_t BlockSize, uint32_t NumBlocks,
                                   bool IncludeUnusedFpmData, int FpmNumber) {
  assert(FpmNumber == 1 || FpmNumber == 2);
  if (IncludeUnusedFpmData) {
    // This calculation determines how many times a number of the form
    // BlockSize * k + N appears in the range [0, NumBlocks).  We only need to
    // do this when unused data is included, since the number of blocks dwarfs
    // the number of fpm blocks.
    return divideCeil(NumBlocks - FpmNumber, BlockSize);
  }

  // We want the minimum number of intervals required, where each interval can
  // represent BlockSize * 8 blocks.
  return divideCeil(NumBlocks, 8 * BlockSize);
}
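
// Worked example (editor's note): with BlockSize = 4096 and NumBlocks = 10000,
// each FPM interval can describe 8 * 4096 = 32768 blocks, so the call
// getNumFpmIntervals(4096, 10000, /*IncludeUnusedFpmData=*/false, 1) returns
// divideCeil(10000, 32768) = 1.  With IncludeUnusedFpmData = true it instead
// counts every block of the form 4096 * k + 1 in [0, 10000), i.e.
// divideCeil(10000 - 1, 4096) = 3.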

inline uint32_t getNumFpmIntervals(const MSFLayout &L,
                                   bool IncludeUnusedFpmData = false,
                                   bool AltFpm = false) {
  return getNumFpmIntervals(L.SB->BlockSize, L.SB->NumBlocks,
                            IncludeUnusedFpmData,
                            AltFpm ? L.alternateFpmBlock() : L.mainFpmBlock());
}

Error validateSuperBlock(const SuperBlock &SB);

} // end namespace msf
} // end namespace llvm

#endif // LLVM_DEBUGINFO_MSF_MSFCOMMON_H
//===- IMSFFile.h - Abstract base class for an MSF file ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_MSF_IMSFFILE_H
#define LLVM_DEBUGINFO_MSF_IMSFFILE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {
namespace msf {

class IMSFFile {
public:
  virtual ~IMSFFile() = default;

  virtual uint32_t getBlockSize() const = 0;
  virtual uint32_t getBlockCount() const = 0;

  virtual uint32_t getNumStreams() const = 0;
  virtual uint32_t getStreamByteSize(uint32_t StreamIndex) const = 0;
  virtual ArrayRef<support::ulittle32_t>
  getStreamBlockList(uint32_t StreamIndex) const = 0;

  virtual Expected<ArrayRef<uint8_t>> getBlockData(uint32_t BlockIndex,
                                                   uint32_t NumBytes) const = 0;
  virtual Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
                             ArrayRef<uint8_t> Data) const = 0;
};

} // end namespace msf
} // end namespace llvm

#endif // LLVM_DEBUGINFO_MSF_IMSFFILE_H
//===- MSFBuilder.h - MSF Directory & Metadata Builder ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_MSF_MSFBUILDER_H
#define LLVM_DEBUGINFO_MSF_MSFBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {
class FileBufferByteStream;
namespace msf {

struct MSFLayout;

class MSFBuilder {
public:
  /// Create a new `MSFBuilder`.
  ///
  /// \param BlockSize The internal block size used by the PDB file.  See
  /// isValidBlockSize() for a list of valid block sizes.
  ///
  /// \param MinBlockCount Causes the builder to reserve up front space for
  /// at least `MinBlockCount` blocks.  This is useful when using `MSFBuilder`
  /// to read an existing MSF that you want to write back out later.  The
  /// original MSF file's SuperBlock contains the exact number of blocks used
  /// by the file, so is a good hint as to how many blocks the new MSF file
  /// will contain.  Furthermore, it is actually necessary in this case.  To
  /// preserve stability of the file's layout, it is helpful to try to keep
  /// all streams mapped to their original block numbers.  To ensure that this
  /// is possible, space for all blocks must be allocated beforehand so that
  /// streams can be assigned to them.
  ///
  /// \param CanGrow If true, any operation which results in an attempt to
  /// locate a free block when all available blocks have been exhausted will
  /// allocate a new block, thereby growing the size of the final MSF file.
  /// When false, any such attempt will result in an error.  This is especially
  /// useful in testing scenarios when you know your test isn't going to do
  /// anything to increase the size of the file, so having an Error returned if
  /// it were to happen would catch a programming error.
  ///
  /// \returns an llvm::Error representing whether the operation succeeded or
  /// failed.  Currently the only way this can fail is if an invalid block size
  /// is specified, or `MinBlockCount` does not leave enough room for the
  /// mandatory reserved blocks required by an MSF file.
  static Expected<MSFBuilder> create(BumpPtrAllocator &Allocator,
                                     uint32_t BlockSize,
                                     uint32_t MinBlockCount = 0,
                                     bool CanGrow = true);

  /// Request the block map to be at a specific block address.  This is useful
  /// when editing a MSF and you want the layout to be as stable as possible.
  Error setBlockMapAddr(uint32_t Addr);
  Error setDirectoryBlocksHint(ArrayRef<uint32_t> DirBlocks);
  void setFreePageMap(uint32_t Fpm);
  void setUnknown1(uint32_t Unk1);

  /// Add a stream to the MSF file with the given size, occupying the given
  /// list of blocks.  This is useful when reading a MSF file and you want a
  /// particular stream to occupy the original set of blocks.  If the given
  /// blocks are already allocated, or if the number of blocks specified is
  /// incorrect for the given stream size, this function will return an Error.
  Expected<uint32_t> addStream(uint32_t Size, ArrayRef<uint32_t> Blocks);

  /// Add a stream to the MSF file with the given size, occupying any available
  /// blocks that the builder decides to use.  This is useful when building a
  /// new PDB file from scratch and you don't care what blocks a stream occupies
  /// but you just want it to work.
  Expected<uint32_t> addStream(uint32_t Size);

  /// Update the size of an existing stream.  This will allocate or deallocate
  /// blocks as needed to match the requested size.  This can fail if `CanGrow`
  /// was set to false when initializing the `MSFBuilder`.
  Error setStreamSize(uint32_t Idx, uint32_t Size);

  /// Get the total number of streams in the MSF layout.  This increases by 1
  /// for every successful call to `addStream`.
  uint32_t getNumStreams() const;

  /// Get the size of a stream by index.
  uint32_t getStreamSize(uint32_t StreamIdx) const;

  /// Get the list of blocks allocated to a particular stream.
  ArrayRef<uint32_t> getStreamBlocks(uint32_t StreamIdx) const;

  /// Get the total number of blocks that will be allocated to actual data in
  /// this MSF file.
  uint32_t getNumUsedBlocks() const;

  /// Get the total number of blocks that exist in the MSF file but are not
  /// allocated to any valid data.
  uint32_t getNumFreeBlocks() const;

  /// Get the total number of blocks in the MSF file.  In practice this is equal
  /// to `getNumUsedBlocks() + getNumFreeBlocks()`.
  uint32_t getTotalBlockCount() const;

  /// Check whether a particular block is allocated or free.
  bool isBlockFree(uint32_t Idx) const;

  /// Finalize the layout and build the headers and structures that describe the
  /// MSF layout and can be written directly to the MSF file.
  Expected<MSFLayout> generateLayout();

  /// Write the MSF layout to the underlying file.
  Expected<FileBufferByteStream> commit(StringRef Path, MSFLayout &Layout);

  BumpPtrAllocator &getAllocator() { return Allocator; }

private:
  MSFBuilder(uint32_t BlockSize, uint32_t MinBlockCount, bool CanGrow,
             BumpPtrAllocator &Allocator);

  Error allocateBlocks(uint32_t NumBlocks, MutableArrayRef<uint32_t> Blocks);
  uint32_t computeDirectoryByteSize() const;

  using BlockList = std::vector<uint32_t>;

  BumpPtrAllocator &Allocator;

  bool IsGrowable;
  uint32_t FreePageMap;
  uint32_t Unknown1 = 0;
  uint32_t BlockSize;
  uint32_t BlockMapAddr;
  BitVector FreeBlocks;
  std::vector<uint32_t> DirectoryBlocks;
  std::vector<std::pair<uint32_t, BlockList>> StreamData;
};
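
// Example (editor's sketch): building a minimal MSF with a single stream and
// committing it to disk; `Alloc` is a BumpPtrAllocator.
//
//   Expected<MSFBuilder> Builder =
//       MSFBuilder::create(Alloc, /*BlockSize=*/4096);
//   if (!Builder)
//     return Builder.takeError();
//   Expected<uint32_t> StreamIdx = Builder->addStream(/*Size=*/1024);
//   if (!StreamIdx)
//     return StreamIdx.takeError();
//   Expected<MSFLayout> Layout = Builder->generateLayout();
//   if (!Layout)
//     return Layout.takeError();
//   Expected<FileBufferByteStream> OutFile =
//       Builder->commit("out.msf", *Layout);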

} // end namespace msf
} // end namespace llvm

#endif // LLVM_DEBUGINFO_MSF_MSFBUILDER_H
//===- MSFError.h - Error extensions for MSF Files --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_MSF_MSFERROR_H
#define LLVM_DEBUGINFO_MSF_MSFERROR_H

#include "llvm/Support/Error.h"

namespace llvm {
namespace msf {
enum class msf_error_code {
  unspecified = 1,
  insufficient_buffer,
  not_writable,
  no_stream,
  invalid_format,
  block_in_use,
  size_overflow_4096,
  size_overflow_8192,
  size_overflow_16384,
  size_overflow_32768,
  stream_directory_overflow,
};
} // namespace msf
} // namespace llvm

namespace std {
template <>
struct is_error_code_enum<llvm::msf::msf_error_code> : std::true_type {};
} // namespace std

namespace llvm {
namespace msf {
const std::error_category &MSFErrCategory();

inline std::error_code make_error_code(msf_error_code E) {
  return std::error_code(static_cast<int>(E), MSFErrCategory());
}

/// Base class for errors originating when parsing raw PDB files
class MSFError : public ErrorInfo<MSFError, StringError> {
public:
  using ErrorInfo<MSFError, StringError>::ErrorInfo; // inherit constructors
  MSFError(const Twine &S) : ErrorInfo(S, msf_error_code::unspecified) {}

  bool isPageOverflow() const {
    switch (static_cast<msf_error_code>(convertToErrorCode().value())) {
    case msf_error_code::unspecified:
    case msf_error_code::insufficient_buffer:
    case msf_error_code::not_writable:
    case msf_error_code::no_stream:
    case msf_error_code::invalid_format:
    case msf_error_code::block_in_use:
      return false;
    case msf_error_code::size_overflow_4096:
    case msf_error_code::size_overflow_8192:
    case msf_error_code::size_overflow_16384:
    case msf_error_code::size_overflow_32768:
    case msf_error_code::stream_directory_overflow:
      return true;
    }
    llvm_unreachable("msf error code not implemented");
  }

  static char ID;
};
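
// Example (editor's sketch): reacting to page-overflow conditions when an MSF
// operation fails, given an llvm::Error `E` produced by this library.
//
//   handleAllErrors(std::move(E), [](const MSFError &ME) {
//     if (ME.isPageOverflow())
//       errs() << "MSF page overflow: " << ME.message() << '\n';
//   });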
} // namespace msf
} // namespace llvm

#endif // LLVM_DEBUGINFO_MSF_MSFERROR_H
//==- MappedBlockStream.h - Discontiguous stream data in an MSF --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
#define LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {
namespace msf {

/// MappedBlockStream represents data stored in an MSF file as chunks of a
/// particular size (called the Block Size), whose chunks are not necessarily
/// contiguous.  The arrangement of these chunks within the file
/// is described by some other metadata contained within the MSF file.  In
/// the case of a standard MSF Stream, the layout of the stream's blocks
/// is described by the MSF "directory", but in the case of the directory
/// itself, the layout is described by an array at a fixed location within
/// the MSF.  MappedBlockStream provides methods for reading from and writing
/// to one of these streams transparently, as if it were a contiguous sequence
/// of bytes.
class MappedBlockStream : public BinaryStream {
  friend class WritableMappedBlockStream;

public:
  static std::unique_ptr<MappedBlockStream>
  createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
               BinaryStreamRef MsfData, BumpPtrAllocator &Allocator);

  static std::unique_ptr<MappedBlockStream>
  createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
                      uint32_t StreamIndex, BumpPtrAllocator &Allocator);

  static std::unique_ptr<MappedBlockStream>
  createFpmStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
                  BumpPtrAllocator &Allocator);

  static std::unique_ptr<MappedBlockStream>
  createDirectoryStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
                        BumpPtrAllocator &Allocator);

  support::endianness getEndian() const override {
    return support::little;
  }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override;
  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override;

  uint64_t getLength() override;

  BumpPtrAllocator &getAllocator() { return Allocator; }

  void invalidateCache();

  uint32_t getBlockSize() const { return BlockSize; }
  uint32_t getNumBlocks() const { return StreamLayout.Blocks.size(); }
  uint32_t getStreamLength() const { return StreamLayout.Length; }

protected:
  MappedBlockStream(uint32_t BlockSize, const MSFStreamLayout &StreamLayout,
                    BinaryStreamRef MsfData, BumpPtrAllocator &Allocator);

private:
  const MSFStreamLayout &getStreamLayout() const { return StreamLayout; }
  void fixCacheAfterWrite(uint64_t Offset, ArrayRef<uint8_t> Data) const;

  Error readBytes(uint64_t Offset, MutableArrayRef<uint8_t> Buffer);
  bool tryReadContiguously(uint64_t Offset, uint64_t Size,
                           ArrayRef<uint8_t> &Buffer);

  const uint32_t BlockSize;
  const MSFStreamLayout StreamLayout;
  BinaryStreamRef MsfData;

  using CacheEntry = MutableArrayRef<uint8_t>;

  // We just store the allocator by reference.  We use this to allocate
  // contiguous memory for things like arrays or strings that cross a block
  // boundary, and this memory is expected to outlive the stream.  For example,
  // someone could create a stream, read some stuff, then close the stream, and
  // we would like outstanding references to fields to remain valid since the
  // entire file is mapped anyway.  Because of that, the user must supply the
  // allocator to allocate broken records from.
  BumpPtrAllocator &Allocator;
  DenseMap<uint32_t, std::vector<CacheEntry>> CacheMap;
};
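
// Example (illustrative sketch, not part of the original header): reading the
// first 16 bytes of an MSF stream by index. `Layout` and `MsfData` are assumed
// to come from whatever parsed the MSF container (e.g. a PDB reader).
//
//   llvm::BumpPtrAllocator Alloc;
//   std::unique_ptr<MappedBlockStream> Stream =
//       MappedBlockStream::createIndexedStream(Layout, MsfData,
//                                              /*StreamIndex=*/1, Alloc);
//   llvm::ArrayRef<uint8_t> Bytes;
//   if (llvm::Error E = Stream->readBytes(/*Offset=*/0, /*Size=*/16, Bytes))
//     ...; // reads that cross discontiguous blocks are stitched together via
//          // the Allocator; reads past the end of the stream return an Error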

class WritableMappedBlockStream : public WritableBinaryStream {
public:
  static std::unique_ptr<WritableMappedBlockStream>
  createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
               WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator);

  static std::unique_ptr<WritableMappedBlockStream>
  createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
                      uint32_t StreamIndex, BumpPtrAllocator &Allocator);

  static std::unique_ptr<WritableMappedBlockStream>
  createDirectoryStream(const MSFLayout &Layout,
                        WritableBinaryStreamRef MsfData,
                        BumpPtrAllocator &Allocator);

  static std::unique_ptr<WritableMappedBlockStream>
  createFpmStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
                  BumpPtrAllocator &Allocator, bool AltFpm = false);

  support::endianness getEndian() const override {
    return support::little;
  }

  Error readBytes(uint64_t Offset, uint64_t Size,
                  ArrayRef<uint8_t> &Buffer) override;
  Error readLongestContiguousChunk(uint64_t Offset,
                                   ArrayRef<uint8_t> &Buffer) override;
  uint64_t getLength() override;

  Error writeBytes(uint64_t Offset, ArrayRef<uint8_t> Buffer) override;

  Error commit() override;

  const MSFStreamLayout &getStreamLayout() const {
    return ReadInterface.getStreamLayout();
  }

  uint32_t getBlockSize() const { return ReadInterface.getBlockSize(); }
  uint32_t getNumBlocks() const { return ReadInterface.getNumBlocks(); }
  uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }

protected:
  WritableMappedBlockStream(uint32_t BlockSize,
                            const MSFStreamLayout &StreamLayout,
                            WritableBinaryStreamRef MsfData,
                            BumpPtrAllocator &Allocator);

private:
  MappedBlockStream ReadInterface;
  WritableBinaryStreamRef WriteInterface;
};
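
// Example (illustrative sketch): overwriting the first four bytes of stream 1
// and committing the change to the underlying MSF data. `Layout`,
// `WritableMsfData`, and `Alloc` are assumed names, mirroring the read-only
// example above.
//
//   auto WS = WritableMappedBlockStream::createIndexedStream(
//       Layout, WritableMsfData, /*StreamIndex=*/1, Alloc);
//   const uint8_t Buf[] = {0xDE, 0xAD, 0xBE, 0xEF};
//   if (llvm::Error E = WS->writeBytes(/*Offset=*/0, Buf))
//     ...; // handle the error
//   if (llvm::Error E = WS->commit())
//     ...; // flush pending writes to the backing stream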

} // namespace msf
} // end namespace llvm

#endif // LLVM_DEBUGINFO_MSF_MAPPEDBLOCKSTREAM_H
//===- GsymReader.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_GSYMREADER_H
#define LLVM_DEBUGINFO_GSYM_GSYMREADER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/GSYM/FileEntry.h"
#include "llvm/DebugInfo/GSYM/FunctionInfo.h"
#include "llvm/DebugInfo/GSYM/Header.h"
#include "llvm/DebugInfo/GSYM/LineEntry.h"
#include "llvm/DebugInfo/GSYM/StringTable.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorOr.h"
#include <inttypes.h>
#include <memory>
#include <stdint.h>
#include <vector>

namespace llvm {
class MemoryBuffer;
class raw_ostream;

namespace gsym {

/// GsymReader is used to read GSYM data from a file or buffer.
///
/// This class is optimized for very quick lookups when the endianness matches
/// the host system. The Header, address table, address info offsets, and file
/// table are designed to be mmap'ed as read only into memory and used without
/// any parsing needed. If the endianness doesn't match, we swap these objects
/// and tables into GsymReader::SwappedData and then point our header and
/// ArrayRefs to this swapped internal data.
///
/// GsymReader objects must use one of the static functions to create an
/// instance: GsymReader::openFile(...) or GsymReader::copyBuffer(...).
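///
/// Example (illustrative sketch): a minimal lookup, assuming "a.gsym" exists
/// on disk and 0x1000 falls within its address table:
///
/// \code
///   llvm::Expected<GsymReader> GR = GsymReader::openFile("a.gsym");
///   if (!GR)
///     return GR.takeError();
///   if (llvm::Expected<LookupResult> LR = GR->lookup(0x1000))
///     llvm::outs() << *LR;
///   else
///     return LR.takeError();
/// \endcode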
class GsymReader {
  GsymReader(std::unique_ptr<MemoryBuffer> Buffer);
  llvm::Error parse();

  std::unique_ptr<MemoryBuffer> MemBuffer;
  StringRef GsymBytes;
  llvm::support::endianness Endian;
  const Header *Hdr = nullptr;
  ArrayRef<uint8_t> AddrOffsets;
  ArrayRef<uint32_t> AddrInfoOffsets;
  ArrayRef<FileEntry> Files;
  StringTable StrTab;
  /// When the GSYM file's endianness doesn't match the host system then
  /// we must decode all data structures that need to be swapped into
  /// local storage and point the ArrayRef objects above to these swapped
  /// copies.
  struct SwappedData {
    Header Hdr;
    std::vector<uint8_t> AddrOffsets;
    std::vector<uint32_t> AddrInfoOffsets;
    std::vector<FileEntry> Files;
  };
  std::unique_ptr<SwappedData> Swap;

public:
  GsymReader(GsymReader &&RHS);
  ~GsymReader();

  /// Construct a GsymReader from a file on disk.
  ///
  /// \param Path The file path of the GSYM file to read.
  /// \returns An expected GsymReader that contains the object or an error
  /// object that indicates the reason for failing to read the GSYM.
  static llvm::Expected<GsymReader> openFile(StringRef Path);

  /// Construct a GsymReader from a buffer.
  ///
  /// \param Bytes A set of bytes that will be copied and owned by the
  /// returned object on success.
  /// \returns An expected GsymReader that contains the object or an error
  /// object that indicates the reason for failing to read the GSYM.
  static llvm::Expected<GsymReader> copyBuffer(StringRef Bytes);

  /// Access the GSYM header.
  /// \returns A native endian version of the GSYM header.
  const Header &getHeader() const;

  /// Get the full function info for an address.
  ///
  /// This should be called when a client will store a copy of the complete
  /// FunctionInfo for a given address. For one off lookups, use the lookup()
  /// function below.
  ///
  /// Symbolication server processes might want to parse the entire function
  /// info for a given address and cache it if the process stays around to
  /// service many symbolication requests, such as when parsing profiling
  /// information.
  ///
  /// \param Addr A virtual address from the original object file to look up.
  ///
  /// \returns An expected FunctionInfo that contains the function info object
  /// or an error object that indicates the reason for failing to look up the
  /// address.
  llvm::Expected<FunctionInfo> getFunctionInfo(uint64_t Addr) const;

  /// Look up an address in the GSYM.
  ///
  /// Look up just the information needed for a specific address \a Addr. This
  /// function is faster than calling getFunctionInfo() as it will only return
  /// information that pertains to \a Addr and allows the parsing to skip any
  /// extra information encoded for other addresses. For example, the line table
  /// parsing can stop when a matching LineEntry has been found, and the
  /// InlineInfo can stop parsing early once a match has been found and also
  /// skip information that doesn't match. This avoids memory allocations and
  /// is much faster for lookups.
  ///
  /// \param Addr A virtual address from the original object file to look up.
  /// \returns An expected LookupResult that contains only the information
  /// needed for the current address, or an error object that indicates the
  /// reason for failing to look up the address.
  llvm::Expected<LookupResult> lookup(uint64_t Addr) const;

  /// Get a string from the string table.
  ///
  /// \param Offset The string table offset for the string to retrieve.
  /// \returns The string from the string table.
  StringRef getString(uint32_t Offset) const { return StrTab[Offset]; }

  /// Get the file entry for the supplied file index.
  ///
  /// Used to convert any file indexes in the FunctionInfo data back into
  /// files. This function can be used for iteration, but is more commonly used
  /// for random access when doing lookups.
  ///
  /// \param Index An index into the file table.
  /// \returns An optional FileEntry that will be valid if the file index is
  /// valid, or std::nullopt if the file index is out of bounds.
  std::optional<FileEntry> getFile(uint32_t Index) const {
    if (Index < Files.size())
      return Files[Index];
    return std::nullopt;
  }

  /// Dump the entire Gsym data contained in this object.
  ///
  /// \param  OS The output stream to dump to.
  void dump(raw_ostream &OS);

  /// Dump a FunctionInfo object.
  ///
  /// This function will convert any string table indexes and file indexes
  /// into human readable format.
  ///
  /// \param  OS The output stream to dump to.
  ///
  /// \param FI The object to dump.
  void dump(raw_ostream &OS, const FunctionInfo &FI);

  /// Dump a LineTable object.
  ///
  /// This function will convert any string table indexes and file indexes
  /// into human readable format.
  ///
  ///
  /// \param  OS The output stream to dump to.
  ///
  /// \param LT The object to dump.
  void dump(raw_ostream &OS, const LineTable &LT);

  /// Dump a InlineInfo object.
  ///
  /// This function will convert any string table indexes and file indexes
  /// into human readable format.
  ///
  /// \param  OS The output stream to dump to.
  ///
  /// \param II The object to dump.
  ///
  /// \param Indent The indentation as number of spaces. Used for recursive
  /// dumping.
  void dump(raw_ostream &OS, const InlineInfo &II, uint32_t Indent = 0);

  /// Dump a FileEntry object.
  ///
  /// This function will convert any string table indexes into human readable
  /// format.
  ///
  /// \param  OS The output stream to dump to.
  ///
  /// \param FE The object to dump.
  void dump(raw_ostream &OS, std::optional<FileEntry> FE);

  /// Get the number of addresses in this Gsym file.
  uint32_t getNumAddresses() const {
    return Hdr->NumAddresses;
  }

  /// Gets an address from the address table.
  ///
  /// Addresses are stored as offsets from the gsym::Header::BaseAddress.
  ///
  /// \param Index An index into the address table.
  /// \returns A resolved virtual address for the address in the address table,
  /// or std::nullopt if Index is out of bounds.
  std::optional<uint64_t> getAddress(size_t Index) const;

protected:

  /// Get an appropriately sized address offsets array.
  ///
  /// The address table in the GSYM file is stored as an array of 1, 2, 4 or 8
  /// byte offsets from the gsym::Header::BaseAddress. The table is stored
  /// internally as an array of bytes that are in the correct endianness. When
  /// we access this table we must get an array that matches those sizes. This
  /// templatized helper function is used when accessing address offsets in the
  /// AddrOffsets member variable.
  ///
  /// \returns An ArrayRef of an appropriate address offset size.
  template <class T> ArrayRef<T>
  getAddrOffsets() const {
    return ArrayRef<T>(reinterpret_cast<const T *>(AddrOffsets.data()),
                       AddrOffsets.size()/sizeof(T));
  }

  /// Get an appropriate address from the address table.
  ///
  /// The address table in the GSYM file is stored as an array of 1, 2, 4 or 8
  /// byte address offsets from the gsym::Header::BaseAddress. The table is
  /// stored internally as an array of bytes that are in the correct endianness.
  /// In order to extract an address from the address table we must access the
  /// address offset using the correct size and then add it to the BaseAddress
  /// in the header.
  ///
  /// \param Index An index into the AddrOffsets array.
  /// \returns A virtual address that matches the original object file for the
  /// address at the specified index, or std::nullopt if Index is out of bounds.
  template <class T>
  std::optional<uint64_t> addressForIndex(size_t Index) const {
    ArrayRef<T> AIO = getAddrOffsets<T>();
    if (Index < AIO.size())
      return AIO[Index] + Hdr->BaseAddress;
    return std::nullopt;
  }
  /// Look up an address offset in the AddrOffsets table.
  ///
  /// Given an address offset, look it up using a binary search of the
  /// AddrOffsets table.
  ///
  /// \param AddrOffset An address offset, that has already been computed by
  /// subtracting the gsym::Header::BaseAddress.
  /// \returns The matching address offset index. This index will be used to
  /// extract the FunctionInfo data's offset from the AddrInfoOffsets array.
  template <class T>
  std::optional<uint64_t>
  getAddressOffsetIndex(const uint64_t AddrOffset) const {
    ArrayRef<T> AIO = getAddrOffsets<T>();
    const auto Begin = AIO.begin();
    const auto End = AIO.end();
    auto Iter = std::lower_bound(Begin, End, AddrOffset);
    // Watch for addresses that fall between the gsym::Header::BaseAddress and
    // the first address offset.
    if (Iter == Begin && AddrOffset < *Begin)
      return std::nullopt;
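    // std::lower_bound returns the first entry >= AddrOffset, so if we ran off
    // the end, or landed on an entry strictly greater than AddrOffset, step
    // back to the entry whose range covers AddrOffset.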
    if (Iter == End || AddrOffset < *Iter)
      --Iter;
    return std::distance(Begin, Iter);
  }

  /// Create a GSYM from a memory buffer.
  ///
  /// Called by both openFile() and copyBuffer(), this function does all of the
  /// work of parsing the GSYM file, returning an error if parsing fails.
  ///
  /// \param MemBuffer A memory buffer that will transfer ownership into the
  /// GsymReader.
  /// \returns An expected GsymReader that contains the object or an error
  /// object that indicates the reason for failing to read the GSYM.
  static llvm::Expected<llvm::gsym::GsymReader>
  create(std::unique_ptr<MemoryBuffer> &MemBuffer);


  /// Given an address, find the address index.
  ///
  /// Binary search the address table and find the matching address index.
  ///
  /// \param Addr A virtual address that matches the original object file
  /// to lookup.
  /// \returns An index into the address table. This index can be used to
  /// extract the FunctionInfo data's offset from the AddrInfoOffsets array.
  /// Returns an error if the address isn't in the GSYM with details of why.
  Expected<uint64_t> getAddressIndex(const uint64_t Addr) const;

  /// Given an address index, get the offset for the FunctionInfo.
  ///
  /// Looking up an address is done by finding the corresponding address
  /// index for the address. This index is then used to get the offset of the
  /// FunctionInfo data that we will decode using this function.
  ///
  /// \param Index An index into the address table.
  /// \returns An optional GSYM data offset for the offset of the FunctionInfo
  /// that needs to be decoded.
  std::optional<uint64_t> getAddressInfoOffset(size_t Index) const;
};

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_GSYMREADER_H
//===- Header.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_HEADER_H
#define LLVM_DEBUGINFO_GSYM_HEADER_H

#include "llvm/Support/Error.h"

#include <cstddef>
#include <cstdint>

namespace llvm {
class raw_ostream;
class DataExtractor;

namespace gsym {
class FileWriter;

constexpr uint32_t GSYM_MAGIC = 0x4753594d; // 'GSYM'
constexpr uint32_t GSYM_CIGAM = 0x4d595347; // 'MYSG'
constexpr uint32_t GSYM_VERSION = 1;
constexpr size_t GSYM_MAX_UUID_SIZE = 20;
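
// Illustrative note (not part of the original header): a reader can detect a
// byte swapped GSYM file from the first four bytes alone, since GSYM_CIGAM is
// GSYM_MAGIC with its bytes reversed:
//
//   uint32_t Magic = ...; // first 4 bytes of the file or section
//   bool Swapped = Magic == GSYM_CIGAM;
//   bool IsGsym = Magic == GSYM_MAGIC || Swapped;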

/// The GSYM header.
///
/// The GSYM header is found at the start of a stand alone GSYM file, or as
/// the first bytes in a section when GSYM is contained in a section of an
/// executable file (ELF, mach-o, COFF).
///
/// The structure is encoded exactly as it appears in the structure definition
/// with no gaps between members. Alignment should not change from system to
/// system as the members were laid out so that they shouldn't align
/// differently on different architectures.
///
/// When endianness of the system loading a GSYM file matches, the file can
/// be mmap'ed in and a pointer to the header can be cast to the first bytes
/// of the file (stand alone GSYM file) or section data (GSYM in a section).
/// When endianness is swapped, the Header::decode() function should be used to
/// decode the header.
struct Header {
  /// The magic bytes should be set to GSYM_MAGIC. This helps detect if a file
  /// is a GSYM file by scanning the first 4 bytes of a file or section.
  /// This value might appear byte swapped (as GSYM_CIGAM) if the file's
  /// endianness differs from the host's.
  uint32_t Magic;
  /// The version number determines how the header is decoded and how each
  /// InfoType in FunctionInfo is encoded/decoded. As version numbers increase,
  /// the "Magic" and "Version" members must always remain at offsets zero and 4
  /// respectively so that clients can figure out whether they can parse the
  /// format.
  uint16_t Version;
  /// The size in bytes of each address offset in the address offsets table.
  uint8_t AddrOffSize;
  /// The size in bytes of the UUID encoded in the "UUID" member.
  uint8_t UUIDSize;
  /// The 64 bit base address that all address offsets in the address offsets
  /// table are relative to. Storing a full 64 bit address allows our address
  /// offsets table to be smaller on disk.
  uint64_t BaseAddress;
  /// The number of addresses stored in the address offsets table.
  uint32_t NumAddresses;
  /// The file relative offset of the start of the string table for strings
  /// contained in the GSYM file. If the GSYM is contained in a stand alone
  /// file this will be the file offset of the start of the string table. If
  /// the GSYM is contained in a section within an executable file, this can
  /// be the offset of the first string used in the GSYM file and can possibly
  /// span one or more executable string tables. This allows the strings to
  /// share string tables in an ELF or mach-o file.
  uint32_t StrtabOffset;
  /// The size in bytes of the string table. For a stand alone GSYM file, this
  /// will be the exact size in bytes of the string table. When the GSYM data
  /// is in a section within an executable file, this size can span one or more
  /// sections that contains strings. This allows any strings that are already
  /// stored in the executable file to be re-used, and any extra strings could
  /// be added to another string table and the string table offset and size
  /// can be set to span all needed string tables.
  uint32_t StrtabSize;
  /// The UUID of the original executable file. This is stored to allow
  /// matching a GSYM file to an executable file when symbolication is
  /// required. Only the first "UUIDSize" bytes of the UUID are valid. Any
  /// bytes in the UUID value that appear after the first UUIDSize bytes should
  /// be set to zero.
  uint8_t UUID[GSYM_MAX_UUID_SIZE];

  /// Check if a header is valid and return an error if anything is wrong.
  ///
  /// This function can be used prior to encoding a header to ensure it is
  /// valid, or after decoding a header to ensure it is valid and supported.
  ///
  /// Check a correctly byte swapped header for errors:
  ///   - check magic value
  ///   - check that version number is supported
  ///   - check that the address offset size is supported
  ///   - check that the UUID size is valid
  ///
  /// \returns An error if anything is wrong in the header, or Error::success()
  /// if there are no errors.
  llvm::Error checkForError() const;

  /// Decode an object from a binary data stream.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \returns A Header or an error describing the issue that was
  /// encountered during decoding.
  static llvm::Expected<Header> decode(DataExtractor &Data);

  /// Encode this object into FileWriter stream.
  ///
  /// \param O The binary stream to write the data to at the current file
  /// position.
  ///
  /// \returns An error object that indicates success or failure of the
  /// encoding process.
  llvm::Error encode(FileWriter &O) const;
};

bool operator==(const Header &LHS, const Header &RHS);
raw_ostream &operator<<(raw_ostream &OS, const llvm::gsym::Header &H);
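
// Example (illustrative sketch): decoding and validating a header, assuming
// `Bytes` is a StringRef over the start of a little endian GSYM file:
//
//   llvm::DataExtractor Data(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
//   llvm::Expected<Header> Hdr = Header::decode(Data);
//   if (!Hdr)
//     return Hdr.takeError(); // truncated, corrupt, or unsupported header
//   llvm::outs() << *Hdr;     // operator<< is declared above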

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_HEADER_H
//===- GsymCreator.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_GSYMCREATOR_H
#define LLVM_DEBUGINFO_GSYM_GSYMCREATOR_H

#include <functional>
#include <memory>
#include <mutex>
#include <thread>

#include "llvm/ADT/AddressRanges.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/GSYM/FileEntry.h"
#include "llvm/DebugInfo/GSYM/FunctionInfo.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Path.h"

namespace llvm {

namespace gsym {
class FileWriter;

/// GsymCreator is used to emit GSYM data to a stand alone file or section
/// within a file.
///
/// The GsymCreator is designed to be used in 3 stages:
/// - Create FunctionInfo objects and add them
/// - Finalize the GsymCreator object
/// - Save to file or section
///
/// The first stage involves creating FunctionInfo objects from another source
/// of information like compiler debug info metadata, DWARF or Breakpad files.
/// Any strings in the FunctionInfo or contained information, like InlineInfo
/// or LineTable objects, should get the string table offsets by calling
/// GsymCreator::insertString(...). Any file indexes that are needed should be
/// obtained by calling GsymCreator::insertFile(...). All of the function calls
/// in GsymCreator are thread safe. This allows multiple threads to create and
/// add FunctionInfo objects while parsing debug information.
///
/// Once all of the FunctionInfo objects have been added, the
/// GsymCreator::finalize(...) must be called prior to saving. This function
/// will sort the FunctionInfo objects, finalize the string table, and do any
/// other passes on the information needed to prepare the information to be
/// saved.
///
/// Once the object has been finalized, it can be saved to a file or section.
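///
/// Example (illustrative sketch of the three stages, using made-up values;
/// raw_null_ostream simply discards the finalize() report):
///
/// \code
///   GsymCreator GC;
///   FunctionInfo FI(/*Addr=*/0x1000, /*Size=*/0x100,
///                   GC.insertString("main"));
///   GC.addFunctionInfo(std::move(FI));
///   llvm::raw_null_ostream Discard;
///   if (llvm::Error Err = GC.finalize(Discard))
///     ...; // handle the error
///   if (llvm::Error Err = GC.save("a.gsym", llvm::support::little))
///     ...; // handle the error
/// \endcode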
///
/// ENCODING
///
/// GSYM files are designed to be memory mapped into a process as shared, read
/// only data, and used as is.
///
/// The GSYM file format when in a stand alone file consists of:
///   - Header
///   - Address Table
///   - Function Info Offsets
///   - File Table
///   - String Table
///   - Function Info Data
///
/// HEADER
///
/// The header is fully described in "llvm/DebugInfo/GSYM/Header.h".
///
/// ADDRESS TABLE
///
/// The address table immediately follows the header in the file and consists
/// of Header.NumAddresses address offsets. These offsets are sorted and can be
/// binary searched for efficient lookups. Addresses in the address table are
/// stored as offsets from a 64 bit base address found in Header.BaseAddress.
/// This allows the address table to contain 8, 16, or 32 bit offsets rather
/// than a full 64 bit address for each entry. The resulting GSYM file is
/// smaller, and the smaller address table causes fewer pages to be touched
/// during address lookups. The size of the
/// address offsets in the address table is specified in the header in
/// Header.AddrOffSize. The first offset in the address table is aligned to
/// Header.AddrOffSize alignment to ensure efficient access when loaded into
/// memory.
///
/// FUNCTION INFO OFFSETS TABLE
///
/// The function info offsets table immediately follows the address table and
/// consists of Header.NumAddresses 32 bit file offsets: one for each address
/// in the address table. This data is aligned to a 4 byte boundary. The
/// offsets in this table are the relative offsets from the start offset of the
/// GSYM header and point to the function info data for each address in the
/// address table. Keeping this data separate from the address table helps to
/// reduce the number of pages that are touched when address lookups occur on a
/// GSYM file.
///
/// FILE TABLE
///
/// The file table immediately follows the function info offsets table. The
/// encoding of the FileTable is:
///
/// struct FileTable {
///   uint32_t Count;
///   FileEntry Files[];
/// };
///
/// The file table starts with a 32 bit count of the number of files that are
/// used in all of the function info, followed by that number of FileEntry
/// structures. The file table is aligned to a 4 byte boundary. Each file in
/// the file table is represented with a FileEntry structure.
/// See "llvm/DebugInfo/GSYM/FileEntry.h" for details.
///
/// STRING TABLE
///
/// The string table follows the file table in stand alone GSYM files and
/// contains all strings for everything contained in the GSYM file. Any string
/// data should be added to the string table and any references to strings
/// inside GSYM information must be stored as 32 bit string table offsets into
/// this string table. The string table always starts with an empty string at
/// offset zero and is followed by any strings needed by the GSYM information.
/// The start of the string table is not aligned to any boundary.
///
/// FUNCTION INFO DATA
///
/// The function info data is the payload that contains information about the
/// address that is being looked up. It contains all of the encoded
/// FunctionInfo objects. Each encoded FunctionInfo's data is pointed to by an
/// entry in the Function Info Offsets Table. For details on the exact encoding
/// of FunctionInfo objects, see "llvm/DebugInfo/GSYM/FunctionInfo.h".
class GsymCreator {
  // Private member variables require Mutex protection.
  mutable std::mutex Mutex;
  std::vector<FunctionInfo> Funcs;
  StringTableBuilder StrTab;
  StringSet<> StringStorage;
  DenseMap<llvm::gsym::FileEntry, uint32_t> FileEntryToIndex;
  // Needed for mapping string offsets back to the string stored in \a StrTab.
  DenseMap<uint64_t, CachedHashStringRef> StringOffsetMap;
  std::vector<llvm::gsym::FileEntry> Files;
  std::vector<uint8_t> UUID;
  std::optional<AddressRanges> ValidTextRanges;
  AddressRanges Ranges;
  std::optional<uint64_t> BaseAddress;
  bool Finalized = false;
  bool Quiet;


  /// Get the first function start address.
  ///
  /// \returns The start address of the first FunctionInfo or std::nullopt if
  /// there are no function infos.
  std::optional<uint64_t> getFirstFunctionAddress() const;

  /// Get the last function address.
  ///
  /// \returns The start address of the last FunctionInfo or std::nullopt if
  /// there are no function infos.
  std::optional<uint64_t> getLastFunctionAddress() const;

  /// Get the base address to use for this GSYM file.
  ///
  /// \returns The base address to put into the header and to use when creating
  ///          the address offset table, or std::nullopt if there are no valid
  ///          function infos or if the base address wasn't specified.
  std::optional<uint64_t> getBaseAddress() const;

  /// Get the size of an address offset in the address offset table.
  ///
  /// GSYM files store offsets from the base address in the address offset table
  /// and we store the size of the address offsets in the GSYM header. This
  /// function will calculate the size in bytes of these address offsets based
  /// on the current contents of the GSYM file.
  ///
  /// \returns The size in bytes of the address offsets.
  uint8_t getAddressOffsetSize() const;

  /// Get the maximum address offset for the current address offset size.
  ///
  /// This is used when creating the address offset table to ensure we have
  /// values that are in range so we don't end up truncating address offsets
  /// when creating GSYM files as the code evolves.
  ///
  /// \returns The maximum address offset value that will be encoded into a GSYM
  /// file.
  uint64_t getMaxAddressOffset() const;

  /// Calculate the byte size of the GSYM header and tables sizes.
  ///
  /// This function will calculate the exact size in bytes of the encoded GSYM
  /// for the following items:
  /// - The GSYM header
  /// - The Address offset table
  /// - The Address info offset table
  /// - The file table
  /// - The string table
  ///
  /// This is used to help split GSYM files into segments.
  ///
  /// \returns Size in bytes of the GSYM header and tables.
  uint64_t calculateHeaderAndTableSize() const;

  /// Copy a FunctionInfo from the \a SrcGC GSYM creator into this creator.
  ///
  /// Copy the function info and only the needed files and strings and add a
  /// converted FunctionInfo into this object. This is used to segment GSYM
  /// files into separate files while only transferring the files and strings
  /// that are needed from \a SrcGC.
  ///
  /// \param SrcGC The source gsym creator to copy from.
  /// \param FuncInfoIdx The function info index within \a SrcGC to copy.
  /// \returns The number of bytes it will take to encode the function info in
  /// this GsymCreator. This helps calculate the size of the current GSYM
  /// segment file.
  uint64_t copyFunctionInfo(const GsymCreator &SrcGC, size_t FuncInfoIdx);

  /// Copy a string from \a SrcGC into this object.
  ///
  /// Copy a string from \a SrcGC by string table offset into this GSYM creator.
  /// If a string has already been copied, the uniqued string table offset will
  /// be returned, otherwise the string will be copied and a unique offset will
  /// be returned.
  ///
  /// \param SrcGC The source gsym creator to copy from.
  /// \param StrOff The string table offset from \a SrcGC to copy.
  /// \returns The new string table offset of the string within this object.
  uint32_t copyString(const GsymCreator &SrcGC, uint32_t StrOff);

  /// Copy a file from \a SrcGC into this object.
  ///
  /// Copy a file from \a SrcGC by file index into this GSYM creator. Files
  /// consist of two string table entries, one for the directory and one for the
  /// filename. This function will copy any needed strings to ensure the file is
  /// uniqued within this object. If a file already exists in this GSYM creator
  /// the uniqued index will be returned, else the strings will be copied and
  /// the new file index will be returned.
  ///
  /// \param SrcGC The source gsym creator to copy from.
  /// \param FileIdx The 1 based file table index within \a SrcGC to copy. A
  /// file index of zero will always return zero, as zero is a reserved file
  /// index that means no file.
  /// \returns The new file index of the file within this object.
  uint32_t copyFile(const GsymCreator &SrcGC, uint32_t FileIdx);

  /// Inserts a FileEntry into the file table.
  ///
  /// This is used to insert a file entry in a thread safe way into this object.
  ///
  /// \param FE A file entry object that contains valid string table offsets
  /// from this object already.
  /// \returns The unique index of the file entry within the file table.
  uint32_t insertFileEntry(FileEntry FE);

  /// Fixup any string and file references by updating any file indexes and
  /// string offsets in the InlineInfo parameter.
  ///
  /// When copying InlineInfo entries, we can simply make a copy of the object
  /// and then fixup the files and strings for efficiency.
  ///
  /// \param SrcGC The source gsym creator to copy from.
  /// \param II The inline info that contains file indexes and string offsets
  /// that come from \a SrcGC. The entries will be updated by copying any files
  /// and strings over into this object.
  void fixupInlineInfo(const GsymCreator &SrcGC, InlineInfo &II);

  /// Save this GSYM file into segments that are roughly \a SegmentSize in size.
  ///
  /// When segmented GSYM files are saved to disk, they will use \a Path as a
  /// prefix and then have the first function info address appended to the path
  /// when each segment is saved. Each segmented GSYM file has only the
  /// strings and files that are needed to save the function infos that are in
  /// each segment. These smaller files are easy to compress and download
  /// separately, and they allow for efficient lookups with very large GSYM
  /// files since servers can download only the segments that are needed.
  ///
  /// \param Path The path prefix to use when saving the GSYM files.
  /// \param ByteOrder The endianness to use when saving the file.
  /// \param SegmentSize The size in bytes to segment the GSYM file into.
  llvm::Error saveSegments(StringRef Path,
                           llvm::support::endianness ByteOrder,
                           uint64_t SegmentSize) const;

public:
  GsymCreator(bool Quiet = false);

  /// Save a GSYM file to a stand alone file.
  ///
  /// \param Path The file path to save the GSYM file to.
  /// \param ByteOrder The endianness to use when saving the file.
  /// \param SegmentSize The size in bytes to segment the GSYM file into. If
  ///                    this option is set this function will create N segments
  ///                    that are all around \a SegmentSize bytes in size. This
  ///                    allows a very large GSYM file to be broken up into
  ///                    shards. Each GSYM file will have its own file table and
  ///                    string table containing only the files and strings
  ///                    needed for that shard. If this argument has no value,
  ///                    a single GSYM file that contains all function
  ///                    information will be created.
  /// \returns An error object that indicates success or failure of the save.
  llvm::Error save(StringRef Path, llvm::support::endianness ByteOrder,
                   std::optional<uint64_t> SegmentSize = std::nullopt) const;

  /// Encode a GSYM into the file writer stream at the current position.
  ///
  /// \param O The stream to save the binary data to.
  /// \returns An error object that indicates success or failure of the save.
  llvm::Error encode(FileWriter &O) const;

  /// Insert a string into the GSYM string table.
  ///
  /// All strings used by GSYM files must be uniqued by adding them to this
  /// string pool and using the returned offset for any string values.
  ///
  /// \param S The string to insert into the string table.
  /// \param Copy If true, then make a backing copy of the string. If false,
  ///             the string is owned by another object that will stay around
  ///             long enough for the GsymCreator to save the GSYM file.
  /// \returns The unique 32 bit offset into the string table.
  uint32_t insertString(StringRef S, bool Copy = true);

  /// Insert a file into this GSYM creator.
  ///
  /// Inserts a file by adding a FileEntry into the "Files" member variable if
  /// the file has not already been added. The file path is split into
  /// directory and filename which are both added to the string table. This
  /// allows paths to be stored efficiently by reusing the directories that are
  /// common between multiple files.
  ///
  /// \param   Path The path to the file to insert.
  /// \param   Style The path style for the "Path" parameter.
  /// \returns The unique file index for the inserted file.
  uint32_t insertFile(StringRef Path,
                      sys::path::Style Style = sys::path::Style::native);

  /// Add a function info to this GSYM creator.
  ///
  /// All information in the FunctionInfo object must use the
  /// GsymCreator::insertString(...) function when creating string table
  /// offsets for names and other strings.
  ///
  /// \param   FI The function info object to emplace into our functions list.
  void addFunctionInfo(FunctionInfo &&FI);

  /// Finalize the data in the GSYM creator prior to saving the data out.
  ///
  /// Finalize must be called after all FunctionInfo objects have been added
  /// and before GsymCreator::save() is called.
  ///
  /// \param  OS Output stream to report duplicate function infos, overlapping
  ///         function infos, and function infos that were merged or removed.
  /// \returns An error object that indicates success or failure of the
  ///          finalize.
  llvm::Error finalize(llvm::raw_ostream &OS);

  /// Set the UUID value.
  ///
  /// \param UUIDBytes The new UUID bytes.
  void setUUID(llvm::ArrayRef<uint8_t> UUIDBytes) {
    UUID.assign(UUIDBytes.begin(), UUIDBytes.end());
  }

  /// Thread safe iteration over all function infos.
  ///
  /// \param  Callback A callback function that will get called with each
  ///         FunctionInfo. If the callback returns false, stop iterating.
  void forEachFunctionInfo(
      std::function<bool(FunctionInfo &)> const &Callback);

  /// Thread safe const iteration over all function infos.
  ///
  /// \param  Callback A callback function that will get called with each
  ///         FunctionInfo. If the callback returns false, stop iterating.
  void forEachFunctionInfo(
      std::function<bool(const FunctionInfo &)> const &Callback) const;

  /// Get the current number of FunctionInfo objects contained in this
  /// object.
  size_t getNumFunctionInfos() const;

  /// Check if an address has already been added as a function info.
  ///
  /// FunctionInfo data can come from many sources: debug info, symbol tables,
  /// exception information, and more. Symbol tables should be added after
  /// debug info and can use this function to see if a symbol's start address
  /// has already been added to the GsymCreator. Calling this before adding
  /// a function info from a source other than debug info avoids clients adding
  /// many redundant FunctionInfo objects from many sources only for them to be
  /// removed during the finalize() call.
  bool hasFunctionInfoForAddress(uint64_t Addr) const;

  /// Set valid .text address ranges that all functions must be contained in.
  void SetValidTextRanges(AddressRanges &TextRanges) {
    ValidTextRanges = TextRanges;
  }

  /// Get the valid text ranges.
  const std::optional<AddressRanges> GetValidTextRanges() const {
    return ValidTextRanges;
  }

  /// Check if an address is a valid code address.
  ///
  /// Any functions whose addresses do not exist within these function bounds
  /// will not be converted into the final GSYM. This allows the object file
  /// to figure out the valid file address ranges of all the code sections
  /// and ensure we don't add invalid functions to the final output. Many
  /// linkers have issues when dead stripping functions from DWARF debug info
  /// where they set the DW_AT_low_pc to zero, but newer DWARF has the
  /// DW_AT_high_pc as an offset from the DW_AT_low_pc and these size
  /// attributes have no relocations that can be applied. This results in DWARF
  /// where many functions have a DW_AT_low_pc of zero and a valid offset size
  /// for DW_AT_high_pc. If we extract all valid ranges from an object file
  /// that are marked with executable permissions, we can properly ensure that
  /// these functions are removed.
  ///
  /// \param Addr An address to check.
  ///
  /// \returns True if the address is in the valid text ranges or if no valid
  ///          text ranges have been set, false otherwise.
  bool IsValidTextAddress(uint64_t Addr) const;

  /// Set the base address to use for the GSYM file.
  ///
  /// Object files typically get loaded at a base address when the OS loads
  /// them into memory. Using
  /// GSYM files for symbolication becomes easier if the base address in the
  /// GSYM header is the same address as it allows addresses to be easily slid
  /// and allows symbolication without needing to find the original base
  /// address in the original object file.
  ///
  /// \param  Addr The address to use as the base address of the GSYM file
  ///              when it is saved to disk.
  void setBaseAddress(uint64_t Addr) {
    BaseAddress = Addr;
  }

  /// Whether the transformation should be quiet, i.e. not output warnings.
  bool isQuiet() const { return Quiet; }


  /// Create a segmented GSYM creator starting with function info index
  /// \a FuncIdx.
  ///
  /// This function will create a GsymCreator object that will encode into
  /// roughly \a SegmentSize bytes and return it. It is used by the private
  /// saveSegments(...) function and is also used by the GSYM unit tests to test
  /// segmenting of GSYM files. The returned GsymCreator can be finalized and
  /// encoded.
  ///
  /// \param [in] SegmentSize The size in bytes to roughly segment the GSYM file
  /// into.
  /// \param [in,out] FuncIdx The index of the first function info to encode
  /// into the returned GsymCreator. This index will be updated so it can be
  /// used in subsequent calls to this function to allow more segments to be
  /// created.
  /// \returns An expected unique pointer to a GsymCreator or an error. The
  /// returned unique pointer can be null if there are no more functions to
  /// encode.
  llvm::Expected<std::unique_ptr<GsymCreator>>
  createSegment(uint64_t SegmentSize, size_t &FuncIdx) const;
};

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_GSYMCREATOR_H
//===- LookupResult.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_LOOKUPRESULT_H
#define LLVM_DEBUGINFO_GSYM_LOOKUPRESULT_H

#include "llvm/ADT/AddressRanges.h"
#include "llvm/ADT/StringRef.h"
#include <inttypes.h>
#include <vector>

namespace llvm {
class raw_ostream;
namespace gsym {

struct SourceLocation {
  StringRef Name;      ///< Function or symbol name.
  StringRef Dir;       ///< Line entry source file directory path.
  StringRef Base;      ///< Line entry source file basename.
  uint32_t Line = 0;   ///< Source file line number.
  uint32_t Offset = 0; ///< Byte size offset within the named function.
};

inline bool operator==(const SourceLocation &LHS, const SourceLocation &RHS) {
  return LHS.Name == RHS.Name && LHS.Dir == RHS.Dir && LHS.Base == RHS.Base &&
         LHS.Line == RHS.Line && LHS.Offset == RHS.Offset;
}

raw_ostream &operator<<(raw_ostream &OS, const SourceLocation &R);

using SourceLocations = std::vector<SourceLocation>;

struct LookupResult {
  uint64_t LookupAddr = 0; ///< The address that this lookup pertains to.
  AddressRange FuncRange;  ///< The concrete function address range.
  StringRef FuncName; ///< The concrete function name that contains LookupAddr.
  /// The source locations that match this address. This information will only
  /// be filled in if the FunctionInfo contains a line table. If an address is
  /// for a concrete function with no inlined functions, this array will have
  /// one entry. If an address points to an inline function, there will be one
  /// SourceLocation for each inlined function with the last entry pointing to
  /// the concrete function itself. This allows one address to generate
  /// multiple locations and allows unwinding of inline call stacks. The
  /// deepest inline function will appear at index zero in the source locations
  /// array, and the concrete function will appear at the end of the array.
  SourceLocations Locations;
  std::string getSourceFile(uint32_t Index) const;
};
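
// Example (illustrative sketch): walking the inline call stack in a lookup
// result `LR`, deepest inline frame first, concrete function last:
//
//   for (const SourceLocation &SL : LR.Locations)
//     llvm::outs() << SL.Name << " @ " << SL.Base << ":" << SL.Line << "\n";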

inline bool operator==(const LookupResult &LHS, const LookupResult &RHS) {
  if (LHS.LookupAddr != RHS.LookupAddr)
    return false;
  if (LHS.FuncRange != RHS.FuncRange)
    return false;
  if (LHS.FuncName != RHS.FuncName)
    return false;
  return LHS.Locations == RHS.Locations;
}

raw_ostream &operator<<(raw_ostream &OS, const LookupResult &R);

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_LOOKUPRESULT_H
//===- LineTable.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_LINETABLE_H
#define LLVM_DEBUGINFO_GSYM_LINETABLE_H

#include "llvm/DebugInfo/GSYM/LineEntry.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {
namespace gsym {

struct FunctionInfo;
class FileWriter;

/// LineTable class contains deserialized versions of line tables for each
/// function's address ranges.
///
/// When saved to disk, the line table is encoded using a modified version of
/// the DWARF line tables that only tracks address to source file and line.
///
/// ENCODING
///
/// The line table starts with a small prolog that contains the following
/// values:
///
/// ENCODING NAME        DESCRIPTION
/// ======== =========== ====================================================
/// SLEB     MinDelta    The min line delta for special opcodes that  advance
///                      the address and line number.
/// SLEB     MaxDelta    The max line delta for single byte opcodes that
///                      advance the address and line number.
/// ULEB     FirstLine   The value of the first source line number to
///                      initialize the LineEntry with.
///
/// Once these prolog items are read, we initialize a LineEntry struct with
/// the start address of the function from the FunctionInfo's address range,
/// a default file index of 1, and the line number set to "FirstLine" from
/// the prolog above:
///
///   LineEntry Row(BaseAddr, 1, FirstLine);
///
/// The line table state machine is now initialized and ready to be parsed.
/// The stream that follows this encodes the line entries in a compact
/// form. Some opcodes cause "Row" to be modified and some opcodes may also
/// push "Row" onto the end of the "LineTable.Lines" vector. The end result
/// is a vector of LineEntry structs that is sorted in ascending address
/// order.
///
/// NORMAL OPCODES
///
/// The opcodes 0 through 3 are normal opcodes. Their encoding and
/// descriptions are listed below:
///
/// ENCODING ENUMERATION       VALUE DESCRIPTION
/// ======== ================  ===== ========================================
///          LTOC_EndSequence  0x00  Parsing is done.
/// ULEB     LTOC_SetFile      0x01  Row.File = ULEB
/// ULEB     LTOC_AdvancePC    0x02  Row.Addr += ULEB, push "Row".
/// SLEB     LTOC_AdvanceLine  0x03  Row.Line += SLEB
///          LTOC_FirstSpecial 0x04  First special opcode (see SPECIAL
///                                  OPCODES below).
///
/// SPECIAL OPCODES
///
/// Opcodes LTOC_FirstSpecial through 255 are special opcodes that always
/// increment both the Row.Addr and Row.Line and push "Row" onto the
/// LineEntry.Lines array. They do this by using some of the bits to
/// increment/decrement the source line number, and some of the bits to
/// increment the address. Line numbers can go up or down when making line
/// tables, where addresses always only increase since line tables are sorted
/// by address.
///
/// In order to calculate the amount to increment the line and address for
/// these special opcodes, we calculate the number of values reserved for the
/// line increment/decrement using the "MinDelta" and "MaxDelta" from the
/// prolog:
///
///     const int64_t LineRange = MaxDelta - MinDelta + 1;
///
/// Then we can adjust the opcode to not include any of the normal opcodes:
///
///     const uint8_t AdjustedOp = Opcode - LTOC_FirstSpecial;
///
/// And we can calculate the line offset, and address offset:
///
///     const int64_t LineDelta = MinDelta + (AdjustedOp % LineRange);
///     const uint64_t AddrDelta = (AdjustedOp / LineRange);
///
/// And use these to modify our "Row":
///
///     Row.Line += LineDelta;
///     Row.Addr += AddrDelta;
///
/// And push a row onto the line table:
///
///     Lines.push_back(Row);
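///
/// Worked example (illustrative, with assumed prolog values): if MinDelta is
/// -4 and MaxDelta is 10, then LineRange is 15. Opcode 0x20 gives
/// AdjustedOp = 0x20 - 0x04 = 28, so LineDelta = -4 + (28 % 15) = 9 and
/// AddrDelta = 28 / 15 = 1: the row's line advances by 9, its address by 1,
/// and the row is pushed onto the line table.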
///
/// This is very similar to the way that DWARF encodes its line tables. The
/// only difference is the DWARF line tables have more normal opcodes and the
/// "Row" contains more members, like source column number, bools for end of
/// prologue, beginning of epilogue, is statement, and many others. There are
/// also more complex rules that happen for the extra normal opcodes. By
/// leaving these extra opcodes out, we leave more bits for the special
/// opcodes that allows us to encode line tables in fewer bytes than standard
/// DWARF encodings.
///
/// Opcodes that will push "Row" onto the LineEntry.Lines include the
/// LTOC_AdvancePC opcode and all special opcodes. All other opcodes
/// only modify the current "Row", or cause the line table to end.
class LineTable {
  typedef std::vector<gsym::LineEntry> Collection;
  Collection Lines; ///< All line entries in the line table.
public:
  /// Look up a single address within a line table's data.
  ///
  /// Clients have the option to decode an entire line table using
  /// LineTable::decode() or just find a single matching entry using this
  /// function. The benefit of using this function is that parsed LineEntry
  /// objects that do not match will not be stored in an array. This will avoid
  /// memory allocation costs and parsing can stop once a match has been found.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the LineTable object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \param BaseAddr The base address to use when decoding the line table.
  /// This will be the FunctionInfo's start address and will be used to
  /// initialize the line table row prior to parsing any opcodes.
  ///
  /// \returns A LineEntry object if a match is found, or an error otherwise.
  static Expected<LineEntry> lookup(DataExtractor &Data, uint64_t BaseAddr,
                                    uint64_t Addr);

  /// Decode a LineTable object from a binary data stream.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the LineTable object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \param BaseAddr The base address to use when decoding the line table.
  /// This will be the FunctionInfo's start address and will be used to
  /// initialize the line table row prior to parsing any opcodes.
  ///
  /// \returns A LineTable or an error describing the issue that was
  /// encountered during decoding.
  static llvm::Expected<LineTable> decode(DataExtractor &Data,
                                          uint64_t BaseAddr);
  /// Encode this LineTable object into FileWriter stream.
  ///
  /// \param O The binary stream to write the data to at the current file
  /// position.
  ///
  /// \param BaseAddr The base address to use when encoding the line table.
  /// This will be the FunctionInfo's start address.
  ///
  /// \returns An error object that indicates success or failure of the
  /// encoding process.
  llvm::Error encode(FileWriter &O, uint64_t BaseAddr) const;
  bool empty() const { return Lines.empty(); }
  void clear() { Lines.clear(); }
  /// Return the first line entry if the line table isn't empty.
  ///
  /// \returns An optional line entry with the first line entry if the line
  /// table isn't empty, or std::nullopt if the line table is empty.
  std::optional<LineEntry> first() const {
    if (Lines.empty())
      return std::nullopt;
    return Lines.front();
  }
  /// Return the last line entry if the line table isn't empty.
  ///
  /// \returns An optional line entry with the last line entry if the line
  /// table isn't empty, or std::nullopt if the line table is empty.
  std::optional<LineEntry> last() const {
    if (Lines.empty())
      return std::nullopt;
    return Lines.back();
  }
  void push(const LineEntry &LE) {
    Lines.push_back(LE);
  }
  size_t isValid() const {
    return !Lines.empty();
  }
  size_t size() const {
    return Lines.size();
  }
  LineEntry &get(size_t i) {
    assert(i < Lines.size());
    return Lines[i];
  }
  const LineEntry &get(size_t i) const {
    assert(i < Lines.size());
    return Lines[i];
  }
  LineEntry &operator[](size_t i) {
    return get(i);
  }
  const LineEntry &operator[](size_t i) const {
    return get(i);
  }
  bool operator==(const LineTable &RHS) const {
    return Lines == RHS.Lines;
  }
  bool operator!=(const LineTable &RHS) const {
    return Lines != RHS.Lines;
  }
  bool operator<(const LineTable &RHS) const {
    const auto LHSSize = Lines.size();
    const auto RHSSize = RHS.Lines.size();
    if (LHSSize == RHSSize)
      return Lines < RHS.Lines;
    return LHSSize < RHSSize;
  }
  Collection::const_iterator begin() const { return Lines.begin(); }
  Collection::const_iterator end() const { return Lines.end(); }

};

raw_ostream &operator<<(raw_ostream &OS, const gsym::LineTable &LT);

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_LINETABLE_H
//===- FunctionInfo.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_FUNCTIONINFO_H
#define LLVM_DEBUGINFO_GSYM_FUNCTIONINFO_H

#include "llvm/ADT/SmallString.h"
#include "llvm/DebugInfo/GSYM/ExtractRanges.h"
#include "llvm/DebugInfo/GSYM/InlineInfo.h"
#include "llvm/DebugInfo/GSYM/LineTable.h"
#include "llvm/DebugInfo/GSYM/LookupResult.h"
#include "llvm/DebugInfo/GSYM/StringTable.h"
#include <cstdint>
#include <tuple>

namespace llvm {
class raw_ostream;

namespace gsym {

class GsymReader;
/// Function information in GSYM files encodes information for one contiguous
/// address range. If a function has discontiguous address ranges, they will
/// need to be encoded using multiple FunctionInfo objects.
///
/// ENCODING
///
/// The function information gets the function start address as an argument
/// to the FunctionInfo::decode(...) function. This information is calculated
/// from the GSYM header and an address offset from the GSYM address offsets
/// table. The encoded FunctionInfo information must be aligned to a 4 byte
/// boundary.
///
/// The encoded data for a FunctionInfo starts with fixed data that all
/// function info objects have:
///
/// ENCODING  NAME        DESCRIPTION
/// ========= =========== ====================================================
/// uint32_t  Size        The size in bytes of this function.
/// uint32_t  Name        The string table offset of the function name.
///
/// The optional data in a FunctionInfo object follows this fixed information
/// and consists of a stream of tuples that consist of:
///
/// ENCODING  NAME        DESCRIPTION
/// ========= =========== ====================================================
/// uint32_t  InfoType    An "InfoType" enumeration that describes the type
///                       of optional data that is encoded.
/// uint32_t  InfoLength  The size in bytes of the encoded data that
///                       immediately follows this length if this value is
///                       greater than zero.
/// uint8_t[] InfoData    Encoded bytes that represent the data for the
///                       "InfoType". These bytes are only present if
///                       "InfoLength" is greater than zero.
///
/// The "InfoType" is an enumeration:
///
///   enum InfoType {
///     EndOfList = 0u,
///     LineTableInfo = 1u,
///     InlineInfo = 2u
///   };
///
/// This stream of tuples is terminated by a "InfoType" whose value is
/// InfoType::EndOfList and a zero for "InfoLength". This signifies the end of
/// the optional information list. This format allows us to add new optional
/// information data to a FunctionInfo object over time and allows older
/// clients to still parse the format and skip over any data that they don't
/// understand or want to parse.
///
/// So the function information encoding essentially looks like:
///
/// struct {
///   uint32_t Size;
///   uint32_t Name;
///   struct {
///     uint32_t InfoType;
///     uint32_t InfoLength;
///     uint8_t InfoData[InfoLength];
///   }[N];
/// }
///
/// Where "N" is the number of tuples.
struct FunctionInfo {
  AddressRange Range;
  uint32_t Name; ///< String table offset in the string table.
  std::optional<LineTable> OptLineTable;
  std::optional<InlineInfo> Inline;
  /// If we encode a FunctionInfo during segmenting so we know its size, we can
  /// cache that encoding here so we don't need to re-encode it when saving the
  /// GSYM file.
  SmallString<32> EncodingCache;

  FunctionInfo(uint64_t Addr = 0, uint64_t Size = 0, uint32_t N = 0)
      : Range(Addr, Addr + Size), Name(N) {}

  /// Query if a FunctionInfo has rich debug info.
  ///
  /// \returns A bool that indicates if this object has more than just the
  /// range and name. When converting information from a symbol table and from
  /// debug info, we might end up with multiple FunctionInfo objects for the
  /// same range and we need to be able to tell which one is the better object
  /// to use.
  bool hasRichInfo() const { return OptLineTable || Inline; }

  /// Query if a FunctionInfo object is valid.
  ///
  /// Address and size can be zero and there can be no line entries for a
  /// symbol so the only indication this entry is valid is if the name is
  /// not zero. This can happen when extracting information from symbol
  /// tables that do not encode symbol sizes. In that case only the
  /// address and name will be filled in.
  ///
  /// \returns A boolean indicating if this FunctionInfo is valid.
  bool isValid() const {
    return Name != 0;
  }

  /// Decode an object from a binary data stream.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \param BaseAddr The FunctionInfo's start address; it will be used as the
  /// base address when decoding any contained information like the line table
  /// and the inline info.
  ///
  /// \returns A FunctionInfo or an error describing the issue that was
  /// encountered during decoding.
  static llvm::Expected<FunctionInfo> decode(DataExtractor &Data,
                                             uint64_t BaseAddr);

  /// Encode this object into the FileWriter stream.
  ///
  /// \param O The binary stream to write the data to at the current file
  /// position.
  ///
  /// \returns An error object that indicates failure or the offset of the
  /// function info that was successfully written into the stream.
  llvm::Expected<uint64_t> encode(FileWriter &O) const;

  /// Encode this function info into the internal byte cache and return the size
  /// in bytes.
  ///
  /// When segmenting GSYM files we need to know how big each FunctionInfo will
  /// encode into so we can generate segments of the right size. We don't want
  /// to have to encode a FunctionInfo twice, so we can cache the encoded bytes
  /// and re-use them when calling FunctionInfo::encode(...).
  ///
  /// \returns The size in bytes of the FunctionInfo if it were to be encoded
  /// into a byte stream.
  uint64_t cacheEncoding();

  /// Lookup an address within a FunctionInfo object's data stream.
  ///
  /// Instead of decoding an entire FunctionInfo object when doing lookups,
  /// we can decode only the information we need from the FunctionInfo's data
  /// for the specific address. The lookup result information is returned as
  /// a LookupResult.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \param GR The GSYM reader that contains the string and file table that
  /// will be used to fill in information in the returned result.
  ///
  /// \param FuncAddr The function start address decoded from the GsymReader.
  ///
  /// \param Addr The address to lookup.
  ///
  /// \returns A LookupResult or an error describing the issue that was
  /// encountered during decoding. An error should only be returned if the
  /// address is not contained in the FunctionInfo or if the data is corrupted.
  static llvm::Expected<LookupResult> lookup(DataExtractor &Data,
                                             const GsymReader &GR,
                                             uint64_t FuncAddr,
                                             uint64_t Addr);

  uint64_t startAddress() const { return Range.start(); }
  uint64_t endAddress() const { return Range.end(); }
  uint64_t size() const { return Range.size(); }

  void clear() {
    Range = {0, 0};
    Name = 0;
    OptLineTable = std::nullopt;
    Inline = std::nullopt;
  }
};

inline bool operator==(const FunctionInfo &LHS, const FunctionInfo &RHS) {
  return LHS.Range == RHS.Range && LHS.Name == RHS.Name &&
         LHS.OptLineTable == RHS.OptLineTable && LHS.Inline == RHS.Inline;
}
inline bool operator!=(const FunctionInfo &LHS, const FunctionInfo &RHS) {
  return !(LHS == RHS);
}
/// This sorting orders FunctionInfo objects consistently by address range
/// first, and then by the presence of inline information and line tables. We
/// might end up with a FunctionInfo from debug info that has the same range
/// as one from the symbol table, and this ordering lets us quickly sort and
/// use the best version when creating the final GSYM file.
inline bool operator<(const FunctionInfo &LHS, const FunctionInfo &RHS) {
  // First sort by address range
  if (LHS.Range != RHS.Range)
    return LHS.Range < RHS.Range;

  // Then sort by inline
  if (LHS.Inline.has_value() != RHS.Inline.has_value())
    return RHS.Inline.has_value();

  return LHS.OptLineTable < RHS.OptLineTable;
}

raw_ostream &operator<<(raw_ostream &OS, const FunctionInfo &R);

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_FUNCTIONINFO_H
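
// Illustrative sketch, not part of the original header: decoding one
// FunctionInfo from raw bytes. `Bytes` and `FuncStartAddr` are hypothetical
// inputs; in a real reader they come from the GSYM header and the address
// offsets table, as described in the encoding comment above.
#include "llvm/DebugInfo/GSYM/FunctionInfo.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

static void dumpFunctionInfo(llvm::StringRef Bytes, uint64_t FuncStartAddr) {
  using namespace llvm;
  // The encoded FunctionInfo must start at offset zero of the extractor.
  DataExtractor Data(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
  Expected<gsym::FunctionInfo> FI =
      gsym::FunctionInfo::decode(Data, FuncStartAddr);
  if (!FI) {
    logAllUnhandledErrors(FI.takeError(), errs(), "FunctionInfo: ");
    return;
  }
  outs() << "range = " << FI->Range << ", rich info = "
         << (FI->hasRichInfo() ? "yes" : "no") << "\n";
}
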
//===- LineEntry.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_LINEENTRY_H
#define LLVM_DEBUGINFO_GSYM_LINEENTRY_H

#include "llvm/DebugInfo/GSYM/ExtractRanges.h"

namespace llvm {
namespace gsym {

/// Line entries are used to encode the line tables in FunctionInfo objects.
/// They are stored as a sorted vector of these objects and store the
/// address, file and line of the line table row for a given address. The
/// size of a line table entry is calculated by looking at the next entry
/// in the FunctionInfo's vector of entries.
struct LineEntry {
  uint64_t Addr; ///< Start address of this line entry.
  uint32_t File; ///< 1 based index of file in FileTable
  uint32_t Line; ///< Source line number.
  LineEntry(uint64_t A = 0, uint32_t F = 0, uint32_t L = 0)
      : Addr(A), File(F), Line(L) {}
  bool isValid() { return File != 0; }
};

inline raw_ostream &operator<<(raw_ostream &OS, const LineEntry &LE) {
  return OS << "addr=" << HEX64(LE.Addr) << ", file=" << format("%3u", LE.File)
      << ", line=" << format("%3u", LE.Line);
}

inline bool operator==(const LineEntry &LHS, const LineEntry &RHS) {
  return LHS.Addr == RHS.Addr && LHS.File == RHS.File && LHS.Line == RHS.Line;
}
inline bool operator!=(const LineEntry &LHS, const LineEntry &RHS) {
  return !(LHS == RHS);
}
inline bool operator<(const LineEntry &LHS, const LineEntry &RHS) {
  return LHS.Addr < RHS.Addr;
}
} // namespace gsym
} // namespace llvm
#endif // LLVM_DEBUGINFO_GSYM_LINEENTRY_H
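
// Illustrative sketch of the sizing rule described above: the byte size of
// row I is implied by row I+1's start address. `FuncEndAddr` is a
// hypothetical bound for the final row (e.g. the end of the FunctionInfo's
// range).
#include "llvm/DebugInfo/GSYM/LineEntry.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <vector>

static void dumpRowSizes(const std::vector<llvm::gsym::LineEntry> &Rows,
                         uint64_t FuncEndAddr) {
  for (size_t I = 0, N = Rows.size(); I != N; ++I) {
    const uint64_t End = (I + 1 < N) ? Rows[I + 1].Addr : FuncEndAddr;
    llvm::outs() << Rows[I] << ", size=" << (End - Rows[I].Addr) << "\n";
  }
}
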
//===- ObjectFileTransformer.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_OBJECTFILETRANSFORMER_H
#define LLVM_DEBUGINFO_GSYM_OBJECTFILETRANSFORMER_H

#include "llvm/Support/Error.h"

namespace llvm {

class raw_ostream;

namespace object {
class ObjectFile;
}

namespace gsym {

class GsymCreator;

class ObjectFileTransformer {
public:
  /// Extract any object file data that is needed by the GsymCreator.
  ///
  /// The extracted information includes the UUID of the binary and converting
  /// all function symbols from any symbol tables into FunctionInfo objects.
  ///
  /// \param Obj The object file that contains the DWARF debug info.
  ///
  /// \param Log The stream to log warnings and non fatal issues to.
  ///
  /// \param Gsym The GSYM creator to populate with the function information
  /// from the debug info.
  ///
  /// \returns An error indicating any fatal issues that happen when parsing
  /// the DWARF, or Error::success() if all goes well.
  static llvm::Error convert(const object::ObjectFile &Obj,
                             raw_ostream &Log,
                             GsymCreator &Gsym);
};

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_OBJECTFILETRANSFORMER_H
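
// Illustrative sketch with assumptions: GsymCreator comes from
// "llvm/DebugInfo/GSYM/GsymCreator.h" (not shown in this listing) and is
// assumed default-constructible; the program links against LLVM's Object
// and DebugInfoGSYM libraries.
#include "llvm/DebugInfo/GSYM/GsymCreator.h"
#include "llvm/DebugInfo/GSYM/ObjectFileTransformer.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Error convertSymbolTable(llvm::StringRef Path) {
  using namespace llvm;
  // Open the object file and hand its symbols to a GsymCreator.
  auto BinOrErr = object::ObjectFile::createObjectFile(Path);
  if (!BinOrErr)
    return BinOrErr.takeError();
  gsym::GsymCreator Gsym;
  return gsym::ObjectFileTransformer::convert(*BinOrErr->getBinary(), errs(),
                                              Gsym);
}
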
//===- DwarfTransformer.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_DWARFTRANSFORMER_H
#define LLVM_DEBUGINFO_GSYM_DWARFTRANSFORMER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/GSYM/ExtractRanges.h"
#include "llvm/Support/Error.h"

namespace llvm {

class raw_ostream;
class DWARFContext;
class DWARFDie;

namespace gsym {

struct CUInfo;
struct FunctionInfo;
class GsymCreator;

/// A class that transforms the DWARF in a DWARFContext into GSYM information
/// by populating the GsymCreator object that it is constructed with. This
/// class supports converting all DW_TAG_subprogram DIEs into
/// gsym::FunctionInfo objects that include line table information and inline
/// function information. Creating a separate class to transform this data
/// allows this class to be unit tested.
class DwarfTransformer {
public:

  /// Create a DWARF transformer.
  ///
  /// \param D The DWARF to use when converting to GSYM.
  ///
  /// \param OS The stream to log warnings and non fatal issues to.
  ///
  /// \param G The GSYM creator to populate with the function information
  /// from the debug info.
  DwarfTransformer(DWARFContext &D, raw_ostream &OS, GsymCreator &G) :
      DICtx(D), Log(OS), Gsym(G) {}

  /// Extract the DWARF from the supplied object file and convert it into the
  /// Gsym format in the GsymCreator object that is passed in. Returns an
  /// error if something fatal is encountered.
  ///
  /// \returns An error indicating any fatal issues that happen when parsing
  /// the DWARF, or Error::success() if all goes well.
  llvm::Error convert(uint32_t NumThreads);

  llvm::Error verify(StringRef GsymPath);


private:

  /// Parse the DWARF in the object file and convert it into the GsymCreator.
  Error parse();

  /// Handle any DIE (debug info entry) from the DWARF.
  ///
  /// This function will find all DW_TAG_subprogram DIEs, convert them into
  /// GSYM FunctionInfo objects, and add them to the GsymCreator supplied during
  /// construction. The DIE and all its children will be recursively parsed
  /// with calls to this function.
  ///
  /// \param Strm The thread specific log stream for any non fatal errors and
  /// warnings. Once a thread has finished parsing an entire compile unit, all
  /// information in this temporary stream will be forwarded to the member
  /// variable log. This keeps logging thread safe.
  ///
  /// \param CUI The compile unit specific information that contains the DWARF
  /// line table, cached file list, and other compile unit specific
  /// information.
  ///
  /// \param Die The DWARF debug info entry to parse.
  void handleDie(raw_ostream &Strm, CUInfo &CUI, DWARFDie Die);

  DWARFContext &DICtx;
  raw_ostream &Log;
  GsymCreator &Gsym;

  friend class DwarfTransformerTest;
};

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_DWARFTRANSFORMER_H
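
// Illustrative sketch under the same assumptions as the previous example
// (GsymCreator.h and the DebugInfoDWARF library are available): wiring a
// DWARFContext into the transformer and running a single-threaded
// conversion.
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/GSYM/DwarfTransformer.h"
#include "llvm/DebugInfo/GSYM/GsymCreator.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

static llvm::Error convertDwarf(const llvm::object::ObjectFile &Obj) {
  using namespace llvm;
  std::unique_ptr<DWARFContext> DICtx = DWARFContext::create(Obj);
  gsym::GsymCreator Gsym;
  gsym::DwarfTransformer DT(*DICtx, errs(), Gsym);
  return DT.convert(/*NumThreads=*/1);
}
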
//===- Range.h --------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_RANGE_H
#define LLVM_DEBUGINFO_GSYM_RANGE_H

#include "llvm/ADT/Optional.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <stdint.h>
#include <vector>

#define HEX8(v) llvm::format_hex(v, 4)
#define HEX16(v) llvm::format_hex(v, 6)
#define HEX32(v) llvm::format_hex(v, 10)
#define HEX64(v) llvm::format_hex(v, 18)

namespace llvm {
class DataExtractor;
class raw_ostream;

namespace gsym {

class FileWriter;

/// A class that represents an address range. The range is specified using
/// a start and an end address.
struct AddressRange {
  uint64_t Start;
  uint64_t End;
  AddressRange() : Start(0), End(0) {}
  AddressRange(uint64_t S, uint64_t E) : Start(S), End(E) {}
  uint64_t size() const { return End - Start; }
  bool contains(uint64_t Addr) const { return Start <= Addr && Addr < End; }
  bool intersects(const AddressRange &R) const {
    return Start < R.End && R.Start < End;
  }

  bool operator==(const AddressRange &R) const {
    return Start == R.Start && End == R.End;
  }
  bool operator!=(const AddressRange &R) const {
    return !(*this == R);
  }
  bool operator<(const AddressRange &R) const {
    return std::make_pair(Start, End) < std::make_pair(R.Start, R.End);
  }
  /// AddressRange objects are encoded and decoded to be relative to a base
  /// address. This will be the FunctionInfo's start address if the AddressRange
  /// is directly contained in a FunctionInfo, or a base address of the
  /// containing parent AddressRange or AddressRanges. This allows address
  /// ranges to be efficiently encoded using ULEB128 encodings as we encode the
  /// offset and size of each range instead of full addresses. This also makes
  /// encoded addresses easy to relocate as we just need to relocate one base
  /// address.
  /// @{
  void decode(DataExtractor &Data, uint64_t BaseAddr, uint64_t &Offset);
  void encode(FileWriter &O, uint64_t BaseAddr) const;
  /// @}

  /// Skip an address range object in the specified data at the specified
  /// offset.
  ///
  /// \param Data The binary stream to read the data from.
  ///
  /// \param Offset The byte offset within \a Data.
  static void skip(DataExtractor &Data, uint64_t &Offset);
};

raw_ostream &operator<<(raw_ostream &OS, const AddressRange &R);

/// The AddressRanges class helps normalize address range collections.
/// This class keeps a sorted vector of AddressRange objects and can perform
/// insertions and searches efficiently. The address ranges are always sorted
/// and never contain any invalid or empty address ranges. This allows us to
/// emit address ranges into the GSYM file efficiently. Intersecting address
/// ranges are combined during insertion so that we can emit the most compact
/// representation for address ranges when writing to disk.
class AddressRanges {
protected:
  using Collection = std::vector<AddressRange>;
  Collection Ranges;
public:
  void clear() { Ranges.clear(); }
  bool empty() const { return Ranges.empty(); }
  bool contains(uint64_t Addr) const;
  bool contains(AddressRange Range) const;
  Optional<AddressRange> getRangeThatContains(uint64_t Addr) const;
  void insert(AddressRange Range);
  size_t size() const { return Ranges.size(); }
  bool operator==(const AddressRanges &RHS) const {
    return Ranges == RHS.Ranges;
  }
  const AddressRange &operator[](size_t i) const {
    assert(i < Ranges.size());
    return Ranges[i];
  }
  Collection::const_iterator begin() const { return Ranges.begin(); }
  Collection::const_iterator end() const { return Ranges.end(); }

  /// Address ranges are decoded and encoded to be relative to a base address.
  /// See the AddressRange comment for the encode and decode methods for full
  /// details.
  /// @{
  void decode(DataExtractor &Data, uint64_t BaseAddr, uint64_t &Offset);
  void encode(FileWriter &O, uint64_t BaseAddr) const;
  /// @}

  /// Skip an address range object in the specified data at the specified
  /// offset.
  ///
  /// \param Data The binary stream to read the data from.
  ///
  /// \param Offset The byte offset within \a Data.
  ///
  /// \returns The number of address ranges that were skipped.
  static uint64_t skip(DataExtractor &Data, uint64_t &Offset);
};

raw_ostream &operator<<(raw_ostream &OS, const AddressRanges &AR);

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_RANGE_H
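
// Illustrative sketch: per the class comment above, AddressRanges coalesces
// intersecting ranges on insert, which keeps the eventual on-disk
// representation minimal.
#include "llvm/DebugInfo/GSYM/Range.h"
#include "llvm/Support/raw_ostream.h"

static void demoAddressRanges() {
  using namespace llvm;
  gsym::AddressRanges Ranges;
  Ranges.insert(gsym::AddressRange(0x1000, 0x1080));
  Ranges.insert(gsym::AddressRange(0x1050, 0x1100)); // intersects: merged
  outs() << "count = " << Ranges.size() << "\n";     // prints "count = 1"
  outs() << "contains 0x10ff: "
         << (Ranges.contains(0x10ff) ? "yes" : "no") << "\n";
}
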
//===- ExtractRanges.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_EXTRACTRANGES_H
#define LLVM_DEBUGINFO_GSYM_EXTRACTRANGES_H

#include "llvm/ADT/AddressRanges.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <stdint.h>
#include <vector>

#define HEX8(v) llvm::format_hex(v, 4)
#define HEX16(v) llvm::format_hex(v, 6)
#define HEX32(v) llvm::format_hex(v, 10)
#define HEX64(v) llvm::format_hex(v, 18)

namespace llvm {
class DataExtractor;
class raw_ostream;

namespace gsym {

class FileWriter;

/// AddressRange objects are encoded and decoded to be relative to a base
/// address. This will be the FunctionInfo's start address if the AddressRange
/// is directly contained in a FunctionInfo, or a base address of the
/// containing parent AddressRange or AddressRanges. This allows address
/// ranges to be efficiently encoded using ULEB128 encodings as we encode the
/// offset and size of each range instead of full addresses. This also makes
/// encoded addresses easy to relocate as we just need to relocate one base
/// address.
/// @{
AddressRange decodeRange(DataExtractor &Data, uint64_t BaseAddr,
                         uint64_t &Offset);
void encodeRange(const AddressRange &Range, FileWriter &O, uint64_t BaseAddr);
/// @}

/// Skip an address range object in the specified data at the specified
/// offset.
///
/// \param Data The binary stream to read the data from.
///
/// \param Offset The byte offset within \a Data.
void skipRange(DataExtractor &Data, uint64_t &Offset);

/// Address ranges are decoded and encoded to be relative to a base address.
/// See the AddressRange comment for the encode and decode methods for full
/// details.
/// @{
void decodeRanges(AddressRanges &Ranges, DataExtractor &Data, uint64_t BaseAddr,
                  uint64_t &Offset);
void encodeRanges(const AddressRanges &Ranges, FileWriter &O,
                  uint64_t BaseAddr);
/// @}

/// Skip an address range object in the specified data at the specified
/// offset.
///
/// \param Data The binary stream to read the data from.
///
/// \param Offset The byte offset within \a Data.
///
/// \returns The number of address ranges that were skipped.
uint64_t skipRanges(DataExtractor &Data, uint64_t &Offset);

} // namespace gsym

raw_ostream &operator<<(raw_ostream &OS, const AddressRange &R);

raw_ostream &operator<<(raw_ostream &OS, const AddressRanges &AR);

} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_EXTRACTRANGES_H
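
// Illustrative round-trip sketch: encode a range relative to a base address
// (ULEB128 offset and size, per the comment above), then decode it back
// from the same buffer. The helper name is hypothetical.
#include "llvm/ADT/SmallString.h"
#include "llvm/DebugInfo/GSYM/ExtractRanges.h"
#include "llvm/DebugInfo/GSYM/FileWriter.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/raw_ostream.h"

static llvm::AddressRange roundTripRange(const llvm::AddressRange &R,
                                         uint64_t BaseAddr) {
  using namespace llvm;
  SmallString<32> Buffer;
  raw_svector_ostream OS(Buffer);
  gsym::FileWriter FW(OS, support::endianness::little);
  gsym::encodeRange(R, FW, BaseAddr);
  DataExtractor Data(Buffer.str(), /*IsLittleEndian=*/true, /*AddressSize=*/8);
  uint64_t Offset = 0;
  return gsym::decodeRange(Data, BaseAddr, Offset);
}
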
//===- FileWriter.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_FILEWRITER_H
#define LLVM_DEBUGINFO_GSYM_FILEWRITER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Endian.h"

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>

namespace llvm {
class raw_pwrite_stream;

namespace gsym {

/// A simplified binary data writer class that doesn't require targets, target
/// definitions, architectures, or require any other optional compile time
/// libraries to be enabled via the build process. This class needs the ability
/// to seek to different spots in the binary stream that it produces to fix up
/// offsets and sizes.
class FileWriter {
  llvm::raw_pwrite_stream &OS;
  llvm::support::endianness ByteOrder;
public:
  FileWriter(llvm::raw_pwrite_stream &S, llvm::support::endianness B)
      : OS(S), ByteOrder(B) {}
  ~FileWriter();
  /// Write a single uint8_t value into the stream at the current file
  /// position.
  ///
  /// \param   Value The value to write into the stream.
  void writeU8(uint8_t Value);

  /// Write a single uint16_t value into the stream at the current file
  /// position. The value will be byte swapped if needed to match the byte
  /// order specified during construction.
  ///
  /// \param   Value The value to write into the stream.
  void writeU16(uint16_t Value);

  /// Write a single uint32_t value into the stream at the current file
  /// position. The value will be byte swapped if needed to match the byte
  /// order specified during construction.
  ///
  /// \param   Value The value to write into the stream.
  void writeU32(uint32_t Value);

  /// Write a single uint64_t value into the stream at the current file
  /// position. The value will be byte swapped if needed to match the byte
  /// order specified during construction.
  ///
  /// \param   Value The value to write into the stream.
  void writeU64(uint64_t Value);

  /// Write the value into the stream encoded using signed LEB128 at the
  /// current file position.
  ///
  /// \param   Value The value to write into the stream.
  void writeSLEB(int64_t Value);

  /// Write the value into the stream encoded using unsigned LEB128 at the
  /// current file position.
  ///
  /// \param   Value The value to write into the stream.
  void writeULEB(uint64_t Value);

  /// Write an array of uint8_t values into the stream at the current file
  /// position.
  ///
  /// \param   Data An array of values to write into the stream.
  void writeData(llvm::ArrayRef<uint8_t> Data);

  /// Write a NULL terminated C string into the stream at the current file
  /// position. The entire contents of Str will be written into the stream at
  /// the current file position and then an extra NULL termination byte will be
  /// written. It is up to the user to ensure that Str doesn't contain any NULL
  /// characters unless the additional NULL characters are desired.
  ///
  /// \param   Str The value to write into the stream.
  void writeNullTerminated(llvm::StringRef Str);

  /// Fixup a uint32_t value at the specified offset in the stream. This
  /// function will save the current file position, seek to the specified
  /// offset, overwrite the data using Value, and then restore the file
  /// position to the previous file position.
  ///
  /// \param   Value The value to write into the stream.
  /// \param   Offset The offset at which to write the Value within the stream.
  void fixup32(uint32_t Value, uint64_t Offset);

  /// Pad with zeroes at the current file position until the current file
  /// position matches the specified alignment.
  ///
  /// \param  Align An integer specifying the desired alignment. This does not
  ///         need to be a power of two.
  void alignTo(size_t Align);

  /// Return the current offset within the file.
  ///
  /// \return The unsigned offset from the start of the file of the current
  ///         file position.
  uint64_t tell();

  llvm::raw_pwrite_stream &get_stream() {
    return OS;
  }

  llvm::support::endianness getByteOrder() const { return ByteOrder; }

private:
  FileWriter(const FileWriter &rhs) = delete;
  void operator=(const FileWriter &rhs) = delete;
};

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_FILEWRITER_H
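
// Illustrative sketch, not part of the original header: reserving a 32-bit
// size field and back-patching it with fixup32() once the payload length is
// known. raw_svector_ostream is a raw_pwrite_stream, so seeking works.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/DebugInfo/GSYM/FileWriter.h"
#include "llvm/Support/raw_ostream.h"

static void writeSizedBlob(llvm::ArrayRef<uint8_t> Payload,
                           llvm::SmallString<64> &Buffer) {
  llvm::raw_svector_ostream OS(Buffer);
  llvm::gsym::FileWriter FW(OS, llvm::support::endianness::little);
  const uint64_t SizeOffset = FW.tell();
  FW.writeU32(0); // placeholder, patched below once the size is known
  FW.writeData(Payload);
  FW.fixup32(static_cast<uint32_t>(Payload.size()), SizeOffset);
}
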
//===- StringTable.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_STRINGTABLE_H
#define LLVM_DEBUGINFO_GSYM_STRINGTABLE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/GSYM/ExtractRanges.h"
#include <stdint.h>

namespace llvm {
namespace gsym {

/// String tables in GSYM files are required to start with an empty
/// string at offset zero. Strings must be UTF8 NULL terminated strings.
struct StringTable {
  StringRef Data;
  StringTable() = default;
  StringTable(StringRef D) : Data(D) {}
  StringRef operator[](size_t Offset) const { return getString(Offset); }
  StringRef getString(uint32_t Offset) const {
    if (Offset < Data.size()) {
      auto End = Data.find('\0', Offset);
      return Data.substr(Offset, End - Offset);
    }
    return StringRef();
  }
  void clear() { Data = StringRef(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const StringTable &S) {
  OS << "String table:\n";
  uint32_t Offset = 0;
  const size_t Size = S.Data.size();
  while (Offset < Size) {
    StringRef Str = S.getString(Offset);
    OS << HEX32(Offset) << ": \"" << Str << "\"\n";
    Offset += Str.size() + 1;
  }
  return OS;
}

} // namespace gsym
} // namespace llvm
#endif // LLVM_DEBUGINFO_GSYM_STRINGTABLE_H
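
// Illustrative sketch: a GSYM string table is a blob of NUL-terminated
// strings starting with an empty string at offset zero; getString() returns
// the string beginning at a byte offset.
#include "llvm/DebugInfo/GSYM/StringTable.h"
#include "llvm/Support/raw_ostream.h"

static void demoStringTable() {
  using namespace llvm;
  // Offsets: 0 -> "", 1 -> "main", 6 -> "foo".
  StringRef Blob("\0main\0foo\0", 10);
  gsym::StringTable ST(Blob);
  outs() << ST.getString(1) << " " << ST[6] << "\n"; // prints "main foo"
}
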
//===- InlineInfo.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_INLINEINFO_H
#define LLVM_DEBUGINFO_GSYM_INLINEINFO_H

#include "llvm/DebugInfo/GSYM/ExtractRanges.h"
#include "llvm/DebugInfo/GSYM/LineEntry.h"
#include "llvm/DebugInfo/GSYM/LookupResult.h"
#include "llvm/Support/Error.h"
#include <stdint.h>
#include <vector>

namespace llvm {
class raw_ostream;

namespace gsym {

class GsymReader;
/// Inline information stores the name of the inline function along with
/// an array of address ranges. It also stores the call file and call line
/// that called this inline function. This allows us to unwind inline call
/// stacks back to the inline or concrete function that called this
/// function. Inlined functions contained in this function are stored in the
/// "Children" variable. All address ranges must be sorted and all address
/// ranges of all children must be contained in the ranges of this function.
/// Any clients that encode information will need to ensure the ranges are
/// all contained correctly or lookups could fail. All ranges in these objects
/// must be contained in the top level FunctionInfo address ranges as well.
///
/// ENCODING
///
/// When saved to disk, the inline info encodes all ranges to be relative to
/// a parent address range. This will be the FunctionInfo's start address if
/// the InlineInfo is directly contained in a FunctionInfo, or the start
/// address of the containing parent InlineInfo's first "Ranges" member. This
/// allows address ranges to be efficiently encoded using ULEB128 encodings as
/// we encode the offset and size of each range instead of full addresses. This
/// also makes any encoded addresses easy to relocate as we just need to
/// relocate the FunctionInfo's start address.
///
/// - The AddressRanges member "Ranges" is encoded using an appropriate base
///   address as described above.
/// - UINT8 boolean value that specifies if the InlineInfo object has children.
/// - UINT32 string table offset that points to the name of the inline
///   function.
/// - ULEB128 integer that specifies the file of the call site that called
///   this function.
/// - ULEB128 integer that specifies the source line of the call site that
///   called this function.
/// - If this object has children, encode each child InlineInfo using the
///   first address range's start address as the base address.
///
struct InlineInfo {

  uint32_t Name; ///< String table offset in the string table.
  uint32_t CallFile; ///< 1 based file index in the file table.
  uint32_t CallLine; ///< Source line number.
  AddressRanges Ranges;
  std::vector<InlineInfo> Children;
  InlineInfo() : Name(0), CallFile(0), CallLine(0) {}
  void clear() {
    Name = 0;
    CallFile = 0;
    CallLine = 0;
    Ranges.clear();
    Children.clear();
  }
  bool isValid() const { return !Ranges.empty(); }

  using InlineArray = std::vector<const InlineInfo *>;

  /// Lookup a single address within the inline info data.
  ///
  /// Clients have the option to decode an entire InlineInfo object (using
  /// InlineInfo::decode() ) or just find the matching inline info using this
  /// function. The benefit of using this function is that only the information
  /// needed for the lookup will be extracted, other info can be skipped and
  /// parsing can stop as soon as the deepest match is found. This allows
  /// symbolication tools to be fast and efficient and avoid allocation costs
  /// when doing lookups.
  ///
  /// This function will augment the SourceLocations array \a SrcLocs with any
  /// inline information that pertains to \a Addr. If no inline information
  /// exists for \a Addr, then \a SrcLocs will be left untouched. If there is
  /// inline information for \a Addr, then \a SrcLocs will be modified to
  /// contain the deepest inline function's SourceLocation at index zero
  /// in the array, proceeding up to the concrete function's source file and
  /// line at the end of the array.
  ///
  /// \param GR The GSYM reader that contains the string and file table that
  /// will be used to fill in the source locations.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the LineTable object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \param BaseAddr The base address to use when decoding the line table.
  /// This will be the FunctionInfo's start address and will be used to
  /// decode the correct addresses for the inline information.
  ///
  /// \param Addr The address to lookup.
  ///
  /// \param SrcLocs The inline source locations that matches \a Addr. This
  ///                array must be initialized with the matching line entry
  ///                from the line table upon entry. The name of the concrete
  ///                function must be supplied since it will get pushed to
  ///                the last SourceLocation entry and the inline information
  ///                will fill in the source file and line from the inline
  ///                information.
  ///
  /// \returns An error if the inline information is corrupt, or
  ///          Error::success() for all other cases, even when no information
  ///          is added to \a SrcLocs.
  static llvm::Error lookup(const GsymReader &GR, DataExtractor &Data,
                            uint64_t BaseAddr, uint64_t Addr,
                            SourceLocations &SrcLocs);

  /// Lookup an address in the InlineInfo object
  ///
  /// This function is used to symbolicate an inline call stack and can
  /// turn one address in the program into one or more inline call stacks
  /// and have the stack trace show the original call site from
  /// non-inlined code.
  ///
  /// \param Addr the address to lookup
  ///
  /// \returns An optional vector of InlineInfo objects that describe the
  /// inline call stack for the given address, or std::nullopt otherwise.
  std::optional<InlineArray> getInlineStack(uint64_t Addr) const;

  /// Decode an InlineInfo object from a binary data stream.
  ///
  /// \param Data The binary stream to read the data from. This object must
  /// have the data for the InlineInfo object starting at offset zero. The data
  /// can contain more data than needed.
  ///
  /// \param BaseAddr The base address to use when decoding all address ranges.
  /// This will be the FunctionInfo's start address if this object is directly
  /// contained in a FunctionInfo object, or the start address of the parent
  /// InlineInfo's first address range if this object is a child of another
  /// InlineInfo object.
  /// \returns An InlineInfo or an error describing the issue that was
  /// encountered during decoding.
  static llvm::Expected<InlineInfo> decode(DataExtractor &Data,
                                           uint64_t BaseAddr);

  /// Encode this InlineInfo object into the FileWriter stream.
  ///
  /// \param O The binary stream to write the data to at the current file
  /// position.
  ///
  /// \param BaseAddr The base address to use when encoding all address ranges.
  /// This will be the FunctionInfo's start address if this object is directly
  /// contained in a FunctionInfo object, or the start address of the parent
  /// InlineInfo's first address range if this object is a child of another
  /// InlineInfo object.
  ///
  /// \returns An error object that indicates success or failure of the
  /// encoding process.
  llvm::Error encode(FileWriter &O, uint64_t BaseAddr) const;
};

inline bool operator==(const InlineInfo &LHS, const InlineInfo &RHS) {
  return LHS.Name == RHS.Name && LHS.CallFile == RHS.CallFile &&
         LHS.CallLine == RHS.CallLine && LHS.Ranges == RHS.Ranges &&
         LHS.Children == RHS.Children;
}

raw_ostream &operator<<(raw_ostream &OS, const InlineInfo &FI);

} // namespace gsym
} // namespace llvm

#endif // LLVM_DEBUGINFO_GSYM_INLINEINFO_H
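
// Illustrative sketch, not part of the original header: building a tiny
// inline tree by hand and querying the inline call stack for an address.
// The child's ranges are contained in the parent's, as the comment above
// requires. Assumes linking against LLVM's DebugInfoGSYM library.
#include "llvm/DebugInfo/GSYM/InlineInfo.h"
#include "llvm/Support/raw_ostream.h"

static void demoInlineStack() {
  using namespace llvm;
  gsym::InlineInfo Concrete; // concrete function covering [0x100, 0x200)
  Concrete.Name = 1;         // hypothetical string table offset
  Concrete.Ranges.insert(AddressRange(0x100, 0x200));

  gsym::InlineInfo Inlined;  // inlined callee covering [0x150, 0x160)
  Inlined.Name = 2;
  Inlined.CallFile = 1;
  Inlined.CallLine = 42;
  Inlined.Ranges.insert(AddressRange(0x150, 0x160));
  Concrete.Children.push_back(Inlined);

  if (auto Stack = Concrete.getInlineStack(0x155))
    for (const gsym::InlineInfo *II : *Stack)
      outs() << "name offset " << II->Name << "\n";
}
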
//===- FileEntry.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_GSYM_FILEENTRY_H
#define LLVM_DEBUGINFO_GSYM_FILEENTRY_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include <functional>
#include <stdint.h>

namespace llvm {
namespace gsym {

/// Files in GSYM are contained in FileEntry structs where we split the
/// directory and basename into two different strings in the string
/// table. This allows paths to share common directory and filename
/// strings, which saves space.
struct FileEntry {

  /// Offsets in the string table.
  /// @{
  uint32_t Dir = 0;
  uint32_t Base = 0;
  /// @}

  FileEntry() = default;
  FileEntry(uint32_t D, uint32_t B) : Dir(D), Base(B) {}

  // Implement operator== so that FileEntry can be used as key in
  // unordered containers.
  bool operator==(const FileEntry &RHS) const {
    return Base == RHS.Base && Dir == RHS.Dir;
  };
  bool operator!=(const FileEntry &RHS) const {
    return Base != RHS.Base || Dir != RHS.Dir;
  };
};

} // namespace gsym

template <> struct DenseMapInfo<gsym::FileEntry> {
  static inline gsym::FileEntry getEmptyKey() {
    uint32_t key = DenseMapInfo<uint32_t>::getEmptyKey();
    return gsym::FileEntry(key, key);
  }
  static inline gsym::FileEntry getTombstoneKey() {
    uint32_t key = DenseMapInfo<uint32_t>::getTombstoneKey();
    return gsym::FileEntry(key, key);
  }
  static unsigned getHashValue(const gsym::FileEntry &Val) {
    return llvm::hash_combine(DenseMapInfo<uint32_t>::getHashValue(Val.Dir),
                              DenseMapInfo<uint32_t>::getHashValue(Val.Base));
  }
  static bool isEqual(const gsym::FileEntry &LHS, const gsym::FileEntry &RHS) {
    return LHS == RHS;
  }
};

} // namespace llvm
#endif // LLVM_DEBUGINFO_GSYM_FILEENTRY_H
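
// Illustrative sketch: the DenseMapInfo specialization above lets FileEntry
// key a DenseMap, e.g. to de-duplicate (directory, basename) pairs while
// handing out file indexes. The helper name is hypothetical.
#include "llvm/ADT/DenseMap.h"
#include "llvm/DebugInfo/GSYM/FileEntry.h"

static uint32_t internFile(llvm::DenseMap<llvm::gsym::FileEntry, uint32_t> &Files,
                           uint32_t DirStrOffset, uint32_t BaseStrOffset) {
  llvm::gsym::FileEntry FE(DirStrOffset, BaseStrOffset);
  // try_emplace returns the existing index if the entry is already present.
  auto It = Files.try_emplace(FE, Files.size() + 1).first; // 1-based index
  return It->second;
}
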
//===- IPDBSourceFile.h - base interface for a PDB source file --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBSOURCEFILE_H
#define LLVM_DEBUGINFO_PDB_IPDBSOURCEFILE_H

#include "PDBTypes.h"
#include <memory>
#include <string>

namespace llvm {
class raw_ostream;

namespace pdb {

/// IPDBSourceFile defines an interface used to represent source files whose
/// information is stored in the PDB.
class IPDBSourceFile {
public:
  virtual ~IPDBSourceFile();

  void dump(raw_ostream &OS, int Indent) const;

  virtual std::string getFileName() const = 0;
  virtual uint32_t getUniqueId() const = 0;
  virtual std::string getChecksum() const = 0;
  virtual PDB_Checksum getChecksumType() const = 0;
  virtual std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
  getCompilands() const = 0;
};
}
}

#endif
//===- PDBExtras.h - helper functions and classes for PDBs ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
#define LLVM_DEBUGINFO_PDB_PDBEXTRAS_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <unordered_map>

namespace llvm {

namespace pdb {

using TagStats = std::unordered_map<PDB_SymType, int>;

raw_ostream &operator<<(raw_ostream &OS, const PDB_VariantType &Value);
raw_ostream &operator<<(raw_ostream &OS, const PDB_CallingConv &Conv);
raw_ostream &operator<<(raw_ostream &OS, const PDB_BuiltinType &Type);
raw_ostream &operator<<(raw_ostream &OS, const PDB_DataKind &Data);
raw_ostream &operator<<(raw_ostream &OS,
                        const llvm::codeview::CPURegister &CpuReg);
raw_ostream &operator<<(raw_ostream &OS, const PDB_LocType &Loc);
raw_ostream &operator<<(raw_ostream &OS, const codeview::ThunkOrdinal &Thunk);
raw_ostream &operator<<(raw_ostream &OS, const PDB_Checksum &Checksum);
raw_ostream &operator<<(raw_ostream &OS, const PDB_Lang &Lang);
raw_ostream &operator<<(raw_ostream &OS, const PDB_SymType &Tag);
raw_ostream &operator<<(raw_ostream &OS, const PDB_MemberAccess &Access);
raw_ostream &operator<<(raw_ostream &OS, const PDB_UdtType &Type);
raw_ostream &operator<<(raw_ostream &OS, const PDB_Machine &Machine);

raw_ostream &operator<<(raw_ostream &OS, const Variant &Value);
raw_ostream &operator<<(raw_ostream &OS, const VersionInfo &Version);
raw_ostream &operator<<(raw_ostream &OS, const TagStats &Stats);

raw_ostream& dumpPDBSourceCompression(raw_ostream& OS, uint32_t Compression);

template <typename T>
void dumpSymbolField(raw_ostream &OS, StringRef Name, T Value, int Indent) {
  OS << "\n";
  OS.indent(Indent);
  OS << Name << ": " << Value;
}

} // end namespace pdb

} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
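
// Illustrative sketch: dumpSymbolField() emits one "name: value" pair on a
// new indented line; any Value type with a raw_ostream operator<< works.
#include "llvm/DebugInfo/PDB/PDBExtras.h"
#include "llvm/Support/raw_ostream.h"

static void demoDumpField() {
  llvm::pdb::dumpSymbolField(llvm::outs(), "length", 42u, /*Indent=*/2);
  llvm::pdb::dumpSymbolField(llvm::outs(), "name", "main", /*Indent=*/2);
  llvm::outs() << "\n";
}
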
//===- PDBSymDumper.h - base interface for PDB symbol dumper *- C++ -----*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMDUMPER_H
#define LLVM_DEBUGINFO_PDB_PDBSYMDUMPER_H

#include "PDBTypes.h"

namespace llvm {

class raw_ostream;
namespace pdb {

class PDBSymDumper {
public:
  PDBSymDumper(bool ShouldRequireImpl);
  virtual ~PDBSymDumper();

  virtual void dump(const PDBSymbolAnnotation &Symbol);
  virtual void dump(const PDBSymbolBlock &Symbol);
  virtual void dump(const PDBSymbolCompiland &Symbol);
  virtual void dump(const PDBSymbolCompilandDetails &Symbol);
  virtual void dump(const PDBSymbolCompilandEnv &Symbol);
  virtual void dump(const PDBSymbolCustom &Symbol);
  virtual void dump(const PDBSymbolData &Symbol);
  virtual void dump(const PDBSymbolExe &Symbol);
  virtual void dump(const PDBSymbolFunc &Symbol);
  virtual void dump(const PDBSymbolFuncDebugEnd &Symbol);
  virtual void dump(const PDBSymbolFuncDebugStart &Symbol);
  virtual void dump(const PDBSymbolLabel &Symbol);
  virtual void dump(const PDBSymbolPublicSymbol &Symbol);
  virtual void dump(const PDBSymbolThunk &Symbol);
  virtual void dump(const PDBSymbolTypeArray &Symbol);
  virtual void dump(const PDBSymbolTypeBaseClass &Symbol);
  virtual void dump(const PDBSymbolTypeBuiltin &Symbol);
  virtual void dump(const PDBSymbolTypeCustom &Symbol);
  virtual void dump(const PDBSymbolTypeDimension &Symbol);
  virtual void dump(const PDBSymbolTypeEnum &Symbol);
  virtual void dump(const PDBSymbolTypeFriend &Symbol);
  virtual void dump(const PDBSymbolTypeFunctionArg &Symbol);
  virtual void dump(const PDBSymbolTypeFunctionSig &Symbol);
  virtual void dump(const PDBSymbolTypeManaged &Symbol);
  virtual void dump(const PDBSymbolTypePointer &Symbol);
  virtual void dump(const PDBSymbolTypeTypedef &Symbol);
  virtual void dump(const PDBSymbolTypeUDT &Symbol);
  virtual void dump(const PDBSymbolTypeVTable &Symbol);
  virtual void dump(const PDBSymbolTypeVTableShape &Symbol);
  virtual void dump(const PDBSymbolUnknown &Symbol);
  virtual void dump(const PDBSymbolUsingNamespace &Symbol);

  virtual void dumpRight(const PDBSymbolTypeArray &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeBaseClass &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeBuiltin &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeCustom &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeDimension &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeEnum &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeFriend &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeFunctionArg &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeFunctionSig &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeManaged &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypePointer &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeTypedef &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeUDT &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeVTable &Symbol) {}
  virtual void dumpRight(const PDBSymbolTypeVTableShape &Symbol) {}

private:
  bool RequireImpl;
};
}
}

#endif
//===- UDTLayout.h - UDT layout info ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_UDTLAYOUT_H
#define LLVM_DEBUGINFO_PDB_UDTLAYOUT_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
#include "llvm/DebugInfo/PDB/PDBSymbolData.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

namespace llvm {
namespace pdb {

class BaseClassLayout;
class ClassLayout;
class UDTLayoutBase;

class LayoutItemBase {
public:
  LayoutItemBase(const UDTLayoutBase *Parent, const PDBSymbol *Symbol,
                 const std::string &Name, uint32_t OffsetInParent,
                 uint32_t Size, bool IsElided);
  virtual ~LayoutItemBase() = default;

  uint32_t deepPaddingSize() const;
  virtual uint32_t immediatePadding() const { return 0; }
  virtual uint32_t tailPadding() const;

  const UDTLayoutBase *getParent() const { return Parent; }
  StringRef getName() const { return Name; }
  uint32_t getOffsetInParent() const { return OffsetInParent; }
  uint32_t getSize() const { return SizeOf; }
  uint32_t getLayoutSize() const { return LayoutSize; }
  const PDBSymbol *getSymbol() const { return Symbol; }
  const BitVector &usedBytes() const { return UsedBytes; }
  bool isElided() const { return IsElided; }
  virtual bool isVBPtr() const { return false; }

  uint32_t containsOffset(uint32_t Off) const {
    uint32_t Begin = getOffsetInParent();
    uint32_t End = Begin + getSize();
    return (Off >= Begin && Off < End);
  }

protected:
  const PDBSymbol *Symbol = nullptr;
  const UDTLayoutBase *Parent = nullptr;
  BitVector UsedBytes;
  std::string Name;
  uint32_t OffsetInParent = 0;
  uint32_t SizeOf = 0;
  uint32_t LayoutSize = 0;
  bool IsElided = false;
};

class VBPtrLayoutItem : public LayoutItemBase {
public:
  VBPtrLayoutItem(const UDTLayoutBase &Parent,
                  std::unique_ptr<PDBSymbolTypeBuiltin> Sym, uint32_t Offset,
                  uint32_t Size);

  bool isVBPtr() const override { return true; }

private:
  std::unique_ptr<PDBSymbolTypeBuiltin> Type;
};

class DataMemberLayoutItem : public LayoutItemBase {
public:
  DataMemberLayoutItem(const UDTLayoutBase &Parent,
                       std::unique_ptr<PDBSymbolData> DataMember);

  const PDBSymbolData &getDataMember();
  bool hasUDTLayout() const;
  const ClassLayout &getUDTLayout() const;

private:
  std::unique_ptr<PDBSymbolData> DataMember;
  std::unique_ptr<ClassLayout> UdtLayout;
};

class VTableLayoutItem : public LayoutItemBase {
public:
  VTableLayoutItem(const UDTLayoutBase &Parent,
                   std::unique_ptr<PDBSymbolTypeVTable> VTable);

  uint32_t getElementSize() const { return ElementSize; }

private:
  uint32_t ElementSize = 0;
  std::unique_ptr<PDBSymbolTypeVTable> VTable;
};

class UDTLayoutBase : public LayoutItemBase {
  template <typename T> using UniquePtrVector = std::vector<std::unique_ptr<T>>;

public:
  UDTLayoutBase(const UDTLayoutBase *Parent, const PDBSymbol &Sym,
                const std::string &Name, uint32_t OffsetInParent, uint32_t Size,
                bool IsElided);

  uint32_t tailPadding() const override;
  ArrayRef<LayoutItemBase *> layout_items() const { return LayoutItems; }
  ArrayRef<BaseClassLayout *> bases() const { return AllBases; }
  ArrayRef<BaseClassLayout *> regular_bases() const { return NonVirtualBases; }
  ArrayRef<BaseClassLayout *> virtual_bases() const { return VirtualBases; }
  uint32_t directVirtualBaseCount() const { return DirectVBaseCount; }
  ArrayRef<std::unique_ptr<PDBSymbolFunc>> funcs() const { return Funcs; }
  ArrayRef<std::unique_ptr<PDBSymbol>> other_items() const { return Other; }

protected:
  bool hasVBPtrAtOffset(uint32_t Off) const;
  void initializeChildren(const PDBSymbol &Sym);

  void addChildToLayout(std::unique_ptr<LayoutItemBase> Child);

  uint32_t DirectVBaseCount = 0;

  UniquePtrVector<PDBSymbol> Other;
  UniquePtrVector<PDBSymbolFunc> Funcs;
  UniquePtrVector<LayoutItemBase> ChildStorage;
  std::vector<LayoutItemBase *> LayoutItems;

  std::vector<BaseClassLayout *> AllBases;
  ArrayRef<BaseClassLayout *> NonVirtualBases;
  ArrayRef<BaseClassLayout *> VirtualBases;

  VTableLayoutItem *VTable = nullptr;
  VBPtrLayoutItem *VBPtr = nullptr;
};

class BaseClassLayout : public UDTLayoutBase {
public:
  BaseClassLayout(const UDTLayoutBase &Parent, uint32_t OffsetInParent,
                  bool Elide, std::unique_ptr<PDBSymbolTypeBaseClass> Base);

  const PDBSymbolTypeBaseClass &getBase() const { return *Base; }
  bool isVirtualBase() const { return IsVirtualBase; }
  bool isEmptyBase() { return SizeOf == 1 && LayoutSize == 0; }

private:
  std::unique_ptr<PDBSymbolTypeBaseClass> Base;
  bool IsVirtualBase;
};

class ClassLayout : public UDTLayoutBase {
public:
  explicit ClassLayout(const PDBSymbolTypeUDT &UDT);
  explicit ClassLayout(std::unique_ptr<PDBSymbolTypeUDT> UDT);

  ClassLayout(ClassLayout &&Other) = default;

  const PDBSymbolTypeUDT &getClass() const { return UDT; }
  uint32_t immediatePadding() const override;

private:
  BitVector ImmediateUsedBytes;
  std::unique_ptr<PDBSymbolTypeUDT> OwnedStorage;
  const PDBSymbolTypeUDT &UDT;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_UDTLAYOUT_H
//===- ConcreteSymbolEnumerator.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
#define LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H

#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cstdint>
#include <memory>

namespace llvm {
namespace pdb {

template <typename ChildType>
class ConcreteSymbolEnumerator : public IPDBEnumChildren<ChildType> {
public:
  ConcreteSymbolEnumerator(std::unique_ptr<IPDBEnumSymbols> SymbolEnumerator)
      : Enumerator(std::move(SymbolEnumerator)) {}

  ~ConcreteSymbolEnumerator() override = default;

  uint32_t getChildCount() const override {
    return Enumerator->getChildCount();
  }

  std::unique_ptr<ChildType> getChildAtIndex(uint32_t Index) const override {
    std::unique_ptr<PDBSymbol> Child = Enumerator->getChildAtIndex(Index);
    return unique_dyn_cast_or_null<ChildType>(Child);
  }

  std::unique_ptr<ChildType> getNext() override {
    return unique_dyn_cast_or_null<ChildType>(Enumerator->getNext());
  }

  void reset() override { Enumerator->reset(); }

private:

  std::unique_ptr<IPDBEnumSymbols> Enumerator;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
//===- IPDBEnumChildren.h - base interface for child enumerator -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
#define LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H

#include "llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h"
#include <cassert>
#include <cstdint>
#include <memory>

namespace llvm {
namespace pdb {

template <typename ChildType> class IPDBEnumChildren {
public:
  using ChildTypePtr = std::unique_ptr<ChildType>;
  using MyType = IPDBEnumChildren<ChildType>;

  virtual ~IPDBEnumChildren() = default;

  virtual uint32_t getChildCount() const = 0;
  virtual ChildTypePtr getChildAtIndex(uint32_t Index) const = 0;
  virtual ChildTypePtr getNext() = 0;
  virtual void reset() = 0;
};

template <typename ChildType>
class NullEnumerator : public IPDBEnumChildren<ChildType> {
  uint32_t getChildCount() const override { return 0; }
  std::unique_ptr<ChildType> getChildAtIndex(uint32_t Index) const override {
    return nullptr;
  }
  std::unique_ptr<ChildType> getNext() override { return nullptr; }
  void reset() override {}
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
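
// Illustrative sketch: consumers typically drain an IPDBEnumChildren with a
// getNext() loop; reset() rewinds the enumerator for another pass. The
// helper name and callback shape are hypothetical, not LLVM API.
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include <memory>

template <typename ChildType, typename Fn>
static void forEachChild(llvm::pdb::IPDBEnumChildren<ChildType> &Enumerator,
                         Fn Callback) {
  Enumerator.reset(); // start from the first child
  while (std::unique_ptr<ChildType> Child = Enumerator.getNext())
    Callback(*Child);
}
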
//===- PDBTypes.h - Defines enums for various fields contained in PDB ----====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBTYPES_H
#define LLVM_DEBUGINFO_PDB_PDBTYPES_H

#include "llvm/ADT/APFloat.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBFrameData.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>

namespace llvm {
namespace pdb {

typedef uint32_t SymIndexId;

class IPDBDataStream;
class IPDBInjectedSource;
class IPDBLineNumber;
class IPDBSectionContrib;
class IPDBSession;
class IPDBSourceFile;
class IPDBTable;
class PDBSymDumper;
class PDBSymbol;
class PDBSymbolExe;
class PDBSymbolCompiland;
class PDBSymbolCompilandDetails;
class PDBSymbolCompilandEnv;
class PDBSymbolFunc;
class PDBSymbolBlock;
class PDBSymbolData;
class PDBSymbolAnnotation;
class PDBSymbolLabel;
class PDBSymbolPublicSymbol;
class PDBSymbolTypeUDT;
class PDBSymbolTypeEnum;
class PDBSymbolTypeFunctionSig;
class PDBSymbolTypePointer;
class PDBSymbolTypeArray;
class PDBSymbolTypeBuiltin;
class PDBSymbolTypeTypedef;
class PDBSymbolTypeBaseClass;
class PDBSymbolTypeFriend;
class PDBSymbolTypeFunctionArg;
class PDBSymbolFuncDebugStart;
class PDBSymbolFuncDebugEnd;
class PDBSymbolUsingNamespace;
class PDBSymbolTypeVTableShape;
class PDBSymbolTypeVTable;
class PDBSymbolCustom;
class PDBSymbolThunk;
class PDBSymbolTypeCustom;
class PDBSymbolTypeManaged;
class PDBSymbolTypeDimension;
class PDBSymbolUnknown;

using IPDBEnumSymbols = IPDBEnumChildren<PDBSymbol>;
using IPDBEnumSourceFiles = IPDBEnumChildren<IPDBSourceFile>;
using IPDBEnumDataStreams = IPDBEnumChildren<IPDBDataStream>;
using IPDBEnumLineNumbers = IPDBEnumChildren<IPDBLineNumber>;
using IPDBEnumTables = IPDBEnumChildren<IPDBTable>;
using IPDBEnumInjectedSources = IPDBEnumChildren<IPDBInjectedSource>;
using IPDBEnumSectionContribs = IPDBEnumChildren<IPDBSectionContrib>;
using IPDBEnumFrameData = IPDBEnumChildren<IPDBFrameData>;

/// Specifies which PDB reader implementation is to be used.  Only a value
/// of PDB_ReaderType::DIA is currently supported, but Native is in the works.
enum class PDB_ReaderType {
  DIA = 0,
  Native = 1,
};

/// An enumeration indicating the type of data contained in this table.
enum class PDB_TableType {
  TableInvalid = 0,
  Symbols,
  SourceFiles,
  LineNumbers,
  SectionContribs,
  Segments,
  InjectedSources,
  FrameData,
  InputAssemblyFiles,
  Dbg
};

/// Defines flags used for enumerating child symbols.  This corresponds to the
/// NameSearchOptions enumeration which is documented here:
/// https://msdn.microsoft.com/en-us/library/yat28ads.aspx
enum PDB_NameSearchFlags {
  NS_Default = 0x0,
  NS_CaseSensitive = 0x1,
  NS_CaseInsensitive = 0x2,
  NS_FileNameExtMatch = 0x4,
  NS_Regex = 0x8,
  NS_UndecoratedName = 0x10,

  // For backward compatibility.
  NS_CaseInFileNameExt = NS_CaseInsensitive | NS_FileNameExtMatch,
  NS_CaseRegex = NS_Regex | NS_CaseSensitive,
  NS_CaseInRex = NS_Regex | NS_CaseInsensitive
};

/// Specifies the hash algorithm that a source file from a PDB was hashed with.
/// This corresponds to the CV_SourceChksum_t enumeration and is documented
/// here: https://msdn.microsoft.com/en-us/library/e96az21x.aspx
enum class PDB_Checksum { None = 0, MD5 = 1, SHA1 = 2, SHA256 = 3 };

/// These values correspond to the CV_CPU_TYPE_e enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
using PDB_Cpu = codeview::CPUType;

enum class PDB_Machine {
  Invalid = 0xffff,
  Unknown = 0x0,
  Am33 = 0x13,
  Amd64 = 0x8664,
  Arm = 0x1C0,
  Arm64 = 0xaa64,
  ArmNT = 0x1C4,
  Ebc = 0xEBC,
  x86 = 0x14C,
  Ia64 = 0x200,
  M32R = 0x9041,
  Mips16 = 0x266,
  MipsFpu = 0x366,
  MipsFpu16 = 0x466,
  PowerPC = 0x1F0,
  PowerPCFP = 0x1F1,
  R4000 = 0x166,
  SH3 = 0x1A2,
  SH3DSP = 0x1A3,
  SH4 = 0x1A6,
  SH5 = 0x1A8,
  Thumb = 0x1C2,
  WceMipsV2 = 0x169
};

// A struct with an inner unnamed enum with explicit underlying type results
// in an enum class that can implicitly convert to the underlying type, which
// is convenient for this enum.
struct PDB_SourceCompression {
  enum : uint32_t {
    // No compression. Produced e.g. by `link.exe /natvis:foo.natvis`.
    None,
    // Not known what produces this.
    RunLengthEncoded,
    // Not known what produces this.
    Huffman,
    // Not known what produces this.
    LZ,
    // Produced e.g. by `csc /debug`. The encoded data is its own mini-stream
    // with the following layout (in little endian):
    //   GUID LanguageTypeGuid;
    //   GUID LanguageVendorGuid;
    //   GUID DocumentTypeGuid;
    //   GUID HashFunctionGuid;
    //   uint32_t HashDataSize;
    //   uint32_t CompressedDataSize;
    // Followed by HashDataSize bytes containing a hash checksum,
    // followed by CompressedDataSize bytes containing source contents.
    //
    // CompressedDataSize can be 0, in this case only the hash data is present.
    // (CompressedDataSize is != 0 e.g. if `/embed` is passed to csc.exe.)
    // The compressed data format is:
    //   uint32_t UncompressedDataSize;
    // If UncompressedDataSize is 0, the data is stored uncompressed and
    // CompressedDataSize stores the uncompressed size.
    // If UncompressedDataSize is != 0, then the data is in raw deflate
    // encoding as described in rfc1951.
    //
    // A GUID is 16 bytes, stored in the usual
    //   uint32_t
    //   uint16_t
    //   uint16_t
    //   uint8_t[8]
    // layout.
    //
    // Well-known GUIDs for LanguageTypeGuid are:
    //   63a08714-fc37-11d2-904c-00c04fa302a1 C
    //   3a12d0b7-c26c-11d0-b442-00a0244a1dd2 C++
    //   3f5162f8-07c6-11d3-9053-00c04fa302a1 C#
    //   af046cd1-d0e1-11d2-977c-00a0c9b4d50c Cobol
    //   ab4f38c9-b6e6-43ba-be3b-58080b2ccce3 F#
    //   3a12d0b4-c26c-11d0-b442-00a0244a1dd2 Java
    //   3a12d0b6-c26c-11d0-b442-00a0244a1dd2 JScript
    //   af046cd2-d0e1-11d2-977c-00a0c9b4d50c Pascal
    //   3a12d0b8-c26c-11d0-b442-00a0244a1dd2 Visual Basic
    //
    // Well-known GUIDs for LanguageVendorGuid are:
    //   994b45c4-e6e9-11d2-903f-00c04fa302a1 Microsoft
    //
    // Well-known GUIDs for DocumentTypeGuid are:
    //   5a869d0b-6611-11d3-bd2a-0000f80849bd Text
    //
    // Well-known GUIDs for HashFunctionGuid are:
    //   406ea660-64cf-4c82-b6f0-42d48172a799 MD5    (HashDataSize is 16)
    //   ff1816ec-aa5e-4d10-87f7-6f4963833460 SHA1   (HashDataSize is 20)
    //   8829d00f-11b8-4213-878b-770e8597ac16 SHA256 (HashDataSize is 32)
    DotNet = 101,
  };
};
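
// Illustrative sketch (not part of the original header; the struct and
// function names are hypothetical): one way a reader might lift the fixed
// portion of the DotNet layout described above out of a raw byte buffer.
struct DotNetSourceHeaderExample {
  uint8_t LanguageTypeGuid[16];
  uint8_t LanguageVendorGuid[16];
  uint8_t DocumentTypeGuid[16];
  uint8_t HashFunctionGuid[16];
  uint32_t HashDataSize;
  uint32_t CompressedDataSize;
};

inline bool parseDotNetSourceHeaderExample(const uint8_t *Data, size_t Size,
                                           DotNetSourceHeaderExample &Hdr) {
  if (Size < 4 * 16 + 2 * 4)
    return false; // Too small to hold the four GUIDs and the two sizes.
  std::memcpy(Hdr.LanguageTypeGuid, Data, 16);
  std::memcpy(Hdr.LanguageVendorGuid, Data + 16, 16);
  std::memcpy(Hdr.DocumentTypeGuid, Data + 32, 16);
  std::memcpy(Hdr.HashFunctionGuid, Data + 48, 16);
  // The sizes are little endian on disk; assemble them byte by byte so the
  // sketch is independent of host endianness.
  auto ReadU32 = [Data](size_t Off) {
    return uint32_t(Data[Off]) | uint32_t(Data[Off + 1]) << 8 |
           uint32_t(Data[Off + 2]) << 16 | uint32_t(Data[Off + 3]) << 24;
  };
  Hdr.HashDataSize = ReadU32(64);
  Hdr.CompressedDataSize = ReadU32(68);
  return true; // HashDataSize bytes of hash follow, then the source data.
}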

/// These values correspond to the CV_call_e enumeration, and are documented
/// at the following locations:
///   https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
///   https://msdn.microsoft.com/en-us/library/windows/desktop/ms680207(v=vs.85).aspx
using PDB_CallingConv = codeview::CallingConvention;

/// These values correspond to the CV_CFL_LANG enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/bw3aekw6.aspx
using PDB_Lang = codeview::SourceLanguage;

/// These values correspond to the DataKind enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/b2x2t313.aspx
enum class PDB_DataKind {
  Unknown,
  Local,
  StaticLocal,
  Param,
  ObjectPtr,
  FileStatic,
  Global,
  Member,
  StaticMember,
  Constant
};

/// These values correspond to the SymTagEnum enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/bkedss5f.aspx
enum class PDB_SymType {
  None,
  Exe,
  Compiland,
  CompilandDetails,
  CompilandEnv,
  Function,
  Block,
  Data,
  Annotation,
  Label,
  PublicSymbol,
  UDT,
  Enum,
  FunctionSig,
  PointerType,
  ArrayType,
  BuiltinType,
  Typedef,
  BaseClass,
  Friend,
  FunctionArg,
  FuncDebugStart,
  FuncDebugEnd,
  UsingNamespace,
  VTableShape,
  VTable,
  Custom,
  Thunk,
  CustomType,
  ManagedType,
  Dimension,
  CallSite,
  InlineSite,
  BaseInterface,
  VectorType,
  MatrixType,
  HLSLType,
  Caller,
  Callee,
  Export,
  HeapAllocationSite,
  CoffGroup,
  Inlinee,
  Max
};

/// These values correspond to the LocationType enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/f57kaez3.aspx
enum class PDB_LocType {
  Null,
  Static,
  TLS,
  RegRel,
  ThisRel,
  Enregistered,
  BitField,
  Slot,
  IlRel,
  MetaData,
  Constant,
  RegRelAliasIndir,
  Max
};

/// These values correspond to the UdtKind enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/wcstk66t.aspx
enum class PDB_UdtType { Struct, Class, Union, Interface };

/// These values correspond to the StackFrameTypeEnum enumeration, and are
/// documented here: https://msdn.microsoft.com/en-us/library/bc5207xw.aspx.
enum class PDB_StackFrameType : uint16_t {
  FPO,
  KernelTrap,
  KernelTSS,
  EBP,
  FrameData,
  Unknown = 0xffff
};

/// These values correspond to the MemoryTypeEnum enumeration, and are
/// documented here: https://msdn.microsoft.com/en-us/library/ms165609.aspx.
enum class PDB_MemoryType : uint16_t {
  Code,
  Data,
  Stack,
  HeapCode,
  Any = 0xffff
};

/// These values correspond to the BasicType enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/4szdtzc3.aspx
enum class PDB_BuiltinType {
  None = 0,
  Void = 1,
  Char = 2,
  WCharT = 3,
  Int = 6,
  UInt = 7,
  Float = 8,
  BCD = 9,
  Bool = 10,
  Long = 13,
  ULong = 14,
  Currency = 25,
  Date = 26,
  Variant = 27,
  Complex = 28,
  Bitfield = 29,
  BSTR = 30,
  HResult = 31,
  Char16 = 32,
  Char32 = 33,
  Char8 = 34,
};

/// These values correspond to the flags that can be combined to control the
/// return of an undecorated name for a C++ decorated name, and are documented
/// here: https://msdn.microsoft.com/en-us/library/kszfk0fs.aspx
enum PDB_UndnameFlags : uint32_t {
  Undname_Complete = 0x0,
  Undname_NoLeadingUnderscores = 0x1,
  Undname_NoMsKeywords = 0x2,
  Undname_NoFuncReturns = 0x4,
  Undname_NoAllocModel = 0x8,
  Undname_NoAllocLang = 0x10,
  Undname_Reserved1 = 0x20,
  Undname_Reserved2 = 0x40,
  Undname_NoThisType = 0x60,
  Undname_NoAccessSpec = 0x80,
  Undname_NoThrowSig = 0x100,
  Undname_NoMemberType = 0x200,
  Undname_NoReturnUDTModel = 0x400,
  Undname_32BitDecode = 0x800,
  Undname_NameOnly = 0x1000,
  Undname_TypeOnly = 0x2000,
  Undname_HaveParams = 0x4000,
  Undname_NoECSU = 0x8000,
  Undname_NoIdentCharCheck = 0x10000,
  Undname_NoPTR64 = 0x20000
};

enum class PDB_MemberAccess { Private = 1, Protected = 2, Public = 3 };

struct VersionInfo {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Build;
  uint32_t QFE;
};

enum PDB_VariantType {
  Empty,
  Unknown,
  Int8,
  Int16,
  Int32,
  Int64,
  Single,
  Double,
  UInt8,
  UInt16,
  UInt32,
  UInt64,
  Bool,
  String
};

struct Variant {
  Variant() = default;

  explicit Variant(bool V) : Type(PDB_VariantType::Bool) { Value.Bool = V; }
  explicit Variant(int8_t V) : Type(PDB_VariantType::Int8) { Value.Int8 = V; }
  explicit Variant(int16_t V) : Type(PDB_VariantType::Int16) {
    Value.Int16 = V;
  }
  explicit Variant(int32_t V) : Type(PDB_VariantType::Int32) {
    Value.Int32 = V;
  }
  explicit Variant(int64_t V) : Type(PDB_VariantType::Int64) {
    Value.Int64 = V;
  }
  explicit Variant(float V) : Type(PDB_VariantType::Single) {
    Value.Single = V;
  }
  explicit Variant(double V) : Type(PDB_VariantType::Double) {
    Value.Double = V;
  }
  explicit Variant(uint8_t V) : Type(PDB_VariantType::UInt8) {
    Value.UInt8 = V;
  }
  explicit Variant(uint16_t V) : Type(PDB_VariantType::UInt16) {
    Value.UInt16 = V;
  }
  explicit Variant(uint32_t V) : Type(PDB_VariantType::UInt32) {
    Value.UInt32 = V;
  }
  explicit Variant(uint64_t V) : Type(PDB_VariantType::UInt64) {
    Value.UInt64 = V;
  }

  Variant(const Variant &Other) {
    *this = Other;
  }

  ~Variant() {
    if (Type == PDB_VariantType::String)
      delete[] Value.String;
  }

  PDB_VariantType Type = PDB_VariantType::Empty;
  union {
    bool Bool;
    int8_t Int8;
    int16_t Int16;
    int32_t Int32;
    int64_t Int64;
    float Single;
    double Double;
    uint8_t UInt8;
    uint16_t UInt16;
    uint32_t UInt32;
    uint64_t UInt64;
    char *String;
  } Value;

  bool isIntegralType() const {
    switch (Type) {
    case Bool:
    case Int8:
    case Int16:
    case Int32:
    case Int64:
    case UInt8:
    case UInt16:
    case UInt32:
    case UInt64:
      return true;
    default:
      return false;
    }
  }

#define VARIANT_WIDTH(Enum, NumBits)                                           \
  case PDB_VariantType::Enum:                                                  \
    return NumBits;

  unsigned getBitWidth() const {
    switch (Type) {
      VARIANT_WIDTH(Bool, 1u)
      VARIANT_WIDTH(Int8, 8u)
      VARIANT_WIDTH(Int16, 16u)
      VARIANT_WIDTH(Int32, 32u)
      VARIANT_WIDTH(Int64, 64u)
      VARIANT_WIDTH(Single, 32u)
      VARIANT_WIDTH(Double, 64u)
      VARIANT_WIDTH(UInt8, 8u)
      VARIANT_WIDTH(UInt16, 16u)
      VARIANT_WIDTH(UInt32, 32u)
      VARIANT_WIDTH(UInt64, 64u)
    default:
      assert(false && "Variant::getBitWidth called on non-numeric type");
      return 0u;
    }
  }

#undef VARIANT_WIDTH

#define VARIANT_APSINT(Enum, NumBits, IsUnsigned)                              \
  case PDB_VariantType::Enum:                                                  \
    return APSInt(APInt(NumBits, Value.Enum), IsUnsigned);

  APSInt toAPSInt() const {
    switch (Type) {
      VARIANT_APSINT(Bool, 1u, true)
      VARIANT_APSINT(Int8, 8u, false)
      VARIANT_APSINT(Int16, 16u, false)
      VARIANT_APSINT(Int32, 32u, false)
      VARIANT_APSINT(Int64, 64u, false)
      VARIANT_APSINT(UInt8, 8u, true)
      VARIANT_APSINT(UInt16, 16u, true)
      VARIANT_APSINT(UInt32, 32u, true)
      VARIANT_APSINT(UInt64, 64u, true)
    default:
      assert(false && "Variant::toAPSInt called on non-integral type");
      return APSInt();
    }
  }

#undef VARIANT_APSINT

  APFloat toAPFloat() const {
    // Float constants may be tagged as integers.
    switch (Type) {
    case PDB_VariantType::Single:
    case PDB_VariantType::UInt32:
    case PDB_VariantType::Int32:
      return APFloat(Value.Single);
    case PDB_VariantType::Double:
    case PDB_VariantType::UInt64:
    case PDB_VariantType::Int64:
      return APFloat(Value.Double);
    default:
      assert(false && "Variant::toAPFloat called on non-floating-point type");
      return APFloat::getZero(APFloat::IEEEsingle());
    }
  }

#define VARIANT_EQUAL_CASE(Enum)                                               \
  case PDB_VariantType::Enum:                                                  \
    return Value.Enum == Other.Value.Enum;

  bool operator==(const Variant &Other) const {
    if (Type != Other.Type)
      return false;
    switch (Type) {
      VARIANT_EQUAL_CASE(Bool)
      VARIANT_EQUAL_CASE(Int8)
      VARIANT_EQUAL_CASE(Int16)
      VARIANT_EQUAL_CASE(Int32)
      VARIANT_EQUAL_CASE(Int64)
      VARIANT_EQUAL_CASE(Single)
      VARIANT_EQUAL_CASE(Double)
      VARIANT_EQUAL_CASE(UInt8)
      VARIANT_EQUAL_CASE(UInt16)
      VARIANT_EQUAL_CASE(UInt32)
      VARIANT_EQUAL_CASE(UInt64)
      VARIANT_EQUAL_CASE(String)
    default:
      return true;
    }
  }

#undef VARIANT_EQUAL_CASE

  bool operator!=(const Variant &Other) const { return !(*this == Other); }
  Variant &operator=(const Variant &Other) {
    if (this == &Other)
      return *this;
    if (Type == PDB_VariantType::String)
      delete[] Value.String;
    Type = Other.Type;
    Value = Other.Value;
    if (Other.Type == PDB_VariantType::String &&
        Other.Value.String != nullptr) {
      Value.String = new char[strlen(Other.Value.String) + 1];
      ::strcpy(Value.String, Other.Value.String);
    }
    return *this;
  }
};
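
// Illustrative sketch (not part of the original header; the function name is
// hypothetical): a typical round trip through the tagged union.  A constant
// read out of a PDB arrives as a Variant; integral values can be widened to
// an APSInt of the matching bit width.
inline APSInt variantUsageExample() {
  Variant V(int32_t(-42));
  assert(V.isIntegralType() && V.getBitWidth() == 32u);
  return V.toAPSInt(); // A 32-bit signed APSInt holding -42.
}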

} // end namespace pdb
} // end namespace llvm

namespace std {

template <> struct hash<llvm::pdb::PDB_SymType> {
  using argument_type = llvm::pdb::PDB_SymType;
  using result_type = std::size_t;

  result_type operator()(const argument_type &Arg) const {
    return std::hash<int>()(static_cast<int>(Arg));
  }
};

} // end namespace std
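
// Illustrative note (not part of the original header): the specialization
// above is what allows PDB_SymType to be used directly as a key in unordered
// containers, e.g.
//
//   std::unordered_map<llvm::pdb::PDB_SymType, unsigned> Counts;
//   ++Counts[llvm::pdb::PDB_SymType::Function];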

#endif // LLVM_DEBUGINFO_PDB_PDBTYPES_H

// File: DebugInfo/PDB/PDBSymbolPublicSymbol.h
//===- PDBSymbolPublicSymbol.h - public symbol info -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolPublicSymbol : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PublicSymbol)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(isCode)
  FORWARD_SYMBOL_METHOD(isFunction)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(isManagedCode)
  FORWARD_SYMBOL_METHOD(isMSILCode)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
  FORWARD_SYMBOL_METHOD(getUndecoratedName)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H

// File: DebugInfo/PDB/PDBSymbolUsingNamespace.h
//===- PDBSymbolUsingNamespace.h - using namespace info ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolUsingNamespace : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UsingNamespace)

public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getName)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H

// File: DebugInfo/PDB/PDBSymbolData.h
//===- PDBSymbolData.h - PDB data (e.g. variable) accessors -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H

#include "PDBSymbol.h"
#include "PDBTypes.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"

namespace llvm {

namespace pdb {

class PDBSymDumper;

class PDBSymbolData : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Data)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAccess)
  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(getAddressTaken)
  FORWARD_SYMBOL_METHOD(getBitPosition)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(isCompilerGenerated)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(getDataKind)
  FORWARD_SYMBOL_METHOD(isAggregated)
  FORWARD_SYMBOL_METHOD(isSplitted)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(getOffset)
  FORWARD_SYMBOL_METHOD(getRegisterId)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getSlot)
  FORWARD_SYMBOL_METHOD(getToken)
  FORWARD_SYMBOL_ID_METHOD(getType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(getValue)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
  FORWARD_SYMBOL_METHOD(isVolatileType)

  std::unique_ptr<IPDBEnumLineNumbers> getLineNumbers() const;
  uint32_t getCompilandId() const;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H

// File: DebugInfo/PDB/IPDBInjectedSource.h
//===- IPDBInjectedSource.h - base class for PDB injected file --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBINJECTEDSOURCE_H
#define LLVM_DEBUGINFO_PDB_IPDBINJECTEDSOURCE_H

#include <cstdint>
#include <string>

namespace llvm {
namespace pdb {
/// IPDBInjectedSource defines an interface used to represent source files
/// which were injected directly into the PDB file during the compilation
/// process.  This is used, for example, to add natvis files to a PDB, but
/// in theory could be used to add arbitrary source code.
class IPDBInjectedSource {
public:
  virtual ~IPDBInjectedSource();

  virtual uint32_t getCrc32() const = 0;
  virtual uint64_t getCodeByteSize() const = 0;
  virtual std::string getFileName() const = 0;
  virtual std::string getObjectFileName() const = 0;
  virtual std::string getVirtualFileName() const = 0;
  // The returned value depends on the PDB producer,
  // but 0 is guaranteed to mean "no compression".
  // The enum PDB_SourceCompression lists known return values.
  virtual uint32_t getCompression() const = 0;
  virtual std::string getCode() const = 0;
};
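
// Illustrative note (not part of the original header): callers typically
// compare getCompression() against the PDB_SourceCompression values defined
// in PDBTypes.h, e.g.
//
//   if (Src.getCompression() == 0 /* PDB_SourceCompression::None */)
//     OS << Src.getCode(); // Contents were stored verbatim.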
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_IPDBINJECTEDSOURCE_H

// File: DebugInfo/PDB/PDBContext.h
//===-- PDBContext.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
#define LLVM_DEBUGINFO_PDB_PDBCONTEXT_H

#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include <cstdint>
#include <memory>
#include <string>

namespace llvm {

namespace object {
class COFFObjectFile;
} // end namespace object

namespace pdb {

  /// PDBContext
  /// This data structure is the top level entity that deals with PDB debug
  /// information parsing.  This data structure exists only when there is a
  /// need for a transparent interface to different debug information formats
  /// (e.g. PDB and DWARF).  More control and power over the debug information
  /// access can be had by using the PDB interfaces directly.
  class PDBContext : public DIContext {
  public:
    PDBContext(const object::COFFObjectFile &Object,
               std::unique_ptr<IPDBSession> PDBSession);
    PDBContext(PDBContext &) = delete;
    PDBContext &operator=(PDBContext &) = delete;

    static bool classof(const DIContext *DICtx) {
      return DICtx->getKind() == CK_PDB;
    }

    void dump(raw_ostream &OS, DIDumpOptions DIDumpOpts) override;

    DILineInfo getLineInfoForAddress(
        object::SectionedAddress Address,
        DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
    DILineInfo
    getLineInfoForDataAddress(object::SectionedAddress Address) override;
    DILineInfoTable getLineInfoForAddressRange(
        object::SectionedAddress Address, uint64_t Size,
        DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
    DIInliningInfo getInliningInfoForAddress(
        object::SectionedAddress Address,
        DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;

    std::vector<DILocal>
    getLocalsForAddress(object::SectionedAddress Address) override;

  private:
    std::string getFunctionName(uint64_t Address, DINameKind NameKind) const;
    std::unique_ptr<IPDBSession> Session;
  };
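
  // Illustrative sketch (not part of the original header; the function name
  // is hypothetical): a symbolizer-style client owns the session, wraps it in
  // a PDBContext, and then queries line information through the generic
  // DIContext interface.
  inline DILineInfo
  pdbContextUsageExample(const object::COFFObjectFile &Obj,
                         std::unique_ptr<IPDBSession> Session,
                         object::SectionedAddress Addr) {
    PDBContext Ctx(Obj, std::move(Session));
    return Ctx.getLineInfoForAddress(Addr);
  }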

} // end namespace pdb

} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBCONTEXT_H

// File: DebugInfo/PDB/PDB.h
//===- PDB.h - base header file for creating a PDB reader -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDB_H
#define LLVM_DEBUGINFO_PDB_PDB_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/Error.h"
#include <memory>

namespace llvm {
namespace pdb {

class IPDBSession;

Error loadDataForPDB(PDB_ReaderType Type, StringRef Path,
                     std::unique_ptr<IPDBSession> &Session);

Error loadDataForEXE(PDB_ReaderType Type, StringRef Path,
                     std::unique_ptr<IPDBSession> &Session);
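
// Illustrative sketch (not part of the original header; the function name is
// hypothetical): typical session creation.  The Error must be consumed or
// propagated before the session is used.
inline Expected<std::unique_ptr<IPDBSession>>
loadSessionExample(StringRef PdbPath) {
  std::unique_ptr<IPDBSession> Session;
  if (Error E = loadDataForPDB(PDB_ReaderType::DIA, PdbPath, Session))
    return std::move(E);
  return std::move(Session);
}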

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDB_H

// File: DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
//===- PDBSymbolTypeFunctionSig.h - function signature type info *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

class raw_ostream;
namespace pdb {

class PDBSymbolTypeFunctionSig : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionSig)
public:
  std::unique_ptr<IPDBEnumSymbols> getArguments() const;

  void dump(PDBSymDumper &Dumper) const override;
  void dumpRight(PDBSymDumper &Dumper) const override;
  void dumpArgList(raw_ostream &OS) const;

  bool isCVarArgs() const;

  FORWARD_SYMBOL_METHOD(getCallingConvention)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_ID_METHOD(getUnmodifiedType)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(getCount)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  // FORWARD_SYMBOL_METHOD(getObjectPointerType)
  FORWARD_SYMBOL_METHOD(getThisAdjust)
  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(getType, getReturnType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H

// File: DebugInfo/PDB/IPDBLineNumber.h
//===- IPDBLineNumber.h - base interface for PDB line no. info ---*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBLINENUMBER_H
#define LLVM_DEBUGINFO_PDB_IPDBLINENUMBER_H

#include <cstdint>

namespace llvm {
namespace pdb {
class IPDBLineNumber {
public:
  virtual ~IPDBLineNumber();

  virtual uint32_t getLineNumber() const = 0;
  virtual uint32_t getLineNumberEnd() const = 0;
  virtual uint32_t getColumnNumber() const = 0;
  virtual uint32_t getColumnNumberEnd() const = 0;
  virtual uint32_t getAddressSection() const = 0;
  virtual uint32_t getAddressOffset() const = 0;
  virtual uint32_t getRelativeVirtualAddress() const = 0;
  virtual uint64_t getVirtualAddress() const = 0;
  virtual uint32_t getLength() const = 0;
  virtual uint32_t getSourceFileId() const = 0;
  virtual uint32_t getCompilandId() const = 0;
  virtual bool isStatement() const = 0;
};
}
}

#endif

// File: DebugInfo/PDB/PDBSymbolCompiland.h
//===- PDBSymbolCompiland.h - Accessors for querying PDB compilands -----*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H

#include "PDBSymbol.h"
#include "PDBTypes.h"
#include <string>

namespace llvm {

class raw_ostream;

namespace pdb {

class PDBSymbolCompiland : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Compiland)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLibraryName)
  FORWARD_SYMBOL_METHOD(getName)

  std::string getSourceFileName() const;
  std::string getSourceFileFullPath() const;
};
}
}

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H

// File: DebugInfo/PDB/PDBSymbolCustom.h
//===- PDBSymbolCustom.h - compiler-specific types --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H

#include "PDBSymbol.h"
#include "PDBTypes.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {

namespace pdb {
/// PDBSymbolCustom represents symbols that are compiler-specific and do not
/// fit anywhere else in the lexical hierarchy.
/// https://msdn.microsoft.com/en-us/library/d88sf09h.aspx
class PDBSymbolCustom : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Custom)
public:
  void dump(PDBSymDumper &Dumper) const override;

  void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes);
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H

// File: DebugInfo/PDB/IPDBDataStream.h
//===- IPDBDataStream.h - base interface for child enumerator ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H
#define LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H

#include "llvm/ADT/SmallVector.h"
#include <cstdint>
#include <optional>
#include <string>

namespace llvm {
namespace pdb {

/// IPDBDataStream defines an interface used to represent a stream consisting
/// of a name and a series of records whose formats depend on the particular
/// stream type.
class IPDBDataStream {
public:
  using RecordType = SmallVector<uint8_t, 32>;

  virtual ~IPDBDataStream();

  virtual uint32_t getRecordCount() const = 0;
  virtual std::string getName() const = 0;
  virtual std::optional<RecordType> getItemAtIndex(uint32_t Index) const = 0;
  virtual bool getNext(RecordType &Record) = 0;
  virtual void reset() = 0;
};
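
// Illustrative sketch (not part of the original header; the function name is
// hypothetical): the enumerator is stateful, so a client drains it with
// getNext() and can rewind it with reset().
inline uint32_t countStreamRecordsExample(IPDBDataStream &Stream) {
  IPDBDataStream::RecordType Record;
  uint32_t N = 0;
  Stream.reset();
  while (Stream.getNext(Record))
    ++N;
  return N;
}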

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H

// File: DebugInfo/PDB/PDBSymbolCompilandEnv.h
//===- PDBSymbolCompilandEnv.h - compiland environment variables *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {
class PDBSymbolCompilandEnv : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandEnv)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getName)
  std::string getValue() const;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H

// File: DebugInfo/PDB/GenericError.h
//===- GenericError.h - system_error extensions for PDB ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_GENERICERROR_H
#define LLVM_DEBUGINFO_PDB_GENERICERROR_H

#include "llvm/Support/Error.h"

namespace llvm {
namespace pdb {

enum class pdb_error_code {
  invalid_utf8_path = 1,
  dia_sdk_not_present,
  dia_failed_loading,
  signature_out_of_date,
  no_matching_pch,
  unspecified,
};
} // namespace pdb
} // namespace llvm

namespace std {
template <>
struct is_error_code_enum<llvm::pdb::pdb_error_code> : std::true_type {};
} // namespace std

namespace llvm {
namespace pdb {
const std::error_category &PDBErrCategory();

inline std::error_code make_error_code(pdb_error_code E) {
  return std::error_code(static_cast<int>(E), PDBErrCategory());
}

/// Base class for errors originating when parsing raw PDB files
class PDBError : public ErrorInfo<PDBError, StringError> {
public:
  using ErrorInfo<PDBError, StringError>::ErrorInfo; // inherit constructors
  PDBError(const Twine &S) : ErrorInfo(S, pdb_error_code::unspecified) {}
  static char ID;
};
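
// Illustrative sketch (not part of the original header; the function name is
// hypothetical): the Twine constructor declared above tags the error with
// pdb_error_code::unspecified.
inline Error pdbErrorUsageExample() {
  return make_error<PDBError>("PDB signature does not match the executable");
}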
} // namespace pdb
} // namespace llvm
#endif

// File: DebugInfo/PDB/Native/NativePublicSymbol.h
//===- NativePublicSymbol.h - info about public symbols ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEPUBLICSYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEPUBLICSYMBOL_H

#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"

namespace llvm {

class raw_ostream;
namespace pdb {
class NativeSession;

class NativePublicSymbol : public NativeRawSymbol {
public:
  NativePublicSymbol(NativeSession &Session, SymIndexId Id,
                     const codeview::PublicSym32 &Sym);

  ~NativePublicSymbol() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  uint32_t getAddressOffset() const override;
  uint32_t getAddressSection() const override;
  std::string getName() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint64_t getVirtualAddress() const override;

protected:
  const codeview::PublicSym32 Sym;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVEPUBLICSYMBOL_H

// File: DebugInfo/PDB/Native/NativeTypeVTShape.h
//===- NativeTypeVTShape.h - info about virtual table shape ------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEVTSHAPE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEVTSHAPE_H

#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {
class NativeSession;

class NativeTypeVTShape : public NativeRawSymbol {
public:
  // Create a pointer record for a non-simple type.
  NativeTypeVTShape(NativeSession &Session, SymIndexId Id,
                    codeview::TypeIndex TI, codeview::VFTableShapeRecord SR);

  ~NativeTypeVTShape() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  bool isConstType() const override;
  bool isVolatileType() const override;
  bool isUnalignedType() const override;
  uint32_t getCount() const override;

protected:
  codeview::TypeIndex TI;
  codeview::VFTableShapeRecord Record;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEVTSHAPE_H

// File: DebugInfo/PDB/Native/NativeEnumGlobals.h
//==- NativeEnumGlobals.h - Native Global Enumerator impl --------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMGLOBALS_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMGLOBALS_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"

#include <vector>

namespace llvm {
namespace pdb {

class NativeSession;

class NativeEnumGlobals : public IPDBEnumChildren<PDBSymbol> {
public:
  NativeEnumGlobals(NativeSession &Session,
                    std::vector<codeview::SymbolKind> Kinds);

  uint32_t getChildCount() const override;
  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<PDBSymbol> getNext() override;
  void reset() override;

private:
  std::vector<uint32_t> MatchOffsets;
  uint32_t Index;
  NativeSession &Session;
};

} // namespace pdb
} // namespace llvm

#endif

// File: DebugInfo/PDB/Native/NativeTypeUDT.h
//===- NativeTypeUDT.h - info about class/struct type ------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEUDT_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEUDT_H

#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {

class raw_ostream;
namespace pdb {
class NativeSession;

class NativeTypeUDT : public NativeRawSymbol {
public:
  NativeTypeUDT(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
                codeview::ClassRecord Class);

  NativeTypeUDT(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
                codeview::UnionRecord Union);

  NativeTypeUDT(NativeSession &Session, SymIndexId Id,
                NativeTypeUDT &UnmodifiedType,
                codeview::ModifierRecord Modifier);

  ~NativeTypeUDT() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  std::string getName() const override;
  SymIndexId getLexicalParentId() const override;
  SymIndexId getUnmodifiedTypeId() const override;
  SymIndexId getVirtualTableShapeId() const override;
  uint64_t getLength() const override;
  PDB_UdtType getUdtKind() const override;
  bool hasConstructor() const override;
  bool isConstType() const override;
  bool hasAssignmentOperator() const override;
  bool hasCastOperator() const override;
  bool hasNestedTypes() const override;
  bool hasOverloadedOperator() const override;
  bool isInterfaceUdt() const override;
  bool isIntrinsic() const override;
  bool isNested() const override;
  bool isPacked() const override;
  bool isRefUdt() const override;
  bool isScoped() const override;
  bool isValueUdt() const override;
  bool isUnalignedType() const override;
  bool isVolatileType() const override;

protected:
  codeview::TypeIndex Index;

  std::optional<codeview::ClassRecord> Class;
  std::optional<codeview::UnionRecord> Union;
  NativeTypeUDT *UnmodifiedType = nullptr;
  codeview::TagRecord *Tag = nullptr;
  std::optional<codeview::ModifierRecord> Modifiers;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEUDT_H

// File: DebugInfo/PDB/Native/NativeExeSymbol.h
//===- NativeExeSymbol.h - native impl for PDBSymbolExe ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEEXESYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEEXESYMBOL_H

#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {

class NativeSession;

class DbiStream;

class NativeExeSymbol : public NativeRawSymbol {
  // EXE symbol is the authority on the various symbol types.
  DbiStream *Dbi = nullptr;

public:
  NativeExeSymbol(NativeSession &Session, SymIndexId Id);

  std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type) const override;

  uint32_t getAge() const override;
  std::string getSymbolsFileName() const override;
  codeview::GUID getGuid() const override;
  bool hasCTypes() const override;
  bool hasPrivateSymbols() const override;
};

} // namespace pdb
} // namespace llvm

#endif

// File: DebugInfo/PDB/Native/SymbolCache.h
//==- SymbolCache.h - Cache of native symbols and ids ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLCACHE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLCACHE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/Line.h"
#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeSourceFile.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

#include <memory>
#include <vector>

namespace llvm {
namespace codeview {
class InlineSiteSym;
struct FileChecksumEntry;
} // namespace codeview
namespace pdb {
class IPDBSourceFile;
class NativeSession;
class PDBSymbol;
class PDBSymbolCompiland;
class DbiStream;

class SymbolCache {
  NativeSession &Session;
  DbiStream *Dbi = nullptr;

  /// Cache of all stable symbols, indexed by SymIndexId.  Just because a
  /// symbol has been parsed does not imply that it will be stable and have
  /// an Id.  Id allocation is an implementation detail, with the only
  /// guarantee being that once an Id is allocated, the symbol can be assumed
  /// to be cached.
  mutable std::vector<std::unique_ptr<NativeRawSymbol>> Cache;

  /// For type records from the TPI stream which have been parsed and cached,
  /// stores a mapping to the SymIndexId of the cached symbol.
  mutable DenseMap<codeview::TypeIndex, SymIndexId> TypeIndexToSymbolId;

  /// For field list members which have been parsed and cached, stores a mapping
  /// from (IndexOfClass, MemberIndex) to the corresponding SymIndexId of the
  /// cached symbol.
  mutable DenseMap<std::pair<codeview::TypeIndex, uint32_t>, SymIndexId>
      FieldListMembersToSymbolId;

  /// List of SymIndexIds for each compiland, indexed by compiland index as they
  /// appear in the PDB file.
  mutable std::vector<SymIndexId> Compilands;

  /// List of source files, indexed by unique source file index.
  mutable std::vector<std::unique_ptr<NativeSourceFile>> SourceFiles;

  /// Map from string table offset to source file Id.
  mutable DenseMap<uint32_t, SymIndexId> FileNameOffsetToId;

  /// Map from global symbol offset to SymIndexId.
  mutable DenseMap<uint32_t, SymIndexId> GlobalOffsetToSymbolId;

  /// Map from segment and code offset to function symbols.
  mutable DenseMap<std::pair<uint32_t, uint32_t>, SymIndexId> AddressToSymbolId;
  /// Map from segment and code offset to public symbols.
  mutable DenseMap<std::pair<uint32_t, uint32_t>, SymIndexId>
      AddressToPublicSymId;

  /// Map from module index and symbol table offset to SymIndexId.
  mutable DenseMap<std::pair<uint16_t, uint32_t>, SymIndexId>
      SymTabOffsetToSymbolId;

  struct LineTableEntry {
    uint64_t Addr;
    codeview::LineInfo Line;
    uint32_t ColumnNumber;
    uint32_t FileNameIndex;
    bool IsTerminalEntry;
  };

  std::vector<LineTableEntry> findLineTable(uint16_t Modi) const;
  mutable DenseMap<uint16_t, std::vector<LineTableEntry>> LineTable;

  SymIndexId createSymbolPlaceholder() const {
    SymIndexId Id = Cache.size();
    Cache.push_back(nullptr);
    return Id;
  }

  template <typename ConcreteSymbolT, typename CVRecordT, typename... Args>
  SymIndexId createSymbolForType(codeview::TypeIndex TI, codeview::CVType CVT,
                                 Args &&...ConstructorArgs) const {
    CVRecordT Record;
    if (auto EC =
            codeview::TypeDeserializer::deserializeAs<CVRecordT>(CVT, Record)) {
      consumeError(std::move(EC));
      return 0;
    }

    return createSymbol<ConcreteSymbolT>(
        TI, std::move(Record), std::forward<Args>(ConstructorArgs)...);
  }

  SymIndexId createSymbolForModifiedType(codeview::TypeIndex ModifierTI,
                                         codeview::CVType CVT) const;

  SymIndexId createSimpleType(codeview::TypeIndex TI,
                              codeview::ModifierOptions Mods) const;

  std::unique_ptr<PDBSymbol> findFunctionSymbolBySectOffset(uint32_t Sect,
                                                            uint32_t Offset);
  std::unique_ptr<PDBSymbol> findPublicSymbolBySectOffset(uint32_t Sect,
                                                          uint32_t Offset);

public:
  SymbolCache(NativeSession &Session, DbiStream *Dbi);

  template <typename ConcreteSymbolT, typename... Args>
  SymIndexId createSymbol(Args &&...ConstructorArgs) const {
    SymIndexId Id = Cache.size();

    // Initial construction must not access the cache, since it must be done
    // atomically.
    auto Result = std::make_unique<ConcreteSymbolT>(
        Session, Id, std::forward<Args>(ConstructorArgs)...);
    Result->SymbolId = Id;

    NativeRawSymbol *NRS = static_cast<NativeRawSymbol *>(Result.get());
    Cache.push_back(std::move(Result));

    // After the item is in the cache, we can do further initialization which
    // is then allowed to access the cache.
    NRS->initialize();
    return Id;
  }

  std::unique_ptr<IPDBEnumSymbols>
  createTypeEnumerator(codeview::TypeLeafKind Kind);

  std::unique_ptr<IPDBEnumSymbols>
  createTypeEnumerator(std::vector<codeview::TypeLeafKind> Kinds);

  std::unique_ptr<IPDBEnumSymbols>
  createGlobalsEnumerator(codeview::SymbolKind Kind);

  SymIndexId findSymbolByTypeIndex(codeview::TypeIndex TI) const;

  template <typename ConcreteSymbolT, typename... Args>
  SymIndexId getOrCreateFieldListMember(codeview::TypeIndex FieldListTI,
                                        uint32_t Index,
                                        Args &&... ConstructorArgs) {
    SymIndexId SymId = Cache.size();
    std::pair<codeview::TypeIndex, uint32_t> Key{FieldListTI, Index};
    auto Result = FieldListMembersToSymbolId.try_emplace(Key, SymId);
    if (Result.second)
      SymId =
          createSymbol<ConcreteSymbolT>(std::forward<Args>(ConstructorArgs)...);
    else
      SymId = Result.first->second;
    return SymId;
  }

  SymIndexId getOrCreateGlobalSymbolByOffset(uint32_t Offset);
  SymIndexId getOrCreateInlineSymbol(codeview::InlineSiteSym Sym,
                                     uint64_t ParentAddr, uint16_t Modi,
                                     uint32_t RecordOffset) const;

  std::unique_ptr<PDBSymbol>
  findSymbolBySectOffset(uint32_t Sect, uint32_t Offset, PDB_SymType Type);

  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByVA(uint64_t VA, uint32_t Length) const;

  std::unique_ptr<PDBSymbolCompiland> getOrCreateCompiland(uint32_t Index);
  uint32_t getNumCompilands() const;

  std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const;

  NativeRawSymbol &getNativeSymbolById(SymIndexId SymbolId) const;

  template <typename ConcreteT>
  ConcreteT &getNativeSymbolById(SymIndexId SymbolId) const {
    return static_cast<ConcreteT &>(getNativeSymbolById(SymbolId));
  }

  std::unique_ptr<IPDBSourceFile> getSourceFileById(SymIndexId FileId) const;
  SymIndexId
  getOrCreateSourceFile(const codeview::FileChecksumEntry &Checksum) const;
};

} // namespace pdb
} // namespace llvm

#endif

// File: DebugInfo/PDB/Native/FormatUtil.h
//===- FormatUtil.h ------------------------------------------- *- C++ --*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_FORMATUTIL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_FORMATUTIL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"

#include <string>
#include <type_traits>

namespace llvm {
namespace pdb {

#define PUSH_MASKED_FLAG(Enum, Mask, TheOpt, Value, Text)                      \
  if (Enum::TheOpt == (Value & Mask))                                          \
    Opts.push_back(Text);

#define PUSH_FLAG(Enum, TheOpt, Value, Text)                                   \
  PUSH_MASKED_FLAG(Enum, Enum::TheOpt, TheOpt, Value, Text)

#define RETURN_CASE(Enum, X, Ret)                                              \
  case Enum::X:                                                                \
    return Ret;

template <typename T> std::string formatUnknownEnum(T Value) {
  return formatv("unknown ({0})", static_cast<std::underlying_type_t<T>>(Value))
      .str();
}

std::string formatSegmentOffset(uint16_t Segment, uint32_t Offset);

enum class CharacteristicStyle {
  HeaderDefinition, // format as windows header definition
  Descriptive,      // format as human readable words
};
std::string formatSectionCharacteristics(
    uint32_t IndentLevel, uint32_t C, uint32_t FlagsPerLine,
    StringRef Separator,
    CharacteristicStyle Style = CharacteristicStyle::HeaderDefinition);

std::string typesetItemList(ArrayRef<std::string> Opts, uint32_t IndentLevel,
                            uint32_t GroupSize, StringRef Sep);

std::string typesetStringList(uint32_t IndentLevel,
                              ArrayRef<StringRef> Strings);

std::string formatChunkKind(codeview::DebugSubsectionKind Kind,
                            bool Friendly = true);
std::string formatSymbolKind(codeview::SymbolKind K);
std::string formatTypeLeafKind(codeview::TypeLeafKind K);

/// Returns the number of digits in the given integer.
inline int NumDigits(uint64_t N) {
  if (N < 10ULL)
    return 1;
  if (N < 100ULL)
    return 2;
  if (N < 1000ULL)
    return 3;
  if (N < 10000ULL)
    return 4;
  if (N < 100000ULL)
    return 5;
  if (N < 1000000ULL)
    return 6;
  if (N < 10000000ULL)
    return 7;
  if (N < 100000000ULL)
    return 8;
  if (N < 1000000000ULL)
    return 9;
  if (N < 10000000000ULL)
    return 10;
  if (N < 100000000000ULL)
    return 11;
  if (N < 1000000000000ULL)
    return 12;
  if (N < 10000000000000ULL)
    return 13;
  if (N < 100000000000000ULL)
    return 14;
  if (N < 1000000000000000ULL)
    return 15;
  if (N < 10000000000000000ULL)
    return 16;
  if (N < 100000000000000000ULL)
    return 17;
  if (N < 1000000000000000000ULL)
    return 18;
  if (N < 10000000000000000000ULL)
    return 19;
  return 20;
}
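
// Illustrative usage (not part of the original header):
//   NumDigits(0) == 1, NumDigits(999) == 3, NumDigits(UINT64_MAX) == 20.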

namespace detail {
template <typename T>
struct EndianAdapter final
    : public FormatAdapter<support::detail::packed_endian_specific_integral<
          T, support::little, support::unaligned>> {
  using EndianType =
      support::detail::packed_endian_specific_integral<T, support::little,
                                                       support::unaligned>;

  explicit EndianAdapter(EndianType &&Item)
      : FormatAdapter<EndianType>(std::move(Item)) {}

  void format(llvm::raw_ostream &Stream, StringRef Style) override {
    format_provider<T>::format(static_cast<T>(this->Item), Stream, Style);
  }
};
} // namespace detail

template <typename T>
detail::EndianAdapter<T>
fmtle(support::detail::packed_endian_specific_integral<T, support::little,
                                                       support::unaligned>
          Value) {
  return detail::EndianAdapter<T>(std::move(Value));
}
} // namespace pdb
} // namespace llvm
#endif

// File: DebugInfo/PDB/Native/DbiStream.h
//===- DbiStream.h - PDB Dbi Stream (Stream 3) Access -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_DBISTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_DBISTREAM_H

#include "llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

namespace llvm {
class BinaryStream;
namespace object {
struct FpoData;
struct coff_section;
}
namespace msf {
class MappedBlockStream;
}
namespace pdb {
struct DbiStreamHeader;
struct SecMapEntry;
struct SectionContrib2;
struct SectionContrib;
class PDBFile;
class ISectionContribVisitor;

class DbiStream {
  friend class DbiStreamBuilder;

public:
  explicit DbiStream(std::unique_ptr<BinaryStream> Stream);
  ~DbiStream();
  Error reload(PDBFile *Pdb);

  PdbRaw_DbiVer getDbiVersion() const;
  uint32_t getAge() const;
  uint16_t getPublicSymbolStreamIndex() const;
  uint16_t getGlobalSymbolStreamIndex() const;

  uint16_t getFlags() const;
  bool isIncrementallyLinked() const;
  bool hasCTypes() const;
  bool isStripped() const;

  uint16_t getBuildNumber() const;
  uint16_t getBuildMajorVersion() const;
  uint16_t getBuildMinorVersion() const;

  uint16_t getPdbDllRbld() const;
  uint32_t getPdbDllVersion() const;

  uint32_t getSymRecordStreamIndex() const;

  PDB_Machine getMachineType() const;

  const DbiStreamHeader *getHeader() const { return Header; }

  BinarySubstreamRef getSectionContributionData() const;
  BinarySubstreamRef getSecMapSubstreamData() const;
  BinarySubstreamRef getModiSubstreamData() const;
  BinarySubstreamRef getFileInfoSubstreamData() const;
  BinarySubstreamRef getTypeServerMapSubstreamData() const;
  BinarySubstreamRef getECSubstreamData() const;

  /// If the given stream type is present, returns its stream index. If it is
  /// not present, returns InvalidStreamIndex.
  uint32_t getDebugStreamIndex(DbgHeaderType Type) const;

  const DbiModuleList &modules() const;

  FixedStreamArray<object::coff_section> getSectionHeaders() const;

  bool hasOldFpoRecords() const;
  FixedStreamArray<object::FpoData> getOldFpoRecords() const;
  bool hasNewFpoRecords() const;
  const codeview::DebugFrameDataSubsectionRef &getNewFpoRecords() const;

  FixedStreamArray<SecMapEntry> getSectionMap() const;
  void visitSectionContributions(ISectionContribVisitor &Visitor) const;

  Expected<StringRef> getECName(uint32_t NI) const;

private:
  Error initializeSectionContributionData();
  Error initializeSectionHeadersData(PDBFile *Pdb);
  Error initializeSectionMapData();
  Error initializeOldFpoRecords(PDBFile *Pdb);
  Error initializeNewFpoRecords(PDBFile *Pdb);

  Expected<std::unique_ptr<msf::MappedBlockStream>>
  createIndexedStreamForHeaderType(PDBFile *Pdb, DbgHeaderType Type) const;

  std::unique_ptr<BinaryStream> Stream;

  PDBStringTable ECNames;

  BinarySubstreamRef SecContrSubstream;
  BinarySubstreamRef SecMapSubstream;
  BinarySubstreamRef ModiSubstream;
  BinarySubstreamRef FileInfoSubstream;
  BinarySubstreamRef TypeServerMapSubstream;
  BinarySubstreamRef ECSubstream;

  DbiModuleList Modules;

  FixedStreamArray<support::ulittle16_t> DbgStreams;

  PdbRaw_DbiSecContribVer SectionContribVersion =
      PdbRaw_DbiSecContribVer::DbiSecContribVer60;
  FixedStreamArray<SectionContrib> SectionContribs;
  FixedStreamArray<SectionContrib2> SectionContribs2;
  FixedStreamArray<SecMapEntry> SectionMap;

  std::unique_ptr<msf::MappedBlockStream> SectionHeaderStream;
  FixedStreamArray<object::coff_section> SectionHeaders;

  std::unique_ptr<msf::MappedBlockStream> OldFpoStream;
  FixedStreamArray<object::FpoData> OldFpoRecords;
  
  std::unique_ptr<msf::MappedBlockStream> NewFpoStream;
  codeview::DebugFrameDataSubsectionRef NewFpoRecords;

  const DbiStreamHeader *Header;
};
}
}

#endif

// File: DebugInfo/PDB/Native/GlobalsStream.h
//===- GlobalsStream.h - PDB Index of Symbols by Name -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_GLOBALSSTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_GLOBALSSTREAM_H

#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

namespace llvm {
class BinaryStreamReader;
namespace msf {
class MappedBlockStream;
}
namespace pdb {
class SymbolStream;

/// Iterator over hash records producing symbol record offsets. Abstracts away
/// the fact that symbol record offsets on disk are off-by-one.
class GSIHashIterator
    : public iterator_adaptor_base<
          GSIHashIterator, FixedStreamArrayIterator<PSHashRecord>,
          std::random_access_iterator_tag, const uint32_t> {
public:
  template <typename T>
  GSIHashIterator(T &&v)
      : GSIHashIterator::iterator_adaptor_base(std::forward<T &&>(v)) {}

  uint32_t operator*() const {
    uint32_t Off = this->I->Off;
    return --Off;
  }
};

/// From https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.cpp
enum : unsigned { IPHR_HASH = 4096 };

/// A readonly view of a hash table used in the globals and publics streams.
/// Most clients will only want to iterate this to get symbol record offsets
/// into the PDB symbol stream.
class GSIHashTable {
public:
  const GSIHashHeader *HashHdr;
  FixedStreamArray<PSHashRecord> HashRecords;
  FixedStreamArray<support::ulittle32_t> HashBitmap;
  FixedStreamArray<support::ulittle32_t> HashBuckets;
  std::array<int32_t, IPHR_HASH + 1> BucketMap;

  Error read(BinaryStreamReader &Reader);

  uint32_t getVerSignature() const { return HashHdr->VerSignature; }
  uint32_t getVerHeader() const { return HashHdr->VerHdr; }
  uint32_t getHashRecordSize() const { return HashHdr->HrSize; }
  uint32_t getNumBuckets() const { return HashHdr->NumBuckets; }

  typedef GSIHashIterator iterator;
  GSIHashIterator begin() const { return GSIHashIterator(HashRecords.begin()); }
  GSIHashIterator end() const { return GSIHashIterator(HashRecords.end()); }
};
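
// Example (sketch): once read() has populated a GSIHashTable `Table`, a
// range-for yields ready-to-use symbol record offsets, with the on-disk
// off-by-one already corrected by GSIHashIterator:
//
//   for (uint32_t RecordOffset : Table) {
//     // RecordOffset is an offset into the PDB symbol record stream.
//   }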

class GlobalsStream {
public:
  explicit GlobalsStream(std::unique_ptr<msf::MappedBlockStream> Stream);
  ~GlobalsStream();
  const GSIHashTable &getGlobalsTable() const { return GlobalsTable; }
  Error reload();

  std::vector<std::pair<uint32_t, codeview::CVSymbol>>
  findRecordsByName(StringRef Name, const SymbolStream &Symbols) const;

private:
  GSIHashTable GlobalsTable;
  std::unique_ptr<msf::MappedBlockStream> Stream;
};
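
// Example (sketch): name lookup through the globals table, assuming `Globals`
// is a reloaded GlobalsStream and `Symbols` is the PDB's symbol stream:
//
//   for (const auto &Match : Globals.findRecordsByName("main", Symbols)) {
//     uint32_t Offset = Match.first;           // offset in the symbol stream
//     const codeview::CVSymbol &Sym = Match.second;
//     // ... use Offset and Sym ...
//   }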
} // namespace pdb
} // namespace llvm

#endif

//==- NativeEnumModules.h - Native Module Enumerator impl --------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMMODULES_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMMODULES_H

#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
namespace llvm {
namespace pdb {

class NativeSession;

class NativeEnumModules : public IPDBEnumChildren<PDBSymbol> {
public:
  NativeEnumModules(NativeSession &Session, uint32_t Index = 0);

  uint32_t getChildCount() const override;
  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<PDBSymbol> getNext() override;
  void reset() override;

private:
  NativeSession &Session;
  uint32_t Index;
};
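
// Example (sketch): like other IPDBEnumChildren implementations, the
// enumerator is drained with getNext(), which returns null when exhausted,
// and can be rewound with reset().  Assuming `Session` is a live
// NativeSession:
//
//   NativeEnumModules Modules(Session);
//   while (std::unique_ptr<PDBSymbol> Mod = Modules.getNext()) {
//     // ... use *Mod ...
//   }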
} // namespace pdb
} // namespace llvm

#endif

//===- NativeInlineSiteSymbol.h - info about inline sites -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEINLINESITESYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEINLINESITESYMBOL_H

#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {

class NativeSession;

class NativeInlineSiteSymbol : public NativeRawSymbol {
public:
  NativeInlineSiteSymbol(NativeSession &Session, SymIndexId Id,
                         const codeview::InlineSiteSym &Sym,
                         uint64_t ParentAddr);

  ~NativeInlineSiteSymbol() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  std::string getName() const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByVA(uint64_t VA, uint32_t Length) const override;

private:
  const codeview::InlineSiteSym Sym;
  uint64_t ParentAddr;

  void getLineOffset(uint32_t OffsetInFunc, uint32_t &LineOffset,
                     uint32_t &FileOffset) const;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVEINLINESITESYMBOL_H

//===- NativeTypeFunctionSig.h - info about function signature ---*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEFUNCTIONSIG_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEFUNCTIONSIG_H

#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {

class NativeTypeFunctionSig : public NativeRawSymbol {
protected:
  void initialize() override;

public:
  NativeTypeFunctionSig(NativeSession &Session, SymIndexId Id,
                        codeview::TypeIndex TI, codeview::ProcedureRecord Proc);

  NativeTypeFunctionSig(NativeSession &Session, SymIndexId Id,
                        codeview::TypeIndex TI,
                        codeview::MemberFunctionRecord MemberFunc);

  ~NativeTypeFunctionSig() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type) const override;

  SymIndexId getClassParentId() const override;
  PDB_CallingConv getCallingConvention() const override;
  uint32_t getCount() const override;
  SymIndexId getTypeId() const override;
  int32_t getThisAdjust() const override;
  bool hasConstructor() const override;
  bool isConstType() const override;
  bool isConstructorVirtualBase() const override;
  bool isCxxReturnUdt() const override;
  bool isUnalignedType() const override;
  bool isVolatileType() const override;

private:
  void initializeArgList(codeview::TypeIndex ArgListTI);

  union {
    codeview::MemberFunctionRecord MemberFunc;
    codeview::ProcedureRecord Proc;
  };

  SymIndexId ClassParentId = 0;
  codeview::TypeIndex Index;
  codeview::ArgListRecord ArgList;
  bool IsMemberFunction = false;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEFUNCTIONSIG_H

//===- RawError.h - Error extensions for raw PDB implementation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_RAWERROR_H
#define LLVM_DEBUGINFO_PDB_NATIVE_RAWERROR_H

#include "llvm/Support/Error.h"

namespace llvm {
namespace pdb {
enum class raw_error_code {
  unspecified = 1,
  feature_unsupported,
  invalid_format,
  corrupt_file,
  insufficient_buffer,
  no_stream,
  index_out_of_bounds,
  invalid_block_address,
  duplicate_entry,
  no_entry,
  not_writable,
  stream_too_long,
  invalid_tpi_hash,
};
} // namespace pdb
} // namespace llvm

namespace std {
template <>
struct is_error_code_enum<llvm::pdb::raw_error_code> : std::true_type {};
} // namespace std

namespace llvm {
namespace pdb {
const std::error_category &RawErrCategory();

inline std::error_code make_error_code(raw_error_code E) {
  return std::error_code(static_cast<int>(E), RawErrCategory());
}

/// Base class for errors originating when parsing raw PDB files
class RawError : public ErrorInfo<RawError, StringError> {
public:
  using ErrorInfo<RawError, StringError>::ErrorInfo; // inherit constructors
  RawError(const Twine &S) : ErrorInfo(S, raw_error_code::unspecified) {}
  static char ID;
};
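
// Example (sketch): raw_error_code plugs into the llvm::Error machinery, so
// a parser for some raw PDB structure would typically fail like this
// (parseHeader is a hypothetical function, not part of this header):
//
//   Error parseHeader(bool Valid) {
//     if (!Valid)
//       return make_error<RawError>(raw_error_code::corrupt_file,
//                                   "stream header is malformed");
//     return Error::success();
//   }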
} // namespace pdb
} // namespace llvm
#endif

//===- NativeFunctionSymbol.h - info about function symbols -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEFUNCTIONSYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEFUNCTIONSYMBOL_H

#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
class raw_ostream;
namespace pdb {

class NativeSession;

class NativeFunctionSymbol : public NativeRawSymbol {
public:
  NativeFunctionSymbol(NativeSession &Session, SymIndexId Id,
                       const codeview::ProcSym &Sym, uint32_t RecordOffset);

  ~NativeFunctionSymbol() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  uint32_t getAddressOffset() const override;
  uint32_t getAddressSection() const override;
  std::string getName() const override;
  uint64_t getLength() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint64_t getVirtualAddress() const override;
  std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByVA(uint64_t VA) const override;

protected:
  const codeview::ProcSym Sym;
  uint32_t RecordOffset = 0;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVEFUNCTIONSYMBOL_H

//===- NativeTypeTypedef.h - info about typedef ------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPETYPEDEF_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPETYPEDEF_H

#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {

class raw_ostream;

namespace pdb {

class NativeSession;

class NativeTypeTypedef : public NativeRawSymbol {
public:
  // Create a symbol for a typedef backed by a codeview UDT record.
  NativeTypeTypedef(NativeSession &Session, SymIndexId Id,
                    codeview::UDTSym Typedef);

  ~NativeTypeTypedef() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  std::string getName() const override;
  SymIndexId getTypeId() const override;

protected:
  codeview::UDTSym Record;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPETYPEDEF_H

//===- LinePrinter.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_LINEPRINTER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_LINEPRINTER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/DebugInfo/PDB/Native/FormatUtil.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/raw_ostream.h"

#include <list>
#include <optional>
#include <string>

// Container for filter options to control which elements will be printed.
struct FilterOptions {
  std::list<std::string> ExcludeTypes;
  std::list<std::string> ExcludeSymbols;
  std::list<std::string> ExcludeCompilands;
  std::list<std::string> IncludeTypes;
  std::list<std::string> IncludeSymbols;
  std::list<std::string> IncludeCompilands;
  uint32_t PaddingThreshold;
  uint32_t SizeThreshold;
  std::optional<uint32_t> DumpModi;
  std::optional<uint32_t> ParentRecurseDepth;
  std::optional<uint32_t> ChildrenRecurseDepth;
  std::optional<uint32_t> SymbolOffset;
  bool JustMyCode;
};

namespace llvm {
namespace msf {
class MSFStreamLayout;
} // namespace msf
namespace pdb {

class ClassLayout;
class PDBFile;
class SymbolGroup;

class LinePrinter {
  friend class WithColor;

public:
  LinePrinter(int Indent, bool UseColor, raw_ostream &Stream,
              const FilterOptions &Filters);

  void Indent(uint32_t Amount = 0);
  void Unindent(uint32_t Amount = 0);
  void NewLine();

  void printLine(const Twine &T);
  void print(const Twine &T);
  template <typename... Ts> void formatLine(const char *Fmt, Ts &&...Items) {
    printLine(formatv(Fmt, std::forward<Ts>(Items)...));
  }
  template <typename... Ts> void format(const char *Fmt, Ts &&...Items) {
    print(formatv(Fmt, std::forward<Ts>(Items)...));
  }

  void formatBinary(StringRef Label, ArrayRef<uint8_t> Data,
                    uint64_t StartOffset);
  void formatBinary(StringRef Label, ArrayRef<uint8_t> Data, uint64_t BaseAddr,
                    uint64_t StartOffset);

  void formatMsfStreamData(StringRef Label, PDBFile &File, uint32_t StreamIdx,
                           StringRef StreamPurpose, uint64_t Offset,
                           uint64_t Size);
  void formatMsfStreamData(StringRef Label, PDBFile &File,
                           const msf::MSFStreamLayout &Stream,
                           BinarySubstreamRef Substream);
  void formatMsfStreamBlocks(PDBFile &File, const msf::MSFStreamLayout &Stream);

  bool hasColor() const { return UseColor; }
  raw_ostream &getStream() { return OS; }
  int getIndentLevel() const { return CurrentIndent; }

  bool IsClassExcluded(const ClassLayout &Class);
  bool IsTypeExcluded(llvm::StringRef TypeName, uint64_t Size);
  bool IsSymbolExcluded(llvm::StringRef SymbolName);
  bool IsCompilandExcluded(llvm::StringRef CompilandName);

  const FilterOptions &getFilters() const { return Filters; }

private:
  template <typename Iter>
  void SetFilters(std::list<Regex> &List, Iter Begin, Iter End) {
    List.clear();
    for (; Begin != End; ++Begin)
      List.emplace_back(StringRef(*Begin));
  }

  raw_ostream &OS;
  int IndentSpaces;
  int CurrentIndent;
  bool UseColor;
  const FilterOptions &Filters;

  std::list<Regex> ExcludeCompilandFilters;
  std::list<Regex> ExcludeTypeFilters;
  std::list<Regex> ExcludeSymbolFilters;

  std::list<Regex> IncludeCompilandFilters;
  std::list<Regex> IncludeTypeFilters;
  std::list<Regex> IncludeSymbolFilters;
};

struct PrintScope {
  explicit PrintScope(LinePrinter &P, uint32_t IndentLevel)
      : P(P), IndentLevel(IndentLevel) {}
  explicit PrintScope(const PrintScope &Other, uint32_t LabelWidth)
      : P(Other.P), IndentLevel(Other.IndentLevel), LabelWidth(LabelWidth) {}

  LinePrinter &P;
  uint32_t IndentLevel;
  uint32_t LabelWidth = 0;
};

inline PrintScope withLabelWidth(const PrintScope &Scope, uint32_t W) {
  return PrintScope{Scope, W};
}

struct AutoIndent {
  explicit AutoIndent(LinePrinter &L, uint32_t Amount = 0)
      : L(&L), Amount(Amount) {
    L.Indent(Amount);
  }
  explicit AutoIndent(const PrintScope &Scope) {
    L = &Scope.P;
    Amount = Scope.IndentLevel;
  }
  ~AutoIndent() {
    if (L)
      L->Unindent(Amount);
  }

  LinePrinter *L = nullptr;
  uint32_t Amount = 0;
};
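
// Example (sketch): PrintScope and AutoIndent give RAII control over the
// printer's indentation.  Assuming `P` is a LinePrinter and `Size` is some
// value in scope:
//
//   P.formatLine("Stream:");
//   {
//     AutoIndent Indent(P, 2);
//     P.formatLine("size = {0} bytes", Size); // printed two spaces deeper
//   } // the destructor unindents here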

template <class T>
inline raw_ostream &operator<<(LinePrinter &Printer, const T &Item) {
  return Printer.getStream() << Item;
}

enum class PDB_ColorItem {
  None,
  Address,
  Type,
  Comment,
  Padding,
  Keyword,
  Offset,
  Identifier,
  Path,
  SectionHeader,
  LiteralValue,
  Register,
};

class WithColor {
public:
  WithColor(LinePrinter &P, PDB_ColorItem C);
  ~WithColor();

  raw_ostream &get() { return OS; }

private:
  void applyColor(PDB_ColorItem C);
  raw_ostream &OS;
  bool UseColor;
};
} // namespace pdb
} // namespace llvm

#endif

//==- NativeEnumLineNumbers.h - Native Line Number Enumerator ------------*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMLINENUMBERS_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMLINENUMBERS_H

#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
#include "llvm/DebugInfo/PDB/Native/NativeLineNumber.h"
#include <vector>

namespace llvm {
namespace pdb {

class NativeEnumLineNumbers : public IPDBEnumChildren<IPDBLineNumber> {
public:
  explicit NativeEnumLineNumbers(std::vector<NativeLineNumber> LineNums);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  std::vector<NativeLineNumber> Lines;
  uint32_t Index;
};
} // namespace pdb
} // namespace llvm

#endif

//==- NativeEnumInjectedSources.h - Native Injected Source Enumerator ----*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMINJECTEDSOURCES_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMINJECTEDSOURCES_H

#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"
#include "llvm/DebugInfo/PDB/Native/InjectedSourceStream.h"

namespace llvm {
namespace pdb {

class InjectedSourceStream;
class PDBFile;
class PDBStringTable;

class NativeEnumInjectedSources : public IPDBEnumChildren<IPDBInjectedSource> {
public:
  NativeEnumInjectedSources(PDBFile &File, const InjectedSourceStream &IJS,
                            const PDBStringTable &Strings);

  uint32_t getChildCount() const override;
  std::unique_ptr<IPDBInjectedSource>
  getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<IPDBInjectedSource> getNext() override;
  void reset() override;

private:
  PDBFile &File;
  const InjectedSourceStream &Stream;
  const PDBStringTable &Strings;
  InjectedSourceStream::const_iterator Cur;
};

} // namespace pdb
} // namespace llvm

#endif

//===- PublicsStream.h - PDB Public Symbol Stream ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_PUBLICSSTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_PUBLICSSTREAM_H

#include "llvm/DebugInfo/PDB/Native/GlobalsStream.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace msf {
class MappedBlockStream;
}
namespace pdb {
struct PublicsStreamHeader;
struct SectionOffset;

class PublicsStream {
public:
  PublicsStream(std::unique_ptr<msf::MappedBlockStream> Stream);
  ~PublicsStream();
  Error reload();

  uint32_t getSymHash() const;
  uint16_t getThunkTableSection() const;
  uint32_t getThunkTableOffset() const;
  const GSIHashTable &getPublicsTable() const { return PublicsTable; }
  FixedStreamArray<support::ulittle32_t> getAddressMap() const {
    return AddressMap;
  }
  FixedStreamArray<support::ulittle32_t> getThunkMap() const {
    return ThunkMap;
  }
  FixedStreamArray<SectionOffset> getSectionOffsets() const {
    return SectionOffsets;
  }

private:
  std::unique_ptr<msf::MappedBlockStream> Stream;
  GSIHashTable PublicsTable;
  FixedStreamArray<support::ulittle32_t> AddressMap;
  FixedStreamArray<support::ulittle32_t> ThunkMap;
  FixedStreamArray<SectionOffset> SectionOffsets;

  const PublicsStreamHeader *Header;
};
} // namespace pdb
} // namespace llvm

#endif

//===- TpiStream.h - PDB Type Info (TPI) Stream 2 Access --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_TPISTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_TPISTREAM_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/PDB/Native/HashTable.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"

#include "llvm/Support/Error.h"

namespace llvm {
class BinaryStream;
namespace codeview {
class TypeIndex;
struct TypeIndexOffset;
class LazyRandomTypeCollection;
}
namespace msf {
class MappedBlockStream;
}
namespace pdb {
struct TpiStreamHeader;
class PDBFile;

class TpiStream {
  friend class TpiStreamBuilder;

public:
  TpiStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
  ~TpiStream();
  Error reload();

  PdbRaw_TpiVer getTpiVersion() const;

  uint32_t TypeIndexBegin() const;
  uint32_t TypeIndexEnd() const;
  uint32_t getNumTypeRecords() const;
  uint16_t getTypeHashStreamIndex() const;
  uint16_t getTypeHashStreamAuxIndex() const;

  uint32_t getHashKeySize() const;
  uint32_t getNumHashBuckets() const;
  FixedStreamArray<support::ulittle32_t> getHashValues() const;
  FixedStreamArray<codeview::TypeIndexOffset> getTypeIndexOffsets() const;
  HashTable<support::ulittle32_t> &getHashAdjusters();

  codeview::CVTypeRange types(bool *HadError) const;
  const codeview::CVTypeArray &typeArray() const { return TypeRecords; }

  codeview::LazyRandomTypeCollection &typeCollection() { return *Types; }

  Expected<codeview::TypeIndex>
  findFullDeclForForwardRef(codeview::TypeIndex ForwardRefTI) const;

  std::vector<codeview::TypeIndex> findRecordsByName(StringRef Name) const;

  codeview::CVType getType(codeview::TypeIndex Index);

  BinarySubstreamRef getTypeRecordsSubstream() const;

  Error commit();

  void buildHashMap();

  bool supportsTypeLookup() const;

private:
  PDBFile &Pdb;
  std::unique_ptr<msf::MappedBlockStream> Stream;

  std::unique_ptr<codeview::LazyRandomTypeCollection> Types;

  BinarySubstreamRef TypeRecordsSubstream;

  codeview::CVTypeArray TypeRecords;

  std::unique_ptr<BinaryStream> HashStream;
  FixedStreamArray<support::ulittle32_t> HashValues;
  FixedStreamArray<codeview::TypeIndexOffset> TypeIndexOffsets;
  HashTable<support::ulittle32_t> HashAdjusters;

  std::vector<std::vector<codeview::TypeIndex>> HashMap;

  const TpiStreamHeader *Header;
};
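
// Example (sketch): resolving a forward-referenced UDT to its full
// declaration, assuming `Tpi` is a reloaded TpiStream and `TI` is the
// TypeIndex of a forward reference:
//
//   Expected<codeview::TypeIndex> Full = Tpi.findFullDeclForForwardRef(TI);
//   if (!Full)
//     return Full.takeError();
//   codeview::CVType Record = Tpi.getType(*Full);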
} // namespace pdb
} // namespace llvm

#endif

//===- InjectedSourceStream.h - PDB Headerblock Stream Access ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_INJECTEDSOURCESTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_INJECTEDSOURCESTREAM_H

#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/HashTable.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace pdb {
struct SrcHeaderBlockEntry;
struct SrcHeaderBlockHeader;
class PDBStringTable;

class InjectedSourceStream {
public:
  InjectedSourceStream(std::unique_ptr<msf::MappedBlockStream> Stream);
  Error reload(const PDBStringTable &Strings);

  using const_iterator = HashTable<SrcHeaderBlockEntry>::const_iterator;
  const_iterator begin() const { return InjectedSourceTable.begin(); }
  const_iterator end() const { return InjectedSourceTable.end(); }

  uint32_t size() const { return InjectedSourceTable.size(); }

private:
  std::unique_ptr<msf::MappedBlockStream> Stream;

  const SrcHeaderBlockHeader* Header;
  HashTable<SrcHeaderBlockEntry> InjectedSourceTable;
};
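
// Example (sketch): the stream iterates as a hash table whose values are
// SrcHeaderBlockEntry records (assuming the usual pdb::HashTable key/value
// iteration).  With a reloaded InjectedSourceStream `IJS`:
//
//   for (const auto &KV : IJS) {
//     const SrcHeaderBlockEntry &Entry = KV.second;
//     // Entry.FileNI etc. are indices into the PDB string table.
//   }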
} // namespace pdb
} // namespace llvm

#endif

//===- NativeSession.h - Native implementation of IPDBSession ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESESSION_H

#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/DebugInfo/PDB/Native/SymbolCache.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"

namespace llvm {
class MemoryBuffer;
namespace pdb {
class PDBFile;
class NativeExeSymbol;
class IPDBSourceFile;
class ModuleDebugStreamRef;
class PDBSymbol;
class PDBSymbolCompiland;
class PDBSymbolExe;
template <typename ChildType> class IPDBEnumChildren;

class NativeSession : public IPDBSession {
  struct PdbSearchOptions {
    StringRef ExePath;
    // FIXME: Add other PDB search options (_NT_SYMBOL_PATH, symsrv)
  };

public:
  NativeSession(std::unique_ptr<PDBFile> PdbFile,
                std::unique_ptr<BumpPtrAllocator> Allocator);
  ~NativeSession() override;

  static Error createFromPdb(std::unique_ptr<MemoryBuffer> MB,
                             std::unique_ptr<IPDBSession> &Session);
  static Error createFromPdbPath(StringRef PdbPath,
                                 std::unique_ptr<IPDBSession> &Session);
  static Error createFromExe(StringRef Path,
                             std::unique_ptr<IPDBSession> &Session);
  static Expected<std::string> searchForPdb(const PdbSearchOptions &Opts);

  uint64_t getLoadAddress() const override;
  bool setLoadAddress(uint64_t Address) override;
  std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
  std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const override;

  bool addressForVA(uint64_t VA, uint32_t &Section,
                    uint32_t &Offset) const override;
  bool addressForRVA(uint32_t RVA, uint32_t &Section,
                     uint32_t &Offset) const override;

  std::unique_ptr<PDBSymbol> findSymbolByAddress(uint64_t Address,
                                                 PDB_SymType Type) override;
  std::unique_ptr<PDBSymbol> findSymbolByRVA(uint32_t RVA,
                                             PDB_SymType Type) override;
  std::unique_ptr<PDBSymbol> findSymbolBySectOffset(uint32_t Sect,
                                                    uint32_t Offset,
                                                    PDB_SymType Type) override;

  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbers(const PDBSymbolCompiland &Compiland,
                  const IPDBSourceFile &File) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByAddress(uint64_t Address, uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
                              uint32_t Length) const override;

  std::unique_ptr<IPDBEnumSourceFiles>
  findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
                  PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBSourceFile>
  findOneSourceFile(const PDBSymbolCompiland *Compiland,
                    llvm::StringRef Pattern,
                    PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
  findCompilandsForSourceFile(llvm::StringRef Pattern,
                              PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<PDBSymbolCompiland>
  findOneCompilandForSourceFile(llvm::StringRef Pattern,
                                PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const override;
  std::unique_ptr<IPDBEnumSourceFiles> getSourceFilesForCompiland(
      const PDBSymbolCompiland &Compiland) const override;
  std::unique_ptr<IPDBSourceFile>
  getSourceFileById(uint32_t FileId) const override;

  std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const override;

  std::unique_ptr<IPDBEnumTables> getEnumTables() const override;

  std::unique_ptr<IPDBEnumInjectedSources> getInjectedSources() const override;

  std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;

  std::unique_ptr<IPDBEnumFrameData> getFrameData() const override;

  PDBFile &getPDBFile() { return *Pdb; }
  const PDBFile &getPDBFile() const { return *Pdb; }

  NativeExeSymbol &getNativeGlobalScope() const;
  SymbolCache &getSymbolCache() { return Cache; }
  const SymbolCache &getSymbolCache() const { return Cache; }
  uint32_t getRVAFromSectOffset(uint32_t Section, uint32_t Offset) const;
  uint64_t getVAFromSectOffset(uint32_t Section, uint32_t Offset) const;
  bool moduleIndexForVA(uint64_t VA, uint16_t &ModuleIndex) const;
  bool moduleIndexForSectOffset(uint32_t Sect, uint32_t Offset,
                                uint16_t &ModuleIndex) const;
  Expected<ModuleDebugStreamRef> getModuleDebugStream(uint32_t Index) const;

private:
  void initializeExeSymbol();
  void parseSectionContribs();

  std::unique_ptr<PDBFile> Pdb;
  std::unique_ptr<BumpPtrAllocator> Allocator;

  SymbolCache Cache;
  SymIndexId ExeSymbol = 0;
  uint64_t LoadAddress = 0;

  /// Map from virtual address to module index.
  using IMap =
      IntervalMap<uint64_t, uint16_t, 8, IntervalMapHalfOpenInfo<uint64_t>>;
  IMap::Allocator IMapAllocator;
  IMap AddrToModuleIndex;
};
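
// Example (sketch): typical session setup followed by an address query,
// where "input.pdb" and `Address` stand in for real inputs:
//
//   std::unique_ptr<IPDBSession> Session;
//   if (Error E = NativeSession::createFromPdbPath("input.pdb", Session))
//     return E; // or report/consume it
//   std::unique_ptr<PDBSymbol> Sym =
//       Session->findSymbolByAddress(Address, PDB_SymType::Function);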
} // namespace pdb
} // namespace llvm

#endif

//===- RawTypes.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_RAWTYPES_H
#define LLVM_DEBUGINFO_PDB_NATIVE_RAWTYPES_H

#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Endian.h"

namespace llvm {
namespace pdb {
// This struct is defined as "SO" in langapi/include/pdb.h.
struct SectionOffset {
  support::ulittle32_t Off;
  support::ulittle16_t Isect;
  char Padding[2];
};

/// Header of the hash tables found in the globals and publics sections.
/// Based on GSIHashHdr in
/// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
struct GSIHashHeader {
  enum : unsigned {
    HdrSignature = ~0U,
    HdrVersion = 0xeffe0000 + 19990810,
  };
  support::ulittle32_t VerSignature;
  support::ulittle32_t VerHdr;
  support::ulittle32_t HrSize;
  support::ulittle32_t NumBuckets;
};

// This is HRFile.
struct PSHashRecord {
  support::ulittle32_t Off; // Offset in the symbol record stream
  support::ulittle32_t CRef;
};

// This struct is defined as `SC` in include/dbicommon.h
struct SectionContrib {
  support::ulittle16_t ISect;
  char Padding[2];
  support::little32_t Off;
  support::little32_t Size;
  support::ulittle32_t Characteristics;
  support::ulittle16_t Imod;
  char Padding2[2];
  support::ulittle32_t DataCrc;
  support::ulittle32_t RelocCrc;
};

// This struct is defined as `SC2` in include/dbicommon.h
struct SectionContrib2 {
  // To guarantee SectionContrib2 is standard layout, we cannot use inheritance.
  SectionContrib Base;
  support::ulittle32_t ISectCoff;
};

// This corresponds to the `OMFSegMap` structure.
struct SecMapHeader {
  support::ulittle16_t SecCount;    // Number of segment descriptors in table
  support::ulittle16_t SecCountLog; // Number of logical segment descriptors
};

// This corresponds to the `OMFSegMapDesc` structure.  The definition is not
// present in the reference implementation, but the layout is derived from
// code that accesses the fields.
struct SecMapEntry {
  support::ulittle16_t Flags; // Descriptor flags.  See OMFSegDescFlags
  support::ulittle16_t Ovl;   // Logical overlay number.
  support::ulittle16_t Group; // Group index into descriptor array.
  support::ulittle16_t Frame;
  support::ulittle16_t SecName;       // Byte index of the segment or group name
                                      // in the sstSegName table, or 0xFFFF.
  support::ulittle16_t ClassName;     // Byte index of the class name in the
                                      // sstSegName table, or 0xFFFF.
  support::ulittle32_t Offset;        // Byte offset of the logical segment
                                      // within the specified physical segment.
                                      // If group is set in flags, offset is the
                                      // offset of the group.
  support::ulittle32_t SecByteLength; // Byte count of the segment or group.
};

/// Some of the values are stored in bitfields.  Since this needs to be portable
/// across compilers and architectures (big / little endian in particular) we
/// can't use the actual structures below, but must instead do the shifting
/// and masking ourselves.  The struct definitions are provided for reference.
struct DbiFlags {
  ///  uint16_t IncrementalLinking : 1; // True if linked incrementally
  ///  uint16_t IsStripped : 1;         // True if private symbols were
  ///  stripped.
  ///  uint16_t HasCTypes : 1;          // True if linked with /debug:ctypes.
  ///  uint16_t Reserved : 13;
  static const uint16_t FlagIncrementalMask = 0x0001;
  static const uint16_t FlagStrippedMask = 0x0002;
  static const uint16_t FlagHasCTypesMask = 0x0004;
};

struct DbiBuildNo {
  ///  uint16_t MinorVersion : 8;
  ///  uint16_t MajorVersion : 7;
  ///  uint16_t NewVersionFormat : 1;
  static const uint16_t BuildMinorMask = 0x00FF;
  static const uint16_t BuildMinorShift = 0;

  static const uint16_t BuildMajorMask = 0x7F00;
  static const uint16_t BuildMajorShift = 8;

  static const uint16_t NewVersionFormatMask = 0x8000;
};
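
// Example (sketch): decoding a BuildNumber field by hand, as the masks above
// prescribe (the commented-out bitfields are reference-only).  `Build` is
// assumed to hold the raw 16-bit value:
//
//   bool NewFormat = (Build & DbiBuildNo::NewVersionFormatMask) != 0;
//   uint16_t Major =
//       (Build & DbiBuildNo::BuildMajorMask) >> DbiBuildNo::BuildMajorShift;
//   uint16_t Minor =
//       (Build & DbiBuildNo::BuildMinorMask) >> DbiBuildNo::BuildMinorShift;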

/// The fixed size header that appears at the beginning of the DBI Stream.
struct DbiStreamHeader {
  support::little32_t VersionSignature;
  support::ulittle32_t VersionHeader;

  /// How "old" is this DBI Stream. Should match the age of the PDB InfoStream.
  support::ulittle32_t Age;

  /// Global symbol stream #
  support::ulittle16_t GlobalSymbolStreamIndex;

  /// See DbiBuildNo structure.
  support::ulittle16_t BuildNumber;

  /// Public symbols stream #
  support::ulittle16_t PublicSymbolStreamIndex;

  /// version of mspdbNNN.dll
  support::ulittle16_t PdbDllVersion;

  /// Symbol records stream #
  support::ulittle16_t SymRecordStreamIndex;

  /// rbld number of mspdbNNN.dll
  support::ulittle16_t PdbDllRbld;

  /// Size of module info stream
  support::little32_t ModiSubstreamSize;

  /// Size of sec. contrib stream
  support::little32_t SecContrSubstreamSize;

  /// Size of sec. map substream
  support::little32_t SectionMapSize;

  /// Size of file info substream
  support::little32_t FileInfoSize;

  /// Size of type server map
  support::little32_t TypeServerSize;

  /// Index of MFC Type Server
  support::ulittle32_t MFCTypeServerIndex;

  /// Size of DbgHeader info
  support::little32_t OptionalDbgHdrSize;

  /// Size of EC (edit-and-continue) substream
  support::little32_t ECSubstreamSize;

  /// See DbiFlags enum.
  support::ulittle16_t Flags;

  /// See PDB_MachineType enum.
  support::ulittle16_t MachineType;

  /// Pad to 64 bytes
  support::ulittle32_t Reserved;
};
static_assert(sizeof(DbiStreamHeader) == 64, "Invalid DbiStreamHeader size!");

/// The header preceding the File Info Substream of the DBI stream.
struct FileInfoSubstreamHeader {
  /// Total # of modules, should match number of records in the ModuleInfo
  /// substream.
  support::ulittle16_t NumModules;

  /// Total # of source files. This value is not accurate because PDB actually
  /// supports more than 64k source files, so we ignore it and compute the value
  /// from other stream fields.
  support::ulittle16_t NumSourceFiles;

  /// Following this header the File Info Substream is laid out as follows:
  ///   ulittle16_t ModIndices[NumModules];
  ///   ulittle16_t ModFileCounts[NumModules];
  ///   ulittle32_t FileNameOffsets[NumSourceFiles];
  ///   char Names[][NumSourceFiles];
  /// with the caveat that `NumSourceFiles` cannot be trusted, so
  /// it is computed by summing the `ModFileCounts` array.
};
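
// Example (sketch): recovering the true source file count for the layout
// described above, assuming `ModFileCounts` holds the parsed ModFileCounts
// array:
//
//   uint32_t NumSourceFiles = 0;
//   for (uint16_t Count : ModFileCounts)
//     NumSourceFiles += Count;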

struct ModInfoFlags {
  ///  uint16_t fWritten : 1;   // True if DbiModuleDescriptor is dirty
  ///  uint16_t fECEnabled : 1; // Is EC (edit-and-continue) info present?
  ///  uint16_t unused : 6;     // Reserved
  ///  uint16_t iTSM : 8;       // Type Server Index for this module
  static const uint16_t HasECFlagMask = 0x2;

  static const uint16_t TypeServerIndexMask = 0xFF00;
  static const uint16_t TypeServerIndexShift = 8;
};

/// The header preceding each entry in the Module Info substream of the DBI
/// stream.  Corresponds to the type MODI in the reference implementation.
struct ModuleInfoHeader {
  /// Currently opened module. This field is a pointer in the reference
  /// implementation, but that won't work on 64-bit systems, and anyway it
  /// doesn't make sense to read a pointer from a file. For now it is unused,
  /// so just ignore it.
  support::ulittle32_t Mod;

  /// First section contribution of this module.
  SectionContrib SC;

  /// See ModInfoFlags definition.
  support::ulittle16_t Flags;

  /// Stream Number of module debug info
  support::ulittle16_t ModDiStream;

  /// Size of local symbol debug info in above stream
  support::ulittle32_t SymBytes;

  /// Size of C11 line number info in above stream
  support::ulittle32_t C11Bytes;

  /// Size of C13 line number info in above stream
  support::ulittle32_t C13Bytes;

  /// Number of files contributing to this module
  support::ulittle16_t NumFiles;

  /// Padding so the next field is 4-byte aligned.
  char Padding1[2];

  /// Array of [0..NumFiles) DBI name buffer offsets.  In the reference
  /// implementation this field is a pointer.  But since you can't portably
  /// serialize a pointer, on 64-bit platforms they copy all the values except
  /// this one into the 32-bit version of the struct and use that for
  /// serialization.  Regardless, this field is unused, it is only there to
  /// store a pointer that can be accessed at runtime.
  support::ulittle32_t FileNameOffs;

  /// Name Index for src file name
  support::ulittle32_t SrcFileNameNI;

  /// Name Index for path to compiler PDB
  support::ulittle32_t PdbFilePathNI;

  /// Following this header are two zero terminated strings.
  /// char ModuleName[];
  /// char ObjFileName[];
};

// This is PSGSIHDR struct defined in
// https://github.com/Microsoft/microsoft-pdb/blob/master/PDB/dbi/gsi.h
struct PublicsStreamHeader {
  support::ulittle32_t SymHash;
  support::ulittle32_t AddrMap;
  support::ulittle32_t NumThunks;
  support::ulittle32_t SizeOfThunk;
  support::ulittle16_t ISectThunkTable;
  char Padding[2];
  support::ulittle32_t OffThunkTable;
  support::ulittle32_t NumSections;
};

// The header preceding the global TPI stream.
// This corresponds to `HDR` in PDB/dbi/tpi.h.
struct TpiStreamHeader {
  struct EmbeddedBuf {
    support::little32_t Off;
    support::ulittle32_t Length;
  };

  support::ulittle32_t Version;
  support::ulittle32_t HeaderSize;
  support::ulittle32_t TypeIndexBegin;
  support::ulittle32_t TypeIndexEnd;
  support::ulittle32_t TypeRecordBytes;

  // The following members correspond to `TpiHash` in PDB/dbi/tpi.h.
  support::ulittle16_t HashStreamIndex;
  support::ulittle16_t HashAuxStreamIndex;
  support::ulittle32_t HashKeySize;
  support::ulittle32_t NumHashBuckets;

  EmbeddedBuf HashValueBuffer;
  EmbeddedBuf IndexOffsetBuffer;
  EmbeddedBuf HashAdjBuffer;
};

const uint32_t MinTpiHashBuckets = 0x1000;
const uint32_t MaxTpiHashBuckets = 0x40000;

/// The header preceding the global PDB Stream (Stream 1)
struct InfoStreamHeader {
  support::ulittle32_t Version;
  support::ulittle32_t Signature;
  support::ulittle32_t Age;
  codeview::GUID Guid;
};

/// The header preceding the /names stream.
struct PDBStringTableHeader {
  support::ulittle32_t Signature;   // PDBStringTableSignature
  support::ulittle32_t HashVersion; // 1 or 2
  support::ulittle32_t ByteSize;    // Number of bytes of names buffer.
};

const uint32_t PDBStringTableSignature = 0xEFFEEFFE;

/// The header preceding the /src/headerblock stream.
struct SrcHeaderBlockHeader {
  support::ulittle32_t Version; // PdbRaw_SrcHeaderBlockVer enumeration.
  support::ulittle32_t Size;    // Size of entire stream.
  uint64_t FileTime;            // Time stamp (Windows FILETIME format).
  support::ulittle32_t Age;     // Age
  uint8_t Padding[44];          // Pad to 64 bytes.
};
static_assert(sizeof(SrcHeaderBlockHeader) == 64, "Incorrect struct size!");

/// A single file record entry within the /src/headerblock stream.
struct SrcHeaderBlockEntry {
  support::ulittle32_t Size;     // Record Length.
  support::ulittle32_t Version;  // PdbRaw_SrcHeaderBlockVer enumeration.
  support::ulittle32_t CRC;      // CRC of the original file contents.
  support::ulittle32_t FileSize; // Size of original source file.
  support::ulittle32_t FileNI;   // String table index of file name.
  support::ulittle32_t ObjNI;    // String table index of object name.
  support::ulittle32_t VFileNI;  // String table index of virtual file name.
  uint8_t Compression;           // PDB_SourceCompression enumeration.
  uint8_t IsVirtual;             // Is this a virtual file (injected)?
  short Padding;                 // Pad to 4 bytes.
  char Reserved[8];
};
static_assert(sizeof(SrcHeaderBlockEntry) == 40, "Incorrect struct size!");

} // namespace pdb
} // namespace llvm

#endif

//==- NativeRawSymbol.h - Native implementation of IPDBRawSymbol -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVERAWSYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVERAWSYMBOL_H

#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include <cstdint>
#include <memory>

namespace llvm {
namespace pdb {

class NativeSession;

class NativeRawSymbol : public IPDBRawSymbol {
  friend class SymbolCache;
  virtual void initialize() {}

public:
  NativeRawSymbol(NativeSession &PDBSession, PDB_SymType Tag,
                  SymIndexId SymbolId);

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  std::unique_ptr<IPDBEnumSymbols>
    findChildren(PDB_SymType Type) const override;
  std::unique_ptr<IPDBEnumSymbols>
    findChildren(PDB_SymType Type, StringRef Name,
      PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBEnumSymbols>
    findChildrenByAddr(PDB_SymType Type, StringRef Name,
                       PDB_NameSearchFlags Flags,
                       uint32_t Section, uint32_t Offset) const override;
  std::unique_ptr<IPDBEnumSymbols>
    findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
                     uint64_t VA) const override;
  std::unique_ptr<IPDBEnumSymbols>
    findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
      uint32_t RVA) const override;

  std::unique_ptr<IPDBEnumSymbols>
    findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const override;
  std::unique_ptr<IPDBEnumSymbols>
    findInlineFramesByRVA(uint32_t RVA) const override;
  std::unique_ptr<IPDBEnumSymbols>
    findInlineFramesByVA(uint64_t VA) const override;

  std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const override;
  std::unique_ptr<IPDBEnumLineNumbers>
    findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
                           uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
    findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
    findInlineeLinesByVA(uint64_t VA, uint32_t Length) const override;

  void getDataBytes(SmallVector<uint8_t, 32> &Bytes) const override;
  void getFrontEndVersion(VersionInfo &Version) const override;
  void getBackEndVersion(VersionInfo &Version) const override;
  PDB_MemberAccess getAccess() const override;
  uint32_t getAddressOffset() const override;
  uint32_t getAddressSection() const override;
  uint32_t getAge() const override;
  SymIndexId getArrayIndexTypeId() const override;
  uint32_t getBaseDataOffset() const override;
  uint32_t getBaseDataSlot() const override;
  SymIndexId getBaseSymbolId() const override;
  PDB_BuiltinType getBuiltinType() const override;
  uint32_t getBitPosition() const override;
  PDB_CallingConv getCallingConvention() const override;
  SymIndexId getClassParentId() const override;
  std::string getCompilerName() const override;
  uint32_t getCount() const override;
  uint32_t getCountLiveRanges() const override;
  PDB_Lang getLanguage() const override;
  SymIndexId getLexicalParentId() const override;
  std::string getLibraryName() const override;
  uint32_t getLiveRangeStartAddressOffset() const override;
  uint32_t getLiveRangeStartAddressSection() const override;
  uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
  codeview::RegisterId getLocalBasePointerRegisterId() const override;
  SymIndexId getLowerBoundId() const override;
  uint32_t getMemorySpaceKind() const override;
  std::string getName() const override;
  uint32_t getNumberOfAcceleratorPointerTags() const override;
  uint32_t getNumberOfColumns() const override;
  uint32_t getNumberOfModifiers() const override;
  uint32_t getNumberOfRegisterIndices() const override;
  uint32_t getNumberOfRows() const override;
  std::string getObjectFileName() const override;
  uint32_t getOemId() const override;
  SymIndexId getOemSymbolId() const override;
  uint32_t getOffsetInUdt() const override;
  PDB_Cpu getPlatform() const override;
  uint32_t getRank() const override;
  codeview::RegisterId getRegisterId() const override;
  uint32_t getRegisterType() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint32_t getSamplerSlot() const override;
  uint32_t getSignature() const override;
  uint32_t getSizeInUdt() const override;
  uint32_t getSlot() const override;
  std::string getSourceFileName() const override;
  std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
  uint32_t getStride() const override;
  SymIndexId getSubTypeId() const override;
  std::string getSymbolsFileName() const override;
  SymIndexId getSymIndexId() const override;
  uint32_t getTargetOffset() const override;
  uint32_t getTargetRelativeVirtualAddress() const override;
  uint64_t getTargetVirtualAddress() const override;
  uint32_t getTargetSection() const override;
  uint32_t getTextureSlot() const override;
  uint32_t getTimeStamp() const override;
  uint32_t getToken() const override;
  SymIndexId getTypeId() const override;
  uint32_t getUavSlot() const override;
  std::string getUndecoratedName() const override;
  std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
  SymIndexId getUnmodifiedTypeId() const override;
  SymIndexId getUpperBoundId() const override;
  Variant getValue() const override;
  uint32_t getVirtualBaseDispIndex() const override;
  uint32_t getVirtualBaseOffset() const override;
  SymIndexId getVirtualTableShapeId() const override;
  std::unique_ptr<PDBSymbolTypeBuiltin>
  getVirtualBaseTableType() const override;
  PDB_DataKind getDataKind() const override;
  PDB_SymType getSymTag() const override;
  codeview::GUID getGuid() const override;
  int32_t getOffset() const override;
  int32_t getThisAdjust() const override;
  int32_t getVirtualBasePointerOffset() const override;
  PDB_LocType getLocationType() const override;
  PDB_Machine getMachineType() const override;
  codeview::ThunkOrdinal getThunkOrdinal() const override;
  uint64_t getLength() const override;
  uint64_t getLiveRangeLength() const override;
  uint64_t getVirtualAddress() const override;
  PDB_UdtType getUdtKind() const override;
  bool hasConstructor() const override;
  bool hasCustomCallingConvention() const override;
  bool hasFarReturn() const override;
  bool isCode() const override;
  bool isCompilerGenerated() const override;
  bool isConstType() const override;
  bool isEditAndContinueEnabled() const override;
  bool isFunction() const override;
  bool getAddressTaken() const override;
  bool getNoStackOrdering() const override;
  bool hasAlloca() const override;
  bool hasAssignmentOperator() const override;
  bool hasCTypes() const override;
  bool hasCastOperator() const override;
  bool hasDebugInfo() const override;
  bool hasEH() const override;
  bool hasEHa() const override;
  bool hasInlAsm() const override;
  bool hasInlineAttribute() const override;
  bool hasInterruptReturn() const override;
  bool hasFramePointer() const override;
  bool hasLongJump() const override;
  bool hasManagedCode() const override;
  bool hasNestedTypes() const override;
  bool hasNoInlineAttribute() const override;
  bool hasNoReturnAttribute() const override;
  bool hasOptimizedCodeDebugInfo() const override;
  bool hasOverloadedOperator() const override;
  bool hasSEH() const override;
  bool hasSecurityChecks() const override;
  bool hasSetJump() const override;
  bool hasStrictGSCheck() const override;
  bool isAcceleratorGroupSharedLocal() const override;
  bool isAcceleratorPointerTagLiveRange() const override;
  bool isAcceleratorStubFunction() const override;
  bool isAggregated() const override;
  bool isIntroVirtualFunction() const override;
  bool isCVTCIL() const override;
  bool isConstructorVirtualBase() const override;
  bool isCxxReturnUdt() const override;
  bool isDataAligned() const override;
  bool isHLSLData() const override;
  bool isHotpatchable() const override;
  bool isIndirectVirtualBaseClass() const override;
  bool isInterfaceUdt() const override;
  bool isIntrinsic() const override;
  bool isLTCG() const override;
  bool isLocationControlFlowDependent() const override;
  bool isMSILNetmodule() const override;
  bool isMatrixRowMajor() const override;
  bool isManagedCode() const override;
  bool isMSILCode() const override;
  bool isMultipleInheritance() const override;
  bool isNaked() const override;
  bool isNested() const override;
  bool isOptimizedAway() const override;
  bool isPacked() const override;
  bool isPointerBasedOnSymbolValue() const override;
  bool isPointerToDataMember() const override;
  bool isPointerToMemberFunction() const override;
  bool isPureVirtual() const override;
  bool isRValueReference() const override;
  bool isRefUdt() const override;
  bool isReference() const override;
  bool isRestrictedType() const override;
  bool isReturnValue() const override;
  bool isSafeBuffers() const override;
  bool isScoped() const override;
  bool isSdl() const override;
  bool isSingleInheritance() const override;
  bool isSplitted() const override;
  bool isStatic() const override;
  bool hasPrivateSymbols() const override;
  bool isUnalignedType() const override;
  bool isUnreached() const override;
  bool isValueUdt() const override;
  bool isVirtual() const override;
  bool isVirtualBaseClass() const override;
  bool isVirtualInheritance() const override;
  bool isVolatileType() const override;
  bool wasInlined() const override;
  std::string getUnused() const override;

protected:
  NativeSession &Session;
  PDB_SymType Tag;
  SymIndexId SymbolId;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVERAWSYMBOL_H

//===- InfoStreamBuilder.h - PDB Info Stream Creation -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_INFOSTREAMBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_INFOSTREAMBUILDER_H

#include "llvm/Support/Error.h"

#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"

namespace llvm {
class WritableBinaryStreamRef;

namespace msf {
class MSFBuilder;
struct MSFLayout;
}
namespace pdb {
class NamedStreamMap;

class InfoStreamBuilder {
public:
  InfoStreamBuilder(msf::MSFBuilder &Msf, NamedStreamMap &NamedStreams);
  InfoStreamBuilder(const InfoStreamBuilder &) = delete;
  InfoStreamBuilder &operator=(const InfoStreamBuilder &) = delete;

  void setVersion(PdbRaw_ImplVer V);
  void addFeature(PdbRaw_FeatureSig Sig);

  // If this is true, the PDB contents are hashed, and that hash is used as
  // both the PDB GUID and the Signature.  The age is always 1.
  void setHashPDBContentsToGUID(bool B);

  // These only have an effect if hashPDBContentsToGUID() is false.
  void setSignature(uint32_t S);
  void setAge(uint32_t A);
  void setGuid(codeview::GUID G);

  bool hashPDBContentsToGUID() const { return HashPDBContentsToGUID; }
  uint32_t getAge() const { return Age; }
  codeview::GUID getGuid() const { return Guid; }
  std::optional<uint32_t> getSignature() const { return Signature; }

  uint32_t finalize();

  Error finalizeMsfLayout();

  Error commit(const msf::MSFLayout &Layout,
               WritableBinaryStreamRef Buffer) const;

private:
  msf::MSFBuilder &Msf;

  std::vector<PdbRaw_FeatureSig> Features;
  PdbRaw_ImplVer Ver;
  uint32_t Age;
  std::optional<uint32_t> Signature;
  codeview::GUID Guid;

  bool HashPDBContentsToGUID = false;

  NamedStreamMap &NamedStreams;
};
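
// Example (sketch): configuring the info stream for a content-hashed
// (deterministic) GUID, assuming `Info` is the InfoStreamBuilder owned by
// some larger PDB file builder:
//
//   Info.setVersion(PdbImplVC70);
//   Info.setHashPDBContentsToGUID(true); // GUID/Signature derived from
//                                        // contents; age forced to 1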
} // namespace pdb
} // namespace llvm

#endif

//===- RawConstants.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_RAWCONSTANTS_H
#define LLVM_DEBUGINFO_PDB_NATIVE_RAWCONSTANTS_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include <cstdint>

namespace llvm {
namespace pdb {

const uint16_t kInvalidStreamIndex = 0xFFFF;

enum PdbRaw_ImplVer : uint32_t {
  PdbImplVC2 = 19941610,
  PdbImplVC4 = 19950623,
  PdbImplVC41 = 19950814,
  PdbImplVC50 = 19960307,
  PdbImplVC98 = 19970604,
  PdbImplVC70Dep = 19990604, // deprecated
  PdbImplVC70 = 20000404,
  PdbImplVC80 = 20030901,
  PdbImplVC110 = 20091201,
  PdbImplVC140 = 20140508,
};

enum class PdbRaw_SrcHeaderBlockVer : uint32_t { SrcVerOne = 19980827 };

enum class PdbRaw_FeatureSig : uint32_t {
  VC110 = PdbImplVC110,
  VC140 = PdbImplVC140,
  NoTypeMerge = 0x4D544F4E,
  MinimalDebugInfo = 0x494E494D,
};

enum PdbRaw_Features : uint32_t {
  PdbFeatureNone = 0x0,
  PdbFeatureContainsIdStream = 0x1,
  PdbFeatureMinimalDebugInfo = 0x2,
  PdbFeatureNoTypeMerging = 0x4,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ PdbFeatureNoTypeMerging)
};

enum PdbRaw_DbiVer : uint32_t {
  PdbDbiVC41 = 930803,
  PdbDbiV50 = 19960307,
  PdbDbiV60 = 19970606,
  PdbDbiV70 = 19990903,
  PdbDbiV110 = 20091201
};

enum PdbRaw_TpiVer : uint32_t {
  PdbTpiV40 = 19950410,
  PdbTpiV41 = 19951122,
  PdbTpiV50 = 19961031,
  PdbTpiV70 = 19990903,
  PdbTpiV80 = 20040203,
};

enum PdbRaw_DbiSecContribVer : uint32_t {
  DbiSecContribVer60 = 0xeffe0000 + 19970605,
  DbiSecContribV2 = 0xeffe0000 + 20140516
};

enum SpecialStream : uint32_t {
  // Stream 0 contains a copy of the previous version of the MSF directory.
  // We are not currently using it, but technically if we found that the main
  // MSF directory was corrupted, we could fall back to it.
  OldMSFDirectory = 0,

  StreamPDB = 1,
  StreamTPI = 2,
  StreamDBI = 3,
  StreamIPI = 4,

  kSpecialStreamCount
};

enum class DbgHeaderType : uint16_t {
  FPO,
  Exception,
  Fixup,
  OmapToSrc,
  OmapFromSrc,
  SectionHdr,
  TokenRidMap,
  Xdata,
  Pdata,
  NewFPO,
  SectionHdrOrig,
  Max
};

enum class OMFSegDescFlags : uint16_t {
  None = 0,
  Read = 1 << 0,              // Segment is readable.
  Write = 1 << 1,             // Segment is writable.
  Execute = 1 << 2,           // Segment is executable.
  AddressIs32Bit = 1 << 3,    // Descriptor describes a 32-bit linear address.
  IsSelector = 1 << 8,        // Frame represents a selector.
  IsAbsoluteAddress = 1 << 9, // Frame represents an absolute address.
  IsGroup = 1 << 10,          // If set, descriptor represents a group.
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ IsGroup)
};

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_RAWCONSTANTS_H
//===- DbiModuleList.h - PDB module information list ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULELIST_H
#define LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULELIST_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <vector>

namespace llvm {
namespace pdb {

class DbiModuleList;
struct FileInfoSubstreamHeader;

class DbiModuleSourceFilesIterator
    : public iterator_facade_base<DbiModuleSourceFilesIterator,
                                  std::random_access_iterator_tag, StringRef> {
  using BaseType = typename DbiModuleSourceFilesIterator::iterator_facade_base;

public:
  DbiModuleSourceFilesIterator(const DbiModuleList &Modules, uint32_t Modi,
                               uint16_t Filei);
  DbiModuleSourceFilesIterator() = default;
  DbiModuleSourceFilesIterator(const DbiModuleSourceFilesIterator &R) = default;
  DbiModuleSourceFilesIterator &
  operator=(const DbiModuleSourceFilesIterator &R) = default;

  bool operator==(const DbiModuleSourceFilesIterator &R) const;

  const StringRef &operator*() const { return ThisValue; }
  StringRef &operator*() { return ThisValue; }

  bool operator<(const DbiModuleSourceFilesIterator &RHS) const;
  std::ptrdiff_t operator-(const DbiModuleSourceFilesIterator &R) const;
  DbiModuleSourceFilesIterator &operator+=(std::ptrdiff_t N);
  DbiModuleSourceFilesIterator &operator-=(std::ptrdiff_t N);

private:
  void setValue();

  bool isEnd() const;
  bool isCompatible(const DbiModuleSourceFilesIterator &R) const;
  bool isUniversalEnd() const;

  StringRef ThisValue;
  const DbiModuleList *Modules{nullptr};
  uint32_t Modi{0};
  uint16_t Filei{0};
};

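/// Provides access to the module list and per-module source file lists of
/// the DBI stream.  A minimal sketch (illustrative only) of walking every
/// module and its source files, assuming a DbiModuleList `Modules` that has
/// already been initialized:
/// \code
///   for (uint32_t I = 0, E = Modules.getModuleCount(); I != E; ++I) {
///     DbiModuleDescriptor Descriptor = Modules.getModuleDescriptor(I);
///     for (StringRef File : Modules.source_files(I)) {
///       // Use Descriptor.getModuleName() and File here.
///     }
///   }
/// \endcode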
class DbiModuleList {
  friend DbiModuleSourceFilesIterator;

public:
  Error initialize(BinaryStreamRef ModInfo, BinaryStreamRef FileInfo);

  Expected<StringRef> getFileName(uint32_t Index) const;
  uint32_t getModuleCount() const;
  uint32_t getSourceFileCount() const;
  uint16_t getSourceFileCount(uint32_t Modi) const;

  iterator_range<DbiModuleSourceFilesIterator>
  source_files(uint32_t Modi) const;

  DbiModuleDescriptor getModuleDescriptor(uint32_t Modi) const;

private:
  Error initializeModInfo(BinaryStreamRef ModInfo);
  Error initializeFileInfo(BinaryStreamRef FileInfo);

  VarStreamArray<DbiModuleDescriptor> Descriptors;

  FixedStreamArray<support::little32_t> FileNameOffsets;
  FixedStreamArray<support::ulittle16_t> ModFileCountArray;

  // Each module refers to one or more source files, identified by file
  // index.  A file index is used as an index into the FileNameOffsets
  // array, which holds the absolute offset of the file name within
  // NamesBuffer.  Thus, for each module we store its first index into the
  // FileNameOffsets array, and the number of files belonging to that module
  // in ModFileCountArray.
  std::vector<uint32_t> ModuleInitialFileIndex;

  // In order to provide random access into the Descriptors array, we iterate it
  // once up front to find the offsets of the individual items and store them in
  // this array.
  std::vector<uint32_t> ModuleDescriptorOffsets;

  const FileInfoSubstreamHeader *FileInfoHeader = nullptr;

  BinaryStreamRef ModInfoSubstream;
  BinaryStreamRef FileInfoSubstream;
  BinaryStreamRef NamesBuffer;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULELIST_H
//==- NativeEnumSymbols.h - Native Symbols Enumerator impl -------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOLS_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMSYMBOLS_H

#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

#include <vector>

namespace llvm {
namespace pdb {

class NativeSession;

class NativeEnumSymbols : public IPDBEnumChildren<PDBSymbol> {
public:
  NativeEnumSymbols(NativeSession &Session, std::vector<SymIndexId> Symbols);

  uint32_t getChildCount() const override;
  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<PDBSymbol> getNext() override;
  void reset() override;

private:
  std::vector<SymIndexId> Symbols;
  uint32_t Index;
  NativeSession &Session;
};

} // namespace pdb
} // namespace llvm

#endif
//===- NativeCompilandSymbol.h - native impl for compiland syms -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVECOMPILANDSYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVECOMPILANDSYMBOL_H

#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"

namespace llvm {
namespace pdb {

class NativeCompilandSymbol : public NativeRawSymbol {
public:
  NativeCompilandSymbol(NativeSession &Session, SymIndexId SymbolId,
                        DbiModuleDescriptor MI);

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  PDB_SymType getSymTag() const override;
  bool isEditAndContinueEnabled() const override;
  SymIndexId getLexicalParentId() const override;
  std::string getLibraryName() const override;
  std::string getName() const override;

private:
  DbiModuleDescriptor Module;
};

} // namespace pdb
} // namespace llvm

#endif
//===- NativeTypeEnum.h - info about enum type ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEENUM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEENUM_H

#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
class raw_ostream;
namespace pdb {

class NativeTypeBuiltin;

class NativeTypeEnum : public NativeRawSymbol {
public:
  NativeTypeEnum(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
                 codeview::EnumRecord Record);

  NativeTypeEnum(NativeSession &Session, SymIndexId Id,
                 NativeTypeEnum &UnmodifiedType,
                 codeview::ModifierRecord Modifier);
  ~NativeTypeEnum() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type) const override;

  PDB_BuiltinType getBuiltinType() const override;
  PDB_SymType getSymTag() const override;
  SymIndexId getUnmodifiedTypeId() const override;
  bool hasConstructor() const override;
  bool hasAssignmentOperator() const override;
  bool hasCastOperator() const override;
  uint64_t getLength() const override;
  std::string getName() const override;
  bool isConstType() const override;
  bool isVolatileType() const override;
  bool isUnalignedType() const override;
  bool isNested() const override;
  bool hasOverloadedOperator() const override;
  bool hasNestedTypes() const override;
  bool isIntrinsic() const override;
  bool isPacked() const override;
  bool isScoped() const override;
  SymIndexId getTypeId() const override;
  bool isRefUdt() const override;
  bool isValueUdt() const override;
  bool isInterfaceUdt() const override;

  const NativeTypeBuiltin &getUnderlyingBuiltinType() const;
  const codeview::EnumRecord &getEnumRecord() const { return *Record; }

protected:
  codeview::TypeIndex Index;
  std::optional<codeview::EnumRecord> Record;
  NativeTypeEnum *UnmodifiedType = nullptr;
  std::optional<codeview::ModifierRecord> Modifiers;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEENUM_H
//===- NativeTypeBuiltin.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEBUILTIN_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEBUILTIN_H

#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"

#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {

class NativeSession;

class NativeTypeBuiltin : public NativeRawSymbol {
public:
  NativeTypeBuiltin(NativeSession &PDBSession, SymIndexId Id,
                    codeview::ModifierOptions Mods, PDB_BuiltinType T,
                    uint64_t L);
  ~NativeTypeBuiltin() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  PDB_SymType getSymTag() const override;

  PDB_BuiltinType getBuiltinType() const override;
  bool isConstType() const override;
  uint64_t getLength() const override;
  bool isUnalignedType() const override;
  bool isVolatileType() const override;

protected:
  NativeSession &Session;
  codeview::ModifierOptions Mods;
  PDB_BuiltinType Type;
  uint64_t Length;
};

} // namespace pdb
} // namespace llvm

#endif
//===- EnumTables.h - Enum to string conversion tables ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_ENUMTABLES_H
#define LLVM_DEBUGINFO_PDB_NATIVE_ENUMTABLES_H

#include "llvm/ADT/ArrayRef.h"

namespace llvm {
template <typename T> struct EnumEntry;
namespace pdb {
ArrayRef<EnumEntry<uint16_t>> getOMFSegMapDescFlagNames();
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_ENUMTABLES_H
//===- DbiStreamBuilder.h - PDB Dbi Stream Creation -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_DBISTREAMBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_DBISTREAMBUILDER_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"

#include "llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamRef.h"

namespace llvm {

class BinaryStreamWriter;
namespace codeview {
struct FrameData;
}
namespace msf {
class MSFBuilder;
struct MSFLayout;
}
namespace pdb {
class DbiModuleDescriptorBuilder;

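/// A minimal usage sketch; illustrative only, and assumes an msf::MSFBuilder
/// `Msf` is available:
/// \code
///   DbiStreamBuilder Dbi(Msf);
///   Dbi.setVersionHeader(PdbDbiV70);
///   Dbi.setAge(1);
///   Dbi.setMachineType(PDB_Machine::x86);
///   Expected<DbiModuleDescriptorBuilder &> Mod = Dbi.addModuleInfo("a.obj");
///   if (!Mod)
///     return Mod.takeError();
///   if (Error E = Dbi.addModuleSourceFile(*Mod, "a.cpp"))
///     return E;
/// \endcode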
class DbiStreamBuilder {
public:
  DbiStreamBuilder(msf::MSFBuilder &Msf);
  ~DbiStreamBuilder();

  DbiStreamBuilder(const DbiStreamBuilder &) = delete;
  DbiStreamBuilder &operator=(const DbiStreamBuilder &) = delete;

  void setVersionHeader(PdbRaw_DbiVer V);
  void setAge(uint32_t A);
  void setBuildNumber(uint16_t B);
  void setBuildNumber(uint8_t Major, uint8_t Minor);
  void setPdbDllVersion(uint16_t V);
  void setPdbDllRbld(uint16_t R);
  void setFlags(uint16_t F);
  void setMachineType(PDB_Machine M);
  void setMachineType(COFF::MachineTypes M);

  // Add the given bytes as a new debug stream of the given type.
  Error addDbgStream(pdb::DbgHeaderType Type, ArrayRef<uint8_t> Data);

  uint32_t addECName(StringRef Name);

  uint32_t calculateSerializedLength() const;

  void setGlobalsStreamIndex(uint32_t Index);
  void setPublicsStreamIndex(uint32_t Index);
  void setSymbolRecordStreamIndex(uint32_t Index);
  void addNewFpoData(const codeview::FrameData &FD);
  void addOldFpoData(const object::FpoData &Fpo);

  Expected<DbiModuleDescriptorBuilder &> addModuleInfo(StringRef ModuleName);
  Error addModuleSourceFile(DbiModuleDescriptorBuilder &Module, StringRef File);
  Expected<uint32_t> getSourceFileNameIndex(StringRef FileName);

  Error finalizeMsfLayout();

  Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef MsfBuffer);

  void addSectionContrib(const SectionContrib &SC) {
    SectionContribs.emplace_back(SC);
  }

  // Populate the Section Map from COFF section headers.
  void createSectionMap(ArrayRef<llvm::object::coff_section> SecHdrs);

private:
  struct DebugStream {
    std::function<Error(BinaryStreamWriter &)> WriteFn;
    uint32_t Size = 0;
    uint16_t StreamNumber = kInvalidStreamIndex;
  };

  Error finalize();
  uint32_t calculateModiSubstreamSize() const;
  uint32_t calculateNamesOffset() const;
  uint32_t calculateSectionContribsStreamSize() const;
  uint32_t calculateSectionMapStreamSize() const;
  uint32_t calculateFileInfoSubstreamSize() const;
  uint32_t calculateNamesBufferSize() const;
  uint32_t calculateDbgStreamsSize() const;

  Error generateFileInfoSubstream();

  msf::MSFBuilder &Msf;
  BumpPtrAllocator &Allocator;

  std::optional<PdbRaw_DbiVer> VerHeader;
  uint32_t Age;
  uint16_t BuildNumber;
  uint16_t PdbDllVersion;
  uint16_t PdbDllRbld;
  uint16_t Flags;
  PDB_Machine MachineType;
  uint32_t GlobalsStreamIndex = kInvalidStreamIndex;
  uint32_t PublicsStreamIndex = kInvalidStreamIndex;
  uint32_t SymRecordStreamIndex = kInvalidStreamIndex;

  const DbiStreamHeader *Header;

  std::vector<std::unique_ptr<DbiModuleDescriptorBuilder>> ModiList;

  std::optional<codeview::DebugFrameDataSubsection> NewFpoData;
  std::vector<object::FpoData> OldFpoData;

  StringMap<uint32_t> SourceFileNames;

  PDBStringTableBuilder ECNamesBuilder;
  WritableBinaryStreamRef NamesBuffer;
  MutableBinaryByteStream FileInfoBuffer;
  std::vector<SectionContrib> SectionContribs;
  std::vector<SecMapEntry> SectionMap;
  std::array<std::optional<DebugStream>, (int)DbgHeaderType::Max> DbgStreams;
};
} // namespace pdb
} // namespace llvm

#endif
//===- TpiStreamBuilder.h - PDB Tpi Stream Creation -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_TPISTREAMBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_TPISTREAMBUILDER_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"

#include <vector>

namespace llvm {
class BinaryByteStream;
template <typename T> struct BinaryItemTraits;

template <> struct BinaryItemTraits<llvm::codeview::CVType> {
  static size_t length(const codeview::CVType &Item) { return Item.length(); }
  static ArrayRef<uint8_t> bytes(const codeview::CVType &Item) {
    return Item.data();
  }
};

namespace msf {
class MSFBuilder;
struct MSFLayout;
}
namespace pdb {
struct TpiStreamHeader;

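/// A minimal sketch (illustrative only) of appending one pre-serialized type
/// record; it assumes `Msf` is an msf::MSFBuilder and `RecordData` holds the
/// bytes of a single CodeView type record:
/// \code
///   TpiStreamBuilder Tpi(Msf, StreamTPI);
///   Tpi.setVersionHeader(PdbTpiV80);
///   Tpi.addTypeRecord(RecordData, /*Hash=*/std::nullopt);
/// \endcode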
class TpiStreamBuilder {
public:
  explicit TpiStreamBuilder(msf::MSFBuilder &Msf, uint32_t StreamIdx);
  ~TpiStreamBuilder();

  TpiStreamBuilder(const TpiStreamBuilder &) = delete;
  TpiStreamBuilder &operator=(const TpiStreamBuilder &) = delete;

  void setVersionHeader(PdbRaw_TpiVer Version);
  void addTypeRecord(ArrayRef<uint8_t> Type, std::optional<uint32_t> Hash);
  void addTypeRecords(ArrayRef<uint8_t> Types, ArrayRef<uint16_t> Sizes,
                      ArrayRef<uint32_t> Hashes);

  Error finalizeMsfLayout();

  uint32_t getRecordCount() const { return TypeRecordCount; }

  Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef Buffer);

  uint32_t calculateSerializedLength();

private:
  void updateTypeIndexOffsets(ArrayRef<uint16_t> Sizes);

  uint32_t calculateHashBufferSize() const;
  uint32_t calculateIndexOffsetSize() const;
  Error finalize();

  msf::MSFBuilder &Msf;
  BumpPtrAllocator &Allocator;

  uint32_t TypeRecordCount = 0;
  size_t TypeRecordBytes = 0;

  PdbRaw_TpiVer VerHeader = PdbRaw_TpiVer::PdbTpiV80;
  std::vector<ArrayRef<uint8_t>> TypeRecBuffers;
  std::vector<uint32_t> TypeHashes;
  std::vector<codeview::TypeIndexOffset> TypeIndexOffsets;
  uint32_t HashStreamIndex = kInvalidStreamIndex;
  std::unique_ptr<BinaryByteStream> HashValueStream;

  const TpiStreamHeader *Header;
  uint32_t Idx;
};
} // namespace pdb
} // namespace llvm

#endif
//===- ModuleDebugStream.h - PDB Module Info Stream Access ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_MODULEDEBUGSTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_MODULEDEBUGSTREAM_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>

namespace llvm {
class BinaryStreamReader;
namespace codeview {
class DebugChecksumsSubsectionRef;
}
namespace msf {
class MappedBlockStream;
}
namespace pdb {

class ModuleDebugStreamRef {
  using DebugSubsectionIterator = codeview::DebugSubsectionArray::Iterator;

public:
  ModuleDebugStreamRef(const DbiModuleDescriptor &Module,
                       std::unique_ptr<msf::MappedBlockStream> Stream);
  ModuleDebugStreamRef(ModuleDebugStreamRef &&Other) = default;
  ModuleDebugStreamRef(const ModuleDebugStreamRef &Other) = default;
  ~ModuleDebugStreamRef();

  Error reload();

  uint32_t signature() const { return Signature; }

  iterator_range<codeview::CVSymbolArray::Iterator>
  symbols(bool *HadError) const;

  const codeview::CVSymbolArray &getSymbolArray() const { return SymbolArray; }
  const codeview::CVSymbolArray
  getSymbolArrayForScope(uint32_t ScopeBegin) const;

  BinarySubstreamRef getSymbolsSubstream() const;
  BinarySubstreamRef getC11LinesSubstream() const;
  BinarySubstreamRef getC13LinesSubstream() const;
  BinarySubstreamRef getGlobalRefsSubstream() const;

  ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = delete;

  codeview::CVSymbol readSymbolAtOffset(uint32_t Offset) const;

  iterator_range<DebugSubsectionIterator> subsections() const;
  codeview::DebugSubsectionArray getSubsectionsArray() const {
    return Subsections;
  }

  bool hasDebugSubsections() const;

  Error commit();

  Expected<codeview::DebugChecksumsSubsectionRef>
  findChecksumsSubsection() const;

private:
  Error reloadSerialize(BinaryStreamReader &Reader);

  DbiModuleDescriptor Mod;

  uint32_t Signature;

  std::shared_ptr<msf::MappedBlockStream> Stream;

  codeview::CVSymbolArray SymbolArray;

  BinarySubstreamRef SymbolsSubstream;
  BinarySubstreamRef C11LinesSubstream;
  BinarySubstreamRef C13LinesSubstream;
  BinarySubstreamRef GlobalRefsSubstream;

  codeview::DebugSubsectionArray Subsections;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_MODULEDEBUGSTREAM_H
//===- PDBFileBuilder.h - PDB File Creation ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_PDBFILEBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_PDBFILEBUILDER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/PDB/Native/HashTable.h"
#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

namespace llvm {
class WritableBinaryStream;
namespace codeview {
struct GUID;
}

namespace msf {
class MSFBuilder;
struct MSFLayout;
}
namespace pdb {
struct SrcHeaderBlockEntry;
class DbiStreamBuilder;
class InfoStreamBuilder;
class GSIStreamBuilder;
class TpiStreamBuilder;

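/// Owns the per-stream builders and stitches them into a single MSF
/// container.  A minimal sketch; illustrative only, and real usage must also
/// populate the TPI/IPI/GSI builders before committing:
/// \code
///   BumpPtrAllocator Alloc;
///   PDBFileBuilder Builder(Alloc);
///   if (Error E = Builder.initialize(/*BlockSize=*/4096))
///     return E;
///   Builder.getInfoBuilder().setVersion(PdbImplVC70);
///   Builder.getDbiBuilder().setVersionHeader(PdbDbiV70);
///   codeview::GUID Guid;
///   if (Error E = Builder.commit("out.pdb", &Guid))
///     return E;
/// \endcode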
class PDBFileBuilder {
public:
  explicit PDBFileBuilder(BumpPtrAllocator &Allocator);
  ~PDBFileBuilder();
  PDBFileBuilder(const PDBFileBuilder &) = delete;
  PDBFileBuilder &operator=(const PDBFileBuilder &) = delete;

  Error initialize(uint32_t BlockSize);

  msf::MSFBuilder &getMsfBuilder();
  InfoStreamBuilder &getInfoBuilder();
  DbiStreamBuilder &getDbiBuilder();
  TpiStreamBuilder &getTpiBuilder();
  TpiStreamBuilder &getIpiBuilder();
  PDBStringTableBuilder &getStringTableBuilder();
  GSIStreamBuilder &getGsiBuilder();

  // If HashPDBContentsToGUID is true on the InfoStreamBuilder, Guid is filled
  // with the computed PDB GUID on return.
  Error commit(StringRef Filename, codeview::GUID *Guid);

  Expected<uint32_t> getNamedStreamIndex(StringRef Name) const;
  Error addNamedStream(StringRef Name, StringRef Data);
  void addInjectedSource(StringRef Name, std::unique_ptr<MemoryBuffer> Buffer);

private:
  struct InjectedSourceDescriptor {
    // The full name of the stream that contains the contents of this injected
    // source.  This is built as a concatenation of the literal "/src/files"
    // plus the "vname".
    std::string StreamName;

    // The string table index of the file name, exactly as specified by the
    // user.
    uint32_t NameIndex;

    // The string table index of the "vname" of the file.  As far as we
    // understand, this is the same as the name, except it is lowercased and
    // forward slashes are converted to backslashes.
    uint32_t VNameIndex;
    std::unique_ptr<MemoryBuffer> Content;
  };

  Error finalizeMsfLayout();
  Expected<uint32_t> allocateNamedStream(StringRef Name, uint32_t Size);

  void commitInjectedSources(WritableBinaryStream &MsfBuffer,
                             const msf::MSFLayout &Layout);
  void commitSrcHeaderBlock(WritableBinaryStream &MsfBuffer,
                            const msf::MSFLayout &Layout);

  BumpPtrAllocator &Allocator;

  std::unique_ptr<msf::MSFBuilder> Msf;
  std::unique_ptr<InfoStreamBuilder> Info;
  std::unique_ptr<DbiStreamBuilder> Dbi;
  std::unique_ptr<GSIStreamBuilder> Gsi;
  std::unique_ptr<TpiStreamBuilder> Tpi;
  std::unique_ptr<TpiStreamBuilder> Ipi;

  PDBStringTableBuilder Strings;
  StringTableHashTraits InjectedSourceHashTraits;
  HashTable<SrcHeaderBlockEntry> InjectedSourceTable;

  SmallVector<InjectedSourceDescriptor, 2> InjectedSources;

  NamedStreamMap NamedStreams;
  DenseMap<uint32_t, std::string> NamedStreamData;
};
} // namespace pdb
} // namespace llvm

#endif
//===- DbiModuleDescriptor.h - PDB module information -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULEDESCRIPTOR_H
#define LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULEDESCRIPTOR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {
template <typename T> struct VarStreamArrayExtractor;

namespace pdb {
struct ModuleInfoHeader;
struct SectionContrib;
class DbiModuleDescriptor {
  friend class DbiStreamBuilder;

public:
  DbiModuleDescriptor() = default;
  DbiModuleDescriptor(const DbiModuleDescriptor &Info) = default;
  DbiModuleDescriptor &operator=(const DbiModuleDescriptor &Info) = default;

  static Error initialize(BinaryStreamRef Stream, DbiModuleDescriptor &Info);

  bool hasECInfo() const;
  uint16_t getTypeServerIndex() const;
  uint16_t getModuleStreamIndex() const;
  uint32_t getSymbolDebugInfoByteSize() const;
  uint32_t getC11LineInfoByteSize() const;
  uint32_t getC13LineInfoByteSize() const;
  uint32_t getNumberOfFiles() const;
  uint32_t getSourceFileNameIndex() const;
  uint32_t getPdbFilePathNameIndex() const;

  StringRef getModuleName() const;
  StringRef getObjFileName() const;

  uint32_t getRecordLength() const;

  const SectionContrib &getSectionContrib() const;

private:
  StringRef ModuleName;
  StringRef ObjFileName;
  const ModuleInfoHeader *Layout = nullptr;
};

} // end namespace pdb

template <> struct VarStreamArrayExtractor<pdb::DbiModuleDescriptor> {
  Error operator()(BinaryStreamRef Stream, uint32_t &Length,
                   pdb::DbiModuleDescriptor &Info) {
    if (auto EC = pdb::DbiModuleDescriptor::initialize(Stream, Info))
      return EC;
    Length = Info.getRecordLength();
    return Error::success();
  }
};
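
// This extractor lets a VarStreamArray walk the variable-length module
// records directly out of the DBI stream's ModInfo substream.  A minimal
// sketch (illustrative only; `ModInfoSubstream` is an assumed BinaryStreamRef
// over that substream):
//
//   VarStreamArray<pdb::DbiModuleDescriptor> Descriptors;
//   BinaryStreamReader Reader(ModInfoSubstream);
//   if (auto EC = Reader.readArray(Descriptors, Reader.bytesRemaining()))
//     return EC;
//   for (const pdb::DbiModuleDescriptor &Descriptor : Descriptors) {
//     // Use Descriptor.getModuleName() etc.
//   }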

} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULEDESCRIPTOR_H
//===- NativeLineNumber.h - Native line number implementation ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVELINENUMBER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVELINENUMBER_H

#include "llvm/DebugInfo/CodeView/Line.h"
#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"

namespace llvm {
namespace pdb {

class NativeSession;

class NativeLineNumber : public IPDBLineNumber {
public:
  explicit NativeLineNumber(const NativeSession &Session,
                            const codeview::LineInfo Line,
                            uint32_t ColumnNumber, uint32_t Length,
                            uint32_t Section, uint32_t Offset,
                            uint32_t SrcFileId, uint32_t CompilandId);

  uint32_t getLineNumber() const override;
  uint32_t getLineNumberEnd() const override;
  uint32_t getColumnNumber() const override;
  uint32_t getColumnNumberEnd() const override;
  uint32_t getAddressSection() const override;
  uint32_t getAddressOffset() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint64_t getVirtualAddress() const override;
  uint32_t getLength() const override;
  uint32_t getSourceFileId() const override;
  uint32_t getCompilandId() const override;
  bool isStatement() const override;

private:
  const NativeSession &Session;
  const codeview::LineInfo Line;
  uint32_t ColumnNumber;
  uint32_t Section;
  uint32_t Offset;
  uint32_t Length;
  uint32_t SrcFileId;
  uint32_t CompilandId;
};
} // namespace pdb
} // namespace llvm
#endif
//===- GSIStreamBuilder.h - PDB Publics/Globals Stream Creation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_GSISTREAMBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_GSISTREAMBUILDER_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/PDB/Native/GlobalsStream.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
class ConstantSym;
class DataSym;
class ProcRefSym;
} // namespace codeview
template <typename T> struct BinaryItemTraits;

template <> struct BinaryItemTraits<codeview::CVSymbol> {
  static size_t length(const codeview::CVSymbol &Item) {
    return Item.RecordData.size();
  }
  static ArrayRef<uint8_t> bytes(const codeview::CVSymbol &Item) {
    return Item.RecordData;
  }
};

namespace msf {
class MSFBuilder;
struct MSFLayout;
} // namespace msf
namespace pdb {
struct GSIHashStreamBuilder;
struct BulkPublic;
struct SymbolDenseMapInfo;

class GSIStreamBuilder {

public:
  explicit GSIStreamBuilder(msf::MSFBuilder &Msf);
  ~GSIStreamBuilder();

  GSIStreamBuilder(const GSIStreamBuilder &) = delete;
  GSIStreamBuilder &operator=(const GSIStreamBuilder &) = delete;

  Error finalizeMsfLayout();

  Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef Buffer);

  uint32_t getPublicsStreamIndex() const { return PublicsStreamIndex; }
  uint32_t getGlobalsStreamIndex() const { return GlobalsStreamIndex; }
  uint32_t getRecordStreamIndex() const { return RecordStreamIndex; }

  // Add public symbols in bulk.
  void addPublicSymbols(std::vector<BulkPublic> &&PublicsIn);

  void addGlobalSymbol(const codeview::ProcRefSym &Sym);
  void addGlobalSymbol(const codeview::DataSym &Sym);
  void addGlobalSymbol(const codeview::ConstantSym &Sym);

  // Add a pre-serialized global symbol record. The caller must ensure that the
  // symbol data remains alive until the global stream is committed to disk.
  void addGlobalSymbol(const codeview::CVSymbol &Sym);

private:
  void finalizePublicBuckets();
  void finalizeGlobalBuckets(uint32_t RecordZeroOffset);

  template <typename T> void serializeAndAddGlobal(const T &Symbol);

  uint32_t calculatePublicsHashStreamSize() const;
  uint32_t calculateGlobalsHashStreamSize() const;
  Error commitSymbolRecordStream(WritableBinaryStreamRef Stream);
  Error commitPublicsHashStream(WritableBinaryStreamRef Stream);
  Error commitGlobalsHashStream(WritableBinaryStreamRef Stream);

  uint32_t PublicsStreamIndex = kInvalidStreamIndex;
  uint32_t GlobalsStreamIndex = kInvalidStreamIndex;
  uint32_t RecordStreamIndex = kInvalidStreamIndex;
  msf::MSFBuilder &Msf;
  std::unique_ptr<GSIHashStreamBuilder> PSH;
  std::unique_ptr<GSIHashStreamBuilder> GSH;

  // List of all of the public records. These are stored unserialized so that we
  // can defer copying the names until we are ready to commit the PDB.
  std::vector<BulkPublic> Publics;

  // List of all of the global records.
  std::vector<codeview::CVSymbol> Globals;

  // Hash table for deduplicating global typedef and constant records. Only used
  // for globals.
  llvm::DenseSet<codeview::CVSymbol, SymbolDenseMapInfo> GlobalsSeen;
};

/// This struct is equivalent to codeview::PublicSym32, but it has been
/// optimized for size to speed up bulk serialization and sorting operations
/// during PDB writing.
struct BulkPublic {
  BulkPublic() : Flags(0), BucketIdx(0) {}

  const char *Name = nullptr;
  uint32_t NameLen = 0;

  // Offset of the symbol record in the publics stream.
  uint32_t SymOffset = 0;

  // Section offset of the symbol in the image.
  uint32_t Offset = 0;

  // Section index of the section containing the symbol.
  uint16_t Segment = 0;

  // PublicSymFlags.
  uint16_t Flags : 4;

  // GSI hash table bucket index.  Values are always less than IPHR_HASH.
  uint16_t BucketIdx : 12;
  static_assert(IPHR_HASH <= 1 << 12, "bitfield too small");

  void setFlags(codeview::PublicSymFlags F) {
    Flags = uint32_t(F);
    assert(Flags == uint32_t(F) && "truncated");
  }

  void setBucketIdx(uint16_t B) {
    assert(B < IPHR_HASH);
    BucketIdx = B;
  }

  StringRef getName() const { return StringRef(Name, NameLen); }
};

static_assert(sizeof(BulkPublic) <= 24, "unexpected size increase");
static_assert(std::is_trivially_copyable<BulkPublic>::value,
              "should be trivial");

} // namespace pdb
} // namespace llvm

#endif
//===- Hash.h - PDB hash functions ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_HASH_H
#define LLVM_DEBUGINFO_PDB_NATIVE_HASH_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <cstdint>

namespace llvm {
namespace pdb {

uint32_t hashStringV1(StringRef Str);
uint32_t hashStringV2(StringRef Str);
uint32_t hashBufferV8(ArrayRef<uint8_t> Data);
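
// These hashes map names or buffers onto hash table buckets.  A minimal
// sketch (the bucket count is illustrative):
//
//   uint32_t Bucket = hashStringV1("main") % 4096;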

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_HASH_H
//===- Formatters.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_FORMATTERS_H
#define LLVM_DEBUGINFO_PDB_NATIVE_FORMATTERS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/Formatters.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/Support/FormatProviders.h"

#define FORMAT_CASE(Value, Name)                                               \
  case Value:                                                                  \
    Stream << Name;                                                            \
    break;

namespace llvm {
template <> struct format_provider<pdb::PdbRaw_ImplVer> {
  static void format(const pdb::PdbRaw_ImplVer &V, llvm::raw_ostream &Stream,
                     StringRef Style) {
    switch (V) {
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC110, "VC110")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC140, "VC140")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC2, "VC2")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC4, "VC4")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC41, "VC41")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC50, "VC50")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC70, "VC70")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC70Dep, "VC70Dep")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC80, "VC80")
      FORMAT_CASE(pdb::PdbRaw_ImplVer::PdbImplVC98, "VC98")
    }
  }
};
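
// With this specialization in scope, an implementation version can be passed
// straight to LLVM's formatting utilities (llvm/Support/FormatVariadic.h).
// A minimal sketch:
//
//   std::string S = formatv("{0}", pdb::PdbImplVC70).str(); // "VC70"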
} // namespace llvm

#endif
//===- InfoStream.h - PDB Info Stream (Stream 1) Access ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_INFOSTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_INFOSTREAM_H

#include "llvm/ADT/StringMap.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/Support/BinaryStream.h"
#include "llvm/Support/BinaryStreamRef.h"

#include "llvm/Support/Error.h"

namespace llvm {
namespace pdb {
struct InfoStreamHeader;
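/// A minimal sketch (illustrative only) of loading the info stream, assuming
/// `Stream` is a std::unique_ptr<BinaryStream> positioned over PDB stream 1:
/// \code
///   InfoStream Info(std::move(Stream));
///   if (Error E = Info.reload())
///     return E;
///   PdbRaw_ImplVer Version = Info.getVersion();
///   codeview::GUID Guid = Info.getGuid();
/// \endcode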
class InfoStream {
  friend class InfoStreamBuilder;

public:
  InfoStream(std::unique_ptr<BinaryStream> Stream);

  Error reload();

  uint32_t getStreamSize() const;

  const InfoStreamHeader *getHeader() const { return Header; }

  bool containsIdStream() const;
  PdbRaw_ImplVer getVersion() const;
  uint32_t getSignature() const;
  uint32_t getAge() const;
  codeview::GUID getGuid() const;
  uint32_t getNamedStreamMapByteSize() const;

  PdbRaw_Features getFeatures() const;
  ArrayRef<PdbRaw_FeatureSig> getFeatureSignatures() const;

  const NamedStreamMap &getNamedStreams() const;

  BinarySubstreamRef getNamedStreamsBuffer() const;

  Expected<uint32_t> getNamedStreamIndex(llvm::StringRef Name) const;
  StringMap<uint32_t> named_streams() const;

private:
  std::unique_ptr<BinaryStream> Stream;

  const InfoStreamHeader *Header;

  BinarySubstreamRef SubNamedStreams;

  std::vector<PdbRaw_FeatureSig> FeatureSignatures;
  PdbRaw_Features Features = PdbFeatureNone;

  uint32_t NamedStreamMapByteSize = 0;

  NamedStreamMap NamedStreams;
};
} // namespace pdb
} // namespace llvm

#endif
//==- NativeEnumTypes.h - Native Type Enumerator impl ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMTYPES_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMTYPES_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"

#include <vector>

namespace llvm {
namespace codeview {
class LazyRandomTypeCollection;
}
namespace pdb {

class NativeSession;

class NativeEnumTypes : public IPDBEnumChildren<PDBSymbol> {
public:
  NativeEnumTypes(NativeSession &Session,
                  codeview::LazyRandomTypeCollection &TypeCollection,
                  std::vector<codeview::TypeLeafKind> Kinds);

  NativeEnumTypes(NativeSession &Session,
                  std::vector<codeview::TypeIndex> Indices);

  uint32_t getChildCount() const override;
  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<PDBSymbol> getNext() override;
  void reset() override;

private:
  std::vector<codeview::TypeIndex> Matches;
  uint32_t Index;
  NativeSession &Session;
};

} // namespace pdb
} // namespace llvm

#endif
//===- NativeTypePointer.h - info about pointer type ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEPOINTER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEPOINTER_H

#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {

class NativeTypePointer : public NativeRawSymbol {
public:
  // Create a pointer record for a simple type.
  NativeTypePointer(NativeSession &Session, SymIndexId Id,
                    codeview::TypeIndex TI);

  // Create a pointer record for a non-simple type.
  NativeTypePointer(NativeSession &Session, SymIndexId Id,
                    codeview::TypeIndex TI, codeview::PointerRecord PR);
  ~NativeTypePointer() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  SymIndexId getClassParentId() const override;
  bool isConstType() const override;
  uint64_t getLength() const override;
  bool isReference() const override;
  bool isRValueReference() const override;
  bool isPointerToDataMember() const override;
  bool isPointerToMemberFunction() const override;
  SymIndexId getTypeId() const override;
  bool isRestrictedType() const override;
  bool isVolatileType() const override;
  bool isUnalignedType() const override;

  bool isSingleInheritance() const override;
  bool isMultipleInheritance() const override;
  bool isVirtualInheritance() const override;

protected:
  bool isMemberPointer() const;
  codeview::TypeIndex TI;
  std::optional<codeview::PointerRecord> Record;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEPOINTER_H
//===- NativeSourceFile.h - Native source file implementation ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESOURCEFILE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESOURCEFILE_H

#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {
class PDBSymbolCompiland;
template <typename ChildType> class IPDBEnumChildren;
class NativeSession;

class NativeSourceFile : public IPDBSourceFile {
public:
  explicit NativeSourceFile(NativeSession &Session, uint32_t FileId,
                            const codeview::FileChecksumEntry &Checksum);

  std::string getFileName() const override;
  uint32_t getUniqueId() const override;
  std::string getChecksum() const override;
  PDB_Checksum getChecksumType() const override;
  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
  getCompilands() const override;

private:
  NativeSession &Session;
  uint32_t FileId;
  const codeview::FileChecksumEntry Checksum;
};
} // namespace pdb
} // namespace llvm
#endif
//===- HashTable.h - PDB Hash Table -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_HASHTABLE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_HASHTABLE_H

#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>

namespace llvm {

namespace pdb {

Error readSparseBitVector(BinaryStreamReader &Stream, SparseBitVector<> &V);
Error writeSparseBitVector(BinaryStreamWriter &Writer, SparseBitVector<> &Vec);

template <typename ValueT> class HashTable;

template <typename ValueT>
class HashTableIterator
    : public iterator_facade_base<HashTableIterator<ValueT>,
                                  std::forward_iterator_tag,
                                  const std::pair<uint32_t, ValueT>> {
  using BaseT = typename HashTableIterator::iterator_facade_base;
  friend HashTable<ValueT>;

  HashTableIterator(const HashTable<ValueT> &Map, uint32_t Index,
                    bool IsEnd)
      : Map(&Map), Index(Index), IsEnd(IsEnd) {}

public:
  HashTableIterator(const HashTable<ValueT> &Map) : Map(&Map) {
    int I = Map.Present.find_first();
    if (I == -1) {
      Index = 0;
      IsEnd = true;
    } else {
      Index = static_cast<uint32_t>(I);
      IsEnd = false;
    }
  }

  HashTableIterator(const HashTableIterator &R) = default;
  HashTableIterator &operator=(const HashTableIterator &R) {
    Map = R.Map;
    return *this;
  }
  bool operator==(const HashTableIterator &R) const {
    if (IsEnd && R.IsEnd)
      return true;
    if (IsEnd != R.IsEnd)
      return false;

    return (Map == R.Map) && (Index == R.Index);
  }
  const std::pair<uint32_t, ValueT> &operator*() const {
    assert(Map->Present.test(Index));
    return Map->Buckets[Index];
  }

  // Implement postfix op++ in terms of prefix op++ by using the superclass
  // implementation.
  using BaseT::operator++;
  HashTableIterator &operator++() {
    while (Index < Map->Buckets.size()) {
      ++Index;
      if (Map->Present.test(Index))
        return *this;
    }

    IsEnd = true;
    return *this;
  }

private:
  bool isEnd() const { return IsEnd; }
  uint32_t index() const { return Index; }

  const HashTable<ValueT> *Map;
  uint32_t Index;
  bool IsEnd;
};

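/// Serializable hash table keyed by uint32_t storage keys.  Lookup and
/// insertion go through a traits object that maps between lookup keys and
/// storage keys.  A minimal sketch of such a traits type (illustrative only;
/// real traits such as StringTableHashTraits map names through a string
/// table):
/// \code
///   struct IdentityTraits {
///     uint32_t hashLookupKey(uint32_t K) const { return K; }
///     uint32_t storageKeyToLookupKey(uint32_t K) const { return K; }
///     uint32_t lookupKeyToStorageKey(uint32_t K) const { return K; }
///   };
///
///   HashTable<uint32_t> Table;
///   IdentityTraits Traits;
///   Table.set_as(42u, 7u, Traits);       // insert
///   uint32_t V = Table.get(42u, Traits); // V == 7
/// \endcode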
template <typename ValueT>
class HashTable {
  struct Header {
    support::ulittle32_t Size;
    support::ulittle32_t Capacity;
  };

  using BucketList = std::vector<std::pair<uint32_t, ValueT>>;

public:
  using const_iterator = HashTableIterator<ValueT>;
  friend const_iterator;

  HashTable() { Buckets.resize(8); }
  explicit HashTable(uint32_t Capacity) {
    Buckets.resize(Capacity);
  }

  Error load(BinaryStreamReader &Stream) {
    const Header *H;
    if (auto EC = Stream.readObject(H))
      return EC;
    if (H->Capacity == 0)
      return make_error<RawError>(raw_error_code::corrupt_file,
                                  "Invalid Hash Table Capacity");
    if (H->Size > maxLoad(H->Capacity))
      return make_error<RawError>(raw_error_code::corrupt_file,
                                  "Invalid Hash Table Size");

    Buckets.resize(H->Capacity);

    if (auto EC = readSparseBitVector(Stream, Present))
      return EC;
    if (Present.count() != H->Size)
      return make_error<RawError>(raw_error_code::corrupt_file,
                                  "Present bit vector does not match size!");

    if (auto EC = readSparseBitVector(Stream, Deleted))
      return EC;
    if (Present.intersects(Deleted))
      return make_error<RawError>(raw_error_code::corrupt_file,
                                  "Present bit vector intersects deleted!");

    for (uint32_t P : Present) {
      if (auto EC = Stream.readInteger(Buckets[P].first))
        return EC;
      const ValueT *Value;
      if (auto EC = Stream.readObject(Value))
        return EC;
      Buckets[P].second = *Value;
    }

    return Error::success();
  }

  uint32_t calculateSerializedLength() const {
    uint32_t Size = sizeof(Header);

    constexpr int BitsPerWord = 8 * sizeof(uint32_t);

    int NumBitsP = Present.find_last() + 1;
    int NumBitsD = Deleted.find_last() + 1;

    uint32_t NumWordsP = alignTo(NumBitsP, BitsPerWord) / BitsPerWord;
    uint32_t NumWordsD = alignTo(NumBitsD, BitsPerWord) / BitsPerWord;

    // The Present bit set is serialized as a word count (4 bytes) followed
    // by that many words (4 bytes each).
    Size += sizeof(uint32_t);
    Size += NumWordsP * sizeof(uint32_t);

    // The Deleted bit set is serialized the same way.
    Size += sizeof(uint32_t);
    Size += NumWordsD * sizeof(uint32_t);

    // One (Key, ValueT) pair for each present entry.
    Size += (sizeof(uint32_t) + sizeof(ValueT)) * size();

    return Size;
  }

  Error commit(BinaryStreamWriter &Writer) const {
    Header H;
    H.Size = size();
    H.Capacity = capacity();
    if (auto EC = Writer.writeObject(H))
      return EC;

    if (auto EC = writeSparseBitVector(Writer, Present))
      return EC;

    if (auto EC = writeSparseBitVector(Writer, Deleted))
      return EC;

    for (const auto &Entry : *this) {
      if (auto EC = Writer.writeInteger(Entry.first))
        return EC;
      if (auto EC = Writer.writeObject(Entry.second))
        return EC;
    }
    return Error::success();
  }

  void clear() {
    Buckets.resize(8);
    Present.clear();
    Deleted.clear();
  }

  bool empty() const { return size() == 0; }
  uint32_t capacity() const { return Buckets.size(); }
  uint32_t size() const { return Present.count(); }

  const_iterator begin() const { return const_iterator(*this); }
  const_iterator end() const { return const_iterator(*this, 0, true); }

  /// Find the entry whose key has the specified hash value, using the specified
  /// traits defining hash function and equality.
  template <typename Key, typename TraitsT>
  const_iterator find_as(const Key &K, TraitsT &Traits) const {
    uint32_t H = Traits.hashLookupKey(K) % capacity();
    uint32_t I = H;
    std::optional<uint32_t> FirstUnused;
    do {
      if (isPresent(I)) {
        if (Traits.storageKeyToLookupKey(Buckets[I].first) == K)
          return const_iterator(*this, I, false);
      } else {
        if (!FirstUnused)
          FirstUnused = I;
        // Insertion occurs via linear probing from the slot hint, placing the
        // entry at the first empty / deleted location.  Therefore, if while
        // probing we find a location that is neither present nor deleted, then
        // nothing can EVER have been inserted at this location, and thus it is
        // not possible for a matching value to occur later.
        if (!isDeleted(I))
          break;
      }
      I = (I + 1) % capacity();
    } while (I != H);

    // The only way FirstUnused would not be set is if every single entry in the
    // table were Present.  But this would violate the load factor constraints
    // that we impose, so it should never happen.
    assert(FirstUnused);
    return const_iterator(*this, *FirstUnused, true);
  }

  /// Set the entry using a key type that the specified Traits can convert
  /// from a real key to an internal key.
  template <typename Key, typename TraitsT>
  bool set_as(const Key &K, ValueT V, TraitsT &Traits) {
    return set_as_internal(K, std::move(V), Traits, std::nullopt);
  }

  template <typename Key, typename TraitsT>
  ValueT get(const Key &K, TraitsT &Traits) const {
    auto Iter = find_as(K, Traits);
    assert(Iter != end());
    return (*Iter).second;
  }

protected:
  bool isPresent(uint32_t K) const { return Present.test(K); }
  bool isDeleted(uint32_t K) const { return Deleted.test(K); }

  BucketList Buckets;
  mutable SparseBitVector<> Present;
  mutable SparseBitVector<> Deleted;

private:
  /// Set the entry using a key type that the specified Traits can convert
  /// from a real key to an internal key.
  template <typename Key, typename TraitsT>
  bool set_as_internal(const Key &K, ValueT V, TraitsT &Traits,
                       std::optional<uint32_t> InternalKey) {
    auto Entry = find_as(K, Traits);
    if (Entry != end()) {
      assert(isPresent(Entry.index()));
      assert(Traits.storageKeyToLookupKey(Buckets[Entry.index()].first) == K);
      // We're updating, no need to do anything special.
      Buckets[Entry.index()].second = V;
      return false;
    }

    auto &B = Buckets[Entry.index()];
    assert(!isPresent(Entry.index()));
    assert(Entry.isEnd());
    B.first = InternalKey ? *InternalKey : Traits.lookupKeyToStorageKey(K);
    B.second = V;
    Present.set(Entry.index());
    Deleted.reset(Entry.index());

    grow(Traits);

    assert((find_as(K, Traits)) != end());
    return true;
  }

  static uint32_t maxLoad(uint32_t capacity) { return capacity * 2 / 3 + 1; }

  template <typename TraitsT>
  void grow(TraitsT &Traits) {
    uint32_t S = size();
    uint32_t MaxLoad = maxLoad(capacity());
    if (S < MaxLoad)
      return;
    assert(capacity() != UINT32_MAX && "Can't grow Hash table!");

    uint32_t NewCapacity = (capacity() <= INT32_MAX) ? MaxLoad * 2 : UINT32_MAX;

    // Growing requires rebuilding the table and re-hashing every item.  Make a
    // copy with a larger capacity, insert everything into the copy, then swap
    // it in.
    HashTable NewMap(NewCapacity);
    for (auto I : Present) {
      auto LookupKey = Traits.storageKeyToLookupKey(Buckets[I].first);
      NewMap.set_as_internal(LookupKey, Buckets[I].second, Traits,
                             Buckets[I].first);
    }

    Buckets.swap(NewMap.Buckets);
    std::swap(Present, NewMap.Present);
    std::swap(Deleted, NewMap.Deleted);
    assert(capacity() == NewCapacity);
    assert(size() == S);
  }
};

} // end namespace pdb

} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_HASHTABLE_H
//===- InputFile.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_INPUTFILE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_INPUTFILE_H

#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
#include "llvm/DebugInfo/PDB/Native/LinePrinter.h"
#include "llvm/DebugInfo/PDB/Native/ModuleDebugStream.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
class LazyRandomTypeCollection;
}
namespace object {
class COFFObjectFile;
} // namespace object

namespace pdb {
class InputFile;
class LinePrinter;
class PDBFile;
class NativeSession;
class SymbolGroupIterator;
class SymbolGroup;

class InputFile {
  InputFile();

  std::unique_ptr<NativeSession> PdbSession;
  object::OwningBinary<object::Binary> CoffObject;
  std::unique_ptr<MemoryBuffer> UnknownFile;
  PointerUnion<PDBFile *, object::COFFObjectFile *, MemoryBuffer *> PdbOrObj;

  using TypeCollectionPtr = std::unique_ptr<codeview::LazyRandomTypeCollection>;

  TypeCollectionPtr Types;
  TypeCollectionPtr Ids;

  enum TypeCollectionKind { kTypes, kIds };
  codeview::LazyRandomTypeCollection &
  getOrCreateTypeCollection(TypeCollectionKind Kind);

public:
  InputFile(PDBFile *Pdb) { PdbOrObj = Pdb; }
  InputFile(object::COFFObjectFile *Obj) { PdbOrObj = Obj; }
  InputFile(MemoryBuffer *Buffer) { PdbOrObj = Buffer; }
  ~InputFile();
  InputFile(InputFile &&Other) = default;

  static Expected<InputFile> open(StringRef Path,
                                  bool AllowUnknownFile = false);

  PDBFile &pdb();
  const PDBFile &pdb() const;
  object::COFFObjectFile &obj();
  const object::COFFObjectFile &obj() const;
  MemoryBuffer &unknown();
  const MemoryBuffer &unknown() const;

  StringRef getFilePath() const;

  bool hasTypes() const;
  bool hasIds() const;

  codeview::LazyRandomTypeCollection &types();
  codeview::LazyRandomTypeCollection &ids();

  iterator_range<SymbolGroupIterator> symbol_groups();
  SymbolGroupIterator symbol_groups_begin();
  SymbolGroupIterator symbol_groups_end();

  bool isPdb() const;
  bool isObj() const;
  bool isUnknown() const;
};

class SymbolGroup {
  friend class SymbolGroupIterator;

public:
  explicit SymbolGroup(InputFile *File, uint32_t GroupIndex = 0);

  Expected<StringRef> getNameFromStringTable(uint32_t Offset) const;
  Expected<StringRef> getNameFromChecksums(uint32_t Offset) const;

  void formatFromFileName(LinePrinter &Printer, StringRef File,
                          bool Append = false) const;

  void formatFromChecksumsOffset(LinePrinter &Printer, uint32_t Offset,
                                 bool Append = false) const;

  StringRef name() const;

  codeview::DebugSubsectionArray getDebugSubsections() const {
    return Subsections;
  }
  const ModuleDebugStreamRef &getPdbModuleStream() const;

  const InputFile &getFile() const { return *File; }
  InputFile &getFile() { return *File; }

  bool hasDebugStream() const { return DebugStream != nullptr; }

private:
  void initializeForPdb(uint32_t Modi);
  void updatePdbModi(uint32_t Modi);
  void updateDebugS(const codeview::DebugSubsectionArray &SS);

  void rebuildChecksumMap();
  InputFile *File = nullptr;
  StringRef Name;
  codeview::DebugSubsectionArray Subsections;
  std::shared_ptr<ModuleDebugStreamRef> DebugStream;
  codeview::StringsAndChecksumsRef SC;
  StringMap<codeview::FileChecksumEntry> ChecksumsByFile;
};

class SymbolGroupIterator
    : public iterator_facade_base<SymbolGroupIterator,
                                  std::forward_iterator_tag, SymbolGroup> {
public:
  SymbolGroupIterator();
  explicit SymbolGroupIterator(InputFile &File);
  SymbolGroupIterator(const SymbolGroupIterator &Other) = default;
  SymbolGroupIterator &operator=(const SymbolGroupIterator &R) = default;

  const SymbolGroup &operator*() const;
  SymbolGroup &operator*();

  bool operator==(const SymbolGroupIterator &R) const;
  SymbolGroupIterator &operator++();

private:
  void scanToNextDebugS();
  bool isEnd() const;

  uint32_t Index = 0;
  std::optional<object::section_iterator> SectionIter;
  SymbolGroup Value;
};

Expected<ModuleDebugStreamRef>
getModuleDebugStream(PDBFile &File, StringRef &ModuleName, uint32_t Index);
Expected<ModuleDebugStreamRef> getModuleDebugStream(PDBFile &File,
                                                    uint32_t Index);

bool shouldDumpSymbolGroup(uint32_t Idx, const SymbolGroup &Group,
                           const FilterOptions &Filters);

// TODO: Change these callbacks to be function_refs (de-templatify them).
template <typename CallbackT>
Error iterateOneModule(InputFile &File, const PrintScope &HeaderScope,
                       const SymbolGroup &SG, uint32_t Modi,
                       CallbackT Callback) {
  HeaderScope.P.formatLine(
      "Mod {0:4} | `{1}`: ",
      fmt_align(Modi, AlignStyle::Right, HeaderScope.LabelWidth), SG.name());

  AutoIndent Indent(HeaderScope);
  return Callback(Modi, SG);
}

template <typename CallbackT>
Error iterateSymbolGroups(InputFile &Input, const PrintScope &HeaderScope,
                          CallbackT Callback) {
  AutoIndent Indent(HeaderScope);

  FilterOptions Filters = HeaderScope.P.getFilters();
  if (Filters.DumpModi) {
    uint32_t Modi = *Filters.DumpModi;
    SymbolGroup SG(&Input, Modi);
    return iterateOneModule(Input, withLabelWidth(HeaderScope, NumDigits(Modi)),
                            SG, Modi, Callback);
  }

  uint32_t I = 0;

  for (const auto &SG : Input.symbol_groups()) {
    if (shouldDumpSymbolGroup(I, SG, Filters))
      if (auto Err =
              iterateOneModule(Input, withLabelWidth(HeaderScope, NumDigits(I)),
                               SG, I, Callback))
        return Err;

    ++I;
  }
  return Error::success();
}

template <typename SubsectionT>
Error iterateModuleSubsections(
    InputFile &File, const PrintScope &HeaderScope,
    llvm::function_ref<Error(uint32_t, const SymbolGroup &, SubsectionT &)>
        Callback) {

  return iterateSymbolGroups(
      File, HeaderScope, [&](uint32_t Modi, const SymbolGroup &SG) -> Error {
        for (const auto &SS : SG.getDebugSubsections()) {
          SubsectionT Subsection;

          if (SS.kind() != Subsection.kind())
            continue;

          BinaryStreamReader Reader(SS.getRecordData());
          if (auto Err = Subsection.initialize(Reader)) {
            // Skip subsections that fail to parse; consume the error so it is
            // not reported as unchecked in assertion-enabled builds.
            consumeError(std::move(Err));
            continue;
          }
          if (auto Err = Callback(Modi, SG, Subsection))
            return Err;
        }
        return Error::success();
      });
}

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_INPUTFILE_H
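
// Illustrative sketch, not part of the LLVM headers in this archive: opening
// an input file and walking its symbol groups with the API declared above.
// Assumes linking against LLVM's DebugInfoPDB library; the caller supplies
// the path (e.g. a hypothetical "sample.pdb").
#include "llvm/DebugInfo/PDB/Native/InputFile.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Error listSymbolGroups(llvm::StringRef Path) {
  llvm::Expected<llvm::pdb::InputFile> FileOrErr =
      llvm::pdb::InputFile::open(Path);
  if (!FileOrErr)
    return FileOrErr.takeError();
  llvm::pdb::InputFile File = std::move(*FileOrErr);
  // One SymbolGroup per module for PDBs, per debug section for object files.
  for (const llvm::pdb::SymbolGroup &SG : File.symbol_groups())
    llvm::outs() << SG.name() << "\n";
  return llvm::Error::success();
}
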
//===- DbiModuleDescriptorBuilder.h - PDB module information ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULEDESCRIPTORBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULEDESCRIPTORBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <string>
#include <vector>

namespace llvm {
class BinaryStreamWriter;
namespace codeview {
class DebugSubsection;
}

namespace msf {
class MSFBuilder;
struct MSFLayout;
}
namespace pdb {

// Represents merged or unmerged symbols. Merged symbols can be written to the
// output file as is, but unmerged symbols must be rewritten first. In either
// case, the size must be known up front.
struct SymbolListWrapper {
  explicit SymbolListWrapper(ArrayRef<uint8_t> Syms)
      : SymPtr(const_cast<uint8_t *>(Syms.data())), SymSize(Syms.size()),
        NeedsToBeMerged(false) {}
  explicit SymbolListWrapper(void *SymSrc, uint32_t Length)
      : SymPtr(SymSrc), SymSize(Length), NeedsToBeMerged(true) {}

  ArrayRef<uint8_t> asArray() const {
    return ArrayRef<uint8_t>(static_cast<const uint8_t *>(SymPtr), SymSize);
  }

  uint32_t size() const { return SymSize; }

  void *SymPtr = nullptr;
  uint32_t SymSize = 0;
  bool NeedsToBeMerged = false;
};

/// Represents a string table reference at some offset in the module symbol
/// stream.
struct StringTableFixup {
  uint32_t StrTabOffset = 0;
  uint32_t SymOffsetOfReference = 0;
};

class DbiModuleDescriptorBuilder {
  friend class DbiStreamBuilder;

public:
  DbiModuleDescriptorBuilder(StringRef ModuleName, uint32_t ModIndex,
                             msf::MSFBuilder &Msf);
  ~DbiModuleDescriptorBuilder();

  DbiModuleDescriptorBuilder(const DbiModuleDescriptorBuilder &) = delete;
  DbiModuleDescriptorBuilder &
  operator=(const DbiModuleDescriptorBuilder &) = delete;

  void setPdbFilePathNI(uint32_t NI);
  void setObjFileName(StringRef Name);

  // Callback to merge one source of unmerged symbols.
  using MergeSymbolsCallback = Error (*)(void *Ctx, void *Symbols,
                                         BinaryStreamWriter &Writer);

  void setMergeSymbolsCallback(void *Ctx, MergeSymbolsCallback Callback) {
    MergeSymsCtx = Ctx;
    MergeSymsCallback = Callback;
  }

  void setStringTableFixups(std::vector<StringTableFixup> &&Fixups) {
    StringTableFixups = std::move(Fixups);
  }

  void setFirstSectionContrib(const SectionContrib &SC);
  void addSymbol(codeview::CVSymbol Symbol);
  void addSymbolsInBulk(ArrayRef<uint8_t> BulkSymbols);

  // Add symbols of known size which will be merged (rewritten) when committing
  // the PDB to disk.
  void addUnmergedSymbols(void *SymSrc, uint32_t SymLength);

  void
  addDebugSubsection(std::shared_ptr<codeview::DebugSubsection> Subsection);

  void
  addDebugSubsection(const codeview::DebugSubsectionRecord &SubsectionContents);

  uint16_t getStreamIndex() const;
  StringRef getModuleName() const { return ModuleName; }
  StringRef getObjFileName() const { return ObjFileName; }

  unsigned getModuleIndex() const { return Layout.Mod; }

  ArrayRef<std::string> source_files() const { return SourceFiles; }

  uint32_t calculateSerializedLength() const;

  /// Return the offset within the module symbol stream of the next symbol
  /// record passed to addSymbol. Four bytes are added to account for the
  /// module stream's signature.
  uint32_t getNextSymbolOffset() const { return SymbolByteSize + 4; }

  void finalize();
  Error finalizeMsfLayout();

  /// Commit the DBI descriptor to the DBI stream.
  Error commit(BinaryStreamWriter &ModiWriter);

  /// Commit the accumulated symbols to the module symbol stream. Safe to call
  /// in parallel on different DbiModuleDescriptorBuilder objects. Only modifies
  /// the pre-allocated stream in question.
  Error commitSymbolStream(const msf::MSFLayout &MsfLayout,
                           WritableBinaryStreamRef MsfBuffer);

private:
  uint32_t calculateC13DebugInfoSize() const;

  void addSourceFile(StringRef Path);
  msf::MSFBuilder &MSF;

  uint32_t SymbolByteSize = 0;
  uint32_t PdbFilePathNI = 0;
  std::string ModuleName;
  std::string ObjFileName;
  std::vector<std::string> SourceFiles;
  std::vector<SymbolListWrapper> Symbols;

  void *MergeSymsCtx = nullptr;
  MergeSymbolsCallback MergeSymsCallback = nullptr;

  std::vector<StringTableFixup> StringTableFixups;

  std::vector<codeview::DebugSubsectionRecordBuilder> C13Builders;

  ModuleInfoHeader Layout;
};

} // end namespace pdb

} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_DBIMODULEDESCRIPTORBUILDER_H
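
// Illustrative sketch, not part of the LLVM headers in this archive: wiring
// the unmerged-symbol machinery declared above. The callback name, the
// pass-through body, and "demo.obj" are hypothetical; a real linker would
// rewrite the unmerged records into `Writer` inside the callback.
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h"

static llvm::Error passThroughMerge(void *Ctx, void *Symbols,
                                    llvm::BinaryStreamWriter &Writer) {
  (void)Ctx; (void)Symbols; (void)Writer; // record rewriting would happen here
  return llvm::Error::success();
}

static void configureModule(llvm::pdb::DbiModuleDescriptorBuilder &Mod,
                            void *LinkerCtx) {
  Mod.setObjFileName("demo.obj"); // hypothetical object file name
  Mod.setMergeSymbolsCallback(LinkerCtx, &passThroughMerge);
}
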
//===- NamedStreamMap.h - PDB Named Stream Map ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NAMEDSTREAMMAP_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NAMEDSTREAMMAP_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/Native/HashTable.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {

class BinaryStreamReader;
class BinaryStreamWriter;

namespace pdb {

class NamedStreamMap;

struct NamedStreamMapTraits {
  NamedStreamMap *NS;

  explicit NamedStreamMapTraits(NamedStreamMap &NS);
  uint16_t hashLookupKey(StringRef S) const;
  StringRef storageKeyToLookupKey(uint32_t Offset) const;
  uint32_t lookupKeyToStorageKey(StringRef S);
};

class NamedStreamMap {
  friend class NamedStreamMapBuilder;

public:
  NamedStreamMap();

  Error load(BinaryStreamReader &Stream);
  Error commit(BinaryStreamWriter &Writer) const;
  uint32_t calculateSerializedLength() const;

  uint32_t size() const;
  bool get(StringRef Stream, uint32_t &StreamNo) const;
  void set(StringRef Stream, uint32_t StreamNo);

  uint32_t appendStringData(StringRef S);
  StringRef getString(uint32_t Offset) const;
  uint32_t hashString(uint32_t Offset) const;

  StringMap<uint32_t> entries() const;

private:
  NamedStreamMapTraits HashTraits;
  /// Closed hash table from Offset -> StreamNumber, where Offset is the offset
  /// of the stream name in NamesBuffer.
  HashTable<support::ulittle32_t> OffsetIndexMap;

  /// Buffer of string data.
  std::vector<char> NamesBuffer;
};

} // end namespace pdb

} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NAMEDSTREAMMAP_H
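
// Illustrative sketch, not part of the LLVM headers in this archive: the
// lookup contract of NamedStreamMap. The stream name "/names" and stream
// number 7 are arbitrary example values.
#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
#include <cassert>

static void namedStreamExample() {
  llvm::pdb::NamedStreamMap Map;
  Map.set("/names", 7); // record that stream 7 holds the named stream
  uint32_t StreamNo = 0;
  bool Found = Map.get("/names", StreamNo);
  assert(Found && StreamNo == 7);
  (void)Found;
}
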
//===- PDBStringTableBuilder.h - PDB String Table Builder -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file creates the "/names" stream.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_PDBSTRINGTABLEBUILDER_H
#define LLVM_DEBUGINFO_PDB_NATIVE_PDBSTRINGTABLEBUILDER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {
class BinaryStreamWriter;
class WritableBinaryStreamRef;

namespace msf {
struct MSFLayout;
}

namespace pdb {

class PDBFileBuilder;
class PDBStringTableBuilder;

struct StringTableHashTraits {
  PDBStringTableBuilder *Table;

  explicit StringTableHashTraits(PDBStringTableBuilder &Table);
  uint32_t hashLookupKey(StringRef S) const;
  StringRef storageKeyToLookupKey(uint32_t Offset) const;
  uint32_t lookupKeyToStorageKey(StringRef S);
};

class PDBStringTableBuilder {
public:
  // If string S does not exist in the string table, insert it.
  // Returns the ID for S.
  uint32_t insert(StringRef S);

  uint32_t getIdForString(StringRef S) const;
  StringRef getStringForId(uint32_t Id) const;

  uint32_t calculateSerializedSize() const;
  Error commit(BinaryStreamWriter &Writer) const;

  void setStrings(const codeview::DebugStringTableSubsection &Strings);

private:
  uint32_t calculateHashTableSize() const;
  Error writeHeader(BinaryStreamWriter &Writer) const;
  Error writeStrings(BinaryStreamWriter &Writer) const;
  Error writeHashTable(BinaryStreamWriter &Writer) const;
  Error writeEpilogue(BinaryStreamWriter &Writer) const;

  codeview::DebugStringTableSubsection Strings;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_PDBSTRINGTABLEBUILDER_H
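
// Illustrative sketch, not part of the LLVM headers in this archive: insert()
// is idempotent per string, so the ID can be captured once and reused by any
// record that references an offset in the "/names" stream. "main.cpp" is an
// arbitrary example string.
#include "llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h"
#include <cassert>

static void stringTableExample() {
  llvm::pdb::PDBStringTableBuilder Builder;
  uint32_t Id = Builder.insert("main.cpp");
  assert(Builder.insert("main.cpp") == Id); // duplicate insert -> same ID
  assert(Builder.getIdForString("main.cpp") == Id);
}
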
//===- TpiHashing.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_TPIHASHING_H
#define LLVM_DEBUGINFO_PDB_NATIVE_TPIHASHING_H

#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace pdb {

Expected<uint32_t> hashTypeRecord(const llvm::codeview::CVType &Type);

struct TagRecordHash {
  explicit TagRecordHash(codeview::ClassRecord CR, uint32_t Full,
                         uint32_t Forward)
      : FullRecordHash(Full), ForwardDeclHash(Forward), Class(std::move(CR)) {
    State = 0;
  }

  explicit TagRecordHash(codeview::EnumRecord ER, uint32_t Full,
                         uint32_t Forward)
      : FullRecordHash(Full), ForwardDeclHash(Forward), Enum(std::move(ER)) {
    State = 1;
  }

  explicit TagRecordHash(codeview::UnionRecord UR, uint32_t Full,
                         uint32_t Forward)
      : FullRecordHash(Full), ForwardDeclHash(Forward), Union(std::move(UR)) {
    State = 2;
  }

  uint32_t FullRecordHash;
  uint32_t ForwardDeclHash;

  codeview::TagRecord &getRecord() {
    switch (State) {
    case 0:
      return Class;
    case 1:
      return Enum;
    case 2:
      return Union;
    }
    llvm_unreachable("unreachable!");
  }

private:
  union {
    codeview::ClassRecord Class;
    codeview::EnumRecord Enum;
    codeview::UnionRecord Union;
  };

  uint8_t State = 0;
};

/// Given a CVType referring to a class, structure, union, or enum, compute
/// the hash of its forward decl and full decl.
Expected<TagRecordHash> hashTagRecord(const codeview::CVType &Type);

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_TPIHASHING_H
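
// Illustrative sketch, not part of the LLVM headers in this archive: reading
// both hashes that hashTagRecord() computes for a class/struct/union/enum
// record. The caller supplies the CVType; obtaining one from a TPI stream is
// omitted here.
#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"

static void showTagHashes(const llvm::codeview::CVType &Type) {
  llvm::Expected<llvm::pdb::TagRecordHash> H = llvm::pdb::hashTagRecord(Type);
  if (!H) {
    llvm::consumeError(H.takeError()); // e.g. Type is not a tag record
    return;
  }
  uint32_t Full = H->FullRecordHash;  // hash of the full definition
  uint32_t Fwd = H->ForwardDeclHash;  // hash of the forward declaration
  (void)Full; (void)Fwd;
}
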
//===- PDBFile.h - Low level interface to a PDB file ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_PDBFILE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_PDBFILE_H

#include "llvm/DebugInfo/MSF/IMSFFile.h"
#include "llvm/DebugInfo/MSF/MSFCommon.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

#include <memory>

namespace llvm {

class BinaryStream;

namespace msf {
class MappedBlockStream;
}

namespace pdb {
class DbiStream;
class GlobalsStream;
class InfoStream;
class InjectedSourceStream;
class PDBStringTable;
class PDBFileBuilder;
class PublicsStream;
class SymbolStream;
class TpiStream;

class PDBFile : public msf::IMSFFile {
  friend PDBFileBuilder;

public:
  PDBFile(StringRef Path, std::unique_ptr<BinaryStream> PdbFileBuffer,
          BumpPtrAllocator &Allocator);
  ~PDBFile() override;

  StringRef getFileDirectory() const;
  StringRef getFilePath() const;

  uint32_t getFreeBlockMapBlock() const;
  uint32_t getUnknown1() const;

  uint32_t getBlockSize() const override;
  uint32_t getBlockCount() const override;
  uint32_t getNumDirectoryBytes() const;
  uint32_t getBlockMapIndex() const;
  uint32_t getNumDirectoryBlocks() const;
  uint64_t getBlockMapOffset() const;

  uint32_t getNumStreams() const override;
  uint32_t getMaxStreamSize() const;
  uint32_t getStreamByteSize(uint32_t StreamIndex) const override;
  ArrayRef<support::ulittle32_t>
  getStreamBlockList(uint32_t StreamIndex) const override;
  uint64_t getFileSize() const;

  Expected<ArrayRef<uint8_t>> getBlockData(uint32_t BlockIndex,
                                           uint32_t NumBytes) const override;
  Error setBlockData(uint32_t BlockIndex, uint32_t Offset,
                     ArrayRef<uint8_t> Data) const override;

  ArrayRef<support::ulittle32_t> getStreamSizes() const {
    return ContainerLayout.StreamSizes;
  }
  ArrayRef<ArrayRef<support::ulittle32_t>> getStreamMap() const {
    return ContainerLayout.StreamMap;
  }

  const msf::MSFLayout &getMsfLayout() const { return ContainerLayout; }
  BinaryStreamRef getMsfBuffer() const { return *Buffer; }

  ArrayRef<support::ulittle32_t> getDirectoryBlockArray() const;

  std::unique_ptr<msf::MappedBlockStream>
  createIndexedStream(uint16_t SN) const;
  Expected<std::unique_ptr<msf::MappedBlockStream>>
  safelyCreateIndexedStream(uint32_t StreamIndex) const;
  Expected<std::unique_ptr<msf::MappedBlockStream>>
  safelyCreateNamedStream(StringRef Name);

  msf::MSFStreamLayout getStreamLayout(uint32_t StreamIdx) const;
  msf::MSFStreamLayout getFpmStreamLayout() const;

  Error parseFileHeaders();
  Error parseStreamData();

  Expected<InfoStream &> getPDBInfoStream();
  Expected<DbiStream &> getPDBDbiStream();
  Expected<GlobalsStream &> getPDBGlobalsStream();
  Expected<TpiStream &> getPDBTpiStream();
  Expected<TpiStream &> getPDBIpiStream();
  Expected<PublicsStream &> getPDBPublicsStream();
  Expected<SymbolStream &> getPDBSymbolStream();
  Expected<PDBStringTable &> getStringTable();
  Expected<InjectedSourceStream &> getInjectedSourceStream();

  BumpPtrAllocator &getAllocator() { return Allocator; }

  bool hasPDBDbiStream() const;
  bool hasPDBGlobalsStream();
  bool hasPDBInfoStream() const;
  bool hasPDBIpiStream() const;
  bool hasPDBPublicsStream();
  bool hasPDBSymbolStream();
  bool hasPDBTpiStream() const;
  bool hasPDBStringTable();
  bool hasPDBInjectedSourceStream();

  uint32_t getPointerSize();

private:
  std::string FilePath;
  BumpPtrAllocator &Allocator;

  std::unique_ptr<BinaryStream> Buffer;

  msf::MSFLayout ContainerLayout;

  std::unique_ptr<GlobalsStream> Globals;
  std::unique_ptr<InfoStream> Info;
  std::unique_ptr<DbiStream> Dbi;
  std::unique_ptr<TpiStream> Tpi;
  std::unique_ptr<TpiStream> Ipi;
  std::unique_ptr<PublicsStream> Publics;
  std::unique_ptr<SymbolStream> Symbols;
  std::unique_ptr<msf::MappedBlockStream> DirectoryStream;
  std::unique_ptr<msf::MappedBlockStream> StringTableStream;
  std::unique_ptr<InjectedSourceStream> InjectedSources;
  std::unique_ptr<PDBStringTable> Strings;
};
} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_PDBFILE_H
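
// Illustrative sketch, not part of the LLVM headers in this archive: querying
// a PDBFile that has already been loaded and parsed. The has*/get* pairing
// matters: getPDBDbiStream() returns an Error for files that lack the stream.
#include "llvm/DebugInfo/PDB/Native/DbiStream.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/Support/raw_ostream.h"

static llvm::Error describe(llvm::pdb::PDBFile &File) {
  llvm::outs() << File.getFilePath() << ": " << File.getNumStreams()
               << " streams, block size " << File.getBlockSize() << "\n";
  if (File.hasPDBDbiStream()) {
    llvm::Expected<llvm::pdb::DbiStream &> Dbi = File.getPDBDbiStream();
    if (!Dbi)
      return Dbi.takeError();
    // *Dbi is valid here; module and section queries would follow.
  }
  return llvm::Error::success();
}
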
//===- NativeSymbolEnumerator.h - info about enumerator values --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVESYMBOLENUMERATOR_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVESYMBOLENUMERATOR_H

#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {

class raw_ostream;
namespace pdb {
class NativeSession;
class NativeTypeEnum;

class NativeSymbolEnumerator : public NativeRawSymbol {
public:
  NativeSymbolEnumerator(NativeSession &Session, SymIndexId Id,
                         const NativeTypeEnum &Parent,
                         codeview::EnumeratorRecord Record);

  ~NativeSymbolEnumerator() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  SymIndexId getClassParentId() const override;
  SymIndexId getLexicalParentId() const override;
  std::string getName() const override;
  SymIndexId getTypeId() const override;
  PDB_DataKind getDataKind() const override;
  PDB_LocType getLocationType() const override;
  bool isConstType() const override;
  bool isVolatileType() const override;
  bool isUnalignedType() const override;
  Variant getValue() const override;

protected:
  const NativeTypeEnum &Parent;
  codeview::EnumeratorRecord Record;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVESYMBOLENUMERATOR_H
//===- NativeTypeArray.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEARRAY_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEARRAY_H

#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"

#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"

namespace llvm {
namespace pdb {

class NativeSession;

class NativeTypeArray : public NativeRawSymbol {
public:
  NativeTypeArray(NativeSession &Session, SymIndexId Id, codeview::TypeIndex TI,
                  codeview::ArrayRecord Record);
  ~NativeTypeArray() override;

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  SymIndexId getArrayIndexTypeId() const override;

  bool isConstType() const override;
  bool isUnalignedType() const override;
  bool isVolatileType() const override;

  uint32_t getCount() const override;
  SymIndexId getTypeId() const override;
  uint64_t getLength() const override;

protected:
  codeview::ArrayRecord Record;
  codeview::TypeIndex Index;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_NATIVETYPEARRAY_H
//===- ISectionContribVisitor.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_ISECTIONCONTRIBVISITOR_H
#define LLVM_DEBUGINFO_PDB_NATIVE_ISECTIONCONTRIBVISITOR_H

namespace llvm {
namespace pdb {

struct SectionContrib;
struct SectionContrib2;

class ISectionContribVisitor {
public:
  virtual ~ISectionContribVisitor() = default;

  virtual void visit(const SectionContrib &C) = 0;
  virtual void visit(const SectionContrib2 &C) = 0;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_ISECTIONCONTRIBVISITOR_H
//===- SymbolStream.h - PDB Symbol Stream Access ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLSTREAM_H
#define LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLSTREAM_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"

#include "llvm/Support/Error.h"

namespace llvm {
namespace msf {
class MappedBlockStream;
}
namespace pdb {

class SymbolStream {
public:
  SymbolStream(std::unique_ptr<msf::MappedBlockStream> Stream);
  ~SymbolStream();
  Error reload();

  const codeview::CVSymbolArray &getSymbolArray() const {
    return SymbolRecords;
  }

  codeview::CVSymbol readRecord(uint32_t Offset) const;

  iterator_range<codeview::CVSymbolArray::Iterator>
  getSymbols(bool *HadError) const;

  Error commit();

private:
  codeview::CVSymbolArray SymbolRecords;
  std::unique_ptr<msf::MappedBlockStream> Stream;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_SYMBOLSTREAM_H
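
// Illustrative sketch, not part of the LLVM headers in this archive:
// iterating a loaded SymbolStream. getSymbols() reports iteration failures
// through the HadError out-parameter rather than returning llvm::Error.
#include "llvm/DebugInfo/PDB/Native/SymbolStream.h"

static uint32_t countSymbols(const llvm::pdb::SymbolStream &Stream) {
  uint32_t Count = 0;
  bool HadError = false;
  for (const llvm::codeview::CVSymbol &Sym : Stream.getSymbols(&HadError)) {
    (void)Sym; // a real consumer would dispatch on Sym.kind() here
    ++Count;
  }
  return HadError ? 0 : Count; // treat a malformed stream as empty
}
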
//===- PDBStringTable.h - PDB String Table ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_NATIVE_PDBSTRINGTABLE_H
#define LLVM_DEBUGINFO_PDB_NATIVE_PDBSTRINGTABLE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {
class BinaryStreamReader;

namespace pdb {

struct PDBStringTableHeader;

class PDBStringTable {
public:
  Error reload(BinaryStreamReader &Reader);

  uint32_t getByteSize() const;
  uint32_t getNameCount() const;
  uint32_t getHashVersion() const;
  uint32_t getSignature() const;

  Expected<StringRef> getStringForID(uint32_t ID) const;
  Expected<uint32_t> getIDForString(StringRef Str) const;

  FixedStreamArray<support::ulittle32_t> name_ids() const;

  const codeview::DebugStringTableSubsectionRef &getStringTable() const;

private:
  Error readHeader(BinaryStreamReader &Reader);
  Error readStrings(BinaryStreamReader &Reader);
  Error readHashTable(BinaryStreamReader &Reader);
  Error readEpilogue(BinaryStreamReader &Reader);

  const PDBStringTableHeader *Header = nullptr;
  codeview::DebugStringTableSubsectionRef Strings;
  FixedStreamArray<support::ulittle32_t> IDs;
  uint32_t NameCount = 0;
};

} // end namespace pdb
} // end namespace llvm

#endif // LLVM_DEBUGINFO_PDB_NATIVE_PDBSTRINGTABLE_H
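
// Illustrative sketch, not part of the LLVM headers in this archive: the two
// lookup directions of PDBStringTable are inverses for any ID the table
// actually contains.
#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"

static void roundTrip(const llvm::pdb::PDBStringTable &Table, uint32_t ID) {
  llvm::Expected<llvm::StringRef> Str = Table.getStringForID(ID);
  if (!Str) {
    llvm::consumeError(Str.takeError()); // ID not present in the table
    return;
  }
  llvm::Expected<uint32_t> Back = Table.getIDForString(*Str);
  if (!Back) {
    llvm::consumeError(Back.takeError());
    return;
  }
  // *Back == ID when both lookups succeed.
}
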
//===- PDBSymbolFunc.h - class representing a function instance -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H

#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymDumper;
class PDBSymbolData;
class PDBSymbolTypeFunctionSig;
template <typename ChildType> class IPDBEnumChildren;

class PDBSymbolFunc : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Function)
public:
  void dump(PDBSymDumper &Dumper) const override;

  bool isDestructor() const;

  std::unique_ptr<IPDBEnumChildren<PDBSymbolData>> getArguments() const;

  FORWARD_SYMBOL_METHOD(getAccess)
  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(isCompilerGenerated)
  FORWARD_SYMBOL_METHOD(isConstructorVirtualBase)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(isCxxReturnUdt)
  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
  FORWARD_SYMBOL_METHOD(hasFarReturn)
  FORWARD_SYMBOL_METHOD(hasAlloca)
  FORWARD_SYMBOL_METHOD(hasEH)
  FORWARD_SYMBOL_METHOD(hasEHa)
  FORWARD_SYMBOL_METHOD(hasInlAsm)
  FORWARD_SYMBOL_METHOD(hasLongJump)
  FORWARD_SYMBOL_METHOD(hasSEH)
  FORWARD_SYMBOL_METHOD(hasSecurityChecks)
  FORWARD_SYMBOL_METHOD(hasSetJump)
  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
  FORWARD_SYMBOL_METHOD(isIntroVirtualFunction)
  FORWARD_SYMBOL_METHOD(hasInlineAttribute)
  FORWARD_SYMBOL_METHOD(isNaked)
  FORWARD_SYMBOL_METHOD(isStatic)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocalBasePointerRegisterId)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(hasFramePointer)
  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
  FORWARD_SYMBOL_METHOD(isUnreached)
  FORWARD_SYMBOL_METHOD(getNoStackOrdering)
  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
  FORWARD_SYMBOL_METHOD(isPureVirtual)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getToken)
  FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(PDBSymbolTypeFunctionSig, getType,
                                              getSignature)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(getUndecoratedName)
  FORWARD_SYMBOL_METHOD(isVirtual)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
  FORWARD_SYMBOL_METHOD(isVolatileType)

  std::unique_ptr<IPDBEnumLineNumbers> getLineNumbers() const;
  uint32_t getCompilandId() const;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
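
// Illustrative sketch, not part of the LLVM headers in this archive: draining
// the argument enumerator that PDBSymbolFunc::getArguments() returns. Each
// getNext() call yields an owned PDBSymbolData until the enumerator is
// exhausted.
#include "llvm/DebugInfo/PDB/PDBSymbolData.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFunc.h"
#include "llvm/Support/raw_ostream.h"

static void printArguments(const llvm::pdb::PDBSymbolFunc &Func) {
  auto Args = Func.getArguments();
  if (!Args)
    return;
  while (auto Arg = Args->getNext()) // unique_ptr<PDBSymbolData> per argument
    llvm::outs() << Arg->getName() << "\n";
}
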
//===- PDBSymbolTypeVTableShape.h - VTable shape info -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeVTableShape : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTableShape)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(getCount)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
//===- PDBSymbolTypeTypedef.h - typedef type info ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeTypedef : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Typedef)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getBuiltinType)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(hasConstructor)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
  FORWARD_SYMBOL_METHOD(hasCastOperator)
  FORWARD_SYMBOL_METHOD(hasNestedTypes)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(isNested)
  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
  FORWARD_SYMBOL_METHOD(isPacked)
  FORWARD_SYMBOL_METHOD(isReference)
  FORWARD_SYMBOL_METHOD(isScoped)
  FORWARD_SYMBOL_ID_METHOD(getType)
  FORWARD_SYMBOL_METHOD(getUdtKind)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_ID_METHOD(getVirtualTableShape)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
//===- PDBSymbolTypeManaged.h - managed type info ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeManaged : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ManagedType)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getName)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
//===- IPDBSession.h - base interface for a PDB symbol context --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBSESSION_H
#define LLVM_DEBUGINFO_PDB_IPDBSESSION_H

#include "PDBSymbol.h"
#include "PDBTypes.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <memory>

namespace llvm {
namespace pdb {
class PDBSymbolCompiland;
class PDBSymbolExe;

/// IPDBSession defines an interface used to provide a context for querying
/// debug information from a debug data source (for example, a PDB).
class IPDBSession {
public:
  virtual ~IPDBSession();

  virtual uint64_t getLoadAddress() const = 0;
  virtual bool setLoadAddress(uint64_t Address) = 0;
  virtual std::unique_ptr<PDBSymbolExe> getGlobalScope() = 0;
  virtual std::unique_ptr<PDBSymbol>
  getSymbolById(SymIndexId SymbolId) const = 0;

  virtual bool addressForVA(uint64_t VA, uint32_t &Section,
                            uint32_t &Offset) const = 0;
  virtual bool addressForRVA(uint32_t RVA, uint32_t &Section,
                             uint32_t &Offset) const = 0;

  template <typename T>
  std::unique_ptr<T> getConcreteSymbolById(SymIndexId SymbolId) const {
    return unique_dyn_cast_or_null<T>(getSymbolById(SymbolId));
  }

  virtual std::unique_ptr<PDBSymbol> findSymbolByAddress(uint64_t Address,
                                                         PDB_SymType Type) = 0;
  virtual std::unique_ptr<PDBSymbol> findSymbolByRVA(uint32_t RVA,
                                                     PDB_SymType Type) = 0;
  virtual std::unique_ptr<PDBSymbol>
  findSymbolBySectOffset(uint32_t Sect, uint32_t Offset, PDB_SymType Type) = 0;

  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbers(const PDBSymbolCompiland &Compiland,
                  const IPDBSourceFile &File) const = 0;
  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByAddress(uint64_t Address, uint32_t Length) const = 0;
  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const = 0;
  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
                              uint32_t Length) const = 0;

  virtual std::unique_ptr<IPDBEnumSourceFiles>
  findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
                  PDB_NameSearchFlags Flags) const = 0;
  virtual std::unique_ptr<IPDBSourceFile>
  findOneSourceFile(const PDBSymbolCompiland *Compiland,
                    llvm::StringRef Pattern,
                    PDB_NameSearchFlags Flags) const = 0;
  virtual std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
  findCompilandsForSourceFile(llvm::StringRef Pattern,
                              PDB_NameSearchFlags Flags) const = 0;
  virtual std::unique_ptr<PDBSymbolCompiland>
  findOneCompilandForSourceFile(llvm::StringRef Pattern,
                                PDB_NameSearchFlags Flags) const = 0;

  virtual std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const = 0;
  virtual std::unique_ptr<IPDBEnumSourceFiles>
  getSourceFilesForCompiland(const PDBSymbolCompiland &Compiland) const = 0;
  virtual std::unique_ptr<IPDBSourceFile>
  getSourceFileById(uint32_t FileId) const = 0;

  virtual std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const = 0;

  virtual std::unique_ptr<IPDBEnumTables> getEnumTables() const = 0;

  virtual std::unique_ptr<IPDBEnumInjectedSources>
  getInjectedSources() const = 0;

  virtual std::unique_ptr<IPDBEnumSectionContribs>
  getSectionContribs() const = 0;

  virtual std::unique_ptr<IPDBEnumFrameData>
  getFrameData() const = 0;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_IPDBSESSION_H
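
// Illustrative sketch, not part of the LLVM headers in this archive:
// obtaining a session through llvm::pdb::loadDataForPDB (declared in
// llvm/DebugInfo/PDB/PDB.h) and mapping an RVA to section:offset. The caller
// supplies the path (e.g. a hypothetical "sample.pdb").
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/DebugInfo/PDB/PDB.h"

static llvm::Error rvaExample(llvm::StringRef Path, uint32_t RVA) {
  std::unique_ptr<llvm::pdb::IPDBSession> Session;
  if (auto Err = llvm::pdb::loadDataForPDB(llvm::pdb::PDB_ReaderType::Native,
                                           Path, Session))
    return Err;
  uint32_t Section = 0, Offset = 0;
  if (Session->addressForRVA(RVA, Section, Offset)) {
    // Section:Offset now identify the address within the image.
  }
  return llvm::Error::success();
}
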
//===- PDBSymbolThunk.h - Support for querying PDB thunks -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolThunk : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Thunk)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAccess)
  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(isIntroVirtualFunction)
  FORWARD_SYMBOL_METHOD(isStatic)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(isPureVirtual)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getTargetOffset)
  FORWARD_SYMBOL_METHOD(getTargetRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getTargetVirtualAddress)
  FORWARD_SYMBOL_METHOD(getTargetSection)
  FORWARD_SYMBOL_METHOD(getThunkOrdinal)
  FORWARD_SYMBOL_ID_METHOD(getType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVirtual)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
//==- DIAEnumDebugStreams.h - DIA Debug Stream Enumerator impl ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"

namespace llvm {
namespace pdb {

class IPDBDataStream;

class DIAEnumDebugStreams : public IPDBEnumChildren<IPDBDataStream> {
public:
  explicit DIAEnumDebugStreams(CComPtr<IDiaEnumDebugStreams> DiaEnumerator);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  CComPtr<IDiaEnumDebugStreams> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
//===- DIALineNumber.h - DIA implementation of IPDBLineNumber ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
#define LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"

namespace llvm {
namespace pdb {
class DIALineNumber : public IPDBLineNumber {
public:
  explicit DIALineNumber(CComPtr<IDiaLineNumber> DiaLineNumber);

  uint32_t getLineNumber() const override;
  uint32_t getLineNumberEnd() const override;
  uint32_t getColumnNumber() const override;
  uint32_t getColumnNumberEnd() const override;
  uint32_t getAddressSection() const override;
  uint32_t getAddressOffset() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint64_t getVirtualAddress() const override;
  uint32_t getLength() const override;
  uint32_t getSourceFileId() const override;
  uint32_t getCompilandId() const override;
  bool isStatement() const override;

private:
  CComPtr<IDiaLineNumber> LineNumber;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
//==- DIAEnumSectionContribs.h -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"

namespace llvm {
namespace pdb {
class DIASession;

class DIAEnumSectionContribs : public IPDBEnumChildren<IPDBSectionContrib> {
public:
  explicit DIAEnumSectionContribs(
      const DIASession &PDBSession,
      CComPtr<IDiaEnumSectionContribs> DiaEnumerator);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  const DIASession &Session;
  CComPtr<IDiaEnumSectionContribs> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMSECTIONCONTRIBS_H
//===- DIADataStream.h - DIA implementation of IPDBDataStream ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
#define LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBDataStream.h"

namespace llvm {
namespace pdb {
class DIADataStream : public IPDBDataStream {
public:
  explicit DIADataStream(CComPtr<IDiaEnumDebugStreamData> DiaStreamData);

  uint32_t getRecordCount() const override;
  std::string getName() const override;
  std::optional<RecordType> getItemAtIndex(uint32_t Index) const override;
  bool getNext(RecordType &Record) override;
  void reset() override;

private:
  CComPtr<IDiaEnumDebugStreamData> StreamData;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
//==- DIAEnumSourceFiles.h - DIA Source File Enumerator impl -----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"

namespace llvm {
namespace pdb {
class DIASession;

class DIAEnumSourceFiles : public IPDBEnumChildren<IPDBSourceFile> {
public:
  explicit DIAEnumSourceFiles(const DIASession &PDBSession,
                              CComPtr<IDiaEnumSourceFiles> DiaEnumerator);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  const DIASession &Session;
  CComPtr<IDiaEnumSourceFiles> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
//===- DIASourceFile.h - DIA implementation of IPDBSourceFile ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
#define LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"

namespace llvm {
namespace pdb {
class DIASession;

class DIASourceFile : public IPDBSourceFile {
public:
  explicit DIASourceFile(const DIASession &Session,
                         CComPtr<IDiaSourceFile> DiaSourceFile);

  std::string getFileName() const override;
  uint32_t getUniqueId() const override;
  std::string getChecksum() const override;
  PDB_Checksum getChecksumType() const override;
  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
  getCompilands() const override;

  CComPtr<IDiaSourceFile> getDiaFile() const { return SourceFile; }

private:
  const DIASession &Session;
  CComPtr<IDiaSourceFile> SourceFile;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
//===- DIAInjectedSource.h - DIA impl for IPDBInjectedSource ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"

namespace llvm {
namespace pdb {
class DIASession;

class DIAInjectedSource : public IPDBInjectedSource {
public:
  explicit DIAInjectedSource(CComPtr<IDiaInjectedSource> DiaSourceFile);

  uint32_t getCrc32() const override;
  uint64_t getCodeByteSize() const override;
  std::string getFileName() const override;
  std::string getObjectFileName() const override;
  std::string getVirtualFileName() const override;
  uint32_t getCompression() const override;
  std::string getCode() const override;

private:
  CComPtr<IDiaInjectedSource> SourceFile;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAINJECTEDSOURCE_H
//==- DIAEnumInjectedSources.h - DIA Injected Sources Enumerator -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBInjectedSource.h"

namespace llvm {
namespace pdb {

class DIAEnumInjectedSources : public IPDBEnumChildren<IPDBInjectedSource> {
public:
  explicit DIAEnumInjectedSources(
      CComPtr<IDiaEnumInjectedSources> DiaEnumerator);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  CComPtr<IDiaEnumInjectedSources> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMINJECTEDSOURCES_H
//===- DIAUtils.h - Utility functions for working with DIA ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ConvertUTF.h"

template <typename Obj>
std::string invokeBstrMethod(Obj &Object,
                             HRESULT (__stdcall Obj::*Func)(BSTR *)) {
  CComBSTR Str16;
  HRESULT Result = (Object.*Func)(&Str16);
  if (S_OK != Result)
    return std::string();

  std::string Str8;
  llvm::ArrayRef<char> StrBytes(reinterpret_cast<char *>(Str16.m_str),
                                Str16.ByteLength());
  llvm::convertUTF16ToUTF8String(StrBytes, Str8);
  return Str8;
}

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAUTILS_H
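
// Illustrative sketch, not part of the LLVM headers in this archive: the
// intended call pattern for invokeBstrMethod. This compiles only on Windows
// with the DIA SDK available, where IDiaSymbol::get_name has the
// HRESULT(BSTR *) shape the template expects; the include paths below assume
// the installed LLVM header layout.
#ifdef _WIN32
#include "llvm/DebugInfo/PDB/DIA/DIASupport.h" // CComBSTR, BSTR, dia2.h
#include "llvm/DebugInfo/PDB/DIA/DIAUtils.h"
#include <string>

static std::string symbolName(IDiaSymbol &Symbol) {
  // Converts the returned BSTR to UTF-8, yielding "" on failure.
  return invokeBstrMethod(Symbol, &IDiaSymbol::get_name);
}
#endif
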
//===- DIAEnumTables.h - DIA Tables Enumerator Impl -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBTable.h"

namespace llvm {
namespace pdb {
class IPDBTable;

class DIAEnumTables : public IPDBEnumChildren<IPDBTable> {
public:
  explicit DIAEnumTables(CComPtr<IDiaEnumTables> DiaEnumerator);

  uint32_t getChildCount() const override;
  std::unique_ptr<IPDBTable> getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<IPDBTable> getNext() override;
  void reset() override;

private:
  CComPtr<IDiaEnumTables> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMTABLES_H
//==- DIAEnumFrameData.h ------------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBFrameData.h"

namespace llvm {
namespace pdb {

class DIAEnumFrameData : public IPDBEnumChildren<IPDBFrameData> {
public:
  explicit DIAEnumFrameData(CComPtr<IDiaEnumFrameData> DiaEnumerator);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  CComPtr<IDiaEnumFrameData> Enumerator;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIAENUMFRAMEDATA_H
//===- DIARawSymbol.h - DIA implementation of IPDBRawSymbol -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
#define LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"

namespace llvm {
namespace pdb {
class DIASession;
class DIARawSymbol : public IPDBRawSymbol {
public:
  DIARawSymbol(const DIASession &PDBSession, CComPtr<IDiaSymbol> DiaSymbol);

  void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
            PdbSymbolIdField RecurseIdFields) const override;

  CComPtr<IDiaSymbol> getDiaSymbol() const { return Symbol; }

  std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type) const override;
  std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type, StringRef Name,
               PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBEnumSymbols>
  findChildrenByAddr(PDB_SymType Type, StringRef Name,
                     PDB_NameSearchFlags Flags,
                     uint32_t Section, uint32_t Offset) const override;
  std::unique_ptr<IPDBEnumSymbols>
  findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
                   uint64_t VA) const override;
  std::unique_ptr<IPDBEnumSymbols>
  findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
                    uint32_t RVA) const override;

  std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const override;
  std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByRVA(uint32_t RVA) const override;
  std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByVA(uint64_t VA) const override;

  std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
                         uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByVA(uint64_t VA, uint32_t Length) const override;

  void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const override;
  void getFrontEndVersion(VersionInfo &Version) const override;
  void getBackEndVersion(VersionInfo &Version) const override;
  PDB_MemberAccess getAccess() const override;
  uint32_t getAddressOffset() const override;
  uint32_t getAddressSection() const override;
  uint32_t getAge() const override;
  SymIndexId getArrayIndexTypeId() const override;
  uint32_t getBaseDataOffset() const override;
  uint32_t getBaseDataSlot() const override;
  SymIndexId getBaseSymbolId() const override;
  PDB_BuiltinType getBuiltinType() const override;
  uint32_t getBitPosition() const override;
  PDB_CallingConv getCallingConvention() const override;
  SymIndexId getClassParentId() const override;
  std::string getCompilerName() const override;
  uint32_t getCount() const override;
  uint32_t getCountLiveRanges() const override;
  PDB_Lang getLanguage() const override;
  SymIndexId getLexicalParentId() const override;
  std::string getLibraryName() const override;
  uint32_t getLiveRangeStartAddressOffset() const override;
  uint32_t getLiveRangeStartAddressSection() const override;
  uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
  codeview::RegisterId getLocalBasePointerRegisterId() const override;
  SymIndexId getLowerBoundId() const override;
  uint32_t getMemorySpaceKind() const override;
  std::string getName() const override;
  uint32_t getNumberOfAcceleratorPointerTags() const override;
  uint32_t getNumberOfColumns() const override;
  uint32_t getNumberOfModifiers() const override;
  uint32_t getNumberOfRegisterIndices() const override;
  uint32_t getNumberOfRows() const override;
  std::string getObjectFileName() const override;
  uint32_t getOemId() const override;
  SymIndexId getOemSymbolId() const override;
  uint32_t getOffsetInUdt() const override;
  PDB_Cpu getPlatform() const override;
  uint32_t getRank() const override;
  codeview::RegisterId getRegisterId() const override;
  uint32_t getRegisterType() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint32_t getSamplerSlot() const override;
  uint32_t getSignature() const override;
  uint32_t getSizeInUdt() const override;
  uint32_t getSlot() const override;
  std::string getSourceFileName() const override;
  std::unique_ptr<IPDBLineNumber> getSrcLineOnTypeDefn() const override;
  uint32_t getStride() const override;
  SymIndexId getSubTypeId() const override;
  std::string getSymbolsFileName() const override;
  SymIndexId getSymIndexId() const override;
  uint32_t getTargetOffset() const override;
  uint32_t getTargetRelativeVirtualAddress() const override;
  uint64_t getTargetVirtualAddress() const override;
  uint32_t getTargetSection() const override;
  uint32_t getTextureSlot() const override;
  uint32_t getTimeStamp() const override;
  uint32_t getToken() const override;
  SymIndexId getTypeId() const override;
  uint32_t getUavSlot() const override;
  std::string getUndecoratedName() const override;
  std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const override;
  SymIndexId getUnmodifiedTypeId() const override;
  SymIndexId getUpperBoundId() const override;
  Variant getValue() const override;
  uint32_t getVirtualBaseDispIndex() const override;
  uint32_t getVirtualBaseOffset() const override;
  SymIndexId getVirtualTableShapeId() const override;
  std::unique_ptr<PDBSymbolTypeBuiltin>
  getVirtualBaseTableType() const override;
  PDB_DataKind getDataKind() const override;
  PDB_SymType getSymTag() const override;
  codeview::GUID getGuid() const override;
  int32_t getOffset() const override;
  int32_t getThisAdjust() const override;
  int32_t getVirtualBasePointerOffset() const override;
  PDB_LocType getLocationType() const override;
  PDB_Machine getMachineType() const override;
  codeview::ThunkOrdinal getThunkOrdinal() const override;
  uint64_t getLength() const override;
  uint64_t getLiveRangeLength() const override;
  uint64_t getVirtualAddress() const override;
  PDB_UdtType getUdtKind() const override;
  bool hasConstructor() const override;
  bool hasCustomCallingConvention() const override;
  bool hasFarReturn() const override;
  bool isCode() const override;
  bool isCompilerGenerated() const override;
  bool isConstType() const override;
  bool isEditAndContinueEnabled() const override;
  bool isFunction() const override;
  bool getAddressTaken() const override;
  bool getNoStackOrdering() const override;
  bool hasAlloca() const override;
  bool hasAssignmentOperator() const override;
  bool hasCTypes() const override;
  bool hasCastOperator() const override;
  bool hasDebugInfo() const override;
  bool hasEH() const override;
  bool hasEHa() const override;
  bool hasInlAsm() const override;
  bool hasInlineAttribute() const override;
  bool hasInterruptReturn() const override;
  bool hasFramePointer() const override;
  bool hasLongJump() const override;
  bool hasManagedCode() const override;
  bool hasNestedTypes() const override;
  bool hasNoInlineAttribute() const override;
  bool hasNoReturnAttribute() const override;
  bool hasOptimizedCodeDebugInfo() const override;
  bool hasOverloadedOperator() const override;
  bool hasSEH() const override;
  bool hasSecurityChecks() const override;
  bool hasSetJump() const override;
  bool hasStrictGSCheck() const override;
  bool isAcceleratorGroupSharedLocal() const override;
  bool isAcceleratorPointerTagLiveRange() const override;
  bool isAcceleratorStubFunction() const override;
  bool isAggregated() const override;
  bool isIntroVirtualFunction() const override;
  bool isCVTCIL() const override;
  bool isConstructorVirtualBase() const override;
  bool isCxxReturnUdt() const override;
  bool isDataAligned() const override;
  bool isHLSLData() const override;
  bool isHotpatchable() const override;
  bool isIndirectVirtualBaseClass() const override;
  bool isInterfaceUdt() const override;
  bool isIntrinsic() const override;
  bool isLTCG() const override;
  bool isLocationControlFlowDependent() const override;
  bool isMSILNetmodule() const override;
  bool isMatrixRowMajor() const override;
  bool isManagedCode() const override;
  bool isMSILCode() const override;
  bool isMultipleInheritance() const override;
  bool isNaked() const override;
  bool isNested() const override;
  bool isOptimizedAway() const override;
  bool isPacked() const override;
  bool isPointerBasedOnSymbolValue() const override;
  bool isPointerToDataMember() const override;
  bool isPointerToMemberFunction() const override;
  bool isPureVirtual() const override;
  bool isRValueReference() const override;
  bool isRefUdt() const override;
  bool isReference() const override;
  bool isRestrictedType() const override;
  bool isReturnValue() const override;
  bool isSafeBuffers() const override;
  bool isScoped() const override;
  bool isSdl() const override;
  bool isSingleInheritance() const override;
  bool isSplitted() const override;
  bool isStatic() const override;
  bool hasPrivateSymbols() const override;
  bool isUnalignedType() const override;
  bool isUnreached() const override;
  bool isValueUdt() const override;
  bool isVirtual() const override;
  bool isVirtualBaseClass() const override;
  bool isVirtualInheritance() const override;
  bool isVolatileType() const override;
  bool wasInlined() const override;
  std::string getUnused() const override;

private:
  const DIASession &Session;
  CComPtr<IDiaSymbol> Symbol;
};
} // namespace pdb
} // namespace llvm

#endif

//===- DIAError.h - Error extensions for PDB DIA implementation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAERROR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace pdb {
enum class dia_error_code {
  unspecified = 1,
  could_not_create_impl,
  invalid_file_format,
  invalid_parameter,
  already_loaded,
  debug_info_mismatch,
};
} // namespace pdb
} // namespace llvm

namespace std {
template <>
struct is_error_code_enum<llvm::pdb::dia_error_code> : std::true_type {};
} // namespace std

namespace llvm {
namespace pdb {
const std::error_category &DIAErrCategory();

inline std::error_code make_error_code(dia_error_code E) {
  return std::error_code(static_cast<int>(E), DIAErrCategory());
}
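
// Illustrative: a dia_error_code can be surfaced as an llvm::Error through
// the standard std::error_code bridge, e.g.
//   return errorCodeToError(
//       make_error_code(dia_error_code::invalid_file_format));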

/// Base class for errors originating in the DIA SDK, e.g. failed COM calls.
class DIAError : public ErrorInfo<DIAError, StringError> {
public:
  using ErrorInfo<DIAError, StringError>::ErrorInfo;
  DIAError(const Twine &S) : ErrorInfo(S, dia_error_code::unspecified) {}
  static char ID;
};
} // namespace pdb
} // namespace llvm
#endif

//===- DIAFrameData.h - DIA Impl. of IPDBFrameData --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAFRAMEDATA_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAFRAMEDATA_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBFrameData.h"

namespace llvm {
namespace pdb {

class DIASession;

class DIAFrameData : public IPDBFrameData {
public:
  explicit DIAFrameData(CComPtr<IDiaFrameData> DiaFrameData);

  uint32_t getAddressOffset() const override;
  uint32_t getAddressSection() const override;
  uint32_t getLengthBlock() const override;
  std::string getProgram() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint64_t getVirtualAddress() const override;

private:
  CComPtr<IDiaFrameData> FrameData;
};

} // namespace pdb
} // namespace llvm

#endif

//==- DIAEnumSymbols.h - DIA Symbol Enumerator impl --------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"

namespace llvm {
namespace pdb {
class DIASession;

class DIAEnumSymbols : public IPDBEnumChildren<PDBSymbol> {
public:
  explicit DIAEnumSymbols(const DIASession &Session,
                          CComPtr<IDiaEnumSymbols> DiaEnumerator);

  uint32_t getChildCount() const override;
  std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
  std::unique_ptr<PDBSymbol> getNext() override;
  void reset() override;

private:
  const DIASession &Session;
  CComPtr<IDiaEnumSymbols> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif

//==- DIAEnumLineNumbers.h - DIA Line Number Enumerator impl -----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"

namespace llvm {
namespace pdb {
class IPDBLineNumber;

class DIAEnumLineNumbers : public IPDBEnumChildren<IPDBLineNumber> {
public:
  explicit DIAEnumLineNumbers(CComPtr<IDiaEnumLineNumbers> DiaEnumerator);

  uint32_t getChildCount() const override;
  ChildTypePtr getChildAtIndex(uint32_t Index) const override;
  ChildTypePtr getNext() override;
  void reset() override;

private:
  CComPtr<IDiaEnumLineNumbers> Enumerator;
};
} // namespace pdb
} // namespace llvm

#endif

//===- DIASupport.h - Common header includes for DIA ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Common defines and header includes for all of LLVMDebugInfoPDBDIA.  The
// definitions here configure the necessary #defines and include system
// headers in the proper order for using DIA.
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
#define LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H

// Require at least Vista
#define NTDDI_VERSION NTDDI_VISTA
#define _WIN32_WINNT _WIN32_WINNT_VISTA
#define WINVER _WIN32_WINNT_VISTA
#ifndef NOMINMAX
#define NOMINMAX
#endif

// atlbase.h has to come before windows.h
#include <atlbase.h>
#include <windows.h>

// DIA headers must come after windows headers.
#include <cvconst.h>
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
#endif
#include <dia2.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#include <diacreate.h>

#endif // LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H

//===- DIATable.h - DIA implementation of IPDBTable -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H
#define LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBTable.h"

namespace llvm {
namespace pdb {
class DIATable : public IPDBTable {
public:
  explicit DIATable(CComPtr<IDiaTable> DiaTable);

  uint32_t getItemCount() const override;
  std::string getName() const override;
  PDB_TableType getTableType() const override;

private:
  CComPtr<IDiaTable> Table;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIATABLE_H

//===- DIASectionContrib.h - DIA Impl. of IPDBSectionContrib ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H
#define LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBSectionContrib.h"

namespace llvm {
namespace pdb {
class DIASession;

class DIASectionContrib : public IPDBSectionContrib {
public:
  explicit DIASectionContrib(const DIASession &PDBSession,
                             CComPtr<IDiaSectionContrib> DiaSection);

  std::unique_ptr<PDBSymbolCompiland> getCompiland() const override;
  uint32_t getAddressSection() const override;
  uint32_t getAddressOffset() const override;
  uint32_t getRelativeVirtualAddress() const override;
  uint64_t getVirtualAddress() const override;
  uint32_t getLength() const override;
  bool isNotPaged() const override;
  bool hasCode() const override;
  bool hasCode16Bit() const override;
  bool hasInitializedData() const override;
  bool hasUninitializedData() const override;
  bool isRemoved() const override;
  bool hasComdat() const override;
  bool isDiscardable() const override;
  bool isNotCached() const override;
  bool isShared() const override;
  bool isExecutable() const override;
  bool isReadable() const override;
  bool isWritable() const override;
  uint32_t getDataCrc32() const override;
  uint32_t getRelocationsCrc32() const override;
  uint32_t getCompilandId() const override;

private:
  const DIASession &Session;
  CComPtr<IDiaSectionContrib> Section;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_DIA_DIASECTIONCONTRIB_H

//===- DIASession.h - DIA implementation of IPDBSession ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
#define LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H

#include "DIASupport.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/Support/Error.h"

#include <system_error>

namespace llvm {
class StringRef;

namespace pdb {
class DIASession : public IPDBSession {
public:
  explicit DIASession(CComPtr<IDiaSession> DiaSession);

  static Error createFromPdb(StringRef Path,
                             std::unique_ptr<IPDBSession> &Session);
  static Error createFromExe(StringRef Path,
                             std::unique_ptr<IPDBSession> &Session);
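
  // A minimal usage sketch (file name hypothetical); both factories return an
  // Error that is success only if Session was populated:
  //   std::unique_ptr<IPDBSession> Session;
  //   if (Error E = DIASession::createFromPdb("a.pdb", Session))
  //     return E; // propagate the failure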

  uint64_t getLoadAddress() const override;
  bool setLoadAddress(uint64_t Address) override;
  std::unique_ptr<PDBSymbolExe> getGlobalScope() override;
  std::unique_ptr<PDBSymbol> getSymbolById(SymIndexId SymbolId) const override;

  bool addressForVA(uint64_t VA, uint32_t &Section,
                    uint32_t &Offset) const override;
  bool addressForRVA(uint32_t RVA, uint32_t &Section,
                     uint32_t &Offset) const override;

  std::unique_ptr<PDBSymbol> findSymbolByAddress(uint64_t Address,
                                                 PDB_SymType Type) override;
  std::unique_ptr<PDBSymbol> findSymbolByRVA(uint32_t RVA,
                                             PDB_SymType Type) override;
  std::unique_ptr<PDBSymbol> findSymbolBySectOffset(uint32_t Section,
                                                    uint32_t Offset,
                                                    PDB_SymType Type) override;

  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbers(const PDBSymbolCompiland &Compiland,
                  const IPDBSourceFile &File) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByAddress(uint64_t Address, uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersByRVA(uint32_t RVA, uint32_t Length) const override;
  std::unique_ptr<IPDBEnumLineNumbers>
  findLineNumbersBySectOffset(uint32_t Section, uint32_t Offset,
                              uint32_t Length) const override;

  std::unique_ptr<IPDBEnumSourceFiles>
  findSourceFiles(const PDBSymbolCompiland *Compiland, llvm::StringRef Pattern,
                  PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBSourceFile>
  findOneSourceFile(const PDBSymbolCompiland *Compiland,
                    llvm::StringRef Pattern,
                    PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBEnumChildren<PDBSymbolCompiland>>
  findCompilandsForSourceFile(llvm::StringRef Pattern,
                              PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<PDBSymbolCompiland>
  findOneCompilandForSourceFile(llvm::StringRef Pattern,
                                PDB_NameSearchFlags Flags) const override;
  std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const override;
  std::unique_ptr<IPDBEnumSourceFiles> getSourceFilesForCompiland(
      const PDBSymbolCompiland &Compiland) const override;
  std::unique_ptr<IPDBSourceFile>
  getSourceFileById(uint32_t FileId) const override;

  std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const override;

  std::unique_ptr<IPDBEnumTables> getEnumTables() const override;

  std::unique_ptr<IPDBEnumInjectedSources> getInjectedSources() const override;

  std::unique_ptr<IPDBEnumSectionContribs> getSectionContribs() const override;

  std::unique_ptr<IPDBEnumFrameData> getFrameData() const override;

private:
  CComPtr<IDiaSession> Session;
};
} // namespace pdb
} // namespace llvm
#endif

//===- IPDBRawSymbol.h - base interface for PDB symbol types ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H
#define LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H

#include "PDBTypes.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include <memory>

namespace llvm {
class raw_ostream;
class StringRef;

namespace pdb {

enum class PdbSymbolIdField : uint32_t {
  None = 0,
  SymIndexId = 1 << 0,
  LexicalParent = 1 << 1,
  ClassParent = 1 << 2,
  Type = 1 << 3,
  UnmodifiedType = 1 << 4,
  All = 0xFFFFFFFF,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ All)
};
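
// LLVM_MARK_AS_BITMASK_ENUM enables the bitwise operators for this enum, so
// field selections compose as flags. A minimal sketch:
//   PdbSymbolIdField Show =
//       PdbSymbolIdField::SymIndexId | PdbSymbolIdField::Type;
//   if ((Show & PdbSymbolIdField::Type) != PdbSymbolIdField::None)
//     ; // Type ids were requested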

void dumpSymbolIdField(raw_ostream &OS, StringRef Name, SymIndexId Value,
                       int Indent, const IPDBSession &Session,
                       PdbSymbolIdField FieldId, PdbSymbolIdField ShowFlags,
                       PdbSymbolIdField RecurseFlags);

/// IPDBRawSymbol defines an interface used to represent an arbitrary symbol.
/// It exposes a monolithic interface consisting of accessors for the union of
/// all properties that are valid for any symbol type.  This interface is then
/// wrapped by a concrete class which exposes only the set of methods valid
/// for this particular symbol type.  See PDBSymbol.h for more details.
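///
/// A minimal sketch of direct use (normally reached through a PDBSymbol
/// wrapper rather than called directly):
/// \code
///   std::unique_ptr<IPDBEnumSymbols> Funcs =
///       RawSym.findChildren(PDB_SymType::Function);
/// \endcode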
class IPDBRawSymbol {
public:
  virtual ~IPDBRawSymbol();

  virtual void dump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowIdFields,
                    PdbSymbolIdField RecurseIdFields) const = 0;

  virtual std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type) const = 0;

  virtual std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type, StringRef Name,
               PDB_NameSearchFlags Flags) const = 0;
  virtual std::unique_ptr<IPDBEnumSymbols>
  findChildrenByAddr(PDB_SymType Type, StringRef Name,
                     PDB_NameSearchFlags Flags,
                     uint32_t Section, uint32_t Offset) const = 0;
  virtual std::unique_ptr<IPDBEnumSymbols>
  findChildrenByVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
                   uint64_t VA) const = 0;
  virtual std::unique_ptr<IPDBEnumSymbols>
  findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
                    uint32_t RVA) const = 0;

  virtual std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByAddr(uint32_t Section, uint32_t Offset) const = 0;
  virtual std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByRVA(uint32_t RVA) const = 0;
  virtual std::unique_ptr<IPDBEnumSymbols>
  findInlineFramesByVA(uint64_t VA) const = 0;

  virtual std::unique_ptr<IPDBEnumLineNumbers> findInlineeLines() const = 0;
  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByAddr(uint32_t Section, uint32_t Offset,
                         uint32_t Length) const = 0;
  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const = 0;
  virtual std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByVA(uint64_t VA, uint32_t Length) const = 0;

  virtual void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const = 0;
  virtual void getBackEndVersion(VersionInfo &Version) const = 0;
  virtual PDB_MemberAccess getAccess() const = 0;
  virtual uint32_t getAddressOffset() const = 0;
  virtual uint32_t getAddressSection() const = 0;
  virtual uint32_t getAge() const = 0;
  virtual SymIndexId getArrayIndexTypeId() const = 0;
  virtual uint32_t getBaseDataOffset() const = 0;
  virtual uint32_t getBaseDataSlot() const = 0;
  virtual SymIndexId getBaseSymbolId() const = 0;
  virtual PDB_BuiltinType getBuiltinType() const = 0;
  virtual uint32_t getBitPosition() const = 0;
  virtual PDB_CallingConv getCallingConvention() const = 0;
  virtual SymIndexId getClassParentId() const = 0;
  virtual std::string getCompilerName() const = 0;
  virtual uint32_t getCount() const = 0;
  virtual uint32_t getCountLiveRanges() const = 0;
  virtual void getFrontEndVersion(VersionInfo &Version) const = 0;
  virtual PDB_Lang getLanguage() const = 0;
  virtual SymIndexId getLexicalParentId() const = 0;
  virtual std::string getLibraryName() const = 0;
  virtual uint32_t getLiveRangeStartAddressOffset() const = 0;
  virtual uint32_t getLiveRangeStartAddressSection() const = 0;
  virtual uint32_t getLiveRangeStartRelativeVirtualAddress() const = 0;
  virtual codeview::RegisterId getLocalBasePointerRegisterId() const = 0;
  virtual SymIndexId getLowerBoundId() const = 0;
  virtual uint32_t getMemorySpaceKind() const = 0;
  virtual std::string getName() const = 0;
  virtual uint32_t getNumberOfAcceleratorPointerTags() const = 0;
  virtual uint32_t getNumberOfColumns() const = 0;
  virtual uint32_t getNumberOfModifiers() const = 0;
  virtual uint32_t getNumberOfRegisterIndices() const = 0;
  virtual uint32_t getNumberOfRows() const = 0;
  virtual std::string getObjectFileName() const = 0;
  virtual uint32_t getOemId() const = 0;
  virtual SymIndexId getOemSymbolId() const = 0;
  virtual uint32_t getOffsetInUdt() const = 0;
  virtual PDB_Cpu getPlatform() const = 0;
  virtual uint32_t getRank() const = 0;
  virtual codeview::RegisterId getRegisterId() const = 0;
  virtual uint32_t getRegisterType() const = 0;
  virtual uint32_t getRelativeVirtualAddress() const = 0;
  virtual uint32_t getSamplerSlot() const = 0;
  virtual uint32_t getSignature() const = 0;
  virtual uint32_t getSizeInUdt() const = 0;
  virtual uint32_t getSlot() const = 0;
  virtual std::string getSourceFileName() const = 0;
  virtual std::unique_ptr<IPDBLineNumber>
  getSrcLineOnTypeDefn() const = 0;
  virtual uint32_t getStride() const = 0;
  virtual SymIndexId getSubTypeId() const = 0;
  virtual std::string getSymbolsFileName() const = 0;
  virtual SymIndexId getSymIndexId() const = 0;
  virtual uint32_t getTargetOffset() const = 0;
  virtual uint32_t getTargetRelativeVirtualAddress() const = 0;
  virtual uint64_t getTargetVirtualAddress() const = 0;
  virtual uint32_t getTargetSection() const = 0;
  virtual uint32_t getTextureSlot() const = 0;
  virtual uint32_t getTimeStamp() const = 0;
  virtual uint32_t getToken() const = 0;
  virtual SymIndexId getTypeId() const = 0;
  virtual uint32_t getUavSlot() const = 0;
  virtual std::string getUndecoratedName() const = 0;
  virtual std::string getUndecoratedNameEx(PDB_UndnameFlags Flags) const = 0;
  virtual SymIndexId getUnmodifiedTypeId() const = 0;
  virtual SymIndexId getUpperBoundId() const = 0;
  virtual Variant getValue() const = 0;
  virtual uint32_t getVirtualBaseDispIndex() const = 0;
  virtual uint32_t getVirtualBaseOffset() const = 0;
  virtual std::unique_ptr<PDBSymbolTypeBuiltin>
  getVirtualBaseTableType() const = 0;
  virtual SymIndexId getVirtualTableShapeId() const = 0;
  virtual PDB_DataKind getDataKind() const = 0;
  virtual PDB_SymType getSymTag() const = 0;
  virtual codeview::GUID getGuid() const = 0;
  virtual int32_t getOffset() const = 0;
  virtual int32_t getThisAdjust() const = 0;
  virtual int32_t getVirtualBasePointerOffset() const = 0;
  virtual PDB_LocType getLocationType() const = 0;
  virtual PDB_Machine getMachineType() const = 0;
  virtual codeview::ThunkOrdinal getThunkOrdinal() const = 0;
  virtual uint64_t getLength() const = 0;
  virtual uint64_t getLiveRangeLength() const = 0;
  virtual uint64_t getVirtualAddress() const = 0;
  virtual PDB_UdtType getUdtKind() const = 0;
  virtual bool hasConstructor() const = 0;
  virtual bool hasCustomCallingConvention() const = 0;
  virtual bool hasFarReturn() const = 0;
  virtual bool isCode() const = 0;
  virtual bool isCompilerGenerated() const = 0;
  virtual bool isConstType() const = 0;
  virtual bool isEditAndContinueEnabled() const = 0;
  virtual bool isFunction() const = 0;
  virtual bool getAddressTaken() const = 0;
  virtual bool getNoStackOrdering() const = 0;
  virtual bool hasAlloca() const = 0;
  virtual bool hasAssignmentOperator() const = 0;
  virtual bool hasCTypes() const = 0;
  virtual bool hasCastOperator() const = 0;
  virtual bool hasDebugInfo() const = 0;
  virtual bool hasEH() const = 0;
  virtual bool hasEHa() const = 0;
  virtual bool hasFramePointer() const = 0;
  virtual bool hasInlAsm() const = 0;
  virtual bool hasInlineAttribute() const = 0;
  virtual bool hasInterruptReturn() const = 0;
  virtual bool hasLongJump() const = 0;
  virtual bool hasManagedCode() const = 0;
  virtual bool hasNestedTypes() const = 0;
  virtual bool hasNoInlineAttribute() const = 0;
  virtual bool hasNoReturnAttribute() const = 0;
  virtual bool hasOptimizedCodeDebugInfo() const = 0;
  virtual bool hasOverloadedOperator() const = 0;
  virtual bool hasSEH() const = 0;
  virtual bool hasSecurityChecks() const = 0;
  virtual bool hasSetJump() const = 0;
  virtual bool hasStrictGSCheck() const = 0;
  virtual bool isAcceleratorGroupSharedLocal() const = 0;
  virtual bool isAcceleratorPointerTagLiveRange() const = 0;
  virtual bool isAcceleratorStubFunction() const = 0;
  virtual bool isAggregated() const = 0;
  virtual bool isIntroVirtualFunction() const = 0;
  virtual bool isCVTCIL() const = 0;
  virtual bool isConstructorVirtualBase() const = 0;
  virtual bool isCxxReturnUdt() const = 0;
  virtual bool isDataAligned() const = 0;
  virtual bool isHLSLData() const = 0;
  virtual bool isHotpatchable() const = 0;
  virtual bool isIndirectVirtualBaseClass() const = 0;
  virtual bool isInterfaceUdt() const = 0;
  virtual bool isIntrinsic() const = 0;
  virtual bool isLTCG() const = 0;
  virtual bool isLocationControlFlowDependent() const = 0;
  virtual bool isMSILNetmodule() const = 0;
  virtual bool isMatrixRowMajor() const = 0;
  virtual bool isManagedCode() const = 0;
  virtual bool isMSILCode() const = 0;
  virtual bool isMultipleInheritance() const = 0;
  virtual bool isNaked() const = 0;
  virtual bool isNested() const = 0;
  virtual bool isOptimizedAway() const = 0;
  virtual bool isPacked() const = 0;
  virtual bool isPointerBasedOnSymbolValue() const = 0;
  virtual bool isPointerToDataMember() const = 0;
  virtual bool isPointerToMemberFunction() const = 0;
  virtual bool isPureVirtual() const = 0;
  virtual bool isRValueReference() const = 0;
  virtual bool isRefUdt() const = 0;
  virtual bool isReference() const = 0;
  virtual bool isRestrictedType() const = 0;
  virtual bool isReturnValue() const = 0;
  virtual bool isSafeBuffers() const = 0;
  virtual bool isScoped() const = 0;
  virtual bool isSdl() const = 0;
  virtual bool isSingleInheritance() const = 0;
  virtual bool isSplitted() const = 0;
  virtual bool isStatic() const = 0;
  virtual bool hasPrivateSymbols() const = 0;
  virtual bool isUnalignedType() const = 0;
  virtual bool isUnreached() const = 0;
  virtual bool isValueUdt() const = 0;
  virtual bool isVirtual() const = 0;
  virtual bool isVirtualBaseClass() const = 0;
  virtual bool isVirtualInheritance() const = 0;
  virtual bool isVolatileType() const = 0;
  virtual bool wasInlined() const = 0;
  virtual std::string getUnused() const = 0;
};

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

} // namespace pdb
} // namespace llvm

#endif

//===- PDBSymbolCompilandDetails.h - PDB compiland details ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolCompilandDetails : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandDetails)
public:
  void dump(PDBSymDumper &Dumper) const override;

  void getFrontEndVersion(VersionInfo &Version) const {
    RawSymbol->getFrontEndVersion(Version);
  }

  void getBackEndVersion(VersionInfo &Version) const {
    RawSymbol->getBackEndVersion(Version);
  }

  FORWARD_SYMBOL_METHOD(getCompilerName)
  FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
  FORWARD_SYMBOL_METHOD(hasDebugInfo)
  FORWARD_SYMBOL_METHOD(hasManagedCode)
  FORWARD_SYMBOL_METHOD(hasSecurityChecks)
  FORWARD_SYMBOL_METHOD(isCVTCIL)
  FORWARD_SYMBOL_METHOD(isDataAligned)
  FORWARD_SYMBOL_METHOD(isHotpatchable)
  FORWARD_SYMBOL_METHOD(isLTCG)
  FORWARD_SYMBOL_METHOD(isMSILNetmodule)
  FORWARD_SYMBOL_METHOD(getLanguage)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getPlatform)
  FORWARD_SYMBOL_METHOD(getSourceFileName)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H

//===- PDBSymbolTypeBuiltin.h - builtin type information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeBuiltin : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BuiltinType)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getBuiltinType)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H

//===- PDBSymbolTypeVTable.h - VTable type info -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeVTable : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTable)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(getOffset)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_ID_METHOD(getType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H

//===- PDBSymbolTypeEnum.h - enum type info ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"

namespace llvm {

namespace pdb {

class PDBSymDumper;
class PDBSymbolTypeBuiltin;

class PDBSymbolTypeEnum : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Enum)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getBuiltinType)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(hasConstructor)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
  FORWARD_SYMBOL_METHOD(hasCastOperator)
  FORWARD_SYMBOL_METHOD(hasNestedTypes)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_ID_METHOD(getUnmodifiedType)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(getSrcLineOnTypeDefn)
  FORWARD_SYMBOL_METHOD(isNested)
  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
  FORWARD_SYMBOL_METHOD(isPacked)
  FORWARD_SYMBOL_METHOD(isScoped)
  FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(PDBSymbolTypeBuiltin, getType,
                                              getUnderlyingType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H

//===- PDBSymbolAnnotation.h - Accessors for querying PDB annotations ---*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolAnnotation : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Annotation)

public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(getDataKind)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  // FORWARD_SYMBOL_METHOD(getValue)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H

//===- PDBSymbolTypeFriend.h - friend type info -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeFriend : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Friend)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_ID_METHOD(getType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H

//===- IPDBFrameData.h - base interface for frame data ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBFRAMEDATA_H
#define LLVM_DEBUGINFO_PDB_IPDBFRAMEDATA_H

#include <cstdint>
#include <string>

namespace llvm {
namespace pdb {

/// IPDBFrameData defines an interface used to represent the frame data of a
/// code block.
class IPDBFrameData {
public:
  virtual ~IPDBFrameData();

  virtual uint32_t getAddressOffset() const = 0;
  virtual uint32_t getAddressSection() const = 0;
  virtual uint32_t getLengthBlock() const = 0;
  virtual std::string getProgram() const = 0;
  virtual uint32_t getRelativeVirtualAddress() const = 0;
  virtual uint64_t getVirtualAddress() const = 0;
};

} // namespace pdb
} // namespace llvm

#endif

//===- PDBSymbolFuncDebugEnd.h - function end bounds info -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolFuncDebugEnd : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugEnd)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
  FORWARD_SYMBOL_METHOD(hasFarReturn)
  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
  FORWARD_SYMBOL_METHOD(isStatic)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
  FORWARD_SYMBOL_METHOD(isUnreached)
  FORWARD_SYMBOL_METHOD(getOffset)
  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H

//===- PDBSymbolTypePointer.h - pointer type info ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypePointer : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PointerType)
public:
  void dump(PDBSymDumper &Dumper) const override;
  void dumpRight(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(isReference)
  FORWARD_SYMBOL_METHOD(isRValueReference)
  FORWARD_SYMBOL_METHOD(isPointerToDataMember)
  FORWARD_SYMBOL_METHOD(isPointerToMemberFunction)
  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(getType, getPointeeType)
  FORWARD_SYMBOL_METHOD(isRestrictedType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H

//===- PDBSymbolTypeDimension.h - array dimension type info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeDimension : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Dimension)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getLowerBoundId)
  FORWARD_SYMBOL_METHOD(getUpperBoundId)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H

//===- PDBSymbolTypeFunctionArg.h - function arg type info ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeFunctionArg : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionArg)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_ID_METHOD(getType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H

//===- PDBSymbolUnknown.h - unknown symbol type -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H

#include "PDBSymbol.h"

namespace llvm {

namespace pdb {

class PDBSymbolUnknown : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CUSTOM_TYPE(S->getSymTag() == PDB_SymType::None ||
                                 S->getSymTag() >= PDB_SymType::Max)

public:
  void dump(PDBSymDumper &Dumper) const override;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H

//===- PDBSymbolTypeUDT.h - UDT type info -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H

#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymDumper;

class PDBSymbolTypeUDT : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UDT)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_ID_METHOD(getUnmodifiedType)
  FORWARD_SYMBOL_METHOD(hasConstructor)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
  FORWARD_SYMBOL_METHOD(hasCastOperator)
  FORWARD_SYMBOL_METHOD(hasNestedTypes)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(getSrcLineOnTypeDefn)
  FORWARD_SYMBOL_METHOD(isNested)
  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
  FORWARD_SYMBOL_METHOD(isPacked)
  FORWARD_SYMBOL_METHOD(isScoped)
  FORWARD_SYMBOL_METHOD(getUdtKind)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_ID_METHOD(getVirtualTableShape)
  FORWARD_SYMBOL_METHOD(isVolatileType)
  FORWARD_SYMBOL_METHOD(getAccess)
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H

//===- PDBSymbolTypeBaseClass.h - base class type information ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"

namespace llvm {

namespace pdb {

class PDBSymDumper;

class PDBSymbolTypeBaseClass : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BaseClass)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAccess)
  FORWARD_SYMBOL_ID_METHOD(getClassParent)
  FORWARD_SYMBOL_METHOD(hasConstructor)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
  FORWARD_SYMBOL_METHOD(hasCastOperator)
  FORWARD_SYMBOL_METHOD(hasNestedTypes)
  FORWARD_SYMBOL_METHOD(isIndirectVirtualBaseClass)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(isNested)
  FORWARD_SYMBOL_METHOD(getOffset)
  FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
  FORWARD_SYMBOL_METHOD(isPacked)
  FORWARD_SYMBOL_METHOD(isScoped)
  FORWARD_SYMBOL_ID_METHOD(getType)
  FORWARD_SYMBOL_METHOD(getUdtKind)
  FORWARD_SYMBOL_METHOD(isUnalignedType)

  FORWARD_SYMBOL_METHOD(isVirtualBaseClass)
  FORWARD_SYMBOL_METHOD(getVirtualBaseDispIndex)
  FORWARD_SYMBOL_METHOD(getVirtualBasePointerOffset)
  // FORWARD_SYMBOL_METHOD(getVirtualBaseTableType)
  FORWARD_SYMBOL_ID_METHOD(getVirtualTableShape)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H

//===- IPDBTable.h - Base Interface for a PDB Symbol Context ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBTABLE_H
#define LLVM_DEBUGINFO_PDB_IPDBTABLE_H

#include "PDBTypes.h"

namespace llvm {
namespace pdb {
class IPDBTable {
public:
  virtual ~IPDBTable();

  virtual std::string getName() const = 0;
  virtual uint32_t getItemCount() const = 0;
  virtual PDB_TableType getTableType() const = 0;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_IPDBTABLE_H

//===- PDBSymbolExe.h - Accessors for querying executables in a PDB ----*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

class raw_ostream;

namespace pdb {

class PDBSymbolExe : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Exe)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAge)
  FORWARD_SYMBOL_METHOD(getGuid)
  FORWARD_SYMBOL_METHOD(hasCTypes)
  FORWARD_SYMBOL_METHOD(hasPrivateSymbols)
  FORWARD_SYMBOL_METHOD(getMachineType)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(getSignature)
  FORWARD_SYMBOL_METHOD(getSymbolsFileName)

  uint32_t getPointerByteSize() const;

private:
  void dumpChildren(raw_ostream &OS, StringRef Label, PDB_SymType ChildType,
                    int Indent) const;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H

//===- PDBSymbolBlock.h - Accessors for querying PDB blocks -------------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolBlock : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Block)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H

//===- PDBSymbolLabel.h - label info ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolLabel : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Label)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
  FORWARD_SYMBOL_METHOD(hasFarReturn)
  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(getName)
  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
  FORWARD_SYMBOL_METHOD(isUnreached)
  FORWARD_SYMBOL_METHOD(getOffset)
  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H

//===- PDBSymbolTypeArray.h - array type information ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeArray : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ArrayType)
public:
  void dump(PDBSymDumper &Dumper) const override;
  void dumpRight(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_ID_METHOD(getArrayIndexType)
  FORWARD_SYMBOL_METHOD(isConstType)
  FORWARD_SYMBOL_METHOD(getCount)
  FORWARD_SYMBOL_METHOD(getLength)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getRank)
  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(getType, getElementType)
  FORWARD_SYMBOL_METHOD(isUnalignedType)
  FORWARD_SYMBOL_METHOD(isVolatileType)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H

//===- PDBSymbol.h - base class for user-facing symbol types ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOL_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOL_H

#include "IPDBRawSymbol.h"
#include "PDBExtras.h"
#include "PDBTypes.h"
#include "llvm/Support/Casting.h"

#define FORWARD_SYMBOL_METHOD(MethodName)                                      \
  decltype(auto) MethodName() const { return RawSymbol->MethodName(); }

#define FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(ConcreteType, PrivateName, \
                                                    PublicName)                \
  decltype(auto) PublicName##Id() const {                                      \
    return RawSymbol->PrivateName##Id();                                       \
  }                                                                            \
  std::unique_ptr<ConcreteType> PublicName() const {                           \
    uint32_t Id = PublicName##Id();                                            \
    return getConcreteSymbolByIdHelper<ConcreteType>(Id);                      \
  }

#define FORWARD_SYMBOL_ID_METHOD_WITH_NAME(PrivateName, PublicName)            \
  FORWARD_CONCRETE_SYMBOL_ID_METHOD_WITH_NAME(PDBSymbol, PrivateName,          \
                                              PublicName)

#define FORWARD_SYMBOL_ID_METHOD(MethodName)                                   \
  FORWARD_SYMBOL_ID_METHOD_WITH_NAME(MethodName, MethodName)
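
// Illustration (editorial addition, not part of the original header): given
// the macros above, FORWARD_SYMBOL_METHOD(getName) expands to
//   decltype(auto) getName() const { return RawSymbol->getName(); }
// while FORWARD_SYMBOL_ID_METHOD(getLexicalParent) yields two members:
//   decltype(auto) getLexicalParentId() const;           // raw symbol id
//   std::unique_ptr<PDBSymbol> getLexicalParent() const; // resolved symbol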

namespace llvm {

class StringRef;
class raw_ostream;

namespace pdb {
class IPDBSession;
class PDBSymDumper;
class PDBSymbol;
template <typename ChildType> class ConcreteSymbolEnumerator;

#define DECLARE_PDB_SYMBOL_CONCRETE_TYPE(TagValue)                             \
private:                                                                       \
  using PDBSymbol::PDBSymbol;                                                  \
  friend class PDBSymbol;                                                      \
                                                                               \
public:                                                                        \
  static const PDB_SymType Tag = TagValue;                                     \
  static bool classof(const PDBSymbol *S) { return S->getSymTag() == Tag; }

#define DECLARE_PDB_SYMBOL_CUSTOM_TYPE(Condition)                              \
private:                                                                       \
  using PDBSymbol::PDBSymbol;                                                  \
  friend class PDBSymbol;                                                      \
                                                                               \
public:                                                                        \
  static bool classof(const PDBSymbol *S) { return Condition; }

/// PDBSymbol defines the base of the inheritance hierarchy for concrete symbol
/// types (e.g. functions, executables, vtables, etc).  All concrete symbol
/// types inherit from PDBSymbol and expose the exact set of methods that are
/// valid for that particular symbol type, as described in the Microsoft
/// reference "Lexical and Class Hierarchy of Symbol Types":
/// https://msdn.microsoft.com/en-us/library/370hs6k4.aspx
class PDBSymbol {
  static std::unique_ptr<PDBSymbol> createSymbol(const IPDBSession &PDBSession,
                                                 PDB_SymType Tag);

protected:
  explicit PDBSymbol(const IPDBSession &PDBSession);
  PDBSymbol(PDBSymbol &&Other);

public:
  static std::unique_ptr<PDBSymbol>
  create(const IPDBSession &PDBSession,
         std::unique_ptr<IPDBRawSymbol> RawSymbol);
  static std::unique_ptr<PDBSymbol> create(const IPDBSession &PDBSession,
                                           IPDBRawSymbol &RawSymbol);

  template <typename ConcreteT>
  static std::unique_ptr<ConcreteT>
  createAs(const IPDBSession &PDBSession,
           std::unique_ptr<IPDBRawSymbol> RawSymbol) {
    std::unique_ptr<PDBSymbol> S = create(PDBSession, std::move(RawSymbol));
    return unique_dyn_cast_or_null<ConcreteT>(std::move(S));
  }
  template <typename ConcreteT>
  static std::unique_ptr<ConcreteT> createAs(const IPDBSession &PDBSession,
                                             IPDBRawSymbol &RawSymbol) {
    std::unique_ptr<PDBSymbol> S = create(PDBSession, RawSymbol);
    return unique_dyn_cast_or_null<ConcreteT>(std::move(S));
  }

  virtual ~PDBSymbol();

  /// Dumps the contents of a symbol to a raw_ostream.  By default this will just
  /// call dump() on the underlying RawSymbol, which allows us to discover
  /// unknown properties, but individual implementations of PDBSymbol may
  /// override the behavior to only dump known fields.
  virtual void dump(PDBSymDumper &Dumper) const = 0;

  /// For certain PDBSymbolTypes, dumps additional information for the type that
  /// normally goes on the right side of the symbol.
  virtual void dumpRight(PDBSymDumper &Dumper) const {}

  void defaultDump(raw_ostream &OS, int Indent, PdbSymbolIdField ShowFlags,
                   PdbSymbolIdField RecurseFlags) const;
  void dumpProperties() const;
  void dumpChildStats() const;

  PDB_SymType getSymTag() const;
  uint32_t getSymIndexId() const;

  template <typename T> std::unique_ptr<T> findOneChild() const {
    auto Enumerator(findAllChildren<T>());
    if (!Enumerator)
      return nullptr;
    return Enumerator->getNext();
  }

  template <typename T>
  std::unique_ptr<ConcreteSymbolEnumerator<T>> findAllChildren() const {
    auto BaseIter = RawSymbol->findChildren(T::Tag);
    if (!BaseIter)
      return nullptr;
    return std::make_unique<ConcreteSymbolEnumerator<T>>(std::move(BaseIter));
  }
  std::unique_ptr<IPDBEnumSymbols> findAllChildren(PDB_SymType Type) const;
  std::unique_ptr<IPDBEnumSymbols> findAllChildren() const;

  std::unique_ptr<IPDBEnumSymbols>
  findChildren(PDB_SymType Type, StringRef Name,
               PDB_NameSearchFlags Flags) const;
  std::unique_ptr<IPDBEnumSymbols> findChildrenByRVA(PDB_SymType Type,
                                                     StringRef Name,
                                                     PDB_NameSearchFlags Flags,
                                                     uint32_t RVA) const;
  std::unique_ptr<IPDBEnumSymbols> findInlineFramesByVA(uint64_t VA) const;
  std::unique_ptr<IPDBEnumSymbols> findInlineFramesByRVA(uint32_t RVA) const;
  std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByVA(uint64_t VA, uint32_t Length) const;
  std::unique_ptr<IPDBEnumLineNumbers>
  findInlineeLinesByRVA(uint32_t RVA, uint32_t Length) const;

  std::string getName() const;

  const IPDBRawSymbol &getRawSymbol() const { return *RawSymbol; }
  IPDBRawSymbol &getRawSymbol() { return *RawSymbol; }

  const IPDBSession &getSession() const { return Session; }

  std::unique_ptr<IPDBEnumSymbols> getChildStats(TagStats &Stats) const;

protected:
  std::unique_ptr<PDBSymbol> getSymbolByIdHelper(uint32_t Id) const;

  template <typename ConcreteType>
  std::unique_ptr<ConcreteType> getConcreteSymbolByIdHelper(uint32_t Id) const {
    return unique_dyn_cast_or_null<ConcreteType>(getSymbolByIdHelper(Id));
  }

  const IPDBSession &Session;
  std::unique_ptr<IPDBRawSymbol> OwnedRawSymbol;
  IPDBRawSymbol *RawSymbol = nullptr;
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOL_H
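
// --- Editorial usage sketch (not part of the original headers). Enumerating
// typed children of a PDBSymbol; `Scope` is assumed to be obtained elsewhere,
// e.g. from an IPDBSession's global scope.
#include "llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h"
#include "llvm/DebugInfo/PDB/PDBSymDumper.h"
#include "llvm/DebugInfo/PDB/PDBSymbolTypeArray.h"

static void dumpArrayTypes(const llvm::pdb::PDBSymbol &Scope,
                           llvm::pdb::PDBSymDumper &Dumper) {
  // findAllChildren<T> filters children by T::Tag and wraps them in a
  // ConcreteSymbolEnumerator<T>; getNext() yields std::unique_ptr<T>.
  auto Arrays = Scope.findAllChildren<llvm::pdb::PDBSymbolTypeArray>();
  if (!Arrays)
    return;
  while (auto Array = Arrays->getNext())
    Array->dump(Dumper); // dispatches to PDBSymbolTypeArray::dump
}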

//==- IPDBSectionContrib.h - Interfaces for PDB SectionContribs --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_IPDBSECTIONCONTRIB_H
#define LLVM_DEBUGINFO_PDB_IPDBSECTIONCONTRIB_H

#include "PDBTypes.h"

namespace llvm {
namespace pdb {

/// IPDBSectionContrib defines an interface used to represent section
/// contributions whose information is stored in the PDB.
class IPDBSectionContrib {
public:
  virtual ~IPDBSectionContrib();

  virtual std::unique_ptr<PDBSymbolCompiland> getCompiland() const = 0;
  virtual uint32_t getAddressSection() const = 0;
  virtual uint32_t getAddressOffset() const = 0;
  virtual uint32_t getRelativeVirtualAddress() const = 0;
  virtual uint64_t getVirtualAddress() const = 0;
  virtual uint32_t getLength() const = 0;
  virtual bool isNotPaged() const = 0;
  virtual bool hasCode() const = 0;
  virtual bool hasCode16Bit() const = 0;
  virtual bool hasInitializedData() const = 0;
  virtual bool hasUninitializedData() const = 0;
  virtual bool isRemoved() const = 0;
  virtual bool hasComdat() const = 0;
  virtual bool isDiscardable() const = 0;
  virtual bool isNotCached() const = 0;
  virtual bool isShared() const = 0;
  virtual bool isExecutable() const = 0;
  virtual bool isReadable() const = 0;
  virtual bool isWritable() const = 0;
  virtual uint32_t getDataCrc32() const = 0;
  virtual uint32_t getRelocationsCrc32() const = 0;
  virtual uint32_t getCompilandId() const = 0;
};
} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_IPDBSECTIONCONTRIB_H

//===- PDBSymbolFuncDebugStart.h - function start bounds info ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolFuncDebugStart : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugStart)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getAddressOffset)
  FORWARD_SYMBOL_METHOD(getAddressSection)
  FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
  FORWARD_SYMBOL_METHOD(hasFarReturn)
  FORWARD_SYMBOL_METHOD(hasInterruptReturn)
  FORWARD_SYMBOL_METHOD(isStatic)
  FORWARD_SYMBOL_ID_METHOD(getLexicalParent)
  FORWARD_SYMBOL_METHOD(getLocationType)
  FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
  FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
  FORWARD_SYMBOL_METHOD(isUnreached)
  FORWARD_SYMBOL_METHOD(getOffset)
  FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
  FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
  FORWARD_SYMBOL_METHOD(getVirtualAddress)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H

//===- PDBSymbolTypeCustom.h - custom compiler type information -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H

#include "PDBSymbol.h"
#include "PDBTypes.h"

namespace llvm {

namespace pdb {

class PDBSymbolTypeCustom : public PDBSymbol {
  DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CustomType)
public:
  void dump(PDBSymDumper &Dumper) const override;

  FORWARD_SYMBOL_METHOD(getOemId)
  FORWARD_SYMBOL_METHOD(getOemSymbolId)
};

} // namespace pdb
} // namespace llvm

#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H

//===- BTFContext.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BTFContext interface is used by llvm-objdump tool to print source
// code alongside disassembly.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_BTF_BTFCONTEXT_H
#define LLVM_DEBUGINFO_BTF_BTFCONTEXT_H

#include "llvm/DebugInfo/BTF/BTFParser.h"
#include "llvm/DebugInfo/DIContext.h"

namespace llvm {

class BTFContext final : public DIContext {
  BTFParser BTF;

public:
  BTFContext() : DIContext(CK_BTF) {}

  void dump(raw_ostream &OS, DIDumpOptions DumpOpts) override {
    // This function is called from objdump when the --dwarf=? option is set.
    // BTF is not DWARF, so ignore this operation for now.
  }

  DILineInfo getLineInfoForAddress(
      object::SectionedAddress Address,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;

  DILineInfo
  getLineInfoForDataAddress(object::SectionedAddress Address) override;

  DILineInfoTable getLineInfoForAddressRange(
      object::SectionedAddress Address, uint64_t Size,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;

  DIInliningInfo getInliningInfoForAddress(
      object::SectionedAddress Address,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;

  std::vector<DILocal>
  getLocalsForAddress(object::SectionedAddress Address) override;

  static std::unique_ptr<BTFContext> create(
      const object::ObjectFile &Obj,
      std::function<void(Error)> ErrorHandler = WithColor::defaultErrorHandler);
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_BTF_BTFCONTEXT_H
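
// --- Editorial usage sketch (not part of the original headers). Looking up
// line info through BTFContext; the section index and address below are
// placeholders.
#include "llvm/DebugInfo/BTF/BTFContext.h"
#include "llvm/Support/raw_ostream.h"

static void printBTFLine(const llvm::object::ObjectFile &Obj) {
  std::unique_ptr<llvm::BTFContext> Ctx = llvm::BTFContext::create(Obj);
  llvm::object::SectionedAddress Addr;
  Addr.SectionIndex = 1; // hypothetical ELF section number
  Addr.Address = 0x0;    // BTF lookups match instruction offsets exactly
  llvm::DILineInfo Info = Ctx->getLineInfoForAddress(Addr);
  llvm::outs() << Info.FileName << ":" << Info.Line << "\n";
}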

//===-- BTF.h --------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the layout of .BTF and .BTF.ext ELF sections.
///
/// The binary layout for .BTF section:
///   struct Header
///   Type and Str subsections
/// The Type subsection is a collection of types with type ids starting from 1.
/// The Str subsection is simply a collection of strings.
///
/// The binary layout for .BTF.ext section:
///   struct ExtHeader
///   FuncInfo, LineInfo, FieldReloc and ExternReloc subsections
/// The FuncInfo subsection is defined as below:
///   BTFFuncInfo Size
///   struct SecFuncInfo for ELF section #1
///   A number of struct BPFFuncInfo for ELF section #1
///   struct SecFuncInfo for ELF section #2
///   A number of struct BPFFuncInfo for ELF section #2
///   ...
/// The LineInfo subsection is defined as below:
///   BPFLineInfo Size
///   struct SecLineInfo for ELF section #1
///   A number of struct BPFLineInfo for ELF section #1
///   struct SecLineInfo for ELF section #2
///   A number of struct BPFLineInfo for ELF section #2
///   ...
/// The FieldReloc subsection is defined as below:
///   BPFFieldReloc Size
///   struct SecFieldReloc for ELF section #1
///   A number of struct BPFFieldReloc for ELF section #1
///   struct SecFieldReloc for ELF section #2
///   A number of struct BPFFieldReloc for ELF section #2
///   ...
///
/// The section formats are also defined at
///    https://github.com/torvalds/linux/blob/master/include/uapi/linux/btf.h
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_BPF_BTF_H
#define LLVM_LIB_TARGET_BPF_BTF_H

#include <cstdint>

namespace llvm {
namespace BTF {

enum : uint32_t { MAGIC = 0xeB9F, VERSION = 1 };

/// Sizes in bytes of various things in the BTF format.
enum {
  HeaderSize = 24,
  ExtHeaderSize = 32,
  CommonTypeSize = 12,
  BTFArraySize = 12,
  BTFEnumSize = 8,
  BTFEnum64Size = 12,
  BTFMemberSize = 12,
  BTFParamSize = 8,
  BTFDataSecVarSize = 12,
  SecFuncInfoSize = 8,
  SecLineInfoSize = 8,
  SecFieldRelocSize = 8,
  BPFFuncInfoSize = 8,
  BPFLineInfoSize = 16,
  BPFFieldRelocSize = 16,
};

/// The .BTF section header definition.
struct Header {
  uint16_t Magic;  ///< Magic value
  uint8_t Version; ///< Version number
  uint8_t Flags;   ///< Extra flags
  uint32_t HdrLen; ///< Length of this header

  /// All offsets are in bytes relative to the end of this header.
  uint32_t TypeOff; ///< Offset of type section
  uint32_t TypeLen; ///< Length of type section
  uint32_t StrOff;  ///< Offset of string section
  uint32_t StrLen;  ///< Length of string section
};

enum : uint32_t {
  MAX_VLEN = 0xffff ///< Max # of struct/union/enum members or func args
};

enum TypeKinds : uint8_t {
#define HANDLE_BTF_KIND(ID, NAME) BTF_KIND_##NAME = ID,
#include "BTF.def"
};

/// The BTF common type definition. Different kinds may have
/// additional information after this structure data.
struct CommonType {
  /// Type name offset in the string table.
  uint32_t NameOff;

  /// "Info" bits arrangement:
  /// Bits  0-15: vlen (e.g. # of struct's members)
  /// Bits 16-23: unused
  /// Bits 24-27: kind (e.g. int, ptr, array...etc)
  /// Bits 28-30: unused
  /// Bit     31: kind_flag, currently used by
  ///             struct, union and fwd
  uint32_t Info;

  /// "Size" is used by INT, ENUM, STRUCT and UNION.
  /// "Size" tells the size of the type it is describing.
  ///
  /// "Type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
  /// FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
  /// "Type" is a type_id referring to another type.
  union {
    uint32_t Size;
    uint32_t Type;
  };
};
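
// --- Editorial sketch (not part of the original header): decoding the "Info"
// bit layout documented above. These helpers are illustrative only; the
// original header does not provide them.
inline uint32_t commonTypeVlen(const CommonType &Ty) {
  return Ty.Info & 0xffff; // bits 0-15
}
inline uint8_t commonTypeKind(const CommonType &Ty) {
  return (Ty.Info >> 24) & 0x0f; // bits 24-27
}
inline bool commonTypeKindFlag(const CommonType &Ty) {
  return (Ty.Info >> 31) != 0; // bit 31
}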

// For some specific BTF_KIND, "struct CommonType" is immediately
// followed by extra data.

// BTF_KIND_INT is followed by a u32 whose 32 bits are arranged as follows:
// BTF_INT_ENCODING(VAL) : (((VAL) & 0x0f000000) >> 24)
// BTF_INT_OFFSET(VAL)   : (((VAL) & 0x00ff0000) >> 16)
// BTF_INT_BITS(VAL)     : ((VAL) & 0x000000ff)

/// Attributes stored in the INT_ENCODING.
enum : uint8_t {
  INT_SIGNED = (1 << 0),
  INT_CHAR = (1 << 1),
  INT_BOOL = (1 << 2)
};

/// BTF_KIND_ENUM is followed by multiple "struct BTFEnum".
/// The exact number of btf_enum is stored in the vlen (of the
/// info in "struct CommonType").
struct BTFEnum {
  uint32_t NameOff; ///< Enum name offset in the string table
  int32_t Val;      ///< Enum member value
};

/// BTF_KIND_ENUM64 is followed by multiple "struct BTFEnum64".
/// The exact number of BTFEnum64 is stored in the vlen (of the
/// info in "struct CommonType").
struct BTFEnum64 {
  uint32_t NameOff;  ///< Enum name offset in the string table
  uint32_t Val_Lo32; ///< Enum member lo32 value
  uint32_t Val_Hi32; ///< Enum member hi32 value
};

/// BTF_KIND_ARRAY is followed by one "struct BTFArray".
struct BTFArray {
  uint32_t ElemType;  ///< Element type
  uint32_t IndexType; ///< Index type
  uint32_t Nelems;    ///< Number of elements for this array
};

/// BTF_KIND_STRUCT and BTF_KIND_UNION are followed
/// by multiple "struct BTFMember".  The exact number
/// of BTFMember is stored in the vlen (of the info in
/// "struct CommonType").
///
/// If the struct/union contains any bitfield member,
/// the Offset below represents BitOffset (bits 0 - 23)
/// and BitFieldSize(bits 24 - 31) with BitFieldSize = 0
/// for non bitfield members. Otherwise, the Offset
/// represents the BitOffset.
struct BTFMember {
  uint32_t NameOff; ///< Member name offset in the string table
  uint32_t Type;    ///< Member type
  uint32_t Offset;  ///< BitOffset or BitFieldSize+BitOffset
};

/// BTF_KIND_FUNC_PROTO is followed by multiple "struct BTFParam".
/// The exact number of BTFParam is stored in the vlen (of the info
/// in "struct CommonType").
struct BTFParam {
  uint32_t NameOff;
  uint32_t Type;
};

/// BTF_KIND_FUNC can be global, static or extern.
enum : uint8_t {
  FUNC_STATIC = 0,
  FUNC_GLOBAL = 1,
  FUNC_EXTERN = 2,
};

/// Variable scoping information.
enum : uint8_t {
  VAR_STATIC = 0,           ///< Linkage: InternalLinkage
  VAR_GLOBAL_ALLOCATED = 1, ///< Linkage: ExternalLinkage
  VAR_GLOBAL_EXTERNAL = 2,  ///< Linkage: ExternalLinkage
};

/// BTF_KIND_DATASEC is followed by multiple "struct BTFDataSecVar".
/// The exact number of BTFDataSec is stored in the vlen (of the info
/// in "struct CommonType").
struct BTFDataSec {
  uint32_t Type;   ///< A BTF_KIND_VAR type
  uint32_t Offset; ///< In-section offset
  uint32_t Size;   ///< Occupied memory size
};

/// The .BTF.ext section header definition.
struct ExtHeader {
  uint16_t Magic;
  uint8_t Version;
  uint8_t Flags;
  uint32_t HdrLen;

  uint32_t FuncInfoOff;   ///< Offset of func info section
  uint32_t FuncInfoLen;   ///< Length of func info section
  uint32_t LineInfoOff;   ///< Offset of line info section
  uint32_t LineInfoLen;   ///< Length of line info section
  uint32_t FieldRelocOff; ///< Offset of offset reloc section
  uint32_t FieldRelocLen; ///< Length of offset reloc section
};

/// Specifying one function info.
struct BPFFuncInfo {
  uint32_t InsnOffset; ///< Byte offset in the section
  uint32_t TypeId;     ///< Type id referring to .BTF type section
};

/// Specifying function infos in one section.
struct SecFuncInfo {
  uint32_t SecNameOff;  ///< Section name index in the .BTF string table
  uint32_t NumFuncInfo; ///< Number of func infos in this section
};

/// Specifying one line info.
struct BPFLineInfo {
  uint32_t InsnOffset;  ///< Byte offset in this section
  uint32_t FileNameOff; ///< File name index in the .BTF string table
  uint32_t LineOff;     ///< Line index in the .BTF string table
  uint32_t LineCol;     ///< Line num: line_col >> 10,
                        ///  col num: line_col & 0x3ff
  uint32_t getLine() const { return LineCol >> 10; }
  uint32_t getCol() const { return LineCol & 0x3ff; }
};

/// Specifying line infos in one section.
struct SecLineInfo {
  uint32_t SecNameOff;  ///< Section name index in the .BTF string table
  uint32_t NumLineInfo; ///< Number of line infos in this section
};

/// Specifying one offset relocation.
struct BPFFieldReloc {
  uint32_t InsnOffset;    ///< Byte offset in this section
  uint32_t TypeID;        ///< TypeID for the relocation
  uint32_t OffsetNameOff; ///< The string to traverse types
  uint32_t RelocKind;     ///< How to patch the instruction
};

/// Specifying offset relocations in one section.
struct SecFieldReloc {
  uint32_t SecNameOff;    ///< Section name index in the .BTF string table
  uint32_t NumFieldReloc; ///< Number of offset relocs in this section
};

} // End namespace BTF.
} // End namespace llvm.

#endif // LLVM_LIB_TARGET_BPF_BTF_H

//===- BTF.def - BTF definitions --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for BTF.
//
//===----------------------------------------------------------------------===//

#if !defined(HANDLE_BTF_KIND)
#error "Missing macro definition of HANDLE_BTF_*"
#endif

HANDLE_BTF_KIND(0, UNKN)
HANDLE_BTF_KIND(1, INT)
HANDLE_BTF_KIND(2, PTR)
HANDLE_BTF_KIND(3, ARRAY)
HANDLE_BTF_KIND(4, STRUCT)
HANDLE_BTF_KIND(5, UNION)
HANDLE_BTF_KIND(6, ENUM)
HANDLE_BTF_KIND(7, FWD)
HANDLE_BTF_KIND(8, TYPEDEF)
HANDLE_BTF_KIND(9, VOLATILE)
HANDLE_BTF_KIND(10, CONST)
HANDLE_BTF_KIND(11, RESTRICT)
HANDLE_BTF_KIND(12, FUNC)
HANDLE_BTF_KIND(13, FUNC_PROTO)
HANDLE_BTF_KIND(14, VAR)
HANDLE_BTF_KIND(15, DATASEC)
HANDLE_BTF_KIND(16, FLOAT)
HANDLE_BTF_KIND(17, DECL_TAG)
HANDLE_BTF_KIND(18, TYPE_TAG)
HANDLE_BTF_KIND(19, ENUM64)

#undef HANDLE_BTF_KIND
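
// --- Editorial sketch (not part of the original file): the usual X-macro
// pattern for consuming BTF.def. A client defines HANDLE_BTF_KIND and then
// includes the file; BTF.def #undefs the macro after expansion.
#include <cstdint>

static const char *btfKindName(uint8_t Kind) {
  switch (Kind) {
#define HANDLE_BTF_KIND(ID, NAME)                                              \
  case ID:                                                                     \
    return "BTF_KIND_" #NAME;
#include "llvm/DebugInfo/BTF/BTF.def"
  default:
    return "<unknown BTF kind>";
  }
}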

//===- BTFParser.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BTFParser reads .BTF and .BTF.ext ELF sections generated by LLVM
// BPF backend and provides introspection for the stored information.
// Currently the following information is accessible:
// - string table;
// - instruction offset to line information mapping.
//
// See llvm/DebugInfo/BTF/BTF.h for some details about binary format
// and links to Linux Kernel documentation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_BTF_BTFPARSER_H
#define LLVM_DEBUGINFO_BTF_BTFPARSER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/DebugInfo/BTF/BTF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/DataExtractor.h"

namespace llvm {
using object::ObjectFile;
using object::SectionedAddress;
using object::SectionRef;

class BTFParser {
  using BTFLinesVector = SmallVector<BTF::BPFLineInfo, 0>;

  // In BTF, strings are stored as a contiguous memory region in which
  // individual strings are separated by 0 bytes. A string is identified
  // by its offset in this region.
  // The `StringsTable` points to this region in the parsed ObjectFile.
  StringRef StringsTable;

  // Maps ELF section number to instruction line number information.
  // Each BTFLinesVector is sorted by `InsnOffset` to allow fast lookups.
  DenseMap<uint64_t, BTFLinesVector> SectionLines;

  struct ParseContext;
  Error parseBTF(ParseContext &Ctx, SectionRef BTF);
  Error parseBTFExt(ParseContext &Ctx, SectionRef BTFExt);
  Error parseLineInfo(ParseContext &Ctx, DataExtractor &Extractor,
                      uint64_t LineInfoStart, uint64_t LineInfoEnd);

public:
  // Looks up a string in the .BTF section's string table.
  // Offset is relative to string table start.
  StringRef findString(uint32_t Offset) const;

  // Searches for line information for a specific address; the address
  // match is exact (unlike DWARFContext).
  // Returns nullptr if no information is found.
  // If information is present, returns a pointer to an object
  // owned by this class.
  const BTF::BPFLineInfo *findLineInfo(SectionedAddress Address) const;

  // Fills this BTFParser instance with information stored in the .BTF and
  // .BTF.ext sections of `Obj`. If the instance was already filled, the
  // old data is discarded.
  //
  // If the information cannot be parsed:
  // - an error describing the failure is returned;
  // - the state of the BTFParser might be incomplete but is not invalid:
  //   queries may still be run against it, but some (or all) information
  //   might be unavailable.
  Error parse(const ObjectFile &Obj);

  // Return true if `Obj` has .BTF and .BTF.ext sections.
  static bool hasBTFSections(const ObjectFile &Obj);
};

} // namespace llvm

#endif // LLVM_DEBUGINFO_BTF_BTFPARSER_H
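
// --- Editorial usage sketch (not part of the original headers). Parsing BTF
// sections and resolving one instruction's line info; the offsets below are
// placeholders.
#include "llvm/DebugInfo/BTF/BTFParser.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

static void dumpBTFLineInfo(const llvm::object::ObjectFile &Obj) {
  if (!llvm::BTFParser::hasBTFSections(Obj))
    return;
  llvm::BTFParser BTF;
  if (llvm::Error E = BTF.parse(Obj))
    // A failed parse may still leave usable (partial) data behind.
    llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "BTF: ");
  llvm::object::SectionedAddress Addr;
  Addr.SectionIndex = 1; // hypothetical ELF section number
  Addr.Address = 0x10;   // exact instruction offset within that section
  if (const llvm::BTF::BPFLineInfo *Line = BTF.findLineInfo(Addr))
    llvm::outs() << BTF.findString(Line->FileNameOff) << ":" << Line->getLine()
                 << "\n";
}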

//===- SymbolizableObjectFile.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SymbolizableObjectFile class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEOBJECTFILE_H
#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEOBJECTFILE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class DataExtractor;

namespace symbolize {

class SymbolizableObjectFile : public SymbolizableModule {
public:
  static Expected<std::unique_ptr<SymbolizableObjectFile>>
  create(const object::ObjectFile *Obj, std::unique_ptr<DIContext> DICtx,
         bool UntagAddresses);

  DILineInfo symbolizeCode(object::SectionedAddress ModuleOffset,
                           DILineInfoSpecifier LineInfoSpecifier,
                           bool UseSymbolTable) const override;
  DIInliningInfo symbolizeInlinedCode(object::SectionedAddress ModuleOffset,
                                      DILineInfoSpecifier LineInfoSpecifier,
                                      bool UseSymbolTable) const override;
  DIGlobal symbolizeData(object::SectionedAddress ModuleOffset) const override;
  std::vector<DILocal>
  symbolizeFrame(object::SectionedAddress ModuleOffset) const override;

  // Return true if this is a 32-bit x86 PE COFF module.
  bool isWin32Module() const override;

  // Returns the preferred base of the module, i.e. where the loader would place
  // it in memory assuming there were no conflicts.
  uint64_t getModulePreferredBase() const override;

private:
  bool shouldOverrideWithSymbolTable(FunctionNameKind FNKind,
                                     bool UseSymbolTable) const;

  bool getNameFromSymbolTable(uint64_t Address, std::string &Name,
                              uint64_t &Addr, uint64_t &Size,
                              std::string &FileName) const;
  // For big-endian PowerPC64 ELF, OpdAddress is the address of the .opd
  // (function descriptor) section and OpdExtractor refers to its contents.
  Error addSymbol(const object::SymbolRef &Symbol, uint64_t SymbolSize,
                  DataExtractor *OpdExtractor = nullptr,
                  uint64_t OpdAddress = 0);
  Error addCoffExportSymbols(const object::COFFObjectFile *CoffObj);

  /// Searches for the first occurrence of the specified Address in the
  /// ObjectFile.
  uint64_t getModuleSectionIndexForAddress(uint64_t Address) const;

  const object::ObjectFile *Module;
  std::unique_ptr<DIContext> DebugInfoContext;
  bool UntagAddresses;

  struct SymbolDesc {
    uint64_t Addr;
    // If size is 0, assume that symbol occupies the whole memory range up to
    // the following symbol.
    uint64_t Size;

    StringRef Name;
    // Non-zero if this is an ELF local symbol. See the comment in
    // getNameFromSymbolTable.
    uint32_t ELFLocalSymIdx;

    bool operator<(const SymbolDesc &RHS) const {
      return Addr != RHS.Addr ? Addr < RHS.Addr : Size < RHS.Size;
    }
  };
  std::vector<SymbolDesc> Symbols;
  // (index, filename) pairs of ELF STT_FILE symbols.
  std::vector<std::pair<uint32_t, StringRef>> FileSymbols;

  SymbolizableObjectFile(const object::ObjectFile *Obj,
                         std::unique_ptr<DIContext> DICtx,
                         bool UntagAddresses);
};

} // end namespace symbolize

} // end namespace llvm

#endif // LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEOBJECTFILE_H

//===- MarkupFilter.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares a filter that replaces symbolizer markup with
/// human-readable expressions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_SYMBOLIZE_MARKUPFILTER_H
#define LLVM_DEBUGINFO_SYMBOLIZE_MARKUPFILTER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/DebugInfo/Symbolize/Markup.h"
#include "llvm/Object/BuildID.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <map>

namespace llvm {
namespace symbolize {

class LLVMSymbolizer;

/// Filter to convert parsed log symbolizer markup elements into human-readable
/// text.
class MarkupFilter {
public:
  MarkupFilter(raw_ostream &OS, LLVMSymbolizer &Symbolizer,
               std::optional<bool> ColorsEnabled = std::nullopt);

  /// Filters a line containing symbolizer markup and writes the human-readable
  /// results to the output stream.
  ///
  /// Invalid or unimplemented markup elements are removed. Some output may be
  /// deferred until a future filter() or finish() call.
  void filter(StringRef Line);

  /// Records that the input stream has ended and writes any deferred output.
  void finish();

private:
  struct Module {
    uint64_t ID;
    std::string Name;
    SmallVector<uint8_t> BuildID;
  };

  struct MMap {
    uint64_t Addr;
    uint64_t Size;
    const Module *Mod;
    std::string Mode; // Lowercase
    uint64_t ModuleRelativeAddr;

    bool contains(uint64_t Addr) const;
    uint64_t getModuleRelativeAddr(uint64_t Addr) const;
  };

  // An informational module line currently being constructed. As many mmap
  // elements as possible are folded into one ModuleInfo line.
  struct ModuleInfoLine {
    const Module *Mod;

    SmallVector<const MMap *> MMaps = {};
  };

  // The semantics of a possible program counter value.
  enum class PCType {
    // The address is a return address and must be adjusted to point to the call
    // itself.
    ReturnAddress,
    // The address is the precise location in the code and needs no adjustment.
    PreciseCode,
  };

  bool tryContextualElement(const MarkupNode &Node,
                            const SmallVector<MarkupNode> &DeferredNodes);
  bool tryMMap(const MarkupNode &Element,
               const SmallVector<MarkupNode> &DeferredNodes);
  bool tryReset(const MarkupNode &Element,
                const SmallVector<MarkupNode> &DeferredNodes);
  bool tryModule(const MarkupNode &Element,
                 const SmallVector<MarkupNode> &DeferredNodes);

  void beginModuleInfoLine(const Module *M);
  void endAnyModuleInfoLine();

  void filterNode(const MarkupNode &Node);

  bool tryPresentation(const MarkupNode &Node);
  bool trySymbol(const MarkupNode &Node);
  bool tryPC(const MarkupNode &Node);
  bool tryBackTrace(const MarkupNode &Node);
  bool tryData(const MarkupNode &Node);

  bool trySGR(const MarkupNode &Node);

  void highlight();
  void highlightValue();
  void restoreColor();
  void resetColor();

  void printRawElement(const MarkupNode &Element);
  void printValue(Twine Value);

  std::optional<Module> parseModule(const MarkupNode &Element) const;
  std::optional<MMap> parseMMap(const MarkupNode &Element) const;

  std::optional<uint64_t> parseAddr(StringRef Str) const;
  std::optional<uint64_t> parseModuleID(StringRef Str) const;
  std::optional<uint64_t> parseSize(StringRef Str) const;
  object::BuildID parseBuildID(StringRef Str) const;
  std::optional<std::string> parseMode(StringRef Str) const;
  std::optional<PCType> parsePCType(StringRef Str) const;
  std::optional<uint64_t> parseFrameNumber(StringRef Str) const;

  bool checkTag(const MarkupNode &Node) const;
  bool checkNumFields(const MarkupNode &Element, size_t Size) const;
  bool checkNumFieldsAtLeast(const MarkupNode &Element, size_t Size) const;
  void warnNumFieldsAtMost(const MarkupNode &Element, size_t Size) const;

  void reportTypeError(StringRef Str, StringRef TypeName) const;
  void reportLocation(StringRef::iterator Loc) const;

  const MMap *getOverlappingMMap(const MMap &Map) const;
  const MMap *getContainingMMap(uint64_t Addr) const;

  uint64_t adjustAddr(uint64_t Addr, PCType Type) const;

  StringRef lineEnding() const;

  raw_ostream &OS;
  LLVMSymbolizer &Symbolizer;
  const bool ColorsEnabled;

  MarkupParser Parser;

  // Current line being filtered.
  StringRef Line;

  // A module info line currently being built. This incorporates as much mmap
  // information as possible before being emitted.
  std::optional<ModuleInfoLine> MIL;

  // SGR state.
  std::optional<raw_ostream::Colors> Color;
  bool Bold = false;

  // Map from Module ID to Module.
  DenseMap<uint64_t, std::unique_ptr<Module>> Modules;

  // Ordered map from starting address to mmap.
  std::map<uint64_t, MMap> MMaps;
};

} // end namespace symbolize
} // end namespace llvm

#endif // LLVM_DEBUGINFO_SYMBOLIZE_MARKUPFILTER_H
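
// --- Editorial usage sketch (not part of the original headers). Feeding a
// log through the markup filter line by line, then flushing deferred output.
#include "llvm/DebugInfo/Symbolize/MarkupFilter.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

static void filterMarkupLog(const llvm::MemoryBuffer &Log) {
  llvm::symbolize::LLVMSymbolizer Symbolizer;
  llvm::symbolize::MarkupFilter Filter(llvm::outs(), Symbolizer);
  for (llvm::line_iterator I(Log, /*SkipBlanks=*/false); !I.is_at_eof(); ++I)
    Filter.filter(*I); // invalid/unknown markup is dropped or deferred
  Filter.finish(); // emit anything still deferred (e.g. a module info line)
}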

//===- Markup.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares the log symbolizer markup data model and parser.
///
/// See https://llvm.org/docs/SymbolizerMarkupFormat.html
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_SYMBOLIZE_MARKUP_H
#define LLVM_DEBUGINFO_SYMBOLIZE_MARKUP_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Regex.h"

namespace llvm {
namespace symbolize {

/// A node of symbolizer markup.
///
/// If only the Text field is set, this represents a region of text outside a
/// markup element. ANSI SGR control codes are also reported this way; if
/// detected, then the control code will be the entirety of the Text field, and
/// any surrounding text will be reported as preceding and following nodes.
struct MarkupNode {
  /// The full text of this node in the input.
  StringRef Text;

  /// If this represents an element, the tag. Otherwise, empty.
  StringRef Tag;

  /// If this represents an element with fields, a list of the field contents.
  /// Otherwise, empty.
  SmallVector<StringRef> Fields;

  bool operator==(const MarkupNode &Other) const {
    return Text == Other.Text && Tag == Other.Tag && Fields == Other.Fields;
  }
  bool operator!=(const MarkupNode &Other) const { return !(*this == Other); }
};

/// Parses a log containing symbolizer markup into a sequence of nodes.
class MarkupParser {
public:
  MarkupParser(StringSet<> MultilineTags = {});

  /// Parses an individual \p Line of input.
  ///
  /// Nodes from the previous parseLine() call that haven't yet been extracted
  /// by nextNode() are discarded. The nodes returned by nextNode() may
  /// reference the input string, so it must be retained by the caller until the
  /// last use.
  ///
  /// Note that some elements may span multiple lines. If a line ends with the
  /// start of one of these elements, then no nodes will be produced until the
  /// either the end or something that cannot be part of an element is
  /// encountered. This may only occur after multiple calls to parseLine(),
  /// corresponding to the lines of the multi-line element.
  void parseLine(StringRef Line);

  /// Informs the parser that the input stream has ended.
  ///
  /// This allows the parser to finish any deferred processing (e.g., an
  /// in-progress multi-line element) and may cause nextNode() to return
  /// additional nodes.
  void flush();

  /// Returns the next node in the input sequence.
  ///
  /// Calling nextNode() may invalidate the contents of the node returned by the
  /// previous call.
  ///
  /// \returns the next markup node or std::nullopt if none remain.
  std::optional<MarkupNode> nextNode();

  bool isSGR(const MarkupNode &Node) const {
    return SGRSyntax.match(Node.Text);
  }

private:
  std::optional<MarkupNode> parseElement(StringRef Line);
  void parseTextOutsideMarkup(StringRef Text);
  std::optional<StringRef> parseMultiLineBegin(StringRef Line);
  std::optional<StringRef> parseMultiLineEnd(StringRef Line);

  // Tags of elements that can span multiple lines.
  const StringSet<> MultilineTags;

  // Contents of a multi-line element that has finished being parsed. Retained
  // to keep returned StringRefs for the contents valid.
  std::string FinishedMultiline;

  // Contents of a multi-line element that is still in the process of receiving
  // lines.
  std::string InProgressMultiline;

  // The line currently being parsed.
  StringRef Line;

  // Buffer for nodes parsed from the current line.
  SmallVector<MarkupNode> Buffer;

  // Next buffer index to return.
  size_t NextIdx;

  // Regular expression matching supported ANSI SGR escape sequences.
  const Regex SGRSyntax;
};

} // end namespace symbolize
} // end namespace llvm

#endif // LLVM_DEBUGINFO_SYMBOLIZE_MARKUP_H
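
// --- Editorial usage sketch (not part of the original headers). The
// parseLine()/nextNode()/flush() protocol described above, on one input line.
#include "llvm/DebugInfo/Symbolize/Markup.h"
#include "llvm/Support/raw_ostream.h"

static void dumpMarkupNodes(llvm::StringRef InputLine) {
  llvm::symbolize::MarkupParser Parser;
  auto Drain = [&Parser] {
    while (auto Node = Parser.nextNode()) {
      if (Node->Tag.empty())
        llvm::outs() << "text: " << Node->Text << "\n";
      else
        llvm::outs() << "element: " << Node->Tag << " with "
                     << Node->Fields.size() << " field(s)\n";
    }
  };
  Parser.parseLine(InputLine); // real logs loop over many lines
  Drain();
  Parser.flush(); // end of input: completes any in-progress multi-line element
  Drain();
}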

//===- Symbolize.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Header for LLVM symbolization library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/BuildID.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace llvm {
namespace object {
class ELFObjectFileBase;
class MachOObjectFile;
class ObjectFile;
struct SectionedAddress;
} // namespace object

namespace symbolize {

class SymbolizableModule;

using namespace object;

using FunctionNameKind = DILineInfoSpecifier::FunctionNameKind;
using FileLineInfoKind = DILineInfoSpecifier::FileLineInfoKind;

class CachedBinary;

class LLVMSymbolizer {
public:
  struct Options {
    FunctionNameKind PrintFunctions = FunctionNameKind::LinkageName;
    FileLineInfoKind PathStyle = FileLineInfoKind::AbsoluteFilePath;
    bool UseSymbolTable = true;
    bool Demangle = true;
    bool RelativeAddresses = false;
    bool UntagAddresses = false;
    bool UseDIA = false;
    std::string DefaultArch;
    std::vector<std::string> DsymHints;
    std::string FallbackDebugPath;
    std::string DWPName;
    std::vector<std::string> DebugFileDirectory;
    size_t MaxCacheSize =
        sizeof(size_t) == 4
            ? 512 * 1024 * 1024 /* 512 MiB */
            : static_cast<size_t>(4ULL * 1024 * 1024 * 1024) /* 4 GiB */;
  };

  LLVMSymbolizer();
  LLVMSymbolizer(const Options &Opts);

  ~LLVMSymbolizer();

  // Overloads accepting an ObjectFile do not currently support COFF.
  Expected<DILineInfo> symbolizeCode(const ObjectFile &Obj,
                                     object::SectionedAddress ModuleOffset);
  Expected<DILineInfo> symbolizeCode(const std::string &ModuleName,
                                     object::SectionedAddress ModuleOffset);
  Expected<DILineInfo> symbolizeCode(ArrayRef<uint8_t> BuildID,
                                     object::SectionedAddress ModuleOffset);
  Expected<DIInliningInfo>
  symbolizeInlinedCode(const ObjectFile &Obj,
                       object::SectionedAddress ModuleOffset);
  Expected<DIInliningInfo>
  symbolizeInlinedCode(const std::string &ModuleName,
                       object::SectionedAddress ModuleOffset);
  Expected<DIInliningInfo>
  symbolizeInlinedCode(ArrayRef<uint8_t> BuildID,
                       object::SectionedAddress ModuleOffset);

  Expected<DIGlobal> symbolizeData(const ObjectFile &Obj,
                                   object::SectionedAddress ModuleOffset);
  Expected<DIGlobal> symbolizeData(const std::string &ModuleName,
                                   object::SectionedAddress ModuleOffset);
  Expected<DIGlobal> symbolizeData(ArrayRef<uint8_t> BuildID,
                                   object::SectionedAddress ModuleOffset);
  Expected<std::vector<DILocal>>
  symbolizeFrame(const ObjectFile &Obj, object::SectionedAddress ModuleOffset);
  Expected<std::vector<DILocal>>
  symbolizeFrame(const std::string &ModuleName,
                 object::SectionedAddress ModuleOffset);
  Expected<std::vector<DILocal>>
  symbolizeFrame(ArrayRef<uint8_t> BuildID,
                 object::SectionedAddress ModuleOffset);
  void flush();

  // Evict entries from the binary cache until it is under the maximum size
  // given in the options. Calling this invalidates references in the DI...
  // objects returned by the methods above.
  void pruneCache();

  static std::string
  DemangleName(const std::string &Name,
               const SymbolizableModule *DbiModuleDescriptor);

  void setBuildIDFetcher(std::unique_ptr<BuildIDFetcher> Fetcher) {
    BIDFetcher = std::move(Fetcher);
  }

  /// Returns a SymbolizableModule or an error if loading debug info failed.
  /// Only one attempt is made to load a module, and errors during loading are
  /// only reported once. Subsequent calls to get module info for a module that
  /// failed to load will return nullptr.
  Expected<SymbolizableModule *>
  getOrCreateModuleInfo(const std::string &ModuleName);

private:
  // Bundles together object file with code/data and object file with
  // corresponding debug info. These objects can be the same.
  using ObjectPair = std::pair<const ObjectFile *, const ObjectFile *>;

  template <typename T>
  Expected<DILineInfo>
  symbolizeCodeCommon(const T &ModuleSpecifier,
                      object::SectionedAddress ModuleOffset);
  template <typename T>
  Expected<DIInliningInfo>
  symbolizeInlinedCodeCommon(const T &ModuleSpecifier,
                             object::SectionedAddress ModuleOffset);
  template <typename T>
  Expected<DIGlobal> symbolizeDataCommon(const T &ModuleSpecifier,
                                         object::SectionedAddress ModuleOffset);
  template <typename T>
  Expected<std::vector<DILocal>>
  symbolizeFrameCommon(const T &ModuleSpecifier,
                       object::SectionedAddress ModuleOffset);

  Expected<SymbolizableModule *> getOrCreateModuleInfo(const ObjectFile &Obj);

  /// Returns a SymbolizableModule or an error if loading debug info failed.
  /// Unlike the above, errors are reported each time, since they are more
  /// likely to be transient.
  Expected<SymbolizableModule *>
  getOrCreateModuleInfo(ArrayRef<uint8_t> BuildID);

  Expected<SymbolizableModule *>
  createModuleInfo(const ObjectFile *Obj, std::unique_ptr<DIContext> Context,
                   StringRef ModuleName);

  ObjectFile *lookUpDsymFile(const std::string &Path,
                             const MachOObjectFile *ExeObj,
                             const std::string &ArchName);
  ObjectFile *lookUpDebuglinkObject(const std::string &Path,
                                    const ObjectFile *Obj,
                                    const std::string &ArchName);
  ObjectFile *lookUpBuildIDObject(const std::string &Path,
                                  const ELFObjectFileBase *Obj,
                                  const std::string &ArchName);

  bool findDebugBinary(const std::string &OrigPath,
                       const std::string &DebuglinkName, uint32_t CRCHash,
                       std::string &Result);

  bool getOrFindDebugBinary(const ArrayRef<uint8_t> BuildID,
                            std::string &Result);

  /// Returns pair of pointers to object and debug object.
  Expected<ObjectPair> getOrCreateObjectPair(const std::string &Path,
                                             const std::string &ArchName);

  /// Return a pointer to object file at specified path, for a specified
  /// architecture (e.g. if path refers to a Mach-O universal binary, only one
  /// object file from it will be returned).
  Expected<ObjectFile *> getOrCreateObject(const std::string &Path,
                                           const std::string &ArchName);

  /// Update the LRU cache order when a binary is accessed.
  void recordAccess(CachedBinary &Bin);

  std::map<std::string, std::unique_ptr<SymbolizableModule>, std::less<>>
      Modules;
  StringMap<std::string> BuildIDPaths;

  /// Contains cached results of getOrCreateObjectPair().
  std::map<std::pair<std::string, std::string>, ObjectPair>
      ObjectPairForPathArch;

  /// Contains parsed binary for each path, or parsing error.
  std::map<std::string, CachedBinary> BinaryForPath;

  /// A list of cached binaries in LRU order.
  simple_ilist<CachedBinary> LRUBinaries;
  /// Sum of the sizes of the cached binaries.
  size_t CacheSize = 0;

  /// Parsed object file for path/architecture pair, where "path" refers
  /// to Mach-O universal binary.
  std::map<std::pair<std::string, std::string>, std::unique_ptr<ObjectFile>>
      ObjectForUBPathAndArch;

  Options Opts;

  std::unique_ptr<BuildIDFetcher> BIDFetcher;
};

// A binary intrusively linked into an LRU cache list. If the binary is empty,
// then the entry marks that an error occurred, and it is not part of the LRU
// list.
class CachedBinary : public ilist_node<CachedBinary> {
public:
  CachedBinary() = default;
  CachedBinary(OwningBinary<Binary> Bin) : Bin(std::move(Bin)) {}

  OwningBinary<Binary> &operator*() { return Bin; }
  OwningBinary<Binary> *operator->() { return &Bin; }

  // Add an action to be performed when the binary is evicted, before all
  // previously registered evictors.
  void pushEvictor(std::function<void()> Evictor);

  // Run all registered evictors in the reverse of the order in which they were
  // added.
  void evict() {
    if (Evictor)
      Evictor();
  }

  size_t size() { return Bin.getBinary()->getData().size(); }

private:
  OwningBinary<Binary> Bin;
  std::function<void()> Evictor;
};

} // end namespace symbolize
} // end namespace llvm

#endif // LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
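
// --- Editorial usage sketch (not part of the original headers). Symbolizing
// one code address in a module; the path and offset are placeholders.
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
#include "llvm/Support/raw_ostream.h"

static void symbolizeOne(const std::string &BinaryPath, uint64_t Offset) {
  llvm::symbolize::LLVMSymbolizer::Options Opts;
  Opts.Demangle = true; // the default, spelled out for illustration
  llvm::symbolize::LLVMSymbolizer Symbolizer(Opts);
  llvm::object::SectionedAddress Addr;
  Addr.Address = Offset; // SectionIndex is left as UndefSection
  llvm::Expected<llvm::DILineInfo> InfoOrErr =
      Symbolizer.symbolizeCode(BinaryPath, Addr);
  if (!InfoOrErr) {
    llvm::logAllUnhandledErrors(InfoOrErr.takeError(), llvm::errs());
    return;
  }
  llvm::outs() << InfoOrErr->FunctionName << " at " << InfoOrErr->FileName
               << ":" << InfoOrErr->Line << "\n";
}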

//===- SymbolizableModule.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SymbolizableModule interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H

#include "llvm/DebugInfo/DIContext.h"
#include <cstdint>

namespace llvm {
namespace symbolize {

using FunctionNameKind = DILineInfoSpecifier::FunctionNameKind;

class SymbolizableModule {
public:
  virtual ~SymbolizableModule() = default;

  virtual DILineInfo symbolizeCode(object::SectionedAddress ModuleOffset,
                                   DILineInfoSpecifier LineInfoSpecifier,
                                   bool UseSymbolTable) const = 0;
  virtual DIInliningInfo
  symbolizeInlinedCode(object::SectionedAddress ModuleOffset,
                       DILineInfoSpecifier LineInfoSpecifier,
                       bool UseSymbolTable) const = 0;
  virtual DIGlobal
  symbolizeData(object::SectionedAddress ModuleOffset) const = 0;
  virtual std::vector<DILocal>
  symbolizeFrame(object::SectionedAddress ModuleOffset) const = 0;

  // Return true if this is a 32-bit x86 PE COFF module.
  virtual bool isWin32Module() const = 0;

  // Returns the preferred base of the module, i.e. where the loader would place
  // it in memory assuming there were no conflicts.
  virtual uint64_t getModulePreferredBase() const = 0;
};

} // end namespace symbolize
} // end namespace llvm

#endif  // LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H

//===-- llvm/DebugInfo/Symbolize/DIFetcher.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares a DIFetcher abstraction for obtaining debug info from an
/// arbitrary outside source.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_SYMBOLIZE_DIFETCHER_H
#define LLVM_DEBUGINFO_SYMBOLIZE_DIFETCHER_H

#include <cstdint>
#include <string>

#include "llvm/ADT/ArrayRef.h"

namespace llvm {
namespace symbolize {

/// The DIFetcher interface provides arbitrary mechanisms for obtaining debug
/// info from an outside source.
class DIFetcher {
public:
  virtual ~DIFetcher() = default;
  virtual Optional<std::string>
  fetchBuildID(ArrayRef<uint8_t> BuildID) const = 0;
};

/// LocalDIFetcher searches local cache directories for debug info.
class LocalDIFetcher : public DIFetcher {
public:
  LocalDIFetcher(ArrayRef<std::string> DebugFileDirectory)
      : DebugFileDirectory(DebugFileDirectory) {}
  virtual ~LocalDIFetcher() = default;

  Optional<std::string> fetchBuildID(ArrayRef<uint8_t> BuildID) const override;

private:
  const ArrayRef<std::string> DebugFileDirectory;
};

} // end namespace symbolize
} // end namespace llvm

#endif // LLVM_DEBUGINFO_SYMBOLIZE_DIFETCHER_H
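
// --- Editorial sketch (not part of the original headers). Implementing the
// DIFetcher interface with a hypothetical in-memory build-ID table.
#include "llvm/DebugInfo/Symbolize/DIFetcher.h"
#include <map>
#include <vector>

namespace example {
class TableDIFetcher : public llvm::symbolize::DIFetcher {
public:
  llvm::Optional<std::string>
  fetchBuildID(llvm::ArrayRef<uint8_t> BuildID) const override {
    auto It = Paths.find(std::vector<uint8_t>(BuildID.begin(), BuildID.end()));
    if (It == Paths.end())
      return llvm::None; // unknown build ID; other fetchers may still succeed
    return It->second;
  }
  std::map<std::vector<uint8_t>, std::string> Paths; // build ID -> file path
};
} // namespace example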

//===- llvm/DebugInfo/Symbolize/DIPrinter.h ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the DIPrinter class, which is responsible for printing
// structures defined in DebugInfo/DIContext.h
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H
#define LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/JSON.h"
#include <memory>
#include <vector>

namespace llvm {
struct DILineInfo;
class DIInliningInfo;
struct DIGlobal;
struct DILocal;
class ErrorInfoBase;
class raw_ostream;

namespace symbolize {

class SourceCode;

struct Request {
  StringRef ModuleName;
  std::optional<uint64_t> Address;
};

class DIPrinter {
public:
  DIPrinter() = default;
  virtual ~DIPrinter() = default;

  virtual void print(const Request &Request, const DILineInfo &Info) = 0;
  virtual void print(const Request &Request, const DIInliningInfo &Info) = 0;
  virtual void print(const Request &Request, const DIGlobal &Global) = 0;
  virtual void print(const Request &Request,
                     const std::vector<DILocal> &Locals) = 0;

  virtual void printInvalidCommand(const Request &Request,
                                   StringRef Command) = 0;

  virtual bool printError(const Request &Request,
                          const ErrorInfoBase &ErrorInfo) = 0;

  virtual void listBegin() = 0;
  virtual void listEnd() = 0;
};

struct PrinterConfig {
  bool PrintAddress;
  bool PrintFunctions;
  bool Pretty;
  bool Verbose;
  int SourceContextLines;
};

using ErrorHandler = function_ref<void(const ErrorInfoBase &, StringRef)>;

class PlainPrinterBase : public DIPrinter {
protected:
  raw_ostream &OS;
  ErrorHandler ErrHandler;
  PrinterConfig Config;

  void print(const DILineInfo &Info, bool Inlined);
  void printFunctionName(StringRef FunctionName, bool Inlined);
  virtual void printSimpleLocation(StringRef Filename,
                                   const DILineInfo &Info) = 0;
  void printContext(SourceCode SourceCode);
  void printVerbose(StringRef Filename, const DILineInfo &Info);
  virtual void printStartAddress(const DILineInfo &Info) {}
  virtual void printFooter() {}

private:
  void printHeader(uint64_t Address);

public:
  PlainPrinterBase(raw_ostream &OS, ErrorHandler EH, PrinterConfig &Config)
      : OS(OS), ErrHandler(EH), Config(Config) {}

  void print(const Request &Request, const DILineInfo &Info) override;
  void print(const Request &Request, const DIInliningInfo &Info) override;
  void print(const Request &Request, const DIGlobal &Global) override;
  void print(const Request &Request,
             const std::vector<DILocal> &Locals) override;

  void printInvalidCommand(const Request &Request, StringRef Command) override;

  bool printError(const Request &Request,
                  const ErrorInfoBase &ErrorInfo) override;

  void listBegin() override {}
  void listEnd() override {}
};

class LLVMPrinter : public PlainPrinterBase {
private:
  void printSimpleLocation(StringRef Filename, const DILineInfo &Info) override;
  void printStartAddress(const DILineInfo &Info) override;
  void printFooter() override;

public:
  LLVMPrinter(raw_ostream &OS, ErrorHandler EH, PrinterConfig &Config)
      : PlainPrinterBase(OS, EH, Config) {}
};

class GNUPrinter : public PlainPrinterBase {
private:
  void printSimpleLocation(StringRef Filename, const DILineInfo &Info) override;

public:
  GNUPrinter(raw_ostream &OS, ErrorHandler EH, PrinterConfig &Config)
      : PlainPrinterBase(OS, EH, Config) {}

};

class JSONPrinter : public DIPrinter {
private:
  raw_ostream &OS;
  PrinterConfig Config;
  std::unique_ptr<json::Array> ObjectList;

  void printJSON(const json::Value &V) {
    json::OStream JOS(OS, Config.Pretty ? 2 : 0);
    JOS.value(V);
    OS << '\n';
  }

public:
  JSONPrinter(raw_ostream &OS, PrinterConfig &Config)
      : OS(OS), Config(Config) {}

  void print(const Request &Request, const DILineInfo &Info) override;
  void print(const Request &Request, const DIInliningInfo &Info) override;
  void print(const Request &Request, const DIGlobal &Global) override;
  void print(const Request &Request,
             const std::vector<DILocal> &Locals) override;

  void printInvalidCommand(const Request &Request, StringRef Command) override;

  bool printError(const Request &Request,
                  const ErrorInfoBase &ErrorInfo) override;

  void listBegin() override;
  void listEnd() override;
};
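
// A usage sketch under stated assumptions (not code from LLVM): pick one of
// the concrete printers above and drive the list protocol for a single
// line-info record. 'printOneLineInfo' is a hypothetical helper; the no-op
// error handler is kept in a local so the stored function_ref cannot dangle.
// listBegin()/listEnd() bracket multi-record output; they are no-ops for the
// plain printers.
inline void printOneLineInfo(raw_ostream &OS, PrinterConfig &Config,
                             const Request &Req, const DILineInfo &Info) {
  auto IgnoreError = [](const ErrorInfoBase &, StringRef) {};
  LLVMPrinter Printer(OS, IgnoreError, Config);
  Printer.listBegin();
  Printer.print(Req, Info);
  Printer.listEnd();
}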
} // namespace symbolize
} // namespace llvm

#endif
//===-- LVElement.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVElement class, which is used to describe a debug
// information element.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H

#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"
#include "llvm/Support/Casting.h"
#include <map>
#include <set>
#include <vector>

namespace llvm {
namespace logicalview {

// RTTI Subclasses ID.
enum class LVSubclassID : unsigned char {
  LV_ELEMENT,
  LV_LINE_FIRST,
  LV_LINE,
  LV_LINE_DEBUG,
  LV_LINE_ASSEMBLER,
  LV_LINE_LAST,
  LV_SCOPE_FIRST,
  LV_SCOPE,
  LV_SCOPE_AGGREGATE,
  LV_SCOPE_ALIAS,
  LV_SCOPE_ARRAY,
  LV_SCOPE_COMPILE_UNIT,
  LV_SCOPE_ENUMERATION,
  LV_SCOPE_FORMAL_PACK,
  LV_SCOPE_FUNCTION,
  LV_SCOPE_FUNCTION_INLINED,
  LV_SCOPE_FUNCTION_TYPE,
  LV_SCOPE_NAMESPACE,
  LV_SCOPE_ROOT,
  LV_SCOPE_TEMPLATE_PACK,
  LV_SCOPE_LAST,
  LV_SYMBOL_FIRST,
  LV_SYMBOL,
  LV_SYMBOL_LAST,
  LV_TYPE_FIRST,
  LV_TYPE,
  LV_TYPE_DEFINITION,
  LV_TYPE_ENUMERATOR,
  LV_TYPE_IMPORT,
  LV_TYPE_PARAM,
  LV_TYPE_SUBRANGE,
  LV_TYPE_LAST
};
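
// The *_FIRST/*_LAST enumerators bracket each subclass family so that a
// simple range check can answer isa<>-style queries. A minimal sketch of
// that intended use (an assumption; the real checks live in the subclasses):
inline bool isScopeSubclassID(LVSubclassID ID) {
  return ID > LVSubclassID::LV_SCOPE_FIRST && ID < LVSubclassID::LV_SCOPE_LAST;
}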

enum class LVElementKind { Discarded, Global, Optimized, LastEntry };
using LVElementKindSet = std::set<LVElementKind>;
using LVElementDispatch = std::map<LVElementKind, LVElementGetFunction>;
using LVElementRequest = std::vector<LVElementGetFunction>;

class LVElement : public LVObject {
  enum class Property {
    IsLine,   // A logical line.
    IsScope,  // A logical scope.
    IsSymbol, // A logical symbol.
    IsType,   // A logical type.
    IsEnumClass,
    IsExternal,
    HasType,
    HasAugmentedName,
    IsTypedefReduced,
    IsArrayResolved,
    IsMemberPointerResolved,
    IsTemplateResolved,
    IsInlined,
    IsInlinedAbstract,
    InvalidFilename,
    HasReference,
    HasReferenceAbstract,
    HasReferenceExtension,
    HasReferenceSpecification,
    QualifiedResolved,
    IncludeInPrint,
    IsStatic,
    TransformName,
    IsScoped,        // CodeView local type.
    IsNested,        // CodeView nested type.
    IsScopedAlready, // CodeView nested type inserted in correct scope.
    IsArtificial,
    IsReferencedType,
    IsSystem,
    OffsetFromTypeIndex,
    IsAnonymous,
    LastEntry
  };
  // Typed bitvector with properties for this element.
  LVProperties<Property> Properties;
  static LVElementDispatch Dispatch;

  /// RTTI.
  const LVSubclassID SubclassID;

  // Indexes in the String Pool.
  size_t NameIndex = 0;
  size_t QualifiedNameIndex = 0;
  size_t FilenameIndex = 0;

  uint16_t AccessibilityCode : 2; // DW_AT_accessibility.
  uint16_t InlineCode : 2;        // DW_AT_inline.
  uint16_t VirtualityCode : 2;    // DW_AT_virtuality.

  // The given Specification points to an element that is connected via the
  // DW_AT_specification, DW_AT_abstract_origin or DW_AT_extension attribute.
  void setFileLine(LVElement *Specification);

  // Get the qualified name that includes its parents' names.
  void resolveQualifiedName();

protected:
  // Type of this element.
  LVElement *ElementType = nullptr;

  // Print the FileName Index.
  void printFileIndex(raw_ostream &OS, bool Full = true) const override;

public:
  LVElement(LVSubclassID ID)
      : LVObject(), SubclassID(ID), AccessibilityCode(0), InlineCode(0),
        VirtualityCode(0) {}
  LVElement(const LVElement &) = delete;
  LVElement &operator=(const LVElement &) = delete;
  virtual ~LVElement() = default;

  LVSubclassID getSubclassID() const { return SubclassID; }

  PROPERTY(Property, IsLine);
  PROPERTY(Property, IsScope);
  PROPERTY(Property, IsSymbol);
  PROPERTY(Property, IsType);
  PROPERTY(Property, IsEnumClass);
  PROPERTY(Property, IsExternal);
  PROPERTY(Property, HasType);
  PROPERTY(Property, HasAugmentedName);
  PROPERTY(Property, IsTypedefReduced);
  PROPERTY(Property, IsArrayResolved);
  PROPERTY(Property, IsMemberPointerResolved);
  PROPERTY(Property, IsTemplateResolved);
  PROPERTY(Property, IsInlined);
  PROPERTY(Property, IsInlinedAbstract);
  PROPERTY(Property, InvalidFilename);
  PROPERTY(Property, HasReference);
  PROPERTY(Property, HasReferenceAbstract);
  PROPERTY(Property, HasReferenceExtension);
  PROPERTY(Property, HasReferenceSpecification);
  PROPERTY(Property, QualifiedResolved);
  PROPERTY(Property, IncludeInPrint);
  PROPERTY(Property, IsStatic);
  PROPERTY(Property, TransformName);
  PROPERTY(Property, IsScoped);
  PROPERTY(Property, IsNested);
  PROPERTY(Property, IsScopedAlready);
  PROPERTY(Property, IsArtificial);
  PROPERTY(Property, IsReferencedType);
  PROPERTY(Property, IsSystem);
  PROPERTY(Property, OffsetFromTypeIndex);
  PROPERTY(Property, IsAnonymous);

  bool isNamed() const override { return NameIndex != 0; }
  bool isTyped() const override { return ElementType != nullptr; }
  bool isFiled() const override { return FilenameIndex != 0; }

  // The Element class type can point to a Type or Scope.
  bool getIsKindType() const { return ElementType && ElementType->getIsType(); }
  bool getIsKindScope() const {
    return ElementType && ElementType->getIsScope();
  }

  StringRef getName() const override {
    return getStringPool().getString(NameIndex);
  }
  void setName(StringRef ElementName) override;

  // Get pathname associated with the Element.
  StringRef getPathname() const {
    return getStringPool().getString(getFilenameIndex());
  }

  // Set filename associated with the Element.
  void setFilename(StringRef Filename);

  // Set the Element qualified name.
  void setQualifiedName(StringRef Name) {
    QualifiedNameIndex = getStringPool().getIndex(Name);
  }
  StringRef getQualifiedName() const {
    return getStringPool().getString(QualifiedNameIndex);
  }

  size_t getNameIndex() const { return NameIndex; }
  size_t getQualifiedNameIndex() const { return QualifiedNameIndex; }

  void setInnerComponent() { setInnerComponent(getName()); }
  void setInnerComponent(StringRef Name);

  // Element type name.
  StringRef getTypeName() const;

  virtual StringRef getProducer() const { return StringRef(); }
  virtual void setProducer(StringRef ProducerName) {}

  virtual bool isCompileUnit() const { return false; }
  virtual bool isRoot() const { return false; }

  virtual void setReference(LVElement *Element) {}
  virtual void setReference(LVScope *Scope) {}
  virtual void setReference(LVSymbol *Symbol) {}
  virtual void setReference(LVType *Type) {}

  virtual void setLinkageName(StringRef LinkageName) {}
  virtual StringRef getLinkageName() const { return StringRef(); }
  virtual size_t getLinkageNameIndex() const { return 0; }

  virtual uint32_t getCallLineNumber() const { return 0; }
  virtual void setCallLineNumber(uint32_t Number) {}
  virtual size_t getCallFilenameIndex() const { return 0; }
  virtual void setCallFilenameIndex(size_t Index) {}
  size_t getFilenameIndex() const { return FilenameIndex; }
  void setFilenameIndex(size_t Index) { FilenameIndex = Index; }

  // Set the File location for the Element.
  void setFile(LVElement *Reference = nullptr);

  virtual bool isBase() const { return false; }
  virtual bool isTemplateParam() const { return false; }

  virtual uint32_t getBitSize() const { return 0; }
  virtual void setBitSize(uint32_t Size) {}

  virtual int64_t getCount() const { return 0; }
  virtual void setCount(int64_t Value) {}
  virtual int64_t getLowerBound() const { return 0; }
  virtual void setLowerBound(int64_t Value) {}
  virtual int64_t getUpperBound() const { return 0; }
  virtual void setUpperBound(int64_t Value) {}
  virtual std::pair<unsigned, unsigned> getBounds() const { return {}; }
  virtual void setBounds(unsigned Lower, unsigned Upper) {}

  // Access DW_AT_GNU_discriminator attribute.
  virtual uint32_t getDiscriminator() const { return 0; }
  virtual void setDiscriminator(uint32_t Value) {}

  // Process the values for a DW_TAG_enumerator.
  virtual StringRef getValue() const { return {}; }
  virtual void setValue(StringRef Value) {}
  virtual size_t getValueIndex() const { return 0; }

  // DWARF Accessibility Codes.
  uint32_t getAccessibilityCode() const { return AccessibilityCode; }
  void setAccessibilityCode(uint32_t Access) { AccessibilityCode = Access; }
  StringRef
  accessibilityString(uint32_t Access = dwarf::DW_ACCESS_private) const;

  // CodeView Accessibility Codes.
  std::optional<uint32_t> getAccessibilityCode(codeview::MemberAccess Access);
  void setAccessibilityCode(codeview::MemberAccess Access) {
    if (std::optional<uint32_t> Code = getAccessibilityCode(Access))
      AccessibilityCode = Code.value();
  }

  // DWARF Inline Codes.
  uint32_t getInlineCode() const { return InlineCode; }
  void setInlineCode(uint32_t Code) { InlineCode = Code; }
  StringRef inlineCodeString(uint32_t Code) const;

  // DWARF Virtuality Codes.
  uint32_t getVirtualityCode() const { return VirtualityCode; }
  void setVirtualityCode(uint32_t Virtuality) { VirtualityCode = Virtuality; }
  StringRef
  virtualityString(uint32_t Virtuality = dwarf::DW_VIRTUALITY_none) const;

  // CodeView Virtuality Codes.
  std::optional<uint32_t> getVirtualityCode(codeview::MethodKind Virtuality);
  void setVirtualityCode(codeview::MethodKind Virtuality) {
    if (std::optional<uint32_t> Code = getVirtualityCode(Virtuality))
      VirtualityCode = Code.value();
  }

  // DWARF Extern Codes.
  StringRef externalString() const;

  LVElement *getType() const { return ElementType; }
  LVType *getTypeAsType() const;
  LVScope *getTypeAsScope() const;

  void setType(LVElement *Element = nullptr) {
    ElementType = Element;
    if (Element) {
      setHasType();
      Element->setIsReferencedType();
    }
  }

  // Set the type for the element, handling template parameters.
  void setGenericType(LVElement *Element);

  StringRef getTypeQualifiedName() const {
    return ElementType ? ElementType->getQualifiedName() : "";
  }

  StringRef typeAsString() const;
  std::string typeOffsetAsString() const;
  std::string discriminatorAsString() const;

  LVScope *traverseParents(LVScopeGetFunction GetFunction) const;

  LVScope *getFunctionParent() const;
  virtual LVScope *getCompileUnitParent() const;

  // Print any referenced element.
  void printReference(raw_ostream &OS, bool Full, LVElement *Parent) const;

  // Print the linkage name (Symbols and functions).
  void printLinkageName(raw_ostream &OS, bool Full, LVElement *Parent,
                        LVScope *Scope) const;
  void printLinkageName(raw_ostream &OS, bool Full, LVElement *Parent) const;

  // Generate the full name for the Element.
  void resolveFullname(LVElement *BaseType, StringRef Name = emptyString());

  // Generate a name for unnamed elements.
  void generateName(std::string &Prefix) const;
  void generateName();

  virtual bool removeElement(LVElement *Element) { return false; }
  virtual void updateLevel(LVScope *Parent, bool Moved = false);

  // During parsing of the debug information, the logical elements are
  // created with information extracted from their debug information
  // entries (DIEs), but they are not yet complete for the logical view
  // concept. A second pass is executed to collect the additional
  // information. The following functions 'resolve' some of their
  // properties, such as name, references, parents, and extra information
  // based on the element kind.
  virtual void resolve();
  virtual void resolveExtra() {}
  virtual void resolveName();
  virtual void resolveReferences() {}
  void resolveParents();

  bool referenceMatch(const LVElement *Element) const;

  // Returns true if current element is logically equal to the given 'Element'.
  bool equals(const LVElement *Element) const;

  // Report the current element as missing or added during comparison.
  virtual void report(LVComparePass Pass) {}

  static LVElementDispatch &getDispatch() { return Dispatch; }
};
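
// A minimal sketch (assuming 'Member' and 'BaseType' were created by a
// reader; 'attachType' is a hypothetical helper): chaining a type through
// setType() also records the reverse reference on the type itself.
inline void attachType(LVElement *Member, LVElement *BaseType) {
  Member->setType(BaseType); // Also sets HasType and IsReferencedType.
  if (Member->getIsKindType())
    (void)Member->getTypeName(); // Name of the associated logical type.
}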

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVELEMENT_H
//===-- LVScope.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVScope class, which is used to describe a debug
// information scope.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H

#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"
#include "llvm/DebugInfo/LogicalView/Core/LVLocation.h"
#include "llvm/DebugInfo/LogicalView/Core/LVSort.h"
#include "llvm/Object/ObjectFile.h"
#include <list>
#include <map>
#include <set>

namespace llvm {
namespace logicalview {

// Pair of (name address, code size).
using LVNameInfo = std::pair<LVAddress, uint64_t>;
using LVPublicNames = std::map<LVScope *, LVNameInfo>;
using LVPublicAddresses = std::map<LVAddress, LVNameInfo>;

class LVRange;

enum class LVScopeKind {
  IsAggregate,
  IsArray,
  IsBlock,
  IsCallSite,
  IsCatchBlock,
  IsClass,
  IsCompileUnit,
  IsEntryPoint,
  IsEnumeration,
  IsFunction,
  IsFunctionType,
  IsInlinedFunction,
  IsLabel,
  IsLexicalBlock,
  IsMember,
  IsNamespace,
  IsRoot,
  IsStructure,
  IsSubprogram,
  IsTemplate,
  IsTemplateAlias,
  IsTemplatePack,
  IsTryBlock,
  IsUnion,
  LastEntry
};
using LVScopeKindSet = std::set<LVScopeKind>;
using LVScopeDispatch = std::map<LVScopeKind, LVScopeGetFunction>;
using LVScopeRequest = std::vector<LVScopeGetFunction>;

using LVOffsetElementMap = std::map<LVOffset, LVElement *>;
using LVOffsetLinesMap = std::map<LVOffset, LVLines>;
using LVOffsetLocationsMap = std::map<LVOffset, LVLocations>;
using LVOffsetSymbolMap = std::map<LVOffset, LVSymbol *>;
using LVTagOffsetsMap = std::map<dwarf::Tag, LVOffsets>;

// Class to represent a DWARF Scope.
class LVScope : public LVElement {
  enum class Property {
    HasDiscriminator,
    CanHaveRanges,
    CanHaveLines,
    HasGlobals,
    HasLocals,
    HasLines,
    HasScopes,
    HasSymbols,
    HasTypes,
    IsComdat,
    HasComdatScopes, // Compile Unit has comdat functions.
    HasRanges,
    AddedMissing, // Added missing referenced symbols.
    LastEntry
  };

  // Typed bitvector with kinds and properties for this scope.
  LVProperties<LVScopeKind> Kinds;
  LVProperties<Property> Properties;
  static LVScopeDispatch Dispatch;

  // Coverage factor in units (bytes).
  unsigned CoverageFactor = 0;

  // Calculate coverage factor.
  void calculateCoverage() {
    float CoveragePercentage = 0;
    LVLocation::calculateCoverage(Ranges.get(), CoverageFactor,
                                  CoveragePercentage);
  }

  // Decide if the scope will be printed, based on the conditions given by
  // the command-line options: only-globals, only-locals, or a pattern.
  bool resolvePrinting() const;

  // Find the current scope in the given 'Targets'.
  LVScope *findIn(const LVScopes *Targets) const;

  // Traverse the scope parent tree, executing the given callback function
  // on each scope.
  void traverseParents(LVScopeGetFunction GetFunction,
                       LVScopeSetFunction SetFunction);

protected:
  // Types, Symbols, Scopes, Lines, Locations in this scope.
  std::unique_ptr<LVTypes> Types;
  std::unique_ptr<LVSymbols> Symbols;
  std::unique_ptr<LVScopes> Scopes;
  std::unique_ptr<LVLines> Lines;
  std::unique_ptr<LVLocations> Ranges;

  // Vector of elements (types, scopes and symbols).
  // It is the union of (*Types, *Symbols and *Scopes), kept for the
  // following reasons:
  // - To preserve the order in which the logical elements were read.
  // - To have a single container with all the logical elements, for
  //   traversals that do not require any specific element kind.
  std::unique_ptr<LVElements> Children;

  // Resolve the template parameters/arguments relationship.
  void resolveTemplate();
  void printEncodedArgs(raw_ostream &OS, bool Full) const;

  void printActiveRanges(raw_ostream &OS, bool Full = true) const;
  virtual void printSizes(raw_ostream &OS) const {}
  virtual void printSummary(raw_ostream &OS) const {}

  // Encoded template arguments.
  virtual StringRef getEncodedArgs() const { return StringRef(); }
  virtual void setEncodedArgs(StringRef EncodedArgs) {}

public:
  LVScope() : LVElement(LVSubclassID::LV_SCOPE) {
    setIsScope();
    setIncludeInPrint();
  }
  LVScope(const LVScope &) = delete;
  LVScope &operator=(const LVScope &) = delete;
  virtual ~LVScope() = default;

  static bool classof(const LVElement *Element) {
    return Element->getSubclassID() == LVSubclassID::LV_SCOPE;
  }

  KIND(LVScopeKind, IsAggregate);
  KIND(LVScopeKind, IsArray);
  KIND_2(LVScopeKind, IsBlock, CanHaveRanges, CanHaveLines);
  KIND_1(LVScopeKind, IsCallSite, IsFunction);
  KIND_1(LVScopeKind, IsCatchBlock, IsBlock);
  KIND_1(LVScopeKind, IsClass, IsAggregate);
  KIND_3(LVScopeKind, IsCompileUnit, CanHaveRanges, CanHaveLines,
         TransformName);
  KIND_1(LVScopeKind, IsEntryPoint, IsFunction);
  KIND(LVScopeKind, IsEnumeration);
  KIND_2(LVScopeKind, IsFunction, CanHaveRanges, CanHaveLines);
  KIND_1(LVScopeKind, IsFunctionType, IsFunction);
  KIND_2(LVScopeKind, IsInlinedFunction, IsFunction, IsInlined);
  KIND_1(LVScopeKind, IsLabel, IsFunction);
  KIND_1(LVScopeKind, IsLexicalBlock, IsBlock);
  KIND(LVScopeKind, IsMember);
  KIND(LVScopeKind, IsNamespace);
  KIND_1(LVScopeKind, IsRoot, TransformName);
  KIND_1(LVScopeKind, IsStructure, IsAggregate);
  KIND_1(LVScopeKind, IsSubprogram, IsFunction);
  KIND(LVScopeKind, IsTemplate);
  KIND(LVScopeKind, IsTemplateAlias);
  KIND(LVScopeKind, IsTemplatePack);
  KIND_1(LVScopeKind, IsTryBlock, IsBlock);
  KIND_1(LVScopeKind, IsUnion, IsAggregate);

  PROPERTY(Property, HasDiscriminator);
  PROPERTY(Property, CanHaveRanges);
  PROPERTY(Property, CanHaveLines);
  PROPERTY(Property, HasGlobals);
  PROPERTY(Property, HasLocals);
  PROPERTY(Property, HasLines);
  PROPERTY(Property, HasScopes);
  PROPERTY(Property, HasSymbols);
  PROPERTY(Property, HasTypes);
  PROPERTY(Property, IsComdat);
  PROPERTY(Property, HasComdatScopes);
  PROPERTY(Property, HasRanges);
  PROPERTY(Property, AddedMissing);

  bool isCompileUnit() const override { return getIsCompileUnit(); }
  bool isRoot() const override { return getIsRoot(); }

  const char *kind() const override;

  // Get the specific children.
  const LVLines *getLines() const { return Lines.get(); }
  const LVLocations *getRanges() const { return Ranges.get(); }
  const LVScopes *getScopes() const { return Scopes.get(); }
  const LVSymbols *getSymbols() const { return Symbols.get(); }
  const LVTypes *getTypes() const { return Types.get(); }
  const LVElements *getChildren() const { return Children.get(); }

  void addElement(LVElement *Element);
  void addElement(LVLine *Line);
  void addElement(LVScope *Scope);
  void addElement(LVSymbol *Symbol);
  void addElement(LVType *Type);
  void addObject(LVLocation *Location);
  void addObject(LVAddress LowerAddress, LVAddress UpperAddress);
  void addToChildren(LVElement *Element);

  // Add the missing elements from the given 'Reference', which is the scope
  // associated with any DW_AT_specification or DW_AT_abstract_origin.
  void addMissingElements(LVScope *Reference);

  // Traverse the scope parent tree and the children, executing the given
  // callback function on each element.
  void traverseParentsAndChildren(LVObjectGetFunction GetFunction,
                                  LVObjectSetFunction SetFunction);

  // Get the size of specific children.
  size_t lineCount() const { return Lines ? Lines->size() : 0; }
  size_t rangeCount() const { return Ranges ? Ranges->size() : 0; }
  size_t scopeCount() const { return Scopes ? Scopes->size() : 0; }
  size_t symbolCount() const { return Symbols ? Symbols->size() : 0; }
  size_t typeCount() const { return Types ? Types->size() : 0; }

  // Find containing parent for the given address.
  LVScope *outermostParent(LVAddress Address);

  // Get all the locations associated with symbols.
  void getLocations(LVLocations &LocationList, LVValidLocation ValidLocation,
                    bool RecordInvalid = false);
  void getRanges(LVLocations &LocationList, LVValidLocation ValidLocation,
                 bool RecordInvalid = false);
  void getRanges(LVRange &RangeList);

  unsigned getCoverageFactor() const { return CoverageFactor; }

  Error doPrint(bool Split, bool Match, bool Print, raw_ostream &OS,
                bool Full = true) const override;
  // Sort the logical elements using the criteria specified by the
  // command line option '--output-sort'.
  void sort();

  // Get template parameter types.
  bool getTemplateParameterTypes(LVTypes &Params);

  // DW_AT_specification, DW_AT_abstract_origin, DW_AT_extension.
  virtual LVScope *getReference() const { return nullptr; }

  LVScope *getCompileUnitParent() const override {
    return LVElement::getCompileUnitParent();
  }

  // Follow a chain of references given by DW_AT_abstract_origin and/or
  // DW_AT_specification and update the scope name.
  StringRef resolveReferencesChain();

  bool removeElement(LVElement *Element) override;
  void updateLevel(LVScope *Parent, bool Moved) override;

  void resolve() override;
  void resolveName() override;
  void resolveReferences() override;

  // Return the chain of parents as a string.
  void getQualifiedName(std::string &QualifiedName) const;
  // Encode the template arguments.
  void encodeTemplateArguments(std::string &Name) const;
  void encodeTemplateArguments(std::string &Name, const LVTypes *Types) const;

  void resolveElements();

  // Iterate through the 'References' set and check that all its elements
  // are present in the 'Targets' set. For a missing element, mark its
  // parents as missing.
  static void markMissingParents(const LVScopes *References,
                                 const LVScopes *Targets,
                                 bool TraverseChildren);

  // Checks if the current scope is contained within the target scope.
  // Depending on the result, the callback may be performed.
  virtual void markMissingParents(const LVScope *Target, bool TraverseChildren);

  // Returns true if the current scope and the given 'Scope' have the
  // same number of children.
  virtual bool equalNumberOfChildren(const LVScope *Scope) const;

  // Returns true if current scope is logically equal to the given 'Scope'.
  virtual bool equals(const LVScope *Scope) const;

  // Returns true if the given 'References' are logically equal to the
  // given 'Targets'.
  static bool equals(const LVScopes *References, const LVScopes *Targets);

  // For the given 'Scopes' returns a scope that is logically equal
  // to the current scope; otherwise 'nullptr'.
  virtual LVScope *findEqualScope(const LVScopes *Scopes) const;

  // Report the current scope as missing or added during comparison.
  void report(LVComparePass Pass) override;

  static LVScopeDispatch &getDispatch() { return Dispatch; }

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;
  virtual void printWarnings(raw_ostream &OS, bool Full = true) const {}
  virtual void printMatchedElements(raw_ostream &OS, bool UseMatchedElements) {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const override { print(dbgs()); }
#endif
};
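
// A traversal sketch (a hypothetical helper, assuming LVScopes iterates
// over 'LVScope *' as declared in LVObject.h): visit a scope and all of
// its nested scopes using the child accessors above.
inline void visitScopes(const LVScope *Scope) {
  if (const LVScopes *Scopes = Scope->getScopes())
    for (const LVScope *Nested : *Scopes)
      visitScopes(Nested);
}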

// Class to represent a DWARF Union/Structure/Class.
class LVScopeAggregate final : public LVScope {
  LVScope *Reference = nullptr; // DW_AT_specification, DW_AT_abstract_origin.
  size_t EncodedArgsIndex = 0;  // Template encoded arguments.

public:
  LVScopeAggregate() : LVScope() {}
  LVScopeAggregate(const LVScopeAggregate &) = delete;
  LVScopeAggregate &operator=(const LVScopeAggregate &) = delete;
  ~LVScopeAggregate() = default;

  // DW_AT_specification, DW_AT_abstract_origin.
  LVScope *getReference() const override { return Reference; }
  void setReference(LVScope *Scope) override {
    Reference = Scope;
    setHasReference();
  }
  void setReference(LVElement *Element) override {
    setReference(static_cast<LVScope *>(Element));
  }

  StringRef getEncodedArgs() const override {
    return getStringPool().getString(EncodedArgsIndex);
  }
  void setEncodedArgs(StringRef EncodedArgs) override {
    EncodedArgsIndex = getStringPool().getIndex(EncodedArgs);
  }

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  // For the given 'Scopes' returns a scope that is logically equal
  // to the current scope; otherwise 'nullptr'.
  LVScope *findEqualScope(const LVScopes *Scopes) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF Template alias.
class LVScopeAlias final : public LVScope {
public:
  LVScopeAlias() : LVScope() {
    setIsTemplateAlias();
    setIsTemplate();
  }
  LVScopeAlias(const LVScopeAlias &) = delete;
  LVScopeAlias &operator=(const LVScopeAlias &) = delete;
  ~LVScopeAlias() = default;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF array (DW_TAG_array_type).
class LVScopeArray final : public LVScope {
public:
  LVScopeArray() : LVScope() { setIsArray(); }
  LVScopeArray(const LVScopeArray &) = delete;
  LVScopeArray &operator=(const LVScopeArray &) = delete;
  ~LVScopeArray() = default;

  void resolveExtra() override;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF Compilation Unit (CU).
class LVScopeCompileUnit final : public LVScope {
  // Names (files and directories) used by the Compile Unit.
  std::vector<size_t> Filenames;

  // As the .debug_pubnames section was removed in DWARF 5, we provide
  // similar functionality, which is used by the decoded functions. We use
  // the low-pc and high-pc of those scopes that are marked as public, in
  // order to support both DWARF and CodeView.
  LVPublicNames PublicNames;

  // Toolchain producer.
  size_t ProducerIndex = 0;

  // Compilation directory name.
  size_t CompilationDirectoryIndex = 0;

  // Used by the CodeView Reader.
  codeview::CPUType CompilationCPUType = codeview::CPUType::X64;

  // Keep a record of elements. They are needed at the compilation unit
  // level to print the summary at the end of printing.
  LVCounter Allocated;
  LVCounter Found;
  LVCounter Printed;

  // Elements that match a given command line pattern.
  LVElements MatchedElements;
  LVScopes MatchedScopes;

  // Records the mapping between a logical line representing a debug line
  // entry and its address in the text section. It is used to find a line
  // given its exact or closest address. To support comdat functions, all
  // addresses for the same section are recorded in the same map.
  using LVAddressToLine = std::map<LVAddress, LVLine *>;
  LVDoubleMap<LVSectionIndex, LVAddress, LVLine *> SectionMappings;

  // DWARF Tags (Tag, Element list).
  LVTagOffsetsMap DebugTags;

  // Offsets associated with objects flagged as having invalid data
  // (ranges, locations, zero lines, or coverage values).
  LVOffsetElementMap WarningOffsets;

  // Symbols with invalid locations. (Symbol, Location List).
  LVOffsetLocationsMap InvalidLocations;

  // Symbols with invalid coverage values.
  LVOffsetSymbolMap InvalidCoverages;

  // Scopes with invalid ranges (Scope, Range list).
  LVOffsetLocationsMap InvalidRanges;

  // Scopes with lines zero (Scope, Line list).
  LVOffsetLinesMap LinesZero;

  // Record the scopes' contribution in bytes to the debug information.
  using LVSizesMap = std::map<const LVScope *, LVOffset>;
  LVSizesMap Sizes;
  LVOffset CUContributionSize = 0;

  // Helper function to add an invalid location/range.
  void addInvalidLocationOrRange(LVLocation *Location, LVElement *Element,
                                 LVOffsetLocationsMap *Map) {
    LVOffset Offset = Element->getOffset();
    addInvalidOffset(Offset, Element);
    addItem<LVOffsetLocationsMap, LVOffset, LVLocation *>(Map, Offset,
                                                          Location);
  }

  // Record scope sizes indexed by lexical level.
  // The initial size is large enough to cover very deeply nested scopes.
  const size_t TotalInitialSize = 8;
  using LVTotalsEntry = std::pair<unsigned, float>;
  SmallVector<LVTotalsEntry> Totals;
  // Maximum seen lexical level. It is used to control how many entries
  // in the 'Totals' vector are valid values.
  LVLevel MaxSeenLevel = 0;

  // Get the line located at the given address.
  LVLine *lineLowerBound(LVAddress Address, LVScope *Scope) const;
  LVLine *lineUpperBound(LVAddress Address, LVScope *Scope) const;

  void printScopeSize(const LVScope *Scope, raw_ostream &OS);
  void printScopeSize(const LVScope *Scope, raw_ostream &OS) const {
    (const_cast<LVScopeCompileUnit *>(this))->printScopeSize(Scope, OS);
  }
  void printTotals(raw_ostream &OS) const;

protected:
  void printSizes(raw_ostream &OS) const override;
  void printSummary(raw_ostream &OS) const override;

public:
  LVScopeCompileUnit() : LVScope(), Totals(TotalInitialSize, {0, 0.0}) {
    setIsCompileUnit();
  }
  LVScopeCompileUnit(const LVScopeCompileUnit &) = delete;
  LVScopeCompileUnit &operator=(const LVScopeCompileUnit &) = delete;
  ~LVScopeCompileUnit() = default;

  LVScope *getCompileUnitParent() const override {
    return static_cast<LVScope *>(const_cast<LVScopeCompileUnit *>(this));
  }

  // Add line to address mapping.
  void addMapping(LVLine *Line, LVSectionIndex SectionIndex);
  LVLineRange lineRange(LVLocation *Location) const;

  LVNameInfo NameNone = {UINT64_MAX, 0};
  void addPublicName(LVScope *Scope, LVAddress LowPC, LVAddress HighPC) {
    PublicNames.emplace(std::piecewise_construct, std::forward_as_tuple(Scope),
                        std::forward_as_tuple(LowPC, HighPC - LowPC));
  }
  const LVNameInfo &findPublicName(LVScope *Scope) {
    LVPublicNames::iterator Iter = PublicNames.find(Scope);
    return (Iter != PublicNames.end()) ? Iter->second : NameNone;
  }
  const LVPublicNames &getPublicNames() const { return PublicNames; }

  // The base address of the scope for any of the debugging information
  // entries listed is given by either the DW_AT_low_pc attribute or the
  // first address in the first range entry in the list of ranges given by
  // the DW_AT_ranges attribute.
  LVAddress getBaseAddress() const {
    return Ranges ? Ranges->front()->getLowerAddress() : 0;
  }

  StringRef getCompilationDirectory() const {
    return getStringPool().getString(CompilationDirectoryIndex);
  }
  void setCompilationDirectory(StringRef CompilationDirectory) {
    CompilationDirectoryIndex = getStringPool().getIndex(CompilationDirectory);
  }

  StringRef getFilename(size_t Index) const;
  void addFilename(StringRef Name) {
    Filenames.push_back(getStringPool().getIndex(Name));
  }

  StringRef getProducer() const override {
    return getStringPool().getString(ProducerIndex);
  }
  void setProducer(StringRef ProducerName) override {
    ProducerIndex = getStringPool().getIndex(ProducerName);
  }

  void setCPUType(codeview::CPUType Type) { CompilationCPUType = Type; }
  codeview::CPUType getCPUType() { return CompilationCPUType; }

  // Record DWARF tags.
  void addDebugTag(dwarf::Tag Target, LVOffset Offset);
  // Record elements with invalid offsets.
  void addInvalidOffset(LVOffset Offset, LVElement *Element);
  // Record symbols with invalid coverage values.
  void addInvalidCoverage(LVSymbol *Symbol);
  // Record symbols with invalid locations.
  void addInvalidLocation(LVLocation *Location);
  // Record scopes with invalid ranges.
  void addInvalidRange(LVLocation *Location);
  // Record line zero.
  void addLineZero(LVLine *Line);

  const LVTagOffsetsMap &getDebugTags() const { return DebugTags; }
  const LVOffsetElementMap &getWarningOffsets() const { return WarningOffsets; }
  const LVOffsetLocationsMap &getInvalidLocations() const {
    return InvalidLocations;
  }
  const LVOffsetSymbolMap &getInvalidCoverages() const {
    return InvalidCoverages;
  }
  const LVOffsetLocationsMap &getInvalidRanges() const { return InvalidRanges; }
  const LVOffsetLinesMap &getLinesZero() const { return LinesZero; }

  // Process ranges, locations and calculate coverage.
  void processRangeLocationCoverage(
      LVValidLocation ValidLocation = &LVLocation::validateRanges);

  // Add matched element.
  void addMatched(LVElement *Element) { MatchedElements.push_back(Element); }
  void addMatched(LVScope *Scope) { MatchedScopes.push_back(Scope); }
  void propagatePatternMatch();

  const LVElements &getMatchedElements() const { return MatchedElements; }
  const LVScopes &getMatchedScopes() const { return MatchedScopes; }

  void printLocalNames(raw_ostream &OS, bool Full = true) const;
  void printSummary(raw_ostream &OS, const LVCounter &Counter,
                    const char *Header) const;

  void incrementPrintedLines();
  void incrementPrintedScopes();
  void incrementPrintedSymbols();
  void incrementPrintedTypes();

  // Values are used by '--summary' option (allocated).
  void increment(LVLine *Line);
  void increment(LVScope *Scope);
  void increment(LVSymbol *Symbol);
  void increment(LVType *Type);

  // A new element has been added to the scopes tree. Take the following
  // steps: increase the added-element counters for the printing summary;
  // during comparison, notify the Reader of the new element.
  void addedElement(LVLine *Line);
  void addedElement(LVScope *Scope);
  void addedElement(LVSymbol *Symbol);
  void addedElement(LVType *Type);

  void addSize(LVScope *Scope, LVOffset Lower, LVOffset Upper);

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;
  void printWarnings(raw_ostream &OS, bool Full = true) const override;
  void printMatchedElements(raw_ostream &OS, bool UseMatchedElements) override;
};
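
// A public-names sketch (a hypothetical helper): addPublicName() stores the
// pair {LowPC, HighPC - LowPC}, and findPublicName() returns NameNone
// ({UINT64_MAX, 0}) when the scope was never recorded.
inline uint64_t publicNameSize(LVScopeCompileUnit *CU, LVScope *Function,
                               LVAddress LowPC, LVAddress HighPC) {
  CU->addPublicName(Function, LowPC, HighPC);
  const LVNameInfo &Info = CU->findPublicName(Function);
  return Info.second; // Code size in bytes.
}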

// Class to represent a DWARF enumeration (DW_TAG_enumeration_type).
class LVScopeEnumeration final : public LVScope {
public:
  LVScopeEnumeration() : LVScope() { setIsEnumeration(); }
  LVScopeEnumeration(const LVScopeEnumeration &) = delete;
  LVScopeEnumeration &operator=(const LVScopeEnumeration &) = delete;
  ~LVScopeEnumeration() = default;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF formal parameter pack
// (DW_TAG_GNU_formal_parameter_pack).
class LVScopeFormalPack final : public LVScope {
public:
  LVScopeFormalPack() : LVScope() { setIsTemplatePack(); }
  LVScopeFormalPack(const LVScopeFormalPack &) = delete;
  LVScopeFormalPack &operator=(const LVScopeFormalPack &) = delete;
  ~LVScopeFormalPack() = default;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF Function.
class LVScopeFunction : public LVScope {
  LVScope *Reference = nullptr; // DW_AT_specification, DW_AT_abstract_origin.
  size_t LinkageNameIndex = 0;  // Function DW_AT_linkage_name attribute.
  size_t EncodedArgsIndex = 0;  // Template encoded arguments.

public:
  LVScopeFunction() : LVScope() {}
  LVScopeFunction(const LVScopeFunction &) = delete;
  LVScopeFunction &operator=(const LVScopeFunction &) = delete;
  virtual ~LVScopeFunction() = default;

  // DW_AT_specification, DW_AT_abstract_origin.
  LVScope *getReference() const override { return Reference; }
  void setReference(LVScope *Scope) override {
    Reference = Scope;
    setHasReference();
  }
  void setReference(LVElement *Element) override {
    setReference(static_cast<LVScope *>(Element));
  }

  StringRef getEncodedArgs() const override {
    return getStringPool().getString(EncodedArgsIndex);
  }
  void setEncodedArgs(StringRef EncodedArgs) override {
    EncodedArgsIndex = getStringPool().getIndex(EncodedArgs);
  }

  void setLinkageName(StringRef LinkageName) override {
    LinkageNameIndex = getStringPool().getIndex(LinkageName);
  }
  StringRef getLinkageName() const override {
    return getStringPool().getString(LinkageNameIndex);
  }
  size_t getLinkageNameIndex() const override { return LinkageNameIndex; }

  void setName(StringRef ObjectName) override;

  void resolveExtra() override;
  void resolveReferences() override;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  // For the given 'Scopes' returns a scope that is logically equal
  // to the current scope; otherwise 'nullptr'.
  LVScope *findEqualScope(const LVScopes *Scopes) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF inlined function.
class LVScopeFunctionInlined final : public LVScopeFunction {
  size_t CallFilenameIndex = 0;
  uint32_t CallLineNumber = 0;
  uint32_t Discriminator = 0;

public:
  LVScopeFunctionInlined() : LVScopeFunction() { setIsInlinedFunction(); }
  LVScopeFunctionInlined(const LVScopeFunctionInlined &) = delete;
  LVScopeFunctionInlined &operator=(const LVScopeFunctionInlined &) = delete;
  ~LVScopeFunctionInlined() = default;

  uint32_t getDiscriminator() const override { return Discriminator; }
  void setDiscriminator(uint32_t Value) override {
    Discriminator = Value;
    setHasDiscriminator();
  }

  uint32_t getCallLineNumber() const override { return CallLineNumber; }
  void setCallLineNumber(uint32_t Number) override { CallLineNumber = Number; }
  size_t getCallFilenameIndex() const override { return CallFilenameIndex; }
  void setCallFilenameIndex(size_t Index) override {
    CallFilenameIndex = Index;
  }

  // Line number for display; in the case of inlined functions, we use the
  // DW_AT_call_line attribute; otherwise we use the DW_AT_decl_line attribute.
  std::string lineNumberAsString(bool ShowZero = false) const override {
    return lineAsString(getCallLineNumber(), getDiscriminator(), ShowZero);
  }

  void resolveExtra() override;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  // For the given 'Scopes' returns a scope that is logically equal
  // to the current scope; otherwise 'nullptr'.
  LVScope *findEqualScope(const LVScopes *Scopes) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF subroutine type.
class LVScopeFunctionType final : public LVScopeFunction {
public:
  LVScopeFunctionType() : LVScopeFunction() { setIsFunctionType(); }
  LVScopeFunctionType(const LVScopeFunctionType &) = delete;
  LVScopeFunctionType &operator=(const LVScopeFunctionType &) = delete;
  ~LVScopeFunctionType() = default;

  void resolveExtra() override;
};

// Class to represent a DWARF Namespace.
class LVScopeNamespace final : public LVScope {
  LVScope *Reference = nullptr; // Reference to DW_AT_extension attribute.

public:
  LVScopeNamespace() : LVScope() { setIsNamespace(); }
  LVScopeNamespace(const LVScopeNamespace &) = delete;
  LVScopeNamespace &operator=(const LVScopeNamespace &) = delete;
  ~LVScopeNamespace() = default;

  // Access DW_AT_extension reference.
  LVScope *getReference() const override { return Reference; }
  void setReference(LVScope *Scope) override {
    Reference = Scope;
    setHasReference();
  }
  void setReference(LVElement *Element) override {
    setReference(static_cast<LVScope *>(Element));
  }

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  // For the given 'Scopes' returns a scope that is logically equal
  // to the current scope; otherwise 'nullptr'.
  LVScope *findEqualScope(const LVScopes *Scopes) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent the binary file being analyzed.
class LVScopeRoot final : public LVScope {
  size_t FileFormatNameIndex = 0;

public:
  LVScopeRoot() : LVScope() { setIsRoot(); }
  LVScopeRoot(const LVScopeRoot &) = delete;
  LVScopeRoot &operator=(const LVScopeRoot &) = delete;
  ~LVScopeRoot() = default;

  StringRef getFileFormatName() const {
    return getStringPool().getString(FileFormatNameIndex);
  }
  void setFileFormatName(StringRef FileFormatName) {
    FileFormatNameIndex = getStringPool().getIndex(FileFormatName);
  }

  // The CodeView Reader uses scoped names. Recursively transform the
  // element name to use just the innermost component.
  void transformScopedName();

  // Process the collected locations and ranges, and calculate coverage.
  void processRangeInformation();

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;
  Error doPrintMatches(bool Split, raw_ostream &OS,
                       bool UseMatchedElements) const;
};

// Class to represent a DWARF template parameter pack
// (DW_TAG_GNU_template_parameter_pack).
class LVScopeTemplatePack final : public LVScope {
public:
  LVScopeTemplatePack() : LVScope() { setIsTemplatePack(); }
  LVScopeTemplatePack(const LVScopeTemplatePack &) = delete;
  LVScopeTemplatePack &operator=(const LVScopeTemplatePack &) = delete;
  ~LVScopeTemplatePack() = default;

  // Returns true if current scope is logically equal to the given 'Scope'.
  bool equals(const LVScope *Scope) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSCOPE_H
//===-- LVSupport.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines support functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSUPPORT_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSUPPORT_H

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/DebugInfo/LogicalView/Core/LVStringPool.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>
#include <map>
#include <sstream>

namespace llvm {
namespace logicalview {

// Returns the unique string pool instance.
LVStringPool &getStringPool();

using LVStringRefs = std::vector<StringRef>;
using LVLexicalComponent = std::tuple<StringRef, StringRef>;
using LVLexicalIndex =
    std::tuple<LVStringRefs::size_type, LVStringRefs::size_type>;

// Used to record specific characteristics about the objects.
template <typename T> class LVProperties {
  SmallBitVector Bits = SmallBitVector(static_cast<unsigned>(T::LastEntry) + 1);

public:
  LVProperties() = default;

  void set(T Idx) { Bits[static_cast<unsigned>(Idx)] = 1; }
  void reset(T Idx) { Bits[static_cast<unsigned>(Idx)] = 0; }
  bool get(T Idx) const { return Bits[static_cast<unsigned>(Idx)]; }
};
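
// A self-contained sketch of LVProperties with an illustrative enum (the
// names below are not from this header); 'LastEntry' sizes the bitvector.
enum class LVDemoProperty { IsVisible, IsResolved, LastEntry };
inline bool demoProperties() {
  LVProperties<LVDemoProperty> Props;
  Props.set(LVDemoProperty::IsVisible);
  Props.reset(LVDemoProperty::IsVisible);
  return Props.get(LVDemoProperty::IsResolved); // false: never set.
}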

// Generate get, set and reset 'bool' functions for LVProperties instances.
// FAMILY: instance name.
// ENUM: enumeration instance.
// FIELD: enumerator instance.
// F1, F2, F3: optional 'set' functions to be called.
#define BOOL_BIT(FAMILY, ENUM, FIELD)                                          \
  bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); }                  \
  void set##FIELD() { FAMILY.set(ENUM::FIELD); }                               \
  void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }

#define BOOL_BIT_1(FAMILY, ENUM, FIELD, F1)                                    \
  bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); }                  \
  void set##FIELD() {                                                          \
    FAMILY.set(ENUM::FIELD);                                                   \
    set##F1();                                                                 \
  }                                                                            \
  void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }

#define BOOL_BIT_2(FAMILY, ENUM, FIELD, F1, F2)                                \
  bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); }                  \
  void set##FIELD() {                                                          \
    FAMILY.set(ENUM::FIELD);                                                   \
    set##F1();                                                                 \
    set##F2();                                                                 \
  }                                                                            \
  void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }

#define BOOL_BIT_3(FAMILY, ENUM, FIELD, F1, F2, F3)                            \
  bool get##FIELD() const { return FAMILY.get(ENUM::FIELD); }                  \
  void set##FIELD() {                                                          \
    FAMILY.set(ENUM::FIELD);                                                   \
    set##F1();                                                                 \
    set##F2();                                                                 \
    set##F3();                                                                 \
  }                                                                            \
  void reset##FIELD() { FAMILY.reset(ENUM::FIELD); }

// Generate get, set and reset functions for 'properties'.
#define PROPERTY(ENUM, FIELD) BOOL_BIT(Properties, ENUM, FIELD)
#define PROPERTY_1(ENUM, FIELD, F1) BOOL_BIT_1(Properties, ENUM, FIELD, F1)
#define PROPERTY_2(ENUM, FIELD, F1, F2)                                        \
  BOOL_BIT_2(Properties, ENUM, FIELD, F1, F2)
#define PROPERTY_3(ENUM, FIELD, F1, F2, F3)                                    \
  BOOL_BIT_3(Properties, ENUM, FIELD, F1, F2, F3)

// Generate get, set and reset functions for 'kinds'.
#define KIND(ENUM, FIELD) BOOL_BIT(Kinds, ENUM, FIELD)
#define KIND_1(ENUM, FIELD, F1) BOOL_BIT_1(Kinds, ENUM, FIELD, F1)
#define KIND_2(ENUM, FIELD, F1, F2) BOOL_BIT_2(Kinds, ENUM, FIELD, F1, F2)
#define KIND_3(ENUM, FIELD, F1, F2, F3)                                        \
  BOOL_BIT_3(Kinds, ENUM, FIELD, F1, F2, F3)
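
// For reference, PROPERTY(Property, IsLine) expands through BOOL_BIT to:
//   bool getIsLine() const { return Properties.get(Property::IsLine); }
//   void setIsLine() { Properties.set(Property::IsLine); }
//   void resetIsLine() { Properties.reset(Property::IsLine); }
// The _1/_2/_3 variants additionally invoke the given set##F() functions,
// which is how a single setter can imply related bits; for example,
// KIND_2(LVScopeKind, IsFunction, CanHaveRanges, CanHaveLines) in LVScope
// makes setIsFunction() also call setCanHaveRanges() and setCanHaveLines().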

const int HEX_WIDTH = 12;
inline FormattedNumber hexValue(uint64_t N, unsigned Width = HEX_WIDTH,
                                bool Upper = false) {
  return format_hex(N, Width, Upper);
}

// Output the hexadecimal representation of 'Value' using the '0x%08x' format.
inline std::string hexString(uint64_t Value, size_t Width = HEX_WIDTH) {
  std::string String;
  raw_string_ostream Stream(String);
  Stream << hexValue(Value, Width, false);
  return Stream.str();
}

// Get a hexadecimal string representation for the given value, enclosed in
// square brackets: '[0x...]'.
inline std::string hexSquareString(uint64_t Value) {
  return (Twine("[") + Twine(hexString(Value)) + Twine("]")).str();
}
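
// Expected formatting, as a sketch (format_hex pads with zeros and its
// width includes the leading "0x"; HEX_WIDTH is 12):
//   hexString(0x1234)       returns "0x0000001234"
//   hexSquareString(0x1234) returns "[0x0000001234]"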

// Return a string with First and Others separated by single spaces; a
// trailing space follows a non-empty final item.
template <typename... Args>
std::string formatAttributes(const StringRef First, Args... Others) {
  const auto List = {First, Others...};
  std::stringstream Stream;
  size_t Size = 0;
  for (const StringRef &Item : List) {
    Stream << (Size ? " " : "") << Item.str();
    Size = Item.size();
  }
  Stream << (Size ? " " : "");
  return Stream.str();
}
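
// Example behavior (a sketch):
//   formatAttributes(StringRef("static"), StringRef("inline"))
//       returns "static inline "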

// Add an item to a map whose mapped value is a vector-like container.
template <typename MapType, typename KeyType, typename ValueType>
void addItem(MapType *Map, KeyType Key, ValueType Value) {
  (*Map)[Key].push_back(Value);
}

// Double map data structure.
template <typename FirstKeyType, typename SecondKeyType, typename ValueType>
class LVDoubleMap {
  static_assert(std::is_pointer<ValueType>::value,
                "ValueType must be a pointer.");
  using LVSecondMapType = std::map<SecondKeyType, ValueType>;
  using LVFirstMapType =
      std::map<FirstKeyType, std::unique_ptr<LVSecondMapType>>;
  using LVAuxMapType = std::map<SecondKeyType, FirstKeyType>;
  using LVValueTypes = std::vector<ValueType>;
  LVFirstMapType FirstMap;
  LVAuxMapType AuxMap;

public:
  void add(FirstKeyType FirstKey, SecondKeyType SecondKey, ValueType Value) {
    typename LVFirstMapType::iterator FirstIter = FirstMap.find(FirstKey);
    if (FirstIter == FirstMap.end()) {
      auto SecondMapSP = std::make_unique<LVSecondMapType>();
      SecondMapSP->emplace(SecondKey, Value);
      FirstMap.emplace(FirstKey, std::move(SecondMapSP));
    } else {
      LVSecondMapType *SecondMap = FirstIter->second.get();
      if (SecondMap->find(SecondKey) == SecondMap->end())
        SecondMap->emplace(SecondKey, Value);
    }

    typename LVAuxMapType::iterator AuxIter = AuxMap.find(SecondKey);
    if (AuxIter == AuxMap.end()) {
      AuxMap.emplace(SecondKey, FirstKey);
    }
  }

  LVSecondMapType *findMap(FirstKeyType FirstKey) const {
    typename LVFirstMapType::const_iterator FirstIter = FirstMap.find(FirstKey);
    if (FirstIter == FirstMap.end())
      return nullptr;

    return FirstIter->second.get();
  }

  ValueType find(FirstKeyType FirstKey, SecondKeyType SecondKey) const {
    LVSecondMapType *SecondMap = findMap(FirstKey);
    if (!SecondMap)
      return nullptr;

    typename LVSecondMapType::const_iterator SecondIter =
        SecondMap->find(SecondKey);
    return (SecondIter != SecondMap->end()) ? SecondIter->second : nullptr;
  }

  ValueType find(SecondKeyType SecondKey) const {
    typename LVAuxMapType::const_iterator AuxIter = AuxMap.find(SecondKey);
    if (AuxIter == AuxMap.end())
      return nullptr;
    return find(AuxIter->second, SecondKey);
  }

  // Return a vector with all the 'ValueType' values.
  LVValueTypes find() const {
    LVValueTypes Values;
    if (FirstMap.empty())
      return Values;
    for (typename LVFirstMapType::const_reference FirstEntry : FirstMap) {
      LVSecondMapType &SecondMap = *FirstEntry.second;
      for (typename LVSecondMapType::const_reference SecondEntry : SecondMap)
        Values.push_back(SecondEntry.second);
    }
    return Values;
  }
};
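
// A usage sketch with illustrative key/value types (LVDoubleMap requires a
// pointer ValueType): a value is reachable through both keys, or through
// the second key alone via the auxiliary map.
inline void demoDoubleMap() {
  static std::string Value = "entry";
  LVDoubleMap<int, uint64_t, std::string *> Map;
  Map.add(/*FirstKey=*/1, /*SecondKey=*/0x1000, &Value);
  std::string *ByPair = Map.find(1, 0x1000); // &Value
  std::string *ByKey = Map.find(0x1000);     // &Value, via the aux map
  (void)ByPair;
  (void)ByKey;
}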

// Unified and flattened pathnames.
std::string transformPath(StringRef Path);
std::string flattenedFilePath(StringRef Path);

inline std::string formattedKind(StringRef Kind) {
  return (Twine("{") + Twine(Kind) + Twine("}")).str();
}

inline std::string formattedName(StringRef Name) {
  return (Twine("'") + Twine(Name) + Twine("'")).str();
}

inline std::string formattedNames(StringRef Name1, StringRef Name2) {
  return (Twine("'") + Twine(Name1) + Twine(Name2) + Twine("'")).str();
}

// The given string represents a symbol or type name with optional enclosing
// scopes, such as: name, name<..>, scope::name, scope::..::name, etc.
// The string can have multiple references to template instantiations.
// getInnerComponent returns the innermost component.
LVLexicalComponent getInnerComponent(StringRef Name);
LVStringRefs getAllLexicalComponents(StringRef Name);
std::string getScopedName(const LVStringRefs &Components,
                          StringRef BaseName = {});

// These are the values assigned to the debug location record IDs.
// See DebugInfo/CodeView/CodeViewSymbols.def.
// S_DEFRANGE                               0x113f
// S_DEFRANGE_SUBFIELD                      0x1140
// S_DEFRANGE_REGISTER                      0x1141
// S_DEFRANGE_FRAMEPOINTER_REL              0x1142
// S_DEFRANGE_SUBFIELD_REGISTER             0x1143
// S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE   0x1144
// S_DEFRANGE_REGISTER_REL                  0x1145
// When recording CodeView debug location, the above values are truncated
// to a uint8_t value in order to fit the 'OpCode' used for the logical
// debug location operations.
// Return the original CodeView enum value.
inline uint16_t getCodeViewOperationCode(uint8_t Code) { return 0x1100 | Code; }
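
// Round-trip sketch: S_DEFRANGE (0x113f) is stored truncated to the
// uint8_t 0x3f, and OR-ing the 0x1100 prefix back in recovers the original:
//   getCodeViewOperationCode(static_cast<uint8_t>(0x113f)) == 0x113f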

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSUPPORT_H
//===-- LVLocation.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVOperation and LVLocation classes, which are used
// to describe variable locations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLOCATION_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLOCATION_H

#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"

namespace llvm {
namespace logicalview {

using LVLineRange = std::pair<LVLine *, LVLine *>;

// The DW_AT_data_member_location attribute is a simple member offset.
const LVSmall LVLocationMemberOffset = 0;

class LVOperation final {
  // To describe an operation:
  // OpCode
  // Operands[0]: First operand.
  // Operands[1]: Second operand.
  //   OP_bregx, OP_bit_piece, OP_[GNU_]const_type,
  //   OP_[GNU_]deref_type, OP_[GNU_]entry_value, OP_implicit_value,
  //   OP_[GNU_]implicit_pointer, OP_[GNU_]regval_type, OP_xderef_type.
  LVSmall Opcode = 0;
  SmallVector<uint64_t> Operands;

public:
  LVOperation() = delete;
  LVOperation(LVSmall Opcode, ArrayRef<LVUnsigned> Operands)
      : Opcode(Opcode), Operands(Operands) {}
  LVOperation(const LVOperation &) = delete;
  LVOperation &operator=(const LVOperation &) = delete;
  ~LVOperation() = default;

  LVSmall getOpcode() const { return Opcode; }
  std::string getOperandsDWARFInfo();
  std::string getOperandsCodeViewInfo();

  void print(raw_ostream &OS, bool Full = true) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() { print(dbgs()); }
#endif
};

class LVLocation : public LVObject {
  enum class Property {
    IsAddressRange,
    IsBaseClassOffset,
    IsBaseClassStep,
    IsClassOffset,
    IsFixedAddress,
    IsLocationSimple,
    IsGapEntry,
    IsOperation,
    IsOperationList,
    IsRegister,
    IsStackOffset,
    IsDiscardedRange,
    IsInvalidRange,
    IsInvalidLower,
    IsInvalidUpper,
    IsCallSite,
    LastEntry
  };
  // Typed bitvector with properties for this location.
  LVProperties<Property> Properties;

  // True if the location is associated with a debug range.
  bool hasAssociatedRange() const {
    return !getIsClassOffset() && !getIsDiscardedRange();
  }

protected:
  // Line numbers associated with location ranges.
  LVLine *LowerLine = nullptr;
  LVLine *UpperLine = nullptr;

  // Active range:
  // LowPC: an offset from an applicable base address, not a PC value.
  // HighPC: an offset from an applicable base address, or a length.
  LVAddress LowPC = 0;
  LVAddress HighPC = 0;

  void setKind();

public:
  LVLocation() : LVObject() { setIsLocation(); }
  LVLocation(const LVLocation &) = delete;
  LVLocation &operator=(const LVLocation &) = delete;
  virtual ~LVLocation() = default;

  PROPERTY(Property, IsAddressRange);
  PROPERTY(Property, IsBaseClassOffset);
  PROPERTY(Property, IsBaseClassStep);
  PROPERTY_1(Property, IsClassOffset, IsLocationSimple);
  PROPERTY_1(Property, IsFixedAddress, IsLocationSimple);
  PROPERTY(Property, IsLocationSimple);
  PROPERTY(Property, IsGapEntry);
  PROPERTY(Property, IsOperationList);
  PROPERTY(Property, IsOperation);
  PROPERTY(Property, IsRegister);
  PROPERTY_1(Property, IsStackOffset, IsLocationSimple);
  PROPERTY(Property, IsDiscardedRange);
  PROPERTY(Property, IsInvalidRange);
  PROPERTY(Property, IsInvalidLower);
  PROPERTY(Property, IsInvalidUpper);
  PROPERTY(Property, IsCallSite);

  const char *kind() const override;
  // Mark the locations that have only DW_OP_fbreg as stack offset based.
  virtual void updateKind() {}

  // Line numbers for locations.
  const LVLine *getLowerLine() const { return LowerLine; }
  void setLowerLine(LVLine *Line) { LowerLine = Line; }
  const LVLine *getUpperLine() const { return UpperLine; }
  void setUpperLine(LVLine *Line) { UpperLine = Line; }

  // Addresses for locations.
  LVAddress getLowerAddress() const override { return LowPC; }
  void setLowerAddress(LVAddress Address) override { LowPC = Address; }
  LVAddress getUpperAddress() const override { return HighPC; }
  void setUpperAddress(LVAddress Address) override { HighPC = Address; }

  std::string getIntervalInfo() const;

  bool validateRanges();

  // In order to calculate the symbol coverage (percentage), take the ranges
  // and obtain the number of units (bytes) covered by those ranges. We
  // can't use the line numbers, because they can be zero or invalid.
  // We return:
  //   false: No locations or multiple locations.
  //   true: A single location.
  static bool calculateCoverage(LVLocations *Locations, unsigned &Factor,
                                float &Percentage);
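
  // For instance (hypothetical numbers), a symbol with a single location
  // covering 16 bytes would yield Factor = 16 and Percentage = 100.0, with
  // calculateCoverage() returning true.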

  virtual void addObject(LVAddress LowPC, LVAddress HighPC,
                         LVUnsigned SectionOffset, uint64_t LocDescOffset) {}
  virtual void addObject(LVSmall Opcode, ArrayRef<LVUnsigned> Operands) {}

  static void print(LVLocations *Locations, raw_ostream &OS, bool Full = true);
  void printInterval(raw_ostream &OS, bool Full = true) const;
  void printRaw(raw_ostream &OS, bool Full = true) const;
  virtual void printRawExtra(raw_ostream &OS, bool Full = true) const {}

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const override { print(dbgs()); }
#endif
};

class LVLocationSymbol final : public LVLocation {
  // Location descriptors for the active range.
  std::unique_ptr<LVOperations> Entries;

  void updateKind() override;

public:
  LVLocationSymbol() : LVLocation() {}
  LVLocationSymbol(const LVLocationSymbol &) = delete;
  LVLocationSymbol &operator=(const LVLocationSymbol &) = delete;
  ~LVLocationSymbol() = default;

  void addObject(LVAddress LowPC, LVAddress HighPC, LVUnsigned SectionOffset,
                 uint64_t LocDescOffset) override;
  void addObject(LVSmall Opcode, ArrayRef<LVUnsigned> Operands) override;

  void printRawExtra(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLOCATION_H
//===-- LVOptions.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVOptions class, which is used to record the command
// line options.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOPTIONS_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOPTIONS_H

#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/LogicalView/Core/LVLine.h"
#include "llvm/DebugInfo/LogicalView/Core/LVScope.h"
#include "llvm/DebugInfo/LogicalView/Core/LVSymbol.h"
#include "llvm/DebugInfo/LogicalView/Core/LVType.h"
#include "llvm/Support/Regex.h"
#include <set>
#include <string>

namespace llvm {
namespace logicalview {

// Generate get and set 'bool' functions.
#define BOOL_FUNCTION(FAMILY, FIELD)                                           \
  bool get##FAMILY##FIELD() const { return FAMILY.FIELD; }                     \
  void set##FAMILY##FIELD() { FAMILY.FIELD = true; }                           \
  void reset##FAMILY##FIELD() { FAMILY.FIELD = false; }
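
// For example, BOOL_FUNCTION(Compare, Context) expands to:
//   bool getCompareContext() const { return Compare.Context; }
//   void setCompareContext() { Compare.Context = true; }
//   void resetCompareContext() { Compare.Context = false; }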

// Generate get and set 'unsigned' functions.
#define UNSIGNED_FUNCTION(FAMILY, FIELD)                                       \
  unsigned get##FAMILY##FIELD() const { return FAMILY.FIELD; }                 \
  void set##FAMILY##FIELD(unsigned Value) { FAMILY.FIELD = Value; }            \
  void reset##FAMILY##FIELD() { FAMILY.FIELD = -1U; }

// Generate get and set 'std::string' functions.
#define STD_STRING_FUNCTION(FAMILY, FIELD)                                     \
  std::string get##FAMILY##FIELD() const { return FAMILY.FIELD; }              \
  void set##FAMILY##FIELD(std::string FIELD) { FAMILY.FIELD = FIELD; }         \
  void reset##FAMILY##FIELD() { FAMILY.FIELD = ""; }

// Generate get and set 'std::set' functions.
#define STDSET_FUNCTION_4(FAMILY, FIELD, TYPE, SET)                            \
  bool get##FAMILY##FIELD() const {                                            \
    return FAMILY.SET.find(TYPE::FIELD) != FAMILY.SET.end();                   \
  }                                                                            \
  void set##FAMILY##FIELD() { FAMILY.SET.insert(TYPE::FIELD); }                \
  void reset##FAMILY##FIELD() {                                                \
    std::set<TYPE>::iterator Iter = FAMILY.SET.find(TYPE::FIELD);              \
    if (Iter != FAMILY.SET.end())                                              \
      FAMILY.SET.erase(Iter);                                                  \
  }

#define STDSET_FUNCTION_5(FAMILY, FIELD, ENTRY, TYPE, SET)                     \
  bool get##FAMILY##FIELD##ENTRY() const {                                     \
    return FAMILY.SET.find(TYPE::ENTRY) != FAMILY.SET.end();                   \
  }                                                                            \
  void set##FAMILY##FIELD##ENTRY() { FAMILY.SET.insert(TYPE::ENTRY); }

// Generate get and set functions for '--attribute'
#define ATTRIBUTE_OPTION(FIELD)                                                \
  STDSET_FUNCTION_4(Attribute, FIELD, LVAttributeKind, Kinds)
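
// For example, ATTRIBUTE_OPTION(Coverage) generates getAttributeCoverage(),
// setAttributeCoverage() and resetAttributeCoverage(), which test, insert
// and erase LVAttributeKind::Coverage in the Attribute.Kinds set.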

// Generate get and set functions for '--output'
#define OUTPUT_OPTION(FIELD)                                                   \
  STDSET_FUNCTION_4(Output, FIELD, LVOutputKind, Kinds)

// Generate get and set functions for '--print'
#define PRINT_OPTION(FIELD) STDSET_FUNCTION_4(Print, FIELD, LVPrintKind, Kinds)

// Generate get and set functions for '--warning'
#define WARNING_OPTION(FIELD)                                                  \
  STDSET_FUNCTION_4(Warning, FIELD, LVWarningKind, Kinds)

// Generate get and set functions for '--compare'
#define COMPARE_OPTION(FIELD)                                                  \
  STDSET_FUNCTION_4(Compare, FIELD, LVCompareKind, Elements)

// Generate get and set functions for '--report'
#define REPORT_OPTION(FIELD)                                                   \
  STDSET_FUNCTION_4(Report, FIELD, LVReportKind, Kinds)

// Generate get and set functions for '--internal'
#define INTERNAL_OPTION(FIELD)                                                 \
  STDSET_FUNCTION_4(Internal, FIELD, LVInternalKind, Kinds)

using LVOffsetSet = std::set<uint64_t>;

enum class LVAttributeKind {
  All,           // --attribute=all
  Argument,      // --attribute=argument
  Base,          // --attribute=base
  Coverage,      // --attribute=coverage
  Directories,   // --attribute=directories
  Discarded,     // --attribute=discarded
  Discriminator, // --attribute=discriminator
  Encoded,       // --attribute=encoded
  Extended,      // --attribute=extended
  Filename,      // --attribute=filename
  Files,         // --attribute=files
  Format,        // --attribute=format
  Gaps,          // --attribute=gaps
  Generated,     // --attribute=generated
  Global,        // --attribute=global
  Inserted,      // --attribute=inserted
  Level,         // --attribute=level
  Linkage,       // --attribute=linkage
  Local,         // --attribute=local
  Location,      // --attribute=location
  Offset,        // --attribute=offset
  Pathname,      // --attribute=pathname
  Producer,      // --attribute=producer
  Publics,       // --attribute=publics
  Qualified,     // --attribute=qualified
  Qualifier,     // --attribute=qualifier
  Range,         // --attribute=range
  Reference,     // --attribute=reference
  Register,      // --attribute=register
  Standard,      // --attribute=standard
  Subrange,      // --attribute=subrange
  System,        // --attribute=system
  Typename,      // --attribute=typename
  Underlying,    // --attribute=underlying
  Zero           // --attribute=zero
};
using LVAttributeKindSet = std::set<LVAttributeKind>;

enum class LVCompareKind {
  All,     // --compare=all
  Lines,   // --compare=lines
  Scopes,  // --compare=scopes
  Symbols, // --compare=symbols
  Types    // --compare=types
};
using LVCompareKindSet = std::set<LVCompareKind>;

enum class LVOutputKind {
  All,   // --output=all
  Split, // --output=split
  Json,  // --output=json
  Text   // --output=text
};
using LVOutputKindSet = std::set<LVOutputKind>;

enum class LVPrintKind {
  All,          // --print=all
  Elements,     // --print=elements
  Instructions, // --print=instructions
  Lines,        // --print=lines
  Scopes,       // --print=scopes
  Sizes,        // --print=sizes
  Symbols,      // --print=symbols
  Summary,      // --print=summary
  Types,        // --print=types
  Warnings      // --print=warnings
};
using LVPrintKindSet = std::set<LVPrintKind>;

enum class LVReportKind {
  All,      // --report=all
  Children, // --report=children
  List,     // --report=list
  Parents,  // --report=parents
  View      // --report=view
};
using LVReportKindSet = std::set<LVReportKind>;

enum class LVWarningKind {
  All,       // --warning=all
  Coverages, // --warning=coverages
  Lines,     // --warning=lines
  Locations, // --warning=locations
  Ranges     // --warning=ranges
};
using LVWarningKindSet = std::set<LVWarningKind>;

enum class LVInternalKind {
  All,       // --internal=all
  Cmdline,   // --internal=cmdline
  ID,        // --internal=id
  Integrity, // --internal=integrity
  None,      // --internal=none
  Tag        // --internal=tag
};
using LVInternalKindSet = std::set<LVInternalKind>;

// The 'Kinds' members are a one-to-one mapping to the associated command
// options that support comma separated values. There are other 'bool'
// members that in a few cases map to a command option (see the associated
// comment). The remaining 'bool' members refer to internal values derived
// from the command options.
class LVOptions {
  class LVAttribute {
  public:
    LVAttributeKindSet Kinds; // --attribute=<Kind>
    bool Added = false;       // Added elements found during comparison.
    bool AnyLocation = false; // Any kind of location information.
    bool AnySource = false;   // Any kind of source information.
    bool Missing = false;     // Missing elements found during comparison.
  };

  class LVCompare {
  public:
    LVCompareKindSet Elements; // --compare=<kind>
    bool Context = false;      // --compare-context
    bool Execute = false;      // Compare requested.
    bool Print = false;        // Enable any printing.
  };

  class LVPrint {
  public:
    LVPrintKindSet Kinds;      // --print=<Kind>
    bool AnyElement = false;   // Request to print any element.
    bool AnyLine = false;      // Print 'lines' or 'instructions'.
    bool Execute = false;      // Print requested.
    bool Formatting = true;    // Disable formatting during printing.
    bool Offset = false;       // Print offsets while formatting is disabled.
    bool SizesSummary = false; // Print 'sizes' or 'summary'.
  };

  class LVReport {
  public:
    LVReportKindSet Kinds; // --report=<kind>
    bool AnyView = false;  // View, Parents or Children.
    bool Execute = false;  // Report requested.
  };

  class LVSelect {
  public:
    bool IgnoreCase = false;     // --select-ignore-case
    bool UseRegex = false;       // --select-use-regex
    bool Execute = false;        // Select requested.
    bool GenericKind = false;    // We have collected generic kinds.
    bool GenericPattern = false; // We have collected generic patterns.
    bool OffsetPattern = false;  // We have collected offset patterns.
    StringSet<> Generic;         // --select=<Pattern>
    LVOffsetSet Offsets;         // --select-offset=<Offset>
    LVElementKindSet Elements;   // --select-elements=<Kind>
    LVLineKindSet Lines;         // --select-lines=<Kind>
    LVScopeKindSet Scopes;       // --select-scopes=<Kind>
    LVSymbolKindSet Symbols;     // --select-symbols=<Kind>
    LVTypeKindSelection Types;   // --select-types=<Kind>
  };

  class LVOutput {
  public:
    LVOutputKindSet Kinds;                  // --output=<kind>
    LVSortMode SortMode = LVSortMode::None; // --output-sort=<SortMode>
    std::string Folder;                     // --output-folder=<Folder>
    unsigned Level = -1U;                   // --output-level=<level>
  };

  class LVWarning {
  public:
    LVWarningKindSet Kinds; // --warning=<Kind>
  };

  class LVInternal {
  public:
    LVInternalKindSet Kinds; // --internal=<Kind>
  };

  class LVGeneral {
  public:
    bool CollectRanges = false; // Collect ranges information.
  };

  // Tracks the index of the filename associated with the element being
  // printed, so the filename is emitted only when it changes and it is
  // clear which logical elements belong to a particular filename. Its value
  // is reset after the element that represents the Compile Unit is printed.
  size_t LastFilenameIndex = 0;

  // Controls the amount of additional spaces to insert when printing
  // object attributes, in order to get a consistent printing layout.
  size_t IndentationSize = 0;

  // Calculate the indentation size, so we can use that value when printing
  // additional attributes to objects, such as location.
  void calculateIndentationSize();

public:
  void resetFilenameIndex() { LastFilenameIndex = 0; }
  bool changeFilenameIndex(size_t Index) {
    bool IndexChanged = (Index != LastFilenameIndex);
    if (IndexChanged)
      LastFilenameIndex = Index;
    return IndexChanged;
  }

  // Access to command line options, pattern and printing information.
  static LVOptions *getOptions();
  static void setOptions(LVOptions *Options);

  LVOptions() = default;
  LVOptions(const LVOptions &) = default;
  LVOptions &operator=(const LVOptions &) = default;
  ~LVOptions() = default;

  // Some command line options support shortcuts. For example:
  // The command line option '--print=elements' is a shortcut for:
  // '--print=instructions,lines,scopes,symbols,types'.
  // In the case of logical view comparison, some options related to
  // attributes must be set or reset for a proper comparison.
  // Resolve any dependencies between command line options.
  void resolveDependencies();
  size_t indentationSize() const { return IndentationSize; }

  LVAttribute Attribute;
  LVCompare Compare;
  LVOutput Output;
  LVPrint Print;
  LVReport Report;
  LVSelect Select;
  LVWarning Warning;
  LVInternal Internal;
  LVGeneral General;

  // --attribute.
  ATTRIBUTE_OPTION(All);
  ATTRIBUTE_OPTION(Argument);
  ATTRIBUTE_OPTION(Base);
  ATTRIBUTE_OPTION(Coverage);
  ATTRIBUTE_OPTION(Directories);
  ATTRIBUTE_OPTION(Discarded);
  ATTRIBUTE_OPTION(Discriminator);
  ATTRIBUTE_OPTION(Encoded);
  ATTRIBUTE_OPTION(Extended);
  ATTRIBUTE_OPTION(Filename);
  ATTRIBUTE_OPTION(Files);
  ATTRIBUTE_OPTION(Format);
  ATTRIBUTE_OPTION(Gaps);
  ATTRIBUTE_OPTION(Generated);
  ATTRIBUTE_OPTION(Global);
  ATTRIBUTE_OPTION(Inserted);
  ATTRIBUTE_OPTION(Level);
  ATTRIBUTE_OPTION(Linkage);
  ATTRIBUTE_OPTION(Location);
  ATTRIBUTE_OPTION(Local);
  ATTRIBUTE_OPTION(Offset);
  ATTRIBUTE_OPTION(Pathname);
  ATTRIBUTE_OPTION(Producer);
  ATTRIBUTE_OPTION(Publics);
  ATTRIBUTE_OPTION(Qualified);
  ATTRIBUTE_OPTION(Qualifier);
  ATTRIBUTE_OPTION(Range);
  ATTRIBUTE_OPTION(Reference);
  ATTRIBUTE_OPTION(Register);
  ATTRIBUTE_OPTION(Standard);
  ATTRIBUTE_OPTION(Subrange);
  ATTRIBUTE_OPTION(System);
  ATTRIBUTE_OPTION(Typename);
  ATTRIBUTE_OPTION(Underlying);
  ATTRIBUTE_OPTION(Zero);
  BOOL_FUNCTION(Attribute, Added);
  BOOL_FUNCTION(Attribute, AnyLocation);
  BOOL_FUNCTION(Attribute, AnySource);
  BOOL_FUNCTION(Attribute, Missing);

  // --compare.
  COMPARE_OPTION(All);
  COMPARE_OPTION(Lines);
  COMPARE_OPTION(Scopes);
  COMPARE_OPTION(Symbols);
  COMPARE_OPTION(Types);
  BOOL_FUNCTION(Compare, Context);
  BOOL_FUNCTION(Compare, Execute);
  BOOL_FUNCTION(Compare, Print);

  // --output.
  OUTPUT_OPTION(All);
  OUTPUT_OPTION(Split);
  OUTPUT_OPTION(Text);
  OUTPUT_OPTION(Json);
  STD_STRING_FUNCTION(Output, Folder);
  UNSIGNED_FUNCTION(Output, Level);
  LVSortMode getSortMode() const { return Output.SortMode; }
  void setSortMode(LVSortMode SortMode) { Output.SortMode = SortMode; }

  // --print.
  PRINT_OPTION(All);
  PRINT_OPTION(Elements);
  PRINT_OPTION(Instructions);
  PRINT_OPTION(Lines);
  PRINT_OPTION(Scopes);
  PRINT_OPTION(Sizes);
  PRINT_OPTION(Symbols);
  PRINT_OPTION(Summary);
  PRINT_OPTION(Types);
  PRINT_OPTION(Warnings);
  BOOL_FUNCTION(Print, AnyElement);
  BOOL_FUNCTION(Print, AnyLine);
  BOOL_FUNCTION(Print, Execute);
  BOOL_FUNCTION(Print, Formatting);
  BOOL_FUNCTION(Print, Offset);
  BOOL_FUNCTION(Print, SizesSummary);

  // --report.
  REPORT_OPTION(All);
  REPORT_OPTION(Children);
  REPORT_OPTION(List);
  REPORT_OPTION(Parents);
  REPORT_OPTION(View);
  BOOL_FUNCTION(Report, AnyView);
  BOOL_FUNCTION(Report, Execute);

  // --select.
  BOOL_FUNCTION(Select, IgnoreCase);
  BOOL_FUNCTION(Select, UseRegex);
  BOOL_FUNCTION(Select, Execute);
  BOOL_FUNCTION(Select, GenericKind);
  BOOL_FUNCTION(Select, GenericPattern);
  BOOL_FUNCTION(Select, OffsetPattern);

  // --warning.
  WARNING_OPTION(All);
  WARNING_OPTION(Coverages);
  WARNING_OPTION(Lines);
  WARNING_OPTION(Locations);
  WARNING_OPTION(Ranges);

  // --internal.
  INTERNAL_OPTION(All);
  INTERNAL_OPTION(Cmdline);
  INTERNAL_OPTION(ID);
  INTERNAL_OPTION(Integrity);
  INTERNAL_OPTION(None);
  INTERNAL_OPTION(Tag);

  // General shortcuts to some combinations.
  BOOL_FUNCTION(General, CollectRanges);

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

inline LVOptions &options() { return (*LVOptions::getOptions()); }
inline void setOptions(LVOptions *Options) { LVOptions::setOptions(Options); }

class LVPatterns final {
  // Pattern Mode.
  enum class LVMatchMode {
    None = 0, // No given pattern.
    Match,    // Perfect match.
    NoCase,   // Ignore case.
    Regex     // Regular expression.
  };

  // Keep the search pattern information.
  struct LVMatch {
    std::string Pattern;                  // Normal pattern.
    std::shared_ptr<Regex> RE;            // Regular Expression Pattern.
    LVMatchMode Mode = LVMatchMode::None; // Match mode.
  };

  using LVMatchInfo = std::vector<LVMatch>;
  LVMatchInfo GenericMatchInfo;
  using LVMatchOffsets = std::vector<uint64_t>;
  LVMatchOffsets OffsetMatchInfo;

  // Element selection.
  LVElementDispatch ElementDispatch;
  LVLineDispatch LineDispatch;
  LVScopeDispatch ScopeDispatch;
  LVSymbolDispatch SymbolDispatch;
  LVTypeDispatch TypeDispatch;

  // Element selection request.
  LVElementRequest ElementRequest;
  LVLineRequest LineRequest;
  LVScopeRequest ScopeRequest;
  LVSymbolRequest SymbolRequest;
  LVTypeRequest TypeRequest;

  // Check an element printing Request.
  template <typename T, typename U>
  bool checkElementRequest(const T *Element, const U &Requests) const {
    assert(Element && "Element must not be nullptr");
    for (const auto &Request : Requests)
      if ((Element->*Request)())
        return true;
    // Check generic element requests.
    for (const LVElementGetFunction &Request : ElementRequest)
      if ((Element->*Request)())
        return true;
    return false;
  }

  // Add an element printing request based on its kind.
  template <typename T, typename U, typename V>
  void addRequest(const T &Selection, const U &Dispatch, V &Request) const {
    for (const auto &Entry : Selection) {
      // Find the target function to fulfill the request.
      typename U::const_iterator Iter = Dispatch.find(Entry);
      if (Iter != Dispatch.end())
        Request.push_back(Iter->second);
    }
  }

  void addElement(LVElement *Element);

  template <typename T, typename U>
  void resolveGenericPatternMatch(T *Element, const U &Requests) {
    assert(Element && "Element must not be nullptr");
    auto CheckPattern = [=]() -> bool {
      return (Element->isNamed() &&
              (matchGenericPattern(Element->getName()) ||
               matchGenericPattern(Element->getLinkageName()))) ||
             (Element->isTyped() &&
              matchGenericPattern(Element->getTypeName()));
    };
    auto CheckOffset = [=]() -> bool {
      return matchOffsetPattern(Element->getOffset());
    };
    if ((options().getSelectGenericPattern() && CheckPattern()) ||
        (options().getSelectOffsetPattern() && CheckOffset()) ||
        ((Requests.size() || ElementRequest.size()) &&
         checkElementRequest(Element, Requests)))
      addElement(Element);
  }

  template <typename U>
  void resolveGenericPatternMatch(LVLine *Line, const U &Requests) {
    assert(Line && "Line must not be nullptr");
    auto CheckPattern = [=]() -> bool {
      return matchGenericPattern(Line->lineNumberAsStringStripped()) ||
             matchGenericPattern(Line->getName()) ||
             matchGenericPattern(Line->getPathname());
    };
    auto CheckOffset = [=]() -> bool {
      return matchOffsetPattern(Line->getAddress());
    };
    if ((options().getSelectGenericPattern() && CheckPattern()) ||
        (options().getSelectOffsetPattern() && CheckOffset()) ||
        (Requests.size() && checkElementRequest(Line, Requests)))
      addElement(Line);
  }

  Error createMatchEntry(LVMatchInfo &Filters, StringRef Pattern,
                         bool IgnoreCase, bool UseRegex);

public:
  static LVPatterns *getPatterns();

  LVPatterns() {
    ElementDispatch = LVElement::getDispatch();
    LineDispatch = LVLine::getDispatch();
    ScopeDispatch = LVScope::getDispatch();
    SymbolDispatch = LVSymbol::getDispatch();
    TypeDispatch = LVType::getDispatch();
  }
  LVPatterns(const LVPatterns &) = delete;
  LVPatterns &operator=(const LVPatterns &) = delete;
  ~LVPatterns() = default;

  // Clear any existing patterns.
  void clear() {
    GenericMatchInfo.clear();
    OffsetMatchInfo.clear();
    ElementRequest.clear();
    LineRequest.clear();
    ScopeRequest.clear();
    SymbolRequest.clear();
    TypeRequest.clear();

    options().resetSelectGenericKind();
    options().resetSelectGenericPattern();
    options().resetSelectOffsetPattern();
  }

  void addRequest(LVElementKindSet &Selection) {
    addRequest(Selection, ElementDispatch, ElementRequest);
  }
  void addRequest(LVLineKindSet &Selection) {
    addRequest(Selection, LineDispatch, LineRequest);
  }
  void addRequest(LVScopeKindSet &Selection) {
    addRequest(Selection, ScopeDispatch, ScopeRequest);
  }
  void addRequest(LVSymbolKindSet &Selection) {
    addRequest(Selection, SymbolDispatch, SymbolRequest);
  }
  void addRequest(LVTypeKindSelection &Selection) {
    addRequest(Selection, TypeDispatch, TypeRequest);
  }

  void updateReportOptions();

  bool matchPattern(StringRef Input, const LVMatchInfo &MatchInfo);
  // Match a pattern (--select='pattern').
  bool matchGenericPattern(StringRef Input) {
    return matchPattern(Input, GenericMatchInfo);
  }
  bool matchOffsetPattern(LVOffset Offset) {
    return llvm::is_contained(OffsetMatchInfo, Offset);
  }

  void resolvePatternMatch(LVLine *Line) {
    resolveGenericPatternMatch(Line, LineRequest);
  }

  void resolvePatternMatch(LVScope *Scope) {
    resolveGenericPatternMatch(Scope, ScopeRequest);
  }

  void resolvePatternMatch(LVSymbol *Symbol) {
    resolveGenericPatternMatch(Symbol, SymbolRequest);
  }

  void resolvePatternMatch(LVType *Type) {
    resolveGenericPatternMatch(Type, TypeRequest);
  }

  void addPatterns(StringSet<> &Patterns, LVMatchInfo &Filters);

  // Add generic and offset patterns info.
  void addGenericPatterns(StringSet<> &Patterns);
  void addOffsetPatterns(const LVOffsetSet &Patterns);

  // Conditions to print an object.
  bool printElement(const LVLine *Line) const;
  bool printObject(const LVLocation *Location) const;
  bool printElement(const LVScope *Scope) const;
  bool printElement(const LVSymbol *Symbol) const;
  bool printElement(const LVType *Type) const;

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

inline LVPatterns &patterns() { return *LVPatterns::getPatterns(); }
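
// A minimal selection sketch, assuming the patterns were collected from
// the command line options:
//   patterns().addGenericPatterns(options().Select.Generic);
//   patterns().addOffsetPatterns(options().Select.Offsets);
//   // Later, while traversing the logical elements:
//   patterns().resolvePatternMatch(Line);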

} // namespace logicalview
} // namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOPTIONS_H
//===-- LVCompare.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVCompare class, which is used to describe a logical
// view comparison.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVCOMPARE_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVCOMPARE_H

#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"

namespace llvm {
namespace logicalview {

class LVReader;

// Record the elements missing or added and their compare pass.
using LVPassEntry = std::tuple<LVReader *, LVElement *, LVComparePass>;
using LVPassTable = std::vector<LVPassEntry>;

class LVCompare final {
  raw_ostream &OS;
  LVScopes ScopeStack;

  // As the comparison is performed twice (by exchanging the reference
  // and target readers), the element missing/added status specifies the
  // comparison pass.
  // By recording each missing/added element along with its pass, we can
  // check which elements were missing/added during each pass.
  LVPassTable PassTable;

  // Reader used on the LHS of the comparison.
  // In the 'Missing' pass, it points to the reference reader.
  // In the 'Added' pass, it points to the target reader.
  LVReader *Reader = nullptr;

  bool FirstMissing = true;
  bool PrintLines = false;
  bool PrintScopes = false;
  bool PrintSymbols = false;
  bool PrintTypes = false;

  static void setInstance(LVCompare *Compare);

  void printCurrentStack();
  void printSummary() const;

public:
  LVCompare() = delete;
  LVCompare(raw_ostream &OS);
  LVCompare(const LVCompare &) = delete;
  LVCompare &operator=(const LVCompare &) = delete;
  ~LVCompare() = default;

  static LVCompare &getInstance();

  // Scopes stack used during the missing/added reporting.
  void push(LVScope *Scope) { ScopeStack.push_back(Scope); }
  void pop() { ScopeStack.pop_back(); }

  // Perform the comparison between the 'Reference' and 'Target' scope trees.
  Error execute(LVReader *ReferenceReader, LVReader *TargetReader);

  void addPassEntry(LVReader *Reader, LVElement *Element, LVComparePass Pass) {
    PassTable.emplace_back(Reader, Element, Pass);
  }
  const LVPassTable &getPassTable() const & { return PassTable; }

  void printItem(LVElement *Element, LVComparePass Pass);
  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

inline LVCompare &getComparator() { return LVCompare::getInstance(); }
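
// A minimal driving sketch, assuming both readers were already created and
// loaded elsewhere:
//   LVCompare Compare(llvm::outs());
//   if (Error Err = Compare.execute(ReferenceReader, TargetReader))
//     consumeError(std::move(Err));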

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVCOMPARE_H
//===-- LVSymbol.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVSymbol class, which is used to describe a debug
// information symbol.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSYMBOL_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSYMBOL_H

#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"

namespace llvm {
namespace logicalview {

enum class LVSymbolKind {
  IsCallSiteParameter,
  IsConstant,
  IsInheritance,
  IsMember,
  IsParameter,
  IsUnspecified,
  IsVariable,
  LastEntry
};
using LVSymbolKindSet = std::set<LVSymbolKind>;
using LVSymbolDispatch = std::map<LVSymbolKind, LVSymbolGetFunction>;
using LVSymbolRequest = std::vector<LVSymbolGetFunction>;

class LVSymbol final : public LVElement {
  enum class Property { HasLocation, FillGaps, LastEntry };

  // Typed bitvector with kinds and properties for this symbol.
  LVProperties<LVSymbolKind> Kinds;
  LVProperties<Property> Properties;
  static LVSymbolDispatch Dispatch;

  // CodeView symbol Linkage name.
  size_t LinkageNameIndex = 0;

  // Reference to DW_AT_specification, DW_AT_abstract_origin attribute.
  LVSymbol *Reference = nullptr;
  std::unique_ptr<LVLocations> Locations;
  LVLocation *CurrentLocation = nullptr;

  // Bitfields length.
  uint32_t BitSize = 0;

  // Index in the String pool representing any initial value.
  size_t ValueIndex = 0;

  // Coverage factor in units (bytes).
  unsigned CoverageFactor = 0;
  float CoveragePercentage = 0;

  // Add a location gap into the location list.
  LVLocations::iterator addLocationGap(LVLocations::iterator Pos,
                                       LVAddress LowPC, LVAddress HighPC);

  // Find the current symbol in the given 'Targets'.
  LVSymbol *findIn(const LVSymbols *Targets) const;

public:
  LVSymbol() : LVElement(LVSubclassID::LV_SYMBOL) {
    setIsSymbol();
    setIncludeInPrint();
  }
  LVSymbol(const LVSymbol &) = delete;
  LVSymbol &operator=(const LVSymbol &) = delete;
  ~LVSymbol() = default;

  static bool classof(const LVElement *Element) {
    return Element->getSubclassID() == LVSubclassID::LV_SYMBOL;
  }

  KIND(LVSymbolKind, IsCallSiteParameter);
  KIND(LVSymbolKind, IsConstant);
  KIND(LVSymbolKind, IsInheritance);
  KIND(LVSymbolKind, IsMember);
  KIND(LVSymbolKind, IsParameter);
  KIND(LVSymbolKind, IsUnspecified);
  KIND(LVSymbolKind, IsVariable);

  PROPERTY(Property, HasLocation);
  PROPERTY(Property, FillGaps);

  const char *kind() const override;

  // Access DW_AT_specification, DW_AT_abstract_origin reference.
  LVSymbol *getReference() const { return Reference; }
  void setReference(LVSymbol *Symbol) override {
    Reference = Symbol;
    setHasReference();
  }
  void setReference(LVElement *Element) override {
    assert((!Element || isa<LVSymbol>(Element)) && "Invalid element");
    setReference(static_cast<LVSymbol *>(Element));
  }

  void setLinkageName(StringRef LinkageName) override {
    LinkageNameIndex = getStringPool().getIndex(LinkageName);
  }
  StringRef getLinkageName() const override {
    return getStringPool().getString(LinkageNameIndex);
  }
  size_t getLinkageNameIndex() const override { return LinkageNameIndex; }

  uint32_t getBitSize() const override { return BitSize; }
  void setBitSize(uint32_t Size) override { BitSize = Size; }

  // Process the values for a DW_AT_const_value.
  StringRef getValue() const override {
    return getStringPool().getString(ValueIndex);
  }
  void setValue(StringRef Value) override {
    ValueIndex = getStringPool().getIndex(Value);
  }
  size_t getValueIndex() const override { return ValueIndex; }

  // Add a Location Entry.
  void addLocationConstant(dwarf::Attribute Attr, LVUnsigned Constant,
                           uint64_t LocDescOffset);
  void addLocationOperands(LVSmall Opcode, ArrayRef<uint64_t> Operands);
  void addLocation(dwarf::Attribute Attr, LVAddress LowPC, LVAddress HighPC,
                   LVUnsigned SectionOffset, uint64_t LocDescOffset,
                   bool CallSiteLocation = false);

  // Fill gaps in the location list.
  void fillLocationGaps();

  // Get all the locations associated with symbols.
  void getLocations(LVLocations &LocationList, LVValidLocation ValidLocation,
                    bool RecordInvalid = false);
  void getLocations(LVLocations &LocationList) const;

  // Calculate coverage factor.
  void calculateCoverage();

  unsigned getCoverageFactor() const { return CoverageFactor; }
  void setCoverageFactor(unsigned Value) { CoverageFactor = Value; }
  float getCoveragePercentage() const { return CoveragePercentage; }
  void setCoveragePercentage(float Value) { CoveragePercentage = Value; }

  // Print location in raw format.
  void printLocations(raw_ostream &OS, bool Full = true) const;

  // Follow a chain of references given by DW_AT_abstract_origin and/or
  // DW_AT_specification and update the symbol name.
  StringRef resolveReferencesChain();

  void resolveName() override;
  void resolveReferences() override;

  static LVSymbolDispatch &getDispatch() { return Dispatch; }

  static bool parametersMatch(const LVSymbols *References,
                              const LVSymbols *Targets);

  static void getParameters(const LVSymbols *Symbols, LVSymbols *Parameters);

  // Iterate through the 'References' set and check that all its elements
  // are present in the 'Targets' set. For a missing element, mark its
  // parents as missing.
  static void markMissingParents(const LVSymbols *References,
                                 const LVSymbols *Targets);

  // Returns true if current type is logically equal to the given 'Symbol'.
  bool equals(const LVSymbol *Symbol) const;

  // Returns true if the given 'References' are logically equal to the
  // given 'Targets'.
  static bool equals(const LVSymbols *References, const LVSymbols *Targets);

  // Report the current symbol as missing or added during comparison.
  void report(LVComparePass Pass) override;

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const override { print(dbgs()); }
#endif
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSYMBOL_H
//===-- LVSort.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the sort algorithms.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSORT_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSORT_H

namespace llvm {
namespace logicalview {

class LVObject;

// Object Sorting Mode.
enum class LVSortMode {
  None = 0, // No given sort.
  Kind,     // Sort by kind.
  Line,     // Sort by line.
  Name,     // Sort by name.
  Offset    // Sort by offset.
};

// Type of function to be called when sorting an object.
using LVSortValue = int;
using LVSortFunction = LVSortValue (*)(const LVObject *LHS,
                                       const LVObject *RHS);

// Get the comparator function, based on the command line options.
LVSortFunction getSortFunction();

// Comparator functions that can be used for sorting.
LVSortValue compareKind(const LVObject *LHS, const LVObject *RHS);
LVSortValue compareLine(const LVObject *LHS, const LVObject *RHS);
LVSortValue compareName(const LVObject *LHS, const LVObject *RHS);
LVSortValue compareOffset(const LVObject *LHS, const LVObject *RHS);
LVSortValue compareRange(const LVObject *LHS, const LVObject *RHS);
LVSortValue sortByKind(const LVObject *LHS, const LVObject *RHS);
LVSortValue sortByLine(const LVObject *LHS, const LVObject *RHS);
LVSortValue sortByName(const LVObject *LHS, const LVObject *RHS);
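
// A sketch of how a container of logical objects could be sorted with the
// selected comparator:
//   LVSortFunction SortFunction = getSortFunction();
//   llvm::stable_sort(Objects, [&](const LVObject *LHS, const LVObject *RHS) {
//     return SortFunction(LHS, RHS) < 0;
//   });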

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSORT_H
//===-- LVRange.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVRange class, which is used to describe a debug
// information range.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVRANGE_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVRANGE_H

#include "llvm/ADT/IntervalTree.h"
#include "llvm/DebugInfo/LogicalView/Core/LVObject.h"

namespace llvm {
namespace logicalview {

using LVAddressRange = std::pair<LVAddress, LVAddress>;

class LVRangeEntry final {
  LVAddress Lower = 0;
  LVAddress Upper = 0;
  LVScope *Scope = nullptr;

public:
  using RangeType = LVAddress;

  LVRangeEntry() = delete;
  LVRangeEntry(LVAddress LowerAddress, LVAddress UpperAddress, LVScope *Scope)
      : Lower(LowerAddress), Upper(UpperAddress), Scope(Scope) {}

  RangeType lower() const { return Lower; }
  RangeType upper() const { return Upper; }
  LVAddressRange addressRange() const {
    return LVAddressRange(lower(), upper());
  }
  LVScope *scope() const { return Scope; }
};

// Class to represent a list of range addresses associated with a
// scope; the addresses are stored in ascending order and can overlap.
using LVRangeEntries = std::vector<LVRangeEntry>;

class LVRange final : public LVObject {
  /// Map of where a user value is live, and its location.
  using LVRangesTree = IntervalTree<LVAddress, LVScope *>;
  using LVAllocator = LVRangesTree::Allocator;

  LVAllocator Allocator;
  LVRangesTree RangesTree;
  LVRangeEntries RangeEntries;
  LVAddress Lower = MaxAddress;
  LVAddress Upper = 0;

public:
  LVRange() : LVObject(), RangesTree(Allocator) {}
  LVRange(const LVRange &) = delete;
  LVRange &operator=(const LVRange &) = delete;
  ~LVRange() = default;

  void addEntry(LVScope *Scope, LVAddress LowerAddress, LVAddress UpperAddress);
  void addEntry(LVScope *Scope);
  LVScope *getEntry(LVAddress Address) const;
  LVScope *getEntry(LVAddress LowerAddress, LVAddress UpperAddress) const;
  bool hasEntry(LVAddress Low, LVAddress High) const;
  LVAddress getLower() const { return Lower; }
  LVAddress getUpper() const { return Upper; }

  const LVRangeEntries &getEntries() const { return RangeEntries; }

  void clear() {
    RangeEntries.clear();
    Lower = MaxAddress;
    Upper = 0;
  }
  bool empty() const { return RangeEntries.empty(); }
  void sort();

  void startSearch();
  void endSearch() {}

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const override { print(dbgs()); }
#endif
};
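
// A minimal population sketch (addresses hypothetical):
//   LVRange Ranges;
//   Ranges.addEntry(Scope, 0x1000, 0x1080);
//   Ranges.sort();
//   Ranges.startSearch();
//   LVScope *Match = Ranges.getEntry(0x1010);
//   Ranges.endSearch();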

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVRANGE_H
//===-- LVReader.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVReader class, which is used to describe a debug
// information reader.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVREADER_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVREADER_H

#include "llvm/DebugInfo/LogicalView/Core/LVOptions.h"
#include "llvm/DebugInfo/LogicalView/Core/LVRange.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/ToolOutputFile.h"
#include <map>

namespace llvm {
namespace logicalview {

constexpr LVSectionIndex UndefinedSectionIndex = 0;

class LVScopeCompileUnit;
class LVObject;

class LVSplitContext final {
  std::unique_ptr<ToolOutputFile> OutputFile;
  std::string Location;

public:
  LVSplitContext() = default;
  LVSplitContext(const LVSplitContext &) = delete;
  LVSplitContext &operator=(const LVSplitContext &) = delete;
  ~LVSplitContext() = default;

  Error createSplitFolder(StringRef Where);
  std::error_code open(std::string Name, std::string Extension,
                       raw_ostream &OS);
  void close() {
    if (OutputFile) {
      OutputFile->os().close();
      OutputFile = nullptr;
    }
  }

  std::string getLocation() const { return Location; }
  raw_fd_ostream &os() { return OutputFile->os(); }
};
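
// A minimal usage sketch, assuming '--output=split' was requested and that
// 'OS' is the stream to fall back on:
//   LVSplitContext &Split = getReaderSplitContext();
//   if (Error Err = Split.createSplitFolder("split-output"))
//     consumeError(std::move(Err));
//   if (!Split.open("unit", ".txt", OS))
//     Split.os() << "...";
//   Split.close();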

/// The logical reader owns all the logical elements created during the
/// debug information parsing. For their creation it uses a specific bump
/// allocator for each type of logical element.
class LVReader {
  LVBinaryType BinaryType;

  // Context used by '--output=split' command line option.
  LVSplitContext SplitContext;

  // Compile Units DIE Offset => Scope.
  using LVCompileUnits = std::map<LVOffset, LVScopeCompileUnit *>;
  LVCompileUnits CompileUnits;

  // Added elements to be used during elements comparison.
  LVLines Lines;
  LVScopes Scopes;
  LVSymbols Symbols;
  LVTypes Types;

  // Create split folder.
  Error createSplitFolder();
  bool OutputSplit = false;

// Define a specific bump allocator for the given KIND.
#define LV_OBJECT_ALLOCATOR(KIND)                                              \
  llvm::SpecificBumpPtrAllocator<LV##KIND> Allocated##KIND;

  // Lines allocator.
  LV_OBJECT_ALLOCATOR(Line)
  LV_OBJECT_ALLOCATOR(LineDebug)
  LV_OBJECT_ALLOCATOR(LineAssembler)

  // Locations allocator.
  LV_OBJECT_ALLOCATOR(Location)
  LV_OBJECT_ALLOCATOR(LocationSymbol)

  // Operations allocator.
  LV_OBJECT_ALLOCATOR(Operation)

  // Scopes allocator.
  LV_OBJECT_ALLOCATOR(Scope)
  LV_OBJECT_ALLOCATOR(ScopeAggregate)
  LV_OBJECT_ALLOCATOR(ScopeAlias)
  LV_OBJECT_ALLOCATOR(ScopeArray)
  LV_OBJECT_ALLOCATOR(ScopeCompileUnit)
  LV_OBJECT_ALLOCATOR(ScopeEnumeration)
  LV_OBJECT_ALLOCATOR(ScopeFormalPack)
  LV_OBJECT_ALLOCATOR(ScopeFunction)
  LV_OBJECT_ALLOCATOR(ScopeFunctionInlined)
  LV_OBJECT_ALLOCATOR(ScopeFunctionType)
  LV_OBJECT_ALLOCATOR(ScopeNamespace)
  LV_OBJECT_ALLOCATOR(ScopeRoot)
  LV_OBJECT_ALLOCATOR(ScopeTemplatePack)

  // Symbols allocator.
  LV_OBJECT_ALLOCATOR(Symbol)

  // Types allocator.
  LV_OBJECT_ALLOCATOR(Type)
  LV_OBJECT_ALLOCATOR(TypeDefinition)
  LV_OBJECT_ALLOCATOR(TypeEnumerator)
  LV_OBJECT_ALLOCATOR(TypeImport)
  LV_OBJECT_ALLOCATOR(TypeParam)
  LV_OBJECT_ALLOCATOR(TypeSubrange)

#undef LV_OBJECT_ALLOCATOR

protected:
  LVScopeRoot *Root = nullptr;
  std::string InputFilename;
  std::string FileFormatName;
  ScopedPrinter &W;
  raw_ostream &OS;
  LVScopeCompileUnit *CompileUnit = nullptr;

  // Only for ELF format. The CodeView is handled in a different way.
  LVSectionIndex DotTextSectionIndex = UndefinedSectionIndex;

  // Record Compilation Unit entry.
  void addCompileUnitOffset(LVOffset Offset, LVScopeCompileUnit *CompileUnit) {
    CompileUnits.emplace(Offset, CompileUnit);
  }

  // Create the Scope Root.
  virtual Error createScopes() {
    Root = createScopeRoot();
    Root->setName(getFilename());
    if (options().getAttributeFormat())
      Root->setFileFormatName(FileFormatName);
    return Error::success();
  }

  // Return a pathname composed of: parent_path(InputFilename)/filename(From).
  // This is useful when a type server (PDB file associated with an object
  // file or a precompiled header file) or a DWARF split object has been
  // moved from its original location. That is the case when running
  // regression tests, where object files are created in one location and
  // executed in a different location.
  std::string createAlternativePath(StringRef From) {
    // During the reader initialization, any backslashes in 'InputFilename'
    // are converted to forward slashes.
    SmallString<128> Path;
    sys::path::append(Path, sys::path::Style::posix,
                      sys::path::parent_path(InputFilename),
                      sys::path::filename(sys::path::convert_to_slash(
                          From, sys::path::Style::windows)));
    return std::string(Path);
  }
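
  // For example (hypothetical paths), with InputFilename 'test/input/main.o',
  // createAlternativePath("C:\\build\\types.pdb") returns
  // 'test/input/types.pdb'.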

  virtual Error printScopes();
  virtual Error printMatchedElements(bool UseMatchedElements);
  virtual void sortScopes() {}

public:
  LVReader() = delete;
  LVReader(StringRef InputFilename, StringRef FileFormatName, ScopedPrinter &W,
           LVBinaryType BinaryType = LVBinaryType::NONE)
      : BinaryType(BinaryType), OutputSplit(options().getOutputSplit()),
        InputFilename(InputFilename), FileFormatName(FileFormatName), W(W),
        OS(W.getOStream()) {}
  LVReader(const LVReader &) = delete;
  LVReader &operator=(const LVReader &) = delete;
  virtual ~LVReader() = default;

// Creates a logical object of the given KIND. The signatures of the created
// functions look like:
//   ...
//   LVScope *createScope()
//   LVScopeRoot *createScopeRoot()
//   LVType *createType()
//   ...
#define LV_CREATE_OBJECT(KIND)                                                 \
  LV##KIND *create##KIND() {                                                   \
    return new (Allocated##KIND.Allocate()) LV##KIND();                        \
  }

  // Lines creation.
  LV_CREATE_OBJECT(Line)
  LV_CREATE_OBJECT(LineDebug)
  LV_CREATE_OBJECT(LineAssembler)

  // Locations creation.
  LV_CREATE_OBJECT(Location)
  LV_CREATE_OBJECT(LocationSymbol)

  // Scopes creation.
  LV_CREATE_OBJECT(Scope)
  LV_CREATE_OBJECT(ScopeAggregate)
  LV_CREATE_OBJECT(ScopeAlias)
  LV_CREATE_OBJECT(ScopeArray)
  LV_CREATE_OBJECT(ScopeCompileUnit)
  LV_CREATE_OBJECT(ScopeEnumeration)
  LV_CREATE_OBJECT(ScopeFormalPack)
  LV_CREATE_OBJECT(ScopeFunction)
  LV_CREATE_OBJECT(ScopeFunctionInlined)
  LV_CREATE_OBJECT(ScopeFunctionType)
  LV_CREATE_OBJECT(ScopeNamespace)
  LV_CREATE_OBJECT(ScopeRoot)
  LV_CREATE_OBJECT(ScopeTemplatePack)

  // Symbols creation.
  LV_CREATE_OBJECT(Symbol)

  // Types creation.
  LV_CREATE_OBJECT(Type)
  LV_CREATE_OBJECT(TypeDefinition)
  LV_CREATE_OBJECT(TypeEnumerator)
  LV_CREATE_OBJECT(TypeImport)
  LV_CREATE_OBJECT(TypeParam)
  LV_CREATE_OBJECT(TypeSubrange)

#undef LV_CREATE_OBJECT

  // Operations creation.
  LVOperation *createOperation(LVSmall OpCode, ArrayRef<LVUnsigned> Operands) {
    return new (AllocatedOperation.Allocate()) LVOperation(OpCode, Operands);
  }
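
  // A creation sketch (Offset and the compile unit are hypothetical here):
  // derived readers pair creation with registration, e.g.:
  //   LVScopeCompileUnit *CompileUnit = createScopeCompileUnit();
  //   addCompileUnitOffset(Offset, CompileUnit);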

  StringRef getFilename(LVObject *Object, size_t Index) const;
  StringRef getFilename() const { return InputFilename; }
  void setFilename(std::string Name) { InputFilename = std::move(Name); }
  StringRef getFileFormatName() const { return FileFormatName; }

  raw_ostream &outputStream() { return OS; }

  bool isBinaryTypeNone() const { return BinaryType == LVBinaryType::NONE; }
  bool isBinaryTypeELF() const { return BinaryType == LVBinaryType::ELF; }
  bool isBinaryTypeCOFF() const { return BinaryType == LVBinaryType::COFF; }

  LVScopeCompileUnit *getCompileUnit() const { return CompileUnit; }
  void setCompileUnit(LVScope *Scope) {
    assert(Scope && Scope->isCompileUnit() && "Scope is not a compile unit");
    CompileUnit = static_cast<LVScopeCompileUnit *>(Scope);
  }
  void setCompileUnitCPUType(codeview::CPUType Type) {
    CompileUnit->setCPUType(Type);
  }
  codeview::CPUType getCompileUnitCPUType() {
    return CompileUnit->getCPUType();
  }

  // Access to the scopes root.
  LVScopeRoot *getScopesRoot() const { return Root; }

  Error doPrint();
  Error doLoad();

  virtual std::string getRegisterName(LVSmall Opcode,
                                      ArrayRef<uint64_t> Operands) {
    llvm_unreachable("Invalid instance reader.");
    return {};
  }

  LVSectionIndex getDotTextSectionIndex() const { return DotTextSectionIndex; }
  virtual LVSectionIndex getSectionIndex(LVScope *Scope) {
    return getDotTextSectionIndex();
  }

  virtual bool isSystemEntry(LVElement *Element, StringRef Name = {}) const {
    return false;
  };

  // Access to split context.
  LVSplitContext &getSplitContext() { return SplitContext; }

  // In the case of element comparison, register that added element.
  void notifyAddedElement(LVLine *Line) {
    if (!options().getCompareContext() && options().getCompareLines())
      Lines.push_back(Line);
  }
  void notifyAddedElement(LVScope *Scope) {
    if (!options().getCompareContext() && options().getCompareScopes())
      Scopes.push_back(Scope);
  }
  void notifyAddedElement(LVSymbol *Symbol) {
    if (!options().getCompareContext() && options().getCompareSymbols())
      Symbols.push_back(Symbol);
  }
  void notifyAddedElement(LVType *Type) {
    if (!options().getCompareContext() && options().getCompareTypes())
      Types.push_back(Type);
  }

  const LVLines &getLines() const { return Lines; }
  const LVScopes &getScopes() const { return Scopes; }
  const LVSymbols &getSymbols() const { return Symbols; }
  const LVTypes &getTypes() const { return Types; }

  // Conditions to print an object.
  bool doPrintLine(const LVLine *Line) const {
    return patterns().printElement(Line);
  }
  bool doPrintLocation(const LVLocation *Location) const {
    return patterns().printObject(Location);
  }
  bool doPrintScope(const LVScope *Scope) const {
    return patterns().printElement(Scope);
  }
  bool doPrintSymbol(const LVSymbol *Symbol) const {
    return patterns().printElement(Symbol);
  }
  bool doPrintType(const LVType *Type) const {
    return patterns().printElement(Type);
  }

  static LVReader &getInstance();
  static void setInstance(LVReader *Reader);

  void print(raw_ostream &OS) const;
  virtual void printRecords(raw_ostream &OS) const {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

inline LVReader &getReader() { return LVReader::getInstance(); }
inline LVSplitContext &getReaderSplitContext() {
  return getReader().getSplitContext();
}
inline LVScopeCompileUnit *getReaderCompileUnit() {
  return getReader().getCompileUnit();
}

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVREADER_H
//===-- LVStringPool.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVStringPool class, which is used to implement a
// basic string pool table.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSTRINGPOOL_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSTRINGPOOL_H

#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <iomanip>
#include <vector>

namespace llvm {
namespace logicalview {

class LVStringPool {
  static constexpr size_t BadIndex = std::numeric_limits<size_t>::max();
  using TableType = StringMap<size_t, BumpPtrAllocator>;
  using ValueType = TableType::value_type;
  BumpPtrAllocator Allocator;
  TableType StringTable;
  std::vector<ValueType *> Entries;

public:
  LVStringPool() { getIndex(""); }
  LVStringPool(LVStringPool const &other) = delete;
  LVStringPool(LVStringPool &&other) = delete;
  ~LVStringPool() = default;

  bool isValidIndex(size_t Index) const { return Index != BadIndex; }

  // Return the number of strings in the pool. The empty string is allocated
  // at slot zero. We subtract 1 to report the number of non-empty strings.
  size_t getSize() const { return Entries.size() - 1; }

  // Return the index for the specified key, otherwise 'BadIndex'.
  size_t findIndex(StringRef Key) const {
    TableType::const_iterator Iter = StringTable.find(Key);
    if (Iter != StringTable.end())
      return Iter->second;
    return BadIndex;
  }

  // Return an index for the specified key.
  size_t getIndex(StringRef Key) {
    size_t Index = findIndex(Key);
    if (isValidIndex(Index))
      return Index;
    size_t Value = Entries.size();
    ValueType *Entry = ValueType::create(Key, Allocator, std::move(Value));
    StringTable.insert(Entry);
    Entries.push_back(Entry);
    return Value;
  }

  // Given the index, return its corresponding string.
  StringRef getString(size_t Index) const {
    return (Index >= Entries.size()) ? StringRef() : Entries[Index]->getKey();
  }
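
  // A minimal usage sketch: indices are stable, so they can be stored in
  // place of strings and resolved later:
  //   LVStringPool Pool;
  //   size_t Index = Pool.getIndex("name"); // Allocates on first use.
  //   StringRef Name = Pool.getString(Index); // Yields "name".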

  void print(raw_ostream &OS) const {
    if (!Entries.empty()) {
      OS << "\nString Pool:\n";
      for (const ValueType *Entry : Entries)
        OS << "Index: " << Entry->getValue() << ", "
           << "Key: '" << Entry->getKey() << "'\n";
    }
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

} // namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVSTRINGPOOL_H
//===-- LVLine.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVLine class, which is used to describe a debug
// information line.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLINE_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLINE_H

#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"

namespace llvm {
namespace logicalview {

enum class LVLineKind {
  IsBasicBlock,
  IsDiscriminator,
  IsEndSequence,
  IsEpilogueBegin,
  IsLineDebug,
  IsLineAssembler,
  IsNewStatement, // Shared with CodeView 'IsStatement' flag.
  IsPrologueEnd,
  IsAlwaysStepInto, // CodeView
  IsNeverStepInto,  // CodeView
  LastEntry
};
using LVLineKindSet = std::set<LVLineKind>;
using LVLineDispatch = std::map<LVLineKind, LVLineGetFunction>;
using LVLineRequest = std::vector<LVLineGetFunction>;

// Class to represent a logical line.
class LVLine : public LVElement {
  // Typed bitvector with kinds for this line.
  LVProperties<LVLineKind> Kinds;
  static LVLineDispatch Dispatch;

  // Find the current line in the given 'Targets'.
  LVLine *findIn(const LVLines *Targets) const;

public:
  LVLine() : LVElement(LVSubclassID::LV_LINE) {
    setIsLine();
    setIncludeInPrint();
  }
  LVLine(const LVLine &) = delete;
  LVLine &operator=(const LVLine &) = delete;
  virtual ~LVLine() = default;

  static bool classof(const LVElement *Element) {
    return Element->getSubclassID() == LVSubclassID::LV_LINE;
  }

  KIND(LVLineKind, IsBasicBlock);
  KIND(LVLineKind, IsDiscriminator);
  KIND(LVLineKind, IsEndSequence);
  KIND(LVLineKind, IsEpilogueBegin);
  KIND(LVLineKind, IsLineDebug);
  KIND(LVLineKind, IsLineAssembler);
  KIND(LVLineKind, IsNewStatement);
  KIND(LVLineKind, IsPrologueEnd);
  KIND(LVLineKind, IsAlwaysStepInto);
  KIND(LVLineKind, IsNeverStepInto);

  const char *kind() const override;

  // Use the offset to store the line address.
  uint64_t getAddress() const { return getOffset(); }
  void setAddress(uint64_t address) { setOffset(address); }

  // String used for printing objects with no line number.
  std::string noLineAsString(bool ShowZero = false) const override;

  // Line number for display; in the case of inlined functions, we use the
  // DW_AT_call_line attribute; otherwise we use the DW_AT_decl_line attribute.
  std::string lineNumberAsString(bool ShowZero = false) const override {
    return lineAsString(getLineNumber(), getDiscriminator(), ShowZero);
  }

  static LVLineDispatch &getDispatch() { return Dispatch; }

  // Iterate through the 'References' set and check that all its elements
  // are present in the 'Targets' set. For a missing element, mark its
  // parents as missing.
  static void markMissingParents(const LVLines *References,
                                 const LVLines *Targets);

  // Returns true if current line is logically equal to the given 'Line'.
  virtual bool equals(const LVLine *Line) const;

  // Returns true if the given 'References' are logically equal to the
  // given 'Targets'.
  static bool equals(const LVLines *References, const LVLines *Targets);

  // Report the current line as missing or added during comparison.
  void report(LVComparePass Pass) override;

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const override { print(dbgs()); }
#endif
};

// Class to represent a DWARF line record object.
class LVLineDebug final : public LVLine {
  // Discriminator value (DW_LNE_set_discriminator). The DWARF standard
  // defines the discriminator as an unsigned LEB128 integer.
  uint32_t Discriminator = 0;

public:
  LVLineDebug() : LVLine() { setIsLineDebug(); }
  LVLineDebug(const LVLineDebug &) = delete;
  LVLineDebug &operator=(const LVLineDebug &) = delete;
  ~LVLineDebug() = default;

  // Additional line information. It includes attributes that describe the
  // state of the machine instructions (basic block, end of prologue, etc.).
  std::string statesInfo(bool Formatted) const;

  // Access DW_LNE_set_discriminator attribute.
  uint32_t getDiscriminator() const override { return Discriminator; }
  void setDiscriminator(uint32_t Value) override {
    Discriminator = Value;
    setIsDiscriminator();
  }

  // Returns true if current line is logically equal to the given 'Line'.
  bool equals(const LVLine *Line) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent an assembler line extracted from the text section.
class LVLineAssembler final : public LVLine {
public:
  LVLineAssembler() : LVLine() { setIsLineAssembler(); }
  LVLineAssembler(const LVLineAssembler &) = delete;
  LVLineAssembler &operator=(const LVLineAssembler &) = delete;
  ~LVLineAssembler() = default;

  // Print blanks as the line number.
  std::string noLineAsString(bool ShowZero) const override {
    return std::string(8, ' ');
  }

  // Returns true if current line is logically equal to the given 'Line'.
  bool equals(const LVLine *Line) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVLINE_H
//===-- LVType.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVType class, which is used to describe a debug
// information type.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVTYPE_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVTYPE_H

#include "llvm/DebugInfo/LogicalView/Core/LVElement.h"

namespace llvm {
namespace logicalview {

enum class LVTypeKind {
  IsBase,
  IsConst,
  IsEnumerator,
  IsImport,
  IsImportDeclaration,
  IsImportModule,
  IsPointer,
  IsPointerMember,
  IsReference,
  IsRestrict,
  IsRvalueReference,
  IsSubrange,
  IsTemplateParam,
  IsTemplateTemplateParam,
  IsTemplateTypeParam,
  IsTemplateValueParam,
  IsTypedef,
  IsUnaligned,
  IsUnspecified,
  IsVolatile,
  IsModifier, // CodeView - LF_MODIFIER
  LastEntry
};
using LVTypeKindSelection = std::set<LVTypeKind>;
using LVTypeDispatch = std::map<LVTypeKind, LVTypeGetFunction>;
using LVTypeRequest = std::vector<LVTypeGetFunction>;

// Class to represent a DWARF Type.
class LVType : public LVElement {
  enum class Property { IsSubrangeCount, LastEntry };

  // Typed bitvector with kinds and properties for this type.
  LVProperties<LVTypeKind> Kinds;
  LVProperties<Property> Properties;
  static LVTypeDispatch Dispatch;

  // Find the current type in the given 'Targets'.
  LVType *findIn(const LVTypes *Targets) const;

public:
  LVType() : LVElement(LVSubclassID::LV_TYPE) { setIsType(); }
  LVType(const LVType &) = delete;
  LVType &operator=(const LVType &) = delete;
  virtual ~LVType() = default;

  static bool classof(const LVElement *Element) {
    return Element->getSubclassID() == LVSubclassID::LV_TYPE;
  }

  KIND(LVTypeKind, IsBase);
  KIND(LVTypeKind, IsConst);
  KIND(LVTypeKind, IsEnumerator);
  KIND(LVTypeKind, IsImport);
  KIND_1(LVTypeKind, IsImportDeclaration, IsImport);
  KIND_1(LVTypeKind, IsImportModule, IsImport);
  KIND(LVTypeKind, IsPointer);
  KIND(LVTypeKind, IsPointerMember);
  KIND(LVTypeKind, IsReference);
  KIND(LVTypeKind, IsRestrict);
  KIND(LVTypeKind, IsRvalueReference);
  KIND(LVTypeKind, IsSubrange);
  KIND(LVTypeKind, IsTemplateParam);
  KIND_1(LVTypeKind, IsTemplateTemplateParam, IsTemplateParam);
  KIND_1(LVTypeKind, IsTemplateTypeParam, IsTemplateParam);
  KIND_1(LVTypeKind, IsTemplateValueParam, IsTemplateParam);
  KIND(LVTypeKind, IsTypedef);
  KIND(LVTypeKind, IsUnaligned);
  KIND(LVTypeKind, IsUnspecified);
  KIND(LVTypeKind, IsVolatile);
  KIND(LVTypeKind, IsModifier);

  PROPERTY(Property, IsSubrangeCount);

  const char *kind() const override;

  // Follow a chain of references given by DW_AT_abstract_origin and/or
  // DW_AT_specification and update the type name.
  StringRef resolveReferencesChain();

  bool isBase() const override { return getIsBase(); }
  bool isTemplateParam() const override { return getIsTemplateParam(); }

  // Encode the specific template argument.
  virtual void encodeTemplateArgument(std::string &Name) const {}

  // Return the underlying type for a type definition.
  virtual LVElement *getUnderlyingType() { return nullptr; }
  virtual void setUnderlyingType(LVElement *Element) {}

  void resolveName() override;
  void resolveReferences() override;

  static LVTypeDispatch &getDispatch() { return Dispatch; }

  static bool parametersMatch(const LVTypes *References,
                              const LVTypes *Targets);

  static void getParameters(const LVTypes *Types, LVTypes *TypesParam,
                            LVScopes *ScopesParam);

  // Iterate through the 'References' set and check that all its elements
  // are present in the 'Targets' set. For a missing element, mark its
  // parents as missing.
  static void markMissingParents(const LVTypes *References,
                                 const LVTypes *Targets);

  // Returns true if current type is logically equal to the given 'Type'.
  virtual bool equals(const LVType *Type) const;

  // Returns true if the given 'References' are logically equal to the
  // given 'Targets'.
  static bool equals(const LVTypes *References, const LVTypes *Targets);

  // Report the current type as missing or added during comparison.
  void report(LVComparePass Pass) override;

  void print(raw_ostream &OS, bool Full = true) const override;
  void printExtra(raw_ostream &OS, bool Full = true) const override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const override { print(dbgs()); }
#endif
};

// Class to represent DW_TAG_typedef_type.
class LVTypeDefinition final : public LVType {
public:
  LVTypeDefinition() : LVType() {
    setIsTypedef();
    setIncludeInPrint();
  }
  LVTypeDefinition(const LVTypeDefinition &) = delete;
  LVTypeDefinition &operator=(const LVTypeDefinition &) = delete;
  ~LVTypeDefinition() = default;

  // Return the underlying type for a type definition.
  LVElement *getUnderlyingType() override;
  void setUnderlyingType(LVElement *Element) override { setType(Element); }

  void resolveExtra() override;

  // Returns true if current type is logically equal to the given 'Type'.
  bool equals(const LVType *Type) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DW_TAG_enumerator.
class LVTypeEnumerator final : public LVType {
  // Index in the String pool representing any initial value.
  size_t ValueIndex = 0;

public:
  LVTypeEnumerator() : LVType() {
    setIsEnumerator();
    setIncludeInPrint();
  }
  LVTypeEnumerator(const LVTypeEnumerator &) = delete;
  LVTypeEnumerator &operator=(const LVTypeEnumerator &) = delete;
  ~LVTypeEnumerator() = default;

  // Process the values for a DW_TAG_enumerator.
  StringRef getValue() const override {
    return getStringPool().getString(ValueIndex);
  }
  void setValue(StringRef Value) override {
    ValueIndex = getStringPool().getIndex(Value);
  }
  size_t getValueIndex() const override { return ValueIndex; }

  // Returns true if current type is logically equal to the given 'Type'.
  bool equals(const LVType *Type) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent DW_TAG_imported_module / DW_TAG_imported_declaration.
class LVTypeImport final : public LVType {
public:
  LVTypeImport() : LVType() { setIncludeInPrint(); }
  LVTypeImport(const LVTypeImport &) = delete;
  LVTypeImport &operator=(const LVTypeImport &) = delete;
  ~LVTypeImport() = default;

  // Returns true if current type is logically equal to the given 'Type'.
  bool equals(const LVType *Type) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DWARF Template parameter holder (type or param).
class LVTypeParam final : public LVType {
  // Index in the String pool representing any initial value.
  size_t ValueIndex = 0;

public:
  LVTypeParam();
  LVTypeParam(const LVTypeParam &) = delete;
  LVTypeParam &operator=(const LVTypeParam &) = delete;
  ~LVTypeParam() = default;

  // Template parameter value.
  StringRef getValue() const override {
    return getStringPool().getString(ValueIndex);
  }
  void setValue(StringRef Value) override {
    ValueIndex = getStringPool().getIndex(Value);
  }
  size_t getValueIndex() const override { return ValueIndex; }

  // Encode the specific template argument.
  void encodeTemplateArgument(std::string &Name) const override;

  // Returns true if current type is logically equal to the given 'Type'.
  bool equals(const LVType *Type) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};

// Class to represent a DW_TAG_subrange_type.
class LVTypeSubrange final : public LVType {
  // Values describing the subrange bounds.
  int64_t LowerBound = 0; // DW_AT_lower_bound or DW_AT_count value.
  int64_t UpperBound = 0; // DW_AT_upper_bound value.

public:
  LVTypeSubrange() : LVType() {
    setIsSubrange();
    setIncludeInPrint();
  }
  LVTypeSubrange(const LVTypeSubrange &) = delete;
  LVTypeSubrange &operator=(const LVTypeSubrange &) = delete;
  ~LVTypeSubrange() = default;

  int64_t getCount() const override {
    return getIsSubrangeCount() ? LowerBound : 0;
  }
  void setCount(int64_t Value) override {
    LowerBound = Value;
    setIsSubrangeCount();
  }

  int64_t getLowerBound() const override { return LowerBound; }
  void setLowerBound(int64_t Value) override { LowerBound = Value; }

  int64_t getUpperBound() const override { return UpperBound; }
  void setUpperBound(int64_t Value) override { UpperBound = Value; }

  std::pair<unsigned, unsigned> getBounds() const override {
    return {static_cast<unsigned>(LowerBound),
            static_cast<unsigned>(UpperBound)};
  }
  void setBounds(unsigned Lower, unsigned Upper) override {
    LowerBound = Lower;
    UpperBound = Upper;
  }

  void resolveExtra() override;

  // Returns true if current type is logically equal to the given 'Type'.
  bool equals(const LVType *Type) const override;

  void printExtra(raw_ostream &OS, bool Full = true) const override;
};
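
// Bounds sketch: a C array 'int A[5]' described with DW_AT_lower_bound 0 and
// DW_AT_upper_bound 4 is recorded via setBounds(0, 4); a producer emitting
// DW_AT_count 5 instead is recorded via setCount(5), which stores the count
// in LowerBound and sets the IsSubrangeCount property so getCount() returns it.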

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVTYPE_H
//===-- LVObject.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVObject class, which is used to describe a debug
// information object.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOBJECT_H
#define LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOBJECT_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/LogicalView/Core/LVSupport.h"
#include <limits>
#include <list>
#include <map>
#include <string>

namespace llvm {
namespace dwarf {
// Support for CodeView ModifierOptions::Unaligned.
constexpr Tag DW_TAG_unaligned = Tag(dwarf::DW_TAG_hi_user + 1);
} // namespace dwarf
} // namespace llvm

namespace llvm {
namespace logicalview {

using LVSectionIndex = uint64_t;
using LVAddress = uint64_t;
using LVHalf = uint16_t;
using LVLevel = uint32_t;
using LVOffset = uint64_t;
using LVSigned = int64_t;
using LVUnsigned = uint64_t;
using LVSmall = uint8_t;

class LVElement;
class LVLine;
class LVLocation;
class LVLocationSymbol;
class LVObject;
class LVOperation;
class LVScope;
class LVSymbol;
class LVType;

class LVOptions;
class LVPatterns;

StringRef typeNone();
StringRef typeVoid();
StringRef typeInt();
StringRef typeUnknown();
StringRef emptyString();

using LVElementSetFunction = void (LVElement::*)();
using LVElementGetFunction = bool (LVElement::*)() const;
using LVLineSetFunction = void (LVLine::*)();
using LVLineGetFunction = bool (LVLine::*)() const;
using LVObjectSetFunction = void (LVObject::*)();
using LVObjectGetFunction = bool (LVObject::*)() const;
using LVScopeSetFunction = void (LVScope::*)();
using LVScopeGetFunction = bool (LVScope::*)() const;
using LVSymbolSetFunction = void (LVSymbol::*)();
using LVSymbolGetFunction = bool (LVSymbol::*)() const;
using LVTypeSetFunction = void (LVType::*)();
using LVTypeGetFunction = bool (LVType::*)() const;

using LVElements = SmallVector<LVElement *, 8>;
using LVLines = SmallVector<LVLine *, 8>;
using LVLocations = SmallVector<LVLocation *, 8>;
using LVOperations = SmallVector<LVOperation *, 8>;
using LVScopes = SmallVector<LVScope *, 8>;
using LVSymbols = SmallVector<LVSymbol *, 8>;
using LVTypes = SmallVector<LVType *, 8>;

using LVOffsets = SmallVector<LVOffset, 8>;

const LVAddress MaxAddress = std::numeric_limits<uint64_t>::max();

enum class LVBinaryType { NONE, ELF, COFF };
enum class LVComparePass { Missing, Added };

// Validate functions.
using LVValidLocation = bool (LVLocation::*)();

// Keep counters of objects.
struct LVCounter {
  unsigned Lines = 0;
  unsigned Scopes = 0;
  unsigned Symbols = 0;
  unsigned Types = 0;
  void reset() {
    Lines = 0;
    Scopes = 0;
    Symbols = 0;
    Types = 0;
  }
};

class LVObject {
  enum class Property {
    IsLocation,          // Location.
    IsGlobalReference,   // This object is being referenced from another CU.
    IsGeneratedName,     // The Object name was generated.
    IsResolved,          // Object has been resolved.
    IsResolvedName,      // Object name has been resolved.
    IsDiscarded,         // Object has been stripped by the linker.
    IsOptimized,         // Object has been optimized by the compiler.
    IsAdded,             // Object has been 'added'.
    IsMatched,           // Object has been matched to a given pattern.
    IsMissing,           // Object is 'missing'.
    IsMissingLink,       // Object is indirectly 'missing'.
    IsInCompare,         // In 'compare' mode.
    IsFileFromReference, // File ID from specification.
    IsLineFromReference, // Line No from specification.
    HasMoved,            // The object was moved from 'target' to 'reference'.
    HasPattern,          // The object has a pattern.
    IsFinalized,         // CodeView object is finalized.
    IsReferenced,        // CodeView object being referenced.
    HasCodeViewLocation, // CodeView object with debug location.
    LastEntry
  };
  // Typed bitvector with properties for this object.
  LVProperties<Property> Properties;

  LVOffset Offset = 0;
  uint32_t LineNumber = 0;
  LVLevel ScopeLevel = 0;
  union {
    dwarf::Tag Tag;
    dwarf::Attribute Attr;
    LVSmall Opcode;
  } TagAttrOpcode = {dwarf::DW_TAG_null};

  // The parent of this object (nullptr if the root scope). For locations,
  // the parent is a symbol object; otherwise it is a scope object.
  union {
    LVElement *Element;
    LVScope *Scope;
    LVSymbol *Symbol;
  } Parent = {nullptr};

  // We do not support any object duplication, as they are created by parsing
  // the debug information. There is only the case where we need a very basic
  // object, to manipulate its offset, line number and scope level. Allow the
  // copy constructor to create that object; it is used to print a reference
  // to another object and in the case of templates, to print its encoded args.
  LVObject(const LVObject &Object) {
#ifndef NDEBUG
    incID();
#endif
    Properties = Object.Properties;
    Offset = Object.Offset;
    LineNumber = Object.LineNumber;
    ScopeLevel = Object.ScopeLevel;
    TagAttrOpcode = Object.TagAttrOpcode;
    Parent = Object.Parent;
  }

#ifndef NDEBUG
  // This is an internal ID used for debugging logical elements. It is used
  // for cases where a unique offset within the binary input file is not
  // available.
  static uint64_t GID;
  uint64_t ID = 0;

  void incID() {
    ++GID;
    ID = GID;
  }
#endif

protected:
  // Get a string representation for the given number and discriminator.
  std::string lineAsString(uint32_t LineNumber, LVHalf Discriminator,
                           bool ShowZero) const;

  // Get a string representation for the given number.
  std::string referenceAsString(uint32_t LineNumber, bool Spaces) const;

  // Print the Filename or Pathname.
  // Empty implementation for those objects that do not have any user
  // source file references, such as debug locations.
  virtual void printFileIndex(raw_ostream &OS, bool Full = true) const {}

public:
  LVObject() {
#ifndef NDEBUG
    incID();
#endif
  }
  LVObject &operator=(const LVObject &) = delete;
  virtual ~LVObject() = default;

  PROPERTY(Property, IsLocation);
  PROPERTY(Property, IsGlobalReference);
  PROPERTY(Property, IsGeneratedName);
  PROPERTY(Property, IsResolved);
  PROPERTY(Property, IsResolvedName);
  PROPERTY(Property, IsDiscarded);
  PROPERTY(Property, IsOptimized);
  PROPERTY(Property, IsAdded);
  PROPERTY(Property, IsMatched);
  PROPERTY(Property, IsMissing);
  PROPERTY(Property, IsMissingLink);
  PROPERTY(Property, IsInCompare);
  PROPERTY(Property, IsFileFromReference);
  PROPERTY(Property, IsLineFromReference);
  PROPERTY(Property, HasMoved);
  PROPERTY(Property, HasPattern);
  PROPERTY(Property, IsFinalized);
  PROPERTY(Property, IsReferenced);
  PROPERTY(Property, HasCodeViewLocation);
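
  // Each PROPERTY entry is assumed (see the PROPERTY macro in LVSupport.h)
  // to expand into accessors over the 'Properties' bitvector; a usage sketch:
  //   Object->setIsOptimized();
  //   if (Object->getIsOptimized()) { /* ... */ }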

  // True if the object has been given a name, a type, a file, or a line
  // number.
  virtual bool isNamed() const { return false; }
  virtual bool isTyped() const { return false; }
  virtual bool isFiled() const { return false; }
  bool isLined() const { return LineNumber != 0; }

  // DWARF tag, attribute or expression opcode.
  dwarf::Tag getTag() const { return TagAttrOpcode.Tag; }
  void setTag(dwarf::Tag Tag) { TagAttrOpcode.Tag = Tag; }
  dwarf::Attribute getAttr() const { return TagAttrOpcode.Attr; }
  void setAttr(dwarf::Attribute Attr) { TagAttrOpcode.Attr = Attr; }
  LVSmall getOpcode() const { return TagAttrOpcode.Opcode; }
  void setOpcode(LVSmall Opcode) { TagAttrOpcode.Opcode = Opcode; }

  // DIE offset.
  LVOffset getOffset() const { return Offset; }
  void setOffset(LVOffset DieOffset) { Offset = DieOffset; }

  // Level where this object is located.
  LVLevel getLevel() const { return ScopeLevel; }
  void setLevel(LVLevel Level) { ScopeLevel = Level; }

  virtual StringRef getName() const { return StringRef(); }
  virtual void setName(StringRef ObjectName) {}

  LVElement *getParent() const {
    assert((!Parent.Element ||
            (Parent.Element && static_cast<LVElement *>(Parent.Element))) &&
           "Invalid element");
    return Parent.Element;
  }
  LVScope *getParentScope() const {
    assert((!Parent.Scope ||
            (Parent.Scope && static_cast<LVScope *>(Parent.Scope))) &&
           "Invalid scope");
    return Parent.Scope;
  }
  LVSymbol *getParentSymbol() const {
    assert((!Parent.Symbol ||
            (Parent.Symbol && static_cast<LVSymbol *>(Parent.Symbol))) &&
           "Invalid symbol");
    return Parent.Symbol;
  }
  void setParent(LVScope *Scope);
  void setParent(LVSymbol *Symbol);
  void resetParent() { Parent = {nullptr}; }

  virtual LVAddress getLowerAddress() const { return 0; }
  virtual void setLowerAddress(LVAddress Address) {}
  virtual LVAddress getUpperAddress() const { return 0; }
  virtual void setUpperAddress(LVAddress Address) {}

  uint32_t getLineNumber() const { return LineNumber; }
  void setLineNumber(uint32_t Number) { LineNumber = Number; }

  virtual const char *kind() const { return nullptr; }

  std::string indentAsString() const;
  std::string indentAsString(LVLevel Level) const;

  // String used as padding for printing objects with no line number.
  virtual std::string noLineAsString(bool ShowZero) const;

  // Line number for display; in the case of inlined functions, we use the
  // DW_AT_call_line attribute; otherwise we use the DW_AT_decl_line attribute.
  virtual std::string lineNumberAsString(bool ShowZero = false) const {
    return lineAsString(getLineNumber(), 0, ShowZero);
  }
  std::string lineNumberAsStringStripped(bool ShowZero = false) const;

  // This function prints the logical view to an output stream.
  // Split: Prints the compilation unit view to a file.
  // Match: Prints the object only if it satisfies the patterns collected
  // from the command line. See the '--select' option.
  // Print: Prints the object only if it satisfies the conditions specified by
  // the different '--print' options.
  // Full: Prints full information for objects representing debug locations,
  // aggregated scopes, compile unit, functions and namespaces.
  virtual Error doPrint(bool Split, bool Match, bool Print, raw_ostream &OS,
                        bool Full = true) const;
  void printAttributes(raw_ostream &OS, bool Full = true) const;
  void printAttributes(raw_ostream &OS, bool Full, StringRef Name,
                       LVObject *Parent, StringRef Value,
                       bool UseQuotes = false, bool PrintRef = false) const;

  // Mark branch as missing (current element and parents).
  void markBranchAsMissing();

  // Prints the common information for an object (name, type, etc).
  virtual void print(raw_ostream &OS, bool Full = true) const;
  // Prints additional information for an object, depending on its kind
  // (class attributes, debug ranges, files, directories, etc).
  virtual void printExtra(raw_ostream &OS, bool Full = true) const {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  virtual void dump() const { print(dbgs()); }
#endif

  uint64_t getID() const {
    return
#ifndef NDEBUG
        ID;
#else
        0;
#endif
  }
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_CORE_LVOBJECT_H
//===-- LVCodeViewReader.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVCodeViewReader class, which is used to describe a
// debug information (COFF) reader.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_CODEVIEWREADER_H
#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_CODEVIEWREADER_H

#include "llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
#include "llvm/DebugInfo/CodeView/DebugLinesSubsection.h"
#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h"
#include "llvm/DebugInfo/LogicalView/Readers/LVCodeViewVisitor.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/DebugInfo/PDB/PDB.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryItemStream.h"
#include "llvm/Support/BinaryStreamArray.h"

namespace llvm {
template <> struct BinaryItemTraits<codeview::CVType> {
  static size_t length(const codeview::CVType &Item) { return Item.length(); }
  static ArrayRef<uint8_t> bytes(const codeview::CVType &Item) {
    return Item.data();
  }
};

namespace codeview {
class LazyRandomTypeCollection;
}
namespace object {
struct coff_section;
}
namespace pdb {
class SymbolGroup;
}
namespace logicalview {

class LVElement;
class LVLine;
class LVScope;
class LVScopeCompileUnit;
class LVSymbol;
class LVType;
class LVTypeVisitor;
class LVSymbolVisitor;
class LVSymbolVisitorDelegate;

using LVNames = SmallVector<StringRef, 16>;

// The ELF reader uses the DWARF constants to create the logical elements.
// The DW_TAG_* and DW_AT_* are used to select the logical object and to
// set specific attributes, such as name, type, etc.
// As the CodeView constants are different from the DWARF constants, the
// CodeView reader maps them to the DWARF ones.

class LVCodeViewReader final : public LVBinaryReader {
  friend class LVTypeVisitor;
  friend class LVSymbolVisitor;
  friend class LVSymbolVisitorDelegate;

  using LVModules = std::vector<LVScope *>;
  LVModules Modules;

  // Encapsulates access to the input file and any dependent type server,
  // including any precompiled header object.
  llvm::pdb::InputFile Input;
  std::shared_ptr<llvm::pdb::InputFile> TypeServer;
  std::shared_ptr<LazyRandomTypeCollection> PrecompHeader;

  // Persistence data when loading a type server.
  ErrorOr<std::unique_ptr<MemoryBuffer>> BuffOrErr = nullptr;
  std::unique_ptr<MemoryBuffer> MemBuffer;
  std::unique_ptr<llvm::pdb::IPDBSession> Session;
  std::unique_ptr<llvm::pdb::NativeSession> PdbSession;

  // Persistence data when loading a precompiled header.
  BumpPtrAllocator BuilderAllocator;
  std::unique_ptr<AppendingTypeTableBuilder> Builder;
  std::unique_ptr<BinaryItemStream<CVType>> ItemStream;
  std::unique_ptr<BinaryStreamReader> ReaderPrecomp;
  std::vector<CVType> TypeArray;
  CVTypeArray TypeStream;
  CVTypeArray CVTypesPrecomp;

  // Persistence data when loading an executable file.
  std::unique_ptr<MemoryBuffer> BinaryBuffer;
  std::unique_ptr<llvm::object::Binary> BinaryExecutable;

  Error loadTargetInfo(const object::ObjectFile &Obj);
  Error loadTargetInfo(const llvm::pdb::PDBFile &Pdb);

  void mapRangeAddress(const object::ObjectFile &Obj,
                       const object::SectionRef &Section,
                       bool IsComdat) override;

  llvm::object::COFFObjectFile &getObj() { return Input.obj(); }
  llvm::pdb::PDBFile &getPdb() { return Input.pdb(); }
  bool isObj() const { return Input.isObj(); }
  bool isPdb() const { return Input.isPdb(); }
  StringRef getFileName() { return Input.getFilePath(); }

  // Pathname to executable image.
  std::string ExePath;

  LVOffset CurrentOffset = 0;
  int32_t CurrentModule = -1;

  using RelocMapTy = DenseMap<const llvm::object::coff_section *,
                              std::vector<llvm::object::RelocationRef>>;
  RelocMapTy RelocMap;

  // Object files have only one type stream that contains both types and ids.
  // Precompiled header objects don't contain an IPI stream. Use the TPI.
  LazyRandomTypeCollection &types() {
    return TypeServer ? TypeServer->types()
                      : (PrecompHeader ? *PrecompHeader : Input.types());
  }
  LazyRandomTypeCollection &ids() {
    return TypeServer ? TypeServer->ids()
                      : (PrecompHeader ? *PrecompHeader : Input.ids());
  }

  LVLogicalVisitor LogicalVisitor;

  Expected<StringRef>
  getFileNameForFileOffset(uint32_t FileOffset,
                           const llvm::pdb::SymbolGroup *SG = nullptr);
  void printRelocatedField(StringRef Label,
                           const llvm::object::coff_section *CoffSection,
                           uint32_t RelocOffset, uint32_t Offset,
                           StringRef *RelocSym);

  Error printFileNameForOffset(StringRef Label, uint32_t FileOffset,
                               const llvm::pdb::SymbolGroup *SG = nullptr);

  Error loadPrecompiledObject(PrecompRecord &Precomp, CVTypeArray &CVTypesObj);
  Error loadTypeServer(TypeServer2Record &TS);
  Error traverseTypes(llvm::pdb::PDBFile &Pdb, LazyRandomTypeCollection &Types,
                      LazyRandomTypeCollection &Ids);

  Error collectInlineeInfo(DebugInlineeLinesSubsectionRef &Lines,
                           const llvm::pdb::SymbolGroup *SG = nullptr);

  void cacheRelocations();
  Error resolveSymbol(const llvm::object::coff_section *CoffSection,
                      uint64_t Offset, llvm::object::SymbolRef &Sym);
  Error resolveSymbolName(const llvm::object::coff_section *CoffSection,
                          uint64_t Offset, StringRef &Name);
  Error traverseTypeSection(StringRef SectionName,
                            const llvm::object::SectionRef &Section);
  Error traverseSymbolSection(StringRef SectionName,
                              const llvm::object::SectionRef &Section);
  Error traverseInlineeLines(StringRef Subsection);

  DebugChecksumsSubsectionRef CVFileChecksumTable;
  DebugStringTableSubsectionRef CVStringTable;

  Error traverseSymbolsSubsection(StringRef Subsection,
                                  const llvm::object::SectionRef &Section,
                                  StringRef SectionContents);

  /// Given a .debug$S section, find the string table and file checksum table.
  /// This function is taken from COFFDumper.cpp.
  /// TODO: It can be moved to the COFF library.
  Error initializeFileAndStringTables(BinaryStreamReader &Reader);

  Error createLines(const FixedStreamArray<LineNumberEntry> &LineNumbers,
                    LVAddress Addendum, uint32_t Segment, uint32_t Begin,
                    uint32_t Size, uint32_t NameIndex,
                    const llvm::pdb::SymbolGroup *SG = nullptr);
  Error createScopes(llvm::object::COFFObjectFile &Obj);
  Error createScopes(llvm::pdb::PDBFile &Pdb);
  Error processModule();

protected:
  Error createScopes() override;
  void sortScopes() override;

public:
  LVCodeViewReader() = delete;
  LVCodeViewReader(StringRef Filename, StringRef FileFormatName,
                   llvm::object::COFFObjectFile &Obj, ScopedPrinter &W,
                   StringRef ExePath)
      : LVBinaryReader(Filename, FileFormatName, W, LVBinaryType::COFF),
        Input(&Obj), ExePath(ExePath), LogicalVisitor(this, W, Input) {}
  LVCodeViewReader(StringRef Filename, StringRef FileFormatName,
                   llvm::pdb::PDBFile &Pdb, ScopedPrinter &W, StringRef ExePath)
      : LVBinaryReader(Filename, FileFormatName, W, LVBinaryType::COFF),
        Input(&Pdb), ExePath(ExePath), LogicalVisitor(this, W, Input) {}
  LVCodeViewReader(const LVCodeViewReader &) = delete;
  LVCodeViewReader &operator=(const LVCodeViewReader &) = delete;
  ~LVCodeViewReader() = default;

  void getLinkageName(const llvm::object::coff_section *CoffSection,
                      uint32_t RelocOffset, uint32_t Offset,
                      StringRef *RelocSym);

  void addModule(LVScope *Scope) { Modules.push_back(Scope); }
  LVScope *getScopeForModule(uint32_t Modi) {
    return Modi >= Modules.size() ? nullptr : Modules[Modi];
  }

  // Get the string representation for the CodeView symbols.
  static StringRef getSymbolKindName(SymbolKind Kind);
  static std::string formatRegisterId(RegisterId Register, CPUType CPU);

  std::string getRegisterName(LVSmall Opcode,
                              ArrayRef<uint64_t> Operands) override;

  bool isSystemEntry(LVElement *Element, StringRef Name) const override;

  void print(raw_ostream &OS) const;
  void printRecords(raw_ostream &OS) const override {
    LogicalVisitor.printRecords(OS);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_CODEVIEWREADER_H
//===-- LVBinaryReader.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVBinaryReader class, which is used to describe a
// binary reader.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVBINARYREADER_H
#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVBINARYREADER_H

#include "llvm/DebugInfo/LogicalView/Core/LVReader.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/ObjectFile.h"

namespace llvm {
namespace logicalview {

constexpr bool UpdateHighAddress = false;

// Logical scope, Section address, Section index, IsComdat.
struct LVSymbolTableEntry final {
  LVScope *Scope = nullptr;
  LVAddress Address = 0;
  LVSectionIndex SectionIndex = 0;
  bool IsComdat = false;
  LVSymbolTableEntry() = default;
  LVSymbolTableEntry(LVScope *Scope, LVAddress Address,
                     LVSectionIndex SectionIndex, bool IsComdat)
      : Scope(Scope), Address(Address), SectionIndex(SectionIndex),
        IsComdat(IsComdat) {}
};

// Function names extracted from the object symbol table.
class LVSymbolTable final {
  using LVSymbolNames = std::map<std::string, LVSymbolTableEntry>;
  LVSymbolNames SymbolNames;

public:
  LVSymbolTable() = default;

  void add(StringRef Name, LVScope *Function, LVSectionIndex SectionIndex = 0);
  void add(StringRef Name, LVAddress Address, LVSectionIndex SectionIndex,
           bool IsComdat);
  LVSectionIndex update(LVScope *Function);

  const LVSymbolTableEntry &getEntry(StringRef Name);
  LVAddress getAddress(StringRef Name);
  LVSectionIndex getIndex(StringRef Name);
  bool getIsComdat(StringRef Name);

  void print(raw_ostream &OS);
};

class LVBinaryReader : public LVReader {
  // Function names extracted from the object symbol table.
  LVSymbolTable SymbolTable;

  // It contains the LVLineDebug elements representing the inlined logical
  // lines for the current compile unit, created by parsing the CodeView
  // S_INLINESITE symbol annotation data.
  using LVInlineeLine = std::map<LVScope *, std::unique_ptr<LVLines>>;
  LVInlineeLine CUInlineeLines;

  // Instruction lines for a logical scope. These instructions are fetched
  // during the merge with the debug lines.
  LVDoubleMap<LVSectionIndex, LVScope *, LVLines *> ScopeInstructions;

  // Links the scope with its first assembler address line.
  LVDoubleMap<LVSectionIndex, LVAddress, LVScope *> AssemblerMappings;

  // Mapping from virtual address to section.
  // The virtual address refers to the address where the section is loaded.
  using LVSectionAddresses = std::map<LVSectionIndex, object::SectionRef>;
  LVSectionAddresses SectionAddresses;

  void addSectionAddress(const object::SectionRef &Section) {
    SectionAddresses.try_emplace(Section.getAddress(), Section);
  }

  // Scopes with ranges for the current compile unit. It is used to find a
  // line given its exact or closest address. To support comdat functions,
  // all addresses for the same section are recorded in the same map.
  using LVSectionRanges = std::map<LVSectionIndex, std::unique_ptr<LVRange>>;
  LVSectionRanges SectionRanges;

  // Image base and virtual address for the executable file.
  uint64_t ImageBaseAddress = 0;
  uint64_t VirtualAddress = 0;

  // Object sections with machine code.
  using LVSections = std::map<LVSectionIndex, object::SectionRef>;
  LVSections Sections;

  std::vector<std::unique_ptr<LVLines>> DiscoveredLines;

protected:
  // It contains the LVLineDebug elements representing the logical lines for
  // the current compile unit, created by parsing the debug line section.
  LVLines CULines;

  std::unique_ptr<const MCRegisterInfo> MRI;
  std::unique_ptr<const MCAsmInfo> MAI;
  std::unique_ptr<const MCSubtargetInfo> STI;
  std::unique_ptr<const MCInstrInfo> MII;
  std::unique_ptr<const MCDisassembler> MD;
  std::unique_ptr<MCContext> MC;
  std::unique_ptr<MCInstPrinter> MIP;

  // Loads all info for the architecture of the provided object file.
  Error loadGenericTargetInfo(StringRef TheTriple, StringRef TheFeatures);

  virtual void mapRangeAddress(const object::ObjectFile &Obj) {}
  virtual void mapRangeAddress(const object::ObjectFile &Obj,
                               const object::SectionRef &Section,
                               bool IsComdat) {}

  // Create a mapping from virtual address to section.
  void mapVirtualAddress(const object::ObjectFile &Obj);
  void mapVirtualAddress(const object::COFFObjectFile &COFFObj);

  Expected<std::pair<LVSectionIndex, object::SectionRef>>
  getSection(LVScope *Scope, LVAddress Address, LVSectionIndex SectionIndex);

  void addSectionRange(LVSectionIndex SectionIndex, LVScope *Scope);
  void addSectionRange(LVSectionIndex SectionIndex, LVScope *Scope,
                       LVAddress LowerAddress, LVAddress UpperAddress);
  LVRange *getSectionRanges(LVSectionIndex SectionIndex);

  void includeInlineeLines(LVSectionIndex SectionIndex, LVScope *Function);

  Error createInstructions();
  Error createInstructions(LVScope *Function, LVSectionIndex SectionIndex);
  Error createInstructions(LVScope *Function, LVSectionIndex SectionIndex,
                           const LVNameInfo &NameInfo);

  void processLines(LVLines *DebugLines, LVSectionIndex SectionIndex);
  void processLines(LVLines *DebugLines, LVSectionIndex SectionIndex,
                    LVScope *Function);

public:
  LVBinaryReader() = delete;
  LVBinaryReader(StringRef Filename, StringRef FileFormatName, ScopedPrinter &W,
                 LVBinaryType BinaryType)
      : LVReader(Filename, FileFormatName, W, BinaryType) {}
  LVBinaryReader(const LVBinaryReader &) = delete;
  LVBinaryReader &operator=(const LVBinaryReader &) = delete;
  virtual ~LVBinaryReader() = default;

  void addInlineeLines(LVScope *Scope, LVLines &Lines) {
    CUInlineeLines.emplace(Scope, std::make_unique<LVLines>(std::move(Lines)));
  }

  // Convert Segment::Offset pair to absolute address.
  LVAddress linearAddress(uint16_t Segment, uint32_t Offset,
                          LVAddress Addendum = 0) {
    return ImageBaseAddress + (Segment * VirtualAddress) + Offset + Addendum;
  }
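
  // Worked example (a sketch): with ImageBaseAddress = 0x400000 and
  // VirtualAddress = 0x1000, linearAddress(/*Segment=*/2, /*Offset=*/0x10)
  // yields 0x400000 + 2 * 0x1000 + 0x10 = 0x402010.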

  void addToSymbolTable(StringRef Name, LVScope *Function,
                        LVSectionIndex SectionIndex = 0);
  void addToSymbolTable(StringRef Name, LVAddress Address,
                        LVSectionIndex SectionIndex, bool IsComdat);
  LVSectionIndex updateSymbolTable(LVScope *Function);

  const LVSymbolTableEntry &getSymbolTableEntry(StringRef Name);
  LVAddress getSymbolTableAddress(StringRef Name);
  LVSectionIndex getSymbolTableIndex(StringRef Name);
  bool getSymbolTableIsComdat(StringRef Name);

  LVSectionIndex getSectionIndex(LVScope *Scope) override {
    return Scope ? getSymbolTableIndex(Scope->getLinkageName())
                 : DotTextSectionIndex;
  }

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVBINARYREADER_H
//===-- LVCodeViewVisitor.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVCodeViewVisitor class, which is used to describe a
// debug information (CodeView) visitor.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_CODEVIEWVISITOR_H
#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_CODEVIEWVISITOR_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/CodeView/SymbolDumpDelegate.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h"
#include "llvm/DebugInfo/PDB/Native/InputFile.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include <stack>
#include <utility>

namespace llvm {
namespace logicalview {

using namespace llvm::codeview;

class LVCodeViewReader;
class LVLogicalVisitor;
struct LVShared;

class LVTypeVisitor final : public TypeVisitorCallbacks {
  ScopedPrinter &W;
  LVLogicalVisitor *LogicalVisitor;
  LazyRandomTypeCollection &Types;
  LazyRandomTypeCollection &Ids;
  uint32_t StreamIdx;
  LVShared *Shared = nullptr;

  // In a PDB, a type index may refer to a type (TPI) or an item ID (IPI).
  // In a COFF or PDB (/Z7), the type index always refers to a type (TPI).
  // When creating logical elements, we must access the correct element
  // table, while searching for a type index.
  bool HasIds = false;

  // Current type index during the types traversal.
  TypeIndex CurrentTypeIndex = TypeIndex::None();

  void printTypeIndex(StringRef FieldName, TypeIndex TI,
                      uint32_t StreamIdx) const;

public:
  LVTypeVisitor(ScopedPrinter &W, LVLogicalVisitor *LogicalVisitor,
                LazyRandomTypeCollection &Types, LazyRandomTypeCollection &Ids,
                uint32_t StreamIdx, LVShared *Shared)
      : TypeVisitorCallbacks(), W(W), LogicalVisitor(LogicalVisitor),
        Types(Types), Ids(Ids), StreamIdx(StreamIdx), Shared(Shared) {
    HasIds = &Types != &Ids;
  }

  Error visitTypeBegin(CVType &Record) override;
  Error visitTypeBegin(CVType &Record, TypeIndex TI) override;
  Error visitMemberBegin(CVMemberRecord &Record) override;
  Error visitMemberEnd(CVMemberRecord &Record) override;
  Error visitUnknownMember(CVMemberRecord &Record) override;

  Error visitKnownRecord(CVType &Record, BuildInfoRecord &Args) override;
  Error visitKnownRecord(CVType &Record, ClassRecord &Class) override;
  Error visitKnownRecord(CVType &Record, EnumRecord &Enum) override;
  Error visitKnownRecord(CVType &Record, FuncIdRecord &Func) override;
  Error visitKnownRecord(CVType &Record, ProcedureRecord &Proc) override;
  Error visitKnownRecord(CVType &Record, StringIdRecord &String) override;
  Error visitKnownRecord(CVType &Record, UdtSourceLineRecord &Line) override;
  Error visitKnownRecord(CVType &Record, UnionRecord &Union) override;
  Error visitUnknownType(CVType &Record) override;
};

class LVSymbolVisitorDelegate final : public SymbolVisitorDelegate {
  LVCodeViewReader *Reader;
  const llvm::object::coff_section *CoffSection;
  StringRef SectionContents;

public:
  LVSymbolVisitorDelegate(LVCodeViewReader *Reader,
                          const llvm::object::SectionRef &Section,
                          const llvm::object::COFFObjectFile *Obj,
                          StringRef SectionContents)
      : Reader(Reader), SectionContents(SectionContents) {
    CoffSection = Obj->getCOFFSection(Section);
  }

  uint32_t getRecordOffset(BinaryStreamReader Reader) override {
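    // The reader's remaining data starts at the current record; its distance
    // from the start of the section contents is the record offset.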
    ArrayRef<uint8_t> Data;
    if (Error Err = Reader.readLongestContiguousChunk(Data)) {
      llvm::consumeError(std::move(Err));
      return 0;
    }
    return Data.data() - SectionContents.bytes_begin();
  }

  void printRelocatedField(StringRef Label, uint32_t RelocOffset,
                           uint32_t Offset, StringRef *RelocSym = nullptr);

  void getLinkageName(uint32_t RelocOffset, uint32_t Offset,
                      StringRef *RelocSym = nullptr);

  StringRef getFileNameForFileOffset(uint32_t FileOffset) override;
  DebugStringTableSubsectionRef getStringTable() override;
};

class LVElement;
class LVScope;
class LVSymbol;
class LVType;

// Visitor for CodeView symbol streams found in COFF object files and PDB files.
class LVSymbolVisitor final : public SymbolVisitorCallbacks {
  LVCodeViewReader *Reader;
  ScopedPrinter &W;
  LVLogicalVisitor *LogicalVisitor;
  LazyRandomTypeCollection &Types;
  LazyRandomTypeCollection &Ids;
  LVSymbolVisitorDelegate *ObjDelegate;
  LVShared *Shared;

  // Symbol offset when processing PDB streams.
  uint32_t CurrentOffset = 0;
  // Current object name collected from S_OBJNAME.
  StringRef CurrentObjectName;
  // Last symbol processed by S_LOCAL.
  LVSymbol *LocalSymbol = nullptr;

  bool HasIds;
  bool InFunctionScope = false;
  bool IsCompileUnit = false;

  // Register for the locals and parameters symbols in the current frame.
  RegisterId LocalFrameRegister = RegisterId::NONE;
  RegisterId ParamFrameRegister = RegisterId::NONE;

  void printLocalVariableAddrRange(const LocalVariableAddrRange &Range,
                                   uint32_t RelocationOffset);
  void printLocalVariableAddrGap(ArrayRef<LocalVariableAddrGap> Gaps);
  void printTypeIndex(StringRef FieldName, TypeIndex TI) const;

  // Return true if this symbol is a Compile Unit.
  bool symbolIsCompileUnit(SymbolKind Kind) {
    switch (Kind) {
    case SymbolKind::S_COMPILE2:
    case SymbolKind::S_COMPILE3:
      return true;
    default:
      return false;
    }
  }

  // Determine symbol kind (local or parameter).
  void determineSymbolKind(LVSymbol *Symbol, RegisterId Register) {
    if (Register == LocalFrameRegister) {
      Symbol->setIsVariable();
      return;
    }
    if (Register == ParamFrameRegister) {
      Symbol->setIsParameter();
      return;
    }
    // Otherwise, assume it is a variable.
    Symbol->setIsVariable();
  }
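
  // For example (a sketch): in a frame where LocalFrameRegister is RSP and
  // ParamFrameRegister is RBP, a symbol addressed relative to RBP is marked
  // as a parameter, while one addressed relative to RSP is marked as a
  // variable.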

public:
  LVSymbolVisitor(LVCodeViewReader *Reader, ScopedPrinter &W,
                  LVLogicalVisitor *LogicalVisitor,
                  LazyRandomTypeCollection &Types,
                  LazyRandomTypeCollection &Ids,
                  LVSymbolVisitorDelegate *ObjDelegate, LVShared *Shared)
      : Reader(Reader), W(W), LogicalVisitor(LogicalVisitor), Types(Types),
        Ids(Ids), ObjDelegate(ObjDelegate), Shared(Shared) {
    HasIds = &Types != &Ids;
  }

  Error visitSymbolBegin(CVSymbol &Record) override;
  Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) override;
  Error visitSymbolEnd(CVSymbol &Record) override;
  Error visitUnknownSymbol(CVSymbol &Record) override;

  Error visitKnownRecord(CVSymbol &Record, BlockSym &Block) override;
  Error visitKnownRecord(CVSymbol &Record, BPRelativeSym &Local) override;
  Error visitKnownRecord(CVSymbol &Record, BuildInfoSym &BuildInfo) override;
  Error visitKnownRecord(CVSymbol &Record, Compile2Sym &Compile2) override;
  Error visitKnownRecord(CVSymbol &Record, Compile3Sym &Compile3) override;
  Error visitKnownRecord(CVSymbol &Record, ConstantSym &Constant) override;
  Error visitKnownRecord(CVSymbol &Record, DataSym &Data) override;
  Error visitKnownRecord(CVSymbol &Record,
                         DefRangeFramePointerRelFullScopeSym
                             &DefRangeFramePointerRelFullScope) override;
  Error visitKnownRecord(
      CVSymbol &Record,
      DefRangeFramePointerRelSym &DefRangeFramePointerRel) override;
  Error visitKnownRecord(CVSymbol &Record,
                         DefRangeRegisterRelSym &DefRangeRegisterRel) override;
  Error visitKnownRecord(CVSymbol &Record,
                         DefRangeRegisterSym &DefRangeRegister) override;
  Error visitKnownRecord(
      CVSymbol &Record,
      DefRangeSubfieldRegisterSym &DefRangeSubfieldRegister) override;
  Error visitKnownRecord(CVSymbol &Record,
                         DefRangeSubfieldSym &DefRangeSubfield) override;
  Error visitKnownRecord(CVSymbol &Record, DefRangeSym &DefRange) override;
  Error visitKnownRecord(CVSymbol &Record, FrameProcSym &FrameProc) override;
  Error visitKnownRecord(CVSymbol &Record, InlineSiteSym &InlineSite) override;
  Error visitKnownRecord(CVSymbol &Record, LocalSym &Local) override;
  Error visitKnownRecord(CVSymbol &Record, ObjNameSym &ObjName) override;
  Error visitKnownRecord(CVSymbol &Record, ProcSym &Proc) override;
  Error visitKnownRecord(CVSymbol &Record, RegRelativeSym &Local) override;
  Error visitKnownRecord(CVSymbol &Record, ScopeEndSym &ScopeEnd) override;
  Error visitKnownRecord(CVSymbol &Record, Thunk32Sym &Thunk) override;
  Error visitKnownRecord(CVSymbol &Record, UDTSym &UDT) override;
  Error visitKnownRecord(CVSymbol &Record, UsingNamespaceSym &UN) override;
};

// Visitor for CodeView types and symbols to populate elements.
class LVLogicalVisitor final {
  LVCodeViewReader *Reader;
  ScopedPrinter &W;

  // Encapsulates access to the input file and any dependent type server,
  // including any precompiled header object.
  llvm::pdb::InputFile &Input;
  std::shared_ptr<llvm::pdb::InputFile> TypeServer = nullptr;
  std::shared_ptr<LazyRandomTypeCollection> PrecompHeader = nullptr;

  std::shared_ptr<LVShared> Shared;

  // Object files have only one type stream that contains both types and ids.
  // Precompiled header objects don't contain an IPI stream. Use the TPI.
  LazyRandomTypeCollection &types() {
    return TypeServer ? TypeServer->types()
                      : (PrecompHeader ? *PrecompHeader : Input.types());
  }
  LazyRandomTypeCollection &ids() {
    return TypeServer ? TypeServer->ids()
                      : (PrecompHeader ? *PrecompHeader : Input.ids());
  }

  using LVScopeStack = std::stack<LVScope *>;
  LVScopeStack ScopeStack;
  LVScope *ReaderParent = nullptr;
  LVScope *ReaderScope = nullptr;
  bool InCompileUnitScope = false;

  // Allow processing of argument list.
  bool ProcessArgumentList = false;
  StringRef OverloadedMethodName;
  std::string CompileUnitName;

  // Inlined functions source information.
  using LVInlineeEntry = std::pair<uint32_t, StringRef>;
  using LVInlineeInfo = std::map<TypeIndex, LVInlineeEntry>;
  LVInlineeInfo InlineeInfo;

  Error visitFieldListMemberStream(TypeIndex TI, LVElement *Element,
                                   ArrayRef<uint8_t> FieldList);

  LVType *createBaseType(TypeIndex TI, StringRef TypeName);
  LVType *createPointerType(TypeIndex TI, StringRef TypeName);
  LVSymbol *createParameter(TypeIndex TI, StringRef Name, LVScope *Parent);
  LVSymbol *createParameter(LVElement *Element, StringRef Name,
                            LVScope *Parent);
  void createDataMember(CVMemberRecord &Record, LVScope *Parent, StringRef Name,
                        TypeIndex Type, MemberAccess Access);
  void createParents(StringRef ScopedName, LVElement *Element);

public:
  LVLogicalVisitor(LVCodeViewReader *Reader, ScopedPrinter &W,
                   llvm::pdb::InputFile &Input);

  // Current elements during the processing of a RecordType or RecordSymbol.
  // They are shared with the SymbolVisitor.
  LVElement *CurrentElement = nullptr;
  LVScope *CurrentScope = nullptr;
  LVSymbol *CurrentSymbol = nullptr;
  LVType *CurrentType = nullptr;

  // Input source in the case of type server or precompiled header.
  void setInput(std::shared_ptr<llvm::pdb::InputFile> TypeServer) {
    this->TypeServer = TypeServer;
  }
  void setInput(std::shared_ptr<LazyRandomTypeCollection> PrecompHeader) {
    this->PrecompHeader = PrecompHeader;
  }

  void addInlineeInfo(TypeIndex TI, uint32_t LineNumber, StringRef Filename) {
    InlineeInfo.emplace(std::piecewise_construct, std::forward_as_tuple(TI),
                        std::forward_as_tuple(LineNumber, Filename));
  }

  void printTypeIndex(StringRef FieldName, TypeIndex TI, uint32_t StreamIdx);
  void printMemberAttributes(MemberAttributes Attrs);
  void printMemberAttributes(MemberAccess Access, MethodKind Kind,
                             MethodOptions Options);

  LVElement *createElement(TypeLeafKind Kind);
  LVElement *createElement(SymbolKind Kind);
  LVElement *createElement(TypeIndex TI, TypeLeafKind Kind);

  // Break down the annotation byte code and calculate code and line offsets.
  Error inlineSiteAnnotation(LVScope *AbstractFunction,
                             LVScope *InlinedFunction,
                             InlineSiteSym &InlineSite);

  void pushScope(LVScope *Scope) {
    ScopeStack.push(ReaderParent);
    ReaderParent = ReaderScope;
    ReaderScope = Scope;
  }
  void popScope() {
    ReaderScope = ReaderParent;
    ReaderParent = ScopeStack.top();
    ScopeStack.pop();
  }
  void closeScope() {
    if (InCompileUnitScope) {
      InCompileUnitScope = false;
      popScope();
    }
  }
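
  // Nesting sketch: visiting CompileUnit -> Function -> Block performs
  //   pushScope(CU); pushScope(Function); pushScope(Block);
  // and each matching popScope() restores ReaderScope and ReaderParent in
  // LIFO order from the internal stack.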
  void setRoot(LVScope *Root) { ReaderScope = Root; }

  void addElement(LVScope *Scope, bool IsCompileUnit);
  void addElement(LVSymbol *Symbol);
  void addElement(LVType *Type);

  std::string getCompileUnitName() { return CompileUnitName; }
  void setCompileUnitName(std::string Name) {
    CompileUnitName = std::move(Name);
  }

  LVElement *getElement(uint32_t StreamIdx, TypeIndex TI,
                        LVScope *Parent = nullptr);
  LVShared *getShared() { return Shared.get(); }

  LVScope *getReaderScope() const { return ReaderScope; }

  void printTypeBegin(CVType &Record, TypeIndex TI, LVElement *Element,
                      uint32_t StreamIdx);
  void printTypeEnd(CVType &Record);
  void printMemberBegin(CVMemberRecord &Record, TypeIndex TI,
                        LVElement *Element, uint32_t StreamIdx);
  void printMemberEnd(CVMemberRecord &Record);

  void startProcessArgumentList() { ProcessArgumentList = true; }
  void stopProcessArgumentList() { ProcessArgumentList = false; }

  void processFiles();
  void processLines();
  void processNamespaces();

  void printRecords(raw_ostream &OS) const;

  Error visitUnknownType(CVType &Record, TypeIndex TI);
  Error visitKnownRecord(CVType &Record, ArgListRecord &Args, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, ArrayRecord &AT, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, BitFieldRecord &BF, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, BuildInfoRecord &BI, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, ClassRecord &Class, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, EnumRecord &Enum, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, FieldListRecord &FieldList,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownRecord(CVType &Record, FuncIdRecord &Func, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, LabelRecord &LR, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, ModifierRecord &Mod, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, MemberFuncIdRecord &Id, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, MemberFunctionRecord &MF, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, MethodOverloadListRecord &Overloads,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownRecord(CVType &Record, PointerRecord &Ptr, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, ProcedureRecord &Proc, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, UnionRecord &Union, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, TypeServer2Record &TS, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, VFTableRecord &VFT, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, VFTableShapeRecord &Shape,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownRecord(CVType &Record, StringListRecord &Strings,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownRecord(CVType &Record, StringIdRecord &String, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, UdtSourceLineRecord &SourceLine,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownRecord(CVType &Record, UdtModSourceLineRecord &ModSourceLine,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownRecord(CVType &Record, PrecompRecord &Precomp, TypeIndex TI,
                         LVElement *Element);
  Error visitKnownRecord(CVType &Record, EndPrecompRecord &EndPrecomp,
                         TypeIndex TI, LVElement *Element);

  Error visitUnknownMember(CVMemberRecord &Record, TypeIndex TI);
  Error visitKnownMember(CVMemberRecord &Record, BaseClassRecord &Base,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, DataMemberRecord &Field,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, EnumeratorRecord &Enum,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, ListContinuationRecord &Cont,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, NestedTypeRecord &Nested,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, OneMethodRecord &Method,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, OverloadedMethodRecord &Method,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, StaticDataMemberRecord &Field,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, VFPtrRecord &VFTable,
                         TypeIndex TI, LVElement *Element);
  Error visitKnownMember(CVMemberRecord &Record, VirtualBaseClassRecord &Base,
                         TypeIndex TI, LVElement *Element);

  template <typename T>
  Error visitKnownMember(CVMemberRecord &Record,
                         TypeVisitorCallbacks &Callbacks, TypeIndex TI,
                         LVElement *Element) {
    TypeRecordKind RK = static_cast<TypeRecordKind>(Record.Kind);
    T KnownRecord(RK);
    if (Error Err = Callbacks.visitKnownMember(Record, KnownRecord))
      return Err;
    if (Error Err = visitKnownMember(Record, KnownRecord, TI, Element))
      return Err;
    return Error::success();
  }

  template <typename T>
  Error visitKnownRecord(CVType &Record, TypeIndex TI, LVElement *Element) {
    TypeRecordKind RK = static_cast<TypeRecordKind>(Record.kind());
    T KnownRecord(RK);
    if (Error Err = TypeDeserializer::deserializeAs(
            const_cast<CVType &>(Record), KnownRecord))
      return Err;
    if (Error Err = visitKnownRecord(Record, KnownRecord, TI, Element))
      return Err;
    return Error::success();
  }

  Error visitMemberRecord(CVMemberRecord &Record,
                          TypeVisitorCallbacks &Callbacks, TypeIndex TI,
                          LVElement *Element);
  Error finishVisitation(CVType &Record, TypeIndex TI, LVElement *Element);
};

} // namespace logicalview
} // namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_CODEVIEWVISITOR_H

//===-- LVELFReader.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LVELFReader class, which is used to describe a
// debug information (DWARF) reader.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVELFREADER_H
#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVELFREADER_H

#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/LogicalView/Readers/LVBinaryReader.h"
#include <unordered_set>

namespace llvm {
namespace logicalview {

class LVElement;
class LVLine;
class LVScopeCompileUnit;
class LVSymbol;
class LVType;

using AttributeSpec = DWARFAbbreviationDeclaration::AttributeSpec;

class LVELFReader final : public LVBinaryReader {
  object::ObjectFile &Obj;

  // Indicates whether range data is available; in the case of split DWARF,
  // any reference to ranges is valid only if the skeleton DIE has been loaded.
  bool RangesDataAvailable = false;
  LVAddress CUBaseAddress = 0;
  LVAddress CUHighAddress = 0;

  // Current elements during the processing of a DIE.
  LVElement *CurrentElement = nullptr;
  LVScope *CurrentScope = nullptr;
  LVSymbol *CurrentSymbol = nullptr;
  LVType *CurrentType = nullptr;
  LVOffset CurrentOffset = 0;
  LVOffset CurrentEndOffset = 0;

  // In DWARF v4, the files are 1-indexed.
  // In DWARF v5, the files are 0-indexed.
  // The ELF reader expects the indexes to be 1-indexed.
  bool IncrementFileIndex = false;

  // Address ranges collected for current DIE.
  std::vector<LVAddressRange> CurrentRanges;

  // Symbols with locations for current compile unit.
  LVSymbols SymbolsWithLocations;

  // Global Offsets (Offset, Element).
  LVOffsetElementMap GlobalOffsets;

  // Low PC and High PC values for the DIE being processed.
  LVAddress CurrentLowPC = 0;
  LVAddress CurrentHighPC = 0;
  bool FoundLowPC = false;
  bool FoundHighPC = false;

  // Cross references (Elements).
  using LVElementSet = std::unordered_set<LVElement *>;
  struct LVElementEntry {
    LVElement *Element;
    LVElementSet References;
    LVElementSet Types;
    LVElementEntry(LVElement *Element = nullptr) : Element(Element) {}
  };
  using LVElementReference = std::unordered_map<LVOffset, LVElementEntry>;
  LVElementReference ElementTable;

  Error loadTargetInfo(const object::ObjectFile &Obj);

  void mapRangeAddress(const object::ObjectFile &Obj) override;

  LVElement *createElement(dwarf::Tag Tag);
  void traverseDieAndChildren(DWARFDie &DIE, LVScope *Parent,
                              DWARFDie &SkeletonDie);
  // Process the attributes for the given DIE.
  LVScope *processOneDie(const DWARFDie &InputDIE, LVScope *Parent,
                         DWARFDie &SkeletonDie);
  void processOneAttribute(const DWARFDie &Die, LVOffset *OffsetPtr,
                           const AttributeSpec &AttrSpec);
  void createLineAndFileRecords(const DWARFDebugLine::LineTable *Lines);
  void processLocationGaps();

  // Add offset to global map.
  void addGlobalOffset(LVOffset Offset) {
    if (GlobalOffsets.find(Offset) == GlobalOffsets.end())
      // Just associate the DIE offset with a null element, as we do not
      // know if the referenced element has been created.
      GlobalOffsets.emplace(Offset, nullptr);
  }

  // Remove offset from global map.
  void removeGlobalOffset(LVOffset Offset) {
    LVOffsetElementMap::iterator Iter = GlobalOffsets.find(Offset);
    if (Iter != GlobalOffsets.end())
      GlobalOffsets.erase(Iter);
  }

  // Get the location information for DW_AT_data_member_location.
  void processLocationMember(dwarf::Attribute Attr,
                             const DWARFFormValue &FormValue,
                             const DWARFDie &Die, uint64_t OffsetOnEntry);
  void processLocationList(dwarf::Attribute Attr,
                           const DWARFFormValue &FormValue, const DWARFDie &Die,
                           uint64_t OffsetOnEntry,
                           bool CallSiteLocation = false);
  void updateReference(dwarf::Attribute Attr, const DWARFFormValue &FormValue);

  // Get an element given the DIE offset.
  LVElement *getElementForOffset(LVOffset Offset, LVElement *Element,
                                 bool IsType);

protected:
  Error createScopes() override;
  void sortScopes() override;

public:
  LVELFReader() = delete;
  LVELFReader(StringRef Filename, StringRef FileFormatName,
              object::ObjectFile &Obj, ScopedPrinter &W)
      : LVBinaryReader(Filename, FileFormatName, W, LVBinaryType::ELF),
        Obj(Obj) {}
  LVELFReader(const LVELFReader &) = delete;
  LVELFReader &operator=(const LVELFReader &) = delete;
  ~LVELFReader() = default;

  LVAddress getCUBaseAddress() const { return CUBaseAddress; }
  void setCUBaseAddress(LVAddress Address) { CUBaseAddress = Address; }
  LVAddress getCUHighAddress() const { return CUHighAddress; }
  void setCUHighAddress(LVAddress Address) { CUHighAddress = Address; }

  const LVSymbols &GetSymbolsWithLocations() const {
    return SymbolsWithLocations;
  }

  std::string getRegisterName(LVSmall Opcode,
                              ArrayRef<uint64_t> Operands) override;

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVELFREADER_H

//===-- LVReaderHandler.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class implements the Reader handler.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVREADERHANDLER_H
#define LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVREADERHANDLER_H

#include "llvm/ADT/PointerUnion.h"
#include "llvm/DebugInfo/LogicalView/Core/LVReader.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/MachOUniversal.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ScopedPrinter.h"
#include <string>
#include <vector>

namespace llvm {
namespace logicalview {

using LVReaders = std::vector<std::unique_ptr<LVReader>>;
using ArgVector = std::vector<std::string>;
using PdbOrObj = PointerUnion<object::ObjectFile *, pdb::PDBFile *>;

// This class performs the following tasks:
// - Creates a logical reader for every binary file on the command line;
//   each reader parses the debug information and creates a high-level
//   logical view representation containing scopes, symbols, types and lines.
// - Prints and compares the logical views.
//
// The supported binary formats are: ELF, Mach-O and CodeView.
class LVReaderHandler {
  ArgVector &Objects;
  ScopedPrinter &W;
  raw_ostream &OS;
  LVReaders TheReaders;

  Error createReaders();
  Error printReaders();
  Error compareReaders();

  Error handleArchive(LVReaders &Readers, StringRef Filename,
                      object::Archive &Arch);
  Error handleBuffer(LVReaders &Readers, StringRef Filename,
                     MemoryBufferRef Buffer, StringRef ExePath = {});
  Error handleFile(LVReaders &Readers, StringRef Filename,
                   StringRef ExePath = {});
  Error handleMach(LVReaders &Readers, StringRef Filename,
                   object::MachOUniversalBinary &Mach);
  Error handleObject(LVReaders &Readers, StringRef Filename,
                     object::Binary &Binary);
  Error handleObject(LVReaders &Readers, StringRef Filename, StringRef Buffer,
                     StringRef ExePath);

  Error createReader(StringRef Filename, LVReaders &Readers, PdbOrObj &Input,
                     StringRef FileFormatName, StringRef ExePath = {});

public:
  LVReaderHandler() = delete;
  LVReaderHandler(ArgVector &Objects, ScopedPrinter &W,
                  LVOptions &ReaderOptions)
      : Objects(Objects), W(W), OS(W.getOStream()) {
    setOptions(&ReaderOptions);
  }
  LVReaderHandler(const LVReaderHandler &) = delete;
  LVReaderHandler &operator=(const LVReaderHandler &) = delete;

  Error createReader(StringRef Filename, LVReaders &Readers) {
    return handleFile(Readers, Filename);
  }
  Error process();

  Expected<std::unique_ptr<LVReader>> createReader(StringRef Pathname) {
    LVReaders Readers;
    if (Error Err = createReader(Pathname, Readers))
      return std::move(Err);
    return std::move(Readers[0]);
  }

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { print(dbgs()); }
#endif
};
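
// Illustrative driver (a sketch; the LVOptions configuration and the input
// path are assumptions, not taken from this header):
//   std::vector<std::string> Files = {"a.out"};
//   ScopedPrinter W(llvm::outs());
//   LVOptions ReaderOptions;                 // selection/printing options
//   LVReaderHandler Handler(Files, W, ReaderOptions);
//   if (Error Err = Handler.process())       // create, print, compare readers
//     llvm::logAllUnhandledErrors(std::move(Err), llvm::errs());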

} // end namespace logicalview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_LOGICALVIEW_READERS_LVREADERHANDLER_H

//===- DIContext.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines DIContext, an abstract data structure that holds
// debug information data.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_DICONTEXT_H
#define LLVM_DEBUGINFO_DICONTEXT_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {

/// A format-neutral container for source line information.
struct DILineInfo {
  // DILineInfo contains "<invalid>" for the function/filename it cannot fetch.
  static constexpr const char *const BadString = "<invalid>";
  // Use "??" instead of "<invalid>" to make our output closer to addr2line.
  static constexpr const char *const Addr2LineBadString = "??";
  std::string FileName;
  std::string FunctionName;
  std::string StartFileName;
  // Full source corresponding to `FileName`
  std::optional<StringRef> Source;
  // Source code for this particular line
  // (used when the full `Source` is not available).
  std::optional<StringRef> LineSource;
  uint32_t Line = 0;
  uint32_t Column = 0;
  uint32_t StartLine = 0;
  std::optional<uint64_t> StartAddress;

  // DWARF-specific.
  uint32_t Discriminator = 0;

  DILineInfo()
      : FileName(BadString), FunctionName(BadString), StartFileName(BadString) {
  }

  bool operator==(const DILineInfo &RHS) const {
    return Line == RHS.Line && Column == RHS.Column &&
           FileName == RHS.FileName && FunctionName == RHS.FunctionName &&
           StartFileName == RHS.StartFileName && StartLine == RHS.StartLine &&
           Discriminator == RHS.Discriminator;
  }

  bool operator!=(const DILineInfo &RHS) const { return !(*this == RHS); }

  bool operator<(const DILineInfo &RHS) const {
    return std::tie(FileName, FunctionName, StartFileName, Line, Column,
                    StartLine, Discriminator) <
           std::tie(RHS.FileName, RHS.FunctionName, RHS.StartFileName, RHS.Line,
                    RHS.Column, RHS.StartLine, RHS.Discriminator);
  }

  explicit operator bool() const { return *this != DILineInfo(); }

  void dump(raw_ostream &OS) {
    OS << "Line info: ";
    if (FileName != BadString)
      OS << "file '" << FileName << "', ";
    if (FunctionName != BadString)
      OS << "function '" << FunctionName << "', ";
    OS << "line " << Line << ", ";
    OS << "column " << Column << ", ";
    if (StartFileName != BadString)
      OS << "start file '" << StartFileName << "', ";
    OS << "start line " << StartLine << '\n';
  }
};
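
// Illustrative check (a sketch; `Context` and `Addr` are assumed to exist):
// a default-constructed DILineInfo carries the "<invalid>" placeholders, and
// operator bool distinguishes it from a real result.
//   DILineInfo Info = Context->getLineInfoForAddress(Addr);
//   if (Info)
//     llvm::outs() << Info.FileName << ':' << Info.Line << '\n';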

using DILineInfoTable = SmallVector<std::pair<uint64_t, DILineInfo>, 16>;

/// A format-neutral container for inlined code description.
class DIInliningInfo {
  SmallVector<DILineInfo, 4> Frames;

public:
  DIInliningInfo() = default;

  /// Returns the frame at `Index`. Frames are stored in bottom-up
  /// (leaf-to-root) order with increasing index.
  const DILineInfo &getFrame(unsigned Index) const {
    assert(Index < Frames.size());
    return Frames[Index];
  }

  DILineInfo *getMutableFrame(unsigned Index) {
    assert(Index < Frames.size());
    return &Frames[Index];
  }

  uint32_t getNumberOfFrames() const { return Frames.size(); }

  void addFrame(const DILineInfo &Frame) { Frames.push_back(Frame); }

  void resize(unsigned i) { Frames.resize(i); }
};
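
// Illustrative helper (a sketch, not part of the original header): walks the
// inline chain from the innermost (leaf) frame outward, relying on the
// bottom-up order documented for getFrame().
inline void printInlineChain(const DIInliningInfo &Info, raw_ostream &OS) {
  for (uint32_t I = 0, E = Info.getNumberOfFrames(); I != E; ++I) {
    const DILineInfo &Frame = Info.getFrame(I);
    OS << (I == 0 ? "leaf " : "inlined into ") << Frame.FunctionName << " at "
       << Frame.FileName << ':' << Frame.Line << '\n';
  }
}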

/// Container for description of a global variable.
struct DIGlobal {
  std::string Name;
  uint64_t Start = 0;
  uint64_t Size = 0;
  std::string DeclFile;
  uint64_t DeclLine = 0;

  DIGlobal() : Name(DILineInfo::BadString) {}
};

struct DILocal {
  std::string FunctionName;
  std::string Name;
  std::string DeclFile;
  uint64_t DeclLine = 0;
  std::optional<int64_t> FrameOffset;
  std::optional<uint64_t> Size;
  std::optional<uint64_t> TagOffset;
};

/// A DINameKind is passed to name search methods to specify a
/// preference regarding the type of name resolution the caller wants.
enum class DINameKind { None, ShortName, LinkageName };

/// Controls which fields of DILineInfo container should be filled
/// with data.
struct DILineInfoSpecifier {
  enum class FileLineInfoKind {
    None,
    // RawValue is whatever the compiler stored in the filename table.  Could be
    // a full path, could be something else.
    RawValue,
    BaseNameOnly,
    // Relative to the compilation directory.
    RelativeFilePath,
    AbsoluteFilePath
  };
  using FunctionNameKind = DINameKind;

  FileLineInfoKind FLIKind;
  FunctionNameKind FNKind;

  DILineInfoSpecifier(FileLineInfoKind FLIKind = FileLineInfoKind::RawValue,
                      FunctionNameKind FNKind = FunctionNameKind::None)
      : FLIKind(FLIKind), FNKind(FNKind) {}

  inline bool operator==(const DILineInfoSpecifier &RHS) const {
    return FLIKind == RHS.FLIKind && FNKind == RHS.FNKind;
  }
};
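
// Example (illustrative): request absolute file paths and short function
// names when querying a DIContext.
//   DILineInfoSpecifier Spec(
//       DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath,
//       DINameKind::ShortName);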

/// This is just a helper to programmatically construct DIDumpType.
enum DIDumpTypeCounter {
#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION)        \
  DIDT_ID_##ENUM_NAME,
#include "llvm/BinaryFormat/Dwarf.def"
#undef HANDLE_DWARF_SECTION
  DIDT_ID_UUID,
  DIDT_ID_Count
};
static_assert(DIDT_ID_Count <= 32, "section types overflow storage");

/// Selects which debug sections get dumped.
enum DIDumpType : unsigned {
  DIDT_Null,
  DIDT_All = ~0U,
#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION)        \
  DIDT_##ENUM_NAME = 1U << DIDT_ID_##ENUM_NAME,
#include "llvm/BinaryFormat/Dwarf.def"
#undef HANDLE_DWARF_SECTION
  DIDT_UUID = 1 << DIDT_ID_UUID,
};
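
// Illustrative composition (DIDT_DebugInfo and DIDT_DebugLine are enumerators
// generated from Dwarf.def, shown here as an assumed example): each value is
// a single-bit mask, so section selections combine with bitwise-or.
//   unsigned Selection = DIDT_DebugInfo | DIDT_DebugLine;
//   bool WantsLineTable = (Selection & DIDT_DebugLine) != 0;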

/// Container for dump options that control which debug information will be
/// dumped.
struct DIDumpOptions {
  unsigned DumpType = DIDT_All;
  unsigned ChildRecurseDepth = -1U;
  unsigned ParentRecurseDepth = -1U;
  uint16_t Version = 0; // DWARF version to assume when extracting.
  uint8_t AddrSize = 4; // Address byte size to assume when extracting.
  bool ShowAddresses = true;
  bool ShowChildren = false;
  bool ShowParents = false;
  bool ShowForm = false;
  bool SummarizeTypes = false;
  bool Verbose = false;
  bool DisplayRawContents = false;
  bool IsEH = false;
  std::function<llvm::StringRef(uint64_t DwarfRegNum, bool IsEH)>
      GetNameForDWARFReg;

  /// Return default option set for printing a single DIE without children.
  static DIDumpOptions getForSingleDIE() {
    DIDumpOptions Opts;
    Opts.ChildRecurseDepth = 0;
    Opts.ParentRecurseDepth = 0;
    return Opts;
  }

  /// Return the options with RecurseDepth set to 0 unless explicitly required.
  DIDumpOptions noImplicitRecursion() const {
    DIDumpOptions Opts = *this;
    if (ChildRecurseDepth == -1U && !ShowChildren)
      Opts.ChildRecurseDepth = 0;
    if (ParentRecurseDepth == -1U && !ShowParents)
      Opts.ParentRecurseDepth = 0;
    return Opts;
  }

  std::function<void(Error)> RecoverableErrorHandler =
      WithColor::defaultErrorHandler;
  std::function<void(Error)> WarningHandler = WithColor::defaultWarningHandler;
};
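
// Illustrative usage (a sketch; `Context` is an assumed DIContext pointer):
//   DIDumpOptions Opts = DIDumpOptions::getForSingleDIE();
//   Opts.Verbose = true;               // also show forms and offsets
//   Context->dump(llvm::outs(), Opts);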

class DIContext {
public:
  enum DIContextKind { CK_DWARF, CK_PDB, CK_BTF };

  DIContext(DIContextKind K) : Kind(K) {}
  virtual ~DIContext() = default;

  DIContextKind getKind() const { return Kind; }

  virtual void dump(raw_ostream &OS, DIDumpOptions DumpOpts) = 0;

  virtual bool verify(raw_ostream &OS, DIDumpOptions DumpOpts = {}) {
    // No verifier? Just say things went well.
    return true;
  }

  virtual DILineInfo getLineInfoForAddress(
      object::SectionedAddress Address,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
  virtual DILineInfo
  getLineInfoForDataAddress(object::SectionedAddress Address) = 0;
  virtual DILineInfoTable getLineInfoForAddressRange(
      object::SectionedAddress Address, uint64_t Size,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
  virtual DIInliningInfo getInliningInfoForAddress(
      object::SectionedAddress Address,
      DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;

  virtual std::vector<DILocal>
  getLocalsForAddress(object::SectionedAddress Address) = 0;

private:
  const DIContextKind Kind;
};

/// An interface for inquiring about the load address of a loaded object
/// file, used by DIContext implementations when applying relocations on
/// the fly.
class LoadedObjectInfo {
protected:
  LoadedObjectInfo() = default;
  LoadedObjectInfo(const LoadedObjectInfo &) = default;

public:
  virtual ~LoadedObjectInfo() = default;

  /// Obtain the Load Address of a section by SectionRef.
  ///
  /// Calculate the address of the given section.
  /// The section need not be present in the local address space. The addresses
  /// need to be consistent with the addresses used to query the DIContext and
  /// the output of this function should be deterministic, i.e. repeated calls
  /// with the same Sec should give the same address.
  virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const {
    return 0;
  }

  /// If conveniently available, return the content of the given Section.
  ///
  /// When the section is available in the local address space, in relocated
  /// (loaded) form, e.g. because it was relocated by a JIT for execution, this
  /// function should provide the contents of said section in `Data`. If the
  /// loaded section is not available, or the cost of retrieving it would be
  /// prohibitive, this function should return false. In that case, relocations
  /// will be read from the local (unrelocated) object file and applied on the
  /// fly. Note that this method is used purely for optimization purposes in the
  /// common case of JITting in the local address space, so returning false
  /// should always be correct.
  virtual bool getLoadedSectionContents(const object::SectionRef &Sec,
                                        StringRef &Data) const {
    return false;
  }

  // FIXME: This is untested and unused anywhere in the LLVM project, it's
  // used/needed by Julia (an external project). It should have some coverage
  // (at least tests, but ideally example functionality).
  /// Obtain a copy of this LoadedObjectInfo.
  virtual std::unique_ptr<LoadedObjectInfo> clone() const = 0;
};

template <typename Derived, typename Base = LoadedObjectInfo>
struct LoadedObjectInfoHelper : Base {
protected:
  LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
  LoadedObjectInfoHelper() = default;

public:
  template <typename... Ts>
  LoadedObjectInfoHelper(Ts &&...Args) : Base(std::forward<Ts>(Args)...) {}

  std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
    return std::make_unique<Derived>(static_cast<const Derived &>(*this));
  }
};
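
// Illustrative CRTP use (a sketch; MyInfo is a hypothetical client type):
// deriving through the helper stamps out clone() automatically.
//   struct MyInfo : LoadedObjectInfoHelper<MyInfo> {
//     uint64_t getSectionLoadAddress(const object::SectionRef &) const override {
//       return 0x400000; // example fixed load address
//     }
//   };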

} // end namespace llvm

#endif // LLVM_DEBUGINFO_DICONTEXT_H

//===- CodeView.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines constants and basic types describing CodeView debug information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H
#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H

#include <cinttypes>
#include <type_traits>

#include "llvm/Support/Endian.h"

namespace llvm {
namespace codeview {

/// Distinguishes individual records in a .debug$T or .debug$P section or in a
/// PDB type stream. The documentation and headers refer to this as the "leaf"
/// type.
enum class TypeRecordKind : uint16_t {
#define TYPE_RECORD(lf_ename, value, name) name = value,
#include "CodeViewTypes.def"
};

/// Duplicate copy of the above enum, but using the official CV names. Useful
/// for reference purposes and when dealing with unknown record types.
enum TypeLeafKind : uint16_t {
#define CV_TYPE(name, val) name = val,
#include "CodeViewTypes.def"
};

/// Distinguishes individual records in the Symbols subsection of a .debug$S
/// section. Equivalent to SYM_ENUM_e in cvinfo.h.
enum class SymbolRecordKind : uint16_t {
#define SYMBOL_RECORD(lf_ename, value, name) name = value,
#include "CodeViewSymbols.def"
};

/// Duplicate copy of the above enum, but using the official CV names. Useful
/// for reference purposes and when dealing with unknown record types.
enum SymbolKind : uint16_t {
#define CV_SYMBOL(name, val) name = val,
#include "CodeViewSymbols.def"
};

#define CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(Class)                            \
  inline Class operator|(Class a, Class b) {                                   \
    return static_cast<Class>(static_cast<std::underlying_type_t<Class>>(a) |  \
                              static_cast<std::underlying_type_t<Class>>(b));  \
  }                                                                            \
  inline Class operator&(Class a, Class b) {                                   \
    return static_cast<Class>(static_cast<std::underlying_type_t<Class>>(a) &  \
                              static_cast<std::underlying_type_t<Class>>(b));  \
  }                                                                            \
  inline Class operator~(Class a) {                                            \
    return static_cast<Class>(~static_cast<std::underlying_type_t<Class>>(a)); \
  }                                                                            \
  inline Class &operator|=(Class &a, Class b) {                                \
    a = a | b;                                                                 \
    return a;                                                                  \
  }                                                                            \
  inline Class &operator&=(Class &a, Class b) {                                \
    a = a & b;                                                                 \
    return a;                                                                  \
  }
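
// Illustrative use of the generated operators (ClassOptions is defined below
// with these operators applied):
//   ClassOptions Opts = ClassOptions::Packed | ClassOptions::Nested;
//   bool IsNested = (Opts & ClassOptions::Nested) != ClassOptions::None;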

/// These values correspond to the CV_CPU_TYPE_e enumeration, and are documented
/// here: https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
enum class CPUType : uint16_t {
  Intel8080 = 0x0,
  Intel8086 = 0x1,
  Intel80286 = 0x2,
  Intel80386 = 0x3,
  Intel80486 = 0x4,
  Pentium = 0x5,
  PentiumPro = 0x6,
  Pentium3 = 0x7,
  MIPS = 0x10,
  MIPS16 = 0x11,
  MIPS32 = 0x12,
  MIPS64 = 0x13,
  MIPSI = 0x14,
  MIPSII = 0x15,
  MIPSIII = 0x16,
  MIPSIV = 0x17,
  MIPSV = 0x18,
  M68000 = 0x20,
  M68010 = 0x21,
  M68020 = 0x22,
  M68030 = 0x23,
  M68040 = 0x24,
  Alpha = 0x30,
  Alpha21164 = 0x31,
  Alpha21164A = 0x32,
  Alpha21264 = 0x33,
  Alpha21364 = 0x34,
  PPC601 = 0x40,
  PPC603 = 0x41,
  PPC604 = 0x42,
  PPC620 = 0x43,
  PPCFP = 0x44,
  PPCBE = 0x45,
  SH3 = 0x50,
  SH3E = 0x51,
  SH3DSP = 0x52,
  SH4 = 0x53,
  SHMedia = 0x54,
  ARM3 = 0x60,
  ARM4 = 0x61,
  ARM4T = 0x62,
  ARM5 = 0x63,
  ARM5T = 0x64,
  ARM6 = 0x65,
  ARM_XMAC = 0x66,
  ARM_WMMX = 0x67,
  ARM7 = 0x68,
  Omni = 0x70,
  Ia64 = 0x80,
  Ia64_2 = 0x81,
  CEE = 0x90,
  AM33 = 0xa0,
  M32R = 0xb0,
  TriCore = 0xc0,
  X64 = 0xd0,
  EBC = 0xe0,
  Thumb = 0xf0,
  ARMNT = 0xf4,
  ARM64 = 0xf6,
  HybridX86ARM64 = 0xf7,
  ARM64EC = 0xf8,
  ARM64X = 0xf9,
  D3D11_Shader = 0x100,
};

/// These values correspond to the CV_CFL_LANG enumeration in the Microsoft
/// Debug Interface Access SDK
enum SourceLanguage : uint8_t {
  C = 0x00,
  Cpp = 0x01,
  Fortran = 0x02,
  Masm = 0x03,
  Pascal = 0x04,
  Basic = 0x05,
  Cobol = 0x06,
  Link = 0x07,
  Cvtres = 0x08,
  Cvtpgd = 0x09,
  CSharp = 0x0a,
  VB = 0x0b,
  ILAsm = 0x0c,
  Java = 0x0d,
  JScript = 0x0e,
  MSIL = 0x0f,
  HLSL = 0x10,
  ObjC = 0x11,
  ObjCpp = 0x12,

  Rust = 0x15,

  /// The DMD & Swift compilers emit 'D' and 'S', respectively, for the CV
  /// source language. Microsoft does not have enumerators for them yet.
  D = 'D',
  Swift = 'S',
};

/// These values correspond to the CV_call_e enumeration, and are documented
/// at the following locations:
///   https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
///   https://msdn.microsoft.com/en-us/library/windows/desktop/ms680207(v=vs.85).aspx
///
enum class CallingConvention : uint8_t {
  NearC = 0x00,       // near right to left push, caller pops stack
  FarC = 0x01,        // far right to left push, caller pops stack
  NearPascal = 0x02,  // near left to right push, callee pops stack
  FarPascal = 0x03,   // far left to right push, callee pops stack
  NearFast = 0x04,    // near left to right push with regs, callee pops stack
  FarFast = 0x05,     // far left to right push with regs, callee pops stack
  NearStdCall = 0x07, // near standard call
  FarStdCall = 0x08,  // far standard call
  NearSysCall = 0x09, // near sys call
  FarSysCall = 0x0a,  // far sys call
  ThisCall = 0x0b,    // this call (this passed in register)
  MipsCall = 0x0c,    // Mips call
  Generic = 0x0d,     // Generic call sequence
  AlphaCall = 0x0e,   // Alpha call
  PpcCall = 0x0f,     // PPC call
  SHCall = 0x10,      // Hitachi SuperH call
  ArmCall = 0x11,     // ARM call
  AM33Call = 0x12,    // AM33 call
  TriCall = 0x13,     // TriCore Call
  SH5Call = 0x14,     // Hitachi SuperH-5 call
  M32RCall = 0x15,    // M32R Call
  ClrCall = 0x16,     // clr call
  Inline =
      0x17, // Marker for routines always inlined and thus lacking a convention
  NearVector = 0x18 // near left to right push with regs, callee pops stack
};

enum class ClassOptions : uint16_t {
  None = 0x0000,
  Packed = 0x0001,
  HasConstructorOrDestructor = 0x0002,
  HasOverloadedOperator = 0x0004,
  Nested = 0x0008,
  ContainsNestedClass = 0x0010,
  HasOverloadedAssignmentOperator = 0x0020,
  HasConversionOperator = 0x0040,
  ForwardReference = 0x0080,
  Scoped = 0x0100,
  HasUniqueName = 0x0200,
  Sealed = 0x0400,
  Intrinsic = 0x2000
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ClassOptions)

enum class FrameProcedureOptions : uint32_t {
  None = 0x00000000,
  HasAlloca = 0x00000001,
  HasSetJmp = 0x00000002,
  HasLongJmp = 0x00000004,
  HasInlineAssembly = 0x00000008,
  HasExceptionHandling = 0x00000010,
  MarkedInline = 0x00000020,
  HasStructuredExceptionHandling = 0x00000040,
  Naked = 0x00000080,
  SecurityChecks = 0x00000100,
  AsynchronousExceptionHandling = 0x00000200,
  NoStackOrderingForSecurityChecks = 0x00000400,
  Inlined = 0x00000800,
  StrictSecurityChecks = 0x00001000,
  SafeBuffers = 0x00002000,
  EncodedLocalBasePointerMask = 0x0000C000,
  EncodedParamBasePointerMask = 0x00030000,
  ProfileGuidedOptimization = 0x00040000,
  ValidProfileCounts = 0x00080000,
  OptimizedForSpeed = 0x00100000,
  GuardCfg = 0x00200000,
  GuardCfw = 0x00400000
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(FrameProcedureOptions)

enum class FunctionOptions : uint8_t {
  None = 0x00,
  CxxReturnUdt = 0x01,
  Constructor = 0x02,
  ConstructorWithVirtualBases = 0x04
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(FunctionOptions)

enum class HfaKind : uint8_t {
  None = 0x00,
  Float = 0x01,
  Double = 0x02,
  Other = 0x03
};

/// Source-level access specifier. (CV_access_e)
enum class MemberAccess : uint8_t {
  None = 0,
  Private = 1,
  Protected = 2,
  Public = 3
};

/// Part of member attribute flags. (CV_methodprop_e)
enum class MethodKind : uint8_t {
  Vanilla = 0x00,
  Virtual = 0x01,
  Static = 0x02,
  Friend = 0x03,
  IntroducingVirtual = 0x04,
  PureVirtual = 0x05,
  PureIntroducingVirtual = 0x06
};

/// Equivalent to CV_fldattr_t bitfield.
enum class MethodOptions : uint16_t {
  None = 0x0000,
  AccessMask = 0x0003,
  MethodKindMask = 0x001c,
  Pseudo = 0x0020,
  NoInherit = 0x0040,
  NoConstruct = 0x0080,
  CompilerGenerated = 0x0100,
  Sealed = 0x0200
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(MethodOptions)

/// Equivalent to CV_LABEL_TYPE_e.
enum class LabelType : uint16_t {
  Near = 0x0,
  Far = 0x4,
};

/// Equivalent to CV_modifier_t.
/// TODO: Add flag for _Atomic modifier
enum class ModifierOptions : uint16_t {
  None = 0x0000,
  Const = 0x0001,
  Volatile = 0x0002,
  Unaligned = 0x0004
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ModifierOptions)

// If the subsection kind has this bit set, then the linker should ignore it.
enum : uint32_t { SubsectionIgnoreFlag = 0x80000000 };

enum class DebugSubsectionKind : uint32_t {
  None = 0,
  Symbols = 0xf1,
  Lines = 0xf2,
  StringTable = 0xf3,
  FileChecksums = 0xf4,
  FrameData = 0xf5,
  InlineeLines = 0xf6,
  CrossScopeImports = 0xf7,
  CrossScopeExports = 0xf8,

  // These appear to relate to .Net assembly info.
  ILLines = 0xf9,
  FuncMDTokenMap = 0xfa,
  TypeMDTokenMap = 0xfb,
  MergedAssemblyInput = 0xfc,

  CoffSymbolRVA = 0xfd,

  XfgHashType = 0xff,
  XfgHashVirtual = 0x100,
};

/// Equivalent to CV_ptrtype_e.
enum class PointerKind : uint8_t {
  Near16 = 0x00,                // 16 bit pointer
  Far16 = 0x01,                 // 16:16 far pointer
  Huge16 = 0x02,                // 16:16 huge pointer
  BasedOnSegment = 0x03,        // based on segment
  BasedOnValue = 0x04,          // based on value of base
  BasedOnSegmentValue = 0x05,   // based on segment value of base
  BasedOnAddress = 0x06,        // based on address of base
  BasedOnSegmentAddress = 0x07, // based on segment address of base
  BasedOnType = 0x08,           // based on type
  BasedOnSelf = 0x09,           // based on self
  Near32 = 0x0a,                // 32 bit pointer
  Far32 = 0x0b,                 // 16:32 pointer
  Near64 = 0x0c                 // 64 bit pointer
};

/// Equivalent to CV_ptrmode_e.
enum class PointerMode : uint8_t {
  Pointer = 0x00,                 // "normal" pointer
  LValueReference = 0x01,         // "old" reference
  PointerToDataMember = 0x02,     // pointer to data member
  PointerToMemberFunction = 0x03, // pointer to member function
  RValueReference = 0x04          // r-value reference
};

/// Equivalent to misc lfPointerAttr bitfields.
enum class PointerOptions : uint32_t {
  None = 0x00000000,
  Flat32 = 0x00000100,
  Volatile = 0x00000200,
  Const = 0x00000400,
  Unaligned = 0x00000800,
  Restrict = 0x00001000,
  WinRTSmartPointer = 0x00080000,
  LValueRefThisPointer = 0x00100000,
  RValueRefThisPointer = 0x00200000
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(PointerOptions)

/// Equivalent to CV_pmtype_e.
enum class PointerToMemberRepresentation : uint16_t {
  Unknown = 0x00,                     // not specified (pre VC8)
  SingleInheritanceData = 0x01,       // member data, single inheritance
  MultipleInheritanceData = 0x02,     // member data, multiple inheritance
  VirtualInheritanceData = 0x03,      // member data, virtual inheritance
  GeneralData = 0x04,                 // member data, most general
  SingleInheritanceFunction = 0x05,   // member function, single inheritance
  MultipleInheritanceFunction = 0x06, // member function, multiple inheritance
  VirtualInheritanceFunction = 0x07,  // member function, virtual inheritance
  GeneralFunction = 0x08              // member function, most general
};

enum class VFTableSlotKind : uint8_t {
  Near16 = 0x00,
  Far16 = 0x01,
  This = 0x02,
  Outer = 0x03,
  Meta = 0x04,
  Near = 0x05,
  Far = 0x06
};

enum class WindowsRTClassKind : uint8_t {
  None = 0x00,
  RefClass = 0x01,
  ValueClass = 0x02,
  Interface = 0x03
};

/// Corresponds to CV_LVARFLAGS bitfield.
enum class LocalSymFlags : uint16_t {
  None = 0,
  IsParameter = 1 << 0,
  IsAddressTaken = 1 << 1,
  IsCompilerGenerated = 1 << 2,
  IsAggregate = 1 << 3,
  IsAggregated = 1 << 4,
  IsAliased = 1 << 5,
  IsAlias = 1 << 6,
  IsReturnValue = 1 << 7,
  IsOptimizedOut = 1 << 8,
  IsEnregisteredGlobal = 1 << 9,
  IsEnregisteredStatic = 1 << 10,
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(LocalSymFlags)

/// Corresponds to the CV_PUBSYMFLAGS bitfield.
enum class PublicSymFlags : uint32_t {
  None = 0,
  Code = 1 << 0,
  Function = 1 << 1,
  Managed = 1 << 2,
  MSIL = 1 << 3,
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(PublicSymFlags)

/// Corresponds to the CV_PROCFLAGS bitfield.
enum class ProcSymFlags : uint8_t {
  None = 0,
  HasFP = 1 << 0,
  HasIRET = 1 << 1,
  HasFRET = 1 << 2,
  IsNoReturn = 1 << 3,
  IsUnreachable = 1 << 4,
  HasCustomCallingConv = 1 << 5,
  IsNoInline = 1 << 6,
  HasOptimizedDebugInfo = 1 << 7,
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ProcSymFlags)

/// Corresponds to COMPILESYM2::Flags bitfield.
enum class CompileSym2Flags : uint32_t {
  None = 0,
  SourceLanguageMask = 0xFF,
  EC = 1 << 8,
  NoDbgInfo = 1 << 9,
  LTCG = 1 << 10,
  NoDataAlign = 1 << 11,
  ManagedPresent = 1 << 12,
  SecurityChecks = 1 << 13,
  HotPatch = 1 << 14,
  CVTCIL = 1 << 15,
  MSILModule = 1 << 16,
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym2Flags)

/// Corresponds to COMPILESYM3::Flags bitfield.
enum class CompileSym3Flags : uint32_t {
  None = 0,
  SourceLanguageMask = 0xFF,
  EC = 1 << 8,
  NoDbgInfo = 1 << 9,
  LTCG = 1 << 10,
  NoDataAlign = 1 << 11,
  ManagedPresent = 1 << 12,
  SecurityChecks = 1 << 13,
  HotPatch = 1 << 14,
  CVTCIL = 1 << 15,
  MSILModule = 1 << 16,
  Sdl = 1 << 17,
  PGO = 1 << 18,
  Exp = 1 << 19,
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym3Flags)

enum class ExportFlags : uint16_t {
  None = 0,
  IsConstant = 1 << 0,
  IsData = 1 << 1,
  IsPrivate = 1 << 2,
  HasNoName = 1 << 3,
  HasExplicitOrdinal = 1 << 4,
  IsForwarder = 1 << 5
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ExportFlags)

// Corresponds to BinaryAnnotationOpcode enum.
enum class BinaryAnnotationsOpCode : uint32_t {
  Invalid,
  CodeOffset,
  ChangeCodeOffsetBase,
  ChangeCodeOffset,
  ChangeCodeLength,
  ChangeFile,
  ChangeLineOffset,
  ChangeLineEndDelta,
  ChangeRangeKind,
  ChangeColumnStart,
  ChangeColumnEndDelta,
  ChangeCodeOffsetAndLineOffset,
  ChangeCodeLengthAndCodeOffset,
  ChangeColumnEnd,
};

// Corresponds to CV_cookietype_e enum.
enum class FrameCookieKind : uint8_t {
  Copy,
  XorStackPointer,
  XorFramePointer,
  XorR13,
};

// Corresponds to CV_HREG_e enum.
enum class RegisterId : uint16_t {
#define CV_REGISTERS_ALL
#define CV_REGISTER(name, value) name = value,
#include "CodeViewRegisters.def"
#undef CV_REGISTER
#undef CV_REGISTERS_ALL
};

// Register ids are shared between architectures in CodeView, so the CPUType is
// needed to map a register id to its name.
struct CPURegister {
  CPURegister() = delete;
  CPURegister(CPUType Cpu, codeview::RegisterId Reg) : Cpu(Cpu), Reg(Reg) {}
  CPUType Cpu;
  RegisterId Reg;
};

/// Two-bit value indicating which register is the designated frame pointer
/// register. Appears in the S_FRAMEPROC record flags.
enum class EncodedFramePtrReg : uint8_t {
  None = 0,
  StackPtr = 1,
  FramePtr = 2,
  BasePtr = 3,
};

RegisterId decodeFramePtrReg(EncodedFramePtrReg EncodedReg, CPUType CPU);

EncodedFramePtrReg encodeFramePtrReg(RegisterId Reg, CPUType CPU);
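
// Illustrative round trip (a sketch; the exact mapping lives in the
// implementation, not in this header): on X64 the encoded FramePtr value is
// expected to decode to RBP, and encoding RBP yields FramePtr again.
//   RegisterId R =
//       decodeFramePtrReg(EncodedFramePtrReg::FramePtr, CPUType::X64);
//   EncodedFramePtrReg E = encodeFramePtrReg(R, CPUType::X64);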

/// These values correspond to the THUNK_ORDINAL enumeration.
enum class ThunkOrdinal : uint8_t {
  Standard,
  ThisAdjustor,
  Vcall,
  Pcode,
  UnknownLoad,
  TrampIncremental,
  BranchIsland
};

enum class TrampolineType : uint16_t { TrampIncremental, BranchIsland };

// These values correspond to the CV_SourceChksum_t enumeration.
enum class FileChecksumKind : uint8_t { None, MD5, SHA1, SHA256 };

enum LineFlags : uint16_t {
  LF_None = 0,
  LF_HaveColumns = 1, // CV_LINES_HAVE_COLUMNS
};

/// Data in the SUBSEC_FRAMEDATA subsection.
struct FrameData {
  support::ulittle32_t RvaStart;
  support::ulittle32_t CodeSize;
  support::ulittle32_t LocalSize;
  support::ulittle32_t ParamsSize;
  support::ulittle32_t MaxStackSize;
  support::ulittle32_t FrameFunc;
  support::ulittle16_t PrologSize;
  support::ulittle16_t SavedRegsSize;
  support::ulittle32_t Flags;
  enum : uint32_t {
    HasSEH = 1 << 0,
    HasEH = 1 << 1,
    IsFunctionStart = 1 << 2,
  };
};

// Corresponds to LocalIdAndGlobalIdPair structure.
// This structure allows cross-referencing between PDBs.  For
// example, when a PDB is being built during compilation it is not yet known
// what other modules may end up in the PDB at link time.  So certain types of
// IDs may clash between the various compile-time PDBs.  For each affected
// module, a subsection would be put into the PDB containing a mapping from its
// local IDs to a single ID namespace for all items in the PDB file.
struct CrossModuleExport {
  support::ulittle32_t Local;
  support::ulittle32_t Global;
};
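
// Illustrative entry (values are made up): local id 0x1003 in this module
// maps to the PDB-wide id 0x2441.
//   CrossModuleExport Entry;
//   Entry.Local = 0x1003;
//   Entry.Global = 0x2441;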

struct CrossModuleImport {
  support::ulittle32_t ModuleNameOffset;
  support::ulittle32_t Count; // Number of elements
  // support::ulittle32_t ids[Count]; // id from referenced module
};

enum class CodeViewContainer { ObjectFile, Pdb };

inline uint32_t alignOf(CodeViewContainer Container) {
  if (Container == CodeViewContainer::ObjectFile)
    return 1;
  return 4;
}
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H

//===- SymbolVisitorCallbackPipeline.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H

#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
#include "llvm/Support/Error.h"
#include <vector>

namespace llvm {
namespace codeview {

class SymbolVisitorCallbackPipeline : public SymbolVisitorCallbacks {
public:
  SymbolVisitorCallbackPipeline() = default;

  Error visitUnknownSymbol(CVSymbol &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitUnknownSymbol(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitSymbolBegin(Record, Offset))
        return EC;
    }
    return Error::success();
  }

  Error visitSymbolBegin(CVSymbol &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitSymbolBegin(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitSymbolEnd(CVSymbol &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitSymbolEnd(Record))
        return EC;
    }
    return Error::success();
  }

  void addCallbackToPipeline(SymbolVisitorCallbacks &Callbacks) {
    Pipeline.push_back(&Callbacks);
  }

#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override {               \
    for (auto *Visitor : Pipeline) {                                           \
      if (auto EC = Visitor->visitKnownRecord(CVR, Record))                    \
        return EC;                                                             \
    }                                                                          \
    return Error::success();                                                   \
  }
#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"

private:
  std::vector<SymbolVisitorCallbacks *> Pipeline;
};
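
// Illustrative wiring (a sketch; `Deserializer` and `Dumper` stand for any
// two SymbolVisitorCallbacks implementations): for every record, callbacks
// run in the order they were added.
//   SymbolVisitorCallbackPipeline Pipeline;
//   Pipeline.addCallbackToPipeline(Deserializer);
//   Pipeline.addCallbackToPipeline(Dumper);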

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKPIPELINE_H

//===- EnumTables.h - Enum to string conversion tables ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_ENUMTABLES_H
#define LLVM_DEBUGINFO_CODEVIEW_ENUMTABLES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include <cstdint>

namespace llvm {
template <typename T> struct EnumEntry;
namespace codeview {

ArrayRef<EnumEntry<SymbolKind>> getSymbolTypeNames();
ArrayRef<EnumEntry<TypeLeafKind>> getTypeLeafNames();
ArrayRef<EnumEntry<uint16_t>> getRegisterNames(CPUType Cpu);
ArrayRef<EnumEntry<uint32_t>> getPublicSymFlagNames();
ArrayRef<EnumEntry<uint8_t>> getProcSymFlagNames();
ArrayRef<EnumEntry<uint16_t>> getLocalFlagNames();
ArrayRef<EnumEntry<uint8_t>> getFrameCookieKindNames();
ArrayRef<EnumEntry<SourceLanguage>> getSourceLanguageNames();
ArrayRef<EnumEntry<uint32_t>> getCompileSym2FlagNames();
ArrayRef<EnumEntry<uint32_t>> getCompileSym3FlagNames();
ArrayRef<EnumEntry<uint32_t>> getFileChecksumNames();
ArrayRef<EnumEntry<unsigned>> getCPUTypeNames();
ArrayRef<EnumEntry<uint32_t>> getFrameProcSymFlagNames();
ArrayRef<EnumEntry<uint16_t>> getExportSymFlagNames();
ArrayRef<EnumEntry<uint32_t>> getModuleSubstreamKindNames();
ArrayRef<EnumEntry<uint8_t>> getThunkOrdinalNames();
ArrayRef<EnumEntry<uint16_t>> getTrampolineNames();
ArrayRef<EnumEntry<COFF::SectionCharacteristics>>
getImageSectionCharacteristicNames();
ArrayRef<EnumEntry<uint16_t>> getClassOptionNames();
ArrayRef<EnumEntry<uint8_t>> getMemberAccessNames();
ArrayRef<EnumEntry<uint16_t>> getMethodOptionNames();
ArrayRef<EnumEntry<uint16_t>> getMemberKindNames();
ArrayRef<EnumEntry<uint8_t>> getPtrKindNames();
ArrayRef<EnumEntry<uint8_t>> getPtrModeNames();
ArrayRef<EnumEntry<uint16_t>> getPtrMemberRepNames();
ArrayRef<EnumEntry<uint16_t>> getTypeModifierNames();
ArrayRef<EnumEntry<uint8_t>> getCallingConventions();
ArrayRef<EnumEntry<uint8_t>> getFunctionOptionEnum();
ArrayRef<EnumEntry<uint16_t>> getLabelTypeEnum();

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_ENUMTABLES_H

//===-- TypeDumpVisitor.h - CodeView type info dumper -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"

namespace llvm {
class ScopedPrinter;

namespace codeview {
class TypeIndex;
struct CVMemberRecord;
struct MemberAttributes;

class TypeCollection;

/// Dumper for CodeView type streams found in COFF object files and PDB files.
class TypeDumpVisitor : public TypeVisitorCallbacks {
public:
  TypeDumpVisitor(TypeCollection &TpiTypes, ScopedPrinter *W,
                  bool PrintRecordBytes)
      : W(W), PrintRecordBytes(PrintRecordBytes), TpiTypes(TpiTypes) {}

  /// When dumping types from an IPI stream in a PDB, a type index may refer to
  /// a type or an item ID. The dumper will look up the "name" of the index in
  /// the item collection if appropriate. If no IPI collection is set, it will
  /// use the TPI types, which is correct when dumping types from an object
  /// file (/Z7).
  void setIpiTypes(TypeCollection &Types) { IpiTypes = &Types; }

  void printTypeIndex(StringRef FieldName, TypeIndex TI) const;

  void printItemIndex(StringRef FieldName, TypeIndex TI) const;

  /// Action to take on unknown types. By default, they are ignored.
  Error visitUnknownType(CVType &Record) override;
  Error visitUnknownMember(CVMemberRecord &Record) override;

  /// Paired begin/end actions for all types. Receives all record data,
  /// including the fixed-length record prefix.
  Error visitTypeBegin(CVType &Record) override;
  Error visitTypeBegin(CVType &Record, TypeIndex Index) override;
  Error visitTypeEnd(CVType &Record) override;
  Error visitMemberBegin(CVMemberRecord &Record) override;
  Error visitMemberEnd(CVMemberRecord &Record) override;

#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"

private:
  void printMemberAttributes(MemberAttributes Attrs);
  void printMemberAttributes(MemberAccess Access, MethodKind Kind,
                             MethodOptions Options);

  /// Get the collection of indices for the stream that we are dumping. If
  /// IpiTypes is set, then we must be dumping an item (IPI) stream. This will
  /// also always get the appropriate collection for printing item names.
  TypeCollection &getSourceTypes() const {
    return IpiTypes ? *IpiTypes : TpiTypes;
  }

  ScopedPrinter *W;

  bool PrintRecordBytes = false;

  TypeCollection &TpiTypes;
  TypeCollection *IpiTypes = nullptr;
};
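
// Illustrative usage (a sketch; visitTypeStream from CVTypeVisitor.h is
// assumed to be the visitation entry point):
//   ScopedPrinter Printer(llvm::outs());
//   TypeDumpVisitor TDV(TpiTypes, &Printer, /*PrintRecordBytes=*/false);
//   TDV.setIpiTypes(IpiTypes); // only when dumping a PDB with an IPI stream
//   if (Error E = codeview::visitTypeStream(TpiTypes, TDV))
//     return E;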

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H

//===- DebugFrameDataSubsection.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGFRAMEDATASUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGFRAMEDATASUBSECTION_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

namespace llvm {
class BinaryStreamReader;
class BinaryStreamWriter;

namespace codeview {
class DebugFrameDataSubsectionRef final : public DebugSubsectionRef {
public:
  DebugFrameDataSubsectionRef()
      : DebugSubsectionRef(DebugSubsectionKind::FrameData) {}
  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::FrameData;
  }

  Error initialize(BinaryStreamReader Reader);
  Error initialize(BinaryStreamRef Stream);

  FixedStreamArray<FrameData>::Iterator begin() const { return Frames.begin(); }
  FixedStreamArray<FrameData>::Iterator end() const { return Frames.end(); }

  const support::ulittle32_t *getRelocPtr() const { return RelocPtr; }

private:
  const support::ulittle32_t *RelocPtr = nullptr;
  FixedStreamArray<FrameData> Frames;
};
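
// Illustrative read path (a sketch; `Contents` is an assumed BinaryStreamRef
// holding the raw subsection bytes):
//   DebugFrameDataSubsectionRef FDS;
//   if (Error E = FDS.initialize(Contents))
//     return E;
//   for (const FrameData &FD : FDS)  // FixedStreamArray iteration
//     use(FD.RvaStart, FD.CodeSize); // `use` is a hypothetical consumer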

class DebugFrameDataSubsection final : public DebugSubsection {
public:
  DebugFrameDataSubsection(bool IncludeRelocPtr)
      : DebugSubsection(DebugSubsectionKind::FrameData),
        IncludeRelocPtr(IncludeRelocPtr) {}
  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::FrameData;
  }

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;

  void addFrameData(const FrameData &Frame);
  void setFrames(ArrayRef<FrameData> Frames);

private:
  bool IncludeRelocPtr = false;
  std::vector<FrameData> Frames;
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGFRAMEDATASUBSECTION_H

//===- TypeVisitorCallbackPipeline.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/Error.h"
#include <vector>

namespace llvm {
namespace codeview {

class TypeVisitorCallbackPipeline : public TypeVisitorCallbacks {
public:
  TypeVisitorCallbackPipeline() = default;

  Error visitUnknownType(CVRecord<TypeLeafKind> &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitUnknownType(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitUnknownMember(CVMemberRecord &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitUnknownMember(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitTypeBegin(CVType &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitTypeBegin(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitTypeBegin(CVType &Record, TypeIndex Index) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitTypeBegin(Record, Index))
        return EC;
    }
    return Error::success();
  }

  Error visitTypeEnd(CVType &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitTypeEnd(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitMemberBegin(CVMemberRecord &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitMemberBegin(Record))
        return EC;
    }
    return Error::success();
  }

  Error visitMemberEnd(CVMemberRecord &Record) override {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitMemberEnd(Record))
        return EC;
    }
    return Error::success();
  }

  void addCallbackToPipeline(TypeVisitorCallbacks &Callbacks) {
    Pipeline.push_back(&Callbacks);
  }

#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override {         \
    return visitKnownRecordImpl(CVR, Record);                                  \
  }
#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownMember(CVMemberRecord &CVMR, Name##Record &Record)           \
      override {                                                               \
    return visitKnownMemberImpl(CVMR, Record);                                 \
  }
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"

private:
  template <typename T> Error visitKnownRecordImpl(CVType &CVR, T &Record) {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitKnownRecord(CVR, Record))
        return EC;
    }
    return Error::success();
  }

  template <typename T>
  Error visitKnownMemberImpl(CVMemberRecord &CVMR, T &Record) {
    for (auto *Visitor : Pipeline) {
      if (auto EC = Visitor->visitKnownMember(CVMR, Record))
        return EC;
    }
    return Error::success();
  }
  std::vector<TypeVisitorCallbacks *> Pipeline;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKPIPELINE_H

DebugInfo/CodeView/TypeRecord.h
//===- TypeRecord.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H

#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

namespace llvm {
namespace codeview {

using support::little32_t;
using support::ulittle16_t;
using support::ulittle32_t;

struct CVMemberRecord {
  TypeLeafKind Kind;
  ArrayRef<uint8_t> Data;
};

/// Equivalent to CV_fldattr_t in cvinfo.h.
struct MemberAttributes {
  uint16_t Attrs = 0;

  enum {
    MethodKindShift = 2,
  };

  MemberAttributes() = default;

  explicit MemberAttributes(MemberAccess Access)
      : Attrs(static_cast<uint16_t>(Access)) {}

  MemberAttributes(MemberAccess Access, MethodKind Kind, MethodOptions Flags) {
    Attrs = static_cast<uint16_t>(Access);
    Attrs |= (static_cast<uint16_t>(Kind) << MethodKindShift);
    Attrs |= static_cast<uint16_t>(Flags);
  }

  /// Get the access specifier. Valid for any kind of member.
  MemberAccess getAccess() const {
    return MemberAccess(unsigned(Attrs) & unsigned(MethodOptions::AccessMask));
  }

  /// Indicates if a method is defined with friend, virtual, static, etc.
  MethodKind getMethodKind() const {
    return MethodKind(
        (unsigned(Attrs) & unsigned(MethodOptions::MethodKindMask)) >>
        MethodKindShift);
  }

  /// Get the flags that are not included in access control or method
  /// properties.
  MethodOptions getFlags() const {
    return MethodOptions(
        unsigned(Attrs) &
        ~unsigned(MethodOptions::AccessMask | MethodOptions::MethodKindMask));
  }

  /// Is this method virtual.
  bool isVirtual() const {
    auto MP = getMethodKind();
    return MP != MethodKind::Vanilla && MP != MethodKind::Friend &&
           MP != MethodKind::Static;
  }

  /// Does this member introduce a new virtual method.
  bool isIntroducedVirtual() const {
    auto MP = getMethodKind();
    return MP == MethodKind::IntroducingVirtual ||
           MP == MethodKind::PureIntroducingVirtual;
  }

  /// Is this method static.
  bool isStatic() const {
    return getMethodKind() == MethodKind::Static;
  }
};
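
// Example (illustrative sketch, not part of the original header): the three
// constructor arguments pack into the single Attrs bitfield and round-trip
// through the accessors above.  The function name is invented; the enumerators
// come from CodeView.h.
inline bool exampleMemberAttributesRoundTrip() {
  MemberAttributes A(MemberAccess::Public, MethodKind::Virtual,
                     MethodOptions::None);
  return A.getAccess() == MemberAccess::Public &&
         A.getMethodKind() == MethodKind::Virtual && A.isVirtual() &&
         !A.isStatic();
}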

// Does not correspond to any tag; this is the tail of an LF_POINTER record
// if it represents a member pointer.
class MemberPointerInfo {
public:
  MemberPointerInfo() = default;

  MemberPointerInfo(TypeIndex ContainingType,
                    PointerToMemberRepresentation Representation)
      : ContainingType(ContainingType), Representation(Representation) {}

  TypeIndex getContainingType() const { return ContainingType; }
  PointerToMemberRepresentation getRepresentation() const {
    return Representation;
  }

  TypeIndex ContainingType;
  PointerToMemberRepresentation Representation =
      PointerToMemberRepresentation::Unknown;
};

class TypeRecord {
protected:
  TypeRecord() = default;
  explicit TypeRecord(TypeRecordKind Kind) : Kind(Kind) {}

public:
  TypeRecordKind getKind() const { return Kind; }

  TypeRecordKind Kind;
};

// LF_MODIFIER
class ModifierRecord : public TypeRecord {
public:
  ModifierRecord() = default;
  explicit ModifierRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  ModifierRecord(TypeIndex ModifiedType, ModifierOptions Modifiers)
      : TypeRecord(TypeRecordKind::Modifier), ModifiedType(ModifiedType),
        Modifiers(Modifiers) {}

  TypeIndex getModifiedType() const { return ModifiedType; }
  ModifierOptions getModifiers() const { return Modifiers; }

  TypeIndex ModifiedType;
  ModifierOptions Modifiers = ModifierOptions::None;
};

// LF_PROCEDURE
class ProcedureRecord : public TypeRecord {
public:
  ProcedureRecord() = default;
  explicit ProcedureRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  ProcedureRecord(TypeIndex ReturnType, CallingConvention CallConv,
                  FunctionOptions Options, uint16_t ParameterCount,
                  TypeIndex ArgumentList)
      : TypeRecord(TypeRecordKind::Procedure), ReturnType(ReturnType),
        CallConv(CallConv), Options(Options), ParameterCount(ParameterCount),
        ArgumentList(ArgumentList) {}

  TypeIndex getReturnType() const { return ReturnType; }
  CallingConvention getCallConv() const { return CallConv; }
  FunctionOptions getOptions() const { return Options; }
  uint16_t getParameterCount() const { return ParameterCount; }
  TypeIndex getArgumentList() const { return ArgumentList; }

  TypeIndex ReturnType;
  CallingConvention CallConv = CallingConvention::NearC;
  FunctionOptions Options = FunctionOptions::None;
  uint16_t ParameterCount = 0;
  TypeIndex ArgumentList;
};

// LF_MFUNCTION
class MemberFunctionRecord : public TypeRecord {
public:
  MemberFunctionRecord() = default;
  explicit MemberFunctionRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  MemberFunctionRecord(TypeIndex ReturnType, TypeIndex ClassType,
                       TypeIndex ThisType, CallingConvention CallConv,
                       FunctionOptions Options, uint16_t ParameterCount,
                       TypeIndex ArgumentList, int32_t ThisPointerAdjustment)
      : TypeRecord(TypeRecordKind::MemberFunction), ReturnType(ReturnType),
        ClassType(ClassType), ThisType(ThisType), CallConv(CallConv),
        Options(Options), ParameterCount(ParameterCount),
        ArgumentList(ArgumentList),
        ThisPointerAdjustment(ThisPointerAdjustment) {}

  TypeIndex getReturnType() const { return ReturnType; }
  TypeIndex getClassType() const { return ClassType; }
  TypeIndex getThisType() const { return ThisType; }
  CallingConvention getCallConv() const { return CallConv; }
  FunctionOptions getOptions() const { return Options; }
  uint16_t getParameterCount() const { return ParameterCount; }
  TypeIndex getArgumentList() const { return ArgumentList; }
  int32_t getThisPointerAdjustment() const { return ThisPointerAdjustment; }

  TypeIndex ReturnType;
  TypeIndex ClassType;
  TypeIndex ThisType;
  CallingConvention CallConv = CallingConvention::NearC;
  FunctionOptions Options = FunctionOptions::None;
  uint16_t ParameterCount = 0;
  TypeIndex ArgumentList;
  int32_t ThisPointerAdjustment = 0;
};

// LF_LABEL
class LabelRecord : public TypeRecord {
public:
  LabelRecord() = default;
  explicit LabelRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  LabelRecord(LabelType Mode) : TypeRecord(TypeRecordKind::Label), Mode(Mode) {}

  LabelType Mode = LabelType::Near;
};

// LF_MFUNC_ID
class MemberFuncIdRecord : public TypeRecord {
public:
  MemberFuncIdRecord() = default;
  explicit MemberFuncIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  MemberFuncIdRecord(TypeIndex ClassType, TypeIndex FunctionType,
                         StringRef Name)
      : TypeRecord(TypeRecordKind::MemberFuncId), ClassType(ClassType),
        FunctionType(FunctionType), Name(Name) {}

  TypeIndex getClassType() const { return ClassType; }
  TypeIndex getFunctionType() const { return FunctionType; }
  StringRef getName() const { return Name; }

  TypeIndex ClassType;
  TypeIndex FunctionType;
  StringRef Name;
};

// LF_ARGLIST
class ArgListRecord : public TypeRecord {
public:
  ArgListRecord() = default;
  explicit ArgListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  ArgListRecord(TypeRecordKind Kind, ArrayRef<TypeIndex> Indices)
      : TypeRecord(Kind), ArgIndices(Indices) {}

  ArrayRef<TypeIndex> getIndices() const { return ArgIndices; }

  std::vector<TypeIndex> ArgIndices;
};

// LF_SUBSTR_LIST
class StringListRecord : public TypeRecord {
public:
  StringListRecord() = default;
  explicit StringListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  StringListRecord(TypeRecordKind Kind, ArrayRef<TypeIndex> Indices)
      : TypeRecord(Kind), StringIndices(Indices) {}

  ArrayRef<TypeIndex> getIndices() const { return StringIndices; }

  std::vector<TypeIndex> StringIndices;
};

// LF_POINTER
class PointerRecord : public TypeRecord {
public:
  // ---------------------------XXXXX
  static const uint32_t PointerKindShift = 0;
  static const uint32_t PointerKindMask = 0x1F;

  // ------------------------XXX-----
  static const uint32_t PointerModeShift = 5;
  static const uint32_t PointerModeMask = 0x07;

  // ----------XXX------XXXXX--------
  static const uint32_t PointerOptionMask = 0x381f00;

  // -----------XXXXXXXX-------------
  static const uint32_t PointerSizeShift = 13;
  static const uint32_t PointerSizeMask = 0xFF;

  PointerRecord() = default;
  explicit PointerRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  PointerRecord(TypeIndex ReferentType, uint32_t Attrs)
      : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
        Attrs(Attrs) {}

  PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
                PointerOptions PO, uint8_t Size)
      : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
        Attrs(calcAttrs(PK, PM, PO, Size)) {}

  PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
                PointerOptions PO, uint8_t Size, const MemberPointerInfo &MPI)
      : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
        Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(MPI) {}

  TypeIndex getReferentType() const { return ReferentType; }

  PointerKind getPointerKind() const {
    return static_cast<PointerKind>((Attrs >> PointerKindShift) &
                                    PointerKindMask);
  }

  PointerMode getMode() const {
    return static_cast<PointerMode>((Attrs >> PointerModeShift) &
                                    PointerModeMask);
  }

  PointerOptions getOptions() const {
    return static_cast<PointerOptions>(Attrs & PointerOptionMask);
  }

  uint8_t getSize() const {
    return (Attrs >> PointerSizeShift) & PointerSizeMask;
  }

  MemberPointerInfo getMemberInfo() const { return *MemberInfo; }

  bool isPointerToMember() const {
    return getMode() == PointerMode::PointerToDataMember ||
           getMode() == PointerMode::PointerToMemberFunction;
  }

  bool isFlat() const { return !!(Attrs & uint32_t(PointerOptions::Flat32)); }
  bool isConst() const { return !!(Attrs & uint32_t(PointerOptions::Const)); }

  bool isVolatile() const {
    return !!(Attrs & uint32_t(PointerOptions::Volatile));
  }

  bool isUnaligned() const {
    return !!(Attrs & uint32_t(PointerOptions::Unaligned));
  }

  bool isRestrict() const {
    return !!(Attrs & uint32_t(PointerOptions::Restrict));
  }

  bool isLValueReferenceThisPtr() const {
    return !!(Attrs & uint32_t(PointerOptions::LValueRefThisPointer));
  }

  bool isRValueReferenceThisPtr() const {
    return !!(Attrs & uint32_t(PointerOptions::RValueRefThisPointer));
  }

  TypeIndex ReferentType;
  uint32_t Attrs = 0;
  std::optional<MemberPointerInfo> MemberInfo;

  void setAttrs(PointerKind PK, PointerMode PM, PointerOptions PO,
                uint8_t Size) {
    Attrs = calcAttrs(PK, PM, PO, Size);
  }

private:
  static uint32_t calcAttrs(PointerKind PK, PointerMode PM, PointerOptions PO,
                            uint8_t Size) {
    uint32_t A = 0;
    A |= static_cast<uint32_t>(PK);
    A |= static_cast<uint32_t>(PO);
    A |= (static_cast<uint32_t>(PM) << PointerModeShift);
    A |= (static_cast<uint32_t>(Size) << PointerSizeShift);
    return A;
  }
};
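
// Example (illustrative sketch, not part of the original header): calcAttrs
// packs kind, mode, options, and size into the bit layout documented by the
// mask comments above, and the getters recover each field.  The function name
// is invented; the enumerators are assumed from CodeView.h.
inline bool examplePointerAttrsRoundTrip() {
  PointerRecord PR(TypeIndex::Int32(), PointerKind::Near64,
                   PointerMode::Pointer, PointerOptions::Const, /*Size=*/8);
  return PR.getPointerKind() == PointerKind::Near64 &&
         PR.getMode() == PointerMode::Pointer && PR.isConst() &&
         PR.getSize() == 8 && !PR.isPointerToMember();
}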

// LF_NESTTYPE
class NestedTypeRecord : public TypeRecord {
public:
  NestedTypeRecord() = default;
  explicit NestedTypeRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  NestedTypeRecord(TypeIndex Type, StringRef Name)
      : TypeRecord(TypeRecordKind::NestedType), Type(Type), Name(Name) {}

  TypeIndex getNestedType() const { return Type; }
  StringRef getName() const { return Name; }

  TypeIndex Type;
  StringRef Name;
};

// LF_FIELDLIST
class FieldListRecord : public TypeRecord {
public:
  FieldListRecord() = default;
  explicit FieldListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  explicit FieldListRecord(ArrayRef<uint8_t> Data)
      : TypeRecord(TypeRecordKind::FieldList), Data(Data) {}

  ArrayRef<uint8_t> Data;
};

// LF_ARRAY
class ArrayRecord : public TypeRecord {
public:
  ArrayRecord() = default;
  explicit ArrayRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  ArrayRecord(TypeIndex ElementType, TypeIndex IndexType, uint64_t Size,
              StringRef Name)
      : TypeRecord(TypeRecordKind::Array), ElementType(ElementType),
        IndexType(IndexType), Size(Size), Name(Name) {}

  TypeIndex getElementType() const { return ElementType; }
  TypeIndex getIndexType() const { return IndexType; }
  uint64_t getSize() const { return Size; }
  StringRef getName() const { return Name; }

  TypeIndex ElementType;
  TypeIndex IndexType;
  uint64_t Size = 0;
  StringRef Name;
};

class TagRecord : public TypeRecord {
protected:
  TagRecord() = default;
  explicit TagRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  TagRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
            TypeIndex FieldList, StringRef Name, StringRef UniqueName)
      : TypeRecord(Kind), MemberCount(MemberCount), Options(Options),
        FieldList(FieldList), Name(Name), UniqueName(UniqueName) {}

public:
  static const int HfaKindShift = 11;
  static const int HfaKindMask = 0x1800;
  static const int WinRTKindShift = 14;
  static const int WinRTKindMask = 0xC000;

  bool hasUniqueName() const {
    return (Options & ClassOptions::HasUniqueName) != ClassOptions::None;
  }

  bool isNested() const {
    return (Options & ClassOptions::Nested) != ClassOptions::None;
  }

  bool isForwardRef() const {
    return (Options & ClassOptions::ForwardReference) != ClassOptions::None;
  }

  bool containsNestedClass() const {
    return (Options & ClassOptions::ContainsNestedClass) != ClassOptions::None;
  }

  bool isScoped() const {
    return (Options & ClassOptions::Scoped) != ClassOptions::None;
  }

  uint16_t getMemberCount() const { return MemberCount; }
  ClassOptions getOptions() const { return Options; }
  TypeIndex getFieldList() const { return FieldList; }
  StringRef getName() const { return Name; }
  StringRef getUniqueName() const { return UniqueName; }

  uint16_t MemberCount = 0;
  ClassOptions Options = ClassOptions::None;
  TypeIndex FieldList;
  StringRef Name;
  StringRef UniqueName;
};

// LF_CLASS, LF_STRUCTURE, LF_INTERFACE
class ClassRecord : public TagRecord {
public:
  ClassRecord() = default;
  explicit ClassRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
  ClassRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
              TypeIndex FieldList, TypeIndex DerivationList,
              TypeIndex VTableShape, uint64_t Size, StringRef Name,
              StringRef UniqueName)
      : TagRecord(Kind, MemberCount, Options, FieldList, Name, UniqueName),
        DerivationList(DerivationList), VTableShape(VTableShape), Size(Size) {}

  HfaKind getHfa() const {
    uint16_t Value = static_cast<uint16_t>(Options);
    Value = (Value & HfaKindMask) >> HfaKindShift;
    return static_cast<HfaKind>(Value);
  }

  WindowsRTClassKind getWinRTKind() const {
    uint16_t Value = static_cast<uint16_t>(Options);
    Value = (Value & WinRTKindMask) >> WinRTKindShift;
    return static_cast<WindowsRTClassKind>(Value);
  }

  TypeIndex getDerivationList() const { return DerivationList; }
  TypeIndex getVTableShape() const { return VTableShape; }
  uint64_t getSize() const { return Size; }

  TypeIndex DerivationList;
  TypeIndex VTableShape;
  uint64_t Size = 0;
};

// LF_UNION
struct UnionRecord : public TagRecord {
  UnionRecord() = default;
  explicit UnionRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
  UnionRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
              uint64_t Size, StringRef Name, StringRef UniqueName)
      : TagRecord(TypeRecordKind::Union, MemberCount, Options, FieldList, Name,
                  UniqueName),
        Size(Size) {}

  HfaKind getHfa() const {
    uint16_t Value = static_cast<uint16_t>(Options);
    Value = (Value & HfaKindMask) >> HfaKindShift;
    return static_cast<HfaKind>(Value);
  }

  uint64_t getSize() const { return Size; }

  uint64_t Size = 0;
};

// LF_ENUM
class EnumRecord : public TagRecord {
public:
  EnumRecord() = default;
  explicit EnumRecord(TypeRecordKind Kind) : TagRecord(Kind) {}
  EnumRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
             StringRef Name, StringRef UniqueName, TypeIndex UnderlyingType)
      : TagRecord(TypeRecordKind::Enum, MemberCount, Options, FieldList, Name,
                  UniqueName),
        UnderlyingType(UnderlyingType) {}

  TypeIndex getUnderlyingType() const { return UnderlyingType; }

  TypeIndex UnderlyingType;
};

// LF_BITFIELD
class BitFieldRecord : public TypeRecord {
public:
  BitFieldRecord() = default;
  explicit BitFieldRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  BitFieldRecord(TypeIndex Type, uint8_t BitSize, uint8_t BitOffset)
      : TypeRecord(TypeRecordKind::BitField), Type(Type), BitSize(BitSize),
        BitOffset(BitOffset) {}

  TypeIndex getType() const { return Type; }
  uint8_t getBitOffset() const { return BitOffset; }
  uint8_t getBitSize() const { return BitSize; }

  TypeIndex Type;
  uint8_t BitSize = 0;
  uint8_t BitOffset = 0;
};

// LF_VTSHAPE
class VFTableShapeRecord : public TypeRecord {
public:
  VFTableShapeRecord() = default;
  explicit VFTableShapeRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  explicit VFTableShapeRecord(ArrayRef<VFTableSlotKind> Slots)
      : TypeRecord(TypeRecordKind::VFTableShape), SlotsRef(Slots) {}
  explicit VFTableShapeRecord(std::vector<VFTableSlotKind> Slots)
      : TypeRecord(TypeRecordKind::VFTableShape), Slots(std::move(Slots)) {}

  ArrayRef<VFTableSlotKind> getSlots() const {
    if (!SlotsRef.empty())
      return SlotsRef;
    return Slots;
  }

  uint32_t getEntryCount() const { return getSlots().size(); }

  ArrayRef<VFTableSlotKind> SlotsRef;
  std::vector<VFTableSlotKind> Slots;
};

// LF_TYPESERVER2
class TypeServer2Record : public TypeRecord {
public:
  TypeServer2Record() = default;
  explicit TypeServer2Record(TypeRecordKind Kind) : TypeRecord(Kind) {}
  TypeServer2Record(StringRef GuidStr, uint32_t Age, StringRef Name)
      : TypeRecord(TypeRecordKind::TypeServer2), Age(Age), Name(Name) {
    assert(GuidStr.size() == 16 && "guid isn't 16 bytes");
    ::memcpy(Guid.Guid, GuidStr.data(), 16);
  }

  const GUID &getGuid() const { return Guid; }
  uint32_t getAge() const { return Age; }
  StringRef getName() const { return Name; }

  GUID Guid = {};
  uint32_t Age = 0;
  StringRef Name;
};

// LF_STRING_ID
class StringIdRecord : public TypeRecord {
public:
  StringIdRecord() = default;
  explicit StringIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  StringIdRecord(TypeIndex Id, StringRef String)
      : TypeRecord(TypeRecordKind::StringId), Id(Id), String(String) {}

  TypeIndex getId() const { return Id; }
  StringRef getString() const { return String; }

  TypeIndex Id;
  StringRef String;
};

// LF_FUNC_ID
class FuncIdRecord : public TypeRecord {
public:
  FuncIdRecord() = default;
  explicit FuncIdRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  FuncIdRecord(TypeIndex ParentScope, TypeIndex FunctionType, StringRef Name)
      : TypeRecord(TypeRecordKind::FuncId), ParentScope(ParentScope),
        FunctionType(FunctionType), Name(Name) {}

  TypeIndex getParentScope() const { return ParentScope; }
  TypeIndex getFunctionType() const { return FunctionType; }
  StringRef getName() const { return Name; }

  TypeIndex ParentScope;
  TypeIndex FunctionType;
  StringRef Name;
};

// LF_UDT_SRC_LINE
class UdtSourceLineRecord : public TypeRecord {
public:
  UdtSourceLineRecord() = default;
  explicit UdtSourceLineRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  UdtSourceLineRecord(TypeIndex UDT, TypeIndex SourceFile, uint32_t LineNumber)
      : TypeRecord(TypeRecordKind::UdtSourceLine), UDT(UDT),
        SourceFile(SourceFile), LineNumber(LineNumber) {}

  TypeIndex getUDT() const { return UDT; }
  TypeIndex getSourceFile() const { return SourceFile; }
  uint32_t getLineNumber() const { return LineNumber; }

  TypeIndex UDT;
  TypeIndex SourceFile;
  uint32_t LineNumber = 0;
};

// LF_UDT_MOD_SRC_LINE
class UdtModSourceLineRecord : public TypeRecord {
public:
  UdtModSourceLineRecord() = default;
  explicit UdtModSourceLineRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  UdtModSourceLineRecord(TypeIndex UDT, TypeIndex SourceFile,
                         uint32_t LineNumber, uint16_t Module)
      : TypeRecord(TypeRecordKind::UdtModSourceLine), UDT(UDT),
        SourceFile(SourceFile), LineNumber(LineNumber), Module(Module) {}

  TypeIndex getUDT() const { return UDT; }
  TypeIndex getSourceFile() const { return SourceFile; }
  uint32_t getLineNumber() const { return LineNumber; }
  uint16_t getModule() const { return Module; }

  TypeIndex UDT;
  TypeIndex SourceFile;
  uint32_t LineNumber = 0;
  uint16_t Module = 0;
};

// LF_BUILDINFO
class BuildInfoRecord : public TypeRecord {
public:
  BuildInfoRecord() = default;
  explicit BuildInfoRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  BuildInfoRecord(ArrayRef<TypeIndex> ArgIndices)
      : TypeRecord(TypeRecordKind::BuildInfo),
        ArgIndices(ArgIndices.begin(), ArgIndices.end()) {}

  ArrayRef<TypeIndex> getArgs() const { return ArgIndices; }

  /// Indices of known build info arguments.
  enum BuildInfoArg {
    CurrentDirectory, ///< Absolute CWD path
    BuildTool,        ///< Absolute compiler path
    SourceFile,       ///< Path to main source file, relative or absolute
    TypeServerPDB,    ///< Absolute path of type server PDB (/Fd)
    CommandLine,      ///< Full canonical command line (maybe -cc1)
    MaxArgs
  };

  SmallVector<TypeIndex, MaxArgs> ArgIndices;
};
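
// Example (illustrative sketch, not part of the original header): the
// BuildInfoArg enumerators are positional indices into getArgs(), so a
// particular argument is fetched by position when present.  The helper name is
// invented.
inline std::optional<TypeIndex>
exampleBuildToolIndex(const BuildInfoRecord &BIR) {
  ArrayRef<TypeIndex> Args = BIR.getArgs();
  if (Args.size() <= size_t(BuildInfoRecord::BuildTool))
    return std::nullopt;
  return Args[BuildInfoRecord::BuildTool]; // TypeIndex of the compiler path.
}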

// LF_VFTABLE
class VFTableRecord : public TypeRecord {
public:
  VFTableRecord() = default;
  explicit VFTableRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  VFTableRecord(TypeIndex CompleteClass, TypeIndex OverriddenVFTable,
                uint32_t VFPtrOffset, StringRef Name,
                ArrayRef<StringRef> Methods)
      : TypeRecord(TypeRecordKind::VFTable), CompleteClass(CompleteClass),
        OverriddenVFTable(OverriddenVFTable), VFPtrOffset(VFPtrOffset) {
    MethodNames.push_back(Name);
    llvm::append_range(MethodNames, Methods);
  }

  TypeIndex getCompleteClass() const { return CompleteClass; }
  TypeIndex getOverriddenVTable() const { return OverriddenVFTable; }
  uint32_t getVFPtrOffset() const { return VFPtrOffset; }
  StringRef getName() const { return ArrayRef(MethodNames).front(); }

  ArrayRef<StringRef> getMethodNames() const {
    return ArrayRef(MethodNames).drop_front();
  }

  TypeIndex CompleteClass;
  TypeIndex OverriddenVFTable;
  uint32_t VFPtrOffset = 0;
  std::vector<StringRef> MethodNames;
};

// LF_ONEMETHOD
class OneMethodRecord : public TypeRecord {
public:
  OneMethodRecord() = default;
  explicit OneMethodRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  OneMethodRecord(TypeIndex Type, MemberAttributes Attrs, int32_t VFTableOffset,
                  StringRef Name)
      : TypeRecord(TypeRecordKind::OneMethod), Type(Type), Attrs(Attrs),
        VFTableOffset(VFTableOffset), Name(Name) {}
  OneMethodRecord(TypeIndex Type, MemberAccess Access, MethodKind MK,
                  MethodOptions Options, int32_t VFTableOffset, StringRef Name)
      : TypeRecord(TypeRecordKind::OneMethod), Type(Type),
        Attrs(Access, MK, Options), VFTableOffset(VFTableOffset), Name(Name) {}

  TypeIndex getType() const { return Type; }
  MethodKind getMethodKind() const { return Attrs.getMethodKind(); }
  MethodOptions getOptions() const { return Attrs.getFlags(); }
  MemberAccess getAccess() const { return Attrs.getAccess(); }
  int32_t getVFTableOffset() const { return VFTableOffset; }
  StringRef getName() const { return Name; }

  bool isIntroducingVirtual() const {
    return getMethodKind() == MethodKind::IntroducingVirtual ||
           getMethodKind() == MethodKind::PureIntroducingVirtual;
  }

  TypeIndex Type;
  MemberAttributes Attrs;
  int32_t VFTableOffset = 0;
  StringRef Name;
};

// LF_METHODLIST
class MethodOverloadListRecord : public TypeRecord {
public:
  MethodOverloadListRecord() = default;
  explicit MethodOverloadListRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  MethodOverloadListRecord(ArrayRef<OneMethodRecord> Methods)
      : TypeRecord(TypeRecordKind::MethodOverloadList), Methods(Methods) {}

  ArrayRef<OneMethodRecord> getMethods() const { return Methods; }

  std::vector<OneMethodRecord> Methods;
};

/// For method overload sets.  LF_METHOD
class OverloadedMethodRecord : public TypeRecord {
public:
  OverloadedMethodRecord() = default;
  explicit OverloadedMethodRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  OverloadedMethodRecord(uint16_t NumOverloads, TypeIndex MethodList,
                         StringRef Name)
      : TypeRecord(TypeRecordKind::OverloadedMethod),
        NumOverloads(NumOverloads), MethodList(MethodList), Name(Name) {}

  uint16_t getNumOverloads() const { return NumOverloads; }
  TypeIndex getMethodList() const { return MethodList; }
  StringRef getName() const { return Name; }

  uint16_t NumOverloads = 0;
  TypeIndex MethodList;
  StringRef Name;
};

// LF_MEMBER
class DataMemberRecord : public TypeRecord {
public:
  DataMemberRecord() = default;
  explicit DataMemberRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  DataMemberRecord(MemberAttributes Attrs, TypeIndex Type, uint64_t Offset,
                   StringRef Name)
      : TypeRecord(TypeRecordKind::DataMember), Attrs(Attrs), Type(Type),
        FieldOffset(Offset), Name(Name) {}
  DataMemberRecord(MemberAccess Access, TypeIndex Type, uint64_t Offset,
                   StringRef Name)
      : TypeRecord(TypeRecordKind::DataMember), Attrs(Access), Type(Type),
        FieldOffset(Offset), Name(Name) {}

  MemberAccess getAccess() const { return Attrs.getAccess(); }
  TypeIndex getType() const { return Type; }
  uint64_t getFieldOffset() const { return FieldOffset; }
  StringRef getName() const { return Name; }

  MemberAttributes Attrs;
  TypeIndex Type;
  uint64_t FieldOffset = 0;
  StringRef Name;
};

// LF_STMEMBER
class StaticDataMemberRecord : public TypeRecord {
public:
  StaticDataMemberRecord() = default;
  explicit StaticDataMemberRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  StaticDataMemberRecord(MemberAttributes Attrs, TypeIndex Type, StringRef Name)
      : TypeRecord(TypeRecordKind::StaticDataMember), Attrs(Attrs), Type(Type),
        Name(Name) {}
  StaticDataMemberRecord(MemberAccess Access, TypeIndex Type, StringRef Name)
      : TypeRecord(TypeRecordKind::StaticDataMember), Attrs(Access), Type(Type),
        Name(Name) {}

  MemberAccess getAccess() const { return Attrs.getAccess(); }
  TypeIndex getType() const { return Type; }
  StringRef getName() const { return Name; }

  MemberAttributes Attrs;
  TypeIndex Type;
  StringRef Name;
};

// LF_ENUMERATE
class EnumeratorRecord : public TypeRecord {
public:
  EnumeratorRecord() = default;
  explicit EnumeratorRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  EnumeratorRecord(MemberAttributes Attrs, APSInt Value, StringRef Name)
      : TypeRecord(TypeRecordKind::Enumerator), Attrs(Attrs),
        Value(std::move(Value)), Name(Name) {}
  EnumeratorRecord(MemberAccess Access, APSInt Value, StringRef Name)
      : TypeRecord(TypeRecordKind::Enumerator), Attrs(Access),
        Value(std::move(Value)), Name(Name) {}

  MemberAccess getAccess() const { return Attrs.getAccess(); }
  APSInt getValue() const { return Value; }
  StringRef getName() const { return Name; }

  MemberAttributes Attrs;
  APSInt Value;
  StringRef Name;
};

// LF_VFUNCTAB
class VFPtrRecord : public TypeRecord {
public:
  VFPtrRecord() = default;
  explicit VFPtrRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  VFPtrRecord(TypeIndex Type)
      : TypeRecord(TypeRecordKind::VFPtr), Type(Type) {}

  TypeIndex getType() const { return Type; }

  TypeIndex Type;
};

// LF_BCLASS, LF_BINTERFACE
class BaseClassRecord : public TypeRecord {
public:
  BaseClassRecord() = default;
  explicit BaseClassRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  BaseClassRecord(MemberAttributes Attrs, TypeIndex Type, uint64_t Offset)
      : TypeRecord(TypeRecordKind::BaseClass), Attrs(Attrs), Type(Type),
        Offset(Offset) {}
  BaseClassRecord(MemberAccess Access, TypeIndex Type, uint64_t Offset)
      : TypeRecord(TypeRecordKind::BaseClass), Attrs(Access), Type(Type),
        Offset(Offset) {}

  MemberAccess getAccess() const { return Attrs.getAccess(); }
  TypeIndex getBaseType() const { return Type; }
  uint64_t getBaseOffset() const { return Offset; }

  MemberAttributes Attrs;
  TypeIndex Type;
  uint64_t Offset = 0;
};

// LF_VBCLASS, LF_IVBCLASS
class VirtualBaseClassRecord : public TypeRecord {
public:
  VirtualBaseClassRecord() = default;
  explicit VirtualBaseClassRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  VirtualBaseClassRecord(TypeRecordKind Kind, MemberAttributes Attrs,
                         TypeIndex BaseType, TypeIndex VBPtrType,
                         uint64_t Offset, uint64_t Index)
      : TypeRecord(Kind), Attrs(Attrs), BaseType(BaseType),
        VBPtrType(VBPtrType), VBPtrOffset(Offset), VTableIndex(Index) {}
  VirtualBaseClassRecord(TypeRecordKind Kind, MemberAccess Access,
                         TypeIndex BaseType, TypeIndex VBPtrType,
                         uint64_t Offset, uint64_t Index)
      : TypeRecord(Kind), Attrs(Access), BaseType(BaseType),
        VBPtrType(VBPtrType), VBPtrOffset(Offset), VTableIndex(Index) {}

  MemberAccess getAccess() const { return Attrs.getAccess(); }
  TypeIndex getBaseType() const { return BaseType; }
  TypeIndex getVBPtrType() const { return VBPtrType; }
  uint64_t getVBPtrOffset() const { return VBPtrOffset; }
  uint64_t getVTableIndex() const { return VTableIndex; }

  MemberAttributes Attrs;
  TypeIndex BaseType;
  TypeIndex VBPtrType;
  uint64_t VBPtrOffset = 0;
  uint64_t VTableIndex = 0;
};

/// LF_INDEX - Used to chain two large LF_FIELDLIST or LF_METHODLIST records
/// together. The first will end in an LF_INDEX record that points to the next.
class ListContinuationRecord : public TypeRecord {
public:
  ListContinuationRecord() = default;
  explicit ListContinuationRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}
  ListContinuationRecord(TypeIndex ContinuationIndex)
      : TypeRecord(TypeRecordKind::ListContinuation),
        ContinuationIndex(ContinuationIndex) {}

  TypeIndex getContinuationIndex() const { return ContinuationIndex; }

  TypeIndex ContinuationIndex;
};

// LF_PRECOMP
class PrecompRecord : public TypeRecord {
public:
  PrecompRecord() = default;
  explicit PrecompRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  uint32_t getStartTypeIndex() const { return StartTypeIndex; }
  uint32_t getTypesCount() const { return TypesCount; }
  uint32_t getSignature() const { return Signature; }
  StringRef getPrecompFilePath() const { return PrecompFilePath; }

  uint32_t StartTypeIndex = 0;
  uint32_t TypesCount = 0;
  uint32_t Signature = 0;
  StringRef PrecompFilePath;
};

// LF_ENDPRECOMP
class EndPrecompRecord : public TypeRecord {
public:
  EndPrecompRecord() = default;
  explicit EndPrecompRecord(TypeRecordKind Kind) : TypeRecord(Kind) {}

  uint32_t getSignature() const { return Signature; }

  uint32_t Signature = 0;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H

DebugInfo/CodeView/CVRecord.h
//===- CVRecord.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CVRECORD_H
#define LLVM_DEBUGINFO_CODEVIEW_CVRECORD_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {

namespace codeview {

/// CVRecord is a fat pointer (base + size pair) to a symbol or type record.
/// Carrying the size separately instead of trusting the size stored in the
/// record prefix provides some extra safety and flexibility.
template <typename Kind> class CVRecord {
public:
  CVRecord() = default;

  CVRecord(ArrayRef<uint8_t> Data) : RecordData(Data) {}

  CVRecord(const RecordPrefix *P, size_t Size)
      : RecordData(reinterpret_cast<const uint8_t *>(P), Size) {}

  bool valid() const { return kind() != Kind(0); }

  uint32_t length() const { return RecordData.size(); }

  Kind kind() const {
    if (RecordData.size() < sizeof(RecordPrefix))
      return Kind(0);
    return static_cast<Kind>(static_cast<uint16_t>(
        reinterpret_cast<const RecordPrefix *>(RecordData.data())->RecordKind));
  }

  ArrayRef<uint8_t> data() const { return RecordData; }

  StringRef str_data() const {
    return StringRef(reinterpret_cast<const char *>(RecordData.data()),
                     RecordData.size());
  }

  ArrayRef<uint8_t> content() const {
    return RecordData.drop_front(sizeof(RecordPrefix));
  }

  ArrayRef<uint8_t> RecordData;
};

// There are two kinds of codeview records: type and symbol records.
using CVType = CVRecord<TypeLeafKind>;
using CVSymbol = CVRecord<SymbolKind>;

template <typename Record, typename Func>
Error forEachCodeViewRecord(ArrayRef<uint8_t> StreamBuffer, Func F) {
  while (!StreamBuffer.empty()) {
    if (StreamBuffer.size() < sizeof(RecordPrefix))
      return make_error<CodeViewError>(cv_error_code::corrupt_record);

    const RecordPrefix *Prefix =
        reinterpret_cast<const RecordPrefix *>(StreamBuffer.data());

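    // RecordLen does not count the 2-byte length field itself, so the full
    // record occupies RecordLen + 2 bytes of the buffer.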
    size_t RealLen = Prefix->RecordLen + 2;
    if (StreamBuffer.size() < RealLen)
      return make_error<CodeViewError>(cv_error_code::corrupt_record);

    ArrayRef<uint8_t> Data = StreamBuffer.take_front(RealLen);
    StreamBuffer = StreamBuffer.drop_front(RealLen);

    Record R(Data);
    if (auto EC = F(R))
      return EC;
  }
  return Error::success();
}
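
// Example (illustrative sketch, not part of the original header): count the
// valid records in a flat .debug$T-style buffer.  Iteration stops, and the
// error is propagated, as soon as a truncated or corrupt prefix is seen.  The
// function name is invented.
inline Expected<unsigned> exampleCountTypeRecords(ArrayRef<uint8_t> Buffer) {
  unsigned Count = 0;
  if (Error EC = forEachCodeViewRecord<CVType>(
          Buffer, [&Count](const CVType &R) -> Error {
            if (R.valid()) // non-zero leaf kind
              ++Count;
            return Error::success();
          }))
    return std::move(EC);
  return Count;
}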

/// Read a complete record from a stream at a random offset.
template <typename Kind>
inline Expected<CVRecord<Kind>> readCVRecordFromStream(BinaryStreamRef Stream,
                                                       uint32_t Offset) {
  const RecordPrefix *Prefix = nullptr;
  BinaryStreamReader Reader(Stream);
  Reader.setOffset(Offset);

  if (auto EC = Reader.readObject(Prefix))
    return std::move(EC);
  if (Prefix->RecordLen < 2)
    return make_error<CodeViewError>(cv_error_code::corrupt_record);

  Reader.setOffset(Offset);
  ArrayRef<uint8_t> RawData;
  if (auto EC = Reader.readBytes(RawData, Prefix->RecordLen + sizeof(uint16_t)))
    return std::move(EC);
  return codeview::CVRecord<Kind>(RawData);
}

} // end namespace codeview

template <typename Kind>
struct VarStreamArrayExtractor<codeview::CVRecord<Kind>> {
  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
                   codeview::CVRecord<Kind> &Item) {
    auto ExpectedRec = codeview::readCVRecordFromStream<Kind>(Stream, 0);
    if (!ExpectedRec)
      return ExpectedRec.takeError();
    Item = *ExpectedRec;
    Len = ExpectedRec->length();
    return Error::success();
  }
};

namespace codeview {
using CVSymbolArray = VarStreamArray<CVSymbol>;
using CVTypeArray = VarStreamArray<CVType>;
using CVTypeRange = iterator_range<CVTypeArray::Iterator>;
} // namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_CVRECORD_H

DebugInfo/CodeView/DebugCrossImpSubsection.h
//===- DebugCrossImpSubsection.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSIMPSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSIMPSUBSECTION_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {
class BinaryStreamReader;
class BinaryStreamWriter;

namespace codeview {

struct CrossModuleImportItem {
  const CrossModuleImport *Header = nullptr;
  FixedStreamArray<support::ulittle32_t> Imports;
};

} // end namespace codeview

template <> struct VarStreamArrayExtractor<codeview::CrossModuleImportItem> {
public:
  using ContextType = void;

  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
                   codeview::CrossModuleImportItem &Item);
};

namespace codeview {

class DebugStringTableSubsection;

class DebugCrossModuleImportsSubsectionRef final : public DebugSubsectionRef {
  using ReferenceArray = VarStreamArray<CrossModuleImportItem>;
  using Iterator = ReferenceArray::Iterator;

public:
  DebugCrossModuleImportsSubsectionRef()
      : DebugSubsectionRef(DebugSubsectionKind::CrossScopeImports) {}

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::CrossScopeImports;
  }

  Error initialize(BinaryStreamReader Reader);
  Error initialize(BinaryStreamRef Stream);

  Iterator begin() const { return References.begin(); }
  Iterator end() const { return References.end(); }

private:
  ReferenceArray References;
};

class DebugCrossModuleImportsSubsection final : public DebugSubsection {
public:
  explicit DebugCrossModuleImportsSubsection(
      DebugStringTableSubsection &Strings)
      : DebugSubsection(DebugSubsectionKind::CrossScopeImports),
        Strings(Strings) {}

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::CrossScopeImports;
  }

  void addImport(StringRef Module, uint32_t ImportId);

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;

private:
  DebugStringTableSubsection &Strings;
  StringMap<std::vector<support::ulittle32_t>> Mappings;
};
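
// Example (illustrative sketch, not part of the original header): the writable
// form accumulates (module, import-id) pairs and serializes them on commit.
// `Strings` is an assumed DebugStringTableSubsection that must outlive the
// subsection, and `Writer` an assumed BinaryStreamWriter:
//
//   DebugCrossModuleImportsSubsection Imports(Strings);
//   Imports.addImport("foo.obj", /*ImportId=*/0x1001);
//   uint32_t Size = Imports.calculateSerializedSize();
//   if (Error EC = Imports.commit(Writer))
//     return EC;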

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSIMPSUBSECTION_H

DebugInfo/CodeView/DebugSubsectionVisitor.h
//===- DebugSubsectionVisitor.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONVISITOR_H

#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
#include "llvm/Support/Error.h"

namespace llvm {

namespace codeview {

class DebugChecksumsSubsectionRef;
class DebugSubsectionRecord;
class DebugInlineeLinesSubsectionRef;
class DebugCrossModuleExportsSubsectionRef;
class DebugCrossModuleImportsSubsectionRef;
class DebugFrameDataSubsectionRef;
class DebugLinesSubsectionRef;
class DebugStringTableSubsectionRef;
class DebugSymbolRVASubsectionRef;
class DebugSymbolsSubsectionRef;
class DebugUnknownSubsectionRef;

class DebugSubsectionVisitor {
public:
  virtual ~DebugSubsectionVisitor() = default;

  virtual Error visitUnknown(DebugUnknownSubsectionRef &Unknown) {
    return Error::success();
  }
  virtual Error visitLines(DebugLinesSubsectionRef &Lines,
                           const StringsAndChecksumsRef &State) = 0;
  virtual Error visitFileChecksums(DebugChecksumsSubsectionRef &Checksums,
                                   const StringsAndChecksumsRef &State) = 0;
  virtual Error visitInlineeLines(DebugInlineeLinesSubsectionRef &Inlinees,
                                  const StringsAndChecksumsRef &State) = 0;
  virtual Error
  visitCrossModuleExports(DebugCrossModuleExportsSubsectionRef &CSE,
                          const StringsAndChecksumsRef &State) = 0;
  virtual Error
  visitCrossModuleImports(DebugCrossModuleImportsSubsectionRef &CSE,
                          const StringsAndChecksumsRef &State) = 0;

  virtual Error visitStringTable(DebugStringTableSubsectionRef &ST,
                                 const StringsAndChecksumsRef &State) = 0;

  virtual Error visitSymbols(DebugSymbolsSubsectionRef &CSE,
                             const StringsAndChecksumsRef &State) = 0;

  virtual Error visitFrameData(DebugFrameDataSubsectionRef &FD,
                               const StringsAndChecksumsRef &State) = 0;
  virtual Error visitCOFFSymbolRVAs(DebugSymbolRVASubsectionRef &RVAs,
                                    const StringsAndChecksumsRef &State) = 0;
};

Error visitDebugSubsection(const DebugSubsectionRecord &R,
                           DebugSubsectionVisitor &V,
                           const StringsAndChecksumsRef &State);

namespace detail {
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
                            StringsAndChecksumsRef &State) {
  State.initialize(std::forward<T>(FragmentRange));

  for (const DebugSubsectionRecord &L : FragmentRange) {
    if (auto EC = visitDebugSubsection(L, V, State))
      return EC;
  }
  return Error::success();
}
} // namespace detail

template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V) {
  StringsAndChecksumsRef State;
  return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
                                       State);
}

template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
                            const DebugStringTableSubsectionRef &Strings) {
  StringsAndChecksumsRef State(Strings);
  return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
                                       State);
}

template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
                            const DebugStringTableSubsectionRef &Strings,
                            const DebugChecksumsSubsectionRef &Checksums) {
  StringsAndChecksumsRef State(Strings, Checksums);
  return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
                                       State);
}
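
// Example (illustrative sketch, not part of the original header): a concrete
// visitor must override every pure virtual hook above; the free-function
// overloads then build the StringsAndChecksumsRef state and thread it through
// each subsection in FragmentRange.  `MyVisitor`, `Subsections`, `Strings`,
// and `Checksums` are assumed context:
//
//   MyVisitor V; // derives from DebugSubsectionVisitor
//   if (Error EC = visitDebugSubsections(Subsections, V, Strings, Checksums))
//     return EC; // the first failing callback aborts the walk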

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONVISITOR_H

DebugInfo/CodeView/TypeIndex.h
//===- TypeIndex.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Endian.h"
#include <cassert>
#include <cinttypes>

namespace llvm {

class ScopedPrinter;
class StringRef;

namespace codeview {

class TypeCollection;

enum class SimpleTypeKind : uint32_t {
  None = 0x0000,          // uncharacterized type (no type)
  Void = 0x0003,          // void
  NotTranslated = 0x0007, // type not translated by cvpack
  HResult = 0x0008,       // OLE/COM HRESULT

  SignedCharacter = 0x0010,   // 8 bit signed
  UnsignedCharacter = 0x0020, // 8 bit unsigned
  NarrowCharacter = 0x0070,   // really a char
  WideCharacter = 0x0071,     // wide char
  Character16 = 0x007a,       // char16_t
  Character32 = 0x007b,       // char32_t
  Character8 = 0x007c,        // char8_t

  SByte = 0x0068,       // 8 bit signed int
  Byte = 0x0069,        // 8 bit unsigned int
  Int16Short = 0x0011,  // 16 bit signed
  UInt16Short = 0x0021, // 16 bit unsigned
  Int16 = 0x0072,       // 16 bit signed int
  UInt16 = 0x0073,      // 16 bit unsigned int
  Int32Long = 0x0012,   // 32 bit signed
  UInt32Long = 0x0022,  // 32 bit unsigned
  Int32 = 0x0074,       // 32 bit signed int
  UInt32 = 0x0075,      // 32 bit unsigned int
  Int64Quad = 0x0013,   // 64 bit signed
  UInt64Quad = 0x0023,  // 64 bit unsigned
  Int64 = 0x0076,       // 64 bit signed int
  UInt64 = 0x0077,      // 64 bit unsigned int
  Int128Oct = 0x0014,   // 128 bit signed int
  UInt128Oct = 0x0024,  // 128 bit unsigned int
  Int128 = 0x0078,      // 128 bit signed int
  UInt128 = 0x0079,     // 128 bit unsigned int

  Float16 = 0x0046,                 // 16 bit real
  Float32 = 0x0040,                 // 32 bit real
  Float32PartialPrecision = 0x0045, // 32 bit PP real
  Float48 = 0x0044,                 // 48 bit real
  Float64 = 0x0041,                 // 64 bit real
  Float80 = 0x0042,                 // 80 bit real
  Float128 = 0x0043,                // 128 bit real

  Complex16 = 0x0056,                 // 16 bit complex
  Complex32 = 0x0050,                 // 32 bit complex
  Complex32PartialPrecision = 0x0055, // 32 bit PP complex
  Complex48 = 0x0054,                 // 48 bit complex
  Complex64 = 0x0051,                 // 64 bit complex
  Complex80 = 0x0052,                 // 80 bit complex
  Complex128 = 0x0053,                // 128 bit complex

  Boolean8 = 0x0030,   // 8 bit boolean
  Boolean16 = 0x0031,  // 16 bit boolean
  Boolean32 = 0x0032,  // 32 bit boolean
  Boolean64 = 0x0033,  // 64 bit boolean
  Boolean128 = 0x0034, // 128 bit boolean
};

enum class SimpleTypeMode : uint32_t {
  Direct = 0x00000000,        // Not a pointer
  NearPointer = 0x00000100,   // Near pointer
  FarPointer = 0x00000200,    // Far pointer
  HugePointer = 0x00000300,   // Huge pointer
  NearPointer32 = 0x00000400, // 32 bit near pointer
  FarPointer32 = 0x00000500,  // 32 bit far pointer
  NearPointer64 = 0x00000600, // 64 bit near pointer
  NearPointer128 = 0x00000700 // 128 bit near pointer
};

/// A 32-bit type reference. Types are indexed by their order of appearance in
/// .debug$T plus 0x1000. Type indices less than 0x1000 are "simple" types,
/// composed of a SimpleTypeMode byte followed by a SimpleTypeKind byte.
class TypeIndex {
public:
  static const uint32_t FirstNonSimpleIndex = 0x1000;
  static const uint32_t SimpleKindMask = 0x000000ff;
  static const uint32_t SimpleModeMask = 0x00000700;
  static const uint32_t DecoratedItemIdMask = 0x80000000;

public:
  TypeIndex() : Index(static_cast<uint32_t>(SimpleTypeKind::None)) {}
  explicit TypeIndex(uint32_t Index) : Index(Index) {}
  explicit TypeIndex(SimpleTypeKind Kind)
      : Index(static_cast<uint32_t>(Kind)) {}
  TypeIndex(SimpleTypeKind Kind, SimpleTypeMode Mode)
      : Index(static_cast<uint32_t>(Kind) | static_cast<uint32_t>(Mode)) {}

  uint32_t getIndex() const { return Index; }
  void setIndex(uint32_t I) { Index = I; }
  bool isSimple() const { return Index < FirstNonSimpleIndex; }
  bool isDecoratedItemId() const { return !!(Index & DecoratedItemIdMask); }

  bool isNoneType() const { return *this == None(); }

  uint32_t toArrayIndex() const {
    assert(!isSimple());
    return (getIndex() & ~DecoratedItemIdMask) - FirstNonSimpleIndex;
  }

  static TypeIndex fromArrayIndex(uint32_t Index) {
    return TypeIndex(Index + FirstNonSimpleIndex);
  }

  static TypeIndex fromDecoratedArrayIndex(bool IsItem, uint32_t Index) {
    return TypeIndex((Index + FirstNonSimpleIndex) |
                     (IsItem ? DecoratedItemIdMask : 0));
  }

  TypeIndex removeDecoration() {
    return TypeIndex(Index & ~DecoratedItemIdMask);
  }

  SimpleTypeKind getSimpleKind() const {
    assert(isSimple());
    return static_cast<SimpleTypeKind>(Index & SimpleKindMask);
  }

  SimpleTypeMode getSimpleMode() const {
    assert(isSimple());
    return static_cast<SimpleTypeMode>(Index & SimpleModeMask);
  }

  TypeIndex makeDirect() const { return TypeIndex{getSimpleKind()}; }

  static TypeIndex None() { return TypeIndex(SimpleTypeKind::None); }
  static TypeIndex Void() { return TypeIndex(SimpleTypeKind::Void); }
  static TypeIndex VoidPointer32() {
    return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer32);
  }
  static TypeIndex VoidPointer64() {
    return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer64);
  }

  static TypeIndex NullptrT() {
    // std::nullptr_t uses the pointer mode that doesn't indicate bit-width,
    // presumably because std::nullptr_t is intended to be compatible with any
    // pointer type.
    return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer);
  }

  static TypeIndex SignedCharacter() {
    return TypeIndex(SimpleTypeKind::SignedCharacter);
  }
  static TypeIndex UnsignedCharacter() {
    return TypeIndex(SimpleTypeKind::UnsignedCharacter);
  }
  static TypeIndex NarrowCharacter() {
    return TypeIndex(SimpleTypeKind::NarrowCharacter);
  }
  static TypeIndex WideCharacter() {
    return TypeIndex(SimpleTypeKind::WideCharacter);
  }
  static TypeIndex Int16Short() {
    return TypeIndex(SimpleTypeKind::Int16Short);
  }
  static TypeIndex UInt16Short() {
    return TypeIndex(SimpleTypeKind::UInt16Short);
  }
  static TypeIndex Int32() { return TypeIndex(SimpleTypeKind::Int32); }
  static TypeIndex UInt32() { return TypeIndex(SimpleTypeKind::UInt32); }
  static TypeIndex Int32Long() { return TypeIndex(SimpleTypeKind::Int32Long); }
  static TypeIndex UInt32Long() {
    return TypeIndex(SimpleTypeKind::UInt32Long);
  }
  static TypeIndex Int64() { return TypeIndex(SimpleTypeKind::Int64); }
  static TypeIndex UInt64() { return TypeIndex(SimpleTypeKind::UInt64); }
  static TypeIndex Int64Quad() { return TypeIndex(SimpleTypeKind::Int64Quad); }
  static TypeIndex UInt64Quad() {
    return TypeIndex(SimpleTypeKind::UInt64Quad);
  }

  static TypeIndex Float32() { return TypeIndex(SimpleTypeKind::Float32); }
  static TypeIndex Float64() { return TypeIndex(SimpleTypeKind::Float64); }

  TypeIndex &operator+=(unsigned N) {
    Index += N;
    return *this;
  }

  TypeIndex &operator++() {
    Index += 1;
    return *this;
  }

  TypeIndex operator++(int) {
    TypeIndex Copy = *this;
    operator++();
    return Copy;
  }

  TypeIndex &operator-=(unsigned N) {
    assert(Index >= N);
    Index -= N;
    return *this;
  }

  TypeIndex &operator--() {
    Index -= 1;
    return *this;
  }

  TypeIndex operator--(int) {
    TypeIndex Copy = *this;
    operator--();
    return Copy;
  }

  friend inline bool operator==(const TypeIndex &A, const TypeIndex &B) {
    return A.getIndex() == B.getIndex();
  }

  friend inline bool operator!=(const TypeIndex &A, const TypeIndex &B) {
    return A.getIndex() != B.getIndex();
  }

  friend inline bool operator<(const TypeIndex &A, const TypeIndex &B) {
    return A.getIndex() < B.getIndex();
  }

  friend inline bool operator<=(const TypeIndex &A, const TypeIndex &B) {
    return A.getIndex() <= B.getIndex();
  }

  friend inline bool operator>(const TypeIndex &A, const TypeIndex &B) {
    return A.getIndex() > B.getIndex();
  }

  friend inline bool operator>=(const TypeIndex &A, const TypeIndex &B) {
    return A.getIndex() >= B.getIndex();
  }

  friend inline TypeIndex operator+(const TypeIndex &A, uint32_t N) {
    TypeIndex Result(A);
    Result += N;
    return Result;
  }

  friend inline TypeIndex operator-(const TypeIndex &A, uint32_t N) {
    assert(A.getIndex() >= N);
    TypeIndex Result(A);
    Result -= N;
    return Result;
  }

  friend inline uint32_t operator-(const TypeIndex &A, const TypeIndex &B) {
    assert(A >= B);
    return A.toArrayIndex() - B.toArrayIndex();
  }

  static StringRef simpleTypeName(TypeIndex TI);

private:
  support::ulittle32_t Index;
};
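
// Example (illustrative sketch, not part of the original header): simple
// indices decompose into kind and mode, while record indices start at 0x1000
// and map to zero-based array positions.  The function name is invented.
inline bool exampleTypeIndexLayout() {
  TypeIndex VP64 = TypeIndex::VoidPointer64();
  bool SimpleOk = VP64.isSimple() &&
                  VP64.getSimpleKind() == SimpleTypeKind::Void &&
                  VP64.getSimpleMode() == SimpleTypeMode::NearPointer64;
  TypeIndex First = TypeIndex::fromArrayIndex(0); // first record in .debug$T
  return SimpleOk && First.getIndex() == 0x1000 && First.toArrayIndex() == 0;
}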

// Used for pseudo-indexing an array of type records.  An array of such records
// sorted by TypeIndex can allow log(N) lookups even though such a type record
// stream does not provide random access.
struct TypeIndexOffset {
  TypeIndex Type;
  support::ulittle32_t Offset;
};

void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName, TypeIndex TI,
                    TypeCollection &Types);
} // end namespace codeview

template <> struct DenseMapInfo<codeview::TypeIndex> {
  static inline codeview::TypeIndex getEmptyKey() {
    return codeview::TypeIndex{DenseMapInfo<uint32_t>::getEmptyKey()};
  }
  static inline codeview::TypeIndex getTombstoneKey() {
    return codeview::TypeIndex{DenseMapInfo<uint32_t>::getTombstoneKey()};
  }
  static unsigned getHashValue(const codeview::TypeIndex &TI) {
    return DenseMapInfo<uint32_t>::getHashValue(TI.getIndex());
  }
  static bool isEqual(const codeview::TypeIndex &LHS,
                      const codeview::TypeIndex &RHS) {
    return LHS == RHS;
  }
};

} // namespace llvm

#endif

//===- TypeHashing.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEHASHING_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEHASHING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"

#include "llvm/Support/FormatProviders.h"

#include <type_traits>

namespace llvm {
class raw_ostream;
namespace codeview {

/// A locally hashed type represents a straightforward hash code of a serialized
/// record.  The record is simply serialized, and then the bytes are hashed by
/// a standard algorithm.  This is sufficient for the case of de-duplicating
/// records within a single sequence of types, because if two records both have
/// a back-reference to the same type in the same stream, they will both have
/// the same numeric value for the TypeIndex of the back reference.
struct LocallyHashedType {
  hash_code Hash;
  ArrayRef<uint8_t> RecordData;

  /// Given a type, compute its local hash.
  static LocallyHashedType hashType(ArrayRef<uint8_t> RecordData);

  /// Given a sequence of types, compute all of the local hashes.
  template <typename Range>
  static std::vector<LocallyHashedType> hashTypes(Range &&Records) {
    std::vector<LocallyHashedType> Hashes;
    Hashes.reserve(std::distance(std::begin(Records), std::end(Records)));
    for (const auto &R : Records)
      Hashes.push_back(hashType(R));

    return Hashes;
  }

  static std::vector<LocallyHashedType>
  hashTypeCollection(TypeCollection &Types) {
    std::vector<LocallyHashedType> Hashes;
    Types.ForEachRecord([&Hashes](TypeIndex TI, const CVType &Type) {
      Hashes.push_back(hashType(Type.RecordData));
    });
    return Hashes;
  }
};
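
// Illustrative sketch (not part of the original header): counting distinct
// records in a sequence by local hash.  Assumes llvm/ADT/DenseMap.h is
// available and that the DenseMapInfo<LocallyHashedType> specialization
// (defined near the end of this file) is visible at the point of use.
template <typename Range> size_t countDistinctRecordsExample(Range &&Records) {
  std::vector<LocallyHashedType> Hashes =
      LocallyHashedType::hashTypes(Records);
  DenseMap<LocallyHashedType, unsigned> Seen;
  for (const LocallyHashedType &H : Hashes)
    ++Seen[H]; // equal hashes with equal bytes collapse to one entry
  return Seen.size();
}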

enum class GlobalTypeHashAlg : uint16_t {
  SHA1 = 0, // standard 20-byte SHA1 hash
  SHA1_8,   // last 8 bytes of the standard SHA1 hash
  BLAKE3,   // BLAKE3 hash truncated to 8 bytes
};

/// A globally hashed type represents a hash value that is sufficient to
/// uniquely identify a record across multiple type streams or type sequences.
/// This works by, for any given record A which references B, replacing the
/// TypeIndex that refers to B with a previously-computed global hash for B.  As
/// this is a recursive algorithm (e.g. the global hash of B also depends on the
/// global hashes of the types that B refers to), a global hash can uniquely
/// identify that A occurs in another stream that has a completely
/// different graph structure.  Although the hash itself is slower to compute,
/// probing is much faster with a globally hashed type, because the hash itself
/// is considered "as good as" the original type.  Since type records can be
/// quite large, this makes the equality comparison of the hash much faster than
/// equality comparison of a full record.
struct GloballyHashedType {
  GloballyHashedType() = default;
  GloballyHashedType(StringRef H)
      : GloballyHashedType(ArrayRef<uint8_t>(H.bytes_begin(), H.bytes_end())) {}
  GloballyHashedType(ArrayRef<uint8_t> H) {
    assert(H.size() == 8);
    ::memcpy(Hash.data(), H.data(), 8);
  }
  std::array<uint8_t, 8> Hash;

  bool empty() const { return *(const uint64_t*)Hash.data() == 0; }

  friend inline bool operator==(const GloballyHashedType &L,
                                const GloballyHashedType &R) {
    return L.Hash == R.Hash;
  }

  friend inline bool operator!=(const GloballyHashedType &L,
                                const GloballyHashedType &R) {
    return !(L.Hash == R.Hash);
  }

  /// Given a sequence of bytes representing a record, compute a global hash for
  /// this record.  Due to the nature of global hashes incorporating the hashes
  /// of referenced records, this function requires a list of types and ids
  /// that RecordData might reference, indexable by TypeIndex.
  static GloballyHashedType hashType(ArrayRef<uint8_t> RecordData,
                                     ArrayRef<GloballyHashedType> PreviousTypes,
                                     ArrayRef<GloballyHashedType> PreviousIds);

  /// Given a sequence of bytes representing a record, compute a global hash for
  /// this record.  Due to the nature of global hashes incorporating the hashes
  /// of referenced records, this function requires a list of types and ids
  /// that RecordData might reference, indexable by TypeIndex.
  static GloballyHashedType hashType(CVType Type,
                                     ArrayRef<GloballyHashedType> PreviousTypes,
                                     ArrayRef<GloballyHashedType> PreviousIds) {
    return hashType(Type.RecordData, PreviousTypes, PreviousIds);
  }

  /// Given a sequence of combined type and ID records, compute global hashes
  /// for each of them, returning the results in a vector of hashed types.
  template <typename Range>
  static std::vector<GloballyHashedType> hashTypes(Range &&Records) {
    std::vector<GloballyHashedType> Hashes;
    bool UnresolvedRecords = false;
    for (const auto &R : Records) {
      GloballyHashedType H = hashType(R, Hashes, Hashes);
      if (H.empty())
        UnresolvedRecords = true;
      Hashes.push_back(H);
    }

    // In rare cases the stream contains records with forward references, and
    // several passes may be needed to fully hash every record in the type
    // stream. This only occurs in very small OBJs generated by MASM, with a
    // dozen records at most, so this codepath isn't time-critical; it is
    // skipped in the overwhelming majority of cases.
    while (UnresolvedRecords) {
      UnresolvedRecords = false;
      auto HashIt = Hashes.begin();
      for (const auto &R : Records) {
        if (HashIt->empty()) {
          GloballyHashedType H = hashType(R, Hashes, Hashes);
          if (H.empty())
            UnresolvedRecords = true;
          else
            *HashIt = H;
        }
        ++HashIt;
      }
    }

    return Hashes;
  }

  /// Given a sequence of combined type and ID records, compute global hashes
  /// for each of them, returning the results in a vector of hashed types.
  template <typename Range>
  static std::vector<GloballyHashedType>
  hashIds(Range &&Records, ArrayRef<GloballyHashedType> TypeHashes) {
    std::vector<GloballyHashedType> IdHashes;
    for (const auto &R : Records)
      IdHashes.push_back(hashType(R, TypeHashes, IdHashes));

    return IdHashes;
  }

  static std::vector<GloballyHashedType>
  hashTypeCollection(TypeCollection &Types) {
    std::vector<GloballyHashedType> Hashes;
    Types.ForEachRecord([&Hashes](TypeIndex TI, const CVType &Type) {
      Hashes.push_back(hashType(Type.RecordData, Hashes, Hashes));
    });
    return Hashes;
  }
};
static_assert(std::is_trivially_copyable<GloballyHashedType>::value,
              "GloballyHashedType must be trivially copyable so that we can "
              "reinterpret_cast arrays of hash data to arrays of "
              "GloballyHashedType");
} // namespace codeview

template <> struct DenseMapInfo<codeview::LocallyHashedType> {
  static codeview::LocallyHashedType Empty;
  static codeview::LocallyHashedType Tombstone;

  static codeview::LocallyHashedType getEmptyKey() { return Empty; }

  static codeview::LocallyHashedType getTombstoneKey() { return Tombstone; }

  static unsigned getHashValue(codeview::LocallyHashedType Val) {
    return Val.Hash;
  }

  static bool isEqual(codeview::LocallyHashedType LHS,
                      codeview::LocallyHashedType RHS) {
    if (LHS.Hash != RHS.Hash)
      return false;
    return LHS.RecordData == RHS.RecordData;
  }
};

template <> struct DenseMapInfo<codeview::GloballyHashedType> {
  static codeview::GloballyHashedType Empty;
  static codeview::GloballyHashedType Tombstone;

  static codeview::GloballyHashedType getEmptyKey() { return Empty; }

  static codeview::GloballyHashedType getTombstoneKey() { return Tombstone; }

  static unsigned getHashValue(codeview::GloballyHashedType Val) {
    return *reinterpret_cast<const unsigned *>(Val.Hash.data());
  }

  static bool isEqual(codeview::GloballyHashedType LHS,
                      codeview::GloballyHashedType RHS) {
    return LHS == RHS;
  }
};

template <> struct format_provider<codeview::LocallyHashedType> {
public:
  static void format(const codeview::LocallyHashedType &V,
                     llvm::raw_ostream &Stream, StringRef Style) {
    write_hex(Stream, V.Hash, HexPrintStyle::Upper, 8);
  }
};

template <> struct format_provider<codeview::GloballyHashedType> {
public:
  static void format(const codeview::GloballyHashedType &V,
                     llvm::raw_ostream &Stream, StringRef Style) {
    for (uint8_t B : V.Hash) {
      write_hex(Stream, B, HexPrintStyle::Upper, 2);
    }
  }
};

} // namespace llvm

#endif

//===- TypeStreamMerger.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESTREAMMERGER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPESTREAMMERGER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/Support/Error.h"

namespace llvm {
template <typename T> class SmallVectorImpl;
namespace codeview {

class TypeIndex;
struct GloballyHashedType;
class GlobalTypeTableBuilder;
class MergingTypeTableBuilder;

/// Used to forward information about PCH.OBJ (precompiled) files, when
/// applicable.
struct PCHMergerInfo {
  uint32_t PCHSignature{};
  uint32_t EndPrecompIndex = ~0U;
};

/// Merge one set of type records into another.  This method assumes
/// that all records are type records, and there are no Id records present.
///
/// \param Dest The table to store the re-written type records into.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// type stream, that contains the index of the corresponding type record
/// in the destination stream.
///
/// \param Types The collection of types to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeRecords(MergingTypeTableBuilder &Dest,
                       SmallVectorImpl<TypeIndex> &SourceToDest,
                       const CVTypeArray &Types);

/// Merge one set of id records into another.  This method assumes
/// that all records are id records, and there are no Type records present.
/// However, since Id records can refer back to Type records, this method
/// assumes that the referenced type records have also been merged into
/// another type stream (for example using the above method), and accepts
/// the mapping from source to dest for that stream so that it can re-write
/// the type record mappings accordingly.
///
/// \param Dest The table to store the re-written id records into.
///
/// \param Types The mapping to use for the type records that these id
/// records refer to.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param Ids The collection of id records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeIdRecords(MergingTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
                     SmallVectorImpl<TypeIndex> &SourceToDest,
                     const CVTypeArray &Ids);
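
// Illustrative sketch (not part of the original header): merging one module's
// type stream and then its id stream into shared destination tables.  Assumes
// MergingTypeTableBuilder.h, TypeIndex.h, and llvm/ADT/SmallVector.h are
// included so all types involved are complete.
inline Error mergeModuleExample(MergingTypeTableBuilder &DestTypes,
                                MergingTypeTableBuilder &DestIds,
                                const CVTypeArray &TypeSource,
                                const CVTypeArray &IdSource) {
  SmallVector<TypeIndex, 128> TypeMap; // source index -> destination index
  if (Error E = mergeTypeRecords(DestTypes, TypeMap, TypeSource))
    return E;
  // Id records are rewritten against TypeMap so their references to type
  // records remain valid in the destination stream.
  SmallVector<TypeIndex, 128> IdMap;
  return mergeIdRecords(DestIds, TypeMap, IdMap, IdSource);
}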

/// Merge a unified set of type and id records, splitting them into
/// separate output streams.
///
/// \param DestIds The table to store the re-written id records into.
///
/// \param DestTypes the table to store the re-written type records into.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param IdsAndTypes The collection of id records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeAndIdRecords(MergingTypeTableBuilder &DestIds,
                            MergingTypeTableBuilder &DestTypes,
                            SmallVectorImpl<TypeIndex> &SourceToDest,
                            const CVTypeArray &IdsAndTypes,
                            std::optional<PCHMergerInfo> &PCHInfo);

Error mergeTypeAndIdRecords(GlobalTypeTableBuilder &DestIds,
                            GlobalTypeTableBuilder &DestTypes,
                            SmallVectorImpl<TypeIndex> &SourceToDest,
                            const CVTypeArray &IdsAndTypes,
                            ArrayRef<GloballyHashedType> Hashes,
                            std::optional<PCHMergerInfo> &PCHInfo);

Error mergeTypeRecords(GlobalTypeTableBuilder &Dest,
                       SmallVectorImpl<TypeIndex> &SourceToDest,
                       const CVTypeArray &Types,
                       ArrayRef<GloballyHashedType> Hashes,
                       std::optional<PCHMergerInfo> &PCHInfo);

Error mergeIdRecords(GlobalTypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
                     SmallVectorImpl<TypeIndex> &SourceToDest,
                     const CVTypeArray &Ids,
                     ArrayRef<GloballyHashedType> Hashes);

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPESTREAMMERGER_H

//===- SymbolRecordMapping.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDMAPPING_H

#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"

namespace llvm {
class BinaryStreamReader;
class BinaryStreamWriter;

namespace codeview {
class SymbolRecordMapping : public SymbolVisitorCallbacks {
public:
  explicit SymbolRecordMapping(BinaryStreamReader &Reader,
                               CodeViewContainer Container)
      : IO(Reader), Container(Container) {}
  explicit SymbolRecordMapping(BinaryStreamWriter &Writer,
                               CodeViewContainer Container)
      : IO(Writer), Container(Container) {}

  Error visitSymbolBegin(CVSymbol &Record) override;
  Error visitSymbolEnd(CVSymbol &Record) override;

#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override;
#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"

private:
  std::optional<SymbolKind> Kind;

  CodeViewRecordIO IO;
  CodeViewContainer Container;
};
}
}

#endif

//===- DebugInlineeLinesSubsection.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGINLINEELINESSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGINLINEELINESSUBSECTION_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {

namespace codeview {

class DebugChecksumsSubsection;

enum class InlineeLinesSignature : uint32_t {
  Normal,    // CV_INLINEE_SOURCE_LINE_SIGNATURE
  ExtraFiles // CV_INLINEE_SOURCE_LINE_SIGNATURE_EX
};

struct InlineeSourceLineHeader {
  TypeIndex Inlinee;                  // ID of the function that was inlined.
  support::ulittle32_t FileID;        // Offset into FileChecksums subsection.
  support::ulittle32_t SourceLineNum; // First line of inlined code.
                                      // If extra files present:
                                      //   ulittle32_t ExtraFileCount;
                                      //   ulittle32_t Files[];
};

struct InlineeSourceLine {
  const InlineeSourceLineHeader *Header;
  FixedStreamArray<support::ulittle32_t> ExtraFiles;
};

} // end namespace codeview

template <> struct VarStreamArrayExtractor<codeview::InlineeSourceLine> {
  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
                   codeview::InlineeSourceLine &Item);

  bool HasExtraFiles = false;
};

namespace codeview {

class DebugInlineeLinesSubsectionRef final : public DebugSubsectionRef {
  using LinesArray = VarStreamArray<InlineeSourceLine>;
  using Iterator = LinesArray::Iterator;

public:
  DebugInlineeLinesSubsectionRef();

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::InlineeLines;
  }

  Error initialize(BinaryStreamReader Reader);
  Error initialize(BinaryStreamRef Section) {
    return initialize(BinaryStreamReader(Section));
  }

  bool valid() const { return Lines.valid(); }
  bool hasExtraFiles() const;

  Iterator begin() const { return Lines.begin(); }
  Iterator end() const { return Lines.end(); }

private:
  InlineeLinesSignature Signature;
  LinesArray Lines;
};

class DebugInlineeLinesSubsection final : public DebugSubsection {
public:
  struct Entry {
    std::vector<support::ulittle32_t> ExtraFiles;
    InlineeSourceLineHeader Header;
  };

  DebugInlineeLinesSubsection(DebugChecksumsSubsection &Checksums,
                              bool HasExtraFiles = false);

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::InlineeLines;
  }

  Error commit(BinaryStreamWriter &Writer) const override;
  uint32_t calculateSerializedSize() const override;

  void addInlineSite(TypeIndex FuncId, StringRef FileName, uint32_t SourceLine);
  void addExtraFile(StringRef FileName);

  bool hasExtraFiles() const { return HasExtraFiles; }
  void setHasExtraFiles(bool Has) { HasExtraFiles = Has; }

  std::vector<Entry>::const_iterator begin() const { return Entries.begin(); }
  std::vector<Entry>::const_iterator end() const { return Entries.end(); }

private:
  DebugChecksumsSubsection &Checksums;
  bool HasExtraFiles = false;
  uint32_t ExtraFileCount = 0;
  std::vector<Entry> Entries;
};
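
// Illustrative sketch (not part of the original header): populating a
// subsection.  The file names are hypothetical and must already have entries
// in the DebugChecksumsSubsection the subsection was constructed with.
inline void addInlineSiteExample(DebugInlineeLinesSubsection &Subsection,
                                 TypeIndex InlineeId) {
  // Record that InlineeId's body, defined at line 42 of "inlined.h", was
  // inlined somewhere in this compiland.
  Subsection.addInlineSite(InlineeId, "inlined.h", 42);
  // Extra files apply only in ExtraFiles
  // (CV_INLINEE_SOURCE_LINE_SIGNATURE_EX) mode.
  if (Subsection.hasExtraFiles())
    Subsection.addExtraFile("other.h");
}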

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGINLINEELINESSUBSECTION_H

//===- CVTypeVisitor.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
class TypeIndex;
class TypeCollection;
class TypeVisitorCallbacks;
struct CVMemberRecord;

enum VisitorDataSource {
  VDS_BytesPresent, // The record bytes are passed into the visitation
                    // function.  The algorithm should first deserialize them
                    // before passing them on through the pipeline.
  VDS_BytesExternal // The record bytes are not present, and it is the
                    // responsibility of the visitor callback interface to
                    // supply the bytes.
};

Error visitTypeRecord(CVType &Record, TypeIndex Index,
                      TypeVisitorCallbacks &Callbacks,
                      VisitorDataSource Source = VDS_BytesPresent);
Error visitTypeRecord(CVType &Record, TypeVisitorCallbacks &Callbacks,
                      VisitorDataSource Source = VDS_BytesPresent);

Error visitMemberRecord(CVMemberRecord Record, TypeVisitorCallbacks &Callbacks,
                        VisitorDataSource Source = VDS_BytesPresent);
Error visitMemberRecord(TypeLeafKind Kind, ArrayRef<uint8_t> Record,
                        TypeVisitorCallbacks &Callbacks);

Error visitMemberRecordStream(ArrayRef<uint8_t> FieldList,
                              TypeVisitorCallbacks &Callbacks);

Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
                      VisitorDataSource Source = VDS_BytesPresent);
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks);
Error visitTypeStream(TypeCollection &Types, TypeVisitorCallbacks &Callbacks);
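
// Illustrative sketch (not part of the original header): counting the records
// in a collection with a minimal callback.  Assumes TypeVisitorCallbacks.h is
// included so the callbacks base class is complete.
inline Expected<uint32_t> countTypesExample(TypeCollection &Types) {
  struct Counter : public TypeVisitorCallbacks {
    uint32_t Count = 0;
    Error visitTypeBegin(CVType &Record) override {
      ++Count;
      return Error::success();
    }
  } C;
  if (Error E = visitTypeStream(Types, C))
    return std::move(E);
  return C.Count;
}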

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H

//===- SimpleTypeSerializer.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SIMPLETYPESERIALIZER_H
#define LLVM_DEBUGINFO_CODEVIEW_SIMPLETYPESERIALIZER_H

#include "llvm/ADT/ArrayRef.h"
#include <vector>

namespace llvm {
namespace codeview {
class FieldListRecord;

class SimpleTypeSerializer {
  std::vector<uint8_t> ScratchBuffer;

public:
  SimpleTypeSerializer();
  ~SimpleTypeSerializer();

  // This template is explicitly instantiated in the implementation file for all
  // supported types.  The method itself is ugly, so inlining it into the header
  // file clutters an otherwise straightforward interface.
  template <typename T> ArrayRef<uint8_t> serialize(T &Record);

  // Don't allow serialization of field list records using this interface.
  ArrayRef<uint8_t> serialize(const FieldListRecord &Record) = delete;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SIMPLETYPESERIALIZER_H

//===- DebugSymbolRVASubsection.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLRVASUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLRVASUBSECTION_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {

class BinaryStreamReader;

namespace codeview {

class DebugSymbolRVASubsectionRef final : public DebugSubsectionRef {
public:
  using ArrayType = FixedStreamArray<support::ulittle32_t>;

  DebugSymbolRVASubsectionRef();

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::CoffSymbolRVA;
  }

  ArrayType::Iterator begin() const { return RVAs.begin(); }
  ArrayType::Iterator end() const { return RVAs.end(); }

  Error initialize(BinaryStreamReader &Reader);

private:
  ArrayType RVAs;
};

class DebugSymbolRVASubsection final : public DebugSubsection {
public:
  DebugSymbolRVASubsection();

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::CoffSymbolRVA;
  }

  Error commit(BinaryStreamWriter &Writer) const override;
  uint32_t calculateSerializedSize() const override;

  void addRVA(uint32_t RVA) { RVAs.push_back(support::ulittle32_t(RVA)); }

private:
  std::vector<support::ulittle32_t> RVAs;
};

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLRVASUBSECTION_H

//===-- CodeViewSymbols.def - All CodeView leaf types -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See LEAF_ENUM_e in cvinfo.h. This should match the constants there.
//
//===----------------------------------------------------------------------===//

#ifndef CV_SYMBOL
#define CV_SYMBOL(ename, value)
#endif

#ifndef SYMBOL_RECORD
#define SYMBOL_RECORD(lf_ename, value, name) CV_SYMBOL(lf_ename, value)
#endif

#ifndef SYMBOL_RECORD_ALIAS
#define SYMBOL_RECORD_ALIAS(lf_ename, value, name, alias_name)                   \
  SYMBOL_RECORD(lf_ename, value, name)
#endif
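
// Illustrative usage note (not part of the original file): a consumer defines
// one of the macros above before including this file.  For example, building
// an enum of every symbol kind looks like:
//
//   enum SymbolKind : uint16_t {
//   #define CV_SYMBOL(ename, value) ename = value,
//   #include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
//   };
//
// Any macro left undefined falls back to the no-op defaults above, so
// consumers define only the granularity they care about.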

// 16-bit symbol types. Not very useful; provided only for reference.
CV_SYMBOL(S_COMPILE       , 0x0001)
CV_SYMBOL(S_REGISTER_16t  , 0x0002)
CV_SYMBOL(S_CONSTANT_16t  , 0x0003)
CV_SYMBOL(S_UDT_16t       , 0x0004)
CV_SYMBOL(S_SSEARCH       , 0x0005)
CV_SYMBOL(S_SKIP          , 0x0007)
CV_SYMBOL(S_CVRESERVE     , 0x0008)
CV_SYMBOL(S_OBJNAME_ST    , 0x0009)
CV_SYMBOL(S_ENDARG        , 0x000a)
CV_SYMBOL(S_COBOLUDT_16t  , 0x000b)
CV_SYMBOL(S_MANYREG_16t   , 0x000c)
CV_SYMBOL(S_RETURN        , 0x000d)
CV_SYMBOL(S_ENTRYTHIS     , 0x000e)
CV_SYMBOL(S_BPREL16       , 0x0100)
CV_SYMBOL(S_LDATA16       , 0x0101)
CV_SYMBOL(S_GDATA16       , 0x0102)
CV_SYMBOL(S_PUB16         , 0x0103)
CV_SYMBOL(S_LPROC16       , 0x0104)
CV_SYMBOL(S_GPROC16       , 0x0105)
CV_SYMBOL(S_THUNK16       , 0x0106)
CV_SYMBOL(S_BLOCK16       , 0x0107)
CV_SYMBOL(S_WITH16        , 0x0108)
CV_SYMBOL(S_LABEL16       , 0x0109)
CV_SYMBOL(S_CEXMODEL16    , 0x010a)
CV_SYMBOL(S_VFTABLE16     , 0x010b)
CV_SYMBOL(S_REGREL16      , 0x010c)
CV_SYMBOL(S_BPREL32_16t   , 0x0200)
CV_SYMBOL(S_LDATA32_16t   , 0x0201)
CV_SYMBOL(S_GDATA32_16t   , 0x0202)
CV_SYMBOL(S_PUB32_16t     , 0x0203)
CV_SYMBOL(S_LPROC32_16t   , 0x0204)
CV_SYMBOL(S_GPROC32_16t   , 0x0205)
CV_SYMBOL(S_THUNK32_ST    , 0x0206)
CV_SYMBOL(S_BLOCK32_ST    , 0x0207)
CV_SYMBOL(S_WITH32_ST     , 0x0208)
CV_SYMBOL(S_LABEL32_ST    , 0x0209)
CV_SYMBOL(S_CEXMODEL32    , 0x020a)
CV_SYMBOL(S_VFTABLE32_16t , 0x020b)
CV_SYMBOL(S_REGREL32_16t  , 0x020c)
CV_SYMBOL(S_LTHREAD32_16t , 0x020d)
CV_SYMBOL(S_GTHREAD32_16t , 0x020e)
CV_SYMBOL(S_SLINK32       , 0x020f)
CV_SYMBOL(S_LPROCMIPS_16t , 0x0300)
CV_SYMBOL(S_GPROCMIPS_16t , 0x0301)
CV_SYMBOL(S_PROCREF_ST    , 0x0400)
CV_SYMBOL(S_DATAREF_ST    , 0x0401)
CV_SYMBOL(S_ALIGN         , 0x0402)
CV_SYMBOL(S_LPROCREF_ST   , 0x0403)
CV_SYMBOL(S_OEM           , 0x0404)

// All post-16-bit symbol types have the 0x1000 bit set.
CV_SYMBOL(S_TI16_MAX      , 0x1000)

// Mostly unused "start" symbol types.
CV_SYMBOL(S_REGISTER_ST   , 0x1001)
CV_SYMBOL(S_CONSTANT_ST   , 0x1002)
CV_SYMBOL(S_UDT_ST        , 0x1003)
CV_SYMBOL(S_COBOLUDT_ST   , 0x1004)
CV_SYMBOL(S_MANYREG_ST    , 0x1005)
CV_SYMBOL(S_BPREL32_ST    , 0x1006)
CV_SYMBOL(S_LDATA32_ST    , 0x1007)
CV_SYMBOL(S_GDATA32_ST    , 0x1008)
CV_SYMBOL(S_PUB32_ST      , 0x1009)
CV_SYMBOL(S_LPROC32_ST    , 0x100a)
CV_SYMBOL(S_GPROC32_ST    , 0x100b)
CV_SYMBOL(S_VFTABLE32     , 0x100c)
CV_SYMBOL(S_REGREL32_ST   , 0x100d)
CV_SYMBOL(S_LTHREAD32_ST  , 0x100e)
CV_SYMBOL(S_GTHREAD32_ST  , 0x100f)
CV_SYMBOL(S_LPROCMIPS_ST  , 0x1010)
CV_SYMBOL(S_GPROCMIPS_ST  , 0x1011)

CV_SYMBOL(S_COMPILE2_ST   , 0x1013)
CV_SYMBOL(S_MANYREG2_ST   , 0x1014)
CV_SYMBOL(S_LPROCIA64_ST  , 0x1015)
CV_SYMBOL(S_GPROCIA64_ST  , 0x1016)
CV_SYMBOL(S_LOCALSLOT_ST  , 0x1017)
CV_SYMBOL(S_PARAMSLOT_ST  , 0x1018)
CV_SYMBOL(S_GMANPROC_ST   , 0x101a)
CV_SYMBOL(S_LMANPROC_ST   , 0x101b)
CV_SYMBOL(S_RESERVED1     , 0x101c)
CV_SYMBOL(S_RESERVED2     , 0x101d)
CV_SYMBOL(S_RESERVED3     , 0x101e)
CV_SYMBOL(S_RESERVED4     , 0x101f)
CV_SYMBOL(S_LMANDATA_ST   , 0x1020)
CV_SYMBOL(S_GMANDATA_ST   , 0x1021)
CV_SYMBOL(S_MANFRAMEREL_ST, 0x1022)
CV_SYMBOL(S_MANREGISTER_ST, 0x1023)
CV_SYMBOL(S_MANSLOT_ST    , 0x1024)
CV_SYMBOL(S_MANMANYREG_ST , 0x1025)
CV_SYMBOL(S_MANREGREL_ST  , 0x1026)
CV_SYMBOL(S_MANMANYREG2_ST, 0x1027)
CV_SYMBOL(S_MANTYPREF     , 0x1028)
CV_SYMBOL(S_UNAMESPACE_ST , 0x1029)

// End of S_*_ST symbols, which do not appear to be generated by modern
// compilers.
CV_SYMBOL(S_ST_MAX        , 0x1100)


CV_SYMBOL(S_WITH32        , 0x1104)
CV_SYMBOL(S_MANYREG       , 0x110a)
CV_SYMBOL(S_LPROCMIPS     , 0x1114)
CV_SYMBOL(S_GPROCMIPS     , 0x1115)
CV_SYMBOL(S_MANYREG2      , 0x1117)
CV_SYMBOL(S_LPROCIA64     , 0x1118)
CV_SYMBOL(S_GPROCIA64     , 0x1119)
CV_SYMBOL(S_LOCALSLOT     , 0x111a)
CV_SYMBOL(S_PARAMSLOT     , 0x111b)

// Managed code symbols.
CV_SYMBOL(S_MANFRAMEREL   , 0x111e)
CV_SYMBOL(S_MANREGISTER   , 0x111f)
CV_SYMBOL(S_MANSLOT       , 0x1120)
CV_SYMBOL(S_MANMANYREG    , 0x1121)
CV_SYMBOL(S_MANREGREL     , 0x1122)
CV_SYMBOL(S_MANMANYREG2   , 0x1123)
CV_SYMBOL(S_DATAREF       , 0x1126)
CV_SYMBOL(S_ANNOTATIONREF , 0x1128)
CV_SYMBOL(S_TOKENREF      , 0x1129)
CV_SYMBOL(S_GMANPROC      , 0x112a)
CV_SYMBOL(S_LMANPROC      , 0x112b)
CV_SYMBOL(S_ATTR_FRAMEREL , 0x112e)
CV_SYMBOL(S_ATTR_REGISTER , 0x112f)
CV_SYMBOL(S_ATTR_REGREL   , 0x1130)
CV_SYMBOL(S_ATTR_MANYREG  , 0x1131)


CV_SYMBOL(S_SEPCODE       , 0x1132)
CV_SYMBOL(S_LOCAL_2005    , 0x1133)
CV_SYMBOL(S_DEFRANGE_2005 , 0x1134)
CV_SYMBOL(S_DEFRANGE2_2005, 0x1135)
CV_SYMBOL(S_DISCARDED     , 0x113b)

// Current symbol types for most procedures as of this writing.
CV_SYMBOL(S_LPROCMIPS_ID   , 0x1148)
CV_SYMBOL(S_GPROCMIPS_ID   , 0x1149)
CV_SYMBOL(S_LPROCIA64_ID   , 0x114a)
CV_SYMBOL(S_GPROCIA64_ID   , 0x114b)

CV_SYMBOL(S_DEFRANGE_HLSL  , 0x1150)
CV_SYMBOL(S_GDATA_HLSL     , 0x1151)
CV_SYMBOL(S_LDATA_HLSL     , 0x1152)
CV_SYMBOL(S_LOCAL_DPC_GROUPSHARED, 0x1154)
CV_SYMBOL(S_DEFRANGE_DPC_PTR_TAG, 0x1157)
CV_SYMBOL(S_DPC_SYM_TAG_MAP, 0x1158)
CV_SYMBOL(S_ARMSWITCHTABLE , 0x1159)
CV_SYMBOL(S_POGODATA       , 0x115c)
CV_SYMBOL(S_INLINESITE2    , 0x115d)
CV_SYMBOL(S_MOD_TYPEREF    , 0x115f)
CV_SYMBOL(S_REF_MINIPDB    , 0x1160)
CV_SYMBOL(S_PDBMAP         , 0x1161)
CV_SYMBOL(S_GDATA_HLSL32   , 0x1162)
CV_SYMBOL(S_LDATA_HLSL32   , 0x1163)
CV_SYMBOL(S_GDATA_HLSL32_EX, 0x1164)
CV_SYMBOL(S_LDATA_HLSL32_EX, 0x1165)

CV_SYMBOL(S_FASTLINK, 0x1167) // Undocumented
SYMBOL_RECORD_ALIAS(S_INLINEES, 0x1168, InlineesSym, CallerSym) // Undocumented

// Known symbol types
SYMBOL_RECORD(S_END                  , 0x0006, ScopeEndSym)
SYMBOL_RECORD_ALIAS(S_INLINESITE_END , 0x114e, InlineSiteEnd, ScopeEndSym)
SYMBOL_RECORD_ALIAS(S_PROC_ID_END    , 0x114f, ProcEnd, ScopeEndSym)

SYMBOL_RECORD(S_THUNK32       , 0x1102, Thunk32Sym)
SYMBOL_RECORD(S_TRAMPOLINE    , 0x112c, TrampolineSym)
SYMBOL_RECORD(S_SECTION       , 0x1136, SectionSym)
SYMBOL_RECORD(S_COFFGROUP     , 0x1137, CoffGroupSym)
SYMBOL_RECORD(S_EXPORT        , 0x1138, ExportSym)

SYMBOL_RECORD(S_LPROC32       , 0x110f, ProcSym)
SYMBOL_RECORD_ALIAS(S_GPROC32       , 0x1110, GlobalProcSym, ProcSym)
SYMBOL_RECORD_ALIAS(S_LPROC32_ID     , 0x1146, ProcIdSym, ProcSym)
SYMBOL_RECORD_ALIAS(S_GPROC32_ID     , 0x1147, GlobalProcIdSym, ProcSym)
SYMBOL_RECORD_ALIAS(S_LPROC32_DPC    , 0x1155, DPCProcSym, ProcSym)
SYMBOL_RECORD_ALIAS(S_LPROC32_DPC_ID , 0x1156, DPCProcIdSym, ProcSym)

SYMBOL_RECORD(S_REGISTER      , 0x1106, RegisterSym)
SYMBOL_RECORD(S_PUB32         , 0x110e, PublicSym32)

SYMBOL_RECORD(S_PROCREF       , 0x1125, ProcRefSym)
SYMBOL_RECORD_ALIAS(S_LPROCREF, 0x1127, LocalProcRef, ProcRefSym)


SYMBOL_RECORD(S_ENVBLOCK      , 0x113d, EnvBlockSym)

SYMBOL_RECORD(S_INLINESITE     , 0x114d, InlineSiteSym)
SYMBOL_RECORD(S_LOCAL         , 0x113e, LocalSym)
SYMBOL_RECORD(S_DEFRANGE      , 0x113f, DefRangeSym)
SYMBOL_RECORD(S_DEFRANGE_SUBFIELD, 0x1140, DefRangeSubfieldSym)
SYMBOL_RECORD(S_DEFRANGE_REGISTER, 0x1141, DefRangeRegisterSym)
SYMBOL_RECORD(S_DEFRANGE_FRAMEPOINTER_REL, 0x1142, DefRangeFramePointerRelSym)
SYMBOL_RECORD(S_DEFRANGE_SUBFIELD_REGISTER, 0x1143, DefRangeSubfieldRegisterSym)
SYMBOL_RECORD(S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE, 0x1144, DefRangeFramePointerRelFullScopeSym)
SYMBOL_RECORD(S_DEFRANGE_REGISTER_REL, 0x1145, DefRangeRegisterRelSym)
SYMBOL_RECORD(S_BLOCK32       , 0x1103, BlockSym)
SYMBOL_RECORD(S_LABEL32       , 0x1105, LabelSym)
SYMBOL_RECORD(S_OBJNAME       , 0x1101, ObjNameSym)
SYMBOL_RECORD(S_COMPILE2      , 0x1116, Compile2Sym)
SYMBOL_RECORD(S_COMPILE3      , 0x113c, Compile3Sym)
SYMBOL_RECORD(S_FRAMEPROC     , 0x1012, FrameProcSym)
SYMBOL_RECORD(S_CALLSITEINFO  , 0x1139, CallSiteInfoSym)
SYMBOL_RECORD(S_FILESTATIC     , 0x1153, FileStaticSym)
SYMBOL_RECORD(S_HEAPALLOCSITE  , 0x115e, HeapAllocationSiteSym)
SYMBOL_RECORD(S_FRAMECOOKIE   , 0x113a, FrameCookieSym)

SYMBOL_RECORD(S_CALLEES        , 0x115a, CallerSym)
SYMBOL_RECORD_ALIAS(S_CALLERS, 0x115b, CalleeSym, CallerSym)

SYMBOL_RECORD(S_UDT           , 0x1108, UDTSym)
SYMBOL_RECORD_ALIAS(S_COBOLUDT      , 0x1109, CobolUDT, UDTSym)

SYMBOL_RECORD(S_BUILDINFO      , 0x114c, BuildInfoSym)
SYMBOL_RECORD(S_BPREL32       , 0x110b, BPRelativeSym)
SYMBOL_RECORD(S_REGREL32      , 0x1111, RegRelativeSym)

SYMBOL_RECORD(S_CONSTANT      , 0x1107, ConstantSym)
SYMBOL_RECORD_ALIAS(S_MANCONSTANT   , 0x112d, ManagedConstant, ConstantSym)

SYMBOL_RECORD(S_LDATA32       , 0x110c, DataSym)
SYMBOL_RECORD_ALIAS(S_GDATA32       , 0x110d, GlobalData, DataSym)
SYMBOL_RECORD_ALIAS(S_LMANDATA      , 0x111c, ManagedLocalData, DataSym)
SYMBOL_RECORD_ALIAS(S_GMANDATA      , 0x111d, ManagedGlobalData, DataSym)

SYMBOL_RECORD(S_LTHREAD32     , 0x1112, ThreadLocalDataSym)
SYMBOL_RECORD_ALIAS(S_GTHREAD32     , 0x1113, GlobalTLS, ThreadLocalDataSym)

SYMBOL_RECORD(S_UNAMESPACE    , 0x1124, UsingNamespaceSym)
SYMBOL_RECORD(S_ANNOTATION    , 0x1019, AnnotationSym)

#undef CV_SYMBOL
#undef SYMBOL_RECORD
#undef SYMBOL_RECORD_ALIAS

//===- CodeViewRecordIO.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H
#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>
#include <type_traits>

namespace llvm {

template <typename T> class ArrayRef;
class APSInt;

namespace codeview {
class TypeIndex;
struct GUID;

class CodeViewRecordStreamer {
public:
  virtual void emitBytes(StringRef Data) = 0;
  virtual void emitIntValue(uint64_t Value, unsigned Size) = 0;
  virtual void emitBinaryData(StringRef Data) = 0;
  virtual void AddComment(const Twine &T) = 0;
  virtual void AddRawComment(const Twine &T) = 0;
  virtual bool isVerboseAsm() = 0;
  virtual std::string getTypeName(TypeIndex TI) = 0;
  virtual ~CodeViewRecordStreamer() = default;
};

class CodeViewRecordIO {
  uint32_t getCurrentOffset() const {
    if (isWriting())
      return Writer->getOffset();
    else if (isReading())
      return Reader->getOffset();
    else
      return 0;
  }

public:
  // deserializes records to structures
  explicit CodeViewRecordIO(BinaryStreamReader &Reader) : Reader(&Reader) {}

  // serializes records to buffer
  explicit CodeViewRecordIO(BinaryStreamWriter &Writer) : Writer(&Writer) {}

  // writes records to assembly file using MC library interface
  explicit CodeViewRecordIO(CodeViewRecordStreamer &Streamer)
      : Streamer(&Streamer) {}

  Error beginRecord(std::optional<uint32_t> MaxLength);
  Error endRecord();

  Error mapInteger(TypeIndex &TypeInd, const Twine &Comment = "");

  bool isStreaming() const {
    return (Streamer != nullptr) && (Reader == nullptr) && (Writer == nullptr);
  }
  bool isReading() const {
    return (Reader != nullptr) && (Streamer == nullptr) && (Writer == nullptr);
  }
  bool isWriting() const {
    return (Writer != nullptr) && (Streamer == nullptr) && (Reader == nullptr);
  }

  uint32_t maxFieldLength() const;

  template <typename T> Error mapObject(T &Value) {
    if (isStreaming()) {
      StringRef BytesSR =
          StringRef((reinterpret_cast<const char *>(&Value)), sizeof(Value));
      Streamer->emitBytes(BytesSR);
      incrStreamedLen(sizeof(T));
      return Error::success();
    }

    if (isWriting())
      return Writer->writeObject(Value);

    const T *ValuePtr;
    if (auto EC = Reader->readObject(ValuePtr))
      return EC;
    Value = *ValuePtr;
    return Error::success();
  }

  template <typename T> Error mapInteger(T &Value, const Twine &Comment = "") {
    if (isStreaming()) {
      emitComment(Comment);
      Streamer->emitIntValue((int)Value, sizeof(T));
      incrStreamedLen(sizeof(T));
      return Error::success();
    }

    if (isWriting())
      return Writer->writeInteger(Value);

    return Reader->readInteger(Value);
  }

  template <typename T> Error mapEnum(T &Value, const Twine &Comment = "") {
    if (!isStreaming() && sizeof(Value) > maxFieldLength())
      return make_error<CodeViewError>(cv_error_code::insufficient_buffer);

    using U = std::underlying_type_t<T>;
    U X;

    if (isWriting() || isStreaming())
      X = static_cast<U>(Value);

    if (auto EC = mapInteger(X, Comment))
      return EC;

    if (isReading())
      Value = static_cast<T>(X);

    return Error::success();
  }

  Error mapEncodedInteger(int64_t &Value, const Twine &Comment = "");
  Error mapEncodedInteger(uint64_t &Value, const Twine &Comment = "");
  Error mapEncodedInteger(APSInt &Value, const Twine &Comment = "");
  Error mapStringZ(StringRef &Value, const Twine &Comment = "");
  Error mapGuid(GUID &Guid, const Twine &Comment = "");

  Error mapStringZVectorZ(std::vector<StringRef> &Value,
                          const Twine &Comment = "");

  template <typename SizeType, typename T, typename ElementMapper>
  Error mapVectorN(T &Items, const ElementMapper &Mapper,
                   const Twine &Comment = "") {
    SizeType Size;
    if (isStreaming()) {
      Size = static_cast<SizeType>(Items.size());
      emitComment(Comment);
      Streamer->emitIntValue(Size, sizeof(Size));
      incrStreamedLen(sizeof(Size)); // account for the emitted size field

      for (auto &X : Items) {
        if (auto EC = Mapper(*this, X))
          return EC;
      }
    } else if (isWriting()) {
      Size = static_cast<SizeType>(Items.size());
      if (auto EC = Writer->writeInteger(Size))
        return EC;

      for (auto &X : Items) {
        if (auto EC = Mapper(*this, X))
          return EC;
      }
    } else {
      if (auto EC = Reader->readInteger(Size))
        return EC;
      for (SizeType I = 0; I < Size; ++I) {
        typename T::value_type Item;
        if (auto EC = Mapper(*this, Item))
          return EC;
        Items.push_back(Item);
      }
    }

    return Error::success();
  }

  template <typename T, typename ElementMapper>
  Error mapVectorTail(T &Items, const ElementMapper &Mapper,
                      const Twine &Comment = "") {
    emitComment(Comment);
    if (isStreaming() || isWriting()) {
      for (auto &Item : Items) {
        if (auto EC = Mapper(*this, Item))
          return EC;
      }
    } else {
      typename T::value_type Field;
      // Stop when we run out of bytes or we hit record padding bytes.
      while (!Reader->empty() && Reader->peek() < 0xf0 /* LF_PAD0 */) {
        if (auto EC = Mapper(*this, Field))
          return EC;
        Items.push_back(Field);
      }
    }
    return Error::success();
  }

  Error mapByteVectorTail(ArrayRef<uint8_t> &Bytes, const Twine &Comment = "");
  Error mapByteVectorTail(std::vector<uint8_t> &Bytes,
                          const Twine &Comment = "");

  Error padToAlignment(uint32_t Align);
  Error skipPadding();

  uint64_t getStreamedLen() {
    if (isStreaming())
      return StreamedLen;
    return 0;
  }

  void emitRawComment(const Twine &T) {
    if (isStreaming() && Streamer->isVerboseAsm())
      Streamer->AddRawComment(T);
  }

private:
  void emitEncodedSignedInteger(const int64_t &Value,
                                const Twine &Comment = "");
  void emitEncodedUnsignedInteger(const uint64_t &Value,
                                  const Twine &Comment = "");
  Error writeEncodedSignedInteger(const int64_t &Value);
  Error writeEncodedUnsignedInteger(const uint64_t &Value);

  void incrStreamedLen(const uint64_t &Len) {
    if (isStreaming())
      StreamedLen += Len;
  }

  void resetStreamedLen() {
    if (isStreaming())
      StreamedLen = 4; // The record prefix is 4 bytes long
  }

  void emitComment(const Twine &Comment) {
    if (isStreaming() && Streamer->isVerboseAsm()) {
      Twine TComment(Comment);
      if (!TComment.isTriviallyEmpty())
        Streamer->AddComment(TComment);
    }
  }

  struct RecordLimit {
    uint32_t BeginOffset;
    std::optional<uint32_t> MaxLength;

    std::optional<uint32_t> bytesRemaining(uint32_t CurrentOffset) const {
      if (!MaxLength)
        return std::nullopt;
      assert(CurrentOffset >= BeginOffset);

      uint32_t BytesUsed = CurrentOffset - BeginOffset;
      if (BytesUsed >= *MaxLength)
        return 0;
      return *MaxLength - BytesUsed;
    }
  };

  SmallVector<RecordLimit, 2> Limits;

  BinaryStreamReader *Reader = nullptr;
  BinaryStreamWriter *Writer = nullptr;
  CodeViewRecordStreamer *Streamer = nullptr;
  uint64_t StreamedLen = 0;
};
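
// Illustrative sketch (not part of the original header): the symmetric
// mapping idiom this class enables.  A hypothetical two-field record uses a
// single function for reading, writing, and streaming alike.
struct ExampleMappedRecord {
  uint32_t Kind = 0;
  uint32_t Offset = 0;

  Error map(CodeViewRecordIO &IO) {
    // Each mapInteger call reads, writes, or emits depending on how the
    // CodeViewRecordIO instance was constructed.
    if (Error E = IO.mapInteger(Kind, "Kind"))
      return E;
    return IO.mapInteger(Offset, "Offset");
  }
};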

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_CODEVIEWRECORDIO_H

//===- RecordSerialization.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_RECORDSERIALIZATION_H
#define LLVM_DEBUGINFO_CODEVIEW_RECORDSERIALIZATION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cinttypes>

namespace llvm {
class APSInt;
namespace codeview {
using llvm::support::little32_t;
using llvm::support::ulittle16_t;
using llvm::support::ulittle32_t;

/// Limit on the size of all codeview symbol and type records, including the
/// RecordPrefix. MSVC does not emit any records larger than this.
enum : unsigned { MaxRecordLength = 0xFF00 };

struct RecordPrefix {
  RecordPrefix() = default;
  explicit RecordPrefix(uint16_t Kind) : RecordLen(2), RecordKind(Kind) {}

  ulittle16_t RecordLen;  // Record length, starting from &RecordKind.
  ulittle16_t RecordKind; // Record kind enum (SymRecordKind or TypeRecordKind)
};

/// Reinterpret a byte array as an array of characters. Does not interpret as
/// a C string, as StringRef has several helpers (split) that make that easy.
StringRef getBytesAsCharacters(ArrayRef<uint8_t> LeafData);
StringRef getBytesAsCString(ArrayRef<uint8_t> LeafData);

inline Error consume(BinaryStreamReader &Reader) { return Error::success(); }

/// Decodes a numeric "leaf" value. These are integer literals encountered in
/// the type stream. If the value is positive and less than LF_NUMERIC (1 <<
/// 15), it is emitted directly in Data. Otherwise, it has a tag like LF_CHAR
/// that indicates the bitwidth and sign of the numeric data.
Error consume(BinaryStreamReader &Reader, APSInt &Num);

/// Decodes a numeric leaf value that is known to be a particular type.
Error consume_numeric(BinaryStreamReader &Reader, uint64_t &Value);

/// Decodes signed and unsigned fixed-length integers.
Error consume(BinaryStreamReader &Reader, uint32_t &Item);
Error consume(BinaryStreamReader &Reader, int32_t &Item);

/// Decodes a null terminated string.
Error consume(BinaryStreamReader &Reader, StringRef &Item);

Error consume(StringRef &Data, APSInt &Num);
Error consume(StringRef &Data, uint32_t &Item);

/// Decodes an arbitrary object whose layout matches that of the underlying
/// byte sequence, and returns a pointer to the object.
template <typename T> Error consume(BinaryStreamReader &Reader, T *&Item) {
  return Reader.readObject(Item);
}

template <typename T, typename U> struct serialize_conditional_impl {
  serialize_conditional_impl(T &Item, U Func) : Item(Item), Func(Func) {}

  Error deserialize(BinaryStreamReader &Reader) const {
    if (!Func())
      return Error::success();
    return consume(Reader, Item);
  }

  T &Item;
  U Func;
};

template <typename T, typename U>
serialize_conditional_impl<T, U> serialize_conditional(T &Item, U Func) {
  return serialize_conditional_impl<T, U>(Item, Func);
}

template <typename T, typename U> struct serialize_array_impl {
  serialize_array_impl(ArrayRef<T> &Item, U Func) : Item(Item), Func(Func) {}

  Error deserialize(BinaryStreamReader &Reader) const {
    return Reader.readArray(Item, Func());
  }

  ArrayRef<T> &Item;
  U Func;
};

template <typename T> struct serialize_vector_tail_impl {
  serialize_vector_tail_impl(std::vector<T> &Item) : Item(Item) {}

  Error deserialize(BinaryStreamReader &Reader) const {
    T Field;
    // Stop when we run out of bytes or we hit record padding bytes.
    while (!Reader.empty() && Reader.peek() < LF_PAD0) {
      if (auto EC = consume(Reader, Field))
        return EC;
      Item.push_back(Field);
    }
    return Error::success();
  }

  std::vector<T> &Item;
};

struct serialize_null_term_string_array_impl {
  serialize_null_term_string_array_impl(std::vector<StringRef> &Item)
      : Item(Item) {}

  Error deserialize(BinaryStreamReader &Reader) const {
    if (Reader.empty())
      return make_error<CodeViewError>(cv_error_code::insufficient_buffer,
                                       "Null terminated string is empty!");

    while (Reader.peek() != 0) {
      StringRef Field;
      if (auto EC = Reader.readCString(Field))
        return EC;
      Item.push_back(Field);
    }
    return Reader.skip(1);
  }

  std::vector<StringRef> &Item;
};

template <typename T> struct serialize_arrayref_tail_impl {
  serialize_arrayref_tail_impl(ArrayRef<T> &Item) : Item(Item) {}

  Error deserialize(BinaryStreamReader &Reader) const {
    uint32_t Count = Reader.bytesRemaining() / sizeof(T);
    return Reader.readArray(Item, Count);
  }

  ArrayRef<T> &Item;
};

template <typename T> struct serialize_numeric_impl {
  serialize_numeric_impl(T &Item) : Item(Item) {}

  Error deserialize(BinaryStreamReader &Reader) const {
    return consume_numeric(Reader, Item);
  }

  T &Item;
};

template <typename T, typename U>
serialize_array_impl<T, U> serialize_array(ArrayRef<T> &Item, U Func) {
  return serialize_array_impl<T, U>(Item, Func);
}

inline serialize_null_term_string_array_impl
serialize_null_term_string_array(std::vector<StringRef> &Item) {
  return serialize_null_term_string_array_impl(Item);
}

template <typename T>
serialize_vector_tail_impl<T> serialize_array_tail(std::vector<T> &Item) {
  return serialize_vector_tail_impl<T>(Item);
}

template <typename T>
serialize_arrayref_tail_impl<T> serialize_array_tail(ArrayRef<T> &Item) {
  return serialize_arrayref_tail_impl<T>(Item);
}

template <typename T> serialize_numeric_impl<T> serialize_numeric(T &Item) {
  return serialize_numeric_impl<T>(Item);
}

template <typename T, typename U>
Error consume(BinaryStreamReader &Reader,
              const serialize_conditional_impl<T, U> &Item) {
  return Item.deserialize(Reader);
}

template <typename T, typename U>
Error consume(BinaryStreamReader &Reader,
              const serialize_array_impl<T, U> &Item) {
  return Item.deserialize(Reader);
}

inline Error consume(BinaryStreamReader &Reader,
                     const serialize_null_term_string_array_impl &Item) {
  return Item.deserialize(Reader);
}

template <typename T>
Error consume(BinaryStreamReader &Reader,
              const serialize_vector_tail_impl<T> &Item) {
  return Item.deserialize(Reader);
}

template <typename T>
Error consume(BinaryStreamReader &Reader,
              const serialize_arrayref_tail_impl<T> &Item) {
  return Item.deserialize(Reader);
}

template <typename T>
Error consume(BinaryStreamReader &Reader,
              const serialize_numeric_impl<T> &Item) {
  return Item.deserialize(Reader);
}

template <typename T, typename U, typename... Args>
Error consume(BinaryStreamReader &Reader, T &&X, U &&Y, Args &&... Rest) {
  if (auto EC = consume(Reader, X))
    return EC;
  return consume(Reader, Y, std::forward<Args>(Rest)...);
}
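
// Illustrative sketch (not part of the original header): chaining the helpers
// above to deserialize a hypothetical record tail consisting of a count, that
// many 32-bit entries, and a trailing name.
inline Error consumeExample(BinaryStreamReader &Reader) {
  uint32_t Count = 0;
  ArrayRef<ulittle32_t> Entries;
  StringRef Name;
  // Arguments are consumed left to right, so Count is already populated when
  // the array's length callback runs.
  return consume(Reader, Count,
                 serialize_array(Entries, [&] { return Count; }), Name);
}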

}
}

#endif

//===- Line.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_LINE_H
#define LLVM_DEBUGINFO_CODEVIEW_LINE_H

#include "llvm/Support/Endian.h"
#include <cinttypes>

namespace llvm {
namespace codeview {

using llvm::support::ulittle32_t;

class LineInfo {
public:
  enum : uint32_t {
    AlwaysStepIntoLineNumber = 0xfeefee,
    NeverStepIntoLineNumber = 0xf00f00
  };

  enum : int { EndLineDeltaShift = 24 };

  enum : uint32_t {
    StartLineMask = 0x00ffffff,
    EndLineDeltaMask = 0x7f000000,
    StatementFlag = 0x80000000u
  };

  LineInfo(uint32_t StartLine, uint32_t EndLine, bool IsStatement);
  LineInfo(uint32_t LineData) : LineData(LineData) {}

  uint32_t getStartLine() const { return LineData & StartLineMask; }

  uint32_t getLineDelta() const {
    return (LineData & EndLineDeltaMask) >> EndLineDeltaShift;
  }

  uint32_t getEndLine() const { return getStartLine() + getLineDelta(); }

  bool isStatement() const { return (LineData & StatementFlag) != 0; }

  uint32_t getRawData() const { return LineData; }

  bool isAlwaysStepInto() const {
    return getStartLine() == AlwaysStepIntoLineNumber;
  }

  bool isNeverStepInto() const {
    return getStartLine() == NeverStepIntoLineNumber;
  }

private:
  uint32_t LineData;
};
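
// Illustrative sketch (not part of the original header): the packing in
// practice.  A statement spanning source lines 10 through 12 stores start
// line 10 in the low 24 bits, delta 2 in the next 7 bits, and sets the
// statement flag, giving a raw value of 0x8200000A.
inline uint32_t packedLineInfoExample() {
  LineInfo LI(/*StartLine=*/10, /*EndLine=*/12, /*IsStatement=*/true);
  // getStartLine() == 10, getLineDelta() == 2, getEndLine() == 12, and
  // isStatement() is true.
  return LI.getRawData();
}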

class ColumnInfo {
private:
  static const uint32_t StartColumnMask = 0x0000ffffu;
  static const uint32_t EndColumnMask = 0xffff0000u;
  static const int EndColumnShift = 16;

public:
  ColumnInfo(uint16_t StartColumn, uint16_t EndColumn) {
    ColumnData =
        (static_cast<uint32_t>(StartColumn) & StartColumnMask) |
        ((static_cast<uint32_t>(EndColumn) << EndColumnShift) & EndColumnMask);
  }

  uint16_t getStartColumn() const {
    return static_cast<uint16_t>(ColumnData & StartColumnMask);
  }

  uint16_t getEndColumn() const {
    return static_cast<uint16_t>((ColumnData & EndColumnMask) >>
                                 EndColumnShift);
  }

  uint32_t getRawData() const { return ColumnData; }

private:
  uint32_t ColumnData;
};

class Line {
private:
  int32_t CodeOffset;
  LineInfo LineInf;
  ColumnInfo ColumnInf;

public:
  Line(int32_t CodeOffset, uint32_t StartLine, uint32_t EndLine,
       uint16_t StartColumn, uint16_t EndColumn, bool IsStatement)
      : CodeOffset(CodeOffset), LineInf(StartLine, EndLine, IsStatement),
        ColumnInf(StartColumn, EndColumn) {}

  Line(int32_t CodeOffset, LineInfo LineInf, ColumnInfo ColumnInf)
      : CodeOffset(CodeOffset), LineInf(LineInf), ColumnInf(ColumnInf) {}

  LineInfo getLineInfo() const { return LineInf; }

  ColumnInfo getColumnInfo() const { return ColumnInf; }

  int32_t getCodeOffset() const { return CodeOffset; }

  uint32_t getStartLine() const { return LineInf.getStartLine(); }

  uint32_t getLineDelta() const { return LineInf.getLineDelta(); }

  uint32_t getEndLine() const { return LineInf.getEndLine(); }

  uint16_t getStartColumn() const { return ColumnInf.getStartColumn(); }

  uint16_t getEndColumn() const { return ColumnInf.getEndColumn(); }

  bool isStatement() const { return LineInf.isStatement(); }

  bool isAlwaysStepInto() const { return LineInf.isAlwaysStepInto(); }

  bool isNeverStepInto() const { return LineInf.isNeverStepInto(); }
};

} // namespace codeview
} // namespace llvm

#endif

//===-- SymbolVisitorDelegate.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H

#include "llvm/ADT/StringRef.h"
#include <cstdint>

namespace llvm {

class BinaryStreamReader;

namespace codeview {

class DebugStringTableSubsectionRef;

class SymbolVisitorDelegate {
public:
  virtual ~SymbolVisitorDelegate() = default;

  virtual uint32_t getRecordOffset(BinaryStreamReader Reader) = 0;
  virtual StringRef getFileNameForFileOffset(uint32_t FileOffset) = 0;
  virtual DebugStringTableSubsectionRef getStringTable() = 0;
};

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORDELEGATE_H

//===- CodeViewError.h - Error extensions for CodeView ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEWERROR_H
#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEWERROR_H

#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
enum class cv_error_code {
  unspecified = 1,
  insufficient_buffer,
  operation_unsupported,
  corrupt_record,
  no_records,
  unknown_member_record,
};
} // namespace codeview
} // namespace llvm

namespace std {
template <>
struct is_error_code_enum<llvm::codeview::cv_error_code> : std::true_type {};
} // namespace std

namespace llvm {
namespace codeview {
const std::error_category &CVErrorCategory();

inline std::error_code make_error_code(cv_error_code E) {
  return std::error_code(static_cast<int>(E), CVErrorCategory());
}

/// Base class for errors originating when parsing raw PDB files
class CodeViewError : public ErrorInfo<CodeViewError, StringError> {
public:
  using ErrorInfo<CodeViewError,
                  StringError>::ErrorInfo; // inherit constructors
  CodeViewError(const Twine &S) : ErrorInfo(S, cv_error_code::unspecified) {}
  static char ID;
};
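
// A minimal usage sketch: signal a truncated stream while parsing.  The
// condition and message are illustrative, not part of the API.
//
//   if (Reader.bytesRemaining() < sizeof(RecordPrefix))
//     return make_error<CodeViewError>(cv_error_code::insufficient_buffer,
//                                      "record extends past end of stream");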

} // namespace codeview
} // namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_CODEVIEWERROR_H

// File: DebugInfo/CodeView/MergingTypeTableBuilder.h
//===- MergingTypeTableBuilder.h ---------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_MERGINGTYPETABLEBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_MERGINGTYPETABLEBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/SimpleTypeSerializer.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>

namespace llvm {
namespace codeview {
struct LocallyHashedType;

class ContinuationRecordBuilder;

class MergingTypeTableBuilder : public TypeCollection {
  /// Storage for records.  These need to outlive the TypeTableBuilder.
  BumpPtrAllocator &RecordStorage;

  /// A serializer that can write non-continuation leaf types.  Kept only as
  /// a convenience so that we can provide an interface method that writes an
  /// unserialized record.
  SimpleTypeSerializer SimpleSerializer;

  /// Hash table.
  DenseMap<LocallyHashedType, TypeIndex> HashedRecords;

  /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
  SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;

public:
  explicit MergingTypeTableBuilder(BumpPtrAllocator &Storage);
  ~MergingTypeTableBuilder();

  // TypeCollection overrides
  std::optional<TypeIndex> getFirst() override;
  std::optional<TypeIndex> getNext(TypeIndex Prev) override;
  CVType getType(TypeIndex Index) override;
  StringRef getTypeName(TypeIndex Index) override;
  bool contains(TypeIndex Index) override;
  uint32_t size() override;
  uint32_t capacity() override;
  bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override;

  // public interface
  void reset();
  TypeIndex nextTypeIndex() const;

  BumpPtrAllocator &getAllocator() { return RecordStorage; }

  ArrayRef<ArrayRef<uint8_t>> records() const;

  TypeIndex insertRecordAs(hash_code Hash, ArrayRef<uint8_t> &Record);
  TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
  TypeIndex insertRecord(ContinuationRecordBuilder &Builder);

  template <typename T> TypeIndex writeLeafType(T &Record) {
    ArrayRef<uint8_t> Data = SimpleSerializer.serialize(Record);
    return insertRecordBytes(Data);
  }
};
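
// A minimal usage sketch (ArgListRecord and its constructor arguments are
// assumed to match their definitions in TypeRecord.h; treat the details as
// illustrative):
//
//   llvm::BumpPtrAllocator Alloc;
//   MergingTypeTableBuilder Builder(Alloc);
//   ArgListRecord ArgList(TypeRecordKind::ArgList, {});
//   TypeIndex TI = Builder.writeLeafType(ArgList);  // deduplicating insert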

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_MERGINGTYPETABLEBUILDER_H

// File: DebugInfo/CodeView/TypeIndexDiscovery.h
//===- TypeIndexDiscovery.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"

namespace llvm {
template <typename T> class SmallVectorImpl;
namespace codeview {
class TypeIndex;
enum class TiRefKind { TypeRef, IndexRef };
struct TiReference {
  TiRefKind Kind;
  uint32_t Offset;
  uint32_t Count;
};

void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
                         SmallVectorImpl<TiReference> &Refs);
void discoverTypeIndices(const CVType &Type,
                         SmallVectorImpl<TiReference> &Refs);
void discoverTypeIndices(const CVType &Type,
                         SmallVectorImpl<TypeIndex> &Indices);
void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
                         SmallVectorImpl<TypeIndex> &Indices);

/// Discover type indices in symbol records. Returns false if this is an unknown
/// record.
bool discoverTypeIndicesInSymbol(const CVSymbol &Symbol,
                                 SmallVectorImpl<TiReference> &Refs);
bool discoverTypeIndicesInSymbol(ArrayRef<uint8_t> RecordData,
                                 SmallVectorImpl<TiReference> &Refs);
bool discoverTypeIndicesInSymbol(ArrayRef<uint8_t> RecordData,
                                 SmallVectorImpl<TypeIndex> &Indices);
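
// Example (a sketch): collect every TypeIndex a type record refers to, e.g.
// so the indices can be remapped while merging type streams:
//
//   SmallVector<TypeIndex, 4> Indices;
//   discoverTypeIndices(Type, Indices);
//   for (TypeIndex &TI : Indices) { /* remap TI */ }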
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H

// File: DebugInfo/CodeView/GlobalTypeTableBuilder.h
//===- GlobalTypeTableBuilder.h ----------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_GLOBALTYPETABLEBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_GLOBALTYPETABLEBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/SimpleTypeSerializer.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeHashing.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <cstdint>

namespace llvm {
namespace codeview {

class ContinuationRecordBuilder;

class GlobalTypeTableBuilder : public TypeCollection {
  /// Storage for records.  These need to outlive the TypeTableBuilder.
  BumpPtrAllocator &RecordStorage;

  /// A serializer that can write non-continuation leaf types.  Kept only as
  /// a convenience so that we can provide an interface method that writes an
  /// unserialized record.
  SimpleTypeSerializer SimpleSerializer;

  /// Hash table.
  DenseMap<GloballyHashedType, TypeIndex> HashedRecords;

  /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
  SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;

  /// Contains a list of all hash values indexed by TypeIndex.toArrayIndex().
  SmallVector<GloballyHashedType, 2> SeenHashes;

public:
  explicit GlobalTypeTableBuilder(BumpPtrAllocator &Storage);
  ~GlobalTypeTableBuilder();

  // TypeCollection overrides
  std::optional<TypeIndex> getFirst() override;
  std::optional<TypeIndex> getNext(TypeIndex Prev) override;
  CVType getType(TypeIndex Index) override;
  StringRef getTypeName(TypeIndex Index) override;
  bool contains(TypeIndex Index) override;
  uint32_t size() override;
  uint32_t capacity() override;
  bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override;

  // public interface
  void reset();
  TypeIndex nextTypeIndex() const;

  BumpPtrAllocator &getAllocator() { return RecordStorage; }

  ArrayRef<ArrayRef<uint8_t>> records() const;
  ArrayRef<GloballyHashedType> hashes() const;

  template <typename CreateFunc>
  TypeIndex insertRecordAs(GloballyHashedType Hash, size_t RecordSize,
                           CreateFunc Create) {
    assert(RecordSize < UINT32_MAX && "Record too big");
    assert(RecordSize % 4 == 0 &&
           "RecordSize is not a multiple of 4 bytes which will cause "
           "misalignment in the output TPI stream!");

    auto Result = HashedRecords.try_emplace(Hash, nextTypeIndex());

    if (LLVM_UNLIKELY(Result.second /*inserted*/ ||
                      Result.first->second.isSimple())) {
      uint8_t *Stable = RecordStorage.Allocate<uint8_t>(RecordSize);
      MutableArrayRef<uint8_t> Data(Stable, RecordSize);
      ArrayRef<uint8_t> StableRecord = Create(Data);
      if (StableRecord.empty()) {
        // Records with forward references into the type stream are deferred
        // and inserted on a second pass.
        Result.first->getSecond() = TypeIndex(SimpleTypeKind::NotTranslated);
        return TypeIndex(SimpleTypeKind::NotTranslated);
      }
      if (Result.first->second.isSimple()) {
        assert(Result.first->second.getIndex() ==
               (uint32_t)SimpleTypeKind::NotTranslated);
        // On the second pass, update the entry with the index of the remapped
        // record.  The (initially misbehaved) record now comes *after* the
        // records resolved on the first pass, so its references point *back*
        // in the stream.
        Result.first->second = nextTypeIndex();
      }
      SeenRecords.push_back(StableRecord);
      SeenHashes.push_back(Hash);
    }

    return Result.first->second;
  }

  TypeIndex insertRecordBytes(ArrayRef<uint8_t> Data);
  TypeIndex insertRecord(ContinuationRecordBuilder &Builder);

  template <typename T> TypeIndex writeLeafType(T &Record) {
    ArrayRef<uint8_t> Data = SimpleSerializer.serialize(Record);
    return insertRecordBytes(Data);
  }
};
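
// A usage sketch for insertRecordAs (assuming Hash is the GloballyHashedType
// of Record, an ArrayRef<uint8_t> of serialized bytes).  The Create callback
// either copies the record into the stable storage it is handed, or returns
// an empty ArrayRef to defer a forward-referencing record to the second pass:
//
//   TypeIndex TI = Builder.insertRecordAs(
//       Hash, Record.size(), [&](MutableArrayRef<uint8_t> Storage) {
//         llvm::copy(Record, Storage.begin());
//         return ArrayRef<uint8_t>(Storage);
//       });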

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_GLOBALTYPETABLEBUILDER_H

// File: DebugInfo/CodeView/SymbolSerializer.h
//===- SymbolSerializer.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
#include "llvm/DebugInfo/CodeView/SymbolRecordMapping.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <array>
#include <cstdint>

namespace llvm {
namespace codeview {

class SymbolSerializer : public SymbolVisitorCallbacks {
  BumpPtrAllocator &Storage;
  // Because the maximum record length is fixed, the scratch space can be a
  // stack-allocated buffer.  This yields a measurable performance increase
  // over repeated heap allocations when serializing many independent records
  // via writeOneSymbol.
  std::array<uint8_t, MaxRecordLength> RecordBuffer;
  MutableBinaryByteStream Stream;
  BinaryStreamWriter Writer;
  SymbolRecordMapping Mapping;
  std::optional<SymbolKind> CurrentSymbol;

  Error writeRecordPrefix(SymbolKind Kind) {
    RecordPrefix Prefix;
    Prefix.RecordKind = Kind;
    Prefix.RecordLen = 0;
    if (auto EC = Writer.writeObject(Prefix))
      return EC;
    return Error::success();
  }

public:
  SymbolSerializer(BumpPtrAllocator &Storage, CodeViewContainer Container);

  template <typename SymType>
  static CVSymbol writeOneSymbol(SymType &Sym, BumpPtrAllocator &Storage,
                                 CodeViewContainer Container) {
    RecordPrefix Prefix{uint16_t(Sym.Kind)};
    CVSymbol Result(&Prefix, sizeof(Prefix));
    SymbolSerializer Serializer(Storage, Container);
    consumeError(Serializer.visitSymbolBegin(Result));
    consumeError(Serializer.visitKnownRecord(Result, Sym));
    consumeError(Serializer.visitSymbolEnd(Result));
    return Result;
  }
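
  // Example (a sketch; S_OBJNAME is chosen arbitrarily, and ObjNameSym is
  // assumed to match its definition in SymbolRecord.h):
  //
  //   llvm::BumpPtrAllocator Alloc;
  //   ObjNameSym Sym;
  //   Sym.Name = "foo.obj";
  //   CVSymbol Record = SymbolSerializer::writeOneSymbol(
  //       Sym, Alloc, CodeViewContainer::ObjectFile);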

  Error visitSymbolBegin(CVSymbol &Record) override;
  Error visitSymbolEnd(CVSymbol &Record) override;

#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override {               \
    return visitKnownRecordImpl(CVR, Record);                                  \
  }
#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"

private:
  template <typename RecordKind>
  Error visitKnownRecordImpl(CVSymbol &CVR, RecordKind &Record) {
    return Mapping.visitKnownRecord(CVR, Record);
  }
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLSERIALIZER_H

// File: DebugInfo/CodeView/DebugSubsection.h
//===- DebugSubsection.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTION_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/Support/Error.h"

#include <cstdint>

namespace llvm {
class BinaryStreamWriter;
namespace codeview {

class DebugSubsectionRef {
public:
  explicit DebugSubsectionRef(DebugSubsectionKind Kind) : Kind(Kind) {}
  virtual ~DebugSubsectionRef();

  static bool classof(const DebugSubsectionRef *S) { return true; }

  DebugSubsectionKind kind() const { return Kind; }

protected:
  DebugSubsectionKind Kind;
};

class DebugSubsection {
public:
  explicit DebugSubsection(DebugSubsectionKind Kind) : Kind(Kind) {}
  virtual ~DebugSubsection();

  static bool classof(const DebugSubsection *S) { return true; }

  DebugSubsectionKind kind() const { return Kind; }

  virtual Error commit(BinaryStreamWriter &Writer) const = 0;
  virtual uint32_t calculateSerializedSize() const = 0;

protected:
  DebugSubsectionKind Kind;
};

} // namespace codeview
} // namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTION_H

// File: DebugInfo/CodeView/TypeRecordMapping.h
//===- TypeRecordMapping.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/Error.h"
#include <optional>

namespace llvm {
class BinaryStreamReader;
class BinaryStreamWriter;

namespace codeview {
class TypeIndex;
struct CVMemberRecord;
class TypeRecordMapping : public TypeVisitorCallbacks {
public:
  explicit TypeRecordMapping(BinaryStreamReader &Reader) : IO(Reader) {}
  explicit TypeRecordMapping(BinaryStreamWriter &Writer) : IO(Writer) {}
  explicit TypeRecordMapping(CodeViewRecordStreamer &Streamer) : IO(Streamer) {}

  using TypeVisitorCallbacks::visitTypeBegin;
  Error visitTypeBegin(CVType &Record) override;
  Error visitTypeBegin(CVType &Record, TypeIndex Index) override;
  Error visitTypeEnd(CVType &Record) override;

  Error visitMemberBegin(CVMemberRecord &Record) override;
  Error visitMemberEnd(CVMemberRecord &Record) override;

#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"

private:
  std::optional<TypeLeafKind> TypeKind;
  std::optional<TypeLeafKind> MemberKind;

  CodeViewRecordIO IO;
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPERECORDMAPPING_H

// File: DebugInfo/CodeView/DebugChecksumsSubsection.h
//===- DebugChecksumsSubsection.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGCHECKSUMSSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGCHECKSUMSSUBSECTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {

class BinaryStreamReader;
class BinaryStreamWriter;

namespace codeview {

class DebugStringTableSubsection;

struct FileChecksumEntry {
  uint32_t FileNameOffset;    // Byte offset of the filename in the global string table.
  FileChecksumKind Kind;      // The type of checksum.
  ArrayRef<uint8_t> Checksum; // The bytes of the checksum.
};

} // end namespace codeview

template <> struct VarStreamArrayExtractor<codeview::FileChecksumEntry> {
public:
  using ContextType = void;

  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
                   codeview::FileChecksumEntry &Item);
};

namespace codeview {

class DebugChecksumsSubsectionRef final : public DebugSubsectionRef {
  using FileChecksumArray = VarStreamArray<codeview::FileChecksumEntry>;
  using Iterator = FileChecksumArray::Iterator;

public:
  DebugChecksumsSubsectionRef()
      : DebugSubsectionRef(DebugSubsectionKind::FileChecksums) {}

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::FileChecksums;
  }

  bool valid() const { return Checksums.valid(); }

  Error initialize(BinaryStreamReader Reader);
  Error initialize(BinaryStreamRef Stream);

  Iterator begin() const { return Checksums.begin(); }
  Iterator end() const { return Checksums.end(); }

  const FileChecksumArray &getArray() const { return Checksums; }

private:
  FileChecksumArray Checksums;
};

class DebugChecksumsSubsection final : public DebugSubsection {
public:
  explicit DebugChecksumsSubsection(DebugStringTableSubsection &Strings);

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::FileChecksums;
  }

  void addChecksum(StringRef FileName, FileChecksumKind Kind,
                   ArrayRef<uint8_t> Bytes);

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;
  uint32_t mapChecksumOffset(StringRef FileName) const;

private:
  DebugStringTableSubsection &Strings;

  DenseMap<uint32_t, uint32_t> OffsetMap;
  uint32_t SerializedSize = 0;
  BumpPtrAllocator Storage;
  std::vector<FileChecksumEntry> Checksums;
};
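
// Typical construction (a sketch; Strings is a DebugStringTableSubsection
// built elsewhere, and the MD5 bytes are assumed to be precomputed):
//
//   DebugChecksumsSubsection Checksums(Strings);
//   Checksums.addChecksum("a.cpp", FileChecksumKind::MD5, MD5Bytes);
//   uint32_t Offset = Checksums.mapChecksumOffset("a.cpp");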

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGCHECKSUMSSUBSECTION_H

// File: DebugInfo/CodeView/TypeRecordHelpers.h
//===- TypeRecordHelpers.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDHELPERS_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDHELPERS_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"

namespace llvm {
namespace codeview {

/// Given an arbitrary codeview type, determine if it is an LF_STRUCTURE,
/// LF_CLASS, LF_INTERFACE, LF_UNION, or LF_ENUM with the forward ref class
/// option.
bool isUdtForwardRef(CVType CVT);

/// Given a CVType which is assumed to be an LF_MODIFIER, return the
/// TypeIndex of the type that the LF_MODIFIER modifies.
TypeIndex getModifiedType(const CVType &CVT);

/// Return true if this record should be in the IPI stream of a PDB. In an
/// object file, these record kinds will appear mixed into the .debug$T section.
inline bool isIdRecord(TypeLeafKind K) {
  switch (K) {
  case TypeLeafKind::LF_FUNC_ID:
  case TypeLeafKind::LF_MFUNC_ID:
  case TypeLeafKind::LF_STRING_ID:
  case TypeLeafKind::LF_SUBSTR_LIST:
  case TypeLeafKind::LF_BUILDINFO:
  case TypeLeafKind::LF_UDT_SRC_LINE:
  case TypeLeafKind::LF_UDT_MOD_SRC_LINE:
    return true;
  default:
    return false;
  }
}

/// Given an arbitrary codeview type, determine if it is an LF_STRUCTURE,
/// LF_CLASS, LF_INTERFACE, or LF_UNION.
inline bool isAggregate(CVType CVT) {
  switch (CVT.kind()) {
  case LF_STRUCTURE:
  case LF_CLASS:
  case LF_INTERFACE:
  case LF_UNION:
    return true;
  default:
    return false;
  }
}

/// Given an arbitrary codeview type index, determine its size.
uint64_t getSizeInBytesForTypeIndex(TypeIndex TI);

/// Given an arbitrary codeview type, return its size if it is an aggregate
/// (LF_STRUCTURE, LF_CLASS, LF_INTERFACE, LF_UNION).
uint64_t getSizeInBytesForTypeRecord(CVType CVT);

} // namespace codeview
} // namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPERECORDHELPERS_H

// File: DebugInfo/CodeView/DebugCrossExSubsection.h
//===- DebugCrossExSubsection.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSEXSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSEXSUBSECTION_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <map>

namespace llvm {
class BinaryStreamReader;
class BinaryStreamWriter;
namespace codeview {

class DebugCrossModuleExportsSubsectionRef final : public DebugSubsectionRef {
  using ReferenceArray = FixedStreamArray<CrossModuleExport>;
  using Iterator = ReferenceArray::Iterator;

public:
  DebugCrossModuleExportsSubsectionRef()
      : DebugSubsectionRef(DebugSubsectionKind::CrossScopeExports) {}

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::CrossScopeExports;
  }

  Error initialize(BinaryStreamReader Reader);
  Error initialize(BinaryStreamRef Stream);

  Iterator begin() const { return References.begin(); }
  Iterator end() const { return References.end(); }

private:
  FixedStreamArray<CrossModuleExport> References;
};

class DebugCrossModuleExportsSubsection final : public DebugSubsection {
public:
  DebugCrossModuleExportsSubsection()
      : DebugSubsection(DebugSubsectionKind::CrossScopeExports) {}

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::CrossScopeExports;
  }

  void addMapping(uint32_t Local, uint32_t Global);

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;

private:
  std::map<uint32_t, uint32_t> Mappings;
};
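
// Example (a sketch): record that the id 0x1000 in this module corresponds
// to the global id 0x2000 when referenced across module boundaries:
//
//   DebugCrossModuleExportsSubsection Exports;
//   Exports.addMapping(/*Local=*/0x1000, /*Global=*/0x2000);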

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGCROSSEXSUBSECTION_H

// File: DebugInfo/CodeView/TypeTableCollection.h
//===- TypeTableCollection.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H

#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/Support/StringSaver.h"

#include <vector>

namespace llvm {
namespace codeview {

class TypeTableCollection : public TypeCollection {
public:
  explicit TypeTableCollection(ArrayRef<ArrayRef<uint8_t>> Records);

  std::optional<TypeIndex> getFirst() override;
  std::optional<TypeIndex> getNext(TypeIndex Prev) override;

  CVType getType(TypeIndex Index) override;
  StringRef getTypeName(TypeIndex Index) override;
  bool contains(TypeIndex Index) override;
  uint32_t size() override;
  uint32_t capacity() override;
  bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override;

private:
  BumpPtrAllocator Allocator;
  StringSaver NameStorage;
  std::vector<StringRef> Names;
  ArrayRef<ArrayRef<uint8_t>> Records;
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H

// File: DebugInfo/CodeView/TypeCollection.h
//===- TypeCollection.h - A collection of CodeView type records -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"

namespace llvm {
namespace codeview {
class TypeCollection {
public:
  virtual ~TypeCollection() = default;

  bool empty() { return size() == 0; }

  virtual std::optional<TypeIndex> getFirst() = 0;
  virtual std::optional<TypeIndex> getNext(TypeIndex Prev) = 0;

  virtual CVType getType(TypeIndex Index) = 0;
  virtual StringRef getTypeName(TypeIndex Index) = 0;
  virtual bool contains(TypeIndex Index) = 0;
  virtual uint32_t size() = 0;
  virtual uint32_t capacity() = 0;
  virtual bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) = 0;

  template <typename TFunc> void ForEachRecord(TFunc Func) {
    std::optional<TypeIndex> Next = getFirst();

    while (Next) {
      TypeIndex N = *Next;
      Func(N, getType(N));
      Next = getNext(N);
    }
  }
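
  // Example (a sketch): print the name of every record in a collection:
  //
  //   Types.ForEachRecord([&](TypeIndex TI, const CVType &Type) {
  //     llvm::outs() << TI.getIndex() << ": " << Types.getTypeName(TI) << "\n";
  //   });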
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H

// File: DebugInfo/CodeView/StringsAndChecksums.h
//===- StringsAndChecksums.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_STRINGSANDCHECKSUMS_H
#define LLVM_DEBUGINFO_CODEVIEW_STRINGSANDCHECKSUMS_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include <memory>

namespace llvm {
namespace codeview {
class DebugChecksumsSubsection;
class DebugChecksumsSubsectionRef;
class DebugStringTableSubsection;
class DebugStringTableSubsectionRef;

class StringsAndChecksumsRef {
public:
  // If no subsections are known about initially, we find as much as we can.
  StringsAndChecksumsRef();

  // If only a string table subsection is given, we find a checksums subsection.
  explicit StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings);

  // If both subsections are given, we don't need to find anything.
  StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings,
                         const DebugChecksumsSubsectionRef &Checksums);

  void setStrings(const DebugStringTableSubsectionRef &Strings);
  void setChecksums(const DebugChecksumsSubsectionRef &CS);

  void reset();
  void resetStrings();
  void resetChecksums();

  template <typename T> void initialize(T &&FragmentRange) {
    for (const DebugSubsectionRecord &R : FragmentRange) {
      if (Strings && Checksums)
        return;
      if (R.kind() == DebugSubsectionKind::FileChecksums) {
        initializeChecksums(R);
        continue;
      }
      if (R.kind() == DebugSubsectionKind::StringTable && !Strings) {
        // In practice we should never encounter a second string table
        // subsection once the string table is already initialized, but in
        // theory it's possible.  A PDB is supposed to have one global string
        // table, in which case this subsection should not appear, whereas an
        // object file is supposed to contain this subsection exactly once.
        // However, for testing purposes it's nice to be able to exercise this
        // subsection independently of either format, so some tests manually
        // construct a PDB that contains this subsection in addition to the
        // global string table.
        initializeStrings(R);
        continue;
      }
    }
  }

  const DebugStringTableSubsectionRef &strings() const { return *Strings; }
  const DebugChecksumsSubsectionRef &checksums() const { return *Checksums; }

  bool hasStrings() const { return Strings != nullptr; }
  bool hasChecksums() const { return Checksums != nullptr; }

private:
  void initializeStrings(const DebugSubsectionRecord &SR);
  void initializeChecksums(const DebugSubsectionRecord &FCR);

  std::shared_ptr<DebugStringTableSubsectionRef> OwnedStrings;
  std::shared_ptr<DebugChecksumsSubsectionRef> OwnedChecksums;

  const DebugStringTableSubsectionRef *Strings = nullptr;
  const DebugChecksumsSubsectionRef *Checksums = nullptr;
};
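
// Typical use when walking the fragments of a .debug$S section (a sketch;
// Subsections is assumed to be an iterable range of DebugSubsectionRecords,
// such as a DebugSubsectionArray):
//
//   StringsAndChecksumsRef SC;
//   SC.initialize(Subsections);
//   if (SC.hasStrings() && SC.hasChecksums()) {
//     // SC.strings() and SC.checksums() are now safe to dereference.
//   }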

class StringsAndChecksums {
public:
  using StringsPtr = std::shared_ptr<DebugStringTableSubsection>;
  using ChecksumsPtr = std::shared_ptr<DebugChecksumsSubsection>;

  // If no subsections are known about initially, we find as much as we can.
  StringsAndChecksums() = default;

  void setStrings(const StringsPtr &SP) { Strings = SP; }
  void setChecksums(const ChecksumsPtr &CP) { Checksums = CP; }

  const StringsPtr &strings() const { return Strings; }
  const ChecksumsPtr &checksums() const { return Checksums; }

  bool hasStrings() const { return Strings != nullptr; }
  bool hasChecksums() const { return Checksums != nullptr; }

private:
  StringsPtr Strings;
  ChecksumsPtr Checksums;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_STRINGSANDCHECKSUMS_H

// File: DebugInfo/CodeView/CodeViewTypes.def
//===-- CodeViewTypes.def - All CodeView leaf types -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See LEAF_ENUM_e in cvinfo.h. This should match the constants there.
//
//===----------------------------------------------------------------------===//

// If the type is known, then we have a record describing it in TypeRecord.h.

#ifndef CV_TYPE
#define CV_TYPE(lf_ename, value)
#endif

// If the type is known, then we have a record describing it in TypeRecord.h.
#ifndef TYPE_RECORD
#define TYPE_RECORD(lf_ename, value, name) CV_TYPE(lf_ename, value)
#endif

#ifndef TYPE_RECORD_ALIAS
#define TYPE_RECORD_ALIAS(lf_ename, value, name, alias_name)                   \
  TYPE_RECORD(lf_ename, value, name)
#endif

#ifndef MEMBER_RECORD
#define MEMBER_RECORD(lf_ename, value, name) TYPE_RECORD(lf_ename, value, name)
#endif

#ifndef MEMBER_RECORD_ALIAS
#define MEMBER_RECORD_ALIAS(lf_ename, value, name, alias_name)                 \
  MEMBER_RECORD(lf_ename, value, name)
#endif

TYPE_RECORD(LF_POINTER, 0x1002, Pointer)
TYPE_RECORD(LF_MODIFIER, 0x1001, Modifier)
TYPE_RECORD(LF_PROCEDURE, 0x1008, Procedure)
TYPE_RECORD(LF_MFUNCTION, 0x1009, MemberFunction)
TYPE_RECORD(LF_LABEL, 0x000e, Label)
TYPE_RECORD(LF_ARGLIST, 0x1201, ArgList)

TYPE_RECORD(LF_FIELDLIST, 0x1203, FieldList)

TYPE_RECORD(LF_ARRAY, 0x1503, Array)
TYPE_RECORD(LF_CLASS, 0x1504, Class)
TYPE_RECORD_ALIAS(LF_STRUCTURE, 0x1505, Struct, Class)
TYPE_RECORD_ALIAS(LF_INTERFACE, 0x1519, Interface, Class)
TYPE_RECORD(LF_UNION, 0x1506, Union)
TYPE_RECORD(LF_ENUM, 0x1507, Enum)
TYPE_RECORD(LF_TYPESERVER2, 0x1515, TypeServer2)
TYPE_RECORD(LF_VFTABLE, 0x151d, VFTable)
TYPE_RECORD(LF_VTSHAPE, 0x000a, VFTableShape)

TYPE_RECORD(LF_BITFIELD, 0x1205, BitField)

// Member type records. These are generally not length prefixed, and appear
// inside of a field list record.
MEMBER_RECORD(LF_BCLASS, 0x1400, BaseClass)
MEMBER_RECORD_ALIAS(LF_BINTERFACE, 0x151a, BaseInterface, BaseClass)

MEMBER_RECORD(LF_VBCLASS, 0x1401, VirtualBaseClass)
MEMBER_RECORD_ALIAS(LF_IVBCLASS, 0x1402, IndirectVirtualBaseClass,
                    VirtualBaseClass)

MEMBER_RECORD(LF_VFUNCTAB, 0x1409, VFPtr)
MEMBER_RECORD(LF_STMEMBER, 0x150e, StaticDataMember)
MEMBER_RECORD(LF_METHOD, 0x150f, OverloadedMethod)
MEMBER_RECORD(LF_MEMBER, 0x150d, DataMember)
MEMBER_RECORD(LF_NESTTYPE, 0x1510, NestedType)
MEMBER_RECORD(LF_ONEMETHOD, 0x1511, OneMethod)
MEMBER_RECORD(LF_ENUMERATE, 0x1502, Enumerator)
MEMBER_RECORD(LF_INDEX, 0x1404, ListContinuation)

// ID leaf records. Subsequent leaf types may be referenced from .debug$S.
TYPE_RECORD(LF_FUNC_ID, 0x1601, FuncId)
TYPE_RECORD(LF_MFUNC_ID, 0x1602, MemberFuncId)
TYPE_RECORD(LF_BUILDINFO, 0x1603, BuildInfo)
TYPE_RECORD(LF_SUBSTR_LIST, 0x1604, StringList)
TYPE_RECORD(LF_STRING_ID, 0x1605, StringId)
TYPE_RECORD(LF_UDT_SRC_LINE, 0x1606, UdtSourceLine)
TYPE_RECORD(LF_UDT_MOD_SRC_LINE, 0x1607, UdtModSourceLine)


TYPE_RECORD(LF_METHODLIST, 0x1206, MethodOverloadList)

TYPE_RECORD(LF_PRECOMP, 0x1509, Precomp)
TYPE_RECORD(LF_ENDPRECOMP, 0x0014, EndPrecomp)

// 16 bit type records.
CV_TYPE(LF_MODIFIER_16t, 0x0001)
CV_TYPE(LF_POINTER_16t, 0x0002)
CV_TYPE(LF_ARRAY_16t, 0x0003)
CV_TYPE(LF_CLASS_16t, 0x0004)
CV_TYPE(LF_STRUCTURE_16t, 0x0005)
CV_TYPE(LF_UNION_16t, 0x0006)
CV_TYPE(LF_ENUM_16t, 0x0007)
CV_TYPE(LF_PROCEDURE_16t, 0x0008)
CV_TYPE(LF_MFUNCTION_16t, 0x0009)
CV_TYPE(LF_COBOL0_16t, 0x000b)
CV_TYPE(LF_COBOL1, 0x000c)
CV_TYPE(LF_BARRAY_16t, 0x000d)
CV_TYPE(LF_NULLLEAF, 0x000f) // LF_NULL
CV_TYPE(LF_NOTTRAN, 0x0010)
CV_TYPE(LF_DIMARRAY_16t, 0x0011)
CV_TYPE(LF_VFTPATH_16t, 0x0012)
CV_TYPE(LF_PRECOMP_16t, 0x0013)
CV_TYPE(LF_OEM_16t, 0x0015)
CV_TYPE(LF_TYPESERVER_ST, 0x0016)

CV_TYPE(LF_SKIP_16t, 0x0200)
CV_TYPE(LF_ARGLIST_16t, 0x0201)
CV_TYPE(LF_DEFARG_16t, 0x0202)
CV_TYPE(LF_LIST, 0x0203)
CV_TYPE(LF_FIELDLIST_16t, 0x0204)
CV_TYPE(LF_DERIVED_16t, 0x0205)
CV_TYPE(LF_BITFIELD_16t, 0x0206)
CV_TYPE(LF_METHODLIST_16t, 0x0207)
CV_TYPE(LF_DIMCONU_16t, 0x0208)
CV_TYPE(LF_DIMCONLU_16t, 0x0209)
CV_TYPE(LF_DIMVARU_16t, 0x020a)
CV_TYPE(LF_DIMVARLU_16t, 0x020b)
CV_TYPE(LF_REFSYM, 0x020c)

// 16 bit member types. Generally not length prefixed.
CV_TYPE(LF_BCLASS_16t, 0x0400)
CV_TYPE(LF_VBCLASS_16t, 0x0401)
CV_TYPE(LF_IVBCLASS_16t, 0x0402)
CV_TYPE(LF_ENUMERATE_ST, 0x0403)
CV_TYPE(LF_FRIENDFCN_16t, 0x0404)
CV_TYPE(LF_INDEX_16t, 0x0405)
CV_TYPE(LF_MEMBER_16t, 0x0406)
CV_TYPE(LF_STMEMBER_16t, 0x0407)
CV_TYPE(LF_METHOD_16t, 0x0408)
CV_TYPE(LF_NESTTYPE_16t, 0x0409)
CV_TYPE(LF_VFUNCTAB_16t, 0x040a)
CV_TYPE(LF_FRIENDCLS_16t, 0x040b)
CV_TYPE(LF_ONEMETHOD_16t, 0x040c)
CV_TYPE(LF_VFUNCOFF_16t, 0x040d)

CV_TYPE(LF_TI16_MAX, 0x1000)

CV_TYPE(LF_ARRAY_ST, 0x1003)
CV_TYPE(LF_CLASS_ST, 0x1004)
CV_TYPE(LF_STRUCTURE_ST, 0x1005)
CV_TYPE(LF_UNION_ST, 0x1006)
CV_TYPE(LF_ENUM_ST, 0x1007)
CV_TYPE(LF_COBOL0, 0x100a)
CV_TYPE(LF_BARRAY, 0x100b)
CV_TYPE(LF_DIMARRAY_ST, 0x100c)
CV_TYPE(LF_VFTPATH, 0x100d)
CV_TYPE(LF_PRECOMP_ST, 0x100e)
CV_TYPE(LF_OEM, 0x100f)
CV_TYPE(LF_ALIAS_ST, 0x1010)
CV_TYPE(LF_OEM2, 0x1011)

CV_TYPE(LF_SKIP, 0x1200)
CV_TYPE(LF_DEFARG_ST, 0x1202)
CV_TYPE(LF_DERIVED, 0x1204)
CV_TYPE(LF_DIMCONU, 0x1207)
CV_TYPE(LF_DIMCONLU, 0x1208)
CV_TYPE(LF_DIMVARU, 0x1209)
CV_TYPE(LF_DIMVARLU, 0x120a)

// Member type records. These are generally not length prefixed, and appear
// inside of a field list record.
CV_TYPE(LF_FRIENDFCN_ST, 0x1403)
CV_TYPE(LF_MEMBER_ST, 0x1405)
CV_TYPE(LF_STMEMBER_ST, 0x1406)
CV_TYPE(LF_METHOD_ST, 0x1407)
CV_TYPE(LF_NESTTYPE_ST, 0x1408)
CV_TYPE(LF_FRIENDCLS, 0x140a)
CV_TYPE(LF_ONEMETHOD_ST, 0x140b)
CV_TYPE(LF_VFUNCOFF, 0x140c)
CV_TYPE(LF_NESTTYPEEX_ST, 0x140d)
CV_TYPE(LF_MEMBERMODIFY_ST, 0x140e)
CV_TYPE(LF_MANAGED_ST, 0x140f)

CV_TYPE(LF_ST_MAX, 0x1500)
CV_TYPE(LF_TYPESERVER, 0x1501)
CV_TYPE(LF_DIMARRAY, 0x1508)
CV_TYPE(LF_ALIAS, 0x150a)
CV_TYPE(LF_DEFARG, 0x150b)
CV_TYPE(LF_FRIENDFCN, 0x150c)
CV_TYPE(LF_NESTTYPEEX, 0x1512)
CV_TYPE(LF_MEMBERMODIFY, 0x1513)
CV_TYPE(LF_MANAGED, 0x1514)
CV_TYPE(LF_STRIDED_ARRAY, 0x1516)
CV_TYPE(LF_HLSL, 0x1517)
CV_TYPE(LF_MODIFIER_EX, 0x1518)
CV_TYPE(LF_VECTOR, 0x151b)
CV_TYPE(LF_MATRIX, 0x151c)

// ID leaf records. Subsequent leaf types may be referenced from .debug$S.

// Numeric leaf types. These are generally contained in other records, and not
// encountered in the main type stream.

CV_TYPE(LF_NUMERIC, 0x8000)
CV_TYPE(LF_CHAR, 0x8000)
CV_TYPE(LF_SHORT, 0x8001)
CV_TYPE(LF_USHORT, 0x8002)
CV_TYPE(LF_LONG, 0x8003)
CV_TYPE(LF_ULONG, 0x8004)
CV_TYPE(LF_REAL32, 0x8005)
CV_TYPE(LF_REAL64, 0x8006)
CV_TYPE(LF_REAL80, 0x8007)
CV_TYPE(LF_REAL128, 0x8008)
CV_TYPE(LF_QUADWORD, 0x8009)
CV_TYPE(LF_UQUADWORD, 0x800a)
CV_TYPE(LF_REAL48, 0x800b)
CV_TYPE(LF_COMPLEX32, 0x800c)
CV_TYPE(LF_COMPLEX64, 0x800d)
CV_TYPE(LF_COMPLEX80, 0x800e)
CV_TYPE(LF_COMPLEX128, 0x800f)
CV_TYPE(LF_VARSTRING, 0x8010)
CV_TYPE(LF_OCTWORD, 0x8017)
CV_TYPE(LF_UOCTWORD, 0x8018)
CV_TYPE(LF_DECIMAL, 0x8019)
CV_TYPE(LF_DATE, 0x801a)
CV_TYPE(LF_UTF8STRING, 0x801b)
CV_TYPE(LF_REAL16, 0x801c)

// Padding bytes. These are emitted into alignment bytes in the type stream.

CV_TYPE(LF_PAD0, 0xf0)
CV_TYPE(LF_PAD1, 0xf1)
CV_TYPE(LF_PAD2, 0xf2)
CV_TYPE(LF_PAD3, 0xf3)
CV_TYPE(LF_PAD4, 0xf4)
CV_TYPE(LF_PAD5, 0xf5)
CV_TYPE(LF_PAD6, 0xf6)
CV_TYPE(LF_PAD7, 0xf7)
CV_TYPE(LF_PAD8, 0xf8)
CV_TYPE(LF_PAD9, 0xf9)
CV_TYPE(LF_PAD10, 0xfa)
CV_TYPE(LF_PAD11, 0xfb)
CV_TYPE(LF_PAD12, 0xfc)
CV_TYPE(LF_PAD13, 0xfd)
CV_TYPE(LF_PAD14, 0xfe)
CV_TYPE(LF_PAD15, 0xff)

#undef CV_TYPE
#undef TYPE_RECORD
#undef TYPE_RECORD_ALIAS
#undef MEMBER_RECORD
#undef MEMBER_RECORD_ALIAS

// File: DebugInfo/CodeView/DebugSymbolsSubsection.h
//===- DebugSymbolsSubsection.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLSSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLSSUBSECTION_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
class DebugSymbolsSubsectionRef final : public DebugSubsectionRef {
public:
  DebugSymbolsSubsectionRef()
      : DebugSubsectionRef(DebugSubsectionKind::Symbols) {}

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::Symbols;
  }

  Error initialize(BinaryStreamReader Reader);

  CVSymbolArray::Iterator begin() const { return Records.begin(); }
  CVSymbolArray::Iterator end() const { return Records.end(); }

private:
  CVSymbolArray Records;
};

class DebugSymbolsSubsection final : public DebugSubsection {
public:
  DebugSymbolsSubsection() : DebugSubsection(DebugSubsectionKind::Symbols) {}
  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::Symbols;
  }

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;

  void addSymbol(CVSymbol Symbol);

private:
  uint32_t Length = 0;
  std::vector<CVSymbol> Records;
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSYMBOLSSUBSECTION_H

// File: DebugInfo/CodeView/TypeSymbolEmitter.h
//===- TypeSymbolEmitter.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H

namespace llvm {
class StringRef;

namespace codeview {
class TypeIndex;

class TypeSymbolEmitter {
private:
  TypeSymbolEmitter(const TypeSymbolEmitter &) = delete;
  TypeSymbolEmitter &operator=(const TypeSymbolEmitter &) = delete;

protected:
  TypeSymbolEmitter() {}

public:
  virtual ~TypeSymbolEmitter() {}

  virtual void writeUserDefinedType(TypeIndex TI, StringRef Name) = 0;
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H

// File: DebugInfo/CodeView/SymbolRecord.h
//===- SymbolRecord.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H

#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
#include <cstdint>
#include <vector>

namespace llvm {
namespace codeview {

class SymbolRecord {
protected:
  explicit SymbolRecord(SymbolRecordKind Kind) : Kind(Kind) {}

public:
  SymbolRecordKind getKind() const { return Kind; }

  SymbolRecordKind Kind;
};

// S_GPROC32, S_LPROC32, S_GPROC32_ID, S_LPROC32_ID, S_LPROC32_DPC or
// S_LPROC32_DPC_ID
class ProcSym : public SymbolRecord {
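  // Offset of the CodeOffset field from the start of the record: a 4-byte
  // record prefix, six uint32_t fields, and a 4-byte TypeIndex precede it.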
  static constexpr uint32_t RelocationOffset = 32;

public:
  explicit ProcSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  ProcSym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t Parent = 0;
  uint32_t End = 0;
  uint32_t Next = 0;
  uint32_t CodeSize = 0;
  uint32_t DbgStart = 0;
  uint32_t DbgEnd = 0;
  TypeIndex FunctionType;
  uint32_t CodeOffset = 0;
  uint16_t Segment = 0;
  ProcSymFlags Flags = ProcSymFlags::None;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_THUNK32
class Thunk32Sym : public SymbolRecord {
public:
  explicit Thunk32Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  Thunk32Sym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  uint32_t Parent = 0;
  uint32_t End = 0;
  uint32_t Next = 0;
  uint32_t Offset = 0;
  uint16_t Segment = 0;
  uint16_t Length = 0;
  ThunkOrdinal Thunk = ThunkOrdinal::Standard;
  StringRef Name;
  ArrayRef<uint8_t> VariantData;

  uint32_t RecordOffset = 0;
};

// S_TRAMPOLINE
class TrampolineSym : public SymbolRecord {
public:
  explicit TrampolineSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  TrampolineSym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  TrampolineType Type;
  uint16_t Size = 0;
  uint32_t ThunkOffset = 0;
  uint32_t TargetOffset = 0;
  uint16_t ThunkSection = 0;
  uint16_t TargetSection = 0;

  uint32_t RecordOffset = 0;
};

// S_SECTION
class SectionSym : public SymbolRecord {
public:
  explicit SectionSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  SectionSym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  uint16_t SectionNumber = 0;
  uint8_t Alignment = 0;
  uint32_t Rva = 0;
  uint32_t Length = 0;
  uint32_t Characteristics = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_COFFGROUP
class CoffGroupSym : public SymbolRecord {
public:
  explicit CoffGroupSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  CoffGroupSym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  uint32_t Size = 0;
  uint32_t Characteristics = 0;
  uint32_t Offset = 0;
  uint16_t Segment = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

class ScopeEndSym : public SymbolRecord {
public:
  explicit ScopeEndSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  ScopeEndSym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  uint32_t RecordOffset = 0;
};

class CallerSym : public SymbolRecord {
public:
  explicit CallerSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  CallerSym(SymbolRecordKind Kind, uint32_t RecordOffset)
      : SymbolRecord(Kind), RecordOffset(RecordOffset) {}

  std::vector<TypeIndex> Indices;

  uint32_t RecordOffset = 0;
};

struct DecodedAnnotation {
  StringRef Name;
  ArrayRef<uint8_t> Bytes;
  BinaryAnnotationsOpCode OpCode = BinaryAnnotationsOpCode::Invalid;
  uint32_t U1 = 0;
  uint32_t U2 = 0;
  int32_t S1 = 0;
};

struct BinaryAnnotationIterator
    : public iterator_facade_base<BinaryAnnotationIterator,
                                  std::forward_iterator_tag,
                                  DecodedAnnotation> {
  BinaryAnnotationIterator() = default;
  BinaryAnnotationIterator(ArrayRef<uint8_t> Annotations) : Data(Annotations) {}
  BinaryAnnotationIterator(const BinaryAnnotationIterator &Other)
      : Data(Other.Data) {}

  bool operator==(BinaryAnnotationIterator Other) const {
    return Data == Other.Data;
  }

  BinaryAnnotationIterator &operator=(const BinaryAnnotationIterator Other) {
    Data = Other.Data;
    return *this;
  }

  BinaryAnnotationIterator &operator++() {
    if (!ParseCurrentAnnotation()) {
      *this = BinaryAnnotationIterator();
      return *this;
    }
    Data = Next;
    Next = ArrayRef<uint8_t>();
    Current.reset();
    return *this;
  }

  const DecodedAnnotation &operator*() {
    ParseCurrentAnnotation();
    return *Current;
  }

private:
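  // CodeView binary annotations encode unsigned operands with a
  // variable-length scheme keyed off the high bits of the first byte:
  //   0xxxxxxx                      -> 1 byte,  7 payload bits
  //   10xxxxxx yyyyyyyy             -> 2 bytes, 14 payload bits
  //   110xxxxx + three more bytes   -> 4 bytes, 29 payload bits
  // Anything else, or a truncated buffer, decodes to -1 (invalid).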
  static uint32_t GetCompressedAnnotation(ArrayRef<uint8_t> &Annotations) {
    if (Annotations.empty())
      return -1;

    uint8_t FirstByte = Annotations.front();
    Annotations = Annotations.drop_front();

    if ((FirstByte & 0x80) == 0x00)
      return FirstByte;

    if (Annotations.empty())
      return -1;

    uint8_t SecondByte = Annotations.front();
    Annotations = Annotations.drop_front();

    if ((FirstByte & 0xC0) == 0x80)
      return ((FirstByte & 0x3F) << 8) | SecondByte;

    if (Annotations.empty())
      return -1;

    uint8_t ThirdByte = Annotations.front();
    Annotations = Annotations.drop_front();

    if (Annotations.empty())
      return -1;

    uint8_t FourthByte = Annotations.front();
    Annotations = Annotations.drop_front();

    if ((FirstByte & 0xE0) == 0xC0)
      return ((FirstByte & 0x1F) << 24) | (SecondByte << 16) |
             (ThirdByte << 8) | FourthByte;

    return -1;
  }
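
  // Signed operands store the sign in the low bit and the magnitude in the
  // remaining bits, so the decoded value is +/-(Operand >> 1).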

  static int32_t DecodeSignedOperand(uint32_t Operand) {
    if (Operand & 1)
      return -(Operand >> 1);
    return Operand >> 1;
  }

  static int32_t DecodeSignedOperand(ArrayRef<uint8_t> &Annotations) {
    return DecodeSignedOperand(GetCompressedAnnotation(Annotations));
  }

  bool ParseCurrentAnnotation() {
    if (Current)
      return true;

    Next = Data;
    uint32_t Op = GetCompressedAnnotation(Next);
    DecodedAnnotation Result;
    Result.OpCode = static_cast<BinaryAnnotationsOpCode>(Op);
    switch (Result.OpCode) {
    case BinaryAnnotationsOpCode::Invalid:
      Result.Name = "Invalid";
      Next = ArrayRef<uint8_t>();
      break;
    case BinaryAnnotationsOpCode::CodeOffset:
      Result.Name = "CodeOffset";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeCodeOffsetBase:
      Result.Name = "ChangeCodeOffsetBase";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeCodeOffset:
      Result.Name = "ChangeCodeOffset";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeCodeLength:
      Result.Name = "ChangeCodeLength";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeFile:
      Result.Name = "ChangeFile";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeLineEndDelta:
      Result.Name = "ChangeLineEndDelta";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeRangeKind:
      Result.Name = "ChangeRangeKind";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeColumnStart:
      Result.Name = "ChangeColumnStart";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeColumnEnd:
      Result.Name = "ChangeColumnEnd";
      Result.U1 = GetCompressedAnnotation(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeLineOffset:
      Result.Name = "ChangeLineOffset";
      Result.S1 = DecodeSignedOperand(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeColumnEndDelta:
      Result.Name = "ChangeColumnEndDelta";
      Result.S1 = DecodeSignedOperand(Next);
      break;
    case BinaryAnnotationsOpCode::ChangeCodeOffsetAndLineOffset: {
      Result.Name = "ChangeCodeOffsetAndLineOffset";
      uint32_t Annotation = GetCompressedAnnotation(Next);
      Result.S1 = DecodeSignedOperand(Annotation >> 4);
      Result.U1 = Annotation & 0xf;
      break;
    }
    case BinaryAnnotationsOpCode::ChangeCodeLengthAndCodeOffset: {
      Result.Name = "ChangeCodeLengthAndCodeOffset";
      Result.U1 = GetCompressedAnnotation(Next);
      Result.U2 = GetCompressedAnnotation(Next);
      break;
    }
    }
    Result.Bytes = Data.take_front(Data.size() - Next.size());
    Current = Result;
    return true;
  }

  std::optional<DecodedAnnotation> Current;
  ArrayRef<uint8_t> Data;
  ArrayRef<uint8_t> Next;
};

// S_INLINESITE
class InlineSiteSym : public SymbolRecord {
public:
  explicit InlineSiteSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit InlineSiteSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::InlineSiteSym),
        RecordOffset(RecordOffset) {}

  iterator_range<BinaryAnnotationIterator> annotations() const {
    return make_range(BinaryAnnotationIterator(AnnotationData),
                      BinaryAnnotationIterator());
  }
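
  // Example (a sketch): walk the decoded annotations of an inline site:
  //
  //   for (const DecodedAnnotation &A : Site.annotations())
  //     llvm::outs() << A.Name << " U1=" << A.U1 << " S1=" << A.S1 << "\n";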

  uint32_t Parent = 0;
  uint32_t End = 0;
  TypeIndex Inlinee;
  std::vector<uint8_t> AnnotationData;

  uint32_t RecordOffset = 0;
};

struct PublicSym32Header {
  ulittle32_t Flags;
  ulittle32_t Offset;
  ulittle16_t Segment;
  // char Name[];
};

// S_PUB32
class PublicSym32 : public SymbolRecord {
public:
  PublicSym32() : SymbolRecord(SymbolRecordKind::PublicSym32) {}
  explicit PublicSym32(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit PublicSym32(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::PublicSym32),
        RecordOffset(RecordOffset) {}

  PublicSymFlags Flags = PublicSymFlags::None;
  uint32_t Offset = 0;
  uint16_t Segment = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_REGISTER
class RegisterSym : public SymbolRecord {
public:
  explicit RegisterSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit RegisterSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::RegisterSym),
        RecordOffset(RecordOffset) {}

  TypeIndex Index;
  RegisterId Register;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_PROCREF, S_LPROCREF
class ProcRefSym : public SymbolRecord {
public:
  explicit ProcRefSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit ProcRefSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::ProcRefSym), RecordOffset(RecordOffset) {
  }

  uint32_t SumName = 0;
  uint32_t SymOffset = 0;
  uint16_t Module = 0;
  StringRef Name;

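  // The Module field is 1-based in the record; modi() converts it to a
  // 0-based module index.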
  uint16_t modi() const { return Module - 1; }
  uint32_t RecordOffset = 0;
};

// S_LOCAL
class LocalSym : public SymbolRecord {
public:
  explicit LocalSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit LocalSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::LocalSym), RecordOffset(RecordOffset) {}

  TypeIndex Type;
  LocalSymFlags Flags = LocalSymFlags::None;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

struct LocalVariableAddrRange {
  uint32_t OffsetStart = 0;
  uint16_t ISectStart = 0;
  uint16_t Range = 0;
};

struct LocalVariableAddrGap {
  uint16_t GapStartOffset = 0;
  uint16_t Range = 0;
};

enum : uint16_t { MaxDefRange = 0xf000 };

// S_DEFRANGE
class DefRangeSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 8;

public:
  explicit DefRangeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit DefRangeSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t Program = 0;
  LocalVariableAddrRange Range;
  std::vector<LocalVariableAddrGap> Gaps;

  uint32_t RecordOffset = 0;
};

// S_DEFRANGE_SUBFIELD
class DefRangeSubfieldSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 12;

public:
  explicit DefRangeSubfieldSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit DefRangeSubfieldSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeSubfieldSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t Program = 0;
  uint16_t OffsetInParent = 0;
  LocalVariableAddrRange Range;
  std::vector<LocalVariableAddrGap> Gaps;

  uint32_t RecordOffset = 0;
};

struct DefRangeRegisterHeader {
  ulittle16_t Register;
  ulittle16_t MayHaveNoName;
};

// S_DEFRANGE_REGISTER
class DefRangeRegisterSym : public SymbolRecord {
public:
  explicit DefRangeRegisterSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit DefRangeRegisterSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeRegisterSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + sizeof(DefRangeRegisterHeader);
  }

  DefRangeRegisterHeader Hdr;
  LocalVariableAddrRange Range;
  std::vector<LocalVariableAddrGap> Gaps;

  uint32_t RecordOffset = 0;
};

struct DefRangeSubfieldRegisterHeader {
  ulittle16_t Register;
  ulittle16_t MayHaveNoName;
  ulittle32_t OffsetInParent;
};

// S_DEFRANGE_SUBFIELD_REGISTER
class DefRangeSubfieldRegisterSym : public SymbolRecord {
public:
  explicit DefRangeSubfieldRegisterSym(SymbolRecordKind Kind)
      : SymbolRecord(Kind) {}
  explicit DefRangeSubfieldRegisterSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeSubfieldRegisterSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + sizeof(DefRangeSubfieldRegisterHeader);
  }

  DefRangeSubfieldRegisterHeader Hdr;
  LocalVariableAddrRange Range;
  std::vector<LocalVariableAddrGap> Gaps;

  uint32_t RecordOffset = 0;
};

struct DefRangeFramePointerRelHeader {
  little32_t Offset;
};

// S_DEFRANGE_FRAMEPOINTER_REL
class DefRangeFramePointerRelSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 8;

public:
  explicit DefRangeFramePointerRelSym(SymbolRecordKind Kind)
      : SymbolRecord(Kind) {}
  explicit DefRangeFramePointerRelSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeFramePointerRelSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  DefRangeFramePointerRelHeader Hdr;
  LocalVariableAddrRange Range;
  std::vector<LocalVariableAddrGap> Gaps;

  uint32_t RecordOffset = 0;
};

struct DefRangeRegisterRelHeader {
  ulittle16_t Register;
  ulittle16_t Flags;
  little32_t BasePointerOffset;
};

// S_DEFRANGE_REGISTER_REL
class DefRangeRegisterRelSym : public SymbolRecord {
public:
  explicit DefRangeRegisterRelSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit DefRangeRegisterRelSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeRegisterRelSym),
        RecordOffset(RecordOffset) {}

  // The flags implement this notional bitfield:
  //   uint16_t IsSubfield : 1;
  //   uint16_t Padding : 3;
  //   uint16_t OffsetInParent : 12;
  enum : uint16_t {
    IsSubfieldFlag = 1,
    OffsetInParentShift = 4,
  };

  bool hasSpilledUDTMember() const { return Hdr.Flags & IsSubfieldFlag; }
  uint16_t offsetInParent() const { return Hdr.Flags >> OffsetInParentShift; }

  uint32_t getRelocationOffset() const {
    return RecordOffset + sizeof(DefRangeRegisterRelHeader);
  }

  DefRangeRegisterRelHeader Hdr;
  LocalVariableAddrRange Range;
  std::vector<LocalVariableAddrGap> Gaps;

  uint32_t RecordOffset = 0;
};

// S_DEFRANGE_FRAMEPOINTER_REL_FULL_SCOPE
class DefRangeFramePointerRelFullScopeSym : public SymbolRecord {
public:
  explicit DefRangeFramePointerRelFullScopeSym(SymbolRecordKind Kind)
      : SymbolRecord(Kind) {}
  explicit DefRangeFramePointerRelFullScopeSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DefRangeFramePointerRelFullScopeSym),
        RecordOffset(RecordOffset) {}

  int32_t Offset = 0;

  uint32_t RecordOffset = 0;
};

// S_BLOCK32
class BlockSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 16;

public:
  explicit BlockSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit BlockSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::BlockSym), RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t Parent = 0;
  uint32_t End = 0;
  uint32_t CodeSize = 0;
  uint32_t CodeOffset = 0;
  uint16_t Segment = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_LABEL32
class LabelSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 4;

public:
  explicit LabelSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit LabelSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::LabelSym), RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t CodeOffset = 0;
  uint16_t Segment = 0;
  ProcSymFlags Flags = ProcSymFlags::None;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_OBJNAME
class ObjNameSym : public SymbolRecord {
public:
  explicit ObjNameSym() : SymbolRecord(SymbolRecordKind::ObjNameSym) {}
  explicit ObjNameSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit ObjNameSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::ObjNameSym), RecordOffset(RecordOffset) {
  }

  uint32_t Signature = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_ENVBLOCK
class EnvBlockSym : public SymbolRecord {
public:
  explicit EnvBlockSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit EnvBlockSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::EnvBlockSym),
        RecordOffset(RecordOffset) {}

  std::vector<StringRef> Fields;

  uint32_t RecordOffset = 0;
};

// S_EXPORT
class ExportSym : public SymbolRecord {
public:
  explicit ExportSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit ExportSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::ExportSym), RecordOffset(RecordOffset) {}

  uint16_t Ordinal = 0;
  ExportFlags Flags = ExportFlags::None;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_FILESTATIC
class FileStaticSym : public SymbolRecord {
public:
  explicit FileStaticSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit FileStaticSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::FileStaticSym),
        RecordOffset(RecordOffset) {}

  TypeIndex Index;
  uint32_t ModFilenameOffset = 0;
  LocalSymFlags Flags = LocalSymFlags::None;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_COMPILE2
class Compile2Sym : public SymbolRecord {
public:
  explicit Compile2Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit Compile2Sym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::Compile2Sym),
        RecordOffset(RecordOffset) {}

  CompileSym2Flags Flags = CompileSym2Flags::None;
  CPUType Machine;
  uint16_t VersionFrontendMajor = 0;
  uint16_t VersionFrontendMinor = 0;
  uint16_t VersionFrontendBuild = 0;
  uint16_t VersionBackendMajor = 0;
  uint16_t VersionBackendMinor = 0;
  uint16_t VersionBackendBuild = 0;
  StringRef Version;
  std::vector<StringRef> ExtraStrings;

  uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
  uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }

  uint32_t RecordOffset = 0;
};

// S_COMPILE3
class Compile3Sym : public SymbolRecord {
public:
  Compile3Sym() : SymbolRecord(SymbolRecordKind::Compile3Sym) {}
  explicit Compile3Sym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit Compile3Sym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::Compile3Sym),
        RecordOffset(RecordOffset) {}

  CompileSym3Flags Flags = CompileSym3Flags::None;
  CPUType Machine;
  uint16_t VersionFrontendMajor = 0;
  uint16_t VersionFrontendMinor = 0;
  uint16_t VersionFrontendBuild = 0;
  uint16_t VersionFrontendQFE = 0;
  uint16_t VersionBackendMajor = 0;
  uint16_t VersionBackendMinor = 0;
  uint16_t VersionBackendBuild = 0;
  uint16_t VersionBackendQFE = 0;
  StringRef Version;

  void setLanguage(SourceLanguage Lang) {
    Flags = CompileSym3Flags((uint32_t(Flags) & 0xFFFFFF00) | uint32_t(Lang));
  }

  SourceLanguage getLanguage() const {
    return static_cast<SourceLanguage>(static_cast<uint32_t>(Flags) & 0xFF);
  }
  CompileSym3Flags getFlags() const {
    return static_cast<CompileSym3Flags>(static_cast<uint32_t>(Flags) & ~0xFF);
  }

  bool hasOptimizations() const {
    return CompileSym3Flags::None !=
           (getFlags() & (CompileSym3Flags::PGO | CompileSym3Flags::LTCG));
  }

  uint32_t RecordOffset = 0;
};

// S_FRAMEPROC
class FrameProcSym : public SymbolRecord {
public:
  explicit FrameProcSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit FrameProcSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::FrameProcSym),
        RecordOffset(RecordOffset) {}

  uint32_t TotalFrameBytes = 0;
  uint32_t PaddingFrameBytes = 0;
  uint32_t OffsetToPadding = 0;
  uint32_t BytesOfCalleeSavedRegisters = 0;
  uint32_t OffsetOfExceptionHandler = 0;
  uint16_t SectionIdOfExceptionHandler = 0;
  FrameProcedureOptions Flags = FrameProcedureOptions::None;

  /// Extract the register this frame uses to refer to local variables.
  RegisterId getLocalFramePtrReg(CPUType CPU) const {
    return decodeFramePtrReg(
        EncodedFramePtrReg((uint32_t(Flags) >> 14U) & 0x3U), CPU);
  }

  /// Extract the register this frame uses to refer to parameters.
  RegisterId getParamFramePtrReg(CPUType CPU) const {
    return decodeFramePtrReg(
        EncodedFramePtrReg((uint32_t(Flags) >> 16U) & 0x3U), CPU);
  }

  uint32_t RecordOffset = 0;
};

// S_CALLSITEINFO
class CallSiteInfoSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 4;

public:
  explicit CallSiteInfoSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit CallSiteInfoSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::CallSiteInfoSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t CodeOffset = 0;
  uint16_t Segment = 0;
  TypeIndex Type;

  uint32_t RecordOffset = 0;
};

// S_HEAPALLOCSITE
class HeapAllocationSiteSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 4;

public:
  explicit HeapAllocationSiteSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit HeapAllocationSiteSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::HeapAllocationSiteSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t CodeOffset = 0;
  uint16_t Segment = 0;
  uint16_t CallInstructionSize = 0;
  TypeIndex Type;

  uint32_t RecordOffset = 0;
};

// S_FRAMECOOKIE
class FrameCookieSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 4;

public:
  explicit FrameCookieSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit FrameCookieSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::FrameCookieSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  uint32_t CodeOffset = 0;
  uint16_t Register = 0;
  FrameCookieKind CookieKind;
  uint8_t Flags = 0;

  uint32_t RecordOffset = 0;
};

// S_UDT, S_COBOLUDT
class UDTSym : public SymbolRecord {
public:
  explicit UDTSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit UDTSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::UDTSym), RecordOffset(RecordOffset) {}

  TypeIndex Type;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_BUILDINFO
class BuildInfoSym : public SymbolRecord {
public:
  explicit BuildInfoSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit BuildInfoSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::BuildInfoSym),
        RecordOffset(RecordOffset) {}

  TypeIndex BuildId;

  uint32_t RecordOffset = 0;
};

// S_BPREL32
class BPRelativeSym : public SymbolRecord {
public:
  explicit BPRelativeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit BPRelativeSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::BPRelativeSym),
        RecordOffset(RecordOffset) {}

  int32_t Offset = 0;
  TypeIndex Type;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_REGREL32
class RegRelativeSym : public SymbolRecord {
public:
  explicit RegRelativeSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit RegRelativeSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::RegRelativeSym),
        RecordOffset(RecordOffset) {}

  uint32_t Offset = 0;
  TypeIndex Type;
  RegisterId Register;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_CONSTANT, S_MANCONSTANT
class ConstantSym : public SymbolRecord {
public:
  explicit ConstantSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit ConstantSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::ConstantSym),
        RecordOffset(RecordOffset) {}

  TypeIndex Type;
  APSInt Value;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_LDATA32, S_GDATA32, S_LMANDATA, S_GMANDATA
class DataSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 8;

public:
  explicit DataSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit DataSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::DataSym), RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  TypeIndex Type;
  uint32_t DataOffset = 0;
  uint16_t Segment = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_LTHREAD32, S_GTHREAD32
class ThreadLocalDataSym : public SymbolRecord {
  static constexpr uint32_t RelocationOffset = 8;

public:
  explicit ThreadLocalDataSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit ThreadLocalDataSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::ThreadLocalDataSym),
        RecordOffset(RecordOffset) {}

  uint32_t getRelocationOffset() const {
    return RecordOffset + RelocationOffset;
  }

  TypeIndex Type;
  uint32_t DataOffset = 0;
  uint16_t Segment = 0;
  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_UNAMESPACE
class UsingNamespaceSym : public SymbolRecord {
public:
  explicit UsingNamespaceSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit UsingNamespaceSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::UsingNamespaceSym),
        RecordOffset(RecordOffset) {}

  StringRef Name;

  uint32_t RecordOffset = 0;
};

// S_ANNOTATION
class AnnotationSym : public SymbolRecord {
public:
  explicit AnnotationSym(SymbolRecordKind Kind) : SymbolRecord(Kind) {}
  explicit AnnotationSym(uint32_t RecordOffset)
      : SymbolRecord(SymbolRecordKind::AnnotationSym),
        RecordOffset(RecordOffset) {}

  uint32_t CodeOffset = 0;
  uint16_t Segment = 0;
  std::vector<StringRef> Strings;

  uint32_t RecordOffset = 0;
};

Expected<CVSymbol> readSymbolFromStream(BinaryStreamRef Stream,
                                        uint32_t Offset);
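
// Illustrative usage (a sketch, not part of the API surface): read one
// symbol record out of a module's symbol substream.  `Stream` and `Offset`
// are assumed to come from the caller's module debug info.
//
//   Expected<CVSymbol> SymOrErr = readSymbolFromStream(Stream, Offset);
//   if (!SymOrErr)
//     return SymOrErr.takeError();
//   CVSymbol Sym = *SymOrErr;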

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORD_H
//===- RecordName.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_RECORDNAME_H
#define LLVM_DEBUGINFO_CODEVIEW_RECORDNAME_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include <string>

namespace llvm {
namespace codeview {
class TypeCollection;
class TypeIndex;
std::string computeTypeName(TypeCollection &Types, TypeIndex Index);
StringRef getSymbolName(CVSymbol Sym);
} // namespace codeview
} // namespace llvm

#endif
//===- AppendingTypeTableBuilder.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_APPENDINGTYPETABLEBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_APPENDINGTYPETABLEBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/SimpleTypeSerializer.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>

namespace llvm {
namespace codeview {

class ContinuationRecordBuilder;

class AppendingTypeTableBuilder : public TypeCollection {

  BumpPtrAllocator &RecordStorage;
  SimpleTypeSerializer SimpleSerializer;

  /// Contains a list of all records indexed by TypeIndex.toArrayIndex().
  SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;

public:
  explicit AppendingTypeTableBuilder(BumpPtrAllocator &Storage);
  ~AppendingTypeTableBuilder();

  // TypeCollection overrides
  std::optional<TypeIndex> getFirst() override;
  std::optional<TypeIndex> getNext(TypeIndex Prev) override;
  CVType getType(TypeIndex Index) override;
  StringRef getTypeName(TypeIndex Index) override;
  bool contains(TypeIndex Index) override;
  uint32_t size() override;
  uint32_t capacity() override;
  bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override;

  // public interface
  void reset();
  TypeIndex nextTypeIndex() const;

  BumpPtrAllocator &getAllocator() { return RecordStorage; }

  ArrayRef<ArrayRef<uint8_t>> records() const;
  TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
  TypeIndex insertRecord(ContinuationRecordBuilder &Builder);

  template <typename T> TypeIndex writeLeafType(T &Record) {
    ArrayRef<uint8_t> Data = SimpleSerializer.serialize(Record);
    return insertRecordBytes(Data);
  }
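
  // Illustrative usage (a sketch; `ElementType` is an assumed TypeIndex
  // provided by the caller, not part of this interface):
  //
  //   BumpPtrAllocator Alloc;
  //   AppendingTypeTableBuilder Builder(Alloc);
  //   ModifierRecord MR(ElementType, ModifierOptions::Const);
  //   TypeIndex TI = Builder.writeLeafType(MR);
  //   ArrayRef<ArrayRef<uint8_t>> All = Builder.records();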
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_APPENDINGTYPETABLEBUILDER_H
//===- TypeDeserializer.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>
#include <memory>

namespace llvm {
namespace codeview {

class TypeDeserializer : public TypeVisitorCallbacks {
  struct MappingInfo {
    explicit MappingInfo(ArrayRef<uint8_t> RecordData)
        : Stream(RecordData, llvm::support::little), Reader(Stream),
          Mapping(Reader) {}

    BinaryByteStream Stream;
    BinaryStreamReader Reader;
    TypeRecordMapping Mapping;
  };

public:
  TypeDeserializer() = default;

  template <typename T> static Error deserializeAs(CVType &CVT, T &Record) {
    Record.Kind = static_cast<TypeRecordKind>(CVT.kind());
    MappingInfo I(CVT.content());
    if (auto EC = I.Mapping.visitTypeBegin(CVT))
      return EC;
    if (auto EC = I.Mapping.visitKnownRecord(CVT, Record))
      return EC;
    if (auto EC = I.Mapping.visitTypeEnd(CVT))
      return EC;
    return Error::success();
  }

  template <typename T>
  static Expected<T> deserializeAs(ArrayRef<uint8_t> Data) {
    const RecordPrefix *Prefix =
        reinterpret_cast<const RecordPrefix *>(Data.data());
    TypeRecordKind K =
        static_cast<TypeRecordKind>(uint16_t(Prefix->RecordKind));
    T Record(K);
    CVType CVT(Data);
    if (auto EC = deserializeAs<T>(CVT, Record))
      return std::move(EC);
    return Record;
  }
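
  // Illustrative usage (a sketch): decode a record that is known to be an
  // LF_POINTER into its strongly-typed form.  `CVT` is an assumed,
  // already-validated CVType obtained from a type stream.
  //
  //   Expected<PointerRecord> PR =
  //       TypeDeserializer::deserializeAs<PointerRecord>(CVT.data());
  //   if (!PR)
  //     return PR.takeError();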

  Error visitTypeBegin(CVType &Record) override {
    assert(!Mapping && "Already in a type mapping!");
    Mapping = std::make_unique<MappingInfo>(Record.content());
    return Mapping->Mapping.visitTypeBegin(Record);
  }

  Error visitTypeBegin(CVType &Record, TypeIndex Index) override {
    return visitTypeBegin(Record);
  }

  Error visitTypeEnd(CVType &Record) override {
    assert(Mapping && "Not in a type mapping!");
    auto EC = Mapping->Mapping.visitTypeEnd(Record);
    Mapping.reset();
    return EC;
  }

#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
  Error visitKnownRecord(CVType &CVR, Name##Record &Record) override {         \
    return visitKnownRecordImpl<Name##Record>(CVR, Record);                    \
  }
#define MEMBER_RECORD(EnumName, EnumVal, Name)
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"

private:
  template <typename RecordType>
  Error visitKnownRecordImpl(CVType &CVR, RecordType &Record) {
    return Mapping->Mapping.visitKnownRecord(CVR, Record);
  }

  std::unique_ptr<MappingInfo> Mapping;
};

class FieldListDeserializer : public TypeVisitorCallbacks {
  struct MappingInfo {
    explicit MappingInfo(BinaryStreamReader &R)
        : Reader(R), Mapping(Reader), StartOffset(0) {}

    BinaryStreamReader &Reader;
    TypeRecordMapping Mapping;
    uint32_t StartOffset;
  };

public:
  explicit FieldListDeserializer(BinaryStreamReader &Reader) : Mapping(Reader) {
    RecordPrefix Pre(static_cast<uint16_t>(TypeLeafKind::LF_FIELDLIST));
    CVType FieldList(&Pre, sizeof(Pre));
    consumeError(Mapping.Mapping.visitTypeBegin(FieldList));
  }

  ~FieldListDeserializer() override {
    RecordPrefix Pre(static_cast<uint16_t>(TypeLeafKind::LF_FIELDLIST));
    CVType FieldList(&Pre, sizeof(Pre));
    consumeError(Mapping.Mapping.visitTypeEnd(FieldList));
  }

  Error visitMemberBegin(CVMemberRecord &Record) override {
    Mapping.StartOffset = Mapping.Reader.getOffset();
    return Mapping.Mapping.visitMemberBegin(Record);
  }

  Error visitMemberEnd(CVMemberRecord &Record) override {
    if (auto EC = Mapping.Mapping.visitMemberEnd(Record))
      return EC;
    return Error::success();
  }

#define TYPE_RECORD(EnumName, EnumVal, Name)
#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override { \
    return visitKnownMemberImpl<Name##Record>(CVR, Record);                    \
  }
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"

private:
  template <typename RecordType>
  Error visitKnownMemberImpl(CVMemberRecord &CVR, RecordType &Record) {
    if (auto EC = Mapping.Mapping.visitKnownMember(CVR, Record))
      return EC;

    uint32_t EndOffset = Mapping.Reader.getOffset();
    uint32_t RecordLength = EndOffset - Mapping.StartOffset;
    Mapping.Reader.setOffset(Mapping.StartOffset);
    if (auto EC = Mapping.Reader.readBytes(CVR.Data, RecordLength))
      return EC;
    assert(Mapping.Reader.getOffset() == EndOffset);
    return Error::success();
  }
  MappingInfo Mapping;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDESERIALIZER_H
//===-- SymbolDumper.h - CodeView symbol info dumper ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPER_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPER_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/SymbolDumpDelegate.h"
#include "llvm/Support/Error.h"

#include <memory>
#include <utility>

namespace llvm {
class ScopedPrinter;

namespace codeview {
class TypeCollection;

/// Dumper for CodeView symbol streams found in COFF object files and PDB
/// files.
class CVSymbolDumper {
public:
  CVSymbolDumper(ScopedPrinter &W, TypeCollection &Types,
                 CodeViewContainer Container,
                 std::unique_ptr<SymbolDumpDelegate> ObjDelegate, CPUType CPU,
                 bool PrintRecordBytes)
      : W(W), Types(Types), Container(Container),
        ObjDelegate(std::move(ObjDelegate)), CompilationCPUType(CPU),
        PrintRecordBytes(PrintRecordBytes) {}

  /// Dumps one symbol record.  Returns an error if the record could not be
  /// parsed.  Records should be dumped in order, since the dumper maintains
  /// state about previously seen records that is needed for cross-record
  /// references.
  Error dump(CVRecord<SymbolKind> &Record);

  /// Dumps every symbol record in the array.  Returns an error if the
  /// symbol stream could not be parsed.
  Error dump(const CVSymbolArray &Symbols);
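
  // Illustrative usage (a sketch; `Printer`, `TypeTable`, and `Symbols` are
  // assumed to be provided by the caller):
  //
  //   CVSymbolDumper Dumper(Printer, TypeTable, CodeViewContainer::ObjectFile,
  //                         /*ObjDelegate=*/nullptr, CPUType::X64,
  //                         /*PrintRecordBytes=*/false);
  //   if (Error E = Dumper.dump(Symbols))
  //     return E;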

  CPUType getCompilationCPUType() const { return CompilationCPUType; }

private:
  ScopedPrinter &W;
  TypeCollection &Types;
  CodeViewContainer Container;
  std::unique_ptr<SymbolDumpDelegate> ObjDelegate;
  CPUType CompilationCPUType;
  bool PrintRecordBytes;
};
} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPER_H
//===- Formatters.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H
#define LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

namespace llvm {

namespace codeview {

struct GUID;

namespace detail {

class GuidAdapter final : public FormatAdapter<ArrayRef<uint8_t>> {
  ArrayRef<uint8_t> Guid;

public:
  explicit GuidAdapter(ArrayRef<uint8_t> Guid);
  explicit GuidAdapter(StringRef Guid);

  void format(raw_ostream &Stream, StringRef Style) override;
};

} // end namespace detail

inline detail::GuidAdapter fmt_guid(StringRef Item) {
  return detail::GuidAdapter(Item);
}

inline detail::GuidAdapter fmt_guid(ArrayRef<uint8_t> Item) {
  return detail::GuidAdapter(Item);
}
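
// Illustrative usage (a sketch): the adapters plug into formatv().  `G` is
// an assumed codeview::GUID value.
//
//   std::string S = formatv("{0}", fmt_guid(ArrayRef<uint8_t>(G.Guid))).str();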

} // end namespace codeview

template <> struct format_provider<codeview::TypeIndex> {
public:
  static void format(const codeview::TypeIndex &V, raw_ostream &Stream,
                     StringRef Style) {
    if (V.isNoneType())
      Stream << "<no type>";
    else {
      Stream << formatv("{0:X+4}", V.getIndex());
      if (V.isSimple())
        Stream << " (" << codeview::TypeIndex::simpleTypeName(V) << ")";
    }
  }
};

template <> struct format_provider<codeview::GUID> {
  static void format(const codeview::GUID &V, llvm::raw_ostream &Stream,
                     StringRef Style) {
    Stream << V;
  }
};

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H
//===- DebugLinesSubsection.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGLINESSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGLINESSUBSECTION_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/DebugInfo/CodeView/Line.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>

namespace llvm {
class BinaryStreamReader;
class BinaryStreamWriter;
namespace codeview {

class DebugChecksumsSubsection;
class DebugStringTableSubsection;

// Corresponds to the `CV_DebugSLinesHeader_t` structure.
struct LineFragmentHeader {
  support::ulittle32_t RelocOffset;  // Code offset of line contribution.
  support::ulittle16_t RelocSegment; // Code segment of line contribution.
  support::ulittle16_t Flags;        // See LineFlags enumeration.
  support::ulittle32_t CodeSize;     // Code size of this line contribution.
};

// Corresponds to the `CV_DebugSLinesFileBlockHeader_t` structure.
struct LineBlockFragmentHeader {
  support::ulittle32_t NameIndex; // Offset of FileChecksum entry in File
                                  // checksums buffer.  The checksum entry then
                                  // contains another offset into the string
                                  // table of the actual name.
  support::ulittle32_t NumLines;  // Number of lines
  support::ulittle32_t BlockSize; // Code size of block, in bytes.
  // The following two variable length arrays appear immediately after the
  // header.  The structure definitions follow.
  // LineNumberEntry   Lines[NumLines];
  // ColumnNumberEntry Columns[NumLines];
};

// Corresponds to `CV_Line_t` structure
struct LineNumberEntry {
  support::ulittle32_t Offset; // Offset to start of code bytes for line number
  support::ulittle32_t Flags;  // Start:24, End:7, IsStatement:1
};
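
// Illustrative decoding of LineNumberEntry::Flags (a sketch; the LineInfo
// wrapper in Line.h is the supported accessor).  `E` is an assumed entry:
//
//   uint32_t StartLine   = E.Flags & 0x00FFFFFF;   // bits 0-23
//   uint32_t EndDelta    = (E.Flags >> 24) & 0x7F; // bits 24-30
//   bool     IsStatement = (E.Flags >> 31) != 0;   // bit 31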

// Corresponds to `CV_Column_t` structure
struct ColumnNumberEntry {
  support::ulittle16_t StartColumn;
  support::ulittle16_t EndColumn;
};

struct LineColumnEntry {
  support::ulittle32_t NameIndex;
  FixedStreamArray<LineNumberEntry> LineNumbers;
  FixedStreamArray<ColumnNumberEntry> Columns;
};

class LineColumnExtractor {
public:
  Error operator()(BinaryStreamRef Stream, uint32_t &Len,
                   LineColumnEntry &Item);

  const LineFragmentHeader *Header = nullptr;
};

class DebugLinesSubsectionRef final : public DebugSubsectionRef {
  friend class LineColumnExtractor;

  using LineInfoArray = VarStreamArray<LineColumnEntry, LineColumnExtractor>;
  using Iterator = LineInfoArray::Iterator;

public:
  DebugLinesSubsectionRef();

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::Lines;
  }

  Error initialize(BinaryStreamReader Reader);

  Iterator begin() const { return LinesAndColumns.begin(); }
  Iterator end() const { return LinesAndColumns.end(); }

  const LineFragmentHeader *header() const { return Header; }

  bool hasColumnInfo() const;

private:
  const LineFragmentHeader *Header = nullptr;
  LineInfoArray LinesAndColumns;
};

class DebugLinesSubsection final : public DebugSubsection {
  struct Block {
    Block(uint32_t ChecksumBufferOffset)
        : ChecksumBufferOffset(ChecksumBufferOffset) {}

    uint32_t ChecksumBufferOffset;
    std::vector<LineNumberEntry> Lines;
    std::vector<ColumnNumberEntry> Columns;
  };

public:
  DebugLinesSubsection(DebugChecksumsSubsection &Checksums,
                       DebugStringTableSubsection &Strings);

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::Lines;
  }

  void createBlock(StringRef FileName);
  void addLineInfo(uint32_t Offset, const LineInfo &Line);
  void addLineAndColumnInfo(uint32_t Offset, const LineInfo &Line,
                            uint32_t ColStart, uint32_t ColEnd);
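
  // Illustrative population (a sketch; `Checksums`, `Strings`, and the
  // segment/offset/size values are assumed caller-provided):
  //
  //   DebugLinesSubsection Lines(Checksums, Strings);
  //   Lines.setRelocationAddress(Segment, Offset);
  //   Lines.setCodeSize(FunctionSize);
  //   Lines.createBlock("a.cpp");
  //   Lines.addLineInfo(0, LineInfo(1, 1, true));
  //   Lines.addLineInfo(4, LineInfo(2, 2, true));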

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;

  void setRelocationAddress(uint16_t Segment, uint32_t Offset);
  void setCodeSize(uint32_t Size);
  void setFlags(LineFlags Flags);

  bool hasColumnInfo() const;

private:
  DebugChecksumsSubsection &Checksums;
  uint32_t RelocOffset = 0;
  uint16_t RelocSegment = 0;
  uint32_t CodeSize = 0;
  LineFlags Flags = LF_None;
  std::vector<Block> Blocks;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGLINESSUBSECTION_H
//===-- CodeViewRegisters.def - CodeView registers ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See CV_HREG_e in cvconst.h. This should match the constants there.
//
//===----------------------------------------------------------------------===//

#ifndef CV_REGISTER
#define CV_REGISTER(name, value)
#endif

#if !defined(CV_REGISTERS_ALL) && !defined(CV_REGISTERS_X86) &&                \
    !defined(CV_REGISTERS_ARM) &&                                              \
    !defined(CV_REGISTERS_ARM64)
#error Need to include at least one register set.
#endif
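
// Illustrative consumption of this file (an X-macro sketch; this mirrors how
// the RegisterId enum is generated in CodeView.h):
//
//   enum class RegisterId : uint16_t {
//   #define CV_REGISTERS_ALL
//   #define CV_REGISTER(name, value) name = value,
//   #include "llvm/DebugInfo/CodeView/CodeViewRegisters.def"
//   #undef CV_REGISTER
//   };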

// This currently only contains the "register subset shared by all processor
// types" (ERR etc.) and the x86, ARM, and ARM64 registers.

#if defined(CV_REGISTERS_ALL) || defined(CV_REGISTERS_X86)

// Some system headers define macros that conflict with our enums. Every
// compiler supported by LLVM has the push_macro and pop_macro pragmas, so use
// them to avoid the conflict.
#pragma push_macro("CR0")
#pragma push_macro("CR1")
#pragma push_macro("CR2")
#pragma push_macro("CR3")
#pragma push_macro("CR4")

CV_REGISTER(ERR, 30000)
CV_REGISTER(TEB, 30001)
CV_REGISTER(TIMER, 30002)
CV_REGISTER(EFAD1, 30003)
CV_REGISTER(EFAD2, 30004)
CV_REGISTER(EFAD3, 30005)
CV_REGISTER(VFRAME, 30006)
CV_REGISTER(HANDLE, 30007)
CV_REGISTER(PARAMS, 30008)
CV_REGISTER(LOCALS, 30009)
CV_REGISTER(TID, 30010)
CV_REGISTER(ENV, 30011)
CV_REGISTER(CMDLN, 30012)

CV_REGISTER(NONE, 0)
CV_REGISTER(AL, 1)
CV_REGISTER(CL, 2)
CV_REGISTER(DL, 3)
CV_REGISTER(BL, 4)
CV_REGISTER(AH, 5)
CV_REGISTER(CH, 6)
CV_REGISTER(DH, 7)
CV_REGISTER(BH, 8)
CV_REGISTER(AX, 9)
CV_REGISTER(CX, 10)
CV_REGISTER(DX, 11)
CV_REGISTER(BX, 12)
CV_REGISTER(SP, 13)
CV_REGISTER(BP, 14)
CV_REGISTER(SI, 15)
CV_REGISTER(DI, 16)
CV_REGISTER(EAX, 17)
CV_REGISTER(ECX, 18)
CV_REGISTER(EDX, 19)
CV_REGISTER(EBX, 20)
CV_REGISTER(ESP, 21)
CV_REGISTER(EBP, 22)
CV_REGISTER(ESI, 23)
CV_REGISTER(EDI, 24)
CV_REGISTER(ES, 25)
CV_REGISTER(CS, 26)
CV_REGISTER(SS, 27)
CV_REGISTER(DS, 28)
CV_REGISTER(FS, 29)
CV_REGISTER(GS, 30)
CV_REGISTER(IP, 31)
CV_REGISTER(FLAGS, 32)
CV_REGISTER(EIP, 33)
CV_REGISTER(EFLAGS, 34)
CV_REGISTER(TEMP, 40)
CV_REGISTER(TEMPH, 41)
CV_REGISTER(QUOTE, 42)
CV_REGISTER(PCDR3, 43)
CV_REGISTER(PCDR4, 44)
CV_REGISTER(PCDR5, 45)
CV_REGISTER(PCDR6, 46)
CV_REGISTER(PCDR7, 47)
CV_REGISTER(CR0, 80)
CV_REGISTER(CR1, 81)
CV_REGISTER(CR2, 82)
CV_REGISTER(CR3, 83)
CV_REGISTER(CR4, 84)
CV_REGISTER(DR0, 90)
CV_REGISTER(DR1, 91)
CV_REGISTER(DR2, 92)
CV_REGISTER(DR3, 93)
CV_REGISTER(DR4, 94)
CV_REGISTER(DR5, 95)
CV_REGISTER(DR6, 96)
CV_REGISTER(DR7, 97)
CV_REGISTER(GDTR, 110)
CV_REGISTER(GDTL, 111)
CV_REGISTER(IDTR, 112)
CV_REGISTER(IDTL, 113)
CV_REGISTER(LDTR, 114)
CV_REGISTER(TR, 115)

CV_REGISTER(PSEUDO1, 116)
CV_REGISTER(PSEUDO2, 117)
CV_REGISTER(PSEUDO3, 118)
CV_REGISTER(PSEUDO4, 119)
CV_REGISTER(PSEUDO5, 120)
CV_REGISTER(PSEUDO6, 121)
CV_REGISTER(PSEUDO7, 122)
CV_REGISTER(PSEUDO8, 123)
CV_REGISTER(PSEUDO9, 124)

CV_REGISTER(ST0, 128)
CV_REGISTER(ST1, 129)
CV_REGISTER(ST2, 130)
CV_REGISTER(ST3, 131)
CV_REGISTER(ST4, 132)
CV_REGISTER(ST5, 133)
CV_REGISTER(ST6, 134)
CV_REGISTER(ST7, 135)
CV_REGISTER(CTRL, 136)
CV_REGISTER(STAT, 137)
CV_REGISTER(TAG, 138)
CV_REGISTER(FPIP, 139)
CV_REGISTER(FPCS, 140)
CV_REGISTER(FPDO, 141)
CV_REGISTER(FPDS, 142)
CV_REGISTER(ISEM, 143)
CV_REGISTER(FPEIP, 144)
CV_REGISTER(FPEDO, 145)

CV_REGISTER(MM0, 146)
CV_REGISTER(MM1, 147)
CV_REGISTER(MM2, 148)
CV_REGISTER(MM3, 149)
CV_REGISTER(MM4, 150)
CV_REGISTER(MM5, 151)
CV_REGISTER(MM6, 152)
CV_REGISTER(MM7, 153)

CV_REGISTER(XMM0, 154)
CV_REGISTER(XMM1, 155)
CV_REGISTER(XMM2, 156)
CV_REGISTER(XMM3, 157)
CV_REGISTER(XMM4, 158)
CV_REGISTER(XMM5, 159)
CV_REGISTER(XMM6, 160)
CV_REGISTER(XMM7, 161)

CV_REGISTER(MXCSR, 211)

CV_REGISTER(EDXEAX, 212)

CV_REGISTER(EMM0L, 220)
CV_REGISTER(EMM1L, 221)
CV_REGISTER(EMM2L, 222)
CV_REGISTER(EMM3L, 223)
CV_REGISTER(EMM4L, 224)
CV_REGISTER(EMM5L, 225)
CV_REGISTER(EMM6L, 226)
CV_REGISTER(EMM7L, 227)

CV_REGISTER(EMM0H, 228)
CV_REGISTER(EMM1H, 229)
CV_REGISTER(EMM2H, 230)
CV_REGISTER(EMM3H, 231)
CV_REGISTER(EMM4H, 232)
CV_REGISTER(EMM5H, 233)
CV_REGISTER(EMM6H, 234)
CV_REGISTER(EMM7H, 235)

CV_REGISTER(MM00, 236)
CV_REGISTER(MM01, 237)
CV_REGISTER(MM10, 238)
CV_REGISTER(MM11, 239)
CV_REGISTER(MM20, 240)
CV_REGISTER(MM21, 241)
CV_REGISTER(MM30, 242)
CV_REGISTER(MM31, 243)
CV_REGISTER(MM40, 244)
CV_REGISTER(MM41, 245)
CV_REGISTER(MM50, 246)
CV_REGISTER(MM51, 247)
CV_REGISTER(MM60, 248)
CV_REGISTER(MM61, 249)
CV_REGISTER(MM70, 250)
CV_REGISTER(MM71, 251)

CV_REGISTER(BND0, 396)
CV_REGISTER(BND1, 397)
CV_REGISTER(BND2, 398)


CV_REGISTER(XMM8, 252)
CV_REGISTER(XMM9, 253)
CV_REGISTER(XMM10, 254)
CV_REGISTER(XMM11, 255)
CV_REGISTER(XMM12, 256)
CV_REGISTER(XMM13, 257)
CV_REGISTER(XMM14, 258)
CV_REGISTER(XMM15, 259)


CV_REGISTER(SIL, 324)
CV_REGISTER(DIL, 325)
CV_REGISTER(BPL, 326)
CV_REGISTER(SPL, 327)

CV_REGISTER(RAX, 328)
CV_REGISTER(RBX, 329)
CV_REGISTER(RCX, 330)
CV_REGISTER(RDX, 331)
CV_REGISTER(RSI, 332)
CV_REGISTER(RDI, 333)
CV_REGISTER(RBP, 334)
CV_REGISTER(RSP, 335)

CV_REGISTER(R8, 336)
CV_REGISTER(R9, 337)
CV_REGISTER(R10, 338)
CV_REGISTER(R11, 339)
CV_REGISTER(R12, 340)
CV_REGISTER(R13, 341)
CV_REGISTER(R14, 342)
CV_REGISTER(R15, 343)

CV_REGISTER(R8B, 344)
CV_REGISTER(R9B, 345)
CV_REGISTER(R10B, 346)
CV_REGISTER(R11B, 347)
CV_REGISTER(R12B, 348)
CV_REGISTER(R13B, 349)
CV_REGISTER(R14B, 350)
CV_REGISTER(R15B, 351)

CV_REGISTER(R8W, 352)
CV_REGISTER(R9W, 353)
CV_REGISTER(R10W, 354)
CV_REGISTER(R11W, 355)
CV_REGISTER(R12W, 356)
CV_REGISTER(R13W, 357)
CV_REGISTER(R14W, 358)
CV_REGISTER(R15W, 359)

CV_REGISTER(R8D, 360)
CV_REGISTER(R9D, 361)
CV_REGISTER(R10D, 362)
CV_REGISTER(R11D, 363)
CV_REGISTER(R12D, 364)
CV_REGISTER(R13D, 365)
CV_REGISTER(R14D, 366)
CV_REGISTER(R15D, 367)


// cvconst.h defines both CV_REG_YMM0 (252) and CV_AMD64_YMM0 (368). Keep the
// original prefix to distinguish them.

CV_REGISTER(AMD64_YMM0, 368)
CV_REGISTER(AMD64_YMM1, 369)
CV_REGISTER(AMD64_YMM2, 370)
CV_REGISTER(AMD64_YMM3, 371)
CV_REGISTER(AMD64_YMM4, 372)
CV_REGISTER(AMD64_YMM5, 373)
CV_REGISTER(AMD64_YMM6, 374)
CV_REGISTER(AMD64_YMM7, 375)
CV_REGISTER(AMD64_YMM8, 376)
CV_REGISTER(AMD64_YMM9, 377)
CV_REGISTER(AMD64_YMM10, 378)
CV_REGISTER(AMD64_YMM11, 379)
CV_REGISTER(AMD64_YMM12, 380)
CV_REGISTER(AMD64_YMM13, 381)
CV_REGISTER(AMD64_YMM14, 382)
CV_REGISTER(AMD64_YMM15, 383)

CV_REGISTER(AMD64_XMM16, 694)
CV_REGISTER(AMD64_XMM17, 695)
CV_REGISTER(AMD64_XMM18, 696)
CV_REGISTER(AMD64_XMM19, 697)
CV_REGISTER(AMD64_XMM20, 698)
CV_REGISTER(AMD64_XMM21, 699)
CV_REGISTER(AMD64_XMM22, 700)
CV_REGISTER(AMD64_XMM23, 701)
CV_REGISTER(AMD64_XMM24, 702)
CV_REGISTER(AMD64_XMM25, 703)
CV_REGISTER(AMD64_XMM26, 704)
CV_REGISTER(AMD64_XMM27, 705)
CV_REGISTER(AMD64_XMM28, 706)
CV_REGISTER(AMD64_XMM29, 707)
CV_REGISTER(AMD64_XMM30, 708)
CV_REGISTER(AMD64_XMM31, 709)

CV_REGISTER(AMD64_YMM16, 710)
CV_REGISTER(AMD64_YMM17, 711)
CV_REGISTER(AMD64_YMM18, 712)
CV_REGISTER(AMD64_YMM19, 713)
CV_REGISTER(AMD64_YMM20, 714)
CV_REGISTER(AMD64_YMM21, 715)
CV_REGISTER(AMD64_YMM22, 716)
CV_REGISTER(AMD64_YMM23, 717)
CV_REGISTER(AMD64_YMM24, 718)
CV_REGISTER(AMD64_YMM25, 719)
CV_REGISTER(AMD64_YMM26, 720)
CV_REGISTER(AMD64_YMM27, 721)
CV_REGISTER(AMD64_YMM28, 722)
CV_REGISTER(AMD64_YMM29, 723)
CV_REGISTER(AMD64_YMM30, 724)
CV_REGISTER(AMD64_YMM31, 725)

CV_REGISTER(AMD64_ZMM0, 726)
CV_REGISTER(AMD64_ZMM1, 727)
CV_REGISTER(AMD64_ZMM2, 728)
CV_REGISTER(AMD64_ZMM3, 729)
CV_REGISTER(AMD64_ZMM4, 730)
CV_REGISTER(AMD64_ZMM5, 731)
CV_REGISTER(AMD64_ZMM6, 732)
CV_REGISTER(AMD64_ZMM7, 733)
CV_REGISTER(AMD64_ZMM8, 734)
CV_REGISTER(AMD64_ZMM9, 735)
CV_REGISTER(AMD64_ZMM10, 736)
CV_REGISTER(AMD64_ZMM11, 737)
CV_REGISTER(AMD64_ZMM12, 738)
CV_REGISTER(AMD64_ZMM13, 739)
CV_REGISTER(AMD64_ZMM14, 740)
CV_REGISTER(AMD64_ZMM15, 741)
CV_REGISTER(AMD64_ZMM16, 742)
CV_REGISTER(AMD64_ZMM17, 743)
CV_REGISTER(AMD64_ZMM18, 744)
CV_REGISTER(AMD64_ZMM19, 745)
CV_REGISTER(AMD64_ZMM20, 746)
CV_REGISTER(AMD64_ZMM21, 747)
CV_REGISTER(AMD64_ZMM22, 748)
CV_REGISTER(AMD64_ZMM23, 749)
CV_REGISTER(AMD64_ZMM24, 750)
CV_REGISTER(AMD64_ZMM25, 751)
CV_REGISTER(AMD64_ZMM26, 752)
CV_REGISTER(AMD64_ZMM27, 753)
CV_REGISTER(AMD64_ZMM28, 754)
CV_REGISTER(AMD64_ZMM29, 755)
CV_REGISTER(AMD64_ZMM30, 756)
CV_REGISTER(AMD64_ZMM31, 757)

CV_REGISTER(AMD64_K0, 758)
CV_REGISTER(AMD64_K1, 759)
CV_REGISTER(AMD64_K2, 760)
CV_REGISTER(AMD64_K3, 761)
CV_REGISTER(AMD64_K4, 762)
CV_REGISTER(AMD64_K5, 763)
CV_REGISTER(AMD64_K6, 764)
CV_REGISTER(AMD64_K7, 765)

#pragma pop_macro("CR0")
#pragma pop_macro("CR1")
#pragma pop_macro("CR2")
#pragma pop_macro("CR3")
#pragma pop_macro("CR4")

#endif // defined(CV_REGISTERS_ALL) || defined(CV_REGISTERS_X86)

#if defined(CV_REGISTERS_ALL) || defined(CV_REGISTERS_ARM)

// ARM registers

CV_REGISTER(ARM_NOREG, 0)

// General purpose 32-bit integer registers

CV_REGISTER(ARM_R0, 10)
CV_REGISTER(ARM_R1, 11)
CV_REGISTER(ARM_R2, 12)
CV_REGISTER(ARM_R3, 13)
CV_REGISTER(ARM_R4, 14)
CV_REGISTER(ARM_R5, 15)
CV_REGISTER(ARM_R6, 16)
CV_REGISTER(ARM_R7, 17)
CV_REGISTER(ARM_R8, 18)
CV_REGISTER(ARM_R9, 19)
CV_REGISTER(ARM_R10, 20)
CV_REGISTER(ARM_R11, 21)
CV_REGISTER(ARM_R12, 22)
CV_REGISTER(ARM_SP, 23)
CV_REGISTER(ARM_LR, 24)
CV_REGISTER(ARM_PC, 25)

// Status register

CV_REGISTER(ARM_CPSR, 26)

// ARM VFPv1 registers

CV_REGISTER(ARM_FPSCR, 40)
CV_REGISTER(ARM_FPEXC, 41)

CV_REGISTER(ARM_FS0, 50)
CV_REGISTER(ARM_FS1, 51)
CV_REGISTER(ARM_FS2, 52)
CV_REGISTER(ARM_FS3, 53)
CV_REGISTER(ARM_FS4, 54)
CV_REGISTER(ARM_FS5, 55)
CV_REGISTER(ARM_FS6, 56)
CV_REGISTER(ARM_FS7, 57)
CV_REGISTER(ARM_FS8, 58)
CV_REGISTER(ARM_FS9, 59)
CV_REGISTER(ARM_FS10, 60)
CV_REGISTER(ARM_FS11, 61)
CV_REGISTER(ARM_FS12, 62)
CV_REGISTER(ARM_FS13, 63)
CV_REGISTER(ARM_FS14, 64)
CV_REGISTER(ARM_FS15, 65)
CV_REGISTER(ARM_FS16, 66)
CV_REGISTER(ARM_FS17, 67)
CV_REGISTER(ARM_FS18, 68)
CV_REGISTER(ARM_FS19, 69)
CV_REGISTER(ARM_FS20, 70)
CV_REGISTER(ARM_FS21, 71)
CV_REGISTER(ARM_FS22, 72)
CV_REGISTER(ARM_FS23, 73)
CV_REGISTER(ARM_FS24, 74)
CV_REGISTER(ARM_FS25, 75)
CV_REGISTER(ARM_FS26, 76)
CV_REGISTER(ARM_FS27, 77)
CV_REGISTER(ARM_FS28, 78)
CV_REGISTER(ARM_FS29, 79)
CV_REGISTER(ARM_FS30, 80)
CV_REGISTER(ARM_FS31, 81)

// ARM VFPv3/NEON registers

CV_REGISTER(ARM_FS32, 200)
CV_REGISTER(ARM_FS33, 201)
CV_REGISTER(ARM_FS34, 202)
CV_REGISTER(ARM_FS35, 203)
CV_REGISTER(ARM_FS36, 204)
CV_REGISTER(ARM_FS37, 205)
CV_REGISTER(ARM_FS38, 206)
CV_REGISTER(ARM_FS39, 207)
CV_REGISTER(ARM_FS40, 208)
CV_REGISTER(ARM_FS41, 209)
CV_REGISTER(ARM_FS42, 210)
CV_REGISTER(ARM_FS43, 211)
CV_REGISTER(ARM_FS44, 212)
CV_REGISTER(ARM_FS45, 213)
CV_REGISTER(ARM_FS46, 214)
CV_REGISTER(ARM_FS47, 215)
CV_REGISTER(ARM_FS48, 216)
CV_REGISTER(ARM_FS49, 217)
CV_REGISTER(ARM_FS50, 218)
CV_REGISTER(ARM_FS51, 219)
CV_REGISTER(ARM_FS52, 220)
CV_REGISTER(ARM_FS53, 221)
CV_REGISTER(ARM_FS54, 222)
CV_REGISTER(ARM_FS55, 223)
CV_REGISTER(ARM_FS56, 224)
CV_REGISTER(ARM_FS57, 225)
CV_REGISTER(ARM_FS58, 226)
CV_REGISTER(ARM_FS59, 227)
CV_REGISTER(ARM_FS60, 228)
CV_REGISTER(ARM_FS61, 229)
CV_REGISTER(ARM_FS62, 230)
CV_REGISTER(ARM_FS63, 231)

CV_REGISTER(ARM_ND0, 300)
CV_REGISTER(ARM_ND1, 301)
CV_REGISTER(ARM_ND2, 302)
CV_REGISTER(ARM_ND3, 303)
CV_REGISTER(ARM_ND4, 304)
CV_REGISTER(ARM_ND5, 305)
CV_REGISTER(ARM_ND6, 306)
CV_REGISTER(ARM_ND7, 307)
CV_REGISTER(ARM_ND8, 308)
CV_REGISTER(ARM_ND9, 309)
CV_REGISTER(ARM_ND10, 310)
CV_REGISTER(ARM_ND11, 311)
CV_REGISTER(ARM_ND12, 312)
CV_REGISTER(ARM_ND13, 313)
CV_REGISTER(ARM_ND14, 314)
CV_REGISTER(ARM_ND15, 315)
CV_REGISTER(ARM_ND16, 316)
CV_REGISTER(ARM_ND17, 317)
CV_REGISTER(ARM_ND18, 318)
CV_REGISTER(ARM_ND19, 319)
CV_REGISTER(ARM_ND20, 320)
CV_REGISTER(ARM_ND21, 321)
CV_REGISTER(ARM_ND22, 322)
CV_REGISTER(ARM_ND23, 323)
CV_REGISTER(ARM_ND24, 324)
CV_REGISTER(ARM_ND25, 325)
CV_REGISTER(ARM_ND26, 326)
CV_REGISTER(ARM_ND27, 327)
CV_REGISTER(ARM_ND28, 328)
CV_REGISTER(ARM_ND29, 329)
CV_REGISTER(ARM_ND30, 330)
CV_REGISTER(ARM_ND31, 331)

CV_REGISTER(ARM_NQ0, 400)
CV_REGISTER(ARM_NQ1, 401)
CV_REGISTER(ARM_NQ2, 402)
CV_REGISTER(ARM_NQ3, 403)
CV_REGISTER(ARM_NQ4, 404)
CV_REGISTER(ARM_NQ5, 405)
CV_REGISTER(ARM_NQ6, 406)
CV_REGISTER(ARM_NQ7, 407)
CV_REGISTER(ARM_NQ8, 408)
CV_REGISTER(ARM_NQ9, 409)
CV_REGISTER(ARM_NQ10, 410)
CV_REGISTER(ARM_NQ11, 411)
CV_REGISTER(ARM_NQ12, 412)
CV_REGISTER(ARM_NQ13, 413)
CV_REGISTER(ARM_NQ14, 414)
CV_REGISTER(ARM_NQ15, 415)

#endif // defined(CV_REGISTERS_ALL) || defined(CV_REGISTERS_ARM)

#if defined(CV_REGISTERS_ALL) || defined(CV_REGISTERS_ARM64)

// arm64intr.h from MSVC defines ARM64_FPSR and ARM64_FPCR, which conflicts with
// these declarations.
#pragma push_macro("ARM64_FPSR")
#pragma push_macro("ARM64_FPCR")
#undef ARM64_FPSR
#undef ARM64_FPCR

// ARM64 registers

CV_REGISTER(ARM64_NOREG, 0)

// General purpose 32-bit integer registers

CV_REGISTER(ARM64_W0, 10)
CV_REGISTER(ARM64_W1, 11)
CV_REGISTER(ARM64_W2, 12)
CV_REGISTER(ARM64_W3, 13)
CV_REGISTER(ARM64_W4, 14)
CV_REGISTER(ARM64_W5, 15)
CV_REGISTER(ARM64_W6, 16)
CV_REGISTER(ARM64_W7, 17)
CV_REGISTER(ARM64_W8, 18)
CV_REGISTER(ARM64_W9, 19)
CV_REGISTER(ARM64_W10, 20)
CV_REGISTER(ARM64_W11, 21)
CV_REGISTER(ARM64_W12, 22)
CV_REGISTER(ARM64_W13, 23)
CV_REGISTER(ARM64_W14, 24)
CV_REGISTER(ARM64_W15, 25)
CV_REGISTER(ARM64_W16, 26)
CV_REGISTER(ARM64_W17, 27)
CV_REGISTER(ARM64_W18, 28)
CV_REGISTER(ARM64_W19, 29)
CV_REGISTER(ARM64_W20, 30)
CV_REGISTER(ARM64_W21, 31)
CV_REGISTER(ARM64_W22, 32)
CV_REGISTER(ARM64_W23, 33)
CV_REGISTER(ARM64_W24, 34)
CV_REGISTER(ARM64_W25, 35)
CV_REGISTER(ARM64_W26, 36)
CV_REGISTER(ARM64_W27, 37)
CV_REGISTER(ARM64_W28, 38)
CV_REGISTER(ARM64_W29, 39)
CV_REGISTER(ARM64_W30, 40)
CV_REGISTER(ARM64_WZR, 41)

// General purpose 64-bit integer registers

CV_REGISTER(ARM64_X0, 50)
CV_REGISTER(ARM64_X1, 51)
CV_REGISTER(ARM64_X2, 52)
CV_REGISTER(ARM64_X3, 53)
CV_REGISTER(ARM64_X4, 54)
CV_REGISTER(ARM64_X5, 55)
CV_REGISTER(ARM64_X6, 56)
CV_REGISTER(ARM64_X7, 57)
CV_REGISTER(ARM64_X8, 58)
CV_REGISTER(ARM64_X9, 59)
CV_REGISTER(ARM64_X10, 60)
CV_REGISTER(ARM64_X11, 61)
CV_REGISTER(ARM64_X12, 62)
CV_REGISTER(ARM64_X13, 63)
CV_REGISTER(ARM64_X14, 64)
CV_REGISTER(ARM64_X15, 65)
CV_REGISTER(ARM64_X16, 66)
CV_REGISTER(ARM64_X17, 67)
CV_REGISTER(ARM64_X18, 68)
CV_REGISTER(ARM64_X19, 69)
CV_REGISTER(ARM64_X20, 70)
CV_REGISTER(ARM64_X21, 71)
CV_REGISTER(ARM64_X22, 72)
CV_REGISTER(ARM64_X23, 73)
CV_REGISTER(ARM64_X24, 74)
CV_REGISTER(ARM64_X25, 75)
CV_REGISTER(ARM64_X26, 76)
CV_REGISTER(ARM64_X27, 77)
CV_REGISTER(ARM64_X28, 78)
CV_REGISTER(ARM64_FP, 79)
CV_REGISTER(ARM64_LR, 80)
CV_REGISTER(ARM64_SP, 81)
CV_REGISTER(ARM64_ZR, 82)

// Status register

CV_REGISTER(ARM64_NZCV, 90)

// 32-bit floating point registers

CV_REGISTER(ARM64_S0, 100)
CV_REGISTER(ARM64_S1, 101)
CV_REGISTER(ARM64_S2, 102)
CV_REGISTER(ARM64_S3, 103)
CV_REGISTER(ARM64_S4, 104)
CV_REGISTER(ARM64_S5, 105)
CV_REGISTER(ARM64_S6, 106)
CV_REGISTER(ARM64_S7, 107)
CV_REGISTER(ARM64_S8, 108)
CV_REGISTER(ARM64_S9, 109)
CV_REGISTER(ARM64_S10, 110)
CV_REGISTER(ARM64_S11, 111)
CV_REGISTER(ARM64_S12, 112)
CV_REGISTER(ARM64_S13, 113)
CV_REGISTER(ARM64_S14, 114)
CV_REGISTER(ARM64_S15, 115)
CV_REGISTER(ARM64_S16, 116)
CV_REGISTER(ARM64_S17, 117)
CV_REGISTER(ARM64_S18, 118)
CV_REGISTER(ARM64_S19, 119)
CV_REGISTER(ARM64_S20, 120)
CV_REGISTER(ARM64_S21, 121)
CV_REGISTER(ARM64_S22, 122)
CV_REGISTER(ARM64_S23, 123)
CV_REGISTER(ARM64_S24, 124)
CV_REGISTER(ARM64_S25, 125)
CV_REGISTER(ARM64_S26, 126)
CV_REGISTER(ARM64_S27, 127)
CV_REGISTER(ARM64_S28, 128)
CV_REGISTER(ARM64_S29, 129)
CV_REGISTER(ARM64_S30, 130)
CV_REGISTER(ARM64_S31, 131)

// 64-bit floating point registers

CV_REGISTER(ARM64_D0, 140)
CV_REGISTER(ARM64_D1, 141)
CV_REGISTER(ARM64_D2, 142)
CV_REGISTER(ARM64_D3, 143)
CV_REGISTER(ARM64_D4, 144)
CV_REGISTER(ARM64_D5, 145)
CV_REGISTER(ARM64_D6, 146)
CV_REGISTER(ARM64_D7, 147)
CV_REGISTER(ARM64_D8, 148)
CV_REGISTER(ARM64_D9, 149)
CV_REGISTER(ARM64_D10, 150)
CV_REGISTER(ARM64_D11, 151)
CV_REGISTER(ARM64_D12, 152)
CV_REGISTER(ARM64_D13, 153)
CV_REGISTER(ARM64_D14, 154)
CV_REGISTER(ARM64_D15, 155)
CV_REGISTER(ARM64_D16, 156)
CV_REGISTER(ARM64_D17, 157)
CV_REGISTER(ARM64_D18, 158)
CV_REGISTER(ARM64_D19, 159)
CV_REGISTER(ARM64_D20, 160)
CV_REGISTER(ARM64_D21, 161)
CV_REGISTER(ARM64_D22, 162)
CV_REGISTER(ARM64_D23, 163)
CV_REGISTER(ARM64_D24, 164)
CV_REGISTER(ARM64_D25, 165)
CV_REGISTER(ARM64_D26, 166)
CV_REGISTER(ARM64_D27, 167)
CV_REGISTER(ARM64_D28, 168)
CV_REGISTER(ARM64_D29, 169)
CV_REGISTER(ARM64_D30, 170)
CV_REGISTER(ARM64_D31, 171)

// 128-bit SIMD registers

CV_REGISTER(ARM64_Q0, 180)
CV_REGISTER(ARM64_Q1, 181)
CV_REGISTER(ARM64_Q2, 182)
CV_REGISTER(ARM64_Q3, 183)
CV_REGISTER(ARM64_Q4, 184)
CV_REGISTER(ARM64_Q5, 185)
CV_REGISTER(ARM64_Q6, 186)
CV_REGISTER(ARM64_Q7, 187)
CV_REGISTER(ARM64_Q8, 188)
CV_REGISTER(ARM64_Q9, 189)
CV_REGISTER(ARM64_Q10, 190)
CV_REGISTER(ARM64_Q11, 191)
CV_REGISTER(ARM64_Q12, 192)
CV_REGISTER(ARM64_Q13, 193)
CV_REGISTER(ARM64_Q14, 194)
CV_REGISTER(ARM64_Q15, 195)
CV_REGISTER(ARM64_Q16, 196)
CV_REGISTER(ARM64_Q17, 197)
CV_REGISTER(ARM64_Q18, 198)
CV_REGISTER(ARM64_Q19, 199)
CV_REGISTER(ARM64_Q20, 200)
CV_REGISTER(ARM64_Q21, 201)
CV_REGISTER(ARM64_Q22, 202)
CV_REGISTER(ARM64_Q23, 203)
CV_REGISTER(ARM64_Q24, 204)
CV_REGISTER(ARM64_Q25, 205)
CV_REGISTER(ARM64_Q26, 206)
CV_REGISTER(ARM64_Q27, 207)
CV_REGISTER(ARM64_Q28, 208)
CV_REGISTER(ARM64_Q29, 209)
CV_REGISTER(ARM64_Q30, 210)
CV_REGISTER(ARM64_Q31, 211)

// Floating point status register

CV_REGISTER(ARM64_FPSR, 220)
CV_REGISTER(ARM64_FPCR, 221)

// 8-bit floating point registers

CV_REGISTER(ARM64_B0, 230)
CV_REGISTER(ARM64_B1, 231)
CV_REGISTER(ARM64_B2, 232)
CV_REGISTER(ARM64_B3, 233)
CV_REGISTER(ARM64_B4, 234)
CV_REGISTER(ARM64_B5, 235)
CV_REGISTER(ARM64_B6, 236)
CV_REGISTER(ARM64_B7, 237)
CV_REGISTER(ARM64_B8, 238)
CV_REGISTER(ARM64_B9, 239)
CV_REGISTER(ARM64_B10, 240)
CV_REGISTER(ARM64_B11, 241)
CV_REGISTER(ARM64_B12, 242)
CV_REGISTER(ARM64_B13, 243)
CV_REGISTER(ARM64_B14, 244)
CV_REGISTER(ARM64_B15, 245)
CV_REGISTER(ARM64_B16, 246)
CV_REGISTER(ARM64_B17, 247)
CV_REGISTER(ARM64_B18, 248)
CV_REGISTER(ARM64_B19, 249)
CV_REGISTER(ARM64_B20, 250)
CV_REGISTER(ARM64_B21, 251)
CV_REGISTER(ARM64_B22, 252)
CV_REGISTER(ARM64_B23, 253)
CV_REGISTER(ARM64_B24, 254)
CV_REGISTER(ARM64_B25, 255)
CV_REGISTER(ARM64_B26, 256)
CV_REGISTER(ARM64_B27, 257)
CV_REGISTER(ARM64_B28, 258)
CV_REGISTER(ARM64_B29, 259)
CV_REGISTER(ARM64_B30, 260)
CV_REGISTER(ARM64_B31, 261)

// 16-bit floating point registers

CV_REGISTER(ARM64_H0, 270)
CV_REGISTER(ARM64_H1, 271)
CV_REGISTER(ARM64_H2, 272)
CV_REGISTER(ARM64_H3, 273)
CV_REGISTER(ARM64_H4, 274)
CV_REGISTER(ARM64_H5, 275)
CV_REGISTER(ARM64_H6, 276)
CV_REGISTER(ARM64_H7, 277)
CV_REGISTER(ARM64_H8, 278)
CV_REGISTER(ARM64_H9, 279)
CV_REGISTER(ARM64_H10, 280)
CV_REGISTER(ARM64_H11, 281)
CV_REGISTER(ARM64_H12, 282)
CV_REGISTER(ARM64_H13, 283)
CV_REGISTER(ARM64_H14, 284)
CV_REGISTER(ARM64_H15, 285)
CV_REGISTER(ARM64_H16, 286)
CV_REGISTER(ARM64_H17, 287)
CV_REGISTER(ARM64_H18, 288)
CV_REGISTER(ARM64_H19, 289)
CV_REGISTER(ARM64_H20, 290)
CV_REGISTER(ARM64_H21, 291)
CV_REGISTER(ARM64_H22, 292)
CV_REGISTER(ARM64_H23, 293)
CV_REGISTER(ARM64_H24, 294)
CV_REGISTER(ARM64_H25, 295)
CV_REGISTER(ARM64_H26, 296)
CV_REGISTER(ARM64_H27, 297)
CV_REGISTER(ARM64_H28, 298)
CV_REGISTER(ARM64_H29, 299)
CV_REGISTER(ARM64_H30, 300)
CV_REGISTER(ARM64_H31, 301)

#pragma pop_macro("ARM64_FPSR")
#pragma pop_macro("ARM64_FPCR")

#endif // defined(CV_REGISTERS_ALL) || defined(CV_REGISTERS_ARM64)
//===- ContinuationRecordBuilder.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CONTINUATIONRECORDBUILDER_H
#define LLVM_DEBUGINFO_CODEVIEW_CONTINUATIONRECORDBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include <cstdint>
#include <vector>

namespace llvm {
namespace codeview {
class TypeIndex;
enum class ContinuationRecordKind { FieldList, MethodOverloadList };

class ContinuationRecordBuilder {
  SmallVector<uint32_t, 4> SegmentOffsets;
  std::optional<ContinuationRecordKind> Kind;
  AppendingBinaryByteStream Buffer;
  BinaryStreamWriter SegmentWriter;
  TypeRecordMapping Mapping;
  ArrayRef<uint8_t> InjectedSegmentBytes;

  uint32_t getCurrentSegmentLength() const;

  void insertSegmentEnd(uint32_t Offset);
  CVType createSegmentRecord(uint32_t OffBegin, uint32_t OffEnd,
                             std::optional<TypeIndex> RefersTo);

public:
  ContinuationRecordBuilder();
  ~ContinuationRecordBuilder();

  void begin(ContinuationRecordKind RecordKind);

  // This template is explicitly instantiated in the implementation file for all
  // supported types.  The method itself is ugly, so inlining it into the header
  // file clutters an otherwise straightforward interface.
  template <typename RecordType> void writeMemberType(RecordType &Record);

  std::vector<CVType> end(TypeIndex Index);
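
  // Illustrative lifecycle (a sketch; `Member` is an assumed
  // DataMemberRecord and `NextIndex` the caller's next free TypeIndex):
  //
  //   ContinuationRecordBuilder CRB;
  //   CRB.begin(ContinuationRecordKind::FieldList);
  //   CRB.writeMemberType(Member);           // repeat once per member
  //   std::vector<CVType> Records = CRB.end(NextIndex);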
};
} // namespace codeview
} // namespace llvm

#endif
//===- SymbolRecordHelpers.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDHELPERS_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLRECORDHELPERS_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"

namespace llvm {
namespace codeview {
/// Return true if this symbol opens a scope. This implies that the symbol has
/// "parent" and "end" fields, which contain the offset of the S_END or
/// S_INLINESITE_END record.
inline bool symbolOpensScope(SymbolKind Kind) {
  switch (Kind) {
  case SymbolKind::S_GPROC32:
  case SymbolKind::S_LPROC32:
  case SymbolKind::S_LPROC32_ID:
  case SymbolKind::S_GPROC32_ID:
  case SymbolKind::S_BLOCK32:
  case SymbolKind::S_SEPCODE:
  case SymbolKind::S_THUNK32:
  case SymbolKind::S_INLINESITE:
  case SymbolKind::S_INLINESITE2:
    return true;
  default:
    break;
  }
  return false;
}

/// Return true if this symbol ends a scope.
inline bool symbolEndsScope(SymbolKind Kind) {
  switch (Kind) {
  case SymbolKind::S_END:
  case SymbolKind::S_PROC_ID_END:
  case SymbolKind::S_INLINESITE_END:
    return true;
  default:
    break;
  }
  return false;
}

/// Given a symbol P for which symbolOpensScope(P) == true, return the
/// corresponding end offset, or, for getScopeParentOffset, the offset of the
/// enclosing parent scope.
uint32_t getScopeEndOffset(const CVSymbol &Symbol);
uint32_t getScopeParentOffset(const CVSymbol &Symbol);

CVSymbolArray limitSymbolArrayToScope(const CVSymbolArray &Symbols,
                                      uint32_t ScopeBegin);
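
// Illustrative scope walk (a sketch): narrow a full symbol stream to one
// scope.  `Symbols` and `Offset` are assumptions; the record at `Offset`
// must satisfy symbolOpensScope().
//
//   CVSymbolArray Scope = limitSymbolArrayToScope(Symbols, Offset);
//   for (const CVSymbol &Sym : Scope)
//     visit(Sym);  // hypothetical per-record handler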

} // namespace codeview
} // namespace llvm

#endif
//===- GUID.h ----------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_GUID_H
#define LLVM_DEBUGINFO_CODEVIEW_GUID_H

#include <cstdint>
#include <cstring>

namespace llvm {
class raw_ostream;

namespace codeview {

/// This represents the 'GUID' type from windows.h.
struct GUID {
  uint8_t Guid[16];
};

inline bool operator==(const GUID &LHS, const GUID &RHS) {
  return 0 == ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid));
}

inline bool operator<(const GUID &LHS, const GUID &RHS) {
  return ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid)) < 0;
}

inline bool operator<=(const GUID &LHS, const GUID &RHS) {
  return ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid)) <= 0;
}

inline bool operator>(const GUID &LHS, const GUID &RHS) {
  return !(LHS <= RHS);
}

inline bool operator>=(const GUID &LHS, const GUID &RHS) {
  return !(LHS < RHS);
}

inline bool operator!=(const GUID &LHS, const GUID &RHS) {
  return !(LHS == RHS);
}

raw_ostream &operator<<(raw_ostream &OS, const GUID &Guid);

} // namespace codeview
} // namespace llvm

#endif
//===- SymbolVisitorCallbacks.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H

#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {

class SymbolVisitorCallbacks {
  friend class CVSymbolVisitor;

public:
  virtual ~SymbolVisitorCallbacks() = default;

  /// Action to take on unknown symbols. By default, they are ignored.
  virtual Error visitUnknownSymbol(CVSymbol &Record) {
    return Error::success();
  }

  /// Paired begin/end actions for all symbols. Receives all record data,
  /// including the fixed-length record prefix.  visitSymbolBegin() should
  /// return the type of the Symbol, or an error if it cannot be determined.
  virtual Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) {
    return Error::success();
  }
  virtual Error visitSymbolBegin(CVSymbol &Record) { return Error::success(); }
  virtual Error visitSymbolEnd(CVSymbol &Record) { return Error::success(); }
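
  // Illustrative subclass (a sketch): count procedure records by overriding
  // the generated ProcSym hook defined by the macro block below.
  //
  //   struct ProcCounter : public SymbolVisitorCallbacks {
  //     unsigned Count = 0;
  //     Error visitKnownRecord(CVSymbol &CVR, ProcSym &Proc) override {
  //       ++Count;
  //       return Error::success();
  //     }
  //   };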

#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
  virtual Error visitKnownRecord(CVSymbol &CVR, Name &Record) {                \
    return Error::success();                                                   \
  }
#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
};
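
// Example (a minimal sketch; `SymbolCounter` is a hypothetical name): count
// records by overriding the begin callback and leaving the rest defaulted.
//
//   class SymbolCounter : public SymbolVisitorCallbacks {
//   public:
//     uint64_t Count = 0;
//     Error visitSymbolBegin(CVSymbol &Record) override {
//       ++Count;
//       return Error::success();
//     }
//   };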

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLVISITORCALLBACKS_H
//===- TypeVisitorCallbacks.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H

#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {

class TypeVisitorCallbacks {
public:
  virtual ~TypeVisitorCallbacks() = default;

  /// Action to take on unknown types. By default, they are ignored.
  virtual Error visitUnknownType(CVType &Record) { return Error::success(); }
  /// Paired begin/end actions for all types. Receives all record data,
  /// including the fixed-length record prefix.  visitTypeBegin() should return
  /// the type of the Record, or an error if it cannot be determined.  Exactly
  /// one of the two visitTypeBegin methods will be called, depending on whether
  /// records are being visited sequentially or randomly.  An implementation
  /// should be prepared to handle both (or assert if it can't handle random
  /// access visitation).
  virtual Error visitTypeBegin(CVType &Record) { return Error::success(); }
  virtual Error visitTypeBegin(CVType &Record, TypeIndex Index) {
    return Error::success();
  }
  virtual Error visitTypeEnd(CVType &Record) { return Error::success(); }

  virtual Error visitUnknownMember(CVMemberRecord &Record) {
    return Error::success();
  }

  virtual Error visitMemberBegin(CVMemberRecord &Record) {
    return Error::success();
  }

  virtual Error visitMemberEnd(CVMemberRecord &Record) {
    return Error::success();
  }

#define TYPE_RECORD(EnumName, EnumVal, Name)                                   \
  virtual Error visitKnownRecord(CVType &CVR, Name##Record &Record) {          \
    return Error::success();                                                   \
  }
#define MEMBER_RECORD(EnumName, EnumVal, Name)                                 \
  virtual Error visitKnownMember(CVMemberRecord &CVM, Name##Record &Record) {  \
    return Error::success();                                                   \
  }

#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
#undef TYPE_RECORD
#undef TYPE_RECORD_ALIAS
#undef MEMBER_RECORD
#undef MEMBER_RECORD_ALIAS
};
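
// Example (a minimal sketch; `PointerCounter` is a hypothetical name): count
// LF_POINTER records by overriding the matching visitKnownRecord overload.
//
//   class PointerCounter : public TypeVisitorCallbacks {
//   public:
//     unsigned Count = 0;
//     Error visitKnownRecord(CVType &CVR, PointerRecord &Record) override {
//       ++Count;
//       return Error::success();
//     }
//   };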

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEVISITORCALLBACKS_H
//===-- SymbolDumpDelegate.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
#include <cstdint>

namespace llvm {
namespace codeview {

class SymbolDumpDelegate : public SymbolVisitorDelegate {
public:
  ~SymbolDumpDelegate() override = default;

  virtual void printRelocatedField(StringRef Label, uint32_t RelocOffset,
                                   uint32_t Offset,
                                   StringRef *RelocSym = nullptr) = 0;
  virtual void printBinaryBlockWithRelocs(StringRef Label,
                                          ArrayRef<uint8_t> Block) = 0;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_SYMBOLDUMPDELEGATE_H
//===- CVSymbolVisitor.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_CVSYMBOLVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_CVSYMBOLVISITOR_H

#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
class SymbolVisitorCallbacks;

class CVSymbolVisitor {
public:
  struct FilterOptions {
    std::optional<uint32_t> SymbolOffset;
    std::optional<uint32_t> ParentRecursiveDepth;
    std::optional<uint32_t> ChildRecursiveDepth;
  };

  CVSymbolVisitor(SymbolVisitorCallbacks &Callbacks);

  Error visitSymbolRecord(CVSymbol &Record);
  Error visitSymbolRecord(CVSymbol &Record, uint32_t Offset);
  Error visitSymbolStream(const CVSymbolArray &Symbols);
  Error visitSymbolStream(const CVSymbolArray &Symbols, uint32_t InitialOffset);
  Error visitSymbolStreamFiltered(const CVSymbolArray &Symbols,
                                  const FilterOptions &Filter);

private:
  SymbolVisitorCallbacks &Callbacks;
};
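
// Example usage (a sketch, assuming `Syms` is a populated CVSymbolArray and
// `Callbacks` is an instance of a SymbolVisitorCallbacks subclass):
//
//   CVSymbolVisitor Visitor(Callbacks);
//   if (Error E = Visitor.visitSymbolStream(Syms))
//     return E; // or consumeError(std::move(E))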

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_CVSYMBOLVISITOR_H
//===- LazyRandomTypeCollection.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/StringSaver.h"
#include <cstdint>
#include <optional>
#include <vector>

namespace llvm {
namespace codeview {

/// Provides amortized O(1) random access to a CodeView type stream.
/// Normally to access a type from a type stream, you must know its byte
/// offset into the type stream, because type records are variable-length.
/// However, this is not the way we prefer to access them.  For example, given
/// a symbol record one of the fields may be the TypeIndex of the symbol's
/// type record.  Or given a type record such as an array type, there might
/// be a TypeIndex for the element type.  Sequential access is perfect when
/// we're just dumping every entry, but it's very poor for real world usage.
///
/// Type streams in PDBs contain an additional field which is a list of pairs
/// containing indices and their corresponding offsets, roughly every 8KB of
/// record data.  This general idea need not be confined to PDBs though.  By
/// supplying such an array, the producer of a type stream can allow the
/// consumer much better access time, because the consumer can find the nearest
/// index in this array, and do a linear scan forward only from there.
///
/// LazyRandomTypeCollection implements this algorithm, but additionally goes
/// one step further by caching offsets of every record that has been visited at
/// least once.  This way, even repeated visits of the same record will never
/// require more than one linear scan.  For a type stream of N elements divided
/// into M chunks of roughly equal size, this yields a worst case lookup time
/// of O(N/M) and an amortized time of O(1).
class LazyRandomTypeCollection : public TypeCollection {
  using PartialOffsetArray = FixedStreamArray<TypeIndexOffset>;

  struct CacheEntry {
    CVType Type;
    uint32_t Offset;
    StringRef Name;
  };

public:
  explicit LazyRandomTypeCollection(uint32_t RecordCountHint);
  LazyRandomTypeCollection(StringRef Data, uint32_t RecordCountHint);
  LazyRandomTypeCollection(ArrayRef<uint8_t> Data, uint32_t RecordCountHint);
  LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint,
                           PartialOffsetArray PartialOffsets);
  LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint);

  void reset(ArrayRef<uint8_t> Data, uint32_t RecordCountHint);
  void reset(StringRef Data, uint32_t RecordCountHint);
  void reset(BinaryStreamReader &Reader, uint32_t RecordCountHint);

  uint32_t getOffsetOfType(TypeIndex Index);

  std::optional<CVType> tryGetType(TypeIndex Index);

  CVType getType(TypeIndex Index) override;
  StringRef getTypeName(TypeIndex Index) override;
  bool contains(TypeIndex Index) override;
  uint32_t size() override;
  uint32_t capacity() override;
  std::optional<TypeIndex> getFirst() override;
  std::optional<TypeIndex> getNext(TypeIndex Prev) override;
  bool replaceType(TypeIndex &Index, CVType Data, bool Stabilize) override;

private:
  Error ensureTypeExists(TypeIndex Index);
  void ensureCapacityFor(TypeIndex Index);

  Error visitRangeForType(TypeIndex TI);
  Error fullScanForType(TypeIndex TI);
  void visitRange(TypeIndex Begin, uint32_t BeginOffset, TypeIndex End);

  /// Number of actual records.
  uint32_t Count = 0;

  /// The largest type index which we've visited.
  TypeIndex LargestTypeIndex = TypeIndex::None();

  BumpPtrAllocator Allocator;
  StringSaver NameStorage;

  /// The type array to allow random access visitation of.
  CVTypeArray Types;

  std::vector<CacheEntry> Records;

  /// An array of index offsets for the given type stream, allowing log(N)
  /// lookups of a type record by index.  Unlike the cache of visited records
  /// above, this only contains offsets for some type indices, some of which
  /// may never have been visited.
  PartialOffsetArray PartialOffsets;
};
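
// Example (a sketch; `TypeData` stands for a buffer of raw type records):
//
//   LazyRandomTypeCollection Types(TypeData, /*RecordCountHint=*/1000);
//   TypeIndex TI(TypeIndex::FirstNonSimpleIndex);
//   if (std::optional<CVType> T = Types.tryGetType(TI))
//     StringRef Name = Types.getTypeName(TI);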

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
//===- DebugUnknownSubsection.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGUNKNOWNSUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGUNKNOWNSUBSECTION_H

#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamRef.h"

namespace llvm {
namespace codeview {

class DebugUnknownSubsectionRef final : public DebugSubsectionRef {
public:
  DebugUnknownSubsectionRef(DebugSubsectionKind Kind, BinaryStreamRef Data)
      : DebugSubsectionRef(Kind), Data(Data) {}

  BinaryStreamRef getData() const { return Data; }

private:
  BinaryStreamRef Data;
};
} // end namespace codeview
} // end namespace llvm

#endif
//===- SymbolDeserializer.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_SYMBOLDESERIALIZER_H
#define LLVM_DEBUGINFO_CODEVIEW_SYMBOLDESERIALIZER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/SymbolRecordMapping.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorCallbacks.h"
#include "llvm/DebugInfo/CodeView/SymbolVisitorDelegate.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
class SymbolVisitorDelegate;
class SymbolDeserializer : public SymbolVisitorCallbacks {
  struct MappingInfo {
    MappingInfo(ArrayRef<uint8_t> RecordData, CodeViewContainer Container)
        : Stream(RecordData, llvm::support::little), Reader(Stream),
          Mapping(Reader, Container) {}

    BinaryByteStream Stream;
    BinaryStreamReader Reader;
    SymbolRecordMapping Mapping;
  };

public:
  template <typename T> static Error deserializeAs(CVSymbol Symbol, T &Record) {
    // If we're just deserializing one record, then don't worry about alignment
    // as there's nothing that comes after.
    SymbolDeserializer S(nullptr, CodeViewContainer::ObjectFile);
    if (auto EC = S.visitSymbolBegin(Symbol))
      return EC;
    if (auto EC = S.visitKnownRecord(Symbol, Record))
      return EC;
    if (auto EC = S.visitSymbolEnd(Symbol))
      return EC;
    return Error::success();
  }
  template <typename T> static Expected<T> deserializeAs(CVSymbol Symbol) {
    T Record(static_cast<SymbolRecordKind>(Symbol.kind()));
    if (auto EC = deserializeAs<T>(Symbol, Record))
      return std::move(EC);
    return Record;
  }

  explicit SymbolDeserializer(SymbolVisitorDelegate *Delegate,
                              CodeViewContainer Container)
      : Delegate(Delegate), Container(Container) {}

  Error visitSymbolBegin(CVSymbol &Record, uint32_t Offset) override {
    return visitSymbolBegin(Record);
  }

  Error visitSymbolBegin(CVSymbol &Record) override {
    assert(!Mapping && "Already in a symbol mapping!");
    Mapping = std::make_unique<MappingInfo>(Record.content(), Container);
    return Mapping->Mapping.visitSymbolBegin(Record);
  }
  Error visitSymbolEnd(CVSymbol &Record) override {
    assert(Mapping && "Not in a symbol mapping!");
    auto EC = Mapping->Mapping.visitSymbolEnd(Record);
    Mapping.reset();
    return EC;
  }

#define SYMBOL_RECORD(EnumName, EnumVal, Name)                                 \
  Error visitKnownRecord(CVSymbol &CVR, Name &Record) override {               \
    return visitKnownRecordImpl(CVR, Record);                                  \
  }
#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"

private:
  template <typename T> Error visitKnownRecordImpl(CVSymbol &CVR, T &Record) {
    Record.RecordOffset =
        Delegate ? Delegate->getRecordOffset(Mapping->Reader) : 0;
    if (auto EC = Mapping->Mapping.visitKnownRecord(CVR, Record))
      return EC;
    return Error::success();
  }

  SymbolVisitorDelegate *Delegate;
  CodeViewContainer Container;
  std::unique_ptr<MappingInfo> Mapping;
};
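
// Example (a sketch, assuming `Sym` holds an S_PUB32 record):
//
//   Expected<PublicSym32> PS =
//       SymbolDeserializer::deserializeAs<PublicSym32>(Sym);
//   if (!PS)
//     return PS.takeError();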
} // end namespace codeview
} // end namespace llvm

#endif
//===- DebugStringTableSubsection.h - CodeView String Table -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSTRINGTABLESUBSECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSTRINGTABLESUBSECTION_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Error.h"
#include <cstdint>

namespace llvm {

class BinaryStreamReader;

namespace codeview {

/// Represents a read-only view of a CodeView string table.  This is a very
/// simple flat buffer consisting of null-terminated strings, where strings
/// are retrieved by their offset in the buffer.  DebugStringTableSubsectionRef
/// does not own the underlying storage for the buffer.
class DebugStringTableSubsectionRef : public DebugSubsectionRef {
public:
  DebugStringTableSubsectionRef();

  static bool classof(const DebugSubsectionRef *S) {
    return S->kind() == DebugSubsectionKind::StringTable;
  }

  Error initialize(BinaryStreamRef Contents);
  Error initialize(BinaryStreamReader &Reader);

  Expected<StringRef> getString(uint32_t Offset) const;

  bool valid() const { return Stream.valid(); }

  BinaryStreamRef getBuffer() const { return Stream; }

private:
  BinaryStreamRef Stream;
};

/// Represents a read-write view of a CodeView string table.
/// DebugStringTableSubsection owns the underlying storage for the table, and is
/// capable of serializing the string table into a format understood by
/// DebugStringTableSubsectionRef.
class DebugStringTableSubsection : public DebugSubsection {
public:
  DebugStringTableSubsection();

  static bool classof(const DebugSubsection *S) {
    return S->kind() == DebugSubsectionKind::StringTable;
  }

  // If string S does not exist in the string table, insert it.
  // Returns the ID for S.
  uint32_t insert(StringRef S);

  // Return the ID for string S.  Assumes S exists in the table.
  uint32_t getIdForString(StringRef S) const;

  StringRef getStringForId(uint32_t Id) const;

  uint32_t calculateSerializedSize() const override;
  Error commit(BinaryStreamWriter &Writer) const override;

  uint32_t size() const;

  StringMap<uint32_t>::const_iterator begin() const {
    return StringToId.begin();
  }

  StringMap<uint32_t>::const_iterator end() const { return StringToId.end(); }

  std::vector<uint32_t> sortedIds() const;

private:
  DenseMap<uint32_t, StringRef> IdToString;
  StringMap<uint32_t> StringToId;
  uint32_t StringSize = 1;
};
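
// Example round-trip (a sketch):
//
//   DebugStringTableSubsection Table;
//   uint32_t Id = Table.insert("main.cpp");
//   assert(Table.getIdForString("main.cpp") == Id);
//   assert(Table.getStringForId(Id) == "main.cpp");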

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSTRINGTABLESUBSECTION_H
//===- DebugSubsectionRecord.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONRECORD_H
#define LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONRECORD_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>
#include <memory>

namespace llvm {

class BinaryStreamWriter;

namespace codeview {

class DebugSubsection;

// Corresponds to the `CV_DebugSSubsectionHeader_t` structure.
struct DebugSubsectionHeader {
  support::ulittle32_t Kind;   // codeview::DebugSubsectionKind enum
  support::ulittle32_t Length; // number of bytes occupied by this record.
};

class DebugSubsectionRecord {
public:
  DebugSubsectionRecord();
  DebugSubsectionRecord(DebugSubsectionKind Kind, BinaryStreamRef Data);

  static Error initialize(BinaryStreamRef Stream, DebugSubsectionRecord &Info);

  uint32_t getRecordLength() const;
  DebugSubsectionKind kind() const;
  BinaryStreamRef getRecordData() const;

private:
  DebugSubsectionKind Kind = DebugSubsectionKind::None;
  BinaryStreamRef Data;
};

class DebugSubsectionRecordBuilder {
public:
  DebugSubsectionRecordBuilder(std::shared_ptr<DebugSubsection> Subsection);

  /// Use this to copy existing subsections directly from source to destination.
  /// For example, line table subsections in an object file only need to be
  /// relocated before being copied into the PDB.
  DebugSubsectionRecordBuilder(const DebugSubsectionRecord &Contents);

  uint32_t calculateSerializedLength() const;
  Error commit(BinaryStreamWriter &Writer, CodeViewContainer Container) const;

private:
  /// The subsection to build. Will be null if Contents is non-empty.
  std::shared_ptr<DebugSubsection> Subsection;

  /// The bytes of the subsection. Only non-empty if Subsection is null.
  /// FIXME: Reduce the size of this.
  DebugSubsectionRecord Contents;
};

} // end namespace codeview

template <> struct VarStreamArrayExtractor<codeview::DebugSubsectionRecord> {
  Error operator()(BinaryStreamRef Stream, uint32_t &Length,
                   codeview::DebugSubsectionRecord &Info) {
    // FIXME: We need to pass the container type through to this function.  In
    // practice this isn't super important since the subsection header describes
    // its length and we can just skip it.  It's more important when writing.
    if (auto EC = codeview::DebugSubsectionRecord::initialize(Stream, Info))
      return EC;
    Length = alignTo(Info.getRecordLength(), 4);
    return Error::success();
  }
};

namespace codeview {

using DebugSubsectionArray = VarStreamArray<DebugSubsectionRecord>;
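
// Example (a sketch, assuming `Data` is a BinaryStreamRef over the contents
// of a .debug$S section):
//
//   BinaryStreamReader Reader(Data);
//   DebugSubsectionArray Subsections;
//   if (auto EC = Reader.readArray(Subsections, Reader.bytesRemaining()))
//     return EC;
//   for (const DebugSubsectionRecord &R : Subsections)
//     handle(R.kind(), R.getRecordData()); // `handle` is hypothetical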

} // end namespace codeview

} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_DEBUGSUBSECTIONRECORD_H
//===- FunctionId.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
#define LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H

#include <cinttypes>

namespace llvm {
namespace codeview {

class FunctionId {
public:
  FunctionId() : Index(0) {}

  explicit FunctionId(uint32_t Index) : Index(Index) {}

  uint32_t getIndex() const { return Index; }

private:
  uint32_t Index;
};

inline bool operator==(const FunctionId &A, const FunctionId &B) {
  return A.getIndex() == B.getIndex();
}

inline bool operator!=(const FunctionId &A, const FunctionId &B) {
  return A.getIndex() != B.getIndex();
}

inline bool operator<(const FunctionId &A, const FunctionId &B) {
  return A.getIndex() < B.getIndex();
}

inline bool operator<=(const FunctionId &A, const FunctionId &B) {
  return A.getIndex() <= B.getIndex();
}

inline bool operator>(const FunctionId &A, const FunctionId &B) {
  return A.getIndex() > B.getIndex();
}

inline bool operator>=(const FunctionId &A, const FunctionId &B) {
  return A.getIndex() >= B.getIndex();
}
} // end namespace codeview
} // end namespace llvm

#endif
//===-- llvm/BinaryFormat/Swift.h ---Swift Constants-------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//

#ifndef LLVM_BINARYFORMAT_SWIFT_H
#define LLVM_BINARYFORMAT_SWIFT_H

namespace llvm {
namespace binaryformat {

enum Swift5ReflectionSectionKind {
#define HANDLE_SWIFT_SECTION(KIND, MACHO, ELF, COFF) KIND,
#include "llvm/BinaryFormat/Swift.def"
#undef HANDLE_SWIFT_SECTION
  unknown,
  last = unknown
};
} // end of namespace binaryformat
} // end of namespace llvm

#endif
//===- WasmTraits.h - DenseMap traits for the Wasm structures ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides llvm::DenseMapInfo traits for the Wasm structures.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_WASMTRAITS_H
#define LLVM_BINARYFORMAT_WASMTRAITS_H

#include "llvm/ADT/Hashing.h"
#include "llvm/BinaryFormat/Wasm.h"

namespace llvm {

// Traits for using WasmSignature in a DenseMap.
template <> struct DenseMapInfo<wasm::WasmSignature, void> {
  static wasm::WasmSignature getEmptyKey() {
    wasm::WasmSignature Sig;
    Sig.State = wasm::WasmSignature::Empty;
    return Sig;
  }
  static wasm::WasmSignature getTombstoneKey() {
    wasm::WasmSignature Sig;
    Sig.State = wasm::WasmSignature::Tombstone;
    return Sig;
  }
  static unsigned getHashValue(const wasm::WasmSignature &Sig) {
    uintptr_t H = hash_value(Sig.State);
    for (auto Ret : Sig.Returns)
      H = hash_combine(H, Ret);
    for (auto Param : Sig.Params)
      H = hash_combine(H, Param);
    return H;
  }
  static bool isEqual(const wasm::WasmSignature &LHS,
                      const wasm::WasmSignature &RHS) {
    return LHS == RHS;
  }
};

// Traits for using WasmGlobalType in a DenseMap
template <> struct DenseMapInfo<wasm::WasmGlobalType, void> {
  static wasm::WasmGlobalType getEmptyKey() {
    return wasm::WasmGlobalType{1, true};
  }
  static wasm::WasmGlobalType getTombstoneKey() {
    return wasm::WasmGlobalType{2, true};
  }
  static unsigned getHashValue(const wasm::WasmGlobalType &GlobalType) {
    return hash_combine(GlobalType.Type, GlobalType.Mutable);
  }
  static bool isEqual(const wasm::WasmGlobalType &LHS,
                      const wasm::WasmGlobalType &RHS) {
    return LHS == RHS;
  }
};

// Traits for using WasmLimits in a DenseMap
template <> struct DenseMapInfo<wasm::WasmLimits, void> {
  static wasm::WasmLimits getEmptyKey() {
    return wasm::WasmLimits{0xff, 0xff, 0xff};
  }
  static wasm::WasmLimits getTombstoneKey() {
    return wasm::WasmLimits{0xee, 0xee, 0xee};
  }
  static unsigned getHashValue(const wasm::WasmLimits &Limits) {
    unsigned Hash = hash_value(Limits.Flags);
    Hash = hash_combine(Hash, Limits.Minimum);
    if (Limits.Flags & llvm::wasm::WASM_LIMITS_FLAG_HAS_MAX) {
      Hash = hash_combine(Hash, Limits.Maximum);
    }
    return Hash;
  }
  static bool isEqual(const wasm::WasmLimits &LHS,
                      const wasm::WasmLimits &RHS) {
    return LHS == RHS;
  }
};

// Traits for using WasmTableType in a DenseMap
template <> struct DenseMapInfo<wasm::WasmTableType, void> {
  static wasm::WasmTableType getEmptyKey() {
    return wasm::WasmTableType{
        0, DenseMapInfo<wasm::WasmLimits, void>::getEmptyKey()};
  }
  static wasm::WasmTableType getTombstoneKey() {
    return wasm::WasmTableType{
        1, DenseMapInfo<wasm::WasmLimits, void>::getTombstoneKey()};
  }
  static unsigned getHashValue(const wasm::WasmTableType &TableType) {
    return hash_combine(
        TableType.ElemType,
        DenseMapInfo<wasm::WasmLimits, void>::getHashValue(TableType.Limits));
  }
  static bool isEqual(const wasm::WasmTableType &LHS,
                      const wasm::WasmTableType &RHS) {
    return LHS == RHS;
  }
};
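
// Example (a sketch): with the traits above, Wasm structures can key a
// DenseMap directly.
//
//   DenseMap<wasm::WasmSignature, unsigned> SigIndices;
//   wasm::WasmSignature Sig; // default state is a valid (non-sentinel) key
//   SigIndices[Sig] = 0;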

} // end namespace llvm

#endif // LLVM_BINARYFORMAT_WASMTRAITS_H
//===- MsgPackReader.h - Simple MsgPack reader ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
///  \file
///  This is a MessagePack reader.
///
///  See https://github.com/msgpack/msgpack/blob/master/spec.md for the full
///  standard.
///
///  Typical usage:
///  \code
///  StringRef input = GetInput();
///  msgpack::Reader MPReader(input);
///  msgpack::Object Obj;
///
///  while (MPReader.read(Obj)) {
///    switch (Obj.Kind) {
///    case msgpack::Type::Int:
///      // Use Obj.Int
///      break;
///    // ...
///    }
///  }
///  \endcode
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MSGPACKREADER_H
#define LLVM_BINARYFORMAT_MSGPACKREADER_H

#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <cstdint>

namespace llvm {
namespace msgpack {

/// MessagePack types as defined in the standard, with the exception of Integer
/// being divided into a signed Int and unsigned UInt variant in order to map
/// directly to C++ types.
///
/// The types map onto corresponding union members of the \c Object struct.
enum class Type : uint8_t {
  Int,
  UInt,
  Nil,
  Boolean,
  Float,
  String,
  Binary,
  Array,
  Map,
  Extension,
  Empty, // Used by MsgPackDocument to represent an empty node
};

/// Extension types are composed of a user-defined type ID and an uninterpreted
/// sequence of bytes.
struct ExtensionType {
  /// User-defined extension type.
  int8_t Type;
  /// Raw bytes of the extension object.
  StringRef Bytes;
};

/// MessagePack object, represented as a tagged union of C++ types.
///
/// All types except \c Type::Nil (which has only one value, and so is
/// completely represented by the \c Kind itself) map to exactly one union
/// member.
struct Object {
  Type Kind;
  union {
    /// Value for \c Type::Int.
    int64_t Int;
    /// Value for \c Type::UInt.
    uint64_t UInt;
    /// Value for \c Type::Boolean.
    bool Bool;
    /// Value for \c Type::Float.
    double Float;
    /// Value for \c Type::String and \c Type::Binary.
    StringRef Raw;
    /// Value for \c Type::Array and \c Type::Map.
    size_t Length;
    /// Value for \c Type::Extension.
    ExtensionType Extension;
  };

  Object() : Kind(Type::Int), Int(0) {}
};

/// Reads MessagePack objects from memory, one at a time.
class Reader {
public:
  /// Construct a reader, keeping a reference to the \p InputBuffer.
  Reader(MemoryBufferRef InputBuffer);
  /// Construct a reader, keeping a reference to the \p Input.
  Reader(StringRef Input);

  Reader(const Reader &) = delete;
  Reader &operator=(const Reader &) = delete;

  /// Read one object from the input buffer, advancing past it.
  ///
  /// The \p Obj is updated with the kind of the object read, and the
  /// corresponding union member is updated.
  ///
  /// For the collection objects (Array and Map), only the length is read, and
  /// the caller must make an additional \c N calls (in the case of Array) or
  /// \c N*2 calls (in the case of Map) to \c read to retrieve the collection
  /// elements.
  ///
  /// \param [out] Obj filled with next object on success.
  ///
  /// \returns true when an object was successfully read, false when at the
  /// end of the input (in which case \p Obj is not updated), otherwise an
  /// error.
  Expected<bool> read(Object &Obj);

private:
  MemoryBufferRef InputBuffer;
  StringRef::iterator Current;
  StringRef::iterator End;

  size_t remainingSpace() {
    // The rest of the code maintains the invariant that End >= Current, so
    // that this cast is always defined behavior.
    return static_cast<size_t>(End - Current);
  }

  template <class T> Expected<bool> readRaw(Object &Obj);
  template <class T> Expected<bool> readInt(Object &Obj);
  template <class T> Expected<bool> readUInt(Object &Obj);
  template <class T> Expected<bool> readLength(Object &Obj);
  template <class T> Expected<bool> readExt(Object &Obj);
  Expected<bool> createRaw(Object &Obj, uint32_t Size);
  Expected<bool> createExt(Object &Obj, uint32_t Size);
};
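
// Example (a sketch): reading a MessagePack array element by element, per
// the contract of read() described above.
//
//   msgpack::Reader R(Input);
//   msgpack::Object O;
//   if (cantFail(R.read(O)) && O.Kind == msgpack::Type::Array) {
//     for (size_t I = 0, E = O.Length; I != E; ++I) {
//       msgpack::Object Elem;
//       cantFail(R.read(Elem)); // one additional read per element
//     }
//   }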

} // end namespace msgpack
} // end namespace llvm

#endif // LLVM_BINARYFORMAT_MSGPACKREADER_H
//===- llvm/BinaryFormat/ELF.h - ELF constants and structures ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header contains common, non-processor-specific data structures and
// constants for the ELF file format.
//
// The details of the ELF32 bits in this file are largely based on the Tool
// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
// Version 1.2, May 1995. The ELF64 stuff is based on ELF-64 Object File Format
// Version 1.5, Draft 2, May 1998 as well as OpenBSD header files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_ELF_H
#define LLVM_BINARYFORMAT_ELF_H

#include "llvm/ADT/StringRef.h"
#include <cstdint>
#include <cstring>

namespace llvm {
namespace ELF {

using Elf32_Addr = uint32_t; // Program address
using Elf32_Off = uint32_t;  // File offset
using Elf32_Half = uint16_t;
using Elf32_Word = uint32_t;
using Elf32_Sword = int32_t;

using Elf64_Addr = uint64_t;
using Elf64_Off = uint64_t;
using Elf64_Half = uint16_t;
using Elf64_Word = uint32_t;
using Elf64_Sword = int32_t;
using Elf64_Xword = uint64_t;
using Elf64_Sxword = int64_t;

// Object file magic string.
static const char ElfMagic[] = {0x7f, 'E', 'L', 'F', '\0'};

// e_ident size and indices.
enum {
  EI_MAG0 = 0,       // File identification index.
  EI_MAG1 = 1,       // File identification index.
  EI_MAG2 = 2,       // File identification index.
  EI_MAG3 = 3,       // File identification index.
  EI_CLASS = 4,      // File class.
  EI_DATA = 5,       // Data encoding.
  EI_VERSION = 6,    // File version.
  EI_OSABI = 7,      // OS/ABI identification.
  EI_ABIVERSION = 8, // ABI version.
  EI_PAD = 9,        // Start of padding bytes.
  EI_NIDENT = 16     // Number of bytes in e_ident.
};

struct Elf32_Ehdr {
  unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
  Elf32_Half e_type;                // Type of file (see ET_* below)
  Elf32_Half e_machine;   // Required architecture for this file (see EM_*)
  Elf32_Word e_version;   // Must be equal to 1
  Elf32_Addr e_entry;     // Address to jump to in order to start program
  Elf32_Off e_phoff;      // Program header table's file offset, in bytes
  Elf32_Off e_shoff;      // Section header table's file offset, in bytes
  Elf32_Word e_flags;     // Processor-specific flags
  Elf32_Half e_ehsize;    // Size of ELF header, in bytes
  Elf32_Half e_phentsize; // Size of an entry in the program header table
  Elf32_Half e_phnum;     // Number of entries in the program header table
  Elf32_Half e_shentsize; // Size of an entry in the section header table
  Elf32_Half e_shnum;     // Number of entries in the section header table
  Elf32_Half e_shstrndx;  // Sect hdr table index of sect name string table

  bool checkMagic() const {
    return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
  }

  unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
  unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};

// 64-bit ELF header. Fields are the same as for ELF32, but with different
// types (see above).
struct Elf64_Ehdr {
  unsigned char e_ident[EI_NIDENT];
  Elf64_Half e_type;
  Elf64_Half e_machine;
  Elf64_Word e_version;
  Elf64_Addr e_entry;
  Elf64_Off e_phoff;
  Elf64_Off e_shoff;
  Elf64_Word e_flags;
  Elf64_Half e_ehsize;
  Elf64_Half e_phentsize;
  Elf64_Half e_phnum;
  Elf64_Half e_shentsize;
  Elf64_Half e_shnum;
  Elf64_Half e_shstrndx;

  bool checkMagic() const {
    return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
  }

  unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
  unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
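
// Example (a sketch, assuming `Buf` points at at least sizeof(Elf64_Ehdr)
// bytes of file data):
//
//   Elf64_Ehdr Hdr;
//   std::memcpy(&Hdr, Buf, sizeof(Hdr));
//   if (Hdr.checkMagic() && Hdr.getFileClass() == ELFCLASS64) {
//     // plausible 64-bit ELF header; e_machine selects the architecture
//   }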

// File types.
// See current registered ELF types at:
//    http://www.sco.com/developers/gabi/latest/ch4.eheader.html
enum {
  ET_NONE = 0,        // No file type
  ET_REL = 1,         // Relocatable file
  ET_EXEC = 2,        // Executable file
  ET_DYN = 3,         // Shared object file
  ET_CORE = 4,        // Core file
  ET_LOOS = 0xfe00,   // Beginning of operating system-specific codes
  ET_HIOS = 0xfeff,   // Operating system-specific
  ET_LOPROC = 0xff00, // Beginning of processor-specific codes
  ET_HIPROC = 0xffff  // Processor-specific
};

// Versioning
enum { EV_NONE = 0, EV_CURRENT = 1 };

// Machine architectures
// See current registered ELF machine architectures at:
//    http://www.uxsglobal.com/developers/gabi/latest/ch4.eheader.html
enum {
  EM_NONE = 0,           // No machine
  EM_M32 = 1,            // AT&T WE 32100
  EM_SPARC = 2,          // SPARC
  EM_386 = 3,            // Intel 386
  EM_68K = 4,            // Motorola 68000
  EM_88K = 5,            // Motorola 88000
  EM_IAMCU = 6,          // Intel MCU
  EM_860 = 7,            // Intel 80860
  EM_MIPS = 8,           // MIPS R3000
  EM_S370 = 9,           // IBM System/370
  EM_MIPS_RS3_LE = 10,   // MIPS RS3000 Little-endian
  EM_PARISC = 15,        // Hewlett-Packard PA-RISC
  EM_VPP500 = 17,        // Fujitsu VPP500
  EM_SPARC32PLUS = 18,   // Enhanced instruction set SPARC
  EM_960 = 19,           // Intel 80960
  EM_PPC = 20,           // PowerPC
  EM_PPC64 = 21,         // PowerPC64
  EM_S390 = 22,          // IBM System/390
  EM_SPU = 23,           // IBM SPU/SPC
  EM_V800 = 36,          // NEC V800
  EM_FR20 = 37,          // Fujitsu FR20
  EM_RH32 = 38,          // TRW RH-32
  EM_RCE = 39,           // Motorola RCE
  EM_ARM = 40,           // ARM
  EM_ALPHA = 41,         // DEC Alpha
  EM_SH = 42,            // Hitachi SH
  EM_SPARCV9 = 43,       // SPARC V9
  EM_TRICORE = 44,       // Siemens TriCore
  EM_ARC = 45,           // Argonaut RISC Core
  EM_H8_300 = 46,        // Hitachi H8/300
  EM_H8_300H = 47,       // Hitachi H8/300H
  EM_H8S = 48,           // Hitachi H8S
  EM_H8_500 = 49,        // Hitachi H8/500
  EM_IA_64 = 50,         // Intel IA-64 processor architecture
  EM_MIPS_X = 51,        // Stanford MIPS-X
  EM_COLDFIRE = 52,      // Motorola ColdFire
  EM_68HC12 = 53,        // Motorola M68HC12
  EM_MMA = 54,           // Fujitsu MMA Multimedia Accelerator
  EM_PCP = 55,           // Siemens PCP
  EM_NCPU = 56,          // Sony nCPU embedded RISC processor
  EM_NDR1 = 57,          // Denso NDR1 microprocessor
  EM_STARCORE = 58,      // Motorola Star*Core processor
  EM_ME16 = 59,          // Toyota ME16 processor
  EM_ST100 = 60,         // STMicroelectronics ST100 processor
  EM_TINYJ = 61,         // Advanced Logic Corp. TinyJ embedded processor family
  EM_X86_64 = 62,        // AMD x86-64 architecture
  EM_PDSP = 63,          // Sony DSP Processor
  EM_PDP10 = 64,         // Digital Equipment Corp. PDP-10
  EM_PDP11 = 65,         // Digital Equipment Corp. PDP-11
  EM_FX66 = 66,          // Siemens FX66 microcontroller
  EM_ST9PLUS = 67,       // STMicroelectronics ST9+ 8/16 bit microcontroller
  EM_ST7 = 68,           // STMicroelectronics ST7 8-bit microcontroller
  EM_68HC16 = 69,        // Motorola MC68HC16 Microcontroller
  EM_68HC11 = 70,        // Motorola MC68HC11 Microcontroller
  EM_68HC08 = 71,        // Motorola MC68HC08 Microcontroller
  EM_68HC05 = 72,        // Motorola MC68HC05 Microcontroller
  EM_SVX = 73,           // Silicon Graphics SVx
  EM_ST19 = 74,          // STMicroelectronics ST19 8-bit microcontroller
  EM_VAX = 75,           // Digital VAX
  EM_CRIS = 76,          // Axis Communications 32-bit embedded processor
  EM_JAVELIN = 77,       // Infineon Technologies 32-bit embedded processor
  EM_FIREPATH = 78,      // Element 14 64-bit DSP Processor
  EM_ZSP = 79,           // LSI Logic 16-bit DSP Processor
  EM_MMIX = 80,          // Donald Knuth's educational 64-bit processor
  EM_HUANY = 81,         // Harvard University machine-independent object files
  EM_PRISM = 82,         // SiTera Prism
  EM_AVR = 83,           // Atmel AVR 8-bit microcontroller
  EM_FR30 = 84,          // Fujitsu FR30
  EM_D10V = 85,          // Mitsubishi D10V
  EM_D30V = 86,          // Mitsubishi D30V
  EM_V850 = 87,          // NEC v850
  EM_M32R = 88,          // Mitsubishi M32R
  EM_MN10300 = 89,       // Matsushita MN10300
  EM_MN10200 = 90,       // Matsushita MN10200
  EM_PJ = 91,            // picoJava
  EM_OPENRISC = 92,      // OpenRISC 32-bit embedded processor
  EM_ARC_COMPACT = 93,   // ARC International ARCompact processor (old
                         // spelling/synonym: EM_ARC_A5)
  EM_XTENSA = 94,        // Tensilica Xtensa Architecture
  EM_VIDEOCORE = 95,     // Alphamosaic VideoCore processor
  EM_TMM_GPP = 96,       // Thompson Multimedia General Purpose Processor
  EM_NS32K = 97,         // National Semiconductor 32000 series
  EM_TPC = 98,           // Tenor Network TPC processor
  EM_SNP1K = 99,         // Trebia SNP 1000 processor
  EM_ST200 = 100,        // STMicroelectronics (www.st.com) ST200
  EM_IP2K = 101,         // Ubicom IP2xxx microcontroller family
  EM_MAX = 102,          // MAX Processor
  EM_CR = 103,           // National Semiconductor CompactRISC microprocessor
  EM_F2MC16 = 104,       // Fujitsu F2MC16
  EM_MSP430 = 105,       // Texas Instruments embedded microcontroller msp430
  EM_BLACKFIN = 106,     // Analog Devices Blackfin (DSP) processor
  EM_SE_C33 = 107,       // S1C33 Family of Seiko Epson processors
  EM_SEP = 108,          // Sharp embedded microprocessor
  EM_ARCA = 109,         // Arca RISC Microprocessor
  EM_UNICORE = 110,      // Microprocessor series from PKU-Unity Ltd. and MPRC
                         // of Peking University
  EM_EXCESS = 111,       // eXcess: 16/32/64-bit configurable embedded CPU
  EM_DXP = 112,          // Icera Semiconductor Inc. Deep Execution Processor
  EM_ALTERA_NIOS2 = 113, // Altera Nios II soft-core processor
  EM_CRX = 114,          // National Semiconductor CompactRISC CRX
  EM_XGATE = 115,        // Motorola XGATE embedded processor
  EM_C166 = 116,         // Infineon C16x/XC16x processor
  EM_M16C = 117,         // Renesas M16C series microprocessors
  EM_DSPIC30F = 118,     // Microchip Technology dsPIC30F Digital Signal
                         // Controller
  EM_CE = 119,           // Freescale Communication Engine RISC core
  EM_M32C = 120,         // Renesas M32C series microprocessors
  EM_TSK3000 = 131,      // Altium TSK3000 core
  EM_RS08 = 132,         // Freescale RS08 embedded processor
  EM_SHARC = 133,        // Analog Devices SHARC family of 32-bit DSP
                         // processors
  EM_ECOG2 = 134,        // Cyan Technology eCOG2 microprocessor
  EM_SCORE7 = 135,       // Sunplus S+core7 RISC processor
  EM_DSP24 = 136,        // New Japan Radio (NJR) 24-bit DSP Processor
  EM_VIDEOCORE3 = 137,   // Broadcom VideoCore III processor
  EM_LATTICEMICO32 = 138, // RISC processor for Lattice FPGA architecture
  EM_SE_C17 = 139,        // Seiko Epson C17 family
  EM_TI_C6000 = 140,      // The Texas Instruments TMS320C6000 DSP family
  EM_TI_C2000 = 141,      // The Texas Instruments TMS320C2000 DSP family
  EM_TI_C5500 = 142,      // The Texas Instruments TMS320C55x DSP family
  EM_MMDSP_PLUS = 160,    // STMicroelectronics 64bit VLIW Data Signal Processor
  EM_CYPRESS_M8C = 161,   // Cypress M8C microprocessor
  EM_R32C = 162,          // Renesas R32C series microprocessors
  EM_TRIMEDIA = 163,      // NXP Semiconductors TriMedia architecture family
  EM_HEXAGON = 164,       // Qualcomm Hexagon processor
  EM_8051 = 165,          // Intel 8051 and variants
  EM_STXP7X = 166,        // STMicroelectronics STxP7x family of configurable
                          // and extensible RISC processors
  EM_NDS32 = 167,         // Andes Technology compact code size embedded RISC
                          // processor family
  EM_ECOG1 = 168,         // Cyan Technology eCOG1X family
  EM_ECOG1X = 168,        // Cyan Technology eCOG1X family
  EM_MAXQ30 = 169,        // Dallas Semiconductor MAXQ30 Core Micro-controllers
  EM_XIMO16 = 170,        // New Japan Radio (NJR) 16-bit DSP Processor
  EM_MANIK = 171,         // M2000 Reconfigurable RISC Microprocessor
  EM_CRAYNV2 = 172,       // Cray Inc. NV2 vector architecture
  EM_RX = 173,            // Renesas RX family
  EM_METAG = 174,         // Imagination Technologies META processor
                          // architecture
  EM_MCST_ELBRUS = 175,   // MCST Elbrus general purpose hardware architecture
  EM_ECOG16 = 176,        // Cyan Technology eCOG16 family
  EM_CR16 = 177,          // National Semiconductor CompactRISC CR16 16-bit
                          // microprocessor
  EM_ETPU = 178,          // Freescale Extended Time Processing Unit
  EM_SLE9X = 179,         // Infineon Technologies SLE9X core
  EM_L10M = 180,          // Intel L10M
  EM_K10M = 181,          // Intel K10M
  EM_AARCH64 = 183,       // ARM AArch64
  EM_AVR32 = 185,         // Atmel Corporation 32-bit microprocessor family
  EM_STM8 = 186,          // STMicroelectronics STM8 8-bit microcontroller
  EM_TILE64 = 187,        // Tilera TILE64 multicore architecture family
  EM_TILEPRO = 188,       // Tilera TILEPro multicore architecture family
  EM_MICROBLAZE = 189,    // Xilinx MicroBlaze 32-bit RISC soft processor core
  EM_CUDA = 190,          // NVIDIA CUDA architecture
  EM_TILEGX = 191,        // Tilera TILE-Gx multicore architecture family
  EM_CLOUDSHIELD = 192,   // CloudShield architecture family
  EM_COREA_1ST = 193,     // KIPO-KAIST Core-A 1st generation processor family
  EM_COREA_2ND = 194,     // KIPO-KAIST Core-A 2nd generation processor family
  EM_ARC_COMPACT2 = 195,  // Synopsys ARCompact V2
  EM_OPEN8 = 196,         // Open8 8-bit RISC soft processor core
  EM_RL78 = 197,          // Renesas RL78 family
  EM_VIDEOCORE5 = 198,    // Broadcom VideoCore V processor
  EM_78KOR = 199,         // Renesas 78KOR family
  EM_56800EX = 200,       // Freescale 56800EX Digital Signal Controller (DSC)
  EM_BA1 = 201,           // Beyond BA1 CPU architecture
  EM_BA2 = 202,           // Beyond BA2 CPU architecture
  EM_XCORE = 203,         // XMOS xCORE processor family
  EM_MCHP_PIC = 204,      // Microchip 8-bit PIC(r) family
  EM_INTEL205 = 205,      // Reserved by Intel
  EM_INTEL206 = 206,      // Reserved by Intel
  EM_INTEL207 = 207,      // Reserved by Intel
  EM_INTEL208 = 208,      // Reserved by Intel
  EM_INTEL209 = 209,      // Reserved by Intel
  EM_KM32 = 210,          // KM211 KM32 32-bit processor
  EM_KMX32 = 211,         // KM211 KMX32 32-bit processor
  EM_KMX16 = 212,         // KM211 KMX16 16-bit processor
  EM_KMX8 = 213,          // KM211 KMX8 8-bit processor
  EM_KVARC = 214,         // KM211 KVARC processor
  EM_CDP = 215,           // Paneve CDP architecture family
  EM_COGE = 216,          // Cognitive Smart Memory Processor
  EM_COOL = 217,          // iCelero CoolEngine
  EM_NORC = 218,          // Nanoradio Optimized RISC
  EM_CSR_KALIMBA = 219,   // CSR Kalimba architecture family
  EM_AMDGPU = 224,        // AMD GPU architecture
  EM_RISCV = 243,         // RISC-V
  EM_LANAI = 244,         // Lanai 32-bit processor
  EM_BPF = 247,           // Linux kernel bpf virtual machine
  EM_VE = 251,            // NEC SX-Aurora VE
  EM_CSKY = 252,          // C-SKY 32-bit processor
  EM_LOONGARCH = 258,     // LoongArch
};

// Object file classes.
enum {
  ELFCLASSNONE = 0,
  ELFCLASS32 = 1, // 32-bit object file
  ELFCLASS64 = 2  // 64-bit object file
};

// Object file byte orderings.
enum {
  ELFDATANONE = 0, // Invalid data encoding.
  ELFDATA2LSB = 1, // Little-endian object file
  ELFDATA2MSB = 2  // Big-endian object file
};

// OS ABI identification.
enum {
  ELFOSABI_NONE = 0,           // UNIX System V ABI
  ELFOSABI_HPUX = 1,           // HP-UX operating system
  ELFOSABI_NETBSD = 2,         // NetBSD
  ELFOSABI_GNU = 3,            // GNU/Linux
  ELFOSABI_LINUX = 3,          // Historical alias for ELFOSABI_GNU.
  ELFOSABI_HURD = 4,           // GNU/Hurd
  ELFOSABI_SOLARIS = 6,        // Solaris
  ELFOSABI_AIX = 7,            // AIX
  ELFOSABI_IRIX = 8,           // IRIX
  ELFOSABI_FREEBSD = 9,        // FreeBSD
  ELFOSABI_TRU64 = 10,         // TRU64 UNIX
  ELFOSABI_MODESTO = 11,       // Novell Modesto
  ELFOSABI_OPENBSD = 12,       // OpenBSD
  ELFOSABI_OPENVMS = 13,       // OpenVMS
  ELFOSABI_NSK = 14,           // Hewlett-Packard Non-Stop Kernel
  ELFOSABI_AROS = 15,          // AROS
  ELFOSABI_FENIXOS = 16,       // FenixOS
  ELFOSABI_CLOUDABI = 17,      // Nuxi CloudABI
  ELFOSABI_FIRST_ARCH = 64,    // First architecture-specific OS ABI
  ELFOSABI_AMDGPU_HSA = 64,    // AMD HSA runtime
  ELFOSABI_AMDGPU_PAL = 65,    // AMD PAL runtime
  ELFOSABI_AMDGPU_MESA3D = 66, // AMD GCN GPUs (GFX6+) for MESA runtime
  ELFOSABI_ARM = 97,           // ARM
  ELFOSABI_C6000_ELFABI = 64,  // Bare-metal TMS320C6000
  ELFOSABI_C6000_LINUX = 65,   // Linux TMS320C6000
  ELFOSABI_STANDALONE = 255,   // Standalone (embedded) application
  ELFOSABI_LAST_ARCH = 255     // Last Architecture-specific OS ABI
};

// AMDGPU OS ABI Version identification.
enum {
  // ELFABIVERSION_AMDGPU_HSA_V1 does not exist because OS ABI identification
  // was never defined for V1.
  ELFABIVERSION_AMDGPU_HSA_V2 = 0,
  ELFABIVERSION_AMDGPU_HSA_V3 = 1,
  ELFABIVERSION_AMDGPU_HSA_V4 = 2,
  ELFABIVERSION_AMDGPU_HSA_V5 = 3
};

#define ELF_RELOC(name, value) name = value,

// X86_64 relocations.
enum {
#include "ELFRelocs/x86_64.def"
};

// i386 relocations.
enum {
#include "ELFRelocs/i386.def"
};

// ELF Relocation types for PPC32
enum {
#include "ELFRelocs/PowerPC.def"
};

// Specific e_flags for PPC64
enum {
  // e_flags bits specifying ABI:
  // 1 for original ABI using function descriptors,
  // 2 for revised ABI without function descriptors,
  // 0 for unspecified or not using any features affected by the differences.
  EF_PPC64_ABI = 3
};

// Special values for the st_other field in the symbol table entry for PPC64.
enum {
  STO_PPC64_LOCAL_BIT = 5,
  STO_PPC64_LOCAL_MASK = (7 << STO_PPC64_LOCAL_BIT)
};
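// Decode the 3-bit STO_PPC64_LOCAL field: encoded values 0 and 1 yield a
// local entry offset of 0 bytes; values 2 through 6 yield 4, 8, 16, 32, and
// 64 bytes respectively.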
static inline int64_t decodePPC64LocalEntryOffset(unsigned Other) {
  unsigned Val = (Other & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT;
  return ((1 << Val) >> 2) << 2;
}

// ELF Relocation types for PPC64
enum {
#include "ELFRelocs/PowerPC64.def"
};

// ELF Relocation types for AArch64
enum {
#include "ELFRelocs/AArch64.def"
};

// Special values for the st_other field in the symbol table entry for AArch64.
enum {
  // Symbol may follow different calling convention than base PCS.
  STO_AARCH64_VARIANT_PCS = 0x80
};

// ARM Specific e_flags
enum : unsigned {
  EF_ARM_SOFT_FLOAT = 0x00000200U,     // Legacy pre EABI_VER5
  EF_ARM_ABI_FLOAT_SOFT = 0x00000200U, // EABI_VER5
  EF_ARM_VFP_FLOAT = 0x00000400U,      // Legacy pre EABI_VER5
  EF_ARM_ABI_FLOAT_HARD = 0x00000400U, // EABI_VER5
  EF_ARM_BE8 = 0x00800000U,
  EF_ARM_EABI_UNKNOWN = 0x00000000U,
  EF_ARM_EABI_VER1 = 0x01000000U,
  EF_ARM_EABI_VER2 = 0x02000000U,
  EF_ARM_EABI_VER3 = 0x03000000U,
  EF_ARM_EABI_VER4 = 0x04000000U,
  EF_ARM_EABI_VER5 = 0x05000000U,
  EF_ARM_EABIMASK = 0xFF000000U
};

// ELF Relocation types for ARM
enum {
#include "ELFRelocs/ARM.def"
};

// ARC Specific e_flags
enum : unsigned {
  EF_ARC_MACH_MSK = 0x000000ff,
  EF_ARC_OSABI_MSK = 0x00000f00,
  E_ARC_MACH_ARC600 = 0x00000002,
  E_ARC_MACH_ARC601 = 0x00000004,
  E_ARC_MACH_ARC700 = 0x00000003,
  EF_ARC_CPU_ARCV2EM = 0x00000005,
  EF_ARC_CPU_ARCV2HS = 0x00000006,
  E_ARC_OSABI_ORIG = 0x00000000,
  E_ARC_OSABI_V2 = 0x00000200,
  E_ARC_OSABI_V3 = 0x00000300,
  E_ARC_OSABI_V4 = 0x00000400,
  EF_ARC_PIC = 0x00000100
};

// ELF Relocation types for ARC
enum {
#include "ELFRelocs/ARC.def"
};

// AVR specific e_flags
enum : unsigned {
  EF_AVR_ARCH_AVR1 = 1,
  EF_AVR_ARCH_AVR2 = 2,
  EF_AVR_ARCH_AVR25 = 25,
  EF_AVR_ARCH_AVR3 = 3,
  EF_AVR_ARCH_AVR31 = 31,
  EF_AVR_ARCH_AVR35 = 35,
  EF_AVR_ARCH_AVR4 = 4,
  EF_AVR_ARCH_AVR5 = 5,
  EF_AVR_ARCH_AVR51 = 51,
  EF_AVR_ARCH_AVR6 = 6,
  EF_AVR_ARCH_AVRTINY = 100,
  EF_AVR_ARCH_XMEGA1 = 101,
  EF_AVR_ARCH_XMEGA2 = 102,
  EF_AVR_ARCH_XMEGA3 = 103,
  EF_AVR_ARCH_XMEGA4 = 104,
  EF_AVR_ARCH_XMEGA5 = 105,
  EF_AVR_ARCH_XMEGA6 = 106,
  EF_AVR_ARCH_XMEGA7 = 107,

  EF_AVR_ARCH_MASK = 0x7f, // EF_AVR_ARCH_xxx selection mask

  EF_AVR_LINKRELAX_PREPARED = 0x80, // The file is prepared for linker
                                    // relaxation to be applied
};

// ELF Relocation types for AVR
enum {
#include "ELFRelocs/AVR.def"
};

// Mips Specific e_flags
enum : unsigned {
  EF_MIPS_NOREORDER = 0x00000001, // Don't reorder instructions
  EF_MIPS_PIC = 0x00000002,       // Position independent code
  EF_MIPS_CPIC = 0x00000004,      // Call object with Position independent code
  EF_MIPS_ABI2 = 0x00000020,      // File uses N32 ABI
  EF_MIPS_32BITMODE = 0x00000100, // Code compiled for a 64-bit machine
                                  // in 32-bit mode
  EF_MIPS_FP64 = 0x00000200,      // Code compiled for a 32-bit machine
                                  // but uses 64-bit FP registers
  EF_MIPS_NAN2008 = 0x00000400,   // Uses IEEE 754-2008 NaN encoding

  // ABI flags
  EF_MIPS_ABI_O32 = 0x00001000, // This file follows the first MIPS 32 bit ABI
  EF_MIPS_ABI_O64 = 0x00002000, // O32 ABI extended for 64-bit architecture.
  EF_MIPS_ABI_EABI32 = 0x00003000, // EABI in 32 bit mode.
  EF_MIPS_ABI_EABI64 = 0x00004000, // EABI in 64 bit mode.
  EF_MIPS_ABI = 0x0000f000,        // Mask for selecting EF_MIPS_ABI_ variant.

  // MIPS machine variant
  EF_MIPS_MACH_NONE = 0x00000000,    // A standard MIPS implementation.
  EF_MIPS_MACH_3900 = 0x00810000,    // Toshiba R3900
  EF_MIPS_MACH_4010 = 0x00820000,    // LSI R4010
  EF_MIPS_MACH_4100 = 0x00830000,    // NEC VR4100
  EF_MIPS_MACH_4650 = 0x00850000,    // MIPS R4650
  EF_MIPS_MACH_4120 = 0x00870000,    // NEC VR4120
  EF_MIPS_MACH_4111 = 0x00880000,    // NEC VR4111/VR4181
  EF_MIPS_MACH_SB1 = 0x008a0000,     // Broadcom SB-1
  EF_MIPS_MACH_OCTEON = 0x008b0000,  // Cavium Networks Octeon
  EF_MIPS_MACH_XLR = 0x008c0000,     // RMI Xlr
  EF_MIPS_MACH_OCTEON2 = 0x008d0000, // Cavium Networks Octeon2
  EF_MIPS_MACH_OCTEON3 = 0x008e0000, // Cavium Networks Octeon3
  EF_MIPS_MACH_5400 = 0x00910000,    // NEC VR5400
  EF_MIPS_MACH_5900 = 0x00920000,    // MIPS R5900
  EF_MIPS_MACH_5500 = 0x00980000,    // NEC VR5500
  EF_MIPS_MACH_9000 = 0x00990000,    // Unknown
  EF_MIPS_MACH_LS2E = 0x00a00000,    // ST Microelectronics Loongson 2E
  EF_MIPS_MACH_LS2F = 0x00a10000,    // ST Microelectronics Loongson 2F
  EF_MIPS_MACH_LS3A = 0x00a20000,    // Loongson 3A
  EF_MIPS_MACH = 0x00ff0000,         // EF_MIPS_MACH_xxx selection mask

  // ARCH_ASE
  EF_MIPS_MICROMIPS = 0x02000000,     // microMIPS
  EF_MIPS_ARCH_ASE_M16 = 0x04000000,  // Has Mips-16 ISA extensions
  EF_MIPS_ARCH_ASE_MDMX = 0x08000000, // Has MDMX multimedia extensions
  EF_MIPS_ARCH_ASE = 0x0f000000,      // Mask for EF_MIPS_ARCH_ASE_xxx flags

  // ARCH
  EF_MIPS_ARCH_1 = 0x00000000,    // MIPS1 instruction set
  EF_MIPS_ARCH_2 = 0x10000000,    // MIPS2 instruction set
  EF_MIPS_ARCH_3 = 0x20000000,    // MIPS3 instruction set
  EF_MIPS_ARCH_4 = 0x30000000,    // MIPS4 instruction set
  EF_MIPS_ARCH_5 = 0x40000000,    // MIPS5 instruction set
  EF_MIPS_ARCH_32 = 0x50000000,   // MIPS32 instruction set per linux not elf.h
  EF_MIPS_ARCH_64 = 0x60000000,   // MIPS64 instruction set per linux not elf.h
  EF_MIPS_ARCH_32R2 = 0x70000000, // mips32r2, mips32r3, mips32r5
  EF_MIPS_ARCH_64R2 = 0x80000000, // mips64r2, mips64r3, mips64r5
  EF_MIPS_ARCH_32R6 = 0x90000000, // mips32r6
  EF_MIPS_ARCH_64R6 = 0xa0000000, // mips64r6
  EF_MIPS_ARCH = 0xf0000000       // Mask for applying EF_MIPS_ARCH_ variant
};
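
// Example (a sketch): decoding fields from a MIPS e_flags value `Flags`.
//
//   unsigned Arch = Flags & EF_MIPS_ARCH;      // e.g. EF_MIPS_ARCH_32R2
//   unsigned ABI = Flags & EF_MIPS_ABI;        // e.g. EF_MIPS_ABI_O32
//   bool IsMicroMips = (Flags & EF_MIPS_MICROMIPS) != 0;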

// MIPS-specific section indexes
enum {
  SHN_MIPS_ACOMMON = 0xff00,   // Common symbols which are defined and allocated
  SHN_MIPS_TEXT = 0xff01,      // Not ABI compliant
  SHN_MIPS_DATA = 0xff02,      // Not ABI compliant
  SHN_MIPS_SCOMMON = 0xff03,   // Common symbols for global data area
  SHN_MIPS_SUNDEFINED = 0xff04 // Undefined symbols for global data area
};

// ELF Relocation types for Mips
enum {
#include "ELFRelocs/Mips.def"
};

// Special values for the st_other field in the symbol table entry for MIPS.
enum {
  STO_MIPS_OPTIONAL = 0x04,  // Symbol whose definition is optional
  STO_MIPS_PLT = 0x08,       // PLT entry related dynamic table record
  STO_MIPS_PIC = 0x20,       // PIC func in an object mixes PIC/non-PIC
  STO_MIPS_MICROMIPS = 0x80, // MIPS Specific ISA for MicroMips
  STO_MIPS_MIPS16 = 0xf0     // MIPS Specific ISA for Mips16
};

// .MIPS.options section descriptor kinds
enum {
  ODK_NULL = 0,       // Undefined
  ODK_REGINFO = 1,    // Register usage information
  ODK_EXCEPTIONS = 2, // Exception processing options
  ODK_PAD = 3,        // Section padding options
  ODK_HWPATCH = 4,    // Hardware patches applied
  ODK_FILL = 5,       // Linker fill value
  ODK_TAGS = 6,       // Space for tool identification
  ODK_HWAND = 7,      // Hardware AND patches applied
  ODK_HWOR = 8,       // Hardware OR patches applied
  ODK_GP_GROUP = 9,   // GP group to use for text/data sections
  ODK_IDENT = 10,     // ID information
  ODK_PAGESIZE = 11   // Page size information
};

// Hexagon-specific e_flags
enum {
  // Object processor version flags, bits[11:0]
  EF_HEXAGON_MACH_V2 = 0x00000001,   // Hexagon V2
  EF_HEXAGON_MACH_V3 = 0x00000002,   // Hexagon V3
  EF_HEXAGON_MACH_V4 = 0x00000003,   // Hexagon V4
  EF_HEXAGON_MACH_V5 = 0x00000004,   // Hexagon V5
  EF_HEXAGON_MACH_V55 = 0x00000005,  // Hexagon V55
  EF_HEXAGON_MACH_V60 = 0x00000060,  // Hexagon V60
  EF_HEXAGON_MACH_V62 = 0x00000062,  // Hexagon V62
  EF_HEXAGON_MACH_V65 = 0x00000065,  // Hexagon V65
  EF_HEXAGON_MACH_V66 = 0x00000066,  // Hexagon V66
  EF_HEXAGON_MACH_V67 = 0x00000067,  // Hexagon V67
  EF_HEXAGON_MACH_V67T = 0x00008067, // Hexagon V67T
  EF_HEXAGON_MACH_V68 = 0x00000068,  // Hexagon V68
  EF_HEXAGON_MACH_V69 = 0x00000069,  // Hexagon V69
  EF_HEXAGON_MACH_V71 = 0x00000071,  // Hexagon V71
  EF_HEXAGON_MACH_V71T = 0x00008071, // Hexagon V71T
  EF_HEXAGON_MACH_V73 = 0x00000073,  // Hexagon V73
  EF_HEXAGON_MACH = 0x000003ff,      // Mask for EF_HEXAGON_MACH_* values

  // Highest ISA version flags
  EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
                                    // of e_flags
  EF_HEXAGON_ISA_V2 = 0x00000010,   // Hexagon V2 ISA
  EF_HEXAGON_ISA_V3 = 0x00000020,   // Hexagon V3 ISA
  EF_HEXAGON_ISA_V4 = 0x00000030,   // Hexagon V4 ISA
  EF_HEXAGON_ISA_V5 = 0x00000040,   // Hexagon V5 ISA
  EF_HEXAGON_ISA_V55 = 0x00000050,  // Hexagon V55 ISA
  EF_HEXAGON_ISA_V60 = 0x00000060,  // Hexagon V60 ISA
  EF_HEXAGON_ISA_V62 = 0x00000062,  // Hexagon V62 ISA
  EF_HEXAGON_ISA_V65 = 0x00000065,  // Hexagon V65 ISA
  EF_HEXAGON_ISA_V66 = 0x00000066,  // Hexagon V66 ISA
  EF_HEXAGON_ISA_V67 = 0x00000067,  // Hexagon V67 ISA
  EF_HEXAGON_ISA_V68 = 0x00000068,  // Hexagon V68 ISA
  EF_HEXAGON_ISA_V69 = 0x00000069,  // Hexagon V69 ISA
  EF_HEXAGON_ISA_V71 = 0x00000071,  // Hexagon V71 ISA
  EF_HEXAGON_ISA_V73 = 0x00000073,  // Hexagon V73 ISA
  EF_HEXAGON_ISA_V75 = 0x00000075,  // Hexagon V75 ISA
  EF_HEXAGON_ISA = 0x000003ff,      // Mask for EF_HEXAGON_ISA_* values
};

// Hexagon-specific section indexes for common small data
enum {
  SHN_HEXAGON_SCOMMON = 0xff00,   // Other access sizes
  SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
  SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
  SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
  SHN_HEXAGON_SCOMMON_8 = 0xff04  // Double-word-size access
};

// ELF Relocation types for Hexagon
enum {
#include "ELFRelocs/Hexagon.def"
};

// ELF Relocation type for Lanai.
enum {
#include "ELFRelocs/Lanai.def"
};

// RISCV Specific e_flags
enum : unsigned {
  EF_RISCV_RVC = 0x0001,
  EF_RISCV_FLOAT_ABI = 0x0006,
  EF_RISCV_FLOAT_ABI_SOFT = 0x0000,
  EF_RISCV_FLOAT_ABI_SINGLE = 0x0002,
  EF_RISCV_FLOAT_ABI_DOUBLE = 0x0004,
  EF_RISCV_FLOAT_ABI_QUAD = 0x0006,
  EF_RISCV_RVE = 0x0008,
  EF_RISCV_TSO = 0x0010,
};
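
// An illustrative sketch (the helper is hypothetical, not part of the
// upstream header): the RISC-V float ABI is a two-bit field, so it must be
// masked with EF_RISCV_FLOAT_ABI and compared for equality; testing single
// bits would conflate SINGLE (0x2), DOUBLE (0x4) and QUAD (0x6).
inline bool usesDoubleFloatABI(unsigned EFlags) {
  return (EFlags & EF_RISCV_FLOAT_ABI) == EF_RISCV_FLOAT_ABI_DOUBLE;
}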

// ELF Relocation types for RISC-V
enum {
#include "ELFRelocs/RISCV.def"
};

enum {
  // Symbol may follow different calling convention than the standard calling
  // convention.
  STO_RISCV_VARIANT_CC = 0x80
};

// ELF Relocation types for S390/zSeries
enum {
#include "ELFRelocs/SystemZ.def"
};

// ELF Relocation type for Sparc.
enum {
#include "ELFRelocs/Sparc.def"
};

// AMDGPU specific e_flags.
enum : unsigned {
  // Processor selection mask for EF_AMDGPU_MACH_* values.
  EF_AMDGPU_MACH = 0x0ff,

  // Not specified processor.
  EF_AMDGPU_MACH_NONE = 0x000,

  // R600-based processors.

  // Radeon HD 2000/3000 Series (R600).
  EF_AMDGPU_MACH_R600_R600 = 0x001,
  EF_AMDGPU_MACH_R600_R630 = 0x002,
  EF_AMDGPU_MACH_R600_RS880 = 0x003,
  EF_AMDGPU_MACH_R600_RV670 = 0x004,
  // Radeon HD 4000 Series (R700).
  EF_AMDGPU_MACH_R600_RV710 = 0x005,
  EF_AMDGPU_MACH_R600_RV730 = 0x006,
  EF_AMDGPU_MACH_R600_RV770 = 0x007,
  // Radeon HD 5000 Series (Evergreen).
  EF_AMDGPU_MACH_R600_CEDAR = 0x008,
  EF_AMDGPU_MACH_R600_CYPRESS = 0x009,
  EF_AMDGPU_MACH_R600_JUNIPER = 0x00a,
  EF_AMDGPU_MACH_R600_REDWOOD = 0x00b,
  EF_AMDGPU_MACH_R600_SUMO = 0x00c,
  // Radeon HD 6000 Series (Northern Islands).
  EF_AMDGPU_MACH_R600_BARTS = 0x00d,
  EF_AMDGPU_MACH_R600_CAICOS = 0x00e,
  EF_AMDGPU_MACH_R600_CAYMAN = 0x00f,
  EF_AMDGPU_MACH_R600_TURKS = 0x010,

  // Reserved for R600-based processors.
  EF_AMDGPU_MACH_R600_RESERVED_FIRST = 0x011,
  EF_AMDGPU_MACH_R600_RESERVED_LAST = 0x01f,

  // First/last R600-based processors.
  EF_AMDGPU_MACH_R600_FIRST = EF_AMDGPU_MACH_R600_R600,
  EF_AMDGPU_MACH_R600_LAST = EF_AMDGPU_MACH_R600_TURKS,

  // AMDGCN-based processors.
  EF_AMDGPU_MACH_AMDGCN_GFX600        = 0x020,
  EF_AMDGPU_MACH_AMDGCN_GFX601        = 0x021,
  EF_AMDGPU_MACH_AMDGCN_GFX700        = 0x022,
  EF_AMDGPU_MACH_AMDGCN_GFX701        = 0x023,
  EF_AMDGPU_MACH_AMDGCN_GFX702        = 0x024,
  EF_AMDGPU_MACH_AMDGCN_GFX703        = 0x025,
  EF_AMDGPU_MACH_AMDGCN_GFX704        = 0x026,
  EF_AMDGPU_MACH_AMDGCN_RESERVED_0X27 = 0x027,
  EF_AMDGPU_MACH_AMDGCN_GFX801        = 0x028,
  EF_AMDGPU_MACH_AMDGCN_GFX802        = 0x029,
  EF_AMDGPU_MACH_AMDGCN_GFX803        = 0x02a,
  EF_AMDGPU_MACH_AMDGCN_GFX810        = 0x02b,
  EF_AMDGPU_MACH_AMDGCN_GFX900        = 0x02c,
  EF_AMDGPU_MACH_AMDGCN_GFX902        = 0x02d,
  EF_AMDGPU_MACH_AMDGCN_GFX904        = 0x02e,
  EF_AMDGPU_MACH_AMDGCN_GFX906        = 0x02f,
  EF_AMDGPU_MACH_AMDGCN_GFX908        = 0x030,
  EF_AMDGPU_MACH_AMDGCN_GFX909        = 0x031,
  EF_AMDGPU_MACH_AMDGCN_GFX90C        = 0x032,
  EF_AMDGPU_MACH_AMDGCN_GFX1010       = 0x033,
  EF_AMDGPU_MACH_AMDGCN_GFX1011       = 0x034,
  EF_AMDGPU_MACH_AMDGCN_GFX1012       = 0x035,
  EF_AMDGPU_MACH_AMDGCN_GFX1030       = 0x036,
  EF_AMDGPU_MACH_AMDGCN_GFX1031       = 0x037,
  EF_AMDGPU_MACH_AMDGCN_GFX1032       = 0x038,
  EF_AMDGPU_MACH_AMDGCN_GFX1033       = 0x039,
  EF_AMDGPU_MACH_AMDGCN_GFX602        = 0x03a,
  EF_AMDGPU_MACH_AMDGCN_GFX705        = 0x03b,
  EF_AMDGPU_MACH_AMDGCN_GFX805        = 0x03c,
  EF_AMDGPU_MACH_AMDGCN_GFX1035       = 0x03d,
  EF_AMDGPU_MACH_AMDGCN_GFX1034       = 0x03e,
  EF_AMDGPU_MACH_AMDGCN_GFX90A        = 0x03f,
  EF_AMDGPU_MACH_AMDGCN_GFX940        = 0x040,
  EF_AMDGPU_MACH_AMDGCN_GFX1100       = 0x041,
  EF_AMDGPU_MACH_AMDGCN_GFX1013       = 0x042,
  EF_AMDGPU_MACH_AMDGCN_GFX1150       = 0x043,
  EF_AMDGPU_MACH_AMDGCN_GFX1103       = 0x044,
  EF_AMDGPU_MACH_AMDGCN_GFX1036       = 0x045,
  EF_AMDGPU_MACH_AMDGCN_GFX1101       = 0x046,
  EF_AMDGPU_MACH_AMDGCN_GFX1102       = 0x047,
  EF_AMDGPU_MACH_AMDGCN_RESERVED_0X48 = 0x048,
  EF_AMDGPU_MACH_AMDGCN_RESERVED_0X49 = 0x049,
  EF_AMDGPU_MACH_AMDGCN_GFX1151       = 0x04a,
  EF_AMDGPU_MACH_AMDGCN_GFX941        = 0x04b,
  EF_AMDGPU_MACH_AMDGCN_GFX942        = 0x04c,

  // First/last AMDGCN-based processors.
  EF_AMDGPU_MACH_AMDGCN_FIRST = EF_AMDGPU_MACH_AMDGCN_GFX600,
  EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_GFX942,

  // Indicates if the "xnack" target feature is enabled for all code contained
  // in the object.
  //
  // Only valid for ELFOSABI_AMDGPU_HSA and ELFABIVERSION_AMDGPU_HSA_V2.
  EF_AMDGPU_FEATURE_XNACK_V2 = 0x01,
  // Indicates if the trap handler is enabled for all code contained
  // in the object.
  //
  // Only valid for ELFOSABI_AMDGPU_HSA and ELFABIVERSION_AMDGPU_HSA_V2.
  EF_AMDGPU_FEATURE_TRAP_HANDLER_V2 = 0x02,

  // Indicates if the "xnack" target feature is enabled for all code contained
  // in the object.
  //
  // Only valid for ELFOSABI_AMDGPU_HSA and ELFABIVERSION_AMDGPU_HSA_V3.
  EF_AMDGPU_FEATURE_XNACK_V3 = 0x100,
  // Indicates if the "sramecc" target feature is enabled for all code
  // contained in the object.
  //
  // Only valid for ELFOSABI_AMDGPU_HSA and ELFABIVERSION_AMDGPU_HSA_V3.
  EF_AMDGPU_FEATURE_SRAMECC_V3 = 0x200,

  // XNACK selection mask for EF_AMDGPU_FEATURE_XNACK_* values.
  //
  // Only valid for ELFOSABI_AMDGPU_HSA and ELFABIVERSION_AMDGPU_HSA_V4.
  EF_AMDGPU_FEATURE_XNACK_V4 = 0x300,
  // XNACK is not supported.
  EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4 = 0x000,
  // XNACK is any/default/unspecified.
  EF_AMDGPU_FEATURE_XNACK_ANY_V4 = 0x100,
  // XNACK is off.
  EF_AMDGPU_FEATURE_XNACK_OFF_V4 = 0x200,
  // XNACK is on.
  EF_AMDGPU_FEATURE_XNACK_ON_V4 = 0x300,

  // SRAMECC selection mask for EF_AMDGPU_FEATURE_SRAMECC_* values.
  //
  // Only valid for ELFOSABI_AMDGPU_HSA and ELFABIVERSION_AMDGPU_HSA_V4.
  EF_AMDGPU_FEATURE_SRAMECC_V4 = 0xc00,
  // SRAMECC is not supported.
  EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4 = 0x000,
  // SRAMECC is any/default/unspecified.
  EF_AMDGPU_FEATURE_SRAMECC_ANY_V4 = 0x400,
  // SRAMECC is off.
  EF_AMDGPU_FEATURE_SRAMECC_OFF_V4 = 0x800,
  // SRAMECC is on.
  EF_AMDGPU_FEATURE_SRAMECC_ON_V4 = 0xc00,
};
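
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): for ELFABIVERSION_AMDGPU_HSA_V4 objects, XNACK and SRAMECC are
// each a two-bit field of e_flags; extract the field with its *_V4 mask and
// compare against the UNSUPPORTED/ANY/OFF/ON values above.
inline bool isXnackOnV4(unsigned EFlags) {
  return (EFlags & EF_AMDGPU_FEATURE_XNACK_V4) ==
         EF_AMDGPU_FEATURE_XNACK_ON_V4;
}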

// ELF Relocation types for AMDGPU
enum {
#include "ELFRelocs/AMDGPU.def"
};

// ELF Relocation types for BPF
enum {
#include "ELFRelocs/BPF.def"
};

// ELF Relocation types for M68k
enum {
#include "ELFRelocs/M68k.def"
};

// MSP430 specific e_flags
enum : unsigned {
  EF_MSP430_MACH_MSP430x11 = 11,
  EF_MSP430_MACH_MSP430x11x1 = 110,
  EF_MSP430_MACH_MSP430x12 = 12,
  EF_MSP430_MACH_MSP430x13 = 13,
  EF_MSP430_MACH_MSP430x14 = 14,
  EF_MSP430_MACH_MSP430x15 = 15,
  EF_MSP430_MACH_MSP430x16 = 16,
  EF_MSP430_MACH_MSP430x20 = 20,
  EF_MSP430_MACH_MSP430x22 = 22,
  EF_MSP430_MACH_MSP430x23 = 23,
  EF_MSP430_MACH_MSP430x24 = 24,
  EF_MSP430_MACH_MSP430x26 = 26,
  EF_MSP430_MACH_MSP430x31 = 31,
  EF_MSP430_MACH_MSP430x32 = 32,
  EF_MSP430_MACH_MSP430x33 = 33,
  EF_MSP430_MACH_MSP430x41 = 41,
  EF_MSP430_MACH_MSP430x42 = 42,
  EF_MSP430_MACH_MSP430x43 = 43,
  EF_MSP430_MACH_MSP430x44 = 44,
  EF_MSP430_MACH_MSP430X = 45,
  EF_MSP430_MACH_MSP430x46 = 46,
  EF_MSP430_MACH_MSP430x47 = 47,
  EF_MSP430_MACH_MSP430x54 = 54,
};

// ELF Relocation types for MSP430
enum {
#include "ELFRelocs/MSP430.def"
};

// ELF Relocation type for VE.
enum {
#include "ELFRelocs/VE.def"
};

// CSKY Specific e_flags
enum : unsigned {
  EF_CSKY_801 = 0xa,
  EF_CSKY_802 = 0x10,
  EF_CSKY_803 = 0x9,
  EF_CSKY_805 = 0x11,
  EF_CSKY_807 = 0x6,
  EF_CSKY_810 = 0x8,
  EF_CSKY_860 = 0xb,
  EF_CSKY_800 = 0x1f,
  EF_CSKY_FLOAT = 0x2000,
  EF_CSKY_DSP = 0x4000,
  EF_CSKY_ABIV2 = 0x20000000,
  EF_CSKY_EFV1 = 0x1000000,
  EF_CSKY_EFV2 = 0x2000000,
  EF_CSKY_EFV3 = 0x3000000
};

// ELF Relocation types for CSKY
enum {
#include "ELFRelocs/CSKY.def"
};

// LoongArch Specific e_flags
enum : unsigned {
  // Definitions from LoongArch ELF psABI v2.01.
  // Reference: https://github.com/loongson/LoongArch-Documentation
  // (commit hash 296de4def055c871809068e0816325a4ac04eb12)

  // Base ABI Modifiers
  EF_LOONGARCH_ABI_SOFT_FLOAT    = 0x1,
  EF_LOONGARCH_ABI_SINGLE_FLOAT  = 0x2,
  EF_LOONGARCH_ABI_DOUBLE_FLOAT  = 0x3,
  EF_LOONGARCH_ABI_MODIFIER_MASK = 0x7,

  // Object file ABI versions
  EF_LOONGARCH_OBJABI_V0   = 0x0,
  EF_LOONGARCH_OBJABI_V1   = 0x40,
  EF_LOONGARCH_OBJABI_MASK = 0xC0,
};

// ELF Relocation types for LoongArch
enum {
#include "ELFRelocs/LoongArch.def"
};

// Xtensa specific e_flags
enum : unsigned {
  // Four-bit Xtensa machine type mask.
  EF_XTENSA_MACH = 0x0000000f,
  // Various CPU types.
  EF_XTENSA_MACH_NONE = 0x00000000, // A base Xtensa implementation
  EF_XTENSA_XT_INSN = 0x00000100,
  EF_XTENSA_XT_LIT = 0x00000200,
};

// ELF Relocation types for Xtensa
enum {
#include "ELFRelocs/Xtensa.def"
};

#undef ELF_RELOC

// Section header.
struct Elf32_Shdr {
  Elf32_Word sh_name;      // Section name (index into string table)
  Elf32_Word sh_type;      // Section type (SHT_*)
  Elf32_Word sh_flags;     // Section flags (SHF_*)
  Elf32_Addr sh_addr;      // Address where section is to be loaded
  Elf32_Off sh_offset;     // File offset of section data, in bytes
  Elf32_Word sh_size;      // Size of section, in bytes
  Elf32_Word sh_link;      // Section type-specific header table index link
  Elf32_Word sh_info;      // Section type-specific extra information
  Elf32_Word sh_addralign; // Section address alignment
  Elf32_Word sh_entsize;   // Size of records contained within the section
};

// Section header for ELF64 - same fields as ELF32, different types.
struct Elf64_Shdr {
  Elf64_Word sh_name;
  Elf64_Word sh_type;
  Elf64_Xword sh_flags;
  Elf64_Addr sh_addr;
  Elf64_Off sh_offset;
  Elf64_Xword sh_size;
  Elf64_Word sh_link;
  Elf64_Word sh_info;
  Elf64_Xword sh_addralign;
  Elf64_Xword sh_entsize;
};

// Special section indices.
enum {
  SHN_UNDEF = 0,          // Undefined, missing, irrelevant, or meaningless
  SHN_LORESERVE = 0xff00, // Lowest reserved index
  SHN_LOPROC = 0xff00,    // Lowest processor-specific index
  SHN_HIPROC = 0xff1f,    // Highest processor-specific index
  SHN_LOOS = 0xff20,      // Lowest operating system-specific index
  SHN_HIOS = 0xff3f,      // Highest operating system-specific index
  SHN_ABS = 0xfff1,       // Symbol has absolute value; does not need relocation
  SHN_COMMON = 0xfff2,    // FORTRAN COMMON or C external global variables
  SHN_XINDEX = 0xffff,    // Mark that the index is >= SHN_LORESERVE
  SHN_HIRESERVE = 0xffff  // Highest reserved index
};

// Section types.
enum : unsigned {
  SHT_NULL = 0,           // No associated section (inactive entry).
  SHT_PROGBITS = 1,       // Program-defined contents.
  SHT_SYMTAB = 2,         // Symbol table.
  SHT_STRTAB = 3,         // String table.
  SHT_RELA = 4,           // Relocation entries; explicit addends.
  SHT_HASH = 5,           // Symbol hash table.
  SHT_DYNAMIC = 6,        // Information for dynamic linking.
  SHT_NOTE = 7,           // Information about the file.
  SHT_NOBITS = 8,         // Data occupies no space in the file.
  SHT_REL = 9,            // Relocation entries; no explicit addends.
  SHT_SHLIB = 10,         // Reserved.
  SHT_DYNSYM = 11,        // Symbol table.
  SHT_INIT_ARRAY = 14,    // Pointers to initialization functions.
  SHT_FINI_ARRAY = 15,    // Pointers to termination functions.
  SHT_PREINIT_ARRAY = 16, // Pointers to pre-init functions.
  SHT_GROUP = 17,         // Section group.
  SHT_SYMTAB_SHNDX = 18,  // Indices for SHN_XINDEX entries.
  // Experimental support for SHT_RELR sections. For details, see proposal
  // at https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg
  SHT_RELR = 19,         // Relocation entries; only offsets.
  SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
  // Android packed relocation section types.
  // https://android.googlesource.com/platform/bionic/+/6f12bfece5dcc01325e0abba56a46b1bcf991c69/tools/relocation_packer/src/elf_file.cc#37
  SHT_ANDROID_REL = 0x60000001,
  SHT_ANDROID_RELA = 0x60000002,
  SHT_LLVM_ODRTAB = 0x6fff4c00,         // LLVM ODR table.
  SHT_LLVM_LINKER_OPTIONS = 0x6fff4c01, // LLVM Linker Options.
  SHT_LLVM_ADDRSIG = 0x6fff4c03,        // List of address-significant symbols
                                        // for safe ICF.
  SHT_LLVM_DEPENDENT_LIBRARIES =
      0x6fff4c04,                  // LLVM Dependent Library Specifiers.
  SHT_LLVM_SYMPART = 0x6fff4c05,   // Symbol partition specification.
  SHT_LLVM_PART_EHDR = 0x6fff4c06, // ELF header for loadable partition.
  SHT_LLVM_PART_PHDR = 0x6fff4c07, // Phdrs for loadable partition.
  SHT_LLVM_BB_ADDR_MAP_V0 =
      0x6fff4c08, // LLVM Basic Block Address Map (old version kept for
                  // backward-compatibility).
  SHT_LLVM_CALL_GRAPH_PROFILE = 0x6fff4c09, // LLVM Call Graph Profile.
  SHT_LLVM_BB_ADDR_MAP = 0x6fff4c0a,        // LLVM Basic Block Address Map.
  SHT_LLVM_OFFLOADING = 0x6fff4c0b,         // LLVM device offloading data.
  SHT_LLVM_LTO = 0x6fff4c0c,                // .llvm.lto for fat LTO.
  // Android's experimental support for SHT_RELR sections.
  // https://android.googlesource.com/platform/bionic/+/b7feec74547f84559a1467aca02708ff61346d2a/libc/include/elf.h#512
  SHT_ANDROID_RELR = 0x6fffff00,   // Relocation entries; only offsets.
  SHT_GNU_ATTRIBUTES = 0x6ffffff5, // Object attributes.
  SHT_GNU_HASH = 0x6ffffff6,       // GNU-style hash table.
  SHT_GNU_verdef = 0x6ffffffd,     // GNU version definitions.
  SHT_GNU_verneed = 0x6ffffffe,    // GNU version references.
  SHT_GNU_versym = 0x6fffffff,     // GNU symbol versions table.
  SHT_HIOS = 0x6fffffff,           // Highest operating system-specific type.
  SHT_LOPROC = 0x70000000,         // Lowest processor arch-specific type.
  // Fixme: All this is duplicated in MCSectionELF. Why??
  // Exception Index table
  SHT_ARM_EXIDX = 0x70000001U,
  // BPABI DLL dynamic linking pre-emption map
  SHT_ARM_PREEMPTMAP = 0x70000002U,
  // Object file compatibility attributes
  SHT_ARM_ATTRIBUTES = 0x70000003U,
  SHT_ARM_DEBUGOVERLAY = 0x70000004U,
  SHT_ARM_OVERLAYSECTION = 0x70000005U,
  // Special aarch64-specific sections for MTE support, as described in:
  // https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#7section-types
  SHT_AARCH64_MEMTAG_GLOBALS_STATIC = 0x70000007U,
  SHT_AARCH64_MEMTAG_GLOBALS_DYNAMIC = 0x70000008U,
  SHT_HEX_ORDERED = 0x70000000,   // Link editor is to sort the entries in
                                  // this section based on their sizes
  SHT_X86_64_UNWIND = 0x70000001, // Unwind information

  SHT_MIPS_REGINFO = 0x70000006,  // Register usage information
  SHT_MIPS_OPTIONS = 0x7000000d,  // General options
  SHT_MIPS_DWARF = 0x7000001e,    // DWARF debugging section.
  SHT_MIPS_ABIFLAGS = 0x7000002a, // ABI information.

  SHT_MSP430_ATTRIBUTES = 0x70000003U,

  SHT_RISCV_ATTRIBUTES = 0x70000003U,

  SHT_CSKY_ATTRIBUTES = 0x70000001U,

  SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
  SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
  SHT_HIUSER = 0xffffffff  // Highest type reserved for applications.
};

// Section flags.
enum : unsigned {
  // Section data should be writable during execution.
  SHF_WRITE = 0x1,

  // Section occupies memory during program execution.
  SHF_ALLOC = 0x2,

  // Section contains executable machine instructions.
  SHF_EXECINSTR = 0x4,

  // The data in this section may be merged.
  SHF_MERGE = 0x10,

  // The data in this section is null-terminated strings.
  SHF_STRINGS = 0x20,

  // A field in this section holds a section header table index.
  SHF_INFO_LINK = 0x40U,

  // Adds special ordering requirements for link editors.
  SHF_LINK_ORDER = 0x80U,

  // This section requires special OS-specific processing to avoid incorrect
  // behavior.
  SHF_OS_NONCONFORMING = 0x100U,

  // This section is a member of a section group.
  SHF_GROUP = 0x200U,

  // This section holds Thread-Local Storage.
  SHF_TLS = 0x400U,

  // Identifies a section containing compressed data.
  SHF_COMPRESSED = 0x800U,

  // This section should not be garbage collected by the linker.
  SHF_GNU_RETAIN = 0x200000,

  // This section is excluded from the final executable or shared library.
  SHF_EXCLUDE = 0x80000000U,

  // Start of target-specific flags.

  SHF_MASKOS = 0x0ff00000,

  // Solaris equivalent of SHF_GNU_RETAIN.
  SHF_SUNW_NODISCARD = 0x00100000,

  // Bits indicating processor-specific flags.
  SHF_MASKPROC = 0xf0000000,

  /// All sections with the "d" flag are grouped together by the linker to form
  /// the data section and the dp register is set to the start of the section by
  /// the boot code.
  XCORE_SHF_DP_SECTION = 0x10000000,

  /// All sections with the "c" flag are grouped together by the linker to form
  /// the constant pool and the cp register is set to the start of the constant
  /// pool by the boot code.
  XCORE_SHF_CP_SECTION = 0x20000000,

  // If an object file section does not have this flag set, then it may not hold
  // more than 2GB and can be freely referred to in objects using smaller code
  // models. Otherwise, only objects using larger code models can refer to them.
  // For example, a medium code model object can refer to data in a section that
  // sets this flag besides being able to refer to data in a section that does
  // not set it; likewise, a small code model object can refer only to code in a
  // section that does not set this flag.
  SHF_X86_64_LARGE = 0x10000000,

  // All sections with the GPREL flag are grouped into a global data area
  // for faster accesses
  SHF_HEX_GPREL = 0x10000000,

  // Section contains text/data which may be replicated in other sections.
  // Linker must retain only one copy.
  SHF_MIPS_NODUPES = 0x01000000,

  // Linker must generate implicit hidden weak names.
  SHF_MIPS_NAMES = 0x02000000,

  // Section data local to process.
  SHF_MIPS_LOCAL = 0x04000000,

  // Do not strip this section.
  SHF_MIPS_NOSTRIP = 0x08000000,

  // Section must be part of global data area.
  SHF_MIPS_GPREL = 0x10000000,

  // This section should be merged.
  SHF_MIPS_MERGE = 0x20000000,

  // Address size to be inferred from section entry size.
  SHF_MIPS_ADDR = 0x40000000,

  // Section data is string data by default.
  SHF_MIPS_STRING = 0x80000000,

  // Make code section unreadable when in execute-only mode
  SHF_ARM_PURECODE = 0x20000000
};
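
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): bits inside SHF_MASKPROC are reused across targets (for example,
// 0x10000000 is SHF_X86_64_LARGE, SHF_HEX_GPREL, SHF_MIPS_GPREL, or
// XCORE_SHF_DP_SECTION), so they can only be interpreted together with the
// file's e_machine value.
inline bool isX86_64LargeSection(uint16_t EMachine, uint64_t Flags) {
  return EMachine == EM_X86_64 && (Flags & SHF_X86_64_LARGE) != 0;
}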

// Section Group Flags
enum : unsigned {
  GRP_COMDAT = 0x1,
  GRP_MASKOS = 0x0ff00000,
  GRP_MASKPROC = 0xf0000000
};

// Symbol table entries for ELF32.
struct Elf32_Sym {
  Elf32_Word st_name;     // Symbol name (index into string table)
  Elf32_Addr st_value;    // Value or address associated with the symbol
  Elf32_Word st_size;     // Size of the symbol
  unsigned char st_info;  // Symbol's type and binding attributes
  unsigned char st_other; // Visibility (STV_*) in low 2 bits; rest hold STO_*
  Elf32_Half st_shndx;    // Which section (header table index) it's defined in

  // These accessors and mutators correspond to the ELF32_ST_BIND,
  // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
  unsigned char getBinding() const { return st_info >> 4; }
  unsigned char getType() const { return st_info & 0x0f; }
  void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
  void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
  void setBindingAndType(unsigned char b, unsigned char t) {
    st_info = (b << 4) + (t & 0x0f);
  }
};

// Symbol table entries for ELF64.
struct Elf64_Sym {
  Elf64_Word st_name;     // Symbol name (index into string table)
  unsigned char st_info;  // Symbol's type and binding attributes
  unsigned char st_other; // Visibility (STV_*) in low 2 bits; rest hold STO_*
  Elf64_Half st_shndx;    // Which section (header tbl index) it's defined in
  Elf64_Addr st_value;    // Value or address associated with the symbol
  Elf64_Xword st_size;    // Size of the symbol

  // These accessors and mutators are identical to those defined for ELF32
  // symbol table entries.
  unsigned char getBinding() const { return st_info >> 4; }
  unsigned char getType() const { return st_info & 0x0f; }
  void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
  void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
  void setBindingAndType(unsigned char b, unsigned char t) {
    st_info = (b << 4) + (t & 0x0f);
  }
};
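
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): both symbol structs pack the binding into the high nibble of
// st_info and the type into the low nibble, mirroring the ELF32_ST_INFO /
// ELF64_ST_INFO macros from the specification.
inline unsigned char makeSymbolInfo(unsigned char Binding, unsigned char Type) {
  return (Binding << 4) + (Type & 0x0f); // e.g. (STB_GLOBAL, STT_FUNC)
}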

// The size (in bytes) of symbol table entries.
enum {
  SYMENTRY_SIZE32 = 16, // 32-bit symbol entry size
  SYMENTRY_SIZE64 = 24  // 64-bit symbol entry size.
};

// Symbol bindings.
enum {
  STB_LOCAL = 0,  // Local symbol, not visible outside obj file containing def
  STB_GLOBAL = 1, // Global symbol, visible to all object files being combined
  STB_WEAK = 2,   // Weak symbol, like global but lower-precedence
  STB_GNU_UNIQUE = 10,
  STB_LOOS = 10,   // Lowest operating system-specific binding type
  STB_HIOS = 12,   // Highest operating system-specific binding type
  STB_LOPROC = 13, // Lowest processor-specific binding type
  STB_HIPROC = 15  // Highest processor-specific binding type
};

// Symbol types.
enum {
  STT_NOTYPE = 0,     // Symbol's type is not specified
  STT_OBJECT = 1,     // Symbol is a data object (variable, array, etc.)
  STT_FUNC = 2,       // Symbol is executable code (function, etc.)
  STT_SECTION = 3,    // Symbol refers to a section
  STT_FILE = 4,       // Local, absolute symbol that refers to a file
  STT_COMMON = 5,     // An uninitialized common block
  STT_TLS = 6,        // Thread local data object
  STT_GNU_IFUNC = 10, // GNU indirect function
  STT_LOOS = 10,      // Lowest operating system-specific symbol type
  STT_HIOS = 12,      // Highest operating system-specific symbol type
  STT_LOPROC = 13,    // Lowest processor-specific symbol type
  STT_HIPROC = 15,    // Highest processor-specific symbol type

  // AMDGPU symbol types
  STT_AMDGPU_HSA_KERNEL = 10
};

enum {
  STV_DEFAULT = 0,  // Visibility is specified by binding type
  STV_INTERNAL = 1, // Defined by processor supplements
  STV_HIDDEN = 2,   // Not visible to other components
  STV_PROTECTED = 3 // Visible in other components but not preemptable
};

// Symbol number.
enum { STN_UNDEF = 0 };

// Special relocation symbols used in the MIPS64 ELF relocation entries
enum {
  RSS_UNDEF = 0, // None
  RSS_GP = 1,    // Value of gp
  RSS_GP0 = 2,   // Value of gp used to create object being relocated
  RSS_LOC = 3    // Address of location being relocated
};

// Relocation entry, without explicit addend.
struct Elf32_Rel {
  Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
  Elf32_Word r_info;   // Symbol table index and type of relocation to apply

  // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
  // and ELF32_R_INFO macros defined in the ELF specification:
  Elf32_Word getSymbol() const { return (r_info >> 8); }
  unsigned char getType() const { return (unsigned char)(r_info & 0x0ff); }
  void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
  void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
  void setSymbolAndType(Elf32_Word s, unsigned char t) {
    r_info = (s << 8) + t;
  }
};

// Relocation entry with explicit addend.
struct Elf32_Rela {
  Elf32_Addr r_offset;  // Location (file byte offset, or program virtual addr)
  Elf32_Word r_info;    // Symbol table index and type of relocation to apply
  Elf32_Sword r_addend; // Compute value for relocatable field by adding this

  // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
  // and ELF32_R_INFO macros defined in the ELF specification:
  Elf32_Word getSymbol() const { return (r_info >> 8); }
  unsigned char getType() const { return (unsigned char)(r_info & 0x0ff); }
  void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
  void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
  void setSymbolAndType(Elf32_Word s, unsigned char t) {
    r_info = (s << 8) + t;
  }
};

// Relocation entry without explicit addend or info (relative relocations only).
typedef Elf32_Word Elf32_Relr; // offset/bitmap for relative relocations

// Relocation entry, without explicit addend.
struct Elf64_Rel {
  Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
  Elf64_Xword r_info;  // Symbol table index and type of relocation to apply.

  // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
  // and ELF64_R_INFO macros defined in the ELF specification:
  Elf64_Word getSymbol() const { return (r_info >> 32); }
  Elf64_Word getType() const { return (Elf64_Word)(r_info & 0xffffffffL); }
  void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
  void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
  void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
    r_info = ((Elf64_Xword)s << 32) + (t & 0xffffffffL);
  }
};

// Relocation entry with explicit addend.
struct Elf64_Rela {
  Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
  Elf64_Xword r_info;  // Symbol table index and type of relocation to apply.
  Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.

  // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
  // and ELF64_R_INFO macros defined in the ELF specification:
  Elf64_Word getSymbol() const { return (r_info >> 32); }
  Elf64_Word getType() const { return (Elf64_Word)(r_info & 0xffffffffL); }
  void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
  void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
  void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
    r_info = ((Elf64_Xword)s << 32) + (t & 0xffffffffL);
  }
};
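
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): populating a 64-bit RELA entry. The symbol table index lives in
// the high 32 bits of r_info and the relocation type in the low 32 bits, as
// implemented by setSymbolAndType above.
inline Elf64_Rela makeRela(Elf64_Addr Offset, Elf64_Word Sym, Elf64_Word Type,
                           Elf64_Sxword Addend) {
  Elf64_Rela R;
  R.r_offset = Offset;
  R.setSymbolAndType(Sym, Type);
  R.r_addend = Addend;
  return R;
}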

// Relocation entry without explicit addend or info (relative relocations only).
typedef Elf64_Xword Elf64_Relr; // offset/bitmap for relative relocations

// Program header for ELF32.
struct Elf32_Phdr {
  Elf32_Word p_type;   // Type of segment
  Elf32_Off p_offset;  // File offset where segment is located, in bytes
  Elf32_Addr p_vaddr;  // Virtual address of beginning of segment
  Elf32_Addr p_paddr;  // Physical address of beginning of segment (OS-specific)
  Elf32_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
  Elf32_Word p_memsz;  // Num. of bytes in mem image of segment (may be zero)
  Elf32_Word p_flags;  // Segment flags
  Elf32_Word p_align;  // Segment alignment constraint
};

// Program header for ELF64.
struct Elf64_Phdr {
  Elf64_Word p_type;    // Type of segment
  Elf64_Word p_flags;   // Segment flags
  Elf64_Off p_offset;   // File offset where segment is located, in bytes
  Elf64_Addr p_vaddr;   // Virtual address of beginning of segment
  Elf64_Addr p_paddr;   // Physical addr of beginning of segment (OS-specific)
  Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
  Elf64_Xword p_memsz;  // Num. of bytes in mem image of segment (may be zero)
  Elf64_Xword p_align;  // Segment alignment constraint
};

// Segment types.
enum {
  PT_NULL = 0,            // Unused segment.
  PT_LOAD = 1,            // Loadable segment.
  PT_DYNAMIC = 2,         // Dynamic linking information.
  PT_INTERP = 3,          // Interpreter pathname.
  PT_NOTE = 4,            // Auxiliary information.
  PT_SHLIB = 5,           // Reserved.
  PT_PHDR = 6,            // The program header table itself.
  PT_TLS = 7,             // The thread-local storage template.
  PT_LOOS = 0x60000000,   // Lowest operating system-specific pt entry type.
  PT_HIOS = 0x6fffffff,   // Highest operating system-specific pt entry type.
  PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
  PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.

  // x86-64 program header types.
  // These all contain stack unwind tables.
  PT_GNU_EH_FRAME = 0x6474e550,
  PT_SUNW_EH_FRAME = 0x6474e550,
  PT_SUNW_UNWIND = 0x6464e550,

  PT_GNU_STACK = 0x6474e551,    // Indicates stack executability.
  PT_GNU_RELRO = 0x6474e552,    // Read-only after relocation.
  PT_GNU_PROPERTY = 0x6474e553, // .note.gnu.property notes sections.

  PT_OPENBSD_MUTABLE = 0x65a3dbe5,   // Like bss, but not immutable.
  PT_OPENBSD_RANDOMIZE = 0x65a3dbe6, // Fill with random data.
  PT_OPENBSD_WXNEEDED = 0x65a3dbe7,  // Program does W^X violations.
  PT_OPENBSD_NOBTCFI = 0x65a3dbe8,   // Do not enforce branch target CFI.
  PT_OPENBSD_BOOTDATA = 0x65a41be6,  // Section for boot arguments.

  // ARM program header types.
  PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility info
  // These all contain stack unwind tables.
  PT_ARM_EXIDX = 0x70000001,
  PT_ARM_UNWIND = 0x70000001,
  // MTE memory tag segment type
  PT_AARCH64_MEMTAG_MTE = 0x70000002,

  // MIPS program header types.
  PT_MIPS_REGINFO = 0x70000000,  // Register usage information.
  PT_MIPS_RTPROC = 0x70000001,   // Runtime procedure table.
  PT_MIPS_OPTIONS = 0x70000002,  // Options segment.
  PT_MIPS_ABIFLAGS = 0x70000003, // Abiflags segment.

  // RISCV program header types.
  PT_RISCV_ATTRIBUTES = 0x70000003,
};

// Segment flag bits.
enum : unsigned {
  PF_X = 1,                // Execute
  PF_W = 2,                // Write
  PF_R = 4,                // Read
  PF_MASKOS = 0x0ff00000,  // Bits for operating system-specific semantics.
  PF_MASKPROC = 0xf0000000 // Bits for processor-specific semantics.
};

// Dynamic table entry for ELF32.
struct Elf32_Dyn {
  Elf32_Sword d_tag; // Type of dynamic table entry.
  union {
    Elf32_Word d_val; // Integer value of entry.
    Elf32_Addr d_ptr; // Pointer value of entry.
  } d_un;
};

// Dynamic table entry for ELF64.
struct Elf64_Dyn {
  Elf64_Sxword d_tag; // Type of dynamic table entry.
  union {
    Elf64_Xword d_val; // Integer value of entry.
    Elf64_Addr d_ptr;  // Pointer value of entry.
  } d_un;
};

// Dynamic table entry tags.
enum {
#define DYNAMIC_TAG(name, value) DT_##name = value,
#include "DynamicTags.def"
#undef DYNAMIC_TAG
};

// DT_FLAGS values.
enum {
  DF_ORIGIN = 0x01,    // The object may reference $ORIGIN.
  DF_SYMBOLIC = 0x02,  // Search the shared lib before searching the exe.
  DF_TEXTREL = 0x04,   // Relocations may modify a non-writable segment.
  DF_BIND_NOW = 0x08,  // Process all relocations on load.
  DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
};

// State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 entry.
enum {
  DF_1_NOW = 0x00000001,       // Set RTLD_NOW for this object.
  DF_1_GLOBAL = 0x00000002,    // Set RTLD_GLOBAL for this object.
  DF_1_GROUP = 0x00000004,     // Set RTLD_GROUP for this object.
  DF_1_NODELETE = 0x00000008,  // Set RTLD_NODELETE for this object.
  DF_1_LOADFLTR = 0x00000010,  // Trigger filtee loading at runtime.
  DF_1_INITFIRST = 0x00000020, // Set RTLD_INITFIRST for this object.
  DF_1_NOOPEN = 0x00000040,    // Set RTLD_NOOPEN for this object.
  DF_1_ORIGIN = 0x00000080,    // $ORIGIN must be handled.
  DF_1_DIRECT = 0x00000100,    // Direct binding enabled.
  DF_1_TRANS = 0x00000200,
  DF_1_INTERPOSE = 0x00000400,  // Object is used to interpose.
  DF_1_NODEFLIB = 0x00000800,   // Ignore default lib search path.
  DF_1_NODUMP = 0x00001000,     // Object can't be dldump'ed.
  DF_1_CONFALT = 0x00002000,    // Configuration alternative created.
  DF_1_ENDFILTEE = 0x00004000,  // Filtee terminates filters search.
  DF_1_DISPRELDNE = 0x00008000, // Disp reloc applied at build time.
  DF_1_DISPRELPND = 0x00010000, // Disp reloc applied at run-time.
  DF_1_NODIRECT = 0x00020000,   // Object has no-direct binding.
  DF_1_IGNMULDEF = 0x00040000,
  DF_1_NOKSYMS = 0x00080000,
  DF_1_NOHDR = 0x00100000,
  DF_1_EDITED = 0x00200000, // Object is modified after built.
  DF_1_NORELOC = 0x00400000,
  DF_1_SYMINTPOSE = 0x00800000, // Object has individual interposers.
  DF_1_GLOBAUDIT = 0x01000000,  // Global auditing required.
  DF_1_SINGLETON = 0x02000000,  // Singleton symbols are used.
  DF_1_PIE = 0x08000000,        // Object is a position-independent executable.
};

// DT_MIPS_FLAGS values.
enum {
  RHF_NONE = 0x00000000,                   // No flags.
  RHF_QUICKSTART = 0x00000001,             // Uses shortcut pointers.
  RHF_NOTPOT = 0x00000002,                 // Hash size is not a power of two.
  RHS_NO_LIBRARY_REPLACEMENT = 0x00000004, // Ignore LD_LIBRARY_PATH.
  RHF_NO_MOVE = 0x00000008,                // DSO address may not be relocated.
  RHF_SGI_ONLY = 0x00000010,               // SGI specific features.
  RHF_GUARANTEE_INIT = 0x00000020,         // Guarantee that .init will finish
                                           // executing before any non-init
                                           // code in DSO is called.
  RHF_DELTA_C_PLUS_PLUS = 0x00000040,      // Contains Delta C++ code.
  RHF_GUARANTEE_START_INIT = 0x00000080,   // Guarantee that .init will start
                                           // executing before any non-init
                                           // code in DSO is called.
  RHF_PIXIE = 0x00000100,                  // Generated by pixie.
  RHF_DEFAULT_DELAY_LOAD = 0x00000200,     // Delay-load DSO by default.
  RHF_REQUICKSTART = 0x00000400,           // Object may be requickstarted
  RHF_REQUICKSTARTED = 0x00000800,         // Object has been requickstarted
  RHF_CORD = 0x00001000,                   // Generated by cord.
  RHF_NO_UNRES_UNDEF = 0x00002000,         // Object contains no unresolved
                                           // undef symbols.
  RHF_RLD_ORDER_SAFE = 0x00004000          // Symbol table is in a safe order.
};

// ElfXX_VerDef structure version (GNU versioning)
enum { VER_DEF_NONE = 0, VER_DEF_CURRENT = 1 };

// VerDef Flags (ElfXX_VerDef::vd_flags)
enum { VER_FLG_BASE = 0x1, VER_FLG_WEAK = 0x2, VER_FLG_INFO = 0x4 };

// Special constants for the version table. (SHT_GNU_versym/.gnu.version)
enum {
  VER_NDX_LOCAL = 0,       // Unversioned local symbol
  VER_NDX_GLOBAL = 1,      // Unversioned global symbol
  VERSYM_VERSION = 0x7fff, // Version Index mask
  VERSYM_HIDDEN = 0x8000   // Hidden bit (non-default version)
};
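
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): a .gnu.version table entry splits into a version index
// (VERSYM_VERSION bits) and a "hidden, non-default version" flag
// (VERSYM_HIDDEN bit).
inline void decodeVersym(uint16_t Versym, uint16_t &Index, bool &Hidden) {
  Index = Versym & VERSYM_VERSION;         // e.g. VER_NDX_GLOBAL
  Hidden = (Versym & VERSYM_HIDDEN) != 0;
}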

// ElfXX_VerNeed structure version (GNU versioning)
enum { VER_NEED_NONE = 0, VER_NEED_CURRENT = 1 };

// SHT_NOTE section types.

// Generic note types.
enum : unsigned {
  NT_VERSION = 1,
  NT_ARCH = 2,
  NT_GNU_BUILD_ATTRIBUTE_OPEN = 0x100,
  NT_GNU_BUILD_ATTRIBUTE_FUNC = 0x101,
};

// Core note types.
enum : unsigned {
  NT_PRSTATUS = 1,
  NT_FPREGSET = 2,
  NT_PRPSINFO = 3,
  NT_TASKSTRUCT = 4,
  NT_AUXV = 6,
  NT_PSTATUS = 10,
  NT_FPREGS = 12,
  NT_PSINFO = 13,
  NT_LWPSTATUS = 16,
  NT_LWPSINFO = 17,
  NT_WIN32PSTATUS = 18,

  NT_PPC_VMX = 0x100,
  NT_PPC_VSX = 0x102,
  NT_PPC_TAR = 0x103,
  NT_PPC_PPR = 0x104,
  NT_PPC_DSCR = 0x105,
  NT_PPC_EBB = 0x106,
  NT_PPC_PMU = 0x107,
  NT_PPC_TM_CGPR = 0x108,
  NT_PPC_TM_CFPR = 0x109,
  NT_PPC_TM_CVMX = 0x10a,
  NT_PPC_TM_CVSX = 0x10b,
  NT_PPC_TM_SPR = 0x10c,
  NT_PPC_TM_CTAR = 0x10d,
  NT_PPC_TM_CPPR = 0x10e,
  NT_PPC_TM_CDSCR = 0x10f,

  NT_386_TLS = 0x200,
  NT_386_IOPERM = 0x201,
  NT_X86_XSTATE = 0x202,

  NT_S390_HIGH_GPRS = 0x300,
  NT_S390_TIMER = 0x301,
  NT_S390_TODCMP = 0x302,
  NT_S390_TODPREG = 0x303,
  NT_S390_CTRS = 0x304,
  NT_S390_PREFIX = 0x305,
  NT_S390_LAST_BREAK = 0x306,
  NT_S390_SYSTEM_CALL = 0x307,
  NT_S390_TDB = 0x308,
  NT_S390_VXRS_LOW = 0x309,
  NT_S390_VXRS_HIGH = 0x30a,
  NT_S390_GS_CB = 0x30b,
  NT_S390_GS_BC = 0x30c,

  NT_ARM_VFP = 0x400,
  NT_ARM_TLS = 0x401,
  NT_ARM_HW_BREAK = 0x402,
  NT_ARM_HW_WATCH = 0x403,
  NT_ARM_SVE = 0x405,
  NT_ARM_PAC_MASK = 0x406,
  NT_ARM_SSVE = 0x40b,
  NT_ARM_ZA = 0x40c,
  NT_ARM_ZT = 0x40d,

  NT_FILE = 0x46494c45,
  NT_PRXFPREG = 0x46e62b7f,
  NT_SIGINFO = 0x53494749,
};

// LLVM-specific notes.
enum {
  NT_LLVM_HWASAN_GLOBALS = 3,
};

// GNU note types.
enum {
  NT_GNU_ABI_TAG = 1,
  NT_GNU_HWCAP = 2,
  NT_GNU_BUILD_ID = 3,
  NT_GNU_GOLD_VERSION = 4,
  NT_GNU_PROPERTY_TYPE_0 = 5,
  FDO_PACKAGING_METADATA = 0xcafe1a7e,
};

// Android note types.
enum {
  NT_ANDROID_TYPE_IDENT = 1,
  NT_ANDROID_TYPE_KUSER = 3,
  NT_ANDROID_TYPE_MEMTAG = 4,
};

// Memory tagging values used in NT_ANDROID_TYPE_MEMTAG notes.
enum {
  // Enumeration to determine the tagging mode. In Android-land, 'SYNC' means
  // running all threads in MTE Synchronous mode, and 'ASYNC' means using the
  // kernel's auto-upgrade feature to allow for either MTE Asynchronous,
  // Asymmetric, or Synchronous mode. This allows silicon vendors to specify,
  // on a per-CPU basis, what 'ASYNC' should mean. Generally, the expectation
  // is "pick the most precise mode that's very fast".
  NT_MEMTAG_LEVEL_NONE = 0,
  NT_MEMTAG_LEVEL_ASYNC = 1,
  NT_MEMTAG_LEVEL_SYNC = 2,
  NT_MEMTAG_LEVEL_MASK = 3,
  // Bits indicating whether the loader should prepare for MTE to be enabled on
  // the heap and/or stack.
  NT_MEMTAG_HEAP = 4,
  NT_MEMTAG_STACK = 8,
};
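
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): the note's descriptor word carries the desired mode in the
// NT_MEMTAG_LEVEL_MASK bits, with NT_MEMTAG_HEAP and NT_MEMTAG_STACK layered
// on top as independent enable bits.
inline bool wantsSyncHeapTagging(uint32_t Desc) {
  return (Desc & NT_MEMTAG_LEVEL_MASK) == NT_MEMTAG_LEVEL_SYNC &&
         (Desc & NT_MEMTAG_HEAP) != 0;
}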

// Property types used in GNU_PROPERTY_TYPE_0 notes.
enum : unsigned {
  GNU_PROPERTY_STACK_SIZE = 1,
  GNU_PROPERTY_NO_COPY_ON_PROTECTED = 2,
  GNU_PROPERTY_AARCH64_FEATURE_1_AND = 0xc0000000,
  GNU_PROPERTY_X86_FEATURE_1_AND = 0xc0000002,

  GNU_PROPERTY_X86_UINT32_OR_LO = 0xc0008000,
  GNU_PROPERTY_X86_FEATURE_2_NEEDED = GNU_PROPERTY_X86_UINT32_OR_LO + 1,
  GNU_PROPERTY_X86_ISA_1_NEEDED = GNU_PROPERTY_X86_UINT32_OR_LO + 2,

  GNU_PROPERTY_X86_UINT32_OR_AND_LO = 0xc0010000,
  GNU_PROPERTY_X86_FEATURE_2_USED = GNU_PROPERTY_X86_UINT32_OR_AND_LO + 1,
  GNU_PROPERTY_X86_ISA_1_USED = GNU_PROPERTY_X86_UINT32_OR_AND_LO + 2,
};

// aarch64 processor feature bits.
enum : unsigned {
  GNU_PROPERTY_AARCH64_FEATURE_1_BTI = 1 << 0,
  GNU_PROPERTY_AARCH64_FEATURE_1_PAC = 1 << 1,
};

// x86 processor feature bits.
enum : unsigned {
  GNU_PROPERTY_X86_FEATURE_1_IBT = 1 << 0,
  GNU_PROPERTY_X86_FEATURE_1_SHSTK = 1 << 1,

  GNU_PROPERTY_X86_FEATURE_2_X86 = 1 << 0,
  GNU_PROPERTY_X86_FEATURE_2_X87 = 1 << 1,
  GNU_PROPERTY_X86_FEATURE_2_MMX = 1 << 2,
  GNU_PROPERTY_X86_FEATURE_2_XMM = 1 << 3,
  GNU_PROPERTY_X86_FEATURE_2_YMM = 1 << 4,
  GNU_PROPERTY_X86_FEATURE_2_ZMM = 1 << 5,
  GNU_PROPERTY_X86_FEATURE_2_FXSR = 1 << 6,
  GNU_PROPERTY_X86_FEATURE_2_XSAVE = 1 << 7,
  GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT = 1 << 8,
  GNU_PROPERTY_X86_FEATURE_2_XSAVEC = 1 << 9,

  GNU_PROPERTY_X86_ISA_1_BASELINE = 1 << 0,
  GNU_PROPERTY_X86_ISA_1_V2 = 1 << 1,
  GNU_PROPERTY_X86_ISA_1_V3 = 1 << 2,
  GNU_PROPERTY_X86_ISA_1_V4 = 1 << 3,
};

// FreeBSD note types.
enum {
  NT_FREEBSD_ABI_TAG = 1,
  NT_FREEBSD_NOINIT_TAG = 2,
  NT_FREEBSD_ARCH_TAG = 3,
  NT_FREEBSD_FEATURE_CTL = 4,
};

// NT_FREEBSD_FEATURE_CTL values (see FreeBSD's sys/sys/elf_common.h).
enum {
  NT_FREEBSD_FCTL_ASLR_DISABLE = 0x00000001,
  NT_FREEBSD_FCTL_PROTMAX_DISABLE = 0x00000002,
  NT_FREEBSD_FCTL_STKGAP_DISABLE = 0x00000004,
  NT_FREEBSD_FCTL_WXNEEDED = 0x00000008,
  NT_FREEBSD_FCTL_LA48 = 0x00000010,
  NT_FREEBSD_FCTL_ASG_DISABLE = 0x00000020,
};

// FreeBSD core note types.
enum {
  NT_FREEBSD_THRMISC = 7,
  NT_FREEBSD_PROCSTAT_PROC = 8,
  NT_FREEBSD_PROCSTAT_FILES = 9,
  NT_FREEBSD_PROCSTAT_VMMAP = 10,
  NT_FREEBSD_PROCSTAT_GROUPS = 11,
  NT_FREEBSD_PROCSTAT_UMASK = 12,
  NT_FREEBSD_PROCSTAT_RLIMIT = 13,
  NT_FREEBSD_PROCSTAT_OSREL = 14,
  NT_FREEBSD_PROCSTAT_PSSTRINGS = 15,
  NT_FREEBSD_PROCSTAT_AUXV = 16,
};

// NetBSD core note types.
enum {
  NT_NETBSDCORE_PROCINFO = 1,
  NT_NETBSDCORE_AUXV = 2,
  NT_NETBSDCORE_LWPSTATUS = 24,
};

// OpenBSD core note types.
enum {
  NT_OPENBSD_PROCINFO = 10,
  NT_OPENBSD_AUXV = 11,
  NT_OPENBSD_REGS = 20,
  NT_OPENBSD_FPREGS = 21,
  NT_OPENBSD_XFPREGS = 22,
  NT_OPENBSD_WCOOKIE = 23,
};

// AMDGPU-specific section indices.
enum {
  SHN_AMDGPU_LDS = 0xff00, // Variable in LDS; symbol encoded like SHN_COMMON
};

// AMD vendor specific notes. (Code Object V2)
enum {
  NT_AMD_HSA_CODE_OBJECT_VERSION = 1,
  NT_AMD_HSA_HSAIL = 2,
  NT_AMD_HSA_ISA_VERSION = 3,
  // Note types with values between 4 and 9 (inclusive) are reserved.
  NT_AMD_HSA_METADATA = 10,
  NT_AMD_HSA_ISA_NAME = 11,
  NT_AMD_PAL_METADATA = 12
};

// AMDGPU vendor specific notes. (Code Object V3)
enum {
  // Note types with values between 0 and 31 (inclusive) are reserved.
  NT_AMDGPU_METADATA = 32
};

// LLVMOMPOFFLOAD specific notes.
enum : unsigned {
  NT_LLVM_OPENMP_OFFLOAD_VERSION = 1,
  NT_LLVM_OPENMP_OFFLOAD_PRODUCER = 2,
  NT_LLVM_OPENMP_OFFLOAD_PRODUCER_VERSION = 3
};

enum {
  GNU_ABI_TAG_LINUX = 0,
  GNU_ABI_TAG_HURD = 1,
  GNU_ABI_TAG_SOLARIS = 2,
  GNU_ABI_TAG_FREEBSD = 3,
  GNU_ABI_TAG_NETBSD = 4,
  GNU_ABI_TAG_SYLLABLE = 5,
  GNU_ABI_TAG_NACL = 6,
};

constexpr const char *ELF_NOTE_GNU = "GNU";

// Android packed relocation group flags.
enum {
  RELOCATION_GROUPED_BY_INFO_FLAG = 1,
  RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG = 2,
  RELOCATION_GROUPED_BY_ADDEND_FLAG = 4,
  RELOCATION_GROUP_HAS_ADDEND_FLAG = 8,
};

// Compressed section header for ELF32.
struct Elf32_Chdr {
  Elf32_Word ch_type;
  Elf32_Word ch_size;
  Elf32_Word ch_addralign;
};

// Compressed section header for ELF64.
struct Elf64_Chdr {
  Elf64_Word ch_type;
  Elf64_Word ch_reserved;
  Elf64_Xword ch_size;
  Elf64_Xword ch_addralign;
};

// Note header for ELF32.
struct Elf32_Nhdr {
  Elf32_Word n_namesz;
  Elf32_Word n_descsz;
  Elf32_Word n_type;
};

// Note header for ELF64.
struct Elf64_Nhdr {
  Elf64_Word n_namesz;
  Elf64_Word n_descsz;
  Elf64_Word n_type;
};

// Legal values for ch_type field of compressed section header.
enum {
  ELFCOMPRESS_ZLIB = 1,            // ZLIB/DEFLATE algorithm.
  ELFCOMPRESS_ZSTD = 2,            // Zstandard algorithm
  ELFCOMPRESS_LOOS = 0x60000000,   // Start of OS-specific.
  ELFCOMPRESS_HIOS = 0x6fffffff,   // End of OS-specific.
  ELFCOMPRESS_LOPROC = 0x70000000, // Start of processor-specific.
  ELFCOMPRESS_HIPROC = 0x7fffffff  // End of processor-specific.
};
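
// An illustrative sketch (hypothetical helper, not part of the upstream
// header): consumers dispatch on ch_type to pick a decompressor; per the
// gABI, ch_size and ch_addralign describe the *uncompressed* section image.
inline bool isKnownCompression(uint32_t ChType) {
  return ChType == ELFCOMPRESS_ZLIB || ChType == ELFCOMPRESS_ZSTD;
}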

/// Convert an architecture name into ELF's e_machine value.
uint16_t convertArchNameToEMachine(StringRef Arch);

/// Convert an ELF's e_machine value into an architecture name.
StringRef convertEMachineToArchName(uint16_t EMachine);

} // end namespace ELF
} // end namespace llvm

#endif // LLVM_BINARYFORMAT_ELF_H

//===-- MsgPackDocument.h - MsgPack Document --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares a class that exposes a simple in-memory representation
/// of a document of MsgPack objects, that can be read from MsgPack, written to
/// MsgPack, and inspected and modified in memory. This is intended to be a
/// lighter-weight (in terms of memory allocations) replacement for
/// MsgPackTypes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MSGPACKDOCUMENT_H
#define LLVM_BINARYFORMAT_MSGPACKDOCUMENT_H

#include "llvm/BinaryFormat/MsgPackReader.h"
#include <map>

namespace llvm {
namespace msgpack {

class ArrayDocNode;
class Document;
class MapDocNode;

/// The kind of a DocNode and its owning Document.
struct KindAndDocument {
  Document *Doc;
  Type Kind;
};

/// A node in a MsgPack Document. This is a simple copyable and
/// passable-by-value type that does not own any memory.
class DocNode {
  friend Document;

public:
  typedef std::map<DocNode, DocNode> MapTy;
  typedef std::vector<DocNode> ArrayTy;

private:
  // Using KindAndDocument allows us to squeeze Kind and a pointer to the
  // owning Document into the same word. Having a pointer to the owning
  // Document makes the API of DocNode more convenient, and allows its use in
  // YAMLIO.
  const KindAndDocument *KindAndDoc;

protected:
  // The union of different values.
  union {
    int64_t Int;
    uint64_t UInt;
    bool Bool;
    double Float;
    StringRef Raw;
    ArrayTy *Array;
    MapTy *Map;
  };

public:
  // Default constructor gives an empty node with no associated Document. All
  // you can do with it is "isEmpty()".
  DocNode() : KindAndDoc(nullptr) {}

  // Type methods
  bool isMap() const { return getKind() == Type::Map; }
  bool isArray() const { return getKind() == Type::Array; }
  bool isScalar() const { return !isMap() && !isArray(); }
  bool isString() const { return getKind() == Type::String; }

  // Accessors. isEmpty() returns true for both a default-constructed DocNode
  // that has no associated Document, and the result of getEmptyNode(), which
  // does have an associated document.
  bool isEmpty() const { return !KindAndDoc || getKind() == Type::Empty; }
  Type getKind() const { return KindAndDoc->Kind; }
  Document *getDocument() const { return KindAndDoc->Doc; }

  int64_t &getInt() {
    assert(getKind() == Type::Int);
    return Int;
  }

  uint64_t &getUInt() {
    assert(getKind() == Type::UInt);
    return UInt;
  }

  bool &getBool() {
    assert(getKind() == Type::Boolean);
    return Bool;
  }

  double &getFloat() {
    assert(getKind() == Type::Float);
    return Float;
  }

  int64_t getInt() const {
    assert(getKind() == Type::Int);
    return Int;
  }

  uint64_t getUInt() const {
    assert(getKind() == Type::UInt);
    return UInt;
  }

  bool getBool() const {
    assert(getKind() == Type::Boolean);
    return Bool;
  }

  double getFloat() const {
    assert(getKind() == Type::Float);
    return Float;
  }

  StringRef getString() const {
    assert(getKind() == Type::String);
    return Raw;
  }

  MemoryBufferRef getBinary() const {
    assert(getKind() == Type::Binary);
    return MemoryBufferRef(Raw, "");
  }

  /// Get an ArrayDocNode for an array node. If Convert, convert the node to an
  /// array node if necessary.
  ArrayDocNode &getArray(bool Convert = false) {
    if (getKind() != Type::Array) {
      assert(Convert);
      convertToArray();
    }
    // This could be a static_cast, except ArrayDocNode is a forward reference.
    return *reinterpret_cast<ArrayDocNode *>(this);
  }

  /// Get a MapDocNode for a map node. If Convert, convert the node to a map
  /// node if necessary.
  MapDocNode &getMap(bool Convert = false) {
    if (getKind() != Type::Map) {
      assert(Convert);
      convertToMap();
    }
    // This could be a static_cast, except MapDocNode is a forward reference.
    return *reinterpret_cast<MapDocNode *>(this);
  }

  /// Comparison operator, used for map keys.
  friend bool operator<(const DocNode &Lhs, const DocNode &Rhs) {
    // This has to cope with one or both of the nodes being default-constructed,
    // such that KindAndDoc is not set.
    if (Rhs.isEmpty())
      return false;
    if (Lhs.KindAndDoc != Rhs.KindAndDoc) {
      if (Lhs.isEmpty())
        return true;
      return (unsigned)Lhs.getKind() < (unsigned)Rhs.getKind();
    }
    switch (Lhs.getKind()) {
    case Type::Int:
      return Lhs.Int < Rhs.Int;
    case Type::UInt:
      return Lhs.UInt < Rhs.UInt;
    case Type::Nil:
      return false;
    case Type::Boolean:
      return Lhs.Bool < Rhs.Bool;
    case Type::Float:
      return Lhs.Float < Rhs.Float;
    case Type::String:
    case Type::Binary:
      return Lhs.Raw < Rhs.Raw;
    default:
      llvm_unreachable("bad map key type");
    }
  }

  /// Equality operator
  friend bool operator==(const DocNode &Lhs, const DocNode &Rhs) {
    return !(Lhs < Rhs) && !(Rhs < Lhs);
  }

  /// Inequality operator
  friend bool operator!=(const DocNode &Lhs, const DocNode &Rhs) {
    return !(Lhs == Rhs);
  }

  /// Convert this node to a string, assuming it is scalar.
  std::string toString() const;

  /// Convert the StringRef and use it to set this DocNode (assuming scalar). If
  /// it is a string, copy the string into the Document's strings list so we do
  /// not rely on S having a lifetime beyond this call. Tag is "" or a YAML tag.
  StringRef fromString(StringRef S, StringRef Tag = "");

  /// Convenience assignment operators. This only works if the destination
  /// DocNode has an associated Document, i.e. it was not constructed using the
  /// default constructor. The string one does not copy, so the string must
  /// remain valid for the lifetime of the Document. Use fromString to avoid
  /// that restriction.
  DocNode &operator=(const char *Val) { return *this = StringRef(Val); }
  DocNode &operator=(StringRef Val);
  DocNode &operator=(MemoryBufferRef Val);
  DocNode &operator=(bool Val);
  DocNode &operator=(int Val);
  DocNode &operator=(unsigned Val);
  DocNode &operator=(int64_t Val);
  DocNode &operator=(uint64_t Val);

private:
  // Private constructor setting KindAndDoc, used by methods in Document.
  DocNode(const KindAndDocument *KindAndDoc) : KindAndDoc(KindAndDoc) {}

  void convertToArray();
  void convertToMap();
};

/// A DocNode that is a map.
class MapDocNode : public DocNode {
public:
  MapDocNode() = default;
  MapDocNode(DocNode &N) : DocNode(N) { assert(getKind() == Type::Map); }

  // Map access methods.
  size_t size() const { return Map->size(); }
  bool empty() const { return !size(); }
  MapTy::iterator begin() { return Map->begin(); }
  MapTy::iterator end() { return Map->end(); }
  MapTy::iterator find(DocNode Key) { return Map->find(Key); }
  MapTy::iterator find(StringRef Key);
  MapTy::iterator erase(MapTy::const_iterator I) { return Map->erase(I); }
  size_t erase(DocNode Key) { return Map->erase(Key); }
  MapTy::iterator erase(MapTy::const_iterator First,
                        MapTy::const_iterator Second) {
    return Map->erase(First, Second);
  }
  /// Member access. The string data must remain valid for the lifetime of the
  /// Document.
  DocNode &operator[](StringRef S);
  /// Member access, with convenience versions for an integer key.
  DocNode &operator[](DocNode Key);
  DocNode &operator[](int Key);
  DocNode &operator[](unsigned Key);
  DocNode &operator[](int64_t Key);
  DocNode &operator[](uint64_t Key);
};

/// A DocNode that is an array.
class ArrayDocNode : public DocNode {
public:
  ArrayDocNode() = default;
  ArrayDocNode(DocNode &N) : DocNode(N) { assert(getKind() == Type::Array); }

  // Array access methods.
  size_t size() const { return Array->size(); }
  bool empty() const { return !size(); }
  DocNode &back() const { return Array->back(); }
  ArrayTy::iterator begin() { return Array->begin(); }
  ArrayTy::iterator end() { return Array->end(); }
  void push_back(DocNode N) {
    assert(N.isEmpty() || N.getDocument() == getDocument());
    Array->push_back(N);
  }

  /// Element access. This extends the array if necessary, with empty nodes.
  DocNode &operator[](size_t Index);
};

/// Simple in-memory representation of a document of msgpack objects with
/// ability to find and create array and map elements.  Does not currently cope
/// with any extension types.
class Document {
  // Maps, arrays and strings used by nodes in the document. No attempt is made
  // to free unused ones.
  std::vector<std::unique_ptr<DocNode::MapTy>> Maps;
  std::vector<std::unique_ptr<DocNode::ArrayTy>> Arrays;
  std::vector<std::unique_ptr<char[]>> Strings;

  // The root node of the document.
  DocNode Root;

  // The KindAndDocument structs pointed to by nodes in the document.
  KindAndDocument KindAndDocs[size_t(Type::Empty) + 1];

  // Whether YAML output uses hex for UInt.
  bool HexMode = false;

public:
  Document() {
    clear();
    for (unsigned T = 0; T != unsigned(Type::Empty) + 1; ++T)
      KindAndDocs[T] = {this, Type(T)};
  }

  /// Get ref to the document's root element.
  DocNode &getRoot() { return Root; }

  /// Restore the Document to an empty state.
  void clear() { getRoot() = getEmptyNode(); }

  /// Create an empty node associated with this Document.
  DocNode getEmptyNode() {
    auto N = DocNode(&KindAndDocs[size_t(Type::Empty)]);
    return N;
  }

  /// Create a nil node associated with this Document.
  DocNode getNode() {
    auto N = DocNode(&KindAndDocs[size_t(Type::Nil)]);
    return N;
  }

  /// Create an Int node associated with this Document.
  DocNode getNode(int64_t V) {
    auto N = DocNode(&KindAndDocs[size_t(Type::Int)]);
    N.Int = V;
    return N;
  }

  /// Create an Int node associated with this Document.
  DocNode getNode(int V) {
    auto N = DocNode(&KindAndDocs[size_t(Type::Int)]);
    N.Int = V;
    return N;
  }

  /// Create a UInt node associated with this Document.
  DocNode getNode(uint64_t V) {
    auto N = DocNode(&KindAndDocs[size_t(Type::UInt)]);
    N.UInt = V;
    return N;
  }

  /// Create a UInt node associated with this Document.
  DocNode getNode(unsigned V) {
    auto N = DocNode(&KindAndDocs[size_t(Type::UInt)]);
    N.UInt = V;
    return N;
  }

  /// Create a Boolean node associated with this Document.
  DocNode getNode(bool V) {
    auto N = DocNode(&KindAndDocs[size_t(Type::Boolean)]);
    N.Bool = V;
    return N;
  }

  /// Create a Float node associated with this Document.
  DocNode getNode(double V) {
    auto N = DocNode(&KindAndDocs[size_t(Type::Float)]);
    N.Float = V;
    return N;
  }

  /// Create a String node associated with this Document. If !Copy, the passed
  /// string must remain valid for the lifetime of the Document.
  DocNode getNode(StringRef V, bool Copy = false) {
    if (Copy)
      V = addString(V);
    auto N = DocNode(&KindAndDocs[size_t(Type::String)]);
    N.Raw = V;
    return N;
  }

  /// Create a String node associated with this Document. If !Copy, the passed
  /// string must remain valid for the lifetime of the Document.
  DocNode getNode(const char *V, bool Copy = false) {
    return getNode(StringRef(V), Copy);
  }

  /// Create a Binary node associated with this Document. If !Copy, the passed
  /// buffer must remain valid for the lifetime of the Document.
  DocNode getNode(MemoryBufferRef V, bool Copy = false) {
    auto Raw = V.getBuffer();
    if (Copy)
      Raw = addString(Raw);
    auto N = DocNode(&KindAndDocs[size_t(Type::Binary)]);
    N.Raw = Raw;
    return N;
  }

  /// Create an empty Map node associated with this Document.
  MapDocNode getMapNode() {
    auto N = DocNode(&KindAndDocs[size_t(Type::Map)]);
    Maps.push_back(std::unique_ptr<DocNode::MapTy>(new DocNode::MapTy));
    N.Map = Maps.back().get();
    return N.getMap();
  }

  /// Create an empty Array node associated with this Document.
  ArrayDocNode getArrayNode() {
    auto N = DocNode(&KindAndDocs[size_t(Type::Array)]);
    Arrays.push_back(std::unique_ptr<DocNode::ArrayTy>(new DocNode::ArrayTy));
    N.Array = Arrays.back().get();
    return N.getArray();
  }

  /// Read a document from a binary msgpack blob, merging into anything already
  /// in the Document. The blob data must remain valid for the lifetime of this
  /// Document (because a string object in the document contains a StringRef
  /// into the original blob). If Multi, then this sets root to an array and
  /// adds top-level objects to it. If !Multi, then it only reads a single
  /// top-level object, even if there are more, and sets root to that. Returns
  /// false if failed due to illegal format or merge error.
  ///
  /// The Merger arg is a callback function that is called when the merge has a
  /// conflict, that is, it is trying to set an item that is already set. If the
  /// conflict cannot be resolved, the callback function returns -1. If the
  /// conflict can be resolved, the callback returns a non-negative number and
  /// sets *DestNode to the resolved node. The returned non-negative number is
  /// significant only for an array node; it is then the array index to start
  /// populating at. That allows Merger to choose whether to merge array
  /// elements (returns 0) or append new elements (returns existing size).
  ///
  /// If SrcNode is an array or map, the resolution must be that *DestNode is an
  /// array or map respectively, although it could be the array or map
  /// (respectively) that was already there. MapKey is the key if *DestNode is a
  /// map entry, a nil node otherwise.
  ///
  /// The default for Merger is to disallow any conflict.
  bool readFromBlob(
      StringRef Blob, bool Multi,
      function_ref<int(DocNode *DestNode, DocNode SrcNode, DocNode MapKey)>
          Merger = [](DocNode *DestNode, DocNode SrcNode, DocNode MapKey) {
            return -1;
          });
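  //
  // A minimal sketch (not part of this API) of a Merger that resolves array
  // conflicts by appending instead of failing; Doc and Blob are hypothetical,
  // and the isArray()/getArray() accessors are assumed to be the DocNode
  // members declared earlier in this header:
  //
  //   Doc.readFromBlob(Blob, /*Multi=*/false,
  //       [](DocNode *DestNode, DocNode SrcNode, DocNode MapKey) {
  //         if (DestNode->isArray() && SrcNode.isArray())
  //           return int(DestNode->getArray().size()); // append new elements
  //         return -1; // disallow all other conflicts
  //       });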

  /// Write a MsgPack document to a binary MsgPack blob.
  void writeToBlob(std::string &Blob);

  /// Copy a string into the Document's strings list, and return the copy that
  /// is owned by the Document.
  StringRef addString(StringRef S) {
    Strings.push_back(std::unique_ptr<char[]>(new char[S.size()]));
    memcpy(&Strings.back()[0], S.data(), S.size());
    return StringRef(&Strings.back()[0], S.size());
  }

  /// Set whether YAML output uses hex for UInt. Default off.
  void setHexMode(bool Val = true) { HexMode = Val; }

  /// Get the HexMode flag.
  bool getHexMode() const { return HexMode; }

  /// Convert MsgPack Document to YAML text.
  void toYAML(raw_ostream &OS);

  /// Read YAML text into the MsgPack document. Returns false on failure.
  bool fromYAML(StringRef S);
};
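
// An illustrative sketch (the key and value are hypothetical, and getMap() is
// assumed to be the DocNode accessor declared earlier in this header) of
// building a one-entry map document with the factory methods above and then
// serializing it:
//
//   msgpack::Document Doc;
//   Doc.getRoot() = Doc.getMapNode();
//   auto Map = Doc.getRoot().getMap();
//   Map[Doc.getNode("answer")] = Doc.getNode(uint64_t(42));
//   std::string Blob;
//   Doc.writeToBlob(Blob);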

} // namespace msgpack
} // namespace llvm

#endif // LLVM_BINARYFORMAT_MSGPACKDOCUMENT_H
//===-- llvm/BinaryFormat/DXContainer.h - The DXBC file format --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines manifest constants for the DXContainer object file format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_DXCONTAINER_H
#define LLVM_BINARYFORMAT_DXCONTAINER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/TargetParser/Triple.h"

#include <stdint.h>

namespace llvm {

// The DXContainer file format is arranged as a header and "parts". Semantically
// parts are similar to sections in other object file formats. The File format
// structure is roughly:

// ┌────────────────────────────────┐
// │             Header             │
// ├────────────────────────────────┤
// │              Part              │
// ├────────────────────────────────┤
// │              Part              │
// ├────────────────────────────────┤
// │              ...               │
// └────────────────────────────────┘

namespace dxbc {

inline Triple::EnvironmentType getShaderStage(uint32_t Kind) {
  assert(Kind <= Triple::Amplification - Triple::Pixel &&
         "Shader kind out of expected range.");
  return static_cast<Triple::EnvironmentType>(Triple::Pixel + Kind);
}

struct Hash {
  uint8_t Digest[16];
};

enum class HashFlags : uint32_t {
  None = 0,           // No flags defined.
  IncludesSource = 1, // This flag indicates that the shader hash was computed
                      // taking into account source information (-Zss)
};

struct ShaderHash {
  uint32_t Flags; // dxbc::HashFlags
  uint8_t Digest[16];

  bool isPopulated();

  void swapBytes() { sys::swapByteOrder(Flags); }
};

struct ContainerVersion {
  uint16_t Major;
  uint16_t Minor;

  void swapBytes() {
    sys::swapByteOrder(Major);
    sys::swapByteOrder(Minor);
  }
};

struct Header {
  uint8_t Magic[4]; // "DXBC"
  Hash FileHash;
  ContainerVersion Version;
  uint32_t FileSize;
  uint32_t PartCount;

  void swapBytes() {
    Version.swapBytes();
    sys::swapByteOrder(FileSize);
    sys::swapByteOrder(PartCount);
  }
  // Structure is followed by part offsets: uint32_t PartOffset[PartCount];
  // The offset is to a PartHeader, which is followed by the Part Data.
};
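
// A minimal sketch (not part of this header) of walking the part table laid
// out as described above; Data is a hypothetical buffer holding a validated,
// host-endian container:
//
//   const char *Base = Data.data();
//   const auto *H = reinterpret_cast<const dxbc::Header *>(Base);
//   const auto *Offsets =
//       reinterpret_cast<const uint32_t *>(Base + sizeof(dxbc::Header));
//   for (uint32_t I = 0; I != H->PartCount; ++I) {
//     const auto *P =
//         reinterpret_cast<const dxbc::PartHeader *>(Base + Offsets[I]);
//     // P->getName() and P->Size describe the part at Offsets[I].
//   }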

/// Use this type to describe the size and type of a DXIL container part.
struct PartHeader {
  uint8_t Name[4];
  uint32_t Size;

  void swapBytes() { sys::swapByteOrder(Size); }
  StringRef getName() const {
    return StringRef(reinterpret_cast<const char *>(&Name[0]), 4);
  }
  // Structure is followed directly by part data: uint8_t PartData[PartSize].
};

struct BitcodeHeader {
  uint8_t Magic[4];     // ASCII "DXIL".
  uint8_t MajorVersion; // DXIL version.
  uint8_t MinorVersion; // DXIL version.
  uint16_t Unused;
  uint32_t Offset; // Offset to LLVM bitcode (from start of header).
  uint32_t Size;   // Size of LLVM bitcode (in bytes).
  // Followed by uint8_t[BitcodeHeader.Size] at &BitcodeHeader + Header.Offset

  void swapBytes() {
    sys::swapByteOrder(MinorVersion);
    sys::swapByteOrder(MajorVersion);
    sys::swapByteOrder(Offset);
    sys::swapByteOrder(Size);
  }
};

struct ProgramHeader {
  uint8_t MinorVersion : 4;
  uint8_t MajorVersion : 4;
  uint8_t Unused;
  uint16_t ShaderKind;
  uint32_t Size; // Size in uint32_t words including this header.
  BitcodeHeader Bitcode;

  void swapBytes() {
    sys::swapByteOrder(ShaderKind);
    sys::swapByteOrder(Size);
    Bitcode.swapBytes();
  }
};

static_assert(sizeof(ProgramHeader) == 24, "ProgramHeader Size incorrect!");

#define CONTAINER_PART(Part) Part,
enum class PartType {
  Unknown = 0,
#include "DXContainerConstants.def"
};
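
// With the entries in DXContainerConstants.def (DXIL, SFI0, HASH, PSV0), the
// include above expands to:
//   enum class PartType { Unknown = 0, DXIL, SFI0, HASH, PSV0, };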

#define SHADER_FLAG(Num, Val, Str) Val = 1ull << Num,
enum class FeatureFlags : uint64_t {
#include "DXContainerConstants.def"
};
static_assert((uint64_t)FeatureFlags::NextUnusedBit <= 1ull << 63,
              "Shader flag bits exceed enum size.");

PartType parsePartType(StringRef S);

struct VertexPSVInfo {
  uint8_t OutputPositionPresent;
  uint8_t Unused[3];

  void swapBytes() {
    // nothing to swap
  }
};

struct HullPSVInfo {
  uint32_t InputControlPointCount;
  uint32_t OutputControlPointCount;
  uint32_t TessellatorDomain;
  uint32_t TessellatorOutputPrimitive;

  void swapBytes() {
    sys::swapByteOrder(InputControlPointCount);
    sys::swapByteOrder(OutputControlPointCount);
    sys::swapByteOrder(TessellatorDomain);
    sys::swapByteOrder(TessellatorOutputPrimitive);
  }
};

struct DomainPSVInfo {
  uint32_t InputControlPointCount;
  uint8_t OutputPositionPresent;
  uint8_t Unused[3];
  uint32_t TessellatorDomain;

  void swapBytes() {
    sys::swapByteOrder(InputControlPointCount);
    sys::swapByteOrder(TessellatorDomain);
  }
};

struct GeometryPSVInfo {
  uint32_t InputPrimitive;
  uint32_t OutputTopology;
  uint32_t OutputStreamMask;
  uint8_t OutputPositionPresent;
  uint8_t Unused[3];

  void swapBytes() {
    sys::swapByteOrder(InputPrimitive);
    sys::swapByteOrder(OutputTopology);
    sys::swapByteOrder(OutputStreamMask);
  }
};

struct PixelPSVInfo {
  uint8_t DepthOutput;
  uint8_t SampleFrequency;
  uint8_t Unused[2];

  void swapBytes() {
    // nothing to swap
  }
};

struct MeshPSVInfo {
  uint32_t GroupSharedBytesUsed;
  uint32_t GroupSharedBytesDependentOnViewID;
  uint32_t PayloadSizeInBytes;
  uint16_t MaxOutputVertices;
  uint16_t MaxOutputPrimitives;

  void swapBytes() {
    sys::swapByteOrder(GroupSharedBytesUsed);
    sys::swapByteOrder(GroupSharedBytesDependentOnViewID);
    sys::swapByteOrder(PayloadSizeInBytes);
    sys::swapByteOrder(MaxOutputVertices);
    sys::swapByteOrder(MaxOutputPrimitives);
  }
};

struct AmplificationPSVInfo {
  uint32_t PayloadSizeInBytes;

  void swapBytes() { sys::swapByteOrder(PayloadSizeInBytes); }
};

union PipelinePSVInfo {
  VertexPSVInfo VS;
  HullPSVInfo HS;
  DomainPSVInfo DS;
  GeometryPSVInfo GS;
  PixelPSVInfo PS;
  MeshPSVInfo MS;
  AmplificationPSVInfo AS;

  void swapBytes(Triple::EnvironmentType Stage) {
    switch (Stage) {
    case Triple::EnvironmentType::Pixel:
      PS.swapBytes();
      break;
    case Triple::EnvironmentType::Vertex:
      VS.swapBytes();
      break;
    case Triple::EnvironmentType::Geometry:
      GS.swapBytes();
      break;
    case Triple::EnvironmentType::Hull:
      HS.swapBytes();
      break;
    case Triple::EnvironmentType::Domain:
      DS.swapBytes();
      break;
    case Triple::EnvironmentType::Mesh:
      MS.swapBytes();
      break;
    case Triple::EnvironmentType::Amplification:
      AS.swapBytes();
      break;
    default:
      break;
    }
  }
};

static_assert(sizeof(PipelinePSVInfo) == 4 * sizeof(uint32_t),
              "Pipeline-specific PSV info must fit in 16 bytes.");

namespace PSV {

namespace v0 {
struct RuntimeInfo {
  PipelinePSVInfo StageInfo;
  uint32_t MinimumWaveLaneCount; // minimum lane count required, 0 if unused
  uint32_t MaximumWaveLaneCount; // maximum lane count required,
                                 // 0xffffffff if unused
  void swapBytes() {
    // Skip the union because we don't know which field it has
    sys::swapByteOrder(MinimumWaveLaneCount);
    sys::swapByteOrder(MaximumWaveLaneCount);
  }

  void swapBytes(Triple::EnvironmentType Stage) { StageInfo.swapBytes(Stage); }
};

struct ResourceBindInfo {
  uint32_t Type;
  uint32_t Space;
  uint32_t LowerBound;
  uint32_t UpperBound;

  void swapBytes() {
    sys::swapByteOrder(Type);
    sys::swapByteOrder(Space);
    sys::swapByteOrder(LowerBound);
    sys::swapByteOrder(UpperBound);
  }
};

} // namespace v0

namespace v1 {

struct MeshRuntimeInfo {
  uint8_t SigPrimVectors; // Primitive output for MS
  uint8_t MeshOutputTopology;
};

union GeometryExtraInfo {
  uint16_t MaxVertexCount;            // MaxVertexCount for GS only (max 1024)
  uint8_t SigPatchConstOrPrimVectors; // Output for HS; Input for DS;
                                      // Primitive output for MS (overlaps
                                      // MeshInfo::SigPrimVectors)
  MeshRuntimeInfo MeshInfo;
};
struct RuntimeInfo : public v0::RuntimeInfo {
  uint8_t ShaderStage; // PSVShaderKind
  uint8_t UsesViewID;
  GeometryExtraInfo GeomData;

  // PSVSignatureElement counts
  uint8_t SigInputElements;
  uint8_t SigOutputElements;
  uint8_t SigPatchConstOrPrimElements;

  // Number of packed vectors per signature
  uint8_t SigInputVectors;
  uint8_t SigOutputVectors[4];

  void swapBytes() {
    // nothing to swap since everything is single-byte or a union field
  }

  void swapBytes(Triple::EnvironmentType Stage) {
    v0::RuntimeInfo::swapBytes(Stage);
    if (Stage == Triple::EnvironmentType::Geometry)
      sys::swapByteOrder(GeomData.MaxVertexCount);
  }
};

} // namespace v1

namespace v2 {
struct RuntimeInfo : public v1::RuntimeInfo {
  uint32_t NumThreadsX;
  uint32_t NumThreadsY;
  uint32_t NumThreadsZ;

  void swapBytes() {
    sys::swapByteOrder(NumThreadsX);
    sys::swapByteOrder(NumThreadsY);
    sys::swapByteOrder(NumThreadsZ);
  }

  void swapBytes(Triple::EnvironmentType Stage) {
    v1::RuntimeInfo::swapBytes(Stage);
  }
};

struct ResourceBindInfo : public v0::ResourceBindInfo {
  uint32_t Kind;
  uint32_t Flags;

  void swapBytes() {
    v0::ResourceBindInfo::swapBytes();
    sys::swapByteOrder(Kind);
    sys::swapByteOrder(Flags);
  }
};

} // namespace v2
} // namespace PSV

} // namespace dxbc
} // namespace llvm

#endif // LLVM_BINARYFORMAT_DXCONTAINER_H
// BinaryFormat/DXContainerConstants.def
#ifdef CONTAINER_PART
CONTAINER_PART(DXIL)
CONTAINER_PART(SFI0)
CONTAINER_PART(HASH)
CONTAINER_PART(PSV0)

#undef CONTAINER_PART
#endif 

#ifdef SHADER_FLAG

SHADER_FLAG(0, Doubles, "Double-precision floating point")
SHADER_FLAG(1, ComputeShadersPlusRawAndStructuredBuffers, "Raw and Structured buffers")
SHADER_FLAG(2, UAVsAtEveryStage, "UAVs at every shader stage")
SHADER_FLAG(3, Max64UAVs, "64 UAV slots")
SHADER_FLAG(4, MinimumPrecision, "Minimum-precision data types")
SHADER_FLAG(5, DX11_1_DoubleExtensions, "Double-precision extensions for 11.1")
SHADER_FLAG(6, DX11_1_ShaderExtensions, "Shader extensions for 11.1")
SHADER_FLAG(7, LEVEL9ComparisonFiltering, "Comparison filtering for feature level 9")
SHADER_FLAG(8, TiledResources, "Tiled resources")
SHADER_FLAG(9, StencilRef, "PS Output Stencil Ref")
SHADER_FLAG(10, InnerCoverage, "PS Inner Coverage")
SHADER_FLAG(11, TypedUAVLoadAdditionalFormats, "Typed UAV Load Additional Formats")
SHADER_FLAG(12, ROVs, "Raster Ordered UAVs")
SHADER_FLAG(13, ViewportAndRTArrayIndexFromAnyShaderFeedingRasterizer, "SV_RenderTargetArrayIndex or SV_ViewportArrayIndex from any shader feeding rasterizer")
SHADER_FLAG(14, WaveOps, "Wave level operations")
SHADER_FLAG(15, Int64Ops, "64-Bit integer")
SHADER_FLAG(16, ViewID, "View Instancing")
SHADER_FLAG(17, Barycentrics, "Barycentrics")
SHADER_FLAG(18, NativeLowPrecision, "Use native low precision")
SHADER_FLAG(19, ShadingRate, "Shading Rate")
SHADER_FLAG(20, Raytracing_Tier_1_1, "Raytracing tier 1.1 features")
SHADER_FLAG(21, SamplerFeedback, "Sampler feedback")
SHADER_FLAG(22, AtomicInt64OnTypedResource, "64-bit Atomics on Typed Resources")
SHADER_FLAG(23, AtomicInt64OnGroupShared, "64-bit Atomics on Group Shared")
SHADER_FLAG(24, DerivativesInMeshAndAmpShaders, "Derivatives in mesh and amplification shaders")
SHADER_FLAG(25, ResourceDescriptorHeapIndexing, "Resource descriptor heap indexing")
SHADER_FLAG(26, SamplerDescriptorHeapIndexing, "Sampler descriptor heap indexing")
SHADER_FLAG(27, RESERVED, "<RESERVED>")
SHADER_FLAG(28, AtomicInt64OnHeapResource, "64-bit Atomics on Heap Resources")
SHADER_FLAG(29, AdvancedTextureOps, "Advanced Texture Ops")
SHADER_FLAG(30, WriteableMSAATextures, "Writeable MSAA Textures")

SHADER_FLAG(31, NextUnusedBit, "Next reserved shader flag bit (not a flag)")

#undef SHADER_FLAG
#endif
//===- MsgPackWriter.h - Simple MsgPack writer ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
///  \file
///  This file contains a MessagePack writer.
///
///  See https://github.com/msgpack/msgpack/blob/master/spec.md for the full
///  specification.
///
///  Typical usage:
///  \code
///  raw_ostream &output = GetOutputStream();
///  msgpack::Writer MPWriter(output);
///  MPWriter.writeNil();
///  MPWriter.write(false);
///  MPWriter.write("string");
///  // ...
///  \endcode
///
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MSGPACKWRITER_H
#define LLVM_BINARYFORMAT_MSGPACKWRITER_H

#include "llvm/Support/EndianStream.h"
#include "llvm/Support/MemoryBufferRef.h"

namespace llvm {

class raw_ostream;

namespace msgpack {

/// Writes MessagePack objects to an output stream, one at a time.
class Writer {
public:
  /// Construct a writer, optionally enabling "Compatibility Mode" as defined
  /// in the MessagePack specification.
  ///
  /// When in \p Compatible mode, the writer will write \c Str16 formats
  /// instead of \c Str8 formats, and will refuse to write any \c Bin formats.
  ///
  /// \param OS stream to output MessagePack objects to.
  /// \param Compatible when set, write in "Compatibility Mode".
  Writer(raw_ostream &OS, bool Compatible = false);

  Writer(const Writer &) = delete;
  Writer &operator=(const Writer &) = delete;

  /// Write a \em Nil to the output stream.
  ///
  /// The output will be the \em nil format.
  void writeNil();

  /// Write a \em Boolean to the output stream.
  ///
  /// The output will be a \em bool format.
  void write(bool b);

  /// Write a signed integer to the output stream.
  ///
  /// The output will be in the smallest possible \em int format.
  ///
  /// The format chosen may be for an unsigned integer.
  void write(int64_t i);

  /// Write an unsigned integer to the output stream.
  ///
  /// The output will be in the smallest possible \em int format.
  void write(uint64_t u);

  /// Write a floating point number to the output stream.
  ///
  /// The output will be in the smallest possible \em float format.
  void write(double d);

  /// Write a string to the output stream.
  ///
  /// The output will be in the smallest possible \em str format.
  void write(StringRef s);

  /// Write a memory buffer to the output stream.
  ///
  /// The output will be in the smallest possible \em bin format.
  ///
  /// \warning Do not use this overload if in \c Compatible mode.
  void write(MemoryBufferRef Buffer);

  /// Write the header for an \em Array of the given size.
  ///
  /// The output will be in the smallest possible \em array format.
  ///
  /// The header contains an identifier for the \em array format used, as well
  /// as an encoding of the size of the array.
  ///
  /// N.B. The caller must subsequently call \c Write an additional \p Size
  /// times to complete the array.
  void writeArraySize(uint32_t Size);

  /// Write the header for a \em Map of the given size.
  ///
  /// The output will be in the smallest possible \em map format.
  ///
  /// The header contains an identifier for the \em map format used, as well
  /// as an encoding of the size of the map.
  ///
  /// N.B. The caller must subsequently call \c Write an additional \c Size*2
  /// times to complete the map. Each even-numbered call to \c Write defines a
  /// new key, and each odd-numbered call defines the previous key's value.
  void writeMapSize(uint32_t Size);
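  //
  // For example (an illustrative sketch, reusing MPWriter from the typical
  // usage block in the file comment above), a two-entry map is written as:
  //
  //   MPWriter.writeMapSize(2);
  //   MPWriter.write(StringRef("key1")); MPWriter.write(uint64_t(1));
  //   MPWriter.write(StringRef("key2")); MPWriter.write(uint64_t(2));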

  /// Write a typed memory buffer (an extension type) to the output stream.
  ///
  /// The output will be in the smallest possible \em ext format.
  void writeExt(int8_t Type, MemoryBufferRef Buffer);

private:
  support::endian::Writer EW;
  bool Compatible;
};

} // end namespace msgpack
} // end namespace llvm

#endif // LLVM_BINARYFORMAT_MSGPACKWRITER_H
//===-- llvm/Support/MachO.def - The MachO file definitions -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Definitions for MachO files
//
//===----------------------------------------------------------------------===//

#ifdef HANDLE_LOAD_COMMAND

HANDLE_LOAD_COMMAND(LC_SEGMENT, 0x00000001u, segment_command)
HANDLE_LOAD_COMMAND(LC_SYMTAB, 0x00000002u, symtab_command)
// LC_SYMSEG is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_SYMSEG, 0x00000003u, symseg_command)
HANDLE_LOAD_COMMAND(LC_THREAD, 0x00000004u, thread_command)
HANDLE_LOAD_COMMAND(LC_UNIXTHREAD, 0x00000005u, thread_command)
// LC_LOADFVMLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_LOADFVMLIB, 0x00000006u, fvmlib_command)
// LC_IDFVMLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_IDFVMLIB, 0x00000007u, fvmlib_command)
// LC_IDENT is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_IDENT, 0x00000008u, ident_command)
// LC_FVMFILE is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_FVMFILE, 0x00000009u, fvmfile_command)
// LC_PREPAGE is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_PREPAGE, 0x0000000Au, load_command)
HANDLE_LOAD_COMMAND(LC_DYSYMTAB, 0x0000000Bu, dysymtab_command)
HANDLE_LOAD_COMMAND(LC_LOAD_DYLIB, 0x0000000Cu, dylib_command)
HANDLE_LOAD_COMMAND(LC_ID_DYLIB, 0x0000000Du, dylib_command)
HANDLE_LOAD_COMMAND(LC_LOAD_DYLINKER, 0x0000000Eu, dylinker_command)
HANDLE_LOAD_COMMAND(LC_ID_DYLINKER, 0x0000000Fu, dylinker_command)
// LC_PREBOUND_DYLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_PREBOUND_DYLIB, 0x00000010u, prebound_dylib_command)
HANDLE_LOAD_COMMAND(LC_ROUTINES, 0x00000011u, routines_command)
HANDLE_LOAD_COMMAND(LC_SUB_FRAMEWORK, 0x00000012u, sub_framework_command)
HANDLE_LOAD_COMMAND(LC_SUB_UMBRELLA, 0x00000013u, sub_umbrella_command)
HANDLE_LOAD_COMMAND(LC_SUB_CLIENT, 0x00000014u, sub_client_command)
HANDLE_LOAD_COMMAND(LC_SUB_LIBRARY, 0x00000015u, sub_library_command)
// LC_TWOLEVEL_HINTS is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_TWOLEVEL_HINTS, 0x00000016u, twolevel_hints_command)
// LC_PREBIND_CKSUM is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_PREBIND_CKSUM, 0x00000017u, prebind_cksum_command)
// LC_LOAD_WEAK_DYLIB is obsolete and no longer supported.
HANDLE_LOAD_COMMAND(LC_LOAD_WEAK_DYLIB, 0x80000018u, dylib_command)
HANDLE_LOAD_COMMAND(LC_SEGMENT_64, 0x00000019u, segment_command_64)
HANDLE_LOAD_COMMAND(LC_ROUTINES_64, 0x0000001Au, routines_command_64)
HANDLE_LOAD_COMMAND(LC_UUID, 0x0000001Bu, uuid_command)
HANDLE_LOAD_COMMAND(LC_RPATH, 0x8000001Cu, rpath_command)
HANDLE_LOAD_COMMAND(LC_CODE_SIGNATURE, 0x0000001Du, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_SEGMENT_SPLIT_INFO, 0x0000001Eu, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_REEXPORT_DYLIB, 0x8000001Fu, dylib_command)
HANDLE_LOAD_COMMAND(LC_LAZY_LOAD_DYLIB, 0x00000020u, dylib_command)
HANDLE_LOAD_COMMAND(LC_ENCRYPTION_INFO, 0x00000021u, encryption_info_command)
HANDLE_LOAD_COMMAND(LC_DYLD_INFO, 0x00000022u, dyld_info_command)
HANDLE_LOAD_COMMAND(LC_DYLD_INFO_ONLY, 0x80000022u, dyld_info_command)
HANDLE_LOAD_COMMAND(LC_LOAD_UPWARD_DYLIB, 0x80000023u, dylib_command)
HANDLE_LOAD_COMMAND(LC_VERSION_MIN_MACOSX, 0x00000024u, version_min_command)
HANDLE_LOAD_COMMAND(LC_VERSION_MIN_IPHONEOS, 0x00000025u, version_min_command)
HANDLE_LOAD_COMMAND(LC_FUNCTION_STARTS, 0x00000026u, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_DYLD_ENVIRONMENT, 0x00000027u, dylinker_command)
HANDLE_LOAD_COMMAND(LC_MAIN, 0x80000028u, entry_point_command)
HANDLE_LOAD_COMMAND(LC_DATA_IN_CODE, 0x00000029u, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_SOURCE_VERSION, 0x0000002Au, source_version_command)
HANDLE_LOAD_COMMAND(LC_DYLIB_CODE_SIGN_DRS, 0x0000002Bu, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_ENCRYPTION_INFO_64, 0x0000002Cu,
                    encryption_info_command_64)
HANDLE_LOAD_COMMAND(LC_LINKER_OPTION, 0x0000002Du, linker_option_command)
HANDLE_LOAD_COMMAND(LC_LINKER_OPTIMIZATION_HINT, 0x0000002Eu, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_VERSION_MIN_TVOS, 0x0000002Fu, version_min_command)
HANDLE_LOAD_COMMAND(LC_VERSION_MIN_WATCHOS, 0x00000030u, version_min_command)
HANDLE_LOAD_COMMAND(LC_NOTE, 0x00000031u, note_command)
HANDLE_LOAD_COMMAND(LC_BUILD_VERSION, 0x00000032u, build_version_command)
HANDLE_LOAD_COMMAND(LC_DYLD_EXPORTS_TRIE, 0x80000033u, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_DYLD_CHAINED_FIXUPS, 0x80000034u, linkedit_data_command)
HANDLE_LOAD_COMMAND(LC_FILESET_ENTRY, 0x80000035u, fileset_entry_command)
HANDLE_LOAD_COMMAND(LC_ATOM_INFO, 0x00000036u, linkedit_data_command)

#endif
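
// An illustrative expansion (the table name is hypothetical): build a
// value-to-name table by defining the macro before including this .def file:
//
//   #define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct) {LCValue, #LCName},
//   static const struct { uint32_t Value; const char *Name; } LCNames[] = {
//   #include "llvm/BinaryFormat/MachO.def"
//   };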

#ifdef LOAD_COMMAND_STRUCT

LOAD_COMMAND_STRUCT(dyld_info_command)
LOAD_COMMAND_STRUCT(dylib_command)
LOAD_COMMAND_STRUCT(dylinker_command)
LOAD_COMMAND_STRUCT(dysymtab_command)
LOAD_COMMAND_STRUCT(encryption_info_command)
LOAD_COMMAND_STRUCT(encryption_info_command_64)
LOAD_COMMAND_STRUCT(entry_point_command)
LOAD_COMMAND_STRUCT(fvmfile_command)
LOAD_COMMAND_STRUCT(fvmlib_command)
LOAD_COMMAND_STRUCT(ident_command)
LOAD_COMMAND_STRUCT(linkedit_data_command)
LOAD_COMMAND_STRUCT(linker_option_command)
LOAD_COMMAND_STRUCT(load_command)
LOAD_COMMAND_STRUCT(prebind_cksum_command)
LOAD_COMMAND_STRUCT(prebound_dylib_command)
LOAD_COMMAND_STRUCT(routines_command)
LOAD_COMMAND_STRUCT(routines_command_64)
LOAD_COMMAND_STRUCT(rpath_command)
LOAD_COMMAND_STRUCT(segment_command)
LOAD_COMMAND_STRUCT(segment_command_64)
LOAD_COMMAND_STRUCT(source_version_command)
LOAD_COMMAND_STRUCT(sub_client_command)
LOAD_COMMAND_STRUCT(sub_framework_command)
LOAD_COMMAND_STRUCT(sub_library_command)
LOAD_COMMAND_STRUCT(sub_umbrella_command)
LOAD_COMMAND_STRUCT(symseg_command)
LOAD_COMMAND_STRUCT(symtab_command)
LOAD_COMMAND_STRUCT(thread_command)
LOAD_COMMAND_STRUCT(twolevel_hints_command)
LOAD_COMMAND_STRUCT(uuid_command)
LOAD_COMMAND_STRUCT(version_min_command)
LOAD_COMMAND_STRUCT(note_command)
LOAD_COMMAND_STRUCT(build_version_command)
LOAD_COMMAND_STRUCT(fileset_entry_command)

#endif

#undef HANDLE_LOAD_COMMAND
#undef LOAD_COMMAND_STRUCT
//===- AMDGPUMetadataVerifier.h - MsgPack Types -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is a verifier for AMDGPU HSA metadata, which can verify both
/// well-typed and untyped metadata. When verifying in non-strict mode,
/// untyped metadata is coerced into the correct type if possible.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
#define LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MsgPackReader.h"

#include <cstddef>
#include <optional>

namespace llvm {

namespace msgpack {
  class DocNode;
  class MapDocNode;
}

namespace AMDGPU {
namespace HSAMD {
namespace V3 {

/// Verifier for AMDGPU HSA metadata.
///
/// Operates in two modes:
///
/// In strict mode, metadata must already be well-typed.
///
/// In non-strict mode, metadata is coerced into expected types when possible.
class MetadataVerifier {
  bool Strict;

  bool verifyScalar(msgpack::DocNode &Node, msgpack::Type SKind,
                    function_ref<bool(msgpack::DocNode &)> verifyValue = {});
  bool verifyInteger(msgpack::DocNode &Node);
  bool verifyArray(msgpack::DocNode &Node,
                   function_ref<bool(msgpack::DocNode &)> verifyNode,
                   std::optional<size_t> Size = std::nullopt);
  bool verifyEntry(msgpack::MapDocNode &MapNode, StringRef Key, bool Required,
                   function_ref<bool(msgpack::DocNode &)> verifyNode);
  bool
  verifyScalarEntry(msgpack::MapDocNode &MapNode, StringRef Key, bool Required,
                    msgpack::Type SKind,
                    function_ref<bool(msgpack::DocNode &)> verifyValue = {});
  bool verifyIntegerEntry(msgpack::MapDocNode &MapNode, StringRef Key,
                          bool Required);
  bool verifyKernelArgs(msgpack::DocNode &Node);
  bool verifyKernel(msgpack::DocNode &Node);

public:
  /// Construct a MetadataVerifier, specifying whether it will operate in \p
  /// Strict mode.
  MetadataVerifier(bool Strict) : Strict(Strict) {}

  /// Verify given HSA metadata.
  ///
  /// \returns True when successful, false when metadata is invalid.
  bool verify(msgpack::DocNode &HSAMetadataRoot);
};
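
// A usage sketch (Doc is a hypothetical msgpack::Document populated
// elsewhere; the strictness choice is illustrative):
//
//   AMDGPU::HSAMD::V3::MetadataVerifier Verifier(/*Strict=*/false);
//   if (!Verifier.verify(Doc.getRoot()))
//     ; // reject the metadata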

} // end namespace V3
} // end namespace HSAMD
} // end namespace AMDGPU
} // end namespace llvm

#endif // LLVM_BINARYFORMAT_AMDGPUMETADATAVERIFIER_H
//===- llvm/BinaryFormat/Swift.def - Swift definitions ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through Swift enumerators.
//
//===----------------------------------------------------------------------===//

#if !(defined HANDLE_SWIFT_SECTION)
#error "Missing macro definition of HANDLE_SWIFT_SECTION"
#endif

#ifndef HANDLE_SWIFT_SECTION
#define HANDLE_SWIFT_SECTION(KIND, MACHO, ELF, COFF)
#endif
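
// An illustrative sketch (the table name is hypothetical): map each section
// kind to its ELF name by defining the macro before including this .def file:
//
//   #define HANDLE_SWIFT_SECTION(KIND, MACHO, ELF, COFF) {#KIND, ELF},
//   static const struct { const char *Kind, *Name; } SwiftSections[] = {
//   #include "llvm/BinaryFormat/Swift.def"
//   };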

HANDLE_SWIFT_SECTION(fieldmd, "__swift5_fieldmd", "swift5_fieldmd", ".sw5flmd")
HANDLE_SWIFT_SECTION(assocty, "__swift5_assocty", "swift5_assocty", ".sw5asty")
HANDLE_SWIFT_SECTION(builtin, "__swift5_builtin", "swift5_builtin", ".sw5bltn")
HANDLE_SWIFT_SECTION(capture, "__swift5_capture", "swift5_capture", ".sw5cptr")
HANDLE_SWIFT_SECTION(typeref, "__swift5_typeref", "swift5_typeref", ".sw5tyrf")
HANDLE_SWIFT_SECTION(reflstr, "__swift5_reflstr", "swift5_reflstr", ".sw5rfst")
HANDLE_SWIFT_SECTION(conform, "__swift5_proto", "swift5_protocol_conformances",
                     ".sw5prtc$B")
HANDLE_SWIFT_SECTION(protocs, "__swift5_protos", "swift5_protocols",
                     ".sw5prt$B")
HANDLE_SWIFT_SECTION(acfuncs, "__swift5_acfuncs", "swift5_accessible_functions",
                     ".sw5acfn$B")
HANDLE_SWIFT_SECTION(mpenum, "__swift5_mpenum", "swift5_mpenum", ".sw5mpen$B")
//===- llvm/BinaryFormat/Magic.h - File magic identification ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MAGIC_H
#define LLVM_BINARYFORMAT_MAGIC_H

#include <system_error>

namespace llvm {
class StringRef;
class Twine;

/// file_magic - An "enum class" enumeration of file types based on magic (the
/// first N bytes of the file).
struct file_magic {
  enum Impl {
    unknown = 0,       ///< Unrecognized file
    bitcode,           ///< Bitcode file
    archive,           ///< ar style archive file
    elf,               ///< ELF Unknown type
    elf_relocatable,   ///< ELF Relocatable object file
    elf_executable,    ///< ELF Executable image
    elf_shared_object, ///< ELF dynamically linked shared lib
    elf_core,          ///< ELF core image
    goff_object,       ///< GOFF object file
    macho_object,      ///< Mach-O Object file
    macho_executable,  ///< Mach-O Executable
    macho_fixed_virtual_memory_shared_lib,    ///< Mach-O Shared Lib, FVM
    macho_core,                               ///< Mach-O Core File
    macho_preload_executable,                 ///< Mach-O Preloaded Executable
    macho_dynamically_linked_shared_lib,      ///< Mach-O dynlinked shared lib
    macho_dynamic_linker,                     ///< The Mach-O dynamic linker
    macho_bundle,                             ///< Mach-O Bundle file
    macho_dynamically_linked_shared_lib_stub, ///< Mach-O Shared lib stub
    macho_dsym_companion,                     ///< Mach-O dSYM companion file
    macho_kext_bundle,                        ///< Mach-O kext bundle file
    macho_universal_binary,                   ///< Mach-O universal binary
    macho_file_set,                           ///< Mach-O file set binary
    minidump,                                 ///< Windows minidump file
    coff_cl_gl_object,   ///< Microsoft cl.exe's intermediate code file
    coff_object,         ///< COFF object file
    coff_import_library, ///< COFF import library
    pecoff_executable,   ///< PECOFF executable file
    windows_resource,    ///< Windows compiled resource file (.res)
    xcoff_object_32,     ///< 32-bit XCOFF object file
    xcoff_object_64,     ///< 64-bit XCOFF object file
    wasm_object,         ///< WebAssembly Object file
    pdb,                 ///< Windows PDB debug info file
    tapi_file,           ///< Text-based Dynamic Library Stub file
    cuda_fatbinary,      ///< CUDA Fatbinary object file
    offload_binary,      ///< LLVM offload object file
    dxcontainer_object,  ///< DirectX container file
  };

  bool is_object() const { return V != unknown; }

  file_magic() = default;
  file_magic(Impl V) : V(V) {}
  operator Impl() const { return V; }

private:
  Impl V = unknown;
};

/// Identify the type of a binary file based on how magical it is.
file_magic identify_magic(StringRef magic);

/// Get and identify \a path's type based on its content.
///
/// @param path Input path.
/// @param result Set to the type of file, or file_magic::unknown.
/// @returns errc::success if result has been successfully set, otherwise a
///          platform-specific error_code.
std::error_code identify_magic(const Twine &path, file_magic &result);
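
// A usage sketch for the overloads above (Path is hypothetical):
//
//   file_magic Magic;
//   if (std::error_code EC = identify_magic(Path, Magic))
//     return EC;
//   if (Magic == file_magic::elf_relocatable)
//     ; // handle an ELF relocatable object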
} // namespace llvm

#endif // LLVM_BINARYFORMAT_MAGIC_H
//===-- llvm/BinaryFormat/COFF.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions used in Windows COFF files.
//
// Structures and enums defined within this file were created using
// information from Microsoft's publicly available PE/COFF format document:
//
// Microsoft Portable Executable and Common Object File Format Specification
// Revision 8.1 - February 15, 2008
//
// As of 5/2/2010, hosted by Microsoft at:
// http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_COFF_H
#define LLVM_BINARYFORMAT_COFF_H

#include "llvm/Support/DataTypes.h"
#include <cassert>

namespace llvm {
namespace COFF {

// The maximum number of sections that a COFF object can have (inclusive).
const int32_t MaxNumberOfSections16 = 65279;

// The PE signature bytes that follow the DOS stub header.
static const char PEMagic[] = {'P', 'E', '\0', '\0'};

static const char BigObjMagic[] = {
    '\xc7', '\xa1', '\xba', '\xd1', '\xee', '\xba', '\xa9', '\x4b',
    '\xaf', '\x20', '\xfa', '\xf6', '\x6a', '\xa4', '\xdc', '\xb8',
};

static const char ClGlObjMagic[] = {
    '\x38', '\xfe', '\xb3', '\x0c', '\xa5', '\xd9', '\xab', '\x4d',
    '\xac', '\x9b', '\xd6', '\xb6', '\x22', '\x26', '\x53', '\xc2',
};

// The signature bytes that start a .res file.
static const char WinResMagic[] = {
    '\x00', '\x00', '\x00', '\x00', '\x20', '\x00', '\x00', '\x00',
    '\xff', '\xff', '\x00', '\x00', '\xff', '\xff', '\x00', '\x00',
};

// Sizes in bytes of various things in the COFF format.
enum {
  Header16Size = 20,
  Header32Size = 56,
  NameSize = 8,
  Symbol16Size = 18,
  Symbol32Size = 20,
  SectionSize = 40,
  RelocationSize = 10
};

struct header {
  uint16_t Machine;
  int32_t NumberOfSections;
  uint32_t TimeDateStamp;
  uint32_t PointerToSymbolTable;
  uint32_t NumberOfSymbols;
  uint16_t SizeOfOptionalHeader;
  uint16_t Characteristics;
};

struct BigObjHeader {
  enum : uint16_t { MinBigObjectVersion = 2 };

  uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
  uint16_t Sig2; ///< Must be 0xFFFF.
  uint16_t Version;
  uint16_t Machine;
  uint32_t TimeDateStamp;
  uint8_t UUID[16];
  uint32_t unused1;
  uint32_t unused2;
  uint32_t unused3;
  uint32_t unused4;
  uint32_t NumberOfSections;
  uint32_t PointerToSymbolTable;
  uint32_t NumberOfSymbols;
};

enum MachineTypes : unsigned {
  MT_Invalid = 0xffff,

  IMAGE_FILE_MACHINE_UNKNOWN = 0x0,
  IMAGE_FILE_MACHINE_AM33 = 0x1D3,
  IMAGE_FILE_MACHINE_AMD64 = 0x8664,
  IMAGE_FILE_MACHINE_ARM = 0x1C0,
  IMAGE_FILE_MACHINE_ARMNT = 0x1C4,
  IMAGE_FILE_MACHINE_ARM64 = 0xAA64,
  IMAGE_FILE_MACHINE_ARM64EC = 0xA641,
  IMAGE_FILE_MACHINE_ARM64X = 0xA64E,
  IMAGE_FILE_MACHINE_EBC = 0xEBC,
  IMAGE_FILE_MACHINE_I386 = 0x14C,
  IMAGE_FILE_MACHINE_IA64 = 0x200,
  IMAGE_FILE_MACHINE_M32R = 0x9041,
  IMAGE_FILE_MACHINE_MIPS16 = 0x266,
  IMAGE_FILE_MACHINE_MIPSFPU = 0x366,
  IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466,
  IMAGE_FILE_MACHINE_POWERPC = 0x1F0,
  IMAGE_FILE_MACHINE_POWERPCFP = 0x1F1,
  IMAGE_FILE_MACHINE_R4000 = 0x166,
  IMAGE_FILE_MACHINE_RISCV32 = 0x5032,
  IMAGE_FILE_MACHINE_RISCV64 = 0x5064,
  IMAGE_FILE_MACHINE_RISCV128 = 0x5128,
  IMAGE_FILE_MACHINE_SH3 = 0x1A2,
  IMAGE_FILE_MACHINE_SH3DSP = 0x1A3,
  IMAGE_FILE_MACHINE_SH4 = 0x1A6,
  IMAGE_FILE_MACHINE_SH5 = 0x1A8,
  IMAGE_FILE_MACHINE_THUMB = 0x1C2,
  IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169
};

template <typename T> bool isArm64EC(T Machine) {
  return Machine == IMAGE_FILE_MACHINE_ARM64EC ||
         Machine == IMAGE_FILE_MACHINE_ARM64X;
}

template <typename T> bool isAnyArm64(T Machine) {
  return Machine == IMAGE_FILE_MACHINE_ARM64 || isArm64EC(Machine);
}

template <typename T> bool is64Bit(T Machine) {
  return Machine == IMAGE_FILE_MACHINE_AMD64 || isAnyArm64(Machine);
}

enum Characteristics : unsigned {
  C_Invalid = 0,

  /// The file does not contain base relocations and must be loaded at its
  /// preferred base. If this cannot be done, the loader will error.
  IMAGE_FILE_RELOCS_STRIPPED = 0x0001,
  /// The file is valid and can be run.
  IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002,
  /// COFF line numbers have been stripped. This is deprecated and should be
  /// 0.
  IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004,
  /// COFF symbol table entries for local symbols have been removed. This is
  /// deprecated and should be 0.
  IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008,
  /// Aggressively trim working set. This is deprecated and must be 0.
  IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010,
  /// Image can handle > 2GiB addresses.
  IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020,
  /// Little endian: the LSB precedes the MSB in memory. This is deprecated
  /// and should be 0.
  IMAGE_FILE_BYTES_REVERSED_LO = 0x0080,
  /// Machine is based on a 32bit word architecture.
  IMAGE_FILE_32BIT_MACHINE = 0x0100,
  /// Debugging info has been removed.
  IMAGE_FILE_DEBUG_STRIPPED = 0x0200,
  /// If the image is on removable media, fully load it and copy it to swap.
  IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400,
  /// If the image is on network media, fully load it and copy it to swap.
  IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800,
  /// The image file is a system file, not a user program.
  IMAGE_FILE_SYSTEM = 0x1000,
  /// The image file is a DLL.
  IMAGE_FILE_DLL = 0x2000,
  /// This file should only be run on a uniprocessor machine.
  IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000,
  /// Big endian: the MSB precedes the LSB in memory. This is deprecated
  /// and should be 0.
  IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
};

enum ResourceTypeID : unsigned {
  RID_Cursor = 1,
  RID_Bitmap = 2,
  RID_Icon = 3,
  RID_Menu = 4,
  RID_Dialog = 5,
  RID_String = 6,
  RID_FontDir = 7,
  RID_Font = 8,
  RID_Accelerator = 9,
  RID_RCData = 10,
  RID_MessageTable = 11,
  RID_Group_Cursor = 12,
  RID_Group_Icon = 14,
  RID_Version = 16,
  RID_DLGInclude = 17,
  RID_PlugPlay = 19,
  RID_VXD = 20,
  RID_AniCursor = 21,
  RID_AniIcon = 22,
  RID_HTML = 23,
  RID_Manifest = 24,
};

struct symbol {
  char Name[NameSize];
  uint32_t Value;
  int32_t SectionNumber;
  uint16_t Type;
  uint8_t StorageClass;
  uint8_t NumberOfAuxSymbols;
};

enum SymbolSectionNumber : int32_t {
  IMAGE_SYM_DEBUG = -2,
  IMAGE_SYM_ABSOLUTE = -1,
  IMAGE_SYM_UNDEFINED = 0
};

/// Storage class tells where and what the symbol represents
enum SymbolStorageClass {
  SSC_Invalid = 0xff,

  IMAGE_SYM_CLASS_END_OF_FUNCTION = -1,  ///< Physical end of function
  IMAGE_SYM_CLASS_NULL = 0,              ///< No symbol
  IMAGE_SYM_CLASS_AUTOMATIC = 1,         ///< Stack variable
  IMAGE_SYM_CLASS_EXTERNAL = 2,          ///< External symbol
  IMAGE_SYM_CLASS_STATIC = 3,            ///< Static
  IMAGE_SYM_CLASS_REGISTER = 4,          ///< Register variable
  IMAGE_SYM_CLASS_EXTERNAL_DEF = 5,      ///< External definition
  IMAGE_SYM_CLASS_LABEL = 6,             ///< Label
  IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7,   ///< Undefined label
  IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8,  ///< Member of structure
  IMAGE_SYM_CLASS_ARGUMENT = 9,          ///< Function argument
  IMAGE_SYM_CLASS_STRUCT_TAG = 10,       ///< Structure tag
  IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11,  ///< Member of union
  IMAGE_SYM_CLASS_UNION_TAG = 12,        ///< Union tag
  IMAGE_SYM_CLASS_TYPE_DEFINITION = 13,  ///< Type definition
  IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14, ///< Undefined static
  IMAGE_SYM_CLASS_ENUM_TAG = 15,         ///< Enumeration tag
  IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16,   ///< Member of enumeration
  IMAGE_SYM_CLASS_REGISTER_PARAM = 17,   ///< Register parameter
  IMAGE_SYM_CLASS_BIT_FIELD = 18,        ///< Bit field
  /// ".bb" or ".eb" - beginning or end of block
  IMAGE_SYM_CLASS_BLOCK = 100,
  /// ".bf" or ".ef" - beginning or end of function
  IMAGE_SYM_CLASS_FUNCTION = 101,
  IMAGE_SYM_CLASS_END_OF_STRUCT = 102, ///< End of structure
  IMAGE_SYM_CLASS_FILE = 103,          ///< File name
  /// Line number, reformatted as symbol
  IMAGE_SYM_CLASS_SECTION = 104,
  IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105, ///< Duplicate tag
  /// External symbol in dmert public lib
  IMAGE_SYM_CLASS_CLR_TOKEN = 107
};

enum SymbolBaseType : unsigned {
  IMAGE_SYM_TYPE_NULL = 0,   ///< No type information or unknown base type.
  IMAGE_SYM_TYPE_VOID = 1,   ///< Used with void pointers and functions.
  IMAGE_SYM_TYPE_CHAR = 2,   ///< A character (signed byte).
  IMAGE_SYM_TYPE_SHORT = 3,  ///< A 2-byte signed integer.
  IMAGE_SYM_TYPE_INT = 4,    ///< A natural integer type on the target.
  IMAGE_SYM_TYPE_LONG = 5,   ///< A 4-byte signed integer.
  IMAGE_SYM_TYPE_FLOAT = 6,  ///< A 4-byte floating-point number.
  IMAGE_SYM_TYPE_DOUBLE = 7, ///< An 8-byte floating-point number.
  IMAGE_SYM_TYPE_STRUCT = 8, ///< A structure.
  IMAGE_SYM_TYPE_UNION = 9,  ///< A union.
  IMAGE_SYM_TYPE_ENUM = 10,  ///< An enumerated type.
  IMAGE_SYM_TYPE_MOE = 11,   ///< A member of enumeration (a specific value).
  IMAGE_SYM_TYPE_BYTE = 12,  ///< A byte; unsigned 1-byte integer.
  IMAGE_SYM_TYPE_WORD = 13,  ///< A word; unsigned 2-byte integer.
  IMAGE_SYM_TYPE_UINT = 14,  ///< An unsigned integer of natural size.
  IMAGE_SYM_TYPE_DWORD = 15  ///< An unsigned 4-byte integer.
};

enum SymbolComplexType : unsigned {
  IMAGE_SYM_DTYPE_NULL = 0,     ///< No complex type; simple scalar variable.
  IMAGE_SYM_DTYPE_POINTER = 1,  ///< A pointer to base type.
  IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type.
  IMAGE_SYM_DTYPE_ARRAY = 3,    ///< An array of base type.

  /// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
  SCT_COMPLEX_TYPE_SHIFT = 4
};
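
// For example, a function returning a natural-size int encodes as
// IMAGE_SYM_TYPE_INT + (IMAGE_SYM_DTYPE_FUNCTION << SCT_COMPLEX_TYPE_SHIFT)
// = 4 + (2 << 4) = 0x24.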

enum AuxSymbolType { IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF = 1 };

struct section {
  char Name[NameSize];
  uint32_t VirtualSize;
  uint32_t VirtualAddress;
  uint32_t SizeOfRawData;
  uint32_t PointerToRawData;
  uint32_t PointerToRelocations;
  uint32_t PointerToLineNumbers;
  uint16_t NumberOfRelocations;
  uint16_t NumberOfLineNumbers;
  uint32_t Characteristics;
};

enum SectionCharacteristics : uint32_t {
  SC_Invalid = 0xffffffff,

  IMAGE_SCN_TYPE_NOLOAD = 0x00000002,
  IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
  IMAGE_SCN_CNT_CODE = 0x00000020,
  IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
  IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
  IMAGE_SCN_LNK_OTHER = 0x00000100,
  IMAGE_SCN_LNK_INFO = 0x00000200,
  IMAGE_SCN_LNK_REMOVE = 0x00000800,
  IMAGE_SCN_LNK_COMDAT = 0x00001000,
  IMAGE_SCN_GPREL = 0x00008000,
  IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
  IMAGE_SCN_MEM_16BIT = 0x00020000,
  IMAGE_SCN_MEM_LOCKED = 0x00040000,
  IMAGE_SCN_MEM_PRELOAD = 0x00080000,
  IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
  IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
  IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
  IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
  IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
  IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
  IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
  IMAGE_SCN_ALIGN_128BYTES = 0x00800000,
  IMAGE_SCN_ALIGN_256BYTES = 0x00900000,
  IMAGE_SCN_ALIGN_512BYTES = 0x00A00000,
  IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000,
  IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000,
  IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000,
  IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000,
  IMAGE_SCN_ALIGN_MASK = 0x00F00000,
  IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
  IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
  IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
  IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
  IMAGE_SCN_MEM_SHARED = 0x10000000,
  IMAGE_SCN_MEM_EXECUTE = 0x20000000,
  IMAGE_SCN_MEM_READ = 0x40000000,
  IMAGE_SCN_MEM_WRITE = 0x80000000
};

struct relocation {
  uint32_t VirtualAddress;
  uint32_t SymbolTableIndex;
  uint16_t Type;
};

enum RelocationTypeI386 : unsigned {
  IMAGE_REL_I386_ABSOLUTE = 0x0000,
  IMAGE_REL_I386_DIR16 = 0x0001,
  IMAGE_REL_I386_REL16 = 0x0002,
  IMAGE_REL_I386_DIR32 = 0x0006,
  IMAGE_REL_I386_DIR32NB = 0x0007,
  IMAGE_REL_I386_SEG12 = 0x0009,
  IMAGE_REL_I386_SECTION = 0x000A,
  IMAGE_REL_I386_SECREL = 0x000B,
  IMAGE_REL_I386_TOKEN = 0x000C,
  IMAGE_REL_I386_SECREL7 = 0x000D,
  IMAGE_REL_I386_REL32 = 0x0014
};

enum RelocationTypeAMD64 : unsigned {
  IMAGE_REL_AMD64_ABSOLUTE = 0x0000,
  IMAGE_REL_AMD64_ADDR64 = 0x0001,
  IMAGE_REL_AMD64_ADDR32 = 0x0002,
  IMAGE_REL_AMD64_ADDR32NB = 0x0003,
  IMAGE_REL_AMD64_REL32 = 0x0004,
  IMAGE_REL_AMD64_REL32_1 = 0x0005,
  IMAGE_REL_AMD64_REL32_2 = 0x0006,
  IMAGE_REL_AMD64_REL32_3 = 0x0007,
  IMAGE_REL_AMD64_REL32_4 = 0x0008,
  IMAGE_REL_AMD64_REL32_5 = 0x0009,
  IMAGE_REL_AMD64_SECTION = 0x000A,
  IMAGE_REL_AMD64_SECREL = 0x000B,
  IMAGE_REL_AMD64_SECREL7 = 0x000C,
  IMAGE_REL_AMD64_TOKEN = 0x000D,
  IMAGE_REL_AMD64_SREL32 = 0x000E,
  IMAGE_REL_AMD64_PAIR = 0x000F,
  IMAGE_REL_AMD64_SSPAN32 = 0x0010
};

enum RelocationTypesARM : unsigned {
  IMAGE_REL_ARM_ABSOLUTE = 0x0000,
  IMAGE_REL_ARM_ADDR32 = 0x0001,
  IMAGE_REL_ARM_ADDR32NB = 0x0002,
  IMAGE_REL_ARM_BRANCH24 = 0x0003,
  IMAGE_REL_ARM_BRANCH11 = 0x0004,
  IMAGE_REL_ARM_TOKEN = 0x0005,
  IMAGE_REL_ARM_BLX24 = 0x0008,
  IMAGE_REL_ARM_BLX11 = 0x0009,
  IMAGE_REL_ARM_REL32 = 0x000A,
  IMAGE_REL_ARM_SECTION = 0x000E,
  IMAGE_REL_ARM_SECREL = 0x000F,
  IMAGE_REL_ARM_MOV32A = 0x0010,
  IMAGE_REL_ARM_MOV32T = 0x0011,
  IMAGE_REL_ARM_BRANCH20T = 0x0012,
  IMAGE_REL_ARM_BRANCH24T = 0x0014,
  IMAGE_REL_ARM_BLX23T = 0x0015,
  IMAGE_REL_ARM_PAIR = 0x0016,
};

enum RelocationTypesARM64 : unsigned {
  IMAGE_REL_ARM64_ABSOLUTE = 0x0000,
  IMAGE_REL_ARM64_ADDR32 = 0x0001,
  IMAGE_REL_ARM64_ADDR32NB = 0x0002,
  IMAGE_REL_ARM64_BRANCH26 = 0x0003,
  IMAGE_REL_ARM64_PAGEBASE_REL21 = 0x0004,
  IMAGE_REL_ARM64_REL21 = 0x0005,
  IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006,
  IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007,
  IMAGE_REL_ARM64_SECREL = 0x0008,
  IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009,
  IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A,
  IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B,
  IMAGE_REL_ARM64_TOKEN = 0x000C,
  IMAGE_REL_ARM64_SECTION = 0x000D,
  IMAGE_REL_ARM64_ADDR64 = 0x000E,
  IMAGE_REL_ARM64_BRANCH19 = 0x000F,
  IMAGE_REL_ARM64_BRANCH14 = 0x0010,
  IMAGE_REL_ARM64_REL32 = 0x0011,
};

enum COMDATType : uint8_t {
  IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
  IMAGE_COMDAT_SELECT_ANY,
  IMAGE_COMDAT_SELECT_SAME_SIZE,
  IMAGE_COMDAT_SELECT_EXACT_MATCH,
  IMAGE_COMDAT_SELECT_ASSOCIATIVE,
  IMAGE_COMDAT_SELECT_LARGEST,
  IMAGE_COMDAT_SELECT_NEWEST
};

// Auxiliary Symbol Formats
struct AuxiliaryFunctionDefinition {
  uint32_t TagIndex;
  uint32_t TotalSize;
  uint32_t PointerToLinenumber;
  uint32_t PointerToNextFunction;
  char unused[2];
};

struct AuxiliarybfAndefSymbol {
  uint8_t unused1[4];
  uint16_t Linenumber;
  uint8_t unused2[6];
  uint32_t PointerToNextFunction;
  uint8_t unused3[2];
};

struct AuxiliaryWeakExternal {
  uint32_t TagIndex;
  uint32_t Characteristics;
  uint8_t unused[10];
};

enum WeakExternalCharacteristics : unsigned {
  IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1,
  IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2,
  IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3,
  IMAGE_WEAK_EXTERN_ANTI_DEPENDENCY = 4
};

struct AuxiliarySectionDefinition {
  uint32_t Length;
  uint16_t NumberOfRelocations;
  uint16_t NumberOfLinenumbers;
  uint32_t CheckSum;
  uint32_t Number;
  uint8_t Selection;
  char unused;
};

struct AuxiliaryCLRToken {
  uint8_t AuxType;
  uint8_t unused1;
  uint32_t SymbolTableIndex;
  char unused2[12];
};

union Auxiliary {
  AuxiliaryFunctionDefinition FunctionDefinition;
  AuxiliarybfAndefSymbol bfAndefSymbol;
  AuxiliaryWeakExternal WeakExternal;
  AuxiliarySectionDefinition SectionDefinition;
};

/// The Import Directory Table.
///
/// There is a single array of these and one entry per imported DLL.
struct ImportDirectoryTableEntry {
  uint32_t ImportLookupTableRVA;
  uint32_t TimeDateStamp;
  uint32_t ForwarderChain;
  uint32_t NameRVA;
  uint32_t ImportAddressTableRVA;
};

/// The PE32 Import Lookup Table.
///
/// There is an array of these for each imported DLL. It represents either
/// the ordinal to import from the target DLL, or a name to lookup and import
/// from the target DLL.
///
/// This also happens to be the same format used by the Import Address Table
/// when it is initially written out to the image.
struct ImportLookupTableEntry32 {
  uint32_t data;

  /// Is this entry specified by ordinal, or name?
  bool isOrdinal() const { return data & 0x80000000; }

  /// Get the ordinal value of this entry. isOrdinal must be true.
  uint16_t getOrdinal() const {
    assert(isOrdinal() && "ILT entry is not an ordinal!");
    return data & 0xFFFF;
  }

  /// Set the ordinal value and set isOrdinal to true.
  void setOrdinal(uint16_t o) {
    data = o;
    data |= 0x80000000;
  }

  /// Get the Hint/Name entry RVA. isOrdinal must be false.
  uint32_t getHintNameRVA() const {
    assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
    return data;
  }

  /// Set the Hint/Name entry RVA and set isOrdinal to false.
  void setHintNameRVA(uint32_t rva) { data = rva; }
};
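
// A small sketch of round-tripping the accessors above:
//
//   ImportLookupTableEntry32 E;
//   E.setOrdinal(42);
//   assert(E.isOrdinal() && E.getOrdinal() == 42);
//   E.setHintNameRVA(0x1000);
//   assert(!E.isOrdinal() && E.getHintNameRVA() == 0x1000);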

/// The DOS compatible header at the front of all PEs.
struct DOSHeader {
  uint16_t Magic;
  uint16_t UsedBytesInTheLastPage;
  uint16_t FileSizeInPages;
  uint16_t NumberOfRelocationItems;
  uint16_t HeaderSizeInParagraphs;
  uint16_t MinimumExtraParagraphs;
  uint16_t MaximumExtraParagraphs;
  uint16_t InitialRelativeSS;
  uint16_t InitialSP;
  uint16_t Checksum;
  uint16_t InitialIP;
  uint16_t InitialRelativeCS;
  uint16_t AddressOfRelocationTable;
  uint16_t OverlayNumber;
  uint16_t Reserved[4];
  uint16_t OEMid;
  uint16_t OEMinfo;
  uint16_t Reserved2[10];
  uint32_t AddressOfNewExeHeader;
};

struct PE32Header {
  enum { PE32 = 0x10b, PE32_PLUS = 0x20b };

  uint16_t Magic;
  uint8_t MajorLinkerVersion;
  uint8_t MinorLinkerVersion;
  uint32_t SizeOfCode;
  uint32_t SizeOfInitializedData;
  uint32_t SizeOfUninitializedData;
  uint32_t AddressOfEntryPoint; // RVA
  uint32_t BaseOfCode;          // RVA
  uint32_t BaseOfData;          // RVA
  uint64_t ImageBase;
  uint32_t SectionAlignment;
  uint32_t FileAlignment;
  uint16_t MajorOperatingSystemVersion;
  uint16_t MinorOperatingSystemVersion;
  uint16_t MajorImageVersion;
  uint16_t MinorImageVersion;
  uint16_t MajorSubsystemVersion;
  uint16_t MinorSubsystemVersion;
  uint32_t Win32VersionValue;
  uint32_t SizeOfImage;
  uint32_t SizeOfHeaders;
  uint32_t CheckSum;
  uint16_t Subsystem;
  // FIXME: This should be DllCharacteristics to match the COFF spec.
  uint16_t DLLCharacteristics;
  uint64_t SizeOfStackReserve;
  uint64_t SizeOfStackCommit;
  uint64_t SizeOfHeapReserve;
  uint64_t SizeOfHeapCommit;
  uint32_t LoaderFlags;
  // FIXME: This should be NumberOfRvaAndSizes to match the COFF spec.
  uint32_t NumberOfRvaAndSize;
};

struct DataDirectory {
  uint32_t RelativeVirtualAddress;
  uint32_t Size;
};

enum DataDirectoryIndex : unsigned {
  EXPORT_TABLE = 0,
  IMPORT_TABLE,
  RESOURCE_TABLE,
  EXCEPTION_TABLE,
  CERTIFICATE_TABLE,
  BASE_RELOCATION_TABLE,
  DEBUG_DIRECTORY,
  ARCHITECTURE,
  GLOBAL_PTR,
  TLS_TABLE,
  LOAD_CONFIG_TABLE,
  BOUND_IMPORT,
  IAT,
  DELAY_IMPORT_DESCRIPTOR,
  CLR_RUNTIME_HEADER,

  NUM_DATA_DIRECTORIES
};

enum WindowsSubsystem : unsigned {
  IMAGE_SUBSYSTEM_UNKNOWN = 0, ///< An unknown subsystem.
  IMAGE_SUBSYSTEM_NATIVE = 1,  ///< Device drivers and native Windows processes
  IMAGE_SUBSYSTEM_WINDOWS_GUI = 2,      ///< The Windows GUI subsystem.
  IMAGE_SUBSYSTEM_WINDOWS_CUI = 3,      ///< The Windows character subsystem.
  IMAGE_SUBSYSTEM_OS2_CUI = 5,          ///< The OS/2 character subsystem.
  IMAGE_SUBSYSTEM_POSIX_CUI = 7,        ///< The POSIX character subsystem.
  IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8,   ///< Native Windows 9x driver.
  IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9,   ///< Windows CE.
  IMAGE_SUBSYSTEM_EFI_APPLICATION = 10, ///< An EFI application.
  IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11, ///< An EFI driver with boot
                                                ///  services.
  IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12,      ///< An EFI driver with run-time
                                                ///  services.
  IMAGE_SUBSYSTEM_EFI_ROM = 13,                 ///< An EFI ROM image.
  IMAGE_SUBSYSTEM_XBOX = 14,                    ///< XBOX.
  IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16 ///< A BCD application.
};

enum DLLCharacteristics : unsigned {
  /// ASLR with 64 bit address space.
  IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020,
  /// DLL can be relocated at load time.
  IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040,
  /// Code integrity checks are enforced.
  IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY = 0x0080,
  /// Image is NX compatible.
  IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100,
  /// Isolation aware, but do not isolate the image.
  IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION = 0x0200,
  /// Does not use structured exception handling (SEH). No SEH handler may be
  /// called in this image.
  IMAGE_DLL_CHARACTERISTICS_NO_SEH = 0x0400,
  /// Do not bind the image.
  IMAGE_DLL_CHARACTERISTICS_NO_BIND = 0x0800,
  /// Image should execute in an AppContainer.
  IMAGE_DLL_CHARACTERISTICS_APPCONTAINER = 0x1000,
  /// A WDM driver.
  IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER = 0x2000,
  /// Image supports Control Flow Guard.
  IMAGE_DLL_CHARACTERISTICS_GUARD_CF = 0x4000,
  /// Terminal Server aware.
  IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000
};

enum ExtendedDLLCharacteristics : unsigned {
  /// Image is CET compatible
  IMAGE_DLL_CHARACTERISTICS_EX_CET_COMPAT = 0x0001
};

enum DebugType : unsigned {
  IMAGE_DEBUG_TYPE_UNKNOWN = 0,
  IMAGE_DEBUG_TYPE_COFF = 1,
  IMAGE_DEBUG_TYPE_CODEVIEW = 2,
  IMAGE_DEBUG_TYPE_FPO = 3,
  IMAGE_DEBUG_TYPE_MISC = 4,
  IMAGE_DEBUG_TYPE_EXCEPTION = 5,
  IMAGE_DEBUG_TYPE_FIXUP = 6,
  IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7,
  IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8,
  IMAGE_DEBUG_TYPE_BORLAND = 9,
  IMAGE_DEBUG_TYPE_RESERVED10 = 10,
  IMAGE_DEBUG_TYPE_CLSID = 11,
  IMAGE_DEBUG_TYPE_VC_FEATURE = 12,
  IMAGE_DEBUG_TYPE_POGO = 13,
  IMAGE_DEBUG_TYPE_ILTCG = 14,
  IMAGE_DEBUG_TYPE_MPX = 15,
  IMAGE_DEBUG_TYPE_REPRO = 16,
  IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS = 20,
};

enum BaseRelocationType : unsigned {
  IMAGE_REL_BASED_ABSOLUTE = 0,
  IMAGE_REL_BASED_HIGH = 1,
  IMAGE_REL_BASED_LOW = 2,
  IMAGE_REL_BASED_HIGHLOW = 3,
  IMAGE_REL_BASED_HIGHADJ = 4,
  IMAGE_REL_BASED_MIPS_JMPADDR = 5,
  IMAGE_REL_BASED_ARM_MOV32A = 5,
  IMAGE_REL_BASED_ARM_MOV32T = 7,
  IMAGE_REL_BASED_MIPS_JMPADDR16 = 9,
  IMAGE_REL_BASED_DIR64 = 10
};

enum ImportType : unsigned {
  IMPORT_CODE = 0,
  IMPORT_DATA = 1,
  IMPORT_CONST = 2
};

enum ImportNameType : unsigned {
  /// Import is by ordinal. This indicates that the value in the Ordinal/Hint
  /// field of the import header is the import's ordinal. If this constant is
  /// not specified, then the Ordinal/Hint field should always be interpreted
  /// as the import's hint.
  IMPORT_ORDINAL = 0,
  /// The import name is identical to the public symbol name
  IMPORT_NAME = 1,
  /// The import name is the public symbol name, but skipping the leading ?,
  /// @, or optionally _.
  IMPORT_NAME_NOPREFIX = 2,
  /// The import name is the public symbol name, but skipping the leading ?,
  /// @, or optionally _, and truncating at the first @.
  IMPORT_NAME_UNDECORATE = 3
};

enum class GuardFlags : uint32_t {
  /// Module performs control flow integrity checks using system-supplied
  /// support.
  CF_INSTRUMENTED = 0x100,
  /// Module performs control flow and write integrity checks.
  CFW_INSTRUMENTED = 0x200,
  /// Module contains valid control flow target metadata.
  CF_FUNCTION_TABLE_PRESENT = 0x400,
  /// Module does not make use of the /GS security cookie.
  SECURITY_COOKIE_UNUSED = 0x800,
  /// Module supports read only delay load IAT.
  PROTECT_DELAYLOAD_IAT = 0x1000,
  /// Delayload import table in its own .didat section (with nothing else in it)
  /// that can be freely reprotected.
  DELAYLOAD_IAT_IN_ITS_OWN_SECTION = 0x2000,
  /// Module contains suppressed export information. This also infers that the
  /// address taken IAT table is also present in the load config.
  CF_EXPORT_SUPPRESSION_INFO_PRESENT = 0x4000,
  /// Module enables suppression of exports.
  CF_ENABLE_EXPORT_SUPPRESSION = 0x8000,
  /// Module contains longjmp target information.
  CF_LONGJUMP_TABLE_PRESENT = 0x10000,
  /// Module contains EH continuation target information.
  EH_CONTINUATION_TABLE_PRESENT = 0x400000,
  /// Mask for the subfield that contains the stride of Control Flow Guard
  /// function table entries (that is, the additional count of bytes per table
  /// entry).
  CF_FUNCTION_TABLE_SIZE_MASK = 0xF0000000,
  CF_FUNCTION_TABLE_SIZE_5BYTES = 0x10000000,
  CF_FUNCTION_TABLE_SIZE_6BYTES = 0x20000000,
  CF_FUNCTION_TABLE_SIZE_7BYTES = 0x30000000,
  CF_FUNCTION_TABLE_SIZE_8BYTES = 0x40000000,
  CF_FUNCTION_TABLE_SIZE_9BYTES = 0x50000000,
  CF_FUNCTION_TABLE_SIZE_10BYTES = 0x60000000,
  CF_FUNCTION_TABLE_SIZE_11BYTES = 0x70000000,
  CF_FUNCTION_TABLE_SIZE_12BYTES = 0x80000000,
  CF_FUNCTION_TABLE_SIZE_13BYTES = 0x90000000,
  CF_FUNCTION_TABLE_SIZE_14BYTES = 0xA0000000,
  CF_FUNCTION_TABLE_SIZE_15BYTES = 0xB0000000,
  CF_FUNCTION_TABLE_SIZE_16BYTES = 0xC0000000,
  CF_FUNCTION_TABLE_SIZE_17BYTES = 0xD0000000,
  CF_FUNCTION_TABLE_SIZE_18BYTES = 0xE0000000,
  CF_FUNCTION_TABLE_SIZE_19BYTES = 0xF0000000,
};
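
// Editor's sketch (not part of the original header): the top nibble of the
// guard flags holds the Control Flow Guard function table stride, i.e. the
// count of extra bytes appended to each table entry.
inline uint32_t getGuardCFFunctionTableStride(uint32_t GuardFlagsValue) {
  return (GuardFlagsValue &
          static_cast<uint32_t>(GuardFlags::CF_FUNCTION_TABLE_SIZE_MASK)) >>
         28;
}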

struct ImportHeader {
  uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
  uint16_t Sig2; ///< Must be 0xFFFF.
  uint16_t Version;
  uint16_t Machine;
  uint32_t TimeDateStamp;
  uint32_t SizeOfData;
  uint16_t OrdinalHint;
  uint16_t TypeInfo;

  ImportType getType() const { return static_cast<ImportType>(TypeInfo & 0x3); }

  ImportNameType getNameType() const {
    return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 2);
  }
};
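
// Editor's sketch: TypeInfo packs the ImportType into bits 0-1 and the
// ImportNameType into bits 2-4, matching getType() and getNameType() above.
inline bool isImportByOrdinal(const ImportHeader &H) {
  // For IMPORT_ORDINAL imports, OrdinalHint holds the ordinal itself rather
  // than a hint into the export name table.
  return H.getNameType() == IMPORT_ORDINAL;
}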

enum CodeViewIdentifiers {
  DEBUG_SECTION_MAGIC = 0x4,
  DEBUG_HASHES_SECTION_MAGIC = 0x133C9C5
};

// These flags show up in the @feat.00 symbol. They appear to be some kind of
// compiler features bitfield read by link.exe.
enum Feat00Flags : uint32_t {
  // Object is compatible with /safeseh.
  SafeSEH = 0x1,
  // Object was compiled with /GS.
  GuardStack = 0x100,
  // Object was compiled with /sdl.
  SDL = 0x200,
  // Object was compiled with /guard:cf.
  GuardCF = 0x800,
  // Object was compiled with /guard:ehcont.
  GuardEHCont = 0x4000,
  // Object was compiled with /kernel.
  Kernel = 0x40000000,
};

inline bool isReservedSectionNumber(int32_t SectionNumber) {
  return SectionNumber <= 0;
}

/// Encode section name based on string table offset.
/// The size of Out must be at least COFF::NameSize.
bool encodeSectionName(char *Out, uint64_t Offset);
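
// Usage sketch (editor's addition; `Offset` stands for a hypothetical string
// table offset):
//
//   char Name[COFF::NameSize];
//   if (COFF::encodeSectionName(Name, Offset))
//     ; // Name now holds the encoded section name.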

} // End namespace COFF.
} // End namespace llvm.

#endif
//===-- MsgPack.h - MessagePack Constants -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains constants used for implementing MessagePack support.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MSGPACK_H
#define LLVM_BINARYFORMAT_MSGPACK_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Endian.h"

namespace llvm {
namespace msgpack {

/// The endianness of all multi-byte encoded values in MessagePack.
constexpr support::endianness Endianness = support::big;

/// The first byte identifiers of MessagePack object formats.
namespace FirstByte {
#define HANDLE_MP_FIRST_BYTE(ID, NAME) constexpr uint8_t NAME = ID;
#include "llvm/BinaryFormat/MsgPack.def"
}

/// Most significant bits used to identify "Fix" variants in MessagePack.
///
/// For example, FixStr objects encode their size in the five least significant
/// bits of their first byte, which is identified by the bit pattern "101" in
/// the three most significant bits. So FixBits::String contains 0b10100000.
///
/// A corresponding mask of the bit pattern is found in \c FixBitsMask.
namespace FixBits {
#define HANDLE_MP_FIX_BITS(ID, NAME) constexpr uint8_t NAME = ID;
#include "llvm/BinaryFormat/MsgPack.def"
}

/// Mask of bits used to identify "Fix" variants in MessagePack.
///
/// For example, FixStr objects encode their size in the five least significant
/// bits of their first byte, which is identified by the bit pattern "101" in
/// the three most significant bits. So FixBitsMask::String contains
/// 0b11100000.
///
/// The corresponding bit pattern to mask for is found in FixBits.
namespace FixBitsMask {
#define HANDLE_MP_FIX_BITS_MASK(ID, NAME) constexpr uint8_t NAME = ID;
#include "llvm/BinaryFormat/MsgPack.def"
}

/// The maximum value or size encodable in "Fix" variants of formats.
///
/// For example, FixStr objects encode their size in the five least significant
/// bits of their first byte, so the largest encodable size is 0b00011111.
namespace FixMax {
#define HANDLE_MP_FIX_MAX(ID, NAME) constexpr uint8_t NAME = ID;
#include "llvm/BinaryFormat/MsgPack.def"
}
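
// Editor's sketch, assuming the .def file provides `String` entries for
// FixBits, FixBitsMask, and FixMax as the comments above describe: classify
// a byte as a FixStr header and extract the encoded length.
inline bool isFixStr(uint8_t Byte) {
  return (Byte & FixBitsMask::String) == FixBits::String;
}
inline uint8_t getFixStrLength(uint8_t Byte) {
  // The size occupies the bits outside the mask; it is at most FixMax::String.
  return Byte & ~FixBitsMask::String;
}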

/// The exact size encodable in "Fix" variants of formats.
///
/// The only objects for which an exact size makes sense are of Extension type.
///
/// For example, FixExt4 stores an extension type containing exactly four bytes.
namespace FixLen {
#define HANDLE_MP_FIX_LEN(ID, NAME) constexpr uint8_t NAME = ID;
#include "llvm/BinaryFormat/MsgPack.def"
}

/// The minimum value or size encodable in "Fix" variants of formats.
///
/// The only object for which a minimum makes sense is a negative FixNum.
///
/// Negative FixNum objects encode their signed integer value in one byte, but
/// they must have the pattern "111" as their three most significant bits. This
/// means all values are negative, and the smallest representable value is
/// 0b11100000.
namespace FixMin {
#define HANDLE_MP_FIX_MIN(ID, NAME) constexpr int8_t NAME = ID;
#include "llvm/BinaryFormat/MsgPack.def"
}

} // end namespace msgpack
} // end namespace llvm

#endif // LLVM_BINARYFORMAT_MSGPACK_H
//===-- llvm/BinaryFormat/XCOFF.h - The XCOFF file format -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines manifest constants for the XCOFF object file format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_XCOFF_H
#define LLVM_BINARYFORMAT_XCOFF_H

#include <stddef.h>
#include <stdint.h>

namespace llvm {
class StringRef;
template <unsigned> class SmallString;
template <typename T> class Expected;

namespace XCOFF {

// Constants used in the XCOFF definition.

constexpr size_t FileNamePadSize = 6;
constexpr size_t NameSize = 8;
constexpr size_t FileHeaderSize32 = 20;
constexpr size_t FileHeaderSize64 = 24;
constexpr size_t AuxFileHeaderSize32 = 72;
constexpr size_t AuxFileHeaderSize64 = 110;
constexpr size_t AuxFileHeaderSizeShort = 28;
constexpr size_t SectionHeaderSize32 = 40;
constexpr size_t SectionHeaderSize64 = 72;
constexpr size_t SymbolTableEntrySize = 18;
constexpr size_t RelocationSerializationSize32 = 10;
constexpr size_t RelocationSerializationSize64 = 14;
constexpr size_t ExceptionSectionEntrySize32 = 6;
constexpr size_t ExceptionSectionEntrySize64 = 10;
constexpr uint16_t RelocOverflow = 65535;
constexpr uint8_t AllocRegNo = 31;

enum ReservedSectionNum : int16_t { N_DEBUG = -2, N_ABS = -1, N_UNDEF = 0 };

enum MagicNumber : uint16_t { XCOFF32 = 0x01DF, XCOFF64 = 0x01F7 };

// Masks for packing/unpacking the r_rsize field of relocations.

// The msb is used to indicate if the bits being relocated are signed or
// unsigned.
static constexpr uint8_t XR_SIGN_INDICATOR_MASK = 0x80;
// The 2nd msb is used to indicate that the binder has replaced/modified the
// original instruction.
static constexpr uint8_t XR_FIXUP_INDICATOR_MASK = 0x40;
// The remaining bits specify the bit length of the relocatable reference
// minus one.
static constexpr uint8_t XR_BIASED_LENGTH_MASK = 0x3f;
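
// Editor's sketch: unpacking an r_rsize byte with the masks above. The
// stored length is biased by one, so add one to recover the width in bits.
inline bool isRelocationSigned(uint8_t RSize) {
  return (RSize & XR_SIGN_INDICATOR_MASK) != 0;
}
inline uint8_t getRelocatedBitLength(uint8_t RSize) {
  return (RSize & XR_BIASED_LENGTH_MASK) + 1;
}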

// This field only exists in the XCOFF64 definition.
enum AuxHeaderFlags64 : uint16_t {
  SHR_SYMTAB = 0x8000,  ///< At exec time, create shared symbol table for program
                        ///< (main program only).
  FORK_POLICY = 0x4000, ///< Forktree policy specified (main program only).
  FORK_COR = 0x2000     ///< If _AOUT_FORK_POLICY is set, specify copy-on-reference
                        ///< if this bit is set. Specify copy-on-write otherwise.
                        ///< If _AOUT_FORK_POLICY is 0, this bit is reserved for
                        ///< future use and should be set to 0.
};

enum XCOFFInterpret : uint16_t {
  OLD_XCOFF_INTERPRET = 1,
  NEW_XCOFF_INTERPRET = 2
};

enum FileFlag : uint16_t {
  F_RELFLG = 0x0001,    ///< relocation info stripped from file
  F_EXEC = 0x0002,      ///< file is executable (i.e., it
                        ///< has a loader section)
  F_LNNO = 0x0004,      ///< line numbers stripped from file
  F_LSYMS = 0x0008,     ///< local symbols stripped from file
  F_FDPR_PROF = 0x0010, ///< file was profiled with FDPR
  F_FDPR_OPTI = 0x0020, ///< file was reordered with FDPR
  F_DSA = 0x0040,       ///< file uses Dynamic Segment Allocation (32-bit
                        ///< only)
  F_DEP_1 = 0x0080,     ///< Data Execution Protection bit 1
  F_VARPG = 0x0100,     ///< executable requests using variable size pages
  F_LPTEXT = 0x0400,    ///< executable requires large pages for text
  F_LPDATA = 0x0800,    ///< executable requires large pages for data
  F_DYNLOAD = 0x1000,   ///< file is dynamically loadable and
                        ///< executable (equivalent to F_EXEC on AIX)
  F_SHROBJ = 0x2000,    ///< file is a shared object
  F_LOADONLY =
      0x4000,      ///< file can be loaded by the system loader, but it is
                   ///< ignored by the linker if it is a member of an archive.
  F_DEP_2 = 0x8000 ///< Data Execution Protection bit 2
};

// x_smclas field of x_csect from system header: /usr/include/syms.h
/// Storage Mapping Class definitions.
enum StorageMappingClass : uint8_t {
  //     READ ONLY CLASSES
  XMC_PR = 0,      ///< Program Code
  XMC_RO = 1,      ///< Read Only Constant
  XMC_DB = 2,      ///< Debug Dictionary Table
  XMC_GL = 6,      ///< Global Linkage (Interfile Interface Code)
  XMC_XO = 7,      ///< Extended Operation (Pseudo Machine Instruction)
  XMC_SV = 8,      ///< Supervisor Call (32-bit process only)
  XMC_SV64 = 17,   ///< Supervisor Call for 64-bit process
  XMC_SV3264 = 18, ///< Supervisor Call for both 32- and 64-bit processes
  XMC_TI = 12,     ///< Traceback Index csect
  XMC_TB = 13,     ///< Traceback Table csect

  //       READ WRITE CLASSES
  XMC_RW = 5,   ///< Read Write Data
  XMC_TC0 = 15, ///< TOC Anchor for TOC Addressability
  XMC_TC = 3,   ///< General TOC item
  XMC_TD = 16,  ///< Scalar data item in the TOC
  XMC_DS = 10,  ///< Descriptor csect
  XMC_UA = 4,   ///< Unclassified - Treated as Read Write
  XMC_BS = 9,   ///< BSS class (uninitialized static internal)
  XMC_UC = 11,  ///< Un-named Fortran Common

  XMC_TL = 20, ///< Initialized thread-local variable
  XMC_UL = 21, ///< Uninitialized thread-local variable
  XMC_TE = 22  ///< Symbol mapped at the end of TOC
};

// Flags for defining the section type. Masks for use with the (signed, 32-bit)
// s_flags field of the section header structure, selecting for values in the
// lower 16 bits. Defined in the system header `scnhdr.h`.
enum SectionTypeFlags : int32_t {
  STYP_PAD = 0x0008,
  STYP_DWARF = 0x0010,
  STYP_TEXT = 0x0020,
  STYP_DATA = 0x0040,
  STYP_BSS = 0x0080,
  STYP_EXCEPT = 0x0100,
  STYP_INFO = 0x0200,
  STYP_TDATA = 0x0400,
  STYP_TBSS = 0x0800,
  STYP_LOADER = 0x1000,
  STYP_DEBUG = 0x2000,
  STYP_TYPCHK = 0x4000,
  STYP_OVRFLO = 0x8000
};

/// Values for defining the section subtype of sections of type STYP_DWARF as
/// they would appear in the (signed, 32-bit) s_flags field of the section
/// header structure, contributing to the 16 most significant bits. Defined in
/// the system header `scnhdr.h`.
enum DwarfSectionSubtypeFlags : int32_t {
  SSUBTYP_DWINFO = 0x1'0000,  ///< DWARF info section
  SSUBTYP_DWLINE = 0x2'0000,  ///< DWARF line section
  SSUBTYP_DWPBNMS = 0x3'0000, ///< DWARF pubnames section
  SSUBTYP_DWPBTYP = 0x4'0000, ///< DWARF pubtypes section
  SSUBTYP_DWARNGE = 0x5'0000, ///< DWARF aranges section
  SSUBTYP_DWABREV = 0x6'0000, ///< DWARF abbrev section
  SSUBTYP_DWSTR = 0x7'0000,   ///< DWARF str section
  SSUBTYP_DWRNGES = 0x8'0000, ///< DWARF ranges section
  SSUBTYP_DWLOC = 0x9'0000,   ///< DWARF loc section
  SSUBTYP_DWFRAME = 0xA'0000, ///< DWARF frame section
  SSUBTYP_DWMAC = 0xB'0000    ///< DWARF macinfo section
};
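
// Editor's sketch: an STYP_DWARF section's s_flags value combines the type
// flag in the lower 16 bits with one SSUBTYP_* subtype in the upper 16 bits.
inline int32_t makeDwarfSectionFlags(DwarfSectionSubtypeFlags Subtype) {
  return STYP_DWARF | Subtype;
}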

// STORAGE CLASSES, n_sclass field of syment.
// The values come from `storclass.h` and `dbxstclass.h`.
enum StorageClass : uint8_t {
  // Storage classes used for symbolic debugging symbols.
  C_FILE = 103,  // File name
  C_BINCL = 108, // Beginning of include file
  C_EINCL = 109, // Ending of include file
  C_GSYM = 128,  // Global variable
  C_STSYM = 133, // Statically allocated symbol
  C_BCOMM = 135, // Beginning of common block
  C_ECOMM = 137, // End of common block
  C_ENTRY = 141, // Alternate entry
  C_BSTAT = 143, // Beginning of static block
  C_ESTAT = 144, // End of static block
  C_GTLS = 145,  // Global thread-local variable
  C_STTLS = 146, // Static thread-local variable

  // Storage classes used for DWARF symbols.
  C_DWARF = 112, // DWARF section symbol

  // Storage classes used for absolute symbols.
  C_LSYM = 129,  // Automatic variable allocated on stack
  C_PSYM = 130,  // Argument to subroutine allocated on stack
  C_RSYM = 131,  // Register variable
  C_RPSYM = 132, // Argument to function or procedure stored in register
  C_ECOML = 136, // Local member of common block
  C_FUN = 142,   // Function or procedure

  // Storage classes used for undefined external symbols or
  // symbols of general sections.
  C_EXT = 2,       // External symbol
  C_WEAKEXT = 111, // Weak external symbol

  // Storage classes used for symbols of general sections.
  C_NULL = 0,
  C_STAT = 3,     // Static
  C_BLOCK = 100,  // ".bb" or ".eb"
  C_FCN = 101,    // ".bf" or ".ef"
  C_HIDEXT = 107, // Un-named external symbol
  C_INFO = 110,   // Comment string in .info section
  C_DECL = 140,   // Declaration of object (type)

  // Storage classes - Obsolete/Undocumented.
  C_AUTO = 1,     // Automatic variable
  C_REG = 4,      // Register variable
  C_EXTDEF = 5,   // External definition
  C_LABEL = 6,    // Label
  C_ULABEL = 7,   // Undefined label
  C_MOS = 8,      // Member of structure
  C_ARG = 9,      // Function argument
  C_STRTAG = 10,  // Structure tag
  C_MOU = 11,     // Member of union
  C_UNTAG = 12,   // Union tag
  C_TPDEF = 13,   // Type definition
  C_USTATIC = 14, // Undefined static
  C_ENTAG = 15,   // Enumeration tag
  C_MOE = 16,     // Member of enumeration
  C_REGPARM = 17, // Register parameter
  C_FIELD = 18,   // Bit field
  C_EOS = 102,    // End of structure
  C_LINE = 104,
  C_ALIAS = 105,  // Duplicate tag
  C_HIDDEN = 106, // Special storage class for external
  C_EFCN = 255,   // Physical end of function

  // Storage classes - reserved
  C_TCSYM = 134 // Reserved
};

// Flags for defining the symbol type. Values to be encoded into the lower 3
// bits of the (unsigned, 8-bit) x_smtyp field of csect auxiliary symbol table
// entries. Defined in the system header `syms.h`.
enum SymbolType : uint8_t {
  XTY_ER = 0, ///< External reference.
  XTY_SD = 1, ///< Csect definition for initialized storage.
  XTY_LD = 2, ///< Label definition.
              ///< Defines an entry point to an initialized csect.
  XTY_CM = 3  ///< Common csect definition. For uninitialized storage.
};

/// Values for visibility as they would appear when encoded in the high 4 bits
/// of the 16-bit unsigned n_type field of symbol table entries. Valid for
/// 32-bit XCOFF only when the vstamp in the auxiliary header is greater than 1.
enum VisibilityType : uint16_t {
  SYM_V_UNSPECIFIED = 0x0000,
  SYM_V_INTERNAL = 0x1000,
  SYM_V_HIDDEN = 0x2000,
  SYM_V_PROTECTED = 0x3000,
  SYM_V_EXPORTED = 0x4000
};

constexpr uint16_t VISIBILITY_MASK = 0x7000;
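
// Editor's sketch: select the visibility bits from the high nibble of a
// symbol table entry's n_type field before comparing against SYM_V_* values.
inline VisibilityType getVisibilityType(uint16_t NType) {
  return static_cast<VisibilityType>(NType & VISIBILITY_MASK);
}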

// Relocation types, defined in `/usr/include/reloc.h`.
enum RelocationType : uint8_t {
  R_POS = 0x00, ///< Positive relocation. Provides the address of the referenced
                ///< symbol.
  R_RL = 0x0c,  ///< Positive indirect load relocation. Modifiable instruction.
  R_RLA = 0x0d, ///< Positive load address relocation. Modifiable instruction.

  R_NEG = 0x01, ///< Negative relocation. Provides the negative of the address
                ///< of the referenced symbol.
  R_REL = 0x02, ///< Relative to self relocation. Provides a displacement value
                ///< between the address of the referenced symbol and the
                ///< address being relocated.

  R_TOC = 0x03, ///< Relative to the TOC relocation. Provides a displacement
                ///< that is the difference between the address of the
                ///< referenced symbol and the TOC anchor csect.
  R_TRL = 0x12, ///< TOC relative indirect load relocation. Similar to R_TOC,
                ///< but not modifiable instruction.

  R_TRLA =
      0x13, ///< Relative to the TOC or to the thread-local storage base
            ///< relocation. Compilers are not permitted to generate this
            ///< relocation type. It is the result of a reversible
            ///< transformation by the linker of an R_TOC relation that turned a
            ///< load instruction into an add-immediate instruction.

  R_GL = 0x05, ///< Global linkage-external TOC address relocation. Provides the
               ///< address of the external TOC associated with a defined
               ///< external symbol.
  R_TCL = 0x06, ///< Local object TOC address relocation. Provides the address
                ///< of the local TOC entry of a defined external symbol.

  R_REF = 0x0f, ///< A non-relocating relocation. Used to prevent the binder
                ///< from garbage collecting a csect (such as code used for
                ///< dynamic initialization of non-local statics) for which
                ///< another csect has an implicit dependency.

  R_BA = 0x08, ///< Branch absolute relocation. Provides the address of the
               ///< referenced symbol. References a non-modifiable instruction.
  R_BR = 0x0a, ///< Branch relative to self relocation. Provides the
               ///< displacement that is the difference between the address of
               ///< the referenced symbol and the address of the referenced
               ///< branch instruction. References a non-modifiable instruction.
  R_RBA = 0x18, ///< Branch absolute relocation. Similar to R_BA but
                ///< references a modifiable instruction.
  R_RBR = 0x1a, ///< Branch relative to self relocation. Similar to the R_BR
                ///< relocation type, but references a modifiable instruction.

  R_TLS = 0x20,    ///< General-dynamic reference to TLS symbol.
  R_TLS_IE = 0x21, ///< Initial-exec reference to TLS symbol.
  R_TLS_LD = 0x22, ///< Local-dynamic reference to TLS symbol.
  R_TLS_LE = 0x23, ///< Local-exec reference to TLS symbol.
  R_TLSM = 0x24,  ///< Module reference to TLS. Provides a handle for the module
                  ///< containing the referenced symbol.
  R_TLSML = 0x25, ///< Module reference to the local TLS storage.

  R_TOCU = 0x30, ///< Relative to TOC upper. Specifies the high-order 16 bits of
                 ///< a large code model TOC-relative relocation.
  R_TOCL = 0x31 ///< Relative to TOC lower. Specifies the low-order 16 bits of a
                ///< large code model TOC-relative relocation.
};

enum CFileStringType : uint8_t {
  XFT_FN = 0,  ///< Specifies the source-file name.
  XFT_CT = 1,  ///< Specifies the compiler time stamp.
  XFT_CV = 2,  ///< Specifies the compiler version number.
  XFT_CD = 128 ///< Specifies compiler-defined information.
};

enum CFileLangId : uint8_t {
  TB_C = 0,        ///< C language.
  TB_Fortran = 1,  ///< Fortran language.
  TB_CPLUSPLUS = 9 ///< C++ language.
};

enum CFileCpuId : uint8_t {
  TCPU_PPC64 = 2, ///< PowerPC common architecture 64-bit mode.
  TCPU_COM = 3,   ///< POWER and PowerPC architecture common.
  TCPU_970 = 19   ///< PPC970 - PowerPC 64-bit architecture.
};

enum SymbolAuxType : uint8_t {
  AUX_EXCEPT = 255, ///< Identifies an exception auxiliary entry.
  AUX_FCN = 254,    ///< Identifies a function auxiliary entry.
  AUX_SYM = 253,    ///< Identifies a symbol auxiliary entry.
  AUX_FILE = 252,   ///< Identifies a file auxiliary entry.
  AUX_CSECT = 251,  ///< Identifies a csect auxiliary entry.
  AUX_SECT = 250    ///< Identifies a SECT auxiliary entry.
};                  // 64-bit XCOFF file only.

StringRef getMappingClassString(XCOFF::StorageMappingClass SMC);
StringRef getRelocationTypeString(XCOFF::RelocationType Type);
Expected<SmallString<32>> parseParmsType(uint32_t Value, unsigned FixedParmsNum,
                                         unsigned FloatingParmsNum);
Expected<SmallString<32>> parseParmsTypeWithVecInfo(uint32_t Value,
                                                    unsigned FixedParmsNum,
                                                    unsigned FloatingParmsNum,
                                                    unsigned VectorParmsNum);
Expected<SmallString<32>> parseVectorParmsType(uint32_t Value,
                                               unsigned ParmsNum);

struct TracebackTable {
  enum LanguageID : uint8_t {
    C,
    Fortran,
    Pascal,
    Ada,
    PL1,
    Basic,
    Lisp,
    Cobol,
    Modula2,
    CPlusPlus,
    Rpg,
    PL8,
    PLIX = PL8,
    Assembly,
    Java,
    ObjectiveC
  };
  // Byte 1
  static constexpr uint32_t VersionMask = 0xFF00'0000;
  static constexpr uint8_t VersionShift = 24;

  // Byte 2
  static constexpr uint32_t LanguageIdMask = 0x00FF'0000;
  static constexpr uint8_t LanguageIdShift = 16;

  // Byte 3
  static constexpr uint32_t IsGlobaLinkageMask = 0x0000'8000;
  static constexpr uint32_t IsOutOfLineEpilogOrPrologueMask = 0x0000'4000;
  static constexpr uint32_t HasTraceBackTableOffsetMask = 0x0000'2000;
  static constexpr uint32_t IsInternalProcedureMask = 0x0000'1000;
  static constexpr uint32_t HasControlledStorageMask = 0x0000'0800;
  static constexpr uint32_t IsTOClessMask = 0x0000'0400;
  static constexpr uint32_t IsFloatingPointPresentMask = 0x0000'0200;
  static constexpr uint32_t IsFloatingPointOperationLogOrAbortEnabledMask =
      0x0000'0100;

  // Byte 4
  static constexpr uint32_t IsInterruptHandlerMask = 0x0000'0080;
  static constexpr uint32_t IsFunctionNamePresentMask = 0x0000'0040;
  static constexpr uint32_t IsAllocaUsedMask = 0x0000'0020;
  static constexpr uint32_t OnConditionDirectiveMask = 0x0000'001C;
  static constexpr uint32_t IsCRSavedMask = 0x0000'0002;
  static constexpr uint32_t IsLRSavedMask = 0x0000'0001;
  static constexpr uint8_t OnConditionDirectiveShift = 2;

  // Byte 5
  static constexpr uint32_t IsBackChainStoredMask = 0x8000'0000;
  static constexpr uint32_t IsFixupMask = 0x4000'0000;
  static constexpr uint32_t FPRSavedMask = 0x3F00'0000;
  static constexpr uint32_t FPRSavedShift = 24;

  // Byte 6
  static constexpr uint32_t HasExtensionTableMask = 0x0080'0000;
  static constexpr uint32_t HasVectorInfoMask = 0x0040'0000;
  static constexpr uint32_t GPRSavedMask = 0x003F'0000;
  static constexpr uint32_t GPRSavedShift = 16;

  // Byte 7
  static constexpr uint32_t NumberOfFixedParmsMask = 0x0000'FF00;
  static constexpr uint8_t NumberOfFixedParmsShift = 8;

  // Byte 8
  static constexpr uint32_t NumberOfFloatingPointParmsMask = 0x0000'00FE;
  static constexpr uint32_t HasParmsOnStackMask = 0x0000'0001;
  static constexpr uint8_t NumberOfFloatingPointParmsShift = 1;

  // Masks to select leftmost bits for decoding parameter type information.
  // Bit to use when vector info is not present.
  static constexpr uint32_t ParmTypeIsFloatingBit = 0x8000'0000;
  static constexpr uint32_t ParmTypeFloatingIsDoubleBit = 0x4000'0000;
  // Bits to use when vector info is present.
  static constexpr uint32_t ParmTypeIsFixedBits = 0x0000'0000;
  static constexpr uint32_t ParmTypeIsVectorBits = 0x4000'0000;
  static constexpr uint32_t ParmTypeIsFloatingBits = 0x8000'0000;
  static constexpr uint32_t ParmTypeIsDoubleBits = 0xC000'0000;
  static constexpr uint32_t ParmTypeMask = 0xC000'0000;

  // Vector extension
  static constexpr uint16_t NumberOfVRSavedMask = 0xFC00;
  static constexpr uint16_t IsVRSavedOnStackMask = 0x0200;
  static constexpr uint16_t HasVarArgsMask = 0x0100;
  static constexpr uint8_t NumberOfVRSavedShift = 10;

  static constexpr uint16_t NumberOfVectorParmsMask = 0x00FE;
  static constexpr uint16_t HasVMXInstructionMask = 0x0001;
  static constexpr uint8_t NumberOfVectorParmsShift = 1;

  static constexpr uint32_t ParmTypeIsVectorCharBit = 0x0000'0000;
  static constexpr uint32_t ParmTypeIsVectorShortBit = 0x4000'0000;
  static constexpr uint32_t ParmTypeIsVectorIntBit = 0x8000'0000;
  static constexpr uint32_t ParmTypeIsVectorFloatBit = 0xC000'0000;

  static constexpr uint8_t WidthOfParamType = 2;
};
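
// Editor's sketch: decoding byte 2 (the language) from the first 4-byte word
// of a traceback table; `FirstWord` names a hypothetical host-endian value.
inline TracebackTable::LanguageID getTracebackLanguage(uint32_t FirstWord) {
  return static_cast<TracebackTable::LanguageID>(
      (FirstWord & TracebackTable::LanguageIdMask) >>
      TracebackTable::LanguageIdShift);
}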

// Extended Traceback table flags.
enum ExtendedTBTableFlag : uint8_t {
  TB_OS1 = 0x80,         ///< Reserved for OS use.
  TB_RESERVED = 0x40,    ///< Reserved for compiler.
  TB_SSP_CANARY = 0x20,  ///< Stack smasher canary present on stack.
  TB_OS2 = 0x10,         ///< Reserved for OS use.
  TB_EH_INFO = 0x08,     ///< Exception handling info present.
  TB_LONGTBTABLE2 = 0x01 ///< Additional tbtable extension exists.
};

StringRef getNameForTracebackTableLanguageId(TracebackTable::LanguageID LangId);
SmallString<32> getExtendedTBTableFlagString(uint8_t Flag);

struct CsectProperties {
  CsectProperties(StorageMappingClass SMC, SymbolType ST)
      : MappingClass(SMC), Type(ST) {}
  StorageMappingClass MappingClass;
  SymbolType Type;
};

} // end namespace XCOFF
} // end namespace llvm

#endif
//===-- llvm/BinaryFormat/GOFF.h - GOFF definitions --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header contains common, non-processor-specific data structures and
// constants for the GOFF file format.
//
// GOFF specifics can be found in MVS Program Management: Advanced Facilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_GOFF_H
#define LLVM_BINARYFORMAT_GOFF_H

#include "llvm/Support/DataTypes.h"

namespace llvm {
namespace GOFF {

constexpr uint8_t RecordLength = 80;
constexpr uint8_t RecordPrefixLength = 3;
constexpr uint8_t PayloadLength = 77;

// Prefix byte on every record. This indicates GOFF format.
constexpr uint8_t PTVPrefix = 0x03;

enum RecordType : uint8_t {
  RT_ESD = 0,
  RT_TXT = 1,
  RT_RLD = 2,
  RT_LEN = 3,
  RT_END = 4,
  RT_HDR = 15,
};

enum ESDSymbolType : uint8_t {
  ESD_ST_SectionDefinition = 0,
  ESD_ST_ElementDefinition = 1,
  ESD_ST_LabelDefinition = 2,
  ESD_ST_PartReference = 3,
  ESD_ST_ExternalReference = 4,
};

enum ESDNameSpaceId : uint8_t {
  ESD_NS_ProgramManagementBinder = 0,
  ESD_NS_NormalName = 1,
  ESD_NS_PseudoRegister = 2,
  ESD_NS_Parts = 3
};

enum ESDReserveQwords : uint8_t {
  ESD_RQ_0 = 0,
  ESD_RQ_1 = 1,
  ESD_RQ_2 = 2,
  ESD_RQ_3 = 3
};

enum ESDAmode : uint8_t {
  ESD_AMODE_None = 0,
  ESD_AMODE_24 = 1,
  ESD_AMODE_31 = 2,
  ESD_AMODE_ANY = 3,
  ESD_AMODE_64 = 4,
  ESD_AMODE_MIN = 16,
};

enum ESDRmode : uint8_t {
  ESD_RMODE_None = 0,
  ESD_RMODE_24 = 1,
  ESD_RMODE_31 = 3,
  ESD_RMODE_64 = 4,
};

enum ESDTextStyle : uint8_t {
  ESD_TS_ByteOriented = 0,
  ESD_TS_Structured = 1,
  ESD_TS_Unstructured = 2,
};

enum ESDBindingAlgorithm : uint8_t {
  ESD_BA_Concatenate = 0,
  ESD_BA_Merge = 1,
};

enum ESDTaskingBehavior : uint8_t {
  ESD_TA_Unspecified = 0,
  ESD_TA_NonReus = 1,
  ESD_TA_Reus = 2,
  ESD_TA_Rent = 3,
};

enum ESDExecutable : uint8_t {
  ESD_EXE_Unspecified = 0,
  ESD_EXE_DATA = 1,
  ESD_EXE_CODE = 2,
};

enum ESDDuplicateSymbolSeverity : uint8_t {
  ESD_DSS_NoWarning = 0,
  ESD_DSS_Warning = 1,
  ESD_DSS_Error = 2,
  ESD_DSS_Reserved = 3,
};

enum ESDBindingStrength : uint8_t {
  ESD_BST_Strong = 0,
  ESD_BST_Weak = 1,
};

enum ESDLoadingBehavior : uint8_t {
  ESD_LB_Initial = 0,
  ESD_LB_Deferred = 1,
  ESD_LB_NoLoad = 2,
  ESD_LB_Reserved = 3,
};

enum ESDBindingScope : uint8_t {
  ESD_BSC_Unspecified = 0,
  ESD_BSC_Section = 1,
  ESD_BSC_Module = 2,
  ESD_BSC_Library = 3,
  ESD_BSC_ImportExport = 4,
};

enum ESDLinkageType : uint8_t { ESD_LT_OS = 0, ESD_LT_XPLink = 1 };

enum ESDAlignment : uint8_t {
  ESD_ALIGN_Byte = 0,
  ESD_ALIGN_Halfword = 1,
  ESD_ALIGN_Fullword = 2,
  ESD_ALIGN_Doubleword = 3,
  ESD_ALIGN_Quadword = 4,
  ESD_ALIGN_32byte = 5,
  ESD_ALIGN_64byte = 6,
  ESD_ALIGN_128byte = 7,
  ESD_ALIGN_256byte = 8,
  ESD_ALIGN_512byte = 9,
  ESD_ALIGN_1024byte = 10,
  ESD_ALIGN_2Kpage = 11,
  ESD_ALIGN_4Kpage = 12,
};

enum ENDEntryPointRequest : uint8_t {
  END_EPR_None = 0,
  END_EPR_EsdidOffset = 1,
  END_EPR_ExternalName = 2,
  END_EPR_Reserved = 3,
};

/// Subsections of the primary C_CODE section in the object file.
enum SubsectionKind : uint8_t {
  SK_PPA1 = 2,
};
} // end namespace GOFF

} // end namespace llvm

#endif // LLVM_BINARYFORMAT_GOFF_H
// BinaryFormat/DynamicTags.def
#ifndef DYNAMIC_TAG
#error "DYNAMIC_TAG must be defined"
#endif

// Add separate macros for the architecture specific tags and the markers
// such as DT_HIOS, etc. to allow using this file in other contexts.
// For example we can use it to generate a stringification switch statement.
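
// A minimal sketch of that pattern (editor's addition; `Tag` and the
// surrounding function are hypothetical). The marker and architecture
// macros must be stubbed out here because their values alias other tags:
//
//   #define DYNAMIC_TAG(name, value) case value: return "DT_" #name;
//   #define DYNAMIC_TAG_MARKER(name, value)
//   #define AARCH64_DYNAMIC_TAG(name, value) // likewise for the other arches
//   switch (Tag) {
//   #include "llvm/BinaryFormat/DynamicTags.def"
//   default: return "unknown";
//   }
//   #undef DYNAMIC_TAG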

#ifndef AARCH64_DYNAMIC_TAG
#define AARCH64_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
#define AARCH64_DYNAMIC_TAG_DEFINED
#endif

#ifndef HEXAGON_DYNAMIC_TAG
#define HEXAGON_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
#define HEXAGON_DYNAMIC_TAG_DEFINED
#endif

#ifndef MIPS_DYNAMIC_TAG
#define MIPS_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
#define MIPS_DYNAMIC_TAG_DEFINED
#endif

#ifndef PPC_DYNAMIC_TAG
#define PPC_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
#define PPC_DYNAMIC_TAG_DEFINED
#endif

#ifndef PPC64_DYNAMIC_TAG
#define PPC64_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
#define PPC64_DYNAMIC_TAG_DEFINED
#endif

#ifndef RISCV_DYNAMIC_TAG
#define RISCV_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
#define RISCV_DYNAMIC_TAG_DEFINED
#endif

#ifndef DYNAMIC_TAG_MARKER
#define DYNAMIC_TAG_MARKER(name, value) DYNAMIC_TAG(name, value)
#define DYNAMIC_TAG_MARKER_DEFINED
#endif

DYNAMIC_TAG(NULL, 0)        // Marks end of dynamic array.
DYNAMIC_TAG(NEEDED, 1)      // String table offset of needed library.
DYNAMIC_TAG(PLTRELSZ, 2)    // Size of relocation entries in PLT.
DYNAMIC_TAG(PLTGOT, 3)      // Address associated with linkage table.
DYNAMIC_TAG(HASH, 4)        // Address of symbolic hash table.
DYNAMIC_TAG(STRTAB, 5)      // Address of dynamic string table.
DYNAMIC_TAG(SYMTAB, 6)      // Address of dynamic symbol table.
DYNAMIC_TAG(RELA, 7)        // Address of relocation table (Rela entries).
DYNAMIC_TAG(RELASZ, 8)      // Size of Rela relocation table.
DYNAMIC_TAG(RELAENT, 9)     // Size of a Rela relocation entry.
DYNAMIC_TAG(STRSZ, 10)      // Total size of the string table.
DYNAMIC_TAG(SYMENT, 11)     // Size of a symbol table entry.
DYNAMIC_TAG(INIT, 12)       // Address of initialization function.
DYNAMIC_TAG(FINI, 13)       // Address of termination function.
DYNAMIC_TAG(SONAME, 14)     // String table offset of a shared objects name.
DYNAMIC_TAG(RPATH, 15)      // String table offset of library search path.
DYNAMIC_TAG(SYMBOLIC, 16)   // Changes symbol resolution algorithm.
DYNAMIC_TAG(REL, 17)        // Address of relocation table (Rel entries).
DYNAMIC_TAG(RELSZ, 18)      // Size of Rel relocation table.
DYNAMIC_TAG(RELENT, 19)     // Size of a Rel relocation entry.
DYNAMIC_TAG(PLTREL, 20)     // Type of relocation entry used for linking.
DYNAMIC_TAG(DEBUG, 21)      // Reserved for debugger.
DYNAMIC_TAG(TEXTREL, 22)    // Relocations exist for non-writable segments.
DYNAMIC_TAG(JMPREL, 23)     // Address of relocations associated with PLT.
DYNAMIC_TAG(BIND_NOW, 24)   // Process all relocations before execution.
DYNAMIC_TAG(INIT_ARRAY, 25) // Pointer to array of initialization functions.
DYNAMIC_TAG(FINI_ARRAY, 26) // Pointer to array of termination functions.
DYNAMIC_TAG(INIT_ARRAYSZ, 27) // Size of DT_INIT_ARRAY.
DYNAMIC_TAG(FINI_ARRAYSZ, 28) // Size of DT_FINI_ARRAY.
DYNAMIC_TAG(RUNPATH, 29)      // String table offset of lib search path.
DYNAMIC_TAG(FLAGS, 30)        // Flags.
DYNAMIC_TAG_MARKER(ENCODING, 32) // Values from here to DT_LOOS follow the rules
                                 // for the interpretation of the d_un union.

DYNAMIC_TAG(PREINIT_ARRAY, 32)   // Pointer to array of preinit functions.
DYNAMIC_TAG(PREINIT_ARRAYSZ, 33) // Size of the DT_PREINIT_ARRAY array.

DYNAMIC_TAG(SYMTAB_SHNDX, 34) // Address of the SHT_SYMTAB_SHNDX section.

// Experimental support for SHT_RELR sections. For details, see proposal
// at https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg
DYNAMIC_TAG(RELRSZ, 35)  // Size of Relr relocation table.
DYNAMIC_TAG(RELR, 36)    // Address of relocation table (Relr entries).
DYNAMIC_TAG(RELRENT, 37) // Size of a Relr relocation entry.

DYNAMIC_TAG_MARKER(LOOS, 0x60000000)   // Start of environment specific tags.
DYNAMIC_TAG_MARKER(HIOS, 0x6FFFFFFF)   // End of environment specific tags.
DYNAMIC_TAG_MARKER(LOPROC, 0x70000000) // Start of processor specific tags.
DYNAMIC_TAG_MARKER(HIPROC, 0x7FFFFFFF) // End of processor specific tags.

// Android packed relocation section tags.
// https://android.googlesource.com/platform/bionic/+/6f12bfece5dcc01325e0abba56a46b1bcf991c69/tools/relocation_packer/src/elf_file.cc#31
DYNAMIC_TAG(ANDROID_REL, 0x6000000F)
DYNAMIC_TAG(ANDROID_RELSZ, 0x60000010)
DYNAMIC_TAG(ANDROID_RELA, 0x60000011)
DYNAMIC_TAG(ANDROID_RELASZ, 0x60000012)

// Android's experimental support for SHT_RELR sections.
// https://android.googlesource.com/platform/bionic/+/b7feec74547f84559a1467aca02708ff61346d2a/libc/include/elf.h#253
DYNAMIC_TAG(ANDROID_RELR, 0x6FFFE000)      // Address of relocation table (Relr entries).
DYNAMIC_TAG(ANDROID_RELRSZ, 0x6FFFE001)    // Size of Relr relocation table.
DYNAMIC_TAG(ANDROID_RELRENT, 0x6FFFE003)   // Size of a Relr relocation entry.

DYNAMIC_TAG(GNU_HASH, 0x6FFFFEF5)    // Reference to the GNU hash table.
DYNAMIC_TAG(TLSDESC_PLT, 0x6FFFFEF6) // Location of PLT entry for TLS
                                     // descriptor resolver calls.
DYNAMIC_TAG(TLSDESC_GOT, 0x6FFFFEF7) // Location of GOT entry used by TLS
                                     // descriptor resolver PLT entry.
DYNAMIC_TAG(RELACOUNT, 0x6FFFFFF9)   // ELF32_Rela count.
DYNAMIC_TAG(RELCOUNT, 0x6FFFFFFA)    // ELF32_Rel count.

DYNAMIC_TAG(FLAGS_1, 0x6FFFFFFB) // Extended flags (DF_1_*).

DYNAMIC_TAG(VERSYM, 0x6FFFFFF0)     // The address of the .gnu.version section.
DYNAMIC_TAG(VERDEF, 0x6FFFFFFC)     // The address of the version definition
                                    // table.
DYNAMIC_TAG(VERDEFNUM, 0x6FFFFFFD)  // The number of entries in DT_VERDEF.
DYNAMIC_TAG(VERNEED, 0x6FFFFFFE)    // The address of the version dependency
                                    // table.
DYNAMIC_TAG(VERNEEDNUM, 0x6FFFFFFF) // The number of entries in DT_VERNEED.

// AArch64 specific dynamic table entries
AARCH64_DYNAMIC_TAG(AARCH64_BTI_PLT, 0x70000001)
AARCH64_DYNAMIC_TAG(AARCH64_PAC_PLT, 0x70000003)
AARCH64_DYNAMIC_TAG(AARCH64_VARIANT_PCS, 0x70000005)
AARCH64_DYNAMIC_TAG(AARCH64_MEMTAG_MODE, 0x70000009)
AARCH64_DYNAMIC_TAG(AARCH64_MEMTAG_HEAP, 0x7000000b)
AARCH64_DYNAMIC_TAG(AARCH64_MEMTAG_STACK, 0x7000000c)
AARCH64_DYNAMIC_TAG(AARCH64_MEMTAG_GLOBALS, 0x7000000d)
AARCH64_DYNAMIC_TAG(AARCH64_MEMTAG_GLOBALSSZ, 0x7000000f)

// Hexagon specific dynamic table entries
HEXAGON_DYNAMIC_TAG(HEXAGON_SYMSZ, 0x70000000)
HEXAGON_DYNAMIC_TAG(HEXAGON_VER, 0x70000001)
HEXAGON_DYNAMIC_TAG(HEXAGON_PLT, 0x70000002)

// Mips specific dynamic table entry tags.

MIPS_DYNAMIC_TAG(MIPS_RLD_VERSION, 0x70000001)  // 32 bit version number for
                                                // runtime linker interface.
MIPS_DYNAMIC_TAG(MIPS_TIME_STAMP, 0x70000002)   // Time stamp.
MIPS_DYNAMIC_TAG(MIPS_ICHECKSUM, 0x70000003)    // Checksum of external strings
                                                // and common sizes.
MIPS_DYNAMIC_TAG(MIPS_IVERSION, 0x70000004)     // Index of version string
                                                // in string table.
MIPS_DYNAMIC_TAG(MIPS_FLAGS, 0x70000005)        // 32 bits of flags.
MIPS_DYNAMIC_TAG(MIPS_BASE_ADDRESS, 0x70000006) // Base address of the segment.
MIPS_DYNAMIC_TAG(MIPS_MSYM, 0x70000007)         // Address of .msym section.
MIPS_DYNAMIC_TAG(MIPS_CONFLICT, 0x70000008)     // Address of .conflict section.
MIPS_DYNAMIC_TAG(MIPS_LIBLIST, 0x70000009)      // Address of .liblist section.
MIPS_DYNAMIC_TAG(MIPS_LOCAL_GOTNO, 0x7000000a)  // Number of local global offset
                                                // table entries.
MIPS_DYNAMIC_TAG(MIPS_CONFLICTNO, 0x7000000b)   // Number of entries
                                                // in the .conflict section.
MIPS_DYNAMIC_TAG(MIPS_LIBLISTNO, 0x70000010)    // Number of entries
                                                // in the .liblist section.
MIPS_DYNAMIC_TAG(MIPS_SYMTABNO, 0x70000011)     // Number of entries
                                                // in the .dynsym section.
MIPS_DYNAMIC_TAG(MIPS_UNREFEXTNO, 0x70000012)   // Index of first external dynamic
                                                // symbol not referenced locally.
MIPS_DYNAMIC_TAG(MIPS_GOTSYM, 0x70000013)       // Index of first dynamic symbol
                                                // in global offset table.
MIPS_DYNAMIC_TAG(MIPS_HIPAGENO, 0x70000014)     // Number of page table entries
                                                // in global offset table.
MIPS_DYNAMIC_TAG(MIPS_RLD_MAP, 0x70000016)      // Address of run time loader map
                                                // used for debugging.
MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASS, 0x70000017)    // Delta C++ class definition.
MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASS_NO, 0x70000018) // Number of entries
                                                  // in DT_MIPS_DELTA_CLASS.
MIPS_DYNAMIC_TAG(MIPS_DELTA_INSTANCE, 0x70000019) // Delta C++ class instances.
MIPS_DYNAMIC_TAG(MIPS_DELTA_INSTANCE_NO, 0x7000001A) // Number of entries
                                                     // in DT_MIPS_DELTA_INSTANCE.
MIPS_DYNAMIC_TAG(MIPS_DELTA_RELOC, 0x7000001B)       // Delta relocations.
MIPS_DYNAMIC_TAG(MIPS_DELTA_RELOC_NO, 0x7000001C)    // Number of entries
                                                     // in DT_MIPS_DELTA_RELOC.
MIPS_DYNAMIC_TAG(MIPS_DELTA_SYM, 0x7000001D)         // Delta symbols that Delta
                                                     // relocations refer to.
MIPS_DYNAMIC_TAG(MIPS_DELTA_SYM_NO, 0x7000001E)      // Number of entries
                                                     // in DT_MIPS_DELTA_SYM.
MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASSSYM, 0x70000020)    // Delta symbols that hold
                                                     // class declarations.
MIPS_DYNAMIC_TAG(MIPS_DELTA_CLASSSYM_NO, 0x70000021) // Number of entries
                                                     // in DT_MIPS_DELTA_CLASSSYM.

MIPS_DYNAMIC_TAG(MIPS_CXX_FLAGS, 0x70000022)         // Flags indicating information
                                                     // about C++ flavor.
MIPS_DYNAMIC_TAG(MIPS_PIXIE_INIT, 0x70000023)        // Pixie information.
MIPS_DYNAMIC_TAG(MIPS_SYMBOL_LIB, 0x70000024)        // Address of .MIPS.symlib
MIPS_DYNAMIC_TAG(MIPS_LOCALPAGE_GOTIDX, 0x70000025)  // The GOT index of the first PTE
                                                     // for a segment
MIPS_DYNAMIC_TAG(MIPS_LOCAL_GOTIDX, 0x70000026)      // The GOT index of the first PTE
                                                     // for a local symbol
MIPS_DYNAMIC_TAG(MIPS_HIDDEN_GOTIDX, 0x70000027)     // The GOT index of the first PTE
                                                     // for a hidden symbol
MIPS_DYNAMIC_TAG(MIPS_PROTECTED_GOTIDX, 0x70000028)  // The GOT index of the first PTE
                                                     // for a protected symbol
MIPS_DYNAMIC_TAG(MIPS_OPTIONS, 0x70000029)               // Address of `.MIPS.options'.
MIPS_DYNAMIC_TAG(MIPS_INTERFACE, 0x7000002A)             // Address of `.interface'.
MIPS_DYNAMIC_TAG(MIPS_DYNSTR_ALIGN, 0x7000002B)          // Unknown.
MIPS_DYNAMIC_TAG(MIPS_INTERFACE_SIZE, 0x7000002C)        // Size of the .interface section.
MIPS_DYNAMIC_TAG(MIPS_RLD_TEXT_RESOLVE_ADDR, 0x7000002D) // Size of rld_text_resolve
                                                         // function stored in the GOT.
MIPS_DYNAMIC_TAG(MIPS_PERF_SUFFIX, 0x7000002E)  // Default suffix of DSO to be added
                                                // by rld on dlopen() calls.
MIPS_DYNAMIC_TAG(MIPS_COMPACT_SIZE, 0x7000002F) // Size of compact relocation
                                                // section (O32).
MIPS_DYNAMIC_TAG(MIPS_GP_VALUE, 0x70000030)     // GP value for auxiliary GOTs.
MIPS_DYNAMIC_TAG(MIPS_AUX_DYNAMIC, 0x70000031)  // Address of auxiliary .dynamic.
MIPS_DYNAMIC_TAG(MIPS_PLTGOT, 0x70000032)       // Address of the base of the PLTGOT.
MIPS_DYNAMIC_TAG(MIPS_RWPLT, 0x70000034)        // Points to the base
                                                // of a writable PLT.
MIPS_DYNAMIC_TAG(MIPS_RLD_MAP_REL, 0x70000035)  // Relative offset of run time loader
                                                // map, used for debugging.
MIPS_DYNAMIC_TAG(MIPS_XHASH, 0x70000036)        // GNU-style hash table with xlat.

// PPC specific dynamic table entries.
PPC_DYNAMIC_TAG(PPC_GOT, 0x70000000) // Uses Secure PLT ABI.
PPC_DYNAMIC_TAG(PPC_OPT, 0x70000001) // Has TLS optimization.

// PPC64 specific dynamic table entries.
PPC64_DYNAMIC_TAG(PPC64_GLINK, 0x70000000) // Address of 32 bytes before the
                                           // first glink lazy resolver stub.
PPC64_DYNAMIC_TAG(PPC64_OPT, 0x70000003) // Flags to control optimizations
                                         // for TLS and multiple TOCs.

// RISC-V specific dynamic array tags.
RISCV_DYNAMIC_TAG(RISCV_VARIANT_CC, 0x70000001)

// Sun machine-independent extensions.
DYNAMIC_TAG(AUXILIARY, 0x7FFFFFFD) // Shared object to load before self
DYNAMIC_TAG(USED, 0x7FFFFFFE)      // Same as DT_NEEDED
DYNAMIC_TAG(FILTER, 0x7FFFFFFF)    // Shared object to get values from


#ifdef DYNAMIC_TAG_MARKER_DEFINED
#undef DYNAMIC_TAG_MARKER
#undef DYNAMIC_TAG_MARKER_DEFINED
#endif
#ifdef AARCH64_DYNAMIC_TAG_DEFINED
#undef AARCH64_DYNAMIC_TAG
#undef AARCH64_DYNAMIC_TAG_DEFINED
#endif
#ifdef MIPS_DYNAMIC_TAG_DEFINED
#undef MIPS_DYNAMIC_TAG
#undef MIPS_DYNAMIC_TAG_DEFINED
#endif
#ifdef HEXAGON_DYNAMIC_TAG_DEFINED
#undef HEXAGON_DYNAMIC_TAG
#undef HEXAGON_DYNAMIC_TAG_DEFINED
#endif
#ifdef PPC_DYNAMIC_TAG_DEFINED
#undef PPC_DYNAMIC_TAG
#undef PPC_DYNAMIC_TAG_DEFINED
#endif
#ifdef PPC64_DYNAMIC_TAG_DEFINED
#undef PPC64_DYNAMIC_TAG
#undef PPC64_DYNAMIC_TAG_DEFINED
#endif
#ifdef RISCV_DYNAMIC_TAG_DEFINED
#undef RISCV_DYNAMIC_TAG
#undef RISCV_DYNAMIC_TAG_DEFINED
#endif
//===- Wasm.h - Wasm object file format -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines manifest constants for the wasm object file format.
// See: https://github.com/WebAssembly/design/blob/main/BinaryEncoding.md
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_WASM_H
#define LLVM_BINARYFORMAT_WASM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <optional>

namespace llvm {
namespace wasm {

// Object file magic string.
const char WasmMagic[] = {'\0', 'a', 's', 'm'};
// Wasm binary format version
const uint32_t WasmVersion = 0x1;
// Wasm linking metadata version
const uint32_t WasmMetadataVersion = 0x2;
// Wasm uses a 64k page size
const uint32_t WasmPageSize = 65536;

struct WasmObjectHeader {
  StringRef Magic;
  uint32_t Version;
};

struct WasmDylinkImportInfo {
  StringRef Module;
  StringRef Field;
  uint32_t Flags;
};

struct WasmDylinkExportInfo {
  StringRef Name;
  uint32_t Flags;
};

struct WasmDylinkInfo {
  uint32_t MemorySize;      // Memory size in bytes
  uint32_t MemoryAlignment; // Power-of-2 alignment of memory
  uint32_t TableSize;       // Table size in elements
  uint32_t TableAlignment;  // Power-of-2 alignment of table
  std::vector<StringRef> Needed; // Shared library dependencies
  std::vector<WasmDylinkImportInfo> ImportInfo;
  std::vector<WasmDylinkExportInfo> ExportInfo;
};

struct WasmProducerInfo {
  std::vector<std::pair<std::string, std::string>> Languages;
  std::vector<std::pair<std::string, std::string>> Tools;
  std::vector<std::pair<std::string, std::string>> SDKs;
};

struct WasmFeatureEntry {
  uint8_t Prefix;
  std::string Name;
};

struct WasmExport {
  StringRef Name;
  uint8_t Kind;
  uint32_t Index;
};

struct WasmLimits {
  uint8_t Flags;
  uint64_t Minimum;
  uint64_t Maximum;
};

struct WasmTableType {
  uint8_t ElemType;
  WasmLimits Limits;
};

struct WasmTable {
  uint32_t Index;
  WasmTableType Type;
  StringRef SymbolName; // from the "linking" section
};

struct WasmInitExprMVP {
  uint8_t Opcode;
  union {
    int32_t Int32;
    int64_t Int64;
    uint32_t Float32;
    uint64_t Float64;
    uint32_t Global;
  } Value;
};

struct WasmInitExpr {
  uint8_t Extended; // Set to non-zero if extended const is used (i.e. more than
                    // one instruction)
  WasmInitExprMVP Inst;
  ArrayRef<uint8_t> Body;
};

struct WasmGlobalType {
  uint8_t Type;
  bool Mutable;
};

struct WasmGlobal {
  uint32_t Index;
  WasmGlobalType Type;
  WasmInitExpr InitExpr;
  StringRef SymbolName; // from the "linking" section
};

struct WasmTag {
  uint32_t Index;
  uint32_t SigIndex;
  StringRef SymbolName; // from the "linking" section
};

struct WasmImport {
  StringRef Module;
  StringRef Field;
  uint8_t Kind;
  union {
    uint32_t SigIndex;
    WasmGlobalType Global;
    WasmTableType Table;
    WasmLimits Memory;
  };
};

struct WasmLocalDecl {
  uint8_t Type;
  uint32_t Count;
};

struct WasmFunction {
  uint32_t Index;
  uint32_t SigIndex;
  std::vector<WasmLocalDecl> Locals;
  ArrayRef<uint8_t> Body;
  uint32_t CodeSectionOffset;
  uint32_t Size;
  uint32_t CodeOffset;  // start of Locals and Body
  std::optional<StringRef> ExportName; // from the "export" section
  StringRef SymbolName; // from the "linking" section
  StringRef DebugName;  // from the "name" section
  uint32_t Comdat;      // from the "comdat info" section
};

struct WasmDataSegment {
  uint32_t InitFlags;
  // Present if InitFlags & WASM_DATA_SEGMENT_HAS_MEMINDEX.
  uint32_t MemoryIndex;
  // Present if InitFlags & WASM_DATA_SEGMENT_IS_PASSIVE == 0.
  WasmInitExpr Offset;

  ArrayRef<uint8_t> Content;
  StringRef Name; // from the "segment info" section
  uint32_t Alignment;
  uint32_t LinkingFlags;
  uint32_t Comdat; // from the "comdat info" section
};

struct WasmElemSegment {
  uint32_t Flags;
  uint32_t TableNumber;
  uint8_t ElemKind;
  WasmInitExpr Offset;
  std::vector<uint32_t> Functions;
};

// Represents the location of a Wasm data symbol within a WasmDataSegment, as
// the index of the segment, and the offset and size within the segment.
struct WasmDataReference {
  uint32_t Segment;
  uint64_t Offset;
  uint64_t Size;
};

struct WasmRelocation {
  uint8_t Type;    // The type of the relocation.
  uint32_t Index;  // Index into either symbol or type index space.
  uint64_t Offset; // Offset from the start of the section.
  int64_t Addend;  // A value to add to the symbol.
};

struct WasmInitFunc {
  uint32_t Priority;
  uint32_t Symbol;
};

struct WasmSymbolInfo {
  StringRef Name;
  uint8_t Kind;
  uint32_t Flags;
  // For undefined symbols the module of the import
  std::optional<StringRef> ImportModule;
  // For undefined symbols the name of the import
  std::optional<StringRef> ImportName;
  // For symbols to be exported from the final module
  std::optional<StringRef> ExportName;
  union {
    // For function, table, or global symbols, the index in function, table, or
    // global index space.
    uint32_t ElementIndex;
    // For data symbols, the address of the data relative to its segment.
    WasmDataReference DataRef;
  };
};

enum class NameType {
  FUNCTION,
  GLOBAL,
  DATA_SEGMENT,
};

struct WasmDebugName {
  NameType Type;
  uint32_t Index;
  StringRef Name;
};

struct WasmLinkingData {
  uint32_t Version;
  std::vector<WasmInitFunc> InitFunctions;
  std::vector<StringRef> Comdats;
  std::vector<WasmSymbolInfo> SymbolTable;
};

enum : unsigned {
  WASM_SEC_CUSTOM = 0,     // Custom / User-defined section
  WASM_SEC_TYPE = 1,       // Function signature declarations
  WASM_SEC_IMPORT = 2,     // Import declarations
  WASM_SEC_FUNCTION = 3,   // Function declarations
  WASM_SEC_TABLE = 4,      // Indirect function table and other tables
  WASM_SEC_MEMORY = 5,     // Memory attributes
  WASM_SEC_GLOBAL = 6,     // Global declarations
  WASM_SEC_EXPORT = 7,     // Exports
  WASM_SEC_START = 8,      // Start function declaration
  WASM_SEC_ELEM = 9,       // Elements section
  WASM_SEC_CODE = 10,      // Function bodies (code)
  WASM_SEC_DATA = 11,      // Data segments
  WASM_SEC_DATACOUNT = 12, // Data segment count
  WASM_SEC_TAG = 13,       // Tag declarations
  WASM_SEC_LAST_KNOWN = WASM_SEC_TAG,
};

// Type immediate encodings used in various contexts.
enum : unsigned {
  WASM_TYPE_I32 = 0x7F,
  WASM_TYPE_I64 = 0x7E,
  WASM_TYPE_F32 = 0x7D,
  WASM_TYPE_F64 = 0x7C,
  WASM_TYPE_V128 = 0x7B,
  WASM_TYPE_FUNCREF = 0x70,
  WASM_TYPE_EXTERNREF = 0x6F,
  WASM_TYPE_FUNC = 0x60,
  WASM_TYPE_NORESULT = 0x40, // for blocks with no result values
};

// Kinds of externals (for imports and exports).
enum : unsigned {
  WASM_EXTERNAL_FUNCTION = 0x0,
  WASM_EXTERNAL_TABLE = 0x1,
  WASM_EXTERNAL_MEMORY = 0x2,
  WASM_EXTERNAL_GLOBAL = 0x3,
  WASM_EXTERNAL_TAG = 0x4,
};

// Opcodes used in initializer expressions.
enum : unsigned {
  WASM_OPCODE_END = 0x0b,
  WASM_OPCODE_CALL = 0x10,
  WASM_OPCODE_LOCAL_GET = 0x20,
  WASM_OPCODE_LOCAL_SET = 0x21,
  WASM_OPCODE_LOCAL_TEE = 0x22,
  WASM_OPCODE_GLOBAL_GET = 0x23,
  WASM_OPCODE_GLOBAL_SET = 0x24,
  WASM_OPCODE_I32_STORE = 0x36,
  WASM_OPCODE_I64_STORE = 0x37,
  WASM_OPCODE_I32_CONST = 0x41,
  WASM_OPCODE_I64_CONST = 0x42,
  WASM_OPCODE_F32_CONST = 0x43,
  WASM_OPCODE_F64_CONST = 0x44,
  WASM_OPCODE_I32_ADD = 0x6a,
  WASM_OPCODE_I32_SUB = 0x6b,
  WASM_OPCODE_I32_MUL = 0x6c,
  WASM_OPCODE_I64_ADD = 0x7c,
  WASM_OPCODE_I64_SUB = 0x7d,
  WASM_OPCODE_I64_MUL = 0x7e,
  WASM_OPCODE_REF_NULL = 0xd0,
};

// Opcodes used in synthetic functions.
enum : unsigned {
  WASM_OPCODE_BLOCK = 0x02,
  WASM_OPCODE_BR = 0x0c,
  WASM_OPCODE_BR_TABLE = 0x0e,
  WASM_OPCODE_RETURN = 0x0f,
  WASM_OPCODE_DROP = 0x1a,
  WASM_OPCODE_MISC_PREFIX = 0xfc,
  WASM_OPCODE_MEMORY_INIT = 0x08,
  WASM_OPCODE_MEMORY_FILL = 0x0b,
  WASM_OPCODE_DATA_DROP = 0x09,
  WASM_OPCODE_ATOMICS_PREFIX = 0xfe,
  WASM_OPCODE_ATOMIC_NOTIFY = 0x00,
  WASM_OPCODE_I32_ATOMIC_WAIT = 0x01,
  WASM_OPCODE_I32_ATOMIC_STORE = 0x17,
  WASM_OPCODE_I32_RMW_CMPXCHG = 0x48,
};

enum : unsigned {
  WASM_LIMITS_FLAG_NONE = 0x0,
  WASM_LIMITS_FLAG_HAS_MAX = 0x1,
  WASM_LIMITS_FLAG_IS_SHARED = 0x2,
  WASM_LIMITS_FLAG_IS_64 = 0x4,
};

enum : unsigned {
  WASM_DATA_SEGMENT_IS_PASSIVE = 0x01,
  WASM_DATA_SEGMENT_HAS_MEMINDEX = 0x02,
};

enum : unsigned {
  WASM_ELEM_SEGMENT_IS_PASSIVE = 0x01,
  WASM_ELEM_SEGMENT_HAS_TABLE_NUMBER = 0x02,
  WASM_ELEM_SEGMENT_HAS_INIT_EXPRS = 0x04,
};
const unsigned WASM_ELEM_SEGMENT_MASK_HAS_ELEM_KIND = 0x3;

// Feature policy prefixes used in the custom "target_features" section
enum : uint8_t {
  WASM_FEATURE_PREFIX_USED = '+',
  WASM_FEATURE_PREFIX_REQUIRED = '=',
  WASM_FEATURE_PREFIX_DISALLOWED = '-',
};

// Kind codes used in the custom "name" section
enum : unsigned {
  WASM_NAMES_FUNCTION = 1,
  WASM_NAMES_LOCAL = 2,
  WASM_NAMES_GLOBAL = 7,
  WASM_NAMES_DATA_SEGMENT = 9,
};

// Kind codes used in the custom "linking" section
enum : unsigned {
  WASM_SEGMENT_INFO = 0x5,
  WASM_INIT_FUNCS = 0x6,
  WASM_COMDAT_INFO = 0x7,
  WASM_SYMBOL_TABLE = 0x8,
};

// Kind codes used in the custom "dylink" section
enum : unsigned {
  WASM_DYLINK_MEM_INFO = 0x1,
  WASM_DYLINK_NEEDED = 0x2,
  WASM_DYLINK_EXPORT_INFO = 0x3,
  WASM_DYLINK_IMPORT_INFO = 0x4,
};

// Kind codes used in the custom "linking" section in the WASM_COMDAT_INFO
enum : unsigned {
  WASM_COMDAT_DATA = 0x0,
  WASM_COMDAT_FUNCTION = 0x1,
  // GLOBAL, TAG, and TABLE are in here but LLVM doesn't use them yet.
  WASM_COMDAT_SECTION = 0x5,
};

// Kind codes used in the custom "linking" section in the WASM_SYMBOL_TABLE
enum WasmSymbolType : unsigned {
  WASM_SYMBOL_TYPE_FUNCTION = 0x0,
  WASM_SYMBOL_TYPE_DATA = 0x1,
  WASM_SYMBOL_TYPE_GLOBAL = 0x2,
  WASM_SYMBOL_TYPE_SECTION = 0x3,
  WASM_SYMBOL_TYPE_TAG = 0x4,
  WASM_SYMBOL_TYPE_TABLE = 0x5,
};

enum WasmSegmentFlag : unsigned {
  WASM_SEG_FLAG_STRINGS = 0x1,
  WASM_SEG_FLAG_TLS = 0x2,
};

// Kinds of tag attributes.
enum WasmTagAttribute : uint8_t {
  WASM_TAG_ATTRIBUTE_EXCEPTION = 0x0,
};

const unsigned WASM_SYMBOL_BINDING_MASK = 0x3;
const unsigned WASM_SYMBOL_VISIBILITY_MASK = 0xc;

const unsigned WASM_SYMBOL_BINDING_GLOBAL = 0x0;
const unsigned WASM_SYMBOL_BINDING_WEAK = 0x1;
const unsigned WASM_SYMBOL_BINDING_LOCAL = 0x2;
const unsigned WASM_SYMBOL_VISIBILITY_DEFAULT = 0x0;
const unsigned WASM_SYMBOL_VISIBILITY_HIDDEN = 0x4;
const unsigned WASM_SYMBOL_UNDEFINED = 0x10;
const unsigned WASM_SYMBOL_EXPORTED = 0x20;
const unsigned WASM_SYMBOL_EXPLICIT_NAME = 0x40;
const unsigned WASM_SYMBOL_NO_STRIP = 0x80;
const unsigned WASM_SYMBOL_TLS = 0x100;
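
// Editor's sketch: binding and visibility occupy disjoint low bits of a
// symbol's flags word and are selected with the two masks above.
inline unsigned getSymbolBinding(uint32_t Flags) {
  return Flags & WASM_SYMBOL_BINDING_MASK; // e.g. WASM_SYMBOL_BINDING_WEAK
}
inline unsigned getSymbolVisibility(uint32_t Flags) {
  return Flags & WASM_SYMBOL_VISIBILITY_MASK;
}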

#define WASM_RELOC(name, value) name = value,

enum : unsigned {
#include "WasmRelocs.def"
};

#undef WASM_RELOC

// Subset of types that a value can have
enum class ValType {
  I32 = WASM_TYPE_I32,
  I64 = WASM_TYPE_I64,
  F32 = WASM_TYPE_F32,
  F64 = WASM_TYPE_F64,
  V128 = WASM_TYPE_V128,
  FUNCREF = WASM_TYPE_FUNCREF,
  EXTERNREF = WASM_TYPE_EXTERNREF,
};

struct WasmSignature {
  SmallVector<ValType, 1> Returns;
  SmallVector<ValType, 4> Params;
  // Support empty and tombstone instances, needed by DenseMap.
  enum { Plain, Empty, Tombstone } State = Plain;

  WasmSignature(SmallVector<ValType, 1> &&InReturns,
                SmallVector<ValType, 4> &&InParams)
      : Returns(std::move(InReturns)), Params(std::move(InParams)) {}
  WasmSignature() = default;
};

// Useful comparison operators
inline bool operator==(const WasmSignature &LHS, const WasmSignature &RHS) {
  return LHS.State == RHS.State && LHS.Returns == RHS.Returns &&
         LHS.Params == RHS.Params;
}

inline bool operator!=(const WasmSignature &LHS, const WasmSignature &RHS) {
  return !(LHS == RHS);
}

inline bool operator==(const WasmGlobalType &LHS, const WasmGlobalType &RHS) {
  return LHS.Type == RHS.Type && LHS.Mutable == RHS.Mutable;
}

inline bool operator!=(const WasmGlobalType &LHS, const WasmGlobalType &RHS) {
  return !(LHS == RHS);
}

inline bool operator==(const WasmLimits &LHS, const WasmLimits &RHS) {
  return LHS.Flags == RHS.Flags && LHS.Minimum == RHS.Minimum &&
         (LHS.Flags & WASM_LIMITS_FLAG_HAS_MAX ? LHS.Maximum == RHS.Maximum
                                               : true);
}

inline bool operator==(const WasmTableType &LHS, const WasmTableType &RHS) {
  return LHS.ElemType == RHS.ElemType && LHS.Limits == RHS.Limits;
}

llvm::StringRef toString(WasmSymbolType type);
llvm::StringRef relocTypetoString(uint32_t type);
llvm::StringRef sectionTypeToString(uint32_t type);
bool relocTypeHasAddend(uint32_t type);

} // end namespace wasm
} // end namespace llvm

#endif
//===-- llvm/BinaryFormat/Dwarf.h ---Dwarf Constants-------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains constants used for implementing DWARF
/// debug support.
///
/// For details on the DWARF specification see the latest DWARF Debugging
/// Information Format standard document on http://www.dwarfstd.org. This
/// file often includes support for non-released standard features.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_DWARF_H
#define LLVM_BINARYFORMAT_DWARF_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadicDetails.h"
#include "llvm/TargetParser/Triple.h"

#include <limits>

namespace llvm {
class StringRef;

namespace dwarf {

//===----------------------------------------------------------------------===//
// DWARF constants as gleaned from the DWARF Debugging Information Format V.5
// reference manual http://www.dwarfstd.org/.
//

// Do not mix the following two enumeration sets.  DW_TAG_invalid changes the
// enumeration base type.

enum LLVMConstants : uint32_t {
  /// LLVM mock tags (see also llvm/BinaryFormat/Dwarf.def).
  /// \{
  DW_TAG_invalid = ~0U,        ///< Tag for invalid results.
  DW_VIRTUALITY_invalid = ~0U, ///< Virtuality for invalid results.
  DW_MACINFO_invalid = ~0U,    ///< Macinfo type for invalid results.
  /// \}

  /// Special values for an initial length field.
  /// \{
  DW_LENGTH_lo_reserved = 0xfffffff0, ///< Lower bound of the reserved range.
  DW_LENGTH_DWARF64 = 0xffffffff,     ///< Indicator of 64-bit DWARF format.
  DW_LENGTH_hi_reserved = 0xffffffff, ///< Upper bound of the reserved range.
  /// \}

  /// Other constants.
  /// \{
  DWARF_VERSION = 4,       ///< Default DWARF version we output.
  DW_PUBTYPES_VERSION = 2, ///< Section version number for .debug_pubtypes.
  DW_PUBNAMES_VERSION = 2, ///< Section version number for .debug_pubnames.
  DW_ARANGES_VERSION = 2,  ///< Section version number for .debug_aranges.
  /// \}

  /// Identifiers we use to distinguish vendor extensions.
  /// \{
  DWARF_VENDOR_DWARF = 0, ///< Defined in v2 or later of the DWARF standard.
  DWARF_VENDOR_APPLE = 1,
  DWARF_VENDOR_BORLAND = 2,
  DWARF_VENDOR_GNU = 3,
  DWARF_VENDOR_GOOGLE = 4,
  DWARF_VENDOR_LLVM = 5,
  DWARF_VENDOR_MIPS = 6,
  DWARF_VENDOR_WASM = 7,
  DWARF_VENDOR_ALTIUM,
  DWARF_VENDOR_COMPAQ,
  DWARF_VENDOR_GHS,
  DWARF_VENDOR_GO,
  DWARF_VENDOR_HP,
  DWARF_VENDOR_IBM,
  DWARF_VENDOR_INTEL,
  DWARF_VENDOR_PGI,
  DWARF_VENDOR_SUN,
  DWARF_VENDOR_UPC,
  ///\}
};

/// Constants that define the DWARF format as 32 or 64 bit.
enum DwarfFormat : uint8_t { DWARF32, DWARF64 };

/// Special ID values that distinguish a CIE from a FDE in DWARF CFI.
/// Not inside an enum because a 64-bit value is needed.
/// @{
const uint32_t DW_CIE_ID = UINT32_MAX;
const uint64_t DW64_CIE_ID = UINT64_MAX;
/// @}

/// Identifier of an invalid DIE offset in the .debug_info section.
const uint32_t DW_INVALID_OFFSET = UINT32_MAX;

enum Tag : uint16_t {
#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND) DW_TAG_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_TAG_lo_user = 0x4080,
  DW_TAG_hi_user = 0xffff,
  DW_TAG_user_base = 0x1000 ///< Recommended base for user tags.
};

inline bool isType(Tag T) {
  switch (T) {
  default:
    return false;
#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND)                         \
  case DW_TAG_##NAME:                                                          \
    return (KIND == DW_KIND_TYPE);
#include "llvm/BinaryFormat/Dwarf.def"
  }
}

/// Attributes.
enum Attribute : uint16_t {
#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) DW_AT_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_AT_lo_user = 0x2000,
  DW_AT_hi_user = 0x3fff,
};

enum Form : uint16_t {
#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) DW_FORM_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_FORM_lo_user = 0x1f00, ///< Not specified by DWARF.
};

enum LocationAtom {
#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) DW_OP_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_OP_lo_user = 0xe0,
  DW_OP_hi_user = 0xff,
  DW_OP_LLVM_fragment = 0x1000,         ///< Only used in LLVM metadata.
  DW_OP_LLVM_convert = 0x1001,          ///< Only used in LLVM metadata.
  DW_OP_LLVM_tag_offset = 0x1002,       ///< Only used in LLVM metadata.
  DW_OP_LLVM_entry_value = 0x1003,      ///< Only used in LLVM metadata.
  DW_OP_LLVM_implicit_pointer = 0x1004, ///< Only used in LLVM metadata.
  DW_OP_LLVM_arg = 0x1005,              ///< Only used in LLVM metadata.
};

enum LlvmUserLocationAtom {
#define HANDLE_DW_OP_LLVM_USEROP(ID, NAME) DW_OP_LLVM_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
};

enum TypeKind : uint8_t {
#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) DW_ATE_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_ATE_lo_user = 0x80,
  DW_ATE_hi_user = 0xff
};

enum DecimalSignEncoding {
  // Decimal sign attribute values
  DW_DS_unsigned = 0x01,
  DW_DS_leading_overpunch = 0x02,
  DW_DS_trailing_overpunch = 0x03,
  DW_DS_leading_separate = 0x04,
  DW_DS_trailing_separate = 0x05
};

enum EndianityEncoding {
  // Endianity attribute values
#define HANDLE_DW_END(ID, NAME) DW_END_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_END_lo_user = 0x40,
  DW_END_hi_user = 0xff
};

enum AccessAttribute {
  // Accessibility codes
  DW_ACCESS_public = 0x01,
  DW_ACCESS_protected = 0x02,
  DW_ACCESS_private = 0x03
};

enum VisibilityAttribute {
  // Visibility codes
  DW_VIS_local = 0x01,
  DW_VIS_exported = 0x02,
  DW_VIS_qualified = 0x03
};

enum VirtualityAttribute {
#define HANDLE_DW_VIRTUALITY(ID, NAME) DW_VIRTUALITY_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_VIRTUALITY_max = 0x02
};

enum DefaultedMemberAttribute {
#define HANDLE_DW_DEFAULTED(ID, NAME) DW_DEFAULTED_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_DEFAULTED_max = 0x02
};

enum SourceLanguage {
#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)                 \
  DW_LANG_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_LANG_lo_user = 0x8000,
  DW_LANG_hi_user = 0xffff
};

inline bool isCPlusPlus(SourceLanguage S) {
  bool result = false;
  // Deliberately enumerate all the language options so we get a warning when
  // new language options are added (-Wswitch) that'll hopefully help keep this
  // switch up-to-date when new C++ versions are added.
  switch (S) {
  case DW_LANG_C_plus_plus:
  case DW_LANG_C_plus_plus_03:
  case DW_LANG_C_plus_plus_11:
  case DW_LANG_C_plus_plus_14:
  case DW_LANG_C_plus_plus_17:
  case DW_LANG_C_plus_plus_20:
    result = true;
    break;
  case DW_LANG_C89:
  case DW_LANG_C:
  case DW_LANG_Ada83:
  case DW_LANG_Cobol74:
  case DW_LANG_Cobol85:
  case DW_LANG_Fortran77:
  case DW_LANG_Fortran90:
  case DW_LANG_Pascal83:
  case DW_LANG_Modula2:
  case DW_LANG_Java:
  case DW_LANG_C99:
  case DW_LANG_Ada95:
  case DW_LANG_Fortran95:
  case DW_LANG_PLI:
  case DW_LANG_ObjC:
  case DW_LANG_ObjC_plus_plus:
  case DW_LANG_UPC:
  case DW_LANG_D:
  case DW_LANG_Python:
  case DW_LANG_OpenCL:
  case DW_LANG_Go:
  case DW_LANG_Modula3:
  case DW_LANG_Haskell:
  case DW_LANG_OCaml:
  case DW_LANG_Rust:
  case DW_LANG_C11:
  case DW_LANG_Swift:
  case DW_LANG_Julia:
  case DW_LANG_Dylan:
  case DW_LANG_Fortran03:
  case DW_LANG_Fortran08:
  case DW_LANG_RenderScript:
  case DW_LANG_BLISS:
  case DW_LANG_Mips_Assembler:
  case DW_LANG_GOOGLE_RenderScript:
  case DW_LANG_BORLAND_Delphi:
  case DW_LANG_lo_user:
  case DW_LANG_hi_user:
  case DW_LANG_Kotlin:
  case DW_LANG_Zig:
  case DW_LANG_Crystal:
  case DW_LANG_C17:
  case DW_LANG_Fortran18:
  case DW_LANG_Ada2005:
  case DW_LANG_Ada2012:
  case DW_LANG_Mojo:
    result = false;
    break;
  }

  return result;
}

inline bool isFortran(SourceLanguage S) {
  bool result = false;
  // Deliberately enumerate all the language options so we get a warning when
  // new language options are added (-Wswitch) that'll hopefully help keep this
  // switch up-to-date when new Fortran versions are added.
  switch (S) {
  case DW_LANG_Fortran77:
  case DW_LANG_Fortran90:
  case DW_LANG_Fortran95:
  case DW_LANG_Fortran03:
  case DW_LANG_Fortran08:
  case DW_LANG_Fortran18:
    result = true;
    break;
  case DW_LANG_C89:
  case DW_LANG_C:
  case DW_LANG_Ada83:
  case DW_LANG_C_plus_plus:
  case DW_LANG_Cobol74:
  case DW_LANG_Cobol85:
  case DW_LANG_Pascal83:
  case DW_LANG_Modula2:
  case DW_LANG_Java:
  case DW_LANG_C99:
  case DW_LANG_Ada95:
  case DW_LANG_PLI:
  case DW_LANG_ObjC:
  case DW_LANG_ObjC_plus_plus:
  case DW_LANG_UPC:
  case DW_LANG_D:
  case DW_LANG_Python:
  case DW_LANG_OpenCL:
  case DW_LANG_Go:
  case DW_LANG_Modula3:
  case DW_LANG_Haskell:
  case DW_LANG_C_plus_plus_03:
  case DW_LANG_C_plus_plus_11:
  case DW_LANG_OCaml:
  case DW_LANG_Rust:
  case DW_LANG_C11:
  case DW_LANG_Swift:
  case DW_LANG_Julia:
  case DW_LANG_Dylan:
  case DW_LANG_C_plus_plus_14:
  case DW_LANG_RenderScript:
  case DW_LANG_BLISS:
  case DW_LANG_Mips_Assembler:
  case DW_LANG_GOOGLE_RenderScript:
  case DW_LANG_BORLAND_Delphi:
  case DW_LANG_lo_user:
  case DW_LANG_hi_user:
  case DW_LANG_Kotlin:
  case DW_LANG_Zig:
  case DW_LANG_Crystal:
  case DW_LANG_C_plus_plus_17:
  case DW_LANG_C_plus_plus_20:
  case DW_LANG_C17:
  case DW_LANG_Ada2005:
  case DW_LANG_Ada2012:
  case DW_LANG_Mojo:
    result = false;
    break;
  }

  return result;
}

inline bool isC(SourceLanguage S) {
  // Deliberately enumerate all the language options so we get a warning when
  // new language options are added (-Wswitch) that'll hopefully help keep this
  // switch up-to-date when new C versions are added.
  switch (S) {
  case DW_LANG_C11:
  case DW_LANG_C17:
  case DW_LANG_C89:
  case DW_LANG_C99:
  case DW_LANG_C:
  case DW_LANG_ObjC:
    return true;
  case DW_LANG_C_plus_plus:
  case DW_LANG_C_plus_plus_03:
  case DW_LANG_C_plus_plus_11:
  case DW_LANG_C_plus_plus_14:
  case DW_LANG_C_plus_plus_17:
  case DW_LANG_C_plus_plus_20:
  case DW_LANG_Ada83:
  case DW_LANG_Cobol74:
  case DW_LANG_Cobol85:
  case DW_LANG_Fortran77:
  case DW_LANG_Fortran90:
  case DW_LANG_Pascal83:
  case DW_LANG_Modula2:
  case DW_LANG_Java:
  case DW_LANG_Ada95:
  case DW_LANG_Fortran95:
  case DW_LANG_PLI:
  case DW_LANG_ObjC_plus_plus:
  case DW_LANG_UPC:
  case DW_LANG_D:
  case DW_LANG_Python:
  case DW_LANG_OpenCL:
  case DW_LANG_Go:
  case DW_LANG_Modula3:
  case DW_LANG_Haskell:
  case DW_LANG_OCaml:
  case DW_LANG_Rust:
  case DW_LANG_Swift:
  case DW_LANG_Julia:
  case DW_LANG_Dylan:
  case DW_LANG_Fortran03:
  case DW_LANG_Fortran08:
  case DW_LANG_RenderScript:
  case DW_LANG_BLISS:
  case DW_LANG_Mips_Assembler:
  case DW_LANG_GOOGLE_RenderScript:
  case DW_LANG_BORLAND_Delphi:
  case DW_LANG_lo_user:
  case DW_LANG_hi_user:
  case DW_LANG_Kotlin:
  case DW_LANG_Zig:
  case DW_LANG_Crystal:
  case DW_LANG_Fortran18:
  case DW_LANG_Ada2005:
  case DW_LANG_Ada2012:
  case DW_LANG_Mojo:
    return false;
  }
  llvm_unreachable("Unknown language kind.");
}

inline TypeKind getArrayIndexTypeEncoding(SourceLanguage S) {
  return isFortran(S) ? DW_ATE_signed : DW_ATE_unsigned;
}

enum CaseSensitivity {
  // Identifier case codes
  DW_ID_case_sensitive = 0x00,
  DW_ID_up_case = 0x01,
  DW_ID_down_case = 0x02,
  DW_ID_case_insensitive = 0x03
};

enum CallingConvention {
// Calling convention codes
#define HANDLE_DW_CC(ID, NAME) DW_CC_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_CC_lo_user = 0x40,
  DW_CC_hi_user = 0xff
};

enum InlineAttribute {
  // Inline codes
  DW_INL_not_inlined = 0x00,
  DW_INL_inlined = 0x01,
  DW_INL_declared_not_inlined = 0x02,
  DW_INL_declared_inlined = 0x03
};

enum ArrayDimensionOrdering {
  // Array ordering
  DW_ORD_row_major = 0x00,
  DW_ORD_col_major = 0x01
};

enum DiscriminantList {
  // Discriminant descriptor values
  DW_DSC_label = 0x00,
  DW_DSC_range = 0x01
};

/// Line Number Standard Opcode Encodings.
enum LineNumberOps : uint8_t {
#define HANDLE_DW_LNS(ID, NAME) DW_LNS_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
};

/// Line Number Extended Opcode Encodings.
enum LineNumberExtendedOps {
#define HANDLE_DW_LNE(ID, NAME) DW_LNE_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_LNE_lo_user = 0x80,
  DW_LNE_hi_user = 0xff
};

enum LineNumberEntryFormat {
#define HANDLE_DW_LNCT(ID, NAME) DW_LNCT_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_LNCT_lo_user = 0x2000,
  DW_LNCT_hi_user = 0x3fff,
};

enum MacinfoRecordType {
  // Macinfo Type Encodings
  DW_MACINFO_define = 0x01,
  DW_MACINFO_undef = 0x02,
  DW_MACINFO_start_file = 0x03,
  DW_MACINFO_end_file = 0x04,
  DW_MACINFO_vendor_ext = 0xff
};

/// DWARF v5 macro information entry type encodings.
enum MacroEntryType {
#define HANDLE_DW_MACRO(ID, NAME) DW_MACRO_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_MACRO_lo_user = 0xe0,
  DW_MACRO_hi_user = 0xff
};

/// GNU .debug_macro macro information entry type encodings.
enum GnuMacroEntryType {
#define HANDLE_DW_MACRO_GNU(ID, NAME) DW_MACRO_GNU_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_MACRO_GNU_lo_user = 0xe0,
  DW_MACRO_GNU_hi_user = 0xff
};

/// DWARF v5 range list entry encoding values.
enum RnglistEntries {
#define HANDLE_DW_RLE(ID, NAME) DW_RLE_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
};

/// DWARF v5 loc list entry encoding values.
enum LoclistEntries {
#define HANDLE_DW_LLE(ID, NAME) DW_LLE_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
};

/// Call frame instruction encodings.
enum CallFrameInfo {
#define HANDLE_DW_CFA(ID, NAME) DW_CFA_##NAME = ID,
#define HANDLE_DW_CFA_PRED(ID, NAME, ARCH) DW_CFA_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_CFA_extended = 0x00,

  DW_CFA_lo_user = 0x1c,
  DW_CFA_hi_user = 0x3f
};

enum Constants {
  // Children flag
  DW_CHILDREN_no = 0x00,
  DW_CHILDREN_yes = 0x01,

  DW_EH_PE_absptr = 0x00,
  DW_EH_PE_omit = 0xff,
  DW_EH_PE_uleb128 = 0x01,
  DW_EH_PE_udata2 = 0x02,
  DW_EH_PE_udata4 = 0x03,
  DW_EH_PE_udata8 = 0x04,
  DW_EH_PE_sleb128 = 0x09,
  DW_EH_PE_sdata2 = 0x0A,
  DW_EH_PE_sdata4 = 0x0B,
  DW_EH_PE_sdata8 = 0x0C,
  DW_EH_PE_signed = 0x08,
  DW_EH_PE_pcrel = 0x10,
  DW_EH_PE_textrel = 0x20,
  DW_EH_PE_datarel = 0x30,
  DW_EH_PE_funcrel = 0x40,
  DW_EH_PE_aligned = 0x50,
  DW_EH_PE_indirect = 0x80
};

/// Constants for the DW_APPLE_PROPERTY_attributes attribute.
/// Keep this list in sync with clang's DeclObjCCommon.h
/// ObjCPropertyAttribute::Kind!
enum ApplePropertyAttributes {
#define HANDLE_DW_APPLE_PROPERTY(ID, NAME) DW_APPLE_PROPERTY_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
};

/// Constants for unit types in DWARF v5.
enum UnitType : unsigned char {
#define HANDLE_DW_UT(ID, NAME) DW_UT_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_UT_lo_user = 0x80,
  DW_UT_hi_user = 0xff
};

enum Index {
#define HANDLE_DW_IDX(ID, NAME) DW_IDX_##NAME = ID,
#include "llvm/BinaryFormat/Dwarf.def"
  DW_IDX_lo_user = 0x2000,
  DW_IDX_hi_user = 0x3fff
};

inline bool isUnitType(uint8_t UnitType) {
  switch (UnitType) {
  case DW_UT_compile:
  case DW_UT_type:
  case DW_UT_partial:
  case DW_UT_skeleton:
  case DW_UT_split_compile:
  case DW_UT_split_type:
    return true;
  default:
    return false;
  }
}

inline bool isUnitType(dwarf::Tag T) {
  switch (T) {
  case DW_TAG_compile_unit:
  case DW_TAG_type_unit:
  case DW_TAG_partial_unit:
  case DW_TAG_skeleton_unit:
    return true;
  default:
    return false;
  }
}

// Constants for the DWARF v5 Accelerator Table Proposal
enum AcceleratorTable {
  // Data layout descriptors.
  DW_ATOM_null = 0u,       /// Marker for the end of a list of atoms.
  DW_ATOM_die_offset = 1u, // DIE offset in the debug_info section.
  DW_ATOM_cu_offset = 2u, // Offset of the compile unit header that contains the
                          // item in question.
  DW_ATOM_die_tag = 3u,   // A tag entry.
  DW_ATOM_type_flags = 4u, // Set of flags for a type.

  DW_ATOM_type_type_flags = 5u, // Dsymutil type extension.
  DW_ATOM_qual_name_hash = 6u,  // Dsymutil qualified hash extension.

  // DW_ATOM_type_flags values.

  // Always set for C++, only set for ObjC if this is the @implementation for a
  // class.
  DW_FLAG_type_implementation = 2u,

  // Hash functions.

  // Daniel J. Bernstein hash.
  DW_hash_function_djb = 0u
};

// Constants for the GNU pubnames/pubtypes extensions supporting gdb index.
enum GDBIndexEntryKind {
  GIEK_NONE,
  GIEK_TYPE,
  GIEK_VARIABLE,
  GIEK_FUNCTION,
  GIEK_OTHER,
  GIEK_UNUSED5,
  GIEK_UNUSED6,
  GIEK_UNUSED7
};

enum GDBIndexEntryLinkage { GIEL_EXTERNAL, GIEL_STATIC };

/// \defgroup DwarfConstantsDumping Dwarf constants dumping functions
///
/// All these functions map their argument's value back to the
/// corresponding enumerator name or return an empty StringRef if the value
/// isn't known.
///
/// @{
StringRef TagString(unsigned Tag);
StringRef ChildrenString(unsigned Children);
StringRef AttributeString(unsigned Attribute);
StringRef FormEncodingString(unsigned Encoding);
StringRef OperationEncodingString(unsigned Encoding);
StringRef SubOperationEncodingString(unsigned OpEncoding,
                                     unsigned SubOpEncoding);
StringRef AttributeEncodingString(unsigned Encoding);
StringRef DecimalSignString(unsigned Sign);
StringRef EndianityString(unsigned Endian);
StringRef AccessibilityString(unsigned Access);
StringRef DefaultedMemberString(unsigned DefaultedEncodings);
StringRef VisibilityString(unsigned Visibility);
StringRef VirtualityString(unsigned Virtuality);
StringRef LanguageString(unsigned Language);
StringRef CaseString(unsigned Case);
StringRef ConventionString(unsigned Convention);
StringRef InlineCodeString(unsigned Code);
StringRef ArrayOrderString(unsigned Order);
StringRef LNStandardString(unsigned Standard);
StringRef LNExtendedString(unsigned Encoding);
StringRef MacinfoString(unsigned Encoding);
StringRef MacroString(unsigned Encoding);
StringRef GnuMacroString(unsigned Encoding);
StringRef RangeListEncodingString(unsigned Encoding);
StringRef LocListEncodingString(unsigned Encoding);
StringRef CallFrameString(unsigned Encoding, Triple::ArchType Arch);
StringRef ApplePropertyString(unsigned);
StringRef UnitTypeString(unsigned);
StringRef AtomTypeString(unsigned Atom);
StringRef GDBIndexEntryKindString(GDBIndexEntryKind Kind);
StringRef GDBIndexEntryLinkageString(GDBIndexEntryLinkage Linkage);
StringRef IndexString(unsigned Idx);
StringRef FormatString(DwarfFormat Format);
StringRef FormatString(bool IsDWARF64);
StringRef RLEString(unsigned RLE);
/// @}

/// \defgroup DwarfConstantsParsing Dwarf constants parsing functions
///
/// These functions map their strings back to the corresponding enumeration
/// value or return 0 if there is none, except for these exceptions:
///
/// \li \a getTag() returns \a DW_TAG_invalid on invalid input.
/// \li \a getVirtuality() returns \a DW_VIRTUALITY_invalid on invalid input.
/// \li \a getMacinfo() returns \a DW_MACINFO_invalid on invalid input.
///
/// @{
unsigned getTag(StringRef TagString);
unsigned getOperationEncoding(StringRef OperationEncodingString);
unsigned getSubOperationEncoding(unsigned OpEncoding,
                                 StringRef SubOperationEncodingString);
unsigned getVirtuality(StringRef VirtualityString);
unsigned getLanguage(StringRef LanguageString);
unsigned getCallingConvention(StringRef LanguageString);
unsigned getAttributeEncoding(StringRef EncodingString);
unsigned getMacinfo(StringRef MacinfoString);
unsigned getMacro(StringRef MacroString);
/// @}

/// \defgroup DwarfConstantsVersioning Dwarf version for constants
///
/// For constants defined by DWARF, returns the DWARF version when the constant
/// was first defined. For vendor extensions, if there is a version-related
/// policy for when to emit it, returns a version number for that policy.
/// Otherwise returns 0.
///
/// @{
unsigned TagVersion(Tag T);
unsigned AttributeVersion(Attribute A);
unsigned FormVersion(Form F);
unsigned OperationVersion(LocationAtom O);
unsigned AttributeEncodingVersion(TypeKind E);
unsigned LanguageVersion(SourceLanguage L);
/// @}

/// \defgroup DwarfConstantsVendor Dwarf "vendor" for constants
///
/// These functions return an identifier describing "who" defined the constant,
/// either the DWARF standard itself or the vendor who defined the extension.
///
/// @{
unsigned TagVendor(Tag T);
unsigned AttributeVendor(Attribute A);
unsigned FormVendor(Form F);
unsigned OperationVendor(LocationAtom O);
unsigned AttributeEncodingVendor(TypeKind E);
unsigned LanguageVendor(SourceLanguage L);
/// @}

std::optional<unsigned> LanguageLowerBound(SourceLanguage L);

/// The size of a reference determined by the DWARF 32/64-bit format.
inline uint8_t getDwarfOffsetByteSize(DwarfFormat Format) {
  switch (Format) {
  case DwarfFormat::DWARF32:
    return 4;
  case DwarfFormat::DWARF64:
    return 8;
  }
  llvm_unreachable("Invalid Format value");
}

/// A helper struct providing information about the byte size of DW_FORM
/// values that vary in size depending on the DWARF version, address byte
/// size, or DWARF32/DWARF64.
struct FormParams {
  uint16_t Version;
  uint8_t AddrSize;
  DwarfFormat Format;
  /// True if DWARF v2 output generally uses relocations for references
  /// to other .debug_* sections.
  bool DwarfUsesRelocationsAcrossSections = false;

  /// The definition of the size of form DW_FORM_ref_addr depends on the
  /// version. In DWARF v2 it's the size of an address; after that, it's the
  /// size of a reference.
  uint8_t getRefAddrByteSize() const {
    if (Version == 2)
      return AddrSize;
    return getDwarfOffsetByteSize();
  }

  /// The size of a reference is determined by the DWARF 32/64-bit format.
  uint8_t getDwarfOffsetByteSize() const {
    return dwarf::getDwarfOffsetByteSize(Format);
  }

  explicit operator bool() const { return Version && AddrSize; }
};
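
// Editor's sketch (values illustrative): DW_FORM_ref_addr is address-sized
// only in DWARF v2; later versions use the 32/64-bit offset size.
//
//   FormParams V2{/*Version=*/2, /*AddrSize=*/8, DWARF32};
//   FormParams V4{/*Version=*/4, /*AddrSize=*/8, DWARF32};
//   // V2.getRefAddrByteSize() == 8, V4.getRefAddrByteSize() == 4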

/// Get the byte size of the unit length field depending on the DWARF format.
inline uint8_t getUnitLengthFieldByteSize(DwarfFormat Format) {
  switch (Format) {
  case DwarfFormat::DWARF32:
    return 4;
  case DwarfFormat::DWARF64:
    return 12;
  }
  llvm_unreachable("Invalid Format value");
}
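
// Editor's sketch (helper name hypothetical): the reserved initial-length
// values are what make the 12-byte DWARF64 unit length field work: a first
// 32-bit word of DW_LENGTH_DWARF64 signals that the real 64-bit length
// follows.
inline DwarfFormat detectDwarfFormat(uint32_t FirstWord) {
  return FirstWord == DW_LENGTH_DWARF64 ? DwarfFormat::DWARF64
                                        : DwarfFormat::DWARF32;
}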

/// Get the fixed byte size for a given form.
///
/// If the form has a fixed byte size, then a std::optional with a value will be
/// returned. If the form is always encoded using a variable length storage
/// format (ULEB or SLEB numbers or blocks) then std::nullopt will be returned.
///
/// \param Form DWARF form to get the fixed byte size for.
/// \param Params DWARF parameters to help interpret forms.
/// \returns std::optional<uint8_t> value with the fixed byte size or
/// std::nullopt if \p Form doesn't have a fixed byte size.
std::optional<uint8_t> getFixedFormByteSize(dwarf::Form Form,
                                            FormParams Params);

/// Tells whether the specified form is defined in the specified version,
/// or is an extension if extensions are allowed.
bool isValidFormForVersion(Form F, unsigned Version, bool ExtensionsOk = true);

/// Returns the symbolic string representing Val when used as a value
/// for attribute Attr.
StringRef AttributeValueString(uint16_t Attr, unsigned Val);

/// Returns the symbolic string representing Val when used as a value
/// for atom Atom.
StringRef AtomValueString(uint16_t Atom, unsigned Val);

/// Describes an entry of the various gnu_pub* debug sections.
///
/// The gnu_pub* kind looks like:
///
/// 0-3  reserved
/// 4-6  symbol kind
/// 7    0 == global, 1 == static
///
/// A gdb_index descriptor includes the above kind, shifted up by 24 bits, with
/// the offset of the CU within the debug_info section stored in the low 24
/// bits.
struct PubIndexEntryDescriptor {
  GDBIndexEntryKind Kind;
  GDBIndexEntryLinkage Linkage;
  PubIndexEntryDescriptor(GDBIndexEntryKind Kind, GDBIndexEntryLinkage Linkage)
      : Kind(Kind), Linkage(Linkage) {}
  /* implicit */ PubIndexEntryDescriptor(GDBIndexEntryKind Kind)
      : Kind(Kind), Linkage(GIEL_EXTERNAL) {}
  explicit PubIndexEntryDescriptor(uint8_t Value)
      : Kind(
            static_cast<GDBIndexEntryKind>((Value & KIND_MASK) >> KIND_OFFSET)),
        Linkage(static_cast<GDBIndexEntryLinkage>((Value & LINKAGE_MASK) >>
                                                  LINKAGE_OFFSET)) {}
  uint8_t toBits() const {
    return Kind << KIND_OFFSET | Linkage << LINKAGE_OFFSET;
  }

private:
  enum {
    KIND_OFFSET = 4,
    KIND_MASK = 7 << KIND_OFFSET,
    LINKAGE_OFFSET = 7,
    LINKAGE_MASK = 1 << LINKAGE_OFFSET
  };
};
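
// Editor's sketch: the descriptor round-trips through its one-byte encoding,
// with the kind in bits 4-6 and the linkage in bit 7.
//
//   PubIndexEntryDescriptor Desc(GIEK_FUNCTION, GIEL_STATIC);
//   uint8_t Bits = Desc.toBits();        // == 0xB0
//   PubIndexEntryDescriptor Same(Bits);  // recovers Kind and Linkage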

template <typename Enum> struct EnumTraits : public std::false_type {};

template <> struct EnumTraits<Attribute> : public std::true_type {
  static constexpr char Type[3] = "AT";
  static constexpr StringRef (*StringFn)(unsigned) = &AttributeString;
};

template <> struct EnumTraits<Form> : public std::true_type {
  static constexpr char Type[5] = "FORM";
  static constexpr StringRef (*StringFn)(unsigned) = &FormEncodingString;
};

template <> struct EnumTraits<Index> : public std::true_type {
  static constexpr char Type[4] = "IDX";
  static constexpr StringRef (*StringFn)(unsigned) = &IndexString;
};

template <> struct EnumTraits<Tag> : public std::true_type {
  static constexpr char Type[4] = "TAG";
  static constexpr StringRef (*StringFn)(unsigned) = &TagString;
};

template <> struct EnumTraits<LineNumberOps> : public std::true_type {
  static constexpr char Type[4] = "LNS";
  static constexpr StringRef (*StringFn)(unsigned) = &LNStandardString;
};

template <> struct EnumTraits<LocationAtom> : public std::true_type {
  static constexpr char Type[3] = "OP";
  static constexpr StringRef (*StringFn)(unsigned) = &OperationEncodingString;
};

inline uint64_t computeTombstoneAddress(uint8_t AddressByteSize) {
  return std::numeric_limits<uint64_t>::max() >> (8 - AddressByteSize) * 8;
}
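
// Editor's note: e.g. computeTombstoneAddress(4) == 0x00000000ffffffff, the
// all-ones value for a 4-byte address space.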

} // End of namespace dwarf

/// Dwarf constants format_provider
///
/// Specialization of the format_provider template for dwarf enums. Unlike the
/// dumping functions above, these format unknown enumerator values as
/// DW_TYPE_unknown_1234 (e.g. DW_TAG_unknown_ffff).
template <typename Enum>
struct format_provider<Enum, std::enable_if_t<dwarf::EnumTraits<Enum>::value>> {
  static void format(const Enum &E, raw_ostream &OS, StringRef Style) {
    StringRef Str = dwarf::EnumTraits<Enum>::StringFn(E);
    if (Str.empty()) {
      OS << "DW_" << dwarf::EnumTraits<Enum>::Type << "_unknown_"
         << llvm::format("%x", E);
    } else
      OS << Str;
  }
};
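
// Editor's sketch (assuming llvm/Support/FormatVariadic.h is available): this
// specialization lets formatv print the enumerators symbolically.
//
//   OS << formatv("{0}", dwarf::DW_TAG_compile_unit); // "DW_TAG_compile_unit"
//   OS << formatv("{0}", dwarf::Tag(0xffff));         // "DW_TAG_unknown_ffff"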
} // End of namespace llvm

#endif
// BinaryFormat/WasmRelocs.def
#ifndef WASM_RELOC
#error "WASM_RELOC must be defined"
#endif

WASM_RELOC(R_WASM_FUNCTION_INDEX_LEB,      0)
WASM_RELOC(R_WASM_TABLE_INDEX_SLEB,        1)
WASM_RELOC(R_WASM_TABLE_INDEX_I32,         2)
WASM_RELOC(R_WASM_MEMORY_ADDR_LEB,         3)
WASM_RELOC(R_WASM_MEMORY_ADDR_SLEB,        4)
WASM_RELOC(R_WASM_MEMORY_ADDR_I32,         5)
WASM_RELOC(R_WASM_TYPE_INDEX_LEB,          6)
WASM_RELOC(R_WASM_GLOBAL_INDEX_LEB,        7)
WASM_RELOC(R_WASM_FUNCTION_OFFSET_I32,     8)
WASM_RELOC(R_WASM_SECTION_OFFSET_I32,      9)
WASM_RELOC(R_WASM_TAG_INDEX_LEB,          10)
WASM_RELOC(R_WASM_MEMORY_ADDR_REL_SLEB,   11)
WASM_RELOC(R_WASM_TABLE_INDEX_REL_SLEB,   12)
WASM_RELOC(R_WASM_GLOBAL_INDEX_I32,       13)
WASM_RELOC(R_WASM_MEMORY_ADDR_LEB64,      14)
WASM_RELOC(R_WASM_MEMORY_ADDR_SLEB64,     15)
WASM_RELOC(R_WASM_MEMORY_ADDR_I64,        16)
WASM_RELOC(R_WASM_MEMORY_ADDR_REL_SLEB64, 17)
WASM_RELOC(R_WASM_TABLE_INDEX_SLEB64,     18)
WASM_RELOC(R_WASM_TABLE_INDEX_I64,        19)
WASM_RELOC(R_WASM_TABLE_NUMBER_LEB,       20)
WASM_RELOC(R_WASM_MEMORY_ADDR_TLS_SLEB,   21)
WASM_RELOC(R_WASM_FUNCTION_OFFSET_I64,    22)
WASM_RELOC(R_WASM_MEMORY_ADDR_LOCREL_I32, 23)
WASM_RELOC(R_WASM_TABLE_INDEX_REL_SLEB64, 24)
WASM_RELOC(R_WASM_MEMORY_ADDR_TLS_SLEB64, 25)
WASM_RELOC(R_WASM_FUNCTION_INDEX_I32,     26)
//===- Minidump.h - Minidump constants and structures -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines constants and data structures pertaining to the Windows
// Minidump core file format.
//
// Reference:
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms679293(v=vs.85).aspx
// https://chromium.googlesource.com/breakpad/breakpad/
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MINIDUMP_H
#define LLVM_BINARYFORMAT_MINIDUMP_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Endian.h"

namespace llvm {
namespace minidump {

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

/// The minidump header is the first part of a minidump file. It identifies the
/// file as a minidump file, and gives the location of the stream directory.
struct Header {
  static constexpr uint32_t MagicSignature = 0x504d444d; // PMDM
  static constexpr uint16_t MagicVersion = 0xa793;

  support::ulittle32_t Signature;
  // The high 16 bits of the version field are implementation specific. The
  // low 16 bits should be MagicVersion.
  support::ulittle32_t Version;
  support::ulittle32_t NumberOfStreams;
  support::ulittle32_t StreamDirectoryRVA;
  support::ulittle32_t Checksum;
  support::ulittle32_t TimeDateStamp;
  support::ulittle64_t Flags;
};
static_assert(sizeof(Header) == 32);
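
// Editor's sketch (helper name hypothetical, not LLVM API): the validity check
// implied by the magic constants above.
inline bool isValidHeader(const Header &H) {
  return H.Signature == Header::MagicSignature &&
         (H.Version & 0xffff) == Header::MagicVersion;
}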

/// The type of a minidump stream identifies its contents. Stream numbers after
/// LastReserved are for application-defined data streams.
enum class StreamType : uint32_t {
#define HANDLE_MDMP_STREAM_TYPE(CODE, NAME) NAME = CODE,
#include "llvm/BinaryFormat/MinidumpConstants.def"
  Unused = 0,
  LastReserved = 0x0000ffff,
};

/// Specifies the location (and size) of various objects in the minidump file.
/// The location is relative to the start of the file.
struct LocationDescriptor {
  support::ulittle32_t DataSize;
  support::ulittle32_t RVA;
};
static_assert(sizeof(LocationDescriptor) == 8);

/// Describes a single memory range (both its VM address and where to find it in
/// the file) of the process from which this minidump file was generated.
struct MemoryDescriptor {
  support::ulittle64_t StartOfMemoryRange;
  LocationDescriptor Memory;
};
static_assert(sizeof(MemoryDescriptor) == 16);

struct MemoryInfoListHeader {
  support::ulittle32_t SizeOfHeader;
  support::ulittle32_t SizeOfEntry;
  support::ulittle64_t NumberOfEntries;

  MemoryInfoListHeader() = default;
  MemoryInfoListHeader(uint32_t SizeOfHeader, uint32_t SizeOfEntry,
                       uint64_t NumberOfEntries)
      : SizeOfHeader(SizeOfHeader), SizeOfEntry(SizeOfEntry),
        NumberOfEntries(NumberOfEntries) {}
};
static_assert(sizeof(MemoryInfoListHeader) == 16);

enum class MemoryProtection : uint32_t {
#define HANDLE_MDMP_PROTECT(CODE, NAME, NATIVENAME) NAME = CODE,
#include "llvm/BinaryFormat/MinidumpConstants.def"
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/0xffffffffu),
};

enum class MemoryState : uint32_t {
#define HANDLE_MDMP_MEMSTATE(CODE, NAME, NATIVENAME) NAME = CODE,
#include "llvm/BinaryFormat/MinidumpConstants.def"
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/0xffffffffu),
};

enum class MemoryType : uint32_t {
#define HANDLE_MDMP_MEMTYPE(CODE, NAME, NATIVENAME) NAME = CODE,
#include "llvm/BinaryFormat/MinidumpConstants.def"
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/0xffffffffu),
};

struct MemoryInfo {
  support::ulittle64_t BaseAddress;
  support::ulittle64_t AllocationBase;
  support::little_t<MemoryProtection> AllocationProtect;
  support::ulittle32_t Reserved0;
  support::ulittle64_t RegionSize;
  support::little_t<MemoryState> State;
  support::little_t<MemoryProtection> Protect;
  support::little_t<MemoryType> Type;
  support::ulittle32_t Reserved1;
};
static_assert(sizeof(MemoryInfo) == 48);

/// Specifies the location and type of a single stream in the minidump file. The
/// minidump stream directory is an array of entries of this type, with its size
/// given by Header.NumberOfStreams.
struct Directory {
  support::little_t<StreamType> Type;
  LocationDescriptor Location;
};
static_assert(sizeof(Directory) == 12);
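
// Editor's sketch (FileData is a hypothetical buffer of the whole file): a
// stream's bytes are located via its directory entry, since every RVA is an
// offset from the start of the file.
//
//   ArrayRef<uint8_t> Stream =
//       FileData.slice(Dir.Location.RVA, Dir.Location.DataSize);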

/// The processor architecture of the system that generated this minidump. Used
/// in the ProcessorArch field of the SystemInfo stream.
enum class ProcessorArchitecture : uint16_t {
#define HANDLE_MDMP_ARCH(CODE, NAME) NAME = CODE,
#include "llvm/BinaryFormat/MinidumpConstants.def"
};

/// The OS Platform of the system that generated this minidump. Used in the
/// PlatformId field of the SystemInfo stream.
enum class OSPlatform : uint32_t {
#define HANDLE_MDMP_PLATFORM(CODE, NAME) NAME = CODE,
#include "llvm/BinaryFormat/MinidumpConstants.def"
};

/// Detailed information about the processor of the system that generated this
/// minidump. Its interpretation depends on the ProcessorArchitecture enum.
union CPUInfo {
  struct X86Info {
    char VendorID[12];                        // cpuid 0: ebx, edx, ecx
    support::ulittle32_t VersionInfo;         // cpuid 1: eax
    support::ulittle32_t FeatureInfo;         // cpuid 1: edx
    support::ulittle32_t AMDExtendedFeatures; // cpuid 0x80000001, ebx
  } X86;
  struct ArmInfo {
    support::ulittle32_t CPUID;
    support::ulittle32_t ElfHWCaps; // linux specific, 0 otherwise
  } Arm;
  struct OtherInfo {
    uint8_t ProcessorFeatures[16];
  } Other;
};
static_assert(sizeof(CPUInfo) == 24);

/// The SystemInfo stream, containing various information about the system where
/// this minidump was generated.
struct SystemInfo {
  support::little_t<ProcessorArchitecture> ProcessorArch;
  support::ulittle16_t ProcessorLevel;
  support::ulittle16_t ProcessorRevision;

  uint8_t NumberOfProcessors;
  uint8_t ProductType;

  support::ulittle32_t MajorVersion;
  support::ulittle32_t MinorVersion;
  support::ulittle32_t BuildNumber;
  support::little_t<OSPlatform> PlatformId;
  support::ulittle32_t CSDVersionRVA;

  support::ulittle16_t SuiteMask;
  support::ulittle16_t Reserved;

  CPUInfo CPU;
};
static_assert(sizeof(SystemInfo) == 56);

struct VSFixedFileInfo {
  support::ulittle32_t Signature;
  support::ulittle32_t StructVersion;
  support::ulittle32_t FileVersionHigh;
  support::ulittle32_t FileVersionLow;
  support::ulittle32_t ProductVersionHigh;
  support::ulittle32_t ProductVersionLow;
  support::ulittle32_t FileFlagsMask;
  support::ulittle32_t FileFlags;
  support::ulittle32_t FileOS;
  support::ulittle32_t FileType;
  support::ulittle32_t FileSubtype;
  support::ulittle32_t FileDateHigh;
  support::ulittle32_t FileDateLow;
};
static_assert(sizeof(VSFixedFileInfo) == 52);

inline bool operator==(const VSFixedFileInfo &LHS, const VSFixedFileInfo &RHS) {
  return memcmp(&LHS, &RHS, sizeof(VSFixedFileInfo)) == 0;
}

struct Module {
  support::ulittle64_t BaseOfImage;
  support::ulittle32_t SizeOfImage;
  support::ulittle32_t Checksum;
  support::ulittle32_t TimeDateStamp;
  support::ulittle32_t ModuleNameRVA;
  VSFixedFileInfo VersionInfo;
  LocationDescriptor CvRecord;
  LocationDescriptor MiscRecord;
  support::ulittle64_t Reserved0;
  support::ulittle64_t Reserved1;
};
static_assert(sizeof(Module) == 108);

/// Describes a single thread in the minidump file. Part of the ThreadList
/// stream.
struct Thread {
  support::ulittle32_t ThreadId;
  support::ulittle32_t SuspendCount;
  support::ulittle32_t PriorityClass;
  support::ulittle32_t Priority;
  support::ulittle64_t EnvironmentBlock;
  MemoryDescriptor Stack;
  LocationDescriptor Context;
};
static_assert(sizeof(Thread) == 48);

struct Exception {
  static constexpr size_t MaxParameters = 15;

  support::ulittle32_t ExceptionCode;
  support::ulittle32_t ExceptionFlags;
  support::ulittle64_t ExceptionRecord;
  support::ulittle64_t ExceptionAddress;
  support::ulittle32_t NumberParameters;
  support::ulittle32_t UnusedAlignment;
  support::ulittle64_t ExceptionInformation[MaxParameters];
};
static_assert(sizeof(Exception) == 152);

struct ExceptionStream {
  support::ulittle32_t ThreadId;
  support::ulittle32_t UnusedAlignment;
  Exception ExceptionRecord;
  LocationDescriptor ThreadContext;
};
static_assert(sizeof(ExceptionStream) == 168);

} // namespace minidump

template <> struct DenseMapInfo<minidump::StreamType> {
  static minidump::StreamType getEmptyKey() { return minidump::StreamType(-1); }

  static minidump::StreamType getTombstoneKey() {
    return minidump::StreamType(-2);
  }

  static unsigned getHashValue(minidump::StreamType Val) {
    return DenseMapInfo<uint32_t>::getHashValue(static_cast<uint32_t>(Val));
  }

  static bool isEqual(minidump::StreamType LHS, minidump::StreamType RHS) {
    return LHS == RHS;
  }
};

} // namespace llvm

#endif // LLVM_BINARYFORMAT_MINIDUMP_H
//===-- llvm/BinaryFormat/MachO.h - The MachO file format -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines manifest constants for the MachO object file format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BINARYFORMAT_MACHO_H
#define LLVM_BINARYFORMAT_MACHO_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/SwapByteOrder.h"

namespace llvm {

class Triple;

namespace MachO {
// Enums from <mach-o/loader.h>
enum : uint32_t {
  // Constants for the "magic" field in llvm::MachO::mach_header and
  // llvm::MachO::mach_header_64
  MH_MAGIC = 0xFEEDFACEu,
  MH_CIGAM = 0xCEFAEDFEu,
  MH_MAGIC_64 = 0xFEEDFACFu,
  MH_CIGAM_64 = 0xCFFAEDFEu,
  FAT_MAGIC = 0xCAFEBABEu,
  FAT_CIGAM = 0xBEBAFECAu,
  FAT_MAGIC_64 = 0xCAFEBABFu,
  FAT_CIGAM_64 = 0xBFBAFECAu
};

enum HeaderFileType {
  // Constants for the "filetype" field in llvm::MachO::mach_header and
  // llvm::MachO::mach_header_64
  MH_OBJECT = 0x1u,
  MH_EXECUTE = 0x2u,
  MH_FVMLIB = 0x3u,
  MH_CORE = 0x4u,
  MH_PRELOAD = 0x5u,
  MH_DYLIB = 0x6u,
  MH_DYLINKER = 0x7u,
  MH_BUNDLE = 0x8u,
  MH_DYLIB_STUB = 0x9u,
  MH_DSYM = 0xAu,
  MH_KEXT_BUNDLE = 0xBu,
  MH_FILESET = 0xCu,
};

enum {
  // Constant bits for the "flags" field in llvm::MachO::mach_header and
  // llvm::MachO::mach_header_64
  MH_NOUNDEFS = 0x00000001u,
  MH_INCRLINK = 0x00000002u,
  MH_DYLDLINK = 0x00000004u,
  MH_BINDATLOAD = 0x00000008u,
  MH_PREBOUND = 0x00000010u,
  MH_SPLIT_SEGS = 0x00000020u,
  MH_LAZY_INIT = 0x00000040u,
  MH_TWOLEVEL = 0x00000080u,
  MH_FORCE_FLAT = 0x00000100u,
  MH_NOMULTIDEFS = 0x00000200u,
  MH_NOFIXPREBINDING = 0x00000400u,
  MH_PREBINDABLE = 0x00000800u,
  MH_ALLMODSBOUND = 0x00001000u,
  MH_SUBSECTIONS_VIA_SYMBOLS = 0x00002000u,
  MH_CANONICAL = 0x00004000u,
  MH_WEAK_DEFINES = 0x00008000u,
  MH_BINDS_TO_WEAK = 0x00010000u,
  MH_ALLOW_STACK_EXECUTION = 0x00020000u,
  MH_ROOT_SAFE = 0x00040000u,
  MH_SETUID_SAFE = 0x00080000u,
  MH_NO_REEXPORTED_DYLIBS = 0x00100000u,
  MH_PIE = 0x00200000u,
  MH_DEAD_STRIPPABLE_DYLIB = 0x00400000u,
  MH_HAS_TLV_DESCRIPTORS = 0x00800000u,
  MH_NO_HEAP_EXECUTION = 0x01000000u,
  MH_APP_EXTENSION_SAFE = 0x02000000u,
  MH_NLIST_OUTOFSYNC_WITH_DYLDINFO = 0x04000000u,
  MH_SIM_SUPPORT = 0x08000000u,
  MH_DYLIB_IN_CACHE = 0x80000000u,
};

enum : uint32_t {
  // Flags for the "cmd" field in llvm::MachO::load_command
  LC_REQ_DYLD = 0x80000000u
};

#define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct) LCName = LCValue,

enum LoadCommandType : uint32_t {
#include "llvm/BinaryFormat/MachO.def"
};

#undef HANDLE_LOAD_COMMAND

enum : uint32_t {
  // Constant bits for the "flags" field in llvm::MachO::segment_command
  SG_HIGHVM = 0x1u,
  SG_FVMLIB = 0x2u,
  SG_NORELOC = 0x4u,
  SG_PROTECTED_VERSION_1 = 0x8u,
  SG_READ_ONLY = 0x10u,

  // Constant masks for the "flags" field in llvm::MachO::section and
  // llvm::MachO::section_64
  SECTION_TYPE = 0x000000ffu,           // SECTION_TYPE
  SECTION_ATTRIBUTES = 0xffffff00u,     // SECTION_ATTRIBUTES
  SECTION_ATTRIBUTES_USR = 0xff000000u, // SECTION_ATTRIBUTES_USR
  SECTION_ATTRIBUTES_SYS = 0x00ffff00u  // SECTION_ATTRIBUTES_SYS
};

/// These are the section type and attributes fields.  A MachO section can
/// have only one Type, but can have any of the attributes specified.
enum SectionType : uint32_t {
  // Constant masks for the "flags[7:0]" field in llvm::MachO::section and
  // llvm::MachO::section_64 (mask "flags" with SECTION_TYPE)

  /// S_REGULAR - Regular section.
  S_REGULAR = 0x00u,
  /// S_ZEROFILL - Zero fill on demand section.
  S_ZEROFILL = 0x01u,
  /// S_CSTRING_LITERALS - Section with literal C strings.
  S_CSTRING_LITERALS = 0x02u,
  /// S_4BYTE_LITERALS - Section with 4 byte literals.
  S_4BYTE_LITERALS = 0x03u,
  /// S_8BYTE_LITERALS - Section with 8 byte literals.
  S_8BYTE_LITERALS = 0x04u,
  /// S_LITERAL_POINTERS - Section with pointers to literals.
  S_LITERAL_POINTERS = 0x05u,
  /// S_NON_LAZY_SYMBOL_POINTERS - Section with non-lazy symbol pointers.
  S_NON_LAZY_SYMBOL_POINTERS = 0x06u,
  /// S_LAZY_SYMBOL_POINTERS - Section with lazy symbol pointers.
  S_LAZY_SYMBOL_POINTERS = 0x07u,
  /// S_SYMBOL_STUBS - Section with symbol stubs, byte size of stub in
  /// the Reserved2 field.
  S_SYMBOL_STUBS = 0x08u,
  /// S_MOD_INIT_FUNC_POINTERS - Section with only function pointers for
  /// initialization.
  S_MOD_INIT_FUNC_POINTERS = 0x09u,
  /// S_MOD_TERM_FUNC_POINTERS - Section with only function pointers for
  /// termination.
  S_MOD_TERM_FUNC_POINTERS = 0x0au,
  /// S_COALESCED - Section contains symbols that are to be coalesced.
  S_COALESCED = 0x0bu,
  /// S_GB_ZEROFILL - Zero fill on demand section (that can be larger than 4
  /// gigabytes).
  S_GB_ZEROFILL = 0x0cu,
  /// S_INTERPOSING - Section with only pairs of function pointers for
  /// interposing.
  S_INTERPOSING = 0x0du,
  /// S_16BYTE_LITERALS - Section with only 16 byte literals.
  S_16BYTE_LITERALS = 0x0eu,
  /// S_DTRACE_DOF - Section contains DTrace Object Format.
  S_DTRACE_DOF = 0x0fu,
  /// S_LAZY_DYLIB_SYMBOL_POINTERS - Section with lazy symbol pointers to
  /// lazy loaded dylibs.
  S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10u,
  /// S_THREAD_LOCAL_REGULAR - Thread local data section.
  S_THREAD_LOCAL_REGULAR = 0x11u,
  /// S_THREAD_LOCAL_ZEROFILL - Thread local zerofill section.
  S_THREAD_LOCAL_ZEROFILL = 0x12u,
  /// S_THREAD_LOCAL_VARIABLES - Section with thread local variable
  /// structure data.
  S_THREAD_LOCAL_VARIABLES = 0x13u,
  /// S_THREAD_LOCAL_VARIABLE_POINTERS - Section with pointers to thread
  /// local structures.
  S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14u,
  /// S_THREAD_LOCAL_INIT_FUNCTION_POINTERS - Section with thread local
  /// variable initialization pointers to functions.
  S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15u,
  /// S_INIT_FUNC_OFFSETS - Section with 32-bit offsets to initializer
  /// functions.
  S_INIT_FUNC_OFFSETS = 0x16u,

  LAST_KNOWN_SECTION_TYPE = S_INIT_FUNC_OFFSETS
};
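
// Editor's sketch (Sec is a hypothetical section or section_64): a section has
// exactly one type in the low byte of its flags and any combination of
// attributes in the remaining bits.
//
//   SectionType Ty = SectionType(Sec.flags & SECTION_TYPE);
//   uint32_t Attrs = Sec.flags & SECTION_ATTRIBUTES;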

enum : uint32_t {
  // Constant masks for the "flags[31:24]" field in llvm::MachO::section and
  // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_USR)

  /// S_ATTR_PURE_INSTRUCTIONS - Section contains only true machine
  /// instructions.
  S_ATTR_PURE_INSTRUCTIONS = 0x80000000u,
  /// S_ATTR_NO_TOC - Section contains coalesced symbols that are not to be
  /// in a ranlib table of contents.
  S_ATTR_NO_TOC = 0x40000000u,
  /// S_ATTR_STRIP_STATIC_SYMS - Ok to strip static symbols in this section
  /// in files with the MH_DYLDLINK flag.
  S_ATTR_STRIP_STATIC_SYMS = 0x20000000u,
  /// S_ATTR_NO_DEAD_STRIP - No dead stripping.
  S_ATTR_NO_DEAD_STRIP = 0x10000000u,
  /// S_ATTR_LIVE_SUPPORT - Blocks are live if they reference live blocks.
  S_ATTR_LIVE_SUPPORT = 0x08000000u,
  /// S_ATTR_SELF_MODIFYING_CODE - Used with i386 code stubs written on by
  /// dyld.
  S_ATTR_SELF_MODIFYING_CODE = 0x04000000u,
  /// S_ATTR_DEBUG - A debug section.
  S_ATTR_DEBUG = 0x02000000u,

  // Constant masks for the "flags[23:8]" field in llvm::MachO::section and
  // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_SYS)

  /// S_ATTR_SOME_INSTRUCTIONS - Section contains some machine instructions.
  S_ATTR_SOME_INSTRUCTIONS = 0x00000400u,
  /// S_ATTR_EXT_RELOC - Section has external relocation entries.
  S_ATTR_EXT_RELOC = 0x00000200u,
  /// S_ATTR_LOC_RELOC - Section has local relocation entries.
  S_ATTR_LOC_RELOC = 0x00000100u,

  // Constant masks for the value of an indirect symbol in an indirect
  // symbol table
  INDIRECT_SYMBOL_LOCAL = 0x80000000u,
  INDIRECT_SYMBOL_ABS = 0x40000000u
};

enum DataRegionType {
  // Constants for the "kind" field in a data_in_code_entry structure
  DICE_KIND_DATA = 1u,
  DICE_KIND_JUMP_TABLE8 = 2u,
  DICE_KIND_JUMP_TABLE16 = 3u,
  DICE_KIND_JUMP_TABLE32 = 4u,
  DICE_KIND_ABS_JUMP_TABLE32 = 5u
};

enum RebaseType {
  REBASE_TYPE_POINTER = 1u,
  REBASE_TYPE_TEXT_ABSOLUTE32 = 2u,
  REBASE_TYPE_TEXT_PCREL32 = 3u
};

enum { REBASE_OPCODE_MASK = 0xF0u, REBASE_IMMEDIATE_MASK = 0x0Fu };

enum RebaseOpcode {
  REBASE_OPCODE_DONE = 0x00u,
  REBASE_OPCODE_SET_TYPE_IMM = 0x10u,
  REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x20u,
  REBASE_OPCODE_ADD_ADDR_ULEB = 0x30u,
  REBASE_OPCODE_ADD_ADDR_IMM_SCALED = 0x40u,
  REBASE_OPCODE_DO_REBASE_IMM_TIMES = 0x50u,
  REBASE_OPCODE_DO_REBASE_ULEB_TIMES = 0x60u,
  REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB = 0x70u,
  REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB = 0x80u
};
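
// Editor's sketch (Byte hypothetical): each rebase opcode byte packs the
// opcode in the high nibble and an immediate operand in the low nibble; the
// bind opcode stream below uses the same packing with BIND_OPCODE_MASK and
// BIND_IMMEDIATE_MASK.
//
//   RebaseOpcode Op = RebaseOpcode(Byte & REBASE_OPCODE_MASK);
//   uint8_t Imm = Byte & REBASE_IMMEDIATE_MASK;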

enum BindType {
  BIND_TYPE_POINTER = 1u,
  BIND_TYPE_TEXT_ABSOLUTE32 = 2u,
  BIND_TYPE_TEXT_PCREL32 = 3u
};

enum BindSpecialDylib {
  BIND_SPECIAL_DYLIB_SELF = 0,
  BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE = -1,
  BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2,
  BIND_SPECIAL_DYLIB_WEAK_LOOKUP = -3
};

enum {
  BIND_SYMBOL_FLAGS_WEAK_IMPORT = 0x1u,
  BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION = 0x8u,

  BIND_OPCODE_MASK = 0xF0u,
  BIND_IMMEDIATE_MASK = 0x0Fu
};

enum BindOpcode {
  BIND_OPCODE_DONE = 0x00u,
  BIND_OPCODE_SET_DYLIB_ORDINAL_IMM = 0x10u,
  BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB = 0x20u,
  BIND_OPCODE_SET_DYLIB_SPECIAL_IMM = 0x30u,
  BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM = 0x40u,
  BIND_OPCODE_SET_TYPE_IMM = 0x50u,
  BIND_OPCODE_SET_ADDEND_SLEB = 0x60u,
  BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x70u,
  BIND_OPCODE_ADD_ADDR_ULEB = 0x80u,
  BIND_OPCODE_DO_BIND = 0x90u,
  BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB = 0xA0u,
  BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED = 0xB0u,
  BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB = 0xC0u
};

enum {
  EXPORT_SYMBOL_FLAGS_KIND_MASK = 0x03u,
  EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION = 0x04u,
  EXPORT_SYMBOL_FLAGS_REEXPORT = 0x08u,
  EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER = 0x10u
};

enum ExportSymbolKind {
  EXPORT_SYMBOL_FLAGS_KIND_REGULAR = 0x00u,
  EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL = 0x01u,
  EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE = 0x02u
};

enum {
  // Constant masks for the "n_type" field in llvm::MachO::nlist and
  // llvm::MachO::nlist_64
  N_STAB = 0xe0,
  N_PEXT = 0x10,
  N_TYPE = 0x0e,
  N_EXT = 0x01
};

enum NListType : uint8_t {
  // Constants for the "n_type & N_TYPE" llvm::MachO::nlist and
  // llvm::MachO::nlist_64
  N_UNDF = 0x0u,
  N_ABS = 0x2u,
  N_SECT = 0xeu,
  N_PBUD = 0xcu,
  N_INDR = 0xau
};
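
// Editor's sketch (Sym is a hypothetical nlist or nlist_64 entry): stab
// entries are identified first, and only non-stab entries have a meaningful
// N_TYPE field.
//
//   bool IsStab = (Sym.n_type & N_STAB) != 0;
//   NListType Ty = NListType(Sym.n_type & N_TYPE);
//   bool IsExternal = (Sym.n_type & N_EXT) != 0;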

enum SectionOrdinal {
  // Constants for the "n_sect" field in llvm::MachO::nlist and
  // llvm::MachO::nlist_64
  NO_SECT = 0u,
  MAX_SECT = 0xffu
};

enum {
  // Constant masks for the "n_desc" field in llvm::MachO::nlist and
  // llvm::MachO::nlist_64
  // The low 3 bits are for the REFERENCE_TYPE.
  REFERENCE_TYPE = 0x7,
  REFERENCE_FLAG_UNDEFINED_NON_LAZY = 0,
  REFERENCE_FLAG_UNDEFINED_LAZY = 1,
  REFERENCE_FLAG_DEFINED = 2,
  REFERENCE_FLAG_PRIVATE_DEFINED = 3,
  REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY = 4,
  REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY = 5,
  // Flag bits (some overlap with the library ordinal bits).
  N_ARM_THUMB_DEF = 0x0008u,
  REFERENCED_DYNAMICALLY = 0x0010u,
  N_NO_DEAD_STRIP = 0x0020u,
  N_WEAK_REF = 0x0040u,
  N_WEAK_DEF = 0x0080u,
  N_SYMBOL_RESOLVER = 0x0100u,
  N_ALT_ENTRY = 0x0200u,
  N_COLD_FUNC = 0x0400u,
  // For undefined symbols coming from libraries, see GET_LIBRARY_ORDINAL()
  // as these are in the top 8 bits.
  SELF_LIBRARY_ORDINAL = 0x0,
  MAX_LIBRARY_ORDINAL = 0xfd,
  DYNAMIC_LOOKUP_ORDINAL = 0xfe,
  EXECUTABLE_ORDINAL = 0xff
};

enum StabType {
  // Constant values for the "n_type" field in llvm::MachO::nlist and
  // llvm::MachO::nlist_64 when "(n_type & N_STAB) != 0"
  N_GSYM = 0x20u,
  N_FNAME = 0x22u,
  N_FUN = 0x24u,
  N_STSYM = 0x26u,
  N_LCSYM = 0x28u,
  N_BNSYM = 0x2Eu,
  N_PC = 0x30u,
  N_AST = 0x32u,
  N_OPT = 0x3Cu,
  N_RSYM = 0x40u,
  N_SLINE = 0x44u,
  N_ENSYM = 0x4Eu,
  N_SSYM = 0x60u,
  N_SO = 0x64u,
  N_OSO = 0x66u,
  N_LSYM = 0x80u,
  N_BINCL = 0x82u,
  N_SOL = 0x84u,
  N_PARAMS = 0x86u,
  N_VERSION = 0x88u,
  N_OLEVEL = 0x8Au,
  N_PSYM = 0xA0u,
  N_EINCL = 0xA2u,
  N_ENTRY = 0xA4u,
  N_LBRAC = 0xC0u,
  N_EXCL = 0xC2u,
  N_RBRAC = 0xE0u,
  N_BCOMM = 0xE2u,
  N_ECOMM = 0xE4u,
  N_ECOML = 0xE8u,
  N_LENG = 0xFEu
};

enum : uint32_t {
  // Constant values for the r_symbolnum field in an
  // llvm::MachO::relocation_info structure when r_extern is 0.
  R_ABS = 0,

  // Constant bits for the r_address field in an
  // llvm::MachO::relocation_info structure.
  R_SCATTERED = 0x80000000
};

enum RelocationInfoType {
  // Constant values for the r_type field in an
  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
  // structure.
  GENERIC_RELOC_INVALID = 0xff,
  GENERIC_RELOC_VANILLA = 0,
  GENERIC_RELOC_PAIR = 1,
  GENERIC_RELOC_SECTDIFF = 2,
  GENERIC_RELOC_PB_LA_PTR = 3,
  GENERIC_RELOC_LOCAL_SECTDIFF = 4,
  GENERIC_RELOC_TLV = 5,

  // Constant values for the r_type field in a PowerPC architecture
  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
  // structure.
  PPC_RELOC_VANILLA = GENERIC_RELOC_VANILLA,
  PPC_RELOC_PAIR = GENERIC_RELOC_PAIR,
  PPC_RELOC_BR14 = 2,
  PPC_RELOC_BR24 = 3,
  PPC_RELOC_HI16 = 4,
  PPC_RELOC_LO16 = 5,
  PPC_RELOC_HA16 = 6,
  PPC_RELOC_LO14 = 7,
  PPC_RELOC_SECTDIFF = 8,
  PPC_RELOC_PB_LA_PTR = 9,
  PPC_RELOC_HI16_SECTDIFF = 10,
  PPC_RELOC_LO16_SECTDIFF = 11,
  PPC_RELOC_HA16_SECTDIFF = 12,
  PPC_RELOC_JBSR = 13,
  PPC_RELOC_LO14_SECTDIFF = 14,
  PPC_RELOC_LOCAL_SECTDIFF = 15,

  // Constant values for the r_type field in an ARM architecture
  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
  // structure.
  ARM_RELOC_VANILLA = GENERIC_RELOC_VANILLA,
  ARM_RELOC_PAIR = GENERIC_RELOC_PAIR,
  ARM_RELOC_SECTDIFF = GENERIC_RELOC_SECTDIFF,
  ARM_RELOC_LOCAL_SECTDIFF = 3,
  ARM_RELOC_PB_LA_PTR = 4,
  ARM_RELOC_BR24 = 5,
  ARM_THUMB_RELOC_BR22 = 6,
  ARM_THUMB_32BIT_BRANCH = 7, // obsolete
  ARM_RELOC_HALF = 8,
  ARM_RELOC_HALF_SECTDIFF = 9,

  // Constant values for the r_type field in an ARM64 architecture
  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
  // structure.

  // For pointers.
  ARM64_RELOC_UNSIGNED = 0,
  // Must be followed by an ARM64_RELOC_UNSIGNED
  ARM64_RELOC_SUBTRACTOR = 1,
  // A B/BL instruction with 26-bit displacement.
  ARM64_RELOC_BRANCH26 = 2,
  // PC-rel distance to page of target.
  ARM64_RELOC_PAGE21 = 3,
  // Offset within page, scaled by r_length.
  ARM64_RELOC_PAGEOFF12 = 4,
  // PC-rel distance to page of GOT slot.
  ARM64_RELOC_GOT_LOAD_PAGE21 = 5,
  // Offset within page of GOT slot, scaled by r_length.
  ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6,
  // For pointers to GOT slots.
  ARM64_RELOC_POINTER_TO_GOT = 7,
  // PC-rel distance to page of TLVP slot.
  ARM64_RELOC_TLVP_LOAD_PAGE21 = 8,
  // Offset within page of TLVP slot, scaled by r_length.
  ARM64_RELOC_TLVP_LOAD_PAGEOFF12 = 9,
  // Must be followed by ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12.
  ARM64_RELOC_ADDEND = 10,
  // An authenticated pointer.
  ARM64_RELOC_AUTHENTICATED_POINTER = 11,

  // Constant values for the r_type field in an x86_64 architecture
  // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
  // structure
  X86_64_RELOC_UNSIGNED = 0,
  X86_64_RELOC_SIGNED = 1,
  X86_64_RELOC_BRANCH = 2,
  X86_64_RELOC_GOT_LOAD = 3,
  X86_64_RELOC_GOT = 4,
  X86_64_RELOC_SUBTRACTOR = 5,
  X86_64_RELOC_SIGNED_1 = 6,
  X86_64_RELOC_SIGNED_2 = 7,
  X86_64_RELOC_SIGNED_4 = 8,
  X86_64_RELOC_TLV = 9
};

// Values for segment_command.initprot.
// From <mach/vm_prot.h>
enum { VM_PROT_READ = 0x1, VM_PROT_WRITE = 0x2, VM_PROT_EXECUTE = 0x4 };

// Values for platform field in build_version_command.
enum PlatformType {
  PLATFORM_UNKNOWN = 0,
  PLATFORM_MACOS = 1,
  PLATFORM_IOS = 2,
  PLATFORM_TVOS = 3,
  PLATFORM_WATCHOS = 4,
  PLATFORM_BRIDGEOS = 5,
  PLATFORM_MACCATALYST = 6,
  PLATFORM_IOSSIMULATOR = 7,
  PLATFORM_TVOSSIMULATOR = 8,
  PLATFORM_WATCHOSSIMULATOR = 9,
  PLATFORM_DRIVERKIT = 10,
};

// Values for tools enum in build_tool_version.
enum { TOOL_CLANG = 1, TOOL_SWIFT = 2, TOOL_LD = 3, TOOL_LLD = 4 };

// Structs from <mach-o/loader.h>

struct mach_header {
  uint32_t magic;
  uint32_t cputype;
  uint32_t cpusubtype;
  uint32_t filetype;
  uint32_t ncmds;
  uint32_t sizeofcmds;
  uint32_t flags;
};

struct mach_header_64 {
  uint32_t magic;
  uint32_t cputype;
  uint32_t cpusubtype;
  uint32_t filetype;
  uint32_t ncmds;
  uint32_t sizeofcmds;
  uint32_t flags;
  uint32_t reserved;
};

struct load_command {
  uint32_t cmd;
  uint32_t cmdsize;
};

struct segment_command {
  uint32_t cmd;
  uint32_t cmdsize;
  char segname[16];
  uint32_t vmaddr;
  uint32_t vmsize;
  uint32_t fileoff;
  uint32_t filesize;
  uint32_t maxprot;
  uint32_t initprot;
  uint32_t nsects;
  uint32_t flags;
};

struct segment_command_64 {
  uint32_t cmd;
  uint32_t cmdsize;
  char segname[16];
  uint64_t vmaddr;
  uint64_t vmsize;
  uint64_t fileoff;
  uint64_t filesize;
  uint32_t maxprot;
  uint32_t initprot;
  uint32_t nsects;
  uint32_t flags;
};

struct section {
  char sectname[16];
  char segname[16];
  uint32_t addr;
  uint32_t size;
  uint32_t offset;
  uint32_t align;
  uint32_t reloff;
  uint32_t nreloc;
  uint32_t flags;
  uint32_t reserved1;
  uint32_t reserved2;
};

struct section_64 {
  char sectname[16];
  char segname[16];
  uint64_t addr;
  uint64_t size;
  uint32_t offset;
  uint32_t align;
  uint32_t reloff;
  uint32_t nreloc;
  uint32_t flags;
  uint32_t reserved1;
  uint32_t reserved2;
  uint32_t reserved3;
};

inline bool isVirtualSection(uint8_t type) {
  return (type == MachO::S_ZEROFILL || type == MachO::S_GB_ZEROFILL ||
          type == MachO::S_THREAD_LOCAL_ZEROFILL);
}

struct fvmlib {
  uint32_t name;
  uint32_t minor_version;
  uint32_t header_addr;
};

// The fvmlib_command is obsolete and no longer supported.
struct fvmlib_command {
  uint32_t cmd;
  uint32_t cmdsize;
  struct fvmlib fvmlib;
};

struct dylib {
  uint32_t name;
  uint32_t timestamp;
  uint32_t current_version;
  uint32_t compatibility_version;
};

struct dylib_command {
  uint32_t cmd;
  uint32_t cmdsize;
  struct dylib dylib;
};

struct sub_framework_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t umbrella;
};

struct sub_client_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t client;
};

struct sub_umbrella_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t sub_umbrella;
};

struct sub_library_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t sub_library;
};

// The prebound_dylib_command is obsolete and no longer supported.
struct prebound_dylib_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t name;
  uint32_t nmodules;
  uint32_t linked_modules;
};

struct dylinker_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t name;
};

struct thread_command {
  uint32_t cmd;
  uint32_t cmdsize;
};

struct routines_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t init_address;
  uint32_t init_module;
  uint32_t reserved1;
  uint32_t reserved2;
  uint32_t reserved3;
  uint32_t reserved4;
  uint32_t reserved5;
  uint32_t reserved6;
};

struct routines_command_64 {
  uint32_t cmd;
  uint32_t cmdsize;
  uint64_t init_address;
  uint64_t init_module;
  uint64_t reserved1;
  uint64_t reserved2;
  uint64_t reserved3;
  uint64_t reserved4;
  uint64_t reserved5;
  uint64_t reserved6;
};

struct symtab_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t symoff;
  uint32_t nsyms;
  uint32_t stroff;
  uint32_t strsize;
};

struct dysymtab_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t ilocalsym;
  uint32_t nlocalsym;
  uint32_t iextdefsym;
  uint32_t nextdefsym;
  uint32_t iundefsym;
  uint32_t nundefsym;
  uint32_t tocoff;
  uint32_t ntoc;
  uint32_t modtaboff;
  uint32_t nmodtab;
  uint32_t extrefsymoff;
  uint32_t nextrefsyms;
  uint32_t indirectsymoff;
  uint32_t nindirectsyms;
  uint32_t extreloff;
  uint32_t nextrel;
  uint32_t locreloff;
  uint32_t nlocrel;
};

struct dylib_table_of_contents {
  uint32_t symbol_index;
  uint32_t module_index;
};

struct dylib_module {
  uint32_t module_name;
  uint32_t iextdefsym;
  uint32_t nextdefsym;
  uint32_t irefsym;
  uint32_t nrefsym;
  uint32_t ilocalsym;
  uint32_t nlocalsym;
  uint32_t iextrel;
  uint32_t nextrel;
  uint32_t iinit_iterm;
  uint32_t ninit_nterm;
  uint32_t objc_module_info_addr;
  uint32_t objc_module_info_size;
};

struct dylib_module_64 {
  uint32_t module_name;
  uint32_t iextdefsym;
  uint32_t nextdefsym;
  uint32_t irefsym;
  uint32_t nrefsym;
  uint32_t ilocalsym;
  uint32_t nlocalsym;
  uint32_t iextrel;
  uint32_t nextrel;
  uint32_t iinit_iterm;
  uint32_t ninit_nterm;
  uint32_t objc_module_info_size;
  uint64_t objc_module_info_addr;
};

struct dylib_reference {
  uint32_t isym : 24, flags : 8;
};

// The twolevel_hints_command is obsolete and no longer supported.
struct twolevel_hints_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t offset;
  uint32_t nhints;
};

// The twolevel_hints_command is obsolete and no longer supported.
struct twolevel_hint {
  uint32_t isub_image : 8, itoc : 24;
};

// The prebind_cksum_command is obsolete and no longer supported.
struct prebind_cksum_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t cksum;
};

struct uuid_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint8_t uuid[16];
};

struct rpath_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t path;
};

struct linkedit_data_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t dataoff;
  uint32_t datasize;
};

struct data_in_code_entry {
  uint32_t offset;
  uint16_t length;
  uint16_t kind;
};

struct source_version_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint64_t version;
};

struct encryption_info_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t cryptoff;
  uint32_t cryptsize;
  uint32_t cryptid;
};

struct encryption_info_command_64 {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t cryptoff;
  uint32_t cryptsize;
  uint32_t cryptid;
  uint32_t pad;
};

struct version_min_command {
  uint32_t cmd;     // LC_VERSION_MIN_MACOSX or
                    // LC_VERSION_MIN_IPHONEOS
  uint32_t cmdsize; // sizeof(struct version_min_command)
  uint32_t version; // X.Y.Z is encoded in nibbles xxxx.yy.zz
  uint32_t sdk;     // X.Y.Z is encoded in nibbles xxxx.yy.zz
};
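
// Decoding sketch (editor's addition, hypothetical helper): the nibble
// packing used by `version` and `sdk` (and by build_version_command's
// `minos` below) stores X in the high 16 bits and Y and Z in one byte each,
// so 10.15.1 encodes as 0x000A0F01.
inline void unpackVersionNibbles(uint32_t V, unsigned &X, unsigned &Y,
                                 unsigned &Z) {
  X = V >> 16;         // xxxx
  Y = (V >> 8) & 0xff; // yy
  Z = V & 0xff;        // zz
}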

struct note_command {
  uint32_t cmd;        // LC_NOTE
  uint32_t cmdsize;    // sizeof(struct note_command)
  char data_owner[16]; // owner name for this LC_NOTE
  uint64_t offset;     // file offset of this data
  uint64_t size;       // length of data region
};

struct build_tool_version {
  uint32_t tool;    // enum for the tool
  uint32_t version; // version of the tool
};

struct build_version_command {
  uint32_t cmd;      // LC_BUILD_VERSION
  uint32_t cmdsize;  // sizeof(struct build_version_command) +
                     // ntools * sizeof(struct build_tool_version)
  uint32_t platform; // platform
  uint32_t minos;    // X.Y.Z is encoded in nibbles xxxx.yy.zz
  uint32_t sdk;      // X.Y.Z is encoded in nibbles xxxx.yy.zz
  uint32_t ntools;   // number of tool entries following this
};

struct dyld_env_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t name;
};

struct dyld_info_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t rebase_off;
  uint32_t rebase_size;
  uint32_t bind_off;
  uint32_t bind_size;
  uint32_t weak_bind_off;
  uint32_t weak_bind_size;
  uint32_t lazy_bind_off;
  uint32_t lazy_bind_size;
  uint32_t export_off;
  uint32_t export_size;
};

struct linker_option_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t count;
};

struct fileset_entry_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint64_t vmaddr;
  uint64_t fileoff;
  uint32_t entry_id;
};

// The symseg_command is obsolete and no longer supported.
struct symseg_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t offset;
  uint32_t size;
};

// The ident_command is obsolete and no longer supported.
struct ident_command {
  uint32_t cmd;
  uint32_t cmdsize;
};

// The fvmfile_command is obsolete and no longer supported.
struct fvmfile_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint32_t name;
  uint32_t header_addr;
};

struct tlv_descriptor_32 {
  uint32_t thunk;
  uint32_t key;
  uint32_t offset;
};

struct tlv_descriptor_64 {
  uint64_t thunk;
  uint64_t key;
  uint64_t offset;
};

struct tlv_descriptor {
  uintptr_t thunk;
  uintptr_t key;
  uintptr_t offset;
};

struct entry_point_command {
  uint32_t cmd;
  uint32_t cmdsize;
  uint64_t entryoff;
  uint64_t stacksize;
};

// Structs from <mach-o/fat.h>
struct fat_header {
  uint32_t magic;
  uint32_t nfat_arch;
};

struct fat_arch {
  uint32_t cputype;
  uint32_t cpusubtype;
  uint32_t offset;
  uint32_t size;
  uint32_t align;
};

struct fat_arch_64 {
  uint32_t cputype;
  uint32_t cpusubtype;
  uint64_t offset;
  uint64_t size;
  uint32_t align;
  uint32_t reserved;
};

// Structs from <mach-o/reloc.h>
struct relocation_info {
  int32_t r_address;
  uint32_t r_symbolnum : 24, r_pcrel : 1, r_length : 2, r_extern : 1,
      r_type : 4;
};

struct scattered_relocation_info {
#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && (BYTE_ORDER == BIG_ENDIAN)
  uint32_t r_scattered : 1, r_pcrel : 1, r_length : 2, r_type : 4,
      r_address : 24;
#else
  uint32_t r_address : 24, r_type : 4, r_length : 2, r_pcrel : 1,
      r_scattered : 1;
#endif
  int32_t r_value;
};

// Structs NOT from <mach-o/reloc.h>, but that make LLVM's life easier
struct any_relocation_info {
  uint32_t r_word0, r_word1;
};
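
// Sketch (editor's addition, hypothetical helper): a raw relocation entry is
// scattered when the high bit of its first word (the R_SCATTERED bit,
// 0x80000000) is set. This assumes r_word0 is already in host byte order.
inline bool isScatteredRelocation(const any_relocation_info &RE) {
  return (RE.r_word0 & 0x80000000u) != 0;
}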

// Structs from <mach-o/nlist.h>
struct nlist_base {
  uint32_t n_strx;
  uint8_t n_type;
  uint8_t n_sect;
  uint16_t n_desc;
};

struct nlist {
  uint32_t n_strx;
  uint8_t n_type;
  uint8_t n_sect;
  int16_t n_desc;
  uint32_t n_value;
};

struct nlist_64 {
  uint32_t n_strx;
  uint8_t n_type;
  uint8_t n_sect;
  uint16_t n_desc;
  uint64_t n_value;
};

// Values for dyld_chained_fixups_header::imports_format.
enum ChainedImportFormat {
  DYLD_CHAINED_IMPORT = 1,
  DYLD_CHAINED_IMPORT_ADDEND = 2,
  DYLD_CHAINED_IMPORT_ADDEND64 = 3,
};

// Values for dyld_chained_fixups_header::symbols_format.
enum {
  DYLD_CHAINED_SYMBOL_UNCOMPRESSED = 0,
  DYLD_CHAINED_SYMBOL_ZLIB = 1,
};

// Values for dyld_chained_starts_in_segment::page_start.
enum {
  DYLD_CHAINED_PTR_START_NONE = 0xFFFF,
  DYLD_CHAINED_PTR_START_MULTI = 0x8000, // page which has multiple starts
  DYLD_CHAINED_PTR_START_LAST = 0x8000,  // last chain_start for a given page
};

// Values for dyld_chained_starts_in_segment::pointer_format.
enum {
  DYLD_CHAINED_PTR_ARM64E = 1,
  DYLD_CHAINED_PTR_64 = 2,
  DYLD_CHAINED_PTR_32 = 3,
  DYLD_CHAINED_PTR_32_CACHE = 4,
  DYLD_CHAINED_PTR_32_FIRMWARE = 5,
  DYLD_CHAINED_PTR_64_OFFSET = 6,
  DYLD_CHAINED_PTR_ARM64E_KERNEL = 7,
  DYLD_CHAINED_PTR_64_KERNEL_CACHE = 8,
  DYLD_CHAINED_PTR_ARM64E_USERLAND = 9,
  DYLD_CHAINED_PTR_ARM64E_FIRMWARE = 10,
  DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE = 11,
  DYLD_CHAINED_PTR_ARM64E_USERLAND24 = 12,
};

/// Structs for dyld chained fixups.
/// dyld_chained_fixups_header is the data pointed to by the
/// LC_DYLD_CHAINED_FIXUPS load command.
struct dyld_chained_fixups_header {
  uint32_t fixups_version; ///< 0
  uint32_t starts_offset;  ///< Offset of dyld_chained_starts_in_image.
  uint32_t imports_offset; ///< Offset of imports table in chain_data.
  uint32_t symbols_offset; ///< Offset of symbol strings in chain_data.
  uint32_t imports_count;  ///< Number of imported symbol names.
  uint32_t imports_format; ///< DYLD_CHAINED_IMPORT*
  uint32_t symbols_format; ///< 0 => uncompressed, 1 => zlib compressed
};

/// dyld_chained_starts_in_image is embedded in the LC_DYLD_CHAINED_FIXUPS
/// payload. Each seg_info_offset entry is the offset into this struct of that
/// segment's dyld_chained_starts_in_segment data, which follows as a pool.
struct dyld_chained_starts_in_image {
  uint32_t seg_count;
  uint32_t seg_info_offset[1];
};

struct dyld_chained_starts_in_segment {
  uint32_t size;              ///< Size of this, including chain_starts entries
  uint16_t page_size;         ///< Page size in bytes (0x1000 or 0x4000)
  uint16_t pointer_format;    ///< DYLD_CHAINED_PTR*
  uint64_t segment_offset;    ///< VM offset from the __TEXT segment
  uint32_t max_valid_pointer; ///< Values beyond this are not pointers on 32-bit
  uint16_t page_count;        ///< Length of the page_start array
  uint16_t page_start[1];     ///< Page offset of first fixup on each page, or
                              ///< DYLD_CHAINED_PTR_START_NONE if no fixups
};
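
// Interpretation sketch (editor's addition, hypothetical helper): a
// page_start entry is DYLD_CHAINED_PTR_START_NONE when its page has no
// fixups, and has DYLD_CHAINED_PTR_START_MULTI set when the page needs
// several chain starts.
inline bool pageHasChainedFixups(uint16_t PageStart) {
  return PageStart != DYLD_CHAINED_PTR_START_NONE;
}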

// DYLD_CHAINED_IMPORT
struct dyld_chained_import {
  uint32_t lib_ordinal : 8;
  uint32_t weak_import : 1;
  uint32_t name_offset : 23;
};

// DYLD_CHAINED_IMPORT_ADDEND
struct dyld_chained_import_addend {
  uint32_t lib_ordinal : 8;
  uint32_t weak_import : 1;
  uint32_t name_offset : 23;
  int32_t addend;
};

// DYLD_CHAINED_IMPORT_ADDEND64
struct dyld_chained_import_addend64 {
  uint64_t lib_ordinal : 16;
  uint64_t weak_import : 1;
  uint64_t reserved : 15;
  uint64_t name_offset : 32;
  uint64_t addend;
};

// The `bind` field (most significant bit) of the encoded fixup determines
// whether it is dyld_chained_ptr_64_bind or dyld_chained_ptr_64_rebase.

// DYLD_CHAINED_PTR_64/DYLD_CHAINED_PTR_64_OFFSET
struct dyld_chained_ptr_64_bind {
  uint64_t ordinal : 24;
  uint64_t addend : 8;
  uint64_t reserved : 19;
  uint64_t next : 12;
  uint64_t bind : 1; // set to 1
};

// DYLD_CHAINED_PTR_64/DYLD_CHAINED_PTR_64_OFFSET
struct dyld_chained_ptr_64_rebase {
  uint64_t target : 36;
  uint64_t high8 : 8;
  uint64_t reserved : 7;
  uint64_t next : 12;
  uint64_t bind : 1; // set to 0
};
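
// Decoding sketch (editor's addition, hypothetical helper): test the most
// significant bit of the raw fixup before reinterpreting it as one of the
// two structs above. Assumes a little-endian host where bit-fields are
// allocated from the least significant bit, so `bind` lands in bit 63.
inline bool isChainedBind64(uint64_t RawFixup) {
  return (RawFixup >> 63) != 0; // the `bind` bit
}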

// Byte order swapping functions for MachO structs

inline void swapStruct(fat_header &mh) {
  sys::swapByteOrder(mh.magic);
  sys::swapByteOrder(mh.nfat_arch);
}
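
// Usage sketch (editor's addition, hypothetical helper): fat headers are
// stored big-endian, so a little-endian reader sees the byte-reversed magic
// (FAT_CIGAM, 0xbebafeca, assumed defined elsewhere in this header) and
// swaps the struct in place before using it.
inline void swapFatHeaderIfNeeded(fat_header &FH) {
  if (FH.magic == 0xbebafecau) // FAT_CIGAM
    swapStruct(FH);
}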

inline void swapStruct(fat_arch &mh) {
  sys::swapByteOrder(mh.cputype);
  sys::swapByteOrder(mh.cpusubtype);
  sys::swapByteOrder(mh.offset);
  sys::swapByteOrder(mh.size);
  sys::swapByteOrder(mh.align);
}

inline void swapStruct(fat_arch_64 &mh) {
  sys::swapByteOrder(mh.cputype);
  sys::swapByteOrder(mh.cpusubtype);
  sys::swapByteOrder(mh.offset);
  sys::swapByteOrder(mh.size);
  sys::swapByteOrder(mh.align);
  sys::swapByteOrder(mh.reserved);
}

inline void swapStruct(mach_header &mh) {
  sys::swapByteOrder(mh.magic);
  sys::swapByteOrder(mh.cputype);
  sys::swapByteOrder(mh.cpusubtype);
  sys::swapByteOrder(mh.filetype);
  sys::swapByteOrder(mh.ncmds);
  sys::swapByteOrder(mh.sizeofcmds);
  sys::swapByteOrder(mh.flags);
}

inline void swapStruct(mach_header_64 &H) {
  sys::swapByteOrder(H.magic);
  sys::swapByteOrder(H.cputype);
  sys::swapByteOrder(H.cpusubtype);
  sys::swapByteOrder(H.filetype);
  sys::swapByteOrder(H.ncmds);
  sys::swapByteOrder(H.sizeofcmds);
  sys::swapByteOrder(H.flags);
  sys::swapByteOrder(H.reserved);
}

inline void swapStruct(load_command &lc) {
  sys::swapByteOrder(lc.cmd);
  sys::swapByteOrder(lc.cmdsize);
}

inline void swapStruct(symtab_command &lc) {
  sys::swapByteOrder(lc.cmd);
  sys::swapByteOrder(lc.cmdsize);
  sys::swapByteOrder(lc.symoff);
  sys::swapByteOrder(lc.nsyms);
  sys::swapByteOrder(lc.stroff);
  sys::swapByteOrder(lc.strsize);
}

inline void swapStruct(segment_command_64 &seg) {
  sys::swapByteOrder(seg.cmd);
  sys::swapByteOrder(seg.cmdsize);
  sys::swapByteOrder(seg.vmaddr);
  sys::swapByteOrder(seg.vmsize);
  sys::swapByteOrder(seg.fileoff);
  sys::swapByteOrder(seg.filesize);
  sys::swapByteOrder(seg.maxprot);
  sys::swapByteOrder(seg.initprot);
  sys::swapByteOrder(seg.nsects);
  sys::swapByteOrder(seg.flags);
}

inline void swapStruct(segment_command &seg) {
  sys::swapByteOrder(seg.cmd);
  sys::swapByteOrder(seg.cmdsize);
  sys::swapByteOrder(seg.vmaddr);
  sys::swapByteOrder(seg.vmsize);
  sys::swapByteOrder(seg.fileoff);
  sys::swapByteOrder(seg.filesize);
  sys::swapByteOrder(seg.maxprot);
  sys::swapByteOrder(seg.initprot);
  sys::swapByteOrder(seg.nsects);
  sys::swapByteOrder(seg.flags);
}

inline void swapStruct(section_64 &sect) {
  sys::swapByteOrder(sect.addr);
  sys::swapByteOrder(sect.size);
  sys::swapByteOrder(sect.offset);
  sys::swapByteOrder(sect.align);
  sys::swapByteOrder(sect.reloff);
  sys::swapByteOrder(sect.nreloc);
  sys::swapByteOrder(sect.flags);
  sys::swapByteOrder(sect.reserved1);
  sys::swapByteOrder(sect.reserved2);
}

inline void swapStruct(section &sect) {
  sys::swapByteOrder(sect.addr);
  sys::swapByteOrder(sect.size);
  sys::swapByteOrder(sect.offset);
  sys::swapByteOrder(sect.align);
  sys::swapByteOrder(sect.reloff);
  sys::swapByteOrder(sect.nreloc);
  sys::swapByteOrder(sect.flags);
  sys::swapByteOrder(sect.reserved1);
  sys::swapByteOrder(sect.reserved2);
}

inline void swapStruct(dyld_info_command &info) {
  sys::swapByteOrder(info.cmd);
  sys::swapByteOrder(info.cmdsize);
  sys::swapByteOrder(info.rebase_off);
  sys::swapByteOrder(info.rebase_size);
  sys::swapByteOrder(info.bind_off);
  sys::swapByteOrder(info.bind_size);
  sys::swapByteOrder(info.weak_bind_off);
  sys::swapByteOrder(info.weak_bind_size);
  sys::swapByteOrder(info.lazy_bind_off);
  sys::swapByteOrder(info.lazy_bind_size);
  sys::swapByteOrder(info.export_off);
  sys::swapByteOrder(info.export_size);
}

inline void swapStruct(dylib_command &d) {
  sys::swapByteOrder(d.cmd);
  sys::swapByteOrder(d.cmdsize);
  sys::swapByteOrder(d.dylib.name);
  sys::swapByteOrder(d.dylib.timestamp);
  sys::swapByteOrder(d.dylib.current_version);
  sys::swapByteOrder(d.dylib.compatibility_version);
}

inline void swapStruct(sub_framework_command &s) {
  sys::swapByteOrder(s.cmd);
  sys::swapByteOrder(s.cmdsize);
  sys::swapByteOrder(s.umbrella);
}

inline void swapStruct(sub_umbrella_command &s) {
  sys::swapByteOrder(s.cmd);
  sys::swapByteOrder(s.cmdsize);
  sys::swapByteOrder(s.sub_umbrella);
}

inline void swapStruct(sub_library_command &s) {
  sys::swapByteOrder(s.cmd);
  sys::swapByteOrder(s.cmdsize);
  sys::swapByteOrder(s.sub_library);
}

inline void swapStruct(sub_client_command &s) {
  sys::swapByteOrder(s.cmd);
  sys::swapByteOrder(s.cmdsize);
  sys::swapByteOrder(s.client);
}

inline void swapStruct(routines_command &r) {
  sys::swapByteOrder(r.cmd);
  sys::swapByteOrder(r.cmdsize);
  sys::swapByteOrder(r.init_address);
  sys::swapByteOrder(r.init_module);
  sys::swapByteOrder(r.reserved1);
  sys::swapByteOrder(r.reserved2);
  sys::swapByteOrder(r.reserved3);
  sys::swapByteOrder(r.reserved4);
  sys::swapByteOrder(r.reserved5);
  sys::swapByteOrder(r.reserved6);
}

inline void swapStruct(routines_command_64 &r) {
  sys::swapByteOrder(r.cmd);
  sys::swapByteOrder(r.cmdsize);
  sys::swapByteOrder(r.init_address);
  sys::swapByteOrder(r.init_module);
  sys::swapByteOrder(r.reserved1);
  sys::swapByteOrder(r.reserved2);
  sys::swapByteOrder(r.reserved3);
  sys::swapByteOrder(r.reserved4);
  sys::swapByteOrder(r.reserved5);
  sys::swapByteOrder(r.reserved6);
}

inline void swapStruct(thread_command &t) {
  sys::swapByteOrder(t.cmd);
  sys::swapByteOrder(t.cmdsize);
}

inline void swapStruct(dylinker_command &d) {
  sys::swapByteOrder(d.cmd);
  sys::swapByteOrder(d.cmdsize);
  sys::swapByteOrder(d.name);
}

inline void swapStruct(uuid_command &u) {
  sys::swapByteOrder(u.cmd);
  sys::swapByteOrder(u.cmdsize);
}

inline void swapStruct(rpath_command &r) {
  sys::swapByteOrder(r.cmd);
  sys::swapByteOrder(r.cmdsize);
  sys::swapByteOrder(r.path);
}

inline void swapStruct(source_version_command &s) {
  sys::swapByteOrder(s.cmd);
  sys::swapByteOrder(s.cmdsize);
  sys::swapByteOrder(s.version);
}

inline void swapStruct(entry_point_command &e) {
  sys::swapByteOrder(e.cmd);
  sys::swapByteOrder(e.cmdsize);
  sys::swapByteOrder(e.entryoff);
  sys::swapByteOrder(e.stacksize);
}

inline void swapStruct(encryption_info_command &e) {
  sys::swapByteOrder(e.cmd);
  sys::swapByteOrder(e.cmdsize);
  sys::swapByteOrder(e.cryptoff);
  sys::swapByteOrder(e.cryptsize);
  sys::swapByteOrder(e.cryptid);
}

inline void swapStruct(encryption_info_command_64 &e) {
  sys::swapByteOrder(e.cmd);
  sys::swapByteOrder(e.cmdsize);
  sys::swapByteOrder(e.cryptoff);
  sys::swapByteOrder(e.cryptsize);
  sys::swapByteOrder(e.cryptid);
  sys::swapByteOrder(e.pad);
}

inline void swapStruct(dysymtab_command &dst) {
  sys::swapByteOrder(dst.cmd);
  sys::swapByteOrder(dst.cmdsize);
  sys::swapByteOrder(dst.ilocalsym);
  sys::swapByteOrder(dst.nlocalsym);
  sys::swapByteOrder(dst.iextdefsym);
  sys::swapByteOrder(dst.nextdefsym);
  sys::swapByteOrder(dst.iundefsym);
  sys::swapByteOrder(dst.nundefsym);
  sys::swapByteOrder(dst.tocoff);
  sys::swapByteOrder(dst.ntoc);
  sys::swapByteOrder(dst.modtaboff);
  sys::swapByteOrder(dst.nmodtab);
  sys::swapByteOrder(dst.extrefsymoff);
  sys::swapByteOrder(dst.nextrefsyms);
  sys::swapByteOrder(dst.indirectsymoff);
  sys::swapByteOrder(dst.nindirectsyms);
  sys::swapByteOrder(dst.extreloff);
  sys::swapByteOrder(dst.nextrel);
  sys::swapByteOrder(dst.locreloff);
  sys::swapByteOrder(dst.nlocrel);
}

inline void swapStruct(any_relocation_info &reloc) {
  sys::swapByteOrder(reloc.r_word0);
  sys::swapByteOrder(reloc.r_word1);
}

inline void swapStruct(nlist_base &S) {
  sys::swapByteOrder(S.n_strx);
  sys::swapByteOrder(S.n_desc);
}

inline void swapStruct(nlist &sym) {
  sys::swapByteOrder(sym.n_strx);
  sys::swapByteOrder(sym.n_desc);
  sys::swapByteOrder(sym.n_value);
}

inline void swapStruct(nlist_64 &sym) {
  sys::swapByteOrder(sym.n_strx);
  sys::swapByteOrder(sym.n_desc);
  sys::swapByteOrder(sym.n_value);
}

inline void swapStruct(linkedit_data_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.dataoff);
  sys::swapByteOrder(C.datasize);
}

inline void swapStruct(linker_option_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.count);
}

inline void swapStruct(fileset_entry_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.vmaddr);
  sys::swapByteOrder(C.fileoff);
  sys::swapByteOrder(C.entry_id);
}

inline void swapStruct(version_min_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.version);
  sys::swapByteOrder(C.sdk);
}

inline void swapStruct(note_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.offset);
  sys::swapByteOrder(C.size);
}

inline void swapStruct(build_version_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.platform);
  sys::swapByteOrder(C.minos);
  sys::swapByteOrder(C.sdk);
  sys::swapByteOrder(C.ntools);
}

inline void swapStruct(build_tool_version &C) {
  sys::swapByteOrder(C.tool);
  sys::swapByteOrder(C.version);
}

inline void swapStruct(data_in_code_entry &C) {
  sys::swapByteOrder(C.offset);
  sys::swapByteOrder(C.length);
  sys::swapByteOrder(C.kind);
}

inline void swapStruct(uint32_t &C) { sys::swapByteOrder(C); }

// The prebind_cksum_command is obsolete and no longer supported.
inline void swapStruct(prebind_cksum_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.cksum);
}

// The twolevel_hints_command is obsolete and no longer supported.
inline void swapStruct(twolevel_hints_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.offset);
  sys::swapByteOrder(C.nhints);
}

// The prebound_dylib_command is obsolete and no longer supported.
inline void swapStruct(prebound_dylib_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.name);
  sys::swapByteOrder(C.nmodules);
  sys::swapByteOrder(C.linked_modules);
}

// The fvmfile_command is obsolete and no longer supported.
inline void swapStruct(fvmfile_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.name);
  sys::swapByteOrder(C.header_addr);
}

// The symseg_command is obsolete and no longer supported.
inline void swapStruct(symseg_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  sys::swapByteOrder(C.offset);
  sys::swapByteOrder(C.size);
}

// The ident_command is obsolete and no longer supported.
inline void swapStruct(ident_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
}

inline void swapStruct(fvmlib &C) {
  sys::swapByteOrder(C.name);
  sys::swapByteOrder(C.minor_version);
  sys::swapByteOrder(C.header_addr);
}

// The fvmlib_command is obsolete and no longer supported.
inline void swapStruct(fvmlib_command &C) {
  sys::swapByteOrder(C.cmd);
  sys::swapByteOrder(C.cmdsize);
  swapStruct(C.fvmlib);
}

// Get/Set functions from <mach-o/nlist.h>

inline uint16_t GET_LIBRARY_ORDINAL(uint16_t n_desc) {
  return (((n_desc) >> 8u) & 0xffu);
}

inline void SET_LIBRARY_ORDINAL(uint16_t &n_desc, uint8_t ordinal) {
  n_desc = (((n_desc)&0x00ff) | (((ordinal)&0xff) << 8));
}

inline uint8_t GET_COMM_ALIGN(uint16_t n_desc) {
  return (n_desc >> 8u) & 0x0fu;
}

inline void SET_COMM_ALIGN(uint16_t &n_desc, uint8_t align) {
  n_desc = ((n_desc & 0xf0ffu) | ((align & 0x0fu) << 8u));
}
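
// Round-trip sketch (editor's addition, hypothetical helper): the library
// ordinal occupies the high byte of n_desc, so a set followed by a get is
// the identity for 8-bit ordinals.
inline bool libraryOrdinalRoundTrips(uint8_t Ordinal) {
  uint16_t Desc = 0;
  SET_LIBRARY_ORDINAL(Desc, Ordinal); // e.g. Ordinal 3 yields Desc == 0x0300
  return GET_LIBRARY_ORDINAL(Desc) == Ordinal;
}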

// Enums from <mach/machine.h>
enum : uint32_t {
  // Capability bits used in the definition of cpu_type.
  CPU_ARCH_MASK = 0xff000000, // Mask for architecture bits
  CPU_ARCH_ABI64 = 0x01000000, // 64 bit ABI
  CPU_ARCH_ABI64_32 = 0x02000000, // ILP32 ABI on 64-bit hardware
};

// Constants for the cputype field.
enum CPUType {
  CPU_TYPE_ANY = -1,
  CPU_TYPE_X86 = 7,
  CPU_TYPE_I386 = CPU_TYPE_X86,
  CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64,
  /* CPU_TYPE_MIPS      = 8, */
  CPU_TYPE_MC98000 = 10, // Old Motorola PowerPC
  CPU_TYPE_ARM = 12,
  CPU_TYPE_ARM64 = CPU_TYPE_ARM | CPU_ARCH_ABI64,
  CPU_TYPE_ARM64_32 = CPU_TYPE_ARM | CPU_ARCH_ABI64_32,
  CPU_TYPE_SPARC = 14,
  CPU_TYPE_POWERPC = 18,
  CPU_TYPE_POWERPC64 = CPU_TYPE_POWERPC | CPU_ARCH_ABI64
};

enum : uint32_t {
  // Capability bits used in the definition of cpusubtype.
  CPU_SUBTYPE_MASK = 0xff000000,  // Mask for architecture bits
  CPU_SUBTYPE_LIB64 = 0x80000000, // 64 bit libraries

  // Special CPU subtype constants.
  CPU_SUBTYPE_MULTIPLE = ~0u
};

// Constants for the cpusubtype field.
enum CPUSubTypeX86 {
  CPU_SUBTYPE_I386_ALL = 3,
  CPU_SUBTYPE_386 = 3,
  CPU_SUBTYPE_486 = 4,
  CPU_SUBTYPE_486SX = 0x84,
  CPU_SUBTYPE_586 = 5,
  CPU_SUBTYPE_PENT = CPU_SUBTYPE_586,
  CPU_SUBTYPE_PENTPRO = 0x16,
  CPU_SUBTYPE_PENTII_M3 = 0x36,
  CPU_SUBTYPE_PENTII_M5 = 0x56,
  CPU_SUBTYPE_CELERON = 0x67,
  CPU_SUBTYPE_CELERON_MOBILE = 0x77,
  CPU_SUBTYPE_PENTIUM_3 = 0x08,
  CPU_SUBTYPE_PENTIUM_3_M = 0x18,
  CPU_SUBTYPE_PENTIUM_3_XEON = 0x28,
  CPU_SUBTYPE_PENTIUM_M = 0x09,
  CPU_SUBTYPE_PENTIUM_4 = 0x0a,
  CPU_SUBTYPE_PENTIUM_4_M = 0x1a,
  CPU_SUBTYPE_ITANIUM = 0x0b,
  CPU_SUBTYPE_ITANIUM_2 = 0x1b,
  CPU_SUBTYPE_XEON = 0x0c,
  CPU_SUBTYPE_XEON_MP = 0x1c,

  CPU_SUBTYPE_X86_ALL = 3,
  CPU_SUBTYPE_X86_64_ALL = 3,
  CPU_SUBTYPE_X86_ARCH1 = 4,
  CPU_SUBTYPE_X86_64_H = 8
};
inline int CPU_SUBTYPE_INTEL(int Family, int Model) {
  return Family | (Model << 4);
}
inline int CPU_SUBTYPE_INTEL_FAMILY(CPUSubTypeX86 ST) {
  return ((int)ST) & 0x0f;
}
inline int CPU_SUBTYPE_INTEL_MODEL(CPUSubTypeX86 ST) { return ((int)ST) >> 4; }
enum { CPU_SUBTYPE_INTEL_FAMILY_MAX = 15, CPU_SUBTYPE_INTEL_MODEL_ALL = 0 };
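
// Worked example (editor's addition): CPU_SUBTYPE_PENTPRO == 0x16 packs
// family 6 in the low nibble and model 1 above it, so
// CPU_SUBTYPE_INTEL(6, 1) == 0x16, CPU_SUBTYPE_INTEL_FAMILY gives 6, and
// CPU_SUBTYPE_INTEL_MODEL gives 1.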

enum CPUSubTypeARM {
  CPU_SUBTYPE_ARM_ALL = 0,
  CPU_SUBTYPE_ARM_V4T = 5,
  CPU_SUBTYPE_ARM_V6 = 6,
  CPU_SUBTYPE_ARM_V5 = 7,
  CPU_SUBTYPE_ARM_V5TEJ = 7,
  CPU_SUBTYPE_ARM_XSCALE = 8,
  CPU_SUBTYPE_ARM_V7 = 9,
  //  unused  ARM_V7F     = 10,
  CPU_SUBTYPE_ARM_V7S = 11,
  CPU_SUBTYPE_ARM_V7K = 12,
  CPU_SUBTYPE_ARM_V6M = 14,
  CPU_SUBTYPE_ARM_V7M = 15,
  CPU_SUBTYPE_ARM_V7EM = 16
};

enum CPUSubTypeARM64 {
  CPU_SUBTYPE_ARM64_ALL = 0,
  CPU_SUBTYPE_ARM64_V8 = 1,
  CPU_SUBTYPE_ARM64E = 2,
};

enum CPUSubTypeARM64_32 { CPU_SUBTYPE_ARM64_32_V8 = 1 };

enum CPUSubTypeSPARC { CPU_SUBTYPE_SPARC_ALL = 0 };

enum CPUSubTypePowerPC {
  CPU_SUBTYPE_POWERPC_ALL = 0,
  CPU_SUBTYPE_POWERPC_601 = 1,
  CPU_SUBTYPE_POWERPC_602 = 2,
  CPU_SUBTYPE_POWERPC_603 = 3,
  CPU_SUBTYPE_POWERPC_603e = 4,
  CPU_SUBTYPE_POWERPC_603ev = 5,
  CPU_SUBTYPE_POWERPC_604 = 6,
  CPU_SUBTYPE_POWERPC_604e = 7,
  CPU_SUBTYPE_POWERPC_620 = 8,
  CPU_SUBTYPE_POWERPC_750 = 9,
  CPU_SUBTYPE_POWERPC_7400 = 10,
  CPU_SUBTYPE_POWERPC_7450 = 11,
  CPU_SUBTYPE_POWERPC_970 = 100,

  CPU_SUBTYPE_MC980000_ALL = CPU_SUBTYPE_POWERPC_ALL,
  CPU_SUBTYPE_MC98601 = CPU_SUBTYPE_POWERPC_601
};

Expected<uint32_t> getCPUType(const Triple &T);
Expected<uint32_t> getCPUSubType(const Triple &T);

struct x86_thread_state32_t {
  uint32_t eax;
  uint32_t ebx;
  uint32_t ecx;
  uint32_t edx;
  uint32_t edi;
  uint32_t esi;
  uint32_t ebp;
  uint32_t esp;
  uint32_t ss;
  uint32_t eflags;
  uint32_t eip;
  uint32_t cs;
  uint32_t ds;
  uint32_t es;
  uint32_t fs;
  uint32_t gs;
};

struct x86_thread_state64_t {
  uint64_t rax;
  uint64_t rbx;
  uint64_t rcx;
  uint64_t rdx;
  uint64_t rdi;
  uint64_t rsi;
  uint64_t rbp;
  uint64_t rsp;
  uint64_t r8;
  uint64_t r9;
  uint64_t r10;
  uint64_t r11;
  uint64_t r12;
  uint64_t r13;
  uint64_t r14;
  uint64_t r15;
  uint64_t rip;
  uint64_t rflags;
  uint64_t cs;
  uint64_t fs;
  uint64_t gs;
};

enum x86_fp_control_precis {
  x86_FP_PREC_24B = 0,
  x86_FP_PREC_53B = 2,
  x86_FP_PREC_64B = 3
};

enum x86_fp_control_rc {
  x86_FP_RND_NEAR = 0,
  x86_FP_RND_DOWN = 1,
  x86_FP_RND_UP = 2,
  x86_FP_CHOP = 3
};

struct fp_control_t {
  unsigned short invalid : 1, denorm : 1, zdiv : 1, ovrfl : 1, undfl : 1,
      precis : 1, : 2, pc : 2, rc : 2, : 1, : 3;
};

struct fp_status_t {
  unsigned short invalid : 1, denorm : 1, zdiv : 1, ovrfl : 1, undfl : 1,
      precis : 1, stkflt : 1, errsumm : 1, c0 : 1, c1 : 1, c2 : 1, tos : 3,
      c3 : 1, busy : 1;
};

struct mmst_reg_t {
  char mmst_reg[10];
  char mmst_rsrv[6];
};

struct xmm_reg_t {
  char xmm_reg[16];
};

struct x86_float_state64_t {
  int32_t fpu_reserved[2];
  fp_control_t fpu_fcw;
  fp_status_t fpu_fsw;
  uint8_t fpu_ftw;
  uint8_t fpu_rsrv1;
  uint16_t fpu_fop;
  uint32_t fpu_ip;
  uint16_t fpu_cs;
  uint16_t fpu_rsrv2;
  uint32_t fpu_dp;
  uint16_t fpu_ds;
  uint16_t fpu_rsrv3;
  uint32_t fpu_mxcsr;
  uint32_t fpu_mxcsrmask;
  mmst_reg_t fpu_stmm0;
  mmst_reg_t fpu_stmm1;
  mmst_reg_t fpu_stmm2;
  mmst_reg_t fpu_stmm3;
  mmst_reg_t fpu_stmm4;
  mmst_reg_t fpu_stmm5;
  mmst_reg_t fpu_stmm6;
  mmst_reg_t fpu_stmm7;
  xmm_reg_t fpu_xmm0;
  xmm_reg_t fpu_xmm1;
  xmm_reg_t fpu_xmm2;
  xmm_reg_t fpu_xmm3;
  xmm_reg_t fpu_xmm4;
  xmm_reg_t fpu_xmm5;
  xmm_reg_t fpu_xmm6;
  xmm_reg_t fpu_xmm7;
  xmm_reg_t fpu_xmm8;
  xmm_reg_t fpu_xmm9;
  xmm_reg_t fpu_xmm10;
  xmm_reg_t fpu_xmm11;
  xmm_reg_t fpu_xmm12;
  xmm_reg_t fpu_xmm13;
  xmm_reg_t fpu_xmm14;
  xmm_reg_t fpu_xmm15;
  char fpu_rsrv4[6 * 16];
  uint32_t fpu_reserved1;
};

struct x86_exception_state64_t {
  uint16_t trapno;
  uint16_t cpu;
  uint32_t err;
  uint64_t faultvaddr;
};

inline void swapStruct(x86_thread_state32_t &x) {
  sys::swapByteOrder(x.eax);
  sys::swapByteOrder(x.ebx);
  sys::swapByteOrder(x.ecx);
  sys::swapByteOrder(x.edx);
  sys::swapByteOrder(x.edi);
  sys::swapByteOrder(x.esi);
  sys::swapByteOrder(x.ebp);
  sys::swapByteOrder(x.esp);
  sys::swapByteOrder(x.ss);
  sys::swapByteOrder(x.eflags);
  sys::swapByteOrder(x.eip);
  sys::swapByteOrder(x.cs);
  sys::swapByteOrder(x.ds);
  sys::swapByteOrder(x.es);
  sys::swapByteOrder(x.fs);
  sys::swapByteOrder(x.gs);
}

inline void swapStruct(x86_thread_state64_t &x) {
  sys::swapByteOrder(x.rax);
  sys::swapByteOrder(x.rbx);
  sys::swapByteOrder(x.rcx);
  sys::swapByteOrder(x.rdx);
  sys::swapByteOrder(x.rdi);
  sys::swapByteOrder(x.rsi);
  sys::swapByteOrder(x.rbp);
  sys::swapByteOrder(x.rsp);
  sys::swapByteOrder(x.r8);
  sys::swapByteOrder(x.r9);
  sys::swapByteOrder(x.r10);
  sys::swapByteOrder(x.r11);
  sys::swapByteOrder(x.r12);
  sys::swapByteOrder(x.r13);
  sys::swapByteOrder(x.r14);
  sys::swapByteOrder(x.r15);
  sys::swapByteOrder(x.rip);
  sys::swapByteOrder(x.rflags);
  sys::swapByteOrder(x.cs);
  sys::swapByteOrder(x.fs);
  sys::swapByteOrder(x.gs);
}

inline void swapStruct(x86_float_state64_t &x) {
  sys::swapByteOrder(x.fpu_reserved[0]);
  sys::swapByteOrder(x.fpu_reserved[1]);
  // TODO swap: fp_control_t fpu_fcw;
  // TODO swap: fp_status_t fpu_fsw;
  sys::swapByteOrder(x.fpu_fop);
  sys::swapByteOrder(x.fpu_ip);
  sys::swapByteOrder(x.fpu_cs);
  sys::swapByteOrder(x.fpu_rsrv2);
  sys::swapByteOrder(x.fpu_dp);
  sys::swapByteOrder(x.fpu_ds);
  sys::swapByteOrder(x.fpu_rsrv3);
  sys::swapByteOrder(x.fpu_mxcsr);
  sys::swapByteOrder(x.fpu_mxcsrmask);
  sys::swapByteOrder(x.fpu_reserved1);
}

inline void swapStruct(x86_exception_state64_t &x) {
  sys::swapByteOrder(x.trapno);
  sys::swapByteOrder(x.cpu);
  sys::swapByteOrder(x.err);
  sys::swapByteOrder(x.faultvaddr);
}

struct x86_state_hdr_t {
  uint32_t flavor;
  uint32_t count;
};

struct x86_thread_state_t {
  x86_state_hdr_t tsh;
  union {
    x86_thread_state64_t ts64;
    x86_thread_state32_t ts32;
  } uts;
};

struct x86_float_state_t {
  x86_state_hdr_t fsh;
  union {
    x86_float_state64_t fs64;
  } ufs;
};

struct x86_exception_state_t {
  x86_state_hdr_t esh;
  union {
    x86_exception_state64_t es64;
  } ues;
};

inline void swapStruct(x86_state_hdr_t &x) {
  sys::swapByteOrder(x.flavor);
  sys::swapByteOrder(x.count);
}

enum X86ThreadFlavors {
  x86_THREAD_STATE32 = 1,
  x86_FLOAT_STATE32 = 2,
  x86_EXCEPTION_STATE32 = 3,
  x86_THREAD_STATE64 = 4,
  x86_FLOAT_STATE64 = 5,
  x86_EXCEPTION_STATE64 = 6,
  x86_THREAD_STATE = 7,
  x86_FLOAT_STATE = 8,
  x86_EXCEPTION_STATE = 9,
  x86_DEBUG_STATE32 = 10,
  x86_DEBUG_STATE64 = 11,
  x86_DEBUG_STATE = 12
};

inline void swapStruct(x86_thread_state_t &x) {
  swapStruct(x.tsh);
  if (x.tsh.flavor == x86_THREAD_STATE64)
    swapStruct(x.uts.ts64);
}

inline void swapStruct(x86_float_state_t &x) {
  swapStruct(x.fsh);
  if (x.fsh.flavor == x86_FLOAT_STATE64)
    swapStruct(x.ufs.fs64);
}

inline void swapStruct(x86_exception_state_t &x) {
  swapStruct(x.esh);
  if (x.esh.flavor == x86_EXCEPTION_STATE64)
    swapStruct(x.ues.es64);
}

const uint32_t x86_THREAD_STATE32_COUNT =
    sizeof(x86_thread_state32_t) / sizeof(uint32_t);

const uint32_t x86_THREAD_STATE64_COUNT =
    sizeof(x86_thread_state64_t) / sizeof(uint32_t);
const uint32_t x86_FLOAT_STATE64_COUNT =
    sizeof(x86_float_state64_t) / sizeof(uint32_t);
const uint32_t x86_EXCEPTION_STATE64_COUNT =
    sizeof(x86_exception_state64_t) / sizeof(uint32_t);

const uint32_t x86_THREAD_STATE_COUNT =
    sizeof(x86_thread_state_t) / sizeof(uint32_t);
const uint32_t x86_FLOAT_STATE_COUNT =
    sizeof(x86_float_state_t) / sizeof(uint32_t);
const uint32_t x86_EXCEPTION_STATE_COUNT =
    sizeof(x86_exception_state_t) / sizeof(uint32_t);

struct arm_thread_state32_t {
  uint32_t r[13];
  uint32_t sp;
  uint32_t lr;
  uint32_t pc;
  uint32_t cpsr;
};

inline void swapStruct(arm_thread_state32_t &x) {
  for (int i = 0; i < 13; i++)
    sys::swapByteOrder(x.r[i]);
  sys::swapByteOrder(x.sp);
  sys::swapByteOrder(x.lr);
  sys::swapByteOrder(x.pc);
  sys::swapByteOrder(x.cpsr);
}

struct arm_thread_state64_t {
  uint64_t x[29];
  uint64_t fp;
  uint64_t lr;
  uint64_t sp;
  uint64_t pc;
  uint32_t cpsr;
  uint32_t pad;
};

inline void swapStruct(arm_thread_state64_t &x) {
  for (int i = 0; i < 29; i++)
    sys::swapByteOrder(x.x[i]);
  sys::swapByteOrder(x.fp);
  sys::swapByteOrder(x.lr);
  sys::swapByteOrder(x.sp);
  sys::swapByteOrder(x.pc);
  sys::swapByteOrder(x.cpsr);
}

struct arm_state_hdr_t {
  uint32_t flavor;
  uint32_t count;
};

struct arm_thread_state_t {
  arm_state_hdr_t tsh;
  union {
    arm_thread_state32_t ts32;
  } uts;
};

inline void swapStruct(arm_state_hdr_t &x) {
  sys::swapByteOrder(x.flavor);
  sys::swapByteOrder(x.count);
}

enum ARMThreadFlavors {
  ARM_THREAD_STATE = 1,
  ARM_VFP_STATE = 2,
  ARM_EXCEPTION_STATE = 3,
  ARM_DEBUG_STATE = 4,
  ARM_THREAD_STATE_NONE = 5,
  ARM_THREAD_STATE64 = 6,
  ARM_EXCEPTION_STATE64 = 7
};

inline void swapStruct(arm_thread_state_t &x) {
  swapStruct(x.tsh);
  if (x.tsh.flavor == ARM_THREAD_STATE)
    swapStruct(x.uts.ts32);
}

const uint32_t ARM_THREAD_STATE_COUNT =
    sizeof(arm_thread_state32_t) / sizeof(uint32_t);

const uint32_t ARM_THREAD_STATE64_COUNT =
    sizeof(arm_thread_state64_t) / sizeof(uint32_t);

struct ppc_thread_state32_t {
  uint32_t srr0;
  uint32_t srr1;
  uint32_t r0;
  uint32_t r1;
  uint32_t r2;
  uint32_t r3;
  uint32_t r4;
  uint32_t r5;
  uint32_t r6;
  uint32_t r7;
  uint32_t r8;
  uint32_t r9;
  uint32_t r10;
  uint32_t r11;
  uint32_t r12;
  uint32_t r13;
  uint32_t r14;
  uint32_t r15;
  uint32_t r16;
  uint32_t r17;
  uint32_t r18;
  uint32_t r19;
  uint32_t r20;
  uint32_t r21;
  uint32_t r22;
  uint32_t r23;
  uint32_t r24;
  uint32_t r25;
  uint32_t r26;
  uint32_t r27;
  uint32_t r28;
  uint32_t r29;
  uint32_t r30;
  uint32_t r31;
  uint32_t ct;
  uint32_t xer;
  uint32_t lr;
  uint32_t ctr;
  uint32_t mq;
  uint32_t vrsave;
};

inline void swapStruct(ppc_thread_state32_t &x) {
  sys::swapByteOrder(x.srr0);
  sys::swapByteOrder(x.srr1);
  sys::swapByteOrder(x.r0);
  sys::swapByteOrder(x.r1);
  sys::swapByteOrder(x.r2);
  sys::swapByteOrder(x.r3);
  sys::swapByteOrder(x.r4);
  sys::swapByteOrder(x.r5);
  sys::swapByteOrder(x.r6);
  sys::swapByteOrder(x.r7);
  sys::swapByteOrder(x.r8);
  sys::swapByteOrder(x.r9);
  sys::swapByteOrder(x.r10);
  sys::swapByteOrder(x.r11);
  sys::swapByteOrder(x.r12);
  sys::swapByteOrder(x.r13);
  sys::swapByteOrder(x.r14);
  sys::swapByteOrder(x.r15);
  sys::swapByteOrder(x.r16);
  sys::swapByteOrder(x.r17);
  sys::swapByteOrder(x.r18);
  sys::swapByteOrder(x.r19);
  sys::swapByteOrder(x.r20);
  sys::swapByteOrder(x.r21);
  sys::swapByteOrder(x.r22);
  sys::swapByteOrder(x.r23);
  sys::swapByteOrder(x.r24);
  sys::swapByteOrder(x.r25);
  sys::swapByteOrder(x.r26);
  sys::swapByteOrder(x.r27);
  sys::swapByteOrder(x.r28);
  sys::swapByteOrder(x.r29);
  sys::swapByteOrder(x.r30);
  sys::swapByteOrder(x.r31);
  sys::swapByteOrder(x.ct);
  sys::swapByteOrder(x.xer);
  sys::swapByteOrder(x.lr);
  sys::swapByteOrder(x.ctr);
  sys::swapByteOrder(x.mq);
  sys::swapByteOrder(x.vrsave);
}

struct ppc_state_hdr_t {
  uint32_t flavor;
  uint32_t count;
};

struct ppc_thread_state_t {
  ppc_state_hdr_t tsh;
  union {
    ppc_thread_state32_t ts32;
  } uts;
};

inline void swapStruct(ppc_state_hdr_t &x) {
  sys::swapByteOrder(x.flavor);
  sys::swapByteOrder(x.count);
}

enum PPCThreadFlavors {
  PPC_THREAD_STATE = 1,
  PPC_FLOAT_STATE = 2,
  PPC_EXCEPTION_STATE = 3,
  PPC_VECTOR_STATE = 4,
  PPC_THREAD_STATE64 = 5,
  PPC_EXCEPTION_STATE64 = 6,
  PPC_THREAD_STATE_NONE = 7
};

inline void swapStruct(ppc_thread_state_t &x) {
  swapStruct(x.tsh);
  if (x.tsh.flavor == PPC_THREAD_STATE)
    swapStruct(x.uts.ts32);
}

const uint32_t PPC_THREAD_STATE_COUNT =
    sizeof(ppc_thread_state32_t) / sizeof(uint32_t);

// Define a union of all load command structs
#define LOAD_COMMAND_STRUCT(LCStruct) LCStruct LCStruct##_data;

LLVM_PACKED_START
union alignas(4) macho_load_command {
#include "llvm/BinaryFormat/MachO.def"
};
LLVM_PACKED_END
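
// Expansion sketch (editor's addition): MachO.def invokes
// LOAD_COMMAND_STRUCT(LCStruct) once per load command struct, so the union
// body expands roughly to one member per known load command, e.g.:
//   segment_command segment_command_data;
//   segment_command_64 segment_command_64_data;
//   ...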

inline void swapStruct(dyld_chained_fixups_header &C) {
  sys::swapByteOrder(C.fixups_version);
  sys::swapByteOrder(C.starts_offset);
  sys::swapByteOrder(C.imports_offset);
  sys::swapByteOrder(C.symbols_offset);
  sys::swapByteOrder(C.imports_count);
  sys::swapByteOrder(C.imports_format);
  sys::swapByteOrder(C.symbols_format);
}

inline void swapStruct(dyld_chained_starts_in_image &C) {
  sys::swapByteOrder(C.seg_count);
  // getStructOrErr() cannot copy the variable-length seg_info_offset array.
  // Its elements must be byte swapped manually.
}

inline void swapStruct(dyld_chained_starts_in_segment &C) {
  sys::swapByteOrder(C.size);
  sys::swapByteOrder(C.page_size);
  sys::swapByteOrder(C.pointer_format);
  sys::swapByteOrder(C.segment_offset);
  sys::swapByteOrder(C.max_valid_pointer);
  sys::swapByteOrder(C.page_count);
  // page_start entries must be byte swapped manually.
}

/* code signing attributes of a process */

enum CodeSignAttrs {
  CS_VALID = 0x00000001,          /* dynamically valid */
  CS_ADHOC = 0x00000002,          /* ad hoc signed */
  CS_GET_TASK_ALLOW = 0x00000004, /* has get-task-allow entitlement */
  CS_INSTALLER = 0x00000008,      /* has installer entitlement */

  CS_FORCED_LV =
      0x00000010, /* Library Validation required by Hardened System Policy */
  CS_INVALID_ALLOWED = 0x00000020, /* (macOS Only) Page invalidation allowed by
                                      task port policy */

  CS_HARD = 0x00000100,             /* don't load invalid pages */
  CS_KILL = 0x00000200,             /* kill process if it becomes invalid */
  CS_CHECK_EXPIRATION = 0x00000400, /* force expiration checking */
  CS_RESTRICT = 0x00000800,         /* tell dyld to treat as restricted */

  CS_ENFORCEMENT = 0x00001000, /* require enforcement */
  CS_REQUIRE_LV = 0x00002000,  /* require library validation */
  CS_ENTITLEMENTS_VALIDATED =
      0x00004000, /* code signature permits restricted entitlements */
  CS_NVRAM_UNRESTRICTED =
      0x00008000, /* has com.apple.rootless.restricted-nvram-variables.heritable
                     entitlement */

  CS_RUNTIME = 0x00010000,       /* Apply hardened runtime policies */
  CS_LINKER_SIGNED = 0x00020000, /* Automatically signed by the linker */

  CS_ALLOWED_MACHO =
      (CS_ADHOC | CS_HARD | CS_KILL | CS_CHECK_EXPIRATION | CS_RESTRICT |
       CS_ENFORCEMENT | CS_REQUIRE_LV | CS_RUNTIME | CS_LINKER_SIGNED),

  CS_EXEC_SET_HARD = 0x00100000, /* set CS_HARD on any exec'ed process */
  CS_EXEC_SET_KILL = 0x00200000, /* set CS_KILL on any exec'ed process */
  CS_EXEC_SET_ENFORCEMENT =
      0x00400000, /* set CS_ENFORCEMENT on any exec'ed process */
  CS_EXEC_INHERIT_SIP =
      0x00800000, /* set CS_INSTALLER on any exec'ed process */

  CS_KILLED = 0x01000000, /* was killed by kernel for invalidity */
  CS_DYLD_PLATFORM =
      0x02000000, /* the dyld used to load this is a platform binary */
  CS_PLATFORM_BINARY = 0x04000000, /* this is a platform binary */
  CS_PLATFORM_PATH =
      0x08000000, /* platform binary by the fact of path (osx only) */

  CS_DEBUGGED = 0x10000000, /* process is currently or has previously been
                debugged and allowed to run with invalid pages */
  CS_SIGNED = 0x20000000, /* process has a signature (may have gone invalid) */
  CS_DEV_CODE =
      0x40000000, /* code is dev signed, cannot be loaded into prod signed code
                     (will go away with rdar://problem/28322552) */
  CS_DATAVAULT_CONTROLLER =
      0x80000000, /* has Data Vault controller entitlement */

  CS_ENTITLEMENT_FLAGS = (CS_GET_TASK_ALLOW | CS_INSTALLER |
                          CS_DATAVAULT_CONTROLLER | CS_NVRAM_UNRESTRICTED),
};

/* executable segment flags */

enum CodeSignExecSegFlags {

  CS_EXECSEG_MAIN_BINARY = 0x1,     /* executable segment denotes main binary */
  CS_EXECSEG_ALLOW_UNSIGNED = 0x10, /* allow unsigned pages (for debugging) */
  CS_EXECSEG_DEBUGGER = 0x20,       /* main binary is debugger */
  CS_EXECSEG_JIT = 0x40,            /* JIT enabled */
  CS_EXECSEG_SKIP_LV = 0x80,        /* OBSOLETE: skip library validation */
  CS_EXECSEG_CAN_LOAD_CDHASH = 0x100, /* can bless cdhash for execution */
  CS_EXECSEG_CAN_EXEC_CDHASH = 0x200, /* can execute blessed cdhash */

};

/* Magic numbers used by Code Signing */

enum CodeSignMagic {
  CSMAGIC_REQUIREMENT = 0xfade0c00, /* single Requirement blob */
  CSMAGIC_REQUIREMENTS =
      0xfade0c01, /* Requirements vector (internal requirements) */
  CSMAGIC_CODEDIRECTORY = 0xfade0c02,      /* CodeDirectory blob */
  CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0, /* embedded form of signature data */
  CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02, /* XXX */
  CSMAGIC_EMBEDDED_ENTITLEMENTS = 0xfade7171,  /* embedded entitlements */
  CSMAGIC_DETACHED_SIGNATURE =
      0xfade0cc1, /* multi-arch collection of embedded signatures */
  CSMAGIC_BLOBWRAPPER = 0xfade0b01, /* CMS Signature, among other things */

  CS_SUPPORTSSCATTER = 0x20100,
  CS_SUPPORTSTEAMID = 0x20200,
  CS_SUPPORTSCODELIMIT64 = 0x20300,
  CS_SUPPORTSEXECSEG = 0x20400,
  CS_SUPPORTSRUNTIME = 0x20500,
  CS_SUPPORTSLINKAGE = 0x20600,

  CSSLOT_CODEDIRECTORY = 0, /* slot index for CodeDirectory */
  CSSLOT_INFOSLOT = 1,
  CSSLOT_REQUIREMENTS = 2,
  CSSLOT_RESOURCEDIR = 3,
  CSSLOT_APPLICATION = 4,
  CSSLOT_ENTITLEMENTS = 5,

  CSSLOT_ALTERNATE_CODEDIRECTORIES =
      0x1000, /* first alternate CodeDirectory, if any */
  CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5, /* max number of alternate CD slots */
  CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT =
      CSSLOT_ALTERNATE_CODEDIRECTORIES +
      CSSLOT_ALTERNATE_CODEDIRECTORY_MAX, /* one past the last */

  CSSLOT_SIGNATURESLOT = 0x10000, /* CMS Signature */
  CSSLOT_IDENTIFICATIONSLOT = 0x10001,
  CSSLOT_TICKETSLOT = 0x10002,

  CSTYPE_INDEX_REQUIREMENTS = 0x00000002, /* compat with amfi */
  CSTYPE_INDEX_ENTITLEMENTS = 0x00000005, /* compat with amfi */

  CS_HASHTYPE_SHA1 = 1,
  CS_HASHTYPE_SHA256 = 2,
  CS_HASHTYPE_SHA256_TRUNCATED = 3,
  CS_HASHTYPE_SHA384 = 4,

  CS_SHA1_LEN = 20,
  CS_SHA256_LEN = 32,
  CS_SHA256_TRUNCATED_LEN = 20,

  CS_CDHASH_LEN = 20,    /* always - larger hashes are truncated */
  CS_HASH_MAX_SIZE = 48, /* max size of the hash we'll support */

  /*
   * Currently used only to support Legacy VPN plugins and the Mac App Store,
   * but intended to eventually replace all the various platform-code and
   * dev-code bits.
   */
  CS_SIGNER_TYPE_UNKNOWN = 0,
  CS_SIGNER_TYPE_LEGACYVPN = 5,
  CS_SIGNER_TYPE_MAC_APP_STORE = 6,

  CS_SUPPL_SIGNER_TYPE_UNKNOWN = 0,
  CS_SUPPL_SIGNER_TYPE_TRUSTCACHE = 7,
  CS_SUPPL_SIGNER_TYPE_LOCAL = 8,
};

struct CS_CodeDirectory {
  uint32_t magic;         /* magic number (CSMAGIC_CODEDIRECTORY) */
  uint32_t length;        /* total length of CodeDirectory blob */
  uint32_t version;       /* compatibility version */
  uint32_t flags;         /* setup and mode flags */
  uint32_t hashOffset;    /* offset of hash slot element at index zero */
  uint32_t identOffset;   /* offset of identifier string */
  uint32_t nSpecialSlots; /* number of special hash slots */
  uint32_t nCodeSlots;    /* number of ordinary (code) hash slots */
  uint32_t codeLimit;     /* limit to main image signature range */
  uint8_t hashSize;       /* size of each hash in bytes */
  uint8_t hashType;       /* type of hash (cdHashType* constants) */
  uint8_t platform;       /* platform identifier; zero if not platform binary */
  uint8_t pageSize;       /* log2(page size in bytes); 0 => infinite */
  uint32_t spare2;        /* unused (must be zero) */

  /* Version 0x20100 */
  uint32_t scatterOffset; /* offset of optional scatter vector */

  /* Version 0x20200 */
  uint32_t teamOffset; /* offset of optional team identifier */

  /* Version 0x20300 */
  uint32_t spare3;      /* unused (must be zero) */
  uint64_t codeLimit64; /* limit to main image signature range, 64 bits */

  /* Version 0x20400 */
  uint64_t execSegBase;  /* offset of executable segment */
  uint64_t execSegLimit; /* limit of executable segment */
  uint64_t execSegFlags; /* executable segment flags */
};

static_assert(sizeof(CS_CodeDirectory) == 88);
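
// Access sketch (editor's addition, hypothetical helper): fields past the
// base layout are only meaningful when `version` meets the matching
// CS_SUPPORTS* threshold above. Note that on-disk code signing fields are
// big-endian, so `version` must be byte-swapped on little-endian hosts
// before this check.
inline bool hasExecSegInfo(const CS_CodeDirectory &CD) {
  return CD.version >= CS_SUPPORTSEXECSEG; // 0x20400 introduced execSeg*
}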

struct CS_BlobIndex {
  uint32_t type;   /* type of entry */
  uint32_t offset; /* offset of entry */
};

struct CS_SuperBlob {
  uint32_t magic;  /* magic number */
  uint32_t length; /* total length of SuperBlob */
  uint32_t count;  /* number of index entries following */
  /* followed by Blobs in no particular order as indicated by index offsets */
};

enum SecCSDigestAlgorithm {
  kSecCodeSignatureNoHash = 0,     /* null value */
  kSecCodeSignatureHashSHA1 = 1,   /* SHA-1 */
  kSecCodeSignatureHashSHA256 = 2, /* SHA-256 */
  kSecCodeSignatureHashSHA256Truncated =
      3,                           /* SHA-256 truncated to first 20 bytes */
  kSecCodeSignatureHashSHA384 = 4, /* SHA-384 */
  kSecCodeSignatureHashSHA512 = 5, /* SHA-512 */
};

enum LinkerOptimizationHintKind {
  LOH_ARM64_ADRP_ADRP = 1,
  LOH_ARM64_ADRP_LDR = 2,
  LOH_ARM64_ADRP_ADD_LDR = 3,
  LOH_ARM64_ADRP_LDR_GOT_LDR = 4,
  LOH_ARM64_ADRP_ADD_STR = 5,
  LOH_ARM64_ADRP_LDR_GOT_STR = 6,
  LOH_ARM64_ADRP_ADD = 7,
  LOH_ARM64_ADRP_LDR_GOT = 8,
};

} // end namespace MachO
} // end namespace llvm

#endif

//===- MsgPack.def - MessagePack definitions --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Macros for running through MessagePack enumerators.
///
//===----------------------------------------------------------------------===//

#if !(                                                                         \
    defined HANDLE_MP_FIRST_BYTE || defined HANDLE_MP_FIX_BITS ||              \
    defined HANDLE_MP_FIX_BITS_MASK || defined HANDLE_MP_FIX_MAX ||            \
    defined HANDLE_MP_FIX_LEN || defined HANDLE_MP_FIX_MIN)
#error "Missing macro definition of HANDLE_MP*"
#endif

#ifndef HANDLE_MP_FIRST_BYTE
#define HANDLE_MP_FIRST_BYTE(ID, NAME)
#endif

#ifndef HANDLE_MP_FIX_BITS
#define HANDLE_MP_FIX_BITS(ID, NAME)
#endif

#ifndef HANDLE_MP_FIX_BITS_MASK
#define HANDLE_MP_FIX_BITS_MASK(ID, NAME)
#endif

#ifndef HANDLE_MP_FIX_MAX
#define HANDLE_MP_FIX_MAX(ID, NAME)
#endif

#ifndef HANDLE_MP_FIX_LEN
#define HANDLE_MP_FIX_LEN(ID, NAME)
#endif

#ifndef HANDLE_MP_FIX_MIN
#define HANDLE_MP_FIX_MIN(ID, NAME)
#endif

HANDLE_MP_FIRST_BYTE(0xc0, Nil)
HANDLE_MP_FIRST_BYTE(0xc2, False)
HANDLE_MP_FIRST_BYTE(0xc3, True)
HANDLE_MP_FIRST_BYTE(0xc4, Bin8)
HANDLE_MP_FIRST_BYTE(0xc5, Bin16)
HANDLE_MP_FIRST_BYTE(0xc6, Bin32)
HANDLE_MP_FIRST_BYTE(0xc7, Ext8)
HANDLE_MP_FIRST_BYTE(0xc8, Ext16)
HANDLE_MP_FIRST_BYTE(0xc9, Ext32)
HANDLE_MP_FIRST_BYTE(0xca, Float32)
HANDLE_MP_FIRST_BYTE(0xcb, Float64)
HANDLE_MP_FIRST_BYTE(0xcc, UInt8)
HANDLE_MP_FIRST_BYTE(0xcd, UInt16)
HANDLE_MP_FIRST_BYTE(0xce, UInt32)
HANDLE_MP_FIRST_BYTE(0xcf, UInt64)
HANDLE_MP_FIRST_BYTE(0xd0, Int8)
HANDLE_MP_FIRST_BYTE(0xd1, Int16)
HANDLE_MP_FIRST_BYTE(0xd2, Int32)
HANDLE_MP_FIRST_BYTE(0xd3, Int64)
HANDLE_MP_FIRST_BYTE(0xd4, FixExt1)
HANDLE_MP_FIRST_BYTE(0xd5, FixExt2)
HANDLE_MP_FIRST_BYTE(0xd6, FixExt4)
HANDLE_MP_FIRST_BYTE(0xd7, FixExt8)
HANDLE_MP_FIRST_BYTE(0xd8, FixExt16)
HANDLE_MP_FIRST_BYTE(0xd9, Str8)
HANDLE_MP_FIRST_BYTE(0xda, Str16)
HANDLE_MP_FIRST_BYTE(0xdb, Str32)
HANDLE_MP_FIRST_BYTE(0xdc, Array16)
HANDLE_MP_FIRST_BYTE(0xdd, Array32)
HANDLE_MP_FIRST_BYTE(0xde, Map16)
HANDLE_MP_FIRST_BYTE(0xdf, Map32)

HANDLE_MP_FIX_BITS(0x00, PositiveInt)
HANDLE_MP_FIX_BITS(0x80, Map)
HANDLE_MP_FIX_BITS(0x90, Array)
HANDLE_MP_FIX_BITS(0xa0, String)
HANDLE_MP_FIX_BITS(0xe0, NegativeInt)

HANDLE_MP_FIX_BITS_MASK(0x80, PositiveInt)
HANDLE_MP_FIX_BITS_MASK(0xf0, Map)
HANDLE_MP_FIX_BITS_MASK(0xf0, Array)
HANDLE_MP_FIX_BITS_MASK(0xe0, String)
HANDLE_MP_FIX_BITS_MASK(0xe0, NegativeInt)

HANDLE_MP_FIX_MAX(0x7f, PositiveInt)
HANDLE_MP_FIX_MAX(0x0f, Map)
HANDLE_MP_FIX_MAX(0x0f, Array)
HANDLE_MP_FIX_MAX(0x1f, String)

HANDLE_MP_FIX_LEN(0x01, Ext1)
HANDLE_MP_FIX_LEN(0x02, Ext2)
HANDLE_MP_FIX_LEN(0x04, Ext4)
HANDLE_MP_FIX_LEN(0x08, Ext8)
HANDLE_MP_FIX_LEN(0x10, Ext16)

HANDLE_MP_FIX_MIN(-0x20, NegativeInt)

#undef HANDLE_MP_FIRST_BYTE
#undef HANDLE_MP_FIX_BITS
#undef HANDLE_MP_FIX_BITS_MASK
#undef HANDLE_MP_FIX_MAX
#undef HANDLE_MP_FIX_LEN
#undef HANDLE_MP_FIX_MIN

//===- MinidumpConstants.def - Iteration over minidump constants-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#if !(defined(HANDLE_MDMP_STREAM_TYPE) || defined(HANDLE_MDMP_ARCH) ||         \
      defined(HANDLE_MDMP_PLATFORM) || defined(HANDLE_MDMP_PROTECT) ||         \
      defined(HANDLE_MDMP_MEMSTATE) || defined(HANDLE_MDMP_MEMTYPE))
#error "Missing HANDLE_MDMP definition"
#endif

#ifndef HANDLE_MDMP_STREAM_TYPE
#define HANDLE_MDMP_STREAM_TYPE(CODE, NAME)
#endif

#ifndef HANDLE_MDMP_ARCH
#define HANDLE_MDMP_ARCH(CODE, NAME)
#endif

#ifndef HANDLE_MDMP_PLATFORM
#define HANDLE_MDMP_PLATFORM(CODE, NAME)
#endif

#ifndef HANDLE_MDMP_PROTECT
#define HANDLE_MDMP_PROTECT(CODE, NAME, NATIVENAME)
#endif

#ifndef HANDLE_MDMP_MEMSTATE
#define HANDLE_MDMP_MEMSTATE(CODE, NAME, NATIVENAME)
#endif

#ifndef HANDLE_MDMP_MEMTYPE
#define HANDLE_MDMP_MEMTYPE(CODE, NAME, NATIVENAME)
#endif

HANDLE_MDMP_STREAM_TYPE(0x0003, ThreadList)
HANDLE_MDMP_STREAM_TYPE(0x0004, ModuleList)
HANDLE_MDMP_STREAM_TYPE(0x0005, MemoryList)
HANDLE_MDMP_STREAM_TYPE(0x0006, Exception)
HANDLE_MDMP_STREAM_TYPE(0x0007, SystemInfo)
HANDLE_MDMP_STREAM_TYPE(0x0008, ThreadExList)
HANDLE_MDMP_STREAM_TYPE(0x0009, Memory64List)
HANDLE_MDMP_STREAM_TYPE(0x000a, CommentA)
HANDLE_MDMP_STREAM_TYPE(0x000b, CommentW)
HANDLE_MDMP_STREAM_TYPE(0x000c, HandleData)
HANDLE_MDMP_STREAM_TYPE(0x000d, FunctionTable)
HANDLE_MDMP_STREAM_TYPE(0x000e, UnloadedModuleList)
HANDLE_MDMP_STREAM_TYPE(0x000f, MiscInfo)
HANDLE_MDMP_STREAM_TYPE(0x0010, MemoryInfoList)
HANDLE_MDMP_STREAM_TYPE(0x0011, ThreadInfoList)
HANDLE_MDMP_STREAM_TYPE(0x0012, HandleOperationList)
HANDLE_MDMP_STREAM_TYPE(0x0013, Token)
HANDLE_MDMP_STREAM_TYPE(0x0014, JavascriptData)
HANDLE_MDMP_STREAM_TYPE(0x0015, SystemMemoryInfo)
HANDLE_MDMP_STREAM_TYPE(0x0016, ProcessVMCounters)
// Breakpad extension types.  0x4767 = "Gg"
HANDLE_MDMP_STREAM_TYPE(0x47670001, BreakpadInfo)
HANDLE_MDMP_STREAM_TYPE(0x47670002, AssertionInfo)
// These are additional minidump stream values which are specific to the linux
// breakpad implementation.
HANDLE_MDMP_STREAM_TYPE(0x47670003, LinuxCPUInfo)    // /proc/cpuinfo
HANDLE_MDMP_STREAM_TYPE(0x47670004, LinuxProcStatus) // /proc/$x/status
HANDLE_MDMP_STREAM_TYPE(0x47670005, LinuxLSBRelease) // /etc/lsb-release
HANDLE_MDMP_STREAM_TYPE(0x47670006, LinuxCMDLine)    // /proc/$x/cmdline
HANDLE_MDMP_STREAM_TYPE(0x47670007, LinuxEnviron)    // /proc/$x/environ
HANDLE_MDMP_STREAM_TYPE(0x47670008, LinuxAuxv)       // /proc/$x/auxv
HANDLE_MDMP_STREAM_TYPE(0x47670009, LinuxMaps)       // /proc/$x/maps
HANDLE_MDMP_STREAM_TYPE(0x4767000A, LinuxDSODebug)
HANDLE_MDMP_STREAM_TYPE(0x4767000B, LinuxProcStat)   // /proc/$x/stat
HANDLE_MDMP_STREAM_TYPE(0x4767000C, LinuxProcUptime) // uptime
HANDLE_MDMP_STREAM_TYPE(0x4767000D, LinuxProcFD)     // /proc/$x/fd
// Facebook-defined stream types
HANDLE_MDMP_STREAM_TYPE(0xFACE1CA7, FacebookLogcat)
HANDLE_MDMP_STREAM_TYPE(0xFACECAFA, FacebookAppCustomData)
HANDLE_MDMP_STREAM_TYPE(0xFACECAFB, FacebookBuildID)
HANDLE_MDMP_STREAM_TYPE(0xFACECAFC, FacebookAppVersionName)
HANDLE_MDMP_STREAM_TYPE(0xFACECAFD, FacebookJavaStack)
HANDLE_MDMP_STREAM_TYPE(0xFACECAFE, FacebookDalvikInfo)
HANDLE_MDMP_STREAM_TYPE(0xFACECAFF, FacebookUnwindSymbols)
HANDLE_MDMP_STREAM_TYPE(0xFACECB00, FacebookDumpErrorLog)
HANDLE_MDMP_STREAM_TYPE(0xFACECCCC, FacebookAppStateLog)
HANDLE_MDMP_STREAM_TYPE(0xFACEDEAD, FacebookAbortReason)
HANDLE_MDMP_STREAM_TYPE(0xFACEE000, FacebookThreadName)

HANDLE_MDMP_ARCH(0x0000, X86)       // PROCESSOR_ARCHITECTURE_INTEL
HANDLE_MDMP_ARCH(0x0001, MIPS)      // PROCESSOR_ARCHITECTURE_MIPS
HANDLE_MDMP_ARCH(0x0002, Alpha)     // PROCESSOR_ARCHITECTURE_ALPHA
HANDLE_MDMP_ARCH(0x0003, PPC)       // PROCESSOR_ARCHITECTURE_PPC
HANDLE_MDMP_ARCH(0x0004, SHX)       // PROCESSOR_ARCHITECTURE_SHX (Super-H)
HANDLE_MDMP_ARCH(0x0005, ARM)       // PROCESSOR_ARCHITECTURE_ARM
HANDLE_MDMP_ARCH(0x0006, IA64)      // PROCESSOR_ARCHITECTURE_IA64
HANDLE_MDMP_ARCH(0x0007, Alpha64)   // PROCESSOR_ARCHITECTURE_ALPHA64
HANDLE_MDMP_ARCH(0x0008, MSIL)      // PROCESSOR_ARCHITECTURE_MSIL
HANDLE_MDMP_ARCH(0x0009, AMD64)     // PROCESSOR_ARCHITECTURE_AMD64
HANDLE_MDMP_ARCH(0x000a, X86Win64)  // PROCESSOR_ARCHITECTURE_IA32_ON_WIN64
HANDLE_MDMP_ARCH(0x000c, ARM64)     // PROCESSOR_ARCHITECTURE_ARM64
HANDLE_MDMP_ARCH(0x8001, BP_SPARC)  // Breakpad-defined value for SPARC
HANDLE_MDMP_ARCH(0x8002, BP_PPC64)  // Breakpad-defined value for PPC64
HANDLE_MDMP_ARCH(0x8003, BP_ARM64)  // Breakpad-defined value for ARM64
HANDLE_MDMP_ARCH(0x8004, BP_MIPS64) // Breakpad-defined value for MIPS64

HANDLE_MDMP_PLATFORM(0x0000, Win32S) // Win32 on Windows 3.1
HANDLE_MDMP_PLATFORM(0x0001, Win32Windows) // Windows 95-98-Me
HANDLE_MDMP_PLATFORM(0x0002, Win32NT) // Windows NT, 2000+
HANDLE_MDMP_PLATFORM(0x0003, Win32CE) // Windows CE, Windows Mobile, "Handheld"
// Breakpad-defined values.
HANDLE_MDMP_PLATFORM(0x8000, Unix) // Generic Unix-ish
HANDLE_MDMP_PLATFORM(0x8101, MacOSX) // Mac OS X/Darwin
HANDLE_MDMP_PLATFORM(0x8102, IOS) // iOS
HANDLE_MDMP_PLATFORM(0x8201, Linux) // Linux
HANDLE_MDMP_PLATFORM(0x8202, Solaris) // Solaris
HANDLE_MDMP_PLATFORM(0x8203, Android) // Android
HANDLE_MDMP_PLATFORM(0x8204, PS3) // PS3
HANDLE_MDMP_PLATFORM(0x8205, NaCl) // Native Client (NaCl)
HANDLE_MDMP_PLATFORM(0x8206, OpenHOS) // OpenHarmony OS

HANDLE_MDMP_PROTECT(0x01, NoAccess, PAGE_NO_ACCESS)
HANDLE_MDMP_PROTECT(0x02, ReadOnly, PAGE_READ_ONLY)
HANDLE_MDMP_PROTECT(0x04, ReadWrite, PAGE_READ_WRITE)
HANDLE_MDMP_PROTECT(0x08, WriteCopy, PAGE_WRITE_COPY)
HANDLE_MDMP_PROTECT(0x10, Execute, PAGE_EXECUTE)
HANDLE_MDMP_PROTECT(0x20, ExecuteRead, PAGE_EXECUTE_READ)
HANDLE_MDMP_PROTECT(0x40, ExecuteReadWrite, PAGE_EXECUTE_READ_WRITE)
HANDLE_MDMP_PROTECT(0x80, ExecuteWriteCopy, PAGE_EXECUTE_WRITE_COPY)
HANDLE_MDMP_PROTECT(0x100, Guard, PAGE_GUARD)
HANDLE_MDMP_PROTECT(0x200, NoCache, PAGE_NOCACHE)
HANDLE_MDMP_PROTECT(0x400, WriteCombine, PAGE_WRITECOMBINE)
HANDLE_MDMP_PROTECT(0x40000000, TargetsInvalid, PAGE_TARGETS_INVALID)

HANDLE_MDMP_MEMSTATE(0x01000, Commit, MEM_COMMIT)
HANDLE_MDMP_MEMSTATE(0x02000, Reserve, MEM_RESERVE)
HANDLE_MDMP_MEMSTATE(0x10000, Free, MEM_FREE)

HANDLE_MDMP_MEMTYPE(0x0020000, Private, MEM_PRIVATE)
HANDLE_MDMP_MEMTYPE(0x0040000, Mapped, MEM_MAPPED)
HANDLE_MDMP_MEMTYPE(0x1000000, Image, MEM_IMAGE)

#undef HANDLE_MDMP_STREAM_TYPE
#undef HANDLE_MDMP_ARCH
#undef HANDLE_MDMP_PLATFORM
#undef HANDLE_MDMP_PROTECT
#undef HANDLE_MDMP_MEMSTATE
#undef HANDLE_MDMP_MEMTYPE

//===- BinaryFormat/ELFRelocs/Hexagon.def ---------------------------------===//
#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// Release 5 ABI
ELF_RELOC(R_HEX_NONE,                0)
ELF_RELOC(R_HEX_B22_PCREL,           1)
ELF_RELOC(R_HEX_B15_PCREL,           2)
ELF_RELOC(R_HEX_B7_PCREL,            3)
ELF_RELOC(R_HEX_LO16,                4)
ELF_RELOC(R_HEX_HI16,                5)
ELF_RELOC(R_HEX_32,                  6)
ELF_RELOC(R_HEX_16,                  7)
ELF_RELOC(R_HEX_8,                   8)
ELF_RELOC(R_HEX_GPREL16_0,           9)
ELF_RELOC(R_HEX_GPREL16_1,           10)
ELF_RELOC(R_HEX_GPREL16_2,           11)
ELF_RELOC(R_HEX_GPREL16_3,           12)
ELF_RELOC(R_HEX_HL16,                13)
ELF_RELOC(R_HEX_B13_PCREL,           14)
ELF_RELOC(R_HEX_B9_PCREL,            15)
ELF_RELOC(R_HEX_B32_PCREL_X,         16)
ELF_RELOC(R_HEX_32_6_X,              17)
ELF_RELOC(R_HEX_B22_PCREL_X,         18)
ELF_RELOC(R_HEX_B15_PCREL_X,         19)
ELF_RELOC(R_HEX_B13_PCREL_X,         20)
ELF_RELOC(R_HEX_B9_PCREL_X,          21)
ELF_RELOC(R_HEX_B7_PCREL_X,          22)
ELF_RELOC(R_HEX_16_X,                23)
ELF_RELOC(R_HEX_12_X,                24)
ELF_RELOC(R_HEX_11_X,                25)
ELF_RELOC(R_HEX_10_X,                26)
ELF_RELOC(R_HEX_9_X,                 27)
ELF_RELOC(R_HEX_8_X,                 28)
ELF_RELOC(R_HEX_7_X,                 29)
ELF_RELOC(R_HEX_6_X,                 30)
ELF_RELOC(R_HEX_32_PCREL,            31)
ELF_RELOC(R_HEX_COPY,                32)
ELF_RELOC(R_HEX_GLOB_DAT,            33)
ELF_RELOC(R_HEX_JMP_SLOT,            34)
ELF_RELOC(R_HEX_RELATIVE,            35)
ELF_RELOC(R_HEX_PLT_B22_PCREL,       36)
ELF_RELOC(R_HEX_GOTREL_LO16,         37)
ELF_RELOC(R_HEX_GOTREL_HI16,         38)
ELF_RELOC(R_HEX_GOTREL_32,           39)
ELF_RELOC(R_HEX_GOT_LO16,            40)
ELF_RELOC(R_HEX_GOT_HI16,            41)
ELF_RELOC(R_HEX_GOT_32,              42)
ELF_RELOC(R_HEX_GOT_16,              43)
ELF_RELOC(R_HEX_DTPMOD_32,           44)
ELF_RELOC(R_HEX_DTPREL_LO16,         45)
ELF_RELOC(R_HEX_DTPREL_HI16,         46)
ELF_RELOC(R_HEX_DTPREL_32,           47)
ELF_RELOC(R_HEX_DTPREL_16,           48)
ELF_RELOC(R_HEX_GD_PLT_B22_PCREL,    49)
ELF_RELOC(R_HEX_GD_GOT_LO16,         50)
ELF_RELOC(R_HEX_GD_GOT_HI16,         51)
ELF_RELOC(R_HEX_GD_GOT_32,           52)
ELF_RELOC(R_HEX_GD_GOT_16,           53)
ELF_RELOC(R_HEX_IE_LO16,             54)
ELF_RELOC(R_HEX_IE_HI16,             55)
ELF_RELOC(R_HEX_IE_32,               56)
ELF_RELOC(R_HEX_IE_GOT_LO16,         57)
ELF_RELOC(R_HEX_IE_GOT_HI16,         58)
ELF_RELOC(R_HEX_IE_GOT_32,           59)
ELF_RELOC(R_HEX_IE_GOT_16,           60)
ELF_RELOC(R_HEX_TPREL_LO16,          61)
ELF_RELOC(R_HEX_TPREL_HI16,          62)
ELF_RELOC(R_HEX_TPREL_32,            63)
ELF_RELOC(R_HEX_TPREL_16,            64)
ELF_RELOC(R_HEX_6_PCREL_X,           65)
ELF_RELOC(R_HEX_GOTREL_32_6_X,       66)
ELF_RELOC(R_HEX_GOTREL_16_X,         67)
ELF_RELOC(R_HEX_GOTREL_11_X,         68)
ELF_RELOC(R_HEX_GOT_32_6_X,          69)
ELF_RELOC(R_HEX_GOT_16_X,            70)
ELF_RELOC(R_HEX_GOT_11_X,            71)
ELF_RELOC(R_HEX_DTPREL_32_6_X,       72)
ELF_RELOC(R_HEX_DTPREL_16_X,         73)
ELF_RELOC(R_HEX_DTPREL_11_X,         74)
ELF_RELOC(R_HEX_GD_GOT_32_6_X,       75)
ELF_RELOC(R_HEX_GD_GOT_16_X,         76)
ELF_RELOC(R_HEX_GD_GOT_11_X,         77)
ELF_RELOC(R_HEX_IE_32_6_X,           78)
ELF_RELOC(R_HEX_IE_16_X,             79)
ELF_RELOC(R_HEX_IE_GOT_32_6_X,       80)
ELF_RELOC(R_HEX_IE_GOT_16_X,         81)
ELF_RELOC(R_HEX_IE_GOT_11_X,         82)
ELF_RELOC(R_HEX_TPREL_32_6_X,        83)
ELF_RELOC(R_HEX_TPREL_16_X,          84)
ELF_RELOC(R_HEX_TPREL_11_X,          85)
ELF_RELOC(R_HEX_LD_PLT_B22_PCREL,    86)
ELF_RELOC(R_HEX_LD_GOT_LO16,         87)
ELF_RELOC(R_HEX_LD_GOT_HI16,         88)
ELF_RELOC(R_HEX_LD_GOT_32,           89)
ELF_RELOC(R_HEX_LD_GOT_16,           90)
ELF_RELOC(R_HEX_LD_GOT_32_6_X,       91)
ELF_RELOC(R_HEX_LD_GOT_16_X,         92)
ELF_RELOC(R_HEX_LD_GOT_11_X,         93)
ELF_RELOC(R_HEX_23_REG,              94)
ELF_RELOC(R_HEX_GD_PLT_B22_PCREL_X,  95)
ELF_RELOC(R_HEX_GD_PLT_B32_PCREL_X,  96)
ELF_RELOC(R_HEX_LD_PLT_B22_PCREL_X,  97)
ELF_RELOC(R_HEX_LD_PLT_B32_PCREL_X,  98)
ELF_RELOC(R_HEX_27_REG,              99)
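
// A minimal sketch of the usual consumer of the table above, modelled on
// LLVM's include/llvm/BinaryFormat/ELF.h: defining ELF_RELOC (per the guard
// at the top of this file) and expanding the .def inside an enum turns every
// R_HEX_* entry into an enumerator.
#define ELF_RELOC(name, value) name = value,
enum {
#include "llvm/BinaryFormat/ELFRelocs/Hexagon.def"
};
#undef ELF_RELOC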
//===-- BinaryFormat/ELFRelocs/Xtensa.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_XTENSA_NONE,              0)
ELF_RELOC(R_XTENSA_32,                1)
ELF_RELOC(R_XTENSA_RTLD,              2)
ELF_RELOC(R_XTENSA_GLOB_DAT,          3)
ELF_RELOC(R_XTENSA_JMP_SLOT,          4)
ELF_RELOC(R_XTENSA_RELATIVE,          5)
ELF_RELOC(R_XTENSA_PLT,               6)
// Relocation type 7 is currently unused.
ELF_RELOC(R_XTENSA_OP0,               8)
ELF_RELOC(R_XTENSA_OP1,               9)
ELF_RELOC(R_XTENSA_OP2,              10)
ELF_RELOC(R_XTENSA_ASM_EXPAND,       11)
ELF_RELOC(R_XTENSA_ASM_SIMPLIFY,     12)
ELF_RELOC(R_XTENSA_32_PCREL,         14)
ELF_RELOC(R_XTENSA_GNU_VTINHERIT,    15)
ELF_RELOC(R_XTENSA_GNU_VTENTRY,      16)
ELF_RELOC(R_XTENSA_DIFF8,            17)
ELF_RELOC(R_XTENSA_DIFF16,           18)
ELF_RELOC(R_XTENSA_DIFF32,           19)
ELF_RELOC(R_XTENSA_SLOT0_OP,         20)
ELF_RELOC(R_XTENSA_SLOT1_OP,         21)
ELF_RELOC(R_XTENSA_SLOT2_OP,         22)
ELF_RELOC(R_XTENSA_SLOT3_OP,         23)
ELF_RELOC(R_XTENSA_SLOT4_OP,         24)
ELF_RELOC(R_XTENSA_SLOT5_OP,         25)
ELF_RELOC(R_XTENSA_SLOT6_OP,         26)
ELF_RELOC(R_XTENSA_SLOT7_OP,         27)
ELF_RELOC(R_XTENSA_SLOT8_OP,         28)
ELF_RELOC(R_XTENSA_SLOT9_OP,         29)
ELF_RELOC(R_XTENSA_SLOT10_OP,        30)
ELF_RELOC(R_XTENSA_SLOT11_OP,        31)
ELF_RELOC(R_XTENSA_SLOT12_OP,        32)
ELF_RELOC(R_XTENSA_SLOT13_OP,        33)
ELF_RELOC(R_XTENSA_SLOT14_OP,        34)
ELF_RELOC(R_XTENSA_SLOT0_ALT,        35)
ELF_RELOC(R_XTENSA_SLOT1_ALT,        36)
ELF_RELOC(R_XTENSA_SLOT2_ALT,        37)
ELF_RELOC(R_XTENSA_SLOT3_ALT,        38)
ELF_RELOC(R_XTENSA_SLOT4_ALT,        39)
ELF_RELOC(R_XTENSA_SLOT5_ALT,        40)
ELF_RELOC(R_XTENSA_SLOT6_ALT,        41)
ELF_RELOC(R_XTENSA_SLOT7_ALT,        42)
ELF_RELOC(R_XTENSA_SLOT8_ALT,        43)
ELF_RELOC(R_XTENSA_SLOT9_ALT,        44)
ELF_RELOC(R_XTENSA_SLOT10_ALT,       45)
ELF_RELOC(R_XTENSA_SLOT11_ALT,       46)
ELF_RELOC(R_XTENSA_SLOT12_ALT,       47)
ELF_RELOC(R_XTENSA_SLOT13_ALT,       48)
ELF_RELOC(R_XTENSA_SLOT14_ALT,       49)
ELF_RELOC(R_XTENSA_TLSDESC_FN,       50)
ELF_RELOC(R_XTENSA_TLSDESC_ARG,      51)
ELF_RELOC(R_XTENSA_TLS_DTPOFF,       52)
ELF_RELOC(R_XTENSA_TLS_TPOFF,        53)
ELF_RELOC(R_XTENSA_TLS_FUNC,         54)
ELF_RELOC(R_XTENSA_TLS_ARG,          55)
ELF_RELOC(R_XTENSA_TLS_CALL,         56)
//===-- BinaryFormat/ELFRelocs/CSKY.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_CKCORE_NONE,                        0)
ELF_RELOC(R_CKCORE_ADDR32,                      1)
ELF_RELOC(R_CKCORE_PCREL_IMM8_4,                2)
ELF_RELOC(R_CKCORE_PCREL_IMM11_2,               3)
ELF_RELOC(R_CKCORE_PCREL_IMM4_2,                4)
ELF_RELOC(R_CKCORE_PCREL32,                     5)
ELF_RELOC(R_CKCORE_PCREL_JSR_IMM11_2,           6)
ELF_RELOC(R_CKCORE_GNU_VTINHERIT,               7)
ELF_RELOC(R_CKCORE_GNU_VTENTRY,                 8)
ELF_RELOC(R_CKCORE_RELATIVE,                    9)
ELF_RELOC(R_CKCORE_COPY,                       10)
ELF_RELOC(R_CKCORE_GLOB_DAT,                   11)
ELF_RELOC(R_CKCORE_JUMP_SLOT,                  12)
ELF_RELOC(R_CKCORE_GOTOFF,                     13)
ELF_RELOC(R_CKCORE_GOTPC,                      14)
ELF_RELOC(R_CKCORE_GOT32,                      15)
ELF_RELOC(R_CKCORE_PLT32,                      16)
ELF_RELOC(R_CKCORE_ADDRGOT,                    17)
ELF_RELOC(R_CKCORE_ADDRPLT,                    18)
ELF_RELOC(R_CKCORE_PCREL_IMM26_2,              19)
ELF_RELOC(R_CKCORE_PCREL_IMM16_2,              20)
ELF_RELOC(R_CKCORE_PCREL_IMM16_4,              21)
ELF_RELOC(R_CKCORE_PCREL_IMM10_2,              22)
ELF_RELOC(R_CKCORE_PCREL_IMM10_4,              23)
ELF_RELOC(R_CKCORE_ADDR_HI16,                  24)
ELF_RELOC(R_CKCORE_ADDR_LO16,                  25)
ELF_RELOC(R_CKCORE_GOTPC_HI16,                 26)
ELF_RELOC(R_CKCORE_GOTPC_LO16,                 27)
ELF_RELOC(R_CKCORE_GOTOFF_HI16,                28)
ELF_RELOC(R_CKCORE_GOTOFF_LO16,                29)
ELF_RELOC(R_CKCORE_GOT12,                      30)
ELF_RELOC(R_CKCORE_GOT_HI16,                   31)
ELF_RELOC(R_CKCORE_GOT_LO16,                   32)
ELF_RELOC(R_CKCORE_PLT12,                      33)
ELF_RELOC(R_CKCORE_PLT_HI16,                   34)
ELF_RELOC(R_CKCORE_PLT_LO16,                   35)
ELF_RELOC(R_CKCORE_ADDRGOT_HI16,               36)
ELF_RELOC(R_CKCORE_ADDRGOT_LO16,               37)
ELF_RELOC(R_CKCORE_ADDRPLT_HI16,               38)
ELF_RELOC(R_CKCORE_ADDRPLT_LO16,               39)
ELF_RELOC(R_CKCORE_PCREL_JSR_IMM26_2,          40)
ELF_RELOC(R_CKCORE_TOFFSET_LO16,               41)
ELF_RELOC(R_CKCORE_DOFFSET_LO16,               42)
ELF_RELOC(R_CKCORE_PCREL_IMM18_2,              43)
ELF_RELOC(R_CKCORE_DOFFSET_IMM18,              44)
ELF_RELOC(R_CKCORE_DOFFSET_IMM18_2,            45)
ELF_RELOC(R_CKCORE_DOFFSET_IMM18_4,            46)
ELF_RELOC(R_CKCORE_GOTOFF_IMM18,               47)
ELF_RELOC(R_CKCORE_GOT_IMM18_4,                48)
ELF_RELOC(R_CKCORE_PLT_IMM18_4,                49)
ELF_RELOC(R_CKCORE_PCREL_IMM7_4,               50)
ELF_RELOC(R_CKCORE_TLS_LE32,                   51)
ELF_RELOC(R_CKCORE_TLS_IE32,                   52)
ELF_RELOC(R_CKCORE_TLS_GD32,                   53)
ELF_RELOC(R_CKCORE_TLS_LDM32,                  54)
ELF_RELOC(R_CKCORE_TLS_LDO32,                  55)
ELF_RELOC(R_CKCORE_TLS_DTPMOD32,               56)
ELF_RELOC(R_CKCORE_TLS_DTPOFF32,               57)
ELF_RELOC(R_CKCORE_TLS_TPOFF32,                58)
ELF_RELOC(R_CKCORE_PCREL_FLRW_IMM8_4,          59)
ELF_RELOC(R_CKCORE_NOJSRI,                     60)
ELF_RELOC(R_CKCORE_CALLGRAPH,                  61)
ELF_RELOC(R_CKCORE_IRELATIVE,                  62)
ELF_RELOC(R_CKCORE_PCREL_BLOOP_IMM4_4,         63)
ELF_RELOC(R_CKCORE_PCREL_BLOOP_IMM12_4,        64)
ELF_RELOC(R_CKCORE_PCREL_VLRW_IMM12_1,         65)
ELF_RELOC(R_CKCORE_PCREL_VLRW_IMM12_2,         66)
ELF_RELOC(R_CKCORE_PCREL_VLRW_IMM12_4,         67)
ELF_RELOC(R_CKCORE_PCREL_VLRW_IMM12_8,         68)
//===-- BinaryFormat/ELFRelocs/MSP430.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_MSP430_NONE,               0)
ELF_RELOC(R_MSP430_32,                 1)
ELF_RELOC(R_MSP430_10_PCREL,           2)
ELF_RELOC(R_MSP430_16,                 3)
ELF_RELOC(R_MSP430_16_PCREL,           4)
ELF_RELOC(R_MSP430_16_BYTE,            5)
ELF_RELOC(R_MSP430_16_PCREL_BYTE,      6)
ELF_RELOC(R_MSP430_2X_PCREL,           7)
ELF_RELOC(R_MSP430_RL_PCREL,           8)
ELF_RELOC(R_MSP430_8,                  9)
ELF_RELOC(R_MSP430_SYM_DIFF,           10)
//===-- BinaryFormat/ELFRelocs/LoongArch.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// These types and values are from the LoongArch ELF psABI which can be found at
// https://github.com/loongson/LoongArch-Documentation
// and these definitions have been adopted by binutils (include/elf/loongarch.h).
// The commit hash (main branch) we reference is:
// 9b3bd9f4a497115913c22f1a2a47863798fbc02a

ELF_RELOC(R_LARCH_NONE,                        0)
ELF_RELOC(R_LARCH_32,                          1)
ELF_RELOC(R_LARCH_64,                          2)
ELF_RELOC(R_LARCH_RELATIVE,                    3)
ELF_RELOC(R_LARCH_COPY,                        4)
ELF_RELOC(R_LARCH_JUMP_SLOT,                   5)
ELF_RELOC(R_LARCH_TLS_DTPMOD32,                6)
ELF_RELOC(R_LARCH_TLS_DTPMOD64,                7)
ELF_RELOC(R_LARCH_TLS_DTPREL32,                8)
ELF_RELOC(R_LARCH_TLS_DTPREL64,                9)
ELF_RELOC(R_LARCH_TLS_TPREL32,                10)
ELF_RELOC(R_LARCH_TLS_TPREL64,                11)
ELF_RELOC(R_LARCH_IRELATIVE,                  12)
ELF_RELOC(R_LARCH_MARK_LA,                    20)
ELF_RELOC(R_LARCH_MARK_PCREL,                 21)
ELF_RELOC(R_LARCH_SOP_PUSH_PCREL,             22)
ELF_RELOC(R_LARCH_SOP_PUSH_ABSOLUTE,          23)
ELF_RELOC(R_LARCH_SOP_PUSH_DUP,               24)
ELF_RELOC(R_LARCH_SOP_PUSH_GPREL,             25)
ELF_RELOC(R_LARCH_SOP_PUSH_TLS_TPREL,         26)
ELF_RELOC(R_LARCH_SOP_PUSH_TLS_GOT,           27)
ELF_RELOC(R_LARCH_SOP_PUSH_TLS_GD,            28)
ELF_RELOC(R_LARCH_SOP_PUSH_PLT_PCREL,         29)
ELF_RELOC(R_LARCH_SOP_ASSERT,                 30)
ELF_RELOC(R_LARCH_SOP_NOT,                    31)
ELF_RELOC(R_LARCH_SOP_SUB,                    32)
ELF_RELOC(R_LARCH_SOP_SL,                     33)
ELF_RELOC(R_LARCH_SOP_SR,                     34)
ELF_RELOC(R_LARCH_SOP_ADD,                    35)
ELF_RELOC(R_LARCH_SOP_AND,                    36)
ELF_RELOC(R_LARCH_SOP_IF_ELSE,                37)
ELF_RELOC(R_LARCH_SOP_POP_32_S_10_5,          38)
ELF_RELOC(R_LARCH_SOP_POP_32_U_10_12,         39)
ELF_RELOC(R_LARCH_SOP_POP_32_S_10_12,         40)
ELF_RELOC(R_LARCH_SOP_POP_32_S_10_16,         41)
ELF_RELOC(R_LARCH_SOP_POP_32_S_10_16_S2,      42)
ELF_RELOC(R_LARCH_SOP_POP_32_S_5_20,          43)
ELF_RELOC(R_LARCH_SOP_POP_32_S_0_5_10_16_S2,  44)
ELF_RELOC(R_LARCH_SOP_POP_32_S_0_10_10_16_S2, 45)
ELF_RELOC(R_LARCH_SOP_POP_32_U,               46)
ELF_RELOC(R_LARCH_ADD8,                       47)
ELF_RELOC(R_LARCH_ADD16,                      48)
ELF_RELOC(R_LARCH_ADD24,                      49)
ELF_RELOC(R_LARCH_ADD32,                      50)
ELF_RELOC(R_LARCH_ADD64,                      51)
ELF_RELOC(R_LARCH_SUB8,                       52)
ELF_RELOC(R_LARCH_SUB16,                      53)
ELF_RELOC(R_LARCH_SUB24,                      54)
ELF_RELOC(R_LARCH_SUB32,                      55)
ELF_RELOC(R_LARCH_SUB64,                      56)
ELF_RELOC(R_LARCH_GNU_VTINHERIT,              57)
ELF_RELOC(R_LARCH_GNU_VTENTRY,                58)

// Relocs whose processing does not require a stack machine.
//
// Spec addition: https://github.com/loongson/LoongArch-Documentation/pull/57
// Binutils commit 6d13722a97cee3fd397e116bde3bcedbb1e220be
//      and commit 9801120721c3a702ce3bd50433ef920f92a83502
ELF_RELOC(R_LARCH_B16,              64)
ELF_RELOC(R_LARCH_B21,              65)
ELF_RELOC(R_LARCH_B26,              66)
ELF_RELOC(R_LARCH_ABS_HI20,         67)
ELF_RELOC(R_LARCH_ABS_LO12,         68)
ELF_RELOC(R_LARCH_ABS64_LO20,       69)
ELF_RELOC(R_LARCH_ABS64_HI12,       70)
ELF_RELOC(R_LARCH_PCALA_HI20,       71)
ELF_RELOC(R_LARCH_PCALA_LO12,       72)
ELF_RELOC(R_LARCH_PCALA64_LO20,     73)
ELF_RELOC(R_LARCH_PCALA64_HI12,     74)
ELF_RELOC(R_LARCH_GOT_PC_HI20,      75)
ELF_RELOC(R_LARCH_GOT_PC_LO12,      76)
ELF_RELOC(R_LARCH_GOT64_PC_LO20,    77)
ELF_RELOC(R_LARCH_GOT64_PC_HI12,    78)
ELF_RELOC(R_LARCH_GOT_HI20,         79)
ELF_RELOC(R_LARCH_GOT_LO12,         80)
ELF_RELOC(R_LARCH_GOT64_LO20,       81)
ELF_RELOC(R_LARCH_GOT64_HI12,       82)
ELF_RELOC(R_LARCH_TLS_LE_HI20,      83)
ELF_RELOC(R_LARCH_TLS_LE_LO12,      84)
ELF_RELOC(R_LARCH_TLS_LE64_LO20,    85)
ELF_RELOC(R_LARCH_TLS_LE64_HI12,    86)
ELF_RELOC(R_LARCH_TLS_IE_PC_HI20,   87)
ELF_RELOC(R_LARCH_TLS_IE_PC_LO12,   88)
ELF_RELOC(R_LARCH_TLS_IE64_PC_LO20, 89)
ELF_RELOC(R_LARCH_TLS_IE64_PC_HI12, 90)
ELF_RELOC(R_LARCH_TLS_IE_HI20,      91)
ELF_RELOC(R_LARCH_TLS_IE_LO12,      92)
ELF_RELOC(R_LARCH_TLS_IE64_LO20,    93)
ELF_RELOC(R_LARCH_TLS_IE64_HI12,    94)
ELF_RELOC(R_LARCH_TLS_LD_PC_HI20,   95)
ELF_RELOC(R_LARCH_TLS_LD_HI20,      96)
ELF_RELOC(R_LARCH_TLS_GD_PC_HI20,   97)
ELF_RELOC(R_LARCH_TLS_GD_HI20,      98)
ELF_RELOC(R_LARCH_32_PCREL,         99)
ELF_RELOC(R_LARCH_RELAX,            100)

// Relocs added in ELF for the LoongArch™ Architecture v20230519, part of the
// v2.10 LoongArch ABI specs.
//
// Spec addition: https://github.com/loongson/la-abi-specs/pull/1
// Binutils commit 57a930e3bfe4b2c7fd6463ed39311e1938513138
ELF_RELOC(R_LARCH_DELETE,      101)
ELF_RELOC(R_LARCH_ALIGN,       102)
ELF_RELOC(R_LARCH_PCREL20_S2,  103)
ELF_RELOC(R_LARCH_CFA,         104)
ELF_RELOC(R_LARCH_ADD6,        105)
ELF_RELOC(R_LARCH_SUB6,        106)
ELF_RELOC(R_LARCH_ADD_ULEB128, 107)
ELF_RELOC(R_LARCH_SUB_ULEB128, 108)
ELF_RELOC(R_LARCH_64_PCREL,    109)
//===-- BinaryFormat/ELFRelocs/AArch64.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// Based on ABI release 1.1-beta, dated 6 November 2013. NB: The cover page of
// this document, IHI0056C_beta_aaelf64.pdf, on infocenter.arm.com, still
// labels this as release 1.0.
ELF_RELOC(R_AARCH64_NONE,                                0)
ELF_RELOC(R_AARCH64_ABS64,                           0x101)
ELF_RELOC(R_AARCH64_ABS32,                           0x102)
ELF_RELOC(R_AARCH64_ABS16,                           0x103)
ELF_RELOC(R_AARCH64_PREL64,                          0x104)
ELF_RELOC(R_AARCH64_PREL32,                          0x105)
ELF_RELOC(R_AARCH64_PREL16,                          0x106)
ELF_RELOC(R_AARCH64_MOVW_UABS_G0,                    0x107)
ELF_RELOC(R_AARCH64_MOVW_UABS_G0_NC,                 0x108)
ELF_RELOC(R_AARCH64_MOVW_UABS_G1,                    0x109)
ELF_RELOC(R_AARCH64_MOVW_UABS_G1_NC,                 0x10a)
ELF_RELOC(R_AARCH64_MOVW_UABS_G2,                    0x10b)
ELF_RELOC(R_AARCH64_MOVW_UABS_G2_NC,                 0x10c)
ELF_RELOC(R_AARCH64_MOVW_UABS_G3,                    0x10d)
ELF_RELOC(R_AARCH64_MOVW_SABS_G0,                    0x10e)
ELF_RELOC(R_AARCH64_MOVW_SABS_G1,                    0x10f)
ELF_RELOC(R_AARCH64_MOVW_SABS_G2,                    0x110)
ELF_RELOC(R_AARCH64_LD_PREL_LO19,                    0x111)
ELF_RELOC(R_AARCH64_ADR_PREL_LO21,                   0x112)
ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21,                0x113)
ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21_NC,             0x114)
ELF_RELOC(R_AARCH64_ADD_ABS_LO12_NC,                 0x115)
ELF_RELOC(R_AARCH64_LDST8_ABS_LO12_NC,               0x116)
ELF_RELOC(R_AARCH64_TSTBR14,                         0x117)
ELF_RELOC(R_AARCH64_CONDBR19,                        0x118)
ELF_RELOC(R_AARCH64_JUMP26,                          0x11a)
ELF_RELOC(R_AARCH64_CALL26,                          0x11b)
ELF_RELOC(R_AARCH64_LDST16_ABS_LO12_NC,              0x11c)
ELF_RELOC(R_AARCH64_LDST32_ABS_LO12_NC,              0x11d)
ELF_RELOC(R_AARCH64_LDST64_ABS_LO12_NC,              0x11e)
ELF_RELOC(R_AARCH64_MOVW_PREL_G0,                    0x11f)
ELF_RELOC(R_AARCH64_MOVW_PREL_G0_NC,                 0x120)
ELF_RELOC(R_AARCH64_MOVW_PREL_G1,                    0x121)
ELF_RELOC(R_AARCH64_MOVW_PREL_G1_NC,                 0x122)
ELF_RELOC(R_AARCH64_MOVW_PREL_G2,                    0x123)
ELF_RELOC(R_AARCH64_MOVW_PREL_G2_NC,                 0x124)
ELF_RELOC(R_AARCH64_MOVW_PREL_G3,                    0x125)
ELF_RELOC(R_AARCH64_LDST128_ABS_LO12_NC,             0x12b)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0,                  0x12c)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0_NC,               0x12d)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1,                  0x12e)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1_NC,               0x12f)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2,                  0x130)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2_NC,               0x131)
ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G3,                  0x132)
ELF_RELOC(R_AARCH64_GOTREL64,                        0x133)
ELF_RELOC(R_AARCH64_GOTREL32,                        0x134)
ELF_RELOC(R_AARCH64_GOT_LD_PREL19,                   0x135)
ELF_RELOC(R_AARCH64_LD64_GOTOFF_LO15,                0x136)
ELF_RELOC(R_AARCH64_ADR_GOT_PAGE,                    0x137)
ELF_RELOC(R_AARCH64_LD64_GOT_LO12_NC,                0x138)
ELF_RELOC(R_AARCH64_LD64_GOTPAGE_LO15,               0x139)
ELF_RELOC(R_AARCH64_PLT32,                           0x13a)
ELF_RELOC(R_AARCH64_TLSGD_ADR_PREL21,                0x200)
ELF_RELOC(R_AARCH64_TLSGD_ADR_PAGE21,                0x201)
ELF_RELOC(R_AARCH64_TLSGD_ADD_LO12_NC,               0x202)
ELF_RELOC(R_AARCH64_TLSGD_MOVW_G1,                   0x203)
ELF_RELOC(R_AARCH64_TLSGD_MOVW_G0_NC,                0x204)
ELF_RELOC(R_AARCH64_TLSLD_ADR_PREL21,                0x205)
ELF_RELOC(R_AARCH64_TLSLD_ADR_PAGE21,                0x206)
ELF_RELOC(R_AARCH64_TLSLD_ADD_LO12_NC,               0x207)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_G1,                   0x208)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_G0_NC,                0x209)
ELF_RELOC(R_AARCH64_TLSLD_LD_PREL19,                 0x20a)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G2,            0x20b)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1,            0x20c)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,         0x20d)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0,            0x20e)
ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,         0x20f)
ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_HI12,           0x210)
ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12,           0x211)
ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,        0x212)
ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12,         0x213)
ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,      0x214)
ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12,        0x215)
ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,     0x216)
ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12,        0x217)
ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,     0x218)
ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12,        0x219)
ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,     0x21a)
ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G1,          0x21b)
ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,       0x21c)
ELF_RELOC(R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,       0x21d)
ELF_RELOC(R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,     0x21e)
ELF_RELOC(R_AARCH64_TLSIE_LD_GOTTPREL_PREL19,        0x21f)
ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G2,             0x220)
ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1,             0x221)
ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1_NC,          0x222)
ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0,             0x223)
ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0_NC,          0x224)
ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_HI12,            0x225)
ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12,            0x226)
ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12_NC,         0x227)
ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12,          0x228)
ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,       0x229)
ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12,         0x22a)
ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,      0x22b)
ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12,         0x22c)
ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,      0x22d)
ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12,         0x22e)
ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,      0x22f)
ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19,               0x230)
ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21,              0x231)
ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21,              0x232)
ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12,               0x233)
ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12,                0x234)
ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1,                  0x235)
ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC,               0x236)
ELF_RELOC(R_AARCH64_TLSDESC_LDR,                     0x237)
ELF_RELOC(R_AARCH64_TLSDESC_ADD,                     0x238)
ELF_RELOC(R_AARCH64_TLSDESC_CALL,                    0x239)
ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12,        0x23a)
ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC,     0x23b)
ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12,       0x23c)
ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC,    0x23d)
// Dynamic relocations start
ELF_RELOC(R_AARCH64_COPY,                            0x400)
ELF_RELOC(R_AARCH64_GLOB_DAT,                        0x401)
ELF_RELOC(R_AARCH64_JUMP_SLOT,                       0x402)
ELF_RELOC(R_AARCH64_RELATIVE,                        0x403)
// 0x404 and 0x405 are now R_AARCH64_TLS_IMPDEF1 and R_AARCH64_TLS_IMPDEF2.
// We follow GNU and define TLS_IMPDEF1 as TLS_DTPMOD64 and TLS_IMPDEF2 as
// TLS_DTPREL64.
ELF_RELOC(R_AARCH64_TLS_DTPMOD64,                    0x404)
ELF_RELOC(R_AARCH64_TLS_DTPREL64,                    0x405)
ELF_RELOC(R_AARCH64_TLS_TPREL64,                     0x406)
ELF_RELOC(R_AARCH64_TLSDESC,                         0x407)
ELF_RELOC(R_AARCH64_IRELATIVE,                       0x408)

// ELF_RELOC(R_AARCH64_P32_NONE,                         0)
ELF_RELOC(R_AARCH64_P32_ABS32,                       0x001)
ELF_RELOC(R_AARCH64_P32_ABS16,                       0x002)
ELF_RELOC(R_AARCH64_P32_PREL32,                      0x003)
ELF_RELOC(R_AARCH64_P32_PREL16,                      0x004)
ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G0,                0x005)
ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G0_NC,             0x006)
ELF_RELOC(R_AARCH64_P32_MOVW_UABS_G1,                0x007)
ELF_RELOC(R_AARCH64_P32_MOVW_SABS_G0,                0x008)
ELF_RELOC(R_AARCH64_P32_LD_PREL_LO19,                0x009)
ELF_RELOC(R_AARCH64_P32_ADR_PREL_LO21,               0x00a)
ELF_RELOC(R_AARCH64_P32_ADR_PREL_PG_HI21,            0x00b)
ELF_RELOC(R_AARCH64_P32_ADD_ABS_LO12_NC,             0x00c)
ELF_RELOC(R_AARCH64_P32_LDST8_ABS_LO12_NC,           0x00d)
ELF_RELOC(R_AARCH64_P32_LDST16_ABS_LO12_NC,          0x00e)
ELF_RELOC(R_AARCH64_P32_LDST32_ABS_LO12_NC,          0x00f)
ELF_RELOC(R_AARCH64_P32_LDST64_ABS_LO12_NC,          0x010)
ELF_RELOC(R_AARCH64_P32_LDST128_ABS_LO12_NC,         0x011)
ELF_RELOC(R_AARCH64_P32_TSTBR14,                     0x012)
ELF_RELOC(R_AARCH64_P32_CONDBR19,                    0x013)
ELF_RELOC(R_AARCH64_P32_JUMP26,                      0x014)
ELF_RELOC(R_AARCH64_P32_CALL26,                      0x015)
ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0,                0x016)
ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0_NC,             0x017)
ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G1,                0x018)
ELF_RELOC(R_AARCH64_P32_GOT_LD_PREL19,               0x019)
ELF_RELOC(R_AARCH64_P32_ADR_GOT_PAGE,                0x01a)
ELF_RELOC(R_AARCH64_P32_LD32_GOT_LO12_NC,            0x01b)
ELF_RELOC(R_AARCH64_P32_LD32_GOTPAGE_LO14,           0x01c)
ELF_RELOC(R_AARCH64_P32_PLT32,                       0x01d)
ELF_RELOC(R_AARCH64_P32_TLSGD_ADR_PREL21,            0x050)
ELF_RELOC(R_AARCH64_P32_TLSGD_ADR_PAGE21,            0x051)
ELF_RELOC(R_AARCH64_P32_TLSGD_ADD_LO12_NC,           0x052)
ELF_RELOC(R_AARCH64_P32_TLSLD_ADR_PREL21,            0x053)
ELF_RELOC(R_AARCH64_P32_TLSLD_ADR_PAGE21,            0x054)
ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_LO12_NC,           0x055)
ELF_RELOC(R_AARCH64_P32_TLSLD_LD_PREL19,             0x056)
ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G1,        0x057)
ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0,        0x058)
ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0_NC,     0x059)
ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_HI12,       0x05a)
ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12,       0x05b)
ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_DTPREL_LO12_NC,    0x05c)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12,     0x05d)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST8_DTPREL_LO12_NC,  0x05e)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12,    0x05f)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST16_DTPREL_LO12_NC, 0x060)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12,    0x061)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12_NC, 0x062)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12,    0x063)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12_NC, 0x064)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12,   0x065)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12_NC, 0x066)
ELF_RELOC(R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21,   0x067)
ELF_RELOC(R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC, 0x068)
ELF_RELOC(R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19,    0x069)
ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G1,         0x06a)
ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G0,         0x06b)
ELF_RELOC(R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC,      0x06c)
ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_HI12,        0x06d)
ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_LO12,        0x06e)
ELF_RELOC(R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC,     0x06f)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12,      0x070)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST8_TPREL_LO12_NC,   0x071)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12,     0x072)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST16_TPREL_LO12_NC,  0x073)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12,     0x074)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12_NC,  0x075)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12,     0x076)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12_NC,  0x077)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12,    0x078)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12_NC, 0x079)
ELF_RELOC(R_AARCH64_P32_TLSDESC_LD_PREL19,           0x07a)
ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PREL21,          0x07b)
ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PAGE21,          0x07c)
ELF_RELOC(R_AARCH64_P32_TLSDESC_LD32_LO12,           0x07d)
ELF_RELOC(R_AARCH64_P32_TLSDESC_ADD_LO12,            0x07e)
ELF_RELOC(R_AARCH64_P32_TLSDESC_CALL,                0x07f)
// Dynamic relocations start
ELF_RELOC(R_AARCH64_P32_COPY,                        0x0b4)
ELF_RELOC(R_AARCH64_P32_GLOB_DAT,                    0x0b5)
ELF_RELOC(R_AARCH64_P32_JUMP_SLOT,                   0x0b6)
ELF_RELOC(R_AARCH64_P32_RELATIVE,                    0x0b7)
ELF_RELOC(R_AARCH64_P32_TLS_DTPREL,                  0x0b8)
ELF_RELOC(R_AARCH64_P32_TLS_DTPMOD,                  0x0b9)
ELF_RELOC(R_AARCH64_P32_TLS_TPREL,                   0x0ba)
ELF_RELOC(R_AARCH64_P32_TLSDESC,                     0x0bb)
ELF_RELOC(R_AARCH64_P32_IRELATIVE,                   0x0bc)
//===-- BinaryFormat/ELFRelocs/i386.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// TODO: this is just a subset
ELF_RELOC(R_386_NONE,           0)
ELF_RELOC(R_386_32,             1)
ELF_RELOC(R_386_PC32,           2)
ELF_RELOC(R_386_GOT32,          3)
ELF_RELOC(R_386_PLT32,          4)
ELF_RELOC(R_386_COPY,           5)
ELF_RELOC(R_386_GLOB_DAT,       6)
ELF_RELOC(R_386_JUMP_SLOT,      7)
ELF_RELOC(R_386_RELATIVE,       8)
ELF_RELOC(R_386_GOTOFF,         9)
ELF_RELOC(R_386_GOTPC,          10)
ELF_RELOC(R_386_32PLT,          11)
ELF_RELOC(R_386_TLS_TPOFF,      14)
ELF_RELOC(R_386_TLS_IE,         15)
ELF_RELOC(R_386_TLS_GOTIE,      16)
ELF_RELOC(R_386_TLS_LE,         17)
ELF_RELOC(R_386_TLS_GD,         18)
ELF_RELOC(R_386_TLS_LDM,        19)
ELF_RELOC(R_386_16,             20)
ELF_RELOC(R_386_PC16,           21)
ELF_RELOC(R_386_8,              22)
ELF_RELOC(R_386_PC8,            23)
ELF_RELOC(R_386_TLS_GD_32,      24)
ELF_RELOC(R_386_TLS_GD_PUSH,    25)
ELF_RELOC(R_386_TLS_GD_CALL,    26)
ELF_RELOC(R_386_TLS_GD_POP,     27)
ELF_RELOC(R_386_TLS_LDM_32,     28)
ELF_RELOC(R_386_TLS_LDM_PUSH,   29)
ELF_RELOC(R_386_TLS_LDM_CALL,   30)
ELF_RELOC(R_386_TLS_LDM_POP,    31)
ELF_RELOC(R_386_TLS_LDO_32,     32)
ELF_RELOC(R_386_TLS_IE_32,      33)
ELF_RELOC(R_386_TLS_LE_32,      34)
ELF_RELOC(R_386_TLS_DTPMOD32,   35)
ELF_RELOC(R_386_TLS_DTPOFF32,   36)
ELF_RELOC(R_386_TLS_TPOFF32,    37)
ELF_RELOC(R_386_TLS_GOTDESC,    39)
ELF_RELOC(R_386_TLS_DESC_CALL,  40)
ELF_RELOC(R_386_TLS_DESC,       41)
ELF_RELOC(R_386_IRELATIVE,      42)
ELF_RELOC(R_386_GOT32X,         43)
//===-- BinaryFormat/ELFRelocs/RISCV.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_RISCV_NONE,               0)
ELF_RELOC(R_RISCV_32,                 1)
ELF_RELOC(R_RISCV_64,                 2)
ELF_RELOC(R_RISCV_RELATIVE,           3)
ELF_RELOC(R_RISCV_COPY,               4)
ELF_RELOC(R_RISCV_JUMP_SLOT,          5)
ELF_RELOC(R_RISCV_TLS_DTPMOD32,       6)
ELF_RELOC(R_RISCV_TLS_DTPMOD64,       7)
ELF_RELOC(R_RISCV_TLS_DTPREL32,       8)
ELF_RELOC(R_RISCV_TLS_DTPREL64,       9)
ELF_RELOC(R_RISCV_TLS_TPREL32,       10)
ELF_RELOC(R_RISCV_TLS_TPREL64,       11)
ELF_RELOC(R_RISCV_BRANCH,            16)
ELF_RELOC(R_RISCV_JAL,               17)
ELF_RELOC(R_RISCV_CALL,              18)
ELF_RELOC(R_RISCV_CALL_PLT,          19)
ELF_RELOC(R_RISCV_GOT_HI20,          20)
ELF_RELOC(R_RISCV_TLS_GOT_HI20,      21)
ELF_RELOC(R_RISCV_TLS_GD_HI20,       22)
ELF_RELOC(R_RISCV_PCREL_HI20,        23)
ELF_RELOC(R_RISCV_PCREL_LO12_I,      24)
ELF_RELOC(R_RISCV_PCREL_LO12_S,      25)
ELF_RELOC(R_RISCV_HI20,              26)
ELF_RELOC(R_RISCV_LO12_I,            27)
ELF_RELOC(R_RISCV_LO12_S,            28)
ELF_RELOC(R_RISCV_TPREL_HI20,        29)
ELF_RELOC(R_RISCV_TPREL_LO12_I,      30)
ELF_RELOC(R_RISCV_TPREL_LO12_S,      31)
ELF_RELOC(R_RISCV_TPREL_ADD,         32)
ELF_RELOC(R_RISCV_ADD8,              33)
ELF_RELOC(R_RISCV_ADD16,             34)
ELF_RELOC(R_RISCV_ADD32,             35)
ELF_RELOC(R_RISCV_ADD64,             36)
ELF_RELOC(R_RISCV_SUB8,              37)
ELF_RELOC(R_RISCV_SUB16,             38)
ELF_RELOC(R_RISCV_SUB32,             39)
ELF_RELOC(R_RISCV_SUB64,             40)
ELF_RELOC(R_RISCV_GNU_VTINHERIT,     41)
ELF_RELOC(R_RISCV_GNU_VTENTRY,       42)
ELF_RELOC(R_RISCV_ALIGN,             43)
ELF_RELOC(R_RISCV_RVC_BRANCH,        44)
ELF_RELOC(R_RISCV_RVC_JUMP,          45)
ELF_RELOC(R_RISCV_RVC_LUI,           46)
ELF_RELOC(R_RISCV_RELAX,             51)
ELF_RELOC(R_RISCV_SUB6,              52)
ELF_RELOC(R_RISCV_SET6,              53)
ELF_RELOC(R_RISCV_SET8,              54)
ELF_RELOC(R_RISCV_SET16,             55)
ELF_RELOC(R_RISCV_SET32,             56)
ELF_RELOC(R_RISCV_32_PCREL,          57)
ELF_RELOC(R_RISCV_IRELATIVE,         58)
ELF_RELOC(R_RISCV_PLT32,             59)
//===-- BinaryFormat/ELFRelocs/BPF.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// No relocation
ELF_RELOC(R_BPF_NONE,        0)
ELF_RELOC(R_BPF_64_64,       1)
ELF_RELOC(R_BPF_64_ABS64,    2)
ELF_RELOC(R_BPF_64_ABS32,    3)
ELF_RELOC(R_BPF_64_NODYLD32, 4)
ELF_RELOC(R_BPF_64_32,      10)
//===-- BinaryFormat/ELFRelocs/Sparc.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_SPARC_NONE,         0)
ELF_RELOC(R_SPARC_8,            1)
ELF_RELOC(R_SPARC_16,           2)
ELF_RELOC(R_SPARC_32,           3)
ELF_RELOC(R_SPARC_DISP8,        4)
ELF_RELOC(R_SPARC_DISP16,       5)
ELF_RELOC(R_SPARC_DISP32,       6)
ELF_RELOC(R_SPARC_WDISP30,      7)
ELF_RELOC(R_SPARC_WDISP22,      8)
ELF_RELOC(R_SPARC_HI22,         9)
ELF_RELOC(R_SPARC_22,           10)
ELF_RELOC(R_SPARC_13,           11)
ELF_RELOC(R_SPARC_LO10,         12)
ELF_RELOC(R_SPARC_GOT10,        13)
ELF_RELOC(R_SPARC_GOT13,        14)
ELF_RELOC(R_SPARC_GOT22,        15)
ELF_RELOC(R_SPARC_PC10,         16)
ELF_RELOC(R_SPARC_PC22,         17)
ELF_RELOC(R_SPARC_WPLT30,       18)
ELF_RELOC(R_SPARC_COPY,         19)
ELF_RELOC(R_SPARC_GLOB_DAT,     20)
ELF_RELOC(R_SPARC_JMP_SLOT,     21)
ELF_RELOC(R_SPARC_RELATIVE,     22)
ELF_RELOC(R_SPARC_UA32,         23)
ELF_RELOC(R_SPARC_PLT32,        24)
ELF_RELOC(R_SPARC_HIPLT22,      25)
ELF_RELOC(R_SPARC_LOPLT10,      26)
ELF_RELOC(R_SPARC_PCPLT32,      27)
ELF_RELOC(R_SPARC_PCPLT22,      28)
ELF_RELOC(R_SPARC_PCPLT10,      29)
ELF_RELOC(R_SPARC_10,           30)
ELF_RELOC(R_SPARC_11,           31)
ELF_RELOC(R_SPARC_64,           32)
ELF_RELOC(R_SPARC_OLO10,        33)
ELF_RELOC(R_SPARC_HH22,         34)
ELF_RELOC(R_SPARC_HM10,         35)
ELF_RELOC(R_SPARC_LM22,         36)
ELF_RELOC(R_SPARC_PC_HH22,      37)
ELF_RELOC(R_SPARC_PC_HM10,      38)
ELF_RELOC(R_SPARC_PC_LM22,      39)
ELF_RELOC(R_SPARC_WDISP16,      40)
ELF_RELOC(R_SPARC_WDISP19,      41)
ELF_RELOC(R_SPARC_7,            43)
ELF_RELOC(R_SPARC_5,            44)
ELF_RELOC(R_SPARC_6,            45)
ELF_RELOC(R_SPARC_DISP64,       46)
ELF_RELOC(R_SPARC_PLT64,        47)
ELF_RELOC(R_SPARC_HIX22,        48)
ELF_RELOC(R_SPARC_LOX10,        49)
ELF_RELOC(R_SPARC_H44,          50)
ELF_RELOC(R_SPARC_M44,          51)
ELF_RELOC(R_SPARC_L44,          52)
ELF_RELOC(R_SPARC_REGISTER,     53)
ELF_RELOC(R_SPARC_UA64,         54)
ELF_RELOC(R_SPARC_UA16,         55)
ELF_RELOC(R_SPARC_TLS_GD_HI22,    56)
ELF_RELOC(R_SPARC_TLS_GD_LO10,    57)
ELF_RELOC(R_SPARC_TLS_GD_ADD,     58)
ELF_RELOC(R_SPARC_TLS_GD_CALL,    59)
ELF_RELOC(R_SPARC_TLS_LDM_HI22,   60)
ELF_RELOC(R_SPARC_TLS_LDM_LO10,   61)
ELF_RELOC(R_SPARC_TLS_LDM_ADD,    62)
ELF_RELOC(R_SPARC_TLS_LDM_CALL,   63)
ELF_RELOC(R_SPARC_TLS_LDO_HIX22,  64)
ELF_RELOC(R_SPARC_TLS_LDO_LOX10,  65)
ELF_RELOC(R_SPARC_TLS_LDO_ADD,    66)
ELF_RELOC(R_SPARC_TLS_IE_HI22,    67)
ELF_RELOC(R_SPARC_TLS_IE_LO10,    68)
ELF_RELOC(R_SPARC_TLS_IE_LD,      69)
ELF_RELOC(R_SPARC_TLS_IE_LDX,     70)
ELF_RELOC(R_SPARC_TLS_IE_ADD,     71)
ELF_RELOC(R_SPARC_TLS_LE_HIX22,   72)
ELF_RELOC(R_SPARC_TLS_LE_LOX10,   73)
ELF_RELOC(R_SPARC_TLS_DTPMOD32,   74)
ELF_RELOC(R_SPARC_TLS_DTPMOD64,   75)
ELF_RELOC(R_SPARC_TLS_DTPOFF32,   76)
ELF_RELOC(R_SPARC_TLS_DTPOFF64,   77)
ELF_RELOC(R_SPARC_TLS_TPOFF32,    78)
ELF_RELOC(R_SPARC_TLS_TPOFF64,    79)
ELF_RELOC(R_SPARC_GOTDATA_HIX22,  80)
ELF_RELOC(R_SPARC_GOTDATA_LOX10,  81)
ELF_RELOC(R_SPARC_GOTDATA_OP_HIX22,  82)
ELF_RELOC(R_SPARC_GOTDATA_OP_LOX10,  83)
ELF_RELOC(R_SPARC_GOTDATA_OP,     84)
//===-- BinaryFormat/ELFRelocs/ARM.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// Follows release 2.09 of the ARM ELF ABI specification.
ELF_RELOC(R_ARM_NONE,                   0x00)
ELF_RELOC(R_ARM_PC24,                   0x01)
ELF_RELOC(R_ARM_ABS32,                  0x02)
ELF_RELOC(R_ARM_REL32,                  0x03)
ELF_RELOC(R_ARM_LDR_PC_G0,              0x04)
ELF_RELOC(R_ARM_ABS16,                  0x05)
ELF_RELOC(R_ARM_ABS12,                  0x06)
ELF_RELOC(R_ARM_THM_ABS5,               0x07)
ELF_RELOC(R_ARM_ABS8,                   0x08)
ELF_RELOC(R_ARM_SBREL32,                0x09)
ELF_RELOC(R_ARM_THM_CALL,               0x0a)
ELF_RELOC(R_ARM_THM_PC8,                0x0b)
ELF_RELOC(R_ARM_BREL_ADJ,               0x0c)
ELF_RELOC(R_ARM_TLS_DESC,               0x0d)
ELF_RELOC(R_ARM_THM_SWI8,               0x0e)
ELF_RELOC(R_ARM_XPC25,                  0x0f)
ELF_RELOC(R_ARM_THM_XPC22,              0x10)
ELF_RELOC(R_ARM_TLS_DTPMOD32,           0x11)
ELF_RELOC(R_ARM_TLS_DTPOFF32,           0x12)
ELF_RELOC(R_ARM_TLS_TPOFF32,            0x13)
ELF_RELOC(R_ARM_COPY,                   0x14)
ELF_RELOC(R_ARM_GLOB_DAT,               0x15)
ELF_RELOC(R_ARM_JUMP_SLOT,              0x16)
ELF_RELOC(R_ARM_RELATIVE,               0x17)
ELF_RELOC(R_ARM_GOTOFF32,               0x18)
ELF_RELOC(R_ARM_BASE_PREL,              0x19)
ELF_RELOC(R_ARM_GOT_BREL,               0x1a)
ELF_RELOC(R_ARM_PLT32,                  0x1b)
ELF_RELOC(R_ARM_CALL,                   0x1c)
ELF_RELOC(R_ARM_JUMP24,                 0x1d)
ELF_RELOC(R_ARM_THM_JUMP24,             0x1e)
ELF_RELOC(R_ARM_BASE_ABS,               0x1f)
ELF_RELOC(R_ARM_ALU_PCREL_7_0,          0x20)
ELF_RELOC(R_ARM_ALU_PCREL_15_8,         0x21)
ELF_RELOC(R_ARM_ALU_PCREL_23_15,        0x22)
ELF_RELOC(R_ARM_LDR_SBREL_11_0_NC,      0x23)
ELF_RELOC(R_ARM_ALU_SBREL_19_12_NC,     0x24)
ELF_RELOC(R_ARM_ALU_SBREL_27_20_CK,     0x25)
ELF_RELOC(R_ARM_TARGET1,                0x26)
ELF_RELOC(R_ARM_SBREL31,                0x27)
ELF_RELOC(R_ARM_V4BX,                   0x28)
ELF_RELOC(R_ARM_TARGET2,                0x29)
ELF_RELOC(R_ARM_PREL31,                 0x2a)
ELF_RELOC(R_ARM_MOVW_ABS_NC,            0x2b)
ELF_RELOC(R_ARM_MOVT_ABS,               0x2c)
ELF_RELOC(R_ARM_MOVW_PREL_NC,           0x2d)
ELF_RELOC(R_ARM_MOVT_PREL,              0x2e)
ELF_RELOC(R_ARM_THM_MOVW_ABS_NC,        0x2f)
ELF_RELOC(R_ARM_THM_MOVT_ABS,           0x30)
ELF_RELOC(R_ARM_THM_MOVW_PREL_NC,       0x31)
ELF_RELOC(R_ARM_THM_MOVT_PREL,          0x32)
ELF_RELOC(R_ARM_THM_JUMP19,             0x33)
ELF_RELOC(R_ARM_THM_JUMP6,              0x34)
ELF_RELOC(R_ARM_THM_ALU_PREL_11_0,      0x35)
ELF_RELOC(R_ARM_THM_PC12,               0x36)
ELF_RELOC(R_ARM_ABS32_NOI,              0x37)
ELF_RELOC(R_ARM_REL32_NOI,              0x38)
ELF_RELOC(R_ARM_ALU_PC_G0_NC,           0x39)
ELF_RELOC(R_ARM_ALU_PC_G0,              0x3a)
ELF_RELOC(R_ARM_ALU_PC_G1_NC,           0x3b)
ELF_RELOC(R_ARM_ALU_PC_G1,              0x3c)
ELF_RELOC(R_ARM_ALU_PC_G2,              0x3d)
ELF_RELOC(R_ARM_LDR_PC_G1,              0x3e)
ELF_RELOC(R_ARM_LDR_PC_G2,              0x3f)
ELF_RELOC(R_ARM_LDRS_PC_G0,             0x40)
ELF_RELOC(R_ARM_LDRS_PC_G1,             0x41)
ELF_RELOC(R_ARM_LDRS_PC_G2,             0x42)
ELF_RELOC(R_ARM_LDC_PC_G0,              0x43)
ELF_RELOC(R_ARM_LDC_PC_G1,              0x44)
ELF_RELOC(R_ARM_LDC_PC_G2,              0x45)
ELF_RELOC(R_ARM_ALU_SB_G0_NC,           0x46)
ELF_RELOC(R_ARM_ALU_SB_G0,              0x47)
ELF_RELOC(R_ARM_ALU_SB_G1_NC,           0x48)
ELF_RELOC(R_ARM_ALU_SB_G1,              0x49)
ELF_RELOC(R_ARM_ALU_SB_G2,              0x4a)
ELF_RELOC(R_ARM_LDR_SB_G0,              0x4b)
ELF_RELOC(R_ARM_LDR_SB_G1,              0x4c)
ELF_RELOC(R_ARM_LDR_SB_G2,              0x4d)
ELF_RELOC(R_ARM_LDRS_SB_G0,             0x4e)
ELF_RELOC(R_ARM_LDRS_SB_G1,             0x4f)
ELF_RELOC(R_ARM_LDRS_SB_G2,             0x50)
ELF_RELOC(R_ARM_LDC_SB_G0,              0x51)
ELF_RELOC(R_ARM_LDC_SB_G1,              0x52)
ELF_RELOC(R_ARM_LDC_SB_G2,              0x53)
ELF_RELOC(R_ARM_MOVW_BREL_NC,           0x54)
ELF_RELOC(R_ARM_MOVT_BREL,              0x55)
ELF_RELOC(R_ARM_MOVW_BREL,              0x56)
ELF_RELOC(R_ARM_THM_MOVW_BREL_NC,       0x57)
ELF_RELOC(R_ARM_THM_MOVT_BREL,          0x58)
ELF_RELOC(R_ARM_THM_MOVW_BREL,          0x59)
ELF_RELOC(R_ARM_TLS_GOTDESC,            0x5a)
ELF_RELOC(R_ARM_TLS_CALL,               0x5b)
ELF_RELOC(R_ARM_TLS_DESCSEQ,            0x5c)
ELF_RELOC(R_ARM_THM_TLS_CALL,           0x5d)
ELF_RELOC(R_ARM_PLT32_ABS,              0x5e)
ELF_RELOC(R_ARM_GOT_ABS,                0x5f)
ELF_RELOC(R_ARM_GOT_PREL,               0x60)
ELF_RELOC(R_ARM_GOT_BREL12,             0x61)
ELF_RELOC(R_ARM_GOTOFF12,               0x62)
ELF_RELOC(R_ARM_GOTRELAX,               0x63)
ELF_RELOC(R_ARM_GNU_VTENTRY,            0x64)
ELF_RELOC(R_ARM_GNU_VTINHERIT,          0x65)
ELF_RELOC(R_ARM_THM_JUMP11,             0x66)
ELF_RELOC(R_ARM_THM_JUMP8,              0x67)
ELF_RELOC(R_ARM_TLS_GD32,               0x68)
ELF_RELOC(R_ARM_TLS_LDM32,              0x69)
ELF_RELOC(R_ARM_TLS_LDO32,              0x6a)
ELF_RELOC(R_ARM_TLS_IE32,               0x6b)
ELF_RELOC(R_ARM_TLS_LE32,               0x6c)
ELF_RELOC(R_ARM_TLS_LDO12,              0x6d)
ELF_RELOC(R_ARM_TLS_LE12,               0x6e)
ELF_RELOC(R_ARM_TLS_IE12GP,             0x6f)
ELF_RELOC(R_ARM_PRIVATE_0,              0x70)
ELF_RELOC(R_ARM_PRIVATE_1,              0x71)
ELF_RELOC(R_ARM_PRIVATE_2,              0x72)
ELF_RELOC(R_ARM_PRIVATE_3,              0x73)
ELF_RELOC(R_ARM_PRIVATE_4,              0x74)
ELF_RELOC(R_ARM_PRIVATE_5,              0x75)
ELF_RELOC(R_ARM_PRIVATE_6,              0x76)
ELF_RELOC(R_ARM_PRIVATE_7,              0x77)
ELF_RELOC(R_ARM_PRIVATE_8,              0x78)
ELF_RELOC(R_ARM_PRIVATE_9,              0x79)
ELF_RELOC(R_ARM_PRIVATE_10,             0x7a)
ELF_RELOC(R_ARM_PRIVATE_11,             0x7b)
ELF_RELOC(R_ARM_PRIVATE_12,             0x7c)
ELF_RELOC(R_ARM_PRIVATE_13,             0x7d)
ELF_RELOC(R_ARM_PRIVATE_14,             0x7e)
ELF_RELOC(R_ARM_PRIVATE_15,             0x7f)
ELF_RELOC(R_ARM_ME_TOO,                 0x80)
ELF_RELOC(R_ARM_THM_TLS_DESCSEQ16,      0x81)
ELF_RELOC(R_ARM_THM_TLS_DESCSEQ32,      0x82)
ELF_RELOC(R_ARM_THM_ALU_ABS_G0_NC,      0x84)
ELF_RELOC(R_ARM_THM_ALU_ABS_G1_NC,      0x85)
ELF_RELOC(R_ARM_THM_ALU_ABS_G2_NC,      0x86)
ELF_RELOC(R_ARM_THM_ALU_ABS_G3,         0x87)
ELF_RELOC(R_ARM_THM_BF16,               0x88)
ELF_RELOC(R_ARM_THM_BF12,               0x89)
ELF_RELOC(R_ARM_THM_BF18,               0x8a)
ELF_RELOC(R_ARM_IRELATIVE,              0xa0)
//===-- BinaryFormat/ELFRelocs/M68k.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_68K_NONE,          0)  /* No reloc */
ELF_RELOC(R_68K_32,            1)  /* Direct 32 bit  */
ELF_RELOC(R_68K_16,            2)  /* Direct 16 bit  */
ELF_RELOC(R_68K_8,             3)  /* Direct 8 bit  */
ELF_RELOC(R_68K_PC32,          4)  /* PC relative 32 bit */
ELF_RELOC(R_68K_PC16,          5)  /* PC relative 16 bit */
ELF_RELOC(R_68K_PC8,           6)  /* PC relative 8 bit */
ELF_RELOC(R_68K_GOTPCREL32,    7)  /* 32 bit PC relative GOT entry */
ELF_RELOC(R_68K_GOTPCREL16,    8)  /* 16 bit PC relative GOT entry */
ELF_RELOC(R_68K_GOTPCREL8,     9)  /* 8 bit PC relative GOT entry */
ELF_RELOC(R_68K_GOTOFF32,      10) /* 32 bit GOT offset */
ELF_RELOC(R_68K_GOTOFF16,      11) /* 16 bit GOT offset */
ELF_RELOC(R_68K_GOTOFF8,       12) /* 8 bit GOT offset */
ELF_RELOC(R_68K_PLT32,         13) /* 32 bit PC relative PLT address */
ELF_RELOC(R_68K_PLT16,         14) /* 16 bit PC relative PLT address */
ELF_RELOC(R_68K_PLT8,          15) /* 8 bit PC relative PLT address */
ELF_RELOC(R_68K_PLTOFF32,      16) /* 32 bit PLT offset */
ELF_RELOC(R_68K_PLTOFF16,      17) /* 16 bit PLT offset */
ELF_RELOC(R_68K_PLTOFF8,       18) /* 8 bit PLT offset */
ELF_RELOC(R_68K_COPY,          19) /* Copy symbol at runtime */
ELF_RELOC(R_68K_GLOB_DAT,      20) /* Create GOT entry */
ELF_RELOC(R_68K_JMP_SLOT,      21) /* Create PLT entry */
ELF_RELOC(R_68K_RELATIVE,      22) /* Adjust by program base */
/* These are GNU extensions to enable C++ vtable garbage collection.  */
ELF_RELOC(R_68K_GNU_VTINHERIT, 23)
ELF_RELOC(R_68K_GNU_VTENTRY,   24)
/* TLS static relocations.  */
ELF_RELOC(R_68K_TLS_GD32,      25)
ELF_RELOC(R_68K_TLS_GD16,      26)
ELF_RELOC(R_68K_TLS_GD8,       27)
ELF_RELOC(R_68K_TLS_LDM32,     28)
ELF_RELOC(R_68K_TLS_LDM16,     29)
ELF_RELOC(R_68K_TLS_LDM8,      30)
ELF_RELOC(R_68K_TLS_LDO32,     31)
ELF_RELOC(R_68K_TLS_LDO16,     32)
ELF_RELOC(R_68K_TLS_LDO8,      33)
ELF_RELOC(R_68K_TLS_IE32,      34)
ELF_RELOC(R_68K_TLS_IE16,      35)
ELF_RELOC(R_68K_TLS_IE8,       36)
ELF_RELOC(R_68K_TLS_LE32,      37)
ELF_RELOC(R_68K_TLS_LE16,      38)
ELF_RELOC(R_68K_TLS_LE8,       39)
ELF_RELOC(R_68K_TLS_DTPMOD32,  40)
ELF_RELOC(R_68K_TLS_DTPREL32,  41)
ELF_RELOC(R_68K_TLS_TPREL32,   42)
//===-- BinaryFormat/ELFRelocs/ARC.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_ARC_NONE,                  0)
ELF_RELOC(R_ARC_8,                     1)
ELF_RELOC(R_ARC_16,                    2)
ELF_RELOC(R_ARC_24,                    3)
ELF_RELOC(R_ARC_32,                    4)
ELF_RELOC(R_ARC_N8,                    8)
ELF_RELOC(R_ARC_N16,                   9)
ELF_RELOC(R_ARC_N24,                  10)
ELF_RELOC(R_ARC_N32,                  11)
ELF_RELOC(R_ARC_SDA,                  12)
ELF_RELOC(R_ARC_SECTOFF,              13)
ELF_RELOC(R_ARC_S21H_PCREL,           14)
ELF_RELOC(R_ARC_S21W_PCREL,           15)
ELF_RELOC(R_ARC_S25H_PCREL,           16)
ELF_RELOC(R_ARC_S25W_PCREL,           17)
ELF_RELOC(R_ARC_SDA32,                18)
ELF_RELOC(R_ARC_SDA_LDST,             19)
ELF_RELOC(R_ARC_SDA_LDST1,            20)
ELF_RELOC(R_ARC_SDA_LDST2,            21)
ELF_RELOC(R_ARC_SDA16_LD,             22)
ELF_RELOC(R_ARC_SDA16_LD1,            23)
ELF_RELOC(R_ARC_SDA16_LD2,            24)
ELF_RELOC(R_ARC_S13_PCREL,            25)
ELF_RELOC(R_ARC_W,                    26)
ELF_RELOC(R_ARC_32_ME,                27)
ELF_RELOC(R_ARC_32_ME_S,             105)
ELF_RELOC(R_ARC_N32_ME,               28)
ELF_RELOC(R_ARC_SECTOFF_ME,           29)
ELF_RELOC(R_ARC_SDA32_ME,             30)
ELF_RELOC(R_ARC_W_ME,                 31)
ELF_RELOC(R_AC_SECTOFF_U8,            35)
ELF_RELOC(R_AC_SECTOFF_U8_1,          36)
ELF_RELOC(R_AC_SECTOFF_U8_2,          37)
ELF_RELOC(R_AC_SECTOFF_S9,            38)
ELF_RELOC(R_AC_SECTOFF_S9_1,          39)
ELF_RELOC(R_AC_SECTOFF_S9_2,          40)
ELF_RELOC(R_ARC_SECTOFF_ME_1,         41)
ELF_RELOC(R_ARC_SECTOFF_ME_2,         42)
ELF_RELOC(R_ARC_SECTOFF_1,            43)
ELF_RELOC(R_ARC_SECTOFF_2,            44)
ELF_RELOC(R_ARC_SDA_12,               45)
ELF_RELOC(R_ARC_SDA16_ST2,            48)
ELF_RELOC(R_ARC_32_PCREL,             49)
ELF_RELOC(R_ARC_PC32,                 50)
ELF_RELOC(R_ARC_GOT32,                59)
ELF_RELOC(R_ARC_GOTPC32,              51)
ELF_RELOC(R_ARC_PLT32,                52)
ELF_RELOC(R_ARC_COPY,                 53)
ELF_RELOC(R_ARC_GLOB_DAT,             54)
ELF_RELOC(R_ARC_JMP_SLOT,             55)
ELF_RELOC(R_ARC_RELATIVE,             56)
ELF_RELOC(R_ARC_GOTOFF,               57)
ELF_RELOC(R_ARC_GOTPC,                58)
ELF_RELOC(R_ARC_S21W_PCREL_PLT,       60)
ELF_RELOC(R_ARC_S25H_PCREL_PLT,       61)
ELF_RELOC(R_ARC_JLI_SECTOFF,          63)
ELF_RELOC(R_ARC_TLS_DTPMOD,           66)
ELF_RELOC(R_ARC_TLS_TPOFF,            68)
ELF_RELOC(R_ARC_TLS_GD_GOT,           69)
ELF_RELOC(R_ARC_TLS_GD_LD,            70)
ELF_RELOC(R_ARC_TLS_GD_CALL,          71)
ELF_RELOC(R_ARC_TLS_IE_GOT,           72)
ELF_RELOC(R_ARC_TLS_DTPOFF,           67)
ELF_RELOC(R_ARC_TLS_DTPOFF_S9,        73)
ELF_RELOC(R_ARC_TLS_LE_S9,            74)
ELF_RELOC(R_ARC_TLS_LE_32,            75)
ELF_RELOC(R_ARC_S25W_PCREL_PLT,       76)
ELF_RELOC(R_ARC_S21H_PCREL_PLT,       77)
ELF_RELOC(R_ARC_NPS_CMEM16,           78)
//===-- BinaryFormat/ELFRelocs/x86_64.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_X86_64_NONE,        0)
ELF_RELOC(R_X86_64_64,          1)
ELF_RELOC(R_X86_64_PC32,        2)
ELF_RELOC(R_X86_64_GOT32,       3)
ELF_RELOC(R_X86_64_PLT32,       4)
ELF_RELOC(R_X86_64_COPY,        5)
ELF_RELOC(R_X86_64_GLOB_DAT,    6)
ELF_RELOC(R_X86_64_JUMP_SLOT,   7)
ELF_RELOC(R_X86_64_RELATIVE,    8)
ELF_RELOC(R_X86_64_GOTPCREL,    9)
ELF_RELOC(R_X86_64_32,          10)
ELF_RELOC(R_X86_64_32S,         11)
ELF_RELOC(R_X86_64_16,          12)
ELF_RELOC(R_X86_64_PC16,        13)
ELF_RELOC(R_X86_64_8,           14)
ELF_RELOC(R_X86_64_PC8,         15)
ELF_RELOC(R_X86_64_DTPMOD64,    16)
ELF_RELOC(R_X86_64_DTPOFF64,    17)
ELF_RELOC(R_X86_64_TPOFF64,     18)
ELF_RELOC(R_X86_64_TLSGD,       19)
ELF_RELOC(R_X86_64_TLSLD,       20)
ELF_RELOC(R_X86_64_DTPOFF32,    21)
ELF_RELOC(R_X86_64_GOTTPOFF,    22)
ELF_RELOC(R_X86_64_TPOFF32,     23)
ELF_RELOC(R_X86_64_PC64,        24)
ELF_RELOC(R_X86_64_GOTOFF64,    25)
ELF_RELOC(R_X86_64_GOTPC32,     26)
ELF_RELOC(R_X86_64_GOT64,       27)
ELF_RELOC(R_X86_64_GOTPCREL64,  28)
ELF_RELOC(R_X86_64_GOTPC64,     29)
ELF_RELOC(R_X86_64_GOTPLT64,    30)
ELF_RELOC(R_X86_64_PLTOFF64,    31)
ELF_RELOC(R_X86_64_SIZE32,      32)
ELF_RELOC(R_X86_64_SIZE64,      33)
ELF_RELOC(R_X86_64_GOTPC32_TLSDESC,  34)
ELF_RELOC(R_X86_64_TLSDESC_CALL,     35)
ELF_RELOC(R_X86_64_TLSDESC,     36)
ELF_RELOC(R_X86_64_IRELATIVE,   37)
ELF_RELOC(R_X86_64_GOTPCRELX,   41)
ELF_RELOC(R_X86_64_REX_GOTPCRELX,    42)
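
// A minimal sketch of a second common consumer: re-expanding the same table
// to map a relocation value back to its name. The function name below is
// illustrative; LLVM's own value-to-name mapping lives in lib/Object/ELF.cpp.
#include <cstdint>

inline const char *getX86_64RelocationName(std::uint32_t Type) {
  switch (Type) {
#define ELF_RELOC(name, value)                                                \
  case value:                                                                 \
    return #name;
#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
#undef ELF_RELOC
  default:
    return "Unknown";
  }
}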
//===-- BinaryFormat/ELFRelocs/Mips.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_MIPS_NONE,                0)
ELF_RELOC(R_MIPS_16,                  1)
ELF_RELOC(R_MIPS_32,                  2)
ELF_RELOC(R_MIPS_REL32,               3)
ELF_RELOC(R_MIPS_26,                  4)
ELF_RELOC(R_MIPS_HI16,                5)
ELF_RELOC(R_MIPS_LO16,                6)
ELF_RELOC(R_MIPS_GPREL16,             7)
ELF_RELOC(R_MIPS_LITERAL,             8)
ELF_RELOC(R_MIPS_GOT16,               9)
ELF_RELOC(R_MIPS_PC16,               10)
ELF_RELOC(R_MIPS_CALL16,             11)
ELF_RELOC(R_MIPS_GPREL32,            12)
ELF_RELOC(R_MIPS_UNUSED1,            13)
ELF_RELOC(R_MIPS_UNUSED2,            14)
ELF_RELOC(R_MIPS_UNUSED3,            15)
ELF_RELOC(R_MIPS_SHIFT5,             16)
ELF_RELOC(R_MIPS_SHIFT6,             17)
ELF_RELOC(R_MIPS_64,                 18)
ELF_RELOC(R_MIPS_GOT_DISP,           19)
ELF_RELOC(R_MIPS_GOT_PAGE,           20)
ELF_RELOC(R_MIPS_GOT_OFST,           21)
ELF_RELOC(R_MIPS_GOT_HI16,           22)
ELF_RELOC(R_MIPS_GOT_LO16,           23)
ELF_RELOC(R_MIPS_SUB,                24)
ELF_RELOC(R_MIPS_INSERT_A,           25)
ELF_RELOC(R_MIPS_INSERT_B,           26)
ELF_RELOC(R_MIPS_DELETE,             27)
ELF_RELOC(R_MIPS_HIGHER,             28)
ELF_RELOC(R_MIPS_HIGHEST,            29)
ELF_RELOC(R_MIPS_CALL_HI16,          30)
ELF_RELOC(R_MIPS_CALL_LO16,          31)
ELF_RELOC(R_MIPS_SCN_DISP,           32)
ELF_RELOC(R_MIPS_REL16,              33)
ELF_RELOC(R_MIPS_ADD_IMMEDIATE,      34)
ELF_RELOC(R_MIPS_PJUMP,              35)
ELF_RELOC(R_MIPS_RELGOT,             36)
ELF_RELOC(R_MIPS_JALR,               37)
ELF_RELOC(R_MIPS_TLS_DTPMOD32,       38)
ELF_RELOC(R_MIPS_TLS_DTPREL32,       39)
ELF_RELOC(R_MIPS_TLS_DTPMOD64,       40)
ELF_RELOC(R_MIPS_TLS_DTPREL64,       41)
ELF_RELOC(R_MIPS_TLS_GD,             42)
ELF_RELOC(R_MIPS_TLS_LDM,            43)
ELF_RELOC(R_MIPS_TLS_DTPREL_HI16,    44)
ELF_RELOC(R_MIPS_TLS_DTPREL_LO16,    45)
ELF_RELOC(R_MIPS_TLS_GOTTPREL,       46)
ELF_RELOC(R_MIPS_TLS_TPREL32,        47)
ELF_RELOC(R_MIPS_TLS_TPREL64,        48)
ELF_RELOC(R_MIPS_TLS_TPREL_HI16,     49)
ELF_RELOC(R_MIPS_TLS_TPREL_LO16,     50)
ELF_RELOC(R_MIPS_GLOB_DAT,           51)
ELF_RELOC(R_MIPS_PC21_S2,            60)
ELF_RELOC(R_MIPS_PC26_S2,            61)
ELF_RELOC(R_MIPS_PC18_S3,            62)
ELF_RELOC(R_MIPS_PC19_S2,            63)
ELF_RELOC(R_MIPS_PCHI16,             64)
ELF_RELOC(R_MIPS_PCLO16,             65)
ELF_RELOC(R_MIPS16_26,               100)
ELF_RELOC(R_MIPS16_GPREL,            101)
ELF_RELOC(R_MIPS16_GOT16,            102)
ELF_RELOC(R_MIPS16_CALL16,           103)
ELF_RELOC(R_MIPS16_HI16,             104)
ELF_RELOC(R_MIPS16_LO16,             105)
ELF_RELOC(R_MIPS16_TLS_GD,           106)
ELF_RELOC(R_MIPS16_TLS_LDM,          107)
ELF_RELOC(R_MIPS16_TLS_DTPREL_HI16,  108)
ELF_RELOC(R_MIPS16_TLS_DTPREL_LO16,  109)
ELF_RELOC(R_MIPS16_TLS_GOTTPREL,     110)
ELF_RELOC(R_MIPS16_TLS_TPREL_HI16,   111)
ELF_RELOC(R_MIPS16_TLS_TPREL_LO16,   112)
ELF_RELOC(R_MIPS_COPY,               126)
ELF_RELOC(R_MIPS_JUMP_SLOT,          127)
ELF_RELOC(R_MICROMIPS_26_S1,         133)
ELF_RELOC(R_MICROMIPS_HI16,          134)
ELF_RELOC(R_MICROMIPS_LO16,          135)
ELF_RELOC(R_MICROMIPS_GPREL16,       136)
ELF_RELOC(R_MICROMIPS_LITERAL,       137)
ELF_RELOC(R_MICROMIPS_GOT16,         138)
ELF_RELOC(R_MICROMIPS_PC7_S1,        139)
ELF_RELOC(R_MICROMIPS_PC10_S1,       140)
ELF_RELOC(R_MICROMIPS_PC16_S1,       141)
ELF_RELOC(R_MICROMIPS_CALL16,        142)
ELF_RELOC(R_MICROMIPS_GOT_DISP,      145)
ELF_RELOC(R_MICROMIPS_GOT_PAGE,      146)
ELF_RELOC(R_MICROMIPS_GOT_OFST,      147)
ELF_RELOC(R_MICROMIPS_GOT_HI16,      148)
ELF_RELOC(R_MICROMIPS_GOT_LO16,      149)
ELF_RELOC(R_MICROMIPS_SUB,           150)
ELF_RELOC(R_MICROMIPS_HIGHER,        151)
ELF_RELOC(R_MICROMIPS_HIGHEST,       152)
ELF_RELOC(R_MICROMIPS_CALL_HI16,     153)
ELF_RELOC(R_MICROMIPS_CALL_LO16,     154)
ELF_RELOC(R_MICROMIPS_SCN_DISP,      155)
ELF_RELOC(R_MICROMIPS_JALR,          156)
ELF_RELOC(R_MICROMIPS_HI0_LO16,      157)
ELF_RELOC(R_MICROMIPS_TLS_GD,           162)
ELF_RELOC(R_MICROMIPS_TLS_LDM,          163)
ELF_RELOC(R_MICROMIPS_TLS_DTPREL_HI16,  164)
ELF_RELOC(R_MICROMIPS_TLS_DTPREL_LO16,  165)
ELF_RELOC(R_MICROMIPS_TLS_GOTTPREL,     166)
ELF_RELOC(R_MICROMIPS_TLS_TPREL_HI16,   169)
ELF_RELOC(R_MICROMIPS_TLS_TPREL_LO16,   170)
ELF_RELOC(R_MICROMIPS_GPREL7_S2,        172)
ELF_RELOC(R_MICROMIPS_PC23_S2,          173)
ELF_RELOC(R_MICROMIPS_PC21_S1,          174)
ELF_RELOC(R_MICROMIPS_PC26_S1,          175)
ELF_RELOC(R_MICROMIPS_PC18_S3,          176)
ELF_RELOC(R_MICROMIPS_PC19_S2,          177)
ELF_RELOC(R_MIPS_NUM,                218)
ELF_RELOC(R_MIPS_PC32,               248)
ELF_RELOC(R_MIPS_EH,                 249)
//===-- BinaryFormat/ELFRelocs/VE.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// Relocation types are defined in the following documents:
//
//  - System V Application Binary Interface - VE Architecture
//    Processor Supplement
//  - ELF Handling For Thread-Local Storage - VE Architecture
//    Processor Supplement

ELF_RELOC(R_VE_NONE,         0)
ELF_RELOC(R_VE_REFLONG,      1)
ELF_RELOC(R_VE_REFQUAD,      2)
ELF_RELOC(R_VE_SREL32,       3)
ELF_RELOC(R_VE_HI32,         4)
ELF_RELOC(R_VE_LO32,         5)
ELF_RELOC(R_VE_PC_HI32,      6)
ELF_RELOC(R_VE_PC_LO32,      7)
ELF_RELOC(R_VE_GOT32,        8)
ELF_RELOC(R_VE_GOT_HI32,     9)
ELF_RELOC(R_VE_GOT_LO32,     10)
ELF_RELOC(R_VE_GOTOFF32,     11)
ELF_RELOC(R_VE_GOTOFF_HI32,  12)
ELF_RELOC(R_VE_GOTOFF_LO32,  13)
ELF_RELOC(R_VE_PLT32,        14)
ELF_RELOC(R_VE_PLT_HI32,     15)
ELF_RELOC(R_VE_PLT_LO32,     16)
ELF_RELOC(R_VE_RELATIVE,     17)
ELF_RELOC(R_VE_GLOB_DAT,     18)
ELF_RELOC(R_VE_JUMP_SLOT,    19)
ELF_RELOC(R_VE_COPY,         20)
ELF_RELOC(R_VE_DTPMOD64,     22)
ELF_RELOC(R_VE_DTPOFF64,     23)
// ELF_RELOC(R_VE_TPOFF64,     24)
ELF_RELOC(R_VE_TLS_GD_HI32,  25)
ELF_RELOC(R_VE_TLS_GD_LO32,  26)
// ELF_RELOC(R_VE_TLS_LD_HI32,  27)
// ELF_RELOC(R_VE_TLS_LD_LO32,  28)
// ELF_RELOC(R_VE_DTPOFF32,     29)
// ELF_RELOC(R_VE_TLS_IE_HI32,  30)
// ELF_RELOC(R_VE_TLS_IE_LO32,  31)
ELF_RELOC(R_VE_TPOFF_HI32,   32)
ELF_RELOC(R_VE_TPOFF_LO32,   33)
// ELF_RELOC(R_VE_TPOFF32,      34)
ELF_RELOC(R_VE_CALL_HI32,    35)
ELF_RELOC(R_VE_CALL_LO32,    36)
//===-- BinaryFormat/ELFRelocs/AVR.def --===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_AVR_NONE,                  0)
ELF_RELOC(R_AVR_32,                    1)
ELF_RELOC(R_AVR_7_PCREL,               2)
ELF_RELOC(R_AVR_13_PCREL,              3)
ELF_RELOC(R_AVR_16,                    4)
ELF_RELOC(R_AVR_16_PM,                 5)
ELF_RELOC(R_AVR_LO8_LDI,               6)
ELF_RELOC(R_AVR_HI8_LDI,               7)
ELF_RELOC(R_AVR_HH8_LDI,               8)
ELF_RELOC(R_AVR_LO8_LDI_NEG,           9)
ELF_RELOC(R_AVR_HI8_LDI_NEG,          10)
ELF_RELOC(R_AVR_HH8_LDI_NEG,          11)
ELF_RELOC(R_AVR_LO8_LDI_PM,           12)
ELF_RELOC(R_AVR_HI8_LDI_PM,           13)
ELF_RELOC(R_AVR_HH8_LDI_PM,           14)
ELF_RELOC(R_AVR_LO8_LDI_PM_NEG,       15)
ELF_RELOC(R_AVR_HI8_LDI_PM_NEG,       16)
ELF_RELOC(R_AVR_HH8_LDI_PM_NEG,       17)
ELF_RELOC(R_AVR_CALL,                 18)
ELF_RELOC(R_AVR_LDI,                  19)
ELF_RELOC(R_AVR_6,                    20)
ELF_RELOC(R_AVR_6_ADIW,               21)
ELF_RELOC(R_AVR_MS8_LDI,              22)
ELF_RELOC(R_AVR_MS8_LDI_NEG,          23)
ELF_RELOC(R_AVR_LO8_LDI_GS,           24)
ELF_RELOC(R_AVR_HI8_LDI_GS,           25)
ELF_RELOC(R_AVR_8,                    26)
ELF_RELOC(R_AVR_8_LO8,                27)
ELF_RELOC(R_AVR_8_HI8,                28)
ELF_RELOC(R_AVR_8_HLO8,               29)
ELF_RELOC(R_AVR_DIFF8,                30)
ELF_RELOC(R_AVR_DIFF16,               31)
ELF_RELOC(R_AVR_DIFF32,               32)
ELF_RELOC(R_AVR_LDS_STS_16,           33)
ELF_RELOC(R_AVR_PORT6,                34)
ELF_RELOC(R_AVR_PORT5,                35)
//===-- BinaryFormat/ELFRelocs/AMDGPU.def ----------------------------------===//

#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_AMDGPU_NONE,           0)
ELF_RELOC(R_AMDGPU_ABS32_LO,       1)
ELF_RELOC(R_AMDGPU_ABS32_HI,       2)
ELF_RELOC(R_AMDGPU_ABS64,          3)
ELF_RELOC(R_AMDGPU_REL32,          4)
ELF_RELOC(R_AMDGPU_REL64,          5)
ELF_RELOC(R_AMDGPU_ABS32,          6)
ELF_RELOC(R_AMDGPU_GOTPCREL,       7)
ELF_RELOC(R_AMDGPU_GOTPCREL32_LO,  8)
ELF_RELOC(R_AMDGPU_GOTPCREL32_HI,  9)
ELF_RELOC(R_AMDGPU_REL32_LO,      10)
ELF_RELOC(R_AMDGPU_REL32_HI,      11)
ELF_RELOC(R_AMDGPU_RELATIVE64,    13)
ELF_RELOC(R_AMDGPU_REL16,         14)
//===-- BinaryFormat/ELFRelocs/PowerPC.def ---------------------------------===//
#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// glibc's PowerPC asm/sigcontext.h, when compiling for PPC64, has the
// unfortunate behavior of including asm/elf.h, which defines R_PPC_NONE, etc.
// to their corresponding integer values. As a result, we need to undef them
// here before continuing.
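//
// For example, if asm/elf.h has already done "#define R_PPC_NONE 0", the
// entry "ELF_RELOC(R_PPC_NONE, 0)" below would be preprocessed into
// "ELF_RELOC(0, 0)" and would no longer declare the intended enumerator.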

#undef R_PPC_NONE
#undef R_PPC_ADDR32
#undef R_PPC_ADDR24
#undef R_PPC_ADDR16
#undef R_PPC_ADDR16_LO
#undef R_PPC_ADDR16_HI
#undef R_PPC_ADDR16_HA
#undef R_PPC_ADDR14
#undef R_PPC_ADDR14_BRTAKEN
#undef R_PPC_ADDR14_BRNTAKEN
#undef R_PPC_REL24
#undef R_PPC_REL14
#undef R_PPC_REL14_BRTAKEN
#undef R_PPC_REL14_BRNTAKEN
#undef R_PPC_GOT16
#undef R_PPC_GOT16_LO
#undef R_PPC_GOT16_HI
#undef R_PPC_GOT16_HA
#undef R_PPC_PLTREL24
#undef R_PPC_COPY
#undef R_PPC_GLOB_DAT
#undef R_PPC_JMP_SLOT
#undef R_PPC_RELATIVE
#undef R_PPC_LOCAL24PC
#undef R_PPC_UADDR32
#undef R_PPC_UADDR16
#undef R_PPC_REL32
#undef R_PPC_PLT32
#undef R_PPC_PLTREL32
#undef R_PPC_PLT16_LO
#undef R_PPC_PLT16_HI
#undef R_PPC_PLT16_HA
#undef R_PPC_SDAREL16
#undef R_PPC_SECTOFF
#undef R_PPC_SECTOFF_LO
#undef R_PPC_SECTOFF_HI
#undef R_PPC_SECTOFF_HA
#undef R_PPC_ADDR30
#undef R_PPC_TLS
#undef R_PPC_DTPMOD32
#undef R_PPC_TPREL16
#undef R_PPC_TPREL16_LO
#undef R_PPC_TPREL16_HI
#undef R_PPC_TPREL16_HA
#undef R_PPC_TPREL32
#undef R_PPC_DTPREL16
#undef R_PPC_DTPREL16_LO
#undef R_PPC_DTPREL16_HI
#undef R_PPC_DTPREL16_HA
#undef R_PPC_DTPREL32
#undef R_PPC_GOT_TLSGD16
#undef R_PPC_GOT_TLSGD16_LO
#undef R_PPC_GOT_TLSGD16_HI
#undef R_PPC_GOT_TLSGD16_HA
#undef R_PPC_GOT_TLSLD16
#undef R_PPC_GOT_TLSLD16_LO
#undef R_PPC_GOT_TLSLD16_HI
#undef R_PPC_GOT_TLSLD16_HA
#undef R_PPC_GOT_TPREL16
#undef R_PPC_GOT_TPREL16_LO
#undef R_PPC_GOT_TPREL16_HI
#undef R_PPC_GOT_TPREL16_HA
#undef R_PPC_GOT_DTPREL16
#undef R_PPC_GOT_DTPREL16_LO
#undef R_PPC_GOT_DTPREL16_HI
#undef R_PPC_GOT_DTPREL16_HA
#undef R_PPC_TLSGD
#undef R_PPC_TLSLD
#undef R_PPC_REL16
#undef R_PPC_REL16_LO
#undef R_PPC_REL16_HI
#undef R_PPC_REL16_HA

ELF_RELOC(R_PPC_NONE,                   0)      /* No relocation. */
ELF_RELOC(R_PPC_ADDR32,                 1)
ELF_RELOC(R_PPC_ADDR24,                 2)
ELF_RELOC(R_PPC_ADDR16,                 3)
ELF_RELOC(R_PPC_ADDR16_LO,              4)
ELF_RELOC(R_PPC_ADDR16_HI,              5)
ELF_RELOC(R_PPC_ADDR16_HA,              6)
ELF_RELOC(R_PPC_ADDR14,                 7)
ELF_RELOC(R_PPC_ADDR14_BRTAKEN,         8)
ELF_RELOC(R_PPC_ADDR14_BRNTAKEN,        9)
ELF_RELOC(R_PPC_REL24,                  10)
ELF_RELOC(R_PPC_REL14,                  11)
ELF_RELOC(R_PPC_REL14_BRTAKEN,          12)
ELF_RELOC(R_PPC_REL14_BRNTAKEN,         13)
ELF_RELOC(R_PPC_GOT16,                  14)
ELF_RELOC(R_PPC_GOT16_LO,               15)
ELF_RELOC(R_PPC_GOT16_HI,               16)
ELF_RELOC(R_PPC_GOT16_HA,               17)
ELF_RELOC(R_PPC_PLTREL24,               18)
ELF_RELOC(R_PPC_COPY,                   19)
ELF_RELOC(R_PPC_GLOB_DAT,               20)
ELF_RELOC(R_PPC_JMP_SLOT,               21)
ELF_RELOC(R_PPC_RELATIVE,               22)
ELF_RELOC(R_PPC_LOCAL24PC,              23)
ELF_RELOC(R_PPC_UADDR32,                24)
ELF_RELOC(R_PPC_UADDR16,                25)
ELF_RELOC(R_PPC_REL32,                  26)
ELF_RELOC(R_PPC_PLT32,                  27)
ELF_RELOC(R_PPC_PLTREL32,               28)
ELF_RELOC(R_PPC_PLT16_LO,               29)
ELF_RELOC(R_PPC_PLT16_HI,               30)
ELF_RELOC(R_PPC_PLT16_HA,               31)
ELF_RELOC(R_PPC_SDAREL16,               32)
ELF_RELOC(R_PPC_SECTOFF,                33)
ELF_RELOC(R_PPC_SECTOFF_LO,             34)
ELF_RELOC(R_PPC_SECTOFF_HI,             35)
ELF_RELOC(R_PPC_SECTOFF_HA,             36)
ELF_RELOC(R_PPC_ADDR30,                 37)
ELF_RELOC(R_PPC_TLS,                    67)
ELF_RELOC(R_PPC_DTPMOD32,               68)
ELF_RELOC(R_PPC_TPREL16,                69)
ELF_RELOC(R_PPC_TPREL16_LO,             70)
ELF_RELOC(R_PPC_TPREL16_HI,             71)
ELF_RELOC(R_PPC_TPREL16_HA,             72)
ELF_RELOC(R_PPC_TPREL32,                73)
ELF_RELOC(R_PPC_DTPREL16,               74)
ELF_RELOC(R_PPC_DTPREL16_LO,            75)
ELF_RELOC(R_PPC_DTPREL16_HI,            76)
ELF_RELOC(R_PPC_DTPREL16_HA,            77)
ELF_RELOC(R_PPC_DTPREL32,               78)
ELF_RELOC(R_PPC_GOT_TLSGD16,            79)
ELF_RELOC(R_PPC_GOT_TLSGD16_LO,         80)
ELF_RELOC(R_PPC_GOT_TLSGD16_HI,         81)
ELF_RELOC(R_PPC_GOT_TLSGD16_HA,         82)
ELF_RELOC(R_PPC_GOT_TLSLD16,            83)
ELF_RELOC(R_PPC_GOT_TLSLD16_LO,         84)
ELF_RELOC(R_PPC_GOT_TLSLD16_HI,         85)
ELF_RELOC(R_PPC_GOT_TLSLD16_HA,         86)
ELF_RELOC(R_PPC_GOT_TPREL16,            87)
ELF_RELOC(R_PPC_GOT_TPREL16_LO,         88)
ELF_RELOC(R_PPC_GOT_TPREL16_HI,         89)
ELF_RELOC(R_PPC_GOT_TPREL16_HA,         90)
ELF_RELOC(R_PPC_GOT_DTPREL16,           91)
ELF_RELOC(R_PPC_GOT_DTPREL16_LO,        92)
ELF_RELOC(R_PPC_GOT_DTPREL16_HI,        93)
ELF_RELOC(R_PPC_GOT_DTPREL16_HA,        94)
ELF_RELOC(R_PPC_TLSGD,                  95)
ELF_RELOC(R_PPC_TLSLD,                  96)
ELF_RELOC(R_PPC_IRELATIVE,              248)
ELF_RELOC(R_PPC_REL16,                  249)
ELF_RELOC(R_PPC_REL16_LO,               250)
ELF_RELOC(R_PPC_REL16_HI,               251)
ELF_RELOC(R_PPC_REL16_HA,               252)
//===-- BinaryFormat/ELFRelocs/PowerPC64.def -------------------------------===//
#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// glibc's PowerPC asm/sigcontext.h, when compiling for PPC64, has the
// unfortunate behavior of including asm/elf.h, which defines R_PPC64_NONE,
// etc. to their corresponding integer values. As a result, we need to undef
// them here before continuing.

#undef R_PPC64_NONE
#undef R_PPC64_ADDR32
#undef R_PPC64_ADDR24
#undef R_PPC64_ADDR16
#undef R_PPC64_ADDR16_LO
#undef R_PPC64_ADDR16_HI
#undef R_PPC64_ADDR16_HA
#undef R_PPC64_ADDR14
#undef R_PPC64_ADDR14_BRTAKEN
#undef R_PPC64_ADDR14_BRNTAKEN
#undef R_PPC64_REL24
#undef R_PPC64_REL14
#undef R_PPC64_REL14_BRTAKEN
#undef R_PPC64_REL14_BRNTAKEN
#undef R_PPC64_GOT16
#undef R_PPC64_GOT16_LO
#undef R_PPC64_GOT16_HI
#undef R_PPC64_GOT16_HA
#undef R_PPC64_COPY
#undef R_PPC64_GLOB_DAT
#undef R_PPC64_JMP_SLOT
#undef R_PPC64_RELATIVE
#undef R_PPC64_REL32
#undef R_PPC64_ADDR64
#undef R_PPC64_ADDR16_HIGHER
#undef R_PPC64_ADDR16_HIGHERA
#undef R_PPC64_ADDR16_HIGHEST
#undef R_PPC64_ADDR16_HIGHESTA
#undef R_PPC64_REL64
#undef R_PPC64_TOC16
#undef R_PPC64_TOC16_LO
#undef R_PPC64_TOC16_HI
#undef R_PPC64_TOC16_HA
#undef R_PPC64_TOC
#undef R_PPC64_ADDR16_DS
#undef R_PPC64_ADDR16_LO_DS
#undef R_PPC64_GOT16_DS
#undef R_PPC64_GOT16_LO_DS
#undef R_PPC64_TOC16_DS
#undef R_PPC64_TOC16_LO_DS
#undef R_PPC64_TLS
#undef R_PPC64_DTPMOD64
#undef R_PPC64_TPREL16
#undef R_PPC64_TPREL16_LO
#undef R_PPC64_TPREL16_HI
#undef R_PPC64_TPREL16_HA
#undef R_PPC64_TPREL64
#undef R_PPC64_DTPREL16
#undef R_PPC64_DTPREL16_LO
#undef R_PPC64_DTPREL16_HI
#undef R_PPC64_DTPREL16_HA
#undef R_PPC64_DTPREL64
#undef R_PPC64_GOT_TLSGD16
#undef R_PPC64_GOT_TLSGD16_LO
#undef R_PPC64_GOT_TLSGD16_HI
#undef R_PPC64_GOT_TLSGD16_HA
#undef R_PPC64_GOT_TLSLD16
#undef R_PPC64_GOT_TLSLD16_LO
#undef R_PPC64_GOT_TLSLD16_HI
#undef R_PPC64_GOT_TLSLD16_HA
#undef R_PPC64_GOT_TPREL16_DS
#undef R_PPC64_GOT_TPREL16_LO_DS
#undef R_PPC64_GOT_TPREL16_HI
#undef R_PPC64_GOT_TPREL16_HA
#undef R_PPC64_GOT_DTPREL16_DS
#undef R_PPC64_GOT_DTPREL16_LO_DS
#undef R_PPC64_GOT_DTPREL16_HI
#undef R_PPC64_GOT_DTPREL16_HA
#undef R_PPC64_TPREL16_DS
#undef R_PPC64_TPREL16_LO_DS
#undef R_PPC64_TPREL16_HIGHER
#undef R_PPC64_TPREL16_HIGHERA
#undef R_PPC64_TPREL16_HIGHEST
#undef R_PPC64_TPREL16_HIGHESTA
#undef R_PPC64_DTPREL16_DS
#undef R_PPC64_DTPREL16_LO_DS
#undef R_PPC64_DTPREL16_HIGHER
#undef R_PPC64_DTPREL16_HIGHERA
#undef R_PPC64_DTPREL16_HIGHEST
#undef R_PPC64_DTPREL16_HIGHESTA
#undef R_PPC64_TLSGD
#undef R_PPC64_TLSLD
#undef R_PPC64_ADDR16_HIGH
#undef R_PPC64_ADDR16_HIGHA
#undef R_PPC64_TPREL16_HIGH
#undef R_PPC64_TPREL16_HIGHA
#undef R_PPC64_DTPREL16_HIGH
#undef R_PPC64_DTPREL16_HIGHA
#undef R_PPC64_REL24_NOTOC
#undef R_PPC64_PCREL_OPT
#undef R_PPC64_PCREL34
#undef R_PPC64_GOT_PCREL34
#undef R_PPC64_TPREL34
#undef R_PPC64_DTPREL34
#undef R_PPC64_GOT_TLSGD_PCREL34
#undef R_PPC64_GOT_TLSLD_PCREL34
#undef R_PPC64_GOT_TPREL_PCREL34
#undef R_PPC64_IRELATIVE
#undef R_PPC64_REL16
#undef R_PPC64_REL16_LO
#undef R_PPC64_REL16_HI
#undef R_PPC64_REL16_HA

ELF_RELOC(R_PPC64_NONE,                 0)
ELF_RELOC(R_PPC64_ADDR32,               1)
ELF_RELOC(R_PPC64_ADDR24,               2)
ELF_RELOC(R_PPC64_ADDR16,               3)
ELF_RELOC(R_PPC64_ADDR16_LO,            4)
ELF_RELOC(R_PPC64_ADDR16_HI,            5)
ELF_RELOC(R_PPC64_ADDR16_HA,            6)
ELF_RELOC(R_PPC64_ADDR14,               7)
ELF_RELOC(R_PPC64_ADDR14_BRTAKEN,       8)
ELF_RELOC(R_PPC64_ADDR14_BRNTAKEN,      9)
ELF_RELOC(R_PPC64_REL24,                10)
ELF_RELOC(R_PPC64_REL14,                11)
ELF_RELOC(R_PPC64_REL14_BRTAKEN,        12)
ELF_RELOC(R_PPC64_REL14_BRNTAKEN,       13)
ELF_RELOC(R_PPC64_GOT16,                14)
ELF_RELOC(R_PPC64_GOT16_LO,             15)
ELF_RELOC(R_PPC64_GOT16_HI,             16)
ELF_RELOC(R_PPC64_GOT16_HA,             17)
ELF_RELOC(R_PPC64_COPY,                 19)
ELF_RELOC(R_PPC64_GLOB_DAT,             20)
ELF_RELOC(R_PPC64_JMP_SLOT,             21)
ELF_RELOC(R_PPC64_RELATIVE,             22)
ELF_RELOC(R_PPC64_REL32,                26)
ELF_RELOC(R_PPC64_ADDR64,               38)
ELF_RELOC(R_PPC64_ADDR16_HIGHER,        39)
ELF_RELOC(R_PPC64_ADDR16_HIGHERA,       40)
ELF_RELOC(R_PPC64_ADDR16_HIGHEST,       41)
ELF_RELOC(R_PPC64_ADDR16_HIGHESTA,      42)
ELF_RELOC(R_PPC64_REL64,                44)
ELF_RELOC(R_PPC64_TOC16,                47)
ELF_RELOC(R_PPC64_TOC16_LO,             48)
ELF_RELOC(R_PPC64_TOC16_HI,             49)
ELF_RELOC(R_PPC64_TOC16_HA,             50)
ELF_RELOC(R_PPC64_TOC,                  51)
ELF_RELOC(R_PPC64_ADDR16_DS,            56)
ELF_RELOC(R_PPC64_ADDR16_LO_DS,         57)
ELF_RELOC(R_PPC64_GOT16_DS,             58)
ELF_RELOC(R_PPC64_GOT16_LO_DS,          59)
ELF_RELOC(R_PPC64_TOC16_DS,             63)
ELF_RELOC(R_PPC64_TOC16_LO_DS,          64)
ELF_RELOC(R_PPC64_TLS,                  67)
ELF_RELOC(R_PPC64_DTPMOD64,             68)
ELF_RELOC(R_PPC64_TPREL16,              69)
ELF_RELOC(R_PPC64_TPREL16_LO,           70)
ELF_RELOC(R_PPC64_TPREL16_HI,           71)
ELF_RELOC(R_PPC64_TPREL16_HA,           72)
ELF_RELOC(R_PPC64_TPREL64,              73)
ELF_RELOC(R_PPC64_DTPREL16,             74)
ELF_RELOC(R_PPC64_DTPREL16_LO,          75)
ELF_RELOC(R_PPC64_DTPREL16_HI,          76)
ELF_RELOC(R_PPC64_DTPREL16_HA,          77)
ELF_RELOC(R_PPC64_DTPREL64,             78)
ELF_RELOC(R_PPC64_GOT_TLSGD16,          79)
ELF_RELOC(R_PPC64_GOT_TLSGD16_LO,       80)
ELF_RELOC(R_PPC64_GOT_TLSGD16_HI,       81)
ELF_RELOC(R_PPC64_GOT_TLSGD16_HA,       82)
ELF_RELOC(R_PPC64_GOT_TLSLD16,          83)
ELF_RELOC(R_PPC64_GOT_TLSLD16_LO,       84)
ELF_RELOC(R_PPC64_GOT_TLSLD16_HI,       85)
ELF_RELOC(R_PPC64_GOT_TLSLD16_HA,       86)
ELF_RELOC(R_PPC64_GOT_TPREL16_DS,       87)
ELF_RELOC(R_PPC64_GOT_TPREL16_LO_DS,    88)
ELF_RELOC(R_PPC64_GOT_TPREL16_HI,       89)
ELF_RELOC(R_PPC64_GOT_TPREL16_HA,       90)
ELF_RELOC(R_PPC64_GOT_DTPREL16_DS,      91)
ELF_RELOC(R_PPC64_GOT_DTPREL16_LO_DS,   92)
ELF_RELOC(R_PPC64_GOT_DTPREL16_HI,      93)
ELF_RELOC(R_PPC64_GOT_DTPREL16_HA,      94)
ELF_RELOC(R_PPC64_TPREL16_DS,           95)
ELF_RELOC(R_PPC64_TPREL16_LO_DS,        96)
ELF_RELOC(R_PPC64_TPREL16_HIGHER,       97)
ELF_RELOC(R_PPC64_TPREL16_HIGHERA,      98)
ELF_RELOC(R_PPC64_TPREL16_HIGHEST,      99)
ELF_RELOC(R_PPC64_TPREL16_HIGHESTA,     100)
ELF_RELOC(R_PPC64_DTPREL16_DS,          101)
ELF_RELOC(R_PPC64_DTPREL16_LO_DS,       102)
ELF_RELOC(R_PPC64_DTPREL16_HIGHER,      103)
ELF_RELOC(R_PPC64_DTPREL16_HIGHERA,     104)
ELF_RELOC(R_PPC64_DTPREL16_HIGHEST,     105)
ELF_RELOC(R_PPC64_DTPREL16_HIGHESTA,    106)
ELF_RELOC(R_PPC64_TLSGD,                107)
ELF_RELOC(R_PPC64_TLSLD,                108)
ELF_RELOC(R_PPC64_ADDR16_HIGH,          110)
ELF_RELOC(R_PPC64_ADDR16_HIGHA,         111)
ELF_RELOC(R_PPC64_TPREL16_HIGH,         112)
ELF_RELOC(R_PPC64_TPREL16_HIGHA,        113)
ELF_RELOC(R_PPC64_DTPREL16_HIGH,        114)
ELF_RELOC(R_PPC64_DTPREL16_HIGHA,       115)
ELF_RELOC(R_PPC64_REL24_NOTOC,          116)
ELF_RELOC(R_PPC64_PCREL_OPT,            123)
ELF_RELOC(R_PPC64_PCREL34,              132)
ELF_RELOC(R_PPC64_GOT_PCREL34,          133)
ELF_RELOC(R_PPC64_TPREL34,              146)
ELF_RELOC(R_PPC64_DTPREL34,             147)
ELF_RELOC(R_PPC64_GOT_TLSGD_PCREL34,    148)
ELF_RELOC(R_PPC64_GOT_TLSLD_PCREL34,    149)
ELF_RELOC(R_PPC64_GOT_TPREL_PCREL34,    150)
ELF_RELOC(R_PPC64_IRELATIVE,            248)
ELF_RELOC(R_PPC64_REL16,                249)
ELF_RELOC(R_PPC64_REL16_LO,             250)
ELF_RELOC(R_PPC64_REL16_HI,             251)
ELF_RELOC(R_PPC64_REL16_HA,             252)
//===-- BinaryFormat/ELFRelocs/Lanai.def -----------------------------------===//
#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

// No relocation
ELF_RELOC(R_LANAI_NONE,        0)
// 21-bit symbol relocation
ELF_RELOC(R_LANAI_21,          1)
// 21-bit symbol relocation with last two bits masked to 0
ELF_RELOC(R_LANAI_21_F,        2)
// 25-bit branch targets
ELF_RELOC(R_LANAI_25,          3)
// General 32-bit relocation
ELF_RELOC(R_LANAI_32,          4)
// Upper 16-bits of a symbolic relocation
ELF_RELOC(R_LANAI_HI16,        5)
// Lower 16-bits of a symbolic relocation
ELF_RELOC(R_LANAI_LO16,        6)
//===-- BinaryFormat/ELFRelocs/SystemZ.def ---------------------------------===//
#ifndef ELF_RELOC
#error "ELF_RELOC must be defined"
#endif

ELF_RELOC(R_390_NONE,          0)
ELF_RELOC(R_390_8,             1)
ELF_RELOC(R_390_12,            2)
ELF_RELOC(R_390_16,            3)
ELF_RELOC(R_390_32,            4)
ELF_RELOC(R_390_PC32,          5)
ELF_RELOC(R_390_GOT12,         6)
ELF_RELOC(R_390_GOT32,         7)
ELF_RELOC(R_390_PLT32,         8)
ELF_RELOC(R_390_COPY,          9)
ELF_RELOC(R_390_GLOB_DAT,     10)
ELF_RELOC(R_390_JMP_SLOT,     11)
ELF_RELOC(R_390_RELATIVE,     12)
ELF_RELOC(R_390_GOTOFF,       13)
ELF_RELOC(R_390_GOTPC,        14)
ELF_RELOC(R_390_GOT16,        15)
ELF_RELOC(R_390_PC16,         16)
ELF_RELOC(R_390_PC16DBL,      17)
ELF_RELOC(R_390_PLT16DBL,     18)
ELF_RELOC(R_390_PC32DBL,      19)
ELF_RELOC(R_390_PLT32DBL,     20)
ELF_RELOC(R_390_GOTPCDBL,     21)
ELF_RELOC(R_390_64,           22)
ELF_RELOC(R_390_PC64,         23)
ELF_RELOC(R_390_GOT64,        24)
ELF_RELOC(R_390_PLT64,        25)
ELF_RELOC(R_390_GOTENT,       26)
ELF_RELOC(R_390_GOTOFF16,     27)
ELF_RELOC(R_390_GOTOFF64,     28)
ELF_RELOC(R_390_GOTPLT12,     29)
ELF_RELOC(R_390_GOTPLT16,     30)
ELF_RELOC(R_390_GOTPLT32,     31)
ELF_RELOC(R_390_GOTPLT64,     32)
ELF_RELOC(R_390_GOTPLTENT,    33)
ELF_RELOC(R_390_PLTOFF16,     34)
ELF_RELOC(R_390_PLTOFF32,     35)
ELF_RELOC(R_390_PLTOFF64,     36)
ELF_RELOC(R_390_TLS_LOAD,     37)
ELF_RELOC(R_390_TLS_GDCALL,   38)
ELF_RELOC(R_390_TLS_LDCALL,   39)
ELF_RELOC(R_390_TLS_GD32,     40)
ELF_RELOC(R_390_TLS_GD64,     41)
ELF_RELOC(R_390_TLS_GOTIE12,  42)
ELF_RELOC(R_390_TLS_GOTIE32,  43)
ELF_RELOC(R_390_TLS_GOTIE64,  44)
ELF_RELOC(R_390_TLS_LDM32,    45)
ELF_RELOC(R_390_TLS_LDM64,    46)
ELF_RELOC(R_390_TLS_IE32,     47)
ELF_RELOC(R_390_TLS_IE64,     48)
ELF_RELOC(R_390_TLS_IEENT,    49)
ELF_RELOC(R_390_TLS_LE32,     50)
ELF_RELOC(R_390_TLS_LE64,     51)
ELF_RELOC(R_390_TLS_LDO32,    52)
ELF_RELOC(R_390_TLS_LDO64,    53)
ELF_RELOC(R_390_TLS_DTPMOD,   54)
ELF_RELOC(R_390_TLS_DTPOFF,   55)
ELF_RELOC(R_390_TLS_TPOFF,    56)
ELF_RELOC(R_390_20,           57)
ELF_RELOC(R_390_GOT20,        58)
ELF_RELOC(R_390_GOTPLT20,     59)
ELF_RELOC(R_390_TLS_GOTIE20,  60)
ELF_RELOC(R_390_IRELATIVE,    61)
ELF_RELOC(R_390_PC12DBL,      62)
ELF_RELOC(R_390_PLT12DBL,     63)
ELF_RELOC(R_390_PC24DBL,      64)
ELF_RELOC(R_390_PLT24DBL,     65)
//===-- llvm/BinaryFormat/Dwarf.def - Dwarf definitions ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through Dwarf enumerators.
//
//===----------------------------------------------------------------------===//

// TODO: Add other DW-based macros.
#if !(                                                                         \
    defined HANDLE_DW_TAG || defined HANDLE_DW_AT || defined HANDLE_DW_FORM || \
    defined HANDLE_DW_OP || defined HANDLE_DW_OP_LLVM_USEROP ||               \
    defined HANDLE_DW_LANG || defined HANDLE_DW_ATE ||                         \
    defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_DEFAULTED ||             \
    defined HANDLE_DW_CC || defined HANDLE_DW_LNS || defined HANDLE_DW_LNE ||  \
    defined HANDLE_DW_LNCT || defined HANDLE_DW_MACRO ||                       \
    defined HANDLE_DW_MACRO_GNU || defined HANDLE_MACRO_FLAG ||                \
    defined HANDLE_DW_RLE || defined HANDLE_DW_LLE ||                          \
    (defined HANDLE_DW_CFA && defined HANDLE_DW_CFA_PRED) ||                   \
    defined HANDLE_DW_APPLE_PROPERTY || defined HANDLE_DW_UT ||                \
    defined HANDLE_DWARF_SECTION || defined HANDLE_DW_IDX ||                   \
    defined HANDLE_DW_END || defined HANDLE_DW_SECT)
#error "Missing macro definition of HANDLE_DW*"
#endif
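
// Illustrative usage (a sketch, not part of this file): a consumer defines
// only the handlers it needs before including this file, e.g. to generate
// the DW_TAG_* enumeration:
//
//   enum Tag : uint16_t {
//   #define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND) DW_TAG_##NAME = ID,
//   #include "llvm/BinaryFormat/Dwarf.def"
//   };
//
// Any handler left undefined falls back to the no-op defaults below.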

#ifndef HANDLE_DW_TAG
#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND)
#endif

// Note that DW_KIND is not a DWARF concept, but rather a way for us to
// generate a list of tags that belong together.
#ifndef DW_KIND_NONE
#define DW_KIND_NONE 0
#endif

#ifndef DW_KIND_TYPE
#define DW_KIND_TYPE 1
#endif
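
// Illustrative sketch (assumed consumer code, not part of this file): the
// KIND column makes it easy to derive predicates over tags, e.g.:
//
//   inline bool isType(Tag T) {
//     switch (T) {
//     default: return false;
//   #define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND)                   \
//     case DW_TAG_##NAME: return KIND == DW_KIND_TYPE;
//   #include "llvm/BinaryFormat/Dwarf.def"
//     }
//   }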

#ifndef HANDLE_DW_AT
#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR)
#endif

#ifndef HANDLE_DW_FORM
#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR)
#endif

#ifndef HANDLE_DW_OP
#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR)
#endif

#ifndef HANDLE_DW_OP_LLVM_USEROP
#define HANDLE_DW_OP_LLVM_USEROP(ID, NAME)
#endif

#ifndef HANDLE_DW_LANG
#define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR)
#endif

#ifndef HANDLE_DW_ATE
#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR)
#endif

#ifndef HANDLE_DW_VIRTUALITY
#define HANDLE_DW_VIRTUALITY(ID, NAME)
#endif

#ifndef HANDLE_DW_DEFAULTED
#define HANDLE_DW_DEFAULTED(ID, NAME)
#endif

#ifndef HANDLE_DW_CC
#define HANDLE_DW_CC(ID, NAME)
#endif

#ifndef HANDLE_DW_LNS
#define HANDLE_DW_LNS(ID, NAME)
#endif

#ifndef HANDLE_DW_LNE
#define HANDLE_DW_LNE(ID, NAME)
#endif

#ifndef HANDLE_DW_LNCT
#define HANDLE_DW_LNCT(ID, NAME)
#endif

#ifndef HANDLE_DW_MACRO
#define HANDLE_DW_MACRO(ID, NAME)
#endif

#ifndef HANDLE_DW_MACRO_GNU
#define HANDLE_DW_MACRO_GNU(ID, NAME)
#endif

#ifndef HANDLE_MACRO_FLAG
#define HANDLE_MACRO_FLAG(ID, NAME)
#endif

#ifndef HANDLE_DW_RLE
#define HANDLE_DW_RLE(ID, NAME)
#endif

#ifndef HANDLE_DW_LLE
#define HANDLE_DW_LLE(ID, NAME)
#endif

#ifndef HANDLE_DW_CFA
#define HANDLE_DW_CFA(ID, NAME)
#endif

#ifndef HANDLE_DW_CFA_PRED
#define HANDLE_DW_CFA_PRED(ID, NAME, PRED)
#endif

#ifndef HANDLE_DW_APPLE_PROPERTY
#define HANDLE_DW_APPLE_PROPERTY(ID, NAME)
#endif

#ifndef HANDLE_DW_UT
#define HANDLE_DW_UT(ID, NAME)
#endif

#ifndef HANDLE_DWARF_SECTION
#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION)
#endif

#ifndef HANDLE_DW_IDX
#define HANDLE_DW_IDX(ID, NAME)
#endif

#ifndef HANDLE_DW_END
#define HANDLE_DW_END(ID, NAME)
#endif

#ifndef HANDLE_DW_SECT
#define HANDLE_DW_SECT(ID, NAME)
#endif

HANDLE_DW_TAG(0x0000, null, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0001, array_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0002, class_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0003, entry_point, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0004, enumeration_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0005, formal_parameter, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0008, imported_declaration, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x000a, label, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x000b, lexical_block, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x000d, member, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x000f, pointer_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0010, reference_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0011, compile_unit, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0012, string_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0013, structure_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0015, subroutine_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0016, typedef, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0017, union_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0018, unspecified_parameters, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0019, variant, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x001a, common_block, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x001b, common_inclusion, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x001c, inheritance, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x001d, inlined_subroutine, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x001e, module, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x001f, ptr_to_member_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0020, set_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0021, subrange_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0022, with_stmt, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0023, access_declaration, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0024, base_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0025, catch_block, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0026, const_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0027, constant, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0028, enumerator, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0029, file_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x002a, friend, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x002b, namelist, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x002c, namelist_item, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x002d, packed_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x002e, subprogram, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x002f, template_type_parameter, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0030, template_value_parameter, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0031, thrown_type, 2, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0032, try_block, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0033, variant_part, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0034, variable, 2, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0035, volatile_type, 2, DWARF, DW_KIND_TYPE)
// New in DWARF v3:
HANDLE_DW_TAG(0x0036, dwarf_procedure, 3, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0037, restrict_type, 3, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0038, interface_type, 3, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0039, namespace, 3, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x003a, imported_module, 3, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x003b, unspecified_type, 3, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x003c, partial_unit, 3, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x003d, imported_unit, 3, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x003f, condition, 3, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0040, shared_type, 3, DWARF, DW_KIND_TYPE)
// New in DWARF v4:
HANDLE_DW_TAG(0x0041, type_unit, 4, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0042, rvalue_reference_type, 4, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0043, template_alias, 4, DWARF, DW_KIND_NONE)
// New in DWARF v5:
HANDLE_DW_TAG(0x0044, coarray_type, 5, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0045, generic_subrange, 5, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0046, dynamic_type, 5, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0047, atomic_type, 5, DWARF, DW_KIND_TYPE)
HANDLE_DW_TAG(0x0048, call_site, 5, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x0049, call_site_parameter, 5, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x004a, skeleton_unit, 5, DWARF, DW_KIND_NONE)
HANDLE_DW_TAG(0x004b, immutable_type, 5, DWARF, DW_KIND_TYPE)
// Vendor extensions:
HANDLE_DW_TAG(0x4081, MIPS_loop, 0, MIPS, DW_KIND_NONE)
// Conflicting:
// HANDLE_DW_TAG(0x4081, HP_array_descriptor, 0, HP, DW_KIND_NONE)
HANDLE_DW_TAG(0x4101, format_label, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4102, function_template, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4103, class_template, 0, GNU, DW_KIND_NONE)

HANDLE_DW_TAG(0x4104, GNU_BINCL, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4105, GNU_EINCL, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4106, GNU_template_template_param, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x4109, GNU_call_site, 0, GNU, DW_KIND_NONE)
HANDLE_DW_TAG(0x410a, GNU_call_site_parameter, 0, GNU, DW_KIND_NONE)

HANDLE_DW_TAG(0x4200, APPLE_property, 0, APPLE, DW_KIND_NONE)

HANDLE_DW_TAG(0x4201, SUN_function_template, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4202, SUN_class_template, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4203, SUN_struct_template, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4204, SUN_union_template, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4205, SUN_indirect_inheritance, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4206, SUN_codeflags, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4207, SUN_memop_info, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4208, SUN_omp_child_func, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x4209, SUN_rtti_descriptor, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x420a, SUN_dtor_info, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x420b, SUN_dtor, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x420c, SUN_f90_interface, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x420d, SUN_fortran_vax_structure, 0, SUN, DW_KIND_NONE)
HANDLE_DW_TAG(0x42ff, SUN_hi, 0, SUN, DW_KIND_NONE)

// LLVM
HANDLE_DW_TAG(0x4300, LLVM_ptrauth_type, 0, LLVM, DW_KIND_TYPE)

// DSP-C/Starcore __circ, _rev
HANDLE_DW_TAG(0x5101, ALTIUM_circ_type, 0, ALTIUM, DW_KIND_NONE)
HANDLE_DW_TAG(0x5102, ALTIUM_mwa_circ_type, 0, ALTIUM, DW_KIND_NONE)
HANDLE_DW_TAG(0x5103, ALTIUM_rev_carry_type, 0, ALTIUM, DW_KIND_NONE)
// M16 __rom qualifier
HANDLE_DW_TAG(0x5111, ALTIUM_rom, 0, ALTIUM, DW_KIND_NONE)

// LLVM
HANDLE_DW_TAG(0x6000, LLVM_annotation, 0, LLVM, DW_KIND_NONE)

// Green Hills.
HANDLE_DW_TAG(0x8004, GHS_namespace, 0, GHS, DW_KIND_NONE)
HANDLE_DW_TAG(0x8005, GHS_using_namespace, 0, GHS, DW_KIND_NONE)
HANDLE_DW_TAG(0x8006, GHS_using_declaration, 0, GHS, DW_KIND_NONE)
HANDLE_DW_TAG(0x8007, GHS_template_templ_param, 0, GHS, DW_KIND_NONE)

// Unified Parallel C.
HANDLE_DW_TAG(0x8765, UPC_shared_type, 0, UPC, DW_KIND_NONE)
HANDLE_DW_TAG(0x8766, UPC_strict_type, 0, UPC, DW_KIND_NONE)
HANDLE_DW_TAG(0x8767, UPC_relaxed, 0, UPC, DW_KIND_NONE)

HANDLE_DW_TAG(0xa000, PGI_kanji_type, 0, PGI, DW_KIND_NONE)
HANDLE_DW_TAG(0xa020, PGI_interface_block, 0, PGI, DW_KIND_NONE)

HANDLE_DW_TAG(0xb000, BORLAND_property, 0, BORLAND, DW_KIND_NONE)
HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string, 0, BORLAND, DW_KIND_TYPE)
HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array, 0, BORLAND, DW_KIND_TYPE)
HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set, 0, BORLAND, DW_KIND_TYPE)
HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant, 0, BORLAND, DW_KIND_TYPE)

// Attributes.
HANDLE_DW_AT(0x01, sibling, 2, DWARF)
HANDLE_DW_AT(0x02, location, 2, DWARF)
HANDLE_DW_AT(0x03, name, 2, DWARF)
HANDLE_DW_AT(0x09, ordering, 2, DWARF)
HANDLE_DW_AT(0x0b, byte_size, 2, DWARF)
HANDLE_DW_AT(0x0c, bit_offset, 2, DWARF)
HANDLE_DW_AT(0x0d, bit_size, 2, DWARF)
HANDLE_DW_AT(0x10, stmt_list, 2, DWARF)
HANDLE_DW_AT(0x11, low_pc, 2, DWARF)
HANDLE_DW_AT(0x12, high_pc, 2, DWARF)
HANDLE_DW_AT(0x13, language, 2, DWARF)
HANDLE_DW_AT(0x15, discr, 2, DWARF)
HANDLE_DW_AT(0x16, discr_value, 2, DWARF)
HANDLE_DW_AT(0x17, visibility, 2, DWARF)
HANDLE_DW_AT(0x18, import, 2, DWARF)
HANDLE_DW_AT(0x19, string_length, 2, DWARF)
HANDLE_DW_AT(0x1a, common_reference, 2, DWARF)
HANDLE_DW_AT(0x1b, comp_dir, 2, DWARF)
HANDLE_DW_AT(0x1c, const_value, 2, DWARF)
HANDLE_DW_AT(0x1d, containing_type, 2, DWARF)
HANDLE_DW_AT(0x1e, default_value, 2, DWARF)
HANDLE_DW_AT(0x20, inline, 2, DWARF)
HANDLE_DW_AT(0x21, is_optional, 2, DWARF)
HANDLE_DW_AT(0x22, lower_bound, 2, DWARF)
HANDLE_DW_AT(0x25, producer, 2, DWARF)
HANDLE_DW_AT(0x27, prototyped, 2, DWARF)
HANDLE_DW_AT(0x2a, return_addr, 2, DWARF)
HANDLE_DW_AT(0x2c, start_scope, 2, DWARF)
HANDLE_DW_AT(0x2e, bit_stride, 2, DWARF)
HANDLE_DW_AT(0x2f, upper_bound, 2, DWARF)
HANDLE_DW_AT(0x31, abstract_origin, 2, DWARF)
HANDLE_DW_AT(0x32, accessibility, 2, DWARF)
HANDLE_DW_AT(0x33, address_class, 2, DWARF)
HANDLE_DW_AT(0x34, artificial, 2, DWARF)
HANDLE_DW_AT(0x35, base_types, 2, DWARF)
HANDLE_DW_AT(0x36, calling_convention, 2, DWARF)
HANDLE_DW_AT(0x37, count, 2, DWARF)
HANDLE_DW_AT(0x38, data_member_location, 2, DWARF)
HANDLE_DW_AT(0x39, decl_column, 2, DWARF)
HANDLE_DW_AT(0x3a, decl_file, 2, DWARF)
HANDLE_DW_AT(0x3b, decl_line, 2, DWARF)
HANDLE_DW_AT(0x3c, declaration, 2, DWARF)
HANDLE_DW_AT(0x3d, discr_list, 2, DWARF)
HANDLE_DW_AT(0x3e, encoding, 2, DWARF)
HANDLE_DW_AT(0x3f, external, 2, DWARF)
HANDLE_DW_AT(0x40, frame_base, 2, DWARF)
HANDLE_DW_AT(0x41, friend, 2, DWARF)
HANDLE_DW_AT(0x42, identifier_case, 2, DWARF)
HANDLE_DW_AT(0x43, macro_info, 2, DWARF)
HANDLE_DW_AT(0x44, namelist_item, 2, DWARF)
HANDLE_DW_AT(0x45, priority, 2, DWARF)
HANDLE_DW_AT(0x46, segment, 2, DWARF)
HANDLE_DW_AT(0x47, specification, 2, DWARF)
HANDLE_DW_AT(0x48, static_link, 2, DWARF)
HANDLE_DW_AT(0x49, type, 2, DWARF)
HANDLE_DW_AT(0x4a, use_location, 2, DWARF)
HANDLE_DW_AT(0x4b, variable_parameter, 2, DWARF)
HANDLE_DW_AT(0x4c, virtuality, 2, DWARF)
HANDLE_DW_AT(0x4d, vtable_elem_location, 2, DWARF)
// New in DWARF v3:
HANDLE_DW_AT(0x4e, allocated, 3, DWARF)
HANDLE_DW_AT(0x4f, associated, 3, DWARF)
HANDLE_DW_AT(0x50, data_location, 3, DWARF)
HANDLE_DW_AT(0x51, byte_stride, 3, DWARF)
HANDLE_DW_AT(0x52, entry_pc, 3, DWARF)
HANDLE_DW_AT(0x53, use_UTF8, 3, DWARF)
HANDLE_DW_AT(0x54, extension, 3, DWARF)
HANDLE_DW_AT(0x55, ranges, 3, DWARF)
HANDLE_DW_AT(0x56, trampoline, 3, DWARF)
HANDLE_DW_AT(0x57, call_column, 3, DWARF)
HANDLE_DW_AT(0x58, call_file, 3, DWARF)
HANDLE_DW_AT(0x59, call_line, 3, DWARF)
HANDLE_DW_AT(0x5a, description, 3, DWARF)
HANDLE_DW_AT(0x5b, binary_scale, 3, DWARF)
HANDLE_DW_AT(0x5c, decimal_scale, 3, DWARF)
HANDLE_DW_AT(0x5d, small, 3, DWARF)
HANDLE_DW_AT(0x5e, decimal_sign, 3, DWARF)
HANDLE_DW_AT(0x5f, digit_count, 3, DWARF)
HANDLE_DW_AT(0x60, picture_string, 3, DWARF)
HANDLE_DW_AT(0x61, mutable, 3, DWARF)
HANDLE_DW_AT(0x62, threads_scaled, 3, DWARF)
HANDLE_DW_AT(0x63, explicit, 3, DWARF)
HANDLE_DW_AT(0x64, object_pointer, 3, DWARF)
HANDLE_DW_AT(0x65, endianity, 3, DWARF)
HANDLE_DW_AT(0x66, elemental, 3, DWARF)
HANDLE_DW_AT(0x67, pure, 3, DWARF)
HANDLE_DW_AT(0x68, recursive, 3, DWARF)
// New in DWARF v4:
HANDLE_DW_AT(0x69, signature, 4, DWARF)
HANDLE_DW_AT(0x6a, main_subprogram, 4, DWARF)
HANDLE_DW_AT(0x6b, data_bit_offset, 4, DWARF)
HANDLE_DW_AT(0x6c, const_expr, 4, DWARF)
HANDLE_DW_AT(0x6d, enum_class, 4, DWARF)
HANDLE_DW_AT(0x6e, linkage_name, 4, DWARF)
// New in DWARF v5:
HANDLE_DW_AT(0x6f, string_length_bit_size, 5, DWARF)
HANDLE_DW_AT(0x70, string_length_byte_size, 5, DWARF)
HANDLE_DW_AT(0x71, rank, 5, DWARF)
HANDLE_DW_AT(0x72, str_offsets_base, 5, DWARF)
HANDLE_DW_AT(0x73, addr_base, 5, DWARF)
HANDLE_DW_AT(0x74, rnglists_base, 5, DWARF)
HANDLE_DW_AT(0x75, dwo_id, 0, DWARF) ///< Retracted from DWARF v5.
HANDLE_DW_AT(0x76, dwo_name, 5, DWARF)
HANDLE_DW_AT(0x77, reference, 5, DWARF)
HANDLE_DW_AT(0x78, rvalue_reference, 5, DWARF)
HANDLE_DW_AT(0x79, macros, 5, DWARF)
HANDLE_DW_AT(0x7a, call_all_calls, 5, DWARF)
HANDLE_DW_AT(0x7b, call_all_source_calls, 5, DWARF)
HANDLE_DW_AT(0x7c, call_all_tail_calls, 5, DWARF)
HANDLE_DW_AT(0x7d, call_return_pc, 5, DWARF)
HANDLE_DW_AT(0x7e, call_value, 5, DWARF)
HANDLE_DW_AT(0x7f, call_origin, 5, DWARF)
HANDLE_DW_AT(0x80, call_parameter, 5, DWARF)
HANDLE_DW_AT(0x81, call_pc, 5, DWARF)
HANDLE_DW_AT(0x82, call_tail_call, 5, DWARF)
HANDLE_DW_AT(0x83, call_target, 5, DWARF)
HANDLE_DW_AT(0x84, call_target_clobbered, 5, DWARF)
HANDLE_DW_AT(0x85, call_data_location, 5, DWARF)
HANDLE_DW_AT(0x86, call_data_value, 5, DWARF)
HANDLE_DW_AT(0x87, noreturn, 5, DWARF)
HANDLE_DW_AT(0x88, alignment, 5, DWARF)
HANDLE_DW_AT(0x89, export_symbols, 5, DWARF)
HANDLE_DW_AT(0x8a, deleted, 5, DWARF)
HANDLE_DW_AT(0x8b, defaulted, 5, DWARF)
HANDLE_DW_AT(0x8c, loclists_base, 5, DWARF)

// Vendor extensions:
HANDLE_DW_AT(0x806, GHS_namespace_alias, 0, GHS)
HANDLE_DW_AT(0x807, GHS_using_namespace, 0, GHS)
HANDLE_DW_AT(0x808, GHS_using_declaration, 0, GHS)

HANDLE_DW_AT(0x2001, MIPS_fde, 0, MIPS)
HANDLE_DW_AT(0x2002, MIPS_loop_begin, 0, MIPS)
HANDLE_DW_AT(0x2003, MIPS_tail_loop_begin, 0, MIPS)
HANDLE_DW_AT(0x2004, MIPS_epilog_begin, 0, MIPS)
HANDLE_DW_AT(0x2005, MIPS_loop_unroll_factor, 0, MIPS)
HANDLE_DW_AT(0x2006, MIPS_software_pipeline_depth, 0, MIPS)
HANDLE_DW_AT(0x2007, MIPS_linkage_name, 0, MIPS)
// Conflicting:
// HANDLE_DW_AT(0x2007, GHS_mangled, 0, GHS)
HANDLE_DW_AT(0x2008, MIPS_stride, 0, MIPS)
HANDLE_DW_AT(0x2009, MIPS_abstract_name, 0, MIPS)
HANDLE_DW_AT(0x200a, MIPS_clone_origin, 0, MIPS)
HANDLE_DW_AT(0x200b, MIPS_has_inlines, 0, MIPS)
HANDLE_DW_AT(0x200c, MIPS_stride_byte, 0, MIPS)
HANDLE_DW_AT(0x200d, MIPS_stride_elem, 0, MIPS)
HANDLE_DW_AT(0x200e, MIPS_ptr_dopetype, 0, MIPS)
HANDLE_DW_AT(0x200f, MIPS_allocatable_dopetype, 0, MIPS)
HANDLE_DW_AT(0x2010, MIPS_assumed_shape_dopetype, 0, MIPS)

// This one appears to have been implemented only by Open64 for
// Fortran and may conflict with other extensions.
HANDLE_DW_AT(0x2011, MIPS_assumed_size, 0, MIPS)

// HP  0x2001-0x2011 conflict with MIPS
// HANDLE_DW_AT(0x2001, HP_unmodifiable, 0, HP)
// HANDLE_DW_AT(0x2005, HP_prologue, 0, HP)
// HANDLE_DW_AT(0x2008, HP_epilogue, 0, HP)
// HANDLE_DW_AT(0x2010, HP_actuals_stmt_list, 0, HP)
// HANDLE_DW_AT(0x2011, HP_proc_per_section, 0, HP)

HANDLE_DW_AT(0x2012, HP_raw_data_ptr, 0, HP)
HANDLE_DW_AT(0x2013, HP_pass_by_reference, 0, HP)
HANDLE_DW_AT(0x2014, HP_opt_level, 0, HP)
HANDLE_DW_AT(0x2015, HP_prof_version_id, 0, HP)
HANDLE_DW_AT(0x2016, HP_opt_flags, 0, HP)
HANDLE_DW_AT(0x2017, HP_cold_region_low_pc, 0, HP)
HANDLE_DW_AT(0x2018, HP_cold_region_high_pc, 0, HP)
HANDLE_DW_AT(0x2019, HP_all_variables_modifiable, 0, HP)
HANDLE_DW_AT(0x201a, HP_linkage_name, 0, HP)
HANDLE_DW_AT(0x201b, HP_prof_flags, 0, HP)
HANDLE_DW_AT(0x201f, HP_unit_name, 0, HP)
HANDLE_DW_AT(0x2020, HP_unit_size, 0, HP)
HANDLE_DW_AT(0x2021, HP_widened_byte_size, 0, HP)
HANDLE_DW_AT(0x2022, HP_definition_points, 0, HP)
HANDLE_DW_AT(0x2023, HP_default_location, 0, HP)
HANDLE_DW_AT(0x2029, HP_is_result_param, 0, HP)

// COMPAQ/HP Conflicts with MIPS/HP  0x2001 - 0x2005
// HANDLE_DW_AT(0x2001, CPQ_discontig_ranges, 0, COMPAQ)
// HANDLE_DW_AT(0x2002, CPQ_semantic_events, 0, COMPAQ)
// HANDLE_DW_AT(0x2003, CPQ_split_lifetimes_var, 0, COMPAQ)
// HANDLE_DW_AT(0x2004, CPQ_split_lifetimes_rtn, 0, COMPAQ)
// HANDLE_DW_AT(0x2005, CPQ_prologue_length, 0, COMPAQ)

HANDLE_DW_AT(0x2026, INTEL_other_endian, 0, INTEL)

// Green Hills.
HANDLE_DW_AT(0x2083, GHS_rsm, 0, GHS)
HANDLE_DW_AT(0x2085, GHS_frsm, 0, GHS)
HANDLE_DW_AT(0x2086, GHS_frames, 0, GHS)
HANDLE_DW_AT(0x2087, GHS_rso, 0, GHS)
HANDLE_DW_AT(0x2092, GHS_subcpu, 0, GHS)
HANDLE_DW_AT(0x2093, GHS_lbrace_line, 0, GHS)

// GNU extensions
HANDLE_DW_AT(0x2101, sf_names, 0, GNU)
HANDLE_DW_AT(0x2102, src_info, 0, GNU)
HANDLE_DW_AT(0x2103, mac_info, 0, GNU)
HANDLE_DW_AT(0x2104, src_coords, 0, GNU)
HANDLE_DW_AT(0x2105, body_begin, 0, GNU)
HANDLE_DW_AT(0x2106, body_end, 0, GNU)
HANDLE_DW_AT(0x2107, GNU_vector, 0, GNU)
HANDLE_DW_AT(0x210f, GNU_odr_signature, 0, GNU)
HANDLE_DW_AT(0x2110, GNU_template_name, 0, GNU)
HANDLE_DW_AT(0x2111, GNU_call_site_value, 0, GNU)
HANDLE_DW_AT(0x2112, GNU_call_site_data_value, 0, GNU)
HANDLE_DW_AT(0x2113, GNU_call_site_target, 0, GNU)
HANDLE_DW_AT(0x2114, GNU_call_site_target_clobbered, 0, GNU)
HANDLE_DW_AT(0x2115, GNU_tail_call, 0, GNU)
HANDLE_DW_AT(0x2116, GNU_all_tail_call_sites, 0, GNU)
HANDLE_DW_AT(0x2117, GNU_all_call_sites, 0, GNU)
HANDLE_DW_AT(0x2118, GNU_all_source_call_sites, 0, GNU)
HANDLE_DW_AT(0x2119, GNU_macros, 0, GNU)
HANDLE_DW_AT(0x211a, GNU_deleted, 0, GNU)
// Extensions for Fission proposal.
HANDLE_DW_AT(0x2130, GNU_dwo_name, 0, GNU)
HANDLE_DW_AT(0x2131, GNU_dwo_id, 0, GNU)
HANDLE_DW_AT(0x2132, GNU_ranges_base, 0, GNU)
HANDLE_DW_AT(0x2133, GNU_addr_base, 0, GNU)
HANDLE_DW_AT(0x2134, GNU_pubnames, 0, GNU)
HANDLE_DW_AT(0x2135, GNU_pubtypes, 0, GNU)
HANDLE_DW_AT(0x2136, GNU_discriminator, 0, GNU)
HANDLE_DW_AT(0x2137, GNU_locviews, 0, GNU)
HANDLE_DW_AT(0x2138, GNU_entry_view, 0, GNU)

HANDLE_DW_AT(0x2201, SUN_template, 0, SUN)
// Conflicting:
// HANDLE_DW_AT(0x2201, VMS_rtnbeg_pd_address);

HANDLE_DW_AT(0x2202, SUN_alignment, 0, SUN)
HANDLE_DW_AT(0x2203, SUN_vtable, 0, SUN)
HANDLE_DW_AT(0x2204, SUN_count_guarantee, 0, SUN)
HANDLE_DW_AT(0x2205, SUN_command_line, 0, SUN)
HANDLE_DW_AT(0x2206, SUN_vbase, 0, SUN)
HANDLE_DW_AT(0x2207, SUN_compile_options, 0, SUN)
HANDLE_DW_AT(0x2208, SUN_language, 0, SUN)
HANDLE_DW_AT(0x2209, SUN_browser_file, 0, SUN)
HANDLE_DW_AT(0x2210, SUN_vtable_abi, 0, SUN)
HANDLE_DW_AT(0x2211, SUN_func_offsets, 0, SUN)
HANDLE_DW_AT(0x2212, SUN_cf_kind, 0, SUN)
HANDLE_DW_AT(0x2213, SUN_vtable_index, 0, SUN)
HANDLE_DW_AT(0x2214, SUN_omp_tpriv_addr, 0, SUN)
HANDLE_DW_AT(0x2215, SUN_omp_child_func, 0, SUN)
HANDLE_DW_AT(0x2216, SUN_func_offset, 0, SUN)
HANDLE_DW_AT(0x2217, SUN_memop_type_ref, 0, SUN)
HANDLE_DW_AT(0x2218, SUN_profile_id, 0, SUN)
HANDLE_DW_AT(0x2219, SUN_memop_signature, 0, SUN)

HANDLE_DW_AT(0x2220, SUN_obj_dir, 0, SUN)
HANDLE_DW_AT(0x2221, SUN_obj_file, 0, SUN)
HANDLE_DW_AT(0x2222, SUN_original_name, 0, SUN)
HANDLE_DW_AT(0x2223, SUN_hwcprof_signature, 0, SUN)
HANDLE_DW_AT(0x2224, SUN_amd64_parmdump, 0, SUN)
HANDLE_DW_AT(0x2225, SUN_part_link_name, 0, SUN)
HANDLE_DW_AT(0x2226, SUN_link_name, 0, SUN)
HANDLE_DW_AT(0x2227, SUN_pass_with_const, 0, SUN)
HANDLE_DW_AT(0x2228, SUN_return_with_const, 0, SUN)
HANDLE_DW_AT(0x2229, SUN_import_by_name, 0, SUN)
HANDLE_DW_AT(0x222a, SUN_90_pointer, 0, SUN)
HANDLE_DW_AT(0x222b, SUN_pass_by_ref, 0, SUN)
HANDLE_DW_AT(0x222c, SUN_f90_allocatable, 0, SUN)
HANDLE_DW_AT(0x222d, SUN_f90_assumed_shape_array, 0, SUN)
HANDLE_DW_AT(0x222e, SUN_c_vla, 0, SUN)
HANDLE_DW_AT(0x2230, SUN_return_value_ptr, 0, SUN)
HANDLE_DW_AT(0x2231, SUN_dtor_start, 0, SUN)
HANDLE_DW_AT(0x2232, SUN_dtor_length, 0, SUN)
HANDLE_DW_AT(0x2233, SUN_dtor_state_initial, 0, SUN)
HANDLE_DW_AT(0x2234, SUN_dtor_state_final, 0, SUN)
HANDLE_DW_AT(0x2235, SUN_dtor_state_deltas, 0, SUN)
HANDLE_DW_AT(0x2236, SUN_import_by_lname, 0, SUN)
HANDLE_DW_AT(0x2237, SUN_f90_use_only, 0, SUN)
HANDLE_DW_AT(0x2238, SUN_namelist_spec, 0, SUN)
HANDLE_DW_AT(0x2239, SUN_is_omp_child_func, 0, SUN)
HANDLE_DW_AT(0x223a, SUN_fortran_main_alias, 0, SUN)
HANDLE_DW_AT(0x223b, SUN_fortran_based, 0, SUN)

HANDLE_DW_AT(0x2300, ALTIUM_loclist, 0, ALTIUM)

HANDLE_DW_AT(0x2301, use_GNAT_descriptive_type, 0, GNU)
HANDLE_DW_AT(0x2302, GNAT_descriptive_type, 0, GNU)
HANDLE_DW_AT(0x2303, GNU_numerator, 0, GNU)
HANDLE_DW_AT(0x2304, GNU_denominator, 0, GNU)
HANDLE_DW_AT(0x2305, GNU_bias, 0, GNU)

HANDLE_DW_AT(0x2900, GO_kind, 0, GO)
HANDLE_DW_AT(0x2901, GO_key, 0, GO)
HANDLE_DW_AT(0x2902, GO_elem, 0, GO)
HANDLE_DW_AT(0x2903, GO_embedded_field, 0, GO)
HANDLE_DW_AT(0x2904, GO_runtime_type, 0, GO)

HANDLE_DW_AT(0x3210, UPC_threads_scaled, 0, UPC)

HANDLE_DW_AT(0x393e, IBM_wsa_addr, 0, IBM)
HANDLE_DW_AT(0x393f, IBM_home_location, 0, IBM)
HANDLE_DW_AT(0x3940, IBM_alt_srcview, 0, IBM)

// PGI (Portland Group) extensions.
HANDLE_DW_AT(0x3a00, PGI_lbase, 0, PGI)
HANDLE_DW_AT(0x3a01, PGI_soffset, 0, PGI)
HANDLE_DW_AT(0x3a02, PGI_lstride, 0, PGI)

// Borland extensions.
HANDLE_DW_AT(0x3b11, BORLAND_property_read, 0, BORLAND)
HANDLE_DW_AT(0x3b12, BORLAND_property_write, 0, BORLAND)
HANDLE_DW_AT(0x3b13, BORLAND_property_implements, 0, BORLAND)
HANDLE_DW_AT(0x3b14, BORLAND_property_index, 0, BORLAND)
HANDLE_DW_AT(0x3b15, BORLAND_property_default, 0, BORLAND)
HANDLE_DW_AT(0x3b20, BORLAND_Delphi_unit, 0, BORLAND)
HANDLE_DW_AT(0x3b21, BORLAND_Delphi_class, 0, BORLAND)
HANDLE_DW_AT(0x3b22, BORLAND_Delphi_record, 0, BORLAND)
HANDLE_DW_AT(0x3b23, BORLAND_Delphi_metaclass, 0, BORLAND)
HANDLE_DW_AT(0x3b24, BORLAND_Delphi_constructor, 0, BORLAND)
HANDLE_DW_AT(0x3b25, BORLAND_Delphi_destructor, 0, BORLAND)
HANDLE_DW_AT(0x3b26, BORLAND_Delphi_anonymous_method, 0, BORLAND)
HANDLE_DW_AT(0x3b27, BORLAND_Delphi_interface, 0, BORLAND)
HANDLE_DW_AT(0x3b28, BORLAND_Delphi_ABI, 0, BORLAND)
HANDLE_DW_AT(0x3b29, BORLAND_Delphi_return, 0, BORLAND)
HANDLE_DW_AT(0x3b30, BORLAND_Delphi_frameptr, 0, BORLAND)
HANDLE_DW_AT(0x3b31, BORLAND_closure, 0, BORLAND)
// LLVM project extensions.
HANDLE_DW_AT(0x3e00, LLVM_include_path, 0, LLVM)
HANDLE_DW_AT(0x3e01, LLVM_config_macros, 0, LLVM)
HANDLE_DW_AT(0x3e02, LLVM_sysroot, 0, LLVM)
HANDLE_DW_AT(0x3e03, LLVM_tag_offset, 0, LLVM)
HANDLE_DW_AT(0x3e04, LLVM_ptrauth_key, 0, LLVM)
HANDLE_DW_AT(0x3e05, LLVM_ptrauth_address_discriminated, 0, LLVM)
HANDLE_DW_AT(0x3e06, LLVM_ptrauth_extra_discriminator, 0, LLVM)
HANDLE_DW_AT(0x3e07, LLVM_apinotes, 0, APPLE)
HANDLE_DW_AT(0x3e08, LLVM_ptrauth_isa_pointer, 0, LLVM)
HANDLE_DW_AT(0x3e09, LLVM_ptrauth_authenticates_null_values, 0, LLVM)

// Apple extensions.

HANDLE_DW_AT(0x3fe1, APPLE_optimized, 0, APPLE)
HANDLE_DW_AT(0x3fe2, APPLE_flags, 0, APPLE)
HANDLE_DW_AT(0x3fe3, APPLE_isa, 0, APPLE)
HANDLE_DW_AT(0x3fe4, APPLE_block, 0, APPLE)
HANDLE_DW_AT(0x3fe5, APPLE_major_runtime_vers, 0, APPLE)
HANDLE_DW_AT(0x3fe6, APPLE_runtime_class, 0, APPLE)
HANDLE_DW_AT(0x3fe7, APPLE_omit_frame_ptr, 0, APPLE)
HANDLE_DW_AT(0x3fe8, APPLE_property_name, 0, APPLE)
HANDLE_DW_AT(0x3fe9, APPLE_property_getter, 0, APPLE)
HANDLE_DW_AT(0x3fea, APPLE_property_setter, 0, APPLE)
HANDLE_DW_AT(0x3feb, APPLE_property_attribute, 0, APPLE)
HANDLE_DW_AT(0x3fec, APPLE_objc_complete_type, 0, APPLE)
HANDLE_DW_AT(0x3fed, APPLE_property, 0, APPLE)
HANDLE_DW_AT(0x3fee, APPLE_objc_direct, 0, APPLE)
HANDLE_DW_AT(0x3fef, APPLE_sdk, 0, APPLE)

// Attribute form encodings.
HANDLE_DW_FORM(0x01, addr, 2, DWARF)
HANDLE_DW_FORM(0x03, block2, 2, DWARF)
HANDLE_DW_FORM(0x04, block4, 2, DWARF)
HANDLE_DW_FORM(0x05, data2, 2, DWARF)
HANDLE_DW_FORM(0x06, data4, 2, DWARF)
HANDLE_DW_FORM(0x07, data8, 2, DWARF)
HANDLE_DW_FORM(0x08, string, 2, DWARF)
HANDLE_DW_FORM(0x09, block, 2, DWARF)
HANDLE_DW_FORM(0x0a, block1, 2, DWARF)
HANDLE_DW_FORM(0x0b, data1, 2, DWARF)
HANDLE_DW_FORM(0x0c, flag, 2, DWARF)
HANDLE_DW_FORM(0x0d, sdata, 2, DWARF)
HANDLE_DW_FORM(0x0e, strp, 2, DWARF)
HANDLE_DW_FORM(0x0f, udata, 2, DWARF)
HANDLE_DW_FORM(0x10, ref_addr, 2, DWARF)
HANDLE_DW_FORM(0x11, ref1, 2, DWARF)
HANDLE_DW_FORM(0x12, ref2, 2, DWARF)
HANDLE_DW_FORM(0x13, ref4, 2, DWARF)
HANDLE_DW_FORM(0x14, ref8, 2, DWARF)
HANDLE_DW_FORM(0x15, ref_udata, 2, DWARF)
HANDLE_DW_FORM(0x16, indirect, 2, DWARF)
// New in DWARF v4:
HANDLE_DW_FORM(0x17, sec_offset, 4, DWARF)
HANDLE_DW_FORM(0x18, exprloc, 4, DWARF)
HANDLE_DW_FORM(0x19, flag_present, 4, DWARF)
// This was defined out of sequence.
HANDLE_DW_FORM(0x20, ref_sig8, 4, DWARF)
// New in DWARF v5:
HANDLE_DW_FORM(0x1a, strx, 5, DWARF)
HANDLE_DW_FORM(0x1b, addrx, 5, DWARF)
HANDLE_DW_FORM(0x1c, ref_sup4, 5, DWARF)
HANDLE_DW_FORM(0x1d, strp_sup, 5, DWARF)
HANDLE_DW_FORM(0x1e, data16, 5, DWARF)
HANDLE_DW_FORM(0x1f, line_strp, 5, DWARF)
HANDLE_DW_FORM(0x21, implicit_const, 5, DWARF)
HANDLE_DW_FORM(0x22, loclistx, 5, DWARF)
HANDLE_DW_FORM(0x23, rnglistx, 5, DWARF)
HANDLE_DW_FORM(0x24, ref_sup8, 5, DWARF)
HANDLE_DW_FORM(0x25, strx1, 5, DWARF)
HANDLE_DW_FORM(0x26, strx2, 5, DWARF)
HANDLE_DW_FORM(0x27, strx3, 5, DWARF)
HANDLE_DW_FORM(0x28, strx4, 5, DWARF)
HANDLE_DW_FORM(0x29, addrx1, 5, DWARF)
HANDLE_DW_FORM(0x2a, addrx2, 5, DWARF)
HANDLE_DW_FORM(0x2b, addrx3, 5, DWARF)
HANDLE_DW_FORM(0x2c, addrx4, 5, DWARF)
// Extensions for Fission proposal
HANDLE_DW_FORM(0x1f01, GNU_addr_index, 0, GNU)
HANDLE_DW_FORM(0x1f02, GNU_str_index, 0, GNU)
// Alternate debug sections proposal (output of "dwz" tool).
HANDLE_DW_FORM(0x1f20, GNU_ref_alt, 0, GNU)
HANDLE_DW_FORM(0x1f21, GNU_strp_alt, 0, GNU)
// LLVM addr+offset extension
HANDLE_DW_FORM(0x2001, LLVM_addrx_offset, 0, LLVM)

// DWARF Expression operators.
HANDLE_DW_OP(0x03, addr, 2, DWARF)
HANDLE_DW_OP(0x06, deref, 2, DWARF)
HANDLE_DW_OP(0x08, const1u, 2, DWARF)
HANDLE_DW_OP(0x09, const1s, 2, DWARF)
HANDLE_DW_OP(0x0a, const2u, 2, DWARF)
HANDLE_DW_OP(0x0b, const2s, 2, DWARF)
HANDLE_DW_OP(0x0c, const4u, 2, DWARF)
HANDLE_DW_OP(0x0d, const4s, 2, DWARF)
HANDLE_DW_OP(0x0e, const8u, 2, DWARF)
HANDLE_DW_OP(0x0f, const8s, 2, DWARF)
HANDLE_DW_OP(0x10, constu, 2, DWARF)
HANDLE_DW_OP(0x11, consts, 2, DWARF)
HANDLE_DW_OP(0x12, dup, 2, DWARF)
HANDLE_DW_OP(0x13, drop, 2, DWARF)
HANDLE_DW_OP(0x14, over, 2, DWARF)
HANDLE_DW_OP(0x15, pick, 2, DWARF)
HANDLE_DW_OP(0x16, swap, 2, DWARF)
HANDLE_DW_OP(0x17, rot, 2, DWARF)
HANDLE_DW_OP(0x18, xderef, 2, DWARF)
HANDLE_DW_OP(0x19, abs, 2, DWARF)
HANDLE_DW_OP(0x1a, and, 2, DWARF)
HANDLE_DW_OP(0x1b, div, 2, DWARF)
HANDLE_DW_OP(0x1c, minus, 2, DWARF)
HANDLE_DW_OP(0x1d, mod, 2, DWARF)
HANDLE_DW_OP(0x1e, mul, 2, DWARF)
HANDLE_DW_OP(0x1f, neg, 2, DWARF)
HANDLE_DW_OP(0x20, not, 2, DWARF)
HANDLE_DW_OP(0x21, or, 2, DWARF)
HANDLE_DW_OP(0x22, plus, 2, DWARF)
HANDLE_DW_OP(0x23, plus_uconst, 2, DWARF)
HANDLE_DW_OP(0x24, shl, 2, DWARF)
HANDLE_DW_OP(0x25, shr, 2, DWARF)
HANDLE_DW_OP(0x26, shra, 2, DWARF)
HANDLE_DW_OP(0x27, xor, 2, DWARF)
HANDLE_DW_OP(0x28, bra, 2, DWARF)
HANDLE_DW_OP(0x29, eq, 2, DWARF)
HANDLE_DW_OP(0x2a, ge, 2, DWARF)
HANDLE_DW_OP(0x2b, gt, 2, DWARF)
HANDLE_DW_OP(0x2c, le, 2, DWARF)
HANDLE_DW_OP(0x2d, lt, 2, DWARF)
HANDLE_DW_OP(0x2e, ne, 2, DWARF)
HANDLE_DW_OP(0x2f, skip, 2, DWARF)
HANDLE_DW_OP(0x30, lit0, 2, DWARF)
HANDLE_DW_OP(0x31, lit1, 2, DWARF)
HANDLE_DW_OP(0x32, lit2, 2, DWARF)
HANDLE_DW_OP(0x33, lit3, 2, DWARF)
HANDLE_DW_OP(0x34, lit4, 2, DWARF)
HANDLE_DW_OP(0x35, lit5, 2, DWARF)
HANDLE_DW_OP(0x36, lit6, 2, DWARF)
HANDLE_DW_OP(0x37, lit7, 2, DWARF)
HANDLE_DW_OP(0x38, lit8, 2, DWARF)
HANDLE_DW_OP(0x39, lit9, 2, DWARF)
HANDLE_DW_OP(0x3a, lit10, 2, DWARF)
HANDLE_DW_OP(0x3b, lit11, 2, DWARF)
HANDLE_DW_OP(0x3c, lit12, 2, DWARF)
HANDLE_DW_OP(0x3d, lit13, 2, DWARF)
HANDLE_DW_OP(0x3e, lit14, 2, DWARF)
HANDLE_DW_OP(0x3f, lit15, 2, DWARF)
HANDLE_DW_OP(0x40, lit16, 2, DWARF)
HANDLE_DW_OP(0x41, lit17, 2, DWARF)
HANDLE_DW_OP(0x42, lit18, 2, DWARF)
HANDLE_DW_OP(0x43, lit19, 2, DWARF)
HANDLE_DW_OP(0x44, lit20, 2, DWARF)
HANDLE_DW_OP(0x45, lit21, 2, DWARF)
HANDLE_DW_OP(0x46, lit22, 2, DWARF)
HANDLE_DW_OP(0x47, lit23, 2, DWARF)
HANDLE_DW_OP(0x48, lit24, 2, DWARF)
HANDLE_DW_OP(0x49, lit25, 2, DWARF)
HANDLE_DW_OP(0x4a, lit26, 2, DWARF)
HANDLE_DW_OP(0x4b, lit27, 2, DWARF)
HANDLE_DW_OP(0x4c, lit28, 2, DWARF)
HANDLE_DW_OP(0x4d, lit29, 2, DWARF)
HANDLE_DW_OP(0x4e, lit30, 2, DWARF)
HANDLE_DW_OP(0x4f, lit31, 2, DWARF)
HANDLE_DW_OP(0x50, reg0, 2, DWARF)
HANDLE_DW_OP(0x51, reg1, 2, DWARF)
HANDLE_DW_OP(0x52, reg2, 2, DWARF)
HANDLE_DW_OP(0x53, reg3, 2, DWARF)
HANDLE_DW_OP(0x54, reg4, 2, DWARF)
HANDLE_DW_OP(0x55, reg5, 2, DWARF)
HANDLE_DW_OP(0x56, reg6, 2, DWARF)
HANDLE_DW_OP(0x57, reg7, 2, DWARF)
HANDLE_DW_OP(0x58, reg8, 2, DWARF)
HANDLE_DW_OP(0x59, reg9, 2, DWARF)
HANDLE_DW_OP(0x5a, reg10, 2, DWARF)
HANDLE_DW_OP(0x5b, reg11, 2, DWARF)
HANDLE_DW_OP(0x5c, reg12, 2, DWARF)
HANDLE_DW_OP(0x5d, reg13, 2, DWARF)
HANDLE_DW_OP(0x5e, reg14, 2, DWARF)
HANDLE_DW_OP(0x5f, reg15, 2, DWARF)
HANDLE_DW_OP(0x60, reg16, 2, DWARF)
HANDLE_DW_OP(0x61, reg17, 2, DWARF)
HANDLE_DW_OP(0x62, reg18, 2, DWARF)
HANDLE_DW_OP(0x63, reg19, 2, DWARF)
HANDLE_DW_OP(0x64, reg20, 2, DWARF)
HANDLE_DW_OP(0x65, reg21, 2, DWARF)
HANDLE_DW_OP(0x66, reg22, 2, DWARF)
HANDLE_DW_OP(0x67, reg23, 2, DWARF)
HANDLE_DW_OP(0x68, reg24, 2, DWARF)
HANDLE_DW_OP(0x69, reg25, 2, DWARF)
HANDLE_DW_OP(0x6a, reg26, 2, DWARF)
HANDLE_DW_OP(0x6b, reg27, 2, DWARF)
HANDLE_DW_OP(0x6c, reg28, 2, DWARF)
HANDLE_DW_OP(0x6d, reg29, 2, DWARF)
HANDLE_DW_OP(0x6e, reg30, 2, DWARF)
HANDLE_DW_OP(0x6f, reg31, 2, DWARF)
HANDLE_DW_OP(0x70, breg0, 2, DWARF)
HANDLE_DW_OP(0x71, breg1, 2, DWARF)
HANDLE_DW_OP(0x72, breg2, 2, DWARF)
HANDLE_DW_OP(0x73, breg3, 2, DWARF)
HANDLE_DW_OP(0x74, breg4, 2, DWARF)
HANDLE_DW_OP(0x75, breg5, 2, DWARF)
HANDLE_DW_OP(0x76, breg6, 2, DWARF)
HANDLE_DW_OP(0x77, breg7, 2, DWARF)
HANDLE_DW_OP(0x78, breg8, 2, DWARF)
HANDLE_DW_OP(0x79, breg9, 2, DWARF)
HANDLE_DW_OP(0x7a, breg10, 2, DWARF)
HANDLE_DW_OP(0x7b, breg11, 2, DWARF)
HANDLE_DW_OP(0x7c, breg12, 2, DWARF)
HANDLE_DW_OP(0x7d, breg13, 2, DWARF)
HANDLE_DW_OP(0x7e, breg14, 2, DWARF)
HANDLE_DW_OP(0x7f, breg15, 2, DWARF)
HANDLE_DW_OP(0x80, breg16, 2, DWARF)
HANDLE_DW_OP(0x81, breg17, 2, DWARF)
HANDLE_DW_OP(0x82, breg18, 2, DWARF)
HANDLE_DW_OP(0x83, breg19, 2, DWARF)
HANDLE_DW_OP(0x84, breg20, 2, DWARF)
HANDLE_DW_OP(0x85, breg21, 2, DWARF)
HANDLE_DW_OP(0x86, breg22, 2, DWARF)
HANDLE_DW_OP(0x87, breg23, 2, DWARF)
HANDLE_DW_OP(0x88, breg24, 2, DWARF)
HANDLE_DW_OP(0x89, breg25, 2, DWARF)
HANDLE_DW_OP(0x8a, breg26, 2, DWARF)
HANDLE_DW_OP(0x8b, breg27, 2, DWARF)
HANDLE_DW_OP(0x8c, breg28, 2, DWARF)
HANDLE_DW_OP(0x8d, breg29, 2, DWARF)
HANDLE_DW_OP(0x8e, breg30, 2, DWARF)
HANDLE_DW_OP(0x8f, breg31, 2, DWARF)
HANDLE_DW_OP(0x90, regx, 2, DWARF)
HANDLE_DW_OP(0x91, fbreg, 2, DWARF)
HANDLE_DW_OP(0x92, bregx, 2, DWARF)
HANDLE_DW_OP(0x93, piece, 2, DWARF)
HANDLE_DW_OP(0x94, deref_size, 2, DWARF)
HANDLE_DW_OP(0x95, xderef_size, 2, DWARF)
HANDLE_DW_OP(0x96, nop, 2, DWARF)
// New in DWARF v3:
HANDLE_DW_OP(0x97, push_object_address, 3, DWARF)
HANDLE_DW_OP(0x98, call2, 3, DWARF)
HANDLE_DW_OP(0x99, call4, 3, DWARF)
HANDLE_DW_OP(0x9a, call_ref, 3, DWARF)
HANDLE_DW_OP(0x9b, form_tls_address, 3, DWARF)
HANDLE_DW_OP(0x9c, call_frame_cfa, 3, DWARF)
HANDLE_DW_OP(0x9d, bit_piece, 3, DWARF)
// New in DWARF v4:
HANDLE_DW_OP(0x9e, implicit_value, 4, DWARF)
HANDLE_DW_OP(0x9f, stack_value, 4, DWARF)
// New in DWARF v5:
HANDLE_DW_OP(0xa0, implicit_pointer, 5, DWARF)
HANDLE_DW_OP(0xa1, addrx, 5, DWARF)
HANDLE_DW_OP(0xa2, constx, 5, DWARF)
HANDLE_DW_OP(0xa3, entry_value, 5, DWARF)
HANDLE_DW_OP(0xa4, const_type, 5, DWARF)
HANDLE_DW_OP(0xa5, regval_type, 5, DWARF)
HANDLE_DW_OP(0xa6, deref_type, 5, DWARF)
HANDLE_DW_OP(0xa7, xderef_type, 5, DWARF)
HANDLE_DW_OP(0xa8, convert, 5, DWARF)
HANDLE_DW_OP(0xa9, reinterpret, 5, DWARF)
// Vendor extensions:
// Extensions for GNU-style thread-local storage.
HANDLE_DW_OP(0xe0, GNU_push_tls_address, 0, GNU)
// Conflicting:
// HANDLE_DW_OP(0xe0, HP_unknown, 0, HP)
HANDLE_DW_OP(0xe1, HP_is_value, 0, HP)
HANDLE_DW_OP(0xe2, HP_fltconst4, 0, HP)
HANDLE_DW_OP(0xe3, HP_fltconst8, 0, HP)
HANDLE_DW_OP(0xe4, HP_mod_range, 0, HP)
HANDLE_DW_OP(0xe5, HP_unmod_range, 0, HP)
HANDLE_DW_OP(0xe6, HP_tls, 0, HP)
HANDLE_DW_OP(0xe8, INTEL_bit_piece, 0, INTEL)

// Extensions for WebAssembly.
HANDLE_DW_OP(0xed, WASM_location, 0, WASM)
HANDLE_DW_OP(0xee, WASM_location_int, 0, WASM)
// Historic and not implemented in LLVM.
HANDLE_DW_OP(0xf0, APPLE_uninit, 0, APPLE)
// The GNU entry value extension.
HANDLE_DW_OP(0xf3, GNU_entry_value, 0, GNU)
HANDLE_DW_OP(0xf8, PGI_omp_thread_num, 0, PGI)
// Extensions for Fission proposal.
HANDLE_DW_OP(0xfb, GNU_addr_index, 0, GNU)
HANDLE_DW_OP(0xfc, GNU_const_index, 0, GNU)

// DW_OP_LLVM_user has two operands:
//   (1) An unsigned LEB128 "LLVM Vendor Extension Opcode".
//   (2) Zero or more literal operands, the number and type of which are
//       implied by the opcode (1).
// DW_OP_LLVM_user acts as an extension multiplexer, opening up the encoding
// space to accommodate an infinite number of extensions. This better reflects
// the de facto permanent allocation of extensions.
HANDLE_DW_OP(0xe9, LLVM_user, 0, LLVM)
// "LLVM Vendor Extension" operations under the DW_OP_LLVM_user encoding
// scheme. This list is authoritative and exhaustive. Once an operation is
// registered here it cannot be removed nor have its encoding changed. The
// encoding space must skip zero (which is reserved) and have no gaps.
//
// The DW_OP_LLVM_user DW_OP_LLVM_nop operation has no effect on the
// location stack or any of its values. It is defined as a placeholder for
// testing purposes.
HANDLE_DW_OP_LLVM_USEROP(0x0001, nop)
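
// Example encoding: the two-operation expression "DW_OP_LLVM_user,
// DW_OP_LLVM_nop" is emitted as the byte 0xe9 (DW_OP_LLVM_user) followed by
// the ULEB128-encoded extension opcode 0x0001, i.e. the bytes { 0xe9, 0x01 }.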

// DWARF languages.
HANDLE_DW_LANG(0x0001, C89, 0, 2, DWARF)
HANDLE_DW_LANG(0x0002, C, 0, 2, DWARF)
HANDLE_DW_LANG(0x0003, Ada83, 1, 2, DWARF)
HANDLE_DW_LANG(0x0004, C_plus_plus, 0, 2, DWARF)
HANDLE_DW_LANG(0x0005, Cobol74, 1, 2, DWARF)
HANDLE_DW_LANG(0x0006, Cobol85, 1, 2, DWARF)
HANDLE_DW_LANG(0x0007, Fortran77, 1, 2, DWARF)
HANDLE_DW_LANG(0x0008, Fortran90, 1, 2, DWARF)
HANDLE_DW_LANG(0x0009, Pascal83, 1, 2, DWARF)
HANDLE_DW_LANG(0x000a, Modula2, 1, 2, DWARF)
// New in DWARF v3:
HANDLE_DW_LANG(0x000b, Java, 0, 3, DWARF)
HANDLE_DW_LANG(0x000c, C99, 0, 3, DWARF)
HANDLE_DW_LANG(0x000d, Ada95, 1, 3, DWARF)
HANDLE_DW_LANG(0x000e, Fortran95, 1, 3, DWARF)
HANDLE_DW_LANG(0x000f, PLI, 1, 3, DWARF)
HANDLE_DW_LANG(0x0010, ObjC, 0, 3, DWARF)
HANDLE_DW_LANG(0x0011, ObjC_plus_plus, 0, 3, DWARF)
HANDLE_DW_LANG(0x0012, UPC, 0, 3, DWARF)
HANDLE_DW_LANG(0x0013, D, 0, 3, DWARF)
// New in DWARF v4:
HANDLE_DW_LANG(0x0014, Python, 0, 4, DWARF)
// New in DWARF v5:
HANDLE_DW_LANG(0x0015, OpenCL, 0, 5, DWARF)
HANDLE_DW_LANG(0x0016, Go, 0, 5, DWARF)
HANDLE_DW_LANG(0x0017, Modula3, 1, 5, DWARF)
HANDLE_DW_LANG(0x0018, Haskell, 0, 5, DWARF)
HANDLE_DW_LANG(0x0019, C_plus_plus_03, 0, 5, DWARF)
HANDLE_DW_LANG(0x001a, C_plus_plus_11, 0, 5, DWARF)
HANDLE_DW_LANG(0x001b, OCaml, 0, 5, DWARF)
HANDLE_DW_LANG(0x001c, Rust, 0, 5, DWARF)
HANDLE_DW_LANG(0x001d, C11, 0, 5, DWARF)
HANDLE_DW_LANG(0x001e, Swift, 0, 5, DWARF)
HANDLE_DW_LANG(0x001f, Julia, 1, 5, DWARF)
HANDLE_DW_LANG(0x0020, Dylan, 0, 5, DWARF)
HANDLE_DW_LANG(0x0021, C_plus_plus_14, 0, 5, DWARF)
HANDLE_DW_LANG(0x0022, Fortran03, 1, 5, DWARF)
HANDLE_DW_LANG(0x0023, Fortran08, 1, 5, DWARF)
HANDLE_DW_LANG(0x0024, RenderScript, 0, 5, DWARF)
HANDLE_DW_LANG(0x0025, BLISS, 0, 5, DWARF)
// New since DWARF v5:
HANDLE_DW_LANG(0x0026, Kotlin, 0, 0, DWARF)
HANDLE_DW_LANG(0x0027, Zig, 0, 0, DWARF)
HANDLE_DW_LANG(0x0028, Crystal, 0, 0, DWARF)
HANDLE_DW_LANG(0x002a, C_plus_plus_17, 0, 0, DWARF)
HANDLE_DW_LANG(0x002b, C_plus_plus_20, 0, 0, DWARF)
HANDLE_DW_LANG(0x002c, C17, 0, 0, DWARF)
HANDLE_DW_LANG(0x002d, Fortran18, 0, 0, DWARF)
HANDLE_DW_LANG(0x002e, Ada2005, 0, 0, DWARF)
HANDLE_DW_LANG(0x002f, Ada2012, 0, 0, DWARF)
HANDLE_DW_LANG(0x0033, Mojo, 0, 0, DWARF)
// Vendor extensions:
HANDLE_DW_LANG(0x8001, Mips_Assembler, std::nullopt, 0, MIPS)
HANDLE_DW_LANG(0x8e57, GOOGLE_RenderScript, 0, 0, GOOGLE)
HANDLE_DW_LANG(0xb000, BORLAND_Delphi, 0, 0, BORLAND)


// DWARF attribute type encodings.
HANDLE_DW_ATE(0x01, address, 2, DWARF)
HANDLE_DW_ATE(0x02, boolean, 2, DWARF)
HANDLE_DW_ATE(0x03, complex_float, 2, DWARF)
HANDLE_DW_ATE(0x04, float, 2, DWARF)
HANDLE_DW_ATE(0x05, signed, 2, DWARF)
HANDLE_DW_ATE(0x06, signed_char, 2, DWARF)
HANDLE_DW_ATE(0x07, unsigned, 2, DWARF)
HANDLE_DW_ATE(0x08, unsigned_char, 2, DWARF)
// New in DWARF v3:
HANDLE_DW_ATE(0x09, imaginary_float, 3, DWARF)
HANDLE_DW_ATE(0x0a, packed_decimal, 3, DWARF)
HANDLE_DW_ATE(0x0b, numeric_string, 3, DWARF)
HANDLE_DW_ATE(0x0c, edited, 3, DWARF)
HANDLE_DW_ATE(0x0d, signed_fixed, 3, DWARF)
HANDLE_DW_ATE(0x0e, unsigned_fixed, 3, DWARF)
HANDLE_DW_ATE(0x0f, decimal_float, 3, DWARF)
// New in DWARF v4:
HANDLE_DW_ATE(0x10, UTF, 4, DWARF)
// New in DWARF v5:
HANDLE_DW_ATE(0x11, UCS, 5, DWARF)
HANDLE_DW_ATE(0x12, ASCII, 5, DWARF)

// The version numbers of all vendor extensions >0x80 were guessed.
// Conflicting:
// HANDLE_DW_ATE(0x80, ALTIUM_fract, 2, ALTIUM) = DW_ATE_low_user
// HANDLE_DW_ATE(0x81, ALTIUM_accum, 2, ALTIUM)

HANDLE_DW_ATE(0x81, HP_complex_float, 2, HP)
HANDLE_DW_ATE(0x82, HP_float128, 2, HP)
HANDLE_DW_ATE(0x83, HP_complex_float128, 2, HP)
HANDLE_DW_ATE(0x84, HP_floathpintel, 2, HP)
HANDLE_DW_ATE(0x85, HP_imaginary_float90, 2, HP)
HANDLE_DW_ATE(0x86, HP_imaginary_float128, 2, HP)
// Conflicting:
// HANDLE_DW_ATE(0x86, SUN_imaginary_float, 2, SUN)

// DWARF attribute endianity
HANDLE_DW_END(0x00, default)
HANDLE_DW_END(0x01, big)
HANDLE_DW_END(0x02, little)

// DWARF virtuality codes.
HANDLE_DW_VIRTUALITY(0x00, none)
HANDLE_DW_VIRTUALITY(0x01, virtual)
HANDLE_DW_VIRTUALITY(0x02, pure_virtual)

// DWARF v5 Defaulted Member Encodings.
HANDLE_DW_DEFAULTED(0x00, no)
HANDLE_DW_DEFAULTED(0x01, in_class)
HANDLE_DW_DEFAULTED(0x02, out_of_class)

// DWARF calling convention codes.
HANDLE_DW_CC(0x01, normal)
HANDLE_DW_CC(0x02, program)
HANDLE_DW_CC(0x03, nocall)
// New in DWARF v5:
HANDLE_DW_CC(0x04, pass_by_reference)
HANDLE_DW_CC(0x05, pass_by_value)
// Vendor extensions:
HANDLE_DW_CC(0x40, GNU_renesas_sh)
HANDLE_DW_CC(0x41, GNU_borland_fastcall_i386)
HANDLE_DW_CC(0xb0, BORLAND_safecall)
HANDLE_DW_CC(0xb1, BORLAND_stdcall)
HANDLE_DW_CC(0xb2, BORLAND_pascal)
HANDLE_DW_CC(0xb3, BORLAND_msfastcall)
HANDLE_DW_CC(0xb4, BORLAND_msreturn)
HANDLE_DW_CC(0xb5, BORLAND_thiscall)
HANDLE_DW_CC(0xb6, BORLAND_fastcall)
HANDLE_DW_CC(0xc0, LLVM_vectorcall)
HANDLE_DW_CC(0xc1, LLVM_Win64)
HANDLE_DW_CC(0xc2, LLVM_X86_64SysV)
HANDLE_DW_CC(0xc3, LLVM_AAPCS)
HANDLE_DW_CC(0xc4, LLVM_AAPCS_VFP)
HANDLE_DW_CC(0xc5, LLVM_IntelOclBicc)
HANDLE_DW_CC(0xc6, LLVM_SpirFunction)
HANDLE_DW_CC(0xc7, LLVM_OpenCLKernel)
HANDLE_DW_CC(0xc8, LLVM_Swift)
HANDLE_DW_CC(0xc9, LLVM_PreserveMost)
HANDLE_DW_CC(0xca, LLVM_PreserveAll)
HANDLE_DW_CC(0xcb, LLVM_X86RegCall)
// From GCC source code (include/dwarf2.h): This DW_CC_ value is not currently
// generated by any toolchain.  It is used internally to GDB to indicate OpenCL
// C functions that have been compiled with the IBM XL C for OpenCL compiler and
// use a non-platform calling convention for passing OpenCL C vector types.
HANDLE_DW_CC(0xff, GDB_IBM_OpenCL)

// Line Number Extended Opcode Encodings
HANDLE_DW_LNE(0x01, end_sequence)
HANDLE_DW_LNE(0x02, set_address)
HANDLE_DW_LNE(0x03, define_file)
// New in DWARF v4:
HANDLE_DW_LNE(0x04, set_discriminator)

// Line Number Standard Opcode Encodings.
HANDLE_DW_LNS(0x00, extended_op)
HANDLE_DW_LNS(0x01, copy)
HANDLE_DW_LNS(0x02, advance_pc)
HANDLE_DW_LNS(0x03, advance_line)
HANDLE_DW_LNS(0x04, set_file)
HANDLE_DW_LNS(0x05, set_column)
HANDLE_DW_LNS(0x06, negate_stmt)
HANDLE_DW_LNS(0x07, set_basic_block)
HANDLE_DW_LNS(0x08, const_add_pc)
HANDLE_DW_LNS(0x09, fixed_advance_pc)
// New in DWARF v3:
HANDLE_DW_LNS(0x0a, set_prologue_end)
HANDLE_DW_LNS(0x0b, set_epilogue_begin)
HANDLE_DW_LNS(0x0c, set_isa)

// DWARF v5 Line number header entry format.
HANDLE_DW_LNCT(0x01, path)
HANDLE_DW_LNCT(0x02, directory_index)
HANDLE_DW_LNCT(0x03, timestamp)
HANDLE_DW_LNCT(0x04, size)
HANDLE_DW_LNCT(0x05, MD5)
// A vendor extension until http://dwarfstd.org/ShowIssue.php?issue=180201.1 is
// accepted and incorporated into the next DWARF standard.
HANDLE_DW_LNCT(0x2001, LLVM_source)

// DWARF v5 Macro information.
HANDLE_DW_MACRO(0x01, define)
HANDLE_DW_MACRO(0x02, undef)
HANDLE_DW_MACRO(0x03, start_file)
HANDLE_DW_MACRO(0x04, end_file)
HANDLE_DW_MACRO(0x05, define_strp)
HANDLE_DW_MACRO(0x06, undef_strp)
HANDLE_DW_MACRO(0x07, import)
HANDLE_DW_MACRO(0x08, define_sup)
HANDLE_DW_MACRO(0x09, undef_sup)
HANDLE_DW_MACRO(0x0a, import_sup)
HANDLE_DW_MACRO(0x0b, define_strx)
HANDLE_DW_MACRO(0x0c, undef_strx)

// GNU .debug_macro extension.
HANDLE_DW_MACRO_GNU(0x01, define)
HANDLE_DW_MACRO_GNU(0x02, undef)
HANDLE_DW_MACRO_GNU(0x03, start_file)
HANDLE_DW_MACRO_GNU(0x04, end_file)
HANDLE_DW_MACRO_GNU(0x05, define_indirect)
HANDLE_DW_MACRO_GNU(0x06, undef_indirect)
HANDLE_DW_MACRO_GNU(0x07, transparent_include)
HANDLE_DW_MACRO_GNU(0x08, define_indirect_alt)
HANDLE_DW_MACRO_GNU(0x09, undef_indirect_alt)
HANDLE_DW_MACRO_GNU(0x0a, transparent_include_alt)

// DWARF v5 Macro header flags.
HANDLE_MACRO_FLAG(0x01, OFFSET_SIZE)
HANDLE_MACRO_FLAG(0x02, DEBUG_LINE_OFFSET)
HANDLE_MACRO_FLAG(0x04, OPCODE_OPERANDS_TABLE)

// DWARF v5 Range List Entry encoding values.
HANDLE_DW_RLE(0x00, end_of_list)
HANDLE_DW_RLE(0x01, base_addressx)
HANDLE_DW_RLE(0x02, startx_endx)
HANDLE_DW_RLE(0x03, startx_length)
HANDLE_DW_RLE(0x04, offset_pair)
HANDLE_DW_RLE(0x05, base_address)
HANDLE_DW_RLE(0x06, start_end)
HANDLE_DW_RLE(0x07, start_length)

// DWARF v5 Loc List Entry encoding values.
HANDLE_DW_LLE(0x00, end_of_list)
HANDLE_DW_LLE(0x01, base_addressx)
HANDLE_DW_LLE(0x02, startx_endx)
HANDLE_DW_LLE(0x03, startx_length)
HANDLE_DW_LLE(0x04, offset_pair)
HANDLE_DW_LLE(0x05, default_location)
HANDLE_DW_LLE(0x06, base_address)
HANDLE_DW_LLE(0x07, start_end)
HANDLE_DW_LLE(0x08, start_length)

// Call frame instruction encodings.
HANDLE_DW_CFA(0x00, nop)
HANDLE_DW_CFA(0x40, advance_loc)
HANDLE_DW_CFA(0x80, offset)
HANDLE_DW_CFA(0xc0, restore)
HANDLE_DW_CFA(0x01, set_loc)
HANDLE_DW_CFA(0x02, advance_loc1)
HANDLE_DW_CFA(0x03, advance_loc2)
HANDLE_DW_CFA(0x04, advance_loc4)
HANDLE_DW_CFA(0x05, offset_extended)
HANDLE_DW_CFA(0x06, restore_extended)
HANDLE_DW_CFA(0x07, undefined)
HANDLE_DW_CFA(0x08, same_value)
HANDLE_DW_CFA(0x09, register)
HANDLE_DW_CFA(0x0a, remember_state)
HANDLE_DW_CFA(0x0b, restore_state)
HANDLE_DW_CFA(0x0c, def_cfa)
HANDLE_DW_CFA(0x0d, def_cfa_register)
HANDLE_DW_CFA(0x0e, def_cfa_offset)
// New in DWARF v3:
HANDLE_DW_CFA(0x0f, def_cfa_expression)
HANDLE_DW_CFA(0x10, expression)
HANDLE_DW_CFA(0x11, offset_extended_sf)
HANDLE_DW_CFA(0x12, def_cfa_sf)
HANDLE_DW_CFA(0x13, def_cfa_offset_sf)
HANDLE_DW_CFA(0x14, val_offset)
HANDLE_DW_CFA(0x15, val_offset_sf)
HANDLE_DW_CFA(0x16, val_expression)
// Vendor extensions:
HANDLE_DW_CFA_PRED(0x1d, MIPS_advance_loc8, SELECT_MIPS64)
HANDLE_DW_CFA_PRED(0x2d, GNU_window_save, SELECT_SPARC)
HANDLE_DW_CFA_PRED(0x2d, AARCH64_negate_ra_state, SELECT_AARCH64)
HANDLE_DW_CFA_PRED(0x2e, GNU_args_size, SELECT_X86)
// Heterogeneous Debugging Extension defined at
// https://llvm.org/docs/AMDGPUDwarfExtensionsForHeterogeneousDebugging.html#cfa-definition-instructions
HANDLE_DW_CFA(0x30, LLVM_def_aspace_cfa)
HANDLE_DW_CFA(0x31, LLVM_def_aspace_cfa_sf)

// Apple Objective-C Property Attributes.
// Keep this list in sync with clang's DeclObjCCommon.h
// ObjCPropertyAttribute::Kind!
HANDLE_DW_APPLE_PROPERTY(0x01, readonly)
HANDLE_DW_APPLE_PROPERTY(0x02, getter)
HANDLE_DW_APPLE_PROPERTY(0x04, assign)
HANDLE_DW_APPLE_PROPERTY(0x08, readwrite)
HANDLE_DW_APPLE_PROPERTY(0x10, retain)
HANDLE_DW_APPLE_PROPERTY(0x20, copy)
HANDLE_DW_APPLE_PROPERTY(0x40, nonatomic)
HANDLE_DW_APPLE_PROPERTY(0x80, setter)
HANDLE_DW_APPLE_PROPERTY(0x100, atomic)
HANDLE_DW_APPLE_PROPERTY(0x200, weak)
HANDLE_DW_APPLE_PROPERTY(0x400, strong)
HANDLE_DW_APPLE_PROPERTY(0x800, unsafe_unretained)
HANDLE_DW_APPLE_PROPERTY(0x1000, nullability)
HANDLE_DW_APPLE_PROPERTY(0x2000, null_resettable)
HANDLE_DW_APPLE_PROPERTY(0x4000, class)

// DWARF v5 Unit Types.
HANDLE_DW_UT(0x01, compile)
HANDLE_DW_UT(0x02, type)
HANDLE_DW_UT(0x03, partial)
HANDLE_DW_UT(0x04, skeleton)
HANDLE_DW_UT(0x05, split_compile)
HANDLE_DW_UT(0x06, split_type)

// DWARF section types. (enum name, ELF name, cmdline name, option)
// Note that these IDs don't mean anything.
// TODO: Add Mach-O and COFF names.
// Official DWARF sections.
HANDLE_DWARF_SECTION(DebugAbbrev, ".debug_abbrev", "debug-abbrev", BoolOption)
HANDLE_DWARF_SECTION(DebugAddr, ".debug_addr", "debug-addr", BoolOption)
HANDLE_DWARF_SECTION(DebugAranges, ".debug_aranges", "debug-aranges",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugInfo, ".debug_info", "debug-info", OffsetOption)
HANDLE_DWARF_SECTION(DebugTypes, ".debug_types", "debug-types", OffsetOption)
HANDLE_DWARF_SECTION(DebugLine, ".debug_line", "debug-line", OffsetOption)
HANDLE_DWARF_SECTION(DebugLineStr, ".debug_line_str", "debug-line-str",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugLoc, ".debug_loc", "debug-loc", OffsetOption)
HANDLE_DWARF_SECTION(DebugLoclists, ".debug_loclists", "debug-loclists",
                     OffsetOption)
HANDLE_DWARF_SECTION(DebugFrame, ".debug_frame", "debug-frame", OffsetOption)
HANDLE_DWARF_SECTION(DebugMacro, ".debug_macro", "debug-macro", BoolOption)
HANDLE_DWARF_SECTION(DebugNames, ".debug_names", "debug-names", BoolOption)
HANDLE_DWARF_SECTION(DebugPubnames, ".debug_pubnames", "debug-pubnames",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugPubtypes, ".debug_pubtypes", "debug-pubtypes",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugGnuPubnames, ".debug_gnu_pubnames",
                     "debug-gnu-pubnames", BoolOption)
HANDLE_DWARF_SECTION(DebugGnuPubtypes, ".debug_gnu_pubtypes",
                     "debug-gnu-pubtypes", BoolOption)
HANDLE_DWARF_SECTION(DebugRanges, ".debug_ranges", "debug-ranges", BoolOption)
HANDLE_DWARF_SECTION(DebugRnglists, ".debug_rnglists", "debug-rnglists",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugStr, ".debug_str", "debug-str", BoolOption)
HANDLE_DWARF_SECTION(DebugStrOffsets, ".debug_str_offsets", "debug-str-offsets",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugCUIndex, ".debug_cu_index", "debug-cu-index",
                     BoolOption)
HANDLE_DWARF_SECTION(DebugTUIndex, ".debug_tu_index", "debug-tu-index",
                     BoolOption)
// Vendor extensions.
HANDLE_DWARF_SECTION(AppleNames, ".apple_names", "apple-names", BoolOption)
HANDLE_DWARF_SECTION(AppleTypes, ".apple_types", "apple-types", BoolOption)
HANDLE_DWARF_SECTION(AppleNamespaces, ".apple_namespaces", "apple-namespaces",
                     BoolOption)
HANDLE_DWARF_SECTION(AppleObjC, ".apple_objc", "apple-objc", BoolOption)
HANDLE_DWARF_SECTION(GdbIndex, ".gdb_index", "gdb-index", BoolOption)

HANDLE_DW_IDX(0x01, compile_unit)
HANDLE_DW_IDX(0x02, type_unit)
HANDLE_DW_IDX(0x03, die_offset)
HANDLE_DW_IDX(0x04, parent)
HANDLE_DW_IDX(0x05, type_hash)
HANDLE_DW_IDX(0x2000, GNU_internal)
HANDLE_DW_IDX(0x2001, GNU_external)

// DWARF package file section identifiers.
// DWARFv5, section 7.3.5.3, table 7.1.
HANDLE_DW_SECT(1, INFO)
HANDLE_DW_SECT(3, ABBREV)
HANDLE_DW_SECT(4, LINE)
HANDLE_DW_SECT(5, LOCLISTS)
HANDLE_DW_SECT(6, STR_OFFSETS)
HANDLE_DW_SECT(7, MACRO)
HANDLE_DW_SECT(8, RNGLISTS)

#undef HANDLE_DW_TAG
#undef HANDLE_DW_AT
#undef HANDLE_DW_FORM
#undef HANDLE_DW_OP
#undef HANDLE_DW_OP_LLVM_USEROP
#undef HANDLE_DW_LANG
#undef HANDLE_DW_ATE
#undef HANDLE_DW_VIRTUALITY
#undef HANDLE_DW_DEFAULTED
#undef HANDLE_DW_CC
#undef HANDLE_DW_LNS
#undef HANDLE_DW_LNE
#undef HANDLE_DW_LNCT
#undef HANDLE_DW_MACRO
#undef HANDLE_DW_MACRO_GNU
#undef HANDLE_MACRO_FLAG
#undef HANDLE_DW_RLE
#undef HANDLE_DW_LLE
#undef HANDLE_DW_CFA
#undef HANDLE_DW_CFA_PRED
#undef HANDLE_DW_APPLE_PROPERTY
#undef HANDLE_DW_UT
#undef HANDLE_DWARF_SECTION
#undef HANDLE_DW_IDX
#undef HANDLE_DW_END
#undef HANDLE_DW_SECT
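
// A minimal consumption sketch (hypothetical client code, not part of this
// file): in the X-macro pattern used here, a client defines only the
// HANDLE_* hooks it needs before including the .def file, and hooks left
// undefined expand to nothing. llvm/BinaryFormat/Dwarf.h builds its enums
// this way, e.g.:
//
//   enum UnitType : unsigned char {
//   #define HANDLE_DW_UT(ID, NAME) DW_UT_##NAME = ID,
//   #include "llvm/BinaryFormat/Dwarf.def"
//   };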
//===- llvm/PassRegistry.h - Pass Information Registry ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines PassRegistry, a class that is used in the initialization
// and registration of passes.  At application startup, passes are registered
// with the PassRegistry, which is later provided to the PassManager for
// dependency resolution and similar tasks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSREGISTRY_H
#define LLVM_PASSREGISTRY_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/RWMutex.h"
#include <memory>
#include <vector>

namespace llvm {

class PassInfo;
struct PassRegistrationListener;

/// PassRegistry - This class manages the registration and initialization of
/// the pass subsystem at application startup, and assists the PassManager
/// in resolving pass dependencies.
/// NOTE: PassRegistry is NOT thread-safe.  If you want to use LLVM on multiple
/// threads simultaneously, you will need to use a separate PassRegistry on
/// each thread.
class PassRegistry {
  mutable sys::SmartRWMutex<true> Lock;

  /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
  using MapType = DenseMap<const void *, const PassInfo *>;
  MapType PassInfoMap;

  using StringMapType = StringMap<const PassInfo *>;
  StringMapType PassInfoStringMap;

  std::vector<std::unique_ptr<const PassInfo>> ToFree;
  std::vector<PassRegistrationListener *> Listeners;

public:
  PassRegistry() = default;
  ~PassRegistry();

  /// getPassRegistry - Access the global registry object, which is
  /// automatically initialized at application launch and destroyed by
  /// llvm_shutdown.
  static PassRegistry *getPassRegistry();

  /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
  /// type identifier (&MyPass::ID).
  const PassInfo *getPassInfo(const void *TI) const;

  /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
  /// argument string.
  const PassInfo *getPassInfo(StringRef Arg) const;

  /// registerPass - Register a pass (by means of its PassInfo) with the
  /// registry.  Required in order to use the pass with a PassManager.
  void registerPass(const PassInfo &PI, bool ShouldFree = false);

  /// registerAnalysisGroup - Register an analysis group (or a pass implementing
  /// an analysis group) with the registry.  Like registerPass, this is required
  /// in order for a PassManager to be able to use this group/pass.
  void registerAnalysisGroup(const void *InterfaceID, const void *PassID,
                             PassInfo &Registeree, bool isDefault,
                             bool ShouldFree = false);

  /// enumerateWith - Enumerate the registered passes, calling the provided
  /// PassRegistrationListener's passEnumerate() callback on each of them.
  void enumerateWith(PassRegistrationListener *L);

  /// addRegistrationListener - Register the given PassRegistrationListener
  /// to receive passRegistered() callbacks whenever a new pass is registered.
  void addRegistrationListener(PassRegistrationListener *L);

  /// removeRegistrationListener - Unregister a PassRegistrationListener so that
  /// it no longer receives passRegistered() callbacks.
  void removeRegistrationListener(PassRegistrationListener *L);
};

} // end namespace llvm

#endif // LLVM_PASSREGISTRY_H
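
// A short usage sketch (hypothetical caller, not part of this header): look
// up a registered pass by its command-line argument string and print its
// human-readable name. "verify" is assumed here to be the argument of a
// registered pass.
#include "llvm/PassInfo.h"
#include "llvm/Support/raw_ostream.h"

inline void printRegisteredPassName() {
  llvm::PassRegistry *Registry = llvm::PassRegistry::getPassRegistry();
  if (const llvm::PassInfo *PI =
          Registry->getPassInfo(llvm::StringRef("verify")))
    llvm::outs() << PI->getPassName() << "\n";
}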
//===- MachineSSAContext.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file declares a specialization of the GenericSSAContext<X>
/// template class for Machine IR.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESSACONTEXT_H
#define LLVM_CODEGEN_MACHINESSACONTEXT_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/Printable.h"

namespace llvm {
class MachineRegisterInfo;
class MachineInstr;
class MachineFunction;
class Register;
template <typename _FunctionT> class GenericSSAContext;
template <typename, bool> class DominatorTreeBase;

inline unsigned succ_size(const MachineBasicBlock *BB) {
  return BB->succ_size();
}
inline unsigned pred_size(const MachineBasicBlock *BB) {
  return BB->pred_size();
}
inline auto instrs(const MachineBasicBlock &BB) { return BB.instrs(); }

template <> class GenericSSAContext<MachineFunction> {
  const MachineRegisterInfo *RegInfo = nullptr;
  MachineFunction *MF = nullptr;

public:
  using BlockT = MachineBasicBlock;
  using FunctionT = MachineFunction;
  using InstructionT = MachineInstr;
  using ValueRefT = Register;
  using ConstValueRefT = Register;
  using UseT = MachineOperand;
  using DominatorTreeT = DominatorTreeBase<BlockT, false>;

  static constexpr Register ValueRefNull = 0;

  void setFunction(MachineFunction &Fn);
  MachineFunction *getFunction() const { return MF; }

  static MachineBasicBlock *getEntryBlock(MachineFunction &F);
  static void appendBlockDefs(SmallVectorImpl<Register> &defs,
                              const MachineBasicBlock &block);
  static void appendBlockTerms(SmallVectorImpl<MachineInstr *> &terms,
                               MachineBasicBlock &block);
  static void appendBlockTerms(SmallVectorImpl<const MachineInstr *> &terms,
                               const MachineBasicBlock &block);
  MachineBasicBlock *getDefBlock(Register) const;
  static bool isConstantOrUndefValuePhi(const MachineInstr &Phi);

  Printable print(const MachineBasicBlock *Block) const;
  Printable print(const MachineInstr *Inst) const;
  Printable print(Register Value) const;
};

using MachineSSAContext = GenericSSAContext<MachineFunction>;
} // namespace llvm

#endif // LLVM_CODEGEN_MACHINESSACONTEXT_H
//===-- llvm/CodeGen/AsmPrinterHandler.h -----------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a generic interface for AsmPrinter handlers,
// like debug and EH info emitters.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ASMPRINTERHANDLER_H
#define LLVM_CODEGEN_ASMPRINTERHANDLER_H

#include "llvm/Support/DataTypes.h"

namespace llvm {

class AsmPrinter;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MCSymbol;
class Module;

typedef MCSymbol *ExceptionSymbolProvider(AsmPrinter *Asm,
                                          const MachineBasicBlock *MBB);

/// Collects and handles AsmPrinter objects required to build debug
/// or EH information.
class AsmPrinterHandler {
public:
  virtual ~AsmPrinterHandler();

  /// For symbols that have a size designated (e.g. common symbols),
  /// this tracks that size.
  virtual void setSymbolSize(const MCSymbol *Sym, uint64_t Size) = 0;

  virtual void beginModule(Module *M) {}

  /// Emit all sections that should come after the content.
  virtual void endModule() = 0;

  /// Gather pre-function debug information.
  /// Every beginFunction(MF) call should be followed by an endFunction(MF)
  /// call.
  virtual void beginFunction(const MachineFunction *MF) = 0;

  /// Emit any end-of-function markers (like .cfi_endproc). This is called
  /// before endFunction and cannot switch sections.
  virtual void markFunctionEnd();

  /// Gather post-function debug information.
  virtual void endFunction(const MachineFunction *MF) = 0;

  /// Process the beginning of a new basic-block-section within a
  /// function. Always called immediately after beginFunction for the first
  /// basic-block. When basic-block-sections are enabled, called before the
  /// first block of each such section.
  virtual void beginBasicBlockSection(const MachineBasicBlock &MBB) {}

  /// Process the end of a basic-block-section within a function. When
  /// basic-block-sections are enabled, called after the last block in each such
  /// section (including the last section in the function). When
  /// basic-block-sections are disabled, called at the end of a function,
  /// immediately prior to markFunctionEnd.
  virtual void endBasicBlockSection(const MachineBasicBlock &MBB) {}

  /// Emit target-specific EH funclet machinery.
  virtual void beginFunclet(const MachineBasicBlock &MBB,
                            MCSymbol *Sym = nullptr) {}
  virtual void endFunclet() {}

  /// Process beginning of an instruction.
  virtual void beginInstruction(const MachineInstr *MI) = 0;

  /// Process end of an instruction.
  virtual void endInstruction() = 0;
};

} // End of namespace llvm

#endif
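
// A minimal handler sketch (hypothetical subclass): only the pure virtual
// hooks are overridden, each as a no-op. The AsmPrinter drives these hooks
// in pairs: every beginFunction(MF) is matched by an endFunction(MF), and
// begin/endInstruction bracket each emitted instruction.
class NoOpAsmPrinterHandler : public llvm::AsmPrinterHandler {
public:
  void setSymbolSize(const llvm::MCSymbol *Sym, uint64_t Size) override {}
  void endModule() override {}
  void beginFunction(const llvm::MachineFunction *MF) override {}
  void endFunction(const llvm::MachineFunction *MF) override {}
  void beginInstruction(const llvm::MachineInstr *MI) override {}
  void endInstruction() override {}
};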
//===- LexicalScopes.h - Collecting lexical scope info ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements LexicalScopes analysis.
//
// This pass collects lexical scope information and maps machine instructions
// to respective lexical scopes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LEXICALSCOPES_H
#define LLVM_CODEGEN_LEXICALSCOPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include <cassert>
#include <unordered_map>
#include <utility>

namespace llvm {

class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MDNode;

//===----------------------------------------------------------------------===//
/// InsnRange - This is used to track a range of instructions that share an
/// identical lexical scope.
///
using InsnRange = std::pair<const MachineInstr *, const MachineInstr *>;

//===----------------------------------------------------------------------===//
/// LexicalScope - This class is used to track scope information.
///
class LexicalScope {
public:
  LexicalScope(LexicalScope *P, const DILocalScope *D, const DILocation *I,
               bool A)
      : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A) {
    assert(D);
    assert(D->getSubprogram()->getUnit()->getEmissionKind() !=
           DICompileUnit::NoDebug &&
           "Don't build lexical scopes for non-debug locations");
    assert(D->isResolved() && "Expected resolved node");
    assert((!I || I->isResolved()) && "Expected resolved node");
    if (Parent)
      Parent->addChild(this);
  }

  // Accessors.
  LexicalScope *getParent() const { return Parent; }
  const MDNode *getDesc() const { return Desc; }
  const DILocation *getInlinedAt() const { return InlinedAtLocation; }
  const DILocalScope *getScopeNode() const { return Desc; }
  bool isAbstractScope() const { return AbstractScope; }
  SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
  SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }

  /// addChild - Add a child scope.
  void addChild(LexicalScope *S) { Children.push_back(S); }

  /// openInsnRange - This scope covers instruction range starting from MI.
  void openInsnRange(const MachineInstr *MI) {
    if (!FirstInsn)
      FirstInsn = MI;

    if (Parent)
      Parent->openInsnRange(MI);
  }

  /// extendInsnRange - Extend the current instruction range covered by
  /// this scope.
  void extendInsnRange(const MachineInstr *MI) {
    assert(FirstInsn && "MI Range is not open!");
    LastInsn = MI;
    if (Parent)
      Parent->extendInsnRange(MI);
  }

  /// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
  /// until now. This is used when a new scope is encountered while walking
  /// machine instructions.
  void closeInsnRange(LexicalScope *NewScope = nullptr) {
    assert(LastInsn && "Last insn missing!");
    Ranges.push_back(InsnRange(FirstInsn, LastInsn));
    FirstInsn = nullptr;
    LastInsn = nullptr;
    // If Parent dominates NewScope then do not close Parent's instruction
    // range.
    if (Parent && (!NewScope || !Parent->dominates(NewScope)))
      Parent->closeInsnRange(NewScope);
  }
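
  // A typical driver pattern (a sketch; LexicalScopes::assignInstructionRanges
  // below drives the ranges this way): call openInsnRange(MI) when a scope's
  // first instruction is reached, extendInsnRange(MI) while the same scope
  // continues, and closeInsnRange() once an instruction from a different
  // scope begins.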

  /// dominates - Return true if current scope dominates given lexical scope.
  bool dominates(const LexicalScope *S) const {
    if (S == this)
      return true;
    if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
      return true;
    return false;
  }
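
  // For example, a scope numbered (DFSIn = 2, DFSOut = 9) dominates one
  // numbered (DFSIn = 3, DFSOut = 5): the latter's DFS interval nests
  // inside the former's.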

  // Depth First Search support to walk and manipulate LexicalScope hierarchy.
  unsigned getDFSOut() const { return DFSOut; }
  void setDFSOut(unsigned O) { DFSOut = O; }
  unsigned getDFSIn() const { return DFSIn; }
  void setDFSIn(unsigned I) { DFSIn = I; }

  /// dump - print lexical scope.
  void dump(unsigned Indent = 0) const;

private:
  LexicalScope *Parent;                        // Parent to this scope.
  const DILocalScope *Desc;                    // Debug info descriptor.
  const DILocation *InlinedAtLocation;         // Location at which this
                                               // scope is inlined.
  bool AbstractScope;                          // Abstract Scope
  SmallVector<LexicalScope *, 4> Children;     // Scopes defined in scope.
                                               // Contents not owned.
  SmallVector<InsnRange, 4> Ranges;

  const MachineInstr *LastInsn = nullptr;  // Last instruction of this scope.
  const MachineInstr *FirstInsn = nullptr; // First instruction of this scope.
  unsigned DFSIn = 0; // DFS In & Out numbers, used to determine scope nesting.
  unsigned DFSOut = 0;
};

//===----------------------------------------------------------------------===//
/// LexicalScopes - This class provides an interface to collect and use
/// lexical scoping information for machine instructions.
///
class LexicalScopes {
public:
  LexicalScopes() = default;

  /// initialize - Scan the machine function and construct the lexical scope
  /// nest; resets the instance if necessary.
  void initialize(const MachineFunction &);

  /// reset - Release memory and reset the instance.
  void reset();

  /// empty - Return true if no lexical scope information is available.
  bool empty() { return CurrentFnLexicalScope == nullptr; }

  /// getCurrentFunctionScope - Return lexical scope for the current function.
  LexicalScope *getCurrentFunctionScope() const {
    return CurrentFnLexicalScope;
  }

  /// getMachineBasicBlocks - Populate given set using machine basic blocks
  /// which have machine instructions that belong to lexical scope identified by
  /// DebugLoc.
  void getMachineBasicBlocks(const DILocation *DL,
                             SmallPtrSetImpl<const MachineBasicBlock *> &MBBs);

  /// Return true if DebugLoc's lexical scope dominates at least one machine
  /// instruction's lexical scope in a given machine basic block.
  bool dominates(const DILocation *DL, MachineBasicBlock *MBB);

  /// findLexicalScope - Find lexical scope, either regular or inlined, for the
  /// given DebugLoc. Return NULL if not found.
  LexicalScope *findLexicalScope(const DILocation *DL);

  /// getAbstractScopesList - Return a reference to list of abstract scopes.
  ArrayRef<LexicalScope *> getAbstractScopesList() const {
    return AbstractScopesList;
  }

  /// findAbstractScope - Find an abstract scope or return null.
  LexicalScope *findAbstractScope(const DILocalScope *N) {
    auto I = AbstractScopeMap.find(N);
    return I != AbstractScopeMap.end() ? &I->second : nullptr;
  }

  /// findInlinedScope - Find an inlined scope for the given scope/inlined-at.
  LexicalScope *findInlinedScope(const DILocalScope *N, const DILocation *IA) {
    auto I = InlinedLexicalScopeMap.find(std::make_pair(N, IA));
    return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
  }

  /// findLexicalScope - Find regular lexical scope or return null.
  LexicalScope *findLexicalScope(const DILocalScope *N) {
    auto I = LexicalScopeMap.find(N);
    return I != LexicalScopeMap.end() ? &I->second : nullptr;
  }

  /// getOrCreateAbstractScope - Find or create an abstract lexical scope.
  LexicalScope *getOrCreateAbstractScope(const DILocalScope *Scope);

private:
  /// getOrCreateLexicalScope - Find lexical scope for the given Scope/IA. If
  /// not available then create new lexical scope.
  LexicalScope *getOrCreateLexicalScope(const DILocalScope *Scope,
                                        const DILocation *IA = nullptr);
  LexicalScope *getOrCreateLexicalScope(const DILocation *DL) {
    return DL ? getOrCreateLexicalScope(DL->getScope(), DL->getInlinedAt())
              : nullptr;
  }

  /// getOrCreateRegularScope - Find or create a regular lexical scope.
  LexicalScope *getOrCreateRegularScope(const DILocalScope *Scope);

  /// getOrCreateInlinedScope - Find or create an inlined lexical scope.
  LexicalScope *getOrCreateInlinedScope(const DILocalScope *Scope,
                                        const DILocation *InlinedAt);

  /// extractLexicalScopes - Extract instruction ranges for each lexical scopes
  /// for the given machine function.
  void extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
                            DenseMap<const MachineInstr *, LexicalScope *> &M);
  void constructScopeNest(LexicalScope *Scope);
  void
  assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
                          DenseMap<const MachineInstr *, LexicalScope *> &M);

  const MachineFunction *MF = nullptr;

  /// LexicalScopeMap - Tracks the scopes in the current function.
  // Use an unordered_map to ensure value pointer validity over insertion.
  std::unordered_map<const DILocalScope *, LexicalScope> LexicalScopeMap;

  /// InlinedLexicalScopeMap - Tracks inlined function scopes in current
  /// function.
  std::unordered_map<std::pair<const DILocalScope *, const DILocation *>,
                     LexicalScope,
                     pair_hash<const DILocalScope *, const DILocation *>>
      InlinedLexicalScopeMap;

  /// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
  // Use an unordered_map to ensure value pointer validity over insertion.
  std::unordered_map<const DILocalScope *, LexicalScope> AbstractScopeMap;

  /// AbstractScopesList - Tracks abstract scopes constructed while processing
  /// a function.
  SmallVector<LexicalScope *, 4> AbstractScopesList;

  /// CurrentFnLexicalScope - Top level scope for the current function.
  ///
  LexicalScope *CurrentFnLexicalScope = nullptr;

  /// Map a location to the set of basic blocks it dominates. This is a cache
  /// for \ref LexicalScopes::getMachineBasicBlocks results.
  using BlockSetT = SmallPtrSet<const MachineBasicBlock *, 4>;
  DenseMap<const DILocation *, std::unique_ptr<BlockSetT>> DominatedBlocks;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_LEXICALSCOPES_H
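
// A short usage sketch (hypothetical caller, not part of this header): build
// the scope nest for a machine function, then map an instruction's debug
// location to its lexical scope.
#include "llvm/CodeGen/MachineInstr.h"

inline void dumpScopeOf(const llvm::MachineFunction &MF,
                        const llvm::MachineInstr &MI) {
  llvm::LexicalScopes LS;
  LS.initialize(MF);  // scan MF and construct the lexical scope nest
  if (LS.empty())
    return;           // MF carries no usable debug information
  if (llvm::LexicalScope *Scope = LS.findLexicalScope(MI.getDebugLoc().get()))
    Scope->dump();
}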
///===- MachineOptimizationRemarkEmitter.h - Opt Diagnostics -*- C++ -*----===//
///
/// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
/// See https://llvm.org/LICENSE.txt for license information.
/// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
///
///===---------------------------------------------------------------------===//
/// \file
/// Optimization diagnostic interfaces for machine passes.  It's packaged as an
/// analysis pass so that passes using this service become dependent on MBFI as
/// well.  MBFI is used to compute the "hotness" of the diagnostic message.
///
///===---------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEOPTIMIZATIONREMARKEMITTER_H
#define LLVM_CODEGEN_MACHINEOPTIMIZATIONREMARKEMITTER_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include <optional>

namespace llvm {
class MachineBasicBlock;
class MachineBlockFrequencyInfo;
class MachineInstr;

/// Common features for diagnostics dealing with optimization remarks
/// that are used by machine passes.
class DiagnosticInfoMIROptimization : public DiagnosticInfoOptimizationBase {
public:
  DiagnosticInfoMIROptimization(enum DiagnosticKind Kind, const char *PassName,
                                StringRef RemarkName,
                                const DiagnosticLocation &Loc,
                                const MachineBasicBlock *MBB)
      : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
                                       MBB->getParent()->getFunction(), Loc),
        MBB(MBB) {}

  /// MI-specific kinds of diagnostic Arguments.
  struct MachineArgument : public DiagnosticInfoOptimizationBase::Argument {
    /// Print an entire MachineInstr.
    MachineArgument(StringRef Key, const MachineInstr &MI);
  };

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() >= DK_FirstMachineRemark &&
           DI->getKind() <= DK_LastMachineRemark;
  }

  const MachineBasicBlock *getBlock() const { return MBB; }

private:
  const MachineBasicBlock *MBB;
};

/// Diagnostic information for applied optimization remarks.
class MachineOptimizationRemark : public DiagnosticInfoMIROptimization {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass=, then the diagnostic will
  /// be emitted.  \p RemarkName is a textual identifier for the remark.  \p
  /// Loc is the debug location and \p MBB is the block that the optimization
  /// operates in.
  MachineOptimizationRemark(const char *PassName, StringRef RemarkName,
                            const DiagnosticLocation &Loc,
                            const MachineBasicBlock *MBB)
      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemark, PassName,
                                      RemarkName, Loc, MBB) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_MachineOptimizationRemark;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override {
    const Function &Fn = getFunction();
    LLVMContext &Ctx = Fn.getContext();
    return Ctx.getDiagHandlerPtr()->isPassedOptRemarkEnabled(getPassName());
  }
};

/// Diagnostic information for missed-optimization remarks.
class MachineOptimizationRemarkMissed : public DiagnosticInfoMIROptimization {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass-missed=, then the
  /// diagnostic will be emitted.  \p RemarkName is a textual identifier for the
  /// remark.  \p Loc is the debug location and \p MBB is the block that the
  /// optimization operates in.
  MachineOptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
                                  const DiagnosticLocation &Loc,
                                  const MachineBasicBlock *MBB)
      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkMissed,
                                      PassName, RemarkName, Loc, MBB) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_MachineOptimizationRemarkMissed;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override {
    const Function &Fn = getFunction();
    LLVMContext &Ctx = Fn.getContext();
    return Ctx.getDiagHandlerPtr()->isMissedOptRemarkEnabled(getPassName());
  }
};

/// Diagnostic information for optimization analysis remarks.
class MachineOptimizationRemarkAnalysis : public DiagnosticInfoMIROptimization {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass-analysis=, then the
  /// diagnostic will be emitted.  \p RemarkName is a textual identifier for the
  /// remark.  \p Loc is the debug location and \p MBB is the block that the
  /// optimization operates in.
  MachineOptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
                                    const DiagnosticLocation &Loc,
                                    const MachineBasicBlock *MBB)
      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkAnalysis,
                                      PassName, RemarkName, Loc, MBB) {}

  MachineOptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
                                    const MachineInstr *MI)
      : DiagnosticInfoMIROptimization(DK_MachineOptimizationRemarkAnalysis,
                                      PassName, RemarkName, MI->getDebugLoc(),
                                      MI->getParent()) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_MachineOptimizationRemarkAnalysis;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override {
    const Function &Fn = getFunction();
    LLVMContext &Ctx = Fn.getContext();
    return Ctx.getDiagHandlerPtr()->isAnalysisRemarkEnabled(getPassName());
  }
};

/// Extend llvm::ore:: with MI-specific helper names.
namespace ore {
using MNV = DiagnosticInfoMIROptimization::MachineArgument;
}

/// The optimization diagnostic interface.
///
/// It allows reporting when optimizations are performed and when they are not
/// along with the reasons for it.  Hotness information of the corresponding
/// code region can be included in the remark if DiagnosticsHotnessRequested is
/// enabled in the LLVM context.
class MachineOptimizationRemarkEmitter {
public:
  MachineOptimizationRemarkEmitter(MachineFunction &MF,
                                   MachineBlockFrequencyInfo *MBFI)
      : MF(MF), MBFI(MBFI) {}

  /// Emit an optimization remark.
  void emit(DiagnosticInfoOptimizationBase &OptDiag);

  /// Whether we allow for extra compile-time budget to perform more
  /// analysis to be more informative.
  ///
  /// This is useful to enable additional missed optimizations to be reported
  /// that are normally too noisy.  In this mode, we can use the extra analysis
  /// (1) to filter trivial false positives or (2) to provide more context so
  /// that non-trivial false positives can be quickly detected by the user.
  bool allowExtraAnalysis(StringRef PassName) const {
    return (
        MF.getFunction().getContext().getLLVMRemarkStreamer() ||
        MF.getFunction().getContext().getDiagHandlerPtr()->isAnyRemarkEnabled(
            PassName));
  }

  /// Take a lambda that returns a remark which will be emitted.  The second
  /// argument is only used to restrict this overload to callable types.
  template <typename T>
  void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
    // Avoid building the remark unless we know there are at least *some*
    // remarks enabled. We can't currently check whether remarks are requested
    // for the calling pass since that requires actually building the remark.

    if (MF.getFunction().getContext().getLLVMRemarkStreamer() ||
        MF.getFunction()
            .getContext()
            .getDiagHandlerPtr()
            ->isAnyRemarkEnabled()) {
      auto R = RemarkBuilder();
      emit((DiagnosticInfoOptimizationBase &)R);
    }
  }

  MachineBlockFrequencyInfo *getBFI() {
    return MBFI;
  }

private:
  MachineFunction &MF;

  /// MBFI is only set if hotness is requested.
  MachineBlockFrequencyInfo *MBFI;

  /// Compute hotness from IR value (currently assumed to be a block) if PGO is
  /// available.
  std::optional<uint64_t> computeHotness(const MachineBasicBlock &MBB);

  /// Similar but use value from \p OptDiag and update hotness there.
  void computeHotness(DiagnosticInfoMIROptimization &Remark);

  /// Only allow verbose messages if we know we're filtering by hotness
  /// (BFI is only set in this case).
  bool shouldEmitVerbose() { return MBFI != nullptr; }
};

/// The analysis pass
///
/// Note that this pass shouldn't generally be marked as preserved by other
/// passes.  It's holding onto BFI, so if the pass does not preserve BFI, BFI
/// could be freed.
class MachineOptimizationRemarkEmitterPass : public MachineFunctionPass {
  std::unique_ptr<MachineOptimizationRemarkEmitter> ORE;

public:
  MachineOptimizationRemarkEmitterPass();

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineOptimizationRemarkEmitter &getORE() {
    assert(ORE && "pass not run yet");
    return *ORE;
  }

  static char ID;
};
}

#endif
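
// A short usage sketch (hypothetical pass body; the pass name, remark name,
// and message are made up): the lambda overload of emit() defers building
// the remark until some remark output is known to be enabled, keeping the
// common "remarks off" path cheap.
inline void reportMissed(llvm::MachineOptimizationRemarkEmitter &ORE,
                         const llvm::MachineBasicBlock &MBB) {
  ORE.emit([&]() {
    return llvm::MachineOptimizationRemarkMissed(
               "example-pass", "ExampleRemark", llvm::DiagnosticLocation(),
               &MBB)
           << "an illustrative missed-optimization message";
  });
}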
//===- MachinePipeliner.h - Machine Software Pipeliner Pass -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An implementation of the Swing Modulo Scheduling (SMS) software pipeliner.
//
// Software pipelining (SWP) is an instruction scheduling technique for loops
// that overlaps loop iterations and exploits ILP via a compiler transformation.
//
// Swing Modulo Scheduling is an implementation of software pipelining
// that generates schedules that are near optimal in terms of initiation
// interval, register requirements, and stage count. See the papers:
//
// "Swing Modulo Scheduling: A Lifetime-Sensitive Approach", by J. Llosa,
// A. Gonzalez, E. Ayguade, and M. Valero. In PACT '96 Proceedings of the 1996
// Conference on Parallel Architectures and Compilation Techniques.
//
// "Lifetime-Sensitive Modulo Scheduling in a Production Environment", by J.
// Llosa, E. Ayguade, A. Gonzalez, M. Valero, and J. Eckhardt. In IEEE
// Transactions on Computers, Vol. 50, No. 3, 2001.
//
// "An Implementation of Swing Modulo Scheduling With Extensions for
// Superblocks", by T. Lattner, Master's Thesis, University of Illinois at
// Urbana-Champaign, 2005.
//
//
// The SMS algorithm consists of three main steps after computing the minimal
// initiation interval (MII).
// 1) Analyze the dependence graph and compute information about each
//    instruction in the graph.
// 2) Order the nodes (instructions) by priority based upon the heuristics
//    described in the algorithm.
// 3) Attempt to schedule the nodes in the specified order using the MII.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINEPIPELINER_H
#define LLVM_CODEGEN_MACHINEPIPELINER_H

#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/InitializePasses.h"

#include <deque>

namespace llvm {

class AAResults;
class NodeSet;
class SMSchedule;

extern cl::opt<bool> SwpEnableCopyToPhi;
extern cl::opt<int> SwpForceIssueWidth;

/// The main class in the implementation of the target independent
/// software pipeliner pass.
class MachinePipeliner : public MachineFunctionPass {
public:
  MachineFunction *MF = nullptr;
  MachineOptimizationRemarkEmitter *ORE = nullptr;
  const MachineLoopInfo *MLI = nullptr;
  const MachineDominatorTree *MDT = nullptr;
  const InstrItineraryData *InstrItins = nullptr;
  const TargetInstrInfo *TII = nullptr;
  RegisterClassInfo RegClassInfo;
  bool disabledByPragma = false;
  unsigned II_setByPragma = 0;

#ifndef NDEBUG
  static int NumTries;
#endif

  /// Cache the target analysis information about the loop.
  struct LoopInfo {
    MachineBasicBlock *TBB = nullptr;
    MachineBasicBlock *FBB = nullptr;
    SmallVector<MachineOperand, 4> BrCond;
    MachineInstr *LoopInductionVar = nullptr;
    MachineInstr *LoopCompare = nullptr;
    std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo> LoopPipelinerInfo =
        nullptr;
  };
  LoopInfo LI;

  static char ID;

  MachinePipeliner() : MachineFunctionPass(ID) {
    initializeMachinePipelinerPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

private:
  void preprocessPhiNodes(MachineBasicBlock &B);
  bool canPipelineLoop(MachineLoop &L);
  bool scheduleLoop(MachineLoop &L);
  bool swingModuloScheduler(MachineLoop &L);
  void setPragmaPipelineOptions(MachineLoop &L);
};

/// This class builds the dependence graph for the instructions in a loop,
/// and attempts to schedule the instructions using the SMS algorithm.
class SwingSchedulerDAG : public ScheduleDAGInstrs {
  MachinePipeliner &Pass;
  /// The minimum initiation interval between iterations for this schedule.
  unsigned MII = 0;
  /// The maximum initiation interval between iterations for this schedule.
  unsigned MAX_II = 0;
  /// Set to true if a valid pipelined schedule is found for the loop.
  bool Scheduled = false;
  MachineLoop &Loop;
  LiveIntervals &LIS;
  const RegisterClassInfo &RegClassInfo;
  unsigned II_setByPragma = 0;
  TargetInstrInfo::PipelinerLoopInfo *LoopPipelinerInfo = nullptr;

  /// A topological ordering of the SUnits, which is needed for changing
  /// dependences and iterating over the SUnits.
  ScheduleDAGTopologicalSort Topo;

  struct NodeInfo {
    int ASAP = 0;
    int ALAP = 0;
    int ZeroLatencyDepth = 0;
    int ZeroLatencyHeight = 0;

    NodeInfo() = default;
  };
  /// Computed properties for each node in the graph.
  std::vector<NodeInfo> ScheduleInfo;

  enum OrderKind { BottomUp = 0, TopDown = 1 };
  /// Computed node ordering for scheduling.
  SetVector<SUnit *> NodeOrder;

  using NodeSetType = SmallVector<NodeSet, 8>;
  using ValueMapTy = DenseMap<unsigned, unsigned>;
  using MBBVectorTy = SmallVectorImpl<MachineBasicBlock *>;
  using InstrMapTy = DenseMap<MachineInstr *, MachineInstr *>;

  /// Instructions to change when emitting the final schedule.
  DenseMap<SUnit *, std::pair<unsigned, int64_t>> InstrChanges;

  /// We may create a new instruction, so remember it because it
  /// must be deleted when the pass is finished.
  DenseMap<MachineInstr*, MachineInstr *> NewMIs;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// Helper class to implement Johnson's circuit finding algorithm.
  class Circuits {
    std::vector<SUnit> &SUnits;
    SetVector<SUnit *> Stack;
    BitVector Blocked;
    SmallVector<SmallPtrSet<SUnit *, 4>, 10> B;
    SmallVector<SmallVector<int, 4>, 16> AdjK;
    // Node to Index from ScheduleDAGTopologicalSort
    std::vector<int> *Node2Idx;
    unsigned NumPaths = 0u;
    static unsigned MaxPaths;

  public:
    Circuits(std::vector<SUnit> &SUs, ScheduleDAGTopologicalSort &Topo)
        : SUnits(SUs), Blocked(SUs.size()), B(SUs.size()), AdjK(SUs.size()) {
      Node2Idx = new std::vector<int>(SUs.size());
      unsigned Idx = 0;
      for (const auto &NodeNum : Topo)
        Node2Idx->at(NodeNum) = Idx++;
    }
    Circuits &operator=(const Circuits &other) = delete;
    Circuits(const Circuits &other) = delete;
    ~Circuits() { delete Node2Idx; }

    /// Reset the data structures used in the circuit algorithm.
    void reset() {
      Stack.clear();
      Blocked.reset();
      B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());
      NumPaths = 0;
    }

    void createAdjacencyStructure(SwingSchedulerDAG *DAG);
    bool circuit(int V, int S, NodeSetType &NodeSets, bool HasBackedge = false);
    void unblock(int U);
  };

  struct CopyToPhiMutation : public ScheduleDAGMutation {
    void apply(ScheduleDAGInstrs *DAG) override;
  };

public:
  SwingSchedulerDAG(MachinePipeliner &P, MachineLoop &L, LiveIntervals &lis,
                    const RegisterClassInfo &rci, unsigned II,
                    TargetInstrInfo::PipelinerLoopInfo *PLI)
      : ScheduleDAGInstrs(*P.MF, P.MLI, false), Pass(P), Loop(L), LIS(lis),
        RegClassInfo(rci), II_setByPragma(II), LoopPipelinerInfo(PLI),
        Topo(SUnits, &ExitSU) {
    P.MF->getSubtarget().getSMSMutations(Mutations);
    if (SwpEnableCopyToPhi)
      Mutations.push_back(std::make_unique<CopyToPhiMutation>());
  }

  void schedule() override;
  void finishBlock() override;

  /// Return true if the loop kernel has been scheduled.
  bool hasNewSchedule() { return Scheduled; }

  /// Return the earliest time an instruction may be scheduled.
  int getASAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ASAP; }

  /// Return the latest time an instruction may be scheduled.
  int getALAP(SUnit *Node) { return ScheduleInfo[Node->NodeNum].ALAP; }

  /// The mobility function, which is the number of slots in which
  /// an instruction may be scheduled.
  int getMOV(SUnit *Node) { return getALAP(Node) - getASAP(Node); }

  /// The depth, in the dependence graph, for a node.
  unsigned getDepth(SUnit *Node) { return Node->getDepth(); }

  /// The maximum unweighted length of a path from an arbitrary node to the
  /// given node in which each edge has latency 0
  int getZeroLatencyDepth(SUnit *Node) {
    return ScheduleInfo[Node->NodeNum].ZeroLatencyDepth;
  }

  /// The height, in the dependence graph, for a node.
  unsigned getHeight(SUnit *Node) { return Node->getHeight(); }

  /// The maximum unweighted length of a path from the given node to an
  /// arbitrary node in which each edge has latency 0
  int getZeroLatencyHeight(SUnit *Node) {
    return ScheduleInfo[Node->NodeNum].ZeroLatencyHeight;
  }

  /// Return true if the dependence is a back-edge in the data dependence graph.
  /// Since the DAG doesn't contain cycles, we represent a cycle in the graph
  /// using an anti dependence from a Phi to an instruction.
  bool isBackedge(SUnit *Source, const SDep &Dep) {
    if (Dep.getKind() != SDep::Anti)
      return false;
    return Source->getInstr()->isPHI() || Dep.getSUnit()->getInstr()->isPHI();
  }

  bool isLoopCarriedDep(SUnit *Source, const SDep &Dep, bool isSucc = true);

  /// The distance function, which indicates that operation V of iteration I
  /// depends on operation U of iteration I-distance.
  unsigned getDistance(SUnit *U, SUnit *V, const SDep &Dep) {
    // Instructions that feed a Phi have a distance of 1. Computing larger
    // values for arrays requires data dependence information.
    if (V->getInstr()->isPHI() && Dep.getKind() == SDep::Anti)
      return 1;
    return 0;
  }

  void applyInstrChange(MachineInstr *MI, SMSchedule &Schedule);

  void fixupRegisterOverlaps(std::deque<SUnit *> &Instrs);

  /// Return the new base register that was stored away for the changed
  /// instruction.
  unsigned getInstrBaseReg(SUnit *SU) {
    DenseMap<SUnit *, std::pair<unsigned, int64_t>>::iterator It =
        InstrChanges.find(SU);
    if (It != InstrChanges.end())
      return It->second.first;
    return 0;
  }

  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    Mutations.push_back(std::move(Mutation));
  }

  static bool classof(const ScheduleDAGInstrs *DAG) { return true; }

private:
  void addLoopCarriedDependences(AAResults *AA);
  void updatePhiDependences();
  void changeDependences();
  unsigned calculateResMII();
  unsigned calculateRecMII(NodeSetType &RecNodeSets);
  void findCircuits(NodeSetType &NodeSets);
  void fuseRecs(NodeSetType &NodeSets);
  void removeDuplicateNodes(NodeSetType &NodeSets);
  void computeNodeFunctions(NodeSetType &NodeSets);
  void registerPressureFilter(NodeSetType &NodeSets);
  void colocateNodeSets(NodeSetType &NodeSets);
  void checkNodeSets(NodeSetType &NodeSets);
  void groupRemainingNodes(NodeSetType &NodeSets);
  void addConnectedNodes(SUnit *SU, NodeSet &NewSet,
                         SetVector<SUnit *> &NodesAdded);
  void computeNodeOrder(NodeSetType &NodeSets);
  void checkValidNodeOrder(const NodeSetType &Circuits) const;
  bool schedulePipeline(SMSchedule &Schedule);
  bool computeDelta(MachineInstr &MI, unsigned &Delta);
  MachineInstr *findDefInLoop(Register Reg);
  bool canUseLastOffsetValue(MachineInstr *MI, unsigned &BasePos,
                             unsigned &OffsetPos, unsigned &NewBase,
                             int64_t &NewOffset);
  void postProcessDAG();
  /// Set the Minimum Initiation Interval for this schedule attempt.
  void setMII(unsigned ResMII, unsigned RecMII);
  /// Set the Maximum Initiation Interval for this schedule attempt.
  void setMAX_II();
};

/// A NodeSet contains a set of SUnit DAG nodes with additional information
/// that assigns a priority to the set.
class NodeSet {
  SetVector<SUnit *> Nodes;
  bool HasRecurrence = false;
  unsigned RecMII = 0;
  int MaxMOV = 0;
  unsigned MaxDepth = 0;
  unsigned Colocate = 0;
  SUnit *ExceedPressure = nullptr;
  unsigned Latency = 0;

public:
  using iterator = SetVector<SUnit *>::const_iterator;

  NodeSet() = default;
  NodeSet(iterator S, iterator E) : Nodes(S, E), HasRecurrence(true) {
    Latency = 0;
    for (const SUnit *Node : Nodes) {
      DenseMap<SUnit *, unsigned> SuccSUnitLatency;
      for (const SDep &Succ : Node->Succs) {
        auto SuccSUnit = Succ.getSUnit();
        if (!Nodes.count(SuccSUnit))
          continue;
        unsigned CurLatency = Succ.getLatency();
        unsigned MaxLatency = 0;
        if (SuccSUnitLatency.count(SuccSUnit))
          MaxLatency = SuccSUnitLatency[SuccSUnit];
        if (CurLatency > MaxLatency)
          SuccSUnitLatency[SuccSUnit] = CurLatency;
      }
      for (auto SUnitLatency : SuccSUnitLatency)
        Latency += SUnitLatency.second;
    }
  }

  bool insert(SUnit *SU) { return Nodes.insert(SU); }

  void insert(iterator S, iterator E) { Nodes.insert(S, E); }

  template <typename UnaryPredicate> bool remove_if(UnaryPredicate P) {
    return Nodes.remove_if(P);
  }

  unsigned count(SUnit *SU) const { return Nodes.count(SU); }

  bool hasRecurrence() { return HasRecurrence; };

  unsigned size() const { return Nodes.size(); }

  bool empty() const { return Nodes.empty(); }

  SUnit *getNode(unsigned i) const { return Nodes[i]; };

  void setRecMII(unsigned mii) { RecMII = mii; };

  void setColocate(unsigned c) { Colocate = c; };

  void setExceedPressure(SUnit *SU) { ExceedPressure = SU; }

  bool isExceedSU(SUnit *SU) { return ExceedPressure == SU; }

  int compareRecMII(NodeSet &RHS) { return RecMII - RHS.RecMII; }

  int getRecMII() { return RecMII; }

  /// Summarize node functions for the entire node set.
  void computeNodeSetInfo(SwingSchedulerDAG *SSD) {
    for (SUnit *SU : *this) {
      MaxMOV = std::max(MaxMOV, SSD->getMOV(SU));
      MaxDepth = std::max(MaxDepth, SSD->getDepth(SU));
    }
  }

  unsigned getLatency() { return Latency; }

  unsigned getMaxDepth() { return MaxDepth; }

  void clear() {
    Nodes.clear();
    RecMII = 0;
    HasRecurrence = false;
    MaxMOV = 0;
    MaxDepth = 0;
    Colocate = 0;
    ExceedPressure = nullptr;
  }

  operator SetVector<SUnit *> &() { return Nodes; }

  /// Sort the node sets by importance. First, rank them by recurrence MII,
  /// then by mobility (least mobile done first), and finally by depth.
  /// Each node set may contain a colocate value which is used as the first
  /// tie breaker, if it's set.
  bool operator>(const NodeSet &RHS) const {
    if (RecMII == RHS.RecMII) {
      if (Colocate != 0 && RHS.Colocate != 0 && Colocate != RHS.Colocate)
        return Colocate < RHS.Colocate;
      if (MaxMOV == RHS.MaxMOV)
        return MaxDepth > RHS.MaxDepth;
      return MaxMOV < RHS.MaxMOV;
    }
    return RecMII > RHS.RecMII;
  }

  bool operator==(const NodeSet &RHS) const {
    return RecMII == RHS.RecMII && MaxMOV == RHS.MaxMOV &&
           MaxDepth == RHS.MaxDepth;
  }

  bool operator!=(const NodeSet &RHS) const { return !operator==(RHS); }

  iterator begin() { return Nodes.begin(); }
  iterator end() { return Nodes.end(); }
  void print(raw_ostream &os) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const;
#endif
};

// 16 was selected based on the number of ProcResource kinds for all
// existing Subtargets, so that SmallVector doesn't need to resize too often.
static const int DefaultProcResSize = 16;

class ResourceManager {
private:
  const MCSubtargetInfo *STI;
  const MCSchedModel &SM;
  const TargetSubtargetInfo *ST;
  const TargetInstrInfo *TII;
  SwingSchedulerDAG *DAG;
  const bool UseDFA;
  /// DFA resources for each slot
  llvm::SmallVector<std::unique_ptr<DFAPacketizer>> DFAResources;
  /// Modulo Reservation Table. When a resource with ID R is consumed in cycle
  /// C, it is counted in MRT[C mod II][R]. (Used when UseDFA == false.)
  llvm::SmallVector<llvm::SmallVector<uint64_t, DefaultProcResSize>> MRT;
  /// The number of scheduled micro operations for each slot. Micro operations
  /// are assumed to be scheduled one per cycle, starting with the cycle in
  /// which the instruction is scheduled.
  llvm::SmallVector<int> NumScheduledMops;
  /// Each processor resource is associated with a so-called processor resource
  /// mask. This vector makes it possible to correlate processor resource IDs
  /// with processor resource masks. There is exactly one element for each
  /// processor resource declared by the scheduling model.
  llvm::SmallVector<uint64_t, DefaultProcResSize> ProcResourceMasks;
  int InitiationInterval = 0;
  /// The number of micro operations that can be scheduled in a single cycle.
  int IssueWidth;

  int calculateResMIIDFA() const;
  /// Check if MRT is overbooked
  bool isOverbooked() const;
  /// Reserve resources on MRT
  void reserveResources(const MCSchedClassDesc *SCDesc, int Cycle);
  /// Unreserve resources on MRT
  void unreserveResources(const MCSchedClassDesc *SCDesc, int Cycle);

  /// Return M satisfying Dividend = Divisor * X + M, 0 <= M < Divisor.
  /// The MRT slot used to reserve a resource for cycle C is
  /// positiveModulo(C, II).
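  /// For example, with II == 4: positiveModulo(-1, 4) == 3 and
  /// positiveModulo(5, 4) == 1, so cycles -1 and 3 map to the same MRT slot.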
  int positiveModulo(int Dividend, int Divisor) const {
    assert(Divisor > 0);
    int R = Dividend % Divisor;
    if (R < 0)
      R += Divisor;
    return R;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dumpMRT() const;
#endif

public:
  ResourceManager(const TargetSubtargetInfo *ST, SwingSchedulerDAG *DAG)
      : STI(ST), SM(ST->getSchedModel()), ST(ST), TII(ST->getInstrInfo()),
        DAG(DAG), UseDFA(ST->useDFAforSMS()),
        ProcResourceMasks(SM.getNumProcResourceKinds(), 0),
        IssueWidth(SM.IssueWidth) {
    initProcResourceVectors(SM, ProcResourceMasks);
    if (IssueWidth <= 0)
      // If IssueWidth is not specified, set a sufficiently large value
      IssueWidth = 100;
    if (SwpForceIssueWidth > 0)
      IssueWidth = SwpForceIssueWidth;
  }

  void initProcResourceVectors(const MCSchedModel &SM,
                               SmallVectorImpl<uint64_t> &Masks);

  /// Check if the resources occupied by a machine instruction are available
  /// in the current state.
  bool canReserveResources(SUnit &SU, int Cycle);

  /// Reserve the resources occupied by a machine instruction and change the
  /// current state to reflect that change.
  void reserveResources(SUnit &SU, int Cycle);
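
  // Check-then-reserve sketch (RM, SU and C are assumed to come from the
  // surrounding SMS scheduling loop):
  //   if (RM.canReserveResources(SU, C))
  //     RM.reserveResources(SU, C);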

  int calculateResMII() const;

  /// Initialize resources with the initiation interval II.
  void init(int II);
};

/// This class represents the scheduled code.  The main data structure is a
/// map from scheduled cycle to instructions.  During scheduling, the
/// data structure explicitly represents all stages/iterations. When
/// the algorithm finishes, the schedule is collapsed into a single stage,
/// which represents instructions from different loop iterations.
///
/// The SMS algorithm allows negative values for cycles, so the first cycle
/// in the schedule is the smallest cycle value.
class SMSchedule {
private:
  /// Map from execution cycle to instructions.
  DenseMap<int, std::deque<SUnit *>> ScheduledInstrs;

  /// Map from instruction to execution cycle.
  std::map<SUnit *, int> InstrToCycle;

  /// Keep track of the first cycle value in the schedule.  It starts
  /// as zero, but the algorithm allows negative values.
  int FirstCycle = 0;

  /// Keep track of the last cycle value in the schedule.
  int LastCycle = 0;

  /// The initiation interval (II) for the schedule.
  int InitiationInterval = 0;

  /// Target machine information.
  const TargetSubtargetInfo &ST;

  /// Virtual register information.
  MachineRegisterInfo &MRI;

  ResourceManager ProcItinResources;

public:
  SMSchedule(MachineFunction *mf, SwingSchedulerDAG *DAG)
      : ST(mf->getSubtarget()), MRI(mf->getRegInfo()),
        ProcItinResources(&ST, DAG) {}

  void reset() {
    ScheduledInstrs.clear();
    InstrToCycle.clear();
    FirstCycle = 0;
    LastCycle = 0;
    InitiationInterval = 0;
  }

  /// Set the initiation interval for this schedule.
  void setInitiationInterval(int ii) {
    InitiationInterval = ii;
    ProcItinResources.init(ii);
  }

  /// Return the initiation interval for this schedule.
  int getInitiationInterval() const { return InitiationInterval; }

  /// Return the first cycle in the completed schedule.  This
  /// can be a negative value.
  int getFirstCycle() const { return FirstCycle; }

  /// Return the last cycle in the finalized schedule.
  int getFinalCycle() const { return FirstCycle + InitiationInterval - 1; }

  /// Return the cycle of the earliest scheduled instruction in the dependence
  /// chain.
  int earliestCycleInChain(const SDep &Dep);

  /// Return the cycle of the latest scheduled instruction in the dependence
  /// chain.
  int latestCycleInChain(const SDep &Dep);

  void computeStart(SUnit *SU, int *MaxEarlyStart, int *MinLateStart,
                    int *MinEnd, int *MaxStart, int II, SwingSchedulerDAG *DAG);
  bool insert(SUnit *SU, int StartCycle, int EndCycle, int II);

  /// Iterators for the cycle to instruction map.
  using sched_iterator = DenseMap<int, std::deque<SUnit *>>::iterator;
  using const_sched_iterator =
      DenseMap<int, std::deque<SUnit *>>::const_iterator;

  /// Return true if the instruction is scheduled at the specified stage.
  bool isScheduledAtStage(SUnit *SU, unsigned StageNum) {
    return (stageScheduled(SU) == (int)StageNum);
  }

  /// Return the stage for a scheduled instruction.  Return -1 if
  /// the instruction has not been scheduled.
  int stageScheduled(SUnit *SU) const {
    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
    if (it == InstrToCycle.end())
      return -1;
    return (it->second - FirstCycle) / InitiationInterval;
  }

  /// Return the cycle for a scheduled instruction. This function normalizes
  /// the first cycle to be 0.
  unsigned cycleScheduled(SUnit *SU) const {
    std::map<SUnit *, int>::const_iterator it = InstrToCycle.find(SU);
    assert(it != InstrToCycle.end() && "Instruction hasn't been scheduled.");
    return (it->second - FirstCycle) % InitiationInterval;
  }
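
  // Worked example: with FirstCycle == -2 and InitiationInterval == 3, an
  // instruction scheduled at cycle 4 is in stage (4 - (-2)) / 3 == 2 and at
  // normalized cycle (4 - (-2)) % 3 == 0.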

  /// Return the maximum stage count needed for this schedule.
  unsigned getMaxStageCount() {
    return (LastCycle - FirstCycle) / InitiationInterval;
  }

  /// Return the instructions that are scheduled at the specified cycle.
  std::deque<SUnit *> &getInstructions(int cycle) {
    return ScheduledInstrs[cycle];
  }

  SmallSet<SUnit *, 8>
  computeUnpipelineableNodes(SwingSchedulerDAG *SSD,
                             TargetInstrInfo::PipelinerLoopInfo *PLI);

  bool
  normalizeNonPipelinedInstructions(SwingSchedulerDAG *SSD,
                                    TargetInstrInfo::PipelinerLoopInfo *PLI);
  bool isValidSchedule(SwingSchedulerDAG *SSD);
  void finalizeSchedule(SwingSchedulerDAG *SSD);
  void orderDependence(SwingSchedulerDAG *SSD, SUnit *SU,
                       std::deque<SUnit *> &Insts);
  bool isLoopCarried(SwingSchedulerDAG *SSD, MachineInstr &Phi);
  bool isLoopCarriedDefOfUse(SwingSchedulerDAG *SSD, MachineInstr *Def,
                             MachineOperand &MO);
  void print(raw_ostream &os) const;
  void dump() const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEPIPELINER_H

//===- ExpandReductions.h - Expand reduction intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_EXPANDREDUCTIONS_H
#define LLVM_CODEGEN_EXPANDREDUCTIONS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class ExpandReductionsPass
    : public PassInfoMixin<ExpandReductionsPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
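
// Usage sketch (registering in an assumed new-PM function pipeline):
//   FunctionPassManager FPM;
//   FPM.addPass(ExpandReductionsPass());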
} // end namespace llvm

#endif // LLVM_CODEGEN_EXPANDREDUCTIONS_H

//===- LiveStacks.h - Live Stack Slot Analysis ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the live stack slot analysis pass. It is analogous to
// live interval analysis except it's analyzing liveness of stack slots rather
// than registers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVESTACKS_H
#define LLVM_CODEGEN_LIVESTACKS_H

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"
#include <cassert>
#include <map>
#include <unordered_map>

namespace llvm {

class AnalysisUsage;
class MachineFunction;
class Module;
class raw_ostream;
class TargetRegisterClass;
class TargetRegisterInfo;

class LiveStacks : public MachineFunctionPass {
  const TargetRegisterInfo *TRI = nullptr;

  /// Special pool allocator for VNInfo's (LiveInterval val#).
  ///
  VNInfo::Allocator VNInfoAllocator;

  /// S2IMap - Stack slot indices to live interval mapping.
  using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
  SS2IntervalMap S2IMap;

  /// S2RCMap - Stack slot indices to register class mapping.
  std::map<int, const TargetRegisterClass *> S2RCMap;

public:
  static char ID; // Pass identification, replacement for typeid

  LiveStacks() : MachineFunctionPass(ID) {
    initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  }

  using iterator = SS2IntervalMap::iterator;
  using const_iterator = SS2IntervalMap::const_iterator;

  const_iterator begin() const { return S2IMap.begin(); }
  const_iterator end() const { return S2IMap.end(); }
  iterator begin() { return S2IMap.begin(); }
  iterator end() { return S2IMap.end(); }

  unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }

  LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC);

  LiveInterval &getInterval(int Slot) {
    assert(Slot >= 0 && "Spill slot index must be >= 0");
    SS2IntervalMap::iterator I = S2IMap.find(Slot);
    assert(I != S2IMap.end() && "Interval does not exist for stack slot");
    return I->second;
  }

  const LiveInterval &getInterval(int Slot) const {
    assert(Slot >= 0 && "Spill slot index must be >= 0");
    SS2IntervalMap::const_iterator I = S2IMap.find(Slot);
    assert(I != S2IMap.end() && "Interval does not exist for stack slot");
    return I->second;
  }

  bool hasInterval(int Slot) const { return S2IMap.count(Slot); }

  const TargetRegisterClass *getIntervalRegClass(int Slot) const {
    assert(Slot >= 0 && "Spill slot index must be >= 0");
    std::map<int, const TargetRegisterClass *>::const_iterator I =
        S2RCMap.find(Slot);
    assert(I != S2RCMap.end() &&
           "Register class info does not exist for stack slot");
    return I->second;
  }
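
  // Query sketch (LS is an assumed LiveStacks result obtained from the pass
  // manager; Slot is a stack slot index):
  //   if (LS.hasInterval(Slot)) {
  //     LiveInterval &LI = LS.getInterval(Slot);
  //     const TargetRegisterClass *RC = LS.getIntervalRegClass(Slot);
  //   }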

  VNInfo::Allocator &getVNInfoAllocator() { return VNInfoAllocator; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;

  /// runOnMachineFunction - pass entry point
  bool runOnMachineFunction(MachineFunction &) override;

  /// print - Implement the dump method.
  void print(raw_ostream &O, const Module * = nullptr) const override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVESTACKS_H

//===- RegAllocCommon.h - Utilities shared between allocators ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCCOMMON_H
#define LLVM_CODEGEN_REGALLOCCOMMON_H

#include <functional>

namespace llvm {

class TargetRegisterClass;
class TargetRegisterInfo;

typedef std::function<bool(const TargetRegisterInfo &TRI,
                           const TargetRegisterClass &RC)> RegClassFilterFunc;

/// Default register class filter function for register allocation. All virtual
/// registers should be allocated.
static inline bool allocateAllRegClasses(const TargetRegisterInfo &,
                                         const TargetRegisterClass &) {
  return true;
}
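
// A custom filter sketch (MyRC stands in for a target-defined register class
// name; the lambda shape is all RegClassFilterFunc requires):
//   RegClassFilterFunc OnlyMyRC = [](const TargetRegisterInfo &TRI,
//                                    const TargetRegisterClass &RC) {
//     return StringRef(TRI.getRegClassName(&RC)) == "MyRC";
//   };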

} // end namespace llvm

#endif // LLVM_CODEGEN_REGALLOCCOMMON_H

//===- llvm/CodeGen/AntiDepBreaker.h - Anti-Dependence Breaking -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AntiDepBreaker class, which implements
// anti-dependence breaking heuristics for post-register-allocation scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANTIDEPBREAKER_H
#define LLVM_CODEGEN_ANTIDEPBREAKER_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <utility>
#include <vector>

namespace llvm {

class RegisterClassInfo;

/// This class works in conjunction with the post-RA scheduler to rename
/// registers to break register anti-dependencies (WAR hazards).
class AntiDepBreaker {
public:
  using DbgValueVector =
      std::vector<std::pair<MachineInstr *, MachineInstr *>>;

  virtual ~AntiDepBreaker();

  /// Initialize anti-dep breaking for a new basic block.
  virtual void StartBlock(MachineBasicBlock *BB) = 0;

  /// Identify anti-dependencies within a basic-block region and break them by
  /// renaming registers. Return the number of anti-dependencies broken.
  virtual unsigned BreakAntiDependencies(const std::vector<SUnit> &SUnits,
                                         MachineBasicBlock::iterator Begin,
                                         MachineBasicBlock::iterator End,
                                         unsigned InsertPosIndex,
                                         DbgValueVector &DbgValues) = 0;

  /// Update liveness information to account for the current
  /// instruction, which will not be scheduled.
  virtual void Observe(MachineInstr &MI, unsigned Count,
                       unsigned InsertPosIndex) = 0;

  /// Finish anti-dep breaking for a basic block.
  virtual void FinishBlock() = 0;

  /// Update DBG_VALUE or DBG_PHI if the dependency breaker is updating
  /// another machine instruction to use NewReg.
  void UpdateDbgValue(MachineInstr &MI, unsigned OldReg, unsigned NewReg) {
    if (MI.isDebugValue()) {
      if (MI.getDebugOperand(0).isReg() &&
          MI.getDebugOperand(0).getReg() == OldReg)
        MI.getDebugOperand(0).setReg(NewReg);
    } else if (MI.isDebugPHI()) {
      if (MI.getOperand(0).isReg() &&
          MI.getOperand(0).getReg() == OldReg)
        MI.getOperand(0).setReg(NewReg);
    } else {
      llvm_unreachable("MI is not DBG_VALUE / DBG_PHI!");
    }
  }

  /// Update all DBG_VALUE instructions that may be affected by the dependency
  /// breaker's update of ParentMI to use NewReg.
  void UpdateDbgValues(const DbgValueVector &DbgValues, MachineInstr *ParentMI,
                       unsigned OldReg, unsigned NewReg) {
    // The following code is dependent on the order in which the DbgValues are
    // constructed in ScheduleDAGInstrs::buildSchedGraph.
    MachineInstr *PrevDbgMI = nullptr;
    for (const auto &DV : make_range(DbgValues.crbegin(), DbgValues.crend())) {
      MachineInstr *PrevMI = DV.second;
      if ((PrevMI == ParentMI) || (PrevMI == PrevDbgMI)) {
        MachineInstr *DbgMI = DV.first;
        UpdateDbgValue(*DbgMI, OldReg, NewReg);
        PrevDbgMI = DbgMI;
      } else if (PrevDbgMI) {
        break; // If no match and already found a DBG_VALUE, we're done.
      }
    }
  }
};

AntiDepBreaker *createAggressiveAntiDepBreaker(
    MachineFunction &MFi, const RegisterClassInfo &RCI,
    TargetSubtargetInfo::RegClassVector &CriticalPathRCs);

AntiDepBreaker *createCriticalAntiDepBreaker(MachineFunction &MFi,
                                             const RegisterClassInfo &RCI);
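
// Construction sketch (MF and RCI are assumed to be owned by the post-RA
// scheduler that drives the breaker):
//   std::unique_ptr<AntiDepBreaker> ADB(createCriticalAntiDepBreaker(MF, RCI));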

} // end namespace llvm

#endif // LLVM_CODEGEN_ANTIDEPBREAKER_H

//===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineFunctionPass class.  MachineFunctionPass's are
// just FunctionPass's, except they operate on machine code as part of a code
// generator.  Because they operate on machine code, not the LLVM
// representation, MachineFunctionPass's are not allowed to modify the LLVM
// representation.  Due to this limitation, the MachineFunctionPass class takes
// care of declaring that no LLVM passes are invalidated.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
#define LLVM_CODEGEN_MACHINEFUNCTIONPASS_H

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Pass.h"

namespace llvm {

/// MachineFunctionPass - This class adapts the FunctionPass interface to
/// allow convenient creation of passes that operate on the MachineFunction
/// representation. Instead of overriding runOnFunction, subclasses
/// override runOnMachineFunction.
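///
/// A minimal subclass sketch (the names are illustrative; `ID` still needs an
/// out-of-line definition and pass registration as usual):
/// \code
///   struct MyMachinePass : public MachineFunctionPass {
///     static char ID;
///     MyMachinePass() : MachineFunctionPass(ID) {}
///     bool runOnMachineFunction(MachineFunction &MF) override {
///       return false; // return true only if MF was modified
///     }
///   };
/// \endcode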
class MachineFunctionPass : public FunctionPass {
public:
  bool doInitialization(Module&) override {
    // Cache the properties info at module-init time so we don't have to
    // construct them for every function.
    RequiredProperties = getRequiredProperties();
    SetProperties = getSetProperties();
    ClearedProperties = getClearedProperties();
    return false;
  }
protected:
  explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}

  /// runOnMachineFunction - This method must be overridden to perform the
  /// desired machine code transformation or analysis.
  ///
  virtual bool runOnMachineFunction(MachineFunction &MF) = 0;

  /// getAnalysisUsage - Subclasses that override getAnalysisUsage
  /// must call this.
  ///
  /// For MachineFunctionPasses, calling AU.preservesCFG() indicates that
  /// the pass does not modify the MachineBasicBlock CFG.
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  virtual MachineFunctionProperties getRequiredProperties() const {
    return MachineFunctionProperties();
  }
  virtual MachineFunctionProperties getSetProperties() const {
    return MachineFunctionProperties();
  }
  virtual MachineFunctionProperties getClearedProperties() const {
    return MachineFunctionProperties();
  }

private:
  MachineFunctionProperties RequiredProperties;
  MachineFunctionProperties SetProperties;
  MachineFunctionProperties ClearedProperties;

  /// createPrinterPass - Get a machine function printer pass.
  Pass *createPrinterPass(raw_ostream &O,
                          const std::string &Banner) const override;

  bool runOnFunction(Function &F) override;
};

} // End llvm namespace

#endif // LLVM_CODEGEN_MACHINEFUNCTIONPASS_H

//===- Construction of codegen pass pipelines ------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CODEGENPASSBUILDER_H
#define LLVM_CODEGEN_CODEGENPASSBUILDER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/CodeGen/ExpandReductions.h"
#include "llvm/CodeGen/MachinePassManager.h"
#include "llvm/CodeGen/PreISelIntrinsicLowering.h"
#include "llvm/CodeGen/ReplaceWithVeclib.h"
#include "llvm/CodeGen/UnreachableBlockElim.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRPrinter/IRPrintingPasses.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/CGPassBuilderOption.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar/ConstantHoisting.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
#include "llvm/Transforms/Scalar/MergeICmps.h"
#include "llvm/Transforms/Scalar/PartiallyInlineLibCalls.h"
#include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/LowerInvoke.h"
#include <cassert>
#include <type_traits>
#include <utility>

namespace llvm {

// FIXME: Dummy target independent passes definitions that have not yet been
// ported to new pass manager. Once they do, remove these.
#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)                      \
  struct PASS_NAME : public PassInfoMixin<PASS_NAME> {                         \
    template <typename... Ts> PASS_NAME(Ts &&...) {}                           \
    PreservedAnalyses run(Function &, FunctionAnalysisManager &) {             \
      return PreservedAnalyses::all();                                         \
    }                                                                          \
  };
#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)                        \
  struct PASS_NAME : public PassInfoMixin<PASS_NAME> {                         \
    template <typename... Ts> PASS_NAME(Ts &&...) {}                           \
    PreservedAnalyses run(Module &, ModuleAnalysisManager &) {                 \
      return PreservedAnalyses::all();                                         \
    }                                                                          \
  };
#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)                \
  struct PASS_NAME : public PassInfoMixin<PASS_NAME> {                         \
    template <typename... Ts> PASS_NAME(Ts &&...) {}                           \
    Error run(Module &, MachineFunctionAnalysisManager &) {                    \
      return Error::success();                                                 \
    }                                                                          \
    PreservedAnalyses run(MachineFunction &,                                   \
                          MachineFunctionAnalysisManager &) {                  \
      llvm_unreachable("this api is to make new PM api happy");                \
    }                                                                          \
    static AnalysisKey Key;                                                    \
  };
#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)              \
  struct PASS_NAME : public PassInfoMixin<PASS_NAME> {                         \
    template <typename... Ts> PASS_NAME(Ts &&...) {}                           \
    PreservedAnalyses run(MachineFunction &,                                   \
                          MachineFunctionAnalysisManager &) {                  \
      return PreservedAnalyses::all();                                         \
    }                                                                          \
    static AnalysisKey Key;                                                    \
  };
#include "MachinePassRegistry.def"

/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c MachinePassRegistry.def file specifies how to construct
/// all of the built-in passes, and those may reference these members during
/// construction.
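///
/// A CRTP derivation sketch (MyTargetPassBuilder is illustrative):
/// \code
///   class MyTargetPassBuilder
///       : public CodeGenPassBuilder<MyTargetPassBuilder> {
///     // Override hooks such as addInstSelector or addPreRegAlloc here.
///   };
/// \endcode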
template <typename DerivedT> class CodeGenPassBuilder {
public:
  explicit CodeGenPassBuilder(LLVMTargetMachine &TM, CGPassBuilderOption Opts,
                              PassInstrumentationCallbacks *PIC)
      : TM(TM), Opt(Opts), PIC(PIC) {
    // Target could set CGPassBuilderOption::MISchedPostRA to true to achieve
    //     substitutePass(&PostRASchedulerID, &PostMachineSchedulerID)

    // Target should override TM.Options.EnableIPRA in their target-specific
    // LLVMTM ctor. See TargetMachine::setGlobalISel for example.
    if (Opt.EnableIPRA)
      TM.Options.EnableIPRA = *Opt.EnableIPRA;

    if (Opt.EnableGlobalISelAbort)
      TM.Options.GlobalISelAbort = *Opt.EnableGlobalISelAbort;

    if (!Opt.OptimizeRegAlloc)
      Opt.OptimizeRegAlloc = getOptLevel() != CodeGenOpt::None;
  }

  Error buildPipeline(ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
                      raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
                      CodeGenFileType FileType) const;

  void registerModuleAnalyses(ModuleAnalysisManager &) const;
  void registerFunctionAnalyses(FunctionAnalysisManager &) const;
  void registerMachineFunctionAnalyses(MachineFunctionAnalysisManager &) const;
  std::pair<StringRef, bool> getPassNameFromLegacyName(StringRef) const;

  void registerAnalyses(MachineFunctionAnalysisManager &MFAM) const {
    registerModuleAnalyses(*MFAM.MAM);
    registerFunctionAnalyses(*MFAM.FAM);
    registerMachineFunctionAnalyses(MFAM);
  }

  PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
    return PIC;
  }

protected:
  template <typename PassT> using has_key_t = decltype(PassT::Key);

  template <typename PassT>
  using is_module_pass_t = decltype(std::declval<PassT &>().run(
      std::declval<Module &>(), std::declval<ModuleAnalysisManager &>()));

  template <typename PassT>
  using is_function_pass_t = decltype(std::declval<PassT &>().run(
      std::declval<Function &>(), std::declval<FunctionAnalysisManager &>()));

  // Function object to maintain state while adding codegen IR passes.
  class AddIRPass {
  public:
    AddIRPass(ModulePassManager &MPM, bool DebugPM, bool Check = true)
        : MPM(MPM) {
      if (Check)
        AddingFunctionPasses = false;
    }
    ~AddIRPass() {
      MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
    }

    // Add Function Pass
    template <typename PassT>
    std::enable_if_t<is_detected<is_function_pass_t, PassT>::value>
    operator()(PassT &&Pass) {
      if (AddingFunctionPasses && !*AddingFunctionPasses)
        AddingFunctionPasses = true;
      FPM.addPass(std::forward<PassT>(Pass));
    }

    // Add Module Pass
    template <typename PassT>
    std::enable_if_t<is_detected<is_module_pass_t, PassT>::value &&
                     !is_detected<is_function_pass_t, PassT>::value>
    operator()(PassT &&Pass) {
      assert((!AddingFunctionPasses || !*AddingFunctionPasses) &&
             "could not add module pass after adding function pass");
      MPM.addPass(std::forward<PassT>(Pass));
    }

  private:
    ModulePassManager &MPM;
    FunctionPassManager FPM;
    // The codegen IR pipeline consists mostly of function passes, with the
    // exception of a few loop and module passes. `AddingFunctionPasses` makes
    // sure that module passes can only be added at the beginning of the
    // pipeline; once we begin adding function passes, we can no longer add
    // module passes. This special-casing introduces fewer adaptor passes. If
    // we ever need to add module passes after function passes, the
    // implementation can be changed to accommodate that.
    std::optional<bool> AddingFunctionPasses;
  };

  // Function object to maintain state while adding codegen machine passes.
  class AddMachinePass {
  public:
    AddMachinePass(MachineFunctionPassManager &PM) : PM(PM) {}

    template <typename PassT> void operator()(PassT &&Pass) {
      static_assert(
          is_detected<has_key_t, PassT>::value,
          "Machine function pass must define a static member variable `Key`.");
      for (auto &C : BeforeCallbacks)
        if (!C(&PassT::Key))
          return;
      PM.addPass(std::forward<PassT>(Pass));
      for (auto &C : AfterCallbacks)
        C(&PassT::Key);
    }

    template <typename PassT> void insertPass(AnalysisKey *ID, PassT Pass) {
      AfterCallbacks.emplace_back(
          [this, ID, Pass = std::move(Pass)](AnalysisKey *PassID) {
            if (PassID == ID)
              this->PM.addPass(std::move(Pass));
          });
    }

    void disablePass(AnalysisKey *ID) {
      BeforeCallbacks.emplace_back(
          [ID](AnalysisKey *PassID) { return PassID != ID; });
    }

    MachineFunctionPassManager releasePM() { return std::move(PM); }

  private:
    MachineFunctionPassManager &PM;
    SmallVector<llvm::unique_function<bool(AnalysisKey *)>, 4> BeforeCallbacks;
    SmallVector<llvm::unique_function<void(AnalysisKey *)>, 4> AfterCallbacks;
  };
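
  // Customization sketch (PassA and PassB stand in for concrete machine
  // passes, each defining the required static `Key` member):
  //   addPass.insertPass(&PassA::Key, PassB()); // run PassB right after PassA
  //   addPass.disablePass(&PassA::Key);         // drop PassA entirely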

  LLVMTargetMachine &TM;
  CGPassBuilderOption Opt;
  PassInstrumentationCallbacks *PIC;

  /// Targets override these hooks to register target-specific analyses.
  void registerTargetAnalysis(ModuleAnalysisManager &) const {}
  void registerTargetAnalysis(FunctionAnalysisManager &) const {}
  void registerTargetAnalysis(MachineFunctionAnalysisManager &) const {}
  std::pair<StringRef, bool> getTargetPassNameFromLegacyName(StringRef) const {
    return {"", false};
  }

  template <typename TMC> TMC &getTM() const { return static_cast<TMC &>(TM); }
  CodeGenOpt::Level getOptLevel() const { return TM.getOptLevel(); }

  /// Check whether or not GlobalISel should abort on error.
  /// When this is disabled, GlobalISel will fall back on SDISel instead of
  /// erroring out.
  bool isGlobalISelAbortEnabled() const {
    return TM.Options.GlobalISelAbort == GlobalISelAbortMode::Enable;
  }

  /// Check whether or not a diagnostic should be emitted when GlobalISel
  /// uses the fallback path. In other words, it will emit a diagnostic
  /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
  bool reportDiagnosticWhenGlobalISelFallback() const {
    return TM.Options.GlobalISelAbort == GlobalISelAbortMode::DisableWithDiag;
  }

  /// addInstSelector - This method should install an instruction selector pass,
  /// which converts from LLVM code to machine instructions.
  Error addInstSelector(AddMachinePass &) const {
    return make_error<StringError>("addInstSelector is not overridden",
                                   inconvertibleErrorCode());
  }

  /// Add passes that optimize instruction level parallelism for out-of-order
  /// targets. These passes are run while the machine code is still in SSA
  /// form, so they can use MachineTraceMetrics to control their heuristics.
  ///
  /// All passes added here should preserve the MachineDominatorTree,
  /// MachineLoopInfo, and MachineTraceMetrics analyses.
  void addILPOpts(AddMachinePass &) const {}

  /// This method may be implemented by targets that want to run passes
  /// immediately before register allocation.
  void addPreRegAlloc(AddMachinePass &) const {}

  /// addPreRewrite - Add passes to the optimized register allocation pipeline
  /// after register allocation is complete, but before virtual registers are
  /// rewritten to physical registers.
  ///
  /// These passes must preserve VirtRegMap and LiveIntervals, and when running
  /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
  /// When these passes run, VirtRegMap contains legal physreg assignments for
  /// all virtual registers.
  ///
  /// Note if the target overloads addRegAssignAndRewriteOptimized, this may not
  /// be honored. This is also not generally used for the fast variant,
  /// where the allocation and rewriting are done in one pass.
  void addPreRewrite(AddMachinePass &) const {}

  /// Add passes to be run immediately after virtual registers are rewritten
  /// to physical registers.
  void addPostRewrite(AddMachinePass &) const {}

  /// This method may be implemented by targets that want to run passes after
  /// register allocation pass pipeline but before prolog-epilog insertion.
  void addPostRegAlloc(AddMachinePass &) const {}

  /// This method may be implemented by targets that want to run passes after
  /// prolog-epilog insertion and before the second instruction scheduling pass.
  void addPreSched2(AddMachinePass &) const {}

  /// This pass may be implemented by targets that want to run passes
  /// immediately before machine code is emitted.
  void addPreEmitPass(AddMachinePass &) const {}

  /// Targets may add passes immediately before machine code is emitted in this
  /// callback. This is called even later than `addPreEmitPass`.
  // FIXME: Rename `addPreEmitPass` to something more sensible given its actual
  // position and remove the `2` suffix here as this callback is what
  // `addPreEmitPass` *should* be but in reality isn't.
  void addPreEmitPass2(AddMachinePass &) const {}

  ///@{
  /// For GlobalISel
  ///

  /// addPreISel - This method should add any "last minute" LLVM->LLVM
  /// passes (which are run just before instruction selector).
  void addPreISel(AddIRPass &) const {
    llvm_unreachable("addPreISel is not overridden");
  }

  /// This method should install an IR translator pass, which converts from
  /// LLVM code to machine instructions with possibly generic opcodes.
  Error addIRTranslator(AddMachinePass &) const {
    return make_error<StringError>("addIRTranslator is not overridden",
                                   inconvertibleErrorCode());
  }

  /// This method may be implemented by targets that want to run passes
  /// immediately before legalization.
  void addPreLegalizeMachineIR(AddMachinePass &) const {}

  /// This method should install a legalize pass, which converts the instruction
  /// sequence into one that can be selected by the target.
  Error addLegalizeMachineIR(AddMachinePass &) const {
    return make_error<StringError>("addLegalizeMachineIR is not overridden",
                                   inconvertibleErrorCode());
  }

  /// This method may be implemented by targets that want to run passes
  /// immediately before the register bank selection.
  void addPreRegBankSelect(AddMachinePass &) const {}

  /// This method should install a register bank selector pass, which
  /// assigns register banks to virtual registers without a register
  /// class or register banks.
  Error addRegBankSelect(AddMachinePass &) const {
    return make_error<StringError>("addRegBankSelect is not overridden",
                                   inconvertibleErrorCode());
  }

  /// This method may be implemented by targets that want to run passes
  /// immediately before the (global) instruction selection.
  void addPreGlobalInstructionSelect(AddMachinePass &) const {}

  /// This method should install a (global) instruction selector pass, which
  /// converts possibly generic instructions to fully target-specific
  /// instructions, thereby constraining all generic virtual registers to
  /// register classes.
  Error addGlobalInstructionSelect(AddMachinePass &) const {
    return make_error<StringError>(
        "addGlobalInstructionSelect is not overridden",
        inconvertibleErrorCode());
  }
  ///@}

  /// High level function that adds all passes necessary to go from llvm IR
  /// representation to the MI representation.
  /// Adds IR based lowering and target specific optimization passes and finally
  /// the core instruction selection passes.
  void addISelPasses(AddIRPass &) const;

  /// Add the actual instruction selection passes. This does not include
  /// preparation passes on IR.
  Error addCoreISelPasses(AddMachinePass &) const;

  /// Add the complete, standard set of LLVM CodeGen passes.
  /// Fully developed targets will not generally override this.
  Error addMachinePasses(AddMachinePass &) const;

  /// Add passes to lower exception handling for the code generator.
  void addPassesToHandleExceptions(AddIRPass &) const;

  /// Add common target configurable passes that perform LLVM IR to IR
  /// transforms following machine independent optimization.
  void addIRPasses(AddIRPass &) const;

  /// Add pass to prepare the LLVM IR for code generation. This should be done
  /// before exception handling preparation passes.
  void addCodeGenPrepare(AddIRPass &) const;

  /// Add common passes that perform LLVM IR to IR transforms in preparation for
  /// instruction selection.
  void addISelPrepare(AddIRPass &) const;

  /// Methods with trivial inline returns are convenient points in the common
  /// codegen pass pipeline where targets may insert passes. Methods with
  /// out-of-line standard implementations are major CodeGen stages called by
  /// addMachinePasses. Some targets may override major stages when inserting
  /// passes is insufficient, but maintaining overridden stages is more work.
  ///

  /// addMachineSSAOptimization - Add standard passes that optimize machine
  /// instructions in SSA form.
  void addMachineSSAOptimization(AddMachinePass &) const;

  /// addFastRegAlloc - Add the minimum set of target-independent passes that
  /// are required for fast register allocation.
  Error addFastRegAlloc(AddMachinePass &) const;

  /// addOptimizedRegAlloc - Add passes related to register allocation.
  /// LLVMTargetMachine provides standard regalloc passes for most targets.
  void addOptimizedRegAlloc(AddMachinePass &) const;

  /// Add passes that optimize machine instructions after register allocation.
  void addMachineLateOptimization(AddMachinePass &) const;

  /// addGCPasses - Add late codegen passes that analyze code for garbage
  /// collection. (In the legacy TargetPassConfig this hook returned true if
  /// GC info should be printed after these passes; here it returns void.)
  void addGCPasses(AddMachinePass &) const {}

  /// Add standard basic block placement passes.
  void addBlockPlacement(AddMachinePass &) const;

  using CreateMCStreamer =
      std::function<Expected<std::unique_ptr<MCStreamer>>(MCContext &)>;
  void addAsmPrinter(AddMachinePass &, CreateMCStreamer) const {
    llvm_unreachable("addAsmPrinter is not overridden");
  }

  /// Utilities for targets to add passes to the pass manager.
  ///

  /// createTargetRegisterAllocator - Create the register allocator pass for
  /// this target at the current optimization level.
  void addTargetRegisterAllocator(AddMachinePass &, bool Optimized) const;

  /// addMachinePasses helper to create the target-selected or overridden
  /// regalloc pass.
  void addRegAllocPass(AddMachinePass &, bool Optimized) const;

  /// Add core register allocator passes which do the actual register assignment
  /// and rewriting. \returns true if any passes were added.
  Error addRegAssignmentFast(AddMachinePass &) const;
  Error addRegAssignmentOptimized(AddMachinePass &) const;

private:
  DerivedT &derived() { return static_cast<DerivedT &>(*this); }
  const DerivedT &derived() const {
    return static_cast<const DerivedT &>(*this);
  }
};

template <typename Derived>
Error CodeGenPassBuilder<Derived>::buildPipeline(
    ModulePassManager &MPM, MachineFunctionPassManager &MFPM,
    raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType) const {
  AddIRPass addIRPass(MPM, Opt.DebugPM);
  addISelPasses(addIRPass);

  AddMachinePass addPass(MFPM);
  if (auto Err = addCoreISelPasses(addPass))
    return std::move(Err);

  if (auto Err = derived().addMachinePasses(addPass))
    return std::move(Err);

  derived().addAsmPrinter(
      addPass, [this, &Out, DwoOut, FileType](MCContext &Ctx) {
        return this->TM.createMCStreamer(Out, DwoOut, FileType, Ctx);
      });

  addPass(FreeMachineFunctionPass());
  return Error::success();
}

static inline AAManager registerAAAnalyses() {
  AAManager AA;

  // The order in which these are registered determines their priority when
  // being queried.

  // Basic AliasAnalysis support.
  // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
  // BasicAliasAnalysis wins if they disagree. This is intended to help
  // support "obvious" type-punning idioms.
  AA.registerFunctionAnalysis<TypeBasedAA>();
  AA.registerFunctionAnalysis<ScopedNoAliasAA>();
  AA.registerFunctionAnalysis<BasicAA>();

  return AA;
}

template <typename Derived>
void CodeGenPassBuilder<Derived>::registerModuleAnalyses(
    ModuleAnalysisManager &MAM) const {
#define MODULE_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)                          \
  MAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
#include "MachinePassRegistry.def"
  derived().registerTargetAnalysis(MAM);
}

template <typename Derived>
void CodeGenPassBuilder<Derived>::registerFunctionAnalyses(
    FunctionAnalysisManager &FAM) const {
  FAM.registerPass([this] { return registerAAAnalyses(); });

#define FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)                        \
  FAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
#include "MachinePassRegistry.def"
  derived().registerTargetAnalysis(FAM);
}

template <typename Derived>
void CodeGenPassBuilder<Derived>::registerMachineFunctionAnalyses(
    MachineFunctionAnalysisManager &MFAM) const {
#define MACHINE_FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)                \
  MFAM.registerPass([&] { return PASS_NAME CONSTRUCTOR; });
#include "MachinePassRegistry.def"
  derived().registerTargetAnalysis(MFAM);
}

// FIXME: For the new PM, using the pass name directly on the command line
// seems preferable.
// Translate a stringified pass name to its old command-line name. Returns the
// matching legacy name and a boolean value indicating whether the pass is a
// machine pass.
template <typename Derived>
std::pair<StringRef, bool>
CodeGenPassBuilder<Derived>::getPassNameFromLegacyName(StringRef Name) const {
  std::pair<StringRef, bool> Ret;
  if (Name.empty())
    return Ret;

#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)                            \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, false};
#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)                      \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, false};
#define MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)                              \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, false};
#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)                        \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, false};
#define MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)                      \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, true};
#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)                \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, true};
#define MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)                    \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, true};
#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)              \
  if (Name == NAME)                                                            \
    Ret = {#PASS_NAME, true};
#include "llvm/CodeGen/MachinePassRegistry.def"

  if (Ret.first.empty())
    Ret = derived().getTargetPassNameFromLegacyName(Name);

  if (Ret.first.empty())
    report_fatal_error(Twine('\"') + Twine(Name) +
                       Twine("\" pass could not be found."));

  return Ret;
}
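
// Usage sketch (`Builder` and the pass name are illustrative):
//   auto [PassName, IsMachinePass] =
//       Builder.getPassNameFromLegacyName("some-legacy-pass-name");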

template <typename Derived>
void CodeGenPassBuilder<Derived>::addISelPasses(AddIRPass &addPass) const {
  if (TM.useEmulatedTLS())
    addPass(LowerEmuTLSPass());

  addPass(PreISelIntrinsicLoweringPass(TM));

  derived().addIRPasses(addPass);
  derived().addCodeGenPrepare(addPass);
  addPassesToHandleExceptions(addPass);
  derived().addISelPrepare(addPass);
}

/// Add common target configurable passes that perform LLVM IR to IR transforms
/// following machine independent optimization.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addIRPasses(AddIRPass &addPass) const {
  // Before running any passes, run the verifier to determine if the input
  // coming from the front-end and/or optimizer is valid.
  if (!Opt.DisableVerify)
    addPass(VerifierPass());

  // Run loop strength reduction before anything else.
  if (getOptLevel() != CodeGenOpt::None && !Opt.DisableLSR) {
    addPass(createFunctionToLoopPassAdaptor(
        LoopStrengthReducePass(), /*UseMemorySSA*/ true, Opt.DebugPM));
    // FIXME: use -stop-after so we could remove PrintLSR
    if (Opt.PrintLSR)
      addPass(PrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
  }

  if (getOptLevel() != CodeGenOpt::None) {
    // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
    // loads and compares. ExpandMemCmpPass then tries to expand those calls
    // into optimally-sized loads and compares. The transforms are enabled by a
    // target lowering hook.
    if (!Opt.DisableMergeICmps)
      addPass(MergeICmpsPass());
    addPass(ExpandMemCmpPass());
  }

  // Run GC lowering passes for builtin collectors
  // TODO: add a pass insertion point here
  addPass(GCLoweringPass());
  addPass(ShadowStackGCLoweringPass());
  addPass(LowerConstantIntrinsicsPass());

  // Make sure that no unreachable blocks are instruction selected.
  addPass(UnreachableBlockElimPass());

  // Prepare expensive constants for SelectionDAG.
  if (getOptLevel() != CodeGenOpt::None && !Opt.DisableConstantHoisting)
    addPass(ConstantHoistingPass());

  // Replace calls to LLVM intrinsics (e.g., exp, log) operating on vector
  // operands with calls to the corresponding functions in a vector library.
  if (getOptLevel() != CodeGenOpt::None)
    addPass(ReplaceWithVeclib());

  if (getOptLevel() != CodeGenOpt::None && !Opt.DisablePartialLibcallInlining)
    addPass(PartiallyInlineLibCallsPass());

  // Instrument function entry and exit, e.g. with calls to mcount().
  addPass(EntryExitInstrumenterPass(/*PostInlining=*/true));

  // Add scalarization of target's unsupported masked memory intrinsics pass.
  // the unsupported intrinsic will be replaced with a chain of basic blocks,
  // that stores/loads element one-by-one if the appropriate mask bit is set.
  addPass(ScalarizeMaskedMemIntrinPass());

  // Expand reduction intrinsics into shuffle sequences if the target wants to.
  addPass(ExpandReductionsPass());

  // Convert conditional moves to conditional jumps when profitable.
  if (getOptLevel() != CodeGenOpt::None && !Opt.DisableSelectOptimize)
    addPass(SelectOptimizePass());
}

/// Turn exception handling constructs into something the code generators can
/// handle.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addPassesToHandleExceptions(
    AddIRPass &addPass) const {
  const MCAsmInfo *MCAI = TM.getMCAsmInfo();
  assert(MCAI && "No MCAsmInfo");
  switch (MCAI->getExceptionHandlingType()) {
  case ExceptionHandling::SjLj:
    // SjLj piggy-backs on dwarf for this bit; the cleanups done apply to
    // both. Dwarf EH prepare needs to be run after SjLj prepare. Otherwise,
    // catch info can get misplaced when a selector ends up more than one block
    // removed from the parent invoke(s). This could happen when a landing
    // pad is shared by multiple invokes and is also a target of a normal
    // edge from elsewhere.
    addPass(SjLjEHPreparePass());
    [[fallthrough]];
  case ExceptionHandling::DwarfCFI:
  case ExceptionHandling::ARM:
  case ExceptionHandling::AIX:
    addPass(DwarfEHPass(getOptLevel()));
    break;
  case ExceptionHandling::WinEH:
    // We support using both GCC-style and MSVC-style exceptions on Windows, so
    // add both preparation passes. Each pass will only actually run if it
    // recognizes the personality function.
    addPass(WinEHPass());
    addPass(DwarfEHPass(getOptLevel()));
    break;
  case ExceptionHandling::Wasm:
    // Wasm EH uses Windows EH instructions, but it does not need to demote PHIs
    // on catchpads and cleanuppads because it does not outline them into
    // funclets. Catchswitch blocks are not lowered in SelectionDAG, so we
    // should remove PHIs there.
    addPass(WinEHPass(/*DemoteCatchSwitchPHIOnly=*/false));
    addPass(WasmEHPass());
    break;
  case ExceptionHandling::None:
    addPass(LowerInvokePass());

    // The lower invoke pass may create unreachable code. Remove it.
    addPass(UnreachableBlockElimPass());
    break;
  }
}

/// Add pass to prepare the LLVM IR for code generation. This should be done
/// before exception handling preparation passes.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addCodeGenPrepare(AddIRPass &addPass) const {
  if (getOptLevel() != CodeGenOpt::None && !Opt.DisableCGP)
    addPass(CodeGenPreparePass());
  // TODO: Default ctor'd RewriteSymbolPass is no-op.
  // addPass(RewriteSymbolPass());
}

/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addISelPrepare(AddIRPass &addPass) const {
  derived().addPreISel(addPass);

  addPass(CallBrPrepare());
  // Add both the safe stack and the stack protection passes: each of them will
  // only protect functions that have corresponding attributes.
  addPass(SafeStackPass());
  addPass(StackProtectorPass());

  if (Opt.PrintISelInput)
    addPass(PrintFunctionPass(dbgs(),
                              "\n\n*** Final LLVM Code input to ISel ***\n"));

  // All passes which modify the LLVM IR are now complete; run the verifier
  // to ensure that the IR is valid.
  if (!Opt.DisableVerify)
    addPass(VerifierPass());
}

template <typename Derived>
Error CodeGenPassBuilder<Derived>::addCoreISelPasses(
    AddMachinePass &addPass) const {
  // Enable FastISel with -fast-isel, but allow that to be overridden.
  TM.setO0WantsFastISel(Opt.EnableFastISelOption.value_or(true));

  // Determine an instruction selector.
  enum class SelectorType { SelectionDAG, FastISel, GlobalISel };
  SelectorType Selector;

  if (Opt.EnableFastISelOption && *Opt.EnableFastISelOption == true)
    Selector = SelectorType::FastISel;
  else if ((Opt.EnableGlobalISelOption &&
            *Opt.EnableGlobalISelOption == true) ||
           (TM.Options.EnableGlobalISel &&
            (!Opt.EnableGlobalISelOption ||
             *Opt.EnableGlobalISelOption == false)))
    Selector = SelectorType::GlobalISel;
  else if (TM.getOptLevel() == CodeGenOpt::None && TM.getO0WantsFastISel())
    Selector = SelectorType::FastISel;
  else
    Selector = SelectorType::SelectionDAG;

  // Consistently set TM.Options.EnableFastISel and EnableGlobalISel.
  if (Selector == SelectorType::FastISel) {
    TM.setFastISel(true);
    TM.setGlobalISel(false);
  } else if (Selector == SelectorType::GlobalISel) {
    TM.setFastISel(false);
    TM.setGlobalISel(true);
  }

  // Add instruction selector passes.
  if (Selector == SelectorType::GlobalISel) {
    if (auto Err = derived().addIRTranslator(addPass))
      return std::move(Err);

    derived().addPreLegalizeMachineIR(addPass);

    if (auto Err = derived().addLegalizeMachineIR(addPass))
      return std::move(Err);

    // Before running the register bank selector, ask the target if it
    // wants to run some passes.
    derived().addPreRegBankSelect(addPass);

    if (auto Err = derived().addRegBankSelect(addPass))
      return std::move(Err);

    derived().addPreGlobalInstructionSelect(addPass);

    if (auto Err = derived().addGlobalInstructionSelect(addPass))
      return std::move(Err);

    // Pass to reset the MachineFunction if the ISel failed.
    addPass(ResetMachineFunctionPass(reportDiagnosticWhenGlobalISelFallback(),
                                     isGlobalISelAbortEnabled()));

    // Provide a fallback path when we do not want to abort on
    // not-yet-supported input.
    if (!isGlobalISelAbortEnabled())
      if (auto Err = derived().addInstSelector(addPass))
        return std::move(Err);

  } else if (auto Err = derived().addInstSelector(addPass))
    return std::move(Err);

  // Expand pseudo-instructions emitted by ISel. Don't run the verifier before
  // FinalizeISel.
  addPass(FinalizeISelPass());

  // // Print the instruction selected machine code...
  // printAndVerify("After Instruction Selection");

  return Error::success();
}

/// Add the complete set of target-independent postISel code generator passes.
///
/// This can be read as the standard order of major LLVM CodeGen stages. Stages
/// with nontrivial configuration or multiple passes are broken out below in
/// add%Stage routines.
///
/// Any CodeGenPassBuilder<Derived>::addXX routine may be overridden by the
/// Target. The addPre/Post methods with empty header implementations allow
/// injecting target-specific fixups just before or after major stages.
/// Additionally, targets have the flexibility to change pass order within a
/// stage by overriding default implementation of add%Stage routines below. Each
/// technique has maintainability tradeoffs because alternate pass orders are
/// not well supported. addPre/Post works better if the target pass is easily
/// tied to a common pass. But if it has subtle dependencies on multiple passes,
/// the target should override the stage instead.
template <typename Derived>
Error CodeGenPassBuilder<Derived>::addMachinePasses(
    AddMachinePass &addPass) const {
  // Add passes that optimize machine instructions in SSA form.
  if (getOptLevel() != CodeGenOpt::None) {
    derived().addMachineSSAOptimization(addPass);
  } else {
    // If the target requests it, assign local variables to stack slots relative
    // to one another and simplify frame index references where possible.
    addPass(LocalStackSlotPass());
  }

  if (TM.Options.EnableIPRA)
    addPass(RegUsageInfoPropagationPass());

  // Run pre-ra passes.
  derived().addPreRegAlloc(addPass);

  // Run register allocation and passes that are tightly coupled with it,
  // including phi elimination and scheduling.
  if (*Opt.OptimizeRegAlloc) {
    derived().addOptimizedRegAlloc(addPass);
  } else {
    if (auto Err = derived().addFastRegAlloc(addPass))
      return Err;
  }

  // Run post-ra passes.
  derived().addPostRegAlloc(addPass);

  addPass(RemoveRedundantDebugValuesPass());

  // Insert prolog/epilog code.  Eliminate abstract frame index references...
  if (getOptLevel() != CodeGenOpt::None) {
    addPass(PostRAMachineSinkingPass());
    addPass(ShrinkWrapPass());
  }

  addPass(PrologEpilogInserterPass());

  /// Add passes that optimize machine instructions after register allocation.
  if (getOptLevel() != CodeGenOpt::None)
    derived().addMachineLateOptimization(addPass);

  // Expand pseudo instructions before second scheduling pass.
  addPass(ExpandPostRAPseudosPass());

  // Run pre-sched2 passes.
  derived().addPreSched2(addPass);

  if (Opt.EnableImplicitNullChecks)
    addPass(ImplicitNullChecksPass());

  // Second pass scheduler.
  // Let Target optionally insert this pass by itself at some other
  // point.
  if (getOptLevel() != CodeGenOpt::None &&
      !TM.targetSchedulesPostRAScheduling()) {
    if (Opt.MISchedPostRA)
      addPass(PostMachineSchedulerPass());
    else
      addPass(PostRASchedulerPass());
  }

  // GC
  derived().addGCPasses(addPass);

  // Basic block placement.
  if (getOptLevel() != CodeGenOpt::None)
    derived().addBlockPlacement(addPass);

  // Insert before XRay Instrumentation.
  addPass(FEntryInserterPass());

  addPass(XRayInstrumentationPass());
  addPass(PatchableFunctionPass());

  derived().addPreEmitPass(addPass);

  if (TM.Options.EnableIPRA)
    // Collect register usage information and produce a register mask of
    // clobbered registers, to be used to optimize call sites.
    addPass(RegUsageInfoCollectorPass());

  addPass(FuncletLayoutPass());

  addPass(StackMapLivenessPass());
  addPass(LiveDebugValuesPass());
  addPass(MachineSanitizerBinaryMetadata());

  if (TM.Options.EnableMachineOutliner && getOptLevel() != CodeGenOpt::None &&
      Opt.EnableMachineOutliner != RunOutliner::NeverOutline) {
    bool RunOnAllFunctions =
        (Opt.EnableMachineOutliner == RunOutliner::AlwaysOutline);
    bool AddOutliner = RunOnAllFunctions || TM.Options.SupportsDefaultOutlining;
    if (AddOutliner)
      addPass(MachineOutlinerPass(RunOnAllFunctions));
  }

  // Add passes that directly emit MI after all other MI passes.
  derived().addPreEmitPass2(addPass);

  return Error::success();
}
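
// A hedged sketch of the override mechanism described above. The names
// MyTargetCodeGenPassBuilder and MyTargetBranchFixupPass are hypothetical;
// a real target defines its builder in target-specific code:
//
//   class MyTargetCodeGenPassBuilder
//       : public CodeGenPassBuilder<MyTargetCodeGenPassBuilder> {
//   public:
//     // Inject a target fixup just before final emission.
//     void addPreEmitPass(AddMachinePass &addPass) const {
//       addPass(MyTargetBranchFixupPass());
//     }
//   };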

/// Add passes that optimize machine instructions in SSA form.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addMachineSSAOptimization(
    AddMachinePass &addPass) const {
  // Pre-ra tail duplication.
  addPass(EarlyTailDuplicatePass());

  // Optimize PHIs before DCE: removing dead PHI cycles may make more
  // instructions dead.
  addPass(OptimizePHIsPass());

  // This pass merges large allocas. StackSlotColoring is a different pass
  // which merges spill slots.
  addPass(StackColoringPass());

  // If the target requests it, assign local variables to stack slots relative
  // to one another and simplify frame index references where possible.
  addPass(LocalStackSlotPass());

  // With optimization, dead code should already be eliminated. However
  // there is one known exception: lowered code for arguments that are only
  // used by tail calls, where the tail calls reuse the incoming stack
  // arguments directly (see t11 in test/CodeGen/X86/sibcall.ll).
  addPass(DeadMachineInstructionElimPass());

  // Allow targets to insert passes that improve instruction level parallelism,
  // like if-conversion. Such passes will typically need dominator trees and
  // loop info, just like LICM and CSE below.
  derived().addILPOpts(addPass);

  addPass(EarlyMachineLICMPass());
  addPass(MachineCSEPass());

  addPass(MachineSinkingPass());

  addPass(PeepholeOptimizerPass());
  // Clean-up the dead code that may have been generated by peephole
  // rewriting.
  addPass(DeadMachineInstructionElimPass());
}

//===---------------------------------------------------------------------===//
/// Register Allocation Pass Configuration
//===---------------------------------------------------------------------===//

/// Instantiate the default register allocator pass for this target for either
/// the optimized or unoptimized allocation path. This will be added to the pass
/// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc
/// in the optimized case.
///
/// A target that uses the standard regalloc pass order for fast or optimized
/// allocation may still override this for per-target regalloc
/// selection. But -regalloc=... always takes precedence.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addTargetRegisterAllocator(
    AddMachinePass &addPass, bool Optimized) const {
  if (Optimized)
    addPass(RAGreedyPass());
  else
    addPass(RAFastPass());
}
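
// Sketch of a per-target override of the hook above (the PBQP preference is
// purely illustrative, not any particular target's actual choice):
//
//   void MyTargetCodeGenPassBuilder::addTargetRegisterAllocator(
//       AddMachinePass &addPass, bool Optimized) const {
//     if (Optimized)
//       addPass(RAPBQPPass());
//     else
//       addPass(RAFastPass());
//   }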

/// Find and instantiate the register allocation pass requested by this target
/// at the current optimization level.  Different register allocators are
/// defined as separate passes because they may require different analysis.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addRegAllocPass(AddMachinePass &addPass,
                                                  bool Optimized) const {
  if (Opt.RegAlloc == RegAllocType::Default)
    // With no -regalloc= override, ask the target for a regalloc pass.
    derived().addTargetRegisterAllocator(addPass, Optimized);
  else if (Opt.RegAlloc == RegAllocType::Basic)
    addPass(RABasicPass());
  else if (Opt.RegAlloc == RegAllocType::Fast)
    addPass(RAFastPass());
  else if (Opt.RegAlloc == RegAllocType::Greedy)
    addPass(RAGreedyPass());
  else if (Opt.RegAlloc == RegAllocType::PBQP)
    addPass(RAPBQPPass());
  else
    llvm_unreachable("unknown register allocator type");
}

template <typename Derived>
Error CodeGenPassBuilder<Derived>::addRegAssignmentFast(
    AddMachinePass &addPass) const {
  if (Opt.RegAlloc != RegAllocType::Default &&
      Opt.RegAlloc != RegAllocType::Fast)
    return make_error<StringError>(
        "Must use fast (default) register allocator for unoptimized regalloc.",
        inconvertibleErrorCode());

  addRegAllocPass(addPass, false);
  return Error::success();
}

template <typename Derived>
Error CodeGenPassBuilder<Derived>::addRegAssignmentOptimized(
    AddMachinePass &addPass) const {
  // Add the selected register allocation pass.
  addRegAllocPass(addPass, true);

  // Allow targets to change the register assignments before rewriting.
  derived().addPreRewrite(addPass);

  // Finally rewrite virtual registers.
  addPass(VirtRegRewriterPass());
  // Perform stack slot coloring and post-ra machine LICM.
  //
  // FIXME: Re-enable coloring with register when it's capable of adding
  // kill markers.
  addPass(StackSlotColoringPass());

  return Error::success();
}

/// Add the minimum set of target-independent passes that are required for
/// register allocation. No coalescing or scheduling.
template <typename Derived>
Error CodeGenPassBuilder<Derived>::addFastRegAlloc(
    AddMachinePass &addPass) const {
  addPass(PHIEliminationPass());
  addPass(TwoAddressInstructionPass());
  return derived().addRegAssignmentFast(addPass);
}

/// Add standard target-independent passes that are tightly coupled with
/// optimized register allocation, including coalescing, machine instruction
/// scheduling, and register allocation itself.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addOptimizedRegAlloc(
    AddMachinePass &addPass) const {
  addPass(DetectDeadLanesPass());

  addPass(ProcessImplicitDefsPass());

  // Edge splitting is smarter with machine loop info.
  addPass(PHIEliminationPass());

  // Eventually, we want to run LiveIntervals before PHI elimination.
  if (Opt.EarlyLiveIntervals)
    addPass(LiveIntervalsPass());

  addPass(TwoAddressInstructionPass());
  addPass(RegisterCoalescerPass());

  // The machine scheduler may accidentally create disconnected components
  // when moving subregister definitions around; avoid this by splitting them
  // into separate vregs beforehand. Splitting can also improve register
  // allocation quality.
  addPass(RenameIndependentSubregsPass());

  // PreRA instruction scheduling.
  addPass(MachineSchedulerPass());

  // Note: Error converts to true when it holds a failure, so the result must
  // be consumed explicitly; run the cleanup passes only on success.
  if (Error Err = derived().addRegAssignmentOptimized(addPass)) {
    consumeError(std::move(Err));
    return;
  }

  // Allow targets to expand pseudo instructions depending on the choice of
  // registers before MachineCopyPropagation.
  derived().addPostRewrite(addPass);

  // Copy propagate to forward register uses and try to eliminate COPYs that
  // were not coalesced.
  addPass(MachineCopyPropagationPass());

  // Run post-ra machine LICM to hoist reloads / remats.
  //
  // FIXME: can this move into MachineLateOptimization?
  addPass(MachineLICMPass());
}

//===---------------------------------------------------------------------===//
/// Post RegAlloc Pass Configuration
//===---------------------------------------------------------------------===//

/// Add passes that optimize machine instructions after register allocation.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addMachineLateOptimization(
    AddMachinePass &addPass) const {
  // Branch folding must be run after regalloc and prolog/epilog insertion.
  addPass(BranchFolderPass());

  // Tail duplication.
  // Note that duplicating tails increases code size and degrades performance
  // on targets that require structured control flow; it can also make the CFG
  // irreducible. Thus we disable it for such targets.
  if (!TM.requiresStructuredCFG())
    addPass(TailDuplicatePass());

  // Cleanup of redundant (identical) address/immediate loads.
  addPass(MachineLateInstrsCleanupPass());

  // Copy propagation.
  addPass(MachineCopyPropagationPass());
}

/// Add standard basic block placement passes.
template <typename Derived>
void CodeGenPassBuilder<Derived>::addBlockPlacement(
    AddMachinePass &addPass) const {
  addPass(MachineBlockPlacementPass());
  // Run a separate pass to collect block placement statistics.
  if (Opt.EnableBlockPlacementStats)
    addPass(MachineBlockPlacementStatsPass());
}

} // namespace llvm

#endif // LLVM_CODEGEN_CODEGENPASSBUILDER_H
//===- llvm/CallingConvLower.h - Calling Conventions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the CCState and CCValAssign classes, used for lowering
// and implementing calling conventions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
#define LLVM_CODEGEN_CALLINGCONVLOWER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Support/Alignment.h"
#include <variant>
#include <vector>

namespace llvm {

class CCState;
class MachineFunction;
class MVT;
class TargetRegisterInfo;

/// CCValAssign - Represent assignment of one arg/retval to a location.
class CCValAssign {
public:
  enum LocInfo {
    Full,      // The value fills the full location.
    SExt,      // The value is sign extended in the location.
    ZExt,      // The value is zero extended in the location.
    AExt,      // The value is extended with undefined upper bits.
    SExtUpper, // The value is in the upper bits of the location and should be
               // sign extended when retrieved.
    ZExtUpper, // The value is in the upper bits of the location and should be
               // zero extended when retrieved.
    AExtUpper, // The value is in the upper bits of the location and should be
               // extended with undefined upper bits when retrieved.
    BCvt,      // The value is bit-converted in the location.
    Trunc,     // The value is truncated in the location.
    VExt,      // The value is vector-widened in the location.
               // FIXME: Not implemented yet. Code that uses AExt to mean
               // vector-widen should be fixed to use VExt instead.
    FPExt,     // The floating-point value is fp-extended in the location.
    Indirect   // The location contains pointer to the value.
    // TODO: a subset of the value is in the location.
  };

private:
  // Holds one of:
  // - the register that the value is assigned to;
  // - the memory offset at which the value resides;
  // - additional information about pending location; the exact interpretation
  //   of the data is target-dependent.
  std::variant<Register, int64_t, unsigned> Data;

  /// ValNo - This is the value number being assigned (e.g. an argument number).
  unsigned ValNo;

  /// isCustom - True if this arg/retval requires special handling.
  unsigned isCustom : 1;

  /// Information about how the value is assigned.
  LocInfo HTP : 6;

  /// ValVT - The type of the value being assigned.
  MVT ValVT;

  /// LocVT - The type of the location being assigned to.
  MVT LocVT;

  CCValAssign(LocInfo HTP, unsigned ValNo, MVT ValVT, MVT LocVT, bool IsCustom)
      : ValNo(ValNo), isCustom(IsCustom), HTP(HTP), ValVT(ValVT), LocVT(LocVT) {
  }

public:
  static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo,
                            MVT LocVT, LocInfo HTP, bool IsCustom = false) {
    CCValAssign Ret(HTP, ValNo, ValVT, LocVT, IsCustom);
    Ret.Data = Register(RegNo);
    return Ret;
  }

  static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo,
                                  MVT LocVT, LocInfo HTP) {
    return getReg(ValNo, ValVT, RegNo, LocVT, HTP, /*IsCustom=*/true);
  }

  static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset,
                            MVT LocVT, LocInfo HTP, bool IsCustom = false) {
    CCValAssign Ret(HTP, ValNo, ValVT, LocVT, IsCustom);
    Ret.Data = Offset;
    return Ret;
  }

  static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset,
                                  MVT LocVT, LocInfo HTP) {
    return getMem(ValNo, ValVT, Offset, LocVT, HTP, /*IsCustom=*/true);
  }

  static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT,
                                LocInfo HTP, unsigned ExtraInfo = 0) {
    CCValAssign Ret(HTP, ValNo, ValVT, LocVT, false);
    Ret.Data = ExtraInfo;
    return Ret;
  }

  void convertToReg(unsigned RegNo) { Data = Register(RegNo); }

  void convertToMem(int64_t Offset) { Data = Offset; }

  unsigned getValNo() const { return ValNo; }
  MVT getValVT() const { return ValVT; }

  bool isRegLoc() const { return std::holds_alternative<Register>(Data); }
  bool isMemLoc() const { return std::holds_alternative<int64_t>(Data); }
  bool isPendingLoc() const { return std::holds_alternative<unsigned>(Data); }

  bool needsCustom() const { return isCustom; }

  Register getLocReg() const { return std::get<Register>(Data); }
  int64_t getLocMemOffset() const { return std::get<int64_t>(Data); }
  unsigned getExtraInfo() const { return std::get<unsigned>(Data); }

  MVT getLocVT() const { return LocVT; }

  LocInfo getLocInfo() const { return HTP; }
  bool isExtInLoc() const {
    return (HTP == AExt || HTP == SExt || HTP == ZExt);
  }

  bool isUpperBitsInLoc() const {
    return HTP == AExtUpper || HTP == SExtUpper || HTP == ZExtUpper;
  }
};
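
// Example (editorial sketch) of building and querying an assignment; the
// physical register number 1 is illustrative:
//
//   CCValAssign VA = CCValAssign::getReg(/*ValNo=*/0, MVT::i32, /*RegNo=*/1,
//                                        MVT::i32, CCValAssign::Full);
//   assert(VA.isRegLoc() && !VA.needsCustom());
//   Register R = VA.getLocReg(); // the assigned physical register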

/// Describes a register that needs to be forwarded from the prologue to a
/// musttail call.
struct ForwardedRegister {
  ForwardedRegister(Register VReg, MCPhysReg PReg, MVT VT)
      : VReg(VReg), PReg(PReg), VT(VT) {}
  Register VReg;
  MCPhysReg PReg;
  MVT VT;
};

/// CCAssignFn - This function assigns a location for Val, updating State to
/// reflect the change.  It returns 'true' if it failed to handle Val.
typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
                        MVT LocVT, CCValAssign::LocInfo LocInfo,
                        ISD::ArgFlagsTy ArgFlags, CCState &State);

/// CCCustomFn - This function assigns a location for Val, possibly updating
/// all args to reflect changes and indicates if it handled it. It must set
/// isCustom if it handles the arg and returns true.
typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
                        MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                        ISD::ArgFlagsTy &ArgFlags, CCState &State);
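
// A minimal hand-written CCAssignFn sketch. Real targets normally generate
// these from TableGen calling-convention descriptions; the register numbers
// and the 4-byte slot size below are placeholders:
//
//   static bool CC_Sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
//                         CCValAssign::LocInfo LocInfo,
//                         ISD::ArgFlagsTy ArgFlags, CCState &State) {
//     static const MCPhysReg ArgRegs[] = {1, 2, 3, 4}; // placeholder GPRs
//     if (MCPhysReg Reg = State.AllocateReg(ArgRegs)) {
//       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
//       return false; // assigned to a register
//     }
//     int64_t Offset = State.AllocateStack(4, Align(4));
//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
//     return false; // assigned to the stack
//   }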

/// CCState - This class holds information needed while lowering arguments and
/// return values.  It captures which registers are already assigned and which
/// stack slots are used.  It provides accessors to allocate these values.
class CCState {
private:
  CallingConv::ID CallingConv;
  bool IsVarArg;
  bool AnalyzingMustTailForwardedRegs = false;
  MachineFunction &MF;
  const TargetRegisterInfo &TRI;
  SmallVectorImpl<CCValAssign> &Locs;
  LLVMContext &Context;
  // True if arguments should be allocated at negative offsets.
  bool NegativeOffsets;

  uint64_t StackSize;
  Align MaxStackArgAlign;
  SmallVector<uint32_t, 16> UsedRegs;
  SmallVector<CCValAssign, 4> PendingLocs;
  SmallVector<ISD::ArgFlagsTy, 4> PendingArgFlags;

  // ByValInfo and SmallVector<ByValInfo, 4> ByValRegs:
  //
  // The vector of ByValInfo instances (ByValRegs) tracks byval parameters
  // that are stored in general-purpose registers.
  //
  // With 4-byte stack alignment, the instance index corresponds to the byval
  // parameter number within the formal argument set. Assume "struct_type" has
  // size = 4 bytes; then, for function "foo":
  //
  // i32 foo(i32 %p, %struct_type* %r, i32 %s, %struct_type* %t)
  //
  // ByValRegs[0] describes how "%r" is stored (Begin == r1, End == r2), and
  // ByValRegs[1] describes how "%t" is stored (Begin == r3, End == r4).
  //
  // With 8-byte stack alignment, r3 would be wasted in the function shown
  // above, according to AAPCS rules. The ByValRegs vector would still have
  // size 2, but "%t" would go to the stack and would not be described in
  // ByValRegs.
  //
  // Intended use of this collection:
  // 1. Initially ByValRegs is empty and InRegsParamsProcessed is 0.
  // 2. HandleByVal fills ByValRegs.
  // 3. Argument analysis (LowerFormalArguments, for example) runs. After each
  //    byval argument is analyzed, InRegsParamsProcessed is incremented.
  struct ByValInfo {
    ByValInfo(unsigned B, unsigned E) : Begin(B), End(E) {}

    // First register allocated for current parameter.
    unsigned Begin;

    // First after last register allocated for current parameter.
    unsigned End;
  };
  SmallVector<ByValInfo, 4> ByValRegs;

  // InRegsParamsProcessed - shows how many ByValRegs entries have been
  // processed during argument analysis.
  unsigned InRegsParamsProcessed;

public:
  CCState(CallingConv::ID CC, bool IsVarArg, MachineFunction &MF,
          SmallVectorImpl<CCValAssign> &Locs, LLVMContext &Context,
          bool NegativeOffsets = false);

  void addLoc(const CCValAssign &V) {
    Locs.push_back(V);
  }

  LLVMContext &getContext() const { return Context; }
  MachineFunction &getMachineFunction() const { return MF; }
  CallingConv::ID getCallingConv() const { return CallingConv; }
  bool isVarArg() const { return IsVarArg; }

  /// Returns the size of the currently allocated portion of the stack.
  uint64_t getStackSize() const { return StackSize; }

  /// getAlignedCallFrameSize - Return the size of the call frame needed to
  /// be able to store all arguments and such that the alignment requirement
  /// of each of the arguments is satisfied.
  uint64_t getAlignedCallFrameSize() const {
    return alignTo(StackSize, MaxStackArgAlign);
  }

  /// isAllocated - Return true if the specified register (or an alias) is
  /// allocated.
  bool isAllocated(MCRegister Reg) const {
    return UsedRegs[Reg / 32] & (1 << (Reg & 31));
  }

  /// AnalyzeFormalArguments - Analyze an array of argument values,
  /// incorporating info about the formals into this state.
  void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                              CCAssignFn Fn);

  /// The function will invoke AnalyzeFormalArguments.
  void AnalyzeArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                        CCAssignFn Fn) {
    AnalyzeFormalArguments(Ins, Fn);
  }

  /// AnalyzeReturn - Analyze the returned values of a return,
  /// incorporating info about the result values into this state.
  void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                     CCAssignFn Fn);

  /// CheckReturn - Analyze the return values of a function, returning
  /// true if the return can be performed without sret-demotion, and
  /// false otherwise.
  bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                   CCAssignFn Fn);

  /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
  /// incorporating info about the passed values into this state.
  void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                           CCAssignFn Fn);

  /// AnalyzeCallOperands - Same as above except it takes vectors of types
  /// and argument flags.
  void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                           SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                           CCAssignFn Fn);

  /// The function will invoke AnalyzeCallOperands.
  void AnalyzeArguments(const SmallVectorImpl<ISD::OutputArg> &Outs,
                        CCAssignFn Fn) {
    AnalyzeCallOperands(Outs, Fn);
  }

  /// AnalyzeCallResult - Analyze the return values of a call,
  /// incorporating info about the passed values into this state.
  void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                         CCAssignFn Fn);

  /// A shadow allocated register is a register that was allocated
  /// but wasn't added to the location list (Locs).
  /// \returns true if the register was allocated as shadow or false otherwise.
  bool IsShadowAllocatedReg(MCRegister Reg) const;

  /// AnalyzeCallResult - Same as above except it's specialized for calls which
  /// produce a single value.
  void AnalyzeCallResult(MVT VT, CCAssignFn Fn);

  /// getFirstUnallocated - Return the index of the first unallocated register
  /// in the set, or Regs.size() if they are all allocated.
  unsigned getFirstUnallocated(ArrayRef<MCPhysReg> Regs) const {
    for (unsigned i = 0; i < Regs.size(); ++i)
      if (!isAllocated(Regs[i]))
        return i;
    return Regs.size();
  }

  void DeallocateReg(MCPhysReg Reg) {
    assert(isAllocated(Reg) && "Trying to deallocate an unallocated register");
    MarkUnallocated(Reg);
  }

  /// AllocateReg - Attempt to allocate one register.  If it is not available,
  /// return zero.  Otherwise, return the register, marking it and any aliases
  /// as allocated.
  MCRegister AllocateReg(MCPhysReg Reg) {
    if (isAllocated(Reg))
      return MCRegister();
    MarkAllocated(Reg);
    return Reg;
  }

  /// Version of AllocateReg with extra register to be shadowed.
  MCRegister AllocateReg(MCPhysReg Reg, MCPhysReg ShadowReg) {
    if (isAllocated(Reg))
      return MCRegister();
    MarkAllocated(Reg);
    MarkAllocated(ShadowReg);
    return Reg;
  }

  /// AllocateReg - Attempt to allocate one of the specified registers.  If none
  /// are available, return zero.  Otherwise, return the first one available,
  /// marking it and any aliases as allocated.
  MCPhysReg AllocateReg(ArrayRef<MCPhysReg> Regs) {
    unsigned FirstUnalloc = getFirstUnallocated(Regs);
    if (FirstUnalloc == Regs.size())
      return MCRegister();    // Didn't find the reg.

    // Mark the register and any aliases as allocated.
    MCPhysReg Reg = Regs[FirstUnalloc];
    MarkAllocated(Reg);
    return Reg;
  }

  /// AllocateRegBlock - Attempt to allocate a block of RegsRequired
  /// consecutive registers. If this is not possible, return zero. Otherwise,
  /// return the first register of the allocated block, marking the entire
  /// block as allocated.
  MCPhysReg AllocateRegBlock(ArrayRef<MCPhysReg> Regs, unsigned RegsRequired) {
    if (RegsRequired > Regs.size())
      return 0;

    for (unsigned StartIdx = 0; StartIdx <= Regs.size() - RegsRequired;
         ++StartIdx) {
      bool BlockAvailable = true;
      // Check for already-allocated regs in this block
      for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
        if (isAllocated(Regs[StartIdx + BlockIdx])) {
          BlockAvailable = false;
          break;
        }
      }
      if (BlockAvailable) {
        // Mark the entire block as allocated
        for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
          MarkAllocated(Regs[StartIdx + BlockIdx]);
        }
        return Regs[StartIdx];
      }
    }
    // No block was available
    return 0;
  }
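
  // Usage sketch: reserving a consecutive register pair, e.g. for a 64-bit
  // value split across two 32-bit registers (GPRs is a hypothetical list):
  //
  //   if (MCPhysReg First = State.AllocateRegBlock(GPRs, /*RegsRequired=*/2)) {
  //     // First and the next register in GPRs are now marked as allocated.
  //   }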

  /// Version of AllocateReg with list of registers to be shadowed.
  MCRegister AllocateReg(ArrayRef<MCPhysReg> Regs, const MCPhysReg *ShadowRegs) {
    unsigned FirstUnalloc = getFirstUnallocated(Regs);
    if (FirstUnalloc == Regs.size())
      return MCRegister();    // Didn't find the reg.

    // Mark the register and any aliases as allocated.
    MCRegister Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
    MarkAllocated(Reg);
    MarkAllocated(ShadowReg);
    return Reg;
  }

  /// AllocateStack - Allocate a chunk of stack space with the specified size
  /// and alignment.
  int64_t AllocateStack(unsigned Size, Align Alignment) {
    int64_t Offset;
    if (NegativeOffsets) {
      StackSize = alignTo(StackSize + Size, Alignment);
      Offset = -StackSize;
    } else {
      Offset = alignTo(StackSize, Alignment);
      StackSize = Offset + Size;
    }
    MaxStackArgAlign = std::max(Alignment, MaxStackArgAlign);
    ensureMaxAlignment(Alignment);
    return Offset;
  }
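
  // Worked example (sketch) with NegativeOffsets == false:
  //   AllocateStack(4, Align(4));   // returns 0,  StackSize becomes 4
  //   AllocateStack(4, Align(4));   // returns 4,  StackSize becomes 8
  //   AllocateStack(16, Align(16)); // returns 16, StackSize becomes 32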

  void ensureMaxAlignment(Align Alignment);

  /// Version of AllocateStack with list of extra registers to be shadowed.
  /// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
  int64_t AllocateStack(unsigned Size, Align Alignment,
                        ArrayRef<MCPhysReg> ShadowRegs) {
    for (MCPhysReg Reg : ShadowRegs)
      MarkAllocated(Reg);
    return AllocateStack(Size, Alignment);
  }

  // HandleByVal - Allocate a stack slot large enough to pass an argument by
  // value. The size and alignment information of the argument is encoded in its
  // parameter attribute.
  void HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, int MinSize, Align MinAlign,
                   ISD::ArgFlagsTy ArgFlags);

  // Returns count of byval arguments that are to be stored (even partly)
  // in registers.
  unsigned getInRegsParamsCount() const { return ByValRegs.size(); }

  // Returns count of byval in-regs arguments processed.
  unsigned getInRegsParamsProcessed() const { return InRegsParamsProcessed; }

  // Get information about the N-th byval parameter that is stored in
  // registers. Here "InRegsParamRecordIndex" is N.
  void getInRegsParamInfo(unsigned InRegsParamRecordIndex,
                          unsigned& BeginReg, unsigned& EndReg) const {
    assert(InRegsParamRecordIndex < ByValRegs.size() &&
           "Wrong ByVal parameter index");

    const ByValInfo& info = ByValRegs[InRegsParamRecordIndex];
    BeginReg = info.Begin;
    EndReg = info.End;
  }

  // Add information about parameter that is kept in registers.
  void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd) {
    ByValRegs.push_back(ByValInfo(RegBegin, RegEnd));
  }

  // Advances either to the next byval parameter (excluding "waste" records),
  // or to the end of the collection.
  // Returns false if the end is reached.
  bool nextInRegsParam() {
    unsigned e = ByValRegs.size();
    if (InRegsParamsProcessed < e)
      ++InRegsParamsProcessed;
    return InRegsParamsProcessed < e;
  }

  // Clear byval registers tracking info.
  void clearByValRegsInfo() {
    InRegsParamsProcessed = 0;
    ByValRegs.clear();
  }

  // Rewind byval registers tracking info.
  void rewindByValRegsInfo() {
    InRegsParamsProcessed = 0;
  }

  // Get list of pending assignments
  SmallVectorImpl<CCValAssign> &getPendingLocs() {
    return PendingLocs;
  }

  // Get a list of argflags for pending assignments.
  SmallVectorImpl<ISD::ArgFlagsTy> &getPendingArgFlags() {
    return PendingArgFlags;
  }

  /// Compute the remaining unused register parameters that would be used for
  /// the given value type. This is useful when varargs are passed in the
  /// registers that normal prototyped parameters would be passed in, or for
  /// implementing perfect forwarding.
  void getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs, MVT VT,
                                   CCAssignFn Fn);

  /// Compute the set of registers that need to be preserved and forwarded to
  /// any musttail calls.
  void analyzeMustTailForwardedRegisters(
      SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
      CCAssignFn Fn);

  /// Returns true if the results of the two calling conventions are compatible.
  /// This is usually part of the check for tailcall eligibility.
  static bool resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn);

  /// The function runs an additional analysis pass over function arguments.
  /// It will mark each argument with the attribute flag SecArgPass.
  /// After running, it will sort the locs list.
  template <class T>
  void AnalyzeArgumentsSecondPass(const SmallVectorImpl<T> &Args,
                                  CCAssignFn Fn) {
    unsigned NumFirstPassLocs = Locs.size();

    /// Creates similar argument list to \p Args in which each argument is
    /// marked using SecArgPass flag.
    SmallVector<T, 16> SecPassArg;
    for (auto Arg : Args) {
      Arg.Flags.setSecArgPass();
      SecPassArg.push_back(Arg);
    }

    // Run the second argument pass
    AnalyzeArguments(SecPassArg, Fn);

    // Sort the locations of the arguments according to their original position.
    SmallVector<CCValAssign, 16> TmpArgLocs;
    TmpArgLocs.swap(Locs);
    auto B = TmpArgLocs.begin(), E = TmpArgLocs.end();
    std::merge(B, B + NumFirstPassLocs, B + NumFirstPassLocs, E,
               std::back_inserter(Locs),
               [](const CCValAssign &A, const CCValAssign &B) -> bool {
                 return A.getValNo() < B.getValNo();
               });
  }

private:
  /// MarkAllocated - Mark a register and all of its aliases as allocated.
  void MarkAllocated(MCPhysReg Reg);

  void MarkUnallocated(MCPhysReg Reg);
};
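
// Typical lowering loop (editorial sketch; CC_MyTarget stands in for a
// TableGen-generated assignment function):
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, Context);
//   CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);
//   for (CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc())
//       ; // copy the argument from VA.getLocReg()
//     else
//       ; // load the argument from frame offset VA.getLocMemOffset()
//   }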

} // end namespace llvm

#endif // LLVM_CODEGEN_CALLINGCONVLOWER_H
//===- MachineSizeOpts.h - machine size optimization ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some shared machine IR code size optimization related
// code.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_MACHINESIZEOPTS_H
#define LLVM_CODEGEN_MACHINESIZEOPTS_H

#include "llvm/Transforms/Utils/SizeOpts.h"

namespace llvm {

class ProfileSummaryInfo;
class MachineBasicBlock;
class MachineBlockFrequencyInfo;
class MachineFunction;
class MBFIWrapper;

/// Returns true if machine function \p MF is suggested to be size-optimized
/// based on the profile.
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI,
                           const MachineBlockFrequencyInfo *BFI,
                           PGSOQueryType QueryType = PGSOQueryType::Other);
/// Returns true if machine basic block \p MBB is suggested to be size-optimized
/// based on the profile.
bool shouldOptimizeForSize(const MachineBasicBlock *MBB,
                           ProfileSummaryInfo *PSI,
                           const MachineBlockFrequencyInfo *MBFI,
                           PGSOQueryType QueryType = PGSOQueryType::Other);
/// Returns true if machine basic block \p MBB is suggested to be size-optimized
/// based on the profile.
bool shouldOptimizeForSize(const MachineBasicBlock *MBB,
                           ProfileSummaryInfo *PSI,
                           MBFIWrapper *MBFIWrapper,
                           PGSOQueryType QueryType = PGSOQueryType::Other);
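
// Typical use inside a machine pass (editorial sketch; PSI and MBFI are
// assumed to come from the pass's declared analysis dependencies):
//
//   bool OptForSize = MF.getFunction().hasOptSize() ||
//                     shouldOptimizeForSize(&MF, PSI, MBFI);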

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINESIZEOPTS_H
//===- FunctionLoweringInfo.h - Lower functions from LLVM IR ---*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <utility>
#include <vector>

namespace llvm {

class Argument;
class BasicBlock;
class BranchProbabilityInfo;
class DbgDeclareInst;
class Function;
class Instruction;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class MVT;
class SelectionDAG;
class TargetLowering;

template <typename T> class GenericSSAContext;
using SSAContext = GenericSSAContext<Function>;
template <typename T> class GenericUniformityInfo;
using UniformityInfo = GenericUniformityInfo<SSAContext>;

//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
///
class FunctionLoweringInfo {
public:
  const Function *Fn;
  MachineFunction *MF;
  const TargetLowering *TLI;
  MachineRegisterInfo *RegInfo;
  BranchProbabilityInfo *BPI;
  const UniformityInfo *UA;
  /// CanLowerReturn - true iff the function's return value can be lowered to
  /// registers.
  bool CanLowerReturn;

  /// True if part of the CSRs will be handled via explicit copies.
  bool SplitCSR;

  /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
  /// allocated to hold a pointer to the hidden sret parameter.
  Register DemoteRegister;

  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
  DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;

  /// ValueMap - Since we emit code for the function a basic block at a time,
  /// we must remember which virtual registers hold the values for
  /// cross-basic-block values.
  DenseMap<const Value *, Register> ValueMap;

  /// The VirtReg2Value map is needed by divergence-analysis-driven
  /// instruction selection. It is the inverse of ValueMap and is computed
  /// lazily, on demand. It is used from
  /// TargetLowering::isSDNodeSourceOfDivergence to get the Value
  /// corresponding to a live-in virtual register.
  DenseMap<Register, const Value*> VirtReg2Value;

  /// This method is called from TargetLowering::isSDNodeSourceOfDivergence
  /// to get the Value corresponding to the live-in virtual register.
  const Value *getValueFromVirtualReg(Register Vreg);

  /// Track virtual registers created for exception pointers.
  DenseMap<const Value *, Register> CatchPadExceptionPointers;

  /// Helper object to track which of three possible relocation mechanisms are
  /// used for a particular value being relocated over a statepoint.
  struct StatepointRelocationRecord {
    enum RelocType {
      // Value did not need to be relocated and can be used directly.
      NoRelocate,
      // Value was spilled to the stack and needs to be filled in at the
      // gc.relocate.
      Spill,
      // Value was lowered to tied def and gc.relocate should be replaced with
      // copy from vreg.
      VReg,
      // Value was lowered to tied def and gc.relocate should be replaced with
      // SDValue kept in StatepointLoweringInfo structure. This is valid for
      // local relocates only.
      SDValueNode,
    } type = NoRelocate;
    // Payload contains either the frame index of the stack slot in which the
    // value was spilled, or the virtual register which contains the
    // re-definition.
    union payload_t {
      payload_t() : FI(-1) {}
      int FI;
      Register Reg;
    } payload;
  };

  /// Keep track of each value which was relocated and the strategy used to
  /// relocate that value.  This information is required when visiting
  /// gc.relocates which may appear in following blocks.
  using StatepointSpillMapTy =
    DenseMap<const Value *, StatepointRelocationRecord>;
  DenseMap<const Instruction *, StatepointSpillMapTy> StatepointRelocationMaps;

  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
  /// the entry block.  This allows the allocas to be efficiently referenced
  /// anywhere in the function.
  DenseMap<const AllocaInst*, int> StaticAllocaMap;

  /// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
  DenseMap<const Argument*, int> ByValArgFrameIndexMap;

  /// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
  /// function arguments that are inserted after scheduling is completed.
  SmallVector<MachineInstr*, 8> ArgDbgValues;

  /// Bitvector with a bit set if corresponding argument is described in
  /// ArgDbgValues. Using arg numbers according to Argument numbering.
  BitVector DescribedArgs;

  /// RegFixups - Registers which need to be replaced after isel is done.
  DenseMap<Register, Register> RegFixups;

  DenseSet<Register> RegsWithFixups;

  /// StatepointStackSlots - A list of temporary stack slots (frame indices)
  /// used to spill values at a statepoint.  We store them here to enable
  /// reuse of the same stack slots across different statepoints in different
  /// basic blocks.
  SmallVector<unsigned, 50> StatepointStackSlots;

  /// MBB - The current block.
  MachineBasicBlock *MBB;

  /// InsertPt - The current insert position inside the current block.
  MachineBasicBlock::iterator InsertPt;

  struct LiveOutInfo {
    unsigned NumSignBits : 31;
    unsigned IsValid : 1;
    KnownBits Known = 1;

    LiveOutInfo() : NumSignBits(0), IsValid(true) {}
  };

  /// Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND)
  /// for a value.
  DenseMap<const Value *, ISD::NodeType> PreferredExtendType;

  /// VisitedBBs - The set of basic blocks visited thus far by instruction
  /// selection.
  SmallPtrSet<const BasicBlock*, 4> VisitedBBs;

  /// PHINodesToUpdate - A list of phi instructions whose operand list will
  /// be updated after processing the current basic block.
  /// TODO: This isn't per-function state, it's per-basic-block state. But
  /// there's no other convenient place for it to live right now.
  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
  unsigned OrigNumPHINodesToUpdate;

  /// If the current MBB is a landing pad, the exception pointer and exception
  /// selector registers are copied into these virtual registers by
  /// SelectionDAGISel::PrepareEHLandingPad().
  unsigned ExceptionPointerVirtReg, ExceptionSelectorVirtReg;

  /// Collection of dbg.declare instructions handled after argument
  /// lowering and before ISel proper.
  SmallPtrSet<const DbgDeclareInst *, 8> PreprocessedDbgDeclares;

  /// set - Initialize this FunctionLoweringInfo with the given Function
  /// and its associated MachineFunction.
  ///
  void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG);

  /// clear - Clear out all the function-specific state. This returns this
  /// FunctionLoweringInfo to an empty state, ready to be used for a
  /// different function.
  void clear();

  /// isExportedInst - Return true if the specified value is an instruction
  /// exported from its block.
  bool isExportedInst(const Value *V) const {
    return ValueMap.count(V);
  }

  Register CreateReg(MVT VT, bool isDivergent = false);

  Register CreateRegs(const Value *V);

  Register CreateRegs(Type *Ty, bool isDivergent = false);

  Register InitializeRegForValue(const Value *V) {
    // Tokens never live in vregs.
    if (V->getType()->isTokenTy())
      return 0;
    Register &R = ValueMap[V];
    assert(R == 0 && "Already initialized this value register!");
    assert(VirtReg2Value.empty());
    return R = CreateRegs(V);
  }
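
  // Usage sketch during instruction selection (editorial): reuse the vreg of
  // an already-exported value, otherwise create one:
  //
  //   Register R = FuncInfo.isExportedInst(V)
  //                    ? FuncInfo.ValueMap[V]
  //                    : FuncInfo.InitializeRegForValue(V);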

  /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
  /// register is a PHI destination and the PHI's LiveOutInfo is not valid.
  const LiveOutInfo *GetLiveOutRegInfo(Register Reg) {
    if (!LiveOutRegInfo.inBounds(Reg))
      return nullptr;

    const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
    if (!LOI->IsValid)
      return nullptr;

    return LOI;
  }

  /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
  /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
  /// the register's LiveOutInfo is for a smaller bit width, it is extended to
  /// the larger bit width by zero extension. The bit width must be no smaller
  /// than the LiveOutInfo's existing bit width.
  const LiveOutInfo *GetLiveOutRegInfo(Register Reg, unsigned BitWidth);

  /// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
  void AddLiveOutRegInfo(Register Reg, unsigned NumSignBits,
                         const KnownBits &Known) {
    // Only install this information if it tells us something.
    if (NumSignBits == 1 && Known.isUnknown())
      return;

    LiveOutRegInfo.grow(Reg);
    LiveOutInfo &LOI = LiveOutRegInfo[Reg];
    LOI.NumSignBits = NumSignBits;
    LOI.Known.One = Known.One;
    LOI.Known.Zero = Known.Zero;
  }

  /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
  /// register based on the LiveOutInfo of its operands.
  void ComputePHILiveOutRegInfo(const PHINode*);

  /// InvalidatePHILiveOutRegInfo - Invalidates a PHI's LiveOutInfo, to be
  /// called when a block is visited before all of its predecessors.
  void InvalidatePHILiveOutRegInfo(const PHINode *PN) {
    // PHIs with no uses have no ValueMap entry.
    DenseMap<const Value*, Register>::const_iterator It = ValueMap.find(PN);
    if (It == ValueMap.end())
      return;

    Register Reg = It->second;
    if (Reg == 0)
      return;

    LiveOutRegInfo.grow(Reg);
    LiveOutRegInfo[Reg].IsValid = false;
  }

  /// setArgumentFrameIndex - Record frame index for the byval
  /// argument.
  void setArgumentFrameIndex(const Argument *A, int FI);

  /// getArgumentFrameIndex - Get frame index for the byval argument.
  int getArgumentFrameIndex(const Argument *A);

  Register getCatchPadExceptionPointerVReg(const Value *CPI,
                                           const TargetRegisterClass *RC);

private:
  /// LiveOutRegInfo - Information about live out vregs.
  IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Config/llvm-config.h"

namespace llvm {
  class LatencyPriorityQueue;

  /// Sorting functions for the Available queue.
  struct latency_sort {
    LatencyPriorityQueue *PQ;
    explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}

    bool operator()(const SUnit* LHS, const SUnit* RHS) const;
  };

  class LatencyPriorityQueue : public SchedulingPriorityQueue {
    // SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits = nullptr;

    /// NumNodesSolelyBlocking - This vector contains, for every node in the
    /// Queue, the number of nodes that the node is the sole unscheduled
    /// predecessor for.  This is used as a tie-breaker heuristic for better
    /// mobility.
    std::vector<unsigned> NumNodesSolelyBlocking;

    /// Queue - The queue.
    std::vector<SUnit*> Queue;
    latency_sort Picker;

  public:
    LatencyPriorityQueue() : Picker(this) {
    }

    bool isBottomUp() const override { return false; }

    void initNodes(std::vector<SUnit> &sunits) override {
      SUnits = &sunits;
      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
    }

    void addNode(const SUnit *SU) override {
      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
    }

    void updateNode(const SUnit *SU) override {
    }

    void releaseState() override {
      SUnits = nullptr;
    }

    unsigned getLatency(unsigned NodeNum) const {
      assert(NodeNum < (*SUnits).size());
      return (*SUnits)[NodeNum].getHeight();
    }

    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
      assert(NodeNum < NumNodesSolelyBlocking.size());
      return NumNodesSolelyBlocking[NodeNum];
    }

    bool empty() const override { return Queue.empty(); }

    void push(SUnit *U) override;

    SUnit *pop() override;

    void remove(SUnit *SU) override;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    LLVM_DUMP_METHOD void dump(ScheduleDAG *DAG) const override;
#endif

    // scheduledNode - As nodes are scheduled, we look to see if there are any
    // successor nodes that have a single unscheduled predecessor.  If so, that
    // single predecessor has a higher priority, since scheduling it will make
    // the node available.
    void scheduledNode(SUnit *SU) override;

private:
    void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
    SUnit *getSingleUnscheduledPred(SUnit *SU);
  };
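
  // Usage sketch (editorial): the queue is normally driven by a list
  // scheduler over an already-built SUnit graph:
  //
  //   LatencyPriorityQueue PQ;
  //   PQ.initNodes(SUnits);   // std::vector<SUnit> owned by the ScheduleDAG
  //   PQ.push(&SUnits[0]);    // release a node that became available
  //   SUnit *Next = PQ.pop(); // highest-priority available node
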
}

#endif
//===-- llvm/CodeGen/TargetOpcodes.h - Target Indep Opcodes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target independent instruction opcodes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETOPCODES_H
#define LLVM_CODEGEN_TARGETOPCODES_H

namespace llvm {

/// Invariant opcodes: All instruction sets have these as their low opcodes.
///
namespace TargetOpcode {
enum {
#define HANDLE_TARGET_OPCODE(OPC) OPC,
#define HANDLE_TARGET_OPCODE_MARKER(IDENT, OPC) IDENT = OPC,
#include "llvm/Support/TargetOpcodes.def"
};
} // end namespace TargetOpcode

/// Check whether the given Opcode is a generic opcode that is not supposed
/// to appear after ISel.
inline bool isPreISelGenericOpcode(unsigned Opcode) {
  return Opcode >= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START &&
         Opcode <= TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
}

/// Check whether the given Opcode is a target-specific opcode.
inline bool isTargetSpecificOpcode(unsigned Opcode) {
  return Opcode > TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;
}

/// \returns true if \p Opcode is an optimization hint opcode which is not
/// supposed to appear after ISel.
inline bool isPreISelGenericOptimizationHint(unsigned Opcode) {
  return Opcode >= TargetOpcode::PRE_ISEL_GENERIC_OPTIMIZATION_HINT_START &&
         Opcode <= TargetOpcode::PRE_ISEL_GENERIC_OPTIMIZATION_HINT_END;
}
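
// Example classification check (editorial sketch): generic opcodes must not
// survive instruction selection, so a verifier-style check might read:
//
//   if (isPreISelGenericOpcode(MI.getOpcode()))
//     report_fatal_error("generic instruction survived instruction selection");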

} // end namespace llvm

#endif
//===- GCMetadata.h - Garbage collector metadata ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
// used as a communication channel from the target code generator to the target
// garbage collectors. This interface allows code generators and garbage
// collectors to be developed independently.
//
// The GCFunctionInfo class logs the data necessary to build a type accurate
// stack map. The code generator outputs:
//
//   - Safe points as specified by the GCStrategy's NeededSafePoints.
//   - Stack offsets for GC roots, as specified by calls to llvm.gcroot
//
// As a refinement, liveness analysis calculates the set of live roots at each
// safe point. Liveness analysis is not presently performed by the code
// generator, so all roots are assumed live.
//
// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
// they are compiled. This accretion is necessary for collectors which must emit
// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
// outlives the MachineFunction from which it is derived and must not refer to
// any code generator data structures.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GCMETADATA_H
#define LLVM_CODEGEN_GCMETADATA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GCStrategy.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {

class Constant;
class Function;
class MCSymbol;

/// GCPoint - Metadata for a collector-safe point in machine code.
///
struct GCPoint {
  MCSymbol *Label;    ///< A label.
  DebugLoc Loc;

  GCPoint(MCSymbol *L, DebugLoc DL)
      : Label(L), Loc(std::move(DL)) {}
};

/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
  int Num;                  ///< Usually a frame index.
  int StackOffset = -1;     ///< Offset from the stack pointer.
  const Constant *Metadata; ///< Metadata straight from the call
                            ///< to llvm.gcroot.

  GCRoot(int N, const Constant *MD) : Num(N), Metadata(MD) {}
};

/// Garbage collection metadata for a single function.  Currently, this
/// information only applies to GCStrategies which use GCRoot.
class GCFunctionInfo {
public:
  using iterator = std::vector<GCPoint>::iterator;
  using roots_iterator = std::vector<GCRoot>::iterator;
  using live_iterator = std::vector<GCRoot>::const_iterator;

private:
  const Function &F;
  GCStrategy &S;
  uint64_t FrameSize;
  std::vector<GCRoot> Roots;
  std::vector<GCPoint> SafePoints;

  // FIXME: Liveness. A 2D BitVector, perhaps?
  //
  //   BitVector Liveness;
  //
  //   bool islive(int point, int root) =
  //     Liveness[point * SafePoints.size() + root]
  //
  // The bit vector is the more compact representation where >3.2% of roots
  // are live per safe point (1.5% on 64-bit hosts).

public:
  GCFunctionInfo(const Function &F, GCStrategy &S);
  ~GCFunctionInfo();

  /// getFunction - Return the function to which this metadata applies.
  const Function &getFunction() const { return F; }

  /// getStrategy - Return the GC strategy for the function.
  GCStrategy &getStrategy() { return S; }

  /// addStackRoot - Registers a root that lives on the stack. Num is the
  ///                stack object ID for the alloca (if the code generator is
  ///                using MachineFrameInfo).
  void addStackRoot(int Num, const Constant *Metadata) {
    Roots.push_back(GCRoot(Num, Metadata));
  }

  /// removeStackRoot - Removes a root.
  roots_iterator removeStackRoot(roots_iterator position) {
    return Roots.erase(position);
  }

  /// addSafePoint - Notes the existence of a safe point. Num is the ID of the
  /// label just prior to the safe point (if the code generator is using
  /// MachineModuleInfo).
  void addSafePoint(MCSymbol *Label, const DebugLoc &DL) {
    SafePoints.emplace_back(Label, DL);
  }

  /// getFrameSize/setFrameSize - Records the function's frame size.
  uint64_t getFrameSize() const { return FrameSize; }
  void setFrameSize(uint64_t S) { FrameSize = S; }

  /// begin/end - Iterators for safe points.
  iterator begin() { return SafePoints.begin(); }
  iterator end() { return SafePoints.end(); }
  size_t size() const { return SafePoints.size(); }

  /// roots_begin/roots_end - Iterators for all roots in the function.
  roots_iterator roots_begin() { return Roots.begin(); }
  roots_iterator roots_end() { return Roots.end(); }
  size_t roots_size() const { return Roots.size(); }

  /// live_begin/live_end - Iterators for live roots at a given safe point.
  live_iterator live_begin(const iterator &p) { return roots_begin(); }
  live_iterator live_end(const iterator &p) { return roots_end(); }
  size_t live_size(const iterator &p) const { return roots_size(); }
};
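
// Iteration sketch (editorial): a collector plugin typically walks the safe
// points and roots recorded for each compiled function:
//
//   GCFunctionInfo &FI = GMI.getFunctionInfo(F); // GMI: a GCModuleInfo pass
//   for (GCPoint &SP : FI)
//     ; // emit SP.Label into the stack map
//   for (auto RI = FI.roots_begin(); RI != FI.roots_end(); ++RI)
//     ; // record RI->StackOffset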

/// An analysis pass which caches information about the entire Module.
/// Records both the function level information used by GCRoots and a
/// cache of the 'active' gc strategy objects for the current Module.
class GCModuleInfo : public ImmutablePass {
  /// An owning list of all GCStrategies which have been created
  SmallVector<std::unique_ptr<GCStrategy>, 1> GCStrategyList;
  /// A helper map to speedup lookups into the above list
  StringMap<GCStrategy*> GCStrategyMap;

public:
  /// Lookup the GCStrategy object associated with the given gc name.
  /// Objects are owned internally; No caller should attempt to delete the
  /// returned objects.
  GCStrategy *getGCStrategy(const StringRef Name);

  /// List of per function info objects.  In theory, Each of these
  /// may be associated with a different GC.
  using FuncInfoVec = std::vector<std::unique_ptr<GCFunctionInfo>>;

  FuncInfoVec::iterator funcinfo_begin() { return Functions.begin(); }
  FuncInfoVec::iterator funcinfo_end() { return Functions.end(); }

private:
  /// Owning list of all GCFunctionInfos associated with this Module
  FuncInfoVec Functions;

  /// Non-owning map to bypass linear search when finding the GCFunctionInfo
  /// associated with a particular Function.
  using finfo_map_type = DenseMap<const Function *, GCFunctionInfo *>;
  finfo_map_type FInfoMap;

public:
  using iterator = SmallVector<std::unique_ptr<GCStrategy>, 1>::const_iterator;

  static char ID;

  GCModuleInfo();

  /// clear - Resets the pass. Any pass that uses GCModuleInfo should call
  /// this in its doFinalization().
  ///
  void clear();

  /// begin/end - Iterators for used strategies.
  ///
  iterator begin() const { return GCStrategyList.begin(); }
  iterator end() const { return GCStrategyList.end(); }

  /// getFunctionInfo - Look up function metadata. This is currently assumed
  /// to have the side effect of initializing the associated GCStrategy. That
  /// will soon change.
  GCFunctionInfo &getFunctionInfo(const Function &F);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GCMETADATA_H
//===- MachineSSAUpdater.h - Unstructured SSA Update Tool -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MachineSSAUpdater class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
#define LLVM_CODEGEN_MACHINESSAUPDATER_H

#include "llvm/CodeGen/Register.h"

namespace llvm {

class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterClass;
template<typename T> class SmallVectorImpl;
template<typename T> class SSAUpdaterTraits;

/// MachineSSAUpdater - This class updates SSA form for a set of virtual
/// registers defined in multiple blocks.  This is used when code duplication
/// or another unstructured transformation wants to rewrite a set of uses of one
/// vreg with uses of a set of vregs.
class MachineSSAUpdater {
  friend class SSAUpdaterTraits<MachineSSAUpdater>;

private:
  /// AvailableVals - This keeps track of which value to use on a per-block
  /// basis.  When we insert PHI nodes, we keep track of them here.
  //typedef DenseMap<MachineBasicBlock*, Register> AvailableValsTy;
  void *AV = nullptr;

  /// VRC - Register class of the current virtual register.
  const TargetRegisterClass *VRC = nullptr;

  /// InsertedPHIs - If this is non-null, the MachineSSAUpdater adds all PHI
  /// nodes that it creates to the vector.
  SmallVectorImpl<MachineInstr*> *InsertedPHIs;

  const TargetInstrInfo *TII = nullptr;
  MachineRegisterInfo *MRI = nullptr;

public:
  /// MachineSSAUpdater constructor.  If InsertedPHIs is specified, it will be
  /// filled in with all PHI Nodes created by rewriting.
  explicit MachineSSAUpdater(MachineFunction &MF,
                             SmallVectorImpl<MachineInstr*> *NewPHI = nullptr);
  MachineSSAUpdater(const MachineSSAUpdater &) = delete;
  MachineSSAUpdater &operator=(const MachineSSAUpdater &) = delete;
  ~MachineSSAUpdater();

  /// Initialize - Reset this object to get ready for a new set of SSA
  /// updates.
  void Initialize(Register V);
  void Initialize(const TargetRegisterClass *RC);

  /// AddAvailableValue - Indicate that a rewritten value is available at the
  /// end of the specified block with the specified value.
  void AddAvailableValue(MachineBasicBlock *BB, Register V);

  /// HasValueForBlock - Return true if the MachineSSAUpdater already has a
  /// value for the specified block.
  bool HasValueForBlock(MachineBasicBlock *BB) const;

  /// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
  /// live at the end of the specified block.
  Register GetValueAtEndOfBlock(MachineBasicBlock *BB);

  /// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
  /// is live in the middle of the specified block. If ExistingValueOnly is
  /// true then this will only return an existing value or $noreg; otherwise new
  /// instructions may be inserted to materialize a value.
  ///
  /// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
  /// important case: if there is a definition of the rewritten value after the
  /// 'use' in BB.  Consider code like this:
  ///
  ///      X1 = ...
  ///   SomeBB:
  ///      use(X)
  ///      X2 = ...
  ///      br Cond, SomeBB, OutBB
  ///
  /// In this case, there are two values (X1 and X2) added to the AvailableVals
  /// set by the client of the rewriter, and those values are both live out of
  /// their respective blocks.  However, the use of X happens in the *middle* of
  /// a block.  Because of this, we need to insert a new PHI node in SomeBB to
  /// merge the appropriate values, and this value isn't live out of the block.
  Register GetValueInMiddleOfBlock(MachineBasicBlock *BB,
                                   bool ExistingValueOnly = false);

  /// RewriteUse - Rewrite a use of the symbolic value.  This handles PHI nodes,
  /// which use their value in the corresponding predecessor.  Note that this
  /// will not work if the use is supposed to be rewritten to a value defined in
  /// the same block as the use, but above it.  Any 'AddAvailableValue's added
  /// for the use's block will be considered to be below it.
  void RewriteUse(MachineOperand &U);

private:
  // If ExistingValueOnly is true, will not create any new instructions. Used
  // for debug values, which must not affect codegen.
  Register GetValueAtEndOfBlockInternal(MachineBasicBlock *BB,
                                        bool ExistingValueOnly = false);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINESSAUPDATER_H
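// A minimal sketch of the usual update sequence, assuming a transformation
// that has just duplicated code so that the value of `OldReg` is now defined
// in several blocks. `rewriteVReg` and its parameters are hypothetical; the
// updater calls follow the interface declared above.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include <utility>

static void rewriteVReg(
    llvm::MachineFunction &MF, llvm::Register OldReg,
    llvm::ArrayRef<std::pair<llvm::MachineBasicBlock *, llvm::Register>> Defs,
    llvm::ArrayRef<llvm::MachineOperand *> Uses) {
  llvm::MachineSSAUpdater Updater(MF);
  Updater.Initialize(OldReg); // adopt OldReg's register class
  // Tell the updater which value is live out of each defining block.
  for (const auto &[MBB, NewReg] : Defs)
    Updater.AddAvailableValue(MBB, NewReg);
  // Rewrite each recorded use; PHIs are inserted on demand.
  for (llvm::MachineOperand *MO : Uses)
    Updater.RewriteUse(*MO);
}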
CodeGen/StackProtector.h
//===- StackProtector.h - Stack Protector Insertion -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_STACKPROTECTOR_H
#define LLVM_CODEGEN_STACKPROTECTOR_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/TargetParser/Triple.h"

namespace llvm {

class BasicBlock;
class Function;
class Module;
class TargetLoweringBase;
class TargetMachine;

class StackProtector : public FunctionPass {
private:
  static constexpr unsigned DefaultSSPBufferSize = 8;

  /// A mapping of AllocaInsts to their required SSP layout.
  using SSPLayoutMap = DenseMap<const AllocaInst *,
                                MachineFrameInfo::SSPLayoutKind>;

  const TargetMachine *TM = nullptr;

  /// TLI - Keep a pointer to a TargetLoweringBase to consult for determining
  /// target type sizes.
  const TargetLoweringBase *TLI = nullptr;
  Triple Trip;

  Function *F = nullptr;
  Module *M = nullptr;

  std::optional<DomTreeUpdater> DTU;

  /// Layout - Mapping of allocations to the required SSPLayoutKind.
  /// StackProtector analysis will update this map when determining if an
  /// AllocaInst triggers a stack protector.
  SSPLayoutMap Layout;

  /// The minimum size of buffers that will receive stack smashing
  /// protection when -fstack-protector is used.
  unsigned SSPBufferSize = DefaultSSPBufferSize;

  // True if a stack-protector prologue has been generated for this function.
  bool HasPrologue = false;

  // True if the stack-protector check is generated at the IR level.
  bool HasIRCheck = false;

  /// InsertStackProtectors - Insert code into the prologue and epilogue of
  /// the function.
  ///
  ///  - The prologue code loads and stores the stack guard onto the stack.
  ///  - The epilogue checks the value stored in the prologue against the
  ///    original value. It calls __stack_chk_fail if they differ.
  bool InsertStackProtectors();

  /// CreateFailBB - Create a basic block to jump to when the stack protector
  /// check fails.
  BasicBlock *CreateFailBB();

public:
  static char ID; // Pass identification, replacement for typeid.

  StackProtector();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Return true if StackProtector is supposed to be handled by SelectionDAG.
  bool shouldEmitSDCheck(const BasicBlock &BB) const;

  bool runOnFunction(Function &Fn) override;

  void copyToMachineFrameInfo(MachineFrameInfo &MFI) const;

  /// Check whether or not \p F needs a stack protector based upon the stack
  /// protector level.
  static bool requiresStackProtector(Function *F, SSPLayoutMap *Layout = nullptr);

};

} // end namespace llvm

#endif // LLVM_CODEGEN_STACKPROTECTOR_H
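// A minimal sketch, assuming a diagnostic utility that reports which
// functions would get a protector; `reportProtectedFunctions` is
// hypothetical, while requiresStackProtector() is the static helper
// declared above.
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

static void reportProtectedFunctions(llvm::Module &M) {
  for (llvm::Function &F : M) {
    if (F.isDeclaration())
      continue;
    // Query the same predicate the pass itself uses, without a layout map.
    if (llvm::StackProtector::requiresStackProtector(&F))
      llvm::errs() << F.getName() << " needs a stack protector\n";
  }
}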
CodeGen/ComplexDeinterleavingPass.h
//===- ComplexDeinterleavingPass.h - Complex Deinterleaving Pass *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements generation of target-specific intrinsics to support
// handling of complex number arithmetic and deinterleaving.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_COMPLEXDEINTERLEAVING_H
#define LLVM_CODEGEN_COMPLEXDEINTERLEAVING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;
class TargetMachine;

struct ComplexDeinterleavingPass
    : public PassInfoMixin<ComplexDeinterleavingPass> {
private:
  TargetMachine *TM;

public:
  ComplexDeinterleavingPass(TargetMachine *TM) : TM(TM) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

enum class ComplexDeinterleavingOperation {
  CAdd,
  CMulPartial,
  // The following 'operations' are used to represent internal states. Backends
  // are not expected to try to support these in any capacity.
  Deinterleave,
  Splat,
  Symmetric,
  ReductionPHI,
  ReductionOperation,
  ReductionSelect,
};

enum class ComplexDeinterleavingRotation {
  Rotation_0 = 0,
  Rotation_90 = 1,
  Rotation_180 = 2,
  Rotation_270 = 3,
};

} // namespace llvm

#endif // LLVM_CODEGEN_COMPLEXDEINTERLEAVING_H
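// A minimal sketch of scheduling the pass with the new pass manager,
// assuming the host tool already owns a TargetMachine; `addComplexDeint`
// is a hypothetical helper around the constructor shown above.
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/IR/PassManager.h"

static void addComplexDeint(llvm::FunctionPassManager &FPM,
                            llvm::TargetMachine *TM) {
  FPM.addPass(llvm::ComplexDeinterleavingPass(TM));
}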
CodeGen/ScheduleDAGInstrs.h
//===- ScheduleDAGInstrs.h - MachineInstr Scheduling ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Defines the ScheduleDAGInstrs class, which implements scheduling
/// for a MachineInstr-based dependency graph.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
#define LLVM_CODEGEN_SCHEDULEDAGINSTRS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseMultiSet.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/identity.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/MC/LaneBitmask.h"
#include <cassert>
#include <cstdint>
#include <list>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

  class AAResults;
  class LiveIntervals;
  class MachineFrameInfo;
  class MachineFunction;
  class MachineInstr;
  class MachineLoopInfo;
  class MachineOperand;
  struct MCSchedClassDesc;
  class PressureDiffs;
  class PseudoSourceValue;
  class RegPressureTracker;
  class UndefValue;
  class Value;

  /// An individual mapping from virtual register number to SUnit.
  struct VReg2SUnit {
    unsigned VirtReg;
    LaneBitmask LaneMask;
    SUnit *SU;

    VReg2SUnit(unsigned VReg, LaneBitmask LaneMask, SUnit *SU)
      : VirtReg(VReg), LaneMask(LaneMask), SU(SU) {}

    unsigned getSparseSetIndex() const {
      return Register::virtReg2Index(VirtReg);
    }
  };

  /// Mapping from virtual register to SUnit including an operand index.
  struct VReg2SUnitOperIdx : public VReg2SUnit {
    unsigned OperandIndex;

    VReg2SUnitOperIdx(unsigned VReg, LaneBitmask LaneMask,
                      unsigned OperandIndex, SUnit *SU)
      : VReg2SUnit(VReg, LaneMask, SU), OperandIndex(OperandIndex) {}
  };

  /// Record a physical register access.
  /// For non-data-dependent uses, OpIdx == -1.
  struct PhysRegSUOper {
    SUnit *SU;
    int OpIdx;
    unsigned Reg;

    PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}

    unsigned getSparseSetIndex() const { return Reg; }
  };

  /// Use a SparseMultiSet to track physical registers. Storage is only
  /// allocated once for the pass. It can be cleared in constant time and reused
  /// without any frees.
  using Reg2SUnitsMap =
      SparseMultiSet<PhysRegSUOper, identity<unsigned>, uint16_t>;

  /// Use SparseSet as a SparseMap by relying on the fact that it never
  /// compares ValueT's, only unsigned keys. This allows the set to be cleared
  /// between scheduling regions in constant time as long as ValueT does not
  /// require a destructor.
  using VReg2SUnitMap = SparseSet<VReg2SUnit, VirtReg2IndexFunctor>;

  /// Track local uses of virtual registers. These uses are gathered by the DAG
  /// builder and may be consulted by the scheduler to avoid iterating an entire
  /// vreg use list.
  using VReg2SUnitMultiMap = SparseMultiSet<VReg2SUnit, VirtReg2IndexFunctor>;

  using VReg2SUnitOperIdxMultiMap =
      SparseMultiSet<VReg2SUnitOperIdx, VirtReg2IndexFunctor>;

  using ValueType = PointerUnion<const Value *, const PseudoSourceValue *>;

  struct UnderlyingObject : PointerIntPair<ValueType, 1, bool> {
    UnderlyingObject(ValueType V, bool MayAlias)
        : PointerIntPair<ValueType, 1, bool>(V, MayAlias) {}

    ValueType getValue() const { return getPointer(); }
    bool mayAlias() const { return getInt(); }
  };

  using UnderlyingObjectsVector = SmallVector<UnderlyingObject, 4>;

  /// A ScheduleDAG for scheduling lists of MachineInstr.
  class ScheduleDAGInstrs : public ScheduleDAG {
  protected:
    const MachineLoopInfo *MLI = nullptr;
    const MachineFrameInfo &MFI;

    /// TargetSchedModel provides an interface to the machine model.
    TargetSchedModel SchedModel;

    /// True if the DAG builder should remove kill flags (in preparation for
    /// rescheduling).
    bool RemoveKillFlags;

    /// The standard DAG builder does not normally include terminators as DAG
    /// nodes because it does not create the necessary dependencies to prevent
    /// reordering. A specialized scheduler can override
    /// TargetInstrInfo::isSchedulingBoundary and then enable this flag to
    /// indicate it has taken responsibility for scheduling the terminator
    /// correctly.
    bool CanHandleTerminators = false;

    /// Whether lane masks should get tracked.
    bool TrackLaneMasks = false;

    // State specific to the current scheduling region.
    // ------------------------------------------------

    /// The block in which to insert instructions.
    MachineBasicBlock *BB = nullptr;

    /// The beginning of the range to be scheduled.
    MachineBasicBlock::iterator RegionBegin;

    /// The end of the range to be scheduled.
    MachineBasicBlock::iterator RegionEnd;

    /// Instructions in this region (distance(RegionBegin, RegionEnd)).
    unsigned NumRegionInstrs = 0;

    /// After calling BuildSchedGraph, each machine instruction in the current
    /// scheduling region is mapped to an SUnit.
    DenseMap<MachineInstr*, SUnit*> MISUnitMap;

    // State internal to DAG building.
    // -------------------------------

    /// Defs, Uses - Remember where defs and uses of each register are as we
    /// iterate upward through the instructions. This is allocated here instead
    /// of inside BuildSchedGraph to avoid the need for it to be initialized and
    /// destructed for each block.
    Reg2SUnitsMap Defs;
    Reg2SUnitsMap Uses;

    /// Tracks the last instruction(s) in this region defining each virtual
    /// register. There may be multiple current definitions for a register with
    /// disjoint lane masks.
    VReg2SUnitMultiMap CurrentVRegDefs;
    /// Tracks the last instructions in this region using each virtual register.
    VReg2SUnitOperIdxMultiMap CurrentVRegUses;

    AAResults *AAForDep = nullptr;

    /// Remember a generic side-effecting instruction as we proceed.
    /// No other SU ever gets scheduled around it (except in the special
    /// case of a huge region that gets reduced).
    SUnit *BarrierChain = nullptr;

  public:
    /// A list of SUnits, used in Value2SUsMap, during DAG construction.
    /// Note: to gain speed it might be worth investigating an optimized
    /// implementation of this data structure, such as a singly linked list
    /// with a memory pool (SmallVector was tried but proved slow, and SparseSet is not
    /// applicable).
    using SUList = std::list<SUnit *>;

  protected:
    /// A map from ValueType to SUList, used during DAG construction, as
    /// a means of remembering which SUs depend on which memory locations.
    class Value2SUsMap;

    /// Reduces maps in FIFO order, by N SUs. This is better than turning
    /// every Nth memory SU into BarrierChain in buildSchedGraph(), since
    /// it avoids unnecessary edges between seen SUs above the new BarrierChain,
    /// and those below it.
    void reduceHugeMemNodeMaps(Value2SUsMap &stores,
                               Value2SUsMap &loads, unsigned N);

    /// Adds a chain edge between SUa and SUb, but only if both
    /// AAResults and Target fail to deny the dependency.
    void addChainDependency(SUnit *SUa, SUnit *SUb,
                            unsigned Latency = 0);

    /// Adds dependencies as needed from all SUs in list to SU.
    void addChainDependencies(SUnit *SU, SUList &SUs, unsigned Latency) {
      for (SUnit *Entry : SUs)
        addChainDependency(SU, Entry, Latency);
    }

    /// Adds dependencies as needed from all SUs in map, to SU.
    void addChainDependencies(SUnit *SU, Value2SUsMap &Val2SUsMap);

    /// Adds dependencies as needed to SU, from all SUs mapped to V.
    void addChainDependencies(SUnit *SU, Value2SUsMap &Val2SUsMap,
                              ValueType V);

    /// Adds barrier chain edges from all SUs in map, and then clears the map.
    /// This is equivalent to insertBarrierChain(), but optimized for the common
    /// case where the new BarrierChain (a global memory object) has a higher
    /// NodeNum than all SUs in map. It is assumed BarrierChain has been set
    /// before calling this.
    void addBarrierChain(Value2SUsMap &map);

    /// Inserts a barrier chain in a huge region, far below the current SU.
    /// Adds barrier chain edges from all SUs in map with higher NodeNums than
    /// this new BarrierChain, and removes them from the map. It is assumed
    /// BarrierChain has been set before calling this.
    void insertBarrierChain(Value2SUsMap &map);

    /// For an unanalyzable memory access, this Value is used in maps.
    UndefValue *UnknownValue;


    /// Topo - A topological ordering for SUnits which permits fast IsReachable
    /// and similar queries.
    ScheduleDAGTopologicalSort Topo;

    using DbgValueVector =
        std::vector<std::pair<MachineInstr *, MachineInstr *>>;
    /// Remember instruction that precedes DBG_VALUE.
    /// These are generated by buildSchedGraph but persist so they can be
    /// referenced when emitting the final schedule.
    DbgValueVector DbgValues;
    MachineInstr *FirstDbgValue = nullptr;

    /// Set of live physical registers for updating kill flags.
    LivePhysRegs LiveRegs;

  public:
    explicit ScheduleDAGInstrs(MachineFunction &mf,
                               const MachineLoopInfo *mli,
                               bool RemoveKillFlags = false);

    ~ScheduleDAGInstrs() override = default;

    /// Gets the machine model for instruction scheduling.
    const TargetSchedModel *getSchedModel() const { return &SchedModel; }

    /// Resolves and caches the scheduling class for an SUnit.
    const MCSchedClassDesc *getSchedClass(SUnit *SU) const {
      if (!SU->SchedClass && SchedModel.hasInstrSchedModel())
        SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr());
      return SU->SchedClass;
    }

    /// IsReachable - Checks if SU is reachable from TargetSU.
    bool IsReachable(SUnit *SU, SUnit *TargetSU) {
      return Topo.IsReachable(SU, TargetSU);
    }

    /// Returns an iterator to the top of the current scheduling region.
    MachineBasicBlock::iterator begin() const { return RegionBegin; }

    /// Returns an iterator to the bottom of the current scheduling region.
    MachineBasicBlock::iterator end() const { return RegionEnd; }

    /// Creates a new SUnit and returns a pointer to it.
    SUnit *newSUnit(MachineInstr *MI);

    /// Returns an existing SUnit for this MI, or nullptr.
    SUnit *getSUnit(MachineInstr *MI) const;

    /// If this method returns true, handling of the scheduling regions
    /// themselves (in case of a scheduling boundary in MBB) will be done
    /// beginning with the topmost region of MBB.
    virtual bool doMBBSchedRegionsTopDown() const { return false; }

    /// Prepares to perform scheduling in the given block.
    virtual void startBlock(MachineBasicBlock *BB);

    /// Cleans up after scheduling in the given block.
    virtual void finishBlock();

    /// Initialize the DAG and common scheduler state for a new
    /// scheduling region. This does not actually create the DAG, only clears
    /// it. The scheduling driver may call BuildSchedGraph multiple times per
    /// scheduling region.
    virtual void enterRegion(MachineBasicBlock *bb,
                             MachineBasicBlock::iterator begin,
                             MachineBasicBlock::iterator end,
                             unsigned regioninstrs);

    /// Called when the scheduler has finished scheduling the current region.
    virtual void exitRegion();

    /// Builds SUnits for the current region.
    /// If \p RPTracker is non-null, compute register pressure as a side effect.
    /// The DAG builder is an efficient place to do it because it already visits
    /// operands.
    void buildSchedGraph(AAResults *AA,
                         RegPressureTracker *RPTracker = nullptr,
                         PressureDiffs *PDiffs = nullptr,
                         LiveIntervals *LIS = nullptr,
                         bool TrackLaneMasks = false);

    /// Adds dependencies from instructions in the current list of
    /// instructions being scheduled to the scheduling barrier. We want to make
    /// sure instructions which define registers that are either used by the
    /// terminator or are live-out are properly scheduled. This is especially
    /// important when the definition latency of the return value(s) is too
    /// high to be hidden by the branch, or when live-out registers are used by
    /// instructions in the fallthrough block.
    void addSchedBarrierDeps();

    /// Orders nodes according to selected style.
    ///
    /// Typically, a scheduling algorithm will implement schedule() without
    /// overriding enterRegion() or exitRegion().
    virtual void schedule() = 0;

    /// Allow targets to perform final scheduling actions at the level of the
    /// whole MachineFunction. By default does nothing.
    virtual void finalizeSchedule() {}

    void dumpNode(const SUnit &SU) const override;
    void dump() const override;

    /// Returns a label for a DAG node that points to an instruction.
    std::string getGraphNodeLabel(const SUnit *SU) const override;

    /// Returns a label for the region of code covered by the DAG.
    std::string getDAGName() const override;

    /// Fixes register kill flags that scheduling has made invalid.
    void fixupKills(MachineBasicBlock &MBB);

    /// True if an edge can be added from PredSU to SuccSU without creating
    /// a cycle.
    bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);

    /// Add a DAG edge to the given SU with the given predecessor
    /// dependence data.
    ///
    /// \returns true if the edge may be added without creating a cycle OR if an
    /// equivalent edge already existed (false indicates failure).
    bool addEdge(SUnit *SuccSU, const SDep &PredDep);

  protected:
    void initSUnits();
    void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
    void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
    void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
    void addVRegUseDeps(SUnit *SU, unsigned OperIdx);

    /// Returns a mask for which lanes get read/written by the given (register)
    /// machine operand.
    LaneBitmask getLaneMaskForMO(const MachineOperand &MO) const;

    /// Returns true if the def register in \p MO has no uses.
    bool deadDefHasNoUse(const MachineOperand &MO);
  };

  /// Creates a new SUnit and returns a pointer to it.
  inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
#ifndef NDEBUG
    const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
#endif
    SUnits.emplace_back(MI, (unsigned)SUnits.size());
    assert((Addr == nullptr || Addr == &SUnits[0]) &&
           "SUnits std::vector reallocated on the fly!");
    return &SUnits.back();
  }

  /// Returns an existing SUnit for this MI, or nullptr.
  inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
    return MISUnitMap.lookup(MI);
  }

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
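// A minimal sketch of a scheduler built on ScheduleDAGInstrs, assuming the
// driver calls enterRegion()/schedule()/exitRegion() per region as described
// above. This toy schedule() only builds the dependency graph and keeps the
// original instruction order; a real scheduler would reorder SUnits here.
#include "llvm/CodeGen/ScheduleDAGInstrs.h"

namespace {
class TrivialSched : public llvm::ScheduleDAGInstrs {
public:
  TrivialSched(llvm::MachineFunction &MF, const llvm::MachineLoopInfo *MLI)
      : ScheduleDAGInstrs(MF, MLI) {}

  void schedule() override {
    // Build SUnits and dependency edges for the current region (no alias
    // analysis, so memory dependencies are conservative).
    buildSchedGraph(/*AA=*/nullptr);
    // A real implementation would now pick an order and move instructions;
    // leaving the region untouched is a valid (if useless) schedule.
  }
};
} // end anonymous namespace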
CodeGen/LiveRegMatrix.h
//===- LiveRegMatrix.h - Track register interference ----------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LiveRegMatrix analysis pass keeps track of virtual register interference
// along two dimensions: Slot indexes and register units. The matrix is used by
// register allocators to ensure that no interfering virtual registers get
// assigned to overlapping physical registers.
//
// Register units are defined in MCRegisterInfo.h; they represent the smallest
// unit of interference when dealing with overlapping physical registers. The
// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
// a virtual register is assigned to a physical register, the live range for
// the virtual register is inserted into the LiveIntervalUnion for each regunit
// in the physreg.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
#define LLVM_CODEGEN_LIVEREGMATRIX_H

#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include <memory>

namespace llvm {

class AnalysisUsage;
class LiveInterval;
class LiveIntervals;
class MachineFunction;
class TargetRegisterInfo;
class VirtRegMap;

class LiveRegMatrix : public MachineFunctionPass {
  const TargetRegisterInfo *TRI = nullptr;
  LiveIntervals *LIS = nullptr;
  VirtRegMap *VRM = nullptr;

  // UserTag changes whenever virtual registers have been modified.
  unsigned UserTag = 0;

  // The matrix is represented as a LiveIntervalUnion per register unit.
  LiveIntervalUnion::Allocator LIUAlloc;
  LiveIntervalUnion::Array Matrix;

  // Cached queries per register unit.
  std::unique_ptr<LiveIntervalUnion::Query[]> Queries;

  // Cached register mask interference info.
  unsigned RegMaskTag = 0;
  unsigned RegMaskVirtReg = 0;
  BitVector RegMaskUsable;

  // MachineFunctionPass boilerplate.
  void getAnalysisUsage(AnalysisUsage &) const override;
  bool runOnMachineFunction(MachineFunction &) override;
  void releaseMemory() override;

public:
  static char ID;

  LiveRegMatrix();

  //===--------------------------------------------------------------------===//
  // High-level interface.
  //===--------------------------------------------------------------------===//
  //
  // Check for interference before assigning virtual registers to physical
  // registers.
  //

  /// Invalidate cached interference queries after modifying virtual register
  /// live ranges. Interference checks may return stale information unless
  /// caches are invalidated.
  void invalidateVirtRegs() { ++UserTag; }

  enum InterferenceKind {
    /// No interference, go ahead and assign.
    IK_Free = 0,

    /// Virtual register interference. There are interfering virtual registers
    /// assigned to PhysReg or its aliases. This interference could be resolved
    /// by unassigning those other virtual registers.
    IK_VirtReg,

    /// Register unit interference. A fixed live range is in the way, typically
    /// argument registers for a call. This can't be resolved by unassigning
    /// other virtual registers.
    IK_RegUnit,

    /// RegMask interference. The live range is crossing an instruction with a
    /// regmask operand that doesn't preserve PhysReg. This typically means
    /// VirtReg is live across a call, and PhysReg isn't call-preserved.
    IK_RegMask
  };

  /// Check for interference before assigning VirtReg to PhysReg.
  /// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
  /// When there is more than one kind of interference, the InterferenceKind
  /// with the highest enum value is returned.
  InterferenceKind checkInterference(const LiveInterval &VirtReg,
                                     MCRegister PhysReg);

  /// Check for interference in the segment [Start, End) that may prevent
  /// assignment to PhysReg. If this function returns true, there is
  /// interference in the segment [Start, End) of some other interval already
  /// assigned to PhysReg. If this function returns false, PhysReg is free at
  /// the segment [Start, End).
  bool checkInterference(SlotIndex Start, SlotIndex End, MCRegister PhysReg);

  /// Assign VirtReg to PhysReg.
  /// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
  /// update VirtRegMap. The live range is expected to be available in PhysReg.
  void assign(const LiveInterval &VirtReg, MCRegister PhysReg);

  /// Unassign VirtReg from its PhysReg.
  /// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
  /// the assignment and updates VirtRegMap accordingly.
  void unassign(const LiveInterval &VirtReg);

  /// Returns true if the given \p PhysReg has any live intervals assigned.
  bool isPhysRegUsed(MCRegister PhysReg) const;

  //===--------------------------------------------------------------------===//
  // Low-level interface.
  //===--------------------------------------------------------------------===//
  //
  // Provide access to the underlying LiveIntervalUnions.
  //

  /// Check for regmask interference only.
  /// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
  /// If PhysReg is null, check if VirtReg crosses any regmask operands.
  bool checkRegMaskInterference(const LiveInterval &VirtReg,
                                MCRegister PhysReg = MCRegister::NoRegister);

  /// Check for regunit interference only.
  /// Return true if VirtReg overlaps a fixed assignment of one of PhysReg's
  /// register units.
  bool checkRegUnitInterference(const LiveInterval &VirtReg,
                                MCRegister PhysReg);

  /// Query a line of the assigned virtual register matrix directly.
  /// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
  /// This returns a reference to an internal Query data structure that is only
  /// valid until the next query() call.
  LiveIntervalUnion::Query &query(const LiveRange &LR, MCRegister RegUnit);

  /// Directly access the live interval unions per regunit.
  /// This returns an array indexed by the regunit number.
  LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }

  Register getOneVReg(unsigned PhysReg) const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
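// A minimal sketch of the probe-and-assign loop a register allocator runs
// against LiveRegMatrix, assuming `Matrix`, `VirtReg`, and the candidate
// `PhysRegs` come from the surrounding allocator; only checkInterference()
// and assign() are taken from the interface above.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveRegMatrix.h"

static bool tryAssign(llvm::LiveRegMatrix &Matrix,
                      const llvm::LiveInterval &VirtReg,
                      llvm::ArrayRef<llvm::MCRegister> PhysRegs) {
  for (llvm::MCRegister PhysReg : PhysRegs) {
    // IK_Free means no virtual, fixed-regunit, or regmask interference.
    if (Matrix.checkInterference(VirtReg, PhysReg) ==
        llvm::LiveRegMatrix::IK_Free) {
      Matrix.assign(VirtReg, PhysReg);
      return true;
    }
  }
  return false; // the caller must evict or spill instead
}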
CodeGen/BasicTTIImpl.h
//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <optional>
#include <utility>

namespace llvm {

class Function;
class GlobalValue;
class LLVMContext;
class ScalarEvolution;
class SCEV;
class TargetMachine;

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  using BaseT = TargetTransformInfoImplCRTPBase<T>;
  using TTI = TargetTransformInfo;

  /// Helper function to access this as a T.
  T *thisT() { return static_cast<T *>(this); }

  /// Estimate the cost of a broadcast shuffle as an extract plus a sequence
  /// of insert operations.
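  /// For example, with unit per-element costs, broadcasting <4 x float> is
  /// modeled as one extract of lane 0 plus four inserts: 1 + 4 = 5.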
  InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy,
                                              TTI::TargetCostKind CostKind) {
    InstructionCost Cost = 0;
    // Broadcast cost is equal to the cost of extracting the zero'th element
    // plus the cost of inserting it into every element of the result vector.
    Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                        CostKind, 0, nullptr, nullptr);

    for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                          CostKind, i, nullptr, nullptr);
    }
    return Cost;
  }

  /// Estimate a cost of shuffle as a sequence of extract and insert
  /// operations.
  InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy,
                                            TTI::TargetCostKind CostKind) {
    InstructionCost Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // source vector plus the cost of inserting it into the result vector.

    // e.g. <4 x float> with a mask of <0,5,2,7>, i.e. we need to extract from
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector, and finally index 3 of the second vector, and insert
    // them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
                                          CostKind, i, nullptr, nullptr);
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                          CostKind, i, nullptr, nullptr);
    }
    return Cost;
  }

  /// Estimate a cost of subvector extraction as a sequence of extract and
  /// insert operations.
  InstructionCost getExtractSubvectorOverhead(VectorType *VTy,
                                              TTI::TargetCostKind CostKind,
                                              int Index,
                                              FixedVectorType *SubVTy) {
    assert(VTy && SubVTy &&
           "Can only extract subvectors from vectors");
    int NumSubElts = SubVTy->getNumElements();
    assert((!isa<FixedVectorType>(VTy) ||
            (Index + NumSubElts) <=
                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
           "SK_ExtractSubvector index out of range");

    InstructionCost Cost = 0;
    // Subvector extraction cost is equal to the cost of extracting each
    // element from the source type plus the cost of inserting it into the
    // result vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost +=
          thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
                                      CostKind, i + Index, nullptr, nullptr);
      Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy,
                                          CostKind, i, nullptr, nullptr);
    }
    return Cost;
  }

  /// Estimate a cost of subvector insertion as a sequence of extract and
  /// insert operations.
  InstructionCost getInsertSubvectorOverhead(VectorType *VTy,
                                             TTI::TargetCostKind CostKind,
                                             int Index,
                                             FixedVectorType *SubVTy) {
    assert(VTy && SubVTy &&
           "Can only insert subvectors into vectors");
    int NumSubElts = SubVTy->getNumElements();
    assert((!isa<FixedVectorType>(VTy) ||
            (Index + NumSubElts) <=
                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
           "SK_InsertSubvector index out of range");

    InstructionCost Cost = 0;
    // Subvector insertion cost is equal to the cost of extracting each element
    // from the subvector type plus the cost of inserting it into the result
    // vector type.
    for (int i = 0; i != NumSubElts; ++i) {
      Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy,
                                          CostKind, i, nullptr, nullptr);
      Cost +=
          thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, CostKind,
                                      i + Index, nullptr, nullptr);
    }
    return Cost;
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

  static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
    switch (M) {
      case TTI::MIM_Unindexed:
        return ISD::UNINDEXED;
      case TTI::MIM_PreInc:
        return ISD::PRE_INC;
      case TTI::MIM_PreDec:
        return ISD::PRE_DEC;
      case TTI::MIM_PostInc:
        return ISD::POST_INC;
      case TTI::MIM_PostDec:
        return ISD::POST_DEC;
    }
    llvm_unreachable("Unexpected MemIndexedMode");
  }

  InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                              Align Alignment,
                                              bool VariableMask,
                                              bool IsGatherScatter,
                                              TTI::TargetCostKind CostKind) {
    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(DataTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(DataTy);
    // Assume the target does not have support for gather/scatter operations
    // and provide a rough estimate.
    //
    // First, compute the cost of the individual memory operations.
    InstructionCost AddrExtractCost =
        IsGatherScatter
            ? getVectorInstrCost(Instruction::ExtractElement,
                                 FixedVectorType::get(
                                     PointerType::get(VT->getElementType(), 0),
                                     VT->getNumElements()),
                                 CostKind, -1, nullptr, nullptr)
            : 0;
    InstructionCost LoadCost =
        VT->getNumElements() *
        (AddrExtractCost +
         getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));

    // Next, compute the cost of packing the result in a vector.
    InstructionCost PackingCost =
        getScalarizationOverhead(VT, Opcode != Instruction::Store,
                                 Opcode == Instruction::Store, CostKind);

    InstructionCost ConditionalCost = 0;
    if (VariableMask) {
      // Compute the cost of conditionally executing the memory operations with
      // variable masks. This includes extracting the individual conditions,
      // the branches, and the PHIs that combine the results.
      // NOTE: Estimating the cost of conditionally executing the memory
      // operations accurately is quite difficult and the current solution
      // provides a very rough estimate only.
      ConditionalCost =
          VT->getNumElements() *
          (getVectorInstrCost(
               Instruction::ExtractElement,
               FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
                                    VT->getNumElements()),
               CostKind, -1, nullptr, nullptr) +
           getCFInstrCost(Instruction::Br, CostKind) +
           getCFInstrCost(Instruction::PHI, CostKind));
    }

    return LoadCost + PackingCost + ConditionalCost;
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}
  virtual ~BasicTTIImplBase() = default;

  using TargetTransformInfoImplBase::DL;

public:
  /// \name Scalar TTI Implementations
  /// @{
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) const {
    EVT E = EVT::getIntegerVT(Context, BitWidth);
    return getTLI()->allowsMisalignedMemoryAccesses(
        E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
  }

  bool hasBranchDivergence(const Function *F = nullptr) { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isAlwaysUniform(const Value *V) { return false; }

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
    return false;
  }

  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const {
    return true;
  }

  unsigned getFlatAddressSpace() {
    // Return an invalid address space.
    return -1;
  }

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const {
    return false;
  }

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
    return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
  }

  unsigned getAssumedAddrSpace(const Value *V) const {
    return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
  }

  bool isSingleThreaded() const {
    return getTLI()->getTargetMachine().Options.ThreadModel ==
           ThreadModel::Single;
  }

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const {
    return getTLI()->getTargetMachine().getPredicatedAddrSpace(V);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const {
    return nullptr;
  }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace, Instruction *I = nullptr) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
  }

  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const {
    auto &&IsSupportedByTarget = [this, ScalarMemTy, ScalarValTy](unsigned VF) {
      auto *SrcTy = FixedVectorType::get(ScalarMemTy, VF / 2);
      EVT VT = getTLI()->getValueType(DL, SrcTy);
      if (getTLI()->isOperationLegal(ISD::STORE, VT) ||
          getTLI()->isOperationCustom(ISD::STORE, VT))
        return true;

      EVT ValVT =
          getTLI()->getValueType(DL, FixedVectorType::get(ScalarValTy, VF / 2));
      EVT LegalizedVT =
          getTLI()->getTypeToTransformTo(ScalarMemTy->getContext(), VT);
      return getTLI()->isTruncStoreLegal(LegalizedVT, ValVT);
    };
    while (VF > 2 && IsSupportedByTarget(VF))
      VF /= 2;
    return VF;
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
                          const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
                           const DataLayout &DL) const {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
  }

  bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
    return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
  }

  bool isNumRegsMajorCostOfLSR() {
    return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
  }

  bool isProfitableLSRChainElement(Instruction *I) {
    return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
  }

  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    if (getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace))
      return 0;
    return -1;
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool useAA() const { return getST()->useAA(); }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }

  unsigned getRegUsageForType(Type *Ty) {
    EVT ETy = getTLI()->getValueType(DL, Ty);
    return getTLI()->getNumRegisters(Ty->getContext(), ETy);
  }

  InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                             ArrayRef<const Value *> Operands, Type *AccessType,
                             TTI::TargetCostKind CostKind) {
    return BaseT::getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
  }

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JumpTableSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) {
    /// Try to find the estimated number of clusters. Note that the number of
    /// clusters identified in this function could be different from the actual
    /// numbers found in lowering. This function ignores switches that are
    /// lowered with a mix of jump table / bit test / BTree. This function was
    /// initially intended to be used when estimating the cost of a switch in
    /// the inline cost heuristic, but it's a generic cost model to be used in
    /// other places (e.g., in loop unrolling).
    unsigned N = SI.getNumCases();
    const TargetLoweringBase *TLI = getTLI();
    const DataLayout &DL = this->getDataLayout();

    JumpTableSize = 0;
    bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());

    // Early exit if neither a jump table nor a bit test is possible.
    if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
      return N;

    APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
    APInt MinCaseVal = MaxCaseVal;
    for (auto CI : SI.cases()) {
      const APInt &CaseVal = CI.getCaseValue()->getValue();
      if (CaseVal.sgt(MaxCaseVal))
        MaxCaseVal = CaseVal;
      if (CaseVal.slt(MinCaseVal))
        MinCaseVal = CaseVal;
    }

    // Check if suitable for a bit test
    if (N <= DL.getIndexSizeInBits(0u)) {
      SmallPtrSet<const BasicBlock *, 4> Dests;
      for (auto I : SI.cases())
        Dests.insert(I.getCaseSuccessor());

      if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
                                     DL))
        return 1;
    }

    // Check if suitable for a jump table.
    if (IsJTAllowed) {
      if (N < 2 || N < TLI->getMinimumJumpTableEntries())
        return N;
      uint64_t Range =
          (MaxCaseVal - MinCaseVal)
              .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
      // Check whether a range of clusters is dense enough for a jump table
      if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
        JumpTableSize = Range;
        return 1;
      }
    }
    return N;
  }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool shouldBuildRelLookupTables() const {
    const TargetMachine &TM = getTLI()->getTargetMachine();
    // In non-PIC mode, do not generate a relative lookup table.
    if (!TM.isPositionIndependent())
      return false;

    /// Relative lookup table entries consist of 32-bit offsets.
    /// Do not generate relative lookup tables for large code models
    /// in 64-bit architectures where 32-bit offsets might not be enough.
    if (TM.getCodeModel() == CodeModel::Medium ||
        TM.getCodeModel() == CodeModel::Large)
      return false;

    Triple TargetTriple = TM.getTargetTriple();
    if (!TargetTriple.isArch64Bit())
      return false;

    // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
    // there.
    if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
      return false;

    return true;
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
    return true;
  }

  InstructionCost getFPOpCost(Type *Ty) {
    // Check whether FADD is available, as a proxy for floating-point in
    // general.
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT))
      return TargetTransformInfo::TCC_Basic;
    return TargetTransformInfo::TCC_Expensive;
  }

  unsigned getInliningThresholdMultiplier() const { return 1; }
  unsigned adjustInliningThreshold(const CallBase *CB) { return 0; }
  unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const {
    return 0;
  }

  int getInlinerVectorBonusPercent() const { return 150; }

  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               TTI::UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, consider x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have less than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (BasicBlock *BB : L->blocks()) {
      for (Instruction &I : *BB) {
        if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
          if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
            if (!thisT()->isLoweredToCall(F))
              continue;
          }

          if (ORE) {
            ORE->emit([&]() {
              return OptimizationRemark("TTI", "DontUnroll", L->getStartLoc(),
                                        L->getHeader())
                     << "advising against unrolling the loop because it "
                        "contains a "
                     << ore::NV("Call", &I);
            });
          }
          return;
        }
      }
    }

    // Enable runtime and partial unrolling up to the specified size.
    // Enable using trip count upper bound to unroll loops.
    UP.Partial = UP.Runtime = UP.UpperBound = true;
    UP.PartialThreshold = MaxOps;

    // Avoid unrolling when optimizing for size.
    UP.OptSizeThreshold = 0;
    UP.PartialOptSizeThreshold = 0;

    // Set number of instructions optimized when "back edge"
    // becomes "fall through" to default value of 2.
    UP.BEInsns = 2;
  }

  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             TTI::PeelingPreferences &PP) {
    PP.PeelCount = 0;
    PP.AllowPeeling = true;
    PP.AllowLoopNestsPeeling = false;
    PP.PeelProfiledIterations = true;
  }

  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC,
                                TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) {
    return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  }

  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) {
    return BaseT::preferPredicateOverEpilogue(TFI);
  }

  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) {
    return BaseT::getPreferredTailFoldingStyle(IVUpdateMayOverflow);
  }

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                               IntrinsicInst &II) {
    return BaseT::instCombineIntrinsic(IC, II);
  }

  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) {
    return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                   KnownBitsComputed);
  }

  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) {
    return BaseT::simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }

  virtual std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    return std::optional<unsigned>(
        getST()->getCacheSize(static_cast<unsigned>(Level)));
  }

  virtual std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    std::optional<unsigned> TargetResult =
        getST()->getCacheAssociativity(static_cast<unsigned>(Level));

    if (TargetResult)
      return TargetResult;

    return BaseT::getCacheAssociativity(Level);
  }

  virtual unsigned getCacheLineSize() const {
    return getST()->getCacheLineSize();
  }

  virtual unsigned getPrefetchDistance() const {
    return getST()->getPrefetchDistance();
  }

  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const {
    return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                         NumPrefetches, HasCall);
  }

  virtual unsigned getMaxPrefetchIterationsAhead() const {
    return getST()->getMaxPrefetchIterationsAhead();
  }

  virtual bool enableWritePrefetching() const {
    return getST()->enableWritePrefetching();
  }

  virtual bool shouldPrefetchAddressSpace(unsigned AS) const {
    return getST()->shouldPrefetchAddressSpace(AS);
  }

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(32);
  }

  std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
  std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
  bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(VectorType *InTy,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind) {
    /// FIXME: a bitfield is not a reasonable abstraction for talking about
    /// which elements are needed from a scalable vector
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);

    assert(DemandedElts.getBitWidth() == Ty->getNumElements() &&
           "Vector size mismatch");

    InstructionCost Cost = 0;

    for (int i = 0, e = Ty->getNumElements(); i < e; ++i) {
      if (!DemandedElts[i])
        continue;
      if (Insert)
        Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty,
                                            CostKind, i, nullptr, nullptr);
      if (Extract)
        Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                            CostKind, i, nullptr, nullptr);
    }

    return Cost;
  }
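
  // Illustrative example (not part of the interface): for a <4 x i32> vector
  // with DemandedElts == 0b0101, Insert == true and Extract == false, the
  // loop above sums getVectorInstrCost(Instruction::InsertElement, ...) for
  // lanes 0 and 2 only, so only the demanded lanes contribute to the estimate.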

  /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead.
  InstructionCost getScalarizationOverhead(VectorType *InTy, bool Insert,
                                           bool Extract,
                                           TTI::TargetCostKind CostKind) {
    if (isa<ScalableVectorType>(InTy))
      return InstructionCost::getInvalid();
    auto *Ty = cast<FixedVectorType>(InTy);

    APInt DemandedElts = APInt::getAllOnes(Ty->getNumElements());
    return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                             CostKind);
  }

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) {
    assert(Args.size() == Tys.size() && "Expected matching Args and Tys");

    InstructionCost Cost = 0;
    SmallPtrSet<const Value*, 4> UniqueOperands;
    for (int I = 0, E = Args.size(); I != E; I++) {
      // Disregard things like metadata arguments.
      const Value *A = Args[I];
      Type *Ty = Tys[I];
      if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() &&
          !Ty->isPtrOrPtrVectorTy())
        continue;

      if (!isa<Constant>(A) && UniqueOperands.insert(A).second) {
        if (auto *VecTy = dyn_cast<VectorType>(Ty))
          Cost += getScalarizationOverhead(VecTy, /*Insert*/ false,
                                           /*Extract*/ true, CostKind);
      }
    }

    return Cost;
  }

  /// Estimate the overhead of scalarizing the inputs and outputs of an
  /// instruction, with return type RetTy and arguments Args of type Tys. If
  /// Args are unknown (empty), then the cost associated with one argument is
  /// added as a heuristic.
  InstructionCost getScalarizationOverhead(VectorType *RetTy,
                                           ArrayRef<const Value *> Args,
                                           ArrayRef<Type *> Tys,
                                           TTI::TargetCostKind CostKind) {
    InstructionCost Cost = getScalarizationOverhead(
        RetTy, /*Insert*/ true, /*Extract*/ false, CostKind);
    if (!Args.empty())
      Cost += getOperandsScalarizationOverhead(Args, Tys, CostKind);
    else
      // When no information on arguments is provided, we add the cost
      // associated with one argument as a heuristic.
      Cost += getScalarizationOverhead(RetTy, /*Insert*/ false,
                                       /*Extract*/ true, CostKind);

    return Cost;
  }

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<InstructionCost, MVT> getTypeLegalizationCost(Type *Ty) const {
    LLVMContext &C = Ty->getContext();
    EVT MTy = getTLI()->getValueType(DL, Ty);

    InstructionCost Cost = 1;
    // We keep legalizing the type until we find a legal kind. We assume that
    // the only operation that costs anything is the split. After splitting
    // we need to handle two types.
    while (true) {
      TargetLoweringBase::LegalizeKind LK = getTLI()->getTypeConversion(C, MTy);

      if (LK.first == TargetLoweringBase::TypeScalarizeScalableVector) {
        // Ensure we return a sensible simple VT here, since many callers of
        // this function require it.
        MVT VT = MTy.isSimple() ? MTy.getSimpleVT() : MVT::i64;
        return std::make_pair(InstructionCost::getInvalid(), VT);
      }

      if (LK.first == TargetLoweringBase::TypeLegal)
        return std::make_pair(Cost, MTy.getSimpleVT());

      if (LK.first == TargetLoweringBase::TypeSplitVector ||
          LK.first == TargetLoweringBase::TypeExpandInteger)
        Cost *= 2;

      // Do not loop with f128 type.
      if (MTy == LK.second)
        return std::make_pair(Cost, MTy.getSimpleVT());

      // Keep legalizing the type.
      MTy = LK.second;
    }
  }
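
  // Worked example (illustrative, assuming a hypothetical target whose widest
  // legal vector type is v4i32): legalizing <16 x i32> splits the type twice
  // (v16i32 -> 2 x v8i32 -> 4 x v4i32), so the returned cost is 1 * 2 * 2 = 4
  // and the returned MVT is v4i32.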

  unsigned getMaxInterleaveFactor(ElementCount VF) { return 1; }

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle more cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind,
                                           Opd1Info, Opd2Info,
                                           Args, CxtI);

    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);

    bool IsFloat = Ty->isFPOrFPVectorTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    InstructionCost OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      return LT.first * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // An 'Expand' of URem and SRem is special because it may default
    // to expanding the operation into a sequence of sub-operations
    // i.e. X % Y -> X-(X/Y)*Y.
    if (ISD == ISD::UREM || ISD == ISD::SREM) {
      bool IsSigned = ISD == ISD::SREM;
      if (TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIVREM : ISD::UDIVREM,
                                        LT.second) ||
          TLI->isOperationLegalOrCustom(IsSigned ? ISD::SDIV : ISD::UDIV,
                                        LT.second)) {
        unsigned DivOpc = IsSigned ? Instruction::SDiv : Instruction::UDiv;
        InstructionCost DivCost = thisT()->getArithmeticInstrCost(
            DivOpc, Ty, CostKind, Opd1Info, Opd2Info);
        InstructionCost MulCost =
            thisT()->getArithmeticInstrCost(Instruction::Mul, Ty, CostKind);
        InstructionCost SubCost =
            thisT()->getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
        return DivCost + MulCost + SubCost;
      }
    }

    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    // Else, assume that we need to scalarize this op.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
      InstructionCost Cost = thisT()->getArithmeticInstrCost(
          Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info,
          Args, CxtI);
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      SmallVector<Type *> Tys(Args.size(), Ty);
      return getScalarizationOverhead(VTy, Args, Tys, CostKind) +
             VTy->getNumElements() * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
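
  // E.g. (illustrative, assuming a target that must expand sdiv for the
  // type): sdiv <4 x i32> is modeled by the fallback above as 4 scalar sdivs
  // plus the scalarization overhead of inserting the 4 results and extracting
  // the operands.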

  TTI::ShuffleKind improveShuffleKindFromMask(TTI::ShuffleKind Kind,
                                              ArrayRef<int> Mask) const {
    int Limit = Mask.size() * 2;
    if (Mask.empty() ||
        // Extra check required by isSingleSourceMaskImpl function (called by
        // ShuffleVectorInst::isSingleSourceMask).
        any_of(Mask, [Limit](int I) { return I >= Limit; }))
      return Kind;
    int Index;
    switch (Kind) {
    case TTI::SK_PermuteSingleSrc:
      if (ShuffleVectorInst::isReverseMask(Mask))
        return TTI::SK_Reverse;
      if (ShuffleVectorInst::isZeroEltSplatMask(Mask))
        return TTI::SK_Broadcast;
      break;
    case TTI::SK_PermuteTwoSrc:
      if (ShuffleVectorInst::isSelectMask(Mask))
        return TTI::SK_Select;
      if (ShuffleVectorInst::isTransposeMask(Mask))
        return TTI::SK_Transpose;
      if (ShuffleVectorInst::isSpliceMask(Mask, Index))
        return TTI::SK_Splice;
      break;
    case TTI::SK_Select:
    case TTI::SK_Reverse:
    case TTI::SK_Broadcast:
    case TTI::SK_Transpose:
    case TTI::SK_InsertSubvector:
    case TTI::SK_ExtractSubvector:
    case TTI::SK_Splice:
      break;
    }
    return Kind;
  }
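
  // E.g. (illustrative): for a single-source shuffle of <4 x i32>, the mask
  // <3,2,1,0> is refined from SK_PermuteSingleSrc to SK_Reverse and the mask
  // <0,0,0,0> to SK_Broadcast; masks that match no pattern keep the original
  // kind.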

  InstructionCost getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args = std::nullopt) {

    switch (improveShuffleKindFromMask(Kind, Mask)) {
    case TTI::SK_Broadcast:
      if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
        return getBroadcastShuffleOverhead(FVT, CostKind);
      return InstructionCost::getInvalid();
    case TTI::SK_Select:
    case TTI::SK_Splice:
    case TTI::SK_Reverse:
    case TTI::SK_Transpose:
    case TTI::SK_PermuteSingleSrc:
    case TTI::SK_PermuteTwoSrc:
      if (auto *FVT = dyn_cast<FixedVectorType>(Tp))
        return getPermuteShuffleOverhead(FVT, CostKind);
      return InstructionCost::getInvalid();
    case TTI::SK_ExtractSubvector:
      return getExtractSubvectorOverhead(Tp, CostKind, Index,
                                         cast<FixedVectorType>(SubTp));
    case TTI::SK_InsertSubvector:
      return getInsertSubvectorOverhead(Tp, CostKind, Index,
                                        cast<FixedVectorType>(SubTp));
    }
    llvm_unreachable("Unknown TTI::ShuffleKind");
  }

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I = nullptr) {
    if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0)
      return 0;

    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");
    std::pair<InstructionCost, MVT> SrcLT = getTypeLegalizationCost(Src);
    std::pair<InstructionCost, MVT> DstLT = getTypeLegalizationCost(Dst);

    TypeSize SrcSize = SrcLT.second.getSizeInBits();
    TypeSize DstSize = DstLT.second.getSizeInBits();
    bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy();
    bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy();

    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      // Check for NOOP conversions.
      if (TLI->isTruncateFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::BitCast:
      // Bitcasts between types that are legalized to the same type are free;
      // assume an int to/from ptr cast of the same size is also free.
      if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst &&
          SrcSize == DstSize)
        return 0;
      break;
    case Instruction::FPExt:
      if (I && getTLI()->isExtFree(I))
        return 0;
      break;
    case Instruction::ZExt:
      if (TLI->isZExtFree(SrcLT.second, DstLT.second))
        return 0;
      [[fallthrough]];
    case Instruction::SExt:
      if (I && getTLI()->isExtFree(I))
        return 0;

      // If this is a zext/sext of a load, return 0 if the corresponding
      // extending load exists on target and the result type is legal.
      if (CCH == TTI::CastContextHint::Normal) {
        EVT ExtVT = EVT::getEVT(Dst);
        EVT LoadVT = EVT::getEVT(Src);
        unsigned LType =
          ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD);
        if (DstLT.first == SrcLT.first &&
            TLI->isLoadExtLegal(LType, ExtVT, LoadVT))
          return 0;
      }
      break;
    case Instruction::AddrSpaceCast:
      if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(),
                                   Dst->getPointerAddressSpace()))
        return 0;
      break;
    }

    auto *SrcVTy = dyn_cast<VectorType>(Src);
    auto *DstVTy = dyn_cast<VectorType>(Dst);

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return SrcLT.first;

    // Handle scalar conversions.
    if (!SrcVTy && !DstVTy) {
      // Just check the op cost. If the operation is legal then assume it costs
      // 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (DstVTy && SrcVTy) {
      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first && SrcSize == DstSize) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return SrcLT.first;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return SrcLT.first * 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are legalizing by splitting, query the concrete TTI for the cost
      // of casting the original vector twice. We also need to factor in the
      // cost of the split itself. Count that as 1, to be consistent with
      // getTypeLegalizationCost().
      bool SplitSrc =
          TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) ==
          TargetLowering::TypeSplitVector;
      bool SplitDst =
          TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) ==
          TargetLowering::TypeSplitVector;
      if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() &&
          DstVTy->getElementCount().isVector()) {
        Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy);
        Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy);
        T *TTI = static_cast<T *>(this);
        // If both types need to be split then the split is free.
        InstructionCost SplitCost =
            (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0;
        return SplitCost +
               (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH,
                                          CostKind, I));
      }

      // The scalarization cost is Invalid; we can't assume any number of
      // elements.
      if (isa<ScalableVectorType>(DstVTy))
        return InstructionCost::getInvalid();

      // In other cases where the source or destination are illegal, assume
      // the operation will get scalarized.
      unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements();
      InstructionCost Cost = thisT()->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(DstVTy, /*Insert*/ true, /*Extract*/ true,
                                      CostKind) +
             Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcasts between vectors and scalars. We need
    // to assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast) {
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (SrcVTy ? getScalarizationOverhead(SrcVTy, /*Insert*/ false,
                                                /*Extract*/ true, CostKind)
                     : 0) +
             (DstVTy ? getScalarizationOverhead(DstVTy, /*Insert*/ true,
                                                /*Extract*/ false, CostKind)
                     : 0);
    }

    llvm_unreachable("Unhandled cast");
  }
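
  // E.g. (illustrative, assuming a hypothetical target with 128-bit vectors):
  // fpext <8 x float> to <8 x double> splits both the source and destination
  // types, so the split itself is modeled as free and the cost becomes twice
  // the cost of fpext <4 x float> to <4 x double>.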

  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy, unsigned Index) {
    TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
    return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy,
                                       CostKind, Index, nullptr, nullptr) +
           thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(),
                                     TTI::CastContextHint::None, CostKind);
  }

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) {
    return BaseT::getCFInstrCost(Opcode, CostKind, I);
  }

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I = nullptr) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // TODO: Handle other cost kinds.
    if (CostKind != TTI::TCK_RecipThroughput)
      return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                       I);

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the cmp/select is scalarized.
    // TODO: If one of the types get legalized by splitting, handle this
    // similarly to what getCastInstrCost() does.
    if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) {
      if (isa<ScalableVectorType>(ValTy))
        return InstructionCost::getInvalid();

      unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      InstructionCost Cost = thisT()->getCmpSelInstrCost(
          Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValVTy, /*Insert*/ true,
                                      /*Extract*/ false, CostKind) +
             Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }
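
  // E.g. (illustrative): on a target with no vector select for
  // <4 x i1> / <4 x float>, the fallback above models the cost as 4 scalar
  // selects plus the overhead of inserting the 4 results into a vector.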

  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0, Value *Op1) {
    return getRegUsageForType(Val->getScalarType());
  }

  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) {
    Value *Op0 = nullptr;
    Value *Op1 = nullptr;
    if (auto *IE = dyn_cast<InsertElementInst>(&I)) {
      Op0 = IE->getOperand(0);
      Op1 = IE->getOperand(1);
    }
    return thisT()->getVectorInstrCost(I.getOpcode(), Val, CostKind, Index, Op0,
                                       Op1);
  }

  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind) {
    assert(DemandedDstElts.getBitWidth() == (unsigned)VF * ReplicationFactor &&
           "Unexpected size of DemandedDstElts.");

    InstructionCost Cost;

    auto *SrcVT = FixedVectorType::get(EltTy, VF);
    auto *ReplicatedVT = FixedVectorType::get(EltTy, VF * ReplicationFactor);

    // The mask shuffling cost is estimated as extracting all the elements of
    // the mask and inserting each of them Factor times into the wide vector:
    //
    // E.g. an interleaved group with factor 3:
    //    %mask = icmp ult <8 x i32> %vec1, %vec2
    //    %interleaved.mask = shufflevector <8 x i1> %mask, <8 x i1> undef,
    //        <24 x i32> <0,0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7>
    // The cost is estimated as extracting all mask elements from the <8xi1>
    // mask vector and inserting them factor times into the <24xi1> shuffled
    // mask vector.
    APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedDstElts, VF);
    Cost += thisT()->getScalarizationOverhead(SrcVT, DemandedSrcElts,
                                              /*Insert*/ false,
                                              /*Extract*/ true, CostKind);
    Cost += thisT()->getScalarizationOverhead(ReplicatedVT, DemandedDstElts,
                                              /*Insert*/ true,
                                              /*Extract*/ false, CostKind);

    return Cost;
  }

  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  TTI::OperandValueInfo OpInfo = {TTI::OK_AnyValue, TTI::OP_None},
                  const Instruction *I = nullptr) {
    assert(!Src->isVoidTy() && "Invalid type");
    // Assume types that don't map to a simple value type, such as structs,
    // are expensive.
    if (getTLI()->getValueType(DL, Src, true) == MVT::Other)
      return 4;
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);

    // Assume that all loads of legal types cost 1.
    InstructionCost Cost = LT.first;
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost;

    const DataLayout &DL = this->getDataLayout();
    if (Src->isVectorTy() &&
        // In practice it's not currently possible to have a change in lane
        // length for extending loads or truncating stores so both types should
        // have the same scalable property.
        TypeSize::isKnownLT(DL.getTypeStoreSizeInBits(Src),
                            LT.second.getSizeInBits())) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store
      // is legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(
            cast<VectorType>(Src), Opcode != Instruction::Store,
            Opcode == Instruction::Store, CostKind);
      }
    }

    return Cost;
  }
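
  // E.g. (illustrative; whether the extending load is legal is
  // target-dependent): a load of <4 x i8> that legalizes to v4i32 is an
  // extending load, and if ISD::EXTLOAD for that combination is neither Legal
  // nor Custom, the code above adds the cost of inserting the 4 loaded
  // elements into the legalized vector.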

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, true, false,
                                       CostKind);
  }

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) {
    return getCommonMaskedMemoryOpCost(Opcode, DataTy, Alignment, VariableMask,
                                       true, CostKind);
  }

  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) {

    // We cannot scalarize scalable vectors, so return Invalid.
    if (isa<ScalableVectorType>(VecTy))
      return InstructionCost::getInvalid();

    auto *VT = cast<FixedVectorType>(VecTy);

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts);

    // First, the cost of the load/store operation.
    InstructionCost Cost;
    if (UseMaskForCond || UseMaskForGaps)
      Cost = thisT()->getMaskedMemoryOpCost(Opcode, VecTy, Alignment,
                                            AddressSpace, CostKind);
    else
      Cost = thisT()->getMemoryOpCost(Opcode, VecTy, Alignment, AddressSpace,
                                      CostKind);

    // Legalize the vector type, and get the legalized and unlegalized type
    // sizes.
    MVT VecTyLT = getTypeLegalizationCost(VecTy).second;
    unsigned VecTySize = thisT()->getDataLayout().getTypeStoreSize(VecTy);
    unsigned VecTyLTSize = VecTyLT.getStoreSize();

    // Scale the cost of the memory operation by the fraction of legalized
    // instructions that will actually be used. We shouldn't account for the
    // cost of dead instructions since they will be removed.
    //
    // E.g., An interleaved load of factor 8:
    //       %vec = load <16 x i64>, <16 x i64>* %ptr
    //       %v0 = shufflevector %vec, undef, <0, 8>
    //
    // If <16 x i64> is legalized to 8 v2i64 loads, only 2 of the loads will be
    // used (those corresponding to elements [0:1] and [8:9] of the unlegalized
    // type). The other loads are unused.
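    // In that case UsedInsts.count() == 2 and NumLegalInsts == 8, so the
    // memory cost below is scaled by 2/8.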
    //
    // TODO: Note that legalization can turn masked loads/stores into unmasked
    // (legalized) loads/stores. This can be reflected in the cost.
    if (Cost.isValid() && VecTySize > VecTyLTSize) {
      // The number of loads of a legal type it will take to represent a load
      // of the unlegalized vector type.
      unsigned NumLegalInsts = divideCeil(VecTySize, VecTyLTSize);

      // The number of elements of the unlegalized type that correspond to a
      // single legal instruction.
      unsigned NumEltsPerLegalInst = divideCeil(NumElts, NumLegalInsts);

      // Determine which legal instructions will be used.
      BitVector UsedInsts(NumLegalInsts, false);
      for (unsigned Index : Indices)
        for (unsigned Elt = 0; Elt < NumSubElts; ++Elt)
          UsedInsts.set((Index + Elt * Factor) / NumEltsPerLegalInst);

      // Scale the cost of the load by the fraction of legal instructions that
      // will be used.
      Cost = divideCeil(UsedInsts.count() * *Cost.getValue(), NumLegalInsts);
    }

    // Then add the cost of the interleave operation.
    assert(Indices.size() <= Factor &&
           "Interleaved memory op has too many members");

    const APInt DemandedAllSubElts = APInt::getAllOnes(NumSubElts);
    const APInt DemandedAllResultElts = APInt::getAllOnes(NumElts);

    APInt DemandedLoadStoreElts = APInt::getZero(NumElts);
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < NumSubElts; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    if (Opcode == Instruction::Load) {
      // The interleave cost is estimated as extracting the sub-vectors'
      // elements from the wide vector and inserting them into the
      // sub-vectors.
      //
      // E.g. An interleaved load of factor 2 (with one member of index 0):
      //      %vec = load <8 x i32>, <8 x i32>* %ptr
      //      %v0 = shuffle %vec, undef, <0, 2, 4, 6>         ; Index 0
      // The cost is estimated as extracting elements at 0, 2, 4, 6 from the
      // <8 x i32> vector and inserting them into a <4 x i32> vector.
      InstructionCost InsSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts,
          /*Insert*/ true, /*Extract*/ false, CostKind);
      Cost += Indices.size() * InsSubCost;
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ false,
                                                /*Extract*/ true, CostKind);
    } else {
      // The interleave cost is estimated as extracting elements from the
      // sub-vectors and inserting them into the wide vector.
      //
      // E.g. An interleaved store of factor 3 with 2 members at indices 0,1:
      // (using VF=4):
      //    %v0_v1 = shuffle %v0, %v1, <0,4,undef,1,5,undef,2,6,undef,3,7,undef>
      //    %gaps.mask = <true, true, false, true, true, false,
      //                  true, true, false, true, true, false>
      //    call llvm.masked.store <12 x i32> %v0_v1, <12 x i32>* %ptr,
      //                           i32 Align, <12 x i1> %gaps.mask
      // The cost is estimated as extracting all elements (of actual members,
      // excluding gaps) from both <4 x i32> vectors and inserting them into
      // the <12 x i32> vector.
      InstructionCost ExtSubCost = thisT()->getScalarizationOverhead(
          SubVT, DemandedAllSubElts,
          /*Insert*/ false, /*Extract*/ true, CostKind);
      Cost += ExtSubCost * Indices.size();
      Cost += thisT()->getScalarizationOverhead(VT, DemandedLoadStoreElts,
                                                /*Insert*/ true,
                                                /*Extract*/ false, CostKind);
    }

    if (!UseMaskForCond)
      return Cost;

    Type *I8Type = Type::getInt8Ty(VT->getContext());

    Cost += thisT()->getReplicationShuffleCost(
        I8Type, Factor, NumSubElts,
        UseMaskForGaps ? DemandedLoadStoreElts : DemandedAllResultElts,
        CostKind);

    // The Gaps mask is invariant and created outside the loop, therefore the
    // cost of creating it is not accounted for here. However if we have both
    // a MaskForGaps and some other mask that guards the execution of the
    // memory access, we need to account for the cost of And-ing the two masks
    // inside the loop.
    if (UseMaskForGaps) {
      auto *MaskVT = FixedVectorType::get(I8Type, NumElts);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::And, MaskVT,
                                              CostKind);
    }

    return Cost;
  }

  /// Get intrinsic cost based on arguments.
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) {
    // Check for generically free intrinsics.
    if (BaseT::getIntrinsicInstrCost(ICA, CostKind) == 0)
      return 0;

    // Assume that target intrinsics are cheap.
    Intrinsic::ID IID = ICA.getID();
    if (Function::isTargetIntrinsic(IID))
      return TargetTransformInfo::TCC_Basic;

    if (ICA.isTypeBasedOnly())
      return getTypeBasedIntrinsicInstrCost(ICA, CostKind);

    Type *RetTy = ICA.getReturnType();

    ElementCount RetVF =
        (RetTy->isVectorTy() ? cast<VectorType>(RetTy)->getElementCount()
                             : ElementCount::getFixed(1));
    const IntrinsicInst *I = ICA.getInst();
    const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
    FastMathFlags FMF = ICA.getFlags();
    switch (IID) {
    default:
      break;

    case Intrinsic::powi:
      if (auto *RHSC = dyn_cast<ConstantInt>(Args[1])) {
        bool ShouldOptForSize = I->getParent()->getParent()->hasOptSize();
        if (getTLI()->isBeneficialToExpandPowI(RHSC->getSExtValue(),
                                               ShouldOptForSize)) {
          // The cost is modeled on the expansion performed by ExpandPowI in
          // SelectionDAGBuilder.
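          // E.g. (illustrative): for powi(x, 11), |11| = 0b1011 has
          // ActiveBits == 4 and PopCount == 3, so the expansion is modeled
          // as (4 + 3 - 2) == 5 fmuls; a negative exponent adds one fdiv.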
          APInt Exponent = RHSC->getValue().abs();
          unsigned ActiveBits = Exponent.getActiveBits();
          unsigned PopCount = Exponent.popcount();
          InstructionCost Cost = (ActiveBits + PopCount - 2) *
                                 thisT()->getArithmeticInstrCost(
                                     Instruction::FMul, RetTy, CostKind);
          if (RHSC->isNegative())
            Cost += thisT()->getArithmeticInstrCost(Instruction::FDiv, RetTy,
                                                    CostKind);
          return Cost;
        }
      }
      break;
    case Intrinsic::cttz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCttz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;

    case Intrinsic::ctlz:
      // FIXME: If necessary, this should go in target-specific overrides.
      if (RetVF.isScalar() && getTLI()->isCheapToSpeculateCtlz(RetTy))
        return TargetTransformInfo::TCC_Basic;
      break;

    case Intrinsic::memcpy:
      return thisT()->getMemcpyCost(ICA.getInst());

    case Intrinsic::masked_scatter: {
      const Value *Mask = Args[3];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[2])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Store,
                                             ICA.getArgTypes()[0], Args[1],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::masked_gather: {
      const Value *Mask = Args[2];
      bool VarMask = !isa<Constant>(Mask);
      Align Alignment = cast<ConstantInt>(Args[1])->getAlignValue();
      return thisT()->getGatherScatterOpCost(Instruction::Load, RetTy, Args[0],
                                             VarMask, Alignment, CostKind, I);
    }
    case Intrinsic::experimental_stepvector: {
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      // The cost of materialising a constant integer vector.
      return TargetTransformInfo::TCC_Basic;
    }
    case Intrinsic::vector_extract: {
      // FIXME: Handle case where a scalable vector is extracted from a scalable
      // vector
      if (isa<ScalableVectorType>(RetTy))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[1])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_ExtractSubvector, cast<VectorType>(Args[0]->getType()),
          std::nullopt, CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_insert: {
      // FIXME: Handle case where a scalable vector is inserted into a scalable
      // vector
      if (isa<ScalableVectorType>(Args[1]->getType()))
        return BaseT::getIntrinsicInstrCost(ICA, CostKind);
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_InsertSubvector, cast<VectorType>(Args[0]->getType()),
          std::nullopt, CostKind, Index, cast<VectorType>(Args[1]->getType()));
    }
    case Intrinsic::experimental_vector_reverse: {
      return thisT()->getShuffleCost(
          TTI::SK_Reverse, cast<VectorType>(Args[0]->getType()), std::nullopt,
          CostKind, 0, cast<VectorType>(RetTy));
    }
    case Intrinsic::experimental_vector_splice: {
      unsigned Index = cast<ConstantInt>(Args[2])->getZExtValue();
      return thisT()->getShuffleCost(
          TTI::SK_Splice, cast<VectorType>(Args[0]->getType()), std::nullopt,
          CostKind, Index, cast<VectorType>(RetTy));
    }
    case Intrinsic::vector_reduce_add:
    case Intrinsic::vector_reduce_mul:
    case Intrinsic::vector_reduce_and:
    case Intrinsic::vector_reduce_or:
    case Intrinsic::vector_reduce_xor:
    case Intrinsic::vector_reduce_smax:
    case Intrinsic::vector_reduce_smin:
    case Intrinsic::vector_reduce_fmax:
    case Intrinsic::vector_reduce_fmin:
    case Intrinsic::vector_reduce_fmaximum:
    case Intrinsic::vector_reduce_fminimum:
    case Intrinsic::vector_reduce_umax:
    case Intrinsic::vector_reduce_umin: {
      IntrinsicCostAttributes Attrs(IID, RetTy, Args[0]->getType(), FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::vector_reduce_fadd:
    case Intrinsic::vector_reduce_fmul: {
      IntrinsicCostAttributes Attrs(
          IID, RetTy, {Args[0]->getType(), Args[1]->getType()}, FMF, I, 1);
      return getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
    }
    case Intrinsic::fshl:
    case Intrinsic::fshr: {
      const Value *X = Args[0];
      const Value *Y = Args[1];
      const Value *Z = Args[2];
      const TTI::OperandValueInfo OpInfoX = TTI::getOperandInfo(X);
      const TTI::OperandValueInfo OpInfoY = TTI::getOperandInfo(Y);
      const TTI::OperandValueInfo OpInfoZ = TTI::getOperandInfo(Z);
      const TTI::OperandValueInfo OpInfoBW =
        {TTI::OK_UniformConstantValue,
         isPowerOf2_32(RetTy->getScalarSizeInBits()) ? TTI::OP_PowerOf2
         : TTI::OP_None};

      // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
      // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
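      // E.g. (illustrative): a rotate (X == Y) by a variable amount is
      // modeled as or + sub + shl + lshr + urem; the icmp/select pair that
      // guards the shift-by-zero case is only added for genuine funnel
      // shifts (X != Y).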
      InstructionCost Cost = 0;
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Or, RetTy, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(BinaryOperator::Sub, RetTy, CostKind);
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::Shl, RetTy, CostKind, OpInfoX,
          {OpInfoZ.Kind, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(
          BinaryOperator::LShr, RetTy, CostKind, OpInfoY,
          {OpInfoZ.Kind, TTI::OP_None});
      // Non-constant shift amounts require a modulo.
      if (!OpInfoZ.isConstant())
        Cost += thisT()->getArithmeticInstrCost(BinaryOperator::URem, RetTy,
                                                CostKind, OpInfoZ, OpInfoBW);
      // For non-rotates (X != Y) we must add shift-by-zero handling costs.
      if (X != Y) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost +=
            thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                        CmpInst::ICMP_EQ, CostKind);
        Cost +=
            thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                        CmpInst::ICMP_EQ, CostKind);
      }
      return Cost;
    }
    case Intrinsic::get_active_lane_mask: {
      EVT ResVT = getTLI()->getValueType(DL, RetTy, true);
      EVT ArgType = getTLI()->getValueType(DL, ICA.getArgTypes()[0], true);

      // If we're not expanding the intrinsic then we assume this is cheap
      // to implement.
      if (!getTLI()->shouldExpandGetActiveLaneMask(ResVT, ArgType)) {
        return getTypeLegalizationCost(RetTy).first;
      }

      // Create the expanded types that will be used to calculate the uadd_sat
      // operation.
      Type *ExpRetTy = VectorType::get(
          ICA.getArgTypes()[0], cast<VectorType>(RetTy)->getElementCount());
      IntrinsicCostAttributes Attrs(Intrinsic::uadd_sat, ExpRetTy, {}, FMF);
      InstructionCost Cost =
          thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, ExpRetTy, RetTy,
                                          CmpInst::ICMP_ULT, CostKind);
      return Cost;
    }
    }

    // Assume that we need to scalarize this intrinsic.
    // Compute the scalarization overhead based on Args for a vector
    // intrinsic.
    InstructionCost ScalarizationCost = InstructionCost::getInvalid();
    if (RetVF.isVector() && !RetVF.isScalable()) {
      ScalarizationCost = 0;
      if (!RetTy->isVoidTy())
        ScalarizationCost += getScalarizationOverhead(
            cast<VectorType>(RetTy),
            /*Insert*/ true, /*Extract*/ false, CostKind);
      ScalarizationCost +=
          getOperandsScalarizationOverhead(Args, ICA.getArgTypes(), CostKind);
    }

    IntrinsicCostAttributes Attrs(IID, RetTy, ICA.getArgTypes(), FMF, I,
                                  ScalarizationCost);
    return thisT()->getTypeBasedIntrinsicInstrCost(Attrs, CostKind);
  }

  /// Get intrinsic cost based on argument types.
  /// If ScalarizationCostPassed is std::numeric_limits<unsigned>::max(), the
  /// cost of scalarizing the arguments and the return value will be computed
  /// based on types.
  InstructionCost
  getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                 TTI::TargetCostKind CostKind) {
    Intrinsic::ID IID = ICA.getID();
    Type *RetTy = ICA.getReturnType();
    const SmallVectorImpl<Type *> &Tys = ICA.getArgTypes();
    FastMathFlags FMF = ICA.getFlags();
    InstructionCost ScalarizationCostPassed = ICA.getScalarizationCost();
    bool SkipScalarizationCost = ICA.skipScalarizationCost();

    VectorType *VecOpTy = nullptr;
    if (!Tys.empty()) {
      // The vector reduction operand is operand 0 except for fadd/fmul.
      // Their operand 0 is a scalar start value, so the vector op is operand 1.
      unsigned VecTyIndex = 0;
      if (IID == Intrinsic::vector_reduce_fadd ||
          IID == Intrinsic::vector_reduce_fmul)
        VecTyIndex = 1;
      assert(Tys.size() > VecTyIndex && "Unexpected IntrinsicCostAttributes");
      VecOpTy = dyn_cast<VectorType>(Tys[VecTyIndex]);
    }

    // Library call cost - other than size, make it expensive.
    unsigned SingleCallCost = CostKind == TTI::TCK_CodeSize ? 1 : 10;
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      // Assume that we need to scalarize this intrinsic.
      InstructionCost ScalarizationCost =
          SkipScalarizationCost ? ScalarizationCostPassed : 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
        if (!SkipScalarizationCost)
          ScalarizationCost = getScalarizationOverhead(
              RetVTy, /*Insert*/ true, /*Extract*/ false, CostKind);
        ScalarCalls = std::max(ScalarCalls,
                               cast<FixedVectorType>(RetVTy)->getNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (auto *VTy = dyn_cast<VectorType>(Ty)) {
          if (!SkipScalarizationCost)
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      IntrinsicCostAttributes ScalarAttrs(IID, ScalarRetTy, ScalarTys, FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(ScalarAttrs, CostKind);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:
      ISD = ISD::FSQRT;
      break;
    case Intrinsic::sin:
      ISD = ISD::FSIN;
      break;
    case Intrinsic::cos:
      ISD = ISD::FCOS;
      break;
    case Intrinsic::exp:
      ISD = ISD::FEXP;
      break;
    case Intrinsic::exp2:
      ISD = ISD::FEXP2;
      break;
    case Intrinsic::log:
      ISD = ISD::FLOG;
      break;
    case Intrinsic::log10:
      ISD = ISD::FLOG10;
      break;
    case Intrinsic::log2:
      ISD = ISD::FLOG2;
      break;
    case Intrinsic::fabs:
      ISD = ISD::FABS;
      break;
    case Intrinsic::canonicalize:
      ISD = ISD::FCANONICALIZE;
      break;
    case Intrinsic::minnum:
      ISD = ISD::FMINNUM;
      break;
    case Intrinsic::maxnum:
      ISD = ISD::FMAXNUM;
      break;
    case Intrinsic::minimum:
      ISD = ISD::FMINIMUM;
      break;
    case Intrinsic::maximum:
      ISD = ISD::FMAXIMUM;
      break;
    case Intrinsic::copysign:
      ISD = ISD::FCOPYSIGN;
      break;
    case Intrinsic::floor:
      ISD = ISD::FFLOOR;
      break;
    case Intrinsic::ceil:
      ISD = ISD::FCEIL;
      break;
    case Intrinsic::trunc:
      ISD = ISD::FTRUNC;
      break;
    case Intrinsic::nearbyint:
      ISD = ISD::FNEARBYINT;
      break;
    case Intrinsic::rint:
      ISD = ISD::FRINT;
      break;
    case Intrinsic::round:
      ISD = ISD::FROUND;
      break;
    case Intrinsic::roundeven:
      ISD = ISD::FROUNDEVEN;
      break;
    case Intrinsic::pow:
      ISD = ISD::FPOW;
      break;
    case Intrinsic::fma:
      ISD = ISD::FMA;
      break;
    case Intrinsic::fmuladd:
      ISD = ISD::FMA;
      break;
    case Intrinsic::experimental_constrained_fmuladd:
      ISD = ISD::STRICT_FMA;
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
      return 0;
    case Intrinsic::masked_store: {
      Type *Ty = Tys[0];
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Store, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::masked_load: {
      Type *Ty = RetTy;
      Align TyAlign = thisT()->DL.getABITypeAlign(Ty);
      return thisT()->getMaskedMemoryOpCost(Instruction::Load, Ty, TyAlign, 0,
                                            CostKind);
    }
    case Intrinsic::vector_reduce_add:
      return thisT()->getArithmeticReductionCost(Instruction::Add, VecOpTy,
                                                 std::nullopt, CostKind);
    case Intrinsic::vector_reduce_mul:
      return thisT()->getArithmeticReductionCost(Instruction::Mul, VecOpTy,
                                                 std::nullopt, CostKind);
    case Intrinsic::vector_reduce_and:
      return thisT()->getArithmeticReductionCost(Instruction::And, VecOpTy,
                                                 std::nullopt, CostKind);
    case Intrinsic::vector_reduce_or:
      return thisT()->getArithmeticReductionCost(Instruction::Or, VecOpTy,
                                                 std::nullopt, CostKind);
    case Intrinsic::vector_reduce_xor:
      return thisT()->getArithmeticReductionCost(Instruction::Xor, VecOpTy,
                                                 std::nullopt, CostKind);
    case Intrinsic::vector_reduce_fadd:
      return thisT()->getArithmeticReductionCost(Instruction::FAdd, VecOpTy,
                                                 FMF, CostKind);
    case Intrinsic::vector_reduce_fmul:
      return thisT()->getArithmeticReductionCost(Instruction::FMul, VecOpTy,
                                                 FMF, CostKind);
    case Intrinsic::vector_reduce_smax:
      return thisT()->getMinMaxReductionCost(Intrinsic::smax, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_smin:
      return thisT()->getMinMaxReductionCost(Intrinsic::smin, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_umax:
      return thisT()->getMinMaxReductionCost(Intrinsic::umax, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_umin:
      return thisT()->getMinMaxReductionCost(Intrinsic::umin, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_fmax:
      return thisT()->getMinMaxReductionCost(Intrinsic::maxnum, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_fmin:
      return thisT()->getMinMaxReductionCost(Intrinsic::minnum, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_fmaximum:
      return thisT()->getMinMaxReductionCost(Intrinsic::maximum, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::vector_reduce_fminimum:
      return thisT()->getMinMaxReductionCost(Intrinsic::minimum, VecOpTy,
                                             ICA.getFlags(), CostKind);
    case Intrinsic::abs: {
      // abs(X) = select(icmp(X,0),X,sub(0,X))
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          Pred, CostKind);
      // TODO: Should we add an OperandValueProperties::OP_Zero property?
      Cost += thisT()->getArithmeticInstrCost(
         BinaryOperator::Sub, RetTy, CostKind, {TTI::OK_UniformConstantValue, TTI::OP_None});
      return Cost;
    }
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin: {
      // minmax(X,Y) = select(icmp(X,Y),X,Y)
      Type *CondTy = RetTy->getWithNewBitWidth(1);
      bool IsUnsigned = IID == Intrinsic::umax || IID == Intrinsic::umin;
      CmpInst::Predicate Pred =
          IsUnsigned ? CmpInst::ICMP_UGT : CmpInst::ICMP_SGT;
      InstructionCost Cost = 0;
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                          Pred, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_sat:
    case Intrinsic::ssub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                                     ? Intrinsic::sadd_with_overflow
                                     : Intrinsic::ssub_with_overflow;
      CmpInst::Predicate Pred = CmpInst::ICMP_SGT;

      // SatMax -> Overflow && SumDiff < 0
      // SatMin -> Overflow && SumDiff >= 0
      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost += thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy, CondTy,
                                          Pred, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                              CondTy, Pred, CostKind);
      return Cost;
    }
    case Intrinsic::uadd_sat:
    case Intrinsic::usub_sat: {
      Type *CondTy = RetTy->getWithNewBitWidth(1);

      Type *OpTy = StructType::create({RetTy, CondTy});
      Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                                     ? Intrinsic::uadd_with_overflow
                                     : Intrinsic::usub_with_overflow;

      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs(OverflowOp, OpTy, {RetTy, RetTy}, FMF,
                                    nullptr, ScalarizationCostPassed);
      Cost += thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::Select, RetTy, CondTy,
                                      CmpInst::BAD_ICMP_PREDICATE, CostKind);
      return Cost;
    }
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix: {
      unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
      Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);

      unsigned ExtOp =
          IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, RetTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, RetTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, RetTy,
                                              CostKind,
                                              {TTI::OK_AnyValue, TTI::OP_None},
                                              {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(Instruction::Shl, RetTy, CostKind,
                                              {TTI::OK_AnyValue, TTI::OP_None},
                                              {TTI::OK_UniformConstantValue, TTI::OP_None});
      Cost += thisT()->getArithmeticInstrCost(Instruction::Or, RetTy, CostKind);
      return Cost;
    }
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::ssub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::sadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;

      //   Add:
      //   Overflow -> (Result < LHS) ^ (RHS < 0)
      //   Sub:
      //   Overflow -> (Result < LHS) ^ (RHS > 0)
      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost += 2 * thisT()->getCmpSelInstrCost(
                      Instruction::ICmp, SumTy, OverflowTy,
                      CmpInst::ICMP_SGT, CostKind);
      Cost += thisT()->getArithmeticInstrCost(BinaryOperator::Xor, OverflowTy,
                                              CostKind);
      return Cost;
    }
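    // Illustrative note (not from the original header): for a hypothetical
    // i32 sadd.with.overflow, the formula above corresponds to:
    //
    //   %sum = add i32 %a, %b
    //   %c1  = icmp slt i32 %sum, %a   ; Result < LHS
    //   %c2  = icmp slt i32 %b, 0      ; RHS < 0
    //   %ov  = xor i1 %c1, %c2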
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::usub_with_overflow: {
      Type *SumTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned Opcode = IID == Intrinsic::uadd_with_overflow
                            ? BinaryOperator::Add
                            : BinaryOperator::Sub;
      CmpInst::Predicate Pred = IID == Intrinsic::uadd_with_overflow
                                    ? CmpInst::ICMP_ULT
                                    : CmpInst::ICMP_UGT;

      InstructionCost Cost = 0;
      Cost += thisT()->getArithmeticInstrCost(Opcode, SumTy, CostKind);
      Cost +=
          thisT()->getCmpSelInstrCost(BinaryOperator::ICmp, SumTy, OverflowTy,
                                      Pred, CostKind);
      return Cost;
    }
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow: {
      Type *MulTy = RetTy->getContainedType(0);
      Type *OverflowTy = RetTy->getContainedType(1);
      unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
      Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
      bool IsSigned = IID == Intrinsic::smul_with_overflow;

      unsigned ExtOp = IsSigned ? Instruction::SExt : Instruction::ZExt;
      TTI::CastContextHint CCH = TTI::CastContextHint::None;

      InstructionCost Cost = 0;
      Cost += 2 * thisT()->getCastInstrCost(ExtOp, ExtTy, MulTy, CCH, CostKind);
      Cost +=
          thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);
      Cost += 2 * thisT()->getCastInstrCost(Instruction::Trunc, MulTy, ExtTy,
                                            CCH, CostKind);
      Cost += thisT()->getArithmeticInstrCost(Instruction::LShr, ExtTy,
                                              CostKind,
                                              {TTI::OK_AnyValue, TTI::OP_None},
                                              {TTI::OK_UniformConstantValue, TTI::OP_None});

      if (IsSigned)
        Cost += thisT()->getArithmeticInstrCost(Instruction::AShr, MulTy,
                                                CostKind,
                                                {TTI::OK_AnyValue, TTI::OP_None},
                                                {TTI::OK_UniformConstantValue, TTI::OP_None});

      Cost += thisT()->getCmpSelInstrCost(
          BinaryOperator::ICmp, MulTy, OverflowTy, CmpInst::ICMP_NE, CostKind);
      return Cost;
    }
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat: {
      if (Tys.empty())
        break;
      Type *FromTy = Tys[0];
      bool IsSigned = IID == Intrinsic::fptosi_sat;

      InstructionCost Cost = 0;
      IntrinsicCostAttributes Attrs1(Intrinsic::minnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs1, CostKind);
      IntrinsicCostAttributes Attrs2(Intrinsic::maxnum, FromTy,
                                     {FromTy, FromTy});
      Cost += thisT()->getIntrinsicInstrCost(Attrs2, CostKind);
      Cost += thisT()->getCastInstrCost(
          IsSigned ? Instruction::FPToSI : Instruction::FPToUI, RetTy, FromTy,
          TTI::CastContextHint::None, CostKind);
      if (IsSigned) {
        Type *CondTy = RetTy->getWithNewBitWidth(1);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::FCmp, FromTy, CondTy, CmpInst::FCMP_UNO, CostKind);
        Cost += thisT()->getCmpSelInstrCost(
            BinaryOperator::Select, RetTy, CondTy, CmpInst::FCMP_UNO, CostKind);
      }
      return Cost;
    }
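    // Illustrative note (not from the original header): the modeled lowering
    // for a hypothetical float -> i32 fptosi.sat clamps the input, converts,
    // and (signed case only) selects 0 for NaN inputs:
    //
    //   %lo  = call float @llvm.maxnum.f32(float %x, float <INT_MIN as fp>)
    //   %hi  = call float @llvm.minnum.f32(float %lo, float <INT_MAX as fp>)
    //   %i   = fptosi float %hi to i32
    //   %nan = fcmp uno float %x, 0.0
    //   %res = select i1 %nan, i32 0, i32 %i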
    case Intrinsic::ctpop:
      ISD = ISD::CTPOP;
      // In case of legalization use TCC_Expensive. This is cheaper than a
      // library call but still not a cheap instruction.
      SingleCallCost = TargetTransformInfo::TCC_Expensive;
      break;
    case Intrinsic::ctlz:
      ISD = ISD::CTLZ;
      break;
    case Intrinsic::cttz:
      ISD = ISD::CTTZ;
      break;
    case Intrinsic::bswap:
      ISD = ISD::BSWAP;
      break;
    case Intrinsic::bitreverse:
      ISD = ISD::BITREVERSE;
      break;
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      if (IID == Intrinsic::fabs && LT.second.isFloatingPoint() &&
          TLI->isFAbsFree(LT.second)) {
        return 0;
      }

      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return (LT.first * 2);
      return LT.first;
    } else if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return (LT.first * 2);
    }

    // If we can't lower fmuladd into an FMA estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return thisT()->getArithmeticInstrCost(BinaryOperator::FMul, RetTy,
                                             CostKind) +
             thisT()->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy,
                                             CostKind);
    if (IID == Intrinsic::experimental_constrained_fmuladd) {
      IntrinsicCostAttributes FMulAttrs(
        Intrinsic::experimental_constrained_fmul, RetTy, Tys);
      IntrinsicCostAttributes FAddAttrs(
        Intrinsic::experimental_constrained_fadd, RetTy, Tys);
      return thisT()->getIntrinsicInstrCost(FMulAttrs, CostKind) +
             thisT()->getIntrinsicInstrCost(FAddAttrs, CostKind);
    }

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (auto *RetVTy = dyn_cast<VectorType>(RetTy)) {
      // Scalable vectors cannot be scalarized, so return Invalid.
      if (isa<ScalableVectorType>(RetTy) || any_of(Tys, [](const Type *Ty) {
            return isa<ScalableVectorType>(Ty);
          }))
        return InstructionCost::getInvalid();

      InstructionCost ScalarizationCost =
          SkipScalarizationCost
              ? ScalarizationCostPassed
              : getScalarizationOverhead(RetVTy, /*Insert*/ true,
                                         /*Extract*/ false, CostKind);

      unsigned ScalarCalls = cast<FixedVectorType>(RetVTy)->getNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      IntrinsicCostAttributes Attrs(IID, RetTy->getScalarType(), ScalarTys, FMF);
      InstructionCost ScalarCost =
          thisT()->getIntrinsicInstrCost(Attrs, CostKind);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (auto *VTy = dyn_cast<VectorType>(Tys[i])) {
          if (!ICA.skipScalarizationCost())
            ScalarizationCost += getScalarizationOverhead(
                VTy, /*Insert*/ false, /*Extract*/ true, CostKind);
          ScalarCalls = std::max(ScalarCalls,
                                 cast<FixedVectorType>(VTy)->getNumElements());
        }
      }
      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return SingleCallCost;
  }

  /// Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in which case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate the cost of calling a
  /// vector counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value types.
  /// \param Tys Argument types.
  /// \returns The cost of Call instruction.
  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Tp);
    return LT.first.isValid() ? *LT.first.getValue() : 0;
  }

  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *,
                                            const SCEV *) {
    return 0;
  }

  /// Try to calculate arithmetic and shuffle op costs for reduction intrinsics.
  /// We assume that the reduction operation is performed in the following way:
  ///
  /// %val1 = shufflevector<n x t> %val, <n x t> %undef,
  /// <n x i32> <i32 n/2, i32 n/2 + 1, ..., i32 n - 1, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/  \----------v------------/
  ///                            n/2 elements               n/2 elements
  /// %red1 = op <n x t> %val, <n x t> %val1
  /// After this operation we have a vector %red1 where only the first n/2
  /// elements are meaningful; the second n/2 elements are undefined and can be
  /// dropped. All subsequent operations are actually working with a vector of
  /// length n/2, not n, though the real vector length is still n.
  /// %val2 = shufflevector<n x t> %red1, <n x t> %undef,
  /// <n x i32> <i32 n/4, i32 n/4 + 1, ..., i32 n/2 - 1, i32 undef, ..., i32 undef>
  ///            \----------------v-------------/  \----------v------------/
  ///                            n/4 elements               3*n/4 elements
  /// %red2 = op <n x t> %red1, <n x t> %val2  - working with the vector of
  /// length n/2; the resulting vector has length n/4, etc.
  ///
  /// The cost model should take into account that the actual length of the
  /// vector is reduced on each iteration.
  InstructionCost getTreeReductionCost(unsigned Opcode, VectorType *Ty,
                                       TTI::TargetCostKind CostKind) {
    // Targets must implement a default value for the scalable case, since
    // we don't know how many lanes the vector has.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    if ((Opcode == Instruction::Or || Opcode == Instruction::And) &&
        ScalarTy == IntegerType::getInt1Ty(Ty->getContext()) &&
        NumVecElts >= 2) {
      // Or reduction for i1 is represented as:
      // %val = bitcast <ReduxWidth x i1> to iReduxWidth
      // %res = cmp ne iReduxWidth %val, 0
      // And reduction for i1 is represented as:
      // %val = bitcast <ReduxWidth x i1> to iReduxWidth
      // %res = cmp eq iReduxWidth %val, -1 (all ones)
      Type *ValTy = IntegerType::get(Ty->getContext(), NumVecElts);
      return thisT()->getCastInstrCost(Instruction::BitCast, ValTy, Ty,
                                       TTI::CastContextHint::None, CostKind) +
             thisT()->getCmpSelInstrCost(Instruction::ICmp, ValTy,
                                         CmpInst::makeCmpResultType(ValTy),
                                         CmpInst::BAD_ICMP_PREDICATE, CostKind);
    }
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost ArithCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      VectorType *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);
      ShuffleCost +=
          thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
                                  CostKind, NumVecElts, SubTy);
      ArithCost += thisT()->getArithmeticInstrCost(Opcode, SubTy, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on the vectors with the same
    // architecture-dependent length.

    // By default reductions need one shuffle per reduction level.
    ShuffleCost +=
        NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                                 std::nullopt, CostKind, 0, Ty);
    ArithCost +=
        NumReduxLevels * thisT()->getArithmeticInstrCost(Opcode, Ty, CostKind);
    return ShuffleCost + ArithCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }
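  // Illustrative note (not from the original header): for a hypothetical
  // <8 x float> fadd reduction on a target whose widest legal vector is
  // <4 x float>, the loop above charges one SK_ExtractSubvector shuffle plus
  // one <4 x float> fadd to halve 8 -> 4 lanes; the remaining
  // log2(4) = 2 levels each charge one SK_PermuteSingleSrc shuffle plus one
  // fadd, and a final extractelement retrieves the scalar result.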

  /// Try to calculate the cost of performing strict (in-order) reductions,
  /// which involves doing a sequence of floating point additions in lane
  /// order, starting with an initial value. For example, consider a scalar
  /// initial value 'InitVal' of type float and a vector of type <4 x float>:
  ///
  ///   Vector = <float %v0, float %v1, float %v2, float %v3>
  ///
  ///   %add1 = %InitVal + %v0
  ///   %add2 = %add1 + %v1
  ///   %add3 = %add2 + %v2
  ///   %add4 = %add3 + %v3
  ///
  /// As a simple estimate we can say the cost of such a reduction is 4 times
  /// the cost of a scalar FP addition. We can only estimate the costs for
  /// fixed-width vectors here because for scalable vectors we do not know the
  /// runtime number of operations.
  InstructionCost getOrderedReductionCost(unsigned Opcode, VectorType *Ty,
                                          TTI::TargetCostKind CostKind) {
    // Targets must implement a default value for the scalable case, since
    // we don't know how many lanes the vector has.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    auto *VTy = cast<FixedVectorType>(Ty);
    InstructionCost ExtractCost = getScalarizationOverhead(
        VTy, /*Insert=*/false, /*Extract=*/true, CostKind);
    InstructionCost ArithCost = thisT()->getArithmeticInstrCost(
        Opcode, VTy->getElementType(), CostKind);
    ArithCost *= VTy->getNumElements();

    return ExtractCost + ArithCost;
  }

  InstructionCost getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind CostKind) {
    assert(Ty && "Unknown reduction vector type");
    if (TTI::requiresOrderedReduction(FMF))
      return getOrderedReductionCost(Opcode, Ty, CostKind);
    return getTreeReductionCost(Opcode, Ty, CostKind);
  }

  /// Try to calculate op costs for min/max reduction operations.
  /// \param CondTy Conditional type for the Select instruction.
  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty,
                                         FastMathFlags FMF,
                                         TTI::TargetCostKind CostKind) {
    // Targets must implement a default value for the scalable case, since
    // we don't know how many lanes the vector has.
    if (isa<ScalableVectorType>(Ty))
      return InstructionCost::getInvalid();

    Type *ScalarTy = Ty->getElementType();
    unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    InstructionCost MinMaxCost = 0;
    InstructionCost ShuffleCost = 0;
    std::pair<InstructionCost, MVT> LT = thisT()->getTypeLegalizationCost(Ty);
    unsigned LongVectorCount = 0;
    unsigned MVTLen =
        LT.second.isVector() ? LT.second.getVectorNumElements() : 1;
    while (NumVecElts > MVTLen) {
      NumVecElts /= 2;
      auto *SubTy = FixedVectorType::get(ScalarTy, NumVecElts);

      ShuffleCost +=
          thisT()->getShuffleCost(TTI::SK_ExtractSubvector, Ty, std::nullopt,
                                  CostKind, NumVecElts, SubTy);

      IntrinsicCostAttributes Attrs(IID, SubTy, {SubTy, SubTy}, FMF);
      MinMaxCost += getIntrinsicInstrCost(Attrs, CostKind);
      Ty = SubTy;
      ++LongVectorCount;
    }

    NumReduxLevels -= LongVectorCount;

    // The minimal length of the vector is limited by the real length of vector
    // operations performed on the current platform. That's why several final
    // reduction operations are performed on the vectors with the same
    // architecture-dependent length.
    ShuffleCost +=
        NumReduxLevels * thisT()->getShuffleCost(TTI::SK_PermuteSingleSrc, Ty,
                                                 std::nullopt, CostKind, 0, Ty);
    IntrinsicCostAttributes Attrs(IID, Ty, {Ty, Ty}, FMF);
    MinMaxCost += NumReduxLevels * getIntrinsicInstrCost(Attrs, CostKind);
    // The last min/max should be in vector registers and we counted it above.
    // So we just need a single extractelement.
    return ShuffleCost + MinMaxCost +
           thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty,
                                       CostKind, 0, nullptr, nullptr);
  }

  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *Ty,
                                           FastMathFlags FMF,
                                           TTI::TargetCostKind CostKind) {
    // Without any native support, this is equivalent to the cost of
    // vecreduce.opcode(ext(Ty A)).
    VectorType *ExtTy = VectorType::get(ResTy, Ty);
    InstructionCost RedCost =
        thisT()->getArithmeticReductionCost(Opcode, ExtTy, FMF, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);

    return RedCost + ExtCost;
  }

  InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                         VectorType *Ty,
                                         TTI::TargetCostKind CostKind) {
    // Without any native support, this is equivalent to the cost of
    // vecreduce.add(mul(ext(Ty A), ext(Ty B))) or
    // vecreduce.add(mul(A, B)).
    VectorType *ExtTy = VectorType::get(ResTy, Ty);
    InstructionCost RedCost = thisT()->getArithmeticReductionCost(
        Instruction::Add, ExtTy, std::nullopt, CostKind);
    InstructionCost ExtCost = thisT()->getCastInstrCost(
        IsUnsigned ? Instruction::ZExt : Instruction::SExt, ExtTy, Ty,
        TTI::CastContextHint::None, CostKind);

    InstructionCost MulCost =
        thisT()->getArithmeticInstrCost(Instruction::Mul, ExtTy, CostKind);

    return RedCost + MulCost + 2 * ExtCost;
  }

  InstructionCost getVectorSplitCost() { return 1; }

  /// @}
};

/// Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  using BaseT = BasicTTIImplBase<BasicTTIImpl>;

  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_BASICTTIIMPL_H
CodeGen/AtomicExpandUtils.h
//===- AtomicExpandUtils.h - Utilities for expanding atomic instructions --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ATOMICEXPANDUTILS_H
#define LLVM_CODEGEN_ATOMICEXPANDUTILS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/AtomicOrdering.h"

namespace llvm {

class AtomicRMWInst;
class Value;

/// Parameters (see the expansion example below):
/// (the builder, %addr, %loaded, %new_val, ordering,
///  /* OUT */ %success, /* OUT */ %new_loaded)
using CreateCmpXchgInstFun =
    function_ref<void(IRBuilderBase &, Value *, Value *, Value *, Align,
                      AtomicOrdering, SyncScope::ID, Value *&, Value *&)>;

/// Expand an atomic RMW instruction into a loop utilizing
/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
/// instructions in the first place and that there isn't another, better,
/// transformation available (for example AArch32/AArch64 have linked loads).
///
/// This is useful in passes which can't rewrite the more exotic RMW
/// instructions directly into platform-specific intrinsics (because, say,
/// those intrinsics don't exist). If such a pass is able to expand cmpxchg
/// instructions directly, however, then with this function it could avoid two
/// extra module passes (the `-atomic-expand` pass and itself). A specific
/// example would be PNaCl's `RewriteAtomics` pass.
///
/// Given: atomicrmw some_op iN* %addr, iN %incr ordering
///
/// The standard expansion we produce is:
///     [...]
///     %init_loaded = load atomic iN* %addr
///     br label %loop
/// loop:
///     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
///     %new = some_op iN %loaded, %incr
/// ; This is what -atomic-expand will produce using this function on i686
/// targets:
///     %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
///     %new_loaded = extractvalue { iN, i1 } %pair, 0
///     %success = extractvalue { iN, i1 } %pair, 1
/// ; End callback produced IR
///     br i1 %success, label %atomicrmw.end, label %loop
/// atomicrmw.end:
///     [...]
///
/// Returns true if the containing function was modified.
bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg);
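/// Illustration (not part of this header): a minimal sketch of a
/// CreateCmpXchgInstFun built on IRBuilder; the function name below is
/// hypothetical.
///
///   static void createCmpXchg(IRBuilderBase &Builder, Value *Addr,
///                             Value *Loaded, Value *NewVal, Align Alignment,
///                             AtomicOrdering Ordering, SyncScope::ID SSID,
///                             Value *&Success, Value *&NewLoaded) {
///     AtomicCmpXchgInst *Pair = Builder.CreateAtomicCmpXchg(
///         Addr, Loaded, NewVal, Alignment, Ordering,
///         AtomicCmpXchgInst::getStrongestFailureOrdering(Ordering), SSID);
///     NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
///     Success = Builder.CreateExtractValue(Pair, 1, "success");
///   }
///
///   // ... inside a pass, for an AtomicRMWInst *AI:
///   expandAtomicRMWToCmpXchg(AI, createCmpXchg);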

} // end namespace llvm

#endif // LLVM_CODEGEN_ATOMICEXPANDUTILS_H
CodeGen/MachineDominanceFrontier.h
//===- llvm/CodeGen/MachineDominanceFrontier.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H

#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Analysis/DominanceFrontierImpl.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/GenericDomTree.h"

namespace llvm {

class MachineDominanceFrontier : public MachineFunctionPass {
  ForwardDominanceFrontierBase<MachineBasicBlock> Base;

public:
  using DomTreeT = DomTreeBase<MachineBasicBlock>;
  using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
  using DomSetType = DominanceFrontierBase<MachineBasicBlock, false>::DomSetType;
  using iterator = DominanceFrontierBase<MachineBasicBlock, false>::iterator;
  using const_iterator =
      DominanceFrontierBase<MachineBasicBlock, false>::const_iterator;

  MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
  MachineDominanceFrontier &operator=(const MachineDominanceFrontier &) = delete;

  static char ID;

  MachineDominanceFrontier();

  ForwardDominanceFrontierBase<MachineBasicBlock> &getBase() { return Base; }

  const SmallVectorImpl<MachineBasicBlock *> &getRoots() const {
    return Base.getRoots();
  }

  MachineBasicBlock *getRoot() const {
    return Base.getRoot();
  }

  bool isPostDominator() const {
    return Base.isPostDominator();
  }

  iterator begin() {
    return Base.begin();
  }

  const_iterator begin() const {
    return Base.begin();
  }

  iterator end() {
    return Base.end();
  }

  const_iterator end() const {
    return Base.end();
  }

  iterator find(MachineBasicBlock *B) {
    return Base.find(B);
  }

  const_iterator find(MachineBasicBlock *B) const {
    return Base.find(B);
  }

  iterator addBasicBlock(MachineBasicBlock *BB, const DomSetType &frontier) {
    return Base.addBasicBlock(BB, frontier);
  }

  void removeBlock(MachineBasicBlock *BB) {
    return Base.removeBlock(BB);
  }

  void addToFrontier(iterator I, MachineBasicBlock *Node) {
    return Base.addToFrontier(I, Node);
  }

  void removeFromFrontier(iterator I, MachineBasicBlock *Node) {
    return Base.removeFromFrontier(I, Node);
  }

  bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
    return Base.compareDomSet(DS1, DS2);
  }

  bool compare(DominanceFrontierBase<MachineBasicBlock, false> &Other) const {
    return Base.compare(Other);
  }

  bool runOnMachineFunction(MachineFunction &F) override;

  void releaseMemory() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
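// Illustration (not part of this header): a machine function pass that needs
// the frontier declares the dependency in its getAnalysisUsage; MyMachinePass
// is a hypothetical name.
//
//   void MyMachinePass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MachineDominanceFrontier>();
//     AU.setPreservesAll();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
//   // ... later, in runOnMachineFunction:
//   auto &MDF = getAnalysis<MachineDominanceFrontier>();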

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
CodeGen/WinEHFuncInfo.h
//===- llvm/CodeGen/WinEHFuncInfo.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Data structures and associated state for Windows exception handling schemes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_WINEHFUNCINFO_H
#define LLVM_CODEGEN_WINEHFUNCINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {

class AllocaInst;
class BasicBlock;
class FuncletPadInst;
class Function;
class GlobalVariable;
class Instruction;
class InvokeInst;
class MachineBasicBlock;
class MCSymbol;

// The following structs represent the .xdata tables for various
// Windows-related EH personalities.

using MBBOrBasicBlock = PointerUnion<const BasicBlock *, MachineBasicBlock *>;

struct CxxUnwindMapEntry {
  int ToState;
  MBBOrBasicBlock Cleanup;
};

/// Similar to CxxUnwindMapEntry, but supports SEH filters.
struct SEHUnwindMapEntry {
  /// If unwinding continues through this handler, transition to the handler at
  /// this state. This indexes into SEHUnwindMap.
  int ToState = -1;

  bool IsFinally = false;

  /// Holds the filter expression function.
  const Function *Filter = nullptr;

  /// Holds the __except or __finally basic block.
  MBBOrBasicBlock Handler;
};

struct WinEHHandlerType {
  int Adjectives;
  /// The CatchObj starts out life as an LLVM alloca and is eventually turned
  /// into a frame index.
  union {
    const AllocaInst *Alloca;
    int FrameIndex;
  } CatchObj = {};
  GlobalVariable *TypeDescriptor;
  MBBOrBasicBlock Handler;
};

struct WinEHTryBlockMapEntry {
  int TryLow = -1;
  int TryHigh = -1;
  int CatchHigh = -1;
  SmallVector<WinEHHandlerType, 1> HandlerArray;
};

enum class ClrHandlerType { Catch, Finally, Fault, Filter };

struct ClrEHUnwindMapEntry {
  MBBOrBasicBlock Handler;
  uint32_t TypeToken;
  int HandlerParentState; ///< Outer handler enclosing this entry's handler
  int TryParentState; ///< Outer try region enclosing this entry's try region,
                      ///< treating later catches on same try as "outer"
  ClrHandlerType HandlerType;
};

struct WinEHFuncInfo {
  DenseMap<const Instruction *, int> EHPadStateMap;
  DenseMap<const FuncletPadInst *, int> FuncletBaseStateMap;
  DenseMap<const InvokeInst *, int> InvokeStateMap;
  DenseMap<MCSymbol *, std::pair<int, MCSymbol *>> LabelToStateMap;
  DenseMap<const BasicBlock *, int> BlockToStateMap; // for AsynchEH
  SmallVector<CxxUnwindMapEntry, 4> CxxUnwindMap;
  SmallVector<WinEHTryBlockMapEntry, 4> TryBlockMap;
  SmallVector<SEHUnwindMapEntry, 4> SEHUnwindMap;
  SmallVector<ClrEHUnwindMapEntry, 4> ClrEHUnwindMap;
  int UnwindHelpFrameIdx = std::numeric_limits<int>::max();
  int PSPSymFrameIdx = std::numeric_limits<int>::max();

  int getLastStateNumber() const { return CxxUnwindMap.size() - 1; }

  void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin,
                         MCSymbol *InvokeEnd);

  void addIPToStateRange(int State, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd);

  int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
  int EHRegNodeEndOffset = std::numeric_limits<int>::max();
  int EHGuardFrameIndex = std::numeric_limits<int>::max();
  int SEHSetFrameOffset = std::numeric_limits<int>::max();

  WinEHFuncInfo();
};

/// Analyze the IR in ParentFn and its handlers to build WinEHFuncInfo, which
/// describes the state numbers and tables used by __CxxFrameHandler3. This
/// analysis assumes that WinEHPrepare has already been run.
void calculateWinCXXEHStateNumbers(const Function *ParentFn,
                                   WinEHFuncInfo &FuncInfo);

void calculateSEHStateNumbers(const Function *ParentFn,
                              WinEHFuncInfo &FuncInfo);

void calculateClrEHStateNumbers(const Function *Fn, WinEHFuncInfo &FuncInfo);

// For AsynchEH (VC++ option -EHa)
void calculateCXXStateForAsynchEH(const BasicBlock *BB, int State,
                                  WinEHFuncInfo &FuncInfo);
void calculateSEHStateForAsynchEH(const BasicBlock *BB, int State,
                                  WinEHFuncInfo &FuncInfo);

} // end namespace llvm

#endif // LLVM_CODEGEN_WINEHFUNCINFO_H
CodeGen/ResourcePriorityQueue.h
//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
#define LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H

#include "llvm/CodeGen/ScheduleDAG.h"

namespace llvm {
  class DFAPacketizer;
  class InstrItineraryData;
  class ResourcePriorityQueue;
  class SelectionDAGISel;
  class TargetInstrInfo;
  class TargetRegisterInfo;

  /// Sorting functions for the Available queue.
  struct resource_sort {
    ResourcePriorityQueue *PQ;
    explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}

    bool operator()(const SUnit* LHS, const SUnit* RHS) const;
  };

  class ResourcePriorityQueue : public SchedulingPriorityQueue {
    /// SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    /// NumNodesSolelyBlocking - This vector contains, for every node in the
    /// Queue, the number of nodes that the node is the sole unscheduled
    /// predecessor for.  This is used as a tie-breaker heuristic for better
    /// mobility.
    std::vector<unsigned> NumNodesSolelyBlocking;

    /// Queue - The queue.
    std::vector<SUnit*> Queue;

    /// RegPressure - Tracking current reg pressure per register class.
    ///
    std::vector<unsigned> RegPressure;

    /// RegLimit - Tracking the number of allocatable registers per register
    /// class.
    std::vector<unsigned> RegLimit;

    resource_sort Picker;
    const TargetRegisterInfo *TRI;
    const TargetLowering *TLI;
    const TargetInstrInfo *TII;
    const InstrItineraryData* InstrItins;
    /// ResourcesModel - Represents VLIW state.
    /// Not limited to VLIW targets per se, but assumes
    /// definition of a DFA by the target.
    std::unique_ptr<DFAPacketizer> ResourcesModel;

    /// Resource model - packet/bundle model. Purely
    /// internal at this time.
    std::vector<SUnit*> Packet;

    /// Heuristics for estimating register pressure.
    unsigned ParallelLiveRanges;
    int HorizontalVerticalBalance;

  public:
    ResourcePriorityQueue(SelectionDAGISel *IS);

    bool isBottomUp() const override { return false; }

    void initNodes(std::vector<SUnit> &sunits) override;

    void addNode(const SUnit *SU) override {
      NumNodesSolelyBlocking.resize(SUnits->size(), 0);
    }

    void updateNode(const SUnit *SU) override {}

    void releaseState() override {
      SUnits = nullptr;
    }

    unsigned getLatency(unsigned NodeNum) const {
      assert(NodeNum < (*SUnits).size());
      return (*SUnits)[NodeNum].getHeight();
    }

    unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
      assert(NodeNum < NumNodesSolelyBlocking.size());
      return NumNodesSolelyBlocking[NodeNum];
    }

    /// Single cost function reflecting benefit of scheduling SU
    /// in the current cycle.
    int SUSchedulingCost (SUnit *SU);

    /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
    ///
    void initNumRegDefsLeft(SUnit *SU);
    int regPressureDelta(SUnit *SU, bool RawPressure = false);
    int rawRegPressureDelta (SUnit *SU, unsigned RCId);

    bool empty() const override { return Queue.empty(); }

    void push(SUnit *U) override;

    SUnit *pop() override;

    void remove(SUnit *SU) override;

    /// scheduledNode - Main resource tracking point.
    void scheduledNode(SUnit *SU) override;
    bool isResourceAvailable(SUnit *SU);
    void reserveResources(SUnit *SU);

private:
    void adjustPriorityOfUnscheduledPreds(SUnit *SU);
    SUnit *getSingleUnscheduledPred(SUnit *SU);
    unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
    unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
  };
}

#endif
CodeGen/ParallelCG.h
//===-- llvm/CodeGen/ParallelCG.h - Parallel code generation ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header declares functions that can be used for parallel code generation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PARALLELCG_H
#define LLVM_CODEGEN_PARALLELCG_H

#include "llvm/Support/CodeGen.h"
#include <functional>
#include <memory>

namespace llvm {

template <typename T> class ArrayRef;
class Module;
class TargetMachine;
class raw_pwrite_stream;

/// Split M into OSs.size() partitions, and generate code for each. Takes a
/// factory function, TMFactory, that creates a TargetMachine for each
/// partition. Writes OSs.size() output files to the output streams in OSs.
/// The resulting output files, if linked together, are intended to be
/// equivalent to the single output file that would have been code generated
/// from M.
///
/// Writes bitcode for individual partitions into output streams in BCOSs, if
/// BCOSs is not empty.
void splitCodeGen(
    Module &M, ArrayRef<raw_pwrite_stream *> OSs,
    ArrayRef<llvm::raw_pwrite_stream *> BCOSs,
    const std::function<std::unique_ptr<TargetMachine>()> &TMFactory,
    CodeGenFileType FileType = CGFT_ObjectFile, bool PreserveLocals = false);
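/// Illustration (not part of this header): splitting a module across two
/// object-file streams; `M`, `OS0`, `OS1`, and `createTM` are hypothetical.
///
///   std::vector<raw_pwrite_stream *> OSs = {&OS0, &OS1};
///   splitCodeGen(M, OSs, /*BCOSs=*/{},
///                [&] { return createTM(); }); // factory returning
///                                             // std::unique_ptr<TargetMachine>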

} // namespace llvm

#endif
CodeGen/GenVT.inc
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* ValueTypes Source Fragment                                                 *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifdef GET_VT_ATTR // (Ty, n, sz, Any, Int, FP, Vec, Sc)
  GET_VT_ATTR(Other, 1, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(i1, 2, 1, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i2, 3, 2, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i4, 4, 4, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i8, 5, 8, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i16, 6, 16, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i32, 7, 32, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i64, 8, 64, 0, 3, 0, 0, 0)
  GET_VT_ATTR(i128, 9, 128, 0, 3, 0, 0, 0)
  GET_VT_ATTR(bf16, 10, 16, 0, 0, 1, 0, 0)
  GET_VT_ATTR(f16, 11, 16, 0, 0, 3, 0, 0)
  GET_VT_ATTR(f32, 12, 32, 0, 0, 3, 0, 0)
  GET_VT_ATTR(f64, 13, 64, 0, 0, 3, 0, 0)
  GET_VT_ATTR(f80, 14, 80, 0, 0, 3, 0, 0)
  GET_VT_ATTR(f128, 15, 128, 0, 0, 3, 0, 0)
  GET_VT_ATTR(ppcf128, 16, 128, 0, 0, 1, 0, 0)
  GET_VT_ATTR(v1i1, 17, 1, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2i1, 18, 2, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v4i1, 19, 4, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v8i1, 20, 8, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v16i1, 21, 16, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v32i1, 22, 32, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v64i1, 23, 64, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i1, 24, 128, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v256i1, 25, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v512i1, 26, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1024i1, 27, 1024, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2048i1, 28, 2048, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i2, 29, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v256i2, 30, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v64i4, 31, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i4, 32, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1i8, 33, 8, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2i8, 34, 16, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v4i8, 35, 32, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v8i8, 36, 64, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v16i8, 37, 128, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v32i8, 38, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v64i8, 39, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i8, 40, 1024, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v256i8, 41, 2048, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v512i8, 42, 4096, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1024i8, 43, 8192, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1i16, 44, 16, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2i16, 45, 32, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v3i16, 46, 48, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v4i16, 47, 64, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v8i16, 48, 128, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v16i16, 49, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v32i16, 50, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v64i16, 51, 1024, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i16, 52, 2048, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v256i16, 53, 4096, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v512i16, 54, 8192, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1i32, 55, 32, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2i32, 56, 64, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v3i32, 57, 96, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v4i32, 58, 128, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v5i32, 59, 160, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v6i32, 60, 192, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v7i32, 61, 224, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v8i32, 62, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v9i32, 63, 288, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v10i32, 64, 320, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v11i32, 65, 352, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v12i32, 66, 384, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v16i32, 67, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v32i32, 68, 1024, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v64i32, 69, 2048, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i32, 70, 4096, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v256i32, 71, 8192, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v512i32, 72, 16384, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1024i32, 73, 32768, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2048i32, 74, 65536, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1i64, 75, 64, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v2i64, 76, 128, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v3i64, 77, 192, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v4i64, 78, 256, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v8i64, 79, 512, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v16i64, 80, 1024, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v32i64, 81, 2048, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v64i64, 82, 4096, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v128i64, 83, 8192, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v256i64, 84, 16384, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1i128, 85, 128, 0, 1, 0, 1, 0)
  GET_VT_ATTR(v1f16, 86, 16, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v2f16, 87, 32, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v3f16, 88, 48, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v4f16, 89, 64, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v8f16, 90, 128, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v16f16, 91, 256, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v32f16, 92, 512, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v64f16, 93, 1024, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v128f16, 94, 2048, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v256f16, 95, 4096, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v512f16, 96, 8192, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v2bf16, 97, 32, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v3bf16, 98, 48, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v4bf16, 99, 64, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v8bf16, 100, 128, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v16bf16, 101, 256, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v32bf16, 102, 512, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v64bf16, 103, 1024, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v128bf16, 104, 2048, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v1f32, 105, 32, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v2f32, 106, 64, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v3f32, 107, 96, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v4f32, 108, 128, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v5f32, 109, 160, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v6f32, 110, 192, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v7f32, 111, 224, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v8f32, 112, 256, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v9f32, 113, 288, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v10f32, 114, 320, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v11f32, 115, 352, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v12f32, 116, 384, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v16f32, 117, 512, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v32f32, 118, 1024, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v64f32, 119, 2048, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v128f32, 120, 4096, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v256f32, 121, 8192, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v512f32, 122, 16384, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v1024f32, 123, 32768, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v2048f32, 124, 65536, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v1f64, 125, 64, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v2f64, 126, 128, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v3f64, 127, 192, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v4f64, 128, 256, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v8f64, 129, 512, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v16f64, 130, 1024, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v32f64, 131, 2048, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v64f64, 132, 4096, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v128f64, 133, 8192, 0, 0, 1, 1, 0)
  GET_VT_ATTR(v256f64, 134, 16384, 0, 0, 1, 1, 0)
  GET_VT_ATTR(nxv1i1, 135, 1, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv2i1, 136, 2, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv4i1, 137, 4, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv8i1, 138, 8, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv16i1, 139, 16, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv32i1, 140, 32, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv64i1, 141, 64, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv1i8, 142, 8, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv2i8, 143, 16, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv4i8, 144, 32, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv8i8, 145, 64, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv16i8, 146, 128, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv32i8, 147, 256, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv64i8, 148, 512, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv1i16, 149, 16, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv2i16, 150, 32, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv4i16, 151, 64, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv8i16, 152, 128, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv16i16, 153, 256, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv32i16, 154, 512, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv1i32, 155, 32, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv2i32, 156, 64, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv4i32, 157, 128, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv8i32, 158, 256, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv16i32, 159, 512, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv32i32, 160, 1024, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv1i64, 161, 64, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv2i64, 162, 128, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv4i64, 163, 256, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv8i64, 164, 512, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv16i64, 165, 1024, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv32i64, 166, 2048, 0, 1, 0, 1, 1)
  GET_VT_ATTR(nxv1f16, 167, 16, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv2f16, 168, 32, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv4f16, 169, 64, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv8f16, 170, 128, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv16f16, 171, 256, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv32f16, 172, 512, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv1bf16, 173, 16, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv2bf16, 174, 32, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv4bf16, 175, 64, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv8bf16, 176, 128, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv16bf16, 177, 256, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv32bf16, 178, 512, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv1f32, 179, 32, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv2f32, 180, 64, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv4f32, 181, 128, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv8f32, 182, 256, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv16f32, 183, 512, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv1f64, 184, 64, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv2f64, 185, 128, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv4f64, 186, 256, 0, 0, 1, 1, 1)
  GET_VT_ATTR(nxv8f64, 187, 512, 0, 0, 1, 1, 1)
  GET_VT_ATTR(x86mmx, 188, 64, 0, 0, 0, 0, 0)
  GET_VT_ATTR(Glue, 189, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(isVoid, 190, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(Untyped, 191, 8, 0, 0, 0, 0, 0)
  GET_VT_ATTR(funcref, 192, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(externref, 193, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(x86amx, 194, 8192, 0, 0, 0, 0, 0)
  GET_VT_ATTR(i64x8, 195, 512, 0, 0, 0, 0, 0)
  GET_VT_ATTR(aarch64svcount, 196, 16, 0, 0, 0, 0, 0)
  GET_VT_ATTR(spirvbuiltin, 197, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(token, 248, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(Metadata, 249, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(iPTRAny, 250, 0, 1, 0, 0, 0, 0)
  GET_VT_ATTR(vAny, 251, 0, 1, 0, 0, 0, 0)
  GET_VT_ATTR(fAny, 252, 0, 1, 0, 0, 0, 0)
  GET_VT_ATTR(iAny, 253, 0, 1, 0, 0, 0, 0)
  GET_VT_ATTR(iPTR, 254, 0, 0, 0, 0, 0, 0)
  GET_VT_ATTR(Any, 255, 0, 1, 0, 0, 0, 0)
#endif
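
// Illustration (not part of the generated file): consumers define the macro
// before textually including this fragment (the classic X-macro pattern). A
// minimal sketch that expands each entry into an enum value, assuming the
// fragment is reachable as "llvm/CodeGen/GenVT.inc":
//
//   enum SimpleValueType : uint8_t {
//   #define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc) Ty = N,
//   #include "llvm/CodeGen/GenVT.inc"
//   #undef GET_VT_ATTR
//   };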

#ifdef GET_VT_RANGES
  FIRST_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
  LAST_FIXEDLEN_VECTOR_VALUETYPE = v256f64,
  FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v1f16,
  LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v256f64,
  FIRST_FP_SCALABLE_VECTOR_VALUETYPE = nxv1f16,
  LAST_FP_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
  FIRST_FP_VALUETYPE = bf16,
  LAST_FP_VALUETYPE = ppcf128,
  FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
  LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128,
  FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
  LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv32i64,
  FIRST_INTEGER_VALUETYPE = i1,
  LAST_INTEGER_VALUETYPE = i128,
  FIRST_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
  LAST_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
  FIRST_VALUETYPE = Other,
  LAST_VALUETYPE = spirvbuiltin,
  FIRST_VECTOR_VALUETYPE = v1i1,
  LAST_VECTOR_VALUETYPE = nxv8f64,
#endif

#ifdef GET_VT_VECATTR // (Ty, Sc, nElem, ElTy, ElSz)
  GET_VT_VECATTR(v1i1, 0, 1, i1, 1)
  GET_VT_VECATTR(v2i1, 0, 2, i1, 1)
  GET_VT_VECATTR(v4i1, 0, 4, i1, 1)
  GET_VT_VECATTR(v8i1, 0, 8, i1, 1)
  GET_VT_VECATTR(v16i1, 0, 16, i1, 1)
  GET_VT_VECATTR(v32i1, 0, 32, i1, 1)
  GET_VT_VECATTR(v64i1, 0, 64, i1, 1)
  GET_VT_VECATTR(v128i1, 0, 128, i1, 1)
  GET_VT_VECATTR(v256i1, 0, 256, i1, 1)
  GET_VT_VECATTR(v512i1, 0, 512, i1, 1)
  GET_VT_VECATTR(v1024i1, 0, 1024, i1, 1)
  GET_VT_VECATTR(v2048i1, 0, 2048, i1, 1)
  GET_VT_VECATTR(v128i2, 0, 128, i2, 2)
  GET_VT_VECATTR(v256i2, 0, 256, i2, 2)
  GET_VT_VECATTR(v64i4, 0, 64, i4, 4)
  GET_VT_VECATTR(v128i4, 0, 128, i4, 4)
  GET_VT_VECATTR(v1i8, 0, 1, i8, 8)
  GET_VT_VECATTR(v2i8, 0, 2, i8, 8)
  GET_VT_VECATTR(v4i8, 0, 4, i8, 8)
  GET_VT_VECATTR(v8i8, 0, 8, i8, 8)
  GET_VT_VECATTR(v16i8, 0, 16, i8, 8)
  GET_VT_VECATTR(v32i8, 0, 32, i8, 8)
  GET_VT_VECATTR(v64i8, 0, 64, i8, 8)
  GET_VT_VECATTR(v128i8, 0, 128, i8, 8)
  GET_VT_VECATTR(v256i8, 0, 256, i8, 8)
  GET_VT_VECATTR(v512i8, 0, 512, i8, 8)
  GET_VT_VECATTR(v1024i8, 0, 1024, i8, 8)
  GET_VT_VECATTR(v1i16, 0, 1, i16, 16)
  GET_VT_VECATTR(v2i16, 0, 2, i16, 16)
  GET_VT_VECATTR(v3i16, 0, 3, i16, 16)
  GET_VT_VECATTR(v4i16, 0, 4, i16, 16)
  GET_VT_VECATTR(v8i16, 0, 8, i16, 16)
  GET_VT_VECATTR(v16i16, 0, 16, i16, 16)
  GET_VT_VECATTR(v32i16, 0, 32, i16, 16)
  GET_VT_VECATTR(v64i16, 0, 64, i16, 16)
  GET_VT_VECATTR(v128i16, 0, 128, i16, 16)
  GET_VT_VECATTR(v256i16, 0, 256, i16, 16)
  GET_VT_VECATTR(v512i16, 0, 512, i16, 16)
  GET_VT_VECATTR(v1i32, 0, 1, i32, 32)
  GET_VT_VECATTR(v2i32, 0, 2, i32, 32)
  GET_VT_VECATTR(v3i32, 0, 3, i32, 32)
  GET_VT_VECATTR(v4i32, 0, 4, i32, 32)
  GET_VT_VECATTR(v5i32, 0, 5, i32, 32)
  GET_VT_VECATTR(v6i32, 0, 6, i32, 32)
  GET_VT_VECATTR(v7i32, 0, 7, i32, 32)
  GET_VT_VECATTR(v8i32, 0, 8, i32, 32)
  GET_VT_VECATTR(v9i32, 0, 9, i32, 32)
  GET_VT_VECATTR(v10i32, 0, 10, i32, 32)
  GET_VT_VECATTR(v11i32, 0, 11, i32, 32)
  GET_VT_VECATTR(v12i32, 0, 12, i32, 32)
  GET_VT_VECATTR(v16i32, 0, 16, i32, 32)
  GET_VT_VECATTR(v32i32, 0, 32, i32, 32)
  GET_VT_VECATTR(v64i32, 0, 64, i32, 32)
  GET_VT_VECATTR(v128i32, 0, 128, i32, 32)
  GET_VT_VECATTR(v256i32, 0, 256, i32, 32)
  GET_VT_VECATTR(v512i32, 0, 512, i32, 32)
  GET_VT_VECATTR(v1024i32, 0, 1024, i32, 32)
  GET_VT_VECATTR(v2048i32, 0, 2048, i32, 32)
  GET_VT_VECATTR(v1i64, 0, 1, i64, 64)
  GET_VT_VECATTR(v2i64, 0, 2, i64, 64)
  GET_VT_VECATTR(v3i64, 0, 3, i64, 64)
  GET_VT_VECATTR(v4i64, 0, 4, i64, 64)
  GET_VT_VECATTR(v8i64, 0, 8, i64, 64)
  GET_VT_VECATTR(v16i64, 0, 16, i64, 64)
  GET_VT_VECATTR(v32i64, 0, 32, i64, 64)
  GET_VT_VECATTR(v64i64, 0, 64, i64, 64)
  GET_VT_VECATTR(v128i64, 0, 128, i64, 64)
  GET_VT_VECATTR(v256i64, 0, 256, i64, 64)
  GET_VT_VECATTR(v1i128, 0, 1, i128, 128)
  GET_VT_VECATTR(v1f16, 0, 1, f16, 16)
  GET_VT_VECATTR(v2f16, 0, 2, f16, 16)
  GET_VT_VECATTR(v3f16, 0, 3, f16, 16)
  GET_VT_VECATTR(v4f16, 0, 4, f16, 16)
  GET_VT_VECATTR(v8f16, 0, 8, f16, 16)
  GET_VT_VECATTR(v16f16, 0, 16, f16, 16)
  GET_VT_VECATTR(v32f16, 0, 32, f16, 16)
  GET_VT_VECATTR(v64f16, 0, 64, f16, 16)
  GET_VT_VECATTR(v128f16, 0, 128, f16, 16)
  GET_VT_VECATTR(v256f16, 0, 256, f16, 16)
  GET_VT_VECATTR(v512f16, 0, 512, f16, 16)
  GET_VT_VECATTR(v2bf16, 0, 2, bf16, 16)
  GET_VT_VECATTR(v3bf16, 0, 3, bf16, 16)
  GET_VT_VECATTR(v4bf16, 0, 4, bf16, 16)
  GET_VT_VECATTR(v8bf16, 0, 8, bf16, 16)
  GET_VT_VECATTR(v16bf16, 0, 16, bf16, 16)
  GET_VT_VECATTR(v32bf16, 0, 32, bf16, 16)
  GET_VT_VECATTR(v64bf16, 0, 64, bf16, 16)
  GET_VT_VECATTR(v128bf16, 0, 128, bf16, 16)
  GET_VT_VECATTR(v1f32, 0, 1, f32, 32)
  GET_VT_VECATTR(v2f32, 0, 2, f32, 32)
  GET_VT_VECATTR(v3f32, 0, 3, f32, 32)
  GET_VT_VECATTR(v4f32, 0, 4, f32, 32)
  GET_VT_VECATTR(v5f32, 0, 5, f32, 32)
  GET_VT_VECATTR(v6f32, 0, 6, f32, 32)
  GET_VT_VECATTR(v7f32, 0, 7, f32, 32)
  GET_VT_VECATTR(v8f32, 0, 8, f32, 32)
  GET_VT_VECATTR(v9f32, 0, 9, f32, 32)
  GET_VT_VECATTR(v10f32, 0, 10, f32, 32)
  GET_VT_VECATTR(v11f32, 0, 11, f32, 32)
  GET_VT_VECATTR(v12f32, 0, 12, f32, 32)
  GET_VT_VECATTR(v16f32, 0, 16, f32, 32)
  GET_VT_VECATTR(v32f32, 0, 32, f32, 32)
  GET_VT_VECATTR(v64f32, 0, 64, f32, 32)
  GET_VT_VECATTR(v128f32, 0, 128, f32, 32)
  GET_VT_VECATTR(v256f32, 0, 256, f32, 32)
  GET_VT_VECATTR(v512f32, 0, 512, f32, 32)
  GET_VT_VECATTR(v1024f32, 0, 1024, f32, 32)
  GET_VT_VECATTR(v2048f32, 0, 2048, f32, 32)
  GET_VT_VECATTR(v1f64, 0, 1, f64, 64)
  GET_VT_VECATTR(v2f64, 0, 2, f64, 64)
  GET_VT_VECATTR(v3f64, 0, 3, f64, 64)
  GET_VT_VECATTR(v4f64, 0, 4, f64, 64)
  GET_VT_VECATTR(v8f64, 0, 8, f64, 64)
  GET_VT_VECATTR(v16f64, 0, 16, f64, 64)
  GET_VT_VECATTR(v32f64, 0, 32, f64, 64)
  GET_VT_VECATTR(v64f64, 0, 64, f64, 64)
  GET_VT_VECATTR(v128f64, 0, 128, f64, 64)
  GET_VT_VECATTR(v256f64, 0, 256, f64, 64)
  GET_VT_VECATTR(nxv1i1, 1, 1, i1, 1)
  GET_VT_VECATTR(nxv2i1, 1, 2, i1, 1)
  GET_VT_VECATTR(nxv4i1, 1, 4, i1, 1)
  GET_VT_VECATTR(nxv8i1, 1, 8, i1, 1)
  GET_VT_VECATTR(nxv16i1, 1, 16, i1, 1)
  GET_VT_VECATTR(nxv32i1, 1, 32, i1, 1)
  GET_VT_VECATTR(nxv64i1, 1, 64, i1, 1)
  GET_VT_VECATTR(nxv1i8, 1, 1, i8, 8)
  GET_VT_VECATTR(nxv2i8, 1, 2, i8, 8)
  GET_VT_VECATTR(nxv4i8, 1, 4, i8, 8)
  GET_VT_VECATTR(nxv8i8, 1, 8, i8, 8)
  GET_VT_VECATTR(nxv16i8, 1, 16, i8, 8)
  GET_VT_VECATTR(nxv32i8, 1, 32, i8, 8)
  GET_VT_VECATTR(nxv64i8, 1, 64, i8, 8)
  GET_VT_VECATTR(nxv1i16, 1, 1, i16, 16)
  GET_VT_VECATTR(nxv2i16, 1, 2, i16, 16)
  GET_VT_VECATTR(nxv4i16, 1, 4, i16, 16)
  GET_VT_VECATTR(nxv8i16, 1, 8, i16, 16)
  GET_VT_VECATTR(nxv16i16, 1, 16, i16, 16)
  GET_VT_VECATTR(nxv32i16, 1, 32, i16, 16)
  GET_VT_VECATTR(nxv1i32, 1, 1, i32, 32)
  GET_VT_VECATTR(nxv2i32, 1, 2, i32, 32)
  GET_VT_VECATTR(nxv4i32, 1, 4, i32, 32)
  GET_VT_VECATTR(nxv8i32, 1, 8, i32, 32)
  GET_VT_VECATTR(nxv16i32, 1, 16, i32, 32)
  GET_VT_VECATTR(nxv32i32, 1, 32, i32, 32)
  GET_VT_VECATTR(nxv1i64, 1, 1, i64, 64)
  GET_VT_VECATTR(nxv2i64, 1, 2, i64, 64)
  GET_VT_VECATTR(nxv4i64, 1, 4, i64, 64)
  GET_VT_VECATTR(nxv8i64, 1, 8, i64, 64)
  GET_VT_VECATTR(nxv16i64, 1, 16, i64, 64)
  GET_VT_VECATTR(nxv32i64, 1, 32, i64, 64)
  GET_VT_VECATTR(nxv1f16, 1, 1, f16, 16)
  GET_VT_VECATTR(nxv2f16, 1, 2, f16, 16)
  GET_VT_VECATTR(nxv4f16, 1, 4, f16, 16)
  GET_VT_VECATTR(nxv8f16, 1, 8, f16, 16)
  GET_VT_VECATTR(nxv16f16, 1, 16, f16, 16)
  GET_VT_VECATTR(nxv32f16, 1, 32, f16, 16)
  GET_VT_VECATTR(nxv1bf16, 1, 1, bf16, 16)
  GET_VT_VECATTR(nxv2bf16, 1, 2, bf16, 16)
  GET_VT_VECATTR(nxv4bf16, 1, 4, bf16, 16)
  GET_VT_VECATTR(nxv8bf16, 1, 8, bf16, 16)
  GET_VT_VECATTR(nxv16bf16, 1, 16, bf16, 16)
  GET_VT_VECATTR(nxv32bf16, 1, 32, bf16, 16)
  GET_VT_VECATTR(nxv1f32, 1, 1, f32, 32)
  GET_VT_VECATTR(nxv2f32, 1, 2, f32, 32)
  GET_VT_VECATTR(nxv4f32, 1, 4, f32, 32)
  GET_VT_VECATTR(nxv8f32, 1, 8, f32, 32)
  GET_VT_VECATTR(nxv16f32, 1, 16, f32, 32)
  GET_VT_VECATTR(nxv1f64, 1, 1, f64, 64)
  GET_VT_VECATTR(nxv2f64, 1, 2, f64, 64)
  GET_VT_VECATTR(nxv4f64, 1, 4, f64, 64)
  GET_VT_VECATTR(nxv8f64, 1, 8, f64, 64)
#endif

CodeGen/RDFLiveness.h
//===- RDFLiveness.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Recalculate the liveness information given a data flow graph.
// This includes block live-ins and kill flags.

#ifndef LLVM_CODEGEN_RDFLIVENESS_H
#define LLVM_CODEGEN_RDFLIVENESS_H

#include "RDFGraph.h"
#include "RDFRegisters.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/LaneBitmask.h"
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>

namespace llvm {

class MachineBasicBlock;
class MachineDominanceFrontier;
class MachineDominatorTree;
class MachineRegisterInfo;
class TargetRegisterInfo;

namespace rdf {
namespace detail {

using NodeRef = std::pair<NodeId, LaneBitmask>;

} // namespace detail
} // namespace rdf
} // namespace llvm

namespace std {

template <> struct hash<llvm::rdf::detail::NodeRef> {
  std::size_t operator()(llvm::rdf::detail::NodeRef R) const {
    return std::hash<llvm::rdf::NodeId>{}(R.first) ^
           std::hash<llvm::LaneBitmask::Type>{}(R.second.getAsInteger());
  }
};

} // namespace std

namespace llvm::rdf {

struct Liveness {
public:
  using LiveMapType = RegisterAggrMap<MachineBasicBlock *>;
  using NodeRef = detail::NodeRef;
  using NodeRefSet = std::unordered_set<NodeRef>;
  using RefMap = std::unordered_map<RegisterId, NodeRefSet>;

  Liveness(MachineRegisterInfo &mri, const DataFlowGraph &g)
      : DFG(g), TRI(g.getTRI()), PRI(g.getPRI()), MDT(g.getDT()),
        MDF(g.getDF()), LiveMap(g.getPRI()), Empty(), NoRegs(g.getPRI()) {}

  NodeList getAllReachingDefs(RegisterRef RefRR, NodeAddr<RefNode *> RefA,
                              bool TopShadows, bool FullChain,
                              const RegisterAggr &DefRRs);

  NodeList getAllReachingDefs(NodeAddr<RefNode *> RefA) {
    return getAllReachingDefs(RefA.Addr->getRegRef(DFG), RefA, false, false,
                              NoRegs);
  }

  NodeList getAllReachingDefs(RegisterRef RefRR, NodeAddr<RefNode *> RefA) {
    return getAllReachingDefs(RefRR, RefA, false, false, NoRegs);
  }

  NodeSet getAllReachedUses(RegisterRef RefRR, NodeAddr<DefNode *> DefA,
                            const RegisterAggr &DefRRs);

  NodeSet getAllReachedUses(RegisterRef RefRR, NodeAddr<DefNode *> DefA) {
    return getAllReachedUses(RefRR, DefA, NoRegs);
  }

  std::pair<NodeSet, bool> getAllReachingDefsRec(RegisterRef RefRR,
                                                 NodeAddr<RefNode *> RefA,
                                                 NodeSet &Visited,
                                                 const NodeSet &Defs);

  NodeAddr<RefNode *> getNearestAliasedRef(RegisterRef RefRR,
                                           NodeAddr<InstrNode *> IA);

  LiveMapType &getLiveMap() { return LiveMap; }
  const LiveMapType &getLiveMap() const { return LiveMap; }

  const RefMap &getRealUses(NodeId P) const {
    auto F = RealUseMap.find(P);
    return F == RealUseMap.end() ? Empty : F->second;
  }

  void computePhiInfo();
  void computeLiveIns();
  void resetLiveIns();
  void resetKills();
  void resetKills(MachineBasicBlock *B);

  void trace(bool T) { Trace = T; }

private:
  const DataFlowGraph &DFG;
  const TargetRegisterInfo &TRI;
  const PhysicalRegisterInfo &PRI;
  const MachineDominatorTree &MDT;
  const MachineDominanceFrontier &MDF;
  LiveMapType LiveMap;
  const RefMap Empty;
  const RegisterAggr NoRegs;
  bool Trace = false;

  // Cache of mapping from node ids (for RefNodes) to the containing
  // basic blocks. Not computing it each time for each node reduces
  // the liveness calculation time by a large fraction.
  DenseMap<NodeId, MachineBasicBlock *> NBMap;

  // Phi information:
  //
  // RealUseMap
  // map: NodeId -> (map: RegisterId -> NodeRefSet)
  //      phi id -> (map: register -> set of reached non-phi uses)
  DenseMap<NodeId, RefMap> RealUseMap;

  // Inverse iterated dominance frontier.
  std::map<MachineBasicBlock *, std::set<MachineBasicBlock *>> IIDF;

  // Live on entry.
  std::map<MachineBasicBlock *, RefMap> PhiLON;

  // Phi uses are considered to be located at the end of the block that
  // they are associated with. The reaching def of a phi use dominates the
  // block that the use corresponds to, but not the block that contains
  // the phi itself. To include these uses in the liveness propagation (up
  // the dominator tree), create a map: block -> set of uses live on exit.
  std::map<MachineBasicBlock *, RefMap> PhiLOX;

  MachineBasicBlock *getBlockWithRef(NodeId RN) const;
  void traverse(MachineBasicBlock *B, RefMap &LiveIn);
  void emptify(RefMap &M);

  std::pair<NodeSet, bool>
  getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode *> RefA,
                            NodeSet &Visited, const NodeSet &Defs,
                            unsigned Nest, unsigned MaxNest);
};
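
// Example usage (a minimal sketch; 'G' is assumed to be a DataFlowGraph
// already built for the current function, and 'MRI' its MachineRegisterInfo):
//
//   Liveness LV(MRI, G);
//   LV.computeLiveIns();  // Recompute block live-ins from the graph.
//   LV.resetLiveIns();    // Write the recomputed live-ins back to the MBBs.
//   LV.resetKills();      // Refresh kill flags to match the new liveness.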

raw_ostream &operator<<(raw_ostream &OS, const Print<Liveness::RefMap> &P);

} // end namespace llvm::rdf

#endif // LLVM_CODEGEN_RDFLIVENESS_H

//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the IntrinsicLowering interface.  This interface allows
// addition of domain-specific or front-end specific intrinsics to LLVM without
// having to modify all of the C backend or interpreter.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
#define LLVM_CODEGEN_INTRINSICLOWERING_H

namespace llvm {
class CallInst;
class DataLayout;

class IntrinsicLowering {
  const DataLayout &DL;

  bool Warned = false;

public:
  explicit IntrinsicLowering(const DataLayout &DL) : DL(DL) {}

  /// Replace a call to the specified intrinsic function.
  /// If an intrinsic function must be implemented by the code generator
  /// (such as va_start), this function should print a message and abort.
  ///
  /// Otherwise, if an intrinsic function call can be lowered, the code to
  /// implement it (often a call to a non-intrinsic function) is inserted
  /// _after_ the call instruction and the call is deleted. The caller must
  /// be capable of handling this kind of change.
  void LowerIntrinsicCall(CallInst *CI);

  /// Try to replace a call instruction with a call to a bswap intrinsic. Return
  /// false if the call is not a simple integer bswap.
  static bool LowerToByteSwap(CallInst *CI);
};
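
// Example usage (a minimal sketch; assumes 'CI' is a CallInst to a lowerable
// intrinsic and 'M' is the Module being processed):
//
//   IntrinsicLowering IL(M.getDataLayout());
//   IL.LowerIntrinsicCall(CI);  // Inserts replacement code; CI is deleted.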
}

#endif

//==--- llvm/CodeGen/ReachingDefAnalysis.h - Reaching Def Analysis -*- C++ -*---==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Reaching Defs Analysis pass.
///
/// For each instruction, this pass tracks the "closest" reaching def of a
/// given register. It is used by BreakFalseDeps (for clearance calculation)
/// and ExecutionDomainFix (for arbitrating conflicting domains).
///
/// Note that this is different from the usual notion of liveness.
/// The CPU doesn't care whether or not we consider a register killed.
///
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REACHINGDEFANALYSIS_H
#define LLVM_CODEGEN_REACHINGDEFANALYSIS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/CodeGen/LoopTraversal.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

namespace llvm {

class MachineBasicBlock;
class MachineInstr;

/// Thin wrapper around "int" used to store reaching definitions,
/// using an encoding that makes it compatible with TinyPtrVector.
/// The 0th LSB is forced zero (and will be used for pointer union tagging);
/// the 1st LSB is forced one (to make sure the value is non-zero).
class ReachingDef {
  uintptr_t Encoded;
  friend struct PointerLikeTypeTraits<ReachingDef>;
  explicit ReachingDef(uintptr_t Encoded) : Encoded(Encoded) {}

public:
  ReachingDef(std::nullptr_t) : Encoded(0) {}
  ReachingDef(int Instr) : Encoded(((uintptr_t) Instr << 2) | 2) {}
  operator int() const { return ((int) Encoded) >> 2; }
};
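
// A sketch of the encoding round-trip described above: instruction id 5 is
// stored as (5 << 2) | 2 == 22, keeping bit 0 clear for pointer union tagging
// and bit 1 set so the value is never zero.
//
//   ReachingDef RD(5);
//   int Id = RD;  // operator int() shifts the tag bits away, yielding 5.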

template<>
struct PointerLikeTypeTraits<ReachingDef> {
  static constexpr int NumLowBitsAvailable = 1;

  static inline void *getAsVoidPointer(const ReachingDef &RD) {
    return reinterpret_cast<void *>(RD.Encoded);
  }

  static inline ReachingDef getFromVoidPointer(void *P) {
    return ReachingDef(reinterpret_cast<uintptr_t>(P));
  }

  static inline ReachingDef getFromVoidPointer(const void *P) {
    return ReachingDef(reinterpret_cast<uintptr_t>(P));
  }
};

/// This class provides the reaching def analysis.
class ReachingDefAnalysis : public MachineFunctionPass {
private:
  MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  LoopTraversal::TraversalOrder TraversedMBBOrder;
  unsigned NumRegUnits = 0;
  /// Instruction that defined each register, relative to the beginning of the
  /// current basic block.  When a LiveRegsDefInfo is used to represent a
  /// live-out register, this value is relative to the end of the basic block,
  /// so it will be a negative number.
  using LiveRegsDefInfo = std::vector<int>;
  LiveRegsDefInfo LiveRegs;

  /// Keeps clearance information for all registers. Note that this
  /// is different from the usual notion of liveness. The CPU
  /// doesn't care whether or not we consider a register killed.
  using OutRegsInfoMap = SmallVector<LiveRegsDefInfo, 4>;
  OutRegsInfoMap MBBOutRegsInfos;

  /// Current instruction number.
  /// The first instruction in each basic block is 0.
  int CurInstr = -1;

  /// Maps instructions to their instruction Ids, relative to the beginning of
  /// their basic blocks.
  DenseMap<MachineInstr *, int> InstIds;

  /// All reaching defs of a given RegUnit for a given MBB.
  using MBBRegUnitDefs = TinyPtrVector<ReachingDef>;
  /// All reaching defs of all reg units for a given MBB
  using MBBDefsInfo = std::vector<MBBRegUnitDefs>;
  /// All reaching defs of all reg units for all MBBs
  using MBBReachingDefsInfo = SmallVector<MBBDefsInfo, 4>;
  MBBReachingDefsInfo MBBReachingDefs;

  /// Default values are 'nothing happened a long time ago'.
  const int ReachingDefDefaultVal = -(1 << 21);

  using InstSet = SmallPtrSetImpl<MachineInstr*>;
  using BlockSet = SmallPtrSetImpl<MachineBasicBlock*>;

public:
  static char ID; // Pass identification, replacement for typeid

  ReachingDefAnalysis() : MachineFunctionPass(ID) {
    initializeReachingDefAnalysisPass(*PassRegistry::getPassRegistry());
  }
  void releaseMemory() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs).set(
          MachineFunctionProperties::Property::TracksLiveness);
  }

  /// Re-run the analysis.
  void reset();

  /// Initialize data structures.
  void init();

  /// Traverse the machine function, mapping definitions.
  void traverse();

  /// Provides the instruction id of the closest reaching def instruction of
  /// PhysReg that reaches MI, relative to the beginning of MI's basic block.
  int getReachingDef(MachineInstr *MI, MCRegister PhysReg) const;

  /// Return whether A and B use the same def of PhysReg.
  bool hasSameReachingDef(MachineInstr *A, MachineInstr *B,
                          MCRegister PhysReg) const;

  /// Return whether the reaching def for MI also is live out of its parent
  /// block.
  bool isReachingDefLiveOut(MachineInstr *MI, MCRegister PhysReg) const;

  /// Return the local MI that produces the live out value for PhysReg, or
  /// nullptr for a non-live out or non-local def.
  MachineInstr *getLocalLiveOutMIDef(MachineBasicBlock *MBB,
                                     MCRegister PhysReg) const;

  /// If a single MachineInstr creates the reaching definition, then return it.
  /// Otherwise return null.
  MachineInstr *getUniqueReachingMIDef(MachineInstr *MI,
                                       MCRegister PhysReg) const;

  /// If a single MachineInstr creates the reaching definition, for MIs operand
  /// at Idx, then return it. Otherwise return null.
  MachineInstr *getMIOperand(MachineInstr *MI, unsigned Idx) const;

  /// If a single MachineInstr creates the reaching definition, for MIs MO,
  /// then return it. Otherwise return null.
  MachineInstr *getMIOperand(MachineInstr *MI, MachineOperand &MO) const;

  /// Provide whether the register has been defined in the same basic block as,
  /// and before, MI.
  bool hasLocalDefBefore(MachineInstr *MI, MCRegister PhysReg) const;

  /// Return whether the given register is used after MI, whether it's a local
  /// use or a live out.
  bool isRegUsedAfter(MachineInstr *MI, MCRegister PhysReg) const;

  /// Return whether the given register is defined after MI.
  bool isRegDefinedAfter(MachineInstr *MI, MCRegister PhysReg) const;

  /// Provides the clearance - the number of instructions since the closest
  /// reaching def instruction of PhysReg that reaches MI.
  int getClearance(MachineInstr *MI, MCRegister PhysReg) const;

  /// Provides the uses, in the same block as MI, of register that MI defines.
  /// This does not consider live-outs.
  void getReachingLocalUses(MachineInstr *MI, MCRegister PhysReg,
                            InstSet &Uses) const;

  /// Search MBB for a definition of PhysReg and insert it into Defs. If no
  /// definition is found, recursively search the predecessor blocks for one.
  void getLiveOuts(MachineBasicBlock *MBB, MCRegister PhysReg, InstSet &Defs,
                   BlockSet &VisitedBBs) const;
  void getLiveOuts(MachineBasicBlock *MBB, MCRegister PhysReg,
                   InstSet &Defs) const;

  /// For the given block, collect the instructions that use the live-in
  /// value of the provided register. Return whether the value is still
  /// live on exit.
  bool getLiveInUses(MachineBasicBlock *MBB, MCRegister PhysReg,
                     InstSet &Uses) const;

  /// Collect the users of the value stored in PhysReg, which is defined
  /// by MI.
  void getGlobalUses(MachineInstr *MI, MCRegister PhysReg, InstSet &Uses) const;

  /// Collect all possible definitions of the value stored in PhysReg, which is
  /// used by MI.
  void getGlobalReachingDefs(MachineInstr *MI, MCRegister PhysReg,
                             InstSet &Defs) const;

  /// Return whether From can be moved forwards to just before To.
  bool isSafeToMoveForwards(MachineInstr *From, MachineInstr *To) const;

  /// Return whether From can be moved backwards to just after To.
  bool isSafeToMoveBackwards(MachineInstr *From, MachineInstr *To) const;

  /// Assuming MI is dead, recursively search the incoming operands which are
  /// killed by MI and collect those that would become dead.
  void collectKilledOperands(MachineInstr *MI, InstSet &Dead) const;

  /// Return whether removing this instruction will have no effect on the
  /// program, returning the redundant use-def chain.
  bool isSafeToRemove(MachineInstr *MI, InstSet &ToRemove) const;

  /// Return whether removing this instruction will have no effect on the
  /// program, ignoring the possible effects on some instructions, returning
  /// the redundant use-def chain.
  bool isSafeToRemove(MachineInstr *MI, InstSet &ToRemove,
                      InstSet &Ignore) const;

  /// Return whether a MachineInstr could be inserted at MI and safely define
  /// the given register without affecting the program.
  bool isSafeToDefRegAt(MachineInstr *MI, MCRegister PhysReg) const;

  /// Return whether a MachineInstr could be inserted at MI and safely define
  /// the given register without affecting the program, ignoring any effects
  /// on the provided instructions.
  bool isSafeToDefRegAt(MachineInstr *MI, MCRegister PhysReg,
                        InstSet &Ignore) const;

private:
  /// Set up LiveRegs by merging predecessor live-out values.
  void enterBasicBlock(MachineBasicBlock *MBB);

  /// Update live-out values.
  void leaveBasicBlock(MachineBasicBlock *MBB);

  /// Process the given basic block.
  void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);

  /// Process block that is part of a loop again.
  void reprocessBasicBlock(MachineBasicBlock *MBB);

  /// Update def-ages for registers defined by MI.
  /// Also break dependencies on partial defs and undef uses.
  void processDefs(MachineInstr *);

  /// Utility function for isSafeToMoveForwards/Backwards.
  template<typename Iterator>
  bool isSafeToMove(MachineInstr *From, MachineInstr *To) const;

  /// Return whether removing this instruction will have no effect on the
  /// program, ignoring the possible effects on some instructions, returning
  /// the redundant use-def chain.
  bool isSafeToRemove(MachineInstr *MI, InstSet &Visited,
                      InstSet &ToRemove, InstSet &Ignore) const;

  /// Provides the MI, from the given block, corresponding to the Id, or
  /// nullptr if the Id does not refer to the block.
  MachineInstr *getInstFromId(MachineBasicBlock *MBB, int InstId) const;

  /// Provides the instruction of the closest reaching def instruction of
  /// PhysReg that reaches MI, relative to the beginning of MI's basic block.
  MachineInstr *getReachingLocalMIDef(MachineInstr *MI,
                                      MCRegister PhysReg) const;
};
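
// Example usage from another MachineFunctionPass (a sketch; assumes the pass
// declared AU.addRequired<ReachingDefAnalysis>() in its getAnalysisUsage):
//
//   auto &RDA = getAnalysis<ReachingDefAnalysis>();
//   // Instruction id of the closest def of PhysReg that reaches MI.
//   int Def = RDA.getReachingDef(MI, PhysReg);
//   // The unique defining instruction, or nullptr if there is more than one.
//   MachineInstr *DefMI = RDA.getUniqueReachingMIDef(MI, PhysReg);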

} // namespace llvm

#endif // LLVM_CODEGEN_REACHINGDEFANALYSIS_H

//===- NonRelocatableStringpool.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_NONRELOCATABLESTRINGPOOL_H
#define LLVM_CODEGEN_NONRELOCATABLESTRINGPOOL_H

#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>
#include <vector>

namespace llvm {

/// A string table that doesn't need relocations.
///
/// Use this class when a string table doesn't need relocations. It provides
/// this ability by simply associating offsets with strings.
class NonRelocatableStringpool {
public:
  /// Entries are stored into the StringMap and simply linked together through
  /// the second element of this pair in order to keep track of insertion
  /// order.
  using MapTy = StringMap<DwarfStringPoolEntry, BumpPtrAllocator>;

  NonRelocatableStringpool(
      std::function<StringRef(StringRef Input)> Translator = nullptr,
      bool PutEmptyString = false)
      : Translator(Translator) {
    if (PutEmptyString)
      EmptyString = getEntry("");
  }

  DwarfStringPoolEntryRef getEntry(StringRef S);

  /// Get the offset of string \p S in the string table. This can insert a new
  /// element or return the offset of a pre-existing one.
  uint64_t getStringOffset(StringRef S) { return getEntry(S).getOffset(); }

  /// Get permanent storage for \p S (but do not necessarily emit \p S in the
  /// output section). A later call to getStringOffset() with the same string
  /// will chain it in, though.
  ///
  /// \returns The StringRef that points to permanent storage to use
  /// in place of \p S.
  StringRef internString(StringRef S);

  uint64_t getSize() { return CurrentEndOffset; }

  /// Return the list of strings to be emitted. This does not contain the
  /// strings which were added via internString only.
  std::vector<DwarfStringPoolEntryRef> getEntriesForEmission() const;

private:
  MapTy Strings;
  uint64_t CurrentEndOffset = 0;
  unsigned NumEntries = 0;
  DwarfStringPoolEntryRef EmptyString;
  std::function<StringRef(StringRef Input)> Translator;
};
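
// Example usage (a minimal sketch):
//
//   NonRelocatableStringpool Pool;
//   uint64_t Off = Pool.getStringOffset("foo");  // Inserts "foo".
//   StringRef S = Pool.internString("bar");      // Stored but not emitted.
//   for (DwarfStringPoolEntryRef E : Pool.getEntriesForEmission())
//     ;  // Emit E.getString() at E.getOffset(); "bar" is not in this list.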

/// Helper for making strong types.
template <typename T, typename S> class StrongType : public T {
public:
  template <typename... Args>
  explicit StrongType(Args... A) : T(std::forward<Args>(A)...) {}
};

/// It's very easy to introduce bugs by passing the wrong string pool.
/// By using strong types the interface enforces that the right
/// kind of pool is used.
struct UniqueTag {};
struct OffsetsTag {};
using UniquingStringPool = StrongType<NonRelocatableStringpool, UniqueTag>;
using OffsetsStringPool = StrongType<NonRelocatableStringpool, OffsetsTag>;
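
// For example, a function declared as 'void emit(OffsetsStringPool &Pool)'
// cannot accidentally be handed a UniquingStringPool, even though both types
// wrap the same underlying NonRelocatableStringpool.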

} // end namespace llvm

#endif // LLVM_CODEGEN_NONRELOCATABLESTRINGPOOL_H

//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the PseudoSourceValue class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/IR/ValueMap.h"
#include <map>

namespace llvm {

class GlobalValue;
class MachineFrameInfo;
class MachineMemOperand;
class MIRFormatter;
class PseudoSourceValue;
class raw_ostream;
class TargetMachine;

raw_ostream &operator<<(raw_ostream &OS, const PseudoSourceValue* PSV);

/// Special value supplied for machine level alias analysis. It indicates that
/// a memory access references the function's stack frame (e.g., a spill slot),
/// the area below the stack frame (e.g., argument space), or a constant pool.
class PseudoSourceValue {
public:
  enum PSVKind : unsigned {
    Stack,
    GOT,
    JumpTable,
    ConstantPool,
    FixedStack,
    GlobalValueCallEntry,
    ExternalSymbolCallEntry,
    TargetCustom
  };

private:
  unsigned Kind;
  unsigned AddressSpace;
  friend raw_ostream &llvm::operator<<(raw_ostream &OS,
                                       const PseudoSourceValue* PSV);

  friend class MachineMemOperand; // For printCustom().
  friend class MIRFormatter;      // For printCustom().

  /// Implement printing for PseudoSourceValue. This is called from
  /// Value::print or Value's operator<<.
  virtual void printCustom(raw_ostream &O) const;

public:
  explicit PseudoSourceValue(unsigned Kind, const TargetMachine &TM);

  virtual ~PseudoSourceValue();

  unsigned kind() const { return Kind; }

  bool isStack() const { return Kind == Stack; }
  bool isGOT() const { return Kind == GOT; }
  bool isConstantPool() const { return Kind == ConstantPool; }
  bool isJumpTable() const { return Kind == JumpTable; }

  unsigned getAddressSpace() const { return AddressSpace; }

  unsigned getTargetCustom() const {
    return (Kind >= TargetCustom) ? ((Kind+1) - TargetCustom) : 0;
  }

  /// Test whether the memory pointed to by this PseudoSourceValue has a
  /// constant value.
  virtual bool isConstant(const MachineFrameInfo *) const;

  /// Test whether the memory pointed to by this PseudoSourceValue may also be
  /// pointed to by an LLVM IR Value.
  virtual bool isAliased(const MachineFrameInfo *) const;

  /// Return true if the memory pointed to by this PseudoSourceValue can ever
  /// alias an LLVM IR Value.
  virtual bool mayAlias(const MachineFrameInfo *) const;
};

/// A specialized PseudoSourceValue for holding FixedStack values, which must
/// include a frame index.
class FixedStackPseudoSourceValue : public PseudoSourceValue {
  const int FI;

public:
  explicit FixedStackPseudoSourceValue(int FI, const TargetMachine &TM)
      : PseudoSourceValue(FixedStack, TM), FI(FI) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == FixedStack;
  }

  bool isConstant(const MachineFrameInfo *MFI) const override;

  bool isAliased(const MachineFrameInfo *MFI) const override;

  bool mayAlias(const MachineFrameInfo *) const override;

  void printCustom(raw_ostream &OS) const override;

  int getFrameIndex() const { return FI; }
};

class CallEntryPseudoSourceValue : public PseudoSourceValue {
protected:
  CallEntryPseudoSourceValue(unsigned Kind, const TargetMachine &TM);

public:
  bool isConstant(const MachineFrameInfo *) const override;
  bool isAliased(const MachineFrameInfo *) const override;
  bool mayAlias(const MachineFrameInfo *) const override;
};

/// A specialized pseudo source value for holding GlobalValue values.
class GlobalValuePseudoSourceValue : public CallEntryPseudoSourceValue {
  const GlobalValue *GV;

public:
  GlobalValuePseudoSourceValue(const GlobalValue *GV, const TargetMachine &TM);

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == GlobalValueCallEntry;
  }

  const GlobalValue *getValue() const { return GV; }
};

/// A specialized pseudo source value for holding external symbol values.
class ExternalSymbolPseudoSourceValue : public CallEntryPseudoSourceValue {
  const char *ES;

public:
  ExternalSymbolPseudoSourceValue(const char *ES, const TargetMachine &TM);

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == ExternalSymbolCallEntry;
  }

  const char *getSymbol() const { return ES; }
};

/// Manages creation of pseudo source values.
class PseudoSourceValueManager {
  const TargetMachine &TM;
  const PseudoSourceValue StackPSV, GOTPSV, JumpTablePSV, ConstantPoolPSV;
  std::map<int, std::unique_ptr<FixedStackPseudoSourceValue>> FSValues;
  StringMap<std::unique_ptr<const ExternalSymbolPseudoSourceValue>>
      ExternalCallEntries;
  ValueMap<const GlobalValue *,
           std::unique_ptr<const GlobalValuePseudoSourceValue>>
      GlobalCallEntries;

public:
  PseudoSourceValueManager(const TargetMachine &TM);

  /// Return a pseudo source value referencing the area below the stack frame of
  /// a function, e.g., the argument space.
  const PseudoSourceValue *getStack();

  /// Return a pseudo source value referencing the global offset table
  /// (or the like).
  const PseudoSourceValue *getGOT();

  /// Return a pseudo source value referencing the constant pool. Since constant
  /// pools are constant, this doesn't need to identify a specific constant
  /// pool entry.
  const PseudoSourceValue *getConstantPool();

  /// Return a pseudo source value referencing a jump table. Since jump tables
  /// are constant, this doesn't need to identify a specific jump table.
  const PseudoSourceValue *getJumpTable();

  /// Return a pseudo source value referencing a fixed stack frame entry,
  /// e.g., a spill slot.
  const PseudoSourceValue *getFixedStack(int FI);

  const PseudoSourceValue *getGlobalValueCallEntry(const GlobalValue *GV);

  const PseudoSourceValue *getExternalSymbolCallEntry(const char *ES);
};
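
// Example usage (a sketch; assumes 'MF' is the current MachineFunction and a
// spill slot with frame index 'FI' is being described):
//
//   PseudoSourceValueManager &PSVM = MF.getPSVManager();
//   const PseudoSourceValue *PSV = PSVM.getFixedStack(FI);
//   // PSV can now be attached to a MachineMemOperand so machine-level alias
//   // analysis knows the access targets a fixed stack slot.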

} // end namespace llvm

#endif

//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
// but for target-specific code rather than target-independent IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
#define LLVM_CODEGEN_MACHINEDOMINATORS_H

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include <cassert>
#include <memory>

namespace llvm {
class AnalysisUsage;
class MachineFunction;
class Module;
class raw_ostream;

template <>
inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
    MachineBasicBlock *MBB) {
  this->Roots.push_back(MBB);
}

extern template class DomTreeNodeBase<MachineBasicBlock>;
extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree

using MachineDomTree = DomTreeBase<MachineBasicBlock>;
using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;

//===-------------------------------------
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
/// compute a normal dominator tree.
///
class MachineDominatorTree : public MachineFunctionPass {
  /// Helper structure used to hold all the basic blocks
  /// involved in the split of a critical edge.
  struct CriticalEdge {
    MachineBasicBlock *FromBB;
    MachineBasicBlock *ToBB;
    MachineBasicBlock *NewBB;
  };

  /// Pile up all the critical edges to be split.
  /// The splitting of a critical edge is local, and thus it is possible
  /// to apply several of these changes at the same time.
  mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;

  /// Remember all the basic blocks that are inserted during
  /// edge splitting.
  /// Invariant: NewBBs == all the basic blocks contained in the NewBB
  /// field of all the elements of CriticalEdgesToSplit.
  /// I.e., for all elt in CriticalEdgesToSplit, there exists a BB in NewBBs
  /// such that BB == elt.NewBB.
  mutable SmallSet<MachineBasicBlock *, 32> NewBBs;

  /// The DominatorTreeBase that is used to compute a normal dominator tree.
  std::unique_ptr<MachineDomTree> DT;

  /// Apply all the recorded critical edges to the DT.
  /// This updates the underlying DT information in a way that uses
  /// the fast query path of DT as much as possible.
  ///
  /// \post CriticalEdgesToSplit.empty().
  void applySplitCriticalEdges() const;

public:
  static char ID; // Pass ID, replacement for typeid

  MachineDominatorTree();
  explicit MachineDominatorTree(MachineFunction &MF) : MachineFunctionPass(ID) {
    calculate(MF);
  }

  MachineDomTree &getBase() {
    if (!DT)
      DT.reset(new MachineDomTree());
    applySplitCriticalEdges();
    return *DT;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineBasicBlock *getRoot() const {
    applySplitCriticalEdges();
    return DT->getRoot();
  }

  MachineDomTreeNode *getRootNode() const {
    applySplitCriticalEdges();
    return DT->getRootNode();
  }

  bool runOnMachineFunction(MachineFunction &F) override;

  void calculate(MachineFunction &F);

  bool dominates(const MachineDomTreeNode *A,
                 const MachineDomTreeNode *B) const {
    applySplitCriticalEdges();
    return DT->dominates(A, B);
  }

  void getDescendants(MachineBasicBlock *A,
                      SmallVectorImpl<MachineBasicBlock *> &Result) {
    applySplitCriticalEdges();
    DT->getDescendants(A, Result);
  }

  bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
    applySplitCriticalEdges();
    return DT->dominates(A, B);
  }

  // dominates - Return true if A dominates B. This performs the
  // special checks necessary if A and B are in the same basic block.
  bool dominates(const MachineInstr *A, const MachineInstr *B) const {
    applySplitCriticalEdges();
    const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
    if (BBA != BBB) return DT->dominates(BBA, BBB);

    // Loop through the basic block until we find A or B.
    MachineBasicBlock::const_iterator I = BBA->begin();
    for (; &*I != A && &*I != B; ++I)
      /*empty*/ ;

    return &*I == A;
  }

  bool properlyDominates(const MachineDomTreeNode *A,
                         const MachineDomTreeNode *B) const {
    applySplitCriticalEdges();
    return DT->properlyDominates(A, B);
  }

  bool properlyDominates(const MachineBasicBlock *A,
                         const MachineBasicBlock *B) const {
    applySplitCriticalEdges();
    return DT->properlyDominates(A, B);
  }

  /// findNearestCommonDominator - Find nearest common dominator basic block
  /// for basic block A and B. If there is no such block then return NULL.
  MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
                                                MachineBasicBlock *B) {
    applySplitCriticalEdges();
    return DT->findNearestCommonDominator(A, B);
  }

  MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
    applySplitCriticalEdges();
    return DT->getNode(BB);
  }

  /// getNode - return the (Post)DominatorTree node for the specified basic
  /// block.  This is the same as using operator[] on this class.
  ///
  MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
    applySplitCriticalEdges();
    return DT->getNode(BB);
  }

  /// addNewBlock - Add a new node to the dominator tree information.  This
  /// creates a new node as a child of the DomBB dominator node, linking it into
  /// the children list of the immediate dominator.
  MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
                                  MachineBasicBlock *DomBB) {
    applySplitCriticalEdges();
    return DT->addNewBlock(BB, DomBB);
  }

  /// changeImmediateDominator - This method is used to update the dominator
  /// tree information when a node's immediate dominator changes.
  ///
  void changeImmediateDominator(MachineBasicBlock *N,
                                MachineBasicBlock *NewIDom) {
    applySplitCriticalEdges();
    DT->changeImmediateDominator(N, NewIDom);
  }

  void changeImmediateDominator(MachineDomTreeNode *N,
                                MachineDomTreeNode *NewIDom) {
    applySplitCriticalEdges();
    DT->changeImmediateDominator(N, NewIDom);
  }

  /// eraseNode - Removes a node from the dominator tree. Block must not
  /// dominate any other blocks. Removes node from its immediate dominator's
  /// children list. Deletes dominator node associated with basic block BB.
  void eraseNode(MachineBasicBlock *BB) {
    applySplitCriticalEdges();
    DT->eraseNode(BB);
  }

  /// splitBlock - BB is split and now it has one successor. Update dominator
  /// tree to reflect this change.
  void splitBlock(MachineBasicBlock* NewBB) {
    applySplitCriticalEdges();
    DT->splitBlock(NewBB);
  }

  /// isReachableFromEntry - Return true if A is dominated by the entry
  /// block of the function containing it.
  bool isReachableFromEntry(const MachineBasicBlock *A) {
    applySplitCriticalEdges();
    return DT->isReachableFromEntry(A);
  }

  void releaseMemory() override;

  void verifyAnalysis() const override;

  void print(raw_ostream &OS, const Module*) const override;

  /// Record that the critical edge (FromBB, ToBB) has been
  /// split with NewBB.
  /// It is best to use this method instead of directly updating the
  /// underlying information, because it helps reduce the number of
  /// times the DT information is invalidated.
  ///
  /// \note Do not use this method with regular edges.
  ///
  /// \note To benefit from the compile time improvement incurred by this
  /// method, the users of this method have to limit the queries to the DT
  /// interface between two edges splitting. In other words, they have to
  /// pack the splitting of critical edges as much as possible.
  void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
                              MachineBasicBlock *ToBB,
                              MachineBasicBlock *NewBB) {
    bool Inserted = NewBBs.insert(NewBB).second;
    (void)Inserted;
    assert(Inserted &&
           "A basic block inserted via edge splitting cannot appear twice");
    CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
  }
};
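
// Example of batching critical edge splits (a sketch; assumes each edge
// (From, To) in 'SplitEdges' has been split with a fresh block 'New'):
//
//   for (const auto &E : SplitEdges)
//     MDT.recordSplitCriticalEdge(E.From, E.To, E.New);  // Record only.
//   MachineDomTreeNode *N = MDT.getNode(SomeMBB);  // Queries lazily apply
//                                                  // the recorded updates.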

//===-------------------------------------
/// DominatorTree GraphTraits specialization so the DominatorTree can be
/// iterable by generic graph iterators.
///

template <class Node, class ChildIterator>
struct MachineDomTreeGraphTraitsBase {
  using NodeRef = Node *;
  using ChildIteratorType = ChildIterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};

template <class T> struct GraphTraits;

template <>
struct GraphTraits<MachineDomTreeNode *>
    : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
                                           MachineDomTreeNode::const_iterator> {
};

template <>
struct GraphTraits<const MachineDomTreeNode *>
    : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
                                           MachineDomTreeNode::const_iterator> {
};

template <> struct GraphTraits<MachineDominatorTree*>
  : public GraphTraits<MachineDomTreeNode *> {
  static NodeRef getEntryNode(MachineDominatorTree *DT) {
    return DT->getRootNode();
  }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H

//===-- include/llvm/CodeGen/ByteProvider.h - Map bytes ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements ByteProvider. The purpose of ByteProvider is to provide
// a map between a target node's byte (byte position is DestOffset) and the
// source (and byte position) that provides it (in Src and SrcOffset
// respectively). See CodeGen/SelectionDAG/DAGCombiner.cpp MatchLoadCombine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BYTEPROVIDER_H
#define LLVM_CODEGEN_BYTEPROVIDER_H

#include <optional>
#include <type_traits>

namespace llvm {

/// Represents the known origin of an individual byte in a combine pattern. The
/// value of the byte is either constant zero, or comes from memory /
/// some other productive instruction (e.g. arithmetic instructions).
/// Bit manipulation instructions like shifts are not ByteProviders; rather,
/// they are used to extract bytes.
template <typename ISelOp> class ByteProvider {
private:
  ByteProvider(std::optional<ISelOp> Src, int64_t DestOffset, int64_t SrcOffset)
      : Src(Src), DestOffset(DestOffset), SrcOffset(SrcOffset) {}

  // TODO -- use constraint in c++20
  // Does this type correspond with an operation in selection DAG
  template <typename T> class is_op {
  private:
    using yes = std::true_type;
    using no = std::false_type;

    // Only allow classes with member function getOpcode
    template <typename U>
    static auto test(int) -> decltype(std::declval<U>().getOpcode(), yes());

    template <typename> static no test(...);

  public:
    using remove_pointer_t = typename std::remove_pointer<T>::type;
    static constexpr bool value =
        std::is_same<decltype(test<remove_pointer_t>(0)), yes>::value;
  };

public:
  // For constant zero providers Src is set to nullopt. For actual providers
  // Src represents the node which originally produced the relevant bits.
  std::optional<ISelOp> Src = std::nullopt;
  // DestOffset is the offset of the byte in the destination we are mapping.
  int64_t DestOffset = 0;
  // SrcOffset is the offset in the ultimate source node that maps to the
  // DestOffset
  int64_t SrcOffset = 0;

  ByteProvider() = default;

  static ByteProvider getSrc(std::optional<ISelOp> Val, int64_t ByteOffset,
                             int64_t VectorOffset) {
    static_assert(is_op<ISelOp>().value,
                  "ByteProviders must contain an operation in selection DAG.");
    return ByteProvider(Val, ByteOffset, VectorOffset);
  }

  static ByteProvider getConstantZero() {
    return ByteProvider<ISelOp>(std::nullopt, 0, 0);
  }
  bool isConstantZero() const { return !Src; }

  bool hasSrc() const { return Src.has_value(); }

  bool hasSameSrc(const ByteProvider &Other) const { return Other.Src == Src; }

  bool operator==(const ByteProvider &Other) const {
    return Other.Src == Src && Other.DestOffset == DestOffset &&
           Other.SrcOffset == SrcOffset;
  }
};
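
// Example usage (a sketch; 'N' stands for some SDNode* in a DAG combine,
// which satisfies the is_op check because SDNode has getOpcode()):
//
//   auto BP = ByteProvider<SDNode *>::getSrc(N, /*ByteOffset=*/0,
//                                            /*VectorOffset=*/0);
//   auto Zero = ByteProvider<SDNode *>::getConstantZero();
//   bool Same = BP.hasSameSrc(Zero);  // false: Zero has no source node.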
} // end namespace llvm

#endif // LLVM_CODEGEN_BYTEPROVIDER_H

//===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The EdgeBundles analysis forms equivalence classes of CFG edges such that all
// edges leaving a machine basic block are in the same bundle, and all edges
// entering a machine basic block are in the same bundle.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
#define LLVM_CODEGEN_EDGEBUNDLES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

namespace llvm {

class EdgeBundles : public MachineFunctionPass {
  const MachineFunction *MF = nullptr;

  /// EC - Each edge bundle is an equivalence class. The keys are:
  ///   2*BB->getNumber()   -> Ingoing bundle.
  ///   2*BB->getNumber()+1 -> Outgoing bundle.
  IntEqClasses EC;

  /// Blocks - Map each bundle to a list of basic block numbers.
  SmallVector<SmallVector<unsigned, 8>, 4> Blocks;

public:
  static char ID;
  EdgeBundles() : MachineFunctionPass(ID) {}

  /// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
  /// bundle number for basic block #N
  unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }

  /// getNumBundles - Return the total number of bundles in the CFG.
  unsigned getNumBundles() const { return EC.getNumClasses(); }

  /// getBlocks - Return an array of blocks that are connected to Bundle.
  ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }

  /// getMachineFunction - Return the last machine function computed.
  const MachineFunction *getMachineFunction() const { return MF; }

  /// view - Visualize the annotated bipartite CFG with Graphviz.
  void view() const;

private:
  bool runOnMachineFunction(MachineFunction&) override;
  void getAnalysisUsage(AnalysisUsage&) const override;
};
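
// Example usage (a sketch; assumes 'EB' is the computed EdgeBundles analysis
// and 'MBB' is a MachineBasicBlock):
//
//   unsigned In = EB.getBundle(MBB->getNumber(), /*Out=*/false);
//   unsigned Out = EB.getBundle(MBB->getNumber(), /*Out=*/true);
//   // All edges entering MBB belong to bundle 'In'; all edges leaving it
//   // belong to bundle 'Out'.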

} // end namespace llvm

#endif

//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect meta information for a module.  This information should be in a
// neutral form that can be used by different debugging and exception handling
// schemes.
//
// The organization of information is primarily clustered around the source
// compile units.  The main exception is source line correspondence where
// inlining may interleave code from various compile units.
//
// The following information can be retrieved from the MachineModuleInfo.
//
//  -- Source directories - Directories are uniqued based on their canonical
//     string and assigned a sequential numeric ID (base 1).
//  -- Source files - Files are also uniqued based on their name and directory
//     ID.  A file ID is a sequential number (base 1).
//  -- Source line correspondence - A vector of file ID, line#, column# triples.
//     A DEBUG_LOCATION instruction is generated by the DAG Legalizer
//     corresponding to each entry in the source line list.  This allows a debug
//     emitter to generate labels referenced by debug information tables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
#define LLVM_CODEGEN_MACHINEMODULEINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/IR/PassManager.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Pass.h"
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

class Function;
class LLVMTargetMachine;
class MachineFunction;
class Module;

//===----------------------------------------------------------------------===//
/// This class can be derived from and used by targets to hold private
/// target-specific information for each Module.  Objects of this type are
/// accessed/created with MachineModuleInfo::getObjFileInfo and destroyed when
/// the MachineModuleInfo is destroyed.
///
class MachineModuleInfoImpl {
public:
  using StubValueTy = PointerIntPair<MCSymbol *, 1, bool>;
  using SymbolListTy = std::vector<std::pair<MCSymbol *, StubValueTy>>;

  virtual ~MachineModuleInfoImpl();

protected:
  /// Return the entries from a DenseMap in a deterministic sorted order.
  /// Clears the map.
  static SymbolListTy getSortedStubs(DenseMap<MCSymbol*, StubValueTy>&);
};

//===----------------------------------------------------------------------===//
/// This class contains meta information specific to a module.  Queries can be
/// made by different debugging and exception handling schemes and reformatted
/// for specific use.
///
class MachineModuleInfo {
  friend class MachineModuleInfoWrapperPass;
  friend class MachineModuleAnalysis;

  const LLVMTargetMachine &TM;

  /// This is the MCContext used for the entire code generator.
  MCContext Context;
  // This is an external context, that if assigned, will be used instead of the
  // internal context.
  MCContext *ExternalContext = nullptr;

  /// This is the LLVM Module being worked on.
  const Module *TheModule = nullptr;

  /// This is the object-file-format-specific implementation of
  /// MachineModuleInfoImpl, which lets targets accumulate whatever info they
  /// want.
  MachineModuleInfoImpl *ObjFileMMI;

  /// \name Exception Handling
  /// \{

  /// The current call site index being processed, if any. 0 if none.
  unsigned CurCallSite = 0;

  /// \}

  // TODO: Ideally, what we'd like is to have a switch that allows emitting
  // synchronous (precise at call-sites only) CFA into .eh_frame. However,
  // even under this switch, we'd like .debug_frame to be precise when using
  // -g. At this moment, there's no way to specify that some CFI directives
  // go into .eh_frame only, while others go into .debug_frame only.

  /// True if debugging information is available in this module.
  bool DbgInfoAvailable = false;

  /// True if this module is being built for windows/msvc, and uses floating
  /// point.  This is used to emit an undefined reference to _fltused.
  bool UsesMSVCFloatingPoint = false;

  /// Maps IR Functions to their corresponding MachineFunctions.
  DenseMap<const Function*, std::unique_ptr<MachineFunction>> MachineFunctions;
  /// Next unique number available for a MachineFunction.
  unsigned NextFnNum = 0;
  const Function *LastRequest = nullptr; ///< Used for shortcut/cache.
  MachineFunction *LastResult = nullptr; ///< Used for shortcut/cache.

  MachineModuleInfo &operator=(MachineModuleInfo &&MMII) = delete;

public:
  explicit MachineModuleInfo(const LLVMTargetMachine *TM = nullptr);

  explicit MachineModuleInfo(const LLVMTargetMachine *TM,
                             MCContext *ExtContext);

  MachineModuleInfo(MachineModuleInfo &&MMII);

  ~MachineModuleInfo();

  void initialize();
  void finalize();

  const LLVMTargetMachine &getTarget() const { return TM; }

  const MCContext &getContext() const {
    return ExternalContext ? *ExternalContext : Context;
  }
  MCContext &getContext() {
    return ExternalContext ? *ExternalContext : Context;
  }

  const Module *getModule() const { return TheModule; }

  /// Returns the MachineFunction constructed for the IR function \p F.
  /// Creates a new MachineFunction if none exists yet.
  MachineFunction &getOrCreateMachineFunction(Function &F);

  /// \brief Returns the MachineFunction associated to IR function \p F if there
  /// is one, otherwise nullptr.
  MachineFunction *getMachineFunction(const Function &F) const;

  /// Delete the MachineFunction \p MF and reset the link in the IR Function to
  /// Machine Function map.
  void deleteMachineFunctionFor(Function &F);

  /// Add an externally created MachineFunction \p MF for \p F.
  void insertFunction(const Function &F, std::unique_ptr<MachineFunction> &&MF);

  /// Keep track of various per-module pieces of information for backends
  /// that would like to do so.
  template<typename Ty>
  Ty &getObjFileInfo() {
    if (ObjFileMMI == nullptr)
      ObjFileMMI = new Ty(*this);
    return *static_cast<Ty*>(ObjFileMMI);
  }

  template<typename Ty>
  const Ty &getObjFileInfo() const {
    return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
  }

  /// Returns true if valid debug info is present.
  bool hasDebugInfo() const { return DbgInfoAvailable; }

  bool usesMSVCFloatingPoint() const { return UsesMSVCFloatingPoint; }

  void setUsesMSVCFloatingPoint(bool b) { UsesMSVCFloatingPoint = b; }

  /// \name Exception Handling
  /// \{

  /// Set the call site currently being processed.
  void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }

  /// Get the call site currently being processed, if any; returns zero if
  /// none.
  unsigned getCurrentCallSite() { return CurCallSite; }

  /// \}

  // MMI owns the MCContext. It should never be invalidated.
  bool invalidate(Module &, const PreservedAnalyses &,
                  ModuleAnalysisManager::Invalidator &) {
    return false;
  }
}; // End class MachineModuleInfo
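
// Example usage from a legacy pass (a sketch; assumes the pass required
// MachineModuleInfoWrapperPass in its getAnalysisUsage):
//
//   MachineModuleInfo &MMI =
//       getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
//   MachineFunction &MF = MMI.getOrCreateMachineFunction(F);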

class MachineModuleInfoWrapperPass : public ImmutablePass {
  MachineModuleInfo MMI;

public:
  static char ID; // Pass identification, replacement for typeid
  explicit MachineModuleInfoWrapperPass(const LLVMTargetMachine *TM = nullptr);

  explicit MachineModuleInfoWrapperPass(const LLVMTargetMachine *TM,
                                        MCContext *ExtContext);

  // Initialization and Finalization
  bool doInitialization(Module &) override;
  bool doFinalization(Module &) override;

  MachineModuleInfo &getMMI() { return MMI; }
  const MachineModuleInfo &getMMI() const { return MMI; }
};

/// An analysis that produces \c MachineModuleInfo for a module.
class MachineModuleAnalysis : public AnalysisInfoMixin<MachineModuleAnalysis> {
  friend AnalysisInfoMixin<MachineModuleAnalysis>;
  static AnalysisKey Key;

  const LLVMTargetMachine *TM;

public:
  /// Provide the result type for this analysis pass.
  using Result = MachineModuleInfo;

  MachineModuleAnalysis(const LLVMTargetMachine *TM) : TM(TM) {}

  /// Run the analysis pass and produce machine module information.
  MachineModuleInfo run(Module &M, ModuleAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEMODULEINFO_H

//===- CSEConfigBase.h - A CSEConfig interface ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CSECONFIGBASE_H
#define LLVM_CODEGEN_CSECONFIGBASE_H

namespace llvm {
// Class representing some configuration that can be done during GlobalISel's
// CSEInfo analysis. We define it here because TargetPassConfig can't depend on
// the GlobalISel library, and so we use this in the interface between them
// so that the derived classes in GISel can reference generic opcodes.
class CSEConfigBase {
public:
  virtual ~CSEConfigBase() = default;
  // Hook for defining which Generic instructions should be CSEd.
  // GISelCSEInfo currently only calls this hook when dealing with generic
  // opcodes.
  virtual bool shouldCSEOpc(unsigned Opc) { return false; }
};
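
// Example subclass (a sketch): a configuration that CSEs only G_CONSTANT,
// illustrating the kind of derived config the GISel library can define:
//
//   class CSEConfigConstantOnly : public CSEConfigBase {
//   public:
//     bool shouldCSEOpc(unsigned Opc) override {
//       return Opc == TargetOpcode::G_CONSTANT;
//     }
//   };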

} // namespace llvm

#endif // LLVM_CODEGEN_CSECONFIGBASE_H

//===- CodeGen/MachineInstrBuilder.h - Simplify creation of MIs --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes a function named BuildMI, which is useful for dramatically
// simplifying how MachineInstrs are created.  It allows use of code like this:
//
//   MIMetadata MIMD(MI);  // Propagates DebugLoc and other metadata
//   M = BuildMI(MBB, MI, MIMD, TII.get(X86::ADD8rr), Dst)
//           .addReg(argVal1)
//           .addReg(argVal2);
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class MCInstrDesc;
class MDNode;

namespace RegState {

enum {
  /// Register definition.
  Define = 0x2,
  /// Not emitted register (e.g. carry, or temporary result).
  Implicit = 0x4,
  /// The last use of a register.
  Kill = 0x8,
  /// Unused definition.
  Dead = 0x10,
  /// Value of the register doesn't matter.
  Undef = 0x20,
  /// Register definition happens before uses.
  EarlyClobber = 0x40,
  /// Register 'use' is for debugging purpose.
  Debug = 0x80,
  /// Register reads a value that is defined inside the same instruction or
  /// bundle.
  InternalRead = 0x100,
  /// Register that may be renamed.
  Renamable = 0x200,
  DefineNoRead = Define | Undef,
  ImplicitDefine = Implicit | Define,
  ImplicitKill = Implicit | Kill
};

} // end namespace RegState
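
// Example of combining these flags when building an instruction (a sketch;
// 'Opc', 'DstReg', 'SrcReg' and 'FlagsReg' are placeholders):
//
//   BuildMI(MBB, MI, MIMD, TII.get(Opc))
//       .addReg(DstReg, RegState::Define)             // Explicit def.
//       .addReg(SrcReg, RegState::Kill)               // Last use.
//       .addReg(FlagsReg, RegState::ImplicitDefine);  // Implicit def.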

class MachineInstrBuilder {
  MachineFunction *MF = nullptr;
  MachineInstr *MI = nullptr;

public:
  MachineInstrBuilder() = default;

  /// Create a MachineInstrBuilder for manipulating an existing instruction.
  /// F must be the machine function that was used to allocate I.
  MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
  MachineInstrBuilder(MachineFunction &F, MachineBasicBlock::iterator I)
      : MF(&F), MI(&*I) {}

  /// Allow automatic conversion to the machine instruction we are working on.
  operator MachineInstr*() const { return MI; }
  MachineInstr *operator->() const { return MI; }
  operator MachineBasicBlock::iterator() const { return MI; }

  /// If conversion operators fail, use this method to get the MachineInstr
  /// explicitly.
  MachineInstr *getInstr() const { return MI; }

  /// Get the register for the operand index.
  /// The operand at the index should be a register (asserted by
  /// MachineOperand).
  Register getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }

  /// Add a new virtual register operand.
  const MachineInstrBuilder &addReg(Register RegNo, unsigned flags = 0,
                                    unsigned SubReg = 0) const {
    assert((flags & 0x1) == 0 &&
           "Passing in 'true' to addReg is forbidden! Use enums instead.");
    MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
                                               flags & RegState::Define,
                                               flags & RegState::Implicit,
                                               flags & RegState::Kill,
                                               flags & RegState::Dead,
                                               flags & RegState::Undef,
                                               flags & RegState::EarlyClobber,
                                               SubReg,
                                               flags & RegState::Debug,
                                               flags & RegState::InternalRead,
                                               flags & RegState::Renamable));
    return *this;
  }

  /// Add a virtual register definition operand.
  const MachineInstrBuilder &addDef(Register RegNo, unsigned Flags = 0,
                                    unsigned SubReg = 0) const {
    return addReg(RegNo, Flags | RegState::Define, SubReg);
  }

  /// Add a virtual register use operand. It is an error for Flags to contain
  /// `RegState::Define` when calling this function.
  const MachineInstrBuilder &addUse(Register RegNo, unsigned Flags = 0,
                                    unsigned SubReg = 0) const {
    assert(!(Flags & RegState::Define) &&
           "Misleading addUse defines register, use addReg instead.");
    return addReg(RegNo, Flags, SubReg);
  }

  /// Add a new immediate operand.
  const MachineInstrBuilder &addImm(int64_t Val) const {
    MI->addOperand(*MF, MachineOperand::CreateImm(Val));
    return *this;
  }

  const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
    MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
    return *this;
  }

  const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
    MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
    return *this;
  }

  const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
                                    unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addFrameIndex(int Idx) const {
    MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
    return *this;
  }

  const MachineInstrBuilder &
  addConstantPoolIndex(unsigned Idx, int Offset = 0,
                       unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
                                          unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
                                                          TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
                                               unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
                                              int64_t Offset = 0,
                                              unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addExternalSymbol(const char *FnName,
                                               unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
                                             int64_t Offset = 0,
                                             unsigned TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
    MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
    return *this;
  }

  const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
    MI->addMemOperand(*MF, MMO);
    return *this;
  }

  const MachineInstrBuilder &
  setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
    MI->setMemRefs(*MF, MMOs);
    return *this;
  }

  const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
    MI->cloneMemRefs(*MF, OtherMI);
    return *this;
  }

  const MachineInstrBuilder &
  cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
    MI->cloneMergedMemRefs(*MF, OtherMIs);
    return *this;
  }

  const MachineInstrBuilder &add(const MachineOperand &MO) const {
    MI->addOperand(*MF, MO);
    return *this;
  }

  const MachineInstrBuilder &add(ArrayRef<MachineOperand> MOs) const {
    for (const MachineOperand &MO : MOs) {
      MI->addOperand(*MF, MO);
    }
    return *this;
  }

  const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
    MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
    assert((MI->isDebugValueLike() ? static_cast<bool>(MI->getDebugVariable())
                                   : true) &&
           "first MDNode argument of a DBG_VALUE not a variable");
    assert((MI->isDebugLabel() ? static_cast<bool>(MI->getDebugLabel())
                               : true) &&
           "first MDNode argument of a DBG_LABEL not a label");
    return *this;
  }

  const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
    MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
    return *this;
  }

  const MachineInstrBuilder &addIntrinsicID(Intrinsic::ID ID) const {
    MI->addOperand(*MF, MachineOperand::CreateIntrinsicID(ID));
    return *this;
  }

  const MachineInstrBuilder &addPredicate(CmpInst::Predicate Pred) const {
    MI->addOperand(*MF, MachineOperand::CreatePredicate(Pred));
    return *this;
  }

  const MachineInstrBuilder &addShuffleMask(ArrayRef<int> Val) const {
    MI->addOperand(*MF, MachineOperand::CreateShuffleMask(Val));
    return *this;
  }

  const MachineInstrBuilder &addSym(MCSymbol *Sym,
                                    unsigned char TargetFlags = 0) const {
    MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
    return *this;
  }

  const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
    MI->setFlags(Flags);
    return *this;
  }

  const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
    MI->setFlag(Flag);
    return *this;
  }

  // Add a displacement from an existing MachineOperand with an added offset.
  const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
                                     unsigned char TargetFlags = 0) const {
    // If caller specifies new TargetFlags then use it, otherwise the
    // default behavior is to copy the target flags from the existing
    // MachineOperand. This means if the caller wants to clear the
    // target flags it needs to do so explicitly.
    if (0 == TargetFlags)
      TargetFlags = Disp.getTargetFlags();

    switch (Disp.getType()) {
      default:
        llvm_unreachable("Unhandled operand type in addDisp()");
      case MachineOperand::MO_Immediate:
        return addImm(Disp.getImm() + off);
      case MachineOperand::MO_ConstantPoolIndex:
        return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
                                    TargetFlags);
      case MachineOperand::MO_GlobalAddress:
        return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
                                TargetFlags);
      case MachineOperand::MO_BlockAddress:
        return addBlockAddress(Disp.getBlockAddress(), Disp.getOffset() + off,
                               TargetFlags);
      case MachineOperand::MO_JumpTableIndex:
        assert(off == 0 && "cannot create offset into jump tables");
        return addJumpTableIndex(Disp.getIndex(), TargetFlags);
    }
  }
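  // Illustrative sketch (MIB and OldMI are placeholder names): when rewriting
  // a memory access to a nearby location, the original displacement operand
  // can be reused with an extra offset, whatever its operand kind:
  //   MIB.addDisp(OldMI.getOperand(3), 8);
  // Note the jump-table case above asserts that the extra offset is zero.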

  const MachineInstrBuilder &setPCSections(MDNode *MD) const {
    if (MD)
      MI->setPCSections(*MF, MD);
    return *this;
  }

  /// Copy all the implicit operands from OtherMI onto this one.
  const MachineInstrBuilder &
  copyImplicitOps(const MachineInstr &OtherMI) const {
    MI->copyImplicitOps(*MF, OtherMI);
    return *this;
  }

  bool constrainAllUses(const TargetInstrInfo &TII,
                        const TargetRegisterInfo &TRI,
                        const RegisterBankInfo &RBI) const {
    return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
};

/// Set of metadata that should be preserved when using BuildMI(). This provides
/// a more convenient way of preserving DebugLoc and PCSections.
class MIMetadata {
public:
  MIMetadata() = default;
  MIMetadata(DebugLoc DL, MDNode *PCSections = nullptr)
      : DL(std::move(DL)), PCSections(PCSections) {}
  MIMetadata(const DILocation *DI, MDNode *PCSections = nullptr)
      : DL(DI), PCSections(PCSections) {}
  explicit MIMetadata(const Instruction &From)
      : DL(From.getDebugLoc()),
        PCSections(From.getMetadata(LLVMContext::MD_pcsections)) {}
  explicit MIMetadata(const MachineInstr &From)
      : DL(From.getDebugLoc()), PCSections(From.getPCSections()) {}

  const DebugLoc &getDL() const { return DL; }
  MDNode *getPCSections() const { return PCSections; }

private:
  DebugLoc DL;
  MDNode *PCSections = nullptr;
};
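// A minimal sketch of intended use (MI, TII, and NewOpc are placeholder
// names): constructing MIMetadata from an existing MachineInstr carries both
// its DebugLoc and its PC sections metadata over to a replacement built
// immediately before it:
//   BuildMI(*MI.getParent(), MI, MIMetadata(MI), TII.get(NewOpc));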

/// Builder interface. Specify how to create the initial instruction itself.
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID) {
  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, MIMD.getDL()))
           .setPCSections(MIMD.getPCSections());
}

/// This version of the builder sets up the first operand as a
/// destination virtual register.
inline MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID, Register DestReg) {
  return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, MIMD.getDL()))
           .setPCSections(MIMD.getPCSections())
           .addReg(DestReg, RegState::Define);
}

/// This version of the builder inserts the newly-built instruction before
/// the given position in the given MachineBasicBlock, and sets up the first
/// operand as a destination virtual register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::iterator I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID, Register DestReg) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI)
           .setPCSections(MIMD.getPCSections())
           .addReg(DestReg, RegState::Define);
}

/// This version of the builder inserts the newly-built instruction before
/// the given position in the given MachineBasicBlock, and sets up the first
/// operand as a destination virtual register.
///
/// If \c I is inside a bundle, then the newly inserted \a MachineInstr is
/// added to the same bundle.
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::instr_iterator I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID, Register DestReg) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI)
           .setPCSections(MIMD.getPCSections())
           .addReg(DestReg, RegState::Define);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID, Register DestReg) {
  // Calling the overload for instr_iterator is always correct.  However, the
  // definition is not available in headers, so inline the check.
  if (I.isInsideBundle())
    return BuildMI(BB, MachineBasicBlock::instr_iterator(I), MIMD, MCID,
                   DestReg);
  return BuildMI(BB, MachineBasicBlock::iterator(I), MIMD, MCID, DestReg);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID, Register DestReg) {
  return BuildMI(BB, *I, MIMD, MCID, DestReg);
}

/// This version of the builder inserts the newly-built instruction before the
/// given position in the given MachineBasicBlock, and does NOT take a
/// destination register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::iterator I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI).setPCSections(MIMD.getPCSections());
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                                   MachineBasicBlock::instr_iterator I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID) {
  MachineFunction &MF = *BB.getParent();
  MachineInstr *MI = MF.CreateMachineInstr(MCID, MIMD.getDL());
  BB.insert(I, MI);
  return MachineInstrBuilder(MF, MI).setPCSections(MIMD.getPCSections());
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr &I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID) {
  // Calling the overload for instr_iterator is always correct.  However, the
  // definition is not available in headers, so inline the check.
  if (I.isInsideBundle())
    return BuildMI(BB, MachineBasicBlock::instr_iterator(I), MIMD, MCID);
  return BuildMI(BB, MachineBasicBlock::iterator(I), MIMD, MCID);
}

inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB, MachineInstr *I,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID) {
  return BuildMI(BB, *I, MIMD, MCID);
}

/// This version of the builder inserts the newly-built instruction at the end
/// of the given MachineBasicBlock, and does NOT take a destination register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID) {
  return BuildMI(*BB, BB->end(), MIMD, MCID);
}

/// This version of the builder inserts the newly-built instruction at the
/// end of the given MachineBasicBlock, and sets up the first operand as a
/// destination virtual register.
inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
                                   const MIMetadata &MIMD,
                                   const MCInstrDesc &MCID, Register DestReg) {
  return BuildMI(*BB, BB->end(), MIMD, MCID, DestReg);
}
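// A minimal sketch of typical use (TII, MRI, MBB, DL, SrcReg, and the
// MyTarget names are placeholders, not part of this interface): append an
// instruction to a block and populate its operands by chaining:
//   Register DstReg = MRI.createVirtualRegister(&MyTarget::GPRRegClass);
//   BuildMI(&MBB, DL, TII.get(MyTarget::ADDri), DstReg)
//       .addReg(SrcReg, getKillRegState(true))
//       .addImm(42);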

/// This version of the builder builds a DBG_VALUE intrinsic
/// for either a value in a register or a register-indirect
/// address.  The convention is that a DBG_VALUE is indirect iff the
/// second operand is an immediate.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
                            const MCInstrDesc &MCID, bool IsIndirect,
                            Register Reg, const MDNode *Variable,
                            const MDNode *Expr);
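// Illustrative call (DL, TII, Reg, Var, and Expr are placeholder names):
// describe the value currently live in Reg with a direct (non-indirect)
// DBG_VALUE:
//   BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect=*/false,
//           Reg, Var, Expr);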

/// This version of the builder builds a DBG_VALUE or DBG_VALUE_LIST intrinsic
/// for a MachineOperand.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL,
                            const MCInstrDesc &MCID, bool IsIndirect,
                            ArrayRef<MachineOperand> MOs,
                            const MDNode *Variable, const MDNode *Expr);

/// This version of the builder builds a DBG_VALUE intrinsic
/// for either a value in a register or a register-indirect
/// address and inserts it at position I.
MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            const MCInstrDesc &MCID, bool IsIndirect,
                            Register Reg, const MDNode *Variable,
                            const MDNode *Expr);

/// This version of the builder builds a DBG_VALUE, DBG_INSTR_REF, or
/// DBG_VALUE_LIST intrinsic for a machine operand and inserts it at position I.
MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            const MCInstrDesc &MCID, bool IsIndirect,
                            ArrayRef<MachineOperand> MOs,
                            const MDNode *Variable, const MDNode *Expr);

/// Clone a DBG_VALUE whose value has been spilled to FrameIndex.
MachineInstr *buildDbgValueForSpill(MachineBasicBlock &BB,
                                    MachineBasicBlock::iterator I,
                                    const MachineInstr &Orig, int FrameIndex,
                                    Register SpillReg);
MachineInstr *
buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I,
                      const MachineInstr &Orig, int FrameIndex,
                      SmallVectorImpl<const MachineOperand *> &SpilledOperands);

/// Update a DBG_VALUE whose value has been spilled to FrameIndex. Useful when
/// modifying an instruction in place while iterating over a basic block.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg);

inline unsigned getDefRegState(bool B) {
  return B ? RegState::Define : 0;
}
inline unsigned getImplRegState(bool B) {
  return B ? RegState::Implicit : 0;
}
inline unsigned getKillRegState(bool B) {
  return B ? RegState::Kill : 0;
}
inline unsigned getDeadRegState(bool B) {
  return B ? RegState::Dead : 0;
}
inline unsigned getUndefRegState(bool B) {
  return B ? RegState::Undef : 0;
}
inline unsigned getInternalReadRegState(bool B) {
  return B ? RegState::InternalRead : 0;
}
inline unsigned getDebugRegState(bool B) {
  return B ? RegState::Debug : 0;
}
inline unsigned getRenamableRegState(bool B) {
  return B ? RegState::Renamable : 0;
}

/// Get all register state flags from machine operand \p RegOp.
inline unsigned getRegState(const MachineOperand &RegOp) {
  assert(RegOp.isReg() && "Not a register operand");
  return getDefRegState(RegOp.isDef()) | getImplRegState(RegOp.isImplicit()) |
         getKillRegState(RegOp.isKill()) | getDeadRegState(RegOp.isDead()) |
         getUndefRegState(RegOp.isUndef()) |
         getInternalReadRegState(RegOp.isInternalRead()) |
         getDebugRegState(RegOp.isDebug()) |
         getRenamableRegState(RegOp.getReg().isPhysical() &&
                              RegOp.isRenamable());
}
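// Illustrative use (MIB, MI, and NewReg are placeholder names): transfer
// every state flag from an existing register operand onto a replacement
// register operand in one step:
//   MIB.addReg(NewReg, getRegState(MI.getOperand(0)));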

/// Helper class for constructing bundles of MachineInstrs.
///
/// MIBundleBuilder can create a bundle from scratch by inserting new
/// MachineInstrs one at a time, or it can create a bundle from a sequence of
/// existing MachineInstrs in a basic block.
class MIBundleBuilder {
  MachineBasicBlock &MBB;
  MachineBasicBlock::instr_iterator Begin;
  MachineBasicBlock::instr_iterator End;

public:
  /// Create an MIBundleBuilder that inserts instructions into a new bundle in
  /// BB above the bundle or instruction at Pos.
  MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator Pos)
      : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}

  /// Create a bundle from the sequence of instructions between B and E.
  MIBundleBuilder(MachineBasicBlock &BB, MachineBasicBlock::iterator B,
                  MachineBasicBlock::iterator E)
      : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
    assert(B != E && "No instructions to bundle");
    ++B;
    while (B != E) {
      MachineInstr &MI = *B;
      ++B;
      MI.bundleWithPred();
    }
  }

  /// Create an MIBundleBuilder representing an existing instruction or bundle
  /// that has MI as its head.
  explicit MIBundleBuilder(MachineInstr *MI)
      : MBB(*MI->getParent()), Begin(MI),
        End(getBundleEnd(MI->getIterator())) {}

  /// Return a reference to the basic block containing this bundle.
  MachineBasicBlock &getMBB() const { return MBB; }

  /// Return true if no instructions have been inserted in this bundle yet.
  /// Empty bundles aren't representable in a MachineBasicBlock.
  bool empty() const { return Begin == End; }

  /// Return an iterator to the first bundled instruction.
  MachineBasicBlock::instr_iterator begin() const { return Begin; }

  /// Return an iterator beyond the last bundled instruction.
  MachineBasicBlock::instr_iterator end() const { return End; }

  /// Insert MI into this bundle before I which must point to an instruction in
  /// the bundle, or end().
  MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
                          MachineInstr *MI) {
    MBB.insert(I, MI);
    if (I == Begin) {
      if (!empty())
        MI->bundleWithSucc();
      Begin = MI->getIterator();
      return *this;
    }
    if (I == End) {
      MI->bundleWithPred();
      return *this;
    }
    // MI was inserted in the middle of the bundle, so its neighbors' flags are
    // already fine. Update MI's bundle flags manually.
    MI->setFlag(MachineInstr::BundledPred);
    MI->setFlag(MachineInstr::BundledSucc);
    return *this;
  }

  /// Insert MI into MBB by prepending it to the instructions in the bundle.
  /// MI will become the first instruction in the bundle.
  MIBundleBuilder &prepend(MachineInstr *MI) {
    return insert(begin(), MI);
  }

  /// Insert MI into MBB by appending it to the instructions in the bundle.
  /// MI will become the last instruction in the bundle.
  MIBundleBuilder &append(MachineInstr *MI) {
    return insert(end(), MI);
  }
};
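// A minimal sketch of building a bundle from scratch (MBB, Pos, FirstMI, and
// SecondMI are placeholder names for a block, an insertion point, and two
// already-created instructions):
//   MIBundleBuilder Bundler(MBB, Pos);
//   Bundler.append(FirstMI);
//   Bundler.append(SecondMI);
// If a BUNDLE header instruction is needed, finalizeBundle() from
// MachineInstrBundle.h can be run over the resulting range afterwards.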

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEINSTRBUILDER_H
//===- llvm/CodeGen/TailDuplicator.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the TailDuplicator class. Used by the
// TailDuplication pass and by MachineBlockPlacement.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TAILDUPLICATOR_H
#define LLVM_CODEGEN_TAILDUPLICATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include <utility>
#include <vector>

namespace llvm {

template <typename T, unsigned int N> class SmallSetVector;
template <typename Fn> class function_ref;
class MBFIWrapper;
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineInstr;
class MachineModuleInfo;
class MachineRegisterInfo;
class ProfileSummaryInfo;
class TargetRegisterInfo;

/// Utility class to perform tail duplication.
class TailDuplicator {
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const MachineBranchProbabilityInfo *MBPI;
  const MachineModuleInfo *MMI;
  MachineRegisterInfo *MRI;
  MachineFunction *MF;
  MBFIWrapper *MBFI;
  ProfileSummaryInfo *PSI;
  bool PreRegAlloc;
  bool LayoutMode;
  unsigned TailDupSize;

  // A list of virtual registers for which to update SSA form.
  SmallVector<Register, 16> SSAUpdateVRs;

  // For each virtual register in SSAUpdateVals keep a list of source virtual
  // registers.
  using AvailableValsTy = std::vector<std::pair<MachineBasicBlock *, Register>>;

  DenseMap<Register, AvailableValsTy> SSAUpdateVals;

public:
  /// Prepare to run on a specific machine function.
  /// @param MF - Function that will be processed
  /// @param PreRegAlloc - true if used before register allocation
  /// @param MBPI - Branch Probability Info. Used to propagate correct
  ///     probabilities when modifying the CFG.
  /// @param LayoutMode - When true, don't use the existing layout to make
  ///     decisions.
  /// @param TailDupSize - Maximum size of blocks to tail-duplicate. A value
  ///     of zero implies using the command line value TailDupSize.
  void initMF(MachineFunction &MF, bool PreRegAlloc,
              const MachineBranchProbabilityInfo *MBPI,
              MBFIWrapper *MBFI,
              ProfileSummaryInfo *PSI,
              bool LayoutMode, unsigned TailDupSize = 0);
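  // A minimal sketch of the intended call sequence (MF, MBPI, MBFI, and PSI
  // are placeholders supplied by the calling pass):
  //   TailDuplicator TD;
  //   TD.initMF(MF, /*PreRegAlloc=*/true, MBPI, MBFI, PSI,
  //             /*LayoutMode=*/false);
  //   bool Changed = TD.tailDuplicateBlocks();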

  bool tailDuplicateBlocks();
  static bool isSimpleBB(MachineBasicBlock *TailBB);
  bool shouldTailDuplicate(bool IsSimple, MachineBasicBlock &TailBB);

  /// Returns true if TailBB can successfully be duplicated into PredBB
  bool canTailDuplicate(MachineBasicBlock *TailBB, MachineBasicBlock *PredBB);

  /// Tail duplicate a single basic block into its predecessors, and then clean
  /// up.
  /// If \p DuplicatedPreds is not null, it will be updated to contain the
  /// list of predecessors that received a copy of \p MBB.
  /// If \p RemovalCallback is non-null, it will be called before MBB is
  /// deleted.
  /// If \p CandidatePtr is not null, duplicate into these blocks only.
  bool tailDuplicateAndUpdate(
      bool IsSimple, MachineBasicBlock *MBB,
      MachineBasicBlock *ForcedLayoutPred,
      SmallVectorImpl<MachineBasicBlock*> *DuplicatedPreds = nullptr,
      function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr,
      SmallVectorImpl<MachineBasicBlock *> *CandidatePtr = nullptr);

private:
  using RegSubRegPair = TargetInstrInfo::RegSubRegPair;

  void addSSAUpdateEntry(Register OrigReg, Register NewReg,
                         MachineBasicBlock *BB);
  void processPHI(MachineInstr *MI, MachineBasicBlock *TailBB,
                  MachineBasicBlock *PredBB,
                  DenseMap<Register, RegSubRegPair> &LocalVRMap,
                  SmallVectorImpl<std::pair<Register, RegSubRegPair>> &Copies,
                  const DenseSet<Register> &UsedByPhi, bool Remove);
  void duplicateInstruction(MachineInstr *MI, MachineBasicBlock *TailBB,
                            MachineBasicBlock *PredBB,
                            DenseMap<Register, RegSubRegPair> &LocalVRMap,
                            const DenseSet<Register> &UsedByPhi);
  void updateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
                            SmallVectorImpl<MachineBasicBlock *> &TDBBs,
                            SmallSetVector<MachineBasicBlock *, 8> &Succs);
  bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
  bool duplicateSimpleBB(MachineBasicBlock *TailBB,
                         SmallVectorImpl<MachineBasicBlock *> &TDBBs,
                         const DenseSet<Register> &RegsUsedByPhi);
  bool tailDuplicate(bool IsSimple,
                     MachineBasicBlock *TailBB,
                     MachineBasicBlock *ForcedLayoutPred,
                     SmallVectorImpl<MachineBasicBlock *> &TDBBs,
                     SmallVectorImpl<MachineInstr *> &Copies,
                     SmallVectorImpl<MachineBasicBlock *> *CandidatePtr);
  void appendCopies(MachineBasicBlock *MBB,
                 SmallVectorImpl<std::pair<Register, RegSubRegPair>> &CopyInfos,
                 SmallVectorImpl<MachineInstr *> &Copies);

  void removeDeadBlock(
      MachineBasicBlock *MBB,
      function_ref<void(MachineBasicBlock *)> *RemovalCallback = nullptr);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TAILDUPLICATOR_H
//===- llvm/CodeGen/MBFIWrapper.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class keeps track of branch frequencies of newly created blocks and
// tail-merged blocks. Used by the TailDuplication and MachineBlockPlacement
// passes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MBFIWRAPPER_H
#define LLVM_CODEGEN_MBFIWRAPPER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>

namespace llvm {

class MachineBasicBlock;
class MachineBlockFrequencyInfo;

class MBFIWrapper {
 public:
  MBFIWrapper(const MachineBlockFrequencyInfo &I) : MBFI(I) {}

  BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
  void setBlockFreq(const MachineBasicBlock *MBB, BlockFrequency F);
  std::optional<uint64_t>
  getBlockProfileCount(const MachineBasicBlock *MBB) const;

  raw_ostream &printBlockFreq(raw_ostream &OS,
                              const MachineBasicBlock *MBB) const;
  raw_ostream &printBlockFreq(raw_ostream &OS,
                              const BlockFrequency Freq) const;
  void view(const Twine &Name, bool isSimple = true);
  uint64_t getEntryFreq() const;
  const MachineBlockFrequencyInfo &getMBFI() { return MBFI; }

 private:
  const MachineBlockFrequencyInfo &MBFI;
  DenseMap<const MachineBasicBlock *, BlockFrequency> MergedBBFreq;
};
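// A minimal sketch of intended use (MBFI, NewBB, and Freq are placeholder
// names): record a frequency for a freshly created block, then read it back
// through the wrapper rather than the underlying analysis:
//   MBFIWrapper MBFIW(MBFI);
//   MBFIW.setBlockFreq(NewBB, Freq);
//   BlockFrequency F = MBFIW.getBlockFreq(NewBB);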

} // end namespace llvm

#endif // LLVM_CODEGEN_MBFIWRAPPER_H
//===-- llvm/CodeGen/TargetFrameLowering.h ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Interface to describe the layout of a stack frame on the target machine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETFRAMELOWERING_H
#define LLVM_CODEGEN_TARGETFRAMELOWERING_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/TypeSize.h"
#include <vector>

namespace llvm {
  class BitVector;
  class CalleeSavedInfo;
  class MachineFunction;
  class RegScavenger;

namespace TargetStackID {
enum Value {
  Default = 0,
  SGPRSpill = 1,
  ScalableVector = 2,
  WasmLocal = 3,
  NoAlloc = 255
};
}

/// Information about stack frame layout on the target.  It holds the direction
/// of stack growth, the known stack alignment on entry to each function, and
/// the offset to the locals area.
///
/// The offset to the local area is the offset from the stack pointer on
/// function entry to the first location where function data (local variables,
/// spill locations) can be stored.
class TargetFrameLowering {
public:
  enum StackDirection {
    StackGrowsUp,        // Adding to the stack increases the stack address
    StackGrowsDown       // Adding to the stack decreases the stack address
  };

  // Maps a callee saved register to a stack slot with a fixed offset.
  struct SpillSlot {
    unsigned Reg;
    int Offset; // Offset relative to stack pointer on function entry.
  };

  struct DwarfFrameBase {
    // The frame base may be either a register (the default), the CFA with an
    // offset, or a WebAssembly-specific location description.
    enum FrameBaseKind { Register, CFA, WasmFrameBase } Kind;
    struct WasmFrameBase {
      unsigned Kind; // Wasm local, global, or value stack
      unsigned Index;
    };
    union {
      // Used with FrameBaseKind::Register.
      unsigned Reg;
      // Used with FrameBaseKind::CFA.
      int Offset;
      struct WasmFrameBase WasmLoc;
    } Location;
  };

private:
  StackDirection StackDir;
  Align StackAlignment;
  Align TransientStackAlignment;
  int LocalAreaOffset;
  bool StackRealignable;
public:
  TargetFrameLowering(StackDirection D, Align StackAl, int LAO,
                      Align TransAl = Align(1), bool StackReal = true)
      : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
        LocalAreaOffset(LAO), StackRealignable(StackReal) {}

  virtual ~TargetFrameLowering();

  // These methods return information that describes the abstract stack layout
  // of the target machine.

  /// getStackGrowthDirection - Return the direction the stack grows
  ///
  StackDirection getStackGrowthDirection() const { return StackDir; }

  /// getStackAlignment - This method returns the number of bytes to which the
  /// stack pointer must be aligned on entry to a function.  Typically, this
  /// is the largest alignment for any data object in the target.
  ///
  unsigned getStackAlignment() const { return StackAlignment.value(); }
  /// getStackAlignment - This method returns the number of bytes to which the
  /// stack pointer must be aligned on entry to a function.  Typically, this
  /// is the largest alignment for any data object in the target.
  ///
  Align getStackAlign() const { return StackAlignment; }

  /// alignSPAdjust - This method aligns the stack adjustment to the correct
  /// alignment.
  ///
  int alignSPAdjust(int SPAdj) const {
    if (SPAdj < 0) {
      SPAdj = -alignTo(-SPAdj, StackAlignment);
    } else {
      SPAdj = alignTo(SPAdj, StackAlignment);
    }
    return SPAdj;
  }
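  // For example, with a 16-byte stack alignment, alignSPAdjust(40) == 48 and
  // alignSPAdjust(-40) == -48: the adjustment's magnitude is rounded up in
  // both directions so the stack pointer stays aligned.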

  /// getTransientStackAlignment - This method returns the number of bytes to
  /// which the stack pointer must be aligned at all times, even between
  /// calls.
  ///
  Align getTransientStackAlign() const { return TransientStackAlignment; }

  /// isStackRealignable - This method returns whether the stack can be
  /// realigned.
  bool isStackRealignable() const {
    return StackRealignable;
  }

  /// This method returns whether or not it is safe for an object with the
  /// given stack id to be bundled into the local area.
  virtual bool isStackIdSafeForLocalArea(unsigned StackId) const {
    return true;
  }

  /// getOffsetOfLocalArea - This method returns the offset of the local area
  /// from the stack pointer on entrance to a function.
  ///
  int getOffsetOfLocalArea() const { return LocalAreaOffset; }

  /// Control the placement of special register scavenging spill slots when
  /// allocating a stack frame.
  ///
  /// If this returns true, the frame indexes used by the RegScavenger will be
  /// allocated closest to the incoming stack pointer.
  virtual bool allocateScavengingFrameIndexesNearIncomingSP(
    const MachineFunction &MF) const;

  /// assignCalleeSavedSpillSlots - Allows target to override spill slot
  /// assignment logic.  If implemented, assignCalleeSavedSpillSlots() should
  /// assign frame slots to all CSI entries and return true.  If this method
  /// returns false, spill slots will be assigned using the generic
  /// implementation.
  /// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of
  /// CSI.
  virtual bool assignCalleeSavedSpillSlots(MachineFunction &MF,
                                           const TargetRegisterInfo *TRI,
                                           std::vector<CalleeSavedInfo> &CSI,
                                           unsigned &MinCSFrameIndex,
                                           unsigned &MaxCSFrameIndex) const {
    return assignCalleeSavedSpillSlots(MF, TRI, CSI);
  }

  virtual bool
  assignCalleeSavedSpillSlots(MachineFunction &MF,
                              const TargetRegisterInfo *TRI,
                              std::vector<CalleeSavedInfo> &CSI) const {
    return false;
  }

  /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
  /// pairs that contains an entry for each callee saved register that must be
  /// spilled to a particular stack location if it is spilled.
  ///
  /// Each entry in this array contains a <register,offset> pair, indicating the
  /// fixed offset from the incoming stack pointer that each register should be
  /// spilled at. If a register is not listed here, the code generator is
  /// allowed to spill it anywhere it chooses.
  ///
  virtual const SpillSlot *
  getCalleeSavedSpillSlots(unsigned &NumEntries) const {
    NumEntries = 0;
    return nullptr;
  }
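  // Illustrative sketch of a target override (the MyTarget names are
  // hypothetical): return a static table of fixed <register, offset> pairs.
  //   static const TargetFrameLowering::SpillSlot Slots[] = {
  //       {MyTarget::LR, -4}, {MyTarget::FP, -8}};
  //   const TargetFrameLowering::SpillSlot *
  //   MyTargetFrameLowering::getCalleeSavedSpillSlots(
  //       unsigned &NumEntries) const {
  //     NumEntries = std::size(Slots);
  //     return Slots;
  //   }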

  /// targetHandlesStackFrameRounding - Returns true if the target is
  /// responsible for rounding up the stack frame (probably at emitPrologue
  /// time).
  virtual bool targetHandlesStackFrameRounding() const {
    return false;
  }

  /// Returns true if the target will correctly handle shrink wrapping.
  virtual bool enableShrinkWrapping(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the stack slot holes in the fixed and callee-save stack
  /// area should be used when allocating other stack locations to reduce stack
  /// size.
  virtual bool enableStackSlotScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target can safely skip saving callee-saved registers
  /// for noreturn nounwind functions.
  virtual bool enableCalleeSaveSkip(const MachineFunction &MF) const;

  /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
  /// the function.
  virtual void emitPrologue(MachineFunction &MF,
                            MachineBasicBlock &MBB) const = 0;
  virtual void emitEpilogue(MachineFunction &MF,
                            MachineBasicBlock &MBB) const = 0;

  /// emitZeroCallUsedRegs - Zeros out call used registers.
  virtual void emitZeroCallUsedRegs(BitVector RegsToZero,
                                    MachineBasicBlock &MBB) const {}

  /// With basic block sections, emit callee saved frame moves for basic blocks
  /// that are in a different section.
  virtual void
  emitCalleeSavedFrameMovesFullCFA(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {}

  /// Returns true if we may need to fix the unwind information for the
  /// function.
  virtual bool enableCFIFixup(MachineFunction &MF) const;

  /// Emit CFI instructions that recreate the state of the unwind information
  /// upon function entry.
  virtual void resetCFIToInitialState(MachineBasicBlock &MBB) const {}

  /// Replace a StackProbe stub (if any) with the actual probe code inline
  virtual void inlineStackProbe(MachineFunction &MF,
                                MachineBasicBlock &PrologueMBB) const {}

  /// Does the stack probe function call return with a modified stack pointer?
  virtual bool stackProbeFunctionModifiesSP() const { return false; }

  /// Adjust the prologue to have the function use segmented stacks. This works
  /// by adding a check even before the "normal" function prologue.
  virtual void adjustForSegmentedStacks(MachineFunction &MF,
                                        MachineBasicBlock &PrologueMBB) const {}

  /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in
  /// the assembly prologue to explicitly handle the stack.
  virtual void adjustForHiPEPrologue(MachineFunction &MF,
                                     MachineBasicBlock &PrologueMBB) const {}

  /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
  /// saved registers and returns true if it isn't possible / profitable to do
  /// so by issuing a series of store instructions via storeRegToStackSlot()
  /// (i.e. the target has emitted the spills itself). Returns false to have
  /// the caller emit the generic per-register stores instead.
  virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MI,
                                         ArrayRef<CalleeSavedInfo> CSI,
                                         const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
  /// saved registers and returns true if it isn't possible / profitable to do
  /// so by issuing a series of load instructions via loadRegFromStackSlot().
  /// If it returns true, and any of the registers in CSI is not restored,
  /// it sets the corresponding Restored flag in CSI to false.
  /// Returns false otherwise.
  virtual bool
  restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              MutableArrayRef<CalleeSavedInfo> CSI,
                              const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// Return true if the target wants to keep the frame pointer regardless of
  /// the function attribute "frame-pointer".
  virtual bool keepFramePointer(const MachineFunction &MF) const {
    return false;
  }

  /// hasFP - Return true if the specified function should have a dedicated
  /// frame pointer register. For most targets this is true only if the function
  /// has variable sized allocas or if frame pointer elimination is disabled.
  virtual bool hasFP(const MachineFunction &MF) const = 0;

  /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
  /// not required, we reserve argument space for call sites in the function
  /// immediately on entry to the current function. This eliminates the need for
  /// add/sub sp brackets around call sites. Returns true if the call frame is
  /// included as part of the stack frame.
  virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
    return !hasFP(MF);
  }

  /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
  /// call frame pseudo ops before doing frame index elimination. This is
  /// possible only when frame index references between the pseudos won't
  /// need adjusting for the call frame adjustments. Normally, that's true
  /// if the function has a reserved call frame or a frame pointer. Some
  /// targets (Thumb2, for example) may have more complicated criteria,
  /// however, and can override this behavior.
  virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
    return hasReservedCallFrame(MF) || hasFP(MF);
  }

  // needsFrameIndexResolution - Do we need to perform FI resolution for
  // this function. Normally, this is required only when the function
  // has any stack objects. However, targets may want to override this.
  virtual bool needsFrameIndexResolution(const MachineFunction &MF) const;

  /// getFrameIndexReference - This method should return the base register
  /// and offset used to reference a frame index location. The offset is
  /// returned directly, and the base register is returned via FrameReg.
  virtual StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
                                             Register &FrameReg) const;

  /// Same as \c getFrameIndexReference, except that the stack pointer (as
  /// opposed to the frame pointer) will be the preferred value for \p
  /// FrameReg. This is generally used for emitting statepoint or EH tables that
  /// use offsets from RSP.  If \p IgnoreSPUpdates is true, the returned
  /// offset is only guaranteed to be valid with respect to the value of SP at
  /// the end of the prologue.
  virtual StackOffset
  getFrameIndexReferencePreferSP(const MachineFunction &MF, int FI,
                                 Register &FrameReg,
                                 bool IgnoreSPUpdates) const {
    // Always safe to dispatch to getFrameIndexReference.
    return getFrameIndexReference(MF, FI, FrameReg);
  }

  /// getNonLocalFrameIndexReference - This method returns the offset used to
  /// reference a frame index location. The offset can be from either FP/BP/SP
  /// based on which base register is returned by llvm.localaddress.
  virtual StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF,
                                                     int FI) const {
    // By default, dispatch to getFrameIndexReference. Interested targets can
    // override this.
    Register FrameReg;
    return getFrameIndexReference(MF, FI, FrameReg);
  }

  /// Returns the callee-saved registers as computed by determineCalleeSaves
  /// in the BitVector \p SavedRegs.
  virtual void getCalleeSaves(const MachineFunction &MF,
                                  BitVector &SavedRegs) const;

  /// This method determines which of the registers reported by
  /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
  /// The default implementation populates the \p SavedRegs bitset with
  /// all registers which are modified in the function; targets may override
  /// this function to save additional registers.
  /// This method also sets up the register scavenger ensuring there is a free
  /// register or a frameindex available.
  /// This method should not be called by any passes outside of PEI, because
  /// it may change state passed in by \p MF and \p RS. The preferred
  /// interface outside PEI is getCalleeSaves.
  virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
                                    RegScavenger *RS = nullptr) const;

  /// processFunctionBeforeFrameFinalized - This method is called immediately
  /// before the specified function's frame layout (MF.getFrameInfo()) is
  /// finalized.  Once the frame is finalized, MO_FrameIndex operands are
  /// replaced with direct constants.  This method is optional.
  ///
  virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                             RegScavenger *RS = nullptr) const {
  }

  /// processFunctionBeforeFrameIndicesReplaced - This method is called
  /// immediately before MO_FrameIndex operands are eliminated, but after the
  /// frame is finalized. This method is optional.
  virtual void
  processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF,
                                            RegScavenger *RS = nullptr) const {}

  virtual unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const {
    report_fatal_error("WinEH not implemented for this target");
  }

  /// This method is called during prolog/epilog code insertion to eliminate
  /// call frame setup and destroy pseudo instructions (but only if the Target
  /// is using them).  It is responsible for eliminating these instructions,
  /// replacing them with concrete instructions.  This method need only be
  /// implemented if using call frame setup/destroy pseudo instructions.
  /// Returns an iterator pointing to the instruction after the replaced one.
  virtual MachineBasicBlock::iterator
  eliminateCallFramePseudoInstr(MachineFunction &MF,
                                MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI) const {
    llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
                     "target!");
  }


  /// Order the symbols in the local stack frame.
  /// The list of objects that we want to order is in \p objectsToAllocate as
  /// indices into the MachineFrameInfo. The array can be reordered in any way
  /// upon return. The contents of the array, however, may not be modified (i.e.
  /// only their order may be changed).
  /// By default, just maintain the original order.
  virtual void
  orderFrameObjects(const MachineFunction &MF,
                    SmallVectorImpl<int> &objectsToAllocate) const {
  }

  /// Check whether or not the given \p MBB can be used as a prologue
  /// for the target.
  /// The prologue will be inserted first in this basic block.
  /// This method is used by the shrink-wrapping pass to decide if
  /// \p MBB will be correctly handled by the target.
  /// As soon as the target enables shrink-wrapping without overriding
  /// this method, we assume that each basic block is a valid
  /// prologue.
  virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const {
    return true;
  }

  /// Check whether or not the given \p MBB can be used as an epilogue
  /// for the target.
  /// The epilogue will be inserted before the first terminator of that block.
  /// This method is used by the shrink-wrapping pass to decide if
  /// \p MBB will be correctly handled by the target.
  /// As soon as the target enables shrink-wrapping without overriding
  /// this method, we assume that each basic block is a valid
  /// epilogue.
  virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const {
    return true;
  }

  /// Returns the StackID that scalable vectors should be associated with.
  virtual TargetStackID::Value getStackIDForScalableVectors() const {
    return TargetStackID::Default;
  }

  virtual bool isSupportedStackID(TargetStackID::Value ID) const {
    switch (ID) {
    default:
      return false;
    case TargetStackID::Default:
    case TargetStackID::NoAlloc:
      return true;
    }
  }

  /// Check if given function is safe for not having callee saved registers.
  /// This is used when interprocedural register allocation is enabled.
  static bool isSafeForNoCSROpt(const Function &F);

  /// Check if the no-CSR optimisation is profitable for the given function.
  virtual bool isProfitableForNoCSROpt(const Function &F) const {
    return true;
  }

  /// Return initial CFA offset value i.e. the one valid at the beginning of the
  /// function (before any stack operations).
  virtual int getInitialCFAOffset(const MachineFunction &MF) const;

  /// Return initial CFA register value i.e. the one valid at the beginning of
  /// the function (before any stack operations).
  virtual Register getInitialCFARegister(const MachineFunction &MF) const;

  /// Return the frame base information to be encoded in the DWARF subprogram
  /// debug info.
  virtual DwarfFrameBase getDwarfFrameBase(const MachineFunction &MF) const;
};

} // End llvm namespace

#endif
//===- ScheduleDFS.h - ILP metric for ScheduleDAGInstrs ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Definition of an ILP metric for machine level instruction scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDFS_H
#define LLVM_CODEGEN_SCHEDULEDFS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include <cassert>
#include <cstdint>
#include <vector>

namespace llvm {

template <typename T> class ArrayRef;
class raw_ostream;

/// Represent the ILP of the subDAG rooted at a DAG node.
///
/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
/// valid for all nodes regardless of their subtree membership.
///
/// When computed using bottom-up DFS, this metric assumes that the DAG is a
/// forest of trees with roots at the bottom of the schedule branching upward.
struct ILPValue {
  unsigned InstrCount;
  /// Length may either correspond to depth or height, depending on direction,
  /// and cycles or nodes depending on context.
  unsigned Length;

  ILPValue(unsigned count, unsigned length):
    InstrCount(count), Length(length) {}

  // Order by the ILP metric's value.
  bool operator<(ILPValue RHS) const {
    return (uint64_t)InstrCount * RHS.Length
      < (uint64_t)Length * RHS.InstrCount;
  }
  bool operator>(ILPValue RHS) const {
    return RHS < *this;
  }
  bool operator<=(ILPValue RHS) const {
    return (uint64_t)InstrCount * RHS.Length
      <= (uint64_t)Length * RHS.InstrCount;
  }
  bool operator>=(ILPValue RHS) const {
    return RHS <= *this;
  }
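  // For example, ILPValue(6, 2) > ILPValue(5, 2): the operators compare the
  // ratios InstrCount/Length by cross-multiplying (6 * 2 > 2 * 5), staying in
  // integer arithmetic and remaining well defined even when Length is zero.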

  void print(raw_ostream &OS) const;

  void dump() const;
};

/// Compute the values of each DAG node for various metrics during DFS.
class SchedDFSResult {
  friend class SchedDFSImpl;

  static const unsigned InvalidSubtreeID = ~0u;

  /// Per-SUnit data computed during DFS for various metrics.
  ///
  /// A node's SubtreeID is set to itself when it is visited to indicate that it
  /// is the root of a subtree. Later it is set to its parent to indicate an
  /// interior node. Finally, it is set to a representative subtree ID during
  /// finalization.
  struct NodeData {
    unsigned InstrCount = 0;
    unsigned SubtreeID = InvalidSubtreeID;

    NodeData() = default;
  };

  /// Per-Subtree data computed during DFS.
  struct TreeData {
    unsigned ParentTreeID = InvalidSubtreeID;
    unsigned SubInstrCount = 0;

    TreeData() = default;
  };

  /// Record a connection between subtrees and the connection level.
  struct Connection {
    unsigned TreeID;
    unsigned Level;

    Connection(unsigned tree, unsigned level): TreeID(tree), Level(level) {}
  };

  bool IsBottomUp;
  unsigned SubtreeLimit;
  /// DFS results for each SUnit in this DAG.
  std::vector<NodeData> DFSNodeData;

  // Store per-tree data indexed on tree ID.
  SmallVector<TreeData, 16> DFSTreeData;

  // For each subtree discovered during DFS, record its connections to other
  // subtrees.
  std::vector<SmallVector<Connection, 4>> SubtreeConnections;

  /// Cache the current connection level of each subtree.
  /// This mutable array is updated during scheduling.
  std::vector<unsigned> SubtreeConnectLevels;

public:
  SchedDFSResult(bool IsBU, unsigned lim)
    : IsBottomUp(IsBU), SubtreeLimit(lim) {}

  /// Get the node cutoff before subtrees are considered significant.
  unsigned getSubtreeLimit() const { return SubtreeLimit; }

  /// Return true if this DFSResult is uninitialized.
  ///
  /// resize() initializes DFSResult, while compute() populates it.
  bool empty() const { return DFSNodeData.empty(); }

  /// Clear the results.
  void clear() {
    DFSNodeData.clear();
    DFSTreeData.clear();
    SubtreeConnections.clear();
    SubtreeConnectLevels.clear();
  }

  /// Initialize the result data with the size of the DAG.
  void resize(unsigned NumSUnits) {
    DFSNodeData.resize(NumSUnits);
  }

  /// Compute various metrics for the DAG with given roots.
  void compute(ArrayRef<SUnit> SUnits);

  /// Get the number of instructions in the given subtree and its
  /// children.
  unsigned getNumInstrs(const SUnit *SU) const {
    return DFSNodeData[SU->NodeNum].InstrCount;
  }

  /// Get the number of instructions in the given subtree not including
  /// children.
  unsigned getNumSubInstrs(unsigned SubtreeID) const {
    return DFSTreeData[SubtreeID].SubInstrCount;
  }

  /// Get the ILP value for a DAG node.
  ///
  /// A leaf node has an ILP of 1/1.
  ILPValue getILP(const SUnit *SU) const {
    return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
  }

  /// The number of subtrees detected in this DAG.
  unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }

  /// Get the ID of the subtree the given DAG node belongs to.
  ///
  /// For convenience, if DFSResults have not been computed yet, give everything
  /// tree ID 0.
  unsigned getSubtreeID(const SUnit *SU) const {
    if (empty())
      return 0;
    assert(SU->NodeNum < DFSNodeData.size() &&  "New Node");
    return DFSNodeData[SU->NodeNum].SubtreeID;
  }

  /// Get the connection level of a subtree.
  ///
  /// For bottom-up trees, the connection level is the latency depth (in cycles)
  /// of the deepest connection to another subtree.
  unsigned getSubtreeLevel(unsigned SubtreeID) const {
    return SubtreeConnectLevels[SubtreeID];
  }

  /// Scheduler callback to update SubtreeConnectLevels when a tree is
  /// initially scheduled.
  void scheduleTree(unsigned SubtreeID);
};

raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEDFS_H
//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
#define LLVM_CODEGEN_REGALLOCPBQP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/Graph.h"
#include "llvm/CodeGen/PBQP/Math.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQP/Solution.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/MC/MCRegister.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>
#include <memory>
#include <set>
#include <vector>

namespace llvm {

class FunctionPass;
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineFunction;
class raw_ostream;

namespace PBQP {
namespace RegAlloc {

/// Spill option index.
inline unsigned getSpillOptionIdx() { return 0; }

/// Metadata to speed allocatability test.
///
/// Keeps track of the number of infinities in each row and column.
class MatrixMetadata {
public:
  MatrixMetadata(const Matrix& M)
    : UnsafeRows(new bool[M.getRows() - 1]()),
      UnsafeCols(new bool[M.getCols() - 1]()) {
    unsigned* ColCounts = new unsigned[M.getCols() - 1]();

    for (unsigned i = 1; i < M.getRows(); ++i) {
      unsigned RowCount = 0;
      for (unsigned j = 1; j < M.getCols(); ++j) {
        if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
          ++RowCount;
          ++ColCounts[j - 1];
          UnsafeRows[i - 1] = true;
          UnsafeCols[j - 1] = true;
        }
      }
      WorstRow = std::max(WorstRow, RowCount);
    }
    unsigned WorstColCountForCurRow =
      *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
    WorstCol = std::max(WorstCol, WorstColCountForCurRow);
    delete[] ColCounts;
  }
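  // Worked example (illustrative): for the 3x3 cost matrix
  //   [ 0    0    0 ]
  //   [ 0   inf   0 ]
  //   [ 0   inf   0 ]
  // row and column 0 are the spill option and are skipped; both non-spill
  // rows and the first non-spill column are marked unsafe, WorstRow == 1,
  // and WorstCol == 2.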

  MatrixMetadata(const MatrixMetadata &) = delete;
  MatrixMetadata &operator=(const MatrixMetadata &) = delete;

  unsigned getWorstRow() const { return WorstRow; }
  unsigned getWorstCol() const { return WorstCol; }
  const bool* getUnsafeRows() const { return UnsafeRows.get(); }
  const bool* getUnsafeCols() const { return UnsafeCols.get(); }

private:
  unsigned WorstRow = 0;
  unsigned WorstCol = 0;
  std::unique_ptr<bool[]> UnsafeRows;
  std::unique_ptr<bool[]> UnsafeCols;
};

/// Holds a vector of the allowed physical regs for a vreg.
class AllowedRegVector {
  friend hash_code hash_value(const AllowedRegVector &);

public:
  AllowedRegVector() = default;
  AllowedRegVector(AllowedRegVector &&) = default;

  AllowedRegVector(const std::vector<MCRegister> &OptVec)
      : NumOpts(OptVec.size()), Opts(new MCRegister[NumOpts]) {
    std::copy(OptVec.begin(), OptVec.end(), Opts.get());
  }

  unsigned size() const { return NumOpts; }
  MCRegister operator[](size_t I) const { return Opts[I]; }

  bool operator==(const AllowedRegVector &Other) const {
    if (NumOpts != Other.NumOpts)
      return false;
    return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
  }

  bool operator!=(const AllowedRegVector &Other) const {
    return !(*this == Other);
  }

private:
  unsigned NumOpts = 0;
  std::unique_ptr<MCRegister[]> Opts;
};

inline hash_code hash_value(const AllowedRegVector &OptRegs) {
  MCRegister *OStart = OptRegs.Opts.get();
  MCRegister *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
  return hash_combine(OptRegs.NumOpts,
                      hash_combine_range(OStart, OEnd));
}

/// Holds graph-level metadata relevant to PBQP RA problems.
class GraphMetadata {
private:
  using AllowedRegVecPool = ValuePool<AllowedRegVector>;

public:
  using AllowedRegVecRef = AllowedRegVecPool::PoolRef;

  GraphMetadata(MachineFunction &MF,
                LiveIntervals &LIS,
                MachineBlockFrequencyInfo &MBFI)
    : MF(MF), LIS(LIS), MBFI(MBFI) {}

  MachineFunction &MF;
  LiveIntervals &LIS;
  MachineBlockFrequencyInfo &MBFI;

  void setNodeIdForVReg(Register VReg, GraphBase::NodeId NId) {
    VRegToNodeId[VReg.id()] = NId;
  }

  GraphBase::NodeId getNodeIdForVReg(Register VReg) const {
    auto VRegItr = VRegToNodeId.find(VReg);
    if (VRegItr == VRegToNodeId.end())
      return GraphBase::invalidNodeId();
    return VRegItr->second;
  }

  AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
    return AllowedRegVecs.getValue(std::move(Allowed));
  }

private:
  DenseMap<Register, GraphBase::NodeId> VRegToNodeId;
  AllowedRegVecPool AllowedRegVecs;
};

/// Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
  using AllowedRegVector = RegAlloc::AllowedRegVector;

  // The node's reduction state. The order in this enum is important,
  // as it is assumed nodes can only progress up (i.e. towards being
  // optimally reducible) when reducing the graph.
  using ReductionState = enum {
    Unprocessed,
    NotProvablyAllocatable,
    ConservativelyAllocatable,
    OptimallyReducible
  };

  NodeMetadata() = default;

  NodeMetadata(const NodeMetadata &Other)
      : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
        OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
        AllowedRegs(Other.AllowedRegs)
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
        ,
        everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {
    if (NumOpts > 0) {
      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
                &OptUnsafeEdges[0]);
    }
  }

  NodeMetadata(NodeMetadata &&) = default;
  NodeMetadata& operator=(NodeMetadata &&) = default;

  void setVReg(Register VReg) { this->VReg = VReg; }
  Register getVReg() const { return VReg; }

  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
    this->AllowedRegs = std::move(AllowedRegs);
  }
  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }

  void setup(const Vector& Costs) {
    NumOpts = Costs.getLength() - 1;
    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
  }

  ReductionState getReductionState() const { return RS; }
  void setReductionState(ReductionState RS) {
    assert(RS >= this->RS && "A node's reduction state cannot be downgraded");
    this->RS = RS;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    // Remember this state to assert later that a non-infinite register
    // option was available.
    if (RS == ConservativelyAllocatable)
      everConservativelyAllocatable = true;
#endif
  }

  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] += UnsafeOpts[i];
  }

  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] -= UnsafeOpts[i];
  }

  // A node is conservatively allocatable if at least one register option is
  // guaranteed to survive its neighbors' worst-case choices: either fewer
  // options can be denied (DeniedOpts) than exist (NumOpts), or some option
  // has no unsafe (infinite-cost) edges at all.
  bool isConservativelyAllocatable() const {
    return (DeniedOpts < NumOpts) ||
      (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
       &OptUnsafeEdges[NumOpts]);
  }

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool wasConservativelyAllocatable() const {
    return everConservativelyAllocatable;
  }
#endif

private:
  ReductionState RS = Unprocessed;
  unsigned NumOpts = 0;
  unsigned DeniedOpts = 0;
  std::unique_ptr<unsigned[]> OptUnsafeEdges;
  Register VReg;
  GraphMetadata::AllowedRegVecRef AllowedRegs;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool everConservativelyAllocatable = false;
#endif
};

class RegAllocSolverImpl {
private:
  using RAMatrix = MDMatrix<MatrixMetadata>;

public:
  using RawVector = PBQP::Vector;
  using RawMatrix = PBQP::Matrix;
  using Vector = PBQP::Vector;
  using Matrix = RAMatrix;
  using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;

  using NodeId = GraphBase::NodeId;
  using EdgeId = GraphBase::EdgeId;

  using NodeMetadata = RegAlloc::NodeMetadata;
  struct EdgeMetadata {};
  using GraphMetadata = RegAlloc::GraphMetadata;

  using Graph = PBQP::Graph<RegAllocSolverImpl>;

  RegAllocSolverImpl(Graph &G) : G(G) {}

  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  void handleAddNode(NodeId NId) {
    assert(G.getNodeCosts(NId).getLength() > 1 &&
           "PBQP Graph should not contain single or zero-option nodes");
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }

  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}

  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    promote(NId, NMd);
  }

  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
    // N1Id matches getEdgeNode1Id(EId) by construction, so Transpose is false
    // here: node 1's options index the cost matrix rows, node 2's the columns.
    bool Transpose = N1Id != G.getEdgeNode1Id(EId);

    // Metadata are computed incrementally. First, update them
    // by removing the old cost.
    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
    N1Md.handleRemoveEdge(OldMMd, Transpose);
    N2Md.handleRemoveEdge(OldMMd, !Transpose);

    // And update now the metadata with the new cost.
    const MatrixMetadata& MMd = NewCosts.getMetadata();
    N1Md.handleAddEdge(MMd, Transpose);
    N2Md.handleAddEdge(MMd, !Transpose);

    // As the metadata may have changed with the update, the nodes may have
    // become ConservativelyAllocatable or OptimallyReducible.
    promote(N1Id, N1Md);
    promote(N2Id, N2Md);
  }

private:
  void promote(NodeId NId, NodeMetadata& NMd) {
    if (G.getNodeDegree(NId) == 3) {
      // This node is becoming optimally reducible.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
               NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
    case NodeMetadata::Unprocessed: break;
    case NodeMetadata::OptimallyReducible:
      assert(OptimallyReducibleNodes.find(NId) !=
             OptimallyReducibleNodes.end() &&
             "Node not in optimally reducible set.");
      OptimallyReducibleNodes.erase(NId);
      break;
    case NodeMetadata::ConservativelyAllocatable:
      assert(ConservativelyAllocatableNodes.find(NId) !=
             ConservativelyAllocatableNodes.end() &&
             "Node not in conservatively allocatable set.");
      ConservativelyAllocatableNodes.erase(NId);
      break;
    case NodeMetadata::NotProvablyAllocatable:
      assert(NotProvablyAllocatableNodes.find(NId) !=
             NotProvablyAllocatableNodes.end() &&
             "Node not in not-provably-allocatable set.");
      NotProvablyAllocatableNodes.erase(NId);
      break;
    }
  }

  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::NotProvablyAllocatable);
  }

  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rules apply then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill-cost:degree ratio is
  // selected.
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    using NodeId = GraphBase::NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        switch (G.getNodeDegree(NId)) {
        case 0:
          break;
        case 1:
          applyR1(G, NId);
          break;
        case 2:
          applyR2(G, NId);
          break;
        default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else if (!NotProvablyAllocatableNodes.empty()) {
        NodeSet::iterator NItr =
          std::min_element(NotProvablyAllocatableNodes.begin(),
                           NotProvablyAllocatableNodes.end(),
                           SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph& G) : G(G) {}

    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
      if (N1SC == N2SC)
        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }

  private:
    const Graph& G;
  };

  Graph& G;
  using NodeSet = std::set<NodeId>;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};

class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
  using BaseT = PBQP::Graph<RegAllocSolverImpl>;

public:
  PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}

  /// Dump this graph to dbgs().
  void dump() const;

  /// Dump this graph to an output stream.
  /// @param OS Output stream to print on.
  void dump(raw_ostream &OS) const;

  /// Print a representation of this graph in DOT format.
  /// @param OS Output stream to print on.
  void printDot(raw_ostream &OS) const;
};

inline Solution solve(PBQPRAGraph& G) {
  if (G.empty())
    return Solution();
  RegAllocSolverImpl RegAllocSolver(G);
  return RegAllocSolver.solve();
}
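
// A minimal usage sketch (assumes a PBQPRAGraph G already populated by the
// PBQP allocator, one node per vreg and one edge per interference):
//
// \code
//   Solution S = solve(G);
//   for (auto NId : G.nodeIds()) {
//     unsigned Opt = S.getSelection(NId);
//     if (Opt == getSpillOptionIdx())
//       ; // spill the vreg G.getNodeMetadata(NId).getVReg()
//     else
//       ; // assign G.getNodeMetadata(NId).getAllowedRegs()[Opt - 1]
//   }
// \endcode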

} // end namespace RegAlloc
} // end namespace PBQP

/// Create a PBQP register allocator instance.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);

} // end namespace llvm

#endif // LLVM_CODEGEN_REGALLOCPBQP_H

//===- SwitchLoweringUtils.h - Switch Lowering ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SWITCHLOWERINGUTILS_H
#define LLVM_CODEGEN_SWITCHLOWERINGUTILS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/BranchProbability.h"
#include <vector>

namespace llvm {

class BlockFrequencyInfo;
class ConstantInt;
class DataLayout;
class FunctionLoweringInfo;
class MachineBasicBlock;
class ProfileSummaryInfo;
class TargetLowering;
class TargetMachine;

namespace SwitchCG {

enum CaseClusterKind {
  /// A cluster of adjacent case labels with the same destination, or just one
  /// case.
  CC_Range,
  /// A cluster of cases suitable for jump table lowering.
  CC_JumpTable,
  /// A cluster of cases suitable for bit test lowering.
  CC_BitTests
};

/// A cluster of case labels.
struct CaseCluster {
  CaseClusterKind Kind;
  const ConstantInt *Low, *High;
  union {
    MachineBasicBlock *MBB;
    unsigned JTCasesIndex;
    unsigned BTCasesIndex;
  };
  BranchProbability Prob;

  static CaseCluster range(const ConstantInt *Low, const ConstantInt *High,
                           MachineBasicBlock *MBB, BranchProbability Prob) {
    CaseCluster C;
    C.Kind = CC_Range;
    C.Low = Low;
    C.High = High;
    C.MBB = MBB;
    C.Prob = Prob;
    return C;
  }

  static CaseCluster jumpTable(const ConstantInt *Low, const ConstantInt *High,
                               unsigned JTCasesIndex, BranchProbability Prob) {
    CaseCluster C;
    C.Kind = CC_JumpTable;
    C.Low = Low;
    C.High = High;
    C.JTCasesIndex = JTCasesIndex;
    C.Prob = Prob;
    return C;
  }

  static CaseCluster bitTests(const ConstantInt *Low, const ConstantInt *High,
                              unsigned BTCasesIndex, BranchProbability Prob) {
    CaseCluster C;
    C.Kind = CC_BitTests;
    C.Low = Low;
    C.High = High;
    C.BTCasesIndex = BTCasesIndex;
    C.Prob = Prob;
    return C;
  }
};
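
// Example: a switch whose cases 0, 1 and 2 all branch to one block can be
// described by a single range cluster (CI0/CI2 and DestMBB are assumed to be
// the ConstantInts for 0 and 2 and the destination block):
//
// \code
//   CaseCluster C =
//       CaseCluster::range(CI0, CI2, DestMBB, BranchProbability(3, 10));
//   // C.Kind == CC_Range and C covers [0, 2] with the summed probability.
// \endcode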

using CaseClusterVector = std::vector<CaseCluster>;
using CaseClusterIt = CaseClusterVector::iterator;

/// Sort Clusters and merge adjacent cases.
void sortAndRangeify(CaseClusterVector &Clusters);
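
// For example, clusters {[1,1]->BB1, [0,0]->BB1, [3,3]->BB2} sort to
// {[0,0], [1,1], [3,3]}, after which the first two merge into [0,1]->BB1;
// [3,3] stays separate because the value 2 is not covered.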

struct CaseBits {
  uint64_t Mask = 0;
  MachineBasicBlock *BB = nullptr;
  unsigned Bits = 0;
  BranchProbability ExtraProb;

  CaseBits() = default;
  CaseBits(uint64_t mask, MachineBasicBlock *bb, unsigned bits,
           BranchProbability Prob)
      : Mask(mask), BB(bb), Bits(bits), ExtraProb(Prob) {}
};

using CaseBitsVector = std::vector<CaseBits>;

/// This structure is used to communicate between SelectionDAGBuilder and
/// SDISel for the code generation of additional basic blocks needed by
/// multi-case switch statements.
struct CaseBlock {
  // For the GISel interface.
  struct PredInfoPair {
    CmpInst::Predicate Pred;
    // Set when no comparison should be emitted.
    bool NoCmp;
  };
  union {
    // The condition code to use for the case block's setcc node.
    // Besides the integer condition codes, this can also be SETTRUE, in which
    // case no comparison gets emitted.
    ISD::CondCode CC;
    struct PredInfoPair PredInfo;
  };

  // The LHS/MHS/RHS of the comparison to emit.
  // By default the comparison emitted is LHS op RHS. MHS is used for range
  // comparisons: if MHS is not null, emit (LHS <= MHS) and (MHS <= RHS).
  const Value *CmpLHS, *CmpMHS, *CmpRHS;

  // The block to branch to if the setcc is true/false.
  MachineBasicBlock *TrueBB, *FalseBB;

  // The block into which to emit the code for the setcc and branches.
  MachineBasicBlock *ThisBB;

  /// The debug location of the instruction this CaseBlock was
  /// produced from.
  SDLoc DL;
  DebugLoc DbgLoc;

  // Branch weights.
  BranchProbability TrueProb, FalseProb;

  // Constructor for SelectionDAG.
  CaseBlock(ISD::CondCode cc, const Value *cmplhs, const Value *cmprhs,
            const Value *cmpmiddle, MachineBasicBlock *truebb,
            MachineBasicBlock *falsebb, MachineBasicBlock *me, SDLoc dl,
            BranchProbability trueprob = BranchProbability::getUnknown(),
            BranchProbability falseprob = BranchProbability::getUnknown())
      : CC(cc), CmpLHS(cmplhs), CmpMHS(cmpmiddle), CmpRHS(cmprhs),
        TrueBB(truebb), FalseBB(falsebb), ThisBB(me), DL(dl),
        TrueProb(trueprob), FalseProb(falseprob) {}

  // Constructor for GISel.
  CaseBlock(CmpInst::Predicate pred, bool nocmp, const Value *cmplhs,
            const Value *cmprhs, const Value *cmpmiddle,
            MachineBasicBlock *truebb, MachineBasicBlock *falsebb,
            MachineBasicBlock *me, DebugLoc dl,
            BranchProbability trueprob = BranchProbability::getUnknown(),
            BranchProbability falseprob = BranchProbability::getUnknown())
      : PredInfo({pred, nocmp}), CmpLHS(cmplhs), CmpMHS(cmpmiddle),
        CmpRHS(cmprhs), TrueBB(truebb), FalseBB(falsebb), ThisBB(me),
        DbgLoc(dl), TrueProb(trueprob), FalseProb(falseprob) {}
};

struct JumpTable {
  /// The virtual register containing the index of the jump table entry
  /// to jump to.
  unsigned Reg;
  /// The JumpTableIndex for this jump table in the function.
  unsigned JTI;
  /// The MBB into which to emit the code for the indirect jump.
  MachineBasicBlock *MBB;
  /// The MBB of the default bb, which is a successor of the range
  /// check MBB. This is used when updating PHI nodes in successors.
  MachineBasicBlock *Default;

  JumpTable(unsigned R, unsigned J, MachineBasicBlock *M, MachineBasicBlock *D)
      : Reg(R), JTI(J), MBB(M), Default(D) {}
};
struct JumpTableHeader {
  APInt First;
  APInt Last;
  const Value *SValue;
  MachineBasicBlock *HeaderBB;
  bool Emitted;
  bool FallthroughUnreachable = false;

  JumpTableHeader(APInt F, APInt L, const Value *SV, MachineBasicBlock *H,
                  bool E = false)
      : First(std::move(F)), Last(std::move(L)), SValue(SV), HeaderBB(H),
        Emitted(E) {}
};
using JumpTableBlock = std::pair<JumpTableHeader, JumpTable>;

struct BitTestCase {
  uint64_t Mask;
  MachineBasicBlock *ThisBB;
  MachineBasicBlock *TargetBB;
  BranchProbability ExtraProb;

  BitTestCase(uint64_t M, MachineBasicBlock *T, MachineBasicBlock *Tr,
              BranchProbability Prob)
      : Mask(M), ThisBB(T), TargetBB(Tr), ExtraProb(Prob) {}
};

using BitTestInfo = SmallVector<BitTestCase, 3>;

struct BitTestBlock {
  APInt First;
  APInt Range;
  const Value *SValue;
  unsigned Reg;
  MVT RegVT;
  bool Emitted;
  bool ContiguousRange;
  MachineBasicBlock *Parent;
  MachineBasicBlock *Default;
  BitTestInfo Cases;
  BranchProbability Prob;
  BranchProbability DefaultProb;
  bool FallthroughUnreachable = false;

  BitTestBlock(APInt F, APInt R, const Value *SV, unsigned Rg, MVT RgVT, bool E,
               bool CR, MachineBasicBlock *P, MachineBasicBlock *D,
               BitTestInfo C, BranchProbability Pr)
      : First(std::move(F)), Range(std::move(R)), SValue(SV), Reg(Rg),
        RegVT(RgVT), Emitted(E), ContiguousRange(CR), Parent(P), Default(D),
        Cases(std::move(C)), Prob(Pr) {}
};

/// Return the range of case values covered by clusters [First..Last].
uint64_t getJumpTableRange(const CaseClusterVector &Clusters, unsigned First,
                           unsigned Last);

/// Return the number of cases covered by clusters [First..Last].
uint64_t getJumpTableNumCases(const SmallVectorImpl<unsigned> &TotalCases,
                              unsigned First, unsigned Last);
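
// Together these feed the jump-table density check: clusters [First..Last]
// make a plausible jump table when NumCases / Range is high enough. For
// example, case values {0, 1, 2, 8} give 4 cases over a range of 9, i.e. a
// density of roughly 44%.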

struct SwitchWorkListItem {
  MachineBasicBlock *MBB = nullptr;
  CaseClusterIt FirstCluster;
  CaseClusterIt LastCluster;
  const ConstantInt *GE = nullptr;
  const ConstantInt *LT = nullptr;
  BranchProbability DefaultProb;
};
using SwitchWorkList = SmallVector<SwitchWorkListItem, 4>;

class SwitchLowering {
public:
  SwitchLowering(FunctionLoweringInfo &funcinfo) : FuncInfo(funcinfo) {}

  void init(const TargetLowering &tli, const TargetMachine &tm,
            const DataLayout &dl) {
    TLI = &tli;
    TM = &tm;
    DL = &dl;
  }

  /// Vector of CaseBlock structures used to communicate SwitchInst code
  /// generation information.
  std::vector<CaseBlock> SwitchCases;

  /// Vector of JumpTable structures used to communicate SwitchInst code
  /// generation information.
  std::vector<JumpTableBlock> JTCases;

  /// Vector of BitTestBlock structures used to communicate SwitchInst code
  /// generation information.
  std::vector<BitTestBlock> BitTestCases;

  void findJumpTables(CaseClusterVector &Clusters, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB,
                      ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI);

  bool buildJumpTable(const CaseClusterVector &Clusters, unsigned First,
                      unsigned Last, const SwitchInst *SI,
                      MachineBasicBlock *DefaultMBB, CaseCluster &JTCluster);

  void findBitTestClusters(CaseClusterVector &Clusters, const SwitchInst *SI);

  /// Build a bit test cluster from Clusters[First..Last]. Returns false if it
  /// decides it's not a good idea.
  bool buildBitTests(CaseClusterVector &Clusters, unsigned First, unsigned Last,
                     const SwitchInst *SI, CaseCluster &BTCluster);

  virtual void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown()) = 0;

  virtual ~SwitchLowering() = default;

private:
  const TargetLowering *TLI = nullptr;
  const TargetMachine *TM = nullptr;
  const DataLayout *DL = nullptr;
  FunctionLoweringInfo &FuncInfo;
};

} // namespace SwitchCG
} // namespace llvm

#endif // LLVM_CODEGEN_SWITCHLOWERINGUTILS_H

//===- MachineCombinerPattern.h - Combiner instruction patterns -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the instruction patterns supported by the machine combiner.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
#define LLVM_CODEGEN_MACHINECOMBINERPATTERN_H

namespace llvm {

/// These are instruction patterns matched by the machine combiner pass.
enum class MachineCombinerPattern {
  // These are commutative variants for reassociating a computation chain. See
  // the comments before getMachineCombinerPatterns() in TargetInstrInfo.cpp.
  REASSOC_AX_BY,
  REASSOC_AX_YB,
  REASSOC_XA_BY,
  REASSOC_XA_YB,

  // These are patterns matched by the PowerPC to reassociate FMA chains.
  REASSOC_XY_AMM_BMM,
  REASSOC_XMM_AMM_BMM,

  // These are patterns matched by the PowerPC to reassociate FMA and FSUB to
  // reduce register pressure.
  REASSOC_XY_BCA,
  REASSOC_XY_BAC,

  // These are patterns used to reduce the length of dependence chain.
  SUBADD_OP1,
  SUBADD_OP2,

  // These are multiply-add patterns matched by the AArch64 machine combiner.
  MULADDW_OP1,
  MULADDW_OP2,
  MULSUBW_OP1,
  MULSUBW_OP2,
  MULADDWI_OP1,
  MULSUBWI_OP1,
  MULADDX_OP1,
  MULADDX_OP2,
  MULSUBX_OP1,
  MULSUBX_OP2,
  MULADDXI_OP1,
  MULSUBXI_OP1,
  // NEON integer vectors
  MULADDv8i8_OP1,
  MULADDv8i8_OP2,
  MULADDv16i8_OP1,
  MULADDv16i8_OP2,
  MULADDv4i16_OP1,
  MULADDv4i16_OP2,
  MULADDv8i16_OP1,
  MULADDv8i16_OP2,
  MULADDv2i32_OP1,
  MULADDv2i32_OP2,
  MULADDv4i32_OP1,
  MULADDv4i32_OP2,

  MULSUBv8i8_OP1,
  MULSUBv8i8_OP2,
  MULSUBv16i8_OP1,
  MULSUBv16i8_OP2,
  MULSUBv4i16_OP1,
  MULSUBv4i16_OP2,
  MULSUBv8i16_OP1,
  MULSUBv8i16_OP2,
  MULSUBv2i32_OP1,
  MULSUBv2i32_OP2,
  MULSUBv4i32_OP1,
  MULSUBv4i32_OP2,

  MULADDv4i16_indexed_OP1,
  MULADDv4i16_indexed_OP2,
  MULADDv8i16_indexed_OP1,
  MULADDv8i16_indexed_OP2,
  MULADDv2i32_indexed_OP1,
  MULADDv2i32_indexed_OP2,
  MULADDv4i32_indexed_OP1,
  MULADDv4i32_indexed_OP2,

  MULSUBv4i16_indexed_OP1,
  MULSUBv4i16_indexed_OP2,
  MULSUBv8i16_indexed_OP1,
  MULSUBv8i16_indexed_OP2,
  MULSUBv2i32_indexed_OP1,
  MULSUBv2i32_indexed_OP2,
  MULSUBv4i32_indexed_OP1,
  MULSUBv4i32_indexed_OP2,

  // Floating Point
  FMULADDH_OP1,
  FMULADDH_OP2,
  FMULSUBH_OP1,
  FMULSUBH_OP2,
  FMULADDS_OP1,
  FMULADDS_OP2,
  FMULSUBS_OP1,
  FMULSUBS_OP2,
  FMULADDD_OP1,
  FMULADDD_OP2,
  FMULSUBD_OP1,
  FMULSUBD_OP2,
  FNMULSUBH_OP1,
  FNMULSUBS_OP1,
  FNMULSUBD_OP1,
  FMLAv1i32_indexed_OP1,
  FMLAv1i32_indexed_OP2,
  FMLAv1i64_indexed_OP1,
  FMLAv1i64_indexed_OP2,
  FMLAv4f16_OP1,
  FMLAv4f16_OP2,
  FMLAv8f16_OP1,
  FMLAv8f16_OP2,
  FMLAv2f32_OP2,
  FMLAv2f32_OP1,
  FMLAv2f64_OP1,
  FMLAv2f64_OP2,
  FMLAv4i16_indexed_OP1,
  FMLAv4i16_indexed_OP2,
  FMLAv8i16_indexed_OP1,
  FMLAv8i16_indexed_OP2,
  FMLAv2i32_indexed_OP1,
  FMLAv2i32_indexed_OP2,
  FMLAv2i64_indexed_OP1,
  FMLAv2i64_indexed_OP2,
  FMLAv4f32_OP1,
  FMLAv4f32_OP2,
  FMLAv4i32_indexed_OP1,
  FMLAv4i32_indexed_OP2,
  FMLSv1i32_indexed_OP2,
  FMLSv1i64_indexed_OP2,
  FMLSv4f16_OP1,
  FMLSv4f16_OP2,
  FMLSv8f16_OP1,
  FMLSv8f16_OP2,
  FMLSv2f32_OP1,
  FMLSv2f32_OP2,
  FMLSv2f64_OP1,
  FMLSv2f64_OP2,
  FMLSv4i16_indexed_OP1,
  FMLSv4i16_indexed_OP2,
  FMLSv8i16_indexed_OP1,
  FMLSv8i16_indexed_OP2,
  FMLSv2i32_indexed_OP1,
  FMLSv2i32_indexed_OP2,
  FMLSv2i64_indexed_OP1,
  FMLSv2i64_indexed_OP2,
  FMLSv4f32_OP1,
  FMLSv4f32_OP2,
  FMLSv4i32_indexed_OP1,
  FMLSv4i32_indexed_OP2,

  FMULv2i32_indexed_OP1,
  FMULv2i32_indexed_OP2,
  FMULv2i64_indexed_OP1,
  FMULv2i64_indexed_OP2,
  FMULv4i16_indexed_OP1,
  FMULv4i16_indexed_OP2,
  FMULv4i32_indexed_OP1,
  FMULv4i32_indexed_OP2,
  FMULv8i16_indexed_OP1,
  FMULv8i16_indexed_OP2,

  // RISCV FMADD, FMSUB, FNMSUB patterns
  FMADD_AX,
  FMADD_XA,
  FMSUB,
  FNMSUB,

  // X86 VNNI
  DPWSSD,

  FNMADD,
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINECOMBINERPATTERN_H

//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PBQP_MATH_H
#define LLVM_CODEGEN_PBQP_MATH_H

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <memory>

namespace llvm {
namespace PBQP {

using PBQPNum = float;

/// PBQP Vector class.
class Vector {
  friend hash_code hash_value(const Vector &);

public:
  /// Construct a PBQP vector of the given size.
  explicit Vector(unsigned Length)
    : Length(Length), Data(std::make_unique<PBQPNum []>(Length)) {}

  /// Construct a PBQP vector with initializer.
  Vector(unsigned Length, PBQPNum InitVal)
    : Length(Length), Data(std::make_unique<PBQPNum []>(Length)) {
    std::fill(Data.get(), Data.get() + Length, InitVal);
  }

  /// Copy construct a PBQP vector.
  Vector(const Vector &V)
    : Length(V.Length), Data(std::make_unique<PBQPNum []>(Length)) {
    std::copy(V.Data.get(), V.Data.get() + Length, Data.get());
  }

  /// Move construct a PBQP vector.
  Vector(Vector &&V)
    : Length(V.Length), Data(std::move(V.Data)) {
    V.Length = 0;
  }

  /// Comparison operator.
  bool operator==(const Vector &V) const {
    assert(Length != 0 && Data && "Invalid vector");
    if (Length != V.Length)
      return false;
    return std::equal(Data.get(), Data.get() + Length, V.Data.get());
  }

  /// Return the length of the vector
  unsigned getLength() const {
    assert(Length != 0 && Data && "Invalid vector");
    return Length;
  }

  /// Element access.
  PBQPNum& operator[](unsigned Index) {
    assert(Length != 0 && Data && "Invalid vector");
    assert(Index < Length && "Vector element access out of bounds.");
    return Data[Index];
  }

  /// Const element access.
  const PBQPNum& operator[](unsigned Index) const {
    assert(Length != 0 && Data && "Invalid vector");
    assert(Index < Length && "Vector element access out of bounds.");
    return Data[Index];
  }

  /// Add another vector to this one.
  Vector& operator+=(const Vector &V) {
    assert(Length != 0 && Data && "Invalid vector");
    assert(Length == V.Length && "Vector length mismatch.");
    std::transform(Data.get(), Data.get() + Length, V.Data.get(), Data.get(),
                   std::plus<PBQPNum>());
    return *this;
  }

  /// Returns the index of the minimum value in this vector
  unsigned minIndex() const {
    assert(Length != 0 && Data && "Invalid vector");
    return std::min_element(Data.get(), Data.get() + Length) - Data.get();
  }

private:
  unsigned Length;
  std::unique_ptr<PBQPNum []> Data;
};

/// Return a hash_value for the given vector.
inline hash_code hash_value(const Vector &V) {
  unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data.get());
  unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data.get() + V.Length);
  return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
}

/// Output a textual representation of the given vector on the given
///        output stream.
template <typename OStream>
OStream& operator<<(OStream &OS, const Vector &V) {
  assert((V.getLength() != 0) && "Zero-length vector badness.");

  OS << "[ " << V[0];
  for (unsigned i = 1; i < V.getLength(); ++i)
    OS << ", " << V[i];
  OS << " ]";

  return OS;
}
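
// Example of basic Vector usage:
//
// \code
//   Vector V(3, 0.0);                // [ 0, 0, 0 ]
//   V[1] = 2.5;
//   Vector W(3, 1.0);
//   V += W;                          // V == [ 1, 3.5, 1 ]
//   unsigned MinIdx = V.minIndex();  // 0 (the first minimum wins)
// \endcode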

/// PBQP Matrix class
class Matrix {
private:
  friend hash_code hash_value(const Matrix &);

public:
  /// Construct a PBQP Matrix with the given dimensions.
  Matrix(unsigned Rows, unsigned Cols) :
    Rows(Rows), Cols(Cols), Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
  }

  /// Construct a PBQP Matrix with the given dimensions and initial
  /// value.
  Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
    : Rows(Rows), Cols(Cols),
      Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
    std::fill(Data.get(), Data.get() + (Rows * Cols), InitVal);
  }

  /// Copy construct a PBQP matrix.
  Matrix(const Matrix &M)
    : Rows(M.Rows), Cols(M.Cols),
      Data(std::make_unique<PBQPNum []>(Rows * Cols)) {
    std::copy(M.Data.get(), M.Data.get() + (Rows * Cols), Data.get());
  }

  /// Move construct a PBQP matrix.
  Matrix(Matrix &&M)
    : Rows(M.Rows), Cols(M.Cols), Data(std::move(M.Data)) {
    M.Rows = M.Cols = 0;
  }

  /// Comparison operator.
  bool operator==(const Matrix &M) const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    if (Rows != M.Rows || Cols != M.Cols)
      return false;
    return std::equal(Data.get(), Data.get() + (Rows * Cols), M.Data.get());
  }

  /// Return the number of rows in this matrix.
  unsigned getRows() const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    return Rows;
  }

  /// Return the number of cols in this matrix.
  unsigned getCols() const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    return Cols;
  }

  /// Matrix element access.
  PBQPNum* operator[](unsigned R) {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    assert(R < Rows && "Row out of bounds.");
    return Data.get() + (R * Cols);
  }

  /// Matrix element access.
  const PBQPNum* operator[](unsigned R) const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    assert(R < Rows && "Row out of bounds.");
    return Data.get() + (R * Cols);
  }

  /// Returns the given row as a vector.
  Vector getRowAsVector(unsigned R) const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    Vector V(Cols);
    for (unsigned C = 0; C < Cols; ++C)
      V[C] = (*this)[R][C];
    return V;
  }

  /// Returns the given column as a vector.
  Vector getColAsVector(unsigned C) const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    Vector V(Rows);
    for (unsigned R = 0; R < Rows; ++R)
      V[R] = (*this)[R][C];
    return V;
  }

  /// Matrix transpose.
  Matrix transpose() const {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    Matrix M(Cols, Rows);
    for (unsigned r = 0; r < Rows; ++r)
      for (unsigned c = 0; c < Cols; ++c)
        M[c][r] = (*this)[r][c];
    return M;
  }

  /// Add the given matrix to this one.
  Matrix& operator+=(const Matrix &M) {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    assert(Rows == M.Rows && Cols == M.Cols &&
           "Matrix dimensions mismatch.");
    std::transform(Data.get(), Data.get() + (Rows * Cols), M.Data.get(),
                   Data.get(), std::plus<PBQPNum>());
    return *this;
  }

  Matrix operator+(const Matrix &M) {
    assert(Rows != 0 && Cols != 0 && Data && "Invalid matrix");
    Matrix Tmp(*this);
    Tmp += M;
    return Tmp;
  }

private:
  unsigned Rows, Cols;
  std::unique_ptr<PBQPNum []> Data;
};

/// Return a hash_code for the given matrix.
inline hash_code hash_value(const Matrix &M) {
  unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data.get());
  unsigned *MEnd =
    reinterpret_cast<unsigned*>(M.Data.get() + (M.Rows * M.Cols));
  return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
}

/// Output a textual representation of the given matrix on the given
///        output stream.
template <typename OStream>
OStream& operator<<(OStream &OS, const Matrix &M) {
  assert((M.getRows() != 0) && "Zero-row matrix badness.");
  for (unsigned i = 0; i < M.getRows(); ++i)
    OS << M.getRowAsVector(i) << "\n";
  return OS;
}
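
// Example of basic Matrix usage:
//
// \code
//   Matrix M(2, 3, 0.0);
//   M[0][2] = 5.0;
//   Matrix T = M.transpose();           // 3x2; T[2][0] == 5.0
//   Vector Row0 = M.getRowAsVector(0);  // [ 0, 0, 5 ]
// \endcode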

template <typename Metadata>
class MDVector : public Vector {
public:
  MDVector(const Vector &v) : Vector(v), md(*this) {}
  MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }

  const Metadata& getMetadata() const { return md; }

private:
  Metadata md;
};

template <typename Metadata>
inline hash_code hash_value(const MDVector<Metadata> &V) {
  return hash_value(static_cast<const Vector&>(V));
}

template <typename Metadata>
class MDMatrix : public Matrix {
public:
  MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
  MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }

  const Metadata& getMetadata() const { return md; }

private:
  Metadata md;
};

template <typename Metadata>
inline hash_code hash_value(const MDMatrix<Metadata> &M) {
  return hash_value(static_cast<const Matrix&>(M));
}

} // end namespace PBQP
} // end namespace llvm

#endif // LLVM_CODEGEN_PBQP_MATH_H

//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines classes conforming to the PBQP cost value manager concept.
//
// Cost value managers are memory managers for PBQP cost values (vectors and
// matrices). PBQP graphs can grow very large (e.g. hundreds of thousands of
// edges on the largest function in SPEC2006), so cost values are pooled and
// shared rather than allocated separately for every node and edge.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H

#include "llvm/ADT/DenseSet.h"
#include <algorithm>
#include <cstdint>
#include <memory>

namespace llvm {
namespace PBQP {

template <typename ValueT> class ValuePool {
public:
  using PoolRef = std::shared_ptr<const ValueT>;

private:
  class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
  public:
    template <typename ValueKeyT>
    PoolEntry(ValuePool &Pool, ValueKeyT Value)
        : Pool(Pool), Value(std::move(Value)) {}

    ~PoolEntry() { Pool.removeEntry(this); }

    const ValueT &getValue() const { return Value; }

  private:
    ValuePool &Pool;
    ValueT Value;
  };

  class PoolEntryDSInfo {
  public:
    static inline PoolEntry *getEmptyKey() { return nullptr; }

    static inline PoolEntry *getTombstoneKey() {
      return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
    }

    template <typename ValueKeyT>
    static unsigned getHashValue(const ValueKeyT &C) {
      return hash_value(C);
    }

    static unsigned getHashValue(PoolEntry *P) {
      return getHashValue(P->getValue());
    }

    static unsigned getHashValue(const PoolEntry *P) {
      return getHashValue(P->getValue());
    }

    template <typename ValueKeyT1, typename ValueKeyT2>
    static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
      return C1 == C2;
    }

    template <typename ValueKeyT>
    static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
      if (P == getEmptyKey() || P == getTombstoneKey())
        return false;
      return isEqual(C, P->getValue());
    }

    static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
      if (P1 == getEmptyKey() || P1 == getTombstoneKey())
        return P1 == P2;
      return isEqual(P1->getValue(), P2);
    }
  };

  using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;

  EntrySetT EntrySet;

  void removeEntry(PoolEntry *P) { EntrySet.erase(P); }

public:
  template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
    typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);

    if (I != EntrySet.end())
      return PoolRef((*I)->shared_from_this(), &(*I)->getValue());

    auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
    EntrySet.insert(P.get());
    return PoolRef(P, &P->getValue());
  }
};
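
// Example: equal values are uniqued, so repeated lookups share a single
// entry (sketched here with the PBQP Vector type from Math.h, whose
// hash_value and operator== make it poolable):
//
// \code
//   ValuePool<Vector> Pool;
//   ValuePool<Vector>::PoolRef A = Pool.getValue(Vector(2, 1.0));
//   ValuePool<Vector>::PoolRef B = Pool.getValue(Vector(2, 1.0));
//   // A.get() == B.get(): both alias one pooled Vector. The entry is
//   // removed from the pool when the last PoolRef to it dies.
// \endcode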

template <typename VectorT, typename MatrixT> class PoolCostAllocator {
private:
  using VectorCostPool = ValuePool<VectorT>;
  using MatrixCostPool = ValuePool<MatrixT>;

public:
  using Vector = VectorT;
  using Matrix = MatrixT;
  using VectorPtr = typename VectorCostPool::PoolRef;
  using MatrixPtr = typename MatrixCostPool::PoolRef;

  template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
    return VectorPool.getValue(std::move(v));
  }

  template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
    return MatrixPool.getValue(std::move(m));
  }

private:
  VectorCostPool VectorPool;
  MatrixCostPool MatrixPool;
};

} // end namespace PBQP
} // end namespace llvm

#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H

//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// PBQP Graph class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
#define LLVM_CODEGEN_PBQP_GRAPH_H

#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <vector>

namespace llvm {
namespace PBQP {

  class GraphBase {
  public:
    using NodeId = unsigned;
    using EdgeId = unsigned;

    /// Returns a value representing an invalid (non-existent) node.
    static NodeId invalidNodeId() {
      return std::numeric_limits<NodeId>::max();
    }

    /// Returns a value representing an invalid (non-existent) edge.
    static EdgeId invalidEdgeId() {
      return std::numeric_limits<EdgeId>::max();
    }
  };

  /// PBQP Graph class.
  /// Instances of this class describe PBQP problems.
  ///
  template <typename SolverT>
  class Graph : public GraphBase {
  private:
    using CostAllocator = typename SolverT::CostAllocator;

  public:
    using RawVector = typename SolverT::RawVector;
    using RawMatrix = typename SolverT::RawMatrix;
    using Vector = typename SolverT::Vector;
    using Matrix = typename SolverT::Matrix;
    using VectorPtr = typename CostAllocator::VectorPtr;
    using MatrixPtr = typename CostAllocator::MatrixPtr;
    using NodeMetadata = typename SolverT::NodeMetadata;
    using EdgeMetadata = typename SolverT::EdgeMetadata;
    using GraphMetadata = typename SolverT::GraphMetadata;

  private:
    class NodeEntry {
    public:
      using AdjEdgeList = std::vector<EdgeId>;
      using AdjEdgeIdx = AdjEdgeList::size_type;
      using AdjEdgeItr = AdjEdgeList::const_iterator;

      NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}

      static AdjEdgeIdx getInvalidAdjEdgeIdx() {
        return std::numeric_limits<AdjEdgeIdx>::max();
      }

      AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
        AdjEdgeIdx Idx = AdjEdgeIds.size();
        AdjEdgeIds.push_back(EId);
        return Idx;
      }

      void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
        // Swap-and-pop for fast removal.
        //   1) Update the adj index of the edge currently at back().
        //   2) Move last Edge down to Idx.
        //   3) pop_back()
        // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
        // redundant, but both operations are cheap.
        G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
        AdjEdgeIds[Idx] = AdjEdgeIds.back();
        AdjEdgeIds.pop_back();
      }

      const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }

      VectorPtr Costs;
      NodeMetadata Metadata;

    private:
      AdjEdgeList AdjEdgeIds;
    };

    class EdgeEntry {
    public:
      EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
          : Costs(std::move(Costs)) {
        NIds[0] = N1Id;
        NIds[1] = N2Id;
        ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
        ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
      }

      void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
        assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
               "Edge already connected to NIds[NIdx].");
        NodeEntry &N = G.getNode(NIds[NIdx]);
        ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
      }

      void connect(Graph &G, EdgeId ThisEdgeId) {
        connectToN(G, ThisEdgeId, 0);
        connectToN(G, ThisEdgeId, 1);
      }

      void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
        if (NId == NIds[0])
          ThisEdgeAdjIdxs[0] = NewIdx;
        else {
          assert(NId == NIds[1] && "Edge not connected to NId");
          ThisEdgeAdjIdxs[1] = NewIdx;
        }
      }

      void disconnectFromN(Graph &G, unsigned NIdx) {
        assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
               "Edge not connected to NIds[NIdx].");
        NodeEntry &N = G.getNode(NIds[NIdx]);
        N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
        ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
      }

      void disconnectFrom(Graph &G, NodeId NId) {
        if (NId == NIds[0])
          disconnectFromN(G, 0);
        else {
          assert(NId == NIds[1] && "Edge does not connect NId");
          disconnectFromN(G, 1);
        }
      }

      NodeId getN1Id() const { return NIds[0]; }
      NodeId getN2Id() const { return NIds[1]; }

      MatrixPtr Costs;
      EdgeMetadata Metadata;

    private:
      NodeId NIds[2];
      typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
    };

    // ----- MEMBERS -----

    GraphMetadata Metadata;
    CostAllocator CostAlloc;
    SolverT *Solver = nullptr;

    using NodeVector = std::vector<NodeEntry>;
    using FreeNodeVector = std::vector<NodeId>;
    NodeVector Nodes;
    FreeNodeVector FreeNodeIds;

    using EdgeVector = std::vector<EdgeEntry>;
    using FreeEdgeVector = std::vector<EdgeId>;
    EdgeVector Edges;
    FreeEdgeVector FreeEdgeIds;

    Graph(const Graph &Other) = delete;

    // ----- INTERNAL METHODS -----

    NodeEntry &getNode(NodeId NId) {
      assert(NId < Nodes.size() && "Out of bound NodeId");
      return Nodes[NId];
    }
    const NodeEntry &getNode(NodeId NId) const {
      assert(NId < Nodes.size() && "Out of bound NodeId");
      return Nodes[NId];
    }

    EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
    const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }

    NodeId addConstructedNode(NodeEntry N) {
      NodeId NId = 0;
      if (!FreeNodeIds.empty()) {
        NId = FreeNodeIds.back();
        FreeNodeIds.pop_back();
        Nodes[NId] = std::move(N);
      } else {
        NId = Nodes.size();
        Nodes.push_back(std::move(N));
      }
      return NId;
    }

    EdgeId addConstructedEdge(EdgeEntry E) {
      assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
             "Attempt to add duplicate edge.");
      EdgeId EId = 0;
      if (!FreeEdgeIds.empty()) {
        EId = FreeEdgeIds.back();
        FreeEdgeIds.pop_back();
        Edges[EId] = std::move(E);
      } else {
        EId = Edges.size();
        Edges.push_back(std::move(E));
      }

      EdgeEntry &NE = getEdge(EId);

      // Add the edge to the adjacency sets of its nodes.
      NE.connect(*this, EId);
      return EId;
    }

    Graph &operator=(const Graph &Other) = delete;

  public:
    using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;

    class NodeItr {
    public:
      using iterator_category = std::forward_iterator_tag;
      using value_type = NodeId;
      using difference_type = int;
      using pointer = NodeId *;
      using reference = NodeId &;

      NodeItr(NodeId CurNId, const Graph &G)
        : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
        this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
      }

      bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
      bool operator!=(const NodeItr &O) const { return !(*this == O); }
      NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
      NodeId operator*() const { return CurNId; }

    private:
      NodeId findNextInUse(NodeId NId) const {
        while (NId < EndNId && is_contained(FreeNodeIds, NId)) {
          ++NId;
        }
        return NId;
      }

      NodeId CurNId, EndNId;
      const FreeNodeVector &FreeNodeIds;
    };

    class EdgeItr {
    public:
      EdgeItr(EdgeId CurEId, const Graph &G)
        : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
        this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
      }

      bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
      bool operator!=(const EdgeItr &O) const { return !(*this == O); }
      EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
      EdgeId operator*() const { return CurEId; }

    private:
      EdgeId findNextInUse(EdgeId EId) const {
        while (EId < EndEId && is_contained(FreeEdgeIds, EId)) {
          ++EId;
        }
        return EId;
      }

      EdgeId CurEId, EndEId;
      const FreeEdgeVector &FreeEdgeIds;
    };

    class NodeIdSet {
    public:
      NodeIdSet(const Graph &G) : G(G) {}

      NodeItr begin() const { return NodeItr(0, G); }
      NodeItr end() const { return NodeItr(G.Nodes.size(), G); }

      bool empty() const { return G.Nodes.empty(); }

      typename NodeVector::size_type size() const {
        return G.Nodes.size() - G.FreeNodeIds.size();
      }

    private:
      const Graph& G;
    };

    class EdgeIdSet {
    public:
      EdgeIdSet(const Graph &G) : G(G) {}

      EdgeItr begin() const { return EdgeItr(0, G); }
      EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }

      bool empty() const { return G.Edges.empty(); }

      typename EdgeVector::size_type size() const {
        return G.Edges.size() - G.FreeEdgeIds.size();
      }

    private:
      const Graph& G;
    };

    class AdjEdgeIdSet {
    public:
      AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}

      typename NodeEntry::AdjEdgeItr begin() const {
        return NE.getAdjEdgeIds().begin();
      }

      typename NodeEntry::AdjEdgeItr end() const {
        return NE.getAdjEdgeIds().end();
      }

      bool empty() const { return NE.getAdjEdgeIds().empty(); }

      typename NodeEntry::AdjEdgeList::size_type size() const {
        return NE.getAdjEdgeIds().size();
      }

    private:
      const NodeEntry &NE;
    };

    /// Construct an empty PBQP graph.
    Graph() = default;

    /// Construct an empty PBQP graph with the given graph metadata.
    Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}

    /// Get a reference to the graph metadata.
    GraphMetadata& getMetadata() { return Metadata; }

    /// Get a const-reference to the graph metadata.
    const GraphMetadata& getMetadata() const { return Metadata; }

    /// Lock this graph to the given solver instance in preparation
    /// for running the solver. This method will call solver.handleAddNode for
    /// each node in the graph, and handleAddEdge for each edge, to give the
    /// solver an opportunity to set up any required metadata.
    void setSolver(SolverT &S) {
      assert(!Solver && "Solver already set. Call unsetSolver().");
      Solver = &S;
      for (auto NId : nodeIds())
        Solver->handleAddNode(NId);
      for (auto EId : edgeIds())
        Solver->handleAddEdge(EId);
    }

    /// Release from solver instance.
    void unsetSolver() {
      assert(Solver && "Solver not set.");
      Solver = nullptr;
    }

    /// Add a node with the given costs.
    /// @param Costs Cost vector for the new node.
    /// @return Node id for the added node.
    template <typename OtherVectorT>
    NodeId addNode(OtherVectorT Costs) {
      // Get cost vector from the problem domain
      VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
      NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
      if (Solver)
        Solver->handleAddNode(NId);
      return NId;
    }

    /// Add a node bypassing the cost allocator.
    /// @param Costs Cost vector ptr for the new node (must be convertible to
    ///        VectorPtr).
    /// @return Node id for the added node.
    ///
    ///   This method allows for fast addition of a node whose costs don't need
    /// to be passed through the cost allocator. The most common use case for
    /// this is when duplicating costs from an existing node (when using a
    /// pooling allocator). These have already been uniqued, so we can avoid
    /// re-constructing and re-uniquing them by attaching them directly to the
    /// new node.
    template <typename OtherVectorPtrT>
    NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
      NodeId NId = addConstructedNode(NodeEntry(Costs));
      if (Solver)
        Solver->handleAddNode(NId);
      return NId;
    }

    /// Add an edge between the given nodes with the given costs.
    /// @param N1Id First node.
    /// @param N2Id Second node.
    /// @param Costs Cost matrix for new edge.
    /// @return Edge id for the added edge.
    template <typename OtherVectorT>
    EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherVectorT Costs) {
      assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
             getNodeCosts(N2Id).getLength() == Costs.getCols() &&
             "Matrix dimensions mismatch.");
      // Get cost matrix from the problem domain.
      MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
      EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
      if (Solver)
        Solver->handleAddEdge(EId);
      return EId;
    }

    /// Add an edge bypassing the cost allocator.
    /// @param N1Id First node.
    /// @param N2Id Second node.
    /// @param Costs Cost matrix for new edge.
    /// @return Edge id for the added edge.
    ///
    ///   This method allows for fast addition of an edge whose costs don't need
    /// to be passed through the cost allocator. The most common use case for
    /// this is when duplicating costs from an existing edge (when using a
    /// pooling allocator). These have already been uniqued, so we can avoid
    /// re-constructing and re-uniquing them by attaching them directly to the
    /// new edge.
    template <typename OtherMatrixPtrT>
    EdgeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
                                         OtherMatrixPtrT Costs) {
      assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
             getNodeCosts(N2Id).getLength() == Costs->getCols() &&
             "Matrix dimensions mismatch.");
      // Get cost matrix from the problem domain.
      EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
      if (Solver)
        Solver->handleAddEdge(EId);
      return EId;
    }
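
    // Example (sketch; MySolver is a hypothetical type satisfying the solver
    // concept, with Vector/Matrix taken from PBQP's Math.h):
    //
    // \code
    //   Graph<MySolver> G;
    //   auto N1 = G.addNode(Vector(2, 0.0));  // node with 2 options
    //   auto N2 = G.addNode(Vector(3, 0.0));  // node with 3 options
    //   auto E  = G.addEdge(N1, N2, Matrix(2, 3, 0.0));
    //   // addEdge asserts rows/cols match the two nodes' option counts.
    // \endcode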

    /// Returns true if the graph is empty.
    bool empty() const { return NodeIdSet(*this).empty(); }

    NodeIdSet nodeIds() const { return NodeIdSet(*this); }
    EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }

    AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }

    /// Get the number of nodes in the graph.
    /// @return Number of nodes in the graph.
    unsigned getNumNodes() const { return NodeIdSet(*this).size(); }

    /// Get the number of edges in the graph.
    /// @return Number of edges in the graph.
    unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }

    /// Set a node's cost vector.
    /// @param NId Node to update.
    /// @param Costs New costs to set.
    template <typename OtherVectorT>
    void setNodeCosts(NodeId NId, OtherVectorT Costs) {
      VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
      if (Solver)
        Solver->handleSetNodeCosts(NId, *AllocatedCosts);
      getNode(NId).Costs = AllocatedCosts;
    }

    /// Get a VectorPtr to a node's cost vector. Rarely useful - use
    ///        getNodeCosts where possible.
    /// @param NId Node id.
    /// @return VectorPtr to node cost vector.
    ///
    ///   This method is primarily useful for duplicating costs quickly by
    /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
    /// getNodeCosts when dealing with node cost values.
    const VectorPtr& getNodeCostsPtr(NodeId NId) const {
      return getNode(NId).Costs;
    }

    /// Get a node's cost vector.
    /// @param NId Node id.
    /// @return Node cost vector.
    const Vector& getNodeCosts(NodeId NId) const {
      return *getNodeCostsPtr(NId);
    }

    NodeMetadata& getNodeMetadata(NodeId NId) {
      return getNode(NId).Metadata;
    }

    const NodeMetadata& getNodeMetadata(NodeId NId) const {
      return getNode(NId).Metadata;
    }

    typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
      return getNode(NId).getAdjEdgeIds().size();
    }

    /// Update an edge's cost matrix.
    /// @param EId Edge id.
    /// @param Costs New cost matrix.
    template <typename OtherMatrixT>
    void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
      MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
      if (Solver)
        Solver->handleUpdateCosts(EId, *AllocatedCosts);
      getEdge(EId).Costs = AllocatedCosts;
    }

    /// Get a MatrixPtr to an edge's cost matrix. Rarely useful - use
    ///        getEdgeCosts where possible.
    /// @param EId Edge id.
    /// @return MatrixPtr to edge cost matrix.
    ///
    ///   This method is primarily useful for duplicating costs quickly by
    /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
    /// getEdgeCosts when dealing with edge cost values.
    const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
      return getEdge(EId).Costs;
    }

    /// Get an edge's cost matrix.
    /// @param EId Edge id.
    /// @return Edge cost matrix.
    const Matrix& getEdgeCosts(EdgeId EId) const {
      return *getEdge(EId).Costs;
    }

    EdgeMetadata& getEdgeMetadata(EdgeId EId) {
      return getEdge(EId).Metadata;
    }

    const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
      return getEdge(EId).Metadata;
    }

    /// Get the first node connected to this edge.
    /// @param EId Edge id.
    /// @return The first node connected to the given edge.
    NodeId getEdgeNode1Id(EdgeId EId) const {
      return getEdge(EId).getN1Id();
    }

    /// Get the second node connected to this edge.
    /// @param EId Edge id.
    /// @return The second node connected to the given edge.
    NodeId getEdgeNode2Id(EdgeId EId) const {
      return getEdge(EId).getN2Id();
    }

    /// Get the "other" node connected to this edge.
    /// @param EId Edge id.
    /// @param NId Node id for the "given" node.
    /// @return The id of the "other" node connected to this edge.
    NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
      EdgeEntry &E = getEdge(EId);
      if (E.getN1Id() == NId)
        return E.getN2Id();
      return E.getN1Id();
    }

    /// Get the edge connecting two nodes.
    /// @param N1Id First node id.
    /// @param N2Id Second node id.
    /// @return An id for edge (N1Id, N2Id) if such an edge exists,
    ///         otherwise returns an invalid edge id.
    EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
      for (auto AEId : adjEdgeIds(N1Id)) {
        if ((getEdgeNode1Id(AEId) == N2Id) ||
            (getEdgeNode2Id(AEId) == N2Id)) {
          return AEId;
        }
      }
      return invalidEdgeId();
    }

    /// Remove a node from the graph.
    /// @param NId Node id.
    void removeNode(NodeId NId) {
      if (Solver)
        Solver->handleRemoveNode(NId);
      NodeEntry &N = getNode(NId);
      // TODO: Can this be for-each'd?
      for (AdjEdgeItr AEItr = N.adjEdgesBegin(),
             AEEnd = N.adjEdgesEnd();
           AEItr != AEEnd;) {
        EdgeId EId = *AEItr;
        ++AEItr;
        removeEdge(EId);
      }
      FreeNodeIds.push_back(NId);
    }

    /// Disconnect an edge from the given node.
    ///
    /// Removes the given edge from the adjacency list of the given node.
    /// This operation leaves the edge in an 'asymmetric' state: It will no
    /// longer appear in an iteration over the given node's (NId's) edges, but
    /// will appear in an iteration over the 'other', unnamed node's edges.
    ///
    /// This does not correspond to any normal graph operation, but exists to
    /// support efficient PBQP graph-reduction based solvers. It is used to
    /// 'effectively' remove the unnamed node from the graph while the solver
    /// is performing the reduction. The solver will later call reconnectNode
    /// to restore the edge in the named node's adjacency list.
    ///
    /// Since the degree of a node is the number of connected edges,
    /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
    /// drop by 1.
    ///
    /// A disconnected edge WILL still appear in an iteration over the graph
    /// edges.
    ///
    /// A disconnected edge should not be removed from the graph; it should be
    /// reconnected first.
    ///
    /// A disconnected edge can be reconnected by calling the reconnectEdge
    /// method.
    void disconnectEdge(EdgeId EId, NodeId NId) {
      if (Solver)
        Solver->handleDisconnectEdge(EId, NId);

      EdgeEntry &E = getEdge(EId);
      E.disconnectFrom(*this, NId);
    }

    /// Convenience method to disconnect all neighbors from the given node.
    void disconnectAllNeighborsFromNode(NodeId NId) {
      for (auto AEId : adjEdgeIds(NId))
        disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
    }

    /// Re-attach an edge to its nodes.
    ///
    /// Adds an edge that had been previously disconnected back into the
    /// adjacency set of the nodes that the edge connects.
    void reconnectEdge(EdgeId EId, NodeId NId) {
      EdgeEntry &E = getEdge(EId);
      E.connectTo(*this, EId, NId);
      if (Solver)
        Solver->handleReconnectEdge(EId, NId);
    }
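
    // Illustrative sketch of the disconnect/reconnect protocol (ids are
    // hypothetical): a reduction-based solver "hides" a node while solving,
    // then restores its edges during back-propagation.
    //
    //   G.disconnectAllNeighborsFromNode(NId);
    //   // NId's edges no longer appear in its neighbors' adjacency lists,
    //   // but they remain in NId's own list and in the graph's edge set.
    //   // ... solve the reduced problem ...
    //   for (auto AEId : G.adjEdgeIds(NId))
    //     G.reconnectEdge(AEId, G.getEdgeOtherNodeId(AEId, NId));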

    /// Remove an edge from the graph.
    /// @param EId Edge id.
    void removeEdge(EdgeId EId) {
      if (Solver)
        Solver->handleRemoveEdge(EId);
      EdgeEntry &E = getEdge(EId);
      E.disconnect();
      FreeEdgeIds.push_back(EId);
      Edges[EId].invalidate();
    }

    /// Remove all nodes and edges from the graph.
    void clear() {
      Nodes.clear();
      FreeNodeIds.clear();
      Edges.clear();
      FreeEdgeIds.clear();
    }
  };

} // end namespace PBQP
} // end namespace llvm

#endif // LLVM_CODEGEN_PBQP_GRAPH_H
//===- ReductionRules.h - Reduction Rules -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Reduction Rules.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
#define LLVM_CODEGEN_PBQP_REDUCTIONRULES_H

#include "Graph.h"
#include "Math.h"
#include "Solution.h"
#include <cassert>
#include <limits>

namespace llvm {
namespace PBQP {

  /// Reduce a node of degree one.
  ///
  /// Propagate costs from the given node, which must be of degree one, to its
  /// neighbor. Notify the problem domain.
  template <typename GraphT>
  void applyR1(GraphT &G, typename GraphT::NodeId NId) {
    using NodeId = typename GraphT::NodeId;
    using EdgeId = typename GraphT::EdgeId;
    using Vector = typename GraphT::Vector;
    using Matrix = typename GraphT::Matrix;
    using RawVector = typename GraphT::RawVector;

    assert(G.getNodeDegree(NId) == 1 &&
           "R1 applied to node with degree != 1.");

    EdgeId EId = *G.adjEdgeIds(NId).begin();
    NodeId MId = G.getEdgeOtherNodeId(EId, NId);

    const Matrix &ECosts = G.getEdgeCosts(EId);
    const Vector &XCosts = G.getNodeCosts(NId);
    RawVector YCosts = G.getNodeCosts(MId);

    // Duplicate a little to avoid transposing matrices.
    if (NId == G.getEdgeNode1Id(EId)) {
      for (unsigned j = 0; j < YCosts.getLength(); ++j) {
        PBQPNum Min = ECosts[0][j] + XCosts[0];
        for (unsigned i = 1; i < XCosts.getLength(); ++i) {
          PBQPNum C = ECosts[i][j] + XCosts[i];
          if (C < Min)
            Min = C;
        }
        YCosts[j] += Min;
      }
    } else {
      for (unsigned i = 0; i < YCosts.getLength(); ++i) {
        PBQPNum Min = ECosts[i][0] + XCosts[0];
        for (unsigned j = 1; j < XCosts.getLength(); ++j) {
          PBQPNum C = ECosts[i][j] + XCosts[j];
          if (C < Min)
            Min = C;
        }
        YCosts[i] += Min;
      }
    }
    G.setNodeCosts(MId, YCosts);
    G.disconnectEdge(EId, MId);
  }
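
  // Worked example for applyR1 (illustrative numbers): let NId have costs
  // X = [0, 1], its single edge have costs E = [[0, 2], [3, 1]] with NId as
  // the edge's first node (rows), and the neighbor have costs Y = [4, 5].
  // For each neighbor option j we add min_i(E[i][j] + X[i]):
  //   j = 0: min(0 + 0, 3 + 1) = 0  =>  Y[0] = 4 + 0 = 4
  //   j = 1: min(2 + 0, 1 + 1) = 2  =>  Y[1] = 5 + 2 = 7
  // NId's optimal choice is recovered later by backpropagate().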

  template <typename GraphT>
  void applyR2(GraphT &G, typename GraphT::NodeId NId) {
    using NodeId = typename GraphT::NodeId;
    using EdgeId = typename GraphT::EdgeId;
    using Vector = typename GraphT::Vector;
    using Matrix = typename GraphT::Matrix;
    using RawMatrix = typename GraphT::RawMatrix;

    assert(G.getNodeDegree(NId) == 2 &&
           "R2 applied to node with degree != 2.");

    const Vector &XCosts = G.getNodeCosts(NId);

    typename GraphT::AdjEdgeItr AEItr = G.adjEdgeIds(NId).begin();
    EdgeId YXEId = *AEItr,
           ZXEId = *(++AEItr);

    NodeId YNId = G.getEdgeOtherNodeId(YXEId, NId),
           ZNId = G.getEdgeOtherNodeId(ZXEId, NId);

    bool FlipEdge1 = (G.getEdgeNode1Id(YXEId) == NId),
         FlipEdge2 = (G.getEdgeNode1Id(ZXEId) == NId);

    const Matrix *YXECosts = FlipEdge1 ?
      new Matrix(G.getEdgeCosts(YXEId).transpose()) :
      &G.getEdgeCosts(YXEId);

    const Matrix *ZXECosts = FlipEdge2 ?
      new Matrix(G.getEdgeCosts(ZXEId).transpose()) :
      &G.getEdgeCosts(ZXEId);

    unsigned XLen = XCosts.getLength(),
      YLen = YXECosts->getRows(),
      ZLen = ZXECosts->getRows();

    RawMatrix Delta(YLen, ZLen);

    for (unsigned i = 0; i < YLen; ++i) {
      for (unsigned j = 0; j < ZLen; ++j) {
        PBQPNum Min = (*YXECosts)[i][0] + (*ZXECosts)[j][0] + XCosts[0];
        for (unsigned k = 1; k < XLen; ++k) {
          PBQPNum C = (*YXECosts)[i][k] + (*ZXECosts)[j][k] + XCosts[k];
          if (C < Min) {
            Min = C;
          }
        }
        Delta[i][j] = Min;
      }
    }

    if (FlipEdge1)
      delete YXECosts;

    if (FlipEdge2)
      delete ZXECosts;

    EdgeId YZEId = G.findEdge(YNId, ZNId);

    if (YZEId == G.invalidEdgeId()) {
      YZEId = G.addEdge(YNId, ZNId, Delta);
    } else {
      const Matrix &YZECosts = G.getEdgeCosts(YZEId);
      if (YNId == G.getEdgeNode1Id(YZEId)) {
        G.updateEdgeCosts(YZEId, Delta + YZECosts);
      } else {
        G.updateEdgeCosts(YZEId, Delta.transpose() + YZECosts);
      }
    }

    G.disconnectEdge(YXEId, YNId);
    G.disconnectEdge(ZXEId, ZNId);

    // TODO: Try to normalize newly added/modified edge.
  }
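
  // Shape note (illustrative): with m options on Y, n on Z and k on X, the
  // incident matrices are (after any transposition) m x k and n x k, and the
  // new edge's matrix is the m x n Delta with
  //   Delta[i][j] = min_k(YX[i][k] + ZX[j][k] + X[k]).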

#ifndef NDEBUG
  // Does this cost vector have any register options?
  template <typename VectorT>
  bool hasRegisterOptions(const VectorT &V) {
    unsigned VL = V.getLength();

    // An empty or spill-only cost vector does not provide any register option.
    if (VL <= 1)
      return false;

    // If there are registers in the cost vector, but all of them have infinite
    // costs, then ... there is no available register.
    for (unsigned i = 1; i < VL; ++i)
      if (V[i] != std::numeric_limits<PBQP::PBQPNum>::infinity())
        return true;

    return false;
  }
#endif

  // Find a solution to a fully reduced graph by backpropagation.
  //
  // Given a graph and a reduction order, pop each node from the reduction
  // order and greedily compute a minimum solution based on the node costs, and
  // the dependent costs due to previously solved nodes.
  //
  // Note - This does not return the graph to its original (pre-reduction)
  //        state: the existing solvers destructively alter the node and edge
  //        costs. Given that, the backpropagate function doesn't attempt to
  //        replace the edges either, but leaves the graph in its reduced
  //        state.
  template <typename GraphT, typename StackT>
  Solution backpropagate(GraphT& G, StackT stack) {
    using NodeId = GraphBase::NodeId;
    using Matrix = typename GraphT::Matrix;
    using RawVector = typename GraphT::RawVector;

    Solution s;

    while (!stack.empty()) {
      NodeId NId = stack.back();
      stack.pop_back();

      RawVector v = G.getNodeCosts(NId);

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
      // Although a conservatively allocatable node can be allocated to a
      // register, spilling it may provide a lower cost solution. Assert here
      // that spilling is done by choice, not because there were no registers
      // available.
      if (G.getNodeMetadata(NId).wasConservativelyAllocatable())
        assert(hasRegisterOptions(v) && "A conservatively allocatable node "
                                        "must have available register options");
#endif

      for (auto EId : G.adjEdgeIds(NId)) {
        const Matrix& edgeCosts = G.getEdgeCosts(EId);
        if (NId == G.getEdgeNode1Id(EId)) {
          NodeId mId = G.getEdgeNode2Id(EId);
          v += edgeCosts.getColAsVector(s.getSelection(mId));
        } else {
          NodeId mId = G.getEdgeNode1Id(EId);
          v += edgeCosts.getRowAsVector(s.getSelection(mId));
        }
      }

      s.setSelection(NId, v.minIndex());
    }

    return s;
  }
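
  // Usage sketch (hedged; names are hypothetical): a solver records nodes in
  // the order it reduces them, then recovers selections in reverse.
  //
  //   std::vector<GraphBase::NodeId> ReductionOrder;
  //   // ... push each node onto ReductionOrder as applyR1/applyR2 remove
  //   // it from the problem ...
  //   Solution S = backpropagate(G, std::move(ReductionOrder));
  //   unsigned Choice = S.getSelection(SomeNId);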

} // end namespace PBQP
} // end namespace llvm

#endif // LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
//===- Solution.h - PBQP Solution -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// PBQP Solution class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PBQP_SOLUTION_H
#define LLVM_CODEGEN_PBQP_SOLUTION_H

#include "llvm/CodeGen/PBQP/Graph.h"
#include <cassert>
#include <map>

namespace llvm {
namespace PBQP {

  /// Represents a solution to a PBQP problem.
  ///
  /// To get the selection for each node in the problem, use the getSelection
  /// method.
  class Solution {
  private:
    using SelectionsMap = std::map<GraphBase::NodeId, unsigned>;
    SelectionsMap selections;

  public:
    /// Initialise an empty solution.
    Solution() = default;

    /// Set the selection for a given node.
    /// @param nodeId Node id.
    /// @param selection Selection for nodeId.
    void setSelection(GraphBase::NodeId nodeId, unsigned selection) {
      selections[nodeId] = selection;
    }

    /// Get a node's selection.
    /// @param nodeId Node id.
    /// @return The selection for nodeId.
    unsigned getSelection(GraphBase::NodeId nodeId) const {
      SelectionsMap::const_iterator sItr = selections.find(nodeId);
      assert(sItr != selections.end() && "No selection for node.");
      return sItr->second;
    }
  };

} // end namespace PBQP
} // end namespace llvm

#endif // LLVM_CODEGEN_PBQP_SOLUTION_H
//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is used as the common base
/// class for instruction schedulers. This encapsulates the scheduling DAG,
/// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
#define LLVM_CODEGEN_SCHEDULEDAG_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>

namespace llvm {

template <class GraphType> struct GraphTraits;
template<class Graph> class GraphWriter;
class LLVMTargetMachine;
class MachineFunction;
class MachineRegisterInfo;
class MCInstrDesc;
struct MCSchedClassDesc;
class SDNode;
class SUnit;
class ScheduleDAG;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

  /// Scheduling dependency. This represents one direction of an edge in the
  /// scheduling DAG.
  class SDep {
  public:
    /// These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only if
    // the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };

  private:
    /// A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// A union discriminated by the dependence kind.
    union {
      /// For Data, Anti, and Output dependencies, the associated register. For
      /// Data dependencies that don't currently have a register assigned, this
      /// is set to zero.
      unsigned Reg;

      /// Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// The time associated with this edge. Often this is just the value of the
    /// Latency field of the predecessor, however advanced models may provide
    /// additional information about specific edges.
    unsigned Latency = 0u;

  public:
    /// Constructs a null SDep. This is only for use by container classes which
    /// require default constructors. SUnits may not have null SDep edges.
    SDep() : Dep(nullptr, Data) {}

    /// Constructs an SDep with the specified values.
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }

    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }

    /// Returns true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const;

    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// Returns the latency value for this edge, which roughly means the
    /// minimum number of cycles that must elapse between the predecessor and
    /// the successor, given that they have this edge between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// Sets the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// Returns the SUnit to which this edge points.
    SUnit *getSUnit() const;

    /// Assigns the SUnit to which this edge points.
    void setSUnit(SUnit *SU);

    /// Returns an enum value representing the kind of the dependence.
    Kind getKind() const;

    /// Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// Tests if this is an Order dependence between two memory accesses
    /// where both sides of the dependence access memory in non-volatile and
    /// fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// Tests if this is an Order dependence that is marked as a barrier.
    bool isBarrier() const {
      return getKind() == Order && Contents.OrdKind == Barrier;
    }

    /// Tests if this could be any kind of memory dependence.
    bool isNormalMemoryOrBarrier() const {
      return (isNormalMemory() || isBarrier());
    }

    /// Tests if this is an Order dependence that is marked as
    /// "must alias", meaning that the SUnits at either end of the edge have a
    /// memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }

    /// Tests if this is a weak dependence. Weak dependencies are considered
    /// edges for height computation and other heuristics, but do not force
    /// ordering. Breaking a weak edge may require the scheduler to compensate,
    /// for example by inserting a copy.
    bool isWeak() const {
      return getKind() == Order && Contents.OrdKind >= Weak;
    }

    /// Tests if this is an Order dependence that is marked as
    /// "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// Tests if this is an Order dependence that is marked as "cluster",
    /// meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// Tests if this is a Data dependence that is associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// Returns the register associated with this edge. This is only valid on
    /// Data, Anti, and Output edges. On Data edges, this value may be zero,
    /// meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// Assigns the associated register for this edge. This is only valid on
    /// Data, Anti, and Output edges. On Anti and Output edges, this value must
    /// not be zero. On Data edges, the value may be zero, which would mean that
    /// no specific register is associated with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }

    void dump(const TargetRegisterInfo *TRI = nullptr) const;
  };
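
  // Illustrative examples of constructing dependence edges (the SUnit
  // pointers and the register number are hypothetical):
  //
  //   SDep DataDep(DefSU, SDep::Data, /*Reg=*/1);  // Latency defaults to 1.
  //   SDep AntiDep(UseSU, SDep::Anti, /*Reg=*/1);  // Reg must be non-zero.
  //   SDep OrderDep(OtherSU, SDep::Barrier);       // An Order dependence.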

  /// Scheduling unit. This is a node in the scheduling DAG.
  class SUnit {
  private:
    enum : unsigned { BoundaryID = ~0u };

    SDNode *Node = nullptr;        ///< Representative node.
    MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.

  public:
    SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
                               /// was cloned. (SD scheduling only)

    const MCSchedClassDesc *SchedClass =
        nullptr; ///< nullptr or resolved SchedClass.

    SmallVector<SDep, 4> Preds;  ///< All sunit predecessors.
    SmallVector<SDep, 4> Succs;  ///< All sunit successors.

    typedef SmallVectorImpl<SDep>::iterator pred_iterator;
    typedef SmallVectorImpl<SDep>::iterator succ_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
    typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;

    unsigned NodeNum = BoundaryID;     ///< Entry # of node in the node vector.
    unsigned NodeQueueId = 0;          ///< Queue id of node.
    unsigned NumPreds = 0;             ///< # of SDep::Data preds.
    unsigned NumSuccs = 0;             ///< # of SDep::Data succs.
    unsigned NumPredsLeft = 0;         ///< # of preds not scheduled.
    unsigned NumSuccsLeft = 0;         ///< # of succs not scheduled.
    unsigned WeakPredsLeft = 0;        ///< # of weak preds not scheduled.
    unsigned WeakSuccsLeft = 0;        ///< # of weak succs not scheduled.
    unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
    unsigned short Latency = 0;        ///< Node latency.
    bool isVRegCycle      : 1;         ///< May use and def the same vreg.
    bool isCall           : 1;         ///< Is a function call.
    bool isCallOp         : 1;         ///< Is a function call operand.
    bool isTwoAddress     : 1;         ///< Is a two-address instruction.
    bool isCommutable     : 1;         ///< Is a commutable instruction.
    bool hasPhysRegUses   : 1;         ///< Has physreg uses.
    bool hasPhysRegDefs   : 1;         ///< Has physreg defs that are being used.
    bool hasPhysRegClobbers : 1;       ///< Has any physreg defs, used or not.
    bool isPending        : 1;         ///< True once pending.
    bool isAvailable      : 1;         ///< True once available.
    bool isScheduled      : 1;         ///< True once scheduled.
    bool isScheduleHigh   : 1;         ///< True if preferable to schedule high.
    bool isScheduleLow    : 1;         ///< True if preferable to schedule low.
    bool isCloned         : 1;         ///< True if this node has been cloned.
    bool isUnbuffered     : 1;         ///< Uses an unbuffered resource.
    bool hasReservedResource : 1;      ///< Uses a reserved resource.
    Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.

  private:
    bool isDepthCurrent   : 1;         ///< True if Depth is current.
    bool isHeightCurrent  : 1;         ///< True if Height is current.
    unsigned Depth = 0;                ///< Node depth.
    unsigned Height = 0;               ///< Node height.

  public:
    unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
    unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.

    const TargetRegisterClass *CopyDstRC =
        nullptr; ///< Is a special copy node if != nullptr.
    const TargetRegisterClass *CopySrcRC = nullptr;

    /// Constructs an SUnit for pre-regalloc scheduling to represent an
    /// SDNode and any nodes flagged to it.
    SUnit(SDNode *node, unsigned nodenum)
      : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

    /// Constructs an SUnit for post-regalloc scheduling to represent a
    /// MachineInstr.
    SUnit(MachineInstr *instr, unsigned nodenum)
      : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
        isCallOp(false), isTwoAddress(false), isCommutable(false),
        hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
        isPending(false), isAvailable(false), isScheduled(false),
        isScheduleHigh(false), isScheduleLow(false), isCloned(false),
        isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
        isHeightCurrent(false) {}

    /// Constructs a placeholder SUnit.
    SUnit()
      : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
        isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
        hasPhysRegClobbers(false), isPending(false), isAvailable(false),
        isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
        isCloned(false), isUnbuffered(false), hasReservedResource(false),
        isDepthCurrent(false), isHeightCurrent(false) {}

    /// Boundary nodes are placeholders for the boundary of the
    /// scheduling region.
    ///
    /// BoundaryNodes can have DAG edges, including Data edges, but they do not
    /// correspond to schedulable entities (e.g. instructions) and do not have a
    /// valid ID. Consequently, always check for boundary nodes before accessing
    /// an associative data structure keyed on node ID.
    bool isBoundaryNode() const { return NodeNum == BoundaryID; }

    /// Assigns the representative SDNode for this SUnit. This may be used
    /// during pre-regalloc scheduling.
    void setNode(SDNode *N) {
      assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
      Node = N;
    }

    /// Returns the representative SDNode for this SUnit. This may be used
    /// during pre-regalloc scheduling.
    SDNode *getNode() const {
      assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
      return Node;
    }

    /// Returns true if this SUnit refers to a machine instruction as
    /// opposed to an SDNode.
    bool isInstr() const { return Instr; }

    /// Assigns the instruction for the SUnit. This may be used during
    /// post-regalloc scheduling.
    void setInstr(MachineInstr *MI) {
      assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
      Instr = MI;
    }

    /// Returns the representative MachineInstr for this SUnit. This may be used
    /// during post-regalloc scheduling.
    MachineInstr *getInstr() const {
      assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
      return Instr;
    }

    /// Adds the specified edge as a pred of the current node if not already
    /// present. It also adds the current node as a successor of the specified
    /// node.
    bool addPred(const SDep &D, bool Required = true);

    /// Adds a barrier edge to SU by calling addPred(), with latency 0
    /// generally or latency 1 for a store followed by a load.
    bool addPredBarrier(SUnit *SU) {
      SDep Dep(SU, SDep::Barrier);
      unsigned TrueMemOrderLatency =
        ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
      Dep.setLatency(TrueMemOrderLatency);
      return addPred(Dep);
    }

    /// Removes the specified edge as a pred of the current node if it exists.
    /// It also removes the current node as a successor of the specified node.
    void removePred(const SDep &D);

    /// Returns the depth of this node, which is the length of the maximum path
    /// up to any node which has no predecessors.
    unsigned getDepth() const {
      if (!isDepthCurrent)
        const_cast<SUnit *>(this)->ComputeDepth();
      return Depth;
    }

    /// Returns the height of this node, which is the length of the
    /// maximum path down to any node which has no successors.
    unsigned getHeight() const {
      if (!isHeightCurrent)
        const_cast<SUnit *>(this)->ComputeHeight();
      return Height;
    }

    /// If NewDepth is greater than this node's depth value, sets it to
    /// be the new depth value. This also recursively marks successor nodes
    /// dirty.
    void setDepthToAtLeast(unsigned NewDepth);

    /// If NewHeight is greater than this node's height value, set it to be
    /// the new height value. This also recursively marks predecessor nodes
    /// dirty.
    void setHeightToAtLeast(unsigned NewHeight);

    /// Sets a flag in this node to indicate that its stored Depth value
    /// will require recomputation the next time getDepth() is called.
    void setDepthDirty();

    /// Sets a flag in this node to indicate that its stored Height value
    /// will require recomputation the next time getHeight() is called.
    void setHeightDirty();

    /// Tests if node N is a predecessor of this node.
    bool isPred(const SUnit *N) const {
      for (const SDep &Pred : Preds)
        if (Pred.getSUnit() == N)
          return true;
      return false;
    }

    /// Tests if node N is a successor of this node.
    bool isSucc(const SUnit *N) const {
      for (const SDep &Succ : Succs)
        if (Succ.getSUnit() == N)
          return true;
      return false;
    }

    bool isTopReady() const {
      return NumPredsLeft == 0;
    }
    bool isBottomReady() const {
      return NumSuccsLeft == 0;
    }

    /// Orders this node's predecessor edges such that the critical path
    /// edge occurs first.
    void biasCriticalPath();

    void dumpAttributes() const;

  private:
    void ComputeDepth();
    void ComputeHeight();
  };

  /// Returns true if the specified SDep is equivalent except for latency.
  inline bool SDep::overlaps(const SDep &Other) const {
    if (Dep != Other.Dep)
      return false;
    switch (Dep.getInt()) {
    case Data:
    case Anti:
    case Output:
      return Contents.Reg == Other.Contents.Reg;
    case Order:
      return Contents.OrdKind == Other.Contents.OrdKind;
    }
    llvm_unreachable("Invalid dependency kind!");
  }

  /// Returns the SUnit to which this edge points.
  inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }

  /// Assigns the SUnit to which this edge points.
  inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }

  /// Returns an enum value representing the kind of the dependence.
  inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }

  //===--------------------------------------------------------------------===//

  /// This interface is used to plug different priority computation
  /// algorithms into the list scheduler. It implements the interface of a
  /// standard priority queue, where nodes are inserted in arbitrary order and
  /// returned in priority order.  The computation of the priority and the
  /// representation of the queue are totally up to the implementation to
  /// decide.
  class SchedulingPriorityQueue {
    virtual void anchor();

    unsigned CurCycle = 0;
    bool HasReadyFilter;

  public:
    SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}

    virtual ~SchedulingPriorityQueue() = default;

    virtual bool isBottomUp() const = 0;

    virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
    virtual void addNode(const SUnit *SU) = 0;
    virtual void updateNode(const SUnit *SU) = 0;
    virtual void releaseState() = 0;

    virtual bool empty() const = 0;

    bool hasReadyFilter() const { return HasReadyFilter; }

    virtual bool tracksRegPressure() const { return false; }

    virtual bool isReady(SUnit *) const {
      assert(!HasReadyFilter && "The ready filter must override isReady()");
      return true;
    }

    virtual void push(SUnit *U) = 0;

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (SUnit *SU : Nodes)
        push(SU);
    }

    virtual SUnit *pop() = 0;

    virtual void remove(SUnit *SU) = 0;

    virtual void dump(ScheduleDAG *) const {}

    /// As each node is scheduled, this method is invoked.  This allows the
    /// priority function to adjust the priority of related unscheduled nodes,
    /// for example.
    virtual void scheduledNode(SUnit *) {}

    virtual void unscheduledNode(SUnit *) {}

    void setCurCycle(unsigned Cycle) {
      CurCycle = Cycle;
    }

    unsigned getCurCycle() const {
      return CurCycle;
    }
  };
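
  // Minimal sketch of a concrete queue (hypothetical, for illustration only):
  // a LIFO strategy that ignores priorities entirely. Real implementations
  // order pops by a priority function. Assumes <algorithm> and <vector>.
  //
  //   struct LIFOQueue : SchedulingPriorityQueue {
  //     std::vector<SUnit *> Q;
  //     bool isBottomUp() const override { return true; }
  //     void initNodes(std::vector<SUnit> &) override {}
  //     void addNode(const SUnit *) override {}
  //     void updateNode(const SUnit *) override {}
  //     void releaseState() override { Q.clear(); }
  //     bool empty() const override { return Q.empty(); }
  //     void push(SUnit *SU) override { Q.push_back(SU); }
  //     SUnit *pop() override {
  //       if (Q.empty()) return nullptr;
  //       SUnit *SU = Q.back(); Q.pop_back(); return SU;
  //     }
  //     void remove(SUnit *SU) override {
  //       Q.erase(std::find(Q.begin(), Q.end(), SU));
  //     }
  //   };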

  class ScheduleDAG {
  public:
    const LLVMTargetMachine &TM;        ///< Target processor
    const TargetInstrInfo *TII;         ///< Target instruction information
    const TargetRegisterInfo *TRI;      ///< Target processor register info
    MachineFunction &MF;                ///< Machine function
    MachineRegisterInfo &MRI;           ///< Virtual/real register map
    std::vector<SUnit> SUnits;          ///< The scheduling units.
    SUnit EntrySU;                      ///< Special node for the region entry.
    SUnit ExitSU;                       ///< Special node for the region exit.

#ifdef NDEBUG
    static const bool StressSched = false;
#else
    bool StressSched;
#endif

    // This class is designed to be passed by reference only. Copy constructor
    // is declared as deleted here to make the derived classes have deleted
    // implicit-declared copy constructor, which suppresses the warnings from
    // static analyzer when the derived classes own resources that are freed in
    // their destructors, but don't have user-written copy constructors (rule
    // of three).
    ScheduleDAG(const ScheduleDAG &) = delete;
    ScheduleDAG &operator=(const ScheduleDAG &) = delete;

    explicit ScheduleDAG(MachineFunction &mf);

    virtual ~ScheduleDAG();

    /// Clears the DAG state (between regions).
    void clearDAG();

    /// Returns the MCInstrDesc of this SUnit.
    /// Returns NULL for SDNodes without a machine opcode.
    const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
      if (SU->isInstr()) return &SU->getInstr()->getDesc();
      return getNodeDesc(SU->getNode());
    }

    /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
    virtual void viewGraph(const Twine &Name, const Twine &Title);
    virtual void viewGraph();

    virtual void dumpNode(const SUnit &SU) const = 0;
    virtual void dump() const = 0;
    void dumpNodeName(const SUnit &SU) const;

    /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
    virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;

    /// Returns a label for the region of code covered by the DAG.
    virtual std::string getDAGName() const = 0;

    /// Adds custom features for a visualization of the ScheduleDAG.
    virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}

#ifndef NDEBUG
    /// Verifies that all SUnits were scheduled and that their state is
    /// consistent. Returns the number of scheduled SUnits.
    unsigned VerifyScheduledDAG(bool isBottomUp);
#endif

  protected:
    void dumpNodeAll(const SUnit &SU) const;

  private:
    /// Returns the MCInstrDesc of this SDNode or NULL.
    const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
  };

  class SUnitIterator {
    SUnit *Node;
    unsigned Operand;

    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SUnit;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    bool operator==(const SUnitIterator& x) const {
      return Operand == x.Operand;
    }
    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

    pointer operator*() const {
      return Node->Preds[Operand].getSUnit();
    }
    pointer operator->() const { return operator*(); }

    SUnitIterator& operator++() {                // Preincrement
      ++Operand;
      return *this;
    }
    SUnitIterator operator++(int) { // Postincrement
      SUnitIterator tmp = *this; ++*this; return tmp;
    }

    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
    static SUnitIterator end  (SUnit *N) {
      return SUnitIterator(N, (unsigned)N->Preds.size());
    }

    unsigned getOperand() const { return Operand; }
    const SUnit *getNode() const { return Node; }

    /// Tests if this is not an SDep::Data dependence.
    bool isCtrlDep() const {
      return getSDep().isCtrl();
    }
    bool isArtificialDep() const {
      return getSDep().isArtificial();
    }
    const SDep &getSDep() const {
      return Node->Preds[Operand];
    }
  };

  template <> struct GraphTraits<SUnit*> {
    typedef SUnit *NodeRef;
    typedef SUnitIterator ChildIteratorType;
    static NodeRef getEntryNode(SUnit *N) { return N; }
    static ChildIteratorType child_begin(NodeRef N) {
      return SUnitIterator::begin(N);
    }
    static ChildIteratorType child_end(NodeRef N) {
      return SUnitIterator::end(N);
    }
  };

  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.begin());
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.end());
    }
  };
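
  // Illustrative sketch: these specializations let LLVM's generic graph
  // algorithms walk the DAG through predecessor edges, e.g. (assuming
  // llvm/ADT/DepthFirstIterator.h is included):
  //
  //   for (SUnit *SU : depth_first(&SomeSU))
  //     ; // Visits SomeSU and, transitively, its predecessors.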

  /// This class can compute a topological ordering for SUnits and provides
  /// methods for dynamically updating the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  class ScheduleDAGTopologicalSort {
    /// A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    SUnit *ExitSU;

    // Have any new nodes been added?
    bool Dirty = false;

    // Outstanding added edges, that have not been applied to the ordering.
    SmallVector<std::pair<SUnit *, SUnit *>, 16> Updates;

    /// Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// A set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// Performs a DFS traversal and marks all nodes affected by the edge
    /// insertion. These nodes will later get new topological indexes by means
    /// of the Shift method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// Reassigns topological indexes for the nodes in the DAG to
    /// preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Assigns the topological index to the node n.
    void Allocate(int n, int index);

    /// Fix the ordering, by either recomputing from scratch or by applying
    /// any outstanding updates. Uses a heuristic to estimate what will be
    /// cheaper.
    void FixOrder();

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// Adds an SUnit without predecessors to the end of the topological order.
    /// It must also be the first new node added to the DAG.
    void AddSUnitWithoutPredecessors(const SUnit *SU);

    /// Creates the initial topological ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// Returns an array of SUs that are both in the successor
    /// subtree of StartSU and in the predecessor subtree of TargetSU.
    /// StartSU and TargetSU are not in the array.
    /// Success is false if TargetSU is not in the successor subtree of
    /// StartSU, else it is true.
    std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
                                 bool &Success);

    /// Checks if \p SU is reachable from \p TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// Returns true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// Updates the topological ordering to accommodate an edge to be
    /// added from SUnit \p X to SUnit \p Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// Queues an update to the topological ordering to accommodate an edge to
    /// be added from SUnit \p X to SUnit \p Y.
    void AddPredQueued(SUnit *Y, SUnit *X);

    /// Updates the topological ordering to accommodate the removal of the
    /// specified node \p N from the predecessors of the current node \p M.
    void RemovePred(SUnit *M, SUnit *N);

    /// Mark the ordering as temporarily broken, after a new node has been
    /// added.
    void MarkDirty() { Dirty = true; }

    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
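
  // Usage sketch (hedged; DAG, SU and TargetSU are hypothetical): checking
  // that a new artificial edge keeps the DAG acyclic before committing it.
  //
  //   ScheduleDAGTopologicalSort Topo(DAG.SUnits, &DAG.ExitSU);
  //   Topo.InitDAGTopologicalSorting();
  //   if (!Topo.WillCreateCycle(TargetSU, SU)) {
  //     TargetSU->addPred(SDep(SU, SDep::Artificial));
  //     Topo.AddPred(TargetSU, SU);  // Keep the ordering up to date.
  //   }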

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEDAG_H
//===- MIRPrinter.h - MIR serialization format printer ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the functions that print out the LLVM IR and the machine
// functions using the MIR serialization format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRPRINTER_H
#define LLVM_CODEGEN_MIRPRINTER_H

namespace llvm {

class MachineBasicBlock;
class MachineFunction;
class Module;
class raw_ostream;
template <typename T> class SmallVectorImpl;

/// Print LLVM IR using the MIR serialization format to the given output
/// stream.
void printMIR(raw_ostream &OS, const Module &M);

/// Print a machine function using the MIR serialization format to the given
/// output stream.
void printMIR(raw_ostream &OS, const MachineFunction &MF);
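
// Usage sketch (illustrative; MF stands for some MachineFunction in hand):
//
//   printMIR(llvm::errs(), MF);   // Dump one machine function as MIR.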

/// Determine a possible list of successors of a basic block based on the
/// basic block machine operands used inside the block. This should give
/// you the correct list of successor blocks in most cases except for things
/// like jump tables where the basic block references can't easily be found.
/// The MIRPrinter will skip printing successors if they match the result of
/// this function and the parser will use this function to construct a list if
/// it is missing.
void guessSuccessors(const MachineBasicBlock &MBB,
                     SmallVectorImpl<MachineBasicBlock*> &Result,
                     bool &IsFallthrough);

} // end namespace llvm

#endif
//===- MIRYamlMapping.h - Describe mapping between MIR and YAML--*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mapping between various MIR data structures and
// their corresponding YAML representation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRYAMLMAPPING_H
#define LLVM_CODEGEN_MIRYAMLMAPPING_H

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

namespace llvm {
namespace yaml {

/// A wrapper around std::string which contains a source range that's being
/// set during parsing.
struct StringValue {
  std::string Value;
  SMRange SourceRange;

  StringValue() = default;
  StringValue(std::string Value) : Value(std::move(Value)) {}
  StringValue(const char Val[]) : Value(Val) {}

  bool operator==(const StringValue &Other) const {
    return Value == Other.Value;
  }
};

template <> struct ScalarTraits<StringValue> {
  static void output(const StringValue &S, void *, raw_ostream &OS) {
    OS << S.Value;
  }

  static StringRef input(StringRef Scalar, void *Ctx, StringValue &S) {
    S.Value = Scalar.str();
    if (const auto *Node =
            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
      S.SourceRange = Node->getSourceRange();
    return "";
  }

  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};

struct FlowStringValue : StringValue {
  FlowStringValue() = default;
  FlowStringValue(std::string Value) : StringValue(std::move(Value)) {}
};

template <> struct ScalarTraits<FlowStringValue> {
  static void output(const FlowStringValue &S, void *, raw_ostream &OS) {
    return ScalarTraits<StringValue>::output(S, nullptr, OS);
  }

  static StringRef input(StringRef Scalar, void *Ctx, FlowStringValue &S) {
    return ScalarTraits<StringValue>::input(Scalar, Ctx, S);
  }

  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};

struct BlockStringValue {
  StringValue Value;

  bool operator==(const BlockStringValue &Other) const {
    return Value == Other.Value;
  }
};

template <> struct BlockScalarTraits<BlockStringValue> {
  static void output(const BlockStringValue &S, void *Ctx, raw_ostream &OS) {
    return ScalarTraits<StringValue>::output(S.Value, Ctx, OS);
  }

  static StringRef input(StringRef Scalar, void *Ctx, BlockStringValue &S) {
    return ScalarTraits<StringValue>::input(Scalar, Ctx, S.Value);
  }
};

/// A wrapper around unsigned which contains a source range that's being set
/// during parsing.
struct UnsignedValue {
  unsigned Value = 0;
  SMRange SourceRange;

  UnsignedValue() = default;
  UnsignedValue(unsigned Value) : Value(Value) {}

  bool operator==(const UnsignedValue &Other) const {
    return Value == Other.Value;
  }
};

template <> struct ScalarTraits<UnsignedValue> {
  static void output(const UnsignedValue &Value, void *Ctx, raw_ostream &OS) {
    return ScalarTraits<unsigned>::output(Value.Value, Ctx, OS);
  }

  static StringRef input(StringRef Scalar, void *Ctx, UnsignedValue &Value) {
    if (const auto *Node =
            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
      Value.SourceRange = Node->getSourceRange();
    return ScalarTraits<unsigned>::input(Scalar, Ctx, Value.Value);
  }

  static QuotingType mustQuote(StringRef Scalar) {
    return ScalarTraits<unsigned>::mustQuote(Scalar);
  }
};

template <> struct ScalarEnumerationTraits<MachineJumpTableInfo::JTEntryKind> {
  static void enumeration(yaml::IO &IO,
                          MachineJumpTableInfo::JTEntryKind &EntryKind) {
    IO.enumCase(EntryKind, "block-address",
                MachineJumpTableInfo::EK_BlockAddress);
    IO.enumCase(EntryKind, "gp-rel64-block-address",
                MachineJumpTableInfo::EK_GPRel64BlockAddress);
    IO.enumCase(EntryKind, "gp-rel32-block-address",
                MachineJumpTableInfo::EK_GPRel32BlockAddress);
    IO.enumCase(EntryKind, "label-difference32",
                MachineJumpTableInfo::EK_LabelDifference32);
    IO.enumCase(EntryKind, "inline", MachineJumpTableInfo::EK_Inline);
    IO.enumCase(EntryKind, "custom32", MachineJumpTableInfo::EK_Custom32);
  }
};

template <> struct ScalarTraits<MaybeAlign> {
  static void output(const MaybeAlign &Alignment, void *,
                     llvm::raw_ostream &out) {
    out << uint64_t(Alignment ? Alignment->value() : 0U);
  }
  static StringRef input(StringRef Scalar, void *, MaybeAlign &Alignment) {
    unsigned long long n;
    if (getAsUnsignedInteger(Scalar, 10, n))
      return "invalid number";
    if (n > 0 && !isPowerOf2_64(n))
      return "must be 0 or a power of two";
    Alignment = MaybeAlign(n);
    return StringRef();
  }
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template <> struct ScalarTraits<Align> {
  static void output(const Align &Alignment, void *, llvm::raw_ostream &OS) {
    OS << Alignment.value();
  }
  static StringRef input(StringRef Scalar, void *, Align &Alignment) {
    unsigned long long N;
    if (getAsUnsignedInteger(Scalar, 10, N))
      return "invalid number";
    if (!isPowerOf2_64(N))
      return "must be a power of two";
    Alignment = Align(N);
    return StringRef();
  }
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

} // end namespace yaml
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::StringValue)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::FlowStringValue)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::UnsignedValue)

namespace llvm {
namespace yaml {

struct VirtualRegisterDefinition {
  UnsignedValue ID;
  StringValue Class;
  StringValue PreferredRegister;

  // TODO: Serialize the target specific register hints.

  bool operator==(const VirtualRegisterDefinition &Other) const {
    return ID == Other.ID && Class == Other.Class &&
           PreferredRegister == Other.PreferredRegister;
  }
};

template <> struct MappingTraits<VirtualRegisterDefinition> {
  static void mapping(IO &YamlIO, VirtualRegisterDefinition &Reg) {
    YamlIO.mapRequired("id", Reg.ID);
    YamlIO.mapRequired("class", Reg.Class);
    YamlIO.mapOptional("preferred-register", Reg.PreferredRegister,
                       StringValue()); // Don't print out when it's empty.
  }

  static const bool flow = true;
};

struct MachineFunctionLiveIn {
  StringValue Register;
  StringValue VirtualRegister;

  bool operator==(const MachineFunctionLiveIn &Other) const {
    return Register == Other.Register &&
           VirtualRegister == Other.VirtualRegister;
  }
};

template <> struct MappingTraits<MachineFunctionLiveIn> {
  static void mapping(IO &YamlIO, MachineFunctionLiveIn &LiveIn) {
    YamlIO.mapRequired("reg", LiveIn.Register);
    YamlIO.mapOptional(
        "virtual-reg", LiveIn.VirtualRegister,
        StringValue()); // Don't print the virtual register when it's empty.
  }

  static const bool flow = true;
};

/// Serializable representation of a stack object from the MachineFrameInfo
/// class.
///
/// The flags 'isImmutable' and 'isAliased' aren't serialized, as they are
/// determined by the object's type and frame information flags.
/// Dead stack objects aren't serialized.
///
/// The 'isPreallocated' flag is determined by the local offset.
struct MachineStackObject {
  enum ObjectType { DefaultType, SpillSlot, VariableSized };
  UnsignedValue ID;
  StringValue Name;
  // TODO: Serialize unnamed LLVM alloca reference.
  ObjectType Type = DefaultType;
  int64_t Offset = 0;
  uint64_t Size = 0;
  MaybeAlign Alignment = std::nullopt;
  TargetStackID::Value StackID;
  StringValue CalleeSavedRegister;
  bool CalleeSavedRestored = true;
  std::optional<int64_t> LocalOffset;
  StringValue DebugVar;
  StringValue DebugExpr;
  StringValue DebugLoc;

  bool operator==(const MachineStackObject &Other) const {
    return ID == Other.ID && Name == Other.Name && Type == Other.Type &&
           Offset == Other.Offset && Size == Other.Size &&
           Alignment == Other.Alignment &&
           StackID == Other.StackID &&
           CalleeSavedRegister == Other.CalleeSavedRegister &&
           CalleeSavedRestored == Other.CalleeSavedRestored &&
           LocalOffset == Other.LocalOffset && DebugVar == Other.DebugVar &&
           DebugExpr == Other.DebugExpr && DebugLoc == Other.DebugLoc;
  }
};

template <> struct ScalarEnumerationTraits<MachineStackObject::ObjectType> {
  static void enumeration(yaml::IO &IO, MachineStackObject::ObjectType &Type) {
    IO.enumCase(Type, "default", MachineStackObject::DefaultType);
    IO.enumCase(Type, "spill-slot", MachineStackObject::SpillSlot);
    IO.enumCase(Type, "variable-sized", MachineStackObject::VariableSized);
  }
};

template <> struct MappingTraits<MachineStackObject> {
  static void mapping(yaml::IO &YamlIO, MachineStackObject &Object) {
    YamlIO.mapRequired("id", Object.ID);
    YamlIO.mapOptional("name", Object.Name,
                       StringValue()); // Don't print out an empty name.
    YamlIO.mapOptional(
        "type", Object.Type,
        MachineStackObject::DefaultType); // Don't print the default type.
    YamlIO.mapOptional("offset", Object.Offset, (int64_t)0);
    if (Object.Type != MachineStackObject::VariableSized)
      YamlIO.mapRequired("size", Object.Size);
    YamlIO.mapOptional("alignment", Object.Alignment, std::nullopt);
    YamlIO.mapOptional("stack-id", Object.StackID, TargetStackID::Default);
    YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
                       true);
    YamlIO.mapOptional("local-offset", Object.LocalOffset,
                       std::optional<int64_t>());
    YamlIO.mapOptional("debug-info-variable", Object.DebugVar,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("debug-info-expression", Object.DebugExpr,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("debug-info-location", Object.DebugLoc,
                       StringValue()); // Don't print it out when it's empty.
  }

  static const bool flow = true;
};
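
// Example of the flow-mapped YAML this produces/consumes (field values are
// hypothetical; defaulted fields are omitted when printing):
//
//   { id: 0, name: buf, type: spill-slot, offset: -16, size: 8, alignment: 8 }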

/// Serializable representation of the MCRegister variant of
/// MachineFunction::VariableDbgInfo.
struct EntryValueObject {
  StringValue EntryValueRegister;
  StringValue DebugVar;
  StringValue DebugExpr;
  StringValue DebugLoc;
  bool operator==(const EntryValueObject &Other) const {
    return EntryValueRegister == Other.EntryValueRegister &&
           DebugVar == Other.DebugVar && DebugExpr == Other.DebugExpr &&
           DebugLoc == Other.DebugLoc;
  }
};

template <> struct MappingTraits<EntryValueObject> {
  static void mapping(yaml::IO &YamlIO, EntryValueObject &Object) {
    YamlIO.mapRequired("entry-value-register", Object.EntryValueRegister);
    YamlIO.mapRequired("debug-info-variable", Object.DebugVar);
    YamlIO.mapRequired("debug-info-expression", Object.DebugExpr);
    YamlIO.mapRequired("debug-info-location", Object.DebugLoc);
  }
  static const bool flow = true;
};

/// Serializable representation of the fixed stack object from the
/// MachineFrameInfo class.
struct FixedMachineStackObject {
  enum ObjectType { DefaultType, SpillSlot };
  UnsignedValue ID;
  ObjectType Type = DefaultType;
  int64_t Offset = 0;
  uint64_t Size = 0;
  MaybeAlign Alignment = std::nullopt;
  TargetStackID::Value StackID;
  bool IsImmutable = false;
  bool IsAliased = false;
  StringValue CalleeSavedRegister;
  bool CalleeSavedRestored = true;
  StringValue DebugVar;
  StringValue DebugExpr;
  StringValue DebugLoc;

  bool operator==(const FixedMachineStackObject &Other) const {
    return ID == Other.ID && Type == Other.Type && Offset == Other.Offset &&
           Size == Other.Size && Alignment == Other.Alignment &&
           StackID == Other.StackID &&
           IsImmutable == Other.IsImmutable && IsAliased == Other.IsAliased &&
           CalleeSavedRegister == Other.CalleeSavedRegister &&
           CalleeSavedRestored == Other.CalleeSavedRestored &&
           DebugVar == Other.DebugVar && DebugExpr == Other.DebugExpr &&
           DebugLoc == Other.DebugLoc;
  }
};

template <>
struct ScalarEnumerationTraits<FixedMachineStackObject::ObjectType> {
  static void enumeration(yaml::IO &IO,
                          FixedMachineStackObject::ObjectType &Type) {
    IO.enumCase(Type, "default", FixedMachineStackObject::DefaultType);
    IO.enumCase(Type, "spill-slot", FixedMachineStackObject::SpillSlot);
  }
};

template <>
struct ScalarEnumerationTraits<TargetStackID::Value> {
  static void enumeration(yaml::IO &IO, TargetStackID::Value &ID) {
    IO.enumCase(ID, "default", TargetStackID::Default);
    IO.enumCase(ID, "sgpr-spill", TargetStackID::SGPRSpill);
    IO.enumCase(ID, "scalable-vector", TargetStackID::ScalableVector);
    IO.enumCase(ID, "wasm-local", TargetStackID::WasmLocal);
    IO.enumCase(ID, "noalloc", TargetStackID::NoAlloc);
  }
};

template <> struct MappingTraits<FixedMachineStackObject> {
  static void mapping(yaml::IO &YamlIO, FixedMachineStackObject &Object) {
    YamlIO.mapRequired("id", Object.ID);
    YamlIO.mapOptional(
        "type", Object.Type,
        FixedMachineStackObject::DefaultType); // Don't print the default type.
    YamlIO.mapOptional("offset", Object.Offset, (int64_t)0);
    YamlIO.mapOptional("size", Object.Size, (uint64_t)0);
    YamlIO.mapOptional("alignment", Object.Alignment, std::nullopt);
    YamlIO.mapOptional("stack-id", Object.StackID, TargetStackID::Default);
    if (Object.Type != FixedMachineStackObject::SpillSlot) {
      YamlIO.mapOptional("isImmutable", Object.IsImmutable, false);
      YamlIO.mapOptional("isAliased", Object.IsAliased, false);
    }
    YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("callee-saved-restored", Object.CalleeSavedRestored,
                       true);
    YamlIO.mapOptional("debug-info-variable", Object.DebugVar,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("debug-info-expression", Object.DebugExpr,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("debug-info-location", Object.DebugLoc,
                       StringValue()); // Don't print it out when it's empty.
  }

  static const bool flow = true;
};

/// A serializable representation of a reference to a stack object or fixed
/// stack object.
struct FrameIndex {
  // The frame index as printed. This is always a non-negative number, even for
  // fixed objects. To obtain the real index,
  // MachineFrameInfo::getObjectIndexBegin has to be added.
  int FI;
  bool IsFixed;
  SMRange SourceRange;

  FrameIndex() = default;
  FrameIndex(int FI, const llvm::MachineFrameInfo &MFI);

  Expected<int> getFI(const llvm::MachineFrameInfo &MFI) const;
};

template <> struct ScalarTraits<FrameIndex> {
  static void output(const FrameIndex &FI, void *, raw_ostream &OS) {
    MachineOperand::printStackObjectReference(OS, FI.FI, FI.IsFixed, "");
  }

  static StringRef input(StringRef Scalar, void *Ctx, FrameIndex &FI) {
    FI.IsFixed = false;
    StringRef Num;
    if (Scalar.startswith("%stack.")) {
      Num = Scalar.substr(7);
    } else if (Scalar.startswith("%fixed-stack.")) {
      Num = Scalar.substr(13);
      FI.IsFixed = true;
    } else {
      return "Invalid frame index, needs to start with %stack. or "
             "%fixed-stack.";
    }
    if (Num.consumeInteger(10, FI.FI))
      return "Invalid frame index, not a valid number";

    if (const auto *Node =
            reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
      FI.SourceRange = Node->getSourceRange();
    return StringRef();
  }

  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};
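
// For illustration: given the parser above, '%stack.0' yields FI.FI == 0 with
// FI.IsFixed == false, '%fixed-stack.3' yields FI.FI == 3 with
// FI.IsFixed == true, and any other prefix produces an error string.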

/// Serializable representation of CallSiteInfo.
struct CallSiteInfo {
  // Representation of a call argument and the register that is used to
  // transfer it.
  struct ArgRegPair {
    StringValue Reg;
    uint16_t ArgNo;

    bool operator==(const ArgRegPair &Other) const {
      return Reg == Other.Reg && ArgNo == Other.ArgNo;
    }
  };

  /// Identifies the location of a call instruction in a machine function.
  struct MachineInstrLoc {
    unsigned BlockNum;
    unsigned Offset;

    bool operator==(const MachineInstrLoc &Other) const {
      return BlockNum == Other.BlockNum && Offset == Other.Offset;
    }
  };

  MachineInstrLoc CallLocation;
  std::vector<ArgRegPair> ArgForwardingRegs;

  bool operator==(const CallSiteInfo &Other) const {
    return CallLocation.BlockNum == Other.CallLocation.BlockNum &&
           CallLocation.Offset == Other.CallLocation.Offset;
  }
};

template <> struct MappingTraits<CallSiteInfo::ArgRegPair> {
  static void mapping(IO &YamlIO, CallSiteInfo::ArgRegPair &ArgReg) {
    YamlIO.mapRequired("arg", ArgReg.ArgNo);
    YamlIO.mapRequired("reg", ArgReg.Reg);
  }

  static const bool flow = true;
};
} // end namespace yaml
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::CallSiteInfo::ArgRegPair)

namespace llvm {
namespace yaml {

template <> struct MappingTraits<CallSiteInfo> {
  static void mapping(IO &YamlIO, CallSiteInfo &CSInfo) {
    YamlIO.mapRequired("bb", CSInfo.CallLocation.BlockNum);
    YamlIO.mapRequired("offset", CSInfo.CallLocation.Offset);
    YamlIO.mapOptional("fwdArgRegs", CSInfo.ArgForwardingRegs,
                       std::vector<CallSiteInfo::ArgRegPair>());
  }

  static const bool flow = true;
};
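
// For illustration (a hedged sketch of typical output): a call site with one
// forwarded argument register might serialize roughly as:
//
//   callSites:
//     - { bb: 0, offset: 4, fwdArgRegs: [ { arg: 0, reg: '$edi' } ] }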

/// Serializable representation of debug value substitutions.
struct DebugValueSubstitution {
  unsigned SrcInst;
  unsigned SrcOp;
  unsigned DstInst;
  unsigned DstOp;
  unsigned Subreg;

  bool operator==(const DebugValueSubstitution &Other) const {
    return std::tie(SrcInst, SrcOp, DstInst, DstOp) ==
           std::tie(Other.SrcInst, Other.SrcOp, Other.DstInst, Other.DstOp);
  }
};

template <> struct MappingTraits<DebugValueSubstitution> {
  static void mapping(IO &YamlIO, DebugValueSubstitution &Sub) {
    YamlIO.mapRequired("srcinst", Sub.SrcInst);
    YamlIO.mapRequired("srcop", Sub.SrcOp);
    YamlIO.mapRequired("dstinst", Sub.DstInst);
    YamlIO.mapRequired("dstop", Sub.DstOp);
    YamlIO.mapRequired("subreg", Sub.Subreg);
  }

  static const bool flow = true;
};
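
// For illustration: one substitution entry in MIR form, mapping operand 0 of
// debug instruction number 1 onto operand 0 of instruction number 2:
//
//   debugValueSubstitutions:
//     - { srcinst: 1, srcop: 0, dstinst: 2, dstop: 0, subreg: 0 }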
} // namespace yaml
} // namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::DebugValueSubstitution)

namespace llvm {
namespace yaml {
struct MachineConstantPoolValue {
  UnsignedValue ID;
  StringValue Value;
  MaybeAlign Alignment = std::nullopt;
  bool IsTargetSpecific = false;

  bool operator==(const MachineConstantPoolValue &Other) const {
    return ID == Other.ID && Value == Other.Value &&
           Alignment == Other.Alignment &&
           IsTargetSpecific == Other.IsTargetSpecific;
  }
};

template <> struct MappingTraits<MachineConstantPoolValue> {
  static void mapping(IO &YamlIO, MachineConstantPoolValue &Constant) {
    YamlIO.mapRequired("id", Constant.ID);
    YamlIO.mapOptional("value", Constant.Value, StringValue());
    YamlIO.mapOptional("alignment", Constant.Alignment, std::nullopt);
    YamlIO.mapOptional("isTargetSpecific", Constant.IsTargetSpecific, false);
  }
};

struct MachineJumpTable {
  struct Entry {
    UnsignedValue ID;
    std::vector<FlowStringValue> Blocks;

    bool operator==(const Entry &Other) const {
      return ID == Other.ID && Blocks == Other.Blocks;
    }
  };

  MachineJumpTableInfo::JTEntryKind Kind = MachineJumpTableInfo::EK_Custom32;
  std::vector<Entry> Entries;

  bool operator==(const MachineJumpTable &Other) const {
    return Kind == Other.Kind && Entries == Other.Entries;
  }
};

template <> struct MappingTraits<MachineJumpTable::Entry> {
  static void mapping(IO &YamlIO, MachineJumpTable::Entry &Entry) {
    YamlIO.mapRequired("id", Entry.ID);
    YamlIO.mapOptional("blocks", Entry.Blocks, std::vector<FlowStringValue>());
  }
};

} // end namespace yaml
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineFunctionLiveIn)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::VirtualRegisterDefinition)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineStackObject)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::EntryValueObject)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::FixedMachineStackObject)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::CallSiteInfo)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineConstantPoolValue)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineJumpTable::Entry)

namespace llvm {
namespace yaml {

template <> struct MappingTraits<MachineJumpTable> {
  static void mapping(IO &YamlIO, MachineJumpTable &JT) {
    YamlIO.mapRequired("kind", JT.Kind);
    YamlIO.mapOptional("entries", JT.Entries,
                       std::vector<MachineJumpTable::Entry>());
  }
};
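
// For illustration (the 'kind' string is an assumption based on the
// JTEntryKind enumeration traits defined elsewhere in this header):
//
//   jumpTable:
//     kind:            label-difference32
//     entries:
//       - id:          0
//         blocks:      [ '%bb.1', '%bb.2' ]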

/// Serializable representation of MachineFrameInfo.
///
/// Doesn't serialize attributes like 'StackAlignment', 'IsStackRealignable' and
/// 'RealignOption' as they are determined by the target and LLVM function
/// attributes.
/// It also doesn't serialize attributes like 'NumFixedObjects' and
/// 'HasVarSizedObjects' as they are determined by the frame objects themselves.
struct MachineFrameInfo {
  bool IsFrameAddressTaken = false;
  bool IsReturnAddressTaken = false;
  bool HasStackMap = false;
  bool HasPatchPoint = false;
  uint64_t StackSize = 0;
  int OffsetAdjustment = 0;
  unsigned MaxAlignment = 0;
  bool AdjustsStack = false;
  bool HasCalls = false;
  StringValue StackProtector;
  StringValue FunctionContext;
  unsigned MaxCallFrameSize = ~0u; ///< ~0u means: not computed yet.
  unsigned CVBytesOfCalleeSavedRegisters = 0;
  bool HasOpaqueSPAdjustment = false;
  bool HasVAStart = false;
  bool HasMustTailInVarArgFunc = false;
  bool HasTailCall = false;
  unsigned LocalFrameSize = 0;
  StringValue SavePoint;
  StringValue RestorePoint;

  bool operator==(const MachineFrameInfo &Other) const {
    return IsFrameAddressTaken == Other.IsFrameAddressTaken &&
           IsReturnAddressTaken == Other.IsReturnAddressTaken &&
           HasStackMap == Other.HasStackMap &&
           HasPatchPoint == Other.HasPatchPoint &&
           StackSize == Other.StackSize &&
           OffsetAdjustment == Other.OffsetAdjustment &&
           MaxAlignment == Other.MaxAlignment &&
           AdjustsStack == Other.AdjustsStack && HasCalls == Other.HasCalls &&
           StackProtector == Other.StackProtector &&
           FunctionContext == Other.FunctionContext &&
           MaxCallFrameSize == Other.MaxCallFrameSize &&
           CVBytesOfCalleeSavedRegisters ==
               Other.CVBytesOfCalleeSavedRegisters &&
           HasOpaqueSPAdjustment == Other.HasOpaqueSPAdjustment &&
           HasVAStart == Other.HasVAStart &&
           HasMustTailInVarArgFunc == Other.HasMustTailInVarArgFunc &&
           HasTailCall == Other.HasTailCall &&
           LocalFrameSize == Other.LocalFrameSize &&
           SavePoint == Other.SavePoint && RestorePoint == Other.RestorePoint;
  }
};

template <> struct MappingTraits<MachineFrameInfo> {
  static void mapping(IO &YamlIO, MachineFrameInfo &MFI) {
    YamlIO.mapOptional("isFrameAddressTaken", MFI.IsFrameAddressTaken, false);
    YamlIO.mapOptional("isReturnAddressTaken", MFI.IsReturnAddressTaken, false);
    YamlIO.mapOptional("hasStackMap", MFI.HasStackMap, false);
    YamlIO.mapOptional("hasPatchPoint", MFI.HasPatchPoint, false);
    YamlIO.mapOptional("stackSize", MFI.StackSize, (uint64_t)0);
    YamlIO.mapOptional("offsetAdjustment", MFI.OffsetAdjustment, (int)0);
    YamlIO.mapOptional("maxAlignment", MFI.MaxAlignment, (unsigned)0);
    YamlIO.mapOptional("adjustsStack", MFI.AdjustsStack, false);
    YamlIO.mapOptional("hasCalls", MFI.HasCalls, false);
    YamlIO.mapOptional("stackProtector", MFI.StackProtector,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("functionContext", MFI.FunctionContext,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize, (unsigned)~0);
    YamlIO.mapOptional("cvBytesOfCalleeSavedRegisters",
                       MFI.CVBytesOfCalleeSavedRegisters, 0U);
    YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment,
                       false);
    YamlIO.mapOptional("hasVAStart", MFI.HasVAStart, false);
    YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc,
                       false);
    YamlIO.mapOptional("hasTailCall", MFI.HasTailCall, false);
    YamlIO.mapOptional("localFrameSize", MFI.LocalFrameSize, (unsigned)0);
    YamlIO.mapOptional("savePoint", MFI.SavePoint,
                       StringValue()); // Don't print it out when it's empty.
    YamlIO.mapOptional("restorePoint", MFI.RestorePoint,
                       StringValue()); // Don't print it out when it's empty.
  }
};
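
// For illustration: since every key above is optional with a default, a
// serialized frame info block usually carries only the handful of fields
// that differ from their defaults, e.g.:
//
//   frameInfo:
//     stackSize:     8
//     maxAlignment:  4
//     hasCalls:      true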

/// Targets should override this in a way that mirrors the implementation of
/// llvm::MachineFunctionInfo.
struct MachineFunctionInfo {
  virtual ~MachineFunctionInfo() = default;
  virtual void mappingImpl(IO &YamlIO) {}
};

template <> struct MappingTraits<std::unique_ptr<MachineFunctionInfo>> {
  static void mapping(IO &YamlIO, std::unique_ptr<MachineFunctionInfo> &MFI) {
    if (MFI)
      MFI->mappingImpl(YamlIO);
  }
};
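
// A minimal sketch of the intended override pattern (hypothetical target;
// "hasRedZone" is an invented key, not something this header defines):
struct ExampleTargetFunctionInfo : MachineFunctionInfo {
  bool HasRedZone = false;
  void mappingImpl(IO &YamlIO) override {
    // Serialize only this target's extra state; defaults are omitted.
    YamlIO.mapOptional("hasRedZone", HasRedZone, false);
  }
};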

struct MachineFunction {
  StringRef Name;
  MaybeAlign Alignment = std::nullopt;
  bool ExposesReturnsTwice = false;
  // GISel MachineFunctionProperties.
  bool Legalized = false;
  bool RegBankSelected = false;
  bool Selected = false;
  bool FailedISel = false;
  // Register information
  bool TracksRegLiveness = false;
  bool HasWinCFI = false;

  bool CallsEHReturn = false;
  bool CallsUnwindInit = false;
  bool HasEHCatchret = false;
  bool HasEHScopes = false;
  bool HasEHFunclets = false;
  bool IsOutlined = false;

  bool FailsVerification = false;
  bool TracksDebugUserValues = false;
  bool UseDebugInstrRef = false;
  std::vector<VirtualRegisterDefinition> VirtualRegisters;
  std::vector<MachineFunctionLiveIn> LiveIns;
  std::optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
  // TODO: Serialize the various register masks.
  // Frame information
  MachineFrameInfo FrameInfo;
  std::vector<FixedMachineStackObject> FixedStackObjects;
  std::vector<EntryValueObject> EntryValueObjects;
  std::vector<MachineStackObject> StackObjects;
  std::vector<MachineConstantPoolValue> Constants; /// Constant pool.
  std::unique_ptr<MachineFunctionInfo> MachineFuncInfo;
  std::vector<CallSiteInfo> CallSitesInfo;
  std::vector<DebugValueSubstitution> DebugValueSubstitutions;
  MachineJumpTable JumpTableInfo;
  std::vector<StringValue> MachineMetadataNodes;
  BlockStringValue Body;
};

template <> struct MappingTraits<MachineFunction> {
  static void mapping(IO &YamlIO, MachineFunction &MF) {
    YamlIO.mapRequired("name", MF.Name);
    YamlIO.mapOptional("alignment", MF.Alignment, std::nullopt);
    YamlIO.mapOptional("exposesReturnsTwice", MF.ExposesReturnsTwice, false);
    YamlIO.mapOptional("legalized", MF.Legalized, false);
    YamlIO.mapOptional("regBankSelected", MF.RegBankSelected, false);
    YamlIO.mapOptional("selected", MF.Selected, false);
    YamlIO.mapOptional("failedISel", MF.FailedISel, false);
    YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness, false);
    YamlIO.mapOptional("hasWinCFI", MF.HasWinCFI, false);

    YamlIO.mapOptional("callsEHReturn", MF.CallsEHReturn, false);
    YamlIO.mapOptional("callsUnwindInit", MF.CallsUnwindInit, false);
    YamlIO.mapOptional("hasEHCatchret", MF.HasEHCatchret, false);
    YamlIO.mapOptional("hasEHScopes", MF.HasEHScopes, false);
    YamlIO.mapOptional("hasEHFunclets", MF.HasEHFunclets, false);
    YamlIO.mapOptional("isOutlined", MF.IsOutlined, false);
    YamlIO.mapOptional("debugInstrRef", MF.UseDebugInstrRef, false);

    YamlIO.mapOptional("failsVerification", MF.FailsVerification, false);
    YamlIO.mapOptional("tracksDebugUserValues", MF.TracksDebugUserValues,
                       false);
    YamlIO.mapOptional("registers", MF.VirtualRegisters,
                       std::vector<VirtualRegisterDefinition>());
    YamlIO.mapOptional("liveins", MF.LiveIns,
                       std::vector<MachineFunctionLiveIn>());
    YamlIO.mapOptional("calleeSavedRegisters", MF.CalleeSavedRegisters,
                       std::optional<std::vector<FlowStringValue>>());
    YamlIO.mapOptional("frameInfo", MF.FrameInfo, MachineFrameInfo());
    YamlIO.mapOptional("fixedStack", MF.FixedStackObjects,
                       std::vector<FixedMachineStackObject>());
    YamlIO.mapOptional("stack", MF.StackObjects,
                       std::vector<MachineStackObject>());
    YamlIO.mapOptional("entry_values", MF.EntryValueObjects,
                       std::vector<EntryValueObject>());
    YamlIO.mapOptional("callSites", MF.CallSitesInfo,
                       std::vector<CallSiteInfo>());
    YamlIO.mapOptional("debugValueSubstitutions", MF.DebugValueSubstitutions,
                       std::vector<DebugValueSubstitution>());
    YamlIO.mapOptional("constants", MF.Constants,
                       std::vector<MachineConstantPoolValue>());
    YamlIO.mapOptional("machineFunctionInfo", MF.MachineFuncInfo);
    if (!YamlIO.outputting() || !MF.JumpTableInfo.Entries.empty())
      YamlIO.mapOptional("jumpTable", MF.JumpTableInfo, MachineJumpTable());
    if (!YamlIO.outputting() || !MF.MachineMetadataNodes.empty())
      YamlIO.mapOptional("machineMetadataNodes", MF.MachineMetadataNodes,
                         std::vector<StringValue>());
    YamlIO.mapOptional("body", MF.Body, BlockStringValue());
  }
};
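
// For illustration: a minimal machine function document handled by the
// mapping above looks roughly like the following. The block string under
// 'body:' is parsed separately by the MIR parser, and its instructions are
// target-specific, so they are elided here:
//
//   ---
//   name:            foo
//   tracksRegLiveness: true
//   body: |
//     bb.0:
//   ...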

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_CODEGEN_MIRYAMLMAPPING_H
//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleHazardRecognizer class, which implements
// hazard-avoidance heuristics for scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H

namespace llvm {

class MachineInstr;
class SUnit;

/// HazardRecognizer - This determines whether or not an instruction can be
/// issued this cycle, and whether or not a noop needs to be inserted to handle
/// the hazard.
class ScheduleHazardRecognizer {
protected:
  /// MaxLookAhead - Indicates the number of cycles in the scoreboard
  /// state. Important to restore the state after backtracking. Additionally,
  /// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
  /// bypass virtual calls. Currently the PostRA scheduler ignores it.
  unsigned MaxLookAhead = 0;

public:
  ScheduleHazardRecognizer() = default;
  virtual ~ScheduleHazardRecognizer();

  enum HazardType {
    NoHazard,      // This instruction can be emitted at this cycle.
    Hazard,        // This instruction can't be emitted at this cycle.
    NoopHazard     // This instruction can't be emitted, and needs noops.
  };

  unsigned getMaxLookAhead() const { return MaxLookAhead; }

  bool isEnabled() const { return MaxLookAhead != 0; }

  /// atIssueLimit - Return true if no more instructions may be issued in this
  /// cycle.
  ///
  /// FIXME: remove this once MachineScheduler is the only client.
  virtual bool atIssueLimit() const { return false; }

  /// getHazardType - Return the hazard type of emitting this node.  There are
  /// three possible results.  Either:
  ///  * NoHazard: it is legal to issue this instruction on this cycle.
  ///  * Hazard: issuing this instruction would stall the machine.  If some
  ///     other instruction is available, issue it first.
  ///  * NoopHazard: issuing this instruction would break the program.  If
  ///     some other instruction can be issued, do so, otherwise issue a noop.
  virtual HazardType getHazardType(SUnit *, int Stalls = 0) {
    return NoHazard;
  }

  /// Reset - This callback is invoked when a new block of
  /// instructions is about to be scheduled. The hazard state should be
  /// set to an initialized state.
  virtual void Reset() {}

  /// EmitInstruction - This callback is invoked when an instruction is
  /// emitted, to advance the hazard state.
  virtual void EmitInstruction(SUnit *) {}

  /// This overload will be used when the hazard recognizer is being used
  /// by a non-scheduling pass, which does not use SUnits.
  virtual void EmitInstruction(MachineInstr *) {}

  /// PreEmitNoops - This callback is invoked prior to emitting an instruction.
  /// It should return the number of noops to emit prior to the provided
  /// instruction.
  /// Note: This is only used during PostRA scheduling. EmitNoop is not called
  /// for these noops.
  virtual unsigned PreEmitNoops(SUnit *) {
    return 0;
  }

  /// This overload will be used when the hazard recognizer is being used
  /// by a non-scheduling pass, which does not use SUnits.
  virtual unsigned PreEmitNoops(MachineInstr *) {
    return 0;
  }

  /// ShouldPreferAnother - This callback may be invoked if getHazardType
  /// returns NoHazard. If, even though there is no hazard, it would be better to
  /// schedule another available instruction, this callback should return true.
  virtual bool ShouldPreferAnother(SUnit *) {
    return false;
  }

  /// AdvanceCycle - This callback is invoked whenever the next top-down
  /// instruction to be scheduled cannot issue in the current cycle, either
  /// because of latency or resource conflicts.  This should increment the
  /// internal state of the hazard recognizer so that previously "Hazard"
  /// instructions will now not be hazards.
  virtual void AdvanceCycle() {}

  /// RecedeCycle - This callback is invoked whenever the next bottom-up
  /// instruction to be scheduled cannot issue in the current cycle, either
  /// because of latency or resource conflicts.
  virtual void RecedeCycle() {}

  /// EmitNoop - This callback is invoked when a noop was added to the
  /// instruction stream.
  virtual void EmitNoop() {
    // Default implementation: count it as a cycle.
    AdvanceCycle();
  }

  /// EmitNoops - This callback is invoked when noops were added to the
  /// instruction stream.
  virtual void EmitNoops(unsigned Quantity) {
    // Default implementation: count it as a cycle.
    for (unsigned i = 0; i < Quantity; ++i)
      EmitNoop();
  }
};
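
// A minimal sketch (illustrative only, not part of LLVM): a recognizer that
// reports a hazard for one cycle after every emitted instruction, forcing
// the scheduler to alternate issue and stall cycles.
class ExampleAlternatingHazardRecognizer : public ScheduleHazardRecognizer {
  bool Blocked = false;

public:
  ExampleAlternatingHazardRecognizer() { MaxLookAhead = 1; }

  HazardType getHazardType(SUnit *, int) override {
    return Blocked ? Hazard : NoHazard;
  }
  void EmitInstruction(SUnit *) override { Blocked = true; }
  void AdvanceCycle() override { Blocked = false; }
  void Reset() override { Blocked = false; }
};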

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
//===- llvm/CodeGen/VirtRegMap.h - Virtual Register Map ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a virtual register map. This maps virtual registers to
// physical registers and virtual registers to stack slots. It is created and
// updated by a register allocator and then used by a machine code rewriter that
// adds spill code and rewrites virtual into physical register references.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_VIRTREGMAP_H
#define LLVM_CODEGEN_VIRTREGMAP_H

#include "llvm/ADT/IndexedMap.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TileShapeInfo.h"
#include "llvm/Pass.h"
#include <cassert>

namespace llvm {

class MachineFunction;
class MachineRegisterInfo;
class raw_ostream;
class TargetInstrInfo;

  class VirtRegMap : public MachineFunctionPass {
  public:
    enum {
      NO_PHYS_REG = 0,
      NO_STACK_SLOT = (1L << 30)-1,
      MAX_STACK_SLOT = (1L << 18)-1
    };

  private:
    MachineRegisterInfo *MRI = nullptr;
    const TargetInstrInfo *TII = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    MachineFunction *MF = nullptr;

    /// Virt2PhysMap - This is a virtual to physical register
    /// mapping. Each virtual register is required to have an entry in
    /// it; even spilled virtual registers (the register mapped to a
    /// spilled register is the temporary used to load it from the
    /// stack).
    IndexedMap<Register, VirtReg2IndexFunctor> Virt2PhysMap;

    /// Virt2StackSlotMap - This is a virtual register to stack slot
    /// mapping. Each spilled virtual register has an entry in it
    /// which corresponds to the stack slot this register is spilled
    /// at.
    IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;

    /// Virt2SplitMap - This is a virtual register to split virtual register
    /// mapping.
    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;

    /// Virt2ShapeMap - For X86 AMX registers, maps each virtual register to
    /// the shape information bound to it.
    DenseMap<unsigned, ShapeT> Virt2ShapeMap;

    /// createSpillSlot - Allocate a spill slot for RC from MFI.
    unsigned createSpillSlot(const TargetRegisterClass *RC);

  public:
    static char ID;

    VirtRegMap()
        : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
          Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) {}
    VirtRegMap(const VirtRegMap &) = delete;
    VirtRegMap &operator=(const VirtRegMap &) = delete;

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    MachineFunction &getMachineFunction() const {
      assert(MF && "getMachineFunction called before runOnMachineFunction");
      return *MF;
    }

    MachineRegisterInfo &getRegInfo() const { return *MRI; }
    const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }

    void grow();

    /// returns true if the specified virtual register is
    /// mapped to a physical register
    bool hasPhys(Register virtReg) const {
      return getPhys(virtReg) != NO_PHYS_REG;
    }

    /// returns the physical register mapped to the specified
    /// virtual register
    MCRegister getPhys(Register virtReg) const {
      assert(virtReg.isVirtual());
      return MCRegister::from(Virt2PhysMap[virtReg.id()]);
    }

    /// creates a mapping for the specified virtual register to
    /// the specified physical register
    void assignVirt2Phys(Register virtReg, MCPhysReg physReg);

    bool isShapeMapEmpty() const { return Virt2ShapeMap.empty(); }

    bool hasShape(Register virtReg) const {
      return getShape(virtReg).isValid();
    }

    ShapeT getShape(Register virtReg) const {
      assert(virtReg.isVirtual());
      return Virt2ShapeMap.lookup(virtReg);
    }

    void assignVirt2Shape(Register virtReg, ShapeT shape) {
      Virt2ShapeMap[virtReg.id()] = shape;
    }

    /// clears the specified virtual register's physical
    /// register mapping
    void clearVirt(Register virtReg) {
      assert(virtReg.isVirtual());
      assert(Virt2PhysMap[virtReg.id()] != NO_PHYS_REG &&
             "attempt to clear a not assigned virtual register");
      Virt2PhysMap[virtReg.id()] = NO_PHYS_REG;
    }

    /// clears all virtual to physical register mappings
    void clearAllVirt() {
      Virt2PhysMap.clear();
      grow();
    }

    /// returns true if VirtReg is assigned to its preferred physreg.
    bool hasPreferredPhys(Register VirtReg) const;

    /// returns true if VirtReg has a known preferred register.
    /// This returns false if VirtReg has a preference that is a virtual
    /// register that hasn't been assigned yet.
    bool hasKnownPreference(Register VirtReg) const;

    /// records virtReg is a split live interval from SReg.
    void setIsSplitFromReg(Register virtReg, Register SReg) {
      Virt2SplitMap[virtReg.id()] = SReg;
      if (hasShape(SReg)) {
        Virt2ShapeMap[virtReg.id()] = getShape(SReg);
      }
    }

    /// returns the live interval virtReg is split from.
    Register getPreSplitReg(Register virtReg) const {
      return Virt2SplitMap[virtReg.id()];
    }

    /// getOriginal - Return the original virtual register that VirtReg descends
    /// from through splitting.
    /// A register that was not created by splitting is its own original.
    /// This operation is idempotent.
    Register getOriginal(Register VirtReg) const {
      Register Orig = getPreSplitReg(VirtReg);
      return Orig ? Orig : VirtReg;
    }

    /// returns true if the specified virtual register is not
    /// mapped to a stack slot or rematerialized.
    bool isAssignedReg(Register virtReg) const {
      if (getStackSlot(virtReg) == NO_STACK_SLOT)
        return true;
      // Split register can be assigned a physical register as well as a
      // stack slot or remat id.
      return (Virt2SplitMap[virtReg.id()] &&
              Virt2PhysMap[virtReg.id()] != NO_PHYS_REG);
    }

    /// returns the stack slot mapped to the specified virtual
    /// register
    int getStackSlot(Register virtReg) const {
      assert(virtReg.isVirtual());
      return Virt2StackSlotMap[virtReg.id()];
    }

    /// create a mapping for the specified virtual register to
    /// the next available stack slot
    int assignVirt2StackSlot(Register virtReg);

    /// create a mapping for the specified virtual register to
    /// the specified stack slot
    void assignVirt2StackSlot(Register virtReg, int SS);

    void print(raw_ostream &OS, const Module* M = nullptr) const override;
    void dump() const;
  };
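
  // A minimal sketch (illustrative only): how a register allocator records a
  // decision in the map, assigning a physical register when one is available
  // and otherwise spilling to a fresh stack slot.
  inline void exampleRecordAssignment(VirtRegMap &VRM, Register VirtReg,
                                      MCRegister PhysReg) {
    if (PhysReg)
      VRM.assignVirt2Phys(VirtReg, PhysReg); // virtual -> physical register
    else
      VRM.assignVirt2StackSlot(VirtReg);     // spill: virtual -> stack slot
  }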

  inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
    VRM.print(OS);
    return OS;
  }

} // end llvm namespace

#endif // LLVM_CODEGEN_VIRTREGMAP_H
//=- MachineLoopUtils.h - Helper functions for manipulating loops -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINELOOPUTILS_H
#define LLVM_CODEGEN_MACHINELOOPUTILS_H

namespace llvm {
class MachineBasicBlock;
class MachineRegisterInfo;
class TargetInstrInfo;

enum LoopPeelDirection {
  LPD_Front, ///< Peel the first iteration of the loop.
  LPD_Back   ///< Peel the last iteration of the loop.
};

/// Peels a single block loop. Loop must have two successors, one of which
/// must be itself. Similarly it must have two predecessors, one of which must
/// be itself.
///
/// The loop block is copied and inserted into the CFG such that two copies of
/// the loop follow on from each other. The copy is inserted either before or
/// after the loop based on Direction.
///
/// Phis are updated and an unconditional branch inserted at the end of the
/// clone so as to execute a single iteration.
///
/// The trip count of Loop is not updated.
MachineBasicBlock *PeelSingleBlockLoop(LoopPeelDirection Direction,
                                       MachineBasicBlock *Loop,
                                       MachineRegisterInfo &MRI,
                                       const TargetInstrInfo *TII);
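
// For illustration (names hypothetical): peeling the first iteration of a
// single-block loop 'LoopMBB' so the copy executes once before the loop:
//
//   MachineBasicBlock *Prolog =
//       PeelSingleBlockLoop(LPD_Front, LoopMBB, MRI, TII);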

} // namespace llvm

#endif // LLVM_CODEGEN_MACHINELOOPUTILS_H
//==- llvm/CodeGen/SelectionDAGTargetInfo.h - SelectionDAG Info --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SelectionDAGTargetInfo class, which targets can
// subclass to parameterize the SelectionDAG lowering and instruction
// selection process.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
#define LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CodeGen.h"
#include <utility>

namespace llvm {

class SelectionDAG;

//===----------------------------------------------------------------------===//
/// Targets can subclass this to parameterize the
/// SelectionDAG lowering and instruction selection process.
///
class SelectionDAGTargetInfo {
public:
  explicit SelectionDAGTargetInfo() = default;
  SelectionDAGTargetInfo(const SelectionDAGTargetInfo &) = delete;
  SelectionDAGTargetInfo &operator=(const SelectionDAGTargetInfo &) = delete;
  virtual ~SelectionDAGTargetInfo();

  /// Emit target-specific code that performs a memcpy.
  /// This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  ///
  /// If AlwaysInline is true, the size is constant and the target should not
  /// emit any calls and is strongly encouraged to attempt to emit inline code
  /// even if it is beyond the usual threshold because this intrinsic is being
  /// expanded in a place where calls are not feasible (e.g. within the prologue
  /// for another call). If the target chooses to decline an AlwaysInline
  /// request here, legalize will resort to using simple loads and stores.
  virtual SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
                                          SDValue Chain, SDValue Op1,
                                          SDValue Op2, SDValue Op3,
                                          Align Alignment, bool isVolatile,
                                          bool AlwaysInline,
                                          MachinePointerInfo DstPtrInfo,
                                          MachinePointerInfo SrcPtrInfo) const {
    return SDValue();
  }

  /// Emit target-specific code that performs a memmove.
  /// This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple loads/stores and can be
  /// more efficient than using a library call. This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used.
  virtual SDValue EmitTargetCodeForMemmove(
      SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1,
      SDValue Op2, SDValue Op3, Align Alignment, bool isVolatile,
      MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const {
    return SDValue();
  }

  /// Emit target-specific code that performs a memset.
  /// This can be used by targets to provide code sequences for cases
  /// that don't fit the target's parameters for simple stores and can be more
  /// efficient than using a library call. This function can return a null
  /// SDValue if the target declines to use custom code and a different
  /// lowering strategy should be used. Note that if AlwaysInline is true the
  /// function has to return a valid SDValue.
  virtual SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, const SDLoc &dl,
                                          SDValue Chain, SDValue Op1,
                                          SDValue Op2, SDValue Op3,
                                          Align Alignment, bool isVolatile,
                                          bool AlwaysInline,
                                          MachinePointerInfo DstPtrInfo) const {
    return SDValue();
  }

  /// Emit target-specific code that performs a memcmp/bcmp, in cases where that is
  /// faster than a libcall. The first returned SDValue is the result of the
  /// memcmp and the second is the chain. Both SDValues can be null if a normal
  /// libcall should be used.
  virtual std::pair<SDValue, SDValue>
  EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                          SDValue Op1, SDValue Op2, SDValue Op3,
                          MachinePointerInfo Op1PtrInfo,
                          MachinePointerInfo Op2PtrInfo) const {
    return std::make_pair(SDValue(), SDValue());
  }

  /// Emit target-specific code that performs a memchr, in cases where that is
  /// faster than a libcall. The first returned SDValue is the result of the
  /// memchr and the second is the chain. Both SDValues can be null if a normal
  /// libcall should be used.
  virtual std::pair<SDValue, SDValue>
  EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                          SDValue Src, SDValue Char, SDValue Length,
                          MachinePointerInfo SrcPtrInfo) const {
    return std::make_pair(SDValue(), SDValue());
  }

  /// Emit target-specific code that performs a strcpy or stpcpy, in cases
  /// where that is faster than a libcall.
  /// The first returned SDValue is the result of the copy (the start
  /// of the destination string for strcpy, a pointer to the null terminator
  /// for stpcpy) and the second is the chain.  Both SDValues can be null
  /// if a normal libcall should be used.
  virtual std::pair<SDValue, SDValue>
  EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
                          SDValue Dest, SDValue Src,
                          MachinePointerInfo DestPtrInfo,
                          MachinePointerInfo SrcPtrInfo, bool isStpcpy) const {
    return std::make_pair(SDValue(), SDValue());
  }

  /// Emit target-specific code that performs a strcmp, in cases where that is
  /// faster than a libcall.
  /// The first returned SDValue is the result of the strcmp and the second is
  /// the chain. Both SDValues can be null if a normal libcall should be used.
  virtual std::pair<SDValue, SDValue>
  EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                          SDValue Op1, SDValue Op2,
                          MachinePointerInfo Op1PtrInfo,
                          MachinePointerInfo Op2PtrInfo) const {
    return std::make_pair(SDValue(), SDValue());
  }

  virtual std::pair<SDValue, SDValue>
  EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
                          SDValue Src, MachinePointerInfo SrcPtrInfo) const {
    return std::make_pair(SDValue(), SDValue());
  }

  virtual std::pair<SDValue, SDValue>
  EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain,
                           SDValue Src, SDValue MaxLength,
                           MachinePointerInfo SrcPtrInfo) const {
    return std::make_pair(SDValue(), SDValue());
  }

  virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl,
                                          SDValue Chain, SDValue Addr,
                                          SDValue Size,
                                          MachinePointerInfo DstPtrInfo,
                                          bool ZeroData) const {
    return SDValue();
  }

  // Return true if the DAG Combiner should disable generic combines.
  virtual bool disableGenericCombines(CodeGenOpt::Level OptLevel) const {
    return false;
  }
};
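
// A minimal sketch (hypothetical target, illustrative only): a subclass that
// declines custom memcmp lowering, so the caller emits the normal libcall.
class ExampleSelectionDAGInfo : public SelectionDAGTargetInfo {
public:
  std::pair<SDValue, SDValue>
  EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                          SDValue Op1, SDValue Op2, SDValue Op3,
                          MachinePointerInfo Op1PtrInfo,
                          MachinePointerInfo Op2PtrInfo) const override {
    // Null SDValues signal that no target-specific sequence was emitted.
    return std::make_pair(SDValue(), SDValue());
  }
};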

} // end namespace llvm

#endif // LLVM_CODEGEN_SELECTIONDAGTARGETINFO_H
//===- LiveIntervalUnion.h - Live interval union data struct ---*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// LiveIntervalUnion is a union of live segments across multiple live virtual
// registers. This may be used during coalescing to represent a congruence
// class, or during register allocation to model liveness of a physical
// register.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVALUNION_H
#define LLVM_CODEGEN_LIVEINTERVALUNION_H

#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include <cassert>
#include <limits>

namespace llvm {

class raw_ostream;
class TargetRegisterInfo;

#ifndef NDEBUG
// forward declaration
template <unsigned Element> class SparseBitVector;

using LiveVirtRegBitSet = SparseBitVector<128>;
#endif

/// Union of live intervals that are strong candidates for coalescing into a
/// single register (either physical or virtual depending on the context).  We
/// expect the constituent live intervals to be disjoint, although we may
/// eventually make exceptions to handle value-based interference.
class LiveIntervalUnion {
  // A set of live virtual register segments that supports fast insertion,
  // intersection, and removal.
  // Mapping SlotIndex intervals to virtual register numbers.
  using LiveSegments = IntervalMap<SlotIndex, const LiveInterval *>;

public:
  // SegmentIter can advance to the next segment ordered by starting position
  // which may belong to a different live virtual register. We also must be able
  // to reach the current segment's containing virtual register.
  using SegmentIter = LiveSegments::iterator;

  /// Const version of SegmentIter.
  using ConstSegmentIter = LiveSegments::const_iterator;

  // LiveIntervalUnions share an external allocator.
  using Allocator = LiveSegments::Allocator;

private:
  unsigned Tag = 0;       // unique tag for current contents.
  LiveSegments Segments;  // union of virtual reg segments

public:
  explicit LiveIntervalUnion(Allocator &a) : Segments(a) {}

  // Iterate over all segments in the union of live virtual registers ordered
  // by their starting position.
  SegmentIter begin() { return Segments.begin(); }
  SegmentIter end() { return Segments.end(); }
  SegmentIter find(SlotIndex x) { return Segments.find(x); }
  ConstSegmentIter begin() const { return Segments.begin(); }
  ConstSegmentIter end() const { return Segments.end(); }
  ConstSegmentIter find(SlotIndex x) const { return Segments.find(x); }

  bool empty() const { return Segments.empty(); }
  SlotIndex startIndex() const { return Segments.start(); }
  SlotIndex endIndex() const { return Segments.stop(); }

  // Provide public access to the underlying map to allow overlap iteration.
  using Map = LiveSegments;
  const Map &getMap() const { return Segments; }

  /// getTag - Return an opaque tag representing the current state of the union.
  unsigned getTag() const { return Tag; }

  /// changedSince - Return true if the union has changed since getTag
  /// returned tag.
  bool changedSince(unsigned tag) const { return tag != Tag; }

  // Add a live virtual register to this union and merge its segments.
  void unify(const LiveInterval &VirtReg, const LiveRange &Range);

  // Remove a live virtual register's segments from this union.
  void extract(const LiveInterval &VirtReg, const LiveRange &Range);

  // Remove all inserted virtual registers.
  void clear() { Segments.clear(); ++Tag; }

  // Print union, using TRI to translate register names
  void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;

#ifndef NDEBUG
  // Verify the live intervals in this union and add them to the visited set.
  void verify(LiveVirtRegBitSet& VisitedVRegs);
#endif

  // Get any virtual register that is assigned to this physical unit.
  const LiveInterval *getOneVReg() const;

  /// Query interferences between a single live virtual register and a live
  /// interval union.
  class Query {
    const LiveIntervalUnion *LiveUnion = nullptr;
    const LiveRange *LR = nullptr;
    LiveRange::const_iterator LRI;  ///< current position in LR
    ConstSegmentIter LiveUnionI;    ///< current position in LiveUnion
    SmallVector<const LiveInterval *, 4> InterferingVRegs;
    bool CheckedFirstInterference = false;
    bool SeenAllInterferences = false;
    unsigned Tag = 0;
    unsigned UserTag = 0;

    // Count the virtual registers in this union that interfere with this
    // query's live virtual register, up to maxInterferingRegs.
    unsigned collectInterferingVRegs(unsigned MaxInterferingRegs);

    // Was this virtual register visited during collectInterferingVRegs?
    bool isSeenInterference(const LiveInterval *VirtReg) const;

  public:
    Query() = default;
    Query(const LiveRange &LR, const LiveIntervalUnion &LIU)
        : LiveUnion(&LIU), LR(&LR) {}
    Query(const Query &) = delete;
    Query &operator=(const Query &) = delete;

    void reset(unsigned NewUserTag, const LiveRange &NewLR,
               const LiveIntervalUnion &NewLiveUnion) {
      LiveUnion = &NewLiveUnion;
      LR = &NewLR;
      InterferingVRegs.clear();
      CheckedFirstInterference = false;
      SeenAllInterferences = false;
      Tag = NewLiveUnion.getTag();
      UserTag = NewUserTag;
    }

    void init(unsigned NewUserTag, const LiveRange &NewLR,
              const LiveIntervalUnion &NewLiveUnion) {
      if (UserTag == NewUserTag && LR == &NewLR && LiveUnion == &NewLiveUnion &&
          !NewLiveUnion.changedSince(Tag)) {
        // Retain cached results, e.g. firstInterference.
        return;
      }
      reset(NewUserTag, NewLR, NewLiveUnion);
    }

    // Does this live virtual register interfere with the union?
    bool checkInterference() { return collectInterferingVRegs(1); }

    // Vector generated by collectInterferingVRegs.
    const SmallVectorImpl<const LiveInterval *> &interferingVRegs(
        unsigned MaxInterferingRegs = std::numeric_limits<unsigned>::max()) {
      if (!SeenAllInterferences || MaxInterferingRegs < InterferingVRegs.size())
        collectInterferingVRegs(MaxInterferingRegs);
      return InterferingVRegs;
    }
  };

  // Array of LiveIntervalUnions.
  class Array {
    unsigned Size = 0;
    LiveIntervalUnion *LIUs = nullptr;

  public:
    Array() = default;
    ~Array() { clear(); }

    // Initialize the array to have Size entries.
    // Reuse an existing allocation if the size matches.
    void init(LiveIntervalUnion::Allocator&, unsigned Size);

    unsigned size() const { return Size; }

    void clear();

    LiveIntervalUnion& operator[](unsigned idx) {
      assert(idx <  Size && "idx out of bounds");
      return LIUs[idx];
    }

    const LiveIntervalUnion& operator[](unsigned Idx) const {
      assert(Idx < Size && "Idx out of bounds");
      return LIUs[Idx];
    }
  };
};
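
// A minimal sketch (illustrative only): querying whether a live range
// interferes with the segments already unified for one register unit.
inline bool exampleHasInterference(unsigned UserTag, const LiveRange &LR,
                                   const LiveIntervalUnion &LIU) {
  LiveIntervalUnion::Query Q;
  Q.init(UserTag, LR, LIU); // reuses cached results if nothing has changed
  return Q.checkInterference();
}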

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEINTERVALUNION_H
//==- llvm/CodeGen/TargetLoweringObjectFileImpl.h - Object Info --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements classes used to handle lowerings specific to common
// object file formats.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Target/TargetLoweringObjectFile.h"

namespace llvm {

class GlobalValue;
class MachineModuleInfo;
class MachineFunction;
class MCContext;
class MCExpr;
class MCSection;
class MCSymbol;
class Module;
class TargetMachine;

class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
  bool UseInitArray = false;
  mutable unsigned NextUniqueID = 1;  // ID 0 is reserved for execute-only sections
  SmallPtrSet<GlobalObject *, 2> Used;

protected:
  MCSymbolRefExpr::VariantKind PLTRelativeVariantKind =
      MCSymbolRefExpr::VK_None;

public:
  TargetLoweringObjectFileELF();
  ~TargetLoweringObjectFileELF() override = default;

  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;

  void getModuleMetadata(Module &M) override;

  /// Emit Obj-C garbage collection and linker options.
  void emitModuleMetadata(MCStreamer &Streamer, Module &M) const override;

  void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
                            const MCSymbol *Sym) const override;

  /// Given a constant with the SectionKind, return a section that it should be
  /// placed in.
  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
                                   const Constant *C,
                                   Align &Alignment) const override;

  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                                      const TargetMachine &TM) const override;

  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                                    const TargetMachine &TM) const override;

  MCSection *getSectionForJumpTable(const Function &F,
                                    const TargetMachine &TM) const override;
  MCSection *getSectionForLSDA(const Function &F, const MCSymbol &FnSym,
                               const TargetMachine &TM) const override;

  MCSection *
  getSectionForMachineBasicBlock(const Function &F,
                                 const MachineBasicBlock &MBB,
                                 const TargetMachine &TM) const override;

  MCSection *
  getUniqueSectionForFunction(const Function &F,
                              const TargetMachine &TM) const override;

  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
                                           const Function &F) const override;

  /// Return an MCExpr to use for a reference to the specified type info global
  /// variable from exception handling information.
  const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
                                        unsigned Encoding,
                                        const TargetMachine &TM,
                                        MachineModuleInfo *MMI,
                                        MCStreamer &Streamer) const override;

  // The symbol that gets passed to .cfi_personality.
  MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
                                    const TargetMachine &TM,
                                    MachineModuleInfo *MMI) const override;

  void InitializeELF(bool UseInitArray_);
  MCSection *getStaticCtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;
  MCSection *getStaticDtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;

  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
                                       const GlobalValue *RHS,
                                       const TargetMachine &TM) const override;

  const MCExpr *lowerDSOLocalEquivalent(const DSOLocalEquivalent *Equiv,
                                        const TargetMachine &TM) const override;

  MCSection *getSectionForCommandLines() const override;
};

class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
public:
  TargetLoweringObjectFileMachO();
  ~TargetLoweringObjectFileMachO() override = default;

  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;

  MCSection *getStaticDtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;

  /// Emit the module flags that specify the garbage collection information.
  void emitModuleMetadata(MCStreamer &Streamer, Module &M) const override;

  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                                    const TargetMachine &TM) const override;

  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                                      const TargetMachine &TM) const override;

  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
                                   const Constant *C,
                                   Align &Alignment) const override;

  /// The mach-o version of this method defaults to returning a stub reference.
  const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
                                        unsigned Encoding,
                                        const TargetMachine &TM,
                                        MachineModuleInfo *MMI,
                                        MCStreamer &Streamer) const override;

  // The symbol that gets passed to .cfi_personality.
  MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
                                    const TargetMachine &TM,
                                    MachineModuleInfo *MMI) const override;

  /// Get MachO PC relative GOT entry relocation
  const MCExpr *getIndirectSymViaGOTPCRel(const GlobalValue *GV,
                                          const MCSymbol *Sym,
                                          const MCValue &MV, int64_t Offset,
                                          MachineModuleInfo *MMI,
                                          MCStreamer &Streamer) const override;

  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
                         const TargetMachine &TM) const override;

  MCSection *getSectionForCommandLines() const override;
};

class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
  mutable unsigned NextUniqueID = 0;
  const TargetMachine *TM = nullptr;

public:
  ~TargetLoweringObjectFileCOFF() override = default;

  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                                      const TargetMachine &TM) const override;

  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                                    const TargetMachine &TM) const override;

  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
                         const TargetMachine &TM) const override;

  MCSection *getSectionForJumpTable(const Function &F,
                                    const TargetMachine &TM) const override;

  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
                                           const Function &F) const override;

  /// Emit Obj-C garbage collection and linker options.
  void emitModuleMetadata(MCStreamer &Streamer, Module &M) const override;

  MCSection *getStaticCtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;
  MCSection *getStaticDtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;

  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
                                       const GlobalValue *RHS,
                                       const TargetMachine &TM) const override;

  /// Given a mergeable constant with the specified size and relocation
  /// information, return a section that it should be placed in.
  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
                                   const Constant *C,
                                   Align &Alignment) const override;

private:
  void emitLinkerDirectives(MCStreamer &Streamer, Module &M) const;
};

class TargetLoweringObjectFileWasm : public TargetLoweringObjectFile {
  mutable unsigned NextUniqueID = 0;

public:
  TargetLoweringObjectFileWasm() = default;
  ~TargetLoweringObjectFileWasm() override = default;

  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                                      const TargetMachine &TM) const override;

  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                                    const TargetMachine &TM) const override;

  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
                                           const Function &F) const override;

  void InitializeWasm();
  MCSection *getStaticCtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;
  MCSection *getStaticDtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;

  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
                                       const GlobalValue *RHS,
                                       const TargetMachine &TM) const override;
};

class TargetLoweringObjectFileXCOFF : public TargetLoweringObjectFile {
public:
  TargetLoweringObjectFileXCOFF() = default;
  ~TargetLoweringObjectFileXCOFF() override = default;

  static bool ShouldEmitEHBlock(const MachineFunction *MF);
  static bool ShouldSetSSPCanaryBitInTB(const MachineFunction *MF);

  static MCSymbol *getEHInfoTableSymbol(const MachineFunction *MF);

  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;

  bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
                                           const Function &F) const override;

  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                                      const TargetMachine &TM) const override;

  MCSection *getStaticCtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;
  MCSection *getStaticDtorSection(unsigned Priority,
                                  const MCSymbol *KeySym) const override;

  const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
                                       const GlobalValue *RHS,
                                       const TargetMachine &TM) const override;

  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                                    const TargetMachine &TM) const override;

  MCSection *getSectionForJumpTable(const Function &F,
                                    const TargetMachine &TM) const override;

  /// Given a constant with the SectionKind, return a section that it should be
  /// placed in.
  MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
                                   const Constant *C,
                                   Align &Alignment) const override;

  static XCOFF::StorageClass getStorageClassForGlobal(const GlobalValue *GV);

  MCSection *
  getSectionForFunctionDescriptor(const Function *F,
                                  const TargetMachine &TM) const override;
  MCSection *getSectionForTOCEntry(const MCSymbol *Sym,
                                   const TargetMachine &TM) const override;

  /// For external functions, this will always return a function descriptor
  /// csect.
  MCSection *
  getSectionForExternalReference(const GlobalObject *GO,
                                 const TargetMachine &TM) const override;

  /// For functions, this will always return a function descriptor symbol.
  MCSymbol *getTargetSymbol(const GlobalValue *GV,
                            const TargetMachine &TM) const override;

  MCSymbol *getFunctionEntryPointSymbol(const GlobalValue *Func,
                                        const TargetMachine &TM) const override;

  /// For functions, this will return the LSDA section. If option
  /// -ffunction-sections is on, this will return a unique csect with the
  /// function name appended to .gcc_except_table as a suffix of the LSDA
  /// section name.
  MCSection *getSectionForLSDA(const Function &F, const MCSymbol &FnSym,
                               const TargetMachine &TM) const override;
};

class TargetLoweringObjectFileGOFF : public TargetLoweringObjectFile {
public:
  TargetLoweringObjectFileGOFF();
  ~TargetLoweringObjectFileGOFF() override = default;

  MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                                    const TargetMachine &TM) const override;
  MCSection *getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                                      const TargetMachine &TM) const override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
//===---- MachineOutliner.h - Outliner data structures ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Contains all data structures shared between the outliner implemented in
/// MachineOutliner.cpp and target implementations of the outliner.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEOUTLINER_H
#define LLVM_CODEGEN_MACHINEOUTLINER_H

#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include <initializer_list>

namespace llvm {
namespace outliner {

/// Represents how an instruction should be mapped by the outliner.
/// \p Legal instructions are those which are safe to outline.
/// \p LegalTerminator instructions are safe to outline, but only as the
/// last instruction in a sequence.
/// \p Illegal instructions are those which cannot be outlined.
/// \p Invisible instructions are instructions which can be outlined, but
/// shouldn't actually impact the outlining result.
enum InstrType { Legal, LegalTerminator, Illegal, Invisible };

/// An individual sequence of instructions to be replaced with a call to
/// an outlined function.
struct Candidate {
private:
  /// The start index of this \p Candidate in the instruction list.
  unsigned StartIdx = 0;

  /// The number of instructions in this \p Candidate.
  unsigned Len = 0;

  // The first instruction in this \p Candidate.
  MachineBasicBlock::iterator FirstInst;

  // The last instruction in this \p Candidate.
  MachineBasicBlock::iterator LastInst;

  // The basic block that contains this Candidate.
  MachineBasicBlock *MBB = nullptr;

  /// Cost of calling an outlined function from this point as defined by the
  /// target.
  unsigned CallOverhead = 0;

  /// Liveness information for this Candidate. Tracks from the end of the
  /// block containing this Candidate to the beginning of its sequence.
  ///
  /// Optional. Can be used to fine-tune the cost model, or fine-tune legality
  /// decisions.
  LiveRegUnits FromEndOfBlockToStartOfSeq;

  /// Liveness information restricted to this Candidate's instruction sequence.
  ///
  /// Optional. Can be used to fine-tune the cost model, or fine-tune legality
  /// decisions.
  LiveRegUnits InSeq;

  /// True if FromEndOfBlockToStartOfSeq has been initialized.
  bool FromEndOfBlockToStartOfSeqWasSet = false;

  /// True if InSeq has been initialized.
  bool InSeqWasSet = false;

  /// Populate FromEndOfBlockToStartOfSeq with liveness information.
  void initFromEndOfBlockToStartOfSeq(const TargetRegisterInfo &TRI) {
    assert(MBB->getParent()->getRegInfo().tracksLiveness() &&
           "Candidate's Machine Function must track liveness");
    // Only initialize once.
    if (FromEndOfBlockToStartOfSeqWasSet)
      return;
    FromEndOfBlockToStartOfSeqWasSet = true;
    FromEndOfBlockToStartOfSeq.init(TRI);
    FromEndOfBlockToStartOfSeq.addLiveOuts(*MBB);
    // Compute liveness from the end of the block up to the beginning of the
    // outlining candidate.
    for (auto &MI : make_range(MBB->rbegin(),
                               (MachineBasicBlock::reverse_iterator)front()))
      FromEndOfBlockToStartOfSeq.stepBackward(MI);
  }

  /// Populate InSeq with liveness information.
  void initInSeq(const TargetRegisterInfo &TRI) {
    assert(MBB->getParent()->getRegInfo().tracksLiveness() &&
           "Candidate's Machine Function must track liveness");
    // Only initialize once.
    if (InSeqWasSet)
      return;
    InSeqWasSet = true;
    InSeq.init(TRI);
    for (auto &MI : make_range(front(), std::next(back())))
      InSeq.accumulate(MI);
  }

public:
  /// The index of this \p Candidate's \p OutlinedFunction in the list of
  /// \p OutlinedFunctions.
  unsigned FunctionIdx = 0;

  /// Identifier denoting the instructions to emit to call an outlined function
  /// from this point. Defined by the target.
  unsigned CallConstructionID = 0;

  /// Target-specific flags for this Candidate's MBB.
  unsigned Flags = 0x0;

  /// Return the number of instructions in this Candidate.
  unsigned getLength() const { return Len; }

  /// Return the start index of this candidate.
  unsigned getStartIdx() const { return StartIdx; }

  /// Return the end index of this candidate.
  unsigned getEndIdx() const { return StartIdx + Len - 1; }

  /// Set the CallConstructionID and CallOverhead of this candidate to CID and
  /// CO respectively.
  void setCallInfo(unsigned CID, unsigned CO) {
    CallConstructionID = CID;
    CallOverhead = CO;
  }

  /// Return the target-defined cost of calling an outlined function from this
  /// candidate's location.
  unsigned getCallOverhead() const { return CallOverhead; }

  MachineBasicBlock::iterator &front() { return FirstInst; }
  MachineBasicBlock::iterator &back() { return LastInst; }
  MachineFunction *getMF() const { return MBB->getParent(); }
  MachineBasicBlock *getMBB() const { return MBB; }

  /// \returns True if \p Reg is available from the end of the block to the
  /// beginning of the sequence.
  ///
  /// This query considers the following range:
  ///
  /// in_seq_1
  /// in_seq_2
  /// ...
  /// in_seq_n
  /// not_in_seq_1
  /// ...
  /// <end of block>
  bool isAvailableAcrossAndOutOfSeq(Register Reg,
                                    const TargetRegisterInfo &TRI) {
    if (!FromEndOfBlockToStartOfSeqWasSet)
      initFromEndOfBlockToStartOfSeq(TRI);
    return FromEndOfBlockToStartOfSeq.available(Reg);
  }

  /// \returns True if `isAvailableAcrossAndOutOfSeq` fails for any register
  /// in \p Regs.
  bool isAnyUnavailableAcrossOrOutOfSeq(std::initializer_list<Register> Regs,
                                        const TargetRegisterInfo &TRI) {
    if (!FromEndOfBlockToStartOfSeqWasSet)
      initFromEndOfBlockToStartOfSeq(TRI);
    return any_of(Regs, [&](Register Reg) {
      return !FromEndOfBlockToStartOfSeq.available(Reg);
    });
  }

  /// \returns True if \p Reg is available within the sequence itself.
  ///
  /// This query considers the following range:
  ///
  /// in_seq_1
  /// in_seq_2
  /// ...
  /// in_seq_n
  bool isAvailableInsideSeq(Register Reg, const TargetRegisterInfo &TRI) {
    if (!InSeqWasSet)
      initInSeq(TRI);
    return InSeq.available(Reg);
  }
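
  // Illustrative sketch (not part of this header): a target's outlining
  // legality check might combine these queries as follows. The register
  // AArch64::LR and the surrounding helper function are assumptions made for
  // the example.
  //
  //   bool canUseLRForCall(Candidate &C, const TargetRegisterInfo &TRI) {
  //     // LR may hold the return address of the outlined call only if it is
  //     // free both across and inside the candidate's sequence.
  //     return C.isAvailableAcrossAndOutOfSeq(AArch64::LR, TRI) &&
  //            C.isAvailableInsideSeq(AArch64::LR, TRI);
  //   }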

  /// The number of instructions that would be saved by outlining every
  /// candidate of this type.
  ///
  /// This is a fixed value which is not updated during the candidate pruning
  /// process. It is only used for deciding which candidate to keep if two
  /// candidates overlap. The true benefit is stored in the OutlinedFunction
  /// for some given candidate.
  unsigned Benefit = 0;

  Candidate(unsigned StartIdx, unsigned Len,
            MachineBasicBlock::iterator &FirstInst,
            MachineBasicBlock::iterator &LastInst, MachineBasicBlock *MBB,
            unsigned FunctionIdx, unsigned Flags)
      : StartIdx(StartIdx), Len(Len), FirstInst(FirstInst), LastInst(LastInst),
        MBB(MBB), FunctionIdx(FunctionIdx), Flags(Flags) {}
  Candidate() = delete;

  /// Used to ensure that \p Candidates are outlined in an order that
  /// preserves the start and end indices of other \p Candidates.
  bool operator<(const Candidate &RHS) const {
    return getStartIdx() > RHS.getStartIdx();
  }
};

/// The information necessary to create an outlined function for some
/// class of candidate.
struct OutlinedFunction {

public:
  std::vector<Candidate> Candidates;

  /// The actual outlined function created.
  /// This is initialized after we go through and create the actual function.
  MachineFunction *MF = nullptr;

  /// Represents the size of a sequence in bytes. (Some instructions vary
  /// widely in size, so just counting the instructions isn't very useful.)
  unsigned SequenceSize = 0;

  /// Target-defined overhead of constructing a frame for this function.
  unsigned FrameOverhead = 0;

  /// Target-defined identifier for constructing a frame for this function.
  unsigned FrameConstructionID = 0;

  /// Return the number of candidates for this \p OutlinedFunction.
  unsigned getOccurrenceCount() const { return Candidates.size(); }

  /// Return the number of bytes it would take to outline this
  /// function.
  unsigned getOutliningCost() const {
    unsigned CallOverhead = 0;
    for (const Candidate &C : Candidates)
      CallOverhead += C.getCallOverhead();
    return CallOverhead + SequenceSize + FrameOverhead;
  }

  /// Return the size in bytes of the unoutlined sequences.
  unsigned getNotOutlinedCost() const {
    return getOccurrenceCount() * SequenceSize;
  }

  /// Return the number of instructions that would be saved by outlining
  /// this function.
  unsigned getBenefit() const {
    unsigned NotOutlinedCost = getNotOutlinedCost();
    unsigned OutlinedCost = getOutliningCost();
    return (NotOutlinedCost < OutlinedCost) ? 0
                                            : NotOutlinedCost - OutlinedCost;
  }
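
  // Worked example with illustrative numbers: two candidates, each with a
  // CallOverhead of 4 bytes, SequenceSize = 12 and FrameOverhead = 4 give
  //   getNotOutlinedCost() = 2 * 12           = 24
  //   getOutliningCost()   = (4 + 4) + 12 + 4 = 24
  // so getBenefit() == 0 and outlining this function is not profitable.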

  /// Return the number of instructions in this sequence.
  unsigned getNumInstrs() const { return Candidates[0].getLength(); }

  OutlinedFunction(std::vector<Candidate> &Candidates, unsigned SequenceSize,
                   unsigned FrameOverhead, unsigned FrameConstructionID)
      : Candidates(Candidates), SequenceSize(SequenceSize),
        FrameOverhead(FrameOverhead), FrameConstructionID(FrameConstructionID) {
    const unsigned B = getBenefit();
    for (Candidate &C : Candidates)
      C.Benefit = B;
  }

  OutlinedFunction() = delete;
};
} // namespace outliner
} // namespace llvm

#endif
//===- CodeGen/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the set of machine-level, target-independent types that
// legal values in the code generator can have.
//
// Constants and properties are defined in ValueTypes.td.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEVALUETYPE_H
#define LLVM_CODEGEN_MACHINEVALUETYPE_H

#include "llvm/ADT/Sequence.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>

namespace llvm {

  class Type;
  class raw_ostream;

  /// Machine Value Type. Every type that is supported natively by some
  /// processor targeted by LLVM occurs here. This means that any legal value
  /// type can be represented by an MVT.
  class MVT {
  public:
    enum SimpleValueType : uint8_t {
      // Simple value types that aren't explicitly part of this enumeration
      // are considered extended value types.
      INVALID_SIMPLE_VALUE_TYPE = 0,

#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc) Ty = n,
#define GET_VT_RANGES
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_ATTR
#undef GET_VT_RANGES

      VALUETYPE_SIZE = LAST_VALUETYPE + 1,

      // This is the current maximum for LAST_VALUETYPE.
      // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit
      // vectors. This value must be a multiple of 32.
      MAX_ALLOWED_VALUETYPE = 224,
    };

    static_assert(FIRST_VALUETYPE > 0);
    static_assert(LAST_VALUETYPE < MAX_ALLOWED_VALUETYPE);

    SimpleValueType SimpleTy = INVALID_SIMPLE_VALUE_TYPE;

    constexpr MVT() = default;
    constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}

    bool operator>(const MVT& S)  const { return SimpleTy >  S.SimpleTy; }
    bool operator<(const MVT& S)  const { return SimpleTy <  S.SimpleTy; }
    bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
    bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
    bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
    bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }

    /// Support for debugging, callable in GDB: VT.dump()
    void dump() const;

    /// Implement operator<<.
    void print(raw_ostream &OS) const;

    /// Return true if this is a valid simple valuetype.
    bool isValid() const {
      return (SimpleTy >= MVT::FIRST_VALUETYPE &&
              SimpleTy <= MVT::LAST_VALUETYPE);
    }

    /// Return true if this is a FP or a vector FP type.
    bool isFloatingPoint() const {
      return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
               SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE));
    }

    /// Return true if this is an integer or a vector integer type.
    bool isInteger() const {
      return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
               SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE) ||
              (SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE &&
               SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE));
    }

    /// Return true if this is an integer, not including vectors.
    bool isScalarInteger() const {
      return (SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
              SimpleTy <= MVT::LAST_INTEGER_VALUETYPE);
    }

    /// Return true if this is a vector value type.
    bool isVector() const {
      return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
              SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
    }

    /// Return true if this is a vector value type where the
    /// runtime length is machine dependent
    bool isScalableVector() const {
      return (SimpleTy >= MVT::FIRST_SCALABLE_VECTOR_VALUETYPE &&
              SimpleTy <= MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
    }

    /// Return true if this is a custom target type that has a scalable size.
    bool isScalableTargetExtVT() const {
      return SimpleTy == MVT::aarch64svcount;
    }

    /// Return true if the type is a scalable type.
    bool isScalableVT() const {
      return isScalableVector() || isScalableTargetExtVT();
    }

    bool isFixedLengthVector() const {
      return (SimpleTy >= MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE &&
              SimpleTy <= MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
    }

    /// Return true if this is a 16-bit vector type.
    bool is16BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 16);
    }

    /// Return true if this is a 32-bit vector type.
    bool is32BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 32);
    }

    /// Return true if this is a 64-bit vector type.
    bool is64BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 64);
    }

    /// Return true if this is a 128-bit vector type.
    bool is128BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 128);
    }

    /// Return true if this is a 256-bit vector type.
    bool is256BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 256);
    }

    /// Return true if this is a 512-bit vector type.
    bool is512BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 512);
    }

    /// Return true if this is a 1024-bit vector type.
    bool is1024BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 1024);
    }

    /// Return true if this is a 2048-bit vector type.
    bool is2048BitVector() const {
      return (isFixedLengthVector() && getFixedSizeInBits() == 2048);
    }

    /// Return true if this is an overloaded type for TableGen.
    bool isOverloaded() const {
      switch (SimpleTy) {
#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc)                          \
  case Ty:                                                                     \
    return Any;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_ATTR
      default:
        return false;
      }
    }

    /// Return a vector with the same number of elements as this vector, but
    /// with the element type converted to an integer type with the same
    /// bitwidth.
    MVT changeVectorElementTypeToInteger() const {
      MVT EltTy = getVectorElementType();
      MVT IntTy = MVT::getIntegerVT(EltTy.getSizeInBits());
      MVT VecTy = MVT::getVectorVT(IntTy, getVectorElementCount());
      assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
             "Simple vector VT not representable by simple integer vector VT!");
      return VecTy;
    }

    /// Return a VT for a vector type whose attributes match ourselves
    /// with the exception of the element type that is chosen by the caller.
    MVT changeVectorElementType(MVT EltVT) const {
      MVT VecTy = MVT::getVectorVT(EltVT, getVectorElementCount());
      assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
             "Simple vector VT not representable by simple integer vector VT!");
      return VecTy;
    }

    /// Return the type converted to an equivalently sized integer or vector
    /// with integer element type. Similar to changeVectorElementTypeToInteger,
    /// but also handles scalars.
    MVT changeTypeToInteger() {
      if (isVector())
        return changeVectorElementTypeToInteger();
      return MVT::getIntegerVT(getSizeInBits());
    }

    /// Return a VT for a vector type with the same element type but
    /// half the number of elements.
    MVT getHalfNumVectorElementsVT() const {
      MVT EltVT = getVectorElementType();
      auto EltCnt = getVectorElementCount();
      assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
      return getVectorVT(EltVT, EltCnt.divideCoefficientBy(2));
    }

    // Return a VT for a vector type with the same element type but
    // double the number of elements.
    MVT getDoubleNumVectorElementsVT() const {
      MVT EltVT = getVectorElementType();
      auto EltCnt = getVectorElementCount();
      return MVT::getVectorVT(EltVT, EltCnt * 2);
    }

    /// Returns true if the vector's number of elements is a power of 2.
    bool isPow2VectorType() const {
      unsigned NElts = getVectorMinNumElements();
      return !(NElts & (NElts - 1));
    }

    /// Widens the length of the given vector MVT up to the nearest power of 2
    /// and returns that type.
    MVT getPow2VectorType() const {
      if (isPow2VectorType())
        return *this;

      ElementCount NElts = getVectorElementCount();
      unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
      NElts = ElementCount::get(NewMinCount, NElts.isScalable());
      return MVT::getVectorVT(getVectorElementType(), NElts);
    }

    /// If this is a vector, return the element type, otherwise return this.
    MVT getScalarType() const {
      return isVector() ? getVectorElementType() : *this;
    }

    MVT getVectorElementType() const {
      switch (SimpleTy) {
      default:
        llvm_unreachable("Not a vector MVT!");

#define GET_VT_VECATTR(Ty, Sc, nElem, ElTy, ElSz)                              \
  case Ty:                                                                     \
    return ElTy;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_VECATTR
      }
    }

    /// Given a vector type, return the minimum number of elements it contains.
    unsigned getVectorMinNumElements() const {
      switch (SimpleTy) {
      default:
        llvm_unreachable("Not a vector MVT!");

#define GET_VT_VECATTR(Ty, Sc, nElem, ElTy, ElSz)                              \
  case Ty:                                                                     \
    return nElem;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_VECATTR
      }
    }

    ElementCount getVectorElementCount() const {
      return ElementCount::get(getVectorMinNumElements(), isScalableVector());
    }

    unsigned getVectorNumElements() const {
      if (isScalableVector())
        llvm::reportInvalidSizeRequest(
            "Possible incorrect use of MVT::getVectorNumElements() for "
            "scalable vector. Scalable flag may be dropped, use "
            "MVT::getVectorElementCount() instead");
      return getVectorMinNumElements();
    }

    /// Returns the size of the specified MVT in bits.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getSizeInBits() const {
      switch (SimpleTy) {
      default:
        switch (SimpleTy) {
        default:
          llvm_unreachable("getSizeInBits called on extended MVT.");

#define GET_VT_ATTR(Ty, N, Sz, Any, Int, FP, Vec, Sc)                          \
  case Ty:                                                                     \
    return (Sc ? TypeSize::Scalable(Sz) : TypeSize::Fixed(Sz));
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_ATTR
        }
      case Other:
        llvm_unreachable("Value type is non-standard value, Other.");
      case iPTR:
        llvm_unreachable("Value type size is target-dependent. Ask TLI.");
      case iPTRAny:
      case iAny:
      case fAny:
      case vAny:
      case Any:
        llvm_unreachable("Value type is overloaded.");
      case token:
        llvm_unreachable("Token type is a sentinel that cannot be used "
                         "in codegen and has no size");
      case Metadata:
        llvm_unreachable("Value type is metadata.");
      case aarch64svcount: // FIXME: Not in the td.
        return TypeSize::Scalable(16);
      }
    }

    /// Return the size of the specified fixed width value type in bits. The
    /// function will assert if the type is scalable.
    uint64_t getFixedSizeInBits() const {
      return getSizeInBits().getFixedValue();
    }

    uint64_t getScalarSizeInBits() const {
      return getScalarType().getSizeInBits().getFixedValue();
    }

    /// Return the number of bytes overwritten by a store of the specified value
    /// type.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getStoreSize() const {
      TypeSize BaseSize = getSizeInBits();
      return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
    }

    // Return the number of bytes overwritten by a store of this value type or
    // this value type's element type in the case of a vector.
    uint64_t getScalarStoreSize() const {
      return getScalarType().getStoreSize().getFixedValue();
    }

    /// Return the number of bits overwritten by a store of the specified value
    /// type.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getStoreSizeInBits() const {
      return getStoreSize() * 8;
    }
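
    // For example (illustrative): MVT::i1 occupies one whole byte in memory,
    // so MVT(MVT::i1).getStoreSize() == TypeSize::Fixed(1) and
    // getStoreSizeInBits() == 8, following (1 + 7) / 8 == 1.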

    /// Returns true if the number of bits for the type is a multiple of an
    /// 8-bit byte.
    bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }

    /// Return true if we know at compile time this has more bits than VT.
    bool knownBitsGT(MVT VT) const {
      return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has more than or the same
    /// bits as VT.
    bool knownBitsGE(MVT VT) const {
      return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has fewer bits than VT.
    bool knownBitsLT(MVT VT) const {
      return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has fewer than or the same
    /// bits as VT.
    bool knownBitsLE(MVT VT) const {
      return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if this has more bits than VT.
    bool bitsGT(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsGT(VT);
    }

    /// Return true if this has no fewer bits than VT.
    bool bitsGE(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsGE(VT);
    }

    /// Return true if this has fewer bits than VT.
    bool bitsLT(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsLT(VT);
    }

    /// Return true if this has no more bits than VT.
    bool bitsLE(MVT VT) const {
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsLE(VT);
    }

    static MVT getFloatingPointVT(unsigned BitWidth) {
#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc)                          \
  if (FP == 3 && sz == BitWidth)                                               \
    return Ty;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_ATTR

      llvm_unreachable("Bad bit width!");
    }

    static MVT getIntegerVT(unsigned BitWidth) {
#define GET_VT_ATTR(Ty, n, sz, Any, Int, FP, Vec, Sc)                          \
  if (Int == 3 && sz == BitWidth)                                              \
    return Ty;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_ATTR

      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
    }

    static MVT getVectorVT(MVT VT, unsigned NumElements) {
#define GET_VT_VECATTR(Ty, Sc, nElem, ElTy, ElSz)                              \
  if (!Sc && VT.SimpleTy == ElTy && NumElements == nElem)                      \
    return Ty;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_VECATTR

      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
    }

    static MVT getScalableVectorVT(MVT VT, unsigned NumElements) {
#define GET_VT_VECATTR(Ty, Sc, nElem, ElTy, ElSz)                              \
  if (Sc && VT.SimpleTy == ElTy && NumElements == nElem)                       \
    return Ty;
#include "llvm/CodeGen/GenVT.inc"
#undef GET_VT_VECATTR

      return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
    }

    static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) {
      if (IsScalable)
        return getScalableVectorVT(VT, NumElements);
      return getVectorVT(VT, NumElements);
    }

    static MVT getVectorVT(MVT VT, ElementCount EC) {
      if (EC.isScalable())
        return getScalableVectorVT(VT, EC.getKnownMinValue());
      return getVectorVT(VT, EC.getKnownMinValue());
    }
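
    // Usage sketch: the concrete enumerators are generated from
    // ValueTypes.td into GenVT.inc; v4i32 and nxv4i32 below are the names
    // conventionally produced for these requests.
    //
    //   MVT Fixed    = MVT::getVectorVT(MVT::i32, 4);  // MVT::v4i32
    //   MVT Scalable =
    //       MVT::getVectorVT(MVT::i32, ElementCount::getScalable(4));
    //                                                  // MVT::nxv4i32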

    /// Return the value type corresponding to the specified type.  This returns
    /// all pointers as iPTR.  If HandleUnknown is true, unknown types are
    /// returned as Other, otherwise they are invalid.
    static MVT getVT(Type *Ty, bool HandleUnknown = false);

  public:
    /// SimpleValueType Iteration
    /// @{
    static auto all_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto integer_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_INTEGER_VALUETYPE,
                                MVT::LAST_INTEGER_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fp_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FP_VALUETYPE, MVT::LAST_FP_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_VECTOR_VALUETYPE,
                                MVT::LAST_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fixedlen_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE,
                                MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto scalable_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_SCALABLE_VECTOR_VALUETYPE,
                                MVT::LAST_SCALABLE_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto integer_fixedlen_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
                                MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fp_fixedlen_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE,
                                MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto integer_scalable_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
                                MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }

    static auto fp_scalable_vector_valuetypes() {
      return enum_seq_inclusive(MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE,
                                MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE,
                                force_iteration_on_noniterable_enum);
    }
    /// @}
  };

  inline raw_ostream &operator<<(raw_ostream &OS, const MVT &VT) {
    VT.print(OS);
    return OS;
  }

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEVALUETYPE_H
//===--- llvm/CodeGen/WasmEHFuncInfo.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Data structures for Wasm exception handling schemes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_WASMEHFUNCINFO_H
#define LLVM_CODEGEN_WASMEHFUNCINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallPtrSet.h"

namespace llvm {

class BasicBlock;
class Function;
class MachineBasicBlock;

namespace WebAssembly {
enum Tag { CPP_EXCEPTION = 0, C_LONGJMP = 1 };
}

using BBOrMBB = PointerUnion<const BasicBlock *, MachineBasicBlock *>;

struct WasmEHFuncInfo {
  // When there is an entry <A, B>, if an exception is not caught by A, it
  // should next unwind to the EH pad B.
  DenseMap<BBOrMBB, BBOrMBB> SrcToUnwindDest;
  DenseMap<BBOrMBB, SmallPtrSet<BBOrMBB, 4>> UnwindDestToSrcs; // reverse map

  // Helper functions
  const BasicBlock *getUnwindDest(const BasicBlock *BB) const {
    assert(hasUnwindDest(BB));
    return cast<const BasicBlock *>(SrcToUnwindDest.lookup(BB));
  }
  SmallPtrSet<const BasicBlock *, 4> getUnwindSrcs(const BasicBlock *BB) const {
    assert(hasUnwindSrcs(BB));
    const auto &Set = UnwindDestToSrcs.lookup(BB);
    SmallPtrSet<const BasicBlock *, 4> Ret;
    for (const auto P : Set)
      Ret.insert(cast<const BasicBlock *>(P));
    return Ret;
  }
  void setUnwindDest(const BasicBlock *BB, const BasicBlock *Dest) {
    SrcToUnwindDest[BB] = Dest;
    UnwindDestToSrcs[Dest].insert(BB);
  }
  bool hasUnwindDest(const BasicBlock *BB) const {
    return SrcToUnwindDest.count(BB);
  }
  bool hasUnwindSrcs(const BasicBlock *BB) const {
    return UnwindDestToSrcs.count(BB);
  }

  MachineBasicBlock *getUnwindDest(MachineBasicBlock *MBB) const {
    assert(hasUnwindDest(MBB));
    return cast<MachineBasicBlock *>(SrcToUnwindDest.lookup(MBB));
  }
  SmallPtrSet<MachineBasicBlock *, 4>
  getUnwindSrcs(MachineBasicBlock *MBB) const {
    assert(hasUnwindSrcs(MBB));
    const auto &Set = UnwindDestToSrcs.lookup(MBB);
    SmallPtrSet<MachineBasicBlock *, 4> Ret;
    for (const auto P : Set)
      Ret.insert(cast<MachineBasicBlock *>(P));
    return Ret;
  }
  void setUnwindDest(MachineBasicBlock *MBB, MachineBasicBlock *Dest) {
    SrcToUnwindDest[MBB] = Dest;
    UnwindDestToSrcs[Dest].insert(MBB);
  }
  bool hasUnwindDest(MachineBasicBlock *MBB) const {
    return SrcToUnwindDest.count(MBB);
  }
  bool hasUnwindSrcs(MachineBasicBlock *MBB) const {
    return UnwindDestToSrcs.count(MBB);
  }
};
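
// Illustrative sketch: a pass recording unwind relationships could populate
// and query the maps like this (CatchBB and CleanupBB are assumed
// placeholders for EH pads in the function being analyzed).
//
//   WasmEHFuncInfo EHInfo;
//   EHInfo.setUnwindDest(CatchBB, CleanupBB);
//   assert(EHInfo.hasUnwindDest(CatchBB) &&
//          EHInfo.getUnwindDest(CatchBB) == CleanupBB);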

// Analyze the IR in the given function to build WasmEHFuncInfo.
void calculateWasmEHInfo(const Function *F, WasmEHFuncInfo &EHInfo);

} // namespace llvm

#endif // LLVM_CODEGEN_WASMEHFUNCINFO_H
//===- MachineCycleAnalysis.h - Cycle Info for Machine IR -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineCycleInfo class, which is a thin wrapper over
// the Machine IR instance of GenericCycleInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINECYCLEANALYSIS_H
#define LLVM_CODEGEN_MACHINECYCLEANALYSIS_H

#include "llvm/ADT/GenericCycleInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineSSAContext.h"

namespace llvm {

extern template class GenericCycleInfo<MachineSSAContext>;
extern template class GenericCycle<MachineSSAContext>;

using MachineCycleInfo = GenericCycleInfo<MachineSSAContext>;
using MachineCycle = MachineCycleInfo::CycleT;

/// Legacy analysis pass which computes a \ref MachineCycleInfo.
class MachineCycleInfoWrapperPass : public MachineFunctionPass {
  MachineFunction *F = nullptr;
  MachineCycleInfo CI;

public:
  static char ID;

  MachineCycleInfoWrapperPass();

  MachineCycleInfo &getCycleInfo() { return CI; }
  const MachineCycleInfo &getCycleInfo() const { return CI; }

  bool runOnMachineFunction(MachineFunction &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
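
// Usage sketch with the legacy pass manager (MyMachinePass is a placeholder):
// a machine function pass declares the dependency in getAnalysisUsage() and
// then retrieves the computed cycle info.
//
//   void MyMachinePass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MachineCycleInfoWrapperPass>();
//     AU.setPreservesAll();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
//   bool MyMachinePass::runOnMachineFunction(MachineFunction &MF) {
//     MachineCycleInfo &CI =
//         getAnalysis<MachineCycleInfoWrapperPass>().getCycleInfo();
//     ...
//   }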

// TODO: add this function to GenericCycle template after implementing IR
//       version.
bool isCycleInvariant(const MachineCycle *Cycle, MachineInstr &I);

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINECYCLEANALYSIS_H
//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/IR/Instructions.h"

namespace llvm {
template <typename T> class SmallVectorImpl;
class GlobalValue;
class LLT;
class MachineBasicBlock;
class MachineFunction;
class TargetLoweringBase;
class TargetLowering;
class TargetMachine;
struct EVT;

/// Compute the linearized index of a member in a nested
/// aggregate/struct/array.
///
/// Given an LLVM IR aggregate type and a sequence of insertvalue or
/// extractvalue indices that identify a member, return the linearized index of
/// the start of the member, i.e. the number of elements in memory that come
/// before it. This is distinct from the member's offset in bytes.
///
/// \param Ty is the type indexed by \p Indices.
/// \param Indices is an optional pointer in the indices list to the current
/// index.
/// \param IndicesEnd is the end of the indices list.
/// \param CurIndex is the current index in the recursion.
///
/// \returns \p CurIndex plus the linear index in \p Ty of the member
/// identified by the indices list.
unsigned ComputeLinearIndex(Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

inline unsigned ComputeLinearIndex(Type *Ty,
                                   ArrayRef<unsigned> Indices,
                                   unsigned CurIndex = 0) {
  return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
}
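
// Worked example: in the aggregate type { i32, { i32, i32 }, i32 }, the
// member at indices {1, 1} (the second field of the inner struct) is preceded
// by two scalar elements, so ComputeLinearIndex returns 2 for it.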

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<TypeSize> *Offsets,
                     TypeSize StartingOffset);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<TypeSize> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *FixedOffsets,
                     uint64_t StartingOffset);

/// Variant of ComputeValueVTs that also produces the memory VTs.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<TypeSize> *Offsets,
                     TypeSize StartingOffset);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<TypeSize> *Offsets = nullptr,
                     uint64_t StartingOffset = 0);
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<EVT> *MemVTs,
                     SmallVectorImpl<uint64_t> *FixedOffsets,
                     uint64_t StartingOffset);

/// computeValueLLTs - Given an LLVM IR type, compute a sequence of
/// LLTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void computeValueLLTs(const DataLayout &DL, Type &Ty,
                      SmallVectorImpl<LLT> &ValueTys,
                      SmallVectorImpl<uint64_t> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *ExtractTypeInfo(Value *V);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code.  This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
/// return the equivalent code if we're allowed to assume that NaNs won't occur.
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);

/// getICmpCondCode - Return the LLVM IR integer condition code
/// corresponding to the given ISD integer condition code.
ICmpInst::Predicate getICmpCondCode(ISD::CondCode Pred);

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM);

/// Test whether, given that the input instruction is in the tail call
/// position, an attribute mismatch between the caller and the callee will
/// inhibit tail call optimizations.
/// \p AllowDifferingSizes is an output parameter which, if forming a tail call
/// is permitted, determines whether it's permitted only if the size of the
/// caller's and callee's return types match exactly.
bool attributesPermitTailCall(const Function *F, const Instruction *I,
                              const ReturnInst *Ret,
                              const TargetLoweringBase &TLI,
                              bool *AllowDifferingSizes = nullptr);

/// Test whether, given that the input instruction is in the tail call
/// position, the return type or any attributes of the function will inhibit
/// tail call optimization.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I,
                                     const ReturnInst *Ret,
                                     const TargetLoweringBase &TLI);

DenseMap<const MachineBasicBlock *, int>
getEHScopeMembership(const MachineFunction &MF);

} // End llvm namespace

#endif
//===- llvm/CodeGen/DIEValue.def - DIEValue types ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through all types of DIEValue.
//
//===----------------------------------------------------------------------===//

#if !(defined HANDLE_DIEVALUE || defined HANDLE_DIEVALUE_SMALL ||              \
      defined HANDLE_DIEVALUE_LARGE)
#error "Missing macro definition of HANDLE_DIEVALUE"
#endif

// Handler for all values.
#ifndef HANDLE_DIEVALUE
#define HANDLE_DIEVALUE(T)
#endif

// Handler for small values.
#ifndef HANDLE_DIEVALUE_SMALL
#define HANDLE_DIEVALUE_SMALL(T) HANDLE_DIEVALUE(T)
#endif

// Handler for large values.
#ifndef HANDLE_DIEVALUE_LARGE
#define HANDLE_DIEVALUE_LARGE(T) HANDLE_DIEVALUE(T)
#endif

HANDLE_DIEVALUE_SMALL(Integer)
HANDLE_DIEVALUE_SMALL(String)
HANDLE_DIEVALUE_SMALL(Expr)
HANDLE_DIEVALUE_SMALL(Label)
HANDLE_DIEVALUE_LARGE(BaseTypeRef)
HANDLE_DIEVALUE_LARGE(Delta)
HANDLE_DIEVALUE_SMALL(Entry)
HANDLE_DIEVALUE_LARGE(Block)
HANDLE_DIEVALUE_LARGE(Loc)
HANDLE_DIEVALUE_SMALL(LocList)
HANDLE_DIEVALUE_LARGE(InlineString)
HANDLE_DIEVALUE_LARGE(AddrOffset)

#undef HANDLE_DIEVALUE
#undef HANDLE_DIEVALUE_SMALL
#undef HANDLE_DIEVALUE_LARGE
//===- MIRParser.h - MIR serialization format parser ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This MIR serialization library is currently a work in progress. It can't
// serialize machine functions at this time.
//
// This file declares the functions that parse the MIR serialization format
// files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
#define LLVM_CODEGEN_MIRPARSER_MIRPARSER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include <functional>
#include <memory>
#include <optional>

namespace llvm {

class Function;
class LLVMContext;
class MemoryBuffer;
class Module;
class MIRParserImpl;
class MachineModuleInfo;
class SMDiagnostic;
class StringRef;

typedef llvm::function_ref<std::optional<std::string>(StringRef, StringRef)>
    DataLayoutCallbackTy;

/// This class initializes machine functions by applying the state loaded from
/// a MIR file.
class MIRParser {
  std::unique_ptr<MIRParserImpl> Impl;

public:
  MIRParser(std::unique_ptr<MIRParserImpl> Impl);
  MIRParser(const MIRParser &) = delete;
  ~MIRParser();

  /// Parses the optional LLVM IR module in the MIR file.
  ///
  /// A new, empty module is created if the LLVM IR isn't present.
  /// \returns nullptr if a parsing error occurred.
  std::unique_ptr<Module>
  parseIRModule(DataLayoutCallbackTy DataLayoutCallback =
                    [](StringRef, StringRef) { return std::nullopt; });

  /// Parses the MachineFunctions in the MIR file and adds them to the given
  /// MachineModuleInfo \p MMI.
  ///
  /// \returns true if an error occurred.
  bool parseMachineFunctions(Module &M, MachineModuleInfo &MMI);
};

/// This function is the main interface to the MIR serialization format parser.
///
/// It reads in a MIR file and returns a MIR parser that can parse the embedded
/// LLVM IR module and initialize the machine functions by parsing the machine
/// function's state.
///
/// \param Filename - The name of the file to parse.
/// \param Error - Error result info.
/// \param Context - Context which will be used for the parsed LLVM IR module.
/// \param ProcessIRFunction - function to run on every IR function or stub
/// loaded from the MIR file.
std::unique_ptr<MIRParser> createMIRParserFromFile(
    StringRef Filename, SMDiagnostic &Error, LLVMContext &Context,
    std::function<void(Function &)> ProcessIRFunction = nullptr);

/// This function is another interface to the MIR serialization format parser.
///
/// It returns a MIR parser that works with the given memory buffer and that can
/// parse the embedded LLVM IR module and initialize the machine functions by
/// parsing the machine function's state.
///
/// \param Contents - The MemoryBuffer containing the machine level IR.
/// \param Context - Context which will be used for the parsed LLVM IR module.
std::unique_ptr<MIRParser>
createMIRParser(std::unique_ptr<MemoryBuffer> Contents, LLVMContext &Context,
                std::function<void(Function &)> ProcessIRFunction = nullptr);

} // end namespace llvm

#endif // LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
//===- MIParser.h - Machine Instructions Parser -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the function that parses the machine instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRPARSER_MIPARSER_H
#define LLVM_CODEGEN_MIRPARSER_MIPARSER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/SMLoc.h"
#include <utility>

namespace llvm {

class MachineBasicBlock;
class MachineFunction;
class MDNode;
class RegisterBank;
struct SlotMapping;
class SMDiagnostic;
class SourceMgr;
class StringRef;
class TargetRegisterClass;
class TargetSubtargetInfo;

struct VRegInfo {
  enum uint8_t {
    UNKNOWN, NORMAL, GENERIC, REGBANK
  } Kind = UNKNOWN;
  bool Explicit = false; ///< VReg was explicitly specified in the .mir file.
  union {
    const TargetRegisterClass *RC;
    const RegisterBank *RegBank;
  } D;
  Register VReg;
  Register PreferredReg;
};

using Name2RegClassMap = StringMap<const TargetRegisterClass *>;
using Name2RegBankMap = StringMap<const RegisterBank *>;

struct PerTargetMIParsingState {
private:
  const TargetSubtargetInfo &Subtarget;

  /// Maps from instruction names to op codes.
  StringMap<unsigned> Names2InstrOpCodes;

  /// Maps from register names to registers.
  StringMap<Register> Names2Regs;

  /// Maps from register mask names to register masks.
  StringMap<const uint32_t *> Names2RegMasks;

  /// Maps from subregister names to subregister indices.
  StringMap<unsigned> Names2SubRegIndices;

  /// Maps from target index names to target indices.
  StringMap<int> Names2TargetIndices;

  /// Maps from direct target flag names to the direct target flag values.
  StringMap<unsigned> Names2DirectTargetFlags;

  /// Maps from bitmask target flag names to the bitmask target flag values.
  StringMap<unsigned> Names2BitmaskTargetFlags;

  /// Maps from MMO target flag names to MMO target flag values.
  StringMap<MachineMemOperand::Flags> Names2MMOTargetFlags;

  /// Maps from register class names to register classes.
  Name2RegClassMap Names2RegClasses;

  /// Maps from register bank names to register banks.
  Name2RegBankMap Names2RegBanks;

  void initNames2InstrOpCodes();
  void initNames2Regs();
  void initNames2RegMasks();
  void initNames2SubRegIndices();
  void initNames2TargetIndices();
  void initNames2DirectTargetFlags();
  void initNames2BitmaskTargetFlags();
  void initNames2MMOTargetFlags();

  void initNames2RegClasses();
  void initNames2RegBanks();

public:
  /// Try to convert an instruction name to an opcode. Return true if the
  /// instruction name is invalid.
  bool parseInstrName(StringRef InstrName, unsigned &OpCode);

  /// Try to convert a register name to a register number. Return true if the
  /// register name is invalid.
  bool getRegisterByName(StringRef RegName, Register &Reg);

  /// Check if the given identifier is a name of a register mask.
  ///
  /// Return null if the identifier isn't a register mask.
  const uint32_t *getRegMask(StringRef Identifier);

  /// Check if the given identifier is a name of a subregister index.
  ///
  /// Return 0 if the name isn't a subregister index.
  unsigned getSubRegIndex(StringRef Name);

  /// Try to convert a name of target index to the corresponding target index.
  ///
  /// Return true if the name isn't a name of a target index.
  bool getTargetIndex(StringRef Name, int &Index);

  /// Try to convert a name of a direct target flag to the corresponding
  /// target flag.
  ///
  /// Return true if the name isn't a name of a direct flag.
  bool getDirectTargetFlag(StringRef Name, unsigned &Flag);

  /// Try to convert a name of a bitmask target flag to the corresponding
  /// target flag.
  ///
  /// Return true if the name isn't a name of a bitmask target flag.
  bool getBitmaskTargetFlag(StringRef Name, unsigned &Flag);

  /// Try to convert a name of a MachineMemOperand target flag to the
  /// corresponding target flag.
  ///
  /// Return true if the name isn't a name of a target MMO flag.
  bool getMMOTargetFlag(StringRef Name, MachineMemOperand::Flags &Flag);

  /// Check if the given identifier is a name of a register class.
  ///
  /// Return null if the name isn't a register class.
  const TargetRegisterClass *getRegClass(StringRef Name);

  /// Check if the given identifier is a name of a register bank.
  ///
  /// Return null if the name isn't a register bank.
  const RegisterBank *getRegBank(StringRef Name);

  PerTargetMIParsingState(const TargetSubtargetInfo &STI)
    : Subtarget(STI) {
    initNames2RegClasses();
    initNames2RegBanks();
  }

  ~PerTargetMIParsingState() = default;

  void setTarget(const TargetSubtargetInfo &NewSubtarget);
};
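
// Illustrative usage sketch (hypothetical `STI` and error handling): note the
// convention that the lookup helpers above return true on *failure*.
//
//   PerTargetMIParsingState PTS(STI);
//   unsigned OpCode;
//   if (PTS.parseInstrName("COPY", OpCode))
//     ...report "unknown instruction name"...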

struct PerFunctionMIParsingState {
  BumpPtrAllocator Allocator;
  MachineFunction &MF;
  SourceMgr *SM;
  const SlotMapping &IRSlots;
  PerTargetMIParsingState &Target;

  std::map<unsigned, TrackingMDNodeRef> MachineMetadataNodes;
  std::map<unsigned, std::pair<TempMDTuple, SMLoc>> MachineForwardRefMDNodes;

  DenseMap<unsigned, MachineBasicBlock *> MBBSlots;
  DenseMap<Register, VRegInfo *> VRegInfos;
  StringMap<VRegInfo *> VRegInfosNamed;
  DenseMap<unsigned, int> FixedStackObjectSlots;
  DenseMap<unsigned, int> StackObjectSlots;
  DenseMap<unsigned, unsigned> ConstantPoolSlots;
  DenseMap<unsigned, unsigned> JumpTableSlots;

  /// Maps from slot numbers to function's unnamed values.
  DenseMap<unsigned, const Value *> Slots2Values;

  PerFunctionMIParsingState(MachineFunction &MF, SourceMgr &SM,
                            const SlotMapping &IRSlots,
                            PerTargetMIParsingState &Target);

  VRegInfo &getVRegInfo(Register Num);
  VRegInfo &getVRegInfoNamed(StringRef RegName);
  const Value *getIRValue(unsigned Slot);
};
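
// Illustrative sketch (hypothetical names): a numbered operand like '%42'
// would resolve through getVRegInfo, and a named vreg like '%ptr' through
// getVRegInfoNamed.
//
//   VRegInfo &Info  = PFS.getVRegInfo(Num);   // Num parsed from '%42'
//   VRegInfo &Named = PFS.getVRegInfoNamed("ptr");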

/// Parse the machine basic block definitions, and skip the machine
/// instructions.
///
/// This function runs the first parsing pass on the machine function's body.
/// It parses only the machine basic block definitions and creates the machine
/// basic blocks in the given machine function.
///
/// The machine instructions aren't parsed during the first pass because not
/// all of the machine basic blocks are defined yet - this makes it impossible
/// to resolve the machine basic block references.
///
/// Return true if an error occurred.
bool parseMachineBasicBlockDefinitions(PerFunctionMIParsingState &PFS,
                                       StringRef Src, SMDiagnostic &Error);

/// Parse the machine instructions.
///
/// This function runs the second parsing pass on the machine function's body.
/// It skips the machine basic block definitions and parses only the machine
/// instructions and basic block attributes like liveins and successors.
///
/// The second parsing pass assumes that the first parsing pass already ran
/// on the given source string.
///
/// Return true if an error occurred.
bool parseMachineInstructions(PerFunctionMIParsingState &PFS, StringRef Src,
                              SMDiagnostic &Error);
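
// Illustrative sketch of the two-pass protocol (hypothetical `PFS`, `BodySrc`,
// and error handling): both passes run over the same source string, in order.
//
//   SMDiagnostic Error;
//   if (parseMachineBasicBlockDefinitions(PFS, BodySrc, Error))
//     return error(Error);   // pass 1: create every MBB
//   if (parseMachineInstructions(PFS, BodySrc, Error))
//     return error(Error);   // pass 2: MBB references now resolve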

bool parseMBBReference(PerFunctionMIParsingState &PFS,
                       MachineBasicBlock *&MBB, StringRef Src,
                       SMDiagnostic &Error);

bool parseRegisterReference(PerFunctionMIParsingState &PFS,
                            Register &Reg, StringRef Src,
                            SMDiagnostic &Error);

bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg,
                                 StringRef Src, SMDiagnostic &Error);

bool parseVirtualRegisterReference(PerFunctionMIParsingState &PFS,
                                   VRegInfo *&Info, StringRef Src,
                                   SMDiagnostic &Error);

bool parseStackObjectReference(PerFunctionMIParsingState &PFS, int &FI,
                               StringRef Src, SMDiagnostic &Error);

bool parseMDNode(PerFunctionMIParsingState &PFS, MDNode *&Node, StringRef Src,
                 SMDiagnostic &Error);

bool parseMachineMetadata(PerFunctionMIParsingState &PFS, StringRef Src,
                          SMRange SourceRange, SMDiagnostic &Error);

} // end namespace llvm

#endif // LLVM_CODEGEN_MIRPARSER_MIPARSER_H
//===- llvm/CodeGen/MachineModuleInfoImpls.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines object-file format specific implementations of
// MachineModuleInfoImpl.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
#define LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include <cassert>

namespace llvm {

class MCSymbol;

/// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation
/// for MachO targets.
class MachineModuleInfoMachO : public MachineModuleInfoImpl {
  /// GVStubs - Darwin '$non_lazy_ptr' stubs.  The key is something like
  /// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
  /// is true if this GV is external.
  DenseMap<MCSymbol *, StubValueTy> GVStubs;

  /// ThreadLocalGVStubs - Darwin '$non_lazy_ptr' stubs.  The key is something
  /// like "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra
  /// bit is true if this GV is external.
  DenseMap<MCSymbol *, StubValueTy> ThreadLocalGVStubs;

  virtual void anchor(); // Out of line virtual method.

public:
  MachineModuleInfoMachO(const MachineModuleInfo &) {}

  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
    assert(Sym && "Key cannot be null");
    return GVStubs[Sym];
  }

  StubValueTy &getThreadLocalGVStubEntry(MCSymbol *Sym) {
    assert(Sym && "Key cannot be null");
    return ThreadLocalGVStubs[Sym];
  }

  /// Accessor methods to return the set of stubs in sorted order.
  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
  SymbolListTy GetThreadLocalGVStubList() {
    return getSortedStubs(ThreadLocalGVStubs);
  }
};
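
// Illustrative sketch: clients normally reach this impl through
// MachineModuleInfo::getObjFileInfo<>() and populate stub entries lazily
// (the symbol names below are hypothetical).
//
//   auto &MachO = MMI.getObjFileInfo<MachineModuleInfoMachO>();
//   MachineModuleInfoImpl::StubValueTy &Entry = MachO.getGVStubEntry(StubSym);
//   if (!Entry.getPointer())
//     Entry = MachineModuleInfoImpl::StubValueTy(TargetSym, /*External=*/true);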

/// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation
/// for ELF targets.
class MachineModuleInfoELF : public MachineModuleInfoImpl {
  /// GVStubs - These stubs are used to materialize global addresses in PIC
  /// mode.
  DenseMap<MCSymbol *, StubValueTy> GVStubs;

  virtual void anchor(); // Out of line virtual method.

public:
  MachineModuleInfoELF(const MachineModuleInfo &) {}

  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
    assert(Sym && "Key cannot be null");
    return GVStubs[Sym];
  }

  /// Accessor methods to return the set of stubs in sorted order.

  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
};

/// MachineModuleInfoCOFF - This is a MachineModuleInfoImpl implementation
/// for COFF targets.
class MachineModuleInfoCOFF : public MachineModuleInfoImpl {
  /// GVStubs - These stubs are used to materialize global addresses in PIC
  /// mode.
  DenseMap<MCSymbol *, StubValueTy> GVStubs;

  virtual void anchor(); // Out of line virtual method.

public:
  MachineModuleInfoCOFF(const MachineModuleInfo &) {}

  StubValueTy &getGVStubEntry(MCSymbol *Sym) {
    assert(Sym && "Key cannot be null");
    return GVStubs[Sym];
  }

  /// Accessor methods to return the set of stubs in sorted order.

  SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
};

/// MachineModuleInfoWasm - This is a MachineModuleInfoImpl implementation
/// for Wasm targets.
class MachineModuleInfoWasm : public MachineModuleInfoImpl {
  virtual void anchor(); // Out of line virtual method.

public:
  MachineModuleInfoWasm(const MachineModuleInfo &) {}

  SetVector<StringRef> MachineSymbolsUsed;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
//===- SwiftErrorValueTracking.h - Track swifterror VReg vals --*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements a limited mem2reg-like analysis to promote uses of function
// arguments and allocas marked with swifterror from memory into virtual
// registers tracked by this class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SWIFTERRORVALUETRACKING_H
#define LLVM_CODEGEN_SWIFTERRORVALUETRACKING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include <utility>


namespace llvm {
  class Function;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class TargetInstrInfo;
  class TargetLowering;

class SwiftErrorValueTracking {
  // Some useful objects to reduce the number of function arguments needed.
  MachineFunction *MF;
  const Function *Fn;
  const TargetLowering *TLI;
  const TargetInstrInfo *TII;

  /// A map from swifterror value in a basic block to the virtual register it is
  /// currently represented by.
  DenseMap<std::pair<const MachineBasicBlock *, const Value *>, Register>
      VRegDefMap;

  /// A list of upward exposed vreg uses that need to be satisfied by either a
  /// copy def or a phi node at the beginning of the basic block representing
  /// the predecessors' swifterror value.
  DenseMap<std::pair<const MachineBasicBlock *, const Value *>, Register>
      VRegUpwardsUse;

  /// A map from instructions that define/use a swifterror value to the virtual
  /// register that represents that def/use.
  llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, Register>
      VRegDefUses;

  /// The swifterror argument of the current function.
  const Value *SwiftErrorArg;

  using SwiftErrorValues = SmallVector<const Value*, 1>;
  /// A function can only have a single swifterror argument. And if it does
  /// have a swifterror argument, it must be the first entry in
  /// SwiftErrorVals.
  SwiftErrorValues SwiftErrorVals;

public:
  /// Initialize data structures for specified new function.
  void setFunction(MachineFunction &MF);

  /// Get the (unique) function argument that was marked swifterror, or nullptr
  /// if this function has no swifterror args.
  const Value *getFunctionArg() const {
    return SwiftErrorArg;
  }

  /// Get or create the swifterror value virtual register in
  /// VRegDefMap for this basic block.
  Register getOrCreateVReg(const MachineBasicBlock *, const Value *);

  /// Set the swifterror virtual register in the VRegDefMap for this
  /// basic block.
  void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register);

  /// Get or create the swifterror value virtual register for a def of a
  /// swifterror by an instruction.
  Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *,
                                const Value *);

  /// Get or create the swifterror value virtual register for a use of a
  /// swifterror by an instruction.
  Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *,
                                const Value *);

  /// Create initial definitions of swifterror values in the entry block of the
  /// current function.
  bool createEntriesInEntryBlock(DebugLoc DbgLoc);

  /// Propagate assigned swifterror vregs through a function, synthesizing PHI
  /// nodes when needed to maintain consistency.
  void propagateVRegs();

  void preassignVRegs(MachineBasicBlock *MBB, BasicBlock::const_iterator Begin,
                      BasicBlock::const_iterator End);
};
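
// Illustrative call order during instruction selection (sketch only; `MF` and
// `DbgLoc` are hypothetical):
//
//   SwiftErrorValueTracking SwiftError;
//   SwiftError.setFunction(MF);                    // per-function reset
//   SwiftError.createEntriesInEntryBlock(DbgLoc);  // initial vreg defs
//   ...select instructions, calling getOrCreateVRegDefAt/UseAt as needed...
//   SwiftError.propagateVRegs();                   // synthesize PHIs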

}

#endif
//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineMemOperand class, which is a
// description of a memory reference. It is used to help track dependencies
// in the backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
#define LLVM_CODEGEN_MACHINEMEMOPERAND_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

class MDNode;
class raw_ostream;
class MachineFunction;
class ModuleSlotTracker;
class TargetInstrInfo;

/// This class contains a discriminated union of information about pointers in
/// memory operands, relating them back to LLVM IR or to virtual locations (such
/// as frame indices) that are exposed during codegen.
struct MachinePointerInfo {
  /// This is the IR pointer value for the access, or it is null if unknown.
  PointerUnion<const Value *, const PseudoSourceValue *> V;

  /// Offset - This is an offset from the base Value*.
  int64_t Offset;

  unsigned AddrSpace = 0;

  uint8_t StackID;

  explicit MachinePointerInfo(const Value *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getType()->getPointerAddressSpace() : 0;
  }

  explicit MachinePointerInfo(const PseudoSourceValue *v, int64_t offset = 0,
                              uint8_t ID = 0)
      : V(v), Offset(offset), StackID(ID) {
    AddrSpace = v ? v->getAddressSpace() : 0;
  }

  explicit MachinePointerInfo(unsigned AddressSpace = 0, int64_t offset = 0)
      : V((const Value *)nullptr), Offset(offset), AddrSpace(AddressSpace),
        StackID(0) {}

  explicit MachinePointerInfo(
    PointerUnion<const Value *, const PseudoSourceValue *> v,
    int64_t offset = 0,
    uint8_t ID = 0)
    : V(v), Offset(offset), StackID(ID) {
    if (V) {
      if (const auto *ValPtr = dyn_cast_if_present<const Value *>(V))
        AddrSpace = ValPtr->getType()->getPointerAddressSpace();
      else
        AddrSpace = cast<const PseudoSourceValue *>(V)->getAddressSpace();
    }
  }

  MachinePointerInfo getWithOffset(int64_t O) const {
    if (V.isNull())
      return MachinePointerInfo(AddrSpace, Offset + O);
    if (isa<const Value *>(V))
      return MachinePointerInfo(cast<const Value *>(V), Offset + O, StackID);
    return MachinePointerInfo(cast<const PseudoSourceValue *>(V), Offset + O,
                              StackID);
  }

  /// Return true if memory region [V, V+Offset+Size) is known to be
  /// dereferenceable.
  bool isDereferenceable(unsigned Size, LLVMContext &C,
                         const DataLayout &DL) const;

  /// Return the LLVM IR address space number that this pointer points into.
  unsigned getAddrSpace() const;

  /// Return a MachinePointerInfo record that refers to the constant pool.
  static MachinePointerInfo getConstantPool(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to the specified
  /// FrameIndex.
  static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
                                          int64_t Offset = 0);

  /// Return a MachinePointerInfo record that refers to a jump table entry.
  static MachinePointerInfo getJumpTable(MachineFunction &MF);

  /// Return a MachinePointerInfo record that refers to a GOT entry.
  static MachinePointerInfo getGOT(MachineFunction &MF);

  /// Stack pointer relative access.
  static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset,
                                     uint8_t ID = 0);

  /// Stack memory without other information.
  static MachinePointerInfo getUnknownStack(MachineFunction &MF);
};
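
// Illustrative sketch of common construction patterns (hypothetical `GV`,
// `MF`, and `FI`):
//
//   MachinePointerInfo P1(GV);                                   // IR value
//   MachinePointerInfo P2 = MachinePointerInfo::getFixedStack(MF, FI);
//   MachinePointerInfo P3 = P1.getWithOffset(8);                 // same base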


//===----------------------------------------------------------------------===//
/// A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
/// Value of the reference along with a byte size and offset. This allows it
/// to describe lowered loads and stores. Also, the special PseudoSourceValue
/// objects can be used to represent loads and stores to memory locations
/// that aren't explicit in the regular LLVM IR.
///
class MachineMemOperand {
public:
  /// Flags values. These may be or'd together.
  enum Flags : uint16_t {
    // No flags set.
    MONone = 0,
    /// The memory access reads data.
    MOLoad = 1u << 0,
    /// The memory access writes data.
    MOStore = 1u << 1,
    /// The memory access is volatile.
    MOVolatile = 1u << 2,
    /// The memory access is non-temporal.
    MONonTemporal = 1u << 3,
    /// The memory access is dereferenceable (i.e., doesn't trap).
    MODereferenceable = 1u << 4,
    /// The memory access always returns the same value (or traps).
    MOInvariant = 1u << 5,

    // Reserved for use by target-specific passes.
    // Targets may override getSerializableMachineMemOperandTargetFlags() to
    // enable MIR serialization/parsing of these flags.  If more of these flags
    // are added, the MIR printing/parsing code will need to be updated as well.
    MOTargetFlag1 = 1u << 6,
    MOTargetFlag2 = 1u << 7,
    MOTargetFlag3 = 1u << 8,

    LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ MOTargetFlag3)
  };

private:
  /// Atomic information for this memory operation.
  struct MachineAtomicInfo {
    /// Synchronization scope ID for this memory operation.
    unsigned SSID : 8;            // SyncScope::ID
    /// Atomic ordering requirements for this memory operation. For cmpxchg
    /// atomic operations, atomic ordering requirements when store occurs.
    unsigned Ordering : 4;        // enum AtomicOrdering
    /// For cmpxchg atomic operations, atomic ordering requirements when store
    /// does not occur.
    unsigned FailureOrdering : 4; // enum AtomicOrdering
  };

  MachinePointerInfo PtrInfo;

  /// Track the memory type of the access. An access size which is unknown or
  /// too large to be represented by LLT should use the invalid LLT.
  LLT MemoryType;

  Flags FlagVals;
  Align BaseAlign;
  MachineAtomicInfo AtomicInfo;
  AAMDNodes AAInfo;
  const MDNode *Ranges;

public:
  /// Construct a MachineMemOperand object with the specified PtrInfo, flags,
  /// size, and base alignment. For atomic operations the synchronization scope
  /// and atomic ordering requirements must also be specified. For cmpxchg
  /// atomic operations the atomic ordering requirements when store does not
  /// occur must also be specified.
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
                    Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LLT type, Align a,
                    const AAMDNodes &AAInfo = AAMDNodes(),
                    const MDNode *Ranges = nullptr,
                    SyncScope::ID SSID = SyncScope::System,
                    AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
                    AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

  const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }

  /// Return the base address of the memory access. This may either be a normal
  /// LLVM IR Value, or one of the special values used in CodeGen.
  /// Special values are those obtained via
  /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
  /// other PseudoSourceValue member functions which return objects which stand
  /// for frame/stack pointer relative references and other special references
  /// which are not representable in the high-level IR.
  const Value *getValue() const {
    return dyn_cast_if_present<const Value *>(PtrInfo.V);
  }

  const PseudoSourceValue *getPseudoValue() const {
    return dyn_cast_if_present<const PseudoSourceValue *>(PtrInfo.V);
  }

  const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }

  /// Return the raw flags of the source value, \see Flags.
  Flags getFlags() const { return FlagVals; }

  /// Bitwise OR the current flags with the given flags.
  void setFlags(Flags f) { FlagVals |= f; }

  /// For normal values, this is a byte offset added to the base address.
  /// For PseudoSourceValue::FPRel values, this is the FrameIndex number.
  int64_t getOffset() const { return PtrInfo.Offset; }

  unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }

  /// Return the memory type of the memory reference. This should only be relied
  /// on for GlobalISel G_* operation legalization.
  LLT getMemoryType() const { return MemoryType; }

  /// Return the size in bytes of the memory reference.
  uint64_t getSize() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
  }

  /// Return the size in bits of the memory reference.
  uint64_t getSizeInBits() const {
    return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
  }

  LLT getType() const {
    return MemoryType;
  }

  /// Return the minimum known alignment in bytes of the actual memory
  /// reference.
  Align getAlign() const;

  /// Return the minimum known alignment in bytes of the base address, without
  /// the offset.
  Align getBaseAlign() const { return BaseAlign; }

  /// Return the AA tags for the memory reference.
  AAMDNodes getAAInfo() const { return AAInfo; }

  /// Return the range tag for the memory reference.
  const MDNode *getRanges() const { return Ranges; }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const {
    return static_cast<SyncScope::ID>(AtomicInfo.SSID);
  }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.Ordering);
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    return static_cast<AtomicOrdering>(AtomicInfo.FailureOrdering);
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation.  (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const {
    return getMergedAtomicOrdering(getSuccessOrdering(), getFailureOrdering());
  }

  bool isLoad() const { return FlagVals & MOLoad; }
  bool isStore() const { return FlagVals & MOStore; }
  bool isVolatile() const { return FlagVals & MOVolatile; }
  bool isNonTemporal() const { return FlagVals & MONonTemporal; }
  bool isDereferenceable() const { return FlagVals & MODereferenceable; }
  bool isInvariant() const { return FlagVals & MOInvariant; }

  /// Returns true if this operation has an atomic ordering requirement of
  /// unordered or higher, false otherwise.
  bool isAtomic() const {
    return getSuccessOrdering() != AtomicOrdering::NotAtomic;
  }

  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const {
    return (getSuccessOrdering() == AtomicOrdering::NotAtomic ||
            getSuccessOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  /// Update this MachineMemOperand to reflect the alignment of MMO, if it has a
  /// greater alignment. This must only be used when the new alignment applies
  /// to all users of this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *MMO);

  /// Change the SourceValue for this MachineMemOperand. This should only be
  /// used when an object is being relocated and all references to it are being
  /// updated.
  void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
  void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
  void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }

  /// Reset the tracked memory type.
  void setType(LLT NewTy) {
    MemoryType = NewTy;
  }

  /// Support for operator<<.
  /// @{
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             SmallVectorImpl<StringRef> &SSNs, const LLVMContext &Context,
             const MachineFrameInfo *MFI, const TargetInstrInfo *TII) const;
  /// @}

  friend bool operator==(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return LHS.getValue() == RHS.getValue() &&
           LHS.getPseudoValue() == RHS.getPseudoValue() &&
           LHS.getSize() == RHS.getSize() &&
           LHS.getOffset() == RHS.getOffset() &&
           LHS.getFlags() == RHS.getFlags() &&
           LHS.getAAInfo() == RHS.getAAInfo() &&
           LHS.getRanges() == RHS.getRanges() &&
           LHS.getAlign() == RHS.getAlign() &&
           LHS.getAddrSpace() == RHS.getAddrSpace();
  }

  friend bool operator!=(const MachineMemOperand &LHS,
                         const MachineMemOperand &RHS) {
    return !(LHS == RHS);
  }
};
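
// Illustrative sketch: MMOs are normally created through
// MachineFunction::getMachineMemOperand rather than constructed directly
// (hypothetical `MF`, `MFI`, and `FI`):
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
//       MFI.getObjectSize(FI), MFI.getObjectAlign(FI));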

} // End llvm namespace

#endif
///===- LazyMachineBlockFrequencyInfo.h - Lazy Block Frequency -*- C++ -*--===//
///
/// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
/// See https://llvm.org/LICENSE.txt for license information.
/// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
///
///===---------------------------------------------------------------------===//
/// \file
/// This is an alternative analysis pass to MachineBlockFrequencyInfo.  The
/// difference is that with this pass the block frequencies are not computed
/// when the analysis pass is executed but rather when the BFI result is
/// explicitly requested by the analysis client.
///
///===---------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LAZYMACHINEBLOCKFREQUENCYINFO_H
#define LLVM_CODEGEN_LAZYMACHINEBLOCKFREQUENCYINFO_H

#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"

namespace llvm {
/// This is an alternative analysis pass to MachineBlockFrequencyInfo.
/// The difference is that with this pass, the block frequencies are not
/// computed when the analysis pass is executed but rather when the BFI result
/// is explicitly requested by the analysis client.
///
/// This works by checking whether MBFI is already available, and otherwise
/// generating it on the fly.  In that case the analyses it depends on (LI, DT)
/// are also queried and, if unavailable, computed on the fly.
///
/// Note that this functionality is not expected to be needed under the new
/// pass manager, where analyses are already executed on demand.

class LazyMachineBlockFrequencyInfoPass : public MachineFunctionPass {
private:
  /// If generated on the fly, this owns the instance.
  mutable std::unique_ptr<MachineBlockFrequencyInfo> OwnedMBFI;

  /// If generated on the fly, this owns the instance.
  mutable std::unique_ptr<MachineLoopInfo> OwnedMLI;

  /// If generated on the fly, this owns the instance.
  mutable std::unique_ptr<MachineDominatorTree> OwnedMDT;

  /// The function.
  MachineFunction *MF = nullptr;

  /// Calculate MBFI and all the other analyses that it requires but that are
  /// not yet available.
  MachineBlockFrequencyInfo &calculateIfNotAvailable() const;

public:
  static char ID;

  LazyMachineBlockFrequencyInfoPass();

  /// Compute and return the block frequencies.
  MachineBlockFrequencyInfo &getBFI() { return calculateIfNotAvailable(); }

  /// Compute and return the block frequencies.
  const MachineBlockFrequencyInfo &getBFI() const {
    return calculateIfNotAvailable();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &F) override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M) const override;
};
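
// Illustrative sketch of a client pass (hypothetical pass body): requiring
// the lazy pass is cheap because the BFI computation is deferred to getBFI().
//
//   void getAnalysisUsage(AnalysisUsage &AU) const override {
//     AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//   ...
//   const MachineBlockFrequencyInfo &MBFI =
//       getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI();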
}
#endif
//===-- CommandFlags.h - Command Line Flags Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains codegen-specific flags that are shared between different
// command line tools. The tools "llc" and "opt" both use this file to prevent
// flag duplication.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_COMMANDFLAGS_H
#define LLVM_CODEGEN_COMMANDFLAGS_H

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>
#include <string>
#include <vector>

namespace llvm {

class Module;
class AttrBuilder;
class Function;
class Triple;

namespace codegen {

std::string getMArch();

std::string getMCPU();

std::vector<std::string> getMAttrs();

Reloc::Model getRelocModel();
std::optional<Reloc::Model> getExplicitRelocModel();

ThreadModel::Model getThreadModel();

CodeModel::Model getCodeModel();
std::optional<CodeModel::Model> getExplicitCodeModel();

llvm::ExceptionHandling getExceptionModel();

std::optional<CodeGenFileType> getExplicitFileType();

CodeGenFileType getFileType();

FramePointerKind getFramePointerUsage();

bool getEnableUnsafeFPMath();

bool getEnableNoInfsFPMath();

bool getEnableNoNaNsFPMath();

bool getEnableNoSignedZerosFPMath();

bool getEnableApproxFuncFPMath();

bool getEnableNoTrappingFPMath();

DenormalMode::DenormalModeKind getDenormalFPMath();
DenormalMode::DenormalModeKind getDenormalFP32Math();

bool getEnableHonorSignDependentRoundingFPMath();

llvm::FloatABI::ABIType getFloatABIForCalls();

llvm::FPOpFusion::FPOpFusionMode getFuseFPOps();

SwiftAsyncFramePointerMode getSwiftAsyncFramePointer();

bool getDontPlaceZerosInBSS();

bool getEnableGuaranteedTailCallOpt();

bool getEnableAIXExtendedAltivecABI();

bool getDisableTailCalls();

bool getStackSymbolOrdering();

unsigned getOverrideStackAlignment();

bool getStackRealign();

std::string getTrapFuncName();

bool getUseCtors();

bool getDisableIntegratedAS();

bool getRelaxELFRelocations();

bool getDataSections();
std::optional<bool> getExplicitDataSections();

bool getFunctionSections();
std::optional<bool> getExplicitFunctionSections();

bool getIgnoreXCOFFVisibility();

bool getXCOFFTracebackTable();

std::string getBBSections();

unsigned getTLSSize();

bool getEmulatedTLS();
std::optional<bool> getExplicitEmulatedTLS();

bool getUniqueSectionNames();

bool getUniqueBasicBlockSectionNames();

llvm::EABI getEABIVersion();

llvm::DebuggerKind getDebuggerTuningOpt();

bool getEnableStackSizeSection();

bool getEnableAddrsig();

bool getEmitCallSiteInfo();

bool getEnableMachineFunctionSplitter();

bool getEnableDebugEntryValues();

bool getValueTrackingVariableLocations();
std::optional<bool> getExplicitValueTrackingVariableLocations();

bool getForceDwarfFrameSection();

bool getXRayFunctionIndex();

bool getDebugStrictDwarf();

unsigned getAlignLoops();

bool getJMCInstrument();

bool getXCOFFReadOnlyPointers();

/// Create this object with static storage to register codegen-related command
/// line options.
struct RegisterCodeGenFlags {
  RegisterCodeGenFlags();
};

llvm::BasicBlockSection getBBSectionsMode(llvm::TargetOptions &Options);

/// Common utility function tightly tied to the options listed here.
/// Initializes a TargetOptions object with CodeGen flags and returns it.
/// \p TheTriple is used to determine the default value for options if
///    options are not explicitly specified. If those triple-dependent option
///    values do not affect your component, a default Triple() can be
///    passed in.
TargetOptions InitTargetOptionsFromCodeGenFlags(const llvm::Triple &TheTriple);
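
// Illustrative sketch (hypothetical tool code; `TheTriple` comes from the
// tool's own flags): the flags are registered once via a static object and
// consumed after command-line parsing.
//
//   static codegen::RegisterCodeGenFlags CGF;  // registers the cl::opt flags
//
//   // ...after cl::ParseCommandLineOptions(argc, argv)...
//   TargetOptions Options =
//       codegen::InitTargetOptionsFromCodeGenFlags(TheTriple);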

std::string getCPUStr();

std::string getFeaturesStr();

std::vector<std::string> getFeatureList();

void renderBoolStringAttr(AttrBuilder &B, StringRef Name, bool Val);

/// Set function attributes of function \p F based on CPU, Features, and command
/// line flags.
void setFunctionAttributes(StringRef CPU, StringRef Features, Function &F);

/// Set function attributes of functions in Module M based on CPU,
/// Features, and command line flags.
void setFunctionAttributes(StringRef CPU, StringRef Features, Module &M);

/// Should value-tracking variable locations / instruction referencing be
/// enabled by default for this triple?
bool getDefaultValueTrackingVariableLocations(const llvm::Triple &T);
} // namespace codegen
} // namespace llvm

#endif // LLVM_CODEGEN_COMMANDFLAGS_H
//===- lib/CodeGen/CalcSpillWeights.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CALCSPILLWEIGHTS_H
#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H

#include "llvm/CodeGen/SlotIndexes.h"

namespace llvm {

class LiveInterval;
class LiveIntervals;
class MachineBlockFrequencyInfo;
class MachineFunction;
class MachineLoopInfo;
class VirtRegMap;

  /// Normalize the spill weight of a live interval
  ///
  /// The spill weight of a live interval is computed as:
  ///
  ///   (sum(use freq) + sum(def freq)) / (K + size)
  ///
  /// @param UseDefFreq Expected number of executed use and def instructions
  ///                   per function call. Derived from block frequencies.
  /// @param Size       Size of live interval as returned by getSize()
  /// @param NumInstr   Number of instructions using this live interval
  static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size,
                                           unsigned NumInstr) {
    // The constant 25 instructions is added to avoid depending too much on
    // accidental SlotIndex gaps for small intervals. The effect is that small
    // intervals have a spill weight that is mostly proportional to the number
    // of uses, while large intervals get a spill weight that is closer to a use
    // density.
    return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
  }
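
  // Worked example (illustrative, not part of the original header): an
  // interval spanning roughly N instructions has
  // Size ~= N * SlotIndex::InstrDist, so the normalized weight is
  //
  //   UseDefFreq / ((N + 25) * SlotIndex::InstrDist)
  //
  // i.e. a 5-instruction interval is divided by (5 + 25) = 30 instruction
  // widths, so accidental slot-index gaps in small intervals cannot dominate.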

  /// Calculate auxiliary information for a virtual register such as its
  /// spill weight and allocation hint.
  class VirtRegAuxInfo {
    MachineFunction &MF;
    LiveIntervals &LIS;
    const VirtRegMap &VRM;
    const MachineLoopInfo &Loops;
    const MachineBlockFrequencyInfo &MBFI;

    /// Returns true if Reg of live interval LI is used in instruction with many
    /// operands like STATEPOINT.
    bool isLiveAtStatepointVarArg(LiveInterval &LI);

  public:
    VirtRegAuxInfo(MachineFunction &MF, LiveIntervals &LIS,
                   const VirtRegMap &VRM, const MachineLoopInfo &Loops,
                   const MachineBlockFrequencyInfo &MBFI)
        : MF(MF), LIS(LIS), VRM(VRM), Loops(Loops), MBFI(MBFI) {}

    virtual ~VirtRegAuxInfo() = default;

    /// (re)compute li's spill weight and allocation hint.
    void calculateSpillWeightAndHint(LiveInterval &LI);

    /// Compute spill weights and allocation hints for all virtual register
    /// live intervals.
    void calculateSpillWeightsAndHints();

    /// Return the preferred allocation register for reg, given a COPY
    /// instruction.
    static Register copyHint(const MachineInstr *MI, unsigned Reg,
                             const TargetRegisterInfo &TRI,
                             const MachineRegisterInfo &MRI);

    /// Determine if all values in LI are rematerializable.
    static bool isRematerializable(const LiveInterval &LI,
                                   const LiveIntervals &LIS,
                                   const VirtRegMap &VRM,
                                   const TargetInstrInfo &TII);

  protected:
    /// Helper function for weight calculations.
    /// (Re)compute LI's spill weight and allocation hint, or, for non null
    /// start and end - compute future expected spill weight of a split
    /// artifact of LI that will span between start and end slot indexes.
    /// \param LI     The live interval for which to compute the weight.
    /// \param Start  The expected beginning of the split artifact. Instructions
    ///               before start will not affect the weight. Relevant for
    ///               weight calculation of future split artifact.
    /// \param End    The expected end of the split artifact. Instructions
    ///               after end will not affect the weight. Relevant for
    ///               weight calculation of future split artifact.
    /// \return The spill weight. Returns negative weight for unspillable LI.
    float weightCalcHelper(LiveInterval &LI, SlotIndex *Start = nullptr,
                           SlotIndex *End = nullptr);

    /// Weight normalization function.
    virtual float normalize(float UseDefFreq, unsigned Size,
                            unsigned NumInstr) {
      return normalizeSpillWeight(UseDefFreq, Size, NumInstr);
    }
  };
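
  // Illustrative usage (sketch; typically driven by a register allocator):
  //
  //   VirtRegAuxInfo VRAI(MF, LIS, VRM, Loops, MBFI);
  //   VRAI.calculateSpillWeightsAndHints();   // all vreg live intervals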
} // end namespace llvm

#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H
//===- ValueTypes.td - ValueType definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Value types - These values correspond to the register types defined in the
// MachineValueTypes.h file.  If you update anything here, you must update it
// there as well!
//
//===----------------------------------------------------------------------===//

class ValueType<int size, int value> {
  string Namespace = "MVT";
  string LLVMName = NAME;
  int Size = size;
  int Value = value;
  int nElem = 1;
  ValueType ElementType = ?;
  int isOverloaded = false;
  int isInteger = false;
  int isFP = false;
  int isVector = false;
  int isScalable = false;
}

class VTAny<int value> : ValueType<0, value> {
  let isOverloaded = true;
}

class VTInt<int size, int value>
    : ValueType<size, value> {
  let isInteger = true;
}

class VTFP<int size, int value>
    : ValueType<size, value> {
  let isFP = true;
}

class VTVec<int nelem, ValueType elt, int value>
    : ValueType<!mul(nelem, elt.Size), value> {
  let nElem = nelem;
  let ElementType = elt;
  let isInteger = elt.isInteger;
  let isFP = elt.isFP;
  let isVector = true;
}

class VTScalableVec<int nelem, ValueType elt, int value>
    : VTVec<nelem, elt, value> {
  let isScalable = true;
}

defset list<ValueType> ValueTypes = {

def OtherVT : ValueType<0,   1> {  // "Other" value
  let LLVMName = "Other";
}

def i1      : VTInt<1,   2>;  // One bit boolean value
def i2      : VTInt<2,   3>;  // 2-bit integer value
def i4      : VTInt<4,   4>;  // 4-bit integer value
def i8      : VTInt<8,   5>;  // 8-bit integer value
def i16     : VTInt<16,  6>;  // 16-bit integer value
def i32     : VTInt<32,  7>;  // 32-bit integer value
def i64     : VTInt<64,  8>;  // 64-bit integer value
def i128    : VTInt<128, 9>;  // 128-bit integer value

def bf16    : VTFP<16,  10>;  // 16-bit brain floating point value
def f16     : VTFP<16,  11>;  // 16-bit floating point value
def f32     : VTFP<32,  12>;  // 32-bit floating point value
def f64     : VTFP<64,  13>;  // 64-bit floating point value
def f80     : VTFP<80,  14>;  // 80-bit floating point value
def f128    : VTFP<128, 15>;  // 128-bit floating point value
def ppcf128 : VTFP<128, 16>;  // PPC 128-bit floating point value

def v1i1    : VTVec<1,    i1, 17>;  //    1 x i1 vector value
def v2i1    : VTVec<2,    i1, 18>;  //    2 x i1 vector value
def v4i1    : VTVec<4,    i1, 19>;  //    4 x i1 vector value
def v8i1    : VTVec<8,    i1, 20>;  //    8 x i1 vector value
def v16i1   : VTVec<16,   i1, 21>;  //   16 x i1 vector value
def v32i1   : VTVec<32,   i1, 22>;  //   32 x i1 vector value
def v64i1   : VTVec<64,   i1, 23>;  //   64 x i1 vector value
def v128i1  : VTVec<128,  i1, 24>;  //  128 x i1 vector value
def v256i1  : VTVec<256,  i1, 25>;  //  256 x i1 vector value
def v512i1  : VTVec<512,  i1, 26>;  //  512 x i1 vector value
def v1024i1 : VTVec<1024, i1, 27>;  // 1024 x i1 vector value
def v2048i1 : VTVec<2048, i1, 28>;  // 2048 x i1 vector value

def v128i2  : VTVec<128,  i2, 29>;   //  128 x i2 vector value
def v256i2  : VTVec<256,  i2, 30>;   //  256 x i2 vector value

def v64i4   : VTVec<64,   i4, 31>;   //   64 x i4 vector value
def v128i4  : VTVec<128,  i4, 32>;   //  128 x i4 vector value

def v1i8    : VTVec<1,    i8, 33>;  //    1 x i8 vector value
def v2i8    : VTVec<2,    i8, 34>;  //    2 x i8 vector value
def v4i8    : VTVec<4,    i8, 35>;  //    4 x i8 vector value
def v8i8    : VTVec<8,    i8, 36>;  //    8 x i8 vector value
def v16i8   : VTVec<16,   i8, 37>;  //   16 x i8 vector value
def v32i8   : VTVec<32,   i8, 38>;  //   32 x i8 vector value
def v64i8   : VTVec<64,   i8, 39>;  //   64 x i8 vector value
def v128i8  : VTVec<128,  i8, 40>;  //  128 x i8 vector value
def v256i8  : VTVec<256,  i8, 41>;  //  256 x i8 vector value
def v512i8  : VTVec<512,  i8, 42>;  //  512 x i8 vector value
def v1024i8 : VTVec<1024, i8, 43>;  // 1024 x i8 vector value

def v1i16   : VTVec<1,   i16, 44>;  //   1 x i16 vector value
def v2i16   : VTVec<2,   i16, 45>;  //   2 x i16 vector value
def v3i16   : VTVec<3,   i16, 46>;  //   3 x i16 vector value
def v4i16   : VTVec<4,   i16, 47>;  //   4 x i16 vector value
def v8i16   : VTVec<8,   i16, 48>;  //   8 x i16 vector value
def v16i16  : VTVec<16,  i16, 49>;  //  16 x i16 vector value
def v32i16  : VTVec<32,  i16, 50>;  //  32 x i16 vector value
def v64i16  : VTVec<64,  i16, 51>;  //  64 x i16 vector value
def v128i16 : VTVec<128, i16, 52>;  // 128 x i16 vector value
def v256i16 : VTVec<256, i16, 53>;  // 256 x i16 vector value
def v512i16 : VTVec<512, i16, 54>;  // 512 x i16 vector value

def v1i32    : VTVec<1,    i32, 55>;  //    1 x i32 vector value
def v2i32    : VTVec<2,    i32, 56>;  //    2 x i32 vector value
def v3i32    : VTVec<3,    i32, 57>;  //    3 x i32 vector value
def v4i32    : VTVec<4,    i32, 58>;  //    4 x i32 vector value
def v5i32    : VTVec<5,    i32, 59>;  //    5 x i32 vector value
def v6i32    : VTVec<6,    i32, 60>;  //    6 x i32 vector value
def v7i32    : VTVec<7,    i32, 61>;  //    7 x i32 vector value
def v8i32    : VTVec<8,    i32, 62>;  //    8 x i32 vector value
def v9i32    : VTVec<9,    i32, 63>;  //    9 x i32 vector value
def v10i32   : VTVec<10,   i32, 64>;  //   10 x i32 vector value
def v11i32   : VTVec<11,   i32, 65>;  //   11 x i32 vector value
def v12i32   : VTVec<12,   i32, 66>;  //   12 x i32 vector value
def v16i32   : VTVec<16,   i32, 67>;  //   16 x i32 vector value
def v32i32   : VTVec<32,   i32, 68>;  //   32 x i32 vector value
def v64i32   : VTVec<64,   i32, 69>;  //   64 x i32 vector value
def v128i32  : VTVec<128,  i32, 70>;  //  128 x i32 vector value
def v256i32  : VTVec<256,  i32, 71>;  //  256 x i32 vector value
def v512i32  : VTVec<512,  i32, 72>;  //  512 x i32 vector value
def v1024i32 : VTVec<1024, i32, 73>;  // 1024 x i32 vector value
def v2048i32 : VTVec<2048, i32, 74>;  // 2048 x i32 vector value

def v1i64   : VTVec<1,   i64, 75>;  //   1 x i64 vector value
def v2i64   : VTVec<2,   i64, 76>;  //   2 x i64 vector value
def v3i64   : VTVec<3,   i64, 77>;  //   3 x i64 vector value
def v4i64   : VTVec<4,   i64, 78>;  //   4 x i64 vector value
def v8i64   : VTVec<8,   i64, 79>;  //   8 x i64 vector value
def v16i64  : VTVec<16,  i64, 80>;  //  16 x i64 vector value
def v32i64  : VTVec<32,  i64, 81>;  //  32 x i64 vector value
def v64i64  : VTVec<64,  i64, 82>;  //  64 x i64 vector value
def v128i64 : VTVec<128, i64, 83>;  // 128 x i64 vector value
def v256i64 : VTVec<256, i64, 84>;  // 256 x i64 vector value

def v1i128  : VTVec<1,  i128, 85>;  //  1 x i128 vector value

def v1f16    : VTVec<1,    f16,  86>;  //    1 x f16 vector value
def v2f16    : VTVec<2,    f16,  87>;  //    2 x f16 vector value
def v3f16    : VTVec<3,    f16,  88>;  //    3 x f16 vector value
def v4f16    : VTVec<4,    f16,  89>;  //    4 x f16 vector value
def v8f16    : VTVec<8,    f16,  90>;  //    8 x f16 vector value
def v16f16   : VTVec<16,   f16,  91>;  //   16 x f16 vector value
def v32f16   : VTVec<32,   f16,  92>;  //   32 x f16 vector value
def v64f16   : VTVec<64,   f16,  93>;  //   64 x f16 vector value
def v128f16  : VTVec<128,  f16,  94>;  //  128 x f16 vector value
def v256f16  : VTVec<256,  f16,  95>;  //  256 x f16 vector value
def v512f16  : VTVec<512,  f16,  96>;  //  512 x f16 vector value

def v2bf16   : VTVec<2,   bf16,  97>;  //    2 x bf16 vector value
def v3bf16   : VTVec<3,   bf16,  98>;  //    3 x bf16 vector value
def v4bf16   : VTVec<4,   bf16,  99>;  //    4 x bf16 vector value
def v8bf16   : VTVec<8,   bf16, 100>;  //    8 x bf16 vector value
def v16bf16  : VTVec<16,  bf16, 101>;  //   16 x bf16 vector value
def v32bf16  : VTVec<32,  bf16, 102>;  //   32 x bf16 vector value
def v64bf16  : VTVec<64,  bf16, 103>;  //   64 x bf16 vector value
def v128bf16 : VTVec<128, bf16, 104>;  //  128 x bf16 vector value

def v1f32    : VTVec<1,    f32, 105>;  //    1 x f32 vector value
def v2f32    : VTVec<2,    f32, 106>;  //    2 x f32 vector value
def v3f32    : VTVec<3,    f32, 107>;  //    3 x f32 vector value
def v4f32    : VTVec<4,    f32, 108>;  //    4 x f32 vector value
def v5f32    : VTVec<5,    f32, 109>;  //    5 x f32 vector value
def v6f32    : VTVec<6,    f32, 110>;  //    6 x f32 vector value
def v7f32    : VTVec<7,    f32, 111>;  //    7 x f32 vector value
def v8f32    : VTVec<8,    f32, 112>;  //    8 x f32 vector value
def v9f32    : VTVec<9,    f32, 113>;  //    9 x f32 vector value
def v10f32   : VTVec<10,   f32, 114>;  //   10 x f32 vector value
def v11f32   : VTVec<11,   f32, 115>;  //   11 x f32 vector value
def v12f32   : VTVec<12,   f32, 116>;  //   12 x f32 vector value
def v16f32   : VTVec<16,   f32, 117>;  //   16 x f32 vector value
def v32f32   : VTVec<32,   f32, 118>;  //   32 x f32 vector value
def v64f32   : VTVec<64,   f32, 119>;  //   64 x f32 vector value
def v128f32  : VTVec<128,  f32, 120>;  //  128 x f32 vector value
def v256f32  : VTVec<256,  f32, 121>;  //  256 x f32 vector value
def v512f32  : VTVec<512,  f32, 122>;  //  512 x f32 vector value
def v1024f32 : VTVec<1024, f32, 123>;  // 1024 x f32 vector value
def v2048f32 : VTVec<2048, f32, 124>;  // 2048 x f32 vector value

def v1f64    : VTVec<1,    f64, 125>;  //    1 x f64 vector value
def v2f64    : VTVec<2,    f64, 126>;  //    2 x f64 vector value
def v3f64    : VTVec<3,    f64, 127>;  //    3 x f64 vector value
def v4f64    : VTVec<4,    f64, 128>;  //    4 x f64 vector value
def v8f64    : VTVec<8,    f64, 129>;  //    8 x f64 vector value
def v16f64   : VTVec<16,   f64, 130>;  //   16 x f64 vector value
def v32f64   : VTVec<32,   f64, 131>;  //   32 x f64 vector value
def v64f64   : VTVec<64,   f64, 132>;  //   64 x f64 vector value
def v128f64  : VTVec<128,  f64, 133>;  //  128 x f64 vector value
def v256f64  : VTVec<256,  f64, 134>;  //  256 x f64 vector value

def nxv1i1  : VTScalableVec<1,  i1, 135>;  // n x  1 x i1  vector value
def nxv2i1  : VTScalableVec<2,  i1, 136>;  // n x  2 x i1  vector value
def nxv4i1  : VTScalableVec<4,  i1, 137>;  // n x  4 x i1  vector value
def nxv8i1  : VTScalableVec<8,  i1, 138>;  // n x  8 x i1  vector value
def nxv16i1 : VTScalableVec<16, i1, 139>;  // n x 16 x i1  vector value
def nxv32i1 : VTScalableVec<32, i1, 140>;  // n x 32 x i1  vector value
def nxv64i1 : VTScalableVec<64, i1, 141>;  // n x 64 x i1  vector value

def nxv1i8  : VTScalableVec<1,  i8, 142>;  // n x  1 x i8  vector value
def nxv2i8  : VTScalableVec<2,  i8, 143>;  // n x  2 x i8  vector value
def nxv4i8  : VTScalableVec<4,  i8, 144>;  // n x  4 x i8  vector value
def nxv8i8  : VTScalableVec<8,  i8, 145>;  // n x  8 x i8  vector value
def nxv16i8 : VTScalableVec<16, i8, 146>;  // n x 16 x i8  vector value
def nxv32i8 : VTScalableVec<32, i8, 147>;  // n x 32 x i8  vector value
def nxv64i8 : VTScalableVec<64, i8, 148>;  // n x 64 x i8  vector value

def nxv1i16  : VTScalableVec<1,  i16, 149>;  // n x  1 x i16 vector value
def nxv2i16  : VTScalableVec<2,  i16, 150>;  // n x  2 x i16 vector value
def nxv4i16  : VTScalableVec<4,  i16, 151>;  // n x  4 x i16 vector value
def nxv8i16  : VTScalableVec<8,  i16, 152>;  // n x  8 x i16 vector value
def nxv16i16 : VTScalableVec<16, i16, 153>;  // n x 16 x i16 vector value
def nxv32i16 : VTScalableVec<32, i16, 154>;  // n x 32 x i16 vector value

def nxv1i32  : VTScalableVec<1,  i32, 155>;  // n x  1 x i32 vector value
def nxv2i32  : VTScalableVec<2,  i32, 156>;  // n x  2 x i32 vector value
def nxv4i32  : VTScalableVec<4,  i32, 157>;  // n x  4 x i32 vector value
def nxv8i32  : VTScalableVec<8,  i32, 158>;  // n x  8 x i32 vector value
def nxv16i32 : VTScalableVec<16, i32, 159>;  // n x 16 x i32 vector value
def nxv32i32 : VTScalableVec<32, i32, 160>;  // n x 32 x i32 vector value

def nxv1i64  : VTScalableVec<1,  i64, 161>;  // n x  1 x i64 vector value
def nxv2i64  : VTScalableVec<2,  i64, 162>;  // n x  2 x i64 vector value
def nxv4i64  : VTScalableVec<4,  i64, 163>;  // n x  4 x i64 vector value
def nxv8i64  : VTScalableVec<8,  i64, 164>;  // n x  8 x i64 vector value
def nxv16i64 : VTScalableVec<16, i64, 165>;  // n x 16 x i64 vector value
def nxv32i64 : VTScalableVec<32, i64, 166>;  // n x 32 x i64 vector value

def nxv1f16  : VTScalableVec<1,  f16, 167>;  // n x  1 x  f16 vector value
def nxv2f16  : VTScalableVec<2,  f16, 168>;  // n x  2 x  f16 vector value
def nxv4f16  : VTScalableVec<4,  f16, 169>;  // n x  4 x  f16 vector value
def nxv8f16  : VTScalableVec<8,  f16, 170>;  // n x  8 x  f16 vector value
def nxv16f16 : VTScalableVec<16, f16, 171>;  // n x 16 x  f16 vector value
def nxv32f16 : VTScalableVec<32, f16, 172>;  // n x 32 x  f16 vector value

def nxv1bf16  : VTScalableVec<1,  bf16, 173>;  // n x  1 x bf16 vector value
def nxv2bf16  : VTScalableVec<2,  bf16, 174>;  // n x  2 x bf16 vector value
def nxv4bf16  : VTScalableVec<4,  bf16, 175>;  // n x  4 x bf16 vector value
def nxv8bf16  : VTScalableVec<8,  bf16, 176>;  // n x  8 x bf16 vector value
def nxv16bf16 : VTScalableVec<16, bf16, 177>;  // n x 16 x bf16 vector value
def nxv32bf16 : VTScalableVec<32, bf16, 178>;  // n x 32 x bf16 vector value

def nxv1f32  : VTScalableVec<1,  f32, 179>;  // n x  1 x  f32 vector value
def nxv2f32  : VTScalableVec<2,  f32, 180>;  // n x  2 x  f32 vector value
def nxv4f32  : VTScalableVec<4,  f32, 181>;  // n x  4 x  f32 vector value
def nxv8f32  : VTScalableVec<8,  f32, 182>;  // n x  8 x  f32 vector value
def nxv16f32 : VTScalableVec<16, f32, 183>;  // n x 16 x  f32 vector value

def nxv1f64  : VTScalableVec<1,  f64, 184>;  // n x  1 x  f64 vector value
def nxv2f64  : VTScalableVec<2,  f64, 185>;  // n x  2 x  f64 vector value
def nxv4f64  : VTScalableVec<4,  f64, 186>;  // n x  4 x  f64 vector value
def nxv8f64  : VTScalableVec<8,  f64, 187>;  // n x  8 x  f64 vector value

def x86mmx    : ValueType<64,   188>;  // X86 MMX value
def FlagVT    : ValueType<0,    189> { // Pre-RA sched glue
  let LLVMName = "Glue";
}
def isVoid    : ValueType<0,    190>;  // Produces no value
def untyped   : ValueType<8,    191> { // Produces an untyped value
  let LLVMName = "Untyped";
}
def funcref   : ValueType<0,    192>;  // WebAssembly's funcref type
def externref : ValueType<0,    193>;  // WebAssembly's externref type
def x86amx    : ValueType<8192, 194>;  // X86 AMX value
def i64x8     : ValueType<512,  195>;  // 8 Consecutive GPRs (AArch64)
def aarch64svcount
              : ValueType<16,   196>;  // AArch64 predicate-as-counter
def spirvbuiltin : ValueType<0,  197>; // SPIR-V's builtin type

def token      : ValueType<0, 248>;  // TokenTy
def MetadataVT : ValueType<0, 249> { // Metadata
  let LLVMName = "Metadata";
}

// Pseudo valuetype mapped to the current pointer size to any address space.
// Should only be used in TableGen.
def iPTRAny    : VTAny<250>;

// Pseudo valuetype to represent "vector of any size"
def vAny       : VTAny<251>;

// Pseudo valuetype to represent "float of any format"
def fAny       : VTAny<252>;

// Pseudo valuetype to represent "integer of any bit width"
def iAny       : VTAny<253>;

// Pseudo valuetype mapped to the current pointer size.
def iPTR       : ValueType<0, 254>;

// Pseudo valuetype to represent "any type of any size".
def Any        : VTAny<255>;

} // end defset ValueTypes

/// This class is for targets that want to use pointer types in patterns
/// with the GlobalISelEmitter.  Targets must define their own pointer
/// derived from this class.  The scalar argument should be an
/// integer type with the same bit size as the pointer.
/// e.g. def p0 : PtrValueType <i64, 0>;

class PtrValueType <ValueType scalar, int addrspace> :
    ValueType<scalar.Size, scalar.Value> {
  int AddrSpace = addrspace;
}
//===- PassManager.h --- Pass management for CodeGen ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines the pass manager interface for codegen. The codegen
// pipeline consists of only machine function passes. There is no container
// relationship between IR module/function and machine function in terms of pass
// manager organization. So there is no need for adaptor classes (for example
// ModuleToMachineFunctionAdaptor). Since invalidation could only happen among
// machine function passes, there is no proxy classes to handle cross-IR-unit
// invalidation. IR analysis results are provided for machine function passes by
// their respective analysis managers such as ModuleAnalysisManager and
// FunctionAnalysisManager.
//
// TODO: Add MachineFunctionProperties support.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEPASSMANAGER_H
#define LLVM_CODEGEN_MACHINEPASSMANAGER_H

#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Error.h"

#include <map>

namespace llvm {
class Module;
class Function;
class MachineFunction;

extern template class AnalysisManager<MachineFunction>;

/// An AnalysisManager<MachineFunction> that also exposes IR analysis results.
class MachineFunctionAnalysisManager : public AnalysisManager<MachineFunction> {
public:
  using Base = AnalysisManager<MachineFunction>;

  MachineFunctionAnalysisManager() : FAM(nullptr), MAM(nullptr) {}
  MachineFunctionAnalysisManager(FunctionAnalysisManager &FAM,
                                 ModuleAnalysisManager &MAM)
      : FAM(&FAM), MAM(&MAM) {}
  MachineFunctionAnalysisManager(MachineFunctionAnalysisManager &&) = default;
  MachineFunctionAnalysisManager &
  operator=(MachineFunctionAnalysisManager &&) = default;

  /// Get the result of an analysis pass for a Function.
  ///
  /// Runs the analysis if a cached result is not available.
  template <typename PassT> typename PassT::Result &getResult(Function &F) {
    return FAM->getResult<PassT>(F);
  }

  /// Get the cached result of an analysis pass for a Function.
  ///
  /// This method never runs the analysis.
  ///
  /// \returns null if there is no cached result.
  template <typename PassT>
  typename PassT::Result *getCachedResult(Function &F) {
    return FAM->getCachedResult<PassT>(F);
  }

  /// Get the result of an analysis pass for a Module.
  ///
  /// Runs the analysis if a cached result is not available.
  template <typename PassT> typename PassT::Result &getResult(Module &M) {
    return MAM->getResult<PassT>(M);
  }

  /// Get the cached result of an analysis pass for a Module.
  ///
  /// This method never runs the analysis.
  ///
  /// \returns null if there is no cached result.
  template <typename PassT> typename PassT::Result *getCachedResult(Module &M) {
    return MAM->getCachedResult<PassT>(M);
  }

  /// Get the result of an analysis pass for a MachineFunction.
  ///
  /// Runs the analysis if a cached result is not available.
  using Base::getResult;

  /// Get the cached result of an analysis pass for a MachineFunction.
  ///
  /// This method never runs the analysis.
  ///
  /// \returns null if there is no cached result.
  using Base::getCachedResult;

  // FIXME: Add LoopAnalysisManager or CGSCCAnalysisManager if needed.
  FunctionAnalysisManager *FAM;
  ModuleAnalysisManager *MAM;
};
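
// Illustrative sketch (not part of this header; analysis registration is
// elided): wire the IR-level analysis managers into a
// MachineFunctionAnalysisManager so machine function passes can query IR
// analyses through it.
//
//   FunctionAnalysisManager FAM;
//   ModuleAnalysisManager MAM;
//   MachineFunctionAnalysisManager MFAM(FAM, MAM);
//   // Inside a machine pass, MFAM.getResult<SomeIRAnalysis>(F) forwards the
//   // query to FAM; SomeIRAnalysis is a placeholder name.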

extern template class PassManager<MachineFunction>;

/// MachineFunctionPassManager differs from the base PassManager template
/// instantiation in the following ways.
///
/// - Support passes that implement doInitialization/doFinalization. This is for
///   machine function passes that need to work on module-level constructs. One
///   such pass is AsmPrinter.
///
/// - Support machine module pass which runs over the module (for example,
///   MachineOutliner). A machine module pass needs to define the method:
///
///   ```Error run(Module &, MachineFunctionAnalysisManager &)```
///
///   FIXME: machine module passes still need to define the usual machine
///          function pass interface, namely,
///          `PreservedAnalyses run(MachineFunction &,
///                                 MachineFunctionAnalysisManager &)`
///          But this interface wouldn't be executed. It is just a placeholder
///          to satisfy the pass manager's type-erased interface. This
///          special-casing of machine module passes is due to their limited
///          use cases and the unnecessary complexity they may bring to the
///          machine pass manager.
///
/// - The base class `run` method is replaced by an alternative `run` method.
///   See details below.
///
/// - Support running codegen in SCC order. Users include interprocedural
///   register allocation (IPRA).
class MachineFunctionPassManager
    : public PassManager<MachineFunction, MachineFunctionAnalysisManager> {
  using Base = PassManager<MachineFunction, MachineFunctionAnalysisManager>;

public:
  MachineFunctionPassManager(bool RequireCodeGenSCCOrder = false,
                             bool VerifyMachineFunction = false)
      : RequireCodeGenSCCOrder(RequireCodeGenSCCOrder),
        VerifyMachineFunction(VerifyMachineFunction) {}
  MachineFunctionPassManager(MachineFunctionPassManager &&) = default;
  MachineFunctionPassManager &
  operator=(MachineFunctionPassManager &&) = default;

  /// Run machine passes for a Module.
  ///
  /// The intended use is to start the codegen pipeline for a Module. The base
  /// class's `run` method is deliberately hidden by this one because we don't
  /// yet have use cases for composing two instances of machine pass managers,
  /// or for composing machine pass managers with other types of pass managers.
  Error run(Module &M, MachineFunctionAnalysisManager &MFAM);

  template <typename PassT> void addPass(PassT &&Pass) {
    Base::addPass(std::forward<PassT>(Pass));
    PassConceptT *P = Passes.back().get();
    addDoInitialization<PassT>(P);
    addDoFinalization<PassT>(P);

    // Add machine module pass.
    addRunOnModule<PassT>(P);
  }
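
  // Illustrative sketch (PrintNamesPass is hypothetical, not part of this
  // header): a pass that also defines doInitialization/doFinalization is
  // detected here via is_detected and its hooks are queued for run().
  //
  //   struct PrintNamesPass : PassInfoMixin<PrintNamesPass> {
  //     Error doInitialization(Module &M, MachineFunctionAnalysisManager &) {
  //       errs() << M.getName() << '\n';
  //       return Error::success();
  //     }
  //     PreservedAnalyses run(MachineFunction &MF,
  //                           MachineFunctionAnalysisManager &) {
  //       errs() << MF.getName() << '\n';
  //       return PreservedAnalyses::all();
  //     }
  //   };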

private:
  template <typename PassT>
  using has_init_t = decltype(std::declval<PassT &>().doInitialization(
      std::declval<Module &>(),
      std::declval<MachineFunctionAnalysisManager &>()));

  template <typename PassT>
  std::enable_if_t<!is_detected<has_init_t, PassT>::value>
  addDoInitialization(PassConceptT *Pass) {}

  template <typename PassT>
  std::enable_if_t<is_detected<has_init_t, PassT>::value>
  addDoInitialization(PassConceptT *Pass) {
    using PassModelT =
        detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
                          MachineFunctionAnalysisManager>;
    auto *P = static_cast<PassModelT *>(Pass);
    InitializationFuncs.emplace_back(
        [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
          return P->Pass.doInitialization(M, MFAM);
        });
  }

  template <typename PassT>
  using has_fini_t = decltype(std::declval<PassT &>().doFinalization(
      std::declval<Module &>(),
      std::declval<MachineFunctionAnalysisManager &>()));

  template <typename PassT>
  std::enable_if_t<!is_detected<has_fini_t, PassT>::value>
  addDoFinalization(PassConceptT *Pass) {}

  template <typename PassT>
  std::enable_if_t<is_detected<has_fini_t, PassT>::value>
  addDoFinalization(PassConceptT *Pass) {
    using PassModelT =
        detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
                          MachineFunctionAnalysisManager>;
    auto *P = static_cast<PassModelT *>(Pass);
    FinalizationFuncs.emplace_back(
        [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
          return P->Pass.doFinalization(M, MFAM);
        });
  }

  template <typename PassT>
  using is_machine_module_pass_t = decltype(std::declval<PassT &>().run(
      std::declval<Module &>(),
      std::declval<MachineFunctionAnalysisManager &>()));

  template <typename PassT>
  using is_machine_function_pass_t = decltype(std::declval<PassT &>().run(
      std::declval<MachineFunction &>(),
      std::declval<MachineFunctionAnalysisManager &>()));

  template <typename PassT>
  std::enable_if_t<!is_detected<is_machine_module_pass_t, PassT>::value>
  addRunOnModule(PassConceptT *Pass) {}

  template <typename PassT>
  std::enable_if_t<is_detected<is_machine_module_pass_t, PassT>::value>
  addRunOnModule(PassConceptT *Pass) {
    static_assert(is_detected<is_machine_function_pass_t, PassT>::value,
                  "machine module pass needs to define machine function pass "
                  "api. sorry.");

    using PassModelT =
        detail::PassModel<MachineFunction, PassT, PreservedAnalyses,
                          MachineFunctionAnalysisManager>;
    auto *P = static_cast<PassModelT *>(Pass);
    MachineModulePasses.emplace(
        Passes.size() - 1,
        [=](Module &M, MachineFunctionAnalysisManager &MFAM) {
          return P->Pass.run(M, MFAM);
        });
  }

  using FuncTy = Error(Module &, MachineFunctionAnalysisManager &);
  SmallVector<llvm::unique_function<FuncTy>, 4> InitializationFuncs;
  SmallVector<llvm::unique_function<FuncTy>, 4> FinalizationFuncs;

  using PassIndex = decltype(Passes)::size_type;
  std::map<PassIndex, llvm::unique_function<FuncTy>> MachineModulePasses;

  // Run codegen in the SCC order.
  bool RequireCodeGenSCCOrder;

  bool VerifyMachineFunction;
};
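
// Illustrative sketch of driving the codegen pipeline (PrintNamesPass is the
// hypothetical pass from the sketch above; M and MFAM setup is elided):
//
//   MachineFunctionPassManager MFPM;
//   MFPM.addPass(PrintNamesPass());
//   if (Error Err = MFPM.run(M, MFAM))
//     report_fatal_error(std::move(Err));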

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEPASSMANAGER_H
//===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the MachineTraceMetrics analysis pass
// that estimates CPU resource usage and critical data dependency paths through
// preferred traces. This is useful for super-scalar CPUs where execution speed
// can be limited both by data dependencies and by limited execution resources.
//
// Out-of-order CPUs will often be executing instructions from multiple basic
// blocks at the same time. This makes it difficult to estimate the resource
// usage accurately in a single basic block. Resources can be estimated better
// by looking at a trace through the current basic block.
//
// For every block, the MachineTraceMetrics pass will pick a preferred trace
// that passes through the block. The trace is chosen based on loop structure,
// branch probabilities, and resource usage. The intention is to pick likely
// traces that would be the most affected by code transformations.
//
// It is expensive to compute a full arbitrary trace for every block, so to
// save some computations, traces are chosen to be convergent. This means that
// if the traces through basic blocks A and B ever cross when moving away from
// A and B, they never diverge again. This applies in both directions: if the
// traces meet above A and B, they won't diverge when going further back.
//
// Traces tend to align with loops. The trace through a block in an inner loop
// will begin at the loop entry block and end at a back edge. If there are
// nested loops, the trace may begin and end at those instead.
//
// For each trace, we compute the critical path length, which is the number of
// cycles required to execute the trace when execution is limited by data
// dependencies only. We also compute the resource height, which is the number
// of cycles required to execute all instructions in the trace when ignoring
// data dependencies.
//
// Every instruction in the current block has a slack - the number of cycles
// execution of the instruction can be delayed without extending the critical
// path.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINETRACEMETRICS_H
#define LLVM_CODEGEN_MACHINETRACEMETRICS_H

#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/TargetSchedule.h"

namespace llvm {

class AnalysisUsage;
class MachineFunction;
class MachineInstr;
class MachineLoop;
class MachineLoopInfo;
class MachineRegisterInfo;
struct MCSchedClassDesc;
class raw_ostream;
class TargetInstrInfo;
class TargetRegisterInfo;

// Keep track of physreg data dependencies by recording each live register unit.
// Associate each regunit with an instruction operand. Depending on the
// direction in which instructions are scanned, it could be the operand that
// defined the regunit, or the highest operand to read the regunit.
struct LiveRegUnit {
  unsigned RegUnit;
  unsigned Cycle = 0;
  const MachineInstr *MI = nullptr;
  unsigned Op = 0;

  unsigned getSparseSetIndex() const { return RegUnit; }

  LiveRegUnit(unsigned RU) : RegUnit(RU) {}
};

/// Strategies for selecting traces.
enum class MachineTraceStrategy {
  /// Select the trace through a block that has the fewest instructions.
  TS_MinInstrCount,
  /// Select the trace that contains only the current basic block. For instance,
  /// this strategy can be used by MachineCombiner to make better decisions
  /// when estimating the critical path for in-order cores.
  TS_Local,
  TS_NumStrategies
};

class MachineTraceMetrics : public MachineFunctionPass {
  const MachineFunction *MF = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const MachineLoopInfo *Loops = nullptr;
  TargetSchedModel SchedModel;

public:
  friend class Ensemble;
  friend class Trace;

  class Ensemble;

  static char ID;

  MachineTraceMetrics();

  void getAnalysisUsage(AnalysisUsage&) const override;
  bool runOnMachineFunction(MachineFunction&) override;
  void releaseMemory() override;
  void verifyAnalysis() const override;

  /// Per-basic block information that doesn't depend on the trace through the
  /// block.
  struct FixedBlockInfo {
    /// The number of non-trivial instructions in the block.
    /// Doesn't count PHI and COPY instructions that are likely to be removed.
    unsigned InstrCount = ~0u;

    /// True when the block contains calls.
    bool HasCalls = false;

    FixedBlockInfo() = default;

    /// Returns true when resource information for this block has been computed.
    bool hasResources() const { return InstrCount != ~0u; }

    /// Invalidate resource information.
    void invalidate() { InstrCount = ~0u; }
  };

  /// Get the fixed resource information about MBB. Compute it on demand.
  const FixedBlockInfo *getResources(const MachineBasicBlock*);

  /// Get the scaled number of cycles used per processor resource in MBB.
  /// This is an array with SchedModel.getNumProcResourceKinds() entries.
  /// The getResources() function above must have been called first.
  ///
  /// These numbers have already been scaled by SchedModel.getResourceFactor().
  ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;

  /// A virtual register or regunit required by a basic block or its trace
  /// successors.
  struct LiveInReg {
    /// The virtual register required, or a register unit.
    Register Reg;

    /// For virtual registers: Minimum height of the defining instruction.
    /// For regunits: Height of the highest user in the trace.
    unsigned Height;

    LiveInReg(Register Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
  };

  /// Per-basic block information that relates to a specific trace through the
  /// block. Convergent traces mean that only one of these is required per
  /// block in a trace ensemble.
  struct TraceBlockInfo {
    /// Trace predecessor, or NULL for the first block in the trace.
    /// Valid when hasValidDepth().
    const MachineBasicBlock *Pred = nullptr;

    /// Trace successor, or NULL for the last block in the trace.
    /// Valid when hasValidHeight().
    const MachineBasicBlock *Succ = nullptr;

    /// The block number of the head of the trace. (When hasValidDepth()).
    unsigned Head;

    /// The block number of the tail of the trace. (When hasValidHeight()).
    unsigned Tail;

    /// Accumulated number of instructions in the trace above this block.
    /// Does not include instructions in this block.
    unsigned InstrDepth = ~0u;

    /// Accumulated number of instructions in the trace below this block.
    /// Includes instructions in this block.
    unsigned InstrHeight = ~0u;

    TraceBlockInfo() = default;

    /// Returns true if the depth resources have been computed from the trace
    /// above this block.
    bool hasValidDepth() const { return InstrDepth != ~0u; }

    /// Returns true if the height resources have been computed from the trace
    /// below this block.
    bool hasValidHeight() const { return InstrHeight != ~0u; }

    /// Invalidate depth resources when some block above this one has changed.
    void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }

    /// Invalidate height resources when a block below this one has changed.
    void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }

    /// Assuming that this is a dominator of TBI, determine if it contains
    /// useful instruction depths. A dominating block can be above the current
    /// trace head, and any dependencies from such a far away dominator are not
    /// expected to affect the critical path.
    ///
    /// Also returns true when TBI == this.
    bool isUsefulDominator(const TraceBlockInfo &TBI) const {
      // The trace for TBI may not even be calculated yet.
      if (!hasValidDepth() || !TBI.hasValidDepth())
        return false;
      // Instruction depths are only comparable if the traces share a head.
      if (Head != TBI.Head)
        return false;
      // It is almost always the case that TBI belongs to the same trace as
      // this block, but in rare convoluted cases involving irreducible control
      // flow, a dominator may share a trace head without actually being on the
      // same trace as TBI. This is not a big problem as long as it doesn't
      // increase the instruction depth.
      return HasValidInstrDepths && InstrDepth <= TBI.InstrDepth;
    }

    // Data-dependency-related information. Per-instruction depth and height
    // are computed from data dependencies in the current trace, using
    // itinerary data.

    /// Instruction depths have been computed. This implies hasValidDepth().
    bool HasValidInstrDepths = false;

    /// Instruction heights have been computed. This implies hasValidHeight().
    bool HasValidInstrHeights = false;

    /// Critical path length. This is the number of cycles in the longest data
    /// dependency chain through the trace. This is only valid when both
    /// HasValidInstrDepths and HasValidInstrHeights are set.
    unsigned CriticalPath;

    /// Live-in registers. These registers are defined above the current block
    /// and used by this block or a block below it.
    /// This does not include PHI uses in the current block, but it does
    /// include PHI uses in deeper blocks.
    SmallVector<LiveInReg, 4> LiveIns;

    void print(raw_ostream&) const;
  };

  /// InstrCycles represents the cycle height and depth of an instruction in a
  /// trace.
  struct InstrCycles {
    /// Earliest issue cycle as determined by data dependencies and instruction
    /// latencies from the beginning of the trace. Data dependencies from
    /// before the trace are not included.
    unsigned Depth;

    /// Minimum number of cycles from when this instruction is issued to the
    /// end of the trace, as determined by data dependencies and instruction
    /// latencies.
    unsigned Height;
  };

  /// A trace represents a plausible sequence of executed basic blocks that
  /// passes through the current basic block once. The Trace class serves as a
  /// handle to internal cached data structures.
  class Trace {
    Ensemble &TE;
    TraceBlockInfo &TBI;

    unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }

  public:
    explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}

    void print(raw_ostream&) const;

    /// Compute the total number of instructions in the trace.
    unsigned getInstrCount() const {
      return TBI.InstrDepth + TBI.InstrHeight;
    }

    /// Return the resource depth of the top/bottom of the trace center block.
    /// This is the number of cycles required to execute all instructions from
    /// the trace head to the trace center block. The resource depth only
    /// considers execution resources, it ignores data dependencies.
    /// When Bottom is set, instructions in the trace center block are included.
    unsigned getResourceDepth(bool Bottom) const;

    /// Return the resource length of the trace. This is the number of cycles
    /// required to execute the instructions in the trace if they were all
    /// independent, exposing the maximum instruction-level parallelism.
    ///
    /// Any blocks in Extrablocks are included as if they were part of the
    /// trace. Likewise, extra resources required by the specified scheduling
    /// classes are included. For the caller to account for extra machine
    /// instructions, it must first resolve each instruction's scheduling class.
    unsigned getResourceLength(
        ArrayRef<const MachineBasicBlock *> Extrablocks = std::nullopt,
        ArrayRef<const MCSchedClassDesc *> ExtraInstrs = std::nullopt,
        ArrayRef<const MCSchedClassDesc *> RemoveInstrs = std::nullopt) const;

    /// Return the length of the (data dependency) critical path through the
    /// trace.
    unsigned getCriticalPath() const { return TBI.CriticalPath; }

    /// Return the depth and height of MI. The depth is only valid for
    /// instructions in or above the trace center block. The height is only
    /// valid for instructions in or below the trace center block.
    InstrCycles getInstrCycles(const MachineInstr &MI) const {
      return TE.Cycles.lookup(&MI);
    }

    /// Return the slack of MI. This is the number of cycles MI can be delayed
    /// before the critical path becomes longer.
    /// MI must be an instruction in the trace center block.
    unsigned getInstrSlack(const MachineInstr &MI) const;

    /// Return the Depth of a PHI instruction in a trace center block successor.
    /// The PHI does not have to be part of the trace.
    unsigned getPHIDepth(const MachineInstr &PHI) const;

    /// A dependence is useful if the basic block of the defining instruction
    /// is part of the trace of the user instruction. It is assumed that DefMI
    /// dominates UseMI (see also isUsefulDominator).
    bool isDepInTrace(const MachineInstr &DefMI,
                      const MachineInstr &UseMI) const;
  };

  /// A trace ensemble is a collection of traces selected using the same
  /// strategy, for example 'minimum resource height'. There is one trace for
  /// every block in the function.
  class Ensemble {
    friend class Trace;

    SmallVector<TraceBlockInfo, 4> BlockInfo;
    DenseMap<const MachineInstr*, InstrCycles> Cycles;
    SmallVector<unsigned, 0> ProcResourceDepths;
    SmallVector<unsigned, 0> ProcResourceHeights;

    void computeTrace(const MachineBasicBlock*);
    void computeDepthResources(const MachineBasicBlock*);
    void computeHeightResources(const MachineBasicBlock*);
    unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
    void computeInstrDepths(const MachineBasicBlock*);
    void computeInstrHeights(const MachineBasicBlock*);
    void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
                    ArrayRef<const MachineBasicBlock*> Trace);

  protected:
    MachineTraceMetrics &MTM;

    explicit Ensemble(MachineTraceMetrics*);

    virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
    virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
    const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
    const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
    const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
    ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
    ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;

  public:
    virtual ~Ensemble();

    virtual const char *getName() const = 0;
    void print(raw_ostream&) const;
    void invalidate(const MachineBasicBlock *MBB);
    void verify() const;

    /// Get the trace that passes through MBB.
    /// The trace is computed on demand.
    Trace getTrace(const MachineBasicBlock *MBB);

    /// Updates the depth of a machine instruction, given RegUnits.
    void updateDepth(TraceBlockInfo &TBI, const MachineInstr&,
                     SparseSet<LiveRegUnit> &RegUnits);
    void updateDepth(const MachineBasicBlock *, const MachineInstr&,
                     SparseSet<LiveRegUnit> &RegUnits);

    /// Updates the depth of the instructions from Start to End.
    void updateDepths(MachineBasicBlock::iterator Start,
                      MachineBasicBlock::iterator End,
                      SparseSet<LiveRegUnit> &RegUnits);

  };

  /// Get the trace ensemble representing the given trace selection strategy.
  /// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
  /// and valid for the lifetime of the analysis pass.
  Ensemble *getEnsemble(MachineTraceStrategy);

  /// Invalidate cached information about MBB. This must be called *before* MBB
  /// is erased or the CFG is otherwise changed.
  ///
  /// This invalidates per-block information about resource usage for MBB only,
  /// and it invalidates per-trace information for any trace that passes
  /// through MBB.
  ///
  /// Call Ensemble::getTrace() again to update any trace handles.
  void invalidate(const MachineBasicBlock *MBB);

private:
  // One entry per basic block, indexed by block number.
  SmallVector<FixedBlockInfo, 4> BlockInfo;

  // Cycles consumed on each processor resource per block.
  // The number of processor resource kinds is constant for a given subtarget,
  // but it is not known at compile time. The number of cycles consumed by
  // block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
  // where Kinds = SchedModel.getNumProcResourceKinds().
  SmallVector<unsigned, 0> ProcResourceCycles;

  // One ensemble per strategy.
  Ensemble
      *Ensembles[static_cast<size_t>(MachineTraceStrategy::TS_NumStrategies)];

  // Convert scaled resource usage to a cycle count that can be compared with
  // latencies.
  unsigned getCycles(unsigned Scaled) {
    unsigned Factor = SchedModel.getLatencyFactor();
    return (Scaled + Factor - 1) / Factor;
  }
};
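
// Illustrative sketch (legacy pass manager style, similar to how
// MachineCombiner consumes this analysis; MBB is an assumed
// `const MachineBasicBlock *` in the enclosing MachineFunctionPass):
//
//   MachineTraceMetrics &MTM = getAnalysis<MachineTraceMetrics>();
//   MachineTraceMetrics::Ensemble *E =
//       MTM.getEnsemble(MachineTraceStrategy::TS_MinInstrCount);
//   MachineTraceMetrics::Trace T = E->getTrace(MBB);
//   unsigned CritPath = T.getCriticalPath();   // data-dependency limit
//   unsigned ResLen = T.getResourceLength();   // execution-resource limit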

inline raw_ostream &operator<<(raw_ostream &OS,
                               const MachineTraceMetrics::Trace &Tr) {
  Tr.print(OS);
  return OS;
}

inline raw_ostream &operator<<(raw_ostream &OS,
                               const MachineTraceMetrics::Ensemble &En) {
  En.print(OS);
  return OS;
}

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINETRACEMETRICS_H
//==------ llvm/CodeGen/GlobalISel/MIPatternMatch.h -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Contains matchers for matching SSA Machine Instructions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_MIPATTERNMATCH_H
#define LLVM_CODEGEN_GLOBALISEL_MIPATTERNMATCH_H

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/InstrTypes.h"

namespace llvm {
namespace MIPatternMatch {

template <typename Reg, typename Pattern>
[[nodiscard]] bool mi_match(Reg R, const MachineRegisterInfo &MRI,
                            Pattern &&P) {
  return P.match(MRI, R);
}

template <typename Pattern>
[[nodiscard]] bool mi_match(MachineInstr &MI, const MachineRegisterInfo &MRI,
                            Pattern &&P) {
  return P.match(MRI, &MI);
}
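
// Illustrative example (Reg and MRI are assumed to exist in the caller):
// match a G_ADD of some register and a constant rooted at Reg, binding both
// operands. m_GAdd and m_ICst are defined further below in this header.
//
//   Register Src;
//   int64_t Cst;
//   if (mi_match(Reg, MRI, m_GAdd(m_Reg(Src), m_ICst(Cst))))
//     ...; // Reg = G_ADD Src, Cst (in either operand order; G_ADD commutes)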

// TODO: Extend for N use.
template <typename SubPatternT> struct OneUse_match {
  SubPatternT SubPat;
  OneUse_match(const SubPatternT &SP) : SubPat(SP) {}

  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    return MRI.hasOneUse(Reg) && SubPat.match(MRI, Reg);
  }
};

template <typename SubPat>
inline OneUse_match<SubPat> m_OneUse(const SubPat &SP) {
  return SP;
}

template <typename SubPatternT> struct OneNonDBGUse_match {
  SubPatternT SubPat;
  OneNonDBGUse_match(const SubPatternT &SP) : SubPat(SP) {}

  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    return MRI.hasOneNonDBGUse(Reg) && SubPat.match(MRI, Reg);
  }
};

template <typename SubPat>
inline OneNonDBGUse_match<SubPat> m_OneNonDBGUse(const SubPat &SP) {
  return SP;
}

template <typename ConstT>
inline std::optional<ConstT> matchConstant(Register,
                                           const MachineRegisterInfo &);

template <>
inline std::optional<APInt> matchConstant(Register Reg,
                                          const MachineRegisterInfo &MRI) {
  return getIConstantVRegVal(Reg, MRI);
}

template <>
inline std::optional<int64_t> matchConstant(Register Reg,
                                            const MachineRegisterInfo &MRI) {
  return getIConstantVRegSExtVal(Reg, MRI);
}

template <typename ConstT> struct ConstantMatch {
  ConstT &CR;
  ConstantMatch(ConstT &C) : CR(C) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    if (auto MaybeCst = matchConstant<ConstT>(Reg, MRI)) {
      CR = *MaybeCst;
      return true;
    }
    return false;
  }
};

inline ConstantMatch<APInt> m_ICst(APInt &Cst) {
  return ConstantMatch<APInt>(Cst);
}
inline ConstantMatch<int64_t> m_ICst(int64_t &Cst) {
  return ConstantMatch<int64_t>(Cst);
}

template <typename ConstT>
inline std::optional<ConstT> matchConstantSplat(Register,
                                                const MachineRegisterInfo &);

template <>
inline std::optional<APInt> matchConstantSplat(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  return getIConstantSplatVal(Reg, MRI);
}

template <>
inline std::optional<int64_t>
matchConstantSplat(Register Reg, const MachineRegisterInfo &MRI) {
  return getIConstantSplatSExtVal(Reg, MRI);
}

template <typename ConstT> struct ICstOrSplatMatch {
  ConstT &CR;
  ICstOrSplatMatch(ConstT &C) : CR(C) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    if (auto MaybeCst = matchConstant<ConstT>(Reg, MRI)) {
      CR = *MaybeCst;
      return true;
    }

    if (auto MaybeCstSplat = matchConstantSplat<ConstT>(Reg, MRI)) {
      CR = *MaybeCstSplat;
      return true;
    }

    return false;
  }
};

inline ICstOrSplatMatch<APInt> m_ICstOrSplat(APInt &Cst) {
  return ICstOrSplatMatch<APInt>(Cst);
}

inline ICstOrSplatMatch<int64_t> m_ICstOrSplat(int64_t &Cst) {
  return ICstOrSplatMatch<int64_t>(Cst);
}

struct GCstAndRegMatch {
  std::optional<ValueAndVReg> &ValReg;
  GCstAndRegMatch(std::optional<ValueAndVReg> &ValReg) : ValReg(ValReg) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    ValReg = getIConstantVRegValWithLookThrough(Reg, MRI);
    return ValReg.has_value();
  }
};

inline GCstAndRegMatch m_GCst(std::optional<ValueAndVReg> &ValReg) {
  return GCstAndRegMatch(ValReg);
}

struct GFCstAndRegMatch {
  std::optional<FPValueAndVReg> &FPValReg;
  GFCstAndRegMatch(std::optional<FPValueAndVReg> &FPValReg)
      : FPValReg(FPValReg) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    FPValReg = getFConstantVRegValWithLookThrough(Reg, MRI);
    return FPValReg.has_value();
  }
};

inline GFCstAndRegMatch m_GFCst(std::optional<FPValueAndVReg> &FPValReg) {
  return GFCstAndRegMatch(FPValReg);
}

struct GFCstOrSplatGFCstMatch {
  std::optional<FPValueAndVReg> &FPValReg;
  GFCstOrSplatGFCstMatch(std::optional<FPValueAndVReg> &FPValReg)
      : FPValReg(FPValReg) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    return (FPValReg = getFConstantSplat(Reg, MRI)) ||
           (FPValReg = getFConstantVRegValWithLookThrough(Reg, MRI));
  }
};

inline GFCstOrSplatGFCstMatch
m_GFCstOrSplat(std::optional<FPValueAndVReg> &FPValReg) {
  return GFCstOrSplatGFCstMatch(FPValReg);
}

/// Matcher for a specific constant value.
struct SpecificConstantMatch {
  int64_t RequestedVal;
  SpecificConstantMatch(int64_t RequestedVal) : RequestedVal(RequestedVal) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    int64_t MatchedVal;
    return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;
  }
};

/// Matches a constant equal to \p RequestedValue.
inline SpecificConstantMatch m_SpecificICst(int64_t RequestedValue) {
  return SpecificConstantMatch(RequestedValue);
}
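
// Illustrative example (Reg and MRI assumed): recognize a left shift by
// exactly one using the specific-constant matcher (m_GShl is defined below).
//
//   Register Src;
//   if (mi_match(Reg, MRI, m_GShl(m_Reg(Src), m_SpecificICst(1))))
//     ...; // Reg = G_SHL Src, 1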

/// Matcher for a specific constant splat.
struct SpecificConstantSplatMatch {
  int64_t RequestedVal;
  SpecificConstantSplatMatch(int64_t RequestedVal)
      : RequestedVal(RequestedVal) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    return isBuildVectorConstantSplat(Reg, MRI, RequestedVal,
                                      /* AllowUndef */ false);
  }
};

/// Matches a constant splat of \p RequestedValue.
inline SpecificConstantSplatMatch m_SpecificICstSplat(int64_t RequestedValue) {
  return SpecificConstantSplatMatch(RequestedValue);
}

/// Matcher for a specific constant or constant splat.
struct SpecificConstantOrSplatMatch {
  int64_t RequestedVal;
  SpecificConstantOrSplatMatch(int64_t RequestedVal)
      : RequestedVal(RequestedVal) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    int64_t MatchedVal;
    if (mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal)
      return true;
    return isBuildVectorConstantSplat(Reg, MRI, RequestedVal,
                                      /* AllowUndef */ false);
  }
};

/// Matches a \p RequestedValue constant or a constant splat of \p
/// RequestedValue.
inline SpecificConstantOrSplatMatch
m_SpecificICstOrSplat(int64_t RequestedValue) {
  return SpecificConstantOrSplatMatch(RequestedValue);
}

///{
/// Convenience matchers for specific integer values.
inline SpecificConstantMatch m_ZeroInt() { return SpecificConstantMatch(0); }
inline SpecificConstantMatch m_AllOnesInt() {
  return SpecificConstantMatch(-1);
}
///}

/// Matcher for a specific register.
struct SpecificRegisterMatch {
  Register RequestedReg;
  SpecificRegisterMatch(Register RequestedReg) : RequestedReg(RequestedReg) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    return Reg == RequestedReg;
  }
};

/// Matches a register only if it is equal to \p RequestedReg.
inline SpecificRegisterMatch m_SpecificReg(Register RequestedReg) {
  return SpecificRegisterMatch(RequestedReg);
}

// TODO: Rework this for different kinds of MachineOperand.
// Currently assumes the Src for a match is a register.
// We might want to support taking in some MachineOperands and call getReg on
// that.

struct operand_type_match {
  bool match(const MachineRegisterInfo &MRI, Register Reg) { return true; }
  bool match(const MachineRegisterInfo &MRI, MachineOperand *MO) {
    return MO->isReg();
  }
};

inline operand_type_match m_Reg() { return operand_type_match(); }

/// Matching combinators.
template <typename... Preds> struct And {
  template <typename MatchSrc>
  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
    return true;
  }
};

template <typename Pred, typename... Preds>
struct And<Pred, Preds...> : And<Preds...> {
  Pred P;
  And(Pred &&p, Preds &&... preds)
      : And<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {
  }
  template <typename MatchSrc>
  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
    return P.match(MRI, src) && And<Preds...>::match(MRI, src);
  }
};

template <typename... Preds> struct Or {
  template <typename MatchSrc>
  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
    return false;
  }
};

template <typename Pred, typename... Preds>
struct Or<Pred, Preds...> : Or<Preds...> {
  Pred P;
  Or(Pred &&p, Preds &&... preds)
      : Or<Preds...>(std::forward<Preds>(preds)...), P(std::forward<Pred>(p)) {}
  template <typename MatchSrc>
  bool match(const MachineRegisterInfo &MRI, MatchSrc &&src) {
    return P.match(MRI, src) || Or<Preds...>::match(MRI, src);
  }
};

template <typename... Preds> And<Preds...> m_all_of(Preds &&... preds) {
  return And<Preds...>(std::forward<Preds>(preds)...);
}

template <typename... Preds> Or<Preds...> m_any_of(Preds &&... preds) {
  return Or<Preds...>(std::forward<Preds>(preds)...);
}
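
// Illustrative example (Reg and MRI assumed): m_any_of tries each
// alternative in turn, so one check can accept several opcodes (m_GAdd and
// m_GSub are defined below).
//
//   Register A, B;
//   if (mi_match(Reg, MRI, m_any_of(m_GAdd(m_Reg(A), m_Reg(B)),
//                                   m_GSub(m_Reg(A), m_Reg(B)))))
//     ...; // Reg is defined by either a G_ADD or a G_SUB of A and B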

template <typename BindTy> struct bind_helper {
  static bool bind(const MachineRegisterInfo &MRI, BindTy &VR, BindTy &V) {
    VR = V;
    return true;
  }
};

template <> struct bind_helper<MachineInstr *> {
  static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
                   Register Reg) {
    MI = MRI.getVRegDef(Reg);
    if (MI)
      return true;
    return false;
  }
  static bool bind(const MachineRegisterInfo &MRI, MachineInstr *&MI,
                   MachineInstr *Inst) {
    MI = Inst;
    return MI;
  }
};

template <> struct bind_helper<LLT> {
  static bool bind(const MachineRegisterInfo &MRI, LLT Ty, Register Reg) {
    Ty = MRI.getType(Reg);
    if (Ty.isValid())
      return true;
    return false;
  }
};

template <> struct bind_helper<const ConstantFP *> {
  static bool bind(const MachineRegisterInfo &MRI, const ConstantFP *&F,
                   Register Reg) {
    F = getConstantFPVRegVal(Reg, MRI);
    if (F)
      return true;
    return false;
  }
};

template <typename Class> struct bind_ty {
  Class &VR;

  bind_ty(Class &V) : VR(V) {}

  template <typename ITy> bool match(const MachineRegisterInfo &MRI, ITy &&V) {
    return bind_helper<Class>::bind(MRI, VR, V);
  }
};

inline bind_ty<Register> m_Reg(Register &R) { return R; }
inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
inline bind_ty<LLT> m_Type(LLT Ty) { return Ty; }
inline bind_ty<CmpInst::Predicate> m_Pred(CmpInst::Predicate &P) { return P; }
inline operand_type_match m_Pred() { return operand_type_match(); }

struct ImplicitDefMatch {
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    MachineInstr *TmpMI;
    if (mi_match(Reg, MRI, m_MInstr(TmpMI)))
      return TmpMI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
    return false;
  }
};

inline ImplicitDefMatch m_GImplicitDef() { return ImplicitDefMatch(); }

// Helper for matching G_FCONSTANT
inline bind_ty<const ConstantFP *> m_GFCst(const ConstantFP *&C) { return C; }

// General helper for all the binary generic MI such as G_ADD/G_SUB, etc.
template <typename LHS_P, typename RHS_P, unsigned Opcode,
          bool Commutable = false>
struct BinaryOp_match {
  LHS_P L;
  RHS_P R;

  BinaryOp_match(const LHS_P &LHS, const RHS_P &RHS) : L(LHS), R(RHS) {}
  template <typename OpTy>
  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
    MachineInstr *TmpMI;
    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 3) {
        return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
                R.match(MRI, TmpMI->getOperand(2).getReg())) ||
               (Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
                               L.match(MRI, TmpMI->getOperand(2).getReg())));
      }
    }
    return false;
  }
};

// Helper for (commutative) binary generic MI that checks Opcode.
template <typename LHS_P, typename RHS_P, bool Commutable = false>
struct BinaryOpc_match {
  unsigned Opc;
  LHS_P L;
  RHS_P R;

  BinaryOpc_match(unsigned Opcode, const LHS_P &LHS, const RHS_P &RHS)
      : Opc(Opcode), L(LHS), R(RHS) {}
  template <typename OpTy>
  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
    MachineInstr *TmpMI;
    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
      if (TmpMI->getOpcode() == Opc && TmpMI->getNumDefs() == 1 &&
          TmpMI->getNumOperands() == 3) {
        return (L.match(MRI, TmpMI->getOperand(1).getReg()) &&
                R.match(MRI, TmpMI->getOperand(2).getReg())) ||
               (Commutable && (R.match(MRI, TmpMI->getOperand(1).getReg()) &&
                               L.match(MRI, TmpMI->getOperand(2).getReg())));
      }
    }
    return false;
  }
};

template <typename LHS, typename RHS>
inline BinaryOpc_match<LHS, RHS, false> m_BinOp(unsigned Opcode, const LHS &L,
                                                const RHS &R) {
  return BinaryOpc_match<LHS, RHS, false>(Opcode, L, R);
}

template <typename LHS, typename RHS>
inline BinaryOpc_match<LHS, RHS, true>
m_CommutativeBinOp(unsigned Opcode, const LHS &L, const RHS &R) {
  return BinaryOpc_match<LHS, RHS, true>(Opcode, L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>
m_GAdd(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ADD, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_BUILD_VECTOR, false>
m_GBuildVector(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_BUILD_VECTOR, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_BUILD_VECTOR_TRUNC, false>
m_GBuildVectorTrunc(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_BUILD_VECTOR_TRUNC, false>(L,
                                                                             R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, false>
m_GPtrAdd(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_PTR_ADD, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB> m_GSub(const LHS &L,
                                                            const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SUB>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>
m_GMul(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_MUL, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>
m_GFAdd(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FADD, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>
m_GFMul(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FMUL, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>
m_GFSub(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_FSUB, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>
m_GAnd(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_AND, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>
m_GXor(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_XOR, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true> m_GOr(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_OR, true>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SHL, false>
m_GShl(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SHL, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>
m_GLShr(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_LSHR, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>
m_GAShr(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_ASHR, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SMAX, false>
m_GSMax(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SMAX, false>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, TargetOpcode::G_SMIN, false>
m_GSMin(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, TargetOpcode::G_SMIN, false>(L, R);
}

// Helper for unary instructions (G_[ZSA]EXT/G_TRUNC), etc.
template <typename SrcTy, unsigned Opcode> struct UnaryOp_match {
  SrcTy L;

  UnaryOp_match(const SrcTy &LHS) : L(LHS) {}
  template <typename OpTy>
  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
    MachineInstr *TmpMI;
    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 2) {
        return L.match(MRI, TmpMI->getOperand(1).getReg());
      }
    }
    return false;
  }
};

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>
m_GAnyExt(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_ANYEXT>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_SEXT> m_GSExt(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_SEXT>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT> m_GZExt(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_ZEXT>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT> m_GFPExt(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_FPEXT>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC> m_GTrunc(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_TRUNC>(Src);
}
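
// Illustrative example (Reg and MRI assumed): unary matchers nest, so a
// zext-of-trunc chain can be looked through in one expression.
//
//   Register Inner;
//   if (mi_match(Reg, MRI, m_GZExt(m_GTrunc(m_Reg(Inner)))))
//     ...; // Reg = G_ZEXT (G_TRUNC Inner)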

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>
m_GBitcast(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_BITCAST>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>
m_GPtrToInt(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_PTRTOINT>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>
m_GIntToPtr(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_INTTOPTR>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>
m_GFPTrunc(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_FPTRUNC>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_FABS> m_GFabs(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_FABS>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_FNEG> m_GFNeg(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_FNEG>(Src);
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::COPY> m_Copy(SrcTy &&Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::COPY>(std::forward<SrcTy>(Src));
}

template <typename SrcTy>
inline UnaryOp_match<SrcTy, TargetOpcode::G_FSQRT> m_GFSqrt(const SrcTy &Src) {
  return UnaryOp_match<SrcTy, TargetOpcode::G_FSQRT>(Src);
}

// General helper for generic MI compares, i.e. G_ICMP and G_FCMP
// TODO: Allow checking a specific predicate.
template <typename Pred_P, typename LHS_P, typename RHS_P, unsigned Opcode,
          bool Commutable = false>
struct CompareOp_match {
  Pred_P P;
  LHS_P L;
  RHS_P R;

  CompareOp_match(const Pred_P &Pred, const LHS_P &LHS, const RHS_P &RHS)
      : P(Pred), L(LHS), R(RHS) {}

  template <typename OpTy>
  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
    MachineInstr *TmpMI;
    if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
      return false;

    auto TmpPred =
        static_cast<CmpInst::Predicate>(TmpMI->getOperand(1).getPredicate());
    if (!P.match(MRI, TmpPred))
      return false;
    Register LHS = TmpMI->getOperand(2).getReg();
    Register RHS = TmpMI->getOperand(3).getReg();
    if (L.match(MRI, LHS) && R.match(MRI, RHS))
      return true;
    if (Commutable && L.match(MRI, RHS) && R.match(MRI, LHS) &&
        P.match(MRI, CmpInst::getSwappedPredicate(TmpPred)))
      return true;
    return false;
  }
};

template <typename Pred, typename LHS, typename RHS>
inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>
m_GICmp(const Pred &P, const LHS &L, const RHS &R) {
  return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP>(P, L, R);
}

template <typename Pred, typename LHS, typename RHS>
inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP>
m_GFCmp(const Pred &P, const LHS &L, const RHS &R) {
  return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP>(P, L, R);
}

/// G_ICMP matcher that also matches commuted compares.
/// E.g.
///
/// m_c_GICmp(m_Pred(...), m_GAdd(...), m_GSub(...))
///
/// Could match both of:
///
/// icmp ugt (add x, y) (sub a, b)
/// icmp ult (sub a, b) (add x, y)
template <typename Pred, typename LHS, typename RHS>
inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP, true>
m_c_GICmp(const Pred &P, const LHS &L, const RHS &R) {
  return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_ICMP, true>(P, L, R);
}

/// G_FCMP matcher that also matches commuted compares.
/// E.g.
///
/// m_c_GFCmp(m_Pred(...), m_FAdd(...), m_GFMul(...))
///
/// Could match both of:
///
/// fcmp ogt (fadd x, y) (fmul a, b)
/// fcmp olt (fmul a, b) (fadd x, y)
template <typename Pred, typename LHS, typename RHS>
inline CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP, true>
m_c_GFCmp(const Pred &P, const LHS &L, const RHS &R) {
  return CompareOp_match<Pred, LHS, RHS, TargetOpcode::G_FCMP, true>(P, L, R);
}
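
// Illustrative example (Reg and MRI assumed): bind the predicate and both
// operands of a G_ICMP.
//
//   CmpInst::Predicate Pred;
//   Register LHS, RHS;
//   if (mi_match(Reg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS))))
//     ...; // Reg = G_ICMP Pred, LHS, RHS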

// Helper for checking if a Reg is of specific type.
struct CheckType {
  LLT Ty;
  CheckType(const LLT Ty) : Ty(Ty) {}

  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    return MRI.getType(Reg) == Ty;
  }
};

inline CheckType m_SpecificType(LLT Ty) { return Ty; }

template <typename Src0Ty, typename Src1Ty, typename Src2Ty, unsigned Opcode>
struct TernaryOp_match {
  Src0Ty Src0;
  Src1Ty Src1;
  Src2Ty Src2;

  TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
      : Src0(Src0), Src1(Src1), Src2(Src2) {}
  template <typename OpTy>
  bool match(const MachineRegisterInfo &MRI, OpTy &&Op) {
    MachineInstr *TmpMI;
    if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
      if (TmpMI->getOpcode() == Opcode && TmpMI->getNumOperands() == 4) {
        return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
                Src1.match(MRI, TmpMI->getOperand(2).getReg()) &&
                Src2.match(MRI, TmpMI->getOperand(3).getReg()));
      }
    }
    return false;
  }
};
template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
                       TargetOpcode::G_INSERT_VECTOR_ELT>
m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
  return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty,
                         TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
}

template <typename Src0Ty, typename Src1Ty, typename Src2Ty>
inline TernaryOp_match<Src0Ty, Src1Ty, Src2Ty, TargetOpcode::G_SELECT>
m_GISelect(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
  return TernaryOp_match<Src0Ty, Src1Ty, Src2Ty, TargetOpcode::G_SELECT>(
      Src0, Src1, Src2);
}

/// Matches a register negated by a G_SUB.
/// G_SUB 0, %negated_reg
template <typename SrcTy>
inline BinaryOp_match<SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB>
m_Neg(const SrcTy &&Src) {
  return m_GSub(m_ZeroInt(), Src);
}

/// Matches a register not-ed by a G_XOR.
/// G_XOR %not_reg, -1
template <typename SrcTy>
inline BinaryOp_match<SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true>
m_Not(const SrcTy &&Src) {
  return m_GXor(Src, m_AllOnesInt());
}
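
// Illustrative example (Reg and MRI assumed): m_Neg and m_Not compose with
// the matchers above, so "negate of a NOT" is one nested pattern.
//
//   Register X;
//   if (mi_match(Reg, MRI, m_Neg(m_Not(m_Reg(X)))))
//     ...; // Reg = G_SUB 0, (G_XOR X, -1), which computes X + 1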

} // namespace MIPatternMatch
} // namespace llvm

#endif
//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.h - MIBuilder --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the MachineIRBuilder class.
/// This is a helper class to build MachineInstr.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H
#define LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H

#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Module.h"

namespace llvm {

// Forward declarations.
class APInt;
class BlockAddress;
class Constant;
class ConstantFP;
class ConstantInt;
class DataLayout;
class GISelCSEInfo;
class GlobalValue;
class TargetRegisterClass;
class MachineFunction;
class MachineInstr;
class TargetInstrInfo;
class GISelChangeObserver;

/// Class which stores all the state required in a MachineIRBuilder.
/// Since MachineIRBuilders will only store state in this object, it allows
/// transferring BuilderState between different kinds of MachineIRBuilders.
struct MachineIRBuilderState {
  /// MachineFunction under construction.
  MachineFunction *MF = nullptr;
  /// Information used to access the description of the opcodes.
  const TargetInstrInfo *TII = nullptr;
  /// Information used to verify types are consistent and to create virtual
  /// registers.
  MachineRegisterInfo *MRI = nullptr;
  /// Debug location to be set to any instruction we create.
  DebugLoc DL;
  /// PC sections metadata to be set to any instruction we create.
  MDNode *PCSections = nullptr;

  /// \name Fields describing the insertion point.
  /// @{
  MachineBasicBlock *MBB = nullptr;
  MachineBasicBlock::iterator II;
  /// @}

  GISelChangeObserver *Observer = nullptr;

  GISelCSEInfo *CSEInfo = nullptr;
};

class DstOp {
  union {
    LLT LLTTy;
    Register Reg;
    const TargetRegisterClass *RC;
  };

public:
  enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
  DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
  DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
  DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
  DstOp(const LLT T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
  DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}

  void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const {
    switch (Ty) {
    case DstType::Ty_Reg:
      MIB.addDef(Reg);
      break;
    case DstType::Ty_LLT:
      MIB.addDef(MRI.createGenericVirtualRegister(LLTTy));
      break;
    case DstType::Ty_RC:
      MIB.addDef(MRI.createVirtualRegister(RC));
      break;
    }
  }

  LLT getLLTTy(const MachineRegisterInfo &MRI) const {
    switch (Ty) {
    case DstType::Ty_RC:
      return LLT{};
    case DstType::Ty_LLT:
      return LLTTy;
    case DstType::Ty_Reg:
      return MRI.getType(Reg);
    }
    llvm_unreachable("Unrecognised DstOp::DstType enum");
  }

  Register getReg() const {
    assert(Ty == DstType::Ty_Reg && "Not a register");
    return Reg;
  }

  const TargetRegisterClass *getRegClass() const {
    switch (Ty) {
    case DstType::Ty_RC:
      return RC;
    default:
      llvm_unreachable("Not a RC Operand");
    }
  }

  DstType getDstOpKind() const { return Ty; }

private:
  DstType Ty;
};
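
// Illustrative sketch (ExistingReg is a placeholder): what each DstOp flavor
// makes addDefToMIB() do.
//
//   DstOp FromTy(LLT::scalar(64)); // creates a fresh generic vreg of s64
//   DstOp FromReg(ExistingReg);    // defines the given register as-is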

class SrcOp {
  union {
    MachineInstrBuilder SrcMIB;
    Register Reg;
    CmpInst::Predicate Pred;
    int64_t Imm;
  };

public:
  enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate, Ty_Imm };
  SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
  SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
  SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
  SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
  /// Use of registers held in unsigned integer variables (or more rarely signed
  /// integers) is no longer permitted to avoid ambiguity with upcoming support
  /// for immediates.
  SrcOp(unsigned) = delete;
  SrcOp(int) = delete;
  SrcOp(uint64_t V) : Imm(V), Ty(SrcType::Ty_Imm) {}
  SrcOp(int64_t V) : Imm(V), Ty(SrcType::Ty_Imm) {}

  void addSrcToMIB(MachineInstrBuilder &MIB) const {
    switch (Ty) {
    case SrcType::Ty_Predicate:
      MIB.addPredicate(Pred);
      break;
    case SrcType::Ty_Reg:
      MIB.addUse(Reg);
      break;
    case SrcType::Ty_MIB:
      MIB.addUse(SrcMIB->getOperand(0).getReg());
      break;
    case SrcType::Ty_Imm:
      MIB.addImm(Imm);
      break;
    }
  }

  LLT getLLTTy(const MachineRegisterInfo &MRI) const {
    switch (Ty) {
    case SrcType::Ty_Predicate:
    case SrcType::Ty_Imm:
      llvm_unreachable("Not a register operand");
    case SrcType::Ty_Reg:
      return MRI.getType(Reg);
    case SrcType::Ty_MIB:
      return MRI.getType(SrcMIB->getOperand(0).getReg());
    }
    llvm_unreachable("Unrecognised SrcOp::SrcType enum");
  }

  Register getReg() const {
    switch (Ty) {
    case SrcType::Ty_Predicate:
    case SrcType::Ty_Imm:
      llvm_unreachable("Not a register operand");
    case SrcType::Ty_Reg:
      return Reg;
    case SrcType::Ty_MIB:
      return SrcMIB->getOperand(0).getReg();
    }
    llvm_unreachable("Unrecognised SrcOp::SrcType enum");
  }

  CmpInst::Predicate getPredicate() const {
    switch (Ty) {
    case SrcType::Ty_Predicate:
      return Pred;
    default:
      llvm_unreachable("Not a register operand");
    }
  }

  int64_t getImm() const {
    switch (Ty) {
    case SrcType::Ty_Imm:
      return Imm;
    default:
      llvm_unreachable("Not an immediate");
    }
  }

  SrcType getSrcOpKind() const { return Ty; }

private:
  SrcType Ty;
};
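
// Example (illustrative sketch, not compiled here): DstOp and SrcOp exist so
// builder calls accept an LLT, a Register, or another builder's result
// interchangeably. Assuming a configured MachineIRBuilder B, a
// MachineRegisterInfo MRI, and s32 registers Lhs/Rhs:
//
//   LLT S32 = LLT::scalar(32);
//   auto Sum = B.buildAdd(S32, Lhs, Rhs);   // LLT dst: a fresh vreg is made
//   Register Dst = MRI.createGenericVirtualRegister(S32);
//   B.buildAdd(Dst, Lhs, Rhs);              // Register dst: reused as-is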

/// Helper class to build MachineInstr.
/// It keeps internally the insertion point and debug location for all
/// the new instructions we want to create.
/// This information can be modified via the related setters.
class MachineIRBuilder {

  MachineIRBuilderState State;

  unsigned getOpcodeForMerge(const DstOp &DstOp, ArrayRef<SrcOp> SrcOps) const;

protected:
  void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend);

  void validateUnaryOp(const LLT Res, const LLT Op0);
  void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1);
  void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1);

  void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty,
                        const LLT Op1Ty);

  void recordInsertion(MachineInstr *InsertedInstr) const {
    if (State.Observer)
      State.Observer->createdInstr(*InsertedInstr);
  }

public:
  /// Some constructors for easy use.
  MachineIRBuilder() = default;
  MachineIRBuilder(MachineFunction &MF) { setMF(MF); }

  MachineIRBuilder(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt) {
    setMF(*MBB.getParent());
    setInsertPt(MBB, InsPt);
  }

  MachineIRBuilder(MachineInstr &MI) :
    MachineIRBuilder(*MI.getParent(), MI.getIterator()) {
    setInstr(MI);
    setDebugLoc(MI.getDebugLoc());
  }

  MachineIRBuilder(MachineInstr &MI, GISelChangeObserver &Observer) :
    MachineIRBuilder(MI) {
    setChangeObserver(Observer);
  }

  virtual ~MachineIRBuilder() = default;

  MachineIRBuilder(const MachineIRBuilderState &BState) : State(BState) {}
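
  // Example (illustrative): typical ways to obtain a builder; MF, MBB and MI
  // are assumed to come from the surrounding pass.
  //
  //   MachineIRBuilder B(MF);                 // insertion point still unset
  //   MachineIRBuilder AtEnd(MBB, MBB.end()); // insert at the end of MBB
  //   MachineIRBuilder Before(MI);            // insert before MI, reusing
  //                                           // its debug location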

  const TargetInstrInfo &getTII() {
    assert(State.TII && "TargetInstrInfo is not set");
    return *State.TII;
  }

  /// Getter for the function we currently build.
  MachineFunction &getMF() {
    assert(State.MF && "MachineFunction is not set");
    return *State.MF;
  }

  const MachineFunction &getMF() const {
    assert(State.MF && "MachineFunction is not set");
    return *State.MF;
  }

  const DataLayout &getDataLayout() const {
    return getMF().getFunction().getParent()->getDataLayout();
  }

  LLVMContext &getContext() const {
    return getMF().getFunction().getContext();
  }

  /// Getter for DebugLoc
  const DebugLoc &getDL() { return State.DL; }

  /// Getter for MRI
  MachineRegisterInfo *getMRI() { return State.MRI; }
  const MachineRegisterInfo *getMRI() const { return State.MRI; }

  /// Getter for the State
  MachineIRBuilderState &getState() { return State; }

  /// Getter for the basic block we currently build.
  const MachineBasicBlock &getMBB() const {
    assert(State.MBB && "MachineBasicBlock is not set");
    return *State.MBB;
  }

  MachineBasicBlock &getMBB() {
    return const_cast<MachineBasicBlock &>(
        const_cast<const MachineIRBuilder *>(this)->getMBB());
  }

  GISelCSEInfo *getCSEInfo() { return State.CSEInfo; }
  const GISelCSEInfo *getCSEInfo() const { return State.CSEInfo; }

  /// Current insertion point for new instructions.
  MachineBasicBlock::iterator getInsertPt() { return State.II; }

  /// Set the insertion point before the specified position.
  /// \pre MBB must be in getMF().
  /// \pre II must be a valid iterator in MBB.
  void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II) {
    assert(MBB.getParent() == &getMF() &&
           "Basic block is in a different function");
    State.MBB = &MBB;
    State.II = II;
  }

  void setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }

  /// \name Setters for the insertion point.
  /// @{
  /// Set the MachineFunction where to build instructions.
  void setMF(MachineFunction &MF);

  /// Set the insertion point to the end of \p MBB.
  /// \pre \p MBB must be contained by getMF().
  void setMBB(MachineBasicBlock &MBB) {
    State.MBB = &MBB;
    State.II = MBB.end();
    assert(&getMF() == MBB.getParent() &&
           "Basic block is in a different function");
  }

  /// Set the insertion point to before MI.
  /// \pre MI must be in getMF().
  void setInstr(MachineInstr &MI) {
    assert(MI.getParent() && "Instruction is not part of a basic block");
    setMBB(*MI.getParent());
    State.II = MI.getIterator();
    setPCSections(MI.getPCSections());
  }
  /// @}

  /// Set the insertion point to before MI, and set the debug loc to MI's loc.
  /// \pre MI must be in getMF().
  void setInstrAndDebugLoc(MachineInstr &MI) {
    setInstr(MI);
    setDebugLoc(MI.getDebugLoc());
  }
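
  // Example (illustrative): a combine that rewrites MI typically repositions
  // the builder first so new instructions land before MI and inherit its
  // debug location:
  //
  //   B.setInstrAndDebugLoc(MI);
  //   auto Zero = B.buildConstant(LLT::scalar(32), 0);  // inserted before MI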

  void setChangeObserver(GISelChangeObserver &Observer) {
    State.Observer = &Observer;
  }

  void stopObservingChanges() { State.Observer = nullptr; }

  /// Set the debug location to \p DL for all the next build instructions.
  void setDebugLoc(const DebugLoc &DL) { this->State.DL = DL; }

  /// Get the current instruction's debug location.
  const DebugLoc &getDebugLoc() { return State.DL; }

  /// Set the PC sections metadata to \p MD for all the next build instructions.
  void setPCSections(MDNode *MD) { State.PCSections = MD; }

  /// Get the current instruction's PC sections metadata.
  MDNode *getPCSections() { return State.PCSections; }

  /// Build and insert <empty> = \p Opcode <empty>.
  /// The insertion point is the one set by the last call of either
  /// setBasicBlock or setMI.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildInstr(unsigned Opcode) {
    return insertInstr(buildInstrNoInsert(Opcode));
  }
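
  // Example (illustrative sketch): the two-step form is useful when operands
  // must be added by hand before the instruction is placed. DstReg is an
  // assumed pre-created virtual register.
  //
  //   auto MIB = buildInstrNoInsert(TargetOpcode::G_IMPLICIT_DEF);
  //   MIB.addDef(DstReg);
  //   insertInstr(MIB);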

  /// Build but don't insert <empty> = \p Opcode <empty>.
  ///
  /// \pre setMF, setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildInstrNoInsert(unsigned Opcode);

  /// Insert an existing instruction at the insertion point.
  MachineInstrBuilder insertInstr(MachineInstrBuilder MIB);

  /// Build and insert a DBG_VALUE instruction expressing the fact that the
  /// associated \p Variable lives in \p Reg (suitably modified by \p Expr).
  MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                          const MDNode *Expr);

  /// Build and insert a DBG_VALUE instruction expressing the fact that the
  /// associated \p Variable lives in memory at \p Reg (suitably modified by \p
  /// Expr).
  MachineInstrBuilder buildIndirectDbgValue(Register Reg,
                                            const MDNode *Variable,
                                            const MDNode *Expr);

  /// Build and insert a DBG_VALUE instruction expressing the fact that the
  /// associated \p Variable lives in the stack slot specified by \p FI
  /// (suitably modified by \p Expr).
  MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable,
                                      const MDNode *Expr);

  /// Build and insert a DBG_VALUE instruction specifying that \p Variable is
  /// given by \p C (suitably modified by \p Expr).
  MachineInstrBuilder buildConstDbgValue(const Constant &C,
                                         const MDNode *Variable,
                                         const MDNode *Expr);

  /// Build and insert a DBG_LABEL instruction specifying that \p Label is
  /// given. Convert "llvm.dbg.label Label" to "DBG_LABEL Label".
  MachineInstrBuilder buildDbgLabel(const MDNode *Label);

  /// Build and insert \p Res = G_DYN_STACKALLOC \p Size, \p Align
  ///
  /// G_DYN_STACKALLOC does a dynamic stack allocation and writes the address of
  /// the allocated memory into \p Res.
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size,
                                         Align Alignment);

  /// Build and insert \p Res = G_FRAME_INDEX \p Idx
  ///
  /// G_FRAME_INDEX materializes the address of an alloca value or other
  /// stack-based object.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx);

  /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
  ///
  /// G_GLOBAL_VALUE materializes the address of the specified global
  /// into \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with pointer type
  ///      in the same address space as \p GV.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV);

  /// Build and insert \p Res = G_CONSTANT_POOL \p Idx
  ///
  /// G_CONSTANT_POOL materializes the address of an object in the constant
  /// pool.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx);

  /// Build and insert \p Res = G_PTR_ADD \p Op0, \p Op1
  ///
  /// G_PTR_ADD adds \p Op1 addressable units to the pointer specified by \p
  /// Op0, storing the resulting pointer in \p Res. Addressable units are
  /// typically
  /// bytes but this can vary between targets.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
  ///      type.
  /// \pre \p Op1 must be a generic virtual register with scalar type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                                  const SrcOp &Op1);
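
  // Example (illustrative sketch): computing the address of Base[Idx] for
  // 4-byte elements; Base (p0) and Idx (s64) are assumed registers, and
  // buildMul is declared elsewhere in this class.
  //
  //   LLT S64 = LLT::scalar(64);
  //   auto ByteOff = B.buildMul(S64, Idx, B.buildConstant(S64, 4));
  //   auto EltPtr = B.buildPtrAdd(LLT::pointer(0, 64), Base, ByteOff);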

  /// Materialize and insert \p Res = G_PTR_ADD \p Op0, (G_CONSTANT \p Value)
  ///
  /// G_PTR_ADD adds \p Value bytes to the pointer specified by \p Op0,
  /// storing the resulting pointer in \p Res. If \p Value is zero then no
  /// G_PTR_ADD or G_CONSTANT will be created and \p Op0 will be assigned to
  /// \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Op0 must be a generic virtual register with pointer type.
  /// \pre \p ValueTy must be a scalar type.
  /// \pre \p Res must be 0. This is to detect confusion between
  ///      materializePtrAdd() and buildPtrAdd().
  /// \post \p Res will either be a new generic virtual register of the same
  ///       type as \p Op0 or \p Op0 itself.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  std::optional<MachineInstrBuilder> materializePtrAdd(Register &Res,
                                                       Register Op0,
                                                       const LLT ValueTy,
                                                       uint64_t Value);
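
  // Example (illustrative sketch): \p Res must come in as 0 and is assigned
  // by the call; when Offset is 0 no instruction is created.
  //
  //   Register Res;  // default-constructed, i.e. 0
  //   auto MaybeMIB =
  //       B.materializePtrAdd(Res, BasePtr, LLT::scalar(64), Offset);
  //   // Afterwards Res is either a new vreg or BasePtr itself.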

  /// Build and insert \p Res = G_PTRMASK \p Op0, \p Op1
  MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0,
                                   const SrcOp &Op1) {
    return buildInstr(TargetOpcode::G_PTRMASK, {Res}, {Op0, Op1});
  }

  /// Build and insert \p Res = G_PTRMASK \p Op0, \p G_CONSTANT (1 << NumBits) - 1
  ///
  /// This clears the low bits of a pointer operand without destroying its
  /// pointer properties. This has the effect of rounding the address *down* to
  /// a specified alignment in bits.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res and \p Op0 must be generic virtual registers with pointer
  ///      type.
  /// \pre \p NumBits must be an integer representing the number of low bits to
  ///      be cleared in \p Op0.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0,
                                          uint32_t NumBits);
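
  // Example (illustrative): rounding a pointer down to a 16-byte boundary
  // clears the low log2(16) = 4 bits; PtrTy and Ptr are assumed.
  //
  //   auto Aligned = B.buildMaskLowPtrBits(PtrTy, Ptr, 4);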

  /// Build and insert
  /// a, b, ..., x = G_UNMERGE_VALUES \p Op0
  /// \p Res = G_BUILD_VECTOR a, b, ..., x, undef, ..., undef
  ///
  /// Pad \p Op0 with undef elements to match number of elements in \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res and \p Op0 must be generic virtual registers with vector type,
  ///      same vector element type and Op0 must have fewer elements than Res.
  ///
  /// \return a MachineInstrBuilder for the newly created build vector instr.
  MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res,
                                                      const SrcOp &Op0);

  /// Build and insert
  /// a, b, ..., x, y, z = G_UNMERGE_VALUES \p Op0
  /// \p Res = G_BUILD_VECTOR a, b, ..., x
  ///
  /// Delete trailing elements in \p Op0 to match number of elements in \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res and \p Op0 must be generic virtual registers with vector type,
  ///      same vector element type and Op0 must have more elements than Res.
  ///
  /// \return a MachineInstrBuilder for the newly created build vector instr.
  MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res,
                                                        const SrcOp &Op0);

  /// Build and insert \p Res, \p CarryOut = G_UADDO \p Op0, \p Op1
  ///
  /// G_UADDO sets \p Res to \p Op0 + \p Op1 (truncated to the bit width) and
  /// sets \p CarryOut to 1 if the result overflowed in unsigned arithmetic.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers with the
  /// same scalar type.
  /// \pre \p CarryOut must be a generic virtual register with scalar type
  ///      (typically s1).
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildUAddo(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1) {
    return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
  }

  /// Build and insert \p Res, \p CarryOut = G_USUBO \p Op0, \p Op1
  MachineInstrBuilder buildUSubo(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1) {
    return buildInstr(TargetOpcode::G_USUBO, {Res, CarryOut}, {Op0, Op1});
  }

  /// Build and insert \p Res, \p CarryOut = G_SADDO \p Op0, \p Op1
  MachineInstrBuilder buildSAddo(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1) {
    return buildInstr(TargetOpcode::G_SADDO, {Res, CarryOut}, {Op0, Op1});
  }

  /// Build and insert \p Res, \p CarryOut = G_SSUBO \p Op0, \p Op1
  MachineInstrBuilder buildSSubo(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1) {
    return buildInstr(TargetOpcode::G_SSUBO, {Res, CarryOut}, {Op0, Op1});
  }

  /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
  /// \p Op1, \p CarryIn
  ///
  /// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
  /// width) and sets \p CarryOut to 1 if the result overflowed in unsigned
  /// arithmetic.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same scalar type.
  /// \pre \p CarryOut and \p CarryIn must be generic virtual
  ///      registers with the same scalar type (typically s1)
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildUAdde(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1,
                                 const SrcOp &CarryIn) {
    return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
                                             {Op0, Op1, CarryIn});
  }
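
  // Example (illustrative sketch): a 64-bit addition expressed as two 32-bit
  // halves chained through the carry, with assumed s32 inputs LoA/HiA and
  // LoB/HiB:
  //
  //   LLT S32 = LLT::scalar(32), S1 = LLT::scalar(1);
  //   auto Lo = B.buildUAddo(S32, S1, LoA, LoB);      // defs: sum, carry-out
  //   auto Hi = B.buildUAdde(S32, S1, HiA, HiB, Lo.getReg(1));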

  /// Build and insert \p Res, \p CarryOut = G_USUBE \p Op0, \p Op1, \p CarryIn
  MachineInstrBuilder buildUSube(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1,
                                 const SrcOp &CarryIn) {
    return buildInstr(TargetOpcode::G_USUBE, {Res, CarryOut},
                                             {Op0, Op1, CarryIn});
  }

  /// Build and insert \p Res, \p CarryOut = G_SADDE \p Op0, \p Op1, \p CarryIn
  MachineInstrBuilder buildSAdde(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1,
                                 const SrcOp &CarryIn) {
    return buildInstr(TargetOpcode::G_SADDE, {Res, CarryOut},
                                             {Op0, Op1, CarryIn});
  }

  /// Build and insert \p Res, \p CarryOut = G_SSUBE \p Op0, \p Op1, \p CarryIn
  MachineInstrBuilder buildSSube(const DstOp &Res, const DstOp &CarryOut,
                                 const SrcOp &Op0, const SrcOp &Op1,
                                 const SrcOp &CarryIn) {
    return buildInstr(TargetOpcode::G_SSUBE, {Res, CarryOut},
                                             {Op0, Op1, CarryIn});
  }

  /// Build and insert \p Res = G_ANYEXT \p Op
  ///
  /// G_ANYEXT produces a register of the specified width, with bits 0 to
  /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are unspecified
  /// (i.e. this is neither zero nor sign-extension). For a vector register,
  /// each element is extended individually.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be smaller than \p Res
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op);

  /// Build and insert \p Res = G_SEXT \p Op
  ///
  /// G_SEXT produces a register of the specified width, with bits 0 to
  /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are duplicated from the
  /// high bit of \p Op (i.e. 2s-complement sign extended).
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be smaller than \p Res
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op);

  /// Build and insert \p Res = G_SEXT_INREG \p Op, ImmOp
  MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp) {
    return buildInstr(TargetOpcode::G_SEXT_INREG, {Res}, {Op, SrcOp(ImmOp)});
  }

  /// Build and insert \p Res = G_FPEXT \p Op
  MachineInstrBuilder buildFPExt(const DstOp &Res, const SrcOp &Op,
                                 std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FPEXT, {Res}, {Op}, Flags);
  }

  /// Build and insert a G_PTRTOINT instruction.
  MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_PTRTOINT, {Dst}, {Src});
  }

  /// Build and insert a G_INTTOPTR instruction.
  MachineInstrBuilder buildIntToPtr(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_INTTOPTR, {Dst}, {Src});
  }

  /// Build and insert \p Dst = G_BITCAST \p Src
  MachineInstrBuilder buildBitcast(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_BITCAST, {Dst}, {Src});
  }

  /// Build and insert \p Dst = G_ADDRSPACE_CAST \p Src
  MachineInstrBuilder buildAddrSpaceCast(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_ADDRSPACE_CAST, {Dst}, {Src});
  }

  /// \return The opcode of the extension the target wants to use for boolean
  /// values.
  unsigned getBoolExtOp(bool IsVec, bool IsFP) const;

  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_SEXT \p Op, or
  /// \p Res = G_ZEXT \p Op depending on how the target wants to extend boolean
  /// values.
  MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op,
                                   bool IsFP);

  /// Build and insert \p Res = G_SEXT_INREG \p Op, 1 or \p Res = G_AND \p Op,
  /// 1, or COPY depending on how the target wants to extend boolean values,
  /// using the original register size.
  MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op,
                                        bool IsVector,
                                        bool IsFP);

  /// Build and insert \p Res = G_ZEXT \p Op
  ///
  /// G_ZEXT produces a register of the specified width, with bits 0 to
  /// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are 0. For a vector
  /// register, each element is extended individually.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be smaller than \p Res
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op);

  /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op);

  /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op);
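
  // Example (illustrative): the *OrTrunc helpers pick the opcode from the
  // relative sizes, which is convenient when the source width is not known
  // when writing the pass:
  //
  //   auto V = B.buildZExtOrTrunc(LLT::scalar(32), Src);
  //   // G_ZEXT if Src is narrower, G_TRUNC if wider, COPY if equal sized.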

  /// Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
  /// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op);

  /// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
  /// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
  /// \p Op.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res,
                                      const SrcOp &Op);

  /// Build and insert \p Res = G_AND \p Op, LowBitsSet(ImmOp)
  /// Since there is no G_ZEXT_INREG like G_SEXT_INREG, the instruction is
  /// emulated using G_AND.
  MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op,
                                     int64_t ImmOp);

  /// Build and insert an appropriate cast between two registers of equal size.
  MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src);

  /// Build and insert G_BR \p Dest
  ///
  /// G_BR is an unconditional branch to \p Dest.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildBr(MachineBasicBlock &Dest);

  /// Build and insert G_BRCOND \p Tst, \p Dest
  ///
  /// G_BRCOND is a conditional branch to \p Dest.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Tst must be a generic virtual register with scalar
  ///      type. At the beginning of legalization, this will be a single
  ///      bit (s1). Targets with interesting flags registers may change
  ///      this. For a wider type, whether the branch is taken must only
  ///      depend on bit 0 (for now).
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest);

  /// Build and insert G_BRINDIRECT \p Tgt
  ///
  /// G_BRINDIRECT is an indirect branch to \p Tgt.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Tgt must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildBrIndirect(Register Tgt);

  /// Build and insert G_BRJT \p TablePtr, \p JTI, \p IndexReg
  ///
  /// G_BRJT is a jump table branch using a table base pointer \p TablePtr,
  /// jump table index \p JTI and index \p IndexReg
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p TablePtr must be a generic virtual register with pointer type.
  /// \pre \p JTI must be a jump table index.
  /// \pre \p IndexReg must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI,
                                Register IndexReg);

  /// Build and insert \p Res = G_CONSTANT \p Val
  ///
  /// G_CONSTANT is an integer constant with the specified size and value. \p
  /// Val will be extended or truncated to the size of \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or pointer
  ///      type.
  ///
  /// \return The newly created instruction.
  virtual MachineInstrBuilder buildConstant(const DstOp &Res,
                                            const ConstantInt &Val);

  /// Build and insert \p Res = G_CONSTANT \p Val
  ///
  /// G_CONSTANT is an integer constant with the specified size and value.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildConstant(const DstOp &Res, int64_t Val);
  MachineInstrBuilder buildConstant(const DstOp &Res, const APInt &Val);
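
  // Example (illustrative): scalar and vector constants. Per the behavior
  // assumed here (worth verifying), a vector destination splats the value
  // into every element.
  //
  //   auto Zero = B.buildConstant(LLT::scalar(64), 0);
  //   auto Ones = B.buildConstant(LLT::fixed_vector(4, 32), 1);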

  /// Build and insert \p Res = G_FCONSTANT \p Val
  ///
  /// G_FCONSTANT is a floating-point constant with the specified size and
  /// value.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar type.
  ///
  /// \return The newly created instruction.
  virtual MachineInstrBuilder buildFConstant(const DstOp &Res,
                                             const ConstantFP &Val);

  MachineInstrBuilder buildFConstant(const DstOp &Res, double Val);
  MachineInstrBuilder buildFConstant(const DstOp &Res, const APFloat &Val);

  /// Build and insert \p Res = COPY \p Op
  ///
  /// Register-to-register COPY sets \p Res to \p Op.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op);

  /// Build and insert G_ASSERT_SEXT, G_ASSERT_ZEXT, or G_ASSERT_ALIGN
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAssertInstr(unsigned Opc, const DstOp &Res,
                                       const SrcOp &Op, unsigned Val) {
    return buildInstr(Opc, Res, Op).addImm(Val);
  }

  /// Build and insert \p Res = G_ASSERT_ZEXT Op, Size
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op,
                                      unsigned Size) {
    return buildAssertInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op, Size);
  }

  /// Build and insert \p Res = G_ASSERT_SEXT Op, Size
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op,
                                      unsigned Size) {
    return buildAssertInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op, Size);
  }

  /// Build and insert \p Res = G_ASSERT_ALIGN Op, AlignVal
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAssertAlign(const DstOp &Res, const SrcOp &Op,
                                       Align AlignVal) {
    return buildAssertInstr(TargetOpcode::G_ASSERT_ALIGN, Res, Op,
                            AlignVal.value());
  }

  /// Build and insert `Res = G_LOAD Addr, MMO`.
  ///
  /// Loads the value stored at \p Addr. Puts the result in \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr,
                                MachineMemOperand &MMO) {
    return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
  }

  /// Build and insert a G_LOAD instruction, while constructing the
  /// MachineMemOperand.
  MachineInstrBuilder
  buildLoad(const DstOp &Res, const SrcOp &Addr, MachinePointerInfo PtrInfo,
            Align Alignment,
            MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
            const AAMDNodes &AAInfo = AAMDNodes());
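
  // Example (illustrative sketch): an s32 load through an assumed p0 register
  // Addr, letting the builder construct the MachineMemOperand. The pointer
  // info used here is a deliberately generic placeholder.
  //
  //   auto Val =
  //       B.buildLoad(LLT::scalar(32), Addr,
  //                   MachinePointerInfo::getUnknownStack(B.getMF()),
  //                   Align(4));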

  /// Build and insert `Res = <opcode> Addr, MMO`.
  ///
  /// Loads the value stored at \p Addr. Puts the result in \p Res.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res,
                                     const SrcOp &Addr, MachineMemOperand &MMO);

  /// Helper to create a load from a constant offset given a base address. Load
  /// the type of \p Dst from \p Offset from the given base address and memory
  /// operand.
  MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst,
                                          const SrcOp &BasePtr,
                                          MachineMemOperand &BaseMMO,
                                          int64_t Offset);

  /// Build and insert `G_STORE Val, Addr, MMO`.
  ///
  /// Stores the value \p Val to \p Addr.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Val must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr,
                                 MachineMemOperand &MMO);

  /// Build and insert a G_STORE instruction, while constructing the
  /// MachineMemOperand.
  MachineInstrBuilder
  buildStore(const SrcOp &Val, const SrcOp &Addr, MachinePointerInfo PtrInfo,
             Align Alignment,
             MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
             const AAMDNodes &AAInfo = AAMDNodes());

  /// Build and insert `Res = G_EXTRACT Src, Index`.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res and \p Src must be generic virtual registers.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src,
                                   uint64_t Index);

  /// Build and insert \p Res = IMPLICIT_DEF.
  MachineInstrBuilder buildUndef(const DstOp &Res);

  /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
  ///
  /// G_MERGE_VALUES combines the input elements contiguously into a larger
  /// register. It should only be used when the destination register is not a
  /// vector.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre The entire register \p Res (and no more) must be covered by the input
  ///      registers.
  /// \pre The type of all \p Ops registers must be identical.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildMergeValues(const DstOp &Res,
                                       ArrayRef<Register> Ops);

  /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
  ///               or \p Res = G_BUILD_VECTOR \p Op0, ...
  ///               or \p Res = G_CONCAT_VECTORS \p Op0, ...
  ///
  /// G_MERGE_VALUES combines the input elements contiguously into a larger
  /// register. It is used when the destination register is not a vector.
  /// G_BUILD_VECTOR combines scalar inputs into a vector register.
  /// G_CONCAT_VECTORS combines vector inputs into a vector register.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre The entire register \p Res (and no more) must be covered by the input
  ///      registers.
  /// \pre The type of all \p Ops registers must be identical.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction. The
  ///         opcode of the new instruction will depend on the types of both
  ///         the destination and the sources.
  MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res,
                                          ArrayRef<Register> Ops);
  MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res,
                                          std::initializer_list<SrcOp> Ops);

  /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
  ///
  /// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
  /// registers.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre The entire register \p Op (and no more) must be covered by the
  ///      output \p Res registers.
  /// \pre The type of all \p Res registers must be identical.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildUnmerge(ArrayRef<LLT> Res, const SrcOp &Op);
  MachineInstrBuilder buildUnmerge(ArrayRef<Register> Res, const SrcOp &Op);

  /// Build and insert an unmerge of \p Res sized pieces to cover \p Op
  MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op);
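
  // Example (illustrative sketch): splitting an assumed s64 register Src64
  // into two s32 pieces and merging them back together:
  //
  //   LLT S32 = LLT::scalar(32);
  //   auto Parts = B.buildUnmerge(S32, Src64);       // two s32 defs
  //   Register Lo = Parts.getReg(0), Hi = Parts.getReg(1);
  //   auto Whole = B.buildMergeValues(LLT::scalar(64), {Lo, Hi});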

  /// Build and insert \p Res = G_BUILD_VECTOR \p Op0, ...
  ///
  /// G_BUILD_VECTOR creates a vector value from multiple scalar registers.
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre The entire register \p Res (and no more) must be covered by the
  ///      input scalar registers.
  /// \pre The type of all \p Ops registers must be identical.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildBuildVector(const DstOp &Res,
                                       ArrayRef<Register> Ops);

  /// Build and insert \p Res = G_BUILD_VECTOR \p Op0, ... where each OpN is
  /// built with G_CONSTANT.
  MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res,
                                               ArrayRef<APInt> Ops);

  /// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
  /// the number of elements
  MachineInstrBuilder buildSplatVector(const DstOp &Res,
                                       const SrcOp &Src);

  /// Build and insert \p Res = G_BUILD_VECTOR_TRUNC \p Op0, ...
  ///
  /// G_BUILD_VECTOR_TRUNC creates a vector value from multiple scalar registers
  /// which have types larger than the destination vector element type, and
  /// truncates the values to fit.
  ///
  /// If the operands given are already the same size as the vector element
  /// type, then this method will instead create a G_BUILD_VECTOR instruction.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre The type of all \p Ops registers must be identical.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
                                            ArrayRef<Register> Ops);

  /// Build and insert a vector splat of a scalar \p Src using a
  /// G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idiom.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Src must have the same type as the element type of \p Res
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src);

  /// Build and insert \p Res = G_SHUFFLE_VECTOR \p Src1, \p Src2, \p Mask
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
                                         const SrcOp &Src2, ArrayRef<int> Mask);

  /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
  ///
  /// G_CONCAT_VECTORS creates a vector from the concatenation of 2 or more
  /// vectors.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre The entire register \p Res (and no more) must be covered by the input
  ///      registers.
  /// \pre The type of all source operands must be identical.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildConcatVectors(const DstOp &Res,
                                         ArrayRef<Register> Ops);

  MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src,
                                  const SrcOp &Op, unsigned Index);

  /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
  /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
  /// result register definition unless \p Res is empty. The second
  /// operand will be the intrinsic's ID.
  ///
  /// Callers are expected to add the required definitions and uses afterwards.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> Res,
                                     bool HasSideEffects);
  MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Res,
                                     bool HasSideEffects);
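
  // Example (illustrative sketch): an intrinsic with no results; the explicit
  // ArrayRef<Register> disambiguates between the two overloads above.
  //
  //   B.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(),
  //                    /*HasSideEffects=*/true);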

  /// Build and insert \p Res = G_FPTRUNC \p Op
  ///
  /// G_FPTRUNC converts a floating-point value into one with a smaller type.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  /// \pre \p Res must be smaller than \p Op
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder
  buildFPTrunc(const DstOp &Res, const SrcOp &Op,
               std::optional<unsigned> Flags = std::nullopt);

  /// Build and insert \p Res = G_TRUNC \p Op
  ///
  /// G_TRUNC extracts the low bits of a type. For a vector type each element is
  /// truncated independently before being packed into the destination.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar or vector type.
  /// \pre \p Op must be a generic virtual register with scalar or vector type.
  /// \pre \p Res must be smaller than \p Op
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op);

  /// Build and insert a \p Res = G_ICMP \p Pred, \p Op0, \p Op1
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \pre \p Res must be a generic virtual register with scalar or
  ///      vector type. Typically this starts as s1 or <N x s1>.
  /// \pre \p Op0 and Op1 must be generic virtual registers with the
  ///      same number of elements as \p Res. If \p Res is a scalar,
  ///      \p Op0 must be either a scalar or pointer.
  /// \pre \p Pred must be an integer predicate.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res,
                                const SrcOp &Op0, const SrcOp &Op1);

  /// Build and insert a \p Res = G_FCMP \p Pred, \p Op0, \p Op1
  ///
  /// \pre setBasicBlock or setMI must have been called.
  ///
  /// \pre \p Res must be a generic virtual register with scalar or
  ///      vector type. Typically this starts as s1 or <N x s1>.
  /// \pre \p Op0 and Op1 must be generic virtual registers with the
  ///      same number of elements as \p Res (or scalar, if \p Res is
  ///      scalar).
  /// \pre \p Pred must be a floating-point predicate.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res,
                                const SrcOp &Op0, const SrcOp &Op1,
                                std::optional<unsigned> Flags = std::nullopt);

  /// Build and insert a \p Res = G_IS_FPCLASS \p Src, \p Mask
  MachineInstrBuilder buildIsFPClass(const DstOp &Res, const SrcOp &Src,
                                     unsigned Mask) {
    return buildInstr(TargetOpcode::G_IS_FPCLASS, {Res},
                      {Src, SrcOp(static_cast<int64_t>(Mask))});
  }

  /// Build and insert a \p Res = G_SELECT \p Tst, \p Op0, \p Op1
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same type.
  /// \pre \p Tst must be a generic virtual register with scalar, pointer or
  ///      vector type. If vector then it must have the same number of
  ///      elements as the other parameters.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst,
                                  const SrcOp &Op0, const SrcOp &Op1,
                                  std::optional<unsigned> Flags = std::nullopt);
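
  // Example (illustrative sketch): absolute value via compare-and-select,
  // with assumed s32 registers X and NegX:
  //
  //   LLT S32 = LLT::scalar(32), S1 = LLT::scalar(1);
  //   auto Zero = B.buildConstant(S32, 0);
  //   auto IsNeg = B.buildICmp(CmpInst::ICMP_SLT, S1, X, Zero);
  //   auto Abs = B.buildSelect(S32, IsNeg, NegX, X);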

  /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
  /// \p Elt, \p Idx
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res and \p Val must be generic virtual registers
  ///      with the same vector type.
  /// \pre \p Elt and \p Idx must be a generic virtual register
  ///      with scalar type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildInsertVectorElement(const DstOp &Res,
                                               const SrcOp &Val,
                                               const SrcOp &Elt,
                                               const SrcOp &Idx);

  /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar type.
  /// \pre \p Val must be a generic virtual register with vector type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildExtractVectorElementConstant(const DstOp &Res,
                                                        const SrcOp &Val,
                                                        const int Idx) {
    return buildExtractVectorElement(Res, Val,
                                     buildConstant(LLT::scalar(64), Idx));
  }

  /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register with scalar type.
  /// \pre \p Val must be a generic virtual register with vector type.
  /// \pre \p Idx must be a generic virtual register with scalar type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildExtractVectorElement(const DstOp &Res,
                                                const SrcOp &Val,
                                                const SrcOp &Idx);

  /// Build and insert `OldValRes<def>, SuccessRes<def> =
  /// G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO`.
  ///
  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from
  /// \p Addr in \p OldValRes, along with an s1 indicating whether it was
  /// replaced.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register of scalar type.
  /// \pre \p SuccessRes must be a generic virtual register of scalar type. It
  ///      will be assigned 0 on failure and 1 on success.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
  ///      registers of the same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder
  buildAtomicCmpXchgWithSuccess(Register OldValRes, Register SuccessRes,
                                Register Addr, Register CmpVal, Register NewVal,
                                MachineMemOperand &MMO);
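
  // Example (illustrative sketch): the success flag is a second def; MMO is
  // an assumed MachineMemOperand describing the atomic access.
  //
  //   Register Old = MRI.createGenericVirtualRegister(LLT::scalar(32));
  //   Register Ok = MRI.createGenericVirtualRegister(LLT::scalar(1));
  //   B.buildAtomicCmpXchgWithSuccess(Old, Ok, Addr, Expected, Desired, *MMO);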

  /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
  /// MMO`.
  ///
  /// Atomically replace the value at \p Addr with \p NewVal if it is currently
  /// \p CmpVal; otherwise leave it unchanged. Puts the original value from
  /// \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register of scalar type.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, \p CmpVal, and \p NewVal must be generic virtual
  ///      registers of the same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                         Register CmpVal, Register NewVal,
                                         MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO`.
  ///
  /// Atomically read-modify-write the value at \p Addr with \p Val. Puts the
  /// original value from \p Addr in \p OldValRes. The modification is
  /// determined by the opcode.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
                                     const SrcOp &Addr, const SrcOp &Val,
                                     MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with \p Val. Puts the original
  /// value from \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                         Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the addition of \p Val and
  /// the original value. Puts the original value from \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                        Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the subtraction of \p Val and
  /// the original value. Puts the original value from \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr,
                                        Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the bitwise and of \p Val and
  /// the original value. Puts the original value from \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                        Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the bitwise nand of \p Val
  /// and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr,
                                         Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the bitwise or of \p Val and
  /// the original value. Puts the original value from \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr,
                                       Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the bitwise xor of \p Val and
  /// the original value. Puts the original value from \p Addr in \p OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr,
                                        Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the signed maximum of \p
  /// Val and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr,
                                        Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the signed minimum of \p
  /// Val and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr,
                                        Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the unsigned maximum of \p
  /// Val and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                         Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the unsigned minimum of \p
  /// Val and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes, and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                         Register Val, MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO`.
  MachineInstrBuilder buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO`.
  MachineInstrBuilder buildAtomicRMWFSub(
        const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
        MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the floating point maximum of
  /// \p Val and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWFMax(
        const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
        MachineMemOperand &MMO);

  /// Build and insert `OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO`.
  ///
  /// Atomically replace the value at \p Addr with the floating point minimum of
  /// \p Val and the original value. Puts the original value from \p Addr in \p
  /// OldValRes.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p OldValRes must be a generic virtual register.
  /// \pre \p Addr must be a generic virtual register with pointer type.
  /// \pre \p OldValRes and \p Val must be generic virtual registers of the
  ///      same type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAtomicRMWFMin(
        const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
        MachineMemOperand &MMO);

  /// Build and insert `G_FENCE Ordering, Scope`.
  MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope);

  /// Build and insert \p Dst = G_FREEZE \p Src
  MachineInstrBuilder buildFreeze(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_FREEZE, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_BLOCK_ADDR \p BA
  ///
  /// G_BLOCK_ADDR computes the address of a basic block.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res must be a generic virtual register of a pointer type.
  ///
  /// \return The newly created instruction.
  MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA);

  /// Build and insert \p Res = G_ADD \p Op0, \p Op1
  ///
  /// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
  /// truncated to their width.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same (scalar or vector) type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1,
                               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
  }
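
  // Illustrative usage sketch (hypothetical names): `B` is a MachineIRBuilder;
  // passing an LLT as the DstOp creates a fresh virtual register of that type
  // for the result, and the flags argument is optional.
  //
  //   LLT S32 = LLT::scalar(32);
  //   auto Sum = B.buildAdd(S32, Src0, Src1);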

  /// Build and insert \p Res = G_SUB \p Op0, \p Op1
  ///
  /// G_SUB sets \p Res to the difference of integer parameters \p Op0 and
  /// \p Op1, truncated to their width.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same (scalar or vector) type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1,
                               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_MUL \p Op0, \p Op1
  ///
  /// G_MUL sets \p Res to the product of integer parameters \p Op0 and \p Op1,
  /// truncated to their width.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same (scalar or vector) type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1,
                               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
                                 std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
                                 const SrcOp &Src1,
                                 std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_UREM \p Op0, \p Op1
  MachineInstrBuilder buildURem(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_UREM, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder buildFMul(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMUL, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder
  buildFMinNum(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMINNUM, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder
  buildFMaxNum(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMAXNUM, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder
  buildFMinNumIEEE(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
                   std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMINNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder
  buildFMaxNumIEEE(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
                   std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMAXNUM_IEEE, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder buildShl(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1,
                               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_SHL, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_LSHR, {Dst}, {Src0, Src1}, Flags);
  }

  MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_ASHR, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_AND \p Op0, \p Op1
  ///
  /// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
  /// Op1.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same (scalar or vector) type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_AND, {Dst}, {Src0, Src1});
  }

  /// Build and insert \p Res = G_OR \p Op0, \p Op1
  ///
  /// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
  /// Op1.
  ///
  /// \pre setBasicBlock or setMI must have been called.
  /// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
  ///      with the same (scalar or vector) type.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0,
                              const SrcOp &Src1,
                              std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_OR, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_XOR \p Op0, \p Op1
  MachineInstrBuilder buildXor(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_XOR, {Dst}, {Src0, Src1});
  }

  /// Build and insert a bitwise not:
  /// \p NegOne = G_CONSTANT -1
  /// \p Res = G_XOR \p Op0, NegOne
  MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0) {
    auto NegOne = buildConstant(Dst.getLLTTy(*getMRI()), -1);
    return buildInstr(TargetOpcode::G_XOR, {Dst}, {Src0, NegOne});
  }

  /// Build and insert integer negation
  /// \p Zero = G_CONSTANT 0
  /// \p Res = G_SUB Zero, \p Op0
  MachineInstrBuilder buildNeg(const DstOp &Dst, const SrcOp &Src0) {
    auto Zero = buildConstant(Dst.getLLTTy(*getMRI()), 0);
    return buildInstr(TargetOpcode::G_SUB, {Dst}, {Zero, Src0});
  }

  /// Build and insert \p Res = G_CTPOP \p Src0
  MachineInstrBuilder buildCTPOP(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_CTPOP, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_CTLZ \p Src0
  MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_CTLZ, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_CTLZ_ZERO_UNDEF \p Src0
  MachineInstrBuilder buildCTLZ_ZERO_UNDEF(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_CTTZ \p Src0
  MachineInstrBuilder buildCTTZ(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_CTTZ, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_CTTZ_ZERO_UNDEF \p Src0
  MachineInstrBuilder buildCTTZ_ZERO_UNDEF(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF, {Dst}, {Src0});
  }

  /// Build and insert \p Dst = G_BSWAP \p Src0
  MachineInstrBuilder buildBSwap(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_BSWAP, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_FADD \p Op0, \p Op1
  MachineInstrBuilder buildFAdd(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FADD, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_STRICT_FADD \p Op0, \p Op1
  MachineInstrBuilder
  buildStrictFAdd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
                  std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_STRICT_FADD, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_FSUB \p Op0, \p Op1
  MachineInstrBuilder buildFSub(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FSUB, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_FDIV \p Op0, \p Op1
  MachineInstrBuilder buildFDiv(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FDIV, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Res = G_FMA \p Op0, \p Op1, \p Op2
  MachineInstrBuilder buildFMA(const DstOp &Dst, const SrcOp &Src0,
                               const SrcOp &Src1, const SrcOp &Src2,
                               std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMA, {Dst}, {Src0, Src1, Src2}, Flags);
  }

  /// Build and insert \p Res = G_FMAD \p Op0, \p Op1, \p Op2
  MachineInstrBuilder buildFMAD(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1, const SrcOp &Src2,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FMAD, {Dst}, {Src0, Src1, Src2}, Flags);
  }

  /// Build and insert \p Res = G_FNEG \p Op0
  MachineInstrBuilder buildFNeg(const DstOp &Dst, const SrcOp &Src0,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FNEG, {Dst}, {Src0}, Flags);
  }

  /// Build and insert \p Res = G_FABS \p Op0
  MachineInstrBuilder buildFAbs(const DstOp &Dst, const SrcOp &Src0,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FABS, {Dst}, {Src0}, Flags);
  }

  /// Build and insert \p Dst = G_FCANONICALIZE \p Src0
  MachineInstrBuilder
  buildFCanonicalize(const DstOp &Dst, const SrcOp &Src0,
                     std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FCANONICALIZE, {Dst}, {Src0}, Flags);
  }

  /// Build and insert \p Dst = G_INTRINSIC_TRUNC \p Src0
  MachineInstrBuilder
  buildIntrinsicTrunc(const DstOp &Dst, const SrcOp &Src0,
                      std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {Dst}, {Src0}, Flags);
  }

  /// Build and insert \p Res = G_FFLOOR \p Src0
  MachineInstrBuilder
  buildFFloor(const DstOp &Dst, const SrcOp &Src0,
              std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FFLOOR, {Dst}, {Src0}, Flags);
  }

  /// Build and insert \p Dst = G_FLOG \p Src
  MachineInstrBuilder buildFLog(const DstOp &Dst, const SrcOp &Src,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FLOG, {Dst}, {Src}, Flags);
  }

  /// Build and insert \p Dst = G_FLOG2 \p Src
  MachineInstrBuilder buildFLog2(const DstOp &Dst, const SrcOp &Src,
                                 std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FLOG2, {Dst}, {Src}, Flags);
  }

  /// Build and insert \p Dst = G_FEXP2 \p Src
  MachineInstrBuilder buildFExp2(const DstOp &Dst, const SrcOp &Src,
                                 std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FEXP2, {Dst}, {Src}, Flags);
  }

  /// Build and insert \p Dst = G_FPOW \p Src0, \p Src1
  MachineInstrBuilder buildFPow(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1,
                                std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FPOW, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Dst = G_FLDEXP \p Src0, \p Src1
  MachineInstrBuilder
  buildFLdexp(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1,
              std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FLDEXP, {Dst}, {Src0, Src1}, Flags);
  }

  /// Build and insert \p Fract, \p Exp = G_FFREXP \p Src
  MachineInstrBuilder
  buildFFrexp(const DstOp &Fract, const DstOp &Exp, const SrcOp &Src,
              std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FFREXP, {Fract, Exp}, {Src}, Flags);
  }

  /// Build and insert \p Res = G_FCOPYSIGN \p Op0, \p Op1
  MachineInstrBuilder buildFCopysign(const DstOp &Dst, const SrcOp &Src0,
                                     const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_FCOPYSIGN, {Dst}, {Src0, Src1});
  }

  /// Build and insert \p Res = G_UITOFP \p Src0
  MachineInstrBuilder buildUITOFP(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_UITOFP, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_SITOFP \p Src0
  MachineInstrBuilder buildSITOFP(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_SITOFP, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_FPTOUI \p Src0
  MachineInstrBuilder buildFPTOUI(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_FPTOUI, {Dst}, {Src0});
  }

  /// Build and insert \p Res = G_FPTOSI \p Src0
  MachineInstrBuilder buildFPTOSI(const DstOp &Dst, const SrcOp &Src0) {
    return buildInstr(TargetOpcode::G_FPTOSI, {Dst}, {Src0});
  }

  /// Build and insert \p Dst = G_FRINT \p Src0
  MachineInstrBuilder buildFRint(const DstOp &Dst, const SrcOp &Src0,
                                 std::optional<unsigned> Flags = std::nullopt) {
    return buildInstr(TargetOpcode::G_FRINT, {Dst}, {Src0}, Flags);
  }

  /// Build and insert \p Res = G_SMIN \p Op0, \p Op1
  MachineInstrBuilder buildSMin(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_SMIN, {Dst}, {Src0, Src1});
  }

  /// Build and insert \p Res = G_SMAX \p Op0, \p Op1
  MachineInstrBuilder buildSMax(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_SMAX, {Dst}, {Src0, Src1});
  }

  /// Build and insert \p Res = G_UMIN \p Op0, \p Op1
  MachineInstrBuilder buildUMin(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_UMIN, {Dst}, {Src0, Src1});
  }

  /// Build and insert \p Res = G_UMAX \p Op0, \p Op1
  MachineInstrBuilder buildUMax(const DstOp &Dst, const SrcOp &Src0,
                                const SrcOp &Src1) {
    return buildInstr(TargetOpcode::G_UMAX, {Dst}, {Src0, Src1});
  }

  /// Build and insert \p Dst = G_ABS \p Src
  MachineInstrBuilder buildAbs(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_ABS, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_JUMP_TABLE \p JTI
  ///
  /// G_JUMP_TABLE sets \p Res to the address of the jump table specified by
  /// the jump table index \p JTI.
  ///
  /// \return a MachineInstrBuilder for the newly created instruction.
  MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI);

  /// Build and insert \p Res = G_VECREDUCE_SEQ_FADD \p ScalarIn, \p VecIn
  ///
  /// \p ScalarIn is the scalar accumulator input to start the sequential
  /// reduction operation of \p VecIn.
  MachineInstrBuilder buildVecReduceSeqFAdd(const DstOp &Dst,
                                            const SrcOp &ScalarIn,
                                            const SrcOp &VecIn) {
    return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FADD, {Dst},
                      {ScalarIn, VecIn});
  }
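
  // Semantics sketch: for VecIn = <v0, v1, ..., vN-1>, the sequential
  // reduction computes (((ScalarIn + v0) + v1) + ...) + vN-1, preserving
  // strict in-order floating-point evaluation (unlike G_VECREDUCE_FADD,
  // which is free to reassociate).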

  /// Build and insert \p Res = G_VECREDUCE_SEQ_FMUL \p ScalarIn, \p VecIn
  ///
  /// \p ScalarIn is the scalar accumulator input to start the sequential
  /// reduction operation of \p VecIn.
  MachineInstrBuilder buildVecReduceSeqFMul(const DstOp &Dst,
                                            const SrcOp &ScalarIn,
                                            const SrcOp &VecIn) {
    return buildInstr(TargetOpcode::G_VECREDUCE_SEQ_FMUL, {Dst},
                      {ScalarIn, VecIn});
  }

  /// Build and insert \p Res = G_VECREDUCE_FADD \p ScalarIn, \p VecIn
  ///
  /// \p ScalarIn is the scalar accumulator input to the reduction operation of
  /// \p VecIn.
  MachineInstrBuilder buildVecReduceFAdd(const DstOp &Dst,
                                         const SrcOp &ScalarIn,
                                         const SrcOp &VecIn) {
    return buildInstr(TargetOpcode::G_VECREDUCE_FADD, {Dst}, {ScalarIn, VecIn});
  }

  /// Build and insert \p Res = G_VECREDUCE_FMUL \p ScalarIn, \p VecIn
  ///
  /// \p ScalarIn is the scalar accumulator input to the reduction operation of
  /// \p VecIn.
  MachineInstrBuilder buildVecReduceFMul(const DstOp &Dst,
                                         const SrcOp &ScalarIn,
                                         const SrcOp &VecIn) {
    return buildInstr(TargetOpcode::G_VECREDUCE_FMUL, {Dst}, {ScalarIn, VecIn});
  }

  /// Build and insert \p Res = G_VECREDUCE_FMAX \p Src
  MachineInstrBuilder buildVecReduceFMax(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_FMAX, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_FMIN \p Src
  MachineInstrBuilder buildVecReduceFMin(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_FMIN, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_ADD \p Src
  MachineInstrBuilder buildVecReduceAdd(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_ADD, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_MUL \p Src
  MachineInstrBuilder buildVecReduceMul(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_MUL, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_AND \p Src
  MachineInstrBuilder buildVecReduceAnd(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_AND, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_OR \p Src
  MachineInstrBuilder buildVecReduceOr(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_OR, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_XOR \p Src
  MachineInstrBuilder buildVecReduceXor(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_XOR, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_SMAX \p Src
  MachineInstrBuilder buildVecReduceSMax(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_SMAX, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_SMIN \p Src
  MachineInstrBuilder buildVecReduceSMin(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_SMIN, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_UMAX \p Src
  MachineInstrBuilder buildVecReduceUMax(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_UMAX, {Dst}, {Src});
  }

  /// Build and insert \p Res = G_VECREDUCE_UMIN \p Src
  MachineInstrBuilder buildVecReduceUMin(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_VECREDUCE_UMIN, {Dst}, {Src});
  }

  /// Build and insert G_MEMCPY or G_MEMMOVE
  MachineInstrBuilder buildMemTransferInst(unsigned Opcode, const SrcOp &DstPtr,
                                           const SrcOp &SrcPtr,
                                           const SrcOp &Size,
                                           MachineMemOperand &DstMMO,
                                           MachineMemOperand &SrcMMO) {
    auto MIB = buildInstr(
        Opcode, {}, {DstPtr, SrcPtr, Size, SrcOp(INT64_C(0) /*isTailCall*/)});
    MIB.addMemOperand(&DstMMO);
    MIB.addMemOperand(&SrcMMO);
    return MIB;
  }

  MachineInstrBuilder buildMemCpy(const SrcOp &DstPtr, const SrcOp &SrcPtr,
                                  const SrcOp &Size, MachineMemOperand &DstMMO,
                                  MachineMemOperand &SrcMMO) {
    return buildMemTransferInst(TargetOpcode::G_MEMCPY, DstPtr, SrcPtr, Size,
                                DstMMO, SrcMMO);
  }
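
  // Illustrative usage sketch (hypothetical names): DstPtr and SrcPtr are
  // pointer-typed virtual registers, Size is an integer virtual register,
  // and the memory operands are created by the caller, e.g. via
  // MachineFunction::getMachineMemOperand.
  //
  //   B.buildMemCpy(DstPtr, SrcPtr, Size, *DstMMO, *SrcMMO);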

  /// Build and insert \p Dst = G_SBFX \p Src, \p LSB, \p Width.
  MachineInstrBuilder buildSbfx(const DstOp &Dst, const SrcOp &Src,
                                const SrcOp &LSB, const SrcOp &Width) {
    return buildInstr(TargetOpcode::G_SBFX, {Dst}, {Src, LSB, Width});
  }

  /// Build and insert \p Dst = G_UBFX \p Src, \p LSB, \p Width.
  MachineInstrBuilder buildUbfx(const DstOp &Dst, const SrcOp &Src,
                                const SrcOp &LSB, const SrcOp &Width) {
    return buildInstr(TargetOpcode::G_UBFX, {Dst}, {Src, LSB, Width});
  }
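
  // Illustrative usage sketch (hypothetical names): extract the Width-bit
  // field of Src starting at bit LSB, here bits [8, 12):
  //
  //   auto LSB = B.buildConstant(S32, 8);
  //   auto Width = B.buildConstant(S32, 4);
  //   auto Field = B.buildUbfx(S32, Src, LSB, Width);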

  /// Build and insert \p Dst = G_ROTR \p Src, \p Amt
  MachineInstrBuilder buildRotateRight(const DstOp &Dst, const SrcOp &Src,
                                       const SrcOp &Amt) {
    return buildInstr(TargetOpcode::G_ROTR, {Dst}, {Src, Amt});
  }

  /// Build and insert \p Dst = G_ROTL \p Src, \p Amt
  MachineInstrBuilder buildRotateLeft(const DstOp &Dst, const SrcOp &Src,
                                      const SrcOp &Amt) {
    return buildInstr(TargetOpcode::G_ROTL, {Dst}, {Src, Amt});
  }

  /// Build and insert \p Dst = G_BITREVERSE \p Src
  MachineInstrBuilder buildBitReverse(const DstOp &Dst, const SrcOp &Src) {
    return buildInstr(TargetOpcode::G_BITREVERSE, {Dst}, {Src});
  }

  virtual MachineInstrBuilder
  buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps,
             std::optional<unsigned> Flags = std::nullopt);
};

} // End namespace llvm.
#endif // LLVM_CODEGEN_GLOBALISEL_MACHINEIRBUILDER_H

//=- llvm/CodeGen/GlobalISel/RegBankSelect.h - Reg Bank Selector --*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file describes the interface of the MachineFunctionPass
/// responsible for assigning the generic virtual registers to register banks.
///
/// By default, the reg bank selector relies on local decisions to
/// assign the register bank. In other words, it looks at one instruction
/// at a time to decide where the operand of that instruction should live.
///
/// At higher optimization levels, we could imagine that the reg bank selector
/// would use more global analysis and do crazier things like duplicating
/// instructions and so on. This is future work.
///
/// For now, the pass uses a greedy algorithm to decide where the operand
/// of an instruction should live. It asks the target which banks may be
/// used for each operand of the instruction and what is the cost. Then,
/// it chooses the solution which minimizes the cost of the instruction plus
/// the cost of any moves that may be needed to bring the values into the right
/// register bank.
/// In other words, the cost for an instruction on a register bank RegBank
/// is: Cost of I on RegBank plus the sum of the cost for bringing the
/// input operands from their current register bank to RegBank.
/// Thus, the following formula:
/// cost(I, RegBank) = cost(I.Opcode, RegBank) +
///    sum(for each arg in I.arguments: costCrossCopy(arg.RegBank, RegBank))
///
/// E.g., let's say we are assigning the register bank for the instruction
/// defining v2.
/// v0(A_REGBANK) = ...
/// v1(A_REGBANK) = ...
/// v2 = G_ADD i32 v0, v1 <-- MI
///
/// The target may say it can generate G_ADD i32 on register banks A and B
/// with costs of 5 and 1, respectively.
/// Then, let's say the cost of a cross-register-bank copy from A to B is 1.
/// The reg bank selector would compare the following two costs:
/// cost(MI, A_REGBANK) = cost(G_ADD, A_REGBANK) + cost(v0.RegBank, A_REGBANK) +
///    cost(v1.RegBank, A_REGBANK)
///                     = 5 + cost(A_REGBANK, A_REGBANK) + cost(A_REGBANK,
///                                                             A_REGBANK)
///                     = 5 + 0 + 0 = 5
/// cost(MI, B_REGBANK) = cost(G_ADD, B_REGBANK) + cost(v0.RegBank, B_REGBANK) +
///    cost(v1.RegBank, B_REGBANK)
///                     = 1 + cost(A_REGBANK, B_REGBANK) + cost(A_REGBANK,
///                                                             B_REGBANK)
///                     = 1 + 1 + 1 = 3
/// Therefore, in this specific example, the reg bank selector would choose
/// bank B for MI.
/// v0(A_REGBANK) = ...
/// v1(A_REGBANK) = ...
/// tmp0(B_REGBANK) = COPY v0
/// tmp1(B_REGBANK) = COPY v1
/// v2(B_REGBANK) = G_ADD i32 tmp0, tmp1
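///
/// A minimal sketch of that comparison in code (hypothetical helpers named
/// after the formula above; not part of this header):
/// \code
///   uint64_t costOn(unsigned Opc, RegBank RB, ArrayRef<RegBank> ArgBanks) {
///     uint64_t Cost = costOpcode(Opc, RB);
///     for (RegBank ArgRB : ArgBanks)
///       Cost += costCrossCopy(ArgRB, RB);
///     return Cost;
///   }
///   // Here: costOn(G_ADD, A) == 5 and costOn(G_ADD, B) == 3, so B wins.
/// \endcode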
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H
#define LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include <cassert>
#include <cstdint>
#include <memory>

namespace llvm {

class BlockFrequency;
class MachineBlockFrequencyInfo;
class MachineBranchProbabilityInfo;
class MachineOperand;
class MachineRegisterInfo;
class Pass;
class raw_ostream;
class TargetPassConfig;
class TargetRegisterInfo;

/// This pass implements the reg bank selector pass used in the GlobalISel
/// pipeline. At the end of this pass, all register operands have been assigned
/// a register bank.
class RegBankSelect : public MachineFunctionPass {
public:
  static char ID;

  /// List of the modes supported by the RegBankSelect pass.
  enum Mode {
    /// Assign the register banks as fast as possible (default).
    Fast,
    /// Greedily minimize the cost of assigning register banks.
    /// This should produce code of greater quality, but will
    /// require more compile time.
    Greedy
  };

  /// Abstract class used to represent an insertion point in a CFG.
  /// This class records an insertion point and materializes it on
  /// demand.
  /// It allows reasoning about the frequency of this insertion point,
  /// without having to logically materialize it (e.g., on an edge),
  /// before we actually need to insert something.
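  ///
  /// A typical use (illustrative only; InstrInsertPoint is one of the
  /// concrete subclasses defined below):
  /// \code
  ///   InstrInsertPoint IP(MI, /*Before=*/true);
  ///   uint64_t Freq = IP.frequency(P); // No materialization needed yet.
  ///   IP.insert(NewMI);                // Materializes (splits) on demand.
  /// \endcode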
  class InsertPoint {
  protected:
    /// Tell if the insert point has already been materialized.
    bool WasMaterialized = false;

    /// Materialize the insertion point.
    ///
    /// If isSplit() is true, this involves actually splitting
    /// the block or edge.
    ///
    /// \post getPointImpl() returns a valid iterator.
    /// \post getInsertMBBImpl() returns a valid basic block.
    /// \post isSplit() == false; no more splitting should be required.
    virtual void materialize() = 0;

    /// Return the materialized insertion basic block.
    /// Code will be inserted into that basic block.
    ///
    /// \pre ::materialize has been called.
    virtual MachineBasicBlock &getInsertMBBImpl() = 0;

    /// Return the materialized insertion point.
    /// Code will be inserted before that point.
    ///
    /// \pre ::materialize has been called.
    virtual MachineBasicBlock::iterator getPointImpl() = 0;

  public:
    virtual ~InsertPoint() = default;

    /// The first call to this method will cause the splitting to
    /// happen if need be, then subsequent calls just return
    /// the iterator to that point. I.e., no more splitting will
    /// occur.
    ///
    /// \return The iterator that should be used with
    /// MachineBasicBlock::insert. I.e., additional code happens
    /// before that point.
    MachineBasicBlock::iterator getPoint() {
      if (!WasMaterialized) {
        WasMaterialized = true;
        assert(canMaterialize() && "Impossible to materialize this point");
        materialize();
      }
      // When we materialized the point we should have done the splitting.
      assert(!isSplit() && "Wrong pre-condition");
      return getPointImpl();
    }

    /// The first call to this method will cause the splitting to
    /// happen if need be, then subsequent calls just return
    /// the basic block that contains the insertion point.
    /// I.e., no more splitting will occur.
    ///
    /// \return The basic block that should be used with
    /// MachineBasicBlock::insert and ::getPoint. The new code should
    /// happen before that point.
    MachineBasicBlock &getInsertMBB() {
      if (!WasMaterialized) {
        WasMaterialized = true;
        assert(canMaterialize() && "Impossible to materialize this point");
        materialize();
      }
      // When we materialized the point we should have done the splitting.
      assert(!isSplit() && "Wrong pre-condition");
      return getInsertMBBImpl();
    }

    /// Insert \p MI just before ::getPoint().
    MachineBasicBlock::iterator insert(MachineInstr &MI) {
      return getInsertMBB().insert(getPoint(), &MI);
    }

    /// Does this point involve splitting an edge or block?
    /// As soon as ::getPoint is called, and the point is thus
    /// materialized, the point will not require splitting anymore,
    /// i.e., this will return false.
    virtual bool isSplit() const { return false; }

    /// Frequency of the insertion point.
    /// \p P is used to access the various analyses that will help to
    /// get that information, like MachineBlockFrequencyInfo.  If \p P
    /// does not contain enough information to return the actual frequency,
    /// this returns 1.
    virtual uint64_t frequency(const Pass &P) const { return 1; }

    /// Check whether this insertion point can be materialized.
    /// As soon as ::getPoint is called, and the point is thus materialized,
    /// calling this method does not make sense.
    virtual bool canMaterialize() const { return false; }
  };

  /// Insertion point before or after an instruction.
  class InstrInsertPoint : public InsertPoint {
  private:
    /// Insertion point.
    MachineInstr &Instr;

    /// Whether the insertion point is before or after Instr.
    bool Before;

    void materialize() override;

    MachineBasicBlock::iterator getPointImpl() override {
      if (Before)
        return Instr;
      return Instr.getNextNode() ? *Instr.getNextNode()
                                 : Instr.getParent()->end();
    }

    MachineBasicBlock &getInsertMBBImpl() override {
      return *Instr.getParent();
    }

  public:
    /// Create an insertion point before (\p Before=true) or after \p Instr.
    InstrInsertPoint(MachineInstr &Instr, bool Before = true);

    bool isSplit() const override;
    uint64_t frequency(const Pass &P) const override;

    // Worst case, we need to split the basic block, but that is still doable.
    bool canMaterialize() const override { return true; }
  };

  /// Insertion point at the beginning or end of a basic block.
  class MBBInsertPoint : public InsertPoint {
  private:
    /// Insertion point.
    MachineBasicBlock &MBB;

    /// Whether the insertion point is at the beginning or end of MBB.
    bool Beginning;

    void materialize() override { /*Nothing to do to materialize*/ }

    MachineBasicBlock::iterator getPointImpl() override {
      return Beginning ? MBB.begin() : MBB.end();
    }

    MachineBasicBlock &getInsertMBBImpl() override { return MBB; }

  public:
    MBBInsertPoint(MachineBasicBlock &MBB, bool Beginning = true)
        : MBB(MBB), Beginning(Beginning) {
      // If we try to insert before phis, we should use the insertion
      // points on the incoming edges.
      assert((!Beginning || MBB.getFirstNonPHI() == MBB.begin()) &&
             "Invalid beginning point");
      // If we try to insert after the terminators, we should use the
      // points on the outgoing edges.
      assert((Beginning || MBB.getFirstTerminator() == MBB.end()) &&
             "Invalid end point");
    }

    bool isSplit() const override { return false; }
    uint64_t frequency(const Pass &P) const override;
    bool canMaterialize() const override { return true; }
  };

  /// Insertion point on an edge.
  class EdgeInsertPoint : public InsertPoint {
  private:
    /// Source of the edge.
    MachineBasicBlock &Src;

    /// Destination of the edge.
    /// After the materialization is done, this holds the basic block
    /// that resulted from the splitting.
    MachineBasicBlock *DstOrSplit;

    /// P is used to update the analysis passes as applicable.
    Pass &P;

    void materialize() override;

    MachineBasicBlock::iterator getPointImpl() override {
      // DstOrSplit should be the Split block at this point.
      // I.e., it should have one predecessor, Src, and one successor,
      // the original Dst.
      assert(DstOrSplit && DstOrSplit->isPredecessor(&Src) &&
             DstOrSplit->pred_size() == 1 && DstOrSplit->succ_size() == 1 &&
             "Did not split?!");
      return DstOrSplit->begin();
    }

    MachineBasicBlock &getInsertMBBImpl() override { return *DstOrSplit; }

  public:
    EdgeInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst, Pass &P)
        : Src(Src), DstOrSplit(&Dst), P(P) {}

    bool isSplit() const override {
      return Src.succ_size() > 1 && DstOrSplit->pred_size() > 1;
    }

    uint64_t frequency(const Pass &P) const override;
    bool canMaterialize() const override;
  };

  /// Struct used to represent the placement of a repairing point for
  /// a given operand.
  class RepairingPlacement {
  public:
    /// Define the kind of action this repairing needs.
    enum RepairingKind {
      /// Nothing to repair, just drop this action.
      None,
      /// Repairing code needs to happen before InsertPoints.
      Insert,
      /// (Re)assign the register bank of the operand.
      Reassign,
      /// Mark this repairing placement as impossible.
      Impossible
    };

    /// \name Convenient types for a list of insertion points.
    /// @{
    using InsertionPoints = SmallVector<std::unique_ptr<InsertPoint>, 2>;
    using insertpt_iterator = InsertionPoints::iterator;
    using const_insertpt_iterator = InsertionPoints::const_iterator;
    /// @}

  private:
    /// Kind of repairing.
    RepairingKind Kind;
    /// Index of the operand that will be repaired.
    unsigned OpIdx;
    /// Are all the insert points materializable?
    bool CanMaterialize;
    /// Do any of the insert points need splitting?
    bool HasSplit = false;
    /// Insertion point for the repair code.
    /// The repairing code needs to happen just before these points.
    InsertionPoints InsertPoints;
    /// Some insertion points may need to update the liveness and such.
    Pass &P;

  public:
    /// Create a repairing placement for the \p OpIdx-th operand of
    /// \p MI. \p TRI is used to make some checks on the register aliases
    /// if the machine operand is a physical register. \p P is used
    /// to update liveness information and such when materializing the
    /// points.
    RepairingPlacement(MachineInstr &MI, unsigned OpIdx,
                       const TargetRegisterInfo &TRI, Pass &P,
                       RepairingKind Kind = RepairingKind::Insert);

    /// \name Getters.
    /// @{
    RepairingKind getKind() const { return Kind; }
    unsigned getOpIdx() const { return OpIdx; }
    bool canMaterialize() const { return CanMaterialize; }
    bool hasSplit() { return HasSplit; }
    /// @}

    /// \name Overloaded methods to add an insertion point.
    /// @{
    /// Add an MBBInsertPoint to the list of InsertPoints.
    void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
    /// Add an InstrInsertPoint to the list of InsertPoints.
    void addInsertPoint(MachineInstr &MI, bool Before);
    /// Add an EdgeInsertPoint (\p Src, \p Dst) to the list of InsertPoints.
    void addInsertPoint(MachineBasicBlock &Src, MachineBasicBlock &Dst);
    /// Add an InsertPoint to the list of insert points.
    /// This method takes ownership of &\p Point.
    void addInsertPoint(InsertPoint &Point);
    /// @}

    /// \name Accessors related to the insertion points.
    /// @{
    insertpt_iterator begin() { return InsertPoints.begin(); }
    insertpt_iterator end() { return InsertPoints.end(); }

    const_insertpt_iterator begin() const { return InsertPoints.begin(); }
    const_insertpt_iterator end() const { return InsertPoints.end(); }

    unsigned getNumInsertPoints() const { return InsertPoints.size(); }
    /// @}

    /// Change the type of this repairing placement to \p NewKind.
    /// It is not possible to switch a repairing placement to the
    /// RepairingKind::Insert. There is no fundamental problem with
    /// that, but also no uses, so we do not support it for now.
    ///
    /// \pre NewKind != RepairingKind::Insert
    /// \post getKind() == NewKind
    void switchTo(RepairingKind NewKind) {
      assert(NewKind != Kind && "Already of the right Kind");
      Kind = NewKind;
      InsertPoints.clear();
      CanMaterialize = NewKind != RepairingKind::Impossible;
      HasSplit = false;
      assert(NewKind != RepairingKind::Insert &&
             "We would need more MI to switch to Insert");
    }
  };

protected:
  /// Helper class used to represent the cost for mapping an instruction.
  /// When mapping an instruction, we may introduce some repairing code.
  /// In most cases, the repairing code is local to the instruction,
  /// thus, we can omit the basic block frequency from the cost.
  /// However, some alternatives may produce non-local cost, e.g., when
  /// repairing a phi, and thus we then need to scale the local cost
  /// to the non-local cost. This class does this for us.
  /// \note: We could simply always scale the cost. The problem is that
  /// we would then be more likely to saturate the cost and end
  /// up having the same cost for actually different alternatives.
  /// Another option would be to use APInt everywhere.
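  ///
  /// A minimal usage sketch (illustrative values; LocalFreq would come from
  /// MachineBlockFrequencyInfo):
  /// \code
  ///   MappingCost Cost(LocalFreq);
  ///   Cost.addLocalCost(5);      // Frequency-free local cost.
  ///   Cost.addNonLocalCost(12);  // Must already include its block frequency.
  ///   if (Cost < BestCost)
  ///     BestCost = Cost;
  /// \endcode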
  class MappingCost {
  private:
    /// Cost of the local instructions.
    /// This cost is free of basic block frequency.
    uint64_t LocalCost = 0;
    /// Cost of the non-local instructions.
    /// This cost should include the frequency of the related blocks.
    uint64_t NonLocalCost = 0;
    /// Frequency of the block where the local instructions live.
    uint64_t LocalFreq;

    MappingCost(uint64_t LocalCost, uint64_t NonLocalCost, uint64_t LocalFreq)
        : LocalCost(LocalCost), NonLocalCost(NonLocalCost),
          LocalFreq(LocalFreq) {}

    /// Check if this cost is saturated.
    bool isSaturated() const;

  public:
    /// Create a MappingCost assuming that most of the instructions
    /// will occur in a basic block with \p LocalFreq frequency.
    MappingCost(const BlockFrequency &LocalFreq);

    /// Add \p Cost to the local cost.
    /// \return true if this cost is saturated, false otherwise.
    bool addLocalCost(uint64_t Cost);

    /// Add \p Cost to the non-local cost.
    /// Non-local costs should reflect the frequency of their placement.
    /// \return true if this cost is saturated, false otherwise.
    bool addNonLocalCost(uint64_t Cost);

    /// Saturate the cost to the maximal representable value.
    void saturate();

    /// Return an instance of MappingCost that represents an
    /// impossible mapping.
    static MappingCost ImpossibleCost();

    /// Check if this is less than \p Cost.
    bool operator<(const MappingCost &Cost) const;
    /// Check if this is equal to \p Cost.
    bool operator==(const MappingCost &Cost) const;
    /// Check if this is not equal to \p Cost.
    bool operator!=(const MappingCost &Cost) const { return !(*this == Cost); }
    /// Check if this is greater than \p Cost.
    bool operator>(const MappingCost &Cost) const {
      return *this != Cost && Cost < *this;
    }

    /// Print this on dbgs() stream.
    void dump() const;

    /// Print this on \p OS.
    void print(raw_ostream &OS) const;

    /// Overload the stream operator for easy debug printing.
    friend raw_ostream &operator<<(raw_ostream &OS, const MappingCost &Cost) {
      Cost.print(OS);
      return OS;
    }
  };

  /// Interface to the target lowering info related
  /// to register banks.
  const RegisterBankInfo *RBI = nullptr;

  /// MRI contains all the register class/bank information that this
  /// pass uses and updates.
  MachineRegisterInfo *MRI = nullptr;

  /// Information on the register classes for the current function.
  const TargetRegisterInfo *TRI = nullptr;

  /// Get the frequency of blocks.
  /// This is required for non-fast mode.
  MachineBlockFrequencyInfo *MBFI = nullptr;

  /// Get the frequency of the edges.
  /// This is required for non-fast mode.
  MachineBranchProbabilityInfo *MBPI = nullptr;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<MachineOptimizationRemarkEmitter> MORE;

  /// Helper class used for every code morphing.
  MachineIRBuilder MIRBuilder;

  /// Optimization mode of the pass.
  Mode OptMode;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC;

  /// Assign the register bank of each operand of \p MI.
  /// \return True on success, false otherwise.
  bool assignInstr(MachineInstr &MI);

  /// Initialize the field members using \p MF.
  void init(MachineFunction &MF);

  /// Check if \p Reg is already assigned what is described by \p ValMapping.
  /// \p OnlyAssign == true means that \p Reg just needs to be assigned a
  /// register bank.  I.e., no repairing is necessary to have the
  /// assignment match.
  bool assignmentMatch(Register Reg,
                       const RegisterBankInfo::ValueMapping &ValMapping,
                       bool &OnlyAssign) const;

  /// Insert repairing code for \p Reg as specified by \p ValMapping.
  /// The repairing placement is specified by \p RepairPt.
  /// \p NewVRegs contains all the registers required to remap \p Reg.
  /// In other words, the number of registers in NewVRegs must be equal
  /// to ValMapping.BreakDown.size().
  ///
  /// The transformation could be sketched as:
  /// \code
  /// ... = op Reg
  /// \endcode
  /// Becomes
  /// \code
  /// <NewRegs> = COPY or extract Reg
  /// ... = op Reg
  /// \endcode
  ///
  /// and
  /// \code
  /// Reg = op ...
  /// \endcode
  /// Becomes
  /// \code
  /// Reg = op ...
  /// Reg = COPY or build_sequence <NewRegs>
  /// \endcode
  ///
  /// \pre NewVRegs.size() == ValMapping.BreakDown.size()
  ///
  /// \note The caller is supposed to do the rewriting of op if need be.
  /// I.e., Reg = op ... => <NewRegs> = NewOp ...
  ///
  /// \return True if the repairing worked, false otherwise.
  bool repairReg(MachineOperand &MO,
                 const RegisterBankInfo::ValueMapping &ValMapping,
                 RegBankSelect::RepairingPlacement &RepairPt,
                 const iterator_range<SmallVectorImpl<Register>::const_iterator>
                     &NewVRegs);

  /// Return the cost of the instruction needed to map \p MO to \p ValMapping.
  /// The cost is free of basic block frequencies.
  /// \pre MO.isReg()
  /// \pre MO is assigned to a register bank.
  /// \pre ValMapping is a valid mapping for MO.
  uint64_t
  getRepairCost(const MachineOperand &MO,
                const RegisterBankInfo::ValueMapping &ValMapping) const;

  /// Find the best mapping for \p MI from \p PossibleMappings.
  /// \return a reference to the best mapping in \p PossibleMappings.
  const RegisterBankInfo::InstructionMapping &
  findBestMapping(MachineInstr &MI,
                  RegisterBankInfo::InstructionMappings &PossibleMappings,
                  SmallVectorImpl<RepairingPlacement> &RepairPts);

  /// Compute the cost of mapping \p MI with \p InstrMapping and
  /// compute the repairing placement for such mapping in \p
  /// RepairPts.
  /// \p BestCost is used to specify when the cost becomes too high
  /// and thus it is not worth computing the RepairPts.  Moreover, if
  /// \p BestCost == nullptr, the mapping cost is actually not
  /// computed.
  MappingCost
  computeMapping(MachineInstr &MI,
                 const RegisterBankInfo::InstructionMapping &InstrMapping,
                 SmallVectorImpl<RepairingPlacement> &RepairPts,
                 const MappingCost *BestCost = nullptr);

  /// When \p RepairPt involves splitting to repair \p MO for the
  /// given \p ValMapping, try to change the way we repair such that
  /// the splitting is not required anymore.
  ///
  /// \pre \p RepairPt.hasSplit()
  /// \pre \p MO == MO.getParent()->getOperand(\p RepairPt.getOpIdx())
  /// \pre \p ValMapping is the mapping of \p MO for MO.getParent()
  ///      that implied \p RepairPt.
  void tryAvoidingSplit(RegBankSelect::RepairingPlacement &RepairPt,
                        const MachineOperand &MO,
                        const RegisterBankInfo::ValueMapping &ValMapping) const;

  /// Apply \p Mapping to \p MI. \p RepairPts represents the different
  /// mapping actions that need to happen for the mapping to be
  /// applied.
  /// \return True if the mapping was applied successfully, false otherwise.
  bool applyMapping(MachineInstr &MI,
                    const RegisterBankInfo::InstructionMapping &InstrMapping,
                    SmallVectorImpl<RepairingPlacement> &RepairPts);

public:
  /// Create a RegBankSelect pass with the specified \p RunningMode.
  RegBankSelect(char &PassID = ID, Mode RunningMode = Fast);

  StringRef getPassName() const override { return "RegBankSelect"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
        .set(MachineFunctionProperties::Property::IsSSA)
        .set(MachineFunctionProperties::Property::Legalized);
  }

  MachineFunctionProperties getSetProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::RegBankSelected);
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties()
      .set(MachineFunctionProperties::Property::NoPHIs);
  }

  /// Check that our input is fully legal: we require the function to have the
  /// Legalized property, so it should be.
  ///
  /// FIXME: This should be in the MachineVerifier.
  bool checkFunctionIsLegal(MachineFunction &MF) const;

  /// Walk through \p MF and assign a register bank to every virtual register
  /// that is still mapped to nothing.
  /// The target needs to provide a RegisterBankInfo and in particular
  /// override RegisterBankInfo::getInstrMapping.
  ///
  /// Simplified algo:
  /// \code
  ///   RBI = MF.subtarget.getRegBankInfo()
  ///   MIRBuilder.setMF(MF)
  ///   for each bb in MF
  ///     for each inst in bb
  ///       MIRBuilder.setInstr(inst)
  ///       MappingCosts = RBI.getMapping(inst);
  ///       Idx = findIdxOfMinCost(MappingCosts)
  ///       CurRegBank = MappingCosts[Idx].RegBank
  ///       MRI.setRegBank(inst.getOperand(0).getReg(), CurRegBank)
  ///       for each argument in inst
  ///         if (CurRegBank != argument.RegBank)
  ///           ArgReg = argument.getReg()
  ///           Tmp = MRI.createNewVirtual(MRI.getSize(ArgReg), CurRegBank)
  ///           MIRBuilder.buildInstr(COPY, Tmp, ArgReg)
  ///           inst.getOperand(argument.getOperandNo()).setReg(Tmp)
  /// \endcode
  bool assignRegisterBanks(MachineFunction &MF);

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_REGBANKSELECT_H

//===- llvm/CodeGen/GlobalISel/GenericMachineInstrs.h -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Declares convenience wrapper classes for interpreting MachineInstr instances
/// as specific generic operations.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
#define LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H

#include "llvm/IR/Instructions.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/Support/Casting.h"

namespace llvm {

/// A base class for all GenericMachineInstrs.
class GenericMachineInstr : public MachineInstr {
public:
  GenericMachineInstr() = delete;

  /// Access the Idx'th operand as a register and return it.
  /// This assumes that the Idx'th operand is a Register type.
  Register getReg(unsigned Idx) const { return getOperand(Idx).getReg(); }

  static bool classof(const MachineInstr *MI) {
    return isPreISelGenericOpcode(MI->getOpcode());
  }
};

/// Represents any type of generic load or store.
/// G_LOAD, G_STORE, G_ZEXTLOAD, G_SEXTLOAD.
class GLoadStore : public GenericMachineInstr {
public:
  /// Get the source register of the pointer value.
  Register getPointerReg() const { return getOperand(1).getReg(); }

  /// Get the MachineMemOperand on this instruction.
  MachineMemOperand &getMMO() const { return **memoperands_begin(); }

  /// Returns true if the attached MachineMemOperand has the atomic flag set.
  bool isAtomic() const { return getMMO().isAtomic(); }
  /// Returns true if the attached MachineMemOperand has the volatile flag set.
  bool isVolatile() const { return getMMO().isVolatile(); }
  /// Returns true if the memory operation is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }
  /// Returns true if this memory operation doesn't have any ordering
  /// constraints other than normal aliasing. Volatile and (ordered) atomic
  /// memory operations can't be reordered.
  bool isUnordered() const { return getMMO().isUnordered(); }

  /// Returns the size in bytes of the memory access.
  uint64_t getMemSize() const { return getMMO().getSize(); }

  /// Returns the size in bits of the memory access.
  uint64_t getMemSizeInBits() const { return getMMO().getSizeInBits(); }

  static bool classof(const MachineInstr *MI) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_STORE:
    case TargetOpcode::G_ZEXTLOAD:
    case TargetOpcode::G_SEXTLOAD:
      return true;
    default:
      return false;
    }
  }
};
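
// Example use of these wrappers (illustrative): the classof hooks make them
// compatible with LLVM's cast machinery, so a MachineInstr can be
// pattern-matched without switching on opcodes by hand:
//
//   if (const auto *Load = dyn_cast<GLoadStore>(&MI)) {
//     if (Load->isSimple()) {
//       Register Ptr = Load->getPointerReg();
//       // ... use Ptr and Load->getMMO() ...
//     }
//   }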

/// Represents any generic load, including sign/zero extending variants.
class GAnyLoad : public GLoadStore {
public:
  /// Get the definition register of the loaded value.
  Register getDstReg() const { return getOperand(0).getReg(); }

  static bool classof(const MachineInstr *MI) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_LOAD:
    case TargetOpcode::G_ZEXTLOAD:
    case TargetOpcode::G_SEXTLOAD:
      return true;
    default:
      return false;
    }
  }
};

/// Represents a G_LOAD.
class GLoad : public GAnyLoad {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_LOAD;
  }
};

/// Represents either a G_SEXTLOAD or G_ZEXTLOAD.
class GExtLoad : public GAnyLoad {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_SEXTLOAD ||
           MI->getOpcode() == TargetOpcode::G_ZEXTLOAD;
  }
};

/// Represents a G_SEXTLOAD.
class GSExtLoad : public GExtLoad {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_SEXTLOAD;
  }
};

/// Represents a G_ZEXTLOAD.
class GZExtLoad : public GExtLoad {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_ZEXTLOAD;
  }
};

/// Represents a G_STORE.
class GStore : public GLoadStore {
public:
  /// Get the stored value register.
  Register getValueReg() const { return getOperand(0).getReg(); }

  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_STORE;
  }
};

/// Represents a G_UNMERGE_VALUES.
class GUnmerge : public GenericMachineInstr {
public:
  /// Returns the number of def registers.
  unsigned getNumDefs() const { return getNumOperands() - 1; }
  /// Get the unmerge source register.
  Register getSourceReg() const { return getOperand(getNumDefs()).getReg(); }

  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_UNMERGE_VALUES;
  }
};

/// Represents G_BUILD_VECTOR, G_CONCAT_VECTORS or G_MERGE_VALUES.
/// All these have the common property of generating a single value from
/// multiple sources.
class GMergeLikeInstr : public GenericMachineInstr {
public:
  /// Returns the number of source registers.
  unsigned getNumSources() const { return getNumOperands() - 1; }
  /// Returns the I'th source register.
  Register getSourceReg(unsigned I) const { return getReg(I + 1); }

  static bool classof(const MachineInstr *MI) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_CONCAT_VECTORS:
    case TargetOpcode::G_BUILD_VECTOR:
      return true;
    default:
      return false;
    }
  }
};
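
// Example (illustrative sketch): collecting the source registers of any
// merge-like instruction:
//
//   SmallVector<Register, 8> Srcs;
//   if (auto *Merge = dyn_cast<GMergeLikeInstr>(&MI))
//     for (unsigned I = 0, E = Merge->getNumSources(); I != E; ++I)
//       Srcs.push_back(Merge->getSourceReg(I));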

/// Represents a G_MERGE_VALUES.
class GMerge : public GMergeLikeInstr {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_MERGE_VALUES;
  }
};

/// Represents a G_CONCAT_VECTORS.
class GConcatVectors : public GMergeLikeInstr {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
  }
};

/// Represents a G_BUILD_VECTOR.
class GBuildVector : public GMergeLikeInstr {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR;
  }
};

/// Represents a G_PTR_ADD.
class GPtrAdd : public GenericMachineInstr {
public:
  Register getBaseReg() const { return getReg(1); }
  Register getOffsetReg() const { return getReg(2); }

  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_PTR_ADD;
  }
};

/// Represents a G_IMPLICIT_DEF.
class GImplicitDef : public GenericMachineInstr {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
  }
};

/// Represents a G_SELECT.
class GSelect : public GenericMachineInstr {
public:
  Register getCondReg() const { return getReg(1); }
  Register getTrueReg() const { return getReg(2); }
  Register getFalseReg() const { return getReg(3); }

  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_SELECT;
  }
};

/// Represent a G_ICMP or G_FCMP.
class GAnyCmp : public GenericMachineInstr {
public:
  CmpInst::Predicate getCond() const {
    return static_cast<CmpInst::Predicate>(getOperand(1).getPredicate());
  }
  Register getLHSReg() const { return getReg(2); }
  Register getRHSReg() const { return getReg(3); }

  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_ICMP ||
           MI->getOpcode() == TargetOpcode::G_FCMP;
  }
};

/// Represent a G_ICMP.
class GICmp : public GAnyCmp {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_ICMP;
  }
};

/// Represent a G_FCMP.
class GFCmp : public GAnyCmp {
public:
  static bool classof(const MachineInstr *MI) {
    return MI->getOpcode() == TargetOpcode::G_FCMP;
  }
};
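
// Example (illustrative sketch): reading the pieces of a comparison through
// the common base class:
//
//   if (auto *Cmp = dyn_cast<GAnyCmp>(&MI)) {
//     CmpInst::Predicate Pred = Cmp->getCond();
//     Register LHS = Cmp->getLHSReg(), RHS = Cmp->getRHSReg();
//     bool IsFP = isa<GFCmp>(Cmp);
//   }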

/// Represents overflowing binary operations.
/// Only carry-out:
/// G_UADDO, G_SADDO, G_USUBO, G_SSUBO, G_UMULO, G_SMULO
/// Carry-in and carry-out:
/// G_UADDE, G_SADDE, G_USUBE, G_SSUBE
class GBinOpCarryOut : public GenericMachineInstr {
public:
  Register getDstReg() const { return getReg(0); }
  Register getCarryOutReg() const { return getReg(1); }
  MachineOperand &getLHS() { return getOperand(2); }
  MachineOperand &getRHS() { return getOperand(3); }

  static bool classof(const MachineInstr *MI) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_UADDO:
    case TargetOpcode::G_SADDO:
    case TargetOpcode::G_USUBO:
    case TargetOpcode::G_SSUBO:
    case TargetOpcode::G_UADDE:
    case TargetOpcode::G_SADDE:
    case TargetOpcode::G_USUBE:
    case TargetOpcode::G_SSUBE:
    case TargetOpcode::G_UMULO:
    case TargetOpcode::G_SMULO:
      return true;
    default:
      return false;
    }
  }
};

/// Represents overflowing add/sub operations.
/// Only carry-out:
/// G_UADDO, G_SADDO, G_USUBO, G_SSUBO
/// Carry-in and carry-out:
/// G_UADDE, G_SADDE, G_USUBE, G_SSUBE
class GAddSubCarryOut : public GBinOpCarryOut {
public:
  bool isAdd() const {
    switch (getOpcode()) {
    case TargetOpcode::G_UADDO:
    case TargetOpcode::G_SADDO:
    case TargetOpcode::G_UADDE:
    case TargetOpcode::G_SADDE:
      return true;
    default:
      return false;
    }
  }
  bool isSub() const { return !isAdd(); }

  bool isSigned() const {
    switch (getOpcode()) {
    case TargetOpcode::G_SADDO:
    case TargetOpcode::G_SSUBO:
    case TargetOpcode::G_SADDE:
    case TargetOpcode::G_SSUBE:
      return true;
    default:
      return false;
    }
  }
  bool isUnsigned() const { return !isSigned(); }

  static bool classof(const MachineInstr *MI) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_UADDO:
    case TargetOpcode::G_SADDO:
    case TargetOpcode::G_USUBO:
    case TargetOpcode::G_SSUBO:
    case TargetOpcode::G_UADDE:
    case TargetOpcode::G_SADDE:
    case TargetOpcode::G_USUBE:
    case TargetOpcode::G_SSUBE:
      return true;
    default:
      return false;
    }
  }
};

/// Represents overflowing add/sub operations that also consume a carry-in.
/// G_UADDE, G_SADDE, G_USUBE, G_SSUBE
class GAddSubCarryInOut : public GAddSubCarryOut {
public:
  Register getCarryInReg() const { return getReg(4); }

  static bool classof(const MachineInstr *MI) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_UADDE:
    case TargetOpcode::G_SADDE:
    case TargetOpcode::G_USUBE:
    case TargetOpcode::G_SSUBE:
      return true;
    default:
      return false;
    }
  }
};
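
// Example (illustrative sketch): classifying an overflowing add/sub and
// fetching its carry-in when one is consumed:
//
//   if (auto *AddSub = dyn_cast<GAddSubCarryOut>(&MI)) {
//     bool SignedAdd = AddSub->isAdd() && AddSub->isSigned();
//     Register CarryOut = AddSub->getCarryOutReg();
//     if (auto *WithCarryIn = dyn_cast<GAddSubCarryInOut>(AddSub)) {
//       Register CarryIn = WithCarryIn->getCarryInReg();
//       // G_[US]ADDE / G_[US]SUBE also consume CarryIn.
//     }
//   }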

} // namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_GENERICMACHINEINSTRS_H
//== llvm/CodeGen/GlobalISel/LoadStoreOpt.h - LoadStoreOpt -------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This is an optimization pass for GlobalISel generic memory operations.
/// Specifically, it focuses on merging stores and loads to consecutive
/// addresses.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LOADSTOREOPT_H
#define LLVM_CODEGEN_GLOBALISEL_LOADSTOREOPT_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

namespace llvm {
// Forward declarations.
class AnalysisUsage;
class GStore;
class LegalizerInfo;
class MachineBasicBlock;
class MachineInstr;
class TargetLowering;
struct LegalityQuery;
class MachineRegisterInfo;
namespace GISelAddressing {
/// Helper struct to store a base, index and offset that form an address
struct BaseIndexOffset {
  Register BaseReg;
  Register IndexReg;
  int64_t Offset = 0;
  bool IsIndexSignExt = false;
};

/// Returns a BaseIndexOffset which describes the pointer in \p Ptr.
BaseIndexOffset getPointerInfo(Register Ptr, MachineRegisterInfo &MRI);

/// Compute whether or not a memory access at \p MI1 aliases with an access at
/// \p MI2 \returns true if either alias/no-alias is known. Sets \p IsAlias
/// accordingly.
bool aliasIsKnownForLoadStore(const MachineInstr &MI1, const MachineInstr &MI2,
                              bool &IsAlias, MachineRegisterInfo &MRI);

/// Returns true if the instruction \p MI may alias \p Other.
/// This function uses multiple strategies to detect aliasing, whereas
/// aliasIsKnownForLoadStore just looks at the addresses of loads/stores and
/// tries to reason about base/index/offsets.
bool instMayAlias(const MachineInstr &MI, const MachineInstr &Other,
                  MachineRegisterInfo &MRI, AliasAnalysis *AA);
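
// Example (illustrative sketch; PtrReg, MI1 and MI2 are hypothetical values
// taken from the surrounding pass):
//
//   BaseIndexOffset Info = getPointerInfo(PtrReg, MRI);
//   bool IsAlias;
//   if (aliasIsKnownForLoadStore(MI1, MI2, IsAlias, MRI) && !IsAlias) {
//     // The two accesses are proven not to alias; reordering is safe.
//   }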
} // namespace GISelAddressing

using namespace GISelAddressing;

class LoadStoreOpt : public MachineFunctionPass {
public:
  static char ID;

private:
  /// An input function used to decide whether the pass should be skipped
  /// for the given MachineFunction.
  std::function<bool(const MachineFunction &)> DoNotRunPass;

  MachineRegisterInfo *MRI = nullptr;
  const TargetLowering *TLI = nullptr;
  MachineFunction *MF = nullptr;
  AliasAnalysis *AA = nullptr;
  const LegalizerInfo *LI = nullptr;

  MachineIRBuilder Builder;

  /// Initialize the field members using \p MF.
  void init(MachineFunction &MF);

  class StoreMergeCandidate {
  public:
    // The base pointer used as the base for all stores in this candidate.
    Register BasePtr;
    // Our algorithm is very simple at the moment. We assume that, in
    // instruction order, stores are writing to incrementing consecutive
    // addresses. So when we walk the block in reverse order, the next eligible
    // store must write to an offset one store width lower than
    // CurrentLowestOffset.
    uint64_t CurrentLowestOffset;
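    // Example walk (illustrative): for 4-byte stores at offsets 0, 4 and 8,
    // the reverse walk seeds the candidate with the store at offset 8
    // (CurrentLowestOffset = 8); the store at offset 4 extends it
    // (CurrentLowestOffset = 4), and the store at offset 0 completes it.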
    SmallVector<GStore *> Stores;
    // A vector of MachineInstr/unsigned pairs to denote potential aliases that
    // need to be checked before the candidate is considered safe to merge. The
    // unsigned value is an index into the Stores vector. The indexed store is
    // the highest-indexed store that has already been checked to not have an
    // alias with the instruction. We record this so we don't have to repeat
    // alias checks that have been already done, only those with stores added
    // after the potential alias is recorded.
    SmallVector<std::pair<MachineInstr *, unsigned>> PotentialAliases;

    void addPotentialAlias(MachineInstr &MI);

    /// Reset this candidate back to an empty one.
    void reset() {
      Stores.clear();
      PotentialAliases.clear();
      CurrentLowestOffset = 0;
      BasePtr = Register();
    }
  };

  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query,
                                MachineFunction &MF) const;
  /// If the given store is valid to be a member of the candidate, add it and
  /// return true. Otherwise, returns false.
  bool addStoreToCandidate(GStore &MI, StoreMergeCandidate &C);
  /// Returns true if the instruction \p MI would potentially alias with any
  /// stores in the candidate \p C.
  bool operationAliasesWithCandidate(MachineInstr &MI, StoreMergeCandidate &C);
  /// Merges the stores in the given vector into a wide store.
  /// \returns true if at least some of the stores were merged.
  /// This may decide not to merge stores if heuristics predict it will not be
  /// worth it.
  bool mergeStores(SmallVectorImpl<GStore *> &StoresToMerge);
  /// Perform a merge of all the stores in \p Stores into a single store.
  /// Erases the old stores from the block when finished.
  /// \returns true if merging was done. It may fail to perform a merge if
  /// there are issues with materializing legal wide values.
  bool doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores);
  bool processMergeCandidate(StoreMergeCandidate &C);
  bool mergeBlockStores(MachineBasicBlock &MBB);
  bool mergeFunctionStores(MachineFunction &MF);

  bool mergeTruncStore(GStore &StoreMI,
                       SmallPtrSetImpl<GStore *> &DeletedStores);
  bool mergeTruncStoresBlock(MachineBasicBlock &MBB);

  /// Initialize some target-specific data structures for the store merging
  /// optimization. \p AddrSpace indicates which address space to use when
  /// probing the legalizer info for legal stores.
  void initializeStoreMergeTargetInfo(unsigned AddrSpace = 0);
  /// A map between address space numbers and a bitvector of supported stores
  /// sizes. Each bit in the bitvector represents whether a store size of
  /// that bit's value is legal. E.g. if bit 64 is set, then 64 bit scalar
  /// stores are legal.
  DenseMap<unsigned, BitVector> LegalStoreSizes;
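  // Example (illustrative): after initializeStoreMergeTargetInfo(AS), whether
  // a 64-bit scalar store is legal in address space AS can be checked with:
  //   const BitVector &Legal = LegalStoreSizes[AS];
  //   bool Has64 = Legal.size() > 64 && Legal[64];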
  bool IsPreLegalizer = false;
  /// Contains instructions to be erased at the end of a block scan.
  SmallSet<MachineInstr *, 16> InstsToErase;

public:
  LoadStoreOpt();
  LoadStoreOpt(std::function<bool(const MachineFunction &)>);

  StringRef getPassName() const override { return "LoadStoreOpt"; }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
        .set(MachineFunctionProperties::Property::IsSSA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // End namespace llvm.

#endif
//===- llvm/CodeGen/GlobalISel/IRTranslator.h - IRTranslator ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares the IRTranslator pass.
/// This pass is responsible for translating LLVM IR into MachineInstr.
/// It uses target hooks to lower the ABI, but aside from that the code it
/// generates is generic. This is the default translator used for
/// GlobalISel.
///
/// \todo Replace the comments with actual doxygen comments.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
#define LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <utility>

namespace llvm {

class AllocaInst;
class AssumptionCache;
class BasicBlock;
class CallInst;
class CallLowering;
class Constant;
class ConstrainedFPIntrinsic;
class DataLayout;
class DbgDeclareInst;
class DbgValueInst;
class Instruction;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class TargetPassConfig;
class User;
class Value;

// Technically the pass should run on a hypothetical MachineModule,
// since it should translate Global into some sort of MachineGlobal.
// The MachineGlobal should ultimately just be a transfer of ownership of
// the interesting bits that are relevant to represent a global value.
// That being said, we could investigate what would it cost to just duplicate
// the information from the LLVM IR.
// The idea is that ultimately we would be able to free up the memory used
// by the LLVM IR as soon as the translation is over.
class IRTranslator : public MachineFunctionPass {
public:
  static char ID;

private:
  /// Interface used to lower everything related to calls.
  const CallLowering *CLI = nullptr;

  /// This class contains the mapping from Values to vreg-related data.
  class ValueToVRegInfo {
  public:
    ValueToVRegInfo() = default;

    using VRegListT = SmallVector<Register, 1>;
    using OffsetListT = SmallVector<uint64_t, 1>;

    using const_vreg_iterator =
        DenseMap<const Value *, VRegListT *>::const_iterator;
    using const_offset_iterator =
        DenseMap<const Value *, OffsetListT *>::const_iterator;

    inline const_vreg_iterator vregs_end() const { return ValToVRegs.end(); }

    VRegListT *getVRegs(const Value &V) {
      auto It = ValToVRegs.find(&V);
      if (It != ValToVRegs.end())
        return It->second;

      return insertVRegs(V);
    }

    OffsetListT *getOffsets(const Value &V) {
      auto It = TypeToOffsets.find(V.getType());
      if (It != TypeToOffsets.end())
        return It->second;

      return insertOffsets(V);
    }

    const_vreg_iterator findVRegs(const Value &V) const {
      return ValToVRegs.find(&V);
    }

    bool contains(const Value &V) const { return ValToVRegs.contains(&V); }

    void reset() {
      ValToVRegs.clear();
      TypeToOffsets.clear();
      VRegAlloc.DestroyAll();
      OffsetAlloc.DestroyAll();
    }

  private:
    VRegListT *insertVRegs(const Value &V) {
      assert(!ValToVRegs.contains(&V) && "Value already exists");

      // We placement-new the vectors using our fast allocator since we never
      // try to free them until translation is finished.
      auto *VRegList = new (VRegAlloc.Allocate()) VRegListT();
      ValToVRegs[&V] = VRegList;
      return VRegList;
    }

    OffsetListT *insertOffsets(const Value &V) {
      assert(!TypeToOffsets.contains(V.getType()) && "Type already exists");

      auto *OffsetList = new (OffsetAlloc.Allocate()) OffsetListT();
      TypeToOffsets[V.getType()] = OffsetList;
      return OffsetList;
    }
    SpecificBumpPtrAllocator<VRegListT> VRegAlloc;
    SpecificBumpPtrAllocator<OffsetListT> OffsetAlloc;

    // We store pointers to vectors here since references may be invalidated
    // while we hold them if we stored the vectors directly.
    DenseMap<const Value *, VRegListT*> ValToVRegs;
    DenseMap<const Type *, OffsetListT*> TypeToOffsets;
  };
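
  // Example (illustrative): within the translator, the map is queried as
  //   ValueToVRegInfo::VRegListT *Regs = VMap.getVRegs(V);
  // where V is an IR Value; the list is created on first use and holds one
  // vreg per scalar leaf of V's type.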

  /// Mapping of the values of the current LLVM IR function to the related
  /// virtual registers and offsets.
  ValueToVRegInfo VMap;

  // N.b. it's not completely obvious that this will be sufficient for every
  // LLVM IR construct (with "invoke" being the obvious candidate to mess up
  // our lives).
  DenseMap<const BasicBlock *, MachineBasicBlock *> BBToMBB;

  // One BasicBlock can be translated to multiple MachineBasicBlocks.  For such
  // BasicBlocks translated to multiple MachineBasicBlocks, MachinePreds retains
  // a mapping between the edges arriving at the BasicBlock to the corresponding
  // created MachineBasicBlocks. Some BasicBlocks that get translated to a
  // single MachineBasicBlock may also end up in this Map.
  using CFGEdge = std::pair<const BasicBlock *, const BasicBlock *>;
  DenseMap<CFGEdge, SmallVector<MachineBasicBlock *, 1>> MachinePreds;

  // List of stubbed PHI instructions, for values and basic blocks to be filled
  // in once all MachineBasicBlocks have been created.
  SmallVector<std::pair<const PHINode *, SmallVector<MachineInstr *, 1>>, 4>
      PendingPHIs;

  /// Record of what frame index has been allocated to specified allocas for
  /// this function.
  DenseMap<const AllocaInst *, int> FrameIndices;

  SwiftErrorValueTracking SwiftError;

  /// \name Methods for translating from LLVM IR to MachineInstr.
  /// \see ::translate for general information on the translate methods.
  /// @{

  /// Translate \p Inst into its corresponding MachineInstr instruction(s).
  /// Insert the newly translated instruction(s) right where the CurBuilder
  /// is set.
  ///
  /// The general algorithm is:
  /// 1. Look for a virtual register for each operand or
  ///    create one.
  /// 2. Update the VMap accordingly.
  /// 2.alt. For constant arguments, if they are compile-time constants,
  ///   produce an immediate in the right operand and do not touch
  ///   ValToReg. Actually we will go with a virtual register for each
  ///   constant because it may be expensive to actually materialize the
  ///   constant. Moreover, if the constant spans several instructions,
  ///   CSE may not catch them.
  ///   => Update ValToVReg and remember that we saw a constant in Constants.
  ///   We will materialize all the constants in finalize.
  /// Note: we would need to do something so that we can recognize such
  ///       operands as constants.
  /// 3. Create the generic instruction.
  ///
  /// \return true if the translation succeeded.
  bool translate(const Instruction &Inst);

  /// Materialize \p C into virtual-register \p Reg. The generic instructions
  /// performing this materialization will be inserted into the entry block of
  /// the function.
  ///
  /// \return true if the materialization succeeded.
  bool translate(const Constant &C, Register Reg);

  // Translate U as a copy of V.
  bool translateCopy(const User &U, const Value &V,
                     MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
  /// emitted.
  bool translateBitCast(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM load instruction into generic IR.
  bool translateLoad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM store instruction into generic IR.
  bool translateStore(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an LLVM string intrinsic (memcpy, memset, ...).
  bool translateMemFunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                        unsigned Opcode);

  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

  bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                  MachineIRBuilder &MIRBuilder);
  bool translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder);

  /// Helper function for translateSimpleIntrinsic.
  /// \return The generic opcode for \p IntrinsicID if \p IntrinsicID is a
  /// simple intrinsic (ceil, fabs, etc.). Otherwise, returns
  /// Intrinsic::not_intrinsic.
  unsigned getSimpleIntrinsicOpcode(Intrinsic::ID ID);

  /// Translates the intrinsics defined in getSimpleIntrinsicOpcode.
  /// \return true if the translation succeeded.
  bool translateSimpleIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                MachineIRBuilder &MIRBuilder);

  bool translateConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI,
                                       MachineIRBuilder &MIRBuilder);

  bool translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                               MachineIRBuilder &MIRBuilder);

  /// Returns the single livein physical register Arg was lowered to, if
  /// possible.
  std::optional<MCRegister> getArgPhysReg(Argument &Arg);

  /// If DebugInst targets an Argument and its expression is an EntryValue,
  /// lower it as an entry in the MF debug table.
  bool translateIfEntryValueArgument(const DbgDeclareInst &DebugInst);

  /// If DebugInst targets an Argument and its expression is an EntryValue,
  /// lower as a DBG_VALUE targeting the corresponding livein register for that
  /// Argument.
  bool translateIfEntryValueArgument(const DbgValueInst &DebugInst,
                                     MachineIRBuilder &MIRBuilder);

  bool translateInlineAsm(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Common code for translating normal calls or invokes.
  bool translateCallBase(const CallBase &CB, MachineIRBuilder &MIRBuilder);

  /// Translate call instruction.
  /// \pre \p U is a call instruction.
  bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

  /// When an invoke or a cleanupret unwinds to the next EH pad, there are
  /// many places it could ultimately go. In the IR, we have a single unwind
  /// destination, but in the machine CFG, we enumerate all the possible blocks.
  /// This function skips over imaginary basic blocks that hold catchswitch
  /// instructions, and finds all the "real" machine
  /// basic block destinations. As those destinations may not be successors of
  /// EHPadBB, here we also calculate the edge probability to those
  /// destinations. The passed-in Prob is the edge probability to EHPadBB.
  bool findUnwindDestinations(
      const BasicBlock *EHPadBB, BranchProbability Prob,
      SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
          &UnwindDests);

  bool translateInvoke(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateCallBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateLandingPad(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate one of LLVM's cast instructions into MachineInstrs, with the
  /// given generic Opcode.
  bool translateCast(unsigned Opcode, const User &U,
                     MachineIRBuilder &MIRBuilder);

  /// Translate a phi instruction.
  bool translatePHI(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate a comparison (icmp or fcmp) instruction or constant.
  bool translateCompare(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate an integer compare instruction (or constant).
  bool translateICmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Translate a floating-point compare instruction (or constant).
  bool translateFCmp(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCompare(U, MIRBuilder);
  }

  /// Add remaining operands onto phis we've translated. Executed after all
  /// MachineBasicBlocks for the function have been created.
  void finishPendingPhis();

  /// Translate \p Inst into a unary operation \p Opcode.
  /// \pre \p U is a unary operation.
  bool translateUnaryOp(unsigned Opcode, const User &U,
                        MachineIRBuilder &MIRBuilder);

  /// Translate \p Inst into a binary operation \p Opcode.
  /// \pre \p U is a binary operation.
  bool translateBinaryOp(unsigned Opcode, const User &U,
                         MachineIRBuilder &MIRBuilder);

  /// If the set of cases should be emitted as a series of branches, return
  /// true. If we should emit this as a bunch of and/or'd together conditions,
  /// return false.
  bool shouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  /// Helper method for findMergedConditions.
  /// This function emits a branch and is used at the leaves of an OR or an
  /// AND operator tree.
  void emitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  /// Used during condbr translation to find trees of conditions that can be
  /// optimized.
  void findMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);

  /// Translate branch (br) instruction.
  /// \pre \p U is a branch instruction.
  bool translateBr(const User &U, MachineIRBuilder &MIRBuilder);

  // Begin switch lowering functions.
  bool emitJumpTableHeader(SwitchCG::JumpTable &JT,
                           SwitchCG::JumpTableHeader &JTH,
                           MachineBasicBlock *HeaderBB);
  void emitJumpTable(SwitchCG::JumpTable &JT, MachineBasicBlock *MBB);

  void emitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB,
                      MachineIRBuilder &MIB);

  /// Generate code for the BitTest header block, which precedes each sequence
  /// of BitTestCases.
  void emitBitTestHeader(SwitchCG::BitTestBlock &BTB,
                         MachineBasicBlock *SwitchMBB);
  /// Generate code to produce one "bit test" for a given BitTestCase \p B.
  void emitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                       BranchProbability BranchProbToNext, Register Reg,
                       SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);

  bool lowerJumpTableWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability UnhandledProbs, SwitchCG::CaseClusterIt I,
      MachineBasicBlock *Fallthrough, bool FallthroughUnreachable);

  bool lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I, Value *Cond,
                                MachineBasicBlock *Fallthrough,
                                bool FallthroughUnreachable,
                                BranchProbability UnhandledProbs,
                                MachineBasicBlock *CurMBB,
                                MachineIRBuilder &MIB,
                                MachineBasicBlock *SwitchMBB);

  bool lowerBitTestWorkItem(
      SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
      MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
      MachineIRBuilder &MIB, MachineFunction::iterator BBI,
      BranchProbability DefaultProb, BranchProbability UnhandledProbs,
      SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
      bool FallthroughUnreachable);

  bool lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                           MachineBasicBlock *SwitchMBB,
                           MachineBasicBlock *DefaultMBB,
                           MachineIRBuilder &MIB);

  bool translateSwitch(const User &U, MachineIRBuilder &MIRBuilder);
  // End switch lowering section.

  bool translateIndirectBr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertValue(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSelect(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateGetElementPtr(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAlloca(const User &U, MachineIRBuilder &MIRBuilder);

  /// Translate return (ret) instruction.
  /// The target needs to implement CallLowering::lowerReturn for
  /// this to succeed.
  /// \pre \p U is a return instruction.
  bool translateRet(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateFNeg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ADD, U, MIRBuilder);
  }
  bool translateSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SUB, U, MIRBuilder);
  }
  bool translateAnd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_AND, U, MIRBuilder);
  }
  bool translateMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_MUL, U, MIRBuilder);
  }
  bool translateOr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_OR, U, MIRBuilder);
  }
  bool translateXor(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_XOR, U, MIRBuilder);
  }

  bool translateUDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UDIV, U, MIRBuilder);
  }
  bool translateSDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SDIV, U, MIRBuilder);
  }
  bool translateURem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_UREM, U, MIRBuilder);
  }
  bool translateSRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SREM, U, MIRBuilder);
  }
  bool translateIntToPtr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_INTTOPTR, U, MIRBuilder);
  }
  bool translatePtrToInt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_PTRTOINT, U, MIRBuilder);
  }
  bool translateTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_TRUNC, U, MIRBuilder);
  }
  bool translateFPTrunc(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTRUNC, U, MIRBuilder);
  }
  bool translateFPExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPEXT, U, MIRBuilder);
  }
  bool translateFPToUI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOUI, U, MIRBuilder);
  }
  bool translateFPToSI(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_FPTOSI, U, MIRBuilder);
  }
  bool translateUIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_UITOFP, U, MIRBuilder);
  }
  bool translateSIToFP(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SITOFP, U, MIRBuilder);
  }
  bool translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateSExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_SEXT, U, MIRBuilder);
  }

  bool translateZExt(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ZEXT, U, MIRBuilder);
  }

  bool translateShl(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_SHL, U, MIRBuilder);
  }
  bool translateLShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_LSHR, U, MIRBuilder);
  }
  bool translateAShr(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_ASHR, U, MIRBuilder);
  }

  bool translateFAdd(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FADD, U, MIRBuilder);
  }
  bool translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
  }
  bool translateFMul(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FMUL, U, MIRBuilder);
  }
  bool translateFDiv(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FDIV, U, MIRBuilder);
  }
  bool translateFRem(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateBinaryOp(TargetOpcode::G_FREM, U, MIRBuilder);
  }

  bool translateVAArg(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateInsertElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateExtractElement(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateShuffleVector(const User &U, MachineIRBuilder &MIRBuilder);

  bool translateAtomicCmpXchg(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateAtomicRMW(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFence(const User &U, MachineIRBuilder &MIRBuilder);
  bool translateFreeze(const User &U, MachineIRBuilder &MIRBuilder);

  // Stubs to keep the compiler happy while we implement the rest of the
  // translation.
  bool translateResume(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCleanupRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchRet(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchSwitch(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateAddrSpaceCast(const User &U, MachineIRBuilder &MIRBuilder) {
    return translateCast(TargetOpcode::G_ADDRSPACE_CAST, U, MIRBuilder);
  }
  bool translateCleanupPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateCatchPad(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp1(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }
  bool translateUserOp2(const User &U, MachineIRBuilder &MIRBuilder) {
    return false;
  }

  /// @}

  // Builder for machine instructions, a la IRBuilder.
  // I.e., compared to a regular MIBuilder, this one also inserts the
  // instruction in the current block, can create blocks, etc.; basically a
  // kind of IRBuilder, but for Machine IR.
  // CSEMIRBuilder CurBuilder;
  std::unique_ptr<MachineIRBuilder> CurBuilder;

  // Builder set to the entry block (just after ABI lowering instructions). Used
  // as a convenient location for Constants.
  // CSEMIRBuilder EntryBuilder;
  std::unique_ptr<MachineIRBuilder> EntryBuilder;

  // The MachineFunction currently being translated.
  MachineFunction *MF = nullptr;

  /// MachineRegisterInfo used to create virtual registers.
  MachineRegisterInfo *MRI = nullptr;

  const DataLayout *DL = nullptr;

  /// Current target configuration. Controls how the pass handles errors.
  const TargetPassConfig *TPC = nullptr;

  CodeGenOpt::Level OptLevel;

  /// Current optimization remark emitter. Used to report failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;
  FunctionLoweringInfo FuncInfo;

  // True when either the Target Machine specifies no optimizations or the
  // function has the optnone attribute.
  bool EnableOpts = false;

  /// True when the block contains a tail call. This allows the IRTranslator to
  /// stop translating such blocks early.
  bool HasTailCall = false;

  StackProtectorDescriptor SPDescriptor;

  /// Switch analysis and optimization.
  class GISelSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    GISelSwitchLowering(IRTranslator *irt, FunctionLoweringInfo &funcinfo)
        : SwitchLowering(funcinfo), IRT(irt) {
      assert(irt && "irt is null!");
    }

    void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      IRT->addSuccessorWithProb(Src, Dst, Prob);
    }

    virtual ~GISelSwitchLowering() = default;

  private:
    IRTranslator *IRT;
  };

  std::unique_ptr<GISelSwitchLowering> SL;

  // * Insert all the code needed to materialize the constants
  // at the proper place. E.g., Entry block or dominator block
  // of each constant depending on how fancy we want to be.
  // * Clear the different maps.
  void finalizeFunction();

  // Processing steps done per block. E.g. emitting jump tables, stack
  // protectors etc. Returns true if no errors, false if there was a problem
  // that caused an abort.
  bool finalizeBasicBlock(const BasicBlock &BB, MachineBasicBlock &MBB);

  /// Codegen a new tail for a stack protector check ParentMBB which has had its
  /// tail spliced into a stack protector check success bb.
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                              MachineBasicBlock *ParentBB);

  /// Codegen the failure basic block for a stack protector check.
  ///
  /// A failure stack protector machine basic block consists simply of a call to
  /// __stack_chk_fail().
  ///
  /// For a high level explanation of how this fits into the stack protector
  /// generation see the comment on the declaration of class
  /// StackProtectorDescriptor.
  ///
  /// \return true if there were no problems.
  bool emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *FailureBB);

  /// Get the VRegs that represent \p Val.
  /// Non-aggregate types have just one corresponding VReg and the list can be
  /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
  /// not exist, they are created.
  ArrayRef<Register> getOrCreateVRegs(const Value &Val);
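  // For example (illustrative), a value of type {i32, i64} is flattened to
  // two vregs, one per member, with the members' offsets recorded alongside
  // in the VMap.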

  Register getOrCreateVReg(const Value &Val) {
    auto Regs = getOrCreateVRegs(Val);
    if (Regs.empty())
      return 0;
    assert(Regs.size() == 1 &&
           "attempt to get single VReg for aggregate or void");
    return Regs[0];
  }

  /// Allocate some vregs and offsets in the VMap. Then populate just the
  /// offsets while leaving the vregs empty.
  ValueToVRegInfo::VRegListT &allocateVRegs(const Value &Val);

  /// Get the frame index that represents \p Val.
  /// If such VReg does not exist, it is created.
  int getOrCreateFrameIndex(const AllocaInst &AI);

  /// Get the alignment of the given memory operation instruction. This will
  /// either be the explicitly specified value or the ABI-required alignment for
  /// the type being accessed (according to the Module's DataLayout).
  Align getMemOpAlign(const Instruction &I);

  /// Get the MachineBasicBlock that represents \p BB. Specifically, the block
  /// returned will be the head of the translated block (suitable for branch
  /// destinations).
  MachineBasicBlock &getMBB(const BasicBlock &BB);

  /// Record \p NewPred as a Machine predecessor to `Edge.second`, corresponding
  /// to `Edge.first` at the IR level. This is used when IRTranslation creates
  /// multiple MachineBasicBlocks for a given IR block and the CFG is no longer
  /// represented simply by the IR-level CFG.
  void addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred);

  /// Returns the Machine IR predecessors for the given IR CFG edge. Usually
  /// this is just the single MachineBasicBlock corresponding to the predecessor
  /// in the IR. More complex lowering can result in multiple MachineBasicBlocks
  /// preceding the original though (e.g. switch instructions).
  SmallVector<MachineBasicBlock *, 1> getMachinePredBBs(CFGEdge Edge) {
    auto RemappedEdge = MachinePreds.find(Edge);
    if (RemappedEdge != MachinePreds.end())
      return RemappedEdge->second;
    return SmallVector<MachineBasicBlock *, 4>(1, &getMBB(*Edge.first));
  }

  /// Return branch probability calculated by BranchProbabilityInfo for IR
  /// blocks.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  IRTranslator(CodeGenOpt::Level OptLevel = CodeGenOpt::None);

  StringRef getPassName() const override { return "IRTranslator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  // Algo:
  //   CallLowering = MF.subtarget.getCallLowering()
  //   F = MF.getParent()
  //   MIRBuilder.reset(MF)
  //   getMBB(F.getEntryBB())
  //   CallLowering->translateArguments(MIRBuilder, F, ValToVReg)
  //   for each bb in F
  //     getMBB(bb)
  //     for each inst in bb
  //       if (!translate(MIRBuilder, inst, ValToVReg, ConstantToSequence))
  //         report_fatal_error("Don't know how to translate input");
  //   finalize()
  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_IRTRANSLATOR_H
//===- llvm/CodeGen/GlobalISel/LegalizerInfo.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Interface for Targets to specify which operations they can successfully
/// select and how the others should be expanded most efficiently.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/LegacyLegalizerInfo.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CommandLine.h"
#include <cassert>
#include <cstdint>
#include <tuple>
#include <utility>

namespace llvm {

extern cl::opt<bool> DisableGISelLegalityCheck;

class MachineFunction;
class raw_ostream;
class LegalizerHelper;
class MachineInstr;
class MachineRegisterInfo;
class MCInstrInfo;

namespace LegalizeActions {
enum LegalizeAction : std::uint8_t {
  /// The operation is expected to be selectable directly by the target, and
  /// no transformation is necessary.
  Legal,

  /// The operation should be synthesized from multiple instructions acting on
  /// a narrower scalar base-type. For example a 64-bit add might be
  /// implemented in terms of 32-bit add-with-carry.
  NarrowScalar,

  /// The operation should be implemented in terms of a wider scalar
  /// base-type. For example a <2 x s8> add could be implemented as a <2
  /// x s32> add (ignoring the high bits).
  WidenScalar,

  /// The (vector) operation should be implemented by splitting it into
  /// sub-vectors where the operation is legal. For example a <8 x s64> add
  /// might be implemented as 4 separate <2 x s64> adds. There can be a
  /// leftover if there are not enough elements for the last sub-vector, e.g. a
  /// <7 x s64> add will be implemented as 3 separate <2 x s64> adds and one
  /// s64 add. Leftover
  /// types can be avoided by doing MoreElements first.
  FewerElements,

  /// The (vector) operation should be implemented by widening the input
  /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
  /// rarely legal, but you might perform an <8 x i8> and then only look at
  /// the first two results.
  MoreElements,

  /// Perform the operation on a different, but equivalently sized type.
  Bitcast,

  /// The operation itself must be expressed in terms of simpler actions on
  /// this target. E.g. a SREM replaced by an SDIV and subtraction.
  Lower,

  /// The operation should be implemented as a call to some kind of runtime
  /// support library. For example this usually happens on machines that don't
  /// support floating-point operations natively.
  Libcall,

  /// The target wants to do something special with this combination of
  /// operand and type. A callback will be issued when it is needed.
  Custom,

  /// This operation is completely unsupported on the target. A programming
  /// error has occurred.
  Unsupported,

  /// Sentinel value for when no action was found in the specified table.
  NotFound,

  /// Fall back onto the old rules.
  /// TODO: Remove this once we've migrated
  UseLegacyRules,
};
} // end namespace LegalizeActions
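
// Example (illustrative sketch): inside a target's LegalizerInfo constructor,
// these actions are typically attached to opcodes through the rule-builder
// API declared later in this file:
//
//   const LLT s32 = LLT::scalar(32), s64 = LLT::scalar(64);
//   getActionDefinitionsBuilder(TargetOpcode::G_ADD)
//       .legalFor({s32, s64})      // -> Legal
//       .widenScalarToNextPow2(0)  // -> WidenScalar for narrow/odd sizes
//       .clampScalar(0, s32, s64); // -> Narrow/WidenScalar into the range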
raw_ostream &operator<<(raw_ostream &OS, LegalizeActions::LegalizeAction Action);

using LegalizeActions::LegalizeAction;

/// The LegalityQuery object bundles together all the information that's needed
/// to decide whether a given operation is legal or not.
/// For efficiency, it doesn't make a copy of Types so care must be taken not
/// to free it before using the query.
struct LegalityQuery {
  unsigned Opcode;
  ArrayRef<LLT> Types;

  struct MemDesc {
    LLT MemoryTy;
    uint64_t AlignInBits;
    AtomicOrdering Ordering;

    MemDesc() = default;
    MemDesc(LLT MemoryTy, uint64_t AlignInBits, AtomicOrdering Ordering)
        : MemoryTy(MemoryTy), AlignInBits(AlignInBits), Ordering(Ordering) {}
    MemDesc(const MachineMemOperand &MMO)
        : MemoryTy(MMO.getMemoryType()),
          AlignInBits(MMO.getAlign().value() * 8),
          Ordering(MMO.getSuccessOrdering()) {}
  };

  /// Operations which require memory can use this to place requirements on the
  /// memory type for each MMO.
  ArrayRef<MemDesc> MMODescrs;

  constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types,
                          const ArrayRef<MemDesc> MMODescrs)
      : Opcode(Opcode), Types(Types), MMODescrs(MMODescrs) {}
  constexpr LegalityQuery(unsigned Opcode, const ArrayRef<LLT> Types)
      : LegalityQuery(Opcode, Types, {}) {}

  raw_ostream &print(raw_ostream &OS) const;
};
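
// Example (illustrative sketch, assuming LI is a target's LegalizerInfo):
// querying the action for a 64-bit G_ADD; the result is a LegalizeActionStep,
// declared below:
//
//   LLT s64 = LLT::scalar(64);
//   LegalizeActionStep Step = LI.getAction({TargetOpcode::G_ADD, {s64}});
//   bool IsLegal = Step.Action == LegalizeActions::Legal;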

/// The result of a query. It either indicates a final answer of Legal or
/// Unsupported or describes an action that must be taken to make an operation
/// more legal.
struct LegalizeActionStep {
  /// The action to take or the final answer.
  LegalizeAction Action;
  /// If describing an action, the type index to change. Otherwise zero.
  unsigned TypeIdx;
  /// If describing an action, the new type for TypeIdx. Otherwise LLT{}.
  LLT NewType;

  LegalizeActionStep(LegalizeAction Action, unsigned TypeIdx,
                     const LLT NewType)
      : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}

  LegalizeActionStep(LegacyLegalizeActionStep Step)
      : TypeIdx(Step.TypeIdx), NewType(Step.NewType) {
    switch (Step.Action) {
    case LegacyLegalizeActions::Legal:
      Action = LegalizeActions::Legal;
      break;
    case LegacyLegalizeActions::NarrowScalar:
      Action = LegalizeActions::NarrowScalar;
      break;
    case LegacyLegalizeActions::WidenScalar:
      Action = LegalizeActions::WidenScalar;
      break;
    case LegacyLegalizeActions::FewerElements:
      Action = LegalizeActions::FewerElements;
      break;
    case LegacyLegalizeActions::MoreElements:
      Action = LegalizeActions::MoreElements;
      break;
    case LegacyLegalizeActions::Bitcast:
      Action = LegalizeActions::Bitcast;
      break;
    case LegacyLegalizeActions::Lower:
      Action = LegalizeActions::Lower;
      break;
    case LegacyLegalizeActions::Libcall:
      Action = LegalizeActions::Libcall;
      break;
    case LegacyLegalizeActions::Custom:
      Action = LegalizeActions::Custom;
      break;
    case LegacyLegalizeActions::Unsupported:
      Action = LegalizeActions::Unsupported;
      break;
    case LegacyLegalizeActions::NotFound:
      Action = LegalizeActions::NotFound;
      break;
    }
  }

  bool operator==(const LegalizeActionStep &RHS) const {
    return std::tie(Action, TypeIdx, NewType) ==
        std::tie(RHS.Action, RHS.TypeIdx, RHS.NewType);
  }
};

using LegalityPredicate = std::function<bool (const LegalityQuery &)>;
using LegalizeMutation =
    std::function<std::pair<unsigned, LLT>(const LegalityQuery &)>;

namespace LegalityPredicates {
struct TypePairAndMemDesc {
  LLT Type0;
  LLT Type1;
  LLT MemTy;
  uint64_t Align;

  bool operator==(const TypePairAndMemDesc &Other) const {
    return Type0 == Other.Type0 && Type1 == Other.Type1 &&
           Align == Other.Align && MemTy == Other.MemTy;
  }

  /// \returns true if this memory access is legal for the access described
  /// by \p Other (the alignment is sufficient for the size and result type).
  bool isCompatible(const TypePairAndMemDesc &Other) const {
    return Type0 == Other.Type0 && Type1 == Other.Type1 &&
           Align >= Other.Align &&
           // FIXME: This perhaps should be stricter, but the current legality
           // rules are written only considering the size.
           MemTy.getSizeInBits() == Other.MemTy.getSizeInBits();
  }
};

/// True iff P0 and P1 are true.
template<typename Predicate>
Predicate all(Predicate P0, Predicate P1) {
  return [=](const LegalityQuery &Query) {
    return P0(Query) && P1(Query);
  };
}
/// True iff all given predicates are true.
template<typename Predicate, typename... Args>
Predicate all(Predicate P0, Predicate P1, Args... args) {
  return all(all(P0, P1), args...);
}

/// True iff P0 or P1 are true.
template<typename Predicate>
Predicate any(Predicate P0, Predicate P1) {
  return [=](const LegalityQuery &Query) {
    return P0(Query) || P1(Query);
  };
}
/// True iff any given predicates are true.
template<typename Predicate, typename... Args>
Predicate any(Predicate P0, Predicate P1, Args... args) {
  return any(any(P0, P1), args...);
}
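
// Example (illustrative sketch): combining the predicates declared below:
//
//   LegalityPredicate P = all(isScalar(0), scalarNarrowerThan(0, 64));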

/// True iff the given type index is the specified type.
LegalityPredicate typeIs(unsigned TypeIdx, LLT TypesInit);
/// True iff the given type index is one of the specified types.
LegalityPredicate typeInSet(unsigned TypeIdx,
                            std::initializer_list<LLT> TypesInit);

/// True iff the given type index is not the specified type.
inline LegalityPredicate typeIsNot(unsigned TypeIdx, LLT Type) {
  return [=](const LegalityQuery &Query) {
           return Query.Types[TypeIdx] != Type;
         };
}

/// True iff the given types for the given pair of type indexes is one of the
/// specified type pairs.
LegalityPredicate
typePairInSet(unsigned TypeIdx0, unsigned TypeIdx1,
              std::initializer_list<std::pair<LLT, LLT>> TypesInit);
/// True iff the given types for the given pair of type indexes, together with
/// the memory access at the given MMO index, are compatible with one of the
/// specified (type pair, memory descriptor) entries.
LegalityPredicate typePairAndMemDescInSet(
    unsigned TypeIdx0, unsigned TypeIdx1, unsigned MMOIdx,
    std::initializer_list<TypePairAndMemDesc> TypesAndMemDescInit);
/// True iff the specified type index is a scalar.
LegalityPredicate isScalar(unsigned TypeIdx);
/// True iff the specified type index is a vector.
LegalityPredicate isVector(unsigned TypeIdx);
/// True iff the specified type index is a pointer (with any address space).
LegalityPredicate isPointer(unsigned TypeIdx);
/// True iff the specified type index is a pointer with the specified address
/// space.
LegalityPredicate isPointer(unsigned TypeIdx, unsigned AddrSpace);

/// True if the type index is a vector with element type \p EltTy
LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT EltTy);

/// True iff the specified type index is a scalar that's narrower than the given
/// size.
LegalityPredicate scalarNarrowerThan(unsigned TypeIdx, unsigned Size);

/// True iff the specified type index is a scalar that's wider than the given
/// size.
LegalityPredicate scalarWiderThan(unsigned TypeIdx, unsigned Size);

/// True iff the specified type index is a scalar or vector with an element type
/// that's narrower than the given size.
LegalityPredicate scalarOrEltNarrowerThan(unsigned TypeIdx, unsigned Size);

/// True iff the specified type index is a scalar or a vector with an element
/// type that's wider than the given size.
LegalityPredicate scalarOrEltWiderThan(unsigned TypeIdx, unsigned Size);

/// True iff the specified type index is a scalar whose size is not a multiple
/// of Size.
LegalityPredicate sizeNotMultipleOf(unsigned TypeIdx, unsigned Size);

/// True iff the specified type index is a scalar whose size is not a power of
/// 2.
LegalityPredicate sizeNotPow2(unsigned TypeIdx);

/// True iff the specified type index is a scalar or vector whose element size
/// is not a power of 2.
LegalityPredicate scalarOrEltSizeNotPow2(unsigned TypeIdx);

/// True if the total bitwidth of the specified type index is \p Size bits.
LegalityPredicate sizeIs(unsigned TypeIdx, unsigned Size);

/// True iff the specified type indices are both the same bit size.
LegalityPredicate sameSize(unsigned TypeIdx0, unsigned TypeIdx1);

/// True iff the first type index has a larger total bit size than the second
/// type index.
LegalityPredicate largerThan(unsigned TypeIdx0, unsigned TypeIdx1);

/// True iff the first type index has a smaller total bit size than the second
/// type index.
LegalityPredicate smallerThan(unsigned TypeIdx0, unsigned TypeIdx1);

/// True iff the specified MMO index has a size (rounded to bytes) that is not a
/// power of 2.
LegalityPredicate memSizeInBytesNotPow2(unsigned MMOIdx);

/// True iff the specified MMO index has a size that is not an even byte size,
/// or that even byte size is not a power of 2.
LegalityPredicate memSizeNotByteSizePow2(unsigned MMOIdx);

/// True iff the specified type index is a vector whose element count is not a
/// power of 2.
LegalityPredicate numElementsNotPow2(unsigned TypeIdx);
/// True iff the specified MMO index has an atomic ordering of \p Ordering or
/// stronger.
LegalityPredicate atomicOrderingAtLeastOrStrongerThan(unsigned MMOIdx,
                                                      AtomicOrdering Ordering);
} // end namespace LegalityPredicates

namespace LegalizeMutations {
/// Select this specific type for the given type index.
LegalizeMutation changeTo(unsigned TypeIdx, LLT Ty);

/// Keep the same type as the given type index.
LegalizeMutation changeTo(unsigned TypeIdx, unsigned FromTypeIdx);

/// Keep the same scalar or element type as the given type index.
LegalizeMutation changeElementTo(unsigned TypeIdx, unsigned FromTypeIdx);

/// Keep the same scalar or element type as the given type.
LegalizeMutation changeElementTo(unsigned TypeIdx, LLT Ty);

/// Keep the same scalar or element type as \p TypeIdx, but take the number of
/// elements from \p FromTypeIdx.
LegalizeMutation changeElementCountTo(unsigned TypeIdx, unsigned FromTypeIdx);

/// Keep the same scalar or element type as \p TypeIdx, but take the number of
/// elements from \p Ty.
LegalizeMutation changeElementCountTo(unsigned TypeIdx, LLT Ty);

/// Change the scalar size or element size to have the same scalar size as type
/// index \p FromIndex. Unlike changeElementTo, this discards pointer types and
/// only changes the size.
LegalizeMutation changeElementSizeTo(unsigned TypeIdx, unsigned FromTypeIdx);

/// Widen the scalar type or vector element type for the given type index to the
/// next power of 2.
LegalizeMutation widenScalarOrEltToNextPow2(unsigned TypeIdx, unsigned Min = 0);

/// Widen the scalar type or vector element type for the given type index to
/// next multiple of \p Size.
LegalizeMutation widenScalarOrEltToNextMultipleOf(unsigned TypeIdx,
                                                  unsigned Size);

/// Add more elements to the type for the given type index to the next power of
/// 2.
LegalizeMutation moreElementsToNextPow2(unsigned TypeIdx, unsigned Min = 0);

/// Break up the vector type for the given type index into the element type.
LegalizeMutation scalarize(unsigned TypeIdx);
} // end namespace LegalizeMutations
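
// Illustrative sketch: a LegalizeMutation maps a LegalityQuery to the
// (type index, new type) pair to apply. E.g. the following mutation, given
// any query, yields {0, LLT::scalar(32)}:
//
//   LegalizeMutation M = LegalizeMutations::changeTo(0, LLT::scalar(32));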

/// A single rule in a legalizer info ruleset.
/// The specified action is chosen when the predicate is true. Where
/// appropriate for the action (e.g. for WidenScalar), the new type is selected
/// using the given mutation.
class LegalizeRule {
  LegalityPredicate Predicate;
  LegalizeAction Action;
  LegalizeMutation Mutation;

public:
  LegalizeRule(LegalityPredicate Predicate, LegalizeAction Action,
               LegalizeMutation Mutation = nullptr)
      : Predicate(Predicate), Action(Action), Mutation(Mutation) {}

  /// Test whether the LegalityQuery matches.
  bool match(const LegalityQuery &Query) const {
    return Predicate(Query);
  }

  LegalizeAction getAction() const { return Action; }

  /// Determine the change to make.
  std::pair<unsigned, LLT> determineMutation(const LegalityQuery &Query) const {
    if (Mutation)
      return Mutation(Query);
    return std::make_pair(0, LLT{});
  }
};
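
// Illustrative sketch: a rule pairs a predicate with an action and, where the
// action needs one, a mutation. This rule widens scalars at type index 0
// whose size is not a multiple of 32 to the next multiple of 32:
//
//   LegalizeRule Rule(
//       LegalityPredicates::sizeNotMultipleOf(0, 32),
//       LegalizeAction::WidenScalar,
//       LegalizeMutations::widenScalarOrEltToNextMultipleOf(0, 32));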

class LegalizeRuleSet {
  /// When non-zero, the opcode we are an alias of
  unsigned AliasOf = 0;
  /// If true, there is another opcode that aliases this one
  bool IsAliasedByAnother = false;
  SmallVector<LegalizeRule, 2> Rules;

#ifndef NDEBUG
  /// If bit I is set, this rule set contains a rule that may handle (predicate
  /// on, perform an action upon, or both) the type index I. The uncertainty
  /// comes from free-form rules executing user-provided lambda functions. We
  /// conservatively assume such rules do the right thing and cover all type
  /// indices. The bitset is intentionally 1 bit wider than it absolutely needs
  /// to be to distinguish such cases from the cases where all type indices are
  /// individually handled.
  SmallBitVector TypeIdxsCovered{MCOI::OPERAND_LAST_GENERIC -
                                 MCOI::OPERAND_FIRST_GENERIC + 2};
  SmallBitVector ImmIdxsCovered{MCOI::OPERAND_LAST_GENERIC_IMM -
                                MCOI::OPERAND_FIRST_GENERIC_IMM + 2};
#endif

  unsigned typeIdx(unsigned TypeIdx) {
    assert(TypeIdx <=
               (MCOI::OPERAND_LAST_GENERIC - MCOI::OPERAND_FIRST_GENERIC) &&
           "Type Index is out of bounds");
#ifndef NDEBUG
    TypeIdxsCovered.set(TypeIdx);
#endif
    return TypeIdx;
  }

  void markAllIdxsAsCovered() {
#ifndef NDEBUG
    TypeIdxsCovered.set();
    ImmIdxsCovered.set();
#endif
  }

  void add(const LegalizeRule &Rule) {
    assert(AliasOf == 0 &&
           "RuleSet is aliased, change the representative opcode instead");
    Rules.push_back(Rule);
  }

  static bool always(const LegalityQuery &) { return true; }

  /// Use the given action when the predicate is true.
  /// Action should not be an action that requires mutation.
  LegalizeRuleSet &actionIf(LegalizeAction Action,
                            LegalityPredicate Predicate) {
    add({Predicate, Action});
    return *this;
  }
  /// Use the given action when the predicate is true.
  /// Action should be an action that requires mutation.
  LegalizeRuleSet &actionIf(LegalizeAction Action, LegalityPredicate Predicate,
                            LegalizeMutation Mutation) {
    add({Predicate, Action, Mutation});
    return *this;
  }
  /// Use the given action when type index 0 is any type in the given list.
  /// Action should not be an action that requires mutation.
  LegalizeRuleSet &actionFor(LegalizeAction Action,
                             std::initializer_list<LLT> Types) {
    using namespace LegalityPredicates;
    return actionIf(Action, typeInSet(typeIdx(0), Types));
  }
  /// Use the given action when type index 0 is any type in the given list.
  /// Action should be an action that requires mutation.
  LegalizeRuleSet &actionFor(LegalizeAction Action,
                             std::initializer_list<LLT> Types,
                             LegalizeMutation Mutation) {
    using namespace LegalityPredicates;
    return actionIf(Action, typeInSet(typeIdx(0), Types), Mutation);
  }
  /// Use the given action when type indexes 0 and 1 are any type pair in the
  /// given list.
  /// Action should not be an action that requires mutation.
  LegalizeRuleSet &actionFor(LegalizeAction Action,
                             std::initializer_list<std::pair<LLT, LLT>> Types) {
    using namespace LegalityPredicates;
    return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
  }
  /// Use the given action when type indexes 0 and 1 are any type pair in the
  /// given list.
  /// Action should be an action that requires mutation.
  LegalizeRuleSet &actionFor(LegalizeAction Action,
                             std::initializer_list<std::pair<LLT, LLT>> Types,
                             LegalizeMutation Mutation) {
    using namespace LegalityPredicates;
    return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types),
                    Mutation);
  }
  /// Use the given action when type index 0 is any type in the given list and
  /// imm index 0 is anything. Action should not be an action that requires
  /// mutation.
  LegalizeRuleSet &actionForTypeWithAnyImm(LegalizeAction Action,
                                           std::initializer_list<LLT> Types) {
    using namespace LegalityPredicates;
    immIdx(0); // Inform verifier imm idx 0 is handled.
    return actionIf(Action, typeInSet(typeIdx(0), Types));
  }

  LegalizeRuleSet &actionForTypeWithAnyImm(
    LegalizeAction Action, std::initializer_list<std::pair<LLT, LLT>> Types) {
    using namespace LegalityPredicates;
    immIdx(0); // Inform verifier imm idx 0 is handled.
    return actionIf(Action, typePairInSet(typeIdx(0), typeIdx(1), Types));
  }

  /// Use the given action when type indexes 0 and 1 are both in the given list.
  /// That is, the type pair is in the cartesian product of the list.
  /// Action should not be an action that requires mutation.
  LegalizeRuleSet &actionForCartesianProduct(LegalizeAction Action,
                                             std::initializer_list<LLT> Types) {
    using namespace LegalityPredicates;
    return actionIf(Action, all(typeInSet(typeIdx(0), Types),
                                typeInSet(typeIdx(1), Types)));
  }
  /// Use the given action when type indexes 0 and 1 are both in their
  /// respective lists.
  /// That is, the type pair is in the cartesian product of the lists.
  /// Action should not be an action that requires mutation.
  LegalizeRuleSet &
  actionForCartesianProduct(LegalizeAction Action,
                            std::initializer_list<LLT> Types0,
                            std::initializer_list<LLT> Types1) {
    using namespace LegalityPredicates;
    return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
                                typeInSet(typeIdx(1), Types1)));
  }
  /// Use the given action when type indexes 0, 1, and 2 are all in their
  /// respective lists.
  /// That is, the type triple is in the cartesian product of the lists.
  /// Action should not be an action that requires mutation.
  LegalizeRuleSet &actionForCartesianProduct(
      LegalizeAction Action, std::initializer_list<LLT> Types0,
      std::initializer_list<LLT> Types1, std::initializer_list<LLT> Types2) {
    using namespace LegalityPredicates;
    return actionIf(Action, all(typeInSet(typeIdx(0), Types0),
                                all(typeInSet(typeIdx(1), Types1),
                                    typeInSet(typeIdx(2), Types2))));
  }

public:
  LegalizeRuleSet() = default;

  bool isAliasedByAnother() { return IsAliasedByAnother; }
  void setIsAliasedByAnother() { IsAliasedByAnother = true; }
  void aliasTo(unsigned Opcode) {
    assert((AliasOf == 0 || AliasOf == Opcode) &&
           "Opcode is already aliased to another opcode");
    assert(Rules.empty() && "Aliasing will discard rules");
    AliasOf = Opcode;
  }
  unsigned getAlias() const { return AliasOf; }

  unsigned immIdx(unsigned ImmIdx) {
    assert(ImmIdx <= (MCOI::OPERAND_LAST_GENERIC_IMM -
                      MCOI::OPERAND_FIRST_GENERIC_IMM) &&
           "Imm Index is out of bounds");
#ifndef NDEBUG
    ImmIdxsCovered.set(ImmIdx);
#endif
    return ImmIdx;
  }

  /// The instruction is legal if predicate is true.
  LegalizeRuleSet &legalIf(LegalityPredicate Predicate) {
    // We have no choice but to conservatively assume that the free-form
    // user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Legal, Predicate);
  }
  /// The instruction is legal when type index 0 is any type in the given list.
  LegalizeRuleSet &legalFor(std::initializer_list<LLT> Types) {
    return actionFor(LegalizeAction::Legal, Types);
  }
  /// The instruction is legal when type indexes 0 and 1 are any type pair in
  /// the given list.
  LegalizeRuleSet &legalFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
    return actionFor(LegalizeAction::Legal, Types);
  }
  /// The instruction is legal when type index 0 is any type in the given list
  /// and imm index 0 is anything.
  LegalizeRuleSet &legalForTypeWithAnyImm(std::initializer_list<LLT> Types) {
    markAllIdxsAsCovered();
    return actionForTypeWithAnyImm(LegalizeAction::Legal, Types);
  }

  LegalizeRuleSet &legalForTypeWithAnyImm(
    std::initializer_list<std::pair<LLT, LLT>> Types) {
    markAllIdxsAsCovered();
    return actionForTypeWithAnyImm(LegalizeAction::Legal, Types);
  }

  /// The instruction is legal when type indexes 0 and 1, along with the memory
  /// size and minimum alignment, form any type and size tuple in the given
  /// list.
  LegalizeRuleSet &legalForTypesWithMemDesc(
      std::initializer_list<LegalityPredicates::TypePairAndMemDesc>
          TypesAndMemDesc) {
    return actionIf(LegalizeAction::Legal,
                    LegalityPredicates::typePairAndMemDescInSet(
                        typeIdx(0), typeIdx(1), /*MMOIdx*/ 0, TypesAndMemDesc));
  }
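
  // Illustrative sketch, assuming s32 = LLT::scalar(32) and
  // p0 = LLT::pointer(0, 64): a load of a full 32-bit value from a p0 pointer
  // with at least 8-bit alignment could be declared legal with
  //
  //   .legalForTypesWithMemDesc({{s32, p0, s32, 8}});
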
  /// The instruction is legal when type indexes 0 and 1 are both in the given
  /// list. That is, the type pair is in the cartesian product of the list.
  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types) {
    return actionForCartesianProduct(LegalizeAction::Legal, Types);
  }
  /// The instruction is legal when type indexes 0 and 1 are both in their
  /// respective lists.
  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
                                            std::initializer_list<LLT> Types1) {
    return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1);
  }
  /// The instruction is legal when type indexes 0, 1, and 2 are all in their
  /// respective lists.
  LegalizeRuleSet &legalForCartesianProduct(std::initializer_list<LLT> Types0,
                                            std::initializer_list<LLT> Types1,
                                            std::initializer_list<LLT> Types2) {
    return actionForCartesianProduct(LegalizeAction::Legal, Types0, Types1,
                                     Types2);
  }

  LegalizeRuleSet &alwaysLegal() {
    using namespace LegalizeMutations;
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Legal, always);
  }

  /// The specified type index is coerced (bitcast) to the type selected by the
  /// mutation if the predicate is true.
  LegalizeRuleSet &bitcastIf(LegalityPredicate Predicate,
                             LegalizeMutation Mutation) {
    // We have no choice but to conservatively assume that bitcasting with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Bitcast, Predicate, Mutation);
  }

  /// The instruction is lowered.
  LegalizeRuleSet &lower() {
    using namespace LegalizeMutations;
    // We have no choice but to conservatively assume that predicate-less
    // lowering properly handles all type indices by design:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Lower, always);
  }
  /// The instruction is lowered if predicate is true. Keep type index 0 as the
  /// same type.
  LegalizeRuleSet &lowerIf(LegalityPredicate Predicate) {
    using namespace LegalizeMutations;
    // We have no choice but to conservatively assume that lowering with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Lower, Predicate);
  }
  /// The instruction is lowered if predicate is true.
  LegalizeRuleSet &lowerIf(LegalityPredicate Predicate,
                           LegalizeMutation Mutation) {
    // We have no choice but to conservatively assume that lowering with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Lower, Predicate, Mutation);
  }
  /// The instruction is lowered when type index 0 is any type in the given
  /// list. Keep type index 0 as the same type.
  LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types) {
    return actionFor(LegalizeAction::Lower, Types);
  }
  /// The instruction is lowered when type index 0 is any type in the given
  /// list.
  LegalizeRuleSet &lowerFor(std::initializer_list<LLT> Types,
                            LegalizeMutation Mutation) {
    return actionFor(LegalizeAction::Lower, Types, Mutation);
  }
  /// The instruction is lowered when type indexes 0 and 1 are any type pair
  /// in the given list. Keep type index 0 as the same type.
  LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
    return actionFor(LegalizeAction::Lower, Types);
  }
  /// The instruction is lowered when type indexes 0 and 1 are any type pair
  /// in the given list.
  LegalizeRuleSet &lowerFor(std::initializer_list<std::pair<LLT, LLT>> Types,
                            LegalizeMutation Mutation) {
    return actionFor(LegalizeAction::Lower, Types, Mutation);
  }
  /// The instruction is lowered when type indexes 0 and 1 are both in their
  /// respective lists.
  LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
                                            std::initializer_list<LLT> Types1) {
    using namespace LegalityPredicates;
    return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1);
  }
  /// The instruction is lowered when type indexes 0, 1, and 2 are all in
  /// their respective lists.
  LegalizeRuleSet &lowerForCartesianProduct(std::initializer_list<LLT> Types0,
                                            std::initializer_list<LLT> Types1,
                                            std::initializer_list<LLT> Types2) {
    using namespace LegalityPredicates;
    return actionForCartesianProduct(LegalizeAction::Lower, Types0, Types1,
                                     Types2);
  }

  /// The instruction is emitted as a library call.
  LegalizeRuleSet &libcall() {
    using namespace LegalizeMutations;
    // We have no choice but to conservatively assume that a predicate-less
    // libcall action properly handles all type indices by design:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Libcall, always);
  }

  /// Like legalIf, but for the Libcall action.
  LegalizeRuleSet &libcallIf(LegalityPredicate Predicate) {
    // We have no choice but to conservatively assume that a libcall with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Libcall, Predicate);
  }
  LegalizeRuleSet &libcallFor(std::initializer_list<LLT> Types) {
    return actionFor(LegalizeAction::Libcall, Types);
  }
  LegalizeRuleSet &
  libcallFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
    return actionFor(LegalizeAction::Libcall, Types);
  }
  LegalizeRuleSet &
  libcallForCartesianProduct(std::initializer_list<LLT> Types) {
    return actionForCartesianProduct(LegalizeAction::Libcall, Types);
  }
  LegalizeRuleSet &
  libcallForCartesianProduct(std::initializer_list<LLT> Types0,
                             std::initializer_list<LLT> Types1) {
    return actionForCartesianProduct(LegalizeAction::Libcall, Types0, Types1);
  }

  /// Widen the scalar to the one selected by the mutation if the predicate is
  /// true.
  LegalizeRuleSet &widenScalarIf(LegalityPredicate Predicate,
                                 LegalizeMutation Mutation) {
    // We have no choice but to conservatively assume that an action with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::WidenScalar, Predicate, Mutation);
  }
  /// Narrow the scalar to the one selected by the mutation if the predicate is
  /// true.
  LegalizeRuleSet &narrowScalarIf(LegalityPredicate Predicate,
                                  LegalizeMutation Mutation) {
    // We have no choice but to conservatively assume that an action with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::NarrowScalar, Predicate, Mutation);
  }
  /// Narrow the scalar, as specified by the mutation, when type indexes 0 and
  /// 1 are any type pair in the given list.
  LegalizeRuleSet &
  narrowScalarFor(std::initializer_list<std::pair<LLT, LLT>> Types,
                  LegalizeMutation Mutation) {
    return actionFor(LegalizeAction::NarrowScalar, Types, Mutation);
  }

  /// Add more elements to reach the type selected by the mutation if the
  /// predicate is true.
  LegalizeRuleSet &moreElementsIf(LegalityPredicate Predicate,
                                  LegalizeMutation Mutation) {
    // We have no choice but to conservatively assume that an action with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::MoreElements, Predicate, Mutation);
  }
  /// Remove elements to reach the type selected by the mutation if the
  /// predicate is true.
  LegalizeRuleSet &fewerElementsIf(LegalityPredicate Predicate,
                                   LegalizeMutation Mutation) {
    // We have no choice but to conservatively assume that an action with a
    // free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::FewerElements, Predicate, Mutation);
  }

  /// The instruction is unsupported.
  LegalizeRuleSet &unsupported() {
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Unsupported, always);
  }
  LegalizeRuleSet &unsupportedIf(LegalityPredicate Predicate) {
    return actionIf(LegalizeAction::Unsupported, Predicate);
  }

  LegalizeRuleSet &unsupportedFor(std::initializer_list<LLT> Types) {
    return actionFor(LegalizeAction::Unsupported, Types);
  }

  LegalizeRuleSet &unsupportedIfMemSizeNotPow2() {
    return actionIf(LegalizeAction::Unsupported,
                    LegalityPredicates::memSizeInBytesNotPow2(0));
  }

  /// Lower a memory operation if the memory size, rounded to bytes, is not a
  /// power of 2. For example, this will not trigger for s1 or s7, but will for
  /// s24.
  LegalizeRuleSet &lowerIfMemSizeNotPow2() {
    return actionIf(LegalizeAction::Lower,
                    LegalityPredicates::memSizeInBytesNotPow2(0));
  }

  /// Lower a memory operation if the memory access size is not a round power of
  /// 2 byte size. This is stricter than lowerIfMemSizeNotPow2, and more likely
  /// what you want (e.g. this will lower s1, s7 and s24).
  LegalizeRuleSet &lowerIfMemSizeNotByteSizePow2() {
    return actionIf(LegalizeAction::Lower,
                    LegalityPredicates::memSizeNotByteSizePow2(0));
  }

  LegalizeRuleSet &customIf(LegalityPredicate Predicate) {
    // We have no choice but to conservatively assume that a custom action with
    // a free-form user-provided Predicate properly handles all type indices:
    markAllIdxsAsCovered();
    return actionIf(LegalizeAction::Custom, Predicate);
  }
  LegalizeRuleSet &customFor(std::initializer_list<LLT> Types) {
    return actionFor(LegalizeAction::Custom, Types);
  }

  /// The instruction is custom when type indexes 0 and 1 are any type pair in
  /// the given list.
  LegalizeRuleSet &customFor(std::initializer_list<std::pair<LLT, LLT>> Types) {
    return actionFor(LegalizeAction::Custom, Types);
  }

  LegalizeRuleSet &customForCartesianProduct(std::initializer_list<LLT> Types) {
    return actionForCartesianProduct(LegalizeAction::Custom, Types);
  }
  /// The instruction is custom when type indexes 0 and 1 are both in their
  /// respective lists.
  LegalizeRuleSet &
  customForCartesianProduct(std::initializer_list<LLT> Types0,
                            std::initializer_list<LLT> Types1) {
    return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1);
  }
  /// The instruction is custom when type indexes 0, 1, and 2 are all in
  /// their respective lists.
  LegalizeRuleSet &
  customForCartesianProduct(std::initializer_list<LLT> Types0,
                            std::initializer_list<LLT> Types1,
                            std::initializer_list<LLT> Types2) {
    return actionForCartesianProduct(LegalizeAction::Custom, Types0, Types1,
                                     Types2);
  }

  /// Unconditionally custom lower.
  LegalizeRuleSet &custom() {
    return customIf(always);
  }

  /// Widen the scalar to the next power of two that is at least MinSize.
  /// No effect if the type is not a scalar or its size is already a power of
  /// two.
  LegalizeRuleSet &widenScalarToNextPow2(unsigned TypeIdx,
                                         unsigned MinSize = 0) {
    using namespace LegalityPredicates;
    return actionIf(
        LegalizeAction::WidenScalar, sizeNotPow2(typeIdx(TypeIdx)),
        LegalizeMutations::widenScalarOrEltToNextPow2(TypeIdx, MinSize));
  }

  /// Widen the scalar to the next multiple of Size. No effect if the
  /// type is not a scalar or its size is already a multiple of Size.
  LegalizeRuleSet &widenScalarToNextMultipleOf(unsigned TypeIdx,
                                               unsigned Size) {
    using namespace LegalityPredicates;
    return actionIf(
        LegalizeAction::WidenScalar, sizeNotMultipleOf(typeIdx(TypeIdx), Size),
        LegalizeMutations::widenScalarOrEltToNextMultipleOf(TypeIdx, Size));
  }

  /// Widen the scalar or vector element type to the next power of two that is
  /// at least MinSize. No effect if the scalar or element size is already a
  /// power of two.
  LegalizeRuleSet &widenScalarOrEltToNextPow2(unsigned TypeIdx,
                                              unsigned MinSize = 0) {
    using namespace LegalityPredicates;
    return actionIf(
        LegalizeAction::WidenScalar, scalarOrEltSizeNotPow2(typeIdx(TypeIdx)),
        LegalizeMutations::widenScalarOrEltToNextPow2(TypeIdx, MinSize));
  }

  LegalizeRuleSet &narrowScalar(unsigned TypeIdx, LegalizeMutation Mutation) {
    using namespace LegalityPredicates;
    return actionIf(LegalizeAction::NarrowScalar, isScalar(typeIdx(TypeIdx)),
                    Mutation);
  }

  LegalizeRuleSet &scalarize(unsigned TypeIdx) {
    using namespace LegalityPredicates;
    return actionIf(LegalizeAction::FewerElements, isVector(typeIdx(TypeIdx)),
                    LegalizeMutations::scalarize(TypeIdx));
  }

  LegalizeRuleSet &scalarizeIf(LegalityPredicate Predicate, unsigned TypeIdx) {
    using namespace LegalityPredicates;
    return actionIf(LegalizeAction::FewerElements,
                    all(Predicate, isVector(typeIdx(TypeIdx))),
                    LegalizeMutations::scalarize(TypeIdx));
  }

  /// Ensure the scalar or element is at least as wide as Ty.
  LegalizeRuleSet &minScalarOrElt(unsigned TypeIdx, const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(LegalizeAction::WidenScalar,
                    scalarOrEltNarrowerThan(TypeIdx, Ty.getScalarSizeInBits()),
                    changeElementTo(typeIdx(TypeIdx), Ty));
  }

  /// Ensure the scalar or element is at least as wide as Ty.
  LegalizeRuleSet &minScalarOrEltIf(LegalityPredicate Predicate,
                                    unsigned TypeIdx, const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(LegalizeAction::WidenScalar,
                    all(Predicate, scalarOrEltNarrowerThan(
                                       TypeIdx, Ty.getScalarSizeInBits())),
                    changeElementTo(typeIdx(TypeIdx), Ty));
  }

  /// Ensure the vector size is at least as wide as VectorSize by promoting the
  /// element.
  LegalizeRuleSet &widenVectorEltsToVectorMinSize(unsigned TypeIdx,
                                                  unsigned VectorSize) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(
        LegalizeAction::WidenScalar,
        [=](const LegalityQuery &Query) {
          const LLT VecTy = Query.Types[TypeIdx];
          return VecTy.isVector() && !VecTy.isScalable() &&
                 VecTy.getSizeInBits() < VectorSize;
        },
        [=](const LegalityQuery &Query) {
          const LLT VecTy = Query.Types[TypeIdx];
          unsigned NumElts = VecTy.getNumElements();
          unsigned MinSize = VectorSize / NumElts;
          LLT NewTy = LLT::fixed_vector(NumElts, LLT::scalar(MinSize));
          return std::make_pair(TypeIdx, NewTy);
        });
  }

  /// Ensure the scalar is at least as wide as Ty.
  LegalizeRuleSet &minScalar(unsigned TypeIdx, const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(LegalizeAction::WidenScalar,
                    scalarNarrowerThan(TypeIdx, Ty.getSizeInBits()),
                    changeTo(typeIdx(TypeIdx), Ty));
  }

  /// Ensure the scalar is at least as wide as Ty if condition is met.
  LegalizeRuleSet &minScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
                               const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(
        LegalizeAction::WidenScalar,
        [=](const LegalityQuery &Query) {
          const LLT QueryTy = Query.Types[TypeIdx];
          return QueryTy.isScalar() &&
                 QueryTy.getSizeInBits() < Ty.getSizeInBits() &&
                 Predicate(Query);
        },
        changeTo(typeIdx(TypeIdx), Ty));
  }

  /// Ensure the scalar or element is at most as wide as Ty.
  LegalizeRuleSet &maxScalarOrElt(unsigned TypeIdx, const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(LegalizeAction::NarrowScalar,
                    scalarOrEltWiderThan(TypeIdx, Ty.getScalarSizeInBits()),
                    changeElementTo(typeIdx(TypeIdx), Ty));
  }

  /// Ensure the scalar is at most as wide as Ty.
  LegalizeRuleSet &maxScalar(unsigned TypeIdx, const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(LegalizeAction::NarrowScalar,
                    scalarWiderThan(TypeIdx, Ty.getSizeInBits()),
                    changeTo(typeIdx(TypeIdx), Ty));
  }

  /// Conditionally limit the maximum size of the scalar.
  /// For example, when the maximum size of one type depends on the size of
  /// another such as extracting N bits from an M bit container.
  LegalizeRuleSet &maxScalarIf(LegalityPredicate Predicate, unsigned TypeIdx,
                               const LLT Ty) {
    using namespace LegalityPredicates;
    using namespace LegalizeMutations;
    return actionIf(
        LegalizeAction::NarrowScalar,
        [=](const LegalityQuery &Query) {
          const LLT QueryTy = Query.Types[TypeIdx];
          return QueryTy.isScalar() &&
                 QueryTy.getSizeInBits() > Ty.getSizeInBits() &&
                 Predicate(Query);
        },
        changeElementTo(typeIdx(TypeIdx), Ty));
  }

  /// Limit the range of scalar sizes to MinTy and MaxTy.
  LegalizeRuleSet &clampScalar(unsigned TypeIdx, const LLT MinTy,
                               const LLT MaxTy) {
    assert(MinTy.isScalar() && MaxTy.isScalar() && "Expected scalar types");
    return minScalar(TypeIdx, MinTy).maxScalar(TypeIdx, MaxTy);
  }
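
  // Illustrative sketch, with s16/s32/s64/s128 as the usual LLT scalars:
  // after .clampScalar(0, s32, s64), an s16 at type index 0 is widened to s32
  // and an s128 is narrowed to s64; sizes already in [32, 64] are left to
  // other rules.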

  /// Limit the range of scalar sizes to MinTy and MaxTy.
  LegalizeRuleSet &clampScalarOrElt(unsigned TypeIdx, const LLT MinTy,
                                    const LLT MaxTy) {
    return minScalarOrElt(TypeIdx, MinTy).maxScalarOrElt(TypeIdx, MaxTy);
  }

  /// Widen the scalar to match the size of another.
  LegalizeRuleSet &minScalarSameAs(unsigned TypeIdx, unsigned LargeTypeIdx) {
    typeIdx(TypeIdx);
    return widenScalarIf(
        [=](const LegalityQuery &Query) {
          return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
                 Query.Types[TypeIdx].getSizeInBits();
        },
        LegalizeMutations::changeElementSizeTo(TypeIdx, LargeTypeIdx));
  }

  /// Narrow the scalar to match the size of another.
  LegalizeRuleSet &maxScalarSameAs(unsigned TypeIdx, unsigned NarrowTypeIdx) {
    typeIdx(TypeIdx);
    return narrowScalarIf(
        [=](const LegalityQuery &Query) {
          return Query.Types[NarrowTypeIdx].getScalarSizeInBits() <
                 Query.Types[TypeIdx].getSizeInBits();
        },
        LegalizeMutations::changeElementSizeTo(TypeIdx, NarrowTypeIdx));
  }

  /// Change the type \p TypeIdx to have the same scalar size as type \p
  /// SameSizeIdx.
  LegalizeRuleSet &scalarSameSizeAs(unsigned TypeIdx, unsigned SameSizeIdx) {
    return minScalarSameAs(TypeIdx, SameSizeIdx)
          .maxScalarSameAs(TypeIdx, SameSizeIdx);
  }

  /// Conditionally widen the scalar or elt to match the size of another.
  LegalizeRuleSet &minScalarEltSameAsIf(LegalityPredicate Predicate,
                                        unsigned TypeIdx,
                                        unsigned LargeTypeIdx) {
    typeIdx(TypeIdx);
    return widenScalarIf(
        [=](const LegalityQuery &Query) {
          return Query.Types[LargeTypeIdx].getScalarSizeInBits() >
                     Query.Types[TypeIdx].getScalarSizeInBits() &&
                 Predicate(Query);
        },
        [=](const LegalityQuery &Query) {
          LLT T = Query.Types[LargeTypeIdx];
          if (T.isVector() && T.getElementType().isPointer())
            T = T.changeElementType(LLT::scalar(T.getScalarSizeInBits()));
          return std::make_pair(TypeIdx, T);
        });
  }

  /// Conditionally narrow the scalar or elt to match the size of another.
  LegalizeRuleSet &maxScalarEltSameAsIf(LegalityPredicate Predicate,
                                        unsigned TypeIdx,
                                        unsigned SmallTypeIdx) {
    typeIdx(TypeIdx);
    return narrowScalarIf(
        [=](const LegalityQuery &Query) {
          return Query.Types[SmallTypeIdx].getScalarSizeInBits() <
                     Query.Types[TypeIdx].getScalarSizeInBits() &&
                 Predicate(Query);
        },
        [=](const LegalityQuery &Query) {
          LLT T = Query.Types[SmallTypeIdx];
          return std::make_pair(TypeIdx, T);
        });
  }

  /// Add more elements to the vector to reach the next power of two.
  /// No effect if the type is not a vector or the element count is a power of
  /// two.
  LegalizeRuleSet &moreElementsToNextPow2(unsigned TypeIdx) {
    using namespace LegalityPredicates;
    return actionIf(LegalizeAction::MoreElements,
                    numElementsNotPow2(typeIdx(TypeIdx)),
                    LegalizeMutations::moreElementsToNextPow2(TypeIdx));
  }

  /// Limit the number of elements in EltTy vectors to at least MinElements.
  LegalizeRuleSet &clampMinNumElements(unsigned TypeIdx, const LLT EltTy,
                                       unsigned MinElements) {
    // Mark the type index as covered:
    typeIdx(TypeIdx);
    return actionIf(
        LegalizeAction::MoreElements,
        [=](const LegalityQuery &Query) {
          LLT VecTy = Query.Types[TypeIdx];
          return VecTy.isVector() && VecTy.getElementType() == EltTy &&
                 VecTy.getNumElements() < MinElements;
        },
        [=](const LegalityQuery &Query) {
          LLT VecTy = Query.Types[TypeIdx];
          return std::make_pair(
              TypeIdx, LLT::fixed_vector(MinElements, VecTy.getElementType()));
        });
  }

  /// Set number of elements to nearest larger multiple of NumElts.
  LegalizeRuleSet &alignNumElementsTo(unsigned TypeIdx, const LLT EltTy,
                                      unsigned NumElts) {
    typeIdx(TypeIdx);
    return actionIf(
        LegalizeAction::MoreElements,
        [=](const LegalityQuery &Query) {
          LLT VecTy = Query.Types[TypeIdx];
          return VecTy.isVector() && VecTy.getElementType() == EltTy &&
                 (VecTy.getNumElements() % NumElts != 0);
        },
        [=](const LegalityQuery &Query) {
          LLT VecTy = Query.Types[TypeIdx];
          unsigned NewSize = alignTo(VecTy.getNumElements(), NumElts);
          return std::make_pair(
              TypeIdx, LLT::fixed_vector(NewSize, VecTy.getElementType()));
        });
  }

  /// Limit the number of elements in EltTy vectors to at most MaxElements.
  LegalizeRuleSet &clampMaxNumElements(unsigned TypeIdx, const LLT EltTy,
                                       unsigned MaxElements) {
    // Mark the type index as covered:
    typeIdx(TypeIdx);
    return actionIf(
        LegalizeAction::FewerElements,
        [=](const LegalityQuery &Query) {
          LLT VecTy = Query.Types[TypeIdx];
          return VecTy.isVector() && VecTy.getElementType() == EltTy &&
                 VecTy.getNumElements() > MaxElements;
        },
        [=](const LegalityQuery &Query) {
          LLT VecTy = Query.Types[TypeIdx];
          LLT NewTy = LLT::scalarOrVector(ElementCount::getFixed(MaxElements),
                                          VecTy.getElementType());
          return std::make_pair(TypeIdx, NewTy);
        });
  }
  /// Limit the number of elements for the given vectors to at least MinTy's
  /// number of elements and at most MaxTy's number of elements.
  ///
  /// No effect if the type is not a vector or does not have the same element
  /// type as the constraints.
  /// The element type of MinTy and MaxTy must match.
  LegalizeRuleSet &clampNumElements(unsigned TypeIdx, const LLT MinTy,
                                    const LLT MaxTy) {
    assert(MinTy.getElementType() == MaxTy.getElementType() &&
           "Expected element types to agree");

    const LLT EltTy = MinTy.getElementType();
    return clampMinNumElements(TypeIdx, EltTy, MinTy.getNumElements())
        .clampMaxNumElements(TypeIdx, EltTy, MaxTy.getNumElements());
  }
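
  // Illustrative sketch, assuming v2s32 = LLT::fixed_vector(2, 32) and
  // v4s32 = LLT::fixed_vector(4, 32): after .clampNumElements(0, v2s32, v4s32)
  // a v8s32 at type index 0 is narrowed to v4s32, while a v3s32 is untouched
  // by these two rules since its element count is neither below the minimum
  // nor above the maximum.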

  /// Express \p EltTy vectors strictly using vectors with \p NumElts elements
  /// (or scalars when \p NumElts equals 1).
  /// First pad with undef elements to the nearest larger multiple of \p
  /// NumElts. Then perform a split with all sub-instructions having the same
  /// type. Using clampMaxNumElements (non-strict) can result in a leftover
  /// instruction with a different type (fewer elements than \p NumElts, or a
  /// scalar).
  /// No effect if the type is not a vector.
  LegalizeRuleSet &clampMaxNumElementsStrict(unsigned TypeIdx, const LLT EltTy,
                                             unsigned NumElts) {
    return alignNumElementsTo(TypeIdx, EltTy, NumElts)
        .clampMaxNumElements(TypeIdx, EltTy, NumElts);
  }

  /// Fallback on the previous implementation. This should only be used while
  /// porting a rule.
  LegalizeRuleSet &fallback() {
    add({always, LegalizeAction::UseLegacyRules});
    return *this;
  }

  /// Check that no type index is obviously left unhandled by this
  /// LegalizeRuleSet in any way at all.
  /// \pre Type indices of the opcode form a dense [0, \p NumTypeIdxs) set.
  bool verifyTypeIdxsCoverage(unsigned NumTypeIdxs) const;
  /// Check that no imm index is obviously left unhandled by this
  /// LegalizeRuleSet in any way at all.
  /// \pre Imm indices of the opcode form a dense [0, \p NumImmIdxs) set.
  bool verifyImmIdxsCoverage(unsigned NumImmIdxs) const;

  /// Apply the ruleset to the given LegalityQuery.
  LegalizeActionStep apply(const LegalityQuery &Query) const;
};

class LegalizerInfo {
public:
  virtual ~LegalizerInfo() = default;

  const LegacyLegalizerInfo &getLegacyLegalizerInfo() const {
    return LegacyInfo;
  }
  LegacyLegalizerInfo &getLegacyLegalizerInfo() { return LegacyInfo; }

  unsigned getOpcodeIdxForOpcode(unsigned Opcode) const;
  unsigned getActionDefinitionsIdx(unsigned Opcode) const;

  /// Perform simple self-diagnostic and assert if there is anything obviously
  /// wrong with the actions set up.
  void verify(const MCInstrInfo &MII) const;

  /// Get the action definitions for the given opcode. Use this to run a
  /// LegalityQuery through the definitions.
  const LegalizeRuleSet &getActionDefinitions(unsigned Opcode) const;

  /// Get the action definition builder for the given opcode. Use this to define
  /// the action definitions.
  ///
  /// It is an error to request an opcode that has already been requested by the
  /// multiple-opcode variant.
  LegalizeRuleSet &getActionDefinitionsBuilder(unsigned Opcode);

  /// Get the action definition builder for the given set of opcodes. Use this
  /// to define the action definitions for multiple opcodes at once. The first
  /// opcode given will be considered the representative opcode and will hold
  /// the definitions whereas the other opcodes will be configured to refer to
  /// the representative opcode. This lowers memory requirements and very
  /// slightly improves performance.
  ///
  /// It would be very easy to introduce unexpected side-effects as a result of
  /// this aliasing if it were permitted to request different but intersecting
  /// sets of opcodes but that is difficult to keep track of. It is therefore an
  /// error to request the same opcode twice using this API, to request an
  /// opcode that already has definitions, or to use the single-opcode API on an
  /// opcode that has already been requested by this API.
  LegalizeRuleSet &
  getActionDefinitionsBuilder(std::initializer_list<unsigned> Opcodes);
  void aliasActionDefinitions(unsigned OpcodeTo, unsigned OpcodeFrom);
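
  // A minimal illustrative sketch of typical use inside a target's
  // LegalizerInfo constructor (the opcodes and types are examples only):
  //
  //   using namespace TargetOpcode;
  //   const LLT s32 = LLT::scalar(32);
  //   const LLT s64 = LLT::scalar(64);
  //   getActionDefinitionsBuilder({G_ADD, G_SUB})
  //       .legalFor({s32, s64})
  //       .widenScalarToNextPow2(0)
  //       .clampScalar(0, s32, s64);
  //   getLegacyLegalizerInfo().computeTables();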

  /// Determine what action should be taken to legalize the described
  /// instruction. Requires computeTables to have been called.
  ///
  /// \returns a description of the next legalization step to perform.
  LegalizeActionStep getAction(const LegalityQuery &Query) const;

  /// Determine what action should be taken to legalize the given generic
  /// instruction.
  ///
  /// \returns a description of the next legalization step to perform.
  LegalizeActionStep getAction(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI) const;
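
  // Illustrative sketch: constructing a query by hand and inspecting the
  // resulting step (LI is a LegalizerInfo reference; the opcode and type are
  // examples only):
  //
  //   LegalityQuery Query(TargetOpcode::G_MUL, {LLT::scalar(64)});
  //   LegalizeActionStep Step = LI.getAction(Query);
  //   bool IsLegal = Step.Action == LegalizeAction::Legal;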

  bool isLegal(const LegalityQuery &Query) const {
    return getAction(Query).Action == LegalizeAction::Legal;
  }

  bool isLegalOrCustom(const LegalityQuery &Query) const {
    auto Action = getAction(Query).Action;
    return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
  }

  bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
  bool isLegalOrCustom(const MachineInstr &MI,
                       const MachineRegisterInfo &MRI) const;

  /// Called for instructions with the Custom LegalizationAction.
  virtual bool legalizeCustom(LegalizerHelper &Helper,
                              MachineInstr &MI) const {
    llvm_unreachable("must implement this if custom action is used");
  }
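
  // Illustrative sketch of a target override (MyLegalizerInfo and
  // legalizeFRem are hypothetical):
  //
  //   bool MyLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
  //                                        MachineInstr &MI) const {
  //     switch (MI.getOpcode()) {
  //     case TargetOpcode::G_FREM:
  //       return legalizeFRem(Helper, MI);
  //     default:
  //       return false;
  //     }
  //   }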

  /// \returns true if MI is either legal or has been legalized and false if not
  /// legal.
  virtual bool legalizeIntrinsic(LegalizerHelper &Helper,
                                 MachineInstr &MI) const {
    return true;
  }

  /// Return the opcode (SEXT/ZEXT/ANYEXT) that should be performed while
  /// widening a constant of type SmallTy. Targets can override this.
  /// The default mirrors the DAG's behavior:
  /// (SmallTy.isByteSized() ? G_SEXT : G_ZEXT).
  virtual unsigned getExtOpcodeForWideningConstant(LLT SmallTy) const;

private:
  static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
  static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;

  LegalizeRuleSet RulesForOpcode[LastOp - FirstOp + 1];
  LegacyLegalizerInfo LegacyInfo;
};

#ifndef NDEBUG
/// Checks that MIR is fully legal; returns an illegal instruction if it's not,
/// nullptr otherwise.
const MachineInstr *machineFunctionIsIllegal(const MachineFunction &MF);
#endif

} // end namespace llvm.

#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZERINFO_H
//===- llvm/CodeGen/GlobalISel/CSEInfo.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Provides analysis for continuously CSEing during GISel passes.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_CSEINFO_H
#define LLVM_CODEGEN_GLOBALISEL_CSEINFO_H

#include "llvm/ADT/FoldingSet.h"
#include "llvm/CodeGen/CSEConfigBase.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {
class MachineBasicBlock;

/// A class that wraps MachineInstrs and derives from FoldingSetNode in order to
/// be uniqued in a CSEMap. The tradeoff here is extra memory allocations for
/// UniqueMachineInstr vs making MachineInstr bigger.
class UniqueMachineInstr : public FoldingSetNode {
  friend class GISelCSEInfo;
  const MachineInstr *MI;
  explicit UniqueMachineInstr(const MachineInstr *MI) : MI(MI) {}

public:
  void Profile(FoldingSetNodeID &ID);
};

// A CSE config for fully optimized builds.
class CSEConfigFull : public CSEConfigBase {
public:
  virtual ~CSEConfigFull() = default;
  bool shouldCSEOpc(unsigned Opc) override;
};

// Commonly used for O0 config.
class CSEConfigConstantOnly : public CSEConfigBase {
public:
  virtual ~CSEConfigConstantOnly() = default;
  bool shouldCSEOpc(unsigned Opc) override;
};

// Returns the standard expected CSEConfig for the given optimization level.
// We have this logic here so targets can make use of it from their derived
// TargetPassConfig, but can't put this logic into TargetPassConfig directly
// because the CodeGen library can't depend on GlobalISel.
std::unique_ptr<CSEConfigBase>
getStandardCSEConfigForOpt(CodeGenOpt::Level Level);
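
// Illustrative sketch (MyPassConfig is a hypothetical TargetPassConfig
// subclass): forward the optimization level through the getCSEConfig() hook:
//
//   std::unique_ptr<CSEConfigBase> MyPassConfig::getCSEConfig() const {
//     return getStandardCSEConfigForOpt(TM->getOptLevel());
//   }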

/// The CSE Analysis object.
/// This installs itself as a delegate to the MachineFunction to track
/// new instructions as well as deletions. It will not, however, be able to
/// track instruction mutations. In such cases, recordNewInstruction should be
/// called (e.g. inside MachineIRBuilder::recordInsertion).
/// Also, because an instruction can be inserted before any operands are added
/// to it, instructions are uniqued and inserted lazily.
/// CSEInfo should assert when trying to enter an incomplete instruction into
/// the CSEMap. There is Opcode-level granularity on which instructions can be
/// CSE'd, and for now only Generic instructions are CSEable.
class GISelCSEInfo : public GISelChangeObserver {
  // Make it accessible only to CSEMIRBuilder.
  friend class CSEMIRBuilder;

  BumpPtrAllocator UniqueInstrAllocator;
  FoldingSet<UniqueMachineInstr> CSEMap;
  MachineRegisterInfo *MRI = nullptr;
  MachineFunction *MF = nullptr;
  std::unique_ptr<CSEConfigBase> CSEOpt;
  /// Keep a cache of UniqueInstrs for each MachineInstr. In GISel,
  /// instructions are often mutated (and their ID may change completely).
  /// Whenever a mutation happens, invalidate the UniqueMachineInstr for the
  /// MachineInstr.
  DenseMap<const MachineInstr *, UniqueMachineInstr *> InstrMapping;

  /// Store instructions that are not fully formed in TemporaryInsts.
  /// Also because CSE insertion happens lazily, we can remove insts from this
  /// list and avoid inserting and then removing from the CSEMap.
  GISelWorkList<8> TemporaryInsts;

  // Only used in asserts.
  DenseMap<unsigned, unsigned> OpcodeHitTable;

  bool isUniqueMachineInstValid(const UniqueMachineInstr &UMI) const;

  void invalidateUniqueMachineInstr(UniqueMachineInstr *UMI);

  UniqueMachineInstr *getNodeIfExists(FoldingSetNodeID &ID,
                                      MachineBasicBlock *MBB, void *&InsertPos);

  /// Allocate and construct a new UniqueMachineInstr for MI and return it.
  UniqueMachineInstr *getUniqueInstrForMI(const MachineInstr *MI);

  void insertNode(UniqueMachineInstr *UMI, void *InsertPos = nullptr);

  /// Get the MachineInstr(Unique) if it exists already in the CSEMap and the
  /// same MachineBasicBlock.
  MachineInstr *getMachineInstrIfExists(FoldingSetNodeID &ID,
                                        MachineBasicBlock *MBB,
                                        void *&InsertPos);

  /// Use this method to allocate a new UniqueMachineInstr for MI and insert it
  /// into the CSEMap. shouldCSE(MI->getOpcode()) should return true for MI.
  void insertInstr(MachineInstr *MI, void *InsertPos = nullptr);

  bool HandlingRecordedInstrs = false;

public:
  GISelCSEInfo() = default;

  virtual ~GISelCSEInfo();

  void setMF(MachineFunction &MF);

  Error verify();

  /// Records a newly created inst in a list and lazily inserts it into the
  /// CSEMap. Sometimes this method might be called with a partially
  /// constructed MachineInstr (right after BuildMI, without any operands
  /// added); in such cases, the hashing of the instruction is deferred to a
  /// later stage.
  void recordNewInstruction(MachineInstr *MI);

  /// Use this callback to inform CSE about a newly fully created instruction.
  void handleRecordedInst(MachineInstr *MI);

  /// Use this callback to insert all the recorded instructions. At this point,
  /// all of these insts need to be fully constructed and should not be missing
  /// any operands.
  void handleRecordedInsts();

  /// Remove this inst from the CSE map. If this inst has not been inserted
  /// yet, it will be removed from the TemporaryInsts list if present.
  void handleRemoveInst(MachineInstr *MI);

  void releaseMemory();

  void setCSEConfig(std::unique_ptr<CSEConfigBase> Opt) {
    CSEOpt = std::move(Opt);
  }

  bool shouldCSE(unsigned Opc) const;

  void analyze(MachineFunction &MF);

  void countOpcodeHit(unsigned Opc);

  void print();

  // Observer API
  void erasingInstr(MachineInstr &MI) override;
  void createdInstr(MachineInstr &MI) override;
  void changingInstr(MachineInstr &MI) override;
  void changedInstr(MachineInstr &MI) override;
};

class TargetRegisterClass;
class RegisterBank;

// Simple builder class to easily profile properties about MIs.
class GISelInstProfileBuilder {
  FoldingSetNodeID &ID;
  const MachineRegisterInfo &MRI;

public:
  GISelInstProfileBuilder(FoldingSetNodeID &ID, const MachineRegisterInfo &MRI)
      : ID(ID), MRI(MRI) {}
  // Profiling methods.
  const GISelInstProfileBuilder &addNodeIDOpcode(unsigned Opc) const;
  const GISelInstProfileBuilder &addNodeIDRegType(const LLT Ty) const;
  const GISelInstProfileBuilder &addNodeIDRegType(const Register) const;

  const GISelInstProfileBuilder &
  addNodeIDRegType(const TargetRegisterClass *RC) const;
  const GISelInstProfileBuilder &addNodeIDRegType(const RegisterBank *RB) const;

  const GISelInstProfileBuilder &addNodeIDRegNum(Register Reg) const;

  const GISelInstProfileBuilder &addNodeIDReg(Register Reg) const;

  const GISelInstProfileBuilder &addNodeIDImmediate(int64_t Imm) const;
  const GISelInstProfileBuilder &
  addNodeIDMBB(const MachineBasicBlock *MBB) const;

  const GISelInstProfileBuilder &
  addNodeIDMachineOperand(const MachineOperand &MO) const;

  const GISelInstProfileBuilder &addNodeIDFlag(unsigned Flag) const;
  const GISelInstProfileBuilder &addNodeID(const MachineInstr *MI) const;
};

/// Simple wrapper that does the following:
/// 1) Lazily evaluates the MachineFunction to compute CSEable instructions.
/// 2) Allows configuration of which instructions are CSE'd through a CSEConfig
///    object, via a get method which takes a CSEConfig object.
class GISelCSEAnalysisWrapper {
  GISelCSEInfo Info;
  MachineFunction *MF = nullptr;
  bool AlreadyComputed = false;

public:
  /// Takes a CSEConfigBase object that defines what opcodes get CSE'd.
  /// If a CSEConfig is already set and the CSE analysis has been preserved,
  /// it will not use the new CSEOpt (pass ReCompute = true to force using the
  /// new CSEOpt).
  GISelCSEInfo &get(std::unique_ptr<CSEConfigBase> CSEOpt,
                    bool ReCompute = false);
  void setMF(MachineFunction &MFunc) { MF = &MFunc; }
  void setComputed(bool Computed) { AlreadyComputed = Computed; }
  void releaseMemory() { Info.releaseMemory(); }
};

/// The actual analysis pass wrapper.
class GISelCSEAnalysisWrapperPass : public MachineFunctionPass {
  GISelCSEAnalysisWrapper Wrapper;

public:
  static char ID;
  GISelCSEAnalysisWrapperPass();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  const GISelCSEAnalysisWrapper &getCSEWrapper() const { return Wrapper; }
  GISelCSEAnalysisWrapper &getCSEWrapper() { return Wrapper; }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void releaseMemory() override {
    Wrapper.releaseMemory();
    Wrapper.setComputed(false);
  }
};

} // namespace llvm

#endif
//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file describes the interface of the Localizer pass.
/// This pass moves/duplicates constant-like instructions close to their uses.
/// Its primary goal is to work around the deficiencies of the fast register
/// allocator.
/// With GlobalISel, constants are all materialized in the entry block of
/// a function. However, the fast allocator cannot rematerialize constants; it
/// has many more live-ranges to deal with and will most likely end up
/// spilling a lot.
/// By pushing constants close to their uses, we create only small
/// live-ranges.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H

#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

namespace llvm {
// Forward declarations.
class AnalysisUsage;
class MachineBasicBlock;
class MachineInstr;
class MachineOperand;
class MachineRegisterInfo;
class TargetTransformInfo;

/// This pass implements the localization mechanism described at the
/// top of this file. A notable property of the implementation is that
/// it will materialize one and only one instance of a constant per
/// basic block, thus enabling reuse of that constant within that block.
/// Moreover, it only materializes constants in blocks where they
/// are used. PHI uses are considered to happen at the end of the
/// related predecessor block.
class Localizer : public MachineFunctionPass {
public:
  static char ID;

private:
  /// An input function to decide if the pass should run or not
  /// on the given MachineFunction.
  std::function<bool(const MachineFunction &)> DoNotRunPass;

  /// MRI contains all the register class/bank information that this
  /// pass uses and updates.
  MachineRegisterInfo *MRI = nullptr;
  /// TTI used for getting remat costs for instructions.
  TargetTransformInfo *TTI = nullptr;

  /// Check if \p MOUse is used in the same basic block as \p Def.
  /// If the use is in the same block, we say it is local.
  /// When the use is not local, \p InsertMBB will contain the basic
  /// block where \p Def should be inserted to make the use local.
  static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
                         MachineBasicBlock *&InsertMBB);

  /// Initialize the field members using \p MF.
  void init(MachineFunction &MF);

  typedef SmallSetVector<MachineInstr *, 32> LocalizedSetVecT;

  /// Return true if \p Op is a PHI operand that is not unique within that
  /// PHI, that is, there are other operands in the PHI with the same
  /// register.
  bool isNonUniquePhiValue(MachineOperand &Op) const;

  /// Do inter-block localization from the entry block.
  bool localizeInterBlock(MachineFunction &MF,
                          LocalizedSetVecT &LocalizedInstrs);

  /// Do intra-block localization of already localized instructions.
  bool localizeIntraBlock(LocalizedSetVecT &LocalizedInstrs);

public:
  Localizer();
  Localizer(std::function<bool(const MachineFunction &)>);
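
  // Illustrative sketch: construct the pass with a functor that skips selected
  // functions (returning true means "do not run on this function"):
  //
  //   new Localizer(
  //       [](const MachineFunction &MF) { return MF.getFunction().hasOptNone(); });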

  StringRef getPassName() const override { return "Localizer"; }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
        .set(MachineFunctionProperties::Property::IsSSA);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // End namespace llvm.

#endif
//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===--------------------------------------------------------------------===//
/// \file
/// This contains common combine transformations that may be used in a combine
/// pass, or by the target elsewhere.
/// Targets can pick individual opcode transformations from the helper or use
/// tryCombine, which invokes all transformations. All of the transformations
/// return true if the MachineInstr changed and false otherwise.
///
//===--------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINERHELPER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/InstrTypes.h"
#include <functional>

namespace llvm {

class GISelChangeObserver;
class APFloat;
class APInt;
class ConstantFP;
class GPtrAdd;
class GStore;
class GZExtLoad;
class MachineIRBuilder;
class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
struct LegalityQuery;
class RegisterBank;
class RegisterBankInfo;
class TargetLowering;
class TargetRegisterInfo;

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

struct IndexedLoadStoreMatchInfo {
  Register Addr;
  Register Base;
  Register Offset;
  bool IsPre;
};

struct PtrAddChain {
  int64_t Imm;
  Register Base;
  const RegisterBank *Bank;
};

struct RegisterImmPair {
  Register Reg;
  int64_t Imm;
};

struct ShiftOfShiftedLogic {
  MachineInstr *Logic;
  MachineInstr *Shift2;
  Register LogicNonShiftReg;
  uint64_t ValSum;
};

using BuildFnTy = std::function<void(MachineIRBuilder &)>;
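
// Illustrative sketch: a match function can fill a BuildFnTy with the code
// that builds the replacement (Dst and Src are hypothetical registers
// captured during the match):
//
//   BuildFnTy MatchInfo = [=](MachineIRBuilder &B) { B.buildCopy(Dst, Src); };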

using OperandBuildSteps =
    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
struct InstructionBuildSteps {
  unsigned Opcode = 0;          ///< The opcode for the produced instruction.
  OperandBuildSteps OperandFns; ///< Operands to be added to the instruction.
  InstructionBuildSteps() = default;
  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
      : Opcode(Opcode), OperandFns(OperandFns) {}
};

struct InstructionStepsMatchInfo {
  /// Describes instructions to be built during a combine.
  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
  InstructionStepsMatchInfo() = default;
  InstructionStepsMatchInfo(
      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
      : InstrsToBuild(InstrsToBuild) {}
};
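
// Illustrative sketch (editor's note, not upstream code): a match function
// can describe replacement instructions as data, e.g. a single G_AND whose
// operands are appended by OperandBuildSteps callbacks, and later have
// applyBuildInstructionSteps (see CombinerHelper below) materialize them.
// Register names Dst/Src0/Src1 are hypothetical.
//
//   InstructionStepsMatchInfo MatchInfo(
//       {{TargetOpcode::G_AND,
//         {[=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
//          [=](MachineInstrBuilder &MIB) { MIB.addUse(Src0); },
//          [=](MachineInstrBuilder &MIB) { MIB.addUse(Src1); }}}});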

class CombinerHelper {
protected:
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;
  bool IsPreLegalize;
  const LegalizerInfo *LI;
  const RegisterBankInfo *RBI;
  const TargetRegisterInfo *TRI;

public:
  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
                 bool IsPreLegalize,
                 GISelKnownBits *KB = nullptr,
                 MachineDominatorTree *MDT = nullptr,
                 const LegalizerInfo *LI = nullptr);

  GISelKnownBits *getKnownBits() const {
    return KB;
  }

  MachineIRBuilder &getBuilder() const {
    return Builder;
  }

  const TargetLowering &getTargetLowering() const;

  /// \returns true if the combiner is running pre-legalization.
  bool isPreLegalize() const;

  /// \returns true if \p Query is legal on the target.
  bool isLegal(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p
  /// Query is legal on the target.
  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;

  /// \return true if the combine is running prior to legalization, or if \p Ty
  /// is a legal integer constant type on the target.
  bool isConstantLegalOrBeforeLegalizer(const LLT Ty) const;
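
  // Illustrative sketch (editor's note, not upstream code): combines that
  // create new instructions typically gate on these predicates, e.g. a
  // combine forming a zero-extending load might check (Ty and PtrTy are
  // hypothetical types):
  //
  //   if (!isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXTLOAD, {Ty, PtrTy}}))
  //     return false;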

  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of
  /// the changes.
  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                      Register ToReg) const;

  /// Replace a single register operand with a new register and inform the
  /// observer of the changes.
  void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
                        Register ToReg) const;

  /// Replace the opcode in instruction with a new opcode and inform the
  /// observer of the changes.
  void replaceOpcodeWith(MachineInstr &FromMI, unsigned ToOpcode) const;

  /// Get the register bank of \p Reg.
  /// If Reg has not been assigned a register, a register class,
  /// or a register bank, then this returns nullptr.
  ///
  /// \pre Reg.isValid()
  const RegisterBank *getRegBank(Register Reg) const;

  /// Set the register bank of \p Reg.
  /// Does nothing if the RegBank is null.
  /// This is the counterpart to getRegBank.
  void setRegBank(Register Reg, const RegisterBank *RegBank);

  /// If \p MI is COPY, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineCopy(MachineInstr &MI);
  bool matchCombineCopy(MachineInstr &MI);
  void applyCombineCopy(MachineInstr &MI);
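
  // Illustrative sketch (editor's note, not upstream code): every combine in
  // this helper follows the same match/apply split, so a driver can test a
  // pattern without mutating MIR and rewrite only on success:
  //
  //   if (Helper.matchCombineCopy(MI)) {
  //     Helper.applyCombineCopy(MI);
  //     return true; // MI changed.
  //   }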

  /// Returns true if \p DefMI precedes \p UseMI or they are the same
  /// instruction. Both must be in the same basic block.
  bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// Returns true if \p DefMI dominates \p UseMI. By definition an
  /// instruction dominates itself.
  ///
  /// If we haven't been provided with a MachineDominatorTree during
  /// construction, this function returns a conservative result that tracks just
  /// a single basic block.
  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// If \p MI is extend that consumes the result of a load, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineExtendingLoads(MachineInstr &MI);
  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
  void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);

  /// Match (and (load x), mask) -> zextload x
  bool matchCombineLoadWithAndMask(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
  /// legal and the surrounding code makes it useful.
  ///
  /// For example (pre-indexed):
  ///
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $val = G_LOAD $addr
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// or (post-indexed):
  ///
  ///     G_STORE $val, $base
  ///     [...]
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $addr = G_INDEXED_STORE $val, $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  bool tryCombineIndexedLoadStore(MachineInstr &MI);
  bool matchCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);
  void applyCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);

  bool matchSextTruncSextLoad(MachineInstr &MI);
  void applySextTruncSextLoad(MachineInstr &MI);

  /// Match sext_inreg(load p), imm -> sextload p
  bool matchSextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);
  void applySextInRegOfLoad(MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo);

  /// Try to combine G_[SU]DIV and G_[SU]REM into a single G_[SU]DIVREM
  /// when their source operands are identical.
  bool matchCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);
  void applyCombineDivRem(MachineInstr &MI, MachineInstr *&OtherMI);

  /// If a brcond's true block is not the fallthrough, make it so by inverting
  /// the condition and swapping operands.
  bool matchOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);
  void applyOptBrCondByInvertingCond(MachineInstr &MI, MachineInstr *&BrCond);

  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
  /// Returns true if MI changed.
  /// Right now, we support:
  /// - concat_vector(undef, undef) => undef
  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
  ///   build_vector(A, B, C, D)
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool tryCombineConcatVectors(MachineInstr &MI);
  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
  /// can be flattened into a build_vector.
  /// In the first case \p IsUndef will be true.
  /// In the second case \p Ops will contain the operands needed
  /// to produce the flattened build_vector.
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a flattened build_vector with \p Ops or an
  /// implicit_def if IsUndef is true.
  void applyCombineConcatVectors(MachineInstr &MI, bool IsUndef,
                                 const ArrayRef<Register> Ops);

  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
  /// Returns true if MI changed.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool tryCombineShuffleVector(MachineInstr &MI);
  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
  /// concat_vectors.
  /// \p Ops will contain the operands needed to produce the flattened
  /// concat_vectors.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool matchCombineShuffleVector(MachineInstr &MI,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a concat_vectors with \p Ops.
  void applyCombineShuffleVector(MachineInstr &MI,
                                 const ArrayRef<Register> Ops);

  /// Optimize memcpy intrinsics et al, e.g. constant len calls.
  /// \p MaxLen if non-zero specifies the max length of a mem libcall to
  /// inline.
  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);

  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
  void applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);

  /// Fold (shift (shift base, x), y) -> (shift base (x+y))
  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
  void applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);

  /// If we have a shift-by-constant of a bitwise logic op that itself has a
  /// shift-by-constant operand with identical opcode, we may be able to convert
  /// that into 2 independent shifts followed by the logic op.
  bool matchShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);
  void applyShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);

  bool matchCommuteShift(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform a multiply by a power-of-2 value to a left shift.
  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
  void applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);

  // Transform a G_SHL with an extended source into a narrower shift if
  // possible.
  bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
  void applyCombineShlOfExtend(MachineInstr &MI,
                               const RegisterImmPair &MatchData);

  /// Fold away a merge of an unmerge of the corresponding values.
  bool matchCombineMergeUnmerge(MachineInstr &MI, Register &MatchInfo);

  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
  /// type. This will not produce a shift smaller than \p TargetShiftSize.
  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
                                  unsigned &ShiftVal);
  void applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
  bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);

  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
  bool
  matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);
  void
  applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);

  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
  bool matchCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);
  void applyCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);

  /// Transform G_UNMERGE G_IMPLICIT_DEF -> G_IMPLICIT_DEF, G_IMPLICIT_DEF, ...
  bool
  matchCombineUnmergeUndef(MachineInstr &MI,
                           std::function<void(MachineIRBuilder &)> &MatchInfo);

  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
  void applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);

  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
  void applyCombineUnmergeZExtToZExt(MachineInstr &MI);

  /// Transform fp_instr(cst) to constant result of the fp operation.
  void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst);

  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
  void applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);

  /// Transform PtrToInt(IntToPtr(x)) to x.
  void applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);

  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
  bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);
  void applyCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);

  // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);
  void applyCombineConstPtrAddToI2P(MachineInstr &MI, APInt &NewCst);

  /// Transform anyext(trunc(x)) to x.
  bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
  void applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);

  /// Transform zext(trunc(x)) to x.
  bool matchCombineZextTrunc(MachineInstr &MI, Register &Reg);

  /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
  bool matchCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  void applyCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Transform fabs(fabs(x)) to fabs(x).
  void applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);

  /// Transform fabs(fneg(x)) to fabs(x).
  bool matchCombineFAbsOfFNeg(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
  bool matchCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);
  void applyCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);

  /// Transform trunc (shl x, K) to shl (trunc x), K
  ///    if K < VT.getScalarSizeInBits().
  ///
  /// Transforms trunc ([al]shr x, K) to (trunc ([al]shr (MidVT (trunc x)), K))
  ///    if K <= (MidVT.getScalarSizeInBits() - VT.getScalarSizeInBits())
  /// MidVT is obtained by finding a legal type between the trunc's src and dst
  /// types.
  bool matchCombineTruncOfShift(MachineInstr &MI,
                                std::pair<MachineInstr *, LLT> &MatchInfo);
  void applyCombineTruncOfShift(MachineInstr &MI,
                                std::pair<MachineInstr *, LLT> &MatchInfo);

  /// Transform G_MUL(x, -1) to G_SUB(0, x)
  void applyCombineMulByNegativeOne(MachineInstr &MI);

  /// Return true if any explicit use operand on \p MI is defined by a
  /// G_IMPLICIT_DEF.
  bool matchAnyExplicitUseIsUndef(MachineInstr &MI);

  /// Return true if all register explicit use operands on \p MI are defined by
  /// a G_IMPLICIT_DEF.
  bool matchAllExplicitUsesAreUndef(MachineInstr &MI);

  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
  bool matchUndefShuffleVectorMask(MachineInstr &MI);

  /// Return true if a G_STORE instruction \p MI is storing an undef value.
  bool matchUndefStore(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
  bool matchUndefSelectCmp(MachineInstr &MI);

  /// Return true if a G_{EXTRACT,INSERT}_VECTOR_ELT has an out of range index.
  bool matchInsertExtractVecEltOutOfBounds(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
  /// true, \p OpIdx will store the operand index of the known selected value.
  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);

  /// Replace an instruction with a G_FCONSTANT with value \p C.
  void replaceInstWithFConstant(MachineInstr &MI, double C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  void replaceInstWithConstant(MachineInstr &MI, int64_t C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  void replaceInstWithConstant(MachineInstr &MI, APInt C);

  /// Replace an instruction with a G_IMPLICIT_DEF.
  void replaceInstWithUndef(MachineInstr &MI);

  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
  void replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);

  /// Delete \p MI and replace all of its uses with \p Replacement.
  void replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);

  /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
  /// by equivalent instructions.
  bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);

  /// Return true if \p MOP is defined by a G_CONSTANT with a value equal to
  /// \p C.
  bool matchConstantOp(const MachineOperand &MOP, int64_t C);

  /// Optimize (cond ? x : x) -> x
  bool matchSelectSameVal(MachineInstr &MI);

  /// Optimize (x op x) -> x
  bool matchBinOpSameVal(MachineInstr &MI);

  /// Check if operand \p OpIdx is zero.
  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is undef.
  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is known to be a power of 2.
  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);

  /// Erase \p MI
  void eraseInst(MachineInstr &MI);

  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
  bool matchSimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);
  void applySimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);

  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
  bool
  matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
                                       InstructionStepsMatchInfo &MatchInfo);

  /// Replace \p MI with a series of instructions described in \p MatchInfo.
  void applyBuildInstructionSteps(MachineInstr &MI,
                                  InstructionStepsMatchInfo &MatchInfo);

  /// Match ashr (shl x, C), C -> sext_inreg (C)
  bool matchAshrShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo);
  void applyAshShlToSextInreg(MachineInstr &MI,
                              std::tuple<Register, int64_t> &MatchInfo);

  /// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
  bool matchOverlappingAnd(MachineInstr &MI,
                           BuildFnTy &MatchInfo);

  /// \return true if \p MI is a G_AND instruction whose operands are x and y
  /// where x & y == x or x & y == y. (E.g., one of the operands is an
  /// all-ones value.)
  ///
  /// \param [in] MI - The G_AND instruction.
  /// \param [out] Replacement - A register the G_AND should be replaced with on
  /// success.
  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_OR instruction whose operands are x and y
  /// where x | y == x or x | y == y. (E.g., one of the operands is an
  /// all-zeros value.)
  ///
  /// \param [in] MI - The G_OR instruction.
  /// \param [out] Replacement - A register the G_OR should be replaced with on
  /// success.
  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
  bool matchRedundantSExtInReg(MachineInstr &MI);

  /// Combine inverting a result of a compare into the opposite cond code.
  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
  void applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);

  /// Fold (xor (and x, y), y) -> (and (not x), y)
  ///@{
  bool matchXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  void applyXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  ///@}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI);
  void applyPtrAddZero(MachineInstr &MI);

  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
  void applySimplifyURemByPow2(MachineInstr &MI);

  /// Push a binary operator through a select on constants.
  ///
  /// binop (select cond, K0, K1), K2 ->
  ///   select cond, (binop K0, K2), (binop K1, K2)
  bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
  void applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);

  bool matchCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  void applyCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  /// Match expression trees of the form
  ///
  /// \code
  ///  sN *a = ...
  ///  sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
  /// \endcode
  ///
  /// And check if the tree can be replaced with a M-bit load + possibly a
  /// bswap.
  bool matchLoadOrCombine(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);
  void applyExtendThroughPhis(MachineInstr &MI, MachineInstr *&ExtMI);

  bool matchExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);
  void applyExtractVecEltBuildVec(MachineInstr &MI, Register &Reg);

  bool matchExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);
  void applyExtractAllEltsFromBuildVector(
      MachineInstr &MI,
      SmallVectorImpl<std::pair<Register, MachineInstr *>> &MatchInfo);

  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// By default, it erases the instruction \p MI from the function.
  void applyBuildFn(MachineInstr &MI, BuildFnTy &MatchInfo);
  /// Use a function which takes in a MachineIRBuilder to perform a combine.
  /// This variant does not erase \p MI after calling the build function.
  void applyBuildFnNoErase(MachineInstr &MI, BuildFnTy &MatchInfo);
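
  // Illustrative sketch (editor's note, not upstream code): BuildFnTy lets a
  // match function capture the rewrite as a closure over a MachineIRBuilder;
  // applyBuildFn then erases MI and runs the closure to emit the replacement:
  //
  //   BuildFnTy MatchInfo;
  //   if (Helper.matchOverlappingAnd(MI, MatchInfo))
  //     Helper.applyBuildFn(MI, MatchInfo); // Builds the folded op, erases MI.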

  bool matchOrShiftToFunnelShift(MachineInstr &MI, BuildFnTy &MatchInfo);
  bool matchFunnelShiftToRotate(MachineInstr &MI);
  void applyFunnelShiftToRotate(MachineInstr &MI);
  bool matchRotateOutOfRange(MachineInstr &MI);
  void applyRotateOutOfRange(MachineInstr &MI);

  /// \returns true if a G_ICMP instruction \p MI can be replaced with a true
  /// or false constant based off of KnownBits information.
  bool matchICmpToTrueFalseKnownBits(MachineInstr &MI, int64_t &MatchInfo);

  /// \returns true if a G_ICMP \p MI can be replaced with its LHS based off of
  /// KnownBits information.
  bool
  matchICmpToLHSKnownBits(MachineInstr &MI,
                          BuildFnTy &MatchInfo);

  /// \returns true if (and (or x, c1), c2) can be replaced with (and x, c2)
  bool matchAndOrDisjointMask(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchBitfieldExtractFromSExtInReg(MachineInstr &MI,
                                         BuildFnTy &MatchInfo);
  /// Match: and (lshr x, cst), mask -> ubfx x, cst, width
  bool matchBitfieldExtractFromAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match: shr (shl x, n), k -> sbfx/ubfx x, pos, width
  bool matchBitfieldExtractFromShr(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match: shr (and x, n), k -> ubfx x, pos, width
  bool matchBitfieldExtractFromShrAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  // Helpers for reassociation:
  bool matchReassocConstantInnerRHS(GPtrAdd &MI, MachineInstr *RHS,
                                    BuildFnTy &MatchInfo);
  bool matchReassocFoldConstantsInSubTree(GPtrAdd &MI, MachineInstr *LHS,
                                          MachineInstr *RHS,
                                          BuildFnTy &MatchInfo);
  bool matchReassocConstantInnerLHS(GPtrAdd &MI, MachineInstr *LHS,
                                    MachineInstr *RHS, BuildFnTy &MatchInfo);
  /// Reassociate pointer calculations with G_ADD involved, to allow better
  /// addressing mode usage.
  bool matchReassocPtrAdd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Try to reassociate the operands of a commutative binop.
  bool tryReassocBinOp(unsigned Opc, Register DstReg, Register Op0,
                       Register Op1, BuildFnTy &MatchInfo);
  /// Reassociate commutative binary operations like G_ADD.
  bool matchReassocCommBinOp(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Do constant folding when opportunities are exposed after MIR building.
  bool matchConstantFold(MachineInstr &MI, APInt &MatchInfo);

  /// \returns true if it is possible to narrow the width of a scalar binop
  /// feeding a G_AND instruction \p MI.
  bool matchNarrowBinopFeedingAnd(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Given a G_UDIV \p MI expressing a divide by constant, return an
  /// expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
  MachineInstr *buildUDivUsingMul(MachineInstr &MI);
  /// Combine G_UDIV by constant into a multiply by magic constant.
  bool matchUDivByConst(MachineInstr &MI);
  void applyUDivByConst(MachineInstr &MI);

  /// Given a G_SDIV \p MI expressing a signed divide by constant, return an
  /// expression that implements it by multiplying by a magic number.
  /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide".
  MachineInstr *buildSDivUsingMul(MachineInstr &MI);
  bool matchSDivByConst(MachineInstr &MI);
  void applySDivByConst(MachineInstr &MI);

  // Transform G_UMULH x, (1 << c) -> x >> (bitwidth - c).
  bool matchUMulHToLShr(MachineInstr &MI);
  void applyUMulHToLShr(MachineInstr &MI);

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
  bool tryCombine(MachineInstr &MI);

  /// Emit loads and stores that perform the given memcpy.
  /// Assumes \p MI is a G_MEMCPY_INLINE
  /// TODO: implement dynamically sized inline memcpy,
  ///       and rename: s/bool tryEmit/void emit/
  bool tryEmitMemcpyInline(MachineInstr &MI);

  /// Match:
  ///   (G_UMULO x, 2) -> (G_UADDO x, x)
  ///   (G_SMULO x, 2) -> (G_SADDO x, x)
  bool matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  /// (G_*MULO x, 0) -> 0 + no carry out
  bool matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  /// (G_*ADDO x, 0) -> x + no carry out
  bool matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match:
  /// (G_*ADDE x, y, 0) -> (G_*ADDO x, y)
  /// (G_*SUBE x, y, 0) -> (G_*SUBO x, y)
  bool matchAddEToAddO(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fadd x, fneg(y)) -> (fsub x, y)
  ///           (fadd fneg(x), y) -> (fsub y, x)
  ///           (fsub x, fneg(y)) -> (fadd x, y)
  ///           (fmul fneg(x), fneg(y)) -> (fmul x, y)
  ///           (fdiv fneg(x), fneg(y)) -> (fdiv x, y)
  ///           (fmad fneg(x), fneg(y), z) -> (fmad x, y, z)
  ///           (fma fneg(x), fneg(y), z) -> (fma x, y, z)
  bool matchRedundantNegOperands(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchFsubToFneg(MachineInstr &MI, Register &MatchInfo);
  void applyFsubToFneg(MachineInstr &MI, Register &MatchInfo);

  bool canCombineFMadOrFMA(MachineInstr &MI, bool &AllowFusionGlobally,
                           bool &HasFMAD, bool &Aggressive,
                           bool CanReassociate = false);

  /// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
  ///           (fadd (fmul x, y), z) -> (fmad x, y, z)
  bool matchCombineFAddFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
  ///           (fadd (fpext (fmul x, y)), z) -> (fmad (fpext x), (fpext y), z)
  bool matchCombineFAddFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo);

  /// Transform (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z))
  ///          (fadd (fmad x, y, (fmul u, v)), z) -> (fmad x, y, (fmad u, v, z))
  bool matchCombineFAddFMAFMulToFMadOrFMA(MachineInstr &MI,
                                          BuildFnTy &MatchInfo);

  // Transform (fadd (fma x, y, (fpext (fmul u, v))), z)
  //            -> (fma x, y, (fma (fpext u), (fpext v), z))
  //           (fadd (fmad x, y, (fpext (fmul u, v))), z)
  //            -> (fmad x, y, (fmad (fpext u), (fpext v), z))
  bool matchCombineFAddFpExtFMulToFMadOrFMAAggressive(MachineInstr &MI,
                                                      BuildFnTy &MatchInfo);

  /// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
  ///           (fsub (fmul x, y), z) -> (fmad x, y, -z)
  bool matchCombineFSubFMulToFMadOrFMA(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
  ///           (fsub (fneg (fmul x, y)), z) -> (fmad (fneg x), y, (fneg z))
  bool matchCombineFSubFNegFMulToFMadOrFMA(MachineInstr &MI,
                                           BuildFnTy &MatchInfo);

  /// Transform (fsub (fpext (fmul x, y)), z)
  ///           -> (fma (fpext x), (fpext y), (fneg z))
  ///           (fsub (fpext (fmul x, y)), z)
  ///           -> (fmad (fpext x), (fpext y), (fneg z))
  bool matchCombineFSubFpExtFMulToFMadOrFMA(MachineInstr &MI,
                                            BuildFnTy &MatchInfo);

  /// Transform (fsub (fpext (fneg (fmul x, y))), z)
  ///           -> (fneg (fma (fpext x), (fpext y), z))
  ///           (fsub (fpext (fneg (fmul x, y))), z)
  ///           -> (fneg (fmad (fpext x), (fpext y), z))
  bool matchCombineFSubFpExtFNegFMulToFMadOrFMA(MachineInstr &MI,
                                                BuildFnTy &MatchInfo);

  /// Fold boolean selects to logical operations.
  bool matchSelectToLogical(MachineInstr &MI, BuildFnTy &MatchInfo);

  bool matchCombineFMinMaxNaN(MachineInstr &MI, unsigned &Info);

  /// Transform G_ADD(x, G_SUB(y, x)) to y.
  /// Transform G_ADD(G_SUB(y, x), x) to y.
  bool matchAddSubSameReg(MachineInstr &MI, Register &Src);

  bool matchBuildVectorIdentityFold(MachineInstr &MI, Register &MatchInfo);
  bool matchTruncBuildVectorFold(MachineInstr &MI, Register &MatchInfo);
  bool matchTruncLshrBuildVectorFold(MachineInstr &MI, Register &MatchInfo);

  /// Transform:
  ///   (x + y) - y -> x
  ///   (x + y) - x -> y
  ///   x - (y + x) -> 0 - y
  ///   x - (x + z) -> 0 - z
  bool matchSubAddSameReg(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// \returns true if it is possible to simplify a select instruction \p MI
  /// to a min/max instruction of some sort.
  bool matchSimplifySelectToMinMax(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Transform:
  ///   (X + Y) == X -> Y == 0
  ///   (X - Y) == X -> Y == 0
  ///   (X ^ Y) == X -> Y == 0
  ///   (X + Y) != X -> Y != 0
  ///   (X - Y) != X -> Y != 0
  ///   (X ^ Y) != X -> Y != 0
  bool matchRedundantBinOpInEquality(MachineInstr &MI, BuildFnTy &MatchInfo);

  /// Match shifts greater or equal to the bitwidth of the operation.
  bool matchShiftsTooBig(MachineInstr &MI);

private:
  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a post-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPostIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                              Register &Offset);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a pre-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                             Register &Offset);

  /// Helper function for matchLoadOrCombine. Searches for Registers
  /// which may have been produced by a load instruction + some arithmetic.
  ///
  /// \param [in] Root - The search root.
  ///
  /// \returns The Registers found during the search.
  std::optional<SmallVector<Register, 8>>
  findCandidatesForLoadOrCombine(const MachineInstr *Root) const;

  /// Helper function for matchLoadOrCombine.
  ///
  /// Checks if every register in \p RegsToVisit is defined by a load
  /// instruction + some arithmetic.
  ///
  /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
  /// at to the index of the load.
  /// \param [in] MemSizeInBits - The number of bits each load should produce.
  ///
  /// \returns On success, a 3-tuple containing the lowest-index load found,
  /// the lowest index, and the last load in the sequence.
  std::optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>>
  findLoadOffsetsForLoadOrCombine(
      SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
      const SmallVector<Register, 8> &RegsToVisit,
      const unsigned MemSizeInBits);

  /// Examines the G_PTR_ADD instruction \p PtrAdd and determines if performing
  /// a re-association of its operands would break an existing legal addressing
  /// mode that the address computation currently represents.
  bool reassociationCanBreakAddressingModePattern(MachineInstr &PtrAdd);

  /// Behavior when a floating point min/max is given one NaN and one
  /// non-NaN as input.
  enum class SelectPatternNaNBehaviour {
    NOT_APPLICABLE = 0, ///< NaN behavior not applicable.
    RETURNS_NAN,        ///< Given one NaN input, returns the NaN.
    RETURNS_OTHER,      ///< Given one NaN input, returns the non-NaN.
    RETURNS_ANY         ///< Given one NaN input, can return either (or both
                        ///< operands are known non-NaN).
  };

  /// \returns which of \p LHS and \p RHS would be the result of a non-equality
  /// floating point comparison where one of \p LHS and \p RHS may be NaN.
  ///
  /// If both \p LHS and \p RHS may be NaN, returns
  /// SelectPatternNaNBehaviour::NOT_APPLICABLE.
  SelectPatternNaNBehaviour
  computeRetValAgainstNaN(Register LHS, Register RHS,
                          bool IsOrderedComparison) const;

  /// Determines the floating point min/max opcode which should be used for
  /// a G_SELECT fed by a G_FCMP with predicate \p Pred.
  ///
  /// \returns 0 if this G_SELECT should not be combined to a floating point
  /// min or max. If it should be combined, returns one of
  ///
  /// * G_FMAXNUM
  /// * G_FMAXIMUM
  /// * G_FMINNUM
  /// * G_FMINIMUM
  ///
  /// Helper function for matchFPSelectToMinMax.
  unsigned getFPMinMaxOpcForSelect(CmpInst::Predicate Pred, LLT DstTy,
                                   SelectPatternNaNBehaviour VsNaNRetVal) const;

  /// Handle floating point cases for matchSimplifySelectToMinMax.
  ///
  /// E.g.
  ///
  /// select (fcmp uge x, 1.0) x, 1.0 -> fmax x, 1.0
  /// select (fcmp uge x, 1.0) 1.0, x -> fminnm x, 1.0
  bool matchFPSelectToMinMax(Register Dst, Register Cond, Register TrueVal,
                             Register FalseVal, BuildFnTy &MatchInfo);
};
} // namespace llvm

#endif
//==-- llvm/CodeGen/GlobalISel/Utils.h ---------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of helper functions used throughout the
/// GlobalISel pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_UTILS_H
#define LLVM_CODEGEN_GLOBALISEL_UTILS_H

#include "GISelWorkList.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include <cstdint>

namespace llvm {

class AnalysisUsage;
class LostDebugLocObserver;
class MachineBasicBlock;
class BlockFrequencyInfo;
class GISelKnownBits;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class MachineOptimizationRemarkEmitter;
class MachineOptimizationRemarkMissed;
struct MachinePointerInfo;
class MachineRegisterInfo;
class MCInstrDesc;
class ProfileSummaryInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetRegisterClass;
class ConstantFP;
class APFloat;

// Convenience macros for dealing with vector reduction opcodes.
#define GISEL_VECREDUCE_CASES_ALL                                              \
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:                                     \
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:                                     \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:

#define GISEL_VECREDUCE_CASES_NONSEQ                                           \
  case TargetOpcode::G_VECREDUCE_FADD:                                         \
  case TargetOpcode::G_VECREDUCE_FMUL:                                         \
  case TargetOpcode::G_VECREDUCE_FMAX:                                         \
  case TargetOpcode::G_VECREDUCE_FMIN:                                         \
  case TargetOpcode::G_VECREDUCE_ADD:                                          \
  case TargetOpcode::G_VECREDUCE_MUL:                                          \
  case TargetOpcode::G_VECREDUCE_AND:                                          \
  case TargetOpcode::G_VECREDUCE_OR:                                           \
  case TargetOpcode::G_VECREDUCE_XOR:                                          \
  case TargetOpcode::G_VECREDUCE_SMAX:                                         \
  case TargetOpcode::G_VECREDUCE_SMIN:                                         \
  case TargetOpcode::G_VECREDUCE_UMAX:                                         \
  case TargetOpcode::G_VECREDUCE_UMIN:
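
// Illustrative sketch (editor's note, not upstream code): each macro expands
// to a run of case labels, so a switch can cover every vector-reduction
// opcode without spelling them out:
//
//   switch (MI.getOpcode()) {
//   GISEL_VECREDUCE_CASES_ALL
//     return true; // MI is some G_VECREDUCE_*.
//   default:
//     return false;
//   }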

/// Try to constrain Reg to the specified register class. If this fails,
/// create a new virtual register in the correct class.
///
/// \return The virtual register constrained to the right register class.
Register constrainRegToClass(MachineRegisterInfo &MRI,
                             const TargetInstrInfo &TII,
                             const RegisterBankInfo &RBI, Register Reg,
                             const TargetRegisterClass &RegClass);

/// Constrain the register operand \p RegMO, so that it is now constrained to
/// the TargetRegisterClass passed as an argument (RegClass).
/// If this fails, create a new virtual register in the correct class and insert
/// a COPY before \p InsertPt if it is a use or after if it is a definition.
/// In both cases, the function also updates the register of \p RegMO. The
/// debug location of \p InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt,
                                  const TargetRegisterClass &RegClass,
                                  MachineOperand &RegMO);

/// Try to constrain Reg so that it is usable by argument OpIdx of the provided
/// MCInstrDesc \p II. If this fails, create a new virtual register in the
/// correct class and insert a COPY before \p InsertPt if it is a use or after
/// if it is a definition. In both cases, the function also updates the
/// register of \p RegMO.
/// This is equivalent to constrainOperandRegClass(..., RegClass, ...)
/// with RegClass obtained from the MCInstrDesc. The debug location of \p
/// InsertPt is used for the new copy.
///
/// \return The virtual register constrained to the right register class.
Register constrainOperandRegClass(const MachineFunction &MF,
                                  const TargetRegisterInfo &TRI,
                                  MachineRegisterInfo &MRI,
                                  const TargetInstrInfo &TII,
                                  const RegisterBankInfo &RBI,
                                  MachineInstr &InsertPt, const MCInstrDesc &II,
                                  MachineOperand &RegMO, unsigned OpIdx);

/// Mutate the newly-selected instruction \p I to constrain its (possibly
/// generic) virtual register operands to the instruction's register class.
/// This could involve inserting COPYs before (for uses) or after (for defs).
/// This requires the number of operands to match the instruction description.
/// \returns whether operand regclass constraining succeeded.
///
// FIXME: Not all instructions have the same number of operands. We should
// probably expose a constrain helper per operand and let the target selector
// constrain individual registers, like fast-isel.
bool constrainSelectedInstRegOperands(MachineInstr &I,
                                      const TargetInstrInfo &TII,
                                      const TargetRegisterInfo &TRI,
                                      const RegisterBankInfo &RBI);
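
// Illustrative sketch (editor's note, not upstream code): a target's
// instruction selector typically swaps in the target opcode and then
// constrains all register operands in one call (MyTarget::ADDrr is a
// hypothetical opcode):
//
//   I.setDesc(TII.get(MyTarget::ADDrr));
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);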

/// Check if DstReg can be replaced with SrcReg depending on the register
/// constraints.
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI);

/// Check whether an instruction \p MI is dead: it only defines dead virtual
/// registers, and doesn't have other side effects.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// Report an ISel error as a missed optimization remark to the LLVMContext's
/// diagnostic stream.  Set the FailedISel MachineFunction property.
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        const char *PassName, StringRef Msg,
                        const MachineInstr &MI);

/// Report an ISel warning as a missed optimization remark to the LLVMContext's
/// diagnostic stream.
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                        MachineOptimizationRemarkEmitter &MORE,
                        MachineOptimizationRemarkMissed &R);

/// If \p VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional<APInt> getIConstantVRegVal(Register VReg,
                                         const MachineRegisterInfo &MRI);

/// If \p VReg is defined by a G_CONSTANT whose value fits in an int64_t,
/// returns that value.
std::optional<int64_t> getIConstantVRegSExtVal(Register VReg,
                                               const MachineRegisterInfo &MRI);

/// Simple struct used to hold a constant integer value and a virtual
/// register.
struct ValueAndVReg {
  APInt Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT, returns its APInt value and def register.
std::optional<ValueAndVReg>
getIConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_CONSTANT or G_FCONSTANT, returns its value as APInt and def register.
std::optional<ValueAndVReg> getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI,
    bool LookThroughInstrs = true, bool LookThroughAnyExt = false);
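
// Illustrative sketch (editor's note, not upstream code): a typical caller
// folds a constant operand by walking back through copies (and, optionally,
// extends) to the defining G_CONSTANT:
//
//   if (auto ValAndVReg = getIConstantVRegValWithLookThrough(Reg, MRI)) {
//     const APInt &Cst = ValAndVReg->Value; // Constant value.
//     Register Def = ValAndVReg->VReg;      // Def register of the G_CONSTANT.
//   }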

struct FPValueAndVReg {
  APFloat Value;
  Register VReg;
};

/// If \p VReg is defined by a statically evaluable chain of instructions rooted
/// on a G_FCONSTANT, returns its APFloat value and def register.
std::optional<FPValueAndVReg>
getFConstantVRegValWithLookThrough(Register VReg,
                                   const MachineRegisterInfo &MRI,
                                   bool LookThroughInstrs = true);

const ConstantFP* getConstantFPVRegVal(Register VReg,
                                       const MachineRegisterInfo &MRI);

/// See if Reg is defined by a single def instruction that is
/// Opcode. Also try to do trivial folding if it's a COPY with
/// same types. Returns null otherwise.
MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
                           const MachineRegisterInfo &MRI);

/// Simple struct used to hold a Register value and the instruction which
/// defines it.
struct DefinitionAndSourceRegister {
  MachineInstr *MI;
  Register Reg;
};

/// Find the def instruction for \p Reg and the underlying value Register,
/// folding away any copies.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
std::optional<DefinitionAndSourceRegister>
getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Find the def instruction for \p Reg, folding away any trivial copies. May
/// return nullptr if \p Reg is not a generic virtual register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
MachineInstr *getDefIgnoringCopies(Register Reg,
                                   const MachineRegisterInfo &MRI);

/// Find the source register for \p Reg, folding away any trivial copies. It
/// will be an output register of the instruction that getDefIgnoringCopies
/// returns. May return an invalid register if \p Reg is not a generic virtual
/// register.
///
/// Also walks through hints such as G_ASSERT_ZEXT.
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI);

/// Templated variant of getOpcodeDef returning a MachineInstr-derived T.
/// See if Reg is defined by a single def instruction of type T. Also try to
/// do trivial folding if it's a COPY with same types. Returns null otherwise.
template <class T>
T *getOpcodeDef(Register Reg, const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return dyn_cast_or_null<T>(DefMI);
}
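
// Illustrative sketch (editor's note, not upstream code): combined with the
// generic-MI wrapper classes from GenericMachineInstrs.h, the templated form
// reads naturally:
//
//   if (GZExtLoad *Load = getOpcodeDef<GZExtLoad>(Reg, MRI)) {
//     Register Ptr = Load->getPointerReg();
//     // ... fold the zero-extending load.
//   }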

/// Returns an APFloat from Val converted to the appropriate size.
APFloat getAPFloatFromSize(double Val, unsigned Size);

/// Modify analysis usage so it preserves passes required for the SelectionDAG
/// fallback.
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU);

std::optional<APInt> ConstantFoldBinOp(unsigned Opcode, const Register Op1,
                                       const Register Op2,
                                       const MachineRegisterInfo &MRI);
std::optional<APFloat> ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

/// Tries to constant fold a vector binop with sources \p Op1 and \p Op2.
/// Returns an empty vector on failure.
SmallVector<APInt> ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                                           const Register Op2,
                                           const MachineRegisterInfo &MRI);

std::optional<APInt> ConstantFoldExtOp(unsigned Opcode, const Register Op1,
                                       uint64_t Imm,
                                       const MachineRegisterInfo &MRI);

std::optional<APFloat> ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy,
                                              Register Src,
                                              const MachineRegisterInfo &MRI);

/// Tries to constant fold a G_CTLZ operation on \p Src. If \p Src is a vector
/// then it tries to do an element-wise constant fold.
std::optional<SmallVector<unsigned>>
ConstantFoldCTLZ(Register Src, const MachineRegisterInfo &MRI);

/// Test if the given value is known to have exactly one bit set. This differs
/// from computeKnownBits in that it doesn't necessarily determine which bit is
/// set.
bool isKnownToBeAPowerOfTwo(Register Val, const MachineRegisterInfo &MRI,
                            GISelKnownBits *KnownBits = nullptr);

/// Returns true if \p Val can be assumed to never be a NaN. If \p SNaN is true,
/// this returns if \p Val can be assumed to never be a signaling NaN.
bool isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                     bool SNaN = false);

/// Returns true if \p Val can be assumed to never be a signaling NaN.
inline bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI) {
  return isKnownNeverNaN(Val, MRI, true);
}

Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO);

/// Return a virtual register corresponding to the incoming argument register
/// \p PhysReg. This register is expected to have class \p RC, and optional
/// type \p RegTy. This assumes all references to the register will use the
/// same type.
///
/// If there is an existing live-in argument register, it will be returned.
/// This will also ensure there is a valid copy from \p PhysReg into the
/// returned virtual register.
Register getFunctionLiveInPhysReg(MachineFunction &MF,
                                  const TargetInstrInfo &TII,
                                  MCRegister PhysReg,
                                  const TargetRegisterClass &RC,
                                  const DebugLoc &DL, LLT RegTy = LLT());

/// Return the least common multiple type of \p OrigTy and \p TargetTy, by
/// changing the number of vector elements or scalar bitwidth. The intent is a
/// G_MERGE_VALUES, G_BUILD_VECTOR, or G_CONCAT_VECTORS can be constructed from
/// \p OrigTy elements, and unmerged into \p TargetTy.
LLVM_READNONE
LLT getLCMType(LLT OrigTy, LLT TargetTy);

/// Return the smallest type that covers both \p OrigTy and \p TargetTy and is
/// a multiple of TargetTy.
LLVM_READNONE
LLT getCoverTy(LLT OrigTy, LLT TargetTy);

/// Return a type where the total size is the greatest common divisor of \p
/// OrigTy and \p TargetTy. This will try to either change the number of vector
/// elements, or bitwidth of scalars. The intent is the result type can be used
/// as the result of a G_UNMERGE_VALUES from \p OrigTy, and then some
/// combination of G_MERGE_VALUES, G_BUILD_VECTOR and G_CONCAT_VECTORS (possibly
/// with intermediate casts) can re-form \p TargetTy.
///
/// If these are vectors with different element types, this will try to produce
/// a vector with a compatible total size, but the element type of \p OrigTy. If
/// this can't be satisfied, this will produce a scalar smaller than the
/// original vector elements.
///
/// In the worst case, this returns LLT::scalar(1)
LLVM_READNONE
LLT getGCDType(LLT OrigTy, LLT TargetTy);
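
// Illustrative examples (editor's note, not upstream code): for scalars these
// reduce to the arithmetic lcm/gcd of the bit widths; the vector result shown
// is the expected shape under the rules above:
//
//   getLCMType(LLT::scalar(32), LLT::scalar(64));          // s64
//   getGCDType(LLT::scalar(64), LLT::scalar(32));          // s32
//   getGCDType(LLT::fixed_vector(4, 32), LLT::scalar(64)); // expected v2s32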

/// Represents a value which can be a Register or a constant.
///
/// This is useful in situations where an instruction may have an interesting
/// register operand or interesting constant operand. For a concrete example,
/// \see getVectorSplat.
class RegOrConstant {
  int64_t Cst;
  Register Reg;
  bool IsReg;

public:
  explicit RegOrConstant(Register Reg) : Reg(Reg), IsReg(true) {}
  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}
  bool isReg() const { return IsReg; }
  bool isCst() const { return !IsReg; }
  Register getReg() const {
    assert(isReg() && "Expected a register!");
    return Reg;
  }
  int64_t getCst() const {
    assert(isCst() && "Expected a constant!");
    return Cst;
  }
};
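
// Illustrative sketch (editor's note, not upstream code): consumers branch on
// the variant, e.g. when inspecting a splat returned by getVectorSplat
// (declared below); handleConstantSplat/handleRegisterSplat are hypothetical:
//
//   if (auto Splat = getVectorSplat(MI, MRI)) {
//     if (Splat->isCst())
//       handleConstantSplat(Splat->getCst());
//     else
//       handleRegisterSplat(Splat->getReg());
//   }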

/// \returns The splat index of a G_SHUFFLE_VECTOR \p MI when \p MI is a splat.
/// If \p MI is not a splat, returns std::nullopt.
std::optional<int> getSplatIndex(MachineInstr &MI);

/// \returns the scalar integral splat value of \p Reg if possible.
std::optional<APInt> getIConstantSplatVal(const Register Reg,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar integral splat value defined by \p MI if possible.
std::optional<APInt> getIConstantSplatVal(const MachineInstr &MI,
                                          const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value of \p Reg if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const Register Reg,
                                                const MachineRegisterInfo &MRI);

/// \returns the scalar sign extended integral splat value defined by \p MI if
/// possible.
std::optional<int64_t> getIConstantSplatSExtVal(const MachineInstr &MI,
                                                const MachineRegisterInfo &MRI);

/// Returns a floating point scalar constant of a build vector splat if it
/// exists. When \p AllowUndef == true, some elements can be undef but not all.
std::optional<FPValueAndVReg> getFConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef = true);

/// Return true if the specified register is defined by G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const Register Reg,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are \p SplatValue or undef.
bool isBuildVectorConstantSplat(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                int64_t SplatValue, bool AllowUndef);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are 0 or undef.
bool isBuildVectorAllZeros(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);

/// Return true if the specified instruction is a G_BUILD_VECTOR or
/// G_BUILD_VECTOR_TRUNC where all of the elements are ~0 or undef.
bool isBuildVectorAllOnes(const MachineInstr &MI,
                          const MachineRegisterInfo &MRI,
                          bool AllowUndef = false);

/// Return true if the specified instruction is known to be a constant, or a
/// vector of constants.
///
/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
/// such as G_GLOBAL_VALUE will also be considered.
bool isConstantOrConstantVector(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowFP = true,
                                bool AllowOpaqueConstants = true);

/// Return true if the value is a constant 0 integer or a splatted vector of a
/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
/// for null values.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
                       bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs if \p AllowUndefs is false).
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowUndefs = false);

/// \returns a value when \p MI is a vector splat. The splat can be either a
/// Register or a constant.
///
/// Examples:
///
/// \code
///   %reg = COPY $physreg
///   %reg_splat = G_BUILD_VECTOR %reg, %reg, ..., %reg
/// \endcode
///
/// If called on the G_BUILD_VECTOR above, this will return a RegOrConstant
/// containing %reg.
///
/// \code
///   %cst = G_CONSTANT iN 4
///   %constant_splat = G_BUILD_VECTOR %cst, %cst, ..., %cst
/// \endcode
///
/// In the above case, this will return a RegOrConstant containing 4.
std::optional<RegOrConstant> getVectorSplat(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a build vector of
/// constant integers. Treats undef values as constants.
bool isConstantOrConstantVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Determines if \p MI defines a constant integer or a splat vector of
/// constant integers.
/// \returns the scalar constant or std::nullopt.
std::optional<APInt>
isConstantOrConstantSplatVector(MachineInstr &MI,
                                const MachineRegisterInfo &MRI);

/// Attempt to match a unary predicate against a scalar/splat constant or every
/// element of a constant G_BUILD_VECTOR. If \p ConstVal is null, the source
/// value was undef.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg,
                         std::function<bool(const Constant *ConstVal)> Match,
                         bool AllowUndefs = false);
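
// Hedged usage sketch of matchUnaryPredicate (assumes \p MRI and \p Reg are in
// scope and that Reg is defined by a constant or constant G_BUILD_VECTOR):
//
// \code
//   bool AllPow2 = matchUnaryPredicate(MRI, Reg, [](const Constant *C) {
//     const auto *CI = dyn_cast_or_null<ConstantInt>(C);
//     return CI && CI->getValue().isPowerOf2();
//   });
// \endcode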

/// Returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a true value.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                    bool IsFP);
/// \returns true if given the TargetLowering's boolean contents information,
/// the value \p Val contains a false value.
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector,
                     bool IsFP);

/// Returns an integer representing true, as defined by the
/// TargetBooleanContents.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP);

/// Returns true if the given block should be optimized for size.
bool shouldOptForSize(const MachineBasicBlock &MBB, ProfileSummaryInfo *PSI,
                      BlockFrequencyInfo *BFI);

using SmallInstListTy = GISelWorkList<4>;
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver,
                      SmallInstListTy &DeadInstChain);
void eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs, MachineRegisterInfo &MRI,
                 LostDebugLocObserver *LocObserver = nullptr);
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                LostDebugLocObserver *LocObserver = nullptr);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);

} // End namespace llvm.
#endif
//===- llvm/CodeGen/GlobalISel/InstructionSelector.h ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API for the instruction selector.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H

#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"

namespace llvm {
class InstructionSelector : public GIMatchTableExecutor {
public:
  virtual ~InstructionSelector();

  /// Select the (possibly generic) instruction \p I to only use target-specific
  /// opcodes. It is OK to insert multiple instructions, but they cannot be
  /// generic pre-isel instructions.
  ///
  /// \returns whether selection succeeded.
  /// \pre  I.getParent() && I.getParent()->getParent()
  /// \post
  ///   if returns true:
  ///     for I in all mutated/inserted instructions:
  ///       !isPreISelGenericOpcode(I.getOpcode())
  virtual bool select(MachineInstr &I) = 0;
};
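
// Hedged sketch of a typical target selector; "MyTargetInstructionSelector"
// and selectCustom() are hypothetical names. In real backends, selectImpl()
// is the TableGen-generated entry point that runs the match table.
//
// \code
//   class MyTargetInstructionSelector : public InstructionSelector {
//   public:
//     bool select(MachineInstr &I) override {
//       if (selectImpl(I, *CoverageInfo)) // generated patterns first
//         return true;
//       return selectCustom(I);           // hypothetical manual fallback
//     }
//   };
// \endcode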
} // namespace llvm

#endif
//===- llvm/CodeGen/GlobalISel/LegacyLegalizerInfo.h ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Interface for Targets to specify which operations they can successfully
/// select and how the others should be expanded most efficiently.
/// This implementation has been deprecated for a long time, but it is still
/// in use in a few places.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGACYLEGALIZERINFO_H
#define LLVM_CODEGEN_GLOBALISEL_LEGACYLEGALIZERINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include <unordered_map>

namespace llvm {
struct LegalityQuery;

namespace LegacyLegalizeActions {
enum LegacyLegalizeAction : std::uint8_t {
  /// The operation is expected to be selectable directly by the target, and
  /// no transformation is necessary.
  Legal,

  /// The operation should be synthesized from multiple instructions acting on
  /// a narrower scalar base-type. For example a 64-bit add might be
  /// implemented in terms of 32-bit add-with-carry.
  NarrowScalar,

  /// The operation should be implemented in terms of a wider scalar
  /// base-type. For example a <2 x s8> add could be implemented as a <2
  /// x s32> add (ignoring the high bits).
  WidenScalar,

  /// The (vector) operation should be implemented by splitting it into
  /// sub-vectors where the operation is legal. For example a <8 x s64> add
  /// might be implemented as 4 separate <2 x s64> adds.
  FewerElements,

  /// The (vector) operation should be implemented by widening the input
  /// vector and ignoring the lanes added by doing so. For example <2 x i8> is
  /// rarely legal, but you might perform an <8 x i8> and then only look at
  /// the first two results.
  MoreElements,

  /// Perform the operation on a different, but equivalently sized type.
  Bitcast,

  /// The operation itself must be expressed in terms of simpler actions on
  /// this target. E.g. a SREM replaced by an SDIV and subtraction.
  Lower,

  /// The operation should be implemented as a call to some kind of runtime
  /// support library. For example this usually happens on machines that don't
  /// support floating-point operations natively.
  Libcall,

  /// The target wants to do something special with this combination of
  /// operand and type. A callback will be issued when it is needed.
  Custom,

  /// This operation is completely unsupported on the target. A programming
  /// error has occurred.
  Unsupported,

  /// Sentinel value for when no action was found in the specified table.
  NotFound,
};
} // end namespace LegacyLegalizeActions
raw_ostream &operator<<(raw_ostream &OS,
                        LegacyLegalizeActions::LegacyLegalizeAction Action);

/// Legalization is decided based on an instruction's opcode, which type slot
/// we're considering, and what the existing type is. These aspects are gathered
/// together for convenience in the InstrAspect class.
struct InstrAspect {
  unsigned Opcode;
  unsigned Idx = 0;
  LLT Type;

  InstrAspect(unsigned Opcode, LLT Type) : Opcode(Opcode), Type(Type) {}
  InstrAspect(unsigned Opcode, unsigned Idx, LLT Type)
      : Opcode(Opcode), Idx(Idx), Type(Type) {}

  bool operator==(const InstrAspect &RHS) const {
    return Opcode == RHS.Opcode && Idx == RHS.Idx && Type == RHS.Type;
  }
};

/// The result of a query. It either indicates a final answer of Legal or
/// Unsupported or describes an action that must be taken to make an operation
/// more legal.
struct LegacyLegalizeActionStep {
  /// The action to take or the final answer.
  LegacyLegalizeActions::LegacyLegalizeAction Action;
  /// If describing an action, the type index to change. Otherwise zero.
  unsigned TypeIdx;
  /// If describing an action, the new type for TypeIdx. Otherwise LLT{}.
  LLT NewType;

  LegacyLegalizeActionStep(LegacyLegalizeActions::LegacyLegalizeAction Action,
                           unsigned TypeIdx, const LLT NewType)
      : Action(Action), TypeIdx(TypeIdx), NewType(NewType) {}

  bool operator==(const LegacyLegalizeActionStep &RHS) const {
    return std::tie(Action, TypeIdx, NewType) ==
        std::tie(RHS.Action, RHS.TypeIdx, RHS.NewType);
  }
};
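
// Hedged sketch of interpreting a returned step (LLI, Query, MI, and the
// narrowScalar() helper below are illustrative, not part of this header):
//
// \code
//   LegacyLegalizeActionStep Step = LLI.getAction(Query);
//   if (Step.Action == LegacyLegalizeActions::NarrowScalar)
//     narrowScalar(MI, Step.TypeIdx, Step.NewType);
// \endcode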


class LegacyLegalizerInfo {
public:
  using SizeAndAction =
      std::pair<uint16_t, LegacyLegalizeActions::LegacyLegalizeAction>;
  using SizeAndActionsVec = std::vector<SizeAndAction>;
  using SizeChangeStrategy =
      std::function<SizeAndActionsVec(const SizeAndActionsVec &v)>;

  LegacyLegalizerInfo();

  static bool needsLegalizingToDifferentSize(
      const LegacyLegalizeActions::LegacyLegalizeAction Action) {
    using namespace LegacyLegalizeActions;
    switch (Action) {
    case NarrowScalar:
    case WidenScalar:
    case FewerElements:
    case MoreElements:
    case Unsupported:
      return true;
    default:
      return false;
    }
  }

  /// Compute any ancillary tables needed to quickly decide how an operation
  /// should be handled. This must be called after all "set*Action" methods,
  /// but before any query is made, or incorrect results may be returned.
  void computeTables();

  /// More friendly way to set an action for common types that have an LLT
  /// representation.
  /// The LegacyLegalizeAction must be one for which
  /// needsLegalizingToDifferentSize returns false.
  void setAction(const InstrAspect &Aspect,
                 LegacyLegalizeActions::LegacyLegalizeAction Action) {
    assert(!needsLegalizingToDifferentSize(Action));
    TablesInitialized = false;
    const unsigned OpcodeIdx = Aspect.Opcode - FirstOp;
    if (SpecifiedActions[OpcodeIdx].size() <= Aspect.Idx)
      SpecifiedActions[OpcodeIdx].resize(Aspect.Idx + 1);
    SpecifiedActions[OpcodeIdx][Aspect.Idx][Aspect.Type] = Action;
  }

  /// The setAction calls record the non-size-changing legalization actions
  /// to take on specifically-sized types. The SizeChangeStrategy defines what
  /// to do when the size of the type needs to be changed to reach a legally
  /// sized type (i.e., one that was defined through a setAction call).
  /// e.g.
  /// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
  /// setLegalizeScalarToDifferentSizeStrategy(
  ///   G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
  /// will end up defining getAction({G_ADD, 0, T}) to return the following
  /// actions for different scalar types T:
  ///  LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
  ///  LLT::scalar(32):                 {Legal, 0, LLT::scalar(32)}
  ///  LLT::scalar(33)..:               {NarrowScalar, 0, LLT::scalar(32)}
  ///
  /// If no SizeChangeStrategy gets defined through this function,
  /// the default is unsupportedForDifferentSizes.
  void setLegalizeScalarToDifferentSizeStrategy(const unsigned Opcode,
                                                const unsigned TypeIdx,
                                                SizeChangeStrategy S) {
    const unsigned OpcodeIdx = Opcode - FirstOp;
    if (ScalarSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
      ScalarSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
    ScalarSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
  }

  /// See also setLegalizeScalarToDifferentSizeStrategy.
  /// This function lets you set the SizeChangeStrategy for vector elements.
  void setLegalizeVectorElementToDifferentSizeStrategy(const unsigned Opcode,
                                                       const unsigned TypeIdx,
                                                       SizeChangeStrategy S) {
    const unsigned OpcodeIdx = Opcode - FirstOp;
    if (VectorElementSizeChangeStrategies[OpcodeIdx].size() <= TypeIdx)
      VectorElementSizeChangeStrategies[OpcodeIdx].resize(TypeIdx + 1);
    VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
  }

  /// A SizeChangeStrategy for the common case where legalization for a
  /// particular operation consists of only supporting a specific set of type
  /// sizes. E.g.
  ///   setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
  ///   setAction ({G_DIV, 0, LLT::scalar(64)}, Legal);
  ///   setLegalizeScalarToDifferentSizeStrategy(
  ///     G_DIV, 0, unsupportedForDifferentSizes);
  /// will result in getAction({G_DIV, 0, T}) returning Legal for s32 and s64,
  /// and Unsupported for all other scalar types T.
  static SizeAndActionsVec
  unsupportedForDifferentSizes(const SizeAndActionsVec &v) {
    using namespace LegacyLegalizeActions;
    return increaseToLargerTypesAndDecreaseToLargest(v, Unsupported,
                                                     Unsupported);
  }

  /// A SizeChangeStrategy for the common case where legalization for a
  /// particular operation consists of widening the type to a larger legal
  /// type, unless no such type exists, in which case the type should instead
  /// be narrowed to the largest legal type.
  static SizeAndActionsVec
  widenToLargerTypesAndNarrowToLargest(const SizeAndActionsVec &v) {
    using namespace LegacyLegalizeActions;
    assert(v.size() > 0 &&
           "At least one size that can be legalized towards is needed"
           " for this SizeChangeStrategy");
    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
                                                     NarrowScalar);
  }

  static SizeAndActionsVec
  widenToLargerTypesUnsupportedOtherwise(const SizeAndActionsVec &v) {
    using namespace LegacyLegalizeActions;
    return increaseToLargerTypesAndDecreaseToLargest(v, WidenScalar,
                                                     Unsupported);
  }

  static SizeAndActionsVec
  narrowToSmallerAndUnsupportedIfTooSmall(const SizeAndActionsVec &v) {
    using namespace LegacyLegalizeActions;
    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
                                                       Unsupported);
  }

  static SizeAndActionsVec
  narrowToSmallerAndWidenToSmallest(const SizeAndActionsVec &v) {
    using namespace LegacyLegalizeActions;
    assert(v.size() > 0 &&
           "At least one size that can be legalized towards is needed"
           " for this SizeChangeStrategy");
    return decreaseToSmallerTypesAndIncreaseToSmallest(v, NarrowScalar,
                                                       WidenScalar);
  }

  /// A SizeChangeStrategy for the common case where legalization for a
  /// particular vector operation consists of increasing the number of
  /// elements in the vector until a legal type is reached. If no such type
  /// exists, the operation should instead be legalized towards the widest
  /// vector that is still legal. E.g.
  ///   setAction({G_ADD, LLT::vector(8, 8)}, Legal);
  ///   setAction({G_ADD, LLT::vector(16, 8)}, Legal);
  ///   setAction({G_ADD, LLT::vector(2, 32)}, Legal);
  ///   setAction({G_ADD, LLT::vector(4, 32)}, Legal);
  ///   setLegalizeVectorElementToDifferentSizeStrategy(
  ///     G_ADD, 0, moreToWiderTypesAndLessToWidest);
  /// will result in the following getAction results:
  ///   * getAction({G_ADD, LLT::vector(8,8)}) returns
  ///       (Legal, vector(8,8)).
  ///   * getAction({G_ADD, LLT::vector(9,8)}) returns
  ///       (MoreElements, vector(16,8)).
  ///   * getAction({G_ADD, LLT::vector(8,32)}) returns
  ///       (FewerElements, vector(4,32)).
  static SizeAndActionsVec
  moreToWiderTypesAndLessToWidest(const SizeAndActionsVec &v) {
    using namespace LegacyLegalizeActions;
    return increaseToLargerTypesAndDecreaseToLargest(v, MoreElements,
                                                     FewerElements);
  }

  /// Helper function to implement many typical SizeChangeStrategy functions.
  static SizeAndActionsVec increaseToLargerTypesAndDecreaseToLargest(
      const SizeAndActionsVec &v,
      LegacyLegalizeActions::LegacyLegalizeAction IncreaseAction,
      LegacyLegalizeActions::LegacyLegalizeAction DecreaseAction);
  /// Helper function to implement many typical SizeChangeStrategy functions.
  static SizeAndActionsVec decreaseToSmallerTypesAndIncreaseToSmallest(
      const SizeAndActionsVec &v,
      LegacyLegalizeActions::LegacyLegalizeAction DecreaseAction,
      LegacyLegalizeActions::LegacyLegalizeAction IncreaseAction);

  LegacyLegalizeActionStep getAction(const LegalityQuery &Query) const;

  unsigned getOpcodeIdxForOpcode(unsigned Opcode) const;

private:
  /// Determine what action should be taken to legalize the given generic
  /// instruction opcode, type-index and type. Requires computeTables to have
  /// been called.
  ///
  /// \returns a pair consisting of the kind of legalization that should be
  /// performed and the destination type.
  std::pair<LegacyLegalizeActions::LegacyLegalizeAction, LLT>
  getAspectAction(const InstrAspect &Aspect) const;

  /// The SizeAndActionsVec is a representation mapping between all natural
  /// numbers and an Action. The natural number represents the bit size of
  /// the InstrAspect. For example, for a target with native support for 32-bit
  /// and 64-bit additions, you'd express that as:
  /// setScalarAction(G_ADD, 0,
  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
  ///            {32, Legal},       // bit sizes [32, 33[
  ///            {33, WidenScalar}, // bit sizes [33, 64[
  ///            {64, Legal},       // bit sizes [64, 65[
  ///            {65, NarrowScalar} // bit sizes [65, +inf[
  ///           });
  /// It may be that only 64-bit pointers are supported on your target:
  /// setPointerAction(G_PTR_ADD, 0, LLT:pointer(1),
  ///           {{1, Unsupported},  // bit sizes [ 1, 64[
  ///            {64, Legal},       // bit sizes [64, 65[
  ///            {65, Unsupported}, // bit sizes [65, +inf[
  ///           });
  void setScalarAction(const unsigned Opcode, const unsigned TypeIndex,
                       const SizeAndActionsVec &SizeAndActions) {
    const unsigned OpcodeIdx = Opcode - FirstOp;
    SmallVector<SizeAndActionsVec, 1> &Actions = ScalarActions[OpcodeIdx];
    setActions(TypeIndex, Actions, SizeAndActions);
  }
  void setPointerAction(const unsigned Opcode, const unsigned TypeIndex,
                        const unsigned AddressSpace,
                        const SizeAndActionsVec &SizeAndActions) {
    const unsigned OpcodeIdx = Opcode - FirstOp;
    if (AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace) ==
        AddrSpace2PointerActions[OpcodeIdx].end())
      AddrSpace2PointerActions[OpcodeIdx][AddressSpace] = {{}};
    SmallVector<SizeAndActionsVec, 1> &Actions =
        AddrSpace2PointerActions[OpcodeIdx].find(AddressSpace)->second;
    setActions(TypeIndex, Actions, SizeAndActions);
  }

  /// If an operation on a given vector type (say <M x iN>) isn't explicitly
  /// specified, we proceed in 2 stages. First we legalize the underlying scalar
  /// (so that there's at least one legal vector with that scalar), then we
  /// adjust the number of elements in the vector so that it is legal. The
  /// desired action in the first step is controlled by this function.
  void setScalarInVectorAction(const unsigned Opcode, const unsigned TypeIndex,
                               const SizeAndActionsVec &SizeAndActions) {
    unsigned OpcodeIdx = Opcode - FirstOp;
    SmallVector<SizeAndActionsVec, 1> &Actions =
        ScalarInVectorActions[OpcodeIdx];
    setActions(TypeIndex, Actions, SizeAndActions);
  }

  /// See also setScalarInVectorAction.
  /// This function lets you specify the number of elements in a vector that
  /// are legal for a legal element size.
  void setVectorNumElementAction(const unsigned Opcode,
                                 const unsigned TypeIndex,
                                 const unsigned ElementSize,
                                 const SizeAndActionsVec &SizeAndActions) {
    const unsigned OpcodeIdx = Opcode - FirstOp;
    if (NumElements2Actions[OpcodeIdx].find(ElementSize) ==
        NumElements2Actions[OpcodeIdx].end())
      NumElements2Actions[OpcodeIdx][ElementSize] = {{}};
    SmallVector<SizeAndActionsVec, 1> &Actions =
        NumElements2Actions[OpcodeIdx].find(ElementSize)->second;
    setActions(TypeIndex, Actions, SizeAndActions);
  }

  /// A partial SizeAndActionsVec potentially doesn't cover all bit sizes,
  /// i.e. it's OK if it doesn't start from size 1.
  static void checkPartialSizeAndActionsVector(const SizeAndActionsVec& v) {
    using namespace LegacyLegalizeActions;
#ifndef NDEBUG
    // The sizes should be in increasing order.
    int prev_size = -1;
    for (auto SizeAndAction : v) {
      assert(SizeAndAction.first > prev_size);
      prev_size = SizeAndAction.first;
    }
    // - for every Widen action, there should be a larger bitsize that
    //   can be legalized towards (e.g. Legal, Lower, Libcall or Custom
    //   action).
    // - for every Narrow action, there should be a smaller bitsize that
    //   can be legalized towards.
    int SmallestNarrowIdx = -1;
    int LargestWidenIdx = -1;
    int SmallestLegalizableToSameSizeIdx = -1;
    int LargestLegalizableToSameSizeIdx = -1;
    for (size_t i = 0; i < v.size(); ++i) {
      switch (v[i].second) {
        case FewerElements:
        case NarrowScalar:
          if (SmallestNarrowIdx == -1)
            SmallestNarrowIdx = i;
          break;
        case WidenScalar:
        case MoreElements:
          LargestWidenIdx = i;
          break;
        case Unsupported:
          break;
        default:
          if (SmallestLegalizableToSameSizeIdx == -1)
            SmallestLegalizableToSameSizeIdx = i;
          LargestLegalizableToSameSizeIdx = i;
      }
    }
    if (SmallestNarrowIdx != -1) {
      assert(SmallestLegalizableToSameSizeIdx != -1);
      assert(SmallestNarrowIdx > SmallestLegalizableToSameSizeIdx);
    }
    if (LargestWidenIdx != -1)
      assert(LargestWidenIdx < LargestLegalizableToSameSizeIdx);
#endif
  }

  /// A full SizeAndActionsVec must cover all bit sizes, i.e. it must start
  /// from size 1.
  static void checkFullSizeAndActionsVector(const SizeAndActionsVec& v) {
#ifndef NDEBUG
    // Data structure invariant: The first bit size must be size 1.
    assert(v.size() >= 1);
    assert(v[0].first == 1);
    checkPartialSizeAndActionsVector(v);
#endif
  }

  /// Sets actions for all bit sizes on a particular generic opcode, type
  /// index and scalar or pointer type.
  void setActions(unsigned TypeIndex,
                  SmallVector<SizeAndActionsVec, 1> &Actions,
                  const SizeAndActionsVec &SizeAndActions) {
    checkFullSizeAndActionsVector(SizeAndActions);
    if (Actions.size() <= TypeIndex)
      Actions.resize(TypeIndex + 1);
    Actions[TypeIndex] = SizeAndActions;
  }

  static SizeAndAction findAction(const SizeAndActionsVec &Vec,
                                  const uint32_t Size);

  /// Returns the next action needed to get the scalar or pointer type closer
  /// to being legal.
  /// E.g. findLegalAction({G_REM, 13}) should return
  /// (WidenScalar, 32). After that, findLegalAction({G_REM, 32}) will
  /// probably be called, which should return (Lower, 32).
  /// This is assuming the setScalarAction on G_REM was something like:
  /// setScalarAction(G_REM, 0,
  ///           {{1, WidenScalar},  // bit sizes [ 1, 32[
  ///            {32, Lower},       // bit sizes [32, 33[
  ///            {33, NarrowScalar} // bit sizes [33, +inf[
  ///           });
  std::pair<LegacyLegalizeActions::LegacyLegalizeAction, LLT>
  findScalarLegalAction(const InstrAspect &Aspect) const;

  /// Returns the next action needed towards legalizing the vector type.
  std::pair<LegacyLegalizeActions::LegacyLegalizeAction, LLT>
  findVectorLegalAction(const InstrAspect &Aspect) const;

  static const int FirstOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_START;
  static const int LastOp = TargetOpcode::PRE_ISEL_GENERIC_OPCODE_END;

  // Data structures used temporarily during construction of legality data:
  using TypeMap = DenseMap<LLT, LegacyLegalizeActions::LegacyLegalizeAction>;
  SmallVector<TypeMap, 1> SpecifiedActions[LastOp - FirstOp + 1];
  SmallVector<SizeChangeStrategy, 1>
      ScalarSizeChangeStrategies[LastOp - FirstOp + 1];
  SmallVector<SizeChangeStrategy, 1>
      VectorElementSizeChangeStrategies[LastOp - FirstOp + 1];
  bool TablesInitialized = false;

  // Data structures used by getAction:
  SmallVector<SizeAndActionsVec, 1> ScalarActions[LastOp - FirstOp + 1];
  SmallVector<SizeAndActionsVec, 1> ScalarInVectorActions[LastOp - FirstOp + 1];
  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
      AddrSpace2PointerActions[LastOp - FirstOp + 1];
  std::unordered_map<uint16_t, SmallVector<SizeAndActionsVec, 1>>
      NumElements2Actions[LastOp - FirstOp + 1];
};
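
// Hedged setup sketch using only the APIs declared above (opcode and type
// choices are illustrative only):
//
// \code
//   LegacyLegalizerInfo LLI;
//   LLI.setAction({TargetOpcode::G_ADD, 0, LLT::scalar(32)},
//                 LegacyLegalizeActions::Legal);
//   LLI.setLegalizeScalarToDifferentSizeStrategy(
//       TargetOpcode::G_ADD, 0,
//       LegacyLegalizerInfo::widenToLargerTypesAndNarrowToLargest);
//   LLI.computeTables(); // must be called before any getAction() query
// \endcode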

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_LEGACYLEGALIZERINFO_H
//===- llvm/CodeGen/GlobalISel/InlineAsmLowering.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM inline asm to machine code INLINEASM.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
#define LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H

#include "llvm/ADT/ArrayRef.h"
#include <functional>

namespace llvm {
class CallBase;
class MachineIRBuilder;
class MachineOperand;
class Register;
class TargetLowering;
class Value;

class InlineAsmLowering {
  const TargetLowering *TLI;

  virtual void anchor();

public:
  /// Lower the given inline asm call instruction.
  /// \p GetOrCreateVRegs is a callback to materialize a register for the
  /// input and output operands of the inline asm.
  /// \return True if the lowering succeeds, false otherwise.
  bool lowerInlineAsm(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                      std::function<ArrayRef<Register>(const Value &Val)>
                          GetOrCreateVRegs) const;
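
  // Hedged usage sketch: the callback usually forwards to the caller's
  // value-to-vreg mapping; "getOrCreateVRegs" below is illustrative.
  //
  // \code
  //   bool OK = Lowering.lowerInlineAsm(
  //       MIRBuilder, CB,
  //       [&](const Value &Val) { return getOrCreateVRegs(Val); });
  // \endcode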

  /// Lower the specified operand into the Ops vector.
  /// \p Val is the IR input value to be lowered.
  /// \p Constraint is the user-supplied constraint string.
  /// \p Ops is the vector to be filled with the lowered operands.
  /// \return True if the lowering succeeds, false otherwise.
  virtual bool lowerAsmOperandForConstraint(Value *Val, StringRef Constraint,
                                            std::vector<MachineOperand> &Ops,
                                            MachineIRBuilder &MIRBuilder) const;

protected:
  /// Getter for generic TargetLowering class.
  const TargetLowering *getTLI() const { return TLI; }

  /// Getter for target specific TargetLowering class.
  template <class XXXTargetLowering> const XXXTargetLowering *getTLI() const {
    return static_cast<const XXXTargetLowering *>(TLI);
  }

public:
  InlineAsmLowering(const TargetLowering *TLI) : TLI(TLI) {}
  virtual ~InlineAsmLowering() = default;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_INLINEASMLOWERING_H
//===----- llvm/CodeGen/GlobalISel/GISelChangeObserver.h --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This contains common code to allow clients to be notified of changes to
/// machine instructions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H
#define LLVM_CODEGEN_GLOBALISEL_GISELCHANGEOBSERVER_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineFunction.h"

namespace llvm {
class MachineInstr;
class MachineRegisterInfo;

/// Abstract class that contains the various methods through which clients are
/// notified about changes. This should be the preferred way for APIs to
/// report changes. Calling erasingInstr/createdInstr multiple times should
/// not typically affect the result; an observer may need to check whether it
/// was already notified earlier (consider using GISelWorkList).
class GISelChangeObserver {
  SmallPtrSet<MachineInstr *, 4> ChangingAllUsesOfReg;

public:
  virtual ~GISelChangeObserver() = default;

  /// An instruction is about to be erased.
  virtual void erasingInstr(MachineInstr &MI) = 0;

  /// An instruction has been created and inserted into the function.
  /// Note that the instruction might not be a fully fledged instruction at this
  /// point and won't be if the MachineFunction::Delegate is calling it. This is
  /// because the delegate only sees the construction of the MachineInstr before
  /// operands have been added.
  virtual void createdInstr(MachineInstr &MI) = 0;

  /// This instruction is about to be mutated in some way.
  virtual void changingInstr(MachineInstr &MI) = 0;

  /// This instruction was mutated in some way.
  virtual void changedInstr(MachineInstr &MI) = 0;

  /// All the instructions using the given register are being changed.
  /// For convenience, finishedChangingAllUsesOfReg() will report the completion
  /// of the changes. The use list may change between this call and
  /// finishedChangingAllUsesOfReg().
  void changingAllUsesOfReg(const MachineRegisterInfo &MRI, Register Reg);
  /// All instructions reported as changing by changingAllUsesOfReg() have
  /// finished being changed.
  void finishedChangingAllUsesOfReg();

};
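
// Hedged usage sketch: bracket a register replacement so observers see every
// affected use (Observer, MRI, OldReg, and NewReg are assumed to be in scope):
//
// \code
//   Observer.changingAllUsesOfReg(MRI, OldReg);
//   MRI.replaceRegWith(OldReg, NewReg);
//   Observer.finishedChangingAllUsesOfReg();
// \endcode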

/// Simple wrapper observer that takes several observers, and calls
/// each one for each event. If there are multiple observers (say CSE,
/// Legalizer, Combiner), it's sufficient to register this to the machine
/// function as the delegate.
class GISelObserverWrapper : public MachineFunction::Delegate,
                             public GISelChangeObserver {
  SmallVector<GISelChangeObserver *, 4> Observers;

public:
  GISelObserverWrapper() = default;
  GISelObserverWrapper(ArrayRef<GISelChangeObserver *> Obs)
      : Observers(Obs.begin(), Obs.end()) {}
  // Adds an observer.
  void addObserver(GISelChangeObserver *O) { Observers.push_back(O); }
  // Removes an observer from the list; does nothing if the observer is not
  // present.
  void removeObserver(GISelChangeObserver *O) {
    auto It = llvm::find(Observers, O);
    if (It != Observers.end())
      Observers.erase(It);
  }
  // API for Observer.
  void erasingInstr(MachineInstr &MI) override {
    for (auto &O : Observers)
      O->erasingInstr(MI);
  }
  void createdInstr(MachineInstr &MI) override {
    for (auto &O : Observers)
      O->createdInstr(MI);
  }
  void changingInstr(MachineInstr &MI) override {
    for (auto &O : Observers)
      O->changingInstr(MI);
  }
  void changedInstr(MachineInstr &MI) override {
    for (auto &O : Observers)
      O->changedInstr(MI);
  }
  // API for MachineFunction::Delegate
  void MF_HandleInsertion(MachineInstr &MI) override { createdInstr(MI); }
  void MF_HandleRemoval(MachineInstr &MI) override { erasingInstr(MI); }
};

/// A simple RAII based Delegate installer.
/// Use this in a scope to install a delegate to the MachineFunction and reset
/// it at the end of the scope.
class RAIIDelegateInstaller {
  MachineFunction &MF;
  MachineFunction::Delegate *Delegate;

public:
  RAIIDelegateInstaller(MachineFunction &MF, MachineFunction::Delegate *Del);
  ~RAIIDelegateInstaller();
};

/// A simple RAII based Observer installer.
/// Use this in a scope to install the Observer to the MachineFunction and reset
/// it at the end of the scope.
class RAIIMFObserverInstaller {
  MachineFunction &MF;

public:
  RAIIMFObserverInstaller(MachineFunction &MF, GISelChangeObserver &Observer);
  ~RAIIMFObserverInstaller();
};

/// Class to install both of the above.
class RAIIMFObsDelInstaller {
  RAIIDelegateInstaller DelI;
  RAIIMFObserverInstaller ObsI;

public:
  RAIIMFObsDelInstaller(MachineFunction &MF, GISelObserverWrapper &Wrapper)
      : DelI(MF, &Wrapper), ObsI(MF, Wrapper) {}
  ~RAIIMFObsDelInstaller() = default;
};
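
// Hedged usage sketch: aggregate several observers and install them for the
// duration of a scope (CSEObs and CombObs are illustrative observer objects):
//
// \code
//   GISelObserverWrapper Wrapper;
//   Wrapper.addObserver(&CSEObs);
//   Wrapper.addObserver(&CombObs);
//   RAIIMFObsDelInstaller Install(MF, Wrapper);
//   // ... mutate MF; every registered observer sees each event ...
// \endcode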

} // namespace llvm
#endif
//===- llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements GIMatchTableExecutor's `executeMatchTable`
/// function. This is implemented in a separate file because the function is
/// quite large.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
#define LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/CodeGenCoverage.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace llvm {

template <class TgtExecutor, class PredicateBitset, class ComplexMatcherMemFn,
          class CustomRendererFn>
bool GIMatchTableExecutor::executeMatchTable(
    TgtExecutor &Exec, NewMIVector &OutMIs, MatcherState &State,
    const ExecInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
        &ExecInfo,
    const int64_t *MatchTable, const TargetInstrInfo &TII,
    MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
    CodeGenCoverage *CoverageInfo) const {

  uint64_t CurrentIdx = 0;
  SmallVector<uint64_t, 4> OnFailResumeAt;

  // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
  bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();

  const uint16_t Flags = State.MIs[0]->getFlags();

  enum RejectAction { RejectAndGiveUp, RejectAndResume };
  auto handleReject = [&]() -> RejectAction {
    DEBUG_WITH_TYPE(TgtExecutor::getName(),
                    dbgs() << CurrentIdx << ": Rejected\n");
    if (OnFailResumeAt.empty())
      return RejectAndGiveUp;
    CurrentIdx = OnFailResumeAt.pop_back_val();
    DEBUG_WITH_TYPE(TgtExecutor::getName(),
                    dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
                           << OnFailResumeAt.size() << " try-blocks remain)\n");
    return RejectAndResume;
  };

  auto propagateFlags = [=](NewMIVector &OutMIs) {
    for (auto MIB : OutMIs) {
      // Set the NoFPExcept flag when no original matched instruction could
      // raise an FP exception, but the new instruction potentially might.
      uint16_t MIBFlags = Flags;
      if (NoFPException && MIB->mayRaiseFPException())
        MIBFlags |= MachineInstr::NoFPExcept;
      MIB.setMIFlags(MIBFlags);
    }

    return true;
  };

  while (true) {
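    // Interpreter core: the match table is a flat int64_t stream consisting of
    // an opcode followed by its operands. On rejection, execution resumes at
    // the innermost enclosing try-block recorded in OnFailResumeAt.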
    assert(CurrentIdx != ~0u && "Invalid MatchTable index");
    int64_t MatcherOpcode = MatchTable[CurrentIdx++];
    switch (MatcherOpcode) {
    case GIM_Try: {
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": Begin try-block\n");
      OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
      break;
    }

    case GIM_RecordInsn:
    case GIM_RecordInsnIgnoreCopies: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];

      // As an optimisation we require that MIs[0] is always the root. Refuse
      // any attempt to modify it.
      assert(NewInsnID != 0 && "Refusing to modify MIs[0]");

      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg()) {
        DEBUG_WITH_TYPE(TgtExecutor::getName(),
                        dbgs() << CurrentIdx << ": Not a register\n");
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }
      if (MO.getReg().isPhysical()) {
        DEBUG_WITH_TYPE(TgtExecutor::getName(),
                        dbgs() << CurrentIdx << ": Is a physical register\n");
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineInstr *NewMI;
      if (MatcherOpcode == GIM_RecordInsnIgnoreCopies)
        NewMI = getDefIgnoringCopies(MO.getReg(), MRI);
      else
        NewMI = MRI.getVRegDef(MO.getReg());

      if ((size_t)NewInsnID < State.MIs.size())
        State.MIs[NewInsnID] = NewMI;
      else {
        assert((size_t)NewInsnID == State.MIs.size() &&
               "Expected to store MIs in order");
        State.MIs.push_back(NewMI);
      }
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": MIs[" << NewInsnID
                             << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
                             << ")\n");
      break;
    }

    case GIM_CheckFeatures: {
      int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckFeatures(ExpectedBitsetID="
                             << ExpectedBitsetID << ")\n");
      if ((AvailableFeatures & ExecInfo.FeatureBitsets[ExpectedBitsetID]) !=
          ExecInfo.FeatureBitsets[ExpectedBitsetID]) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckOpcode:
    case GIM_CheckOpcodeIsEither: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Expected0 = MatchTable[CurrentIdx++];
      int64_t Expected1 = -1;
      if (MatcherOpcode == GIM_CheckOpcodeIsEither)
        Expected1 = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      unsigned Opcode = State.MIs[InsnID]->getOpcode();

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
                             << "], ExpectedOpcode=" << Expected0;
                      if (MatcherOpcode == GIM_CheckOpcodeIsEither) dbgs()
                      << " || " << Expected1;
                      dbgs() << ") // Got=" << Opcode << "\n";);

      if (Opcode != Expected0 && Opcode != Expected1) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_SwitchOpcode: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t LowerBound = MatchTable[CurrentIdx++];
      int64_t UpperBound = MatchTable[CurrentIdx++];
      int64_t Default = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      const int64_t Opcode = State.MIs[InsnID]->getOpcode();

      DEBUG_WITH_TYPE(TgtExecutor::getName(), {
        dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
               << LowerBound << ", " << UpperBound << "), Default=" << Default
               << ", JumpTable...) // Got=" << Opcode << "\n";
      });
      if (Opcode < LowerBound || UpperBound <= Opcode) {
        CurrentIdx = Default;
        break;
      }
      CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
      if (!CurrentIdx) {
        CurrentIdx = Default;
        break;
      }
      OnFailResumeAt.push_back(Default);
      break;
    }

    case GIM_SwitchType: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t LowerBound = MatchTable[CurrentIdx++];
      int64_t UpperBound = MatchTable[CurrentIdx++];
      int64_t Default = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);

      DEBUG_WITH_TYPE(TgtExecutor::getName(), {
        dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
               << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
               << UpperBound << "), Default=" << Default
               << ", JumpTable...) // Got=";
        if (!MO.isReg())
          dbgs() << "Not a VReg\n";
        else
          dbgs() << MRI.getType(MO.getReg()) << "\n";
      });
      if (!MO.isReg()) {
        CurrentIdx = Default;
        break;
      }
      const LLT Ty = MRI.getType(MO.getReg());
      const auto TyI = ExecInfo.TypeIDMap.find(Ty);
      if (TyI == ExecInfo.TypeIDMap.end()) {
        CurrentIdx = Default;
        break;
      }
      const int64_t TypeID = TyI->second;
      if (TypeID < LowerBound || UpperBound <= TypeID) {
        CurrentIdx = Default;
        break;
      }
      CurrentIdx = MatchTable[CurrentIdx + (TypeID - LowerBound)];
      if (!CurrentIdx) {
        CurrentIdx = Default;
        break;
      }
      OnFailResumeAt.push_back(Default);
      break;
    }

    case GIM_CheckNumOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Expected = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
                             << InsnID << "], Expected=" << Expected << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (State.MIs[InsnID]->getNumOperands() != Expected) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckI64ImmPredicate:
    case GIM_CheckImmOperandPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatcherOpcode == GIM_CheckImmOperandPredicate
                          ? MatchTable[CurrentIdx++]
                          : 1;
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckImmPredicate(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert((State.MIs[InsnID]->getOperand(OpIdx).isImm() ||
              State.MIs[InsnID]->getOperand(OpIdx).isCImm()) &&
             "Expected immediate operand");
      assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
      int64_t Value = 0;
      if (State.MIs[InsnID]->getOperand(OpIdx).isCImm())
        Value = State.MIs[InsnID]->getOperand(OpIdx).getCImm()->getSExtValue();
      else if (State.MIs[InsnID]->getOperand(OpIdx).isImm())
        Value = State.MIs[InsnID]->getOperand(OpIdx).getImm();
      else
        llvm_unreachable("Expected Imm or CImm operand");

      if (!testImmPredicate_I64(Predicate, Value))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckAPIntImmPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs()
                          << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
                          << InsnID << "], Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
             "Expected G_CONSTANT");
      assert(Predicate > GICXXPred_Invalid &&
             "Expected a valid predicate");
      if (!State.MIs[InsnID]->getOperand(1).isCImm())
        llvm_unreachable("Expected Imm or CImm operand");

      const APInt &Value =
          State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
      if (!testImmPredicate_APInt(Predicate, Value))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckAPFloatImmPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs()
                          << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
                          << InsnID << "], Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
             "Expected G_FCONSTANT");
      assert(State.MIs[InsnID]->getOperand(1).isFPImm() &&
             "Expected FPImm operand");
      assert(Predicate > GICXXPred_Invalid &&
             "Expected a valid predicate");
      const APFloat &Value =
          State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();

      if (!testImmPredicate_APFloat(Predicate, Value))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckIsBuildVectorAllOnes:
    case GIM_CheckIsBuildVectorAllZeros: {
      int64_t InsnID = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
                             << InsnID << "])\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      const MachineInstr *MI = State.MIs[InsnID];
      assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
              MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
             "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");

      if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
        if (!isBuildVectorAllOnes(*MI, MRI)) {
          if (handleReject() == RejectAndGiveUp)
            return false;
        }
      } else {
        if (!isBuildVectorAllZeros(*MI, MRI)) {
          if (handleReject() == RejectAndGiveUp)
            return false;
        }
      }

      break;
    }
    case GIM_CheckSimplePredicate: {
      // Note: we don't check for invalid here because this is purely a hook to
      // allow some executors (such as the combiner) to check arbitrary,
      // contextless predicates, such as whether a rule is enabled or not.
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckSimplePredicate(Predicate="
                             << Predicate << ")\n");
      assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");
      if (!testSimplePredicate(Predicate)) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckCxxInsnPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs()
                          << CurrentIdx << ": GIM_CheckCxxPredicate(MIs["
                          << InsnID << "], Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(Predicate > GICXXPred_Invalid && "Expected a valid predicate");

      if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID], State))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckHasNoUse: {
      int64_t InsnID = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckHasNoUse(MIs["
                             << InsnID << "])\n");

      const MachineInstr *MI = State.MIs[InsnID];
      assert(MI && "Used insn before defined");
      assert(MI->getNumDefs() > 0 && "No defs");
      const Register Res = MI->getOperand(0).getReg();

      if (!MRI.use_nodbg_empty(Res)) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }

      break;
    }
    case GIM_CheckAtomicOrdering: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->hasOneMemOperand())
        if (handleReject() == RejectAndGiveUp)
          return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (MMO->getMergedOrdering() != Ordering)
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
    }
    case GIM_CheckAtomicOrderingOrStrongerThan: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->hasOneMemOperand())
        if (handleReject() == RejectAndGiveUp)
          return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (!isAtLeastOrStrongerThan(MMO->getMergedOrdering(), Ordering))
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
    }
    case GIM_CheckAtomicOrderingWeakerThan: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->hasOneMemOperand())
        if (handleReject() == RejectAndGiveUp)
          return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (!isStrongerThan(Ordering, MMO->getMergedOrdering()))
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
    }
    case GIM_CheckMemoryAddressSpace: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      // This accepts a list of possible address spaces.
      const int NumAddrSpace = MatchTable[CurrentIdx++];

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      // Need to still jump to the end of the list of address spaces if we find
      // a match earlier.
      const uint64_t LastIdx = CurrentIdx + NumAddrSpace;

      const MachineMemOperand *MMO =
          *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
      const unsigned MMOAddrSpace = MMO->getAddrSpace();

      bool Success = false;
      for (int I = 0; I != NumAddrSpace; ++I) {
        unsigned AddrSpace = MatchTable[CurrentIdx++];
        DEBUG_WITH_TYPE(TgtExecutor::getName(),
                        dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
                               << AddrSpace << '\n');

        if (AddrSpace == MMOAddrSpace) {
          Success = true;
          break;
        }
      }

      CurrentIdx = LastIdx;
      if (!Success && handleReject() == RejectAndGiveUp)
        return false;
      break;
    }
    case GIM_CheckMemoryAlignment: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      unsigned MinAlign = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineMemOperand *MMO =
          *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
                             << "(MIs[" << InsnID << "]->memoperands() + "
                             << MMOIdx << ")->getAlignment() >= " << MinAlign
                             << ")\n");
      if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
        return false;

      break;
    }
    case GIM_CheckMemorySizeEqualTo: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      uint64_t Size = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckMemorySizeEqual(MIs["
                             << InsnID << "]->memoperands() + " << MMOIdx
                             << ", Size=" << Size << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineMemOperand *MMO =
          *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);

      DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << MMO->getSize()
                                                     << " bytes vs " << Size
                                                     << " bytes\n");
      if (MMO->getSize() != Size)
        if (handleReject() == RejectAndGiveUp)
          return false;

      break;
    }
    case GIM_CheckMemorySizeEqualToLLT:
    case GIM_CheckMemorySizeLessThanLLT:
    case GIM_CheckMemorySizeGreaterThanLLT: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(
          TgtExecutor::getName(),
          dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
                 << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT ? "EqualTo"
                     : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
                         ? "GreaterThan"
                         : "LessThan")
                 << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
                 << ", OpIdx=" << OpIdx << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg()) {
        DEBUG_WITH_TYPE(TgtExecutor::getName(),
                        dbgs() << CurrentIdx << ": Not a register\n");
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineMemOperand *MMO =
          *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);

      unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
      if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
          MMO->getSizeInBits() != Size) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
                 MMO->getSizeInBits() >= Size) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
                 MMO->getSizeInBits() <= Size)
        if (handleReject() == RejectAndGiveUp)
          return false;

      break;
    }
    case GIM_CheckType: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t TypeID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx
                             << "), TypeID=" << TypeID << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg() ||
          MRI.getType(MO.getReg()) != ExecInfo.TypeObjects[TypeID]) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckPointerToAny: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      uint64_t SizeInBits = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), SizeInBits=" << SizeInBits << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      const LLT Ty = MRI.getType(MO.getReg());

      // iPTR must be looked up in the target.
      if (SizeInBits == 0) {
        MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
        const unsigned AddrSpace = Ty.getAddressSpace();
        SizeInBits = MF->getDataLayout().getPointerSizeInBits(AddrSpace);
      }

      assert(SizeInBits != 0 && "Pointer size must be known");

      if (MO.isReg()) {
        if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
          if (handleReject() == RejectAndGiveUp)
            return false;
      } else if (handleReject() == RejectAndGiveUp)
        return false;

      break;
    }
    case GIM_RecordNamedOperand: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      uint64_t StoreIdx = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), StoreIdx=" << StoreIdx << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
      State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
      break;
    }
    case GIM_CheckRegBankForClass: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RCEnum = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), RCEnum=" << RCEnum << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg() ||
          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum),
                                      MRI.getType(MO.getReg())) !=
              RBI.getRegBank(MO.getReg(), MRI, TRI)) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }

    case GIM_CheckComplexPattern: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
                             << "] = GIM_CheckComplexPattern(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx
                             << "), ComplexPredicateID=" << ComplexPredicateID
                             << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      // FIXME: Use std::invoke() when it's available.
      ComplexRendererFns Renderer =
          (Exec.*ExecInfo.ComplexPredicates[ComplexPredicateID])(
              State.MIs[InsnID]->getOperand(OpIdx));
      if (Renderer)
        State.Renderers[RendererID] = *Renderer;
      else if (handleReject() == RejectAndGiveUp)
        return false;
      break;
    }
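    // Illustrative sketch: ExecInfo.ComplexPredicates[ComplexPredicateID] is a
    // member-function pointer into the target executor (e.g. a hypothetical
    // selectAddrModeIndexed()). An empty ComplexRendererFns result means the
    // operand did not match, and the rule is rejected above.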

    case GIM_CheckConstantInt: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (MO.isReg()) {
        // isOperandImmEqual() will sign-extend to 64 bits, so should we.
        LLT Ty = MRI.getType(MO.getReg());
        Value = SignExtend64(Value, Ty.getSizeInBits());

        if (!isOperandImmEqual(MO, Value, MRI)) {
          if (handleReject() == RejectAndGiveUp)
            return false;
        }
      } else if (handleReject() == RejectAndGiveUp)
        return false;

      break;
    }
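
    // Note: GIM_CheckLiteralInt (below) differs from GIM_CheckConstantInt in
    // that it matches an immediate or ConstantInt operand in place, whereas
    // GIM_CheckConstantInt compares the value carried by a register operand
    // via isOperandImmEqual().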

    case GIM_CheckLiteralInt: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (MO.isImm() && MO.getImm() == Value)
        break;

      if (MO.isCImm() && MO.getCImm()->equalsInt(Value))
        break;

      if (handleReject() == RejectAndGiveUp)
        return false;

      break;
    }

    case GIM_CheckIntrinsicID: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckCmpPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckCmpPredicate(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isPredicate() || MO.getPredicate() != Value)
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckIsMBB: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx << "))\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckIsImm: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsImm(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx << "))\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->getOperand(OpIdx).isImm()) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckIsSafeToFold: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
                             << InsnID << "])\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckIsSameOperand:
    case GIM_CheckIsSameOperandIgnoreCopies: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t OtherInsnID = MatchTable[CurrentIdx++];
      int64_t OtherOpIdx = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
                             << InsnID << "][" << OpIdx << "], MIs["
                             << OtherInsnID << "][" << OtherOpIdx << "])\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");

      MachineOperand &Op = State.MIs[InsnID]->getOperand(OpIdx);
      MachineOperand &OtherOp = State.MIs[OtherInsnID]->getOperand(OtherOpIdx);

      if (MatcherOpcode == GIM_CheckIsSameOperandIgnoreCopies) {
        if (Op.isReg() && OtherOp.isReg()) {
          if (getSrcRegIgnoringCopies(Op.getReg(), MRI) ==
              getSrcRegIgnoringCopies(OtherOp.getReg(), MRI))
            break;
        }
      }

      if (!Op.isIdenticalTo(OtherOp)) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_Reject:
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIM_Reject\n");
      if (handleReject() == RejectAndGiveUp)
        return false;
      break;
    case GIR_MutateOpcode: {
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      uint64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t NewOpcode = MatchTable[CurrentIdx++];
      if (NewInsnID >= OutMIs.size())
        OutMIs.resize(NewInsnID + 1);

      OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
                                              State.MIs[OldInsnID]);
      OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "], "
                             << NewOpcode << ")\n");
      break;
    }
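
    // Illustrative sketch: GIR_MutateOpcode recycles the matched instruction
    // in place, e.g. turning a G_ADD into a target add while keeping its
    // operands; the mutated instruction also becomes OutMIs[NewInsnID] so the
    // renderers that follow can keep appending to it.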

    case GIR_BuildMI: {
      uint64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t Opcode = MatchTable[CurrentIdx++];
      if (NewInsnID >= OutMIs.size())
        OutMIs.resize(NewInsnID + 1);

      OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
                                  MIMetadata(*State.MIs[0]), TII.get(Opcode));
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
                             << NewInsnID << "], " << Opcode << ")\n");
      break;
    }

    case GIR_Copy: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs()
                          << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
                          << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
      break;
    }

    case GIR_CopyOrAddZeroReg: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t ZeroReg = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
      if (isOperandImmEqual(MO, 0, MRI))
        OutMIs[NewInsnID].addReg(ZeroReg);
      else
        OutMIs[NewInsnID].add(MO);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "], "
                             << OpIdx << ", " << ZeroReg << ")\n");
      break;
    }

    case GIR_CopySubReg: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t SubRegIdx = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
                               0, SubRegIdx);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "], "
                             << OpIdx << ", " << SubRegIdx << ")\n");
      break;
    }

    case GIR_AddImplicitDef: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RegNum = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
                             << InsnID << "], " << RegNum << ")\n");
      break;
    }

    case GIR_AddImplicitUse: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RegNum = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
                             << InsnID << "], " << RegNum << ")\n");
      break;
    }

    case GIR_AddRegister: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RegNum = MatchTable[CurrentIdx++];
      uint64_t RegFlags = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addReg(RegNum, RegFlags);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs()
                          << CurrentIdx << ": GIR_AddRegister(OutMIs[" << InsnID
                          << "], " << RegNum << ", " << RegFlags << ")\n");
      break;
    }

    case GIR_AddTempRegister:
    case GIR_AddTempSubRegister: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t TempRegID = MatchTable[CurrentIdx++];
      uint64_t TempRegFlags = MatchTable[CurrentIdx++];
      unsigned SubReg = 0;
      if (MatcherOpcode == GIR_AddTempSubRegister)
        SubReg = MatchTable[CurrentIdx++];

      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");

      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags,
                            SubReg);
      DEBUG_WITH_TYPE(
          TgtExecutor::getName(),
          dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs[" << InsnID
                 << "], TempRegisters[" << TempRegID << "]";
          if (SubReg) dbgs() << '.' << TRI.getSubRegIndexName(SubReg);
          dbgs() << ", " << TempRegFlags << ")\n");
      break;
    }
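
    // Illustrative sketch (hypothetical encoding): a GIR_AddTempSubRegister
    // row carries one extra subregister index after the flags, e.g.
    //   GIR_AddTempSubRegister, /*InsnID*/0, /*TempRegID*/0, /*Flags*/0,
    //   /*SubReg*/1,
    // while a GIR_AddTempRegister row ends after the flags.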

    case GIR_AddImm: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Imm = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addImm(Imm);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
                             << "], " << Imm << ")\n");
      break;
    }

    case GIR_ComplexRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      for (const auto &RenderOpFn : State.Renderers[RendererID])
        RenderOpFn(OutMIs[InsnID]);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
                             << InsnID << "], " << RendererID << ")\n");
      break;
    }
    case GIR_ComplexSubOperandRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      int64_t RenderOpID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIR_ComplexSubOperandRenderer(OutMIs["
                             << InsnID << "], " << RendererID << ", "
                             << RenderOpID << ")\n");
      break;
    }
    case GIR_ComplexSubOperandSubRegRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      int64_t RenderOpID = MatchTable[CurrentIdx++];
      int64_t SubRegIdx = MatchTable[CurrentIdx++];
      MachineInstrBuilder &MI = OutMIs[InsnID];
      assert(MI && "Attempted to add to undefined instruction");
      State.Renderers[RendererID][RenderOpID](MI);
      MI->getOperand(MI->getNumOperands() - 1).setSubReg(SubRegIdx);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIR_ComplexSubOperandSubRegRenderer(OutMIs["
                             << InsnID << "], " << RendererID << ", "
                             << RenderOpID << ", " << SubRegIdx << ")\n");
      break;
    }

    case GIR_CopyConstantAsSImm: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
             "Expected G_CONSTANT");
      if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
        OutMIs[NewInsnID].addImm(
            State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
      } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
        OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
      else
        llvm_unreachable("Expected Imm or CImm operand");
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
      break;
    }

    // TODO: Needs a test case once we have a pattern that uses this.
    case GIR_CopyFConstantAsFPImm: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
             "Expected G_FCONSTANT");
      if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
        OutMIs[NewInsnID].addFPImm(
            State.MIs[OldInsnID]->getOperand(1).getFPImm());
      else
        llvm_unreachable("Expected FPImm operand");
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs()
                          << CurrentIdx << ": GIR_CopyFPConstantAsFPImm(OutMIs["
                          << NewInsnID << "], MIs[" << OldInsnID << "])\n");
      break;
    }

    case GIR_CustomRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t RendererFnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
                             << InsnID << "], MIs[" << OldInsnID << "], "
                             << RendererFnID << ")\n");
      (Exec.*ExecInfo.CustomRenderers[RendererFnID])(
          OutMIs[InsnID], *State.MIs[OldInsnID],
          -1); // Not a source operand of the old instruction.
      break;
    }
    case GIR_CustomAction: {
      int64_t FnID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_CustomAction(FnID=" << FnID
                             << ")\n");
      assert(FnID > GICXXCustomAction_Invalid && "Expected a valid FnID");
      runCustomAction(FnID, State);
      break;
    }
    case GIR_CustomOperandRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RendererFnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIR_CustomOperandRenderer(OutMIs[" << InsnID
                             << "], MIs[" << OldInsnID << "]->getOperand("
                             << OpIdx << "), " << RendererFnID << ")\n");
      (Exec.*ExecInfo.CustomRenderers[RendererFnID])(
          OutMIs[InsnID], *State.MIs[OldInsnID], OpIdx);
      break;
    }
    case GIR_ConstrainOperandRC: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RCEnum = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      MachineInstr &I = *OutMIs[InsnID].getInstr();
      MachineFunction &MF = *I.getParent()->getParent();
      MachineRegisterInfo &MRI = MF.getRegInfo();
      const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
      MachineOperand &MO = I.getOperand(OpIdx);
      constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
                             << InsnID << "], " << OpIdx << ", " << RCEnum
                             << ")\n");
      break;
    }

    case GIR_ConstrainSelectedInstOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
                                       RBI);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx
                             << ": GIR_ConstrainSelectedInstOperands(OutMIs["
                             << InsnID << "])\n");
      break;
    }

    case GIR_MergeMemOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");

      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
                             << InsnID << "]");
      int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
      while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
             GIU_MergeMemOperands_EndOfList) {
        DEBUG_WITH_TYPE(TgtExecutor::getName(),
                        dbgs() << ", MIs[" << MergeInsnID << "]");
        for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
          OutMIs[InsnID].addMemOperand(MMO);
      }
      DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << ")\n");
      break;
    }

    case GIR_EraseFromParent: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      assert(State.MIs[InsnID] &&
             "Attempted to erase an undefined instruction");
      State.MIs[InsnID]->eraseFromParent();
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
                             << InsnID << "])\n");
      break;
    }

    case GIR_MakeTempReg: {
      int64_t TempRegID = MatchTable[CurrentIdx++];
      int64_t TypeID = MatchTable[CurrentIdx++];

      State.TempRegisters[TempRegID] =
          MRI.createGenericVirtualRegister(ExecInfo.TypeObjects[TypeID]);
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
                             << "] = GIR_MakeTempReg(" << TypeID << ")\n");
      break;
    }

    case GIR_Coverage: {
      int64_t RuleID = MatchTable[CurrentIdx++];
      assert(CoverageInfo);
      CoverageInfo->setCovered(RuleID);

      DEBUG_WITH_TYPE(TgtExecutor::getName(), dbgs() << CurrentIdx
                                                     << ": GIR_Coverage("
                                                     << RuleID << ")\n");
      break;
    }

    case GIR_Done:
      DEBUG_WITH_TYPE(TgtExecutor::getName(),
                      dbgs() << CurrentIdx << ": GIR_Done\n");
      propagateFlags(OutMIs);
      return true;
    default:
      llvm_unreachable("Unexpected command");
    }
  }
}

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTORIMPL_H
//===- llvm/CodeGen/GlobalISel/RegisterBankInfo.h ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API for the register bank info.
/// This API is responsible for handling the register banks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
#define LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cassert>
#include <initializer_list>
#include <memory>

namespace llvm {

class MachineInstr;
class MachineRegisterInfo;
class raw_ostream;
class RegisterBank;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

/// Holds all the information related to register banks.
class RegisterBankInfo {
public:
  /// Helper struct that represents how a value is partially mapped
  /// into a register.
  /// The StartIdx and Length represent what region of the original
  /// value this partial mapping covers.
  /// This can be represented as a mask of contiguous bits starting
  /// at the StartIdx-th bit and spanning Length bits.
  /// StartIdx is counted from the least significant bit.
  struct PartialMapping {
    /// Number of bits at which this partial mapping starts in the
    /// original value.  The bits are counted from the least significant
    /// bit to the most significant bit.
    unsigned StartIdx;

    /// Length of this mapping in bits. This is how many bits this
    /// partial mapping covers in the original value:
    /// from StartIdx to StartIdx + Length -1.
    unsigned Length;

    /// Register bank where the partial value lives.
    const RegisterBank *RegBank;

    PartialMapping() = default;

    /// Provide a shortcut for quickly building PartialMapping.
    PartialMapping(unsigned StartIdx, unsigned Length,
                   const RegisterBank &RegBank)
        : StartIdx(StartIdx), Length(Length), RegBank(&RegBank) {}

    /// \return the index, in the original value, of the most
    /// significant bit that this partial mapping covers.
    unsigned getHighBitIdx() const { return StartIdx + Length - 1; }

    /// Print this partial mapping on dbgs() stream.
    void dump() const;

    /// Print this partial mapping on \p OS.
    void print(raw_ostream &OS) const;

    /// Check that the Mask is compatible with the RegBank.
    /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask,
    /// there is no way this mapping is valid.
    ///
    /// \note This method does not check anything when assertions are disabled.
    ///
    /// \return True if the check was successful.
    bool verify() const;
  };
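
  // Illustrative sketch (not part of the original interface): a 64-bit value
  // split across two 32-bit registers of a hypothetical GPR bank would use
  // two partial mappings:
  //   PartialMapping Lo(/*StartIdx*/ 0, /*Length*/32, GPR); // bits [31:0]
  //   PartialMapping Hi(/*StartIdx*/32, /*Length*/32, GPR); // bits [63:32]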

  /// Helper struct that represents how a value is mapped through
  /// different register banks.
  ///
  /// \note: So far we do not have any users of the complex mappings
  /// (mappings with more than one partial mapping), but when we do,
  /// we will need to duplicate partial mappings.
  /// The alternative could be to use an array of pointers of partial
  /// mapping (i.e., PartialMapping **BreakDown) and duplicate the
  /// pointers instead.
  ///
  /// E.g.,
  /// Let's say we have a 32-bit add and a <2 x 32-bit> vadd. We can
  /// expand the <2 x 32-bit> vadd into 2 x 32-bit adds.
  ///
  /// Currently the TableGen-like file would look like:
  /// \code
  /// PartialMapping[] = {
  /// /*32-bit add*/      {0, 32, GPR}, // Scalar entry repeated for first
  ///                                   // vec elt.
  /// /*2x32-bit add*/    {0, 32, GPR}, {32, 32, GPR},
  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
  /// }; // PartialMapping duplicated.
  ///
  /// ValueMapping[] {
  ///   /*plain 32-bit add*/       {&PartialMapping[0], 1},
  ///   /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
  ///   /*plain <2x32-bit> vadd*/  {&PartialMapping[3], 1}
  /// };
  /// \endcode
  ///
  /// With the array of pointer, we would have:
  /// \code
  /// PartialMapping[] = {
  /// /*32-bit add lower */ { 0, 32, GPR},
  /// /*32-bit add upper */ {32, 32, GPR},
  /// /*<2x32-bit> vadd */  { 0, 64, VPR}
  /// }; // No more duplication.
  ///
  /// BreakDowns[] = {
  /// /*AddBreakDown*/   &PartialMapping[0],
  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
  /// /*VAddBreakDown*/  &PartialMapping[2]
  /// }; // Addresses of PartialMapping duplicated (smaller).
  ///
  /// ValueMapping[] {
  ///   /*plain 32-bit add*/       {&BreakDowns[0], 1},
  ///   /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
  ///   /*plain <2x32-bit> vadd*/  {&BreakDowns[3], 1}
  /// };
  /// \endcode
  ///
  /// Given that a PartialMapping is actually small, the code size
  /// impact of the pointer array is a degradation. Moreover, the compile
  /// time would be hit by the additional indirection.
  /// If PartialMapping gets bigger we may reconsider.
  struct ValueMapping {
    /// How the value is broken down between the different register banks.
    const PartialMapping *BreakDown;

    /// Number of partial mappings used to break down this value.
    unsigned NumBreakDowns;

    /// The default constructor creates an invalid (isValid() == false)
    /// instance.
    ValueMapping() : ValueMapping(nullptr, 0) {}

    /// Initialize a ValueMapping with the given parameters.
    /// \p BreakDown needs to have a lifetime at least as long
    /// as this instance's.
    ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
        : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}

    /// Iterators through the PartialMappings.
    const PartialMapping *begin() const { return BreakDown; }
    const PartialMapping *end() const { return BreakDown + NumBreakDowns; }

    /// \return true if all partial mappings are the same size and register
    /// bank.
    bool partsAllUniform() const;

    /// Check if this ValueMapping is valid.
    bool isValid() const { return BreakDown && NumBreakDowns; }

    /// Verify that this mapping makes sense for a value of
    /// \p MeaningfulBitWidth.
    /// \note This method does not check anything when assertions are disabled.
    ///
    /// \return True if the check was successful.
    bool verify(unsigned MeaningfulBitWidth) const;

    /// Print this on dbgs() stream.
    void dump() const;

    /// Print this on \p OS.
    void print(raw_ostream &OS) const;
  };
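
  // Illustrative sketch: ValueMapping behaves as a lightweight range over its
  // PartialMappings, so clients can iterate it directly:
  //   for (const PartialMapping &PM : ValMapping)
  //     OS << PM; // operator<< for PartialMapping is declared below.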

  /// Helper class that represents how the value of an instruction may be
  /// mapped and what is the related cost of such mapping.
  class InstructionMapping {
    /// Identifier of the mapping.
    /// This is used to communicate between the target and the optimizers
    /// which mapping should be realized.
    unsigned ID = InvalidMappingID;

    /// Cost of this mapping.
    unsigned Cost = 0;

    /// Mapping of all the operands.
    const ValueMapping *OperandsMapping = nullptr;

    /// Number of operands.
    unsigned NumOperands = 0;

    const ValueMapping &getOperandMapping(unsigned i) {
      assert(i < getNumOperands() && "Out of bound operand");
      return OperandsMapping[i];
    }

  public:
    /// Constructor for the mapping of an instruction.
    /// \p NumOperands must be equal to the number of operands of
    /// the related instruction.
    /// The rationale is that it is more efficient for the optimizers
    /// to be able to assume that the mapping of the ith operand is
    /// at the index i.
    InstructionMapping(unsigned ID, unsigned Cost,
                       const ValueMapping *OperandsMapping,
                       unsigned NumOperands)
        : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
          NumOperands(NumOperands) {
    }

    /// Default constructor.
    /// Use this constructor to express that the mapping is invalid.
    InstructionMapping() = default;

    /// Get the cost.
    unsigned getCost() const { return Cost; }

    /// Get the ID.
    unsigned getID() const { return ID; }

    /// Get the number of operands.
    unsigned getNumOperands() const { return NumOperands; }

    /// Get the value mapping of the ith operand.
    /// \pre The mapping for the ith operand has been set.
    /// \pre The ith operand is a register.
    const ValueMapping &getOperandMapping(unsigned i) const {
      const ValueMapping &ValMapping =
          const_cast<InstructionMapping *>(this)->getOperandMapping(i);
      return ValMapping;
    }

    /// Set the mapping for all the operands.
    /// In other words, OpdsMapping should hold at least getNumOperands
    /// ValueMappings.
    void setOperandsMapping(const ValueMapping *OpdsMapping) {
      OperandsMapping = OpdsMapping;
    }

    /// Check whether this object is valid.
    /// This is a lightweight check for obviously wrong instances.
    bool isValid() const {
      return getID() != InvalidMappingID && OperandsMapping;
    }

    /// Verify that this mapping makes sense for \p MI.
    /// \pre \p MI must be connected to a MachineFunction.
    ///
    /// \note This method does not check anything when assertions are disabled.
    ///
    /// \return True if the check was successful.
    bool verify(const MachineInstr &MI) const;

    /// Print this on dbgs() stream.
    void dump() const;

    /// Print this on \p OS.
    void print(raw_ostream &OS) const;
  };
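
  // Illustrative sketch: targets normally obtain InstructionMapping instances
  // through the uniquing helper getInstructionMapping() declared below rather
  // than constructing them directly (the operands-mapping name here is
  // hypothetical):
  //   const InstructionMapping &M = getInstructionMapping(
  //       DefaultMappingID, /*Cost*/1, GPROperandsMapping,
  //       MI.getNumOperands());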

  /// Convenient type to represent the alternatives for mapping an
  /// instruction.
  /// \todo When we move to TableGen this should be an array ref.
  using InstructionMappings = SmallVector<const InstructionMapping *, 4>;

  /// Helper class used to get/create the virtual registers that will be used
  /// to replace the MachineOperand when applying a mapping.
  class OperandsMapper {
    /// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
    /// OpIdx-th operand start. -1 means we do not have such a mapping yet.
    /// Note: We use a SmallVector to avoid heap allocation for most cases.
    SmallVector<int, 8> OpToNewVRegIdx;

    /// Hold the registers that will be used to map MI with InstrMapping.
    SmallVector<Register, 8> NewVRegs;

    /// Current MachineRegisterInfo, used to create new virtual registers.
    MachineRegisterInfo &MRI;

    /// Instruction being remapped.
    MachineInstr &MI;

    /// New mapping of the instruction.
    const InstructionMapping &InstrMapping;

    /// Constant value identifying that the index in OpToNewVRegIdx
    /// for an operand has not been set yet.
    static const int DontKnowIdx;

    /// Get the range in NewVRegs to store all the partial
    /// values for the \p OpIdx-th operand.
    ///
    /// \return The iterator range for the space created.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    iterator_range<SmallVectorImpl<Register>::iterator>
    getVRegsMem(unsigned OpIdx);

    /// Get the end iterator for a range starting at \p StartIdx and
    /// spanning \p NumVal in NewVRegs.
    /// \pre StartIdx + NumVal <= NewVRegs.size()
    SmallVectorImpl<Register>::const_iterator
    getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
    SmallVectorImpl<Register>::iterator getNewVRegsEnd(unsigned StartIdx,
                                                       unsigned NumVal);

  public:
    /// Create an OperandsMapper that will hold the information to apply \p
    /// InstrMapping to \p MI.
    /// \pre InstrMapping.verify(MI)
    OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
                   MachineRegisterInfo &MRI);

    /// \name Getters.
    /// @{
    /// The MachineInstr being remapped.
    MachineInstr &getMI() const { return MI; }

    /// The final mapping of the instruction.
    const InstructionMapping &getInstrMapping() const { return InstrMapping; }

    /// The MachineRegisterInfo we used to realize the mapping.
    MachineRegisterInfo &getMRI() const { return MRI; }
    /// @}

    /// Create as many new virtual registers as needed for the mapping of the \p
    /// OpIdx-th operand.
    /// The number of registers is determined by the number of breakdowns for the
    /// related operand in the instruction mapping.
    /// The type of the new registers is a plain scalar of the right size.
    /// The proper type is expected to be set when the mapping is applied to
    /// the instruction(s) that realizes the mapping.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    ///
    /// \post All the partial mapping of the \p OpIdx-th operand have been
    /// assigned a new virtual register.
    void createVRegs(unsigned OpIdx);

    /// Set the virtual register of the \p PartialMapIdx-th partial mapping of
    /// the OpIdx-th operand to \p NewVReg.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    /// \pre getInstrMapping().getOperandMapping(OpIdx).BreakDown.size() >
    /// PartialMapIdx
    /// \pre NewReg != 0
    ///
    /// \post the \p PartialMapIdx-th register of the value mapping of the \p
    /// OpIdx-th operand has been set.
    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, Register NewVReg);

    /// Get all the virtual registers required to map the \p OpIdx-th operand of
    /// the instruction.
    ///
    /// This returns an empty range when createVRegs or setVRegs has not been
    /// called.
    /// The iterator may be invalidated by a call to setVRegs or createVRegs.
    ///
    /// When \p ForDebug is true, we will not check that the list of new virtual
    /// registers does not contain uninitialized values.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    /// \pre ForDebug || All partial mappings have been assigned a register
    iterator_range<SmallVectorImpl<Register>::const_iterator>
    getVRegs(unsigned OpIdx, bool ForDebug = false) const;

    /// Print this operands mapper on dbgs() stream.
    void dump() const;

    /// Print this operands mapper on \p OS stream.
    void print(raw_ostream &OS, bool ForDebug = false) const;
  };
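
  // Illustrative sketch of the intended flow (the driver code is
  // hypothetical):
  //   const InstructionMapping &Mapping = RBI.getInstrMapping(MI);
  //   RegisterBankInfo::OperandsMapper OpdMapper(MI, Mapping, MRI);
  //   RBI.applyMapping(OpdMapper); // See applyMapping() below.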

protected:
  /// Hold the set of supported register banks.
  RegisterBank **RegBanks;

  /// Total number of register banks.
  unsigned NumRegBanks;

  /// Keep dynamically allocated PartialMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
      MapOfPartialMappings;

  /// Keep dynamically allocated ValueMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
      MapOfValueMappings;

  /// Keep dynamically allocated array of ValueMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
      MapOfOperandsMappings;

  /// Keep dynamically allocated InstructionMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
      MapOfInstructionMappings;

  /// Getting the minimal register class of a physreg is expensive.
  /// Cache this information as we get it.
  mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;

  /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
  /// RegisterBank instances.
  RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);

  /// This constructor is meaningless.
  /// It just provides a default constructor that can be used at link time
  /// when GlobalISel is not built.
  /// That way, targets can still inherit from this class without doing
  /// crazy gymnastics to avoid link-time failures.
  /// \note That works because the constructor is inlined.
  RegisterBankInfo() {
    llvm_unreachable("This constructor should not be executed");
  }

  /// Get the register bank identified by \p ID.
  RegisterBank &getRegBank(unsigned ID) {
    assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
    return *RegBanks[ID];
  }

  /// Get the MinimalPhysRegClass for Reg.
  /// \pre Reg is a physical register.
  const TargetRegisterClass &
  getMinimalPhysRegClass(Register Reg, const TargetRegisterInfo &TRI) const;

  /// Try to get the mapping of \p MI.
  /// See getInstrMapping for more details on what a mapping represents.
  ///
  /// Unlike getInstrMapping the returned InstructionMapping may be invalid
  /// (isValid() == false).
  /// This means that the target independent code is not smart enough
  /// to get the mapping of \p MI and thus, the target has to provide the
  /// information for \p MI.
  ///
  /// This implementation is able to get the mapping of:
  /// - Target specific instructions by looking at the encoding constraints.
  /// - Any instruction if all the register operands have already been assigned
  ///   a register, a register class, or a register bank.
  /// - Copies and phis if at least one of the operands has been assigned a
  ///   register, a register class, or a register bank.
  /// In other words, this method will likely fail to find a mapping for
  /// any generic opcode that has not been lowered by target specific code.
  const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;

  /// Get the uniquely generated PartialMapping for the
  /// given arguments.
  const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
                                          const RegisterBank &RegBank) const;

  /// \name Methods to get a uniquely generated ValueMapping.
  /// @{

  /// The most common ValueMapping consists of a single PartialMapping.
  /// We provide a dedicated method for that case.
  const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
                                      const RegisterBank &RegBank) const;

  /// Get the ValueMapping for the given arguments.
  const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
                                      unsigned NumBreakDowns) const;
  /// @}

  /// \name Methods to get a uniquely generated array of ValueMapping.
  /// @{

  /// Get the uniquely generated array of ValueMapping for the
  /// elements between \p Begin and \p End.
  ///
  /// Elements that are nullptr will be replaced by
  /// invalid ValueMapping (ValueMapping::isValid == false).
  ///
  /// \pre The pointers to ValueMapping between \p Begin and \p End
  /// must uniquely identify a ValueMapping. Otherwise, there is no
  /// guarantee that the returned instance will be unique, i.e., another
  /// OperandsMapping could have the same content.
  template <typename Iterator>
  const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;

  /// Get the uniquely generated array of ValueMapping for the
  /// elements of \p OpdsMapping.
  ///
  /// Elements of \p OpdsMapping that are nullptr will be replaced by
  /// invalid ValueMapping (ValueMapping::isValid == false).
  const ValueMapping *getOperandsMapping(
      const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;

  /// Get the uniquely generated array of ValueMapping for the
  /// given arguments.
  ///
  /// Arguments that are nullptr will be replaced by invalid
  /// ValueMapping (ValueMapping::isValid == false).
  const ValueMapping *getOperandsMapping(
      std::initializer_list<const ValueMapping *> OpdsMapping) const;
  /// @}

  /// \name Methods to get a uniquely generated InstructionMapping.
  /// @{

private:
  /// Method to get a uniquely generated InstructionMapping.
  const InstructionMapping &
  getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
                            unsigned Cost = 0,
                            const ValueMapping *OperandsMapping = nullptr,
                            unsigned NumOperands = 0) const;

public:
  /// Method to get a uniquely generated InstructionMapping.
  const InstructionMapping &
  getInstructionMapping(unsigned ID, unsigned Cost,
                        const ValueMapping *OperandsMapping,
                        unsigned NumOperands) const {
    return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
                                     OperandsMapping, NumOperands);
  }

  /// Method to get a uniquely generated invalid InstructionMapping.
  const InstructionMapping &getInvalidInstructionMapping() const {
    return getInstructionMappingImpl(/*IsInvalid*/ true);
  }
  /// @}

  /// Get the register bank for the \p OpIdx-th operand of \p MI from
  /// the encoding constraints, if any.
  ///
  /// \return A register bank that covers the register class of the
  /// related encoding constraints or nullptr if \p MI did not provide
  /// enough information to deduce it.
  const RegisterBank *
  getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
                            const TargetInstrInfo &TII,
                            const MachineRegisterInfo &MRI) const;

  /// Helper method to apply something that is like the default mapping.
  /// Basically, that means that \p OpdMapper.getMI() is left untouched
  /// aside from the reassignment of the register operands that have been
  /// remapped.
  ///
  /// The types of all the new registers that have been created by the
  /// mapper are properly remapped to the types of the original registers
  /// they replace. In other words, the semantics of the instruction do
  /// not change, only the register banks.
  ///
  /// If the mapping of one of the operands spans several registers, this
  /// method will abort as this is not like a default mapping anymore.
  ///
  /// \pre For OpIdx in {0..\p OpdMapper.getMI().getNumOperands())
  ///        the range OpdMapper.getVRegs(OpIdx) is empty or of size 1.
  static void applyDefaultMapping(const OperandsMapper &OpdMapper);

  /// See ::applyMapping.
  virtual void applyMappingImpl(const OperandsMapper &OpdMapper) const {
    llvm_unreachable("The target has to implement that part");
  }

public:
  virtual ~RegisterBankInfo() = default;

  /// Get the register bank identified by \p ID.
  const RegisterBank &getRegBank(unsigned ID) const {
    return const_cast<RegisterBankInfo *>(this)->getRegBank(ID);
  }

  /// Get the register bank of \p Reg.
  /// If Reg has not been assigned a register, a register class,
  /// or a register bank, then this returns nullptr.
  ///
  /// \pre Reg != 0 (NoRegister)
  const RegisterBank *getRegBank(Register Reg, const MachineRegisterInfo &MRI,
                                 const TargetRegisterInfo &TRI) const;

  /// Get the total number of register banks.
  unsigned getNumRegBanks() const { return NumRegBanks; }

  /// Get a register bank that covers \p RC.
  ///
  /// \pre \p RC is a user-defined register class (as opposed to one
  /// generated by TableGen).
  ///
  /// \note The mapping RC -> RegBank could be built while adding the
  /// coverage for the register banks. However, we do not do it, because,
  /// at least for now, we only need this information for register classes
  /// that are used in the description of instructions. In other words,
  /// there are just a handful of them and we do not want to waste space.
  ///
  /// \todo This should be TableGen'ed.
  virtual const RegisterBank &
  getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const {
    llvm_unreachable("The target must override this method");
  }

  /// Get the cost of a copy from \p B to \p A, or put differently,
  /// get the cost of A = COPY B. Since register banks may cover
  /// different sizes, \p Size specifies the size in bits
  /// that will be copied around.
  ///
  /// \note Since this is a copy, both registers have the same size.
  virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
                            unsigned Size) const {
    // Optimistically assume that copies are coalesced. I.e., when
    // they are on the same bank, they are free.
    // Otherwise assume a non-zero cost of 1. The targets are supposed
    // to override that properly anyway if they care.
    return &A != &B;
  }
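
  // Illustrative sketch: a target where cross-bank copies are more expensive
  // than same-bank copies might override this as follows (the costs are
  // hypothetical):
  //   unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
  //                     unsigned Size) const override {
  //     return (&A != &B) ? 2 : 0;
  //   }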

  /// \returns true if emitting a copy from \p Src to \p Dst is impossible.
  bool cannotCopy(const RegisterBank &Dst, const RegisterBank &Src,
                  unsigned Size) const {
    return copyCost(Dst, Src, Size) == std::numeric_limits<unsigned>::max();
  }

  /// Get the cost of using \p ValMapping to decompose a register. This is
  /// similar to ::copyCost, except for cases where multiple copy-like
  /// operations need to be inserted. If the register is used as a source
  /// operand and already has a bank assigned, \p CurBank is non-null.
  virtual unsigned getBreakDownCost(const ValueMapping &ValMapping,
                                    const RegisterBank *CurBank = nullptr) const {
    return std::numeric_limits<unsigned>::max();
  }

  /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
  ///
  /// \pre \p Reg is a virtual register that either has a bank or a class.
  /// \returns The constrained register class, or nullptr if there is none.
  /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
  /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
  /// purpose, including non-select passes of GlobalISel
  static const TargetRegisterClass *
  constrainGenericRegister(Register Reg, const TargetRegisterClass &RC,
                           MachineRegisterInfo &MRI);

  /// Identifier used when the related instruction mapping instance
  /// is generated by target independent code.
  /// Make sure not to use that identifier to avoid possible collision.
  static const unsigned DefaultMappingID;

  /// Identifier used when the related instruction mapping instance
  /// is generated by the default constructor.
  /// Make sure not to use that identifier.
  static const unsigned InvalidMappingID;

  /// Get the mapping of the different operands of \p MI
  /// on the register bank.
  /// This mapping should be the direct translation of \p MI.
  /// In other words, when \p MI is mapped with the returned mapping,
  /// only the register banks of the operands of \p MI need to be updated.
  /// In particular, neither the opcode nor the type of \p MI needs to be
  /// updated for this direct mapping.
  ///
  /// The target independent implementation gives a mapping based on
  /// the register classes for the target specific opcode.
  /// It uses the ID RegisterBankInfo::DefaultMappingID for that mapping.
  /// Make sure you do not use that ID for the alternative mapping
  /// for MI. See getInstrAlternativeMappings for the alternative
  /// mappings.
  ///
  /// For instance, if \p MI is a vector add, the mapping should
  /// not be a scalarization of the add.
  ///
  /// \post returnedVal.verify(MI).
  ///
  /// \note If returnedVal does not verify MI, this would probably mean
  /// that the target does not support that instruction.
  virtual const InstructionMapping &
  getInstrMapping(const MachineInstr &MI) const;

  /// Get the alternative mappings for \p MI.
  /// Alternative in the sense different from getInstrMapping.
  virtual InstructionMappings
  getInstrAlternativeMappings(const MachineInstr &MI) const;

  /// Get the possible mapping for \p MI.
  /// A mapping defines where the different operands may live and at what cost.
  /// For instance, let us consider:
  /// v0(16) = G_ADD <2 x i8> v1, v2
  /// The possible mapping could be:
  ///
  /// {/*ID*/VectorAdd, /*Cost*/1, /*v0*/{(0xFFFF, VPR)}, /*v1*/{(0xFFFF, VPR)},
  ///                              /*v2*/{(0xFFFF, VPR)}}
  /// {/*ID*/ScalarAddx2, /*Cost*/2, /*v0*/{(0x00FF, GPR),(0xFF00, GPR)},
  ///                                /*v1*/{(0x00FF, GPR),(0xFF00, GPR)},
  ///                                /*v2*/{(0x00FF, GPR),(0xFF00, GPR)}}
  ///
  /// \note The first alternative of the returned mapping should be the
  /// direct translation of \p MI current form.
  ///
  /// \post !returnedVal.empty().
  InstructionMappings getInstrPossibleMappings(const MachineInstr &MI) const;

  /// Apply \p OpdMapper.getInstrMapping() to \p OpdMapper.getMI().
  /// After this call \p OpdMapper.getMI() may not be valid anymore.
  /// \p OpdMapper.getInstrMapping().getID() carries the information of
  /// what has been chosen to map \p OpdMapper.getMI(). This ID is set
  /// by the various getInstrXXXMapping methods.
  ///
  /// Therefore, getting the mapping and applying it should be kept in
  /// sync.
  void applyMapping(const OperandsMapper &OpdMapper) const {
    // The only mapping we know how to handle is the default mapping.
    if (OpdMapper.getInstrMapping().getID() == DefaultMappingID)
      return applyDefaultMapping(OpdMapper);
    // For other mapping, the target needs to do the right thing.
    // If that means calling applyDefaultMapping, fine, but this
    // must be explicitly stated.
    applyMappingImpl(OpdMapper);
  }

  /// Get the size in bits of \p Reg.
  /// Utility method to get the size of any register. Unlike
  /// MachineRegisterInfo::getSize, the register does not need to be a
  /// virtual register.
  ///
  /// \pre \p Reg != 0 (NoRegister).
  unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
                         const TargetRegisterInfo &TRI) const;

  /// Check that the information held by this instance makes sense for the
  /// given \p TRI.
  ///
  /// \note This method does not check anything when assertions are disabled.
  ///
  /// \return True if the check was successful.
  bool verify(const TargetRegisterInfo &TRI) const;
};

inline raw_ostream &
operator<<(raw_ostream &OS,
           const RegisterBankInfo::PartialMapping &PartMapping) {
  PartMapping.print(OS);
  return OS;
}

inline raw_ostream &
operator<<(raw_ostream &OS, const RegisterBankInfo::ValueMapping &ValMapping) {
  ValMapping.print(OS);
  return OS;
}

inline raw_ostream &
operator<<(raw_ostream &OS,
           const RegisterBankInfo::InstructionMapping &InstrMapping) {
  InstrMapping.print(OS);
  return OS;
}

inline raw_ostream &
operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
  OpdMapper.print(OS, /*ForDebug*/ false);
  return OS;
}

/// Hashing function for PartialMapping.
/// It is required for the hashing of ValueMapping.
hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_REGISTERBANKINFO_H
//===- llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the GIMatchTableExecutor API, the opcodes supported
/// by the match table, and some associated data structures used by the
/// executor's implementation (see `GIMatchTableExecutorImpl.h`).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTOR_H
#define LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <optional>
#include <vector>

namespace llvm {

class BlockFrequencyInfo;
class CodeGenCoverage;
class MachineBasicBlock;
class ProfileSummaryInfo;
class APInt;
class APFloat;
class GISelKnownBits;
class MachineInstr;
class MachineInstrBuilder;
class MachineFunction;
class MachineOperand;
class MachineRegisterInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetRegisterInfo;

/// Container class for CodeGen predicate results.
/// This is convenient because std::bitset does not have a constructor
/// with an initializer list of set bits.
///
/// Each GIMatchTableExecutor subclass should define a PredicateBitset class
/// with:
///   const unsigned MAX_SUBTARGET_PREDICATES = 192;
///   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
/// and update the constant to suit the target. Tablegen provides a suitable
/// definition for the predicates in use in <Target>GenGlobalISel.inc when
/// GET_GLOBALISEL_PREDICATE_BITSET is defined.
template <std::size_t MaxPredicates>
class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
public:
  // Cannot inherit constructors because it's not supported by VC++.
  PredicateBitsetImpl() = default;

  PredicateBitsetImpl(const std::bitset<MaxPredicates> &B)
      : std::bitset<MaxPredicates>(B) {}

  PredicateBitsetImpl(std::initializer_list<unsigned> Init) {
    for (auto I : Init)
      std::bitset<MaxPredicates>::set(I);
  }
};
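
// Illustrative usage sketch (the bit indices and feature names below are
// hypothetical): a target executor aliases the template and seeds it with the
// indices of the subtarget predicates that are known to hold:
//
//   constexpr unsigned MAX_SUBTARGET_PREDICATES = 192;
//   using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
//   PredicateBitset AvailableFeatures{/*Feature_A*/ 3, /*Feature_B*/ 7};
//   bool HasA = AvailableFeatures.test(3); // Plain std::bitset queries work.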

enum {
  GICXXPred_Invalid = 0,
  GICXXCustomAction_Invalid = 0,
};

enum {
  /// Begin a try-block to attempt a match and jump to OnFail if it is
  /// unsuccessful.
  /// - OnFail - The MatchTable entry at which to resume if the match fails.
  ///
  /// FIXME: This ought to take an argument indicating the number of try-blocks
  ///        to exit on failure. It's usually one but the last match attempt of
  ///        a block will need more. The (implemented) alternative is to tack a
  ///        GIM_Reject on the end of each try-block which is simpler but
  ///        requires an extra opcode and iteration in the interpreter on each
  ///        failed match.
  GIM_Try,

  /// Switch over the opcode on the specified instruction
  /// - InsnID - Instruction ID
  /// - LowerBound - numerically minimum opcode supported
  /// - UpperBound - one past the numerically maximum opcode supported
  /// - Default - failure jump target
  /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
  GIM_SwitchOpcode,

  /// Switch over the LLT on the specified instruction operand
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - LowerBound - numerically minimum Type ID supported
  /// - UpperBound - one past the numerically maximum Type ID supported
  /// - Default - failure jump target
  /// - JumpTable... - (UpperBound - LowerBound) (at least 2) jump targets
  GIM_SwitchType,

  /// Record the specified instruction.
  /// The IgnoreCopies variant ignores COPY instructions.
  /// - NewInsnID - Instruction ID to define
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  GIM_RecordInsn,
  GIM_RecordInsnIgnoreCopies,

  /// Check the feature bits
  /// - Expected features
  GIM_CheckFeatures,

  /// Check the opcode on the specified instruction
  /// - InsnID - Instruction ID
  /// - Expected opcode
  GIM_CheckOpcode,

  /// Check the opcode on the specified instruction, checking 2 acceptable
  /// alternatives.
  /// - InsnID - Instruction ID
  /// - Expected opcode
  /// - Alternative expected opcode
  GIM_CheckOpcodeIsEither,

  /// Check the instruction has the right number of operands
  /// - InsnID - Instruction ID
  /// - Expected number of operands
  GIM_CheckNumOperands,
  /// Check an immediate predicate on the specified instruction
  /// - InsnID - Instruction ID
  /// - The predicate to test
  GIM_CheckI64ImmPredicate,
  /// Check an immediate predicate on the specified instruction via an APInt.
  /// - InsnID - Instruction ID
  /// - The predicate to test
  GIM_CheckAPIntImmPredicate,
  /// Check a floating point immediate predicate on the specified instruction.
  /// - InsnID - Instruction ID
  /// - The predicate to test
  GIM_CheckAPFloatImmPredicate,
  /// Check an immediate predicate on the specified instruction
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - The predicate to test
  GIM_CheckImmOperandPredicate,
  /// Check a memory operation has the specified atomic ordering.
  /// - InsnID - Instruction ID
  /// - Ordering - The AtomicOrdering value
  GIM_CheckAtomicOrdering,
  GIM_CheckAtomicOrderingOrStrongerThan,
  GIM_CheckAtomicOrderingWeakerThan,
  /// Check the size of the memory access for the given machine memory operand.
  /// - InsnID - Instruction ID
  /// - MMOIdx - MMO index
  /// - Size - The size in bytes of the memory access
  GIM_CheckMemorySizeEqualTo,

  /// Check the address space of the memory access for the given machine memory
  /// operand.
  /// - InsnID - Instruction ID
  /// - MMOIdx - MMO index
  /// - NumAddrSpace - Number of valid address spaces
  /// - AddrSpaceN - An allowed space of the memory access
  /// - AddrSpaceN+1 ...
  GIM_CheckMemoryAddressSpace,

  /// Check the minimum alignment of the memory access for the given machine
  /// memory operand.
  /// - InsnID - Instruction ID
  /// - MMOIdx - MMO index
  /// - MinAlign - Minimum acceptable alignment
  GIM_CheckMemoryAlignment,

  /// Check the size of the memory access for the given machine memory operand
  /// against the size of an operand.
  /// - InsnID - Instruction ID
  /// - MMOIdx - MMO index
  /// - OpIdx - The operand index to compare the MMO against
  GIM_CheckMemorySizeEqualToLLT,
  GIM_CheckMemorySizeLessThanLLT,
  GIM_CheckMemorySizeGreaterThanLLT,

  /// Check if this is a vector that can be treated as a vector splat
  /// constant. This is valid for both G_BUILD_VECTOR and
  /// G_BUILD_VECTOR_TRUNC. For the AllOnes variant, "all ones" refers to the
  /// individual bits, so each element must be -1.
  /// - InsnID - Instruction ID
  GIM_CheckIsBuildVectorAllOnes,
  GIM_CheckIsBuildVectorAllZeros,

  /// Check a trivial predicate which takes no arguments.
  /// This can be used by executors to implement custom flags that don't fit in
  /// target features.
  GIM_CheckSimplePredicate,

  /// Check a generic C++ instruction predicate
  /// - InsnID - Instruction ID
  /// - PredicateID - The ID of the predicate function to call
  GIM_CheckCxxInsnPredicate,

  /// Check if there's no use of the first result.
  /// - InsnID - Instruction ID
  GIM_CheckHasNoUse,

  /// Check the type for the specified operand
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - Expected type
  GIM_CheckType,
  /// Check the type of a pointer to any address space.
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - SizeInBits - The size of the pointer value in bits.
  GIM_CheckPointerToAny,
  /// Check the register bank for the specified operand
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - Expected register bank (specified as a register class)
  GIM_CheckRegBankForClass,

  /// Check the operand matches a complex predicate
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - RendererID - The renderer to hold the result
  /// - Complex predicate ID
  GIM_CheckComplexPattern,

  /// Check the operand is a specific integer
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - Expected integer
  GIM_CheckConstantInt,
  /// Check the operand is a specific literal integer (i.e. MO.isImm() or
  /// MO.isCImm() is true).
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - Expected integer
  GIM_CheckLiteralInt,
  /// Check the operand is a specific intrinsic ID
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - Expected Intrinsic ID
  GIM_CheckIntrinsicID,

  /// Check the operand is a specific predicate
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - Expected predicate
  GIM_CheckCmpPredicate,

  /// Check the specified operand is an MBB
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  GIM_CheckIsMBB,

  /// Check the specified operand is an Imm
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  GIM_CheckIsImm,

  /// Check if the specified operand is safe to fold into the current
  /// instruction.
  /// - InsnID - Instruction ID
  GIM_CheckIsSafeToFold,

  /// Check the specified operands are identical.
  /// The IgnoreCopies variant looks through COPY instructions before
  /// comparing the operands.
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - OtherInsnID - Other instruction ID
  /// - OtherOpIdx - Other operand index
  GIM_CheckIsSameOperand,
  GIM_CheckIsSameOperandIgnoreCopies,

  /// Predicates with 'let PredicateCodeUsesOperands = 1' need to examine some
  /// named operands that will be recorded in RecordedOperands. The names of
  /// these operands are referenced in the predicate's argument list. The
  /// emitter determines StoreIdx (which corresponds to the order in which
  /// names appear in the argument list).
  /// - InsnID - Instruction ID
  /// - OpIdx - Operand index
  /// - StoreIdx - Store location in RecordedOperands.
  GIM_RecordNamedOperand,

  /// Fail the current try-block, or completely fail to match if there is no
  /// current try-block.
  GIM_Reject,

  //=== Renderers ===

  /// Mutate an instruction
  /// - NewInsnID - Instruction ID to define
  /// - OldInsnID - Instruction ID to mutate
  /// - NewOpcode - The new opcode to use
  GIR_MutateOpcode,

  /// Build a new instruction
  /// - InsnID - Instruction ID to define
  /// - Opcode - The new opcode to use
  GIR_BuildMI,

  /// Copy an operand to the specified instruction
  /// - NewInsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to copy from
  /// - OpIdx - The operand to copy
  GIR_Copy,

  /// Copy an operand to the specified instruction or add a zero register if the
  /// operand is a zero immediate.
  /// - NewInsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to copy from
  /// - OpIdx - The operand to copy
  /// - ZeroReg - The zero register to use
  GIR_CopyOrAddZeroReg,
  /// Copy an operand to the specified instruction
  /// - NewInsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to copy from
  /// - OpIdx - The operand to copy
  /// - SubRegIdx - The subregister to copy
  GIR_CopySubReg,

  /// Add an implicit register def to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - RegNum - The register to add
  GIR_AddImplicitDef,
  /// Add an implicit register use to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - RegNum - The register to add
  GIR_AddImplicitUse,
  /// Add a register to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - RegNum - The register to add
  GIR_AddRegister,

  /// Add a temporary register to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - TempRegID - The temporary register ID to add
  /// - TempRegFlags - The register flags to set
  GIR_AddTempRegister,

  /// Add a temporary register to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - TempRegID - The temporary register ID to add
  /// - TempRegFlags - The register flags to set
  /// - SubRegIndex - The subregister index to set
  GIR_AddTempSubRegister,

  /// Add an immediate to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - Imm - The immediate to add
  GIR_AddImm,

  /// Render complex operands to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - RendererID - The renderer to call
  GIR_ComplexRenderer,
  /// Render sub-operands of complex operands to the specified instruction
  /// - InsnID - Instruction ID to modify
  /// - RendererID - The renderer to call
  /// - RenderOpID - The suboperand to render.
  GIR_ComplexSubOperandRenderer,
  /// Render subregisters of suboperands of complex operands to the
  /// specified instruction
  /// - InsnID - Instruction ID to modify
  /// - RendererID - The renderer to call
  /// - RenderOpID - The suboperand to render
  /// - SubRegIdx - The subregister to extract
  GIR_ComplexSubOperandSubRegRenderer,

  /// Render operands to the specified instruction using a custom function
  /// - InsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to get the matched operand from
  /// - RendererFnID - Custom renderer function to call
  GIR_CustomRenderer,

  /// Calls a C++ function to perform an action when a match is complete.
  /// The MatcherState is passed to the function to allow it to modify
  /// instructions.
  /// This is less constrained than a custom renderer and can update instructions
  /// in the state.
  /// - FnID - The function to call.
  /// TODO: Remove this at some point when combiners aren't reliant on it. It's
  /// a bit of a hack.
  GIR_CustomAction,

  /// Render operands to the specified instruction using a custom function,
  /// reading from a specific operand.
  /// - InsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to get the matched operand from
  /// - OpIdx - Operand index in OldInsnID the render function should read
  /// from.
  /// - RendererFnID - Custom renderer function to call
  GIR_CustomOperandRenderer,

  /// Render a G_CONSTANT operator as a sign-extended immediate.
  /// - NewInsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to copy from
  /// The operand index is implicitly 1.
  GIR_CopyConstantAsSImm,

  /// Render a G_FCONSTANT operator as a sign-extended immediate.
  /// - NewInsnID - Instruction ID to modify
  /// - OldInsnID - Instruction ID to copy from
  /// The operand index is implicitly 1.
  GIR_CopyFConstantAsFPImm,

  /// Constrain an instruction operand to a register class.
  /// - InsnID - Instruction ID to modify
  /// - OpIdx - Operand index
  /// - RCEnum - Register class enumeration value
  GIR_ConstrainOperandRC,

  /// Constrain an instruction's operands according to the instruction
  /// description.
  /// - InsnID - Instruction ID to modify
  GIR_ConstrainSelectedInstOperands,

  /// Merge all memory operands into instruction.
  /// - InsnID - Instruction ID to modify
  /// - MergeInsnID... - One or more instruction IDs to merge into the result.
  /// - GIU_MergeMemOperands_EndOfList - Terminates the list of instructions to
  ///                                    merge.
  GIR_MergeMemOperands,

  /// Erase from parent.
  /// - InsnID - Instruction ID to erase
  GIR_EraseFromParent,

  /// Create a new temporary register that's not constrained.
  /// - TempRegID - The temporary register ID to initialize.
  /// - Expected type
  GIR_MakeTempReg,

  /// A successful emission
  GIR_Done,

  /// Increment the rule coverage counter.
  /// - RuleID - The ID of the rule that was covered.
  GIR_Coverage,

  /// Keeps track of the number of GI opcodes. Must be the last entry.
  GIU_NumOpcodes,
};

enum {
  /// Indicates the end of the variable-length MergeInsnID list in a
  /// GIR_MergeMemOperands opcode.
  GIU_MergeMemOperands_EndOfList = -1,
};
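
// Illustrative sketch of a hand-written table fragment in the encoding
// described by the opcodes above (real tables are emitted by TableGen, and
// the jump target here is a hypothetical table index): try to match a G_ADD
// with three operands, otherwise fall through to a rejection.
//
//   const int64_t MatchTable[] = {
//       GIM_Try, /*OnFail*/ 9,
//       GIM_CheckOpcode, /*InsnID*/ 0, TargetOpcode::G_ADD,
//       GIM_CheckNumOperands, /*InsnID*/ 0, /*Expected*/ 3,
//       GIR_Done,
//       /*Index 9:*/ GIM_Reject,
//   };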

/// Provides the logic to execute GlobalISel match tables, which are used by the
/// instruction selector and instruction combiners as their engine to match and
/// apply MIR patterns.
class GIMatchTableExecutor {
public:
  virtual ~GIMatchTableExecutor() = default;

  CodeGenCoverage *CoverageInfo = nullptr;
  GISelKnownBits *KB = nullptr;
  MachineFunction *MF = nullptr;
  ProfileSummaryInfo *PSI = nullptr;
  BlockFrequencyInfo *BFI = nullptr;
  // For some predicates, we need to track the current MBB.
  MachineBasicBlock *CurMBB = nullptr;

  virtual void setupGeneratedPerFunctionState(MachineFunction &MF) {
    llvm_unreachable("TableGen should have emitted implementation");
  }

  /// Setup per-MF executor state.
  virtual void setupMF(MachineFunction &mf, GISelKnownBits *kb,
                       CodeGenCoverage *covinfo = nullptr,
                       ProfileSummaryInfo *psi = nullptr,
                       BlockFrequencyInfo *bfi = nullptr) {
    CoverageInfo = covinfo;
    KB = kb;
    MF = &mf;
    PSI = psi;
    BFI = bfi;
    CurMBB = nullptr;
    setupGeneratedPerFunctionState(mf);
  }

protected:
  using ComplexRendererFns =
      std::optional<SmallVector<std::function<void(MachineInstrBuilder &)>, 4>>;
  using RecordedMIVector = SmallVector<MachineInstr *, 4>;
  using NewMIVector = SmallVector<MachineInstrBuilder, 4>;

  struct MatcherState {
    std::vector<ComplexRendererFns::value_type> Renderers;
    RecordedMIVector MIs;
    DenseMap<unsigned, unsigned> TempRegisters;
    /// Named operands that a predicate with 'let PredicateCodeUsesOperands =
    /// 1' references in its argument list. Operands are inserted at the index
    /// set by the emitter, which corresponds to the order in which names
    /// appear in the argument list. Currently such predicates don't have more
    /// than 3 arguments.
    std::array<const MachineOperand *, 3> RecordedOperands;

    MatcherState(unsigned MaxRenderers);
  };

  bool shouldOptForSize(const MachineFunction *MF) const {
    const auto &F = MF->getFunction();
    return F.hasOptSize() || F.hasMinSize() ||
           (PSI && BFI && CurMBB && llvm::shouldOptForSize(*CurMBB, PSI, BFI));
  }

public:
  template <class PredicateBitset, class ComplexMatcherMemFn,
            class CustomRendererFn>
  struct ExecInfoTy {
    ExecInfoTy(const LLT *TypeObjects, size_t NumTypeObjects,
               const PredicateBitset *FeatureBitsets,
               const ComplexMatcherMemFn *ComplexPredicates,
               const CustomRendererFn *CustomRenderers)
        : TypeObjects(TypeObjects), FeatureBitsets(FeatureBitsets),
          ComplexPredicates(ComplexPredicates),
          CustomRenderers(CustomRenderers) {

      for (size_t I = 0; I < NumTypeObjects; ++I)
        TypeIDMap[TypeObjects[I]] = I;
    }
    const LLT *TypeObjects;
    const PredicateBitset *FeatureBitsets;
    const ComplexMatcherMemFn *ComplexPredicates;
    const CustomRendererFn *CustomRenderers;

    SmallDenseMap<LLT, unsigned, 64> TypeIDMap;
  };

protected:
  GIMatchTableExecutor();

  /// Execute a given matcher table and return true if the match was successful
  /// and false otherwise.
  template <class TgtExecutor, class PredicateBitset, class ComplexMatcherMemFn,
            class CustomRendererFn>
  bool executeMatchTable(
      TgtExecutor &Exec, NewMIVector &OutMIs, MatcherState &State,
      const ExecInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
          &ISelInfo,
      const int64_t *MatchTable, const TargetInstrInfo &TII,
      MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
      const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
      CodeGenCoverage *CoverageInfo) const;

  virtual const int64_t *getMatchTable() const {
    llvm_unreachable("Should have been overridden by tablegen if used");
  }

  virtual bool testImmPredicate_I64(unsigned, int64_t) const {
    llvm_unreachable(
        "Subclasses must override this with a tablegen-erated function");
  }
  virtual bool testImmPredicate_APInt(unsigned, const APInt &) const {
    llvm_unreachable(
        "Subclasses must override this with a tablegen-erated function");
  }
  virtual bool testImmPredicate_APFloat(unsigned, const APFloat &) const {
    llvm_unreachable(
        "Subclasses must override this with a tablegen-erated function");
  }
  virtual bool testMIPredicate_MI(unsigned, const MachineInstr &,
                                  const MatcherState &State) const {
    llvm_unreachable(
        "Subclasses must override this with a tablegen-erated function");
  }

  virtual bool testSimplePredicate(unsigned) const {
    llvm_unreachable("Subclass does not implement testSimplePredicate!");
  }

  virtual void runCustomAction(unsigned, const MatcherState &State) const {
    llvm_unreachable("Subclass does not implement runCustomAction!");
  }

  bool isOperandImmEqual(const MachineOperand &MO, int64_t Value,
                         const MachineRegisterInfo &MRI) const;

  /// Return true if the specified operand is a G_PTR_ADD with a G_CONSTANT on
  /// the right-hand side. GlobalISel's separation of pointer and integer types
  /// means that we don't need to worry about G_OR with equivalent semantics.
  bool isBaseWithConstantOffset(const MachineOperand &Root,
                                const MachineRegisterInfo &MRI) const;

  /// Return true if MI can obviously be folded into IntoMI.
  /// MI and IntoMI do not need to be in the same basic blocks, but MI must
  /// precede IntoMI.
  bool isObviouslySafeToFold(MachineInstr &MI, MachineInstr &IntoMI) const;
};
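
// Illustrative sketch (class and member names are hypothetical): a target
// selector derives from a GIMatchTableExecutor subclass and drives the
// tablegen-erated table from its selection entry point, roughly:
//
//   bool MyTargetInstructionSelector::selectImpl(MachineInstr &I) {
//     MatcherState State(MaxRenderers);
//     NewMIVector OutMIs;
//     return executeMatchTable(*this, OutMIs, State, ExecInfo,
//                              getMatchTable(), TII, MRI, TRI, RBI,
//                              AvailableFeatures, CoverageInfo);
//   }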

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_GIMATCHTABLEEXECUTOR_H
//===- llvm/CodeGen/GlobalISel/CallLowering.h - Call lowering ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM calls to machine code calls.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
#define LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
#include <functional>

namespace llvm {

class AttributeList;
class CallBase;
class DataLayout;
class Function;
class FunctionLoweringInfo;
class MachineIRBuilder;
class MachineFunction;
struct MachinePointerInfo;
class MachineRegisterInfo;
class TargetLowering;

class CallLowering {
  const TargetLowering *TLI;

  virtual void anchor();
public:
  struct BaseArgInfo {
    Type *Ty;
    SmallVector<ISD::ArgFlagsTy, 4> Flags;
    bool IsFixed;

    BaseArgInfo(Type *Ty,
                ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
                bool IsFixed = true)
        : Ty(Ty), Flags(Flags.begin(), Flags.end()), IsFixed(IsFixed) {}

    BaseArgInfo() : Ty(nullptr), IsFixed(false) {}
  };

  struct ArgInfo : public BaseArgInfo {
    SmallVector<Register, 4> Regs;
    // If the argument had to be split into multiple parts according to the
    // target calling convention, and the argument was an incoming arg, then
    // this contains the original vregs.
    SmallVector<Register, 2> OrigRegs;

    /// Optionally track the original IR value for the argument. This may not
    /// be meaningful in all contexts. This should only be used for forwarding
    /// aliasing information through to the MachinePointerInfo of memory
    /// arguments.
    const Value *OrigValue = nullptr;

    /// Index of the original Function's argument.
    unsigned OrigArgIndex;

    /// Sentinel value for implicit machine-level input arguments.
    static const unsigned NoArgIndex = UINT_MAX;

    ArgInfo(ArrayRef<Register> Regs, Type *Ty, unsigned OrigIndex,
            ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
            bool IsFixed = true, const Value *OrigValue = nullptr)
        : BaseArgInfo(Ty, Flags, IsFixed), Regs(Regs.begin(), Regs.end()),
          OrigValue(OrigValue), OrigArgIndex(OrigIndex) {
      if (!Regs.empty() && Flags.empty())
        this->Flags.push_back(ISD::ArgFlagsTy());
      // FIXME: We should have just one way of saying "no register".
      assert(((Ty->isVoidTy() || Ty->isEmptyTy()) ==
              (Regs.empty() || Regs[0] == 0)) &&
             "only void types should have no register");
    }

    ArgInfo(ArrayRef<Register> Regs, const Value &OrigValue, unsigned OrigIndex,
            ArrayRef<ISD::ArgFlagsTy> Flags = ArrayRef<ISD::ArgFlagsTy>(),
            bool IsFixed = true)
      : ArgInfo(Regs, OrigValue.getType(), OrigIndex, Flags, IsFixed, &OrigValue) {}

    ArgInfo() = default;
  };
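
  // Illustrative construction sketch (VReg and Ctx are assumed to be an
  // existing virtual register and LLVMContext): describing a single i32
  // argument that lives in one virtual register at original argument index 0:
  //
  //   CallLowering::ArgInfo Info({VReg}, Type::getInt32Ty(Ctx),
  //                              /*OrigIndex=*/0);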

  struct CallLoweringInfo {
    /// Calling convention to be used for the call.
    CallingConv::ID CallConv = CallingConv::C;

    /// Destination of the call. It should be either a register, globaladdress,
    /// or externalsymbol.
    MachineOperand Callee = MachineOperand::CreateImm(0);

    /// Descriptor for the return type of the function.
    ArgInfo OrigRet;

    /// List of descriptors of the arguments passed to the function.
    SmallVector<ArgInfo, 32> OrigArgs;

    /// Valid if the call has a swifterror inout parameter, and contains the
    /// vreg that the swifterror should be copied into after the call.
    Register SwiftErrorVReg;

    /// Original IR callsite corresponding to this call, if available.
    const CallBase *CB = nullptr;

    MDNode *KnownCallees = nullptr;

    /// True if the call must be tail call optimized.
    bool IsMustTailCall = false;

    /// True if the call passes all target-independent checks for tail call
    /// optimization.
    bool IsTailCall = false;

    /// True if the call was lowered as a tail call. This is consumed by the
    /// legalizer. This allows the legalizer to lower libcalls as tail calls.
    bool LoweredTailCall = false;

    /// True if the call is to a vararg function.
    bool IsVarArg = false;

    /// True if the function's return value can be lowered to registers.
    bool CanLowerReturn = true;

    /// VReg to hold the hidden sret parameter.
    Register DemoteRegister;

    /// The stack index for sret demotion.
    int DemoteStackIndex;

    /// Expected type identifier for indirect calls with a CFI check.
    const ConstantInt *CFIType = nullptr;
  };

  /// Argument handling is mostly uniform between the four places that
  /// make these decisions: function formal arguments, call
  /// instruction args, call instruction returns and function
  /// returns. However, once a decision has been made on where an
  /// argument should go, exactly what happens can vary slightly. This
  /// class abstracts the differences.
  ///
  /// ValueAssigner should not depend on any specific function state, and
  /// only determine the types and locations for arguments.
  struct ValueAssigner {
    ValueAssigner(bool IsIncoming, CCAssignFn *AssignFn_,
                  CCAssignFn *AssignFnVarArg_ = nullptr)
        : AssignFn(AssignFn_), AssignFnVarArg(AssignFnVarArg_),
          IsIncomingArgumentHandler(IsIncoming) {

      // Some targets change the handler depending on whether the call is
      // varargs or not. If no vararg-specific assignment function was
      // provided, fall back to the general one.
      if (!AssignFnVarArg)
        AssignFnVarArg = AssignFn;
    }

    virtual ~ValueAssigner() = default;

    /// Returns true if the handler is dealing with incoming arguments,
    /// i.e. those that move values from some physical location to vregs.
    bool isIncomingArgumentHandler() const {
      return IsIncomingArgumentHandler;
    }

    /// Wrap a call to the (typically tablegen-erated) CCAssignFn. This may be
    /// overridden to track additional state information as arguments are
    /// assigned, or to apply target-specific hacks around the legacy
    /// infrastructure.
    virtual bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
                           ISD::ArgFlagsTy Flags, CCState &State) {
      if (getAssignFn(State.isVarArg())(ValNo, ValVT, LocVT, LocInfo, Flags,
                                        State))
        return true;
      StackSize = State.getStackSize();
      return false;
    }

    /// Assignment function to use for a general call.
    CCAssignFn *AssignFn;

    /// Assignment function to use for a variadic call. On most targets this
    /// is the same as AssignFn.
    CCAssignFn *AssignFnVarArg;

    /// The size of the currently allocated portion of the stack.
    uint64_t StackSize = 0;

    /// Select the appropriate assignment function depending on whether this is
    /// a variadic call.
    CCAssignFn *getAssignFn(bool IsVarArg) const {
      return IsVarArg ? AssignFnVarArg : AssignFn;
    }

  private:
    const bool IsIncomingArgumentHandler;
    virtual void anchor();
  };

  struct IncomingValueAssigner : public ValueAssigner {
    IncomingValueAssigner(CCAssignFn *AssignFn_,
                          CCAssignFn *AssignFnVarArg_ = nullptr)
        : ValueAssigner(true, AssignFn_, AssignFnVarArg_) {}
  };

  struct OutgoingValueAssigner : public ValueAssigner {
    OutgoingValueAssigner(CCAssignFn *AssignFn_,
                          CCAssignFn *AssignFnVarArg_ = nullptr)
        : ValueAssigner(false, AssignFn_, AssignFnVarArg_) {}
  };

  struct ValueHandler {
    MachineIRBuilder &MIRBuilder;
    MachineRegisterInfo &MRI;
    const bool IsIncomingArgumentHandler;

    ValueHandler(bool IsIncoming, MachineIRBuilder &MIRBuilder,
                 MachineRegisterInfo &MRI)
        : MIRBuilder(MIRBuilder), MRI(MRI),
          IsIncomingArgumentHandler(IsIncoming) {}

    virtual ~ValueHandler() = default;

    /// Returns true if the handler is dealing with incoming arguments,
    /// i.e. those that move values from some physical location to vregs.
    bool isIncomingArgumentHandler() const {
      return IsIncomingArgumentHandler;
    }

    /// Materialize a VReg containing the address of the specified
    /// stack-based object. This is either based on a FrameIndex or
    /// direct SP manipulation, depending on the context. \p MPO
    /// should be initialized to an appropriate description of the
    /// address created.
    virtual Register getStackAddress(uint64_t MemSize, int64_t Offset,
                                     MachinePointerInfo &MPO,
                                     ISD::ArgFlagsTy Flags) = 0;

    /// Return the in-memory size to write for the argument at \p VA. This may
    /// be smaller than the allocated stack slot size.
    ///
    /// This is overridable primarily for targets to maintain compatibility with
    /// hacks around the existing DAG call lowering infrastructure.
    virtual LLT getStackValueStoreType(const DataLayout &DL,
                                       const CCValAssign &VA,
                                       ISD::ArgFlagsTy Flags) const;

    /// The specified value has been assigned to a physical register,
    /// handle the appropriate COPY (either to or from) and mark any
    /// relevant uses/defines as needed.
    virtual void assignValueToReg(Register ValVReg, Register PhysReg,
                                  CCValAssign VA) = 0;

    /// The specified value has been assigned to a stack
    /// location. Load or store it there, with appropriate extension
    /// if necessary.
    virtual void assignValueToAddress(Register ValVReg, Register Addr,
                                      LLT MemTy, MachinePointerInfo &MPO,
                                      CCValAssign &VA) = 0;

    /// An overload which takes an ArgInfo if additional information about the
    /// arg is needed. \p ValRegIndex is the index in \p Arg.Regs for the value
    /// to store.
    virtual void assignValueToAddress(const ArgInfo &Arg, unsigned ValRegIndex,
                                      Register Addr, LLT MemTy,
                                      MachinePointerInfo &MPO,
                                      CCValAssign &VA) {
      assignValueToAddress(Arg.Regs[ValRegIndex], Addr, MemTy, MPO, VA);
    }

    /// Handle custom values, which may be passed into one or more of \p VAs.
    /// If the handler wants the assignments to be delayed until after the
    /// memory location assignments, it sets \p Thunk to the thunk that
    /// performs the assignment.
    /// \return The number of \p VAs that have been assigned after the first
    ///         one, and which should therefore be skipped from further
    ///         processing.
    virtual unsigned assignCustomValue(ArgInfo &Arg, ArrayRef<CCValAssign> VAs,
                                       std::function<void()> *Thunk = nullptr) {
      // This is not a pure virtual method because not all targets need to worry
      // about custom values.
      llvm_unreachable("Custom values not supported");
    }

    /// Do a memory copy of \p MemSize bytes from \p SrcPtr to \p DstPtr. This
    /// is necessary for outgoing stack-passed byval arguments.
    void
    copyArgumentMemory(const ArgInfo &Arg, Register DstPtr, Register SrcPtr,
                       const MachinePointerInfo &DstPtrInfo, Align DstAlign,
                       const MachinePointerInfo &SrcPtrInfo, Align SrcAlign,
                       uint64_t MemSize, CCValAssign &VA) const;

    /// Extend a register to the location type given in VA, capped at extending
    /// to at most MaxSizeBits bits. If MaxSizeBits is 0 then no maximum is set.
    Register extendRegister(Register ValReg, CCValAssign &VA,
                            unsigned MaxSizeBits = 0);
  };

  /// Base class for ValueHandlers used for arguments coming into the current
  /// function, or for return values received from a call.
  struct IncomingValueHandler : public ValueHandler {
    IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
        : ValueHandler(/*IsIncoming*/ true, MIRBuilder, MRI) {}

    /// Insert G_ASSERT_ZEXT/G_ASSERT_SEXT or other hint instruction based on \p
    /// VA, returning the new register if a hint was inserted.
    Register buildExtensionHint(CCValAssign &VA, Register SrcReg, LLT NarrowTy);

    /// Provides a default implementation for argument handling.
    void assignValueToReg(Register ValVReg, Register PhysReg,
                          CCValAssign VA) override;
  };

  /// Base class for ValueHandlers used for arguments passed to a function call,
  /// or for return values.
  struct OutgoingValueHandler : public ValueHandler {
    OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
        : ValueHandler(/*IsIncoming*/ false, MIRBuilder, MRI) {}
  };

protected:
  /// Getter for generic TargetLowering class.
  const TargetLowering *getTLI() const {
    return TLI;
  }

  /// Getter for target specific TargetLowering class.
  template <class XXXTargetLowering>
  const XXXTargetLowering *getTLI() const {
    return static_cast<const XXXTargetLowering *>(TLI);
  }

  /// \returns Flags corresponding to the attributes on the \p ArgIdx-th
  /// parameter of \p Call.
  ISD::ArgFlagsTy getAttributesForArgIdx(const CallBase &Call,
                                         unsigned ArgIdx) const;

  /// \returns Flags corresponding to the attributes on the return from \p Call.
  ISD::ArgFlagsTy getAttributesForReturn(const CallBase &Call) const;

  /// Adds flags to \p Flags based off of the attributes in \p Attrs.
  /// \p OpIdx is the index in \p Attrs to add flags from.
  void addArgFlagsFromAttributes(ISD::ArgFlagsTy &Flags,
                                 const AttributeList &Attrs,
                                 unsigned OpIdx) const;

  template <typename FuncInfoTy>
  void setArgFlags(ArgInfo &Arg, unsigned OpIdx, const DataLayout &DL,
                   const FuncInfoTy &FuncInfo) const;

  /// Break \p OrigArgInfo into one or more pieces the calling convention can
  /// process, returned in \p SplitArgs. For example, this should break structs
  /// down into individual fields.
  ///
  /// If \p Offsets is non-null, it points to a vector to be filled in
  /// with the in-memory offsets of each of the individual values.
  void splitToValueTypes(const ArgInfo &OrigArgInfo,
                         SmallVectorImpl<ArgInfo> &SplitArgs,
                         const DataLayout &DL, CallingConv::ID CallConv,
                         SmallVectorImpl<uint64_t> *Offsets = nullptr) const;

  /// Analyze the argument list in \p Args, using \p Assigner to populate \p
  /// CCInfo. This will determine the types and locations to use for passed or
  /// returned values. This may resize fields in \p Args if the value is split
  /// across multiple registers or stack slots.
  ///
  /// This is independent of the function state and can be used
  /// to determine how a call would pass arguments without needing to change the
  /// function. This can be used to check if arguments are suitable for tail
  /// call lowering.
  ///
  /// \return True if everything has succeeded, false otherwise.
  bool determineAssignments(ValueAssigner &Assigner,
                            SmallVectorImpl<ArgInfo> &Args,
                            CCState &CCInfo) const;

  /// Invoke ValueAssigner::assignArg on each of the given \p Args and then use
  /// \p Handler to move them to the assigned locations.
  ///
  /// \return True if everything has succeeded, false otherwise.
  bool determineAndHandleAssignments(
      ValueHandler &Handler, ValueAssigner &Assigner,
      SmallVectorImpl<ArgInfo> &Args, MachineIRBuilder &MIRBuilder,
      CallingConv::ID CallConv, bool IsVarArg,
      ArrayRef<Register> ThisReturnRegs = std::nullopt) const;
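
  // Illustrative flow sketch (CC_MyTarget and MyOutgoingHandler are
  // hypothetical target-specific pieces): a typical lowerCall implementation
  // splits each argument, then assigns and handles the pieces in one step:
  //
  //   SmallVector<ArgInfo, 8> SplitArgs;
  //   for (ArgInfo &Orig : Info.OrigArgs)
  //     splitToValueTypes(Orig, SplitArgs, DL, Info.CallConv);
  //   OutgoingValueAssigner Assigner(CC_MyTarget);
  //   MyOutgoingHandler Handler(MIRBuilder, MRI);
  //   if (!determineAndHandleAssignments(Handler, Assigner, SplitArgs,
  //                                      MIRBuilder, Info.CallConv,
  //                                      Info.IsVarArg))
  //     return false;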

  /// Use \p Handler to insert code to handle the argument/return values
  /// represented by \p Args. It is expected that determineAssignments has
  /// previously processed these arguments to populate \p CCState and
  /// \p ArgLocs.
  bool
  handleAssignments(ValueHandler &Handler, SmallVectorImpl<ArgInfo> &Args,
                    CCState &CCState, SmallVectorImpl<CCValAssign> &ArgLocs,
                    MachineIRBuilder &MIRBuilder,
                    ArrayRef<Register> ThisReturnRegs = std::nullopt) const;

  /// Check whether parameters to a call that are passed in callee saved
  /// registers are the same as from the calling function.  This needs to be
  /// checked for tail call eligibility.
  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
                            const uint32_t *CallerPreservedMask,
                            const SmallVectorImpl<CCValAssign> &ArgLocs,
                            const SmallVectorImpl<ArgInfo> &OutVals) const;

  /// \returns True if the calling conventions of the callee and its caller
  /// pass results in the same way. Typically used for tail call eligibility
  /// checks.
  ///
  /// \p Info is the CallLoweringInfo for the call.
  /// \p MF is the MachineFunction for the caller.
  /// \p InArgs contains the results of the call.
  /// \p CalleeAssigner specifies the target's handling of the argument types
  /// for the callee.
  /// \p CallerAssigner specifies the target's handling of the
  /// argument types for the caller.
  bool resultsCompatible(CallLoweringInfo &Info, MachineFunction &MF,
                         SmallVectorImpl<ArgInfo> &InArgs,
                         ValueAssigner &CalleeAssigner,
                         ValueAssigner &CallerAssigner) const;

public:
  CallLowering(const TargetLowering *TLI) : TLI(TLI) {}
  virtual ~CallLowering() = default;

  /// \return true if the target is capable of handling swifterror values that
  /// have been promoted to a specified register. The extended versions of
  /// lowerReturn and lowerCall should be implemented.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Load the returned value from the stack into virtual registers in \p VRegs.
  /// It uses the frame index \p FI and the start offset from \p DemoteReg.
  /// The loaded data size will be determined from \p RetTy.
  void insertSRetLoads(MachineIRBuilder &MIRBuilder, Type *RetTy,
                       ArrayRef<Register> VRegs, Register DemoteReg,
                       int FI) const;

  /// Store the return value given by \p VRegs into stack starting at the offset
  /// specified in \p DemoteReg.
  void insertSRetStores(MachineIRBuilder &MIRBuilder, Type *RetTy,
                        ArrayRef<Register> VRegs, Register DemoteReg) const;

  /// Insert the hidden sret ArgInfo to the beginning of \p SplitArgs.
  /// This function should be called from the target specific
  /// lowerFormalArguments when \p F requires the sret demotion.
  void insertSRetIncomingArgument(const Function &F,
                                  SmallVectorImpl<ArgInfo> &SplitArgs,
                                  Register &DemoteReg, MachineRegisterInfo &MRI,
                                  const DataLayout &DL) const;

  /// For the call-base described by \p CB, insert the hidden sret ArgInfo to
  /// the OrigArgs field of \p Info.
  void insertSRetOutgoingArgument(MachineIRBuilder &MIRBuilder,
                                  const CallBase &CB,
                                  CallLoweringInfo &Info) const;

  /// \return True if the return type described by \p Outs can be returned
  /// without performing sret demotion.
  bool checkReturn(CCState &CCInfo, SmallVectorImpl<BaseArgInfo> &Outs,
                   CCAssignFn *Fn) const;

  /// Get the type and the ArgFlags for the split components of \p RetTy as
  /// returned by \c ComputeValueVTs.
  void getReturnInfo(CallingConv::ID CallConv, Type *RetTy, AttributeList Attrs,
                     SmallVectorImpl<BaseArgInfo> &Outs,
                     const DataLayout &DL) const;

  /// Top-level function to check the return type based on the target calling
  /// convention. \return True if the return value of \p MF can be returned
  /// without performing sret demotion.
  bool checkReturnTypeForCallConv(MachineFunction &MF) const;

  /// This hook must be implemented to check whether the return values
  /// described by \p Outs can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool canLowerReturn(MachineFunction &MF, CallingConv::ID CallConv,
                              SmallVectorImpl<BaseArgInfo> &Outs,
                              bool IsVarArg) const {
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by \p Val, into the specified virtual registers \p VRegs.
  /// This hook is used by GlobalISel.
  ///
  /// \p FLI is required for sret demotion.
  ///
  /// \p SwiftErrorVReg is non-zero if the function has a swifterror parameter
  /// that needs to be implicitly returned.
  ///
  /// \return True if the lowering succeeds, false otherwise.
  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                           ArrayRef<Register> VRegs, FunctionLoweringInfo &FLI,
                           Register SwiftErrorVReg) const {
    if (!supportSwiftError()) {
      assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
      return lowerReturn(MIRBuilder, Val, VRegs, FLI);
    }
    return false;
  }

  /// This hook behaves as the extended lowerReturn function, but for targets
  /// that do not support swifterror value promotion.
  virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
                           ArrayRef<Register> VRegs,
                           FunctionLoweringInfo &FLI) const {
    return false;
  }

  virtual bool fallBackToDAGISel(const MachineFunction &MF) const {
    return false;
  }

  /// This hook must be implemented to lower the incoming (formal)
  /// arguments, described by \p VRegs, for GlobalISel. Each argument
  /// must end up in the related virtual registers described by \p VRegs.
  /// In other words, the first argument should end up in \c VRegs[0],
  /// the second in \c VRegs[1], and so on. For each argument, there will be one
  /// register for each non-aggregate type, as returned by \c computeValueLLTs.
  /// \p MIRBuilder is set to the proper insertion for the argument
  /// lowering. \p FLI is required for sret demotion.
  ///
  /// \return True if the lowering succeeded, false otherwise.
  virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                    const Function &F,
                                    ArrayRef<ArrayRef<Register>> VRegs,
                                    FunctionLoweringInfo &FLI) const {
    return false;
  }
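
  // Illustrative override sketch (MyTargetCallLowering is hypothetical): a
  // minimal implementation that succeeds only for functions without
  // arguments and otherwise requests the SelectionDAG fallback:
  //
  //   bool MyTargetCallLowering::lowerFormalArguments(
  //       MachineIRBuilder &MIRBuilder, const Function &F,
  //       ArrayRef<ArrayRef<Register>> VRegs,
  //       FunctionLoweringInfo &FLI) const {
  //     return F.arg_empty(); // Anything else falls back by returning false.
  //   }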

  /// This hook must be implemented to lower the given call instruction,
  /// including argument and return value marshalling.
  ///
  /// \return true if the lowering succeeded, false otherwise.
  virtual bool lowerCall(MachineIRBuilder &MIRBuilder,
                         CallLoweringInfo &Info) const {
    return false;
  }

  /// Lower the given call instruction, including argument and return value
  /// marshalling.
  ///
  /// \p CI is the call/invoke instruction.
  ///
  /// \p ResRegs are the registers where the call's return value should be
  /// stored (or 0 if there is no return value). There will be one register for
  /// each non-aggregate type, as returned by \c computeValueLLTs.
  ///
  /// \p ArgRegs is a list of lists of virtual registers containing each
  /// argument that needs to be passed (argument \c i should be placed in \c
  /// ArgRegs[i]). For each argument, there will be one register for each
  /// non-aggregate type, as returned by \c computeValueLLTs.
  ///
  /// \p SwiftErrorVReg is non-zero if the call has a swifterror inout
  /// parameter, and contains the vreg that the swifterror should be copied into
  /// after the call.
  ///
  /// \p GetCalleeReg is a callback to materialize a register for the callee if
  /// the target determines it cannot jump to the destination based purely on \p
  /// CI. This might be because \p CI is indirect, or because of the limited
  /// range of an immediate jump.
  ///
  /// \return true if the lowering succeeded, false otherwise.
  bool lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &Call,
                 ArrayRef<Register> ResRegs,
                 ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
                 std::function<unsigned()> GetCalleeReg) const;

  /// Targets that want to use big-endian can enable it with the
  /// enableBigEndian() hook.
  virtual bool enableBigEndian() const { return false; }

  /// For targets which support the "returned" parameter attribute, returns
  /// true if the given type is a valid one to use with "returned".
  virtual bool isTypeIsValidForThisReturn(EVT Ty) const { return false; }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_CALLLOWERING_H
//===- llvm/CodeGen/GlobalISel/CombinerInfo.h -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Interface for targets to specify which operations are combined, and how
/// and when.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINERINFO_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINERINFO_H

#include <cassert>
namespace llvm {

class GISelChangeObserver;
class LegalizerInfo;
class MachineInstr;
class MachineIRBuilder;

// Contains information relevant to enabling/disabling various combines for a
// pass.
class CombinerInfo {
public:
  CombinerInfo(bool AllowIllegalOps, bool ShouldLegalizeIllegal,
               const LegalizerInfo *LInfo, bool OptEnabled, bool OptSize,
               bool MinSize)
      : IllegalOpsAllowed(AllowIllegalOps),
        LegalizeIllegalOps(ShouldLegalizeIllegal), LInfo(LInfo),
        EnableOpt(OptEnabled), EnableOptSize(OptSize), EnableMinSize(MinSize) {
    assert(((AllowIllegalOps || !LegalizeIllegalOps) || LInfo) &&
           "Expecting legalizerInfo when illegalops not allowed");
  }
  virtual ~CombinerInfo() = default;
  /// If \p IllegalOpsAllowed is false, the CombinerHelper will make use of
  /// the legalizerInfo to check for legality before each transformation.
  bool IllegalOpsAllowed; // TODO: Make use of this.

  /// If \p LegalizeIllegalOps is true, the Combiner will also legalize the
  /// illegal ops that are created.
  bool LegalizeIllegalOps; // TODO: Make use of this.
  const LegalizerInfo *LInfo;

  /// Whether optimizations should be enabled. This is to distinguish between
  /// uses of the combiner unconditionally and only when optimizations are
  /// specifically enabled.
  bool EnableOpt;
  /// Whether we're optimizing for size.
  bool EnableOptSize;
  /// Whether we're optimizing for minsize (-Oz).
  bool EnableMinSize;

  /// Attempt to combine instructions using MI as the root.
  ///
  /// Use Observer to report the creation, modification, and erasure of
  /// instructions. GISelChangeObserver will automatically report certain
  /// kinds of operations. These operations are:
  /// * Instructions that are newly inserted into the MachineFunction
  /// * Instructions that are erased from the MachineFunction.
  ///
  /// However, it is important to report instruction modification and this is
  /// not automatic.
  virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
                       MachineIRBuilder &B) const = 0;
};
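
// Illustrative subclass sketch (MyCombinerInfo is hypothetical): a
// pass-specific CombinerInfo forwards its knobs through the base constructor
// and supplies the combine() hook:
//
//   class MyCombinerInfo : public CombinerInfo {
//   public:
//     MyCombinerInfo(bool OptEnabled, bool OptSize, bool MinSize)
//         : CombinerInfo(/*AllowIllegalOps*/ true,
//                        /*ShouldLegalizeIllegal*/ false, /*LInfo*/ nullptr,
//                        OptEnabled, OptSize, MinSize) {}
//     bool combine(GISelChangeObserver &Observer, MachineInstr &MI,
//                  MachineIRBuilder &B) const override;
//   };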
} // namespace llvm

#endif
//===----- llvm/CodeGen/GlobalISel/LostDebugLocObserver.h -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Tracks DebugLocs between checkpoints and verifies that they are transferred.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
#define LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H

#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"

namespace llvm {
class LostDebugLocObserver : public GISelChangeObserver {
  StringRef DebugType;
  SmallSet<DebugLoc, 4> LostDebugLocs;
  SmallPtrSet<MachineInstr *, 4> PotentialMIsForDebugLocs;
  unsigned NumLostDebugLocs = 0;

public:
  LostDebugLocObserver(StringRef DebugType) : DebugType(DebugType) {}

  unsigned getNumLostDebugLocs() const { return NumLostDebugLocs; }

  /// Call this to indicate that it's a good point to assess whether locations
  /// have been lost. Typically this will be when a logical change has been
  /// completed, such as when the caller has finished replacing some
  /// instructions with alternatives. When CheckDebugLocs is true, the
  /// locations will be checked to see if any have been lost since the last
  /// checkpoint. When CheckDebugLocs is false, it will just reset, ready for
  /// the next checkpoint, without checking anything. This can be helpful to
  /// limit the detection to easy-to-fix portions of an algorithm before
  /// allowing more difficult ones.
  void checkpoint(bool CheckDebugLocs = true);

  void createdInstr(MachineInstr &MI) override;
  void erasingInstr(MachineInstr &MI) override;
  void changingInstr(MachineInstr &MI) override;
  void changedInstr(MachineInstr &MI) override;

private:
  void analyzeDebugLocations();
};
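
// Illustrative usage sketch ("my-pass" is a hypothetical debug type): bracket
// a rewrite step with checkpoints so dropped locations are attributed to that
// step:
//
//   LostDebugLocObserver LocObserver("my-pass");
//   LocObserver.checkpoint(/*CheckDebugLocs*/ false); // Establish a baseline.
//   // ... erase/create/modify instructions through the observer ...
//   LocObserver.checkpoint(); // Verify the DebugLocs were transferred.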

} // namespace llvm
#endif // LLVM_CODEGEN_GLOBALISEL_LOSTDEBUGLOCOBSERVER_H
//== llvm/CodeGen/GlobalISel/LegalizerHelper.h ---------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file A pass to convert the target-illegal operations created by IR -> MIR
/// translation into ones the target expects to be able to select. This may
/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
/// G_ADD <4 x i16>.
///
/// The LegalizerHelper class is where most of the work happens, and is
/// designed to be callable from other passes that find themselves with an
/// illegal instruction.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetOpcodes.h"

namespace llvm {
// Forward declarations.
class APInt;
class GAnyLoad;
class GLoadStore;
class GStore;
class GenericMachineInstr;
class MachineFunction;
class MachineIRBuilder;
class MachineInstr;
class MachineInstrBuilder;
struct MachinePointerInfo;
template <typename T> class SmallVectorImpl;
class LegalizerInfo;
class MachineRegisterInfo;
class GISelChangeObserver;
class LostDebugLocObserver;
class TargetLowering;

class LegalizerHelper {
public:
  /// Expose MIRBuilder so clients can set their own RecordInsertInstruction
  /// functions.
  MachineIRBuilder &MIRBuilder;

  /// To keep track of changes made by the LegalizerHelper.
  GISelChangeObserver &Observer;

private:
  MachineRegisterInfo &MRI;
  const LegalizerInfo &LI;
  const TargetLowering &TLI;
  GISelKnownBits *KB;

public:
  enum LegalizeResult {
    /// Instruction was already legal and no change was made to the
    /// MachineFunction.
    AlreadyLegal,

    /// Instruction has been legalized and the MachineFunction changed.
    Legalized,

    /// Some kind of error has occurred and we could not legalize this
    /// instruction.
    UnableToLegalize,
  };

  /// Expose LegalizerInfo so the clients can re-use.
  const LegalizerInfo &getLegalizerInfo() const { return LI; }
  const TargetLowering &getTargetLowering() const { return TLI; }
  GISelKnownBits *getKnownBits() const { return KB; }

  LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
                  MachineIRBuilder &B);
  LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
                  GISelChangeObserver &Observer, MachineIRBuilder &B,
                  GISelKnownBits *KB = nullptr);

  /// Replace \p MI by a sequence of legal instructions that can implement the
  /// same operation. Note that this means \p MI may be deleted, so any iterator
  /// steps should be performed before calling this function. The helper should
  /// be initialized with the MachineFunction containing \p MI.
  ///
  /// Considered as an opaque blob, the legal code will use and define the same
  /// registers as \p MI.
  LegalizeResult legalizeInstrStep(MachineInstr &MI,
                                   LostDebugLocObserver &LocObserver);

  /// Legalize an instruction by emitting a runtime library call instead.
  LegalizeResult libcall(MachineInstr &MI, LostDebugLocObserver &LocObserver);

  /// Legalize an instruction by reducing the width of the underlying scalar
  /// type.
  LegalizeResult narrowScalar(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);

  /// Legalize an instruction by performing the operation on a wider scalar type
  /// (for example a 16-bit addition can be safely performed at 32-bits
  /// precision, ignoring the unused bits).
  LegalizeResult widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
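
  // Illustrative MIR sketch (virtual register numbers are hypothetical):
  // widening a 16-bit G_ADD to 32 bits roughly rewrites
  //
  //   %2:_(s16) = G_ADD %0:_(s16), %1:_(s16)
  //
  // into
  //
  //   %3:_(s32) = G_ANYEXT %0:_(s16)
  //   %4:_(s32) = G_ANYEXT %1:_(s16)
  //   %5:_(s32) = G_ADD %3:_(s32), %4:_(s32)
  //   %2:_(s16) = G_TRUNC %5:_(s32)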

  /// Legalize an instruction by replacing the value type
  LegalizeResult bitcast(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  /// Legalize an instruction by splitting it into simpler parts, hopefully
  /// understood by the target.
  LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  /// Legalize a vector instruction by splitting into multiple components, each
  /// acting on the same scalar type as the original but with fewer elements.
  LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                     LLT NarrowTy);

  /// Legalize a vector instruction by increasing the number of vector elements
  /// involved and ignoring the added elements later.
  LegalizeResult moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
                                    LLT MoreTy);

  /// Cast the given value to an LLT::scalar with an equivalent size. Returns
  /// the register to use if an instruction was inserted. Returns the original
  /// register if no coercion was necessary.
  //
  // This may also fail and return Register() if there is no legal way to cast.
  Register coerceToScalar(Register Val);
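
  // E.g. (a sketch): with 64-bit pointers, a p0 value coerces via
  //   %s:_(s64) = G_PTRTOINT %p(p0)
  // while a value that is already scalar is returned unchanged.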

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Use by extending the operand's type to \p WideTy using the specified \p
  /// ExtOpcode for the extension instruction, and replacing the vreg of the
  /// operand in place.
  void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
                      unsigned ExtOpcode);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Use by truncating the operand's type to \p NarrowTy using G_TRUNC, and
  /// replacing the vreg of the operand in place.
  void narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Def by extending the operand's type to \p WideTy and truncating it back
  /// with the \p TruncOpcode, and replacing the vreg of the operand in place.
  void widenScalarDst(MachineInstr &MI, LLT WideTy, unsigned OpIdx = 0,
                      unsigned TruncOpcode = TargetOpcode::G_TRUNC);
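
  // A typical widening sequence for a binary op (a sketch of how these
  // helpers are combined inside widenScalar):
  //   widenScalarSrc(MI, WideTy, /*OpIdx=*/1, TargetOpcode::G_ANYEXT);
  //   widenScalarSrc(MI, WideTy, /*OpIdx=*/2, TargetOpcode::G_ANYEXT);
  //   widenScalarDst(MI, WideTy); // re-truncates def 0 with G_TRUNC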

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Def by truncating the operand's type to \p NarrowTy, replacing in place
  /// and extending back with \p ExtOpcode.
  void narrowScalarDst(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx,
                       unsigned ExtOpcode);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Def by performing it with additional vector elements and extracting the
  /// result elements, and replacing the vreg of the operand in place.
  void moreElementsVectorDst(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// Use by producing a vector with undefined high elements, extracting the
  /// original vector type, and replacing the vreg of the operand in place.
  void moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// use by inserting a G_BITCAST to \p CastTy
  void bitcastSrc(MachineInstr &MI, LLT CastTy, unsigned OpIdx);

  /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
  /// def by inserting a G_BITCAST from \p CastTy
  void bitcastDst(MachineInstr &MI, LLT CastTy, unsigned OpIdx);

private:
  LegalizeResult
  widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult
  widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult
  widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult
  widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT WideTy);
  LegalizeResult widenScalarAddSubOverflow(MachineInstr &MI, unsigned TypeIdx,
                                           LLT WideTy);
  LegalizeResult widenScalarAddSubShlSat(MachineInstr &MI, unsigned TypeIdx,
                                         LLT WideTy);
  LegalizeResult widenScalarMulo(MachineInstr &MI, unsigned TypeIdx,
                                 LLT WideTy);

  /// Helper function to split a wide generic register into bitwise blocks with
  /// the given type \p Ty (which implies the number of blocks needed). The
  /// generic registers created are appended to \p VRegs, starting at bit 0 of
  /// \p Reg.
  void extractParts(Register Reg, LLT Ty, int NumParts,
                    SmallVectorImpl<Register> &VRegs);

  /// Version which handles irregular splits.
  bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
                    LLT &LeftoverTy,
                    SmallVectorImpl<Register> &VRegs,
                    SmallVectorImpl<Register> &LeftoverVRegs);

  /// Version which handles irregular sub-vector splits.
  void extractVectorParts(Register Reg, unsigned NumElts,
                          SmallVectorImpl<Register> &VRegs);

  /// Helper function to build a wide generic register \p DstReg of type \p
  /// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
  /// G_BUILD_VECTOR, G_CONCAT_VECTORS, or sequence of G_INSERT as appropriate
  /// for the types.
  ///
  /// \p PartRegs must be registers of type \p PartTy.
  ///
  /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
  /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
  void insertParts(Register DstReg, LLT ResultTy,
                   LLT PartTy, ArrayRef<Register> PartRegs,
                   LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
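
  // For example (a sketch): insertParts(Dst, s64, s32, {Lo, Hi}) emits
  //   %Dst:_(s64) = G_MERGE_VALUES %Lo(s32), %Hi(s32)
  // while an uneven split such as PartTy = s24 on an s64 result passes the
  // s16 remainder via LeftoverTy/LeftoverRegs and uses G_INSERTs instead.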

  /// Merge \p PartRegs with different types into \p DstReg.
  void mergeMixedSubvectors(Register DstReg, ArrayRef<Register> PartRegs);

  void appendVectorElts(SmallVectorImpl<Register> &Elts, Register Reg);

  /// Unmerge \p SrcReg into smaller sized values, and append them to \p
  /// Parts. The elements of \p Parts will be the greatest common divisor type
  /// of \p DstTy, \p NarrowTy and the type of \p SrcReg. This will compute and
  /// return the GCD type.
  LLT extractGCDType(SmallVectorImpl<Register> &Parts, LLT DstTy,
                     LLT NarrowTy, Register SrcReg);
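
  // For example (a sketch): with DstTy = s96, NarrowTy = s64 and an s32
  // source, the GCD type is s32 and the source is appended to Parts as-is;
  // an s96 source would instead be unmerged into three s32 pieces first.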

  /// Unmerge \p SrcReg into \p GCDTy typed registers. This will append all of
  /// the unpacked registers to \p Parts. This version is for when the common
  /// unmerge type is already known.
  void extractGCDType(SmallVectorImpl<Register> &Parts, LLT GCDTy,
                      Register SrcReg);

  /// Produce a merge of values in \p VRegs to define \p DstReg. Perform a merge
  /// from the least common multiple type, and convert as appropriate to \p
  /// DstReg.
  ///
  /// \p VRegs should each have type \p GCDTy. This type should be the
  /// greatest common divisor type of \p DstReg, \p NarrowTy, and an
  /// undetermined source type.
  ///
  /// \p NarrowTy is the desired result merge source type. If the source value
  /// needs to be widened to evenly cover \p DstReg, inserts high bits
  /// corresponding to the extension opcode \p PadStrategy.
  ///
  /// \p VRegs will be cleared, and the result \p NarrowTy register pieces
  /// will replace it. Returns the complete LCMTy that \p VRegs will cover
  /// when merged.
  LLT buildLCMMergePieces(LLT DstTy, LLT NarrowTy, LLT GCDTy,
                          SmallVectorImpl<Register> &VRegs,
                          unsigned PadStrategy = TargetOpcode::G_ANYEXT);

  /// Merge the values in \p RemergeRegs to an \p LCMTy typed value. Extract the
  /// low bits into \p DstReg. This is intended to use the outputs from
  /// buildLCMMergePieces after processing.
  void buildWidenedRemergeToDst(Register DstReg, LLT LCMTy,
                                ArrayRef<Register> RemergeRegs);

  /// Perform generic multiplication of values held in multiple registers.
  /// Generated instructions use only types NarrowTy and i1.
  /// The destination can be the same size as the source, or twice its size.
  void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
                         ArrayRef<Register> Src1Regs,
                         ArrayRef<Register> Src2Regs, LLT NarrowTy);

  void changeOpcode(MachineInstr &MI, unsigned NewOpcode);

  LegalizeResult tryNarrowPow2Reduction(MachineInstr &MI, Register SrcReg,
                                        LLT SrcTy, LLT NarrowTy,
                                        unsigned ScalarOpc);

  // Memcpy family legalization helpers.
  LegalizeResult lowerMemset(MachineInstr &MI, Register Dst, Register Val,
                             uint64_t KnownLen, Align Alignment,
                             bool IsVolatile);
  LegalizeResult lowerMemcpyInline(MachineInstr &MI, Register Dst, Register Src,
                                   uint64_t KnownLen, Align DstAlign,
                                   Align SrcAlign, bool IsVolatile);
  LegalizeResult lowerMemcpy(MachineInstr &MI, Register Dst, Register Src,
                             uint64_t KnownLen, uint64_t Limit, Align DstAlign,
                             Align SrcAlign, bool IsVolatile);
  LegalizeResult lowerMemmove(MachineInstr &MI, Register Dst, Register Src,
                              uint64_t KnownLen, Align DstAlign, Align SrcAlign,
                              bool IsVolatile);

public:
  /// Return the alignment to use for a stack temporary object with the given
  /// type.
  Align getStackTemporaryAlignment(LLT Type, Align MinAlign = Align()) const;

  /// Create a stack temporary based on the size in bytes and the alignment
  MachineInstrBuilder createStackTemporary(TypeSize Bytes, Align Alignment,
                                           MachinePointerInfo &PtrInfo);

  /// Get a pointer to vector element \p Index located in memory for a vector of
  /// type \p VecTy starting at a base address of \p VecPtr. If \p Index is out
  /// of bounds the returned pointer is unspecified, but will be within the
  /// vector bounds.
  Register getVectorElementPointer(Register VecPtr, LLT VecTy, Register Index);
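
  // Conceptually (a sketch; the index may first be clamped to keep the
  // result within the vector's bounds):
  //   %Scaled:_(sN) = G_MUL %Index, <element size in bytes>
  //   %EltPtr:_(p0) = G_PTR_ADD %VecPtr, %Scaled(sN)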

  /// Handles most opcodes. Split \p MI into the same instruction on
  /// sub-vectors or scalars with \p NumElts elements (1 for scalar). Supports
  /// uneven splits: there can be a leftover sub-vector with fewer than \p
  /// NumElts elements, or a leftover scalar. To avoid this, use moreElements
  /// first and set MI's number of elements to a multiple of \p NumElts.
  /// Non-vector operands that should be used on all sub-instructions without
  /// being split are listed in \p NonVecOpIndices.
  LegalizeResult fewerElementsVectorMultiEltType(
      GenericMachineInstr &MI, unsigned NumElts,
      std::initializer_list<unsigned> NonVecOpIndices = {});

  LegalizeResult fewerElementsVectorPhi(GenericMachineInstr &MI,
                                        unsigned NumElts);

  LegalizeResult moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
                                       LLT MoreTy);
  LegalizeResult moreElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx,
                                           LLT MoreTy);

  LegalizeResult fewerElementsVectorUnmergeValues(MachineInstr &MI,
                                                  unsigned TypeIdx,
                                                  LLT NarrowTy);
  LegalizeResult fewerElementsVectorMerge(MachineInstr &MI, unsigned TypeIdx,
                                          LLT NarrowTy);
  LegalizeResult fewerElementsVectorExtractInsertVectorElt(MachineInstr &MI,
                                                           unsigned TypeIdx,
                                                           LLT NarrowTy);

  /// Equalize source and destination vector sizes of G_SHUFFLE_VECTOR.
  LegalizeResult equalizeVectorShuffleLengths(MachineInstr &MI);

  LegalizeResult reduceLoadStoreWidth(GLoadStore &MI, unsigned TypeIdx,
                                      LLT NarrowTy);

  LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                             LLT HalfTy, LLT ShiftAmtTy);

  LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                               unsigned TypeIdx, LLT NarrowTy);

  LegalizeResult fewerElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx,
                                            LLT NarrowTy);

  LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarAddSub(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy);
  LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
  LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  LegalizeResult narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarExt(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarCTLZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarCTTZ(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarCTPOP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
  LegalizeResult narrowScalarFLDEXP(MachineInstr &MI, unsigned TypeIdx, LLT Ty);

  /// Perform Bitcast legalize action on G_EXTRACT_VECTOR_ELT.
  LegalizeResult bitcastExtractVectorElt(MachineInstr &MI, unsigned TypeIdx,
                                         LLT CastTy);

  /// Perform Bitcast legalize action on G_INSERT_VECTOR_ELT.
  LegalizeResult bitcastInsertVectorElt(MachineInstr &MI, unsigned TypeIdx,
                                        LLT CastTy);

  LegalizeResult lowerFConstant(MachineInstr &MI);
  LegalizeResult lowerBitcast(MachineInstr &MI);
  LegalizeResult lowerLoad(GAnyLoad &MI);
  LegalizeResult lowerStore(GStore &MI);
  LegalizeResult lowerBitCount(MachineInstr &MI);
  LegalizeResult lowerFunnelShiftWithInverse(MachineInstr &MI);
  LegalizeResult lowerFunnelShiftAsShifts(MachineInstr &MI);
  LegalizeResult lowerFunnelShift(MachineInstr &MI);
  LegalizeResult lowerRotateWithReverseRotate(MachineInstr &MI);
  LegalizeResult lowerRotate(MachineInstr &MI);

  LegalizeResult lowerU64ToF32BitOps(MachineInstr &MI);
  LegalizeResult lowerUITOFP(MachineInstr &MI);
  LegalizeResult lowerSITOFP(MachineInstr &MI);
  LegalizeResult lowerFPTOUI(MachineInstr &MI);
  LegalizeResult lowerFPTOSI(MachineInstr &MI);

  LegalizeResult lowerFPTRUNC_F64_TO_F16(MachineInstr &MI);
  LegalizeResult lowerFPTRUNC(MachineInstr &MI);
  LegalizeResult lowerFPOWI(MachineInstr &MI);

  LegalizeResult lowerISFPCLASS(MachineInstr &MI);

  LegalizeResult lowerMinMax(MachineInstr &MI);
  LegalizeResult lowerFCopySign(MachineInstr &MI);
  LegalizeResult lowerFMinNumMaxNum(MachineInstr &MI);
  LegalizeResult lowerFMad(MachineInstr &MI);
  LegalizeResult lowerIntrinsicRound(MachineInstr &MI);
  LegalizeResult lowerFFloor(MachineInstr &MI);
  LegalizeResult lowerMergeValues(MachineInstr &MI);
  LegalizeResult lowerUnmergeValues(MachineInstr &MI);
  LegalizeResult lowerExtractInsertVectorElt(MachineInstr &MI);
  LegalizeResult lowerShuffleVector(MachineInstr &MI);
  LegalizeResult lowerDynStackAlloc(MachineInstr &MI);
  LegalizeResult lowerExtract(MachineInstr &MI);
  LegalizeResult lowerInsert(MachineInstr &MI);
  LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
  LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
  LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
  LegalizeResult lowerShlSat(MachineInstr &MI);
  LegalizeResult lowerBswap(MachineInstr &MI);
  LegalizeResult lowerBitreverse(MachineInstr &MI);
  LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
  LegalizeResult lowerSMULH_UMULH(MachineInstr &MI);
  LegalizeResult lowerSelect(MachineInstr &MI);
  LegalizeResult lowerDIVREM(MachineInstr &MI);
  LegalizeResult lowerAbsToAddXor(MachineInstr &MI);
  LegalizeResult lowerAbsToMaxNeg(MachineInstr &MI);
  LegalizeResult lowerVectorReduction(MachineInstr &MI);
  LegalizeResult lowerMemcpyInline(MachineInstr &MI);
  LegalizeResult lowerMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);
};

/// Helper function that creates a libcall to the given \p Name using the given
/// calling convention \p CC.
LegalizerHelper::LegalizeResult
createLibcall(MachineIRBuilder &MIRBuilder, const char *Name,
              const CallLowering::ArgInfo &Result,
              ArrayRef<CallLowering::ArgInfo> Args, CallingConv::ID CC);

/// Helper function that creates the given libcall.
LegalizerHelper::LegalizeResult
createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
              const CallLowering::ArgInfo &Result,
              ArrayRef<CallLowering::ArgInfo> Args);
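
// A minimal usage sketch (hypothetical registers and context; assumes an f32
// G_FSIN legalized through RTLIB::SIN_F32, and that CallLowering::ArgInfo is
// constructed from {Regs, Type, ArgNo} as in this LLVM version):
//
//   Type *F32Ty = Type::getFloatTy(Ctx);
//   LegalizerHelper::LegalizeResult Res =
//       createLibcall(MIRBuilder, RTLIB::SIN_F32,
//                     CallLowering::ArgInfo({DstReg}, F32Ty, 0),
//                     {CallLowering::ArgInfo({SrcReg}, F32Ty, 0)});
//   if (Res != LegalizerHelper::Legalized)
//     return LegalizerHelper::UnableToLegalize;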

/// Create a libcall to memcpy et al.
LegalizerHelper::LegalizeResult
createMemLibcall(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                 MachineInstr &MI, LostDebugLocObserver &LocObserver);

} // End namespace llvm.

#endif
//==-- llvm/CodeGen/GlobalISel/RegisterBank.h - Register Bank ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of register banks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_REGISTERBANK_H
#define LLVM_CODEGEN_GLOBALISEL_REGISTERBANK_H

#include "llvm/ADT/BitVector.h"

namespace llvm {
// Forward declarations.
class RegisterBankInfo;
class raw_ostream;
class TargetRegisterClass;
class TargetRegisterInfo;

/// This class implements the register bank concept.
/// Two instances of RegisterBank must have different IDs.
/// This property is enforced by the RegisterBankInfo class.
class RegisterBank {
private:
  unsigned ID;
  const char *Name;
  unsigned Size;
  BitVector ContainedRegClasses;

  /// Sentinel value used to recognize a register bank that has not been
  /// properly initialized yet.
  static const unsigned InvalidID;

  /// Only the RegisterBankInfo can initialize RegisterBank properly.
  friend RegisterBankInfo;

public:
  RegisterBank(unsigned ID, const char *Name, unsigned Size,
               const uint32_t *CoveredClasses, unsigned NumRegClasses);

  /// Get the identifier of this register bank.
  unsigned getID() const { return ID; }

  /// Get a user friendly name of this register bank.
  /// Should be used only for debugging purposes.
  const char *getName() const { return Name; }

  /// Get the maximal size in bits that fits in this register bank.
  unsigned getSize() const { return Size; }

  /// Check whether this instance is ready to be used.
  bool isValid() const;

  /// Check if this register bank is valid. In other words,
  /// if it has been properly constructed.
  ///
  /// \note This method does not check anything when assertions are disabled.
  ///
  /// \return True if the check was successful.
  bool verify(const TargetRegisterInfo &TRI) const;

  /// Check whether this register bank covers \p RC.
  /// In other words, check if this register bank fully covers
  /// the registers that \p RC contains.
  /// \pre isValid()
  bool covers(const TargetRegisterClass &RC) const;

  /// Check whether \p OtherRB is the same as this.
  bool operator==(const RegisterBank &OtherRB) const;
  bool operator!=(const RegisterBank &OtherRB) const {
    return !this->operator==(OtherRB);
  }

  /// Dump the register mask on dbgs() stream.
  /// The dump is verbose.
  void dump(const TargetRegisterInfo *TRI = nullptr) const;

  /// Print the register mask on OS.
  /// If IsForDebug is false, then only the name of the register bank
  /// is printed. Otherwise, all the fields are printed.
  /// TRI is then used to print the name of the register classes that
  /// this register bank covers.
  void print(raw_ostream &OS, bool IsForDebug = false,
             const TargetRegisterInfo *TRI = nullptr) const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const RegisterBank &RegBank) {
  RegBank.print(OS);
  return OS;
}
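
// A minimal usage sketch (hypothetical target objects: MyTgt::GPRRegBankID
// and MyTgt::GPR64RegClass are assumed names):
//
//   const RegisterBank &RB = RBI.getRegBank(MyTgt::GPRRegBankID);
//   if (RB.covers(MyTgt::GPR64RegClass))
//     LLVM_DEBUG(dbgs() << RB << '\n'); // operator<< prints the bank's name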
} // End namespace llvm.

#endif
//===-- llvm/CodeGen/GlobalISel/CSEMIRBuilder.h  --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a version of MachineIRBuilder which CSEs insts within
/// a MachineBasicBlock.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H
#define LLVM_CODEGEN_GLOBALISEL_CSEMIRBUILDER_H

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

namespace llvm {

class GISelInstProfileBuilder;
/// Defines a builder that does CSE of MachineInstructions using GISelCSEInfo.
/// Example usage:
///
///   GISelCSEInfo *Info =
///       &getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEInfo();
///   CSEMIRBuilder CB(Builder.getState());
///   CB.setCSEInfo(Info);
///   auto A = CB.buildConstant(s32, 42);
///   auto B = CB.buildConstant(s32, 42);
///   assert(A == B);
///   unsigned CReg = MRI.createGenericVirtualRegister(s32);
///   auto C = CB.buildConstant(CReg, 42);
///   assert(C->getOpcode() == TargetOpcode::COPY);
///
/// Explicitly passing in a register would materialize a copy if possible.
/// CSEMIRBuilder also does trivial constant folding for binary ops.
class CSEMIRBuilder : public MachineIRBuilder {

  /// Returns true if A dominates B (within the same basic block).
  /// Both iterators must be in the same basic block.
  //
  // TODO: Another approach for checking dominance is having two iterators and
  // making them go towards each other until they meet or reach begin/end. Which
  // approach is better? Should this even change dynamically? For G_CONSTANTS
  // most of which will be at the top of the BB, the top down approach would be
  // a better choice. Does IRTranslator placing constants at the beginning still
  // make sense? Should this change based on Opcode?
  bool dominates(MachineBasicBlock::const_iterator A,
                 MachineBasicBlock::const_iterator B) const;

  /// For a given ID, find a MachineInstr in the CSE Map. If found, check if it
  /// dominates the current insertion point and if not, move it just before the
  /// current insertion point and return it. If not found, return a null
  /// MachineInstrBuilder.
  MachineInstrBuilder getDominatingInstrForID(FoldingSetNodeID &ID,
                                              void *&NodeInsertPos);
  /// Simple check for whether we can CSE (i.e. we have the CSEInfo) and
  /// whether this Opcode is safe to CSE.
  bool canPerformCSEForOpc(unsigned Opc) const;

  void profileDstOp(const DstOp &Op, GISelInstProfileBuilder &B) const;

  void profileDstOps(ArrayRef<DstOp> Ops, GISelInstProfileBuilder &B) const {
    for (const DstOp &Op : Ops)
      profileDstOp(Op, B);
  }

  void profileSrcOp(const SrcOp &Op, GISelInstProfileBuilder &B) const;

  void profileSrcOps(ArrayRef<SrcOp> Ops, GISelInstProfileBuilder &B) const {
    for (const SrcOp &Op : Ops)
      profileSrcOp(Op, B);
  }

  void profileMBBOpcode(GISelInstProfileBuilder &B, unsigned Opc) const;

  void profileEverything(unsigned Opc, ArrayRef<DstOp> DstOps,
                         ArrayRef<SrcOp> SrcOps, std::optional<unsigned> Flags,
                         GISelInstProfileBuilder &B) const;

  // Takes a MachineInstrBuilder and inserts it into the CSEMap using the
  // NodeInsertPos.
  MachineInstrBuilder memoizeMI(MachineInstrBuilder MIB, void *NodeInsertPos);

  // If we can CSE an instruction, but still need to materialize it to a VReg,
  // we emit a copy from the CSE'd inst to the VReg.
  MachineInstrBuilder generateCopiesIfRequired(ArrayRef<DstOp> DstOps,
                                               MachineInstrBuilder &MIB);

  // If we can CSE an instruction, but still need to materialize it to a VReg,
  // check if we can generate copies. It's not possible to return a single MIB
  // while emitting copies to multiple vregs.
  bool checkCopyToDefsPossible(ArrayRef<DstOp> DstOps);

public:
  // Pull in base class constructors.
  using MachineIRBuilder::MachineIRBuilder;
  // Unhide buildInstr
  MachineInstrBuilder
  buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps, ArrayRef<SrcOp> SrcOps,
             std::optional<unsigned> Flag = std::nullopt) override;
  // Bring in the other overload from the base class.
  using MachineIRBuilder::buildConstant;

  MachineInstrBuilder buildConstant(const DstOp &Res,
                                    const ConstantInt &Val) override;

  // Bring in the other overload from the base class.
  using MachineIRBuilder::buildFConstant;
  MachineInstrBuilder buildFConstant(const DstOp &Res,
                                     const ConstantFP &Val) override;
};
} // namespace llvm
#endif
//===- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API for the instruction selector.
/// This class is responsible for selecting machine instructions.
/// It's implemented by the target. It's used by the InstructionSelect pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/CodeGenCoverage.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace llvm {

/// GlobalISel PatFrag Predicates
enum {
  GIPFP_I64_Invalid = 0,
  GIPFP_APInt_Invalid = 0,
  GIPFP_APFloat_Invalid = 0,
  GIPFP_MI_Invalid = 0,
};
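
// A tiny illustrative MatchTable fragment (a sketch, not real TableGen
// output; MyTgt::ADDrr is a hypothetical target opcode, and operand order
// follows the interpreter cases below):
//
//   GIM_Try,          /*OnFailResumeAt=*/FailIdx,
//   GIM_CheckOpcode,  /*InsnID=*/0, TargetOpcode::G_ADD,
//   GIM_CheckType,    /*InsnID=*/0, /*OpIdx=*/0, /*TypeID=*/0,
//   GIR_MutateOpcode, /*OldInsnID=*/0, /*NewInsnID=*/0, MyTgt::ADDrr,
//   GIR_Done,
//   // FailIdx labels the next entry:
//   GIM_Reject,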

template <class TgtInstructionSelector, class PredicateBitset,
          class ComplexMatcherMemFn, class CustomRendererFn>
bool InstructionSelector::executeMatchTable(
    TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
    const ISelInfoTy<PredicateBitset, ComplexMatcherMemFn, CustomRendererFn>
        &ISelInfo,
    const int64_t *MatchTable, const TargetInstrInfo &TII,
    MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI, const PredicateBitset &AvailableFeatures,
    CodeGenCoverage &CoverageInfo) const {

  uint64_t CurrentIdx = 0;
  SmallVector<uint64_t, 4> OnFailResumeAt;

  // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
  bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();

  const uint16_t Flags = State.MIs[0]->getFlags();

  enum RejectAction { RejectAndGiveUp, RejectAndResume };
  auto handleReject = [&]() -> RejectAction {
    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                    dbgs() << CurrentIdx << ": Rejected\n");
    if (OnFailResumeAt.empty())
      return RejectAndGiveUp;
    CurrentIdx = OnFailResumeAt.pop_back_val();
    DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                    dbgs() << CurrentIdx << ": Resume at " << CurrentIdx << " ("
                           << OnFailResumeAt.size() << " try-blocks remain)\n");
    return RejectAndResume;
  };

  auto propagateFlags = [=](NewMIVector &OutMIs) {
    for (auto MIB : OutMIs) {
      // Set the NoFPExcept flag when no original matched instruction could
      // raise an FP exception, but the new instruction potentially might.
      uint16_t MIBFlags = Flags;
      if (NoFPException && MIB->mayRaiseFPException())
        MIBFlags |= MachineInstr::NoFPExcept;
      MIB.setMIFlags(MIBFlags);
    }

    return true;
  };

  while (true) {
    assert(CurrentIdx != ~0u && "Invalid MatchTable index");
    int64_t MatcherOpcode = MatchTable[CurrentIdx++];
    switch (MatcherOpcode) {
    case GIM_Try: {
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": Begin try-block\n");
      OnFailResumeAt.push_back(MatchTable[CurrentIdx++]);
      break;
    }

    case GIM_RecordInsn: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];

      // As an optimisation we require that MIs[0] is always the root. Refuse
      // any attempt to modify it.
      assert(NewInsnID != 0 && "Refusing to modify MIs[0]");

      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg()) {
        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                        dbgs() << CurrentIdx << ": Not a register\n");
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }
      if (MO.getReg().isPhysical()) {
        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                        dbgs() << CurrentIdx << ": Is a physical register\n");
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineInstr *NewMI = MRI.getVRegDef(MO.getReg());
      if ((size_t)NewInsnID < State.MIs.size())
        State.MIs[NewInsnID] = NewMI;
      else {
        assert((size_t)NewInsnID == State.MIs.size() &&
               "Expected to store MIs in order");
        State.MIs.push_back(NewMI);
      }
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": MIs[" << NewInsnID
                             << "] = GIM_RecordInsn(" << InsnID << ", " << OpIdx
                             << ")\n");
      break;
    }

    case GIM_CheckFeatures: {
      int64_t ExpectedBitsetID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckFeatures(ExpectedBitsetID="
                             << ExpectedBitsetID << ")\n");
      if ((AvailableFeatures & ISelInfo.FeatureBitsets[ExpectedBitsetID]) !=
          ISelInfo.FeatureBitsets[ExpectedBitsetID]) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }

    case GIM_CheckOpcode:
    case GIM_CheckOpcodeIsEither: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Expected0 = MatchTable[CurrentIdx++];
      int64_t Expected1 = -1;
      if (MatcherOpcode == GIM_CheckOpcodeIsEither)
        Expected1 = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      unsigned Opcode = State.MIs[InsnID]->getOpcode();

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
        dbgs() << CurrentIdx << ": GIM_CheckOpcode(MIs[" << InsnID
        << "], ExpectedOpcode=" << Expected0;
        if (MatcherOpcode == GIM_CheckOpcodeIsEither)
          dbgs() << " || " << Expected1;
        dbgs() << ") // Got=" << Opcode << "\n";
      );

      if (Opcode != Expected0 && Opcode != Expected1) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_SwitchOpcode: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t LowerBound = MatchTable[CurrentIdx++];
      int64_t UpperBound = MatchTable[CurrentIdx++];
      int64_t Default = MatchTable[CurrentIdx++];
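
      // Jump-table layout (a sketch): one absolute MatchTable index per
      // opcode in [LowerBound, UpperBound) follows Default; a zero entry
      // means no rule covers that opcode and we fall back to Default.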

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      const int64_t Opcode = State.MIs[InsnID]->getOpcode();

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
        dbgs() << CurrentIdx << ": GIM_SwitchOpcode(MIs[" << InsnID << "], ["
               << LowerBound << ", " << UpperBound << "), Default=" << Default
               << ", JumpTable...) // Got=" << Opcode << "\n";
      });
      if (Opcode < LowerBound || UpperBound <= Opcode) {
        CurrentIdx = Default;
        break;
      }
      CurrentIdx = MatchTable[CurrentIdx + (Opcode - LowerBound)];
      if (!CurrentIdx) {
        CurrentIdx = Default;
        break;
      }
      OnFailResumeAt.push_back(Default);
      break;
    }

    case GIM_SwitchType: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t LowerBound = MatchTable[CurrentIdx++];
      int64_t UpperBound = MatchTable[CurrentIdx++];
      int64_t Default = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), {
        dbgs() << CurrentIdx << ": GIM_SwitchType(MIs[" << InsnID
               << "]->getOperand(" << OpIdx << "), [" << LowerBound << ", "
               << UpperBound << "), Default=" << Default
               << ", JumpTable...) // Got=";
        if (!MO.isReg())
          dbgs() << "Not a VReg\n";
        else
          dbgs() << MRI.getType(MO.getReg()) << "\n";
      });
      if (!MO.isReg()) {
        CurrentIdx = Default;
        break;
      }
      const LLT Ty = MRI.getType(MO.getReg());
      const auto TyI = ISelInfo.TypeIDMap.find(Ty);
      if (TyI == ISelInfo.TypeIDMap.end()) {
        CurrentIdx = Default;
        break;
      }
      const int64_t TypeID = TyI->second;
      if (TypeID < LowerBound || UpperBound <= TypeID) {
        CurrentIdx = Default;
        break;
      }
      CurrentIdx = MatchTable[CurrentIdx + (TypeID - LowerBound)];
      if (!CurrentIdx) {
        CurrentIdx = Default;
        break;
      }
      OnFailResumeAt.push_back(Default);
      break;
    }

    case GIM_CheckNumOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Expected = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckNumOperands(MIs["
                             << InsnID << "], Expected=" << Expected << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (State.MIs[InsnID]->getNumOperands() != Expected) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckI64ImmPredicate:
    case GIM_CheckImmOperandPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatcherOpcode == GIM_CheckImmOperandPredicate
                          ? MatchTable[CurrentIdx++]
                          : 1;
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckImmPredicate(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert((State.MIs[InsnID]->getOperand(OpIdx).isImm() ||
              State.MIs[InsnID]->getOperand(OpIdx).isCImm()) &&
             "Expected immediate operand");
      assert(Predicate > GIPFP_I64_Invalid && "Expected a valid predicate");
      int64_t Value = 0;
      if (State.MIs[InsnID]->getOperand(OpIdx).isCImm())
        Value = State.MIs[InsnID]->getOperand(OpIdx).getCImm()->getSExtValue();
      else if (State.MIs[InsnID]->getOperand(OpIdx).isImm())
        Value = State.MIs[InsnID]->getOperand(OpIdx).getImm();
      else
        llvm_unreachable("Expected Imm or CImm operand");

      if (!testImmPredicate_I64(Predicate, Value))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckAPIntImmPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs()
                          << CurrentIdx << ": GIM_CheckAPIntImmPredicate(MIs["
                          << InsnID << "], Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
             "Expected G_CONSTANT");
      assert(Predicate > GIPFP_APInt_Invalid && "Expected a valid predicate");
      APInt Value;
      if (State.MIs[InsnID]->getOperand(1).isCImm())
        Value = State.MIs[InsnID]->getOperand(1).getCImm()->getValue();
      else
        llvm_unreachable("Expected Imm or CImm operand");

      if (!testImmPredicate_APInt(Predicate, Value))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckAPFloatImmPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs()
                          << CurrentIdx << ": GIM_CheckAPFloatImmPredicate(MIs["
                          << InsnID << "], Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(State.MIs[InsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
             "Expected G_FCONSTANT");
      assert(State.MIs[InsnID]->getOperand(1).isFPImm() && "Expected FPImm operand");
      assert(Predicate > GIPFP_APFloat_Invalid && "Expected a valid predicate");
      APFloat Value = State.MIs[InsnID]->getOperand(1).getFPImm()->getValueAPF();

      if (!testImmPredicate_APFloat(Predicate, Value))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckIsBuildVectorAllOnes:
    case GIM_CheckIsBuildVectorAllZeros: {
      int64_t InsnID = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckBuildVectorAll{Zeros|Ones}(MIs["
                             << InsnID << "])\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      const MachineInstr *MI = State.MIs[InsnID];
      assert((MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
              MI->getOpcode() == TargetOpcode::G_BUILD_VECTOR_TRUNC) &&
             "Expected G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC");

      if (MatcherOpcode == GIM_CheckIsBuildVectorAllOnes) {
        if (!isBuildVectorAllOnes(*MI, MRI)) {
          if (handleReject() == RejectAndGiveUp)
            return false;
        }
      } else {
        if (!isBuildVectorAllZeros(*MI, MRI)) {
          if (handleReject() == RejectAndGiveUp)
            return false;
        }
      }

      break;
    }
    case GIM_CheckCxxInsnPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Predicate = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs()
                          << CurrentIdx << ": GIM_CheckCxxPredicate(MIs["
                          << InsnID << "], Predicate=" << Predicate << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(Predicate > GIPFP_MI_Invalid && "Expected a valid predicate");

      if (!testMIPredicate_MI(Predicate, *State.MIs[InsnID],
                              State.RecordedOperands))
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckHasNoUse: {
      int64_t InsnID = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckHasNoUse(MIs["
                             << InsnID << "]\n");

      const MachineInstr *MI = State.MIs[InsnID];
      assert(MI && "Used insn before defined");
      assert(MI->getNumDefs() > 0 && "No defs");
      const Register Res = MI->getOperand(0).getReg();

      if (!MRI.use_nodbg_empty(Res)) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }

      break;
    }
    case GIM_CheckAtomicOrdering: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->hasOneMemOperand())
        if (handleReject() == RejectAndGiveUp)
          return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (MMO->getMergedOrdering() != Ordering)
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
    }
    case GIM_CheckAtomicOrderingOrStrongerThan: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckAtomicOrderingOrStrongerThan(MIs["
                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->hasOneMemOperand())
        if (handleReject() == RejectAndGiveUp)
          return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (!isAtLeastOrStrongerThan(MMO->getMergedOrdering(), Ordering))
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
    }
    case GIM_CheckAtomicOrderingWeakerThan: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckAtomicOrderingWeakerThan(MIs["
                             << InsnID << "], " << (uint64_t)Ordering << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->hasOneMemOperand())
        if (handleReject() == RejectAndGiveUp)
          return false;

      for (const auto &MMO : State.MIs[InsnID]->memoperands())
        if (!isStrongerThan(Ordering, MMO->getMergedOrdering()))
          if (handleReject() == RejectAndGiveUp)
            return false;
      break;
    }
    case GIM_CheckMemoryAddressSpace: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      // This accepts a list of possible address spaces.
      const int NumAddrSpace = MatchTable[CurrentIdx++];
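
      // E.g. (a sketch) the encoding
      //   GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2,
      //   /*AS*/1, /*AS*/3,
      // accepts an MMO in address space 1 or 3.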

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      // We still need to jump to the end of the list of address spaces even
      // if we find a match early.
      const uint64_t LastIdx = CurrentIdx + NumAddrSpace;

      const MachineMemOperand *MMO
        = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
      const unsigned MMOAddrSpace = MMO->getAddrSpace();

      bool Success = false;
      for (int I = 0; I != NumAddrSpace; ++I) {
        unsigned AddrSpace = MatchTable[CurrentIdx++];
        DEBUG_WITH_TYPE(
          TgtInstructionSelector::getName(),
          dbgs() << "addrspace(" << MMOAddrSpace << ") vs "
                 << AddrSpace << '\n');

        if (AddrSpace == MMOAddrSpace) {
          Success = true;
          break;
        }
      }

      CurrentIdx = LastIdx;
      if (!Success && handleReject() == RejectAndGiveUp)
        return false;
      break;
    }
    case GIM_CheckMemoryAlignment: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      unsigned MinAlign = MatchTable[CurrentIdx++];

      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineMemOperand *MMO
        = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
                      << "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
                      << ")->getAlignment() >= " << MinAlign << ")\n");
      if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
        return false;

      break;
    }
    case GIM_CheckMemorySizeEqualTo: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      uint64_t Size = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIM_CheckMemorySizeEqual(MIs[" << InsnID
                             << "]->memoperands() + " << MMOIdx
                             << ", Size=" << Size << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << MMO->getSize() << " bytes vs " << Size
                             << " bytes\n");
      if (MMO->getSize() != Size)
        if (handleReject() == RejectAndGiveUp)
          return false;

      break;
    }
    case GIM_CheckMemorySizeEqualToLLT:
    case GIM_CheckMemorySizeLessThanLLT:
    case GIM_CheckMemorySizeGreaterThanLLT: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t MMOIdx = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(
          TgtInstructionSelector::getName(),
          dbgs() << CurrentIdx << ": GIM_CheckMemorySize"
                 << (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT
                         ? "EqualTo"
                         : MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT
                               ? "GreaterThan"
                               : "LessThan")
                 << "LLT(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
                 << ", OpIdx=" << OpIdx << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");

      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg()) {
        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                        dbgs() << CurrentIdx << ": Not a register\n");
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      if (State.MIs[InsnID]->getNumMemOperands() <= MMOIdx) {
        if (handleReject() == RejectAndGiveUp)
          return false;
        break;
      }

      MachineMemOperand *MMO = *(State.MIs[InsnID]->memoperands_begin() + MMOIdx);

      unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
      if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
          MMO->getSizeInBits() != Size) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
                 MMO->getSizeInBits() >= Size) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
                 MMO->getSizeInBits() <= Size)
        if (handleReject() == RejectAndGiveUp)
          return false;

      break;
    }
    case GIM_CheckType: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t TypeID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckType(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx
                             << "), TypeID=" << TypeID << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg() ||
          MRI.getType(MO.getReg()) != ISelInfo.TypeObjects[TypeID]) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckPointerToAny: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      uint64_t SizeInBits = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckPointerToAny(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), SizeInBits=" << SizeInBits << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      const LLT Ty = MRI.getType(MO.getReg());

      // iPTR must be looked up in the target.
      if (SizeInBits == 0) {
        MachineFunction *MF = State.MIs[InsnID]->getParent()->getParent();
        const unsigned AddrSpace = Ty.getAddressSpace();
        SizeInBits = MF->getDataLayout().getPointerSizeInBits(AddrSpace);
      }

      assert(SizeInBits != 0 && "Pointer size must be known");

      if (MO.isReg()) {
        if (!Ty.isPointer() || Ty.getSizeInBits() != SizeInBits)
          if (handleReject() == RejectAndGiveUp)
            return false;
      } else if (handleReject() == RejectAndGiveUp)
        return false;

      break;
    }
    case GIM_RecordNamedOperand: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      uint64_t StoreIdx = MatchTable[CurrentIdx++];

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_RecordNamedOperand(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), StoreIdx=" << StoreIdx << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(StoreIdx < State.RecordedOperands.size() && "Index out of range");
      State.RecordedOperands[StoreIdx] = &State.MIs[InsnID]->getOperand(OpIdx);
      break;
    }
    case GIM_CheckRegBankForClass: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RCEnum = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckRegBankForClass(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), RCEnum=" << RCEnum << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isReg() ||
          &RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum),
                                      MRI.getType(MO.getReg())) !=
              RBI.getRegBank(MO.getReg(), MRI, TRI)) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }

    case GIM_CheckComplexPattern: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      int64_t ComplexPredicateID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": State.Renderers[" << RendererID
                             << "] = GIM_CheckComplexPattern(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx
                             << "), ComplexPredicateID=" << ComplexPredicateID
                             << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      // FIXME: Use std::invoke() when it's available.
      ComplexRendererFns Renderer =
          (ISel.*ISelInfo.ComplexPredicates[ComplexPredicateID])(
              State.MIs[InsnID]->getOperand(OpIdx));
      if (Renderer)
        State.Renderers[RendererID] = *Renderer;
      else
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }

    case GIM_CheckConstantInt: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckConstantInt(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (MO.isReg()) {
        // isOperandImmEqual() will sign-extend to 64-bits, so should we.
        LLT Ty = MRI.getType(MO.getReg());
        Value = SignExtend64(Value, Ty.getSizeInBits());

        if (!isOperandImmEqual(MO, Value, MRI)) {
          if (handleReject() == RejectAndGiveUp)
            return false;
        }
      } else if (handleReject() == RejectAndGiveUp)
        return false;

      break;
    }

    case GIM_CheckLiteralInt: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckLiteralInt(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (MO.isImm() && MO.getImm() == Value)
        break;

      if (MO.isCImm() && MO.getCImm()->equalsInt(Value))
        break;

      if (handleReject() == RejectAndGiveUp)
        return false;

      break;
    }

    case GIM_CheckIntrinsicID: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIntrinsicID(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isIntrinsicID() || MO.getIntrinsicID() != Value)
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckCmpPredicate: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t Value = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckCmpPredicate(MIs["
                             << InsnID << "]->getOperand(" << OpIdx
                             << "), Value=" << Value << ")\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
      if (!MO.isPredicate() || MO.getPredicate() != Value)
        if (handleReject() == RejectAndGiveUp)
          return false;
      break;
    }
    case GIM_CheckIsMBB: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsMBB(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx << "))\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB()) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckIsImm: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsImm(MIs[" << InsnID
                             << "]->getOperand(" << OpIdx << "))\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->getOperand(OpIdx).isImm()) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckIsSafeToFold: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsSafeToFold(MIs["
                             << InsnID << "])\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      if (!isObviouslySafeToFold(*State.MIs[InsnID], *State.MIs[0])) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_CheckIsSameOperand: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t OtherInsnID = MatchTable[CurrentIdx++];
      int64_t OtherOpIdx = MatchTable[CurrentIdx++];
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_CheckIsSameOperand(MIs["
                             << InsnID << "][" << OpIdx << "], MIs["
                             << OtherInsnID << "][" << OtherOpIdx << "])\n");
      assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
      assert(State.MIs[OtherInsnID] != nullptr && "Used insn before defined");
      if (!State.MIs[InsnID]->getOperand(OpIdx).isIdenticalTo(
              State.MIs[OtherInsnID]->getOperand(OtherOpIdx))) {
        if (handleReject() == RejectAndGiveUp)
          return false;
      }
      break;
    }
    case GIM_Reject:
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIM_Reject\n");
      if (handleReject() == RejectAndGiveUp)
        return false;
      break;

    case GIR_MutateOpcode: {
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      uint64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t NewOpcode = MatchTable[CurrentIdx++];
      if (NewInsnID >= OutMIs.size())
        OutMIs.resize(NewInsnID + 1);

      OutMIs[NewInsnID] = MachineInstrBuilder(*State.MIs[OldInsnID]->getMF(),
                                              State.MIs[OldInsnID]);
      OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_MutateOpcode(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "], "
                             << NewOpcode << ")\n");
      break;
    }

    case GIR_BuildMI: {
      uint64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t Opcode = MatchTable[CurrentIdx++];
      if (NewInsnID >= OutMIs.size())
        OutMIs.resize(NewInsnID + 1);

      OutMIs[NewInsnID] = BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
                                  MIMetadata(*State.MIs[0]), TII.get(Opcode));
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_BuildMI(OutMIs["
                             << NewInsnID << "], " << Opcode << ")\n");
      break;
    }

    case GIR_Copy: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs()
                          << CurrentIdx << ": GIR_Copy(OutMIs[" << NewInsnID
                          << "], MIs[" << OldInsnID << "], " << OpIdx << ")\n");
      break;
    }

    case GIR_CopyOrAddZeroReg: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t ZeroReg = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      MachineOperand &MO = State.MIs[OldInsnID]->getOperand(OpIdx);
      if (isOperandImmEqual(MO, 0, MRI))
        OutMIs[NewInsnID].addReg(ZeroReg);
      else
        OutMIs[NewInsnID].add(MO);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopyOrAddZeroReg(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "], "
                             << OpIdx << ", " << ZeroReg << ")\n");
      break;
    }

    case GIR_CopySubReg: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t SubRegIdx = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
                               0, SubRegIdx);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopySubReg(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "], "
                             << OpIdx << ", " << SubRegIdx << ")\n");
      break;
    }

    case GIR_AddImplicitDef: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RegNum = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddImplicitDef(OutMIs["
                             << InsnID << "], " << RegNum << ")\n");
      break;
    }

    case GIR_AddImplicitUse: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RegNum = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddImplicitUse(OutMIs["
                             << InsnID << "], " << RegNum << ")\n");
      break;
    }

    case GIR_AddRegister: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RegNum = MatchTable[CurrentIdx++];
      uint64_t RegFlags = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addReg(RegNum, RegFlags);
      DEBUG_WITH_TYPE(
          TgtInstructionSelector::getName(),
          dbgs() << CurrentIdx << ": GIR_AddRegister(OutMIs[" << InsnID << "], "
                 << RegNum << ", " << RegFlags << ")\n");
      break;
    }

    case GIR_AddTempRegister:
    case GIR_AddTempSubRegister: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t TempRegID = MatchTable[CurrentIdx++];
      uint64_t TempRegFlags = MatchTable[CurrentIdx++];
      unsigned SubReg = 0;
      if (MatcherOpcode == GIR_AddTempSubRegister)
        SubReg = MatchTable[CurrentIdx++];

      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");

      OutMIs[InsnID].addReg(State.TempRegisters[TempRegID], TempRegFlags, SubReg);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddTempRegister(OutMIs["
                             << InsnID << "], TempRegisters[" << TempRegID
                             << "]";
                      if (SubReg)
                        dbgs() << '.' << TRI.getSubRegIndexName(SubReg);
                      dbgs() << ", " << TempRegFlags << ")\n");
      break;
    }

    case GIR_AddImm: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t Imm = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      OutMIs[InsnID].addImm(Imm);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_AddImm(OutMIs[" << InsnID
                             << "], " << Imm << ")\n");
      break;
    }

    case GIR_ComplexRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      for (const auto &RenderOpFn : State.Renderers[RendererID])
        RenderOpFn(OutMIs[InsnID]);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_ComplexRenderer(OutMIs["
                             << InsnID << "], " << RendererID << ")\n");
      break;
    }
    case GIR_ComplexSubOperandRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t RendererID = MatchTable[CurrentIdx++];
      int64_t RenderOpID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      State.Renderers[RendererID][RenderOpID](OutMIs[InsnID]);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIR_ComplexSubOperandRenderer(OutMIs["
                             << InsnID << "], " << RendererID << ", "
                             << RenderOpID << ")\n");
      break;
    }

    case GIR_CopyConstantAsSImm: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_CONSTANT &&
             "Expected G_CONSTANT");
      if (State.MIs[OldInsnID]->getOperand(1).isCImm()) {
        OutMIs[NewInsnID].addImm(
            State.MIs[OldInsnID]->getOperand(1).getCImm()->getSExtValue());
      } else if (State.MIs[OldInsnID]->getOperand(1).isImm())
        OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(1));
      else
        llvm_unreachable("Expected Imm or CImm operand");
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopyConstantAsSImm(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
      break;
    }

    // TODO: Needs a test case once we have a pattern that uses this.
    case GIR_CopyFConstantAsFPImm: {
      int64_t NewInsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
      assert(State.MIs[OldInsnID]->getOpcode() == TargetOpcode::G_FCONSTANT &&
             "Expected G_FCONSTANT");
      if (State.MIs[OldInsnID]->getOperand(1).isFPImm())
        OutMIs[NewInsnID].addFPImm(
            State.MIs[OldInsnID]->getOperand(1).getFPImm());
      else
        llvm_unreachable("Expected FPImm operand");
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_CopyFPConstantAsFPImm(OutMIs["
                             << NewInsnID << "], MIs[" << OldInsnID << "])\n");
      break;
    }

    case GIR_CustomRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t RendererFnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_CustomRenderer(OutMIs["
                             << InsnID << "], MIs[" << OldInsnID << "], "
                             << RendererFnID << ")\n");
      (ISel.*ISelInfo.CustomRenderers[RendererFnID])(
        OutMIs[InsnID], *State.MIs[OldInsnID],
        -1); // Not a source operand of the old instruction.
      break;
    }
    case GIR_CustomOperandRenderer: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OldInsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RendererFnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");

      DEBUG_WITH_TYPE(
          TgtInstructionSelector::getName(),
          dbgs() << CurrentIdx << ": GIR_CustomOperandRenderer(OutMIs["
                 << InsnID << "], MIs[" << OldInsnID << "]->getOperand("
                 << OpIdx << "), " << RendererFnID << ")\n");
      (ISel.*ISelInfo.CustomRenderers[RendererFnID])(OutMIs[InsnID],
                                                     *State.MIs[OldInsnID],
                                                     OpIdx);
      break;
    }
    case GIR_ConstrainOperandRC: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      int64_t OpIdx = MatchTable[CurrentIdx++];
      int64_t RCEnum = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      MachineInstr &I = *OutMIs[InsnID].getInstr();
      MachineFunction &MF = *I.getParent()->getParent();
      MachineRegisterInfo &MRI = MF.getRegInfo();
      const TargetRegisterClass &RC = *TRI.getRegClass(RCEnum);
      MachineOperand &MO = I.getOperand(OpIdx);
      constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, RC, MO);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_ConstrainOperandRC(OutMIs["
                             << InsnID << "], " << OpIdx << ", " << RCEnum
                             << ")\n");
      break;
    }

    case GIR_ConstrainSelectedInstOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
      constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
                                       RBI);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx
                             << ": GIR_ConstrainSelectedInstOperands(OutMIs["
                             << InsnID << "])\n");
      break;
    }

    case GIR_MergeMemOperands: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");

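      // The table encodes a variable-length list of instruction IDs terminated
      // by GIU_MergeMemOperands_EndOfList, e.g. (hypothetical fragment):
      //   GIR_MergeMemOperands, /*InsnID*/0, /*MergeInsnID*/1, /*MergeInsnID*/2,
      //   GIU_MergeMemOperands_EndOfList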
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_MergeMemOperands(OutMIs["
                             << InsnID << "]");
      int64_t MergeInsnID = GIU_MergeMemOperands_EndOfList;
      while ((MergeInsnID = MatchTable[CurrentIdx++]) !=
             GIU_MergeMemOperands_EndOfList) {
        DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                        dbgs() << ", MIs[" << MergeInsnID << "]");
        for (const auto &MMO : State.MIs[MergeInsnID]->memoperands())
          OutMIs[InsnID].addMemOperand(MMO);
      }
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(), dbgs() << ")\n");
      break;
    }

    case GIR_EraseFromParent: {
      int64_t InsnID = MatchTable[CurrentIdx++];
      assert(State.MIs[InsnID] &&
             "Attempted to erase an undefined instruction");
      State.MIs[InsnID]->eraseFromParent();
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_EraseFromParent(MIs["
                             << InsnID << "])\n");
      break;
    }

    case GIR_MakeTempReg: {
      int64_t TempRegID = MatchTable[CurrentIdx++];
      int64_t TypeID = MatchTable[CurrentIdx++];

      State.TempRegisters[TempRegID] =
          MRI.createGenericVirtualRegister(ISelInfo.TypeObjects[TypeID]);
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": TempRegs[" << TempRegID
                             << "] = GIR_MakeTempReg(" << TypeID << ")\n");
      break;
    }

    case GIR_Coverage: {
      int64_t RuleID = MatchTable[CurrentIdx++];
      CoverageInfo.setCovered(RuleID);

      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs()
                          << CurrentIdx << ": GIR_Coverage(" << RuleID << ")");
      break;
    }

    case GIR_Done:
      DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
                      dbgs() << CurrentIdx << ": GIR_Done\n");
      propagateFlags(OutMIs);
      return true;

    default:
      llvm_unreachable("Unexpected command");
    }
  }
}

} // end namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
//== llvm/CodeGen/GlobalISel/Legalizer.h ---------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file A pass to convert the target-illegal operations created by IR -> MIR
/// translation into ones the target expects to be able to select. This may
/// occur in multiple phases, for example G_ADD <2 x i8> -> G_ADD <2 x i16> ->
/// G_ADD <4 x i16>.
///
/// The LegalizerHelper class is where most of the work happens, and is designed
/// to be callable from other passes that find themselves with an illegal
/// instruction.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

namespace llvm {

class LegalizerInfo;
class MachineIRBuilder;
class MachineInstr;
class GISelChangeObserver;
class LostDebugLocObserver;

class Legalizer : public MachineFunctionPass {
public:
  static char ID;

  struct MFResult {
    bool Changed;
    const MachineInstr *FailedOn;
  };

private:
  /// Initialize the field members using \p MF.
  void init(MachineFunction &MF);

public:
  // Ctor, nothing fancy.
  Legalizer();

  StringRef getPassName() const override { return "Legalizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::IsSSA);
  }

  MachineFunctionProperties getSetProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::Legalized);
  }

  MachineFunctionProperties getClearedProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  static MFResult
  legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
                          ArrayRef<GISelChangeObserver *> AuxObservers,
                          LostDebugLocObserver &LocObserver,
                          MachineIRBuilder &MIRBuilder, GISelKnownBits *KB);
};
} // End namespace llvm.

#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZER_H
//===-- llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h -----*- C++ -*-//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains some helper functions which try to clean up artifacts
// such as G_TRUNCs/G_[ZSA]EXTs that were created during legalization to make
// the types match. It also contains some combines of merges that happen at
// the end of legalization.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "legalizer"

namespace llvm {
class LegalizationArtifactCombiner {
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  const LegalizerInfo &LI;

  static bool isArtifactCast(unsigned Opc) {
    switch (Opc) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
      return true;
    default:
      return false;
    }
  }

public:
  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const LegalizerInfo &LI)
      : Builder(B), MRI(MRI), LI(LI) {}

  bool tryCombineAnyExt(MachineInstr &MI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs,
                        GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // aext(trunc x) -> aext/copy/trunc x
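    // Illustrative MIR (register names made up):
    //   %t:_(s16) = G_TRUNC %x:_(s32)
    //   %d:_(s32) = G_ANYEXT %t
    // becomes a plain replacement/copy of %x since the destination type equals
    // the trunc source type; otherwise an anyext-or-trunc of %x is built.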
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
      if (MRI.getType(DstReg) == MRI.getType(TruncSrc))
        replaceRegOrBuildCopy(DstReg, TruncSrc, MRI, Builder, UpdatedDefs,
                              Observer);
      else
        Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // aext([asz]ext x) -> [asz]ext x
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)),
                                                    m_GZExt(m_Reg(ExtSrc)))))) {
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *ExtMI, DeadInsts);
      return true;
    }

    // Try to fold aext(g_constant) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }

  bool tryCombineZExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs,
                      GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_ZEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // zext(trunc x) -> and (aext/copy/trunc x), mask
    // zext(sext x) -> and (sext x), mask
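    // Illustrative MIR (register names made up):
    //   %t:_(s16) = G_TRUNC %x:_(s32)
    //   %d:_(s32) = G_ZEXT %t
    // becomes
    //   %m:_(s32) = G_CONSTANT i32 65535
    //   %d:_(s32) = G_AND %x, %m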
    Register TruncSrc;
    Register SextSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
        mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
          isConstantUnsupported(DstTy))
        return false;
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
      LLT SrcTy = MRI.getType(SrcReg);
      APInt MaskVal = APInt::getAllOnes(SrcTy.getScalarSizeInBits());
      auto Mask = Builder.buildConstant(
        DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
      if (SextSrc && (DstTy != MRI.getType(SextSrc)))
        SextSrc = Builder.buildSExtOrTrunc(DstTy, SextSrc).getReg(0);
      if (TruncSrc && (DstTy != MRI.getType(TruncSrc)))
        TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
      Builder.buildAnd(DstReg, SextSrc ? SextSrc : TruncSrc, Mask);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // zext(zext x) -> (zext x)
    Register ZextSrc;
    if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
      Observer.changingInstr(MI);
      MI.getOperand(1).setReg(ZextSrc);
      Observer.changedInstr(MI);
      UpdatedDefs.push_back(DstReg);
      markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold zext(g_constant) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }

  bool tryCombineSExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_SEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // sext(trunc x) -> (sext_inreg (aext/copy/trunc x), c)
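    // Illustrative MIR (register names made up):
    //   %t:_(s16) = G_TRUNC %x:_(s32)
    //   %d:_(s32) = G_SEXT %t
    // becomes
    //   %d:_(s32) = G_SEXT_INREG %x, 16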
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
        return false;
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
      LLT SrcTy = MRI.getType(SrcReg);
      uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
      if (DstTy != MRI.getType(TruncSrc))
        TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
      Builder.buildSExtInReg(DstReg, TruncSrc, SizeInBits);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // sext(zext x) -> (zext x)
    // sext(sext x) -> (sext x)
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)))))) {
      LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI);
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold sext(g_constant) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }

    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
  }

  bool tryCombineTrunc(MachineInstr &MI,
                       SmallVectorImpl<MachineInstr *> &DeadInsts,
                       SmallVectorImpl<Register> &UpdatedDefs,
                       GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_TRUNC);

    Builder.setInstr(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // Try to fold trunc(g_constant) when the smaller constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }

    // Try to fold trunc(merge) to directly use the source of the merge.
    // This gets rid of large, difficult-to-legalize merges.
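    // Illustrative MIR (register names made up):
    //   %m:_(s128) = G_MERGE_VALUES %a:_(s64), %b:_(s64)
    //   %d:_(s32) = G_TRUNC %m
    // can instead truncate the first merge source directly:
    //   %d:_(s32) = G_TRUNC %a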
    if (auto *SrcMerge = dyn_cast<GMerge>(SrcMI)) {
      const Register MergeSrcReg = SrcMerge->getSourceReg(0);
      const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
      const LLT DstTy = MRI.getType(DstReg);

      // We can only fold if the types are scalar
      const unsigned DstSize = DstTy.getSizeInBits();
      const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
      if (!DstTy.isScalar() || !MergeSrcTy.isScalar())
        return false;

      if (DstSize < MergeSrcSize) {
        // When the merge source is larger than the destination, we can just
        // truncate the merge source directly
        if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
          return false;

        LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
                          << MI);

        Builder.buildTrunc(DstReg, MergeSrcReg);
        UpdatedDefs.push_back(DstReg);
      } else if (DstSize == MergeSrcSize) {
        // If the sizes match we can simply try to replace the register
        LLVM_DEBUG(
            dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
                   << MI);
        replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      } else if (DstSize % MergeSrcSize == 0) {
        // If the trunc size is a multiple of the merge source size we can use
        // a smaller merge instead
        if (isInstUnsupported(
                {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
          return false;

        LLVM_DEBUG(
            dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
                   << MI);

        const unsigned NumSrcs = DstSize / MergeSrcSize;
        assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
               "trunc(merge) should require less inputs than merge");
        SmallVector<Register, 8> SrcRegs(NumSrcs);
        for (unsigned i = 0; i < NumSrcs; ++i)
          SrcRegs[i] = SrcMerge->getSourceReg(i);

        Builder.buildMergeValues(DstReg, SrcRegs);
        UpdatedDefs.push_back(DstReg);
      } else {
        // Unable to combine
        return false;
      }

      markInstAndDefDead(MI, *SrcMerge, DeadInsts);
      return true;
    }

    // trunc(trunc) -> trunc
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      // Always combine trunc(trunc) since the eventual resulting trunc must be
      // legal anyway as it must be legal for all outputs of the consumer type
      // set.
      LLVM_DEBUG(dbgs() << ".. Combine G_TRUNC(G_TRUNC): " << MI);

      Builder.buildTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
      return true;
    }

    return false;
  }

  /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
  bool tryFoldImplicitDef(MachineInstr &MI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {
    unsigned Opcode = MI.getOpcode();
    assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
           Opcode == TargetOpcode::G_SEXT);

    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                           MI.getOperand(1).getReg(), MRI)) {
      Builder.setInstr(MI);
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstTy = MRI.getType(DstReg);

      if (Opcode == TargetOpcode::G_ANYEXT) {
        // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
        if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
          return false;
        LLVM_DEBUG(dbgs() << ".. Combine G_ANYEXT(G_IMPLICIT_DEF): " << MI;);
        Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
        UpdatedDefs.push_back(DstReg);
      } else {
        // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0 because the top
        // bits will be 0 for G_ZEXT and 0/1 for the G_SEXT.
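        // Illustrative MIR (register names made up):
        //   %u:_(s32) = G_IMPLICIT_DEF
        //   %d:_(s64) = G_ZEXT %u
        // becomes
        //   %d:_(s64) = G_CONSTANT i64 0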
        if (isConstantUnsupported(DstTy))
          return false;
        LLVM_DEBUG(dbgs() << ".. Combine G_[SZ]EXT(G_IMPLICIT_DEF): " << MI;);
        Builder.buildConstant(DstReg, 0);
        UpdatedDefs.push_back(DstReg);
      }

      markInstAndDefDead(MI, *DefMI, DeadInsts);
      return true;
    }
    return false;
  }

  bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {

    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

    const unsigned CastOpc = CastMI.getOpcode();

    if (!isArtifactCast(CastOpc))
      return false;

    const unsigned NumDefs = MI.getNumOperands() - 1;

    const Register CastSrcReg = CastMI.getOperand(1).getReg();
    const LLT CastSrcTy = MRI.getType(CastSrcReg);
    const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());

    const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
    const unsigned DestSize = DestTy.getSizeInBits();

    if (CastOpc == TargetOpcode::G_TRUNC) {
      if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
        //  %1:_(<4 x s8>) = G_TRUNC %0(<4 x s32>)
        //  %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %1
        // =>
        //  %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %0
        //  %2:_(s8) = G_TRUNC %6
        //  %3:_(s8) = G_TRUNC %7
        //  %4:_(s8) = G_TRUNC %8
        //  %5:_(s8) = G_TRUNC %9

        unsigned UnmergeNumElts =
            DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
        LLT UnmergeTy = CastSrcTy.changeElementCount(
            ElementCount::getFixed(UnmergeNumElts));

        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}))
          return false;

        Builder.setInstr(MI);
        auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);

        for (unsigned I = 0; I != NumDefs; ++I) {
          Register DefReg = MI.getOperand(I).getReg();
          UpdatedDefs.push_back(DefReg);
          Builder.buildTrunc(DefReg, NewUnmerge.getReg(I));
        }

        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }

      if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
        //  %1:_(s16) = G_TRUNC %0(s32)
        //  %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %1
        // =>
        //  %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0

        // Unmerge(trunc) can be combined if the trunc source size is a multiple
        // of the unmerge destination size
        if (CastSrcSize % DestSize != 0)
          return false;

        // Check if the new unmerge is supported
        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
          return false;

        // Gather the original destination registers and create new ones for the
        // unused bits
        const unsigned NewNumDefs = CastSrcSize / DestSize;
        SmallVector<Register, 8> DstRegs(NewNumDefs);
        for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
          if (Idx < NumDefs)
            DstRegs[Idx] = MI.getOperand(Idx).getReg();
          else
            DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
        }

        // Build new unmerge
        Builder.setInstr(MI);
        Builder.buildUnmerge(DstRegs, CastSrcReg);
        UpdatedDefs.append(DstRegs.begin(), DstRegs.begin() + NewNumDefs);
        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }
    }

    // TODO: support combines with other casts as well
    return false;
  }

  static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp,
                                 LLT OpTy, LLT DestTy) {
    // Check if we found a definition that is like G_MERGE_VALUES.
    switch (MergeOp) {
    default:
      return false;
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_MERGE_VALUES:
      // The convert operation that we will need to insert is
      // going to convert the input of that type of instruction (scalar)
      // to the destination type (DestTy).
      // The conversion needs to stay in the same domain (scalar to scalar
      // and vector to vector), so if we were to allow folding the merge
      // we would need to insert some bitcasts.
      // E.g.,
      // <2 x s16> = build_vector s16, s16
      // <2 x s32> = zext <2 x s16>
      // <2 x s16>, <2 x s16> = unmerge <2 x s32>
      //
      // As-is, the folding would produce:
      // <2 x s16> = zext s16  <-- scalar to vector
      // <2 x s16> = zext s16  <-- scalar to vector
      // Which is invalid.
      // Instead we would want to generate:
      // s32 = zext s16
      // <2 x s16> = bitcast s32
      // s32 = zext s16
      // <2 x s16> = bitcast s32
      //
      // That is not done yet.
      if (ConvertOp == 0)
        return true;
      return !DestTy.isVector() && OpTy.isVector() &&
             DestTy == OpTy.getElementType();
    case TargetOpcode::G_CONCAT_VECTORS: {
      if (ConvertOp == 0)
        return true;
      if (!DestTy.isVector())
        return false;

      const unsigned OpEltSize = OpTy.getElementType().getSizeInBits();

      // Don't handle scalarization with a cast that isn't in the same
      // direction as the vector cast. This could be handled, but it would
      // require more intermediate unmerges.
      if (ConvertOp == TargetOpcode::G_TRUNC)
        return DestTy.getSizeInBits() <= OpEltSize;
      return DestTy.getSizeInBits() >= OpEltSize;
    }
    }
  }

  /// Try to replace DstReg with SrcReg or build a COPY instruction
  /// depending on the register constraints.
  static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
                                    MachineRegisterInfo &MRI,
                                    MachineIRBuilder &Builder,
                                    SmallVectorImpl<Register> &UpdatedDefs,
                                    GISelChangeObserver &Observer) {
    if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
      Builder.buildCopy(DstReg, SrcReg);
      UpdatedDefs.push_back(DstReg);
      return;
    }
    SmallVector<MachineInstr *, 4> UseMIs;
    // Get the users and notify the observer before replacing.
    for (auto &UseMI : MRI.use_instructions(DstReg)) {
      UseMIs.push_back(&UseMI);
      Observer.changingInstr(UseMI);
    }
    // Replace the registers.
    MRI.replaceRegWith(DstReg, SrcReg);
    UpdatedDefs.push_back(SrcReg);
    // Notify the observer that we changed the instructions.
    for (auto *UseMI : UseMIs)
      Observer.changedInstr(*UseMI);
  }

  /// Return the operand index in \p MI that defines \p Def
  static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
    unsigned DefIdx = 0;
    for (const MachineOperand &Def : MI.defs()) {
      if (Def.getReg() == SearchDef)
        break;
      ++DefIdx;
    }

    return DefIdx;
  }

  /// This class provides utilities for finding source registers of specific
  /// bit ranges in an artifact. The routines can look through the source
  /// registers when they are themselves other artifacts, trying to find a
  /// non-artifact source of a value.
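  /// An illustrative query (hypothetical MIR): given
  ///   %r:_(s64) = G_INSERT %cont:_(s64), %ins:_(s32), 0
  /// asking findValueFromDef() for bits [0, 32) of %r yields %ins.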
  class ArtifactValueFinder {
    MachineRegisterInfo &MRI;
    MachineIRBuilder &MIB;
    const LegalizerInfo &LI;

    // Stores the best register found in the current query so far.
    Register CurrentBest = Register();

    /// Given a concat_vector op \p Concat and a start bit and size, try to
    /// find the origin of the value defined by that start position and size.
    ///
    /// \returns a register with the requested size, or the current best
    /// register found during the current query.
    Register findValueFromConcat(GConcatVectors &Concat, unsigned StartBit,
                                 unsigned Size) {
      assert(Size > 0);

      // Find the source operand that provides the bits requested.
      Register Src1Reg = Concat.getSourceReg(0);
      unsigned SrcSize = MRI.getType(Src1Reg).getSizeInBits();

      // Operand index of the source that provides the start of the bit range.
      unsigned StartSrcIdx = (StartBit / SrcSize) + 1;
      // Offset into the source at which the bit range starts.
      unsigned InRegOffset = StartBit % SrcSize;
      // Check that the bits don't span multiple sources.
      // FIXME: we might be able to return multiple sources? Or create an
      // appropriate concat to make it fit.
      if (InRegOffset + Size > SrcSize)
        return CurrentBest;

      Register SrcReg = Concat.getReg(StartSrcIdx);
      if (InRegOffset == 0 && Size == SrcSize) {
        CurrentBest = SrcReg;
        return findValueFromDefImpl(SrcReg, 0, Size);
      }

      return findValueFromDefImpl(SrcReg, InRegOffset, Size);
    }

    /// Given a build_vector op \p BV and a start bit and size, try to find
    /// the origin of the value defined by that start position and size.
    ///
    /// \returns a register with the requested size, or the current best
    /// register found during the current query.
    Register findValueFromBuildVector(GBuildVector &BV, unsigned StartBit,
                                      unsigned Size) {
      assert(Size > 0);

      // Find the source operand that provides the bits requested.
      Register Src1Reg = BV.getSourceReg(0);
      unsigned SrcSize = MRI.getType(Src1Reg).getSizeInBits();

      // Operand index of the source that provides the start of the bit range.
      unsigned StartSrcIdx = (StartBit / SrcSize) + 1;
      // Offset into the source at which the bit range starts.
      unsigned InRegOffset = StartBit % SrcSize;

      if (InRegOffset != 0)
        return CurrentBest; // Give up, bits don't start at a scalar source.
      if (Size < SrcSize)
        return CurrentBest; // Scalar source is too large for requested bits.

      // If the bits cover multiple sources evenly, then create a new
      // build_vector to synthesize the required size, if that's been requested.
      if (Size > SrcSize) {
        if (Size % SrcSize > 0)
          return CurrentBest; // Isn't covered exactly by sources.

        unsigned NumSrcsUsed = Size / SrcSize;
        // If we're requesting all of the sources, just return this def.
        if (NumSrcsUsed == BV.getNumSources())
          return BV.getReg(0);

        LLT SrcTy = MRI.getType(Src1Reg);
        LLT NewBVTy = LLT::fixed_vector(NumSrcsUsed, SrcTy);

        // Check if the resulting build vector would be legal.
        LegalizeActionStep ActionStep =
            LI.getAction({TargetOpcode::G_BUILD_VECTOR, {NewBVTy, SrcTy}});
        if (ActionStep.Action != LegalizeActions::Legal)
          return CurrentBest;

        SmallVector<Register> NewSrcs;
        for (unsigned SrcIdx = StartSrcIdx; SrcIdx < StartSrcIdx + NumSrcsUsed;
             ++SrcIdx)
          NewSrcs.push_back(BV.getReg(SrcIdx));
        MIB.setInstrAndDebugLoc(BV);
        return MIB.buildBuildVector(NewBVTy, NewSrcs).getReg(0);
      }
      // A single source is requested, just return it.
      return BV.getReg(StartSrcIdx);
    }

    /// Given a G_INSERT op \p MI and a start bit and size, try to find
    /// the origin of the value defined by that start position and size.
    ///
    /// \returns a register with the requested size, or the current best
    /// register found during the current query.
    Register findValueFromInsert(MachineInstr &MI, unsigned StartBit,
                                 unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_INSERT);
      assert(Size > 0);

      Register ContainerSrcReg = MI.getOperand(1).getReg();
      Register InsertedReg = MI.getOperand(2).getReg();
      LLT InsertedRegTy = MRI.getType(InsertedReg);
      unsigned InsertOffset = MI.getOperand(3).getImm();

      // There are 4 possible container/insertreg + requested bit-range layouts
      // that the instruction and query could be representing.
      // For: %_ = G_INSERT %CONTAINER, %INS, InsOff (abbrev. to 'IO')
      // and a start bit 'SB', with size S, giving an end bit 'EB', we could
      // have...
      // Scenario A:
      //   --------------------------
      //  |  INS    |  CONTAINER     |
      //   --------------------------
      //       |   |
      //       SB  EB
      //
      // Scenario B:
      //   --------------------------
      //  |  INS    |  CONTAINER     |
      //   --------------------------
      //                |    |
      //                SB   EB
      //
      // Scenario C:
      //   --------------------------
      //  |  CONTAINER    |  INS     |
      //   --------------------------
      //       |    |
      //       SB   EB
      //
      // Scenario D:
      //   --------------------------
      //  |  CONTAINER    |  INS     |
      //   --------------------------
      //                     |   |
      //                     SB  EB
      //
      // So therefore, A and D are requesting data from the INS operand, while
      // B and C are requesting from the container operand.

      unsigned InsertedEndBit = InsertOffset + InsertedRegTy.getSizeInBits();
      unsigned EndBit = StartBit + Size;
      unsigned NewStartBit;
      Register SrcRegToUse;
      if (EndBit <= InsertOffset || InsertedEndBit <= StartBit) {
        SrcRegToUse = ContainerSrcReg;
        NewStartBit = StartBit;
        return findValueFromDefImpl(SrcRegToUse, NewStartBit, Size);
      }
      if (InsertOffset <= StartBit && EndBit <= InsertedEndBit) {
        SrcRegToUse = InsertedReg;
        NewStartBit = StartBit - InsertOffset;
        if (NewStartBit == 0 &&
            Size == MRI.getType(SrcRegToUse).getSizeInBits())
          CurrentBest = SrcRegToUse;
        return findValueFromDefImpl(SrcRegToUse, NewStartBit, Size);
      }
      // The bit range spans both the inserted and container regions.
      return Register();
    }

    /// Internal implementation for findValueFromDef(). findValueFromDef()
    /// initializes some data like the CurrentBest register, which this method
    /// and its callees rely upon.
    Register findValueFromDefImpl(Register DefReg, unsigned StartBit,
                                  unsigned Size) {
      std::optional<DefinitionAndSourceRegister> DefSrcReg =
          getDefSrcRegIgnoringCopies(DefReg, MRI);
      MachineInstr *Def = DefSrcReg->MI;
      DefReg = DefSrcReg->Reg;
      // If the instruction has a single def, then simply delegate the search.
      // For unmerge however with multiple defs, we need to compute the offset
      // into the source of the unmerge.
      switch (Def->getOpcode()) {
      case TargetOpcode::G_CONCAT_VECTORS:
        return findValueFromConcat(cast<GConcatVectors>(*Def), StartBit, Size);
      case TargetOpcode::G_UNMERGE_VALUES: {
        unsigned DefStartBit = 0;
        unsigned DefSize = MRI.getType(DefReg).getSizeInBits();
        for (const auto &MO : Def->defs()) {
          if (MO.getReg() == DefReg)
            break;
          DefStartBit += DefSize;
        }
        Register SrcReg = Def->getOperand(Def->getNumOperands() - 1).getReg();
        Register SrcOriginReg =
            findValueFromDefImpl(SrcReg, StartBit + DefStartBit, Size);
        if (SrcOriginReg)
          return SrcOriginReg;
        // Failed to find a further value. If the StartBit and Size perfectly
        // covered the requested DefReg, return that since it's better than
        // nothing.
        if (StartBit == 0 && Size == DefSize)
          return DefReg;
        return CurrentBest;
      }
      case TargetOpcode::G_BUILD_VECTOR:
        return findValueFromBuildVector(cast<GBuildVector>(*Def), StartBit,
                                        Size);
      case TargetOpcode::G_INSERT:
        return findValueFromInsert(*Def, StartBit, Size);
      default:
        return CurrentBest;
      }
    }

  public:
    ArtifactValueFinder(MachineRegisterInfo &Mri, MachineIRBuilder &Builder,
                        const LegalizerInfo &Info)
        : MRI(Mri), MIB(Builder), LI(Info) {}

    /// Try to find a source of the value defined in the def \p DefReg, starting
    /// at position \p StartBit with size \p Size.
    /// \returns a register with the requested size, or an empty Register if no
    /// better value could be found.
    Register findValueFromDef(Register DefReg, unsigned StartBit,
                              unsigned Size) {
      CurrentBest = Register();
      Register FoundReg = findValueFromDefImpl(DefReg, StartBit, Size);
      return FoundReg != DefReg ? FoundReg : Register();
    }

    /// Try to combine the defs of an unmerge \p MI by attempting to find
    /// values that provide the bits for each def reg.
    /// \returns true if all the defs of the unmerge have been made dead.
    bool tryCombineUnmergeDefs(GUnmerge &MI, GISelChangeObserver &Observer,
                               SmallVectorImpl<Register> &UpdatedDefs) {
      unsigned NumDefs = MI.getNumDefs();
      LLT DestTy = MRI.getType(MI.getReg(0));

      SmallBitVector DeadDefs(NumDefs);
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        Register DefReg = MI.getReg(DefIdx);
        if (MRI.use_nodbg_empty(DefReg)) {
          DeadDefs[DefIdx] = true;
          continue;
        }
        Register FoundVal = findValueFromDef(DefReg, 0, DestTy.getSizeInBits());
        if (!FoundVal)
          continue;
        if (MRI.getType(FoundVal) != DestTy)
          continue;

        replaceRegOrBuildCopy(DefReg, FoundVal, MRI, MIB, UpdatedDefs,
                              Observer);
        // We only want to replace the uses, not the def of the old reg.
        Observer.changingInstr(MI);
        MI.getOperand(DefIdx).setReg(DefReg);
        Observer.changedInstr(MI);
        DeadDefs[DefIdx] = true;
      }
      return DeadDefs.all();
    }

    GUnmerge *findUnmergeThatDefinesReg(Register Reg, unsigned Size,
                                        unsigned &DefOperandIdx) {
      if (Register Def = findValueFromDefImpl(Reg, 0, Size)) {
        if (auto *Unmerge = dyn_cast<GUnmerge>(MRI.getVRegDef(Def))) {
          DefOperandIdx = Unmerge->findRegisterDefOperandIdx(Def);
          return Unmerge;
        }
      }
      return nullptr;
    }

    // Check if a sequence of elements from a merge-like instruction is defined
    // by another sequence of elements defined by an unmerge. Most often this is
    // the same sequence. Search for elements using findValueFromDefImpl.
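    // E.g. (illustrative): with %a, %b, %c, %d = G_UNMERGE_VALUES %x, the
    // merge sources (%c, %d) form such a sequence for UnmergeIdxStart == 2,
    // since each source's unmerge def index advances in lockstep with i.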
    bool isSequenceFromUnmerge(GMergeLikeInstr &MI, unsigned MergeStartIdx,
                               GUnmerge *Unmerge, unsigned UnmergeIdxStart,
                               unsigned NumElts, unsigned EltSize) {
      assert(MergeStartIdx + NumElts <= MI.getNumSources());
      for (unsigned i = MergeStartIdx; i < MergeStartIdx + NumElts; ++i) {
        unsigned EltUnmergeIdx;
        GUnmerge *EltUnmerge = findUnmergeThatDefinesReg(
            MI.getSourceReg(i), EltSize, EltUnmergeIdx);
        // Check if source i comes from the same Unmerge.
        if (!EltUnmerge || EltUnmerge != Unmerge)
          return false;
        // Check that source i's def has same index in sequence in Unmerge.
        if (i - MergeStartIdx != EltUnmergeIdx - UnmergeIdxStart)
          return false;
      }
      return true;
    }

    bool tryCombineMergeLike(GMergeLikeInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             SmallVectorImpl<Register> &UpdatedDefs,
                             GISelChangeObserver &Observer) {
      Register Elt0 = MI.getSourceReg(0);
      LLT EltTy = MRI.getType(Elt0);
      unsigned EltSize = EltTy.getSizeInBits();

      unsigned Elt0UnmergeIdx;
      // Search for unmerge that will be candidate for combine.
      auto *Unmerge = findUnmergeThatDefinesReg(Elt0, EltSize, Elt0UnmergeIdx);
      if (!Unmerge)
        return false;

      unsigned NumMIElts = MI.getNumSources();
      Register Dst = MI.getReg(0);
      LLT DstTy = MRI.getType(Dst);
      Register UnmergeSrc = Unmerge->getSourceReg();
      LLT UnmergeSrcTy = MRI.getType(UnmergeSrc);

      // Recognize a copy of UnmergeSrc to Dst: UnmergeSrc is unmerged and then
      // reassembled into Dst using a merge-like opcode.
      //
      // %0:_(EltTy), %1, ... = G_UNMERGE_VALUES %UnmergeSrc:_(Ty)
      // %Dst:_(Ty) = G_merge_like_opcode %0:_(EltTy), %1, ...
      //
      // %Dst:_(Ty) = COPY %UnmergeSrc:_(Ty)
      if ((DstTy == UnmergeSrcTy) && (Elt0UnmergeIdx == 0)) {
        if (!isSequenceFromUnmerge(MI, 0, Unmerge, 0, NumMIElts, EltSize))
          return false;
        replaceRegOrBuildCopy(Dst, UnmergeSrc, MRI, MIB, UpdatedDefs, Observer);
        DeadInsts.push_back(&MI);
        return true;
      }

      // Recognize UnmergeSrc that can be unmerged to DstTy directly.
      // Types have to be either both vector or both non-vector types.
      // Merge-like opcodes are combined one at a time. The first one creates a
      // new unmerge; the following ones should use the same one (the builder
      // performs CSE).
      //
      // %0:_(EltTy), %1, %2, %3 = G_UNMERGE_VALUES %UnmergeSrc:_(UnmergeSrcTy)
      // %Dst:_(DstTy) = G_merge_like_opcode %0:_(EltTy), %1
      // %AnotherDst:_(DstTy) = G_merge_like_opcode %2:_(EltTy), %3
      //
      // %Dst:_(DstTy), %AnotherDst = G_UNMERGE_VALUES %UnmergeSrc
      if ((DstTy.isVector() == UnmergeSrcTy.isVector()) &&
          (Elt0UnmergeIdx % NumMIElts == 0) &&
          getCoverTy(UnmergeSrcTy, DstTy) == UnmergeSrcTy) {
        if (!isSequenceFromUnmerge(MI, 0, Unmerge, Elt0UnmergeIdx, NumMIElts,
                                   EltSize))
          return false;
        MIB.setInstrAndDebugLoc(MI);
        auto NewUnmerge = MIB.buildUnmerge(DstTy, Unmerge->getSourceReg());
        unsigned DstIdx = (Elt0UnmergeIdx * EltSize) / DstTy.getSizeInBits();
        replaceRegOrBuildCopy(Dst, NewUnmerge.getReg(DstIdx), MRI, MIB,
                              UpdatedDefs, Observer);
        DeadInsts.push_back(&MI);
        return true;
      }

      // Recognize when multiple unmerged sources with UnmergeSrcTy type
      // can be merged into Dst with DstTy type directly.
      // Types have to be either both vector or both non-vector types.

      // %0:_(EltTy), %1 = G_UNMERGE_VALUES %UnmergeSrc:_(UnmergeSrcTy)
      // %2:_(EltTy), %3 = G_UNMERGE_VALUES %AnotherUnmergeSrc:_(UnmergeSrcTy)
      // %Dst:_(DstTy) = G_merge_like_opcode %0:_(EltTy), %1, %2, %3
      //
      // %Dst:_(DstTy) = G_merge_like_opcode %UnmergeSrc, %AnotherUnmergeSrc

      if ((DstTy.isVector() == UnmergeSrcTy.isVector()) &&
          getCoverTy(DstTy, UnmergeSrcTy) == DstTy) {
        SmallVector<Register, 4> ConcatSources;
        unsigned NumElts = Unmerge->getNumDefs();
        for (unsigned i = 0; i < MI.getNumSources(); i += NumElts) {
          unsigned EltUnmergeIdx;
          auto *UnmergeI = findUnmergeThatDefinesReg(MI.getSourceReg(i),
                                                     EltSize, EltUnmergeIdx);
          // All unmerges have to be the same size.
          if ((!UnmergeI) || (UnmergeI->getNumDefs() != NumElts) ||
              (EltUnmergeIdx != 0))
            return false;
          if (!isSequenceFromUnmerge(MI, i, UnmergeI, 0, NumElts, EltSize))
            return false;
          ConcatSources.push_back(UnmergeI->getSourceReg());
        }

        MIB.setInstrAndDebugLoc(MI);
        MIB.buildMergeLikeInstr(Dst, ConcatSources);
        DeadInsts.push_back(&MI);
        return true;
      }

      return false;
    }
  };

  bool tryCombineUnmergeValues(GUnmerge &MI,
                               SmallVectorImpl<MachineInstr *> &DeadInsts,
                               SmallVectorImpl<Register> &UpdatedDefs,
                               GISelChangeObserver &Observer) {
    unsigned NumDefs = MI.getNumDefs();
    Register SrcReg = MI.getSourceReg();
    MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
    if (!SrcDef)
      return false;

    LLT OpTy = MRI.getType(SrcReg);
    LLT DestTy = MRI.getType(MI.getReg(0));
    unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);

    Builder.setInstrAndDebugLoc(MI);

    ArtifactValueFinder Finder(MRI, Builder, LI);
    if (Finder.tryCombineUnmergeDefs(MI, Observer, UpdatedDefs)) {
      markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
      return true;
    }

    if (auto *SrcUnmerge = dyn_cast<GUnmerge>(SrcDef)) {
      // %0:_(<4 x s16>) = G_FOO
      // %1:_(<2 x s16>), %2:_(<2 x s16>) = G_UNMERGE_VALUES %0
      // %3:_(s16), %4:_(s16) = G_UNMERGE_VALUES %1
      //
      // %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %0
      Register SrcUnmergeSrc = SrcUnmerge->getSourceReg();
      LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);

      // If we need to decrease the number of vector elements in the result type
      // of an unmerge, this would involve the creation of an equivalent unmerge
      // to copy back to the original result registers.
      LegalizeActionStep ActionStep = LI.getAction(
          {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
      switch (ActionStep.Action) {
      case LegalizeActions::Lower:
      case LegalizeActions::Unsupported:
        break;
      case LegalizeActions::FewerElements:
      case LegalizeActions::NarrowScalar:
        if (ActionStep.TypeIdx == 1)
          return false;
        break;
      default:
        return false;
      }

      auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);

      // TODO: Should we try to process out the other defs now? If the other
      // defs of the source unmerge are also unmerged, we end up with a separate
      // unmerge for each one.
      for (unsigned I = 0; I != NumDefs; ++I) {
        Register Def = MI.getReg(I);
        replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
                              MRI, Builder, UpdatedDefs, Observer);
      }

      markInstAndDefDead(MI, *SrcUnmerge, DeadInsts, SrcDefIdx);
      return true;
    }

    MachineInstr *MergeI = SrcDef;
    unsigned ConvertOp = 0;

    // Handle intermediate conversions
    unsigned SrcOp = SrcDef->getOpcode();
    if (isArtifactCast(SrcOp)) {
      ConvertOp = SrcOp;
      MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
    }

    if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(),
                                       ConvertOp, OpTy, DestTy)) {
      // We might have a chance to combine later by trying to combine
      // unmerge(cast) first
      return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
    }

    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;

    if (NumMergeRegs < NumDefs) {
      if (NumDefs % NumMergeRegs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform to UNMERGEs, for example
      //   %1 = G_MERGE_VALUES %4, %5
      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
      // to
      //   %9, %10 = G_UNMERGE_VALUES %4
      //   %11, %12 = G_UNMERGE_VALUES %5

      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
        SmallVector<Register, 8> DstRegs;
        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
             ++j, ++DefIdx)
          DstRegs.push_back(MI.getReg(DefIdx));

        if (ConvertOp) {
          LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

          // This is a vector that is being split and casted. Extract to the
          // element type, and do the conversion on the scalars (or smaller
          // vectors).
          LLT MergeEltTy = MergeSrcTy.divide(NewNumDefs);

          // Handle split to smaller vectors, with conversions.
          // %2(<8 x s8>) = G_CONCAT_VECTORS %0(<4 x s8>), %1(<4 x s8>)
          // %3(<8 x s16>) = G_SEXT %2
          // %4(<2 x s16>), %5(<2 x s16>), %6(<2 x s16>), %7(<2 x s16>) = G_UNMERGE_VALUES %3
          //
          // =>
          //
          // %8(<2 x s8>), %9(<2 x s8>) = G_UNMERGE_VALUES %0
          // %10(<2 x s8>), %11(<2 x s8>) = G_UNMERGE_VALUES %1
          // %4(<2 x s16>) = G_SEXT %8
          // %5(<2 x s16>) = G_SEXT %9
          // %6(<2 x s16>) = G_SEXT %10
          // %7(<2 x s16>) = G_SEXT %11

          SmallVector<Register, 4> TmpRegs(NewNumDefs);
          for (unsigned k = 0; k < NewNumDefs; ++k)
            TmpRegs[k] = MRI.createGenericVirtualRegister(MergeEltTy);

          Builder.buildUnmerge(TmpRegs, MergeI->getOperand(Idx + 1).getReg());

          for (unsigned k = 0; k < NewNumDefs; ++k)
            Builder.buildInstr(ConvertOp, {DstRegs[k]}, {TmpRegs[k]});
        } else {
          Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
        }
        UpdatedDefs.append(DstRegs.begin(), DstRegs.end());
      }

    } else if (NumMergeRegs > NumDefs) {
      if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform to MERGEs
      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
      //   %7, %8 = G_UNMERGE_VALUES %6
      // to
      //   %7 = G_MERGE_VALUES %17, %18
      //   %8 = G_MERGE_VALUES %19, %20

      const unsigned NumRegs = NumMergeRegs / NumDefs;
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        SmallVector<Register, 8> Regs;
        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
             ++j, ++Idx)
          Regs.push_back(MergeI->getOperand(Idx).getReg());

        Register DefReg = MI.getReg(DefIdx);
        Builder.buildMergeLikeInstr(DefReg, Regs);
        UpdatedDefs.push_back(DefReg);
      }

    } else {
      LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

      if (!ConvertOp && DestTy != MergeSrcTy)
        ConvertOp = TargetOpcode::G_BITCAST;

      if (ConvertOp) {
        Builder.setInstr(MI);

        for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
          Register DefReg = MI.getOperand(Idx).getReg();
          Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();

          if (!MRI.use_empty(DefReg)) {
            Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
            UpdatedDefs.push_back(DefReg);
          }
        }

        markInstAndDefDead(MI, *MergeI, DeadInsts);
        return true;
      }

      assert(DestTy == MergeSrcTy &&
             "Bitcast and the other kinds of conversions should "
             "have happened earlier");

      Builder.setInstr(MI);
      for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
        Register DstReg = MI.getOperand(Idx).getReg();
        Register SrcReg = MergeI->getOperand(Idx + 1).getReg();
        replaceRegOrBuildCopy(DstReg, SrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      }
    }

    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }

  bool tryCombineExtract(MachineInstr &MI,
                         SmallVectorImpl<MachineInstr *> &DeadInsts,
                         SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);

    // Try to use the source registers from a G_MERGE_VALUES
    //
    // %2 = G_MERGE_VALUES %0, %1
    // %3 = G_EXTRACT %2, N
    // =>
    //
    // for N < %2.getSizeInBits() / 2
    //     %3 = G_EXTRACT %0, N
    //
    // for N >= %2.getSizeInBits() / 2
    //     %3 = G_EXTRACT %1, (N - %0.getSizeInBits())

    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
    if (!MergeI || !isa<GMergeLikeInstr>(MergeI))
      return false;

    Register DstReg = MI.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);
    LLT SrcTy = MRI.getType(SrcReg);

    // TODO: Do we need to check if the resulting extract is supported?
    unsigned ExtractDstSize = DstTy.getSizeInBits();
    unsigned Offset = MI.getOperand(2).getImm();
    unsigned NumMergeSrcs = MergeI->getNumOperands() - 1;
    unsigned MergeSrcSize = SrcTy.getSizeInBits() / NumMergeSrcs;
    unsigned MergeSrcIdx = Offset / MergeSrcSize;

    // Compute the offset of the last bit the extract needs.
    unsigned EndMergeSrcIdx = (Offset + ExtractDstSize - 1) / MergeSrcSize;

    // Can't handle the case where the extract spans multiple inputs.
    if (MergeSrcIdx != EndMergeSrcIdx)
      return false;

    // TODO: We could modify MI in place in most cases.
    Builder.setInstr(MI);
    Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
                         Offset - MergeSrcIdx * MergeSrcSize);
    UpdatedDefs.push_back(DstReg);
    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }
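
  // Worked example for the index math above (illustrative values only):
  //   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
  //   %3:_(s16) = G_EXTRACT %2, 40
  // Here MergeSrcSize = 32, MergeSrcIdx = 40 / 32 = 1, and
  // EndMergeSrcIdx = (40 + 16 - 1) / 32 = 1, so the extract reads only %1
  // and is rebuilt as %3 = G_EXTRACT %1, 8.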

  /// Try to combine away MI.
  /// Returns true if it combined away the MI.
  /// Adds instructions that are dead as a result of the combine
  /// into DeadInsts, which can include MI.
  bool tryCombineInstruction(MachineInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    ArtifactValueFinder Finder(MRI, Builder, LI);

    // This might be a recursive call, and we might have DeadInsts already
    // populated. To avoid bad things happening later with multiple vreg defs
    // etc, process the dead instructions now if any.
    if (!DeadInsts.empty())
      deleteMarkedDeadInsts(DeadInsts, WrapperObserver);

    // Put here every vreg that was redefined in such a way that it's at least
    // possible that one (or more) of its users (immediate or COPY-separated)
    // could become artifact combinable with the new definition (or the
    // instruction reachable from it through a chain of copies if any).
    SmallVector<Register, 4> UpdatedDefs;
    bool Changed = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case TargetOpcode::G_ANYEXT:
      Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_ZEXT:
      Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_SEXT:
      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_UNMERGE_VALUES:
      Changed = tryCombineUnmergeValues(cast<GUnmerge>(MI), DeadInsts,
                                        UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_CONCAT_VECTORS:
      // If any of the users of this merge are an unmerge, then add them to the
      // artifact worklist in case there's folding that can be done looking up.
      for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
        if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
            U.getOpcode() == TargetOpcode::G_TRUNC) {
          UpdatedDefs.push_back(MI.getOperand(0).getReg());
          break;
        }
      }
      Changed = Finder.tryCombineMergeLike(cast<GMergeLikeInstr>(MI), DeadInsts,
                                           UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_EXTRACT:
      Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_TRUNC:
      Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      if (!Changed) {
        // Try to combine truncates away even if they are legal. As all artifact
        // combines at the moment look only "up" the def-use chains, we achieve
        // that by throwing truncates' users (looking through copies) into the
        // ArtifactList again.
        UpdatedDefs.push_back(MI.getOperand(0).getReg());
      }
      break;
    }
    // If the main loop through the ArtifactList found at least one combinable
    // pair of artifacts, not only combine it away (as done above), but also
    // follow the def-use chain from there to combine everything that can be
    // combined within this def-use chain of artifacts.
    while (!UpdatedDefs.empty()) {
      Register NewDef = UpdatedDefs.pop_back_val();
      assert(NewDef.isVirtual() && "Unexpected redefinition of a physreg");
      for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
        switch (Use.getOpcode()) {
        // Keep this list in sync with the list of all artifact combines.
        case TargetOpcode::G_ANYEXT:
        case TargetOpcode::G_ZEXT:
        case TargetOpcode::G_SEXT:
        case TargetOpcode::G_UNMERGE_VALUES:
        case TargetOpcode::G_EXTRACT:
        case TargetOpcode::G_TRUNC:
        case TargetOpcode::G_BUILD_VECTOR:
          // Adding Use to ArtifactList.
          WrapperObserver.changedInstr(Use);
          break;
        case TargetOpcode::COPY: {
          Register Copy = Use.getOperand(0).getReg();
          if (Copy.isVirtual())
            UpdatedDefs.push_back(Copy);
          break;
        }
        default:
          // If we do not have an artifact combine for the opcode, there is no
          // point in adding it to the ArtifactList as nothing interesting will
          // be done to it anyway.
          break;
        }
      }
    }
    return Changed;
  }

private:
  static Register getArtifactSrcReg(const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case TargetOpcode::COPY:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_EXTRACT:
      return MI.getOperand(1).getReg();
    case TargetOpcode::G_UNMERGE_VALUES:
      return MI.getOperand(MI.getNumOperands() - 1).getReg();
    default:
      llvm_unreachable("Not a legalization artifact happen");
    }
  }

  /// Mark a def of one of MI's original operands, DefMI, as dead if changing MI
  /// (either by killing it or changing operands) results in DefMI being dead
  /// too. In-between COPYs or artifact-casts are also collected if they are
  /// dead.
  /// MI is not marked dead.
  void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
                   SmallVectorImpl<MachineInstr *> &DeadInsts,
                   unsigned DefIdx = 0) {
    // Collect all the copy instructions that are made dead, due to deleting
    // this instruction. Collect all of them until the Trunc(DefMI).
    // Eg,
    // %1(s1) = G_TRUNC %0(s32)
    // %2(s1) = COPY %1(s1)
    // %3(s1) = COPY %2(s1)
    // %4(s32) = G_ANYEXT %3(s1)
    // In this case, we would have replaced %4 with a copy of %0,
    // and as a result, %3, %2, %1 are dead.
    MachineInstr *PrevMI = &MI;
    while (PrevMI != &DefMI) {
      Register PrevRegSrc = getArtifactSrcReg(*PrevMI);

      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
      if (MRI.hasOneUse(PrevRegSrc)) {
        if (TmpDef != &DefMI) {
          assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
                  isArtifactCast(TmpDef->getOpcode())) &&
                 "Expecting copy or artifact cast here");

          DeadInsts.push_back(TmpDef);
        }
      } else
        break;
      PrevMI = TmpDef;
    }

    if (PrevMI == &DefMI) {
      unsigned I = 0;
      bool IsDead = true;
      for (MachineOperand &Def : DefMI.defs()) {
        if (I != DefIdx) {
          if (!MRI.use_empty(Def.getReg())) {
            IsDead = false;
            break;
          }
        } else {
          if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg()))
            break;
        }

        ++I;
      }

      if (IsDead)
        DeadInsts.push_back(&DefMI);
    }
  }

  /// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
  /// dead due to MI being killed, then mark DefMI as dead too.
  /// Some of the combines (extends(trunc)) try to walk through redundant
  /// copies between the extends and the truncs, and this attempts to collect
  /// the in-between copies if they're dead.
  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          unsigned DefIdx = 0) {
    DeadInsts.push_back(&MI);
    markDefDead(MI, DefMI, DeadInsts, DefIdx);
  }

  /// Erase the dead instructions in the list and call the observer hooks.
  /// Normally the Legalizer will deal with erasing instructions that have been
  /// marked dead. However, for the trunc(ext(x)) cases we can end up trying to
  /// process instructions which have been marked dead, but otherwise break the
  /// MIR by introducing multiple vreg defs. For those cases, allow the combines
  /// to explicitly delete the instructions before we run into trouble.
  void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    for (auto *DeadMI : DeadInsts) {
      LLVM_DEBUG(dbgs() << *DeadMI << "Is dead, eagerly deleting\n");
      WrapperObserver.erasingInstr(*DeadMI);
      DeadMI->eraseFromParent();
    }
    DeadInsts.clear();
  }

  /// Checks if the target legalizer info has specified anything about the
  /// instruction, or if unsupported.
  bool isInstUnsupported(const LegalityQuery &Query) const {
    using namespace LegalizeActions;
    auto Step = LI.getAction(Query);
    return Step.Action == Unsupported || Step.Action == NotFound;
  }

  bool isInstLegal(const LegalityQuery &Query) const {
    return LI.getAction(Query).Action == LegalizeActions::Legal;
  }

  bool isConstantUnsupported(LLT Ty) const {
    if (!Ty.isVector())
      return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});

    LLT EltTy = Ty.getElementType();
    return isInstUnsupported({TargetOpcode::G_CONSTANT, {EltTy}}) ||
           isInstUnsupported({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}});
  }

  /// Looks through copy instructions and returns the actual
  /// source register.
  Register lookThroughCopyInstrs(Register Reg) {
    using namespace llvm::MIPatternMatch;

    Register TmpReg;
    while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
      if (MRI.getType(TmpReg).isValid())
        Reg = TmpReg;
      else
        break;
    }
    return Reg;
  }
};

} // namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H
//== ----- llvm/CodeGen/GlobalISel/Combiner.h -------------------*- C++ -*-== //
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This contains common code to drive combines. Combiner passes will need to
/// set up a CombinerInfo and call combineMachineInstrs.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINER_H

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

namespace llvm {
class MachineRegisterInfo;
class CombinerInfo;
class GISelCSEInfo;
class TargetPassConfig;
class MachineFunction;

class Combiner {
public:
  Combiner(CombinerInfo &CombinerInfo, const TargetPassConfig *TPC);

  /// If CSEInfo is not null, then the Combiner will setup observer for
  /// CSEInfo and instantiate a CSEMIRBuilder. Pass nullptr if CSE is not
  /// needed.
  bool combineMachineInstrs(MachineFunction &MF, GISelCSEInfo *CSEInfo);

protected:
  CombinerInfo &CInfo;

  MachineRegisterInfo *MRI = nullptr;
  const TargetPassConfig *TPC;
  std::unique_ptr<MachineIRBuilder> Builder;
};

} // End namespace llvm.

#endif // LLVM_CODEGEN_GLOBALISEL_COMBINER_H
//===- llvm/CodeGen/GlobalISel/GISelKnownBits.h ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Provides analysis for querying information about KnownBits during GISel
/// passes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_GISELKNOWNBITS_H
#define LLVM_CODEGEN_GLOBALISEL_GISELKNOWNBITS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/KnownBits.h"

namespace llvm {

class TargetLowering;
class DataLayout;

class GISelKnownBits : public GISelChangeObserver {
  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const TargetLowering &TL;
  const DataLayout &DL;
  unsigned MaxDepth;
  /// Cache maintained during a computeKnownBits request.
  SmallDenseMap<Register, KnownBits, 16> ComputeKnownBitsCache;

  void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
                           const APInt &DemandedElts,
                           unsigned Depth = 0);

  unsigned computeNumSignBitsMin(Register Src0, Register Src1,
                                 const APInt &DemandedElts, unsigned Depth = 0);

public:
  GISelKnownBits(MachineFunction &MF, unsigned MaxDepth = 6);
  virtual ~GISelKnownBits() = default;

  const MachineFunction &getMachineFunction() const {
    return MF;
  }

  const DataLayout &getDataLayout() const {
    return DL;
  }

  virtual void computeKnownBitsImpl(Register R, KnownBits &Known,
                                    const APInt &DemandedElts,
                                    unsigned Depth = 0);

  unsigned computeNumSignBits(Register R, const APInt &DemandedElts,
                              unsigned Depth = 0);
  unsigned computeNumSignBits(Register R, unsigned Depth = 0);

  // KnownBitsAPI
  KnownBits getKnownBits(Register R);
  KnownBits getKnownBits(Register R, const APInt &DemandedElts,
                         unsigned Depth = 0);

  // Calls getKnownBits for first operand def of MI.
  KnownBits getKnownBits(MachineInstr &MI);
  APInt getKnownZeroes(Register R);
  APInt getKnownOnes(Register R);

  /// \return true if 'V & Mask' is known to be zero in DemandedElts. We use
  /// this predicate to simplify operations downstream.
  /// Mask is known to be zero for bits that V cannot have.
  bool maskedValueIsZero(Register Val, const APInt &Mask) {
    return Mask.isSubsetOf(getKnownBits(Val).Zero);
  }
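
  // For instance (illustrative), if getKnownBits(Val).Zero covers the low
  // four bits of Val, then maskedValueIsZero(Val, APInt(BitWidth, 0xF))
  // returns true.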

  /// \return true if the sign bit of Op is known to be zero.  We use this
  /// predicate to simplify operations downstream.
  bool signBitIsZero(Register Op);

  static void computeKnownBitsForAlignment(KnownBits &Known,
                                           Align Alignment) {
    // The low bits are known zero if the pointer is aligned.
    Known.Zero.setLowBits(Log2(Alignment));
  }
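
  // For example (illustrative): Align(16) has Log2(Alignment) == 4, so the
  // low four bits of Known.Zero get set.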

  /// \return The known alignment for the pointer-like value \p R.
  Align computeKnownAlignment(Register R, unsigned Depth = 0);

  // Observer API. No-op for non-caching implementation.
  void erasingInstr(MachineInstr &MI) override {}
  void createdInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

protected:
  unsigned getMaxDepth() const { return MaxDepth; }
};

/// To use the known-bits analysis in a pass:
///   GISelKnownBits &Info = getAnalysis<GISelKnownBitsAnalysis>().get(MF);
/// If the Info is caching, also add it to the observer:
///   WrapperObserver.addObserver(&Info);

/// Eventually other features such as caching and serializing/deserializing
/// to MIR may be added. Those implementations can derive from GISelKnownBits
/// and override computeKnownBitsImpl.
class GISelKnownBitsAnalysis : public MachineFunctionPass {
  std::unique_ptr<GISelKnownBits> Info;

public:
  static char ID;
  GISelKnownBitsAnalysis() : MachineFunctionPass(ID) {
    initializeGISelKnownBitsAnalysisPass(*PassRegistry::getPassRegistry());
  }
  GISelKnownBits &get(MachineFunction &MF) {
    if (!Info)
      Info = std::make_unique<GISelKnownBits>(MF);
    return *Info.get();
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  void releaseMemory() override { Info.reset(); }
};
} // namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_GISELKNOWNBITS_H
//===- GISelWorkList.h - Worklist for GISel passes ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_GISELWORKLIST_H
#define LLVM_CODEGEN_GLOBALISEL_GISELWORKLIST_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {

class MachineInstr;

// Worklist which works much like the InstCombine worklist, but on
// MachineInstrs. The main difference from something like a SetVector is that
// erasing an element doesn't move all later elements over by one place -
// instead it just nulls out the element of the vector.
//
// FIXME: Does it make sense to factor out common code with the
// InstCombine worklist?
template<unsigned N>
class GISelWorkList {
  SmallVector<MachineInstr *, N> Worklist;
  DenseMap<MachineInstr *, unsigned> WorklistMap;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool Finalized = true;
#endif

public:
  GISelWorkList() : WorklistMap(N) {}

  bool empty() const { return WorklistMap.empty(); }

  unsigned size() const { return WorklistMap.size(); }

  // Since we don't know ahead of time how many instructions we're going to add
  // to the worklist, and migrating the DenseMap's elements is quite expensive
  // every time we resize, only insert into the SmallVector (typically during
  // the initial phase of populating lists). Before the worklist can be used,
  // finalize() should be called, and the list asserts (in builds with
  // assertions enabled) if it is ever used without finalizing. Note that
  // unlike insert, this won't check for duplicates - so the ideal place to
  // use it is during the initial prepopulating phase of most passes.
  void deferred_insert(MachineInstr *I) {
    Worklist.push_back(I);
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Finalized = false;
#endif
  }

  // This should only be called when using deferred_insert.
  // This asserts that the WorklistMap is empty, and then
  // inserts all the elements in the Worklist into the map.
  // It also asserts if any duplicate elements are found.
  void finalize() {
    assert(WorklistMap.empty() && "Expecting empty worklistmap");
    if (Worklist.size() > N)
      WorklistMap.reserve(Worklist.size());
    for (unsigned i = 0; i < Worklist.size(); ++i)
      if (!WorklistMap.try_emplace(Worklist[i], i).second)
        llvm_unreachable("Duplicate elements in the list");
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    Finalized = true;
#endif
  }

  /// Add the specified instruction to the worklist if it isn't already in it.
  void insert(MachineInstr *I) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert(Finalized && "GISelWorkList used without finalizing");
#endif
    if (WorklistMap.try_emplace(I, Worklist.size()).second)
      Worklist.push_back(I);
  }

  /// Remove I from the worklist if it exists.
  void remove(const MachineInstr *I) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert((Finalized || WorklistMap.empty()) && "Neither finalized nor empty");
#endif
    auto It = WorklistMap.find(I);
    if (It == WorklistMap.end())
      return; // Not in worklist.

    // Don't bother moving everything down, just null out the slot.
    Worklist[It->second] = nullptr;

    WorklistMap.erase(It);
  }

  void clear() {
    Worklist.clear();
    WorklistMap.clear();
  }

  MachineInstr *pop_back_val() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert(Finalized && "GISelWorkList used without finalizing");
#endif
    MachineInstr *I;
    do {
      I = Worklist.pop_back_val();
    } while(!I);
    assert(I && "Pop back on empty worklist");
    WorklistMap.erase(I);
    return I;
  }
};
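
// A minimal usage sketch (illustrative, not part of this header): populate
// the list with deferred_insert() during an initial scan, call finalize(),
// and only then use insert()/remove()/pop_back_val():
//
//   GISelWorkList<128> WL;
//   for (MachineBasicBlock &MBB : MF)
//     for (MachineInstr &MI : MBB)
//       WL.deferred_insert(&MI);
//   WL.finalize();
//   while (!WL.empty())
//     process(*WL.pop_back_val()); // process() is a hypothetical helper.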

} // end namespace llvm.

#endif
PKiwFZnN���&CodeGen/GlobalISel/InstructionSelect.hnu�[���//== llvm/CodeGen/GlobalISel/InstructionSelect.h -----------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file describes the interface of the MachineFunctionPass
/// responsible for selecting (possibly generic) machine instructions to
/// target-specific instructions.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECT_H

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {

class BlockFrequencyInfo;
class ProfileSummaryInfo;

/// This pass is responsible for selecting generic machine instructions to
/// target-specific instructions.  It relies on the InstructionSelector provided
/// by the target.
/// Selection is done by examining blocks in post-order, and instructions in
/// reverse order.
///
/// \post for all inst in MF: not isPreISelGenericOpcode(inst.opcode)
class InstructionSelect : public MachineFunctionPass {
public:
  static char ID;
  StringRef getPassName() const override { return "InstructionSelect"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties()
        .set(MachineFunctionProperties::Property::IsSSA)
        .set(MachineFunctionProperties::Property::Legalized)
        .set(MachineFunctionProperties::Property::RegBankSelected);
  }

  MachineFunctionProperties getSetProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::Selected);
  }

  InstructionSelect(CodeGenOpt::Level OL);
  InstructionSelect();

  bool runOnMachineFunction(MachineFunction &MF) override;

protected:
  BlockFrequencyInfo *BFI = nullptr;
  ProfileSummaryInfo *PSI = nullptr;

  CodeGenOpt::Level OptLevel = CodeGenOpt::None;
};
} // End namespace llvm.

#endif
//===-- llvm/CodeGen/MachineModuleSlotTracker.h -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEMODULESLOTTRACKER_H
#define LLVM_CODEGEN_MACHINEMODULESLOTTRACKER_H

#include "llvm/IR/ModuleSlotTracker.h"

namespace llvm {

class AbstractSlotTrackerStorage;
class Function;
class MachineModuleInfo;
class MachineFunction;
class Module;

class MachineModuleSlotTracker : public ModuleSlotTracker {
  const Function &TheFunction;
  const MachineModuleInfo &TheMMI;
  unsigned MDNStartSlot = 0, MDNEndSlot = 0;

  void processMachineFunctionMetadata(AbstractSlotTrackerStorage *AST,
                                      const MachineFunction &MF);
  void processMachineModule(AbstractSlotTrackerStorage *AST, const Module *M,
                            bool ShouldInitializeAllMetadata);
  void processMachineFunction(AbstractSlotTrackerStorage *AST,
                              const Function *F,
                              bool ShouldInitializeAllMetadata);

public:
  MachineModuleSlotTracker(const MachineFunction *MF,
                           bool ShouldInitializeAllMetadata = true);
  ~MachineModuleSlotTracker();

  void collectMachineMDNodes(MachineMDNodeListType &L) const;
};

} // namespace llvm

#endif // LLVM_CODEGEN_MACHINEMODULESLOTTRACKER_H
//===- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This class implements a deterministic finite automaton (DFA) based
// packetizing mechanism for VLIW architectures. It provides APIs to
// determine whether there exists a legal mapping of instructions to
// functional unit assignments in a packet. The DFA is auto-generated from
// the target's Schedule.td file.
//
// A DFA consists of 3 major elements: states, inputs, and transitions. For
// the packetizing mechanism, the input is the set of instruction classes for
// a target. The state models all possible combinations of functional unit
// consumption for a given set of instructions in a packet. A transition
// models the addition of an instruction to a packet. In the DFA constructed
// by this class, if an instruction can be added to a packet, then a valid
// transition exists from the corresponding state. Invalid transitions
// indicate that the instruction cannot be added to the current packet.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
#define LLVM_CODEGEN_DFAPACKETIZER_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/Support/Automaton.h"
#include <cstdint>
#include <map>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

class ScheduleDAGMutation;
class InstrItineraryData;
class MachineFunction;
class MachineInstr;
class MachineLoopInfo;
class MCInstrDesc;
class SUnit;
class TargetInstrInfo;

// This class extends ScheduleDAGInstrs and overrides the schedule method
// to build the dependence graph.
class DefaultVLIWScheduler : public ScheduleDAGInstrs {
private:
  AAResults *AA;
  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

public:
  DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
                       AAResults *AA);

  // Actual scheduling work.
  void schedule() override;

  /// DefaultVLIWScheduler takes ownership of the Mutation object.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    Mutations.push_back(std::move(Mutation));
  }

protected:
  void postProcessDAG();
};

class DFAPacketizer {
private:
  const InstrItineraryData *InstrItins;
  Automaton<uint64_t> A;
  /// For every itinerary, an "action" to apply to the automaton. This removes
  /// the redundancy in actions between itinerary classes.
  ArrayRef<unsigned> ItinActions;

public:
  DFAPacketizer(const InstrItineraryData *InstrItins, Automaton<uint64_t> a,
                ArrayRef<unsigned> ItinActions)
      : InstrItins(InstrItins), A(std::move(a)), ItinActions(ItinActions) {
    // Start off with resource tracking disabled.
    A.enableTranscription(false);
  }

  // Reset the current state to make all resources available.
  void clearResources() {
    A.reset();
  }

  // Set whether this packetizer should track not just whether instructions
  // can be packetized, but also which functional units each instruction ends up
  // using after packetization.
  void setTrackResources(bool Track) {
    A.enableTranscription(Track);
  }

  // Check if the resources occupied by a MCInstrDesc are available in
  // the current state.
  bool canReserveResources(const MCInstrDesc *MID);

  // Reserve the resources occupied by a MCInstrDesc and change the current
  // state to reflect that change.
  void reserveResources(const MCInstrDesc *MID);

  // Check if the resources occupied by a machine instruction are available
  // in the current state.
  bool canReserveResources(MachineInstr &MI);

  // Reserve the resources occupied by a machine instruction and change the
  // current state to reflect that change.
  void reserveResources(MachineInstr &MI);

  // Return the resources used by the InstIdx'th instruction added to this
  // packet. The resources are returned as a bitvector of functional units.
  //
  // Note that a bundle may be packed in multiple valid ways. This function
  // returns one arbitrary valid packing.
  //
  // Requires setTrackResources(true) to have been called.
  unsigned getUsedResources(unsigned InstIdx);

  const InstrItineraryData *getInstrItins() const { return InstrItins; }
};
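
// A minimal sketch of driving the DFA (illustrative; a packetizer normally
// obtains its DFAPacketizer from the target rather than constructing one):
//
//   ResourceTracker->clearResources();
//   for (MachineInstr &MI : Packet) { // Packet is a hypothetical container.
//     if (!ResourceTracker->canReserveResources(MI))
//       break; // Resources exhausted; start a new packet instead.
//     ResourceTracker->reserveResources(MI);
//   }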

// VLIWPacketizerList implements a simple VLIW packetizer using DFA. The
// packetizer works on machine basic blocks. For each instruction I in BB,
// the packetizer consults the DFA to see if machine resources are available
// to execute I. If so, the packetizer checks if I depends on any instruction
// in the current packet. If no dependency is found, I is added to the current
// packet and the machine resource is marked as taken. If any dependency is
// found, a target API call is made to prune the dependence.
class VLIWPacketizerList {
protected:
  MachineFunction &MF;
  const TargetInstrInfo *TII;
  AAResults *AA;

  // The VLIW Scheduler.
  DefaultVLIWScheduler *VLIWScheduler;
  // Vector of instructions assigned to the current packet.
  std::vector<MachineInstr*> CurrentPacketMIs;
  // DFA resource tracker.
  DFAPacketizer *ResourceTracker;
  // Map: MI -> SU.
  std::map<MachineInstr*, SUnit*> MIToSUnit;

public:
  // The AAResults parameter can be nullptr.
  VLIWPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
                     AAResults *AA);
  VLIWPacketizerList &operator=(const VLIWPacketizerList &other) = delete;
  VLIWPacketizerList(const VLIWPacketizerList &other) = delete;
  virtual ~VLIWPacketizerList();

  // Implement this API in the backend to bundle instructions.
  void PacketizeMIs(MachineBasicBlock *MBB,
                    MachineBasicBlock::iterator BeginItr,
                    MachineBasicBlock::iterator EndItr);

  // Return the ResourceTracker.
  DFAPacketizer *getResourceTracker() {return ResourceTracker;}

  // addToPacket - Add MI to the current packet.
  virtual MachineBasicBlock::iterator addToPacket(MachineInstr &MI) {
    CurrentPacketMIs.push_back(&MI);
    ResourceTracker->reserveResources(MI);
    return MI;
  }

  // End the current packet and reset the state of the packetizer.
  // Overriding this function allows the target-specific packetizer
  // to perform custom finalization.
  virtual void endPacket(MachineBasicBlock *MBB,
                         MachineBasicBlock::iterator MI);

  // Perform initialization before packetizing an instruction. This
  // function is supposed to be overridden by the target-dependent packetizer.
  virtual void initPacketizerState() {}

  // Check if the given instruction I should be ignored by the packetizer.
  virtual bool ignorePseudoInstruction(const MachineInstr &I,
                                       const MachineBasicBlock *MBB) {
    return false;
  }

  // Return true if instruction MI cannot be packetized with any other
  // instruction, which means that MI itself is a packet.
  virtual bool isSoloInstruction(const MachineInstr &MI) { return true; }

  // Check if the packetizer should try to add the given instruction to
  // the current packet. One reason it may not be desirable to include an
  // instruction in the current packet is that it would cause a stall.
  // If this function returns "false", the current packet will be ended,
  // and the instruction will be added to the next packet.
  virtual bool shouldAddToPacket(const MachineInstr &MI) { return true; }

  // Check if it is legal to packetize SUI and SUJ together.
  virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
    return false;
  }

  // Check if it is legal to prune the dependence between SUI and SUJ.
  virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
    return false;
  }

  // Add a DAG mutation to be done before the packetization begins.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation);

  bool alias(const MachineInstr &MI1, const MachineInstr &MI2,
             bool UseTBAA = true) const;

private:
  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2,
             bool UseTBAA = true) const;
};
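
// A hypothetical target packetizer would subclass VLIWPacketizerList,
// override the legality hooks, and drive bundling per block (a sketch, not a
// definitive implementation):
//
//   struct MyPacketizer : public VLIWPacketizerList {
//     MyPacketizer(MachineFunction &MF, MachineLoopInfo &MLI, AAResults *AA)
//         : VLIWPacketizerList(MF, MLI, AA) {}
//     bool isSoloInstruction(const MachineInstr &MI) override { /* ... */ }
//     bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
//       /* ... */
//     }
//   };
//
// followed by P.PacketizeMIs(MBB, MBB->begin(), MBB->end()) for each block.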

} // end namespace llvm

#endif // LLVM_CODEGEN_DFAPACKETIZER_H
//===- RDFRegisters.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_RDFREGISTERS_H
#define LLVM_CODEGEN_RDFREGISTERS_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegister.h"
#include <cassert>
#include <cstdint>
#include <map>
#include <set>
#include <vector>

namespace llvm {

class MachineFunction;
class raw_ostream;

namespace rdf {
struct RegisterAggr;

using RegisterId = uint32_t;

template <typename T>
bool disjoint(const std::set<T> &A, const std::set<T> &B) {
  auto ItA = A.begin(), EndA = A.end();
  auto ItB = B.begin(), EndB = B.end();
  while (ItA != EndA && ItB != EndB) {
    if (*ItA < *ItB)
      ++ItA;
    else if (*ItB < *ItA)
      ++ItB;
    else
      return false;
  }
  return true;
}
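
// For example, disjoint({1, 2}, {3, 4}) is true, while
// disjoint({1, 2}, {2, 3}) is false because both sets contain 2.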

// Template class for a map translating uint32_t into arbitrary types.
// The map will act like an indexed set: upon insertion of a new object,
// it will automatically assign a new index to it. Index of 0 is treated
// as invalid and is never allocated.
template <typename T, unsigned N = 32> struct IndexedSet {
  IndexedSet() { Map.reserve(N); }

  T get(uint32_t Idx) const {
    // Index Idx corresponds to Map[Idx-1].
    assert(Idx != 0 && !Map.empty() && Idx - 1 < Map.size());
    return Map[Idx - 1];
  }

  uint32_t insert(T Val) {
    // Linear search.
    auto F = llvm::find(Map, Val);
    if (F != Map.end())
      return F - Map.begin() + 1;
    Map.push_back(Val);
    return Map.size(); // Return actual_index + 1.
  }

  uint32_t find(T Val) const {
    auto F = llvm::find(Map, Val);
    assert(F != Map.end());
    return F - Map.begin() + 1;
  }

  uint32_t size() const { return Map.size(); }

  using const_iterator = typename std::vector<T>::const_iterator;

  const_iterator begin() const { return Map.begin(); }
  const_iterator end() const { return Map.end(); }

private:
  std::vector<T> Map;
};
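
// Usage note (illustrative): indices are 1-based and stable, so for any
// inserted value V, get(insert(V)) == V and find(V) == insert(V).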

struct RegisterRef {
  RegisterId Reg = 0;
  LaneBitmask Mask = LaneBitmask::getNone(); // Only for registers.

  constexpr RegisterRef() = default;
  constexpr explicit RegisterRef(RegisterId R,
                                 LaneBitmask M = LaneBitmask::getAll())
      : Reg(R), Mask(isRegId(R) && R != 0 ? M : LaneBitmask::getNone()) {}

  // Classify null register as a "register".
  constexpr bool isReg() const { return Reg == 0 || isRegId(Reg); }
  constexpr bool isUnit() const { return isUnitId(Reg); }
  constexpr bool isMask() const { return isMaskId(Reg); }

  constexpr unsigned idx() const { return toIdx(Reg); }

  constexpr operator bool() const {
    return !isReg() || (Reg != 0 && Mask.any());
  }

  size_t hash() const {
    return std::hash<RegisterId>{}(Reg) ^
           std::hash<LaneBitmask::Type>{}(Mask.getAsInteger());
  }

  static constexpr bool isRegId(unsigned Id) {
    return Register::isPhysicalRegister(Id);
  }
  static constexpr bool isUnitId(unsigned Id) {
    return Register::isVirtualRegister(Id);
  }
  static constexpr bool isMaskId(unsigned Id) {
    return Register::isStackSlot(Id);
  }

  static constexpr RegisterId toUnitId(unsigned Idx) {
    return Idx | MCRegister::VirtualRegFlag;
  }

  static constexpr unsigned toIdx(RegisterId Id) {
    // Not using virtReg2Index or stackSlot2Index, because they are
    // not constexpr.
    if (isUnitId(Id))
      return Id & ~MCRegister::VirtualRegFlag;
    // RegId and MaskId are unchanged.
    return Id;
  }

  bool operator<(RegisterRef) const = delete;
  bool operator==(RegisterRef) const = delete;
  bool operator!=(RegisterRef) const = delete;
};
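
// Illustrative round trip: toUnitId(5) tags index 5 with
// MCRegister::VirtualRegFlag, and toIdx(toUnitId(5)) strips it again,
// returning 5.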

struct PhysicalRegisterInfo {
  PhysicalRegisterInfo(const TargetRegisterInfo &tri,
                       const MachineFunction &mf);

  RegisterId getRegMaskId(const uint32_t *RM) const {
    return Register::index2StackSlot(RegMasks.find(RM));
  }

  const uint32_t *getRegMaskBits(RegisterId R) const {
    return RegMasks.get(Register::stackSlot2Index(R));
  }

  bool alias(RegisterRef RA, RegisterRef RB) const;

  // Returns the set of aliased physical registers.
  std::set<RegisterId> getAliasSet(RegisterId Reg) const;

  RegisterRef getRefForUnit(uint32_t U) const {
    return RegisterRef(UnitInfos[U].Reg, UnitInfos[U].Mask);
  }

  const BitVector &getMaskUnits(RegisterId MaskId) const {
    return MaskInfos[Register::stackSlot2Index(MaskId)].Units;
  }

  std::set<RegisterId> getUnits(RegisterRef RR) const;

  const BitVector &getUnitAliases(uint32_t U) const {
    return AliasInfos[U].Regs;
  }

  RegisterRef mapTo(RegisterRef RR, unsigned R) const;
  const TargetRegisterInfo &getTRI() const { return TRI; }

  bool equal_to(RegisterRef A, RegisterRef B) const;
  bool less(RegisterRef A, RegisterRef B) const;

  void print(raw_ostream &OS, RegisterRef A) const;
  void print(raw_ostream &OS, const RegisterAggr &A) const;

private:
  struct RegInfo {
    const TargetRegisterClass *RegClass = nullptr;
  };
  struct UnitInfo {
    RegisterId Reg = 0;
    LaneBitmask Mask;
  };
  struct MaskInfo {
    BitVector Units;
  };
  struct AliasInfo {
    BitVector Regs;
  };

  const TargetRegisterInfo &TRI;
  IndexedSet<const uint32_t *> RegMasks;
  std::vector<RegInfo> RegInfos;
  std::vector<UnitInfo> UnitInfos;
  std::vector<MaskInfo> MaskInfos;
  std::vector<AliasInfo> AliasInfos;
};

struct RegisterAggr {
  RegisterAggr(const PhysicalRegisterInfo &pri)
      : Units(pri.getTRI().getNumRegUnits()), PRI(pri) {}
  RegisterAggr(const RegisterAggr &RG) = default;

  unsigned size() const { return Units.count(); }
  bool empty() const { return Units.none(); }
  bool hasAliasOf(RegisterRef RR) const;
  bool hasCoverOf(RegisterRef RR) const;

  const PhysicalRegisterInfo &getPRI() const { return PRI; }

  bool operator==(const RegisterAggr &A) const {
    return DenseMapInfo<BitVector>::isEqual(Units, A.Units);
  }

  static bool isCoverOf(RegisterRef RA, RegisterRef RB,
                        const PhysicalRegisterInfo &PRI) {
    return RegisterAggr(PRI).insert(RA).hasCoverOf(RB);
  }

  RegisterAggr &insert(RegisterRef RR);
  RegisterAggr &insert(const RegisterAggr &RG);
  RegisterAggr &intersect(RegisterRef RR);
  RegisterAggr &intersect(const RegisterAggr &RG);
  RegisterAggr &clear(RegisterRef RR);
  RegisterAggr &clear(const RegisterAggr &RG);

  RegisterRef intersectWith(RegisterRef RR) const;
  RegisterRef clearIn(RegisterRef RR) const;
  RegisterRef makeRegRef() const;

  size_t hash() const { return DenseMapInfo<BitVector>::getHashValue(Units); }

  struct ref_iterator {
    using MapType = std::map<RegisterId, LaneBitmask>;

  private:
    MapType Masks;
    MapType::iterator Pos;
    unsigned Index;
    const RegisterAggr *Owner;

  public:
    ref_iterator(const RegisterAggr &RG, bool End);

    RegisterRef operator*() const {
      return RegisterRef(Pos->first, Pos->second);
    }

    ref_iterator &operator++() {
      ++Pos;
      ++Index;
      return *this;
    }

    bool operator==(const ref_iterator &I) const {
      assert(Owner == I.Owner);
      (void)Owner;
      return Index == I.Index;
    }

    bool operator!=(const ref_iterator &I) const { return !(*this == I); }
  };

  ref_iterator ref_begin() const { return ref_iterator(*this, false); }
  ref_iterator ref_end() const { return ref_iterator(*this, true); }

  using unit_iterator = typename BitVector::const_set_bits_iterator;
  unit_iterator unit_begin() const { return Units.set_bits_begin(); }
  unit_iterator unit_end() const { return Units.set_bits_end(); }

  iterator_range<ref_iterator> refs() const {
    return make_range(ref_begin(), ref_end());
  }
  iterator_range<unit_iterator> units() const {
    return make_range(unit_begin(), unit_end());
  }

private:
  BitVector Units;
  const PhysicalRegisterInfo &PRI;
};

// This is really a std::map, except that it provides a non-trivial
// default constructor to the element accessed via [].
template <typename KeyType> struct RegisterAggrMap {
  RegisterAggrMap(const PhysicalRegisterInfo &pri) : Empty(pri) {}

  RegisterAggr &operator[](KeyType Key) {
    return Map.emplace(Key, Empty).first->second;
  }

  auto begin() { return Map.begin(); }
  auto end() { return Map.end(); }
  auto begin() const { return Map.begin(); }
  auto end() const { return Map.end(); }
  auto find(const KeyType &Key) const { return Map.find(Key); }

private:
  RegisterAggr Empty;
  std::map<KeyType, RegisterAggr> Map;

public:
  using key_type = typename decltype(Map)::key_type;
  using mapped_type = typename decltype(Map)::mapped_type;
  using value_type = typename decltype(Map)::value_type;
};

raw_ostream &operator<<(raw_ostream &OS, const RegisterAggr &A);

// Print the lane mask in a short form (or not at all if all bits are set).
struct PrintLaneMaskShort {
  PrintLaneMaskShort(LaneBitmask M) : Mask(M) {}
  LaneBitmask Mask;
};
raw_ostream &operator<<(raw_ostream &OS, const PrintLaneMaskShort &P);

} // end namespace rdf
} // end namespace llvm

namespace std {

template <> struct hash<llvm::rdf::RegisterRef> {
  size_t operator()(llvm::rdf::RegisterRef A) const { //
    return A.hash();
  }
};

template <> struct hash<llvm::rdf::RegisterAggr> {
  size_t operator()(const llvm::rdf::RegisterAggr &A) const { //
    return A.hash();
  }
};

template <> struct equal_to<llvm::rdf::RegisterRef> {
  constexpr equal_to(const llvm::rdf::PhysicalRegisterInfo &pri) : PRI(&pri) {}

  bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const {
    return PRI->equal_to(A, B);
  }

private:
  // Make it a pointer just in case. See comment in `less` below.
  const llvm::rdf::PhysicalRegisterInfo *PRI;
};

template <> struct equal_to<llvm::rdf::RegisterAggr> {
  bool operator()(const llvm::rdf::RegisterAggr &A,
                  const llvm::rdf::RegisterAggr &B) const {
    return A == B;
  }
};

template <> struct less<llvm::rdf::RegisterRef> {
  constexpr less(const llvm::rdf::PhysicalRegisterInfo &pri) : PRI(&pri) {}

  bool operator()(llvm::rdf::RegisterRef A, llvm::rdf::RegisterRef B) const {
    return PRI->less(A, B);
  }

private:
  // Make it a pointer because apparently some versions of MSVC use std::swap
  // on the std::less specialization.
  const llvm::rdf::PhysicalRegisterInfo *PRI;
};

} // namespace std

namespace llvm::rdf {
using RegisterSet = std::set<RegisterRef, std::less<RegisterRef>>;
} // namespace llvm::rdf

#endif // LLVM_CODEGEN_RDFREGISTERS_H
//===-- UnreachableBlockElim.h - Remove unreachable blocks for codegen --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is an extremely simple version of the SimplifyCFG pass.  Its sole
// job is to delete LLVM basic blocks that are not reachable from the entry
// node.  To do this, it performs a simple depth-first traversal of the CFG,
// then deletes any unvisited nodes.
//
// Note that this pass is really a hack.  In particular, the instruction
// selectors for various targets should just not generate code for unreachable
// blocks.  Until LLVM has a more systematic way of defining instruction
// selectors, however, we cannot really expect them to handle additional
// complexity.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_UNREACHABLEBLOCKELIM_H
#define LLVM_CODEGEN_UNREACHABLEBLOCKELIM_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class UnreachableBlockElimPass
    : public PassInfoMixin<UnreachableBlockElimPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_CODEGEN_UNREACHABLEBLOCKELIM_H
//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
#define LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/BlockFrequency.h"
#include <cstdint>
#include <memory>
#include <optional>

namespace llvm {

template <class BlockT> class BlockFrequencyInfoImpl;
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineLoopInfo;
class raw_ostream;

/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
/// to estimate machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass {
  using ImplType = BlockFrequencyInfoImpl<MachineBasicBlock>;
  std::unique_ptr<ImplType> MBFI;

public:
  static char ID;

  MachineBlockFrequencyInfo();
  explicit MachineBlockFrequencyInfo(MachineFunction &F,
                                     MachineBranchProbabilityInfo &MBPI,
                                     MachineLoopInfo &MLI);
  ~MachineBlockFrequencyInfo() override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &F) override;

  /// calculate - compute block frequency info for the given function.
  void calculate(const MachineFunction &F,
                 const MachineBranchProbabilityInfo &MBPI,
                 const MachineLoopInfo &MLI);

  void releaseMemory() override;

  /// getBlockFreq - Return the block frequency. Returns 0 if we don't have the
  /// information. Please note that the initial frequency is equal to 1024. This
  /// means that we should not rely on the value itself, but only on comparisons
  /// to the other block frequencies. We do this to avoid using floating points.
  /// For example, to get the frequency of a block relative to the entry block,
  /// divide the integral value returned by this function (the
  /// BlockFrequency::getFrequency() value) by getEntryFreq().
  BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;

  /// Compute the frequency of the block, relative to the entry block.
  /// This API assumes getEntryFreq() is non-zero.
  float getBlockFreqRelativeToEntryBlock(const MachineBasicBlock *MBB) const {
    assert(getEntryFreq() != 0 && "getEntryFreq() should not return 0 here!");
    return getBlockFreq(MBB).getFrequency() * (1.0f / getEntryFreq());
  }
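
  // Editor's sketch: the entry-relative computation described above, assuming
  // MBFI is a computed MachineBlockFrequencyInfo and MBB a block of interest:
  //
  //   uint64_t Freq = MBFI.getBlockFreq(MBB).getFrequency();
  //   double Rel = double(Freq) / MBFI.getEntryFreq(); // ~1.0 for entry block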

  std::optional<uint64_t>
  getBlockProfileCount(const MachineBasicBlock *MBB) const;
  std::optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;

  bool isIrrLoopHeader(const MachineBasicBlock *MBB) const;

  /// Incrementally calculate block frequencies when we split edges, to avoid
  /// a full CFG traversal.
  void onEdgeSplit(const MachineBasicBlock &NewPredecessor,
                   const MachineBasicBlock &NewSuccessor,
                   const MachineBranchProbabilityInfo &MBPI);

  const MachineFunction *getFunction() const;
  const MachineBranchProbabilityInfo *getMBPI() const;

  /// Pop up a ghostview window with the current block frequency propagation
  /// rendered using dot.
  void view(const Twine &Name, bool isSimple = true) const;

  // Print the block frequency Freq to OS using the current function's entry
  // frequency to convert Freq into a relative decimal form.
  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;

  // Convenience method that attempts to look up the frequency associated with
  // BB and print it to OS.
  raw_ostream &printBlockFreq(raw_ostream &OS,
                              const MachineBasicBlock *MBB) const;

  /// Divide a block's BlockFrequency::getFrequency() value by this value to
  /// obtain the entry-block-relative frequency of that block.
  uint64_t getEntryFreq() const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
//===- llvm/CodeGen/LivePhysRegs.h - Live Physical Register Set -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the LivePhysRegs utility for tracking liveness of
/// physical registers. This can be used for ad-hoc liveness tracking after
/// register allocation. You can start with the live-ins/live-outs at the
/// beginning/end of a block and update the information while walking the
/// instructions inside the block. This implementation tracks the liveness on a
/// sub-register granularity.
///
/// We assume that the high bits of a physical super-register are not preserved
/// unless the instruction has an implicit-use operand reading the super-
/// register.
///
/// X86 Example:
/// %ymm0 = ...
/// %xmm0 = ... (Kills %xmm0, all of %xmm0's sub-registers, and %ymm0)
///
/// %ymm0 = ...
/// %xmm0 = ..., implicit %ymm0 (%ymm0 and all its sub-registers are alive)
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
#define LLVM_CODEGEN_LIVEPHYSREGS_H

#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegister.h"
#include "llvm/MC/MCRegisterInfo.h"
#include <cassert>
#include <utility>

namespace llvm {

class MachineInstr;
class MachineFunction;
class MachineOperand;
class MachineRegisterInfo;
class raw_ostream;

/// A set of physical registers with utility functions to track liveness
/// when walking backward/forward through a basic block.
class LivePhysRegs {
  const TargetRegisterInfo *TRI = nullptr;
  using RegisterSet = SparseSet<MCPhysReg, identity<MCPhysReg>>;
  RegisterSet LiveRegs;

public:
  /// Constructs an uninitialized set. init() needs to be called to initialize it.
  LivePhysRegs() = default;

  /// Constructs and initializes an empty set.
  LivePhysRegs(const TargetRegisterInfo &TRI) : TRI(&TRI) {
    LiveRegs.setUniverse(TRI.getNumRegs());
  }

  LivePhysRegs(const LivePhysRegs&) = delete;
  LivePhysRegs &operator=(const LivePhysRegs&) = delete;

  /// (re-)initializes and clears the set.
  void init(const TargetRegisterInfo &TRI) {
    this->TRI = &TRI;
    LiveRegs.clear();
    LiveRegs.setUniverse(TRI.getNumRegs());
  }

  /// Clears the set.
  void clear() { LiveRegs.clear(); }

  /// Returns true if the set is empty.
  bool empty() const { return LiveRegs.empty(); }

  /// Adds a physical register and all its sub-registers to the set.
  void addReg(MCPhysReg Reg) {
    assert(TRI && "LivePhysRegs is not initialized.");
    assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
    for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg))
      LiveRegs.insert(SubReg);
  }

  /// Removes a physical register, all its sub-registers, and all its
  /// super-registers from the set.
  void removeReg(MCPhysReg Reg) {
    assert(TRI && "LivePhysRegs is not initialized.");
    assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
    for (MCRegAliasIterator R(Reg, TRI, true); R.isValid(); ++R)
      LiveRegs.erase(*R);
  }

  /// Removes physical registers clobbered by the regmask operand \p MO.
  void removeRegsInMask(const MachineOperand &MO,
        SmallVectorImpl<std::pair<MCPhysReg, const MachineOperand*>> *Clobbers =
        nullptr);

  /// Returns true if register \p Reg is contained in the set. This also
  /// works if only the super register of \p Reg has been defined, because
  /// addReg() always adds all sub-registers to the set as well.
  /// Note: Returns false if only some sub-registers are live; use available()
  /// when searching for a free register.
  bool contains(MCPhysReg Reg) const { return LiveRegs.count(Reg); }

  /// Returns true if neither register \p Reg nor any aliasing register is in
  /// the set.
  bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const;
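
  // Editor's sketch: scanning a register class for a fully free register
  // (neither it nor any alias live), assuming RC, MRI and LiveRegs are a
  // valid TargetRegisterClass, MachineRegisterInfo and initialized set:
  //
  //   MCPhysReg FreeReg = 0;
  //   for (MCPhysReg Reg : *RC)
  //     if (LiveRegs.available(MRI, Reg)) { FreeReg = Reg; break; }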

  /// Remove defined registers and regmask kills from the set.
  void removeDefs(const MachineInstr &MI);

  /// Add uses to the set.
  void addUses(const MachineInstr &MI);

  /// Simulates liveness when stepping backwards over an instruction (bundle):
  /// remove defs, add uses. This is the recommended way of calculating
  /// liveness.
  void stepBackward(const MachineInstr &MI);
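
  // Editor's sketch of the recommended backward walk, assuming TRI and MBB
  // are a valid TargetRegisterInfo and MachineBasicBlock:
  //
  //   LivePhysRegs LiveRegs(TRI);
  //   LiveRegs.addLiveOuts(MBB);
  //   for (MachineInstr &MI : llvm::reverse(MBB))
  //     LiveRegs.stepBackward(MI);
  //   // LiveRegs now holds the registers live at the top of MBB.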

  /// Simulates liveness when stepping forward over an instruction (bundle):
  /// remove killed uses, add defs. This is not the recommended way, because it
  /// depends on accurate kill flags. If possible, use stepBackward() instead.
  /// The Clobbers set will contain the registers either defined or clobbered
  /// by a regmask; the operand identifies whether it is a regmask or a
  /// register operand.
  void stepForward(const MachineInstr &MI,
        SmallVectorImpl<std::pair<MCPhysReg, const MachineOperand*>> &Clobbers);

  /// Adds all live-in registers of basic block \p MBB.
  /// Live-in registers are the registers in the block's live-in list and the
  /// pristine registers.
  void addLiveIns(const MachineBasicBlock &MBB);

  /// Adds all live-in registers of basic block \p MBB but skips pristine
  /// registers.
  void addLiveInsNoPristines(const MachineBasicBlock &MBB);

  /// Adds all live-out registers of basic block \p MBB.
  /// Live out registers are the union of the live-in registers of the successor
  /// blocks and pristine registers. Live out registers of the end block are the
  /// callee saved registers.
  /// If a register is not added by this method, it is guaranteed to not be
  /// live out from MBB, although a sub-register may be. This is true
  /// both before and after regalloc.
  void addLiveOuts(const MachineBasicBlock &MBB);

  /// Adds all live-out registers of basic block \p MBB but skips pristine
  /// registers.
  void addLiveOutsNoPristines(const MachineBasicBlock &MBB);

  using const_iterator = RegisterSet::const_iterator;

  const_iterator begin() const { return LiveRegs.begin(); }
  const_iterator end() const { return LiveRegs.end(); }

  /// Prints the currently live registers to \p OS.
  void print(raw_ostream &OS) const;

  /// Dumps the currently live registers to the debug output.
  void dump() const;

private:
  /// Adds live-in registers from basic block \p MBB, taking associated
  /// lane masks into consideration.
  void addBlockLiveIns(const MachineBasicBlock &MBB);

  /// Adds pristine registers. Pristine registers are callee saved registers
  /// that are unused in the function.
  void addPristines(const MachineFunction &MF);
};

inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
  LR.print(OS);
  return OS;
}

/// Computes the registers live-in to \p MBB, assuming all of its successors'
/// live-in lists are up-to-date. Puts the result into the given LivePhysRegs
/// instance \p LiveRegs.
void computeLiveIns(LivePhysRegs &LiveRegs, const MachineBasicBlock &MBB);

/// Recomputes dead and kill flags in \p MBB.
void recomputeLivenessFlags(MachineBasicBlock &MBB);

/// Adds registers contained in \p LiveRegs to the block live-in list of \p MBB.
/// Does not add reserved registers.
void addLiveIns(MachineBasicBlock &MBB, const LivePhysRegs &LiveRegs);

/// Convenience function combining computeLiveIns() and addLiveIns().
void computeAndAddLiveIns(LivePhysRegs &LiveRegs,
                          MachineBasicBlock &MBB);

/// Convenience function for recomputing the live-ins of \p MBB.
static inline void recomputeLiveIns(MachineBasicBlock &MBB) {
  LivePhysRegs LPR;
  MBB.clearLiveIns();
  computeAndAddLiveIns(LPR, MBB);
}

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEPHYSREGS_H
//===- llvm/CodeGen/MachineRegisterInfo.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
#define LLVM_CODEGEN_MACHINEREGISTERINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

class PSetIterator;

/// Convenient type to represent either a register class or a register bank.
using RegClassOrRegBank =
    PointerUnion<const TargetRegisterClass *, const RegisterBank *>;

/// MachineRegisterInfo - Keep track of information for virtual and physical
/// registers, including vreg register classes, use/def chains for registers,
/// etc.
class MachineRegisterInfo {
public:
  class Delegate {
    virtual void anchor();

  public:
    virtual ~Delegate() = default;

    virtual void MRI_NoteNewVirtualRegister(Register Reg) = 0;
    virtual void MRI_NoteCloneVirtualRegister(Register NewReg,
                                              Register SrcReg) {
      MRI_NoteNewVirtualRegister(NewReg);
    }
  };

private:
  MachineFunction *MF;
  SmallPtrSet<Delegate *, 1> TheDelegates;

  /// True if subregister liveness is tracked.
  const bool TracksSubRegLiveness;

  /// VRegInfo - Information we keep for each virtual register.
  ///
  /// Each element in this list contains the register class of the vreg and the
  /// start of the use/def list for the register.
  IndexedMap<std::pair<RegClassOrRegBank, MachineOperand *>,
             VirtReg2IndexFunctor>
      VRegInfo;

  /// Map for recovering vreg name from vreg number.
  /// This map is used by the MIR Printer.
  IndexedMap<std::string, VirtReg2IndexFunctor> VReg2Name;

  /// StringSet that is used to unique vreg names.
  StringSet<> VRegNames;

  /// This flag is true once \p UpdatedCSRs has been initialized,
  /// and false otherwise.
  bool IsUpdatedCSRsInitialized = false;

  /// Contains the updated callee saved register list.
  /// As opposed to the static list defined in register info,
  /// all registers that were disabled are removed from the list.
  SmallVector<MCPhysReg, 16> UpdatedCSRs;

  /// RegAllocHints - This vector records register allocation hints for
  /// virtual registers. For each virtual register, it keeps a pair of the
  /// hint type and a vector of hints that together make up the allocation
  /// hints. Only the first hint may be target specific; in that case the
  /// first member of the pair is non-zero. If a hinted register is virtual,
  /// the allocator should prefer the physical register allocated to it,
  /// if any.
  IndexedMap<std::pair<unsigned, SmallVector<Register, 4>>,
             VirtReg2IndexFunctor>
      RegAllocHints;

  /// PhysRegUseDefLists - This is an array of the head of the use/def list for
  /// physical registers.
  std::unique_ptr<MachineOperand *[]> PhysRegUseDefLists;

  /// getRegUseDefListHead - Return the head pointer for the register use/def
  /// list for the specified virtual or physical register.
  MachineOperand *&getRegUseDefListHead(Register RegNo) {
    if (RegNo.isVirtual())
      return VRegInfo[RegNo.id()].second;
    return PhysRegUseDefLists[RegNo.id()];
  }

  MachineOperand *getRegUseDefListHead(Register RegNo) const {
    if (RegNo.isVirtual())
      return VRegInfo[RegNo.id()].second;
    return PhysRegUseDefLists[RegNo.id()];
  }

  /// Get the next element in the use-def chain.
  static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
    assert(MO && MO->isReg() && "This is not a register operand!");
    return MO->Contents.Reg.Next;
  }

  /// UsedPhysRegMask - Additional used physregs including aliases.
  /// This bit vector represents all the registers clobbered by function calls.
  BitVector UsedPhysRegMask;

  /// ReservedRegs - This is a bit vector of reserved registers.  The target
  /// may change its mind about which registers should be reserved.  This
  /// vector is the frozen set of reserved registers when register allocation
  /// started.
  BitVector ReservedRegs;

  using VRegToTypeMap = IndexedMap<LLT, VirtReg2IndexFunctor>;
  /// Map generic virtual registers to their low-level type.
  VRegToTypeMap VRegToType;

  /// Keep track of the physical registers that are live in to the function.
  /// Live in values are typically arguments in registers.  LiveIn values are
  /// allowed to have virtual registers associated with them, stored in the
  /// second element.
  std::vector<std::pair<MCRegister, Register>> LiveIns;

public:
  explicit MachineRegisterInfo(MachineFunction *MF);
  MachineRegisterInfo(const MachineRegisterInfo &) = delete;
  MachineRegisterInfo &operator=(const MachineRegisterInfo &) = delete;

  const TargetRegisterInfo *getTargetRegisterInfo() const {
    return MF->getSubtarget().getRegisterInfo();
  }

  void resetDelegate(Delegate *delegate) {
    // Ensure another delegate does not take over unless the current
    // delegate first detaches itself.
    assert(TheDelegates.count(delegate) &&
           "Only an existing delegate can perform reset!");
    TheDelegates.erase(delegate);
  }

  void addDelegate(Delegate *delegate) {
    assert(delegate && !TheDelegates.count(delegate) &&
           "Attempted to add null delegate, or to change it without "
           "first resetting it!");

    TheDelegates.insert(delegate);
  }

  void noteNewVirtualRegister(Register Reg) {
    for (auto *TheDelegate : TheDelegates)
      TheDelegate->MRI_NoteNewVirtualRegister(Reg);
  }

  void noteCloneVirtualRegister(Register NewReg, Register SrcReg) {
    for (auto *TheDelegate : TheDelegates)
      TheDelegate->MRI_NoteCloneVirtualRegister(NewReg, SrcReg);
  }

  //===--------------------------------------------------------------------===//
  // Function State
  //===--------------------------------------------------------------------===//

  // isSSA - Returns true when the machine function is in SSA form. Early
  // passes require the machine function to be in SSA form where every virtual
  // register has a single defining instruction.
  //
  // The TwoAddressInstructionPass and PHIElimination passes take the machine
  // function out of SSA form when they introduce multiple defs per virtual
  // register.
  bool isSSA() const {
    return MF->getProperties().hasProperty(
        MachineFunctionProperties::Property::IsSSA);
  }

  // leaveSSA - Indicates that the machine function is no longer in SSA form.
  void leaveSSA() {
    MF->getProperties().reset(MachineFunctionProperties::Property::IsSSA);
  }

  /// tracksLiveness - Returns true when tracking register liveness accurately.
  /// (see MachineFunctionProperties::Property description for details)
  bool tracksLiveness() const {
    return MF->getProperties().hasProperty(
        MachineFunctionProperties::Property::TracksLiveness);
  }

  /// invalidateLiveness - Indicates that register liveness is no longer being
  /// tracked accurately.
  ///
  /// This should be called by late passes that invalidate the liveness
  /// information.
  void invalidateLiveness() {
    MF->getProperties().reset(
        MachineFunctionProperties::Property::TracksLiveness);
  }

  /// Returns true if liveness for register class @p RC should be tracked at
  /// the subregister level.
  bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const {
    return subRegLivenessEnabled() && RC.HasDisjunctSubRegs;
  }
  bool shouldTrackSubRegLiveness(Register VReg) const {
    assert(VReg.isVirtual() && "Must pass a VReg");
    return shouldTrackSubRegLiveness(*getRegClass(VReg));
  }
  bool subRegLivenessEnabled() const {
    return TracksSubRegLiveness;
  }

  //===--------------------------------------------------------------------===//
  // Register Info
  //===--------------------------------------------------------------------===//

  /// Returns true if the updated CSR list was initialized and false otherwise.
  bool isUpdatedCSRsInitialized() const { return IsUpdatedCSRsInitialized; }

  /// Returns true if a register can be used as an argument to a function.
  bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const;

  /// Returns true if a register is a fixed register.
  bool isFixedRegister(const MachineFunction &MF, MCRegister Reg) const;

  /// Returns true if a register is a general purpose register.
  bool isGeneralPurposeRegister(const MachineFunction &MF,
                                MCRegister Reg) const;

  /// Disables the register from the list of CSRs.
  /// I.e. the register will not appear as part of the CSR mask.
  /// \see UpdatedCalleeSavedRegs.
  void disableCalleeSavedRegister(MCRegister Reg);

  /// Returns list of callee saved registers.
  /// The function returns the updated CSR list (after taking into account
  /// registers that are disabled from the CSR list).
  const MCPhysReg *getCalleeSavedRegs() const;

  /// Sets the updated Callee Saved Registers list.
  /// Notice that it will override any previously disabled/saved CSRs.
  void setCalleeSavedRegs(ArrayRef<MCPhysReg> CSRs);

  // Strictly for use by MachineInstr.cpp.
  void addRegOperandToUseList(MachineOperand *MO);

  // Strictly for use by MachineInstr.cpp.
  void removeRegOperandFromUseList(MachineOperand *MO);

  // Strictly for use by MachineInstr.cpp.
  void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);

  /// Verify the sanity of the use list for Reg.
  void verifyUseList(Register Reg) const;

  /// Verify the use list of all registers.
  void verifyUseLists() const;

  /// reg_begin/reg_end - Provide iteration support to walk over all definitions
  /// and uses of a register within the MachineFunction that corresponds to this
  /// MachineRegisterInfo object.
  template<bool Uses, bool Defs, bool SkipDebug,
           bool ByOperand, bool ByInstr, bool ByBundle>
  class defusechain_iterator;
  template<bool Uses, bool Defs, bool SkipDebug,
           bool ByOperand, bool ByInstr, bool ByBundle>
  class defusechain_instr_iterator;

  // Make it a friend so it can access getNextOperandForReg().
  template<bool, bool, bool, bool, bool, bool>
    friend class defusechain_iterator;
  template<bool, bool, bool, bool, bool, bool>
    friend class defusechain_instr_iterator;

  /// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
  /// register.
  using reg_iterator =
      defusechain_iterator<true, true, false, true, false, false>;
  reg_iterator reg_begin(Register RegNo) const {
    return reg_iterator(getRegUseDefListHead(RegNo));
  }
  static reg_iterator reg_end() { return reg_iterator(nullptr); }

  inline iterator_range<reg_iterator> reg_operands(Register Reg) const {
    return make_range(reg_begin(Reg), reg_end());
  }
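
  // Editor's sketch: visiting every operand that reads or writes Reg,
  // assuming MRI is a MachineRegisterInfo and Reg a virtual register:
  //
  //   unsigned NumDefs = 0;
  //   for (MachineOperand &MO : MRI.reg_operands(Reg))
  //     if (MO.isDef())
  //       ++NumDefs;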

  /// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
  /// of the specified register, stepping by MachineInstr.
  using reg_instr_iterator =
      defusechain_instr_iterator<true, true, false, false, true, false>;
  reg_instr_iterator reg_instr_begin(Register RegNo) const {
    return reg_instr_iterator(getRegUseDefListHead(RegNo));
  }
  static reg_instr_iterator reg_instr_end() {
    return reg_instr_iterator(nullptr);
  }

  inline iterator_range<reg_instr_iterator>
  reg_instructions(Register Reg) const {
    return make_range(reg_instr_begin(Reg), reg_instr_end());
  }

  /// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
  /// of the specified register, stepping by bundle.
  using reg_bundle_iterator =
      defusechain_instr_iterator<true, true, false, false, false, true>;
  reg_bundle_iterator reg_bundle_begin(Register RegNo) const {
    return reg_bundle_iterator(getRegUseDefListHead(RegNo));
  }
  static reg_bundle_iterator reg_bundle_end() {
    return reg_bundle_iterator(nullptr);
  }

  inline iterator_range<reg_bundle_iterator> reg_bundles(Register Reg) const {
    return make_range(reg_bundle_begin(Reg), reg_bundle_end());
  }

  /// reg_empty - Return true if there are no instructions using or defining the
  /// specified register (it may be live-in).
  bool reg_empty(Register RegNo) const { return reg_begin(RegNo) == reg_end(); }

  /// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
  /// of the specified register, skipping those marked as Debug.
  using reg_nodbg_iterator =
      defusechain_iterator<true, true, true, true, false, false>;
  reg_nodbg_iterator reg_nodbg_begin(Register RegNo) const {
    return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
  }
  static reg_nodbg_iterator reg_nodbg_end() {
    return reg_nodbg_iterator(nullptr);
  }

  inline iterator_range<reg_nodbg_iterator>
  reg_nodbg_operands(Register Reg) const {
    return make_range(reg_nodbg_begin(Reg), reg_nodbg_end());
  }

  /// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
  /// all defs and uses of the specified register, stepping by MachineInstr,
  /// skipping those marked as Debug.
  using reg_instr_nodbg_iterator =
      defusechain_instr_iterator<true, true, true, false, true, false>;
  reg_instr_nodbg_iterator reg_instr_nodbg_begin(Register RegNo) const {
    return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
  }
  static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
    return reg_instr_nodbg_iterator(nullptr);
  }

  inline iterator_range<reg_instr_nodbg_iterator>
  reg_nodbg_instructions(Register Reg) const {
    return make_range(reg_instr_nodbg_begin(Reg), reg_instr_nodbg_end());
  }

  /// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
  /// all defs and uses of the specified register, stepping by bundle,
  /// skipping those marked as Debug.
  using reg_bundle_nodbg_iterator =
      defusechain_instr_iterator<true, true, true, false, false, true>;
  reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(Register RegNo) const {
    return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
  }
  static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
    return reg_bundle_nodbg_iterator(nullptr);
  }

  inline iterator_range<reg_bundle_nodbg_iterator>
  reg_nodbg_bundles(Register Reg) const {
    return make_range(reg_bundle_nodbg_begin(Reg), reg_bundle_nodbg_end());
  }

  /// reg_nodbg_empty - Return true if the only instructions using or defining
  /// Reg are Debug instructions.
  bool reg_nodbg_empty(Register RegNo) const {
    return reg_nodbg_begin(RegNo) == reg_nodbg_end();
  }

  /// def_iterator/def_begin/def_end - Walk all defs of the specified register.
  using def_iterator =
      defusechain_iterator<false, true, false, true, false, false>;
  def_iterator def_begin(Register RegNo) const {
    return def_iterator(getRegUseDefListHead(RegNo));
  }
  static def_iterator def_end() { return def_iterator(nullptr); }

  inline iterator_range<def_iterator> def_operands(Register Reg) const {
    return make_range(def_begin(Reg), def_end());
  }

  /// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
  /// specified register, stepping by MachineInst.
  using def_instr_iterator =
      defusechain_instr_iterator<false, true, false, false, true, false>;
  def_instr_iterator def_instr_begin(Register RegNo) const {
    return def_instr_iterator(getRegUseDefListHead(RegNo));
  }
  static def_instr_iterator def_instr_end() {
    return def_instr_iterator(nullptr);
  }

  inline iterator_range<def_instr_iterator>
  def_instructions(Register Reg) const {
    return make_range(def_instr_begin(Reg), def_instr_end());
  }

  /// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
  /// specified register, stepping by bundle.
  using def_bundle_iterator =
      defusechain_instr_iterator<false, true, false, false, false, true>;
  def_bundle_iterator def_bundle_begin(Register RegNo) const {
    return def_bundle_iterator(getRegUseDefListHead(RegNo));
  }
  static def_bundle_iterator def_bundle_end() {
    return def_bundle_iterator(nullptr);
  }

  inline iterator_range<def_bundle_iterator> def_bundles(Register Reg) const {
    return make_range(def_bundle_begin(Reg), def_bundle_end());
  }

  /// def_empty - Return true if there are no instructions defining the
  /// specified register (it may be live-in).
  bool def_empty(Register RegNo) const { return def_begin(RegNo) == def_end(); }

  StringRef getVRegName(Register Reg) const {
    return VReg2Name.inBounds(Reg) ? StringRef(VReg2Name[Reg]) : "";
  }

  void insertVRegByName(StringRef Name, Register Reg) {
    assert((Name.empty() || !VRegNames.contains(Name)) &&
           "Named VRegs Must be Unique.");
    if (!Name.empty()) {
      VRegNames.insert(Name);
      VReg2Name.grow(Reg);
      VReg2Name[Reg] = Name.str();
    }
  }

  /// Return true if there is exactly one operand defining the specified
  /// register.
  bool hasOneDef(Register RegNo) const {
    return hasSingleElement(def_operands(RegNo));
  }

  /// Returns the defining operand if there is exactly one operand defining the
  /// specified register, otherwise nullptr.
  MachineOperand *getOneDef(Register Reg) const {
    def_iterator DI = def_begin(Reg);
    if (DI == def_end()) // No defs.
      return nullptr;

    def_iterator OneDef = DI;
    if (++DI == def_end())
      return &*OneDef;
    return nullptr; // Multiple defs.
  }

  /// use_iterator/use_begin/use_end - Walk all uses of the specified register.
  using use_iterator =
      defusechain_iterator<true, false, false, true, false, false>;
  use_iterator use_begin(Register RegNo) const {
    return use_iterator(getRegUseDefListHead(RegNo));
  }
  static use_iterator use_end() { return use_iterator(nullptr); }

  inline iterator_range<use_iterator> use_operands(Register Reg) const {
    return make_range(use_begin(Reg), use_end());
  }

  /// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
  /// specified register, stepping by MachineInstr.
  using use_instr_iterator =
      defusechain_instr_iterator<true, false, false, false, true, false>;
  use_instr_iterator use_instr_begin(Register RegNo) const {
    return use_instr_iterator(getRegUseDefListHead(RegNo));
  }
  static use_instr_iterator use_instr_end() {
    return use_instr_iterator(nullptr);
  }

  inline iterator_range<use_instr_iterator>
  use_instructions(Register Reg) const {
    return make_range(use_instr_begin(Reg), use_instr_end());
  }

  /// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
  /// specified register, stepping by bundle.
  using use_bundle_iterator =
      defusechain_instr_iterator<true, false, false, false, false, true>;
  use_bundle_iterator use_bundle_begin(Register RegNo) const {
    return use_bundle_iterator(getRegUseDefListHead(RegNo));
  }
  static use_bundle_iterator use_bundle_end() {
    return use_bundle_iterator(nullptr);
  }

  inline iterator_range<use_bundle_iterator> use_bundles(Register Reg) const {
    return make_range(use_bundle_begin(Reg), use_bundle_end());
  }

  /// use_empty - Return true if there are no instructions using the specified
  /// register.
  bool use_empty(Register RegNo) const { return use_begin(RegNo) == use_end(); }

  /// hasOneUse - Return true if there is exactly one instruction using the
  /// specified register.
  bool hasOneUse(Register RegNo) const {
    return hasSingleElement(use_operands(RegNo));
  }

  /// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
  /// specified register, skipping those marked as Debug.
  using use_nodbg_iterator =
      defusechain_iterator<true, false, true, true, false, false>;
  use_nodbg_iterator use_nodbg_begin(Register RegNo) const {
    return use_nodbg_iterator(getRegUseDefListHead(RegNo));
  }
  static use_nodbg_iterator use_nodbg_end() {
    return use_nodbg_iterator(nullptr);
  }

  inline iterator_range<use_nodbg_iterator>
  use_nodbg_operands(Register Reg) const {
    return make_range(use_nodbg_begin(Reg), use_nodbg_end());
  }

  /// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
  /// all uses of the specified register, stepping by MachineInstr, skipping
  /// those marked as Debug.
  using use_instr_nodbg_iterator =
      defusechain_instr_iterator<true, false, true, false, true, false>;
  use_instr_nodbg_iterator use_instr_nodbg_begin(Register RegNo) const {
    return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
  }
  static use_instr_nodbg_iterator use_instr_nodbg_end() {
    return use_instr_nodbg_iterator(nullptr);
  }

  inline iterator_range<use_instr_nodbg_iterator>
  use_nodbg_instructions(Register Reg) const {
    return make_range(use_instr_nodbg_begin(Reg), use_instr_nodbg_end());
  }

  /// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
  /// all uses of the specified register, stepping by bundle, skipping
  /// those marked as Debug.
  using use_bundle_nodbg_iterator =
      defusechain_instr_iterator<true, false, true, false, false, true>;
  use_bundle_nodbg_iterator use_bundle_nodbg_begin(Register RegNo) const {
    return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
  }
  static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
    return use_bundle_nodbg_iterator(nullptr);
  }

  inline iterator_range<use_bundle_nodbg_iterator>
  use_nodbg_bundles(Register Reg) const {
    return make_range(use_bundle_nodbg_begin(Reg), use_bundle_nodbg_end());
  }

  /// use_nodbg_empty - Return true if there are no non-Debug instructions
  /// using the specified register.
  bool use_nodbg_empty(Register RegNo) const {
    return use_nodbg_begin(RegNo) == use_nodbg_end();
  }

  /// hasOneNonDBGUse - Return true if there is exactly one non-Debug
  /// use of the specified register.
  bool hasOneNonDBGUse(Register RegNo) const;

  /// hasOneNonDBGUser - Return true if there is exactly one non-Debug
  /// instruction using the specified register. Said instruction may have
  /// multiple uses.
  bool hasOneNonDBGUser(Register RegNo) const;


  /// hasAtMostUserInstrs - Return true if the given register has at most
  /// \p MaxUsers non-debug user instructions.
  bool hasAtMostUserInstrs(Register Reg, unsigned MaxUsers) const;

  /// replaceRegWith - Replace all instances of FromReg with ToReg in the
  /// machine function.  This is like llvm-level X->replaceAllUsesWith(Y),
  /// except that it also changes any definitions of the register as well.
  ///
  /// Note that it is usually necessary to first constrain ToReg's register
  /// class and register bank to match the FromReg constraints using one of the
  /// methods:
  ///
  ///   constrainRegClass(ToReg, getRegClass(FromReg))
  ///   constrainRegAttrs(ToReg, FromReg)
  ///   RegisterBankInfo::constrainGenericRegister(ToReg,
  ///       *MRI.getRegClass(FromReg), MRI)
  ///
  /// These functions will return a falsy result if the virtual registers have
  /// incompatible constraints.
  ///
  /// Note that if ToReg is a physical register, the function will apply
  /// sub-register indices to ToReg in order to obtain a final/proper physical
  /// register.
  void replaceRegWith(Register FromReg, Register ToReg);
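
  // Editor's sketch of the constrain-then-replace idiom described above;
  // FromReg and ToReg are assumed virtual registers of this function:
  //
  //   if (MRI.constrainRegClass(ToReg, MRI.getRegClass(FromReg)))
  //     MRI.replaceRegWith(FromReg, ToReg);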

  /// getVRegDef - Return the machine instr that defines the specified virtual
  /// register or null if none is found.  This assumes that the code is in SSA
  /// form, so there should only be one definition.
  MachineInstr *getVRegDef(Register Reg) const;

  /// getUniqueVRegDef - Return the unique machine instr that defines the
  /// specified virtual register or null if none is found.  If there are
  /// multiple definitions or no definition, return null.
  MachineInstr *getUniqueVRegDef(Register Reg) const;

  /// clearKillFlags - Iterate over all the uses of the given register and
  /// clear the kill flag from the MachineOperand. This function is used by
  /// optimization passes which extend register lifetimes and need only
  /// preserve conservative kill flag information.
  void clearKillFlags(Register Reg) const;

  void dumpUses(Register RegNo) const;

  /// Returns true if PhysReg is unallocatable and constant throughout the
  /// function. Writing to a constant register has no effect.
  bool isConstantPhysReg(MCRegister PhysReg) const;

  /// Get an iterator over the pressure sets affected by the given physical or
  /// virtual register. If RegUnit is physical, it must be a register unit (from
  /// MCRegUnitIterator).
  PSetIterator getPressureSets(Register RegUnit) const;

  //===--------------------------------------------------------------------===//
  // Virtual Register Info
  //===--------------------------------------------------------------------===//

  /// Return the register class of the specified virtual register.
  /// This shouldn't be used directly unless \p Reg has a register class.
  /// \see getRegClassOrNull when this might happen.
  const TargetRegisterClass *getRegClass(Register Reg) const {
    assert(isa<const TargetRegisterClass *>(VRegInfo[Reg.id()].first) &&
           "Register class not set, wrong accessor");
    return cast<const TargetRegisterClass *>(VRegInfo[Reg.id()].first);
  }

  /// Return the register class of \p Reg, or null if Reg has not been assigned
  /// a register class yet.
  ///
  /// \note A null register class can only happen when these two
  /// conditions are met:
  /// 1. Generic virtual registers are created.
  /// 2. The machine function has not yet been completely through the
  ///    instruction selection process.
  /// Neither condition is possible without GlobalISel for now.
  /// In other words, if GlobalISel is not used or if the query happens after
  /// the select pass, using getRegClass is safe.
  const TargetRegisterClass *getRegClassOrNull(Register Reg) const {
    const RegClassOrRegBank &Val = VRegInfo[Reg].first;
    return dyn_cast_if_present<const TargetRegisterClass *>(Val);
  }

  /// Return the register bank of \p Reg, or null if Reg has not been assigned
  /// a register bank or has been assigned a register class.
  /// \note It is possible to get the register bank from the register class via
  /// RegisterBankInfo::getRegBankFromRegClass.
  const RegisterBank *getRegBankOrNull(Register Reg) const {
    const RegClassOrRegBank &Val = VRegInfo[Reg].first;
    return dyn_cast_if_present<const RegisterBank *>(Val);
  }

  /// Return the register bank or register class of \p Reg.
  /// \note Before the register bank gets assigned (i.e., before the
  /// RegBankSelect pass) \p Reg may not have either.
  const RegClassOrRegBank &getRegClassOrRegBank(Register Reg) const {
    return VRegInfo[Reg].first;
  }

  /// setRegClass - Set the register class of the specified virtual register.
  void setRegClass(Register Reg, const TargetRegisterClass *RC);

  /// Set the register bank to \p RegBank for \p Reg.
  void setRegBank(Register Reg, const RegisterBank &RegBank);

  void setRegClassOrRegBank(Register Reg,
                            const RegClassOrRegBank &RCOrRB){
    VRegInfo[Reg].first = RCOrRB;
  }

  /// constrainRegClass - Constrain the register class of the specified virtual
  /// register to be a common subclass of RC and the current register class,
  /// but only if the new class has at least MinNumRegs registers.  Return the
  /// new register class, or NULL if no such class exists.
  /// This should only be used when the constraint is known to be trivial, like
  /// GR32 -> GR32_NOSP. Beware of increasing register pressure.
  ///
  /// \note Assumes that the register has a register class assigned.
  /// Use RegisterBankInfo::constrainGenericRegister in GlobalISel's
  /// InstructionSelect pass and constrainRegAttrs in every other pass,
  /// including non-select passes of GlobalISel, instead.
  const TargetRegisterClass *constrainRegClass(Register Reg,
                                               const TargetRegisterClass *RC,
                                               unsigned MinNumRegs = 0);

  /// Constrain the register class or the register bank of the virtual register
  /// \p Reg (and low-level type) to be a common subclass or a common bank of
  /// both registers provided respectively (and a common low-level type). Do
  /// nothing if any of the attributes (classes, banks, or low-level types) of
  /// the registers are deemed incompatible, or if the resulting register will
  /// have a class smaller than before and of size less than \p MinNumRegs.
  /// Return true if such register attributes exist, false otherwise.
  ///
  /// \note Use this method instead of constrainRegClass and
  /// RegisterBankInfo::constrainGenericRegister everywhere but SelectionDAG
  /// ISel / FastISel and GlobalISel's InstructionSelect pass respectively.
  bool constrainRegAttrs(Register Reg, Register ConstrainingReg,
                         unsigned MinNumRegs = 0);

  /// recomputeRegClass - Try to find a legal super-class of Reg's register
  /// class that still satisfies the constraints from the instructions using
  /// Reg.  Returns true if Reg was upgraded.
  ///
  /// This method can be used after constraints have been removed from a
  /// virtual register, for example after removing instructions or splitting
  /// the live range.
  bool recomputeRegClass(Register Reg);

  /// createVirtualRegister - Create and return a new virtual register in the
  /// function with the specified register class.
  Register createVirtualRegister(const TargetRegisterClass *RegClass,
                                 StringRef Name = "");

  /// Create and return a new virtual register in the function with the same
  /// attributes as the given register.
  Register cloneVirtualRegister(Register VReg, StringRef Name = "");

  /// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
  /// (target independent) virtual register.
  LLT getType(Register Reg) const {
    if (Reg.isVirtual() && VRegToType.inBounds(Reg))
      return VRegToType[Reg];
    return LLT{};
  }

  /// Set the low-level type of \p VReg to \p Ty.
  void setType(Register VReg, LLT Ty);

  /// Create and return a new generic virtual register with low-level
  /// type \p Ty.
  Register createGenericVirtualRegister(LLT Ty, StringRef Name = "");
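
  // Editor's sketch: creating a generic 32-bit scalar vreg during GlobalISel,
  // before a register class or bank has been assigned:
  //
  //   Register Tmp = MRI.createGenericVirtualRegister(LLT::scalar(32));
  //   // MRI.getType(Tmp) now returns LLT::scalar(32).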

  /// Remove all types associated to virtual registers (after instruction
  /// selection and constraining of all generic virtual registers).
  void clearVirtRegTypes();

  /// Creates a new virtual register that has no register class, register bank
  /// or low-level type assigned yet. This is only allowed to be used
  /// temporarily while constructing machine instructions. Most operations are
  /// undefined on an incomplete register until one of setRegClass(),
  /// setRegBank() or setType() has been called on it.
  Register createIncompleteVirtualRegister(StringRef Name = "");

  /// getNumVirtRegs - Return the number of virtual registers created.
  unsigned getNumVirtRegs() const { return VRegInfo.size(); }

  /// clearVirtRegs - Remove all virtual registers (after physreg assignment).
  void clearVirtRegs();

  /// setRegAllocationHint - Specify a register allocation hint for the
  /// specified virtual register. This is typically used by targets; any
  /// earlier hint will be overwritten.
  void setRegAllocationHint(Register VReg, unsigned Type, Register PrefReg) {
    assert(VReg.isVirtual());
    RegAllocHints[VReg].first  = Type;
    RegAllocHints[VReg].second.clear();
    RegAllocHints[VReg].second.push_back(PrefReg);
  }

  /// addRegAllocationHint - Add a register allocation hint to the hints
  /// vector for VReg.
  void addRegAllocationHint(Register VReg, Register PrefReg) {
    assert(VReg.isVirtual());
    RegAllocHints[VReg].second.push_back(PrefReg);
  }

  /// Specify the preferred (target independent) register allocation hint for
  /// the specified virtual register.
  void setSimpleHint(Register VReg, Register PrefReg) {
    setRegAllocationHint(VReg, /*Type=*/0, PrefReg);
  }

  void clearSimpleHint(Register VReg) {
    assert (!RegAllocHints[VReg].first &&
            "Expected to clear a non-target hint!");
    RegAllocHints[VReg].second.clear();
  }

  /// getRegAllocationHint - Return the register allocation hint for the
  /// specified virtual register. If there are many hints, this returns the
  /// one with the greatest weight.
  std::pair<unsigned, Register> getRegAllocationHint(Register VReg) const {
    assert(VReg.isVirtual());
    Register BestHint = (RegAllocHints[VReg.id()].second.size() ?
                         RegAllocHints[VReg.id()].second[0] : Register());
    return {RegAllocHints[VReg.id()].first, BestHint};
  }

  /// getSimpleHint - same as getRegAllocationHint except it will only return
  /// a target independent hint.
  Register getSimpleHint(Register VReg) const {
    assert(VReg.isVirtual());
    std::pair<unsigned, Register> Hint = getRegAllocationHint(VReg);
    return Hint.first ? Register() : Hint.second;
  }
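
  // Editor's sketch: setting a target-independent preference and reading it
  // back; VReg and PhysReg are assumed suitable virtual/physical registers:
  //
  //   MRI.setSimpleHint(VReg, PhysReg);
  //   Register Hint = MRI.getSimpleHint(VReg); // PhysReg, unless a
  //                                            // target-specific hint is set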

  /// getRegAllocationHints - Return a reference to the vector of all
  /// register allocation hints for VReg.
  const std::pair<unsigned, SmallVector<Register, 4>> &
  getRegAllocationHints(Register VReg) const {
    assert(VReg.isVirtual());
    return RegAllocHints[VReg];
  }

  /// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
  /// specified register as undefined, which causes the DBG_VALUE to be
  /// deleted during LiveDebugVariables analysis.
  void markUsesInDebugValueAsUndef(Register Reg) const;

  /// updateDbgUsersToReg - Update a collection of debug instructions
  /// to refer to the designated register.
  void updateDbgUsersToReg(MCRegister OldReg, MCRegister NewReg,
                           ArrayRef<MachineInstr *> Users) const {
    // If this operand is a register, check whether it overlaps with OldReg.
    // If it does, replace with NewReg.
    auto UpdateOp = [this, &NewReg, &OldReg](MachineOperand &Op) {
      if (Op.isReg() &&
          getTargetRegisterInfo()->regsOverlap(Op.getReg(), OldReg))
        Op.setReg(NewReg);
    };

    // Iterate through (possibly several) operands to DBG_VALUEs and update
    // each. For DBG_PHIs, only one operand will be present.
    for (MachineInstr *MI : Users) {
      if (MI->isDebugValue()) {
        for (auto &Op : MI->debug_operands())
          UpdateOp(Op);
        assert(MI->hasDebugOperandForReg(NewReg) &&
               "Expected debug value to have some overlap with OldReg");
      } else if (MI->isDebugPHI()) {
        UpdateOp(MI->getOperand(0));
      } else {
        llvm_unreachable("Non-DBG_VALUE, Non-DBG_PHI debug instr updated");
      }
    }
  }

  /// Return true if the specified register is modified in this function.
  /// This checks that no defining machine operands exist for the register or
  /// any of its aliases. Definitions found on functions marked noreturn are
  /// ignored; to consider them, pass 'true' for the optional parameter
  /// SkipNoReturnDef. The register is also considered modified when it is set
  /// in the UsedPhysRegMask.
  bool isPhysRegModified(MCRegister PhysReg, bool SkipNoReturnDef = false) const;

  /// Return true if the specified register is modified or read in this
  /// function. This checks that no machine operands exist for the register or
  /// any of its aliases. If SkipRegMaskTest is false, the register is
  /// considered used when it is set in the UsedPhysRegMask.
  bool isPhysRegUsed(MCRegister PhysReg, bool SkipRegMaskTest = false) const;

  /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
  /// This corresponds to the bit mask attached to register mask operands.
  void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
    UsedPhysRegMask.setBitsNotInMask(RegMask);
  }

  const BitVector &getUsedPhysRegsMask() const { return UsedPhysRegMask; }

  //===--------------------------------------------------------------------===//
  // Reserved Register Info
  //===--------------------------------------------------------------------===//
  //
  // The set of reserved registers must be invariant during register
  // allocation.  For example, the target cannot suddenly decide it needs a
  // frame pointer when the register allocator has already used the frame
  // pointer register for something else.
  //
  // These methods can be used by target hooks like hasFP() to avoid changing
  // the reserved register set during register allocation.

  /// freezeReservedRegs - Called by the register allocator to freeze the set
  /// of reserved registers before allocation begins.
  void freezeReservedRegs(const MachineFunction&);

  /// reserveReg -- Mark a register as reserved so checks like isAllocatable
  /// will not suggest using it. This should not be used in the middle of a
  /// function walk, or when liveness info is available.
  void reserveReg(MCRegister PhysReg, const TargetRegisterInfo *TRI) {
    assert(reservedRegsFrozen() &&
           "Reserved registers haven't been frozen yet. ");
    for (MCRegAliasIterator R(PhysReg, TRI, true); R.isValid(); ++R)
      ReservedRegs.set(*R);
  }

  /// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
  /// to ensure the set of reserved registers stays constant.
  bool reservedRegsFrozen() const {
    return !ReservedRegs.empty();
  }

  /// canReserveReg - Returns true if PhysReg can be used as a reserved
  /// register.  Any register can be reserved before freezeReservedRegs() is
  /// called.
  bool canReserveReg(MCRegister PhysReg) const {
    return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
  }

  /// getReservedRegs - Returns a reference to the frozen set of reserved
  /// registers. This method should always be preferred to calling
  /// TRI::getReservedRegs() when possible.
  const BitVector &getReservedRegs() const {
    assert(reservedRegsFrozen() &&
           "Reserved registers haven't been frozen yet. "
           "Use TRI::getReservedRegs().");
    return ReservedRegs;
  }

  /// isReserved - Returns true when PhysReg is a reserved register.
  ///
  /// Reserved registers may belong to an allocatable register class, but the
  /// target has explicitly requested that they are not used.
  bool isReserved(MCRegister PhysReg) const {
    return getReservedRegs().test(PhysReg.id());
  }

  /// Returns true when the given register unit is considered reserved.
  ///
  /// A register unit is considered reserved when, for at least one of its
  /// root registers, the root register and all of its super-registers are
  /// reserved.
  /// This currently iterates the register hierarchy and may be slower than
  /// expected.
  bool isReservedRegUnit(unsigned Unit) const;

  /// isAllocatable - Returns true when PhysReg belongs to an allocatable
  /// register class and it hasn't been reserved.
  ///
  /// Allocatable registers may show up in the allocation order of some virtual
  /// register, so a register allocator needs to track its liveness and
  /// availability.
  bool isAllocatable(MCRegister PhysReg) const {
    return getTargetRegisterInfo()->isInAllocatableClass(PhysReg) &&
      !isReserved(PhysReg);
  }
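
  // Editor's sketch: the allocator-side check described above, assuming the
  // reserved set was already frozen via freezeReservedRegs(MF):
  //
  //   if (MRI.isAllocatable(PhysReg)) {
  //     // PhysReg is in an allocatable class and not reserved; it is a
  //     // candidate for assignment.
  //   }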

  //===--------------------------------------------------------------------===//
  // LiveIn Management
  //===--------------------------------------------------------------------===//

  /// addLiveIn - Add the specified register as a live-in.  Note that it
  /// is an error to add the same register to the same set more than once.
  void addLiveIn(MCRegister Reg, Register vreg = Register()) {
    LiveIns.push_back(std::make_pair(Reg, vreg));
  }
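
  // Editor's sketch of the common argument-lowering pattern, assuming RC
  // suits the incoming physical argument register ArgReg and EntryMBB is
  // the function's entry block:
  //
  //   Register VReg = MRI.createVirtualRegister(RC);
  //   MRI.addLiveIn(ArgReg, VReg);
  //   EntryMBB->addLiveIn(ArgReg);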

  // Iteration support for the live-ins set.  It's kept in sorted order
  // by register number.
  using livein_iterator =
      std::vector<std::pair<MCRegister,Register>>::const_iterator;
  livein_iterator livein_begin() const { return LiveIns.begin(); }
  livein_iterator livein_end()   const { return LiveIns.end(); }
  bool            livein_empty() const { return LiveIns.empty(); }

  ArrayRef<std::pair<MCRegister, Register>> liveins() const {
    return LiveIns;
  }

  bool isLiveIn(Register Reg) const;

  /// getLiveInPhysReg - If VReg is a live-in virtual register, return the
  /// corresponding live-in physical register.
  MCRegister getLiveInPhysReg(Register VReg) const;

  /// getLiveInVirtReg - If PReg is a live-in physical register, return the
  /// corresponding live-in virtual register.
  Register getLiveInVirtReg(MCRegister PReg) const;

  /// EmitLiveInCopies - Emit copies to initialize livein virtual registers
  /// into the given entry block.
  void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
                        const TargetRegisterInfo &TRI,
                        const TargetInstrInfo &TII);

  /// Returns a mask covering all bits that can appear in lane masks of
  /// subregisters of the virtual register @p Reg.
  LaneBitmask getMaxLaneMaskForVReg(Register Reg) const;

  /// defusechain_iterator - This class provides iterator support for machine
  /// operands in the function that use or define a specific register.  If
  /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
  /// returns defs.  If neither are true then you are silly and it always
  /// returns end().  If SkipDebug is true it skips uses marked Debug
  /// when incrementing.
  template <bool ReturnUses, bool ReturnDefs, bool SkipDebug, bool ByOperand,
            bool ByInstr, bool ByBundle>
  class defusechain_iterator {
    friend class MachineRegisterInfo;

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = MachineOperand;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    MachineOperand *Op = nullptr;

    explicit defusechain_iterator(MachineOperand *op) : Op(op) {
      // If the first node isn't one we're interested in, advance to one that
      // we are interested in.
      if (op) {
        if ((!ReturnUses && op->isUse()) ||
            (!ReturnDefs && op->isDef()) ||
            (SkipDebug && op->isDebug()))
          advance();
      }
    }

    void advance() {
      assert(Op && "Cannot increment end iterator!");
      Op = getNextOperandForReg(Op);

      // All defs come before the uses, so stop def_iterator early.
      if (!ReturnUses) {
        if (Op) {
          if (Op->isUse())
            Op = nullptr;
          else
            assert(!Op->isDebug() && "Can't have debug defs");
        }
      } else {
        // If this is an operand we don't care about, skip it.
        while (Op && ((!ReturnDefs && Op->isDef()) ||
                      (SkipDebug && Op->isDebug())))
          Op = getNextOperandForReg(Op);
      }
    }

  public:
    defusechain_iterator() = default;

    bool operator==(const defusechain_iterator &x) const {
      return Op == x.Op;
    }
    bool operator!=(const defusechain_iterator &x) const {
      return !operator==(x);
    }

    /// atEnd - return true if this iterator is equal to reg_end() on the value.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only
    defusechain_iterator &operator++() {          // Preincrement
      assert(Op && "Cannot increment end iterator!");
      if (ByOperand)
        advance();
      else if (ByInstr) {
        MachineInstr *P = Op->getParent();
        do {
          advance();
        } while (Op && Op->getParent() == P);
      } else if (ByBundle) {
        MachineBasicBlock::instr_iterator P =
            getBundleStart(Op->getParent()->getIterator());
        do {
          advance();
        } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
      }

      return *this;
    }
    defusechain_iterator operator++(int) {        // Postincrement
      defusechain_iterator tmp = *this; ++*this; return tmp;
    }

    /// getOperandNo - Return the operand # of this MachineOperand in its
    /// MachineInstr.
    unsigned getOperandNo() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op - &Op->getParent()->getOperand(0);
    }

    // Retrieve a reference to the current operand.
    MachineOperand &operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      return *Op;
    }

    MachineOperand *operator->() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op;
    }
  };

  /// defusechain_instr_iterator - This class provides iterator support for machine
  /// operands in the function that use or define a specific register.  If
  /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
  /// returns defs.  If neither are true then you are silly and it always
  /// returns end().  If SkipDebug is true it skips uses marked Debug
  /// when incrementing.
  template <bool ReturnUses, bool ReturnDefs, bool SkipDebug, bool ByOperand,
            bool ByInstr, bool ByBundle>
  class defusechain_instr_iterator {
    friend class MachineRegisterInfo;

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = MachineInstr;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    MachineOperand *Op = nullptr;

    explicit defusechain_instr_iterator(MachineOperand *op) : Op(op) {
      // If the first node isn't one we're interested in, advance to one that
      // we are interested in.
      if (op) {
        if ((!ReturnUses && op->isUse()) ||
            (!ReturnDefs && op->isDef()) ||
            (SkipDebug && op->isDebug()))
          advance();
      }
    }

    void advance() {
      assert(Op && "Cannot increment end iterator!");
      Op = getNextOperandForReg(Op);

      // All defs come before the uses, so stop def_iterator early.
      if (!ReturnUses) {
        if (Op) {
          if (Op->isUse())
            Op = nullptr;
          else
            assert(!Op->isDebug() && "Can't have debug defs");
        }
      } else {
        // If this is an operand we don't care about, skip it.
        while (Op && ((!ReturnDefs && Op->isDef()) ||
                      (SkipDebug && Op->isDebug())))
          Op = getNextOperandForReg(Op);
      }
    }

  public:
    defusechain_instr_iterator() = default;

    bool operator==(const defusechain_instr_iterator &x) const {
      return Op == x.Op;
    }
    bool operator!=(const defusechain_instr_iterator &x) const {
      return !operator==(x);
    }

    /// atEnd - return true if this iterator is equal to reg_end() on the value.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only
    defusechain_instr_iterator &operator++() {          // Preincrement
      assert(Op && "Cannot increment end iterator!");
      if (ByOperand)
        advance();
      else if (ByInstr) {
        MachineInstr *P = Op->getParent();
        do {
          advance();
        } while (Op && Op->getParent() == P);
      } else if (ByBundle) {
        MachineBasicBlock::instr_iterator P =
            getBundleStart(Op->getParent()->getIterator());
        do {
          advance();
        } while (Op && getBundleStart(Op->getParent()->getIterator()) == P);
      }

      return *this;
    }
    defusechain_instr_iterator operator++(int) {        // Postincrement
      defusechain_instr_iterator tmp = *this; ++*this; return tmp;
    }

    // Retrieve a reference to the current instruction.
    MachineInstr &operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      if (ByBundle)
        return *getBundleStart(Op->getParent()->getIterator());
      return *Op->getParent();
    }

    MachineInstr *operator->() const { return &operator*(); }
  };
};
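
// Illustrative sketch (not part of the original header): the def/use chain
// iterators above are normally reached through MachineRegisterInfo's
// convenience ranges.  Assuming `MRI` is a valid MachineRegisterInfo and
// `Reg` a virtual register:
//
//   for (MachineOperand &MO : MRI.use_operands(Reg)) {
//     MachineInstr &UseMI = *MO.getParent();
//     // Inspect or rewrite this use of Reg.
//   }
//
// use_operands() is the ByOperand flavor; use_instructions() visits each
// using MachineInstr only once instead.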

/// Iterate over the pressure sets affected by the given physical or virtual
/// register. If Reg is physical, it must be a register unit (from
/// MCRegUnitIterator).
class PSetIterator {
  const int *PSet = nullptr;
  unsigned Weight = 0;

public:
  PSetIterator() = default;

  PSetIterator(Register RegUnit, const MachineRegisterInfo *MRI) {
    const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
    if (RegUnit.isVirtual()) {
      const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
      PSet = TRI->getRegClassPressureSets(RC);
      Weight = TRI->getRegClassWeight(RC).RegWeight;
    } else {
      PSet = TRI->getRegUnitPressureSets(RegUnit);
      Weight = TRI->getRegUnitWeight(RegUnit);
    }
    if (*PSet == -1)
      PSet = nullptr;
  }

  bool isValid() const { return PSet; }

  unsigned getWeight() const { return Weight; }

  unsigned operator*() const { return *PSet; }

  void operator++() {
    assert(isValid() && "Invalid PSetIterator.");
    ++PSet;
    if (*PSet == -1)
      PSet = nullptr;
  }
};

inline PSetIterator
MachineRegisterInfo::getPressureSets(Register RegUnit) const {
  return PSetIterator(RegUnit, this);
}
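
// Illustrative sketch (not part of the original header): iterating the
// pressure sets of a virtual register.  `MRI` and `VirtReg` are assumed to be
// a valid MachineRegisterInfo and a virtual Register.
//
//   for (PSetIterator PSetI = MRI.getPressureSets(VirtReg); PSetI.isValid();
//        ++PSetI) {
//     unsigned PSet = *PSetI;              // pressure set index
//     unsigned Weight = PSetI.getWeight(); // weight contributed by VirtReg
//     // Update pressure tracking for PSet here.
//   }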

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEREGISTERINFO_H
//===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveRange and LiveInterval classes.  Given some
// numbering of each of the machine instructions, an interval [i, j) is said to be a
// live range for register v if there is no instruction with number j' >= j
// such that v is live at j' and there is no instruction with number i' < i such
// that v is live at i'. In this implementation ranges can have holes,
// i.e. a range might look like [1,20), [50,65), [1000,1001).  Each
// individual segment is represented as an instance of LiveRange::Segment,
// and the whole range is represented as an instance of LiveRange.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
#define LLVM_CODEGEN_LIVEINTERVAL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <memory>
#include <set>
#include <tuple>
#include <utility>

namespace llvm {

  class CoalescerPair;
  class LiveIntervals;
  class MachineRegisterInfo;
  class raw_ostream;

  /// VNInfo - Value Number Information.
  /// This class holds information about a machine-level value, including its
  /// definition and use points.
  ///
  class VNInfo {
  public:
    using Allocator = BumpPtrAllocator;

    /// The ID number of this value.
    unsigned id;

    /// The index of the defining instruction.
    SlotIndex def;

    /// VNInfo constructor.
    VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {}

    /// VNInfo constructor, copies values from orig, except for the value number.
    VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {}

    /// Copy from the parameter into this VNInfo.
    void copyFrom(VNInfo &src) {
      def = src.def;
    }

    /// Returns true if this value is defined by a PHI instruction (or was;
    /// PHI instructions may have been eliminated).
    /// PHI-defs begin at a block boundary, while all other defs begin at
    /// register or early-clobber (EC) slots.
    bool isPHIDef() const { return def.isBlock(); }

    /// Returns true if this value is unused.
    bool isUnused() const { return !def.isValid(); }

    /// Mark this value as unused.
    void markUnused() { def = SlotIndex(); }
  };

  /// Result of a LiveRange query. This class hides the implementation details
  /// of live ranges, and it should be used as the primary interface for
  /// examining live ranges around instructions.
  class LiveQueryResult {
    VNInfo *const EarlyVal;
    VNInfo *const LateVal;
    const SlotIndex EndPoint;
    const bool Kill;

  public:
    LiveQueryResult(VNInfo *EarlyVal, VNInfo *LateVal, SlotIndex EndPoint,
                    bool Kill)
      : EarlyVal(EarlyVal), LateVal(LateVal), EndPoint(EndPoint), Kill(Kill)
    {}

    /// Return the value that is live-in to the instruction. This is the value
    /// that will be read by the instruction's use operands. Return NULL if no
    /// value is live-in.
    VNInfo *valueIn() const {
      return EarlyVal;
    }

    /// Return true if the live-in value is killed by this instruction. This
    /// means that either the live range ends at the instruction, or it changes
    /// value.
    bool isKill() const {
      return Kill;
    }

    /// Return true if this instruction has a dead def.
    bool isDeadDef() const {
      return EndPoint.isDead();
    }

    /// Return the value leaving the instruction, if any. This can be a
    /// live-through value, or a live def. A dead def returns NULL.
    VNInfo *valueOut() const {
      return isDeadDef() ? nullptr : LateVal;
    }

    /// Returns the value alive at the end of the instruction, if any. This can
    /// be a live-through value, a live def or a dead def.
    VNInfo *valueOutOrDead() const {
      return LateVal;
    }

    /// Return the value defined by this instruction, if any. This includes
    /// dead defs, it is the value created by the instruction's def operands.
    VNInfo *valueDefined() const {
      return EarlyVal == LateVal ? nullptr : LateVal;
    }

    /// Return the end point of the last live range segment to interact with
    /// the instruction, if any.
    ///
    /// The end point is an invalid SlotIndex only if the live range doesn't
    /// intersect the instruction at all.
    ///
    /// The end point may be at or past the end of the instruction's basic
    /// block. That means the value was live out of the block.
    SlotIndex endPoint() const {
      return EndPoint;
    }
  };
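
  // Illustrative sketch (not part of the original header): a typical query
  // around an instruction.  `LR` is assumed to be a LiveRange and `Idx` the
  // SlotIndex of an instruction (see LiveRange::Query further below).
  //
  //   LiveQueryResult LRQ = LR.Query(Idx);
  //   if (VNInfo *LiveIn = LRQ.valueIn()) {
  //     // A value is live-in and read by the instruction's use operands.
  //     bool KilledHere = LRQ.isKill();
  //   }
  //   if (VNInfo *Def = LRQ.valueDefined()) {
  //     // The instruction defines a new (possibly dead) value.
  //   }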

  /// This class represents the liveness of a register, stack slot, etc.
  /// It manages an ordered list of Segment objects.
  /// The Segments are organized in a static single assignment form: At places
  /// where a new value is defined or different values reach a CFG join, a new
  /// segment with a new value number is used.
  class LiveRange {
  public:
    /// This represents a simple continuous liveness interval for a value.
    /// The start point is inclusive, the end point exclusive. These intervals
    /// are rendered as [start,end).
    struct Segment {
      SlotIndex start;  // Start point of the interval (inclusive)
      SlotIndex end;    // End point of the interval (exclusive)
      VNInfo *valno = nullptr; // identifier for the value contained in this
                               // segment.

      Segment() = default;

      Segment(SlotIndex S, SlotIndex E, VNInfo *V)
        : start(S), end(E), valno(V) {
        assert(S < E && "Cannot create empty or backwards segment");
      }

      /// Return true if the index is covered by this segment.
      bool contains(SlotIndex I) const {
        return start <= I && I < end;
      }

      /// Return true if the given interval, [S, E), is covered by this segment.
      bool containsInterval(SlotIndex S, SlotIndex E) const {
        assert((S < E) && "Backwards interval?");
        return (start <= S && S < end) && (start < E && E <= end);
      }

      bool operator<(const Segment &Other) const {
        return std::tie(start, end) < std::tie(Other.start, Other.end);
      }
      bool operator==(const Segment &Other) const {
        return start == Other.start && end == Other.end;
      }

      bool operator!=(const Segment &Other) const {
        return !(*this == Other);
      }

      void dump() const;
    };

    using Segments = SmallVector<Segment, 2>;
    using VNInfoList = SmallVector<VNInfo *, 2>;

    Segments segments;   // the liveness segments
    VNInfoList valnos;   // value#'s

    // The segment set is used temporarily to accelerate initial computation
    // of live ranges of physical registers in computeRegUnitRange.
    // After that the set is flushed to the segment vector and deleted.
    using SegmentSet = std::set<Segment>;
    std::unique_ptr<SegmentSet> segmentSet;

    using iterator = Segments::iterator;
    using const_iterator = Segments::const_iterator;

    iterator begin() { return segments.begin(); }
    iterator end()   { return segments.end(); }

    const_iterator begin() const { return segments.begin(); }
    const_iterator end() const  { return segments.end(); }

    using vni_iterator = VNInfoList::iterator;
    using const_vni_iterator = VNInfoList::const_iterator;

    vni_iterator vni_begin() { return valnos.begin(); }
    vni_iterator vni_end()   { return valnos.end(); }

    const_vni_iterator vni_begin() const { return valnos.begin(); }
    const_vni_iterator vni_end() const   { return valnos.end(); }

    iterator_range<vni_iterator> vnis() {
      return make_range(vni_begin(), vni_end());
    }

    iterator_range<const_vni_iterator> vnis() const {
      return make_range(vni_begin(), vni_end());
    }
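
    // Illustrative sketch (not part of the original header): enumerating the
    // segments and value numbers of a live range `LR` (dbgs() assumed from
    // llvm/Support/Debug.h).
    //
    //   for (const LiveRange::Segment &S : LR.segments)
    //     dbgs() << '[' << S.start << ',' << S.end << "):" << S.valno->id << '\n';
    //   for (const VNInfo *VNI : LR.vnis())
    //     dbgs() << "value " << VNI->id << " defined at " << VNI->def << '\n';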

    /// Constructs a new LiveRange object.
    LiveRange(bool UseSegmentSet = false)
        : segmentSet(UseSegmentSet ? std::make_unique<SegmentSet>()
                                   : nullptr) {}

    /// Constructs a new LiveRange object by copying segments and valnos from
    /// another LiveRange.
    LiveRange(const LiveRange &Other, BumpPtrAllocator &Allocator) {
      assert(Other.segmentSet == nullptr &&
             "Copying of LiveRanges with active SegmentSets is not supported");
      assign(Other, Allocator);
    }

    /// Copies value numbers and live segments from \p Other into this range.
    void assign(const LiveRange &Other, BumpPtrAllocator &Allocator) {
      if (this == &Other)
        return;

      assert(Other.segmentSet == nullptr &&
             "Copying of LiveRanges with active SegmentSets is not supported");
      // Duplicate valnos.
      for (const VNInfo *VNI : Other.valnos)
        createValueCopy(VNI, Allocator);
      // Now we can copy segments and remap their valnos.
      for (const Segment &S : Other.segments)
        segments.push_back(Segment(S.start, S.end, valnos[S.valno->id]));
    }

    /// advanceTo - Advance the specified iterator to point to the Segment
    /// containing the specified position, or end() if the position is past the
    /// end of the range.  If no Segment contains this position, but the
    /// position is in a hole, this method returns an iterator pointing to the
    /// Segment immediately after the hole.
    iterator advanceTo(iterator I, SlotIndex Pos) {
      assert(I != end());
      if (Pos >= endIndex())
        return end();
      while (I->end <= Pos) ++I;
      return I;
    }

    const_iterator advanceTo(const_iterator I, SlotIndex Pos) const {
      assert(I != end());
      if (Pos >= endIndex())
        return end();
      while (I->end <= Pos) ++I;
      return I;
    }

    /// find - Return an iterator pointing to the first segment that ends after
    /// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
    /// when searching large ranges.
    ///
    /// If Pos is contained in a Segment, that segment is returned.
    /// If Pos is in a hole, the following Segment is returned.
    /// If Pos is beyond endIndex, end() is returned.
    iterator find(SlotIndex Pos);

    const_iterator find(SlotIndex Pos) const {
      return const_cast<LiveRange*>(this)->find(Pos);
    }

    void clear() {
      valnos.clear();
      segments.clear();
    }

    size_t size() const {
      return segments.size();
    }

    bool hasAtLeastOneValue() const { return !valnos.empty(); }

    bool containsOneValue() const { return valnos.size() == 1; }

    unsigned getNumValNums() const { return (unsigned)valnos.size(); }

    /// getValNumInfo - Returns pointer to the specified val#.
    ///
    inline VNInfo *getValNumInfo(unsigned ValNo) {
      return valnos[ValNo];
    }
    inline const VNInfo *getValNumInfo(unsigned ValNo) const {
      return valnos[ValNo];
    }

    /// containsValue - Returns true if VNI belongs to this range.
    bool containsValue(const VNInfo *VNI) const {
      return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id);
    }

    /// getNextValue - Create a new value number and return it.  @p def
    /// specifies the index of the instruction that defines the value number.
    VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
      VNInfo *VNI =
        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
      valnos.push_back(VNI);
      return VNI;
    }

    /// createDeadDef - Make sure the range has a value defined at Def.
    /// If one already exists, return it. Otherwise allocate a new value and
    /// add liveness for a dead def.
    VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc);

    /// Create a def of value @p VNI. Return @p VNI. If there already exists
    /// a definition at VNI->def, the value defined there must be @p VNI.
    VNInfo *createDeadDef(VNInfo *VNI);

    /// Create a copy of the given value. The new value will be identical except
    /// for the Value number.
    VNInfo *createValueCopy(const VNInfo *orig,
                            VNInfo::Allocator &VNInfoAllocator) {
      VNInfo *VNI =
        new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
      valnos.push_back(VNI);
      return VNI;
    }

    /// RenumberValues - Renumber all values in order of appearance and remove
    /// unused values.
    void RenumberValues();

    /// MergeValueNumberInto - This method is called when two value numbers
    /// are found to be equivalent.  This eliminates V1, replacing all
    /// segments with the V1 value number with the V2 value number.  This can
    /// cause merging of V1/V2 value numbers and compaction of the value space.
    VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);

    /// Merge all of the live segments of a specific val# in RHS into this live
    /// range as the specified value number. The segments in RHS are allowed
    /// to overlap with segments in the current range; the value numbers of
    /// the overlapped live segments are replaced with the specified value
    /// number.
    void MergeSegmentsInAsValue(const LiveRange &RHS, VNInfo *LHSValNo);

    /// MergeValueInAsValue - Merge all of the segments of a specific val#
    /// in RHS into this live range as the specified value number.
    /// The segments in RHS are allowed to overlap with segments in the
    /// current range, but only if the overlapping segments have the
    /// specified value number.
    void MergeValueInAsValue(const LiveRange &RHS,
                             const VNInfo *RHSValNo, VNInfo *LHSValNo);

    bool empty() const { return segments.empty(); }

    /// beginIndex - Return the lowest numbered slot covered.
    SlotIndex beginIndex() const {
      assert(!empty() && "Call to beginIndex() on empty range.");
      return segments.front().start;
    }

    /// endIndex - Return the maximum point of the whole range,
    /// exclusive.
    SlotIndex endIndex() const {
      assert(!empty() && "Call to endIndex() on empty range.");
      return segments.back().end;
    }

    bool expiredAt(SlotIndex index) const {
      return index >= endIndex();
    }

    bool liveAt(SlotIndex index) const {
      const_iterator r = find(index);
      return r != end() && r->start <= index;
    }

    /// Return the segment that contains the specified index, or null if there
    /// is none.
    const Segment *getSegmentContaining(SlotIndex Idx) const {
      const_iterator I = FindSegmentContaining(Idx);
      return I == end() ? nullptr : &*I;
    }

    /// Return the live segment that contains the specified index, or null if
    /// there is none.
    Segment *getSegmentContaining(SlotIndex Idx) {
      iterator I = FindSegmentContaining(Idx);
      return I == end() ? nullptr : &*I;
    }

    /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
    VNInfo *getVNInfoAt(SlotIndex Idx) const {
      const_iterator I = FindSegmentContaining(Idx);
      return I == end() ? nullptr : I->valno;
    }

    /// getVNInfoBefore - Return the VNInfo that is live up to but not
    /// necessarily including Idx, or NULL. Use this to find the reaching def
    /// used by an instruction at this SlotIndex position.
    VNInfo *getVNInfoBefore(SlotIndex Idx) const {
      const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
      return I == end() ? nullptr : I->valno;
    }

    /// Return an iterator to the segment that contains the specified index, or
    /// end() if there is none.
    iterator FindSegmentContaining(SlotIndex Idx) {
      iterator I = find(Idx);
      return I != end() && I->start <= Idx ? I : end();
    }

    const_iterator FindSegmentContaining(SlotIndex Idx) const {
      const_iterator I = find(Idx);
      return I != end() && I->start <= Idx ? I : end();
    }
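
    // Illustrative sketch (not part of the original header): behavior on a
    // range with a hole, e.g. segments [4,8) and [12,16) (slot indexes shown
    // as plain numbers for illustration only).
    //
    //   LR.find(10)                  --> iterator to [12,16) (next segment)
    //   LR.FindSegmentContaining(10) --> end()  (10 lies in the hole)
    //   LR.liveAt(10)                --> false
    //   LR.getVNInfoAt(13)           --> value number of the value in [12,16)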

    /// overlaps - Return true if the intersection of the two live ranges is
    /// not empty.
    bool overlaps(const LiveRange &other) const {
      if (other.empty())
        return false;
      return overlapsFrom(other, other.begin());
    }

    /// overlaps - Return true if the two ranges have overlapping segments
    /// that are not coalescable according to CP.
    ///
    /// Overlapping segments where one range is defined by a coalescable
    /// copy are allowed.
    bool overlaps(const LiveRange &Other, const CoalescerPair &CP,
                  const SlotIndexes&) const;

    /// overlaps - Return true if the live range overlaps an interval specified
    /// by [Start, End).
    bool overlaps(SlotIndex Start, SlotIndex End) const;

    /// overlapsFrom - Return true if the intersection of the two live ranges
    /// is not empty.  The specified iterator is a hint that we can begin
    /// scanning the Other range starting at I.
    bool overlapsFrom(const LiveRange &Other, const_iterator StartPos) const;

    /// Returns true if all segments of the @p Other live range are completely
    /// covered by this live range.
    /// Adjacent live ranges do not affect the covering: the live range
    /// [1,5](5,10] covers (3,7].
    bool covers(const LiveRange &Other) const;

    /// Add the specified Segment to this range, merging segments as
    /// appropriate.  This returns an iterator to the inserted segment (which
    /// may have grown since it was inserted).
    iterator addSegment(Segment S);

    /// Attempt to extend a value defined after @p StartIdx to include @p Use.
    /// Both @p StartIdx and @p Use should be in the same basic block. In case
    /// of subranges, an extension could be prevented by an explicit "undef"
    /// caused by a <def,read-undef> on a non-overlapping lane. The list of
    /// location of such "undefs" should be provided in @p Undefs.
    /// The return value is a pair: the first element is VNInfo of the value
    /// that was extended (possibly nullptr), the second is a boolean value
    /// indicating whether an "undef" was encountered.
    /// If this range is live before @p Use in the basic block that starts at
    /// @p StartIdx, and there is no intervening "undef", extend it to be live
    /// up to @p Use, and return the pair {value, false}. If there is no
    /// segment before @p Use and there is no "undef" between @p StartIdx and
    /// @p Use, return {nullptr, false}. If there is an "undef" before @p Use,
    /// return {nullptr, true}.
    std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
        SlotIndex StartIdx, SlotIndex Use);

    /// Simplified version of the above "extendInBlock", which assumes that
    /// no register lanes are undefined by <def,read-undef> operands.
    /// If this range is live before @p Use in the basic block that starts
    /// at @p StartIdx, extend it to be live up to @p Use, and return the
    /// value. If there is no segment before @p Use, return nullptr.
    VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use);

    /// join - Join two live ranges (this, and other) together.  This applies
    /// mappings to the value numbers in the LHS/RHS ranges as specified.  If
    /// the ranges are not joinable, this aborts.
    void join(LiveRange &Other,
              const int *ValNoAssignments,
              const int *RHSValNoAssignments,
              SmallVectorImpl<VNInfo *> &NewVNInfo);

    /// True iff this live range is a single segment that lies between the
    /// specified boundaries, exclusively. Vregs live across a backedge are not
    /// considered local. The boundaries are expected to lie within an extended
    /// basic block, so vregs that are not live out should contain no holes.
    bool isLocal(SlotIndex Start, SlotIndex End) const {
      return beginIndex() > Start.getBaseIndex() &&
        endIndex() < End.getBoundaryIndex();
    }

    /// Remove the specified segment from this range.  Note that the segment
    /// must be a single Segment in its entirety.
    void removeSegment(SlotIndex Start, SlotIndex End,
                       bool RemoveDeadValNo = false);

    void removeSegment(Segment S, bool RemoveDeadValNo = false) {
      removeSegment(S.start, S.end, RemoveDeadValNo);
    }

    /// Remove segment pointed to by iterator @p I from this range.
    iterator removeSegment(iterator I, bool RemoveDeadValNo = false);

    /// Mark \p ValNo for deletion if no segments in this range use it.
    void removeValNoIfDead(VNInfo *ValNo);

    /// Query Liveness at Idx.
    /// The sub-instruction slot of Idx doesn't matter, only the instruction
    /// it refers to is considered.
    LiveQueryResult Query(SlotIndex Idx) const {
      // Find the segment that enters the instruction.
      const_iterator I = find(Idx.getBaseIndex());
      const_iterator E = end();
      if (I == E)
        return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);

      // Is this an instruction live-in segment?
      // If Idx is the start index of a basic block, include live-in segments
      // that start at Idx.getBaseIndex().
      VNInfo *EarlyVal = nullptr;
      VNInfo *LateVal  = nullptr;
      SlotIndex EndPoint;
      bool Kill = false;
      if (I->start <= Idx.getBaseIndex()) {
        EarlyVal = I->valno;
        EndPoint = I->end;
        // Move to the potentially live-out segment.
        if (SlotIndex::isSameInstr(Idx, I->end)) {
          Kill = true;
          if (++I == E)
            return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
        }
        // Special case: A PHIDef value can have its def in the middle of a
        // segment if the value happens to be live out of the layout
        // predecessor.
        // Such a value is not live-in.
        if (EarlyVal->def == Idx.getBaseIndex())
          EarlyVal = nullptr;
      }
      // I now points to the segment that may be live-through, or defined by
      // this instr. Ignore segments starting after the current instr.
      if (!SlotIndex::isEarlierInstr(Idx, I->start)) {
        LateVal = I->valno;
        EndPoint = I->end;
      }
      return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
    }

    /// removeValNo - Remove all the segments defined by the specified value#.
    /// Also remove the value# from value# list.
    void removeValNo(VNInfo *ValNo);

    /// Returns true if the live range is zero length, i.e. no live segments
    /// span instructions. It doesn't pay to spill such a range.
    bool isZeroLength(SlotIndexes *Indexes) const {
      for (const Segment &S : segments)
        if (Indexes->getNextNonNullIndex(S.start).getBaseIndex() <
            S.end.getBaseIndex())
          return false;
      return true;
    }

    // Returns true if any segment in the live range contains any of the
    // provided slot indexes.  Slots which occur in holes between
    // segments will not cause the function to return true.
    bool isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const;

    bool operator<(const LiveRange& other) const {
      const SlotIndex &thisIndex = beginIndex();
      const SlotIndex &otherIndex = other.beginIndex();
      return thisIndex < otherIndex;
    }

    /// Returns true if there is an explicit "undef" between @p Begin
    /// and @p End.
    bool isUndefIn(ArrayRef<SlotIndex> Undefs, SlotIndex Begin,
                   SlotIndex End) const {
      return llvm::any_of(Undefs, [Begin, End](SlotIndex Idx) -> bool {
        return Begin <= Idx && Idx < End;
      });
    }

    /// Flush segment set into the regular segment vector.
    /// The method is to be called after the live range
    /// has been created, if use of the segment set was
    /// activated in the constructor of the live range.
    void flushSegmentSet();

    /// Stores indexes from the input index sequence R at which this LiveRange
    /// is live to the output O iterator.
    /// R is a range of random-access iterators over the input indexes, sorted
    /// in ascending order. Indexes stored at O are likewise sorted ascending,
    /// so the output can be used directly in a subsequent search (for example
    /// for subranges). Returns true if at least one index was found.
    template <typename Range, typename OutputIt>
    bool findIndexesLiveAt(Range &&R, OutputIt O) const {
      assert(llvm::is_sorted(R));
      auto Idx = R.begin(), EndIdx = R.end();
      auto Seg = segments.begin(), EndSeg = segments.end();
      bool Found = false;
      while (Idx != EndIdx && Seg != EndSeg) {
        // If Seg ends at or before *Idx, binary-search for the first segment
        // that ends after *Idx.
        if (Seg->end <= *Idx) {
          Seg =
              std::upper_bound(++Seg, EndSeg, *Idx, [=](auto V, const auto &S) {
                return V < S.end;
              });
          if (Seg == EndSeg)
            break;
        }
        auto NotLessStart = std::lower_bound(Idx, EndIdx, Seg->start);
        if (NotLessStart == EndIdx)
          break;
        auto NotLessEnd = std::lower_bound(NotLessStart, EndIdx, Seg->end);
        if (NotLessEnd != NotLessStart) {
          Found = true;
          O = std::copy(NotLessStart, NotLessEnd, O);
        }
        Idx = NotLessEnd;
        ++Seg;
      }
      return Found;
    }
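
    // Illustrative sketch (not part of the original header): collecting the
    // subset of sorted slot indexes at which range `LR` is live.  `Slots` is
    // assumed to be an ascending-sorted SmallVector<SlotIndex, 8>; requires
    // <iterator> for std::back_inserter.
    //
    //   SmallVector<SlotIndex, 8> LiveSlots;
    //   if (LR.findIndexesLiveAt(Slots, std::back_inserter(LiveSlots))) {
    //     // LiveSlots now holds, in ascending order, every index in Slots
    //     // that falls inside some segment of LR.
    //   }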

    void print(raw_ostream &OS) const;
    void dump() const;

    /// Walk the range and assert if any invariants fail to hold.
    ///
    /// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
    void verify() const {}
#else
    void verify() const;
#endif

  protected:
    /// Append a segment to the list of segments.
    void append(const LiveRange::Segment S);

  private:
    friend class LiveRangeUpdater;
    void addSegmentToSet(Segment S);
    void markValNoForDeletion(VNInfo *V);
  };

  inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) {
    LR.print(OS);
    return OS;
  }

  /// LiveInterval - This class represents the liveness of a register,
  /// or stack slot.
  class LiveInterval : public LiveRange {
  public:
    using super = LiveRange;

    /// A live range for subregisters. The LaneMask specifies which parts of the
    /// super register are covered by the interval.
    /// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
    class SubRange : public LiveRange {
    public:
      SubRange *Next = nullptr;
      LaneBitmask LaneMask;

      /// Constructs a new SubRange object.
      SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {}

      /// Constructs a new SubRange object by copying liveness from @p Other.
      SubRange(LaneBitmask LaneMask, const LiveRange &Other,
               BumpPtrAllocator &Allocator)
        : LiveRange(Other, Allocator), LaneMask(LaneMask) {}

      void print(raw_ostream &OS) const;
      void dump() const;
    };

  private:
    SubRange *SubRanges = nullptr; ///< Singly linked list of subregister live
                                   /// ranges.
    const Register Reg; // the register or stack slot of this interval.
    float Weight = 0.0; // weight of this interval

  public:
    Register reg() const { return Reg; }
    float weight() const { return Weight; }
    void incrementWeight(float Inc) { Weight += Inc; }
    void setWeight(float Value) { Weight = Value; }

    LiveInterval(unsigned Reg, float Weight) : Reg(Reg), Weight(Weight) {}

    ~LiveInterval() {
      clearSubRanges();
    }

    template<typename T>
    class SingleLinkedListIterator {
      T *P;

    public:
      SingleLinkedListIterator(T *P) : P(P) {}

      SingleLinkedListIterator<T> &operator++() {
        P = P->Next;
        return *this;
      }
      SingleLinkedListIterator<T> operator++(int) {
        SingleLinkedListIterator res = *this;
        ++*this;
        return res;
      }
      bool operator!=(const SingleLinkedListIterator<T> &Other) const {
        return P != Other.operator->();
      }
      bool operator==(const SingleLinkedListIterator<T> &Other) const {
        return P == Other.operator->();
      }
      T &operator*() const {
        return *P;
      }
      T *operator->() const {
        return P;
      }
    };

    using subrange_iterator = SingleLinkedListIterator<SubRange>;
    using const_subrange_iterator = SingleLinkedListIterator<const SubRange>;

    subrange_iterator subrange_begin() {
      return subrange_iterator(SubRanges);
    }
    subrange_iterator subrange_end() {
      return subrange_iterator(nullptr);
    }

    const_subrange_iterator subrange_begin() const {
      return const_subrange_iterator(SubRanges);
    }
    const_subrange_iterator subrange_end() const {
      return const_subrange_iterator(nullptr);
    }

    iterator_range<subrange_iterator> subranges() {
      return make_range(subrange_begin(), subrange_end());
    }

    iterator_range<const_subrange_iterator> subranges() const {
      return make_range(subrange_begin(), subrange_end());
    }
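
    // Illustrative sketch (not part of the original header): walking the
    // subregister live ranges of an interval `LI` that tracks subreg liveness.
    //
    //   if (LI.hasSubRanges())
    //     for (const LiveInterval::SubRange &SR : LI.subranges()) {
    //       LaneBitmask Mask = SR.LaneMask; // lanes covered by this subrange
    //       // SR is itself a LiveRange: Query(), liveAt(), etc. all apply.
    //     }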

    /// Creates a new empty subregister live range. The range is added at the
    /// beginning of the subrange list; subrange iterators stay valid.
    SubRange *createSubRange(BumpPtrAllocator &Allocator,
                             LaneBitmask LaneMask) {
      SubRange *Range = new (Allocator) SubRange(LaneMask);
      appendSubRange(Range);
      return Range;
    }

    /// Like createSubRange() but the new range is filled with a copy of the
    /// liveness information in @p CopyFrom.
    SubRange *createSubRangeFrom(BumpPtrAllocator &Allocator,
                                 LaneBitmask LaneMask,
                                 const LiveRange &CopyFrom) {
      SubRange *Range = new (Allocator) SubRange(LaneMask, CopyFrom, Allocator);
      appendSubRange(Range);
      return Range;
    }

    /// Returns true if subregister liveness information is available.
    bool hasSubRanges() const {
      return SubRanges != nullptr;
    }

    /// Removes all subregister liveness information.
    void clearSubRanges();

    /// Removes all subranges without any segments (subranges without segments
    /// are not considered valid and should only exist temporarily).
    void removeEmptySubRanges();

    /// getSize - Returns the sum of sizes of all the LiveRange's.
    ///
    unsigned getSize() const;

    /// isSpillable - Can this interval be spilled?
    bool isSpillable() const { return Weight != huge_valf; }

    /// markNotSpillable - Mark interval as not spillable
    void markNotSpillable() { Weight = huge_valf; }

    /// For a given lane mask @p LaneMask, compute indexes at which the
    /// lane is marked undefined by subregister <def,read-undef> definitions.
    void computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
                               LaneBitmask LaneMask,
                               const MachineRegisterInfo &MRI,
                               const SlotIndexes &Indexes) const;

    /// Refines the subranges to support \p LaneMask. This may only be called
    /// for LI.hasSubRanges() == true. Subregister ranges are split or created
    /// until \p LaneMask can be matched exactly. \p Apply is executed on the
    /// matching subranges.
    ///
    /// Example:
    ///    Given an interval with subranges with lanemasks L0F00, L00F0 and
    ///    L000F, refining for mask L0018. Will split the L00F0 lane into
    ///    L00E0 and L0010 and the L000F lane into L0007 and L0008. The Apply
    ///    function will be applied to the L0010 and L0008 subranges.
    ///
    /// \p Indexes and \p TRI are required to clean up the VNIs that
    /// don't define the related lane masks after they get shrunk. E.g.,
    /// when L000F gets split into L0007 and L0008 maybe only a subset
    /// of the VNIs that defined L000F defines L0007.
    ///
    /// The clean up of the VNIs need to look at the actual instructions
    /// to decide what is or is not live at a definition point. If the
    /// update of the subranges occurs while the IR does not reflect these
    /// changes, \p ComposeSubRegIdx can be used to specify how the
    /// definition are going to be rewritten.
    /// E.g., let's say we want to merge:
    ///     V1.sub1:<2 x s32> = COPY V2.sub3:<4 x s32>
    /// We do that by choosing a class where sub1:<2 x s32> and sub3:<4 x s32>
    /// overlap, i.e., by choosing a class where we can find "offset + 1 == 3".
    /// Put differently we align V2's sub3 with V1's sub1:
    /// V2: sub0 sub1 sub2 sub3
    /// V1: <offset>  sub0 sub1
    ///
    /// This offset will look like a composed subregidx in the class:
    ///     V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32>
    /// =>  V1.(composed sub2 with sub1):<4 x s32> = COPY V2.sub3:<4 x s32>
    ///
    /// Now if we didn't rewrite the uses and defs of V1, all the checks for V1
    /// need to account for this offset.
    /// This happens during coalescing, where we update the live ranges while
    /// the old IR is still around, because updating the IR on-the-fly would
    /// clobber some of the information about what the live ranges being
    /// updated look like.
    void refineSubRanges(BumpPtrAllocator &Allocator, LaneBitmask LaneMask,
                         std::function<void(LiveInterval::SubRange &)> Apply,
                         const SlotIndexes &Indexes,
                         const TargetRegisterInfo &TRI,
                         unsigned ComposeSubRegIdx = 0);

    bool operator<(const LiveInterval& other) const {
      const SlotIndex &thisIndex = beginIndex();
      const SlotIndex &otherIndex = other.beginIndex();
      return std::tie(thisIndex, Reg) < std::tie(otherIndex, other.Reg);
    }

    void print(raw_ostream &OS) const;
    void dump() const;

    /// Walks the interval and assert if any invariants fail to hold.
    ///
    /// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
    void verify(const MachineRegisterInfo *MRI = nullptr) const {}
#else
    void verify(const MachineRegisterInfo *MRI = nullptr) const;
#endif

  private:
    /// Appends @p Range to SubRanges list.
    void appendSubRange(SubRange *Range) {
      Range->Next = SubRanges;
      SubRanges = Range;
    }

    /// Free memory held by SubRange.
    void freeSubRange(SubRange *S);
  };

  inline raw_ostream &operator<<(raw_ostream &OS,
                                 const LiveInterval::SubRange &SR) {
    SR.print(OS);
    return OS;
  }

  inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) {
    LI.print(OS);
    return OS;
  }

  raw_ostream &operator<<(raw_ostream &OS, const LiveRange::Segment &S);

  inline bool operator<(SlotIndex V, const LiveRange::Segment &S) {
    return V < S.start;
  }

  inline bool operator<(const LiveRange::Segment &S, SlotIndex V) {
    return S.start < V;
  }

  /// Helper class for performant LiveRange bulk updates.
  ///
  /// Calling LiveRange::addSegment() repeatedly can be expensive on large
  /// live ranges because segments after the insertion point may need to be
  /// shifted. The LiveRangeUpdater class can defer the shifting when adding
  /// many segments in order.
  ///
  /// The LiveRange will be in an invalid state until flush() is called.
  class LiveRangeUpdater {
    LiveRange *LR;
    SlotIndex LastStart;
    LiveRange::iterator WriteI;
    LiveRange::iterator ReadI;
    SmallVector<LiveRange::Segment, 16> Spills;
    void mergeSpills();

  public:
    /// Create a LiveRangeUpdater for adding segments to LR.
    /// LR will temporarily be in an invalid state until flush() is called.
    LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}

    ~LiveRangeUpdater() { flush(); }

    /// Add a segment to LR and coalesce when possible, just like
    /// LR.addSegment(). Segments should be added in increasing start order for
    /// best performance.
    void add(LiveRange::Segment);

    void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) {
      add(LiveRange::Segment(Start, End, VNI));
    }

    /// Return true if the LR is currently in an invalid state, and flush()
    /// needs to be called.
    bool isDirty() const { return LastStart.isValid(); }

    /// Flush the updater state to LR so it is valid and contains all added
    /// segments.
    void flush();

    /// Select a different destination live range.
    void setDest(LiveRange *lr) {
      if (LR != lr && isDirty())
        flush();
      LR = lr;
    }

    /// Get the current destination live range.
    LiveRange *getDest() const { return LR; }

    void dump() const;
    void print(raw_ostream&) const;
  };
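
  // Illustrative sketch (not part of the original header): bulk-populating a
  // live range.  `LR` is a LiveRange, `VNI` a VNInfo owned by it, and
  // `NewSegments` a hypothetical container of (Start, End) pairs sorted by
  // increasing start.  The range is invalid until the updater flushes, which
  // happens at the latest in the destructor.
  //
  //   {
  //     LiveRangeUpdater Updater(&LR);
  //     for (const auto &Seg : NewSegments)
  //       Updater.add(Seg.first, Seg.second, VNI);
  //   } // ~LiveRangeUpdater() calls flush(); LR is valid again.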

  inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) {
    X.print(OS);
    return OS;
  }

  /// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
  /// LiveInterval into equivalence classes of connected components. A
  /// LiveInterval that has multiple connected components can be broken into
  /// multiple LiveIntervals.
  ///
  /// Given a LiveInterval that may have multiple connected components, run:
  ///
  ///   unsigned numComps = ConEQ.Classify(LI);
  ///   if (numComps > 1) {
  ///     // allocate numComps-1 new LiveIntervals into LIV[1..]
  ///     ConEQ.Distribute(LI, LIV, MRI);
  ///   }
  class ConnectedVNInfoEqClasses {
    LiveIntervals &LIS;
    IntEqClasses EqClass;

  public:
    explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {}

    /// Classify the values in \p LR into connected components.
    /// Returns the number of connected components.
    unsigned Classify(const LiveRange &LR);

    /// getEqClass - Classify creates equivalence classes numbered 0..N. Return
    /// the equivalence class assigned the VNI.
    unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; }

    /// Distribute values in \p LI into separate LiveIntervals, one for each
    /// connected component. \p LIV must have an empty LiveInterval for
    /// each additional connected component. The first connected component is
    /// left in \p LI.
    void Distribute(LiveInterval &LI, LiveInterval *LIV[],
                    MachineRegisterInfo &MRI);
  };

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEINTERVAL_H
//==------ llvm/CodeGen/LoopTraversal.h - Loop Traversal -*- C++ -*---------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Loop Traversal logic.
///
/// This class provides the basic blocks traversal order used by passes like
/// ReachingDefAnalysis and ExecutionDomainFix.
/// It identifies basic blocks that are part of loops and should be visited
/// twice and returns an efficient traversal order for all the blocks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LOOPTRAVERSAL_H
#define LLVM_CODEGEN_LOOPTRAVERSAL_H

#include "llvm/ADT/SmallVector.h"

namespace llvm {

class MachineBasicBlock;
class MachineFunction;

/// This class provides the basic blocks traversal order used by passes like
/// ReachingDefAnalysis and ExecutionDomainFix.
/// It identifies basic blocks that are part of loops and should be visited
/// twice and returns an efficient traversal order for all the blocks.
///
/// We want to visit every instruction in every basic block in order to update
/// its execution domain or collect clearance information. However, for the
/// clearance calculation, we need to know clearances from all predecessors
/// (including any backedges), therefore we need to visit some blocks twice.
/// As an example, consider the following loop.
///
///
///    PH -> A -> B (xmm<Undef> -> xmm<Def>) -> C -> D -> EXIT
///          ^                                  |
///          +----------------------------------+
///
/// The iteration order this pass will return is as follows:
/// Optimized: PH A B C A' B' C' D
///
/// The basic block order is constructed as follows:
/// Once we finish processing some block, we update the counters in MBBInfos
/// and re-process any successors that are now 'done'.
/// We call a block that is ready for its final round of processing `done`
/// (isBlockDone), e.g. when all predecessor information is known.
///
/// Note that a naive traversal order would be to do two complete passes over
/// all basic blocks/instructions, the first for recording clearances, the
/// second for updating clearance based on backedges.
/// However, for functions without backedges, or functions with a lot of
/// straight-line code, and a small loop, that would be a lot of unnecessary
/// work (since only the BBs that are part of the loop require two passes).
///
/// E.g., the naive iteration order for the above example is as follows:
/// Naive: PH A B C D A' B' C' D'
///
/// In the optimized approach we avoid processing D twice, because we
/// can entirely process the predecessors before getting to D.
class LoopTraversal {
private:
  struct MBBInfo {
    /// Whether we have gotten to this block in primary processing yet.
    bool PrimaryCompleted = false;

    /// The number of predecessors for which primary processing has completed
    unsigned IncomingProcessed = 0;

    /// The value of `IncomingProcessed` at the start of primary processing
    unsigned PrimaryIncoming = 0;

    /// The number of predecessors for which all processing steps are done.
    unsigned IncomingCompleted = 0;

    MBBInfo() = default;
  };
  using MBBInfoMap = SmallVector<MBBInfo, 4>;
  /// Helps keep track of whether we have processed this block and all its
  /// predecessors.
  MBBInfoMap MBBInfos;

public:
  struct TraversedMBBInfo {
    /// The basic block.
    MachineBasicBlock *MBB = nullptr;

    /// True if this is the first time we process the basic block.
    bool PrimaryPass = true;

    /// True if the block is ready for its final round of processing.
    bool IsDone = true;

    TraversedMBBInfo(MachineBasicBlock *BB = nullptr, bool Primary = true,
                     bool Done = true)
        : MBB(BB), PrimaryPass(Primary), IsDone(Done) {}
  };
  LoopTraversal() = default;

  /// Identifies basic blocks that are part of loops and should be
  /// visited twice and returns an efficient traversal order for all the blocks.
  typedef SmallVector<TraversedMBBInfo, 4> TraversalOrder;
  TraversalOrder traverse(MachineFunction &MF);

private:
  /// Returns true if the block is ready for its final round of processing.
  bool isBlockDone(MachineBasicBlock *MBB);
};
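
// Illustrative sketch (not part of the original header): consuming the
// traversal order.  `MF` is assumed to be a valid MachineFunction.
//
//   LoopTraversal Traversal;
//   LoopTraversal::TraversalOrder Order = Traversal.traverse(MF);
//   for (const LoopTraversal::TraversedMBBInfo &TMBB : Order) {
//     if (TMBB.PrimaryPass) {
//       // First visit: record per-block information (e.g. clearances).
//     }
//     if (TMBB.IsDone) {
//       // All predecessor info (incl. backedges) is known: finalize TMBB.MBB.
//     }
//   }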

} // namespace llvm

#endif // LLVM_CODEGEN_LOOPTRAVERSAL_H
//===- ReplaceWithVeclib.h - Replace vector intrinsics with veclib calls --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Replaces calls to LLVM vector intrinsics (i.e., calls to LLVM intrinsics
// with vector operands) with matching calls to functions from a vector
// library (e.g., libmvec, SVML) according to TargetLibraryInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REPLACEWITHVECLIB_H
#define LLVM_CODEGEN_REPLACEWITHVECLIB_H

#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"

namespace llvm {
class Function;
struct ReplaceWithVeclib : public PassInfoMixin<ReplaceWithVeclib> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
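
// Illustrative sketch (not part of the original header): scheduling the
// new-PM pass.  Assumes a FunctionAnalysisManager `FAM` that provides a
// TargetLibraryInfo configured with a vector library (e.g. SVML or libmvec).
//
//   FunctionPassManager FPM;
//   FPM.addPass(ReplaceWithVeclib());
//   FPM.run(F, FAM); // F: Function&, FAM: FunctionAnalysisManager&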

// Legacy pass
struct ReplaceWithVeclibLegacy : public FunctionPass {
  static char ID;
  ReplaceWithVeclibLegacy() : FunctionPass(ID) {
    initializeReplaceWithVeclibLegacyPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

} // End namespace llvm
#endif // LLVM_CODEGEN_REPLACEWITHVECLIB_H
//===- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The abstract base class GCMetadataPrinter supports writing GC metadata tables
// as assembly code. This is a separate class from GCStrategy in order to allow
// users of the LLVM JIT to avoid linking with the AsmWriter.
//
// Subclasses of GCMetadataPrinter must be registered using the
// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
// because these subclasses are logically plugins for the AsmWriter.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
#define LLVM_CODEGEN_GCMETADATAPRINTER_H

#include "llvm/Support/Registry.h"

namespace llvm {

class AsmPrinter;
class GCMetadataPrinter;
class GCModuleInfo;
class GCStrategy;
class Module;
class StackMaps;

/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
/// defaults from Registry.
using GCMetadataPrinterRegistry = Registry<GCMetadataPrinter>;

/// GCMetadataPrinter - Emits GC metadata as assembly code.  Instances are
/// created, managed, and owned by the AsmPrinter.
class GCMetadataPrinter {
private:
  friend class AsmPrinter;

  GCStrategy *S;

protected:
  // May only be subclassed.
  GCMetadataPrinter();

public:
  GCMetadataPrinter(const GCMetadataPrinter &) = delete;
  GCMetadataPrinter &operator=(const GCMetadataPrinter &) = delete;
  virtual ~GCMetadataPrinter();

  GCStrategy &getStrategy() { return *S; }

  /// Called before the assembly for the module is generated by
  /// the AsmPrinter (but after target-specific hooks).
  virtual void beginAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}

  /// Called after the assembly for the module is generated by
  /// the AsmPrinter (but before target-specific hooks).
  virtual void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}

  /// Called when the stack maps are generated. Return true if
  /// stack maps with a custom format are generated. Otherwise
  /// returns false and the default format will be used.
  virtual bool emitStackMaps(StackMaps &SM, AsmPrinter &AP) { return false; }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_GCMETADATAPRINTER_H
//===- llvm/CodeGen/Spiller.h - Spiller -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SPILLER_H
#define LLVM_CODEGEN_SPILLER_H

namespace llvm {

class LiveRangeEdit;
class MachineFunction;
class MachineFunctionPass;
class VirtRegMap;
class VirtRegAuxInfo;

/// Spiller interface.
///
/// Implementations are utility classes which insert spill or remat code on
/// demand.
class Spiller {
  virtual void anchor();

public:
  virtual ~Spiller() = 0;

  /// spill - Spill the LRE.getParent() live interval.
  virtual void spill(LiveRangeEdit &LRE) = 0;

  virtual void postOptimization() {}
};

/// Create and return a spiller that will insert spill code directly instead
/// of deferring through VirtRegMap.
Spiller *createInlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF,
                             VirtRegMap &VRM, VirtRegAuxInfo &VRAI);
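
// Illustrative sketch (not part of the original header): typical ownership in
// a register allocator.  `Pass`, `MF`, `VRM`, and `VRAI` are assumed to be the
// allocator's pass, machine function, virtual register map, and spill-weight
// calculator; `LRE` a LiveRangeEdit for the interval being spilled.
//
//   std::unique_ptr<Spiller> SpillerInstance(
//       createInlineSpiller(Pass, MF, VRM, VRAI));
//   SpillerInstance->spill(LRE);
//   SpillerInstance->postOptimization();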

} // end namespace llvm

#endif // LLVM_CODEGEN_SPILLER_H
//===- llvm/CodeGen/MachineInstrBundle.h - MI bundle utilities --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provide utility functions to manipulate machine instruction
// bundles.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H

#include "llvm/CodeGen/MachineBasicBlock.h"

namespace llvm {

/// finalizeBundle - Finalize a machine instruction bundle which includes
/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
/// This routine adds a BUNDLE instruction to represent the bundle, it adds
/// IsInternalRead markers to MachineOperands which are defined inside the
/// bundle, and it copies externally visible defs and uses to the BUNDLE
/// instruction.
void finalizeBundle(MachineBasicBlock &MBB,
                    MachineBasicBlock::instr_iterator FirstMI,
                    MachineBasicBlock::instr_iterator LastMI);

/// finalizeBundle - Same functionality as the previous finalizeBundle except
/// the last instruction in the bundle is not provided as an input. This is
/// used in cases where bundles are pre-determined by marking instructions
/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
/// points to the end of the bundle.
MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
                    MachineBasicBlock::instr_iterator FirstMI);

/// finalizeBundles - Finalize instruction bundles in the specified
/// MachineFunction. Return true if any bundles are finalized.
bool finalizeBundles(MachineFunction &MF);

/// Returns an iterator to the first instruction in the bundle containing \p I.
inline MachineBasicBlock::instr_iterator getBundleStart(
    MachineBasicBlock::instr_iterator I) {
  while (I->isBundledWithPred())
    --I;
  return I;
}

/// Returns an iterator to the first instruction in the bundle containing \p I.
inline MachineBasicBlock::const_instr_iterator getBundleStart(
    MachineBasicBlock::const_instr_iterator I) {
  while (I->isBundledWithPred())
    --I;
  return I;
}

/// Returns an iterator pointing beyond the bundle containing \p I.
inline MachineBasicBlock::instr_iterator getBundleEnd(
    MachineBasicBlock::instr_iterator I) {
  while (I->isBundledWithSucc())
    ++I;
  ++I;
  return I;
}

/// Returns an iterator pointing beyond the bundle containing \p I.
inline MachineBasicBlock::const_instr_iterator getBundleEnd(
    MachineBasicBlock::const_instr_iterator I) {
  while (I->isBundledWithSucc())
    ++I;
  ++I;
  return I;
}
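
// Illustrative sketch (not part of the original header): visiting every
// instruction of the bundle containing `MI` (a MachineInstr known to be
// inside a basic block).
//
//   MachineBasicBlock::instr_iterator I = getBundleStart(MI.getIterator());
//   MachineBasicBlock::instr_iterator E = getBundleEnd(MI.getIterator());
//   for (; I != E; ++I) {
//     // *I is the BUNDLE header or one of the bundled instructions.
//   }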

//===----------------------------------------------------------------------===//
// MIBundleOperand iterators
//

/// MIBundleOperandIteratorBase - Iterator that visits all operands in a bundle
/// of MachineInstrs. This class is not intended to be used directly, use one
/// of the sub-classes instead.
///
/// Intended use:
///
///   for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
///     if (!MIO->isReg())
///       continue;
///     ...
///   }
///
template <typename ValueT>
class MIBundleOperandIteratorBase
    : public iterator_facade_base<MIBundleOperandIteratorBase<ValueT>,
                                  std::forward_iterator_tag, ValueT> {
  MachineBasicBlock::instr_iterator InstrI, InstrE;
  MachineInstr::mop_iterator OpI, OpE;

  // If the operands on InstrI are exhausted, advance InstrI to the next
  // bundled instruction with operands.
  void advance() {
    while (OpI == OpE) {
      // Don't advance off the basic block, or into a new bundle.
      if (++InstrI == InstrE || !InstrI->isInsideBundle()) {
        InstrI = InstrE;
        break;
      }
      OpI = InstrI->operands_begin();
      OpE = InstrI->operands_end();
    }
  }

protected:
  /// MIBundleOperandIteratorBase - Create an iterator that visits all operands
  /// on MI, or all operands on every instruction in the bundle containing MI.
  ///
  /// @param MI The instruction to examine.
  ///
  explicit MIBundleOperandIteratorBase(MachineInstr &MI) {
    InstrI = getBundleStart(MI.getIterator());
    InstrE = MI.getParent()->instr_end();
    OpI = InstrI->operands_begin();
    OpE = InstrI->operands_end();
    advance();
  }

  /// Constructor for an iterator past the last iteration: both instruction
  /// iterators point to the end of the BB and OpI == OpE.
  explicit MIBundleOperandIteratorBase(MachineBasicBlock::instr_iterator InstrE,
                                       MachineInstr::mop_iterator OpE)
      : InstrI(InstrE), InstrE(InstrE), OpI(OpE), OpE(OpE) {}

public:
  /// isValid - Returns true until all the operands have been visited.
  bool isValid() const { return OpI != OpE; }

  /// Preincrement.  Move to the next operand.
  void operator++() {
    assert(isValid() && "Cannot advance MIOperands beyond the last operand");
    ++OpI;
    advance();
  }

  ValueT &operator*() const { return *OpI; }
  ValueT *operator->() const { return &*OpI; }

  bool operator==(const MIBundleOperandIteratorBase &Arg) const {
    // Iterators are equal if InstrI matches and either the OpIs match, or
    // OpI == OpE holds for both. The second condition allows us to construct
    // an 'end' iterator without finding the last instruction in a bundle
    // up-front.
    return InstrI == Arg.InstrI &&
           (OpI == Arg.OpI || (OpI == OpE && Arg.OpI == Arg.OpE));
  }
  /// getOperandNo - Returns the number of the current operand relative to its
  /// instruction.
  ///
  unsigned getOperandNo() const {
    return OpI - InstrI->operands_begin();
  }
};

/// MIBundleOperands - Iterate over all operands in a bundle of machine
/// instructions.
///
class MIBundleOperands : public MIBundleOperandIteratorBase<MachineOperand> {
  /// Constructor for an iterator past the last iteration.
  MIBundleOperands(MachineBasicBlock::instr_iterator InstrE,
                   MachineInstr::mop_iterator OpE)
      : MIBundleOperandIteratorBase(InstrE, OpE) {}

public:
  MIBundleOperands(MachineInstr &MI) : MIBundleOperandIteratorBase(MI) {}

  /// Returns an iterator past the last iteration.
  static MIBundleOperands end(const MachineBasicBlock &MBB) {
    return {const_cast<MachineBasicBlock &>(MBB).instr_end(),
            const_cast<MachineBasicBlock &>(MBB).instr_begin()->operands_end()};
  }
};

/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
/// machine instructions.
///
class ConstMIBundleOperands
    : public MIBundleOperandIteratorBase<const MachineOperand> {

  /// Constructor for an iterator past the last iteration.
  ConstMIBundleOperands(MachineBasicBlock::instr_iterator InstrE,
                        MachineInstr::mop_iterator OpE)
      : MIBundleOperandIteratorBase(InstrE, OpE) {}

public:
  ConstMIBundleOperands(const MachineInstr &MI)
      : MIBundleOperandIteratorBase(const_cast<MachineInstr &>(MI)) {}

  /// Returns an iterator past the last iteration.
  static ConstMIBundleOperands end(const MachineBasicBlock &MBB) {
    return {const_cast<MachineBasicBlock &>(MBB).instr_end(),
            const_cast<MachineBasicBlock &>(MBB).instr_begin()->operands_end()};
  }
};

inline iterator_range<ConstMIBundleOperands>
const_mi_bundle_ops(const MachineInstr &MI) {
  return make_range(ConstMIBundleOperands(MI),
                    ConstMIBundleOperands::end(*MI.getParent()));
}

inline iterator_range<MIBundleOperands> mi_bundle_ops(MachineInstr &MI) {
  return make_range(MIBundleOperands(MI),
                    MIBundleOperands::end(*MI.getParent()));
}
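
// Illustrative example (not from the original header): counting the register
// operands of a bundle with the range helpers above, instead of driving an
// MIBundleOperands iterator manually via isValid()/operator++.
//
//   unsigned NumRegOps = 0;
//   for (MachineOperand &MO : mi_bundle_ops(MI))
//     if (MO.isReg())
//       ++NumRegOps;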

/// VirtRegInfo - Information about a virtual register used by a set of
/// operands.
///
struct VirtRegInfo {
  /// Reads - One of the operands read the virtual register.  This does not
  /// include undef or internal use operands, see MO::readsReg().
  bool Reads;

  /// Writes - One of the operands writes the virtual register.
  bool Writes;

  /// Tied - Uses and defs must use the same register. This can be because of
  /// a two-address constraint, or there may be a partial redefinition of a
  /// sub-register.
  bool Tied;
};

/// AnalyzeVirtRegInBundle - Analyze how the current instruction or bundle uses
/// a virtual register.  This function should not be called after operator++();
/// it expects a fresh iterator.
///
/// @param Reg The virtual register to analyze.
/// @param Ops When set, this vector will receive an (MI, OpNum) entry for
///            each operand referring to Reg.
/// @returns A filled-in VirtRegInfo struct.
VirtRegInfo AnalyzeVirtRegInBundle(
    MachineInstr &MI, Register Reg,
    SmallVectorImpl<std::pair<MachineInstr *, unsigned>> *Ops = nullptr);
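
// A usage sketch (illustrative only): querying how a bundle uses virtual
// register Reg and collecting the referencing operands. 'MI' and 'Reg' are
// assumed to be in scope.
//
//   SmallVector<std::pair<MachineInstr *, unsigned>, 4> Ops;
//   VirtRegInfo VI = AnalyzeVirtRegInBundle(MI, Reg, &Ops);
//   if (VI.Reads && !VI.Writes) {
//     // The bundle only reads Reg; Ops lists each (MI, OpNum) reference.
//   }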

/// Return a pair of lane masks (reads, writes) indicating which lanes this
/// instruction uses with Reg.
std::pair<LaneBitmask, LaneBitmask>
AnalyzeVirtRegLanesInBundle(const MachineInstr &MI, Register Reg,
                            const MachineRegisterInfo &MRI,
                            const TargetRegisterInfo &TRI);

/// Information about how a physical register Reg is used by a set of
/// operands.
struct PhysRegInfo {
  /// There is a regmask operand indicating Reg is clobbered.
  /// \see MachineOperand::CreateRegMask().
  bool Clobbered;

  /// Reg or one of its aliases is defined. The definition may only cover
  /// parts of the register.
  bool Defined;
  /// Reg or a super-register is defined. The definition covers the full
  /// register.
  bool FullyDefined;

  /// Reg or one of its aliases is read. The register may only be read
  /// partially.
  bool Read;
  /// Reg or a super-register is read. The full register is read.
  bool FullyRead;

  /// Either:
  /// - Reg is FullyDefined and all defs of reg or an overlapping
  ///   register are dead, or
  /// - Reg is completely dead because "defined" by a clobber.
  bool DeadDef;

  /// Reg is Defined and all defs of reg or an overlapping register are
  /// dead.
  bool PartialDeadDef;

  /// There is a use operand of reg or a super-register with kill flag set.
  bool Killed;
};

/// AnalyzePhysRegInBundle - Analyze how the current instruction or bundle uses
/// a physical register.  This function should not be called after operator++();
/// it expects a fresh iterator.
///
/// @param Reg The physical register to analyze.
/// @returns A filled-in PhysRegInfo struct.
PhysRegInfo AnalyzePhysRegInBundle(const MachineInstr &MI, Register Reg,
                                   const TargetRegisterInfo *TRI);
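
// A usage sketch (illustrative only): checking whether a bundle leaves any
// prior value of physical register Reg dead. 'TRI' is assumed to point to
// the current target's TargetRegisterInfo.
//
//   PhysRegInfo PRI = AnalyzePhysRegInBundle(MI, Reg, TRI);
//   if (PRI.Clobbered || PRI.FullyDefined) {
//     // Reg is fully overwritten (or clobbered by a regmask) here.
//   }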

} // End llvm namespace

#endif
//==- CodeGen/TargetRegisterInfo.h - Target Register Information -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file.  This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETREGISTERINFO_H
#define LLVM_CODEGEN_TARGETREGISTERINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class BitVector;
class DIExpression;
class LiveRegMatrix;
class MachineFunction;
class MachineInstr;
class RegScavenger;
class VirtRegMap;
class LiveIntervals;
class LiveInterval;

class TargetRegisterClass {
public:
  using iterator = const MCPhysReg *;
  using const_iterator = const MCPhysReg *;
  using sc_iterator = const TargetRegisterClass* const *;

  // Instance variables filled by tablegen, do not use!
  const MCRegisterClass *MC;
  const uint32_t *SubClassMask;
  const uint16_t *SuperRegIndices;
  const LaneBitmask LaneMask;
  /// Classes with a higher priority value are assigned first by register
  /// allocators using a greedy heuristic. The value is in the range [0,31].
  const uint8_t AllocationPriority;

  // Change allocation priority heuristic used by greedy.
  const bool GlobalPriority;

  /// Configurable target specific flags.
  const uint8_t TSFlags;
  /// Whether the class supports two (or more) disjunct subregister indices.
  const bool HasDisjunctSubRegs;
  /// Whether a combination of subregisters can cover every register in the
  /// class. See also the CoveredBySubRegs description in Target.td.
  const bool CoveredBySubRegs;
  const sc_iterator SuperClasses;
  ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);

  /// Return the register class ID number.
  unsigned getID() const { return MC->getID(); }

  /// begin/end - Return all of the registers in this class.
  ///
  iterator       begin() const { return MC->begin(); }
  iterator         end() const { return MC->end(); }

  /// Return the number of registers in this class.
  unsigned getNumRegs() const { return MC->getNumRegs(); }

  iterator_range<SmallVectorImpl<MCPhysReg>::const_iterator>
  getRegisters() const {
    return make_range(MC->begin(), MC->end());
  }

  /// Return the specified register in the class.
  MCRegister getRegister(unsigned i) const {
    return MC->getRegister(i);
  }

  /// Return true if the specified register is included in this register class.
  /// This does not include virtual registers.
  bool contains(Register Reg) const {
    /// FIXME: Historically this function has returned false when given vregs,
    ///        but it should probably only receive physical registers.
    if (!Reg.isPhysical())
      return false;
    return MC->contains(Reg.asMCReg());
  }

  /// Return true if both registers are in this class.
  bool contains(Register Reg1, Register Reg2) const {
    /// FIXME: Historically this function has returned false when given vregs,
    ///        but it should probably only receive physical registers.
    if (!Reg1.isPhysical() || !Reg2.isPhysical())
      return false;
    return MC->contains(Reg1.asMCReg(), Reg2.asMCReg());
  }

  /// Return the cost of copying a value between two registers in this class.
  /// A negative number means the register class is very expensive
  /// to copy e.g. status flag register classes.
  int getCopyCost() const { return MC->getCopyCost(); }

  /// Return true if this register class may be used to create virtual
  /// registers.
  bool isAllocatable() const { return MC->isAllocatable(); }

  /// Return true if the specified TargetRegisterClass
  /// is a proper sub-class of this TargetRegisterClass.
  bool hasSubClass(const TargetRegisterClass *RC) const {
    return RC != this && hasSubClassEq(RC);
  }

  /// Returns true if RC is a sub-class of or equal to this class.
  bool hasSubClassEq(const TargetRegisterClass *RC) const {
    unsigned ID = RC->getID();
    return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
  }

  /// Return true if the specified TargetRegisterClass is a
  /// proper super-class of this TargetRegisterClass.
  bool hasSuperClass(const TargetRegisterClass *RC) const {
    return RC->hasSubClass(this);
  }

  /// Returns true if RC is a super-class of or equal to this class.
  bool hasSuperClassEq(const TargetRegisterClass *RC) const {
    return RC->hasSubClassEq(this);
  }

  /// Returns a bit vector of subclasses, including this one.
  /// The vector is indexed by class IDs.
  ///
  /// To use it, consider the returned array as a chunk of memory that
  /// contains an array of bits of size NumRegClasses. Each 32-bit chunk
  /// contains a bitset of the ID of the subclasses in big-endian style.
  ///
  /// I.e., the representation of the memory from left to right at the
  /// bit level looks like:
  /// [31 30 ... 1 0] [ 63 62 ... 33 32] ...
  ///                     [ XXX NumRegClasses NumRegClasses - 1 ... ]
  /// where each number represents a class ID and XXX marks bits that
  /// should be ignored.
  ///
  /// See the implementation of hasSubClassEq for an example of how it
  /// can be used.
  const uint32_t *getSubClassMask() const {
    return SubClassMask;
  }
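
  // Example (sketch, mirroring the layout described above): testing whether
  // the class with ID 'TestID' is a subclass, exactly as hasSubClassEq()
  // does.
  //
  //   const uint32_t *Mask = RC->getSubClassMask();
  //   bool IsSubClass = (Mask[TestID / 32] >> (TestID % 32)) & 1;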

  /// Returns a 0-terminated list of sub-register indices that project some
  /// super-register class into this register class. The list has an entry for
  /// each Idx such that:
  ///
  ///   There exists SuperRC where:
  ///     For all Reg in SuperRC:
  ///       this->contains(Reg:Idx)
  const uint16_t *getSuperRegIndices() const {
    return SuperRegIndices;
  }

  /// Returns a NULL-terminated list of super-classes.  The
  /// classes are ordered by ID which is also a topological ordering from large
  /// to small classes.  The list does NOT include the current class.
  sc_iterator getSuperClasses() const {
    return SuperClasses;
  }

  /// Return true if this TargetRegisterClass is a subset
  /// class of at least one other TargetRegisterClass.
  bool isASubClass() const {
    return SuperClasses[0] != nullptr;
  }

  /// Returns the preferred order for allocating registers from this register
  /// class in MF. The raw order comes directly from the .td file and may
  /// include reserved registers that are not allocatable.
  /// Register allocators should also make sure to allocate
  /// callee-saved registers only after all the volatiles are used. The
  /// RegisterClassInfo class provides filtered allocation orders with
  /// callee-saved registers moved to the end.
  ///
  /// The MachineFunction argument can be used to tune the allocatable
  /// registers based on the characteristics of the function, subtarget, or
  /// other criteria.
  ///
  /// By default, this method returns all registers in the class.
  ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
    return OrderFunc ? OrderFunc(MF) : ArrayRef(begin(), getNumRegs());
  }

  /// Returns the combination of all lane masks of the registers in this class.
  /// The lane masks of the registers are the combination of all lane masks
  /// of their subregisters. Returns 1 if there are no subregisters.
  LaneBitmask getLaneMask() const {
    return LaneMask;
  }
};

/// Extra information, not in MCRegisterDesc, about registers.
/// These are used by codegen, not by MC.
struct TargetRegisterInfoDesc {
  const uint8_t *CostPerUse; // Extra cost of instructions using register.
  unsigned NumCosts; // Number of cost values associated with each register.
  const bool
      *InAllocatableClass; // Register belongs to an allocatable regclass.
};

/// Each TargetRegisterClass has a per register weight, and weight
/// limit which must be less than the limits of its pressure sets.
struct RegClassWeight {
  unsigned RegWeight;
  unsigned WeightLimit;
};

/// TargetRegisterInfo base class - We assume that the target defines a static
/// array of TargetRegisterDesc objects that represent all of the machine
/// registers that the target has.  As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
///
class TargetRegisterInfo : public MCRegisterInfo {
public:
  using regclass_iterator = const TargetRegisterClass * const *;
  using vt_iterator = const MVT::SimpleValueType *;
  struct RegClassInfo {
    unsigned RegSize, SpillSize, SpillAlignment;
    vt_iterator VTList;
  };
private:
  const TargetRegisterInfoDesc *InfoDesc;     // Extra desc array for codegen
  const char *const *SubRegIndexNames;        // Names of subreg indexes.
  // Pointer to array of lane masks, one per sub-reg index.
  const LaneBitmask *SubRegIndexLaneMasks;

  regclass_iterator RegClassBegin, RegClassEnd;   // List of regclasses
  LaneBitmask CoveringLanes;
  const RegClassInfo *const RCInfos;
  unsigned HwMode;

protected:
  TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
                     regclass_iterator RCB,
                     regclass_iterator RCE,
                     const char *const *SRINames,
                     const LaneBitmask *SRILaneMasks,
                     LaneBitmask CoveringLanes,
                     const RegClassInfo *const RCIs,
                     unsigned Mode = 0);
  virtual ~TargetRegisterInfo();

public:
  // Register numbers can represent physical registers, virtual registers, and
  // sometimes stack slots. The unsigned values are divided into these ranges:
  //
  //   0           Not a register, can be used as a sentinel.
  //   [1;2^30)    Physical registers assigned by TableGen.
  //   [2^30;2^31) Stack slots. (Rarely used.)
  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  //
  // Further sentinels can be allocated from the small negative integers.
  // DenseMapInfo<unsigned> uses -1u and -2u.

  /// Return the size in bits of a register from class RC.
  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).RegSize;
  }

  /// Return the size in bytes of the stack slot allocated to hold a spilled
  /// copy of a register from class RC.
  unsigned getSpillSize(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).SpillSize / 8;
  }

  /// Return the minimum required alignment in bytes for a spill slot for
  /// a register of this class.
  Align getSpillAlign(const TargetRegisterClass &RC) const {
    return Align(getRegClassInfo(RC).SpillAlignment / 8);
  }

  /// Return true if the given TargetRegisterClass has the ValueType T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I)
      if (MVT(*I) == T)
        return true;
    return false;
  }

  /// Return true if the given TargetRegisterClass is compatible with LLT T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, LLT T) const {
    for (auto I = legalclasstypes_begin(RC); *I != MVT::Other; ++I) {
      MVT VT(*I);
      if (VT == MVT::Untyped)
        return true;

      if (LLT(VT) == T)
        return true;
    }
    return false;
  }

  /// Loop over all of the value types that can be represented by values
  /// in the given register class.
  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
    return getRegClassInfo(RC).VTList;
  }

  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
    vt_iterator I = legalclasstypes_begin(RC);
    while (*I != MVT::Other)
      ++I;
    return I;
  }

  /// Returns the Register Class of a physical register of the given type,
  /// picking the smallest (most specific) sub-register class of the right type
  /// that contains this physreg.
  const TargetRegisterClass *getMinimalPhysRegClass(MCRegister Reg,
                                                    MVT VT = MVT::Other) const;

  /// Returns the Register Class of a physical register of the given type,
  /// picking the smallest (most specific) sub-register class of the right type
  /// that contains this physreg. If there is no register class compatible with
  /// the given type, returns nullptr.
  const TargetRegisterClass *getMinimalPhysRegClassLLT(MCRegister Reg,
                                                       LLT Ty = LLT()) const;

  /// Return the maximal subclass of the given register class that is
  /// allocatable or NULL.
  const TargetRegisterClass *
    getAllocatableClass(const TargetRegisterClass *RC) const;

  /// Returns a bitset indexed by register number indicating if a register is
  /// allocatable or not. If a register class is specified, returns the subset
  /// for the class.
  BitVector getAllocatableSet(const MachineFunction &MF,
                              const TargetRegisterClass *RC = nullptr) const;

  /// Get a list of cost values for all registers that correspond to the index
  /// returned by getRegisterCostTableIndex().
  ArrayRef<uint8_t> getRegisterCosts(const MachineFunction &MF) const {
    unsigned Idx = getRegisterCostTableIndex(MF);
    unsigned NumRegs = getNumRegs();
    assert(Idx < InfoDesc->NumCosts && "CostPerUse index out of bounds");

    return ArrayRef(&InfoDesc->CostPerUse[Idx * NumRegs], NumRegs);
  }

  /// Return true if the register is in the allocation of any register class.
  bool isInAllocatableClass(MCRegister RegNo) const {
    return InfoDesc->InAllocatableClass[RegNo];
  }

  /// Return the human-readable symbolic target-specific
  /// name for the specified SubRegIndex.
  const char *getSubRegIndexName(unsigned SubIdx) const {
    assert(SubIdx && SubIdx < getNumSubRegIndices() &&
           "This is not a subregister index");
    return SubRegIndexNames[SubIdx-1];
  }

  /// Return a bitmask representing the parts of a register that are covered by
  /// SubIdx \see LaneBitmask.
  ///
  /// SubIdx == 0 is allowed, it has the lane mask ~0u.
  LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
    assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
    return SubRegIndexLaneMasks[SubIdx];
  }

  /// Try to find one or more subregister indexes to cover \p LaneMask.
  ///
  /// If this is possible, returns true and appends the best matching set of
  /// indexes to \p Indexes. If this is not possible, returns false.
  bool getCoveringSubRegIndexes(const MachineRegisterInfo &MRI,
                                const TargetRegisterClass *RC,
                                LaneBitmask LaneMask,
                                SmallVectorImpl<unsigned> &Indexes) const;

  /// The lane masks returned by getSubRegIndexLaneMask() above can only be
  /// used to determine if sub-registers overlap - they can't be used to
  /// determine if a set of sub-registers completely cover another
  /// sub-register.
  ///
  /// The X86 general purpose registers have two lanes corresponding to the
  /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
  /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
  /// sub_32bit sub-register.
  ///
  /// On the other hand, the ARM NEON lanes fully cover their registers: The
  /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
  /// This is related to the CoveredBySubRegs property on register definitions.
  ///
  /// This function returns a bit mask of lanes that completely cover their
  /// sub-registers. More precisely, given:
  ///
  ///   Covering = getCoveringLanes();
  ///   MaskA = getSubRegIndexLaneMask(SubA);
  ///   MaskB = getSubRegIndexLaneMask(SubB);
  ///
  /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
  /// SubB.
  LaneBitmask getCoveringLanes() const { return CoveringLanes; }
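
  // Sketch of the covering test described above (illustrative only; SubA and
  // SubB are assumed to be sub-register indices):
  //
  //   LaneBitmask Covering = TRI->getCoveringLanes();
  //   LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(SubA);
  //   LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(SubB);
  //   bool SubBCoversSubA = (MaskA & ~(MaskB & Covering)).none();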

  /// Returns true if the two registers are equal or alias each other.
  /// The registers may be virtual registers.
  bool regsOverlap(Register RegA, Register RegB) const {
    if (RegA == RegB)
      return true;
    if (RegA.isPhysical() && RegB.isPhysical())
      return MCRegisterInfo::regsOverlap(RegA.asMCReg(), RegB.asMCReg());
    return false;
  }

  /// Returns true if Reg contains RegUnit.
  bool hasRegUnit(MCRegister Reg, Register RegUnit) const {
    for (MCRegUnit Unit : regunits(Reg))
      if (Register(Unit) == RegUnit)
        return true;
    return false;
  }

  /// Returns the original SrcReg unless it is the target of a copy-like
  /// operation, in which case we chain backwards through all such operations
  /// to the ultimate source register.  If a physical register is encountered,
  /// we stop the search.
  virtual Register lookThruCopyLike(Register SrcReg,
                                    const MachineRegisterInfo *MRI) const;

  /// Find the original SrcReg unless it is the target of a copy-like operation,
  /// in which case we chain backwards through all such operations to the
  /// ultimate source register. If a physical register is encountered, we stop
  /// the search.
  /// Return the original SrcReg if all the definitions in the chain only have
  /// one user and not a physical register.
  virtual Register
  lookThruSingleUseCopyChain(Register SrcReg,
                             const MachineRegisterInfo *MRI) const;

  /// Return a null-terminated list of all of the callee-saved registers on
  /// this target. The registers should be in the order of desired callee-save
  /// stack frame offset. The first register is closest to the incoming stack
  /// pointer if stack grows down, and vice versa.
  /// Notice: This function does not take into account disabled CSRs.
  ///         In most cases you will want to use instead the function
  ///         getCalleeSavedRegs that is implemented in MachineRegisterInfo.
  virtual const MCPhysReg*
  getCalleeSavedRegs(const MachineFunction *MF) const = 0;

  /// Return a mask of call-preserved registers for the given calling convention
  /// on the current function. The mask should include all call-preserved
  /// aliases. This is used by the register allocator to determine which
  /// registers can be live across a call.
  ///
  /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
  /// A set bit indicates that all bits of the corresponding register are
  /// preserved across the function call.  The bit mask is expected to be
  /// sub-register complete, i.e. if A is preserved, so are all its
  /// sub-registers.
  ///
  /// Bits are numbered from the LSB, so the bit for physical register Reg can
  /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
  ///
  /// A NULL pointer means that no register mask will be used, and call
  /// instructions should use implicit-def operands to indicate call clobbered
  /// registers.
  ///
  virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                               CallingConv::ID) const {
    // The default mask clobbers everything.  All targets should override.
    return nullptr;
  }
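
  // Illustrative sketch of the bit test described above, for an unsigned
  // physical register number Reg (assumes the hook returned a non-null mask):
  //
  //   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CC);
  //   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);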

  /// Return a register mask for the registers preserved by the unwinder,
  /// or nullptr if no custom mask is needed.
  virtual const uint32_t *
  getCustomEHPadPreservedMask(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Return a register mask that clobbers everything.
  virtual const uint32_t *getNoPreservedMask() const {
    llvm_unreachable("target does not provide no preserved mask");
  }

  /// Return a list of all of the registers which are clobbered "inside" a call
  /// to the given function. For example, these might be needed for PLT
  /// sequences of long-branch veneers.
  virtual ArrayRef<MCPhysReg>
  getIntraCallClobberedRegs(const MachineFunction *MF) const {
    return {};
  }

  /// Return true if all bits that are set in mask \p mask0 are also set in
  /// \p mask1.
  bool regmaskSubsetEqual(const uint32_t *mask0, const uint32_t *mask1) const;

  /// Return all the call-preserved register masks defined for this target.
  virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
  virtual ArrayRef<const char *> getRegMaskNames() const = 0;

  /// Returns a bitset indexed by physical register number indicating if a
  /// register is a special register that has particular uses and should be
  /// considered unavailable at all times, e.g. stack pointer, return address.
  /// A reserved register:
  /// - is not allocatable
  /// - is considered always live
  /// - is ignored by liveness tracking
  /// It is often necessary to reserve the super registers of a reserved
  /// register as well, to avoid them getting allocated indirectly. You may use
  /// markSuperRegs() and checkAllSuperRegsMarked() in this case.
  virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;

  /// Returns either a string explaining why the given register is reserved for
  /// this function, or an empty optional if no explanation has been written.
  /// The absence of an explanation does not mean that the register is not
  /// reserved (meaning, you should check that PhysReg is in fact reserved
  /// before calling this).
  virtual std::optional<std::string>
  explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const {
    return {};
  }

  /// Returns false if we can't guarantee that Physreg, specified as an IR asm
  /// clobber constraint, will be preserved across the statement.
  virtual bool isAsmClobberable(const MachineFunction &MF,
                                MCRegister PhysReg) const {
    return true;
  }

  /// Returns true if PhysReg cannot be written to in inline asm statements.
  virtual bool isInlineAsmReadOnlyReg(const MachineFunction &MF,
                                      unsigned PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is unallocatable and constant throughout the
  /// function.  Used by MachineRegisterInfo::isConstantPhysReg().
  virtual bool isConstantPhysReg(MCRegister PhysReg) const { return false; }

  /// Returns true if the register class is considered divergent.
  virtual bool isDivergentRegClass(const TargetRegisterClass *RC) const {
    return false;
  }

  /// Returns true if the register is considered uniform.
  virtual bool isUniformReg(const MachineRegisterInfo &MRI,
                            const RegisterBankInfo &RBI, Register Reg) const {
    return false;
  }

  /// Physical registers that may be modified within a function but are
  /// guaranteed to be restored before any uses. This is useful for targets that
  /// have call sequences where a GOT register may be updated by the caller
  /// prior to a call and is guaranteed to be restored (also by the caller)
  /// after the call.
  virtual bool isCallerPreservedPhysReg(MCRegister PhysReg,
                                        const MachineFunction &MF) const {
    return false;
  }

  /// This is a wrapper around getCallPreservedMask().
  /// Return true if the register is preserved after the call.
  virtual bool isCalleeSavedPhysReg(MCRegister PhysReg,
                                    const MachineFunction &MF) const;

  /// Returns true if PhysReg can be used as an argument to a function.
  virtual bool isArgumentRegister(const MachineFunction &MF,
                                  MCRegister PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is a fixed register.
  virtual bool isFixedRegister(const MachineFunction &MF,
                               MCRegister PhysReg) const {
    return false;
  }

  /// Returns true if PhysReg is a general purpose register.
  virtual bool isGeneralPurposeRegister(const MachineFunction &MF,
                                        MCRegister PhysReg) const {
    return false;
  }

  /// Prior to adding the live-out mask to a stackmap or patchpoint
  /// instruction, provide the target the opportunity to adjust it (mainly to
  /// remove pseudo-registers that should be ignored).
  virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const {}

  /// Return a super-register of the specified register
  /// Reg so its sub-register of index SubIdx is Reg.
  MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
                                 const TargetRegisterClass *RC) const {
    return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
  }

  /// Return a subclass of the specified register
  /// class A so that each register in it has a sub-register of the
  /// specified sub-register index which is in the specified register class B.
  ///
  /// TableGen will synthesize missing A sub-classes.
  virtual const TargetRegisterClass *
  getMatchingSuperRegClass(const TargetRegisterClass *A,
                           const TargetRegisterClass *B, unsigned Idx) const;

  // For a copy-like instruction that defines a register of class DefRC with
  // subreg index DefSubReg, reading from another source with class SrcRC and
  // subregister SrcSubReg return true if this is a preferable copy
  // instruction or an earlier use should be used.
  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                    unsigned DefSubReg,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SrcSubReg) const;

  /// Returns the largest legal sub-class of RC that
  /// supports the sub-register index Idx.
  /// If no such sub-class exists, return NULL.
  /// If all registers in RC already have an Idx sub-register, return RC.
  ///
  /// TableGen generates a version of this function that is good enough in most
  /// cases.  Targets can override if they have constraints that TableGen
  /// doesn't understand.  For example, the x86 sub_8bit sub-register index is
  /// supported by the full GR32 register class in 64-bit mode, but only by the
  /// GR32_ABCD register class in 32-bit mode.
  ///
  /// TableGen will synthesize missing RC sub-classes.
  virtual const TargetRegisterClass *
  getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
    assert(Idx == 0 && "Target has no sub-registers");
    return RC;
  }

  /// Return a register class that can be used for a subregister copy from/into
  /// \p SuperRC at \p SubRegIdx.
  virtual const TargetRegisterClass *
  getSubRegisterClass(const TargetRegisterClass *SuperRC,
                      unsigned SubRegIdx) const {
    return nullptr;
  }

  /// Return the subregister index you get from composing
  /// two subregister indices.
  ///
  /// The special null sub-register index composes as the identity.
  ///
  /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
  /// returns c. Note that composeSubRegIndices does not tell you about illegal
  /// compositions. If R does not have a subreg a, or R:a does not have a subreg
  /// b, composeSubRegIndices doesn't tell you.
  ///
  /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
  /// ssub_0:S0 - ssub_3:S3 subregs.
  /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
  unsigned composeSubRegIndices(unsigned a, unsigned b) const {
    if (!a) return b;
    if (!b) return a;
    return composeSubRegIndicesImpl(a, b);
  }
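
  // Sketch of the ARM example above ('ARM::dsub_1' etc. are target enum
  // values, shown for illustration only):
  //
  //   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
  //   // Idx == ARM::ssub_2, the first S register in Q0's high D half.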

  /// Transforms a LaneMask computed for one subregister to the lanemask that
  /// would have been computed when composing the subsubregisters with IdxA
  /// first. @sa composeSubRegIndices()
  LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
                                         LaneBitmask Mask) const {
    if (!IdxA)
      return Mask;
    return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
  }

  /// Transform a lanemask given for a virtual register to the corresponding
  /// lanemask before using subregister with index \p IdxA.
  /// This is the reverse of composeSubRegIndexLaneMask(), assuming Mask is a
  /// valid lane mask (no invalid bits set), the following holds:
  /// X0 = composeSubRegIndexLaneMask(Idx, Mask)
  /// X1 = reverseComposeSubRegIndexLaneMask(Idx, X0)
  /// => X1 == Mask
  LaneBitmask reverseComposeSubRegIndexLaneMask(unsigned IdxA,
                                                LaneBitmask LaneMask) const {
    if (!IdxA)
      return LaneMask;
    return reverseComposeSubRegIndexLaneMaskImpl(IdxA, LaneMask);
  }

  /// Debugging helper: dump register in human readable form to dbgs() stream.
  static void dumpReg(Register Reg, unsigned SubRegIndex = 0,
                      const TargetRegisterInfo *TRI = nullptr);

  /// Return target defined base register class for a physical register.
  /// This is the register class with the lowest BaseClassOrder containing the
  /// register.
  /// Will be nullptr if the register is not in any base register class.
  virtual const TargetRegisterClass *getPhysRegBaseClass(MCRegister Reg) const {
    return nullptr;
  }

protected:
  /// Overridden by TableGen in targets that have sub-registers.
  virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Overridden by TableGen in targets that have sub-registers.
  virtual LaneBitmask
  composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  virtual LaneBitmask reverseComposeSubRegIndexLaneMaskImpl(unsigned,
                                                            LaneBitmask) const {
    llvm_unreachable("Target has no sub-registers");
  }

  /// Return the register cost table index. This implementation is sufficient
  /// for most architectures and can be overridden by targets in case there are
  /// multiple cost values associated with each register.
  virtual unsigned getRegisterCostTableIndex(const MachineFunction &MF) const {
    return 0;
  }

public:
  /// Find a common super-register class if it exists.
  ///
  /// Find a register class, SuperRC and two sub-register indices, PreA and
  /// PreB, such that:
  ///
  ///   1. PreA + SubA == PreB + SubB  (using composeSubRegIndices()), and
  ///
  ///   2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
  ///
  ///   3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
  ///
  /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
  /// requirements, and there is no register class with a smaller spill size
  /// that satisfies the requirements.
  ///
  /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
  ///
  /// Either of the PreA and PreB sub-register indices may be returned as 0. In
  /// that case, the returned register class will be a sub-class of the
  /// corresponding argument register class.
  ///
  /// The function returns NULL if no register class can be found.
  const TargetRegisterClass*
  getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                         const TargetRegisterClass *RCB, unsigned SubB,
                         unsigned &PreA, unsigned &PreB) const;

  //===--------------------------------------------------------------------===//
  // Register Class Information
  //
protected:
  const RegClassInfo &getRegClassInfo(const TargetRegisterClass &RC) const {
    return RCInfos[getNumRegClasses() * HwMode + RC.getID()];
  }

public:
  /// Register class iterators
  regclass_iterator regclass_begin() const { return RegClassBegin; }
  regclass_iterator regclass_end() const { return RegClassEnd; }
  iterator_range<regclass_iterator> regclasses() const {
    return make_range(regclass_begin(), regclass_end());
  }

  unsigned getNumRegClasses() const {
    return (unsigned)(regclass_end()-regclass_begin());
  }

  /// Returns the register class associated with the enumeration value.
  /// See class MCOperandInfo.
  const TargetRegisterClass *getRegClass(unsigned i) const {
    assert(i < getNumRegClasses() && "Register Class ID out of range");
    return RegClassBegin[i];
  }

  /// Returns the name of the register class.
  const char *getRegClassName(const TargetRegisterClass *Class) const {
    return MCRegisterInfo::getRegClassName(Class->MC);
  }

  /// Find the largest common subclass of A and B.
  /// Return NULL if there is no common subclass.
  const TargetRegisterClass *
  getCommonSubClass(const TargetRegisterClass *A,
                    const TargetRegisterClass *B) const;

  /// Returns a TargetRegisterClass used for pointer values.
  /// If a target supports multiple different pointer register classes,
  /// kind specifies which one is indicated.
  virtual const TargetRegisterClass *
  getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
    llvm_unreachable("Target didn't implement getPointerRegClass!");
  }

  /// Returns a legal register class to copy a register in the specified class
  /// to or from. If it is possible to copy the register directly without using
  /// a cross register class copy, return the specified RC. Returns NULL if it
  /// is not possible to copy between two registers of the specified class.
  virtual const TargetRegisterClass *
  getCrossCopyRegClass(const TargetRegisterClass *RC) const {
    return RC;
  }

  /// Returns the largest super class of RC that is legal to use in the current
  /// sub-target and has the same spill size.
  /// The returned register class can be used to create virtual registers which
  /// means that all its registers can be copied and spilled.
  virtual const TargetRegisterClass *
  getLargestLegalSuperClass(const TargetRegisterClass *RC,
                            const MachineFunction &) const {
    /// The default implementation is very conservative and doesn't allow the
    /// register allocator to inflate register classes.
    return RC;
  }

  /// Return the register pressure "high water mark" for the specific register
  /// class. The scheduler is in high register pressure mode (for the specific
  /// register class) if it goes over the limit.
  ///
  /// Note: this is the old register pressure model that relies on a manually
  /// specified representative register class per value type.
  virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                                       MachineFunction &MF) const {
    return 0;
  }

  /// Return a heuristic for the machine scheduler to compare the profitability
  /// of increasing one register pressure set versus another.  The scheduler
  /// will prefer increasing the register pressure of the set which returns
  /// the largest value for this function.
  virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
                                          unsigned PSetID) const {
    return PSetID;
  }

  /// Get the weight in units of pressure for this register class.
  virtual const RegClassWeight &getRegClassWeight(
    const TargetRegisterClass *RC) const = 0;

  /// Returns size in bits of a phys/virtual/generic register.
  unsigned getRegSizeInBits(Register Reg, const MachineRegisterInfo &MRI) const;

  /// Get the weight in units of pressure for this register unit.
  virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;

  /// Get the number of dimensions of register pressure.
  virtual unsigned getNumRegPressureSets() const = 0;

  /// Get the name of this register unit pressure set.
  virtual const char *getRegPressureSetName(unsigned Idx) const = 0;

  /// Get the register unit pressure limit for this dimension.
  /// This limit must be adjusted dynamically for reserved registers.
  virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                          unsigned Idx) const = 0;

  /// Get the dimensions of register pressure impacted by this register class.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegClassPressureSets(
    const TargetRegisterClass *RC) const = 0;

  /// Get the dimensions of register pressure impacted by this register unit.
  /// Returns a -1 terminated array of pressure set IDs.
  virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;

  /// Get a list of 'hint' registers that the register allocator should try
  /// first when allocating a physical register for the virtual register
  /// VirtReg. These registers are effectively moved to the front of the
  /// allocation order. If true is returned, regalloc will try to only use
  /// hints to the greatest extent possible even if it means spilling.
  ///
  /// The Order argument is the allocation order for VirtReg's register class
  /// as returned from RegisterClassInfo::getOrder(). The hint registers must
  /// come from Order, and they must not be reserved.
  ///
  /// The default implementation of this function will only add target
  /// independent register allocation hints. Targets that override this
  /// function should typically call this default implementation as well and
  /// expect to see generic copy hints added.
  virtual bool
  getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                        SmallVectorImpl<MCPhysReg> &Hints,
                        const MachineFunction &MF,
                        const VirtRegMap *VRM = nullptr,
                        const LiveRegMatrix *Matrix = nullptr) const;

  /// A callback to allow target a chance to update register allocation hints
  /// when a register is "changed" (e.g. coalesced) to another register.
  /// e.g. On ARM, some virtual registers should target register pairs,
  /// if one of pair is coalesced to another register, the allocation hint of
  /// the other half of the pair should be changed to point to the new register.
  virtual void updateRegAllocHint(Register Reg, Register NewReg,
                                  MachineFunction &MF) const {
    // Do nothing.
  }

  /// Allow the target to reverse allocation order of local live ranges. This
  /// will generally allocate shorter local live ranges first. For targets with
  /// many registers, this could reduce regalloc compile time by a large
  /// factor. It is disabled by default for three reasons:
  /// (1) Top-down allocation is simpler and easier to debug for targets that
  /// don't benefit from reversing the order.
  /// (2) Bottom-up allocation could result in poor eviction decisions on some
  /// targets affecting the performance of compiled code.
  /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
  virtual bool reverseLocalAssignment() const { return false; }

  /// Allow the target to override the cost of using a callee-saved register for
  /// the first time. Default value of 0 means we will use a callee-saved
  /// register if it is available.
  virtual unsigned getCSRFirstUseCost() const { return 0; }

  /// Returns true if the target requires (and can make use of) the register
  /// scavenger.
  virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants to use frame pointer based accesses to
  /// spill to the scavenger emergency spill slot.
  virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
    return true;
  }

  /// Returns true if the target requires post PEI scavenging of registers for
  /// materializing frame index constants.
  virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target requires using the RegScavenger directly for
  /// frame elimination despite using requiresFrameIndexScavenging.
  virtual bool requiresFrameIndexReplacementScavenging(
      const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target wants the LocalStackAllocation pass to be run
  /// and virtual base registers used for more efficient stack access.
  virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
    return false;
  }

  /// Return true if target has reserved a spill slot in the stack frame of
  /// the given function for the specified register. e.g. On x86, if the frame
  /// register is required, the first fixed stack object is reserved as its
  /// spill slot. This tells PEI not to create a new stack frame
  /// object for the given register. It should be called only after
  /// determineCalleeSaves().
  virtual bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg,
                                    int &FrameIdx) const {
    return false;
  }

  /// Returns true if the live-ins should be tracked after register allocation.
  virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
    return true;
  }

  /// True if the stack can be realigned for the target.
  virtual bool canRealignStack(const MachineFunction &MF) const;

  /// True if storage within the function requires the stack pointer to be
  /// aligned more than the normal calling convention calls for.
  virtual bool shouldRealignStack(const MachineFunction &MF) const;

  /// True if stack realignment is required and still possible.
  bool hasStackRealignment(const MachineFunction &MF) const {
    return shouldRealignStack(MF) && canRealignStack(MF);
  }

  /// Get the offset from the referenced frame index in the instruction,
  /// if there is one.
  virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                           int Idx) const {
    return 0;
  }

  /// Returns true if the instruction's frame index reference would be better
  /// served by a base register other than FP or SP.
  /// Used by LocalStackFrameAllocation to determine which frame index
  /// references it should create new base registers for.
  virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
    return false;
  }

  /// Insert defining instruction(s) for a pointer to FrameIdx before
  /// insertion point I. Return materialized frame pointer.
  virtual Register materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                int FrameIdx,
                                                int64_t Offset) const {
    llvm_unreachable("materializeFrameBaseRegister does not exist on this "
                     "target");
  }

  /// Resolve a frame index operand of an instruction
  /// to reference the indicated base register plus offset instead.
  virtual void resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                 int64_t Offset) const {
    llvm_unreachable("resolveFrameIndex does not exist on this target");
  }

  /// Determine whether a given base register plus offset immediate is
  /// encodable to resolve a frame index.
  virtual bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg,
                                  int64_t Offset) const {
    llvm_unreachable("isFrameOffsetLegal does not exist on this target");
  }

  /// Gets the DWARF expression opcodes for \p Offset.
  virtual void getOffsetOpcodes(const StackOffset &Offset,
                                SmallVectorImpl<uint64_t> &Ops) const;

  /// Prepends a DWARF expression for \p Offset to DIExpression \p Expr.
  DIExpression *
  prependOffsetExpression(const DIExpression *Expr, unsigned PrependFlags,
                          const StackOffset &Offset) const;

  /// Spill the register so it can be used by the register scavenger.
  /// Return true if the register was spilled, false otherwise.
  /// If this function does not spill the register, the scavenger
  /// will instead spill it to the emergency spill slot.
  virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator &UseMI,
                                     const TargetRegisterClass *RC,
                                     Register Reg) const {
    return false;
  }

  /// Process frame indices in reverse block order. This changes the behavior of
  /// the RegScavenger passed to eliminateFrameIndex. If this is true, targets
  /// should use scavengeRegisterBackwards in eliminateFrameIndex. New targets
  /// should prefer reverse scavenging behavior.
  virtual bool supportsBackwardScavenger() const { return false; }

  /// This method must be overridden to eliminate abstract frame indices from
  /// instructions which may use them. The instruction referenced by the
  /// iterator contains an MO_FrameIndex operand which must be eliminated by
  /// this method. This method may modify or replace the specified instruction,
  /// as long as it keeps the iterator pointing at the finished product.
  /// SPAdj is the SP adjustment due to call frame setup instruction.
  /// FIOperandNum is the FI operand number.
  /// Returns true if the current instruction was removed and the iterator
  /// is no longer valid.
  virtual bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                   int SPAdj, unsigned FIOperandNum,
                                   RegScavenger *RS = nullptr) const = 0;
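
  // A schematic override for a hypothetical target 'Foo' (illustration only):
  // rewrite the frame-index operand to use the frame register, leaving the
  // target-specific offset folding as a placeholder.
  //
  //   bool FooRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
  //                                             int SPAdj, unsigned FIOperandNum,
  //                                             RegScavenger *RS) const {
  //     Register FrameReg = getFrameRegister(*MI->getMF());
  //     MI->getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*isDef=*/false);
  //     // ...fold the frame object's offset into the immediate operand...
  //     return false; // The instruction was not removed.
  //   }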

  /// Return the assembly name for \p Reg.
  virtual StringRef getRegAsmName(MCRegister Reg) const {
    // FIXME: We are assuming that the assembly name is equal to the TableGen
    // name converted to lower case
    //
    // The TableGen name is the name of the definition for this register in the
    // target's tablegen files.  For example, the TableGen name of
    // def EAX : Register <...>; is "EAX"
    return StringRef(getName(Reg));
  }

  //===--------------------------------------------------------------------===//
  /// Subtarget Hooks

  /// SrcRC and DstRC will be morphed into NewRC if this returns true.
  virtual bool shouldCoalesce(MachineInstr *MI,
                              const TargetRegisterClass *SrcRC,
                              unsigned SubReg,
                              const TargetRegisterClass *DstRC,
                              unsigned DstSubReg,
                              const TargetRegisterClass *NewRC,
                              LiveIntervals &LIS) const
  { return true; }

  /// Region splitting has a high compile time cost, especially for large live
  /// ranges.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive splitting heuristic.
  virtual bool shouldRegionSplitForVirtReg(const MachineFunction &MF,
                                           const LiveInterval &VirtReg) const;

  /// Last chance recoloring has a high compile time cost especially for
  /// targets with a lot of registers.
  /// This method is used to decide whether or not \p VirtReg should
  /// go through this expensive heuristic.
  /// When this target hook is hit, by returning false, there is a high
  /// chance that the register allocation will fail altogether (usually with
  /// "ran out of registers").
  /// That said, this error usually points to another problem in the
  /// optimization pipeline.
  virtual bool
  shouldUseLastChanceRecoloringForVirtReg(const MachineFunction &MF,
                                          const LiveInterval &VirtReg) const {
    return true;
  }

  /// Deferred spilling delays the spill insertion of a virtual register
  /// after every other allocation. By deferring the spilling, it is
  /// sometimes possible to eliminate that spilling altogether because
  /// something else could have been eliminated, thus leaving some space
  /// for the virtual register.
  /// However, this comes with a compile time impact because it adds one
  /// more stage to the greedy register allocator.
  /// This method is used to decide whether \p VirtReg should use the deferred
  /// spilling stage instead of being spilled right away.
  virtual bool
  shouldUseDeferredSpillingForVirtReg(const MachineFunction &MF,
                                      const LiveInterval &VirtReg) const {
    return false;
  }

  /// When prioritizing live ranges in register allocation, if this hook returns
  /// true then the AllocationPriority of the register class will be treated as
  /// more important than whether the range is local to a basic block or global.
  virtual bool
  regClassPriorityTrumpsGlobalness(const MachineFunction &MF) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// Debug information queries.

  /// getFrameRegister - This method should return the register used as a base
  /// for values allocated in the current stack frame.
  virtual Register getFrameRegister(const MachineFunction &MF) const = 0;

  /// Mark a register and all its aliases as reserved in the given set.
  void markSuperRegs(BitVector &RegisterSet, MCRegister Reg) const;

  /// Returns true if for every register in the set all super registers are part
  /// of the set as well.
  bool checkAllSuperRegsMarked(const BitVector &RegisterSet,
      ArrayRef<MCPhysReg> Exceptions = ArrayRef<MCPhysReg>()) const;

  virtual const TargetRegisterClass *
  getConstrainedRegClassForOperand(const MachineOperand &MO,
                                   const MachineRegisterInfo &MRI) const {
    return nullptr;
  }

  /// Returns the physical register number of sub-register "Index"
  /// for physical register RegNo. Return zero if the sub-register does not
  /// exist.
  inline MCRegister getSubReg(MCRegister Reg, unsigned Idx) const {
    return static_cast<const MCRegisterInfo *>(this)->getSubReg(Reg, Idx);
  }

  /// Some targets have non-allocatable registers that aren't technically part
  /// of the explicit callee saved register list, but should be handled as such
  /// in certain cases.
  virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
    return false;
  }
};

//===----------------------------------------------------------------------===//
//                           SuperRegClassIterator
//===----------------------------------------------------------------------===//
//
// Iterate over the possible super-registers for a given register class. The
// iterator will visit a list of pairs (Idx, Mask) corresponding to the
// possible classes of super-registers.
//
// Each bit mask will have at least one set bit, and each set bit in Mask
// corresponds to a SuperRC such that:
//
//   For all Reg in SuperRC: Reg:Idx is in RC.
//
// The iterator can include (0, RC->getSubClassMask()) as the first entry which
// also satisfies the above requirement, assuming Reg:0 == Reg.
//
class SuperRegClassIterator {
  const unsigned RCMaskWords;
  unsigned SubReg = 0;
  const uint16_t *Idx;
  const uint32_t *Mask;

public:
  /// Create a SuperRegClassIterator that visits all the super-register classes
  /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
  SuperRegClassIterator(const TargetRegisterClass *RC,
                        const TargetRegisterInfo *TRI,
                        bool IncludeSelf = false)
    : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
      Idx(RC->getSuperRegIndices()), Mask(RC->getSubClassMask()) {
    if (!IncludeSelf)
      ++*this;
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return Idx; }

  /// Returns the current sub-register index.
  unsigned getSubReg() const { return SubReg; }

  /// Returns the bit mask of register classes that getSubReg() projects into
  /// RC.
  /// See TargetRegisterClass::getSubClassMask() for how to use it.
  const uint32_t *getMask() const { return Mask; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    Mask += RCMaskWords;
    SubReg = *Idx++;
    if (!SubReg)
      Idx = nullptr;
  }
};
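// Illustrative usage sketch (not part of the original header): walking the
// super-register classes of a class RC, assuming RC and TRI are valid.
//
//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
//     unsigned SubIdx = SRI.getSubReg();
//     const uint32_t *Mask = SRI.getMask();
//     // Each set bit in Mask names a class SuperRC such that, for every
//     // Reg in SuperRC, Reg:SubIdx is in RC.
//   }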

//===----------------------------------------------------------------------===//
//                           BitMaskClassIterator
//===----------------------------------------------------------------------===//
/// This class encapsulates the logic to iterate over the bitmasks returned by
/// the various RegClass-related APIs.
/// E.g., this class can be used to iterate over the subclasses provided by
/// TargetRegisterClass::getSubClassMask or SuperRegClassIterator::getMask.
class BitMaskClassIterator {
  /// Total number of register classes.
  const unsigned NumRegClasses;
  /// Base index of CurrentChunk, i.e., the number of bits read to reach the
  /// beginning of that chunk.
  unsigned Base = 0;
  /// Adjusted base index of CurrentChunk: Base plus the number of bits
  /// already consumed within CurrentChunk.
  unsigned Idx = 0;
  /// Current register class ID.
  unsigned ID = 0;
  /// Mask we are iterating over.
  const uint32_t *Mask;
  /// Current chunk of the Mask we are traversing.
  uint32_t CurrentChunk;

  /// Move ID to the next set bit.
  void moveToNextID() {
    // If the current chunk of memory is empty, move to the next one,
    // while making sure we do not go past the number of register
    // classes.
    while (!CurrentChunk) {
      // Move to the next chunk.
      Base += 32;
      if (Base >= NumRegClasses) {
        ID = NumRegClasses;
        return;
      }
      CurrentChunk = *++Mask;
      Idx = Base;
    }
    // Otherwise look for the first bit set from the right
    // (representation of the class ID is big endian).
    // See getSubClassMask for more details on the representation.
    unsigned Offset = llvm::countr_zero(CurrentChunk);
    // Add the Offset to the adjusted base number of this chunk: Idx.
    // This is the ID of the register class.
    ID = Idx + Offset;

    // Consume the zeros, if any, and the bit we just read
    // so that we are at the right spot for the next call.
    // Do not shift by Offset + 1 in one step: Offset may be 31, and a 32-bit
    // shift is UB. We could instead have set the chunk to 0 in that case, but
    // that would have introduced an if statement.
    moveNBits(Offset);
    moveNBits(1);
  }

  /// Move \p NumBits Bits forward in CurrentChunk.
  void moveNBits(unsigned NumBits) {
    assert(NumBits < 32 && "Undefined behavior spotted!");
    // Consume the bit we read for the next call.
    CurrentChunk >>= NumBits;
    // Adjust the base for the chunk.
    Idx += NumBits;
  }

public:
  /// Create a BitMaskClassIterator that visits all the register classes
  /// represented by \p Mask.
  ///
  /// \pre \p Mask != nullptr
  BitMaskClassIterator(const uint32_t *Mask, const TargetRegisterInfo &TRI)
      : NumRegClasses(TRI.getNumRegClasses()), Mask(Mask), CurrentChunk(*Mask) {
    // Move to the first ID.
    moveToNextID();
  }

  /// Returns true if this iterator is still pointing at a valid entry.
  bool isValid() const { return getID() != NumRegClasses; }

  /// Returns the current register class ID.
  unsigned getID() const { return ID; }

  /// Advance iterator to the next entry.
  void operator++() {
    assert(isValid() && "Cannot move iterator past end.");
    moveToNextID();
  }
};
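// Illustrative usage sketch (not part of the original header): expanding a
// mask such as RC->getSubClassMask() into register class IDs.
//
//   for (BitMaskClassIterator It(RC->getSubClassMask(), *TRI); It.isValid();
//        ++It) {
//     const TargetRegisterClass *SubRC = TRI->getRegClass(It.getID());
//     // ...
//   }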

// This is useful when building IndexedMaps keyed on virtual registers
struct VirtReg2IndexFunctor {
  using argument_type = Register;
  unsigned operator()(Register Reg) const {
    return Register::virtReg2Index(Reg);
  }
};
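// Illustrative usage sketch (not part of the original header): the functor is
// typically passed to IndexedMap so that virtual registers index densely from
// zero. Names below (DefMI, MRI, Reg, MI) are placeholders.
//
//   IndexedMap<MachineInstr *, VirtReg2IndexFunctor> DefMI;
//   DefMI.resize(MRI.getNumVirtRegs());
//   DefMI[Reg] = &MI; // Reg must be a virtual register.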

/// Prints virtual and physical registers with or without a TRI instance.
///
/// The format is:
///   %noreg          - NoRegister
///   %5              - a virtual register.
///   %5:sub_8bit     - a virtual register with sub-register index (with TRI).
///   %eax            - a physical register
///   %physreg17      - a physical register when no TRI instance given.
///
/// Usage: OS << printReg(Reg, TRI, SubRegIdx) << '\n';
Printable printReg(Register Reg, const TargetRegisterInfo *TRI = nullptr,
                   unsigned SubIdx = 0,
                   const MachineRegisterInfo *MRI = nullptr);

/// Create Printable object to print register units on a \ref raw_ostream.
///
/// Register units are named after their root registers:
///
///   al      - Single root.
///   fp0~st7 - Dual roots.
///
/// Usage: OS << printRegUnit(Unit, TRI) << '\n';
Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);

/// Create Printable object to print virtual registers and physical
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);

/// Create Printable object to print register classes or register banks
/// on a \ref raw_ostream.
Printable printRegClassOrBank(Register Reg, const MachineRegisterInfo &RegInfo,
                              const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
//===- llvm/CodeGen/MachinePassRegistry.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the mechanics for machine function pass registries.  A
// function pass registry (MachinePassRegistry) is auto-filled by the static
// constructors of MachinePassRegistryNode.  Further, there is a command line
// parser (RegisterPassParser) which listens to each registry for additions
// and deletions, so that the appropriate command option is updated.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {

//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryListener - Listener for additions and removals of nodes
/// in the registration list.
///
//===----------------------------------------------------------------------===//
template <class PassCtorTy> class MachinePassRegistryListener {
  virtual void anchor() {}

public:
  MachinePassRegistryListener() = default;
  virtual ~MachinePassRegistryListener() = default;

  virtual void NotifyAdd(StringRef N, PassCtorTy C, StringRef D) = 0;
  virtual void NotifyRemove(StringRef N) = 0;
};

//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryNode - Machine pass node stored in registration list.
///
//===----------------------------------------------------------------------===//
template <typename PassCtorTy> class MachinePassRegistryNode {
private:
  MachinePassRegistryNode *Next = nullptr; // Next function pass in list.
  StringRef Name;                       // Name of function pass.
  StringRef Description;                // Description string.
  PassCtorTy Ctor;                      // Pass creator.

public:
  MachinePassRegistryNode(const char *N, const char *D, PassCtorTy C)
      : Name(N), Description(D), Ctor(C) {}

  // Accessors
  MachinePassRegistryNode *getNext()      const { return Next; }
  MachinePassRegistryNode **getNextAddress()    { return &Next; }
  StringRef getName()                   const { return Name; }
  StringRef getDescription()            const { return Description; }
  PassCtorTy getCtor() const { return Ctor; }
  void setNext(MachinePassRegistryNode *N)      { Next = N; }
};

//===----------------------------------------------------------------------===//
///
/// MachinePassRegistry - Track the registration of machine passes.
///
//===----------------------------------------------------------------------===//
template <typename PassCtorTy> class MachinePassRegistry {
private:
  MachinePassRegistryNode<PassCtorTy> *List; // List of registry nodes.
  PassCtorTy Default;                        // Default function pass creator.
  MachinePassRegistryListener<PassCtorTy>
      *Listener; // Listener for list adds and removes.

public:
  // NO CONSTRUCTOR - we don't want static constructor ordering to mess
  // with the registry.

  // Accessors.
  //
  MachinePassRegistryNode<PassCtorTy> *getList() { return List; }
  PassCtorTy getDefault() { return Default; }
  void setDefault(PassCtorTy C) { Default = C; }
  /// setDefault - Set the default constructor by name.
  void setDefault(StringRef Name) {
    PassCtorTy Ctor = nullptr;
    for (MachinePassRegistryNode<PassCtorTy> *R = getList(); R;
         R = R->getNext()) {
      if (R->getName() == Name) {
        Ctor = R->getCtor();
        break;
      }
    }
    assert(Ctor && "Unregistered pass name");
    setDefault(Ctor);
  }
  void setListener(MachinePassRegistryListener<PassCtorTy> *L) { Listener = L; }

  /// Add - Adds a function pass to the registration list.
  ///
  void Add(MachinePassRegistryNode<PassCtorTy> *Node) {
    Node->setNext(List);
    List = Node;
    if (Listener)
      Listener->NotifyAdd(Node->getName(), Node->getCtor(),
                          Node->getDescription());
  }

  /// Remove - Removes a function pass from the registration list.
  ///
  void Remove(MachinePassRegistryNode<PassCtorTy> *Node) {
    for (MachinePassRegistryNode<PassCtorTy> **I = &List; *I;
         I = (*I)->getNextAddress()) {
      if (*I == Node) {
        if (Listener)
          Listener->NotifyRemove(Node->getName());
        *I = (*I)->getNext();
        break;
      }
    }
  }
};

//===----------------------------------------------------------------------===//
///
/// RegisterPassParser class - Handle the addition of new machine passes.
///
//===----------------------------------------------------------------------===//
template <class RegistryClass>
class RegisterPassParser
    : public MachinePassRegistryListener<
          typename RegistryClass::FunctionPassCtor>,
      public cl::parser<typename RegistryClass::FunctionPassCtor> {
public:
  RegisterPassParser(cl::Option &O)
      : cl::parser<typename RegistryClass::FunctionPassCtor>(O) {}
  ~RegisterPassParser() override { RegistryClass::setListener(nullptr); }

  void initialize() {
    cl::parser<typename RegistryClass::FunctionPassCtor>::initialize();

    // Add existing passes to option.
    for (RegistryClass *Node = RegistryClass::getList();
         Node; Node = Node->getNext()) {
      this->addLiteralOption(Node->getName(),
                      (typename RegistryClass::FunctionPassCtor)Node->getCtor(),
                             Node->getDescription());
    }

    // Make sure we listen for list changes.
    RegistryClass::setListener(this);
  }

  // Implement the MachinePassRegistryListener callbacks.
  void NotifyAdd(StringRef N, typename RegistryClass::FunctionPassCtor C,
                 StringRef D) override {
    this->addLiteralOption(N, C, D);
  }
  void NotifyRemove(StringRef N) override {
    this->removeLiteralOption(N);
  }
};
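// Illustrative usage sketch (not part of the original header): a registry is
// usually surfaced on the command line by combining the parser with cl::opt,
// e.g. for the register allocator registry declared in RegAllocRegistry.h:
//
//   static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
//                  RegisterPassParser<RegisterRegAlloc>>
//       RegAlloc("regalloc", cl::desc("Register allocator to use"));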

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEPASSREGISTRY_H
//===- FaultMaps.h - The "FaultMaps" section --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_FAULTMAPS_H
#define LLVM_CODEGEN_FAULTMAPS_H

#include "llvm/MC/MCSymbol.h"
#include <map>
#include <vector>

namespace llvm {

class AsmPrinter;
class MCExpr;

class FaultMaps {
public:
  enum FaultKind {
    FaultingLoad = 1,
    FaultingLoadStore,
    FaultingStore,
    FaultKindMax
  };

  explicit FaultMaps(AsmPrinter &AP);

  static const char *faultTypeToString(FaultKind);

  void recordFaultingOp(FaultKind FaultTy, const MCSymbol *FaultingLabel,
                        const MCSymbol *HandlerLabel);
  void serializeToFaultMapSection();
  void reset() {
    FunctionInfos.clear();
  }

private:
  static const char *WFMP;

  struct FaultInfo {
    FaultKind Kind = FaultKindMax;
    const MCExpr *FaultingOffsetExpr = nullptr;
    const MCExpr *HandlerOffsetExpr = nullptr;

    FaultInfo() = default;

    explicit FaultInfo(FaultMaps::FaultKind Kind, const MCExpr *FaultingOffset,
                       const MCExpr *HandlerOffset)
        : Kind(Kind), FaultingOffsetExpr(FaultingOffset),
          HandlerOffsetExpr(HandlerOffset) {}
  };

  using FunctionFaultInfos = std::vector<FaultInfo>;

  // We'd like to keep a stable iteration order for FunctionInfos to help
  // FileCheck based testing.
  struct MCSymbolComparator {
    bool operator()(const MCSymbol *LHS, const MCSymbol *RHS) const {
      return LHS->getName() < RHS->getName();
    }
  };

  std::map<const MCSymbol *, FunctionFaultInfos, MCSymbolComparator>
      FunctionInfos;
  AsmPrinter &AP;

  void emitFunctionInfo(const MCSymbol *FnLabel, const FunctionFaultInfos &FFI);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_FAULTMAPS_H
//===- llvm/CodeGen/StableHashing.h - Utilities for stable hashing -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides types and functions for computing and combining stable
// hashes. Stable hashes can be useful for hashing across different modules,
// processes, or compiler runs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_STABLEHASHING_H
#define LLVM_CODEGEN_STABLEHASHING_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

/// An opaque object representing a stable hash code. It can be serialized,
/// deserialized, and is stable across processes and executions.
using stable_hash = uint64_t;

// Implementation details
namespace hashing {
namespace detail {

// Stable hashes are based on the 64-bit FNV-1 hash:
// https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function

const uint64_t FNV_PRIME_64 = 1099511628211u;
const uint64_t FNV_OFFSET_64 = 14695981039346656037u;

inline void stable_hash_append(stable_hash &Hash, const char Value) {
  Hash = Hash ^ (Value & 0xFF);
  Hash = Hash * FNV_PRIME_64;
}

inline void stable_hash_append(stable_hash &Hash, stable_hash Value) {
  for (unsigned I = 0; I < 8; ++I) {
    stable_hash_append(Hash, static_cast<char>(Value));
    Value >>= 8;
  }
}

} // namespace detail
} // namespace hashing

inline stable_hash stable_hash_combine(stable_hash A, stable_hash B) {
  stable_hash Hash = hashing::detail::FNV_OFFSET_64;
  hashing::detail::stable_hash_append(Hash, A);
  hashing::detail::stable_hash_append(Hash, B);
  return Hash;
}

inline stable_hash stable_hash_combine(stable_hash A, stable_hash B,
                                       stable_hash C) {
  stable_hash Hash = hashing::detail::FNV_OFFSET_64;
  hashing::detail::stable_hash_append(Hash, A);
  hashing::detail::stable_hash_append(Hash, B);
  hashing::detail::stable_hash_append(Hash, C);
  return Hash;
}

inline stable_hash stable_hash_combine(stable_hash A, stable_hash B,
                                       stable_hash C, stable_hash D) {
  stable_hash Hash = hashing::detail::FNV_OFFSET_64;
  hashing::detail::stable_hash_append(Hash, A);
  hashing::detail::stable_hash_append(Hash, B);
  hashing::detail::stable_hash_append(Hash, C);
  hashing::detail::stable_hash_append(Hash, D);
  return Hash;
}

/// Compute a stable_hash for a sequence of values.
///
/// This hashes a sequence of values. It produces the same stable_hash as
/// 'stable_hash_combine(a, b, c, ...)', but can run over arbitrary sized
/// sequences and is significantly faster given pointers and types which
/// can be hashed as a sequence of bytes.
template <typename InputIteratorT>
stable_hash stable_hash_combine_range(InputIteratorT First,
                                      InputIteratorT Last) {
  stable_hash Hash = hashing::detail::FNV_OFFSET_64;
  for (auto I = First; I != Last; ++I)
    hashing::detail::stable_hash_append(Hash, *I);
  return Hash;
}

inline stable_hash stable_hash_combine_array(const stable_hash *P, size_t C) {
  stable_hash Hash = hashing::detail::FNV_OFFSET_64;
  for (size_t I = 0; I < C; ++I)
    hashing::detail::stable_hash_append(Hash, P[I]);
  return Hash;
}

inline stable_hash stable_hash_combine_string(const StringRef &S) {
  return stable_hash_combine_range(S.begin(), S.end());
}

inline stable_hash stable_hash_combine_string(const char *C) {
  stable_hash Hash = hashing::detail::FNV_OFFSET_64;
  while (*C)
    hashing::detail::stable_hash_append(Hash, *(C++));
  return Hash;
}
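// Illustrative usage sketch (not part of the original header): both string
// overloads fold the same byte sequence through FNV-1, so they agree, and
// their results can be combined further:
//
//   stable_hash A = stable_hash_combine_string("foo");
//   stable_hash B = stable_hash_combine_string(StringRef("foo"));
//   assert(A == B);
//   stable_hash C = stable_hash_combine(A, B); // stable across runs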

} // namespace llvm

#endif
//===- llvm/CodeGen/TileShapeInfo.h - ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Shape utility for AMX.
/// AMX hardware requires configuring the shape of a tile data register before
/// use. The 2D shape consists of a row and a column. In the AMX intrinsics
/// interface the shape is passed as the 1st and 2nd parameters, and it is
/// lowered as the 1st and 2nd machine operands of AMX pseudo instructions.
/// The ShapeT class facilitates tile configuration and register allocation.
/// The row and column are machine operands of AMX pseudo instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TILESHAPEINFO_H
#define LLVM_CODEGEN_TILESHAPEINFO_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"

namespace llvm {

class ShapeT {
public:
  ShapeT(MachineOperand *Row, MachineOperand *Col,
         const MachineRegisterInfo *MRI = nullptr)
      : Row(Row), Col(Col) {
    if (MRI)
      deduceImm(MRI);
  }
  ShapeT()
      : Row(nullptr), Col(nullptr), RowImm(InvalidImmShape),
        ColImm(InvalidImmShape) {}
  bool operator==(const ShapeT &Shape) const {
    MachineOperand *R = Shape.Row;
    MachineOperand *C = Shape.Col;
    if (!R || !C)
      return false;
    if (!Row || !Col)
      return false;
    if (Row->getReg() == R->getReg() && Col->getReg() == C->getReg())
      return true;
    if ((RowImm != InvalidImmShape) && (ColImm != InvalidImmShape))
      return RowImm == Shape.getRowImm() && ColImm == Shape.getColImm();
    return false;
  }

  bool operator!=(const ShapeT &Shape) const { return !(*this == Shape); }

  MachineOperand *getRow() const { return Row; }

  MachineOperand *getCol() const { return Col; }

  int64_t getRowImm() const { return RowImm; }

  int64_t getColImm() const { return ColImm; }

  bool isValid() { return (Row != nullptr) && (Col != nullptr); }

  void deduceImm(const MachineRegisterInfo *MRI) {
    // All defs must have the same value; otherwise the MIs are invalid.
    // Find the immediate.
    // TODO: copy propagation.
    auto GetImm = [&](Register Reg) {
      int64_t Imm = InvalidImmShape;
      for (const MachineOperand &DefMO : MRI->def_operands(Reg)) {
        const auto *MI = DefMO.getParent();
        if (MI->isMoveImmediate()) {
          Imm = MI->getOperand(1).getImm();
          break;
        }
      }
      return Imm;
    };
    RowImm = GetImm(Row->getReg());
    ColImm = GetImm(Col->getReg());
  }

private:
  static constexpr int64_t InvalidImmShape = -1;
  MachineOperand *Row;
  MachineOperand *Col;
  int64_t RowImm = -1;
  int64_t ColImm = -1;
};
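// Illustrative usage sketch (hypothetical names, not part of the original
// header): given the row and column shape operands RowMO/ColMO of an AMX
// pseudo, a shape can be built and its immediates queried once deduced:
//
//   ShapeT Shape(&RowMO, &ColMO, &MRI);
//   if (Shape.isValid() && Shape.getRowImm() != -1 && Shape.getColImm() != -1)
//     ; // both shape operands were defined by move-immediates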

} // namespace llvm

#endif
//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SelectionDAGISel class, which is used as the common
// base class for SelectionDAG-based instruction selectors.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SELECTIONDAGISEL_H
#define LLVM_CODEGEN_SELECTIONDAGISEL_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/BasicBlock.h"
#include <memory>

namespace llvm {
class AAResults;
class AssumptionCache;
class TargetInstrInfo;
class TargetMachine;
class SelectionDAGBuilder;
class SDValue;
class MachineRegisterInfo;
class MachineFunction;
class OptimizationRemarkEmitter;
class TargetLowering;
class TargetLibraryInfo;
class FunctionLoweringInfo;
class SwiftErrorValueTracking;
class GCFunctionInfo;
class ScheduleDAGSDNodes;

/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
/// pattern-matching instruction selectors.
class SelectionDAGISel : public MachineFunctionPass {
public:
  TargetMachine &TM;
  const TargetLibraryInfo *LibInfo;
  std::unique_ptr<FunctionLoweringInfo> FuncInfo;
  SwiftErrorValueTracking *SwiftError;
  MachineFunction *MF;
  MachineRegisterInfo *RegInfo;
  SelectionDAG *CurDAG;
  std::unique_ptr<SelectionDAGBuilder> SDB;
  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  GCFunctionInfo *GFI = nullptr;
  CodeGenOpt::Level OptLevel;
  const TargetInstrInfo *TII;
  const TargetLowering *TLI;
  bool FastISelFailed;
  SmallPtrSet<const Instruction *, 4> ElidedArgCopyInstrs;

  /// Current optimization remark emitter.
  /// Used to report things like combines and FastISel failures.
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

  explicit SelectionDAGISel(char &ID, TargetMachine &tm,
                            CodeGenOpt::Level OL = CodeGenOpt::Default);
  ~SelectionDAGISel() override;

  const TargetLowering *getTargetLowering() const { return TLI; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  virtual void emitFunctionEntryCode() {}

  /// PreprocessISelDAG - This hook allows targets to hack on the graph before
  /// instruction selection starts.
  virtual void PreprocessISelDAG() {}

  /// PostprocessISelDAG() - This hook allows the target to hack on the graph
  /// right after selection.
  virtual void PostprocessISelDAG() {}

  /// Main hook for targets to transform nodes into machine nodes.
  virtual void Select(SDNode *N) = 0;

  /// SelectInlineAsmMemoryOperand - Select the specified address as a target
  /// addressing mode, according to the specified constraint.  If this does
  /// not match or is not implemented, return true.  The resultant operands
  /// (which will appear in the machine instruction) should be added to the
  /// OutOps vector.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            unsigned ConstraintID,
                                            std::vector<SDValue> &OutOps) {
    return true;
  }

  /// IsProfitableToFold - Returns true if it's profitable to fold the specific
  /// operand node N of U during instruction selection that starts at Root.
  virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

  /// IsLegalToFold - Returns true if the specific operand node N of
  /// U can be folded during instruction selection that starts at Root.
  /// FIXME: This is a static member function because the MSP430 and X86
  /// targets use it during isel.  This could become a proper member.
  static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
                            CodeGenOpt::Level OptLevel,
                            bool IgnoreChains = false);

  static void InvalidateNodeId(SDNode *N);
  static int getUninvalidatedNodeId(SDNode *N);

  static void EnforceNodeIdInvariant(SDNode *N);

  // Opcodes used by the DAG state machine:
  enum BuiltinOpcodes {
    OPC_Scope,
    OPC_RecordNode,
    OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
    OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
    OPC_RecordMemRef,
    OPC_CaptureGlueInput,
    OPC_MoveChild,
    OPC_MoveChild0, OPC_MoveChild1, OPC_MoveChild2, OPC_MoveChild3,
    OPC_MoveChild4, OPC_MoveChild5, OPC_MoveChild6, OPC_MoveChild7,
    OPC_MoveParent,
    OPC_CheckSame,
    OPC_CheckChild0Same, OPC_CheckChild1Same,
    OPC_CheckChild2Same, OPC_CheckChild3Same,
    OPC_CheckPatternPredicate,
    OPC_CheckPredicate,
    OPC_CheckPredicateWithOperands,
    OPC_CheckOpcode,
    OPC_SwitchOpcode,
    OPC_CheckType,
    OPC_CheckTypeRes,
    OPC_SwitchType,
    OPC_CheckChild0Type, OPC_CheckChild1Type, OPC_CheckChild2Type,
    OPC_CheckChild3Type, OPC_CheckChild4Type, OPC_CheckChild5Type,
    OPC_CheckChild6Type, OPC_CheckChild7Type,
    OPC_CheckInteger,
    OPC_CheckChild0Integer, OPC_CheckChild1Integer, OPC_CheckChild2Integer,
    OPC_CheckChild3Integer, OPC_CheckChild4Integer,
    OPC_CheckCondCode, OPC_CheckChild2CondCode,
    OPC_CheckValueType,
    OPC_CheckComplexPat,
    OPC_CheckAndImm, OPC_CheckOrImm,
    OPC_CheckImmAllOnesV,
    OPC_CheckImmAllZerosV,
    OPC_CheckFoldableChainNode,

    OPC_EmitInteger,
    OPC_EmitStringInteger,
    OPC_EmitRegister,
    OPC_EmitRegister2,
    OPC_EmitConvertToTarget,
    OPC_EmitMergeInputChains,
    OPC_EmitMergeInputChains1_0,
    OPC_EmitMergeInputChains1_1,
    OPC_EmitMergeInputChains1_2,
    OPC_EmitCopyToReg,
    OPC_EmitCopyToReg2,
    OPC_EmitNodeXForm,
    OPC_EmitNode,
    // Space-optimized forms that implicitly encode number of result VTs.
    OPC_EmitNode0, OPC_EmitNode1, OPC_EmitNode2,
    OPC_MorphNodeTo,
    // Space-optimized forms that implicitly encode number of result VTs.
    OPC_MorphNodeTo0, OPC_MorphNodeTo1, OPC_MorphNodeTo2,
    OPC_CompleteMatch,
    // Contains offset in table for pattern being selected
    OPC_Coverage
  };

  enum {
    OPFL_None       = 0,  // Node has no chain or glue input and isn't variadic.
    OPFL_Chain      = 1,     // Node has a chain input.
    OPFL_GlueInput  = 2,     // Node has a glue input.
    OPFL_GlueOutput = 4,     // Node has a glue output.
    OPFL_MemRefs    = 8,     // Node gets accumulated MemRefs.
    OPFL_Variadic0  = 1<<4,  // Node is variadic, root has 0 fixed inputs.
    OPFL_Variadic1  = 2<<4,  // Node is variadic, root has 1 fixed input.
    OPFL_Variadic2  = 3<<4,  // Node is variadic, root has 2 fixed inputs.
    OPFL_Variadic3  = 4<<4,  // Node is variadic, root has 3 fixed inputs.
    OPFL_Variadic4  = 5<<4,  // Node is variadic, root has 4 fixed inputs.
    OPFL_Variadic5  = 6<<4,  // Node is variadic, root has 5 fixed inputs.
    OPFL_Variadic6  = 7<<4,  // Node is variadic, root has 6 fixed inputs.

    OPFL_VariadicInfo = OPFL_Variadic6
  };

  /// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
  /// number of fixed arity values that should be skipped when copying from the
  /// root.
  static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
    return ((Flags&OPFL_VariadicInfo) >> 4)-1;
  }
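
  // Illustrative worked example (not part of the original header): for
  // Flags = OPFL_Chain | OPFL_Variadic2 the variadic field holds 3 << 4, so
  // getNumFixedFromVariadicInfo(Flags) == (3 - 1) == 2, i.e. two fixed
  // operands are skipped before copying the variadic tail from the root.
  //
  //   unsigned Flags = OPFL_Chain | OPFL_Variadic2; // == 1 | (3 << 4)
  //   assert(getNumFixedFromVariadicInfo(Flags) == 2);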


protected:
  /// DAGSize - Size of DAG being instruction selected.
  ///
  unsigned DAGSize = 0;

  /// ReplaceUses - replace all uses of the old node F with the use
  /// of the new node T.
  void ReplaceUses(SDValue F, SDValue T) {
    CurDAG->ReplaceAllUsesOfValueWith(F, T);
    EnforceNodeIdInvariant(T.getNode());
  }

  /// ReplaceUses - replace all uses of the old nodes F with the use
  /// of the new nodes T.
  void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
    CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
    for (unsigned i = 0; i < Num; ++i)
      EnforceNodeIdInvariant(T[i].getNode());
  }

  /// ReplaceUses - replace all uses of the old node F with the use
  /// of the new node T.
  void ReplaceUses(SDNode *F, SDNode *T) {
    CurDAG->ReplaceAllUsesWith(F, T);
    EnforceNodeIdInvariant(T);
  }

  /// Replace all uses of \c F with \c T, then remove \c F from the DAG.
  void ReplaceNode(SDNode *F, SDNode *T) {
    CurDAG->ReplaceAllUsesWith(F, T);
    EnforceNodeIdInvariant(T);
    CurDAG->RemoveDeadNode(F);
  }

  /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
  /// by tblgen.  Others should not call it.
  void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
                                     const SDLoc &DL);

  /// getPatternForIndex - Patterns selected by tablegen during ISEL
  virtual StringRef getPatternForIndex(unsigned index) {
    llvm_unreachable("Tblgen should generate the implementation of this!");
  }

  /// getIncludePathForIndex - get the .td source location of the pattern
  /// instantiation.
  virtual StringRef getIncludePathForIndex(unsigned index) {
    llvm_unreachable("Tblgen should generate the implementation of this!");
  }

  bool shouldOptForSize(const MachineFunction *MF) const {
    return CurDAG->shouldOptForSize();
  }

public:
  // Calls to these predicates are generated by tblgen.
  bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
                    int64_t DesiredMaskS) const;
  bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
                    int64_t DesiredMaskS) const;


  /// CheckPatternPredicate - This function is generated by tblgen in the
  /// target.  It runs the specified pattern predicate and returns true if it
  /// succeeds or false if it fails.  The number is a private implementation
  /// detail to the code tblgen produces.
  virtual bool CheckPatternPredicate(unsigned PredNo) const {
    llvm_unreachable("Tblgen should generate the implementation of this!");
  }

  /// CheckNodePredicate - This function is generated by tblgen in the target.
  /// It runs node predicate number PredNo and returns true if it succeeds or
  /// false if it fails.  The number is a private implementation
  /// detail to the code tblgen produces.
  virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
    llvm_unreachable("Tblgen should generate the implementation of this!");
  }

  /// CheckNodePredicateWithOperands - This function is generated by tblgen in
  /// the target.
  /// It runs node predicate number PredNo and returns true if it succeeds or
  /// false if it fails.  The number is a private implementation detail to the
  /// code tblgen produces.
  virtual bool CheckNodePredicateWithOperands(
      SDNode *N, unsigned PredNo,
      const SmallVectorImpl<SDValue> &Operands) const {
    llvm_unreachable("Tblgen should generate the implementation of this!");
  }

  virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
                                   unsigned PatternNo,
                        SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
    llvm_unreachable("Tblgen should generate the implementation of this!");
  }

  virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
    llvm_unreachable("Tblgen should generate this!");
  }

  void SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
                        unsigned TableSize);

  /// Return true if complex patterns for this target can mutate the
  /// DAG.
  virtual bool ComplexPatternFuncMutatesDAG() const {
    return false;
  }

  /// Return whether the node may raise an FP exception.
  bool mayRaiseFPException(SDNode *Node) const;

  bool isOrEquivalentToAdd(const SDNode *N) const;

private:

  // Calls to these functions are generated by tblgen.
  void Select_INLINEASM(SDNode *N);
  void Select_READ_REGISTER(SDNode *Op);
  void Select_WRITE_REGISTER(SDNode *Op);
  void Select_UNDEF(SDNode *N);
  void CannotYetSelect(SDNode *N);

  void Select_FREEZE(SDNode *N);
  void Select_ARITH_FENCE(SDNode *N);
  void Select_MEMBARRIER(SDNode *N);

  void pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops, SDValue Operand,
                                SDLoc DL);
  void Select_STACKMAP(SDNode *N);
  void Select_PATCHPOINT(SDNode *N);

private:
  void DoInstructionSelection();
  SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
                    ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);

  /// Prepares the landing pad to take incoming values or do other EH
  /// personality specific tasks. Returns true if the block should be
  /// instruction selected, false if no code should be emitted for it.
  bool PrepareEHLandingPad();

  // Mark and Report IPToState for each Block under AsynchEH
  void reportIPToStateForBlocks(MachineFunction *Fn);

  /// Perform instruction selection on all basic blocks in the function.
  void SelectAllBasicBlocks(const Function &Fn);

  /// Perform instruction selection on a single basic block, for
  /// instructions between \p Begin and \p End.  \p HadTailCall will be set
  /// to true if a call in the block was translated as a tail call.
  void SelectBasicBlock(BasicBlock::const_iterator Begin,
                        BasicBlock::const_iterator End,
                        bool &HadTailCall);
  void FinishBasicBlock();

  void CodeGenAndEmitDAG();

  /// Generate instructions for lowering the incoming arguments of the
  /// given function.
  void LowerArguments(const Function &F);

  void ComputeLiveOutVRegInfo();

  /// Create the scheduler. If a specific scheduler was specified
  /// via the SchedulerRegistry, use it, otherwise select the
  /// one preferred by the target.
  ///
  ScheduleDAGSDNodes *CreateScheduler();

  /// OpcodeOffset - This is a cache used to dispatch efficiently into isel
  /// state machines that start with an OPC_SwitchOpcode node.
  std::vector<unsigned> OpcodeOffset;

  void UpdateChains(SDNode *NodeToMatch, SDValue InputChain,
                    SmallVectorImpl<SDNode *> &ChainNodesMatched,
                    bool isMorphNodeTo);
};

}

#endif /* LLVM_CODEGEN_SELECTIONDAGISEL_H */
//===- RegisterClassInfo.h - Dynamic Register Class Info --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the RegisterClassInfo class which provides dynamic
// information about target register classes. Callee saved and reserved
// registers depend on calling conventions and other dynamic information, so
// some things cannot be determined statically.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERCLASSINFO_H
#define LLVM_CODEGEN_REGISTERCLASSINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/MCRegister.h"
#include <cstdint>
#include <memory>

namespace llvm {

class RegisterClassInfo {
  struct RCInfo {
    unsigned Tag = 0;
    unsigned NumRegs = 0;
    bool ProperSubClass = false;
    uint8_t MinCost = 0;
    uint16_t LastCostChange = 0;
    std::unique_ptr<MCPhysReg[]> Order;

    RCInfo() = default;

    operator ArrayRef<MCPhysReg>() const {
      return ArrayRef(Order.get(), NumRegs);
    }
  };

  // Brief cached information for each register class.
  std::unique_ptr<RCInfo[]> RegClass;

  // Tag changes whenever cached information needs to be recomputed. An RCInfo
  // entry is valid when its tag matches.
  unsigned Tag = 0;

  const MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;

  // Callee saved registers of last MF.
  // Used only to determine if an update for CalleeSavedAliases is necessary.
  SmallVector<MCPhysReg, 16> LastCalleeSavedRegs;

  // Map register alias to the callee saved Register.
  SmallVector<MCPhysReg, 4> CalleeSavedAliases;

  // Indicates whether a specified callee saved register should be in the
  // allocation order exactly as written in the tablegen descriptions, or be
  // listed later.
  BitVector IgnoreCSRForAllocOrder;

  // Reserved registers in the current MF.
  BitVector Reserved;

  std::unique_ptr<unsigned[]> PSetLimits;

  // The register cost values.
  ArrayRef<uint8_t> RegCosts;

  // Compute all information about RC.
  void compute(const TargetRegisterClass *RC) const;

  // Return an up-to-date RCInfo for RC.
  const RCInfo &get(const TargetRegisterClass *RC) const {
    const RCInfo &RCI = RegClass[RC->getID()];
    if (Tag != RCI.Tag)
      compute(RC);
    return RCI;
  }

public:
  RegisterClassInfo();

  /// runOnFunction - Prepare to answer questions about MF. This must be called
  /// before any other methods are used.
  void runOnMachineFunction(const MachineFunction &MF);

  /// getNumAllocatableRegs - Returns the number of actually allocatable
  /// registers in RC in the current function.
  unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const {
    return get(RC).NumRegs;
  }

  /// getOrder - Returns the preferred allocation order for RC. The order
  /// contains no reserved registers, and registers that alias callee saved
  /// registers come last.
  ArrayRef<MCPhysReg> getOrder(const TargetRegisterClass *RC) const {
    return get(RC);
  }

  /// isProperSubClass - Returns true if RC has a legal super-class with more
  /// allocatable registers.
  ///
  /// Register classes like GR32_NOSP are not proper sub-classes because %esp
  /// is not allocatable.  Similarly, tGPR is not a proper sub-class in Thumb
  /// mode because the GPR super-class is not legal.
  bool isProperSubClass(const TargetRegisterClass *RC) const {
    return get(RC).ProperSubClass;
  }

  /// getLastCalleeSavedAlias - Returns the last callee saved register that
  /// overlaps PhysReg, or NoRegister if PhysReg doesn't overlap any
  /// callee saved register.
  MCRegister getLastCalleeSavedAlias(MCRegister PhysReg) const {
    if (PhysReg.id() < CalleeSavedAliases.size())
      return CalleeSavedAliases[PhysReg];
    return MCRegister::NoRegister;
  }

  /// Get the minimum register cost in RC's allocation order.
  /// This is the smallest value in RegCosts[Reg] for all
  /// the registers in getOrder(RC).
  uint8_t getMinCost(const TargetRegisterClass *RC) const {
    return get(RC).MinCost;
  }

  /// Get the position of the last cost change in getOrder(RC).
  ///
  /// All registers in getOrder(RC).slice(getLastCostChange(RC)) will have the
  /// same cost according to RegCosts[Reg].
  unsigned getLastCostChange(const TargetRegisterClass *RC) const {
    return get(RC).LastCostChange;
  }

  /// Get the register unit limit for the given pressure set index.
  ///
  /// RegisterClassInfo adjusts this limit for reserved registers.
  unsigned getRegPressureSetLimit(unsigned Idx) const {
    if (!PSetLimits[Idx])
      PSetLimits[Idx] = computePSetLimit(Idx);
    return PSetLimits[Idx];
  }

protected:
  unsigned computePSetLimit(unsigned Idx) const;
};
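// Illustrative usage sketch (not part of the original header): typical
// per-function use inside a machine pass, with MF and RC assumed in scope:
//
//   RegisterClassInfo RCI;
//   RCI.runOnMachineFunction(MF); // refresh cached info for this function
//   for (MCPhysReg PhysReg : RCI.getOrder(RC)) {
//     // Reserved registers are filtered out; CSR aliases come last.
//   }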

} // end namespace llvm

#endif // LLVM_CODEGEN_REGISTERCLASSINFO_H
//==-- llvm/CodeGen/ExecutionDomainFix.h - Execution Domain Fix -*- C++ -*--==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Execution Domain Fix pass.
///
/// Some X86 SSE instructions like mov, and, or, xor are available in different
/// variants for different operand types. These variant instructions are
/// equivalent, but on Nehalem and newer CPUs there is extra latency
/// transferring data between integer and floating point domains.  ARM cores
/// have similar issues when they are configured with both VFP and NEON
/// pipelines.
///
/// This pass changes the variant instructions to minimize domain crossings.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
#define LLVM_CODEGEN_EXECUTIONDOMAINFIX_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LoopTraversal.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/ReachingDefAnalysis.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

namespace llvm {

class MachineInstr;
class TargetInstrInfo;

/// A DomainValue is a bit like LiveIntervals' ValNo, but it also keeps track
/// of execution domains.
///
/// An open DomainValue represents a set of instructions that can still switch
/// execution domain. Multiple registers may refer to the same open
/// DomainValue - they will eventually be collapsed to the same execution
/// domain.
///
/// A collapsed DomainValue represents a single register that has been forced
/// into one or more execution domains. There is a separate collapsed
/// DomainValue for each register, but it may contain multiple execution
/// domains. A register value is initially created in a single execution
/// domain, but if we were forced to pay the penalty of a domain crossing, we
/// keep track of the fact that the register is now available in multiple
/// domains.
struct DomainValue {
  /// Basic reference counting.
  unsigned Refs = 0;

  /// Bitmask of available domains. For an open DomainValue, it is the still
  /// possible domains for collapsing. For a collapsed DomainValue it is the
  /// domains where the register is available for free.
  unsigned AvailableDomains;

  /// Pointer to the next DomainValue in a chain.  When two DomainValues are
  /// merged, Victim.Next is set to point to Victor, so old DomainValue
  /// references can be updated by following the chain.
  DomainValue *Next;

  /// Twiddleable instructions using or defining these registers.
  SmallVector<MachineInstr *, 8> Instrs;

  DomainValue() { clear(); }

  /// A collapsed DomainValue has no instructions to twiddle - it simply keeps
  /// track of the domains where the registers are already available.
  bool isCollapsed() const { return Instrs.empty(); }

  /// Is domain available?
  bool hasDomain(unsigned domain) const {
    assert(domain <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    return AvailableDomains & (1u << domain);
  }

  /// Mark domain as available.
  void addDomain(unsigned domain) {
    assert(domain <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    AvailableDomains |= 1u << domain;
  }

  // Restrict to a single domain available.
  void setSingleDomain(unsigned domain) {
    assert(domain <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    AvailableDomains = 1u << domain;
  }

  /// Return bitmask of domains that are available and in mask.
  unsigned getCommonDomains(unsigned mask) const {
    return AvailableDomains & mask;
  }

  /// First domain available.
  unsigned getFirstDomain() const {
    return llvm::countr_zero(AvailableDomains);
  }

  /// Clear this DomainValue and point to next which has all its data.
  void clear() {
    AvailableDomains = 0;
    Next = nullptr;
    Instrs.clear();
  }
};
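// Illustrative sketch (not part of the original header): the domain
// bookkeeping is plain bit twiddling over AvailableDomains:
//
//   DomainValue DV;
//   DV.addDomain(0);                                // e.g. integer domain
//   DV.addDomain(1);                                // e.g. floating point
//   unsigned Common = DV.getCommonDomains(1u << 1); // == 1u << 1
//   unsigned First = DV.getFirstDomain();           // == 0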

class ExecutionDomainFix : public MachineFunctionPass {
  SpecificBumpPtrAllocator<DomainValue> Allocator;
  SmallVector<DomainValue *, 16> Avail;

  const TargetRegisterClass *const RC;
  MachineFunction *MF = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  std::vector<SmallVector<int, 1>> AliasMap;
  const unsigned NumRegs;
  /// Value currently in each register, or NULL when no value is being tracked.
  /// This counts as a DomainValue reference.
  using LiveRegsDVInfo = std::vector<DomainValue *>;
  LiveRegsDVInfo LiveRegs;
  /// Keeps domain information for all registers. Note that this is different
  /// from the usual definition of liveness: the CPU doesn't care whether or
  /// not we consider a register killed.
  using OutRegsInfoMap = SmallVector<LiveRegsDVInfo, 4>;
  OutRegsInfoMap MBBOutRegsInfos;

  ReachingDefAnalysis *RDA = nullptr;

public:
  ExecutionDomainFix(char &PassID, const TargetRegisterClass &RC)
      : MachineFunctionPass(PassID), RC(&RC), NumRegs(RC.getNumRegs()) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<ReachingDefAnalysis>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  /// Translate TRI register number to a list of indices into our smaller tables
  /// of interesting registers.
  iterator_range<SmallVectorImpl<int>::const_iterator>
  regIndices(unsigned Reg) const;

  /// DomainValue allocation.
  DomainValue *alloc(int domain = -1);

  /// Add reference to DV.
  DomainValue *retain(DomainValue *DV) {
    if (DV)
      ++DV->Refs;
    return DV;
  }

  /// Release a reference to DV.  When the last reference is released,
  /// collapse if needed.
  void release(DomainValue *);

  /// Follow the chain of dead DomainValues until a live DomainValue is reached.
  /// Update the referenced pointer when necessary.
  DomainValue *resolve(DomainValue *&);

  /// Set LiveRegs[rx] = dv, updating reference counts.
  void setLiveReg(int rx, DomainValue *DV);

  /// Kill register rx, recycle or collapse any DomainValue.
  void kill(int rx);

  /// Force register rx into domain.
  void force(int rx, unsigned domain);

  /// Collapse open DomainValue into given domain. If there are multiple
  /// registers using dv, they each get a unique collapsed DomainValue.
  void collapse(DomainValue *dv, unsigned domain);

  /// All instructions and registers in B are moved to A, and B is released.
  bool merge(DomainValue *A, DomainValue *B);

  /// Set up LiveRegs by merging predecessor live-out values.
  void enterBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);

  /// Update live-out values.
  void leaveBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);

  /// Process the given basic block.
  void processBasicBlock(const LoopTraversal::TraversedMBBInfo &TraversedMBB);

  /// Visit the given instruction.
  bool visitInstr(MachineInstr *);

  /// Update def-ages for registers defined by MI.
  /// If Kill is set, also kill off DomainValues clobbered by the defs.
  void processDefs(MachineInstr *, bool Kill);

  /// A soft instruction can be changed to work in other domains given by mask.
  void visitSoftInstr(MachineInstr *, unsigned mask);

  /// A hard instruction only works in one domain. All input registers will be
  /// forced into that domain.
  void visitHardInstr(MachineInstr *, unsigned domain);
};
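// Illustrative sketch (hypothetical names, not part of the original header):
// a target enables the pass by deriving from it and naming the register class
// whose instructions may switch domains:
//
//   struct MyVecDomainFix : public ExecutionDomainFix {
//     static char ID;
//     MyVecDomainFix() : ExecutionDomainFix(ID, MyTarget::VecRegClass) {}
//   };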

} // namespace llvm

#endif // LLVM_CODEGEN_EXECUTIONDOMAINFIX_H
//===- TypePromotion.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Defines an IR pass for type promotion.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TYPEPROMOTION_H
#define LLVM_CODEGEN_TYPEPROMOTION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;
class TargetMachine;

class TypePromotionPass : public PassInfoMixin<TypePromotionPass> {
private:
  const TargetMachine *TM;

public:
  TypePromotionPass(const TargetMachine *TM): TM(TM) { }
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TYPEPROMOTION_H
//===- llvm/CodeGen/RegAllocRegistry.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for register allocator function
// pass registry (RegisterRegAlloc).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCREGISTRY_H
#define LLVM_CODEGEN_REGALLOCREGISTRY_H

#include "llvm/CodeGen/RegAllocCommon.h"
#include "llvm/CodeGen/MachinePassRegistry.h"

namespace llvm {

class FunctionPass;

//===----------------------------------------------------------------------===//
///
/// RegisterRegAllocBase class - Track the registration of register allocators.
///
//===----------------------------------------------------------------------===//
template <class SubClass>
class RegisterRegAllocBase : public MachinePassRegistryNode<FunctionPass *(*)()> {
public:
  using FunctionPassCtor = FunctionPass *(*)();

  static MachinePassRegistry<FunctionPassCtor> Registry;

  RegisterRegAllocBase(const char *N, const char *D, FunctionPassCtor C)
      : MachinePassRegistryNode(N, D, C) {
    Registry.Add(this);
  }

  ~RegisterRegAllocBase() { Registry.Remove(this); }

  // Accessors.
  SubClass *getNext() const {
    return static_cast<SubClass *>(MachinePassRegistryNode::getNext());
  }

  static SubClass *getList() {
    return static_cast<SubClass *>(Registry.getList());
  }

  static FunctionPassCtor getDefault() { return Registry.getDefault(); }

  static void setDefault(FunctionPassCtor C) { Registry.setDefault(C); }

  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
    Registry.setListener(L);
  }
};

class RegisterRegAlloc : public RegisterRegAllocBase<RegisterRegAlloc> {
public:
  RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
    : RegisterRegAllocBase(N, D, C) {}
};
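// Illustrative usage sketch (hypothetical names, not part of the original
// header): an allocator registers itself with a static object, after which it
// appears as a -regalloc=<name> choice:
//
//   static FunctionPass *createMyRegAlloc() { return new MyRegAlloc(); }
//   static RegisterRegAlloc MyRA("myra", "my register allocator",
//                                createMyRegAlloc);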

/// RegisterRegAlloc's global Registry tracks allocator registration.
template <class T>
MachinePassRegistry<typename RegisterRegAllocBase<T>::FunctionPassCtor>
RegisterRegAllocBase<T>::Registry;

} // end namespace llvm

#endif // LLVM_CODEGEN_REGALLOCREGISTRY_H
#ifndef LLVM_CODEGEN_ASSIGNMENTTRACKINGANALYSIS_H
#define LLVM_CODEGEN_ASSIGNMENTTRACKINGANALYSIS_H

#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Pass.h"

namespace llvm {
class Function;
class Instruction;
class raw_ostream;
} // namespace llvm
class FunctionVarLocsBuilder;

namespace llvm {
/// Type wrapper for integer ID for Variables. 0 is reserved.
enum class VariableID : unsigned { Reserved = 0 };
/// Variable location definition used by FunctionVarLocs.
struct VarLocInfo {
  llvm::VariableID VariableID;
  DIExpression *Expr = nullptr;
  DebugLoc DL;
  RawLocationWrapper Values = RawLocationWrapper();
};

/// Data structure describing the variable locations in a function. Used as the
/// result of the AssignmentTrackingAnalysis pass. Essentially read-only
/// outside of AssignmentTrackingAnalysis where it is built.
class FunctionVarLocs {
  /// Maps VarLocInfo.VariableID to a DebugVariable for VarLocRecords.
  SmallVector<DebugVariable> Variables;
  /// List of variable location changes grouped by the instruction the
  /// change occurs before (see VarLocsBeforeInst). The elements from
  /// zero to SingleVarLocEnd represent variables with a single location.
  SmallVector<VarLocInfo> VarLocRecords;
  /// End of range of VarLocRecords that represent variables with a single
  /// location that is valid for the entire scope. Range starts at 0.
  unsigned SingleVarLocEnd = 0;
  /// Maps an instruction to a range of VarLocs that start just before it.
  DenseMap<const Instruction *, std::pair<unsigned, unsigned>>
      VarLocsBeforeInst;

public:
  /// Return the DILocalVariable for the location definition represented by \p
  /// ID.
  DILocalVariable *getDILocalVariable(const VarLocInfo *Loc) const {
    VariableID VarID = Loc->VariableID;
    return getDILocalVariable(VarID);
  }
  /// Return the DILocalVariable of the variable represented by \p ID.
  DILocalVariable *getDILocalVariable(VariableID ID) const {
    return const_cast<DILocalVariable *>(getVariable(ID).getVariable());
  }
  /// Return the DebugVariable represented by \p ID.
  const DebugVariable &getVariable(VariableID ID) const {
    return Variables[static_cast<unsigned>(ID)];
  }

  ///@name iterators
  ///@{
  /// First single-location variable location definition.
  const VarLocInfo *single_locs_begin() const { return VarLocRecords.begin(); }
  /// One past the last single-location variable location definition.
  const VarLocInfo *single_locs_end() const {
    const auto *It = VarLocRecords.begin();
    std::advance(It, SingleVarLocEnd);
    return It;
  }
  /// First variable location definition that comes before \p Before.
  const VarLocInfo *locs_begin(const Instruction *Before) const {
    auto Span = VarLocsBeforeInst.lookup(Before);
    const auto *It = VarLocRecords.begin();
    std::advance(It, Span.first);
    return It;
  }
  /// One past the last variable location definition that comes before \p
  /// Before.
  const VarLocInfo *locs_end(const Instruction *Before) const {
    auto Span = VarLocsBeforeInst.lookup(Before);
    const auto *It = VarLocRecords.begin();
    std::advance(It, Span.second);
    return It;
  }
  ///@}

  void print(raw_ostream &OS, const Function &Fn) const;

  ///@{
  /// Non-const methods used by AssignmentTrackingAnalysis (which invalidate
  /// analysis results if called incorrectly).
  void init(FunctionVarLocsBuilder &Builder);
  void clear();
  ///@}
};
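// Illustrative usage sketch (not part of the original header): consuming the
// analysis result for one instruction I, with FnVarLocs assumed in scope:
//
//   for (const VarLocInfo *Loc = FnVarLocs->locs_begin(&I),
//                         *End = FnVarLocs->locs_end(&I);
//        Loc != End; ++Loc) {
//     DILocalVariable *Var = FnVarLocs->getDILocalVariable(Loc);
//     // Loc->Expr and Loc->Values describe the location just before I.
//   }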

class AssignmentTrackingAnalysis : public FunctionPass {
  std::unique_ptr<FunctionVarLocs> Results;

public:
  static char ID;

  AssignmentTrackingAnalysis();

  bool runOnFunction(Function &F) override;

  static bool isRequired() { return true; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  const FunctionVarLocs *getResults() { return Results.get(); }
};

} // end namespace llvm
#endif // LLVM_CODEGEN_ASSIGNMENTTRACKINGANALYSIS_H
//===- llvm/CodeGen/LiveRegUnits.h - Register Unit Set ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// A set of register units. It is intended for register liveness tracking.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEREGUNITS_H
#define LLVM_CODEGEN_LIVEREGUNITS_H

#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include <cstdint>

namespace llvm {

class MachineInstr;
class MachineBasicBlock;

/// A set of register units used to track register liveness.
class LiveRegUnits {
  const TargetRegisterInfo *TRI = nullptr;
  BitVector Units;

public:
  /// Constructs a new empty LiveRegUnits set.
  LiveRegUnits() = default;

  /// Constructs and initializes an empty LiveRegUnits set.
  LiveRegUnits(const TargetRegisterInfo &TRI) {
    init(TRI);
  }

  /// For a machine instruction \p MI, adds all register units used in
  /// \p UsedRegUnits and defined or clobbered in \p ModifiedRegUnits. This is
  /// useful when walking over a range of instructions to track registers
  /// used or defined separately.
  static void accumulateUsedDefed(const MachineInstr &MI,
                                  LiveRegUnits &ModifiedRegUnits,
                                  LiveRegUnits &UsedRegUnits,
                                  const TargetRegisterInfo *TRI) {
    for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
      if (O->isRegMask())
        ModifiedRegUnits.addRegsInMask(O->getRegMask());
      if (!O->isReg())
        continue;
      Register Reg = O->getReg();
      if (!Reg.isPhysical())
        continue;
      if (O->isDef()) {
        // Some architectures (e.g. AArch64 XZR/WZR) have registers that are
        // constant and may be used as destinations to indicate the generated
        // value is discarded. No need to track such a case as a def.
        if (!TRI->isConstantPhysReg(Reg))
          ModifiedRegUnits.addReg(Reg);
      } else {
        assert(O->isUse() && "Reg operand not a def and not a use");
        UsedRegUnits.addReg(Reg);
      }
    }
  }

  /// Initialize and clear the set.
  void init(const TargetRegisterInfo &TRI) {
    this->TRI = &TRI;
    Units.reset();
    Units.resize(TRI.getNumRegUnits());
  }

  /// Clears the set.
  void clear() { Units.reset(); }

  /// Returns true if the set is empty.
  bool empty() const { return Units.none(); }

  /// Adds register units covered by physical register \p Reg.
  void addReg(MCPhysReg Reg) {
    for (MCRegUnit Unit : TRI->regunits(Reg))
      Units.set(Unit);
  }

  /// Adds register units covered by physical register \p Reg that are
  /// part of the lanemask \p Mask.
  void addRegMasked(MCPhysReg Reg, LaneBitmask Mask) {
    for (MCRegUnitMaskIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) {
      LaneBitmask UnitMask = (*Unit).second;
      if (UnitMask.none() || (UnitMask & Mask).any())
        Units.set((*Unit).first);
    }
  }

  /// Removes all register units covered by physical register \p Reg.
  void removeReg(MCPhysReg Reg) {
    for (MCRegUnit Unit : TRI->regunits(Reg))
      Units.reset(Unit);
  }

  /// Removes register units not preserved by the regmask \p RegMask.
  /// The regmask has the same format as the one in the RegMask machine operand.
  void removeRegsNotPreserved(const uint32_t *RegMask);

  /// Adds register units not preserved by the regmask \p RegMask.
  /// The regmask has the same format as the one in the RegMask machine operand.
  void addRegsInMask(const uint32_t *RegMask);

  /// Returns true if no part of physical register \p Reg is live.
  bool available(MCPhysReg Reg) const {
    for (MCRegUnit Unit : TRI->regunits(Reg)) {
      if (Units.test(Unit))
        return false;
    }
    return true;
  }

  /// Updates liveness when stepping backwards over the instruction \p MI.
  /// This removes all register units defined or clobbered in \p MI and then
  /// adds the units used (as in use operands) in \p MI.
  void stepBackward(const MachineInstr &MI);

  /// Adds all register units used, defined or clobbered in \p MI.
  /// This is useful when walking over a range of instructions to find registers
  /// unused over the whole range.
  void accumulate(const MachineInstr &MI);

  /// Adds registers living out of block \p MBB.
  /// Live out registers are the union of the live-in registers of the successor
  /// blocks and pristine registers. Live out registers of the end block are the
  /// callee saved registers.
  void addLiveOuts(const MachineBasicBlock &MBB);

  /// Adds registers living into block \p MBB.
  void addLiveIns(const MachineBasicBlock &MBB);

  /// Adds all register units marked in the bitvector \p RegUnits.
  void addUnits(const BitVector &RegUnits) {
    Units |= RegUnits;
  }
  /// Removes all register units marked in the bitvector \p RegUnits.
  void removeUnits(const BitVector &RegUnits) {
    Units.reset(RegUnits);
  }
  /// Return the internal bitvector representation of the set.
  const BitVector &getBitVector() const {
    return Units;
  }

private:
  /// Adds pristine registers. Pristine registers are callee saved registers
  /// that are unused in the function.
  void addPristines(const MachineFunction &MF);
};

/// Returns an iterator range over all physical register and mask operands for
/// \p MI and bundled instructions. This also skips any debug operands.
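///
/// Illustrative use (a sketch):
/// \code
///   for (const MachineOperand &MOP : phys_regs_and_masks(MI)) {
///     if (MOP.isRegMask()) {
///       // regmask operand, e.g. a call's clobber mask
///     } else {
///       // MOP.getReg() is a physical register
///     }
///   }
/// \endcode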
inline iterator_range<filter_iterator<
    ConstMIBundleOperands, std::function<bool(const MachineOperand &)>>>
phys_regs_and_masks(const MachineInstr &MI) {
  std::function<bool(const MachineOperand &)> Pred =
      [](const MachineOperand &MOP) {
        return MOP.isRegMask() ||
               (MOP.isReg() && !MOP.isDebug() && MOP.getReg().isPhysical());
      };
  return make_filter_range(const_mi_bundle_ops(MI), Pred);
}

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEREGUNITS_H
//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineFrameInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
#define LLVM_CODEGEN_MACHINEFRAMEINFO_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <vector>

namespace llvm {
class raw_ostream;
class MachineFunction;
class MachineBasicBlock;
class BitVector;
class AllocaInst;

/// The CalleeSavedInfo class tracks the information needed to locate where a
/// callee saved register is in the current frame.
/// A callee saved register can also be saved to a different register rather
/// than to the stack by setting DstReg instead of FrameIdx.
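///
/// Illustrative use (a sketch; Reg, FI and SpillReg are placeholders):
/// \code
///   CalleeSavedInfo CSI(Reg);
///   CSI.setFrameIdx(FI);        // spilled to a stack slot ...
///   // CSI.setDstReg(SpillReg); // ... or spilled to another register
/// \endcode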
class CalleeSavedInfo {
  Register Reg;
  union {
    int FrameIdx;
    unsigned DstReg;
  };
  /// Flag indicating whether the register is actually restored in the epilog.
  /// In most cases, if a register is saved, it is also restored. There are
  /// some situations, though, when this is not the case. For example, the
  /// LR register on ARM is usually saved, but on exit from the function its
  /// saved value may be loaded directly into PC. Since liveness tracking of
  /// physical registers treats callee-saved registers as live outside of
  /// the function, LR would be treated as live-on-exit, even though in these
  /// scenarios it is not. This flag is added to indicate that the saved
  /// register described by this object is not restored in the epilog.
  /// The long-term solution is to model the liveness of callee-saved registers
  /// by implicit uses on the return instructions, however, the required
  /// changes in the ARM backend would be quite extensive.
  bool Restored = true;
  /// Flag indicating whether the register is spilled to stack or another
  /// register.
  bool SpilledToReg = false;

public:
  explicit CalleeSavedInfo(unsigned R, int FI = 0) : Reg(R), FrameIdx(FI) {}

  // Accessors.
  Register getReg()                        const { return Reg; }
  int getFrameIdx()                        const { return FrameIdx; }
  unsigned getDstReg()                     const { return DstReg; }
  void setFrameIdx(int FI) {
    FrameIdx = FI;
    SpilledToReg = false;
  }
  void setDstReg(Register SpillReg) {
    DstReg = SpillReg;
    SpilledToReg = true;
  }
  bool isRestored()                        const { return Restored; }
  void setRestored(bool R)                       { Restored = R; }
  bool isSpilledToReg()                    const { return SpilledToReg; }
};

/// The MachineFrameInfo class represents an abstract stack frame until
/// prolog/epilog code is inserted.  This class is key to allowing stack frame
/// representation optimizations, such as frame pointer elimination.  It also
/// allows more mundane (but still important) optimizations, such as reordering
/// of abstract objects on the stack frame.
///
/// To support this, the class assigns unique integer identifiers to stack
/// objects requested by clients.  These identifiers are negative integers for
/// fixed stack objects (such as arguments passed on the stack) or nonnegative
/// for objects that may be reordered.  Instructions which refer to stack
/// objects use a special MO_FrameIndex operand to represent these frame
/// indexes.
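///
/// Illustrative iteration over all frame objects (a sketch, assuming an
/// existing MachineFrameInfo &MFI):
/// \code
///   for (int FI = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
///        FI != E; ++FI) {
///     if (MFI.isDeadObjectIndex(FI))
///       continue;
///     // ... use MFI.getObjectSize(FI), MFI.getObjectAlign(FI), ...
///   }
/// \endcode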
///
/// Because this class keeps track of all references to the stack frame, it
/// knows when a variable sized object is allocated on the stack.  This is the
/// sole condition which prevents frame pointer elimination, which is an
/// important optimization on register-poor architectures.  Because original
/// variable sized alloca's in the source program are the only source of
/// variable sized stack objects, it is safe to decide whether there will be
/// any variable sized objects before all stack objects are known (for
/// example, register allocator spill code never needs variable sized
/// objects).
///
/// When prolog/epilog code emission is performed, the final stack frame is
/// built and the machine instructions are modified to refer to the actual
/// stack offsets of the object, eliminating all MO_FrameIndex operands from
/// the program.
///
/// Abstract Stack Frame Information
class MachineFrameInfo {
public:
  /// Stack Smashing Protection (SSP) rules require that vulnerable stack
  /// allocations are located close to the stack protector.
  enum SSPLayoutKind {
    SSPLK_None,       ///< Did not trigger a stack protector.  No effect on data
                      ///< layout.
    SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size.  Closest
                      ///< to the stack protector.
    SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
                      ///< to the stack protector.
    SSPLK_AddrOf      ///< The address of this allocation is exposed and
                      ///< triggered protection.  3rd closest to the protector.
  };

private:
  // Represent a single object allocated on the stack.
  struct StackObject {
    // The offset of this object from the stack pointer on entry to
    // the function.  This field has no meaning for a variable sized element.
    int64_t SPOffset;

    // The size of this object on the stack. 0 means a variable sized object,
    // ~0ULL means a dead object.
    uint64_t Size;

    // The required alignment of this stack slot.
    Align Alignment;

    // If true, the value of the stack object is set before
    // entering the function and is not modified inside the function. By
    // default, fixed objects are immutable unless marked otherwise.
    bool isImmutable;

    // If true the stack object is used as spill slot. It
    // cannot alias any other memory objects.
    bool isSpillSlot;

    /// If true, this stack slot is used to spill a value (could be deopt
    /// and/or GC related) over a statepoint. We know that the address of the
    /// slot can't alias any LLVM IR value.  This is very similar to a spill
    /// slot, but is created by statepoint lowering in SelectionDAG, not by the
    /// register allocator.
    bool isStatepointSpillSlot = false;

    /// Identifier for stack memory type analogous to address space. If this is
    /// non-0, the meaning is target defined. Offsets cannot be directly
    /// compared between objects with different stack IDs. The object may not
    /// necessarily reside in the same contiguous memory block as other stack
    /// objects. Objects with differing stack IDs should not be merged or
    /// substituted for each other.
    ///
    /// It is assumed a target uses consecutive, increasing stack IDs starting
    /// from 1.
    uint8_t StackID;

    /// If this stack object originated from an Alloca instruction,
    /// this value saves the original IR allocation. Can be null.
    const AllocaInst *Alloca;

    // If true, the object was mapped into the local frame
    // block and doesn't need additional handling for allocation beyond that.
    bool PreAllocated = false;

    // If true, an LLVM IR value might point to this object.
    // Normally, spill slots and fixed-offset objects don't alias IR-accessible
    // objects, but there are exceptions (on PowerPC, for example, some byval
    // arguments have ABI-prescribed offsets).
    bool isAliased;

    /// If true, the object has been zero-extended.
    bool isZExt = false;

    /// If true, the object has been sign-extended.
    bool isSExt = false;

    uint8_t SSPLayout = SSPLK_None;

    StackObject(uint64_t Size, Align Alignment, int64_t SPOffset,
                bool IsImmutable, bool IsSpillSlot, const AllocaInst *Alloca,
                bool IsAliased, uint8_t StackID = 0)
        : SPOffset(SPOffset), Size(Size), Alignment(Alignment),
          isImmutable(IsImmutable), isSpillSlot(IsSpillSlot), StackID(StackID),
          Alloca(Alloca), isAliased(IsAliased) {}
  };

  /// The alignment of the stack.
  Align StackAlignment;

  /// Can the stack be realigned. This can be false if the target does not
  /// support stack realignment, or if the user asks us not to realign the
  /// stack. In this situation, overaligned allocas are all treated as dynamic
  /// allocations and the target must handle them as part of DYNAMIC_STACKALLOC
  /// lowering. All non-alloca stack objects have their alignment clamped to the
  /// base ABI stack alignment.
  /// FIXME: There is room for improvement in this case, in terms of
  /// grouping overaligned allocas into a "secondary stack frame" and
  /// then only use a single alloca to allocate this frame and only a
  /// single virtual register to access it. Currently, without such an
  /// optimization, each such alloca gets its own dynamic realignment.
  bool StackRealignable;

  /// Whether the function has the \c alignstack attribute.
  bool ForcedRealign;

  /// The list of stack objects allocated.
  std::vector<StackObject> Objects;

  /// This contains the number of fixed objects contained on
  /// the stack.  Because fixed objects are stored at a negative index in the
  /// Objects list, this is also the index to the 0th object in the list.
  unsigned NumFixedObjects = 0;

  /// This boolean keeps track of whether any variable
  /// sized objects have been allocated yet.
  bool HasVarSizedObjects = false;

  /// This boolean keeps track of whether there is a call
  /// to builtin \@llvm.frameaddress.
  bool FrameAddressTaken = false;

  /// This boolean keeps track of whether there is a call
  /// to builtin \@llvm.returnaddress.
  bool ReturnAddressTaken = false;

  /// This boolean keeps track of whether there is a call
  /// to builtin \@llvm.experimental.stackmap.
  bool HasStackMap = false;

  /// This boolean keeps track of whether there is a call
  /// to builtin \@llvm.experimental.patchpoint.
  bool HasPatchPoint = false;

  /// The prolog/epilog code inserter calculates the final stack
  /// offsets for all of the fixed size objects, updating the Objects list
  /// above.  It then updates StackSize to contain the number of bytes that need
  /// to be allocated on entry to the function.
  uint64_t StackSize = 0;

  /// The amount that a frame offset needs to be adjusted to
  /// have the actual offset from the stack/frame pointer.  The exact usage of
  /// this is target-dependent, but it is typically used to adjust between
  /// SP-relative and FP-relative offsets.  E.G., if objects are accessed via
  /// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
  /// to the distance between the initial SP and the value in FP.  For many
  /// targets, this value is only used when generating debug info (via
  /// TargetRegisterInfo::getFrameIndexReference); when generating code, the
  /// corresponding adjustments are performed directly.
  int OffsetAdjustment = 0;

  /// The prolog/epilog code inserter may process objects that require greater
  /// alignment than the default alignment the target provides.
  /// To handle this, MaxAlignment is set to the maximum alignment
  /// needed by the objects on the current frame.  If this is greater than the
  /// native alignment maintained by the compiler, dynamic alignment code will
  /// be needed.
  ///
  Align MaxAlignment;

  /// Set to true if this function adjusts the stack -- e.g.,
  /// when calling another function. This is only valid during and after
  /// prolog/epilog code insertion.
  bool AdjustsStack = false;

  /// Set to true if this function has any function calls.
  bool HasCalls = false;

  /// The frame index for the stack protector.
  int StackProtectorIdx = -1;

  /// The frame index for the function context. Used for SjLj exceptions.
  int FunctionContextIdx = -1;

  /// This contains the size of the largest call frame if the target uses frame
  /// setup/destroy pseudo instructions (as defined in the TargetFrameLowering
  /// class).  This information is important for frame pointer elimination.
  /// It is only valid during and after prolog/epilog code insertion.
  unsigned MaxCallFrameSize = ~0u;

  /// The number of bytes of callee saved registers that the target wants to
  /// report for the current function in the CodeView S_FRAMEPROC record.
  unsigned CVBytesOfCalleeSavedRegisters = 0;

  /// The prolog/epilog code inserter fills in this vector with each
  /// callee saved register saved in either the frame or a different
  /// register.  Beyond its use by the prolog/epilog code inserter,
  /// this data is used for debug info and exception handling.
  std::vector<CalleeSavedInfo> CSInfo;

  /// Has CSInfo been set yet?
  bool CSIValid = false;

  /// References to frame indices which are mapped
  /// into the local frame allocation block. <FrameIdx, LocalOffset>
  SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;

  /// Size of the pre-allocated local frame block.
  int64_t LocalFrameSize = 0;

  /// Required alignment of the local object blob, which is the strictest
  /// alignment of any object in it.
  Align LocalFrameMaxAlign;

  /// Whether the local object blob needs to be allocated together. If not,
  /// PEI should ignore the isPreAllocated flags on the stack objects and
  /// just allocate them normally.
  bool UseLocalStackAllocationBlock = false;

  /// True if the function dynamically adjusts the stack pointer through some
  /// opaque mechanism like inline assembly or Win32 EH.
  bool HasOpaqueSPAdjustment = false;

  /// True if the function contains operations which will lower down to
  /// instructions which manipulate the stack pointer.
  bool HasCopyImplyingStackAdjustment = false;

  /// True if the function contains a call to the llvm.vastart intrinsic.
  bool HasVAStart = false;

  /// True if this is a varargs function that contains a musttail call.
  bool HasMustTailInVarArgFunc = false;

  /// True if this function contains a tail call. If so, immutable objects
  /// such as function arguments are no longer truly immutable: a tail call
  /// *can* overwrite fixed stack objects like arguments, so we can't treat
  /// them as immutable.
  bool HasTailCall = false;

  /// Not null, if shrink-wrapping found a better place for the prologue.
  MachineBasicBlock *Save = nullptr;
  /// Not null, if shrink-wrapping found a better place for the epilogue.
  MachineBasicBlock *Restore = nullptr;

  /// Size of the UnsafeStack Frame
  uint64_t UnsafeStackSize = 0;

public:
  explicit MachineFrameInfo(Align StackAlignment, bool StackRealignable,
                            bool ForcedRealign)
      : StackAlignment(StackAlignment),
        StackRealignable(StackRealignable), ForcedRealign(ForcedRealign) {}

  MachineFrameInfo(const MachineFrameInfo &) = delete;

  /// Return true if there are any stack objects in this function.
  bool hasStackObjects() const { return !Objects.empty(); }

  /// This method may be called any time after instruction
  /// selection is complete to determine if the stack frame for this function
  /// contains any variable sized objects.
  bool hasVarSizedObjects() const { return HasVarSizedObjects; }

  /// Return the index for the stack protector object.
  int getStackProtectorIndex() const { return StackProtectorIdx; }
  void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
  bool hasStackProtectorIndex() const { return StackProtectorIdx != -1; }

  /// Return the index for the function context object.
  /// This object is used for SjLj exceptions.
  int getFunctionContextIndex() const { return FunctionContextIdx; }
  void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
  bool hasFunctionContextIndex() const { return FunctionContextIdx != -1; }

  /// This method may be called any time after instruction
  /// selection is complete to determine if there is a call to
  /// \@llvm.frameaddress in this function.
  bool isFrameAddressTaken() const { return FrameAddressTaken; }
  void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }

  /// This method may be called any time after
  /// instruction selection is complete to determine if there is a call to
  /// \@llvm.returnaddress in this function.
  bool isReturnAddressTaken() const { return ReturnAddressTaken; }
  void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }

  /// This method may be called any time after instruction
  /// selection is complete to determine if there is a call to builtin
  /// \@llvm.experimental.stackmap.
  bool hasStackMap() const { return HasStackMap; }
  void setHasStackMap(bool s = true) { HasStackMap = s; }

  /// This method may be called any time after instruction
  /// selection is complete to determine if there is a call to builtin
  /// \@llvm.experimental.patchpoint.
  bool hasPatchPoint() const { return HasPatchPoint; }
  void setHasPatchPoint(bool s = true) { HasPatchPoint = s; }

  /// Return true if this function requires a split stack prolog, even if it
  /// uses no stack space. This is only meaningful for functions where
  /// MachineFunction::shouldSplitStack() returns true.
  //
  // For non-leaf functions we have to allow for the possibility that the call
  // is to a non-split function, as in PR37807. This function could also take
  // the address of a non-split function. When the linker tries to adjust its
  // non-existent prologue, it would fail with an error. Mark the object file so
  // that such failures are not errors. See this Go language bug-report
  // https://go-review.googlesource.com/c/go/+/148819/
  bool needsSplitStackProlog() const {
    return getStackSize() != 0 || hasTailCall();
  }

  /// Return the minimum frame object index.
  int getObjectIndexBegin() const { return -NumFixedObjects; }

  /// Return one past the maximum frame object index.
  int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }

  /// Return the number of fixed objects.
  unsigned getNumFixedObjects() const { return NumFixedObjects; }

  /// Return the number of objects.
  unsigned getNumObjects() const { return Objects.size(); }

  /// Map a frame index into the local object block
  void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
    LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
    Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
  }

  /// Get the local offset mapping for an object.
  std::pair<int, int64_t> getLocalFrameObjectMap(int i) const {
    assert(i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
           "Invalid local object reference!");
    return LocalFrameObjects[i];
  }

  /// Return the number of objects allocated into the local object block.
  int64_t getLocalFrameObjectCount() const { return LocalFrameObjects.size(); }

  /// Set the size of the local object blob.
  void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }

  /// Get the size of the local object blob.
  int64_t getLocalFrameSize() const { return LocalFrameSize; }

  /// Required alignment of the local object blob,
  /// which is the strictest alignment of any object in it.
  void setLocalFrameMaxAlign(Align Alignment) {
    LocalFrameMaxAlign = Alignment;
  }

  /// Return the required alignment of the local object blob.
  Align getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }

  /// Get whether the local allocation blob should be allocated together or
  /// let PEI allocate the locals in it directly.
  bool getUseLocalStackAllocationBlock() const {
    return UseLocalStackAllocationBlock;
  }

  /// setUseLocalStackAllocationBlock - Set whether the local allocation blob
  /// should be allocated together or let PEI allocate the locals in it
  /// directly.
  void setUseLocalStackAllocationBlock(bool v) {
    UseLocalStackAllocationBlock = v;
  }

  /// Return true if the object was pre-allocated into the local block.
  bool isObjectPreAllocated(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
  }

  /// Return the size of the specified object.
  int64_t getObjectSize(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].Size;
  }

  /// Change the size of the specified stack object.
  void setObjectSize(int ObjectIdx, int64_t Size) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx+NumFixedObjects].Size = Size;
  }

  /// Return the alignment of the specified stack object.
  Align getObjectAlign(int ObjectIdx) const {
    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx + NumFixedObjects].Alignment;
  }

  /// Should this stack ID be considered in MaxAlignment.
  bool contributesToMaxAlignment(uint8_t StackID) {
    return StackID == TargetStackID::Default ||
           StackID == TargetStackID::ScalableVector;
  }

  /// setObjectAlignment - Change the alignment of the specified stack object.
  void setObjectAlignment(int ObjectIdx, Align Alignment) {
    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx + NumFixedObjects].Alignment = Alignment;

    // Only ensure max alignment for the default and scalable vector stack.
    uint8_t StackID = getStackID(ObjectIdx);
    if (contributesToMaxAlignment(StackID))
      ensureMaxAlignment(Alignment);
  }

  /// Return the underlying Alloca of the specified
  /// stack object if it exists. Returns nullptr if none exists.
  const AllocaInst* getObjectAllocation(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].Alloca;
  }

  /// Remove the underlying Alloca of the specified stack object if it
  /// exists. This generally should not be used and is for reduction tooling.
  void clearObjectAllocation(int ObjectIdx) {
    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx + NumFixedObjects].Alloca = nullptr;
  }

  /// Return the assigned stack offset of the specified object
  /// from the incoming stack pointer.
  int64_t getObjectOffset(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    assert(!isDeadObjectIndex(ObjectIdx) &&
           "Getting frame offset for a dead object?");
    return Objects[ObjectIdx+NumFixedObjects].SPOffset;
  }

  bool isObjectZExt(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].isZExt;
  }

  void setObjectZExt(int ObjectIdx, bool IsZExt) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx+NumFixedObjects].isZExt = IsZExt;
  }

  bool isObjectSExt(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].isSExt;
  }

  void setObjectSExt(int ObjectIdx, bool IsSExt) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx+NumFixedObjects].isSExt = IsSExt;
  }

  /// Set the stack frame offset of the specified object. The
  /// offset is relative to the stack pointer on entry to the function.
  void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    assert(!isDeadObjectIndex(ObjectIdx) &&
           "Setting frame offset for a dead object?");
    Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
  }

  SSPLayoutKind getObjectSSPLayout(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return (SSPLayoutKind)Objects[ObjectIdx+NumFixedObjects].SSPLayout;
  }

  void setObjectSSPLayout(int ObjectIdx, SSPLayoutKind Kind) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    assert(!isDeadObjectIndex(ObjectIdx) &&
           "Setting SSP layout for a dead object?");
    Objects[ObjectIdx+NumFixedObjects].SSPLayout = Kind;
  }

  /// Return the number of bytes that must be allocated to hold
  /// all of the fixed size frame objects.  This is only valid after
  /// Prolog/Epilog code insertion has finalized the stack frame layout.
  uint64_t getStackSize() const { return StackSize; }

  /// Set the size of the stack.
  void setStackSize(uint64_t Size) { StackSize = Size; }

  /// Estimate and return the size of the stack frame.
  uint64_t estimateStackSize(const MachineFunction &MF) const;

  /// Return the correction for frame offsets.
  int getOffsetAdjustment() const { return OffsetAdjustment; }

  /// Set the correction for frame offsets.
  void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }

  /// Return the alignment in bytes that this function must be aligned to,
  /// which is greater than the default stack alignment provided by the target.
  Align getMaxAlign() const { return MaxAlignment; }

  /// Make sure the function is at least \p Alignment bytes aligned.
  void ensureMaxAlignment(Align Alignment);

  /// Return true if this function adjusts the stack -- e.g.,
  /// when calling another function. This is only valid during and after
  /// prolog/epilog code insertion.
  bool adjustsStack() const { return AdjustsStack; }
  void setAdjustsStack(bool V) { AdjustsStack = V; }

  /// Return true if the current function has any function calls.
  bool hasCalls() const { return HasCalls; }
  void setHasCalls(bool V) { HasCalls = V; }

  /// Returns true if the function contains opaque dynamic stack adjustments.
  bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; }
  void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; }

  /// Returns true if the function contains operations which will lower down to
  /// instructions which manipulate the stack pointer.
  bool hasCopyImplyingStackAdjustment() const {
    return HasCopyImplyingStackAdjustment;
  }
  void setHasCopyImplyingStackAdjustment(bool B) {
    HasCopyImplyingStackAdjustment = B;
  }

  /// Returns true if the function calls the llvm.va_start intrinsic.
  bool hasVAStart() const { return HasVAStart; }
  void setHasVAStart(bool B) { HasVAStart = B; }

  /// Returns true if the function is variadic and contains a musttail call.
  bool hasMustTailInVarArgFunc() const { return HasMustTailInVarArgFunc; }
  void setHasMustTailInVarArgFunc(bool B) { HasMustTailInVarArgFunc = B; }

  /// Returns true if the function contains a tail call.
  bool hasTailCall() const { return HasTailCall; }
  void setHasTailCall(bool V = true) { HasTailCall = V; }

  /// Computes the maximum size of a call frame and the AdjustsStack property.
  /// This only works for targets defining
  /// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
  /// and getFrameSize().
  /// This is usually computed by the prologue epilogue inserter but some
  /// targets may call this to compute it earlier.
  void computeMaxCallFrameSize(const MachineFunction &MF);

  /// Return the maximum size of a call frame that must be
  /// allocated for an outgoing function call.  This is only available if
  /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
  /// then only during or after prolog/epilog code insertion.
  ///
  unsigned getMaxCallFrameSize() const {
    // TODO: Enable this assert when targets are fixed.
    //assert(isMaxCallFrameSizeComputed() && "MaxCallFrameSize not computed yet");
    if (!isMaxCallFrameSizeComputed())
      return 0;
    return MaxCallFrameSize;
  }
  bool isMaxCallFrameSizeComputed() const {
    return MaxCallFrameSize != ~0u;
  }
  void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }

  /// Returns how many bytes of callee-saved registers the target pushed in the
  /// prologue. Only used for debug info.
  unsigned getCVBytesOfCalleeSavedRegisters() const {
    return CVBytesOfCalleeSavedRegisters;
  }
  void setCVBytesOfCalleeSavedRegisters(unsigned S) {
    CVBytesOfCalleeSavedRegisters = S;
  }

  /// Create a new object at a fixed location on the stack.
  /// All fixed objects should be created before other objects are created for
  /// efficiency. By default, fixed objects are not pointed to by LLVM IR
  /// values. This returns an index with a negative value.
  int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable,
                        bool isAliased = false);

  /// Create a spill slot at a fixed location on the stack.
  /// Returns an index with a negative value.
  int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset,
                                  bool IsImmutable = false);

  /// Returns true if the specified index corresponds to a fixed stack object.
  bool isFixedObjectIndex(int ObjectIdx) const {
    return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
  }

  /// Returns true if the specified index corresponds
  /// to an object that might be pointed to by an LLVM IR value.
  bool isAliasedObjectIndex(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].isAliased;
  }

  /// Returns true if the specified index corresponds to an immutable object.
  bool isImmutableObjectIndex(int ObjectIdx) const {
    // Tail calling functions can clobber their function arguments.
    if (HasTailCall)
      return false;
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].isImmutable;
  }

  /// Marks the immutability of an object.
  void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx+NumFixedObjects].isImmutable = IsImmutable;
  }

  /// Returns true if the specified index corresponds to a spill slot.
  bool isSpillSlotObjectIndex(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
  }

  bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot;
  }

  /// \see StackID
  uint8_t getStackID(int ObjectIdx) const {
    return Objects[ObjectIdx+NumFixedObjects].StackID;
  }

  /// \see StackID
  void setStackID(int ObjectIdx, uint8_t ID) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx+NumFixedObjects].StackID = ID;
    // If ID > 0, MaxAlignment may now be overly conservative.
    // If ID == 0, MaxAlignment will need to be updated separately.
  }

  /// Returns true if the specified index corresponds to a dead object.
  bool isDeadObjectIndex(int ObjectIdx) const {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
  }

  /// Returns true if the specified index corresponds to a variable sized
  /// object.
  bool isVariableSizedObjectIndex(int ObjectIdx) const {
    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    return Objects[ObjectIdx + NumFixedObjects].Size == 0;
  }

  void markAsStatepointSpillSlotObjectIndex(int ObjectIdx) {
    assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
           "Invalid Object Idx!");
    Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot = true;
    assert(isStatepointSpillSlotObjectIndex(ObjectIdx) && "inconsistent");
  }

  /// Create a new statically sized stack object, returning
  /// a nonnegative identifier to represent it.
  int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot,
                        const AllocaInst *Alloca = nullptr, uint8_t ID = 0);
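
  // Illustrative use (a sketch, assuming a MachineFrameInfo &MFI): create
  // a 16-byte, 8-byte-aligned object that is not a spill slot.
  //   int FI = MFI.CreateStackObject(/*Size=*/16, Align(8),
  //                                  /*isSpillSlot=*/false);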

  /// Create a new statically sized stack object that represents a spill slot,
  /// returning a nonnegative identifier to represent it.
  int CreateSpillStackObject(uint64_t Size, Align Alignment);

  /// Remove or mark dead a statically sized stack object.
  void RemoveStackObject(int ObjectIdx) {
    // Mark it dead.
    Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
  }

  /// Notify the MachineFrameInfo object that a variable sized object has been
  /// created.  This must be called whenever a variable sized object is
  /// created, whether or not the index returned is actually used.
  int CreateVariableSizedObject(Align Alignment, const AllocaInst *Alloca);

  /// Returns a reference to the callee saved info vector for the current function.
  const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
    return CSInfo;
  }
  /// \copydoc getCalleeSavedInfo()
  std::vector<CalleeSavedInfo> &getCalleeSavedInfo() { return CSInfo; }

  /// Used by prolog/epilog inserter to set the function's callee saved
  /// information.
  void setCalleeSavedInfo(std::vector<CalleeSavedInfo> CSI) {
    CSInfo = std::move(CSI);
  }

  /// Has the callee saved info been calculated yet?
  bool isCalleeSavedInfoValid() const { return CSIValid; }

  void setCalleeSavedInfoValid(bool v) { CSIValid = v; }

  MachineBasicBlock *getSavePoint() const { return Save; }
  void setSavePoint(MachineBasicBlock *NewSave) { Save = NewSave; }
  MachineBasicBlock *getRestorePoint() const { return Restore; }
  void setRestorePoint(MachineBasicBlock *NewRestore) { Restore = NewRestore; }

  uint64_t getUnsafeStackSize() const { return UnsafeStackSize; }
  void setUnsafeStackSize(uint64_t Size) { UnsafeStackSize = Size; }

  /// Return a set of physical registers that are pristine.
  ///
  /// Pristine registers hold a value that is useless to the current function,
  /// but that must be preserved - they are callee saved registers that are not
  /// saved.
  ///
  /// Before the PrologueEpilogueInserter has placed the CSR spill code, this
  /// method always returns an empty set.
  BitVector getPristineRegs(const MachineFunction &MF) const;

  /// Used by the MachineFunction printer to print information about
  /// stack objects. Implemented in MachineFunction.cpp.
  void print(const MachineFunction &MF, raw_ostream &OS) const;

  /// dump - Print the function to stderr.
  void dump(const MachineFunction &MF) const;
};

} // End llvm namespace

#endif
//===- llvm/CodeGen/PBQPRAConstraint.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PBQPRACONSTRAINT_H
#define LLVM_CODEGEN_PBQPRACONSTRAINT_H

#include <algorithm>
#include <memory>
#include <vector>

namespace llvm {

namespace PBQP {
namespace RegAlloc {

// Forward declare PBQP graph class.
class PBQPRAGraph;

} // end namespace RegAlloc
} // end namespace PBQP

using PBQPRAGraph = PBQP::RegAlloc::PBQPRAGraph;

/// Abstract base for classes implementing PBQP register allocation
///        constraints (e.g. Spill-costs, interference, coalescing).
class PBQPRAConstraint {
public:
  virtual ~PBQPRAConstraint() = 0;
  virtual void apply(PBQPRAGraph &G) = 0;

private:
  virtual void anchor();
};

/// PBQP register allocation constraint composer.
///
///   Constraints added to this list will be applied, in the order that they are
/// added, to the PBQP graph.
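///
/// Illustrative composition (a sketch; MyConstraint is a placeholder
/// subclass of PBQPRAConstraint, and G is a PBQPRAGraph):
/// \code
///   auto List = std::make_unique<PBQPRAConstraintList>();
///   List->addConstraint(std::make_unique<MyConstraint>());
///   List->apply(G); // applies each constraint in insertion order
/// \endcode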
class PBQPRAConstraintList : public PBQPRAConstraint {
public:
  void apply(PBQPRAGraph &G) override {
    for (auto &C : Constraints)
      C->apply(G);
  }

  void addConstraint(std::unique_ptr<PBQPRAConstraint> C) {
    if (C)
      Constraints.push_back(std::move(C));
  }

private:
  std::vector<std::unique_ptr<PBQPRAConstraint>> Constraints;

  void anchor() override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_PBQPRACONSTRAINT_H
//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces to access the target independent code generation
// passes provided by the LLVM backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_PASSES_H
#define LLVM_CODEGEN_PASSES_H

#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Discriminator.h"
#include "llvm/CodeGen/RegAllocCommon.h"

#include <functional>
#include <string>

namespace llvm {

class FunctionPass;
class MachineFunction;
class MachineFunctionPass;
class ModulePass;
class Pass;
class TargetMachine;
class raw_ostream;

template <typename T> class IntrusiveRefCntPtr;
namespace vfs {
class FileSystem;
} // namespace vfs

} // End llvm namespace

// List of target independent CodeGen pass IDs.
namespace llvm {

  /// AtomicExpandPass - At IR level this pass replaces atomic instructions with
  /// __atomic_* library calls, or with target specific instructions which
  /// implement the same semantics in a way that better fits the target backend.
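  ///
  /// Illustrative registration with the legacy pass manager (a sketch,
  /// assuming a Module M; in practice codegen passes are normally scheduled
  /// via TargetPassConfig):
  /// \code
  ///   legacy::FunctionPassManager FPM(&M);
  ///   FPM.add(createAtomicExpandPass());
  /// \endcode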
  FunctionPass *createAtomicExpandPass();

  /// createUnreachableBlockEliminationPass - The LLVM code generator does not
  /// work well with unreachable basic blocks (what live ranges make sense for a
  /// block that cannot be reached?).  As such, a code generator should either
  /// not instruction select unreachable blocks, or run this pass as its
  /// last LLVM modifying pass to clean up blocks that are not reachable from
  /// the entry block.
  FunctionPass *createUnreachableBlockEliminationPass();

  /// createBasicBlockSections Pass - This pass assigns sections to machine
  /// basic blocks and is enabled with -fbasic-block-sections.
  MachineFunctionPass *createBasicBlockSectionsPass();

  /// createMachineFunctionSplitterPass - This pass splits machine functions
  /// using profile information.
  MachineFunctionPass *createMachineFunctionSplitterPass();

  /// MachineFunctionPrinter pass - This pass prints out the machine function to
  /// the given stream as a debugging tool.
  MachineFunctionPass *
  createMachineFunctionPrinterPass(raw_ostream &OS,
                                   const std::string &Banner = "");

  /// StackFramePrinter pass - This pass prints out the machine function's
  /// stack frame to the given stream as a debugging tool.
  MachineFunctionPass *createStackFrameLayoutAnalysisPass();

  /// MIRPrinting pass - this pass prints out the LLVM IR into the given stream
  /// using the MIR serialization format.
  MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);

  /// This pass resets a MachineFunction when it has the FailedISel property
  /// as if it was just created.
  /// If EmitFallbackDiag is true, the pass will emit a
  /// DiagnosticInfoISelFallback for every MachineFunction it resets.
  /// If AbortOnFailedISel is true, abort compilation instead of resetting.
  MachineFunctionPass *createResetMachineFunctionPass(bool EmitFallbackDiag,
                                                      bool AbortOnFailedISel);

  /// createCodeGenPreparePass - Transform the code to expose more pattern
  /// matching during instruction selection.
  FunctionPass *createCodeGenPreparePass();

  /// This pass implements generation of target-specific intrinsics to support
  /// handling of complex number arithmetic
  FunctionPass *createComplexDeinterleavingPass(const TargetMachine *TM);

  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg or
  /// load-linked/store-conditional loops.
  extern char &AtomicExpandID;

  /// MachineLoopInfo - This pass is a loop analysis pass.
  extern char &MachineLoopInfoID;

  /// MachineDominators - This pass is a machine dominators analysis pass.
  extern char &MachineDominatorsID;

  /// MachineDominanceFrontier - This pass computes machine dominance frontiers.
  extern char &MachineDominanceFrontierID;

  /// MachineRegionInfo - This pass computes SESE regions for machine functions.
  extern char &MachineRegionInfoPassID;

  /// EdgeBundles analysis - Bundle machine CFG edges.
  extern char &EdgeBundlesID;

  /// LiveVariables pass - This pass computes the set of blocks in which each
  /// variable is live and sets machine operand kill flags.
  extern char &LiveVariablesID;

  /// PHIElimination - This pass eliminates machine instruction PHI nodes
  /// by inserting copy instructions.  This destroys SSA information, but is the
  /// desired input for some register allocators.  This pass is "required" by
  /// these register allocators like this: AU.addRequiredID(PHIEliminationID);
  extern char &PHIEliminationID;

  /// LiveIntervals - This analysis keeps track of the live ranges of virtual
  /// and physical registers.
  extern char &LiveIntervalsID;

  /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
  extern char &LiveStacksID;

  /// TwoAddressInstruction - This pass reduces two-address instructions to
  /// use two operands. This destroys SSA information but it is desired by
  /// register allocators.
  extern char &TwoAddressInstructionPassID;

  /// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
  extern char &ProcessImplicitDefsID;

  /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
  extern char &RegisterCoalescerID;

  /// MachineScheduler - This pass schedules machine instructions.
  extern char &MachineSchedulerID;

  /// PostMachineScheduler - This pass schedules machine instructions postRA.
  extern char &PostMachineSchedulerID;

  /// SpillPlacement analysis. Suggest optimal placement of spill code between
  /// basic blocks.
  extern char &SpillPlacementID;

  /// ShrinkWrap pass. Look for the best place to insert save and restore
  /// instructions and update the MachineFunctionInfo with that information.
  extern char &ShrinkWrapID;

  /// LiveRangeShrink pass. Move instruction close to its definition to shrink
  /// the definition's live range.
  extern char &LiveRangeShrinkID;

  /// Greedy register allocator.
  extern char &RAGreedyID;

  /// Basic register allocator.
  extern char &RABasicID;

  /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
  /// assigned in VirtRegMap.
  extern char &VirtRegRewriterID;
  FunctionPass *createVirtRegRewriter(bool ClearVirtRegs = true);

  /// UnreachableMachineBlockElimination - This pass removes unreachable
  /// machine basic blocks.
  extern char &UnreachableMachineBlockElimID;

  /// DeadMachineInstructionElim - This pass removes dead machine instructions.
  extern char &DeadMachineInstructionElimID;

  /// This pass adds dead/undef flags after analyzing subregister lanes.
  extern char &DetectDeadLanesID;

  /// This pass performs post-RA machine sinking for COPY instructions.
  extern char &PostRAMachineSinkingID;

  /// This pass adds flow sensitive discriminators.
  extern char &MIRAddFSDiscriminatorsID;

  /// This pass reads a flow sensitive profile.
  extern char &MIRProfileLoaderPassID;

  /// FastRegisterAllocation Pass - This pass register allocates as fast as
  /// possible. It is best suited for debug code where live ranges are short.
  ///
  FunctionPass *createFastRegisterAllocator();
  FunctionPass *createFastRegisterAllocator(RegClassFilterFunc F,
                                            bool ClearVirtRegs);

  /// BasicRegisterAllocation Pass - This pass implements a degenerate global
  /// register allocator using the basic regalloc framework.
  ///
  FunctionPass *createBasicRegisterAllocator();
  FunctionPass *createBasicRegisterAllocator(RegClassFilterFunc F);

  /// Greedy register allocation pass - This pass implements a global register
  /// allocator for optimized builds.
  ///
  FunctionPass *createGreedyRegisterAllocator();
  FunctionPass *createGreedyRegisterAllocator(RegClassFilterFunc F);

  /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
  /// Quadratic Programming (PBQP) based register allocator.
  ///
  FunctionPass *createDefaultPBQPRegisterAllocator();

  /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
  /// and eliminates abstract frame references.
  extern char &PrologEpilogCodeInserterID;
  MachineFunctionPass *createPrologEpilogInserterPass();

  /// ExpandPostRAPseudos - This pass expands pseudo instructions after
  /// register allocation.
  extern char &ExpandPostRAPseudosID;

  /// PostRAHazardRecognizer - This pass runs the post-ra hazard
  /// recognizer.
  extern char &PostRAHazardRecognizerID;

  /// PostRAScheduler - This pass performs post register allocation
  /// scheduling.
  extern char &PostRASchedulerID;

  /// BranchFolding - This pass performs machine code CFG based
  /// optimizations to delete branches to branches, eliminate branches to
  /// successor blocks (creating fall throughs), and eliminate branches over
  /// branches.
  extern char &BranchFolderPassID;

  /// BranchRelaxation - This pass replaces branches that need to jump further
  /// than is supported by a branch instruction.
  extern char &BranchRelaxationPassID;

  /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
  extern char &MachineFunctionPrinterPassID;

  /// MIRPrintingPass - this pass prints out the LLVM IR using the MIR
  /// serialization format.
  extern char &MIRPrintingPassID;

  /// TailDuplicate - Duplicate blocks with unconditional branches
  /// into tails of their predecessors.
  extern char &TailDuplicateID;

  /// Duplicate blocks with unconditional branches into tails of their
  /// predecessors. Variant that works before register allocation.
  extern char &EarlyTailDuplicateID;

  /// MachineTraceMetrics - This pass computes critical path and CPU resource
  /// usage in an ensemble of traces.
  extern char &MachineTraceMetricsID;

  /// EarlyIfConverter - This pass performs if-conversion on SSA form by
  /// inserting cmov instructions.
  extern char &EarlyIfConverterID;

  /// EarlyIfPredicator - This pass performs if-conversion on SSA form by
  /// predicating if/else block and insert select at the join point.
  extern char &EarlyIfPredicatorID;

  /// This pass performs instruction combining using trace metrics to estimate
  /// critical-path and resource depth.
  extern char &MachineCombinerID;

  /// StackColoring - This pass performs stack coloring and merging.
  /// It merges disjoint allocas to reduce the stack size.
  extern char &StackColoringID;

  /// StackFramePrinter - This pass prints the stack frame layout and variable
  /// mappings.
  extern char &StackFrameLayoutAnalysisPassID;

  /// IfConverter - This pass performs machine code if conversion.
  extern char &IfConverterID;

  FunctionPass *createIfConverter(
      std::function<bool(const MachineFunction &)> Ftor);

  /// MachineBlockPlacement - This pass places basic blocks based on branch
  /// probabilities.
  extern char &MachineBlockPlacementID;

  /// MachineBlockPlacementStats - This pass collects statistics about the
  /// basic block placement using branch probabilities and block frequency
  /// information.
  extern char &MachineBlockPlacementStatsID;

  /// GCLowering Pass - Used by gc.root to perform its default lowering
  /// operations.
  FunctionPass *createGCLoweringPass();

  /// GCLowering Pass - Used by gc.root to perform its default lowering
  /// operations.
  extern char &GCLoweringID;

  /// ShadowStackGCLowering - Implements the custom lowering mechanism
  /// used by the shadow stack GC.  Only runs on functions which opt in to
  /// the shadow stack collector.
  FunctionPass *createShadowStackGCLoweringPass();

  /// ShadowStackGCLowering - Implements the custom lowering mechanism
  /// used by the shadow stack GC.
  extern char &ShadowStackGCLoweringID;

  /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
  /// in machine code. Must be added very late during code generation, just
  /// prior to output, and importantly after all CFG transformations (such as
  /// branch folding).
  extern char &GCMachineCodeAnalysisID;

  /// Creates a pass to print GC metadata.
  ///
  FunctionPass *createGCInfoPrinter(raw_ostream &OS);

  /// MachineCSE - This pass performs global CSE on machine instructions.
  extern char &MachineCSEID;

  /// MIRCanonicalizer - This pass canonicalizes MIR by renaming vregs
  /// according to the semantics of the instruction, as well as hoisting
  /// code.
  extern char &MIRCanonicalizerID;

  /// ImplicitNullChecks - This pass folds null pointer checks into nearby
  /// memory operations.
  extern char &ImplicitNullChecksID;

  /// This pass performs loop invariant code motion on machine instructions.
  extern char &MachineLICMID;

  /// This pass performs loop invariant code motion on machine instructions.
  /// This variant works before register allocation. \see MachineLICMID.
  extern char &EarlyMachineLICMID;

  /// MachineSinking - This pass performs sinking on machine instructions.
  extern char &MachineSinkingID;

  /// MachineCopyPropagation - This pass performs copy propagation on
  /// machine instructions.
  extern char &MachineCopyPropagationID;

  MachineFunctionPass *createMachineCopyPropagationPass(bool UseCopyInstr);

  /// MachineLateInstrsCleanup - This pass removes redundant identical
  /// instructions after register allocation and rematerialization.
  extern char &MachineLateInstrsCleanupID;

  /// PeepholeOptimizer - This pass performs peephole optimizations -
  /// like extension and comparison eliminations.
  extern char &PeepholeOptimizerID;

  /// OptimizePHIs - This pass optimizes machine instruction PHIs
  /// to take advantage of opportunities created during DAG legalization.
  extern char &OptimizePHIsID;

  /// StackSlotColoring - This pass performs stack slot coloring.
  extern char &StackSlotColoringID;

  /// This pass lays out funclets contiguously.
  extern char &FuncletLayoutID;

  /// This pass inserts the XRay instrumentation sleds if they are supported by
  /// the target platform.
  extern char &XRayInstrumentationID;

  /// This pass inserts FEntry calls
  extern char &FEntryInserterID;

  /// This pass implements the "patchable-function" attribute.
  extern char &PatchableFunctionID;

  /// createStackProtectorPass - This pass adds stack protectors to functions.
  ///
  FunctionPass *createStackProtectorPass();

  /// createMachineVerifierPass - This pass verifies generated machine code
  /// instructions for correctness.
  ///
  FunctionPass *createMachineVerifierPass(const std::string& Banner);

  /// createDwarfEHPass - This pass mulches exception handling code into a form
  /// adapted to code generation.  Required if using dwarf exception handling.
  FunctionPass *createDwarfEHPass(CodeGenOpt::Level OptLevel);

  /// createWinEHPass - Prepares personality functions used by MSVC on Windows,
  /// in addition to the Itanium LSDA based personalities.
  FunctionPass *createWinEHPass(bool DemoteCatchSwitchPHIOnly = false);

  /// createSjLjEHPreparePass - This pass adapts exception handling code to use
  /// the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control flow.
  ///
  FunctionPass *createSjLjEHPreparePass(const TargetMachine *TM);

  /// createWasmEHPass - This pass adapts exception handling code to use
  /// WebAssembly's exception handling scheme.
  FunctionPass *createWasmEHPass();

  /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
  /// slots relative to one another and allocates base registers to access them
  /// when it is estimated by the target to be out of range of normal frame
  /// pointer or stack pointer index addressing.
  extern char &LocalStackSlotAllocationID;

  /// This pass expands pseudo-instructions, reserves registers and adjusts
  /// machine frame information.
  extern char &FinalizeISelID;

  /// UnpackMachineBundles - This pass unpacks machine instruction bundles.
  extern char &UnpackMachineBundlesID;

  FunctionPass *
  createUnpackMachineBundles(std::function<bool(const MachineFunction &)> Ftor);
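
  // Illustrative use (assumes a legacy PassManagerBase `PM`; "foo" is a
  // hypothetical function name). The functor selects which machine
  // functions the pass processes:
  //
  //   PM.add(createUnpackMachineBundles(
  //       [](const MachineFunction &MF) { return MF.getName() == "foo"; }));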

  /// FinalizeMachineBundles - This pass finalizes machine instruction
  /// bundles (created earlier, e.g. during pre-RA scheduling).
  extern char &FinalizeMachineBundlesID;

  /// StackMapLiveness - This pass analyses the register live-out set of
  /// stackmap/patchpoint intrinsics and attaches the calculated information to
  /// the intrinsic for later emission to the StackMap.
  extern char &StackMapLivenessID;

  // MachineSanitizerBinaryMetadata - appends/finalizes sanitizer binary
  // metadata after llvm SanitizerBinaryMetadata pass.
  extern char &MachineSanitizerBinaryMetadataID;

  /// RemoveRedundantDebugValues pass.
  extern char &RemoveRedundantDebugValuesID;

  /// MachineCFGPrinter pass.
  extern char &MachineCFGPrinterID;

  /// LiveDebugValues pass
  extern char &LiveDebugValuesID;

  /// createJumpInstrTables - This pass creates jump-instruction tables.
  ModulePass *createJumpInstrTablesPass();

  /// InterleavedAccess Pass - This pass identifies and matches interleaved
  /// memory accesses to target specific intrinsics.
  ///
  FunctionPass *createInterleavedAccessPass();

  /// InterleavedLoadCombines Pass - This pass identifies interleaved loads and
  /// combines them into wide loads detectable by InterleavedAccessPass
  ///
  FunctionPass *createInterleavedLoadCombinePass();

  /// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
  /// TLS variables for the emulated TLS model.
  ///
  ModulePass *createLowerEmuTLSPass();

  /// This pass lowers the \@llvm.load.relative and \@llvm.objc.* intrinsics to
  /// instructions.  This is unsafe to do earlier because a pass may combine the
  /// constant initializer into the load, which may result in an overflowing
  /// evaluation.
  ModulePass *createPreISelIntrinsicLoweringPass();

  /// GlobalMerge - This pass merges internal (by default) globals into structs
  /// to enable reuse of a base pointer by indexed addressing modes.
  /// It can also be configured to focus on size optimizations only.
  ///
  Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
                              bool OnlyOptimizeForSize = false,
                              bool MergeExternalByDefault = false);
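
  // Illustrative call (assumes a legacy PassManagerBase `PM` and a configured
  // TargetMachine `TM`; 4095 is a hypothetical reg+imm addressing range):
  //
  //   PM.add(createGlobalMergePass(TM, /*MaximalOffset=*/4095));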

  /// This pass splits the stack into a safe stack and an unsafe stack to
  /// protect against stack-based overflow vulnerabilities.
  FunctionPass *createSafeStackPass();

  /// This pass detects subregister lanes in a virtual register that are used
  /// independently of other lanes and splits them into separate virtual
  /// registers.
  extern char &RenameIndependentSubregsID;

  /// This pass is executed POST-RA to collect which physical registers are
  /// preserved by given machine function.
  FunctionPass *createRegUsageInfoCollector();

  /// Return a MachineFunction pass that identifies call sites
  /// and propagates register usage information from callee to caller
  /// if available via the PhysicalRegisterUsageInfo pass.
  FunctionPass *createRegUsageInfoPropPass();

  /// This pass performs software pipelining on machine instructions.
  extern char &MachinePipelinerID;

  /// This pass frees the memory occupied by the MachineFunction.
  FunctionPass *createFreeMachineFunctionPass();

  /// This pass performs outlining on machine instructions directly before
  /// printing assembly.
  ModulePass *createMachineOutlinerPass(bool RunOnAllFunctions = true);

  /// This pass expands the reduction intrinsics into sequences of shuffles.
  FunctionPass *createExpandReductionsPass();

  // This pass replaces intrinsics operating on vector operands with calls to
  // the corresponding function in a vector library (e.g., SVML, libmvec).
  FunctionPass *createReplaceWithVeclibLegacyPass();

  /// This pass expands the vector predication intrinsics into unpredicated
  /// instructions with selects, or folds just the explicit vector length
  /// into the predicate mask.
  FunctionPass *createExpandVectorPredicationPass();

  // Expands large div/rem instructions.
  FunctionPass *createExpandLargeDivRemPass();

  // Expands large fp convert instructions.
  FunctionPass *createExpandLargeFpConvertPass();

  // This pass expands memcmp() to load/stores.
  FunctionPass *createExpandMemCmpPass();

  /// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
  FunctionPass *createBreakFalseDeps();

  // This pass expands indirectbr instructions.
  FunctionPass *createIndirectBrExpandPass();

  /// Creates CFI Fixup pass. \see CFIFixup.cpp
  FunctionPass *createCFIFixup();

  /// Creates CFI Instruction Inserter pass. \see CFIInstrInserter.cpp
  FunctionPass *createCFIInstrInserter();

  /// Creates CFGuard longjmp target identification pass.
  /// \see CFGuardLongjmp.cpp
  FunctionPass *createCFGuardLongjmpPass();

  /// Creates EHContGuard catchret target identification pass.
  /// \see EHContGuardCatchret.cpp
  FunctionPass *createEHContGuardCatchretPass();

  /// Create Hardware Loop pass. \see HardwareLoops.cpp
  FunctionPass *createHardwareLoopsLegacyPass();

  /// This pass inserts pseudo probe annotation for callsite profiling.
  FunctionPass *createPseudoProbeInserter();

  /// Create IR Type Promotion pass. \see TypePromotion.cpp
  FunctionPass *createTypePromotionLegacyPass();

  /// Add Flow Sensitive Discriminators. PassNum specifies the
  /// sequence number of this pass (starting from 1).
  FunctionPass *
  createMIRAddFSDiscriminatorsPass(sampleprof::FSDiscriminatorPass P);

  /// Read Flow Sensitive Profile.
  FunctionPass *
  createMIRProfileLoaderPass(std::string File, std::string RemappingFile,
                             sampleprof::FSDiscriminatorPass P,
                             IntrusiveRefCntPtr<vfs::FileSystem> FS);
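
  // Illustrative call (assumes a legacy PassManagerBase `PM`; the profile
  // file name is hypothetical, and Pass1 stands for the first FS
  // discriminator stage):
  //
  //   PM.add(createMIRProfileLoaderPass(
  //       "perf.afdo", /*RemappingFile=*/"",
  //       sampleprof::FSDiscriminatorPass::Pass1, vfs::getRealFileSystem()));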

  /// Creates MIR Debugify pass. \see MachineDebugify.cpp
  ModulePass *createDebugifyMachineModulePass();

  /// Creates MIR Strip Debug pass. \see MachineStripDebug.cpp
  /// If OnlyDebugified is true then it will only strip debug info if it was
  /// added by a Debugify pass. The module will be left unchanged if the debug
  /// info was generated by another source such as clang.
  ModulePass *createStripDebugMachineModulePass(bool OnlyDebugified);

  /// Creates MIR Check Debug pass. \see MachineCheckDebugify.cpp
  ModulePass *createCheckDebugMachineModulePass();

  /// This pass fixes up statepoint machine instructions to replace usage of
  /// caller-saved registers with stack slots.
  extern char &FixupStatepointCallerSavedID;

  /// This pass transforms load/store <256 x i32> to AMX load/store intrinsics
  /// or splits the data into two <128 x i32> parts.
  FunctionPass *createX86LowerAMXTypePass();

  /// This pass inserts tile config intrinsics for AMX fast register allocation.
  FunctionPass *createX86PreAMXConfigPass();

  /// This pass transforms AMX intrinsics to scalar operations if the function
  /// has the optnone attribute or is compiled at O0.
  FunctionPass *createX86LowerAMXIntrinsicsPass();

  /// When learning an eviction policy, extract score (reward) information;
  /// otherwise this pass does nothing.
  FunctionPass *createRegAllocScoringPass();

  /// JMC (Just My Code) instrumentation pass.
  ModulePass *createJMCInstrumenterPass();

  /// This pass converts conditional moves to conditional jumps when profitable.
  FunctionPass *createSelectOptimizePass();

  FunctionPass *createCallBrPass();

  /// Lowers KCFI operand bundles for indirect calls.
  FunctionPass *createKCFIPass();
} // End llvm namespace

#endif
//===- llvm/CodeGen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file pulls in all assembler writer related passes for tools like
// llc that need this functionality.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H

#include "llvm/IR/BuiltinGCs.h"
#include <cstdlib>

namespace {
  struct ForceAsmWriterLinking {
    ForceAsmWriterLinking() {
      // We must reference the plug-ins in such a way that compilers will not
      // delete them as dead code, even with whole program optimization,
      // yet the references are effectively a NO-OP. As the compiler isn't
      // smart enough to know that getenv() never returns -1, this will do
      // the job.
      // This is so that globals in the translation units where these functions
      // are defined are forced to be initialized, populating various
      // registries.
      if (std::getenv("bar") != (char*) -1)
        return;

      llvm::linkOcamlGCPrinter();
      llvm::linkErlangGCPrinter();

    }
  } ForceAsmWriterLinking; // Force link by creating a global definition.
}

#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
//===- llvm/CodeGen/MachinePostDominators.h ----------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes interfaces to post dominance information for
// target-specific code.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H

#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include <memory>

namespace llvm {

///
/// MachinePostDominatorTree - an analysis pass wrapper for DominatorTree
/// used to compute the post-dominator tree for MachineFunctions.
///
class MachinePostDominatorTree : public MachineFunctionPass {
  using PostDomTreeT = PostDomTreeBase<MachineBasicBlock>;
  std::unique_ptr<PostDomTreeT> PDT;

public:
  static char ID;

  MachinePostDominatorTree();

  PostDomTreeT &getBase() {
    if (!PDT)
      PDT.reset(new PostDomTreeT());
    return *PDT;
  }

  FunctionPass *createMachinePostDominatorTreePass();

  MachineDomTreeNode *getRootNode() const { return PDT->getRootNode(); }

  MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
    return PDT->getNode(BB);
  }

  MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
    return PDT->getNode(BB);
  }

  bool dominates(const MachineDomTreeNode *A,
                 const MachineDomTreeNode *B) const {
    return PDT->dominates(A, B);
  }

  bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
    return PDT->dominates(A, B);
  }

  bool properlyDominates(const MachineDomTreeNode *A,
                         const MachineDomTreeNode *B) const {
    return PDT->properlyDominates(A, B);
  }

  bool properlyDominates(const MachineBasicBlock *A,
                         const MachineBasicBlock *B) const {
    return PDT->properlyDominates(A, B);
  }

  bool isVirtualRoot(const MachineDomTreeNode *Node) const {
    return PDT->isVirtualRoot(Node);
  }

  MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
                                                MachineBasicBlock *B) const {
    return PDT->findNearestCommonDominator(A, B);
  }

  /// Returns the nearest common dominator of the given blocks.
  /// If that tree node is a virtual root, a nullptr will be returned.
  MachineBasicBlock *
  findNearestCommonDominator(ArrayRef<MachineBasicBlock *> Blocks) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override { PDT.reset(nullptr); }
  void verifyAnalysis() const override;
  void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
};
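
// A minimal sketch of consuming this analysis from another
// MachineFunctionPass (`MyPass` is hypothetical; note that dominates()
// queries *post*-dominance here, and MF.back() is merely the layout-last
// block, used only for illustration):
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MachinePostDominatorTree>();
//     AU.setPreservesAll();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//
//   bool MyPass::runOnMachineFunction(MachineFunction &MF) {
//     auto &PDT = getAnalysis<MachinePostDominatorTree>();
//     bool LastPostDomsFirst = PDT.dominates(&MF.back(), &MF.front());
//     (void)LastPostDomsFirst;
//     return false; // Analysis consumer only; nothing modified.
//   }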
} //end of namespace llvm

#endif
//===- llvm/CodeGen/DbgEntityHistoryCalculator.h ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_DBGENTITYHISTORYCALCULATOR_H
#define LLVM_CODEGEN_DBGENTITYHISTORYCALCULATOR_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include <utility>

namespace llvm {

class DILocation;
class LexicalScopes;
class DINode;
class MachineFunction;
class TargetRegisterInfo;

/// Record instruction ordering so we can query their relative positions within
/// a function. Meta instructions are given the same ordinal as the preceding
/// non-meta instruction. Class state is invalid if MF is modified after
/// calling initialize.
class InstructionOrdering {
public:
  void initialize(const MachineFunction &MF);
  void clear() { InstNumberMap.clear(); }

  /// Check if instruction \p A comes before \p B, where \p A and \p B both
  /// belong to the MachineFunction passed to initialize().
  bool isBefore(const MachineInstr *A, const MachineInstr *B) const;

private:
  /// Each instruction is assigned an order number.
  DenseMap<const MachineInstr *, unsigned> InstNumberMap;
};
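
// A minimal usage sketch (assumes a fully built MachineFunction `MF` and
// instruction pointers `A` and `B` within it):
//
//   InstructionOrdering Ordering;
//   Ordering.initialize(MF);
//   bool AFirst = Ordering.isBefore(A, B);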

/// For each user variable, keep a list of instruction ranges where this
/// variable is accessible. The variables are listed in order of appearance.
class DbgValueHistoryMap {
public:
  /// Index in the entry vector.
  typedef size_t EntryIndex;

  /// Special value to indicate that an entry is valid until the end of the
  /// function.
  static const EntryIndex NoEntry = std::numeric_limits<EntryIndex>::max();

  /// Specifies a change in a variable's debug value history.
  ///
  /// There exist two types of entries:
  ///
  /// * Debug value entry:
  ///
  ///   A new debug value becomes live. If the entry's \p EndIndex is \p NoEntry,
  ///   the value is valid until the end of the function. For other values, the
  ///   index points to the entry in the entry vector that ends this debug
  ///   value. The ending entry can either be an overlapping debug value, or
  ///   an instruction that clobbers the value.
  ///
  /// * Clobbering entry:
  ///
  ///   This entry's instruction clobbers one or more preceding
  ///   register-described debug values that have their end index
  ///   set to this entry's position in the entry vector.
  class Entry {
    friend DbgValueHistoryMap;

  public:
    enum EntryKind { DbgValue, Clobber };

    Entry(const MachineInstr *Instr, EntryKind Kind)
        : Instr(Instr, Kind), EndIndex(NoEntry) {}

    const MachineInstr *getInstr() const { return Instr.getPointer(); }
    EntryIndex getEndIndex() const { return EndIndex; }
    EntryKind getEntryKind() const { return Instr.getInt(); }

    bool isClobber() const { return getEntryKind() == Clobber; }
    bool isDbgValue() const { return getEntryKind() == DbgValue; }
    bool isClosed() const { return EndIndex != NoEntry; }

    void endEntry(EntryIndex EndIndex);

  private:
    PointerIntPair<const MachineInstr *, 1, EntryKind> Instr;
    EntryIndex EndIndex;
  };
  using Entries = SmallVector<Entry, 4>;
  using InlinedEntity = std::pair<const DINode *, const DILocation *>;
  using EntriesMap = MapVector<InlinedEntity, Entries>;

private:
  EntriesMap VarEntries;

public:
  bool startDbgValue(InlinedEntity Var, const MachineInstr &MI,
                     EntryIndex &NewIndex);
  EntryIndex startClobber(InlinedEntity Var, const MachineInstr &MI);

  Entry &getEntry(InlinedEntity Var, EntryIndex Index) {
    auto &Entries = VarEntries[Var];
    return Entries[Index];
  }

  /// Test whether a vector of entries features any non-empty locations. It
  /// could have no entries, or only DBG_VALUE $noreg entries.
  bool hasNonEmptyLocation(const Entries &Entries) const;

  /// Drop location ranges which exist entirely outside each variable's scope.
  void trimLocationRanges(const MachineFunction &MF, LexicalScopes &LScopes,
                          const InstructionOrdering &Ordering);
  bool empty() const { return VarEntries.empty(); }
  void clear() { VarEntries.clear(); }
  EntriesMap::const_iterator begin() const { return VarEntries.begin(); }
  EntriesMap::const_iterator end() const { return VarEntries.end(); }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump(StringRef FuncName) const;
#endif
};

/// For each inlined instance of a source-level label, keep the corresponding
/// DBG_LABEL instruction. The DBG_LABEL instruction could be used to generate
/// a temporary (assembler) label before it.
class DbgLabelInstrMap {
public:
  using InlinedEntity = std::pair<const DINode *, const DILocation *>;
  using InstrMap = MapVector<InlinedEntity, const MachineInstr *>;

private:
  InstrMap LabelInstr;

public:
  void addInstr(InlinedEntity Label, const MachineInstr &MI);

  bool empty() const { return LabelInstr.empty(); }
  void clear() { LabelInstr.clear(); }
  InstrMap::const_iterator begin() const { return LabelInstr.begin(); }
  InstrMap::const_iterator end() const { return LabelInstr.end(); }
};

void calculateDbgEntityHistory(const MachineFunction *MF,
                               const TargetRegisterInfo *TRI,
                               DbgValueHistoryMap &DbgValues,
                               DbgLabelInstrMap &DbgLabels);
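
// Illustrative driver (a sketch; `MF` is a hypothetical MachineFunction):
//
//   DbgValueHistoryMap DbgValues;
//   DbgLabelInstrMap DbgLabels;
//   calculateDbgEntityHistory(&MF, MF.getSubtarget().getRegisterInfo(),
//                             DbgValues, DbgLabels);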

} // end namespace llvm

#endif // LLVM_CODEGEN_DBGENTITYHISTORYCALCULATOR_H
//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares codegen opcodes and related utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ISDOPCODES_H
#define LLVM_CODEGEN_ISDOPCODES_H

#include "llvm/CodeGen/ValueTypes.h"

namespace llvm {

/// ISD namespace - This namespace contains an enum which represents all of the
/// SelectionDAG node types and value types.
///
namespace ISD {

//===--------------------------------------------------------------------===//
/// ISD::NodeType enum - This enum defines the target-independent operators
/// for a SelectionDAG.
///
/// Targets may also define target-dependent operator codes for SDNodes. For
/// example, on x86, these are the enum values in the X86ISD namespace.
/// Targets should aim to use target-independent operators to model their
/// instruction sets as much as possible, and only use target-dependent
/// operators when they have special requirements.
///
/// Finally, during and after selection proper, SNodes may use special
/// operator codes that correspond directly with MachineInstr opcodes. These
/// are used to represent selected instructions. See the isMachineOpcode()
/// and getMachineOpcode() member functions of SDNode.
///
enum NodeType {

  /// DELETED_NODE - This is an illegal value that is used to catch
  /// errors.  This opcode is not a legal opcode for any node.
  DELETED_NODE,

  /// EntryToken - This is the marker used to indicate the start of a region.
  EntryToken,

  /// TokenFactor - This node takes multiple tokens as input and produces a
  /// single token result. This is used to represent the fact that the operand
  /// operators are independent of each other.
  TokenFactor,

  /// AssertSext, AssertZext - These nodes record if a register contains a
  /// value that has already been zero or sign extended from a narrower type.
  /// These nodes take two operands.  The first is the node that has already
  /// been extended, and the second is a value type node indicating the width
  /// of the extension.
  /// NOTE: If the source value (or any vector element value) is poisoned,
  /// the assertion does not hold for that value.
  AssertSext,
  AssertZext,

  /// AssertAlign - These nodes record if a register contains a value that
  /// has a known alignment and the trailing bits are known to be zero.
  /// NOTE: If the source value (or any vector element value) is poisoned,
  /// the assertion does not hold for that value.
  AssertAlign,

  /// Various leaf nodes.
  BasicBlock,
  VALUETYPE,
  CONDCODE,
  Register,
  RegisterMask,
  Constant,
  ConstantFP,
  GlobalAddress,
  GlobalTLSAddress,
  FrameIndex,
  JumpTable,
  ConstantPool,
  ExternalSymbol,
  BlockAddress,

  /// The address of the GOT
  GLOBAL_OFFSET_TABLE,

  /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
  /// llvm.returnaddress on the DAG.  These nodes take one operand, the index
  /// of the frame or return address to return.  An index of zero corresponds
  /// to the current function's frame or return address, an index of one to
  /// the parent's frame or return address, and so on.
  FRAMEADDR,
  RETURNADDR,

  /// ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
  /// This node takes no operand, returns a target-specific pointer to the
  /// place in the stack frame where the return address of the current
  /// function is stored.
  ADDROFRETURNADDR,

  /// SPONENTRY - Represents the llvm.sponentry intrinsic. Takes no argument
  /// and returns the stack pointer value at the entry of the current
  /// function calling this intrinsic.
  SPONENTRY,

  /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
  /// Materializes the offset from the local object pointer of another
  /// function to a particular local object passed to llvm.localescape. The
  /// operand is the MCSymbol label used to represent this offset, since
  /// typically the offset is not known until after code generation of the
  /// parent.
  LOCAL_RECOVER,

  /// READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on
  /// the DAG, which implements the named register global variables extension.
  READ_REGISTER,
  WRITE_REGISTER,

  /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
  /// first (possible) on-stack argument. This is needed for correct stack
  /// adjustment during unwind.
  FRAME_TO_ARGS_OFFSET,

  /// EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical
  /// Frame Address (CFA), generally the value of the stack pointer at the
  /// call site in the previous frame.
  EH_DWARF_CFA,

  /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
  /// 'eh_return' gcc dwarf builtin, which is used to return from
  /// exception. The general meaning is: adjust stack by OFFSET and pass
  /// execution to HANDLER. Many platform-related details also :)
  EH_RETURN,

  /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
  /// This corresponds to the eh.sjlj.setjmp intrinsic.
  /// It takes an input chain and a pointer to the jump buffer as inputs
  /// and returns an outchain.
  EH_SJLJ_SETJMP,

  /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
  /// This corresponds to the eh.sjlj.longjmp intrinsic.
  /// It takes an input chain and a pointer to the jump buffer as inputs
  /// and returns an outchain.
  EH_SJLJ_LONGJMP,

  /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
  /// The target initializes the dispatch table here.
  EH_SJLJ_SETUP_DISPATCH,

  /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
  /// simplification, or lowering of the constant. They are used for constants
  /// which are known to fit in the immediate fields of their users, or for
  /// carrying magic numbers which are not values which need to be
  /// materialized in registers.
  TargetConstant,
  TargetConstantFP,

  /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
  /// anything else with this node, and this is valid in the target-specific
  /// dag, turning into a GlobalAddress operand.
  TargetGlobalAddress,
  TargetGlobalTLSAddress,
  TargetFrameIndex,
  TargetJumpTable,
  TargetConstantPool,
  TargetExternalSymbol,
  TargetBlockAddress,

  MCSymbol,

  /// TargetIndex - Like a constant pool entry, but with completely
  /// target-dependent semantics. Holds target flags, a 32-bit index, and a
  /// 64-bit index. Targets can use this however they like.
  TargetIndex,

  /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
  /// This node represents a target intrinsic function with no side effects.
  /// The first operand is the ID number of the intrinsic from the
  /// llvm::Intrinsic namespace.  The operands to the intrinsic follow.  The
  /// node returns the result of the intrinsic.
  INTRINSIC_WO_CHAIN,

  /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
  /// This node represents a target intrinsic function with side effects that
  /// returns a result.  The first operand is a chain pointer.  The second is
  /// the ID number of the intrinsic from the llvm::Intrinsic namespace.  The
  /// operands to the intrinsic follow.  The node has two results, the result
  /// of the intrinsic and an output chain.
  INTRINSIC_W_CHAIN,

  /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
  /// This node represents a target intrinsic function with side effects that
  /// does not return a result.  The first operand is a chain pointer.  The
  /// second is the ID number of the intrinsic from the llvm::Intrinsic
  /// namespace.  The operands to the intrinsic follow.
  INTRINSIC_VOID,

  /// CopyToReg - This node has three operands: a chain, a register number to
  /// set to this value, and a value.
  CopyToReg,

  /// CopyFromReg - This node indicates that the input value is a virtual or
  /// physical register that is defined outside of the scope of this
  /// SelectionDAG.  The register is available from the RegisterSDNode object.
  CopyFromReg,

  /// UNDEF - An undefined node.
  UNDEF,

  // FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or
  // is evaluated to UNDEF), or returns VAL otherwise. Note that each
  // read of UNDEF can yield a different value, but FREEZE(UNDEF) cannot.
  FREEZE,

  /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
  /// a Constant, which is required to be operand #1) half of the integer or
  /// float value specified as operand #0.  This is only for use before
  /// legalization, for values that will be broken into multiple registers.
  EXTRACT_ELEMENT,

  /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
  /// Given two values of the same integer value type, this produces a value
  /// twice as big.  Like EXTRACT_ELEMENT, this can only be used before
  /// legalization. The lower part of the composite value should be in
  /// element 0 and the upper part should be in element 1.
  BUILD_PAIR,
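
  // For illustration: given i64 0x1122334455667788, EXTRACT_ELEMENT with
  // operand #1 = 0 yields the lower half (i32 0x55667788) and operand #1 = 1
  // the upper half (i32 0x11223344); BUILD_PAIR(i32 0x55667788,
  // i32 0x11223344) reassembles the original i64.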

  /// MERGE_VALUES - This node takes multiple discrete operands and returns
  /// them all as its individual results.  This node has exactly the same
  /// number of inputs and outputs. This node is useful for some pieces of the
  /// code generator that want to think about a single node with multiple
  /// results, not multiple nodes.
  MERGE_VALUES,

  /// Simple integer binary arithmetic operators.
  ADD,
  SUB,
  MUL,
  SDIV,
  UDIV,
  SREM,
  UREM,

  /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
  /// a signed/unsigned value of type i[2*N], and return the full value as
  /// two results, each of type iN.
  SMUL_LOHI,
  UMUL_LOHI,

  /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
  /// remainder result.
  SDIVREM,
  UDIVREM,

  /// CARRY_FALSE - This node is used when folding other nodes,
  /// like ADDC/SUBC, which indicate the carry result is always false.
  CARRY_FALSE,

  /// Carry-setting nodes for multiple precision addition and subtraction.
  /// These nodes take two operands of the same value type, and produce two
  /// results.  The first result is the normal add or sub result, the second
  /// result is the carry flag result.
  /// FIXME: These nodes are deprecated in favor of UADDO_CARRY and USUBO_CARRY.
  /// They are kept around for now to provide a smooth transition path
  /// toward the use of UADDO_CARRY/USUBO_CARRY and will eventually be removed.
  ADDC,
  SUBC,

  /// Carry-using nodes for multiple precision addition and subtraction. These
  /// nodes take three operands: The first two are the normal lhs and rhs to
  /// the add or sub, and the third is the input carry flag.  These nodes
  /// produce two results; the normal result of the add or sub, and the output
  /// carry flag.  These nodes both read and write a carry flag to allow them
  /// to be chained together for add and sub of arbitrarily large
  /// values.
  ADDE,
  SUBE,

  /// Carry-using nodes for multiple precision addition and subtraction.
  /// These nodes take three operands: The first two are the normal lhs and
  /// rhs to the add or sub, and the third is a boolean value that is 1 if and
  /// only if there is an incoming carry/borrow. These nodes produce two
  /// results: the normal result of the add or sub, and a boolean value that is
  /// 1 if and only if there is an outgoing carry/borrow.
  ///
  /// Care must be taken if these opcodes are lowered to hardware instructions
  /// that use the inverse logic -- 0 if and only if there is an
  /// incoming/outgoing carry/borrow.  In such cases, you must preserve the
  /// semantics of these opcodes by inverting the incoming carry/borrow, feeding
  /// it to the add/sub hardware instruction, and then inverting the outgoing
  /// carry/borrow.
  ///
  /// The use of these opcodes is preferable to adde/sube if the target supports
  /// it, as the carry is a regular value rather than a glue, which allows
  /// further optimisation.
  ///
  /// These opcodes are different from [US]{ADD,SUB}O in that
  /// U{ADD,SUB}O_CARRY consume and produce a carry/borrow, whereas
  /// [US]{ADD,SUB}O produce an overflow.
  UADDO_CARRY,
  USUBO_CARRY,

  /// Carry-using overflow-aware nodes for multiple precision addition and
  /// subtraction. These nodes take three operands: The first two are normal lhs
  /// and rhs to the add or sub, and the third is a boolean indicating if there
  /// is an incoming carry. They produce two results: the normal result of the
  /// add or sub, and a boolean that indicates if an overflow occurred (*not*
  /// a flag, because it may be a store to memory, etc.). If the type of the
  /// boolean is not i1 then the high bits conform to getBooleanContents.
  SADDO_CARRY,
  SSUBO_CARRY,

  /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
  /// These nodes take two operands: the normal LHS and RHS to the add. They
  /// produce two results: the normal result of the add, and a boolean that
  /// indicates if an overflow occurred (*not* a flag, because it may be store
  /// to memory, etc.).  If the type of the boolean is not i1 then the high
  /// bits conform to getBooleanContents.
  /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
  SADDO,
  UADDO,

  /// Same for subtraction.
  SSUBO,
  USUBO,

  /// Same for multiplication.
  SMULO,
  UMULO,

  /// RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2
  /// integers with the same bit width (W). If the true value of LHS + RHS
  /// exceeds the largest value that can be represented by W bits, the
  /// resulting value is this maximum value. Otherwise, if this value is less
  /// than the smallest value that can be represented by W bits, the
  /// resulting value is this minimum value.
  SADDSAT,
  UADDSAT,
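
  // For illustration, on i8: saddsat(100, 100) saturates to 127,
  // saddsat(-100, -100) to -128, and uaddsat(200, 100) to 255.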

  /// RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2
  /// integers with the same bit width (W). If the true value of LHS - RHS
  /// exceeds the largest value that can be represented by W bits, the
  /// resulting value is this maximum value. Otherwise, if this value is less
  /// than the smallest value that can be represented by W bits, the
  /// resulting value is this minimum value.
  SSUBSAT,
  USUBSAT,

  /// RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift. The first
  /// operand is the value to be shifted, and the second argument is the amount
  /// to shift by. Both must be integers of the same bit width (W). If the true
  /// value of LHS << RHS exceeds the largest value that can be represented by
  /// W bits, the resulting value is this maximum value. Otherwise, if this
  /// value is less than the smallest value that can be represented by W bits,
  /// the resulting value is this minimum value.
  SSHLSAT,
  USHLSAT,

  /// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication
  /// on 2 integers with the same width and scale. SCALE represents the scale
  /// of both operands as fixed point numbers. This SCALE parameter must be a
  /// constant integer. A scale of zero is effectively performing
  /// multiplication on 2 integers.
  SMULFIX,
  UMULFIX,
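
  // For illustration, with SCALE = 2 (quarter units): 6 encodes 1.5 and 10
  // encodes 2.5, so smulfix(6, 10, 2) = (6 * 10) >> 2 = 15, which encodes
  // 3.75.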

  /// Same as the corresponding unsaturated fixed point instructions, but the
  /// result is clamped between the min and max values representable by the
  /// bits of the first 2 operands.
  SMULFIXSAT,
  UMULFIXSAT,

  /// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
  /// 2 integers with the same width and scale. SCALE represents the scale
  /// of both operands as fixed point numbers. This SCALE parameter must be a
  /// constant integer.
  SDIVFIX,
  UDIVFIX,

  /// Same as the corresponding unsaturated fixed point instructions, but the
  /// result is clamped between the min and max values representable by the
  /// bits of the first 2 operands.
  SDIVFIXSAT,
  UDIVFIXSAT,

  /// Simple binary floating point operators.
  FADD,
  FSUB,
  FMUL,
  FDIV,
  FREM,

  /// Constrained versions of the binary floating point operators.
  /// These will be lowered to the simple operators before final selection.
  /// They are used to limit optimizations while the DAG is being
  /// optimized.
  STRICT_FADD,
  STRICT_FSUB,
  STRICT_FMUL,
  STRICT_FDIV,
  STRICT_FREM,
  STRICT_FMA,

  /// Constrained versions of libm-equivalent floating point intrinsics.
  /// These will be lowered to the equivalent non-constrained pseudo-op
  /// (or expanded to the equivalent library call) before final selection.
  /// They are used to limit optimizations while the DAG is being optimized.
  STRICT_FSQRT,
  STRICT_FPOW,
  STRICT_FPOWI,
  STRICT_FLDEXP,
  STRICT_FSIN,
  STRICT_FCOS,
  STRICT_FEXP,
  STRICT_FEXP2,
  STRICT_FLOG,
  STRICT_FLOG10,
  STRICT_FLOG2,
  STRICT_FRINT,
  STRICT_FNEARBYINT,
  STRICT_FMAXNUM,
  STRICT_FMINNUM,
  STRICT_FCEIL,
  STRICT_FFLOOR,
  STRICT_FROUND,
  STRICT_FROUNDEVEN,
  STRICT_FTRUNC,
  STRICT_LROUND,
  STRICT_LLROUND,
  STRICT_LRINT,
  STRICT_LLRINT,
  STRICT_FMAXIMUM,
  STRICT_FMINIMUM,

  /// STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or
  /// unsigned integer. These have the same semantics as fptosi and fptoui
  /// in IR.
  /// They are used to limit optimizations while the DAG is being optimized.
  STRICT_FP_TO_SINT,
  STRICT_FP_TO_UINT,

  /// STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to
  /// a floating point value. These have the same semantics as sitofp and
  /// uitofp in IR.
  /// They are used to limit optimizations while the DAG is being optimized.
  STRICT_SINT_TO_FP,
  STRICT_UINT_TO_FP,

  /// X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating
  /// point type down to the precision of the destination VT.  TRUNC is a
  /// flag, which is always an integer that is zero or one.  If TRUNC is 0,
  /// this is a normal rounding, if it is 1, this FP_ROUND is known to not
  /// change the value of Y.
  ///
  /// The TRUNC = 1 case is used in cases where we know that the value will
  /// not be modified by the node, because Y is not using any of the extra
  /// precision of source type.  This allows certain transformations like
  /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,1)) -> X which are not safe for
  /// STRICT_FP_EXTEND(STRICT_FP_ROUND(X,0)) because the extra bits aren't
  /// removed.
  /// It is used to limit optimizations while the DAG is being optimized.
  STRICT_FP_ROUND,

  /// X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP
  /// type.
  /// It is used to limit optimizations while the DAG is being optimized.
  STRICT_FP_EXTEND,

  /// STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used
  /// for floating-point operands only.  STRICT_FSETCC performs a quiet
  /// comparison operation, while STRICT_FSETCCS performs a signaling
  /// comparison operation.
  STRICT_FSETCC,
  STRICT_FSETCCS,

  // FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
  FPTRUNC_ROUND,

  /// FMA - Perform a * b + c with no intermediate rounding step.
  FMA,

  /// FMAD - Perform a * b + c, while getting the same result as the
  /// separately rounded operations.
  FMAD,

  /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.  NOTE: This
  /// DAG node does not require that X and Y have the same type, just that
  /// they are both floating point.  X and the result must have the same type.
  /// FCOPYSIGN(f32, f64) is allowed.
  FCOPYSIGN,

  /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
  /// value as an integer 0/1 value.
  FGETSIGN,

  /// Returns platform specific canonical encoding of a floating point number.
  FCANONICALIZE,

  /// Performs a check of floating point class property, defined by IEEE-754.
  /// The first operand is the floating point value to check. The second operand
  /// specifies the checked property and is a TargetConstant which specifies
  /// test in the same way as intrinsic 'is_fpclass'.
  /// Returns boolean value.
  IS_FPCLASS,

  /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector
  /// with the specified, possibly variable, elements. The types of the
  /// operands must match the vector element type, except that integer types
  /// are allowed to be larger than the element type, in which case the
  /// operands are implicitly truncated. The types of the operands must all
  /// be the same.
  BUILD_VECTOR,

  /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
  /// at IDX replaced with VAL. If the type of VAL is larger than the vector
  /// element type then VAL is truncated before replacement.
  ///
  /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
  /// vector width. IDX is not first scaled by the runtime scaling factor of
  /// VECTOR.
  INSERT_VECTOR_ELT,

  /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
  /// identified by the (potentially variable) element number IDX. If the return
  /// type is an integer type larger than the element type of the vector, the
  /// result is extended to the width of the return type. In that case, the high
  /// bits are undefined.
  ///
  /// If VECTOR is a scalable vector, then IDX may be larger than the minimum
  /// vector width. IDX is not first scaled by the runtime scaling factor of
  /// VECTOR.
  EXTRACT_VECTOR_ELT,

  /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
  /// vector type with the same length and element type, this produces a
  /// concatenated vector result value, with length equal to the sum of the
  /// lengths of the input vectors. If VECTOR0 is a fixed-width vector, then
  /// VECTOR1..VECTORN must all be fixed-width vectors. Similarly, if VECTOR0
  /// is a scalable vector, then VECTOR1..VECTORN must all be scalable vectors.
  CONCAT_VECTORS,

  /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2
  /// inserted into VECTOR1. IDX represents the starting element number at which
  /// VECTOR2 will be inserted. IDX must be a constant multiple of T's known
  /// minimum vector length. Let the type of VECTOR2 be T, then if T is a
  /// scalable vector, IDX is first scaled by the runtime scaling factor of T.
  /// The elements of VECTOR1 starting at IDX are overwritten with VECTOR2.
  /// Elements IDX through (IDX + num_elements(T) - 1) must be valid VECTOR1
  /// indices. If this condition cannot be determined statically but is false at
  /// runtime, then the result vector is undefined. The IDX parameter must be a
  /// vector index constant type, which for most targets will be an integer
  /// pointer type.
  ///
  /// This operation supports inserting a fixed-width vector into a scalable
  /// vector, but not the other way around.
  INSERT_SUBVECTOR,

  /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
  /// Let the result type be T, then IDX represents the starting element number
  /// from which a subvector of type T is extracted. IDX must be a constant
  /// multiple of T's known minimum vector length. If T is a scalable vector,
  /// IDX is first scaled by the runtime scaling factor of T. Elements IDX
  /// through (IDX + num_elements(T) - 1) must be valid VECTOR indices. If this
  /// condition cannot be determined statically but is false at runtime, then
  /// the result vector is undefined. The IDX parameter must be a vector index
  /// constant type, which for most targets will be an integer pointer type.
  ///
  /// This operation supports extracting a fixed-width vector from a scalable
  /// vector, but not the other way around.
  EXTRACT_SUBVECTOR,

  /// VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
  /// output vectors having the same type. The first output contains the even
  /// indices from CONCAT_VECTORS(VEC1, VEC2), with the second output
  /// containing the odd indices. The relative order of elements within an
  /// output matches that of the concatenated input.
  VECTOR_DEINTERLEAVE,

  /// VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and
  /// output vectors having the same type. The first output contains the
  /// result of interleaving the low half of CONCAT_VECTORS(VEC1, VEC2), with
  /// the second output containing the result of interleaving the high half.
  VECTOR_INTERLEAVE,
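
  // For illustration, on 4-element vectors:
  //   VECTOR_INTERLEAVE(<a,b,c,d>, <e,f,g,h>)   --> <a,e,b,f>, <c,g,d,h>
  //   VECTOR_DEINTERLEAVE(<a,e,b,f>, <c,g,d,h>) --> <a,b,c,d>, <e,f,g,h>
  // so the two operations are inverses of each other.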

  /// VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR,
  /// whose elements are shuffled using the following algorithm:
  ///   RESULT[i] = VECTOR[VECTOR.ElementCount - 1 - i]
  VECTOR_REVERSE,

  /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
  /// VEC1/VEC2.  A VECTOR_SHUFFLE node also contains an array of constant int
  /// values that indicate which value (or undef) each result element will
  /// get.  These constant ints are accessible through the
  /// ShuffleVectorSDNode class.  This is quite similar to the Altivec
  /// 'vperm' instruction, except that the indices must be constants and are
  /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
  VECTOR_SHUFFLE,

  /// VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as
  /// VEC1/VEC2 from CONCAT_VECTORS(VEC1, VEC2), based on the IMM in two ways.
  /// Let the result type be T, if IMM is positive it represents the starting
  /// element number (an index) from which a subvector of type T is extracted
  /// from CONCAT_VECTORS(VEC1, VEC2). If IMM is negative it represents a count
  /// specifying the number of trailing elements to extract from VEC1, where the
  /// elements of T are selected using the following algorithm:
  ///   RESULT[i] = CONCAT_VECTORS(VEC1,VEC2)[VEC1.ElementCount - ABS(IMM) + i]
  /// If IMM is not in the range [-VL, VL-1] the result vector is undefined. IMM
  /// is a constant integer.
  VECTOR_SPLICE,
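
  // For illustration, with VEC1 = <1,2,3,4> and VEC2 = <5,6,7,8>:
  //   VECTOR_SPLICE(VEC1, VEC2, 1)  --> <2,3,4,5>  (extract starting at 1)
  //   VECTOR_SPLICE(VEC1, VEC2, -2) --> <3,4,5,6>  (last 2 elements of VEC1,
  //                                                 then the head of VEC2)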

  /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
  /// scalar value into element 0 of the resultant vector type.  The top
  /// elements 1 to N-1 of the N-element vector are undefined.  The type
  /// of the operand must match the vector element type, except when they
  /// are integer types.  In this case the operand is allowed to be wider
  /// than the vector element type, and is implicitly truncated to it.
  SCALAR_TO_VECTOR,

  /// SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL
  /// duplicated in all lanes. The type of the operand must match the vector
  /// element type, except when they are integer types.  In this case the
  /// operand is allowed to be wider than the vector element type, and is
  /// implicitly truncated to it.
  SPLAT_VECTOR,

  /// SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the
  /// scalar values joined together and then duplicated in all lanes. This
  /// represents a SPLAT_VECTOR that has had its scalar operand expanded. This
  /// allows representing a 64-bit splat on a target with 32-bit integers. The
  /// total width of the scalars must cover the element width. SCALAR1 contains
  /// the least significant bits of the value regardless of endianness and all
  /// scalars should have the same type.
  SPLAT_VECTOR_PARTS,

  /// STEP_VECTOR(IMM) - Returns a scalable vector whose lanes are comprised
  /// of a linear sequence of unsigned values starting from 0 with a step of
  /// IMM, where IMM must be a TargetConstant with type equal to the vector
  /// element type. The arithmetic is performed modulo the bitwidth of the
  /// element.
  ///
  /// The operation does not support returning fixed-width vectors or
  /// non-constant operands.
  STEP_VECTOR,

  /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
  /// producing an unsigned/signed value of type i[2*N], then return the top
  /// part.
  MULHU,
  MULHS,

  /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of
  /// type i[N+1], halving the result by shifting it one bit right.
  /// shr(add(ext(X), ext(Y)), 1)
  AVGFLOORS,
  AVGFLOORU,
  /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an
  /// integer of type i[N+2], add 1 and halve the result by shifting it one bit
  /// right. shr(add(ext(X), ext(Y), 1), 1)
  AVGCEILS,
  AVGCEILU,
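
  // For illustration, on i8: avgflooru(7, 8) = (7 + 8) >> 1 = 7 and
  // avgceilu(7, 8) = (7 + 8 + 1) >> 1 = 8. The widened intermediate cannot
  // wrap, so avgflooru(255, 255) is 255, not 127.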

  // ABDS/ABDU - Absolute difference - Return the absolute difference between
  // two numbers interpreted as signed/unsigned.
  // i.e. trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
  //  or trunc(abs(zext(Op0) - zext(Op1))) becomes abdu(Op0, Op1)
  ABDS,
  ABDU,

  /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
  /// integers.
  SMIN,
  SMAX,
  UMIN,
  UMAX,

  /// Bitwise operators - logical and, logical or, logical xor.
  AND,
  OR,
  XOR,

  /// ABS - Determine the unsigned absolute value of a signed integer value of
  /// the same bitwidth.
  /// Note: A value of INT_MIN will return INT_MIN, no saturation or overflow
  /// is performed.
  ABS,

  /// Shift and rotation operations.  After legalization, the type of the
  /// shift amount is known to be TLI.getShiftAmountTy().  Before legalization
  /// the shift amount can be any type, but care must be taken to ensure it is
  /// large enough.  TLI.getShiftAmountTy() is i8 on some targets, but before
  /// legalization, types like i1024 can occur and i8 doesn't have enough bits
  /// to represent the shift amount.
  /// When the 1st operand is a vector, the shift amount must be in the same
  /// type. (TLI.getShiftAmountTy() will return the same type when the input
  /// type is a vector.)
  /// For rotates and funnel shifts, the shift amount is treated as an unsigned
  /// amount modulo the element size of the first operand.
  ///
  /// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
  /// fshl(X,Y,Z): (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
  /// fshr(X,Y,Z): (X << (BW - (Z % BW))) | (Y >> (Z % BW))
  SHL,
  SRA,
  SRL,
  ROTL,
  ROTR,
  FSHL,
  FSHR,
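
  // For illustration, on i8: fshl(0x01, 0x80, 1) = (0x01 << 1) | (0x80 >> 7)
  // = 0x03, and a rotate is the degenerate case rotl(x, r) == fshl(x, x, r).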

  /// Byte Swap and Counting operators.
  BSWAP,
  CTTZ,
  CTLZ,
  CTPOP,
  BITREVERSE,
  PARITY,

  /// Bit counting operators with an undefined result for zero inputs.
  CTTZ_ZERO_UNDEF,
  CTLZ_ZERO_UNDEF,

  /// Select(COND, TRUEVAL, FALSEVAL).  If the type of the boolean COND is not
  /// i1 then the high bits must conform to getBooleanContents.
  SELECT,

  /// Select with a vector condition (op #0) and two vector operands (ops #1
  /// and #2), returning a vector result.  All vectors have the same length.
  /// Much like the scalar select and setcc, each bit in the condition selects
  /// whether the corresponding result element is taken from op #1 or op #2.
  /// At first, the VSELECT condition is of vXi1 type. Later, targets may
  /// change the condition type in order to match the VSELECT node using a
  /// pattern. The condition follows the BooleanContent format of the target.
  VSELECT,

  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #2 and #3) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #4, a CondCodeSDNode.
  SELECT_CC,

  /// SetCC operator - This evaluates to a true value iff the condition is
  /// true.  If the result value type is not i1 then the high bits conform
  /// to getBooleanContents.  The operands to this are the left and right
  /// operands to compare (ops #0, and #1) and the condition code to compare
  /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
  /// then the result type must also be a vector type.
  SETCC,

  /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
  /// op #2 is a boolean indicating if there is an incoming carry. This
  /// operator checks the result of "LHS - RHS - Carry", and can be used to
  /// compare two wide integers:
  /// (setcccarry lhshi rhshi (usubo_carry lhslo rhslo) cc).
  /// Only valid for integers.
  SETCCCARRY,

  /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
  /// integer shift operations.  The operation ordering is:
  ///       [Lo,Hi] = op [LoLHS,HiLHS], Amt
  SHL_PARTS,
  SRA_PARTS,
  SRL_PARTS,

  /// Conversion operators.  These are all single input single output
  /// operations.  For all of these, the result type must be strictly
  /// wider or narrower (depending on the operation) than the source
  /// type.

  /// SIGN_EXTEND - Used for integer types, replicating the sign bit
  /// into new bits.
  SIGN_EXTEND,

  /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
  ZERO_EXTEND,

  /// ANY_EXTEND - Used for integer types.  The high bits are undefined.
  ANY_EXTEND,

  /// TRUNCATE - Completely drop the high bits.
  TRUNCATE,

  /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
  /// depends on the first letter) to floating point.
  SINT_TO_FP,
  UINT_TO_FP,

  /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
  /// sign extend a small value in a large integer register (e.g. sign
  /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
  /// with the 7th bit).  The size of the smaller type is indicated by
  /// operand #1, a ValueType node.
  SIGN_EXTEND_INREG,
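
  // For illustration, on i32 with an i8 ValueType operand: 0x000000FF (low
  // byte -1) becomes 0xFFFFFFFF, as if computed by (x << 24) >> 24 using an
  // arithmetic right shift.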

  /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
  /// in-register any-extension of the low lanes of an integer vector. The
  /// result type must have fewer elements than the operand type, and those
  /// elements must be larger integer types such that the total size of the
  /// operand type is less than or equal to the size of the result type. Each
  /// of the low operand elements is any-extended into the corresponding,
  /// wider result elements with the high bits becoming undef.
  /// NOTE: The type legalizer prefers to make the operand and result size
  /// the same to allow expansion to shuffle vector during op legalization.
  ANY_EXTEND_VECTOR_INREG,

  /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
  /// in-register sign-extension of the low lanes of an integer vector. The
  /// result type must have fewer elements than the operand type, and those
  /// elements must be larger integer types such that the total size of the
  /// operand type is less than or equal to the size of the result type. Each
  /// of the low operand elements is sign-extended into the corresponding,
  /// wider result elements.
  /// NOTE: The type legalizer prefers to make the operand and result size
  /// the same to allow expansion to shuffle vector during op legalization.
  SIGN_EXTEND_VECTOR_INREG,

  /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
  /// in-register zero-extension of the low lanes of an integer vector. The
  /// result type must have fewer elements than the operand type, and those
  /// elements must be larger integer types such that the total size of the
  /// operand type is less than or equal to the size of the result type. Each
  /// of the low operand elements is zero-extended into the corresponding,
  /// wider result elements.
  /// NOTE: The type legalizer prefers to make the operand and result size
  /// the same to allow expansion to shuffle vector during op legalization.
  ZERO_EXTEND_VECTOR_INREG,

  /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
  /// integer. These have the same semantics as fptosi and fptoui in IR. If
  /// the FP value cannot fit in the integer type, the results are undefined.
  FP_TO_SINT,
  FP_TO_UINT,

  /// FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a
  /// signed or unsigned scalar integer type given in operand 1 with the
  /// following semantics:
  ///
  ///  * If the value is NaN, zero is returned.
  ///  * If the value is larger/smaller than the largest/smallest integer,
  ///    the largest/smallest integer is returned (saturation).
  ///  * Otherwise the result of rounding the value towards zero is returned.
  ///
  /// The scalar width of the type given in operand 1 must be equal to, or
  /// smaller than, the scalar result type width. It may end up being smaller
  /// than the result width as a result of integer type legalization.
  ///
  /// After converting to the scalar integer type in operand 1, the value is
  /// extended to the result VT. FP_TO_SINT_SAT sign extends and FP_TO_UINT_SAT
  /// zero extends.
  FP_TO_SINT_SAT,
  FP_TO_UINT_SAT,
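
  // For illustration, converting f32 to i8: FP_TO_SINT_SAT(300.7f) = 127,
  // FP_TO_SINT_SAT(-300.7f) = -128, FP_TO_SINT_SAT(NaN) = 0, and
  // FP_TO_SINT_SAT(5.9f) = 5 (rounded toward zero).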

  /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
  /// down to the precision of the destination VT.  TRUNC is a flag, which is
  /// always an integer that is zero or one.  If TRUNC is 0, this is a
  /// normal rounding, if it is 1, this FP_ROUND is known to not change the
  /// value of Y.
  ///
  /// The TRUNC = 1 case is used in cases where we know that the value will
  /// not be modified by the node, because Y is not using any of the extra
  /// precision of source type.  This allows certain transformations like
  /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
  /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
  FP_ROUND,

  /// Returns current rounding mode:
  /// -1 Undefined
  ///  0 Round to 0
  ///  1 Round to nearest, ties to even
  ///  2 Round to +inf
  ///  3 Round to -inf
  ///  4 Round to nearest, ties to zero
  /// Result is rounding mode and chain. Input is a chain.
  GET_ROUNDING,
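
  // Added note: modes -1..3 appear to follow the C standard's FLT_ROUNDS
  // encoding (mode 4 is an extension beyond C). A hedged host-side analogue,
  // assuming the host honors <cfloat>:
  //
  //   #include <cfloat>
  //   int Mode = FLT_ROUNDS; // -1 unknown, 0 toward zero, 1 to nearest,
  //                          // 2 toward +inf, 3 toward -inf.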

  /// Set rounding mode.
  /// The first operand is a chain pointer. The second specifies the required
  /// rounding mode, encoded in the same way as used in '``GET_ROUNDING``'.
  SET_ROUNDING,

  /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
  FP_EXTEND,

  /// BITCAST - This operator converts between integer, vector and FP
  /// values, as if the value was stored to memory with one type and loaded
  /// from the same address with the other type (or equivalently for vector
  /// format conversions, etc).  The source and result are required to have
  /// the same bit size (e.g.  f32 <-> i32).  This can also be used for
  /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
  /// getNode().
  ///
  /// This operator is subtly different from the bitcast instruction from
  /// LLVM-IR since this node may change the bits in the register. For
  /// example, this occurs on big-endian NEON and big-endian MSA where the
  /// layout of the bits in the register depends on the vector type and this
  /// operator acts as a shuffle operation for some vector type combinations.
  BITCAST,

  /// ADDRSPACECAST - This operator converts between pointers of different
  /// address spaces.
  ADDRSPACECAST,

  /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
  /// and truncation for half-precision (16 bit) floating-point numbers. These
  /// nodes form a semi-softened interface for dealing with f16 (as an i16),
  /// which is often a storage-only type but has native conversions.
  FP16_TO_FP,
  FP_TO_FP16,
  STRICT_FP16_TO_FP,
  STRICT_FP_TO_FP16,

  /// BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions
  /// and truncation for bfloat16. These nodes form a semi-softened interface
  /// for dealing with bf16 (as an i16), which is often a storage-only type but
  /// has native conversions.
  BF16_TO_FP,
  FP_TO_BF16,

  /// Perform various unary floating-point operations inspired by libm. For
  /// FPOWI, the result is undefined if the integer operand doesn't fit into
  /// sizeof(int).
  FNEG,
  FABS,
  FSQRT,
  FCBRT,
  FSIN,
  FCOS,
  FPOW,
  FPOWI,
  /// FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
  FLDEXP,

  /// FFREXP - frexp, extract fractional and exponent component of a
  /// floating-point value. Returns the two components as separate return
  /// values.
  FFREXP,

  FLOG,
  FLOG2,
  FLOG10,
  FEXP,
  FEXP2,
  FCEIL,
  FTRUNC,
  FRINT,
  FNEARBYINT,
  FROUND,
  FROUNDEVEN,
  FFLOOR,
  LROUND,
  LLROUND,
  LRINT,
  LLRINT,

  /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
  /// values.
  ///
  /// In the case where a single input is a NaN (either signaling or quiet),
  /// the non-NaN input is returned.
  ///
  /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
  FMINNUM,
  FMAXNUM,

  /// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
  /// two values, following the IEEE-754 2008 definition. This differs from
  /// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
  /// signaling NaN, returns a quiet NaN.
  FMINNUM_IEEE,
  FMAXNUM_IEEE,

  /// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
  /// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
  /// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
  FMINIMUM,
  FMAXIMUM,

  /// FSINCOS - Compute both fsin and fcos as a single operation.
  FSINCOS,

  /// Gets the current floating-point environment. The first operand is a token
  /// chain. The results are FP environment, represented by an integer value,
  /// and a token chain.
  GET_FPENV,

  /// Sets the current floating-point environment. The first operand is a token
  /// chain, the second is FP environment, represented by an integer value. The
  /// result is a token chain.
  SET_FPENV,

  /// Set floating-point environment to default state. The first operand and the
  /// result are token chains.
  RESET_FPENV,

  /// Gets the current floating-point environment. The first operand is a token
  /// chain, the second is a pointer to the memory where the FP environment is
  /// stored. The result is a token chain.
  GET_FPENV_MEM,

  /// Sets the current floating-point environment. The first operand is a token
  /// chain, the second is a pointer to the memory from which the FP
  /// environment is loaded. The result is a token chain.
  SET_FPENV_MEM,

  /// LOAD and STORE have token chains as their first operand, then the same
  /// operands as an LLVM load/store instruction, then an offset node that
  /// is added / subtracted from the base pointer to form the address (for
  /// indexed memory ops).
  LOAD,
  STORE,

  /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
  /// to a specified boundary.  This node always has two return values: a new
  /// stack pointer value and a chain. The first operand is the token chain,
  /// the second is the number of bytes to allocate, and the third is the
  /// alignment boundary.  The size is guaranteed to be a multiple of the
  /// stack alignment, and the alignment is guaranteed to be bigger than the
  /// stack alignment (if required) or 0 to get standard stack alignment.
  DYNAMIC_STACKALLOC,

  /// Control flow instructions.  These all have token chains.

  /// BR - Unconditional branch.  The first operand is the chain
  /// operand, the second is the MBB to branch to.
  BR,

  /// BRIND - Indirect branch.  The first operand is the chain, the second
  /// is the value to branch to, which must be of the same type as the
  /// target's pointer type.
  BRIND,

  /// BR_JT - Jumptable branch. The first operand is the chain, the second
  /// is the jumptable index, the last one is the jumptable entry index.
  BR_JT,

  /// BRCOND - Conditional branch.  The first operand is the chain, the
  /// second is the condition, the third is the block to branch to if the
  /// condition is true.  If the type of the condition is not i1, then the
  /// high bits must conform to getBooleanContents. If the condition is undef,
  /// it nondeterministically jumps to the block.
  /// TODO: Its semantics w.r.t. undef require further discussion; we need to
  /// make sure that it is consistent with optimizations in MIR & the
  /// meaning of IMPLICIT_DEF. See https://reviews.llvm.org/D92015
  BRCOND,

  /// BR_CC - Conditional branch.  The behavior is like that of SELECT_CC, in
  /// that the condition is represented as condition code, and two nodes to
  /// compare, rather than as a combined SetCC node.  The operands in order
  /// are chain, cc, lhs, rhs, block to branch to if condition is true. If
  /// condition is undef, it nondeterministically jumps to the block.
  BR_CC,

  /// INLINEASM - Represents an inline asm block.  This node always has two
  /// return values: a chain and a flag result.  The inputs are as follows:
  ///   Operand #0  : Input chain.
  ///   Operand #1  : a ExternalSymbolSDNode with a pointer to the asm string.
  ///   Operand #2  : a MDNodeSDNode with the !srcloc metadata.
  ///   Operand #3  : HasSideEffect, IsAlignStack bits.
  ///   After this, it is followed by a list of operands with this format:
  ///     ConstantSDNode: Flags that encode whether it is a mem or not, the
  ///                     number of operands that follow, etc.  See InlineAsm.h.
  ///     ... however many operands ...
  ///   Operand #last: Optional, an incoming flag.
  ///
  /// The variable width operands are required to represent target addressing
  /// modes as a single "operand", even though they may have multiple
  /// SDOperands.
  INLINEASM,

  /// INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
  INLINEASM_BR,

  /// EH_LABEL - Represents a label in mid basic block used to track
  /// locations needed for debug and exception handling tables.  These nodes
  /// take a chain as input and return a chain.
  EH_LABEL,

  /// ANNOTATION_LABEL - Represents a mid basic block label used by
  /// annotations. This should remain within the basic block and be ordered
  /// with respect to other call instructions, but loads and stores may float
  /// past it.
  ANNOTATION_LABEL,

  /// CATCHRET - Represents a return from a catch block funclet. Used for
  /// MSVC compatible exception handling. Takes a chain operand and a
  /// destination basic block operand.
  CATCHRET,

  /// CLEANUPRET - Represents a return from a cleanup block funclet.  Used for
  /// MSVC compatible exception handling. Takes only a chain operand.
  CLEANUPRET,

  /// STACKSAVE - STACKSAVE has one operand, an input chain.  It produces a
  /// value, the same type as the pointer type for the system, and an output
  /// chain.
  STACKSAVE,

  /// STACKRESTORE has two operands, an input chain and a pointer to restore
  /// to. It returns an output chain.
  STACKRESTORE,

  /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
  /// of a call sequence, and carry arbitrary information that the target
  /// might want to know.  The first operand is a chain, the rest are
  /// specified by the target and not touched by the DAG optimizers.
  /// Targets that may use stack to pass call arguments define additional
  /// operands:
  /// - size of the call frame part that must be set up within the
  ///   CALLSEQ_START..CALLSEQ_END pair,
  /// - part of the call frame prepared prior to CALLSEQ_START.
  /// Both these parameters must be constants, their sum is the total call
  /// frame size.
  /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
  CALLSEQ_START, // Beginning of a call sequence
  CALLSEQ_END,   // End of a call sequence

  /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
  /// and the alignment. It returns a pair of values: the vaarg value and a
  /// new chain.
  VAARG,

  /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
  /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
  /// source.
  VACOPY,

  /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
  /// pointer, and a SRCVALUE.
  VAEND,
  VASTART,

  // PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE
  // with the preallocated call Value.
  PREALLOCATED_SETUP,
  // PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE
  // with the preallocated call Value, and a constant int.
  PREALLOCATED_ARG,

  /// SRCVALUE - This is a node type that holds a Value* that is used to
  /// make reference to a value in the LLVM IR.
  SRCVALUE,

  /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
  /// reference metadata in the IR.
  MDNODE_SDNODE,

  /// PCMARKER - This corresponds to the pcmarker intrinsic.
  PCMARKER,

  /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
  /// It produces a chain and one i64 value. The only operand is a chain.
  /// If i64 is not legal, the result will be expanded into smaller values.
  /// Still, it returns an i64, so targets should set legality for i64.
  /// The result is the content of the architecture-specific cycle
  /// counter-like register (or other high accuracy low latency clock source).
  READCYCLECOUNTER,

  /// HANDLENODE node - Used as a handle for various purposes.
  HANDLENODE,

  /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.  It
  /// takes as input a token chain, the pointer to the trampoline, the pointer
  /// to the nested function, the pointer to pass for the 'nest' parameter, a
  /// SRCVALUE for the trampoline and another for the nested function
  /// (allowing targets to access the original Function*).
  /// It produces a token chain as output.
  INIT_TRAMPOLINE,

  /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
  /// It takes a pointer to the trampoline and produces a (possibly) new
  /// pointer to the same trampoline with platform-specific adjustments
  /// applied.  The pointer it returns points to an executable block of code.
  ADJUST_TRAMPOLINE,

  /// TRAP - Trapping instruction
  TRAP,

  /// DEBUGTRAP - Trap intended to get the attention of a debugger.
  DEBUGTRAP,

  /// UBSANTRAP - Trap with an immediate describing the kind of sanitizer
  /// failure.
  UBSANTRAP,

  /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
  /// is the chain.  The other operands are the address to prefetch,
  /// read / write specifier, locality specifier and instruction / data cache
  /// specifier.
  PREFETCH,

  /// ARITH_FENCE - This corresponds to an arithmetic fence intrinsic. Its
  /// operand and result have the same floating-point type.
  ARITH_FENCE,

  /// MEMBARRIER - Compiler barrier only; generate a no-op.
  MEMBARRIER,

  /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
  /// This corresponds to the fence instruction. It takes an input chain, and
  /// two integer constants: an AtomicOrdering and a SynchronizationScope.
  ATOMIC_FENCE,

  /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
  /// This corresponds to "load atomic" instruction.
  ATOMIC_LOAD,

  /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
  /// This corresponds to "store atomic" instruction.
  ATOMIC_STORE,

  /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
  /// For double-word atomic operations:
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
  ///                                          swapLo, swapHi)
  /// This corresponds to the cmpxchg instruction.
  ATOMIC_CMP_SWAP,

  /// Val, Success, OUTCHAIN
  ///     = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
  /// N.b. this is still a strong cmpxchg operation, so
  /// Success == "Val == cmp".
  ATOMIC_CMP_SWAP_WITH_SUCCESS,

  /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
  /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
  /// For double-word atomic operations:
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
  /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
  /// These correspond to the atomicrmw instruction.
  ATOMIC_SWAP,
  ATOMIC_LOAD_ADD,
  ATOMIC_LOAD_SUB,
  ATOMIC_LOAD_AND,
  ATOMIC_LOAD_CLR,
  ATOMIC_LOAD_OR,
  ATOMIC_LOAD_XOR,
  ATOMIC_LOAD_NAND,
  ATOMIC_LOAD_MIN,
  ATOMIC_LOAD_MAX,
  ATOMIC_LOAD_UMIN,
  ATOMIC_LOAD_UMAX,
  ATOMIC_LOAD_FADD,
  ATOMIC_LOAD_FSUB,
  ATOMIC_LOAD_FMAX,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_UINC_WRAP,
  ATOMIC_LOAD_UDEC_WRAP,

  // Masked load and store - consecutive vector load and store operations
  // with additional mask operand that prevents memory accesses to the
  // masked-off lanes.
  //
  // Val, OutChain = MLOAD(BasePtr, Mask, PassThru)
  // OutChain = MSTORE(Value, BasePtr, Mask)
  MLOAD,
  MSTORE,

  // Masked gather and scatter - load and store operations for a vector of
  // random addresses with additional mask operand that prevents memory
  // accesses to the masked-off lanes.
  //
  // Val, OutChain = GATHER(InChain, PassThru, Mask, BasePtr, Index, Scale)
  // OutChain = SCATTER(InChain, Value, Mask, BasePtr, Index, Scale)
  //
  // The Index operand can have more vector elements than the other operands
  // due to type legalization. The extra elements are ignored.
  MGATHER,
  MSCATTER,

  /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
  /// is the chain and the second operand is the alloca pointer.
  LIFETIME_START,
  LIFETIME_END,

  /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
  /// beginning and end of a GC transition sequence, and carry arbitrary
  /// information that the target might need for lowering.  The first operand
  /// is a chain, the rest are specified by the target and not touched by the
  /// DAG optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
  /// nested.
  GC_TRANSITION_START,
  GC_TRANSITION_END,

  /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
  /// the most recent dynamic alloca. For most targets that would be 0, but
  /// for some others (e.g. PowerPC, PowerPC64) it would be a compile-time
  /// known nonzero constant. The only operand here is the chain.
  GET_DYNAMIC_AREA_OFFSET,

  /// Pseudo probe for AutoFDO, as a placeholder in a basic block to improve
  /// the quality of sample counts.
  PSEUDO_PROBE,

  /// VSCALE(IMM) - Returns the runtime scaling factor used to calculate the
  /// number of elements within a scalable vector. IMM is a constant integer
  /// multiplier that is applied to the runtime value.
  VSCALE,

  /// Generic reduction nodes. These nodes represent horizontal vector
  /// reduction operations, producing a scalar result.
  /// The SEQ variants perform reductions in sequential order. The first
  /// operand is an initial scalar accumulator value, and the second operand
  /// is the vector to reduce.
  /// E.g. RES = VECREDUCE_SEQ_FADD f32 ACC, <4 x f32> SRC_VEC
  ///  ... is equivalent to
  /// RES = (((ACC + SRC_VEC[0]) + SRC_VEC[1]) + SRC_VEC[2]) + SRC_VEC[3]
  VECREDUCE_SEQ_FADD,
  VECREDUCE_SEQ_FMUL,

  /// These reductions have relaxed evaluation order semantics, and have a
  /// single vector operand. The order of evaluation is unspecified. For
  /// pow-of-2 vectors, one valid legalizer expansion is to use a tree
  /// reduction, i.e.:
  /// For RES = VECREDUCE_FADD <8 x f16> SRC_VEC
  ///   PART_RDX = FADD SRC_VEC[0:3], SRC_VEC[4:7]
  ///   PART_RDX2 = FADD PART_RDX[0:1], PART_RDX[2:3]
  ///   RES = FADD PART_RDX2[0], PART_RDX2[1]
  /// For non-pow-2 vectors, this can be computed by extracting each element
  /// and performing the operation as if it were scalarized.
  VECREDUCE_FADD,
  VECREDUCE_FMUL,
  /// FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
  VECREDUCE_FMAX,
  VECREDUCE_FMIN,
  /// FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the
  /// llvm.minimum and llvm.maximum semantics.
  VECREDUCE_FMAXIMUM,
  VECREDUCE_FMINIMUM,
  /// Integer reductions may have a result type larger than the vector element
  /// type. However, the reduction is performed using the vector element type
  /// and the value in the top bits is unspecified.
  VECREDUCE_ADD,
  VECREDUCE_MUL,
  VECREDUCE_AND,
  VECREDUCE_OR,
  VECREDUCE_XOR,
  VECREDUCE_SMAX,
  VECREDUCE_SMIN,
  VECREDUCE_UMAX,
  VECREDUCE_UMIN,

  // The `llvm.experimental.stackmap` intrinsic.
  // Operands: input chain, glue, <id>, <numShadowBytes>, [live0[, live1...]]
  // Outputs: output chain, glue
  STACKMAP,

  // The `llvm.experimental.patchpoint.*` intrinsic.
  // Operands: input chain, [glue], reg-mask, <id>, <numShadowBytes>, callee,
  //   <numArgs>, cc, ...
  // Outputs: [rv], output chain, glue
  PATCHPOINT,

// Vector Predication
#define BEGIN_REGISTER_VP_SDNODE(VPSDID, ...) VPSDID,
#include "llvm/IR/VPIntrinsics.def"

  /// BUILTIN_OP_END - This must be the last enum value in this list.
  /// The target-specific pre-isel opcode values start here.
  BUILTIN_OP_END
};

/// FIRST_TARGET_STRICTFP_OPCODE - Target-specific pre-isel operations
/// which cannot raise FP exceptions should be less than this value.
/// Those that do must not be less than this value.
static const int FIRST_TARGET_STRICTFP_OPCODE = BUILTIN_OP_END + 400;

/// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
/// which do not reference a specific memory location should be less than
/// this value. Those that do must not be less than this value, and can
/// be used with SelectionDAG::getMemIntrinsicNode.
static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END + 500;
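
// A minimal sketch (an assumption about typical backend usage, not from this
// header) of how a target numbers its own pre-isel opcodes against these
// markers:
//
//   namespace MyTargetISD {            // Hypothetical target namespace.
//   enum NodeType : unsigned {
//     FIRST_NUMBER = ISD::BUILTIN_OP_END,
//     CALL,                            // Ordinary target node.
//     // Opcodes >= FIRST_TARGET_MEMORY_OPCODE may reference memory and can
//     // be created with SelectionDAG::getMemIntrinsicNode.
//   };
//   } // namespace MyTargetISD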

/// Whether this is bitwise logic opcode.
inline bool isBitwiseLogicOp(unsigned Opcode) {
  return Opcode == ISD::AND || Opcode == ISD::OR || Opcode == ISD::XOR;
}

/// Get underlying scalar opcode for VECREDUCE opcode.
/// For example ISD::AND for ISD::VECREDUCE_AND.
NodeType getVecReduceBaseOpcode(unsigned VecReduceOpcode);

/// Whether this is a vector-predicated Opcode.
bool isVPOpcode(unsigned Opcode);

/// Whether this is a vector-predicated binary operation opcode.
bool isVPBinaryOp(unsigned Opcode);

/// Whether this is a vector-predicated reduction opcode.
bool isVPReduction(unsigned Opcode);

/// The operand position of the vector mask.
std::optional<unsigned> getVPMaskIdx(unsigned Opcode);

/// The operand position of the explicit vector length parameter.
std::optional<unsigned> getVPExplicitVectorLengthIdx(unsigned Opcode);

/// Translate this VP Opcode to its corresponding non-VP Opcode.
std::optional<unsigned> getBaseOpcodeForVP(unsigned Opcode, bool hasFPExcept);

/// Translate this non-VP Opcode to its corresponding VP Opcode.
unsigned getVPForBaseOpcode(unsigned Opcode);

//===--------------------------------------------------------------------===//
/// MemIndexedMode enum - This enum defines the load / store indexed
/// addressing modes.
///
/// UNINDEXED    "Normal" load / store. The effective address is already
///              computed and is available in the base pointer. The offset
///              operand is always undefined. In addition to producing a
///              chain, an unindexed load produces one value (result of the
///              load); an unindexed store does not produce a value.
///
/// PRE_INC      Similar to the unindexed mode where the effective address is
/// PRE_DEC      the value of the base pointer plus / minus the offset.
///              It considers the computation as being folded into the load /
///              store operation (i.e. the load / store does the address
///              computation as well as performing the memory transaction).
///              The base operand is always undefined. In addition to
///              producing a chain, pre-indexed load produces two values
///              (result of the load and the result of the address
///              computation); a pre-indexed store produces one value (result
///              of the address computation).
///
/// POST_INC     The effective address is the value of the base pointer. The
/// POST_DEC     value of the offset operand is then added to / subtracted
///              from the base after memory transaction. In addition to
///              producing a chain, post-indexed load produces two values
///              (the result of the load and the result of the base +/- offset
///              computation); a post-indexed store produces one value (the
///              result of the base +/- offset computation).
enum MemIndexedMode { UNINDEXED = 0, PRE_INC, PRE_DEC, POST_INC, POST_DEC };
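
// A rough C-level sketch of the indexed modes (illustrative only; P mirrors
// the base-pointer operand and Off the offset operand):
//
//   V = *(P += Off);          // PRE_INC:  loads from Base + Off, updates P.
//   V = *P; P += Off;         // POST_INC: loads from Base, then updates P.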

static const int LAST_INDEXED_MODE = POST_DEC + 1;

//===--------------------------------------------------------------------===//
/// MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's
/// index parameter when calculating addresses.
///
/// SIGNED_SCALED     Addr = Base + ((signed)Index * Scale)
/// UNSIGNED_SCALED   Addr = Base + ((unsigned)Index * Scale)
///
/// NOTE: The value of Scale is typically only known to the node owning the
/// IndexType, with a value of 1 being the equivalent of unscaled.
enum MemIndexType { SIGNED_SCALED = 0, UNSIGNED_SCALED };

static const int LAST_MEM_INDEX_TYPE = UNSIGNED_SCALED + 1;

inline bool isIndexTypeSigned(MemIndexType IndexType) {
  return IndexType == SIGNED_SCALED;
}

//===--------------------------------------------------------------------===//
/// LoadExtType enum - This enum defines the three variants of LOADEXT
/// (load with extension).
///
/// SEXTLOAD loads the integer operand and sign extends it to a larger
///          integer result type.
/// ZEXTLOAD loads the integer operand and zero extends it to a larger
///          integer result type.
/// EXTLOAD  is used for two things: floating point extending loads and
///          integer extending loads [the top bits are undefined].
enum LoadExtType { NON_EXTLOAD = 0, EXTLOAD, SEXTLOAD, ZEXTLOAD };

static const int LAST_LOADEXT_TYPE = ZEXTLOAD + 1;
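
// A scalar sketch of the extension variants (an illustration, assuming an
// i8 value in memory loaded into an i32 register):
//
//   int32_t  S = *(const int8_t  *)P; // SEXTLOAD: sign bit replicated.
//   uint32_t Z = *(const uint8_t *)P; // ZEXTLOAD: high bits cleared.
//   // EXTLOAD leaves the high bits undefined; either form above is a valid
//   // expansion for the integer case.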

NodeType getExtForLoadExtType(bool IsFP, LoadExtType);

//===--------------------------------------------------------------------===//
/// ISD::CondCode enum - These are ordered carefully to make the bitfields
/// below work out, when considering SETFALSE (something that never exists
/// dynamically) as 0.  "U" -> Unsigned (for integer operands) or Unordered
/// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
/// to.  If the "N" column is 1, the result of the comparison is undefined if
/// the input is a NAN.
///
/// All of these (except for the 'always folded ops') should be handled for
/// floating point.  For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
/// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
///
/// Note that these are laid out in a specific order to allow bit-twiddling
/// to transform conditions.
enum CondCode {
  // Opcode       N U L G E       Intuitive operation
  SETFALSE, //      0 0 0 0       Always false (always folded)
  SETOEQ,   //      0 0 0 1       True if ordered and equal
  SETOGT,   //      0 0 1 0       True if ordered and greater than
  SETOGE,   //      0 0 1 1       True if ordered and greater than or equal
  SETOLT,   //      0 1 0 0       True if ordered and less than
  SETOLE,   //      0 1 0 1       True if ordered and less than or equal
  SETONE,   //      0 1 1 0       True if ordered and operands are unequal
  SETO,     //      0 1 1 1       True if ordered (no nans)
  SETUO,    //      1 0 0 0       True if unordered: isnan(X) | isnan(Y)
  SETUEQ,   //      1 0 0 1       True if unordered or equal
  SETUGT,   //      1 0 1 0       True if unordered or greater than
  SETUGE,   //      1 0 1 1       True if unordered, greater than, or equal
  SETULT,   //      1 1 0 0       True if unordered or less than
  SETULE,   //      1 1 0 1       True if unordered, less than, or equal
  SETUNE,   //      1 1 1 0       True if unordered or not equal
  SETTRUE,  //      1 1 1 1       Always true (always folded)
  // Don't care operations: undefined if the input is a nan.
  SETFALSE2, //   1 X 0 0 0       Always false (always folded)
  SETEQ,     //   1 X 0 0 1       True if equal
  SETGT,     //   1 X 0 1 0       True if greater than
  SETGE,     //   1 X 0 1 1       True if greater than or equal
  SETLT,     //   1 X 1 0 0       True if less than
  SETLE,     //   1 X 1 0 1       True if less than or equal
  SETNE,     //   1 X 1 1 0       True if not equal
  SETTRUE2,  //   1 X 1 1 1       Always true (always folded)

  SETCC_INVALID // Marker value.
};

/// Return true if this is a setcc instruction that performs a signed
/// comparison when used with integer operands.
inline bool isSignedIntSetCC(CondCode Code) {
  return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
}

/// Return true if this is a setcc instruction that performs an unsigned
/// comparison when used with integer operands.
inline bool isUnsignedIntSetCC(CondCode Code) {
  return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
}

/// Return true if this is a setcc instruction that performs an equality
/// comparison when used with integer operands.
inline bool isIntEqualitySetCC(CondCode Code) {
  return Code == SETEQ || Code == SETNE;
}

/// Return true if the specified condition returns true if the two operands to
/// the condition are equal. Note that if one of the two operands is a NaN,
/// this value is meaningless.
inline bool isTrueWhenEqual(CondCode Cond) { return ((int)Cond & 1) != 0; }

/// This function returns 0 if the condition is always false if an operand is
/// a NaN, 1 if the condition is always true if the operand is a NaN, and 2 if
/// the condition is undefined if the operand is a NaN.
inline unsigned getUnorderedFlavor(CondCode Cond) {
  return ((int)Cond >> 3) & 3;
}
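
// Added sketch of the bit-twiddling the layout enables (a conceptual model,
// not necessarily how the library implements it): swapping the operands of a
// comparison just swaps the L (bit 2) and G (bit 1) bits.
//
//   inline unsigned swappedCC(unsigned CC) {
//     unsigned G = (CC >> 1) & 1, L = (CC >> 2) & 1;
//     return (CC & ~6u) | (L << 1) | (G << 2); // e.g. SETOGT -> SETOLT.
//   }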

/// Return the operation corresponding to !(X op Y), where 'op' is a valid
/// SetCC operation.
CondCode getSetCCInverse(CondCode Operation, EVT Type);

inline bool isExtOpcode(unsigned Opcode) {
  return Opcode == ISD::ANY_EXTEND || Opcode == ISD::ZERO_EXTEND ||
         Opcode == ISD::SIGN_EXTEND;
}

inline bool isExtVecInRegOpcode(unsigned Opcode) {
  return Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
         Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
         Opcode == ISD::SIGN_EXTEND_VECTOR_INREG;
}

namespace GlobalISel {
/// Return the operation corresponding to !(X op Y), where 'op' is a valid
/// SetCC operation. The U bit of the condition code has different meanings
/// between floating point and integer comparisons and LLT's don't provide
/// this distinction. As such we need to be told whether the comparison is
/// floating point or integer-like. Pointers should use integer-like
/// comparisons.
CondCode getSetCCInverse(CondCode Operation, bool isIntegerLike);
} // end namespace GlobalISel

/// Return the operation corresponding to (Y op X) when given the operation
/// for (X op Y).
CondCode getSetCCSwappedOperands(CondCode Operation);

/// Return the result of a logical OR between different comparisons of
/// identical values: ((X op1 Y) | (X op2 Y)). This function returns
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, EVT Type);

/// Return the result of a logical AND between different comparisons of
/// identical values: ((X op1 Y) & (X op2 Y)). This function returns
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, EVT Type);

} // namespace ISD

} // namespace llvm

#endif
//== llvm/CodeGen/LowLevelTypeUtils.h -------------------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// This provides the CodeGen aspects of LowLevelType, such as Type conversion.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LOWLEVELTYPEUTILS_H
#define LLVM_CODEGEN_LOWLEVELTYPEUTILS_H

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/ValueTypes.h"

namespace llvm {

class DataLayout;
class Type;
struct fltSemantics;

/// Construct a low-level type based on an LLVM type.
LLT getLLTForType(Type &Ty, const DataLayout &DL);

/// Get a rough equivalent of an MVT for a given LLT. MVT can't distinguish
/// pointers, so these will convert to a plain integer.
MVT getMVTForLLT(LLT Ty);
EVT getApproximateEVTForLLT(LLT Ty, const DataLayout &DL, LLVMContext &Ctx);

/// Get a rough equivalent of an LLT for a given MVT. LLT does not yet support
/// scalable vector types, and will assert if used.
LLT getLLTForMVT(MVT Ty);

/// Get the appropriate floating point arithmetic semantic based on the bit size
/// of the given scalar LLT.
const llvm::fltSemantics &getFltSemanticForLLT(LLT Ty);
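
// A hedged usage sketch (the surrounding names are assumptions, not part of
// this header):
//
//   const DataLayout &DL = MF.getDataLayout();   // MF: a MachineFunction.
//   LLT Ty = getLLTForType(*Val->getType(), DL); // Val: some llvm::Value*.
//   MVT VT = getMVTForLLT(Ty); // Pointers come back as plain integers.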
} // namespace llvm

#endif // LLVM_CODEGEN_LOWLEVELTYPEUTILS_H
//===- llvm/CodeGen/MachineInstr.h - MachineInstr class ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineInstr class, which is the
// basic representation for all target dependent machine instructions used by
// the back end.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEINSTR_H
#define LLVM_CODEGEN_MACHINEINSTR_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerSumType.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TrailingObjects.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

namespace llvm {

class DILabel;
class Instruction;
class MDNode;
class AAResults;
template <typename T> class ArrayRef;
class DIExpression;
class DILocalVariable;
class MachineBasicBlock;
class MachineFunction;
class MachineRegisterInfo;
class ModuleSlotTracker;
class raw_ostream;
template <typename T> class SmallVectorImpl;
class SmallBitVector;
class StringRef;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

//===----------------------------------------------------------------------===//
/// Representation of each machine instruction.
///
/// This class isn't a POD type, but it must have a trivial destructor. When a
/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
/// without having their destructor called.
///
class MachineInstr
    : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
                                    ilist_sentinel_tracking<true>> {
public:
  using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;

  /// Flags to specify different kinds of comments to output in
  /// assembly code.  These flags carry semantic information not
  /// otherwise easily derivable from the IR text.
  ///
  enum CommentFlag {
    ReloadReuse = 0x1,    // higher bits are reserved for target dep comments.
    NoSchedComment = 0x2,
    TAsmComments = 0x4    // Target Asm comments should start from this value.
  };

  enum MIFlag {
    NoFlags = 0,
    FrameSetup = 1 << 0,     // Instruction is used as a part of
                             // function frame setup code.
    FrameDestroy = 1 << 1,   // Instruction is used as a part of
                             // function frame destruction code.
    BundledPred = 1 << 2,    // Instruction has bundled predecessors.
    BundledSucc = 1 << 3,    // Instruction has bundled successors.
    FmNoNans = 1 << 4,       // Instruction does not support Fast
                             // math nan values.
    FmNoInfs = 1 << 5,       // Instruction does not support Fast
                             // math infinity values.
    FmNsz = 1 << 6,          // Instruction is not required to retain
                             // signed zero values.
    FmArcp = 1 << 7,         // Instruction supports Fast math
                             // reciprocal approximations.
    FmContract = 1 << 8,     // Instruction supports Fast math
                             // contraction operations like fma.
    FmAfn = 1 << 9,          // Instruction may map to Fast math
                             // intrinsic approximation.
    FmReassoc = 1 << 10,     // Instruction supports Fast math
                             // reassociation of operand order.
    NoUWrap = 1 << 11,       // Instruction supports binary operator
                             // no unsigned wrap.
    NoSWrap = 1 << 12,       // Instruction supports binary operator
                             // no signed wrap.
    IsExact = 1 << 13,       // Division instruction is known
                             // to be exact.
    NoFPExcept = 1 << 14,    // Instruction does not raise
                             // floating-point exceptions.
    NoMerge = 1 << 15,       // Passes that drop source location info
                             // (e.g. branch folding) should skip
                             // this instruction.
    Unpredictable = 1 << 16, // Instruction with unpredictable condition.
  };

private:
  const MCInstrDesc *MCID;              // Instruction descriptor.
  MachineBasicBlock *Parent = nullptr;  // Pointer to the owning basic block.

  // Operands are allocated by an ArrayRecycler.
  MachineOperand *Operands = nullptr;   // Pointer to the first operand.

#define LLVM_MI_NUMOPERANDS_BITS 24
#define LLVM_MI_FLAGS_BITS 24
#define LLVM_MI_ASMPRINTERFLAGS_BITS 8

  /// Number of operands on instruction.
  uint32_t NumOperands : LLVM_MI_NUMOPERANDS_BITS;

  // OperandCapacity has uint8_t size, so it should be next to NumOperands
  // to properly pack.
  using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;
  OperandCapacity CapOperands;          // Capacity of the Operands array.

  /// Various bits of additional information about the machine instruction.
  uint32_t Flags : LLVM_MI_FLAGS_BITS;

  /// Various bits of information used by the AsmPrinter to emit helpful
  /// comments.  This is *not* semantic information.  Do not use this for
  /// anything other than to convey comment information to AsmPrinter.
  uint8_t AsmPrinterFlags : LLVM_MI_ASMPRINTERFLAGS_BITS;

  /// Internal implementation detail class that provides out-of-line storage for
  /// extra info used by the machine instruction when this info cannot be stored
  /// in-line within the instruction itself.
  ///
  /// This has to be defined eagerly due to the implementation constraints of
  /// `PointerSumType` where it is used.
  class ExtraInfo final : TrailingObjects<ExtraInfo, MachineMemOperand *,
                                          MCSymbol *, MDNode *, uint32_t> {
  public:
    static ExtraInfo *create(BumpPtrAllocator &Allocator,
                             ArrayRef<MachineMemOperand *> MMOs,
                             MCSymbol *PreInstrSymbol = nullptr,
                             MCSymbol *PostInstrSymbol = nullptr,
                             MDNode *HeapAllocMarker = nullptr,
                             MDNode *PCSections = nullptr,
                             uint32_t CFIType = 0) {
      bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
      bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
      bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
      bool HasCFIType = CFIType != 0;
      bool HasPCSections = PCSections != nullptr;
      auto *Result = new (Allocator.Allocate(
          totalSizeToAlloc<MachineMemOperand *, MCSymbol *, MDNode *, uint32_t>(
              MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol,
              HasHeapAllocMarker + HasPCSections, HasCFIType),
          alignof(ExtraInfo)))
          ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol,
                    HasHeapAllocMarker, HasPCSections, HasCFIType);

      // Copy the actual data into the trailing objects.
      std::copy(MMOs.begin(), MMOs.end(),
                Result->getTrailingObjects<MachineMemOperand *>());

      if (HasPreInstrSymbol)
        Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
      if (HasPostInstrSymbol)
        Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
            PostInstrSymbol;
      if (HasHeapAllocMarker)
        Result->getTrailingObjects<MDNode *>()[0] = HeapAllocMarker;
      if (HasPCSections)
        Result->getTrailingObjects<MDNode *>()[HasHeapAllocMarker] =
            PCSections;
      if (HasCFIType)
        Result->getTrailingObjects<uint32_t>()[0] = CFIType;

      return Result;
    }

    ArrayRef<MachineMemOperand *> getMMOs() const {
      return ArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
    }

    MCSymbol *getPreInstrSymbol() const {
      return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
    }

    MCSymbol *getPostInstrSymbol() const {
      return HasPostInstrSymbol
                 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
                 : nullptr;
    }

    MDNode *getHeapAllocMarker() const {
      return HasHeapAllocMarker ? getTrailingObjects<MDNode *>()[0] : nullptr;
    }

    MDNode *getPCSections() const {
      return HasPCSections
                 ? getTrailingObjects<MDNode *>()[HasHeapAllocMarker]
                 : nullptr;
    }

    uint32_t getCFIType() const {
      return HasCFIType ? getTrailingObjects<uint32_t>()[0] : 0;
    }

  private:
    friend TrailingObjects;

    // Description of the extra info, used to interpret the actual optional
    // data appended.
    //
    // Note that this is not terribly space optimized. This leaves a great deal
    // of flexibility to fit more in here later.
    const int NumMMOs;
    const bool HasPreInstrSymbol;
    const bool HasPostInstrSymbol;
    const bool HasHeapAllocMarker;
    const bool HasPCSections;
    const bool HasCFIType;

    // Implement the `TrailingObjects` internal API.
    size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
      return NumMMOs;
    }
    size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
      return HasPreInstrSymbol + HasPostInstrSymbol;
    }
    size_t numTrailingObjects(OverloadToken<MDNode *>) const {
      return HasHeapAllocMarker + HasPCSections;
    }
    size_t numTrailingObjects(OverloadToken<uint32_t>) const {
      return HasCFIType;
    }

    // Just a boring constructor to allow us to initialize the sizes. Always use
    // the `create` routine above.
    ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol,
              bool HasHeapAllocMarker, bool HasPCSections, bool HasCFIType)
        : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
          HasPostInstrSymbol(HasPostInstrSymbol),
          HasHeapAllocMarker(HasHeapAllocMarker), HasPCSections(HasPCSections),
          HasCFIType(HasCFIType) {}
  };
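
  // Added layout note: the trailing objects are allocated in the declaration
  // order of the TrailingObjects base, so a fully populated allocation is
  //
  //   [ ExtraInfo | MMOs... | PreSym, PostSym | HeapAllocMarker, PCSections
  //     | CFIType ]
  //
  // which is why the accessors above index the MCSymbol* array by
  // HasPreInstrSymbol and the MDNode* array by HasHeapAllocMarker.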

  /// Enumeration of the kinds of inline extra info available. It is important
  /// that the `MachineMemOperand` inline kind has a tag value of zero to make
  /// it accessible as an `ArrayRef`.
  enum ExtraInfoInlineKinds {
    EIIK_MMO = 0,
    EIIK_PreInstrSymbol,
    EIIK_PostInstrSymbol,
    EIIK_OutOfLine
  };

  // We store extra information about the instruction here. The common case is
  // expected to be nothing or a single pointer (typically a MMO or a symbol).
  // We work to optimize this common case by storing it inline here rather than
  // requiring a separate allocation, but we fall back to an allocation when
  // multiple pointers are needed.
  PointerSumType<ExtraInfoInlineKinds,
                 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
                 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
                 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
                 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
      Info;

  DebugLoc DbgLoc; // Source line information.

  /// Unique instruction number. Used by DBG_INSTR_REFs to refer to the values
  /// defined by this instruction.
  unsigned DebugInstrNum;

  // Intrusive list support
  friend struct ilist_traits<MachineInstr>;
  friend struct ilist_callback_traits<MachineBasicBlock>;
  void setParent(MachineBasicBlock *P) { Parent = P; }

  /// This constructor creates a copy of the given
  /// MachineInstr in the given MachineFunction.
  MachineInstr(MachineFunction &, const MachineInstr &);

  /// This constructor creates a MachineInstr and adds the implicit operands.
  /// It reserves space for number of operands specified by
  /// MCInstrDesc.  An explicit DebugLoc is supplied.
  MachineInstr(MachineFunction &, const MCInstrDesc &TID, DebugLoc DL,
               bool NoImp = false);

  // MachineInstrs are pool-allocated and owned by MachineFunction.
  friend class MachineFunction;

  void
  dumprImpl(const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
            SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const;

  static bool opIsRegDef(const MachineOperand &Op) {
    return Op.isReg() && Op.isDef();
  }

  static bool opIsRegUse(const MachineOperand &Op) {
    return Op.isReg() && Op.isUse();
  }

public:
  MachineInstr(const MachineInstr &) = delete;
  MachineInstr &operator=(const MachineInstr &) = delete;
  // Use MachineFunction::DeleteMachineInstr() instead.
  ~MachineInstr() = delete;

  const MachineBasicBlock* getParent() const { return Parent; }
  MachineBasicBlock* getParent() { return Parent; }

  /// Move the instruction before \p MovePos.
  void moveBefore(MachineInstr *MovePos);

  /// Return the function that contains the basic block that this instruction
  /// belongs to.
  ///
  /// Note: this is undefined behaviour if the instruction does not have a
  /// parent.
  const MachineFunction *getMF() const;
  MachineFunction *getMF() {
    return const_cast<MachineFunction *>(
        static_cast<const MachineInstr *>(this)->getMF());
  }

  /// Return the asm printer flags bitvector.
  uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }

  /// Clear the AsmPrinter bitvector.
  void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }

  /// Return whether an AsmPrinter flag is set.
  bool getAsmPrinterFlag(CommentFlag Flag) const {
    assert(isUInt<LLVM_MI_ASMPRINTERFLAGS_BITS>(unsigned(Flag)) &&
           "Flag is out of range for the AsmPrinterFlags field");
    return AsmPrinterFlags & Flag;
  }

  /// Set a flag for the AsmPrinter.
  void setAsmPrinterFlag(uint8_t Flag) {
    assert(isUInt<LLVM_MI_ASMPRINTERFLAGS_BITS>(unsigned(Flag)) &&
           "Flag is out of range for the AsmPrinterFlags field");
    AsmPrinterFlags |= Flag;
  }

  /// Clear specific AsmPrinter flags.
  void clearAsmPrinterFlag(CommentFlag Flag) {
    assert(isUInt<LLVM_MI_ASMPRINTERFLAGS_BITS>(unsigned(Flag)) &&
           "Flag is out of range for the AsmPrinterFlags field");
    AsmPrinterFlags &= ~Flag;
  }

  /// Return the MI flags bitvector.
  uint32_t getFlags() const {
    return Flags;
  }

  /// Return whether an MI flag is set.
  bool getFlag(MIFlag Flag) const {
    assert(isUInt<LLVM_MI_FLAGS_BITS>(unsigned(Flag)) &&
           "Flag is out of range for the Flags field");
    return Flags & Flag;
  }

  /// Set a MI flag.
  void setFlag(MIFlag Flag) {
    assert(isUInt<LLVM_MI_FLAGS_BITS>(unsigned(Flag)) &&
           "Flag is out of range for the Flags field");
    Flags |= (uint32_t)Flag;
  }

  void setFlags(unsigned flags) {
    assert(isUInt<LLVM_MI_FLAGS_BITS>(flags) &&
           "flags to be set are out of range for the Flags field");
    // Filter out the automatically maintained flags.
    unsigned Mask = BundledPred | BundledSucc;
    Flags = (Flags & Mask) | (flags & ~Mask);
  }

  /// clearFlag - Clear a MI flag.
  void clearFlag(MIFlag Flag) {
    assert(isUInt<LLVM_MI_FLAGS_BITS>(unsigned(Flag)) &&
           "Flag to clear is out of range for the Flags field");
    Flags &= ~((uint32_t)Flag);
  }

  /// Return true if MI is in a bundle (but not the first MI in a bundle).
  ///
  /// A bundle looks like this before it's finalized:
  ///   ----------------
  ///   |      MI      |
  ///   ----------------
  ///          |
  ///   ----------------
  ///   |      MI    * |
  ///   ----------------
  ///          |
  ///   ----------------
  ///   |      MI    * |
  ///   ----------------
  /// In this case, the first MI starts a bundle but is not inside a bundle;
  /// the next 2 MIs are considered "inside" the bundle.
  ///
  /// After a bundle is finalized, it looks like this:
  ///   ----------------
  ///   |    Bundle    |
  ///   ----------------
  ///          |
  ///   ----------------
  ///   |      MI    * |
  ///   ----------------
  ///          |
  ///   ----------------
  ///   |      MI    * |
  ///   ----------------
  ///          |
  ///   ----------------
  ///   |      MI    * |
  ///   ----------------
  /// The first instruction has the special opcode "BUNDLE". It's not "inside"
  /// a bundle, but the next three MIs are.
  bool isInsideBundle() const {
    return getFlag(BundledPred);
  }

  /// Return true if this instruction is part of a bundle. This is true
  /// if either itself or its following instruction is marked "InsideBundle".
  bool isBundled() const {
    return isBundledWithPred() || isBundledWithSucc();
  }

  /// Return true if this instruction is part of a bundle, and it is not the
  /// first instruction in the bundle.
  bool isBundledWithPred() const { return getFlag(BundledPred); }

  /// Return true if this instruction is part of a bundle, and it is not the
  /// last instruction in the bundle.
  bool isBundledWithSucc() const { return getFlag(BundledSucc); }

  /// Bundle this instruction with its predecessor. This can be an unbundled
  /// instruction, or it can be the first instruction in a bundle.
  void bundleWithPred();

  /// Bundle this instruction with its successor. This can be an unbundled
  /// instruction, or it can be the last instruction in a bundle.
  void bundleWithSucc();

  /// Break bundle above this instruction.
  void unbundleFromPred();

  /// Break bundle below this instruction.
  void unbundleFromSucc();

  /// Returns the debug location id of this MachineInstr.
  const DebugLoc &getDebugLoc() const { return DbgLoc; }

  /// Return the operand containing the offset to be used if this DBG_VALUE
  /// instruction is indirect; will be an invalid register if this value is
  /// not indirect, and an immediate with value 0 otherwise.
  const MachineOperand &getDebugOffset() const {
    assert(isNonListDebugValue() && "not a DBG_VALUE");
    return getOperand(1);
  }
  MachineOperand &getDebugOffset() {
    assert(isNonListDebugValue() && "not a DBG_VALUE");
    return getOperand(1);
  }

  /// Return the operand for the debug variable referenced by
  /// this DBG_VALUE instruction.
  const MachineOperand &getDebugVariableOp() const;
  MachineOperand &getDebugVariableOp();

  /// Return the debug variable referenced by
  /// this DBG_VALUE instruction.
  const DILocalVariable *getDebugVariable() const;

  /// Return the operand for the complex address expression referenced by
  /// this DBG_VALUE instruction.
  const MachineOperand &getDebugExpressionOp() const;
  MachineOperand &getDebugExpressionOp();

  /// Return the complex address expression referenced by
  /// this DBG_VALUE instruction.
  const DIExpression *getDebugExpression() const;

  /// Return the debug label referenced by
  /// this DBG_LABEL instruction.
  const DILabel *getDebugLabel() const;

  /// Fetch the instruction number of this MachineInstr. If it does not have
  /// one already, a new and unique number will be assigned.
  unsigned getDebugInstrNum();

  /// Fetch instruction number of this MachineInstr -- but before it's inserted
  /// into \p MF. Needed for transformations that create an instruction but
  /// don't immediately insert it.
  unsigned getDebugInstrNum(MachineFunction &MF);

  /// Examine the instruction number of this MachineInstr. May be zero if
  /// it hasn't been assigned a number yet.
  unsigned peekDebugInstrNum() const { return DebugInstrNum; }

  /// Set instruction number of this MachineInstr. Avoid using unless you're
  /// deserializing this information.
  void setDebugInstrNum(unsigned Num) { DebugInstrNum = Num; }

  /// Drop any variable location debugging information associated with this
  /// instruction. Use when an instruction is modified in such a way that it no
  /// longer defines the value it used to. Variable locations using that value
  /// will be dropped.
  void dropDebugNumber() { DebugInstrNum = 0; }

  /// Emit an error referring to the source location of this instruction.
  /// This should only be used for inline assembly that is somehow
  /// impossible to compile. Other errors should have been handled much
  /// earlier.
  ///
  /// If this method returns, the caller should try to recover from the error.
  void emitError(StringRef Msg) const;

  /// Returns the target instruction descriptor of this MachineInstr.
  const MCInstrDesc &getDesc() const { return *MCID; }

  /// Returns the opcode of this MachineInstr.
  unsigned getOpcode() const { return MCID->Opcode; }

  /// Returns the total number of operands.
  unsigned getNumOperands() const { return NumOperands; }

  /// Returns the total number of operands which are debug locations.
  unsigned getNumDebugOperands() const {
    return std::distance(debug_operands().begin(), debug_operands().end());
  }

  const MachineOperand& getOperand(unsigned i) const {
    assert(i < getNumOperands() && "getOperand() out of range!");
    return Operands[i];
  }
  MachineOperand& getOperand(unsigned i) {
    assert(i < getNumOperands() && "getOperand() out of range!");
    return Operands[i];
  }

  MachineOperand &getDebugOperand(unsigned Index) {
    assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!");
    return *(debug_operands().begin() + Index);
  }
  const MachineOperand &getDebugOperand(unsigned Index) const {
    assert(Index < getNumDebugOperands() && "getDebugOperand() out of range!");
    return *(debug_operands().begin() + Index);
  }

  SmallSet<Register, 4> getUsedDebugRegs() const {
    assert(isDebugValue() && "not a DBG_VALUE*");
    SmallSet<Register, 4> UsedRegs;
    for (const auto &MO : debug_operands())
      if (MO.isReg() && MO.getReg())
        UsedRegs.insert(MO.getReg());
    return UsedRegs;
  }

  /// Returns whether this debug value has at least one debug operand with the
  /// register \p Reg.
  bool hasDebugOperandForReg(Register Reg) const {
    return any_of(debug_operands(), [Reg](const MachineOperand &Op) {
      return Op.isReg() && Op.getReg() == Reg;
    });
  }

  /// Returns a range of all of the operands that correspond to a debug use of
  /// \p Reg.
  template <typename Operand, typename Instruction>
  static iterator_range<
      filter_iterator<Operand *, std::function<bool(Operand &Op)>>>
  getDebugOperandsForReg(Instruction *MI, Register Reg) {
    std::function<bool(Operand & Op)> OpUsesReg(
        [Reg](Operand &Op) { return Op.isReg() && Op.getReg() == Reg; });
    return make_filter_range(MI->debug_operands(), OpUsesReg);
  }
  iterator_range<filter_iterator<const MachineOperand *,
                                 std::function<bool(const MachineOperand &Op)>>>
  getDebugOperandsForReg(Register Reg) const {
    return MachineInstr::getDebugOperandsForReg<const MachineOperand,
                                                const MachineInstr>(this, Reg);
  }
  iterator_range<filter_iterator<MachineOperand *,
                                 std::function<bool(MachineOperand &Op)>>>
  getDebugOperandsForReg(Register Reg) {
    return MachineInstr::getDebugOperandsForReg<MachineOperand, MachineInstr>(
        this, Reg);
  }

  bool isDebugOperand(const MachineOperand *Op) const {
    return Op >= adl_begin(debug_operands()) && Op <= adl_end(debug_operands());
  }

  unsigned getDebugOperandIndex(const MachineOperand *Op) const {
    assert(isDebugOperand(Op) && "Expected a debug operand.");
    return std::distance(adl_begin(debug_operands()), Op);
  }

  /// Returns the total number of definitions.
  unsigned getNumDefs() const {
    return getNumExplicitDefs() + MCID->implicit_defs().size();
  }

  /// Returns true if the instruction has implicit definition.
  bool hasImplicitDef() const {
    for (const MachineOperand &MO : implicit_operands())
      if (MO.isDef() && MO.isImplicit())
        return true;
    return false;
  }

  /// Returns the number of implicit operands.
  unsigned getNumImplicitOperands() const {
    return getNumOperands() - getNumExplicitOperands();
  }

  /// Return true if operand \p OpIdx is a subregister index.
  bool isOperandSubregIdx(unsigned OpIdx) const {
    assert(getOperand(OpIdx).isImm() && "Expected MO_Immediate operand type.");
    if (isExtractSubreg() && OpIdx == 2)
      return true;
    if (isInsertSubreg() && OpIdx == 3)
      return true;
    if (isRegSequence() && OpIdx > 1 && (OpIdx % 2) == 0)
      return true;
    if (isSubregToReg() && OpIdx == 3)
      return true;
    return false;
  }

  /// Returns the number of non-implicit operands.
  unsigned getNumExplicitOperands() const;

  /// Returns the number of non-implicit definitions.
  unsigned getNumExplicitDefs() const;

  /// iterator/begin/end - Iterate over all operands of a machine instruction.
  using mop_iterator = MachineOperand *;
  using const_mop_iterator = const MachineOperand *;

  mop_iterator operands_begin() { return Operands; }
  mop_iterator operands_end() { return Operands + NumOperands; }

  const_mop_iterator operands_begin() const { return Operands; }
  const_mop_iterator operands_end() const { return Operands + NumOperands; }

  iterator_range<mop_iterator> operands() {
    return make_range(operands_begin(), operands_end());
  }
  iterator_range<const_mop_iterator> operands() const {
    return make_range(operands_begin(), operands_end());
  }
  iterator_range<mop_iterator> explicit_operands() {
    return make_range(operands_begin(),
                      operands_begin() + getNumExplicitOperands());
  }
  iterator_range<const_mop_iterator> explicit_operands() const {
    return make_range(operands_begin(),
                      operands_begin() + getNumExplicitOperands());
  }
  iterator_range<mop_iterator> implicit_operands() {
    return make_range(explicit_operands().end(), operands_end());
  }
  iterator_range<const_mop_iterator> implicit_operands() const {
    return make_range(explicit_operands().end(), operands_end());
  }
  /// Returns a range over all operands that are used to determine the variable
  /// location for this debug value instruction.
  iterator_range<mop_iterator> debug_operands() {
    assert((isDebugValueLike()) && "Must be a debug value instruction.");
    return isNonListDebugValue()
               ? make_range(operands_begin(), operands_begin() + 1)
               : make_range(operands_begin() + 2, operands_end());
  }
  /// \copydoc debug_operands()
  iterator_range<const_mop_iterator> debug_operands() const {
    assert((isDebugValueLike()) && "Must be a debug value instruction.");
    return isNonListDebugValue()
               ? make_range(operands_begin(), operands_begin() + 1)
               : make_range(operands_begin() + 2, operands_end());
  }
  /// Returns a range over all explicit operands that are register definitions.
  /// Implicit definitions are not included!
  iterator_range<mop_iterator> defs() {
    return make_range(operands_begin(),
                      operands_begin() + getNumExplicitDefs());
  }
  /// \copydoc defs()
  iterator_range<const_mop_iterator> defs() const {
    return make_range(operands_begin(),
                      operands_begin() + getNumExplicitDefs());
  }
  /// Returns a range that includes all operands that are register uses.
  /// This may include unrelated operands which are not register uses.
  iterator_range<mop_iterator> uses() {
    return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
  }
  /// \copydoc uses()
  iterator_range<const_mop_iterator> uses() const {
    return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
  }
  iterator_range<mop_iterator> explicit_uses() {
    return make_range(operands_begin() + getNumExplicitDefs(),
                      operands_begin() + getNumExplicitOperands());
  }
  iterator_range<const_mop_iterator> explicit_uses() const {
    return make_range(operands_begin() + getNumExplicitDefs(),
                      operands_begin() + getNumExplicitOperands());
  }
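
  // Example (an illustrative sketch): the ranges above keep operand scans
  // concise. Assuming an `MI` in scope, this collects the virtual registers
  // it explicitly defines.
  //
  //   SmallVector<Register, 2> DefRegs;
  //   for (const MachineOperand &MO : MI.defs())
  //     if (MO.isReg() && MO.getReg().isVirtual())
  //       DefRegs.push_back(MO.getReg());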

  using filtered_mop_iterator =
      filter_iterator<mop_iterator, bool (*)(const MachineOperand &)>;
  using filtered_const_mop_iterator =
      filter_iterator<const_mop_iterator, bool (*)(const MachineOperand &)>;

  /// Returns an iterator range over all operands that are (explicit or
  /// implicit) register defs.
  iterator_range<filtered_mop_iterator> all_defs() {
    return make_filter_range(operands(), opIsRegDef);
  }
  /// \copydoc all_defs()
  iterator_range<filtered_const_mop_iterator> all_defs() const {
    return make_filter_range(operands(), opIsRegDef);
  }

  /// Returns an iterator range over all operands that are (explicit or
  /// implicit) register uses.
  iterator_range<filtered_mop_iterator> all_uses() {
    return make_filter_range(uses(), opIsRegUse);
  }
  /// \copydoc all_uses()
  iterator_range<filtered_const_mop_iterator> all_uses() const {
    return make_filter_range(uses(), opIsRegUse);
  }

  /// Returns the number of the operand iterator \p I points to.
  unsigned getOperandNo(const_mop_iterator I) const {
    return I - operands_begin();
  }

  /// Access to memory operands of the instruction. If there are none, that
  /// does not imply anything about whether the instruction accesses memory.
  /// Instead, the caller must behave conservatively.
  ArrayRef<MachineMemOperand *> memoperands() const {
    if (!Info)
      return {};

    if (Info.is<EIIK_MMO>())
      return ArrayRef(Info.getAddrOfZeroTagPointer(), 1);

    if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
      return EI->getMMOs();

    return {};
  }
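
  // Example (an illustrative sketch): since an empty memoperand list conveys
  // no information, callers must assume the worst. Assuming an `MI` in scope:
  //
  //   bool MayBeVolatile =
  //       MI.memoperands_empty() ||
  //       any_of(MI.memoperands(),
  //              [](const MachineMemOperand *MMO) { return MMO->isVolatile(); });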

  /// Access to memory operands of the instruction.
  ///
  /// If `memoperands_begin() == memoperands_end()`, that does not imply
  /// anything about whether the instruction accesses memory. Instead, the
  /// caller must behave conservatively.
  mmo_iterator memoperands_begin() const { return memoperands().begin(); }

  /// Access to memory operands of the instruction.
  ///
  /// If `memoperands_begin() == memoperands_end()`, that does not imply
  /// anything about whether the instruction accesses memory. Instead, the
  /// caller must behave conservatively.
  mmo_iterator memoperands_end() const { return memoperands().end(); }

  /// Return true if we don't have any memory operands which describe the
  /// memory access done by this instruction.  If this is true, calling code
  /// must be conservative.
  bool memoperands_empty() const { return memoperands().empty(); }

  /// Return true if this instruction has exactly one MachineMemOperand.
  bool hasOneMemOperand() const { return memoperands().size() == 1; }

  /// Return the number of memory operands.
  unsigned getNumMemOperands() const { return memoperands().size(); }

  /// Helper to extract a pre-instruction symbol if one has been added.
  MCSymbol *getPreInstrSymbol() const {
    if (!Info)
      return nullptr;
    if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
      return S;
    if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
      return EI->getPreInstrSymbol();

    return nullptr;
  }

  /// Helper to extract a post-instruction symbol if one has been added.
  MCSymbol *getPostInstrSymbol() const {
    if (!Info)
      return nullptr;
    if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
      return S;
    if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
      return EI->getPostInstrSymbol();

    return nullptr;
  }

  /// Helper to extract a heap alloc marker if one has been added.
  MDNode *getHeapAllocMarker() const {
    if (!Info)
      return nullptr;
    if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
      return EI->getHeapAllocMarker();

    return nullptr;
  }

  /// Helper to extract PCSections metadata target sections.
  MDNode *getPCSections() const {
    if (!Info)
      return nullptr;
    if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
      return EI->getPCSections();

    return nullptr;
  }

  /// Helper to extract a CFI type hash if one has been added.
  uint32_t getCFIType() const {
    if (!Info)
      return 0;
    if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
      return EI->getCFIType();

    return 0;
  }

  /// API for querying MachineInstr properties. They are the same as MCInstrDesc
  /// queries but they are bundle aware.

  enum QueryType {
    IgnoreBundle,    // Ignore bundles
    AnyInBundle,     // Return true if any instruction in bundle has property
    AllInBundle      // Return true if all instructions in bundle have property
  };

  /// Return true if the instruction (or in the case of a bundle,
  /// the instructions inside the bundle) has the specified property.
  /// The first argument is the property being queried.
  /// The second argument indicates whether the query should look inside
  /// instruction bundles.
  bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
    assert(MCFlag < 64 &&
           "MCFlag out of range for bit mask in getFlags/hasPropertyInBundle.");
    // Inline the fast path for unbundled or bundle-internal instructions.
    if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
      return getDesc().getFlags() & (1ULL << MCFlag);

    // If this is the first instruction in a bundle, take the slow path.
    return hasPropertyInBundle(1ULL << MCFlag, Type);
  }
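
  // Example (an illustrative sketch): querying a property with an explicit
  // bundle behavior. Assuming an `MI` in scope, this asks whether any
  // instruction in MI's bundle (or MI itself, if unbundled) may store.
  //
  //   bool AnyStores = MI.hasProperty(MCID::MayStore, MachineInstr::AnyInBundle);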

  /// Return true if this is an instruction that should go through the usual
  /// legalization steps.
  bool isPreISelOpcode(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::PreISelOpcode, Type);
  }

  /// Return true if this instruction can have a variable number of operands.
  /// In this case, the variable operands will be after the normal
  /// operands but before the implicit definitions and uses (if any are
  /// present).
  bool isVariadic(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Variadic, Type);
  }

  /// Return true if this instruction has an optional definition, e.g.
  /// ARM instructions which can set condition code if 's' bit is set.
  bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::HasOptionalDef, Type);
  }

  /// Return true if this is a pseudo instruction that doesn't
  /// correspond to a real machine instruction.
  bool isPseudo(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Pseudo, Type);
  }

  /// Return true if this instruction doesn't produce any output in the form of
  /// executable instructions.
  bool isMetaInstruction(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Meta, Type);
  }

  bool isReturn(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::Return, Type);
  }

  /// Return true if this is an instruction that marks the end of an EH scope,
  /// i.e., a catchpad or a cleanuppad instruction.
  bool isEHScopeReturn(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::EHScopeReturn, Type);
  }

  bool isCall(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::Call, Type);
  }

  /// Return true if this is a call instruction that may have an associated
  /// call site entry in the debug info.
  bool isCandidateForCallSiteEntry(QueryType Type = IgnoreBundle) const;
  /// Return true if copying, moving, or erasing this instruction requires
  /// updating Call Site Info (see \ref copyCallSiteInfo, \ref moveCallSiteInfo,
  /// \ref eraseCallSiteInfo).
  bool shouldUpdateCallSiteInfo() const;

  /// Returns true if the specified instruction stops control flow
  /// from executing the instruction immediately following it.  Examples include
  /// unconditional branches and return instructions.
  bool isBarrier(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::Barrier, Type);
  }

  /// Returns true if this instruction is part of the terminator for a basic
  /// block.  Typically these are return and branch instructions.
  ///
  /// Various passes use this to insert code into the bottom of a basic block,
  /// but before control flow occurs.
  bool isTerminator(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::Terminator, Type);
  }

  /// Returns true if this is a conditional, unconditional, or indirect branch.
  /// Predicates below can be used to discriminate between
  /// these cases, and the TargetInstrInfo::analyzeBranch method can be used to
  /// get more information.
  bool isBranch(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::Branch, Type);
  }

  /// Return true if this is an indirect branch, such as a
  /// branch through a register.
  bool isIndirectBranch(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::IndirectBranch, Type);
  }

  /// Return true if this is a branch which may fall
  /// through to the next instruction or may transfer control flow to some other
  /// block.  The TargetInstrInfo::analyzeBranch method can be used to get more
  /// information about this branch.
  bool isConditionalBranch(QueryType Type = AnyInBundle) const {
    return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
  }

  /// Return true if this is a branch which always
  /// transfers control flow to some other block.  The
  /// TargetInstrInfo::analyzeBranch method can be used to get more information
  /// about this branch.
  bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
    return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
  }
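
  // Example (an illustrative sketch): the predicates above partition branches.
  // Assuming an `MI` in scope:
  //
  //   StringRef Kind = !MI.isBranch()             ? "not a branch"
  //                    : MI.isIndirectBranch()    ? "indirect"
  //                    : MI.isConditionalBranch() ? "conditional"
  //                                               : "unconditional";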

  /// Return true if this instruction has a predicate operand that
  /// controls execution.  It may be set to 'always', or may be set to other
  /// values.   There are various methods in TargetInstrInfo that can be used to
  /// control and modify the predicate in this instruction.
  bool isPredicable(QueryType Type = AllInBundle) const {
    // If it's a bundle then all bundled instructions must be predicable for this
    // to return true.
    return hasProperty(MCID::Predicable, Type);
  }

  /// Return true if this instruction is a comparison.
  bool isCompare(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Compare, Type);
  }

  /// Return true if this instruction is a move immediate
  /// (including conditional moves) instruction.
  bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::MoveImm, Type);
  }

  /// Return true if this instruction is a register move,
  /// including moving values from subreg to reg.
  bool isMoveReg(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::MoveReg, Type);
  }

  /// Return true if this instruction is a bitcast instruction.
  bool isBitcast(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Bitcast, Type);
  }

  /// Return true if this instruction is a select instruction.
  bool isSelect(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Select, Type);
  }

  /// Return true if this instruction cannot be safely duplicated.
  /// For example, if the instruction has unique labels attached
  /// to it, duplicating it would cause multiple definition errors.
  bool isNotDuplicable(QueryType Type = AnyInBundle) const {
    if (getPreInstrSymbol() || getPostInstrSymbol())
      return true;
    return hasProperty(MCID::NotDuplicable, Type);
  }

  /// Return true if this instruction is convergent.
  /// Convergent instructions cannot be made control-dependent on any
  /// additional values.
  bool isConvergent(QueryType Type = AnyInBundle) const {
    if (isInlineAsm()) {
      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
      if (ExtraInfo & InlineAsm::Extra_IsConvergent)
        return true;
    }
    return hasProperty(MCID::Convergent, Type);
  }

  /// Returns true if the specified instruction has a delay slot
  /// which must be filled by the code generator.
  bool hasDelaySlot(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::DelaySlot, Type);
  }

  /// Return true for instructions that can be folded as
  /// memory operands in other instructions. The most common use for this
  /// is instructions that are simple loads from memory that don't modify
  /// the loaded value in any way, but it can also be used for instructions
  /// that can be expressed as constant-pool loads, such as V_SETALLONES
  /// on x86, to allow them to be folded when it is beneficial.
  /// This should only be set on instructions that return a value in their
  /// only virtual register definition.
  bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::FoldableAsLoad, Type);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic REG_SEQUENCE instructions.
  /// E.g., on ARM,
  /// dX VMOVDRR rY, rZ
  /// is equivalent to
  /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
  /// overridden accordingly.
  bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::RegSequence, Type);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic EXTRACT_SUBREG instructions.
  /// E.g., on ARM,
  /// rX, rY VMOVRRD dZ
  /// is equivalent to two EXTRACT_SUBREG:
  /// rX = EXTRACT_SUBREG dZ, ssub_0
  /// rY = EXTRACT_SUBREG dZ, ssub_1
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
  /// overridden accordingly.
  bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::ExtractSubreg, Type);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic INSERT_SUBREG instructions.
  /// E.g., on ARM,
  /// dX = VSETLNi32 dY, rZ, Imm
  /// is equivalent to an INSERT_SUBREG:
  /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
  /// overridden accordingly.
  bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::InsertSubreg, Type);
  }

  //===--------------------------------------------------------------------===//
  // Side Effect Analysis
  //===--------------------------------------------------------------------===//

  /// Return true if this instruction could possibly read memory.
  /// Instructions with this flag set are not necessarily simple load
  /// instructions, they may load a value and modify it, for example.
  bool mayLoad(QueryType Type = AnyInBundle) const {
    if (isInlineAsm()) {
      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
      if (ExtraInfo & InlineAsm::Extra_MayLoad)
        return true;
    }
    return hasProperty(MCID::MayLoad, Type);
  }

  /// Return true if this instruction could possibly modify memory.
  /// Instructions with this flag set are not necessarily simple store
  /// instructions, they may store a modified value based on their operands, or
  /// may not actually modify anything, for example.
  bool mayStore(QueryType Type = AnyInBundle) const {
    if (isInlineAsm()) {
      unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
      if (ExtraInfo & InlineAsm::Extra_MayStore)
        return true;
    }
    return hasProperty(MCID::MayStore, Type);
  }

  /// Return true if this instruction could possibly read or modify memory.
  bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
    return mayLoad(Type) || mayStore(Type);
  }
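
  // Example (an illustrative sketch): a conservative check before reordering
  // an instruction across other memory operations; anything that may touch
  // memory or has unmodeled side effects keeps its relative order unless a
  // finer analysis proves independence. Assuming an `MI` in scope:
  //
  //   bool MustPreserveOrder =
  //       MI.mayLoadOrStore() || MI.hasUnmodeledSideEffects();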

  /// Return true if this instruction could possibly raise a floating-point
  /// exception.  This is the case if the instruction is a floating-point
  /// instruction that can in principle raise an exception, as indicated
  /// by the MCID::MayRaiseFPException property, *and* at the same time,
  /// the instruction is used in a context where we expect floating-point
  /// exceptions are not disabled, as indicated by the NoFPExcept MI flag.
  bool mayRaiseFPException() const {
    return hasProperty(MCID::MayRaiseFPException) &&
           !getFlag(MachineInstr::MIFlag::NoFPExcept);
  }

  //===--------------------------------------------------------------------===//
  // Flags that indicate whether an instruction can be modified by a method.
  //===--------------------------------------------------------------------===//

  /// Return true if this may be a 2- or 3-address
  /// instruction (of the form "X = op Y, Z, ..."), which produces the same
  /// result if Y and Z are exchanged.  If this flag is set, then the
  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
  /// instruction.
  ///
  /// Note that this flag may be set on instructions that are only commutable
  /// sometimes.  In these cases, the call to commuteInstruction will fail.
  /// Also note that some instructions require non-trivial modification to
  /// commute them.
  bool isCommutable(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::Commutable, Type);
  }

  /// Return true if this is a 2-address instruction
  /// which can be changed into a 3-address instruction if needed.  Doing this
  /// transformation can be profitable in the register allocator, because it
  /// means that the instruction can use a 2-address form if possible, but
  /// degrade into a less efficient form if the source and dest register cannot
  /// be assigned to the same register.  For example, this allows the x86
  /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
  /// is the same speed as the shift but has bigger code size.
  ///
  /// If this returns true, then the target must implement the
  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
  /// is allowed to fail if the transformation isn't valid for this specific
  /// instruction (e.g. shl reg, 4 on x86).
  ///
  bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::ConvertibleTo3Addr, Type);
  }

  /// Return true if this instruction requires
  /// custom insertion support when the DAG scheduler is inserting it into a
  /// machine basic block.  If this is true for the instruction, it basically
  /// means that it is a pseudo instruction used at SelectionDAG time that is
  /// expanded out into magic code by the target when MachineInstrs are formed.
  ///
  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
  /// is used to insert this into the MachineBasicBlock.
  bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::UsesCustomInserter, Type);
  }

  /// Return true if this instruction requires *adjustment*
  /// after instruction selection by calling a target hook. For example, this
  /// can be used to fill in ARM 's' optional operand depending on whether
  /// the conditional flag register is used.
  bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
    return hasProperty(MCID::HasPostISelHook, Type);
  }

  /// Returns true if this instruction is a candidate for remat.
  /// This flag is deprecated, please don't use it anymore.  If this
  /// flag is set, the isReallyTriviallyReMaterializable() method is called to
  /// verify the instruction is really rematable.
  bool isRematerializable(QueryType Type = AllInBundle) const {
    // It's only possible to re-mat a bundle if all bundled instructions are
    // re-materializable.
    return hasProperty(MCID::Rematerializable, Type);
  }

  /// Returns true if this instruction has the same cost (or less) than a move
  /// instruction. This is useful during certain types of optimizations
  /// (e.g., remat during two-address conversion or machine licm)
  /// where we would like to remat or hoist the instruction, but not if it costs
  /// more than moving the instruction into the appropriate register. Note, we
  /// are not marking copies from and to the same register class with this flag.
  bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
    // Only returns true for a bundle if all bundled instructions are cheap.
    return hasProperty(MCID::CheapAsAMove, Type);
  }

  /// Returns true if this instruction's source operands
  /// have special register allocation requirements that are not captured by the
  /// operand register classes. e.g. ARM::STRD's two source registers must be an
  /// even / odd pair, ARM::STM registers have to be in ascending order.
  /// Post-register allocation passes should not attempt to change allocations
  /// for sources of instructions with this flag.
  bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
  }

  /// Returns true if this instruction's def operands
  /// have special register allocation requirements that are not captured by the
  /// operand register classes. e.g. ARM::LDRD's two def registers must be an
  /// even / odd pair, ARM::LDM registers have to be in ascending order.
  /// Post-register allocation passes should not attempt to change allocations
  /// for definitions of instructions with this flag.
  bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
    return hasProperty(MCID::ExtraDefRegAllocReq, Type);
  }

  enum MICheckType {
    CheckDefs,      // Check all operands for equality
    CheckKillDead,  // Check all operands including kill / dead markers
    IgnoreDefs,     // Ignore all definitions
    IgnoreVRegDefs  // Ignore virtual register definitions
  };

  /// Return true if this instruction is identical to \p Other.
  /// Two instructions are identical if they have the same opcode and all their
  /// operands are identical (with respect to MachineOperand::isIdenticalTo()).
  /// Note that this means liveness related flags (dead, undef, kill) do not
  /// affect the notion of identical.
  bool isIdenticalTo(const MachineInstr &Other,
                     MICheckType Check = CheckDefs) const;

  /// Returns true if this instruction is a debug instruction that represents an
  /// identical debug value to \p Other.
  /// This function considers these debug instructions equivalent if they have
  /// identical variables, debug locations, and debug operands, and if the
  /// DIExpressions combined with the directness flags are equivalent.
  bool isEquivalentDbgInstr(const MachineInstr &Other) const;

  /// Unlink 'this' from the containing basic block, and return it without
  /// deleting it.
  ///
  /// This function cannot be used on bundled instructions; use
  /// removeFromBundle() to remove individual instructions from a bundle.
  MachineInstr *removeFromParent();

  /// Unlink this instruction from its basic block and return it without
  /// deleting it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle remain bundled.
  MachineInstr *removeFromBundle();

  /// Unlink 'this' from the containing basic block and delete it.
  ///
  /// If this instruction is the header of a bundle, the whole bundle is erased.
  /// This function cannot be used for instructions inside a bundle; use
  /// eraseFromBundle() to erase individual bundled instructions.
  void eraseFromParent();

  /// Unlink 'this' from its basic block and delete it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle remain bundled.
  void eraseFromBundle();

  bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
  bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
  bool isAnnotationLabel() const {
    return getOpcode() == TargetOpcode::ANNOTATION_LABEL;
  }

  /// Returns true if the MachineInstr represents a label.
  bool isLabel() const {
    return isEHLabel() || isGCLabel() || isAnnotationLabel();
  }

  bool isCFIInstruction() const {
    return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
  }

  bool isPseudoProbe() const {
    return getOpcode() == TargetOpcode::PSEUDO_PROBE;
  }

  /// True if the instruction represents a position in the function.
  bool isPosition() const { return isLabel() || isCFIInstruction(); }

  bool isNonListDebugValue() const {
    return getOpcode() == TargetOpcode::DBG_VALUE;
  }
  bool isDebugValueList() const {
    return getOpcode() == TargetOpcode::DBG_VALUE_LIST;
  }
  bool isDebugValue() const {
    return isNonListDebugValue() || isDebugValueList();
  }
  bool isDebugLabel() const { return getOpcode() == TargetOpcode::DBG_LABEL; }
  bool isDebugRef() const { return getOpcode() == TargetOpcode::DBG_INSTR_REF; }
  bool isDebugValueLike() const { return isDebugValue() || isDebugRef(); }
  bool isDebugPHI() const { return getOpcode() == TargetOpcode::DBG_PHI; }
  bool isDebugInstr() const {
    return isDebugValue() || isDebugLabel() || isDebugRef() || isDebugPHI();
  }
  bool isDebugOrPseudoInstr() const {
    return isDebugInstr() || isPseudoProbe();
  }

  bool isDebugOffsetImm() const {
    return isNonListDebugValue() && getDebugOffset().isImm();
  }

  /// A DBG_VALUE is indirect iff the location operand is a register and
  /// the offset operand is an immediate.
  bool isIndirectDebugValue() const {
    return isDebugOffsetImm() && getDebugOperand(0).isReg();
  }

  /// A DBG_VALUE is an entry value iff its debug expression contains the
  /// DW_OP_LLVM_entry_value operation.
  bool isDebugEntryValue() const;

  /// Return true if the instruction is a debug value which describes a part of
  /// a variable as unavailable.
  bool isUndefDebugValue() const {
    if (!isDebugValue())
      return false;
    // If any $noreg locations are given, this DV is undef.
    for (const MachineOperand &Op : debug_operands())
      if (Op.isReg() && !Op.getReg().isValid())
        return true;
    return false;
  }

  bool isPHI() const {
    return getOpcode() == TargetOpcode::PHI ||
           getOpcode() == TargetOpcode::G_PHI;
  }
  bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
  bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
  bool isInlineAsm() const {
    return getOpcode() == TargetOpcode::INLINEASM ||
           getOpcode() == TargetOpcode::INLINEASM_BR;
  }

  /// FIXME: Seems like a layering violation that the AsmDialect, which is X86
  /// specific, be attached to a generic MachineInstr.
  bool isMSInlineAsm() const {
    return isInlineAsm() && getInlineAsmDialect() == InlineAsm::AD_Intel;
  }

  bool isStackAligningInlineAsm() const;
  InlineAsm::AsmDialect getInlineAsmDialect() const;

  bool isInsertSubreg() const {
    return getOpcode() == TargetOpcode::INSERT_SUBREG;
  }

  bool isSubregToReg() const {
    return getOpcode() == TargetOpcode::SUBREG_TO_REG;
  }

  bool isRegSequence() const {
    return getOpcode() == TargetOpcode::REG_SEQUENCE;
  }

  bool isBundle() const {
    return getOpcode() == TargetOpcode::BUNDLE;
  }

  bool isCopy() const {
    return getOpcode() == TargetOpcode::COPY;
  }

  bool isFullCopy() const {
    return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
  }

  bool isExtractSubreg() const {
    return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
  }

  /// Return true if the instruction behaves like a copy.
  /// This does not include native copy instructions.
  bool isCopyLike() const {
    return isCopy() || isSubregToReg();
  }

  /// Return true if the instruction is an identity copy.
  bool isIdentityCopy() const {
    return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
      getOperand(0).getSubReg() == getOperand(1).getSubReg();
  }

  /// Return true if this is a transient instruction that is either very likely
  /// to be eliminated during register allocation (such as copy-like
  /// instructions), or if this instruction doesn't have an execution-time cost.
  bool isTransient() const {
    switch (getOpcode()) {
    default:
      return isMetaInstruction();
    // Copy-like instructions are usually eliminated during register allocation.
    case TargetOpcode::PHI:
    case TargetOpcode::G_PHI:
    case TargetOpcode::COPY:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
      return true;
    }
  }

  /// Return the number of instructions inside the MI bundle, excluding the
  /// bundle header.
  ///
  /// This is the number of instructions that MachineBasicBlock::iterator
  /// skips, 0 for unbundled instructions.
  unsigned getBundleSize() const;

  /// Return true if the MachineInstr reads the specified register.
  /// If TargetRegisterInfo is passed, then it also checks if there
  /// is a read of a super-register.
  /// This does not count partial redefines of virtual registers as reads:
  ///   %reg1024:6 = OP.
  bool readsRegister(Register Reg,
                     const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
  }

  /// Return true if the MachineInstr reads the specified virtual register.
  /// Take into account that a partial define is a
  /// read-modify-write operation.
  bool readsVirtualRegister(Register Reg) const {
    return readsWritesVirtualRegister(Reg).first;
  }

  /// Return a pair of bools (reads, writes) indicating if this instruction
  /// reads or writes Reg. This also considers partial defines.
  /// If Ops is not null, all operand indices for Reg are added.
  std::pair<bool,bool> readsWritesVirtualRegister(Register Reg,
                                SmallVectorImpl<unsigned> *Ops = nullptr) const;

  /// Return true if the MachineInstr kills the specified register.
  /// If TargetRegisterInfo is passed, then it also checks if there is
  /// a kill of a super-register.
  bool killsRegister(Register Reg,
                     const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
  }

  /// Return true if the MachineInstr fully defines the specified register.
  /// If TargetRegisterInfo is passed, then it also checks
  /// if there is a def of a super-register.
  /// NOTE: This ignores subreg indices on virtual registers.
  bool definesRegister(Register Reg,
                       const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
  }

  /// Return true if the MachineInstr modifies (fully define or partially
  /// define) the specified register.
  /// NOTE: This ignores subreg indices on virtual registers.
  bool modifiesRegister(Register Reg,
                        const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
  }

  /// Returns true if the register is dead in this machine instruction.
  /// If TargetRegisterInfo is passed, then it also checks
  /// if there is a dead def of a super-register.
  bool registerDefIsDead(Register Reg,
                         const TargetRegisterInfo *TRI = nullptr) const {
    return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
  }
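
  // Example (an illustrative sketch): a dead-def style query. `MI`, `Reg`,
  // and `TRI` are assumed to be in scope; passing `TRI` extends the check to
  // super-registers.
  //
  //   bool DefinedButUnread =
  //       MI.definesRegister(Reg, TRI) && MI.registerDefIsDead(Reg, TRI);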

  /// Returns true if the MachineInstr has an implicit-use operand of exactly
  /// the given register (not considering sub/super-registers).
  bool hasRegisterImplicitUseOperand(Register Reg) const;

  /// Returns the operand index that is a use of the specified register or -1
  /// if it is not found. It further tightens the search criteria to a use
  /// that kills the register if isKill is true.
  int findRegisterUseOperandIdx(Register Reg, bool isKill = false,
                                const TargetRegisterInfo *TRI = nullptr) const;

  /// Wrapper for findRegisterUseOperandIdx; returns
  /// a pointer to the MachineOperand rather than an index.
  MachineOperand *findRegisterUseOperand(Register Reg, bool isKill = false,
                                      const TargetRegisterInfo *TRI = nullptr) {
    int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
    return (Idx == -1) ? nullptr : &getOperand(Idx);
  }

  const MachineOperand *findRegisterUseOperand(
    Register Reg, bool isKill = false,
    const TargetRegisterInfo *TRI = nullptr) const {
    return const_cast<MachineInstr *>(this)->
      findRegisterUseOperand(Reg, isKill, TRI);
  }
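
  // Example (an illustrative sketch): dropping a stale kill marker. `MI`,
  // `Reg`, and `TRI` are assumed to be in scope.
  //
  //   if (MachineOperand *MO =
  //           MI.findRegisterUseOperand(Reg, /*isKill=*/true, TRI))
  //     MO->setIsKill(false);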

  /// Returns the operand index that is a def of the specified register or
  /// -1 if it is not found. If isDead is true, defs that are not dead are
  /// skipped. If Overlap is true, then it also looks for defs that merely
  /// overlap the specified register. If TargetRegisterInfo is non-null,
  /// then it also checks if there is a def of a super-register.
  /// This may also return a register mask operand when Overlap is true.
  int findRegisterDefOperandIdx(Register Reg,
                                bool isDead = false, bool Overlap = false,
                                const TargetRegisterInfo *TRI = nullptr) const;

  /// Wrapper for findRegisterDefOperandIdx; returns
  /// a pointer to the MachineOperand rather than an index.
  MachineOperand *
  findRegisterDefOperand(Register Reg, bool isDead = false,
                         bool Overlap = false,
                         const TargetRegisterInfo *TRI = nullptr) {
    int Idx = findRegisterDefOperandIdx(Reg, isDead, Overlap, TRI);
    return (Idx == -1) ? nullptr : &getOperand(Idx);
  }

  const MachineOperand *
  findRegisterDefOperand(Register Reg, bool isDead = false,
                         bool Overlap = false,
                         const TargetRegisterInfo *TRI = nullptr) const {
    return const_cast<MachineInstr *>(this)->findRegisterDefOperand(
        Reg, isDead, Overlap, TRI);
  }

  /// Find the index of the first operand in the
  /// operand list that is used to represent the predicate. It returns -1 if
  /// none is found.
  int findFirstPredOperandIdx() const;

  /// Find the index of the flag word operand that
  /// corresponds to operand OpIdx on an inline asm instruction.  Returns -1 if
  /// getOperand(OpIdx) does not belong to an inline asm operand group.
  ///
  /// If GroupNo is not NULL, it will receive the number of the operand group
  /// containing OpIdx.
  int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;

  /// Compute the static register class constraint for operand OpIdx.
  /// For normal instructions, this is derived from the MCInstrDesc.
  /// For inline assembly it is derived from the flag words.
  ///
  /// Returns NULL if the static register class constraint cannot be
  /// determined.
  const TargetRegisterClass*
  getRegClassConstraint(unsigned OpIdx,
                        const TargetInstrInfo *TII,
                        const TargetRegisterInfo *TRI) const;

  /// Applies the constraints (def/use) implied by this MI on \p Reg to
  /// the given \p CurRC.
  /// If \p ExploreBundle is set and MI is part of a bundle, all the
  /// instructions inside the bundle will be taken into account. In other words,
  /// this method accumulates all the constraints of the operand of this MI and
  /// the related bundle if MI is a bundle or inside a bundle.
  ///
  /// Returns the register class that satisfies both \p CurRC and the
  /// constraints set by MI. Returns NULL if such a register class does not
  /// exist.
  ///
  /// \pre CurRC must not be NULL.
  const TargetRegisterClass *getRegClassConstraintEffectForVReg(
      Register Reg, const TargetRegisterClass *CurRC,
      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
      bool ExploreBundle = false) const;

  /// Applies the constraints (def/use) implied by the \p OpIdx operand
  /// to the given \p CurRC.
  ///
  /// Returns the register class that satisfies both \p CurRC and the
  /// constraints set by operand \p OpIdx of this MI. Returns NULL if such a
  /// register class does not exist.
  ///
  /// \pre CurRC must not be NULL.
  /// \pre The operand at \p OpIdx must be a register.
  const TargetRegisterClass *
  getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
                              const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) const;

  /// Add a tie between the register operands at DefIdx and UseIdx.
  /// The tie will cause the register allocator to ensure that the two
  /// operands are assigned the same physical register.
  ///
  /// Tied operands are managed automatically for explicit operands in the
  /// MCInstrDesc. This method is for exceptional cases like inline asm.
  void tieOperands(unsigned DefIdx, unsigned UseIdx);

  /// Given the index of a tied register operand, find the
  /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
  /// index of the tied operand which must exist.
  unsigned findTiedOperandIdx(unsigned OpIdx) const;

  /// Given the index of a register def operand,
  /// check if the register def is tied to a source operand, due to either
  /// two-address elimination or inline assembly constraints. Returns the
  /// first tied use operand index by reference if UseOpIdx is not null.
  bool isRegTiedToUseOperand(unsigned DefOpIdx,
                             unsigned *UseOpIdx = nullptr) const {
    const MachineOperand &MO = getOperand(DefOpIdx);
    if (!MO.isReg() || !MO.isDef() || !MO.isTied())
      return false;
    if (UseOpIdx)
      *UseOpIdx = findTiedOperandIdx(DefOpIdx);
    return true;
  }

  /// Return true if the use operand of the specified index is tied to a def
  /// operand. It also returns the def operand index by reference if DefOpIdx
  /// is not null.
  bool isRegTiedToDefOperand(unsigned UseOpIdx,
                             unsigned *DefOpIdx = nullptr) const {
    const MachineOperand &MO = getOperand(UseOpIdx);
    if (!MO.isReg() || !MO.isUse() || !MO.isTied())
      return false;
    if (DefOpIdx)
      *DefOpIdx = findTiedOperandIdx(UseOpIdx);
    return true;
  }
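
  // Example (an illustrative sketch): discovering a two-address constraint.
  // `MI` is assumed to be in scope and `DefIdx` to index a register def.
  //
  //   unsigned UseIdx;
  //   if (MI.isRegTiedToUseOperand(DefIdx, &UseIdx)) {
  //     // Operands DefIdx and UseIdx must receive the same physical register.
  //   }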

  /// Clears kill flags on all operands.
  void clearKillInfo();

  /// Replace all occurrences of FromReg with ToReg:SubIdx,
  /// properly composing subreg indices where necessary.
  void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx,
                          const TargetRegisterInfo &RegInfo);

  /// We have determined MI kills a register. Look for the
  /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
  /// add an implicit operand if it's not found. Returns true if the operand
  /// exists / is added.
  bool addRegisterKilled(Register IncomingReg,
                         const TargetRegisterInfo *RegInfo,
                         bool AddIfNotFound = false);

  /// Clear all kill flags affecting Reg.  If RegInfo is provided, this includes
  /// all aliasing registers.
  void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo);

  /// We have determined MI defines a register without a use.
  /// Look for the operand that defines it and mark it as IsDead. If
  /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
  /// true if the operand exists / is added.
  bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo,
                       bool AddIfNotFound = false);

  /// Clear all dead flags on operands defining register \p Reg.
  void clearRegisterDeads(Register Reg);

  /// Mark all subregister defs of register \p Reg with the undef flag.
  /// This function is used when we determined to have a subregister def in an
  /// otherwise undefined super register.
  void setRegisterDefReadUndef(Register Reg, bool IsUndef = true);

  /// We have determined MI defines a register. Make sure there is an operand
  /// defining Reg.
  void addRegisterDefined(Register Reg,
                          const TargetRegisterInfo *RegInfo = nullptr);

  /// Mark every physreg used by this instruction as
  /// dead except those in the UsedRegs list.
  ///
  /// On instructions with register mask operands, also add implicit-def
  /// operands for all registers in UsedRegs.
  void setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
                             const TargetRegisterInfo &TRI);

  /// Return true if it is safe to move this instruction. If
  /// SawStore is set to true, it means that there is a store (or call) between
  /// the instruction's location and its intended destination.
  bool isSafeToMove(AAResults *AA, bool &SawStore) const;

  /// Returns true if this instruction's memory access aliases the memory
  /// access of Other.
  ///
  /// Assumes any physical registers used to compute addresses
  /// have the same value for both instructions.  Returns false if neither
  /// instruction writes to memory.
  ///
  /// @param AA Optional alias analysis, used to compare memory operands.
  /// @param Other MachineInstr to check aliasing against.
  /// @param UseTBAA Whether to pass TBAA information to alias analysis.
  bool mayAlias(AAResults *AA, const MachineInstr &Other, bool UseTBAA) const;

  /// Return true if this instruction may have an ordered
  /// or volatile memory reference, or if the information describing the memory
  /// reference is not available. Return false if it is known to have no
  /// ordered or volatile memory references.
  bool hasOrderedMemoryRef() const;

  /// Return true if this load instruction never traps and points to a memory
  /// location whose value doesn't change during the execution of this function.
  ///
  /// Examples include loading a value from the constant pool or from the
  /// argument area of a function (if it does not change).  If the instruction
  /// does multiple loads, this returns true only if all of the loads are
  /// dereferenceable and invariant.
  bool isDereferenceableInvariantLoad() const;

  /// If the specified instruction is a PHI that always merges together the
  /// same virtual register, return the register, otherwise return 0.
  unsigned isConstantValuePHI() const;

  /// Return true if this instruction has side effects that are not modeled
  /// by mayLoad / mayStore, etc.
  /// For all instructions, the property is encoded in MCInstrDesc::Flags
  /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
  /// INLINEASM instruction, in which case the side effect property is encoded
  /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
  ///
  bool hasUnmodeledSideEffects() const;

  /// Returns true if it is illegal to fold a load across this instruction.
  bool isLoadFoldBarrier() const;

  /// Return true if all the defs of this instruction are dead.
  bool allDefsAreDead() const;

  /// Return a valid size if the instruction is a spill instruction.
  std::optional<unsigned> getSpillSize(const TargetInstrInfo *TII) const;

  /// Return a valid size if the instruction is a folded spill instruction.
  std::optional<unsigned> getFoldedSpillSize(const TargetInstrInfo *TII) const;

  /// Return a valid size if the instruction is a restore instruction.
  std::optional<unsigned> getRestoreSize(const TargetInstrInfo *TII) const;

  /// Return a valid size if the instruction is a folded restore instruction.
  std::optional<unsigned>
  getFoldedRestoreSize(const TargetInstrInfo *TII) const;

  /// Copy implicit register operands from the specified
  /// instruction to this instruction.
  void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI);

  /// Debugging support
  /// @{
  /// Determine the generic type to be printed (if needed) on uses and defs.
  LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
                     const MachineRegisterInfo &MRI) const;

  /// Return true when an instruction has a tied register that can't be
  /// determined by the instruction's descriptor. This is useful for MIR
  /// printing, to determine whether we need to print the ties or not.
  bool hasComplexRegisterTies() const;

  /// Print this MI to \p OS.
  /// Don't print information that can be inferred from other instructions if
  /// \p IsStandalone is false. It is usually true when only a fragment of the
  /// function is printed.
  /// If \p SkipOpers is true, only print the defs and the opcode. Otherwise,
  /// also print the operands; and unless \p SkipDebugLoc is true, also print
  /// the debug loc, with a terminating newline.
  /// \p TII is used to print the opcode name.  If it's not present, but the
  /// MI is in a function, the opcode will be printed using the function's TII.
  void print(raw_ostream &OS, bool IsStandalone = true, bool SkipOpers = false,
             bool SkipDebugLoc = false, bool AddNewLine = true,
             const TargetInstrInfo *TII = nullptr) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST, bool IsStandalone = true,
             bool SkipOpers = false, bool SkipDebugLoc = false,
             bool AddNewLine = true,
             const TargetInstrInfo *TII = nullptr) const;
  void dump() const;
  /// Print on dbgs() the current instruction and the instructions defining its
  /// operands and so on until we reach \p MaxDepth.
  void dumpr(const MachineRegisterInfo &MRI,
             unsigned MaxDepth = UINT_MAX) const;
  /// @}

  //===--------------------------------------------------------------------===//
  // Accessors used to build up machine instructions.

  /// Add the specified operand to the instruction.  If it is an implicit
  /// operand, it is added to the end of the operand list.  If it is an
  /// explicit operand it is added at the end of the explicit operand list
  /// (before the first implicit operand).
  ///
  /// MF must be the machine function that was used to allocate this
  /// instruction.
  ///
  /// MachineInstrBuilder provides a more convenient interface for creating
  /// instructions and adding operands.
  void addOperand(MachineFunction &MF, const MachineOperand &Op);

  /// Add an operand without providing an MF reference. This only works for
  /// instructions that are inserted in a basic block.
  ///
  /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
  /// preferred.
  void addOperand(const MachineOperand &Op);

  /// Replace the instruction descriptor (thus opcode) of
  /// the current instruction with a new one.
  void setDesc(const MCInstrDesc &TID) { MCID = &TID; }

  /// Replace the current debug location with a new one.
  /// Avoid using this; the constructor argument is preferable.
  void setDebugLoc(DebugLoc DL) {
    DbgLoc = std::move(DL);
    assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
  }

  /// Erase an operand from an instruction, leaving it with one
  /// fewer operand than it started with.
  void removeOperand(unsigned OpNo);

  /// Clear this MachineInstr's memory reference descriptor list.  This resets
  /// the memrefs to their most conservative state.  This should be used only
  /// as a last resort since it greatly pessimizes our knowledge of the memory
  /// access performed by the instruction.
  void dropMemRefs(MachineFunction &MF);

  /// Assign this MachineInstr's memory reference descriptor list.
  ///
  /// Unlike other methods, this *will* allocate them into a new array
  /// associated with the provided `MachineFunction`.
  void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);

  /// Add a MachineMemOperand to the machine instruction.
  /// This function should be used only occasionally. The setMemRefs function
  /// is the primary method for setting up a MachineInstr's MemRefs list.
  void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);

  /// Clone another MachineInstr's memory reference descriptor list and replace
  /// ours with it.
  ///
  /// Note that `*this` may be the incoming MI!
  ///
  /// Prefer this API whenever possible as it can avoid allocations in common
  /// cases.
  void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);

  /// Clone the merge of multiple MachineInstrs' memory reference descriptors
  /// list and replace ours with it.
  ///
  /// Note that `*this` may be one of the incoming MIs!
  ///
  /// Prefer this API whenever possible as it can avoid allocations in common
  /// cases.
  void cloneMergedMemRefs(MachineFunction &MF,
                          ArrayRef<const MachineInstr *> MIs);

  /// Set a symbol that will be emitted just prior to the instruction itself.
  ///
  /// Setting this to a null pointer will remove any such symbol.
  ///
  /// FIXME: This is not fully implemented yet.
  void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);

  /// Set a symbol that will be emitted just after the instruction itself.
  ///
  /// Setting this to a null pointer will remove any such symbol.
  ///
  /// FIXME: This is not fully implemented yet.
  void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol);

  /// Clone another MachineInstr's pre- and post- instruction symbols and
  /// replace ours with it.
  void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI);

  /// Set a marker on instructions that denotes where we should create and emit
  /// heap alloc site labels. This waits until after instruction selection and
  /// optimizations to create the label, so it should still work if the
  /// instruction is removed or duplicated.
  void setHeapAllocMarker(MachineFunction &MF, MDNode *MD);

  /// Set metadata on instructions that say which sections to emit instruction
  /// addresses into.
  void setPCSections(MachineFunction &MF, MDNode *MD);

  /// Set the CFI type for the instruction.
  void setCFIType(MachineFunction &MF, uint32_t Type);

  /// Return the MIFlags which represent both MachineInstrs. This
  /// should be used when merging two MachineInstrs into one. This routine does
  /// not modify the MIFlags of this MachineInstr.
  uint32_t mergeFlagsWith(const MachineInstr& Other) const;

  static uint32_t copyFlagsFromInstruction(const Instruction &I);

  /// Copy all flags from \p I to this MachineInstr's MIFlags.
  void copyIRFlags(const Instruction &I);

  /// Break any tie involving OpIdx.
  void untieRegOperand(unsigned OpIdx) {
    MachineOperand &MO = getOperand(OpIdx);
    if (MO.isReg() && MO.isTied()) {
      getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
      MO.TiedTo = 0;
    }
  }

  /// Add all implicit def and use operands to this instruction.
  void addImplicitDefUseOperands(MachineFunction &MF);

  /// Scan instructions immediately following MI and collect any matching
  /// DBG_VALUEs.
  void collectDebugValues(SmallVectorImpl<MachineInstr *> &DbgValues);

  /// Find all DBG_VALUEs that point to the register def in this instruction
  /// and point them to \p Reg instead.
  void changeDebugValuesDefReg(Register Reg);

  /// Returns the Intrinsic::ID for this instruction.
  /// \pre Must have an intrinsic ID operand.
  unsigned getIntrinsicID() const {
    return getOperand(getNumExplicitDefs()).getIntrinsicID();
  }

  /// Sets all register debug operands in this debug value instruction to be
  /// undef.
  void setDebugValueUndef() {
    assert(isDebugValue() && "Must be a debug value instruction.");
    for (MachineOperand &MO : debug_operands()) {
      if (MO.isReg()) {
        MO.setReg(0);
        MO.setSubReg(0);
      }
    }
  }
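
  // Example (an illustrative sketch): a pass deleting the instruction that
  // computes a debug value's location would mark the value unavailable rather
  // than leave a dangling operand. `DbgMI` is assumed to be in scope.
  //
  //   if (DbgMI.isDebugValue())
  //     DbgMI.setDebugValueUndef();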

  std::tuple<Register, Register> getFirst2Regs() const {
    return std::tuple(getOperand(0).getReg(), getOperand(1).getReg());
  }

  std::tuple<Register, Register, Register> getFirst3Regs() const {
    return std::tuple(getOperand(0).getReg(), getOperand(1).getReg(),
                      getOperand(2).getReg());
  }

  std::tuple<Register, Register, Register, Register> getFirst4Regs() const {
    return std::tuple(getOperand(0).getReg(), getOperand(1).getReg(),
                      getOperand(2).getReg(), getOperand(3).getReg());
  }

  std::tuple<Register, Register, Register, Register, Register>
  getFirst5Regs() const {
    return std::tuple(getOperand(0).getReg(), getOperand(1).getReg(),
                      getOperand(2).getReg(), getOperand(3).getReg(),
                      getOperand(4).getReg());
  }
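
  // Example (an illustrative sketch): the tuple accessors pair naturally with
  // structured bindings in GlobalISel-style code. `MI` is assumed to be an
  // instruction with at least three register operands, e.g. a G_ADD.
  //
  //   auto [Dst, LHS, RHS] = MI.getFirst3Regs();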

  std::tuple<LLT, LLT> getFirst2LLTs() const;
  std::tuple<LLT, LLT, LLT> getFirst3LLTs() const;
  std::tuple<LLT, LLT, LLT, LLT> getFirst4LLTs() const;
  std::tuple<LLT, LLT, LLT, LLT, LLT> getFirst5LLTs() const;

  std::tuple<Register, LLT, Register, LLT> getFirst2RegLLTs() const;
  std::tuple<Register, LLT, Register, LLT, Register, LLT>
  getFirst3RegLLTs() const;
  std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
  getFirst4RegLLTs() const;
  std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT,
             Register, LLT>
  getFirst5RegLLTs() const;

private:
  /// If this instruction is embedded into a MachineFunction, return the
  /// MachineRegisterInfo object for the current function, otherwise
  /// return null.
  MachineRegisterInfo *getRegInfo();
  const MachineRegisterInfo *getRegInfo() const;

  /// Unlink all of the register operands in this instruction from their
  /// respective use lists.  This requires that the operands already be on their
  /// use lists.
  void removeRegOperandsFromUseLists(MachineRegisterInfo&);

  /// Add all of the register operands in this instruction to their
  /// respective use lists.  This requires that the operands not be on their
  /// use lists yet.
  void addRegOperandsToUseLists(MachineRegisterInfo&);

  /// Slow path for hasProperty when we're dealing with a bundle.
  bool hasPropertyInBundle(uint64_t Mask, QueryType Type) const;

  /// Implements the logic of getRegClassConstraintEffectForVReg for
  /// this MI and the given operand index \p OpIdx.
  /// If the related operand does not constrain Reg, this returns CurRC.
  const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
      unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
      const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;

  /// Stores extra instruction information inline or allocates as ExtraInfo
  /// based on the number of pointers.
  void setExtraInfo(MachineFunction &MF, ArrayRef<MachineMemOperand *> MMOs,
                    MCSymbol *PreInstrSymbol, MCSymbol *PostInstrSymbol,
                    MDNode *HeapAllocMarker, MDNode *PCSections,
                    uint32_t CFIType);
};

/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
/// instruction rather than by pointer value.
/// The hashing and equality testing functions ignore definitions so this is
/// useful for CSE, etc.
struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
  static inline MachineInstr *getEmptyKey() {
    return nullptr;
  }

  static inline MachineInstr *getTombstoneKey() {
    return reinterpret_cast<MachineInstr*>(-1);
  }

  static unsigned getHashValue(const MachineInstr* const &MI);

  static bool isEqual(const MachineInstr* const &LHS,
                      const MachineInstr* const &RHS) {
    if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
        LHS == getEmptyKey() || LHS == getTombstoneKey())
      return LHS == RHS;
    return LHS->isIdenticalTo(*RHS, MachineInstr::IgnoreVRegDefs);
  }
};
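
// A sketch of how this trait is typically used: keying a DenseSet by
// instruction *value* so CSE-style duplicate checks ignore pointer identity
// (the loop context and NumDuplicates are hypothetical):
//
//   DenseSet<MachineInstr *, MachineInstrExpressionTrait> Seen;
//   for (MachineInstr &MI : MBB)
//     if (!Seen.insert(&MI).second)
//       ++NumDuplicates; // identical to an earlier MI, ignoring vreg defs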

//===----------------------------------------------------------------------===//
// Debugging Support

inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
  MI.print(OS);
  return OS;
}

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEINSTR_H
//===- LiveRangeEdit.h - Basic tools for split and spill --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LiveRangeEdit class represents changes done to a virtual register when it
// is spilled or split.
//
// The parent register is never changed. Instead, a number of new virtual
// registers are created and added to the newRegs vector.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
#define LLVM_CODEGEN_LIVERANGEEDIT_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include <cassert>

namespace llvm {

class LiveIntervals;
class MachineInstr;
class MachineOperand;
class TargetInstrInfo;
class TargetRegisterInfo;
class VirtRegMap;
class VirtRegAuxInfo;

class LiveRangeEdit : private MachineRegisterInfo::Delegate {
public:
  /// Callback methods for LiveRangeEdit owners.
  class Delegate {
    virtual void anchor();

  public:
    virtual ~Delegate() = default;

    /// Called immediately before erasing a dead machine instruction.
    virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}

    /// Called when a virtual register is no longer used. Return false to defer
    /// its deletion from LiveIntervals.
    virtual bool LRE_CanEraseVirtReg(Register) { return true; }

    /// Called before shrinking the live range of a virtual register.
    virtual void LRE_WillShrinkVirtReg(Register) {}

    /// Called after cloning a virtual register.
    /// This is used for new registers representing connected components of Old.
    virtual void LRE_DidCloneVirtReg(Register New, Register Old) {}
  };
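
  // An illustrative delegate (hypothetical), letting an owner observe edits
  // performed on its behalf:
  //
  //   struct MySpillerDelegate : LiveRangeEdit::Delegate {
  //     void LRE_WillEraseInstruction(MachineInstr *MI) override {
  //       // Drop MI from local worklists before it is erased.
  //     }
  //     bool LRE_CanEraseVirtReg(Register) override { return true; }
  //   };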

private:
  const LiveInterval *const Parent;
  SmallVectorImpl<Register> &NewRegs;
  MachineRegisterInfo &MRI;
  LiveIntervals &LIS;
  VirtRegMap *VRM;
  const TargetInstrInfo &TII;
  Delegate *const TheDelegate;

  /// FirstNew - Index of the first register added to NewRegs.
  const unsigned FirstNew;

  /// ScannedRemattable - true when remattable values have been identified.
  bool ScannedRemattable = false;

  /// DeadRemats - Instructions that have become dead after rematerialization
  /// but have not been deleted yet -- deletion happens in postOptimization.
  SmallPtrSet<MachineInstr *, 32> *DeadRemats;

  /// Remattable - Values defined by remattable instructions as identified by
  /// tii.isTriviallyReMaterializable().
  SmallPtrSet<const VNInfo *, 4> Remattable;

  /// Rematted - Values that were actually rematted, and so need to have their
  /// live range trimmed or entirely removed.
  SmallPtrSet<const VNInfo *, 4> Rematted;

  /// scanRemattable - Identify the Parent values that may rematerialize.
  void scanRemattable();

  /// foldAsLoad - If LI has a single use and a single def that can be folded as
  /// a load, eliminate the register by folding the def into the use.
  bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead);

  using ToShrinkSet = SmallSetVector<LiveInterval *, 8>;

  /// Helper for eliminateDeadDefs.
  void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink);

  /// MachineRegisterInfo callback to notify when new virtual
  /// registers are created.
  void MRI_NoteNewVirtualRegister(Register VReg) override;

  /// Check if MachineOperand \p MO is a last use/kill either in the
  /// main live range of \p LI or in one of the matching subregister ranges.
  bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;

  /// Create a new empty interval based on OldReg.
  LiveInterval &createEmptyIntervalFrom(Register OldReg, bool createSubRanges);

public:
  /// Create a LiveRangeEdit for breaking down parent into smaller pieces.
  /// @param parent The register being spilled or split.
  /// @param newRegs List to receive any new registers created. This needn't be
  ///                empty initially; any existing registers are ignored.
  /// @param MF The MachineFunction the live range edit is taking place in.
  /// @param lis The collection of all live intervals in this function.
  /// @param vrm Map of virtual registers to physical registers for this
  ///            function.  If NULL, no virtual register map updates will
  ///            be done.  This could be the case if called before Regalloc.
  /// @param deadRemats The collection of all instructions that define an
  ///                   original register and are dead after rematerialization.
  LiveRangeEdit(const LiveInterval *parent, SmallVectorImpl<Register> &newRegs,
                MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
                Delegate *delegate = nullptr,
                SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
      : Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
        VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),
        FirstNew(newRegs.size()), DeadRemats(deadRemats) {
    MRI.addDelegate(this);
  }

  ~LiveRangeEdit() override { MRI.resetDelegate(this); }
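
  // Construction sketch (hedged; SpilledReg, MyDelegate, and DeadRemats are
  // assumed to come from the caller's context):
  //
  //   SmallVector<Register, 4> NewVRegs;
  //   LiveRangeEdit LRE(&LIS.getInterval(SpilledReg), NewVRegs, MF, LIS, &VRM,
  //                     &MyDelegate, &DeadRemats);
  //   Register Tmp = LRE.createFrom(SpilledReg); // new vreg based on SpilledReg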

  const LiveInterval &getParent() const {
    assert(Parent && "No parent LiveInterval");
    return *Parent;
  }

  Register getReg() const { return getParent().reg(); }

  /// Iterator for accessing the new registers added by this edit.
  using iterator = SmallVectorImpl<Register>::const_iterator;
  iterator begin() const { return NewRegs.begin() + FirstNew; }
  iterator end() const { return NewRegs.end(); }
  unsigned size() const { return NewRegs.size() - FirstNew; }
  bool empty() const { return size() == 0; }
  Register get(unsigned idx) const { return NewRegs[idx + FirstNew]; }

  /// pop_back - Allow LiveRangeEdit users to drop new registers.
  /// The context: when the original def instruction of a register is dead
  /// after rematerialization, we still want to keep it for later
  /// rematerializations. We save the def instruction in DeadRemats and
  /// replace the original dst register with a new dummy register so that
  /// the live range of the original dst register can be shrunk normally.
  /// We don't want to allocate a phys register for the dummy register, so
  /// we drop it from the NewRegs set.
  void pop_back() { NewRegs.pop_back(); }

  ArrayRef<Register> regs() const { return ArrayRef(NewRegs).slice(FirstNew); }

  /// createFrom - Create a new virtual register based on OldReg.
  Register createFrom(Register OldReg);

  /// create - Create a new register with the same class and original slot as
  /// parent.
  LiveInterval &createEmptyInterval() {
    return createEmptyIntervalFrom(getReg(), true);
  }

  Register create() { return createFrom(getReg()); }

  /// anyRematerializable - Return true if any parent values may be
  /// rematerializable.
  /// This function must be called before any rematerialization is attempted.
  bool anyRematerializable();

  /// checkRematerializable - Manually add VNI to the list of rematerializable
  /// values if DefMI may be rematerializable.
  bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI);

  /// Remat - Information needed to rematerialize at a specific location.
  struct Remat {
    const VNInfo *const ParentVNI;  // parent_'s value at the remat location.
    MachineInstr *OrigMI = nullptr; // Instruction defining OrigVNI. It contains
                                    // the real expr for remat.

    explicit Remat(const VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
  };

  /// allUsesAvailableAt - Return true if all registers used by OrigMI at
  /// OrigIdx are also available with the same value at UseIdx.
  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx) const;

  /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
  /// UseIdx. It is assumed that parent_.getVNInfoAt(UseIdx) == ParentVNI.
  /// When cheapAsAMove is set, only cheap remats are allowed.
  bool canRematerializeAt(Remat &RM, VNInfo *OrigVNI, SlotIndex UseIdx,
                          bool cheapAsAMove);

  /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
  /// instruction into MBB before MI. The new instruction is mapped, but
  /// liveness is not updated. If ReplaceIndexMI is not null it will be replaced
  /// by new MI in the index map.
  /// Return the SlotIndex of the new instruction.
  SlotIndex rematerializeAt(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MI, Register DestReg,
                            const Remat &RM, const TargetRegisterInfo &,
                            bool Late = false, unsigned SubIdx = 0,
                            MachineInstr *ReplaceIndexMI = nullptr);

  /// markRematerialized - explicitly mark a value as rematerialized after doing
  /// it manually.
  void markRematerialized(const VNInfo *ParentVNI) {
    Rematted.insert(ParentVNI);
  }

  /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
  bool didRematerialize(const VNInfo *ParentVNI) const {
    return Rematted.count(ParentVNI);
  }
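
  // A typical rematerialization sequence, sketched with caller-provided
  // values (ParentVNI, OrigVNI, UseIdx, MBB, MI, DestReg, TRI):
  //
  //   LiveRangeEdit::Remat RM(ParentVNI);
  //   if (LRE.canRematerializeAt(RM, OrigVNI, UseIdx, /*cheapAsAMove=*/true)) {
  //     LRE.rematerializeAt(MBB, MI, DestReg, RM, TRI);
  //     LRE.markRematerialized(RM.ParentVNI);
  //   }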

  /// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
  /// to erase it from LIS.
  void eraseVirtReg(Register Reg);

  /// eliminateDeadDefs - Try to delete machine instructions that are now dead
  /// (allDefsAreDead returns true). This may cause live intervals to be
  /// trimmed and further dead defs to be eliminated.
  /// RegsBeingSpilled lists registers currently being spilled by the register
  /// allocator.  These registers should not be split into new intervals
  /// as currently those new intervals are not guaranteed to spill.
  void eliminateDeadDefs(SmallVectorImpl<MachineInstr *> &Dead,
                         ArrayRef<Register> RegsBeingSpilled = std::nullopt);

  /// calculateRegClassAndHint - Recompute register class and hint for each new
  /// register.
  void calculateRegClassAndHint(MachineFunction &, VirtRegAuxInfo &);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVERANGEEDIT_H
//==-- llvm/CodeGen/RegisterBank.h - Register Bank ---------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API of register banks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERBANK_H
#define LLVM_CODEGEN_REGISTERBANK_H

#include "llvm/ADT/BitVector.h"

namespace llvm {
// Forward declarations.
class RegisterBankInfo;
class raw_ostream;
class TargetRegisterClass;
class TargetRegisterInfo;

/// This class implements the register bank concept.
/// Two instances of RegisterBank must have different IDs.
/// This property is enforced by the RegisterBankInfo class.
class RegisterBank {
private:
  unsigned ID;
  const char *Name;
  BitVector ContainedRegClasses;

  /// Sentinel value used to recognize a register bank that has not been
  /// properly initialized yet.
  static const unsigned InvalidID;

  /// Only the RegisterBankInfo can initialize RegisterBank properly.
  friend RegisterBankInfo;

public:
  RegisterBank(unsigned ID, const char *Name, const uint32_t *CoveredClasses,
               unsigned NumRegClasses);

  /// Get the identifier of this register bank.
  unsigned getID() const { return ID; }

  /// Get a user friendly name of this register bank.
  /// Should be used only for debugging purposes.
  const char *getName() const { return Name; }

  /// Check whether this instance is ready to be used.
  bool isValid() const;

  /// Check if this register bank is valid. In other words,
  /// if it has been properly constructed.
  ///
  /// \note This method does not check anything when assertions are disabled.
  ///
  /// \return True if the check was successful.
  bool verify(const RegisterBankInfo &RBI, const TargetRegisterInfo &TRI) const;

  /// Check whether this register bank covers \p RC.
  /// In other words, check if this register bank fully covers
  /// the registers that \p RC contains.
  /// \pre isValid()
  bool covers(const TargetRegisterClass &RC) const;

  /// Check whether \p OtherRB is the same as this.
  bool operator==(const RegisterBank &OtherRB) const;
  bool operator!=(const RegisterBank &OtherRB) const {
    return !this->operator==(OtherRB);
  }

  /// Dump the register mask on dbgs() stream.
  /// The dump is verbose.
  void dump(const TargetRegisterInfo *TRI = nullptr) const;

  /// Print the register mask on OS.
  /// If IsForDebug is false, then only the name of the register bank
  /// is printed. Otherwise, all the fields are printed.
  /// TRI is then used to print the name of the register classes that
  /// this register bank covers.
  void print(raw_ostream &OS, bool IsForDebug = false,
             const TargetRegisterInfo *TRI = nullptr) const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const RegisterBank &RegBank) {
  RegBank.print(OS);
  return OS;
}
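
// Illustrative queries against a bank obtained from RegisterBankInfo (RB and
// RC are assumed to come from the caller's context):
//
//   if (RB.isValid() && RB.covers(RC))
//     dbgs() << RB << '\n'; // streams the bank's name via print(OS)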
} // End namespace llvm.

#endif
//===- llvm/CodeGen/SchedulerRegistry.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation for instruction scheduler function
// pass registry (RegisterScheduler).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
#define LLVM_CODEGEN_SCHEDULERREGISTRY_H

#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/Support/CodeGen.h"

namespace llvm {

//===----------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===----------------------------------------------------------------------===//

class ScheduleDAGSDNodes;
class SelectionDAGISel;

class RegisterScheduler
    : public MachinePassRegistryNode<
          ScheduleDAGSDNodes *(*)(SelectionDAGISel *, CodeGenOpt::Level)> {
public:
  using FunctionPassCtor = ScheduleDAGSDNodes *(*)(SelectionDAGISel*,
                                                   CodeGenOpt::Level);

  static MachinePassRegistry<FunctionPassCtor> Registry;

  RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
      : MachinePassRegistryNode(N, D, C) {
    Registry.Add(this);
  }
  ~RegisterScheduler() { Registry.Remove(this); }


  // Accessors.
  RegisterScheduler *getNext() const {
    return (RegisterScheduler *)MachinePassRegistryNode::getNext();
  }

  static RegisterScheduler *getList() {
    return (RegisterScheduler *)Registry.getList();
  }

  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
    Registry.setListener(L);
  }
};
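
// Registration sketch: exposing a scheduler under its own command-line name.
// createMyScheduler is hypothetical and simply delegates for demonstration;
// its signature must match FunctionPassCtor.
//
//   static ScheduleDAGSDNodes *createMyScheduler(SelectionDAGISel *IS,
//                                                CodeGenOpt::Level OptLevel) {
//     return createBURRListDAGScheduler(IS, OptLevel);
//   }
//   static RegisterScheduler
//       MySchedRegistration("my-sched", "Demo scheduler", createMyScheduler);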

/// createBURRListDAGScheduler - This creates a bottom up register usage
/// reduction list scheduler.
ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
                                               CodeGenOpt::Level OptLevel);

/// createSourceListDAGScheduler - This creates a bottom up list scheduler that
/// schedules nodes in source code order when possible.
ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
                                                 CodeGenOpt::Level OptLevel);

/// createHybridListDAGScheduler - This creates a bottom up register pressure
/// aware list scheduler that makes use of latency information to avoid stalls
/// for long latency instructions in low register pressure mode. In high
/// register pressure mode it schedules to reduce register pressure.
ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
                                                 CodeGenOpt::Level);

/// createILPListDAGScheduler - This creates a bottom up register pressure
/// aware list scheduler that tries to increase instruction level parallelism
/// in low register pressure mode. In high register pressure mode it schedules
/// to reduce register pressure.
ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
                                              CodeGenOpt::Level);

/// createFastDAGScheduler - This creates a "fast" scheduler.
///
ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
                                           CodeGenOpt::Level OptLevel);

/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates a
/// top-down, DFA-driven list scheduler with a clustering heuristic to control
/// register pressure.
ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
                                           CodeGenOpt::Level OptLevel);
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
                                           CodeGenOpt::Level OptLevel);

/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
/// linearizes the DAG using topological order.
ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
                                        CodeGenOpt::Level OptLevel);

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULERREGISTRY_H
//===-- BasicBlockSectionsProfileReader.h - BB sections profile reader pass ==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass creates the basic block cluster info by reading the basic block
// sections profile. The cluster info will be used by the basic-block-sections
// pass to arrange basic blocks in their sections.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICBLOCKSECTIONSPROFILEREADER_H
#define LLVM_CODEGEN_BASICBLOCKSECTIONSPROFILEREADER_H

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"

namespace llvm {

// The cluster information for a machine basic block.
struct BBClusterInfo {
  // Unique ID for this basic block.
  unsigned BBID;
  // Cluster ID this basic block belongs to.
  unsigned ClusterID;
  // Position of basic block within the cluster.
  unsigned PositionInCluster;
};

using ProgramBBClusterInfoMapTy = StringMap<SmallVector<BBClusterInfo>>;

class BasicBlockSectionsProfileReader : public ImmutablePass {
public:
  static char ID;

  BasicBlockSectionsProfileReader(const MemoryBuffer *Buf)
      : ImmutablePass(ID), MBuf(Buf) {
    initializeBasicBlockSectionsProfileReaderPass(
        *PassRegistry::getPassRegistry());
  }

  BasicBlockSectionsProfileReader() : ImmutablePass(ID) {
    initializeBasicBlockSectionsProfileReaderPass(
        *PassRegistry::getPassRegistry());
  }

  StringRef getPassName() const override {
    return "Basic Block Sections Profile Reader";
  }

  // Returns true if a basic block sections profile exists for function \p
  // FuncName.
  bool isFunctionHot(StringRef FuncName) const;

  // Returns a pair whose first element indicates whether a basic block
  // sections profile exists for the function \p FuncName, and whose second
  // element is the basic block sections profile (cluster info) for this
  // function. If the first element is true and the second element is empty,
  // it means unique basic block sections are desired for all basic blocks of
  // the function.
  std::pair<bool, SmallVector<BBClusterInfo>>
  getBBClusterInfoForFunction(StringRef FuncName) const;
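
  // Usage sketch (Reader is the pass instance; assignToSection is a
  // hypothetical consumer):
  //
  //   auto [HasProfile, Clusters] = Reader.getBBClusterInfoForFunction(Name);
  //   if (HasProfile)
  //     for (const BBClusterInfo &CI : Clusters)
  //       assignToSection(CI.BBID, CI.ClusterID, CI.PositionInCluster);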

  // Initializes the FunctionNameToDIFilename map for the current module and
  // then reads the profile for matching functions.
  bool doInitialization(Module &M) override;

private:
  StringRef getAliasName(StringRef FuncName) const {
    auto R = FuncAliasMap.find(FuncName);
    return R == FuncAliasMap.end() ? FuncName : R->second;
  }

  // Reads the basic block sections profile for functions in this module.
  Error ReadProfile();

  // This contains the basic-block-sections profile.
  const MemoryBuffer *MBuf = nullptr;

  // Map from every function name in the module to its debug info filename or
  // empty string if no debug info is available.
  StringMap<SmallString<128>> FunctionNameToDIFilename;

  // This encapsulates the BB cluster information for the whole program.
  //
  // For every function name, it contains the cluster information for (all or
  // some of) its basic blocks. The cluster information for every basic block
  // includes its cluster ID along with the position of the basic block in that
  // cluster.
  ProgramBBClusterInfoMapTy ProgramBBClusterInfo;

  // Some functions have alias names. We use this map to find the main alias
  // name for which we have mapping in ProgramBBClusterInfo.
  StringMap<StringRef> FuncAliasMap;
};

// Creates a BasicBlockSectionsProfileReader pass to parse the basic block
// sections profile. \p Buf is a memory buffer that contains the list of
// functions and basic block ids to selectively enable basic block sections.
ImmutablePass *
createBasicBlockSectionsProfileReaderPass(const MemoryBuffer *Buf);

} // namespace llvm
#endif // LLVM_CODEGEN_BASICBLOCKSECTIONSPROFILEREADER_H
//=- llvm/CodeGen/ScoreboardHazardRecognizer.h - Schedule Support -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ScoreboardHazardRecognizer class, which
// encapsulates hazard-avoidance heuristics for scheduling, based on the
// scheduling itineraries specified for the target.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
#define LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H

#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/MC/MCInstrItineraries.h"
#include <cassert>
#include <cstddef>
#include <cstring>

namespace llvm {

class ScheduleDAG;
class SUnit;

class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
  // Scoreboard to track function unit usage. Scoreboard[0] is a
  // mask of the FUs in use in the cycle currently being
  // scheduled. Scoreboard[1] is a mask for the next cycle. The
  // Scoreboard is used as a circular buffer with the current cycle
  // indicated by Head.
  //
  // Scoreboard always counts cycles in forward execution order. If used by a
  // bottom-up scheduler, then the scoreboard cycles are the inverse of the
  // scheduler's cycles.
  class Scoreboard {
    InstrStage::FuncUnits *Data = nullptr;

    // The maximum number of cycles monitored by the Scoreboard. This
    // value is determined based on the target itineraries to ensure
    // that all hazards can be tracked.
    size_t Depth = 0;

    // Index into the Scoreboard that represents the current cycle.
    size_t Head = 0;

  public:
    Scoreboard() = default;
    Scoreboard &operator=(const Scoreboard &other) = delete;
    Scoreboard(const Scoreboard &other) = delete;
    ~Scoreboard() {
      delete[] Data;
    }

    size_t getDepth() const { return Depth; }

    InstrStage::FuncUnits& operator[](size_t idx) const {
      // Depth is expected to be a power-of-2.
      assert(Depth && !(Depth & (Depth - 1)) &&
             "Scoreboard was not initialized properly!");

      return Data[(Head + idx) & (Depth-1)];
    }

    void reset(size_t d = 1) {
      if (!Data) {
        Depth = d;
        Data = new InstrStage::FuncUnits[Depth];
      }

      memset(Data, 0, Depth * sizeof(Data[0]));
      Head = 0;
    }

    void advance() {
      Head = (Head + 1) & (Depth-1);
    }

    void recede() {
      Head = (Head - 1) & (Depth-1);
    }

    // Print the scoreboard.
    void dump() const;
  };
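
  // Usage sketch for the circular buffer (FUMask is a caller-provided
  // function-unit mask; the depth must be a power of two):
  //
  //   Scoreboard SB;
  //   SB.reset(4);     // four cycles of lookahead
  //   SB[0] |= FUMask; // reserve units in the current cycle
  //   SB.advance();    // the slot formerly at index 1 is now index 0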

  // Support for tracing ScoreboardHazardRecognizer as a component within
  // another module.
  const char *DebugType;

  // Itinerary data for the target.
  const InstrItineraryData *ItinData;

  const ScheduleDAG *DAG;

  /// IssueWidth - Max issue per cycle. 0=Unknown.
  unsigned IssueWidth = 0;

  /// IssueCount - Count instructions issued in this cycle.
  unsigned IssueCount = 0;

  Scoreboard ReservedScoreboard;
  Scoreboard RequiredScoreboard;

public:
  ScoreboardHazardRecognizer(const InstrItineraryData *II,
                             const ScheduleDAG *DAG,
                             const char *ParentDebugType = "");

  /// atIssueLimit - Return true if no more instructions may be issued in this
  /// cycle.
  bool atIssueLimit() const override;

  // Stalls provides a cycle offset at which SU will be scheduled. It will be
  // negative for bottom-up scheduling.
  HazardType getHazardType(SUnit *SU, int Stalls) override;
  void Reset() override;
  void EmitInstruction(SUnit *SU) override;
  void AdvanceCycle() override;
  void RecedeCycle() override;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
//===- TargetPassConfig.h - Code Generation pass options --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Target-Independent Code Generator Pass Configuration Options pass.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETPASSCONFIG_H
#define LLVM_CODEGEN_TARGETPASSCONFIG_H

#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include <cassert>
#include <string>

namespace llvm {

class LLVMTargetMachine;
struct MachineSchedContext;
class PassConfigImpl;
class ScheduleDAGInstrs;
class CSEConfigBase;
class PassInstrumentationCallbacks;

// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {

class PassManagerBase;

} // end namespace legacy

using legacy::PassManagerBase;

/// Discriminated union of Pass ID types.
///
/// The PassConfig API prefers dealing with IDs because they are safer and more
/// efficient. IDs decouple configuration from instantiation. This way, when a
/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe to
/// refer to a Pass pointer after adding it to a pass manager, which deletes
/// redundant pass instances.
///
/// However, it is convenient to directly instantiate target passes with
/// non-default ctors. These often don't have a registered PassInfo. Rather than
/// force all target passes to implement the pass registry boilerplate, allow
/// the PassConfig API to handle either type.
///
/// AnalysisID is sadly char*, so PointerIntPair won't work.
class IdentifyingPassPtr {
  union {
    AnalysisID ID;
    Pass *P;
  };
  bool IsInstance = false;

public:
  IdentifyingPassPtr() : P(nullptr) {}
  IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr) {}
  IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}

  bool isValid() const { return P; }
  bool isInstance() const { return IsInstance; }

  AnalysisID getID() const {
    assert(!IsInstance && "Not a Pass ID");
    return ID;
  }

  Pass *getInstance() const {
    assert(IsInstance && "Not a Pass Instance");
    return P;
  }
};
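
// Both flavors side by side (MyPass is hypothetical; PostRASchedulerID is one
// of the usual extern char& pass-ID symbols):
//
//   IdentifyingPassPtr ByID(&PostRASchedulerID); // refer to a pass by ID
//   IdentifyingPassPtr ByObj(new MyPass());      // refer to a concrete instance
//   AnalysisID ID = ByID.getID();                // asserts !isInstance()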


/// Target-Independent Code Generator Pass Configuration Options.
///
/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
/// to the internals of other CodeGen passes.
class TargetPassConfig : public ImmutablePass {
private:
  PassManagerBase *PM = nullptr;
  AnalysisID StartBefore = nullptr;
  AnalysisID StartAfter = nullptr;
  AnalysisID StopBefore = nullptr;
  AnalysisID StopAfter = nullptr;

  unsigned StartBeforeInstanceNum = 0;
  unsigned StartBeforeCount = 0;

  unsigned StartAfterInstanceNum = 0;
  unsigned StartAfterCount = 0;

  unsigned StopBeforeInstanceNum = 0;
  unsigned StopBeforeCount = 0;

  unsigned StopAfterInstanceNum = 0;
  unsigned StopAfterCount = 0;

  bool Started = true;
  bool Stopped = false;
  bool AddingMachinePasses = false;
  bool DebugifyIsSafe = true;

  /// Set the StartAfter, StartBefore and StopAfter passes to allow running only
  /// a portion of the normal code-gen pass sequence.
  ///
  /// If the StartAfter and StartBefore pass ID is zero, then compilation will
  /// begin at the normal point; otherwise, clear the Started flag to indicate
  /// that passes should not be added until the starting pass is seen.  If the
  /// Stop pass ID is zero, then compilation will continue to the end.
  ///
  /// This function expects that at least one of the StartAfter or the
  /// StartBefore pass IDs is null.
  void setStartStopPasses();

protected:
  LLVMTargetMachine *TM;
  PassConfigImpl *Impl = nullptr; // Internal data structures
  bool Initialized = false; // Flagged after all passes are configured.

  // Target Pass Options
  // Targets provide a default setting, user flags override.
  bool DisableVerify = false;

  /// Default setting for -enable-tail-merge on this target.
  bool EnableTailMerge = true;

  /// Require processing of functions such that callees are generated before
  /// callers.
  bool RequireCodeGenSCCOrder = false;

  /// Add the actual instruction selection passes. This does not include
  /// preparation passes on IR.
  bool addCoreISelPasses();

public:
  TargetPassConfig(LLVMTargetMachine &TM, PassManagerBase &pm);
  // Dummy constructor.
  TargetPassConfig();

  ~TargetPassConfig() override;

  static char ID;

  /// Get the right type of TargetMachine for this target.
  template<typename TMC> TMC &getTM() const {
    return *static_cast<TMC*>(TM);
  }

  void setInitialized() { Initialized = true; }

  CodeGenOpt::Level getOptLevel() const;

  /// Returns true if one of the `-start-after`, `-start-before`, `-stop-after`
  /// or `-stop-before` options is set.
  static bool hasLimitedCodeGenPipeline();

  /// Returns true if none of the `-stop-before` and `-stop-after` options is
  /// set.
  static bool willCompleteCodeGenPipeline();

  /// If hasLimitedCodeGenPipeline is true, this method
  /// returns a string with the name of the options, separated
  /// by \p Separator that caused this pipeline to be limited.
  static std::string
  getLimitedCodeGenPipelineReason(const char *Separator = "/");

  void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }

  bool getEnableTailMerge() const { return EnableTailMerge; }
  void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }

  bool requiresCodeGenSCCOrder() const { return RequireCodeGenSCCOrder; }
  void setRequiresCodeGenSCCOrder(bool Enable = true) {
    setOpt(RequireCodeGenSCCOrder, Enable);
  }

  /// Allow the target to override a specific pass without overriding the pass
  /// pipeline. When passes are added to the standard pipeline at the
  /// point where StandardID is expected, add TargetID in its place.
  void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);

  /// Insert InsertedPassID pass after TargetPassID pass.
  void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID);

  /// Allow the target to enable a specific standard pass by default.
  void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }

  /// Allow the target to disable a specific standard pass by default.
  void disablePass(AnalysisID PassID) {
    substitutePass(PassID, IdentifyingPassPtr());
  }
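
  // Typical target-side tweaks (these pass-ID symbols are the usual extern
  // char& IDs from Passes.h; MyCustomPassID is hypothetical):
  //
  //   disablePass(&EarlyTailDuplicateID);
  //   insertPass(&RegisterCoalescerID, &MyCustomPassID);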

  /// Return the pass substituted for StandardID by the target.
  /// If no substitution exists, return StandardID.
  IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;

  /// Return true if the pass has been substituted by the target or
  /// overridden on the command line.
  bool isPassSubstitutedOrOverridden(AnalysisID ID) const;

  /// Return true if the optimized regalloc pipeline is enabled.
  bool getOptimizeRegAlloc() const;

  /// Return true if the default global register allocator is in use and
  /// has not been overridden on the command line with '-regalloc=...'
  bool usingDefaultRegAlloc() const;

  /// High level function that adds all passes necessary to go from llvm IR
  /// representation to the MI representation.
  /// Adds IR based lowering and target specific optimization passes and finally
  /// the core instruction selection passes.
  /// \returns true if an error occurred, false otherwise.
  bool addISelPasses();

  /// Add common target configurable passes that perform LLVM IR to IR
  /// transforms following machine independent optimization.
  virtual void addIRPasses();

  /// Add passes to lower exception handling for the code generator.
  void addPassesToHandleExceptions();

  /// Add pass to prepare the LLVM IR for code generation. This should be done
  /// before exception handling preparation passes.
  virtual void addCodeGenPrepare();

  /// Add common passes that perform LLVM IR to IR transforms in preparation for
  /// instruction selection.
  virtual void addISelPrepare();

  /// addInstSelector - This method should install an instruction selector pass,
  /// which converts from LLVM code to machine instructions.
  virtual bool addInstSelector() {
    return true;
  }

  /// This method should install an IR translator pass, which converts from
  /// LLVM code to machine instructions with possibly generic opcodes.
  virtual bool addIRTranslator() { return true; }

  /// This method may be implemented by targets that want to run passes
  /// immediately before legalization.
  virtual void addPreLegalizeMachineIR() {}

  /// This method should install a legalize pass, which converts the instruction
  /// sequence into one that can be selected by the target.
  virtual bool addLegalizeMachineIR() { return true; }

  /// This method may be implemented by targets that want to run passes
  /// immediately before the register bank selection.
  virtual void addPreRegBankSelect() {}

  /// This method should install a register bank selector pass, which
  /// assigns register banks to virtual registers without a register
  /// class or register banks.
  virtual bool addRegBankSelect() { return true; }

  /// This method may be implemented by targets that want to run passes
  /// immediately before the (global) instruction selection.
  virtual void addPreGlobalInstructionSelect() {}

  /// This method should install a (global) instruction selector pass, which
  /// converts possibly generic instructions to fully target-specific
  /// instructions, thereby constraining all generic virtual registers to
  /// register classes.
  virtual bool addGlobalInstructionSelect() { return true; }

  /// Add the complete, standard set of LLVM CodeGen passes.
  /// Fully developed targets will not generally override this.
  virtual void addMachinePasses();

  /// Create an instance of ScheduleDAGInstrs to be run within the standard
  /// MachineScheduler pass for this function and target at the current
  /// optimization level.
  ///
  /// This can also be used to plug a new MachineSchedStrategy into an instance
  /// of the standard ScheduleDAGMI:
  ///   return new ScheduleDAGMI(C, std::make_unique<MyStrategy>(C), /*RemoveKillFlags=*/false)
  ///
  /// Return NULL to select the default (generic) machine scheduler.
  virtual ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const {
    return nullptr;
  }

  /// Similar to createMachineScheduler but used when postRA machine scheduling
  /// is enabled.
  virtual ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const {
    return nullptr;
  }

  /// printAndVerify - Add a pass to dump then verify the machine function, if
  /// those steps are enabled.
  void printAndVerify(const std::string &Banner);

  /// Add a pass to print the machine function if printing is enabled.
  void addPrintPass(const std::string &Banner);

  /// Add a pass to perform basic verification of the machine function if
  /// verification is enabled.
  void addVerifyPass(const std::string &Banner);

  /// Add a pass to add synthesized debug info to the MIR.
  void addDebugifyPass();

  /// Add a pass to remove debug info from the MIR.
  void addStripDebugPass();

  /// Add a pass to check synthesized debug info for MIR.
  void addCheckDebugPass();

  /// Add standard passes before a pass that's about to be added. For example,
  /// the DebugifyMachineModulePass if it is enabled.
  void addMachinePrePasses(bool AllowDebugify = true);

  /// Add standard passes after a pass that has just been added. For example,
  /// the MachineVerifier if it is enabled.
  void addMachinePostPasses(const std::string &Banner);

  /// Check whether or not GlobalISel should abort on error.
  /// When this is disabled, GlobalISel will fall back on SDISel instead of
  /// erroring out.
  bool isGlobalISelAbortEnabled() const;

  /// Check whether or not a diagnostic should be emitted when GlobalISel
  /// uses the fallback path. In other words, it will emit a diagnostic
  /// when GlobalISel failed and isGlobalISelAbortEnabled is false.
  virtual bool reportDiagnosticWhenGlobalISelFallback() const;

  /// Check whether continuous CSE should be enabled in GISel passes.
  /// By default, it's enabled for non O0 levels.
  virtual bool isGISelCSEEnabled() const;

  /// Returns the CSEConfig object to use for the current optimization level.
  virtual std::unique_ptr<CSEConfigBase> getCSEConfig() const;

protected:
  // Helper to verify the analysis is really immutable.
  void setOpt(bool &Opt, bool Val);

  /// Return true if register allocator is specified by -regalloc=override.
  bool isCustomizedRegAlloc();

  /// Methods with trivial inline returns are convenient points in the common
  /// codegen pass pipeline where targets may insert passes. Methods with
  /// out-of-line standard implementations are major CodeGen stages called by
  /// addMachinePasses. Some targets may override major stages when inserting
  /// passes is insufficient, but maintaining overridden stages is more work.
  ///

  /// addPreISelPasses - This method should add any "last minute" LLVM->LLVM
  /// passes (which are run just before instruction selector).
  virtual bool addPreISel() {
    return true;
  }

  /// addMachineSSAOptimization - Add standard passes that optimize machine
  /// instructions in SSA form.
  virtual void addMachineSSAOptimization();

  /// Add passes that optimize instruction level parallelism for out-of-order
  /// targets. These passes are run while the machine code is still in SSA
  /// form, so they can use MachineTraceMetrics to control their heuristics.
  ///
  /// All passes added here should preserve the MachineDominatorTree,
  /// MachineLoopInfo, and MachineTraceMetrics analyses.
  virtual bool addILPOpts() {
    return false;
  }

  /// This method may be implemented by targets that want to run passes
  /// immediately before register allocation.
  virtual void addPreRegAlloc() { }

  /// createTargetRegisterAllocator - Create the register allocator pass for
  /// this target at the current optimization level.
  virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);

  /// addFastRegAlloc - Add the minimum set of target-independent passes that
  /// are required for fast register allocation.
  virtual void addFastRegAlloc();

  /// addOptimizedRegAlloc - Add passes related to register allocation.
  /// LLVMTargetMachine provides standard regalloc passes for most targets.
  virtual void addOptimizedRegAlloc();

  /// addPreRewrite - Add passes to the optimized register allocation pipeline
  /// after register allocation is complete, but before virtual registers are
  /// rewritten to physical registers.
  ///
  /// These passes must preserve VirtRegMap and LiveIntervals, and when running
  /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
  /// When these passes run, VirtRegMap contains legal physreg assignments for
  /// all virtual registers.
  ///
  /// Note if the target overloads addRegAssignAndRewriteOptimized, this may not
  /// be honored. This is also not generally used for the fast variant,
  /// where the allocation and rewriting are done in one pass.
  virtual bool addPreRewrite() {
    return false;
  }

  /// addPostFastRegAllocRewrite - Add passes to the optimized register
  /// allocation pipeline after fast register allocation is complete.
  virtual bool addPostFastRegAllocRewrite() { return false; }

  /// Add passes to be run immediately after virtual registers are rewritten
  /// to physical registers.
  virtual void addPostRewrite() { }

  /// This method may be implemented by targets that want to run passes after
  /// register allocation pass pipeline but before prolog-epilog insertion.
  virtual void addPostRegAlloc() { }

  /// Add passes that optimize machine instructions after register allocation.
  virtual void addMachineLateOptimization();

  /// This method may be implemented by targets that want to run passes after
  /// prolog-epilog insertion and before the second instruction scheduling pass.
  virtual void addPreSched2() { }

  /// addGCPasses - Add late codegen passes that analyze code for garbage
  /// collection. This should return true if GC info should be printed after
  /// these passes.
  virtual bool addGCPasses();

  /// Add standard basic block placement passes.
  virtual void addBlockPlacement();

  /// This pass may be implemented by targets that want to run passes
  /// immediately before machine code is emitted.
  virtual void addPreEmitPass() { }

  /// This pass may be implemented by targets that want to run passes
  /// immediately after basic block sections are assigned.
  virtual void addPostBBSections() {}

  /// Targets may add passes immediately before machine code is emitted in this
  /// callback. This is called even later than `addPreEmitPass`.
  // FIXME: Rename `addPreEmitPass` to something more sensible given its actual
  // position and remove the `2` suffix here as this callback is what
  // `addPreEmitPass` *should* be but in reality isn't.
  virtual void addPreEmitPass2() {}

  /// Utilities for targets to add passes to the pass manager.
  ///

  /// Add a CodeGen pass at this point in the pipeline after checking overrides.
  /// Return the pass that was added, or zero if no pass was added.
  AnalysisID addPass(AnalysisID PassID);

  /// Add a pass to the PassManager if that pass is supposed to be run, as
  /// determined by the StartAfter and StopAfter options. Takes ownership of the
  /// pass.
  void addPass(Pass *P);

  /// addMachinePasses helper to create the target-selected or overridden
  /// regalloc pass.
  virtual FunctionPass *createRegAllocPass(bool Optimized);

  /// Add core register allocator passes which do the actual register assignment
  /// and rewriting. \returns true if any passes were added.
  virtual bool addRegAssignAndRewriteFast();
  virtual bool addRegAssignAndRewriteOptimized();
};
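
// An illustrative target subclass (MyTargetMachine, MyPassConfig, and
// createMyISelDag are hypothetical names):
//
//   class MyPassConfig : public TargetPassConfig {
//   public:
//     MyPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
//         : TargetPassConfig(TM, PM) {}
//     bool addInstSelector() override {
//       addPass(createMyISelDag(getTM<MyTargetMachine>()));
//       return false; // success; no fallback needed
//     }
//   };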

void registerCodeGenCallback(PassInstrumentationCallbacks &PIC,
                             LLVMTargetMachine &);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETPASSCONFIG_H
//===- CodeGen/MachineConstantPool.h - Abstract Constant Pool ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file declares the MachineConstantPool class which is an abstract
/// constant pool to keep track of constants referenced by a function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Alignment.h"
#include <climits>
#include <vector>

namespace llvm {

class Constant;
class DataLayout;
class FoldingSetNodeID;
class MachineConstantPool;
class raw_ostream;
class Type;

/// Abstract base class for all machine specific constantpool value subclasses.
///
class MachineConstantPoolValue {
  virtual void anchor();

  Type *Ty;

public:
  explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
  virtual ~MachineConstantPoolValue() = default;

  Type *getType() const { return Ty; }

  virtual unsigned getSizeInBytes(const DataLayout &DL) const;

  virtual int getExistingMachineCPValue(MachineConstantPool *CP,
                                        Align Alignment) = 0;

  virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;

  /// print - Implement operator<<
  virtual void print(raw_ostream &O) const = 0;
};

inline raw_ostream &operator<<(raw_ostream &OS,
                               const MachineConstantPoolValue &V) {
  V.print(OS);
  return OS;
}

/// This class is a data container for one entry in a MachineConstantPool.
/// It contains a pointer to the value and an offset from the start of
/// the constant pool.
class MachineConstantPoolEntry {
public:
  /// The constant itself.
  union {
    const Constant *ConstVal;
    MachineConstantPoolValue *MachineCPVal;
  } Val;

  /// The required alignment for this entry.
  Align Alignment;

  bool IsMachineConstantPoolEntry;

  MachineConstantPoolEntry(const Constant *V, Align A)
      : Alignment(A), IsMachineConstantPoolEntry(false) {
    Val.ConstVal = V;
  }

  MachineConstantPoolEntry(MachineConstantPoolValue *V, Align A)
      : Alignment(A), IsMachineConstantPoolEntry(true) {
    Val.MachineCPVal = V;
  }

  /// isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry
  /// is indeed a target specific constantpool entry, not a wrapper over a
  /// Constant.
  bool isMachineConstantPoolEntry() const { return IsMachineConstantPoolEntry; }

  Align getAlign() const { return Alignment; }

  unsigned getSizeInBytes(const DataLayout &DL) const;

  /// This method classifies the entry according to whether or not it may
  /// generate a relocation entry.  This must be conservative, so if it might
  /// codegen to a relocatable entry, it should say so.
  bool needsRelocation() const;

  SectionKind getSectionKind(const DataLayout *DL) const;
};

/// The MachineConstantPool class keeps track of constants referenced by a
/// function which must be spilled to memory.  This is used for constants which
/// are unable to be used directly as operands to instructions, which typically
/// include floating point and large integer constants.
///
/// Instructions reference the address of these constant pool constants through
/// the use of MO_ConstantPoolIndex values.  When emitting assembly or machine
/// code, these virtual address references are converted to refer to the
/// address of the function constant pool values.
/// The machine constant pool.
class MachineConstantPool {
  Align PoolAlignment; ///< The alignment for the pool.
  std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
  /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
  DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
  const DataLayout &DL;

  const DataLayout &getDataLayout() const { return DL; }

public:
  /// The only constructor.
  explicit MachineConstantPool(const DataLayout &DL)
      : PoolAlignment(1), DL(DL) {}
  ~MachineConstantPool();

  /// Return the alignment required by the whole constant pool, to which the
  /// first element must be aligned.
  Align getConstantPoolAlign() const { return PoolAlignment; }

  /// getConstantPoolIndex - Create a new entry in the constant pool or return
  /// an existing one.  User must specify the minimum required alignment for
  /// the object.
  unsigned getConstantPoolIndex(const Constant *C, Align Alignment);
  unsigned getConstantPoolIndex(MachineConstantPoolValue *V, Align Alignment);
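
  // Creation/reuse sketch (C is an existing llvm::Constant*; requesting the
  // same constant with a compatible alignment yields the same index):
  //
  //   MachineConstantPool &MCP = *MF.getConstantPool();
  //   unsigned Idx = MCP.getConstantPoolIndex(C, Align(8));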

  /// isEmpty - Return true if this constant pool contains no constants.
  bool isEmpty() const { return Constants.empty(); }

  const std::vector<MachineConstantPoolEntry> &getConstants() const {
    return Constants;
  }

  /// print - Used by the MachineFunction printer to print information about
  /// constant pool objects.  Implemented in MachineFunction.cpp
  void print(raw_ostream &OS) const;

  /// dump - Call print(cerr) to be called from the debugger.
  void dump() const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINECONSTANTPOOL_H
//===-- CFIFixup.h - Insert CFI remember/restore instructions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Contains definition of the base CFIFixup pass.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CFIFIXUP_H
#define LLVM_CODEGEN_CFIFIXUP_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

namespace llvm {
class CFIFixup : public MachineFunctionPass {
public:
  static char ID;

  CFIFixup() : MachineFunctionPass(ID) {
    initializeCFIFixupPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};
} // namespace llvm

#endif // LLVM_CODEGEN_CFIFIXUP_H
//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveVariables analysis pass.  For each machine
// instruction in the function, this pass calculates the set of registers that
// are immediately dead after the instruction (i.e., the instruction calculates
// the value, but it is never used) and the set of registers that are used by
// the instruction, but are never used after the instruction (i.e., they are
// killed).
//
// This class computes live variables using a sparse implementation based on
// the machine code SSA form.  This class computes live variable information for
// each virtual and _register allocatable_ physical register in a function.  It
// uses the dominance properties of SSA form to efficiently compute live
// variables for virtual registers, and assumes that physical registers are only
// live within a single basic block (allowing it to do a single local analysis
// to resolve physical register lifetimes in each basic block).  If a physical
// register is not register allocatable, it is not tracked.  This is useful for
// things like the stack pointer and condition codes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
#define LLVM_CODEGEN_LIVEVARIABLES_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"

namespace llvm {

class MachineBasicBlock;
class MachineRegisterInfo;

class LiveVariables : public MachineFunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  LiveVariables() : MachineFunctionPass(ID) {
    initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
  }

  /// VarInfo - This represents the regions where a virtual register is live in
  /// the program.  We represent this with three different pieces of
  /// information: the set of blocks in which the instruction is live
  /// throughout, the set of blocks in which the instruction is actually used,
  /// and the set of non-phi instructions that are the last users of the value.
  ///
  /// In the common case where a value is defined and killed in the same block,
  /// There is one killing instruction, and AliveBlocks is empty.
  ///
  /// Otherwise, the value is live out of the block.  If the value is live
  /// throughout any blocks, these blocks are listed in AliveBlocks.  Blocks
  /// where the liveness range ends are not included in AliveBlocks, instead
  /// being captured by the Kills set.  In these blocks, the value is live into
  /// the block (unless the value is defined and killed in the same block) and
  /// lives until the specified instruction.  Note that there cannot ever be a
  /// value whose Kills set contains two instructions from the same basic block.
  ///
  /// PHI nodes complicate things a bit.  If a PHI node is the last user of a
  /// value in one of its predecessor blocks, it is not listed in the kills set,
  /// but does include the predecessor block in the AliveBlocks set (unless that
  /// block also defines the value).  This leads to the (perfectly sensical)
  /// situation where a value is defined in a block, and the last use is a phi
  /// node in the successor.  In this case, AliveBlocks is empty (the value is
  /// not live across any blocks) and Kills is empty (phi nodes are not
  /// included). This is sensical because the value must be live to the end of
  /// the block, but is not live in any successor blocks.
  struct VarInfo {
    /// AliveBlocks - Set of blocks in which this value is alive completely
    /// through.  This is a bit set which uses the basic block number as an
    /// index.
    ///
    SparseBitVector<> AliveBlocks;

    /// Kills - List of MachineInstrs that are the last use of this
    /// virtual register (kill it) in their basic block.
    ///
    std::vector<MachineInstr*> Kills;

    /// removeKill - Delete a kill corresponding to the specified
    /// machine instruction. Returns true if there was a kill
    /// corresponding to this instruction, false otherwise.
    bool removeKill(MachineInstr &MI) {
      std::vector<MachineInstr *>::iterator I = find(Kills, &MI);
      if (I == Kills.end())
        return false;
      Kills.erase(I);
      return true;
    }

    /// findKill - Find a kill instruction in MBB. Return NULL if none is found.
    MachineInstr *findKill(const MachineBasicBlock *MBB) const;

    /// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
    /// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
    /// MBB, it is not considered live in.
    bool isLiveIn(const MachineBasicBlock &MBB, Register Reg,
                  MachineRegisterInfo &MRI);

    void dump() const;
  };

private:
  /// VirtRegInfo - This list is a mapping from virtual register number to
  /// variable information.
  ///
  IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;

  /// PHIJoins - list of virtual registers that are PHI joins. These registers
  /// may have multiple definitions, and they require special handling when
  /// building live intervals.
  SparseBitVector<> PHIJoins;

private:   // Intermediate data structures
  MachineFunction *MF = nullptr;

  MachineRegisterInfo *MRI = nullptr;

  const TargetRegisterInfo *TRI = nullptr;

  // PhysRegDef - Keep track of which instruction was the last def of a
  // physical register. This is a purely local property, because all physical
  // register references are presumed dead across basic blocks.
  std::vector<MachineInstr *> PhysRegDef;

  // PhysRegUse - Keep track of which instruction was the last use of a
  // physical register. This is a purely local property, because all physical
  // register references are presumed dead across basic blocks.
  std::vector<MachineInstr *> PhysRegUse;

  std::vector<SmallVector<unsigned, 4>> PHIVarInfo;

  // DistanceMap - Keep track of the distance of each MI from the start of the
  // current basic block.
  DenseMap<MachineInstr*, unsigned> DistanceMap;

  /// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
  /// uses. Pay special attention to the sub-register uses which may come below
  /// the last use of the whole register.
  bool HandlePhysRegKill(Register Reg, MachineInstr *MI);

  /// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
  void HandleRegMask(const MachineOperand&);

  void HandlePhysRegUse(Register Reg, MachineInstr &MI);
  void HandlePhysRegDef(Register Reg, MachineInstr *MI,
                        SmallVectorImpl<unsigned> &Defs);
  void UpdatePhysRegDefs(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);

  /// FindLastRefOrPartRef - Return the last reference or partial reference of
  /// the specified register.
  MachineInstr *FindLastRefOrPartRef(Register Reg);

  /// FindLastPartialDef - Return the last partial def of the specified
  /// register. Also returns the sub-registers that're defined by the
  /// instruction.
  MachineInstr *FindLastPartialDef(Register Reg,
                                   SmallSet<unsigned, 4> &PartDefRegs);

  /// analyzePHINodes - Gather information about the PHI nodes in here. In
  /// particular, we want to map the variable information of a virtual
  /// register which is used in a PHI node. We map that to the BB the vreg
  /// is coming from.
  void analyzePHINodes(const MachineFunction& Fn);

  void runOnInstr(MachineInstr &MI, SmallVectorImpl<unsigned> &Defs);

  void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
public:

  bool runOnMachineFunction(MachineFunction &MF) override;

  /// RegisterDefIsDead - Return true if the specified instruction defines the
  /// specified register, but that definition is dead.
  bool RegisterDefIsDead(MachineInstr &MI, Register Reg) const;

  //===--------------------------------------------------------------------===//
  //  API to update live variable information

  /// Recompute liveness from scratch for a virtual register \p Reg that is
  /// known to have a single def that dominates all uses. This can be useful
  /// after removing some uses of \p Reg. It is not necessary for the whole
  /// machine function to be in SSA form.
  void recomputeForSingleDefVirtReg(Register Reg);

  /// replaceKillInstruction - Update register kill info by replacing a kill
  /// instruction with a new one.
  void replaceKillInstruction(Register Reg, MachineInstr &OldMI,
                              MachineInstr &NewMI);

  /// addVirtualRegisterKilled - Add information about the fact that the
  /// specified register is killed after being used by the specified
  /// instruction. If AddIfNotFound is true, add an implicit operand if it's
  /// not found.
  void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI,
                                bool AddIfNotFound = false) {
    if (MI.addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
      getVarInfo(IncomingReg).Kills.push_back(&MI);
  }

  /// removeVirtualRegisterKilled - Remove the specified kill of the virtual
  /// register from the live variable information. Returns true if the
  /// variable was marked as killed by the specified instruction,
  /// false otherwise.
  bool removeVirtualRegisterKilled(Register Reg, MachineInstr &MI) {
    if (!getVarInfo(Reg).removeKill(MI))
      return false;

    bool Removed = false;
    for (MachineOperand &MO : MI.operands()) {
      if (MO.isReg() && MO.isKill() && MO.getReg() == Reg) {
        MO.setIsKill(false);
        Removed = true;
        break;
      }
    }

    assert(Removed && "Register is not used by this instruction!");
    (void)Removed;
    return true;
  }
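
  // A minimal update sketch (hypothetical caller code), assuming a kill of
  // Reg is being moved from OldMI to NewMI:
  //
  //   if (LV.removeVirtualRegisterKilled(Reg, OldMI))
  //     LV.addVirtualRegisterKilled(Reg, NewMI, /*AddIfNotFound=*/true);
  //
  // replaceKillInstruction() below performs the equivalent bookkeeping in one
  // call.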

  /// removeVirtualRegistersKilled - Remove all killed info for the specified
  /// instruction.
  void removeVirtualRegistersKilled(MachineInstr &MI);

  /// addVirtualRegisterDead - Add information about the fact that the specified
  /// register is dead after being used by the specified instruction. If
  /// AddIfNotFound is true, add an implicit operand if it's not found.
  void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI,
                              bool AddIfNotFound = false) {
    if (MI.addRegisterDead(IncomingReg, TRI, AddIfNotFound))
      getVarInfo(IncomingReg).Kills.push_back(&MI);
  }

  /// removeVirtualRegisterDead - Remove the specified dead def of the virtual
  /// register from the live variable information. Returns true if the
  /// variable was marked dead at the specified instruction, false
  /// otherwise.
  bool removeVirtualRegisterDead(Register Reg, MachineInstr &MI) {
    if (!getVarInfo(Reg).removeKill(MI))
      return false;

    bool Removed = false;
    for (MachineOperand &MO : MI.operands()) {
      if (MO.isReg() && MO.isDef() && MO.getReg() == Reg) {
        MO.setIsDead(false);
        Removed = true;
        break;
      }
    }
    assert(Removed && "Register is not defined by this instruction!");
    (void)Removed;
    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void releaseMemory() override {
    VirtRegInfo.clear();
  }

  /// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
  /// register.
  VarInfo &getVarInfo(Register Reg);

  void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
                               MachineBasicBlock *BB);
  void MarkVirtRegAliveInBlock(VarInfo &VRInfo, MachineBasicBlock *DefBlock,
                               MachineBasicBlock *BB,
                               SmallVectorImpl<MachineBasicBlock *> &WorkList);

  void HandleVirtRegDef(Register reg, MachineInstr &MI);
  void HandleVirtRegUse(Register reg, MachineBasicBlock *MBB, MachineInstr &MI);

  bool isLiveIn(Register Reg, const MachineBasicBlock &MBB) {
    return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
  }

  /// isLiveOut - Determine if Reg is live out from MBB, when not considering
  /// PHI nodes. This means that Reg is either killed by a successor block or
  /// passed through one.
  bool isLiveOut(Register Reg, const MachineBasicBlock &MBB);

  /// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
  /// variables that are live out of DomBB and live into SuccBB will be marked
  /// as passing live through BB. This method assumes that the machine code is
  /// still in SSA form.
  void addNewBlock(MachineBasicBlock *BB,
                   MachineBasicBlock *DomBB,
                   MachineBasicBlock *SuccBB);

  void addNewBlock(MachineBasicBlock *BB,
                   MachineBasicBlock *DomBB,
                   MachineBasicBlock *SuccBB,
                   std::vector<SparseBitVector<>> &LiveInSets);

  /// isPHIJoin - Return true if Reg is a phi join register.
  bool isPHIJoin(Register Reg) { return PHIJoins.test(Reg.id()); }

  /// setPHIJoin - Mark Reg as a phi join register.
  void setPHIJoin(Register Reg) { PHIJoins.set(Reg.id()); }
};

} // End llvm namespace

#endif
//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachORelocation class.
//
//===----------------------------------------------------------------------===//


#ifndef LLVM_CODEGEN_MACHORELOCATION_H
#define LLVM_CODEGEN_MACHORELOCATION_H

#include "llvm/Support/DataTypes.h"

namespace llvm {

  /// MachORelocation - This struct contains information about each relocation
  /// that needs to be emitted to the file.
  /// see <mach-o/reloc.h>
  class MachORelocation {
    uint32_t r_address;   // offset in the section to what is being relocated
    uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
    bool     r_pcrel;     // was relocated pc-relative already
    uint8_t  r_length;    // length = 2 ^ r_length
    bool     r_extern;    // 1 = r_symbolnum is a symbol index, 0 = a section index
    uint8_t  r_type;      // if not 0, machine-specific relocation type.
    bool     r_scattered; // 1 = scattered, 0 = non-scattered
    int32_t  r_value;     // the value the item to be relocated is referring
                          // to.
  public:
    uint32_t getPackedFields() const {
      if (r_scattered)
        return (1u << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
          ((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
      else
        return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
          (r_extern << 4) | (r_type & 15);
    }
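
    // Worked example (values hypothetical): a non-scattered, pc-relative,
    // 4-byte (r_length = 2), external relocation of type 0 against symbol #5:
    //   MachORelocation R(/*addr=*/0x10, /*index=*/5, /*pcrel=*/true,
    //                     /*len=*/2, /*ext=*/true, /*type=*/0);
    //   R.getPackedFields() == (5 << 8) | (1 << 7) | (2 << 5) | (1 << 4) | 0
    //                       == 0x5D0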
    uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
    uint32_t getRawAddress() const { return r_address; }

    MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
                    bool ext, uint8_t type, bool scattered = false,
                    int32_t value = 0) :
      r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
      r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
  };

} // end llvm namespace

#endif // LLVM_CODEGEN_MACHORELOCATION_H
//===- CodeGenCommonISel.h - Common code between ISels ---------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares common utilities that are shared between SelectionDAG and
// GlobalISel frameworks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_CODEGENCOMMONISEL_H
#define LLVM_CODEGEN_CODEGENCOMMONISEL_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include <cassert>
namespace llvm {

class BasicBlock;
enum FPClassTest : unsigned;

/// Encapsulates all of the information needed to generate a stack protector
/// check, and signals to isel when initialized that one needs to be generated.
///
/// *NOTE* The following is a high level documentation of SelectionDAG Stack
/// Protector Generation. This has now also been ported to be shared with
/// GlobalISel, but without any significant changes.
///
/// High Level Overview of ISel Stack Protector Generation:
///
/// Previously, the "stack protector" IR pass handled stack protector
/// generation. This necessitated splitting basic blocks at the IR level to
/// create the success/failure basic blocks in the tail of the basic block in
/// question. As a result of this, calls that would have qualified for the
/// sibling call optimization were no longer eligible for optimization since
/// said calls were no longer right in the "tail position" (i.e. the immediate
/// predecessor of a ReturnInst instruction).
///
/// Since the sibling call optimization causes the callee to reuse the caller's
/// stack, if we could delay the generation of the stack protector check until
/// later in CodeGen after the sibling call decision was made, we get both the
/// tail call optimization and the stack protector check!
///
/// A few goals in solving this problem were:
///
///   1. Preserve the architecture independence of stack protector generation.
///
///   2. Preserve the normal IR level stack protector check for platforms like
///      OpenBSD for which we support platform-specific stack protector
///      generation.
///
/// The main problem that guided the present solution is that one cannot
/// solve this problem in an architecture-independent manner at the IR level
/// only. This is because:
///
///   1. The decision on whether or not to perform a sibling call on certain
///      platforms (for instance i386) requires lower level information
///      related to available registers that cannot be known at the IR level.
///
///   2. Even if the previous point were not true, the decision on whether to
///      perform a tail call is done in LowerCallTo in SelectionDAG (or
///      CallLowering in GlobalISel) which occurs after the Stack Protector
///      Pass. As a result, one would need to put the relevant callinst into the
///      stack protector check success basic block (where the return inst is
///      placed) and then move it back later at ISel/MI time before the
///      stack protector check if the tail call optimization failed. The MI
///      level option was nixed immediately since it would require
///      platform-specific pattern matching. The ISel level option was
///      nixed because SelectionDAG only processes one IR level basic block at a
///      time implying one could not create a DAG Combine to move the callinst.
///
/// To get around this problem:
///
///   1. Although SelectionDAG can only process one block at a time, we can
///      generate multiple machine basic blocks for one IR level basic block.
///      This is how we handle bit tests and switches.
///
///   2. At the MI level, tail calls are represented via a special return
///      MIInst called "tcreturn". Thus if we know the basic block in which we
///      wish to insert the stack protector check, we get the correct behavior
///      by always inserting the stack protector check right before the return
///      statement. This is a "magical transformation" since no matter where
///      the stack protector check intrinsic is, we always insert the stack
///      protector check code at the end of the BB.
///
/// Given the aforementioned constraints, the following solution was devised:
///
///   1. On platforms that do not support ISel stack protector check
///      generation, allow for the normal IR level stack protector check
///      generation to continue.
///
///   2. On platforms that do support ISel stack protector check
///      generation:
///
///     a. Use the IR level stack protector pass to decide if a stack
///        protector is required/which BB we insert the stack protector check
///        in by reusing the logic already therein.
///
///     b. After we finish selecting the basic block, we produce the validation
///        code with one of these techniques:
///          1) with a call to a guard check function
///          2) with inlined instrumentation
///
///        1) We insert a call to the check function before the terminator.
///
///        2) We first find a splice point in the parent basic block
///        before the terminator and then splice the terminator of said basic
///        block into the success basic block. Then we code-gen a new tail for
///        the parent basic block consisting of the two loads, the comparison,
///        and finally two branches to the success/failure basic blocks. We
///        conclude by code-gening the failure basic block if we have not
///        code-gened it already (all stack protector checks we generate in
///        the same function, use the same failure basic block).
class StackProtectorDescriptor {
public:
  StackProtectorDescriptor() = default;

  /// Returns true if all fields of the stack protector descriptor are
  /// initialized, implying that we should/are ready to emit a stack protector.
  bool shouldEmitStackProtector() const {
    return ParentMBB && SuccessMBB && FailureMBB;
  }

  bool shouldEmitFunctionBasedCheckStackProtector() const {
    return ParentMBB && !SuccessMBB && !FailureMBB;
  }

  /// Initialize the stack protector descriptor structure for a new basic
  /// block.
  void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
                  bool FunctionBasedInstrumentation) {
    // Make sure we are not initialized yet.
    assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
                                          "already initialized!");
    ParentMBB = MBB;
    if (!FunctionBasedInstrumentation) {
      SuccessMBB = addSuccessorMBB(BB, MBB, /* IsLikely */ true);
      FailureMBB = addSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
    }
  }

  /// Reset state that changes when we handle different basic blocks.
  ///
  /// This currently includes:
  ///
  /// 1. The specific basic block we are generating a
  /// stack protector for (ParentMBB).
  ///
  /// 2. The successor machine basic block that will contain the tail of
  /// parent mbb after we create the stack protector check (SuccessMBB). This
  /// BB is visited only on stack protector check success.
  void resetPerBBState() {
    ParentMBB = nullptr;
    SuccessMBB = nullptr;
  }

  /// Reset state that only changes when we switch functions.
  ///
  /// This currently includes:
  ///
  /// 1. FailureMBB since we reuse the failure code path for all stack
  /// protector checks created in an individual function.
  ///
  /// 2. The guard variable, since the guard variable we are checking against is
  /// always the same.
  void resetPerFunctionState() { FailureMBB = nullptr; }
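
  // Lifecycle sketch (hypothetical driver code, not actual ISel):
  //   SPD.initialize(BB, MBB, /*FunctionBasedInstrumentation=*/false);
  //   if (SPD.shouldEmitStackProtector())
  //     ... emit the check using getParentMBB()/getSuccessMBB()/getFailureMBB()
  //   SPD.resetPerBBState();        // after finishing each protected block
  //   SPD.resetPerFunctionState();  // when moving on to the next function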

  MachineBasicBlock *getParentMBB() { return ParentMBB; }
  MachineBasicBlock *getSuccessMBB() { return SuccessMBB; }
  MachineBasicBlock *getFailureMBB() { return FailureMBB; }

private:
  /// The basic block for which we are generating the stack protector.
  ///
  /// As a result of stack protector generation, we will splice the
  /// terminators of this basic block into the successor mbb SuccessMBB and
  /// replace it with a compare/branch to the successor mbbs
  /// SuccessMBB/FailureMBB depending on whether or not the stack protector
  /// was violated.
  MachineBasicBlock *ParentMBB = nullptr;

  /// A basic block visited on stack protector check success that contains the
  /// terminators of ParentMBB.
  MachineBasicBlock *SuccessMBB = nullptr;

  /// This basic block visited on stack protector check failure that will
  /// contain a call to __stack_chk_fail().
  MachineBasicBlock *FailureMBB = nullptr;

  /// Add a successor machine basic block to ParentMBB. If the successor mbb
  /// has not been created yet (i.e. if SuccMBB == nullptr), then the machine
  /// block will be created. Assign a large weight if IsLikely is true.
  MachineBasicBlock *addSuccessorMBB(const BasicBlock *BB,
                                     MachineBasicBlock *ParentMBB,
                                     bool IsLikely,
                                     MachineBasicBlock *SuccMBB = nullptr);
};

/// Find the split point at which to splice the end of BB into its success stack
/// protector check machine basic block.
///
/// On many platforms, due to ABI constraints, terminators, even before register
/// allocation, use physical registers. This creates an issue for us since
/// physical registers at this point cannot travel across basic
/// blocks. Luckily, selectiondag always moves physical registers into vregs
/// when they enter functions and moves them through a sequence of copies back
/// into the physical registers right before the terminator creating a
/// ``Terminator Sequence''. This function is searching for the beginning of the
/// terminator sequence so that we can ensure that we splice off not just the
/// terminator, but additionally the copies that move the vregs into the
/// physical registers.
MachineBasicBlock::iterator
findSplitPointForStackProtector(MachineBasicBlock *BB,
                                const TargetInstrInfo &TII);

/// Evaluates if the specified FP class test is better performed as the inverse
/// (i.e. fewer instructions should be required to lower it).  An example is the
/// test "inf|normal|subnormal|zero", which is an inversion of "nan".
/// \param Test The test as specified in 'is_fpclass' intrinsic invocation.
/// \returns The inverted test, or fcNone, if inversion does not produce a
/// simpler test.
FPClassTest invertFPClassTestIfSimpler(FPClassTest Test);

/// Assuming the instruction \p MI is going to be deleted, attempt to salvage
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 ArrayRef<MachineOperand *> DbgUsers);

} // namespace llvm

#endif // LLVM_CODEGEN_CODEGENCOMMONISEL_H
//===-- CostTable.h - Instruction Cost Table handling -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Cost tables and simple lookup functions
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_COSTTABLE_H_
#define LLVM_CODEGEN_COSTTABLE_H_

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineValueType.h"

namespace llvm {

/// Cost Table Entry
template <typename CostType>
struct CostTblEntryT {
  int ISD;
  MVT::SimpleValueType Type;
  CostType Cost;
};
using CostTblEntry = CostTblEntryT<unsigned>;

/// Find in cost table.
template <class CostType>
inline const CostTblEntryT<CostType> *
CostTableLookup(ArrayRef<CostTblEntryT<CostType>> Tbl, int ISD, MVT Ty) {
  auto I = find_if(Tbl, [=](const CostTblEntryT<CostType> &Entry) {
    return ISD == Entry.ISD && Ty == Entry.Type;
  });
  if (I != Tbl.end())
    return I;

  // Could not find an entry.
  return nullptr;
}

template <size_t N, class CostType>
inline const CostTblEntryT<CostType> *
CostTableLookup(const CostTblEntryT<CostType> (&Table)[N], int ISD, MVT Ty) {
  // Wrapper to fix template argument deduction failures.
  return CostTableLookup<CostType>(Table, ISD, Ty);
}
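
// Usage sketch (hypothetical table contents): query the cost of an integer
// add on v4i32, falling back to caller-provided logic when no entry matches.
//
//   static const CostTblEntry Tbl[] = {
//       {ISD::ADD, MVT::v4i32, 1},
//   };
//   if (const auto *Entry = CostTableLookup(Tbl, ISD::ADD, MVT::v4i32))
//     return Entry->Cost;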

/// Type Conversion Cost Table
template <typename CostType>
struct TypeConversionCostTblEntryT {
  int ISD;
  MVT::SimpleValueType Dst;
  MVT::SimpleValueType Src;
  CostType Cost;
};
using TypeConversionCostTblEntry = TypeConversionCostTblEntryT<unsigned>;

/// Find in type conversion cost table.
template <class CostType>
inline const TypeConversionCostTblEntryT<CostType> *
ConvertCostTableLookup(ArrayRef<TypeConversionCostTblEntryT<CostType>> Tbl,
                       int ISD, MVT Dst, MVT Src) {
  auto I =
      find_if(Tbl, [=](const TypeConversionCostTblEntryT<CostType> &Entry) {
        return ISD == Entry.ISD && Src == Entry.Src && Dst == Entry.Dst;
      });
  if (I != Tbl.end())
    return I;

  // Could not find an entry.
  return nullptr;
}

template <size_t N, class CostType>
inline const TypeConversionCostTblEntryT<CostType> *
ConvertCostTableLookup(const TypeConversionCostTblEntryT<CostType> (&Table)[N],
                       int ISD, MVT Dst, MVT Src) {
  // Wrapper to fix template argument deduction failures.
  return ConvertCostTableLookup<CostType>(Table, ISD, Dst, Src);
}
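
// Analogous sketch (hypothetical costs) for a conversion: the cost of
// sign-extending v4i16 to v4i32.
//
//   static const TypeConversionCostTblEntry Tbl[] = {
//       {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1},
//   };
//   const auto *Entry =
//       ConvertCostTableLookup(Tbl, ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16);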

} // namespace llvm

#endif /* LLVM_CODEGEN_COSTTABLE_H_ */
//===- DetectDeadLanes.h - SubRegister Lane Usage Analysis --*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Analysis that tracks defined/used subregister lanes across COPY instructions
/// and instructions that get lowered to a COPY (PHI, REG_SEQUENCE,
/// INSERT_SUBREG, EXTRACT_SUBREG).
/// The information is used to detect dead definitions and the usage of
/// (completely) undefined values and mark the operands as such.
/// This pass is necessary because the dead/undef status is not obvious anymore
/// when subregisters are involved.
///
/// Example:
///    %0 = some definition
///    %1 = IMPLICIT_DEF
///    %2 = REG_SEQUENCE %0, sub0, %1, sub1
///    %3 = EXTRACT_SUBREG %2, sub1
///       = use %3
/// The %0 definition is dead and %3 contains an undefined value.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_DETECTDEADLANES_H
#define LLVM_CODEGEN_DETECTDEADLANES_H

#include "llvm/ADT/BitVector.h"
#include "llvm/MC/LaneBitmask.h"
#include <deque>

namespace llvm {

class MachineInstr;
class MachineOperand;
class MachineRegisterInfo;
class TargetRegisterInfo;

class DeadLaneDetector {
public:
  /// Contains a bitmask of which lanes of a given virtual register are
  /// defined and which ones are actually used.
  struct VRegInfo {
    LaneBitmask UsedLanes;
    LaneBitmask DefinedLanes;
  };

  DeadLaneDetector(const MachineRegisterInfo *MRI,
                   const TargetRegisterInfo *TRI);

  /// Update the \p DefinedLanes and the \p UsedLanes for all virtual registers.
  void computeSubRegisterLaneBitInfo();

  const VRegInfo &getVRegInfo(unsigned RegIdx) const {
    return VRegInfos[RegIdx];
  }

  bool isDefinedByCopy(unsigned RegIdx) const {
    return DefinedByCopy.test(RegIdx);
  }
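
  // Typical use (hypothetical sketch): run the analysis once, then query
  // per-vreg lane info to find fully dead definitions.
  //
  //   DeadLaneDetector DLD(&MRI, &TRI);
  //   DLD.computeSubRegisterLaneBitInfo();
  //   unsigned RegIdx = Register::virtReg2Index(Reg);
  //   const DeadLaneDetector::VRegInfo &Info = DLD.getVRegInfo(RegIdx);
  //   bool FullyDead = (Info.UsedLanes & Info.DefinedLanes).none();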

private:
  /// Add used lane bits on the register used by operand \p MO. This translates
  /// the bitmask based on the operands subregister, and puts the register into
  /// the worklist if any new bits were added.
  void addUsedLanesOnOperand(const MachineOperand &MO, LaneBitmask UsedLanes);

  /// Given a bitmask \p UsedLanes for the used lanes on a def output of a
  /// COPY-like instruction determine the lanes used on the use operands
  /// and call addUsedLanesOnOperand() for them.
  void transferUsedLanesStep(const MachineInstr &MI, LaneBitmask UsedLanes);

  /// Given a use register operand \p Use and a mask of defined lanes, check
  /// if the operand belongs to a lowersToCopies() instruction, transfer the
  /// mask to the def and put the instruction into the worklist.
  void transferDefinedLanesStep(const MachineOperand &Use,
                                LaneBitmask DefinedLanes);

public:
  /// Given a mask \p DefinedLanes of lanes defined at operand \p OpNum
  /// of COPY-like instruction, determine which lanes are defined at the output
  /// operand \p Def.
  LaneBitmask transferDefinedLanes(const MachineOperand &Def, unsigned OpNum,
                                   LaneBitmask DefinedLanes) const;

  /// Given a mask \p UsedLanes used from the output of instruction \p MI
  /// determine which lanes are used from operand \p MO of this instruction.
  LaneBitmask transferUsedLanes(const MachineInstr &MI, LaneBitmask UsedLanes,
                                const MachineOperand &MO) const;

private:
  LaneBitmask determineInitialDefinedLanes(unsigned Reg);
  LaneBitmask determineInitialUsedLanes(unsigned Reg);

  const MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;

  void PutInWorklist(unsigned RegIdx) {
    if (WorklistMembers.test(RegIdx))
      return;
    WorklistMembers.set(RegIdx);
    Worklist.push_back(RegIdx);
  }

  std::unique_ptr<VRegInfo[]> VRegInfos;
  /// Worklist containing virtreg indexes.
  std::deque<unsigned> Worklist;
  BitVector WorklistMembers;
  /// This bitvector is set for each vreg index where the vreg is defined
  /// by an instruction where lowersToCopies()==true.
  BitVector DefinedByCopy;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_DETECTDEADLANES_H
//===- VLIWMachineScheduler.h - VLIW-Focused Scheduling Pass ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//                                                                            //
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_VLIWMACHINESCHEDULER_H
#define LLVM_CODEGEN_VLIWMACHINESCHEDULER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include <limits>
#include <memory>
#include <utility>

namespace llvm {

class DFAPacketizer;
class RegisterClassInfo;
class ScheduleHazardRecognizer;
class SUnit;
class TargetInstrInfo;
class TargetSubtargetInfo;

class VLIWResourceModel {
protected:
  const TargetInstrInfo *TII;

  /// ResourcesModel - Represents VLIW state.
  /// Not limited to VLIW targets per se, but assumes definition of resource
  /// model by a target.
  DFAPacketizer *ResourcesModel;

  const TargetSchedModel *SchedModel;

  /// Local packet/bundle model. Purely internal to the MI scheduler
  /// at this time.
  SmallVector<SUnit *> Packet;

  /// Total packets created.
  unsigned TotalPackets = 0;

public:
  VLIWResourceModel(const TargetSubtargetInfo &STI, const TargetSchedModel *SM);
  VLIWResourceModel &operator=(const VLIWResourceModel &other) = delete;
  VLIWResourceModel(const VLIWResourceModel &other) = delete;
  virtual ~VLIWResourceModel();

  virtual void reset();

  virtual bool hasDependence(const SUnit *SUd, const SUnit *SUu);
  virtual bool isResourceAvailable(SUnit *SU, bool IsTop);
  virtual bool reserveResources(SUnit *SU, bool IsTop);
  unsigned getTotalPackets() const { return TotalPackets; }
  size_t getPacketInstCount() const { return Packet.size(); }
  bool isInPacket(SUnit *SU) const { return is_contained(Packet, SU); }

protected:
  virtual DFAPacketizer *createPacketizer(const TargetSubtargetInfo &STI) const;
};

/// Extend the standard ScheduleDAGMILive to provide more context and override
/// the top-level schedule() driver.
class VLIWMachineScheduler : public ScheduleDAGMILive {
public:
  VLIWMachineScheduler(MachineSchedContext *C,
                       std::unique_ptr<MachineSchedStrategy> S)
      : ScheduleDAGMILive(C, std::move(S)) {}

  /// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
  /// time to do some work.
  void schedule() override;

  RegisterClassInfo *getRegClassInfo() { return RegClassInfo; }
  int getBBSize() { return BB->size(); }
};

//===----------------------------------------------------------------------===//
// ConvergingVLIWScheduler - Implementation of a VLIW-aware
// MachineSchedStrategy.
//===----------------------------------------------------------------------===//

class ConvergingVLIWScheduler : public MachineSchedStrategy {
protected:
  /// Store the state used by ConvergingVLIWScheduler heuristics, required
  ///  for the lifetime of one invocation of pickNode().
  struct SchedCandidate {
    // The best SUnit candidate.
    SUnit *SU = nullptr;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Best scheduling cost.
    int SCost = 0;

    SchedCandidate() = default;
  };
  /// Represent the type of SchedCandidate found within a single queue.
  enum CandResult {
    NoCand,
    NodeOrder,
    SingleExcess,
    SingleCritical,
    SingleMax,
    MultiPressure,
    BestCost,
    Weak
  };

  // Constants used to denote relative importance of
  // heuristic components for cost computation.
  static constexpr unsigned PriorityOne = 200;
  static constexpr unsigned PriorityTwo = 50;
  static constexpr unsigned PriorityThree = 75;
  static constexpr unsigned ScaleTwo = 10;

  /// Each scheduling boundary is associated with ready queues. It tracks the
  /// current cycle in whichever direction it has moved, and maintains the state
  /// of "hazards" and other interlocks at the current cycle.
  struct VLIWSchedBoundary {
    VLIWMachineScheduler *DAG = nullptr;
    const TargetSchedModel *SchedModel = nullptr;

    ReadyQueue Available;
    ReadyQueue Pending;
    bool CheckPending = false;

    ScheduleHazardRecognizer *HazardRec = nullptr;
    VLIWResourceModel *ResourceModel = nullptr;

    unsigned CurrCycle = 0;
    unsigned IssueCount = 0;
    unsigned CriticalPathLength = 0;

    /// MinReadyCycle - Cycle of the soonest available instruction.
    unsigned MinReadyCycle = std::numeric_limits<unsigned>::max();

    // Remember the greatest min operand latency.
    unsigned MaxMinLatency = 0;

    /// Pending queues extend the ready queues with the same ID and the
    /// PendingFlag set.
    VLIWSchedBoundary(unsigned ID, const Twine &Name)
        : Available(ID, Name + ".A"),
          Pending(ID << ConvergingVLIWScheduler::LogMaxQID, Name + ".P") {}

    ~VLIWSchedBoundary();
    VLIWSchedBoundary &operator=(const VLIWSchedBoundary &other) = delete;
    VLIWSchedBoundary(const VLIWSchedBoundary &other) = delete;

    void init(VLIWMachineScheduler *dag, const TargetSchedModel *smodel) {
      DAG = dag;
      SchedModel = smodel;
      CurrCycle = 0;
      IssueCount = 0;
      // Initialize the critical path length limit, which is used by the
      // cost model to determine the value for scheduling an instruction. We use
      // a slightly different heuristic for small and large functions. For small
      // functions, it's important to use the height/depth of the instruction.
      // For large functions, prioritizing by height or depth increases spills.
      CriticalPathLength = DAG->getBBSize() / SchedModel->getIssueWidth();
      if (DAG->getBBSize() < 50)
        // We divide by two as a cheap and simple heuristic to reduce the
        // critical path length, which increases the priority of using the graph
        // height/depth in the scheduler's cost computation.
        CriticalPathLength >>= 1;
      else {
        // For large basic blocks, we prefer a larger critical path length to
        // decrease the priority of using the graph height/depth.
        unsigned MaxPath = 0;
        for (auto &SU : DAG->SUnits)
          MaxPath = std::max(MaxPath, isTop() ? SU.getHeight() : SU.getDepth());
        CriticalPathLength = std::max(CriticalPathLength, MaxPath) + 1;
      }
    }
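
    // Worked example for the heuristic above (illustrative numbers): a
    // 40-instruction block with an issue width of 4 starts at 40 / 4 = 10 and
    // is halved to 5 (since 40 < 50); a 200-instruction block starts at 50
    // and is raised to max(50, longest height/depth) + 1.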

    bool isTop() const {
      return Available.getID() == ConvergingVLIWScheduler::TopQID;
    }

    bool checkHazard(SUnit *SU);

    void releaseNode(SUnit *SU, unsigned ReadyCycle);

    void bumpCycle();

    void bumpNode(SUnit *SU);

    void releasePending();

    void removeReady(SUnit *SU);

    SUnit *pickOnlyChoice();

    bool isLatencyBound(SUnit *SU) {
      if (CurrCycle >= CriticalPathLength)
        return true;
      unsigned PathLength = isTop() ? SU->getHeight() : SU->getDepth();
      return CriticalPathLength - CurrCycle <= PathLength;
    }
  };

  VLIWMachineScheduler *DAG = nullptr;
  const TargetSchedModel *SchedModel = nullptr;

  // State of the top and bottom scheduled instruction boundaries.
  VLIWSchedBoundary Top;
  VLIWSchedBoundary Bot;

  /// List of pressure sets that have a high pressure level in the region.
  SmallVector<bool> HighPressureSets;

public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum { TopQID = 1, BotQID = 2, LogMaxQID = 2 };

  ConvergingVLIWScheduler() : Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
  virtual ~ConvergingVLIWScheduler() = default;

  void initialize(ScheduleDAGMI *dag) override;

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override;

  void releaseBottomNode(SUnit *SU) override;

  unsigned reportPackets() {
    return Top.ResourceModel->getTotalPackets() +
           Bot.ResourceModel->getTotalPackets();
  }

protected:
  virtual VLIWResourceModel *
  createVLIWResourceModel(const TargetSubtargetInfo &STI,
                          const TargetSchedModel *SchedModel) const;

  SUnit *pickNodeBidrectional(bool &IsTopNode);

  int pressureChange(const SUnit *SU, bool isBotUp);

  virtual int SchedulingCost(ReadyQueue &Q, SUnit *SU,
                             SchedCandidate &Candidate, RegPressureDelta &Delta,
                             bool verbose);

  CandResult pickNodeFromQueue(VLIWSchedBoundary &Zone,
                               const RegPressureTracker &RPTracker,
                               SchedCandidate &Candidate);
#ifndef NDEBUG
  void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU,
                      int Cost, PressureChange P = PressureChange());

  void readyQueueVerboseDump(const RegPressureTracker &RPTracker,
                             SchedCandidate &Candidate, ReadyQueue &Q);
#endif
};

} // end namespace llvm

#endif // LLVM_CODEGEN_VLIWMACHINESCHEDULER_H
//===- llvm/CodeGen/MachineInstrBundleIterator.h ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines an iterator class that bundles MachineInstr.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
#define LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H

#include "llvm/ADT/ilist.h"
#include "llvm/ADT/simple_ilist.h"
#include <cassert>
#include <iterator>
#include <type_traits>

namespace llvm {

template <class T, bool IsReverse> struct MachineInstrBundleIteratorTraits;
template <class T> struct MachineInstrBundleIteratorTraits<T, false> {
  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
  using instr_iterator = typename list_type::iterator;
  using nonconst_instr_iterator = typename list_type::iterator;
  using const_instr_iterator = typename list_type::const_iterator;
};
template <class T> struct MachineInstrBundleIteratorTraits<T, true> {
  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
  using instr_iterator = typename list_type::reverse_iterator;
  using nonconst_instr_iterator = typename list_type::reverse_iterator;
  using const_instr_iterator = typename list_type::const_reverse_iterator;
};
template <class T> struct MachineInstrBundleIteratorTraits<const T, false> {
  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
  using instr_iterator = typename list_type::const_iterator;
  using nonconst_instr_iterator = typename list_type::iterator;
  using const_instr_iterator = typename list_type::const_iterator;
};
template <class T> struct MachineInstrBundleIteratorTraits<const T, true> {
  using list_type = simple_ilist<T, ilist_sentinel_tracking<true>>;
  using instr_iterator = typename list_type::const_reverse_iterator;
  using nonconst_instr_iterator = typename list_type::reverse_iterator;
  using const_instr_iterator = typename list_type::const_reverse_iterator;
};

template <bool IsReverse> struct MachineInstrBundleIteratorHelper;
template <> struct MachineInstrBundleIteratorHelper<false> {
  /// Get the beginning of the current bundle.
  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
    if (!I.isEnd())
      while (I->isBundledWithPred())
        --I;
    return I;
  }

  /// Get the final node of the current bundle.
  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
    if (!I.isEnd())
      while (I->isBundledWithSucc())
        ++I;
    return I;
  }

  /// Increment forward ilist iterator.
  template <class Iterator> static void increment(Iterator &I) {
    I = std::next(getBundleFinal(I));
  }

  /// Decrement forward ilist iterator.
  template <class Iterator> static void decrement(Iterator &I) {
    I = getBundleBegin(std::prev(I));
  }
};
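
// Illustration (hypothetical block contents): for instructions [A, B, C, D]
// where {A, B, C} form one bundle, incrementing a forward iterator at A lands
// on D, and decrementing an iterator at D lands back on A, the bundle head.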

template <> struct MachineInstrBundleIteratorHelper<true> {
  /// Get the beginning of the current bundle.
  template <class Iterator> static Iterator getBundleBegin(Iterator I) {
    return MachineInstrBundleIteratorHelper<false>::getBundleBegin(
               I.getReverse())
        .getReverse();
  }

  /// Get the final node of the current bundle.
  template <class Iterator> static Iterator getBundleFinal(Iterator I) {
    return MachineInstrBundleIteratorHelper<false>::getBundleFinal(
               I.getReverse())
        .getReverse();
  }

  /// Increment reverse ilist iterator.
  template <class Iterator> static void increment(Iterator &I) {
    I = getBundleBegin(std::next(I));
  }

  /// Decrement reverse ilist iterator.
  template <class Iterator> static void decrement(Iterator &I) {
    I = std::prev(getBundleFinal(I));
  }
};

/// MachineBasicBlock iterator that automatically skips over MIs that are
/// inside bundles (i.e. walks top level MIs only).
template <typename Ty, bool IsReverse = false>
class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
  using Traits = MachineInstrBundleIteratorTraits<Ty, IsReverse>;
  using instr_iterator = typename Traits::instr_iterator;

  instr_iterator MII;

public:
  using value_type = typename instr_iterator::value_type;
  using difference_type = typename instr_iterator::difference_type;
  using pointer = typename instr_iterator::pointer;
  using reference = typename instr_iterator::reference;
  using const_pointer = typename instr_iterator::const_pointer;
  using const_reference = typename instr_iterator::const_reference;
  using iterator_category = std::bidirectional_iterator_tag;

private:
  using nonconst_instr_iterator = typename Traits::nonconst_instr_iterator;
  using const_instr_iterator = typename Traits::const_instr_iterator;
  using nonconst_iterator =
      MachineInstrBundleIterator<typename nonconst_instr_iterator::value_type,
                                 IsReverse>;
  using reverse_iterator = MachineInstrBundleIterator<Ty, !IsReverse>;

public:
  MachineInstrBundleIterator(instr_iterator MI) : MII(MI) {
    assert((!MI.getNodePtr() || MI.isEnd() || !MI->isBundledWithPred()) &&
           "It's not legal to initialize MachineInstrBundleIterator with a "
           "bundled MI");
  }

  MachineInstrBundleIterator(reference MI) : MII(MI) {
    assert(!MI.isBundledWithPred() && "It's not legal to initialize "
                                      "MachineInstrBundleIterator with a "
                                      "bundled MI");
  }

  MachineInstrBundleIterator(pointer MI) : MII(MI) {
    // FIXME: This conversion should be explicit.
    assert((!MI || !MI->isBundledWithPred()) && "It's not legal to initialize "
                                                "MachineInstrBundleIterator "
                                                "with a bundled MI");
  }

  // Template allows conversion from const to nonconst.
  template <class OtherTy>
  MachineInstrBundleIterator(
      const MachineInstrBundleIterator<OtherTy, IsReverse> &I,
      std::enable_if_t<std::is_convertible<OtherTy *, Ty *>::value, void *> =
          nullptr)
      : MII(I.getInstrIterator()) {}

  MachineInstrBundleIterator() : MII(nullptr) {}

  /// Explicit conversion between forward/reverse iterators.
  ///
  /// Translate between forward and reverse iterators without changing range
  /// boundaries.  The resulting iterator will dereference (and have a handle)
  /// to the previous node, which is somewhat unexpected; but converting the
  /// two endpoints in a range will give the same range in reverse.
  ///
  /// This matches std::reverse_iterator conversions.
  explicit MachineInstrBundleIterator(
      const MachineInstrBundleIterator<Ty, !IsReverse> &I)
      : MachineInstrBundleIterator(++I.getReverse()) {}

  /// Get the bundle iterator for the given instruction's bundle.
  static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) {
    return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI);
  }

  reference operator*() const { return *MII; }
  pointer operator->() const { return &operator*(); }

  /// Check for null.
  bool isValid() const { return MII.getNodePtr(); }

  friend bool operator==(const MachineInstrBundleIterator &L,
                         const MachineInstrBundleIterator &R) {
    return L.MII == R.MII;
  }
  friend bool operator==(const MachineInstrBundleIterator &L,
                         const const_instr_iterator &R) {
    return L.MII == R; // Avoid assertion about validity of R.
  }
  friend bool operator==(const const_instr_iterator &L,
                         const MachineInstrBundleIterator &R) {
    return L == R.MII; // Avoid assertion about validity of L.
  }
  friend bool operator==(const MachineInstrBundleIterator &L,
                         const nonconst_instr_iterator &R) {
    return L.MII == R; // Avoid assertion about validity of R.
  }
  friend bool operator==(const nonconst_instr_iterator &L,
                         const MachineInstrBundleIterator &R) {
    return L == R.MII; // Avoid assertion about validity of L.
  }
  friend bool operator==(const MachineInstrBundleIterator &L, const_pointer R) {
    return L == const_instr_iterator(R); // Avoid assertion about validity of R.
  }
  friend bool operator==(const_pointer L, const MachineInstrBundleIterator &R) {
    return const_instr_iterator(L) == R; // Avoid assertion about validity of L.
  }
  friend bool operator==(const MachineInstrBundleIterator &L,
                         const_reference R) {
    return L == &R; // Avoid assertion about validity of R.
  }
  friend bool operator==(const_reference L,
                         const MachineInstrBundleIterator &R) {
    return &L == R; // Avoid assertion about validity of L.
  }

  friend bool operator!=(const MachineInstrBundleIterator &L,
                         const MachineInstrBundleIterator &R) {
    return !(L == R);
  }
  friend bool operator!=(const MachineInstrBundleIterator &L,
                         const const_instr_iterator &R) {
    return !(L == R);
  }
  friend bool operator!=(const const_instr_iterator &L,
                         const MachineInstrBundleIterator &R) {
    return !(L == R);
  }
  friend bool operator!=(const MachineInstrBundleIterator &L,
                         const nonconst_instr_iterator &R) {
    return !(L == R);
  }
  friend bool operator!=(const nonconst_instr_iterator &L,
                         const MachineInstrBundleIterator &R) {
    return !(L == R);
  }
  friend bool operator!=(const MachineInstrBundleIterator &L, const_pointer R) {
    return !(L == R);
  }
  friend bool operator!=(const_pointer L, const MachineInstrBundleIterator &R) {
    return !(L == R);
  }
  friend bool operator!=(const MachineInstrBundleIterator &L,
                         const_reference R) {
    return !(L == R);
  }
  friend bool operator!=(const_reference L,
                         const MachineInstrBundleIterator &R) {
    return !(L == R);
  }

  // Increment and decrement operators...
  MachineInstrBundleIterator &operator--() {
    this->decrement(MII);
    return *this;
  }
  MachineInstrBundleIterator &operator++() {
    this->increment(MII);
    return *this;
  }
  MachineInstrBundleIterator operator--(int) {
    MachineInstrBundleIterator Temp = *this;
    --*this;
    return Temp;
  }
  MachineInstrBundleIterator operator++(int) {
    MachineInstrBundleIterator Temp = *this;
    ++*this;
    return Temp;
  }

  instr_iterator getInstrIterator() const { return MII; }

  nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); }

  /// Get a reverse iterator to the same node.
  ///
  /// Gives a reverse iterator that will dereference (and have a handle) to the
  /// same node.  Converting the endpoint iterators in a range will give a
  /// different range; for range operations, use the explicit conversions.
  reverse_iterator getReverse() const { return MII.getReverse(); }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEINSTRBUNDLEITERATOR_H
//== llvm/CodeGen/LowLevelType.h ------------------------------- -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Implement a low-level type suitable for MachineInstr level instruction
/// selection.
///
/// For a type attached to a MachineInstr, we only care about 2 details: total
/// size and the number of vector lanes (if any). Accordingly, there are 4
/// possible valid type-kinds:
///
///    * `sN` for scalars and aggregates
///    * `<N x sM>` for vectors, which must have at least 2 elements.
///    * `pN` for pointers
///    * `<N x pM>` for vectors of pointers, which must have at least 2
///      elements.
///
/// Other information required for correct selection is expected to be carried
/// by the opcode, or non-type flags. For example the distinction between G_ADD
/// and G_FADD for int/float or fast-math flags.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LOWLEVELTYPE_H
#define LLVM_CODEGEN_LOWLEVELTYPE_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/Debug.h"
#include <cassert>

namespace llvm {

class Type;
class raw_ostream;

class LLT {
public:
  /// Get a low-level scalar or aggregate "bag of bits".
  static constexpr LLT scalar(unsigned SizeInBits) {
    return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
               ElementCount::getFixed(0), SizeInBits,
               /*AddressSpace=*/0};
  }

  /// Get a low-level pointer in the given address space.
  static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
    assert(SizeInBits > 0 && "invalid pointer size");
    return LLT{/*isPointer=*/true, /*isVector=*/false, /*isScalar=*/false,
               ElementCount::getFixed(0), SizeInBits, AddressSpace};
  }

  /// Get a low-level vector of some number of elements and element width.
  static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
    assert(!EC.isScalar() && "invalid number of vector elements");
    return LLT{/*isPointer=*/false, /*isVector=*/true, /*isScalar=*/false,
               EC, ScalarSizeInBits, /*AddressSpace=*/0};
  }

  /// Get a low-level vector of some number of elements and element type.
  static constexpr LLT vector(ElementCount EC, LLT ScalarTy) {
    assert(!EC.isScalar() && "invalid number of vector elements");
    assert(!ScalarTy.isVector() && "invalid vector element type");
    return LLT{ScalarTy.isPointer(),
               /*isVector=*/true,
               /*isScalar=*/false,
               EC,
               ScalarTy.getSizeInBits().getFixedValue(),
               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
  }

  /// Get a low-level fixed-width vector of some number of elements and element
  /// width.
  static constexpr LLT fixed_vector(unsigned NumElements,
                                    unsigned ScalarSizeInBits) {
    return vector(ElementCount::getFixed(NumElements), ScalarSizeInBits);
  }

  /// Get a low-level fixed-width vector of some number of elements and element
  /// type.
  static constexpr LLT fixed_vector(unsigned NumElements, LLT ScalarTy) {
    return vector(ElementCount::getFixed(NumElements), ScalarTy);
  }

  /// Get a low-level scalable vector of some number of elements and element
  /// width.
  static constexpr LLT scalable_vector(unsigned MinNumElements,
                                       unsigned ScalarSizeInBits) {
    return vector(ElementCount::getScalable(MinNumElements), ScalarSizeInBits);
  }

  /// Get a low-level scalable vector of some number of elements and element
  /// type.
  static constexpr LLT scalable_vector(unsigned MinNumElements, LLT ScalarTy) {
    return vector(ElementCount::getScalable(MinNumElements), ScalarTy);
  }

  static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy) {
    return EC.isScalar() ? ScalarTy : LLT::vector(EC, ScalarTy);
  }

  static constexpr LLT scalarOrVector(ElementCount EC, uint64_t ScalarSize) {
    assert(ScalarSize <= std::numeric_limits<unsigned>::max() &&
           "Not enough bits in LLT to represent size");
    return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
  }
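
  // Construction examples (illustrative):
  //   LLT S32 = LLT::scalar(32);                  // s32
  //   LLT P0 = LLT::pointer(0, 64);               // p0 with 64-bit size
  //   LLT V4S32 = LLT::fixed_vector(4, S32);      // <4 x s32>
  //   LLT NxV2S64 = LLT::scalable_vector(2, 64);  // <vscale x 2 x s64>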

  explicit constexpr LLT(bool isPointer, bool isVector, bool isScalar,
                         ElementCount EC, uint64_t SizeInBits,
                         unsigned AddressSpace)
      : LLT() {
    init(isPointer, isVector, isScalar, EC, SizeInBits, AddressSpace);
  }
  explicit constexpr LLT()
      : IsScalar(false), IsPointer(false), IsVector(false), RawData(0) {}

  explicit LLT(MVT VT);

  constexpr bool isValid() const { return IsScalar || RawData != 0; }

  constexpr bool isScalar() const { return IsScalar; }

  constexpr bool isPointer() const {
    return isValid() && IsPointer && !IsVector;
  }

  constexpr bool isVector() const { return isValid() && IsVector; }

  /// Returns the number of elements in a vector LLT. Must only be called on
  /// vector types.
  constexpr uint16_t getNumElements() const {
    if (isScalable())
      llvm::reportInvalidSizeRequest(
          "Possible incorrect use of LLT::getNumElements() for "
          "scalable vector. Scalable flag may be dropped, use "
          "LLT::getElementCount() instead");
    return getElementCount().getKnownMinValue();
  }

  /// Returns true if the LLT is a scalable vector. Must only be called on
  /// vector types.
  constexpr bool isScalable() const {
    assert(isVector() && "Expected a vector type");
    return IsPointer ? getFieldValue(PointerVectorScalableFieldInfo)
                     : getFieldValue(VectorScalableFieldInfo);
  }

  constexpr ElementCount getElementCount() const {
    assert(IsVector && "cannot get number of elements on scalar/aggregate");
    return ElementCount::get(IsPointer
                                 ? getFieldValue(PointerVectorElementsFieldInfo)
                                 : getFieldValue(VectorElementsFieldInfo),
                             isScalable());
  }

  /// Returns the total size of the type. Must only be called on sized types.
  constexpr TypeSize getSizeInBits() const {
    if (isPointer() || isScalar())
      return TypeSize::Fixed(getScalarSizeInBits());
    auto EC = getElementCount();
    return TypeSize(getScalarSizeInBits() * EC.getKnownMinValue(),
                    EC.isScalable());
  }

  /// Returns the total size of the type in bytes, i.e. number of whole bytes
  /// needed to represent the size in bits. Must only be called on sized types.
  constexpr TypeSize getSizeInBytes() const {
    TypeSize BaseSize = getSizeInBits();
    return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
  }

  constexpr LLT getScalarType() const {
    return isVector() ? getElementType() : *this;
  }

  /// If this type is a vector, return a vector with the same number of elements
  /// but the new element type. Otherwise, return the new element type.
  constexpr LLT changeElementType(LLT NewEltTy) const {
    return isVector() ? LLT::vector(getElementCount(), NewEltTy) : NewEltTy;
  }

  /// If this type is a vector, return a vector with the same number of elements
  /// but the new element size. Otherwise, return a scalar with the new size.
  /// Invalid when the element type is a pointer; use changeElementType instead.
  constexpr LLT changeElementSize(unsigned NewEltSize) const {
    assert(!getScalarType().isPointer() &&
           "invalid to directly change element size for pointers");
    return isVector() ? LLT::vector(getElementCount(), NewEltSize)
                      : LLT::scalar(NewEltSize);
  }

  /// Return a vector or scalar with the same element type and the new element
  /// count.
  constexpr LLT changeElementCount(ElementCount EC) const {
    return LLT::scalarOrVector(EC, getScalarType());
  }

  /// Return a type that is \p Factor times smaller. Reduces the number of
  /// elements if this is a vector, or the bitwidth for scalar/pointers. Does
  /// not attempt to handle cases that aren't evenly divisible.
  constexpr LLT divide(int Factor) const {
    assert(Factor != 1);
    assert((!isScalar() || getScalarSizeInBits() != 0) &&
           "cannot divide scalar of size zero");
    if (isVector()) {
      assert(getElementCount().isKnownMultipleOf(Factor));
      return scalarOrVector(getElementCount().divideCoefficientBy(Factor),
                            getElementType());
    }

    assert(getScalarSizeInBits() % Factor == 0);
    return scalar(getScalarSizeInBits() / Factor);
  }

  /// Produce a vector type that is \p Factor times bigger, preserving the
  /// element type. For a scalar or pointer, this will produce a new vector with
  /// \p Factor elements.
  constexpr LLT multiplyElements(int Factor) const {
    if (isVector()) {
      return scalarOrVector(getElementCount().multiplyCoefficientBy(Factor),
                            getElementType());
    }

    return fixed_vector(Factor, *this);
  }
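
  // Illustrative sketch (not in the original header) of the shape transforms
  // above, using only factories declared in this class; names are hypothetical.
  // \code
  //   LLT V8S16 = LLT::fixed_vector(8, LLT::scalar(16)); // <8 x s16>
  //   LLT V4S16 = V8S16.divide(2);          // halves element count: <4 x s16>
  //   LLT S32 = LLT::scalar(64).divide(2);  // scalars divide their width: s32
  //   LLT V2S64 = LLT::scalar(64).multiplyElements(2); // scalar -> <2 x s64>
  // \endcode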

  constexpr bool isByteSized() const {
    return getSizeInBits().isKnownMultipleOf(8);
  }

  constexpr unsigned getScalarSizeInBits() const {
    if (IsScalar)
      return getFieldValue(ScalarSizeFieldInfo);
    if (IsVector) {
      if (!IsPointer)
        return getFieldValue(VectorSizeFieldInfo);
      else
        return getFieldValue(PointerVectorSizeFieldInfo);
    }
    assert(IsPointer && "unexpected LLT");
    return getFieldValue(PointerSizeFieldInfo);
  }

  constexpr unsigned getAddressSpace() const {
    assert(RawData != 0 && "Invalid Type");
    assert(IsPointer && "cannot get address space of non-pointer type");
    if (!IsVector)
      return getFieldValue(PointerAddressSpaceFieldInfo);
    else
      return getFieldValue(PointerVectorAddressSpaceFieldInfo);
  }

  /// Returns the vector's element type. Only valid for vector types.
  constexpr LLT getElementType() const {
    assert(isVector() && "cannot get element type of scalar/aggregate");
    if (IsPointer)
      return pointer(getAddressSpace(), getScalarSizeInBits());
    else
      return scalar(getScalarSizeInBits());
  }

  void print(raw_ostream &OS) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const;
#endif

  constexpr bool operator==(const LLT &RHS) const {
    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
           IsScalar == RHS.IsScalar && RHS.RawData == RawData;
  }

  constexpr bool operator!=(const LLT &RHS) const { return !(*this == RHS); }

  friend struct DenseMapInfo<LLT>;
  friend class GISelInstProfileBuilder;

private:
  /// LLT is packed into 64 bits as follows:
  /// isScalar : 1
  /// isPointer : 1
  /// isVector  : 1
  /// with 61 bits remaining for Kind-specific data, packed in bitfields
  /// as described below. As there isn't a simple portable way to pack bits
  /// into bitfields, the different fields in the packed structure are
  /// described in static const *FieldInfo variables. Each of these variables
  /// is a 2-element array, with the first element describing the bitfield size
  /// and the second element describing the bitfield offset.
  typedef int BitFieldInfo[2];
  ///
  /// This is how the bitfields are packed per Kind:
  /// * Invalid:
  ///   gets encoded as RawData == 0; that encoding cannot collide with a valid
  ///   type, since for valid encodings, SizeInBits/SizeOfElement must be
  ///   larger than 0.
  /// * Non-pointer scalar (isPointer == 0 && isVector == 0):
  ///   SizeInBits: 32;
  static const constexpr BitFieldInfo ScalarSizeFieldInfo{32, 0};
  /// * Pointer (isPointer == 1 && isVector == 0):
  ///   SizeInBits: 16;
  ///   AddressSpace: 24;
  static const constexpr BitFieldInfo PointerSizeFieldInfo{16, 0};
  static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
      24, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
  static_assert((PointerAddressSpaceFieldInfo[0] +
                 PointerAddressSpaceFieldInfo[1]) <= 61,
                "Insufficient bits to encode all data");
  /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
  ///   NumElements: 16;
  ///   SizeOfElement: 32;
  ///   Scalable: 1;
  static const constexpr BitFieldInfo VectorElementsFieldInfo{16, 0};
  static const constexpr BitFieldInfo VectorSizeFieldInfo{
      32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
  static const constexpr BitFieldInfo VectorScalableFieldInfo{
      1, VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]};
  static_assert((VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]) <= 61,
                "Insufficient bits to encode all data");
  /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
  ///   NumElements: 16;
  ///   SizeOfElement: 16;
  ///   AddressSpace: 24;
  ///   Scalable: 1;
  static const constexpr BitFieldInfo PointerVectorElementsFieldInfo{16, 0};
  static const constexpr BitFieldInfo PointerVectorSizeFieldInfo{
      16,
      PointerVectorElementsFieldInfo[1] + PointerVectorElementsFieldInfo[0]};
  static const constexpr BitFieldInfo PointerVectorAddressSpaceFieldInfo{
      24, PointerVectorSizeFieldInfo[1] + PointerVectorSizeFieldInfo[0]};
  static const constexpr BitFieldInfo PointerVectorScalableFieldInfo{
      1, PointerVectorAddressSpaceFieldInfo[0] +
             PointerVectorAddressSpaceFieldInfo[1]};
  static_assert((PointerVectorAddressSpaceFieldInfo[0] +
                 PointerVectorAddressSpaceFieldInfo[1]) <= 61,
                "Insufficient bits to encode all data");

  uint64_t IsScalar : 1;
  uint64_t IsPointer : 1;
  uint64_t IsVector : 1;
  uint64_t RawData : 61;

  static constexpr uint64_t getMask(const BitFieldInfo FieldInfo) {
    const int FieldSizeInBits = FieldInfo[0];
    return (((uint64_t)1) << FieldSizeInBits) - 1;
  }
  static constexpr uint64_t maskAndShift(uint64_t Val, uint64_t Mask,
                                         uint8_t Shift) {
    assert(Val <= Mask && "Value too large for field");
    return (Val & Mask) << Shift;
  }
  static constexpr uint64_t maskAndShift(uint64_t Val,
                                         const BitFieldInfo FieldInfo) {
    return maskAndShift(Val, getMask(FieldInfo), FieldInfo[1]);
  }

  constexpr uint64_t getFieldValue(const BitFieldInfo FieldInfo) const {
    return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
  }

  constexpr void init(bool IsPointer, bool IsVector, bool IsScalar,
                      ElementCount EC, uint64_t SizeInBits,
                      unsigned AddressSpace) {
    assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
           "Not enough bits in LLT to represent size");
    this->IsPointer = IsPointer;
    this->IsVector = IsVector;
    this->IsScalar = IsScalar;
    if (IsScalar)
      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
    else if (IsVector) {
      assert(EC.isVector() && "invalid number of vector elements");
      if (!IsPointer)
        RawData =
            maskAndShift(EC.getKnownMinValue(), VectorElementsFieldInfo) |
            maskAndShift(SizeInBits, VectorSizeFieldInfo) |
            maskAndShift(EC.isScalable() ? 1 : 0, VectorScalableFieldInfo);
      else
        RawData =
            maskAndShift(EC.getKnownMinValue(),
                         PointerVectorElementsFieldInfo) |
            maskAndShift(SizeInBits, PointerVectorSizeFieldInfo) |
            maskAndShift(AddressSpace, PointerVectorAddressSpaceFieldInfo) |
            maskAndShift(EC.isScalable() ? 1 : 0,
                         PointerVectorScalableFieldInfo);
    } else if (IsPointer)
      RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
                maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
    else
      llvm_unreachable("unexpected LLT configuration");
  }

public:
  constexpr uint64_t getUniqueRAWLLTData() const {
    return ((uint64_t)RawData) << 3 | ((uint64_t)IsScalar) << 2 |
           ((uint64_t)IsPointer) << 1 | ((uint64_t)IsVector);
  }
};

inline raw_ostream &operator<<(raw_ostream &OS, const LLT &Ty) {
  Ty.print(OS);
  return OS;
}

template<> struct DenseMapInfo<LLT> {
  static inline LLT getEmptyKey() {
    LLT Invalid;
    Invalid.IsPointer = true;
    return Invalid;
  }
  static inline LLT getTombstoneKey() {
    LLT Invalid;
    Invalid.IsVector = true;
    return Invalid;
  }
  static inline unsigned getHashValue(const LLT &Ty) {
    uint64_t Val = Ty.getUniqueRAWLLTData();
    return DenseMapInfo<uint64_t>::getHashValue(Val);
  }
  static bool isEqual(const LLT &LHS, const LLT &RHS) {
    return LHS == RHS;
  }
};
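
// A brief illustrative sketch (not in the original header): the
// specialization above is what lets LLT be used directly as a DenseMap key.
// \code
//   DenseMap<LLT, unsigned> Counts;
//   ++Counts[LLT::scalar(32)];
// \endcode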

} // end namespace llvm

#endif // LLVM_CODEGEN_LOWLEVELTYPE_H
//===- ModuloSchedule.h - Software pipeline schedule expansion ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Software pipelining (SWP) is an instruction scheduling technique for loops
// that overlaps loop iterations and exploits ILP via compiler transformations.
//
// There are multiple methods for analyzing a loop and creating a schedule.
// An example algorithm is Swing Modulo Scheduling (implemented by the
// MachinePipeliner). The details of how a schedule is arrived at are irrelevant
// for the task of actually rewriting a loop to adhere to the schedule, which
// is what this file does.
//
// A schedule is, for every instruction in a block, a Cycle and a Stage. Note
// that we only support single-block loops, so "block" and "loop" can be used
// interchangeably.
//
// The Cycle of an instruction defines a partial order of the instructions in
// the remapped loop. Instructions within a cycle must not consume the output
// of any instruction in the same cycle. Cycle information is assumed to have
// been calculated such that the processor will execute instructions in
// lock-step (for example in a VLIW ISA).
//
// The Stage of an instruction defines the mapping between logical loop
// iterations and pipelined loop iterations. An example (unrolled) pipeline
// may look something like:
//
//  I0[0]                      Execute instruction I0 of iteration 0
//  I1[0], I0[1]               Execute I1 of iteration 0 and I0 of iteration 1
//         I1[1], I0[2]
//                I1[2], I0[3]
//
// In the schedule for this unrolled sequence we would say that I0 was scheduled
// in stage 0 and I1 in stage 1:
//
//  loop:
//    [stage 0] x = I0
//    [stage 1] I1 x (from stage 0)
//
// And to actually generate valid code we must insert a phi:
//
//  loop:
//    x' = phi(x)
//    x = I0
//    I1 x'
//
// This is a simple example; the rules for how to generate correct code given
// an arbitrary schedule containing loop-carried values are complex.
//
// Note that these examples only mention the steady-state kernel of the
// generated loop; prologs and epilogs that prime and flush the pipeline must
// also be generated. Doing so is nontrivial.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MODULOSCHEDULE_H
#define LLVM_CODEGEN_MODULOSCHEDULE_H

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopUtils.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include <deque>
#include <vector>

namespace llvm {
class MachineBasicBlock;
class MachineLoop;
class MachineRegisterInfo;
class MachineInstr;
class LiveIntervals;

/// Represents a schedule for a single-block loop. For every instruction we
/// maintain a Cycle and Stage.
class ModuloSchedule {
private:
  /// The block containing the loop instructions.
  MachineLoop *Loop;

  /// The instructions to be generated, in total order. Cycle provides a partial
  /// order; the total order within cycles has been decided by the schedule
  /// producer.
  std::vector<MachineInstr *> ScheduledInstrs;

  /// The cycle for each instruction.
  DenseMap<MachineInstr *, int> Cycle;

  /// The stage for each instruction.
  DenseMap<MachineInstr *, int> Stage;

  /// The number of stages in this schedule (Max(Stage) + 1).
  int NumStages;

public:
  /// Create a new ModuloSchedule.
  /// \arg ScheduledInstrs The new loop instructions, in total resequenced
  ///    order.
  /// \arg Cycle Cycle index for all instructions in ScheduledInstrs. Cycle does
  ///    not need to start at zero. ScheduledInstrs must be partially ordered by
  ///    Cycle.
  /// \arg Stage Stage index for all instructions in ScheduledInstrs.
  ModuloSchedule(MachineFunction &MF, MachineLoop *Loop,
                 std::vector<MachineInstr *> ScheduledInstrs,
                 DenseMap<MachineInstr *, int> Cycle,
                 DenseMap<MachineInstr *, int> Stage)
      : Loop(Loop), ScheduledInstrs(std::move(ScheduledInstrs)),
        Cycle(std::move(Cycle)), Stage(std::move(Stage)) {
    NumStages = 0;
    for (auto &KV : this->Stage)
      NumStages = std::max(NumStages, KV.second);
    ++NumStages;
  }

  /// Return the single-block loop being scheduled.
  MachineLoop *getLoop() const { return Loop; }

  /// Return the number of stages contained in this schedule, which is the
  /// largest stage index + 1.
  int getNumStages() const { return NumStages; }

  /// Return the first cycle in the schedule, which is the cycle index of the
  /// first instruction.
  int getFirstCycle() { return Cycle[ScheduledInstrs.front()]; }

  /// Return the final cycle in the schedule, which is the cycle index of the
  /// last instruction.
  int getFinalCycle() { return Cycle[ScheduledInstrs.back()]; }

  /// Return the stage that MI is scheduled in, or -1.
  int getStage(MachineInstr *MI) {
    auto I = Stage.find(MI);
    return I == Stage.end() ? -1 : I->second;
  }

  /// Return the cycle that MI is scheduled at, or -1.
  int getCycle(MachineInstr *MI) {
    auto I = Cycle.find(MI);
    return I == Cycle.end() ? -1 : I->second;
  }

  /// Set the stage of a newly created instruction.
  void setStage(MachineInstr *MI, int MIStage) {
    assert(Stage.count(MI) == 0);
    Stage[MI] = MIStage;
  }

  /// Return the rescheduled instructions in order.
  ArrayRef<MachineInstr *> getInstructions() { return ScheduledInstrs; }

  void dump() { print(dbgs()); }
  void print(raw_ostream &OS);
};
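
// A minimal usage sketch (not part of the original header). A schedule
// producer such as MachinePipeliner would build and inspect a ModuloSchedule
// roughly as below; MF, Loop, Instrs, CycleMap, and StageMap are assumed to
// be computed by the caller.
// \code
//   ModuloSchedule MS(MF, Loop, std::move(Instrs), std::move(CycleMap),
//                     std::move(StageMap));
//   for (MachineInstr *MI : MS.getInstructions())
//     dbgs() << "stage " << MS.getStage(MI) << ", cycle " << MS.getCycle(MI)
//            << ": " << *MI;
// \endcode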

/// The ModuloScheduleExpander takes a ModuloSchedule and expands it in-place,
/// rewriting the old loop and inserting prologs and epilogs as required.
class ModuloScheduleExpander {
public:
  using InstrChangesTy = DenseMap<MachineInstr *, std::pair<unsigned, int64_t>>;

private:
  using ValueMapTy = DenseMap<unsigned, unsigned>;
  using MBBVectorTy = SmallVectorImpl<MachineBasicBlock *>;
  using InstrMapTy = DenseMap<MachineInstr *, MachineInstr *>;

  ModuloSchedule &Schedule;
  MachineFunction &MF;
  const TargetSubtargetInfo &ST;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo *TII = nullptr;
  LiveIntervals &LIS;

  MachineBasicBlock *BB = nullptr;
  MachineBasicBlock *Preheader = nullptr;
  MachineBasicBlock *NewKernel = nullptr;
  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo> LoopInfo;

  /// Maps each register to the max stage difference between its def and its
  /// uses. The first element in the pair is the max difference in stages. The
  /// second is true if the register defines a Phi value and the loop value is
  /// scheduled before the Phi.
  std::map<unsigned, std::pair<unsigned, bool>> RegToStageDiff;

  /// Instructions to change when emitting the final schedule.
  InstrChangesTy InstrChanges;

  void generatePipelinedLoop();
  void generateProlog(unsigned LastStage, MachineBasicBlock *KernelBB,
                      ValueMapTy *VRMap, MBBVectorTy &PrologBBs);
  void generateEpilog(unsigned LastStage, MachineBasicBlock *KernelBB,
                      MachineBasicBlock *OrigBB, ValueMapTy *VRMap,
                      ValueMapTy *VRMapPhi, MBBVectorTy &EpilogBBs,
                      MBBVectorTy &PrologBBs);
  void generateExistingPhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
                            MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
                            ValueMapTy *VRMap, InstrMapTy &InstrMap,
                            unsigned LastStageNum, unsigned CurStageNum,
                            bool IsLast);
  void generatePhis(MachineBasicBlock *NewBB, MachineBasicBlock *BB1,
                    MachineBasicBlock *BB2, MachineBasicBlock *KernelBB,
                    ValueMapTy *VRMap, ValueMapTy *VRMapPhi,
                    InstrMapTy &InstrMap, unsigned LastStageNum,
                    unsigned CurStageNum, bool IsLast);
  void removeDeadInstructions(MachineBasicBlock *KernelBB,
                              MBBVectorTy &EpilogBBs);
  void splitLifetimes(MachineBasicBlock *KernelBB, MBBVectorTy &EpilogBBs);
  void addBranches(MachineBasicBlock &PreheaderBB, MBBVectorTy &PrologBBs,
                   MachineBasicBlock *KernelBB, MBBVectorTy &EpilogBBs,
                   ValueMapTy *VRMap);
  bool computeDelta(MachineInstr &MI, unsigned &Delta);
  void updateMemOperands(MachineInstr &NewMI, MachineInstr &OldMI,
                         unsigned Num);
  MachineInstr *cloneInstr(MachineInstr *OldMI, unsigned CurStageNum,
                           unsigned InstStageNum);
  MachineInstr *cloneAndChangeInstr(MachineInstr *OldMI, unsigned CurStageNum,
                                    unsigned InstStageNum);
  void updateInstruction(MachineInstr *NewMI, bool LastDef,
                         unsigned CurStageNum, unsigned InstrStageNum,
                         ValueMapTy *VRMap);
  MachineInstr *findDefInLoop(unsigned Reg);
  unsigned getPrevMapVal(unsigned StageNum, unsigned PhiStage, unsigned LoopVal,
                         unsigned LoopStage, ValueMapTy *VRMap,
                         MachineBasicBlock *BB);
  void rewritePhiValues(MachineBasicBlock *NewBB, unsigned StageNum,
                        ValueMapTy *VRMap, InstrMapTy &InstrMap);
  void rewriteScheduledInstr(MachineBasicBlock *BB, InstrMapTy &InstrMap,
                             unsigned CurStageNum, unsigned PhiNum,
                             MachineInstr *Phi, unsigned OldReg,
                             unsigned NewReg, unsigned PrevReg = 0);
  bool isLoopCarried(MachineInstr &Phi);

  /// Return the max. number of stages/iterations that can occur between a
  /// register definition and its uses.
  unsigned getStagesForReg(int Reg, unsigned CurStage) {
    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
    if ((int)CurStage > Schedule.getNumStages() - 1 && Stages.first == 0 &&
        Stages.second)
      return 1;
    return Stages.first;
  }

  /// The number of stages for a Phi is a little different from that of other
  /// instructions. The minimum value computed in RegToStageDiff is 1
  /// because we assume the Phi is needed for at least 1 iteration.
  /// This is not the case if the loop value is scheduled prior to the
  /// Phi in the same stage.  This function returns the number of stages
  /// or iterations needed between the Phi definition and any uses.
  unsigned getStagesForPhi(int Reg) {
    std::pair<unsigned, bool> Stages = RegToStageDiff[Reg];
    if (Stages.second)
      return Stages.first;
    return Stages.first - 1;
  }

public:
  /// Create a new ModuloScheduleExpander.
  /// \arg InstrChanges Modifications to make to instructions with memory
  ///   operands.
  /// FIXME: InstrChanges is opaque and is an implementation detail of an
  ///   optimization in MachinePipeliner that crosses abstraction boundaries.
  ModuloScheduleExpander(MachineFunction &MF, ModuloSchedule &S,
                         LiveIntervals &LIS, InstrChangesTy InstrChanges)
      : Schedule(S), MF(MF), ST(MF.getSubtarget()), MRI(MF.getRegInfo()),
        TII(ST.getInstrInfo()), LIS(LIS),
        InstrChanges(std::move(InstrChanges)) {}

  /// Performs the actual expansion.
  void expand();
  /// Performs final cleanup after expansion.
  void cleanup();

  /// Returns the newly rewritten kernel block, or nullptr if this was
  /// optimized away.
  MachineBasicBlock *getRewrittenKernel() { return NewKernel; }
};

/// A reimplementation of ModuloScheduleExpander. It works by generating a
/// standalone kernel loop and peeling out the prologs and epilogs.
class PeelingModuloScheduleExpander {
public:
  PeelingModuloScheduleExpander(MachineFunction &MF, ModuloSchedule &S,
                                LiveIntervals *LIS)
      : Schedule(S), MF(MF), ST(MF.getSubtarget()), MRI(MF.getRegInfo()),
        TII(ST.getInstrInfo()), LIS(LIS) {}

  void expand();

  /// Runs ModuloScheduleExpander and treats it as a golden input to validate
  /// aspects of the code generated by PeelingModuloScheduleExpander.
  void validateAgainstModuloScheduleExpander();

protected:
  ModuloSchedule &Schedule;
  MachineFunction &MF;
  const TargetSubtargetInfo &ST;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo *TII = nullptr;
  LiveIntervals *LIS = nullptr;

  /// The original loop block that gets rewritten in-place.
  MachineBasicBlock *BB = nullptr;
  /// The original loop preheader.
  MachineBasicBlock *Preheader = nullptr;
  /// All prolog and epilog blocks.
  SmallVector<MachineBasicBlock *, 4> Prologs, Epilogs;
  /// For every block, the stages that are produced.
  DenseMap<MachineBasicBlock *, BitVector> LiveStages;
  /// For every block, the stages that are available. A stage can be available
  /// but not produced (in the epilog) or produced but not available (in the
  /// prolog).
  DenseMap<MachineBasicBlock *, BitVector> AvailableStages;
  /// When peeling the epilogue, keep track of the distance between the phi
  /// nodes and the kernel.
  DenseMap<MachineInstr *, unsigned> PhiNodeLoopIteration;

  /// CanonicalMIs and BlockMIs form a bidirectional map between instructions
  /// in the canonical kernel and their copies in any of the loop kernel
  /// clones.
  DenseMap<MachineInstr *, MachineInstr *> CanonicalMIs;
  DenseMap<std::pair<MachineBasicBlock *, MachineInstr *>, MachineInstr *>
      BlockMIs;

  /// State passed from peelKernel to peelPrologAndEpilogs().
  std::deque<MachineBasicBlock *> PeeledFront, PeeledBack;
  /// Illegal phis that need to be deleted once we re-link stages.
  SmallVector<MachineInstr *, 4> IllegalPhisToDelete;

  /// Converts BB from the original loop body to the rewritten, pipelined
  /// steady-state.
  void rewriteKernel();

  /// Peels one iteration of the rewritten kernel (BB) in the specified
  /// direction.
  MachineBasicBlock *peelKernel(LoopPeelDirection LPD);
  // Delete instructions whose stage is less than MinStage in the given basic
  // block.
  void filterInstructions(MachineBasicBlock *MB, int MinStage);
  // Move instructions of the given stage from SourceBB to DestBB. Remap the
  // phi instructions to keep the IR valid.
  void moveStageBetweenBlocks(MachineBasicBlock *DestBB,
                              MachineBasicBlock *SourceBB, unsigned Stage);
  /// Peel the kernel forwards and backwards to produce prologs and epilogs,
  /// and stitch them together.
  void peelPrologAndEpilogs();
  /// All prolog and epilog blocks are clones of the kernel, so any produced
  /// register in one block has an equivalent in all other blocks.
  Register getEquivalentRegisterIn(Register Reg, MachineBasicBlock *BB);
  /// Change all users of MI, if MI is predicated out
  /// (LiveStages[MI->getParent()] == false).
  void rewriteUsesOf(MachineInstr *MI);
  /// Insert branches between prologs, kernel and epilogs.
  void fixupBranches();
  /// Create a poor-man's LCSSA by cloning only the PHIs from the kernel block
  /// to a block dominated by all prologs and epilogs. This allows us to treat
  /// the loop-exiting block like any other kernel clone.
  MachineBasicBlock *CreateLCSSAExitingBlock();
  /// Helper to get the stage of an instruction in the schedule.
  unsigned getStage(MachineInstr *MI) {
    if (CanonicalMIs.count(MI))
      MI = CanonicalMIs[MI];
    return Schedule.getStage(MI);
  }
  /// Helper function to find the right canonical register for a phi
  /// instruction coming from a peeled-out prologue.
  Register getPhiCanonicalReg(MachineInstr *CanonicalPhi, MachineInstr *Phi);
  /// Target loop info before kernel peeling.
  std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo> LoopInfo;
};

/// Expander that simply annotates each scheduled instruction with a post-instr
/// symbol that can be consumed by the ModuloScheduleTest pass.
///
/// The post-instr symbol is a way of annotating an instruction that can be
/// roundtripped in MIR. The syntax is:
///   MYINST %0, post-instr-symbol <mcsymbol Stage-1_Cycle-5>
class ModuloScheduleTestAnnotater {
  MachineFunction &MF;
  ModuloSchedule &S;

public:
  ModuloScheduleTestAnnotater(MachineFunction &MF, ModuloSchedule &S)
      : MF(MF), S(S) {}

  /// Performs the annotation.
  void annotate();
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MODULOSCHEDULE_H
//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MachineOperand class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>

namespace llvm {

class LLT;
class BlockAddress;
class Constant;
class ConstantFP;
class ConstantInt;
class GlobalValue;
class MachineBasicBlock;
class MachineInstr;
class MachineRegisterInfo;
class MCCFIInstruction;
class MDNode;
class ModuleSlotTracker;
class TargetIntrinsicInfo;
class TargetRegisterInfo;
class hash_code;
class raw_ostream;
class MCSymbol;

/// MachineOperand class - Representation of each machine instruction operand.
///
/// This class isn't a POD type because it has a private constructor, but its
/// destructor must be trivial. Functions like MachineInstr::addOperand(),
/// MachineRegisterInfo::moveOperands(), and MF::DeleteMachineInstr() depend on
/// not having to call the MachineOperand destructor.
///
class MachineOperand {
public:
  enum MachineOperandType : unsigned char {
    MO_Register,          ///< Register operand.
    MO_Immediate,         ///< Immediate operand
    MO_CImmediate,        ///< Immediate >64bit operand
    MO_FPImmediate,       ///< Floating-point immediate operand
    MO_MachineBasicBlock, ///< MachineBasicBlock reference
    MO_FrameIndex,        ///< Abstract Stack Frame Index
    MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
    MO_TargetIndex,       ///< Target-dependent index+offset operand.
    MO_JumpTableIndex,    ///< Address of indexed Jump Table for switch
    MO_ExternalSymbol,    ///< Name of external global symbol
    MO_GlobalAddress,     ///< Address of a global value
    MO_BlockAddress,      ///< Address of a basic block
    MO_RegisterMask,      ///< Mask of preserved registers.
    MO_RegisterLiveOut,   ///< Mask of live-out registers.
    MO_Metadata,          ///< Metadata reference (for debug info)
    MO_MCSymbol,          ///< MCSymbol reference (for debug/eh info)
    MO_CFIIndex,          ///< MCCFIInstruction index.
    MO_IntrinsicID,       ///< Intrinsic ID for ISel
    MO_Predicate,         ///< Generic predicate for ISel
    MO_ShuffleMask,       ///< Other IR Constant for ISel (shuffle masks)
    MO_DbgInstrRef, ///< Integer indices referring to an instruction+operand
    MO_Last = MO_DbgInstrRef
  };

private:
  /// OpKind - Specify what kind of operand this is.  This discriminates the
  /// union.
  unsigned OpKind : 8;

  /// Subregister number for MO_Register.  A value of 0 indicates the
  /// MO_Register has no subReg.
  ///
  /// For all other kinds of operands, this field holds target-specific flags.
  unsigned SubReg_TargetFlags : 12;

  /// TiedTo - Non-zero when this register operand is tied to another register
  /// operand. The encoding of this field is described in the block comment
  /// before MachineInstr::tieOperands().
  unsigned TiedTo : 4;

  /// IsDef - True if this is a def, false if this is a use of the register.
  /// This is only valid on register operands.
  ///
  unsigned IsDef : 1;

  /// IsImp - True if this is an implicit def or use, false if it is explicit.
  /// This is only valid on register operands.
  ///
  unsigned IsImp : 1;

  /// IsDeadOrKill
  /// For uses: IsKill - Conservatively indicates the last use of a register
  /// on this path through the function. A register operand with this flag set
  /// to true must be the last use of the register; with it set to false, the
  /// operand may or may not be the last use of the register. After regalloc
  /// we can use recomputeLivenessFlags to get precise kill flags.
  /// For defs: IsDead - True if this register is never used by a subsequent
  /// instruction.
  /// This is only valid on register operands.
  unsigned IsDeadOrKill : 1;

  /// See isRenamable().
  unsigned IsRenamable : 1;

  /// IsUndef - True if this register operand reads an "undef" value, i.e. the
  /// read value doesn't matter.  This flag can be set on both use and def
  /// operands.  On a sub-register def operand, it refers to the part of the
  /// register that isn't written.  On a full-register def operand, it is a
  /// noop.  See readsReg().
  ///
  /// This is only valid on registers.
  ///
  /// Note that an instruction may have multiple <undef> operands referring to
  /// the same register.  In that case, the instruction may depend on those
  /// operands reading the same don't-care value.  For example:
  ///
  ///   %1 = XOR undef %2, undef %2
  ///
  /// Any register can be used for %2, and its value doesn't matter, but
  /// the two operands must be the same register.
  ///
  unsigned IsUndef : 1;

  /// IsInternalRead - True if this operand reads a value that was defined
  /// inside the same instruction or bundle.  This flag can be set on both use
  /// and def operands.  On a sub-register def operand, it refers to the part
  /// of the register that isn't written.  On a full-register def operand, it
  /// is a noop.
  ///
  /// When this flag is set, the instruction bundle must contain at least one
  /// other def of the register.  If multiple instructions in the bundle define
  /// the register, the meaning is target-defined.
  unsigned IsInternalRead : 1;

  /// IsEarlyClobber - True if this MO_Register 'def' operand is written to
  /// by the MachineInstr before all input registers are read.  This is used to
  /// model the GCC inline asm '&' constraint modifier.
  unsigned IsEarlyClobber : 1;

  /// IsDebug - True if this MO_Register 'use' operand is in a debug pseudo,
  /// not a real instruction.  Such uses should be ignored during codegen.
  unsigned IsDebug : 1;

  /// SmallContents - This really should be part of the Contents union, but
  /// lives out here so we can get a better packed struct.
  /// MO_Register: Register number.
  /// OffsetedInfo: Low bits of offset.
  union {
    unsigned RegNo;           // For MO_Register.
    unsigned OffsetLo;        // Matches Contents.OffsetedInfo.OffsetHi.
  } SmallContents;

  /// ParentMI - This is the instruction that this operand is embedded into.
  /// This is valid for all operand types, when the operand is in an instr.
  MachineInstr *ParentMI = nullptr;

  /// Contents union - This contains the payload for the various operand types.
  union ContentsUnion {
    ContentsUnion() {}
    MachineBasicBlock *MBB;  // For MO_MachineBasicBlock.
    const ConstantFP *CFP;   // For MO_FPImmediate.
    const ConstantInt *CI;   // For MO_CImmediate. Integers > 64bit.
    int64_t ImmVal;          // For MO_Immediate.
    const uint32_t *RegMask; // For MO_RegisterMask and MO_RegisterLiveOut.
    const MDNode *MD;        // For MO_Metadata.
    MCSymbol *Sym;           // For MO_MCSymbol.
    unsigned CFIIndex;       // For MO_CFI.
    Intrinsic::ID IntrinsicID; // For MO_IntrinsicID.
    unsigned Pred;           // For MO_Predicate
    ArrayRef<int> ShuffleMask; // For MO_ShuffleMask

    struct {                  // For MO_Register.
      // Register number is in SmallContents.RegNo.
      MachineOperand *Prev;   // Access list for register. See MRI.
      MachineOperand *Next;
    } Reg;

    struct { // For MO_DbgInstrRef.
      unsigned InstrIdx;
      unsigned OpIdx;
    } InstrRef;

    /// OffsetedInfo - This struct contains the offset and an object identifier;
    /// it represents the object together with an optional offset from it.
    struct {
      union {
        int Index;                // For MO_*Index - The index itself.
        const char *SymbolName;   // For MO_ExternalSymbol.
        const GlobalValue *GV;    // For MO_GlobalAddress.
        const BlockAddress *BA;   // For MO_BlockAddress.
      } Val;
      // Low bits of offset are in SmallContents.OffsetLo.
      int OffsetHi;               // An offset from the object, high 32 bits.
    } OffsetedInfo;
  } Contents;

  explicit MachineOperand(MachineOperandType K)
      : OpKind(K), SubReg_TargetFlags(0) {
    // Assert that the layout is what we expect. It's easy to grow this object.
    static_assert(alignof(MachineOperand) <= alignof(int64_t),
                  "MachineOperand shouldn't be more than 8 byte aligned");
    static_assert(sizeof(Contents) <= 2 * sizeof(void *),
                  "Contents should be at most two pointers");
    static_assert(sizeof(MachineOperand) <=
                      alignTo<alignof(int64_t)>(2 * sizeof(unsigned) +
                                                3 * sizeof(void *)),
                  "MachineOperand too big. Should be Kind, SmallContents, "
                  "ParentMI, and Contents");
  }

public:
  /// getType - Returns the MachineOperandType for this operand.
  ///
  MachineOperandType getType() const { return (MachineOperandType)OpKind; }

  unsigned getTargetFlags() const {
    return isReg() ? 0 : SubReg_TargetFlags;
  }
  void setTargetFlags(unsigned F) {
    assert(!isReg() && "Register operands can't have target flags");
    SubReg_TargetFlags = F;
    assert(SubReg_TargetFlags == F && "Target flags out of range");
  }
  void addTargetFlag(unsigned F) {
    assert(!isReg() && "Register operands can't have target flags");
    SubReg_TargetFlags |= F;
    assert((SubReg_TargetFlags & F) && "Target flags out of range");
  }


  /// getParent - Return the instruction that this operand belongs to.
  ///
  MachineInstr *getParent() { return ParentMI; }
  const MachineInstr *getParent() const { return ParentMI; }

  /// clearParent - Reset the parent pointer.
  ///
  /// The MachineOperand copy constructor also copies ParentMI, expecting the
  /// original to be deleted. If a MachineOperand is ever stored outside a
  /// MachineInstr, the parent pointer must be cleared.
  ///
  /// Never call clearParent() on an operand in a MachineInstr.
  ///
  void clearParent() { ParentMI = nullptr; }

  /// Returns the index of this operand in the instruction that it belongs to.
  unsigned getOperandNo() const;

  /// Print a subreg index operand.
  /// MO_Immediate operands can also be subreg indices. If that's the case, the
  /// subreg index name will be printed. MachineInstr::isOperandSubregIdx can be
  /// called to check this.
  static void printSubRegIdx(raw_ostream &OS, uint64_t Index,
                             const TargetRegisterInfo *TRI);

  /// Print operand target flags.
  static void printTargetFlags(raw_ostream& OS, const MachineOperand &Op);

  /// Print a MCSymbol as an operand.
  static void printSymbol(raw_ostream &OS, MCSymbol &Sym);

  /// Print a stack object reference.
  static void printStackObjectReference(raw_ostream &OS, unsigned FrameIndex,
                                        bool IsFixed, StringRef Name);

  /// Print the offset with explicit +/- signs.
  static void printOperandOffset(raw_ostream &OS, int64_t Offset);

  /// Print an IRSlotNumber.
  static void printIRSlotNumber(raw_ostream &OS, int Slot);

  /// Print the MachineOperand to \p os.
  /// Providing a valid \p TRI and \p IntrinsicInfo results in more
  /// target-specific printing. If \p TRI and \p IntrinsicInfo are null, the
  /// function will try to pick them up from the parent.
  void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr,
             const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;

  /// More complex way of printing a MachineOperand.
  /// \param TypeToPrint specifies the generic type to be printed on uses and
  /// defs. It can be determined using MachineInstr::getTypeToPrint.
  /// \param OpIdx - specifies the index of the operand in the machine
  /// instruction.
  /// This will be used by target dependent MIR formatter. Could be std::nullopt
  /// if the index is unknown, e.g. called by dump().
  /// \param PrintDef - whether we want to print `def` on an operand which
  /// isDef. Sometimes, if the operand is printed before '=', we don't print
  /// `def`.
  /// \param IsStandalone - whether we want a verbose output of the MO. This
  /// prints extra information that can be easily inferred when printing the
  /// whole function, but not when printing only a fragment of it.
  /// \param ShouldPrintRegisterTies - whether we want to print register ties.
  /// Sometimes they are easily determined by the instruction's descriptor
  /// (MachineInstr::hasComplexRegisterTies can determine if it's needed).
  /// \param TiedOperandIdx - if we need to print register ties this needs to
  /// provide the index of the tied register. If not, it will be ignored.
  /// \param TRI - provide more target-specific information to the printer.
  /// Unlike the previous function, this one will not try to get the
  /// information from its parent.
  /// \param IntrinsicInfo - same as \p TRI.
  void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
             std::optional<unsigned> OpIdx, bool PrintDef, bool IsStandalone,
             bool ShouldPrintRegisterTies, unsigned TiedOperandIdx,
             const TargetRegisterInfo *TRI,
             const TargetIntrinsicInfo *IntrinsicInfo) const;

  /// Same as print(os, TRI, IntrinsicInfo), but allows specifying the low-level
  /// type to be printed the same way the full version of print(...) does.
  void print(raw_ostream &os, LLT TypeToPrint,
             const TargetRegisterInfo *TRI = nullptr,
             const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;

  void dump() const;

  //===--------------------------------------------------------------------===//
  // Accessors that tell you what kind of MachineOperand you're looking at.
  //===--------------------------------------------------------------------===//

  /// isReg - Tests if this is a MO_Register operand.
  bool isReg() const { return OpKind == MO_Register; }
  /// isImm - Tests if this is a MO_Immediate operand.
  bool isImm() const { return OpKind == MO_Immediate; }
  /// isCImm - Test if this is a MO_CImmediate operand.
  bool isCImm() const { return OpKind == MO_CImmediate; }
  /// isFPImm - Tests if this is a MO_FPImmediate operand.
  bool isFPImm() const { return OpKind == MO_FPImmediate; }
  /// isMBB - Tests if this is a MO_MachineBasicBlock operand.
  bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
  /// isFI - Tests if this is a MO_FrameIndex operand.
  bool isFI() const { return OpKind == MO_FrameIndex; }
  /// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
  bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
  /// isTargetIndex - Tests if this is a MO_TargetIndex operand.
  bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
  /// isJTI - Tests if this is a MO_JumpTableIndex operand.
  bool isJTI() const { return OpKind == MO_JumpTableIndex; }
  /// isGlobal - Tests if this is a MO_GlobalAddress operand.
  bool isGlobal() const { return OpKind == MO_GlobalAddress; }
  /// isSymbol - Tests if this is a MO_ExternalSymbol operand.
  bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
  /// isBlockAddress - Tests if this is a MO_BlockAddress operand.
  bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
  /// isRegMask - Tests if this is a MO_RegisterMask operand.
  bool isRegMask() const { return OpKind == MO_RegisterMask; }
  /// isRegLiveOut - Tests if this is a MO_RegisterLiveOut operand.
  bool isRegLiveOut() const { return OpKind == MO_RegisterLiveOut; }
  /// isMetadata - Tests if this is a MO_Metadata operand.
  bool isMetadata() const { return OpKind == MO_Metadata; }
  bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
  bool isDbgInstrRef() const { return OpKind == MO_DbgInstrRef; }
  bool isCFIIndex() const { return OpKind == MO_CFIIndex; }
  bool isIntrinsicID() const { return OpKind == MO_IntrinsicID; }
  bool isPredicate() const { return OpKind == MO_Predicate; }
  bool isShuffleMask() const { return OpKind == MO_ShuffleMask; }
  //===--------------------------------------------------------------------===//
  // Accessors for Register Operands
  //===--------------------------------------------------------------------===//

  /// getReg - Returns the register number.
  Register getReg() const {
    assert(isReg() && "This is not a register operand!");
    return Register(SmallContents.RegNo);
  }

  unsigned getSubReg() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return SubReg_TargetFlags;
  }

  bool isUse() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return !IsDef;
  }

  bool isDef() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsDef;
  }

  bool isImplicit() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsImp;
  }

  bool isDead() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsDeadOrKill & IsDef;
  }

  bool isKill() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsDeadOrKill & !IsDef;
  }

  bool isUndef() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsUndef;
  }

  /// isRenamable - Returns true if this register may be renamed, i.e. it does
  /// not generate a value that is somehow read in a way that is not represented
  /// by the Machine IR (e.g. to meet an ABI or ISA requirement).  This is only
  /// valid on physical register operands.  Virtual registers are assumed to
  /// always be renamable regardless of the value of this field.
  ///
  /// Operands that are renamable can freely be changed to any other register
  /// that is a member of the register class returned by
  /// MI->getRegClassConstraint().
  ///
  /// isRenamable can return false for several different reasons:
  ///
  /// - ABI constraints (since liveness is not always precisely modeled).  We
  ///   conservatively handle these cases by setting all physical register
  ///   operands that didn't start out as virtual regs to not be renamable.
  ///   Also any physical register operands created after register allocation or
  ///   whose register is changed after register allocation will not be
  ///   renamable.  This state is tracked in the MachineOperand::IsRenamable
  ///   bit.
  ///
  /// - Opcode/target constraints: for opcodes that have complex register class
  ///   requirements (e.g. that depend on other operands/instructions), we set
  ///   hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq in the machine opcode
  ///   description.  Operands belonging to instructions with opcodes that are
  ///   marked hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq return false from
  ///   isRenamable().  Additionally, the AllowRegisterRenaming target property
  ///   prevents any operands from being marked renamable for targets that don't
  ///   have detailed opcode hasExtraSrcRegAllocReq/hasExtraDstRegAllocReq
  ///   values.
  bool isRenamable() const;

  bool isInternalRead() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsInternalRead;
  }

  bool isEarlyClobber() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsEarlyClobber;
  }

  bool isTied() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return TiedTo;
  }

  bool isDebug() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return IsDebug;
  }

  /// readsReg - Returns true if this operand reads the previous value of its
  /// register.  A use operand with the <undef> flag set doesn't read its
  /// register.  A sub-register def implicitly reads the other parts of the
  /// register being redefined unless the <undef> flag is set.
  ///
  /// This refers to reading the register value from before the current
  /// instruction or bundle. Internal bundle reads are not included.
  bool readsReg() const {
    assert(isReg() && "Wrong MachineOperand accessor");
    return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
  }

  /// Return true if this operand can validly be appended to an arbitrary
  /// operand list, i.e. it behaves like an implicit operand.
  bool isValidExcessOperand() const {
    if ((isReg() && isImplicit()) || isRegMask())
      return true;

    // Debug operands
    return isMetadata() || isMCSymbol();
  }

  //===--------------------------------------------------------------------===//
  // Mutators for Register Operands
  //===--------------------------------------------------------------------===//

  /// Change the register this operand corresponds to.
  ///
  void setReg(Register Reg);

  void setSubReg(unsigned subReg) {
    assert(isReg() && "Wrong MachineOperand mutator");
    SubReg_TargetFlags = subReg;
    assert(SubReg_TargetFlags == subReg && "SubReg out of range");
  }

  /// substVirtReg - Substitute the current register with the virtual
  /// subregister Reg:SubReg. Take any existing SubReg index into account,
  /// using TargetRegisterInfo to compose the subreg indices if necessary.
  /// Reg must be a virtual register, SubIdx can be 0.
  ///
  void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo&);

  /// substPhysReg - Substitute the current register with the physical register
  /// Reg, taking any existing SubReg into account. For instance,
  /// substPhysReg(%eax) will change %reg1024:sub_8bit to %al.
  ///
  void substPhysReg(MCRegister Reg, const TargetRegisterInfo&);

  void setIsUse(bool Val = true) { setIsDef(!Val); }

  /// Change a def to a use, or a use to a def.
  void setIsDef(bool Val = true);

  void setImplicit(bool Val = true) {
    assert(isReg() && "Wrong MachineOperand mutator");
    IsImp = Val;
  }

  void setIsKill(bool Val = true) {
    assert(isReg() && !IsDef && "Wrong MachineOperand mutator");
    assert((!Val || !isDebug()) && "Marking a debug operation as kill");
    IsDeadOrKill = Val;
  }

  void setIsDead(bool Val = true) {
    assert(isReg() && IsDef && "Wrong MachineOperand mutator");
    IsDeadOrKill = Val;
  }

  void setIsUndef(bool Val = true) {
    assert(isReg() && "Wrong MachineOperand mutator");
    IsUndef = Val;
  }

  void setIsRenamable(bool Val = true);

  void setIsInternalRead(bool Val = true) {
    assert(isReg() && "Wrong MachineOperand mutator");
    IsInternalRead = Val;
  }

  void setIsEarlyClobber(bool Val = true) {
    assert(isReg() && IsDef && "Wrong MachineOperand mutator");
    IsEarlyClobber = Val;
  }

  void setIsDebug(bool Val = true) {
    assert(isReg() && !IsDef && "Wrong MachineOperand mutator");
    IsDebug = Val;
  }

  //===--------------------------------------------------------------------===//
  // Accessors for various operand types.
  //===--------------------------------------------------------------------===//

  int64_t getImm() const {
    assert(isImm() && "Wrong MachineOperand accessor");
    return Contents.ImmVal;
  }

  const ConstantInt *getCImm() const {
    assert(isCImm() && "Wrong MachineOperand accessor");
    return Contents.CI;
  }

  const ConstantFP *getFPImm() const {
    assert(isFPImm() && "Wrong MachineOperand accessor");
    return Contents.CFP;
  }

  MachineBasicBlock *getMBB() const {
    assert(isMBB() && "Wrong MachineOperand accessor");
    return Contents.MBB;
  }

  int getIndex() const {
    assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
           "Wrong MachineOperand accessor");
    return Contents.OffsetedInfo.Val.Index;
  }

  const GlobalValue *getGlobal() const {
    assert(isGlobal() && "Wrong MachineOperand accessor");
    return Contents.OffsetedInfo.Val.GV;
  }

  const BlockAddress *getBlockAddress() const {
    assert(isBlockAddress() && "Wrong MachineOperand accessor");
    return Contents.OffsetedInfo.Val.BA;
  }

  MCSymbol *getMCSymbol() const {
    assert(isMCSymbol() && "Wrong MachineOperand accessor");
    return Contents.Sym;
  }

  unsigned getInstrRefInstrIndex() const {
    assert(isDbgInstrRef() && "Wrong MachineOperand accessor");
    return Contents.InstrRef.InstrIdx;
  }

  unsigned getInstrRefOpIndex() const {
    assert(isDbgInstrRef() && "Wrong MachineOperand accessor");
    return Contents.InstrRef.OpIdx;
  }

  unsigned getCFIIndex() const {
    assert(isCFIIndex() && "Wrong MachineOperand accessor");
    return Contents.CFIIndex;
  }

  Intrinsic::ID getIntrinsicID() const {
    assert(isIntrinsicID() && "Wrong MachineOperand accessor");
    return Contents.IntrinsicID;
  }

  unsigned getPredicate() const {
    assert(isPredicate() && "Wrong MachineOperand accessor");
    return Contents.Pred;
  }

  ArrayRef<int> getShuffleMask() const {
    assert(isShuffleMask() && "Wrong MachineOperand accessor");
    return Contents.ShuffleMask;
  }

  /// Return the offset from the symbol in this operand. This always returns 0
  /// for ExternalSymbol operands.
  int64_t getOffset() const {
    assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
            isTargetIndex() || isBlockAddress()) &&
           "Wrong MachineOperand accessor");
    return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
           SmallContents.OffsetLo;
  }
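
  // Illustrative note (not in the original header): the 64-bit offset is split
  // across two 32-bit fields. For Offset == 0x123456789:
  //   SmallContents.OffsetLo         == 0x23456789 (low 32 bits)
  //   Contents.OffsetedInfo.OffsetHi == 0x1        (high 32 bits)
  // and getOffset() reassembles (uint64_t(OffsetHi) << 32) | OffsetLo.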

  const char *getSymbolName() const {
    assert(isSymbol() && "Wrong MachineOperand accessor");
    return Contents.OffsetedInfo.Val.SymbolName;
  }

  /// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
  /// It is sometimes necessary to detach the register mask pointer from its
  /// machine operand. This static method can be used for such detached bit
  /// mask pointers.
  static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg) {
    // See TargetRegisterInfo.h.
    assert(PhysReg < (1u << 30) && "Not a physical register");
    return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
  }

  /// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
  bool clobbersPhysReg(MCRegister PhysReg) const {
     return clobbersPhysReg(getRegMask(), PhysReg);
  }

  /// getRegMask - Returns a bit mask of registers preserved by this RegMask
  /// operand.
  const uint32_t *getRegMask() const {
    assert(isRegMask() && "Wrong MachineOperand accessor");
    return Contents.RegMask;
  }

  /// Returns number of elements needed for a regmask array.
  static unsigned getRegMaskSize(unsigned NumRegs) {
    return (NumRegs + 31) / 32;
  }
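
  // Illustrative sketch (not part of the original header): building a mask
  // that preserves every register except a physical register R, in the same
  // 32-bit word layout clobbersPhysReg() reads; NumRegs and R are
  // hypothetical caller-provided values.
  // \code
  //   std::vector<uint32_t> Mask(MachineOperand::getRegMaskSize(NumRegs),
  //                              ~0u);            // 1 bit == preserved
  //   Mask[R / 32] &= ~(1u << (R % 32));          // clear bit -> clobber R
  //   assert(MachineOperand::clobbersPhysReg(Mask.data(), R));
  // \endcode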

  /// getRegLiveOut - Returns a bit mask of live-out registers.
  const uint32_t *getRegLiveOut() const {
    assert(isRegLiveOut() && "Wrong MachineOperand accessor");
    return Contents.RegMask;
  }

  const MDNode *getMetadata() const {
    assert(isMetadata() && "Wrong MachineOperand accessor");
    return Contents.MD;
  }

  //===--------------------------------------------------------------------===//
  // Mutators for various operand types.
  //===--------------------------------------------------------------------===//

  void setImm(int64_t immVal) {
    assert(isImm() && "Wrong MachineOperand mutator");
    Contents.ImmVal = immVal;
  }

  void setCImm(const ConstantInt *CI) {
    assert(isCImm() && "Wrong MachineOperand mutator");
    Contents.CI = CI;
  }

  void setFPImm(const ConstantFP *CFP) {
    assert(isFPImm() && "Wrong MachineOperand mutator");
    Contents.CFP = CFP;
  }

  void setOffset(int64_t Offset) {
    assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
            isTargetIndex() || isBlockAddress()) &&
           "Wrong MachineOperand mutator");
    SmallContents.OffsetLo = unsigned(Offset);
    Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
  }

  void setIndex(int Idx) {
    assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
           "Wrong MachineOperand mutator");
    Contents.OffsetedInfo.Val.Index = Idx;
  }

  void setMetadata(const MDNode *MD) {
    assert(isMetadata() && "Wrong MachineOperand mutator");
    Contents.MD = MD;
  }

  void setInstrRefInstrIndex(unsigned InstrIdx) {
    assert(isDbgInstrRef() && "Wrong MachineOperand mutator");
    Contents.InstrRef.InstrIdx = InstrIdx;
  }
  void setInstrRefOpIndex(unsigned OpIdx) {
    assert(isDbgInstrRef() && "Wrong MachineOperand mutator");
    Contents.InstrRef.OpIdx = OpIdx;
  }

  void setMBB(MachineBasicBlock *MBB) {
    assert(isMBB() && "Wrong MachineOperand mutator");
    Contents.MBB = MBB;
  }

  /// Sets the value of the register mask operand to reference \p RegMaskPtr.
  /// The operand does not take ownership of the memory referenced by
  /// \p RegMaskPtr; it must remain valid for the lifetime of the operand.
  /// See CreateRegMask(). Any physreg with a 0 bit in the mask is clobbered
  /// by the instruction.
  void setRegMask(const uint32_t *RegMaskPtr) {
    assert(isRegMask() && "Wrong MachineOperand mutator");
    Contents.RegMask = RegMaskPtr;
  }

  void setIntrinsicID(Intrinsic::ID IID) {
    assert(isIntrinsicID() && "Wrong MachineOperand mutator");
    Contents.IntrinsicID = IID;
  }

  void setPredicate(unsigned Predicate) {
    assert(isPredicate() && "Wrong MachineOperand mutator");
    Contents.Pred = Predicate;
  }

  //===--------------------------------------------------------------------===//
  // Other methods.
  //===--------------------------------------------------------------------===//

  /// Returns true if this operand is identical to the specified operand except
  /// for liveness related flags (isKill, isUndef and isDead). Note that this
  /// should stay in sync with the hash_value overload below.
  bool isIdenticalTo(const MachineOperand &Other) const;

  /// MachineOperand hash_value overload.
  ///
  /// Note that this includes the same information in the hash that
  /// isIdenticalTo uses for comparison. It is thus suited for use in hash
  /// tables which use that function for equality comparisons only. This must
  /// stay exactly in sync with isIdenticalTo above.
  friend hash_code hash_value(const MachineOperand &MO);

  /// ChangeToImmediate - Replace this operand with a new immediate operand of
  /// the specified value.  If an operand is known to be an immediate already,
  /// the setImm method should be used.
  void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags = 0);

  /// ChangeToFPImmediate - Replace this operand with a new FP immediate operand
  /// of the specified value.  If an operand is known to be an FP immediate
  /// already, the setFPImm method should be used.
  void ChangeToFPImmediate(const ConstantFP *FPImm, unsigned TargetFlags = 0);

  /// ChangeToES - Replace this operand with a new external symbol operand.
  void ChangeToES(const char *SymName, unsigned TargetFlags = 0);

  /// ChangeToGA - Replace this operand with a new global address operand.
  void ChangeToGA(const GlobalValue *GV, int64_t Offset,
                  unsigned TargetFlags = 0);

  /// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
  void ChangeToMCSymbol(MCSymbol *Sym, unsigned TargetFlags = 0);

  /// Replace this operand with a frame index.
  void ChangeToFrameIndex(int Idx, unsigned TargetFlags = 0);

  /// Replace this operand with a target index.
  void ChangeToTargetIndex(unsigned Idx, int64_t Offset,
                           unsigned TargetFlags = 0);

  /// Replace this operand with an Instruction Reference.
  void ChangeToDbgInstrRef(unsigned InstrIdx, unsigned OpIdx,
                           unsigned TargetFlags = 0);

  /// ChangeToRegister - Replace this operand with a new register operand of
  /// the specified value.  If an operand is known to be a register already,
  /// the setReg method should be used.
  void ChangeToRegister(Register Reg, bool isDef, bool isImp = false,
                        bool isKill = false, bool isDead = false,
                        bool isUndef = false, bool isDebug = false);

  /// getTargetIndexName - If this MachineOperand is a TargetIndex that has a
  /// name, attempt to get the name. Returns nullptr if the TargetIndex does not
  /// have a name. Asserts if MO is not a TargetIndex.
  const char *getTargetIndexName() const;

  //===--------------------------------------------------------------------===//
  // Construction methods.
  //===--------------------------------------------------------------------===//

  static MachineOperand CreateImm(int64_t Val) {
    MachineOperand Op(MachineOperand::MO_Immediate);
    Op.setImm(Val);
    return Op;
  }
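
  // Illustrative sketch: operands built with the Create* methods are typically
  // appended to an instruction via MachineInstr::addOperand (MF is assumed to
  // be the owning MachineFunction).
  //   MachineOperand Imm = MachineOperand::CreateImm(42);
  //   MI->addOperand(MF, Imm);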

  static MachineOperand CreateCImm(const ConstantInt *CI) {
    MachineOperand Op(MachineOperand::MO_CImmediate);
    Op.Contents.CI = CI;
    return Op;
  }

  static MachineOperand CreateFPImm(const ConstantFP *CFP) {
    MachineOperand Op(MachineOperand::MO_FPImmediate);
    Op.Contents.CFP = CFP;
    return Op;
  }

  static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp = false,
                                  bool isKill = false, bool isDead = false,
                                  bool isUndef = false,
                                  bool isEarlyClobber = false,
                                  unsigned SubReg = 0, bool isDebug = false,
                                  bool isInternalRead = false,
                                  bool isRenamable = false) {
    assert(!(isDead && !isDef) && "Dead flag on non-def");
    assert(!(isKill && isDef) && "Kill flag on def");
    MachineOperand Op(MachineOperand::MO_Register);
    Op.IsDef = isDef;
    Op.IsImp = isImp;
    Op.IsDeadOrKill = isKill | isDead;
    Op.IsRenamable = isRenamable;
    Op.IsUndef = isUndef;
    Op.IsInternalRead = isInternalRead;
    Op.IsEarlyClobber = isEarlyClobber;
    Op.TiedTo = 0;
    Op.IsDebug = isDebug;
    Op.SmallContents.RegNo = Reg;
    Op.Contents.Reg.Prev = nullptr;
    Op.Contents.Reg.Next = nullptr;
    Op.setSubReg(SubReg);
    return Op;
  }
  static MachineOperand CreateMBB(MachineBasicBlock *MBB,
                                  unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
    Op.setMBB(MBB);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  static MachineOperand CreateFI(int Idx) {
    MachineOperand Op(MachineOperand::MO_FrameIndex);
    Op.setIndex(Idx);
    return Op;
  }
  static MachineOperand CreateCPI(unsigned Idx, int Offset,
                                  unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
    Op.setIndex(Idx);
    Op.setOffset(Offset);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
                                          unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_TargetIndex);
    Op.setIndex(Idx);
    Op.setOffset(Offset);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  static MachineOperand CreateJTI(unsigned Idx, unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_JumpTableIndex);
    Op.setIndex(Idx);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
                                 unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_GlobalAddress);
    Op.Contents.OffsetedInfo.Val.GV = GV;
    Op.setOffset(Offset);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  static MachineOperand CreateES(const char *SymName,
                                 unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_ExternalSymbol);
    Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
    Op.setOffset(0); // Offset is always 0.
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
                                 unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_BlockAddress);
    Op.Contents.OffsetedInfo.Val.BA = BA;
    Op.setOffset(Offset);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }
  /// CreateRegMask - Creates a register mask operand referencing Mask.  The
  /// operand does not take ownership of the memory referenced by Mask, it
  /// must remain valid for the lifetime of the operand.
  ///
  /// A RegMask operand represents a set of non-clobbered physical registers
  /// on an instruction that clobbers many registers, typically a call.  The
  /// bit mask has a bit set for each physreg that is preserved by this
  /// instruction, as described in the documentation for
  /// TargetRegisterInfo::getCallPreservedMask().
  ///
  /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
  ///
  static MachineOperand CreateRegMask(const uint32_t *Mask) {
    assert(Mask && "Missing register mask");
    MachineOperand Op(MachineOperand::MO_RegisterMask);
    Op.Contents.RegMask = Mask;
    return Op;
  }
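
  // Illustrative sketch: a call's clobber set is usually obtained from
  // TargetRegisterInfo::getCallPreservedMask (TRI and MF assumed available).
  //   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallingConv::C);
  //   MI->addOperand(MF, MachineOperand::CreateRegMask(Mask));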
  static MachineOperand CreateRegLiveOut(const uint32_t *Mask) {
    assert(Mask && "Missing live-out register mask");
    MachineOperand Op(MachineOperand::MO_RegisterLiveOut);
    Op.Contents.RegMask = Mask;
    return Op;
  }
  static MachineOperand CreateMetadata(const MDNode *Meta) {
    MachineOperand Op(MachineOperand::MO_Metadata);
    Op.Contents.MD = Meta;
    return Op;
  }

  static MachineOperand CreateMCSymbol(MCSymbol *Sym,
                                       unsigned TargetFlags = 0) {
    MachineOperand Op(MachineOperand::MO_MCSymbol);
    Op.Contents.Sym = Sym;
    Op.setOffset(0);
    Op.setTargetFlags(TargetFlags);
    return Op;
  }

  static MachineOperand CreateDbgInstrRef(unsigned InstrIdx, unsigned OpIdx) {
    MachineOperand Op(MachineOperand::MO_DbgInstrRef);
    Op.Contents.InstrRef.InstrIdx = InstrIdx;
    Op.Contents.InstrRef.OpIdx = OpIdx;
    return Op;
  }

  static MachineOperand CreateCFIIndex(unsigned CFIIndex) {
    MachineOperand Op(MachineOperand::MO_CFIIndex);
    Op.Contents.CFIIndex = CFIIndex;
    return Op;
  }

  static MachineOperand CreateIntrinsicID(Intrinsic::ID ID) {
    MachineOperand Op(MachineOperand::MO_IntrinsicID);
    Op.Contents.IntrinsicID = ID;
    return Op;
  }

  static MachineOperand CreatePredicate(unsigned Pred) {
    MachineOperand Op(MachineOperand::MO_Predicate);
    Op.Contents.Pred = Pred;
    return Op;
  }

  static MachineOperand CreateShuffleMask(ArrayRef<int> Mask) {
    MachineOperand Op(MachineOperand::MO_ShuffleMask);
    Op.Contents.ShuffleMask = Mask;
    return Op;
  }

  friend class MachineInstr;
  friend class MachineRegisterInfo;

private:
  // If this operand is currently a register operand, and if this is in a
  // function, deregister the operand from the register's use/def list.
  void removeRegFromUses();

  /// Artificial kinds for DenseMap usage.
  enum : unsigned char {
    MO_Empty = MO_Last + 1,
    MO_Tombstone,
  };

  friend struct DenseMapInfo<MachineOperand>;

  //===--------------------------------------------------------------------===//
  // Methods for handling register use/def lists.
  //===--------------------------------------------------------------------===//

  /// isOnRegUseList - Return true if this operand is on a register use/def
  /// list or false if not.  This can only be called for register operands
  /// that are part of a machine instruction.
  bool isOnRegUseList() const {
    assert(isReg() && "Can only add reg operand to use lists");
    return Contents.Reg.Prev != nullptr;
  }
};

template <> struct DenseMapInfo<MachineOperand> {
  static MachineOperand getEmptyKey() {
    return MachineOperand(static_cast<MachineOperand::MachineOperandType>(
        MachineOperand::MO_Empty));
  }
  static MachineOperand getTombstoneKey() {
    return MachineOperand(static_cast<MachineOperand::MachineOperandType>(
        MachineOperand::MO_Tombstone));
  }
  static unsigned getHashValue(const MachineOperand &MO) {
    return hash_value(MO);
  }
  static bool isEqual(const MachineOperand &LHS, const MachineOperand &RHS) {
    if (LHS.getType() == static_cast<MachineOperand::MachineOperandType>(
                             MachineOperand::MO_Empty) ||
        LHS.getType() == static_cast<MachineOperand::MachineOperandType>(
                             MachineOperand::MO_Tombstone))
      return LHS.getType() == RHS.getType();
    return LHS.isIdenticalTo(RHS);
  }
};
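
// Illustrative sketch: with this specialization, MachineOperand can key a
// DenseMap directly; equality is isIdenticalTo and hashing is hash_value.
//   DenseMap<MachineOperand, unsigned> UseCount;
//   ++UseCount[MachineOperand::CreateImm(42)];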

inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand &MO) {
  MO.print(OS);
  return OS;
}

// See friend declaration above. This additional declaration is required in
// order to compile LLVM with IBM xlC compiler.
hash_code hash_value(const MachineOperand &MO);
} // namespace llvm

#endif

//===- MachinePassRegistry.def - Registry of passes -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is used as the registry of passes that belong to the
// target-independent code generator.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!
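
// Typical usage is the X-macro pattern: a client defines only the macros it
// cares about before including this file; anything left undefined gets the
// empty expansion below. Illustrative sketch (FPM and Name are assumed
// client-side variables):
//
//   #define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)                       \
//     if (Name == NAME)                                                       \
//       FPM.addPass(PASS_NAME CONSTRUCTOR);
//   #include "llvm/CodeGen/MachinePassRegistry.def"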

#ifndef MODULE_ANALYSIS
#define MODULE_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
MODULE_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
#undef MODULE_ANALYSIS

#ifndef MODULE_PASS
#define MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
MODULE_PASS("pre-isel-intrinsic-lowering", PreISelIntrinsicLoweringPass, ())
#undef MODULE_PASS

#ifndef FUNCTION_ANALYSIS
#define FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
FUNCTION_ANALYSIS("targetir", TargetIRAnalysis, (std::move(TM.getTargetIRAnalysis())))
#undef FUNCTION_ANALYSIS

#ifndef FUNCTION_PASS
#define FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
FUNCTION_PASS("mergeicmps", MergeICmpsPass, ())
FUNCTION_PASS("lower-constant-intrinsics", LowerConstantIntrinsicsPass, ())
FUNCTION_PASS("unreachableblockelim", UnreachableBlockElimPass, ())
FUNCTION_PASS("consthoist", ConstantHoistingPass, ())
FUNCTION_PASS("replace-with-veclib", ReplaceWithVeclib, ())
FUNCTION_PASS("partially-inline-libcalls", PartiallyInlineLibCallsPass, ())
FUNCTION_PASS("ee-instrument", EntryExitInstrumenterPass, (false))
FUNCTION_PASS("post-inline-ee-instrument", EntryExitInstrumenterPass, (true))
FUNCTION_PASS("expand-large-div-rem", ExpandLargeDivRemPass, ())
FUNCTION_PASS("expand-large-fp-convert", ExpandLargeFpConvertPass, ())
FUNCTION_PASS("expand-reductions", ExpandReductionsPass, ())
FUNCTION_PASS("expandvp", ExpandVectorPredicationPass, ())
FUNCTION_PASS("lowerinvoke", LowerInvokePass, ())
FUNCTION_PASS("scalarize-masked-mem-intrin", ScalarizeMaskedMemIntrinPass, ())
FUNCTION_PASS("tlshoist", TLSVariableHoistPass, ())
FUNCTION_PASS("verify", VerifierPass, ())
#undef FUNCTION_PASS

#ifndef LOOP_PASS
#define LOOP_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
LOOP_PASS("loop-reduce", LoopStrengthReducePass, ())
#undef LOOP_PASS

#ifndef MACHINE_MODULE_PASS
#define MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
#undef MACHINE_MODULE_PASS

#ifndef MACHINE_FUNCTION_ANALYSIS
#define MACHINE_FUNCTION_ANALYSIS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
MACHINE_FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis, (PIC))
// LiveVariables currently requires pure SSA form.
// FIXME: Once TwoAddressInstruction pass no longer uses kill flags,
// LiveVariables can be removed completely, and LiveIntervals can be directly
// computed. (We still either need to regenerate kill flags after regalloc, or
// preferably fix the scavenger to not depend on them).
// MACHINE_FUNCTION_ANALYSIS("live-vars", LiveVariablesAnalysis())

// MACHINE_FUNCTION_ANALYSIS("live-stacks", LiveStacksPass())
// MACHINE_FUNCTION_ANALYSIS("slot-indexes", SlotIndexesAnalysis())
// MACHINE_FUNCTION_ANALYSIS("edge-bundles", EdgeBundlesAnalysis())
// MACHINE_FUNCTION_ANALYSIS("lazy-machine-bfi", LazyMachineBlockFrequencyInfoAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-bfi", MachineBlockFrequencyInfoAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-loops", MachineLoopInfoAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-dom-frontier", MachineDominanceFrontierAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-dom-tree", MachineDominatorTreeAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-ore", MachineOptimizationRemarkEmitterPassAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-post-dom-tree", MachinePostDominatorTreeAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-region-info", MachineRegionInfoPassAnalysis())
// MACHINE_FUNCTION_ANALYSIS("machine-trace-metrics", MachineTraceMetricsAnalysis())
// MACHINE_FUNCTION_ANALYSIS("reaching-def", ReachingDefAnalysisAnalysis())
// MACHINE_FUNCTION_ANALYSIS("live-reg-matrix", LiveRegMatrixAnalysis())
// MACHINE_FUNCTION_ANALYSIS("gc-analysis", GCMachineCodeAnalysisPass())
#undef MACHINE_FUNCTION_ANALYSIS

#ifndef MACHINE_FUNCTION_PASS
#define MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
// MACHINE_FUNCTION_PASS("mir-printer", PrintMIRPass, ())
// MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass, ())
#undef MACHINE_FUNCTION_PASS

// After a pass is converted to the new pass manager, its entry should be moved
// from the dummy table to the normal one. For example, for a machine function
// pass, from DUMMY_MACHINE_FUNCTION_PASS to MACHINE_FUNCTION_PASS.

#ifndef DUMMY_FUNCTION_PASS
#define DUMMY_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
DUMMY_FUNCTION_PASS("expandmemcmp", ExpandMemCmpPass, ())
DUMMY_FUNCTION_PASS("gc-lowering", GCLoweringPass, ())
DUMMY_FUNCTION_PASS("shadow-stack-gc-lowering", ShadowStackGCLoweringPass, ())
DUMMY_FUNCTION_PASS("sjljehprepare", SjLjEHPreparePass, ())
DUMMY_FUNCTION_PASS("dwarfehprepare", DwarfEHPass, ())
DUMMY_FUNCTION_PASS("winehprepare", WinEHPass, ())
DUMMY_FUNCTION_PASS("wasmehprepare", WasmEHPass, ())
DUMMY_FUNCTION_PASS("codegenprepare", CodeGenPreparePass, ())
DUMMY_FUNCTION_PASS("safe-stack", SafeStackPass, ())
DUMMY_FUNCTION_PASS("stack-protector", StackProtectorPass, ())
DUMMY_FUNCTION_PASS("atomic-expand", AtomicExpandPass, ())
DUMMY_FUNCTION_PASS("interleaved-access", InterleavedAccessPass, ())
DUMMY_FUNCTION_PASS("indirectbr-expand", IndirectBrExpandPass, ())
DUMMY_FUNCTION_PASS("cfguard-dispatch", CFGuardDispatchPass, ())
DUMMY_FUNCTION_PASS("cfguard-check", CFGuardCheckPass, ())
DUMMY_FUNCTION_PASS("gc-info-printer", GCInfoPrinterPass, ())
DUMMY_FUNCTION_PASS("select-optimize", SelectOptimizePass, ())
DUMMY_FUNCTION_PASS("callbrprepare", CallBrPrepare, ())
#undef DUMMY_FUNCTION_PASS

#ifndef DUMMY_MODULE_PASS
#define DUMMY_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
DUMMY_MODULE_PASS("lower-emutls", LowerEmuTLSPass, ())
#undef DUMMY_MODULE_PASS

#ifndef DUMMY_MACHINE_MODULE_PASS
#define DUMMY_MACHINE_MODULE_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
DUMMY_MACHINE_MODULE_PASS("machine-outliner", MachineOutlinerPass, ())
#undef DUMMY_MACHINE_MODULE_PASS

#ifndef DUMMY_MACHINE_FUNCTION_PASS
#define DUMMY_MACHINE_FUNCTION_PASS(NAME, PASS_NAME, CONSTRUCTOR)
#endif
DUMMY_MACHINE_FUNCTION_PASS("mir-printer", PrintMIRPass, ())
DUMMY_MACHINE_FUNCTION_PASS("free-machine-function", FreeMachineFunctionPass, ())
DUMMY_MACHINE_FUNCTION_PASS("finalize-isel", FinalizeISelPass, ())
DUMMY_MACHINE_FUNCTION_PASS("localstackalloc", LocalStackSlotPass, ())
DUMMY_MACHINE_FUNCTION_PASS("shrink-wrap", ShrinkWrapPass, ())
DUMMY_MACHINE_FUNCTION_PASS("prologepilog", PrologEpilogInserterPass, ())
DUMMY_MACHINE_FUNCTION_PASS("postrapseudos", ExpandPostRAPseudosPass, ())
DUMMY_MACHINE_FUNCTION_PASS("implicit-null-checks", ImplicitNullChecksPass, ())
DUMMY_MACHINE_FUNCTION_PASS("postmisched", PostMachineSchedulerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-scheduler", MachineSchedulerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-cp", MachineCopyPropagationPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-latecleanup", MachineLateInstrsCleanupPass, ())
DUMMY_MACHINE_FUNCTION_PASS("post-RA-sched", PostRASchedulerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("fentry-insert", FEntryInserterPass, ())
DUMMY_MACHINE_FUNCTION_PASS("xray-instrumentation", XRayInstrumentationPass, ())
DUMMY_MACHINE_FUNCTION_PASS("patchable-function", PatchableFunctionPass, ())
DUMMY_MACHINE_FUNCTION_PASS("reg-usage-propagation", RegUsageInfoPropagationPass, ())
DUMMY_MACHINE_FUNCTION_PASS("reg-usage-collector", RegUsageInfoCollectorPass, ())
DUMMY_MACHINE_FUNCTION_PASS("funclet-layout", FuncletLayoutPass, ())
DUMMY_MACHINE_FUNCTION_PASS("stackmap-liveness", StackMapLivenessPass, ())
DUMMY_MACHINE_FUNCTION_PASS("removeredundantdebugvalues", RemoveRedundantDebugValuesPass, ())
DUMMY_MACHINE_FUNCTION_PASS("dot-machine-cfg", MachineCFGPrinter, ())
DUMMY_MACHINE_FUNCTION_PASS("livedebugvalues", LiveDebugValuesPass, ())
DUMMY_MACHINE_FUNCTION_PASS("early-tailduplication", EarlyTailDuplicatePass, ())
DUMMY_MACHINE_FUNCTION_PASS("opt-phis", OptimizePHIsPass, ())
DUMMY_MACHINE_FUNCTION_PASS("stack-coloring", StackColoringPass, ())
DUMMY_MACHINE_FUNCTION_PASS("dead-mi-elimination", DeadMachineInstructionElimPass, ())
DUMMY_MACHINE_FUNCTION_PASS("early-machinelicm", EarlyMachineLICMPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machinelicm", MachineLICMPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-cse", MachineCSEPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-sink", MachineSinkingPass, ())
DUMMY_MACHINE_FUNCTION_PASS("postra-machine-sink", PostRAMachineSinkingPass, ())
DUMMY_MACHINE_FUNCTION_PASS("peephole-opt", PeepholeOptimizerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("regalloc", RegAllocPass, ())
DUMMY_MACHINE_FUNCTION_PASS("virtregrewriter", VirtRegRewriterPass, ())
DUMMY_MACHINE_FUNCTION_PASS("stack-slot-coloring", StackSlotColoringPass, ())
DUMMY_MACHINE_FUNCTION_PASS("phi-node-elimination", PHIEliminationPass, ())
DUMMY_MACHINE_FUNCTION_PASS("twoaddressinstruction", TwoAddressInstructionPass, ())
DUMMY_MACHINE_FUNCTION_PASS("detect-dead-lanes", DetectDeadLanesPass, ())
DUMMY_MACHINE_FUNCTION_PASS("processimpdefs", ProcessImplicitDefsPass, ())
DUMMY_MACHINE_FUNCTION_PASS("liveintervals", LiveIntervalsPass, ())
DUMMY_MACHINE_FUNCTION_PASS("simple-register-coalescing", RegisterCoalescerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("rename-independent-subregs", RenameIndependentSubregsPass, ())
DUMMY_MACHINE_FUNCTION_PASS("branch-folder", BranchFolderPass, ())
DUMMY_MACHINE_FUNCTION_PASS("tailduplication", TailDuplicatePass, ())
DUMMY_MACHINE_FUNCTION_PASS("block-placement", MachineBlockPlacementPass, ())
DUMMY_MACHINE_FUNCTION_PASS("block-placement-stats", MachineBlockPlacementStatsPass, ())
DUMMY_MACHINE_FUNCTION_PASS("early-ifcvt", EarlyIfConverterPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-combiner", MachineCombinerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("lrshrink", LiveRangeShrinkPass, ())
DUMMY_MACHINE_FUNCTION_PASS("break-false-deps", BreakFalseDepsPass, ())
DUMMY_MACHINE_FUNCTION_PASS("cfi-instr-inserter", CFIInstrInserterPass, ())
DUMMY_MACHINE_FUNCTION_PASS("cfguard-longjmp", CFGuardLongjmpPass, ())
DUMMY_MACHINE_FUNCTION_PASS("ra-basic", RABasicPass, ())
DUMMY_MACHINE_FUNCTION_PASS("ra-fast", RAFastPass, ())
DUMMY_MACHINE_FUNCTION_PASS("ra-greedy", RAGreedyPass, ())
DUMMY_MACHINE_FUNCTION_PASS("ra-pbqp", RAPBQPPass, ())
DUMMY_MACHINE_FUNCTION_PASS("legalizer", LegalizerPass, ())
DUMMY_MACHINE_FUNCTION_PASS("irtranslator", IRTranslatorPass, ())
DUMMY_MACHINE_FUNCTION_PASS("regbankselect", RegBankSelectPass, ())
DUMMY_MACHINE_FUNCTION_PASS("instruction-select", InstructionSelectPass, ())
DUMMY_MACHINE_FUNCTION_PASS("reset-machine-function", ResetMachineFunctionPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machineverifier", MachineVerifierPass, ())
DUMMY_MACHINE_FUNCTION_PASS("print-machine-cycles", MachineCycleInfoPrinterPass, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-sanmd", MachineSanitizerBinaryMetadata, ())
DUMMY_MACHINE_FUNCTION_PASS("machine-uniformity", MachineUniformityInfoWrapperPass, ())
DUMMY_MACHINE_FUNCTION_PASS("print-machine-uniformity", MachineUniformityInfoPrinterPass, ())
#undef DUMMY_MACHINE_FUNCTION_PASS

//===- llvm/CodeGen/TargetSubtargetInfo.h - Target Information --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the subtarget options of a Target machine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSUBTARGETINFO_H
#define LLVM_CODEGEN_TARGETSUBTARGETINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <vector>

namespace llvm {

class APInt;
class MachineFunction;
class ScheduleDAGMutation;
class CallLowering;
class GlobalValue;
class InlineAsmLowering;
class InstrItineraryData;
struct InstrStage;
class InstructionSelector;
class LegalizerInfo;
class MachineInstr;
struct MachineSchedPolicy;
struct MCReadAdvanceEntry;
struct MCWriteLatencyEntry;
struct MCWriteProcResEntry;
class RegisterBankInfo;
class SDep;
class SelectionDAGTargetInfo;
class SUnit;
class TargetFrameLowering;
class TargetInstrInfo;
class TargetLowering;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class Triple;

//===----------------------------------------------------------------------===//
///
/// TargetSubtargetInfo - Generic base class for all target subtargets.  All
/// Target-specific options that control code generation and printing should
/// be exposed through a TargetSubtargetInfo-derived class.
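///
/// A target typically overrides the accessors below to expose its concrete
/// objects, e.g. (illustrative sketch; the Foo* types are hypothetical):
/// \code
///   class FooSubtarget : public TargetSubtargetInfo {
///     FooInstrInfo InstrInfo;
///   public:
///     const TargetInstrInfo *getInstrInfo() const override {
///       return &InstrInfo;
///     }
///   };
/// \endcode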
///
class TargetSubtargetInfo : public MCSubtargetInfo {
protected: // Can only create subclasses...
  TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef TuneCPU,
                      StringRef FS, ArrayRef<SubtargetFeatureKV> PF,
                      ArrayRef<SubtargetSubTypeKV> PD,
                      const MCWriteProcResEntry *WPR,
                      const MCWriteLatencyEntry *WL,
                      const MCReadAdvanceEntry *RA, const InstrStage *IS,
                      const unsigned *OC, const unsigned *FP);

public:
  // AntiDepBreakMode - Type of anti-dependence breaking that should
  // be performed before post-RA scheduling.
  using AntiDepBreakMode = enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL };
  using RegClassVector = SmallVectorImpl<const TargetRegisterClass *>;

  TargetSubtargetInfo() = delete;
  TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
  TargetSubtargetInfo &operator=(const TargetSubtargetInfo &) = delete;
  ~TargetSubtargetInfo() override;

  virtual bool isXRaySupported() const { return false; }

  // Interfaces to the major aspects of target machine information:
  //
  // -- Instruction opcode and operand information
  // -- Pipelines and scheduling information
  // -- Stack frame information
  // -- Selection DAG lowering information
  // -- Call lowering information
  //
  // N.B. These objects may change during compilation. It's not safe to cache
  // them between functions.
  virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
  virtual const TargetFrameLowering *getFrameLowering() const {
    return nullptr;
  }
  virtual const TargetLowering *getTargetLowering() const { return nullptr; }
  virtual const SelectionDAGTargetInfo *getSelectionDAGInfo() const {
    return nullptr;
  }
  virtual const CallLowering *getCallLowering() const { return nullptr; }

  virtual const InlineAsmLowering *getInlineAsmLowering() const {
    return nullptr;
  }

  // FIXME: This lets targets specialize the selector by subtarget (which lets
  // us do things like a dedicated avx512 selector).  However, we might want
  // to also specialize selectors by MachineFunction, which would let us be
  // aware of optsize/optnone and such.
  virtual InstructionSelector *getInstructionSelector() const {
    return nullptr;
  }

  /// Target can subclass this hook to select a different DAG scheduler.
  virtual RegisterScheduler::FunctionPassCtor
      getDAGScheduler(CodeGenOpt::Level) const {
    return nullptr;
  }

  virtual const LegalizerInfo *getLegalizerInfo() const { return nullptr; }

  /// getRegisterInfo - If register information is available, return it.  If
  /// not, return null.
  virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }

  /// If the information for the register banks is available, return it.
  /// Otherwise return nullptr.
  virtual const RegisterBankInfo *getRegBankInfo() const { return nullptr; }

  /// getInstrItineraryData - Returns instruction itinerary data for the target
  /// or specific subtarget.
  virtual const InstrItineraryData *getInstrItineraryData() const {
    return nullptr;
  }

  /// Resolve a SchedClass at runtime, where SchedClass identifies an
  /// MCSchedClassDesc with the isVariant property. This may return the ID of
  /// another variant SchedClass, but repeated invocation must quickly terminate
  /// in a nonvariant SchedClass.
  virtual unsigned resolveSchedClass(unsigned SchedClass,
                                     const MachineInstr *MI,
                                     const TargetSchedModel *SchedModel) const {
    return 0;
  }

  /// Returns true if MI is a dependency breaking zero-idiom instruction for the
  /// subtarget.
  ///
  /// This function also sets bits in Mask related to input operands that
  /// are not in a data dependency relationship.  There is one bit for each
  /// machine operand; implicit operands follow explicit operands in the bit
  /// representation used for Mask.  An empty mask (i.e. one with all bits
  /// cleared) means that data dependencies are "broken" for all the explicit
  /// input machine operands of MI.
  virtual bool isZeroIdiom(const MachineInstr *MI, APInt &Mask) const {
    return false;
  }
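
  // Illustrative sketch (Foo::XORrr is a hypothetical opcode): a
  // register-to-self XOR is the classic zero idiom; leaving Mask all-clear
  // marks every explicit input operand as dependency free.
  //   bool isZeroIdiom(const MachineInstr *MI, APInt &Mask) const override {
  //     if (MI->getOpcode() == Foo::XORrr &&
  //         MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
  //       Mask.clearAllBits();
  //       return true;
  //     }
  //     return false;
  //   }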

  /// Returns true if MI is a dependency breaking instruction for the subtarget.
  ///
  /// Similar in behavior to `isZeroIdiom`. However, it knows how to identify
  /// all dependency breaking instructions (i.e. not just zero-idioms).
  ///
  /// As with `isZeroIdiom`, this method returns a mask of "broken" dependencies
  /// (see `isZeroIdiom` for a detailed description of Mask).
  virtual bool isDependencyBreaking(const MachineInstr *MI, APInt &Mask) const {
    return isZeroIdiom(MI, Mask);
  }

  /// Returns true if MI is a candidate for move elimination.
  ///
  /// A candidate for move elimination may be optimized out at register renaming
  /// stage. Subtargets can specify the set of optimizable moves by
  /// instantiating tablegen class `IsOptimizableRegisterMove` (see
  /// llvm/Target/TargetInstrPredicate.td).
  ///
  /// SubtargetEmitter is responsible for processing all the definitions of
  /// class IsOptimizableRegisterMove and for auto-generating an override for
  /// this method.
  virtual bool isOptimizableRegisterMove(const MachineInstr *MI) const {
    return false;
  }

  /// True if the subtarget should run MachineScheduler after aggressive
  /// coalescing.
  ///
  /// This currently replaces the SelectionDAG scheduler with the "source" order
  /// scheduler (though see below for an option to turn this off and use the
  /// TargetLowering preference). It does not yet disable the postRA scheduler.
  virtual bool enableMachineScheduler() const;

  /// True if the machine scheduler should disable the TLI preference
  /// for preRA scheduling with the source level scheduler.
  virtual bool enableMachineSchedDefaultSched() const { return true; }

  /// True if the subtarget should run the MachinePipeliner pass.
  virtual bool enableMachinePipeliner() const { return true; }

  /// True if the subtarget should enable joining global copies.
  ///
  /// By default this is enabled if the machine scheduler is enabled, but
  /// can be overridden.
  virtual bool enableJoinGlobalCopies() const;

  /// True if the subtarget should run a scheduler after register allocation.
  ///
  /// By default this queries the PostRAScheduling bit in the scheduling model
  /// which is the preferred way to influence this.
  virtual bool enablePostRAScheduler() const;

  /// True if the subtarget should run a machine scheduler after register
  /// allocation.
  virtual bool enablePostRAMachineScheduler() const;

  /// True if the subtarget should run the atomic expansion pass.
  virtual bool enableAtomicExpand() const;

  /// True if the subtarget should run the indirectbr expansion pass.
  virtual bool enableIndirectBrExpand() const;

  /// Override generic scheduling policy within a region.
  ///
  /// This is a convenient way for targets that don't provide any custom
  /// scheduling heuristics (no custom MachineSchedStrategy) to make
  /// changes to the generic scheduling policy.
  virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
                                   unsigned NumRegionInstrs) const {}
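
  // Illustrative sketch of a target override (OnlyBottomUp is assumed to be a
  // MachineSchedPolicy field):
  //   void overrideSchedPolicy(MachineSchedPolicy &Policy,
  //                            unsigned NumRegionInstrs) const override {
  //     if (NumRegionInstrs < 16)
  //       Policy.OnlyBottomUp = true; // Prefer bottom-up for tiny regions.
  //   }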

  // Perform target-specific adjustments to the latency of a schedule
  // dependency.
  // If a pair of operands is associated with the schedule dependency, DefOpIdx
  // and UseOpIdx are the indices of the operands in Def and Use, respectively.
  // Otherwise, either may be -1.
  virtual void adjustSchedDependency(SUnit *Def, int DefOpIdx, SUnit *Use,
                                     int UseOpIdx, SDep &Dep) const {}

  // For use with PostRAScheduling: get the anti-dependence breaking that should
  // be performed before post-RA scheduling.
  virtual AntiDepBreakMode getAntiDepBreakMode() const { return ANTIDEP_NONE; }

  // For use with PostRAScheduling: in CriticalPathRCs, return any register
  // classes that should only be considered for anti-dependence breaking if they
  // are on the critical path.
  virtual void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
    CriticalPathRCs.clear();
  }

  // Provide an ordered list of schedule DAG mutations for the post-RA
  // scheduler.
  virtual void getPostRAMutations(
      std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  }

  // Provide an ordered list of schedule DAG mutations for the machine
  // pipeliner.
  virtual void getSMSMutations(
      std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  }

  /// Default to DFA for resource management; return false when the target will
  /// use ProcResource in InstrSchedModel instead.
  virtual bool useDFAforSMS() const { return true; }

  // For use with PostRAScheduling: get the minimum optimization level needed
  // to enable post-RA scheduling.
  virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
    return CodeGenOpt::Default;
  }

  /// True if the subtarget should run the local reassignment
  /// heuristic of the register allocator.
  /// This heuristic may be compile-time intensive; \p OptLevel provides
  /// a finer grain to tune the register allocator.
  virtual bool enableRALocalReassignment(CodeGenOpt::Level OptLevel) const;

  /// Enable use of alias analysis during code generation (during MI
  /// scheduling, DAGCombine, etc.).
  virtual bool useAA() const;

  /// \brief Sink addresses into blocks using GEP instructions rather than
  /// pointer casts and arithmetic.
  virtual bool addrSinkUsingGEPs() const {
    return useAA();
  }

  /// Enable the use of the early if conversion pass.
  virtual bool enableEarlyIfConversion() const { return false; }

  /// Return PBQPConstraint(s) for the target.
  ///
  /// Override to provide custom PBQP constraints.
  virtual std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const {
    return nullptr;
  }

  /// Enable tracking of subregister liveness in register allocator.
  /// Please use MachineRegisterInfo::subRegLivenessEnabled() instead where
  /// possible.
  virtual bool enableSubRegLiveness() const { return false; }

  /// This is called after a .mir file was loaded.
  virtual void mirFileLoaded(MachineFunction &MF) const;

  /// True if the register allocator should use the allocation orders exactly as
  /// written in the tablegen descriptions, false if it should allocate
  /// the specified physical register later if it is callee-saved.
  virtual bool ignoreCSRForAllocationOrder(const MachineFunction &MF,
                                           unsigned PhysReg) const {
    return false;
  }

  /// Classify a global function reference. This is mainly used to fetch
  /// target-specific flags for lowering a function address, for example to
  /// mark whether a function call should use PLT or PC-relative addressing.
  virtual unsigned char
  classifyGlobalFunctionReference(const GlobalValue *GV) const {
    return 0;
  }

  /// Enable spillage copy elimination in the MachineCopyPropagation pass. This
  /// helps remove redundant copies generated by the register allocator when
  /// handling complex eviction chains.
  virtual bool enableSpillageCopyElimination() const { return false; }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETSUBTARGETINFO_H

//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SelectionDAG class, and transitively defines the
// SDNode class and subclasses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SELECTIONDAG_H
#define LLVM_CODEGEN_SELECTIONDAG_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/RecyclingAllocator.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace llvm {

class DIExpression;
class DILabel;
class DIVariable;
class Function;
class Pass;
class Type;
template <class GraphType> struct GraphTraits;
template <typename T, unsigned int N> class SmallSetVector;
template <typename T, typename Enable> struct FoldingSetTrait;
class AAResults;
class BlockAddress;
class BlockFrequencyInfo;
class Constant;
class ConstantFP;
class ConstantInt;
class DataLayout;
struct fltSemantics;
class FunctionLoweringInfo;
class FunctionVarLocs;
class GlobalValue;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineConstantPoolValue;
class MCSymbol;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class SDDbgValue;
class SDDbgOperand;
class SDDbgLabel;
class SelectionDAG;
class SelectionDAGTargetInfo;
class TargetLibraryInfo;
class TargetLowering;
class TargetMachine;
class TargetSubtargetInfo;
class Value;

template <typename T> class GenericSSAContext;
using SSAContext = GenericSSAContext<Function>;
template <typename T> class GenericUniformityInfo;
using UniformityInfo = GenericUniformityInfo<SSAContext>;

class SDVTListNode : public FoldingSetNode {
  friend struct FoldingSetTrait<SDVTListNode>;

  /// A reference to an interned FoldingSetNodeID for this node.
  /// The Allocator in SelectionDAG holds the data.
  /// SDVTList contains all types which are frequently accessed in SelectionDAG.
  /// The size of this list is not expected to be large, so it won't introduce
  /// a memory penalty.
  FoldingSetNodeIDRef FastID;
  const EVT *VTs;
  unsigned int NumVTs;
  /// The hash value for SDVTList is fixed, so cache it to avoid
  /// hash calculation.
  unsigned HashValue;

public:
  SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
      FastID(ID), VTs(VT), NumVTs(Num) {
    HashValue = ID.ComputeHash();
  }

  SDVTList getSDVTList() {
    SDVTList result = {VTs, NumVTs};
    return result;
  }
};

/// Specialize FoldingSetTrait for SDVTListNode
/// to avoid computing temp FoldingSetNodeID and hash value.
template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
  static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
    ID = X.FastID;
  }

  static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
                     unsigned IDHash, FoldingSetNodeID &TempID) {
    if (X.HashValue != IDHash)
      return false;
    return ID == X.FastID;
  }

  static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
    return X.HashValue;
  }
};

template <> struct ilist_alloc_traits<SDNode> {
  static void deleteNode(SDNode *) {
    llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
  }
};

/// Keeps track of dbg_value information through SDISel.  We do
/// not build SDNodes for these so as not to perturb the generated code;
/// instead the info is kept off to the side in this structure. Each SDNode may
/// have one or more associated dbg_value entries. This information is kept in
/// DbgValMap.
/// Byval parameters are handled separately because they don't use allocas,
/// which breaks the normal mechanism.  There is good reason for handling all
/// parameters separately:  they may not have code generated for them, they
/// should always go at the beginning of the function regardless of other code
/// motion, and debug info for them is potentially useful even if the parameter
/// is unused.  Right now only byval parameters are handled separately.
class SDDbgInfo {
  BumpPtrAllocator Alloc;
  SmallVector<SDDbgValue*, 32> DbgValues;
  SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
  SmallVector<SDDbgLabel*, 4> DbgLabels;
  using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
  DbgValMapType DbgValMap;

public:
  SDDbgInfo() = default;
  SDDbgInfo(const SDDbgInfo &) = delete;
  SDDbgInfo &operator=(const SDDbgInfo &) = delete;

  void add(SDDbgValue *V, bool isParameter);

  void add(SDDbgLabel *L) { DbgLabels.push_back(L); }

  /// Invalidate all DbgValues attached to the node and remove
  /// it from the Node-to-DbgValues map.
  void erase(const SDNode *Node);

  void clear() {
    DbgValMap.clear();
    DbgValues.clear();
    ByvalParmDbgValues.clear();
    DbgLabels.clear();
    Alloc.Reset();
  }

  BumpPtrAllocator &getAlloc() { return Alloc; }

  bool empty() const {
    return DbgValues.empty() && ByvalParmDbgValues.empty() && DbgLabels.empty();
  }

  ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) const {
    auto I = DbgValMap.find(Node);
    if (I != DbgValMap.end())
      return I->second;
    return ArrayRef<SDDbgValue*>();
  }

  using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;
  using DbgLabelIterator = SmallVectorImpl<SDDbgLabel*>::iterator;

  DbgIterator DbgBegin() { return DbgValues.begin(); }
  DbgIterator DbgEnd()   { return DbgValues.end(); }
  DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
  DbgIterator ByvalParmDbgEnd()   { return ByvalParmDbgValues.end(); }
  DbgLabelIterator DbgLabelBegin() { return DbgLabels.begin(); }
  DbgLabelIterator DbgLabelEnd()   { return DbgLabels.end(); }
};

void checkForCycles(const SelectionDAG *DAG, bool force = false);

/// This is used to represent a portion of an LLVM function in a low-level
/// Data Dependence DAG representation suitable for instruction selection.
/// This DAG is constructed as the first step of instruction selection in order
/// to allow implementation of machine-specific optimizations
/// and code simplifications.
///
/// The representation used by the SelectionDAG is a target-independent
/// representation, which has some similarities to the GCC RTL representation,
/// but is significantly simpler and more powerful, and is a graph form instead
/// of a linear form.
///
class SelectionDAG {
  const TargetMachine &TM;
  const SelectionDAGTargetInfo *TSI = nullptr;
  const TargetLowering *TLI = nullptr;
  const TargetLibraryInfo *LibInfo = nullptr;
  const FunctionVarLocs *FnVarLocs = nullptr;
  MachineFunction *MF;
  Pass *SDAGISelPass = nullptr;
  LLVMContext *Context;
  CodeGenOpt::Level OptLevel;

  UniformityInfo *UA = nullptr;
  FunctionLoweringInfo * FLI = nullptr;

  /// The function-level optimization remark emitter.  Used to emit remarks
  /// whenever manipulating the DAG.
  OptimizationRemarkEmitter *ORE;

  ProfileSummaryInfo *PSI = nullptr;
  BlockFrequencyInfo *BFI = nullptr;

  /// List of non-single value types.
  FoldingSet<SDVTListNode> VTListMap;

  /// Pool allocation for misc. objects that are created once per SelectionDAG.
  BumpPtrAllocator Allocator;

  /// The starting token.
  SDNode EntryNode;

  /// The root of the entire DAG.
  SDValue Root;

  /// A linked list of nodes in the current DAG.
  ilist<SDNode> AllNodes;

  /// The AllocatorType for allocating SDNodes. We use
  /// pool allocation with recycling.
  using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
                                               sizeof(LargestSDNode),
                                               alignof(MostAlignedSDNode)>;

  /// Pool allocation for nodes.
  NodeAllocatorType NodeAllocator;

  /// This structure is used to memoize nodes, automatically performing
  /// CSE with existing nodes when a duplicate is requested.
  FoldingSet<SDNode> CSEMap;

  /// Pool allocation for machine-opcode SDNode operands.
  BumpPtrAllocator OperandAllocator;
  ArrayRecycler<SDUse> OperandRecycler;

  /// Tracks dbg_value and dbg_label information through SDISel.
  SDDbgInfo *DbgInfo;

  using CallSiteInfo = MachineFunction::CallSiteInfo;
  using CallSiteInfoImpl = MachineFunction::CallSiteInfoImpl;

  struct NodeExtraInfo {
    CallSiteInfo CSInfo;
    MDNode *HeapAllocSite = nullptr;
    MDNode *PCSections = nullptr;
    bool NoMerge = false;
  };
  /// Out-of-line extra information for SDNodes.
  DenseMap<const SDNode *, NodeExtraInfo> SDEI;

  /// PersistentId counter to be used when inserting the next
  /// SDNode to this SelectionDAG. We do not place that under
  /// `#if LLVM_ENABLE_ABI_BREAKING_CHECKS` intentionally because
  /// it adds unneeded complexity without noticeable
  /// benefits (see discussion with @thakis in D120714).
  uint16_t NextPersistentId = 0;

public:
  /// Clients of various APIs that cause global effects on
  /// the DAG can optionally implement this interface.  This allows the clients
  /// to handle the various sorts of updates that happen.
  ///
  /// A DAGUpdateListener automatically registers itself with DAG when it is
  /// constructed, and removes itself when destroyed in RAII fashion.
  struct DAGUpdateListener {
    DAGUpdateListener *const Next;
    SelectionDAG &DAG;

    explicit DAGUpdateListener(SelectionDAG &D)
      : Next(D.UpdateListeners), DAG(D) {
      DAG.UpdateListeners = this;
    }

    virtual ~DAGUpdateListener() {
      assert(DAG.UpdateListeners == this &&
             "DAGUpdateListeners must be destroyed in LIFO order");
      DAG.UpdateListeners = Next;
    }

    /// The node N that was deleted and, if E is not null, an
    /// equivalent node E that replaced it.
    virtual void NodeDeleted(SDNode *N, SDNode *E);

    /// The node N that was updated.
    virtual void NodeUpdated(SDNode *N);

    /// The node N that was inserted.
    virtual void NodeInserted(SDNode *N);
  };

  struct DAGNodeDeletedListener : public DAGUpdateListener {
    std::function<void(SDNode *, SDNode *)> Callback;

    DAGNodeDeletedListener(SelectionDAG &DAG,
                           std::function<void(SDNode *, SDNode *)> Callback)
        : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}

    void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }

   private:
    virtual void anchor();
  };

  struct DAGNodeInsertedListener : public DAGUpdateListener {
    std::function<void(SDNode *)> Callback;

    DAGNodeInsertedListener(SelectionDAG &DAG,
                            std::function<void(SDNode *)> Callback)
        : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}

    void NodeInserted(SDNode *N) override { Callback(N); }

  private:
    virtual void anchor();
  };
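
  // Illustrative usage sketch: follow a node through replacements while some
  // transform runs; the listener registers itself on construction and
  // deregisters on destruction (RAII).
  //   DAGNodeDeletedListener NDL(DAG, [&](SDNode *N, SDNode *E) {
  //     if (N == NodeOfInterest) // NodeOfInterest: assumed client variable.
  //       NodeOfInterest = E;    // Follow the replacement, if any.
  //   });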

  /// Helps to insert SDNodeFlags automatically during transformations. Uses
  /// RAII to save and restore flags in the current scope.
  class FlagInserter {
    SelectionDAG &DAG;
    SDNodeFlags Flags;
    FlagInserter *LastInserter;

  public:
    FlagInserter(SelectionDAG &SDAG, SDNodeFlags Flags)
        : DAG(SDAG), Flags(Flags),
          LastInserter(SDAG.getFlagInserter()) {
      SDAG.setFlagInserter(this);
    }
    FlagInserter(SelectionDAG &SDAG, SDNode *N)
        : FlagInserter(SDAG, N->getFlags()) {}

    FlagInserter(const FlagInserter &) = delete;
    FlagInserter &operator=(const FlagInserter &) = delete;
    ~FlagInserter() { DAG.setFlagInserter(LastInserter); }

    SDNodeFlags getFlags() const { return Flags; }
  };
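
  // Illustrative usage sketch: while the inserter is in scope, node-creation
  // calls on the DAG pick up these flags automatically.
  //   {
  //     SelectionDAG::FlagInserter FlagsInserter(DAG, N->getFlags());
  //     SDValue R = DAG.getNode(ISD::ADD, DL, VT, A, B); // Inherits N's flags.
  //   }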

  /// When true, additional steps are taken to
  /// ensure that getConstant() and similar functions return DAG nodes that
  /// have legal types. This is important after type legalization since
  /// any illegally typed nodes generated after this point will not experience
  /// type legalization.
  bool NewNodesMustHaveLegalTypes = false;

private:
  /// DAGUpdateListener is a friend so it can manipulate the listener stack.
  friend struct DAGUpdateListener;

  /// Linked list of registered DAGUpdateListener instances.
  /// This stack is maintained by DAGUpdateListener RAII.
  DAGUpdateListener *UpdateListeners = nullptr;

  /// Implementation of setSubgraphColor.
  /// Return whether we had to truncate the search.
  bool setSubgraphColorHelper(SDNode *N, const char *Color,
                              DenseSet<SDNode *> &visited,
                              int level, bool &printed);

  template <typename SDNodeT, typename... ArgTypes>
  SDNodeT *newSDNode(ArgTypes &&... Args) {
    return new (NodeAllocator.template Allocate<SDNodeT>())
        SDNodeT(std::forward<ArgTypes>(Args)...);
  }

  /// Build a synthetic SDNodeT with the given args and extract its subclass
  /// data as an integer (e.g. for use in a folding set).
  ///
  /// The args to this function are the same as the args to SDNodeT's
  /// constructor, except the second arg (assumed to be a const DebugLoc&) is
  /// omitted.
  template <typename SDNodeT, typename... ArgTypes>
  static uint16_t getSyntheticNodeSubclassData(unsigned IROrder,
                                               ArgTypes &&... Args) {
    // The compiler can reduce this expression to a constant iff we pass an
    // empty DebugLoc.  Thankfully, the debug location doesn't have any bearing
    // on the subclass data.
    return SDNodeT(IROrder, DebugLoc(), std::forward<ArgTypes>(Args)...)
        .getRawSubclassData();
  }

  template <typename SDNodeTy>
  static uint16_t getSyntheticNodeSubclassData(unsigned Opc, unsigned Order,
                                                SDVTList VTs, EVT MemoryVT,
                                                MachineMemOperand *MMO) {
    return SDNodeTy(Opc, Order, DebugLoc(), VTs, MemoryVT, MMO)
         .getRawSubclassData();
  }

  void createOperands(SDNode *Node, ArrayRef<SDValue> Vals);

  void removeOperands(SDNode *Node) {
    if (!Node->OperandList)
      return;
    OperandRecycler.deallocate(
        ArrayRecycler<SDUse>::Capacity::get(Node->NumOperands),
        Node->OperandList);
    Node->NumOperands = 0;
    Node->OperandList = nullptr;
  }
  void CreateTopologicalOrder(std::vector<SDNode*>& Order);

public:
  // Maximum depth for recursive analysis such as computeKnownBits, etc.
  static constexpr unsigned MaxRecursionDepth = 6;

  explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
  SelectionDAG(const SelectionDAG &) = delete;
  SelectionDAG &operator=(const SelectionDAG &) = delete;
  ~SelectionDAG();

  /// Prepare this SelectionDAG to process code in the given MachineFunction.
  void init(MachineFunction &NewMF, OptimizationRemarkEmitter &NewORE,
            Pass *PassPtr, const TargetLibraryInfo *LibraryInfo,
            UniformityInfo *UA, ProfileSummaryInfo *PSIin,
            BlockFrequencyInfo *BFIin, FunctionVarLocs const *FnVarLocs);

  void setFunctionLoweringInfo(FunctionLoweringInfo * FuncInfo) {
    FLI = FuncInfo;
  }

  /// Clear state and free memory necessary to make this
  /// SelectionDAG ready to process a new block.
  void clear();

  MachineFunction &getMachineFunction() const { return *MF; }
  const Pass *getPass() const { return SDAGISelPass; }

  const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
  const TargetMachine &getTarget() const { return TM; }
  const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
  template <typename STC> const STC &getSubtarget() const {
    return MF->getSubtarget<STC>();
  }
  const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
  const TargetLibraryInfo &getLibInfo() const { return *LibInfo; }
  const SelectionDAGTargetInfo &getSelectionDAGInfo() const { return *TSI; }
  const UniformityInfo *getUniformityInfo() const { return UA; }
  /// Returns the result of the AssignmentTrackingAnalysis pass if it's
  /// available, otherwise returns nullptr.
  const FunctionVarLocs *getFunctionVarLocs() const { return FnVarLocs; }
  LLVMContext *getContext() const { return Context; }
  OptimizationRemarkEmitter &getORE() const { return *ORE; }
  ProfileSummaryInfo *getPSI() const { return PSI; }
  BlockFrequencyInfo *getBFI() const { return BFI; }

  FlagInserter *getFlagInserter() { return Inserter; }
  void setFlagInserter(FlagInserter *FI) { Inserter = FI; }

  /// Dump the dot graph to a user-provided path with the given title.
  /// This doesn't open the dot viewer program and
  /// helps visualization when outside a debugging session.
  /// FileName expects an absolute path; if it is provided
  /// without any path separators, the file
  /// will be created in the current directory.
  /// An error will be emitted if the path is invalid.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dumpDotGraph(const Twine &FileName, const Twine &Title);
#endif

  /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
  void viewGraph(const std::string &Title);
  void viewGraph();

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  std::map<const SDNode *, std::string> NodeGraphAttrs;
#endif

  /// Clear all previously defined node graph attributes.
  /// Intended to be used from a debugging tool (eg. gdb).
  void clearGraphAttrs();

  /// Set graph attributes for a node. (eg. "color=red".)
  void setGraphAttrs(const SDNode *N, const char *Attrs);

  /// Get graph attributes for a node. (eg. "color=red".)
  /// Used from getNodeAttributes.
  std::string getGraphAttrs(const SDNode *N) const;

  /// Convenience for setting node color attribute.
  void setGraphColor(const SDNode *N, const char *Color);

  /// Convenience for setting subgraph color attribute.
  void setSubgraphColor(SDNode *N, const char *Color);

  using allnodes_const_iterator = ilist<SDNode>::const_iterator;

  allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
  allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }

  using allnodes_iterator = ilist<SDNode>::iterator;

  allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
  allnodes_iterator allnodes_end() { return AllNodes.end(); }

  ilist<SDNode>::size_type allnodes_size() const {
    return AllNodes.size();
  }

  iterator_range<allnodes_iterator> allnodes() {
    return make_range(allnodes_begin(), allnodes_end());
  }
  iterator_range<allnodes_const_iterator> allnodes() const {
    return make_range(allnodes_begin(), allnodes_end());
  }

  /// Return the root tag of the SelectionDAG.
  const SDValue &getRoot() const { return Root; }

  /// Return the token chain corresponding to the entry of the function.
  SDValue getEntryNode() const {
    return SDValue(const_cast<SDNode *>(&EntryNode), 0);
  }

  /// Set the current root tag of the SelectionDAG.
  ///
  const SDValue &setRoot(SDValue N) {
    assert((!N.getNode() || N.getValueType() == MVT::Other) &&
           "DAG root value is not a chain!");
    if (N.getNode())
      checkForCycles(N.getNode(), this);
    Root = N;
    if (N.getNode())
      checkForCycles(this);
    return Root;
  }

#ifndef NDEBUG
  void VerifyDAGDivergence();
#endif

  /// This iterates over the nodes in the SelectionDAG, folding
  /// certain types of nodes together, or eliminating superfluous nodes.  The
  /// Level argument controls whether Combine is allowed to produce nodes and
  /// types that are illegal on the target.
  void Combine(CombineLevel Level, AAResults *AA,
               CodeGenOpt::Level OptLevel);

  /// This transforms the SelectionDAG into a SelectionDAG that
  /// only uses types natively supported by the target.
  /// Returns "true" if it made any changes.
  ///
  /// Note that this is an involved process that may invalidate pointers into
  /// the graph.
  bool LegalizeTypes();

  /// This transforms the SelectionDAG into a SelectionDAG that is
  /// compatible with the target instruction selector, as indicated by the
  /// TargetLowering object.
  ///
  /// Note that this is an involved process that may invalidate pointers into
  /// the graph.
  void Legalize();

  /// Transforms a SelectionDAG node and any operands to it into a node
  /// that is compatible with the target instruction selector, as indicated by
  /// the TargetLowering object.
  ///
  /// \returns true if \c N is a valid, legal node after calling this.
  ///
  /// This essentially runs a single recursive walk of the \c Legalize process
  /// over the given node (and its operands). This can be used to incrementally
  /// legalize the DAG. All of the nodes which are directly replaced,
  /// potentially including N, are added to the output parameter \c
  /// UpdatedNodes so that the delta to the DAG can be understood by the
  /// caller.
  ///
  /// When this returns false, N has been legalized in a way that makes the
  /// pointer passed in no longer valid. It may have even been deleted from the
  /// DAG, and so it shouldn't be used further. When this returns true, the
  /// N passed in is a legal node, and can be immediately processed as such.
  /// This may still have done some work on the DAG, and will still populate
  /// UpdatedNodes with any new nodes replacing those originally in the DAG.
  bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);

  /// This transforms the SelectionDAG into a SelectionDAG
  /// that only uses vector math operations supported by the target.  This is
  /// necessary as a separate step from Legalize because unrolling a vector
  /// operation can introduce illegal types, which requires running
  /// LegalizeTypes again.
  ///
  /// This returns true if it made any changes; in that case, LegalizeTypes
  /// is called again before Legalize.
  ///
  /// Note that this is an involved process that may invalidate pointers into
  /// the graph.
  bool LegalizeVectors();

  /// This method deletes all unreachable nodes in the SelectionDAG.
  void RemoveDeadNodes();

  /// Remove the specified node from the system.  This node must
  /// have no referrers.
  void DeleteNode(SDNode *N);

  /// Return an SDVTList that represents the list of values specified.
  SDVTList getVTList(EVT VT);
  SDVTList getVTList(EVT VT1, EVT VT2);
  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
  SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
  SDVTList getVTList(ArrayRef<EVT> VTs);

  //===--------------------------------------------------------------------===//
  // Node creation methods.

  /// Create a ConstantSDNode wrapping a constant value.
  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
  ///
  /// If only legal types can be produced, this does the necessary
  /// transformations (e.g., if the vector element type is illegal).
  /// @{
  SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                      bool isTarget = false, bool isOpaque = false);
  SDValue getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                      bool isTarget = false, bool isOpaque = false);

  SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget = false,
                             bool IsOpaque = false) {
    return getConstant(APInt::getAllOnes(VT.getScalarSizeInBits()), DL, VT,
                       IsTarget, IsOpaque);
  }

  SDValue getConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
                      bool isTarget = false, bool isOpaque = false);
  SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL,
                            bool isTarget = false);
  SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL,
                                 bool LegalTypes = true);
  SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL,
                               bool isTarget = false);

  SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT,
                            bool isOpaque = false) {
    return getConstant(Val, DL, VT, true, isOpaque);
  }
  SDValue getTargetConstant(const APInt &Val, const SDLoc &DL, EVT VT,
                            bool isOpaque = false) {
    return getConstant(Val, DL, VT, true, isOpaque);
  }
  SDValue getTargetConstant(const ConstantInt &Val, const SDLoc &DL, EVT VT,
                            bool isOpaque = false) {
    return getConstant(Val, DL, VT, true, isOpaque);
  }

  /// Create a true or false constant of type \p VT using the target's
  /// BooleanContent for type \p OpVT.
  SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT);
  /// @}
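
  // A minimal sketch of the constant creators above; DAG and DL are assumed
  // placeholders:
  //   SDValue C42    = DAG.getConstant(42, DL, MVT::i32);  // scalar constant
  //   SDValue Ones   = DAG.getAllOnesConstant(DL, MVT::i32);
  //   SDValue VSplat = DAG.getConstant(7, DL, MVT::v4i32); // splat BUILD_VECTOR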

  /// Create a ConstantFPSDNode wrapping a constant value.
  /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
  ///
  /// If only legal types can be produced, this does the necessary
  /// transformations (e.g., if the vector element type is illegal).
  /// The forms that take a double should only be used for simple constants
  /// that can be exactly represented in VT.  No checks are made.
  /// @{
  SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT,
                        bool isTarget = false);
  SDValue getConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT,
                        bool isTarget = false);
  SDValue getConstantFP(const ConstantFP &V, const SDLoc &DL, EVT VT,
                        bool isTarget = false);
  SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT) {
    return getConstantFP(Val, DL, VT, true);
  }
  SDValue getTargetConstantFP(const APFloat &Val, const SDLoc &DL, EVT VT) {
    return getConstantFP(Val, DL, VT, true);
  }
  SDValue getTargetConstantFP(const ConstantFP &Val, const SDLoc &DL, EVT VT) {
    return getConstantFP(Val, DL, VT, true);
  }
  /// @}

  SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
                           int64_t offset = 0, bool isTargetGA = false,
                           unsigned TargetFlags = 0);
  SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT,
                                 int64_t offset = 0, unsigned TargetFlags = 0) {
    return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
  }
  SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
  SDValue getTargetFrameIndex(int FI, EVT VT) {
    return getFrameIndex(FI, VT, true);
  }
  SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
                       unsigned TargetFlags = 0);
  SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags = 0) {
    return getJumpTable(JTI, VT, true, TargetFlags);
  }
  SDValue getConstantPool(const Constant *C, EVT VT,
                          MaybeAlign Align = std::nullopt, int Offs = 0,
                          bool isT = false, unsigned TargetFlags = 0);
  SDValue getTargetConstantPool(const Constant *C, EVT VT,
                                MaybeAlign Align = std::nullopt, int Offset = 0,
                                unsigned TargetFlags = 0) {
    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
  }
  SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
                          MaybeAlign Align = std::nullopt, int Offs = 0,
                          bool isT = false, unsigned TargetFlags = 0);
  SDValue getTargetConstantPool(MachineConstantPoolValue *C, EVT VT,
                                MaybeAlign Align = std::nullopt, int Offset = 0,
                                unsigned TargetFlags = 0) {
    return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
  }
  SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
                         unsigned TargetFlags = 0);
  // When generating a branch to a BB, we don't in general know enough
  // to provide debug info for the BB at that time, so keep this one around.
  SDValue getBasicBlock(MachineBasicBlock *MBB);
  SDValue getExternalSymbol(const char *Sym, EVT VT);
  SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
                                  unsigned TargetFlags = 0);
  SDValue getMCSymbol(MCSymbol *Sym, EVT VT);

  SDValue getValueType(EVT);
  SDValue getRegister(unsigned Reg, EVT VT);
  SDValue getRegisterMask(const uint32_t *RegMask);
  SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label);
  SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root,
                       MCSymbol *Label);
  SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset = 0,
                          bool isTarget = false, unsigned TargetFlags = 0);
  SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
                                int64_t Offset = 0, unsigned TargetFlags = 0) {
    return getBlockAddress(BA, VT, Offset, true, TargetFlags);
  }

  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg,
                       SDValue N) {
    return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
                   getRegister(Reg, N.getValueType()), N);
  }

  // This version of the getCopyToReg method takes an extra operand, which
  // indicates that there is potentially an incoming glue value (if Glue is not
  // null) and that there should be a glue result.
  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N,
                       SDValue Glue) {
    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
    return getNode(ISD::CopyToReg, dl, VTs,
                   ArrayRef(Ops, Glue.getNode() ? 4 : 3));
  }

  // Similar to the previous getCopyToReg(), except that the Reg parameter is
  // an SDValue.
  SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, SDValue Reg, SDValue N,
                       SDValue Glue) {
    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, Reg, N, Glue };
    return getNode(ISD::CopyToReg, dl, VTs,
                   ArrayRef(Ops, Glue.getNode() ? 4 : 3));
  }

  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT) {
    SDVTList VTs = getVTList(VT, MVT::Other);
    SDValue Ops[] = { Chain, getRegister(Reg, VT) };
    return getNode(ISD::CopyFromReg, dl, VTs, Ops);
  }

  // This version of the getCopyFromReg method takes an extra operand, which
  // indicates that there is potentially an incoming glue value (if Glue is not
  // null) and that there should be a glue result.
  SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT,
                         SDValue Glue) {
    SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
    return getNode(ISD::CopyFromReg, dl, VTs,
                   ArrayRef(Ops, Glue.getNode() ? 3 : 2));
  }
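
  // A minimal sketch of threading a value through a register; Chain, Reg and
  // Val are assumed placeholders:
  //   SDValue ToReg   = DAG.getCopyToReg(Chain, DL, Reg, Val); // chain result
  //   SDValue FromReg = DAG.getCopyFromReg(ToReg, DL, Reg, MVT::i32);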

  SDValue getCondCode(ISD::CondCode Cond);

  /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
  /// which must be a vector type, must match the number of mask elements
  /// NumElts. An integer mask element equal to -1 is treated as undefined.
  SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2,
                           ArrayRef<int> Mask);
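
  // A minimal sketch interleaving the low halves of two v4i32 vectors A and B
  // (assumed placeholders):
  //   int Mask[4] = {0, 4, 1, 5}; // -1 would mark a lane as undefined
  //   SDValue Zip = DAG.getVectorShuffle(MVT::v4i32, DL, A, B, Mask);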

  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
  /// which must be a vector type, must match the number of operands in Ops.
  /// The operands must have the same type as (or, for integers, a type wider
  /// than) VT's element type.
  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDValue> Ops) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }

  /// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
  /// which must be a vector type, must match the number of operands in Ops.
  /// The operands must have the same type as (or, for integers, a type wider
  /// than) VT's element type.
  SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }

  /// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
  /// elements. VT must be a vector type. Op's type must be the same as (or,
  /// for integers, a type wider than) VT's element type.
  SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op) {
    // VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
    if (Op.getOpcode() == ISD::UNDEF) {
      assert((VT.getVectorElementType() == Op.getValueType() ||
              (VT.isInteger() &&
               VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
             "A splatted value must have a width equal or (for integers) "
             "greater than the vector element type!");
      return getNode(ISD::UNDEF, SDLoc(), VT);
    }

    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Op);
    return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
  }

  // Return a splat ISD::SPLAT_VECTOR node, consisting of Op splatted to all
  // elements.
  SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op) {
    if (Op.getOpcode() == ISD::UNDEF) {
      assert((VT.getVectorElementType() == Op.getValueType() ||
              (VT.isInteger() &&
               VT.getVectorElementType().bitsLE(Op.getValueType()))) &&
             "A splatted value must have a width equal or (for integers) "
             "greater than the vector element type!");
      return getNode(ISD::UNDEF, SDLoc(), VT);
    }
    return getNode(ISD::SPLAT_VECTOR, DL, VT, Op);
  }

  /// Returns a node representing a splat of one value into all lanes
  /// of the provided vector type.  This is a utility which returns
  /// either a BUILD_VECTOR or SPLAT_VECTOR depending on the
  /// scalability of the desired vector type.
  SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op) {
    assert(VT.isVector() && "Can't splat to non-vector type");
    return VT.isScalableVector() ?
      getSplatVector(VT, DL, Op) : getSplatBuildVector(VT, DL, Op);
  }
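
  // A minimal sketch; VecVT is an assumed placeholder for a vector type with
  // i32 elements, fixed-length or scalable:
  //   SDValue One   = DAG.getConstant(1, DL, MVT::i32);
  //   SDValue Splat = DAG.getSplat(VecVT, DL, One);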

  /// Returns a vector of type ResVT whose elements contain the linear sequence
  ///   <0, Step, Step * 2, Step * 3, ...>
  SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal);

  /// Returns a vector of type ResVT whose elements contain the linear sequence
  ///   <0, 1, 2, 3, ...>
  SDValue getStepVector(const SDLoc &DL, EVT ResVT);

  /// Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
  /// the shuffle node in input but with swapped operands.
  ///
  /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
  SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);

  /// Convert Op, which must be of float type, to the
  /// float type VT, by either extending or rounding (by truncation).
  SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be a STRICT operation of float type, to the
  /// float type VT, by either extending or rounding (by truncation).
  std::pair<SDValue, SDValue>
  getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT);

  /// Convert *_EXTEND_VECTOR_INREG to *_EXTEND opcode.
  static unsigned getOpcode_EXTEND(unsigned Opcode) {
    switch (Opcode) {
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG:
      return ISD::ANY_EXTEND;
    case ISD::ZERO_EXTEND:
    case ISD::ZERO_EXTEND_VECTOR_INREG:
      return ISD::ZERO_EXTEND;
    case ISD::SIGN_EXTEND:
    case ISD::SIGN_EXTEND_VECTOR_INREG:
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Unknown opcode");
  }

  /// Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
  static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
    switch (Opcode) {
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG:
      return ISD::ANY_EXTEND_VECTOR_INREG;
    case ISD::ZERO_EXTEND:
    case ISD::ZERO_EXTEND_VECTOR_INREG:
      return ISD::ZERO_EXTEND_VECTOR_INREG;
    case ISD::SIGN_EXTEND:
    case ISD::SIGN_EXTEND_VECTOR_INREG:
      return ISD::SIGN_EXTEND_VECTOR_INREG;
    }
    llvm_unreachable("Unknown opcode");
  }
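
  // A minimal sketch of the opcode mapping; both helpers are static:
  //   unsigned InReg =
  //       SelectionDAG::getOpcode_EXTEND_VECTOR_INREG(ISD::ZERO_EXTEND);
  //   // InReg == ISD::ZERO_EXTEND_VECTOR_INREG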

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either any-extending or truncating it.
  SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either sign-extending or truncating it.
  SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either zero-extending or truncating it.
  SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the
  /// integer type VT, by either sign/zero-extending (depending on IsSigned) or
  /// truncating it.
  SDValue getExtOrTrunc(bool IsSigned, SDValue Op, const SDLoc &DL, EVT VT) {
    return IsSigned ? getSExtOrTrunc(Op, DL, VT) : getZExtOrTrunc(Op, DL, VT);
  }
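
  // A minimal sketch; Val is an assumed integer-typed placeholder:
  //   SDValue AsI64 = DAG.getZExtOrTrunc(Val, DL, MVT::i64); // widen or narrow
  //   SDValue AsI8  = DAG.getSExtOrTrunc(Val, DL, MVT::i8);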

  /// Return the expression required to zero extend the Op
  /// value, assuming it was the smaller VT value.
  SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the integer type VT, by
  /// either truncating it or performing zero or sign extension, as
  /// appropriate for the pointer's semantics.
  SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);

  /// Return the expression required to extend the Op as a pointer value
  /// assuming it was the smaller VT value. This may be either a zero extend
  /// or a sign extend.
  SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);

  /// Convert Op, which must be of integer type, to the integer type VT,
  /// by using an extension appropriate for the target's
  /// BooleanContent for type OpVT or truncating it.
  SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT);

  /// Create negative operation as (SUB 0, Val).
  SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT);

  /// Create a bitwise NOT operation as (XOR Val, -1).
  SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT);

  /// Create a logical NOT operation as (XOR Val, BooleanOne).
  SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT);

  /// Create a vector-predicated logical NOT operation as (VP_XOR Val,
  /// BooleanOne, Mask, EVL).
  SDValue getVPLogicalNOT(const SDLoc &DL, SDValue Val, SDValue Mask,
                          SDValue EVL, EVT VT);

  /// Convert a vector-predicated Op, which must be an integer vector, to the
  /// vector type VT, by performing either a vector-predicated zext or a
  /// truncation. Op is returned as-is if Op and VT are integer vectors with
  /// the same element width.
  SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask,
                           SDValue EVL);

  /// Convert a vector-predicated Op, which must be of integer vector type, to
  /// the integer vector type VT, by either truncating it or performing
  /// vector-predicated zero or sign extension, as appropriate for the
  /// pointer's semantics. This function currently just forwards to
  /// getVPZExtOrTrunc.
  SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask,
                             SDValue EVL);

  /// Returns sum of the base pointer and offset.
  /// Unlike getObjectPtrOffset this does not set NoUnsignedWrap by default.
  SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL,
                               const SDNodeFlags Flags = SDNodeFlags());
  SDValue getMemBasePlusOffset(SDValue Base, SDValue Offset, const SDLoc &DL,
                               const SDNodeFlags Flags = SDNodeFlags());

  /// Create an add instruction with appropriate flags when used for
  /// addressing some offset of an object. i.e. if a load is split into multiple
  /// components, create an add nuw from the base pointer to the offset.
  SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset) {
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);
    return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
  }

  SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, SDValue Offset) {
    // The object itself can't wrap around the address space, so it shouldn't be
    // possible for the adds of the offsets to the split parts to overflow.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);
    return getMemBasePlusOffset(Ptr, Offset, SL, Flags);
  }
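
  // A minimal sketch of addressing the high half of a split access; BasePtr is
  // an assumed placeholder (TypeSize::getFixed builds a fixed-size offset):
  //   SDValue HiPtr = DAG.getObjectPtrOffset(DL, BasePtr,
  //                                          TypeSize::getFixed(8));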

  /// Return a new CALLSEQ_START node, which starts a new call frame in which
  /// InSize bytes are set up inside the CALLSEQ_START..CALLSEQ_END sequence,
  /// and OutSize specifies the part of the frame set up prior to the sequence.
  SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize,
                           const SDLoc &DL) {
    SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
    SDValue Ops[] = { Chain,
                      getIntPtrConstant(InSize, DL, true),
                      getIntPtrConstant(OutSize, DL, true) };
    return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
  }

  /// Return a new CALLSEQ_END node, which must always have a
  /// glue result (to ensure it's not CSE'd).
  SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
                         SDValue InGlue, const SDLoc &DL) {
    SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
    SmallVector<SDValue, 4> Ops;
    Ops.push_back(Chain);
    Ops.push_back(Op1);
    Ops.push_back(Op2);
    if (InGlue.getNode())
      Ops.push_back(InGlue);
    return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
  }

  SDValue getCALLSEQ_END(SDValue Chain, uint64_t Size1, uint64_t Size2,
                         SDValue Glue, const SDLoc &DL) {
    return getCALLSEQ_END(
        Chain, getIntPtrConstant(Size1, DL, /*isTarget=*/true),
        getIntPtrConstant(Size2, DL, /*isTarget=*/true), Glue, DL);
  }
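
  // A minimal sketch of bracketing a call; Chain, NumBytes and InGlue are
  // assumed placeholders:
  //   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
  //   // ... emit the call itself here ...
  //   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InGlue, DL);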

  /// Return true if the result of this operation is always undefined.
  bool isUndef(unsigned Opcode, ArrayRef<SDValue> Ops);

  /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
  SDValue getUNDEF(EVT VT) {
    return getNode(ISD::UNDEF, SDLoc(), VT);
  }

  /// Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
  SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm,
                    bool ConstantFold = true);

  SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC,
                          bool ConstantFold = true);

  /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
  SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
    return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
  }

  /// Gets or creates the specified node.
  ///
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                  ArrayRef<SDUse> Ops);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                  ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
                  ArrayRef<SDValue> Ops);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                  ArrayRef<SDValue> Ops, const SDNodeFlags Flags);

  // Use flags from current flag inserter.
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                  ArrayRef<SDValue> Ops);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                  ArrayRef<SDValue> Ops);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                  SDValue N2);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                  SDValue N2, SDValue N3);

  // Specialize based on number of operands.
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue Operand,
                  const SDNodeFlags Flags);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                  SDValue N2, const SDNodeFlags Flags);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                  SDValue N2, SDValue N3, const SDNodeFlags Flags);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                  SDValue N2, SDValue N3, SDValue N4);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
                  SDValue N2, SDValue N3, SDValue N4, SDValue N5);

  // Specialize again based on number of operands for nodes with a VTList
  // rather than a single VT.
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                  SDValue N2);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                  SDValue N2, SDValue N3);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                  SDValue N2, SDValue N3, SDValue N4);
  SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList, SDValue N1,
                  SDValue N2, SDValue N3, SDValue N4, SDValue N5);

  /// Compute a TokenFactor to force all the incoming stack arguments to be
  /// loaded from the stack. This is used in tail call lowering to protect
  /// stack arguments from being clobbered.
  SDValue getStackArgumentTokenFactor(SDValue Chain);

  SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
                    SDValue Size, Align Alignment, bool isVol,
                    bool AlwaysInline, bool isTailCall,
                    MachinePointerInfo DstPtrInfo,
                    MachinePointerInfo SrcPtrInfo,
                    const AAMDNodes &AAInfo = AAMDNodes(),
                    AAResults *AA = nullptr);

  SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
                     SDValue Size, Align Alignment, bool isVol, bool isTailCall,
                     MachinePointerInfo DstPtrInfo,
                     MachinePointerInfo SrcPtrInfo,
                     const AAMDNodes &AAInfo = AAMDNodes(),
                     AAResults *AA = nullptr);

  SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src,
                    SDValue Size, Align Alignment, bool isVol,
                    bool AlwaysInline, bool isTailCall,
                    MachinePointerInfo DstPtrInfo,
                    const AAMDNodes &AAInfo = AAMDNodes());

  SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
                          SDValue Src, SDValue Size, Type *SizeTy,
                          unsigned ElemSz, bool isTailCall,
                          MachinePointerInfo DstPtrInfo,
                          MachinePointerInfo SrcPtrInfo);

  SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
                           SDValue Src, SDValue Size, Type *SizeTy,
                           unsigned ElemSz, bool isTailCall,
                           MachinePointerInfo DstPtrInfo,
                           MachinePointerInfo SrcPtrInfo);

  SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
                          SDValue Value, SDValue Size, Type *SizeTy,
                          unsigned ElemSz, bool isTailCall,
                          MachinePointerInfo DstPtrInfo);

  /// Helper function to make it easier to build SetCC's if you just have an
  /// ISD::CondCode instead of an SDValue.
  SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
                   ISD::CondCode Cond, SDValue Chain = SDValue(),
                   bool IsSignaling = false) {
    assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
           "Vector/scalar operand type mismatch for setcc");
    assert(LHS.getValueType().isVector() == VT.isVector() &&
           "Vector/scalar result type mismatch for setcc");
    assert(Cond != ISD::SETCC_INVALID &&
           "Cannot create a setCC of an invalid node.");
    if (Chain)
      return getNode(IsSignaling ? ISD::STRICT_FSETCCS : ISD::STRICT_FSETCC, DL,
                     {VT, MVT::Other}, {Chain, LHS, RHS, getCondCode(Cond)});
    return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
  }
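
  // A minimal sketch comparing two scalars A and B (assumed placeholders):
  //   SDValue IsEq = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETEQ);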

  /// Helper function to make it easier to build VP_SETCCs if you just have an
  /// ISD::CondCode instead of an SDValue.
  SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
                     ISD::CondCode Cond, SDValue Mask, SDValue EVL) {
    assert(LHS.getValueType().isVector() && RHS.getValueType().isVector() &&
           "Cannot compare scalars");
    assert(Cond != ISD::SETCC_INVALID &&
           "Cannot create a setCC of an invalid node.");
    return getNode(ISD::VP_SETCC, DL, VT, LHS, RHS, getCondCode(Cond), Mask,
                   EVL);
  }

  /// Helper function to make it easier to build Selects if you just have
  /// operands and don't want to check whether the condition is a vector.
  SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
                    SDValue RHS) {
    assert(LHS.getValueType() == VT && RHS.getValueType() == VT &&
           "Cannot use select on differing types");
    auto Opcode = Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
    return getNode(Opcode, DL, VT, Cond, LHS, RHS);
  }

  /// Helper function to make it easier to build SelectCC's if you just have an
  /// ISD::CondCode instead of an SDValue.
  SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True,
                      SDValue False, ISD::CondCode Cond) {
    return getNode(ISD::SELECT_CC, DL, True.getValueType(), LHS, RHS, True,
                   False, getCondCode(Cond));
  }
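
  // A minimal sketch of a max-style select on A and B (assumed placeholders):
  //   SDValue Max = DAG.getSelectCC(DL, A, B, A, B, ISD::SETGT);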

  /// Try to simplify a select/vselect into 1 of its operands or a constant.
  SDValue simplifySelect(SDValue Cond, SDValue TVal, SDValue FVal);

  /// Try to simplify a shift into 1 of its operands or a constant.
  SDValue simplifyShift(SDValue X, SDValue Y);

  /// Try to simplify a floating-point binary operation into 1 of its operands
  /// or a constant.
  SDValue simplifyFPBinop(unsigned Opcode, SDValue X, SDValue Y,
                          SDNodeFlags Flags);

  /// VAArg produces a result and token chain, and takes a pointer
  /// and a source value as input.
  SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                   SDValue SV, unsigned Align);

  /// Gets a node for an atomic cmpxchg op. There are two
  /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
  /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
  /// a success flag (initially i1), and a chain.
  SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                           SDVTList VTs, SDValue Chain, SDValue Ptr,
                           SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);

  /// Gets a node for an atomic op, produces result (if relevant)
  /// and chain and takes 2 operands.
  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
                    SDValue Ptr, SDValue Val, MachineMemOperand *MMO);

  /// Gets a node for an atomic op, produces result and chain and
  /// takes 1 operand.
  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, EVT VT,
                    SDValue Chain, SDValue Ptr, MachineMemOperand *MMO);

  /// Gets a node for an atomic op, produces result and chain and takes N
  /// operands.
  SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
                    SDVTList VTList, ArrayRef<SDValue> Ops,
                    MachineMemOperand *MMO);

  /// Creates a MemIntrinsicNode that may produce a
  /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
  /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
  /// less than FIRST_TARGET_MEMORY_OPCODE.
  SDValue getMemIntrinsicNode(
      unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
      EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOStore,
      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());

  inline SDValue getMemIntrinsicNode(
      unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
      EVT MemVT, MachinePointerInfo PtrInfo,
      MaybeAlign Alignment = std::nullopt,
      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                       MachineMemOperand::MOStore,
      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
    // Ensure that codegen never sees alignment 0
    return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
                               Alignment.value_or(getEVTAlign(MemVT)), Flags,
                               Size, AAInfo);
  }

  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
                              ArrayRef<SDValue> Ops, EVT MemVT,
                              MachineMemOperand *MMO);

  /// Creates a LifetimeSDNode that starts (`IsStart==true`) or ends
  /// (`IsStart==false`) the lifetime of the portion of `FrameIndex` between
  /// offsets `Offset` and `Offset + Size`.
  SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain,
                          int FrameIndex, int64_t Size, int64_t Offset = -1);

  /// Creates a PseudoProbeSDNode with function GUID `Guid` and
  /// the index of the block `Index` it is probing, as well as the attributes
  /// `attr` of the probe.
  SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid,
                             uint64_t Index, uint32_t Attr);

  /// Create a MERGE_VALUES node from the given operands.
  SDValue getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl);

  /// Loads are not normal binary operators: their result type is not
  /// determined by their operands, and they produce a value AND a token chain.
  ///
  /// This function will set the MOLoad flag on MMOFlags, but you can set it if
  /// you want.  The MOStore flag must not be set.
  SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                  MachinePointerInfo PtrInfo,
                  MaybeAlign Alignment = MaybeAlign(),
                  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                  const AAMDNodes &AAInfo = AAMDNodes(),
                  const MDNode *Ranges = nullptr);
  /// FIXME: Remove once transition to Align is over.
  LLVM_DEPRECATED("Use the getLoad function that takes a MaybeAlign instead",
                  "")
  inline SDValue
  getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
          MachinePointerInfo PtrInfo, unsigned Alignment,
          MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
          const AAMDNodes &AAInfo = AAMDNodes(),
          const MDNode *Ranges = nullptr) {
    return getLoad(VT, dl, Chain, Ptr, PtrInfo, MaybeAlign(Alignment), MMOFlags,
                   AAInfo, Ranges);
  }
  SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                  MachineMemOperand *MMO);
  SDValue
  getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain,
             SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT,
             MaybeAlign Alignment = MaybeAlign(),
             MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
             const AAMDNodes &AAInfo = AAMDNodes());
  SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
                     SDValue Chain, SDValue Ptr, EVT MemVT,
                     MachineMemOperand *MMO);
  SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
                         SDValue Offset, ISD::MemIndexedMode AM);
  SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
                  const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
                  MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment,
                  MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                  const AAMDNodes &AAInfo = AAMDNodes(),
                  const MDNode *Ranges = nullptr);
  inline SDValue getLoad(
      ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl,
      SDValue Chain, SDValue Ptr, SDValue Offset, MachinePointerInfo PtrInfo,
      EVT MemVT, MaybeAlign Alignment = MaybeAlign(),
      MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
      const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr) {
    // Ensures that codegen never sees a None Alignment.
    return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
                   Alignment.value_or(getEVTAlign(MemVT)), MMOFlags, AAInfo,
                   Ranges);
  }
  /// FIXME: Remove once transition to Align is over.
  LLVM_DEPRECATED("Use the getLoad function that takes a MaybeAlign instead",
                  "")
  inline SDValue
  getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
          const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
          MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment,
          MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
          const AAMDNodes &AAInfo = AAMDNodes(),
          const MDNode *Ranges = nullptr) {
    return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, PtrInfo, MemVT,
                   MaybeAlign(Alignment), MMOFlags, AAInfo, Ranges);
  }
  SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
                  const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
                  EVT MemVT, MachineMemOperand *MMO);
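
  // A minimal sketch of a simple load; Chain and Ptr are assumed placeholders.
  // The node produces two results: the loaded value and the output chain.
  //   SDValue Ld       = DAG.getLoad(MVT::i32, DL, Chain, Ptr,
  //                                  MachinePointerInfo());
  //   SDValue Value    = Ld;              // result 0: loaded value
  //   SDValue OutChain = Ld.getValue(1);  // result 1: token chain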

  /// Helper function to build ISD::STORE nodes.
  ///
  /// This function will set the MOStore flag on MMOFlags, but you can set it if
  /// you want.  The MOLoad and MOInvariant flags must not be set.
  SDValue
  getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
           MachinePointerInfo PtrInfo, Align Alignment,
           MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
           const AAMDNodes &AAInfo = AAMDNodes());
  inline SDValue
  getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
           MachinePointerInfo PtrInfo, MaybeAlign Alignment = MaybeAlign(),
           MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
           const AAMDNodes &AAInfo = AAMDNodes()) {
    return getStore(Chain, dl, Val, Ptr, PtrInfo,
                    Alignment.value_or(getEVTAlign(Val.getValueType())),
                    MMOFlags, AAInfo);
  }
  /// FIXME: Remove once transition to Align is over.
  LLVM_DEPRECATED("Use the version that takes a MaybeAlign instead", "")
  inline SDValue
  getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
           MachinePointerInfo PtrInfo, unsigned Alignment,
           MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
           const AAMDNodes &AAInfo = AAMDNodes()) {
    return getStore(Chain, dl, Val, Ptr, PtrInfo, MaybeAlign(Alignment),
                    MMOFlags, AAInfo);
  }
  SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                   MachineMemOperand *MMO);
  SDValue
  getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                const AAMDNodes &AAInfo = AAMDNodes());
  inline SDValue
  getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                MachinePointerInfo PtrInfo, EVT SVT,
                MaybeAlign Alignment = MaybeAlign(),
                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                const AAMDNodes &AAInfo = AAMDNodes()) {
    return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
                         Alignment.value_or(getEVTAlign(SVT)), MMOFlags,
                         AAInfo);
  }
  /// FIXME: Remove once transition to Align is over.
  LLVM_DEPRECATED("Use the version that takes a MaybeAlign instead", "")
  inline SDValue
  getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                MachinePointerInfo PtrInfo, EVT SVT, unsigned Alignment,
                MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
                const AAMDNodes &AAInfo = AAMDNodes()) {
    return getTruncStore(Chain, dl, Val, Ptr, PtrInfo, SVT,
                         MaybeAlign(Alignment), MMOFlags, AAInfo);
  }
  SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                        SDValue Ptr, EVT SVT, MachineMemOperand *MMO);
  SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base,
                          SDValue Offset, ISD::MemIndexedMode AM);
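
  // A minimal sketch of a simple store; Chain, Val and Ptr are assumed
  // placeholders. The result is the output token chain:
  //   SDValue St = DAG.getStore(Chain, DL, Val, Ptr, MachinePointerInfo());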

  SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
                    const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
                    SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
                    EVT MemVT, Align Alignment,
                    MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo,
                    const MDNode *Ranges = nullptr, bool IsExpanding = false);
  inline SDValue
  getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
            const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
            SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
            MaybeAlign Alignment = MaybeAlign(),
            MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
            const AAMDNodes &AAInfo = AAMDNodes(),
            const MDNode *Ranges = nullptr, bool IsExpanding = false) {
    // Ensures that codegen never sees a None Alignment.
    return getLoadVP(AM, ExtType, VT, dl, Chain, Ptr, Offset, Mask, EVL,
                     PtrInfo, MemVT, Alignment.value_or(getEVTAlign(MemVT)),
                     MMOFlags, AAInfo, Ranges, IsExpanding);
  }
  SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT,
                    const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset,
                    SDValue Mask, SDValue EVL, EVT MemVT,
                    MachineMemOperand *MMO, bool IsExpanding = false);
  SDValue getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                    SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo,
                    MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags,
                    const AAMDNodes &AAInfo, const MDNode *Ranges = nullptr,
                    bool IsExpanding = false);
  SDValue getLoadVP(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr,
                    SDValue Mask, SDValue EVL, MachineMemOperand *MMO,
                    bool IsExpanding = false);
  SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
                       SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL,
                       MachinePointerInfo PtrInfo, EVT MemVT,
                       MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags,
                       const AAMDNodes &AAInfo, bool IsExpanding = false);
  SDValue getExtLoadVP(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT,
                       SDValue Chain, SDValue Ptr, SDValue Mask, SDValue EVL,
                       EVT MemVT, MachineMemOperand *MMO,
                       bool IsExpanding = false);
  SDValue getIndexedLoadVP(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
                           SDValue Offset, ISD::MemIndexedMode AM);
  SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr,
                     SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT,
                     MachineMemOperand *MMO, ISD::MemIndexedMode AM,
                     bool IsTruncating = false, bool IsCompressing = false);
  SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
                          SDValue Ptr, SDValue Mask, SDValue EVL,
                          MachinePointerInfo PtrInfo, EVT SVT, Align Alignment,
                          MachineMemOperand::Flags MMOFlags,
                          const AAMDNodes &AAInfo, bool IsCompressing = false);
  SDValue getTruncStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val,
                          SDValue Ptr, SDValue Mask, SDValue EVL, EVT SVT,
                          MachineMemOperand *MMO, bool IsCompressing = false);
  SDValue getIndexedStoreVP(SDValue OrigStore, const SDLoc &dl, SDValue Base,
                            SDValue Offset, ISD::MemIndexedMode AM);

  SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                           EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
                           SDValue Offset, SDValue Stride, SDValue Mask,
                           SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
                           Align Alignment, MachineMemOperand::Flags MMOFlags,
                           const AAMDNodes &AAInfo,
                           const MDNode *Ranges = nullptr,
                           bool IsExpanding = false);
  inline SDValue getStridedLoadVP(
      ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL,
      SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask,
      SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
      MaybeAlign Alignment = MaybeAlign(),
      MachineMemOperand::Flags MMOFlags = MachineMemOperand::MONone,
      const AAMDNodes &AAInfo = AAMDNodes(), const MDNode *Ranges = nullptr,
      bool IsExpanding = false) {
    // Ensures that codegen never sees a None Alignment.
    return getStridedLoadVP(AM, ExtType, VT, DL, Chain, Ptr, Offset, Stride,
                            Mask, EVL, PtrInfo, MemVT,
                            Alignment.value_or(getEVTAlign(MemVT)), MMOFlags,
                            AAInfo, Ranges, IsExpanding);
  }
  SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
                           EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
                           SDValue Offset, SDValue Stride, SDValue Mask,
                           SDValue EVL, EVT MemVT, MachineMemOperand *MMO,
                           bool IsExpanding = false);
  SDValue getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
                           SDValue Stride, SDValue Mask, SDValue EVL,
                           MachinePointerInfo PtrInfo, MaybeAlign Alignment,
                           MachineMemOperand::Flags MMOFlags,
                           const AAMDNodes &AAInfo,
                           const MDNode *Ranges = nullptr,
                           bool IsExpanding = false);
  SDValue getStridedLoadVP(EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr,
                           SDValue Stride, SDValue Mask, SDValue EVL,
                           MachineMemOperand *MMO, bool IsExpanding = false);
  SDValue
  getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT,
                      SDValue Chain, SDValue Ptr, SDValue Stride, SDValue Mask,
                      SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT,
                      MaybeAlign Alignment, MachineMemOperand::Flags MMOFlags,
                      const AAMDNodes &AAInfo, bool IsExpanding = false);
  SDValue getExtStridedLoadVP(ISD::LoadExtType ExtType, const SDLoc &DL, EVT VT,
                              SDValue Chain, SDValue Ptr, SDValue Stride,
                              SDValue Mask, SDValue EVL, EVT MemVT,
                              MachineMemOperand *MMO, bool IsExpanding = false);
  SDValue getIndexedStridedLoadVP(SDValue OrigLoad, const SDLoc &DL,
                                  SDValue Base, SDValue Offset,
                                  ISD::MemIndexedMode AM);
  SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
                            SDValue Ptr, SDValue Offset, SDValue Stride,
                            SDValue Mask, SDValue EVL, EVT MemVT,
                            MachineMemOperand *MMO, ISD::MemIndexedMode AM,
                            bool IsTruncating = false,
                            bool IsCompressing = false);
  SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
                                 SDValue Ptr, SDValue Stride, SDValue Mask,
                                 SDValue EVL, MachinePointerInfo PtrInfo,
                                 EVT SVT, Align Alignment,
                                 MachineMemOperand::Flags MMOFlags,
                                 const AAMDNodes &AAInfo,
                                 bool IsCompressing = false);
  SDValue getTruncStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val,
                                 SDValue Ptr, SDValue Stride, SDValue Mask,
                                 SDValue EVL, EVT SVT, MachineMemOperand *MMO,
                                 bool IsCompressing = false);
  SDValue getIndexedStridedStoreVP(SDValue OrigStore, const SDLoc &DL,
                                   SDValue Base, SDValue Offset,
                                   ISD::MemIndexedMode AM);

  SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl,
                      ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
                      ISD::MemIndexType IndexType);
  SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl,
                       ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
                       ISD::MemIndexType IndexType);

  SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base,
                        SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT,
                        MachineMemOperand *MMO, ISD::MemIndexedMode AM,
                        ISD::LoadExtType, bool IsExpanding = false);
  SDValue getIndexedMaskedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base,
                               SDValue Offset, ISD::MemIndexedMode AM);
  SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val,
                         SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT,
                         MachineMemOperand *MMO, ISD::MemIndexedMode AM,
                         bool IsTruncating = false, bool IsCompressing = false);
  SDValue getIndexedMaskedStore(SDValue OrigStore, const SDLoc &dl,
                                SDValue Base, SDValue Offset,
                                ISD::MemIndexedMode AM);
  SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl,
                          ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
                          ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy);
  SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl,
                           ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
                           ISD::MemIndexType IndexType,
                           bool IsTruncating = false);

  SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT,
                      MachineMemOperand *MMO);
  SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT,
                      MachineMemOperand *MMO);

  /// Construct a node to track a Value* through the backend.
  SDValue getSrcValue(const Value *v);

  /// Return an MDNodeSDNode which holds an MDNode.
  SDValue getMDNode(const MDNode *MD);

  /// Return a bitcast using the SDLoc of the value operand, and casting to the
  /// provided type. Use getNode to set a custom SDLoc.
  SDValue getBitcast(EVT VT, SDValue V);

  /// Return an AddrSpaceCastSDNode.
  SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS,
                           unsigned DestAS);

  /// Return a freeze using the SDLoc of the value operand.
  SDValue getFreeze(SDValue V);

  /// Return an AssertAlignSDNode.
  SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A);

  /// Swap N1 and N2 if Opcode is a commutative binary opcode
  /// and the canonical form expects the opposite order.
  void canonicalizeCommutativeBinop(unsigned Opcode, SDValue &N1,
                                    SDValue &N2) const;

  /// Return the specified value casted to
  /// the target's desired shift amount type.
  SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);

  /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
  SDValue expandVAArg(SDNode *Node);

  /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
  SDValue expandVACopy(SDNode *Node);

  /// Returns a GlobalAddress of the function from the current module whose
  /// name matches the given ExternalSymbol. Additionally, the matched
  /// function can be returned via \p TargetFunction.
  /// Panics if the function doesn't exist.
  SDValue getSymbolFunctionGlobalAddress(SDValue Op,
                                         Function **TargetFunction = nullptr);

  /// *Mutate* the specified node in-place to have the
  /// specified operands.  If the resultant node already exists in the DAG,
  /// this does not modify the specified node, instead it returns the node that
  /// already exists.  If the resultant node does not exist in the DAG, the
  /// input node is returned.  As a degenerate case, if you specify the same
  /// input operands as the node already has, the input node is returned.
  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                             SDValue Op3);
  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                             SDValue Op3, SDValue Op4);
  SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
                             SDValue Op3, SDValue Op4, SDValue Op5);
  SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
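
  // A minimal sketch commuting the operands of a binary node N (assumed
  // placeholder); the returned node may differ from N if CSE finds a match:
  //   SDNode *Result = DAG.UpdateNodeOperands(N, N->getOperand(1),
  //                                           N->getOperand(0));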

  /// Creates a new TokenFactor containing \p Vals. If \p Vals contains 64k
  /// values or more, move the values into new TokenFactors in blocks of
  /// 64k-1, until the final TokenFactor has fewer than 64k operands.
  SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl<SDValue> &Vals);

  /// *Mutate* the specified machine node's memory references to the provided
  /// list.
  void setNodeMemRefs(MachineSDNode *N,
                      ArrayRef<MachineMemOperand *> NewMemRefs);

  /// Calculate divergence of node \p N based on its operands.
  bool calculateDivergence(SDNode *N);

  /// Propagates the change in divergence to users.
  void updateDivergence(SDNode *N);

  /// These are used for target selectors to *mutate* the
  /// specified node to have the specified return type, Target opcode, and
  /// operands.  Note that target opcodes are stored as
  /// ~TargetOpcode in the node opcode field.  The resultant node is returned.
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT, SDValue Op1);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
                       SDValue Op1, SDValue Op2);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
                       SDValue Op1, SDValue Op2, SDValue Op3);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT,
                       ArrayRef<SDValue> Ops);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1, EVT VT2);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                       EVT VT2, ArrayRef<SDValue> Ops);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                       EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
                       EVT VT2, SDValue Op1, SDValue Op2);
  SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, SDVTList VTs,
                       ArrayRef<SDValue> Ops);

  /// This *mutates* the specified node to have the specified
  /// return type, opcode, and operands.
  SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
                      ArrayRef<SDValue> Ops);

  /// Mutate the specified strict FP node to its non-strict equivalent,
  /// unlinking the node from its chain and dropping the metadata arguments.
  /// The node must be a strict FP node.
  SDNode *mutateStrictFPToFP(SDNode *Node);

  /// These are used for target selectors to create a new node
  /// with specified return type(s), MachineInstr opcode, and operands.
  ///
  /// Note that getMachineNode returns the resultant node.  If there is already
  /// a node of the specified opcode and operands, it returns that node instead
  /// of the current one.
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                                SDValue Op1);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                                SDValue Op1, SDValue Op2);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                                SDValue Op1, SDValue Op2, SDValue Op3);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT,
                                ArrayRef<SDValue> Ops);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                                EVT VT2, SDValue Op1, SDValue Op2);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                                EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                                EVT VT2, ArrayRef<SDValue> Ops);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                                EVT VT2, EVT VT3, SDValue Op1, SDValue Op2);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                                EVT VT2, EVT VT3, SDValue Op1, SDValue Op2,
                                SDValue Op3);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT1,
                                EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl,
                                ArrayRef<EVT> ResultTys, ArrayRef<SDValue> Ops);
  MachineSDNode *getMachineNode(unsigned Opcode, const SDLoc &dl, SDVTList VTs,
                                ArrayRef<SDValue> Ops);

  /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
  SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                 SDValue Operand);

  /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
  SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT,
                                SDValue Operand, SDValue Subreg);

  /// Get the specified node if it's already available, or else return NULL.
  SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
                          ArrayRef<SDValue> Ops, const SDNodeFlags Flags);
  SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTList,
                          ArrayRef<SDValue> Ops);

  /// Check if a node exists without modifying its flags.
  bool doesNodeExist(unsigned Opcode, SDVTList VTList, ArrayRef<SDValue> Ops);

  /// Creates a SDDbgValue node.
  SDDbgValue *getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N,
                          unsigned R, bool IsIndirect, const DebugLoc &DL,
                          unsigned O);

  /// Creates a constant SDDbgValue node.
  SDDbgValue *getConstantDbgValue(DIVariable *Var, DIExpression *Expr,
                                  const Value *C, const DebugLoc &DL,
                                  unsigned O);

  /// Creates a FrameIndex SDDbgValue node.
  SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
                                    unsigned FI, bool IsIndirect,
                                    const DebugLoc &DL, unsigned O);

  /// Creates a FrameIndex SDDbgValue node.
  SDDbgValue *getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr,
                                    unsigned FI,
                                    ArrayRef<SDNode *> Dependencies,
                                    bool IsIndirect, const DebugLoc &DL,
                                    unsigned O);

  /// Creates a VReg SDDbgValue node.
  SDDbgValue *getVRegDbgValue(DIVariable *Var, DIExpression *Expr,
                              unsigned VReg, bool IsIndirect,
                              const DebugLoc &DL, unsigned O);

  /// Creates a SDDbgValue node from a list of locations.
  SDDbgValue *getDbgValueList(DIVariable *Var, DIExpression *Expr,
                              ArrayRef<SDDbgOperand> Locs,
                              ArrayRef<SDNode *> Dependencies, bool IsIndirect,
                              const DebugLoc &DL, unsigned O, bool IsVariadic);

  /// Creates a SDDbgLabel node.
  SDDbgLabel *getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O);

  /// Transfer debug values from one node to another, while optionally
  /// generating fragment expressions for split-up values. If \p InvalidateDbg
  /// is set, debug values are invalidated after they are transferred.
  void transferDbgValues(SDValue From, SDValue To, unsigned OffsetInBits = 0,
                         unsigned SizeInBits = 0, bool InvalidateDbg = true);

  /// Remove the specified node from the system. If any of its operands then
  /// become dead, remove them as well. Inform UpdateListener for each node
  /// deleted.
  void RemoveDeadNode(SDNode *N);

  /// This method deletes the unreachable nodes in the
  /// given list, and any nodes that become unreachable as a result.
  void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);

  /// Modify anything using 'From' to use 'To' instead.
  /// This can cause recursive merging of nodes in the DAG.  Use the first
  /// version if 'From' is known to have a single result, use the second
  /// if you have two nodes with identical results (or if 'To' has a superset
  /// of the results of 'From'), use the third otherwise.
  ///
  /// These methods all take an optional UpdateListener, which (if not null) is
  /// informed about nodes that are deleted and modified due to recursive
  /// changes in the dag.
  ///
  /// These functions only replace all existing uses. It's possible that as
  /// these replacements are being performed, CSE may cause the From node
  /// to be given new uses. These new uses of From are left in place, and
  /// not automatically transferred to To.
  ///
  void ReplaceAllUsesWith(SDValue From, SDValue To);
  void ReplaceAllUsesWith(SDNode *From, SDNode *To);
  void ReplaceAllUsesWith(SDNode *From, const SDValue *To);

  /// Replace any uses of From with To, leaving
  /// uses of other values produced by From.getNode() alone.
  void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);

  /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
  /// This correctly handles the case where
  /// there is an overlap between the From values and the To values.
  void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
                                  unsigned Num);

  /// If an existing load has uses of its chain, create a token factor node with
  /// that chain and the new memory node's chain and update users of the old
  /// chain to the token factor. This ensures that the new memory node will have
  /// the same relative memory dependency position as the old load. Returns the
  /// new merged load chain.
  SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain);

  /// If an existing load has uses of its chain, create a token factor node with
  /// that chain and the new memory node's chain and update users of the old
  /// chain to the token factor. This ensures that the new memory node will have
  /// the same relative memory dependency position as the old load. Returns the
  /// new merged load chain.
  SDValue makeEquivalentMemoryOrdering(LoadSDNode *OldLoad, SDValue NewMemOp);

  /// Topologically sort the AllNodes list and assign a unique node id for
  /// each node in the DAG based on their topological order. Returns the
  /// number of nodes.
  unsigned AssignTopologicalOrder();

  /// Move node N in the AllNodes list to be immediately
  /// before the given iterator Position. This may be used to update the
  /// topological ordering when the list of nodes is modified.
  void RepositionNode(allnodes_iterator Position, SDNode *N) {
    AllNodes.insert(Position, AllNodes.remove(N));
  }

  /// Returns an APFloat semantics tag appropriate for the given type. If VT is
  /// a vector type, the element semantics are returned.
  static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
    switch (VT.getScalarType().getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unknown FP format");
    case MVT::f16:     return APFloat::IEEEhalf();
    case MVT::bf16:    return APFloat::BFloat();
    case MVT::f32:     return APFloat::IEEEsingle();
    case MVT::f64:     return APFloat::IEEEdouble();
    case MVT::f80:     return APFloat::x87DoubleExtended();
    case MVT::f128:    return APFloat::IEEEquad();
    case MVT::ppcf128: return APFloat::PPCDoubleDouble();
    }
  }
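
  // Usage sketch: construct an APFloat in the matching semantics for a value
  // type (the type and literal below are illustrative):
  //
  //   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(MVT::f32);
  //   APFloat One(Sem, "1.0");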

  /// Add a dbg_value SDNode. If SD is non-null that means the
  /// value is produced by SD.
  void AddDbgValue(SDDbgValue *DB, bool isParameter);

  /// Add a dbg_label SDNode.
  void AddDbgLabel(SDDbgLabel *DB);

  /// Get the debug values which reference the given SDNode.
  ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) const {
    return DbgInfo->getSDDbgValues(SD);
  }

public:
  /// Return true if there are any SDDbgValue nodes associated
  /// with this SelectionDAG.
  bool hasDebugValues() const { return !DbgInfo->empty(); }

  SDDbgInfo::DbgIterator DbgBegin() const { return DbgInfo->DbgBegin(); }
  SDDbgInfo::DbgIterator DbgEnd() const { return DbgInfo->DbgEnd(); }

  SDDbgInfo::DbgIterator ByvalParmDbgBegin() const {
    return DbgInfo->ByvalParmDbgBegin();
  }
  SDDbgInfo::DbgIterator ByvalParmDbgEnd() const {
    return DbgInfo->ByvalParmDbgEnd();
  }

  SDDbgInfo::DbgLabelIterator DbgLabelBegin() const {
    return DbgInfo->DbgLabelBegin();
  }
  SDDbgInfo::DbgLabelIterator DbgLabelEnd() const {
    return DbgInfo->DbgLabelEnd();
  }

  /// To be invoked on an SDNode that is slated to be erased. This
  /// function mirrors \c llvm::salvageDebugInfo.
  void salvageDebugInfo(SDNode &N);

  void dump() const;

  /// In most cases this function returns the ABI alignment for a given type,
  /// except for illegal vector types where the alignment exceeds that of the
  /// stack. In such cases we attempt to break the vector down to a legal type
  /// and return the ABI alignment for that instead.
  Align getReducedAlign(EVT VT, bool UseABI);

  /// Create a stack temporary based on the size in bytes and the alignment
  SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment);

  /// Create a stack temporary, suitable for holding the specified value type.
  /// If minAlign is specified, the slot size will have at least that alignment.
  SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);

  /// Create a stack temporary suitable for holding either of the specified
  /// value types.
  SDValue CreateStackTemporary(EVT VT1, EVT VT2);

  SDValue FoldSymbolOffset(unsigned Opcode, EVT VT,
                           const GlobalAddressSDNode *GA,
                           const SDNode *N2);

  SDValue FoldConstantArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
                                 ArrayRef<SDValue> Ops);

  /// Fold floating-point operations with 2 operands when both operands are
  /// constants and/or undefined.
  SDValue foldConstantFPMath(unsigned Opcode, const SDLoc &DL, EVT VT,
                             SDValue N1, SDValue N2);

  /// Constant fold a setcc to true or false.
  SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
                    const SDLoc &dl);

  /// Return true if the sign bit of Op is known to be zero.
  /// We use this predicate to simplify operations downstream.
  bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;

  /// Return true if 'Op & Mask' is known to be zero.  We
  /// use this predicate to simplify operations downstream.  Op and Mask are
  /// known to be the same type.
  bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
                         unsigned Depth = 0) const;

  /// Return true if 'Op & Mask' is known to be zero in DemandedElts.  We
  /// use this predicate to simplify operations downstream.  Op and Mask are
  /// known to be the same type.
  bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
                         const APInt &DemandedElts, unsigned Depth = 0) const;

  /// Return true if 'Op' is known to be zero in DemandedElts.  We
  /// use this predicate to simplify operations downstream.
  bool MaskedVectorIsZero(SDValue Op, const APInt &DemandedElts,
                          unsigned Depth = 0) const;

  /// Return true if '(Op & Mask) == Mask'.
  /// Op and Mask are known to be the same type.
  bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
                            unsigned Depth = 0) const;

  /// For each demanded element of a vector, see if it is known to be zero.
  APInt computeVectorKnownZeroElements(SDValue Op, const APInt &DemandedElts,
                                       unsigned Depth = 0) const;

  /// Determine which bits of Op are known to be either zero or one and return
  /// them in Known. For vectors, the known bits are those that are shared by
  /// every vector element.
  /// Targets can implement the computeKnownBitsForTargetNode method in the
  /// TargetLowering class to allow target nodes to be understood.
  KnownBits computeKnownBits(SDValue Op, unsigned Depth = 0) const;

  /// Determine which bits of Op are known to be either zero or one and return
  /// them in Known. The DemandedElts argument allows us to only collect the
  /// known bits that are shared by the requested vector elements.
  /// Targets can implement the computeKnownBitsForTargetNode method in the
  /// TargetLowering class to allow target nodes to be understood.
  KnownBits computeKnownBits(SDValue Op, const APInt &DemandedElts,
                             unsigned Depth = 0) const;
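
  // Usage sketch: prove that the low bits of a pointer-typed value are zero
  // (DAG and Ptr are assumed to be in scope):
  //
  //   KnownBits Known = DAG.computeKnownBits(Ptr);
  //   if (Known.countMinTrailingZeros() >= 2) {
  //     // Ptr is provably 4-byte aligned.
  //   }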

  /// Used to represent the possible overflow behavior of an operation.
  /// Never: the operation cannot overflow.
  /// Always: the operation will always overflow.
  /// Sometime: the operation may or may not overflow.
  enum OverflowKind {
    OFK_Never,
    OFK_Sometime,
    OFK_Always,
  };

  /// Determine if the result of the signed addition of 2 nodes can overflow.
  OverflowKind computeOverflowForSignedAdd(SDValue N0, SDValue N1) const;

  /// Determine if the result of the unsigned addition of 2 nodes can overflow.
  OverflowKind computeOverflowForUnsignedAdd(SDValue N0, SDValue N1) const;

  /// Determine if the result of the addition of 2 nodes can overflow.
  OverflowKind computeOverflowForAdd(bool IsSigned, SDValue N0,
                                     SDValue N1) const {
    return IsSigned ? computeOverflowForSignedAdd(N0, N1)
                    : computeOverflowForUnsignedAdd(N0, N1);
  }
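
  // Usage sketch: a combine may replace an overflow-checked add with plain
  // arithmetic when overflow is provably impossible (DAG, N0, N1 assumed):
  //
  //   if (DAG.computeOverflowForAdd(/*IsSigned=*/false, N0, N1) ==
  //       SelectionDAG::OFK_Never) {
  //     // The overflow result of a uaddo with these operands is known false.
  //   }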

  /// Determine if the result of the signed sub of 2 nodes can overflow.
  OverflowKind computeOverflowForSignedSub(SDValue N0, SDValue N1) const;

  /// Determine if the result of the unsigned sub of 2 nodes can overflow.
  OverflowKind computeOverflowForUnsignedSub(SDValue N0, SDValue N1) const;

  /// Determine if the result of the sub of 2 nodes can overflow.
  OverflowKind computeOverflowForSub(bool IsSigned, SDValue N0,
                                     SDValue N1) const {
    return IsSigned ? computeOverflowForSignedSub(N0, N1)
                    : computeOverflowForUnsignedSub(N0, N1);
  }

  /// Test if the given value is known to have exactly one bit set. This differs
  /// from computeKnownBits in that it doesn't necessarily determine which bit
  /// is set.
  bool isKnownToBeAPowerOfTwo(SDValue Val, unsigned Depth = 0) const;

  /// Return the number of times the sign bit of the register is replicated into
  /// the other bits. We know that at least 1 bit is always equal to the sign
  /// bit (itself), but other cases can give us information. For example,
  /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
  /// to each other, so we return 3. Targets can implement the
  /// ComputeNumSignBitsForTarget method in the TargetLowering class to allow
  /// target nodes to be understood.
  unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;

  /// Return the number of times the sign bit of the register is replicated into
  /// the other bits. We know that at least 1 bit is always equal to the sign
  /// bit (itself), but other cases can give us information. For example,
  /// immediately after an "SRA X, 2", we know that the top 3 bits are all equal
  /// to each other, so we return 3. The DemandedElts argument allows
  /// us to only collect the minimum sign bits of the requested vector elements.
  /// Targets can implement the ComputeNumSignBitsForTarget method in the
  /// TargetLowering class to allow target nodes to be understood.
  unsigned ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
                              unsigned Depth = 0) const;

  /// Get the upper bound on bit size for this Value \p Op as a signed integer.
  /// i.e.  x == sext(trunc(x to MaxSignedBits) to bitwidth(x)).
  /// Similar to the APInt::getSignificantBits function.
  /// Helper wrapper to ComputeNumSignBits.
  unsigned ComputeMaxSignificantBits(SDValue Op, unsigned Depth = 0) const;

  /// Get the upper bound on bit size for this Value \p Op as a signed integer.
  /// i.e.  x == sext(trunc(x to MaxSignedBits) to bitwidth(x)).
  /// Similar to the APInt::getSignificantBits function.
  /// Helper wrapper to ComputeNumSignBits.
  unsigned ComputeMaxSignificantBits(SDValue Op, const APInt &DemandedElts,
                                     unsigned Depth = 0) const;

  /// Return true if this function can prove that \p Op is never poison
  /// and, if \p PoisonOnly is false, does not have undef bits.
  bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, bool PoisonOnly = false,
                                        unsigned Depth = 0) const;

  /// Return true if this function can prove that \p Op is never poison
  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
  /// argument limits the check to the requested vector elements.
  bool isGuaranteedNotToBeUndefOrPoison(SDValue Op, const APInt &DemandedElts,
                                        bool PoisonOnly = false,
                                        unsigned Depth = 0) const;

  /// Return true if this function can prove that \p Op is never poison.
  bool isGuaranteedNotToBePoison(SDValue Op, unsigned Depth = 0) const {
    return isGuaranteedNotToBeUndefOrPoison(Op, /*PoisonOnly*/ true, Depth);
  }

  /// Return true if this function can prove that \p Op is never poison. The
  /// DemandedElts argument limits the check to the requested vector elements.
  bool isGuaranteedNotToBePoison(SDValue Op, const APInt &DemandedElts,
                                 unsigned Depth = 0) const {
    return isGuaranteedNotToBeUndefOrPoison(Op, DemandedElts,
                                            /*PoisonOnly*/ true, Depth);
  }
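
  // Usage sketch: freeze(x) can be folded to x when x is provably neither
  // undef nor poison (DAG and N assumed in scope):
  //
  //   if (N->getOpcode() == ISD::FREEZE &&
  //       DAG.isGuaranteedNotToBeUndefOrPoison(N->getOperand(0)))
  //     return N->getOperand(0);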

  /// Return true if Op can create undef or poison from non-undef & non-poison
  /// operands. The DemandedElts argument limits the check to the requested
  /// vector elements.
  ///
  /// \p ConsiderFlags controls whether poison producing flags on the
  /// instruction are considered.  This can be used to see if the instruction
  /// could still introduce undef or poison even without poison generating flags
  /// which might be on the instruction.  (i.e. could the result of
  /// Op->dropPoisonGeneratingFlags() still create poison or undef)
  bool canCreateUndefOrPoison(SDValue Op, const APInt &DemandedElts,
                              bool PoisonOnly = false,
                              bool ConsiderFlags = true,
                              unsigned Depth = 0) const;

  /// Return true if Op can create undef or poison from non-undef & non-poison
  /// operands.
  ///
  /// \p ConsiderFlags controls whether poison producing flags on the
  /// instruction are considered.  This can be used to see if the instruction
  /// could still introduce undef or poison even without poison generating flags
  /// which might be on the instruction.  (i.e. could the result of
  /// Op->dropPoisonGeneratingFlags() still create poison or undef)
  bool canCreateUndefOrPoison(SDValue Op, bool PoisonOnly = false,
                              bool ConsiderFlags = true,
                              unsigned Depth = 0) const;

  /// Return true if the specified operand is an ISD::ADD with a ConstantSDNode
  /// on the right-hand side, or if it is an ISD::OR with a ConstantSDNode that
  /// is guaranteed to have the same semantics as an ADD. This handles the
  /// equivalence:
  ///     X|Cst == X+Cst iff X&Cst == 0.
  bool isBaseWithConstantOffset(SDValue Op) const;

  /// Test whether the given SDValue (or all elements of it, if it is a
  /// vector) is known to never be NaN. If \p SNaN is true, returns if \p Op is
  /// known to never be a signaling NaN (it may still be a qNaN).
  bool isKnownNeverNaN(SDValue Op, bool SNaN = false, unsigned Depth = 0) const;

  /// \returns true if \p Op is known to never be a signaling NaN.
  bool isKnownNeverSNaN(SDValue Op, unsigned Depth = 0) const {
    return isKnownNeverNaN(Op, true, Depth);
  }

  /// Test whether the given floating point SDValue is known to never be
  /// positive or negative zero.
  bool isKnownNeverZeroFloat(SDValue Op) const;

  /// Test whether the given SDValue is known to contain non-zero value(s).
  bool isKnownNeverZero(SDValue Op, unsigned Depth = 0) const;

  /// Test whether two SDValues are known to compare equal. This
  /// is true if they are the same value, or if one is negative zero and the
  /// other positive zero.
  bool isEqualTo(SDValue A, SDValue B) const;

  /// Return true if A and B have no common bits set. As an example, this can
  /// allow an 'add' to be transformed into an 'or'.
  bool haveNoCommonBitsSet(SDValue A, SDValue B) const;

  /// Test whether \p V has a splatted value for all the demanded elements.
  ///
  /// On success \p UndefElts will indicate the elements that have UNDEF
  /// values instead of the splat value, this is only guaranteed to be correct
  /// for \p DemandedElts.
  ///
  /// NOTE: The function will return true for a demanded splat of UNDEF values.
  bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts,
                    unsigned Depth = 0) const;

  /// Test whether \p V has a splatted value.
  bool isSplatValue(SDValue V, bool AllowUndefs = false) const;

  /// If V is a splatted value, return the source vector and its splat index.
  SDValue getSplatSourceVector(SDValue V, int &SplatIndex);

  /// If V is a splat vector, return its scalar source operand by extracting
  /// that element from the source vector. If LegalTypes is true, this method
  /// may only return a legally-typed splat value. If it cannot legalize the
  /// splatted value it will return SDValue().
  SDValue getSplatValue(SDValue V, bool LegalTypes = false);

  /// If a SHL/SRA/SRL node \p V has a constant or splat constant shift amount
  /// that is less than the element bit-width of the shift node, return it.
  const APInt *getValidShiftAmountConstant(SDValue V,
                                           const APInt &DemandedElts) const;

  /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
  /// than the element bit-width of the shift node, return the minimum value.
  const APInt *
  getValidMinimumShiftAmountConstant(SDValue V,
                                     const APInt &DemandedElts) const;

  /// If a SHL/SRA/SRL node \p V has constant shift amounts that are all less
  /// than the element bit-width of the shift node, return the maximum value.
  const APInt *
  getValidMaximumShiftAmountConstant(SDValue V,
                                     const APInt &DemandedElts) const;

  /// Match a binop + shuffle pyramid that represents a horizontal reduction
  /// over the elements of a vector starting from the EXTRACT_VECTOR_ELT node
  /// \p Extract. The reduction must use one of the opcodes listed in
  /// \p CandidateBinOps, and on success \p BinOp will contain the matching
  /// opcode. Returns the vector that is being reduced on, or SDValue() if a
  /// reduction was not matched. If \p AllowPartials is set then in the case of
  /// a reduction pattern that only matches the first few stages, the extracted
  /// subvector of the start of the reduction is returned.
  SDValue matchBinOpReduction(SDNode *Extract, ISD::NodeType &BinOp,
                              ArrayRef<ISD::NodeType> CandidateBinOps,
                              bool AllowPartials = false);
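
  // Usage sketch: recognize a horizontal integer-add reduction feeding an
  // extract (Extract is assumed to be an EXTRACT_VECTOR_ELT node):
  //
  //   ISD::NodeType BinOp;
  //   if (SDValue Src = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD})) {
  //     // Src is the vector being summed; BinOp is ISD::ADD here.
  //   }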

  /// Utility function used by legalize and lowering to
  /// "unroll" a vector operation by splitting out the scalars and operating
  /// on each element individually. If ResNE is 0, fully unroll the vector
  /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
  /// If ResNE is greater than the width of the vector op, unroll the vector op
  /// and fill the end of the resulting vector with UNDEFs.
  SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);

  /// Like UnrollVectorOp(), but for the [US](ADD|SUB|MUL)O family of opcodes.
  /// This is a separate function because those opcodes have two results.
  std::pair<SDValue, SDValue> UnrollVectorOverflowOp(SDNode *N,
                                                     unsigned ResNE = 0);

  /// Return true if the loads are next to each other and can be merged, i.e.
  /// both are nonvolatile and LD is loading 'Bytes' bytes from a location
  /// that is 'Dist' units away from the location that the 'Base' load is
  /// loading from.
  bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base,
                                      unsigned Bytes, int Dist) const;

  /// Infer alignment of a load / store address. Return std::nullopt if it
  /// cannot be inferred.
  MaybeAlign InferPtrAlign(SDValue Ptr) const;

  /// Split the scalar node with EXTRACT_ELEMENT using the provided VTs and
  /// return the low/high part.
  std::pair<SDValue, SDValue> SplitScalar(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HiVT);

  /// Compute the VTs needed for the low/hi parts of a type
  /// which is split (or expanded) into two not necessarily identical pieces.
  std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;

  /// Compute the VTs needed for the low/hi parts of a type, dependent on an
  /// enveloping VT that has been split into two identical pieces. Sets the
  /// HiIsEmpty flag when the hi type has zero storage size.
  std::pair<EVT, EVT> GetDependentSplitDestVTs(const EVT &VT, const EVT &EnvVT,
                                               bool *HiIsEmpty) const;

  /// Split the vector with EXTRACT_SUBVECTOR using the provided
  /// VTs and return the low/high part.
  std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HiVT);

  /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
  std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
    EVT LoVT, HiVT;
    std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
    return SplitVector(N, DL, LoVT, HiVT);
  }
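
  // Usage sketch: legalize a wide vector value by splitting it in half and
  // recombining (DAG, N, and DL are assumed to be in scope):
  //
  //   SDValue Lo, Hi;
  //   std::tie(Lo, Hi) = DAG.SplitVector(N, DL);
  //   // ...operate on Lo and Hi, then reassemble with ISD::CONCAT_VECTORS.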

  /// Split the explicit vector length parameter of a VP operation.
  std::pair<SDValue, SDValue> SplitEVL(SDValue N, EVT VecVT, const SDLoc &DL);

  /// Split the node's operand with EXTRACT_SUBVECTOR and
  /// return the low/high part.
  std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N,
                                                 unsigned OpNo) {
    return SplitVector(N->getOperand(OpNo), SDLoc(N));
  }

  /// Widen the vector up to the next power of two using INSERT_SUBVECTOR.
  SDValue WidenVector(const SDValue &N, const SDLoc &DL);

  /// Append the extracted elements from Start to Count out of the vector Op in
  /// Args. If Count is 0, all of the elements will be extracted. The extracted
  /// elements will have type EVT if it is provided, and otherwise their type
  /// will be Op's element type.
  void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
                             unsigned Start = 0, unsigned Count = 0,
                             EVT EltVT = EVT());

  /// Compute the default alignment value for the given type.
  Align getEVTAlign(EVT MemoryVT) const;

  /// Test whether the given value is a constant int or similar node.
  SDNode *isConstantIntBuildVectorOrConstantInt(SDValue N) const;

  /// Test whether the given value is a constant FP or similar node.
  SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) const;

  /// \returns true if \p N is any kind of constant or build_vector of
  /// constants, int or float. If a vector, it may not necessarily be a splat.
  inline bool isConstantValueOfAnyType(SDValue N) const {
    return isConstantIntBuildVectorOrConstantInt(N) ||
           isConstantFPBuildVectorOrConstantFP(N);
  }

  /// Set CallSiteInfo to be associated with Node.
  void addCallSiteInfo(const SDNode *Node, CallSiteInfoImpl &&CallInfo) {
    SDEI[Node].CSInfo = std::move(CallInfo);
  }
  /// Return CallSiteInfo associated with Node, or a default if none exists.
  CallSiteInfo getCallSiteInfo(const SDNode *Node) {
    auto I = SDEI.find(Node);
    return I != SDEI.end() ? std::move(I->second).CSInfo : CallSiteInfo();
  }
  /// Set HeapAllocSite to be associated with Node.
  void addHeapAllocSite(const SDNode *Node, MDNode *MD) {
    SDEI[Node].HeapAllocSite = MD;
  }
  /// Return HeapAllocSite associated with Node, or nullptr if none exists.
  MDNode *getHeapAllocSite(const SDNode *Node) const {
    auto I = SDEI.find(Node);
    return I != SDEI.end() ? I->second.HeapAllocSite : nullptr;
  }
  /// Set PCSections to be associated with Node.
  void addPCSections(const SDNode *Node, MDNode *MD) {
    SDEI[Node].PCSections = MD;
  }
  /// Return PCSections associated with Node, or nullptr if none exists.
  MDNode *getPCSections(const SDNode *Node) const {
    auto It = SDEI.find(Node);
    return It != SDEI.end() ? It->second.PCSections : nullptr;
  }
  /// Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
  void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge) {
    if (NoMerge)
      SDEI[Node].NoMerge = NoMerge;
  }
  /// Return NoMerge info associated with Node.
  bool getNoMergeSiteInfo(const SDNode *Node) const {
    auto I = SDEI.find(Node);
    return I != SDEI.end() ? I->second.NoMerge : false;
  }

  /// Copy extra info associated with one node to another.
  void copyExtraInfo(SDNode *From, SDNode *To);

  /// Return the current function's default denormal handling kind for the given
  /// floating point type.
  DenormalMode getDenormalMode(EVT VT) const {
    return MF->getDenormalMode(EVTToAPFloatSemantics(VT));
  }

  bool shouldOptForSize() const;

  /// Get the (commutative) neutral element for the given opcode, if it exists.
  SDValue getNeutralElement(unsigned Opcode, const SDLoc &DL, EVT VT,
                            SDNodeFlags Flags);

  /// Some opcodes may create immediate undefined behavior when used with some
  /// values (integer division-by-zero for example). Therefore, these operations
  /// are not generally safe to move around or change.
  bool isSafeToSpeculativelyExecute(unsigned Opcode) const {
    switch (Opcode) {
    case ISD::SDIV:
    case ISD::SREM:
    case ISD::SDIVREM:
    case ISD::UDIV:
    case ISD::UREM:
    case ISD::UDIVREM:
      return false;
    default:
      return true;
    }
  }

  /// Check if the provided node is safe to speculatively execute given its
  /// current arguments. So, while the `udiv` opcode is not safe to
  /// speculatively execute in general, a given `udiv` node may be if its
  /// denominator is known to be nonzero.
  bool isSafeToSpeculativelyExecuteNode(const SDNode *N) const {
    switch (N->getOpcode()) {
    case ISD::UDIV:
      return isKnownNeverZero(N->getOperand(1));
    default:
      return isSafeToSpeculativelyExecute(N->getOpcode());
    }
  }
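
  // Usage sketch: hoisting or CSE decisions can consult the node-level query,
  // which refines the opcode-level one above (DAG and N assumed in scope):
  //
  //   if (DAG.isSafeToSpeculativelyExecuteNode(N)) {
  //     // e.g. a udiv whose divisor is known nonzero may be moved freely.
  //   }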

  SDValue makeStateFunctionCall(unsigned LibFunc, SDValue Ptr, SDValue InChain,
                                const SDLoc &DLoc);

private:
  void InsertNode(SDNode *N);
  bool RemoveNodeFromCSEMaps(SDNode *N);
  void AddModifiedNodeToCSEMaps(SDNode *N);
  SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
  SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
                               void *&InsertPos);
  SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
                               void *&InsertPos);
  SDNode *UpdateSDLocOnMergeSDNode(SDNode *N, const SDLoc &loc);

  void DeleteNodeNotInCSEMaps(SDNode *N);
  void DeallocateNode(SDNode *N);

  void allnodes_clear();

  /// Look up the node specified by ID in CSEMap.  If it exists, return it.  If
  /// not, return the insertion token that will make insertion faster.  This
  /// overload is for nodes other than Constant or ConstantFP, use the other one
  /// for those.
  SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);

  /// Look up the node specified by ID in CSEMap.  If it exists, return it.  If
  /// not, return the insertion token that will make insertion faster.  Performs
  /// additional processing for constant nodes.
  SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, const SDLoc &DL,
                              void *&InsertPos);

  /// Maps to auto-CSE operations.
  std::vector<CondCodeSDNode*> CondCodeNodes;

  std::vector<SDNode*> ValueTypeNodes;
  std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
  StringMap<SDNode*> ExternalSymbols;

  std::map<std::pair<std::string, unsigned>, SDNode *> TargetExternalSymbols;
  DenseMap<MCSymbol *, SDNode *> MCSymbols;

  FlagInserter *Inserter = nullptr;
};

template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
  using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;

  static nodes_iterator nodes_begin(SelectionDAG *G) {
    return nodes_iterator(G->allnodes_begin());
  }

  static nodes_iterator nodes_end(SelectionDAG *G) {
    return nodes_iterator(G->allnodes_end());
  }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_SELECTIONDAG_H
//===-- llvm/CodeGen/MIRFormatter.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MIRFormatter class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRFORMATTER_H
#define LLVM_CODEGEN_MIRFORMATTER_H

#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <optional>

namespace llvm {

class MachineFunction;
class MachineInstr;
struct PerFunctionMIParsingState;

/// MIRFormatter - Interface to format MIR operands based on the target.
class MIRFormatter {
public:
  typedef function_ref<bool(StringRef::iterator Loc, const Twine &)>
      ErrorCallbackType;

  MIRFormatter() = default;
  virtual ~MIRFormatter() = default;

  /// Implement target-specific printing for a machine operand immediate value,
  /// so that we can have a more meaningful mnemonic than a 64-bit integer.
  /// Passing std::nullopt as OpIdx means the operand index is unknown.
  virtual void printImm(raw_ostream &OS, const MachineInstr &MI,
                        std::optional<unsigned> OpIdx, int64_t Imm) const {
    OS << Imm;
  }
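
  // Illustrative sketch of a target override (the target name, opcode, and
  // mnemonic below are hypothetical):
  //
  //   class MyTargetMIRFormatter : public MIRFormatter {
  //     void printImm(raw_ostream &OS, const MachineInstr &MI,
  //                   std::optional<unsigned> OpIdx,
  //                   int64_t Imm) const override {
  //       if (MI.getOpcode() == MyTarget::FENCE && Imm == 0)
  //         OS << "acquire"; // print a mnemonic instead of the raw value
  //       else
  //         MIRFormatter::printImm(OS, MI, OpIdx, Imm);
  //     }
  //   };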

  /// Implement target-specific parsing of immediate mnemonics. The mnemonic
  /// is a dot-separated string.
  virtual bool parseImmMnemonic(const unsigned OpCode, const unsigned OpIdx,
                                StringRef Src, int64_t &Imm,
                                ErrorCallbackType ErrorCallback) const {
    llvm_unreachable("target did not implement parsing MIR immediate mnemonic");
  }

  /// Implement target-specific printing of a target custom pseudo source
  /// value. The default implementation is not necessarily the correct MIR
  /// serialization format.
  virtual void
  printCustomPseudoSourceValue(raw_ostream &OS, ModuleSlotTracker &MST,
                               const PseudoSourceValue &PSV) const {
    PSV.printCustom(OS);
  }

  /// Implement target-specific parsing of a target custom pseudo source value.
  virtual bool parseCustomPseudoSourceValue(
      StringRef Src, MachineFunction &MF, PerFunctionMIParsingState &PFS,
      const PseudoSourceValue *&PSV, ErrorCallbackType ErrorCallback) const {
    llvm_unreachable(
        "target did not implement parsing MIR custom pseudo source value");
  }

  /// Helper function to print an IR value in MIR serialization format, which
  /// is useful for a target-specific printer, e.g. for printing an IR value
  /// in a custom pseudo source value.
  static void printIRValue(raw_ostream &OS, const Value &V,
                           ModuleSlotTracker &MST);

  /// Helper function to parse an IR value from MIR serialization format, which
  /// is useful for a target-specific parser, e.g. for parsing an IR value for
  /// a custom pseudo source value.
  static bool parseIRValue(StringRef Src, MachineFunction &MF,
                           PerFunctionMIParsingState &PFS, const Value *&V,
                           ErrorCallbackType ErrorCallback);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MIRFORMATTER_H
//===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Data structures for DWARF info entries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_DIE_H
#define LLVM_CODEGEN_DIE_H

#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>
#include <vector>

namespace llvm {

class AsmPrinter;
class DIE;
class DIEUnit;
class DwarfCompileUnit;
class MCExpr;
class MCSection;
class MCSymbol;
class raw_ostream;

//===--------------------------------------------------------------------===//
/// Dwarf abbreviation data, describes one attribute of a Dwarf abbreviation.
class DIEAbbrevData {
  /// Dwarf attribute code.
  dwarf::Attribute Attribute;

  /// Dwarf form code.
  dwarf::Form Form;

  /// Dwarf attribute value for DW_FORM_implicit_const
  int64_t Value = 0;

public:
  DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
      : Attribute(A), Form(F) {}
  DIEAbbrevData(dwarf::Attribute A, int64_t V)
      : Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}

  /// Accessors.
  /// @{
  dwarf::Attribute getAttribute() const { return Attribute; }
  dwarf::Form getForm() const { return Form; }
  int64_t getValue() const { return Value; }
  /// @}

  /// Used to gather unique data for the abbreviation folding set.
  void Profile(FoldingSetNodeID &ID) const;
};

//===--------------------------------------------------------------------===//
/// Dwarf abbreviation, describes the organization of a debug information
/// object.
class DIEAbbrev : public FoldingSetNode {
  /// Unique number for node.
  unsigned Number = 0;

  /// Dwarf tag code.
  dwarf::Tag Tag;

  /// Whether or not this node has children.
  ///
  /// This cheats a bit in all of the uses since the values in the standard
  /// are 0 and 1 for no children and children respectively.
  bool Children;

  /// Raw data bytes for abbreviation.
  SmallVector<DIEAbbrevData, 12> Data;

public:
  DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C) {}

  /// Accessors.
  /// @{
  dwarf::Tag getTag() const { return Tag; }
  unsigned getNumber() const { return Number; }
  bool hasChildren() const { return Children; }
  const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
  void setChildrenFlag(bool hasChild) { Children = hasChild; }
  void setNumber(unsigned N) { Number = N; }
  /// @}

  /// Adds another set of attribute information to the abbreviation.
  void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
    Data.push_back(DIEAbbrevData(Attribute, Form));
  }

  /// Adds attribute with DW_FORM_implicit_const value
  void AddImplicitConstAttribute(dwarf::Attribute Attribute, int64_t Value) {
    Data.push_back(DIEAbbrevData(Attribute, Value));
  }

  /// Adds another set of attribute information to the abbreviation.
  void AddAttribute(const DIEAbbrevData &AbbrevData) {
    Data.push_back(AbbrevData);
  }

  /// Used to gather unique data for the abbreviation folding set.
  void Profile(FoldingSetNodeID &ID) const;

  /// Print the abbreviation using the specified asm printer.
  void Emit(const AsmPrinter *AP) const;

  void print(raw_ostream &O) const;
  void dump() const;
};
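
// Usage sketch: build an abbreviation for a childless variable DIE (the
// attribute/form choices are illustrative):
//
//   DIEAbbrev Abbrev(dwarf::DW_TAG_variable, /*Children=*/false);
//   Abbrev.AddAttribute(dwarf::DW_AT_name, dwarf::DW_FORM_strp);
//   Abbrev.AddImplicitConstAttribute(dwarf::DW_AT_artificial, 1);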

//===--------------------------------------------------------------------===//
/// Helps unique DIEAbbrev objects and assigns abbreviation numbers.
///
/// This class will unique the DIE abbreviations for a llvm::DIE object and
/// assign a unique abbreviation number to each unique DIEAbbrev object it
/// finds. The resulting collection of DIEAbbrev objects can then be emitted
/// into the .debug_abbrev section.
class DIEAbbrevSet {
  /// The bump allocator to use when creating DIEAbbrev objects in the uniqued
  /// storage container.
  BumpPtrAllocator &Alloc;
  /// FoldingSet that uniques the abbreviations.
  FoldingSet<DIEAbbrev> AbbreviationsSet;
  /// A list of all the unique abbreviations in use.
  std::vector<DIEAbbrev *> Abbreviations;

public:
  DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
  ~DIEAbbrevSet();

  /// Generate the abbreviation declaration for a DIE and return a pointer to
  /// the generated abbreviation.
  ///
  /// \param Die the debug info entry to generate the abbreviation for.
  /// \returns A reference to the uniqued abbreviation declaration that is
  /// owned by this class.
  DIEAbbrev &uniqueAbbreviation(DIE &Die);

  /// Print all abbreviations using the specified asm printer.
  void Emit(const AsmPrinter *AP, MCSection *Section) const;
};

//===--------------------------------------------------------------------===//
/// An integer value DIE.
///
class DIEInteger {
  uint64_t Integer;

public:
  explicit DIEInteger(uint64_t I) : Integer(I) {}

  /// Choose the best form for integer.
  static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
    if (IsSigned) {
      const int64_t SignedInt = Int;
      if ((char)Int == SignedInt)
        return dwarf::DW_FORM_data1;
      if ((short)Int == SignedInt)
        return dwarf::DW_FORM_data2;
      if ((int)Int == SignedInt)
        return dwarf::DW_FORM_data4;
    } else {
      if ((unsigned char)Int == Int)
        return dwarf::DW_FORM_data1;
      if ((unsigned short)Int == Int)
        return dwarf::DW_FORM_data2;
      if ((unsigned int)Int == Int)
        return dwarf::DW_FORM_data4;
    }
    return dwarf::DW_FORM_data8;
  }
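
  // Worked example: for an unsigned value of 300, (unsigned char)300 != 300
  // but (unsigned short)300 == 300, so BestForm(false, 300) yields
  // dwarf::DW_FORM_data2. BestForm(true, uint64_t(-1)) yields DW_FORM_data1,
  // since -1 round-trips through a signed char.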

  uint64_t getValue() const { return Integer; }
  void setValue(uint64_t Val) { Integer = Val; }

  void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// An expression DIE.
class DIEExpr {
  const MCExpr *Expr;

public:
  explicit DIEExpr(const MCExpr *E) : Expr(E) {}

  /// Get MCExpr.
  const MCExpr *getValue() const { return Expr; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// A label DIE.
class DIELabel {
  const MCSymbol *Label;

public:
  explicit DIELabel(const MCSymbol *L) : Label(L) {}

  /// Get MCSymbol.
  const MCSymbol *getValue() const { return Label; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// A BaseTypeRef DIE.
class DIEBaseTypeRef {
  const DwarfCompileUnit *CU;
  const uint64_t Index;
  static constexpr unsigned ULEB128PadSize = 4;

public:
  explicit DIEBaseTypeRef(const DwarfCompileUnit *TheCU, uint64_t Idx)
    : CU(TheCU), Index(Idx) {}

  /// EmitValue - Emit base type reference.
  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  /// sizeOf - Determine size of the base type reference in bytes.
  unsigned sizeOf(const dwarf::FormParams &, dwarf::Form) const;

  void print(raw_ostream &O) const;
  uint64_t getIndex() const { return Index; }
};

//===--------------------------------------------------------------------===//
/// A simple label difference DIE.
///
class DIEDelta {
  const MCSymbol *LabelHi;
  const MCSymbol *LabelLo;

public:
  DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// A container for string pool string values.
///
/// This class is used with the DW_FORM_strp and DW_FORM_GNU_str_index forms.
class DIEString {
  DwarfStringPoolEntryRef S;

public:
  DIEString(DwarfStringPoolEntryRef S) : S(S) {}

  /// Grab the string out of the object.
  StringRef getString() const { return S.getString(); }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// A container for inline string values.
///
/// This class is used with the DW_FORM_string form.
class DIEInlineString {
  StringRef S;

public:
  template <typename Allocator>
  explicit DIEInlineString(StringRef Str, Allocator &A) : S(Str.copy(A)) {}

  ~DIEInlineString() = default;

  /// Grab the string out of the object.
  StringRef getString() const { return S; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &, dwarf::Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// A pointer to another debug information entry. An instance of this class
/// can also be used as a proxy for a debug information entry not yet defined
/// (i.e. types).
class DIEEntry {
  DIE *Entry;

public:
  DIEEntry() = delete;
  explicit DIEEntry(DIE &E) : Entry(&E) {}

  DIE &getEntry() const { return *Entry; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// Represents a pointer to a location list in the debug_loc
/// section.
class DIELocList {
  /// Index into the .debug_loc vector.
  size_t Index;

public:
  DIELocList(size_t I) : Index(I) {}

  /// Grab the current index out.
  size_t getValue() const { return Index; }

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// An address pool index (DIEInteger) plus an offset (DIEDelta) DIE.
class DIEAddrOffset {
  DIEInteger Addr;
  DIEDelta Offset;

public:
  explicit DIEAddrOffset(uint64_t Idx, const MCSymbol *Hi, const MCSymbol *Lo)
      : Addr(Idx), Offset(Hi, Lo) {}

  void emitValue(const AsmPrinter *AP, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &FormParams, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

//===--------------------------------------------------------------------===//
/// A debug information entry value. Some of these roughly correlate
/// to DWARF attribute classes.
class DIEBlock;
class DIELoc;
class DIEValue {
public:
  enum Type {
    isNone,
#define HANDLE_DIEVALUE(T) is##T,
#include "llvm/CodeGen/DIEValue.def"
  };

private:
  /// Type of data stored in the value.
  Type Ty = isNone;
  dwarf::Attribute Attribute = (dwarf::Attribute)0;
  dwarf::Form Form = (dwarf::Form)0;

  /// Storage for the value.
  ///
  /// All values that aren't standard layout (or are larger than 8 bytes)
  /// should be stored by reference instead of by value.
  using ValTy =
      AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
                            DIEDelta *, DIEEntry, DIEBlock *, DIELoc *,
                            DIELocList, DIEBaseTypeRef *, DIEAddrOffset *>;

  static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
                    sizeof(ValTy) <= sizeof(void *),
                "Expected all large types to be stored via pointer");

  /// Underlying stored value.
  ValTy Val;

  template <class T> void construct(T V) {
    static_assert(std::is_standard_layout<T>::value ||
                      std::is_pointer<T>::value,
                  "Expected standard layout or pointer");
    new (reinterpret_cast<void *>(&Val)) T(V);
  }

  template <class T> T *get() { return reinterpret_cast<T *>(&Val); }
  template <class T> const T *get() const {
    return reinterpret_cast<const T *>(&Val);
  }
  template <class T> void destruct() { get<T>()->~T(); }

  /// Destroy the underlying value.
  ///
  /// This should get optimized down to a no-op.  We could skip it if we could
  /// add a static assert on \a std::is_trivially_copyable(), but we currently
  /// support versions of GCC that don't understand that.
  void destroyVal() {
    switch (Ty) {
    case isNone:
      return;
#define HANDLE_DIEVALUE_SMALL(T)                                               \
  case is##T:                                                                  \
    destruct<DIE##T>();                                                        \
    return;
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  case is##T:                                                                  \
    destruct<const DIE##T *>();                                                \
    return;
#include "llvm/CodeGen/DIEValue.def"
    }
  }

  /// Copy the underlying value.
  ///
  /// This should get optimized down to a simple copy.  We need to actually
  /// construct the value, rather than calling memcpy, to satisfy strict
  /// aliasing rules.
  void copyVal(const DIEValue &X) {
    switch (Ty) {
    case isNone:
      return;
#define HANDLE_DIEVALUE_SMALL(T)                                               \
  case is##T:                                                                  \
    construct<DIE##T>(*X.get<DIE##T>());                                       \
    return;
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  case is##T:                                                                  \
    construct<const DIE##T *>(*X.get<const DIE##T *>());                       \
    return;
#include "llvm/CodeGen/DIEValue.def"
    }
  }

public:
  DIEValue() = default;

  DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
    copyVal(X);
  }

  DIEValue &operator=(const DIEValue &X) {
    if (this == &X)
      return *this;
    destroyVal();
    Ty = X.Ty;
    Attribute = X.Attribute;
    Form = X.Form;
    copyVal(X);
    return *this;
  }

  ~DIEValue() { destroyVal(); }

#define HANDLE_DIEVALUE_SMALL(T)                                               \
  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T &V)      \
      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
    construct<DIE##T>(V);                                                      \
  }
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T *V)      \
      : Ty(is##T), Attribute(Attribute), Form(Form) {                          \
    assert(V && "Expected valid value");                                       \
    construct<const DIE##T *>(V);                                              \
  }
#include "llvm/CodeGen/DIEValue.def"

  /// Accessors.
  /// @{
  Type getType() const { return Ty; }
  dwarf::Attribute getAttribute() const { return Attribute; }
  dwarf::Form getForm() const { return Form; }
  explicit operator bool() const { return Ty; }
  /// @}

#define HANDLE_DIEVALUE_SMALL(T)                                               \
  const DIE##T &getDIE##T() const {                                            \
    assert(getType() == is##T && "Expected " #T);                              \
    return *get<DIE##T>();                                                     \
  }
#define HANDLE_DIEVALUE_LARGE(T)                                               \
  const DIE##T &getDIE##T() const {                                            \
    assert(getType() == is##T && "Expected " #T);                              \
    return **get<const DIE##T *>();                                            \
  }
#include "llvm/CodeGen/DIEValue.def"

  /// Emit value via the Dwarf writer.
  void emitValue(const AsmPrinter *AP) const;

  /// Return the size of a value in bytes.
  unsigned sizeOf(const dwarf::FormParams &FormParams) const;

  void print(raw_ostream &O) const;
  void dump() const;
};
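
// Usage sketch: DIEValue type-erases the small value classes above; e.g. a
// one-byte size attribute (the attribute/form pairing is illustrative):
//
//   DIEValue Size(dwarf::DW_AT_byte_size, dwarf::DW_FORM_data1, DIEInteger(4));
//   assert(Size.getType() == DIEValue::isInteger);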

struct IntrusiveBackListNode {
  PointerIntPair<IntrusiveBackListNode *, 1> Next;

  IntrusiveBackListNode() : Next(this, true) {}

  IntrusiveBackListNode *getNext() const {
    return Next.getInt() ? nullptr : Next.getPointer();
  }
};
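
// Note on the encoding: the list is circular. Each node's Next points at the
// following node with the int bit clear, while the last node's Next points
// back at the first node with the int bit set (an unlinked node points at
// itself with the bit set). getNext() above uses that bit to detect the end
// of the list.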

struct IntrusiveBackListBase {
  using Node = IntrusiveBackListNode;

  Node *Last = nullptr;

  bool empty() const { return !Last; }

  void push_back(Node &N) {
    assert(N.Next.getPointer() == &N && "Expected unlinked node");
    assert(N.Next.getInt() == true && "Expected unlinked node");

    if (Last) {
      N.Next = Last->Next;
      Last->Next.setPointerAndInt(&N, false);
    }
    Last = &N;
  }

  void push_front(Node &N) {
    assert(N.Next.getPointer() == &N && "Expected unlinked node");
    assert(N.Next.getInt() == true && "Expected unlinked node");

    if (Last) {
      N.Next.setPointerAndInt(Last->Next.getPointer(), false);
      Last->Next.setPointerAndInt(&N, true);
    } else {
      Last = &N;
    }
  }
};

template <class T> class IntrusiveBackList : IntrusiveBackListBase {
public:
  using IntrusiveBackListBase::empty;

  void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
  void push_front(T &N) { IntrusiveBackListBase::push_front(N); }

  T &back() { return *static_cast<T *>(Last); }
  const T &back() const { return *static_cast<T *>(Last); }
  T &front() {
    return *static_cast<T *>(Last ? Last->Next.getPointer() : nullptr);
  }
  const T &front() const {
    return *static_cast<T *>(Last ? Last->Next.getPointer() : nullptr);
  }

  void takeNodes(IntrusiveBackList<T> &Other) {
    if (Other.empty())
      return;

    T *FirstNode = static_cast<T *>(Other.Last->Next.getPointer());
    T *IterNode = FirstNode;
    do {
      // Keep a pointer to the node and increment the iterator.
      T *TmpNode = IterNode;
      IterNode = static_cast<T *>(IterNode->Next.getPointer());

      // Unlink the node and push it back to this list.
      TmpNode->Next.setPointerAndInt(TmpNode, true);
      push_back(*TmpNode);
    } while (IterNode != FirstNode);

    Other.Last = nullptr;
  }

  bool deleteNode(T &N) {
    if (Last == &N) {
      Last = Last->Next.getPointer();
      Last->Next.setInt(true);
      return true;
    }

    Node *cur = Last;
    while (cur && cur->Next.getPointer()) {
      if (cur->Next.getPointer() == &N) {
        cur->Next.setPointer(cur->Next.getPointer()->Next.getPointer());
        return true;
      }
      cur = cur->Next.getPointer();
    }

    return false;
  }

  class const_iterator;
  class iterator
      : public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
    friend class const_iterator;

    Node *N = nullptr;

  public:
    iterator() = default;
    explicit iterator(T *N) : N(N) {}

    iterator &operator++() {
      N = N->getNext();
      return *this;
    }

    explicit operator bool() const { return N; }
    T &operator*() const { return *static_cast<T *>(N); }

    bool operator==(const iterator &X) const { return N == X.N; }
  };

  class const_iterator
      : public iterator_facade_base<const_iterator, std::forward_iterator_tag,
                                    const T> {
    const Node *N = nullptr;

  public:
    const_iterator() = default;
    // Placate MSVC by explicitly scoping 'iterator'.
    const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
    explicit const_iterator(const T *N) : N(N) {}

    const_iterator &operator++() {
      N = N->getNext();
      return *this;
    }

    explicit operator bool() const { return N; }
    const T &operator*() const { return *static_cast<const T *>(N); }

    bool operator==(const const_iterator &X) const { return N == X.N; }
  };

  iterator begin() {
    return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
  }
  const_iterator begin() const {
    return const_cast<IntrusiveBackList *>(this)->begin();
  }
  iterator end() { return iterator(); }
  const_iterator end() const { return const_iterator(); }

  static iterator toIterator(T &N) { return iterator(&N); }
  static const_iterator toIterator(const T &N) { return const_iterator(&N); }
};
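
// A minimal usage sketch (hypothetical names, not part of the original
// header): nodes are allocated externally, pushed by reference, and visited
// in insertion order even though only a pointer to the back of the circular
// list is stored.
struct ExampleIntrusiveNode : IntrusiveBackListNode {
  int Value = 0;
  explicit ExampleIntrusiveNode(int V) : Value(V) {}
};

inline int exampleWalk() {
  ExampleIntrusiveNode A(1), B(2), C(3);
  IntrusiveBackList<ExampleIntrusiveNode> List;
  List.push_back(A);
  List.push_back(B);
  List.push_front(C); // iteration order is now C, A, B
  int Sum = 0;
  for (ExampleIntrusiveNode &N : List)
    Sum += N.Value; // each node is visited exactly once
  return Sum; // 6
}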

/// A list of DIE values.
///
/// This is a singly-linked list, but instead of reversing the order of
/// insertion, we keep a pointer to the back of the list so we can push in
/// order.
///
/// There are two main reasons to choose a linked list over a customized
/// vector-like data structure.
///
///  1. For teardown efficiency, we want DIEs to be BumpPtrAllocated.  Using a
///     linked list here makes this way easier to accomplish.
///  2. Carrying an extra pointer per \a DIEValue isn't expensive.  45% of DIEs
///     have 2 or fewer values, and 90% have 5 or fewer.  A vector would be
///     over-allocated by 50% on average anyway, the same cost as the
///     linked-list node.
class DIEValueList {
  struct Node : IntrusiveBackListNode {
    DIEValue V;

    explicit Node(DIEValue V) : V(V) {}
  };

  using ListTy = IntrusiveBackList<Node>;

  ListTy List;

public:
  class const_value_iterator;
  class value_iterator
      : public iterator_adaptor_base<value_iterator, ListTy::iterator,
                                     std::forward_iterator_tag, DIEValue> {
    friend class const_value_iterator;

    using iterator_adaptor =
        iterator_adaptor_base<value_iterator, ListTy::iterator,
                              std::forward_iterator_tag, DIEValue>;

  public:
    value_iterator() = default;
    explicit value_iterator(ListTy::iterator X) : iterator_adaptor(X) {}

    explicit operator bool() const { return bool(wrapped()); }
    DIEValue &operator*() const { return wrapped()->V; }
  };

  class const_value_iterator : public iterator_adaptor_base<
                                   const_value_iterator, ListTy::const_iterator,
                                   std::forward_iterator_tag, const DIEValue> {
    using iterator_adaptor =
        iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
                              std::forward_iterator_tag, const DIEValue>;

  public:
    const_value_iterator() = default;
    const_value_iterator(DIEValueList::value_iterator X)
        : iterator_adaptor(X.wrapped()) {}
    explicit const_value_iterator(ListTy::const_iterator X)
        : iterator_adaptor(X) {}

    explicit operator bool() const { return bool(wrapped()); }
    const DIEValue &operator*() const { return wrapped()->V; }
  };

  using value_range = iterator_range<value_iterator>;
  using const_value_range = iterator_range<const_value_iterator>;

  value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
    List.push_back(*new (Alloc) Node(V));
    return value_iterator(ListTy::toIterator(List.back()));
  }
  template <class T>
  value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
                          dwarf::Form Form, T &&Value) {
    return addValue(Alloc, DIEValue(Attribute, Form, std::forward<T>(Value)));
  }

  /* zr33: add method here */
  template <class T>
  bool replaceValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
                    dwarf::Attribute NewAttribute, dwarf::Form Form,
                    T &&NewValue) {
    // DIEValue is a small value type, so the matching slot can be assigned
    // in place; no bump allocation is required. Alloc is kept so existing
    // callers keep compiling.
    (void)Alloc;
    for (DIEValue &Val : values()) {
      if (Val.getAttribute() == Attribute) {
        Val = DIEValue(NewAttribute, Form, std::forward<T>(NewValue));
        return true;
      }
    }

    return false;
  }

  template <class T>
  bool replaceValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
                    dwarf::Form Form, T &&NewValue) {
    (void)Alloc;
    for (DIEValue &Val : values()) {
      if (Val.getAttribute() == Attribute) {
        Val = DIEValue(Attribute, Form, std::forward<T>(NewValue));
        return true;
      }
    }

    return false;
  }

  bool replaceValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
                    dwarf::Form Form, const DIEValue &NewValue) {
    (void)Alloc;
    for (DIEValue &Val : values()) {
      if (Val.getAttribute() == Attribute) {
        Val = NewValue;
        return true;
      }
    }

    return false;
  }

  bool deleteValue(dwarf::Attribute Attribute) {
    for (Node &N : List) {
      if (N.V.getAttribute() == Attribute)
        return List.deleteNode(N);
    }

    return false;
  }
  /* end */

  /// Take ownership of the nodes in \p Other, and append them to the back of
  /// the list.
  void takeValues(DIEValueList &Other) { List.takeNodes(Other.List); }

  value_range values() {
    return make_range(value_iterator(List.begin()), value_iterator(List.end()));
  }
  const_value_range values() const {
    return make_range(const_value_iterator(List.begin()),
                      const_value_iterator(List.end()));
  }
};
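
// A minimal sketch (hypothetical helper, not part of the original header):
// appending and then replacing an attribute on a DIEValueList, using the
// allocator-backed addValue API and the replaceValue extension above.
inline void exampleValueList(BumpPtrAllocator &Alloc, DIEValueList &List) {
  // Each addValue links one bump-allocated node onto the back of the list.
  List.addValue(Alloc, dwarf::DW_AT_byte_size, dwarf::DW_FORM_data4,
                DIEInteger(4));
  // replaceValue rewrites the first value carrying DW_AT_byte_size in place.
  List.replaceValue(Alloc, dwarf::DW_AT_byte_size, dwarf::DW_FORM_data4,
                    DIEInteger(8));
}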

//===--------------------------------------------------------------------===//
/// A structured debug information entry.  Has an abbreviation which
/// describes its organization.
class DIE : IntrusiveBackListNode, public DIEValueList {
  friend class IntrusiveBackList<DIE>;
  friend class DIEUnit;

  /// Dwarf unit relative offset.
  unsigned Offset = 0;
  /// Size of instance + children.
  unsigned Size = 0;
  unsigned AbbrevNumber = ~0u;
  /// Dwarf tag code.
  dwarf::Tag Tag = (dwarf::Tag)0;
  /// Set to true to force a DIE to emit an abbreviation that says it has
  /// children even when it doesn't. This is used for unit testing purposes.
  bool ForceChildren = false;
  /// Children DIEs.
  IntrusiveBackList<DIE> Children;

  /// The owner is either the parent DIE for children of other DIEs, or a
  /// DIEUnit which contains this DIE as its unit DIE.
  PointerUnion<DIE *, DIEUnit *> Owner;

  explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}

public:
  DIE() = delete;
  DIE(const DIE &RHS) = delete;
  DIE(DIE &&RHS) = delete;
  DIE &operator=(const DIE &RHS) = delete;
  DIE &operator=(const DIE &&RHS) = delete;

  static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
    return new (Alloc) DIE(Tag);
  }

  // Accessors.
  unsigned getAbbrevNumber() const { return AbbrevNumber; }
  dwarf::Tag getTag() const { return Tag; }
  /// Get the compile/type unit relative offset of this DIE.
  unsigned getOffset() const {
    // A real Offset can't be zero because the unit headers are at offset zero.
    assert(Offset && "Offset being queried before it's been computed.");
    return Offset;
  }
  unsigned getSize() const {
    // A real Size can't be zero because it includes the non-empty abbrev code.
    assert(Size && "Size being queried before it's been computed.");
    return Size;
  }
  bool hasChildren() const { return ForceChildren || !Children.empty(); }
  void setForceChildren(bool B) { ForceChildren = B; }

  using child_iterator = IntrusiveBackList<DIE>::iterator;
  using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
  using child_range = iterator_range<child_iterator>;
  using const_child_range = iterator_range<const_child_iterator>;

  child_range children() {
    return make_range(Children.begin(), Children.end());
  }
  const_child_range children() const {
    return make_range(Children.begin(), Children.end());
  }

  DIE *getParent() const;

  /// Generate the abbreviation for this DIE.
  ///
  /// Calculate the abbreviation for this, which should be uniqued and
  /// eventually used to call \a setAbbrevNumber().
  DIEAbbrev generateAbbrev() const;

  /// Set the abbreviation number for this DIE.
  void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }

  /// Get the absolute offset within the .debug_info or .debug_types section
  /// for this DIE.
  uint64_t getDebugSectionOffset() const;

  /// Compute the offset of this DIE and all its children.
  ///
  /// This function gets called just before we are going to generate the debug
  /// information and gives each DIE a chance to figure out its CU relative DIE
  /// offset, unique its abbreviation and fill in the abbreviation code, and
  /// return the unit offset that points to where the next DIE will be emitted
  /// within the debug unit section. After this function has been called for all
  /// DIE objects, the DWARF can be generated since all DIEs will be able to
  /// properly refer to other DIE objects since all DIEs have calculated their
  /// offsets.
  ///
  /// \param FormParams Used when calculating sizes.
  /// \param AbbrevSet the abbreviation used to unique DIE abbreviations.
  /// \param CUOffset the compile/type unit relative offset in bytes.
  /// \returns the offset for the DIE that follows this DIE within the
  /// current compile/type unit.
  unsigned computeOffsetsAndAbbrevs(const dwarf::FormParams &FormParams,
                                    DIEAbbrevSet &AbbrevSet, unsigned CUOffset);

  /// Climb up the parent chain to get the compile unit or type unit DIE that
  /// this DIE belongs to.
  ///
  /// \returns the compile or type unit DIE that owns this DIE, or NULL if
  /// this DIE hasn't been added to a unit DIE.
  const DIE *getUnitDie() const;

  /// Climb up the parent chain to get the compile unit or type unit that this
  /// DIE belongs to.
  ///
  /// \returns the DIEUnit that represents the compile or type unit that owns
  /// this DIE, or NULL if this DIE hasn't been added to a unit DIE.
  DIEUnit *getUnit() const;

  void setOffset(unsigned O) { Offset = O; }
  void setSize(unsigned S) { Size = S; }

  /// Add a child to the DIE.
  DIE &addChild(DIE *Child) {
    assert(!Child->getParent() && "Child should be orphaned");
    Child->Owner = this;
    Children.push_back(*Child);
    return Children.back();
  }

  DIE &addChildFront(DIE *Child) {
    assert(!Child->getParent() && "Child should be orphaned");
    Child->Owner = this;
    Children.push_front(*Child);
    return Children.front();
  }

  /// Find a value in the DIE with the attribute given.
  ///
  /// Returns a default-constructed DIEValue (where \a DIEValue::getType()
  /// gives \a DIEValue::isNone) if no such attribute exists.
  DIEValue findAttribute(dwarf::Attribute Attribute) const;

  void print(raw_ostream &O, unsigned IndentCount = 0) const;
  void dump() const;
};
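
// A minimal sketch (hypothetical helper, not part of the original header):
// building a small DIE tree. DIEs are bump-allocated through DIE::get, so
// teardown happens wholesale with the allocator, matching the design notes
// on DIEValueList above.
inline DIE &exampleBuildTree(BumpPtrAllocator &Alloc) {
  DIE &Subprogram = *DIE::get(Alloc, dwarf::DW_TAG_subprogram);
  DIE *Param = DIE::get(Alloc, dwarf::DW_TAG_formal_parameter);
  return Subprogram.addChild(Param); // links Param under Subprogram
}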

//===--------------------------------------------------------------------===//
/// Represents a compile or type unit.
class DIEUnit {
  /// The compile unit or type unit DIE. This variable must be an instance of
  /// DIE so that we can calculate the DIEUnit from any DIE by traversing the
  /// parent backchain and getting the Unit DIE, and then casting it to a
  /// DIEUnit. This allows us to be able to find the DIEUnit for any DIE without
  /// having to store a pointer to the DIEUnit in each DIE instance.
  DIE Die;
  /// The section this unit will be emitted in. This may or may not be set to
  /// a valid section depending on the client that is emitting DWARF.
  MCSection *Section = nullptr;
  uint64_t Offset = 0; /// .debug_info or .debug_types absolute section offset.
protected:
  virtual ~DIEUnit() = default;

public:
  explicit DIEUnit(dwarf::Tag UnitTag);
  DIEUnit(const DIEUnit &RHS) = delete;
  DIEUnit(DIEUnit &&RHS) = delete;
  void operator=(const DIEUnit &RHS) = delete;
  void operator=(const DIEUnit &&RHS) = delete;
  /// Set the section that this DIEUnit will be emitted into.
  ///
  /// This function is used by some clients to set the section. Not all clients
  /// that emit DWARF use this section variable.
  void setSection(MCSection *Section) {
    assert(!this->Section);
    this->Section = Section;
  }

  virtual const MCSymbol *getCrossSectionRelativeBaseAddress() const {
    return nullptr;
  }

  /// Return the section that this DIEUnit will be emitted into.
  ///
  /// \returns Section pointer which can be NULL.
  MCSection *getSection() const { return Section; }
  void setDebugSectionOffset(uint64_t O) { Offset = O; }
  uint64_t getDebugSectionOffset() const { return Offset; }
  DIE &getUnitDie() { return Die; }
  const DIE &getUnitDie() const { return Die; }
};

struct BasicDIEUnit final : DIEUnit {
  explicit BasicDIEUnit(dwarf::Tag UnitTag) : DIEUnit(UnitTag) {}
};
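
// A minimal sketch (hypothetical helper, not part of the original header):
// the unit DIE created by a DIEUnit resolves back to its owning unit via the
// Owner backchain, the same lookup DIE::getUnit() performs for nested DIEs.
inline bool exampleUnitBackchain() {
  BasicDIEUnit Unit(dwarf::DW_TAG_compile_unit);
  return Unit.getUnitDie().getUnit() == &Unit;
}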

//===--------------------------------------------------------------------===//
/// DIELoc - Represents an expression location.
//
class DIELoc : public DIEValueList {
  mutable unsigned Size = 0; // Size in bytes excluding size header.

public:
  DIELoc() = default;

  /// Calculate the size of the location expression.
  unsigned computeSize(const dwarf::FormParams &FormParams) const;

  // TODO: move setSize() and Size to DIEValueList.
  void setSize(unsigned size) { Size = size; }

  /// BestForm - Choose the best form for data.
  ///
  dwarf::Form BestForm(unsigned DwarfVersion) const {
    if (DwarfVersion > 3)
      return dwarf::DW_FORM_exprloc;
    // Pre-DWARF4 location expressions were blocks and not exprloc.
    if ((unsigned char)Size == Size)
      return dwarf::DW_FORM_block1;
    if ((unsigned short)Size == Size)
      return dwarf::DW_FORM_block2;
    if ((unsigned int)Size == Size)
      return dwarf::DW_FORM_block4;
    return dwarf::DW_FORM_block;
  }

  void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};
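
// A minimal sketch (hypothetical helper, not part of the original header):
// how BestForm depends on the DWARF version. A freshly constructed DIELoc
// still has Size == 0, so the pre-v4 path picks the one-byte block form.
inline bool exampleLocForms() {
  DIELoc Loc;
  return Loc.BestForm(/*DwarfVersion=*/5) == dwarf::DW_FORM_exprloc &&
         Loc.BestForm(/*DwarfVersion=*/3) == dwarf::DW_FORM_block1;
}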

//===--------------------------------------------------------------------===//
/// DIEBlock - Represents a block of values.
//
class DIEBlock : public DIEValueList {
  mutable unsigned Size = 0; // Size in bytes excluding size header.

public:
  DIEBlock() = default;

  /// Calculate the size of the location expression.
  unsigned computeSize(const dwarf::FormParams &FormParams) const;

  // TODO: move setSize() and Size to DIEValueList.
  void setSize(unsigned size) { Size = size; }

  /// BestForm - Choose the best form for data.
  ///
  dwarf::Form BestForm() const {
    if ((unsigned char)Size == Size)
      return dwarf::DW_FORM_block1;
    if ((unsigned short)Size == Size)
      return dwarf::DW_FORM_block2;
    if ((unsigned int)Size == Size)
      return dwarf::DW_FORM_block4;
    return dwarf::DW_FORM_block;
  }

  void emitValue(const AsmPrinter *Asm, dwarf::Form Form) const;
  unsigned sizeOf(const dwarf::FormParams &, dwarf::Form Form) const;

  void print(raw_ostream &O) const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_DIE_H

//===- SelectionDAGAddressAnalysis.h - DAG Address Analysis -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
#define LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H

#include "llvm/CodeGen/SelectionDAGNodes.h"
#include <cstdint>

namespace llvm {

class SelectionDAG;

/// Helper struct to parse and store a memory address as base + index + offset.
/// We ignore sign extensions when it is safe to do so.
/// The following two expressions are not equivalent. To differentiate we need
/// to store whether there was a sign extension involved in the index
/// computation.
///  (load (i64 add (i64 copyfromreg %c)
///                 (i64 signextend (add (i8 load %index)
///                                      (i8 1))))
/// vs
///
/// (load (i64 add (i64 copyfromreg %c)
///                (i64 signextend (i32 add (i32 signextend (i8 load %index))
///                                         (i32 1)))))
class BaseIndexOffset {
private:
  SDValue Base;
  SDValue Index;
  std::optional<int64_t> Offset;
  bool IsIndexSignExt = false;

public:
  BaseIndexOffset() = default;
  BaseIndexOffset(SDValue Base, SDValue Index, bool IsIndexSignExt)
      : Base(Base), Index(Index), IsIndexSignExt(IsIndexSignExt) {}
  BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
                  bool IsIndexSignExt)
      : Base(Base), Index(Index), Offset(Offset),
        IsIndexSignExt(IsIndexSignExt) {}

  SDValue getBase() { return Base; }
  SDValue getBase() const { return Base; }
  SDValue getIndex() { return Index; }
  SDValue getIndex() const { return Index; }
  void addToOffset(int64_t VectorOff) {
    Offset = Offset.value_or(0) + VectorOff;
  }
  bool hasValidOffset() const { return Offset.has_value(); }
  int64_t getOffset() const { return *Offset; }

  // Returns true if `Other` and `*this` are both some offset from the same base
  // pointer. In that case, `Off` is set to the offset between `*this` and
  // `Other` (negative if `Other` is before `*this`).
  bool equalBaseIndex(const BaseIndexOffset &Other, const SelectionDAG &DAG,
                      int64_t &Off) const;

  bool equalBaseIndex(const BaseIndexOffset &Other,
                      const SelectionDAG &DAG) const {
    int64_t Off;
    return equalBaseIndex(Other, DAG, Off);
  }

  // Returns true if `Other` (with size `OtherSize`) can be proven to be fully
  // contained in `*this` (with size `Size`).
  bool contains(const SelectionDAG &DAG, int64_t BitSize,
                const BaseIndexOffset &Other, int64_t OtherBitSize,
                int64_t &BitOffset) const;

  bool contains(const SelectionDAG &DAG, int64_t BitSize,
                const BaseIndexOffset &Other, int64_t OtherBitSize) const {
    int64_t BitOffset;
    return contains(DAG, BitSize, Other, OtherBitSize, BitOffset);
  }

  // Returns true if `Op0` and `Op1` can be proven to alias/not alias, in
  // which case `IsAlias` is set to true/false.
  static bool computeAliasing(const SDNode *Op0,
                              const std::optional<int64_t> NumBytes0,
                              const SDNode *Op1,
                              const std::optional<int64_t> NumBytes1,
                              const SelectionDAG &DAG, bool &IsAlias);

  /// Parses tree in N for base, index, offset addresses.
  static BaseIndexOffset match(const SDNode *N, const SelectionDAG &DAG);

  void print(raw_ostream& OS) const;
  void dump() const;
};
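
// A minimal sketch (hypothetical helper, not part of the original header):
// decompose two memory nodes and, when they share a base, read the byte
// distance between their offsets out of equalBaseIndex.
inline bool exampleSameBase(const SDNode *A, const SDNode *B,
                            const SelectionDAG &DAG, int64_t &Distance) {
  BaseIndexOffset BIA = BaseIndexOffset::match(A, DAG);
  BaseIndexOffset BIB = BaseIndexOffset::match(B, DAG);
  return BIA.equalBaseIndex(BIB, DAG, Distance);
}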

} // end namespace llvm

#endif // LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H

//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the set of low-level target independent types which various
// values in the code generator are.  This allows the target specific behavior
// of instructions to be described to target independent passes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_VALUETYPES_H
#define LLVM_CODEGEN_VALUETYPES_H

#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
#include <string>

namespace llvm {

  class LLVMContext;
  class Type;

  /// Extended Value Type. Capable of holding value types which are not native
  /// for any processor (such as the i12345 type), as well as the types an MVT
  /// can represent.
  struct EVT {
  private:
    MVT V = MVT::INVALID_SIMPLE_VALUE_TYPE;
    Type *LLVMTy = nullptr;

  public:
    constexpr EVT() = default;
    constexpr EVT(MVT::SimpleValueType SVT) : V(SVT) {}
    constexpr EVT(MVT S) : V(S) {}

    bool operator==(EVT VT) const {
      return !(*this != VT);
    }
    bool operator!=(EVT VT) const {
      if (V.SimpleTy != VT.V.SimpleTy)
        return true;
      if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
        return LLVMTy != VT.LLVMTy;
      return false;
    }

    /// Returns the EVT that represents a floating-point type with the given
    /// number of bits. There are two floating-point types with 128 bits - this
    /// returns f128 rather than ppcf128.
    static EVT getFloatingPointVT(unsigned BitWidth) {
      return MVT::getFloatingPointVT(BitWidth);
    }

    /// Returns the EVT that represents an integer with the given number of
    /// bits.
    static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
      MVT M = MVT::getIntegerVT(BitWidth);
      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
        return M;
      return getExtendedIntegerVT(Context, BitWidth);
    }

    /// Returns the EVT that represents a vector NumElements in length, where
    /// each element is of type VT.
    static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
                           bool IsScalable = false) {
      MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
        return M;
      return getExtendedVectorVT(Context, VT, NumElements, IsScalable);
    }

    /// Returns the EVT that represents a vector EC.Min elements in length,
    /// where each element is of type VT.
    static EVT getVectorVT(LLVMContext &Context, EVT VT, ElementCount EC) {
      MVT M = MVT::getVectorVT(VT.V, EC);
      if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
        return M;
      return getExtendedVectorVT(Context, VT, EC);
    }

    /// Return a vector with the same number of elements as this vector, but
    /// with the element type converted to an integer type with the same
    /// bitwidth.
    EVT changeVectorElementTypeToInteger() const {
      if (isSimple())
        return getSimpleVT().changeVectorElementTypeToInteger();
      return changeExtendedVectorElementTypeToInteger();
    }

    /// Return a VT for a vector type whose attributes match ourselves
    /// with the exception of the element type that is chosen by the caller.
    EVT changeVectorElementType(EVT EltVT) const {
      if (isSimple()) {
        assert(EltVT.isSimple() &&
               "Can't change simple vector VT to have extended element VT");
        return getSimpleVT().changeVectorElementType(EltVT.getSimpleVT());
      }
      return changeExtendedVectorElementType(EltVT);
    }

    /// Return the type converted to an equivalently sized integer or vector
    /// with integer element type. Similar to changeVectorElementTypeToInteger,
    /// but also handles scalars.
    EVT changeTypeToInteger() const {
      if (isVector())
        return changeVectorElementTypeToInteger();

      if (isSimple())
        return getSimpleVT().changeTypeToInteger();
      return changeExtendedTypeToInteger();
    }

    /// Test if the given EVT has zero size; this will fail if called on a
    /// scalable type.
    bool isZeroSized() const {
      return getSizeInBits().isZero();
    }

    /// Test if the given EVT is simple (as opposed to being extended).
    bool isSimple() const {
      return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE;
    }

    /// Test if the given EVT is extended (as opposed to being simple).
    bool isExtended() const {
      return !isSimple();
    }

    /// Return true if this is a FP or a vector FP type.
    bool isFloatingPoint() const {
      return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
    }

    /// Return true if this is an integer or a vector integer type.
    bool isInteger() const {
      return isSimple() ? V.isInteger() : isExtendedInteger();
    }

    /// Return true if this is an integer, but not a vector.
    bool isScalarInteger() const {
      return isSimple() ? V.isScalarInteger() : isExtendedScalarInteger();
    }

    /// Return true if this is a vector type where the runtime
    /// length is machine dependent
    bool isScalableTargetExtVT() const {
      return isSimple() && V.isScalableTargetExtVT();
    }

    /// Return true if this is a vector value type.
    bool isVector() const {
      return isSimple() ? V.isVector() : isExtendedVector();
    }

    /// Return true if this is a vector type where the runtime
    /// length is machine dependent
    bool isScalableVector() const {
      return isSimple() ? V.isScalableVector() : isExtendedScalableVector();
    }

    bool isFixedLengthVector() const {
      return isSimple() ? V.isFixedLengthVector()
                        : isExtendedFixedLengthVector();
    }

    /// Return true if the type is a scalable type.
    bool isScalableVT() const {
      return isScalableVector() || isScalableTargetExtVT();
    }

    /// Return true if this is a 16-bit vector type.
    bool is16BitVector() const {
      return isSimple() ? V.is16BitVector() : isExtended16BitVector();
    }

    /// Return true if this is a 32-bit vector type.
    bool is32BitVector() const {
      return isSimple() ? V.is32BitVector() : isExtended32BitVector();
    }

    /// Return true if this is a 64-bit vector type.
    bool is64BitVector() const {
      return isSimple() ? V.is64BitVector() : isExtended64BitVector();
    }

    /// Return true if this is a 128-bit vector type.
    bool is128BitVector() const {
      return isSimple() ? V.is128BitVector() : isExtended128BitVector();
    }

    /// Return true if this is a 256-bit vector type.
    bool is256BitVector() const {
      return isSimple() ? V.is256BitVector() : isExtended256BitVector();
    }

    /// Return true if this is a 512-bit vector type.
    bool is512BitVector() const {
      return isSimple() ? V.is512BitVector() : isExtended512BitVector();
    }

    /// Return true if this is a 1024-bit vector type.
    bool is1024BitVector() const {
      return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
    }

    /// Return true if this is a 2048-bit vector type.
    bool is2048BitVector() const {
      return isSimple() ? V.is2048BitVector() : isExtended2048BitVector();
    }

    /// Return true if this is an overloaded type for TableGen.
    bool isOverloaded() const {
      return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
    }

    /// Return true if the bit size is a multiple of 8.
    bool isByteSized() const {
      return !isZeroSized() && getSizeInBits().isKnownMultipleOf(8);
    }

    /// Return true if the size is a power-of-two number of bytes.
    bool isRound() const {
      if (isScalableVector())
        return false;
      unsigned BitSize = getSizeInBits();
      return BitSize >= 8 && !(BitSize & (BitSize - 1));
    }

    /// Return true if this has the same number of bits as VT.
    bool bitsEq(EVT VT) const {
      if (EVT::operator==(VT)) return true;
      return getSizeInBits() == VT.getSizeInBits();
    }

    /// Return true if we know at compile time this has more bits than VT.
    bool knownBitsGT(EVT VT) const {
      return TypeSize::isKnownGT(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has more than or the same
    /// bits as VT.
    bool knownBitsGE(EVT VT) const {
      return TypeSize::isKnownGE(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has fewer bits than VT.
    bool knownBitsLT(EVT VT) const {
      return TypeSize::isKnownLT(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if we know at compile time this has fewer than or the same
    /// bits as VT.
    bool knownBitsLE(EVT VT) const {
      return TypeSize::isKnownLE(getSizeInBits(), VT.getSizeInBits());
    }

    /// Return true if this has more bits than VT.
    bool bitsGT(EVT VT) const {
      if (EVT::operator==(VT)) return false;
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsGT(VT);
    }

    /// Return true if this has no fewer bits than VT.
    bool bitsGE(EVT VT) const {
      if (EVT::operator==(VT)) return true;
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsGE(VT);
    }

    /// Return true if this has fewer bits than VT.
    bool bitsLT(EVT VT) const {
      if (EVT::operator==(VT)) return false;
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsLT(VT);
    }

    /// Return true if this has no more bits than VT.
    bool bitsLE(EVT VT) const {
      if (EVT::operator==(VT)) return true;
      assert(isScalableVector() == VT.isScalableVector() &&
             "Comparison between scalable and fixed types");
      return knownBitsLE(VT);
    }

    /// Return the SimpleValueType held in the specified simple EVT.
    MVT getSimpleVT() const {
      assert(isSimple() && "Expected a SimpleValueType!");
      return V;
    }

    /// If this is a vector type, return the element type, otherwise return
    /// this.
    EVT getScalarType() const {
      return isVector() ? getVectorElementType() : *this;
    }

    /// Given a vector type, return the type of each element.
    EVT getVectorElementType() const {
      assert(isVector() && "Invalid vector type!");
      if (isSimple())
        return V.getVectorElementType();
      return getExtendedVectorElementType();
    }

    /// Given a vector type, return the number of elements it contains.
    unsigned getVectorNumElements() const {
      assert(isVector() && "Invalid vector type!");

      if (isScalableVector())
        llvm::reportInvalidSizeRequest(
            "Possible incorrect use of EVT::getVectorNumElements() for "
            "scalable vector. Scalable flag may be dropped, use "
            "EVT::getVectorElementCount() instead");

      return isSimple() ? V.getVectorNumElements()
                        : getExtendedVectorNumElements();
    }

    // Given a (possibly scalable) vector type, return the ElementCount
    ElementCount getVectorElementCount() const {
      assert((isVector()) && "Invalid vector type!");
      if (isSimple())
        return V.getVectorElementCount();

      return getExtendedVectorElementCount();
    }

    /// Given a vector type, return the minimum number of elements it contains.
    unsigned getVectorMinNumElements() const {
      return getVectorElementCount().getKnownMinValue();
    }

    /// Return the size of the specified value type in bits.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getSizeInBits() const {
      if (isSimple())
        return V.getSizeInBits();
      return getExtendedSizeInBits();
    }

    /// Return the size of the specified fixed width value type in bits. The
    /// function will assert if the type is scalable.
    uint64_t getFixedSizeInBits() const {
      return getSizeInBits().getFixedValue();
    }

    uint64_t getScalarSizeInBits() const {
      return getScalarType().getSizeInBits().getFixedValue();
    }

    /// Return the number of bytes overwritten by a store of the specified value
    /// type.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getStoreSize() const {
      TypeSize BaseSize = getSizeInBits();
      return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
    }

    // Return the number of bytes overwritten by a store of this value type or
    // this value type's element type in the case of a vector.
    uint64_t getScalarStoreSize() const {
      return getScalarType().getStoreSize().getFixedValue();
    }

    /// Return the number of bits overwritten by a store of the specified value
    /// type.
    ///
    /// If the value type is a scalable vector type, the scalable property will
    /// be set and the runtime size will be a positive integer multiple of the
    /// base size.
    TypeSize getStoreSizeInBits() const {
      return getStoreSize() * 8;
    }

    /// Rounds the bit-width of the given integer EVT up to the nearest power of
    /// two (and at least to eight), and returns the integer EVT with that
    /// number of bits.
    EVT getRoundIntegerType(LLVMContext &Context) const {
      assert(isInteger() && !isVector() && "Invalid integer type!");
      unsigned BitWidth = getSizeInBits();
      if (BitWidth <= 8)
        return EVT(MVT::i8);
      return getIntegerVT(Context, llvm::bit_ceil(BitWidth));
    }

    /// Finds the smallest simple value type that is greater than or equal to
    /// half the width of this EVT. If no simple value type can be found, an
    /// extended integer value type of half the size (rounded up) is returned.
    EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
      assert(isInteger() && !isVector() && "Invalid integer type!");
      unsigned EVTSize = getSizeInBits();
      for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
          IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
        EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
        if (HalfVT.getSizeInBits() * 2 >= EVTSize)
          return HalfVT;
      }
      return getIntegerVT(Context, (EVTSize + 1) / 2);
    }

    /// Return a VT for an integer vector type with the size of the
    /// elements doubled. The type returned may be an extended type.
    EVT widenIntegerVectorElementType(LLVMContext &Context) const {
      EVT EltVT = getVectorElementType();
      EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
      return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
    }

    // Return a VT for a vector type with the same element type but
    // half the number of elements. The type returned may be an
    // extended type.
    EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
      EVT EltVT = getVectorElementType();
      auto EltCnt = getVectorElementCount();
      assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
      return EVT::getVectorVT(Context, EltVT, EltCnt.divideCoefficientBy(2));
    }

    // Return a VT for a vector type with the same element type but
    // double the number of elements. The type returned may be an
    // extended type.
    EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const {
      EVT EltVT = getVectorElementType();
      auto EltCnt = getVectorElementCount();
      return EVT::getVectorVT(Context, EltVT, EltCnt * 2);
    }

    /// Returns true if the vector's minimum element count is a power of 2.
    bool isPow2VectorType() const {
      unsigned NElts = getVectorMinNumElements();
      return !(NElts & (NElts - 1));
    }

    /// Widens the length of the given vector EVT up to the nearest power of 2
    /// and returns that type.
    EVT getPow2VectorType(LLVMContext &Context) const {
      if (!isPow2VectorType()) {
        ElementCount NElts = getVectorElementCount();
        unsigned NewMinCount = 1 << Log2_32_Ceil(NElts.getKnownMinValue());
        NElts = ElementCount::get(NewMinCount, NElts.isScalable());
        return EVT::getVectorVT(Context, getVectorElementType(), NElts);
      }
      else {
        return *this;
      }
    }

    /// This function returns value type as a string, e.g. "i32".
    std::string getEVTString() const;

    /// Support for debugging, callable in GDB: VT.dump()
    void dump() const;

    /// Implement operator<<.
    void print(raw_ostream &OS) const {
      OS << getEVTString();
    }

    /// This method returns an LLVM type corresponding to the specified EVT.
    /// For integer types, this returns an unsigned type. Note that this will
    /// abort for types that cannot be represented.
    Type *getTypeForEVT(LLVMContext &Context) const;

    /// Return the value type corresponding to the specified type.
    /// This returns all pointers as iPTR.  If HandleUnknown is true, unknown
    /// types are returned as Other, otherwise they are invalid.
    static EVT getEVT(Type *Ty, bool HandleUnknown = false);

    intptr_t getRawBits() const {
      if (isSimple())
        return V.SimpleTy;
      else
        return (intptr_t)(LLVMTy);
    }

    /// A meaningless but well-behaved order, useful for constructing
    /// containers.
    struct compareRawBits {
      bool operator()(EVT L, EVT R) const {
        if (L.V.SimpleTy == R.V.SimpleTy)
          return L.LLVMTy < R.LLVMTy;
        else
          return L.V.SimpleTy < R.V.SimpleTy;
      }
    };

  private:
    // Methods for handling the Extended-type case in functions above.
    // These are all out-of-line to prevent users of this header file
    // from having a dependency on Type.h.
    EVT changeExtendedTypeToInteger() const;
    EVT changeExtendedVectorElementType(EVT EltVT) const;
    EVT changeExtendedVectorElementTypeToInteger() const;
    static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
    static EVT getExtendedVectorVT(LLVMContext &C, EVT VT, unsigned NumElements,
                                   bool IsScalable);
    static EVT getExtendedVectorVT(LLVMContext &Context, EVT VT,
                                   ElementCount EC);
    bool isExtendedFloatingPoint() const LLVM_READONLY;
    bool isExtendedInteger() const LLVM_READONLY;
    bool isExtendedScalarInteger() const LLVM_READONLY;
    bool isExtendedVector() const LLVM_READONLY;
    bool isExtended16BitVector() const LLVM_READONLY;
    bool isExtended32BitVector() const LLVM_READONLY;
    bool isExtended64BitVector() const LLVM_READONLY;
    bool isExtended128BitVector() const LLVM_READONLY;
    bool isExtended256BitVector() const LLVM_READONLY;
    bool isExtended512BitVector() const LLVM_READONLY;
    bool isExtended1024BitVector() const LLVM_READONLY;
    bool isExtended2048BitVector() const LLVM_READONLY;
    bool isExtendedFixedLengthVector() const LLVM_READONLY;
    bool isExtendedScalableVector() const LLVM_READONLY;
    EVT getExtendedVectorElementType() const;
    unsigned getExtendedVectorNumElements() const LLVM_READONLY;
    ElementCount getExtendedVectorElementCount() const LLVM_READONLY;
    TypeSize getExtendedSizeInBits() const LLVM_READONLY;
  };

  inline raw_ostream &operator<<(raw_ostream &OS, const EVT &V) {
    V.print(OS);
    return OS;
  }
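
  // A minimal sketch (hypothetical helper, not part of the original header):
  // EVT falls back to an extended, LLVMContext-owned representation whenever
  // no simple MVT matches, and getRoundIntegerType rounds odd widths up to a
  // power of two.
  inline bool exampleExtendedEVT(LLVMContext &Context) {
    EVT I32 = EVT::getIntegerVT(Context, 32);    // maps onto MVT::i32
    EVT Odd = EVT::getIntegerVT(Context, 12345); // no matching MVT: extended
    return I32.isSimple() && Odd.isExtended() &&
           Odd.getRoundIntegerType(Context).getSizeInBits() == 16384;
  }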
} // end namespace llvm

#endif // LLVM_CODEGEN_VALUETYPES_H

//===-- llvm/CodeGen/DebugHandlerBase.h -----------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Common functionality for different debug information format backends.
// LLVM currently supports DWARF and CodeView.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_DEBUGHANDLERBASE_H
#define LLVM_CODEGEN_DEBUGHANDLERBASE_H

#include "llvm/CodeGen/AsmPrinterHandler.h"
#include "llvm/CodeGen/DbgEntityHistoryCalculator.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include <optional>

namespace llvm {

class AsmPrinter;
class MachineInstr;
class MachineModuleInfo;

/// Represents the location at which a variable is stored.
struct DbgVariableLocation {
  /// Base register.
  unsigned Register;

  /// Chain of offsetted loads necessary to load the value if it lives in
  /// memory. Every load except for the last is pointer-sized.
  SmallVector<int64_t, 1> LoadChain;

  /// Present if the location is part of a larger variable.
  std::optional<llvm::DIExpression::FragmentInfo> FragmentInfo;

  /// Extract a VariableLocation from a MachineInstr.
  /// This will only work if Instruction is a debug value instruction
  /// and the associated DIExpression is in one of the supported forms.
  /// If these requirements are not met, the returned Optional will not
  /// have a value.
  static std::optional<DbgVariableLocation>
  extractFromMachineInstruction(const MachineInstr &Instruction);
};
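
// A minimal sketch (hypothetical helper, not part of the original header):
// only debug value instructions whose DIExpression is in a supported form
// yield a location; an empty LoadChain then means the variable lives directly
// in Register with no memory indirection.
inline bool exampleIsDirectRegisterLocation(const MachineInstr &MI) {
  std::optional<DbgVariableLocation> Loc =
      DbgVariableLocation::extractFromMachineInstruction(MI);
  return Loc && Loc->LoadChain.empty();
}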

/// Base class for debug information backends. Common functionality related to
/// tracking which variables and scopes are alive at a given PC live here.
class DebugHandlerBase : public AsmPrinterHandler {
protected:
  DebugHandlerBase(AsmPrinter *A);

  /// Target of debug info emission.
  AsmPrinter *Asm = nullptr;

  /// Collected machine module information.
  MachineModuleInfo *MMI = nullptr;

  /// Previous instruction's location information. This is used to
  /// determine label location to indicate scope boundaries in debug info.
  /// We track the previous instruction's source location (if not line 0),
  /// whether it was a label, and its parent BB.
  DebugLoc PrevInstLoc;
  MCSymbol *PrevLabel = nullptr;
  const MachineBasicBlock *PrevInstBB = nullptr;

  /// This location indicates end of function prologue and beginning of
  /// function body.
  DebugLoc PrologEndLoc;

  /// This block includes epilogue instructions.
  const MachineBasicBlock *EpilogBeginBlock = nullptr;

  /// If nonnull, stores the current machine instruction we're processing.
  const MachineInstr *CurMI = nullptr;

  LexicalScopes LScopes;

  /// History of DBG_VALUE and clobber instructions for each user
  /// variable.  Variables are listed in order of appearance.
  DbgValueHistoryMap DbgValues;

  /// Mapping of inlined labels and DBG_LABEL machine instruction.
  DbgLabelInstrMap DbgLabels;

  /// Maps an instruction to the label emitted before it.
  /// FIXME: Make this private from DwarfDebug, we have the necessary accessors
  /// for it.
  DenseMap<const MachineInstr *, MCSymbol *> LabelsBeforeInsn;

  /// Maps an instruction to the label emitted after it.
  DenseMap<const MachineInstr *, MCSymbol *> LabelsAfterInsn;

  /// Identify instructions that mark the beginning or end of a scope.
  void identifyScopeMarkers();

  /// Ensure that a label will be emitted before MI.
  void requestLabelBeforeInsn(const MachineInstr *MI) {
    LabelsBeforeInsn.insert(std::make_pair(MI, nullptr));
  }

  /// Ensure that a label will be emitted after MI.
  void requestLabelAfterInsn(const MachineInstr *MI) {
    LabelsAfterInsn.insert(std::make_pair(MI, nullptr));
  }

  virtual void beginFunctionImpl(const MachineFunction *MF) = 0;
  virtual void endFunctionImpl(const MachineFunction *MF) = 0;
  virtual void skippedNonDebugFunction() {}

private:
  InstructionOrdering InstOrdering;

  // AsmPrinterHandler overrides.
public:
  void beginModule(Module *M) override;

  void beginInstruction(const MachineInstr *MI) override;
  void endInstruction() override;

  void beginFunction(const MachineFunction *MF) override;
  void endFunction(const MachineFunction *MF) override;

  void beginBasicBlockSection(const MachineBasicBlock &MBB) override;
  void endBasicBlockSection(const MachineBasicBlock &MBB) override;

  /// Return Label preceding the instruction.
  MCSymbol *getLabelBeforeInsn(const MachineInstr *MI);

  /// Return Label immediately following the instruction.
  MCSymbol *getLabelAfterInsn(const MachineInstr *MI);

  /// If this type is derived from a base type then return base type size.
  static uint64_t getBaseTypeSize(const DIType *Ty);

  /// Return true if type encoding is unsigned.
  static bool isUnsignedDIType(const DIType *Ty);

  const InstructionOrdering &getInstOrdering() const { return InstOrdering; }
};

} // namespace llvm

#endif

//===-- llvm/CodeGen/TargetCallingConv.h - Calling Convention ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines types for working with calling-convention information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETCALLINGCONV_H
#define LLVM_CODEGEN_TARGETCALLINGCONV_H

#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
#include <cstdint>

namespace llvm {
namespace ISD {

  struct ArgFlagsTy {
  private:
    unsigned IsZExt : 1;     ///< Zero extended
    unsigned IsSExt : 1;     ///< Sign extended
    unsigned IsInReg : 1;    ///< Passed in register
    unsigned IsSRet : 1;     ///< Hidden struct-ret ptr
    unsigned IsByVal : 1;    ///< Struct passed by value
    unsigned IsByRef : 1;    ///< Passed in memory
    unsigned IsNest : 1;     ///< Nested fn static chain
    unsigned IsReturned : 1; ///< Always returned
    unsigned IsSplit : 1;
    unsigned IsInAlloca : 1;   ///< Passed with inalloca
    unsigned IsPreallocated : 1; ///< ByVal without the copy
    unsigned IsSplitEnd : 1;   ///< Last part of a split
    unsigned IsSwiftSelf : 1;  ///< Swift self parameter
    unsigned IsSwiftAsync : 1;  ///< Swift async context parameter
    unsigned IsSwiftError : 1; ///< Swift error parameter
    unsigned IsCFGuardTarget : 1; ///< Control Flow Guard target
    unsigned IsHva : 1;        ///< HVA field for
    unsigned IsHvaStart : 1;   ///< HVA structure start
    unsigned IsSecArgPass : 1; ///< Second argument
    unsigned MemAlign : 4;     ///< Log 2 of alignment when arg is passed in memory
                               ///< (including byval/byref). The max alignment is
                               ///< verified in IR verification.
    unsigned OrigAlign : 5;    ///< Log 2 of original alignment
    unsigned IsInConsecutiveRegsLast : 1;
    unsigned IsInConsecutiveRegs : 1;
    unsigned IsCopyElisionCandidate : 1; ///< Argument copy elision candidate
    unsigned IsPointer : 1;

    unsigned ByValOrByRefSize = 0; ///< Byval or byref struct size

    unsigned PointerAddrSpace = 0; ///< Address space of pointer argument

  public:
    ArgFlagsTy()
        : IsZExt(0), IsSExt(0), IsInReg(0), IsSRet(0), IsByVal(0), IsByRef(0),
          IsNest(0), IsReturned(0), IsSplit(0), IsInAlloca(0),
          IsPreallocated(0), IsSplitEnd(0), IsSwiftSelf(0), IsSwiftAsync(0),
          IsSwiftError(0), IsCFGuardTarget(0), IsHva(0), IsHvaStart(0),
          IsSecArgPass(0), MemAlign(0), OrigAlign(0),
          IsInConsecutiveRegsLast(0), IsInConsecutiveRegs(0),
          IsCopyElisionCandidate(0), IsPointer(0) {
      static_assert(sizeof(*this) == 3 * sizeof(unsigned), "flags are too big");
    }

    bool isZExt() const { return IsZExt; }
    void setZExt() { IsZExt = 1; }

    bool isSExt() const { return IsSExt; }
    void setSExt() { IsSExt = 1; }

    bool isInReg() const { return IsInReg; }
    void setInReg() { IsInReg = 1; }

    bool isSRet() const { return IsSRet; }
    void setSRet() { IsSRet = 1; }

    bool isByVal() const { return IsByVal; }
    void setByVal() { IsByVal = 1; }

    bool isByRef() const { return IsByRef; }
    void setByRef() { IsByRef = 1; }

    bool isInAlloca() const { return IsInAlloca; }
    void setInAlloca() { IsInAlloca = 1; }

    bool isPreallocated() const { return IsPreallocated; }
    void setPreallocated() { IsPreallocated = 1; }

    bool isSwiftSelf() const { return IsSwiftSelf; }
    void setSwiftSelf() { IsSwiftSelf = 1; }

    bool isSwiftAsync() const { return IsSwiftAsync; }
    void setSwiftAsync() { IsSwiftAsync = 1; }

    bool isSwiftError() const { return IsSwiftError; }
    void setSwiftError() { IsSwiftError = 1; }

    bool isCFGuardTarget() const { return IsCFGuardTarget; }
    void setCFGuardTarget() { IsCFGuardTarget = 1; }

    bool isHva() const { return IsHva; }
    void setHva() { IsHva = 1; }

    bool isHvaStart() const { return IsHvaStart; }
    void setHvaStart() { IsHvaStart = 1; }

    bool isSecArgPass() const { return IsSecArgPass; }
    void setSecArgPass() { IsSecArgPass = 1; }

    bool isNest() const { return IsNest; }
    void setNest() { IsNest = 1; }

    bool isReturned() const { return IsReturned; }
    void setReturned(bool V = true) { IsReturned = V; }

    bool isInConsecutiveRegs()  const { return IsInConsecutiveRegs; }
    void setInConsecutiveRegs(bool Flag = true) { IsInConsecutiveRegs = Flag; }

    bool isInConsecutiveRegsLast() const { return IsInConsecutiveRegsLast; }
    void setInConsecutiveRegsLast(bool Flag = true) {
      IsInConsecutiveRegsLast = Flag;
    }

    bool isSplit()   const { return IsSplit; }
    void setSplit()  { IsSplit = 1; }

    bool isSplitEnd()   const { return IsSplitEnd; }
    void setSplitEnd()  { IsSplitEnd = 1; }

    bool isCopyElisionCandidate()  const { return IsCopyElisionCandidate; }
    void setCopyElisionCandidate() { IsCopyElisionCandidate = 1; }

    bool isPointer()  const { return IsPointer; }
    void setPointer() { IsPointer = 1; }

    Align getNonZeroMemAlign() const {
      return decodeMaybeAlign(MemAlign).valueOrOne();
    }

    void setMemAlign(Align A) {
      MemAlign = encode(A);
      assert(getNonZeroMemAlign() == A && "bitfield overflow");
    }

    Align getNonZeroByValAlign() const {
      assert(isByVal());
      MaybeAlign A = decodeMaybeAlign(MemAlign);
      assert(A && "ByValAlign must be defined");
      return *A;
    }

    Align getNonZeroOrigAlign() const {
      return decodeMaybeAlign(OrigAlign).valueOrOne();
    }

    void setOrigAlign(Align A) {
      OrigAlign = encode(A);
      assert(getNonZeroOrigAlign() == A && "bitfield overflow");
    }

    unsigned getByValSize() const {
      assert(isByVal() && !isByRef());
      return ByValOrByRefSize;
    }
    void setByValSize(unsigned S) {
      assert(isByVal() && !isByRef());
      ByValOrByRefSize = S;
    }

    unsigned getByRefSize() const {
      assert(!isByVal() && isByRef());
      return ByValOrByRefSize;
    }
    void setByRefSize(unsigned S) {
      assert(!isByVal() && isByRef());
      ByValOrByRefSize = S;
    }

    unsigned getPointerAddrSpace() const { return PointerAddrSpace; }
    void setPointerAddrSpace(unsigned AS) { PointerAddrSpace = AS; }
};
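
  // A minimal sketch (hypothetical helper, not part of the original header):
  // MemAlign and OrigAlign store the log2 of an alignment in a few bits, so
  // any power-of-two alignment that fits the field survives a set/get round
  // trip exactly.
  inline bool exampleAlignRoundTrip() {
    ArgFlagsTy Flags;
    Flags.setMemAlign(Align(16));
    Flags.setOrigAlign(Align(8));
    return Flags.getNonZeroMemAlign() == Align(16) &&
           Flags.getNonZeroOrigAlign() == Align(8);
  }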

  /// InputArg - This struct carries flags and type information about a
  /// single incoming (formal) argument or incoming (from the perspective
  /// of the caller) return value virtual register.
  ///
  struct InputArg {
    ArgFlagsTy Flags;
    MVT VT = MVT::Other;
    EVT ArgVT;
    bool Used = false;

    /// Index of the original Function's argument.
    unsigned OrigArgIndex;
    /// Sentinel value for implicit machine-level input arguments.
    static const unsigned NoArgIndex = UINT_MAX;

    /// Offset in bytes of the current input value relative to the beginning of
    /// the original argument. E.g. if the argument was split into four 32-bit
    /// registers, we get 4 InputArgs with PartOffsets 0, 4, 8 and 12.
    unsigned PartOffset;

    InputArg() = default;
    InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
             unsigned origIdx, unsigned partOffs)
      : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
      VT = vt.getSimpleVT();
      ArgVT = argvt;
    }

    bool isOrigArg() const {
      return OrigArgIndex != NoArgIndex;
    }

    unsigned getOrigArgIndex() const {
      assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
      return OrigArgIndex;
    }
  };
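
  // A minimal sketch (hypothetical helper, not part of the original header):
  // per the PartOffset comment above, an i128 argument lowered as four i32
  // parts is described by four InputArgs whose PartOffsets advance in 4-byte
  // steps; the first part is tagged as the start of the split and the last
  // as its end.
  inline void exampleSplitI128(InputArg (&Parts)[4]) {
    for (unsigned I = 0; I != 4; ++I) {
      ArgFlagsTy Flags;
      if (I == 0)
        Flags.setSplit();
      if (I == 3)
        Flags.setSplitEnd();
      Parts[I] = InputArg(Flags, MVT::i32, MVT::i128, /*used=*/true,
                          /*origIdx=*/0, /*partOffs=*/I * 4);
    }
  }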

  /// OutputArg - This struct carries flags and a value for a
  /// single outgoing (actual) argument or outgoing (from the perspective
  /// of the caller) return value virtual register.
  ///
  struct OutputArg {
    ArgFlagsTy Flags;
    MVT VT;
    EVT ArgVT;

    /// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
    bool IsFixed = false;

    /// Index of the original Function's argument.
    unsigned OrigArgIndex;

    /// Offset in bytes of the current output value relative to the beginning of
    /// the original argument. E.g. if the argument was split into four 32-bit
    /// registers, we get 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
    unsigned PartOffset;

    OutputArg() = default;
    OutputArg(ArgFlagsTy flags, MVT vt, EVT argvt, bool isfixed,
              unsigned origIdx, unsigned partOffs)
        : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
          PartOffset(partOffs) {
      VT = vt;
      ArgVT = argvt;
    }
  };

} // end namespace ISD
} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETCALLINGCONV_H

//===- llvm/CodeGen/RegisterBankInfo.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file declares the API for the register bank info.
/// This API is responsible for handling the register banks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERBANKINFO_H
#define LLVM_CODEGEN_REGISTERBANKINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <initializer_list>
#include <memory>

namespace llvm {

class MachineInstr;
class MachineRegisterInfo;
class raw_ostream;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

/// Holds all the information related to register banks.
class RegisterBankInfo {
public:
  /// Helper struct that represents how a value is partially mapped
  /// into a register.
  /// The StartIdx and Length represent what region of the original
  /// value this partial mapping covers.
  /// This can be represented as a mask of contiguous bits starting
  /// at the StartIdx-th bit and spanning Length bits.
  /// StartIdx is counted from the least significant bit.
  struct PartialMapping {
    /// Number of bits at which this partial mapping starts in the
    /// original value.  The bits are counted from least significant
    /// bits to most significant bits.
    unsigned StartIdx;

    /// Length of this mapping in bits. This is how many bits this
    /// partial mapping covers in the original value:
    /// from StartIdx to StartIdx + Length -1.
    unsigned Length;

    /// Register bank where the partial value lives.
    const RegisterBank *RegBank;

    PartialMapping() = default;

    /// Provide a shortcut for quickly building PartialMapping.
    PartialMapping(unsigned StartIdx, unsigned Length,
                   const RegisterBank &RegBank)
        : StartIdx(StartIdx), Length(Length), RegBank(&RegBank) {}

    /// \return the index, in the original value, of the most
    /// significant bit that this partial mapping covers.
    unsigned getHighBitIdx() const { return StartIdx + Length - 1; }

    /// Print this partial mapping on dbgs() stream.
    void dump() const;

    /// Print this partial mapping on \p OS;
    void print(raw_ostream &OS) const;

    /// Check that the Mask is compatible with the RegBank.
    /// Indeed, if the RegBank cannot accommodate the "active bits" of the mask,
    /// there is no way this mapping is valid.
    ///
    /// \note This method does not check anything when assertions are disabled.
    ///
    /// \return True is the check was successful.
    bool verify(const RegisterBankInfo &RBI) const;
  };

  /// Helper struct that represents how a value is mapped through
  /// different register banks.
  ///
  /// \note So far we do not have any users of complex mappings
  /// (mappings with more than one partial mapping), but when we do,
  /// we will need to duplicate partial mappings.
  /// The alternative would be to use an array of pointers to partial
  /// mappings (i.e., PartialMapping **BreakDown) and duplicate the
  /// pointers instead.
  ///
  /// E.g.,
  /// let's say we have a 32-bit add and a <2 x 32-bit> vadd. We
  /// can expand the <2 x 32-bit> vadd into two 32-bit adds.
  ///
  /// Currently the TableGen-like file would look like:
  /// \code
  /// PartialMapping[] = {
  /// /*32-bit add*/      {0, 32, GPR}, // Scalar entry repeated for first
  ///                                   // vec elt.
  /// /*2x32-bit add*/    {0, 32, GPR}, {32, 32, GPR},
  /// /*<2x32-bit> vadd*/ {0, 64, VPR}
  /// }; // PartialMapping duplicated.
  ///
  /// ValueMapping[] {
  ///   /*plain 32-bit add*/       {&PartialMapping[0], 1},
  ///   /*expanded vadd on 2xadd*/ {&PartialMapping[1], 2},
  ///   /*plain <2x32-bit> vadd*/  {&PartialMapping[3], 1}
  /// };
  /// \endcode
  ///
  /// With the array of pointer, we would have:
  /// \code
  /// PartialMapping[] = {
  /// /*32-bit add lower */ { 0, 32, GPR},
  /// /*32-bit add upper */ {32, 32, GPR},
  /// /*<2x32-bit> vadd */  { 0, 64, VPR}
  /// }; // No more duplication.
  ///
  /// BreakDowns[] = {
  /// /*AddBreakDown*/   &PartialMapping[0],
  /// /*2xAddBreakDown*/ &PartialMapping[0], &PartialMapping[1],
  /// /*VAddBreakDown*/  &PartialMapping[2]
  /// }; // Addresses of PartialMapping duplicated (smaller).
  ///
  /// ValueMapping[] {
  ///   /*plain 32-bit add*/       {&BreakDowns[0], 1},
  ///   /*expanded vadd on 2xadd*/ {&BreakDowns[1], 2},
  ///   /*plain <2x32-bit> vadd*/  {&BreakDowns[3], 1}
  /// };
  /// \endcode
  ///
  /// Given that a PartialMapping is small, the pointer-based scheme is
  /// actually a code-size degradation. Moreover, compile time would
  /// suffer from the additional indirection.
  /// If PartialMapping gets bigger, we may reconsider.
  struct ValueMapping {
    /// How the value is broken down between the different register banks.
    const PartialMapping *BreakDown;

    /// Number of partial mappings used to break down this value.
    unsigned NumBreakDowns;

    /// The default constructor creates an invalid (isValid() == false)
    /// instance.
    ValueMapping() : ValueMapping(nullptr, 0) {}

    /// Initialize a ValueMapping with the given parameters.
    /// \p BreakDown needs to have a lifetime at least as long
    /// as this instance.
    ValueMapping(const PartialMapping *BreakDown, unsigned NumBreakDowns)
        : BreakDown(BreakDown), NumBreakDowns(NumBreakDowns) {}

    /// Iterators through the PartialMappings.
    const PartialMapping *begin() const { return BreakDown; }
    const PartialMapping *end() const { return BreakDown + NumBreakDowns; }

    /// \return true if all partial mappings are the same size and register
    /// bank.
    bool partsAllUniform() const;

    /// Check if this ValueMapping is valid.
    bool isValid() const { return BreakDown && NumBreakDowns; }

    /// Verify that this mapping makes sense for a value of
    /// \p MeaningfulBitWidth.
    /// \note This method does not check anything when assertions are disabled.
    ///
    /// \return True if the check was successful.
    bool verify(const RegisterBankInfo &RBI, unsigned MeaningfulBitWidth) const;

    /// Print this on dbgs() stream.
    void dump() const;

    /// Print this on \p OS.
    void print(raw_ostream &OS) const;
  };

  /// Helper class that represents how the value of an instruction may be
  /// mapped and what is the related cost of such mapping.
  class InstructionMapping {
    /// Identifier of the mapping.
    /// This is used to communicate between the target and the optimizers
    /// which mapping should be realized.
    unsigned ID = InvalidMappingID;

    /// Cost of this mapping.
    unsigned Cost = 0;

    /// Mapping of all the operands.
    const ValueMapping *OperandsMapping = nullptr;

    /// Number of operands.
    unsigned NumOperands = 0;

    const ValueMapping &getOperandMapping(unsigned i) {
      assert(i < getNumOperands() && "Out of bound operand");
      return OperandsMapping[i];
    }

  public:
    /// Constructor for the mapping of an instruction.
    /// \p NumOperands must be equal to the number of operands of
    /// the related instruction.
    /// The rationale is that it is more efficient for the optimizers
    /// to be able to assume that the mapping of the ith operand is
    /// at the index i.
    InstructionMapping(unsigned ID, unsigned Cost,
                       const ValueMapping *OperandsMapping,
                       unsigned NumOperands)
        : ID(ID), Cost(Cost), OperandsMapping(OperandsMapping),
          NumOperands(NumOperands) {}

    /// Default constructor.
    /// Use this constructor to express that the mapping is invalid.
    InstructionMapping() = default;

    /// Get the cost.
    unsigned getCost() const { return Cost; }

    /// Get the ID.
    unsigned getID() const { return ID; }

    /// Get the number of operands.
    unsigned getNumOperands() const { return NumOperands; }

    /// Get the value mapping of the ith operand.
    /// \pre The mapping for the ith operand has been set.
    /// \pre The ith operand is a register.
    const ValueMapping &getOperandMapping(unsigned i) const {
      const ValueMapping &ValMapping =
          const_cast<InstructionMapping *>(this)->getOperandMapping(i);
      return ValMapping;
    }

    /// Set the mapping for all the operands.
    /// In other words, \p OpdsMapping should hold at least getNumOperands
    /// ValueMappings.
    void setOperandsMapping(const ValueMapping *OpdsMapping) {
      OperandsMapping = OpdsMapping;
    }

    /// Check whether this object is valid.
    /// This is a lightweight check that catches obviously invalid instances.
    bool isValid() const {
      return getID() != InvalidMappingID && OperandsMapping;
    }

    /// Verify that this mapping makes sense for \p MI.
    /// \pre \p MI must be connected to a MachineFunction.
    ///
    /// \note This method does not check anything when assertions are disabled.
    ///
    /// \return True if the check was successful.
    bool verify(const MachineInstr &MI) const;

    /// Print this on dbgs() stream.
    void dump() const;

    /// Print this on \p OS.
    void print(raw_ostream &OS) const;
  };

  /// Convenient type to represent the alternatives for mapping an
  /// instruction.
  /// \todo When we move to TableGen this should be an array ref.
  using InstructionMappings = SmallVector<const InstructionMapping *, 4>;

  /// Helper class used to get/create the virtual registers that will be used
  /// to replace the MachineOperand when applying a mapping.
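  /// A typical use, sketched here with assumed surrounding context (an
  /// instruction \c MI, a chosen \c InstrMapping, the function's \c MRI,
  /// and a RegisterBankInfo \c RBI), creates the mapper, materializes
  /// registers for each register operand, and applies the mapping:
  /// \code
  ///   RegisterBankInfo::OperandsMapper OpdMapper(MI, InstrMapping, MRI);
  ///   for (unsigned OpIdx = 0, E = MI.getNumOperands(); OpIdx != E; ++OpIdx)
  ///     if (MI.getOperand(OpIdx).isReg())
  ///       OpdMapper.createVRegs(OpIdx);
  ///   RBI.applyMapping(OpdMapper);
  /// \endcode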
  class OperandsMapper {
    /// The OpIdx-th cell contains the index in NewVRegs where the VRegs of the
    /// OpIdx-th operand starts. -1 means we do not have such a mapping yet.
    /// Note: We use a SmallVector to avoid heap allocation for most cases.
    SmallVector<int, 8> OpToNewVRegIdx;

    /// Hold the registers that will be used to map MI with InstrMapping.
    SmallVector<Register, 8> NewVRegs;

    /// Current MachineRegisterInfo, used to create new virtual registers.
    MachineRegisterInfo &MRI;

    /// Instruction being remapped.
    MachineInstr &MI;

    /// New mapping of the instruction.
    const InstructionMapping &InstrMapping;

    /// Constant value identifying that the index in OpToNewVRegIdx
    /// for an operand has not been set yet.
    static const int DontKnowIdx;

    /// Get the range in NewVRegs to store all the partial
    /// values for the \p OpIdx-th operand.
    ///
    /// \return The iterator range for the space created.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    iterator_range<SmallVectorImpl<Register>::iterator>
    getVRegsMem(unsigned OpIdx);

    /// Get the end iterator for a range starting at \p StartIdx and
    /// spanning \p NumVal in NewVRegs.
    /// \pre StartIdx + NumVal <= NewVRegs.size()
    SmallVectorImpl<Register>::const_iterator
    getNewVRegsEnd(unsigned StartIdx, unsigned NumVal) const;
    SmallVectorImpl<Register>::iterator getNewVRegsEnd(unsigned StartIdx,
                                                       unsigned NumVal);

  public:
    /// Create an OperandsMapper that will hold the information to apply \p
    /// InstrMapping to \p MI.
    /// \pre InstrMapping.verify(MI)
    OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
                   MachineRegisterInfo &MRI);

    /// \name Getters.
    /// @{
    /// The MachineInstr being remapped.
    MachineInstr &getMI() const { return MI; }

    /// The final mapping of the instruction.
    const InstructionMapping &getInstrMapping() const { return InstrMapping; }

    /// The MachineRegisterInfo we used to realize the mapping.
    MachineRegisterInfo &getMRI() const { return MRI; }
    /// @}

    /// Create as many new virtual registers as needed for the mapping of the \p
    /// OpIdx-th operand.
    /// The number of registers is determined by the number of breakdowns for the
    /// related operand in the instruction mapping.
    /// The type of the new registers is a plain scalar of the right size.
    /// The proper type is expected to be set when the mapping is applied to
    /// the instruction(s) that realizes the mapping.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    ///
    /// \post All the partial mappings of the \p OpIdx-th operand have been
    /// assigned a new virtual register.
    void createVRegs(unsigned OpIdx);

    /// Set the virtual register of the \p PartialMapIdx-th partial mapping of
    /// the \p OpIdx-th operand to \p NewVReg.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    /// \pre getInstrMapping().getOperandMapping(OpIdx).BreakDown.size() >
    /// PartialMapIdx
    /// \pre NewVReg != 0
    ///
    /// \post the \p PartialMapIdx-th register of the value mapping of the \p
    /// OpIdx-th operand has been set.
    void setVRegs(unsigned OpIdx, unsigned PartialMapIdx, Register NewVReg);

    /// Get all the virtual registers required to map the \p OpIdx-th operand of
    /// the instruction.
    ///
    /// This returns an empty range when createVRegs or setVRegs has not been
    /// called.
    /// The iterator may be invalidated by a call to setVRegs or createVRegs.
    ///
    /// When \p ForDebug is true, we will not check that the list of new virtual
    /// registers does not contain uninitialized values.
    ///
    /// \pre getMI().getOperand(OpIdx).isReg()
    /// \pre ForDebug || All partial mappings have been assigned a register
    iterator_range<SmallVectorImpl<Register>::const_iterator>
    getVRegs(unsigned OpIdx, bool ForDebug = false) const;

    /// Print this operands mapper on dbgs() stream.
    void dump() const;

    /// Print this operands mapper on \p OS stream.
    void print(raw_ostream &OS, bool ForDebug = false) const;
  };

protected:
  /// Hold the set of supported register banks.
  const RegisterBank **RegBanks;

  /// Total number of register banks.
  unsigned NumRegBanks;

  /// Hold the sizes of the register banks for all HwModes.
  const unsigned *Sizes;

  /// Current HwMode for the target.
  unsigned HwMode;

  /// Keep dynamically allocated PartialMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
      MapOfPartialMappings;

  /// Keep dynamically allocated ValueMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
      MapOfValueMappings;

  /// Keep dynamically allocated array of ValueMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
      MapOfOperandsMappings;

  /// Keep dynamically allocated InstructionMapping in a separate map.
  /// This shouldn't be needed when everything gets TableGen'ed.
  mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
      MapOfInstructionMappings;

  /// Getting the minimal register class of a physreg is expensive.
  /// Cache this information as we get it.
  mutable DenseMap<unsigned, const TargetRegisterClass *> PhysRegMinimalRCs;

  /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
  /// RegisterBank instances.
  RegisterBankInfo(const RegisterBank **RegBanks, unsigned NumRegBanks,
                   const unsigned *Sizes, unsigned HwMode);

  /// This constructor is meaningless.
  /// It just provides a default constructor that can be used at link time
  /// when GlobalISel is not built.
  /// That way, targets can still inherit from this class without doing
  /// crazy gymnastics to avoid link-time failures.
  /// \note That works because the constructor is inlined.
  RegisterBankInfo() {
    llvm_unreachable("This constructor should not be executed");
  }

  /// Get the register bank identified by \p ID.
  const RegisterBank &getRegBank(unsigned ID) {
    assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
    return *RegBanks[ID];
  }

  /// Get the MinimalPhysRegClass for Reg.
  /// \pre Reg is a physical register.
  const TargetRegisterClass *
  getMinimalPhysRegClass(Register Reg, const TargetRegisterInfo &TRI) const;

  /// Try to get the mapping of \p MI.
  /// See getInstrMapping for more details on what a mapping represents.
  ///
  /// Unlike getInstrMapping, the returned InstructionMapping may be invalid
  /// (isValid() == false).
  /// This means that the target independent code is not smart enough
  /// to get the mapping of \p MI and thus, the target has to provide the
  /// information for \p MI.
  ///
  /// This implementation is able to get the mapping of:
  /// - Target specific instructions by looking at the encoding constraints.
  /// - Any instruction if all the register operands have already been assigned
  ///   a register, a register class, or a register bank.
  /// - Copies and phis if at least one of the operands has been assigned a
  ///   register, a register class, or a register bank.
  /// In other words, this method will likely fail to find a mapping for
  /// any generic opcode that has not been lowered by target specific code.
  const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;

  /// Get the uniquely generated PartialMapping for the
  /// given arguments.
  const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
                                          const RegisterBank &RegBank) const;

  /// \name Methods to get a uniquely generated ValueMapping.
  /// @{

  /// The most common ValueMapping consists of a single PartialMapping.
  /// Provide a dedicated method for that case.
  const ValueMapping &getValueMapping(unsigned StartIdx, unsigned Length,
                                      const RegisterBank &RegBank) const;

  /// Get the ValueMapping for the given arguments.
  const ValueMapping &getValueMapping(const PartialMapping *BreakDown,
                                      unsigned NumBreakDowns) const;
  /// @}

  /// \name Methods to get a uniquely generated array of ValueMapping.
  /// @{

  /// Get the uniquely generated array of ValueMapping for the
  /// elements between \p Begin and \p End.
  ///
  /// Elements that are nullptr will be replaced by
  /// invalid ValueMapping (ValueMapping::isValid == false).
  ///
  /// \pre The pointers on ValueMapping between \p Begin and \p End
  /// must uniquely identify a ValueMapping. Otherwise, there is no
  /// guarantee that the returned instance will be unique, i.e., another
  /// OperandsMapping could have the same content.
  template <typename Iterator>
  const ValueMapping *getOperandsMapping(Iterator Begin, Iterator End) const;

  /// Get the uniquely generated array of ValueMapping for the
  /// elements of \p OpdsMapping.
  ///
  /// Elements of \p OpdsMapping that are nullptr will be replaced by
  /// invalid ValueMapping (ValueMapping::isValid == false).
  const ValueMapping *getOperandsMapping(
      const SmallVectorImpl<const ValueMapping *> &OpdsMapping) const;

  /// Get the uniquely generated array of ValueMapping for the
  /// given arguments.
  ///
  /// Arguments that are nullptr will be replaced by invalid
  /// ValueMapping (ValueMapping::isValid == false).
  const ValueMapping *getOperandsMapping(
      std::initializer_list<const ValueMapping *> OpdsMapping) const;
  /// @}

  /// \name Methods to get a uniquely generated InstructionMapping.
  /// @{

private:
  /// Method to get a uniquely generated InstructionMapping.
  const InstructionMapping &
  getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
                            unsigned Cost = 0,
                            const ValueMapping *OperandsMapping = nullptr,
                            unsigned NumOperands = 0) const;

public:
  /// Method to get a uniquely generated InstructionMapping.
  const InstructionMapping &
  getInstructionMapping(unsigned ID, unsigned Cost,
                        const ValueMapping *OperandsMapping,
                        unsigned NumOperands) const {
    return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
                                     OperandsMapping, NumOperands);
  }

  /// Method to get a uniquely generated invalid InstructionMapping.
  const InstructionMapping &getInvalidInstructionMapping() const {
    return getInstructionMappingImpl(/*IsInvalid*/ true);
  }
  /// @}

  /// Get the register bank for the \p OpIdx-th operand of \p MI from
  /// the encoding constraints, if any.
  ///
  /// \return A register bank that covers the register class of the
  /// related encoding constraints or nullptr if \p MI did not provide
  /// enough information to deduce it.
  const RegisterBank *
  getRegBankFromConstraints(const MachineInstr &MI, unsigned OpIdx,
                            const TargetInstrInfo &TII,
                            const MachineRegisterInfo &MRI) const;

  /// Helper method to apply something that is like the default mapping.
  /// Basically, that means that \p OpdMapper.getMI() is left untouched
  /// aside from the reassignment of the register operands that have been
  /// remapped.
  ///
  /// The types of all the new registers created by the mapper are properly
  /// remapped to the types of the original registers they replace. In other
  /// words, the semantics of the instruction do not change, only the
  /// register banks.
  ///
  /// If the mapping of one of the operands spans several registers, this
  /// method will abort as this is not like a default mapping anymore.
  ///
  /// \pre For OpIdx in {0..\p OpdMapper.getMI().getNumOperands())
  ///        the range OpdMapper.getVRegs(OpIdx) is empty or of size 1.
  static void applyDefaultMapping(const OperandsMapper &OpdMapper);

  /// See ::applyMapping.
  virtual void applyMappingImpl(const OperandsMapper &OpdMapper) const {
    llvm_unreachable("The target has to implement that part");
  }

public:
  virtual ~RegisterBankInfo() = default;

  /// Get the register bank identified by \p ID.
  const RegisterBank &getRegBank(unsigned ID) const {
    return const_cast<RegisterBankInfo *>(this)->getRegBank(ID);
  }

  /// Get the maximum size in bits that fits in the given register bank.
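  /// The Sizes table stores NumRegBanks entries per HwMode, so the entry
  /// for \p RegBankID under the current HwMode lives at
  /// Sizes[RegBankID + HwMode * NumRegBanks], as computed below.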
  unsigned getMaximumSize(unsigned RegBankID) const {
    return Sizes[RegBankID + HwMode * NumRegBanks];
  }

  /// Get the register bank of \p Reg.
  /// If Reg has not been assigned a register, a register class,
  /// or a register bank, then this returns nullptr.
  ///
  /// \pre Reg != 0 (NoRegister)
  const RegisterBank *getRegBank(Register Reg, const MachineRegisterInfo &MRI,
                                 const TargetRegisterInfo &TRI) const;

  /// Get the total number of register banks.
  unsigned getNumRegBanks() const { return NumRegBanks; }

  /// Returns true if the register bank is considered divergent.
  virtual bool isDivergentRegBank(const RegisterBank *RB) const {
    return false;
  }

  /// Get a register bank that covers \p RC.
  ///
  /// \pre \p RC is a user-defined register class (as opposed to one
  /// generated by TableGen).
  ///
  /// \note The mapping RC -> RegBank could be built while adding the
  /// coverage for the register banks. However, we do not do it, because,
  /// at least for now, we only need this information for register classes
  /// that are used in the description of instructions. In other words,
  /// there are just a handful of them and we do not want to waste space.
  ///
  /// \todo This should be TableGen'ed.
  virtual const RegisterBank &
  getRegBankFromRegClass(const TargetRegisterClass &RC, LLT Ty) const {
    llvm_unreachable("The target must override this method");
  }

  /// Get the cost of a copy from \p B to \p A, or put differently,
  /// get the cost of A = COPY B. Since register banks may cover
  /// different sizes, \p Size specifies the size in bits that will
  /// be copied around.
  ///
  /// \note Since this is a copy, both registers have the same size.
  virtual unsigned copyCost(const RegisterBank &A, const RegisterBank &B,
                            unsigned Size) const {
    // Optimistically assume that copies are coalesced. I.e., when
    // they are on the same bank, they are free.
    // Otherwise assume a non-zero cost of 1. The targets are supposed
    // to override that properly anyway if they care.
    return &A != &B;
  }

  /// \returns true if emitting a copy from \p Src to \p Dst is impossible.
  bool cannotCopy(const RegisterBank &Dst, const RegisterBank &Src,
                  unsigned Size) const {
    return copyCost(Dst, Src, Size) == std::numeric_limits<unsigned>::max();
  }

  /// Get the cost of using \p ValMapping to decompose a register. This is
  /// similar to ::copyCost, except for cases where multiple copy-like
  /// operations need to be inserted. If the register is used as a source
  /// operand and already has a bank assigned, \p CurBank is non-null.
  virtual unsigned
  getBreakDownCost(const ValueMapping &ValMapping,
                   const RegisterBank *CurBank = nullptr) const {
    return std::numeric_limits<unsigned>::max();
  }

  /// Constrain the (possibly generic) virtual register \p Reg to \p RC.
  ///
  /// \pre \p Reg is a virtual register that either has a bank or a class.
  /// \returns The constrained register class, or nullptr if there is none.
  /// \note This is a generic variant of MachineRegisterInfo::constrainRegClass
  /// \note Use MachineRegisterInfo::constrainRegAttrs instead for any non-isel
  /// purpose, including non-select passes of GlobalISel
  static const TargetRegisterClass *
  constrainGenericRegister(Register Reg, const TargetRegisterClass &RC,
                           MachineRegisterInfo &MRI);

  /// Identifier used when the related instruction mapping instance
  /// is generated by target independent code.
  /// Make sure not to use that identifier, to avoid possible collisions.
  static const unsigned DefaultMappingID;

  /// Identifier used when the related instruction mapping instance
  /// is generated by the default constructor.
  /// Make sure not to use that identifier.
  static const unsigned InvalidMappingID;

  /// Get the mapping of the different operands of \p MI
  /// on the register bank.
  /// This mapping should be the direct translation of \p MI.
  /// In other words, when \p MI is mapped with the returned mapping,
  /// only the register banks of the operands of \p MI need to be updated.
  /// In particular, neither the opcode nor the type of \p MI needs to be
  /// updated for this direct mapping.
  ///
  /// The target independent implementation gives a mapping based on
  /// the register classes for the target specific opcode.
  /// It uses the ID RegisterBankInfo::DefaultMappingID for that mapping.
  /// Make sure you do not use that ID for the alternative mappings
  /// for \p MI. See getInstrAlternativeMappings for the alternative
  /// mappings.
  ///
  /// For instance, if \p MI is a vector add, the mapping should
  /// not be a scalarization of the add.
  ///
  /// \post returnedVal.verify(MI).
  ///
  /// \note If returnedVal does not verify MI, this would probably mean
  /// that the target does not support that instruction.
  virtual const InstructionMapping &
  getInstrMapping(const MachineInstr &MI) const;

  /// Get the alternative mappings for \p MI.
  /// Alternative here means different from what getInstrMapping returns.
  virtual InstructionMappings
  getInstrAlternativeMappings(const MachineInstr &MI) const;

  /// Get the possible mapping for \p MI.
  /// A mapping defines where the different operands may live and at what cost.
  /// For instance, let us consider:
  /// v0(16) = G_ADD <2 x i8> v1, v2
  /// The possible mapping could be:
  ///
  /// {/*ID*/VectorAdd, /*Cost*/1, /*v0*/{(0xFFFF, VPR)}, /*v1*/{(0xFFFF, VPR)},
  ///                              /*v2*/{(0xFFFF, VPR)}}
  /// {/*ID*/ScalarAddx2, /*Cost*/2, /*v0*/{(0x00FF, GPR),(0xFF00, GPR)},
  ///                                /*v1*/{(0x00FF, GPR),(0xFF00, GPR)},
  ///                                /*v2*/{(0x00FF, GPR),(0xFF00, GPR)}}
  ///
  /// \note The first alternative of the returned mapping should be the
  /// direct translation of \p MI's current form.
  ///
  /// \post !returnedVal.empty().
  InstructionMappings getInstrPossibleMappings(const MachineInstr &MI) const;

  /// Apply \p OpdMapper.getInstrMapping() to \p OpdMapper.getMI().
  /// After this call \p OpdMapper.getMI() may not be valid anymore.
  /// \p OpdMapper.getInstrMapping().getID() carries the information of
  /// what has been chosen to map \p OpdMapper.getMI(). This ID is set
  /// by the various getInstrXXXMapping methods. This ID is set
  ///
  /// Therefore, getting the mapping and applying it should be kept in
  /// sync.
  void applyMapping(const OperandsMapper &OpdMapper) const {
    // The only mapping we know how to handle is the default mapping.
    if (OpdMapper.getInstrMapping().getID() == DefaultMappingID)
      return applyDefaultMapping(OpdMapper);
    // For other mapping, the target needs to do the right thing.
    // If that means calling applyDefaultMapping, fine, but this
    // must be explicitly stated.
    applyMappingImpl(OpdMapper);
  }

  /// Get the size in bits of \p Reg.
  /// Utility method to get the size of any register. Unlike
  /// MachineRegisterInfo::getSize, the register does not need to be a
  /// virtual register.
  ///
  /// \pre \p Reg != 0 (NoRegister).
  unsigned getSizeInBits(Register Reg, const MachineRegisterInfo &MRI,
                         const TargetRegisterInfo &TRI) const;

  /// Check that the information held by this instance makes sense for the
  /// given \p TRI.
  ///
  /// \note This method does not check anything when assertions are disabled.
  ///
  /// \return True if the check was successful.
  bool verify(const TargetRegisterInfo &TRI) const;
};

inline raw_ostream &
operator<<(raw_ostream &OS,
           const RegisterBankInfo::PartialMapping &PartMapping) {
  PartMapping.print(OS);
  return OS;
}

inline raw_ostream &
operator<<(raw_ostream &OS, const RegisterBankInfo::ValueMapping &ValMapping) {
  ValMapping.print(OS);
  return OS;
}

inline raw_ostream &
operator<<(raw_ostream &OS,
           const RegisterBankInfo::InstructionMapping &InstrMapping) {
  InstrMapping.print(OS);
  return OS;
}

inline raw_ostream &
operator<<(raw_ostream &OS, const RegisterBankInfo::OperandsMapper &OpdMapper) {
  OpdMapper.print(OS, /*ForDebug*/ false);
  return OS;
}

/// Hashing function for PartialMapping.
/// It is required for the hashing of ValueMapping.
hash_code hash_value(const RegisterBankInfo::PartialMapping &PartMapping);

} // end namespace llvm

#endif // LLVM_CODEGEN_REGISTERBANKINFO_H
//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG.  Note that
// natural loops may actually be several loops that share the same header node.
//
// This analysis calculates the nesting structure of loops in a function.  For
// each natural loop identified, this analysis identifies natural loops
// contained entirely within the loop and the basic blocks that make up the loop.
//
// It can calculate on the fly various bits of information, for example:
//
//  * whether there is a preheader for the loop
//  * the number of back edges to the header
//  * whether or not a particular block branches out of the loop
//  * the successor blocks of the loop
//  * the loop depth
//  * the trip count
//  * etc...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINELOOPINFO_H
#define LLVM_CODEGEN_MACHINELOOPINFO_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/GenericLoopInfo.h"

namespace llvm {

class MachineDominatorTree;
// Implementation in LoopInfoImpl.h
class MachineLoop;
extern template class LoopBase<MachineBasicBlock, MachineLoop>;

class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
public:
  /// Return the "top" block in the loop, which is the first block in the linear
  /// layout, ignoring any parts of the loop not contiguous with the part that
  /// contains the header.
  MachineBasicBlock *getTopBlock();

  /// Return the "bottom" block in the loop, which is the last block in the
  /// linear layout, ignoring any parts of the loop not contiguous with the part
  /// that contains the header.
  MachineBasicBlock *getBottomBlock();

  /// Find the block that contains the loop control variable and the
  /// loop test. This will return the latch block if it's one of the exiting
  /// blocks. Otherwise, return the exiting block. Return 'null' when
  /// multiple exiting blocks are present.
  MachineBasicBlock *findLoopControlBlock();

  /// Return the debug location of the start of this loop.
  /// This looks for a BB terminating instruction with a known debug
  /// location by looking at the preheader and header blocks. If it
  /// cannot find a terminating instruction with location information,
  /// it returns an unknown location.
  DebugLoc getStartLoc() const;

  /// Returns true if the instruction is loop invariant.
  /// I.e., all virtual register operands are defined outside of the loop,
  /// physical registers aren't accessed explicitly, and there are no side
  /// effects that aren't captured by the operands or other flags.
  bool isLoopInvariant(MachineInstr &I) const;

  void dump() const;

private:
  friend class LoopInfoBase<MachineBasicBlock, MachineLoop>;

  explicit MachineLoop(MachineBasicBlock *MBB)
    : LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}

  MachineLoop() = default;
};

// Implementation in LoopInfoImpl.h
extern template class LoopInfoBase<MachineBasicBlock, MachineLoop>;

class MachineLoopInfo : public MachineFunctionPass {
  friend class LoopBase<MachineBasicBlock, MachineLoop>;

  LoopInfoBase<MachineBasicBlock, MachineLoop> LI;

public:
  static char ID; // Pass identification, replacement for typeid

  MachineLoopInfo();
  explicit MachineLoopInfo(MachineDominatorTree &MDT)
      : MachineFunctionPass(ID) {
    calculate(MDT);
  }
  MachineLoopInfo(const MachineLoopInfo &) = delete;
  MachineLoopInfo &operator=(const MachineLoopInfo &) = delete;

  LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }

  /// Find the block that either is the loop preheader, or could
  /// speculatively be used as the preheader. This is e.g. useful to place
  /// loop setup code. Code that cannot be speculated should not be placed
  /// here. \p SpeculativePreheader controls whether it also tries to
  /// find the speculative preheader if the regular preheader is not present.
  /// With FindMultiLoopPreheader = false, nullptr will be returned if the found
  /// preheader is the preheader of multiple loops.
  MachineBasicBlock *
  findLoopPreheader(MachineLoop *L, bool SpeculativePreheader = false,
                    bool FindMultiLoopPreheader = false) const;

  /// The iterator interface to the top-level loops in the current function.
  using iterator = LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator;
  inline iterator begin() const { return LI.begin(); }
  inline iterator end() const { return LI.end(); }
  bool empty() const { return LI.empty(); }

  /// Return the innermost loop that BB lives in. If a basic block is in no loop
  /// (for example the entry node), null is returned.
  inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
    return LI.getLoopFor(BB);
  }

  /// Same as getLoopFor.
  inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
    return LI.getLoopFor(BB);
  }

  /// Return the loop nesting level of the specified block.
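  /// The depth is zero for blocks that are not contained in any loop,
  /// one for blocks in an outermost loop, and so on for deeper nesting.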
  inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
    return LI.getLoopDepth(BB);
  }

  /// True if the block is a loop header node.
  inline bool isLoopHeader(const MachineBasicBlock *BB) const {
    return LI.isLoopHeader(BB);
  }

  /// Calculate the natural loop information.
  bool runOnMachineFunction(MachineFunction &F) override;
  void calculate(MachineDominatorTree &MDT);

  void releaseMemory() override { LI.releaseMemory(); }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// This removes the specified top-level loop from this loop info object. The
  /// loop is not deleted, as it will presumably be inserted into another loop.
  inline MachineLoop *removeLoop(iterator I) { return LI.removeLoop(I); }

  /// Change the top-level loop that contains BB to the specified loop. This
  /// should be used by transformations that restructure the loop hierarchy
  /// tree.
  inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
    LI.changeLoopFor(BB, L);
  }

  /// Replace the specified loop in the top-level loops list with the indicated
  /// loop.
  inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
    LI.changeTopLevelLoop(OldLoop, NewLoop);
  }

  /// This adds the specified loop to the collection of top-level loops.
  inline void addTopLevelLoop(MachineLoop *New) {
    LI.addTopLevelLoop(New);
  }

  /// This method completely removes BB from all data structures, including all
  /// of the Loop objects it is nested in and our mapping from
  /// MachineBasicBlocks to loops.
  void removeBlock(MachineBasicBlock *BB) {
    LI.removeBlock(BB);
  }
};

// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const MachineLoop*> {
  using NodeRef = const MachineLoop *;
  using ChildIteratorType = MachineLoopInfo::iterator;

  static NodeRef getEntryNode(const MachineLoop *L) { return L; }
  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};

template <> struct GraphTraits<MachineLoop*> {
  using NodeRef = MachineLoop *;
  using ChildIteratorType = MachineLoopInfo::iterator;

  static NodeRef getEntryNode(MachineLoop *L) { return L; }
  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINELOOPINFO_H
//===- ScheduleDAGMutation.h - MachineInstr Scheduling ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAGMutation class, which represents
// a target-specific mutation of the dependency graph for scheduling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
#define LLVM_CODEGEN_SCHEDULEDAGMUTATION_H

namespace llvm {

class ScheduleDAGInstrs;

/// Mutate the DAG as a postpass after normal DAG building.
class ScheduleDAGMutation {
  virtual void anchor();

public:
  virtual ~ScheduleDAGMutation() = default;

  virtual void apply(ScheduleDAGInstrs *DAG) = 0;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_SCHEDULEDAGMUTATION_H
//===----- MIRFSDiscriminator.h: MIR FS Discriminator Support ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the supporting functions for adding Machine level IR
// Flow Sensitive discriminators to the instruction debug information. With
// this, a cloned machine instruction in a different MachineBasicBlock will
// have its own discriminator value. This is done in a MIRAddFSDiscriminators
// pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRFSDISCRIMINATOR_H
#define LLVM_CODEGEN_MIRFSDISCRIMINATOR_H

#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Discriminator.h"

#include <cassert>
#include <cstdint>

namespace llvm {
class MachineFunction;

using namespace sampleprof;
class MIRAddFSDiscriminators : public MachineFunctionPass {
  MachineFunction *MF = nullptr;
  FSDiscriminatorPass Pass;
  unsigned LowBit;
  unsigned HighBit;

public:
  static char ID;
  /// \p P identifies which FS discriminator pass this is in the sequence,
  /// starting from 1.
  MIRAddFSDiscriminators(FSDiscriminatorPass P = FSDiscriminatorPass::Pass1)
      : MachineFunctionPass(ID), Pass(P) {
    LowBit = getFSPassBitBegin(P);
    HighBit = getFSPassBitEnd(P);
    assert(LowBit < HighBit && "HighBit needs to be greater than Lowbit");
  }

  StringRef getPassName() const override {
    return "Add FS discriminators in MIR";
  }

  /// getNumFSBBs() - Return the number of machine BBs that have FS samples.
  unsigned getNumFSBBs();

  /// getNumFSSamples() - Return the number of samples that have flow sensitive
  /// values.
  uint64_t getNumFSSamples();

  /// getMachineFunction - Return the current machine function.
  const MachineFunction *getMachineFunction() const { return MF; }

private:
  bool runOnMachineFunction(MachineFunction &) override;
};

} // namespace llvm

#endif // LLVM_CODEGEN_MIRFSDISCRIMINATOR_H
//===- StackMaps.h - StackMaps ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_STACKMAPS_H
#define LLVM_CODEGEN_STACKMAPS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

namespace llvm {

class AsmPrinter;
class MCSymbol;
class MCExpr;
class MCStreamer;
class raw_ostream;
class TargetRegisterInfo;

/// MI-level stackmap operands.
///
/// MI stackmap operations take the form:
/// <id>, <numBytes>, live args...
class StackMapOpers {
public:
  /// Enumerate the meta operands.
  enum { IDPos, NBytesPos };

private:
  const MachineInstr* MI;

public:
  explicit StackMapOpers(const MachineInstr *MI);

  /// Return the ID for the given stackmap.
  uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }

  /// Return the number of patchable bytes the given stackmap should emit.
  uint32_t getNumPatchBytes() const {
    return MI->getOperand(NBytesPos).getImm();
  }

  /// Get the operand index of the variable list of non-argument operands.
  /// These hold the "live state".
  unsigned getVarIdx() const {
    // Skip ID, nShadowBytes.
    return 2;
  }
};

/// MI-level patchpoint operands.
///
/// MI patchpoint operations take the form:
/// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
///
/// IR patchpoint intrinsics do not have the <cc> operand because calling
/// convention is part of the subclass data.
///
/// SD patchpoint nodes do not have a def operand because it is part of the
/// SDValue.
///
/// Patchpoints following the anyregcc convention are handled specially. For
/// these, the stack map also records the location of the return value and
/// arguments.
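///
/// For example (illustrative), a patchpoint with a def and three call
/// arguments has its meta operands at indices 1 through 5, its call
/// arguments at indices 6 through 8, and its variable "live state"
/// operands starting at index 9.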
class PatchPointOpers {
public:
  /// Enumerate the meta operands.
  enum { IDPos, NBytesPos, TargetPos, NArgPos, CCPos, MetaEnd };

private:
  const MachineInstr *MI;
  bool HasDef;

  unsigned getMetaIdx(unsigned Pos = 0) const {
    assert(Pos < MetaEnd && "Meta operand index out of range.");
    return (HasDef ? 1 : 0) + Pos;
  }

  const MachineOperand &getMetaOper(unsigned Pos) const {
    return MI->getOperand(getMetaIdx(Pos));
  }

public:
  explicit PatchPointOpers(const MachineInstr *MI);

  bool isAnyReg() const { return (getCallingConv() == CallingConv::AnyReg); }
  bool hasDef() const { return HasDef; }

  /// Return the ID for the given patchpoint.
  uint64_t getID() const { return getMetaOper(IDPos).getImm(); }

  /// Return the number of patchable bytes the given patchpoint should emit.
  uint32_t getNumPatchBytes() const {
    return getMetaOper(NBytesPos).getImm();
  }

  /// Returns the target of the underlying call.
  const MachineOperand &getCallTarget() const {
    return getMetaOper(TargetPos);
  }

  /// Returns the calling convention.
  CallingConv::ID getCallingConv() const {
    return getMetaOper(CCPos).getImm();
  }

  unsigned getArgIdx() const { return getMetaIdx() + MetaEnd; }

  /// Return the number of call arguments.
  uint32_t getNumCallArgs() const {
    return MI->getOperand(getMetaIdx(NArgPos)).getImm();
  }

  /// Get the operand index of the variable list of non-argument operands.
  /// These hold the "live state".
  unsigned getVarIdx() const {
    return getMetaIdx() + MetaEnd + getNumCallArgs();
  }

  /// Get the index at which stack map locations will be recorded.
  /// Arguments are not recorded unless the anyregcc convention is used.
  unsigned getStackMapStartIdx() const {
    if (isAnyReg())
      return getArgIdx();
    return getVarIdx();
  }

  /// Get the next scratch register operand index.
  unsigned getNextScratchIdx(unsigned StartIdx = 0) const;
};

/// MI-level Statepoint operands
///
/// Statepoint operands take the form:
///   <id>, <num patch bytes >, <num call arguments>, <call target>,
///   [call arguments...],
///   <StackMaps::ConstantOp>, <calling convention>,
///   <StackMaps::ConstantOp>, <statepoint flags>,
///   <StackMaps::ConstantOp>, <num deopt args>, [deopt args...],
///   <StackMaps::ConstantOp>, <num gc pointer args>, [gc pointer args...],
///   <StackMaps::ConstantOp>, <num gc allocas>, [gc allocas args...],
///   <StackMaps::ConstantOp>, <num entries in gc map>, [base/derived pairs]
///   base/derived pairs in gc map are logical indices into <gc pointer args>
///   section.
///   All gc pointers assigned to VRegs produce a new value (in the form of an
///   MI Def operand) and are tied to it.
class StatepointOpers {
  // TODO: we should change the STATEPOINT representation so that CC and
  // Flags should be part of meta operands, with args and deopt operands, and
  // gc operands all prefixed by their length and a type code. This would be
  // much more consistent.

  // These values are absolute offsets into the operands of the statepoint
  // instruction.
  enum { IDPos, NBytesPos, NCallArgsPos, CallTargetPos, MetaEnd };

  // These values are relative offsets from the start of the statepoint meta
  // arguments (i.e. the end of the call arguments).
  enum { CCOffset = 1, FlagsOffset = 3, NumDeoptOperandsOffset = 5 };

public:
  explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {
    NumDefs = MI->getNumDefs();
  }

  /// Get index of statepoint ID operand.
  unsigned getIDPos() const { return NumDefs + IDPos; }

  /// Get index of Num Patch Bytes operand.
  unsigned getNBytesPos() const { return NumDefs + NBytesPos; }

  /// Get index of Num Call Arguments operand.
  unsigned getNCallArgsPos() const { return NumDefs + NCallArgsPos; }

  /// Get starting index of non call related arguments
  /// (calling convention, statepoint flags, vm state and gc state).
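  /// For example (illustrative), a statepoint with one def and two call
  /// arguments has its meta operands at indices 1 through 4, its call
  /// arguments at indices 5 and 6, and its variable operands starting at
  /// index 2 + 4 + 1 = 7.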
  unsigned getVarIdx() const {
    return MI->getOperand(NumDefs + NCallArgsPos).getImm() + MetaEnd + NumDefs;
  }

  /// Get index of Calling Convention operand.
  unsigned getCCIdx() const { return getVarIdx() + CCOffset; }

  /// Get index of Flags operand.
  unsigned getFlagsIdx() const { return getVarIdx() + FlagsOffset; }

  /// Get index of Number Deopt Arguments operand.
  unsigned getNumDeoptArgsIdx() const {
    return getVarIdx() + NumDeoptOperandsOffset;
  }

  /// Return the ID for the given statepoint.
  uint64_t getID() const { return MI->getOperand(NumDefs + IDPos).getImm(); }

  /// Return the number of patchable bytes the given statepoint should emit.
  uint32_t getNumPatchBytes() const {
    return MI->getOperand(NumDefs + NBytesPos).getImm();
  }

  /// Return the target of the underlying call.
  const MachineOperand &getCallTarget() const {
    return MI->getOperand(NumDefs + CallTargetPos);
  }

  /// Return the calling convention.
  CallingConv::ID getCallingConv() const {
    return MI->getOperand(getCCIdx()).getImm();
  }

  /// Return the statepoint flags.
  uint64_t getFlags() const { return MI->getOperand(getFlagsIdx()).getImm(); }

  uint64_t getNumDeoptArgs() const {
    return MI->getOperand(getNumDeoptArgsIdx()).getImm();
  }

  /// Get index of number of gc map entries.
  unsigned getNumGcMapEntriesIdx();

  /// Get index of number of gc allocas.
  unsigned getNumAllocaIdx();

  /// Get index of number of GC pointers.
  unsigned getNumGCPtrIdx();

  /// Get index of first GC pointer operand, or -1 if there are none.
  int getFirstGCPtrIdx();

  /// Get vector of base/derived pairs from statepoint.
  /// Elements are indices into GC Pointer operand list (logical).
  /// Returns number of elements in GCMap.
  unsigned
  getGCPointerMap(SmallVectorImpl<std::pair<unsigned, unsigned>> &GCMap);

  /// Return true if Reg is used only in operands which can be folded to
  /// stack usage.
  bool isFoldableReg(Register Reg) const;

  /// Return true if Reg is used only in operands of MI which can be folded to
  /// stack usage and MI is a statepoint instruction.
  static bool isFoldableReg(const MachineInstr *MI, Register Reg);

private:
  const MachineInstr *MI;
  unsigned NumDefs;
};

class StackMaps {
public:
  struct Location {
    enum LocationType {
      Unprocessed,
      Register,
      Direct,
      Indirect,
      Constant,
      ConstantIndex
    };
    LocationType Type = Unprocessed;
    unsigned Size = 0;
    unsigned Reg = 0;
    int64_t Offset = 0;

    Location() = default;
    Location(LocationType Type, unsigned Size, unsigned Reg, int64_t Offset)
        : Type(Type), Size(Size), Reg(Reg), Offset(Offset) {}
  };

  struct LiveOutReg {
    unsigned short Reg = 0;
    unsigned short DwarfRegNum = 0;
    unsigned short Size = 0;

    LiveOutReg() = default;
    LiveOutReg(unsigned short Reg, unsigned short DwarfRegNum,
               unsigned short Size)
        : Reg(Reg), DwarfRegNum(DwarfRegNum), Size(Size) {}
  };

  // OpTypes are used to encode information about the following logical
  // operand (which may consist of several MachineOperands) for the
  // OpParser.
  using OpType = enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp };

  StackMaps(AsmPrinter &AP);

  /// Get index of next meta operand.
  /// Similar to parseOperand, but does not actually parse the operand meaning.
  static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx);

  void reset() {
    CSInfos.clear();
    ConstPool.clear();
    FnInfos.clear();
  }

  using LocationVec = SmallVector<Location, 8>;
  using LiveOutVec = SmallVector<LiveOutReg, 8>;
  using ConstantPool = MapVector<uint64_t, uint64_t>;

  struct FunctionInfo {
    uint64_t StackSize = 0;
    uint64_t RecordCount = 1;

    FunctionInfo() = default;
    explicit FunctionInfo(uint64_t StackSize) : StackSize(StackSize) {}
  };

  struct CallsiteInfo {
    const MCExpr *CSOffsetExpr = nullptr;
    uint64_t ID = 0;
    LocationVec Locations;
    LiveOutVec LiveOuts;

    CallsiteInfo() = default;
    CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
                 LocationVec &&Locations, LiveOutVec &&LiveOuts)
        : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)),
          LiveOuts(std::move(LiveOuts)) {}
  };

  using FnInfoMap = MapVector<const MCSymbol *, FunctionInfo>;
  using CallsiteInfoList = std::vector<CallsiteInfo>;

  /// Generate a stackmap record for a stackmap instruction.
  ///
  /// MI must be a raw STACKMAP, not a PATCHPOINT.
  void recordStackMap(const MCSymbol &L,
                      const MachineInstr &MI);

  /// Generate a stackmap record for a patchpoint instruction.
  void recordPatchPoint(const MCSymbol &L,
                        const MachineInstr &MI);

  /// Generate a stackmap record for a statepoint instruction.
  void recordStatepoint(const MCSymbol &L,
                        const MachineInstr &MI);

  /// If there is any stack map data, create a stack map section and serialize
  /// the map info into it. This clears the stack map data structures
  /// afterwards.
  void serializeToStackMapSection();
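
  /// A typical driver (illustrative sketch; \c SM, \c Label, and \c MI are
  /// assumed to be in scope) records each stackmap-like instruction as it is
  /// lowered and serializes once at the end:
  /// \code
  ///   SM.recordStackMap(*Label, MI);   // per STACKMAP instruction
  ///   // ... later, after all functions have been emitted:
  ///   SM.serializeToStackMapSection();
  /// \endcode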

  /// Get call site info.
  CallsiteInfoList &getCSInfos() { return CSInfos; }

  /// Get function info.
  FnInfoMap &getFnInfos() { return FnInfos; }

private:
  static const char *WSMP;

  AsmPrinter &AP;
  CallsiteInfoList CSInfos;
  ConstantPool ConstPool;
  FnInfoMap FnInfos;

  MachineInstr::const_mop_iterator
  parseOperand(MachineInstr::const_mop_iterator MOI,
               MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
               LiveOutVec &LiveOuts) const;

  /// Specialized parser of statepoint operands.
  /// They do not directly correspond to StackMap record entries.
  void parseStatepointOpers(const MachineInstr &MI,
                            MachineInstr::const_mop_iterator MOI,
                            MachineInstr::const_mop_iterator MOE,
                            LocationVec &Locations, LiveOutVec &LiveOuts);

  /// Create a live-out register record for the given register \p Reg.
  LiveOutReg createLiveOutReg(unsigned Reg,
                              const TargetRegisterInfo *TRI) const;

  /// Parse the register live-out mask and return a vector of live-out
  /// registers that need to be recorded in the stackmap.
  LiveOutVec parseRegisterLiveOutMask(const uint32_t *Mask) const;

  /// Record the locations of the operands of the provided instruction in a
  /// record keyed by the provided label.  For instructions w/AnyReg calling
  /// convention the return register is also recorded if requested.  For
  /// STACKMAP and PATCHPOINT, the label is expected to immediately *precede*
  /// lowering of the MI to MCInsts.  For STATEPOINT, it is expected to
  /// immediately *follow*.  It's not clear this difference was intentional,
  /// but it exists today.
  void recordStackMapOpers(const MCSymbol &L,
                           const MachineInstr &MI, uint64_t ID,
                           MachineInstr::const_mop_iterator MOI,
                           MachineInstr::const_mop_iterator MOE,
                           bool recordResult = false);

  /// Emit the stackmap header.
  void emitStackmapHeader(MCStreamer &OS);

  /// Emit the function frame record for each function.
  void emitFunctionFrameRecords(MCStreamer &OS);

  /// Emit the constant pool.
  void emitConstantPoolEntries(MCStreamer &OS);

  /// Emit the callsite info for each stackmap/patchpoint intrinsic call.
  void emitCallsiteEntries(MCStreamer &OS);

  void print(raw_ostream &OS);
  void debug() { print(dbgs()); }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_STACKMAPS_H
//===-- MachineCFGPrinter.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file defines DOT graph traits for printing the Machine CFG.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/DOTGraphTraits.h"

namespace llvm {

template <class GraphType> struct GraphTraits;
class DOTMachineFuncInfo {
private:
  const MachineFunction *F;

public:
  DOTMachineFuncInfo(const MachineFunction *F) : F(F) {}

  const MachineFunction *getFunction() const { return this->F; }
};

template <>
struct GraphTraits<DOTMachineFuncInfo *>
    : public GraphTraits<const MachineBasicBlock *> {
  static NodeRef getEntryNode(DOTMachineFuncInfo *CFGInfo) {
    return &(CFGInfo->getFunction()->front());
  }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<MachineFunction::const_iterator>;

  static nodes_iterator nodes_begin(DOTMachineFuncInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->begin());
  }

  static nodes_iterator nodes_end(DOTMachineFuncInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->end());
  }

  static size_t size(DOTMachineFuncInfo *CFGInfo) {
    return CFGInfo->getFunction()->size();
  }
};

template <>
struct DOTGraphTraits<DOTMachineFuncInfo *> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static void eraseComment(std::string &OutStr, unsigned &I, unsigned Idx) {
    OutStr.erase(OutStr.begin() + I, OutStr.begin() + Idx);
    --I;
  }

  static std::string getSimpleNodeLabel(const MachineBasicBlock *Node,
                                        DOTMachineFuncInfo *) {
    return SimpleNodeLabelString(Node);
  }

  static std::string getCompleteNodeLabel(
      const MachineBasicBlock *Node, DOTMachineFuncInfo *,
      function_ref<void(raw_string_ostream &, const MachineBasicBlock &)>
          HandleBasicBlock =
              [](raw_string_ostream &OS,
                 const MachineBasicBlock &Node) -> void { OS << Node; },
      function_ref<void(std::string &, unsigned &, unsigned)>
          HandleComment = eraseComment) {
    return CompleteNodeLabelString(Node, HandleBasicBlock, HandleComment);
  }

  std::string getNodeLabel(const MachineBasicBlock *Node,
                           DOTMachineFuncInfo *CFGInfo) {
    if (isSimple())
      return getSimpleNodeLabel(Node, CFGInfo);

    return getCompleteNodeLabel(Node, CFGInfo);
  }

  static std::string getGraphName(DOTMachineFuncInfo *CFGInfo) {
    return "Machine CFG for '" + CFGInfo->getFunction()->getName().str() +
           "' function";
  }
};
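
// With these GraphTraits/DOTGraphTraits specializations in place, the machine
// CFG can be emitted as a DOT graph via the generic graph writer (an
// illustrative sketch; MF and OS are assumed to be in scope):
//
//   DOTMachineFuncInfo CFGInfo(&MF);
//   WriteGraph(OS, &CFGInfo, /*ShortNames=*/false);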
} // namespace llvm

#endif // LLVM_CODEGEN_MACHINECFGPRINTER_H
//===- RegisterPressure.h - Dynamic Register Pressure -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegisterPressure class which can be used to track
// MachineInstr level register pressure.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
#define LLVM_CODEGEN_REGISTERPRESSURE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <vector>

namespace llvm {

class LiveIntervals;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class RegisterClassInfo;

struct RegisterMaskPair {
  Register RegUnit; ///< Virtual register or register unit.
  LaneBitmask LaneMask;

  RegisterMaskPair(Register RegUnit, LaneBitmask LaneMask)
      : RegUnit(RegUnit), LaneMask(LaneMask) {}
};

/// Base class for register pressure results.
struct RegisterPressure {
  /// Map of max reg pressure indexed by pressure set ID, not class ID.
  std::vector<unsigned> MaxSetPressure;

  /// List of live in virtual registers or physical register units.
  SmallVector<RegisterMaskPair,8> LiveInRegs;
  SmallVector<RegisterMaskPair,8> LiveOutRegs;

  void dump(const TargetRegisterInfo *TRI) const;
};

/// RegisterPressure computed within a region of instructions delimited by
/// TopIdx and BottomIdx.  During pressure computation, the maximum pressure per
/// register pressure set is increased. Once pressure within a region is fully
/// computed, the live-in and live-out sets are recorded.
///
/// This is preferable to RegionPressure when LiveIntervals are available,
/// because delimiting regions by SlotIndex is more robust and convenient than
/// holding block iterators. The block contents can change without invalidating
/// the pressure result.
struct IntervalPressure : RegisterPressure {
  /// Record the boundary of the region being tracked.
  SlotIndex TopIdx;
  SlotIndex BottomIdx;

  void reset();

  void openTop(SlotIndex NextTop);

  void openBottom(SlotIndex PrevBottom);
};

/// RegisterPressure computed within a region of instructions delimited by
/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
/// use when LiveIntervals are unavailable.
struct RegionPressure : RegisterPressure {
  /// Record the boundary of the region being tracked.
  MachineBasicBlock::const_iterator TopPos;
  MachineBasicBlock::const_iterator BottomPos;

  void reset();

  void openTop(MachineBasicBlock::const_iterator PrevTop);

  void openBottom(MachineBasicBlock::const_iterator PrevBottom);
};

/// Capture a change in pressure for a single pressure set. UnitInc may be
/// expressed in terms of upward or downward pressure depending on the client
/// and will be dynamically adjusted for current liveness.
///
/// Pressure increments are tiny, typically 1-2 units, and this is only for
/// heuristics, so we don't check UnitInc overflow. Instead, we may have a
/// higher level assert that pressure is consistent within a region. We also
/// effectively ignore dead defs which don't affect heuristics much.
class PressureChange {
  uint16_t PSetID = 0; // ID+1. 0=Invalid.
  int16_t UnitInc = 0;

public:
  PressureChange() = default;
  PressureChange(unsigned id): PSetID(id + 1) {
    assert(id < std::numeric_limits<uint16_t>::max() && "PSetID overflow.");
  }

  bool isValid() const { return PSetID > 0; }

  unsigned getPSet() const {
    assert(isValid() && "invalid PressureChange");
    return PSetID - 1;
  }

  // If PSetID is invalid, return UINT16_MAX to give it lowest priority.
  unsigned getPSetOrMax() const {
    return (PSetID - 1) & std::numeric_limits<uint16_t>::max();
  }

  int getUnitInc() const { return UnitInc; }

  void setUnitInc(int Inc) { UnitInc = Inc; }

  bool operator==(const PressureChange &RHS) const {
    return PSetID == RHS.PSetID && UnitInc == RHS.UnitInc;
  }

  void dump() const;
};
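
/// Example (illustrative): encode a +2 register-unit increment for pressure
/// set 3. The PSet ID is stored internally as ID+1 so that 0 means invalid.
/// \code
///   PressureChange PC(3);
///   PC.setUnitInc(2);
///   assert(PC.isValid() && PC.getPSet() == 3 && PC.getUnitInc() == 2);
/// \endcode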

/// List of PressureChanges in order of increasing, unique PSetID.
///
/// Use a small fixed number, because we can fit more PressureChanges in an
/// empty SmallVector than ever need to be tracked per register class. If more
/// PSets are affected, then we only track the most constrained.
class PressureDiff {
  // The initial design was for MaxPSets=4, but that requires PSet partitions,
  // which are not yet implemented. (PSet partitions are equivalent PSets given
  // the register classes actually in use within the scheduling region.)
  enum { MaxPSets = 16 };

  PressureChange PressureChanges[MaxPSets];

  using iterator = PressureChange *;

  iterator nonconst_begin() { return &PressureChanges[0]; }
  iterator nonconst_end() { return &PressureChanges[MaxPSets]; }

public:
  using const_iterator = const PressureChange *;

  const_iterator begin() const { return &PressureChanges[0]; }
  const_iterator end() const { return &PressureChanges[MaxPSets]; }

  void addPressureChange(Register RegUnit, bool IsDec,
                         const MachineRegisterInfo *MRI);

  void dump(const TargetRegisterInfo &TRI) const;
};

/// List of registers defined and used by a machine instruction.
class RegisterOperands {
public:
  /// List of virtual registers and register units read by the instruction.
  SmallVector<RegisterMaskPair, 8> Uses;
  /// List of virtual registers and register units defined by the
  /// instruction which are not dead.
  SmallVector<RegisterMaskPair, 8> Defs;
  /// List of virtual registers and register units defined by the
  /// instruction but dead.
  SmallVector<RegisterMaskPair, 8> DeadDefs;

  /// Analyze the given instruction \p MI and fill in the Uses, Defs and
  /// DeadDefs list based on the MachineOperand flags.
  void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI,
               const MachineRegisterInfo &MRI, bool TrackLaneMasks,
               bool IgnoreDead);

  /// Use liveness information to find dead defs not marked with a dead flag
  /// and move them to the DeadDefs vector.
  void detectDeadDefs(const MachineInstr &MI, const LiveIntervals &LIS);

  /// Use liveness information to find out which uses/defs are partially
  /// undefined/dead and adjust the RegisterMaskPairs accordingly.
  /// If \p AddFlagsMI is given then missing read-undef and dead flags will be
  /// added to the instruction.
  void adjustLaneLiveness(const LiveIntervals &LIS,
                          const MachineRegisterInfo &MRI, SlotIndex Pos,
                          MachineInstr *AddFlagsMI = nullptr);
};
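
/// Example (illustrative sketch): collect the register effects of an
/// instruction and print its non-dead defs. \p MI, \p TRI and \p MRI are
/// assumed to come from the enclosing function.
/// \code
///   RegisterOperands RegOpers;
///   RegOpers.collect(MI, TRI, MRI, /*TrackLaneMasks=*/false,
///                    /*IgnoreDead=*/false);
///   for (const RegisterMaskPair &Def : RegOpers.Defs)
///     dbgs() << printReg(Def.RegUnit, &TRI) << '\n';
/// \endcode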

/// Array of PressureDiffs.
class PressureDiffs {
  PressureDiff *PDiffArray = nullptr;
  unsigned Size = 0;
  unsigned Max = 0;

public:
  PressureDiffs() = default;
  PressureDiffs &operator=(const PressureDiffs &other) = delete;
  PressureDiffs(const PressureDiffs &other) = delete;
  ~PressureDiffs() { free(PDiffArray); }

  void clear() { Size = 0; }

  void init(unsigned N);

  PressureDiff &operator[](unsigned Idx) {
    assert(Idx < Size && "PressureDiff index out of bounds");
    return PDiffArray[Idx];
  }
  const PressureDiff &operator[](unsigned Idx) const {
    return const_cast<PressureDiffs*>(this)->operator[](Idx);
  }

  /// Record the pressure difference induced by the given operand list to
  /// the node with index \p Idx.
  void addInstruction(unsigned Idx, const RegisterOperands &RegOpers,
                      const MachineRegisterInfo &MRI);
};
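
/// Example (illustrative sketch): size the array for \p NumNodes scheduling
/// nodes, then record the effect of the instruction at node \p Idx using a
/// previously collected RegisterOperands \p RegOpers.
/// \code
///   PressureDiffs PDiffs;
///   PDiffs.init(NumNodes);
///   PDiffs.addInstruction(Idx, RegOpers, MRI);
/// \endcode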

/// Store the effects of a change in pressure on things that the MI scheduler
/// cares about.
///
/// Excess records the value of the largest difference in register units beyond
/// the target's pressure limits across the affected pressure sets, where
/// largest is defined as the absolute value of the difference. Negative
/// ExcessUnits indicates a reduction in pressure that had already exceeded the
/// target's limits.
///
/// CriticalMax records the largest increase in the tracker's max pressure that
/// exceeds the critical limit for some pressure set determined by the client.
///
/// CurrentMax records the largest increase in the tracker's max pressure that
/// exceeds the current limit for some pressure set determined by the client.
struct RegPressureDelta {
  PressureChange Excess;
  PressureChange CriticalMax;
  PressureChange CurrentMax;

  RegPressureDelta() = default;

  bool operator==(const RegPressureDelta &RHS) const {
    return Excess == RHS.Excess && CriticalMax == RHS.CriticalMax
      && CurrentMax == RHS.CurrentMax;
  }
  bool operator!=(const RegPressureDelta &RHS) const {
    return !operator==(RHS);
  }
  void dump() const;
};

/// A set of live virtual registers and physical register units.
///
/// This is a wrapper around a SparseSet which deals with mapping register unit
/// and virtual register indexes to an index usable by the sparse set.
class LiveRegSet {
private:
  struct IndexMaskPair {
    unsigned Index;
    LaneBitmask LaneMask;

    IndexMaskPair(unsigned Index, LaneBitmask LaneMask)
        : Index(Index), LaneMask(LaneMask) {}

    unsigned getSparseSetIndex() const {
      return Index;
    }
  };

  using RegSet = SparseSet<IndexMaskPair>;
  RegSet Regs;
  unsigned NumRegUnits = 0u;

  unsigned getSparseIndexFromReg(Register Reg) const {
    if (Reg.isVirtual())
      return Register::virtReg2Index(Reg) + NumRegUnits;
    assert(Reg < NumRegUnits);
    return Reg;
  }

  Register getRegFromSparseIndex(unsigned SparseIndex) const {
    if (SparseIndex >= NumRegUnits)
      return Register::index2VirtReg(SparseIndex - NumRegUnits);
    return Register(SparseIndex);
  }

public:
  void clear();
  void init(const MachineRegisterInfo &MRI);

  LaneBitmask contains(Register Reg) const {
    unsigned SparseIndex = getSparseIndexFromReg(Reg);
    RegSet::const_iterator I = Regs.find(SparseIndex);
    if (I == Regs.end())
      return LaneBitmask::getNone();
    return I->LaneMask;
  }

  /// Mark the \p Pair.LaneMask lanes of \p Pair.Reg as live.
  /// Returns the previously live lanes of \p Pair.Reg.
  LaneBitmask insert(RegisterMaskPair Pair) {
    unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
    auto InsertRes = Regs.insert(IndexMaskPair(SparseIndex, Pair.LaneMask));
    if (!InsertRes.second) {
      LaneBitmask PrevMask = InsertRes.first->LaneMask;
      InsertRes.first->LaneMask |= Pair.LaneMask;
      return PrevMask;
    }
    return LaneBitmask::getNone();
  }

  /// Clears the \p Pair.LaneMask lanes of \p Pair.Reg (mark them as dead).
  /// Returns the previously live lanes of \p Pair.Reg.
  LaneBitmask erase(RegisterMaskPair Pair) {
    unsigned SparseIndex = getSparseIndexFromReg(Pair.RegUnit);
    RegSet::iterator I = Regs.find(SparseIndex);
    if (I == Regs.end())
      return LaneBitmask::getNone();
    LaneBitmask PrevMask = I->LaneMask;
    I->LaneMask &= ~Pair.LaneMask;
    return PrevMask;
  }

  size_t size() const {
    return Regs.size();
  }

  template<typename ContainerT>
  void appendTo(ContainerT &To) const {
    for (const IndexMaskPair &P : Regs) {
      Register Reg = getRegFromSparseIndex(P.Index);
      if (P.LaneMask.any())
        To.push_back(RegisterMaskPair(Reg, P.LaneMask));
    }
  }
};
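
/// Example (illustrative): mark some lanes of a register as live, then clear
/// them again. \p Reg and \p Mask are assumed to be provided by the client.
/// \code
///   LiveRegSet LRS;
///   LRS.init(MRI);
///   LRS.insert(RegisterMaskPair(Reg, Mask));
///   assert(LRS.contains(Reg) == Mask);
///   LRS.erase(RegisterMaskPair(Reg, Mask));
/// \endcode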

/// Track the current register pressure at some position in the instruction
/// stream, and remember the high water mark within the region traversed. This
/// does not automatically consider live-through ranges. The client may
/// independently adjust for global liveness.
///
/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
/// be tracked across a larger region by storing a RegisterPressure result at
/// each block boundary and explicitly adjusting pressure to account for block
/// live-in and live-out register sets.
///
/// RegPressureTracker holds a reference to a RegisterPressure result that it
/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
/// is invalid until it reaches the end of the block or closeRegion() is
/// explicitly called. Similarly, P.TopIdx is invalid during upward
/// tracking. Changing direction has the side effect of closing the region, and
/// traversing past TopIdx or BottomIdx reopens it.
class RegPressureTracker {
  const MachineFunction *MF = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const RegisterClassInfo *RCI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const LiveIntervals *LIS = nullptr;

  /// We currently only allow pressure tracking within a block.
  const MachineBasicBlock *MBB = nullptr;

  /// Track the max pressure within the region traversed so far.
  RegisterPressure &P;

  /// Run in two modes depending on whether constructed with IntervalPressure
  /// or RegionPressure. If RequireIntervals is false, LIS is ignored.
  bool RequireIntervals;

  /// True if UntiedDefs will be populated.
  bool TrackUntiedDefs = false;

  /// True if lanemasks should be tracked.
  bool TrackLaneMasks = false;

  /// Register pressure corresponds to liveness before this instruction
  /// iterator. It may point to the end of the block or a DebugValue rather than
  /// an instruction.
  MachineBasicBlock::const_iterator CurrPos;

  /// Pressure map indexed by pressure set ID, not class ID.
  std::vector<unsigned> CurrSetPressure;

  /// Set of live registers.
  LiveRegSet LiveRegs;

  /// Set of vreg defs that start a live range.
  SparseSet<Register, VirtReg2IndexFunctor> UntiedDefs;
  /// Live-through pressure.
  std::vector<unsigned> LiveThruPressure;

public:
  RegPressureTracker(IntervalPressure &rp) : P(rp), RequireIntervals(true) {}
  RegPressureTracker(RegionPressure &rp) : P(rp), RequireIntervals(false) {}

  void reset();

  void init(const MachineFunction *mf, const RegisterClassInfo *rci,
            const LiveIntervals *lis, const MachineBasicBlock *mbb,
            MachineBasicBlock::const_iterator pos,
            bool TrackLaneMasks, bool TrackUntiedDefs);

  /// Force liveness of virtual registers or physical register
  /// units. Particularly useful to initialize the livein/out state of the
  /// tracker before the first call to advance/recede.
  void addLiveRegs(ArrayRef<RegisterMaskPair> Regs);

  /// Get the MI position corresponding to this register pressure.
  MachineBasicBlock::const_iterator getPos() const { return CurrPos; }

  // Reset the MI position corresponding to the register pressure. This allows
  // schedulers to move instructions above the RegPressureTracker's
  // CurrPos. Since the pressure is computed before CurrPos, the iterator
  // position changes while pressure does not.
  void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }

  /// Recede across the previous instruction.
  void recede(SmallVectorImpl<RegisterMaskPair> *LiveUses = nullptr);

  /// Recede across the previous instruction.
  /// This "low-level" variant assumes that recedeSkipDebugValues() was
  /// called previously and takes precomputed RegisterOperands for the
  /// instruction.
  void recede(const RegisterOperands &RegOpers,
              SmallVectorImpl<RegisterMaskPair> *LiveUses = nullptr);

  /// Recede until we find an instruction which is not a DebugValue.
  void recedeSkipDebugValues();

  /// Advance across the current instruction.
  void advance();

  /// Advance across the current instruction.
  /// This is a "low-level" variant of advance() which takes precomputed
  /// RegisterOperands of the instruction.
  void advance(const RegisterOperands &RegOpers);

  /// Finalize the region boundaries and record live-ins and live-outs.
  void closeRegion();

  /// Initialize the LiveThru pressure set based on the untied defs found in
  /// RPTracker.
  void initLiveThru(const RegPressureTracker &RPTracker);

  /// Copy an existing live thru pressure result.
  void initLiveThru(ArrayRef<unsigned> PressureSet) {
    LiveThruPressure.assign(PressureSet.begin(), PressureSet.end());
  }

  ArrayRef<unsigned> getLiveThru() const { return LiveThruPressure; }

  /// Get the resulting register pressure over the traversed region.
  /// This result is complete if closeRegion() was explicitly invoked.
  RegisterPressure &getPressure() { return P; }
  const RegisterPressure &getPressure() const { return P; }

  /// Get the register set pressure at the current position, which may be less
  /// than the pressure across the traversed region.
  const std::vector<unsigned> &getRegSetPressureAtPos() const {
    return CurrSetPressure;
  }

  bool isTopClosed() const;
  bool isBottomClosed() const;

  void closeTop();
  void closeBottom();

  /// Consider the pressure increase caused by traversing this instruction
  /// bottom-up. Find the pressure set with the most change beyond its pressure
  /// limit based on the tracker's current pressure, and record the number of
  /// excess register units of that pressure set introduced by this instruction.
  void getMaxUpwardPressureDelta(const MachineInstr *MI,
                                 PressureDiff *PDiff,
                                 RegPressureDelta &Delta,
                                 ArrayRef<PressureChange> CriticalPSets,
                                 ArrayRef<unsigned> MaxPressureLimit);

  void getUpwardPressureDelta(const MachineInstr *MI,
                              /*const*/ PressureDiff &PDiff,
                              RegPressureDelta &Delta,
                              ArrayRef<PressureChange> CriticalPSets,
                              ArrayRef<unsigned> MaxPressureLimit) const;

  /// Consider the pressure increase caused by traversing this instruction
  /// top-down. Find the pressure set with the most change beyond its pressure
  /// limit based on the tracker's current pressure, and record the number of
  /// excess register units of that pressure set introduced by this instruction.
  void getMaxDownwardPressureDelta(const MachineInstr *MI,
                                   RegPressureDelta &Delta,
                                   ArrayRef<PressureChange> CriticalPSets,
                                   ArrayRef<unsigned> MaxPressureLimit);

  /// Find the pressure set with the most change beyond its pressure limit after
  /// traversing this instruction either upward or downward depending on the
  /// closed end of the current region.
  void getMaxPressureDelta(const MachineInstr *MI,
                           RegPressureDelta &Delta,
                           ArrayRef<PressureChange> CriticalPSets,
                           ArrayRef<unsigned> MaxPressureLimit) {
    if (isTopClosed())
      return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
                                         MaxPressureLimit);

    assert(isBottomClosed() && "Uninitialized pressure tracker");
    return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
                                     MaxPressureLimit);
  }

  /// Get the pressure of each PSet after traversing this instruction bottom-up.
  void getUpwardPressure(const MachineInstr *MI,
                         std::vector<unsigned> &PressureResult,
                         std::vector<unsigned> &MaxPressureResult);

  /// Get the pressure of each PSet after traversing this instruction top-down.
  void getDownwardPressure(const MachineInstr *MI,
                           std::vector<unsigned> &PressureResult,
                           std::vector<unsigned> &MaxPressureResult);

  void getPressureAfterInst(const MachineInstr *MI,
                            std::vector<unsigned> &PressureResult,
                            std::vector<unsigned> &MaxPressureResult) {
    if (isTopClosed())
      return getUpwardPressure(MI, PressureResult, MaxPressureResult);

    assert(isBottomClosed() && "Uninitialized pressure tracker");
    return getDownwardPressure(MI, PressureResult, MaxPressureResult);
  }

  bool hasUntiedDef(Register VirtReg) const {
    return UntiedDefs.count(VirtReg);
  }

  void dump() const;

  void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask,
                           LaneBitmask NewMask);
  void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask,
                           LaneBitmask NewMask);

protected:
  /// Add Reg to the live out set and increase max pressure.
  void discoverLiveOut(RegisterMaskPair Pair);
  /// Add Reg to the live in set and increase max pressure.
  void discoverLiveIn(RegisterMaskPair Pair);

  /// Get the SlotIndex for the first nondebug instruction including or
  /// after the current position.
  SlotIndex getCurrSlot() const;

  void bumpDeadDefs(ArrayRef<RegisterMaskPair> DeadDefs);

  void bumpUpwardPressure(const MachineInstr *MI);
  void bumpDownwardPressure(const MachineInstr *MI);

  void discoverLiveInOrOut(RegisterMaskPair Pair,
                           SmallVectorImpl<RegisterMaskPair> &LiveInOrOut);

  LaneBitmask getLastUsedLanes(Register RegUnit, SlotIndex Pos) const;
  LaneBitmask getLiveLanesAt(Register RegUnit, SlotIndex Pos) const;
  LaneBitmask getLiveThroughAt(Register RegUnit, SlotIndex Pos) const;
};
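
/// Example (illustrative sketch, mirroring typical scheduler usage): walk a
/// block bottom-up with LiveIntervals available and read back the maximum
/// pressure per set. \p MF, \p RCI, \p LIS and \p MBB come from the client.
/// \code
///   IntervalPressure Pressure;
///   RegPressureTracker RPTracker(Pressure);
///   RPTracker.init(&MF, &RCI, &LIS, &MBB, MBB.end(),
///                  /*TrackLaneMasks=*/false, /*TrackUntiedDefs=*/false);
///   while (RPTracker.getPos() != MBB.begin())
///     RPTracker.recede();
///   RPTracker.closeRegion();
///   ArrayRef<unsigned> MaxPressure = Pressure.MaxSetPressure;
/// \endcode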

void dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
                        const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_REGISTERPRESSURE_H

//===- llvm/CodeGen/MachineFunction.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code for a function.  This class contains a list of
// MachineBasicBlock instances that make up the current compiled function.
//
// This class also contains pointers to various classes which hold
// target-specific information about the generated code.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
#define LLVM_CODEGEN_MACHINEFUNCTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/iterator.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Recycler.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <memory>
#include <utility>
#include <variant>
#include <vector>

namespace llvm {

class BasicBlock;
class BlockAddress;
class DataLayout;
class DebugLoc;
struct DenormalMode;
class DIExpression;
class DILocalVariable;
class DILocation;
class Function;
class GISelChangeObserver;
class GlobalValue;
class LLVMTargetMachine;
class MachineConstantPool;
class MachineFrameInfo;
class MachineFunction;
class MachineJumpTableInfo;
class MachineModuleInfo;
class MachineRegisterInfo;
class MCContext;
class MCInstrDesc;
class MCSymbol;
class MCSection;
class Pass;
class PseudoSourceValueManager;
class raw_ostream;
class SlotIndexes;
class StringRef;
class TargetRegisterClass;
class TargetSubtargetInfo;
struct WasmEHFuncInfo;
struct WinEHFuncInfo;

template <> struct ilist_alloc_traits<MachineBasicBlock> {
  void deleteNode(MachineBasicBlock *MBB);
};

template <> struct ilist_callback_traits<MachineBasicBlock> {
  void addNodeToList(MachineBasicBlock* N);
  void removeNodeFromList(MachineBasicBlock* N);

  template <class Iterator>
  void transferNodesFromList(ilist_callback_traits &OldList, Iterator, Iterator) {
    assert(this == &OldList && "never transfer MBBs between functions");
  }
};

/// MachineFunctionInfo - This class can be derived from and used by targets to
/// hold private target-specific information for each MachineFunction.  Objects
/// of this type are accessed/created with MF::getInfo and destroyed when the
/// MachineFunction is destroyed.
struct MachineFunctionInfo {
  virtual ~MachineFunctionInfo();

  /// Factory function: default behavior is to call new using the
  /// supplied allocator.
  ///
  /// This function can be overridden in a derived class.
  template <typename FuncInfoTy, typename SubtargetTy = TargetSubtargetInfo>
  static FuncInfoTy *create(BumpPtrAllocator &Allocator, const Function &F,
                            const SubtargetTy *STI) {
    return new (Allocator.Allocate<FuncInfoTy>()) FuncInfoTy(F, STI);
  }

  template <typename Ty>
  static Ty *create(BumpPtrAllocator &Allocator, const Ty &MFI) {
    return new (Allocator.Allocate<Ty>()) Ty(MFI);
  }

  /// Make a functionally equivalent copy of this MachineFunctionInfo in \p MF.
  /// This requires remapping MachineBasicBlock references from the original
  /// parent to values in the new function. Targets may assume that virtual
  /// register and frame index values are preserved in the new function.
  virtual MachineFunctionInfo *
  clone(BumpPtrAllocator &Allocator, MachineFunction &DestMF,
        const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB)
      const {
    return nullptr;
  }
};
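
/// Example (illustrative sketch): a hypothetical target-private subclass and
/// the usual way a backend retrieves it; MyTargetFunctionInfo is not a real
/// LLVM type.
/// \code
///   struct MyTargetFunctionInfo final : MachineFunctionInfo {
///     int VarArgsFrameIndex = 0;
///     MyTargetFunctionInfo(const Function &F, const TargetSubtargetInfo *STI) {}
///   };
///   auto *FI = MF.getInfo<MyTargetFunctionInfo>();
/// \endcode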

/// Properties which a MachineFunction may have at a given point in time.
/// Each of these has checking code in the MachineVerifier, and passes can
/// require that a property be set.
class MachineFunctionProperties {
  // Possible TODO: Allow targets to extend this (perhaps by allowing the
  // constructor to specify the size of the bit vector)
  // Possible TODO: Allow requiring the negative (e.g. VRegsAllocated could be
  // stated as the negative of "has vregs")

public:
  // The properties are stated in "positive" form; i.e. a pass could require
  // that the property hold, but not that it does not hold.

  // Property descriptions:
  // IsSSA: True when the machine function is in SSA form and virtual registers
  //  have a single def.
  // NoPHIs: The machine function does not contain any PHI instruction.
  // TracksLiveness: True when tracking register liveness accurately.
  //  While this property is set, register liveness information in basic block
  //  live-in lists and machine instruction operands (e.g. implicit defs) is
  //  accurate, kill flags are conservatively accurate (kill flag correctly
  //  indicates the last use of a register, an operand without kill flag may or
  //  may not be the last use of a register). This means it can be used to
  //  change the code in ways that affect the values in registers, for example
  //  by the register scavenger.
  //  When this property is cleared at a very late time, liveness is no longer
  //  reliable.
  // NoVRegs: The machine function does not use any virtual registers.
  // Legalized: In GlobalISel: the MachineLegalizer ran and all pre-isel generic
  //  instructions have been legalized; i.e., all instructions are now one of:
  //   - generic and always legal (e.g., COPY)
  //   - target-specific
  //   - legal pre-isel generic instructions.
  // RegBankSelected: In GlobalISel: the RegBankSelect pass ran and all generic
  //  virtual registers have been assigned to a register bank.
  // Selected: In GlobalISel: the InstructionSelect pass ran and all pre-isel
  //  generic instructions have been eliminated; i.e., all instructions are now
  //  target-specific or non-pre-isel generic instructions (e.g., COPY).
  //  Since only pre-isel generic instructions can have generic virtual register
  //  operands, this also means that all generic virtual registers have been
  //  constrained to virtual registers (assigned to register classes) and that
  //  all sizes attached to them have been eliminated.
  // TiedOpsRewritten: The twoaddressinstruction pass sets this flag; it
  //  means that tied defs have been rewritten to meet the RegConstraint.
  // FailsVerification: Means that the function is not expected to pass machine
  //  verification. This can be set by passes that introduce known problems that
  //  have not been fixed yet.
  // TracksDebugUserValues: Without this property enabled, debug instructions
  //  such as DBG_VALUE are allowed to reference virtual registers even if
  //  those registers do not have a definition. With the property enabled,
  //  virtual registers must only be used if they have a definition. This
  //  property allows earlier passes in the pipeline to skip updates of
  //  `DBG_VALUE` instructions to save compile time.
  enum class Property : unsigned {
    IsSSA,
    NoPHIs,
    TracksLiveness,
    NoVRegs,
    FailedISel,
    Legalized,
    RegBankSelected,
    Selected,
    TiedOpsRewritten,
    FailsVerification,
    TracksDebugUserValues,
    LastProperty = TracksDebugUserValues,
  };

  bool hasProperty(Property P) const {
    return Properties[static_cast<unsigned>(P)];
  }

  MachineFunctionProperties &set(Property P) {
    Properties.set(static_cast<unsigned>(P));
    return *this;
  }

  MachineFunctionProperties &reset(Property P) {
    Properties.reset(static_cast<unsigned>(P));
    return *this;
  }

  /// Reset all the properties.
  MachineFunctionProperties &reset() {
    Properties.reset();
    return *this;
  }

  MachineFunctionProperties &set(const MachineFunctionProperties &MFP) {
    Properties |= MFP.Properties;
    return *this;
  }

  MachineFunctionProperties &reset(const MachineFunctionProperties &MFP) {
    Properties.reset(MFP.Properties);
    return *this;
  }

  // Returns true if all properties set in V (i.e. required by a pass) are set
  // in this.
  bool verifyRequiredProperties(const MachineFunctionProperties &V) const {
    return !V.Properties.test(Properties);
  }

  /// Print the MachineFunctionProperties in human-readable form.
  void print(raw_ostream &OS) const;

private:
  BitVector Properties =
      BitVector(static_cast<unsigned>(Property::LastProperty)+1);
};
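
/// Example (illustrative): a late pass that leaves SSA form and eliminates
/// virtual registers might update the properties like this.
/// \code
///   MachineFunctionProperties &Props = MF.getProperties();
///   Props.reset(MachineFunctionProperties::Property::IsSSA);
///   Props.set(MachineFunctionProperties::Property::NoVRegs);
/// \endcode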

struct SEHHandler {
  /// Filter or finally function. Null indicates a catch-all.
  const Function *FilterOrFinally;

  /// Address of block to recover at. Null for a finally handler.
  const BlockAddress *RecoverBA;
};

/// This structure is used to retain landing pad info for the current function.
struct LandingPadInfo {
  MachineBasicBlock *LandingPadBlock;      // Landing pad block.
  SmallVector<MCSymbol *, 1> BeginLabels;  // Labels prior to invoke.
  SmallVector<MCSymbol *, 1> EndLabels;    // Labels after invoke.
  SmallVector<SEHHandler, 1> SEHHandlers;  // SEH handlers active at this lpad.
  MCSymbol *LandingPadLabel = nullptr;     // Label at beginning of landing pad.
  std::vector<int> TypeIds;                // List of type ids (filters negative).

  explicit LandingPadInfo(MachineBasicBlock *MBB)
      : LandingPadBlock(MBB) {}
};

class LLVM_EXTERNAL_VISIBILITY MachineFunction {
  Function &F;
  const LLVMTargetMachine &Target;
  const TargetSubtargetInfo *STI;
  MCContext &Ctx;
  MachineModuleInfo &MMI;

  // RegInfo - Information about each register in use in the function.
  MachineRegisterInfo *RegInfo;

  // Used to keep track of target-specific per-machine function information for
  // the target implementation.
  MachineFunctionInfo *MFInfo;

  // Keep track of objects allocated on the stack.
  MachineFrameInfo *FrameInfo;

  // Keep track of constants which are spilled to memory
  MachineConstantPool *ConstantPool;

  // Keep track of jump tables for switch instructions
  MachineJumpTableInfo *JumpTableInfo;

  // Keep track of the function section.
  MCSection *Section = nullptr;

  // Catchpad unwind destination info for wasm EH.
  // Keeps track of Wasm exception handling related data. This will be null for
  // functions that aren't using a wasm EH personality.
  WasmEHFuncInfo *WasmEHInfo = nullptr;

  // Keeps track of Windows exception handling related data. This will be null
  // for functions that aren't using a funclet-based EH personality.
  WinEHFuncInfo *WinEHInfo = nullptr;

  // Function-level unique numbering for MachineBasicBlocks.  When a
  // MachineBasicBlock is inserted into a MachineFunction, it is automatically
  // numbered, and this vector keeps track of the mapping from IDs to MBBs.
  std::vector<MachineBasicBlock*> MBBNumbering;

  // Pool-allocate MachineFunction-lifetime and IR objects.
  BumpPtrAllocator Allocator;

  // Allocation management for instructions in function.
  Recycler<MachineInstr> InstructionRecycler;

  // Allocation management for operand arrays on instructions.
  ArrayRecycler<MachineOperand> OperandRecycler;

  // Allocation management for basic blocks in function.
  Recycler<MachineBasicBlock> BasicBlockRecycler;

  // List of machine basic blocks in function
  using BasicBlockListType = ilist<MachineBasicBlock>;
  BasicBlockListType BasicBlocks;

  /// FunctionNumber - This provides a unique ID for each function emitted in
  /// this translation unit.
  ///
  unsigned FunctionNumber;

  /// Alignment - The alignment of the function.
  Align Alignment;

  /// ExposesReturnsTwice - True if the function calls setjmp or related
  /// functions with attribute "returns twice", but doesn't have
  /// the attribute itself.
  /// This is used to limit optimizations which cannot reason
  /// about the control flow of such functions.
  bool ExposesReturnsTwice = false;

  /// True if the function includes any inline assembly.
  bool HasInlineAsm = false;

  /// True if any WinCFI instructions have been emitted in this function.
  bool HasWinCFI = false;

  /// Current high-level properties of the IR of the function (e.g. is in SSA
  /// form or whether registers have been allocated)
  MachineFunctionProperties Properties;

  // Allocation management for pseudo source values.
  std::unique_ptr<PseudoSourceValueManager> PSVManager;

  /// List of moves done by a function's prolog.  Used to construct frame maps
  /// by debug and exception handling consumers.
  std::vector<MCCFIInstruction> FrameInstructions;

  /// List of basic blocks immediately following calls to _setjmp. Used to
  /// construct a table of valid longjmp targets for Windows Control Flow Guard.
  std::vector<MCSymbol *> LongjmpTargets;

  /// List of basic blocks that are the target of catchrets. Used to construct
  /// a table of valid targets for Windows EHCont Guard.
  std::vector<MCSymbol *> CatchretTargets;

  /// \name Exception Handling
  /// \{

  /// List of LandingPadInfo describing the landing pad information.
  std::vector<LandingPadInfo> LandingPads;

  /// Map a landing pad's EH symbol to the call site indexes.
  DenseMap<MCSymbol*, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// Map a landing pad to its index.
  DenseMap<const MachineBasicBlock *, unsigned> WasmLPadToIndexMap;

  /// Map of invoke call site index values to associated begin EH_LABEL.
  DenseMap<MCSymbol*, unsigned> CallSiteMap;

  /// CodeView label annotations.
  std::vector<std::pair<MCSymbol *, MDNode *>> CodeViewAnnotations;

  bool CallsEHReturn = false;
  bool CallsUnwindInit = false;
  bool HasEHCatchret = false;
  bool HasEHScopes = false;
  bool HasEHFunclets = false;
  bool IsOutlined = false;

  /// BBID to assign to the next basic block of this function.
  unsigned NextBBID = 0;

  /// Section Type for basic blocks, only relevant with basic block sections.
  BasicBlockSection BBSectionsType = BasicBlockSection::None;

  /// List of C++ TypeInfo used.
  std::vector<const GlobalValue *> TypeInfos;

  /// List of typeids encoding filters used.
  std::vector<unsigned> FilterIds;

  /// List of the indices in FilterIds corresponding to filter terminators.
  std::vector<unsigned> FilterEnds;

  EHPersonality PersonalityTypeCache = EHPersonality::Unknown;

  /// \}

  /// Clear all the members of this MachineFunction, except the ones used
  /// to initialize the MachineFunction again.
  /// More specifically, this deallocates all the dynamically allocated
  /// objects, gets rid of all the XXXInfo data structures, and keeps
  /// unchanged the references to Fn, Target, MMI, and FunctionNumber.
  void clear();
  /// Allocate and initialize the different members.
  /// In particular, the XXXInfo data structure.
  /// \pre Fn, Target, MMI, and FunctionNumber are properly set.
  void init();

public:
  /// Description of the location of a variable whose Address is valid and
  /// unchanging during function execution. The Address may be:
  /// * A stack index, which can be negative for fixed stack objects.
  /// * A MCRegister, whose entry value contains the address of the variable.
  class VariableDbgInfo {
    std::variant<int, MCRegister> Address;

  public:
    const DILocalVariable *Var;
    const DIExpression *Expr;
    const DILocation *Loc;

    VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
                    int Slot, const DILocation *Loc)
        : Address(Slot), Var(Var), Expr(Expr), Loc(Loc) {}

    VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
                    MCRegister EntryValReg, const DILocation *Loc)
        : Address(EntryValReg), Var(Var), Expr(Expr), Loc(Loc) {}

    /// Return true if this variable is in a stack slot.
    bool inStackSlot() const { return std::holds_alternative<int>(Address); }

    /// Return true if this variable is in the entry value of a register.
    bool inEntryValueRegister() const {
      return std::holds_alternative<MCRegister>(Address);
    }

    /// Returns the stack slot of this variable, assuming `inStackSlot()` is
    /// true.
    int getStackSlot() const { return std::get<int>(Address); }

    /// Returns the MCRegister of this variable, assuming
    /// `inEntryValueRegister()` is true.
    MCRegister getEntryValueRegister() const {
      return std::get<MCRegister>(Address);
    }

    /// Updates the stack slot of this variable, assuming `inStackSlot()` is
    /// true.
    void updateStackSlot(int NewSlot) {
      assert(inStackSlot());
      Address = NewSlot;
    }
  };
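
  /// Example (illustrative): describe a variable living in a stack slot and
  /// read the slot back later. \p Var, \p Expr, \p FrameIndex and \p Loc are
  /// assumed to be supplied by the debug-info client.
  /// \code
  ///   VariableDbgInfo VDI(Var, Expr, FrameIndex, Loc);
  ///   if (VDI.inStackSlot())
  ///     dbgs() << "slot: " << VDI.getStackSlot() << '\n';
  /// \endcode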

  class Delegate {
    virtual void anchor();

  public:
    virtual ~Delegate() = default;
    /// Callback after an insertion. This should not modify the MI directly.
    virtual void MF_HandleInsertion(MachineInstr &MI) = 0;
    /// Callback before a removal. This should not modify the MI directly.
    virtual void MF_HandleRemoval(MachineInstr &MI) = 0;
  };
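
  /// Example (illustrative sketch): a minimal delegate that counts insertions
  /// and removals. It must be registered with setDelegate and later released
  /// with resetDelegate.
  /// \code
  ///   struct CountingDelegate : MachineFunction::Delegate {
  ///     unsigned NumInserted = 0, NumRemoved = 0;
  ///     void MF_HandleInsertion(MachineInstr &) override { ++NumInserted; }
  ///     void MF_HandleRemoval(MachineInstr &) override { ++NumRemoved; }
  ///   };
  /// \endcode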

  /// Structure used to represent a pair of the argument number after call
  /// lowering and the register used to transfer that argument.
  /// For now we support only cases when an argument is transferred through
  /// one register.
  struct ArgRegPair {
    Register Reg;
    uint16_t ArgNo;
    ArgRegPair(Register R, unsigned Arg) : Reg(R), ArgNo(Arg) {
      assert(Arg < (1 << 16) && "Arg out of range");
    }
  };
  /// Vector of call argument and its forwarding register.
  using CallSiteInfo = SmallVector<ArgRegPair, 1>;
  using CallSiteInfoImpl = SmallVectorImpl<ArgRegPair>;

private:
  Delegate *TheDelegate = nullptr;
  GISelChangeObserver *Observer = nullptr;

  using CallSiteInfoMap = DenseMap<const MachineInstr *, CallSiteInfo>;
  /// Map a call instruction to call site arguments forwarding info.
  CallSiteInfoMap CallSitesInfo;

  /// A helper function that returns call site info for a given call
  /// instruction if debug entry value support is enabled.
  CallSiteInfoMap::iterator getCallSiteInfo(const MachineInstr *MI);

  // Callbacks for insertion and removal.
  void handleInsertion(MachineInstr &MI);
  void handleRemoval(MachineInstr &MI);
  friend struct ilist_traits<MachineInstr>;

public:
  using VariableDbgInfoMapTy = SmallVector<VariableDbgInfo, 4>;
  VariableDbgInfoMapTy VariableDbgInfos;

  /// A count of how many instructions in the function have had numbers
  /// assigned to them. Used for debug value tracking, to determine the
  /// next instruction number.
  unsigned DebugInstrNumberingCount = 0;

  /// Set value of DebugInstrNumberingCount field. Avoid using this unless
  /// you're deserializing this data.
  void setDebugInstrNumberingCount(unsigned Num);

  /// Pair of instruction number and operand number.
  using DebugInstrOperandPair = std::pair<unsigned, unsigned>;

  /// Replacement definition for a debug instruction reference. Made up of a
  /// source instruction / operand pair, destination pair, and a qualifying
  /// subregister indicating what bits in the operand make up the substitution.
  /// For example, a debug user of %1:
  ///    %0:gr32 = someinst, debug-instr-number 1
  ///    %1:gr16 = %0.some_16_bit_subreg, debug-instr-number 2
  /// Would receive the substitution {{2, 0}, {1, 0}, $subreg}, where $subreg is
  /// the subregister number for some_16_bit_subreg.
  class DebugSubstitution {
  public:
    DebugInstrOperandPair Src;  ///< Source instruction / operand pair.
    DebugInstrOperandPair Dest; ///< Replacement instruction / operand pair.
    unsigned Subreg;            ///< Qualifier for which part of Dest is read.

    DebugSubstitution(const DebugInstrOperandPair &Src,
                      const DebugInstrOperandPair &Dest, unsigned Subreg)
        : Src(Src), Dest(Dest), Subreg(Subreg) {}

    /// Order only by source instruction / operand pair: there should never
    /// be duplicate entries for the same source in any collection.
    bool operator<(const DebugSubstitution &Other) const {
      return Src < Other.Src;
    }
  };

  /// Debug value substitutions: a collection of DebugSubstitution objects,
  /// recording changes in where a value is defined. For example, when one
  /// instruction is substituted for another. Keeping a record allows recovery
  /// of variable locations after compilation finishes.
  SmallVector<DebugSubstitution, 8> DebugValueSubstitutions;

  /// Location of a PHI instruction that is also a debug-info variable value,
  /// for the duration of register allocation. Loaded by the PHI-elimination
  /// pass, and emitted as DBG_PHI instructions during VirtRegRewriter, with
  /// maintenance applied by intermediate passes that edit registers (such as
  /// coalescing and the allocator passes).
  class DebugPHIRegallocPos {
  public:
    MachineBasicBlock *MBB; ///< Block where this PHI was originally located.
    Register Reg;           ///< VReg where the control-flow-merge happens.
    unsigned SubReg;        ///< Optional subreg qualifier within Reg.
    DebugPHIRegallocPos(MachineBasicBlock *MBB, Register Reg, unsigned SubReg)
        : MBB(MBB), Reg(Reg), SubReg(SubReg) {}
  };

  /// Map of debug instruction numbers to the position of their PHI instructions
  /// during register allocation. See DebugPHIRegallocPos.
  DenseMap<unsigned, DebugPHIRegallocPos> DebugPHIPositions;

  /// Flag for whether this function contains DBG_VALUEs (false) or
  /// DBG_INSTR_REF (true).
  bool UseDebugInstrRef = false;

  /// Create a substitution between one <instr,operand> value to a different,
  /// new value.
  void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair,
                                  unsigned SubReg = 0);

  /// Create substitutions for any tracked values in \p Old, to point at
  /// \p New. Needed when we re-create an instruction during optimization,
  /// which has the same signature (i.e., def operands in the same place) but
  /// a modified instruction type, flags, or otherwise. An example: X86 moves
  /// are sometimes transformed into equivalent LEAs.
  /// If the two instructions are not the same opcode, limit which operands to
  /// examine for substitutions to the first N operands by setting
  /// \p MaxOperand.
  void substituteDebugValuesForInst(const MachineInstr &Old, MachineInstr &New,
                                    unsigned MaxOperand = UINT_MAX);

  /// Find the underlying defining instruction / operand for a COPY instruction
  /// while in SSA form. Copies do not actually define values -- they move them
  /// between registers. Labelling a COPY-like instruction with an instruction
  /// number is to be avoided as it makes value numbers non-unique later in
  /// compilation. This method follows the definition chain for any sequence of
  /// COPY-like instructions to find whatever non-COPY-like instruction defines
  /// the copied value; or for parameters, creates a DBG_PHI on entry.
  /// May insert instructions into the entry block!
  /// \p MI The copy-like instruction to salvage.
  /// \p DbgPHICache A container to cache already-solved COPYs.
  /// \returns An instruction/operand pair identifying the defining value.
  DebugInstrOperandPair
  salvageCopySSA(MachineInstr &MI,
                 DenseMap<Register, DebugInstrOperandPair> &DbgPHICache);

  DebugInstrOperandPair salvageCopySSAImpl(MachineInstr &MI);

  /// Finalise any partially emitted debug instructions. These are DBG_INSTR_REF
  /// instructions where we only knew the vreg of the value they use, not the
  /// instruction that defines that vreg. Once isel finishes, we should have
  /// enough information for every DBG_INSTR_REF to point at an instruction
  /// (or DBG_PHI).
  void finalizeDebugInstrRefs();

  /// Determine whether, in the current machine configuration, we should use
  /// instruction referencing or not.
  bool shouldUseDebugInstrRef() const;

  /// Returns true if the function's variable locations are tracked with
  /// instruction referencing.
  bool useDebugInstrRef() const;

  /// Set whether this function will use instruction referencing or not.
  void setUseDebugInstrRef(bool UseInstrRef);

  /// A reserved operand number representing the instruction's memory operand,
  /// for instructions that have a stack spill fused into them.
  const static unsigned int DebugOperandMemNumber;

  MachineFunction(Function &F, const LLVMTargetMachine &Target,
                  const TargetSubtargetInfo &STI, unsigned FunctionNum,
                  MachineModuleInfo &MMI);
  MachineFunction(const MachineFunction &) = delete;
  MachineFunction &operator=(const MachineFunction &) = delete;
  ~MachineFunction();

  /// Reset the instance as if it was just created.
  void reset() {
    clear();
    init();
  }

  /// Reset the currently registered delegate - otherwise assert.
  void resetDelegate(Delegate *delegate) {
    assert(TheDelegate == delegate &&
           "Only the current delegate can perform reset!");
    TheDelegate = nullptr;
  }

  /// Set the delegate. resetDelegate must be called before attempting
  /// to set.
  void setDelegate(Delegate *delegate) {
    assert(delegate && !TheDelegate &&
           "Attempted to set delegate to null, or to change it without "
           "first resetting it!");

    TheDelegate = delegate;
  }

  void setObserver(GISelChangeObserver *O) { Observer = O; }

  GISelChangeObserver *getObserver() const { return Observer; }

  MachineModuleInfo &getMMI() const { return MMI; }
  MCContext &getContext() const { return Ctx; }

  /// Returns the Section this function belongs to.
  MCSection *getSection() const { return Section; }

  /// Set the Section this function belongs to.
  void setSection(MCSection *S) { Section = S; }

  PseudoSourceValueManager &getPSVManager() const { return *PSVManager; }

  /// Return the DataLayout attached to the Module associated to this MF.
  const DataLayout &getDataLayout() const;

  /// Return the LLVM function that this machine code represents
  Function &getFunction() { return F; }

  /// Return the LLVM function that this machine code represents
  const Function &getFunction() const { return F; }

  /// getName - Return the name of the corresponding LLVM function.
  StringRef getName() const;

  /// getFunctionNumber - Return a unique ID for the current function.
  unsigned getFunctionNumber() const { return FunctionNumber; }

  /// Returns true if this function has basic block sections enabled.
  bool hasBBSections() const {
    return (BBSectionsType == BasicBlockSection::All ||
            BBSectionsType == BasicBlockSection::List ||
            BBSectionsType == BasicBlockSection::Preset);
  }

  /// Returns true if basic block labels are to be generated for this function.
  bool hasBBLabels() const {
    return BBSectionsType == BasicBlockSection::Labels;
  }

  void setBBSectionsType(BasicBlockSection V) { BBSectionsType = V; }

  /// Assign the IsBeginSection and IsEndSection fields for basic blocks in
  /// this function.
  void assignBeginEndSections();

  /// getTarget - Return the target machine this machine code is compiled with
  const LLVMTargetMachine &getTarget() const { return Target; }

  /// getSubtarget - Return the subtarget for which this machine code is being
  /// compiled.
  const TargetSubtargetInfo &getSubtarget() const { return *STI; }

  /// getSubtarget - This method returns a reference to the specified type of
  /// TargetSubtargetInfo.  In debug builds, it verifies that the object being
  /// returned is of the correct type.
  template<typename STC> const STC &getSubtarget() const {
    return *static_cast<const STC *>(STI);
  }

  /// getRegInfo - Return information about the registers currently in use.
  MachineRegisterInfo &getRegInfo() { return *RegInfo; }
  const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }

  /// getFrameInfo - Return the frame info object for the current function.
  /// This object contains information about objects allocated on the stack
  /// frame of the current function in an abstract way.
  MachineFrameInfo &getFrameInfo() { return *FrameInfo; }
  const MachineFrameInfo &getFrameInfo() const { return *FrameInfo; }

  /// getJumpTableInfo - Return the jump table info object for the current
  /// function.  This object contains information about jump tables in the
  /// current function.  If the current function has no jump tables, this will
  /// return null.
  const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
  MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }

  /// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function; if it
  /// does not already exist, allocate one.
  MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);

  /// getConstantPool - Return the constant pool object for the current
  /// function.
  MachineConstantPool *getConstantPool() { return ConstantPool; }
  const MachineConstantPool *getConstantPool() const { return ConstantPool; }

  /// getWasmEHFuncInfo - Return information about how the current function uses
  /// Wasm exception handling. Returns null for functions that don't use wasm
  /// exception handling.
  const WasmEHFuncInfo *getWasmEHFuncInfo() const { return WasmEHInfo; }
  WasmEHFuncInfo *getWasmEHFuncInfo() { return WasmEHInfo; }

  /// getWinEHFuncInfo - Return information about how the current function uses
  /// Windows exception handling. Returns null for functions that don't use
  /// funclets for exception handling.
  const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
  WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }

  /// getAlignment - Return the alignment of the function.
  Align getAlignment() const { return Alignment; }

  /// setAlignment - Set the alignment of the function.
  void setAlignment(Align A) { Alignment = A; }

  /// ensureAlignment - Make sure the function is at least A bytes aligned.
  void ensureAlignment(Align A) {
    if (Alignment < A)
      Alignment = A;
  }

  /// exposesReturnsTwice - Returns true if the function calls setjmp or
  /// any other similar functions with attribute "returns twice" without
  /// having the attribute itself.
  bool exposesReturnsTwice() const {
    return ExposesReturnsTwice;
  }

  /// setExposesReturnsTwice - Set a flag that indicates if there's a call to
  /// a "returns twice" function.
  void setExposesReturnsTwice(bool B) {
    ExposesReturnsTwice = B;
  }

  /// Returns true if the function contains any inline assembly.
  bool hasInlineAsm() const {
    return HasInlineAsm;
  }

  /// Set a flag that indicates that the function contains inline assembly.
  void setHasInlineAsm(bool B) {
    HasInlineAsm = B;
  }

  bool hasWinCFI() const {
    return HasWinCFI;
  }
  void setHasWinCFI(bool v) { HasWinCFI = v; }

  /// True if this function needs frame moves for debug or exceptions.
  bool needsFrameMoves() const;

  /// Get the function properties
  const MachineFunctionProperties &getProperties() const { return Properties; }
  MachineFunctionProperties &getProperties() { return Properties; }

  /// getInfo - Keep track of various per-function pieces of information for
  /// backends that would like to do so.
  ///
  template<typename Ty>
  Ty *getInfo() {
    return static_cast<Ty*>(MFInfo);
  }

  template<typename Ty>
  const Ty *getInfo() const {
    return static_cast<const Ty *>(MFInfo);
  }

  template <typename Ty> Ty *cloneInfo(const Ty &Old) {
    assert(!MFInfo);
    MFInfo = Ty::template create<Ty>(Allocator, Old);
    return static_cast<Ty *>(MFInfo);
  }

  /// Initialize the target specific MachineFunctionInfo
  void initTargetMachineFunctionInfo(const TargetSubtargetInfo &STI);

  MachineFunctionInfo *cloneInfoFrom(
      const MachineFunction &OrigMF,
      const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {
    assert(!MFInfo && "new function already has MachineFunctionInfo");
    if (!OrigMF.MFInfo)
      return nullptr;
    return OrigMF.MFInfo->clone(Allocator, *this, Src2DstMBB);
  }

  /// Returns the denormal handling type for the default rounding mode of the
  /// function.
  DenormalMode getDenormalMode(const fltSemantics &FPType) const;

  /// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
  /// are inserted into the machine function.  The block number for a machine
  /// basic block can be found by using the MBB::getNumber method; this method
  /// provides the inverse mapping.
  MachineBasicBlock *getBlockNumbered(unsigned N) const {
    assert(N < MBBNumbering.size() && "Illegal block number");
    assert(MBBNumbering[N] && "Block was removed from the machine function!");
    return MBBNumbering[N];
  }

  /// Should we be emitting segmented stack stuff for the function
  bool shouldSplitStack() const;

  /// getNumBlockIDs - Return the number of MBB IDs allocated.
  unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }

  /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
  /// recomputes them.  This guarantees that the MBB numbers are sequential,
  /// dense, and match the ordering of the blocks within the function.  If a
  /// specific MachineBasicBlock is specified, only that block and those after
  /// it are renumbered.
  void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);

  /// print - Print out the MachineFunction in a format suitable for debugging
  /// to the specified stream.
  void print(raw_ostream &OS, const SlotIndexes* = nullptr) const;

  /// viewCFG - This function is meant for use from the debugger.  You can just
  /// say 'call F->viewCFG()' and a ghostview window should pop up from the
  /// program, displaying the CFG of the current function with the code for each
  /// basic block inside.  This depends on there being a 'dot' and 'gv' program
  /// in your path.
  void viewCFG() const;

  /// viewCFGOnly - This function is meant for use from the debugger.  It works
  /// just like viewCFG, but it does not include the contents of basic blocks
  /// into the nodes, just the label.  If you are only interested in the CFG
  /// this can make the graph smaller.
  ///
  void viewCFGOnly() const;

  /// dump - Print the current MachineFunction to cerr, useful for debugger use.
  void dump() const;

  /// Run the current MachineFunction through the machine code verifier, useful
  /// for debugger use.
  /// \returns true if no problems were found.
  bool verify(Pass *p = nullptr, const char *Banner = nullptr,
              bool AbortOnError = true) const;

  // Provide accessors for the MachineBasicBlock list...
  using iterator = BasicBlockListType::iterator;
  using const_iterator = BasicBlockListType::const_iterator;
  using const_reverse_iterator = BasicBlockListType::const_reverse_iterator;
  using reverse_iterator = BasicBlockListType::reverse_iterator;

  /// Support for MachineBasicBlock::getNextNode().
  static BasicBlockListType MachineFunction::*
  getSublistAccess(MachineBasicBlock *) {
    return &MachineFunction::BasicBlocks;
  }

  /// addLiveIn - Add the specified physical register as a live-in value and
  /// create a corresponding virtual register for it.
  Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC);

  //===--------------------------------------------------------------------===//
  // BasicBlock accessor functions.
  //
  iterator                 begin()       { return BasicBlocks.begin(); }
  const_iterator           begin() const { return BasicBlocks.begin(); }
  iterator                 end  ()       { return BasicBlocks.end();   }
  const_iterator           end  () const { return BasicBlocks.end();   }

  reverse_iterator        rbegin()       { return BasicBlocks.rbegin(); }
  const_reverse_iterator  rbegin() const { return BasicBlocks.rbegin(); }
  reverse_iterator        rend  ()       { return BasicBlocks.rend();   }
  const_reverse_iterator  rend  () const { return BasicBlocks.rend();   }

  unsigned                  size() const { return (unsigned)BasicBlocks.size();}
  bool                     empty() const { return BasicBlocks.empty(); }
  const MachineBasicBlock &front() const { return BasicBlocks.front(); }
        MachineBasicBlock &front()       { return BasicBlocks.front(); }
  const MachineBasicBlock & back() const { return BasicBlocks.back(); }
        MachineBasicBlock & back()       { return BasicBlocks.back(); }

  void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
  void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
  void insert(iterator MBBI, MachineBasicBlock *MBB) {
    BasicBlocks.insert(MBBI, MBB);
  }
  void splice(iterator InsertPt, iterator MBBI) {
    BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
  }
  void splice(iterator InsertPt, MachineBasicBlock *MBB) {
    BasicBlocks.splice(InsertPt, BasicBlocks, MBB);
  }
  void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
    BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
  }

  void remove(iterator MBBI) { BasicBlocks.remove(MBBI); }
  void remove(MachineBasicBlock *MBBI) { BasicBlocks.remove(MBBI); }
  void erase(iterator MBBI) { BasicBlocks.erase(MBBI); }
  void erase(MachineBasicBlock *MBBI) { BasicBlocks.erase(MBBI); }

  template <typename Comp>
  void sort(Comp comp) {
    BasicBlocks.sort(comp);
  }

  /// Return the number of \p MachineInstrs in this \p MachineFunction.
  unsigned getInstructionCount() const {
    unsigned InstrCount = 0;
    for (const MachineBasicBlock &MBB : BasicBlocks)
      InstrCount += MBB.size();
    return InstrCount;
  }

  //===--------------------------------------------------------------------===//
  // Internal functions used to automatically number MachineBasicBlocks

  /// Adds the MBB to the internal numbering. Returns the unique number
  /// assigned to the MBB.
  unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
    MBBNumbering.push_back(MBB);
    return (unsigned)MBBNumbering.size()-1;
  }

  /// removeFromMBBNumbering - Remove the specific machine basic block from our
  /// tracker; this is only really to be used by the MachineBasicBlock
  /// implementation.
  void removeFromMBBNumbering(unsigned N) {
    assert(N < MBBNumbering.size() && "Illegal basic block #");
    MBBNumbering[N] = nullptr;
  }

  /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
  /// of `new MachineInstr'.
  MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL,
                                   bool NoImplicit = false);

  /// Create a new MachineInstr which is a copy of \p Orig, identical in all
  /// ways except the instruction has no parent, prev, or next. Bundling flags
  /// are reset.
  ///
  /// Note: Clones a single instruction, not whole instruction bundles.
  /// Does not perform target specific adjustments; consider using
  /// TargetInstrInfo::duplicate() instead.
  MachineInstr *CloneMachineInstr(const MachineInstr *Orig);

  /// Clones the instruction or the whole instruction bundle \p Orig and
  /// inserts it into \p MBB before \p InsertBefore.
  ///
  /// Note: Does not perform target specific adjustments; consider using
  /// TargetInstrInfo::duplicate() instead.
  MachineInstr &
  cloneMachineInstrBundle(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator InsertBefore,
                          const MachineInstr &Orig);

  /// DeleteMachineInstr - Delete the given MachineInstr.
  void deleteMachineInstr(MachineInstr *MI);

  /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
  /// instead of `new MachineBasicBlock'.
  MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);

  /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
  void deleteMachineBasicBlock(MachineBasicBlock *MBB);

  /// getMachineMemOperand - Allocate a new MachineMemOperand.
  /// MachineMemOperands are owned by the MachineFunction and need not be
  /// explicitly deallocated.
  MachineMemOperand *getMachineMemOperand(
      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
      Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
      const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

  MachineMemOperand *getMachineMemOperand(
      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
      Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
      const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

  /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
  /// an existing one, adjusting by an offset and using the given size.
  /// MachineMemOperands are owned by the MachineFunction and need not be
  /// explicitly deallocated.
  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                          int64_t Offset, LLT Ty);
  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                          int64_t Offset, uint64_t Size) {
    return getMachineMemOperand(
        MMO, Offset, Size == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * Size));
  }
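
  // Illustrative sketch (not from the original header): deriving narrowed
  // memory operands from an existing one, e.g. when splitting a wide load.
  // Assumes `MF` is this MachineFunction and `MMO` describes an 8-byte access:
  //
  //   // Low half: same pointer, offset 0, 4 bytes.
  //   MachineMemOperand *LoMMO = MF.getMachineMemOperand(MMO, /*Offset=*/0, 4);
  //   // High half: 4 bytes starting 4 bytes past the original pointer.
  //   MachineMemOperand *HiMMO = MF.getMachineMemOperand(MMO, /*Offset=*/4, 4);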

  /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
  /// an existing one, replacing only the MachinePointerInfo and size.
  /// MachineMemOperands are owned by the MachineFunction and need not be
  /// explicitly deallocated.
  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                          const MachinePointerInfo &PtrInfo,
                                          uint64_t Size);
  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                          const MachinePointerInfo &PtrInfo,
                                          LLT Ty);

  /// Allocate a new MachineMemOperand by copying an existing one,
  /// replacing only AliasAnalysis information. MachineMemOperands are owned
  /// by the MachineFunction and need not be explicitly deallocated.
  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                          const AAMDNodes &AAInfo);

  /// Allocate a new MachineMemOperand by copying an existing one,
  /// replacing the flags. MachineMemOperands are owned
  /// by the MachineFunction and need not be explicitly deallocated.
  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                          MachineMemOperand::Flags Flags);

  using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity;

  /// Allocate an array of MachineOperands. This is only intended for use by
  /// internal MachineInstr functions.
  MachineOperand *allocateOperandArray(OperandCapacity Cap) {
    return OperandRecycler.allocate(Cap, Allocator);
  }

  /// Deallocate an array of MachineOperands and recycle the memory. This is
  /// only intended for use by internal MachineInstr functions.
  /// Cap must be the same capacity that was used to allocate the array.
  void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
    OperandRecycler.deallocate(Cap, Array);
  }

  /// Allocate and initialize a register mask with @p NumRegister bits.
  uint32_t *allocateRegMask();

  ArrayRef<int> allocateShuffleMask(ArrayRef<int> Mask);

  /// Allocate and construct an extra info structure for a `MachineInstr`.
  ///
  /// This is allocated on the function's allocator and so lives the life of
  /// the function.
  MachineInstr::ExtraInfo *createMIExtraInfo(
      ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol = nullptr,
      MCSymbol *PostInstrSymbol = nullptr, MDNode *HeapAllocMarker = nullptr,
      MDNode *PCSections = nullptr, uint32_t CFIType = 0);

  /// Allocate a string and populate it with the given external symbol name.
  const char *createExternalSymbolName(StringRef Name);

  //===--------------------------------------------------------------------===//
  // Label Manipulation.

  /// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
  /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
  /// normal 'L' label is returned.
  MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
                         bool isLinkerPrivate = false) const;

  /// getPICBaseSymbol - Return a function-local symbol to represent the PIC
  /// base.
  MCSymbol *getPICBaseSymbol() const;

  /// Returns a reference to a list of CFI instructions in the function's
  /// prologue.  Used to construct frame maps for debug and exception handling
  /// consumers.
  const std::vector<MCCFIInstruction> &getFrameInstructions() const {
    return FrameInstructions;
  }

  [[nodiscard]] unsigned addFrameInst(const MCCFIInstruction &Inst);

  /// Returns a reference to a list of symbols immediately following calls to
  /// _setjmp in the function. Used to construct the longjmp target table used
  /// by Windows Control Flow Guard.
  const std::vector<MCSymbol *> &getLongjmpTargets() const {
    return LongjmpTargets;
  }

  /// Add the specified symbol to the list of valid longjmp targets for Windows
  /// Control Flow Guard.
  void addLongjmpTarget(MCSymbol *Target) { LongjmpTargets.push_back(Target); }

  /// Returns a reference to a list of symbols for the blocks that contain
  /// catchret instructions. Used to construct the catchret target table used
  /// by Windows EHCont Guard.
  const std::vector<MCSymbol *> &getCatchretTargets() const {
    return CatchretTargets;
  }

  /// Add the specified symbol to the list of valid catchret targets for Windows
  /// EHCont Guard.
  void addCatchretTarget(MCSymbol *Target) {
    CatchretTargets.push_back(Target);
  }

  /// \name Exception Handling
  /// \{

  bool callsEHReturn() const { return CallsEHReturn; }
  void setCallsEHReturn(bool b) { CallsEHReturn = b; }

  bool callsUnwindInit() const { return CallsUnwindInit; }
  void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }

  bool hasEHCatchret() const { return HasEHCatchret; }
  void setHasEHCatchret(bool V) { HasEHCatchret = V; }

  bool hasEHScopes() const { return HasEHScopes; }
  void setHasEHScopes(bool V) { HasEHScopes = V; }

  bool hasEHFunclets() const { return HasEHFunclets; }
  void setHasEHFunclets(bool V) { HasEHFunclets = V; }

  bool isOutlined() const { return IsOutlined; }
  void setIsOutlined(bool V) { IsOutlined = V; }

  /// Find or create a LandingPadInfo for the specified MachineBasicBlock.
  LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);

  /// Return a reference to the landing pad info for the current function.
  const std::vector<LandingPadInfo> &getLandingPads() const {
    return LandingPads;
  }

  /// Provide the begin and end labels of an invoke style call and associate it
  /// with a try landing pad block.
  void addInvoke(MachineBasicBlock *LandingPad,
                 MCSymbol *BeginLabel, MCSymbol *EndLabel);

  /// Add a new landing pad, and extract the exception handling information from
  /// the landingpad instruction. Returns the label ID for the landing pad
  /// entry.
  MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);

  /// Return the type id for the specified typeinfo.  This is function wide.
  unsigned getTypeIDFor(const GlobalValue *TI);

  /// Return the id of the filter encoded by TyIds.  This is function wide.
  int getFilterIDFor(ArrayRef<unsigned> TyIds);

  /// Map the landing pad's EH symbol to the call site indexes.
  void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);

  /// Return true if there is any wasm exception handling.
  bool hasAnyWasmLandingPadIndex() const {
    return !WasmLPadToIndexMap.empty();
  }

  /// Map the landing pad to its index. Used for Wasm exception handling.
  void setWasmLandingPadIndex(const MachineBasicBlock *LPad, unsigned Index) {
    WasmLPadToIndexMap[LPad] = Index;
  }

  /// Returns true if the landing pad has an associated index in wasm EH.
  bool hasWasmLandingPadIndex(const MachineBasicBlock *LPad) const {
    return WasmLPadToIndexMap.count(LPad);
  }

  /// Get the index in wasm EH for a given landing pad.
  unsigned getWasmLandingPadIndex(const MachineBasicBlock *LPad) const {
    assert(hasWasmLandingPadIndex(LPad));
    return WasmLPadToIndexMap.lookup(LPad);
  }
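
  // Illustrative sketch (not from the original header): recording and querying
  // a wasm EH landing pad index. Assumes `MF` is this MachineFunction and
  // `LPad` points at a landing pad block:
  //
  //   MF.setWasmLandingPadIndex(LPad, 0);
  //   if (MF.hasWasmLandingPadIndex(LPad)) {
  //     unsigned Idx = MF.getWasmLandingPadIndex(LPad); // Idx == 0
  //     (void)Idx;
  //   }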

  bool hasAnyCallSiteLandingPad() const {
    return !LPadToCallSiteMap.empty();
  }

  /// Get the call site indexes for a landing pad EH symbol.
  SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
    assert(hasCallSiteLandingPad(Sym) &&
           "missing call site number for landing pad!");
    return LPadToCallSiteMap[Sym];
  }

  /// Return true if the landing pad EH symbol has an associated call site.
  bool hasCallSiteLandingPad(MCSymbol *Sym) {
    return !LPadToCallSiteMap[Sym].empty();
  }

  bool hasAnyCallSiteLabel() const {
    return !CallSiteMap.empty();
  }

  /// Map the begin label for a call site.
  void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
    CallSiteMap[BeginLabel] = Site;
  }

  /// Get the call site number for a begin label.
  unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) const {
    assert(hasCallSiteBeginLabel(BeginLabel) &&
           "Missing call site number for EH_LABEL!");
    return CallSiteMap.lookup(BeginLabel);
  }

  /// Return true if the begin label has a call site number associated with it.
  bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) const {
    return CallSiteMap.count(BeginLabel);
  }
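
  // Illustrative sketch (not from the original header): mapping an EH_LABEL
  // back to its call site number. Assumes `MF` is this MachineFunction and
  // `BeginLabel` is the MCSymbol emitted at the start of a call site:
  //
  //   MF.setCallSiteBeginLabel(BeginLabel, /*Site=*/1);
  //   if (MF.hasCallSiteBeginLabel(BeginLabel)) {
  //     unsigned Site = MF.getCallSiteBeginLabel(BeginLabel); // Site == 1
  //     (void)Site;
  //   }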

  /// Record annotations associated with a particular label.
  void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD) {
    CodeViewAnnotations.push_back({Label, MD});
  }

  ArrayRef<std::pair<MCSymbol *, MDNode *>> getCodeViewAnnotations() const {
    return CodeViewAnnotations;
  }

  /// Return a reference to the C++ typeinfo for the current function.
  const std::vector<const GlobalValue *> &getTypeInfos() const {
    return TypeInfos;
  }

  /// Return a reference to the typeids encoding filters used in the current
  /// function.
  const std::vector<unsigned> &getFilterIds() const {
    return FilterIds;
  }

  /// \}

  /// Collect information used to emit debugging information of a variable in a
  /// stack slot.
  void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
                          int Slot, const DILocation *Loc) {
    VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
  }

  /// Collect information used to emit debugging information of a variable in
  /// the entry value of a register.
  void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
                          MCRegister Reg, const DILocation *Loc) {
    VariableDbgInfos.emplace_back(Var, Expr, Reg, Loc);
  }

  VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
  const VariableDbgInfoMapTy &getVariableDbgInfo() const {
    return VariableDbgInfos;
  }

  /// Returns the collection of variables for which we have debug info and that
  /// have been assigned a stack slot.
  auto getInStackSlotVariableDbgInfo() {
    return make_filter_range(getVariableDbgInfo(), [](auto &VarInfo) {
      return VarInfo.inStackSlot();
    });
  }

  /// Returns the collection of variables for which we have debug info and that
  /// have been assigned a stack slot.
  auto getInStackSlotVariableDbgInfo() const {
    return make_filter_range(getVariableDbgInfo(), [](const auto &VarInfo) {
      return VarInfo.inStackSlot();
    });
  }

  /// Returns the collection of variables for which we have debug info and that
  /// have been assigned an entry value register.
  auto getEntryValueVariableDbgInfo() const {
    return make_filter_range(getVariableDbgInfo(), [](const auto &VarInfo) {
      return VarInfo.inEntryValueRegister();
    });
  }
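
  // Illustrative sketch (not from the original header): both getters above
  // return lazily filtered ranges over the same underlying list. Assumes `MF`
  // is a const MachineFunction:
  //
  //   auto InSlots = MF.getInStackSlotVariableDbgInfo();
  //   auto InRegs = MF.getEntryValueVariableDbgInfo();
  //   size_t NumSlotVars = std::distance(InSlots.begin(), InSlots.end());
  //   size_t NumRegVars = std::distance(InRegs.begin(), InRegs.end());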

  /// Start tracking the arguments passed to the call \p CallI.
  void addCallArgsForwardingRegs(const MachineInstr *CallI,
                                 CallSiteInfoImpl &&CallInfo) {
    assert(CallI->isCandidateForCallSiteEntry());
    bool Inserted =
        CallSitesInfo.try_emplace(CallI, std::move(CallInfo)).second;
    (void)Inserted;
    assert(Inserted && "Call site info not unique");
  }

  const CallSiteInfoMap &getCallSitesInfo() const {
    return CallSitesInfo;
  }

  /// The following functions update call site info. They should be called
  /// before removing, replacing, or copying a call instruction.

  /// Erase the call site info for \p MI. It is used to remove a call
  /// instruction from the instruction stream.
  void eraseCallSiteInfo(const MachineInstr *MI);
  /// Copy the call site info from \p Old to \p New. Used when making a copy of
  /// the instruction that will be inserted at a different point of the
  /// instruction stream.
  void copyCallSiteInfo(const MachineInstr *Old,
                        const MachineInstr *New);

  /// Move the call site info from \p Old to \p New. This function is used when
  /// we are replacing one call instruction with another one to the same callee.
  void moveCallSiteInfo(const MachineInstr *Old,
                        const MachineInstr *New);

  unsigned getNewDebugInstrNum() {
    return ++DebugInstrNumberingCount;
  }
};

//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//

// Provide specializations of GraphTraits to be able to treat a
// machine function as a graph of machine basic blocks... these are
// the same as the machine basic block iterators, except that the root
// node is implicitly the first node of the function.
//
template <> struct GraphTraits<MachineFunction*> :
  public GraphTraits<MachineBasicBlock*> {
  static NodeRef getEntryNode(MachineFunction *F) { return &F->front(); }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<MachineFunction::iterator>;

  static nodes_iterator nodes_begin(MachineFunction *F) {
    return nodes_iterator(F->begin());
  }

  static nodes_iterator nodes_end(MachineFunction *F) {
    return nodes_iterator(F->end());
  }

  static unsigned       size       (MachineFunction *F) { return F->size(); }
};
template <> struct GraphTraits<const MachineFunction*> :
  public GraphTraits<const MachineBasicBlock*> {
  static NodeRef getEntryNode(const MachineFunction *F) { return &F->front(); }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<MachineFunction::const_iterator>;

  static nodes_iterator nodes_begin(const MachineFunction *F) {
    return nodes_iterator(F->begin());
  }

  static nodes_iterator nodes_end  (const MachineFunction *F) {
    return nodes_iterator(F->end());
  }

  static unsigned       size       (const MachineFunction *F)  {
    return F->size();
  }
};

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order.  Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineFunction*>> :
  public GraphTraits<Inverse<MachineBasicBlock*>> {
  static NodeRef getEntryNode(Inverse<MachineFunction *> G) {
    return &G.Graph->front();
  }
};
template <> struct GraphTraits<Inverse<const MachineFunction*>> :
  public GraphTraits<Inverse<const MachineBasicBlock*>> {
  static NodeRef getEntryNode(Inverse<const MachineFunction *> G) {
    return &G.Graph->front();
  }
};
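
// Illustrative sketch (not from the original header): these specializations
// let the generic graph algorithms in llvm/ADT walk a MachineFunction's CFG.
// Assumes `MF` is a MachineFunction reference and that
// llvm/ADT/DepthFirstIterator.h is included:
//
//   for (MachineBasicBlock *MBB : depth_first(&MF)) {
//     // Blocks are visited in depth-first order from the entry block; use
//     // Inverse<MachineFunction *> (via inverse_depth_first) to walk
//     // predecessor edges instead.
//   }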

class MachineFunctionAnalysisManager;
void verifyMachineFunction(MachineFunctionAnalysisManager *,
                           const std::string &Banner,
                           const MachineFunction &MF);

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEFUNCTION_H

//=- MachineBranchProbabilityInfo.h - Branch Probability Analysis -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities on machine basic blocks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"

namespace llvm {

class MachineBranchProbabilityInfo : public ImmutablePass {
  virtual void anchor();

  // Default weight value. Used when we don't have information about the edge.
  // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
  // the successors have a weight yet. But it doesn't make sense when providing
  // weight to an edge that may have siblings with non-zero weights. This can
  // be handled various ways, but it's probably fine for an edge with unknown
  // weight to just "inherit" the non-zero weight of an adjacent successor.
  static const uint32_t DEFAULT_WEIGHT = 16;

public:
  static char ID;

  MachineBranchProbabilityInfo();

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  // Return edge probability.
  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;

  // Same as above, but using a const_succ_iterator from Src. This is faster
  // when the iterator is already available.
  BranchProbability
  getEdgeProbability(const MachineBasicBlock *Src,
                     MachineBasicBlock::const_succ_iterator Dst) const;

  // A 'Hot' edge is an edge whose probability is >= 80%.
  bool isEdgeHot(const MachineBasicBlock *Src,
                 const MachineBasicBlock *Dst) const;

  // Print the probability as a value between 0 (0% probability) and
  // 1 (100% probability). The printed value is never equal to 0, and it can
  // be 1 only if the Src block has a single successor.
  raw_ostream &printEdgeProbability(raw_ostream &OS,
                                    const MachineBasicBlock *Src,
                                    const MachineBasicBlock *Dst) const;
};
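
// Illustrative sketch (not from the original header): querying the analysis
// from a MachineFunctionPass that declared
// AU.addRequired<MachineBranchProbabilityInfo>(). Assumes `Src` and `Dst` are
// MachineBasicBlock* in the same function:
//
//   const auto &MBPI = getAnalysis<MachineBranchProbabilityInfo>();
//   BranchProbability Prob = MBPI.getEdgeProbability(Src, Dst);
//   if (MBPI.isEdgeHot(Src, Dst)) {
//     // e.g. prefer laying out Dst immediately after Src.
//   }
//   (void)Prob;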

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H

//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables  --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The MachineJumpTableInfo class keeps track of jump tables referenced by
// lowered switch instructions in the MachineFunction.
//
// Instructions reference the address of these jump tables through the use of
// MO_JumpTableIndex values.  When emitting assembly or machine code, these
// virtual address references are converted to refer to the address of the
// function jump tables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H

#include "llvm/Support/Printable.h"
#include <cassert>
#include <vector>

namespace llvm {

class MachineBasicBlock;
class DataLayout;
class raw_ostream;

/// MachineJumpTableEntry - One jump table in the jump table info.
///
struct MachineJumpTableEntry {
  /// MBBs - The vector of basic blocks from which to create the jump table.
  std::vector<MachineBasicBlock*> MBBs;

  explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
  : MBBs(M) {}
};

class MachineJumpTableInfo {
public:
  /// JTEntryKind - This enum indicates how each entry of the jump table is
  /// represented and emitted.
  enum JTEntryKind {
    /// EK_BlockAddress - Each entry is a plain address of block, e.g.:
    ///     .word LBB123
    EK_BlockAddress,

    /// EK_GPRel64BlockAddress - Each entry is an address of block, encoded
    /// with a relocation as gp-relative, e.g.:
    ///     .gpdword LBB123
    EK_GPRel64BlockAddress,

    /// EK_GPRel32BlockAddress - Each entry is an address of block, encoded
    /// with a relocation as gp-relative, e.g.:
    ///     .gprel32 LBB123
    EK_GPRel32BlockAddress,

    /// EK_LabelDifference32 - Each entry is the address of the block minus
    /// the address of the jump table.  This is used for PIC jump tables where
    /// gprel32 is not supported.  e.g.:
    ///      .word LBB123 - LJTI1_2
    /// If the .set directive is supported, this is emitted as:
    ///      .set L4_5_set_123, LBB123 - LJTI1_2
    ///      .word L4_5_set_123
    EK_LabelDifference32,

    /// EK_Inline - Jump table entries are emitted inline at their point of
    /// use. It is the responsibility of the target to emit the entries.
    EK_Inline,

    /// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the
    /// TargetLowering::LowerCustomJumpTableEntry hook.
    EK_Custom32
  };
private:
  JTEntryKind EntryKind;
  std::vector<MachineJumpTableEntry> JumpTables;
public:
  explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}

  JTEntryKind getEntryKind() const { return EntryKind; }

  /// getEntrySize - Return the size of each entry in the jump table.
  unsigned getEntrySize(const DataLayout &TD) const;
  /// getEntryAlignment - Return the alignment of each entry in the jump table.
  unsigned getEntryAlignment(const DataLayout &TD) const;

  /// createJumpTableIndex - Create a new jump table.
  ///
  unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);

  /// isEmpty - Return true if there are no jump tables.
  ///
  bool isEmpty() const { return JumpTables.empty(); }

  const std::vector<MachineJumpTableEntry> &getJumpTables() const {
    return JumpTables;
  }

  /// RemoveJumpTable - Mark the specific index as being dead.  This will
  /// prevent it from being emitted.
  void RemoveJumpTable(unsigned Idx) {
    JumpTables[Idx].MBBs.clear();
  }

  /// RemoveMBBFromJumpTables - If MBB is present in any jump tables, remove it.
  bool RemoveMBBFromJumpTables(MachineBasicBlock *MBB);

  /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
  /// the jump tables to branch to New instead.
  bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);

  /// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
  /// the jump table to branch to New instead.
  bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old,
                             MachineBasicBlock *New);

  /// print - Used by the MachineFunction printer to print information about
  /// jump tables.  Implemented in MachineFunction.cpp
  ///
  void print(raw_ostream &OS) const;

  /// dump - Print to stderr.
  ///
  void dump() const;
};
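
// Illustrative sketch (not from the original header): creating a jump table
// and retargeting it when a block is replaced. Assumes `MF` is a
// MachineFunction, `DestBBs` is a std::vector<MachineBasicBlock*>, and
// `OldBB`/`NewBB` are blocks in `MF`:
//
//   MachineJumpTableInfo *JTI =
//       MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_BlockAddress);
//   unsigned Idx = JTI->createJumpTableIndex(DestBBs);
//   // Instructions refer to the table through an MO_JumpTableIndex operand
//   // carrying Idx.
//   JTI->ReplaceMBBInJumpTables(OldBB, NewBB);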


/// Prints a jump table entry reference.
///
/// The format is:
///   %jump-table.5       - a jump table entry with index == 5.
///
/// Usage: OS << printJumpTableEntryReference(Idx) << '\n';
Printable printJumpTableEntryReference(unsigned Idx);

} // End llvm namespace

#endif // LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H

//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SDNode class and derived classes, which are used to
// represent the nodes and operations present in a SelectionDAG.  These nodes
// and operations are machine code level operations, with some similarities to
// the GCC RTL representation.
//
// Clients should include the SelectionDAG.h file instead of this file directly.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
#define LLVM_CODEGEN_SELECTIONDAGNODES_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {

class APInt;
class Constant;
class GlobalValue;
class MachineBasicBlock;
class MachineConstantPoolValue;
class MCSymbol;
class raw_ostream;
class SDNode;
class SelectionDAG;
class Type;
class Value;

void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
                    bool force = false);

/// This represents a list of ValueType's that has been intern'd by
/// a SelectionDAG.  Instances of this simple value class are returned by
/// SelectionDAG::getVTList(...).
///
struct SDVTList {
  const EVT *VTs;
  unsigned int NumVTs;
};

namespace ISD {

  /// Node predicates

/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
/// same constant or undefined, return true and return the constant value in
/// \p SplatValue.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
/// true, it only checks BUILD_VECTOR.
bool isConstantSplatVectorAllOnes(const SDNode *N,
                                  bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
/// only checks BUILD_VECTOR.
bool isConstantSplatVectorAllZeros(const SDNode *N,
                                   bool BuildVectorOnly = false);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are ~0 or undef.
bool isBuildVectorAllOnes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR where all of the
/// elements are 0 or undef.
bool isBuildVectorAllZeros(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantSDNode or undef.
bool isBuildVectorOfConstantSDNodes(const SDNode *N);

/// Return true if the specified node is a BUILD_VECTOR node of all
/// ConstantFPSDNode or undef.
bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);

/// Returns true if the specified node is a vector where all elements can
/// be truncated to the specified element size without a loss in meaning.
bool isVectorShrinkable(const SDNode *N, unsigned NewEltSize, bool Signed);

/// Return true if the node has at least one operand and all operands of the
/// specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);

/// Return true if the specified node is FREEZE(UNDEF).
bool isFreezeUndef(const SDNode *N);

} // end namespace ISD

//===----------------------------------------------------------------------===//
/// Unlike LLVM values, Selection DAG nodes may return multiple
/// values as the result of a computation.  Many nodes return multiple values,
/// from loads (which define a token and a return value) to ADDC (which returns
/// a result and a carry value), to calls (which may return an arbitrary number
/// of values).
///
/// As such, each use of a SelectionDAG computation must indicate the node that
/// computes it as well as which return value to use from that node.  This pair
/// of information is represented with the SDValue value type.
///
class SDValue {
  friend struct DenseMapInfo<SDValue>;

  SDNode *Node = nullptr; // The node defining the value we are using.
  unsigned ResNo = 0;     // Which return value of the node we are using.

public:
  SDValue() = default;
  SDValue(SDNode *node, unsigned resno);

  /// get the index which selects a specific result in the SDNode
  unsigned getResNo() const { return ResNo; }

  /// get the SDNode which holds the desired result
  SDNode *getNode() const { return Node; }

  /// set the SDNode
  void setNode(SDNode *N) { Node = N; }

  inline SDNode *operator->() const { return Node; }

  bool operator==(const SDValue &O) const {
    return Node == O.Node && ResNo == O.ResNo;
  }
  bool operator!=(const SDValue &O) const {
    return !operator==(O);
  }
  bool operator<(const SDValue &O) const {
    return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
  }
  explicit operator bool() const {
    return Node != nullptr;
  }

  SDValue getValue(unsigned R) const {
    return SDValue(Node, R);
  }

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return the ValueType of the referenced return value.
  inline EVT getValueType() const;

  /// Return the simple ValueType of the referenced return value.
  MVT getSimpleValueType() const {
    return getValueType().getSimpleVT();
  }

  /// Returns the size of the value in bits.
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits() const {
    return getValueType().getSizeInBits();
  }

  uint64_t getScalarValueSizeInBits() const {
    return getValueType().getScalarType().getFixedSizeInBits();
  }

  // Forwarding methods - These forward to the corresponding methods in SDNode.
  inline unsigned getOpcode() const;
  inline unsigned getNumOperands() const;
  inline const SDValue &getOperand(unsigned i) const;
  inline uint64_t getConstantOperandVal(unsigned i) const;
  inline const APInt &getConstantOperandAPInt(unsigned i) const;
  inline bool isTargetMemoryOpcode() const;
  inline bool isTargetOpcode() const;
  inline bool isMachineOpcode() const;
  inline bool isUndef() const;
  inline unsigned getMachineOpcode() const;
  inline const DebugLoc &getDebugLoc() const;
  inline void dump() const;
  inline void dump(const SelectionDAG *G) const;
  inline void dumpr() const;
  inline void dumpr(const SelectionDAG *G) const;

  /// Return true if this operand (which must be a chain) reaches the
  /// specified operand without crossing any side-effecting instructions.
  /// In practice, this looks through token factors and non-volatile loads.
  /// In order to remain efficient, this only
  /// looks a couple of nodes in; it does not do an exhaustive search.
  bool reachesChainWithoutSideEffects(SDValue Dest,
                                      unsigned Depth = 2) const;

  /// Return true if there are no nodes using value ResNo of Node.
  inline bool use_empty() const;

  /// Return true if there is exactly one node using value ResNo of Node.
  inline bool hasOneUse() const;
};
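
// Illustrative sketch (not from the original header): an SDValue names a
// single result of an SDNode. Assumes `N` is an SDNode* defining at least two
// results:
//
//   SDValue First(N, 0);                // result 0 of N
//   SDValue Second = First.getValue(1); // result 1 of the same node
//   if (First.getOpcode() == ISD::ADD && First.hasOneUse()) {
//     SDValue LHS = First.getOperand(0), RHS = First.getOperand(1);
//     (void)LHS; (void)RHS;
//   }
//   (void)Second;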

template<> struct DenseMapInfo<SDValue> {
  static inline SDValue getEmptyKey() {
    SDValue V;
    V.ResNo = -1U;
    return V;
  }

  static inline SDValue getTombstoneKey() {
    SDValue V;
    V.ResNo = -2U;
    return V;
  }

  static unsigned getHashValue(const SDValue &Val) {
    return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
            (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
  }

  static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
    return LHS == RHS;
  }
};

/// Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
  using SimpleType = SDNode *;

  static SimpleType getSimplifiedValue(SDValue &Val) {
    return Val.getNode();
  }
};
template<> struct simplify_type<const SDValue> {
  using SimpleType = /*const*/ SDNode *;

  static SimpleType getSimplifiedValue(const SDValue &Val) {
    return Val.getNode();
  }
};

/// Represents a use of a SDNode. This class holds an SDValue,
/// which records the SDNode being used and the result number, a
/// pointer to the SDNode using the value, and Next and Prev pointers,
/// which link together all the uses of an SDNode.
///
class SDUse {
  /// Val - The value being used.
  SDValue Val;
  /// User - The user of this value.
  SDNode *User = nullptr;
  /// Prev, Next - Pointers to the uses list of the SDNode referred by
  /// this operand.
  SDUse **Prev = nullptr;
  SDUse *Next = nullptr;

public:
  SDUse() = default;
  SDUse(const SDUse &U) = delete;
  SDUse &operator=(const SDUse &) = delete;

  /// Normally SDUse will just implicitly convert to an SDValue that it holds.
  operator const SDValue&() const { return Val; }

  /// If implicit conversion to SDValue doesn't work, the get() method returns
  /// the SDValue.
  const SDValue &get() const { return Val; }

  /// This returns the SDNode that contains this Use.
  SDNode *getUser() { return User; }
  const SDNode *getUser() const { return User; }

  /// Get the next SDUse in the use list.
  SDUse *getNext() const { return Next; }

  /// Convenience function for get().getNode().
  SDNode *getNode() const { return Val.getNode(); }
  /// Convenience function for get().getResNo().
  unsigned getResNo() const { return Val.getResNo(); }
  /// Convenience function for get().getValueType().
  EVT getValueType() const { return Val.getValueType(); }

  /// Convenience function for get().operator==
  bool operator==(const SDValue &V) const {
    return Val == V;
  }

  /// Convenience function for get().operator!=
  bool operator!=(const SDValue &V) const {
    return Val != V;
  }

  /// Convenience function for get().operator<
  bool operator<(const SDValue &V) const {
    return Val < V;
  }

private:
  friend class SelectionDAG;
  friend class SDNode;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  void setUser(SDNode *p) { User = p; }

  /// Remove this use from its existing use list, assign it the
  /// given value, and add it to the new value's node's use list.
  inline void set(const SDValue &V);
  /// Like set, but only supports initializing a newly-allocated
  /// SDUse with a non-null value.
  inline void setInitial(const SDValue &V);
  /// Like set, but only sets the Node portion of the value,
  /// leaving the ResNo portion unmodified.
  inline void setNode(SDNode *N);

  void addToList(SDUse **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;
    Prev = List;
    *List = this;
  }

  void removeFromList() {
    *Prev = Next;
    if (Next) Next->Prev = Prev;
  }
};

/// simplify_type specializations - Allow casting operators to work directly on
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
  using SimpleType = SDNode *;

  static SimpleType getSimplifiedValue(SDUse &Val) {
    return Val.getNode();
  }
};

/// These are IR-level optimization flags that may be propagated to SDNodes.
/// TODO: This data structure should be shared by the IR optimizer and the
/// backend.
struct SDNodeFlags {
private:
  bool NoUnsignedWrap : 1;
  bool NoSignedWrap : 1;
  bool Exact : 1;
  bool NoNaNs : 1;
  bool NoInfs : 1;
  bool NoSignedZeros : 1;
  bool AllowReciprocal : 1;
  bool AllowContract : 1;
  bool ApproximateFuncs : 1;
  bool AllowReassociation : 1;

  // We assume instructions do not raise floating-point exceptions by default,
  // and only those marked explicitly may do so.  We could choose to represent
  // this via a positive "FPExcept" flag like on the MI level, but having a
  // negative "NoFPExcept" flag here makes the flag intersection logic more
  // straightforward.
  bool NoFPExcept : 1;
  // Instructions with attached 'unpredictable' metadata on IR level.
  bool Unpredictable : 1;

public:
  /// Default constructor turns off all optimization flags.
  SDNodeFlags()
      : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
        NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
        AllowContract(false), ApproximateFuncs(false),
        AllowReassociation(false), NoFPExcept(false), Unpredictable(false) {}

  /// Propagate the fast-math-flags from an IR FPMathOperator.
  void copyFMF(const FPMathOperator &FPMO) {
    setNoNaNs(FPMO.hasNoNaNs());
    setNoInfs(FPMO.hasNoInfs());
    setNoSignedZeros(FPMO.hasNoSignedZeros());
    setAllowReciprocal(FPMO.hasAllowReciprocal());
    setAllowContract(FPMO.hasAllowContract());
    setApproximateFuncs(FPMO.hasApproxFunc());
    setAllowReassociation(FPMO.hasAllowReassoc());
  }

  // These are mutators for each flag.
  void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
  void setNoSignedWrap(bool b) { NoSignedWrap = b; }
  void setExact(bool b) { Exact = b; }
  void setNoNaNs(bool b) { NoNaNs = b; }
  void setNoInfs(bool b) { NoInfs = b; }
  void setNoSignedZeros(bool b) { NoSignedZeros = b; }
  void setAllowReciprocal(bool b) { AllowReciprocal = b; }
  void setAllowContract(bool b) { AllowContract = b; }
  void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
  void setAllowReassociation(bool b) { AllowReassociation = b; }
  void setNoFPExcept(bool b) { NoFPExcept = b; }
  void setUnpredictable(bool b) { Unpredictable = b; }

  // These are accessors for each flag.
  bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
  bool hasNoSignedWrap() const { return NoSignedWrap; }
  bool hasExact() const { return Exact; }
  bool hasNoNaNs() const { return NoNaNs; }
  bool hasNoInfs() const { return NoInfs; }
  bool hasNoSignedZeros() const { return NoSignedZeros; }
  bool hasAllowReciprocal() const { return AllowReciprocal; }
  bool hasAllowContract() const { return AllowContract; }
  bool hasApproximateFuncs() const { return ApproximateFuncs; }
  bool hasAllowReassociation() const { return AllowReassociation; }
  bool hasNoFPExcept() const { return NoFPExcept; }
  bool hasUnpredictable() const { return Unpredictable; }

  /// Clear any flags in this flag set that aren't also set in Flags. All
  /// flags will be cleared if Flags are undefined.
  void intersectWith(const SDNodeFlags Flags) {
    NoUnsignedWrap &= Flags.NoUnsignedWrap;
    NoSignedWrap &= Flags.NoSignedWrap;
    Exact &= Flags.Exact;
    NoNaNs &= Flags.NoNaNs;
    NoInfs &= Flags.NoInfs;
    NoSignedZeros &= Flags.NoSignedZeros;
    AllowReciprocal &= Flags.AllowReciprocal;
    AllowContract &= Flags.AllowContract;
    ApproximateFuncs &= Flags.ApproximateFuncs;
    AllowReassociation &= Flags.AllowReassociation;
    NoFPExcept &= Flags.NoFPExcept;
    Unpredictable &= Flags.Unpredictable;
  }
};
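
// Illustrative sketch (not from the original header): when folding two nodes
// into one, only the flags common to both may be kept on the result. Assumes
// `N0` and `N1` are SDNode* whose flags were set from the IR:
//
//   SDNodeFlags Flags = N0->getFlags();
//   Flags.intersectWith(N1->getFlags());
//   // Flags.hasNoNaNs() is now true only if both inputs had NoNaNs set.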

/// Represents one node in the SelectionDAG.
///
class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
private:
  /// The operation that this node performs.
  int32_t NodeType;

public:
  /// Unique and persistent id per SDNode in the DAG. Used for debug printing.
  /// We do not place that under `#if LLVM_ENABLE_ABI_BREAKING_CHECKS`
  /// intentionally because it adds unneeded complexity without noticeable
  /// benefits (see discussion with @thakis in D120714).
  uint16_t PersistentId = 0xffff;

protected:
  // We define a set of mini-helper classes to help us interpret the bits in our
  // SubclassData.  These are designed to fit within a uint16_t so they pack
  // with PersistentId.

#if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__))
// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
// and give the `pack` pragma push semantics.
#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
#else
#define BEGIN_TWO_BYTE_PACK()
#define END_TWO_BYTE_PACK()
#endif

BEGIN_TWO_BYTE_PACK()
  class SDNodeBitfields {
    friend class SDNode;
    friend class MemIntrinsicSDNode;
    friend class MemSDNode;
    friend class SelectionDAG;

    uint16_t HasDebugValue : 1;
    uint16_t IsMemIntrinsic : 1;
    uint16_t IsDivergent : 1;
  };
  enum { NumSDNodeBits = 3 };

  class ConstantSDNodeBitfields {
    friend class ConstantSDNode;

    uint16_t : NumSDNodeBits;

    uint16_t IsOpaque : 1;
  };

  class MemSDNodeBitfields {
    friend class MemSDNode;
    friend class MemIntrinsicSDNode;
    friend class AtomicSDNode;

    uint16_t : NumSDNodeBits;

    uint16_t IsVolatile : 1;
    uint16_t IsNonTemporal : 1;
    uint16_t IsDereferenceable : 1;
    uint16_t IsInvariant : 1;
  };
  enum { NumMemSDNodeBits = NumSDNodeBits + 4 };

  class LSBaseSDNodeBitfields {
    friend class LSBaseSDNode;
    friend class VPBaseLoadStoreSDNode;
    friend class MaskedLoadStoreSDNode;
    friend class MaskedGatherScatterSDNode;
    friend class VPGatherScatterSDNode;

    uint16_t : NumMemSDNodeBits;

    // This storage is shared between disparate class hierarchies to hold an
    // enumeration specific to the class hierarchy in use.
    //   LSBaseSDNode => enum ISD::MemIndexedMode
    //   VPLoadStoreBaseSDNode => enum ISD::MemIndexedMode
    //   MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
    //   VPGatherScatterSDNode => enum ISD::MemIndexType
    //   MaskedGatherScatterSDNode => enum ISD::MemIndexType
    uint16_t AddressingMode : 3;
  };
  enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };

  class LoadSDNodeBitfields {
    friend class LoadSDNode;
    friend class VPLoadSDNode;
    friend class VPStridedLoadSDNode;
    friend class MaskedLoadSDNode;
    friend class MaskedGatherSDNode;
    friend class VPGatherSDNode;

    uint16_t : NumLSBaseSDNodeBits;

    uint16_t ExtTy : 2; // enum ISD::LoadExtType
    uint16_t IsExpanding : 1;
  };

  class StoreSDNodeBitfields {
    friend class StoreSDNode;
    friend class VPStoreSDNode;
    friend class VPStridedStoreSDNode;
    friend class MaskedStoreSDNode;
    friend class MaskedScatterSDNode;
    friend class VPScatterSDNode;

    uint16_t : NumLSBaseSDNodeBits;

    uint16_t IsTruncating : 1;
    uint16_t IsCompressing : 1;
  };

  union {
    char RawSDNodeBits[sizeof(uint16_t)];
    SDNodeBitfields SDNodeBits;
    ConstantSDNodeBitfields ConstantSDNodeBits;
    MemSDNodeBitfields MemSDNodeBits;
    LSBaseSDNodeBitfields LSBaseSDNodeBits;
    LoadSDNodeBitfields LoadSDNodeBits;
    StoreSDNodeBitfields StoreSDNodeBits;
  };
END_TWO_BYTE_PACK()
#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK

  // RawSDNodeBits must cover the entirety of the union.  This means that all of
  // the union's members must have size <= RawSDNodeBits.  We write the RHS as
  // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
  static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
  static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");

private:
  friend class SelectionDAG;
  // TODO: unfriend HandleSDNode once we fix its operand handling.
  friend class HandleSDNode;

  /// Unique id per SDNode in the DAG.
  int NodeId = -1;

  /// The values that are used by this operation.
  SDUse *OperandList = nullptr;

  /// The types of the values this node defines.  SDNode's may
  /// define multiple values simultaneously.
  const EVT *ValueList;

  /// List of uses for this SDNode.
  SDUse *UseList = nullptr;

  /// The number of entries in the Operand/Value list.
  unsigned short NumOperands = 0;
  unsigned short NumValues;

  // The ordering of the SDNodes. It roughly corresponds to the ordering of the
  // original LLVM instructions.
  // This is used for turning off scheduling, because we'll forgo
  // the normal scheduling algorithms and output the instructions according to
  // this ordering.
  unsigned IROrder;

  /// Source line information.
  DebugLoc debugLoc;

  /// Return a pointer to the specified value type.
  static const EVT *getValueTypeList(EVT VT);

  SDNodeFlags Flags;

  uint32_t CFIType = 0;

public:
  //===--------------------------------------------------------------------===//
  //  Accessors
  //

  /// Return the SelectionDAG opcode value for this node. For
  /// pre-isel nodes (those for which isMachineOpcode returns false), these
  /// are the opcode values in the ISD and <target>ISD namespaces. For
  /// post-isel opcodes, see getMachineOpcode.
  unsigned getOpcode()  const { return (unsigned)NodeType; }

  /// Test if this node has a target-specific opcode (in the
  /// \<target\>ISD namespace).
  bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }

  /// Test if this node has a target-specific opcode that may raise
  /// FP exceptions (in the \<target\>ISD namespace and greater than
  /// FIRST_TARGET_STRICTFP_OPCODE).  Note that all target memory
  /// opcodes are currently automatically considered to possibly raise
  /// FP exceptions as well.
  bool isTargetStrictFPOpcode() const {
    return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
  }

  /// Test if this node has a target-specific
  /// memory-referencing opcode (in the \<target\>ISD namespace and
  /// greater than FIRST_TARGET_MEMORY_OPCODE).
  bool isTargetMemoryOpcode() const {
    return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
  }

  /// Return true if the type of the node type undefined.
  bool isUndef() const { return NodeType == ISD::UNDEF; }

  /// Test if this node is a memory intrinsic (with valid pointer information).
  /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
  /// non-memory intrinsics (with chains) that are not really instances of
  /// MemSDNode. For such nodes, we need some extra state to determine the
  /// proper classof relationship.
  bool isMemIntrinsic() const {
    return (NodeType == ISD::INTRINSIC_W_CHAIN ||
            NodeType == ISD::INTRINSIC_VOID) &&
           SDNodeBits.IsMemIntrinsic;
  }

  /// Test if this node is a strict floating point pseudo-op.
  bool isStrictFPOpcode() {
    switch (NodeType) {
      default:
        return false;
      case ISD::STRICT_FP16_TO_FP:
      case ISD::STRICT_FP_TO_FP16:
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN:
#include "llvm/IR/ConstrainedOps.def"
        return true;
    }
  }

  /// Test if this node is a vector predication operation.
  bool isVPOpcode() const { return ISD::isVPOpcode(getOpcode()); }

  /// Test if this node has a post-isel opcode, directly
  /// corresponding to a MachineInstr opcode.
  bool isMachineOpcode() const { return NodeType < 0; }

  /// This may only be called if isMachineOpcode returns
  /// true. It returns the MachineInstr opcode value that the node's opcode
  /// corresponds to.
  unsigned getMachineOpcode() const {
    assert(isMachineOpcode() && "Not a MachineInstr opcode!");
    return ~NodeType;
  }

  bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
  void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }

  bool isDivergent() const { return SDNodeBits.IsDivergent; }

  /// Return true if there are no uses of this node.
  bool use_empty() const { return UseList == nullptr; }

  /// Return true if there is exactly one use of this node.
  bool hasOneUse() const { return hasSingleElement(uses()); }

  /// Return the number of uses of this node. This method takes
  /// time proportional to the number of uses.
  size_t use_size() const { return std::distance(use_begin(), use_end()); }

  /// Return the unique node id.
  int getNodeId() const { return NodeId; }

  /// Set unique node id.
  void setNodeId(int Id) { NodeId = Id; }

  /// Return the node ordering.
  unsigned getIROrder() const { return IROrder; }

  /// Set the node ordering.
  void setIROrder(unsigned Order) { IROrder = Order; }

  /// Return the source location info.
  const DebugLoc &getDebugLoc() const { return debugLoc; }

  /// Set source location info.  Try to avoid this; putting
  /// it in the constructor is preferable.
  void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }

  /// This class provides iterator support for SDUse
  /// operands that use a specific SDNode.
  class use_iterator {
    friend class SDNode;

    SDUse *Op = nullptr;

    explicit use_iterator(SDUse *op) : Op(op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SDUse;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    use_iterator() = default;
    use_iterator(const use_iterator &I) = default;
    use_iterator &operator=(const use_iterator &) = default;

    bool operator==(const use_iterator &x) const { return Op == x.Op; }
    bool operator!=(const use_iterator &x) const {
      return !operator==(x);
    }

    /// Return true if this iterator is at the end of uses list.
    bool atEnd() const { return Op == nullptr; }

    // Iterator traversal: forward iteration only.
    use_iterator &operator++() {          // Preincrement
      assert(Op && "Cannot increment end iterator!");
      Op = Op->getNext();
      return *this;
    }

    use_iterator operator++(int) {        // Postincrement
      use_iterator tmp = *this; ++*this; return tmp;
    }

    /// Retrieve a pointer to the current user node.
    SDNode *operator*() const {
      assert(Op && "Cannot dereference end iterator!");
      return Op->getUser();
    }

    SDNode *operator->() const { return operator*(); }

    SDUse &getUse() const { return *Op; }

    /// Retrieve the operand # of this use in its user.
    unsigned getOperandNo() const {
      assert(Op && "Cannot dereference end iterator!");
      return (unsigned)(Op - Op->getUser()->OperandList);
    }
  };

  /// Provide iteration support to walk over all uses of an SDNode.
  use_iterator use_begin() const {
    return use_iterator(UseList);
  }

  static use_iterator use_end() { return use_iterator(nullptr); }

  inline iterator_range<use_iterator> uses() {
    return make_range(use_begin(), use_end());
  }
  inline iterator_range<use_iterator> uses() const {
    return make_range(use_begin(), use_end());
  }
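
  // Illustrative sketch (not from the original header): dereferencing a
  // use_iterator yields the *user* SDNode, while getUse()/getOperandNo()
  // expose the use itself. Assumes `N` is an SDNode*:
  //
  //   for (SDNode *User : N->uses()) {
  //     // User reads at least one result of N.
  //   }
  //   for (auto UI = N->use_begin(), UE = N->use_end(); UI != UE; ++UI) {
  //     unsigned OpNo = UI.getOperandNo(); // which operand of *UI uses N
  //     (void)OpNo;
  //   }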

  /// Return true if there are exactly NUSES uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;

  /// Return true if there are any uses of the indicated value.
  /// This method ignores uses of other values defined by this operation.
  bool hasAnyUseOfValue(unsigned Value) const;

  /// Return true if this node is the only use of N.
  bool isOnlyUserOf(const SDNode *N) const;

  /// Return true if this node is an operand of N.
  bool isOperandOf(const SDNode *N) const;

  /// Return true if this node is a predecessor of N.
  /// NOTE: Implemented on top of hasPredecessor and every bit as
  /// expensive. Use carefully.
  bool isPredecessorOf(const SDNode *N) const {
    return N->hasPredecessor(this);
  }

  /// Return true if N is a predecessor of this node.
  /// N is either an operand of this node, or can be reached by recursively
  /// traversing up the operands.
  /// NOTE: This is an expensive method. Use it carefully.
  bool hasPredecessor(const SDNode *N) const;

  /// Returns true if N is a predecessor of any node in Worklist. This
  /// helper keeps Visited and Worklist sets externally so that searches
  /// can be unioned and performed in parallel, results can be cached
  /// across queries, and Worklist can be grown incrementally. Stops early
  /// if N is found but will resume. Remember to clear Visited and Worklist
  /// if the DAG changes. MaxSteps gives a maximum number of nodes to visit
  /// before giving up. The TopologicalPrune flag signals that positive
  /// NodeIds are topologically ordered (operands have strictly smaller node
  /// ids) and the search can be pruned by leveraging this.
  static bool hasPredecessorHelper(const SDNode *N,
                                   SmallPtrSetImpl<const SDNode *> &Visited,
                                   SmallVectorImpl<const SDNode *> &Worklist,
                                   unsigned int MaxSteps = 0,
                                   bool TopologicalPrune = false) {
    SmallVector<const SDNode *, 8> DeferredNodes;
    if (Visited.count(N))
      return true;

    // Node ids are assigned in three places: as a topological ordering
    // (> 0), during legalization (values set to 0), and for new nodes
    // (set to -1). If N has a topological id then we know that all nodes
    // with smaller ids cannot be successors of N, so we need not check
    // them. Filter out all such nodes that cannot match; we add them back
    // to the worklist before exit in case of multiple calls. Note that
    // during selection the topological ordering may be violated if a
    // node's predecessor is selected before the node itself. We mark this
    // during selection by negating the ids of unselected successors, and
    // restrict topological pruning to positive ids.

    int NId = N->getNodeId();
    // If we invalidated the id, reconstruct the original NId.
    if (NId < -1)
      NId = -(NId + 1);

    bool Found = false;
    while (!Worklist.empty()) {
      const SDNode *M = Worklist.pop_back_val();
      int MId = M->getNodeId();
      if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
          (MId > 0) && (MId < NId)) {
        DeferredNodes.push_back(M);
        continue;
      }
      for (const SDValue &OpV : M->op_values()) {
        SDNode *Op = OpV.getNode();
        if (Visited.insert(Op).second)
          Worklist.push_back(Op);
        if (Op == N)
          Found = true;
      }
      if (Found)
        break;
      if (MaxSteps != 0 && Visited.size() >= MaxSteps)
        break;
    }
    // Push deferred nodes back on worklist.
    Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
    // If we bailed out early, conservatively return true (treat N as found).
    if (MaxSteps != 0 && Visited.size() >= MaxSteps)
      return true;
    return Found;
  }
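
  // Illustrative sketch (hypothetical nodes 'N' and 'M'): the caller owns
  // Visited and Worklist, so results can be cached and reused across queries.
  //
  //   SmallPtrSet<const SDNode *, 32> Visited;
  //   SmallVector<const SDNode *, 16> Worklist;
  //   Worklist.push_back(M);
  //   if (SDNode::hasPredecessorHelper(N, Visited, Worklist)) {
  //     // N is reachable from M through operands (or, with a nonzero
  //     // MaxSteps, the search gave up and conservatively answered true).
  //   }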

  /// Return true if all the users of N are contained in Nodes.
  /// NOTE: Requires at least one of Nodes to actually use N, but does not
  /// require every element of Nodes to be a user.
  static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);

  /// Return the number of values used by this operation.
  unsigned getNumOperands() const { return NumOperands; }

  /// Return the maximum number of operands that an SDNode can hold.
  static constexpr size_t getMaxNumOperands() {
    return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
  }

  /// Helper method returns the integer value of a ConstantSDNode operand.
  inline uint64_t getConstantOperandVal(unsigned Num) const;

  /// Helper method returns the APInt of a ConstantSDNode operand.
  inline const APInt &getConstantOperandAPInt(unsigned Num) const;

  const SDValue &getOperand(unsigned Num) const {
    assert(Num < NumOperands && "Invalid child # of SDNode!");
    return OperandList[Num];
  }

  using op_iterator = SDUse *;

  op_iterator op_begin() const { return OperandList; }
  op_iterator op_end() const { return OperandList+NumOperands; }
  ArrayRef<SDUse> ops() const { return ArrayRef(op_begin(), op_end()); }

  /// Iterator for directly iterating over the operand SDValue's.
  struct value_op_iterator
      : iterator_adaptor_base<value_op_iterator, op_iterator,
                              std::random_access_iterator_tag, SDValue,
                              ptrdiff_t, value_op_iterator *,
                              value_op_iterator *> {
    explicit value_op_iterator(SDUse *U = nullptr)
      : iterator_adaptor_base(U) {}

    const SDValue &operator*() const { return I->get(); }
  };

  iterator_range<value_op_iterator> op_values() const {
    return make_range(value_op_iterator(op_begin()),
                      value_op_iterator(op_end()));
  }
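
  // Illustrative sketch ('N' is a hypothetical node): op_values() yields the
  // operands directly as SDValues.
  //
  //   for (const SDValue &Op : N->op_values()) {
  //     if (Op.getValueType() == MVT::i32) {
  //       // ...
  //     }
  //   }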

  SDVTList getVTList() const {
    SDVTList X = { ValueList, NumValues };
    return X;
  }

  /// If this node has a glue operand, return the node
  /// to which the glue operand points. Otherwise return NULL.
  SDNode *getGluedNode() const {
    if (getNumOperands() != 0 &&
        getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
      return getOperand(getNumOperands()-1).getNode();
    return nullptr;
  }

  /// If this node has a glue value with a user, return
  /// the user (there is at most one). Otherwise return NULL.
  SDNode *getGluedUser() const {
    for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
      if (UI.getUse().get().getValueType() == MVT::Glue)
        return *UI;
    return nullptr;
  }

  SDNodeFlags getFlags() const { return Flags; }
  void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }

  /// Clear any flags in this node that aren't also set in Flags.
  /// If Flags is not in a defined state then this has no effect.
  void intersectFlagsWith(const SDNodeFlags Flags);

  void setCFIType(uint32_t Type) { CFIType = Type; }
  uint32_t getCFIType() const { return CFIType; }

  /// Return the number of values defined/returned by this operator.
  unsigned getNumValues() const { return NumValues; }

  /// Return the type of a specified result.
  EVT getValueType(unsigned ResNo) const {
    assert(ResNo < NumValues && "Illegal result number!");
    return ValueList[ResNo];
  }

  /// Return the type of a specified result as a simple type.
  MVT getSimpleValueType(unsigned ResNo) const {
    return getValueType(ResNo).getSimpleVT();
  }

  /// Returns getValueType(ResNo).getSizeInBits().
  ///
  /// If the value type is a scalable vector type, the scalable property will
  /// be set and the runtime size will be a positive integer multiple of the
  /// base size.
  TypeSize getValueSizeInBits(unsigned ResNo) const {
    return getValueType(ResNo).getSizeInBits();
  }

  using value_iterator = const EVT *;

  value_iterator value_begin() const { return ValueList; }
  value_iterator value_end() const { return ValueList+NumValues; }
  iterator_range<value_iterator> values() const {
    return llvm::make_range(value_begin(), value_end());
  }

  /// Return the opcode of this operation for printing.
  std::string getOperationName(const SelectionDAG *G = nullptr) const;
  static const char* getIndexedModeName(ISD::MemIndexedMode AM);
  void print_types(raw_ostream &OS, const SelectionDAG *G) const;
  void print_details(raw_ostream &OS, const SelectionDAG *G) const;
  void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
  void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and all children down to
  /// the leaves.  The given SelectionDAG allows target-specific nodes
  /// to be printed in human-readable form.  Unlike printr, this will
  /// print the whole DAG, including children that appear multiple
  /// times.
  ///
  void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;

  /// Print a SelectionDAG node and children up to
  /// depth "depth."  The given SelectionDAG allows target-specific
  /// nodes to be printed in human-readable form.  Unlike printr, this
  /// will print children that appear multiple times wherever they are
  /// used.
  ///
  void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
                       unsigned depth = 100) const;

  /// Dump this node, for debugging.
  void dump() const;

  /// Dump (recursively) this node and its use-def subgraph.
  void dumpr() const;

  /// Dump this node, for debugging.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dump(const SelectionDAG *G) const;

  /// Dump (recursively) this node and its use-def subgraph.
  /// The given SelectionDAG allows target-specific nodes to be printed
  /// in human-readable form.
  void dumpr(const SelectionDAG *G) const;

  /// printrFull to dbgs().  The given SelectionDAG allows
  /// target-specific nodes to be printed in human-readable form.
  /// Unlike dumpr, this will print the whole DAG, including children
  /// that appear multiple times.
  void dumprFull(const SelectionDAG *G = nullptr) const;

  /// printrWithDepth to dbgs().  The given
  /// SelectionDAG allows target-specific nodes to be printed in
  /// human-readable form.  Unlike dumpr, this will print children
  /// that appear multiple times wherever they are used.
  ///
  void dumprWithDepth(const SelectionDAG *G = nullptr,
                      unsigned depth = 100) const;

  /// Gather unique data for the node.
  void Profile(FoldingSetNodeID &ID) const;

  /// This method should only be used by the SDUse class.
  void addUse(SDUse &U) { U.addToList(&UseList); }

protected:
  static SDVTList getSDVTList(EVT VT) {
    SDVTList Ret = { getValueTypeList(VT), 1 };
    return Ret;
  }

  /// Create an SDNode.
  ///
  /// SDNodes are created without any operands, and never own the operand
  /// storage. To add operands, see SelectionDAG::createOperands.
  SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
      : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
        IROrder(Order), debugLoc(std::move(dl)) {
    memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
    assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
    assert(NumValues == VTs.NumVTs &&
           "NumValues wasn't wide enough for its operands!");
  }

  /// Release the operands and set this node to have zero operands.
  void DropOperands();
};

/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
/// into SDNode creation functions.
/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
/// from the original Instruction, and IROrder is the ordinal position of
/// the instruction.
/// When an SDNode is created after the DAG has been built, both the DebugLoc
/// and the IROrder are propagated from the original SDNode.
/// So the SDLoc class provides two constructors besides the default one: one
/// to be used by the DAGBuilder, the other to be used by others.
class SDLoc {
private:
  DebugLoc DL;
  int IROrder = 0;

public:
  SDLoc() = default;
  SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
  SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
  SDLoc(const Instruction *I, int Order) : IROrder(Order) {
    assert(Order >= 0 && "bad IROrder");
    if (I)
      DL = I->getDebugLoc();
  }

  unsigned getIROrder() const { return IROrder; }
  const DebugLoc &getDebugLoc() const { return DL; }
};
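
// Illustrative sketch (hypothetical 'DAG', 'N', 'LHS' and 'RHS'): new nodes
// typically take their location from the node they replace or combine.
//
//   SDLoc DL(N);
//   SDValue Sum = DAG.getNode(ISD::ADD, DL, N->getValueType(0), LHS, RHS);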

// Define inline functions from the SDValue class.

inline SDValue::SDValue(SDNode *node, unsigned resno)
    : Node(node), ResNo(resno) {
  // Explicitly check for !ResNo to avoid use-after-free, because there are
  // callers that use SDValue(N, 0) with a deleted N to indicate successful
  // combines.
  assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&
         "Invalid result number for the given node!");
  assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
}

inline unsigned SDValue::getOpcode() const {
  return Node->getOpcode();
}

inline EVT SDValue::getValueType() const {
  return Node->getValueType(ResNo);
}

inline unsigned SDValue::getNumOperands() const {
  return Node->getNumOperands();
}

inline const SDValue &SDValue::getOperand(unsigned i) const {
  return Node->getOperand(i);
}

inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
  return Node->getConstantOperandVal(i);
}

inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
  return Node->getConstantOperandAPInt(i);
}

inline bool SDValue::isTargetOpcode() const {
  return Node->isTargetOpcode();
}

inline bool SDValue::isTargetMemoryOpcode() const {
  return Node->isTargetMemoryOpcode();
}

inline bool SDValue::isMachineOpcode() const {
  return Node->isMachineOpcode();
}

inline unsigned SDValue::getMachineOpcode() const {
  return Node->getMachineOpcode();
}

inline bool SDValue::isUndef() const {
  return Node->isUndef();
}

inline bool SDValue::use_empty() const {
  return !Node->hasAnyUseOfValue(ResNo);
}

inline bool SDValue::hasOneUse() const {
  return Node->hasNUsesOfValue(1, ResNo);
}

inline const DebugLoc &SDValue::getDebugLoc() const {
  return Node->getDebugLoc();
}

inline void SDValue::dump() const {
  return Node->dump();
}

inline void SDValue::dump(const SelectionDAG *G) const {
  return Node->dump(G);
}

inline void SDValue::dumpr() const {
  return Node->dumpr();
}

inline void SDValue::dumpr(const SelectionDAG *G) const {
  return Node->dumpr(G);
}

// Define inline functions from the SDUse class.

inline void SDUse::set(const SDValue &V) {
  if (Val.getNode()) removeFromList();
  Val = V;
  if (V.getNode())
    V->addUse(*this);
}

inline void SDUse::setInitial(const SDValue &V) {
  Val = V;
  V->addUse(*this);
}

inline void SDUse::setNode(SDNode *N) {
  if (Val.getNode()) removeFromList();
  Val.setNode(N);
  if (N) N->addUse(*this);
}

/// This class is used to form a handle around another node that
/// is persistent and is updated across invocations of replaceAllUsesWith on its
/// operand.  This node should be directly created by end-users and not added to
/// the AllNodes list.
class HandleSDNode : public SDNode {
  SDUse Op;

public:
  explicit HandleSDNode(SDValue X)
    : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
    // HandleSDNodes are never inserted into the DAG, so they won't be
    // auto-numbered. Use ID 65535 as a sentinel.
    PersistentId = 0xffff;

    // Manually set up the operand list. This node type is special in that it's
    // always stack allocated and SelectionDAG does not manage its operands.
    // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
    // be so special.
    Op.setUser(this);
    Op.setInitial(X);
    NumOperands = 1;
    OperandList = &Op;
  }
  ~HandleSDNode();

  const SDValue &getValue() const { return Op; }
};
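
// Illustrative sketch (hypothetical SDValue 'V'): a stack-allocated
// HandleSDNode keeps V registered as a use, so V can be recovered even if
// its node is replaced (e.g. by replaceAllUsesWith) in the meantime.
//
//   HandleSDNode Handle(V);
//   // ... DAG surgery that may replace V's node ...
//   SDValue NewV = Handle.getValue(); // reflects any replacement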

class AddrSpaceCastSDNode : public SDNode {
private:
  unsigned SrcAddrSpace;
  unsigned DestAddrSpace;

public:
  AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
                      unsigned SrcAS, unsigned DestAS);

  unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
  unsigned getDestAddressSpace() const { return DestAddrSpace; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ADDRSPACECAST;
  }
};

/// This is an abstract virtual class for memory operations.
class MemSDNode : public SDNode {
private:
  // VT of in-memory value.
  EVT MemoryVT;

protected:
  /// Memory reference information.
  MachineMemOperand *MMO;

public:
  MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
            EVT memvt, MachineMemOperand *MMO);

  bool readMem() const { return MMO->isLoad(); }
  bool writeMem() const { return MMO->isStore(); }

  /// Returns the alignment of the memory access.
  Align getOriginalAlign() const { return MMO->getBaseAlign(); }
  Align getAlign() const { return MMO->getAlign(); }

  /// Return the SubclassData value, without HasDebugValue. This contains an
  /// encoding of the volatile flag, as well as bits used by subclasses. This
  /// function should only be used to compute a FoldingSetNodeID value.
  /// The HasDebugValue bit is masked out because the CSE map needs to match
  /// nodes with debug info against nodes without debug info. The same applies
  /// to the isDivergent bit.
  unsigned getRawSubclassData() const {
    uint16_t Data;
    union {
      char RawSDNodeBits[sizeof(uint16_t)];
      SDNodeBitfields SDNodeBits;
    };
    memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
    SDNodeBits.HasDebugValue = 0;
    SDNodeBits.IsDivergent = false;
    memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
    return Data;
  }

  bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
  bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
  bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
  bool isInvariant() const { return MemSDNodeBits.IsInvariant; }

  // Returns the offset from the location of the access.
  int64_t getSrcValueOffset() const { return MMO->getOffset(); }

  /// Returns the AA info that describes the dereference.
  AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }

  /// Returns the Ranges that describes the dereference.
  const MDNode *getRanges() const { return MMO->getRanges(); }

  /// Returns the synchronization scope ID for this memory operation.
  SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }

  /// Return the atomic ordering requirements for this memory operation. For
  /// cmpxchg atomic operations, return the atomic ordering requirements when
  /// the store occurs.
  AtomicOrdering getSuccessOrdering() const {
    return MMO->getSuccessOrdering();
  }

  /// Return a single atomic ordering that is at least as strong as both the
  /// success and failure orderings for an atomic operation.  (For operations
  /// other than cmpxchg, this is equivalent to getSuccessOrdering().)
  AtomicOrdering getMergedOrdering() const { return MMO->getMergedOrdering(); }

  /// Return true if the memory operation ordering is Unordered or higher.
  bool isAtomic() const { return MMO->isAtomic(); }

  /// Returns true if the memory operation doesn't imply any ordering
  /// constraints on surrounding memory operations beyond the normal memory
  /// aliasing rules.
  bool isUnordered() const { return MMO->isUnordered(); }

  /// Returns true if the memory operation is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Return the type of the in-memory value.
  EVT getMemoryVT() const { return MemoryVT; }

  /// Return a MachineMemOperand object describing the memory
  /// reference performed by this operation.
  MachineMemOperand *getMemOperand() const { return MMO; }

  const MachinePointerInfo &getPointerInfo() const {
    return MMO->getPointerInfo();
  }

  /// Return the address space for the associated pointer
  unsigned getAddressSpace() const {
    return getPointerInfo().getAddrSpace();
  }

  /// Update this MemSDNode's MachineMemOperand information
  /// to reflect the alignment of NewMMO, if it has a greater alignment.
  /// This must only be used when the new alignment applies to all users of
  /// this MachineMemOperand.
  void refineAlignment(const MachineMemOperand *NewMMO) {
    MMO->refineAlignment(NewMMO);
  }

  const SDValue &getChain() const { return getOperand(0); }

  const SDValue &getBasePtr() const {
    switch (getOpcode()) {
    case ISD::STORE:
    case ISD::VP_STORE:
    case ISD::MSTORE:
    case ISD::VP_SCATTER:
    case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
      return getOperand(2);
    case ISD::MGATHER:
    case ISD::MSCATTER:
      return getOperand(3);
    default:
      return getOperand(1);
    }
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // For some targets, we lower some target intrinsics to a MemIntrinsicNode
    // with either an intrinsic or a target opcode.
    switch (N->getOpcode()) {
    case ISD::LOAD:
    case ISD::STORE:
    case ISD::PREFETCH:
    case ISD::ATOMIC_CMP_SWAP:
    case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    case ISD::ATOMIC_SWAP:
    case ISD::ATOMIC_LOAD_ADD:
    case ISD::ATOMIC_LOAD_SUB:
    case ISD::ATOMIC_LOAD_AND:
    case ISD::ATOMIC_LOAD_CLR:
    case ISD::ATOMIC_LOAD_OR:
    case ISD::ATOMIC_LOAD_XOR:
    case ISD::ATOMIC_LOAD_NAND:
    case ISD::ATOMIC_LOAD_MIN:
    case ISD::ATOMIC_LOAD_MAX:
    case ISD::ATOMIC_LOAD_UMIN:
    case ISD::ATOMIC_LOAD_UMAX:
    case ISD::ATOMIC_LOAD_FADD:
    case ISD::ATOMIC_LOAD_FSUB:
    case ISD::ATOMIC_LOAD_FMAX:
    case ISD::ATOMIC_LOAD_FMIN:
    case ISD::ATOMIC_LOAD_UINC_WRAP:
    case ISD::ATOMIC_LOAD_UDEC_WRAP:
    case ISD::ATOMIC_LOAD:
    case ISD::ATOMIC_STORE:
    case ISD::MLOAD:
    case ISD::MSTORE:
    case ISD::MGATHER:
    case ISD::MSCATTER:
    case ISD::VP_LOAD:
    case ISD::VP_STORE:
    case ISD::VP_GATHER:
    case ISD::VP_SCATTER:
    case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
    case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
    case ISD::GET_FPENV_MEM:
    case ISD::SET_FPENV_MEM:
      return true;
    default:
      return N->isMemIntrinsic() || N->isTargetMemoryOpcode();
    }
  }
};
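
// Illustrative sketch (hypothetical 'N'): MemSDNode forwards most queries to
// the underlying MachineMemOperand.
//
//   if (auto *MemN = dyn_cast<MemSDNode>(N)) {
//     if (!MemN->isSimple()) {
//       // Atomic or volatile access: transform conservatively.
//     }
//     Align A = MemN->getAlign();
//     EVT MemVT = MemN->getMemoryVT(); // in-memory type, not result type
//   }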

/// This is an SDNode representing atomic operations.
class AtomicSDNode : public MemSDNode {
public:
  AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
               EVT MemVT, MachineMemOperand *MMO)
    : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
    assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||
            MMO->isAtomic()) && "then why are we using an AtomicSDNode?");
  }

  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getVal() const { return getOperand(2); }

  /// Returns true if this SDNode represents a cmpxchg atomic operation, false
  /// otherwise.
  bool isCompareAndSwap() const {
    unsigned Op = getOpcode();
    return Op == ISD::ATOMIC_CMP_SWAP ||
           Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
  }

  /// For cmpxchg atomic operations, return the atomic ordering requirements
  /// when store does not occur.
  AtomicOrdering getFailureOrdering() const {
    assert(isCompareAndSwap() && "Must be cmpxchg operation");
    return MMO->getFailureOrdering();
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ATOMIC_CMP_SWAP     ||
           N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
           N->getOpcode() == ISD::ATOMIC_SWAP         ||
           N->getOpcode() == ISD::ATOMIC_LOAD_ADD     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_SUB     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_AND     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_CLR     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_OR      ||
           N->getOpcode() == ISD::ATOMIC_LOAD_XOR     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_NAND    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MIN     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_MAX     ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMIN    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UMAX    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FADD    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FSUB    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FMAX    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_FMIN    ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UINC_WRAP ||
           N->getOpcode() == ISD::ATOMIC_LOAD_UDEC_WRAP ||
           N->getOpcode() == ISD::ATOMIC_LOAD         ||
           N->getOpcode() == ISD::ATOMIC_STORE;
  }
};
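
// Illustrative sketch (hypothetical 'N'): querying ordering constraints,
// including the cmpxchg-only failure ordering.
//
//   if (auto *AN = dyn_cast<AtomicSDNode>(N)) {
//     AtomicOrdering Success = AN->getSuccessOrdering();
//     if (AN->isCompareAndSwap()) {
//       AtomicOrdering Failure = AN->getFailureOrdering();
//     }
//   }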

/// This SDNode is used for target intrinsics that touch
/// memory and need an associated MachineMemOperand. Its opcode may be
/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
class MemIntrinsicSDNode : public MemSDNode {
public:
  MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
                     SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
      : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
    SDNodeBits.IsMemIntrinsic = true;
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    // We lower some target intrinsics to their target opcode early, so a
    // node with a target opcode can be of this class.
    return N->isMemIntrinsic()             ||
           N->getOpcode() == ISD::PREFETCH ||
           N->isTargetMemoryOpcode();
  }
};

/// This SDNode is used to implement the code generator
/// support for the llvm IR shufflevector instruction.  It combines elements
/// from two input vectors into a new input vector, with the selection and
/// ordering of elements determined by an array of integers, referred to as
/// the shuffle mask.  For input vectors of width N, mask indices of 0..N-1
/// refer to elements from the LHS input, and indices from N to 2N-1 refer to
/// elements from the RHS input.
/// An index of -1 is treated as undef, such that the code generator may put
/// any value in the corresponding element of the result.
class ShuffleVectorSDNode : public SDNode {
  // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
  // is freed when the SelectionDAG object is destroyed.
  const int *Mask;

protected:
  friend class SelectionDAG;

  ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
      : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}

public:
  ArrayRef<int> getMask() const {
    EVT VT = getValueType(0);
    return ArrayRef(Mask, VT.getVectorNumElements());
  }

  int getMaskElt(unsigned Idx) const {
    assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
    return Mask[Idx];
  }

  bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }

  int getSplatIndex() const {
    assert(isSplat() && "Cannot get splat index for non-splat!");
    EVT VT = getValueType(0);
    for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
      if (Mask[i] >= 0)
        return Mask[i];

    // We can choose any index value here and be correct because all elements
    // are undefined. Return 0 to give callers the best chance to simplify.
    return 0;
  }

  static bool isSplatMask(const int *Mask, EVT VT);

  /// Change values in a shuffle permute mask assuming
  /// the two vector operands have swapped position.
  static void commuteMask(MutableArrayRef<int> Mask) {
    unsigned NumElems = Mask.size();
    for (unsigned i = 0; i != NumElems; ++i) {
      int idx = Mask[i];
      if (idx < 0)
        continue;
      else if (idx < (int)NumElems)
        Mask[i] = idx + NumElems;
      else
        Mask[i] = idx - NumElems;
    }
  }
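
  // For example, with two 4-element input vectors, commuteMask turns the
  // mask {0, 5, 2, 7} into {4, 1, 6, 3}; undef (-1) entries are left as-is.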

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VECTOR_SHUFFLE;
  }
};

class ConstantSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantInt *Value;

  ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
               getSDVTList(VT)),
        Value(val) {
    ConstantSDNodeBits.IsOpaque = isOpaque;
  }

public:
  const ConstantInt *getConstantIntValue() const { return Value; }
  const APInt &getAPIntValue() const { return Value->getValue(); }
  uint64_t getZExtValue() const { return Value->getZExtValue(); }
  int64_t getSExtValue() const { return Value->getSExtValue(); }
  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
    return Value->getLimitedValue(Limit);
  }
  MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
  Align getAlignValue() const { return Value->getAlignValue(); }

  bool isOne() const { return Value->isOne(); }
  bool isZero() const { return Value->isZero(); }
  bool isAllOnes() const { return Value->isMinusOne(); }
  bool isMaxSignedValue() const { return Value->isMaxValue(true); }
  bool isMinSignedValue() const { return Value->isMinValue(true); }

  bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::Constant ||
           N->getOpcode() == ISD::TargetConstant;
  }
};

uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
  return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
}

const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
  return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
}

class ConstantFPSDNode : public SDNode {
  friend class SelectionDAG;

  const ConstantFP *Value;

  ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
      : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
               DebugLoc(), getSDVTList(VT)),
        Value(val) {}

public:
  const APFloat& getValueAPF() const { return Value->getValueAPF(); }
  const ConstantFP *getConstantFPValue() const { return Value; }

  /// Return true if the value is positive or negative zero.
  bool isZero() const { return Value->isZero(); }

  /// Return true if the value is a NaN.
  bool isNaN() const { return Value->isNaN(); }

  /// Return true if the value is an infinity
  bool isInfinity() const { return Value->isInfinity(); }

  /// Return true if the value is negative.
  bool isNegative() const { return Value->isNegative(); }

  /// We don't rely on operator== working on double values, as
  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
  /// As such, this method can be used to do an exact bit-for-bit comparison of
  /// two floating point values.
  ///
  /// We leave the version with the double argument here because it's just so
  /// convenient to write "2.0" and the like.  Without this function we'd
  /// have to duplicate its logic everywhere it's called.
  bool isExactlyValue(double V) const {
    return Value->getValueAPF().isExactlyValue(V);
  }
  bool isExactlyValue(const APFloat& V) const;

  static bool isValueValidForType(EVT VT, const APFloat& Val);

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ConstantFP ||
           N->getOpcode() == ISD::TargetConstantFP;
  }
};

/// Returns true if \p V is a constant integer zero.
bool isNullConstant(SDValue V);

/// Returns true if \p V is an FP constant with a value of positive zero.
bool isNullFPConstant(SDValue V);

/// Returns true if \p V is an integer constant with all bits set.
bool isAllOnesConstant(SDValue V);

/// Returns true if \p V is a constant integer one.
bool isOneConstant(SDValue V);

/// Returns true if \p V is a constant min signed integer value.
bool isMinSignedConstant(SDValue V);

/// Returns true if \p V is a neutral element of Opc with Flags.
/// When OperandNo is 0, it checks that V is a left identity. Otherwise, it
/// checks that V is a right identity.
bool isNeutralConstant(unsigned Opc, SDNodeFlags Flags, SDValue V,
                       unsigned OperandNo);

/// Return the non-bitcasted source operand of \p V if it exists.
/// If \p V is not a bitcasted value, it is returned as-is.
SDValue peekThroughBitcasts(SDValue V);

/// Return the non-bitcasted and one-use source operand of \p V if it exists.
/// If \p V is not a bitcasted one-use value, it is returned as-is.
SDValue peekThroughOneUseBitcasts(SDValue V);

/// Return the non-extracted vector source operand of \p V if it exists.
/// If \p V is not an extracted subvector, it is returned as-is.
SDValue peekThroughExtractSubvectors(SDValue V);

/// Return the non-truncated source operand of \p V if it exists.
/// If \p V is not a truncation, it is returned as-is.
SDValue peekThroughTruncates(SDValue V);

/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
/// constant is canonicalized to be operand 1.
bool isBitwiseNot(SDValue V, bool AllowUndefs = false);

/// If \p V is a bitwise not, returns the inverted operand. Otherwise returns
/// an empty SDValue. Only bits set in \p Mask are required to be inverted,
/// other bits may be arbitrary.
SDValue getBitwiseNotOperand(SDValue V, SDValue Mask, bool AllowUndefs);

/// Returns the SDNode if it is a constant splat BuildVector or constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant int.
ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
                                    bool AllowUndefs = false,
                                    bool AllowTruncation = false);

/// Returns the SDNode if it is a constant splat BuildVector or constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);

/// Returns the SDNode if it is a demanded constant splat BuildVector or
/// constant float.
ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
                                        bool AllowUndefs = false);

/// Return true if the value is a constant 0 integer or a splatted vector of
/// a constant 0 integer (with no undefs by default).
/// Build vector implicit truncation is not an issue for null values.
bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant 1 integer or a splatted vector of a
/// constant 1 integer (with no undefs).
/// Build vector implicit truncation is allowed, but the truncated bits need to
/// be zero.
bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);

/// Return true if the value is a constant -1 integer or a splatted vector of a
/// constant -1 integer (with no undefs).
/// Does not permit build vector implicit truncation.
bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);

/// Return true if \p V is either an integer or FP constant.
inline bool isIntOrFPConstant(SDValue V) {
  return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
}
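
// Illustrative sketch (hypothetical binary node 'N'): these predicates make
// identity-value checks concise.
//
//   if (N->getOpcode() == ISD::ADD && isNullConstant(N->getOperand(1)))
//     return N->getOperand(0); // x + 0 --> x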

class GlobalAddressSDNode : public SDNode {
  friend class SelectionDAG;

  const GlobalValue *TheGlobal;
  int64_t Offset;
  unsigned TargetFlags;

  GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
                      const GlobalValue *GA, EVT VT, int64_t o,
                      unsigned TF);

public:
  const GlobalValue *getGlobal() const { return TheGlobal; }
  int64_t getOffset() const { return Offset; }
  unsigned getTargetFlags() const { return TargetFlags; }
  // Return the address space this GlobalAddress belongs to.
  unsigned getAddressSpace() const;

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::GlobalAddress ||
           N->getOpcode() == ISD::TargetGlobalAddress ||
           N->getOpcode() == ISD::GlobalTLSAddress ||
           N->getOpcode() == ISD::TargetGlobalTLSAddress;
  }
};

class FrameIndexSDNode : public SDNode {
  friend class SelectionDAG;

  int FI;

  FrameIndexSDNode(int fi, EVT VT, bool isTarg)
    : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
      0, DebugLoc(), getSDVTList(VT)), FI(fi) {
  }

public:
  int getIndex() const { return FI; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::FrameIndex ||
           N->getOpcode() == ISD::TargetFrameIndex;
  }
};

/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
/// the offset and size that are started/ended in the underlying FrameIndex.
class LifetimeSDNode : public SDNode {
  friend class SelectionDAG;
  int64_t Size;
  int64_t Offset; // -1 if offset is unknown.

  LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
                 SDVTList VTs, int64_t Size, int64_t Offset)
      : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
public:
  int64_t getFrameIndex() const {
    return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
  }

  bool hasOffset() const { return Offset >= 0; }
  int64_t getOffset() const {
    assert(hasOffset() && "offset is unknown");
    return Offset;
  }
  int64_t getSize() const {
    assert(hasOffset() && "offset is unknown");
    return Size;
  }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LIFETIME_START ||
           N->getOpcode() == ISD::LIFETIME_END;
  }
};

/// This SDNode is used for PSEUDO_PROBE values, which are the function guid
/// and the index of the basic block being probed. A pseudo probe serves as a
/// placeholder and will be removed at the end of compilation. It has no
/// operands because we do not want instruction selection to have to deal
/// with them.
class PseudoProbeSDNode : public SDNode {
  friend class SelectionDAG;
  uint64_t Guid;
  uint64_t Index;
  uint32_t Attributes;

  PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
                    SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
      : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
        Attributes(Attr) {}

public:
  uint64_t getGuid() const { return Guid; }
  uint64_t getIndex() const { return Index; }
  uint32_t getAttributes() const { return Attributes; }

  // Methods to support isa and dyn_cast
  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::PSEUDO_PROBE;
  }
};

class JumpTableSDNode : public SDNode {
  friend class SelectionDAG;

  int JTI;
  unsigned TargetFlags;

  JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
    : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
      0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
  }

public:
  int getIndex() const { return JTI; }
  unsigned getTargetFlags() const { return TargetFlags; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::JumpTable ||
           N->getOpcode() == ISD::TargetJumpTable;
  }
};

class ConstantPoolSDNode : public SDNode {
  friend class SelectionDAG;

  union {
    const Constant *ConstVal;
    MachineConstantPoolValue *MachineCPVal;
  } Val;
  int Offset;  // It's a MachineConstantPoolValue if top bit is set.
  Align Alignment; // Minimum alignment requirement of CP.
  unsigned TargetFlags;

  ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
                     Align Alignment, unsigned TF)
      : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
               DebugLoc(), getSDVTList(VT)),
        Offset(o), Alignment(Alignment), TargetFlags(TF) {
    assert(Offset >= 0 && "Offset is too large");
    Val.ConstVal = c;
  }

  ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
                     Align Alignment, unsigned TF)
      : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
               DebugLoc(), getSDVTList(VT)),
        Offset(o), Alignment(Alignment), TargetFlags(TF) {
    assert(Offset >= 0 && "Offset is too large");
    Val.MachineCPVal = v;
    Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
  }

public:
  bool isMachineConstantPoolEntry() const {
    return Offset < 0;
  }

  const Constant *getConstVal() const {
    assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
    return Val.ConstVal;
  }

  MachineConstantPoolValue *getMachineCPVal() const {
    assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
    return Val.MachineCPVal;
  }

  int getOffset() const {
    return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
  }

  // Return the minimum alignment requirement of this constant pool object.
  Align getAlign() const { return Alignment; }
  unsigned getTargetFlags() const { return TargetFlags; }

  Type *getType() const;

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ConstantPool ||
           N->getOpcode() == ISD::TargetConstantPool;
  }
};
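
// Note on the Offset encoding above: the MachineConstantPoolValue constructor
// sets the sign bit of Offset, so isMachineConstantPoolEntry() can simply
// test Offset < 0, and getOffset() masks that bit back off.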

/// Completely target-dependent object reference.
class TargetIndexSDNode : public SDNode {
  friend class SelectionDAG;

  unsigned TargetFlags;
  int Index;
  int64_t Offset;

public:
  TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
      : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
        TargetFlags(TF), Index(Idx), Offset(Ofs) {}

  unsigned getTargetFlags() const { return TargetFlags; }
  int getIndex() const { return Index; }
  int64_t getOffset() const { return Offset; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::TargetIndex;
  }
};

class BasicBlockSDNode : public SDNode {
  friend class SelectionDAG;

  MachineBasicBlock *MBB;

  /// Debug info is meaningful and potentially useful here, but we create
  /// blocks out of order when they're jumped to, which makes it a bit
  /// harder.  Let's see if we need it first.
  explicit BasicBlockSDNode(MachineBasicBlock *mbb)
    : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
  {}

public:
  MachineBasicBlock *getBasicBlock() const { return MBB; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BasicBlock;
  }
};

/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
class BuildVectorSDNode : public SDNode {
public:
  // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
  explicit BuildVectorSDNode() = delete;

  /// Check if this is a constant splat, and if so, find the
  /// smallest element size that splats the vector.  If MinSplatBits is
  /// nonzero, the element size must be at least that large.  Note that the
  /// splat element may be the entire vector (i.e., a one element vector).
  /// Returns the splat element value in SplatValue.  Any undefined bits in
  /// that value are zero, and the corresponding bits in the SplatUndef mask
  /// are set.  The SplatBitSize value is set to the splat element size in
  /// bits.  HasAnyUndefs is set to true if any bits in the vector are
  /// undefined.  isBigEndian describes the endianness of the target.
  bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
                       unsigned &SplatBitSize, bool &HasAnyUndefs,
                       unsigned MinSplatBits = 0,
                       bool isBigEndian = false) const;

  /// Returns the demanded splatted value or a null value if this is not a
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(const APInt &DemandedElts,
                        BitVector *UndefElements = nullptr) const;

  /// Returns the splatted value or a null value if this is not a splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  SDValue getSplatValue(BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// The DemandedElts mask indicates the elements that must be present,
  /// undemanded elements in Sequence may be null (SDValue()). If passed a
  /// non-null UndefElements bitvector, it will resize it to match the original
  /// vector width and set the bits where elements are undef. If result is
  /// false, Sequence will be empty.
  bool getRepeatedSequence(const APInt &DemandedElts,
                           SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Find the shortest repeating sequence of values in the build vector.
  ///
  /// e.g. { u, X, u, X, u, u, X, u } -> { X }
  ///      { X, Y, u, Y, u, u, X, u } -> { X, Y }
  ///
  /// Currently this must be a power-of-2 build vector.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the original vector width and set the bits where elements are undef.
  /// If result is false, Sequence will be empty.
  bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
                           BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant or null if this is not a constant
  /// splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(const APInt &DemandedElts,
                       BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant or null if this is not a constant
  /// splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantSDNode *
  getConstantSplatNode(BitVector *UndefElements = nullptr) const;

  /// Returns the demanded splatted constant FP or null if this is not a
  /// constant FP splat.
  ///
  /// The DemandedElts mask indicates the elements that must be in the splat.
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(const APInt &DemandedElts,
                         BitVector *UndefElements = nullptr) const;

  /// Returns the splatted constant FP or null if this is not a constant
  /// FP splat.
  ///
  /// If passed a non-null UndefElements bitvector, it will resize it to match
  /// the vector width and set the bits where elements are undef.
  ConstantFPSDNode *
  getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;

  /// If this is a constant FP splat and the splatted constant FP is an
  /// exact power of 2, return the log base 2 integer value.  Otherwise,
  /// return -1.
  ///
  /// The BitWidth specifies the necessary bit precision.
  int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
                                          uint32_t BitWidth) const;

  /// Extract the raw bit data from a build vector of Undef, Constant or
  /// ConstantFP node elements. Each raw bit element will be \p
  /// DstEltSizeInBits wide, undef elements are treated as zero, and entirely
  /// undefined elements are flagged in \p UndefElements.
  bool getConstantRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
                          SmallVectorImpl<APInt> &RawBitElements,
                          BitVector &UndefElements) const;

  bool isConstant() const;

  /// If this BuildVector is constant and represents the numerical series
  /// "<a, a+n, a+2n, a+3n, ...>" where a is an integer and n is a non-zero
  /// integer, the value "<a,n>" is returned.
  std::optional<std::pair<APInt, APInt>> isConstantSequence() const;

  /// Recast bit data \p SrcBitElements to \p DstEltSizeInBits wide elements.
  /// Undef elements are treated as zero, and entirely undefined elements are
  /// flagged in \p DstUndefElements.
  static void recastRawBits(bool IsLittleEndian, unsigned DstEltSizeInBits,
                            SmallVectorImpl<APInt> &DstBitElements,
                            ArrayRef<APInt> SrcBitElements,
                            BitVector &DstUndefElements,
                            const BitVector &SrcUndefElements);

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BUILD_VECTOR;
  }
};
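
// Illustrative sketch (hypothetical 'N' on a little-endian target): detecting
// a constant splat build vector.
//
//   if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
//     APInt SplatValue, SplatUndef;
//     unsigned SplatBitSize;
//     bool HasAnyUndefs;
//     if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
//                             HasAnyUndefs, /*MinSplatBits=*/0,
//                             /*isBigEndian=*/false)) {
//       // Every defined element repeats the SplatBitSize-bit SplatValue.
//     }
//   }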

/// An SDNode that holds an arbitrary LLVM IR Value. This is
/// used when the SelectionDAG needs to make a simple reference to something
/// in the LLVM IR representation.
///
class SrcValueSDNode : public SDNode {
  friend class SelectionDAG;

  const Value *V;

  /// Create a SrcValue for a general value.
  explicit SrcValueSDNode(const Value *v)
    : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}

public:
  /// Return the contained Value.
  const Value *getValue() const { return V; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::SRCVALUE;
  }
};

class MDNodeSDNode : public SDNode {
  friend class SelectionDAG;

  const MDNode *MD;

  explicit MDNodeSDNode(const MDNode *md)
  : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
  {}

public:
  const MDNode *getMD() const { return MD; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MDNODE_SDNODE;
  }
};

class RegisterSDNode : public SDNode {
  friend class SelectionDAG;

  Register Reg;

  RegisterSDNode(Register reg, EVT VT)
    : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}

public:
  Register getReg() const { return Reg; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::Register;
  }
};

class RegisterMaskSDNode : public SDNode {
  friend class SelectionDAG;

  // The memory for RegMask is not owned by the node.
  const uint32_t *RegMask;

  RegisterMaskSDNode(const uint32_t *mask)
    : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
      RegMask(mask) {}

public:
  const uint32_t *getRegMask() const { return RegMask; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::RegisterMask;
  }
};

class BlockAddressSDNode : public SDNode {
  friend class SelectionDAG;

  const BlockAddress *BA;
  int64_t Offset;
  unsigned TargetFlags;

  BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
                     int64_t o, unsigned Flags)
    : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
             BA(ba), Offset(o), TargetFlags(Flags) {}

public:
  const BlockAddress *getBlockAddress() const { return BA; }
  int64_t getOffset() const { return Offset; }
  unsigned getTargetFlags() const { return TargetFlags; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::BlockAddress ||
           N->getOpcode() == ISD::TargetBlockAddress;
  }
};

class LabelSDNode : public SDNode {
  friend class SelectionDAG;

  MCSymbol *Label;

  LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
      : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
    assert(LabelSDNode::classof(this) && "not a label opcode");
  }

public:
  MCSymbol *getLabel() const { return Label; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::EH_LABEL ||
           N->getOpcode() == ISD::ANNOTATION_LABEL;
  }
};

class ExternalSymbolSDNode : public SDNode {
  friend class SelectionDAG;

  const char *Symbol;
  unsigned TargetFlags;

  ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
      : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
               DebugLoc(), getSDVTList(VT)),
        Symbol(Sym), TargetFlags(TF) {}

public:
  const char *getSymbol() const { return Symbol; }
  unsigned getTargetFlags() const { return TargetFlags; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::ExternalSymbol ||
           N->getOpcode() == ISD::TargetExternalSymbol;
  }
};

class MCSymbolSDNode : public SDNode {
  friend class SelectionDAG;

  MCSymbol *Symbol;

  MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
      : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}

public:
  MCSymbol *getMCSymbol() const { return Symbol; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MCSymbol;
  }
};

class CondCodeSDNode : public SDNode {
  friend class SelectionDAG;

  ISD::CondCode Condition;

  explicit CondCodeSDNode(ISD::CondCode Cond)
    : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
      Condition(Cond) {}

public:
  ISD::CondCode get() const { return Condition; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::CONDCODE;
  }
};

/// This class is used to represent EVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {
  friend class SelectionDAG;

  EVT ValueType;

  explicit VTSDNode(EVT VT)
    : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
      ValueType(VT) {}

public:
  EVT getVT() const { return ValueType; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VALUETYPE;
  }
};

/// Base class for LoadSDNode and StoreSDNode
class LSBaseSDNode : public MemSDNode {
public:
  LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
               SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
               MachineMemOperand *MMO)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = AM;
    assert(getAddressingMode() == AM && "Value truncated");
  }

  const SDValue &getOffset() const {
    return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
  }

  /// Return the addressing mode for this load or store:
  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
  ISD::MemIndexedMode getAddressingMode() const {
    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
  }

  /// Return true if this is a pre/post inc/dec load/store.
  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }

  /// Return true if this is NOT a pre/post inc/dec load/store.
  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LOAD ||
           N->getOpcode() == ISD::STORE;
  }
};

/// This class is used to represent ISD::LOAD nodes.
class LoadSDNode : public LSBaseSDNode {
  friend class SelectionDAG;

  LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
             ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
             MachineMemOperand *MMO)
      : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
    LoadSDNodeBits.ExtTy = ETy;
    assert(readMem() && "Load MachineMemOperand is not a load!");
    assert(!writeMem() && "Load MachineMemOperand is a store!");
  }

public:
  /// Return whether this is a plain node,
  /// or one of the varieties of value-extending loads.
  ISD::LoadExtType getExtensionType() const {
    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
  }

  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getOffset() const { return getOperand(2); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::LOAD;
  }
};

/// This class is used to represent ISD::STORE nodes.
class StoreSDNode : public LSBaseSDNode {
  friend class SelectionDAG;

  StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
              ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
              MachineMemOperand *MMO)
      : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
    StoreSDNodeBits.IsTruncating = isTrunc;
    assert(!readMem() && "Store MachineMemOperand is a load!");
    assert(writeMem() && "Store MachineMemOperand is not a store!");
  }

public:
  /// Return true if the op does a truncation before store.
  /// For integers this is the same as doing a TRUNCATE and storing the result.
  /// For floats, it is the same as doing an FP_ROUND and storing the result.
  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
  void setTruncatingStore(bool Truncating) {
    StoreSDNodeBits.IsTruncating = Truncating;
  }

  const SDValue &getValue() const { return getOperand(1); }
  const SDValue &getBasePtr() const { return getOperand(2); }
  const SDValue &getOffset() const { return getOperand(3); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::STORE;
  }
};
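
// Illustrative sketch (hypothetical 'N'): dispatching on the two
// LSBaseSDNode flavors and using their fixed operand accessors.
//
//   if (auto *LD = dyn_cast<LoadSDNode>(N)) {
//     SDValue Ptr = LD->getBasePtr();                        // operand 1
//     bool IsExt = LD->getExtensionType() != ISD::NON_EXTLOAD;
//   } else if (auto *ST = dyn_cast<StoreSDNode>(N)) {
//     SDValue Val = ST->getValue();                          // operand 1
//     SDValue Ptr = ST->getBasePtr();                        // operand 2
//   }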

/// This base class is used to represent VP_LOAD, VP_STORE,
/// EXPERIMENTAL_VP_STRIDED_LOAD and EXPERIMENTAL_VP_STRIDED_STORE nodes.
class VPBaseLoadStoreSDNode : public MemSDNode {
public:
  friend class SelectionDAG;

  VPBaseLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
                        const DebugLoc &DL, SDVTList VTs,
                        ISD::MemIndexedMode AM, EVT MemVT,
                        MachineMemOperand *MMO)
      : MemSDNode(NodeTy, Order, DL, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = AM;
    assert(getAddressingMode() == AM && "Value truncated");
  }

  // VPStridedStoreSDNode (Chain, Data, Ptr,    Offset, Stride, Mask, EVL)
  // VPStoreSDNode        (Chain, Data, Ptr,    Offset, Mask,   EVL)
  // VPStridedLoadSDNode  (Chain, Ptr,  Offset, Stride, Mask,   EVL)
  // VPLoadSDNode         (Chain, Ptr,  Offset, Mask,   EVL)
  // Mask is a vector of i1 elements;
  // the type of EVL is TLI.getVPExplicitVectorLengthTy().
  const SDValue &getOffset() const {
    return getOperand((getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
                       getOpcode() == ISD::VP_LOAD)
                          ? 2
                          : 3);
  }
  const SDValue &getBasePtr() const {
    return getOperand((getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
                       getOpcode() == ISD::VP_LOAD)
                          ? 1
                          : 2);
  }
  const SDValue &getMask() const {
    switch (getOpcode()) {
    default:
      llvm_unreachable("Invalid opcode");
    case ISD::VP_LOAD:
      return getOperand(3);
    case ISD::VP_STORE:
    case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
      return getOperand(4);
    case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
      return getOperand(5);
    }
  }
  const SDValue &getVectorLength() const {
    switch (getOpcode()) {
    default:
      llvm_unreachable("Invalid opcode");
    case ISD::VP_LOAD:
      return getOperand(4);
    case ISD::VP_STORE:
    case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
      return getOperand(5);
    case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
      return getOperand(6);
    }
  }

  /// Return the addressing mode for this load or store:
  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
  ISD::MemIndexedMode getAddressingMode() const {
    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
  }

  /// Return true if this is a pre/post inc/dec load/store.
  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }

  /// Return true if this is NOT a pre/post inc/dec load/store.
  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD ||
           N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE ||
           N->getOpcode() == ISD::VP_LOAD || N->getOpcode() == ISD::VP_STORE;
  }
};
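
// Illustrative sketch (assumed client code): the base class lets lowering
// code read the mask and EVL operands without switching on the concrete VP
// opcode, since the accessors above encode the per-opcode operand positions:
//
//   if (auto *VPLS = dyn_cast<VPBaseLoadStoreSDNode>(N)) {
//     SDValue Mask = VPLS->getMask();          // position depends on opcode
//     SDValue EVL  = VPLS->getVectorLength();  // likewise
//   }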

/// This class is used to represent a VP_LOAD node
class VPLoadSDNode : public VPBaseLoadStoreSDNode {
public:
  friend class SelectionDAG;

  VPLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
               ISD::MemIndexedMode AM, ISD::LoadExtType ETy, bool isExpanding,
               EVT MemVT, MachineMemOperand *MMO)
      : VPBaseLoadStoreSDNode(ISD::VP_LOAD, Order, dl, VTs, AM, MemVT, MMO) {
    LoadSDNodeBits.ExtTy = ETy;
    LoadSDNodeBits.IsExpanding = isExpanding;
  }

  ISD::LoadExtType getExtensionType() const {
    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
  }

  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getOffset() const { return getOperand(2); }
  const SDValue &getMask() const { return getOperand(3); }
  const SDValue &getVectorLength() const { return getOperand(4); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VP_LOAD;
  }
  bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
};

/// This class is used to represent an EXPERIMENTAL_VP_STRIDED_LOAD node.
class VPStridedLoadSDNode : public VPBaseLoadStoreSDNode {
public:
  friend class SelectionDAG;

  VPStridedLoadSDNode(unsigned Order, const DebugLoc &DL, SDVTList VTs,
                      ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
                      bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
      : VPBaseLoadStoreSDNode(ISD::EXPERIMENTAL_VP_STRIDED_LOAD, Order, DL, VTs,
                              AM, MemVT, MMO) {
    LoadSDNodeBits.ExtTy = ETy;
    LoadSDNodeBits.IsExpanding = IsExpanding;
  }

  ISD::LoadExtType getExtensionType() const {
    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
  }

  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getOffset() const { return getOperand(2); }
  const SDValue &getStride() const { return getOperand(3); }
  const SDValue &getMask() const { return getOperand(4); }
  const SDValue &getVectorLength() const { return getOperand(5); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD;
  }
  bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
};

/// This class is used to represent a VP_STORE node
class VPStoreSDNode : public VPBaseLoadStoreSDNode {
public:
  friend class SelectionDAG;

  VPStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
                ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
                EVT MemVT, MachineMemOperand *MMO)
      : VPBaseLoadStoreSDNode(ISD::VP_STORE, Order, dl, VTs, AM, MemVT, MMO) {
    StoreSDNodeBits.IsTruncating = isTrunc;
    StoreSDNodeBits.IsCompressing = isCompressing;
  }

  /// Return true if this is a truncating store.
  /// For integers this is the same as doing a TRUNCATE and storing the result.
  /// For floats, it is the same as doing an FP_ROUND and storing the result.
  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }

  /// Returns true if the op does a compression to the vector before storing.
  /// The node contiguously stores the active elements (integers or floats)
  /// in src (those with their respective bit set in writemask k) to unaligned
  /// memory at base_addr.
  bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }

  const SDValue &getValue() const { return getOperand(1); }
  const SDValue &getBasePtr() const { return getOperand(2); }
  const SDValue &getOffset() const { return getOperand(3); }
  const SDValue &getMask() const { return getOperand(4); }
  const SDValue &getVectorLength() const { return getOperand(5); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VP_STORE;
  }
};

/// This class is used to represent an EXPERIMENTAL_VP_STRIDED_STORE node.
class VPStridedStoreSDNode : public VPBaseLoadStoreSDNode {
public:
  friend class SelectionDAG;

  VPStridedStoreSDNode(unsigned Order, const DebugLoc &DL, SDVTList VTs,
                       ISD::MemIndexedMode AM, bool IsTrunc, bool IsCompressing,
                       EVT MemVT, MachineMemOperand *MMO)
      : VPBaseLoadStoreSDNode(ISD::EXPERIMENTAL_VP_STRIDED_STORE, Order, DL,
                              VTs, AM, MemVT, MMO) {
    StoreSDNodeBits.IsTruncating = IsTrunc;
    StoreSDNodeBits.IsCompressing = IsCompressing;
  }

  /// Return true if this is a truncating store.
  /// For integers this is the same as doing a TRUNCATE and storing the result.
  /// For floats, it is the same as doing an FP_ROUND and storing the result.
  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }

  /// Returns true if the op does a compression to the vector before storing.
  /// The node contiguously stores the active elements (integers or floats)
  /// in src (those with their respective bit set in writemask k) to unaligned
  /// memory at base_addr.
  bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }

  const SDValue &getValue() const { return getOperand(1); }
  const SDValue &getBasePtr() const { return getOperand(2); }
  const SDValue &getOffset() const { return getOperand(3); }
  const SDValue &getStride() const { return getOperand(4); }
  const SDValue &getMask() const { return getOperand(5); }
  const SDValue &getVectorLength() const { return getOperand(6); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE;
  }
};

/// This base class is used to represent MLOAD and MSTORE nodes
class MaskedLoadStoreSDNode : public MemSDNode {
public:
  friend class SelectionDAG;

  MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
                        const DebugLoc &dl, SDVTList VTs,
                        ISD::MemIndexedMode AM, EVT MemVT,
                        MachineMemOperand *MMO)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = AM;
    assert(getAddressingMode() == AM && "Value truncated");
  }

  // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
  // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
  // Mask is a vector of i1 elements
  const SDValue &getOffset() const {
    return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
  }
  const SDValue &getMask() const {
    return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
  }

  /// Return the addressing mode for this load or store:
  /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
  ISD::MemIndexedMode getAddressingMode() const {
    return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
  }

  /// Return true if this is a pre/post inc/dec load/store.
  bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }

  /// Return true if this is NOT a pre/post inc/dec load/store.
  bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MLOAD ||
           N->getOpcode() == ISD::MSTORE;
  }
};

/// This class is used to represent an MLOAD node
class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
public:
  friend class SelectionDAG;

  MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
                   ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
                   bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
      : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
    LoadSDNodeBits.ExtTy = ETy;
    LoadSDNodeBits.IsExpanding = IsExpanding;
  }

  ISD::LoadExtType getExtensionType() const {
    return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
  }

  const SDValue &getBasePtr() const { return getOperand(1); }
  const SDValue &getOffset() const { return getOperand(2); }
  const SDValue &getMask() const { return getOperand(3); }
  const SDValue &getPassThru() const { return getOperand(4); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MLOAD;
  }

  bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
};
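
// Illustrative sketch (assumed client code; lowerExpandingLoad is a
// hypothetical helper, not an LLVM API): a target without native expanding
// loads might test for them before custom-lowering MLOAD:
//
//   if (auto *MLd = dyn_cast<MaskedLoadSDNode>(N)) {
//     if (MLd->isExpandingLoad())
//       return lowerExpandingLoad(MLd);        // hypothetical helper
//     SDValue PassThru = MLd->getPassThru();   // operand 4
//   }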

/// This class is used to represent an MSTORE node
class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
public:
  friend class SelectionDAG;

  MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
                    ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
                    EVT MemVT, MachineMemOperand *MMO)
      : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
    StoreSDNodeBits.IsTruncating = isTrunc;
    StoreSDNodeBits.IsCompressing = isCompressing;
  }

  /// Return true if the op does a truncation before store.
  /// For integers this is the same as doing a TRUNCATE and storing the result.
  /// For floats, it is the same as doing an FP_ROUND and storing the result.
  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }

  /// Returns true if the op does a compression to the vector before storing.
  /// The node contiguously stores the active elements (integers or floats)
  /// in src (those with their respective bit set in writemask k) to unaligned
  /// memory at base_addr.
  bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }

  const SDValue &getValue() const { return getOperand(1); }
  const SDValue &getBasePtr() const { return getOperand(2); }
  const SDValue &getOffset() const { return getOperand(3); }
  const SDValue &getMask() const { return getOperand(4); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MSTORE;
  }
};

/// This is a base class used to represent
/// VP_GATHER and VP_SCATTER nodes
///
class VPGatherScatterSDNode : public MemSDNode {
public:
  friend class SelectionDAG;

  VPGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
                        const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                        MachineMemOperand *MMO, ISD::MemIndexType IndexType)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = IndexType;
    assert(getIndexType() == IndexType && "Value truncated");
  }

  /// How Index is applied to BasePtr when computing addresses.
  ISD::MemIndexType getIndexType() const {
    return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
  }
  bool isIndexScaled() const {
    return !cast<ConstantSDNode>(getScale())->isOne();
  }
  bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }

  // Operand order for the two nodes (the base pointer and mask positions
  // differ between them):
  // VPGatherSDNode  (Chain, base, index, scale, mask, vlen)
  // VPScatterSDNode (Chain, value, base, index, scale, mask, vlen)
  // Mask is a vector of i1 elements
  const SDValue &getBasePtr() const {
    return getOperand((getOpcode() == ISD::VP_GATHER) ? 1 : 2);
  }
  const SDValue &getIndex() const {
    return getOperand((getOpcode() == ISD::VP_GATHER) ? 2 : 3);
  }
  const SDValue &getScale() const {
    return getOperand((getOpcode() == ISD::VP_GATHER) ? 3 : 4);
  }
  const SDValue &getMask() const {
    return getOperand((getOpcode() == ISD::VP_GATHER) ? 4 : 5);
  }
  const SDValue &getVectorLength() const {
    return getOperand((getOpcode() == ISD::VP_GATHER) ? 5 : 6);
  }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VP_GATHER ||
           N->getOpcode() == ISD::VP_SCATTER;
  }
};

/// This class is used to represent a VP_GATHER node
///
class VPGatherSDNode : public VPGatherScatterSDNode {
public:
  friend class SelectionDAG;

  VPGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
      : VPGatherScatterSDNode(ISD::VP_GATHER, Order, dl, VTs, MemVT, MMO,
                              IndexType) {}

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VP_GATHER;
  }
};

/// This class is used to represent a VP_SCATTER node
///
class VPScatterSDNode : public VPGatherScatterSDNode {
public:
  friend class SelectionDAG;

  VPScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                  MachineMemOperand *MMO, ISD::MemIndexType IndexType)
      : VPGatherScatterSDNode(ISD::VP_SCATTER, Order, dl, VTs, MemVT, MMO,
                              IndexType) {}

  const SDValue &getValue() const { return getOperand(1); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::VP_SCATTER;
  }
};

/// This is a base class used to represent
/// MGATHER and MSCATTER nodes
///
class MaskedGatherScatterSDNode : public MemSDNode {
public:
  friend class SelectionDAG;

  MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
                            const DebugLoc &dl, SDVTList VTs, EVT MemVT,
                            MachineMemOperand *MMO, ISD::MemIndexType IndexType)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    LSBaseSDNodeBits.AddressingMode = IndexType;
    assert(getIndexType() == IndexType && "Value truncated");
  }

  /// How Index is applied to BasePtr when computing addresses.
  ISD::MemIndexType getIndexType() const {
    return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
  }
  bool isIndexScaled() const {
    return !cast<ConstantSDNode>(getScale())->isOne();
  }
  bool isIndexSigned() const { return isIndexTypeSigned(getIndexType()); }

  // In both nodes the mask is Op2 and the base pointer is Op3:
  // MaskedGatherSDNode  (Chain, passthru, mask, base, index, scale)
  // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
  // Mask is a vector of i1 elements
  const SDValue &getBasePtr() const { return getOperand(3); }
  const SDValue &getIndex()   const { return getOperand(4); }
  const SDValue &getMask()    const { return getOperand(2); }
  const SDValue &getScale()   const { return getOperand(5); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MGATHER ||
           N->getOpcode() == ISD::MSCATTER;
  }
};
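
// For orientation (a restatement of the documented operand semantics, not a
// new API): each lane of an MGATHER/MSCATTER addresses memory at roughly
//
//   EffectiveAddr[i] = BasePtr + Index[i] * Scale
//
// where Index[i] is interpreted as signed or unsigned per getIndexType() and
// Scale is the constant returned by getScale(). For example, with
// Index = <0, 2, 5> and Scale = 4, the lanes touch BasePtr+0, BasePtr+8 and
// BasePtr+20.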

/// This class is used to represent an MGATHER node
///
class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
public:
  friend class SelectionDAG;

  MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
                     EVT MemVT, MachineMemOperand *MMO,
                     ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
      : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
                                  IndexType) {
    LoadSDNodeBits.ExtTy = ETy;
  }

  const SDValue &getPassThru() const { return getOperand(1); }

  ISD::LoadExtType getExtensionType() const {
    return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
  }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MGATHER;
  }
};

/// This class is used to represent an MSCATTER node
///
class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
public:
  friend class SelectionDAG;

  MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
                      EVT MemVT, MachineMemOperand *MMO,
                      ISD::MemIndexType IndexType, bool IsTrunc)
      : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
                                  IndexType) {
    StoreSDNodeBits.IsTruncating = IsTrunc;
  }

  /// Return true if the op does a truncation before store.
  /// For integers this is the same as doing a TRUNCATE and storing the result.
  /// For floats, it is the same as doing an FP_ROUND and storing the result.
  bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }

  const SDValue &getValue() const { return getOperand(1); }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::MSCATTER;
  }
};

class FPStateAccessSDNode : public MemSDNode {
public:
  friend class SelectionDAG;

  FPStateAccessSDNode(unsigned NodeTy, unsigned Order, const DebugLoc &dl,
                      SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
      : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
    assert((NodeTy == ISD::GET_FPENV_MEM || NodeTy == ISD::SET_FPENV_MEM) &&
           "Expected FP state access node");
  }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::GET_FPENV_MEM ||
           N->getOpcode() == ISD::SET_FPENV_MEM;
  }
};

/// An SDNode that represents everything that will be needed
/// to construct a MachineInstr. These nodes are created during the
/// instruction selection proper phase.
///
/// Note that the only supported way to set the `memoperands` is by calling the
/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
/// inside the DAG rather than in the node.
class MachineSDNode : public SDNode {
private:
  friend class SelectionDAG;

  MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
      : SDNode(Opc, Order, DL, VTs) {}

  // We use a pointer union between a single `MachineMemOperand` pointer and
  // a pointer to an array of `MachineMemOperand` pointers. This is null when
  // the number of these is zero, the single pointer variant used when the
  // number is one, and the array is used for larger numbers.
  //
  // The array is allocated via the `SelectionDAG`'s allocator and so will
  // always live until the DAG is cleaned up and doesn't require ownership here.
  //
  // We can't use something simpler like `TinyPtrVector` here because `SDNode`
  // subclasses aren't managed in a conforming C++ manner. See the comments on
  // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
  // constraint here is that these don't manage memory with their constructor or
  // destructor and can be initialized to a good state even if they start off
  // uninitialized.
  PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};

  // Note that this could be folded into the above `MemRefs` member if doing so
  // is advantageous at some point. We don't need to store this in most cases.
  // However, at the moment this doesn't appear to make the allocation any
  // smaller and makes the code somewhat simpler to read.
  int NumMemRefs = 0;

public:
  using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;

  ArrayRef<MachineMemOperand *> memoperands() const {
    // Special case the common cases.
    if (NumMemRefs == 0)
      return {};
    if (NumMemRefs == 1)
      return ArrayRef(MemRefs.getAddrOfPtr1(), 1);

    // Otherwise we have an actual array.
    return ArrayRef(cast<MachineMemOperand **>(MemRefs), NumMemRefs);
  }
  mmo_iterator memoperands_begin() const { return memoperands().begin(); }
  mmo_iterator memoperands_end() const { return memoperands().end(); }
  bool memoperands_empty() const { return memoperands().empty(); }

  /// Clear out the memory reference descriptor list.
  void clearMemRefs() {
    MemRefs = nullptr;
    NumMemRefs = 0;
  }

  static bool classof(const SDNode *N) {
    return N->isMachineOpcode();
  }
};
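
// Illustrative sketch (assumed client code): memoperands() returns an
// ArrayRef regardless of the underlying 0/1/N-element representation, so
// iteration is uniform. `MN` is assumed to be a MachineSDNode*.
//
//   for (MachineMemOperand *MMO : MN->memoperands())
//     if (MMO->isVolatile())
//       /* be conservative with this node */;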

/// An SDNode that records if a register contains a value that is guaranteed to
/// be aligned accordingly.
class AssertAlignSDNode : public SDNode {
  Align Alignment;

public:
  AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
      : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}

  Align getAlign() const { return Alignment; }

  static bool classof(const SDNode *N) {
    return N->getOpcode() == ISD::AssertAlign;
  }
};

class SDNodeIterator {
  const SDNode *Node;
  unsigned Operand;

  SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = SDNode;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  bool operator==(const SDNodeIterator& x) const {
    return Operand == x.Operand;
  }
  bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }

  pointer operator*() const {
    return Node->getOperand(Operand).getNode();
  }
  pointer operator->() const { return operator*(); }

  SDNodeIterator& operator++() {                // Preincrement
    ++Operand;
    return *this;
  }
  SDNodeIterator operator++(int) { // Postincrement
    SDNodeIterator tmp = *this; ++*this; return tmp;
  }
  size_t operator-(SDNodeIterator Other) const {
    assert(Node == Other.Node &&
           "Cannot compare iterators of two different nodes!");
    return Operand - Other.Operand;
  }

  static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
  static SDNodeIterator end  (const SDNode *N) {
    return SDNodeIterator(N, N->getNumOperands());
  }

  unsigned getOperand() const { return Operand; }
  const SDNode *getNode() const { return Node; }
};

template <> struct GraphTraits<SDNode*> {
  using NodeRef = SDNode *;
  using ChildIteratorType = SDNodeIterator;

  static NodeRef getEntryNode(SDNode *N) { return N; }

  static ChildIteratorType child_begin(NodeRef N) {
    return SDNodeIterator::begin(N);
  }

  static ChildIteratorType child_end(NodeRef N) {
    return SDNodeIterator::end(N);
  }
};
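
// Illustrative sketch (assumed client code): this specialization lets the
// generic graph algorithms in llvm/ADT/DepthFirstIterator.h walk a DAG
// directly. `Root` is assumed to be an SDNode*.
//
//   #include "llvm/ADT/DepthFirstIterator.h"
//   for (SDNode *Op : depth_first(Root))
//     /* visits Root and its transitive operands */;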

/// A representation of the largest SDNode, for use in sizeof().
///
/// This needs to be a union because the largest node differs on 32 bit systems
/// with 4 and 8 byte pointer alignment, respectively.
using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
                                            BlockAddressSDNode,
                                            GlobalAddressSDNode,
                                            PseudoProbeSDNode>;

/// The SDNode class with the greatest alignment requirement.
using MostAlignedSDNode = GlobalAddressSDNode;

namespace ISD {

  /// Returns true if the specified node is a non-extending and unindexed load.
  inline bool isNormalLoad(const SDNode *N) {
    const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
    return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
      Ld->getAddressingMode() == ISD::UNINDEXED;
  }

  /// Returns true if the specified node is a non-extending load.
  inline bool isNON_EXTLoad(const SDNode *N) {
    return isa<LoadSDNode>(N) &&
      cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
  }

  /// Returns true if the specified node is a EXTLOAD.
  inline bool isEXTLoad(const SDNode *N) {
    return isa<LoadSDNode>(N) &&
      cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
  }

  /// Returns true if the specified node is a SEXTLOAD.
  inline bool isSEXTLoad(const SDNode *N) {
    return isa<LoadSDNode>(N) &&
      cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
  }

  /// Returns true if the specified node is a ZEXTLOAD.
  inline bool isZEXTLoad(const SDNode *N) {
    return isa<LoadSDNode>(N) &&
      cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
  }

  /// Returns true if the specified node is an unindexed load.
  inline bool isUNINDEXEDLoad(const SDNode *N) {
    return isa<LoadSDNode>(N) &&
      cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
  }

  /// Returns true if the specified node is a non-truncating
  /// and unindexed store.
  inline bool isNormalStore(const SDNode *N) {
    const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
    return St && !St->isTruncatingStore() &&
      St->getAddressingMode() == ISD::UNINDEXED;
  }

  /// Returns true if the specified node is an unindexed store.
  inline bool isUNINDEXEDStore(const SDNode *N) {
    return isa<StoreSDNode>(N) &&
      cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
  }
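
  // Illustrative sketch (assumed client code): combines commonly use these
  // predicates as cheap guards before rewriting memory operations:
  //
  //   if (ISD::isNormalLoad(N) && cast<LoadSDNode>(N)->isSimple())
  //     /* plain, unindexed, non-volatile, non-atomic load: safe to widen */;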

  /// Attempt to match a unary predicate against a scalar/splat constant or
  /// every element of a constant BUILD_VECTOR.
  /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
  bool matchUnaryPredicate(SDValue Op,
                           std::function<bool(ConstantSDNode *)> Match,
                           bool AllowUndefs = false);

  /// Attempt to match a binary predicate against a pair of scalar/splat
  /// constants or every element of a pair of constant BUILD_VECTORs.
  /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
  /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
  bool matchBinaryPredicate(
      SDValue LHS, SDValue RHS,
      std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
      bool AllowUndefs = false, bool AllowTypeMismatch = false);

  /// Returns true if the specified value is the overflow result from one
  /// of the overflow intrinsic nodes.
  inline bool isOverflowIntrOpRes(SDValue Op) {
    unsigned Opc = Op.getOpcode();
    return (Op.getResNo() == 1 &&
            (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
             Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
  }

} // end namespace ISD

} // end namespace llvm

#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H
//===- MachineUniformityAnalysis.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Machine IR instance of the generic uniformity analysis
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEUNIFORMITYANALYSIS_H
#define LLVM_CODEGEN_MACHINEUNIFORMITYANALYSIS_H

#include "llvm/ADT/GenericUniformityInfo.h"
#include "llvm/CodeGen/MachineCycleAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineSSAContext.h"

namespace llvm {

extern template class GenericUniformityInfo<MachineSSAContext>;
using MachineUniformityInfo = GenericUniformityInfo<MachineSSAContext>;

/// \brief Compute uniformity information for a Machine IR function.
///
/// If \p HasBranchDivergence is false, produces a dummy result which assumes
/// everything is uniform.
MachineUniformityInfo computeMachineUniformityInfo(
    MachineFunction &F, const MachineCycleInfo &cycleInfo,
    const MachineDomTree &domTree, bool HasBranchDivergence);

} // namespace llvm

#endif // LLVM_CODEGEN_MACHINEUNIFORMITYANALYSIS_H
//===- MachineScheduler.h - MachineInstr Scheduling Pass --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for customizing the standard MachineScheduler
// pass. Note that the entire pass may be replaced as follows:
//
// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
//   PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
//   ...}
//
// The MachineScheduler pass is only responsible for choosing the regions to be
// scheduled. Targets can override the DAG builder and scheduler without
// replacing the pass as follows:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new CustomMachineScheduler(C);
// }
//
// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
// scheduling while updating the instruction stream, register pressure, and live
// intervals. Most targets don't need to override the DAG builder and list
// scheduler, but subtargets that require custom scheduling heuristics may
// plugin an alternate MachineSchedStrategy. The strategy is responsible for
// selecting the highest priority node from the list:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   return new ScheduleDAGMILive(C, CustomStrategy(C));
// }
//
// The DAG builder can also be customized in a sense by adding DAG mutations
// that will run after DAG building and before list scheduling. DAG mutations
// can adjust dependencies based on target-specific knowledge or add weak edges
// to aid heuristics:
//
// ScheduleDAGInstrs *<Target>PassConfig::
// createMachineScheduler(MachineSchedContext *C) {
//   ScheduleDAGMI *DAG = createGenericSchedLive(C);
//   DAG->addMutation(new CustomDAGMutation(...));
//   return DAG;
// }
//
// A target that supports alternative schedulers can use the
// MachineSchedRegistry to allow command line selection. This can be done by
// implementing the following boilerplate:
//
// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
//  return new CustomMachineScheduler(C);
// }
// static MachineSchedRegistry
// SchedCustomRegistry("custom", "Run my target's custom scheduler",
//                     createCustomMachineSched);
//
//
// Finally, subtargets that don't need to implement custom heuristics but would
// like to configure the GenericScheduler's policy for a given scheduler region,
// including scheduling direction and register pressure tracking policy, can do
// this:
//
// void <SubTarget>Subtarget::
// overrideSchedPolicy(MachineSchedPolicy &Policy,
//                     unsigned NumRegionInstrs) const {
//   Policy.<Flag> = true;
// }
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
#define LLVM_CODEGEN_MACHINESCHEDULER_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <llvm/Support/raw_ostream.h>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

extern cl::opt<bool> ForceTopDown;
extern cl::opt<bool> ForceBottomUp;
extern cl::opt<bool> VerifyScheduling;
#ifndef NDEBUG
extern cl::opt<bool> ViewMISchedDAGs;
extern cl::opt<bool> PrintDAGs;
#else
extern const bool ViewMISchedDAGs;
extern const bool PrintDAGs;
#endif

class AAResults;
class LiveIntervals;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineLoopInfo;
class RegisterClassInfo;
class SchedDFSResult;
class ScheduleHazardRecognizer;
class TargetInstrInfo;
class TargetPassConfig;
class TargetRegisterInfo;

/// MachineSchedContext provides enough context from the MachineScheduler pass
/// for the target to instantiate a scheduler.
struct MachineSchedContext {
  MachineFunction *MF = nullptr;
  const MachineLoopInfo *MLI = nullptr;
  const MachineDominatorTree *MDT = nullptr;
  const TargetPassConfig *PassConfig = nullptr;
  AAResults *AA = nullptr;
  LiveIntervals *LIS = nullptr;

  RegisterClassInfo *RegClassInfo;

  MachineSchedContext();
  MachineSchedContext &operator=(const MachineSchedContext &other) = delete;
  MachineSchedContext(const MachineSchedContext &other) = delete;
  virtual ~MachineSchedContext();
};

/// MachineSchedRegistry provides a selection of available machine instruction
/// schedulers.
class MachineSchedRegistry
    : public MachinePassRegistryNode<
          ScheduleDAGInstrs *(*)(MachineSchedContext *)> {
public:
  using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);

  // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
  using FunctionPassCtor = ScheduleDAGCtor;

  static MachinePassRegistry<ScheduleDAGCtor> Registry;

  MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
      : MachinePassRegistryNode(N, D, C) {
    Registry.Add(this);
  }

  ~MachineSchedRegistry() { Registry.Remove(this); }

  // Accessors.
  //
  MachineSchedRegistry *getNext() const {
    return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
  }

  static MachineSchedRegistry *getList() {
    return (MachineSchedRegistry *)Registry.getList();
  }

  static void setListener(MachinePassRegistryListener<FunctionPassCtor> *L) {
    Registry.setListener(L);
  }
};

class ScheduleDAGMI;

/// Define a generic scheduling policy for targets that don't provide their own
/// MachineSchedStrategy. This can be overridden for each scheduling region
/// before building the DAG.
struct MachineSchedPolicy {
  // Allow the scheduler to disable register pressure tracking.
  bool ShouldTrackPressure = false;
  /// Track LaneMasks to allow reordering of independent subregister writes
  /// of the same vreg. \sa MachineSchedStrategy::shouldTrackLaneMasks()
  bool ShouldTrackLaneMasks = false;

  // Allow the scheduler to force top-down or bottom-up scheduling. If neither
  // is true, the scheduler runs in both directions and converges.
  bool OnlyTopDown = false;
  bool OnlyBottomUp = false;

  // Disable heuristic that tries to fetch nodes from long dependency chains
  // first.
  bool DisableLatencyHeuristic = false;

  // Compute DFSResult for use in scheduling heuristics.
  bool ComputeDFSResult = false;

  MachineSchedPolicy() = default;
};

/// MachineSchedStrategy - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Initialization sequence:
///   initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
class MachineSchedStrategy {
  virtual void anchor();

public:
  virtual ~MachineSchedStrategy() = default;

  /// Optionally override the per-region scheduling policy.
  virtual void initPolicy(MachineBasicBlock::iterator Begin,
                          MachineBasicBlock::iterator End,
                          unsigned NumRegionInstrs) {}

  virtual void dumpPolicy() const {}

  /// Check if pressure tracking is needed before building the DAG and
  /// initializing this strategy. Called after initPolicy.
  virtual bool shouldTrackPressure() const { return true; }

  /// Returns true if lanemasks should be tracked. LaneMask tracking is
  /// necessary to reorder independent subregister defs for the same vreg.
  /// This has to be enabled in combination with shouldTrackPressure().
  virtual bool shouldTrackLaneMasks() const { return false; }

  // If this method returns true, handling of the scheduling regions
  // themselves (in case of a scheduling boundary in MBB) will be done
  // beginning with the topmost region of MBB.
  virtual bool doMBBSchedRegionsTopDown() const { return false; }

  /// Initialize the strategy after building the DAG for a new region.
  virtual void initialize(ScheduleDAGMI *DAG) = 0;

  /// Tell the strategy that MBB is about to be processed.
  virtual void enterMBB(MachineBasicBlock *MBB) {}

  /// Tell the strategy that the current MBB is done.
  virtual void leaveMBB() {}

  /// Notify this strategy that all roots have been released (including those
  /// that depend on EntrySU or ExitSU).
  virtual void registerRoots() {}

  /// Pick the next node to schedule, or return nullptr. Set IsTopNode to true to
  /// schedule the node at the top of the unscheduled region. Otherwise it will
  /// be scheduled at the bottom.
  virtual SUnit *pickNode(bool &IsTopNode) = 0;

  /// Scheduler callback to notify that a new subtree is scheduled.
  virtual void scheduleTree(unsigned SubtreeID) {}

  /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
  /// instruction and updated scheduled/remaining flags in the DAG nodes.
  virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;

  /// When all predecessor dependencies have been resolved, free this node for
  /// top-down scheduling.
  virtual void releaseTopNode(SUnit *SU) = 0;

  /// When all successor dependencies have been resolved, free this node for
  /// bottom-up scheduling.
  virtual void releaseBottomNode(SUnit *SU) = 0;
};
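
// Illustrative sketch (an assumption, not code from LLVM): a minimal
// strategy only needs to implement the pure-virtual hooks. A toy top-down
// FIFO strategy could look like:
//
//   struct FIFOSchedStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> Ready;
//     void initialize(ScheduleDAGMI *) override {}
//     void releaseTopNode(SUnit *SU) override { Ready.push_back(SU); }
//     void releaseBottomNode(SUnit *) override {}
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (Ready.empty())
//         return nullptr;
//       IsTopNode = true;
//       SUnit *SU = Ready.front();
//       Ready.erase(Ready.begin());
//       return SU;
//     }
//     void schedNode(SUnit *, bool) override {}
//   };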

/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
/// schedules machine instructions according to the given MachineSchedStrategy
/// without much extra book-keeping. This is the common functionality between
/// PreRA and PostRA MachineScheduler.
class ScheduleDAGMI : public ScheduleDAGInstrs {
protected:
  AAResults *AA;
  LiveIntervals *LIS;
  std::unique_ptr<MachineSchedStrategy> SchedImpl;

  /// Ordered list of DAG postprocessing steps.
  std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;

  /// The top of the unscheduled zone.
  MachineBasicBlock::iterator CurrentTop;

  /// The bottom of the unscheduled zone.
  MachineBasicBlock::iterator CurrentBottom;

  /// Record the next node in a scheduled cluster.
  const SUnit *NextClusterPred = nullptr;
  const SUnit *NextClusterSucc = nullptr;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  /// The number of instructions scheduled so far. Used to cut off the
  /// scheduler at the point determined by misched-cutoff.
  unsigned NumInstrsScheduled = 0;
#endif

public:
  ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
                bool RemoveKillFlags)
      : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
        LIS(C->LIS), SchedImpl(std::move(S)) {}

  // Provide a vtable anchor
  ~ScheduleDAGMI() override;

  /// If this method returns true, handling of the scheduling regions
  /// themselves (in case of a scheduling boundary in MBB) will be done
  /// beginning with the topmost region of MBB.
  bool doMBBSchedRegionsTopDown() const override {
    return SchedImpl->doMBBSchedRegionsTopDown();
  }

  // Returns LiveIntervals instance for use in DAG mutators and such.
  LiveIntervals *getLIS() const { return LIS; }

  /// Return true if this DAG supports VReg liveness and RegPressure.
  virtual bool hasVRegLiveness() const { return false; }

  /// Add a postprocessing step to the DAG builder.
  /// Mutations are applied in the order that they are added after normal DAG
  /// building and before MachineSchedStrategy initialization.
  ///
  /// ScheduleDAGMI takes ownership of the Mutation object.
  void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
    if (Mutation)
      Mutations.push_back(std::move(Mutation));
  }

  MachineBasicBlock::iterator top() const { return CurrentTop; }
  MachineBasicBlock::iterator bottom() const { return CurrentBottom; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  void startBlock(MachineBasicBlock *bb) override;
  void finishBlock() override;

  /// Change the position of an instruction within the basic block and update
  /// live ranges and region boundary iterators.
  void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);

  const SUnit *getNextClusterPred() const { return NextClusterPred; }

  const SUnit *getNextClusterSucc() const { return NextClusterSucc; }

  void viewGraph(const Twine &Name, const Twine &Title) override;
  void viewGraph() override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Apply each ScheduleDAGMutation step in order. This allows different
  /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
  void postProcessDAG();

  /// Release ExitSU predecessors and setup scheduler queues.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Update scheduler DAG and queues after scheduling an instruction.
  void updateQueues(SUnit *SU, bool IsTopNode);

  /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
  void placeDebugValues();

  /// Dump the scheduled sequence.
  void dumpSchedule() const;
  /// Print execution trace of the schedule top-down or bottom-up.
  void dumpScheduleTraceTopDown() const;
  void dumpScheduleTraceBottomUp() const;

  // Lesser helpers...
  bool checkSchedLimit();

  void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                             SmallVectorImpl<SUnit*> &BotRoots);

  void releaseSucc(SUnit *SU, SDep *SuccEdge);
  void releaseSuccessors(SUnit *SU);
  void releasePred(SUnit *SU, SDep *PredEdge);
  void releasePredecessors(SUnit *SU);
};

/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
/// machine instructions while updating LiveIntervals and tracking regpressure.
class ScheduleDAGMILive : public ScheduleDAGMI {
protected:
  RegisterClassInfo *RegClassInfo;

  /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
  /// will be empty.
  SchedDFSResult *DFSResult = nullptr;
  BitVector ScheduledTrees;

  MachineBasicBlock::iterator LiveRegionEnd;

  /// Maps vregs to the SUnits of their uses in the current scheduling region.
  VReg2SUnitMultiMap VRegUses;

  // Map each SU to its summary of pressure changes. This array is updated for
  // liveness during bottom-up scheduling. Top-down scheduling may proceed but
  // has no effect on the pressure diffs.
  PressureDiffs SUPressureDiffs;

  /// Register pressure in this region computed by initRegPressure.
  bool ShouldTrackPressure = false;
  bool ShouldTrackLaneMasks = false;
  IntervalPressure RegPressure;
  RegPressureTracker RPTracker;

  /// List of pressure sets that exceed the target's pressure limit before
  /// scheduling, listed in increasing set ID order. Each pressure set is paired
  /// with its max pressure in the currently scheduled regions.
  std::vector<PressureChange> RegionCriticalPSets;

  /// The top of the unscheduled zone.
  IntervalPressure TopPressure;
  RegPressureTracker TopRPTracker;

  /// The bottom of the unscheduled zone.
  IntervalPressure BotPressure;
  RegPressureTracker BotRPTracker;

public:
  ScheduleDAGMILive(MachineSchedContext *C,
                    std::unique_ptr<MachineSchedStrategy> S)
      : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
        RegClassInfo(C->RegClassInfo), RPTracker(RegPressure),
        TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}

  ~ScheduleDAGMILive() override;

  /// Return true if this DAG supports VReg liveness and RegPressure.
  bool hasVRegLiveness() const override { return true; }

  /// Return true if register pressure tracking is enabled.
  bool isTrackingPressure() const { return ShouldTrackPressure; }

  /// Get current register pressure for the top scheduled instructions.
  const IntervalPressure &getTopPressure() const { return TopPressure; }
  const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }

  /// Get current register pressure for the bottom scheduled instructions.
  const IntervalPressure &getBotPressure() const { return BotPressure; }
  const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }

  /// Get register pressure for the entire scheduling region before scheduling.
  const IntervalPressure &getRegPressure() const { return RegPressure; }

  const std::vector<PressureChange> &getRegionCriticalPSets() const {
    return RegionCriticalPSets;
  }

  PressureDiff &getPressureDiff(const SUnit *SU) {
    return SUPressureDiffs[SU->NodeNum];
  }
  const PressureDiff &getPressureDiff(const SUnit *SU) const {
    return SUPressureDiffs[SU->NodeNum];
  }

  /// Compute a DFSResult after DAG building is complete, and before any
  /// queue comparisons.
  void computeDFSResult();

  /// Return a non-null DFS result if the scheduling strategy initialized it.
  const SchedDFSResult *getDFSResult() const { return DFSResult; }

  BitVector &getScheduledTrees() { return ScheduledTrees; }

  /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
  /// region. This covers all instructions in a block, while schedule() may only
  /// cover a subset.
  void enterRegion(MachineBasicBlock *bb,
                   MachineBasicBlock::iterator begin,
                   MachineBasicBlock::iterator end,
                   unsigned regioninstrs) override;

  /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
  /// reorderable instructions.
  void schedule() override;

  /// Compute the cyclic critical path through the DAG.
  unsigned computeCyclicCriticalPath();

  void dump() const override;

protected:
  // Top-Level entry points for the schedule() driver...

  /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
  /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
  /// region, TopTracker and BottomTracker will be initialized to the top and
  /// bottom of the DAG region without covering any unscheduled instruction.
  void buildDAGWithRegPressure();

  /// Release ExitSU predecessors and setup scheduler queues. Re-position
  /// the Top RP tracker in case the region beginning has changed.
  void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);

  /// Move an instruction and update register pressure.
  void scheduleMI(SUnit *SU, bool IsTopNode);

  // Lesser helpers...

  void initRegPressure();

  void updatePressureDiffs(ArrayRef<RegisterMaskPair> LiveUses);

  void updateScheduledPressure(const SUnit *SU,
                               const std::vector<unsigned> &NewMaxPressure);

  void collectVRegUses(SUnit &SU);
};

//===----------------------------------------------------------------------===//
///
/// Helpers for implementing custom MachineSchedStrategy classes. These take
/// care of the book-keeping associated with list scheduling heuristics.
///
//===----------------------------------------------------------------------===//

/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
///
/// This is a convenience class that may be used by implementations of
/// MachineSchedStrategy.
class ReadyQueue {
  unsigned ID;
  std::string Name;
  std::vector<SUnit*> Queue;

public:
  ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}

  unsigned getID() const { return ID; }

  StringRef getName() const { return Name; }

  // SU is in this queue if its NodeQueueId is a superset of this ID.
  bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }

  bool empty() const { return Queue.empty(); }

  void clear() { Queue.clear(); }

  unsigned size() const { return Queue.size(); }

  using iterator = std::vector<SUnit*>::iterator;

  iterator begin() { return Queue.begin(); }

  iterator end() { return Queue.end(); }

  ArrayRef<SUnit*> elements() { return Queue; }

  iterator find(SUnit *SU) { return llvm::find(Queue, SU); }

  void push(SUnit *SU) {
    Queue.push_back(SU);
    SU->NodeQueueId |= ID;
  }

  iterator remove(iterator I) {
    (*I)->NodeQueueId &= ~ID;
    *I = Queue.back();
    unsigned idx = I - Queue.begin();
    Queue.pop_back();
    return Queue.begin() + idx;
  }

  void dump() const;
};
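
// Illustrative sketch (assumed usage; isBest is a hypothetical predicate): a
// strategy typically scans Available and removes the chosen unit with the
// iterator form of remove():
//
//   for (ReadyQueue::iterator I = Q.begin(); I != Q.end(); ++I)
//     if (isBest(*I)) {
//       SUnit *SU = *I;
//       Q.remove(I);
//       return SU;
//     }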

/// Summarize the unscheduled region.
struct SchedRemainder {
  // Critical path through the DAG in expected latency.
  unsigned CriticalPath;
  unsigned CyclicCritPath;

  // Scaled count of micro-ops left to schedule.
  unsigned RemIssueCount;

  bool IsAcyclicLatencyLimited;

  // Unscheduled resources
  SmallVector<unsigned, 16> RemainingCounts;

  SchedRemainder() { reset(); }

  void reset() {
    CriticalPath = 0;
    CyclicCritPath = 0;
    RemIssueCount = 0;
    IsAcyclicLatencyLimited = false;
    RemainingCounts.clear();
  }

  void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
};

/// ResourceSegments are a collection of intervals closed on the
/// left and open on the right:
///
///     list{ [a1, b1), [a2, b2), ..., [a_N, b_N) }
///
/// The collection has the following properties:
///
/// 1. The list is ordered: a_i < b_i and b_i < a_(i+1)
///
/// 2. The intervals in the collection do not intersect each other.
///
/// A \ref ResourceSegments instance represents the cycle
/// reservation history of an individual resource instance.
class ResourceSegments {
public:
  /// Represents an interval of discrete integer values closed on
  /// the left and open on the right: [a, b).
  typedef std::pair<int64_t, int64_t> IntervalTy;

  /// Adds an interval [a, b) to the collection of the instance.
  ///
  /// When adding [a, b) to the collection, the operation merges the
  /// adjacent intervals. For example
  ///
  ///       0  1  2  3  4  5  6  7  8  9  10
  ///       [-----)  [--)     [--)
  ///     +       [--)
  ///     = [-----------)     [--)
  ///
  /// To help debug duplicate resource usage, the function asserts
  /// that the interval being added does not overlap any interval
  /// already in the collection. We can require this because, by
  /// definition, a \ref ResourceSegments is attached to a single
  /// resource instance.
  void add(IntervalTy A, const unsigned CutOff = 10);

public:
  /// Checks whether intervals intersect.
  static bool intersects(IntervalTy A, IntervalTy B);

  /// These functions return the interval used by a resource in bottom-up and
  /// top-down scheduling.
  ///
  /// Consider an instruction that uses resources X0, X1 and X2 as follows:
  ///
  /// X0 X1 X1 X2    +--------+------------+------+
  ///                |Resource|StartAtCycle|Cycles|
  ///                +--------+------------+------+
  ///                |   X0   |     0      |  1   |
  ///                +--------+------------+------+
  ///                |   X1   |     1      |  3   |
  ///                +--------+------------+------+
  ///                |   X2   |     3      |  4   |
  ///                +--------+------------+------+
  ///
  /// If we can schedule the instruction at cycle C, we need to
  /// compute the interval of the resource as follows:
  ///
  /// # TOP DOWN SCHEDULING
  ///
  /// Cycles scheduling flows to the _right_, in the same direction
  /// of time.
  ///
  ///       C      1      2      3      4      5  ...
  /// ------|------|------|------|------|------|----->
  ///       X0     X1     X1     X2   ---> direction of time
  /// X0    [C, C+1)
  /// X1           [C+1,      C+3)
  /// X2                         [C+3, C+4)
  ///
  /// Therefore, the formula to compute the interval for a resource
  /// of an instruction that can be scheduled at cycle C in top-down
  /// scheduling is:
  ///
  ///       [C+StartAtCycle, C+Cycles)
  ///
  ///
  /// # BOTTOM UP SCHEDULING
  ///
  /// Cycles scheduling flows to the _left_, in opposite direction
  /// of time.
  ///
  /// In bottom up scheduling, the scheduling happens in opposite
  /// direction to the execution of the cycles of the
  /// instruction. When the instruction is scheduled at cycle `C`,
  /// the resources are allocated in the past relative to `C`:
  ///
  ///       2      1      C     -1     -2     -3     -4     -5  ...
  /// <-----|------|------|------|------|------|------|------|---
  ///                     X0     X1     X1     X2   ---> direction of time
  /// X0           (C+1, C]
  /// X1                  (C,        C-2]
  /// X2                              (C-2, C-3]
  ///
  /// Therefore, the formula to compute the interval for a resource
  /// of an instruction that can be scheduled at cycle C in bottom-up
  /// scheduling is:
  ///
  ///       [C-Cycle+1, C-StartAtCycle+1)
  ///
  ///
  /// NOTE: In both cases, the number of cycles booked by a
  /// resource is the value (Cycle - StartAtCycle).
  static IntervalTy getResourceIntervalBottom(unsigned C, unsigned StartAtCycle,
                                              unsigned Cycle) {
    return std::make_pair<long, long>((long)C - (long)Cycle + 1L,
                                      (long)C - (long)StartAtCycle + 1L);
  }
  static IntervalTy getResourceIntervalTop(unsigned C, unsigned StartAtCycle,
                                           unsigned Cycle) {
    return std::make_pair<long, long>((long)C + (long)StartAtCycle,
                                      (long)C + (long)Cycle);
  }
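
  // Worked example (numbers chosen purely for illustration): with C = 10,
  // StartAtCycle = 1 and Cycle = 3,
  //   getResourceIntervalTop(10, 1, 3)    == [11, 13)
  //   getResourceIntervalBottom(10, 1, 3) == [8, 10)
  // Each interval books Cycle - StartAtCycle = 2 cycles, matching the NOTE
  // above.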

private:
  /// Finds the first cycle in which a resource can be allocated.
  ///
  /// The function uses the \param IntervalBuilder [*] to build a
  /// resource interval [a, b) out of the input parameters \param
  /// CurrCycle, \param StartAtCycle and \param Cycle.
  ///
  /// The function then loops through the intervals in the ResourceSegments
  /// and shifts the interval [a, b) and the ReturnCycle to the
  /// right until there is no intersection between the intervals of
  /// the \ref ResourceSegments instance and the new shifted [a, b). When
  /// this condition is met, the ReturnCycle (which corresponds to the
  /// cycle in which the resource can be allocated) is returned.
  ///
  ///               c = CurrCycle in input
  ///               c   1   2   3   4   5   6   7   8   9   10 ... ---> (time
  ///               flow)
  ///  ResourceSegments...  [---)   [-------)           [-----------)
  ///               c   [1     3)  -> StartAtCycle=1, Cycles=3
  ///                 ++c   [1     3)
  ///                     ++c   [1     3)
  ///                         ++c   [1     3)
  ///                             ++c   [1     3)
  ///                                 ++c   [1     3)    ---> returns c
  ///                                 incremented by 5 (c+5)
  ///
  ///
  /// Notice that for bottom-up scheduling the diagram is slightly
  /// different because the current cycle c is always on the right
  /// of the interval [a, b) (see \ref
  /// `getResourceIntervalBottom`). This is because the cycle
  /// increments for bottom-up scheduling move in the direction
  /// opposite to the direction of time:
  ///
  ///     --------> direction of time.
  ///     XXYZZZ    (resource usage)
  ///     --------> direction of top-down execution cycles.
  ///     <-------- direction of bottom-up execution cycles.
  ///
  /// Even though bottom-up scheduling moves against the flow of
  /// time, the algorithm used to find the first free slot in between
  /// intervals is the same as for top-down scheduling.
  ///
  /// [*] See \ref `getResourceIntervalTop` and
  /// \ref `getResourceIntervalBottom` to see how such resource intervals
  /// are built.
  unsigned
  getFirstAvailableAt(unsigned CurrCycle, unsigned StartAtCycle, unsigned Cycle,
                      std::function<IntervalTy(unsigned, unsigned, unsigned)>
                          IntervalBuilder) const;

public:
  /// getFirstAvailableAtFromBottom and getFirstAvailableAtFromTop could be
  /// merged into a single function that takes the interval builder as a
  /// parameter.
  unsigned getFirstAvailableAtFromBottom(unsigned CurrCycle,
                                         unsigned StartAtCycle,
                                         unsigned Cycle) const {
    return getFirstAvailableAt(CurrCycle, StartAtCycle, Cycle,
                               getResourceIntervalBottom);
  }
  unsigned getFirstAvailableAtFromTop(unsigned CurrCycle, unsigned StartAtCycle,
                                      unsigned Cycle) const {
    return getFirstAvailableAt(CurrCycle, StartAtCycle, Cycle,
                               getResourceIntervalTop);
  }

private:
  std::list<IntervalTy> _Intervals;
  /// Merge all adjacent intervals in the collection. For all pairs
  /// of adjacent intervals, it performs [a, b) + [b, c) -> [a, c).
  ///
  /// Before performing the merge operation, the intervals are
  /// sorted with \ref sort_predicate.
  void sortAndMerge();

public:
  // Constructor for an empty set.
  explicit ResourceSegments() = default;
  bool empty() const { return _Intervals.empty(); }
  explicit ResourceSegments(std::list<IntervalTy> Intervals)
      : _Intervals(Intervals) {
    sortAndMerge();
  }

  friend bool operator==(const ResourceSegments &c1,
                         const ResourceSegments &c2) {
    return c1._Intervals == c2._Intervals;
  }
  friend llvm::raw_ostream &operator<<(llvm::raw_ostream &os,
                                       const ResourceSegments &Segments) {
    os << "{ ";
    for (auto p : Segments._Intervals)
      os << "[" << p.first << ", " << p.second << "), ";
    os << "}\n";
    return os;
  }
};
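
// A minimal usage sketch (illustrative, not part of the original header;
// it assumes IntervalTy is the pair-of-cycles type used above): the
// constructor runs sortAndMerge(), so unsorted, touching intervals are
// normalized:
// \code
//   ResourceSegments RS({{3, 5}, {1, 3}, {7, 9}});
//   // RS now holds { [1, 5), [7, 9) }: [1, 3) and [3, 5) were merged.
// \endcode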

/// Each scheduling boundary is associated with ready queues. It tracks the
/// current cycle in the direction of movement, and maintains the state
/// of "hazards" and other interlocks at the current cycle.
class SchedBoundary {
public:
  /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
  enum {
    TopQID = 1,
    BotQID = 2,
    LogMaxQID = 2
  };

  ScheduleDAGMI *DAG = nullptr;
  const TargetSchedModel *SchedModel = nullptr;
  SchedRemainder *Rem = nullptr;

  ReadyQueue Available;
  ReadyQueue Pending;

  ScheduleHazardRecognizer *HazardRec = nullptr;

private:
  /// True if the pending Q should be checked/updated before scheduling another
  /// instruction.
  bool CheckPending;

  /// Number of cycles it takes to issue the instructions scheduled in this
  /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
  /// See getStalls().
  unsigned CurrCycle;

  /// Micro-ops issued in the current cycle
  unsigned CurrMOps;

  /// MinReadyCycle - Cycle of the soonest available instruction.
  unsigned MinReadyCycle;

  // The expected latency of the critical path in this scheduled zone.
  unsigned ExpectedLatency;

  // The latency of dependence chains leading into this zone.
  // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
  // For each cycle scheduled: DLat -= 1.
  unsigned DependentLatency;

  /// Count the scheduled (issued) micro-ops that can be retired by
  /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
  unsigned RetiredMOps;

  // Count scheduled resources that have been executed. Resources are
  // considered executed if they become ready in the time that it takes to
  // saturate any resource including the one in question. Counts are scaled
  // for direct comparison with other resources. Counts can be compared with
  // MOps * getMicroOpFactor and Latency * getLatencyFactor.
  SmallVector<unsigned, 16> ExecutedResCounts;

  /// Cache the max count for a single resource.
  unsigned MaxExecutedResCount;

  // Cache the critical resources ID in this scheduled zone.
  unsigned ZoneCritResIdx;

  // Is the scheduled region resource limited vs. latency limited.
  bool IsResourceLimited;

private:
  /// Record how resources have been allocated across the cycles of
  /// the execution.
  std::map<unsigned, ResourceSegments> ReservedResourceSegments;
  std::vector<unsigned> ReservedCycles;
  /// For each PIdx, stores first index into ReservedResourceSegments that
  /// corresponds to it.
  ///
  /// For example, consider the following 3 resources (ResourceCount =
  /// 3):
  ///
  ///   +------------+--------+
  ///   |ResourceName|NumUnits|
  ///   +------------+--------+
  ///   |     X      |    2   |
  ///   +------------+--------+
  ///   |     Y      |    3   |
  ///   +------------+--------+
  ///   |     Z      |    1   |
  ///   +------------+--------+
  ///
  /// In this case, the total number of resource instances is 6. The
  /// vector \ref ReservedResourceSegments will have a slot for each instance.
  /// The vector \ref ReservedCyclesIndex will track at what index the first
  /// instance of the resource is found in the vector of \ref
  /// ReservedResourceSegments:
  ///
  ///                              Indexes of instances in
  ///                              ReservedResourceSegments
  ///
  ///                              0   1   2   3   4  5
  /// ReservedCyclesIndex[0] = 0; [X0, X1,
  /// ReservedCyclesIndex[1] = 2;          Y0, Y1, Y2
  /// ReservedCyclesIndex[2] = 5;                     Z
  SmallVector<unsigned, 16> ReservedCyclesIndex;

  // For each PIdx, stores the resource group IDs of its subunits
  SmallVector<APInt, 16> ResourceGroupSubUnitMasks;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  // Remember the greatest possible stall as an upper bound on the number of
  // times we should retry the pending queue because of a hazard.
  unsigned MaxObservedStall;
#endif

public:
  /// Pending queues extend the ready queues with the same ID and the
  /// PendingFlag set.
  SchedBoundary(unsigned ID, const Twine &Name):
    Available(ID, Name+".A"), Pending(ID << LogMaxQID, Name+".P") {
    reset();
  }
  SchedBoundary &operator=(const SchedBoundary &other) = delete;
  SchedBoundary(const SchedBoundary &other) = delete;
  ~SchedBoundary();

  void reset();

  void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
            SchedRemainder *rem);

  bool isTop() const {
    return Available.getID() == TopQID;
  }

  /// Number of cycles to issue the instructions scheduled in this zone.
  unsigned getCurrCycle() const { return CurrCycle; }

  /// Micro-ops issued in the current cycle
  unsigned getCurrMOps() const { return CurrMOps; }

  // The latency of dependence chains leading into this zone.
  unsigned getDependentLatency() const { return DependentLatency; }

  /// Get the number of latency cycles "covered" by the scheduled
  /// instructions. This is the larger of the critical path within the zone
  /// and the number of cycles required to issue the instructions.
  unsigned getScheduledLatency() const {
    return std::max(ExpectedLatency, CurrCycle);
  }

  unsigned getUnscheduledLatency(SUnit *SU) const {
    return isTop() ? SU->getHeight() : SU->getDepth();
  }

  unsigned getResourceCount(unsigned ResIdx) const {
    return ExecutedResCounts[ResIdx];
  }

  /// Get the scaled count of scheduled micro-ops and resources, including
  /// executed resources.
  unsigned getCriticalCount() const {
    if (!ZoneCritResIdx)
      return RetiredMOps * SchedModel->getMicroOpFactor();
    return getResourceCount(ZoneCritResIdx);
  }

  /// Get a scaled count for the minimum execution time of the scheduled
  /// micro-ops that are ready to execute by getExecutedCount. Notice the
  /// feedback loop.
  unsigned getExecutedCount() const {
    return std::max(CurrCycle * SchedModel->getLatencyFactor(),
                    MaxExecutedResCount);
  }

  unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }

  // Is the scheduled region resource limited vs. latency limited.
  bool isResourceLimited() const { return IsResourceLimited; }

  /// Get the difference between the given SUnit's ready time and the current
  /// cycle.
  unsigned getLatencyStallCycles(SUnit *SU);

  unsigned getNextResourceCycleByInstance(unsigned InstanceIndex,
                                          unsigned Cycles,
                                          unsigned StartAtCycle);

  std::pair<unsigned, unsigned> getNextResourceCycle(const MCSchedClassDesc *SC,
                                                     unsigned PIdx,
                                                     unsigned Cycles,
                                                     unsigned StartAtCycle);

  bool isUnbufferedGroup(unsigned PIdx) const {
    return SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin &&
           !SchedModel->getProcResource(PIdx)->BufferSize;
  }

  bool checkHazard(SUnit *SU);

  unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);

  unsigned getOtherResourceCount(unsigned &OtherCritIdx);

  /// Release SU to make it ready. If SU is not subject to a hazard, remove it
  /// from the pending queue (if it is already there) and push it into the
  /// available queue. Otherwise, push SU into the pending queue.
  ///
  /// @param SU The unit to be released.
  /// @param ReadyCycle The cycle at which the unit becomes ready.
  /// @param InPQueue Whether SU is already in the pending queue.
  /// @param Idx Position offset in the pending queue (if in it).
  void releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
                   unsigned Idx = 0);

  void bumpCycle(unsigned NextCycle);

  void incExecutedResources(unsigned PIdx, unsigned Count);

  unsigned countResource(const MCSchedClassDesc *SC, unsigned PIdx,
                         unsigned Cycles, unsigned ReadyCycle,
                         unsigned StartAtCycle);

  void bumpNode(SUnit *SU);

  void releasePending();

  void removeReady(SUnit *SU);

  /// Call this before applying any other heuristics to the Available queue.
  /// Updates the Available/Pending Q's if necessary and returns the single
  /// available instruction, or NULL if there are multiple candidates.
  SUnit *pickOnlyChoice();

  /// Dump the state of the information that tracks resource usage.
  void dumpReservedCycles() const;
  void dumpScheduledState() const;
};

/// Base class for GenericScheduler. This class maintains information about
/// scheduling candidates based on TargetSchedModel making it easy to implement
/// heuristics for either preRA or postRA scheduling.
class GenericSchedulerBase : public MachineSchedStrategy {
public:
  /// Represent the type of SchedCandidate found within a single queue.
  /// pickNodeBidirectional depends on these listed by decreasing priority.
  enum CandReason : uint8_t {
    NoCand, Only1, PhysReg, RegExcess, RegCritical, Stall, Cluster, Weak,
    RegMax, ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
    TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};

#ifndef NDEBUG
  static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
#endif

  /// Policy for scheduling the next instruction in the candidate's zone.
  struct CandPolicy {
    bool ReduceLatency = false;
    unsigned ReduceResIdx = 0;
    unsigned DemandResIdx = 0;

    CandPolicy() = default;

    bool operator==(const CandPolicy &RHS) const {
      return ReduceLatency == RHS.ReduceLatency &&
             ReduceResIdx == RHS.ReduceResIdx &&
             DemandResIdx == RHS.DemandResIdx;
    }
    bool operator!=(const CandPolicy &RHS) const {
      return !(*this == RHS);
    }
  };

  /// Status of an instruction's critical resource consumption.
  struct SchedResourceDelta {
    // Count critical resources in the scheduled region required by SU.
    unsigned CritResources = 0;

    // Count critical resources from another region consumed by SU.
    unsigned DemandedResources = 0;

    SchedResourceDelta() = default;

    bool operator==(const SchedResourceDelta &RHS) const {
      return CritResources == RHS.CritResources
        && DemandedResources == RHS.DemandedResources;
    }
    bool operator!=(const SchedResourceDelta &RHS) const {
      return !operator==(RHS);
    }
  };

  /// Store the state used by GenericScheduler heuristics, required for the
  /// lifetime of one invocation of pickNode().
  struct SchedCandidate {
    CandPolicy Policy;

    // The best SUnit candidate.
    SUnit *SU;

    // The reason for this candidate.
    CandReason Reason;

    // Whether this candidate should be scheduled at top/bottom.
    bool AtTop;

    // Register pressure values for the best candidate.
    RegPressureDelta RPDelta;

    // Critical resource consumption of the best candidate.
    SchedResourceDelta ResDelta;

    SchedCandidate() { reset(CandPolicy()); }
    SchedCandidate(const CandPolicy &Policy) { reset(Policy); }

    void reset(const CandPolicy &NewPolicy) {
      Policy = NewPolicy;
      SU = nullptr;
      Reason = NoCand;
      AtTop = false;
      RPDelta = RegPressureDelta();
      ResDelta = SchedResourceDelta();
    }

    bool isValid() const { return SU; }

    // Copy the status of another candidate without changing policy.
    void setBest(SchedCandidate &Best) {
      assert(Best.Reason != NoCand && "uninitialized Sched candidate");
      SU = Best.SU;
      Reason = Best.Reason;
      AtTop = Best.AtTop;
      RPDelta = Best.RPDelta;
      ResDelta = Best.ResDelta;
    }

    void initResourceDelta(const ScheduleDAGMI *DAG,
                           const TargetSchedModel *SchedModel);
  };

protected:
  const MachineSchedContext *Context;
  const TargetSchedModel *SchedModel = nullptr;
  const TargetRegisterInfo *TRI = nullptr;

  SchedRemainder Rem;

  GenericSchedulerBase(const MachineSchedContext *C) : Context(C) {}

  void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
                 SchedBoundary *OtherZone);

#ifndef NDEBUG
  void traceCandidate(const SchedCandidate &Cand);
#endif

private:
  bool shouldReduceLatency(const CandPolicy &Policy, SchedBoundary &CurrZone,
                           bool ComputeRemLatency, unsigned &RemLatency) const;
};

// Utility functions used by heuristics in tryCandidate().
bool tryLess(int TryVal, int CandVal,
             GenericSchedulerBase::SchedCandidate &TryCand,
             GenericSchedulerBase::SchedCandidate &Cand,
             GenericSchedulerBase::CandReason Reason);
bool tryGreater(int TryVal, int CandVal,
                GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                GenericSchedulerBase::CandReason Reason);
bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                GenericSchedulerBase::SchedCandidate &Cand,
                SchedBoundary &Zone);
bool tryPressure(const PressureChange &TryP,
                 const PressureChange &CandP,
                 GenericSchedulerBase::SchedCandidate &TryCand,
                 GenericSchedulerBase::SchedCandidate &Cand,
                 GenericSchedulerBase::CandReason Reason,
                 const TargetRegisterInfo *TRI,
                 const MachineFunction &MF);
unsigned getWeakLeft(const SUnit *SU, bool isTop);
int biasPhysReg(const SUnit *SU, bool isTop);
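
// Typical use inside a tryCandidate() implementation (an illustrative
// sketch, not part of this header's interface): prefer the candidate with
// fewer latency stall cycles, recording Stall as the reason.
//
//   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
//               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand,
//               GenericSchedulerBase::Stall))
//     return TryCand.Reason != GenericSchedulerBase::NoCand;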

/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
/// the schedule.
class GenericScheduler : public GenericSchedulerBase {
public:
  GenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ"),
    Bot(SchedBoundary::BotQID, "BotQ") {}

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override;

  void dumpPolicy() const override;

  bool shouldTrackPressure() const override {
    return RegionPolicy.ShouldTrackPressure;
  }

  bool shouldTrackLaneMasks() const override {
    return RegionPolicy.ShouldTrackLaneMasks;
  }

  void initialize(ScheduleDAGMI *dag) override;

  SUnit *pickNode(bool &IsTopNode) override;

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Top.releaseNode(SU, SU->TopReadyCycle, false);
    TopCand.SU = nullptr;
  }

  void releaseBottomNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;

    Bot.releaseNode(SU, SU->BotReadyCycle, false);
    BotCand.SU = nullptr;
  }

  void registerRoots() override;

protected:
  ScheduleDAGMILive *DAG = nullptr;

  MachineSchedPolicy RegionPolicy;

  // State of the top and bottom scheduled instruction boundaries.
  SchedBoundary Top;
  SchedBoundary Bot;

  /// Candidate last picked from Top boundary.
  SchedCandidate TopCand;
  /// Candidate last picked from Bot boundary.
  SchedCandidate BotCand;

  void checkAcyclicLatency();

  void initCandidate(SchedCandidate &Cand, SUnit *SU, bool AtTop,
                     const RegPressureTracker &RPTracker,
                     RegPressureTracker &TempTracker);

  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand,
                            SchedBoundary *Zone) const;

  SUnit *pickNodeBidirectional(bool &IsTopNode);

  void pickNodeFromQueue(SchedBoundary &Zone,
                         const CandPolicy &ZonePolicy,
                         const RegPressureTracker &RPTracker,
                         SchedCandidate &Candidate);

  void reschedulePhysReg(SUnit *SU, bool isTop);
};

/// PostGenericScheduler - Interface to the scheduling algorithm used by
/// ScheduleDAGMI.
///
/// Callbacks from ScheduleDAGMI:
///   initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
class PostGenericScheduler : public GenericSchedulerBase {
protected:
  ScheduleDAGMI *DAG = nullptr;
  SchedBoundary Top;
  SmallVector<SUnit*, 8> BotRoots;

public:
  PostGenericScheduler(const MachineSchedContext *C):
    GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}

  ~PostGenericScheduler() override = default;

  void initPolicy(MachineBasicBlock::iterator Begin,
                  MachineBasicBlock::iterator End,
                  unsigned NumRegionInstrs) override {
    /* no configurable policy */
  }

  /// PostRA scheduling does not track pressure.
  bool shouldTrackPressure() const override { return false; }

  void initialize(ScheduleDAGMI *Dag) override;

  void registerRoots() override;

  SUnit *pickNode(bool &IsTopNode) override;

  void scheduleTree(unsigned SubtreeID) override {
    llvm_unreachable("PostRA scheduler does not support subtree analysis.");
  }

  void schedNode(SUnit *SU, bool IsTopNode) override;

  void releaseTopNode(SUnit *SU) override {
    if (SU->isScheduled)
      return;
    Top.releaseNode(SU, SU->TopReadyCycle, false);
  }

  // Only called for roots.
  void releaseBottomNode(SUnit *SU) override {
    BotRoots.push_back(SU);
  }

protected:
  virtual bool tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);

  void pickNodeFromQueue(SchedCandidate &Cand);
};

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
/// Adds default DAG mutations.
ScheduleDAGMILive *createGenericSchedLive(MachineSchedContext *C);

/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
ScheduleDAGMI *createGenericSchedPostRA(MachineSchedContext *C);

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI);

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI);

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI);

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINESCHEDULER_H
//===- RDFGraph.h -----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Target-independent, SSA-based data flow graph for register data flow (RDF)
// for a non-SSA program representation (e.g. post-RA machine code).
//
//
// *** Introduction
//
// The RDF graph is a collection of nodes, each of which denotes some element
// of the program. There are two main types of such elements: code and
// references. Conceptually, "code" is something that represents the structure
// of the program, e.g. basic block or a statement, while "reference" is an
// instance of accessing a register, e.g. a definition or a use. Nodes are
// connected with each other based on the structure of the program (such as
// blocks, instructions, etc.), and based on the data flow (e.g. reaching
// definitions, reached uses, etc.). The single-reaching-definition principle
// of SSA is generally observed, although, due to the non-SSA representation
// of the program, there are some differences between the graph and a "pure"
// SSA representation.
//
//
// *** Implementation remarks
//
// Since the graph can contain a large number of nodes, memory consumption
// was one of the major design considerations. As a result, there is a single
// base class NodeBase which defines all members used by all possible derived
// classes. The members are arranged in a union, and a derived class cannot
// add any data members of its own. Each derived class only defines the
// functional interface, i.e. member functions. NodeBase must be a POD,
// which implies that all of its members must also be PODs.
// Since nodes need to be connected with other nodes, pointers have been
// replaced with 32-bit identifiers: each node has an id of type NodeId.
// There are mapping functions in the graph that translate between actual
// memory addresses and the corresponding identifiers.
// A node id of 0 is equivalent to nullptr.
//
//
// *** Structure of the graph
//
// A code node is always a collection of other nodes. For example, a code
// node corresponding to a basic block will contain code nodes corresponding
// to instructions. In turn, a code node corresponding to an instruction will
// contain a list of reference nodes that correspond to the definitions and
// uses of registers in that instruction. The members are arranged into a
// circular list, which is yet another consequence of the effort to save
// memory: for each member node it should be possible to obtain its owner,
// and it should be possible to access all other members. There are other
// ways to accomplish that, but the circular list seemed the most natural.
//
// +- CodeNode -+
// |            | <---------------------------------------------------+
// +-+--------+-+                                                     |
//   |FirstM  |LastM                                                  |
//   |        +-------------------------------------+                 |
//   |                                              |                 |
//   V                                              V                 |
//  +----------+ Next +----------+ Next       Next +----------+ Next  |
//  |          |----->|          |-----> ... ----->|          |----->-+
//  +- Member -+      +- Member -+                 +- Member -+
//
// The order of members is such that related reference nodes (see below)
// should be contiguous on the member list.
//
// A reference node is a node that encapsulates an access to a register,
// in other words, data flowing into or out of a register. There are two
// major kinds of reference nodes: defs and uses. A def node will contain
// the id of the first reached use, and the id of the first reached def.
// Each def and use will contain the id of the reaching def, and also the
// id of the next reached def (for def nodes) or use (for use nodes).
// The "next node sharing the same reaching def" is denoted as "sibling".
// In summary:
// - Def node contains: reaching def, sibling, first reached def, and first
// reached use.
// - Use node contains: reaching def and sibling.
//
// +-- DefNode --+
// | R2 = ...    | <---+--------------------+
// ++---------+--+     |                    |
//  |Reached  |Reached |                    |
//  |Def      |Use     |                    |
//  |         |        |Reaching            |Reaching
//  |         V        |Def                 |Def
//  |      +-- UseNode --+ Sib  +-- UseNode --+ Sib       Sib
//  |      | ... = R2    |----->| ... = R2    |----> ... ----> 0
//  |      +-------------+      +-------------+
//  V
// +-- DefNode --+ Sib
// | R2 = ...    |----> ...
// ++---------+--+
//  |         |
//  |         |
// ...       ...
//
// To get a full picture, the circular lists connecting blocks within a
// function, instructions within a block, etc. should be superimposed with
// the def-def, def-use links shown above.
// To illustrate this, consider a small example in a pseudo-assembly:
// foo:
//   add r2, r0, r1   ; r2 = r0+r1
//   addi r0, r2, 1   ; r0 = r2+1
//   ret r0           ; return value in r0
//
// The graph (in a format used by the debugging functions) would look like:
//
//   DFG dump:[
//   f1: Function foo
//   b2: === %bb.0 === preds(0), succs(0):
//   p3: phi [d4<r0>(,d12,u9):]
//   p5: phi [d6<r1>(,,u10):]
//   s7: add [d8<r2>(,,u13):, u9<r0>(d4):, u10<r1>(d6):]
//   s11: addi [d12<r0>(d4,,u15):, u13<r2>(d8):]
//   s14: ret [u15<r0>(d12):]
//   ]
//
// The f1, b2, p3, etc. are node ids. The letter is prepended to indicate the
// kind of the node (i.e. f - function, b - basic block, p - phi,
// s - statement, d - def, u - use).
// The format of a def node is:
//   dN<R>(rd,d,u):sib,
// where
//   N   - numeric node id,
//   R   - register being defined
//   rd  - reaching def,
//   d   - reached def,
//   u   - reached use,
//   sib - sibling.
// The format of a use node is:
//   uN<R>[!](rd):sib,
// where
//   N   - numeric node id,
//   R   - register being used,
//   rd  - reaching def,
//   sib - sibling.
// Possible annotations (usually preceding the node id):
//   +   - preserving def,
//   ~   - clobbering def,
//   "   - shadow ref (follows the node id),
//   !   - fixed register (appears after register name).
//
// The circular lists are not explicit in the dump.
//
//
// *** Node attributes
//
// NodeBase has a member "Attrs", which is the primary way of determining
// the node's characteristics. The fields in this member decide whether
// the node is a code node or a reference node (i.e. node's "type"), then
// within each type, the "kind" determines what specifically this node
// represents. The remaining bits, "flags", contain additional information
// that is even more detailed than the "kind".
// CodeNode's kinds are:
// - Phi:   Phi node, members are reference nodes.
// - Stmt:  Statement, members are reference nodes.
// - Block: Basic block, members are instruction nodes (i.e. Phi or Stmt).
// - Func:  The whole function. The members are basic block nodes.
// RefNode's kinds are:
// - Use.
// - Def.
//
// Meaning of flags:
// - Preserving: applies only to defs. A preserving def is one that can
//   preserve some of the original bits among those that are included in
//   the register associated with that def. For example, if R0 is a 32-bit
//   register, but a def can only change the lower 16 bits, then it will
//   be marked as preserving.
// - Shadow: a reference that has duplicates holding additional reaching
//   defs (see more below).
// - Clobbering: applied only to defs, indicates that the value generated
//   by this def is unspecified. A typical example would be volatile registers
//   after function calls.
// - Fixed: the register in this def/use cannot be replaced with any other
//   register. A typical case would be a parameter register to a call, or
//   the register with the return value from a function.
// - Undef: the register in this reference is assumed to have no
//   pre-existing value, even if it appears to be reached by some def.
//   This is typically used to prevent keeping registers artificially live
//   in cases when they are defined via predicated instructions. For example:
//     r0 = add-if-true cond, r10, r11                (1)
//     r0 = add-if-false cond, r12, r13, implicit r0  (2)
//     ... = r0                                       (3)
//   Before (1), r0 is not intended to be live, and the use of r0 in (3) is
//   not meant to be reached by any def preceding (1). However, since the
//   defs in (1) and (2) are both preserving, these properties alone would
//   imply that the use in (3) may indeed be reached by some prior def.
//   Adding Undef flag to the def in (1) prevents that. The Undef flag
//   may be applied to both defs and uses.
// - Dead: applies only to defs. The value coming out of a "dead" def is
//   assumed to be unused, even if the def appears to be reaching other defs
//   or uses. The motivation for this flag comes from dead defs on function
//   calls: there is no way to determine if such a def is dead without
//   analyzing the target's ABI. Hence the graph should contain this info,
//   as it is unavailable otherwise. On the other hand, a def without any
//   uses on a typical instruction is not the intended target for this flag.
//
// *** Shadow references
//
// It may happen that a super-register can have two (or more) non-overlapping
// sub-registers. When both of these sub-registers are defined and followed
// by a use of the super-register, the use of the super-register will not
// have a unique reaching def: both defs of the sub-registers need to be
// accounted for. In such cases, a duplicate use of the super-register is
// added and it points to the extra reaching def. Both uses are marked with
// a flag "shadow". Example:
// Assume t0 is a super-register of r0 and r1, r0 and r1 do not overlap:
//   set r0, 1        ; r0 = 1
//   set r1, 1        ; r1 = 1
//   addi t1, t0, 1   ; t1 = t0+1
//
// The DFG:
//   s1: set [d2<r0>(,,u9):]
//   s3: set [d4<r1>(,,u10):]
//   s5: addi [d6<t1>(,,):, u7"<t0>(d2):, u8"<t0>(d4):]
//
// The statement s5 has two use nodes for t0: u7" and u8". The quotation
// mark " indicates that the node is a shadow.
//

#ifndef LLVM_CODEGEN_RDFGRAPH_H
#define LLVM_CODEGEN_RDFGRAPH_H

#include "RDFRegisters.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>

// RDF uses uint32_t to refer to registers. This is to ensure that the type
// size remains specific. In other places, registers are often stored using
// unsigned.
static_assert(sizeof(uint32_t) == sizeof(unsigned), "Those should be equal");

namespace llvm {

class MachineBasicBlock;
class MachineDominanceFrontier;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineOperand;
class raw_ostream;
class TargetInstrInfo;
class TargetRegisterInfo;

namespace rdf {

using NodeId = uint32_t;

struct DataFlowGraph;

struct NodeAttrs {
  // clang-format off
  enum : uint16_t {
    None          = 0x0000,   // Nothing

    // Types: 2 bits
    TypeMask      = 0x0003,
    Code          = 0x0001,   // 01, Container
    Ref           = 0x0002,   // 10, Reference

    // Kind: 3 bits
    KindMask      = 0x0007 << 2,
    Def           = 0x0001 << 2,  // 001
    Use           = 0x0002 << 2,  // 010
    Phi           = 0x0003 << 2,  // 011
    Stmt          = 0x0004 << 2,  // 100
    Block         = 0x0005 << 2,  // 101
    Func          = 0x0006 << 2,  // 110

    // Flags: 7 bits for now
    FlagMask      = 0x007F << 5,
    Shadow        = 0x0001 << 5,  // 0000001, Has extra reaching defs.
    Clobbering    = 0x0002 << 5,  // 0000010, Produces unspecified values.
    PhiRef        = 0x0004 << 5,  // 0000100, Member of PhiNode.
    Preserving    = 0x0008 << 5,  // 0001000, Def can keep original bits.
    Fixed         = 0x0010 << 5,  // 0010000, Fixed register.
    Undef         = 0x0020 << 5,  // 0100000, Has no pre-existing value.
    Dead          = 0x0040 << 5,  // 1000000, Does not define a value.
  };
  // clang-format on

  static uint16_t type(uint16_t T) { //
    return T & TypeMask;
  }
  static uint16_t kind(uint16_t T) { //
    return T & KindMask;
  }
  static uint16_t flags(uint16_t T) { //
    return T & FlagMask;
  }
  static uint16_t set_type(uint16_t A, uint16_t T) {
    return (A & ~TypeMask) | T;
  }

  static uint16_t set_kind(uint16_t A, uint16_t K) {
    return (A & ~KindMask) | K;
  }

  static uint16_t set_flags(uint16_t A, uint16_t F) {
    return (A & ~FlagMask) | F;
  }

  // Test if A contains B.
  static bool contains(uint16_t A, uint16_t B) {
    if (type(A) != Code)
      return false;
    uint16_t KB = kind(B);
    switch (kind(A)) {
    case Func:
      return KB == Block;
    case Block:
      return KB == Phi || KB == Stmt;
    case Phi:
    case Stmt:
      return type(B) == Ref;
    }
    return false;
  }
};
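
// A small sketch (illustrative, not from the original header) of composing
// and querying an attribute word for a clobbering def reference:
//
//   uint16_t A = NodeAttrs::set_flags(
//       NodeAttrs::set_kind(NodeAttrs::set_type(0, NodeAttrs::Ref),
//                           NodeAttrs::Def),
//       NodeAttrs::Clobbering);
//   NodeAttrs::type(A);                     // NodeAttrs::Ref
//   NodeAttrs::kind(A);                     // NodeAttrs::Def
//   NodeAttrs::contains(A, NodeAttrs::Use); // false: A is not a code node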

struct BuildOptions {
  enum : unsigned {
    None = 0x00,
    KeepDeadPhis = 0x01, // Do not remove dead phis during build.
    OmitReserved = 0x02, // Do not track reserved registers.
  };
};

template <typename T> struct NodeAddr {
  NodeAddr() = default;
  NodeAddr(T A, NodeId I) : Addr(A), Id(I) {}

  // Type cast (casting constructor). This conversion is the reason for
  // having this class instead of using std::pair.
  template <typename S>
  NodeAddr(const NodeAddr<S> &NA) : Addr(static_cast<T>(NA.Addr)), Id(NA.Id) {}

  bool operator==(const NodeAddr<T> &NA) const {
    assert((Addr == NA.Addr) == (Id == NA.Id));
    return Addr == NA.Addr;
  }
  bool operator!=(const NodeAddr<T> &NA) const { //
    return !operator==(NA);
  }

  T Addr = nullptr;
  NodeId Id = 0;
};
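
// Usage sketch (illustrative; G and Id are assumed to be a DataFlowGraph
// and a valid NodeId): the casting constructor narrows a generic node
// address to a specific kind:
//
//   Node NA = G.addr<NodeBase *>(Id);
//   if (DataFlowGraph::IsDef(NA)) {
//     Def DA = NA; // NodeAddr<NodeBase *> -> NodeAddr<DefNode *>
//     ...
//   }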

struct NodeBase;

struct RefNode;
struct DefNode;
struct UseNode;
struct PhiUseNode;

struct CodeNode;
struct InstrNode;
struct PhiNode;
struct StmtNode;
struct BlockNode;
struct FuncNode;

// Use these short names with rdf:: qualification to avoid conflicts with
// preexisting names. Do not use 'using namespace rdf'.
using Node = NodeAddr<NodeBase *>;

using Ref = NodeAddr<RefNode *>;
using Def = NodeAddr<DefNode *>;
using Use = NodeAddr<UseNode *>; // This may conflict with llvm::Use.
using PhiUse = NodeAddr<PhiUseNode *>;

using Code = NodeAddr<CodeNode *>;
using Instr = NodeAddr<InstrNode *>;
using Phi = NodeAddr<PhiNode *>;
using Stmt = NodeAddr<StmtNode *>;
using Block = NodeAddr<BlockNode *>;
using Func = NodeAddr<FuncNode *>;

// Fast memory allocation and translation between node id and node address.
// This is really the same idea as the one underlying the "bump pointer
// allocator", the difference being in the translation. A node id is
// composed of two components: the index of the block in which it was
// allocated, and the index within the block. With the default settings,
// where the number of nodes per block is 4096, the node id (minus 1) is:
//
// bit position:                11             0
// +----------------------------+--------------+
// | Index of the block         |Index in block|
// +----------------------------+--------------+
//
// The actual node id is the above plus 1, to avoid creating a node id of 0.
//
// This method significantly improved the build time, compared to using maps
// (std::unordered_map or DenseMap) to translate between pointers and ids.
struct NodeAllocator {
  // Amount of storage for a single node.
  enum { NodeMemSize = 32 };

  NodeAllocator(uint32_t NPB = 4096)
      : NodesPerBlock(NPB), BitsPerIndex(Log2_32(NPB)),
        IndexMask((1 << BitsPerIndex) - 1) {
    assert(isPowerOf2_32(NPB));
  }

  NodeBase *ptr(NodeId N) const {
    uint32_t N1 = N - 1;
    uint32_t BlockN = N1 >> BitsPerIndex;
    uint32_t Offset = (N1 & IndexMask) * NodeMemSize;
    return reinterpret_cast<NodeBase *>(Blocks[BlockN] + Offset);
  }

  NodeId id(const NodeBase *P) const;
  Node New();
  void clear();

private:
  void startNewBlock();
  bool needNewBlock();

  uint32_t makeId(uint32_t Block, uint32_t Index) const {
    // Add 1 to the id, to avoid the id of 0, which is treated as "null".
    return ((Block << BitsPerIndex) | Index) + 1;
  }

  const uint32_t NodesPerBlock;
  const uint32_t BitsPerIndex;
  const uint32_t IndexMask;
  char *ActiveEnd = nullptr;
  std::vector<char *> Blocks;
  using AllocatorTy = BumpPtrAllocatorImpl<MallocAllocator, 65536>;
  AllocatorTy MemPool;
};
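
// Worked example (illustrative, using the default NodesPerBlock = 4096, so
// BitsPerIndex = 12): the node at block 2, index 5 gets
//   makeId(2, 5) = ((2 << 12) | 5) + 1 = 8198,
// and ptr(8198) inverts it: N1 = 8197, BlockN = 8197 >> 12 = 2,
// Offset = (8197 & 0xFFF) * NodeMemSize = 5 * 32 = 160 bytes into Blocks[2].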

using RegisterSet = std::set<RegisterRef>;

struct TargetOperandInfo {
  TargetOperandInfo(const TargetInstrInfo &tii) : TII(tii) {}
  virtual ~TargetOperandInfo() = default;

  virtual bool isPreserving(const MachineInstr &In, unsigned OpNum) const;
  virtual bool isClobbering(const MachineInstr &In, unsigned OpNum) const;
  virtual bool isFixedReg(const MachineInstr &In, unsigned OpNum) const;

  const TargetInstrInfo &TII;
};

// Packed register reference. Only used for storage.
struct PackedRegisterRef {
  RegisterId Reg;
  uint32_t MaskId;
};

struct LaneMaskIndex : private IndexedSet<LaneBitmask> {
  LaneMaskIndex() = default;

  LaneBitmask getLaneMaskForIndex(uint32_t K) const {
    return K == 0 ? LaneBitmask::getAll() : get(K);
  }

  uint32_t getIndexForLaneMask(LaneBitmask LM) {
    assert(LM.any());
    return LM.all() ? 0 : insert(LM);
  }

  uint32_t getIndexForLaneMask(LaneBitmask LM) const {
    assert(LM.any());
    return LM.all() ? 0 : find(LM);
  }
};
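
// Behavior sketch (illustrative): index 0 is reserved for the "all lanes"
// mask, so no storage is spent on the common full-mask case:
//
//   LaneMaskIndex LMI;
//   LMI.getIndexForLaneMask(LaneBitmask::getAll()); // 0, nothing inserted
//   LMI.getLaneMaskForIndex(0);                     // LaneBitmask::getAll()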

struct NodeBase {
public:
  // Make sure this is a POD.
  NodeBase() = default;

  uint16_t getType() const { return NodeAttrs::type(Attrs); }
  uint16_t getKind() const { return NodeAttrs::kind(Attrs); }
  uint16_t getFlags() const { return NodeAttrs::flags(Attrs); }
  NodeId getNext() const { return Next; }

  uint16_t getAttrs() const { return Attrs; }
  void setAttrs(uint16_t A) { Attrs = A; }
  void setFlags(uint16_t F) { setAttrs(NodeAttrs::set_flags(getAttrs(), F)); }

  // Insert node NA after "this" in the circular chain.
  void append(Node NA);

  // Initialize all members to 0.
  void init() { memset(this, 0, sizeof *this); }

  void setNext(NodeId N) { Next = N; }

protected:
  uint16_t Attrs;
  uint16_t Reserved;
  NodeId Next; // Id of the next node in the circular chain.
  // Definitions of nested types. Using anonymous nested structs would make
  // this class definition clearer, but unnamed structs are not a part of
  // the standard.
  struct Def_struct {
    NodeId DD, DU; // Ids of the first reached def and use.
  };
  struct PhiU_struct {
    NodeId PredB; // Id of the predecessor block for a phi use.
  };
  struct Code_struct {
    void *CP;             // Pointer to the actual code.
    NodeId FirstM, LastM; // Id of the first member and last.
  };
  struct Ref_struct {
    NodeId RD, Sib; // Ids of the reaching def and the sibling.
    union {
      Def_struct Def;
      PhiU_struct PhiU;
    };
    union {
      MachineOperand *Op;   // Non-phi refs point to a machine operand.
      PackedRegisterRef PR; // Phi refs store register info directly.
    };
  };

  // The actual payload.
  union {
    Ref_struct RefData;
    Code_struct CodeData;
  };
};
// The allocator allocates chunks of 32 bytes for each node. The fact that
// each node takes 32 bytes in memory is used for fast translation between
// the node id and the node address.
static_assert(sizeof(NodeBase) <= NodeAllocator::NodeMemSize,
              "NodeBase must be at most NodeAllocator::NodeMemSize bytes");

using NodeList = SmallVector<Node, 4>;
using NodeSet = std::set<NodeId>;

struct RefNode : public NodeBase {
  RefNode() = default;

  RegisterRef getRegRef(const DataFlowGraph &G) const;

  MachineOperand &getOp() {
    assert(!(getFlags() & NodeAttrs::PhiRef));
    return *RefData.Op;
  }

  void setRegRef(RegisterRef RR, DataFlowGraph &G);
  void setRegRef(MachineOperand *Op, DataFlowGraph &G);

  NodeId getReachingDef() const { return RefData.RD; }
  void setReachingDef(NodeId RD) { RefData.RD = RD; }

  NodeId getSibling() const { return RefData.Sib; }
  void setSibling(NodeId Sib) { RefData.Sib = Sib; }

  bool isUse() const {
    assert(getType() == NodeAttrs::Ref);
    return getKind() == NodeAttrs::Use;
  }

  bool isDef() const {
    assert(getType() == NodeAttrs::Ref);
    return getKind() == NodeAttrs::Def;
  }

  template <typename Predicate>
  Ref getNextRef(RegisterRef RR, Predicate P, bool NextOnly,
                 const DataFlowGraph &G);
  Node getOwner(const DataFlowGraph &G);
};

struct DefNode : public RefNode {
  NodeId getReachedDef() const { return RefData.Def.DD; }
  void setReachedDef(NodeId D) { RefData.Def.DD = D; }
  NodeId getReachedUse() const { return RefData.Def.DU; }
  void setReachedUse(NodeId U) { RefData.Def.DU = U; }

  void linkToDef(NodeId Self, Def DA);
};

struct UseNode : public RefNode {
  void linkToDef(NodeId Self, Def DA);
};

struct PhiUseNode : public UseNode {
  NodeId getPredecessor() const {
    assert(getFlags() & NodeAttrs::PhiRef);
    return RefData.PhiU.PredB;
  }
  void setPredecessor(NodeId B) {
    assert(getFlags() & NodeAttrs::PhiRef);
    RefData.PhiU.PredB = B;
  }
};

struct CodeNode : public NodeBase {
  template <typename T> T getCode() const { //
    return static_cast<T>(CodeData.CP);
  }
  void setCode(void *C) { CodeData.CP = C; }

  Node getFirstMember(const DataFlowGraph &G) const;
  Node getLastMember(const DataFlowGraph &G) const;
  void addMember(Node NA, const DataFlowGraph &G);
  void addMemberAfter(Node MA, Node NA, const DataFlowGraph &G);
  void removeMember(Node NA, const DataFlowGraph &G);

  NodeList members(const DataFlowGraph &G) const;
  template <typename Predicate>
  NodeList members_if(Predicate P, const DataFlowGraph &G) const;
};

struct InstrNode : public CodeNode {
  Node getOwner(const DataFlowGraph &G);
};

struct PhiNode : public InstrNode {
  MachineInstr *getCode() const { return nullptr; }
};

struct StmtNode : public InstrNode {
  MachineInstr *getCode() const { //
    return CodeNode::getCode<MachineInstr *>();
  }
};

struct BlockNode : public CodeNode {
  MachineBasicBlock *getCode() const {
    return CodeNode::getCode<MachineBasicBlock *>();
  }

  void addPhi(Phi PA, const DataFlowGraph &G);
};

struct FuncNode : public CodeNode {
  MachineFunction *getCode() const {
    return CodeNode::getCode<MachineFunction *>();
  }

  Block findBlock(const MachineBasicBlock *BB, const DataFlowGraph &G) const;
  Block getEntryBlock(const DataFlowGraph &G);
};

struct DataFlowGraph {
  DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
                const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
                const MachineDominanceFrontier &mdf);
  DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
                const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
                const MachineDominanceFrontier &mdf,
                const TargetOperandInfo &toi);

  struct Config {
    Config() = default;
    Config(unsigned Opts) : Options(Opts) {}
    Config(ArrayRef<const TargetRegisterClass *> RCs) : Classes(RCs) {}
    Config(ArrayRef<MCPhysReg> Track) : TrackRegs(Track.begin(), Track.end()) {}
    Config(ArrayRef<RegisterId> Track)
        : TrackRegs(Track.begin(), Track.end()) {}

    unsigned Options = BuildOptions::None;
    SmallVector<const TargetRegisterClass *> Classes;
    std::set<RegisterId> TrackRegs;
  };

  NodeBase *ptr(NodeId N) const;
  template <typename T> T ptr(NodeId N) const { //
    return static_cast<T>(ptr(N));
  }

  NodeId id(const NodeBase *P) const;

  template <typename T> NodeAddr<T> addr(NodeId N) const {
    return {ptr<T>(N), N};
  }

  Func getFunc() const { return TheFunc; }
  MachineFunction &getMF() const { return MF; }
  const TargetInstrInfo &getTII() const { return TII; }
  const TargetRegisterInfo &getTRI() const { return TRI; }
  const PhysicalRegisterInfo &getPRI() const { return PRI; }
  const MachineDominatorTree &getDT() const { return MDT; }
  const MachineDominanceFrontier &getDF() const { return MDF; }
  const RegisterAggr &getLiveIns() const { return LiveIns; }

  struct DefStack {
    DefStack() = default;

    bool empty() const { return Stack.empty() || top() == bottom(); }

  private:
    using value_type = Def;
    struct Iterator {
      using value_type = DefStack::value_type;

      Iterator &up() {
        Pos = DS.nextUp(Pos);
        return *this;
      }
      Iterator &down() {
        Pos = DS.nextDown(Pos);
        return *this;
      }

      value_type operator*() const {
        assert(Pos >= 1);
        return DS.Stack[Pos - 1];
      }
      const value_type *operator->() const {
        assert(Pos >= 1);
        return &DS.Stack[Pos - 1];
      }
      bool operator==(const Iterator &It) const { return Pos == It.Pos; }
      bool operator!=(const Iterator &It) const { return Pos != It.Pos; }

    private:
      friend struct DefStack;

      Iterator(const DefStack &S, bool Top);

      // Pos-1 is the index in the StorageType object that corresponds to
      // the top of the DefStack.
      const DefStack &DS;
      unsigned Pos;
    };

  public:
    using iterator = Iterator;

    iterator top() const { return Iterator(*this, true); }
    iterator bottom() const { return Iterator(*this, false); }
    unsigned size() const;

    void push(Def DA) { Stack.push_back(DA); }
    void pop();
    void start_block(NodeId N);
    void clear_block(NodeId N);

  private:
    friend struct Iterator;

    using StorageType = std::vector<value_type>;

    bool isDelimiter(const StorageType::value_type &P, NodeId N = 0) const {
      return (P.Addr == nullptr) && (N == 0 || P.Id == N);
    }

    unsigned nextUp(unsigned P) const;
    unsigned nextDown(unsigned P) const;

    StorageType Stack;
  };

  // Map: Register (physical or virtual) -> DefStack.
  // Implemented as a std::unordered_map for fast element access.
  using DefStackMap = std::unordered_map<RegisterId, DefStack>;

  void build(const Config &config);
  void build() { build(Config()); }

  void pushAllDefs(Instr IA, DefStackMap &DM);
  void markBlock(NodeId B, DefStackMap &DefM);
  void releaseBlock(NodeId B, DefStackMap &DefM);

  PackedRegisterRef pack(RegisterRef RR) {
    return {RR.Reg, LMI.getIndexForLaneMask(RR.Mask)};
  }
  PackedRegisterRef pack(RegisterRef RR) const {
    return {RR.Reg, LMI.getIndexForLaneMask(RR.Mask)};
  }
  RegisterRef unpack(PackedRegisterRef PR) const {
    return RegisterRef(PR.Reg, LMI.getLaneMaskForIndex(PR.MaskId));
  }
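
  // Round-trip sketch (illustrative; G and RR are assumed to be a
  // DataFlowGraph and a valid RegisterRef): pack() stores the lane mask as
  // an index and unpack() restores it, so unpack(pack(RR)) equals RR:
  //
  //   PackedRegisterRef PR = G.pack(RR); // PR.MaskId == 0 for all-lanes mask
  //   RegisterRef Back = G.unpack(PR);   // same register, same lane mask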

  RegisterRef makeRegRef(unsigned Reg, unsigned Sub) const;
  RegisterRef makeRegRef(const MachineOperand &Op) const;

  Ref getNextRelated(Instr IA, Ref RA) const;
  Ref getNextShadow(Instr IA, Ref RA, bool Create);

  NodeList getRelatedRefs(Instr IA, Ref RA) const;

  Block findBlock(MachineBasicBlock *BB) const { return BlockNodes.at(BB); }

  void unlinkUse(Use UA, bool RemoveFromOwner) {
    unlinkUseDF(UA);
    if (RemoveFromOwner)
      removeFromOwner(UA);
  }

  void unlinkDef(Def DA, bool RemoveFromOwner) {
    unlinkDefDF(DA);
    if (RemoveFromOwner)
      removeFromOwner(DA);
  }

  bool isTracked(RegisterRef RR) const;
  bool hasUntrackedRef(Stmt S, bool IgnoreReserved = true) const;

  // Some useful filters.
  template <uint16_t Kind> static bool IsRef(const Node BA) {
    return BA.Addr->getType() == NodeAttrs::Ref && BA.Addr->getKind() == Kind;
  }

  template <uint16_t Kind> static bool IsCode(const Node BA) {
    return BA.Addr->getType() == NodeAttrs::Code && BA.Addr->getKind() == Kind;
  }

  static bool IsDef(const Node BA) {
    return BA.Addr->getType() == NodeAttrs::Ref &&
           BA.Addr->getKind() == NodeAttrs::Def;
  }

  static bool IsUse(const Node BA) {
    return BA.Addr->getType() == NodeAttrs::Ref &&
           BA.Addr->getKind() == NodeAttrs::Use;
  }

  static bool IsPhi(const Node BA) {
    return BA.Addr->getType() == NodeAttrs::Code &&
           BA.Addr->getKind() == NodeAttrs::Phi;
  }

  static bool IsPreservingDef(const Def DA) {
    uint16_t Flags = DA.Addr->getFlags();
    return (Flags & NodeAttrs::Preserving) && !(Flags & NodeAttrs::Undef);
  }

private:
  void reset();

  RegisterAggr getLandingPadLiveIns() const;

  Node newNode(uint16_t Attrs);
  Node cloneNode(const Node B);
  Use newUse(Instr Owner, MachineOperand &Op, uint16_t Flags = NodeAttrs::None);
  PhiUse newPhiUse(Phi Owner, RegisterRef RR, Block PredB,
                   uint16_t Flags = NodeAttrs::PhiRef);
  Def newDef(Instr Owner, MachineOperand &Op, uint16_t Flags = NodeAttrs::None);
  Def newDef(Instr Owner, RegisterRef RR, uint16_t Flags = NodeAttrs::PhiRef);
  Phi newPhi(Block Owner);
  Stmt newStmt(Block Owner, MachineInstr *MI);
  Block newBlock(Func Owner, MachineBasicBlock *BB);
  Func newFunc(MachineFunction *MF);

  template <typename Predicate>
  std::pair<Ref, Ref> locateNextRef(Instr IA, Ref RA, Predicate P) const;

  using BlockRefsMap = RegisterAggrMap<NodeId>;

  void buildStmt(Block BA, MachineInstr &In);
  void recordDefsForDF(BlockRefsMap &PhiM, Block BA);
  void buildPhis(BlockRefsMap &PhiM, Block BA);
  void removeUnusedPhis();

  void pushClobbers(Instr IA, DefStackMap &DM);
  void pushDefs(Instr IA, DefStackMap &DM);
  template <typename T> void linkRefUp(Instr IA, NodeAddr<T> TA, DefStack &DS);
  template <typename Predicate>
  void linkStmtRefs(DefStackMap &DefM, Stmt SA, Predicate P);
  void linkBlockRefs(DefStackMap &DefM, Block BA);

  void unlinkUseDF(Use UA);
  void unlinkDefDF(Def DA);

  void removeFromOwner(Ref RA) {
    Instr IA = RA.Addr->getOwner(*this);
    IA.Addr->removeMember(RA, *this);
  }

  // Default TOI object, if not given in the constructor.
  std::unique_ptr<TargetOperandInfo> DefaultTOI;

  MachineFunction &MF;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;
  const PhysicalRegisterInfo PRI;
  const MachineDominatorTree &MDT;
  const MachineDominanceFrontier &MDF;
  const TargetOperandInfo &TOI;

  RegisterAggr LiveIns;
  Func TheFunc;
  NodeAllocator Memory;
  // Local map:  MachineBasicBlock -> NodeAddr<BlockNode*>
  std::map<MachineBasicBlock *, Block> BlockNodes;
  // Lane mask map.
  LaneMaskIndex LMI;

  Config BuildCfg;
  std::set<unsigned> TrackedUnits;
  BitVector ReservedRegs;
}; // struct DataFlowGraph

template <typename Predicate>
Ref RefNode::getNextRef(RegisterRef RR, Predicate P, bool NextOnly,
                        const DataFlowGraph &G) {
  // Get the "Next" reference in the circular list that references RR and
  // satisfies predicate "Pred".
  auto NA = G.addr<NodeBase *>(getNext());

  while (NA.Addr != this) {
    if (NA.Addr->getType() == NodeAttrs::Ref) {
      Ref RA = NA;
      if (G.getPRI().equal_to(RA.Addr->getRegRef(G), RR) && P(NA))
        return NA;
      if (NextOnly)
        break;
      NA = G.addr<NodeBase *>(NA.Addr->getNext());
    } else {
      // We've hit the beginning of the chain.
      assert(NA.Addr->getType() == NodeAttrs::Code);
      // Make sure we stop here with NextOnly. Otherwise we can return the
      // wrong ref. Consider the following while creating/linking shadow uses:
      //   -> code -> sr1 -> sr2 -> [back to code]
      // Say that shadow refs sr1, and sr2 have been linked, but we need to
      // create and link another one. Starting from sr2, we'd hit the code
      // node and return sr1 if the iteration didn't stop here.
      if (NextOnly)
        break;
      Code CA = NA;
      NA = CA.Addr->getFirstMember(G);
    }
  }
  // Return the equivalent of "nullptr" if such a node was not found.
  return Ref();
}

template <typename Predicate>
NodeList CodeNode::members_if(Predicate P, const DataFlowGraph &G) const {
  NodeList MM;
  auto M = getFirstMember(G);
  if (M.Id == 0)
    return MM;

  while (M.Addr != this) {
    if (P(M))
      MM.push_back(M);
    M = G.addr<NodeBase *>(M.Addr->getNext());
  }
  return MM;
}
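
// Usage sketch (illustrative; SA and G are assumed to be a valid Stmt and
// DataFlowGraph): collect all def nodes of a statement with one of the
// predicate filters declared above:
//
//   NodeList Defs = SA.Addr->members_if(DataFlowGraph::IsDef, G);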

template <typename T> struct Print {
  Print(const T &x, const DataFlowGraph &g) : Obj(x), G(g) {}

  const T &Obj;
  const DataFlowGraph &G;
};

template <typename T> Print(const T &, const DataFlowGraph &) -> Print<T>;

template <typename T> struct PrintNode : Print<NodeAddr<T>> {
  PrintNode(const NodeAddr<T> &x, const DataFlowGraph &g)
      : Print<NodeAddr<T>>(x, g) {}
};

raw_ostream &operator<<(raw_ostream &OS, const Print<RegisterRef> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeId> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Def> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Use> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<PhiUse> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Ref> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeList> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<NodeSet> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Phi> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Stmt> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Instr> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Block> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<Func> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<RegisterSet> &P);
raw_ostream &operator<<(raw_ostream &OS, const Print<RegisterAggr> &P);
raw_ostream &operator<<(raw_ostream &OS,
                        const Print<DataFlowGraph::DefStack> &P);

} // end namespace rdf
} // end namespace llvm

#endif // LLVM_CODEGEN_RDFGRAPH_H
//==- RegisterUsageInfo.h - Register Usage Information Storage --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass is required to take advantage of the interprocedural register
/// allocation infrastructure.
///
/// This is a simple immutable pass which keeps RegMasks (calculated based on
/// actual register allocation) for functions in a module and provides a simple
/// API to query this information.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERUSAGEINFO_H
#define LLVM_CODEGEN_REGISTERUSAGEINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/PassRegistry.h"
#include <cstdint>
#include <vector>

namespace llvm {

class Function;
class LLVMTargetMachine;

class PhysicalRegisterUsageInfo : public ImmutablePass {
public:
  static char ID;

  PhysicalRegisterUsageInfo() : ImmutablePass(ID) {
    PassRegistry &Registry = *PassRegistry::getPassRegistry();
    initializePhysicalRegisterUsageInfoPass(Registry);
  }

  /// Set TargetMachine which is used to print analysis.
  void setTargetMachine(const LLVMTargetMachine &TM);

  bool doInitialization(Module &M) override;

  bool doFinalization(Module &M) override;

  /// Store the RegMask for the given Function.
  void storeUpdateRegUsageInfo(const Function &FP,
                               ArrayRef<uint32_t> RegMask);

  /// Query the stored RegMask for the given Function. Returns an empty array
  /// if the function is not known.
  ArrayRef<uint32_t> getRegUsageInfo(const Function &FP);

  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  /// A dense map from Function * to RegMask.
  /// In a RegMask, a bit value of 0 means the register is used (clobbered) by
  /// the function, and 1 means the register's content is preserved across the
  /// function call.
  DenseMap<const Function *, std::vector<uint32_t>> RegMasks;

  const LLVMTargetMachine *TM = nullptr;
};
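
// A typical query pattern (an illustrative sketch, assuming a pass that has
// declared this analysis in its getAnalysisUsage):
//
//   auto &PRUI = getAnalysis<PhysicalRegisterUsageInfo>();
//   ArrayRef<uint32_t> Mask = PRUI.getRegUsageInfo(F);
//   if (Mask.empty()) {
//     // No usage info recorded for F.
//   }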

} // end namespace llvm

#endif // LLVM_CODEGEN_REGISTERUSAGEINFO_H
//===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect the sequence of machine instructions for a basic block.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/BranchProbability.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <vector>

namespace llvm {

class BasicBlock;
class MachineFunction;
class MCSymbol;
class ModuleSlotTracker;
class Pass;
class Printable;
class SlotIndexes;
class StringRef;
class raw_ostream;
class LiveIntervals;
class TargetRegisterClass;
class TargetRegisterInfo;

// This structure uniquely identifies a basic block section.
// Possible values are
//  {Type: Default, Number: (unsigned)} (These are regular section IDs)
//  {Type: Exception, Number: 0}  (ExceptionSectionID)
//  {Type: Cold, Number: 0}  (ColdSectionID)
struct MBBSectionID {
  enum SectionType {
    Default = 0, // Regular section (these sections are distinguished by the
                 // Number field).
    Exception,   // Special section type for exception handling blocks
    Cold,        // Special section type for cold blocks
  } Type;
  unsigned Number;

  MBBSectionID(unsigned N) : Type(Default), Number(N) {}

  // Special unique sections for cold and exception blocks.
  const static MBBSectionID ColdSectionID;
  const static MBBSectionID ExceptionSectionID;

  bool operator==(const MBBSectionID &Other) const {
    return Type == Other.Type && Number == Other.Number;
  }

  bool operator!=(const MBBSectionID &Other) const { return !(*this == Other); }

private:
  // This is only used to construct the special cold and exception sections.
  MBBSectionID(SectionType T) : Type(T), Number(0) {}
};
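// For example, two section IDs compare equal only when both Type and Number
// match (a small illustrative sketch):
//
//   MBBSectionID Regular(/*N=*/0);
//   bool IsCold = (Regular == MBBSectionID::ColdSectionID); // false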

template <> struct ilist_traits<MachineInstr> {
private:
  friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.

  MachineBasicBlock *Parent;

  using instr_iterator =
      simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;

public:
  void addNodeToList(MachineInstr *N);
  void removeNodeFromList(MachineInstr *N);
  void transferNodesFromList(ilist_traits &FromList, instr_iterator First,
                             instr_iterator Last);
  void deleteNode(MachineInstr *MI);
};

class MachineBasicBlock
    : public ilist_node_with_parent<MachineBasicBlock, MachineFunction> {
public:
  /// Pair of physical register and lane mask.
  /// This is not simply a std::pair typedef because the members should be named
  /// clearly as they both have an integer type.
  struct RegisterMaskPair {
  public:
    MCPhysReg PhysReg;
    LaneBitmask LaneMask;

    RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
        : PhysReg(PhysReg), LaneMask(LaneMask) {}
  };

private:
  using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;

  const BasicBlock *BB;
  int Number;
  MachineFunction *xParent;
  Instructions Insts;

  /// Keep track of the predecessor / successor basic blocks.
  std::vector<MachineBasicBlock *> Predecessors;
  std::vector<MachineBasicBlock *> Successors;

  /// Keep track of the probabilities to the successors. This vector has the
  /// same order as Successors, or it is empty if we don't use it (e.g., when
  /// the optimization is disabled).
  std::vector<BranchProbability> Probs;
  using probability_iterator = std::vector<BranchProbability>::iterator;
  using const_probability_iterator =
      std::vector<BranchProbability>::const_iterator;

  std::optional<uint64_t> IrrLoopHeaderWeight;

  /// Keep track of the physical registers that are live in to the basic block.
  using LiveInVector = std::vector<RegisterMaskPair>;
  LiveInVector LiveIns;

  /// Alignment of the basic block. One if the basic block does not need to be
  /// aligned.
  Align Alignment;
  /// Maximum amount of bytes that can be added to align the basic block. If the
  /// alignment cannot be reached in this many bytes, no bytes are emitted.
  /// Zero to represent no maximum.
  unsigned MaxBytesForAlignment = 0;

  /// Indicate that this basic block is entered via an exception handler.
  bool IsEHPad = false;

  /// Indicate that this MachineBasicBlock is referenced somewhere other than
  /// as predecessor/successor, a terminator MachineInstr, or a jump table.
  bool MachineBlockAddressTaken = false;

  /// If this MachineBasicBlock corresponds to an IR-level "blockaddress"
  /// constant, this contains a pointer to that block.
  BasicBlock *AddressTakenIRBlock = nullptr;

  /// Indicate that this basic block needs its symbol to be emitted regardless
  /// of whether the flow just falls through to it.
  bool LabelMustBeEmitted = false;

  /// Indicate that this basic block is the entry block of an EH scope, i.e.,
  /// the block that used to have a catchpad or cleanuppad instruction in the
  /// LLVM IR.
  bool IsEHScopeEntry = false;

  /// Indicates if this is a target block of a catchret.
  bool IsEHCatchretTarget = false;

  /// Indicate that this basic block is the entry block of an EH funclet.
  bool IsEHFuncletEntry = false;

  /// Indicate that this basic block is the entry block of a cleanup funclet.
  bool IsCleanupFuncletEntry = false;

  /// Fixed unique ID assigned to this basic block upon creation. Used with
  /// basic block sections and basic block labels.
  std::optional<unsigned> BBID;

  /// With basic block sections, this stores the Section ID of the basic block.
  MBBSectionID SectionID{0};

  // Indicate that this basic block begins a section.
  bool IsBeginSection = false;

  // Indicate that this basic block ends a section.
  bool IsEndSection = false;

  /// Indicate that this basic block is the indirect dest of an INLINEASM_BR.
  bool IsInlineAsmBrIndirectTarget = false;

  /// Since getSymbol is a relatively heavy-weight operation, the symbol
  /// is only computed once and is cached.
  mutable MCSymbol *CachedMCSymbol = nullptr;

  /// Cached MCSymbol for this block (used if IsEHCatchRetTarget).
  mutable MCSymbol *CachedEHCatchretMCSymbol = nullptr;

  /// Marks the end of the basic block. Used during basic block sections to
  /// calculate the size of the basic block, or the BB section ending with it.
  mutable MCSymbol *CachedEndMCSymbol = nullptr;

  // Intrusive list support
  MachineBasicBlock() = default;

  explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);

  ~MachineBasicBlock();

  // MachineBasicBlocks are allocated and owned by MachineFunction.
  friend class MachineFunction;

public:
  /// Return the LLVM basic block that this instance corresponded to originally.
  /// Note that this may be NULL if this instance does not correspond directly
  /// to an LLVM basic block.
  const BasicBlock *getBasicBlock() const { return BB; }

  /// Remove the reference to the underlying IR BasicBlock. This is for
  /// reduction tools and should generally not be used.
  void clearBasicBlock() {
    BB = nullptr;
  }

  /// Return the name of the corresponding LLVM basic block, or an empty string.
  StringRef getName() const;

  /// Return a formatted string to identify this block and its parent function.
  std::string getFullName() const;

  /// Test whether this block is used as something other than the target
  /// of a terminator, exception-handling target, or jump table. This is
  /// either the result of an IR-level "blockaddress", or some form
  /// of target-specific branch lowering.
  bool hasAddressTaken() const {
    return MachineBlockAddressTaken || AddressTakenIRBlock;
  }

  /// Test whether this block is used as something other than the target of a
  /// terminator, exception-handling target, jump table, or IR blockaddress.
  /// For example, its address might be loaded into a register, or
  /// stored in some branch table that isn't part of MachineJumpTableInfo.
  bool isMachineBlockAddressTaken() const { return MachineBlockAddressTaken; }

  /// Test whether this block is the target of an IR BlockAddress.  (There can
  /// be more than one MBB associated with an IR BB where the address is taken.)
  bool isIRBlockAddressTaken() const { return AddressTakenIRBlock; }

  /// Retrieves the BasicBlock which corresponds to this MachineBasicBlock.
  BasicBlock *getAddressTakenIRBlock() const { return AddressTakenIRBlock; }

  /// Set this block to indicate that its address is used as something other
  /// than the target of a terminator, exception-handling target, jump table,
  /// or IR-level "blockaddress".
  void setMachineBlockAddressTaken() { MachineBlockAddressTaken = true; }

  /// Set this block to reflect that it corresponds to an IR-level basic block
  /// with a BlockAddress.
  void setAddressTakenIRBlock(BasicBlock *BB) { AddressTakenIRBlock = BB; }

  /// Test whether this block must have its label emitted.
  bool hasLabelMustBeEmitted() const { return LabelMustBeEmitted; }

  /// Set this block to reflect that, regardless of how we flow to it, we need
  /// its label to be emitted.
  void setLabelMustBeEmitted() { LabelMustBeEmitted = true; }

  /// Return the MachineFunction containing this basic block.
  const MachineFunction *getParent() const { return xParent; }
  MachineFunction *getParent() { return xParent; }

  using instr_iterator = Instructions::iterator;
  using const_instr_iterator = Instructions::const_iterator;
  using reverse_instr_iterator = Instructions::reverse_iterator;
  using const_reverse_instr_iterator = Instructions::const_reverse_iterator;

  using iterator = MachineInstrBundleIterator<MachineInstr>;
  using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
  using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
  using const_reverse_iterator =
      MachineInstrBundleIterator<const MachineInstr, true>;

  unsigned size() const { return (unsigned)Insts.size(); }
  bool sizeWithoutDebugLargerThan(unsigned Limit) const;
  bool empty() const { return Insts.empty(); }

  MachineInstr       &instr_front()       { return Insts.front(); }
  MachineInstr       &instr_back()        { return Insts.back();  }
  const MachineInstr &instr_front() const { return Insts.front(); }
  const MachineInstr &instr_back()  const { return Insts.back();  }

  MachineInstr       &front()             { return Insts.front(); }
  MachineInstr       &back()              { return *--end();      }
  const MachineInstr &front()       const { return Insts.front(); }
  const MachineInstr &back()        const { return *--end();      }

  instr_iterator                instr_begin()       { return Insts.begin();  }
  const_instr_iterator          instr_begin() const { return Insts.begin();  }
  instr_iterator                  instr_end()       { return Insts.end();    }
  const_instr_iterator            instr_end() const { return Insts.end();    }
  reverse_instr_iterator       instr_rbegin()       { return Insts.rbegin(); }
  const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
  reverse_instr_iterator       instr_rend  ()       { return Insts.rend();   }
  const_reverse_instr_iterator instr_rend  () const { return Insts.rend();   }

  using instr_range = iterator_range<instr_iterator>;
  using const_instr_range = iterator_range<const_instr_iterator>;
  instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
  const_instr_range instrs() const {
    return const_instr_range(instr_begin(), instr_end());
  }

  iterator                begin()       { return instr_begin();  }
  const_iterator          begin() const { return instr_begin();  }
  iterator                end  ()       { return instr_end();    }
  const_iterator          end  () const { return instr_end();    }
  reverse_iterator rbegin() {
    return reverse_iterator::getAtBundleBegin(instr_rbegin());
  }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator::getAtBundleBegin(instr_rbegin());
  }
  reverse_iterator rend() { return reverse_iterator(instr_rend()); }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(instr_rend());
  }

  /// Support for MachineInstr::getNextNode().
  static Instructions MachineBasicBlock::*getSublistAccess(MachineInstr *) {
    return &MachineBasicBlock::Insts;
  }

  inline iterator_range<iterator> terminators() {
    return make_range(getFirstTerminator(), end());
  }
  inline iterator_range<const_iterator> terminators() const {
    return make_range(getFirstTerminator(), end());
  }

  /// Returns a range that iterates over the phis in the basic block.
  inline iterator_range<iterator> phis() {
    return make_range(begin(), getFirstNonPHI());
  }
  inline iterator_range<const_iterator> phis() const {
    return const_cast<MachineBasicBlock *>(this)->phis();
  }

  // Machine-CFG iterators
  using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
  using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
  using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
  using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
  using pred_reverse_iterator =
      std::vector<MachineBasicBlock *>::reverse_iterator;
  using const_pred_reverse_iterator =
      std::vector<MachineBasicBlock *>::const_reverse_iterator;
  using succ_reverse_iterator =
      std::vector<MachineBasicBlock *>::reverse_iterator;
  using const_succ_reverse_iterator =
      std::vector<MachineBasicBlock *>::const_reverse_iterator;
  pred_iterator        pred_begin()       { return Predecessors.begin(); }
  const_pred_iterator  pred_begin() const { return Predecessors.begin(); }
  pred_iterator        pred_end()         { return Predecessors.end();   }
  const_pred_iterator  pred_end()   const { return Predecessors.end();   }
  pred_reverse_iterator        pred_rbegin()
                                          { return Predecessors.rbegin();}
  const_pred_reverse_iterator  pred_rbegin() const
                                          { return Predecessors.rbegin();}
  pred_reverse_iterator        pred_rend()
                                          { return Predecessors.rend();  }
  const_pred_reverse_iterator  pred_rend()   const
                                          { return Predecessors.rend();  }
  unsigned             pred_size()  const {
    return (unsigned)Predecessors.size();
  }
  bool                 pred_empty() const { return Predecessors.empty(); }
  succ_iterator        succ_begin()       { return Successors.begin();   }
  const_succ_iterator  succ_begin() const { return Successors.begin();   }
  succ_iterator        succ_end()         { return Successors.end();     }
  const_succ_iterator  succ_end()   const { return Successors.end();     }
  succ_reverse_iterator        succ_rbegin()
                                          { return Successors.rbegin();  }
  const_succ_reverse_iterator  succ_rbegin() const
                                          { return Successors.rbegin();  }
  succ_reverse_iterator        succ_rend()
                                          { return Successors.rend();    }
  const_succ_reverse_iterator  succ_rend()   const
                                          { return Successors.rend();    }
  unsigned             succ_size()  const {
    return (unsigned)Successors.size();
  }
  bool                 succ_empty() const { return Successors.empty();   }

  inline iterator_range<pred_iterator> predecessors() {
    return make_range(pred_begin(), pred_end());
  }
  inline iterator_range<const_pred_iterator> predecessors() const {
    return make_range(pred_begin(), pred_end());
  }
  inline iterator_range<succ_iterator> successors() {
    return make_range(succ_begin(), succ_end());
  }
  inline iterator_range<const_succ_iterator> successors() const {
    return make_range(succ_begin(), succ_end());
  }

  // LiveIn management methods.

  /// Adds the specified register as a live in. Note that it is an error to add
  /// the same register to the same set more than once unless the intention is
  /// to call sortUniqueLiveIns after all registers are added.
  void addLiveIn(MCRegister PhysReg,
                 LaneBitmask LaneMask = LaneBitmask::getAll()) {
    LiveIns.push_back(RegisterMaskPair(PhysReg, LaneMask));
  }
  void addLiveIn(const RegisterMaskPair &RegMaskPair) {
    LiveIns.push_back(RegMaskPair);
  }

  /// Sorts and uniques the LiveIns vector. It can be significantly faster to do
  /// this than repeatedly calling isLiveIn before calling addLiveIn for every
  /// LiveIn insertion.
  void sortUniqueLiveIns();
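  // A sketch of the intended bulk-insertion pattern (`Reg0`..`Reg2` are
  // placeholder physical registers):
  //
  //   for (MCPhysReg R : {Reg0, Reg1, Reg2})
  //     MBB->addLiveIn(R);
  //   MBB->sortUniqueLiveIns();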

  /// Clear live in list.
  void clearLiveIns();

  /// Add PhysReg as live in to this block, and ensure that there is a copy of
  /// PhysReg to a virtual register of class RC. Return the virtual register
  /// that is a copy of the live in PhysReg.
  Register addLiveIn(MCRegister PhysReg, const TargetRegisterClass *RC);

  /// Remove the specified register from the live in set.
  void removeLiveIn(MCPhysReg Reg,
                    LaneBitmask LaneMask = LaneBitmask::getAll());

  /// Return true if the specified register is in the live in set.
  bool isLiveIn(MCPhysReg Reg,
                LaneBitmask LaneMask = LaneBitmask::getAll()) const;

  // Iteration support for live in sets.  These sets are kept in sorted
  // order by their register number.
  using livein_iterator = LiveInVector::const_iterator;

  /// Unlike livein_begin, this method does not check that the liveness
  /// information is accurate. Still, for debug purposes it may be useful
  /// to have iterators that won't assert if the liveness information
  /// is not current.
  livein_iterator livein_begin_dbg() const { return LiveIns.begin(); }
  iterator_range<livein_iterator> liveins_dbg() const {
    return make_range(livein_begin_dbg(), livein_end());
  }

  livein_iterator livein_begin() const;
  livein_iterator livein_end()   const { return LiveIns.end(); }
  bool            livein_empty() const { return LiveIns.empty(); }
  iterator_range<livein_iterator> liveins() const {
    return make_range(livein_begin(), livein_end());
  }

  /// Remove entry from the livein set and return iterator to the next.
  livein_iterator removeLiveIn(livein_iterator I);

  class liveout_iterator {
  public:
    using iterator_category = std::input_iterator_tag;
    using difference_type = std::ptrdiff_t;
    using value_type = RegisterMaskPair;
    using pointer = const RegisterMaskPair *;
    using reference = const RegisterMaskPair &;

    liveout_iterator(const MachineBasicBlock &MBB, MCPhysReg ExceptionPointer,
                     MCPhysReg ExceptionSelector, bool End)
        : ExceptionPointer(ExceptionPointer),
          ExceptionSelector(ExceptionSelector), BlockI(MBB.succ_begin()),
          BlockEnd(MBB.succ_end()) {
      if (End)
        BlockI = BlockEnd;
      else if (BlockI != BlockEnd) {
        LiveRegI = (*BlockI)->livein_begin();
        if (!advanceToValidPosition())
          return;
        if (LiveRegI->PhysReg == ExceptionPointer ||
            LiveRegI->PhysReg == ExceptionSelector)
          ++(*this);
      }
    }

    liveout_iterator &operator++() {
      do {
        ++LiveRegI;
        if (!advanceToValidPosition())
          return *this;
      } while ((*BlockI)->isEHPad() &&
               (LiveRegI->PhysReg == ExceptionPointer ||
                LiveRegI->PhysReg == ExceptionSelector));
      return *this;
    }

    liveout_iterator operator++(int) {
      liveout_iterator Tmp = *this;
      ++(*this);
      return Tmp;
    }

    reference operator*() const {
      return *LiveRegI;
    }

    pointer operator->() const {
      return &*LiveRegI;
    }

    bool operator==(const liveout_iterator &RHS) const {
      if (BlockI != BlockEnd)
        return BlockI == RHS.BlockI && LiveRegI == RHS.LiveRegI;
      return RHS.BlockI == BlockEnd;
    }

    bool operator!=(const liveout_iterator &RHS) const {
      return !(*this == RHS);
    }
  private:
    bool advanceToValidPosition() {
      if (LiveRegI != (*BlockI)->livein_end())
        return true;

      do {
        ++BlockI;
      } while (BlockI != BlockEnd && (*BlockI)->livein_empty());
      if (BlockI == BlockEnd)
        return false;

      LiveRegI = (*BlockI)->livein_begin();
      return true;
    }

    MCPhysReg ExceptionPointer, ExceptionSelector;
    const_succ_iterator BlockI;
    const_succ_iterator BlockEnd;
    livein_iterator LiveRegI;
  };

  /// Iterator scanning successor basic blocks' liveins to determine the
  /// registers potentially live at the end of this block. There may be
  /// duplicates or overlapping registers in the list returned.
  liveout_iterator liveout_begin() const;
  liveout_iterator liveout_end() const {
    return liveout_iterator(*this, 0, 0, true);
  }
  iterator_range<liveout_iterator> liveouts() const {
    return make_range(liveout_begin(), liveout_end());
  }
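  // Example (sketch): collect the registers potentially live out of this
  // block; `LiveOutRegs` is a placeholder container.
  //
  //   for (const RegisterMaskPair &P : MBB->liveouts())
  //     LiveOutRegs.insert(P.PhysReg);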

  /// Get the clobber mask for the start of this basic block. Funclets use this
  /// to prevent register allocation across funclet transitions.
  const uint32_t *getBeginClobberMask(const TargetRegisterInfo *TRI) const;

  /// Get the clobber mask for the end of the basic block.
  /// \see getBeginClobberMask()
  const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;

  /// Return alignment of the basic block.
  Align getAlignment() const { return Alignment; }

  /// Set alignment of the basic block.
  void setAlignment(Align A) { Alignment = A; }

  void setAlignment(Align A, unsigned MaxBytes) {
    setAlignment(A);
    setMaxBytesForAlignment(MaxBytes);
  }

  /// Return the maximum amount of padding allowed for aligning the basic block.
  unsigned getMaxBytesForAlignment() const { return MaxBytesForAlignment; }

  /// Set the maximum amount of padding allowed for aligning the basic block
  void setMaxBytesForAlignment(unsigned MaxBytes) {
    MaxBytesForAlignment = MaxBytes;
  }

  /// Returns true if the block is a landing pad. That is, this basic block is
  /// entered via an exception handler.
  bool isEHPad() const { return IsEHPad; }

  /// Indicates the block is a landing pad.  That is, this basic block is entered
  /// via an exception handler.
  void setIsEHPad(bool V = true) { IsEHPad = V; }

  bool hasEHPadSuccessor() const;

  /// Returns true if this is the entry block of the function.
  bool isEntryBlock() const;

  /// Returns true if this is the entry block of an EH scope, i.e., the block
  /// that used to have a catchpad or cleanuppad instruction in the LLVM IR.
  bool isEHScopeEntry() const { return IsEHScopeEntry; }

  /// Indicates if this is the entry block of an EH scope, i.e., the block that
  /// used to have a catchpad or cleanuppad instruction in the LLVM IR.
  void setIsEHScopeEntry(bool V = true) { IsEHScopeEntry = V; }

  /// Returns true if this is a target block of a catchret.
  bool isEHCatchretTarget() const { return IsEHCatchretTarget; }

  /// Indicates if this is a target block of a catchret.
  void setIsEHCatchretTarget(bool V = true) { IsEHCatchretTarget = V; }

  /// Returns true if this is the entry block of an EH funclet.
  bool isEHFuncletEntry() const { return IsEHFuncletEntry; }

  /// Indicates if this is the entry block of an EH funclet.
  void setIsEHFuncletEntry(bool V = true) { IsEHFuncletEntry = V; }

  /// Returns true if this is the entry block of a cleanup funclet.
  bool isCleanupFuncletEntry() const { return IsCleanupFuncletEntry; }

  /// Indicates if this is the entry block of a cleanup funclet.
  void setIsCleanupFuncletEntry(bool V = true) { IsCleanupFuncletEntry = V; }

  /// Returns true if this block begins any section.
  bool isBeginSection() const { return IsBeginSection; }

  /// Returns true if this block ends any section.
  bool isEndSection() const { return IsEndSection; }

  void setIsBeginSection(bool V = true) { IsBeginSection = V; }

  void setIsEndSection(bool V = true) { IsEndSection = V; }

  std::optional<unsigned> getBBID() const { return BBID; }

  /// Returns the BBID of the block when BBAddrMapVersion >= 2, otherwise
  /// returns `MachineBasicBlock::Number`.
  /// TODO: Remove this function when version 1 is deprecated and replace its
  /// uses with `getBBID()`.
  unsigned getBBIDOrNumber() const;

  /// Returns the section ID of this basic block.
  MBBSectionID getSectionID() const { return SectionID; }

  /// Returns the unique section ID number of this basic block.
  unsigned getSectionIDNum() const {
    return ((unsigned)MBBSectionID::SectionType::Cold) -
           ((unsigned)SectionID.Type) + SectionID.Number;
  }
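  // With Cold == 2, the formula above maps ColdSectionID to 0,
  // ExceptionSectionID to 1, and a Default section with Number N to N + 2,
  // giving every section a distinct number.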

  /// Sets the fixed BBID of this basic block.
  void setBBID(unsigned V) {
    assert(!BBID.has_value() && "Cannot change BBID.");
    BBID = V;
  }

  /// Sets the section ID for this basic block.
  void setSectionID(MBBSectionID V) { SectionID = V; }

  /// Returns the MCSymbol marking the end of this basic block.
  MCSymbol *getEndSymbol() const;

  /// Returns true if this block may have an INLINEASM_BR (overestimate, by
  /// checking if any of the successors are indirect targets of any inlineasm_br
  /// in the function).
  bool mayHaveInlineAsmBr() const;

  /// Returns true if this is the indirect dest of an INLINEASM_BR.
  bool isInlineAsmBrIndirectTarget() const {
    return IsInlineAsmBrIndirectTarget;
  }

  /// Indicates if this is the indirect dest of an INLINEASM_BR.
  void setIsInlineAsmBrIndirectTarget(bool V = true) {
    IsInlineAsmBrIndirectTarget = V;
  }

  /// Returns true if it is legal to hoist instructions into this block.
  bool isLegalToHoistInto() const;

  // Code Layout methods.

  /// Move 'this' block before or after the specified block.  This only moves
  /// the block; it does not modify the CFG or adjust potential fall-throughs at
  /// the end of the block.
  void moveBefore(MachineBasicBlock *NewAfter);
  void moveAfter(MachineBasicBlock *NewBefore);

  /// Returns true if this and MBB belong to the same section.
  bool sameSection(const MachineBasicBlock *MBB) const {
    return getSectionID() == MBB->getSectionID();
  }

  /// Update the terminator instructions in block to account for changes to
  /// block layout which may have been made. PreviousLayoutSuccessor should be
  /// set to the block which may have been used as fallthrough before the block
  /// layout was modified.  If the block previously fell through to that block,
  /// it may now need a branch. If it previously branched to another block, it
  /// may now be able to fallthrough to the current layout successor.
  void updateTerminator(MachineBasicBlock *PreviousLayoutSuccessor);

  // Machine-CFG mutators

  /// Add Succ as a successor of this MachineBasicBlock.  The Predecessors list
  /// of Succ is automatically updated. The Prob parameter is stored in the
  /// Probabilities list. The default probability is set as unknown. Mixing
  /// known and unknown probabilities in the successor list is not allowed. When
  /// all successors have unknown probabilities, 1 / N is returned as the
  /// probability for each successor, where N is the number of successors.
  ///
  /// Note that duplicate Machine CFG edges are not allowed.
  void addSuccessor(MachineBasicBlock *Succ,
                    BranchProbability Prob = BranchProbability::getUnknown());

  /// Add Succ as a successor of this MachineBasicBlock.  The Predecessors list
  /// of Succ is automatically updated. The probability is not provided because
  /// BPI is not available (e.g. -O0 is used), in which case edge probabilities
  /// won't be used. Using this interface can save some space.
  void addSuccessorWithoutProb(MachineBasicBlock *Succ);

  /// Set successor probability of a given iterator.
  void setSuccProbability(succ_iterator I, BranchProbability Prob);

  /// Normalize probabilities of all successors so that the sum of them becomes
  /// one. This is usually done when the current update on this MBB is done, and
  /// the sum of its successors' probabilities is not guaranteed to be one. The
  /// user is responsible for the correct use of this function.
  /// MBB::removeSuccessor() has an option to do this automatically.
  void normalizeSuccProbs() {
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
  }
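  // Example (sketch): split control flow 3:1 between two successors and keep
  // the probabilities normalized; `TBB` and `FBB` are placeholders.
  //
  //   MBB->addSuccessor(TBB, BranchProbability(3, 4));
  //   MBB->addSuccessor(FBB, BranchProbability(1, 4));
  //   MBB->normalizeSuccProbs();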

  /// Validate successors' probabilities and check if the sum of them is
  /// approximately one. This only works in DEBUG mode.
  void validateSuccProbs() const;

  /// Remove successor from the successors list of this MachineBasicBlock. The
  /// Predecessors list of Succ is automatically updated.
  /// If NormalizeSuccProbs is true, then normalize successors' probabilities
  /// after the successor is removed.
  void removeSuccessor(MachineBasicBlock *Succ,
                       bool NormalizeSuccProbs = false);

  /// Remove specified successor from the successors list of this
  /// MachineBasicBlock. The Predecessors list of Succ is automatically updated.
  /// If NormalizeSuccProbs is true, then normalize successors' probabilities
  /// after the successor is removed.
  /// Return the iterator to the element after the one removed.
  succ_iterator removeSuccessor(succ_iterator I,
                                bool NormalizeSuccProbs = false);

  /// Replace successor OLD with NEW and update probability info.
  void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);

  /// Copy a successor (and any probability info) from the original block to
  /// this block. Uses an iterator into the original block's successors.
  ///
  /// This is useful when doing a partial clone of successors. Afterward, the
  /// probabilities may need to be normalized.
  void copySuccessor(MachineBasicBlock *Orig, succ_iterator I);

  /// Split the old successor into old plus new and update the probability
  /// info.
  void splitSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New,
                      bool NormalizeSuccProbs = false);

  /// Transfers all the successors from FromMBB to this machine basic block
  /// (i.e., copies all the successors of FromMBB and removes all the
  /// successors from FromMBB).
  void transferSuccessors(MachineBasicBlock *FromMBB);

  /// Transfers all the successors, as in transferSuccessors, and updates PHI
  /// operands in the successor blocks that refer to FromMBB to refer to this
  /// block.
  void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB);

  /// Return true if any of the successors have probabilities attached to them.
  bool hasSuccessorProbabilities() const { return !Probs.empty(); }

  /// Return true if the specified MBB is a predecessor of this block.
  bool isPredecessor(const MachineBasicBlock *MBB) const;

  /// Return true if the specified MBB is a successor of this block.
  bool isSuccessor(const MachineBasicBlock *MBB) const;

  /// Return true if the specified MBB will be emitted immediately after this
  /// block, such that if this block exits by falling through, control will
  /// transfer to the specified MBB. Note that MBB need not be a successor at
  /// all, for example if this block ends with an unconditional branch to some
  /// other block.
  bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;

  /// Return the successor of this block if it has a single successor.
  /// Otherwise return a null pointer.
  ///
  const MachineBasicBlock *getSingleSuccessor() const;
  MachineBasicBlock *getSingleSuccessor() {
    return const_cast<MachineBasicBlock *>(
        static_cast<const MachineBasicBlock *>(this)->getSingleSuccessor());
  }

  /// Return the fallthrough block if the block can implicitly
  /// transfer control to the block after it by falling off the end of
  /// it. If an explicit branch to the fallthrough block is not allowed,
  /// set JumpToFallThrough to false. A non-null return is a conservative
  /// answer.
  MachineBasicBlock *getFallThrough(bool JumpToFallThrough = true);

  /// Return the fallthrough block if the block can implicitly
  /// transfer control to its successor, whether by a branch or
  /// a fallthrough. A non-null return is a conservative answer.
  MachineBasicBlock *getLogicalFallThrough() { return getFallThrough(false); }

  /// Return true if the block can implicitly transfer control to the
  /// block after it by falling off the end of it.  This should return
  /// false if it can reach the block after it, but it uses an
  /// explicit branch to do so (e.g., a table jump).  True is a
  /// conservative answer.
  bool canFallThrough();

  /// Returns a pointer to the first instruction in this block that is not a
  /// PHINode instruction. When adding instructions to the beginning of the
  /// basic block, they should be added before the returned value, not before
  /// the first instruction, which might be PHI.
  /// Returns end() if there's no non-PHI instruction.
  iterator getFirstNonPHI();
  const_iterator getFirstNonPHI() const {
    return const_cast<MachineBasicBlock *>(this)->getFirstNonPHI();
  }

  /// Return the first instruction in MBB after I that is not a PHI or a label.
  /// This is the correct point to insert lowered copies at the beginning of a
  /// basic block that must be before any debugging information.
  iterator SkipPHIsAndLabels(iterator I);

  /// Return the first instruction in MBB after I that is not a PHI, label or
  /// debug.  This is the correct point to insert copies at the beginning of a
  /// basic block.
  iterator SkipPHIsLabelsAndDebug(iterator I, bool SkipPseudoOp = true);

  /// Returns an iterator to the first terminator instruction of this basic
  /// block. If a terminator does not exist, it returns end().
  iterator getFirstTerminator();
  const_iterator getFirstTerminator() const {
    return const_cast<MachineBasicBlock *>(this)->getFirstTerminator();
  }

  /// Same as getFirstTerminator but it ignores bundles and returns an
  /// instr_iterator instead.
  instr_iterator getFirstInstrTerminator();

  /// Finds the first terminator in a block by scanning forward. This can handle
  /// cases in GlobalISel where there may be non-terminator instructions between
  /// terminators, for which getFirstTerminator() will not work correctly.
  iterator getFirstTerminatorForward();

  /// Returns an iterator to the first non-debug instruction in the basic block,
  /// or end(). Skip any pseudo probe operation if \c SkipPseudoOp is true.
  /// Pseudo probes are like debug instructions which do not turn into real
  /// machine code. We try to use the function to skip both debug instructions
  /// and pseudo probe operations to avoid API proliferation. This should work
  /// most of the time when considering optimizing the rest of code in the
  /// block, except for certain cases where pseudo probes are designed to block
  /// the optimizations. For example, code-merging optimizations are supposed
  /// to be blocked by pseudo probes for better AutoFDO profile quality.
  /// Therefore, they should be considered as a valid instruction when this
  /// function is called in a context of such optimizations. On the other hand,
  /// \c SkipPseudoOp should be true when it's used in optimizations that are
  /// unlikely to hurt profile quality, e.g., without block merging. The default
  /// value of \c SkipPseudoOp is set to true to maximize code quality in
  /// general, with an explicit false value passed in a few places like branch
  /// folding and if-conversion to favor profile quality.
  iterator getFirstNonDebugInstr(bool SkipPseudoOp = true);
  const_iterator getFirstNonDebugInstr(bool SkipPseudoOp = true) const {
    return const_cast<MachineBasicBlock *>(this)->getFirstNonDebugInstr(
        SkipPseudoOp);
  }

  /// Returns an iterator to the last non-debug instruction in the basic block,
  /// or end(). Skip any pseudo operation if \c SkipPseudoOp is true.
  /// Pseudo probes are like debug instructions which do not turn into real
  /// machine code. We try to use the function to skip both debug instructions
  /// and pseudo probe operations to avoid API proliferation. This should work
  /// most of the time when considering optimizing the rest of code in the
  /// block, except for certain cases where pseudo probes are designed to block
  /// the optimizations. For example, code-merging optimizations are supposed
  /// to be blocked by pseudo probes for better AutoFDO profile quality.
  /// Therefore, they should be considered as a valid instruction when this
  /// function is called in a context of such optimizations. On the other hand,
  /// \c SkipPseudoOp should be true when it's used in optimizations that are
  /// unlikely to hurt profile quality, e.g., without block merging. The default
  /// value of \c SkipPseudoOp is set to true to maximize code quality in
  /// general, with an explicit false value passed in a few places like branch
  /// folding and if-conversion to favor profile quality.
  iterator getLastNonDebugInstr(bool SkipPseudoOp = true);
  const_iterator getLastNonDebugInstr(bool SkipPseudoOp = true) const {
    return const_cast<MachineBasicBlock *>(this)->getLastNonDebugInstr(
        SkipPseudoOp);
  }

  /// Convenience function that returns true if the block ends in a return
  /// instruction.
  bool isReturnBlock() const {
    return !empty() && back().isReturn();
  }

  /// Convenience function that returns true if the block ends in an EH scope
  /// return instruction.
  bool isEHScopeReturnBlock() const {
    return !empty() && back().isEHScopeReturn();
  }

  /// Split a basic block into 2 pieces at \p SplitInst. A new block will be
  /// inserted after this block, and all instructions after \p SplitInst moved
  /// to it (\p SplitInst will be in the original block). If \p LIS is provided,
  /// LiveIntervals will be appropriately updated. \return the newly inserted
  /// block.
  ///
  /// If \p UpdateLiveIns is true, this will ensure the live ins list is
  /// accurate, including for physreg uses/defs in the original block.
  MachineBasicBlock *splitAt(MachineInstr &SplitInst, bool UpdateLiveIns = true,
                             LiveIntervals *LIS = nullptr);
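  // Example (sketch): split after a call so everything following it lands in
  // a fresh block; `CallMI` is a placeholder MachineInstr in this block.
  //
  //   MachineBasicBlock *Tail = MBB->splitAt(CallMI);
  //   // CallMI stays in MBB; Tail holds the instructions that followed it.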

  /// Split the critical edge from this block to the given successor block, and
  /// return the newly created block, or null if splitting is not possible.
  ///
  /// This function updates LiveVariables, MachineDominatorTree, and
  /// MachineLoopInfo, as applicable.
  MachineBasicBlock *
  SplitCriticalEdge(MachineBasicBlock *Succ, Pass &P,
                    std::vector<SparseBitVector<>> *LiveInSets = nullptr);

  /// Check if the edge between this block and the given successor \p
  /// Succ can be split. If this returns true, a subsequent call to
  /// SplitCriticalEdge is guaranteed to return a valid basic block if
  /// no changes occurred in the meantime.
  bool canSplitCriticalEdge(const MachineBasicBlock *Succ) const;

  void pop_front() { Insts.pop_front(); }
  void pop_back() { Insts.pop_back(); }
  void push_back(MachineInstr *MI) { Insts.push_back(MI); }

  /// Insert MI into the instruction list before I, possibly inside a bundle.
  ///
  /// If the insertion point is inside a bundle, MI will be added to the bundle,
  /// otherwise MI will not be added to any bundle. That means this function
  /// alone can't be used to prepend or append instructions to bundles. See
  /// MIBundleBuilder::insert() for a more reliable way of doing that.
  instr_iterator insert(instr_iterator I, MachineInstr *M);

  /// Insert a range of instructions into the instruction list before I.
  template<typename IT>
  void insert(iterator I, IT S, IT E) {
    assert((I == end() || I->getParent() == this) &&
           "iterator points outside of basic block");
    Insts.insert(I.getInstrIterator(), S, E);
  }

  /// Insert MI into the instruction list before I.
  iterator insert(iterator I, MachineInstr *MI) {
    assert((I == end() || I->getParent() == this) &&
           "iterator points outside of basic block");
    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
           "Cannot insert instruction with bundle flags");
    return Insts.insert(I.getInstrIterator(), MI);
  }

  /// Insert MI into the instruction list after I.
  iterator insertAfter(iterator I, MachineInstr *MI) {
    assert((I == end() || I->getParent() == this) &&
           "iterator points outside of basic block");
    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
           "Cannot insert instruction with bundle flags");
    return Insts.insertAfter(I.getInstrIterator(), MI);
  }

  /// If I is bundled then insert MI into the instruction list after the end of
  /// the bundle, otherwise insert MI immediately after I.
  instr_iterator insertAfterBundle(instr_iterator I, MachineInstr *MI) {
    assert((I == instr_end() || I->getParent() == this) &&
           "iterator points outside of basic block");
    assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
           "Cannot insert instruction with bundle flags");
    while (I->isBundledWithSucc())
      ++I;
    return Insts.insertAfter(I, MI);
  }

  /// Remove an instruction from the instruction list and delete it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle will still be bundled after removing the single instruction.
  instr_iterator erase(instr_iterator I);

  /// Remove an instruction from the instruction list and delete it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle will still be bundled after removing the single instruction.
  instr_iterator erase_instr(MachineInstr *I) {
    return erase(instr_iterator(I));
  }

  /// Remove a range of instructions from the instruction list and delete them.
  iterator erase(iterator I, iterator E) {
    return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
  }

  /// Remove an instruction or bundle from the instruction list and delete it.
  ///
  /// If I points to a bundle of instructions, they are all erased.
  iterator erase(iterator I) {
    return erase(I, std::next(I));
  }

  /// Remove an instruction from the instruction list and delete it.
  ///
  /// If I is the head of a bundle of instructions, the whole bundle will be
  /// erased.
  iterator erase(MachineInstr *I) {
    return erase(iterator(I));
  }

  /// Remove the unbundled instruction from the instruction list without
  /// deleting it.
  ///
  /// This function cannot be used to remove bundled instructions; use
  /// remove_instr to remove individual instructions from a bundle.
  MachineInstr *remove(MachineInstr *I) {
    assert(!I->isBundled() && "Cannot remove bundled instructions");
    return Insts.remove(instr_iterator(I));
  }

  /// Remove the possibly bundled instruction from the instruction list
  /// without deleting it.
  ///
  /// If the instruction is part of a bundle, the other instructions in the
  /// bundle will still be bundled after removing the single instruction.
  MachineInstr *remove_instr(MachineInstr *I);

  void clear() {
    Insts.clear();
  }

  /// Take an instruction from MBB 'Other' at the position From, and insert it
  /// into this MBB right before 'Where'.
  ///
  /// If From points to a bundle of instructions, the whole bundle is moved.
  void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
    // The range splice() doesn't allow noop moves, but this one does.
    if (Where != From)
      splice(Where, Other, From, std::next(From));
  }

  /// Take a block of instructions from MBB 'Other' in the range [From, To),
  /// and insert them into this MBB right before 'Where'.
  ///
  /// The instruction at 'Where' must not be included in the range of
  /// instructions to move.
  void splice(iterator Where, MachineBasicBlock *Other,
              iterator From, iterator To) {
    Insts.splice(Where.getInstrIterator(), Other->Insts,
                 From.getInstrIterator(), To.getInstrIterator());
  }
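  // Example (sketch): move the entire body of `Other` in front of this
  // block's first terminator (names are placeholders):
  //
  //   MBB->splice(MBB->getFirstTerminator(), Other, Other->begin(),
  //               Other->end());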

  /// This method unlinks 'this' from the containing function, and returns it,
  /// but does not delete it.
  MachineBasicBlock *removeFromParent();

  /// This method unlinks 'this' from the containing function and deletes it.
  void eraseFromParent();

  /// Given a machine basic block that branched to 'Old', change the code and
  /// CFG so that it branches to 'New' instead.
  void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);

  /// Update all phi nodes in this basic block to refer to basic block \p New
  /// instead of basic block \p Old.
  void replacePhiUsesWith(MachineBasicBlock *Old, MachineBasicBlock *New);

  /// Find the next valid DebugLoc starting at MBBI, skipping any debug
  /// instructions.  Return UnknownLoc if there is none.
  DebugLoc findDebugLoc(instr_iterator MBBI);
  DebugLoc findDebugLoc(iterator MBBI) {
    return findDebugLoc(MBBI.getInstrIterator());
  }

  /// Has the exact same behavior as @ref findDebugLoc (it also searches
  /// towards the end of this MBB) except that this function takes a reverse
  /// iterator to identify the starting MI.
  DebugLoc rfindDebugLoc(reverse_instr_iterator MBBI);
  DebugLoc rfindDebugLoc(reverse_iterator MBBI) {
    return rfindDebugLoc(MBBI.getInstrIterator());
  }

  /// Find the previous valid DebugLoc preceding MBBI, skipping any debug
  /// instructions. It is possible to find the last DebugLoc in the MBB using
  /// findPrevDebugLoc(instr_end()).  Return UnknownLoc if there is none.
  DebugLoc findPrevDebugLoc(instr_iterator MBBI);
  DebugLoc findPrevDebugLoc(iterator MBBI) {
    return findPrevDebugLoc(MBBI.getInstrIterator());
  }

  /// Has the exact same behavior as @ref findPrevDebugLoc (it also searches
  /// towards the beginning of this MBB) except that this function takes a
  /// reverse iterator to identify the starting MI. A minor difference compared
  /// to findPrevDebugLoc is that we can't start scanning at "instr_end".
  DebugLoc rfindPrevDebugLoc(reverse_instr_iterator MBBI);
  DebugLoc rfindPrevDebugLoc(reverse_iterator MBBI) {
    return rfindPrevDebugLoc(MBBI.getInstrIterator());
  }

  /// Find and return the merged DebugLoc of the branch instructions of the
  /// block. Return UnknownLoc if there is none.
  DebugLoc findBranchDebugLoc();

  /// Possible outcome of a register liveness query to computeRegisterLiveness()
  enum LivenessQueryResult {
    LQR_Live,   ///< Register is known to be (at least partially) live.
    LQR_Dead,   ///< Register is known to be fully dead.
    LQR_Unknown ///< Register liveness not decidable from local neighborhood.
  };

  /// Return whether (physical) register \p Reg has been defined and not
  /// killed as of just before \p Before.
  ///
  /// Search is localised to a neighborhood of \p Neighborhood instructions
  /// before (searching for defs or kills) and \p Neighborhood instructions
  /// after (searching just for defs) \p Before.
  ///
  /// \p Reg must be a physical register.
  LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
                                              MCRegister Reg,
                                              const_iterator Before,
                                              unsigned Neighborhood = 10) const;
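  // Example (sketch): check whether a scratch register can be clobbered just
  // before `InsertPt`; `TRI`, `ScratchReg`, and `InsertPt` are placeholders.
  //
  //   if (MBB->computeRegisterLiveness(TRI, ScratchReg, InsertPt) ==
  //       MachineBasicBlock::LQR_Dead) {
  //     // ScratchReg is known dead here and may be reused.
  //   }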

  // Debugging methods.
  void dump() const;
  void print(raw_ostream &OS, const SlotIndexes * = nullptr,
             bool IsStandalone = true) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST,
             const SlotIndexes * = nullptr, bool IsStandalone = true) const;

  enum PrintNameFlag {
    PrintNameIr = (1 << 0), ///< Add IR name where available
    PrintNameAttributes = (1 << 1), ///< Print attributes
  };

  void printName(raw_ostream &os, unsigned printNameFlags = PrintNameIr,
                 ModuleSlotTracker *moduleSlotTracker = nullptr) const;

  // Printing method used by LoopInfo.
  void printAsOperand(raw_ostream &OS, bool PrintType = true) const;

  /// MachineBasicBlocks are uniquely numbered at the function level, unless
  /// they're not in a MachineFunction yet, in which case this will return -1.
  int getNumber() const { return Number; }
  void setNumber(int N) { Number = N; }

  /// Return the MCSymbol for this basic block.
  MCSymbol *getSymbol() const;

  /// Return the EHCatchret Symbol for this basic block.
  MCSymbol *getEHCatchretSymbol() const;

  std::optional<uint64_t> getIrrLoopHeaderWeight() const {
    return IrrLoopHeaderWeight;
  }

  void setIrrLoopHeaderWeight(uint64_t Weight) {
    IrrLoopHeaderWeight = Weight;
  }

  /// Return probability of the edge from this block to MBB. This method should
  /// NOT be called directly, but by using the getEdgeProbability method from
  /// the MachineBranchProbabilityInfo class.
  BranchProbability getSuccProbability(const_succ_iterator Succ) const;

private:
  /// Return probability iterator corresponding to the I successor iterator.
  probability_iterator getProbabilityIterator(succ_iterator I);
  const_probability_iterator
  getProbabilityIterator(const_succ_iterator I) const;

  friend class MachineBranchProbabilityInfo;
  friend class MIPrinter;

  // Methods used to maintain doubly linked list of blocks...
  friend struct ilist_callback_traits<MachineBasicBlock>;

  // Machine-CFG mutators

  /// Add Pred as a predecessor of this MachineBasicBlock. Don't do this
  /// unless you know what you're doing, because it doesn't update Pred's
  /// successors list. Use Pred->addSuccessor instead.
  void addPredecessor(MachineBasicBlock *Pred);

  /// Remove Pred as a predecessor of this MachineBasicBlock. Don't do this
  /// unless you know what you're doing, because it doesn't update Pred's
  /// successors list. Use Pred->removeSuccessor instead.
  void removePredecessor(MachineBasicBlock *Pred);
};

raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);

/// Prints a machine basic block reference.
///
/// The format is:
///   %bb.5           - a machine basic block with MBB.getNumber() == 5.
///
/// Usage: OS << printMBBReference(MBB) << '\n';
Printable printMBBReference(const MachineBasicBlock &MBB);

// This is useful when building IndexedMaps keyed on basic block pointers.
struct MBB2NumberFunctor {
  using argument_type = const MachineBasicBlock *;
  unsigned operator()(const MachineBasicBlock *MBB) const {
    return MBB->getNumber();
  }
};

//===--------------------------------------------------------------------===//
// GraphTraits specializations for machine basic block graphs (machine-CFGs)
//===--------------------------------------------------------------------===//

// Provide specializations of GraphTraits to be able to treat a
// MachineFunction as a graph of MachineBasicBlocks.
//

template <> struct GraphTraits<MachineBasicBlock *> {
  using NodeRef = MachineBasicBlock *;
  using ChildIteratorType = MachineBasicBlock::succ_iterator;

  static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
  static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};

template <> struct GraphTraits<const MachineBasicBlock *> {
  using NodeRef = const MachineBasicBlock *;
  using ChildIteratorType = MachineBasicBlock::const_succ_iterator;

  static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
  static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};

// Provide specializations of GraphTraits to be able to treat a
// MachineFunction as a graph of MachineBasicBlocks and to walk it
// in inverse order.  Inverse order for a function is considered
// to be when traversing the predecessor edges of a MBB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
  using NodeRef = MachineBasicBlock *;
  using ChildIteratorType = MachineBasicBlock::pred_iterator;

  static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
    return G.Graph;
  }

  static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};

template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
  using NodeRef = const MachineBasicBlock *;
  using ChildIteratorType = MachineBasicBlock::const_pred_iterator;

  static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
    return G.Graph;
  }

  static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
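// With these specializations, generic graph algorithms apply directly to
// machine CFGs. For example (a sketch; requires
// llvm/ADT/DepthFirstIterator.h, and `MF`/`ExitMBB` are placeholders):
//
//   for (MachineBasicBlock *MBB : depth_first(&MF.front()))
//     ...;
//
// and an inverse traversal walks predecessor edges instead:
//
//   for (MachineBasicBlock *MBB : inverse_depth_first(ExitMBB))
//     ...;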

// These accessors are handy for sharing templated code between IR and MIR.
inline auto successors(const MachineBasicBlock *BB) { return BB->successors(); }
inline auto predecessors(const MachineBasicBlock *BB) {
  return BB->predecessors();
}

/// MachineInstrSpan provides an interface to get an iteration range
/// containing the instruction it was initialized with, along with all
/// those instructions inserted prior to or following that instruction
/// at some point after the MachineInstrSpan is constructed.
class MachineInstrSpan {
  MachineBasicBlock &MBB;
  MachineBasicBlock::iterator I, B, E;

public:
  MachineInstrSpan(MachineBasicBlock::iterator I, MachineBasicBlock *BB)
      : MBB(*BB), I(I), B(I == MBB.begin() ? MBB.end() : std::prev(I)),
        E(std::next(I)) {
    assert(I == BB->end() || I->getParent() == BB);
  }

  MachineBasicBlock::iterator begin() {
    return B == MBB.end() ? MBB.begin() : std::next(B);
  }
  MachineBasicBlock::iterator end() { return E; }
  bool empty() { return begin() == end(); }

  MachineBasicBlock::iterator getInitial() { return I; }
};
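// Example (sketch): capture the instructions inserted around `InsertPt` so
// they can be post-processed; `InsertPt`, `TII`, and `process` are
// placeholders.
//
//   MachineInstrSpan MIS(InsertPt, &MBB);
//   TII->insertNoop(MBB, InsertPt); // any insertion at/around InsertPt
//   for (MachineInstr &MI : make_range(MIS.begin(), MIS.end()))
//     process(MI);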

/// Increment \p It until it points to a non-debug instruction or to \p End
/// and return the resulting iterator. This function should only be used with
/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
/// const_instr_iterator} and the respective reverse iterators.
template <typename IterT>
inline IterT skipDebugInstructionsForward(IterT It, IterT End,
                                          bool SkipPseudoOp = true) {
  while (It != End &&
         (It->isDebugInstr() || (SkipPseudoOp && It->isPseudoProbe())))
    ++It;
  return It;
}

/// Decrement \p It until it points to a non-debug instruction or to \p Begin
/// and return the resulting iterator. This function should only be used with
/// MachineBasicBlock::{iterator, const_iterator, instr_iterator,
/// const_instr_iterator} and the respective reverse iterators.
template <class IterT>
inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin,
                                           bool SkipPseudoOp = true) {
  while (It != Begin &&
         (It->isDebugInstr() || (SkipPseudoOp && It->isPseudoProbe())))
    --It;
  return It;
}

/// Increment \p It, then continue incrementing it while it points to a debug
/// instruction. A replacement for std::next.
template <typename IterT>
inline IterT next_nodbg(IterT It, IterT End, bool SkipPseudoOp = true) {
  return skipDebugInstructionsForward(std::next(It), End, SkipPseudoOp);
}

/// Decrement \p It, then continue decrementing it while it points to a debug
/// instruction. A replacement for std::prev.
template <typename IterT>
inline IterT prev_nodbg(IterT It, IterT Begin, bool SkipPseudoOp = true) {
  return skipDebugInstructionsBackward(std::prev(It), Begin, SkipPseudoOp);
}

/// Construct a range iterator which begins at \p It and moves forwards until
/// \p End is reached, skipping any debug instructions.
template <typename IterT>
inline auto instructionsWithoutDebug(IterT It, IterT End,
                                     bool SkipPseudoOp = true) {
  return make_filter_range(make_range(It, End), [=](const MachineInstr &MI) {
    return !MI.isDebugInstr() && !(SkipPseudoOp && MI.isPseudoProbe());
  });
}
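// Example (sketch): count the real (non-debug, non-pseudo-probe)
// instructions in a block.
//
//   unsigned NumRealInstrs = 0;
//   for (const MachineInstr &MI :
//        instructionsWithoutDebug(MBB.begin(), MBB.end()))
//     ++NumRealInstrs;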

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H
//===--- llvm/CodeGen/WasmAddressSpaces.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Address Spaces for WebAssembly Type Handling
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_WASM_ADDRESS_SPACES_H
#define LLVM_CODEGEN_WASM_ADDRESS_SPACES_H

namespace llvm {

namespace WebAssembly {

enum WasmAddressSpace : unsigned {
  // Default address space, for pointers to linear memory (stack, heap, data).
  WASM_ADDRESS_SPACE_DEFAULT = 0,
  // A non-integral address space for pointers to named objects outside of
  // linear memory: WebAssembly globals or WebAssembly locals.  Loads and stores
  // to these pointers are lowered to global.get / global.set or local.get /
  // local.set, as appropriate.
  WASM_ADDRESS_SPACE_VAR = 1,
  // A non-integral address space for externref values
  WASM_ADDRESS_SPACE_EXTERNREF = 10,
  // A non-integral address space for funcref values
  WASM_ADDRESS_SPACE_FUNCREF = 20,
};

inline bool isDefaultAddressSpace(unsigned AS) {
  return AS == WASM_ADDRESS_SPACE_DEFAULT;
}
inline bool isWasmVarAddressSpace(unsigned AS) {
  return AS == WASM_ADDRESS_SPACE_VAR;
}
inline bool isValidAddressSpace(unsigned AS) {
  return isDefaultAddressSpace(AS) || isWasmVarAddressSpace(AS);
}
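
// Editorial sketch (not part of the upstream header): classifying a pointer's
// address space, assuming an llvm::PointerType *PtrTy is in scope.
//
//   unsigned AS = PtrTy->getAddressSpace();
//   if (WebAssembly::isWasmVarAddressSpace(AS)) {
//     // The pointer names a WebAssembly global or local; loads/stores will
//     // be lowered to global.get/global.set or local.get/local.set.
//   }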

} // namespace WebAssembly

} // namespace llvm

#endif // LLVM_CODEGEN_WASM_ADDRESS_SPACES_H
//===-- llvm/CodeGen/DAGCombine.h  ------- SelectionDAG Nodes ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_CODEGEN_DAGCOMBINE_H
#define LLVM_CODEGEN_DAGCOMBINE_H

namespace llvm {

enum CombineLevel {
  BeforeLegalizeTypes,
  AfterLegalizeTypes,
  AfterLegalizeVectorOps,
  AfterLegalizeDAG
};

} // end llvm namespace

#endif
//===- llvm/CodeGen/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETINSTRINFO_H
#define LLVM_CODEGEN_TARGETINSTRINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Uniformity.h"
#include "llvm/CodeGen/MIRFormatter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOutliner.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {

class DFAPacketizer;
class InstrItineraryData;
class LiveIntervals;
class LiveVariables;
class MachineLoop;
class MachineMemOperand;
class MachineRegisterInfo;
class MCAsmInfo;
class MCInst;
struct MCSchedModel;
class Module;
class ScheduleDAG;
class ScheduleDAGMI;
class ScheduleHazardRecognizer;
class SDNode;
class SelectionDAG;
class SMSchedule;
class SwingSchedulerDAG;
class RegScavenger;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class TargetSubtargetInfo;
enum class MachineCombinerPattern;
enum class MachineTraceStrategy;

template <class T> class SmallVectorImpl;

using ParamLoadedValue = std::pair<MachineOperand, DIExpression*>;

struct DestSourcePair {
  const MachineOperand *Destination;
  const MachineOperand *Source;

  DestSourcePair(const MachineOperand &Dest, const MachineOperand &Src)
      : Destination(&Dest), Source(&Src) {}
};

/// Used to describe a register and immediate addition.
struct RegImmPair {
  Register Reg;
  int64_t Imm;

  RegImmPair(Register Reg, int64_t Imm) : Reg(Reg), Imm(Imm) {}
};

/// Used to describe an addressing mode similar to ExtAddrMode in CodeGenPrepare.
/// It holds the register values, the scale value and the displacement.
struct ExtAddrMode {
  Register BaseReg;
  Register ScaledReg;
  int64_t Scale;
  int64_t Displacement;
};

//---------------------------------------------------------------------------
///
/// TargetInstrInfo - Interface to description of machine instruction set
///
class TargetInstrInfo : public MCInstrInfo {
public:
  TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
                  unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u)
      : CallFrameSetupOpcode(CFSetupOpcode),
        CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
        ReturnOpcode(ReturnOpcode) {}
  TargetInstrInfo(const TargetInstrInfo &) = delete;
  TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
  virtual ~TargetInstrInfo();

  static bool isGenericOpcode(unsigned Opc) {
    return Opc <= TargetOpcode::GENERIC_OP_END;
  }

  static bool isGenericAtomicRMWOpcode(unsigned Opc) {
    return Opc >= TargetOpcode::GENERIC_ATOMICRMW_OP_START &&
           Opc <= TargetOpcode::GENERIC_ATOMICRMW_OP_END;
  }

  /// Given a machine instruction descriptor, returns the register
  /// class constraint for OpNum, or NULL.
  virtual
  const TargetRegisterClass *getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                                         const TargetRegisterInfo *TRI,
                                         const MachineFunction &MF) const;

  /// Return true if the instruction is trivially rematerializable, meaning it
  /// has no side effects and requires no operands that aren't always available.
  /// This means the only allowed uses are constants and unallocatable physical
  /// registers so that the instruction's result is independent of its place
  /// in the function.
  bool isTriviallyReMaterializable(const MachineInstr &MI) const {
    return MI.getOpcode() == TargetOpcode::IMPLICIT_DEF ||
           (MI.getDesc().isRematerializable() &&
            (isReallyTriviallyReMaterializable(MI) ||
             isReallyTriviallyReMaterializableGeneric(MI)));
  }
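
  // Editorial sketch (not part of the upstream header): a typical guard in a
  // pass, assuming a `const TargetInstrInfo *TII` and a defining instruction
  // `MachineInstr &DefMI` are in scope.
  //
  //   if (TII->isTriviallyReMaterializable(DefMI)) {
  //     // Safe to re-emit DefMI at the use point instead of spilling.
  //   }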

  /// Given that \p MO is a PhysReg use, return true if it can be ignored for
  /// the purposes of instruction rematerialization or sinking.
  virtual bool isIgnorableUse(const MachineOperand &MO) const {
    return false;
  }

protected:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set, this hook lets the target specify whether the instruction is actually
  /// trivially rematerializable, taking into consideration its operands. This
  /// predicate must return false if the instruction has any side effects other
  /// than producing a value, or if it requires any address registers that are
  /// not always available.
  /// Requirements must be checked as stated in isTriviallyReMaterializable().
  virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const {
    return false;
  }

  /// This method commutes the operands of the given machine instruction MI.
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2.
  ///
  /// If a target has any instructions that are commutable but require
  /// converting to different instructions or making non-trivial changes
  /// to commute them, this method can be overloaded to do that.
  /// The default implementation simply swaps the commutable operands.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  virtual MachineInstr *commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                               unsigned OpIdx1,
                                               unsigned OpIdx2) const;

  /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
  /// operand indices to (ResultIdx1, ResultIdx2).
  /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
  /// predefined to some indices or be undefined (designated by the special
  /// value 'CommuteAnyOperandIndex').
  /// The predefined result indices cannot be re-defined.
  /// The function returns true iff after the result pair redefinition
  /// the fixed result pair is equal to or equivalent to the source pair of
  /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
  /// the pairs (x,y) and (y,x) are equivalent.
  static bool fixCommutedOpIndices(unsigned &ResultIdx1, unsigned &ResultIdx2,
                                   unsigned CommutableOpIdx1,
                                   unsigned CommutableOpIdx2);
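
  // Editorial sketch (not part of the upstream header): the common shape of a
  // target override of findCommutedOpIndices built on fixCommutedOpIndices,
  // assuming operands 1 and 2 are the commutable sources of the opcode at
  // hand (MyTargetInstrInfo is a placeholder name).
  //
  //   bool MyTargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
  //                                                 unsigned &SrcOpIdx1,
  //                                                 unsigned &SrcOpIdx2) const {
  //     // Reconcile caller-fixed indices with the hard-wired pair (1, 2).
  //     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  //   }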

private:
  /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
  /// set and the target hook isReallyTriviallyReMaterializable returns false,
  /// this function does target-independent tests to determine if the
  /// instruction is really trivially rematerializable.
  bool isReallyTriviallyReMaterializableGeneric(const MachineInstr &MI) const;

public:
  /// These methods return the opcode of the frame setup/destroy instructions
  /// if they exist (-1 otherwise).  Some targets use pseudo instructions in
  /// order to abstract away the difference between operating with a frame
  /// pointer and operating without, through the use of these two instructions.
  ///
  unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
  unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }

  /// Returns true if the argument is a frame pseudo instruction.
  bool isFrameInstr(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode() ||
           I.getOpcode() == getCallFrameDestroyOpcode();
  }

  /// Returns true if the argument is a frame setup pseudo instruction.
  bool isFrameSetup(const MachineInstr &I) const {
    return I.getOpcode() == getCallFrameSetupOpcode();
  }

  /// Returns the size of the frame associated with the given frame instruction.
  /// For a frame setup instruction this is the size of the stack space set up
  /// by the instruction. For a frame destroy instruction this is the size of
  /// the frame freed by the caller.
  /// Note, in some cases a call frame (or a part of it) may be prepared prior
  /// to the frame setup instruction. It occurs in the calls that involve
  /// inalloca arguments. This function reports only the size of the frame part
  /// that is set up between the frame setup and destroy pseudo instructions.
  int64_t getFrameSize(const MachineInstr &I) const {
    assert(isFrameInstr(I) && "Not a frame instruction");
    assert(I.getOperand(0).getImm() >= 0);
    return I.getOperand(0).getImm();
  }

  /// Returns the total frame size, which is made up of the space set up inside
  /// the pair of frame start-stop instructions and the space that is set up
  /// prior to the pair.
  int64_t getFrameTotalSize(const MachineInstr &I) const {
    if (isFrameSetup(I)) {
      assert(I.getOperand(1).getImm() >= 0 &&
             "Frame size must not be negative");
      return getFrameSize(I) + I.getOperand(1).getImm();
    }
    return getFrameSize(I);
  }
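
  // Editorial sketch (not part of the upstream header): accumulating the
  // stack-pointer effect of a call sequence with these queries, assuming
  // `TII` points to this class and `MBB` is the block being scanned.
  // (getSPAdjust below generalizes this per instruction.)
  //
  //   int64_t SPAdj = 0;
  //   for (const MachineInstr &MI : MBB)
  //     if (TII->isFrameInstr(MI))
  //       SPAdj += TII->isFrameSetup(MI) ? TII->getFrameSize(MI)
  //                                      : -TII->getFrameSize(MI);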

  unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
  unsigned getReturnOpcode() const { return ReturnOpcode; }

  /// Returns the actual stack pointer adjustment made by an instruction
  /// as part of a call sequence. By default, only call frame setup/destroy
  /// instructions adjust the stack, but targets may want to override this
  /// to enable more fine-grained adjustment, or adjust by a different value.
  virtual int getSPAdjust(const MachineInstr &MI) const;

  /// Return true if the instruction is a "coalescable" extension instruction.
  /// That is, it's like a copy where it's legal for the source to overlap the
  /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
  /// expected the pre-extension value is available as a subreg of the result
  /// register. This also returns the sub-register index in SubIdx.
  virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
                                     Register &DstReg, unsigned &SubIdx) const {
    return false;
  }

  /// If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isLoadFromStackSlot that returns the number of
  /// bytes loaded from the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the load does.
  virtual unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                       int &FrameIndex,
                                       unsigned &MemBytes) const {
    MemBytes = 0;
    return isLoadFromStackSlot(MI, FrameIndex);
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                             int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a load from a stack slot,
  /// return true along with the FrameIndices of the loaded stack slot and the
  /// machine mem operands containing the reference.
  /// If not, return false.  Unlike isLoadFromStackSlot, this returns true for
  /// any instruction that loads from the stack.  This is just a hint, as some
  /// cases may be missed.
  virtual bool hasLoadFromStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the stack slot stored to.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex) const {
    return 0;
  }

  /// Optional extension of isStoreToStackSlot that returns the number of
  /// bytes stored to the stack. This must be implemented if a backend
  /// supports partial stack slot spills/loads to further disambiguate
  /// what the store does.
  virtual unsigned isStoreToStackSlot(const MachineInstr &MI,
                                      int &FrameIndex,
                                      unsigned &MemBytes) const {
    MemBytes = 0;
    return isStoreToStackSlot(MI, FrameIndex);
  }

  /// Check for post-frame ptr elimination stack locations as well.
  /// This uses a heuristic, so it isn't reliable for correctness.
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr &MI,
                                            int &FrameIndex) const {
    return 0;
  }

  /// If the specified machine instruction has a store to a stack slot,
  /// return true along with the FrameIndices of the accessed stack slot and the
  /// machine mem operands containing the reference.
  /// If not, return false.  Unlike isStoreToStackSlot,
  /// this returns true for any instruction that stores to the
  /// stack.  This is just a hint, as some cases may be missed.
  virtual bool hasStoreToStackSlot(
      const MachineInstr &MI,
      SmallVectorImpl<const MachineMemOperand *> &Accesses) const;

  /// Return true if the specified machine instruction
  /// is a copy of one stack slot to another and has no other effect.
  /// Provide the identity of the two frame indices.
  virtual bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex,
                               int &SrcFrameIndex) const {
    return false;
  }

  /// Compute the size in bytes and offset within a stack slot of a spilled
  /// register or subregister.
  ///
  /// \param [out] Size in bytes of the spilled value.
  /// \param [out] Offset in bytes within the stack slot.
  /// \returns true if both Size and Offset are successfully computed.
  ///
  /// Not all subregisters have computable spill slots. For example,
  /// subregisters may not be byte-sized, and a pair of discontiguous
  /// subregisters has no single offset.
  ///
  /// Targets with nontrivial big-endian implementations may need to override
  /// this, particularly to support spilled vector registers.
  virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
                                 unsigned &Size, unsigned &Offset,
                                 const MachineFunction &MF) const;

  /// Return true if the given instruction is a terminator that is unspillable,
  /// according to isUnspillableTerminatorImpl.
  bool isUnspillableTerminator(const MachineInstr *MI) const {
    return MI->isTerminator() && isUnspillableTerminatorImpl(MI);
  }

  /// Returns the size in bytes of the specified MachineInstr, or ~0U
  /// when this function is not implemented by a target.
  virtual unsigned getInstSizeInBytes(const MachineInstr &MI) const {
    return ~0U;
  }

  /// Return true if the instruction is as cheap as a move instruction.
  ///
  /// Targets for different archs need to override this, and the result can
  /// also be finely tuned for specific micro-architectures.
  virtual bool isAsCheapAsAMove(const MachineInstr &MI) const {
    return MI.isAsCheapAsAMove();
  }

  /// Return true if the instruction should be sunk by MachineSink.
  ///
  /// MachineSink determines on its own whether the instruction is safe to sink;
  /// this gives the target a hook to override the default behavior with regard
  /// to which instructions should be sunk.
  virtual bool shouldSink(const MachineInstr &MI) const { return true; }

  /// Return false if the instruction should not be hoisted by MachineLICM.
  ///
  /// MachineLICM determines on its own whether the instruction is safe to
  /// hoist; this gives the target a hook to extend this assessment and prevent
  /// an instruction being hoisted from a given loop for target specific
  /// reasons.
  virtual bool shouldHoist(const MachineInstr &MI,
                           const MachineLoop *FromLoop) const {
    return true;
  }

  /// Re-issue the specified 'original' instruction at the
  /// specified location targeting a new destination register.
  /// The register in Orig->getOperand(0).getReg() will be substituted by
  /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
  /// SubIdx.
  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI, Register DestReg,
                             unsigned SubIdx, const MachineInstr &Orig,
                             const TargetRegisterInfo &TRI) const;

  /// Clones the instruction or the whole instruction bundle \p Orig and
  /// inserts it into \p MBB before \p InsertBefore. The target may update
  /// operands that are required to be unique.
  ///
  /// \p Orig must not return true for MachineInstr::isNotDuplicable().
  virtual MachineInstr &duplicate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertBefore,
                                  const MachineInstr &Orig) const;

  /// This method must be implemented by targets that
  /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
  /// may be able to convert a two-address instruction into one or more true
  /// three-address instructions on demand.  This allows the X86 target (for
  /// example) to convert ADD and SHL instructions into LEA instructions if they
  /// would require register copies due to two-addressness.
  ///
  /// This method returns a null pointer if the transformation cannot be
  /// performed, otherwise it returns the last new instruction.
  ///
  /// If \p LIS is not nullptr, the LiveIntervals info should be updated for
  /// replacing \p MI with new instructions, even though this function does not
  /// remove MI.
  virtual MachineInstr *convertToThreeAddress(MachineInstr &MI,
                                              LiveVariables *LV,
                                              LiveIntervals *LIS) const {
    return nullptr;
  }

  // This constant can be used as an input value of operand index passed to
  // the method findCommutedOpIndices() to tell the method that the
  // corresponding operand index is not pre-defined and that the method
  // can pick any commutable operand.
  static const unsigned CommuteAnyOperandIndex = ~0U;

  /// This method commutes the operands of the given machine instruction MI.
  ///
  /// The operands to be commuted are specified by their indices OpIdx1 and
  /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
  /// 'CommuteAnyOperandIndex', which means that the method is free to choose
  /// any commutable operand. If both arguments are set to
  /// 'CommuteAnyOperandIndex' then the method looks for two different
  /// commutable operands and commutes them if such operands can be found.
  ///
  /// If NewMI is false, MI is modified in place and returned; otherwise, a
  /// new machine instruction is created and returned.
  ///
  /// Do not call this method for a non-commutable instruction or
  /// for non-commutable operands.
  /// Even though the instruction is commutable, the method may still
  /// fail to commute the operands; a null pointer is returned in such cases.
  MachineInstr *
  commuteInstruction(MachineInstr &MI, bool NewMI = false,
                     unsigned OpIdx1 = CommuteAnyOperandIndex,
                     unsigned OpIdx2 = CommuteAnyOperandIndex) const;
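
  // Editorial sketch (not part of the upstream header): letting the routine
  // pick both operands, assuming `TII` and a commutable `MachineInstr &MI`
  // are in scope.
  //
  //   if (MachineInstr *Commuted = TII->commuteInstruction(MI)) {
  //     // NewMI defaulted to false, so MI was modified in place and
  //     // Commuted == &MI; a null result means commutation failed.
  //   }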

  /// Returns true iff the routine could find two commutable operands in the
  /// given machine instruction.
  /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
  /// If either of the INPUT values is set to the special value
  /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
  /// operand, then returns its index in the corresponding argument.
  /// If both INPUT values are set to 'CommuteAnyOperandIndex' then the method
  /// looks for two commutable operands.
  /// If the INPUT values refer to some operands of MI, then the method simply
  /// returns true if the corresponding operands are commutable and returns
  /// false otherwise.
  ///
  /// For example, calling this method this way:
  ///     unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
  ///     findCommutedOpIndices(MI, Op1, Op2);
  /// can be interpreted as a query asking to find an operand that would be
  /// commutable with operand #1.
  virtual bool findCommutedOpIndices(const MachineInstr &MI,
                                     unsigned &SrcOpIdx1,
                                     unsigned &SrcOpIdx2) const;

  /// Returns true if the target has a preference on the operand order of
  /// the given machine instruction, and sets \p Commute to indicate whether
  /// commuting is required to get the desired operand order.
  virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
    return false;
  }

  /// A pair composed of a register and a sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg.
  struct RegSubRegPair {
    Register Reg;
    unsigned SubReg;

    RegSubRegPair(Register Reg = Register(), unsigned SubReg = 0)
        : Reg(Reg), SubReg(SubReg) {}

    bool operator==(const RegSubRegPair& P) const {
      return Reg == P.Reg && SubReg == P.SubReg;
    }
    bool operator!=(const RegSubRegPair& P) const {
      return !(*this == P);
    }
  };

  /// A pair composed of a pair of a register and a sub-register index,
  /// and another sub-register index.
  /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
  struct RegSubRegPairAndIdx : RegSubRegPair {
    unsigned SubIdx;

    RegSubRegPairAndIdx(Register Reg = Register(), unsigned SubReg = 0,
                        unsigned SubIdx = 0)
        : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
  };

  /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
  /// the list is modeled as <Reg:SubReg, SubIdx>. Operands with the undef
  /// flag are not added to this list.
  /// E.g., REG_SEQUENCE %1:sub1, sub0, %2, sub1 would produce
  /// two elements:
  /// - %1:sub1, sub0
  /// - %2<:0>, sub1
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isRegSequenceLike(). In other words, one has to override
  /// getRegSequenceLikeInputs for target specific instructions.
  bool
  getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
                       SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;

  /// Build the equivalent inputs of a EXTRACT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
  /// E.g., EXTRACT_SUBREG %1:sub1, sub0, sub1 would produce:
  /// - %1:sub1, sub0
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isExtractSubregLike(). In other words, one has to override
  /// getExtractSubregLikeInputs for target specific instructions.
  bool getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                              RegSubRegPairAndIdx &InputReg) const;

  /// Build the equivalent inputs of a INSERT_SUBREG for the given \p MI
  /// and \p DefIdx.
  /// \p [out] BaseReg and \p [out] InsertedReg contain
  /// the equivalent inputs of INSERT_SUBREG.
  /// E.g., INSERT_SUBREG %0:sub0, %1:sub1, sub3 would produce:
  /// - BaseReg: %0:sub0
  /// - InsertedReg: %1:sub1, sub3
  ///
  /// \returns true if it is possible to build such an input sequence
  /// with the pair \p MI, \p DefIdx and the operand has no undef flag set.
  /// False otherwise.
  ///
  /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
  ///
  /// \note The generic implementation does not provide any support for
  /// MI.isInsertSubregLike(). In other words, one has to override
  /// getInsertSubregLikeInputs for target specific instructions.
  bool getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
                             RegSubRegPair &BaseReg,
                             RegSubRegPairAndIdx &InsertedReg) const;

  /// Return true if two machine instructions would produce identical values.
  /// By default, this is only true when the two instructions
  /// are deemed identical except for defs. If this function is called when the
  /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
  /// aggressive checks.
  virtual bool produceSameValue(const MachineInstr &MI0,
                                const MachineInstr &MI1,
                                const MachineRegisterInfo *MRI = nullptr) const;

  /// \returns true if a branch instruction with opcode \p BranchOpc
  /// is capable of jumping to a position \p BrOffset bytes away.
  virtual bool isBranchOffsetInRange(unsigned BranchOpc,
                                     int64_t BrOffset) const {
    llvm_unreachable("target did not implement");
  }

  /// \returns The block that branch instruction \p MI jumps to.
  virtual MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const {
    llvm_unreachable("target did not implement");
  }

  /// Insert an unconditional indirect branch at the end of \p MBB to \p
  /// NewDestBB. Optionally, insert code restoring clobbered registers in \p
  /// RestoreBB. \p BrOffset indicates the offset of \p NewDestBB relative to
  /// the offset of the position to insert the new branch.
  virtual void insertIndirectBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock &NewDestBB,
                                    MachineBasicBlock &RestoreBB,
                                    const DebugLoc &DL, int64_t BrOffset = 0,
                                    RegScavenger *RS = nullptr) const {
    llvm_unreachable("target did not implement");
  }

  /// Analyze the branching code at the end of MBB, returning
  /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
  /// implemented for a target).  Upon success, this returns false and returns
  /// with the following information in various cases:
  ///
  /// 1. If this block ends with no branches (it just falls through to its succ)
  ///    just return false, leaving TBB/FBB null.
  /// 2. If this block ends with only an unconditional branch, it sets TBB to be
  ///    the destination block.
  /// 3. If this block ends with a conditional branch and it falls through to a
  ///    successor block, it sets TBB to be the branch destination block and
  ///    fills Cond with a list of operands that evaluate the condition. These
  ///    operands can be passed to other TargetInstrInfo methods to create new
  ///    branches.
  /// 4. If this block ends with a conditional branch followed by an
  ///    unconditional branch, it returns the 'true' destination in TBB, the
  ///    'false' destination in FBB, and a list of operands that evaluate the
  ///    condition.  These operands can be passed to other TargetInstrInfo
  ///    methods to create new branches.
  ///
  /// Note that removeBranch and insertBranch must be implemented to support
  /// cases where this method returns success.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const {
    return true;
  }
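
  // Editorial sketch (not part of the upstream header): the canonical
  // analyze/remove/insert rewrite cycle, assuming `TII`, a
  // `MachineBasicBlock &MBB`, and a `DebugLoc DL` are in scope.
  //
  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond)) { // false == success
  //     TII->removeBranch(MBB);
  //     if (TBB) // re-emit, possibly after editing Cond
  //       TII->insertBranch(MBB, TBB, FBB, Cond, DL);
  //   }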

  /// Represents a predicate at the MachineFunction level.  The control flow a
  /// MachineBranchPredicate represents is:
  ///
  ///  Reg = LHS `Predicate` RHS         == ConditionDef
  ///  if Reg then goto TrueDest else goto FalseDest
  ///
  struct MachineBranchPredicate {
    enum ComparePredicate {
      PRED_EQ,     // True if two values are equal
      PRED_NE,     // True if two values are not equal
      PRED_INVALID // Sentinel value
    };

    ComparePredicate Predicate = PRED_INVALID;
    MachineOperand LHS = MachineOperand::CreateImm(0);
    MachineOperand RHS = MachineOperand::CreateImm(0);
    MachineBasicBlock *TrueDest = nullptr;
    MachineBasicBlock *FalseDest = nullptr;
    MachineInstr *ConditionDef = nullptr;

    /// SingleUseCondition is true if ConditionDef is dead except for the
    /// branch(es) at the end of the basic block.
    ///
    bool SingleUseCondition = false;

    explicit MachineBranchPredicate() = default;
  };

  /// Analyze the branching code at the end of MBB and parse it into the
  /// MachineBranchPredicate structure if possible.  Returns false on success
  /// and true on failure.
  ///
  /// If AllowModify is true, then this routine is allowed to modify the basic
  /// block (e.g. delete instructions after the unconditional branch).
  ///
  virtual bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                                      MachineBranchPredicate &MBP,
                                      bool AllowModify = false) const {
    return true;
  }

  /// Remove the branching code at the end of the specified MBB.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions that were removed.
  /// If \p BytesRemoved is non-null, report the change in code size from the
  /// removed instructions.
  virtual unsigned removeBranch(MachineBasicBlock &MBB,
                                int *BytesRemoved = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::removeBranch!");
  }

  /// Insert branch code into the end of the specified MachineBasicBlock. The
  /// operands to this method are the same as those returned by analyzeBranch.
  /// This is only invoked in cases where analyzeBranch returns success. It
  /// returns the number of instructions inserted. If \p BytesAdded is non-null,
  /// report the change in code size from the added instructions.
  ///
  /// It is also invoked by tail merging to add unconditional branches in
  /// cases where analyzeBranch doesn't apply because there was no original
  /// branch to analyze.  At least this much must be implemented, else tail
  /// merging needs to be disabled.
  ///
  /// The CFG information in MBB.Predecessors and MBB.Successors must be valid
  /// before calling this function.
  virtual unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                ArrayRef<MachineOperand> Cond,
                                const DebugLoc &DL,
                                int *BytesAdded = nullptr) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertBranch!");
  }

  unsigned insertUnconditionalBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *DestBB,
                                     const DebugLoc &DL,
                                     int *BytesAdded = nullptr) const {
    return insertBranch(MBB, DestBB, nullptr, ArrayRef<MachineOperand>(), DL,
                        BytesAdded);
  }

  /// Object returned by analyzeLoopForPipelining. Allows software pipelining
  /// implementations to query attributes of the loop being pipelined and to
  /// apply target-specific updates to the loop once pipelining is complete.
  class PipelinerLoopInfo {
  public:
    virtual ~PipelinerLoopInfo();
    /// Return true if the given instruction should not be pipelined and should
    /// be ignored. An example could be a loop comparison, or induction variable
    /// update with no users being pipelined.
    virtual bool shouldIgnoreForPipelining(const MachineInstr *MI) const = 0;

    /// Return true if the proposed schedule should be used.  Otherwise return
    /// false to not pipeline the loop. This function should be used to ensure
    /// that pipelined loops meet target-specific quality heuristics.
    virtual bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS) {
      return true;
    }

    /// Create a condition to determine if the trip count of the loop is greater
    /// than TC, where TC is always one more than for the previous prologue or
    /// 0 if this is being called for the outermost prologue.
    ///
    /// If the trip count is statically known to be greater than TC, return
    /// true. If the trip count is statically known to be not greater than TC,
    /// return false. Otherwise return nullopt and fill out Cond with the test
    /// condition.
    ///
    /// Note: This hook is guaranteed to be called from the innermost to the
    /// outermost prologue of the loop being software pipelined.
    virtual std::optional<bool>
    createTripCountGreaterCondition(int TC, MachineBasicBlock &MBB,
                                    SmallVectorImpl<MachineOperand> &Cond) = 0;

    /// Modify the loop such that the trip count is
    /// OriginalTC + TripCountAdjust.
    virtual void adjustTripCount(int TripCountAdjust) = 0;

    /// Called when the loop's preheader has been modified to NewPreheader.
    virtual void setPreheader(MachineBasicBlock *NewPreheader) = 0;

    /// Called when the loop is being removed. Any instructions in the preheader
    /// should be removed.
    ///
    /// Once this function is called, no other functions on this object are
    /// valid; the loop has been removed.
    virtual void disposed() = 0;
  };

  /// Analyze loop L, which must be a single-basic-block loop, and if its
  /// conditions can be understood well enough, produce a PipelinerLoopInfo
  /// object.
  virtual std::unique_ptr<PipelinerLoopInfo>
  analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
    return nullptr;
  }

  /// Analyze the loop code, return true if it cannot be understood. Upon
  /// success, this function returns false and returns information about the
  /// induction variable and compare instruction used at the end.
  virtual bool analyzeLoop(MachineLoop &L, MachineInstr *&IndVarInst,
                           MachineInstr *&CmpInst) const {
    return true;
  }

  /// Generate code to reduce the loop iteration count by one and check if the
  /// loop is finished.  Return the value/register of the new loop count.  We
  /// need this function when peeling off one or more iterations of a loop.
  /// This function assumes the nth iteration is peeled first.
  virtual unsigned reduceLoopCount(MachineBasicBlock &MBB,
                                   MachineBasicBlock &PreHeader,
                                   MachineInstr *IndVar, MachineInstr &Cmp,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   SmallVectorImpl<MachineInstr *> &PrevInsts,
                                   unsigned Iter, unsigned MaxIter) const {
    llvm_unreachable("Target didn't implement ReduceLoopCount");
  }

  /// Delete the instruction OldInst and everything after it, replacing it with
  /// an unconditional branch to NewDest. This is used by the tail merging pass.
  virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                       MachineBasicBlock *NewDest) const;

  /// Return true if it's legal to split the given basic
  /// block at the specified instruction (i.e. instruction would be the start
  /// of a new basic block).
  virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI) const {
    return true;
  }

  /// Return true if it's profitable to predicate the instructions of the
  /// specified basic block, whose accumulated instruction latency is
  /// "NumCycles", where the probability of the instructions being executed
  /// is given by Probability, and Confidence is a measure of our confidence
  /// that the branch will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Second variant of isProfitableToIfCvt. This one
  /// checks for the case where two basic blocks from the true and false paths
  /// of an if-then-else (diamond) are predicated on mutually exclusive
  /// predicates, where the probability of the true path being taken is given
  /// by Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB, unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
    return false;
  }

  /// Return true if it's profitable for the if-converter to duplicate
  /// instructions of specified accumulated instruction latencies in the
  /// specified MBB to enable if-conversion.
  /// The probability of the instructions being executed is given by
  /// Probability, and Confidence is a measure of our confidence that it
  /// will be properly predicted.
  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability) const {
    return false;
  }

  /// Return the increase in code size needed to predicate a contiguous run of
  /// NumInsts instructions.
  virtual unsigned extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                    unsigned NumInsts) const {
    return 0;
  }

  /// Return an estimate for the code size reduction (in bytes) which will be
  /// caused by removing the given branch instruction during if-conversion.
  virtual unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const {
    return getInstSizeInBytes(MI);
  }

  /// Return true if it's profitable to unpredicate
  /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
  /// exclusive predicates.
  /// e.g.
  ///   subeq  r0, r1, #1
  ///   addne  r0, r1, #1
  /// =>
  ///   sub    r0, r1, #1
  ///   addne  r0, r1, #1
  ///
  /// This may be profitable if the conditional instructions are always executed.
  virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
    return false;
  }

  /// Return true if it is possible to insert a select
  /// instruction that chooses between TrueReg and FalseReg based on the
  /// condition code in Cond.
  ///
  /// When successful, also return the latency in cycles from TrueReg,
  /// FalseReg, and Cond to the destination register. In most cases, a select
  /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
  ///
  /// Some x86 implementations have 2-cycle cmov instructions.
  ///
  /// @param MBB         Block where select instruction would be inserted.
  /// @param Cond        Condition returned by analyzeBranch.
  /// @param DstReg      Virtual dest register that the result should write to.
  /// @param TrueReg     Virtual register to select when Cond is true.
  /// @param FalseReg    Virtual register to select when Cond is false.
  /// @param CondCycles  Latency from Cond+Branch to select output.
  /// @param TrueCycles  Latency from TrueReg to select output.
  /// @param FalseCycles Latency from FalseReg to select output.
  virtual bool canInsertSelect(const MachineBasicBlock &MBB,
                               ArrayRef<MachineOperand> Cond, Register DstReg,
                               Register TrueReg, Register FalseReg,
                               int &CondCycles, int &TrueCycles,
                               int &FalseCycles) const {
    return false;
  }

  /// Insert a select instruction into MBB before I that will copy TrueReg to
  /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
  ///
  /// This function can only be called after canInsertSelect() returned true.
  /// The condition in Cond comes from analyzeBranch, and it can be assumed
  /// that the same flags or registers required by Cond are available at the
  /// insertion point.
  ///
  /// @param MBB      Block where select instruction should be inserted.
  /// @param I        Insertion point.
  /// @param DL       Source location for debugging.
  /// @param DstReg   Virtual register to be defined by select instruction.
  /// @param Cond     Condition as computed by analyzeBranch.
  /// @param TrueReg  Virtual register to copy when Cond is true.
  /// @param FalseReg Virtual register to copy when Cond is false.
  virtual void insertSelect(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            Register DstReg, ArrayRef<MachineOperand> Cond,
                            Register TrueReg, Register FalseReg) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
  }
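
  // Editorial sketch (not part of the upstream header): canInsertSelect gates
  // insertSelect, assuming `Cond` came from analyzeBranch and DstReg, TrueReg,
  // FalseReg, and InsertPt are placeholders.
  //
  //   int CondCycles, TrueCycles, FalseCycles;
  //   if (TII->canInsertSelect(MBB, Cond, DstReg, TrueReg, FalseReg,
  //                            CondCycles, TrueCycles, FalseCycles))
  //     TII->insertSelect(MBB, InsertPt, DL, DstReg, Cond, TrueReg, FalseReg);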

  /// Analyze the given select instruction, returning true if
  /// it cannot be understood. It is assumed that MI->isSelect() is true.
  ///
  /// When successful, return the controlling condition and the operands that
  /// determine the true and false result values.
  ///
  ///   Result = SELECT Cond, TrueOp, FalseOp
  ///
  /// Some targets can optimize select instructions, for example by predicating
  /// the instruction defining one of the operands. Such targets should set
  /// Optimizable.
  ///
  /// @param         MI Select instruction to analyze.
  /// @param Cond    Condition controlling the select.
  /// @param TrueOp  Operand number of the value selected when Cond is true.
  /// @param FalseOp Operand number of the value selected when Cond is false.
  /// @param Optimizable Returned as true if MI is optimizable.
  /// @returns False on success.
  virtual bool analyzeSelect(const MachineInstr &MI,
                             SmallVectorImpl<MachineOperand> &Cond,
                             unsigned &TrueOp, unsigned &FalseOp,
                             bool &Optimizable) const {
    assert(MI.getDesc().isSelect() && "MI must be a select instruction");
    return true;
  }

  /// Given a select instruction that was understood by
  /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
  /// merging it with one of its operands. Returns NULL on failure.
  ///
  /// When successful, returns the new select instruction. The client is
  /// responsible for deleting MI.
  ///
  /// If both sides of the select can be optimized, PreferFalse is used to pick
  /// a side.
  ///
  /// @param MI          Optimizable select instruction.
  /// @param NewMIs     Set that records all MIs in the basic block up to \p
  /// MI. Has to be updated with any newly created MI or deleted ones.
  /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
  /// @returns Optimized instruction or NULL.
  virtual MachineInstr *optimizeSelect(MachineInstr &MI,
                                       SmallPtrSetImpl<MachineInstr *> &NewMIs,
                                       bool PreferFalse = false) const {
    // This function must be implemented if Optimizable is ever set.
    llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
  }
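
  // Editorial sketch (not part of the upstream header): the analyzeSelect /
  // optimizeSelect handshake, assuming `TII` and a select `MachineInstr &MI`
  // are in scope.
  //
  //   SmallVector<MachineOperand, 4> Cond;
  //   SmallPtrSet<MachineInstr *, 8> NewMIs;
  //   unsigned TrueOp, FalseOp;
  //   bool Optimizable = false;
  //   if (!TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable) &&
  //       Optimizable)
  //     if (TII->optimizeSelect(MI, NewMIs))
  //       MI.eraseFromParent(); // the client deletes the old instruction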

  /// Emit instructions to copy a pair of physical registers.
  ///
  /// This function should support copies within any legal register class as
  /// well as any cross-class copies created during instruction selection.
  ///
  /// The source and destination registers may overlap, which may require a
  /// careful implementation when multiple copy instructions are required for
  /// large registers. See for example the ARM target.
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg,
                           bool KillSrc) const {
    llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
  }

  /// Allow targets to tell MachineVerifier whether a specific register
  /// MachineOperand can be used as part of PC-relative addressing.
  /// PC-relative addressing modes in many CISC architectures contain
  /// (non-PC) registers as offsets or scaling values, which inherently
  /// tags the corresponding MachineOperand with OPERAND_PCREL.
  ///
  /// @param MO The MachineOperand in question. MO.isReg() should always
  /// be true.
  /// @return Whether this operand is allowed to be used PC-relatively.
  virtual bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const {
    return false;
  }

  /// Return an index for MachineJumpTableInfo if \p MI is an indirect jump
  /// using a jump table, otherwise -1.
  virtual int getJumpTableIndex(const MachineInstr &MI) const { return -1; }

protected:
  /// Target-dependent implementation for isCopyInstr.
  /// If the specified machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  virtual std::optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const {
    return std::nullopt;
  }

  /// Return true if the given terminator MI is not expected to spill. This
  /// sets the live interval as not spillable and adjusts phi node lowering to
  /// not introduce copies after the terminator. Use with care; these are
  /// currently used for hardware loop intrinsics in very controlled situations,
  /// created prior to register allocation in loops that only have single phi
  /// users for the terminator's value. They may run out of registers if not
  /// used carefully.
  virtual bool isUnspillableTerminatorImpl(const MachineInstr *MI) const {
    return false;
  }

public:
  /// If the specified machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  /// For a COPY instruction the method naturally returns the destination and
  /// source registers as machine operands; for all other instructions the
  /// method calls the target-dependent implementation.
  std::optional<DestSourcePair> isCopyInstr(const MachineInstr &MI) const {
    if (MI.isCopy()) {
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    }
    return isCopyInstrImpl(MI);
  }
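
  // Editorial sketch (not part of the upstream header): a copy-propagation
  // style query using the optional result.
  //
  //   if (auto DestSrc = TII->isCopyInstr(MI)) {
  //     Register Dst = DestSrc->Destination->getReg();
  //     Register Src = DestSrc->Source->getReg();
  //     // ... forward Src to the users of Dst ...
  //   }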

  /// If the specified machine instruction is an instruction that adds an
  /// immediate value and a physical register, and stores the result in
  /// the given physical register \c Reg, return a pair of the source
  /// register and the offset which has been added.
  virtual std::optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                                   Register Reg) const {
    return std::nullopt;
  }

  /// Returns true if MI is an instruction that defines Reg to have a constant
  /// value, recording the value in ImmVal. ImmVal should be interpreted
  /// modulo the size of Reg.
  virtual bool getConstValDefinedInReg(const MachineInstr &MI,
                                       const Register Reg,
                                       int64_t &ImmVal) const {
    return false;
  }

  /// Store the specified register of the given register class to the specified
  /// stack frame index. The store instruction is to be added to the given
  /// machine basic block before the specified machine instruction. If isKill
  /// is true, the register operand is the last use and must be marked kill. If
  /// \p SrcReg is being directly spilled as part of assigning a virtual
  /// register, \p VReg is the register being assigned. This additional register
  /// argument is needed for certain targets when invoked from RegAllocFast to
  /// map the spilled physical register to its virtual register. A null
  /// register can be passed when invoked in any other context.
  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   Register SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI,
                                   Register VReg) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::storeRegToStackSlot!");
  }

  /// Load the specified register of the given register class from the specified
  /// stack frame index. The load instruction is to be added to the given
  /// machine basic block before the specified machine instruction. If \p
  /// DestReg is being directly reloaded as part of assigning a virtual
  /// register, \p VReg is the register being assigned. This additional register
  /// argument is needed for certain targets when invoked from RegAllocFast to
  /// map the loaded physical register to its virtual register. A null
  /// register can be passed when invoked in any other context.
  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    Register DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI,
                                    Register VReg) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::loadRegFromStackSlot!");
  }
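
  // Editorial sketch (not part of the upstream header): a spill/reload pair
  // as a register allocator might emit it; FI names an existing stack slot,
  // and the trailing Register() means no physical-to-virtual mapping is
  // needed.
  //
  //   TII->storeRegToStackSlot(MBB, SpillPt, SrcReg, /*isKill=*/true, FI,
  //                            RC, TRI, Register());
  //   TII->loadRegFromStackSlot(MBB, ReloadPt, DstReg, FI, RC, TRI,
  //                             Register());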

  /// This function is called for all pseudo instructions
  /// that remain after register allocation. Many pseudo instructions are
  /// created to help register allocation. This is the place to convert them
  /// into real instructions. The target can edit MI in place, or it can insert
  /// new instructions and erase MI. The function should return true if
  /// anything was changed.
  virtual bool expandPostRAPseudo(MachineInstr &MI) const { return false; }

  /// Check whether the target can fold a load that feeds a subreg operand
  /// (or a subreg operand that feeds a store).
  /// For example, X86 may want to return true if it can fold
  /// movl (%esp), %eax
  /// subb %al, ...
  /// Into:
  /// subb (%esp), ...
  ///
  /// Ideally, we'd like the target implementation of foldMemoryOperand() to
  /// reject subregs - but since this behavior used to be enforced in the
  /// target-independent code, moving this responsibility to the targets
  /// has the potential of causing nasty silent breakage in out-of-tree targets.
  virtual bool isSubregFoldable() const { return false; }

  /// For a patchpoint, stackmap, or statepoint intrinsic, return the range of
  /// operands which can't be folded into stack references. Operands outside
  /// of the range are most likely foldable but it is not guaranteed.
  /// These instructions are unique in that stack references for some operands
  /// have the same execution cost (e.g. none) as the unfolded register forms.
  /// The returned range is guaranteed to include all operands which can't be
  /// folded at zero cost.
  virtual std::pair<unsigned, unsigned>
  getPatchpointUnfoldableRange(const MachineInstr &MI) const;

  /// Attempt to fold a load or store of the specified stack
  /// slot into the specified machine instruction for the specified operand(s).
  /// If this is possible, a new instruction is returned with the specified
  /// operand folded, otherwise NULL is returned.
  /// The new instruction is inserted before MI, and the client is responsible
  /// for removing the old instruction.
  /// If VRM is passed, the assigned physregs can be inspected by the target to
  /// decide on using an opcode (note that those assignments can still change).
  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                  int FI,
                                  LiveIntervals *LIS = nullptr,
                                  VirtRegMap *VRM = nullptr) const;

  /// Same as the previous version except it allows folding of any load and
  /// store from / to any address, not just from a specific stack slot.
  MachineInstr *foldMemoryOperand(MachineInstr &MI, ArrayRef<unsigned> Ops,
                                  MachineInstr &LoadMI,
                                  LiveIntervals *LIS = nullptr) const;
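
  // Editorial sketch (not part of the upstream header): folding a stack-slot
  // reference into operand 1 of MI, where FI is an assumed spill-slot frame
  // index.
  //
  //   if (MachineInstr *Folded = TII->foldMemoryOperand(MI, {1}, FI, LIS))
  //     MI.eraseFromParent(); // the client removes the old instruction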

  /// This function defines the logic to lower COPY instruction to
  /// target specific instruction(s).
  void lowerCopy(MachineInstr *MI, const TargetRegisterInfo *TRI) const;

  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in \p Root. All potential patterns are
  /// returned in the \p Patterns vector. Patterns should be sorted in priority
  /// order since the pattern evaluator stops checking as soon as it finds a
  /// faster sequence.
  /// \param Root - Instruction that could be combined with one of its operands
  /// \param Patterns - Vector of possible combination patterns
  virtual bool
  getMachineCombinerPatterns(MachineInstr &Root,
                             SmallVectorImpl<MachineCombinerPattern> &Patterns,
                             bool DoRegPressureReduce) const;

  /// Return true if target supports reassociation of instructions in machine
  /// combiner pass to reduce register pressure for a given BB.
  virtual bool
  shouldReduceRegisterPressure(const MachineBasicBlock *MBB,
                               const RegisterClassInfo *RegClassInfo) const {
    return false;
  }

  /// Fix up the placeholder we may add in genAlternativeCodeSequence().
  virtual void
  finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P,
                    SmallVectorImpl<MachineInstr *> &InsInstrs) const {}

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  virtual bool isThroughputPattern(MachineCombinerPattern Pattern) const;

  /// Return true if the input \p Inst is part of a chain of dependent ops
  /// that are suitable for reassociation, otherwise return false.
  /// If the instruction's operands must be commuted to have a previous
  /// instruction of the same type define the first source operand, \p Commuted
  /// will be set to true.
  bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;

  /// Return true when \p Inst is both associative and commutative. If \p Invert
  /// is true, then the inverse operation of \p Inst must be tested.
  virtual bool isAssociativeAndCommutative(const MachineInstr &Inst,
                                           bool Invert = false) const {
    return false;
  }

  /// Return the inverse operation opcode if it exists for \p Opcode (e.g. add
  /// for sub and vice versa).
  virtual std::optional<unsigned> getInverseOpcode(unsigned Opcode) const {
    return std::nullopt;
  }
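  // A hypothetical target override, assuming MyTarget::ADD and MyTarget::SUB
  // opcodes exist and are inverses of each other (a sketch, not a real target):
  //
  //   std::optional<unsigned> MyTII::getInverseOpcode(unsigned Opcode) const {
  //     switch (Opcode) {
  //     case MyTarget::ADD: return MyTarget::SUB;
  //     case MyTarget::SUB: return MyTarget::ADD;
  //     default:            return std::nullopt;
  //     }
  //   }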

  /// Return true when \p Opcode1 or its inversion is equal to \p Opcode2.
  bool areOpcodesEqualOrInverse(unsigned Opcode1, unsigned Opcode2) const;

  /// Return true when \p Inst has reassociable operands in the same \p MBB.
  virtual bool hasReassociableOperands(const MachineInstr &Inst,
                                       const MachineBasicBlock *MBB) const;

  /// Return true when \p Inst has a reassociable sibling.
  virtual bool hasReassociableSibling(const MachineInstr &Inst,
                                      bool &Commuted) const;

  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence. The client
  /// has to decide whether the actual replacement is beneficial or not.
  /// \param Root - Instruction that could be combined with one of its operands
  /// \param Pattern - Combination pattern for Root
  /// \param InsInstrs - Vector of new instructions that implement \p Pattern
  /// \param DelInstrs - Old instructions, including \p Root, that could be
  /// replaced by InsInstrs
  /// \param InstIdxForVirtReg - map of virtual register to instruction in
  /// InsInstrs that defines it
  virtual void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const;
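  // Simplified sketch of how the MachineCombiner drives these hooks; the
  // local variable names are illustrative:
  //
  //   SmallVector<MachineCombinerPattern, 16> Patterns;
  //   if (TII->getMachineCombinerPatterns(Root, Patterns,
  //                                       /*DoRegPressureReduce=*/false)) {
  //     for (MachineCombinerPattern P : Patterns) {
  //       SmallVector<MachineInstr *, 16> InsInstrs, DelInstrs;
  //       DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
  //       TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
  //                                       InstrIdxForVirtReg);
  //       // ...evaluate the new sequence and either commit it or discard it.
  //     }
  //   }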

  /// When calculating the latency of the root instruction, accumulate the
  /// latency of the sequence into the root latency.
  /// \param Root - Instruction that could be combined with one of its operands
  virtual bool accumulateInstrSeqToRootLatency(MachineInstr &Root) const {
    return true;
  }

  /// Attempt to reassociate \p Root and \p Prev according to \p Pattern to
  /// reduce critical path length.
  void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
                      MachineCombinerPattern Pattern,
                      SmallVectorImpl<MachineInstr *> &InsInstrs,
                      SmallVectorImpl<MachineInstr *> &DelInstrs,
                      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;

  /// Reassociation of some instructions requires inverse operations (e.g.
  /// (X + A) - Y => (X - Y) + A). This method returns a pair of new opcodes
  /// (new root opcode, new prev opcode) that must be used to reassociate \p
  /// Root and \p Prev according to \p Pattern.
  std::pair<unsigned, unsigned>
  getReassociationOpcodes(MachineCombinerPattern Pattern,
                          const MachineInstr &Root,
                          const MachineInstr &Prev) const;

  /// The limit on resource length extension we accept in the MachineCombiner
  /// pass.
  virtual int getExtendResourceLenLimit() const { return 0; }

  /// This is an architecture-specific helper function of reassociateOps.
  /// Set special operand attributes for new instructions after reassociation.
  virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
                                     MachineInstr &NewMI1,
                                     MachineInstr &NewMI2) const {}

  /// Return true when a target supports MachineCombiner.
  virtual bool useMachineCombiner() const { return false; }

  /// Return a strategy that MachineCombiner must use when creating traces.
  virtual MachineTraceStrategy getMachineCombinerTraceStrategy() const;

  /// Return true if the given SDNode can be copied during scheduling
  /// even if it has glue.
  virtual bool canCopyGluedNodeDuringSchedule(SDNode *N) const { return false; }

protected:
  /// Target-dependent implementation for foldMemoryOperand.
  /// Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  /// The instruction and any auxiliary instructions necessary will be inserted
  /// at InsertPt.
  virtual MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const {
    return nullptr;
  }

  /// Target-dependent implementation for foldMemoryOperand.
  /// Target-independent code in foldMemoryOperand will
  /// take care of adding a MachineMemOperand to the newly created instruction.
  /// The instruction and any auxiliary instructions necessary will be inserted
  /// at InsertPt.
  virtual MachineInstr *foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
      LiveIntervals *LIS = nullptr) const {
    return nullptr;
  }

  /// Target-dependent implementation of getRegSequenceInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isRegSequenceLike().
  ///
  /// \see TargetInstrInfo::getRegSequenceInputs.
  virtual bool getRegSequenceLikeInputs(
      const MachineInstr &MI, unsigned DefIdx,
      SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
    return false;
  }

  /// Target-dependent implementation of getExtractSubregInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isExtractSubregLike().
  ///
  /// \see TargetInstrInfo::getExtractSubregInputs.
  virtual bool getExtractSubregLikeInputs(const MachineInstr &MI,
                                          unsigned DefIdx,
                                          RegSubRegPairAndIdx &InputReg) const {
    return false;
  }

  /// Target-dependent implementation of getInsertSubregInputs.
  ///
  /// \returns true if it is possible to build the equivalent
  /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
  ///
  /// \pre MI.isInsertSubregLike().
  ///
  /// \see TargetInstrInfo::getInsertSubregInputs.
  virtual bool
  getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
                            RegSubRegPair &BaseReg,
                            RegSubRegPairAndIdx &InsertedReg) const {
    return false;
  }

public:
  /// unfoldMemoryOperand - Separate a single instruction which folded a load or
  /// a store, or a load and a store, into two or more instructions. If this is
  /// possible, returns true as well as the new instructions by reference.
  virtual bool
  unfoldMemoryOperand(MachineFunction &MF, MachineInstr &MI, unsigned Reg,
                      bool UnfoldLoad, bool UnfoldStore,
                      SmallVectorImpl<MachineInstr *> &NewMIs) const {
    return false;
  }

  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                   SmallVectorImpl<SDNode *> &NewNodes) const {
    return false;
  }

  /// Returns the opcode of the would-be new
  /// instruction after load / store are unfolded from an instruction of the
  /// specified opcode. It returns zero if the specified unfolding is not
  /// possible. If LoadRegIndex is non-null, it is filled in with the operand
  /// index of the operand which will hold the register holding the loaded
  /// value.
  virtual unsigned
  getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad, bool UnfoldStore,
                             unsigned *LoadRegIndex = nullptr) const {
    return 0;
  }

  /// This is used by the pre-regalloc scheduler to determine if two loads are
  /// loading from the same base address. It should only return true if the base
  /// pointers are the same and the only differences between the two addresses
  /// are the offsets. It also returns the offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1,
                                       int64_t &Offset2) const {
    return false;
  }

  /// This is used by the pre-regalloc scheduler to determine (in conjunction
  /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
  /// On some targets if two loads are loading from
  /// addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load offsets
  /// from the common base address. It returns true if it decides it's desirable
  /// to schedule the two loads together. "NumLoads" is the number of loads that
  /// have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const {
    return false;
  }

  /// Get the base operand and byte offset of an instruction that reads/writes
  /// memory. This is a convenience function for callers that are only prepared
  /// to handle a single base operand.
  bool getMemOperandWithOffset(const MachineInstr &MI,
                               const MachineOperand *&BaseOp, int64_t &Offset,
                               bool &OffsetIsScalable,
                               const TargetRegisterInfo *TRI) const;
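  // Usage sketch; the out-parameters are only meaningful when the call
  // returns true:
  //
  //   const MachineOperand *BaseOp;
  //   int64_t Offset;
  //   bool OffsetIsScalable;
  //   if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
  //                                    TRI)) {
  //     // BaseOp and Offset now describe the address MI accesses.
  //   }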

  /// Get zero or more base operands and the byte offset of an instruction that
  /// reads/writes memory. Note that there may be zero base operands if the
  /// instruction accesses a constant address.
  /// It returns false if MI does not read/write memory, or if the base
  /// operands and offset could not be determined. It is not guaranteed to
  /// recognize base operands and offsets in all cases.
  virtual bool getMemOperandsWithOffsetWidth(
      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
      int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
      const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// Return true if the instruction contains a base register and offset. If
  /// true, the function also sets the operand position in the instruction
  /// for the base register and offset.
  virtual bool getBaseAndOffsetPosition(const MachineInstr &MI,
                                        unsigned &BasePos,
                                        unsigned &OffsetPos) const {
    return false;
  }

  /// Target dependent implementation to get the values constituting the address
  /// of a MachineInstr that is accessing memory. These values are returned as a
  /// struct ExtAddrMode which contains all relevant information to make up the
  /// address.
  virtual std::optional<ExtAddrMode>
  getAddrModeFromMemoryOp(const MachineInstr &MemI,
                          const TargetRegisterInfo *TRI) const {
    return std::nullopt;
  }
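  // Usage sketch, assuming the ExtAddrMode fields declared earlier in this
  // header (BaseReg, ScaledReg, Scale, Displacement):
  //
  //   if (auto AM = TII->getAddrModeFromMemoryOp(MemI, TRI)) {
  //     // Address == AM->BaseReg + AM->ScaledReg * AM->Scale
  //     //            + AM->Displacement
  //   }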

  /// Returns true if MI's Def is NullValueReg, and the MI
  /// does not change the Zero value, i.e. cases such as rax = shr rax, X where
  /// NullValueReg = rax. Note that if NullValueReg holds a non-zero value,
  /// this function can return true even when the instruction would change that
  /// value, e.g. NullValueReg = shl NullValueReg, 63.
  virtual bool preservesZeroValueInReg(const MachineInstr *MI,
                                       const Register NullValueReg,
                                       const TargetRegisterInfo *TRI) const {
    return false;
  }

  /// If the instruction is an increment of a constant value, return true and
  /// set \p Value to the amount.
  virtual bool getIncrementValue(const MachineInstr &MI, int &Value) const {
    return false;
  }

  /// Returns true if the two given memory operations should be scheduled
  /// adjacent. Note that you have to add:
  ///   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  /// or
  ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  /// to TargetPassConfig::createMachineScheduler() to have an effect.
  ///
  /// \p BaseOps1 and \p BaseOps2 are memory operands of two memory operations.
  /// \p NumLoads is the number of loads that will be in the cluster if this
  /// hook returns true.
  /// \p NumBytes is the number of bytes that will be loaded from all the
  /// clustered loads if this hook returns true.
  virtual bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                   ArrayRef<const MachineOperand *> BaseOps2,
                                   unsigned NumLoads, unsigned NumBytes) const {
    llvm_unreachable("target did not implement shouldClusterMemOps()");
  }
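  // For example, a target's pass config might register the clustering
  // mutation like this (a sketch; MyPassConfig is illustrative):
  //
  //   ScheduleDAGInstrs *
  //   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  //     ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  //     DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  //     return DAG;
  //   }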

  /// Reverses the branch condition of the specified condition list,
  /// returning false on success and true if it cannot be reversed.
  virtual bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
    return true;
  }
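  // Note the inverted return convention; a typical call site looks like this
  // (a sketch; BrCond is illustrative):
  //
  //   SmallVector<MachineOperand, 4> Cond(BrCond.begin(), BrCond.end());
  //   if (!TII->reverseBranchCondition(Cond)) {
  //     // Cond now encodes the negated condition.
  //   }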

  /// Insert a noop into the instruction stream at the specified point.
  virtual void insertNoop(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI) const;

  /// Insert noops into the instruction stream at the specified point.
  virtual void insertNoops(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI,
                           unsigned Quantity) const;

  /// Return the target's noop instruction.
  virtual MCInst getNop() const;

  /// Return true for post-incremented instructions.
  virtual bool isPostIncrement(const MachineInstr &MI) const { return false; }

  /// Returns true if the instruction is already predicated.
  virtual bool isPredicated(const MachineInstr &MI) const { return false; }

  /// Assumes the instruction is already predicated and returns true if the
  /// instruction can be predicated again.
  virtual bool canPredicatePredicatedInstr(const MachineInstr &MI) const {
    assert(isPredicated(MI) && "Instruction is not predicated");
    return false;
  }

  /// Returns a MIRPrinter comment for this machine operand.
  virtual std::string
  createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op,
                          unsigned OpIdx, const TargetRegisterInfo *TRI) const;

  /// Returns true if the instruction is a
  /// terminator instruction that has not been predicated.
  bool isUnpredicatedTerminator(const MachineInstr &MI) const;

  /// Returns true if MI is an unconditional tail call.
  virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
    return false;
  }

  /// Returns true if the tail call can be made conditional on BranchCond.
  virtual bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
                                          const MachineInstr &TailCall) const {
    return false;
  }

  /// Replace the conditional branch in MBB with a conditional tail call.
  virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
                                         SmallVectorImpl<MachineOperand> &Cond,
                                         const MachineInstr &TailCall) const {
    llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
  }

  /// Convert the instruction into a predicated instruction.
  /// It returns true if the operation was successful.
  virtual bool PredicateInstruction(MachineInstr &MI,
                                    ArrayRef<MachineOperand> Pred) const;

  /// Returns true if the first specified predicate
  /// subsumes the second, e.g. GE subsumes GT.
  virtual bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                 ArrayRef<MachineOperand> Pred2) const {
    return false;
  }

  /// If the specified instruction defines any predicate
  /// or condition code register(s) used for predication, returns true as well
  /// as the definition predicate(s) by reference.
  /// SkipDead should be set to false whenever dead
  /// predicate instructions should also be considered as definitions.
  /// A dead predicate instruction is one that is guaranteed to be removed
  /// after a call to PredicateInstruction.
  virtual bool ClobbersPredicate(MachineInstr &MI,
                                 std::vector<MachineOperand> &Pred,
                                 bool SkipDead) const {
    return false;
  }

  /// Return true if the specified instruction can be predicated.
  /// By default, this returns true for every instruction with a
  /// PredicateOperand.
  virtual bool isPredicable(const MachineInstr &MI) const {
    return MI.getDesc().isPredicable();
  }

  /// Return true if it's safe to move a machine
  /// instruction that defines the specified register class.
  virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
    return true;
  }

  /// Test if the given instruction should be considered a scheduling boundary.
  /// This primarily includes labels and terminators.
  virtual bool isSchedulingBoundary(const MachineInstr &MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const;

  /// Measure the specified inline asm to determine an approximation of its
  /// length.
  virtual unsigned getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI = nullptr) const;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions before register allocation.
  virtual ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                               const ScheduleDAG *DAG) const;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions before register allocation.
  virtual ScheduleHazardRecognizer *
  CreateTargetMIHazardRecognizer(const InstrItineraryData *,
                                 const ScheduleDAGMI *DAG) const;

  /// Allocate and return a hazard recognizer to use for this target when
  /// scheduling the machine instructions after register allocation.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *,
                                     const ScheduleDAG *DAG) const;

  /// Allocate and return a hazard recognizer for use by non-scheduling
  /// passes.
  virtual ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Provide a global flag for disabling the PreRA hazard recognizer that
  /// targets may choose to honor.
  bool usePreRAHazardRecognizer() const;

  /// For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2 if it has two register operands, and the value it
  /// compares against in CmpValue. Return true if the comparison instruction
  /// can be analyzed.
  virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                              Register &SrcReg2, int64_t &Mask,
                              int64_t &Value) const {
    return false;
  }

  /// See if the comparison instruction can be converted
  /// into something more efficient. E.g., on ARM most instructions can set the
  /// flags register, obviating the need for a separate CMP.
  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                                    Register SrcReg2, int64_t Mask,
                                    int64_t Value,
                                    const MachineRegisterInfo *MRI) const {
    return false;
  }
  virtual bool optimizeCondBranch(MachineInstr &MI) const { return false; }

  /// Try to remove the load by folding it to a register operand at the use.
  /// We fold the load instructions if and only if the
  /// def and use are in the same BB. We only look at one load and see
  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
  /// defined by the load we are trying to fold. DefMI returns the machine
  /// instruction that defines FoldAsLoadDefReg, and the function returns
  /// the machine instruction generated due to folding.
  virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
                                          const MachineRegisterInfo *MRI,
                                          Register &FoldAsLoadDefReg,
                                          MachineInstr *&DefMI) const {
    return nullptr;
  }

  /// 'Reg' is known to be defined by a move immediate instruction,
  /// try to fold the immediate into the use instruction.
  /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
  /// then the caller may assume that DefMI has been erased from its parent
  /// block. The caller may assume that it will not be erased by this
  /// function otherwise.
  virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                             Register Reg, MachineRegisterInfo *MRI) const {
    return false;
  }

  /// Return the number of micro-operations the given machine
  /// instruction will be decoded to on the target CPU. The itinerary's
  /// IssueWidth is the number of microops that can be dispatched each
  /// cycle. An instruction with zero microops takes no dispatch resources.
  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr &MI) const;

  /// Return true for pseudo instructions that don't consume any
  /// machine resources in their current form. These are common cases that the
  /// scheduler should consider free, rather than conservatively handling them
  /// as instructions with no itinerary.
  bool isZeroCost(unsigned Opcode) const {
    return Opcode <= TargetOpcode::COPY;
  }

  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                SDNode *DefNode, unsigned DefIdx,
                                SDNode *UseNode, unsigned UseIdx) const;

  /// Compute and return the use operand latency of a given pair of def and use.
  /// In most cases, the static scheduling itinerary is enough to determine the
  /// operand latency. But it may not be possible for instructions with a
  /// variable number of defs / uses.
  ///
  /// This is a raw interface to the itinerary that may be directly overridden
  /// by a target. Use computeOperandLatency to get the best estimate of
  /// latency.
  virtual int getOperandLatency(const InstrItineraryData *ItinData,
                                const MachineInstr &DefMI, unsigned DefIdx,
                                const MachineInstr &UseMI,
                                unsigned UseIdx) const;

  /// Compute the instruction latency of a given instruction.
  /// If the instruction has higher cost when predicated, it's returned via
  /// PredCost.
  virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr &MI,
                                   unsigned *PredCost = nullptr) const;

  virtual unsigned getPredicationCost(const MachineInstr &MI) const;

  virtual int getInstrLatency(const InstrItineraryData *ItinData,
                              SDNode *Node) const;

  /// Return the default expected latency for a def based on its opcode.
  unsigned defaultDefLatency(const MCSchedModel &SchedModel,
                             const MachineInstr &DefMI) const;

  /// Return true if this opcode has high latency to its result.
  virtual bool isHighLatencyDef(int opc) const { return false; }

  /// Compute operand latency between a def of 'Reg'
  /// and a use in the current loop. Return true if the target considered
  /// it 'high'. This is used by optimization passes such as machine LICM to
  /// determine whether it makes sense to hoist an instruction out even in a
  /// high register pressure situation.
  virtual bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                     const MachineRegisterInfo *MRI,
                                     const MachineInstr &DefMI, unsigned DefIdx,
                                     const MachineInstr &UseMI,
                                     unsigned UseIdx) const {
    return false;
  }

  /// Compute operand latency of a def of 'Reg'. Return true
  /// if the target considered it 'low'.
  virtual bool hasLowDefLatency(const TargetSchedModel &SchedModel,
                                const MachineInstr &DefMI,
                                unsigned DefIdx) const;

  /// Perform target-specific instruction verification.
  virtual bool verifyInstruction(const MachineInstr &MI,
                                 StringRef &ErrInfo) const {
    return true;
  }

  /// Return the current execution domain and bit mask of
  /// possible domains for instruction.
  ///
  /// Some micro-architectures have multiple execution domains, and multiple
  /// opcodes that perform the same operation in different domains.  For
  /// example, the x86 architecture provides the por, orps, and orpd
  /// instructions that all do the same thing.  There is a latency penalty if a
  /// register is written in one domain and read in another.
  ///
  /// This function returns a pair (domain, mask) containing the execution
  /// domain of MI, and a bit mask of possible domains.  The setExecutionDomain
  /// function can be used to change the opcode to one of the domains in the
  /// bit mask.  Instructions whose execution domain can't be changed should
  /// return a 0 mask.
  ///
  /// The execution domain numbers don't have any special meaning except domain
  /// 0 is used for instructions that are not associated with any interesting
  /// execution domain.
  ///
  virtual std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr &MI) const {
    return std::make_pair(0, 0);
  }

  /// Change the opcode of MI to execute in Domain.
  ///
  /// The bit (1 << Domain) must be set in the mask returned from
  /// getExecutionDomain(MI).
  virtual void setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}
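  // Intended client pattern for the two hooks above (a sketch; DesiredDomain
  // is illustrative):
  //
  //   uint16_t Domain, Mask;
  //   std::tie(Domain, Mask) = TII->getExecutionDomain(MI);
  //   if (Domain != DesiredDomain && (Mask & (1u << DesiredDomain)))
  //     TII->setExecutionDomain(MI, DesiredDomain);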

  /// Returns the preferred minimum clearance
  /// before an instruction with an unwanted partial register update.
  ///
  /// Some instructions only write part of a register, and implicitly need to
  /// read the other parts of the register.  This may cause unwanted stalls
  /// preventing otherwise unrelated instructions from executing in parallel in
  /// an out-of-order CPU.
  ///
  /// For example, the x86 instruction cvtsi2ss writes its result to bits
  /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
  /// the instruction needs to wait for the old value of the register to become
  /// available:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// In the code above, the cvtsi2ss instruction needs to wait for the addps
  /// instruction before it can issue, even though the high bits of %xmm0
  /// probably aren't needed.
  ///
  /// This hook returns the preferred clearance before MI, measured in
  /// instructions.  Other defs of MI's operand OpNum are avoided in the last N
  /// instructions before MI.  It should only return a positive value for
  /// unwanted dependencies.  If the old bits of the defined register have
  /// useful values, or if MI is determined to otherwise read the dependency,
  /// the hook should return 0.
  ///
  /// The unwanted dependency may be handled by:
  ///
  /// 1. Allocating the same register for an MI def and use.  That makes the
  ///    unwanted dependency identical to a required dependency.
  ///
  /// 2. Allocating a register for the def that has no defs in the previous N
  ///    instructions.
  ///
  /// 3. Calling breakPartialRegDependency() with the same arguments.  This
  ///    allows the target to insert a dependency breaking instruction.
  ///
  virtual unsigned
  getPartialRegUpdateClearance(const MachineInstr &MI, unsigned OpNum,
                               const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no partial register dependency.
    return 0;
  }

  /// Return the minimum clearance before an instruction that reads an
  /// unused register.
  ///
  /// For example, AVX instructions may copy part of a register operand into
  /// the unused high bits of the destination register.
  ///
  /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
  ///
  /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
  /// false dependence on any previous write to %xmm0.
  ///
  /// This hook works similarly to getPartialRegUpdateClearance, except that
  /// \p OpNum is the index of the operand that reads the undef register.
  virtual unsigned getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
                                        const TargetRegisterInfo *TRI) const {
    // The default implementation returns 0 for no undef register dependency.
    return 0;
  }

  /// Insert a dependency-breaking instruction
  /// before MI to eliminate an unwanted dependency on OpNum.
  ///
  /// If it wasn't possible to avoid a def in the last N instructions before MI
  /// (see getPartialRegUpdateClearance), this hook will be called to break the
  /// unwanted dependency.
  ///
  /// On x86, an xorps instruction can be used as a dependency breaker:
  ///
  ///   addps %xmm1, %xmm0
  ///   movaps %xmm0, (%rax)
  ///   xorps %xmm0, %xmm0
  ///   cvtsi2ss %rbx, %xmm0
  ///
  /// An <imp-kill> operand should be added to MI if an instruction was
  /// inserted.  This ties the instructions together in the post-ra scheduler.
  ///
  virtual void breakPartialRegDependency(MachineInstr &MI, unsigned OpNum,
                                         const TargetRegisterInfo *TRI) const {}

  /// Create machine specific model for scheduling.
  virtual DFAPacketizer *
  CreateTargetScheduleState(const TargetSubtargetInfo &) const {
    return nullptr;
  }

  /// Sometimes, it is possible for the target
  /// to tell, even without aliasing information, that two MIs access different
  /// memory addresses. This function returns true if two MIs access different
  /// memory addresses and false otherwise.
  ///
  /// Assumes any physical registers used to compute addresses have the same
  /// value for both instructions. (This is the most useful assumption for
  /// post-RA scheduling.)
  ///
  /// See also MachineInstr::mayAlias, which is implemented on top of this
  /// function.
  virtual bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const {
    assert(MIa.mayLoadOrStore() &&
           "MIa must load from or modify a memory location");
    assert(MIb.mayLoadOrStore() &&
           "MIb must load from or modify a memory location");
    return false;
  }

  /// Return the value to use for the MachineCSE's LookAheadLimit,
  /// which is a heuristic used for CSE'ing phys reg defs.
  virtual unsigned getMachineCSELookAheadLimit() const {
    // The default lookahead is small to prevent unprofitable quadratic
    // behavior.
    return 5;
  }

  /// Return the maximal number of alias checks on memory operands. For
  /// instructions with more than one memory operand, the alias check on a
  /// single MachineInstr pair has quadratic overhead and results in
  /// unacceptable performance in the worst case. The limit here clamps the
  /// maximal number of checks performed. Usually, that's the product of the
  /// memory operand counts of the pair of MachineInstrs to be checked. For
  /// instance, with two MachineInstrs of 4 and 5 memory operands respectively,
  /// a total of 20 checks are required. With this limit set to 16, their alias
  /// check is skipped. We choose to limit the product instead of the
  /// individual instructions as targets may have special MachineInstrs with a
  /// considerably high number of memory operands, such as `ldm` in ARM.
  /// Setting this limit per MachineInstr would result in either too high
  /// overhead or too rigid a restriction.
  virtual unsigned getMemOperandAACheckLimit() const { return 16; }

  /// Return an array that contains the ids of the target indices (used for the
  /// TargetIndex machine operand) and their names.
  ///
  /// MIR Serialization is able to serialize only the target indices that are
  /// defined by this method.
  virtual ArrayRef<std::pair<int, const char *>>
  getSerializableTargetIndices() const {
    return std::nullopt;
  }

  /// Decompose the machine operand's target flags into two values - the direct
  /// target flag value and any of bit flags that are applied.
  virtual std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
    return std::make_pair(0u, 0u);
  }
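  // For a target that stores the direct value in the low bits and bit flags
  // above them, an override could look like this (hypothetical flag layout):
  //
  //   std::pair<unsigned, unsigned>
  //   MyTII::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  //     return std::make_pair(TF & 0xF,    // direct target flag value
  //                           TF & ~0xFu); // applied bit flags
  //   }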

  /// Return an array that contains the direct target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const {
    return std::nullopt;
  }

  /// Return an array that contains the bitmask target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const {
    return std::nullopt;
  }

  /// Return an array that contains the MMO target flag values and their
  /// names.
  ///
  /// MIR Serialization is able to serialize only the MMO target flags that are
  /// defined by this method.
  virtual ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const {
    return std::nullopt;
  }

  /// Determines whether \p Inst is a tail call instruction. Override this
  /// method on targets that do not properly set MCID::Return and MCID::Call on
  /// tail call instructions.
  virtual bool isTailCall(const MachineInstr &Inst) const {
    return Inst.isReturn() && Inst.isCall();
  }

  /// True if the instruction is bound to the top of its basic block and no
  /// other instructions shall be inserted before it. This can be implemented
  /// to prevent the register allocator from inserting spills before such
  /// instructions.
  virtual bool isBasicBlockPrologue(const MachineInstr &MI) const {
    return false;
  }

  /// During PHI elimination, this lets the target make necessary checks and
  /// insert the copy to the PHI destination register in a target specific
  /// manner.
  virtual MachineInstr *createPHIDestinationCopy(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
      const DebugLoc &DL, Register Src, Register Dst) const {
    return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
        .addReg(Src);
  }

  /// During PHI elimination, this lets the target make necessary checks and
  /// insert the copy to the PHI destination register in a target specific
  /// manner.
  virtual MachineInstr *createPHISourceCopy(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator InsPt,
                                            const DebugLoc &DL, Register Src,
                                            unsigned SrcSubReg,
                                            Register Dst) const {
    return BuildMI(MBB, InsPt, DL, get(TargetOpcode::COPY), Dst)
        .addReg(Src, 0, SrcSubReg);
  }

  /// Returns a \p outliner::OutlinedFunction struct containing target-specific
  /// information for a set of outlining candidates. Returns std::nullopt if the
  /// candidates are not suitable for outlining.
  virtual std::optional<outliner::OutlinedFunction> getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningCandidateInfo!");
  }

  /// Optional target hook to create the LLVM IR attributes for the outlined
  /// function. If overridden, the overriding function must call the default
  /// implementation.
  virtual void mergeOutliningCandidateAttributes(
      Function &F, std::vector<outliner::Candidate> &Candidates) const;

protected:
  /// Target-dependent implementation for getOutliningTypeImpl.
  virtual outliner::InstrType
  getOutliningTypeImpl(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::getOutliningTypeImpl!");
  }

public:
  /// Returns how or if \p MIT should be outlined. \p Flags is the
  /// target-specific information returned by isMBBSafeToOutlineFrom.
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const;

  /// Optional target hook that returns true if \p MBB is safe to outline from,
  /// and returns any target-specific information in \p Flags.
  virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                      unsigned &Flags) const;

  /// Optional target hook which partitions \p MBB into outlinable ranges for
  /// instruction mapping purposes. Each range is defined by two iterators:
  /// [start, end).
  ///
  /// Ranges are expected to be ordered top-down. That is, ranges closer to the
  /// top of the block should come before ranges closer to the end of the block.
  ///
  /// Ranges cannot overlap.
  ///
  /// If an entire block is mappable, then its range is [MBB.begin(), MBB.end()).
  ///
  /// All instructions not present in an outlinable range are considered
  /// illegal.
  virtual SmallVector<
      std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
  getOutlinableRanges(MachineBasicBlock &MBB, unsigned &Flags) const {
    return {std::make_pair(MBB.begin(), MBB.end())};
  }

  /// Insert a custom frame for outlined functions.
  virtual void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                                  const outliner::OutlinedFunction &OF) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::buildOutlinedFrame!");
  }

  /// Insert a call to an outlined function into the program.
  /// Returns an iterator to the spot where we inserted the call. This must be
  /// implemented by the target.
  virtual MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     outliner::Candidate &C) const {
    llvm_unreachable(
        "Target didn't implement TargetInstrInfo::insertOutlinedCall!");
  }

  /// Return true if the function can safely be outlined from.
  /// A function \p MF is considered safe for outlining if an outlined function
  /// produced from instructions in \p MF yields a program which produces the
  /// same output for any set of given inputs.
  virtual bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                           bool OutlineFromLinkOnceODRs) const {
    llvm_unreachable("Target didn't implement "
                     "TargetInstrInfo::isFunctionSafeToOutlineFrom!");
  }

  /// Return true if the function should be outlined from by default.
  virtual bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const {
    return false;
  }

  /// Produce the expression describing the \p MI loading a value into
  /// the physical register \p Reg. This hook should only be used with
  /// \p MIs belonging to VReg-less functions.
  virtual std::optional<ParamLoadedValue>
  describeLoadedValue(const MachineInstr &MI, Register Reg) const;

  /// Given the generic extension instruction \p ExtMI, returns true if this
  /// extension is a likely candidate for being folded into another
  /// instruction.
  virtual bool isExtendLikelyToBeFolded(MachineInstr &ExtMI,
                                        MachineRegisterInfo &MRI) const {
    return false;
  }

  /// Return the MIR formatter to format/parse MIR operands. Targets can
  /// override this virtual function and return a target-specific MIR formatter.
  virtual const MIRFormatter *getMIRFormatter() const {
    if (!Formatter.get())
      Formatter = std::make_unique<MIRFormatter>();
    return Formatter.get();
  }

  /// Returns the target-specific default value for tail duplication.
  /// This value will be used if the tail-dup-placement-threshold argument is
  /// not provided.
  virtual unsigned getTailDuplicateSize(CodeGenOpt::Level OptLevel) const {
    return OptLevel >= CodeGenOpt::Aggressive ? 4 : 2;
  }

  /// Returns the callee operand from the given \p MI.
  virtual const MachineOperand &getCalleeOperand(const MachineInstr &MI) const {
    return MI.getOperand(0);
  }

  /// Return the uniformity behavior of the given instruction.
  virtual InstructionUniformity
  getInstructionUniformity(const MachineInstr &MI) const {
    return InstructionUniformity::Default;
  }

  /// Returns true if the given \p MI defines a TargetIndex operand that can be
  /// tracked by its offset, can have values, and can have debug info
  /// associated with it. If so, sets \p Index and \p Offset of the target index
  /// operand.
  virtual bool isExplicitTargetIndexDef(const MachineInstr &MI, int &Index,
                                        int64_t &Offset) const {
    return false;
  }

private:
  mutable std::unique_ptr<MIRFormatter> Formatter;
  unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
  unsigned CatchRetOpcode;
  unsigned ReturnOpcode;
};

/// Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
template <> struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
  using RegInfo = DenseMapInfo<unsigned>;

  static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
    return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
                                          RegInfo::getEmptyKey());
  }

  static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
    return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
                                          RegInfo::getTombstoneKey());
  }

  /// Reuse getHashValue implementation from
  /// std::pair<unsigned, unsigned>.
  static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
    std::pair<unsigned, unsigned> PairVal = std::make_pair(Val.Reg, Val.SubReg);
    return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
  }

  static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
                      const TargetInstrInfo::RegSubRegPair &RHS) {
    return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
           RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
  }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETINSTRINFO_H
//===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTER_H
#define LLVM_CODEGEN_REGISTER_H

#include "llvm/MC/MCRegister.h"
#include <cassert>

namespace llvm {

/// Wrapper class representing virtual and physical registers. Should be passed
/// by value.
class Register {
  unsigned Reg;

public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr Register(MCRegister Val) : Reg(Val) {}

  // Register numbers can represent physical registers, virtual registers, and
  // sometimes stack slots. The unsigned values are divided into these ranges:
  //
  //   0           Not a register, can be used as a sentinel.
  //   [1;2^30)    Physical registers assigned by TableGen.
  //   [2^30;2^31) Stack slots. (Rarely used.)
  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  //
  // Further sentinels can be allocated from the small negative integers.
  // DenseMapInfo<unsigned> uses -1u and -2u.
  static_assert(std::numeric_limits<decltype(Reg)>::max() >= 0xFFFFFFFF,
                "Reg isn't large enough to hold full range.");

  /// isStackSlot - Sometimes it is useful to be able to store a non-negative
  /// frame index in a variable that normally holds a register. isStackSlot()
  /// returns true if Reg is in the range used for stack slots.
  ///
  /// FIXME: remove in favor of member.
  static constexpr bool isStackSlot(unsigned Reg) {
    return MCRegister::isStackSlot(Reg);
  }

  /// Return true if this is a stack slot.
  constexpr bool isStack() const { return MCRegister::isStackSlot(Reg); }

  /// Compute the frame index from a register value representing a stack slot.
  static int stackSlot2Index(Register Reg) {
    assert(Reg.isStack() && "Not a stack slot");
    return int(Reg - MCRegister::FirstStackSlot);
  }

  /// Convert a non-negative frame index to a stack slot register value.
  static Register index2StackSlot(int FI) {
    assert(FI >= 0 && "Cannot hold a negative frame index.");
    return Register(FI + MCRegister::FirstStackSlot);
  }

  /// Return true if the specified register number is in
  /// the physical register namespace.
  static constexpr bool isPhysicalRegister(unsigned Reg) {
    return MCRegister::isPhysicalRegister(Reg);
  }

  /// Return true if the specified register number is in
  /// the virtual register namespace.
  static constexpr bool isVirtualRegister(unsigned Reg) {
    return Reg & MCRegister::VirtualRegFlag;
  }

  /// Convert a virtual register number to a 0-based index.
  /// The first virtual register in a function will get the index 0.
  static unsigned virtReg2Index(Register Reg) {
    assert(Reg.isVirtual() && "Not a virtual register");
    return Reg & ~MCRegister::VirtualRegFlag;
  }

  /// Convert a 0-based index to a virtual register number.
  /// This is the inverse operation of VirtReg2IndexFunctor below.
  static Register index2VirtReg(unsigned Index) {
    assert(Index < (1u << 31) && "Index too large for virtual register range.");
    return Index | MCRegister::VirtualRegFlag;
  }
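  // index2VirtReg and virtReg2Index are inverses; for example (sketch):
  //
  //   Register R = Register::index2VirtReg(0); // first virtual register
  //   assert(R.isVirtual() && Register::virtReg2Index(R) == 0);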

  /// Return true if the specified register number is in the virtual register
  /// namespace.
  constexpr bool isVirtual() const { return isVirtualRegister(Reg); }

  /// Return true if the specified register number is in the physical register
  /// namespace.
  constexpr bool isPhysical() const { return isPhysicalRegister(Reg); }

  /// Convert a virtual register number to a 0-based index. The first virtual
  /// register in a function will get the index 0.
  unsigned virtRegIndex() const { return virtReg2Index(Reg); }

  constexpr operator unsigned() const { return Reg; }

  constexpr unsigned id() const { return Reg; }

  constexpr operator MCRegister() const { return MCRegister(Reg); }

  /// Utility to check-convert this value to a MCRegister. The caller is
  /// expected to have already validated that this Register is, indeed,
  /// physical.
  MCRegister asMCReg() const {
    assert(Reg == MCRegister::NoRegister ||
           MCRegister::isPhysicalRegister(Reg));
    return MCRegister(Reg);
  }

  constexpr bool isValid() const { return Reg != MCRegister::NoRegister; }

  /// Comparisons between register objects
  constexpr bool operator==(const Register &Other) const {
    return Reg == Other.Reg;
  }
  constexpr bool operator!=(const Register &Other) const {
    return Reg != Other.Reg;
  }
  constexpr bool operator==(const MCRegister &Other) const {
    return Reg == Other.id();
  }
  constexpr bool operator!=(const MCRegister &Other) const {
    return Reg != Other.id();
  }

  /// Comparisons against register constants. E.g.
  /// * R == AArch64::WZR
  /// * R == 0
  /// * R == VirtRegMap::NO_PHYS_REG
  constexpr bool operator==(unsigned Other) const { return Reg == Other; }
  constexpr bool operator!=(unsigned Other) const { return Reg != Other; }
  constexpr bool operator==(int Other) const { return Reg == unsigned(Other); }
  constexpr bool operator!=(int Other) const { return Reg != unsigned(Other); }
  // MSVC requires that we explicitly declare these two as well.
  constexpr bool operator==(MCPhysReg Other) const {
    return Reg == unsigned(Other);
  }
  constexpr bool operator!=(MCPhysReg Other) const {
    return Reg != unsigned(Other);
  }
};

// Provide DenseMapInfo for Register
template <> struct DenseMapInfo<Register> {
  static inline unsigned getEmptyKey() {
    return DenseMapInfo<unsigned>::getEmptyKey();
  }
  static inline unsigned getTombstoneKey() {
    return DenseMapInfo<unsigned>::getTombstoneKey();
  }
  static unsigned getHashValue(const Register &Val) {
    return DenseMapInfo<unsigned>::getHashValue(Val.id());
  }
  static bool isEqual(const Register &LHS, const Register &RHS) {
    return DenseMapInfo<unsigned>::isEqual(LHS.id(), RHS.id());
  }
};
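// This specialization lets Register be used directly as a DenseMap key,
// e.g. (sketch):
//
//   DenseMap<Register, unsigned> UseCounts;
//   ++UseCounts[SomeReg];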

} // namespace llvm

#endif // LLVM_CODEGEN_REGISTER_H
//===- MacroFusion.h - Macro Fusion -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains the definition of the DAG scheduling mutation to
/// pair instructions back to back.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACROFUSION_H
#define LLVM_CODEGEN_MACROFUSION_H

#include <functional>
#include <memory>

namespace llvm {

class MachineInstr;
class ScheduleDAGMutation;
class TargetInstrInfo;
class TargetSubtargetInfo;
class ScheduleDAGInstrs;
class SUnit;

/// Check if the instr pair, FirstMI and SecondMI, should be fused
/// together. Given SecondMI, when FirstMI is unspecified, check whether
/// SecondMI may be part of a fused pair at all.
using ShouldSchedulePredTy = std::function<bool(const TargetInstrInfo &TII,
                                                const TargetSubtargetInfo &TSI,
                                                const MachineInstr *FirstMI,
                                                const MachineInstr &SecondMI)>;
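// A minimal predicate sketch (the MyTarget opcodes are illustrative, not a
// real target):
//
//   static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
//                                      const TargetSubtargetInfo &TSI,
//                                      const MachineInstr *FirstMI,
//                                      const MachineInstr &SecondMI) {
//     if (SecondMI.getOpcode() != MyTarget::BCC)
//       return false;
//     // With FirstMI unspecified, report whether SecondMI can fuse at all.
//     return !FirstMI || FirstMI->getOpcode() == MyTarget::CMP;
//   }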

/// Checks if the number of cluster edges between SU and its predecessors is
/// less than FuseLimit.
bool hasLessThanNumFused(const SUnit &SU, unsigned FuseLimit);

/// Create an artificial edge between FirstSU and SecondSU.
/// Make data dependencies from the FirstSU also dependent on the SecondSU to
/// prevent them from being scheduled between the FirstSU and the SecondSU
/// and vice-versa.
/// Fusing more than 2 instructions is not currently supported.
bool fuseInstructionPair(ScheduleDAGInstrs &DAG, SUnit &FirstSU,
                         SUnit &SecondSU);

/// Create a DAG scheduling mutation to pair instructions back to back
/// for instructions that benefit according to the target-specific
/// shouldScheduleAdjacent predicate function.
std::unique_ptr<ScheduleDAGMutation>
createMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);
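// Typically registered from a target's createMachineScheduler() hook, e.g.
// (sketch, using a predicate like the one above):
//
//   DAG->addMutation(createMacroFusionDAGMutation(shouldScheduleAdjacent));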

/// Create a DAG scheduling mutation to pair branch instructions with one
/// of their predecessors back to back for instructions that benefit according
/// to the target-specific shouldScheduleAdjacent predicate function.
std::unique_ptr<ScheduleDAGMutation>
createBranchMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);

} // end namespace llvm

#endif // LLVM_CODEGEN_MACROFUSION_H
//===- llvm/CodeGen/MachineRegionInfo.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
#define LLVM_CODEGEN_MACHINEREGIONINFO_H

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include <cassert>

namespace llvm {

class MachinePostDominatorTree;
class MachineRegion;
class MachineRegionNode;
class MachineRegionInfo;

template <> struct RegionTraits<MachineFunction> {
  using FuncT = MachineFunction;
  using BlockT = MachineBasicBlock;
  using RegionT = MachineRegion;
  using RegionNodeT = MachineRegionNode;
  using RegionInfoT = MachineRegionInfo;
  using DomTreeT = MachineDominatorTree;
  using DomTreeNodeT = MachineDomTreeNode;
  using PostDomTreeT = MachinePostDominatorTree;
  using DomFrontierT = MachineDominanceFrontier;
  using InstT = MachineInstr;
  using LoopT = MachineLoop;
  using LoopInfoT = MachineLoopInfo;

  static unsigned getNumSuccessors(MachineBasicBlock *BB) {
    return BB->succ_size();
  }
};

class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
public:
  inline MachineRegionNode(MachineRegion *Parent, MachineBasicBlock *Entry,
                           bool isSubRegion = false)
      : RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry,
                                                      isSubRegion) {}

  bool operator==(const MachineRegion &RN) const {
    return this == reinterpret_cast<const MachineRegionNode *>(&RN);
  }
};

class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
public:
  MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
                MachineRegionInfo *RI, MachineDominatorTree *DT,
                MachineRegion *Parent = nullptr);
  ~MachineRegion();

  bool operator==(const MachineRegionNode &RN) const {
    return &RN == reinterpret_cast<const MachineRegionNode *>(this);
  }
};

class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
public:
  explicit MachineRegionInfo();
  ~MachineRegionInfo() override;

  // updateStatistics - Update statistics about created regions.
  void updateStatistics(MachineRegion *R) final;

  void recalculate(MachineFunction &F, MachineDominatorTree *DT,
                   MachinePostDominatorTree *PDT, MachineDominanceFrontier *DF);
};

class MachineRegionInfoPass : public MachineFunctionPass {
  MachineRegionInfo RI;

public:
  static char ID;

  explicit MachineRegionInfoPass();
  ~MachineRegionInfoPass() override;

  MachineRegionInfo &getRegionInfo() { return RI; }

  const MachineRegionInfo &getRegionInfo() const { return RI; }

  /// @name MachineFunctionPass interface
  //@{
  bool runOnMachineFunction(MachineFunction &F) override;
  void releaseMemory() override;
  void verifyAnalysis() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void print(raw_ostream &OS, const Module *) const override;
  void dump() const;
  //@}
};

template <>
template <>
inline MachineBasicBlock *
RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>()
    const {
  assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
  return getEntry();
}

template <>
template <>
inline MachineRegion *
RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>()
    const {
  assert(isSubRegion() && "This is not a subregion RegionNode!");
  auto Unconst =
      const_cast<RegionNodeBase<RegionTraits<MachineFunction>> *>(this);
  return reinterpret_cast<MachineRegion *>(Unconst);
}

RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock,
                      MachineRegion);

RegionGraphTraits(MachineRegion, MachineRegionNode);
RegionGraphTraits(const MachineRegion, const MachineRegionNode);

template <>
struct GraphTraits<MachineRegionInfo *>
    : public GraphTraits<FlatIt<MachineRegionNode *>> {
  using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
                                     false, GraphTraits<FlatIt<NodeRef>>>;

  static NodeRef getEntryNode(MachineRegionInfo *RI) {
    return GraphTraits<FlatIt<MachineRegion *>>::getEntryNode(
        RI->getTopLevelRegion());
  }

  static nodes_iterator nodes_begin(MachineRegionInfo *RI) {
    return nodes_iterator::begin(getEntryNode(RI));
  }

  static nodes_iterator nodes_end(MachineRegionInfo *RI) {
    return nodes_iterator::end(getEntryNode(RI));
  }
};

template <>
struct GraphTraits<MachineRegionInfoPass *>
    : public GraphTraits<MachineRegionInfo *> {
  using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
                                     false, GraphTraits<FlatIt<NodeRef>>>;

  static NodeRef getEntryNode(MachineRegionInfoPass *RI) {
    return GraphTraits<MachineRegionInfo *>::getEntryNode(&RI->getRegionInfo());
  }

  static nodes_iterator nodes_begin(MachineRegionInfoPass *RI) {
    return GraphTraits<MachineRegionInfo *>::nodes_begin(&RI->getRegionInfo());
  }

  static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
    return GraphTraits<MachineRegionInfo *>::nodes_end(&RI->getRegionInfo());
  }
};
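
// Illustrative sketch (not part of the original header): the GraphTraits
// specializations above exist so that generic graph machinery (df_iterator,
// GraphWriter, and friends) can walk the flattened region graph. A depth-first
// visit over all region nodes, assuming a computed MachineRegionInfo *RI,
// might look like:
//
// \code
//   using GT = GraphTraits<MachineRegionInfo *>;
//   for (auto It = GT::nodes_begin(RI), End = GT::nodes_end(RI); It != End;
//        ++It) {
//     MachineRegionNode *N = *It;
//     // N->isSubRegion() distinguishes nested regions from plain blocks.
//     (void)N;
//   }
// \endcode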

extern template class RegionBase<RegionTraits<MachineFunction>>;
extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
extern template class RegionInfoBase<RegionTraits<MachineFunction>>;

} // end namespace llvm

#endif // LLVM_CODEGEN_MACHINEREGIONINFO_H

//===- SDNodeProperties.td - Common code for DAG isels -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

class SDNodeProperty;

// Selection DAG Pattern Operations
class SDPatternOperator {
  list<SDNodeProperty> Properties = [];
}

//===----------------------------------------------------------------------===//
// Selection DAG Node Properties.
//
// Note: These are hard coded into tblgen.
//
def SDNPCommutative : SDNodeProperty;   // X op Y == Y op X
def SDNPAssociative : SDNodeProperty;   // (X op Y) op Z == X op (Y op Z)
def SDNPHasChain    : SDNodeProperty;   // R/W chain operand and result
def SDNPOutGlue     : SDNodeProperty;   // Write a flag result
def SDNPInGlue      : SDNodeProperty;   // Read a flag operand
def SDNPOptInGlue   : SDNodeProperty;   // Optionally read a flag operand
def SDNPMayStore    : SDNodeProperty;   // May write to memory, sets 'mayStore'.
def SDNPMayLoad     : SDNodeProperty;   // May read memory, sets 'mayLoad'.
def SDNPSideEffect  : SDNodeProperty;   // Sets 'HasUnmodelledSideEffects'.
def SDNPMemOperand  : SDNodeProperty;   // Touches memory, has assoc MemOperand
def SDNPVariadic    : SDNodeProperty;   // Node has variable arguments.
def SDNPWantRoot    : SDNodeProperty;   // ComplexPattern gets the root of match
def SDNPWantParent  : SDNodeProperty;   // ComplexPattern gets the parent

//===-- ExpandVectorPredication.h - Expand vector predication ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_EXPANDVECTORPREDICATION_H
#define LLVM_CODEGEN_EXPANDVECTORPREDICATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class ExpandVectorPredicationPass
    : public PassInfoMixin<ExpandVectorPredicationPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_CODEGEN_EXPANDVECTORPREDICATION_H

//===-- CodeGen/RuntimeLibcalls.h - Runtime Library Calls -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the enum representing the list of runtime library calls
// the backend may emit during code generation, and also some helper functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
#define LLVM_CODEGEN_RUNTIMELIBCALLS_H

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/AtomicOrdering.h"

namespace llvm {
namespace RTLIB {
  /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
  /// the backend can emit.  The various long double types cannot be merged,
  /// because 80-bit library functions use "xf" and 128-bit use "tf".
  ///
  /// When adding PPCF128 functions here, note that their names generally need
  /// to be overridden for Darwin with the xxx$LDBL128 form.  See
  /// PPCISelLowering.cpp.
  ///
  enum Libcall {
#define HANDLE_LIBCALL(code, name) code,
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  };
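
  // Illustrative note (not part of the original header): the enum above uses
  // the "X macro" pattern, so the same .def file can be re-expanded with a
  // different HANDLE_LIBCALL definition to build parallel tables. A
  // hypothetical name table could be generated like so:
  //
  // \code
  //   static const char *const LibcallNames[] = {
  //   #define HANDLE_LIBCALL(code, name) name,
  //   #include "llvm/IR/RuntimeLibcalls.def"
  //   #undef HANDLE_LIBCALL
  //   };
  // \endcode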

  /// GetFPLibCall - Helper to return the right libcall for the given floating
  /// point type, or UNKNOWN_LIBCALL if there is none.
  Libcall getFPLibCall(EVT VT,
                       Libcall Call_F32,
                       Libcall Call_F64,
                       Libcall Call_F80,
                       Libcall Call_F128,
                       Libcall Call_PPCF128);
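
  /// Illustrative example (not from the original header): a target lowering a
  /// floating-point remainder might pick among the REM_* libcalls by type:
  /// \code
  ///   RTLIB::Libcall LC = RTLIB::getFPLibCall(VT, RTLIB::REM_F32,
  ///                                           RTLIB::REM_F64, RTLIB::REM_F80,
  ///                                           RTLIB::REM_F128,
  ///                                           RTLIB::REM_PPCF128);
  /// \endcode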

  /// getFPEXT - Return the FPEXT_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPEXT(EVT OpVT, EVT RetVT);

  /// getFPROUND - Return the FPROUND_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPROUND(EVT OpVT, EVT RetVT);

  /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPTOSINT(EVT OpVT, EVT RetVT);

  /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFPTOUINT(EVT OpVT, EVT RetVT);

  /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getSINTTOFP(EVT OpVT, EVT RetVT);

  /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getUINTTOFP(EVT OpVT, EVT RetVT);

  /// getPOWI - Return the POWI_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getPOWI(EVT RetVT);

  /// getLDEXP - Return the LDEXP_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getLDEXP(EVT RetVT);

  /// getFREXP - Return the FREXP_* value for the given types, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getFREXP(EVT RetVT);

  /// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getSYNC(unsigned Opc, MVT VT);

  /// Return the outline atomics value for the given opcode, atomic ordering
  /// and type, or UNKNOWN_LIBCALL if there is none.
  Libcall getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order, MVT VT);

  /// getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return
  /// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);

  /// getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return
  /// MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);

  /// getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return
  /// MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
  /// UNKNOWN_LIBCALL if there is none.
  Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);

} // end namespace RTLIB
} // end namespace llvm

#endif // LLVM_CODEGEN_RUNTIMELIBCALLS_H

//===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements SlotIndex and related classes. The purpose of SlotIndex
// is to describe a position at which a register can become live, or cease to
// be live.
//
// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
// is held in LiveIntervals and provides the real numbering. This allows
// LiveIntervals to perform largely transparent renumbering.
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_SLOTINDEXES_H
#define LLVM_CODEGEN_SLOTINDEXES_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/Support/Allocator.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

namespace llvm {

class raw_ostream;

  /// This class represents an entry in the slot index list held in the
  /// SlotIndexes pass. It should not be used directly. See the
  /// SlotIndex & SlotIndexes classes for the public interface to this
  /// information.
  class IndexListEntry : public ilist_node<IndexListEntry> {
    MachineInstr *mi;
    unsigned index;

  public:
    IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}

    MachineInstr* getInstr() const { return mi; }
    void setInstr(MachineInstr *mi) {
      this->mi = mi;
    }

    unsigned getIndex() const { return index; }
    void setIndex(unsigned index) {
      this->index = index;
    }

#ifdef EXPENSIVE_CHECKS
    // When EXPENSIVE_CHECKS is defined, "erased" index list entries will
    // actually be moved to a "graveyard" list, and have their pointers
    // poisoned, so that dangling SlotIndex access can be reliably detected.
    void setPoison() {
      intptr_t tmp = reinterpret_cast<intptr_t>(mi);
      assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
      tmp |= 0x1;
      mi = reinterpret_cast<MachineInstr*>(tmp);
    }

    bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
#endif // EXPENSIVE_CHECKS
  };

  template <>
  struct ilist_alloc_traits<IndexListEntry>
      : public ilist_noalloc_traits<IndexListEntry> {};

  /// SlotIndex - An opaque wrapper around machine indexes.
  class SlotIndex {
    friend class SlotIndexes;

    enum Slot {
      /// Basic block boundary.  Used for live ranges entering and leaving a
      /// block without being live in the layout neighbor.  Also used as the
      /// def slot of PHI-defs.
      Slot_Block,

      /// Early-clobber register use/def slot.  A live range defined at
      /// Slot_EarlyClobber interferes with normal live ranges killed at
      /// Slot_Register.  Also used as the kill slot for live ranges tied to an
      /// early-clobber def.
      Slot_EarlyClobber,

      /// Normal register use/def slot.  Normal instructions kill and define
      /// register live ranges at this slot.
      Slot_Register,

      /// Dead def kill point.  Kill slot for a live range that is defined by
      /// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
      /// used anywhere.
      Slot_Dead,

      Slot_Count
    };
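
    // Illustrative note (not part of the original header): list entries are
    // numbered in multiples of four, so the four slots of one instruction pack
    // into consecutive values that getIndex() below recovers by OR-ing the
    // slot into the entry's number. For an entry numbered 16:
    //
    //   16 | Slot_Block        == 16
    //   16 | Slot_EarlyClobber == 17
    //   16 | Slot_Register     == 18
    //   16 | Slot_Dead         == 19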

    PointerIntPair<IndexListEntry*, 2, unsigned> lie;

    IndexListEntry* listEntry() const {
      assert(isValid() && "Attempt to compare reserved index.");
#ifdef EXPENSIVE_CHECKS
      assert(!lie.getPointer()->isPoisoned() &&
             "Attempt to access deleted list-entry.");
#endif // EXPENSIVE_CHECKS
      return lie.getPointer();
    }

    unsigned getIndex() const {
      return listEntry()->getIndex() | getSlot();
    }

    /// Returns the slot for this SlotIndex.
    Slot getSlot() const {
      return static_cast<Slot>(lie.getInt());
    }

  public:
    enum {
      /// The default distance between instructions as returned by distance().
      /// This may vary as instructions are inserted and removed.
      InstrDist = 4 * Slot_Count
    };

    /// Construct an invalid index.
    SlotIndex() = default;

    // Creates a SlotIndex from an IndexListEntry and a slot. Generally should
    // not be used. This method is only public to facilitate writing certain
    // unit tests.
    SlotIndex(IndexListEntry *entry, unsigned slot) : lie(entry, slot) {}

    // Construct a new slot index from the given one, and set the slot.
    SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
      assert(lie.getPointer() != nullptr &&
             "Attempt to construct index with 0 pointer.");
    }

    /// Returns true if this is a valid index. Invalid indices do
    /// not point into an index table, and cannot be compared.
    bool isValid() const {
      return lie.getPointer();
    }

    /// Return true for a valid index.
    explicit operator bool() const { return isValid(); }

    /// Print this index to the given raw_ostream.
    void print(raw_ostream &os) const;

    /// Dump this index to stderr.
    void dump() const;

    /// Compare two SlotIndex objects for equality.
    bool operator==(SlotIndex other) const {
      return lie == other.lie;
    }
    /// Compare two SlotIndex objects for inequality.
    bool operator!=(SlotIndex other) const {
      return lie != other.lie;
    }

    /// Compare two SlotIndex objects. Return true if the first index
    /// is strictly lower than the second.
    bool operator<(SlotIndex other) const {
      return getIndex() < other.getIndex();
    }
    /// Compare two SlotIndex objects. Return true if the first index
    /// is lower than, or equal to, the second.
    bool operator<=(SlotIndex other) const {
      return getIndex() <= other.getIndex();
    }

    /// Compare two SlotIndex objects. Return true if the first index
    /// is greater than the second.
    bool operator>(SlotIndex other) const {
      return getIndex() > other.getIndex();
    }

    /// Compare two SlotIndex objects. Return true if the first index
    /// is greater than, or equal to, the second.
    bool operator>=(SlotIndex other) const {
      return getIndex() >= other.getIndex();
    }

    /// isSameInstr - Return true if A and B refer to the same instruction.
    static bool isSameInstr(SlotIndex A, SlotIndex B) {
      return A.lie.getPointer() == B.lie.getPointer();
    }

    /// isEarlierInstr - Return true if A refers to an instruction earlier than
    /// B. This is equivalent to A < B && !isSameInstr(A, B).
    static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
      return A.listEntry()->getIndex() < B.listEntry()->getIndex();
    }

    /// Return true if A refers to the same instruction as B or an earlier one.
    /// This is equivalent to !isEarlierInstr(B, A).
    static bool isEarlierEqualInstr(SlotIndex A, SlotIndex B) {
      return !isEarlierInstr(B, A);
    }

    /// Return the distance from this index to the given one.
    int distance(SlotIndex other) const {
      return other.getIndex() - getIndex();
    }

    /// Return the scaled distance from this index to the given one, where all
    /// slots on the same instruction have zero distance, assuming that the slot
    /// indices are packed as densely as possible. There are normally gaps
    /// between instructions, so this assumption often doesn't hold. This
    /// results in this function often returning a value greater than the actual
    /// instruction distance.
    int getApproxInstrDistance(SlotIndex other) const {
      return (other.listEntry()->getIndex() - listEntry()->getIndex())
        / Slot_Count;
    }
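
    // Worked example (illustrative): with Slot_Count == 4 and the default
    // spacing of InstrDist == 16, two adjacent instructions numbered 16 and
    // 32 yield (32 - 16) / 4 == 4, even though the real instruction distance
    // is only 1.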

    /// isBlock - Returns true if this is a block boundary slot.
    bool isBlock() const { return getSlot() == Slot_Block; }

    /// isEarlyClobber - Returns true if this is an early-clobber slot.
    bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }

    /// isRegister - Returns true if this is a normal register use/def slot.
    /// Note that early-clobber slots may also be used for uses and defs.
    bool isRegister() const { return getSlot() == Slot_Register; }

    /// isDead - Returns true if this is a dead def kill slot.
    bool isDead() const { return getSlot() == Slot_Dead; }

    /// Returns the base index associated with this index. The base index
    /// is the one associated with the Slot_Block slot for the instruction
    /// pointed to by this index.
    SlotIndex getBaseIndex() const {
      return SlotIndex(listEntry(), Slot_Block);
    }

    /// Returns the boundary index associated with this index. The boundary
    /// index is the one associated with the Slot_Dead slot for the instruction
    /// pointed to by this index.
    SlotIndex getBoundaryIndex() const {
      return SlotIndex(listEntry(), Slot_Dead);
    }

    /// Returns the register use/def slot in the current instruction for a
    /// normal or early-clobber def.
    SlotIndex getRegSlot(bool EC = false) const {
      return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
    }

    /// Returns the dead def kill slot for the current instruction.
    SlotIndex getDeadSlot() const {
      return SlotIndex(listEntry(), Slot_Dead);
    }

    /// Returns the next slot in the index list. This could be either the
    /// next slot for the instruction pointed to by this index or, if this
    /// index is a Slot_Dead, the first slot for the next instruction.
    /// WARNING: This method is considerably more expensive than the methods
    /// that return specific slots (getRegSlot(), etc.). If you can, please
    /// use one of those methods.
    SlotIndex getNextSlot() const {
      Slot s = getSlot();
      if (s == Slot_Dead) {
        return SlotIndex(&*++listEntry()->getIterator(), Slot_Block);
      }
      return SlotIndex(listEntry(), s + 1);
    }

    /// Returns the next index. This is the index corresponding to this
    /// index's slot, but for the next instruction.
    SlotIndex getNextIndex() const {
      return SlotIndex(&*++listEntry()->getIterator(), getSlot());
    }

    /// Returns the previous slot in the index list. This could be either the
    /// previous slot for the instruction pointed to by this index or, if this
    /// index is a Slot_Block, the last slot for the previous instruction.
    /// WARNING: This method is considerably more expensive than the methods
    /// that return specific slots (getRegSlot(), etc.). If you can, please
    /// use one of those methods.
    SlotIndex getPrevSlot() const {
      Slot s = getSlot();
      if (s == Slot_Block) {
        return SlotIndex(&*--listEntry()->getIterator(), Slot_Dead);
      }
      return SlotIndex(listEntry(), s - 1);
    }

    /// Returns the previous index. This is the index corresponding to this
    /// index's slot, but for the previous instruction.
    SlotIndex getPrevIndex() const {
      return SlotIndex(&*--listEntry()->getIterator(), getSlot());
    }
  };

  inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
    li.print(os);
    return os;
  }

  using IdxMBBPair = std::pair<SlotIndex, MachineBasicBlock *>;

  /// SlotIndexes pass.
  ///
  /// This pass assigns indexes to each instruction.
  class SlotIndexes : public MachineFunctionPass {
  private:
    // IndexListEntry allocator.
    BumpPtrAllocator ileAllocator;

    using IndexList = ilist<IndexListEntry>;
    IndexList indexList;

    MachineFunction *mf = nullptr;

    using Mi2IndexMap = DenseMap<const MachineInstr *, SlotIndex>;
    Mi2IndexMap mi2iMap;

    /// MBBRanges - Map MBB number to (start, stop) indexes.
    SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges;

    /// Idx2MBBMap - Sorted list of pairs of index of first instruction
    /// and MBB id.
    SmallVector<IdxMBBPair, 8> idx2MBBMap;

    IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
      IndexListEntry *entry =
          static_cast<IndexListEntry *>(ileAllocator.Allocate(
              sizeof(IndexListEntry), alignof(IndexListEntry)));

      new (entry) IndexListEntry(mi, index);

      return entry;
    }

    /// Renumber locally after inserting curItr.
    void renumberIndexes(IndexList::iterator curItr);

  public:
    static char ID;

    SlotIndexes();

    ~SlotIndexes() override;

    void getAnalysisUsage(AnalysisUsage &au) const override;
    void releaseMemory() override;

    bool runOnMachineFunction(MachineFunction &fn) override;

    /// Dump the indexes.
    void dump() const;

    /// Repair indexes after adding and removing instructions.
    void repairIndexesInRange(MachineBasicBlock *MBB,
                              MachineBasicBlock::iterator Begin,
                              MachineBasicBlock::iterator End);

    /// Returns the zero index for this analysis.
    SlotIndex getZeroIndex() {
      assert(indexList.front().getIndex() == 0 && "First index is not 0?");
      return SlotIndex(&indexList.front(), 0);
    }

    /// Returns the base index of the last slot in this analysis.
    SlotIndex getLastIndex() {
      return SlotIndex(&indexList.back(), 0);
    }

    /// Returns true if the given machine instr is mapped to an index,
    /// otherwise returns false.
    bool hasIndex(const MachineInstr &instr) const {
      return mi2iMap.count(&instr);
    }

    /// Returns the base index for the given instruction.
    SlotIndex getInstructionIndex(const MachineInstr &MI,
                                  bool IgnoreBundle = false) const {
      // Instructions inside a bundle have the same number as the bundle itself.
      auto BundleStart = getBundleStart(MI.getIterator());
      auto BundleEnd = getBundleEnd(MI.getIterator());
      // Use the first non-debug instruction in the bundle to get SlotIndex.
      const MachineInstr &BundleNonDebug =
          IgnoreBundle ? MI
                       : *skipDebugInstructionsForward(BundleStart, BundleEnd);
      assert(!BundleNonDebug.isDebugInstr() &&
             "Could not use a debug instruction to query mi2iMap.");
      Mi2IndexMap::const_iterator itr = mi2iMap.find(&BundleNonDebug);
      assert(itr != mi2iMap.end() && "Instruction not found in maps.");
      return itr->second;
    }

    /// Returns the instruction for the given index, or null if the given
    /// index has no instruction associated with it.
    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
      return index.isValid() ? index.listEntry()->getInstr() : nullptr;
    }

    /// Returns the next non-null index, if one exists.
    /// Otherwise returns getLastIndex().
    SlotIndex getNextNonNullIndex(SlotIndex Index) {
      IndexList::iterator I = Index.listEntry()->getIterator();
      IndexList::iterator E = indexList.end();
      while (++I != E)
        if (I->getInstr())
          return SlotIndex(&*I, Index.getSlot());
      // We reached the end of the function.
      return getLastIndex();
    }

    /// getIndexBefore - Returns the index of the last indexed instruction
    /// before MI, or the start index of its basic block.
    /// MI is not required to have an index.
    SlotIndex getIndexBefore(const MachineInstr &MI) const {
      const MachineBasicBlock *MBB = MI.getParent();
      assert(MBB && "MI must be inserted in a basic block");
      MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
      while (true) {
        if (I == B)
          return getMBBStartIdx(MBB);
        --I;
        Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I);
        if (MapItr != mi2iMap.end())
          return MapItr->second;
      }
    }

    /// getIndexAfter - Returns the index of the first indexed instruction
    /// after MI, or the end index of its basic block.
    /// MI is not required to have an index.
    SlotIndex getIndexAfter(const MachineInstr &MI) const {
      const MachineBasicBlock *MBB = MI.getParent();
      assert(MBB && "MI must be inserted in a basic block");
      MachineBasicBlock::const_iterator I = MI, E = MBB->end();
      while (true) {
        ++I;
        if (I == E)
          return getMBBEndIdx(MBB);
        Mi2IndexMap::const_iterator MapItr = mi2iMap.find(&*I);
        if (MapItr != mi2iMap.end())
          return MapItr->second;
      }
    }

    /// Return the (start,end) range of the given basic block number.
    const std::pair<SlotIndex, SlotIndex> &
    getMBBRange(unsigned Num) const {
      return MBBRanges[Num];
    }

    /// Return the (start,end) range of the given basic block.
    const std::pair<SlotIndex, SlotIndex> &
    getMBBRange(const MachineBasicBlock *MBB) const {
      return getMBBRange(MBB->getNumber());
    }

    /// Returns the first index in the given basic block number.
    SlotIndex getMBBStartIdx(unsigned Num) const {
      return getMBBRange(Num).first;
    }

    /// Returns the first index in the given basic block.
    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
      return getMBBRange(mbb).first;
    }

    /// Returns the last index in the given basic block number.
    SlotIndex getMBBEndIdx(unsigned Num) const {
      return getMBBRange(Num).second;
    }

    /// Returns the last index in the given basic block.
    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
      return getMBBRange(mbb).second;
    }

    /// Iterator over the idx2MBBMap (sorted pairs of slot index of basic block
    /// begin and basic block)
    using MBBIndexIterator = SmallVectorImpl<IdxMBBPair>::const_iterator;

    /// Move iterator to the next IdxMBBPair where the SlotIndex is greater or
    /// equal to \p To.
    MBBIndexIterator advanceMBBIndex(MBBIndexIterator I, SlotIndex To) const {
      return std::partition_point(
          I, idx2MBBMap.end(),
          [=](const IdxMBBPair &IM) { return IM.first < To; });
    }

    /// Get an iterator pointing to the first IdxMBBPair whose SlotIndex is
    /// greater than or equal to \p Idx.
    MBBIndexIterator findMBBIndex(SlotIndex Idx) const {
      return advanceMBBIndex(idx2MBBMap.begin(), Idx);
    }

    /// Returns an iterator for the begin of the idx2MBBMap.
    MBBIndexIterator MBBIndexBegin() const {
      return idx2MBBMap.begin();
    }

    /// Return an iterator for the end of the idx2MBBMap.
    MBBIndexIterator MBBIndexEnd() const {
      return idx2MBBMap.end();
    }

    /// Returns the basic block which the given index falls in.
    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
      if (MachineInstr *MI = getInstructionFromIndex(index))
        return MI->getParent();

      MBBIndexIterator I = findMBBIndex(index);
      // Take the pair containing the index
      MBBIndexIterator J =
        ((I != MBBIndexEnd() && I->first > index) ||
         (I == MBBIndexEnd() && !idx2MBBMap.empty())) ? std::prev(I) : I;

      assert(J != MBBIndexEnd() && J->first <= index &&
             index < getMBBEndIdx(J->second) &&
             "index does not correspond to an MBB");
      return J->second;
    }

    /// Insert the given machine instruction into the mapping. Returns the
    /// assigned index.
    /// If Late is set and there are null indexes between mi's neighboring
    /// instructions, create the new index after the null indexes instead of
    /// before them.
    SlotIndex insertMachineInstrInMaps(MachineInstr &MI, bool Late = false) {
      assert(!MI.isInsideBundle() &&
             "Instructions inside bundles should use bundle start's slot.");
      assert(!mi2iMap.contains(&MI) && "Instr already indexed.");
      // Numbering debug instructions could cause code generation to be
      // affected by debug information.
      assert(!MI.isDebugInstr() && "Cannot number debug instructions.");

      assert(MI.getParent() != nullptr && "Instr must be added to function.");

      // Get the entries where MI should be inserted.
      IndexList::iterator prevItr, nextItr;
      if (Late) {
        // Insert MI's index immediately before the following instruction.
        nextItr = getIndexAfter(MI).listEntry()->getIterator();
        prevItr = std::prev(nextItr);
      } else {
        // Insert MI's index immediately after the preceding instruction.
        prevItr = getIndexBefore(MI).listEntry()->getIterator();
        nextItr = std::next(prevItr);
      }

      // Get a number for the new instr, or 0 if there's no room currently.
      // In the latter case we'll force a renumber later.
      unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
      unsigned newNumber = prevItr->getIndex() + dist;

      // Insert a new list entry for MI.
      IndexList::iterator newItr =
          indexList.insert(nextItr, createEntry(&MI, newNumber));

      // Renumber locally if we need to.
      if (dist == 0)
        renumberIndexes(newItr);

      SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
      mi2iMap.insert(std::make_pair(&MI, newIndex));
      return newIndex;
    }
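
    // Worked example (illustrative) of the numbering above: with neighboring
    // entries numbered 16 and 48, dist == ((48 - 16) / 2) & ~3u == 16, so the
    // new instruction gets index 32. With neighbors 16 and 20, dist == 0 and
    // renumberIndexes() is called to make room.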

    /// Removes machine instruction (bundle) \p MI from the mapping.
    /// This should be called before MachineInstr::eraseFromParent() is used to
    /// remove a whole bundle or an unbundled instruction.
    /// If \p AllowBundled is set then this can be used on a bundled
    /// instruction; however, this exists to support handleMoveIntoBundle,
    /// and in general removeSingleMachineInstrFromMaps should be used instead.
    void removeMachineInstrFromMaps(MachineInstr &MI,
                                    bool AllowBundled = false);

    /// Removes a single machine instruction \p MI from the mapping.
    /// This should be called before MachineInstr::eraseFromBundle() is used to
    /// remove a single instruction (out of a bundle).
    void removeSingleMachineInstrFromMaps(MachineInstr &MI);

    /// ReplaceMachineInstrInMaps - Replace a machine instruction with a new one
    /// in the maps used by the register allocator. \returns the index where the
    /// new instruction was inserted.
    SlotIndex replaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
      Mi2IndexMap::iterator mi2iItr = mi2iMap.find(&MI);
      if (mi2iItr == mi2iMap.end())
        return SlotIndex();
      SlotIndex replaceBaseIndex = mi2iItr->second;
      IndexListEntry *miEntry(replaceBaseIndex.listEntry());
      assert(miEntry->getInstr() == &MI &&
             "Mismatched instruction in index tables.");
      miEntry->setInstr(&NewMI);
      mi2iMap.erase(mi2iItr);
      mi2iMap.insert(std::make_pair(&NewMI, replaceBaseIndex));
      return replaceBaseIndex;
    }

    /// Add the given MachineBasicBlock into the maps.
    /// If it contains any instructions then they must already be in the maps.
    /// This is used after a block has been split by moving some suffix of its
    /// instructions into a newly created block.
    void insertMBBInMaps(MachineBasicBlock *mbb) {
      assert(mbb != &mbb->getParent()->front() &&
             "Can't insert a new block at the beginning of a function.");
      auto prevMBB = std::prev(MachineFunction::iterator(mbb));

      // Create a new entry to be used for the start of mbb and the end of
      // prevMBB.
      IndexListEntry *startEntry = createEntry(nullptr, 0);
      IndexListEntry *endEntry = getMBBEndIdx(&*prevMBB).listEntry();
      IndexListEntry *insEntry =
          mbb->empty() ? endEntry
                       : getInstructionIndex(mbb->front()).listEntry();
      IndexList::iterator newItr =
          indexList.insert(insEntry->getIterator(), startEntry);

      SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
      SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);

      MBBRanges[prevMBB->getNumber()].second = startIdx;

      assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
             "Blocks must be added in order");
      MBBRanges.push_back(std::make_pair(startIdx, endIdx));
      idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));

      renumberIndexes(newItr);
      llvm::sort(idx2MBBMap, less_first());
    }
  };

  // Specialize IntervalMapInfo for half-open slot index intervals.
  template <>
  struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> {
  };

} // end namespace llvm

#endif // LLVM_CODEGEN_SLOTINDEXES_H

//===- FastISel.h - Definition of the FastISel class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the FastISel class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_FASTISEL_H
#define LLVM_CODEGEN_FASTISEL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include <cstdint>
#include <utility>

namespace llvm {

class AllocaInst;
class Instruction;
class IntrinsicInst;
class BasicBlock;
class CallInst;
class Constant;
class ConstantFP;
class DataLayout;
class FunctionLoweringInfo;
class LoadInst;
class MachineConstantPool;
class MachineFrameInfo;
class MachineFunction;
class MachineInstr;
class MachineMemOperand;
class MachineOperand;
class MachineRegisterInfo;
class MCContext;
class MCInstrDesc;
class MCSymbol;
class TargetInstrInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class Type;
class User;
class Value;

/// This is a fast-path instruction selection class that generates poor
/// code and doesn't support illegal types or non-trivial lowering, but runs
/// quickly.
class FastISel {
public:
  using ArgListEntry = TargetLoweringBase::ArgListEntry;
  using ArgListTy = TargetLoweringBase::ArgListTy;
  struct CallLoweringInfo {
    Type *RetTy = nullptr;
    bool RetSExt : 1;
    bool RetZExt : 1;
    bool IsVarArg : 1;
    bool IsInReg : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPatchPoint : 1;

    // IsTailCall should be modified by implementations of FastLowerCall
    // that perform tail call conversions.
    bool IsTailCall = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    const Value *Callee = nullptr;
    MCSymbol *Symbol = nullptr;
    ArgListTy Args;
    const CallBase *CB = nullptr;
    MachineInstr *Call = nullptr;
    Register ResultReg;
    unsigned NumResultRegs = 0;

    SmallVector<Value *, 16> OutVals;
    SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
    SmallVector<Register, 16> OutRegs;
    SmallVector<ISD::InputArg, 4> Ins;
    SmallVector<Register, 4> InRegs;

    CallLoweringInfo()
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsPatchPoint(false) {}

    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
                                const Value *Target, ArgListTy &&ArgsList,
                                const CallBase &Call) {
      RetTy = ResultTy;
      Callee = Target;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn = Call.doesNotReturn();
      IsVarArg = FuncTy->isVarArg();
      IsReturnValueUsed = !Call.use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);

      CallConv = Call.getCallingConv();
      Args = std::move(ArgsList);
      NumFixedArgs = FuncTy->getNumParams();

      CB = &Call;

      return *this;
    }

    CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
                                MCSymbol *Target, ArgListTy &&ArgsList,
                                const CallBase &Call,
                                unsigned FixedArgs = ~0U) {
      RetTy = ResultTy;
      Callee = Call.getCalledOperand();
      Symbol = Target;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn = Call.doesNotReturn();
      IsVarArg = FuncTy->isVarArg();
      IsReturnValueUsed = !Call.use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);

      CallConv = Call.getCallingConv();
      Args = std::move(ArgsList);
      NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;

      CB = &Call;

      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
                                const Value *Target, ArgListTy &&ArgsList,
                                unsigned FixedArgs = ~0U) {
      RetTy = ResultTy;
      Callee = Target;
      CallConv = CC;
      Args = std::move(ArgsList);
      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
      return *this;
    }

    CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
                                CallingConv::ID CC, Type *ResultTy,
                                StringRef Target, ArgListTy &&ArgsList,
                                unsigned FixedArgs = ~0U);

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
                                MCSymbol *Target, ArgListTy &&ArgsList,
                                unsigned FixedArgs = ~0U) {
      RetTy = ResultTy;
      Symbol = Target;
      CallConv = CC;
      Args = std::move(ArgsList);
      NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    ArgListTy &getArgs() { return Args; }

    void clearOuts() {
      OutVals.clear();
      OutFlags.clear();
      OutRegs.clear();
    }

    void clearIns() {
      Ins.clear();
      InRegs.clear();
    }
  };
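
  // Illustrative sketch (not part of the original header): targets typically
  // fill in a CallLoweringInfo with the chained setters before handing it to
  // lowerCallTo(). Names and types below are hypothetical:
  //
  // \code
  //   CallLoweringInfo CLI;
  //   CLI.setCallee(CallingConv::C, RetTy, CalleeV, std::move(Args))
  //      .setTailCall(false);
  //   if (!lowerCallTo(CLI))
  //     return false; // fall back to SelectionDAG for this call
  // \endcode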

protected:
  DenseMap<const Value *, Register> LocalValueMap;
  FunctionLoweringInfo &FuncInfo;
  MachineFunction *MF;
  MachineRegisterInfo &MRI;
  MachineFrameInfo &MFI;
  MachineConstantPool &MCP;
  MIMetadata MIMD;
  const TargetMachine &TM;
  const DataLayout &DL;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  const TargetRegisterInfo &TRI;
  const TargetLibraryInfo *LibInfo;
  bool SkipTargetIndependentISel;

  /// The position of the last instruction for materializing constants
  /// for use in the current block. It resets to EmitStartPt when it makes sense
  /// (for example, it's usually profitable to avoid function calls between the
  /// definition and the use).
  MachineInstr *LastLocalValue = nullptr;

  /// The top most instruction in the current block that is allowed for
  /// emitting local variables. LastLocalValue resets to EmitStartPt when it
  /// makes sense (for example, on function calls).
  MachineInstr *EmitStartPt = nullptr;

public:
  virtual ~FastISel();

  /// Return the position of the last instruction emitted for
  /// materializing constants for use in the current block.
  MachineInstr *getLastLocalValue() { return LastLocalValue; }

  /// Update the position of the last instruction emitted for
  /// materializing constants for use in the current block.
  void setLastLocalValue(MachineInstr *I) {
    EmitStartPt = I;
    LastLocalValue = I;
  }

  /// Set the current block to which generated machine instructions will
  /// be appended.
  void startNewBlock();

  /// Flush the local value map.
  void finishBasicBlock();

  /// Return current debug location information.
  DebugLoc getCurDebugLoc() const { return MIMD.getDL(); }

  /// Do "fast" instruction selection for function arguments and append
  /// the machine instructions to the current block. Returns true when
  /// successful.
  bool lowerArguments();

  /// Do "fast" instruction selection for the given LLVM IR instruction
  /// and append the generated machine instructions to the current block.
  /// Returns true if selection was successful.
  bool selectInstruction(const Instruction *I);

  /// Do "fast" instruction selection for the given LLVM IR operator
  /// (Instruction or ConstantExpr), and append generated machine instructions
  /// to the current block. Return true if selection was successful.
  bool selectOperator(const User *I, unsigned Opcode);
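
  // Illustrative sketch (not part of the original header) of the driver loop
  // a client would run over a basic block; names and details are hypothetical:
  //
  // \code
  //   FastIS->startNewBlock();
  //   for (const Instruction &I : *LLVMBB) {
  //     if (!FastIS->selectInstruction(&I))
  //       break; // leave the rest of the block to the slow path
  //   }
  //   FastIS->finishBasicBlock();
  // \endcode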

  /// Create a virtual register and arrange for it to be assigned the
  /// value for the given LLVM value.
  Register getRegForValue(const Value *V);

  /// Look up the value to see if it is already cached in a
  /// register. It may be defined by instructions across blocks or defined
  /// locally.
  Register lookUpRegForValue(const Value *V);

  /// This is a wrapper around getRegForValue that also takes care of
  /// truncating or sign-extending the given getelementptr index value.
  Register getRegForGEPIndex(const Value *Idx);

  /// We're checking to see if we can fold \p LI into \p FoldInst. Note
  /// that we could have a sequence where multiple LLVM IR instructions are
  /// folded into the same machineinstr.  For example we could have:
  ///
  ///   A: x = load i32 *P
  ///   B: y = icmp A, 42
  ///   C: br y, ...
  ///
  /// In this scenario, \p LI is "A", and \p FoldInst is "C".  We know about "B"
  /// (and any other folded instructions) because it is between A and C.
  ///
  /// If we succeed folding, return true.
  bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);

  /// The specified machine instr operand is a vreg, and that vreg is
  /// being provided by the specified load instruction.  If possible, try to
  /// fold the load as an operand of the instruction, returning true on
  /// success.
  ///
  /// This method should be implemented by targets.
  virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
                                   const LoadInst * /*LI*/) {
    return false;
  }

  /// Reset InsertPt to prepare for inserting instructions into the
  /// current block.
  void recomputeInsertPt();

  /// Remove all dead instructions between the I and E.
  void removeDeadCode(MachineBasicBlock::iterator I,
                      MachineBasicBlock::iterator E);

  using SavePoint = MachineBasicBlock::iterator;

  /// Prepare InsertPt to begin inserting instructions into the local
  /// value area and return the old insert position.
  SavePoint enterLocalValueArea();

  /// Reset InsertPt to the given old insert position.
  void leaveLocalValueArea(SavePoint Old);

protected:
  explicit FastISel(FunctionLoweringInfo &FuncInfo,
                    const TargetLibraryInfo *LibInfo,
                    bool SkipTargetIndependentISel = false);

  /// This method is called by target-independent code when the normal
  /// FastISel process fails to select an instruction. This gives targets a
  /// chance to emit code for anything that doesn't fit into FastISel's
  /// framework. It returns true if it was successful.
  virtual bool fastSelectInstruction(const Instruction *I) = 0;

  /// This method is called by target-independent code to do target-
  /// specific argument lowering. It returns true if it was successful.
  virtual bool fastLowerArguments();

  /// This method is called by target-independent code to do target-
  /// specific call lowering. It returns true if it was successful.
  virtual bool fastLowerCall(CallLoweringInfo &CLI);

  /// This method is called by target-independent code to do target-
  /// specific intrinsic lowering. It returns true if it was successful.
  virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type and opcode be emitted.
  virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and register operand be emitted.
  virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and register operands be emitted.
  virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
                               unsigned Op1);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and register and immediate
  /// operands be emitted.
  virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
                               uint64_t Imm);

  /// This method is a wrapper of fastEmit_ri.
  ///
  /// It first tries to emit an instruction with an immediate operand using
  /// fastEmit_ri.  If that fails, it materializes the immediate into a register
  /// and tries fastEmit_rr instead.
  Register fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, uint64_t Imm,
                        MVT ImmType);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and immediate operand be emitted.
  virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);

  /// This method is called by target-independent code to request that an
  /// instruction with the given type, opcode, and floating-point immediate
  /// operand be emitted.
  virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
                              const ConstantFP *FPImm);

  /// Emit a MachineInstr with no operands and a result register in the
  /// given register class.
  Register fastEmitInst_(unsigned MachineInstOpcode,
                         const TargetRegisterClass *RC);

  /// Emit a MachineInstr with one register operand and a result register
  /// in the given register class.
  Register fastEmitInst_r(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, unsigned Op0);

  /// Emit a MachineInstr with two register operands and a result
  /// register in the given register class.
  Register fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC, unsigned Op0,
                           unsigned Op1);

  /// Emit a MachineInstr with three register operands and a result
  /// register in the given register class.
  Register fastEmitInst_rrr(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
                            unsigned Op1, unsigned Op2);

  /// Emit a MachineInstr with a register operand, an immediate, and a
  /// result register in the given register class.
  Register fastEmitInst_ri(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC, unsigned Op0,
                           uint64_t Imm);

  /// Emit a MachineInstr with one register operand and two immediate
  /// operands.
  Register fastEmitInst_rii(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
                            uint64_t Imm1, uint64_t Imm2);

  /// Emit a MachineInstr with a floating point immediate, and a result
  /// register in the given register class.
  Register fastEmitInst_f(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC,
                          const ConstantFP *FPImm);

  /// Emit a MachineInstr with two register operands, an immediate, and a
  /// result register in the given register class.
  Register fastEmitInst_rri(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0,
                            unsigned Op1, uint64_t Imm);

  /// Emit a MachineInstr with a single immediate operand, and a result
  /// register in the given register class.
  Register fastEmitInst_i(unsigned MachineInstOpcode,
                          const TargetRegisterClass *RC, uint64_t Imm);

  /// Emit a MachineInstr for an extract_subreg from a specified index of
  /// a superregister to a specified type.
  Register fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, uint32_t Idx);

  /// Emit MachineInstrs to compute the value of Op with all but the
  /// least significant bit set to zero.
  Register fastEmitZExtFromI1(MVT VT, unsigned Op0);

  /// Emit an unconditional branch to the given block, unless it is the
  /// immediate (fall-through) successor, and update the CFG.
  void fastEmitBranch(MachineBasicBlock *MSucc, const DebugLoc &DbgLoc);

  /// Emit an unconditional branch to \p FalseMBB, obtain the branch weight,
  /// and add \p TrueMBB and \p FalseMBB to the successor list.
  void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
                        MachineBasicBlock *FalseMBB);

  /// Update the value map to include the new mapping for this
  /// instruction, or insert an extra copy to get the result in a previously
  /// determined register.
  ///
  /// NOTE: This is only necessary because we might select a block that uses a
  /// value before we select the block that defines the value. It might be
  /// possible to fix this by selecting blocks in reverse postorder.
  void updateValueMap(const Value *I, Register Reg, unsigned NumRegs = 1);

  Register createResultReg(const TargetRegisterClass *RC);

  /// Try to constrain Op so that it is usable by argument OpNum of the
  /// provided MCInstrDesc. If this fails, create a new virtual register in the
  /// correct class and COPY the value there.
  Register constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                    unsigned OpNum);

  /// Emit a constant in a register using target-specific logic, such as
  /// constant pool loads.
  virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }

  /// Emit an alloca address in a register using target-specific logic.
  virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }

  /// Emit the floating-point constant +0.0 in a register using target-
  /// specific logic.
  virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
    return 0;
  }

  /// Check if \c Add is an add that can be safely folded into \c GEP.
  ///
  /// \c Add can be folded into \c GEP if:
  /// - \c Add is an add,
  /// - \c Add's size matches \c GEP's,
  /// - \c Add is in the same basic block as \c GEP, and
  /// - \c Add has a constant operand.
  bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
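
  // Illustrative IR example (not from the original header) that satisfies the
  // conditions above, assuming both instructions are in the same block:
  //
  //   %sum = add i64 %base, 16
  //   %gep = getelementptr i8, ptr %p, i64 %sum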

  /// Create a machine mem operand from the given instruction.
  MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;

  CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;

  bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
  bool lowerCallTo(const CallInst *CI, const char *SymName,
                   unsigned NumArgs);
  bool lowerCallTo(CallLoweringInfo &CLI);

  bool lowerCall(const CallInst *I);
  /// Select and emit code for a binary operator instruction, which has
  /// an opcode which directly corresponds to the given ISD opcode.
  bool selectBinaryOp(const User *I, unsigned ISDOpcode);
  bool selectFNeg(const User *I, const Value *In);
  bool selectGetElementPtr(const User *I);
  bool selectStackmap(const CallInst *I);
  bool selectPatchpoint(const CallInst *I);
  bool selectCall(const User *I);
  bool selectIntrinsicCall(const IntrinsicInst *II);
  bool selectBitCast(const User *I);
  bool selectFreeze(const User *I);
  bool selectCast(const User *I, unsigned Opcode);
  bool selectExtractValue(const User *U);
  bool selectXRayCustomEvent(const CallInst *II);
  bool selectXRayTypedEvent(const CallInst *II);

  bool shouldOptForSize(const MachineFunction *MF) const {
    // TODO: Implement PGSO.
    return MF->getFunction().hasOptSize();
  }

private:
  /// Handle PHI nodes in successor blocks.
  ///
  /// Emit code to ensure constants are copied into registers when needed.
  /// Remember the virtual registers that need to be added to the Machine PHI
  /// nodes as input.  We cannot just directly add them, because expansion might
  /// result in multiple MBB's for one BB.  As such, the start of the BB might
  /// correspond to a different MBB than the end.
  bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  /// Helper for materializeRegForValue to materialize a constant in a
  /// target-independent way.
  Register materializeConstant(const Value *V, MVT VT);

  /// Helper for getRegForValue. This function is called when the value
  /// isn't already available in a register and must be materialized with new
  /// instructions.
  Register materializeRegForValue(const Value *V, MVT VT);

  /// Clears LocalValueMap and moves the area for the new local variables
  /// to the beginning of the block. It helps to avoid spilling cached variables
  /// across heavy instructions like calls.
  void flushLocalValueMap();

  /// Removes dead local value instructions after SavedLastLocalValue.
  void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);

  /// Insertion point before trying to select the current instruction.
  MachineBasicBlock::iterator SavedInsertPt;

  /// Add a stackmap or patchpoint intrinsic call's live variable
  /// operands to a stackmap or patchpoint machine instruction.
  bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                           const CallInst *CI, unsigned StartIdx);
  bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
                         const Value *Callee, bool ForceRetVoidTy,
                         CallLoweringInfo &CLI);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_FASTISEL_H

//===- BasicBlockSectionUtils.h - Utilities for basic block sections -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H
#define LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {

extern cl::opt<std::string> BBSectionsColdTextPrefix;

class MachineFunction;
class MachineBasicBlock;

using MachineBasicBlockComparator =
    function_ref<bool(const MachineBasicBlock &, const MachineBasicBlock &)>;

void sortBasicBlocksAndUpdateBranches(MachineFunction &MF,
                                      MachineBasicBlockComparator MBBCmp);

void avoidZeroOffsetLandingPad(MachineFunction &MF);
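
// Illustrative sketch (not part of the original header): callers pass an
// ordering predicate over blocks; a hypothetical layout that simply orders
// blocks by number could look like:
//
// \code
//   sortBasicBlocksAndUpdateBranches(
//       MF, [](const MachineBasicBlock &A, const MachineBasicBlock &B) {
//         return A.getNumber() < B.getNumber();
//       });
//   avoidZeroOffsetLandingPad(MF);
// \endcode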

} // end namespace llvm

#endif // LLVM_CODEGEN_BASICBLOCKSECTIONUTILS_H

//=- llvm/CodeGen/MultiHazardRecognizer.h - Scheduling Support ----*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MultiHazardRecognizer class, which is a wrapper
// for a set of ScheduleHazardRecognizer instances
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H
#define LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"

namespace llvm {

class MachineInstr;
class SUnit;

class MultiHazardRecognizer : public ScheduleHazardRecognizer {
  SmallVector<std::unique_ptr<ScheduleHazardRecognizer>, 4> Recognizers;

public:
  MultiHazardRecognizer() = default;
  void AddHazardRecognizer(std::unique_ptr<ScheduleHazardRecognizer> &&);

  bool atIssueLimit() const override;
  HazardType getHazardType(SUnit *, int Stalls = 0) override;
  void Reset() override;
  void EmitInstruction(SUnit *) override;
  void EmitInstruction(MachineInstr *) override;
  unsigned PreEmitNoops(SUnit *) override;
  unsigned PreEmitNoops(MachineInstr *) override;
  bool ShouldPreferAnother(SUnit *) override;
  void AdvanceCycle() override;
  void RecedeCycle() override;
  void EmitNoop() override;
};
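
/// Example (illustrative sketch): composing two recognizers so that a hazard
/// reported by either one is honored. The concrete recognizer types shown
/// here are hypothetical placeholders for target-specific implementations.
/// \code
///   auto MHR = std::make_unique<MultiHazardRecognizer>();
///   MHR->AddHazardRecognizer(std::make_unique<MyCoreHazardRecognizer>());
///   MHR->AddHazardRecognizer(std::make_unique<MyFPUHazardRecognizer>());
/// \endcode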

} // end namespace llvm

#endif // LLVM_CODEGEN_MULTIHAZARDRECOGNIZER_H

//===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file pulls in all codegen related passes for tools like lli and
// llc that need this functionality.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H

#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include <cstdlib>

namespace {
  struct ForceCodegenLinking {
    ForceCodegenLinking() {
      // We must reference the passes in such a way that compilers will not
      // delete them as dead code, even with whole program optimization,
      // yet the references are effectively NO-OPs. As the compiler isn't
      // smart enough to know that getenv() never returns -1, this will do
      // the job.
      // This is so that globals in the translation units where these functions
      // are defined are forced to be initialized, populating various
      // registries.
      if (std::getenv("bar") != (char*) -1)
        return;

      (void) llvm::createFastRegisterAllocator();
      (void) llvm::createBasicRegisterAllocator();
      (void) llvm::createGreedyRegisterAllocator();
      (void) llvm::createDefaultPBQPRegisterAllocator();

      (void) llvm::createBURRListDAGScheduler(nullptr,
                                              llvm::CodeGenOpt::Default);
      (void) llvm::createSourceListDAGScheduler(nullptr,
                                                llvm::CodeGenOpt::Default);
      (void) llvm::createHybridListDAGScheduler(nullptr,
                                                llvm::CodeGenOpt::Default);
      (void) llvm::createFastDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
      (void) llvm::createDefaultScheduler(nullptr, llvm::CodeGenOpt::Default);
      (void) llvm::createVLIWDAGScheduler(nullptr, llvm::CodeGenOpt::Default);

    }
  } ForceCodegenLinking; // Force link by creating a global definition.
}

#endif

//===- PreISelIntrinsicLowering.h - Pre-ISel intrinsic lowering pass ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements IR lowering for the llvm.load.relative and llvm.objc.*
// intrinsics.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_PREISELINTRINSICLOWERING_H
#define LLVM_CODEGEN_PREISELINTRINSICLOWERING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;
class TargetMachine;

struct PreISelIntrinsicLoweringPass
    : PassInfoMixin<PreISelIntrinsicLoweringPass> {
  const TargetMachine &TM;

  PreISelIntrinsicLoweringPass(const TargetMachine &TM) : TM(TM) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_PREISELINTRINSICLOWERING_H

//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize    // Linearize DAG, no scheduling
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, the memset clears memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if the
                     // memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
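
// Example (illustrative sketch): describing a 32-byte, non-volatile memcpy
// whose destination alignment is fixed at 8 bytes and whose source is known
// to be 4-byte aligned:
//
//   MemOp Op = MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   assert(Op.isMemcpy() && Op.isDstAligned(Align(8)));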

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    LibCall,    // Don't try to expand this to other ops, always use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,      // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal,  // The target supports selects with a scalar condition
                          // and vector values (ex: cmov).
    VectorMaskSelect      // The target supports vector selects with a vector
                          // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,    // Don't expand the instruction.
    CastToInteger,    // Cast the atomic instruction to another type, e.g. from
                      // floating-point to integer type.
    LLSC,    // Expand the instruction into load-linked/store-conditional; used
             // by ARM/AArch64.
    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
             // greater atomic guarantees than a normal load.
    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                      // operations; used by X86.
    CmpArithIntrinsic,// Use a target-specific intrinsic for special compare
                      // operations; used by X86.
    Expand,           // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,    // Negated expression is cheaper.
    Neutral = 1,    // Negated expression has the same cost.
    Expensive = 2   // Negated expression is more expensive.
  };

  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
  /// (setcc ...)).
  enum AndOrSETCCFoldKind : uint8_t {
    None = 0,   // No fold is preferable.
    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
    ABS = 4,    // Fold with `llvm.abs` op is preferable.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {}

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults to
  /// the pointer type from the data layout.  FIXME: The default needs to be
  /// removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted amount
  /// type. Targets should return a legal type if the input type is legal.
  /// Targets can return a type that is too small if the input type is illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, behavior depends on \p LegalTypes. If
  /// \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
  /// should be set to true for calls during type legalization and after type
  /// legalization has been completed.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them.  The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  /// This callback is used to inspect load/store SDNode.
  /// The default implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
  /// using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote
    return TypePromoteInteger;
  }

  // Return true if the half type should be passed around as i16, but promoted
  // to float around arithmetic. The default behavior is to pass around as
  // float and convert around loads/stores/bitcasts and other places where
  // the size matters.
  virtual bool softPromoteHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type and the number of defined values are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }
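
  // For example, with the default heuristic a BUILD_VECTOR with two defined
  // scalar values is expanded with SCALAR_TO_VECTOR and shuffles (method (1)),
  // while one with three or more defined values is built on the stack and
  // reloaded (method (2)).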

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  /// (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to store a vector constant
  /// of the given size and type for the address space than to store the
  /// individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in code
  /// gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged integer value
  /// built from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
  virtual bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const {
    return true;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to compare
  /// integer values of the given size. Assume that any legal integer type can
  /// be compared efficiently. Targets may override this to allow illegal wide
  /// types to return a vector type if there is support to compare that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  /// (X & Y) == Y ---> (~X & Y) == 0
  /// (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  /// (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  /// X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume that
    // it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  /// Mask:    x &  (-1 << y)  (the instcombine canonical form)
  /// Shifts:  x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether a given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }
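
  // Worked example: with C = 0b0100 and OldShiftOpcode == ISD::SRL, the fold
  // rewrites (X & (0b0100 l>> Y)) != 0 into ((X << Y) & 0b0100) != 0, so the
  // variable shift moves onto the non-constant operand X and the 'and' mask
  // becomes a plain constant.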

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
  // may want to avoid this to prevent loss of sub_nsw pattern.
  virtual bool preferABDSToABSWithNSW(EVT VT) const {
    return true;
  }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1.  Some CPUs
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds.  For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some CPUs also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the bits
  /// of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }
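
  /// Example (illustrative sketch; \c TLI, \c DAG and \c Cond are assumed to
  /// be in scope): widening a boolean \c Cond that was produced while lowering
  /// f32 values, extending it according to the target's boolean contents:
  /// \code
  ///   SDValue Widened = TLI.promoteTargetBoolean(DAG, Cond, MVT::f32);
  /// \endcode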

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling heuristics
  /// for different nodes. This function returns the preference (or none) for
  /// the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type.  For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to.  For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand').  'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to.  For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function.  For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
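
  // Worked example: on a target whose widest legal integer type is i32,
  // getTypeToExpandTo walks i256 -> i128 -> i64 -> i32, taking one
  // TypeExpandInteger step per iteration, and returns i32.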

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack.  Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register.  It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned     opc = 0;          // target opcode
    EVT          memVT;            // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int          offset = 0;       // offset off of ptrVal
    uint64_t     size = 0;         // the size of the memory location
                                   // (taken from memVT if zero)
    MaybeAlign align = Align(1);   // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and store the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks.  By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }
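
  // Example (illustrative sketch; TLI is an assumed TargetLoweringBase
  // reference): querying whether a saturating signed fixed-point multiply on
  // i32 with a scale of 15 is supported; Expand is returned when it is not.
  //
  //   LegalizeAction A =
  //       TLI.getFixedPointOperationAction(ISD::SMULFIXSAT, MVT::i32,
  //                                        /*Scale=*/15);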

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
      default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
      case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases covering a range of
  /// \p Range values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns the preferred type for the switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
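  ///
  /// For example, under the default heuristic below, two destinations reached
  /// by five case-value comparisons over a word-sized range are suitable,
  /// while two destinations reached by only four comparisons are not.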
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
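  ///
  /// Each (ValVT, MemVT) table entry packs one 4-bit action per extension
  /// type, so ExtType selects bits [4*ExtType, 4*ExtType+3] of the entry.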
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
  }

  /// Return true if the specified store with truncation is legal or can be
  /// custom-lowered on this target.
  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) &&
      (getTruncStoreAction(ValVT, MemVT) == Legal ||
       getTruncStoreAction(ValVT, MemVT) == Custom);
  }

  virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
                                    bool LegalOnly) const {
    if (LegalOnly)
      return isTruncStoreLegal(ValVT, MemVT);

    return isTruncStoreLegalOrCustom(ValVT, MemVT);
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Load);
  }

  /// Return true if the specified indexed load is legal or custom on this
  /// target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_Store);
  }

  /// Return true if the specified indexed store is legal or custom on this
  /// target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked load should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
  }

  /// Return true if the specified indexed masked load is legal or custom on
  /// this target.
  bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed masked store should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
    return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
  }

  /// Return true if the specified indexed masked store is legal or custom on
  /// this target.
  bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
           (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
            getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Returns true if the index type for a masked gather/scatter requires
  /// extending.
  virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }

  // Returns true if IndexVT is a legal index type for masked gathers/scatters
  // of DataVT on this target, i.e. an extend of the index can be removed.
  virtual bool shouldRemoveExtendFromGSIndex(EVT IndexVT, EVT DataVT) const {
    return false;
  }

  // Return true if the target supports a scatter/gather instruction with
  // indices which are scaled by the particular value.  Note that all targets
  // must, by definition, support a scale of 1.
  virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
                                            uint64_t ElemSize) const {
    // MGATHER/MSCATTER are only required to support scaling by one or by the
    // element size.
    if (Scale != ElemSize && Scale != 1)
      return false;
    return true;
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
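  ///
  /// The actions are packed four bits per type: the low 3 bits of VT.SimpleTy
  /// select a nibble and the remaining bits select which 32-bit word of
  /// CondCodeActions[CC] holds it; e.g. SimpleTy == 11 reads bits [12,15] of
  /// CondCodeActions[CC][1].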
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < std::size(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal;
  }

  /// Return true if the specified condition code is legal or custom on this
  /// target.
  bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
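  /// For example, if an i16 operation is marked Promote and no explicit type
  /// was registered with AddPromotedToType, this walks successively larger
  /// integer types (i32, i64, ...) until one is legal and not itself marked
  /// Promote for Op.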
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
                                     bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown);
  }

  /// Return the EVT corresponding to this LLVM type. The mapping is fixed by
  /// the LLVM type itself, except for pointers, whose EVT depends on the
  /// target's pointer size for the address space. If AllowUnknown is true,
  /// this will return MVT::Other for types with no EVT counterpart (e.g.
  /// structs), otherwise it will assert.
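  ///
  /// For example, on a target with 64-bit pointers in address space 0, a
  /// scalar pointer lowers to MVT::i64 and a vector of four such pointers to
  /// v4i64, while a struct yields MVT::Other when AllowUnknown is true.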
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return EVT::getEVT(Ty, AllowUnknown);
  }

  EVT getMemValueType(const DataLayout &DL, Type *Ty,
                      bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (auto *PTy = dyn_cast<PointerType>(Ty))
      return getPointerMemTy(DL, PTy->getAddressSpace());

    if (auto *VTy = dyn_cast<VectorType>(Ty)) {
      Type *EltTy = VTy->getElementType();
      if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
        EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
        EltTy = PointerTy.getTypeForEVT(Ty->getContext());
      }
      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
                              VTy->getElementCount());
    }

    return getValueType(DL, Ty, AllowUnknown);
  }


  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area.  This is the actual alignment, not
  /// its logarithm.
  virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple())
      return getRegisterType(VT.getSimpleVT());
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces.  For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type.  For an i140
  /// on a 32 bit machine this means 5 registers.
  ///
  /// RegisterVT may be passed as a way to override the default settings, for
  /// instance with i128 inline assembly operands on SystemZ.
  virtual unsigned
  getNumRegisters(LLVMContext &Context, EVT VT,
                  std::optional<MVT> RegisterVT = std::nullopt) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             std::size(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Certain combinations of ABIs, targets and features require that types
  /// are legal for some operations and not for other operations.
  /// For MIPS all vector types must be passed through the integer register set.
  virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                            CallingConv::ID CC, EVT VT) const {
    return getRegisterType(Context, VT);
  }

  /// Certain targets require unusual breakdowns of certain types. For MIPS,
  /// this occurs when a vector type is used, as vectors are passed through the
  /// integer register set.
  virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                                 CallingConv::ID CC,
                                                 EVT VT) const {
    return getNumRegisters(Context, VT);
  }

  /// Certain targets have context sensitive alignment requirements, where one
  /// type has the alignment requirement of another type.
  virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
                                              const DataLayout &DL) const {
    return DL.getABITypeAlign(ArgTy);
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// Return true if it is profitable to reduce a load to a smaller type.
  /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
  virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    // By default, assume that it is cheaper to extract a subvector from a wide
    // vector load rather than creating multiple narrow vector loads.
    if (NewVT.isVector() && !Load->hasOneUse())
      return false;

    return true;
  }

  /// Return true (the default) if it is profitable to remove a sext_inreg(x)
  /// where the sext is redundant, and use x directly.
  virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first?  This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// Returns the size of the platform's va_list object.
  virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
    return getPointerTy(DL).getSizeInBits();
  }

  /// Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations to be glued together
  ///
  /// This function returns the maximum number of store operations permitted
  /// to glue together during lowering of llvm.memcpy. The value is set by
  /// the target at the performance threshold for such a replacement.
  virtual unsigned getMaxGluedStoresPerMemcpy() const {
    return MaxGluedStoresPerMemcpy;
  }

  /// Get maximum # of load operations permitted for memcmp
  ///
  /// This function returns the maximum number of load operations permitted
  /// to replace a call to memcmp. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }

  /// Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// a relative speed of the unaligned memory access in the last argument by
  /// reference. The higher the speed number, the faster the operation compared
  /// to one returned by another such call. This is used, for example, in
  /// situations where an array copy/move/set is converted to a sequence of
  /// store operations. Its use helps to ensure that such replacements don't
  /// generate code that causes an alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(
      EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned * /*Fast*/ = nullptr) const {
    return false;
  }

  /// LLT handling variant.
  virtual bool allowsMisalignedMemoryAccesses(
      LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned * /*Fast*/ = nullptr) const {
    return false;
  }

  /// This function returns true if the memory access is aligned or if the
  /// target allows this specific unaligned memory access. If the access is
  /// allowed, the optional final parameter returns a relative speed of the
  /// access (as defined by the target).
  bool allowsMemoryAccessForAlignment(
      LLVMContext &Context, const DataLayout &DL, EVT VT,
      unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      unsigned *Fast = nullptr) const;

  /// Return true if the memory access of this type is aligned or if the target
  /// allows this specific unaligned access for the given MachineMemOperand.
  /// If the access is allowed, the optional final parameter returns a relative
  /// speed of the access (as defined by the target).
  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
                                      const DataLayout &DL, EVT VT,
                                      const MachineMemOperand &MMO,
                                      unsigned *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns the relative speed of the access (as defined by
  /// the target).
  virtual bool
  allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                     unsigned AddrSpace = 0, Align Alignment = Align(1),
                     MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
                     unsigned *Fast = nullptr) const;

  /// Return true if the target supports a memory access of this type for the
  /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns the relative access speed (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          const MachineMemOperand &MMO,
                          unsigned *Fast = nullptr) const;

  /// LLT handling variant.
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
                          const MachineMemOperand &MMO,
                          unsigned *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT
  getOptimalMemOpType(const MemOp &Op,
                      const AttributeList & /*FuncAttributes*/) const {
    return MVT::Other;
  }

  /// LLT returning variant.
  virtual LLT
  getOptimalMemOpLLT(const MemOp &Op,
                     const AttributeList & /*FuncAttributes*/) const {
    return LLT();
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Return the lower limit for the number of entries in a jump table.
  virtual unsigned getMinimumJumpTableEntries() const;

  /// Return lower limit of the density in a jump table.
  unsigned getMinimumJumpTableDensity(bool OptForSize) const;

  /// Return upper limit for number of entries in a jump table.
  /// Zero if no limit.
  unsigned getMaximumJumpTableSize() const;

  virtual bool isJumpTableRelative() const;

  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  Register getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    return Register();
  }

  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }

  /// Return the minimum stack alignment of an argument.
  Align getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  Align getMinFunctionAlignment() const { return MinFunctionAlignment; }

  /// Return the preferred function alignment.
  Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }

  /// Return the preferred loop alignment.
  virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;

  /// Return the maximum amount of bytes allowed to be emitted when padding for
  /// alignment
  virtual unsigned
  getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;

  /// Should loops be aligned even when the function is marked OptSize (but not
  /// MinSize).
  virtual bool alignLoopsWithOptSize() const { return false; }

  /// If the target has a standard location for the stack protector guard,
  /// returns the address of that location. Otherwise, returns nullptr.
  /// DEPRECATED: please override useLoadStackGuardNode and customize
  ///             LOAD_STACK_GUARD, or customize \@llvm.stackguard().
  virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;

  /// Inserts necessary declarations for SSP (stack protection) purpose.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual void insertSSPDeclarations(Module &M) const;

  /// Return the variable that's previously inserted by insertSSPDeclarations,
  /// if any, otherwise return nullptr. Should be used only when
  /// getIRStackGuard returns nullptr.
  virtual Value *getSDagStackGuard(const Module &M) const;

  /// If this function returns true, stack protection checks should XOR the
  /// frame pointer (or whichever pointer is used to address locals) into the
  /// stack guard value before checking it. getIRStackGuard must return nullptr
  /// if this returns true.
  virtual bool useStackGuardXorFP() const { return false; }

  /// If the target has a standard stack protection check function that
  /// performs validation and error handling, returns the function. Otherwise,
  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
  /// Should be used only when getIRStackGuard returns nullptr.
  virtual Function *getSSPStackGuardCheck(const Module &M) const;

  /// \returns true if a constant G_UBFX is legal on the target.
  virtual bool isConstantUnsignedBitfieldExtractLegal(unsigned Opc, LLT Ty1,
                                                      LLT Ty2) const {
    return false;
  }

protected:
  Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
                                            bool UseTLS) const;

public:
  /// Returns the target-specific address of the unsafe stack pointer.
  virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;

  /// Returns true if the target uses a symbol to emit stack probes.
  virtual bool hasStackProbeSymbol(const MachineFunction &MF) const {
    return false;
  }

  /// Returns true if the target emits stack probes inline, rather than by
  /// calling a symbol.
  virtual bool hasInlineStackProbe(const MachineFunction &MF) const {
    return false;
  }

  /// Returns the name of the symbol used to emit stack probes, or the empty
  /// string if not applicable.
  virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
    return "";
  }

  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
  /// are happy to sink it into basic blocks. A cast may be free, but not
  /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
  virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to the
  /// minimum size the object must be to be aligned and PrefAlign is set to the
  /// preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      Align & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Returns the maximum atomic operation size (in bits) supported by
  /// the backend. Atomic operations greater than this size (as well
  /// as ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  unsigned getMaxAtomicSizeInBitsSupported() const {
    return MaxAtomicSizeInBitsSupported;
  }

  /// Returns the size in bits of the maximum div/rem the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned getMaxDivRemBitWidthSupported() const {
    return MaxDivRemBitWidthSupported;
  }

  /// Returns the size in bits of the maximum fp convert the backend supports.
  /// Larger operations will be expanded by ExpandLargeFPConvert.
  unsigned getMaxLargeFPConvertBitWidthSupported() const {
    return MaxLargeFPConvertBitWidthSupported;
  }

  /// Returns the size of the smallest cmpxchg or ll/sc instruction
  /// the backend supports.  Any smaller operations are widened in
  /// AtomicExpandPass.
  ///
  /// Note that *unlike* operations above the maximum size, atomic ops
  /// are still natively supported below the minimum; they just
  /// require a more complex expansion.
  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }

  /// Whether the target supports unaligned atomic operations.
  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }

  /// Whether AtomicExpandPass should automatically insert fences and reduce
  /// ordering for this atomic. This should be true for most architectures with
  /// weak memory ordering. Defaults to false.
  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
    return false;
  }

  /// Whether AtomicExpandPass should automatically insert a trailing fence
  /// without reducing the ordering for this atomic. Defaults to false.
  virtual bool
  shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
    return false;
  }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
                                Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Perform a masked atomicrmw using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
                                              AtomicRMWInst *AI,
                                              Value *AlignedAddr, Value *Incr,
                                              Value *Mask, Value *ShiftAmt,
                                              AtomicOrdering Ord) const {
    llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw expansion in a target-specific way. This is
  /// expected to be called when masked atomicrmw and bit test atomicrmw don't
  /// work, and the target supports another way to lower atomicrmw.
  virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Generic atomicrmw expansion unimplemented on this target");
  }

  /// Perform a bit test atomicrmw using a target-specific intrinsic. This
  /// represents the combined bit test intrinsic which will be lowered at a late
  /// stage by the backend.
  virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Bit test atomicrmw expansion unimplemented on this target");
  }

  /// Perform an atomicrmw whose result is only used by a comparison, using a
  /// target-specific intrinsic. This represents the combined atomic and compare
  /// intrinsic which will be lowered at a late stage by the backend.
  virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
    llvm_unreachable(
        "Compare arith atomicrmw expansion unimplemented on this target");
  }

  /// Perform a masked cmpxchg using a target-specific intrinsic. This
  /// represents the core LL/SC loop which will be lowered at a late stage by
  /// the backend. The target-specific intrinsic returns the loaded value and
  /// is not responsible for masking and shifting the result.
  virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
  }

  //===--------------------------------------------------------------------===//
  /// \name KCFI check lowering.
  /// @{

  virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
                                      MachineBasicBlock::instr_iterator &MBBI,
                                      const TargetInstrInfo *TII) const {
    llvm_unreachable("KCFI is not supported on this target");
  }

  /// @}

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  ///   AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
  ///   if shouldInsertFencesForAtomic returns true.
  ///
  /// Inst is the original atomic instruction, prior to other expansions that
  /// may be performed.
  ///
  /// This function should either return a nullptr, or a pointer to an IR-level
  ///   Instruction*. Even complex fence sequences can be represented by a
  ///   single Instruction* through an intrinsic to be lowered later.
  /// Backends should override this method to produce target-specific intrinsic
  ///   for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  ///   IR-level fences exists for historical/compatibility reasons and is
  ///   *unsound* ! Fences cannot, in general, be used to restore sequential
  ///   consistency. For example, consider the following litmus test:
  /// atomic<int> x = y = 0;
  /// int r1, r2, r3, r4;
  /// Thread 0:
  ///   x.store(1);
  /// Thread 1:
  ///   y.store(1);
  /// Thread 2:
  ///   r1 = x.load();
  ///   r2 = y.load();
  /// Thread 3:
  ///   r3 = y.load();
  ///   r4 = x.load();
  ///  r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  ///  seq_cst. But if they are lowered to monotonic accesses, no amount of
  ///  IR-level fences can prevent it.
  /// @{
  virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
                                        Instruction *Inst,
                                        AtomicOrdering Ord) const;

  virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
                                         Instruction *Inst,
                                         AtomicOrdering Ord) const;
  /// @}

  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute.  This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}

  /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
    return IsSigned;
  }

  /// Returns true if arguments should be extended in lib calls.
  virtual bool shouldExtendTypeInLibCall(EVT Type) const {
    return true;
  }

  /// Returns how the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass.
  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the given (atomic) load should be cast by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
    if (LI->getType()->isFloatingPointTy())
      return AtomicExpansionKind::CastToInteger;
    return AtomicExpansionKind::None;
  }

  /// Returns how the given (atomic) store should be expanded by the IR-level
  /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
  /// to use an atomicrmw xchg.
  virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the given (atomic) store should be cast by the IR-level
  /// AtomicExpand pass into. For instance AtomicExpansionKind::CastToInteger
  /// will try to cast the operands to integer values.
  virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
    if (SI->getValueOperand()->getType()->isFloatingPointTy())
      return AtomicExpansionKind::CastToInteger;
    return AtomicExpansionKind::None;
  }

  /// Returns how the given atomic cmpxchg should be expanded by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns how the IR-level AtomicExpand pass should expand the given
  /// AtomicRMW, if at all. Default is to never expand.
  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
    return RMW->isFloatingPointOperation() ?
      AtomicExpansionKind::CmpXChg : AtomicExpansionKind::None;
  }

  /// Returns how the given atomic atomicrmw should be cast by the IR-level
  /// AtomicExpand pass.
  virtual AtomicExpansionKind
  shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
    if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
        (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
         RMWI->getValOperand()->getType()->isPointerTy()))
      return AtomicExpansionKind::CastToInteger;

    return AtomicExpansionKind::None;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
  /// SIGN_EXTEND, or ANY_EXTEND).
  virtual ISD::NodeType getExtendForAtomicOps() const {
    return ISD::ZERO_EXTEND;
  }

  /// Returns how the platform's atomic compare and swap expects its comparison
  /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
  /// separate from getExtendForAtomicOps, which is concerned with the
  /// sign-extension of the instruction's output, whereas here we are concerned
  /// with the sign-extension of the input. For targets with compare-and-swap
  /// instructions (or sub-word comparisons in their LL/SC loop expansions),
  /// the input can be ANY_EXTEND, but the output will still have a specific
  /// extension.
  virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
    return ISD::ANY_EXTEND;
  }

  /// @}

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
      Action != TypeSplitVector;
  }

  virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }

  /// Return true if a select of constants (select Cond, C1, C2) should be
  /// transformed into simple math ops with the condition value. For example:
  /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
    return false;
  }

  /// Return true if it is profitable to transform an integer
  /// multiplication-by-constant into simpler operations like shifts and adds.
  /// This may be true if the target does not directly support the
  /// multiplication operation for the specified type or the sequence of simpler
  /// ops is faster than the multiply.
  virtual bool decomposeMulByConstant(LLVMContext &Context,
                                      EVT VT, SDValue C) const {
    return false;
  }

  /// Return true if it may be profitable to transform
  /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
  /// This may not be true if c1 and c2 can be represented as immediates but
  /// c1*c2 cannot, for example.
  /// The target should check if c1, c2 and c1*c2 can be represented as
  /// immediates, or have to be materialized into registers. If it is not sure
  /// about some cases, a default true can be returned to let the DAGCombiner
  /// decide.
  /// AddNode is (add x, c1), and ConstNode is c2.
  virtual bool isMulAddWithConstProfitable(SDValue AddNode,
                                           SDValue ConstNode) const {
    return true;
  }

  /// Return true if it is more correct/profitable to use strict FP_TO_INT
  /// conversion operations - canonicalizing the FP source value instead of
  /// converting all cases and then selecting based on value.
  /// This may be true if the target throws exceptions for out of bounds
  /// conversions or has fast FP CMOV.
  virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
                                        bool IsSigned) const {
    return false;
  }

  /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
  /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
  /// considered beneficial.
  /// If optimizing for size, expansion is only considered beneficial for up to
  /// 5 multiplies and a divide (if the exponent is negative).
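  ///
  /// The test popcount(E) + log2(E) < 7 approximates the multiply count of a
  /// square-and-multiply expansion; e.g. an exponent of 16 scores 1 + 4 == 5
  /// and is considered beneficial even for size, while an exponent of 21
  /// scores 3 + 4 == 7 and is not.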
  bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
    if (Exponent < 0)
      Exponent = -Exponent;
    uint64_t E = static_cast<uint64_t>(Exponent);
    return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //
protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type.  See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type.  See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate the minimum number of blocks to generate jump tables.
  void setMinimumJumpTableEntries(unsigned Val);

  /// Indicate the maximum number of entries in jump tables.
  /// Set to zero to generate unlimited jump tables.
  void setMaximumJumpTableSize(unsigned);

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  void setStackPointerRegisterToSaveRestore(Register R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" into BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);

  /// Tells the code generator which bitwidths to bypass.
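  ///
  /// For example, addBypassSlowDiv(64, 32) requests a runtime check that
  /// falls back to a 32-bit divide when both operands of a 64-bit divide fit
  /// in 32 bits.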
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }

  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties(const TargetRegisterInfo *TRI);

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it. Note that VT may refer to either
  /// the type of a result or that of an operand of Op.
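  ///
  /// For example, a hypothetical target without a native i64 divide and with
  /// custom v4i32 shift lowering might write, in its constructor:
  /// \code
  ///   setOperationAction(ISD::SDIV, MVT::i64, Expand);
  ///   setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, MVT::v4i32, Custom);
  /// \endcode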
  void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
    assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }
  void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
                          LegalizeAction Action) {
    for (auto Op : Ops)
      setOperationAction(Op, VT, Action);
  }
  void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
                          LegalizeAction Action) {
    for (auto VT : VTs)
      setOperationAction(Ops, VT, Action);
  }

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
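  ///
  /// For example, a target with no sign-extending i1 loads might write:
  /// \code
  ///   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
  /// \endcode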
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    unsigned Shift = 4 * ExtType;
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
  }
  void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    for (auto ExtType : ExtTypes)
      setLoadExtAction(ExtType, ValVT, MemVT, Action);
  }
  void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
                        ArrayRef<MVT> MemVTs, LegalizeAction Action) {
    for (auto MemVT : MemVTs)
      setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
                            LegalizeAction Action) {
    for (auto IdxMode : IdxModes)
      setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
  }

  void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
                            LegalizeAction Action) {
    for (auto VT : VTs)
      setIndexedLoadAction(IdxModes, VT, Action);
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
                             LegalizeAction Action) {
    for (auto IdxMode : IdxModes)
      setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
  }

  void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
                             LegalizeAction Action) {
    for (auto VT : VTs)
      setIndexedStoreAction(IdxModes, VT, Action);
  }

  /// Indicate that the specified indexed masked load does or does not work with
  /// the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
                                  LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
  }

  /// Indicate that the specified indexed masked store does or does not work
  /// with the specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode masked stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
                                   LegalizeAction Action) {
    setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
                         LegalizeAction Action) {
    for (auto CC : CCs) {
      assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
             "Table isn't big enough!");
      assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
      // The lower 3 bits of SimpleTy select a 4-bit group within the 32-bit
      // value, and the remaining upper bits index the second dimension of the
      // array to select which 32-bit value to use.
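      // For example, SimpleTy == 11 updates bits [12,15] of
      // CondCodeActions[CC][1].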
      uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
      CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
      CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
    }
  }
  void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
                         LegalizeAction Action) {
    for (auto VT : VTs)
      setCondCodeAction(CCs, VT, Action);
  }

  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }

  /// Convenience method to set an operation to Promote and specify the type
  /// in a single call.
  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    setOperationAction(Opc, OrigVT, Promote);
    AddPromotedToType(Opc, OrigVT, DestVT);
  }
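
  // For instance, a target that has only 32-bit bitwise instructions might
  // promote the 16-bit form (a sketch, not a statement about any particular
  // backend):
  //   setOperationPromotedToType(ISD::AND, MVT::i16, MVT::i32);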

  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
    for (auto NT : NTs) {
      assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
      TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
    }
  }
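
  // For example, a target that custom-combines shifts in PerformDAGCombine
  // would register them here:
  //   setTargetDAGCombine({ISD::SHL, ISD::SRA, ISD::SRL});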

  /// Set the target's minimum function alignment.
  void setMinFunctionAlignment(Align Alignment) {
    MinFunctionAlignment = Alignment;
  }

  /// Set the target's preferred function alignment.  This should be set if
  /// there is a performance benefit to higher-than-minimum alignment.
  void setPrefFunctionAlignment(Align Alignment) {
    PrefFunctionAlignment = Alignment;
  }

  /// Set the target's preferred loop alignment. The default alignment of one
  /// means the target does not care about loop alignment. The target may also
  /// override getPrefLoopAlignment to provide per-loop values.
  void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }

  /// Set the maximum amount of bytes permitted to be emitted for alignment.
  void setMaxBytesForAlignment(unsigned MaxBytes) {
    MaxBytesForAlignment = MaxBytes;
  }

  /// Set the minimum stack alignment of an argument.
  void setMinStackArgumentAlignment(Align Alignment) {
    MinStackArgumentAlignment = Alignment;
  }

  /// Set the maximum atomic operation size supported by the
  /// backend. Atomic operations greater than this size (as well as
  /// ones that are not naturally aligned) will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
    MaxAtomicSizeInBitsSupported = SizeInBits;
  }

  /// Set the size in bits of the maximum div/rem the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
    MaxDivRemBitWidthSupported = SizeInBits;
  }

  /// Set the size in bits of the maximum fp convert the backend supports.
  /// Larger operations will be expanded by ExpandLargeFPConvert.
  void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
    MaxLargeFPConvertBitWidthSupported = SizeInBits;
  }

  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
    MinCmpXchgSizeInBits = SizeInBits;
  }

  /// Sets whether unaligned atomic operations are supported.
  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
    SupportsUnalignedAtomics = UnalignedSupported;
  }
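
  // For illustration, a hypothetical 32-bit target with only word-sized
  // ll/sc and no unaligned atomics might configure:
  //   setMaxAtomicSizeInBitsSupported(32);
  //   setMinCmpXchgSizeInBits(32);
  //   setSupportsUnalignedAtomics(false);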

public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook also
  /// lets targets indicate that the same should be done for intrinsics which
  /// load/store.
  virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value*> &/*Ops*/,
                                    Type *&/*AccessTy*/) const {
    return false;
  }

  /// This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
  /// no scale.
  struct AddrMode {
    GlobalValue *BaseGV = nullptr;
    int64_t      BaseOffs = 0;
    bool         HasBaseReg = false;
    int64_t      Scale = 0;
    AddrMode() = default;
  };

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.  TODO: Handle
  /// pre/postinc as well.
  ///
  /// If the address space cannot be determined, it will be -1.
  ///
  /// TODO: Remove default argument
  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                     Type *Ty, unsigned AddrSpace,
                                     Instruction *I = nullptr) const;
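
  // Usage sketch: clients such as LoopStrengthReduce fill in an AddrMode and
  // query the hook. E.g., asking whether [base + 4*index + 16] is legal for
  // an i32 access (TLI, DL, Int32Ty and AS are stand-ins for the caller's
  // lowering object, data layout, type, and address space):
  //   TargetLowering::AddrMode AM;
  //   AM.BaseOffs = 16;
  //   AM.HasBaseReg = true;
  //   AM.Scale = 4;
  //   bool Legal = TLI.isLegalAddressingMode(DL, AM, Int32Ty, AS);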

  /// Return true if the specified immediate is a legal icmp immediate, that
  /// is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is legal for the value input of a
  /// store instruction.
  virtual bool isLegalStoreImmediate(int64_t Value) const {
    // Default implementation assumes that at least 0 works since it is likely
    // that a zero register exists or a zero immediate is allowed.
    return Value == 0;
  }

  /// Return true if it's significantly cheaper to shift a vector by a uniform
  /// scalar than by an amount which will vary across each lane. On x86 before
  /// AVX2 for example, there is a "psllw" instruction for the former case, but
  /// no simple instruction for a general "a << b" operation on vectors.
  /// This should also apply to lowering for vector funnel shifts (rotates).
  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
    return false;
  }

  /// Given a shuffle vector SVI representing a vector splat, return a new
  /// scalar type of size equal to SVI's scalar type if the new type is more
  /// profitable. Returns nullptr otherwise. For example under MVE float splats
  /// are converted to integer to prevent the need to move from SPR to GPR
  /// registers.
  virtual Type* shouldConvertSplatType(ShuffleVectorInst* SVI) const {
    return nullptr;
  }

  /// Given a set of interconnected phis of type 'From' that are loaded/stored
  /// or bitcast to type 'To', return true if the set should be converted to
  /// 'To'.
  virtual bool shouldConvertPhiType(Type *From, Type *To) const {
    return (From->isIntegerTy() || From->isFloatingPointTy()) &&
           (To->isIntegerTy() || To->isFloatingPointTy());
  }

  /// Returns true if the opcode is a commutative binary operation.
  virtual bool isCommutativeBinOp(unsigned Opcode) const {
    // FIXME: This should get its info from the td file.
    switch (Opcode) {
    case ISD::ADD:
    case ISD::SMIN:
    case ISD::SMAX:
    case ISD::UMIN:
    case ISD::UMAX:
    case ISD::MUL:
    case ISD::MULHU:
    case ISD::MULHS:
    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI:
    case ISD::FADD:
    case ISD::FMUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SADDO:
    case ISD::UADDO:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::SADDSAT:
    case ISD::UADDSAT:
    case ISD::FMINNUM:
    case ISD::FMAXNUM:
    case ISD::FMINNUM_IEEE:
    case ISD::FMAXNUM_IEEE:
    case ISD::FMINIMUM:
    case ISD::FMAXIMUM:
    case ISD::AVGFLOORS:
    case ISD::AVGFLOORU:
    case ISD::AVGCEILS:
    case ISD::AVGCEILU:
    case ISD::ABDS:
    case ISD::ABDU:
      return true;
    default: return false;
    }
  }

  /// Return true if the node is a math/logic binary operator.
  virtual bool isBinOp(unsigned Opcode) const {
    // A commutative binop must be a binop.
    if (isCommutativeBinOp(Opcode))
      return true;
    // These are non-commutative binops.
    switch (Opcode) {
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRL:
    case ISD::SRA:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM:
    case ISD::SSUBSAT:
    case ISD::USUBSAT:
    case ISD::FSUB:
    case ISD::FDIV:
    case ISD::FREM:
      return true;
    default:
      return false;
    }
  }

  /// Return true if it's free to truncate a value of type FromTy to type
  /// ToTy. e.g. on x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  /// Targets must return false when FromTy <= ToTy.
  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
  /// whether a call is in tail position. Typically this means that both results
  /// would be assigned to the same register or stack slot, but it could mean
  /// the target performs adequate checks of its own before proceeding with the
  /// tail call.  Targets must return false when FromTy <= ToTy.
  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
  virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
                              LLVMContext &Ctx) const {
    return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
                          getApproximateEVTForLLT(ToTy, DL, Ctx));
  }

  virtual bool isProfitableToHoist(Instruction *I) const { return true; }

  /// Return true if the extension represented by \p I is free.
  /// Unlike the is[Z|FP]ExtFree family, which is based on types,
  /// this method can use the context provided by \p I to decide
  /// whether or not \p I is free.
  /// This method extends the behavior of the is[Z|FP]ExtFree family.
  /// In other words, if is[Z|FP]ExtFree returns true, then this method
  /// returns true as well. The converse is not true.
  /// The target can perform the adequate checks by overriding isExtFreeImpl.
  /// \pre \p I must be a sign, zero, or fp extension.
  bool isExtFree(const Instruction *I) const {
    switch (I->getOpcode()) {
    case Instruction::FPExt:
      if (isFPExtFree(EVT::getEVT(I->getType()),
                      EVT::getEVT(I->getOperand(0)->getType())))
        return true;
      break;
    case Instruction::ZExt:
      if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
        return true;
      break;
    case Instruction::SExt:
      break;
    default:
      llvm_unreachable("Instruction is not an extension");
    }
    return isExtFreeImpl(I);
  }

  /// Return true if \p Load and \p Ext can form an ExtLoad.
  /// For example, in AArch64
  ///   %L = load i8, i8* %ptr
  ///   %E = zext i8 %L to i32
  /// can be lowered into one load instruction
  ///   ldrb w0, [x0]
  bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
                 const DataLayout &DL) const {
    EVT VT = getValueType(DL, Ext->getType());
    EVT LoadVT = getValueType(DL, Load->getType());

    // If the load has other users and the truncate is not free, the ext
    // probably isn't free.
    if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
        !isTruncateFree(Ext->getType(), Load->getType()))
      return false;

    // Check whether the target supports casts folded into loads.
    unsigned LType;
    if (isa<ZExtInst>(Ext))
      LType = ISD::ZEXTLOAD;
    else {
      assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
      LType = ISD::SEXTLOAD;
    }

    return isLoadExtLegal(LType, VT, LoadVT);
  }

  /// Return true if any actual instruction that defines a value of type FromTy
  /// implicitly zero-extends the value to ToTy in the result register.
  ///
  /// The function should return true when it is likely that the truncate can
  /// be freely folded with an instruction defining a value of FromTy. If
  /// the defining instruction is unknown (because you're looking at a
  /// function argument, PHI, etc.) then the target may require an
  /// explicit truncate, which is not necessarily free, but this function
  /// does not deal with those cases.
  /// Targets must return false when FromTy >= ToTy.
  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
    return false;
  }

  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
  virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
                          LLVMContext &Ctx) const {
    return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
                      getApproximateEVTForLLT(ToTy, DL, Ctx));
  }

  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }

  /// Return true if sign-extension from FromTy to ToTy is cheaper than
  /// zero-extension.
  virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
    return false;
  }

  /// Return true if this constant should be sign extended when promoting to
  /// a larger type.
  virtual bool signExtendConstant(const ConstantInt *C) const { return false; }

  /// Return true if sinking I's operands to the same basic block as I is
  /// profitable, e.g. because the operands can be folded into a target
  /// instruction during instruction selection. After calling the function
  /// \p Ops contains the Uses to sink ordered by dominance (dominating users
  /// come first).
  virtual bool shouldSinkOperands(Instruction *I,
                                  SmallVectorImpl<Use *> &Ops) const {
    return false;
  }

  /// Try to optimize extending or truncating conversion instructions (like
  /// zext, trunc, fptoui, uitofp) for the target.
  virtual bool
  optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
                                     const TargetTransformInfo &TTI) const {
    return false;
  }

  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  /// a = load i64* addr
  /// b = trunc i64 a to i32
  /// c = lshr i64 a, 32
  /// d = trunc i64 c to i32
  /// will be optimized into:
  /// b = load i32* addr1
  /// d = load i32* addr2
  /// Where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             Align & /*RequiredAlignment*/) const {
    return false;
  }

  /// Return true if the target has a vector blend instruction.
  virtual bool hasVectorBlend() const { return false; }

  /// Get the maximum supported factor for interleaved memory accesses.
  /// Defaults to the minimum interleave factor: 2.
  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }

  /// Lower an interleaved load to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p LI is the vector load instruction.
  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
  /// \p Indices is the corresponding indices for each shufflevector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedLoad(LoadInst *LI,
                                    ArrayRef<ShuffleVectorInst *> Shuffles,
                                    ArrayRef<unsigned> Indices,
                                    unsigned Factor) const {
    return false;
  }

  /// Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p SI is the vector store instruction.
  /// \p SVI is the shufflevector to RE-interleave the stored vector.
  /// \p Factor is the interleave factor.
  virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                                     unsigned Factor) const {
    return false;
  }

  /// Lower a deinterleave intrinsic to a target specific load intrinsic.
  /// Return true on success. Currently only supports
  /// llvm.experimental.vector.deinterleave2
  ///
  /// \p DI is the deinterleave intrinsic.
  /// \p LI is the accompanying load instruction
  virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
                                                LoadInst *LI) const {
    return false;
  }

  /// Lower an interleave intrinsic to a target specific store intrinsic.
  /// Return true on success. Currently only supports
  /// llvm.experimental.vector.interleave2
  ///
  /// \p II is the interleave intrinsic.
  /// \p SI is the accompanying store instruction
  virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
                                               StoreInst *SI) const {
    return false;
  }

  /// Return true if an fpext operation is free (for instance, because
  /// single-precision floating-point numbers are implicitly extended to
  /// double-precision).
  virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
    assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
           "invalid fpext types");
    return false;
  }

  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to single precision) for an FMA instruction.
  virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
                               LLT DestTy, LLT SrcTy) const {
    return false;
  }

  /// Return true if an fpext operation input to an \p Opcode operation is free
  /// (for instance, because half-precision floating-point numbers are
  /// implicitly extended to single precision) for an FMA instruction.
  virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
                               EVT DestVT, EVT SrcVT) const {
    assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
           "invalid fpext types");
    return isFPExtFree(DestVT, SrcVT);
  }

  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
  /// extend node) is profitable.
  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }

  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an fabs operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true; otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions).
  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                          EVT) const {
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true; otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions).
  virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                          LLT) const {
    return false;
  }

  /// IR version
  virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
    return false;
  }

  /// Returns true if \p MI can be combined with another instruction to
  /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
  /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
  /// distributed into an fadd/fsub.
  virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
    assert((MI.getOpcode() == TargetOpcode::G_FADD ||
            MI.getOpcode() == TargetOpcode::G_FSUB ||
            MI.getOpcode() == TargetOpcode::G_FMUL) &&
           "unexpected node in FMAD forming combine");
    switch (Ty.getScalarSizeInBits()) {
    case 16:
      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
    case 32:
      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
    case 64:
      return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
    default:
      break;
    }

    return false;
  }

  /// Returns true if \p N can be combined with another node to form an
  /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
  /// will be distributed into an fadd/fsub.
  virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
    assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
            N->getOpcode() == ISD::FMUL) &&
           "unexpected node in FMAD forming combine");
    return isOperationLegal(ISD::FMAD, N->getValueType(0));
  }

  // Return true when the decision to generate FMAs (or FMS, FMLA etc) rather
  // than FMUL and ADD is delegated to the machine combiner.
  virtual bool generateFMAsInMachineCombiner(EVT VT,
                                             CodeGenOpt::Level OptLevel) const {
    return false;
  }

  /// Return true if it's profitable to narrow operations of type SrcVT to
  /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
    return false;
  }

  /// Return true if pulling a binary operation into a select with an identity
  /// constant is profitable. This is the inverse of an IR transform.
  /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
  virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                                    EVT VT) const {
    return false;
  }

  /// Return true if it is beneficial to convert a load of a constant to
  /// just the constant itself.
  /// On some targets it might be more efficient to use a combination of
  /// arithmetic instructions to materialize the constant instead of loading it
  /// from a constant pool.
  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                 Type *Ty) const {
    return false;
  }

  /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
  /// from this source type with this index. This is needed because
  /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
  /// the first element, and only the target knows which lowering is cheap.
  virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                       unsigned Index) const {
    return false;
  }

  /// Try to convert an extract element of a vector binary operation into an
  /// extract element followed by a scalar operation.
  virtual bool shouldScalarizeBinop(SDValue VecOp) const {
    return false;
  }

  /// Return true if extraction of a scalar element from the given vector type
  /// at the given index is cheap. For example, if scalar operations occur on
  /// the same register file as vector operations, then an extract element may
  /// be a sub-register rename rather than an actual instruction.
  virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
    return false;
  }

  /// Try to convert math with an overflow comparison into the corresponding DAG
  /// node operation. Targets may want to override this independently of whether
  /// the operation is legal/custom for the given type because it may obscure
  /// matching of other patterns.
  virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                    bool MathUsed) const {
    // TODO: The default logic is inherited from code in CodeGenPrepare.
    // The opcode should not make a difference by default?
    if (Opcode != ISD::UADDO)
      return false;

    // Allow the transform as long as we have an integer type that is not
    // obviously illegal and unsupported and if the math result is used
    // besides the overflow check. On some targets (e.g. SPARC), it is
    // not profitable to form an overflow op if the math result has no
    // concrete users.
    if (VT.isVector())
      return false;
    return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
  }

  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
  // even if the vector itself has multiple uses.
  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
    return false;
  }

  // Return true if CodeGenPrepare should consider splitting large offset of a
  // GEP to make the GEP fit into the addressing mode so that it can be sunk
  // into the same blocks as its users.
  virtual bool shouldConsiderGEPOffsetSplit() const { return false; }

  /// Return true if creating a shift of the type by the given
  /// amount is not profitable.
  virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
    return false;
  }

  /// Does this target require the clearing of high-order bits in a register
  /// passed to the fp16 to fp conversion library function?
  virtual bool shouldKeepZExtForFP16Conv() const { return false; }

  /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
  /// from min(max(fptoi)) saturation patterns.
  virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
    return isOperationLegalOrCustom(Op, VT);
  }

  /// Does this target support complex deinterleaving?
  virtual bool isComplexDeinterleavingSupported() const { return false; }

  /// Does this target support complex deinterleaving with the given operation
  /// and type?
  virtual bool isComplexDeinterleavingOperationSupported(
      ComplexDeinterleavingOperation Operation, Type *Ty) const {
    return false;
  }

  /// Create the IR node for the given complex deinterleaving operation.
  /// If one cannot be created using all the given inputs, nullptr should be
  /// returned.
  virtual Value *createComplexDeinterleavingIR(
      IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
      ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
      Value *Accumulator = nullptr) const {
    return nullptr;
  }

  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// Rename the default libcall routine name for the specified libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }
  void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
    for (auto Call : Calls)
      setLibcallName(Call, Name);
  }

  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// Get the CallingConv that should be used for the specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }
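
  // For example, a platform whose 64-bit signed division helper deviates
  // from the default name could override it ("__mydiv64" is hypothetical):
  //   setLibcallName(RTLIB::SDIV_I64, "__mydiv64");
  //   setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::C);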

  /// Execute target specific actions to finalize target lowering.
  /// This is used to set extra flags in MachineFrameInformation and to freeze
  /// the set of reserved registers.
  /// The default implementation just freezes the set of reserved registers.
  virtual void finalizeLowering(MachineFunction &MF) const;

  //===--------------------------------------------------------------------===//
  // GlobalISel Hooks
  //===--------------------------------------------------------------------===//

  /// Check whether or not \p MI needs to be moved close to its uses.
  virtual bool shouldLocalize(const MachineInstr &MI,
                              const TargetTransformInfo *TTI) const;

private:
  const TargetMachine &TM;

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  bool HasMultipleConditionRegisters;

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  bool HasExtractBitsInsn;

  /// Tells the code generator to bypass slow divide or remainder
  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
  /// div/rem when the operands are positive and less than 256.
  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;

  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanFloatContents;

  /// Information about the contents of the high-bits in boolean vector values
  /// when the element type is wider than i1. See getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// The target scheduling preference: shortest possible total cycles or lowest
  /// register usage.
  Sched::Preference SchedPreferenceInfo;

  /// The minimum alignment that any argument on the stack needs to have.
  Align MinStackArgumentAlignment;

  /// The minimum function alignment (used when optimizing for size, and to
  /// prevent explicitly provided alignment from leading to incorrect code).
  Align MinFunctionAlignment;

  /// The preferred function alignment (used when alignment unspecified and
  /// optimizing for speed).
  Align PrefFunctionAlignment;

  /// The preferred loop alignment.
  Align PrefLoopAlignment;
  /// The maximum amount of bytes permitted to be emitted for alignment.
  unsigned MaxBytesForAlignment;

  /// Size in bits of the maximum atomics size the backend supports.
  /// Accesses larger than this will be expanded by AtomicExpandPass.
  unsigned MaxAtomicSizeInBitsSupported;

  /// Size in bits of the maximum div/rem size the backend supports.
  /// Larger operations will be expanded by ExpandLargeDivRem.
  unsigned MaxDivRemBitWidthSupported;

  /// Size in bits of the maximum fp convert the backend supports.
  /// Larger operations will be expanded by ExpandLargeFPConvert.
  unsigned MaxLargeFPConvertBitWidthSupported;

  /// Size in bits of the minimum cmpxchg or ll/sc operation the
  /// backend supports.
  unsigned MinCmpXchgSizeInBits;

  /// This indicates if the target supports unaligned atomic operations.
  bool SupportsUnalignedAtomics;

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  Register StackPointerRegisterToSaveRestore;

  /// This indicates the default register class to use for each ValueType the
  /// target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
  uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
  MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];

  /// This indicates the "representative" register class to use for each
  /// ValueType the target supports natively. This information is used by the
  /// scheduler to track register pressure. By default, the representative
  /// register class is the largest legal super-reg register class of the
  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
  /// representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};

  /// This indicates the "cost" of the "representative" register class for each
  /// ValueType. The cost is used by the scheduler to approximate register
  /// pressure.
  uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];

  /// For any value types we are promoting or expanding, this contains the value
  /// type that we are changing to.  For Expanded types, this contains one step
  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
  /// (e.g. i64 -> i16).  For types natively supported by the system, this holds
  /// the same type (e.g. i32 -> i32).
  MVT TransformToType[MVT::VALUETYPE_SIZE];

  /// For each operation and each value type, keep a LegalizeAction that
  /// indicates how instruction selection should deal with the operation.  Most
  /// operations are Legal (aka, supported natively by the target), but
  /// operations that are not should be described.  Note that operations on
  /// non-legal value types are not described here.
  LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];

  /// For each load extension type and each value type, keep a LegalizeAction
  /// that indicates how instruction selection should deal with a load of a
  /// specific value type and extension type. Uses 4-bits to store the action
  /// for each of the 4 load ext types.
  uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// For each value type pair keep a LegalizeAction that indicates whether a
  /// truncating store of a specific value type and truncating type is legal.
  LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];

  /// For each indexed mode and each value type, keep a quad of LegalizeAction
  /// that indicates how instruction selection should deal with the load /
  /// store / maskedload / maskedstore.
  ///
  /// The first dimension is the value_type for the reference. The second
  /// dimension represents the various modes for load store.
  uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];

  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
  /// indicates how instruction selection should deal with the condition code.
  ///
  /// Because each CC action takes up 4 bits, we need to have the array size be
  /// large enough to fit all of the value types. This can be done by rounding
  /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];

  ValueTypeActionImpl ValueTypeActions;

private:
  /// Targets can specify ISD nodes that they would like PerformDAGCombine
  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
  /// array.
  unsigned char
  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// For operations that must be promoted to a specific type, this holds the
  /// destination type.  This map should be sparse, so don't hold it as an
  /// array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

  /// Stores the name of each libcall.
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];

  /// The ISD::CondCode that should be used to test the result of each of the
  /// comparison libcalls against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// Stores the CallingConv that should be used for each libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];

  /// Set default libcall names and calling conventions.
  void InitLibcalls(const Triple &TT);

  /// The bits of IndexedModeActions used to store the legalization actions.
  /// We store the data as   | ML | MS |  L |  S | each taking 4 bits.
  enum IndexedModeActionsBits {
    IMAB_Store = 0,
    IMAB_Load = 4,
    IMAB_MaskedStore = 8,
    IMAB_MaskedLoad = 12
  };

  void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0x10 && "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
    IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
  }

  LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
                                      unsigned Shift) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
  }
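
  // Worked example of the packing above, assuming the LegalizeAction
  // ordering Legal = 0, Promote = 1, Expand = 2, LibCall = 3, Custom = 4:
  // since all four indexed forms start out Expand, each fresh entry reads
  // 0x2222. Calling setIndexedLoadAction({ISD::PRE_INC}, VT, Legal) clears
  // the load nibble (bits 4-7), leaving 0x2202 for that (VT, PRE_INC) entry.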

protected:
  /// Return true if the extension represented by \p I is free.
  /// \pre \p I is a sign, zero, or fp extension and
  ///      is[Z|FP]ExtFree of the related types is not true.
  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }

  /// Depth that GatherAllAliases should continue looking for chain
  /// dependencies when trying to find a more preferable chain. As an
  /// approximation, this should be more than the number of consecutive stores
  /// expected to be merged.
  unsigned GatherAllAliasesMaxDepth;

  /// \brief Specify maximum number of store instructions per memset call.
  ///
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store.  This only applies to setting a constant array of a constant size.
  unsigned MaxStoresPerMemset;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemsetOptSize;

  /// \brief Specify maximum number of store instructions per memcpy call.
  ///
  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxStoresPerMemcpy;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemcpyOptSize;
  /// \brief Specify max number of store instructions to glue in inlined memcpy.
  ///
  /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
  /// of store instructions to keep together. This helps in pairing and
  /// vectorization later on.
  unsigned MaxGluedStoresPerMemcpy = 0;

  /// \brief Specify maximum number of load instructions per memcmp call.
  ///
  /// When lowering \@llvm.memcmp this field specifies the maximum number of
  /// pairs of load operations that may be substituted for a call to memcmp.
  /// Targets must set this value based on the cost threshold for that target.
  /// Targets should assume that the memcmp will be done using as many of the
  /// largest load operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte load, one 2-byte load
  /// and one 1-byte load. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxLoadsPerMemcmp;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxLoadsPerMemcmpOptSize;

  /// \brief Specify maximum number of store instructions per memmove call.
  ///
  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores.  This only
  /// applies to copying a constant array of constant size.
  unsigned MaxStoresPerMemmove;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxStoresPerMemmoveOptSize;
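
  // Targets typically seed these limits in their TargetLowering constructor;
  // e.g. (illustrative numbers only, not a recommendation):
  //   MaxStoresPerMemset = 16;
  //   MaxStoresPerMemsetOptSize = 8;
  //   MaxStoresPerMemcpy = 8;
  //   MaxStoresPerMemcpyOptSize = 4;
  //   MaxStoresPerMemmove = 8;
  //   MaxStoresPerMemmoveOptSize = 4;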

  /// Tells the code generator that select is more expensive than a branch if
  /// the branch is usually predicted right.
  bool PredictableSelectIsExpensive;

  /// \see enableExtLdPromotion.
  bool EnableExtLdPromotion;

  /// Return true if the value types that can be represented by the specified
  /// register class are all legal.
  bool isLegalRC(const TargetRegisterInfo &TRI,
                 const TargetRegisterClass &RC) const;

  /// Replace/modify any TargetFrameIndex operands with a target-dependent
  /// sequence of memory operands that is recognized by PrologEpilogInserter.
  MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const;

  bool IsStrictFPEnabled;
};

/// This class defines information used to lower LLVM code to legal SelectionDAG
/// operators that the target instruction selector can accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
public:
  struct DAGCombinerInfo;
  struct MakeLibCallOptions;

  TargetLowering(const TargetLowering &) = delete;
  TargetLowering &operator=(const TargetLowering &) = delete;

  explicit TargetLowering(const TargetMachine &TM);

  bool isPositionIndependent() const;

  virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
                                          FunctionLoweringInfo *FLI,
                                          UniformityInfo *UA) const {
    return false;
  }

  // Lets the target control the following reassociation of operands:
  // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
  // is y. By default, any case where N0 has a single use is considered
  // profitable. This behavior reflects the condition replaced by this target
  // hook call in the DAGCombiner. Any particular target can implement its own
  // heuristic to restrict the common combiner.
  virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                                   SDValue N1) const {
    return N0.hasOneUse();
  }

  // Lets the target control the following reassociation of operands:
  // (op (op x, c1), y) -> (op (op x, y), c1), where N0 is (op x, c1) and N1
  // is y. By default, any case where N0 has a single use is considered
  // profitable. This behavior reflects the condition replaced by this target
  // hook call in the combiner. Any particular target can implement its own
  // heuristic to restrict the common combiner.
  virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                                   Register N1) const {
    return MRI.hasOneNonDBGUse(N0);
  }

  virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
    return false;
  }

  /// Returns true by value, and the base pointer, offset pointer and
  /// addressing mode by reference, if the node's address can be legally
  /// represented as a pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true by value, and the base pointer, offset pointer and
  /// addressing mode by reference, if this node can be combined with a load /
  /// store to form a post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true if the specified base+offset is a legal indexed addressing
  /// mode for this target. \p MI is the load or store instruction that is being
  /// considered for transformation.
  virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
                               bool IsPre, MachineRegisterInfo &MRI) const {
    return false;
  }

  /// Return the entry encoding for a jump table in the current function.  The
  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// Returns relocation base for the given PIC jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// This returns the relocation base for the given PIC jumptable, the same as
  /// getPICJumpTableRelocBase, but as an MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// Return true if folding a constant offset with the given GlobalAddress is
  /// legal.  It is frequently not legal in PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// On x86, return true if the operand with index OpNo is a CALL or JUMP
  /// instruction, which can use either a memory constraint or an address
  /// constraint. -fasm-blocks "__asm call foo" lowers to
  /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
  ///
  /// This function is used by a hack to choose the address constraint,
  /// lowering to a direct call.
  virtual bool
  isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
                          unsigned OpNo) const {
    return false;
  }

  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                            SDValue &Chain) const;

  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS) const;

  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
                           SDValue &NewRHS, ISD::CondCode &CCCode,
                           const SDLoc &DL, const SDValue OldLHS,
                           const SDValue OldRHS, SDValue &Chain,
                           bool IsSignaling = false) const;

  /// Returns a pair of (return value, chain).
  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
                                          EVT RetVT, ArrayRef<SDValue> Ops,
                                          MakeLibCallOptions CallOptions,
                                          const SDLoc &dl,
                                          SDValue Chain = SDValue()) const;
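
  // Usage sketch, expanding a 128-bit multiply through a libcall from a
  // target's lowering code (LHS, RHS and dl come from the surrounding
  // lowering context):
  //   MakeLibCallOptions CallOptions;
  //   SDValue Ops[2] = {LHS, RHS};
  //   std::pair<SDValue, SDValue> Res =
  //       makeLibCall(DAG, RTLIB::MUL_I128, MVT::i128, Ops, CallOptions, dl);
  //   // Res.first is the i128 result, Res.second the output chain.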

  /// Check whether parameters to a call that are passed in callee saved
  /// registers are the same as from the calling function.  This needs to be
  /// checked for tail call eligibility.
  bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
      const uint32_t *CallerPreservedMask,
      const SmallVectorImpl<CCValAssign> &ArgLocs,
      const SmallVectorImpl<SDValue> &OutVals) const;

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// A convenience struct that encapsulates a DAG, and two SDValues for
  /// returning information from TargetLowering to its clients that want to
  /// combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }
  };
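
  // Typical use inside SimplifyDemandedBits-style code (a sketch): after
  // proving that a simpler node computes the same demanded bits, record the
  // replacement and report success to the caller (tryNarrow is hypothetical):
  //   if (SDValue Narrow = tryNarrow(Op, DemandedBits))
  //     return TLO.CombineTo(Op, Narrow);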

  /// Determines the optimal series of memory ops to replace the memset / memcpy.
  /// Return true if the number of memory ops is below the threshold (Limit).
  /// Note that this is always the case when Limit is ~0.
  /// It returns the types of the sequence of memory ops to perform
  /// memset / memcpy by reference.
  virtual bool
  findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                           const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                           const AttributeList &FuncAttributes) const;

  /// Check to see if the specified operand of the specified instruction is a
  /// constant integer.  If so, check to see if there are any bits set in the
  /// constant that are not demanded.  If so, shrink the constant and return
  /// true.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                              const APInt &DemandedElts,
                              TargetLoweringOpt &TLO) const;

  /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
  bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                              TargetLoweringOpt &TLO) const;

  // Target hook to do target-specific constant optimization, which is called
  // by
  // ShrinkDemandedConstant. This function should return true if the target
  // doesn't want ShrinkDemandedConstant to further optimize the constant.
  virtual bool targetShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
    return false;
  }

  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.  This
  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
  /// generalized for targets with other types of implicit widening casts.
  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                        const APInt &DemandedBits,
                        TargetLoweringOpt &TLO) const;

  /// Look at Op.  At this point, we know that only the DemandedBits bits of the
  /// result of Op are ever used downstream.  If we can use this information to
  /// simplify Op, create a new simplified DAG node and return true, returning
  /// the original and new nodes in Old and New.  Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller).  The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  /// \p AssumeSingleUse When this parameter is true, this function will
  ///    attempt to simplify \p Op even if there are multiple uses.
  ///    Callers are responsible for correctly updating the DAG based on the
  ///    results of this function, because simply replacing TLO.Old
  ///    with TLO.New will be incorrect when this parameter is true and TLO.Old
  ///    has multiple uses.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts, KnownBits &Known,
                            TargetLoweringOpt &TLO, unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            KnownBits &Known, TargetLoweringOpt &TLO,
                            unsigned Depth = 0,
                            bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            DAGCombinerInfo &DCI) const;

  /// Helper wrapper around SimplifyDemandedBits.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                            const APInt &DemandedElts,
                            DAGCombinerInfo &DCI) const;
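
  // For instance, a PerformDAGCombine implementation for a node known to use
  // only the low 16 bits of a 32-bit operand might call (a sketch):
  //   APInt Demanded = APInt::getLowBitsSet(32, 16);
  //   if (SimplifyDemandedBits(N->getOperand(0), Demanded, DCI))
  //     return SDValue(N, 0);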

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          const APInt &DemandedElts,
                                          SelectionDAG &DAG,
                                          unsigned Depth = 0) const;

  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
  /// elements.
  SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          SelectionDAG &DAG,
                                          unsigned Depth = 0) const;

  /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
  /// bits from only some vector elements.
  SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
                                                const APInt &DemandedElts,
                                                SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

  /// Look at Vector Op. At this point, we know that only the DemandedElts
  /// elements of the result of Op are ever used downstream.  If we can use
  /// this information to simplify Op, create a new simplified DAG node and
  /// return true, storing the original and new nodes in TLO.
  /// Otherwise, analyze the expression and return a mask of KnownUndef and
  /// KnownZero elements for the expression (used to simplify the caller).
  /// The KnownUndef/Zero elements may only be accurate for those elements
  /// in the DemandedEltMask.
  /// \p AssumeSingleUse When this parameter is true, this function will
  ///    attempt to simplify \p Op even if there are multiple uses.
  ///    Callers are responsible for correctly updating the DAG based on the
  ///    results of this function, because simply replacing TLO.Old with
  ///    TLO.New will be incorrect when this parameter is true and TLO.Old
  ///    has multiple uses.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
                                  APInt &KnownUndef, APInt &KnownZero,
                                  TargetLoweringOpt &TLO, unsigned Depth = 0,
                                  bool AssumeSingleUse = false) const;

  /// Helper wrapper around SimplifyDemandedVectorElts.
  /// Adds Op back to the worklist upon success.
  bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
                                  DAGCombinerInfo &DCI) const;
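
  // An illustrative sketch: from a target combine that reads a single lane,
  // demand only that lane and let the infrastructure fold away the rest.
  // `N`, `Src`, `LaneIdx`, `TLI` and `DCI` are assumed from the caller.
  //
  //   unsigned NumElts = Src.getValueType().getVectorNumElements();
  //   APInt DemandedElts = APInt::getOneBitSet(NumElts, LaneIdx);
  //   if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI))
  //     return SDValue(N, 0); // The DAG was updated in place; revisit N.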

  /// Return true if the target supports simplifying demanded vector elements by
  /// converting them to undefs.
  virtual bool
  shouldSimplifyDemandedVectorElts(SDValue Op,
                                   const TargetLoweringOpt &TLO) const {
    return true;
  }

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements.
  virtual void computeKnownBitsForTargetNode(const SDValue Op,
                                             KnownBits &Known,
                                             const APInt &DemandedElts,
                                             const SelectionDAG &DAG,
                                             unsigned Depth = 0) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
  /// argument allows us to only collect the known bits that are shared by the
  /// requested vector elements. This is for GISel.
  virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
                                              Register R, KnownBits &Known,
                                              const APInt &DemandedElts,
                                              const MachineRegisterInfo &MRI,
                                              unsigned Depth = 0) const;

  /// Determine the known alignment for the pointer value \p R. This can
  /// typically be inferred from the number of low known 0 bits. However, for a
  /// pointer with a non-integral address space, the alignment value may be
  /// independent of the known low bits.
  virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
                                                Register R,
                                                const MachineRegisterInfo &MRI,
                                                unsigned Depth = 0) const;

  /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
  /// Default implementation computes low bits based on alignment
  /// information. This should preserve known bits passed into it.
  virtual void computeKnownBitsForFrameIndex(int FIOp,
                                             KnownBits &Known,
                                             const MachineFunction &MF) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   const APInt &DemandedElts,
                                                   const SelectionDAG &DAG,
                                                   unsigned Depth = 0) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to GlobalISel combiners. The DemandedElts
  /// argument allows us to only collect the minimum sign bits that are shared
  /// by the requested vector elements.
  virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                                    Register R,
                                                    const APInt &DemandedElts,
                                                    const MachineRegisterInfo &MRI,
                                                    unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded vector
  /// elements, returning true on success. Otherwise, analyze the expression and
  /// return a mask of KnownUndef and KnownZero elements for the expression
  /// (used to simplify the caller). The KnownUndef/Zero elements may only be
  /// accurate for those elements in the DemandedElts mask.
  virtual bool SimplifyDemandedVectorEltsForTargetNode(
      SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
      APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Attempt to simplify any target nodes based on the demanded bits/elts,
  /// returning true on success. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller).  The KnownZero/One bits may only
  /// be accurate for those bits in the Demanded masks.
  virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
                                                 const APInt &DemandedBits,
                                                 const APInt &DemandedElts,
                                                 KnownBits &Known,
                                                 TargetLoweringOpt &TLO,
                                                 unsigned Depth = 0) const;

  /// More limited version of SimplifyDemandedBits that can be used to "look
  /// through" ops that don't contribute to the DemandedBits/DemandedElts -
  /// bitwise ops etc.
  virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
      SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
      SelectionDAG &DAG, unsigned Depth) const;

  /// Return true if this function can prove that \p Op is never poison
  /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
  /// argument limits the check to the requested vector elements.
  virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
      SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
      bool PoisonOnly, unsigned Depth) const;

  /// Return true if Op can create undef or poison from non-undef & non-poison
  /// operands. The DemandedElts argument limits the check to the requested
  /// vector elements.
  virtual bool
  canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
                                      const SelectionDAG &DAG, bool PoisonOnly,
                                      bool ConsiderFlags, unsigned Depth) const;

  /// Tries to build a legal vector shuffle using the provided parameters
  /// or equivalent variations. The Mask argument may be modified as the
  /// function tries different variations.
  /// Returns an empty SDValue if the operation fails.
  SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
                                  SDValue N1, MutableArrayRef<int> Mask,
                                  SelectionDAG &DAG) const;

  /// This method returns the constant pool value that will be loaded by LD.
  /// NOTE: You must check for implicit extensions of the constant by LD.
  virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;

  /// If \p SNaN is false, \returns true if \p Op is known to never be any
  /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a
  /// signaling NaN.
  virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
                                            const SelectionDAG &DAG,
                                            bool SNaN = false,
                                            unsigned Depth = 0) const;

  /// Return true if vector \p Op has the same value across all \p DemandedElts,
  /// indicating any elements which may be undef in the output \p UndefElts.
  virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
                                         APInt &UndefElts,
                                         const SelectionDAG &DAG,
                                         unsigned Depth = 0) const;

  /// Returns true if the given Op is considered a canonical constant for the
  /// target, which should not be transformed back into a BUILD_VECTOR.
  virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
    return Op.getOpcode() == ISD::SPLAT_VECTOR;
  }

  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;

  public:
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    bool recursivelyDeleteUnusedNodes(SDNode *N);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Return if the N is a constant or constant vector equal to the true value
  /// from getBooleanContents().
  bool isConstTrueVal(SDValue N) const;

  /// Return if the N is a constant or constant vector equal to the false value
  /// from getBooleanContents().
  bool isConstFalseVal(SDValue N) const;

  /// Return if \p N is a True value when extended to \p VT.
  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                        bool foldBooleans, DAGCombinerInfo &DCI,
                        const SDLoc &dl) const;

  // For targets which wrap an address, unwrap it for analysis.
  virtual SDValue unwrapAddress(SDValue N) const { return N; }

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered interest in
  /// (e.g. via setTargetDAGCombine).
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0   - No change was made
  ///   SDValue.Val == N   - N was replaced, is dead, and is already handled.
  ///   otherwise          - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  ///
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
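
  // A minimal override sketch honoring the return-value contract above.
  // MyTargetLowering and MYTGTISD::FOO are hypothetical names; target nodes
  // are visited automatically, while target-independent opcodes must be
  // registered with setTargetDAGCombine().
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     case MYTGTISD::FOO:
  //       // Fold an idempotent node: FOO(FOO(x)) -> FOO(x).
  //       if (N->getOperand(0).getOpcode() == MYTGTISD::FOO)
  //         return N->getOperand(0);
  //       break;
  //     default:
  //       break;
  //     }
  //     return SDValue(); // SDValue.Val == 0: no change was made.
  //   }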

  /// Return true if it is profitable to move this shift by a constant amount
  /// through its operand, adjusting any immediate operands as necessary to
  /// preserve semantics. This transformation may not be desirable if it
  /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
  /// extraction in AArch64). By default, it returns true.
  ///
  /// @param N the shift node
  /// @param Level the current DAGCombine legalization level.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N,
                                             CombineLevel Level) const {
    return true;
  }

  /// GlobalISel - return true if it is profitable to move this shift by a
  /// constant amount through its operand, adjusting any immediate operands as
  /// necessary to preserve semantics. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// bitfield extraction in AArch64). By default, it returns true.
  ///
  /// @param MI the shift instruction
  /// @param IsAfterLegal true if running after legalization.
  virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
                                             bool IsAfterLegal) const {
    return true;
  }

  // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it is desirable to try and
  // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
  // writing this) is:
  //    With C as a power of 2 and C != 0 and C != INT_MIN:
  //    AddAnd:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //            -> (icmp eq and(add(A, C), ~(C + C)), 0)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //            -> (icmp ne and(add(A, C), ~(C + C)), 0)
  //    ABS:
  //     (icmp eq A, C) | (icmp eq A, -C)
  //            -> (icmp eq Abs(A), C)
  //     (icmp ne A, C) & (icmp ne A, -C)
  //            -> (icmp ne Abs(A), C)
  //
  // @param LogicOp the logic op
  // @param SETCC0 the first of the SETCC nodes
  // @param SETCC1 the second of the SETCC nodes
  virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
      const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
    return AndOrSETCCFoldKind::None;
  }

  /// Return true if it is profitable to combine an XOR of a logical shift
  /// to create a logical shift of NOT. This transformation may not be desirable
  /// if it disrupts a particularly auspicious target-specific tree (e.g.
  /// BIC on ARM/AArch64). By default, it returns true.
  virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
    return true;
  }

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  /// Return true if the target supports the swifterror attribute. It optimizes
  /// loads and stores into reads and writes of a specific register.
  virtual bool supportSwiftError() const {
    return false;
  }

  /// Return true if the target supports that a subset of CSRs for the given
  /// machine function is handled explicitly via copies.
  virtual bool supportSplitCSR(MachineFunction *MF) const {
    return false;
  }

  /// Return true if the target supports kcfi operand bundles.
  virtual bool supportKCFIBundles() const { return false; }

  /// Perform necessary initialization to handle a subset of CSRs explicitly
  /// via copies. This function is called at the beginning of instruction
  /// selection.
  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
    llvm_unreachable("Not Implemented");
  }

  /// Insert explicit copies in entry and exit blocks. We copy a subset of
  /// CSRs to virtual registers in the entry block, and copy them back to
  /// physical registers in the exit blocks. This function is called at the end
  /// of instruction selection.
  virtual void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return the newly negated expression if the cost is not expensive, and
  /// set \p Cost to indicate whether the negation is cheaper or neutral to
  /// perform.
  virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                       bool LegalOps, bool OptForSize,
                                       NegatibleCost &Cost,
                                       unsigned Depth = 0) const;

  SDValue getCheaperOrNeutralNegatedExpression(
      SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
      const NegatibleCost CostThreshold = NegatibleCost::Neutral,
      unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    SDValue Neg =
        getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
    if (!Neg)
      return SDValue();

    if (Cost <= CostThreshold)
      return Neg;

    // Remove the newly created node to avoid side effects on the DAG.
    if (Neg->use_empty())
      DAG.RemoveDeadNode(Neg.getNode());
    return SDValue();
  }

  /// This is the helper function to return the newly negated expression only
  /// when the cost is cheaper.
  SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                      bool LegalOps, bool OptForSize,
                                      unsigned Depth = 0) const {
    return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
                                                NegatibleCost::Cheaper, Depth);
  }

  /// This is the helper function to return the newly negated expression if
  /// the cost is not expensive.
  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
                               bool OptForSize, unsigned Depth = 0) const {
    NegatibleCost Cost = NegatibleCost::Expensive;
    return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
  }
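
  // An illustrative use of the cost-aware helpers from a floating-point
  // combine; `X`, `Y`, `DL`, `VT`, `LegalOps` and `OptForSize` are assumed
  // from the calling context.
  //
  //   // (fneg (fmul X, Y)) -> (fmul (fneg X), Y) if negating X is cheaper
  //   // than materializing an explicit FNEG of the whole product.
  //   if (SDValue NegX =
  //           getCheaperNegatedExpression(X, DAG, LegalOps, OptForSize))
  //     return DAG.getNode(ISD::FMUL, DL, VT, NegX, Y);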

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// Target-specific splitting of values into parts that fit a register
  /// storing a legal type.
  virtual bool splitValueIntoRegisterParts(
      SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
      unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
    return false;
  }

  /// Allows the target to handle physreg-carried dependencies
  /// in a target-specific way. Used from ScheduleDAGSDNodes to decide whether
  /// to add an edge to the dependency graph.
  /// Def - input: Selection DAG node defining the physical register
  /// User - input: Selection DAG node using the physical register
  /// Op - input: operand number of the use in User
  /// PhysReg - inout: set to the physical register if the edge is
  /// necessary, unchanged otherwise
  /// Cost - inout: physical register copy cost.
  /// Returns 'true' if the edge is necessary, 'false' otherwise.
  virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                         const TargetRegisterInfo *TRI,
                                         const TargetInstrInfo *TII,
                                         unsigned &PhysReg, int &Cost) const {
    return false;
  }

  /// Target-specific combining of register parts into their original value.
  virtual SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             std::optional<CallingConv::ID> CC) const {
    return SDValue();
  }

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue LowerFormalArguments(
      SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
      const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
      SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }
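
  // A register-only skeleton of this hook.  This is an illustrative sketch:
  // CC_MyTarget and MyTarget::GPRRegClass are hypothetical names, and a real
  // implementation must also handle stack-passed, split and extended
  // arguments.
  //
  //   SDValue MyTargetLowering::LowerFormalArguments(
  //       SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
  //       const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
  //       SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  //     MachineFunction &MF = DAG.getMachineFunction();
  //     SmallVector<CCValAssign, 16> ArgLocs;
  //     CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  //     CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);
  //     for (CCValAssign &VA : ArgLocs) {
  //       // Mark the ABI register live-in and copy it into a virtual
  //       // register so later code can use the incoming value.
  //       Register VReg = MF.addLiveIn(VA.getLocReg(), &MyTarget::GPRRegClass);
  //       InVals.push_back(DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT()));
  //     }
  //     return Chain;
  //   }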

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy = nullptr;
    bool RetSExt           : 1;
    bool RetZExt           : 1;
    bool IsVarArg          : 1;
    bool IsInReg           : 1;
    bool DoesNotReturn     : 1;
    bool IsReturnValueUsed : 1;
    bool IsConvergent      : 1;
    bool IsPatchPoint      : 1;
    bool IsPreallocated : 1;
    bool NoMerge           : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall = false;

    // True if call lowering is done after SelectionDAG type legalization.
    bool IsPostTypeLegalization = false;

    unsigned NumFixedArgs = -1;
    CallingConv::ID CallConv = CallingConv::C;
    SDValue Callee;
    ArgListTy Args;
    SelectionDAG &DAG;
    SDLoc DL;
    const CallBase *CB = nullptr;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;
    SmallVector<SDValue, 4> InVals;
    const ConstantInt *CFIType = nullptr;

    CallLoweringInfo(SelectionDAG &DAG)
        : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
          DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
          IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
          DAG(DAG) {}

    CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
      DL = dl;
      return *this;
    }

    CallLoweringInfo &setChain(SDValue InChain) {
      Chain = InChain;
      return *this;
    }

    // setCallee with target/module-specific attributes
    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                   SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);

      DAG.getTargetLoweringInfo().markLibCallAttributes(
          &(DAG.getMachineFunction()), CC, Args);
      return *this;
    }

    CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                SDValue Target, ArgListTy &&ArgsList) {
      RetTy = ResultType;
      Callee = Target;
      CallConv = CC;
      NumFixedArgs = ArgsList.size();
      Args = std::move(ArgsList);
      return *this;
    }

    CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                SDValue Target, ArgListTy &&ArgsList,
                                const CallBase &Call) {
      RetTy = ResultType;

      IsInReg = Call.hasRetAttr(Attribute::InReg);
      DoesNotReturn =
          Call.doesNotReturn() ||
          (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
      IsVarArg = FTy->isVarArg();
      IsReturnValueUsed = !Call.use_empty();
      RetSExt = Call.hasRetAttr(Attribute::SExt);
      RetZExt = Call.hasRetAttr(Attribute::ZExt);
      NoMerge = Call.hasFnAttr(Attribute::NoMerge);

      Callee = Target;

      CallConv = Call.getCallingConv();
      NumFixedArgs = FTy->getNumParams();
      Args = std::move(ArgsList);

      CB = &Call;

      return *this;
    }

    CallLoweringInfo &setInRegister(bool Value = true) {
      IsInReg = Value;
      return *this;
    }

    CallLoweringInfo &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    CallLoweringInfo &setVarArg(bool Value = true) {
      IsVarArg = Value;
      return *this;
    }

    CallLoweringInfo &setTailCall(bool Value = true) {
      IsTailCall = Value;
      return *this;
    }

    CallLoweringInfo &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    CallLoweringInfo &setConvergent(bool Value = true) {
      IsConvergent = Value;
      return *this;
    }

    CallLoweringInfo &setSExtResult(bool Value = true) {
      RetSExt = Value;
      return *this;
    }

    CallLoweringInfo &setZExtResult(bool Value = true) {
      RetZExt = Value;
      return *this;
    }

    CallLoweringInfo &setIsPatchPoint(bool Value = true) {
      IsPatchPoint = Value;
      return *this;
    }

    CallLoweringInfo &setIsPreallocated(bool Value = true) {
      IsPreallocated = Value;
      return *this;
    }

    CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    CallLoweringInfo &setCFIType(const ConstantInt *Type) {
      CFIType = Type;
      return *this;
    }

    ArgListTy &getArgs() {
      return Args;
    }
  };
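
  // Typical builder-style usage, sketched from how targets drive LowerCallTo
  // when emitting a runtime call ("memcpy" and the surrounding values are
  // illustrative assumptions):
  //
  //   TargetLowering::ArgListTy Args;
  //   // ... append an ArgListEntry per outgoing argument ...
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl)
  //       .setChain(Chain)
  //       .setLibCallee(
  //           CallingConv::C, Type::getVoidTy(*DAG.getContext()),
  //           DAG.getExternalSymbol("memcpy",
  //                                 TLI.getPointerTy(DAG.getDataLayout())),
  //           std::move(Args))
  //       .setDiscardResult();
  //   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
  //   Chain = CallResult.second; // Continue from the call's token chain.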

  /// This structure is used to pass arguments to makeLibCall function.
  struct MakeLibCallOptions {
    // By passing the type list before softening to makeLibCall, the target
    // hook shouldExtendTypeInLibCall can query the original type before
    // softening.
    ArrayRef<EVT> OpsVTBeforeSoften;
    EVT RetVTBeforeSoften;
    bool IsSExt : 1;
    bool DoesNotReturn : 1;
    bool IsReturnValueUsed : 1;
    bool IsPostTypeLegalization : 1;
    bool IsSoften : 1;

    MakeLibCallOptions()
        : IsSExt(false), DoesNotReturn(false), IsReturnValueUsed(true),
          IsPostTypeLegalization(false), IsSoften(false) {}

    MakeLibCallOptions &setSExt(bool Value = true) {
      IsSExt = Value;
      return *this;
    }

    MakeLibCallOptions &setNoReturn(bool Value = true) {
      DoesNotReturn = Value;
      return *this;
    }

    MakeLibCallOptions &setDiscardResult(bool Value = true) {
      IsReturnValueUsed = !Value;
      return *this;
    }

    MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
      IsPostTypeLegalization = Value;
      return *this;
    }

    MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
                                                bool Value = true) {
      OpsVTBeforeSoften = OpsVT;
      RetVTBeforeSoften = RetVT;
      IsSoften = Value;
      return *this;
    }
  };
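
  // A sketch of how these options typically feed makeLibCall when softening
  // a floating-point operation (RTLIB::ADD_F32 is a real libcall enumerator;
  // `LHS`, `RHS`, `dl` and `TLI` are assumed from the caller):
  //
  //   TargetLowering::MakeLibCallOptions CallOptions;
  //   EVT OpVTs[2] = {MVT::f32, MVT::f32};
  //   // Record the pre-softening types so shouldExtendTypeInLibCall can
  //   // inspect them.
  //   CallOptions.setTypeListBeforeSoften(OpVTs, MVT::f32);
  //   SDValue Ops[2] = {LHS, RHS};
  //   std::pair<SDValue, SDValue> Res =
  //       TLI.makeLibCall(DAG, RTLIB::ADD_F32, MVT::i32, Ops, CallOptions, dl);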

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands.  The first element is the return value
  /// for the function (if RetTy is not VoidTy).  The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
    LowerCall(CallLoweringInfo &/*CLI*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, Align) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers.  If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                              bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
                              const SmallVectorImpl<SDValue> & /*OutVals*/,
                              const SDLoc & /*dl*/,
                              SelectionDAG & /*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
    return false;
  }

  /// Return the builtin name for the __builtin___clear_cache intrinsic.
  /// Default is to invoke the clear cache library call.
  virtual const char * getClearCacheBuiltinName() const {
    return "__clear_cache";
  }

  /// Return the register ID of the name passed in. Used by named register
  /// global variables extension. There is no target-independent behaviour,
  /// so the default action is to bail.
  virtual Register getRegisterByName(const char* RegName, LLT Ty,
                                     const MachineFunction &MF) const {
    report_fatal_error("Named registers not implemented for this target");
  }
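
  // An override sketch recognizing a single named register (illustrative;
  // MyTarget::SP is a hypothetical physical register, and StringSwitch comes
  // from llvm/ADT/StringSwitch.h):
  //
  //   Register MyTargetLowering::getRegisterByName(
  //       const char *RegName, LLT Ty, const MachineFunction &MF) const {
  //     Register Reg = StringSwitch<Register>(RegName)
  //                        .Case("sp", MyTarget::SP)
  //                        .Default(Register());
  //     if (Reg)
  //       return Reg;
  //     report_fatal_error("Named registers not implemented for this target");
  //   }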

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer return value.  FIXME: Some C calling conventions
  /// require the return type to be promoted, but this is not true all the time,
  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
  /// conventions. The frontend should handle this and include all of the
  /// necessary information.
  virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                       ISD::NodeType /*ExtendKind*/) const {
    EVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// For some targets, an LLVM struct type must be broken down into multiple
  /// simple types, but the calling convention specifies that the entire struct
  /// must be passed in a block of consecutive registers.
  virtual bool
  functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const DataLayout &DL) const {
    return false;
  }

  /// For most targets, an LLVM type must be broken down into multiple
  /// smaller types. Usually the halves are ordered according to the
  /// endianness, but for some platforms that would break. So this method will
  /// default to matching the endianness but can be overridden.
  virtual bool
  shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
    return DL.isLittleEndian();
  }

  /// Returns a 0 terminated array of registers that can be safely used as
  /// scratch registers.
  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
    return nullptr;
  }

  /// Returns a 0 terminated array of rounding control registers that can be
  /// attached to a strict FP call.
  virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
    return ArrayRef<MCPhysReg>();
  }

  /// This callback is used to prepare for a volatile or atomic load.
  /// It takes a chain node as input and returns the chain for the load itself.
  ///
  /// Having a callback like this is necessary for targets like SystemZ,
  /// which allows a CPU to reuse the result of a previous load indefinitely,
  /// even if a cache-coherent store is performed by another CPU.  The default
  /// implementation does nothing.
  virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
                                              SelectionDAG &DAG) const {
    return Chain;
  }

  /// Should SelectionDAG lower an atomic store of the given kind as a normal
  /// StoreSDNode (as opposed to an AtomicSDNode)?  NOTE: The intention is to
  /// eventually migrate all targets to using StoreSDNodes, but porting is
  /// being done one target at a time.
  virtual bool lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
    assert(SI.isAtomic() && "violated precondition");
    return false;
  }

  /// Should SelectionDAG lower an atomic load of the given kind as a normal
  /// LoadSDNode (as opposed to an AtomicSDNode)?  NOTE: The intention is to
  /// eventually migrate all targets to using LoadSDNodes, but porting is
  /// being done one target at a time.
  virtual bool lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
    assert(LI.isAtomic() && "violated precondition");
    return false;
  }

  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types.  It replaces the
  /// LowerOperation callback in the type Legalizer.  The reason we can not do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal.  If the target has no operations that require custom
  /// lowering, it need not implement this.  The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
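
  // A minimal override sketch (illustrative only) dispatching on opcode and
  // reusing one of the generic expansion helpers declared later in this
  // class:
  //
  //   SDValue MyTargetLowering::LowerOperation(SDValue Op,
  //                                            SelectionDAG &DAG) const {
  //     switch (Op.getOpcode()) {
  //     case ISD::CTPOP:
  //       // Reuse the generic bit-twiddling expansion for population count.
  //       return expandCTPOP(Op.getNode(), DAG);
  //     default:
  //       llvm_unreachable("unexpected operation registered as Custom");
  //     }
  //   }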

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type.  The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this.  The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return nullptr;
  }

  bool verifyReturnAddressArgumentIsConstant(SDValue Op,
                                             SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to.  This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Address,             // Address constraint.
    C_Immediate,           // Requires an immediate.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m".  TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst.  This gets
    /// modified as the asm is processed.
    Value *CallOperandVal = nullptr;

    /// The ValueType for the operand value.
    MVT ConstraintVT = MVT::Other;

    /// Construct from an InlineAsm::ConstraintInfo.
    AsmOperandInfo(InlineAsm::ConstraintInfo Info)
        : InlineAsm::ConstraintInfo(std::move(Info)) {}

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;
  };

  using AsmOperandInfoVector = std::vector<AsmOperandInfo>;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values.  If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
                                                const TargetRegisterInfo *TRI,
                                                const CallBase &Call) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(StringRef Constraint) const;

  /// Given a physical register constraint (e.g.  {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// pointer.
  ///
  /// This should only be used for C_Register constraints.  On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const;
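
  // An override sketch handling the common 'r' class constraint and
  // deferring everything else (illustrative; MyTarget::GPRRegClass is a
  // hypothetical register class):
  //
  //   std::pair<unsigned, const TargetRegisterClass *>
  //   MyTargetLowering::getRegForInlineAsmConstraint(
  //       const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  //     if (Constraint.size() == 1 && Constraint[0] == 'r')
  //       return std::make_pair(0U, &MyTarget::GPRRegClass);
  //     // Fall back to generic handling, e.g. for "{sp}"-style constraints.
  //     return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint,
  //                                                         VT);
  //   }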

  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
    if (ConstraintCode == "m")
      return InlineAsm::Constraint_m;
    if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    if (ConstraintCode == "X")
      return InlineAsm::Constraint_X;
    if (ConstraintCode == "p")
      return InlineAsm::Constraint_p;
    return InlineAsm::Constraint_Unknown;
  }

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand.  This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector.  If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  // Lower custom output constraints. If invalid, return SDValue().
  virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
                                              const SDLoc &DL,
                                              const AsmOperandInfo &OpInfo,
                                              SelectionDAG &DAG) const;

  // Targets may override this function to collect operands from the CallInst
  // and, for example, lower them into the SelectionDAG operands.
  virtual void CollectTargetIntrinsicOperands(const CallInst &I,
                                              SmallVectorImpl<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SDIV lowering for
  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
  /// assumes SDIV is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Targets may override this function to provide custom SREM lowering for
  /// power-of-2 denominators.  If the target returns an empty SDValue, LLVM
  /// assumes SREM is expensive and replaces it with a series of other integer
  /// operations.
  virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                SmallVectorImpl<SDNode *> &Created) const;

  /// Indicate whether this target prefers to combine FDIVs with the same
  /// divisor. If the transform should never be done, return zero. If the
  /// transform should be done, return the minimum number of divisor uses
  /// that must exist.
  virtual unsigned combineRepeatedFPDivisors() const {
    return 0;
  }

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return either a square root or its reciprocal estimate value for the input
  /// operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one or two constants.
  /// The boolean Reciprocal is used to select whether the estimate is for the
  /// square root of the input operand or the reciprocal of its square root.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                  int Enabled, int &RefinementSteps,
                                  bool &UseOneConstNR, bool Reciprocal) const {
    return SDValue();
  }
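
  // An override sketch returning a hardware estimate for f32 (illustrative;
  // MYTGTISD::FRSQRTE is a hypothetical target opcode, and a real
  // implementation would also key off \p Reciprocal):
  //
  //   SDValue MyTargetLowering::getSqrtEstimate(SDValue Operand,
  //                                             SelectionDAG &DAG, int Enabled,
  //                                             int &RefinementSteps,
  //                                             bool &UseOneConstNR,
  //                                             bool Reciprocal) const {
  //     EVT VT = Operand.getValueType();
  //     if (VT != MVT::f32 || Enabled == ReciprocalEstimate::Disabled)
  //       return SDValue();
  //     if (RefinementSteps == ReciprocalEstimate::Unspecified)
  //       RefinementSteps = 1; // Request one Newton-Raphson iteration.
  //     UseOneConstNR = true;
  //     return DAG.getNode(MYTGTISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  //   }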

  /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
  /// required for correctness since InstCombine might have canonicalized a
  /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic.  If we were to fall
  /// through to the default expansion/soften to libcall, we might introduce a
  /// link-time dependency on libm into a file that originally did not have one.
  SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;

  /// Return a reciprocal estimate value for the input operand.
  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
  /// 'Enabled' as set by a potential default override attribute.
  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
  /// refinement iterations required to generate a sufficient (though not
  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
  /// A target may choose to implement its own refinement within this function.
  /// If that's true, then return '0' as the number of RefinementSteps to avoid
  /// any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                   int Enabled, int &RefinementSteps) const {
    return SDValue();
  }

  /// Return a target-dependent comparison result if the input operand is
  /// suitable for use with a square root estimate calculation. For example, the
  /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
  /// result should be used as the condition operand for a select or branch.
  virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
                                   const DenormalMode &Mode) const;

  /// Return a target-dependent result if the input operand is not suitable for
  /// use with a square root estimate calculation.
  virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
                                              SelectionDAG &DAG) const {
    return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
  }

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
  /// respectively, each computing an n/2-bit part of the result.
  /// \param Result A vector that will be filled with the parts of the result
  ///        in little-endian order.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
                      SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
                      SelectionDAG &DAG, MulExpansionKind Kind,
                      SDValue LL = SDValue(), SDValue LH = SDValue(),
                      SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Expand a MUL into two nodes.  One that computes the high bits of
  /// the result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL.  You can use this parameter
  ///        if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL.  See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL.  See LL for meaning.
  /// \param RH High bits of the RHS of the MUL.  See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, MulExpansionKind Kind,
                 SDValue LL = SDValue(), SDValue LH = SDValue(),
                 SDValue RL = SDValue(), SDValue RH = SDValue()) const;

  /// Attempt to expand an n-bit div/rem/divrem by constant using an n/2-bit
  /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
  /// will be expanded by DAGCombiner. This is not possible for all constant
  /// divisors.
  /// \param N Node to expand
  /// \param Result A vector that will be filled with the low and high parts of
  ///        the results. For *DIVREM, this will be the quotient parts followed
  ///        by the remainder parts.
  /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
  ///        half of VT.
  /// \param LL Low bits of the LHS of the operation. You can use this
  ///        parameter if you want to control how low bits are extracted from
  ///        the LHS.
  /// \param LH High bits of the LHS of the operation. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
                              EVT HiLoVT, SelectionDAG &DAG,
                              SDValue LL = SDValue(),
                              SDValue LH = SDValue()) const;

  /// Expand funnel shift.
  /// \param N Node to expand
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;

  /// Expand rotations.
  /// \param N Node to expand
  /// \param AllowVectorOps expand vector rotate; this should only be performed
  ///        if the legalization is happening outside of LegalizeVectorOps
  /// \returns The expansion if successful, SDValue() otherwise
  SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;

  /// Expand shift-by-parts.
  /// \param N Node to expand
  /// \param Lo lower-output-part after conversion
  /// \param Hi upper-output-part after conversion
  void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
                        SelectionDAG &DAG) const;

  /// Expand float(f32) to SINT(i64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  /// Expand float to UINT conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand UINT(i64) to double(f64) conversion
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \param Chain output chain after conversion
  /// \returns True, if the expansion was successful, false otherwise
  bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
                        SelectionDAG &DAG) const;

  /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
  SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;

  /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
  /// \param N Node to expand
  /// \returns The expansion result
  SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;

  /// Expand check for floating point class.
  /// \param ResultVT The type of intrinsic call result.
  /// \param Op The tested value.
  /// \param Test The test to perform.
  /// \param Flags The optimization flags.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
                           SDNodeFlags Flags, const SDLoc &DL,
                           SelectionDAG &DAG) const;

  /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTPOP nodes.
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand CTTZ via Table Lookup.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                          SDValue Op, unsigned NumBitsPerElt) const;

  /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes;
  /// vector nodes can only succeed if all operations are legal/custom.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;

  /// Expand ABS nodes. Expands vector/scalar ABS nodes; expansion of vector
  /// nodes can only succeed if all operations are legal/custom.
  /// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
  /// \param N Node to expand
  /// \param IsNegative indicates negated abs (i.e. computes -abs(x))
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABS(SDNode *N, SelectionDAG &DAG,
                    bool IsNegative = false) const;

  /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
  /// scalar types. Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with i16/i32/i64 scalar
  /// types. Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;

  /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
  /// Returns SDValue() if expand fails.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
  /// i8/i16/i32/i64 scalar types.
  /// \param N Node to expand
  /// \returns The expansion result or SDValue() if it fails.
  SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;

  /// Turn a load of a vector type into loads of the individual elements.
  /// \param LD load to expand
  /// \returns BUILD_VECTOR and TokenFactor nodes.
  std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Turn a store of a vector type into stores of the individual elements.
  /// \param ST Store with a vector value type
  /// \returns TokenFactor of the individual store chains.
  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Expands an unaligned load to 2 half-size loads for an integer, and
  /// possibly more for vectors.
  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
                                                  SelectionDAG &DAG) const;

  /// Expands an unaligned store to 2 half-size stores for integer values, and
  /// possibly more for vectors.
  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;

  /// Increments memory address \p Addr according to the type of the value
  /// \p DataVT that should be stored. If the data is stored in compressed
  /// form, the memory address should be incremented according to the number of
  /// stored elements. This number is equal to the number of '1' bits in
  /// \p Mask.
  /// \p DataVT is a vector type. \p Mask is a vector value.
  /// \p DataVT and \p Mask have the same number of vector elements.
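  /// For example, storing v4i32 data with \p Mask = <1,0,1,1> in compressed
  /// form advances \p Addr by three elements (the three '1' bits); without
  /// compression it advances by the full vector size.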
  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
                                 EVT DataVT, SelectionDAG &DAG,
                                 bool IsCompressedMemory) const;

  /// Get a pointer to vector element \p Idx located in memory for a vector of
  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
  /// bounds the returned pointer is unspecified, but will be within the vector
  /// bounds.
  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                  SDValue Index) const;

  /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
  /// in memory for a vector of type \p VecVT starting at a base address of
  /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
  /// returned pointer is unspecified, but the value returned will be such that
  /// the entire subvector would be within the vector bounds.
  SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
                                 EVT SubVecVT, SDValue Index) const;

  /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
  /// method accepts integers as its arguments.
  SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
  /// method accepts integers as its arguments.
  SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
  /// method accepts integers as its arguments.
  SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
  /// method accepts integers as its arguments.
  SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
  /// method accepts integers as its arguments.
  /// Note: This method may fail if the division could not be performed
  /// within the type. Clients must retry with a wider type if this happens.
  SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
                              SDValue LHS, SDValue RHS,
                              unsigned Scale, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
  /// always succeeds and populates the Result and Overflow arguments.
  void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                      SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
  /// expansion was successful and populates the Result and Overflow arguments.
  bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
                  SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_* into an explicit calculation.
  SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
  SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;

  /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
  /// Returns true if the expansion was successful.
  bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;

  /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
  /// method accepts vectors as its arguments.
  SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;

  /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC
  /// on the current target. A VP_SETCC will additionally be given a Mask
  /// and/or EVL not equal to SDValue().
  ///
  /// If the SETCC has been legalized using AND / OR, then the legalized node
  /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
  /// will be set to false. This will also hold if the VP_SETCC has been
  /// legalized using VP_AND / VP_OR.
  ///
  /// If the SETCC / VP_SETCC has been legalized by using
  /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
  /// swapped, CC will be set to the new condition, and NeedInvert will be set
  /// to false.
  ///
  /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
  /// then LHS and RHS will be unchanged, CC will be set to the inverted
  /// condcode,
  /// and NeedInvert will be set to true. The caller must invert the result of
  /// the SETCC with SelectionDAG::getLogicalNOT() or take equivalent action to
  /// swap the effect of a true/false result.
  ///
  /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
  /// hasn't.
  bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
                             SDValue &RHS, SDValue &CC, SDValue Mask,
                             SDValue EVL, bool &NeedInvert, const SDLoc &dl,
                             SDValue &Chain, bool IsSignaling = false) const;
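
  // A minimal caller-side sketch of the contract above (illustrative only;
  // LHS, RHS, CC, VT, and dl are assumed from the surrounding legalizer):
  //
  //   bool NeedInvert = false;
  //   SDValue Chain;
  //   if (LegalizeSetCCCondCode(DAG, VT, LHS, RHS, CC, /*Mask=*/SDValue(),
  //                             /*EVL=*/SDValue(), NeedInvert, dl, Chain)) {
  //     // If RHS was cleared, LHS already holds the legalized node.
  //     SDValue SetCC =
  //         RHS.getNode() ? DAG.getSetCC(dl, VT, LHS, RHS,
  //                                      cast<CondCodeSDNode>(CC)->get())
  //                       : LHS;
  //     if (NeedInvert)
  //       SetCC = DAG.getLogicalNOT(dl, SetCC, SetCC.getValueType());
  //   }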

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag.  These instructions are special in various
  /// ways, which require special support to insert.  The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  /// As long as the returned basic block is different (i.e., we created a new
  /// one), the custom inserter is free to modify the rest of \p MBB.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
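
  // A hypothetical target override (all names below are illustrative, not
  // part of this interface):
  //
  //   MachineBasicBlock *
  //   MyTargetLowering::EmitInstrWithCustomInserter(
  //       MachineInstr &MI, MachineBasicBlock *MBB) const {
  //     switch (MI.getOpcode()) {
  //     case MyISA::SELECT_PSEUDO:
  //       // May split MBB and create the new blocks for a select diamond.
  //       return emitSelectPseudo(MI, MBB);
  //     default:
  //       llvm_unreachable("unexpected instr type to insert");
  //     }
  //   }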

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g. to fill in optional defs for
  /// ARM 's' setting instructions.
  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                             SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }

  virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                      const SDLoc &DL) const {
    llvm_unreachable("not implemented for this target");
  }

  /// Lower TLS global address SDNode for target independent emulated TLS model.
  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;

  /// Expands a target-specific indirect branch for the case of JumpTable
  /// expansion.
  virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
                                         SDValue Addr,
                                         SelectionDAG &DAG) const {
    return DAG.getNode(ISD::BRIND, dl, MVT::Other, Value, Addr);
  }

  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
  // If we're comparing for equality to zero and isCtlzFast is true, expose the
  // fact that this can be implemented as a ctlz/srl pair, so that the dag
  // combiner can fold the new nodes.
  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;

private:
  SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, DAGCombinerInfo &DCI) const;
  SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                             const SDLoc &DL, DAGCombinerInfo &DCI) const;

  SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
                                               SDValue N1, ISD::CondCode Cond,
                                               DAGCombinerInfo &DCI,
                                               const SDLoc &DL) const;

  // (X & (C l>>/<< Y)) ==/!= 0  -->  ((X <</l>> Y) & C) ==/!= 0
  SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
      EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
      DAGCombinerInfo &DCI, const SDLoc &DL) const;

  SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;

  SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
                            SDValue CompTargetNode, ISD::CondCode Cond,
                            DAGCombinerInfo &DCI, const SDLoc &DL,
                            SmallVectorImpl<SDNode *> &Created) const;
  SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
                          ISD::CondCode Cond, DAGCombinerInfo &DCI,
                          const SDLoc &DL) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI, const DataLayout &DL);

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETLOWERING_H
//===---- IndirectThunks.h - Indirect Thunk Base Class ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Contains a base class for Passes that inject an MI thunk.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_INDIRECTTHUNKS_H
#define LLVM_CODEGEN_INDIRECTTHUNKS_H

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

namespace llvm {

template <typename Derived, typename InsertedThunksTy = bool>
class ThunkInserter {
  Derived &getDerived() { return *static_cast<Derived *>(this); }

protected:
  // A variable used to track whether (and possibly which) thunks have been
  // inserted so far. InsertedThunksTy is usually a bool, but can be other types
  // to represent more than one type of thunk. Requires an |= operator to
  // accumulate results.
  InsertedThunksTy InsertedThunks;
  void doInitialization(Module &M) {}
  void createThunkFunction(MachineModuleInfo &MMI, StringRef Name,
                           bool Comdat = true, StringRef TargetAttrs = "");

public:
  void init(Module &M) {
    InsertedThunks = InsertedThunksTy{};
    getDerived().doInitialization(M);
  }
  // Return `true` if `MMI` or `MF` was modified.
  bool run(MachineModuleInfo &MMI, MachineFunction &MF);
};
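
// A hypothetical minimal subclass (all names below are illustrative, not
// part of this header):
//
//   struct ExampleThunkInserter : ThunkInserter<ExampleThunkInserter> {
//     const char *getThunkPrefix() { return "__example_thunk_"; }
//     bool mayUseThunk(const MachineFunction &MF, bool InsertedThunks) {
//       return !InsertedThunks; // e.g. also check a subtarget feature here
//     }
//     bool insertThunks(MachineModuleInfo &MMI, MachineFunction &MF) {
//       createThunkFunction(MMI, "__example_thunk_r11");
//       return true;
//     }
//     void populateThunk(MachineFunction &MF) {
//       // Emit the thunk body as MachineInstrs here.
//     }
//   };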

template <typename Derived, typename InsertedThunksTy>
void ThunkInserter<Derived, InsertedThunksTy>::createThunkFunction(
    MachineModuleInfo &MMI, StringRef Name, bool Comdat,
    StringRef TargetAttrs) {
  assert(Name.startswith(getDerived().getThunkPrefix()) &&
         "Created a thunk with an unexpected prefix!");

  Module &M = const_cast<Module &>(*MMI.getModule());
  LLVMContext &Ctx = M.getContext();
  auto Type = FunctionType::get(Type::getVoidTy(Ctx), false);
  Function *F = Function::Create(Type,
                                 Comdat ? GlobalValue::LinkOnceODRLinkage
                                        : GlobalValue::InternalLinkage,
                                 Name, &M);
  if (Comdat) {
    F->setVisibility(GlobalValue::HiddenVisibility);
    F->setComdat(M.getOrInsertComdat(Name));
  }

  // Add attributes so that we don't create a frame, don't emit unwind
  // information, and don't get inlined.
  AttrBuilder B(Ctx);
  B.addAttribute(llvm::Attribute::NoUnwind);
  B.addAttribute(llvm::Attribute::Naked);
  if (TargetAttrs != "")
    B.addAttribute("target-features", TargetAttrs);
  F->addFnAttrs(B);

  // Populate our function a bit so that we can verify.
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> Builder(Entry);

  Builder.CreateRetVoid();

  // MachineFunctions aren't created automatically for the IR-level constructs
  // we already made. Create them and insert them into the module.
  MachineFunction &MF = MMI.getOrCreateMachineFunction(*F);
  // A MachineBasicBlock must not be created for the Entry block; code
  // generation from an empty naked function in C source code also does not
  // generate one.  At least GlobalISel asserts if this invariant isn't
  // respected.

  // Set MF properties. We never use vregs...
  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
}

template <typename Derived, typename InsertedThunksTy>
bool ThunkInserter<Derived, InsertedThunksTy>::run(MachineModuleInfo &MMI,
                                                   MachineFunction &MF) {
  // If MF is not a thunk, check to see if we need to insert a thunk.
  if (!MF.getName().startswith(getDerived().getThunkPrefix())) {
    // Only add a thunk if one of the functions has the corresponding feature
    // enabled in its subtarget, and doesn't enable external thunks. The target
    // can use InsertedThunks to detect whether relevant thunks have already
    // been inserted.
    // FIXME: Conditionalize on indirect calls so we don't emit a thunk when
    // nothing will end up calling it.
    // FIXME: It's a little silly to look at every function just to enumerate
    // the subtargets, but eventually we'll want to look at them for indirect
    // calls, so maybe this is OK.
    if (!getDerived().mayUseThunk(MF, InsertedThunks))
      return false;

    InsertedThunks |= getDerived().insertThunks(MMI, MF);
    return true;
  }

  // If this *is* a thunk function, we need to populate it with the correct MI.
  getDerived().populateThunk(MF);
  return true;
}

} // namespace llvm

#endif // LLVM_CODEGEN_INDIRECTTHUNKS_H
//===- RegisterScavenging.h - Machine register scavenging -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file declares the machine register scavenger class. It can provide
/// information such as unused registers at any point in a machine basic block.
/// It also provides a mechanism to make registers available by evicting them
/// to spill slots.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGISTERSCAVENGING_H
#define LLVM_CODEGEN_REGISTERSCAVENGING_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"

namespace llvm {

class MachineInstr;
class TargetInstrInfo;
class TargetRegisterClass;
class TargetRegisterInfo;

class RegScavenger {
  const TargetRegisterInfo *TRI = nullptr;
  const TargetInstrInfo *TII = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  MachineBasicBlock *MBB = nullptr;
  MachineBasicBlock::iterator MBBI;
  unsigned NumRegUnits = 0;

  /// True if RegScavenger is currently tracking the liveness of registers.
  bool Tracking = false;

  /// Information on scavenged registers (held in a spill slot).
  struct ScavengedInfo {
    ScavengedInfo(int FI = -1) : FrameIndex(FI) {}

    /// A spill slot used for scavenging a register post register allocation.
    int FrameIndex;

    /// If non-zero, the specific register is currently being
    /// scavenged. That is, it is spilled to this scavenging stack slot.
    Register Reg;

    /// The instruction that restores the scavenged register from stack.
    const MachineInstr *Restore = nullptr;
  };

  /// A vector of information on scavenged registers.
  SmallVector<ScavengedInfo, 2> Scavenged;

  LiveRegUnits LiveUnits;

  // These BitVectors are only used internally to forward(). They are members
  // to avoid frequent reallocations.
  BitVector KillRegUnits, DefRegUnits;
  BitVector TmpRegUnits;

public:
  RegScavenger() = default;

  /// Record that \p Reg is in use at scavenging index \p FI. This is for
  /// targets which need to directly manage the spilling process, and need to
  /// update the scavenger's internal state. It is expected that this will be
  /// called a second time with \p Restore set to a non-null value, so that
  /// the externally inserted restore instruction resets the scavenged slot
  /// liveness when encountered.
  void assignRegToScavengingIndex(int FI, Register Reg,
                                  MachineInstr *Restore = nullptr) {
    for (ScavengedInfo &Slot : Scavenged) {
      if (Slot.FrameIndex == FI) {
        assert(!Slot.Reg || Slot.Reg == Reg);
        Slot.Reg = Reg;
        Slot.Restore = Restore;
        return;
      }
    }

    llvm_unreachable("did not find scavenging index");
  }

  /// Start tracking liveness from the beginning of basic block \p MBB.
  void enterBasicBlock(MachineBasicBlock &MBB);

  /// Start tracking liveness from the end of basic block \p MBB.
  /// Use backward() to move towards the beginning of the block. This is
  /// preferred to enterBasicBlock() and forward() because it does not depend
  /// on the presence of kill flags.
  void enterBasicBlockEnd(MachineBasicBlock &MBB);

  /// Move the internal MBB iterator and update register states.
  void forward();

  /// Move the internal MBB iterator and update register states until
  /// it has processed the specified iterator.
  void forward(MachineBasicBlock::iterator I) {
    while (!Tracking || MBBI != I)
      forward();
  }

  /// Update internal register state and move MBB iterator backwards.
  /// Contrary to unprocess() this method gives precise results even in the
  /// absence of kill flags.
  void backward();

  /// Call backward() as long as the internal iterator does not point to \p I.
  void backward(MachineBasicBlock::iterator I) {
    while (MBBI != I)
      backward();
  }

  /// Move the internal MBB iterator but do not update register states.
  void skipTo(MachineBasicBlock::iterator I) {
    if (I == MachineBasicBlock::iterator(nullptr))
      Tracking = false;
    MBBI = I;
  }

  MachineBasicBlock::iterator getCurrentPosition() const { return MBBI; }

  /// Return whether a specific register is currently used.
  bool isRegUsed(Register Reg, bool includeReserved = true) const;

  /// Return a bit vector of all available registers in the register class
  /// \p RC.
  BitVector getRegsAvailable(const TargetRegisterClass *RC);

  /// Find an unused register of the specified register class.
  /// Return 0 if none is found.
  Register FindUnusedReg(const TargetRegisterClass *RC) const;

  /// Add a scavenging frame index.
  void addScavengingFrameIndex(int FI) {
    Scavenged.push_back(ScavengedInfo(FI));
  }

  /// Query whether a frame index is a scavenging frame index.
  bool isScavengingFrameIndex(int FI) const {
    for (const ScavengedInfo &SI : Scavenged)
      if (SI.FrameIndex == FI)
        return true;

    return false;
  }

  /// Get an array of scavenging frame indices.
  void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
    for (const ScavengedInfo &I : Scavenged)
      if (I.FrameIndex >= 0)
        A.push_back(I.FrameIndex);
  }

  /// Make a register of the specified register class available from the
  /// current position backwards to the place before \p To. If \p RestoreAfter
  /// is true this includes the instruction following the current position.
  /// SPAdj is the stack adjustment due to call frame; it is passed along to
  /// eliminateFrameIndex().
  /// Returns the scavenged register.
  ///
  /// If \p AllowSpill is false, fail if a spill is required to make the
  /// register available, and return NoRegister.
  Register scavengeRegisterBackwards(const TargetRegisterClass &RC,
                                     MachineBasicBlock::iterator To,
                                     bool RestoreAfter, int SPAdj,
                                     bool AllowSpill = true);
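
  // A typical (illustrative) use from a target's frame-index elimination,
  // scanning a block backwards; RC and MI are assumed from the caller:
  //
  //   RegScavenger RS;
  //   RS.enterBasicBlockEnd(*MI.getParent());
  //   RS.backward(std::next(MI.getIterator()));
  //   Register Tmp = RS.scavengeRegisterBackwards(RC, MI.getIterator(),
  //                                               /*RestoreAfter=*/false,
  //                                               /*SPAdj=*/0);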

  /// Tell the scavenger a register is used.
  void setRegUsed(Register Reg, LaneBitmask LaneMask = LaneBitmask::getAll());

private:
  /// Returns true if a register is reserved. It is never "unused".
  bool isReserved(Register Reg) const { return MRI->isReserved(Reg); }

  /// setUsed / setUnused - Mark the state of one or a number of register units.
  ///
  void setUsed(const BitVector &RegUnits) {
    LiveUnits.addUnits(RegUnits);
  }
  void setUnused(const BitVector &RegUnits) {
    LiveUnits.removeUnits(RegUnits);
  }

  /// Processes the current instruction and fills the KillRegUnits and
  /// DefRegUnits bit vectors.
  void determineKillsAndDefs();

  /// Add all Reg Units that Reg contains to BV.
  void addRegUnits(BitVector &BV, MCRegister Reg);

  /// Remove all Reg Units that \p Reg contains from \p BV.
  void removeRegUnits(BitVector &BV, MCRegister Reg);

  /// Initialize RegisterScavenger.
  void init(MachineBasicBlock &MBB);

  /// Spill a register after position \p Before and reload it before position
  /// \p UseMI.
  ScavengedInfo &spill(Register Reg, const TargetRegisterClass &RC, int SPAdj,
                       MachineBasicBlock::iterator Before,
                       MachineBasicBlock::iterator &UseMI);
};

/// Replaces all frame index virtual registers with physical registers. Uses the
/// register scavenger to find an appropriate register to use.
void scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS);

} // end namespace llvm

#endif // LLVM_CODEGEN_REGISTERSCAVENGING_H
//===----- MIRSampleProfile.h: SampleFDO Support in MIR ---*- c++ -*-------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the supporting functions for the machine-level Sample
// FDO loader. This is used in Flow Sensitive SampleFDO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MIRSAMPLEPROFILE_H
#define LLVM_CODEGEN_MIRSAMPLEPROFILE_H

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Discriminator.h"
#include <memory>
#include <string>

namespace llvm {
class AnalysisUsage;
class MachineBlockFrequencyInfo;
class MachineFunction;
class Module;

namespace vfs {
class FileSystem;
} // namespace vfs

using namespace sampleprof;

class MIRProfileLoader;
class MIRProfileLoaderPass : public MachineFunctionPass {
  MachineFunction *MF;
  std::string ProfileFileName;
  FSDiscriminatorPass P;
  unsigned LowBit;
  unsigned HighBit;

public:
  static char ID;
  /// FS bits will only use the '1' bits in the Mask.
  MIRProfileLoaderPass(std::string FileName = "",
                       std::string RemappingFileName = "",
                       FSDiscriminatorPass P = FSDiscriminatorPass::Pass1,
                       IntrusiveRefCntPtr<vfs::FileSystem> FS = nullptr);

  /// getMachineFunction - Return the last machine function computed.
  const MachineFunction *getMachineFunction() const { return MF; }

  StringRef getPassName() const override { return "SampleFDO loader in MIR"; }

private:
  void init(MachineFunction &MF);
  bool runOnMachineFunction(MachineFunction &) override;
  bool doInitialization(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  std::unique_ptr<MIRProfileLoader> MIRSampleLoader;
  /// Hold the information of the basic block frequency.
  MachineBlockFrequencyInfo *MBFI;
};

} // namespace llvm

#endif // LLVM_CODEGEN_MIRSAMPLEPROFILE_H
//===- LiveIntervals.h - Live Interval Analysis -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file implements the LiveInterval analysis pass.  Given some
/// numbering of each of the machine instructions (in this implementation
/// depth-first order) an interval [i, j) is said to be a live interval for
/// register v if
/// there is no instruction with number j' > j such that v is live at j' and
/// there is no instruction with number i' < i such that v is live at i'. In
/// this implementation intervals can have holes, i.e. an interval might look
/// like [1,20), [50,65), [1000,1001).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVALS_H
#define LLVM_CODEGEN_LIVEINTERVALS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <utility>

namespace llvm {

extern cl::opt<bool> UseSegmentSetForPhysRegs;

class BitVector;
class LiveIntervalCalc;
class MachineBlockFrequencyInfo;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class raw_ostream;
class TargetInstrInfo;
class VirtRegMap;

  class LiveIntervals : public MachineFunctionPass {
    MachineFunction *MF = nullptr;
    MachineRegisterInfo *MRI = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    const TargetInstrInfo *TII = nullptr;
    SlotIndexes *Indexes = nullptr;
    MachineDominatorTree *DomTree = nullptr;
    LiveIntervalCalc *LICalc = nullptr;

    /// Special pool allocator for VNInfo's (LiveInterval val#).
    VNInfo::Allocator VNInfoAllocator;

    /// Live interval pointers for all the virtual registers.
    IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;

    /// Sorted list of instructions with register mask operands. Always use the
    /// 'r' slot, RegMasks are normal clobbers, not early clobbers.
    SmallVector<SlotIndex, 8> RegMaskSlots;

    /// This vector is parallel to RegMaskSlots, it holds a pointer to the
    /// corresponding register mask.  This pointer can be recomputed as:
    ///
    ///   MI = Indexes->getInstructionFromIndex(RegMaskSlots[N]);
    ///   unsigned OpNum = findRegMaskOperand(MI);
    ///   RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
    ///
    /// This is kept in a separate vector partly because some standard
    /// libraries don't support lower_bound() with mixed objects, partly to
    /// improve locality when searching in RegMaskSlots.
    /// Also see the comment in LiveInterval::find().
    SmallVector<const uint32_t*, 8> RegMaskBits;

    /// For each basic block number, keep (begin, size) pairs indexing into the
    /// RegMaskSlots and RegMaskBits arrays.
    /// Note that basic block numbers may not be layout contiguous, that's why
    /// we can't just keep track of the first register mask in each basic
    /// block.
    SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;

    /// Keeps a live range set for each register unit to track fixed physreg
    /// interference.
    SmallVector<LiveRange*, 0> RegUnitRanges;

  public:
    static char ID;

    LiveIntervals();
    ~LiveIntervals() override;

    /// Calculate the spill weight to assign to a single instruction.
    static float getSpillWeight(bool isDef, bool isUse,
                                const MachineBlockFrequencyInfo *MBFI,
                                const MachineInstr &MI);

    /// Calculate the spill weight to assign to a single instruction.
    static float getSpillWeight(bool isDef, bool isUse,
                                const MachineBlockFrequencyInfo *MBFI,
                                const MachineBasicBlock *MBB);

    LiveInterval &getInterval(Register Reg) {
      if (hasInterval(Reg))
        return *VirtRegIntervals[Reg.id()];

      return createAndComputeVirtRegInterval(Reg);
    }

    const LiveInterval &getInterval(Register Reg) const {
      return const_cast<LiveIntervals*>(this)->getInterval(Reg);
    }

    bool hasInterval(Register Reg) const {
      return VirtRegIntervals.inBounds(Reg.id()) &&
             VirtRegIntervals[Reg.id()];
    }

    /// Interval creation.
    LiveInterval &createEmptyInterval(Register Reg) {
      assert(!hasInterval(Reg) && "Interval already exists!");
      VirtRegIntervals.grow(Reg.id());
      VirtRegIntervals[Reg.id()] = createInterval(Reg);
      return *VirtRegIntervals[Reg.id()];
    }

    LiveInterval &createAndComputeVirtRegInterval(Register Reg) {
      LiveInterval &LI = createEmptyInterval(Reg);
      computeVirtRegInterval(LI);
      return LI;
    }

    /// Interval removal.
    void removeInterval(Register Reg) {
      delete VirtRegIntervals[Reg];
      VirtRegIntervals[Reg] = nullptr;
    }

    /// Given a register and an instruction, adds a live segment from that
    /// instruction to the end of its MBB.
    LiveInterval::Segment addSegmentToEndOfBlock(Register Reg,
                                                 MachineInstr &startInst);

    /// After removing some uses of a register, shrink its live range to just
    /// the remaining uses. This method does not compute reaching defs for new
    /// uses, and it doesn't remove dead defs.
    /// Dead PHIDef values are marked as unused. New dead machine instructions
    /// are added to the dead vector. Returns true if the interval may have been
    /// separated into multiple connected components.
    bool shrinkToUses(LiveInterval *li,
                      SmallVectorImpl<MachineInstr*> *dead = nullptr);

    /// Specialized version of
    /// shrinkToUses(LiveInterval *li, SmallVectorImpl<MachineInstr*> *dead)
    /// that works on a subregister live range and only looks at uses matching
    /// the lane mask of the subregister range.
    /// This may leave the subrange empty which needs to be cleaned up with
    /// LiveInterval::removeEmptySubranges() afterwards.
    void shrinkToUses(LiveInterval::SubRange &SR, Register Reg);

    /// Extend the live range \p LR to reach all points in \p Indices. The
    /// points in the \p Indices array must be jointly dominated by the union
    /// of the existing defs in \p LR and points in \p Undefs.
    ///
    /// PHI-defs are added as needed to maintain SSA form.
    ///
    /// If a SlotIndex in \p Indices is the end index of a basic block, \p LR
    /// will be extended to be live out of the basic block.
    /// If a SlotIndex in \p Indices is jointly dominated only by points in
    /// \p Undefs, the live range will not be extended to that point.
    ///
    /// See also LiveRangeCalc::extend().
    void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices,
                         ArrayRef<SlotIndex> Undefs);

    void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices) {
      extendToIndices(LR, Indices, /*Undefs=*/{});
    }

    /// If \p LR has a live value at \p Kill, prune its live range by removing
    /// any liveness reachable from Kill. Add live range end points to
    /// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
    /// value's live range.
    ///
    /// Calling pruneValue() and extendToIndices() can be used to reconstruct
    /// SSA form after adding defs to a virtual register.
    void pruneValue(LiveRange &LR, SlotIndex Kill,
                    SmallVectorImpl<SlotIndex> *EndPoints);
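
    // An illustrative sketch of the reconstruction idiom described above
    // (LIS, LI, and NewDefIdx are hypothetical):
    //
    //   SmallVector<SlotIndex, 8> EndPoints;
    //   LIS.pruneValue(LI, NewDefIdx, &EndPoints);
    //   VNInfo *VNI = LI.getNextValue(NewDefIdx, LIS.getVNInfoAllocator());
    //   LI.addSegment(
    //       LiveRange::Segment(NewDefIdx, NewDefIdx.getDeadSlot(), VNI));
    //   LIS.extendToIndices(LI, EndPoints);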

    /// This function should not be used. Its intent is to tell you that you are
    /// doing something wrong if you call pruneValue directly on a
    /// LiveInterval. Indeed, you are supposed to call pruneValue on the main
    /// LiveRange and all the LiveRanges of the subranges if any.
    LLVM_ATTRIBUTE_UNUSED void pruneValue(LiveInterval &, SlotIndex,
                                          SmallVectorImpl<SlotIndex> *) {
      llvm_unreachable(
          "Use pruneValue on the main LiveRange and on each subrange");
    }

    SlotIndexes *getSlotIndexes() const {
      return Indexes;
    }

    /// Returns true if the specified machine instr has been removed or was
    /// never entered in the map.
    bool isNotInMIMap(const MachineInstr &Instr) const {
      return !Indexes->hasIndex(Instr);
    }

    /// Returns the base index of the given instruction.
    SlotIndex getInstructionIndex(const MachineInstr &Instr) const {
      return Indexes->getInstructionIndex(Instr);
    }

    /// Returns the instruction associated with the given index.
    MachineInstr* getInstructionFromIndex(SlotIndex index) const {
      return Indexes->getInstructionFromIndex(index);
    }

    /// Return the first index in the given basic block.
    SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
      return Indexes->getMBBStartIdx(mbb);
    }

    /// Return the last index in the given basic block.
    SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
      return Indexes->getMBBEndIdx(mbb);
    }

    bool isLiveInToMBB(const LiveRange &LR,
                       const MachineBasicBlock *mbb) const {
      return LR.liveAt(getMBBStartIdx(mbb));
    }

    bool isLiveOutOfMBB(const LiveRange &LR,
                        const MachineBasicBlock *mbb) const {
      return LR.liveAt(getMBBEndIdx(mbb).getPrevSlot());
    }

    MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
      return Indexes->getMBBFromIndex(index);
    }

    void insertMBBInMaps(MachineBasicBlock *MBB) {
      Indexes->insertMBBInMaps(MBB);
      assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
             "Blocks must be added in order.");
      RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
    }

    SlotIndex InsertMachineInstrInMaps(MachineInstr &MI) {
      return Indexes->insertMachineInstrInMaps(MI);
    }

    void InsertMachineInstrRangeInMaps(MachineBasicBlock::iterator B,
                                       MachineBasicBlock::iterator E) {
      for (MachineBasicBlock::iterator I = B; I != E; ++I)
        Indexes->insertMachineInstrInMaps(*I);
    }

    void RemoveMachineInstrFromMaps(MachineInstr &MI) {
      Indexes->removeMachineInstrFromMaps(MI);
    }

    SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI) {
      return Indexes->replaceMachineInstrInMaps(MI, NewMI);
    }

    VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }

    void getAnalysisUsage(AnalysisUsage &AU) const override;
    void releaseMemory() override;

    /// Pass entry point; calculates LiveIntervals.
    bool runOnMachineFunction(MachineFunction&) override;

    /// Implement the dump method.
    void print(raw_ostream &O, const Module* = nullptr) const override;

    /// If LI is confined to a single basic block, return a pointer to that
    /// block.  If LI is live in to or out of any block, return NULL.
    MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;

    /// Returns true if VNI is killed by any PHI-def values in LI.
    /// This may conservatively return true to avoid expensive computations.
    bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;

    /// Add kill flags to any instruction that kills a virtual register.
    void addKillFlags(const VirtRegMap*);

    /// Call this method to notify LiveIntervals that instruction \p MI has been
    /// moved within a basic block. This will update the live intervals for all
    /// operands of \p MI. Moves between basic blocks are not supported.
    ///
    /// \param UpdateFlags Update live intervals for nonallocatable physregs.
    void handleMove(MachineInstr &MI, bool UpdateFlags = false);

    /// Update intervals of operands of all instructions in the newly
    /// created bundle specified by \p BundleStart.
    ///
    /// \param UpdateFlags Update live intervals for nonallocatable physregs.
    ///
    /// Assumes existing liveness is accurate.
    /// \pre BundleStart should be the first instruction in the Bundle.
    /// \pre BundleStart should not have a SlotIndex, as one will be assigned.
    void handleMoveIntoNewBundle(MachineInstr &BundleStart,
                                 bool UpdateFlags = false);

    /// Update live intervals for instructions in a range of iterators. It is
    /// intended for use after target hooks that may insert or remove
    /// instructions, and is only efficient for a small number of instructions.
    ///
    /// OrigRegs is a vector of registers that were originally used by the
    /// instructions in the range between the two iterators.
    ///
    /// Currently, the only changes that are supported are simple removal
    /// and addition of uses.
    void repairIntervalsInRange(MachineBasicBlock *MBB,
                                MachineBasicBlock::iterator Begin,
                                MachineBasicBlock::iterator End,
                                ArrayRef<Register> OrigRegs);

    // Register mask functions.
    //
    // Machine instructions may use a register mask operand to indicate that a
    // large number of registers are clobbered by the instruction.  This is
    // typically used for calls.
    //
    // For compile time performance reasons, these clobbers are not recorded in
    // the live intervals for individual physical registers.  Instead,
    // LiveIntervalAnalysis maintains a sorted list of instructions with
    // register mask operands.

    /// Returns a sorted array of slot indices of all instructions with
    /// register mask operands.
    ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }

    /// Returns a sorted array of slot indices of all instructions with register
    /// mask operands in the basic block numbered \p MBBNum.
    ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
      return getRegMaskSlots().slice(P.first, P.second);
    }

    /// Returns an array of register mask pointers corresponding to
    /// getRegMaskSlots().
    ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }

    /// Returns an array of mask pointers corresponding to
    /// getRegMaskSlotsInBlock(MBBNum).
    ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
      std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
      return getRegMaskBits().slice(P.first, P.second);
    }

    /// Test if \p LI is live across any register mask instructions, and
    /// compute a bit mask of physical registers that are not clobbered by any
    /// of them.
    ///
    /// Returns false if \p LI doesn't cross any register mask instructions. In
    /// that case, the bit vector is not filled in.
    bool checkRegMaskInterference(const LiveInterval &LI,
                                  BitVector &UsableRegs);

    // Register unit functions.
    //
    // Fixed interference occurs when MachineInstrs use physregs directly
    // instead of virtual registers. This typically happens when passing
    // arguments to a function call, or when instructions require operands in
    // fixed registers.
    //
    // Each physreg has one or more register units, see MCRegisterInfo. We
    // track liveness per register unit to handle aliasing registers more
    // efficiently.

    /// Return the live range for register unit \p Unit. It will be computed if
    /// it doesn't exist.
    LiveRange &getRegUnit(unsigned Unit) {
      LiveRange *LR = RegUnitRanges[Unit];
      if (!LR) {
        // Compute missing ranges on demand.
        // Use segment set to speed-up initial computation of the live range.
        RegUnitRanges[Unit] = LR = new LiveRange(UseSegmentSetForPhysRegs);
        computeRegUnitRange(*LR, Unit);
      }
      return *LR;
    }

    /// Return the live range for register unit \p Unit if it has already been
    /// computed, or nullptr if it hasn't been computed yet.
    LiveRange *getCachedRegUnit(unsigned Unit) {
      return RegUnitRanges[Unit];
    }

    const LiveRange *getCachedRegUnit(unsigned Unit) const {
      return RegUnitRanges[Unit];
    }

    /// Remove computed live range for register unit \p Unit. Subsequent uses
    /// should rely on on-demand recomputation.
    void removeRegUnit(unsigned Unit) {
      delete RegUnitRanges[Unit];
      RegUnitRanges[Unit] = nullptr;
    }

    /// Remove associated live ranges for the register units associated with \p
    /// Reg. Subsequent uses should rely on on-demand recomputation.  \note This
    /// method can result in inconsistent liveness tracking if multiple physical
    /// registers share a regunit, and should be used cautiously.
    void removeAllRegUnitsForPhysReg(MCRegister Reg) {
      for (MCRegUnit Unit : TRI->regunits(Reg))
        removeRegUnit(Unit);
    }

    /// Remove value numbers and related live segments starting at position
    /// \p Pos that are part of any liverange of physical register \p Reg or one
    /// of its subregisters.
    void removePhysRegDefAt(MCRegister Reg, SlotIndex Pos);

    /// Remove value number and related live segments of \p LI and its subranges
    /// that start at position \p Pos.
    void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos);

    /// Split separate components in LiveInterval \p LI into separate intervals.
    void splitSeparateComponents(LiveInterval &LI,
                                 SmallVectorImpl<LiveInterval*> &SplitLIs);

    /// For live interval \p LI with correct SubRanges construct matching
    /// information for the main live range. Expects the main live range to not
    /// have any segments or value numbers.
    void constructMainRangeFromSubranges(LiveInterval &LI);

  private:
    /// Compute live intervals for all virtual registers.
    void computeVirtRegs();

    /// Compute RegMaskSlots and RegMaskBits.
    void computeRegMasks();

    /// Walk the values in \p LI and check for dead values:
    /// - Dead PHIDef values are marked as unused.
    /// - Dead operands are marked as such.
    /// - Completely dead machine instructions are added to the \p dead vector
    ///   if it is not nullptr.
    /// Returns true if any PHI value numbers have been removed which may
    /// have separated the interval into multiple connected components.
    bool computeDeadValues(LiveInterval &LI,
                           SmallVectorImpl<MachineInstr*> *dead);

    static LiveInterval *createInterval(Register Reg);

    void printInstrs(raw_ostream &O) const;
    void dumpInstrs() const;

    void computeLiveInRegUnits();
    void computeRegUnitRange(LiveRange&, unsigned Unit);
    bool computeVirtRegInterval(LiveInterval&);

    using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
    void extendSegmentsToUses(LiveRange &Segments,
                              ShrinkToUsesWorkList &WorkList, Register Reg,
                              LaneBitmask LaneMask);

    /// Helper function for repairIntervalsInRange(), walks backwards and
    /// creates/modifies live segments in \p LR to match the operands found.
    /// Only full operands or operands with subregisters matching \p LaneMask
    /// are considered.
    void repairOldRegInRange(MachineBasicBlock::iterator Begin,
                             MachineBasicBlock::iterator End,
                             const SlotIndex endIdx, LiveRange &LR,
                             Register Reg,
                             LaneBitmask LaneMask = LaneBitmask::getAll());

    class HMEditor;
  };

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEINTERVALS_H
//===- llvm/CodeGen/DwarfStringPoolEntry.h - String pool entry --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H

#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringMap.h"

namespace llvm {

class MCSymbol;

/// Data for a string pool entry.
struct DwarfStringPoolEntry {
  static constexpr unsigned NotIndexed = -1;

  MCSymbol *Symbol = nullptr;
  uint64_t Offset = 0;
  unsigned Index = 0;

  bool isIndexed() const { return Index != NotIndexed; }
};

/// DwarfStringPoolEntryRef: Dwarf string pool entry reference.
///
/// A dwarf string pool entry keeps the string value and its data.
/// There are two variants of how the data are represented:
///
///   1. By value - StringMapEntry<DwarfStringPoolEntry>.
///   2. By pointer - StringMapEntry<DwarfStringPoolEntry *>.
///
/// The "By pointer" variant reduces memory usage for the case when a string
/// pool entry does not have data: it keeps a null pointer, so there is no
/// need to waste space on a full DwarfStringPoolEntry.
/// It is recommended to use the "By pointer" variant if not all entries
/// of the dwarf string pool have a corresponding DwarfStringPoolEntry.
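///
/// For example (illustrative; Pool and Str are assumed from the caller),
/// given a pool kept in a StringMap<DwarfStringPoolEntry> named Pool:
///
///   auto &Entry = *Pool.insert({Str, DwarfStringPoolEntry()}).first;
///   DwarfStringPoolEntryRef Ref(Entry);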

class DwarfStringPoolEntryRef {
  /// Pointer type for "By value" string entry.
  using ByValStringEntryPtr = const StringMapEntry<DwarfStringPoolEntry> *;

  /// Pointer type for "By pointer" string entry.
  using ByPtrStringEntryPtr = const StringMapEntry<DwarfStringPoolEntry *> *;

  /// Pointer to the dwarf string pool Entry.
  PointerUnion<ByValStringEntryPtr, ByPtrStringEntryPtr> MapEntry = nullptr;

public:
  DwarfStringPoolEntryRef() = default;

  /// ASSUMPTION: DwarfStringPoolEntryRef keeps pointer to \p Entry,
  /// thus the specified entry must not be reallocated.
  DwarfStringPoolEntryRef(const StringMapEntry<DwarfStringPoolEntry> &Entry)
      : MapEntry(&Entry) {}

  /// ASSUMPTION: DwarfStringPoolEntryRef keeps pointer to \p Entry,
  /// thus the specified entry must not be reallocated.
  DwarfStringPoolEntryRef(const StringMapEntry<DwarfStringPoolEntry *> &Entry)
      : MapEntry(&Entry) {
    assert(cast<ByPtrStringEntryPtr>(MapEntry)->second != nullptr);
  }

  explicit operator bool() const { return !MapEntry.isNull(); }

  /// \returns symbol for the dwarf string.
  MCSymbol *getSymbol() const {
    assert(getEntry().Symbol && "No symbol available!");
    return getEntry().Symbol;
  }

  /// \returns offset for the dwarf string.
  uint64_t getOffset() const { return getEntry().Offset; }

  /// \returns index for the dwarf string.
  unsigned getIndex() const {
    assert(getEntry().isIndexed() && "Index is not set!");
    return getEntry().Index;
  }

  /// \returns string.
  StringRef getString() const {
    if (isa<ByValStringEntryPtr>(MapEntry))
      return cast<ByValStringEntryPtr>(MapEntry)->first();

    return cast<ByPtrStringEntryPtr>(MapEntry)->first();
  }

  /// \returns the entire string pool entry for convenience.
  const DwarfStringPoolEntry &getEntry() const {
    if (isa<ByValStringEntryPtr>(MapEntry))
      return cast<ByValStringEntryPtr>(MapEntry)->second;

    return *cast<ByPtrStringEntryPtr>(MapEntry)->second;
  }

  bool operator==(const DwarfStringPoolEntryRef &X) const {
    return MapEntry.getOpaqueValue() == X.MapEntry.getOpaqueValue();
  }

  bool operator!=(const DwarfStringPoolEntryRef &X) const {
    return MapEntry.getOpaqueValue() != X.MapEntry.getOpaqueValue();
  }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
//===- LiveIntervalCalc.h - Calculate live intervals -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LiveIntervalCalc class is an extension of LiveRangeCalc targeted to the
// computation and modification of the LiveInterval variants of LiveRanges.
// LiveIntervals are meant to track liveness of registers and stack slots and
// LiveIntervalCalc adds to LiveRangeCalc all the machinery required to
// construct the liveness of virtual registers tracked by a LiveInterval.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVEINTERVALCALC_H
#define LLVM_CODEGEN_LIVEINTERVALCALC_H

#include "llvm/CodeGen/LiveRangeCalc.h"

namespace llvm {

template <class NodeT> class DomTreeNodeBase;

using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;

class LiveIntervalCalc : public LiveRangeCalc {
  /// Extend the live range of @p LR to reach all uses of Reg.
  ///
  /// If @p LR is a main range, or if @p LI is null, then all uses must be
  /// jointly dominated by the definitions from @p LR. If @p LR is a subrange
  /// of the live interval @p LI, corresponding to lane mask @p LaneMask,
  /// all uses must be jointly dominated by the definitions from @p LR
  /// together with definitions of other lanes where @p LR becomes undefined
  /// (via <def,read-undef> operands).
  /// If @p LR is a main range, the @p LaneMask should be set to ~0, i.e.
  /// LaneBitmask::getAll().
  void extendToUses(LiveRange &LR, Register Reg, LaneBitmask LaneMask,
                    LiveInterval *LI = nullptr);

public:
  LiveIntervalCalc() = default;

  /// createDeadDefs - Create a dead def in \p LR for every def operand of
  /// \p Reg. Each instruction defining \p Reg gets a new VNInfo with a
  /// corresponding minimal live range.
  void createDeadDefs(LiveRange &LR, Register Reg);

  /// Extend the live range of @p LR to reach all uses of Reg.
  ///
  /// All uses must be jointly dominated by existing liveness.  PHI-defs are
  /// inserted as needed to preserve SSA form.
  void extendToUses(LiveRange &LR, MCRegister PhysReg) {
    extendToUses(LR, PhysReg, LaneBitmask::getAll());
  }

  /// Calculates liveness for the register specified in live interval @p LI.
  /// Creates subregister live ranges as needed if subreg liveness tracking is
  /// enabled.
  void calculate(LiveInterval &LI, bool TrackSubRegs);

  /// For live interval \p LI with correct SubRanges construct matching
  /// information for the main live range. Expects the main live range to not
  /// have any segments or value numbers.
  void constructMainRangeFromSubranges(LiveInterval &LI);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVEINTERVALCALC_H
//===- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a class to be used as the base class for target specific
// asm writers.  This class primarily handles common functionality used by
// all asm writers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ASMPRINTER_H
#define LLVM_CODEGEN_ASMPRINTER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/AsmPrinterHandler.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

class AddrLabelMap;
class BasicBlock;
class BlockAddress;
class Constant;
class ConstantArray;
class DataLayout;
class DIE;
class DIEAbbrev;
class DwarfDebug;
class GCMetadataPrinter;
class GCStrategy;
class GlobalAlias;
class GlobalObject;
class GlobalValue;
class GlobalVariable;
class MachineBasicBlock;
class MachineConstantPoolValue;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoopInfo;
class MachineModuleInfo;
class MachineOptimizationRemarkEmitter;
class MCAsmInfo;
class MCCFIInstruction;
class MCContext;
class MCExpr;
class MCInst;
class MCSection;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;
class MCTargetOptions;
class MDNode;
class Module;
class PseudoProbeHandler;
class raw_ostream;
class StringRef;
class TargetLoweringObjectFile;
class TargetMachine;
class Twine;

namespace remarks {
class RemarkStreamer;
}

/// This class is intended to be used as a driving class for all asm writers.
class AsmPrinter : public MachineFunctionPass {
public:
  /// Target machine description.
  TargetMachine &TM;

  /// Target Asm Printer information.
  const MCAsmInfo *MAI = nullptr;

  /// This is the context for the output file that we are streaming. This owns
  /// all of the global MC-related objects for the generated translation unit.
  MCContext &OutContext;

  /// This is the MCStreamer object for the file we are generating. This
  /// contains the transient state for the current translation unit that we are
  /// generating (such as the current section etc).
  std::unique_ptr<MCStreamer> OutStreamer;

  /// The current machine function.
  MachineFunction *MF = nullptr;

  /// This is a pointer to the current MachineModuleInfo.
  MachineModuleInfo *MMI = nullptr;

  /// This is a pointer to the current MachineDominatorTree.
  MachineDominatorTree *MDT = nullptr;

  /// This is a pointer to the current MachineLoopInfo.
  MachineLoopInfo *MLI = nullptr;

  /// Optimization remark emitter.
  MachineOptimizationRemarkEmitter *ORE = nullptr;

  /// The symbol for the entry in __patchable_function_entries.
  MCSymbol *CurrentPatchableFunctionEntrySym = nullptr;

  /// The symbol for the current function. This is recalculated at the beginning
  /// of each call to runOnMachineFunction().
  MCSymbol *CurrentFnSym = nullptr;

  /// The symbol for the current function descriptor on AIX. This is created
  /// at the beginning of each call to SetupMachineFunction().
  MCSymbol *CurrentFnDescSym = nullptr;

  /// The symbol used to represent the start of the current function for the
  /// purpose of calculating its size (e.g. using the .size directive). By
  /// default, this is equal to CurrentFnSym.
  MCSymbol *CurrentFnSymForSize = nullptr;

  /// Map a basic block section ID to the begin and end symbols of that
  /// section, which determine the section's range.
  struct MBBSectionRange {
    MCSymbol *BeginLabel, *EndLabel;
  };

  MapVector<unsigned, MBBSectionRange> MBBSectionRanges;

  /// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
  /// their number of uses by other globals.
  using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
  MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;

  /// struct HandlerInfo and Handlers permit users or target-extended
  /// AsmPrinters to add their own handlers.
  struct HandlerInfo {
    std::unique_ptr<AsmPrinterHandler> Handler;
    StringRef TimerName;
    StringRef TimerDescription;
    StringRef TimerGroupName;
    StringRef TimerGroupDescription;

    HandlerInfo(std::unique_ptr<AsmPrinterHandler> Handler, StringRef TimerName,
                StringRef TimerDescription, StringRef TimerGroupName,
                StringRef TimerGroupDescription)
        : Handler(std::move(Handler)), TimerName(TimerName),
          TimerDescription(TimerDescription), TimerGroupName(TimerGroupName),
          TimerGroupDescription(TimerGroupDescription) {}
  };

  // Flags representing which CFI section is required for a function/module.
  enum class CFISection : unsigned {
    None = 0, ///< Do not emit either .eh_frame or .debug_frame
    EH = 1,   ///< Emit .eh_frame
    Debug = 2 ///< Emit .debug_frame
  };

private:
  MCSymbol *CurrentFnEnd = nullptr;

  /// Map a basic block section ID to the exception symbol associated with that
  /// section. Map entries are assigned and looked up via
  /// AsmPrinter::getMBBExceptionSym.
  DenseMap<unsigned, MCSymbol *> MBBSectionExceptionSyms;

  // The symbol used to represent the start of the current BB section of the
  // function. This is used to calculate the size of the BB section.
  MCSymbol *CurrentSectionBeginSym = nullptr;

  /// This map keeps track of which symbol is being used for the specified
  /// basic block's address when that address is taken (address-of-label).
  std::unique_ptr<AddrLabelMap> AddrLabelSymbols;

  /// The garbage collection metadata printer table.
  DenseMap<GCStrategy *, std::unique_ptr<GCMetadataPrinter>> GCMetadataPrinters;

  /// Emit comments in assembly output if this is true.
  bool VerboseAsm;

  /// Output stream for the stack usage file (i.e., .su file).
  std::unique_ptr<raw_fd_ostream> StackUsageStream;

  /// List of symbols to be inserted into PC sections.
  DenseMap<const MDNode *, SmallVector<const MCSymbol *>> PCSectionsSymbols;

  static char ID;

protected:
  MCSymbol *CurrentFnBegin = nullptr;

  /// For dso_local functions, the current $local alias for the function.
  MCSymbol *CurrentFnBeginLocal = nullptr;

  /// A vector of all debug/EH info emitters we should use. This vector
  /// maintains ownership of the emitters.
  std::vector<HandlerInfo> Handlers;
  size_t NumUserHandlers = 0;

  StackMaps SM;

private:
  /// If generated on the fly, this owns the instance.
  std::unique_ptr<MachineDominatorTree> OwnedMDT;

  /// If generated on the fly, this owns the instance.
  std::unique_ptr<MachineLoopInfo> OwnedMLI;

  /// If the target supports dwarf debug info, this pointer is non-null.
  DwarfDebug *DD = nullptr;

  /// A handler that supports pseudo probe emission with embedded inline
  /// context.
  PseudoProbeHandler *PP = nullptr;

  /// CFISection type the module needs, i.e. either .eh_frame or .debug_frame.
  CFISection ModuleCFISection = CFISection::None;

  /// True if the module contains split-stack functions. This is used to emit
  /// the .note.GNU-split-stack section, as required by the linker for the
  /// special handling of a split-stack function calling a no-split-stack
  /// function.
  bool HasSplitStack = false;

  /// True if the module contains no-split-stack functions. Together with
  /// HasSplitStack, this is used to emit the .note.GNU-no-split-stack section
  /// when the module mixes split-stack and no-split-stack functions.
  bool HasNoSplitStack = false;

  /// Output stream for dumping machine basic block frequencies if the
  /// --mbb-profile-dump flag is set for downstream cost-modelling
  /// applications.
  std::unique_ptr<raw_fd_ostream> MBBProfileDumpFileOutput;

protected:
  explicit AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer);

public:
  ~AsmPrinter() override;

  DwarfDebug *getDwarfDebug() { return DD; }
  DwarfDebug *getDwarfDebug() const { return DD; }

  uint16_t getDwarfVersion() const;
  void setDwarfVersion(uint16_t Version);

  bool isDwarf64() const;

  /// Returns 4 for DWARF32 and 8 for DWARF64.
  unsigned int getDwarfOffsetByteSize() const;

  /// Returns 4 for DWARF32 and 12 for DWARF64.
  unsigned int getUnitLengthFieldByteSize() const;

  /// Returns information about the byte size of DW_FORM values.
  dwarf::FormParams getDwarfFormParams() const;

  bool isPositionIndependent() const;

  /// Return true if assembly output should contain comments.
  bool isVerbose() const { return VerboseAsm; }

  /// Return a unique ID for the current function.
  unsigned getFunctionNumber() const;

  /// Return the symbol for the function's pseudo stack, if the stack frame is
  /// not register based.
  virtual const MCSymbol *getFunctionFrameSymbol() const { return nullptr; }

  MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
  MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }

  // Return the exception symbol associated with the MBB section containing a
  // given basic block.
  MCSymbol *getMBBExceptionSym(const MachineBasicBlock &MBB);

  /// Return the symbol to be used for the specified basic block when its
  /// address is taken.  This cannot be its normal LBB label because the block
  /// may be accessed outside its containing function.
  MCSymbol *getAddrLabelSymbol(const BasicBlock *BB) {
    return getAddrLabelSymbolToEmit(BB).front();
  }

  /// Return the symbol to be used for the specified basic block when its
  /// address is taken.  If other blocks were RAUW'd to this one, we may have
  /// to emit them as well, return the whole set.
  ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(const BasicBlock *BB);

  /// If the specified function has had any references to address-taken blocks
  /// generated, but the block got deleted, return the symbol now so we can
  /// emit it.  This prevents emitting a reference to a symbol that has no
  /// definition.
  void takeDeletedSymbolsForFunction(const Function *F,
                                     std::vector<MCSymbol *> &Result);

  /// Return information about object file lowering.
  const TargetLoweringObjectFile &getObjFileLowering() const;

  /// Return information about data layout.
  const DataLayout &getDataLayout() const;

  /// Return the pointer size from the TargetMachine
  unsigned getPointerSize() const;

  /// Return information about subtarget.
  const MCSubtargetInfo &getSubtargetInfo() const;

  void EmitToStreamer(MCStreamer &S, const MCInst &Inst);

  /// Emits the initial debug location directive.
  void emitInitialRawDwarfLocDirective(const MachineFunction &MF);

  /// Return the current section we are emitting to.
  const MCSection *getCurrentSection() const;

  void getNameWithPrefix(SmallVectorImpl<char> &Name,
                         const GlobalValue *GV) const;

  MCSymbol *getSymbol(const GlobalValue *GV) const;

  /// Similar to getSymbol() but preferred for references. On ELF, this uses a
  /// local symbol if a reference to GV is guaranteed to be resolved to the
  /// definition in the same module.
  MCSymbol *getSymbolPreferLocal(const GlobalValue &GV) const;

  bool doesDwarfUseRelocationsAcrossSections() const {
    return DwarfUsesRelocationsAcrossSections;
  }

  void setDwarfUsesRelocationsAcrossSections(bool Enable) {
    DwarfUsesRelocationsAcrossSections = Enable;
  }

  //===------------------------------------------------------------------===//
  // XRay instrumentation implementation.
  //===------------------------------------------------------------------===//
public:
  // This describes the kind of sled we're storing in the XRay table.
  enum class SledKind : uint8_t {
    FUNCTION_ENTER = 0,
    FUNCTION_EXIT = 1,
    TAIL_CALL = 2,
    LOG_ARGS_ENTER = 3,
    CUSTOM_EVENT = 4,
    TYPED_EVENT = 5,
  };

  // The table will contain these structs that point to the sled, the function
  // containing the sled, and what kind of sled (and whether they should always
  // be instrumented). We also use a version identifier that the runtime can use
  // to decide what to do with the sled, depending on the version of the sled.
  struct XRayFunctionEntry {
    const MCSymbol *Sled;
    const MCSymbol *Function;
    SledKind Kind;
    bool AlwaysInstrument;
    const class Function *Fn;
    uint8_t Version;

    void emit(int, MCStreamer *) const;
  };

  // All the sleds to be emitted.
  SmallVector<XRayFunctionEntry, 4> Sleds;

  // Helper function to record a given XRay sled.
  void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind,
                  uint8_t Version = 0);

  /// Emit a table with all XRay instrumentation points.
  void emitXRayTable();
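
  // A minimal sketch of how a target printer might record a sled while
  // emitting instructions; the label name and Version value are illustrative:
  //
  //   MCSymbol *Sled = OutContext.createTempSymbol("xray_sled_", true);
  //   OutStreamer->emitLabel(Sled);
  //   // ... emit the sled's padded instruction sequence ...
  //   recordSled(Sled, MI, SledKind::FUNCTION_ENTER, /*Version=*/2);
  //
  // emitXRayTable() then emits every recorded entry at function end.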

  void emitPatchableFunctionEntries();

  //===------------------------------------------------------------------===//
  // MachineFunctionPass Implementation.
  //===------------------------------------------------------------------===//

  /// Record analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Set up the AsmPrinter when we are working on a new module. If your pass
  /// overrides this, it must make sure to explicitly call this implementation.
  bool doInitialization(Module &M) override;

  /// Shut down the asmprinter. If you override this in your pass, you must make
  /// sure to call it explicitly.
  bool doFinalization(Module &M) override;

  /// Emit the specified function out to the OutStreamer.
  bool runOnMachineFunction(MachineFunction &MF) override {
    SetupMachineFunction(MF);
    emitFunctionBody();
    return false;
  }

  //===------------------------------------------------------------------===//
  // Coarse grained IR lowering routines.
  //===------------------------------------------------------------------===//

  /// This should be called when a new MachineFunction is being processed from
  /// runOnMachineFunction.
  virtual void SetupMachineFunction(MachineFunction &MF);

  /// This method emits the body and trailer for a function.
  void emitFunctionBody();

  void emitCFIInstruction(const MachineInstr &MI);

  void emitFrameAlloc(const MachineInstr &MI);

  void emitStackSizeSection(const MachineFunction &MF);

  void emitStackUsage(const MachineFunction &MF);

  void emitBBAddrMapSection(const MachineFunction &MF);

  void emitKCFITrapEntry(const MachineFunction &MF, const MCSymbol *Symbol);
  virtual void emitKCFITypeId(const MachineFunction &MF);

  void emitPseudoProbe(const MachineInstr &MI);

  void emitRemarksSection(remarks::RemarkStreamer &RS);

  /// Emits a label as reference for PC sections.
  void emitPCSectionsLabel(const MachineFunction &MF, const MDNode &MD);

  /// Emits the PC sections collected from instructions.
  void emitPCSections(const MachineFunction &MF);

  /// Get the CFISection type for a function.
  CFISection getFunctionCFISectionType(const Function &F) const;

  /// Get the CFISection type for a function.
  CFISection getFunctionCFISectionType(const MachineFunction &MF) const;

  /// Get the CFISection type for the module.
  CFISection getModuleCFISectionType() const { return ModuleCFISection; }

  bool needsSEHMoves();

  /// Since emitting CFI unwind information is entangled with supporting the
  /// exceptions, this returns true for platforms which use CFI unwind
  /// information for other purposes (debugging, sanitizers, ...) when
  /// `MCAsmInfo::ExceptionsType == ExceptionHandling::None`.
  bool usesCFIWithoutEH() const;

  /// Print to the current output stream assembly representations of the
  /// constants in the constant pool MCP. This is used to print out constants
  /// which have been "spilled to memory" by the code generator.
  virtual void emitConstantPool();

  /// Print assembly representations of the jump tables used by the current
  /// function to the current output stream.
  virtual void emitJumpTableInfo();

  /// Emit the specified global variable to the .s file.
  virtual void emitGlobalVariable(const GlobalVariable *GV);

  /// Check to see if the specified global is a special global used by LLVM. If
  /// so, emit it and return true, otherwise do nothing and return false.
  bool emitSpecialLLVMGlobal(const GlobalVariable *GV);

  /// `llvm.global_ctors` and `llvm.global_dtors` are arrays of Structor
  /// structs.
  ///
  /// Priority - init priority
  /// Func - global initialization or global clean-up function
  /// ComdatKey - associated data
  struct Structor {
    int Priority = 0;
    Constant *Func = nullptr;
    GlobalValue *ComdatKey = nullptr;

    Structor() = default;
  };

  /// This method gathers an array of Structors and then sorts them by
  /// Priority.
  /// @param List The initializer of `llvm.global_ctors` or `llvm.global_dtors`
  /// array.
  /// @param[out] Structors Sorted Structor structs by Priority.
  void preprocessXXStructorList(const DataLayout &DL, const Constant *List,
                                SmallVector<Structor, 8> &Structors);

  /// This method emits `llvm.global_ctors` or `llvm.global_dtors` list.
  virtual void emitXXStructorList(const DataLayout &DL, const Constant *List,
                                  bool IsCtor);

  /// Emit an alignment directive to the specified power of two boundary. If a
  /// global value is specified, and if that global has an explicit alignment
  /// requested, it will override the alignment request if required for
  /// correctness.
  void emitAlignment(Align Alignment, const GlobalObject *GV = nullptr,
                     unsigned MaxBytesToEmit = 0) const;

  /// Lower the specified LLVM Constant to an MCExpr.
  virtual const MCExpr *lowerConstant(const Constant *CV);

  /// Print a general LLVM constant to the .s file.
  /// On AIX, when an alias refers to a sub-element of a global variable, the
  /// label of that alias needs to be emitted before the corresponding element.
  using AliasMapTy = DenseMap<uint64_t, SmallVector<const GlobalAlias *, 1>>;
  void emitGlobalConstant(const DataLayout &DL, const Constant *CV,
                          AliasMapTy *AliasList = nullptr);

  /// Unnamed constant global variables solely containing a pointer to
  /// another global variable act like a global variable "proxy", or GOT
  /// equivalents, i.e., they are only used to hold the address of the latter. One
  /// optimization is to replace accesses to these proxies by using the GOT
  /// entry for the final global instead. Hence, we select GOT equivalent
  /// candidates among all the module global variables, avoid emitting them
  /// unnecessarily and finally replace references to them by pc relative
  /// accesses to GOT entries.
  void computeGlobalGOTEquivs(Module &M);

  /// Constant expressions using GOT equivalent globals may not be
  /// eligible for PC relative GOT entry conversion, in such cases we need to
  /// emit the proxies we previously omitted in EmitGlobalVariable.
  void emitGlobalGOTEquivs();

  /// Emit the stack maps.
  void emitStackMaps();

  //===------------------------------------------------------------------===//
  // Overridable Hooks
  //===------------------------------------------------------------------===//

  void addAsmPrinterHandler(HandlerInfo Handler) {
    Handlers.insert(Handlers.begin(), std::move(Handler));
    NumUserHandlers++;
  }
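
  // A minimal registration sketch, assuming MyHandler is a user-defined
  // AsmPrinterHandler subclass and AP is the AsmPrinter (both hypothetical):
  //
  //   AP.addAsmPrinterHandler(AsmPrinter::HandlerInfo(
  //       std::make_unique<MyHandler>(), "my-timer", "My handler timing",
  //       "my-group", "My handler timers"));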

  // Targets can, or in the case of EmitInstruction, must implement these to
  // customize output.

  /// This virtual method can be overridden by targets that want to emit
  /// something at the start of their file.
  virtual void emitStartOfAsmFile(Module &) {}

  /// This virtual method can be overridden by targets that want to emit
  /// something at the end of their file.
  virtual void emitEndOfAsmFile(Module &) {}

  /// Targets can override this to emit stuff before the first basic block in
  /// the function.
  virtual void emitFunctionBodyStart() {}

  /// Targets can override this to emit stuff after the last basic block in the
  /// function.
  virtual void emitFunctionBodyEnd() {}

  /// Targets can override this to emit stuff at the start of a basic block.
  /// By default, this method prints the label for the specified
  /// MachineBasicBlock, an alignment (if present) and a comment describing it
  /// if appropriate.
  virtual void emitBasicBlockStart(const MachineBasicBlock &MBB);

  /// Targets can override this to emit stuff at the end of a basic block.
  virtual void emitBasicBlockEnd(const MachineBasicBlock &MBB);

  /// Targets should implement this to emit instructions.
  virtual void emitInstruction(const MachineInstr *) {
    llvm_unreachable("EmitInstruction not implemented");
  }

  /// Return the symbol for the specified constant pool entry.
  virtual MCSymbol *GetCPISymbol(unsigned CPID) const;

  virtual void emitFunctionEntryLabel();

  virtual void emitFunctionDescriptor() {
    llvm_unreachable("Function descriptor is target-specific.");
  }

  virtual void emitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);

  /// Targets can override this to change how global constants that are part of
  /// a C++ static/global constructor list are emitted.
  virtual void emitXXStructor(const DataLayout &DL, const Constant *CV) {
    emitGlobalConstant(DL, CV);
  }

  /// Return true if the basic block has exactly one predecessor and the control
  /// transfer mechanism between the predecessor and this block is a
  /// fall-through.
  virtual bool
  isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;

  /// Targets can override this to customize the output of IMPLICIT_DEF
  /// instructions in verbose mode.
  virtual void emitImplicitDef(const MachineInstr *MI) const;

  /// Emit N NOP instructions.
  void emitNops(unsigned N);

  //===------------------------------------------------------------------===//
  // Symbol Lowering Routines.
  //===------------------------------------------------------------------===//

  MCSymbol *createTempSymbol(const Twine &Name) const;

  /// Return the MCSymbol for a private symbol with global value name as its
  /// base, with the specified suffix.
  MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
                                         StringRef Suffix) const;

  /// Return the MCSymbol for the specified ExternalSymbol.
  MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;

  /// Return the symbol for the specified jump table entry.
  MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;

  /// Return the symbol for the specified jump table .set
  /// FIXME: privatize to AsmPrinter.
  MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;

  /// Return the MCSymbol used to satisfy BlockAddress uses of the specified
  /// basic block.
  MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
  MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;

  //===------------------------------------------------------------------===//
  // Emission Helper Routines.
  //===------------------------------------------------------------------===//

  /// This is just a convenient handler for printing offsets.
  void printOffset(int64_t Offset, raw_ostream &OS) const;

  /// Emit a byte directive and value.
  void emitInt8(int Value) const;

  /// Emit a short directive and value.
  void emitInt16(int Value) const;

  /// Emit a long directive and value.
  void emitInt32(int Value) const;

  /// Emit a long long directive and value.
  void emitInt64(uint64_t Value) const;

  /// Emit the specified signed leb128 value.
  void emitSLEB128(int64_t Value, const char *Desc = nullptr) const;

  /// Emit the specified unsigned leb128 value.
  void emitULEB128(uint64_t Value, const char *Desc = nullptr,
                   unsigned PadTo = 0) const;

  /// Emit something like ".long Hi-Lo" where the size in bytes of the directive
  /// is specified by Size and Hi/Lo specify the labels.  This implicitly uses
  /// .set if it is available.
  void emitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
                           unsigned Size) const;

  /// Emit something like ".uleb128 Hi-Lo".
  void emitLabelDifferenceAsULEB128(const MCSymbol *Hi,
                                    const MCSymbol *Lo) const;

  /// Emit something like ".long Label+Offset" where the size in bytes of the
  /// directive is specified by Size and Label specifies the label.  This
  /// implicitly uses .set if it is available.
  void emitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
                           unsigned Size, bool IsSectionRelative = false) const;

  /// Emit something like ".long Label" where the size in bytes of the directive
  /// is specified by Size and Label specifies the label.
  void emitLabelReference(const MCSymbol *Label, unsigned Size,
                          bool IsSectionRelative = false) const {
    emitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
  }
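
  // A minimal sketch of these helpers in combination, e.g. emitting a 4-byte
  // size field followed by a ULEB128 count (Begin/End/NumEntries are
  // illustrative):
  //
  //   emitLabelDifference(End, Begin, /*Size=*/4); // .long End-Begin
  //   emitULEB128(NumEntries, "number of entries");
  //   emitInt8(0);                                 // terminator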

  //===------------------------------------------------------------------===//
  // Dwarf Emission Helper Routines
  //===------------------------------------------------------------------===//

  /// Emit a .byte 42 directive that corresponds to an encoding.  If verbose
  /// assembly output is enabled, we output comments describing the encoding.
  /// Desc is a string saying what the encoding is specifying (e.g. "LSDA").
  void emitEncodingByte(unsigned Val, const char *Desc = nullptr) const;

  /// Return the size of the encoding in bytes.
  unsigned GetSizeOfEncodedValue(unsigned Encoding) const;

  /// Emit reference to a ttype global with a specified encoding.
  virtual void emitTTypeReference(const GlobalValue *GV, unsigned Encoding);

  /// Emit a reference to a symbol for use in dwarf. Different object formats
  /// represent this in different ways. Some use a relocation; others encode
  /// the label offset in its section.
  void emitDwarfSymbolReference(const MCSymbol *Label,
                                bool ForceOffset = false) const;

  /// Emit the 4- or 8-byte offset of a string from the start of its section.
  ///
  /// When possible, emit a DwarfStringPool section offset without any
  /// relocations, and without using the symbol.  Otherwise, defers to \a
  /// emitDwarfSymbolReference().
  ///
  /// The length of the emitted value depends on the DWARF format.
  void emitDwarfStringOffset(DwarfStringPoolEntry S) const;

  /// Emit the 4- or 8-byte offset of a string from the start of its section.
  void emitDwarfStringOffset(DwarfStringPoolEntryRef S) const {
    emitDwarfStringOffset(S.getEntry());
  }

  /// Emit something like ".long Label + Offset" or ".quad Label + Offset"
  /// depending on the DWARF format.
  void emitDwarfOffset(const MCSymbol *Label, uint64_t Offset) const;

  /// Emit 32- or 64-bit value depending on the DWARF format.
  void emitDwarfLengthOrOffset(uint64_t Value) const;

  /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
  /// according to the settings.
  void emitDwarfUnitLength(uint64_t Length, const Twine &Comment) const;

  /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
  /// according to the settings.
  /// Return the end symbol generated inside, the caller needs to emit it.
  MCSymbol *emitDwarfUnitLength(const Twine &Prefix,
                                const Twine &Comment) const;
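
  // A minimal usage sketch for the symbol-returning overload: the caller
  // emits the unit's contents and then the returned end label ("debug_foo"
  // is an illustrative prefix):
  //
  //   MCSymbol *EndLabel = emitDwarfUnitLength("debug_foo", "Unit Length");
  //   // ... emit the unit's contents ...
  //   OutStreamer->emitLabel(EndLabel);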

  /// Emit reference to a call site with a specified encoding
  void emitCallSiteOffset(const MCSymbol *Hi, const MCSymbol *Lo,
                          unsigned Encoding) const;
  /// Emit an integer value corresponding to the call site encoding
  void emitCallSiteValue(uint64_t Value, unsigned Encoding) const;

  /// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
  virtual unsigned getISAEncoding() { return 0; }

  /// Emit the directive and value for a debug thread local expression.
  ///
  /// \p Value - The value to emit.
  /// \p Size - The size of the integer (in bytes) to emit.
  virtual void emitDebugValue(const MCExpr *Value, unsigned Size) const;

  //===------------------------------------------------------------------===//
  // Dwarf Lowering Routines
  //===------------------------------------------------------------------===//

  /// Emit frame instruction to describe the layout of the frame.
  void emitCFIInstruction(const MCCFIInstruction &Inst) const;

  /// Emit Dwarf abbreviation table.
  template <typename T> void emitDwarfAbbrevs(const T &Abbrevs) const {
    // For each abbreviation.
    for (const auto &Abbrev : Abbrevs)
      emitDwarfAbbrev(*Abbrev);

    // Mark end of abbreviations.
    emitULEB128(0, "EOM(3)");
  }

  void emitDwarfAbbrev(const DIEAbbrev &Abbrev) const;

  /// Recursively emit Dwarf DIE tree.
  void emitDwarfDIE(const DIE &Die) const;

  //===------------------------------------------------------------------===//
  // Inline Asm Support
  //===------------------------------------------------------------------===//

  // These are hooks that targets can override to implement inline asm
  // support.  These should probably be moved out of AsmPrinter someday.

  /// Print information related to the specified machine instr that is
  /// independent of the operand, and may be independent of the instr itself.
  /// This can be useful for portably encoding the comment character or other
  /// bits of target-specific knowledge into the asmstrings.  The syntax used is
  /// ${:comment}.  Targets can override this to add support for their own
  /// strange codes.
  virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
                            StringRef Code) const;

  /// Print the MachineOperand as a symbol. Targets with complex handling of
  /// symbol references should override the base implementation.
  virtual void PrintSymbolOperand(const MachineOperand &MO, raw_ostream &OS);

  /// Print the specified operand of MI, an INLINEASM instruction, using the
  /// specified assembler variant.  Targets should override this to format as
  /// appropriate.  This method can return true if the operand is erroneous.
  virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                               const char *ExtraCode, raw_ostream &OS);

  /// Print the specified operand of MI, an INLINEASM instruction, using the
  /// specified assembler variant as an address. Targets should override this to
  /// format as appropriate.  This method can return true if the operand is
  /// erroneous.
  virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
                                     const char *ExtraCode, raw_ostream &OS);

  /// Let the target do anything it needs to do before emitting inlineasm.
  /// \p StartInfo - the subtarget info before parsing inline asm
  virtual void emitInlineAsmStart() const;

  /// Let the target do anything it needs to do after emitting inlineasm.
  /// This callback can be used to restore the original mode in case the
  /// inlineasm contains directives to switch modes.
  /// \p StartInfo - the original subtarget info before inline asm
  /// \p EndInfo   - the final subtarget info after parsing the inline asm,
  ///                or NULL if the value is unknown.
  virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
                                const MCSubtargetInfo *EndInfo) const;

  /// This emits visibility information about the symbol, if this is supported
  /// by the target.
  void emitVisibility(MCSymbol *Sym, unsigned Visibility,
                      bool IsDefinition = true) const;

  /// This emits linkage information about \p GVSym based on \p GV, if this is
  /// supported by the target.
  virtual void emitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;

  /// Return the alignment for the specified \p GV.
  static Align getGVAlignment(const GlobalObject *GV, const DataLayout &DL,
                              Align InAlign = Align(1));

private:
  /// Private state for PrintSpecial()
  // Assign a unique ID to this machine instruction.
  mutable const MachineInstr *LastMI = nullptr;
  mutable unsigned LastFn = 0;
  mutable unsigned Counter = ~0U;

  bool DwarfUsesRelocationsAcrossSections = false;

  /// This method emits the header for the current function.
  virtual void emitFunctionHeader();

  /// This method emits a comment next to header for the current function.
  virtual void emitFunctionHeaderComment();

  /// Emit a blob of inline asm to the output streamer.
  void
  emitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
                const MCTargetOptions &MCOptions,
                const MDNode *LocMDNode = nullptr,
                InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;

  /// This method formats and emits the specified machine instruction that is an
  /// inline asm.
  void emitInlineAsm(const MachineInstr *MI) const;

  /// Add inline assembly info to the diagnostics machinery, so we can
  /// emit file and position info. Returns SrcMgr memory buffer position.
  unsigned addInlineAsmDiagBuffer(StringRef AsmStr,
                                  const MDNode *LocMDNode) const;

  //===------------------------------------------------------------------===//
  // Internal Implementation Details
  //===------------------------------------------------------------------===//

  void emitJumpTableEntry(const MachineJumpTableInfo *MJTI,
                          const MachineBasicBlock *MBB, unsigned uid) const;
  void emitLLVMUsedList(const ConstantArray *InitList);
  /// Emit llvm.ident metadata in an '.ident' directive.
  void emitModuleIdents(Module &M);
  /// Emit bytes for llvm.commandline metadata.
  virtual void emitModuleCommandLines(Module &M);

  GCMetadataPrinter *getOrCreateGCPrinter(GCStrategy &S);
  void emitGlobalAlias(Module &M, const GlobalAlias &GA);
  void emitGlobalIFunc(Module &M, const GlobalIFunc &GI);

  /// This method decides whether the specified basic block requires a label.
  bool shouldEmitLabelForBasicBlock(const MachineBasicBlock &MBB) const;

protected:
  virtual bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const {
    return false;
  }
};

} // end namespace llvm

#endif // LLVM_CODEGEN_ASMPRINTER_H
//===------------ MachineStableHash.h - MIR Stable Hashing Utilities ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Stable hashing for MachineInstr and MachineOperand. Useful for getting a
// hash across runs, modules, etc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_MACHINESTABLEHASH_H
#define LLVM_CODEGEN_MACHINESTABLEHASH_H

#include "llvm/CodeGen/StableHashing.h"

namespace llvm {
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineOperand;

stable_hash stableHashValue(const MachineOperand &MO);
stable_hash stableHashValue(const MachineInstr &MI, bool HashVRegs = false,
                            bool HashConstantPoolIndices = false,
                            bool HashMemOperands = false);
stable_hash stableHashValue(const MachineBasicBlock &MBB);
stable_hash stableHashValue(const MachineFunction &MF);
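
// A minimal usage sketch (illustrative): hash a whole function for a stable
// cross-run fingerprint, or hash a single instruction including its virtual
// register operands.
//
//   stable_hash FnHash = stableHashValue(MF);
//   stable_hash MIHash = stableHashValue(MI, /*HashVRegs=*/true);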

} // namespace llvm

#endif
//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"

namespace llvm {

class MachineInstr;
class TargetInstrInfo;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for this
  // processor.
  MCSchedModel SchedModel;
  InstrItineraryData InstrItins;
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;

  SmallVector<unsigned, 16> ResourceFactors;

  // Multiply to normalize microops to resource units.
  unsigned MicroOpFactor = 0;

  // Resource units per cycle. Latency normalization factor.
  unsigned ResourceLCM = 0;

  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;

public:
  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}

  /// Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
  void init(const TargetSubtargetInfo *TSInfo);

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// TargetSubtargetInfo getter.
  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }

  /// TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grained IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// Return true if this machine model includes cycle-to-cycle itinerary
  /// data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model or cycle-to-cycle itinerary data.
  bool hasInstrSchedModelOrItineraries() const {
    return hasInstrSchedModel() || hasInstrItineraries();
  }
  bool enableIntervals() const { return SchedModel.EnableIntervals; }
  /// Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// Return true if a new group must begin.
  bool mustBeginGroup(const MachineInstr *MI,
                      const MCSchedClassDesc *SC = nullptr) const;
  /// Return true if the current group must end.
  bool mustEndGroup(const MachineInstr *MI,
                    const MCSchedClassDesc *SC = nullptr) const;

  /// Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;

  /// Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  using ProcResIter = const MCWriteProcResEntry *;

  // Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }

  /// Multiply the number of units consumed for a resource by this factor
  /// to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// Multiply number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }
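
  // A minimal sketch of how these factors put micro-ops, resource units, and
  // cycles on one common scale so they can be compared directly
  // (NumMicroOps/NumUnits/PIdx/Cycles are illustrative):
  //
  //   unsigned UOpCost = NumMicroOps * getMicroOpFactor();
  //   unsigned ResCost = NumUnits * getResourceFactor(PIdx);
  //   unsigned LatCost = Cycles * getLatencyFactor();
  //   // The largest value identifies the likely bottleneck.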

  /// Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }

  /// Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;

  /// Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent of
  /// a particular use. computeOperandLatency is the preferred API, but this is
  /// occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this is so we preserve the previous behavior of the
  /// if converter after moving it to TargetSchedModel).
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  unsigned computeInstrLatency(const MCInst &Inst) const;
  unsigned computeInstrLatency(unsigned Opcode) const;
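
  // A minimal query sketch, assuming an initialized model and a known
  // def-use operand pair (DefMI/DefOpIdx/UseMI/UseOpIdx are illustrative):
  //
  //   TargetSchedModel SM;
  //   SM.init(&MF.getSubtarget());
  //   unsigned DepLat = SM.computeOperandLatency(DefMI, DefOpIdx,
  //                                              UseMI, UseOpIdx);
  //   unsigned InstrLat = SM.computeInstrLatency(DefMI);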


  /// Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                const MachineInstr *DepMI) const;

  /// Compute the reciprocal throughput of the given instruction.
  double computeReciprocalThroughput(const MachineInstr *MI) const;
  double computeReciprocalThroughput(const MCInst &MI) const;
  double computeReciprocalThroughput(unsigned Opcode) const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETSCHEDULE_H
//===- HardwareLoops.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Defines an IR pass for the creation of hardware loops.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_HARDWARELOOPS_H
#define LLVM_CODEGEN_HARDWARELOOPS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct HardwareLoopOptions {
  std::optional<unsigned> Decrement;
  std::optional<unsigned> Bitwidth;
  std::optional<bool> Force;
  std::optional<bool> ForcePhi;
  std::optional<bool> ForceNested;
  std::optional<bool> ForceGuard;

  HardwareLoopOptions &setDecrement(unsigned Count) {
    Decrement = Count;
    return *this;
  }
  HardwareLoopOptions &setCounterBitwidth(unsigned Width) {
    Bitwidth = Width;
    return *this;
  }
  HardwareLoopOptions &setForce(bool Force) {
    this->Force = Force;
    return *this;
  }
  HardwareLoopOptions &setForcePhi(bool Force) {
    ForcePhi = Force;
    return *this;
  }
  HardwareLoopOptions &setForceNested(bool Force) {
    ForceNested = Force;
    return *this;
  }
  HardwareLoopOptions &setForceGuard(bool Force) {
    ForceGuard = Force;
    return *this;
  }
  bool getForcePhi() const {
    return ForcePhi.has_value() && ForcePhi.value();
  }
  bool getForceNested() const {
    return ForceNested.has_value() && ForceNested.value();
  }
  bool getForceGuard() const {
    return ForceGuard.has_value() && ForceGuard.value();
  }
};
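
// A minimal configuration sketch: each setter returns *this, so options can
// be chained before constructing the pass (the chosen values are
// illustrative).
//
//   HardwareLoopOptions Opts;
//   Opts.setDecrement(1).setCounterBitwidth(32).setForceGuard(true);
//   HardwareLoopsPass Pass(Opts);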

class HardwareLoopsPass : public PassInfoMixin<HardwareLoopsPass> {
  HardwareLoopOptions Opts;

public:
  explicit HardwareLoopsPass(HardwareLoopOptions Opts = {})
    : Opts(Opts) { }

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_HARDWARELOOPS_H
//==- include/llvm/CodeGen/AccelTable.h - Accelerator Tables -----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file contains support for writing accelerator tables.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ACCELTABLE_H
#define LLVM_CODEGEN_ACCELTABLE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/DJB.h"
#include "llvm/Support/Debug.h"
#include <cstdint>
#include <vector>

/// \file
/// The DWARF and Apple accelerator tables are an indirect hash table optimized
/// for null lookup rather than access to known data. The Apple accelerator
/// tables are a precursor of the newer DWARF v5 accelerator tables. Both
/// formats share common design ideas.
///
/// The Apple accelerator tables are output into an on-disk format that looks
/// like this:
///
/// .------------------.
/// |  HEADER          |
/// |------------------|
/// |  BUCKETS         |
/// |------------------|
/// |  HASHES          |
/// |------------------|
/// |  OFFSETS         |
/// |------------------|
/// |  DATA            |
/// `------------------'
///
/// The header contains a magic number, version, type of hash function,
/// the number of buckets, total number of hashes, and room for a special struct
/// of data and the length of that struct.
///
/// The buckets contain an index (e.g. 6) into the hashes array. The hashes
/// section contains all of the 32-bit hash values in contiguous memory, and the
/// offsets contain the offset into the data area for the particular hash.
///
/// For a lookup example, we could hash a function name and take it modulo the
/// number of buckets giving us our bucket. From there we take the bucket value
/// as an index into the hashes table and look at each successive hash as long
/// as the hash value is still the same modulo result (bucket value) as earlier.
/// If we have a match we look at that same entry in the offsets table and grab
/// the offset in the data for our final match.
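///
/// A sketch of that lookup in pseudo-C++ (all names are illustrative):
/// \code
///   uint32_t H = Hash(Name);
///   uint32_t Bucket = H % NumBuckets;
///   for (uint32_t I = Buckets[Bucket]; I < NumHashes; ++I) {
///     if (Hashes[I] % NumBuckets != Bucket)
///       break;                     // walked past this bucket's run
///     if (Hashes[I] == H && nameMatches(Offsets[I], Name))
///       return Offsets[I];         // offset of the match in the DATA area
///   }
/// \endcode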
///
/// The DWARF v5 accelerator table consists of zero or more name indices that
/// are output into an on-disk format that looks like this:
///
/// .------------------.
/// |  HEADER          |
/// |------------------|
/// |  CU LIST         |
/// |------------------|
/// |  LOCAL TU LIST   |
/// |------------------|
/// |  FOREIGN TU LIST |
/// |------------------|
/// |  HASH TABLE      |
/// |------------------|
/// |  NAME TABLE      |
/// |------------------|
/// |  ABBREV TABLE    |
/// |------------------|
/// |  ENTRY POOL      |
/// `------------------'
///
/// For the full documentation please refer to the DWARF 5 standard.
///
///
/// This file defines the class template AccelTable, which represents an
/// abstract view of an Accelerator table, without any notion of an on-disk
/// layout. This class is parameterized by an entry type, which should derive
/// from AccelTableData. This is the type of individual entries in the table,
/// and it should store the data necessary to emit them. AppleAccelTableData is
/// the base class for Apple Accelerator Table entries, which have a uniform
/// structure based on a sequence of Atoms. There are different sub-classes
/// derived from AppleAccelTable, which differ in the set of Atoms and how they
/// obtain their values.
///
/// An Apple Accelerator Table can be serialized by calling the
/// emitAppleAccelTable function.

namespace llvm {

class AsmPrinter;
class DwarfCompileUnit;
class DwarfDebug;
class MCSymbol;
class raw_ostream;

/// Interface to which the different types of accelerator table data have to
/// conform. It serves as a base class for different values of the template
/// argument of the AccelTable class template.
class AccelTableData {
public:
  virtual ~AccelTableData() = default;

  bool operator<(const AccelTableData &Other) const {
    return order() < Other.order();
  }

  // Subclasses should implement:
  // static uint32_t hash(StringRef Name);

#ifndef NDEBUG
  virtual void print(raw_ostream &OS) const = 0;
#endif
protected:
  virtual uint64_t order() const = 0;
};

/// A base class holding non-template-dependent functionality of the AccelTable
/// class. Clients should not use this class directly but rather instantiate
/// AccelTable with a type derived from AccelTableData.
class AccelTableBase {
public:
  using HashFn = uint32_t(StringRef);

  /// Represents a group of entries with identical name (and hence, hash value).
  struct HashData {
    DwarfStringPoolEntryRef Name;
    uint32_t HashValue;
    std::vector<AccelTableData *> Values;
    MCSymbol *Sym;

#ifndef NDEBUG
    void print(raw_ostream &OS) const;
    void dump() const { print(dbgs()); }
#endif
  };
  using HashList = std::vector<HashData *>;
  using BucketList = std::vector<HashList>;

protected:
  /// Allocator for HashData and Values.
  BumpPtrAllocator Allocator;

  using StringEntries = MapVector<StringRef, HashData>;
  StringEntries Entries;

  HashFn *Hash;
  uint32_t BucketCount = 0;
  uint32_t UniqueHashCount = 0;

  HashList Hashes;
  BucketList Buckets;

  void computeBucketCount();

  AccelTableBase(HashFn *Hash) : Hash(Hash) {}

public:
  void finalize(AsmPrinter *Asm, StringRef Prefix);
  ArrayRef<HashList> getBuckets() const { return Buckets; }
  uint32_t getBucketCount() const { return BucketCount; }
  uint32_t getUniqueHashCount() const { return UniqueHashCount; }
  uint32_t getUniqueNameCount() const { return Entries.size(); }

#ifndef NDEBUG
  void print(raw_ostream &OS) const;
  void dump() const { print(dbgs()); }
#endif

  AccelTableBase(const AccelTableBase &) = delete;
  void operator=(const AccelTableBase &) = delete;
};

/// This class holds an abstract representation of an Accelerator Table,
/// consisting of a sequence of buckets, each bucket containing a sequence of
/// HashData entries. The class is parameterized by the type of entries it
/// holds. The type template parameter also defines the hash function to use for
/// hashing names.
template <typename DataT> class AccelTable : public AccelTableBase {
public:
  AccelTable() : AccelTableBase(DataT::hash) {}

  template <typename... Types>
  void addName(DwarfStringPoolEntryRef Name, Types &&... Args);
};

template <typename AccelTableDataT>
template <typename... Types>
void AccelTable<AccelTableDataT>::addName(DwarfStringPoolEntryRef Name,
                                          Types &&... Args) {
  assert(Buckets.empty() && "Already finalized!");
  // If the string is already in the list then add this die to it;
  // otherwise add a new one.
  auto &It = Entries[Name.getString()];
  if (It.Values.empty()) {
    It.Name = Name;
    It.HashValue = Hash(Name.getString());
  }
  It.Values.push_back(new (Allocator)
                          AccelTableDataT(std::forward<Types>(Args)...));
}
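
// A minimal population sketch, assuming Pool is a DwarfStringPool yielding
// DwarfStringPoolEntryRefs and Die is the DIE being indexed (both are
// illustrative):
//
//   AccelTable<AppleAccelTableOffsetData> Table;
//   Table.addName(Pool.getEntry(*Asm, "main"), Die);
//   Table.finalize(Asm, "Names");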

/// A base class for different implementations of Data classes for Apple
/// Accelerator Tables. The columns in the table are defined by the static Atoms
/// variable defined on the subclasses.
class AppleAccelTableData : public AccelTableData {
public:
  /// An Atom defines the form of the data in an Apple accelerator table.
  /// Conceptually it is a column in the accelerator consisting of a type and a
  /// specification of the form of its data.
  struct Atom {
    /// Atom Type.
    const uint16_t Type;
    /// DWARF Form.
    const uint16_t Form;

    constexpr Atom(uint16_t Type, uint16_t Form) : Type(Type), Form(Form) {}

#ifndef NDEBUG
    void print(raw_ostream &OS) const;
    void dump() const { print(dbgs()); }
#endif
  };
  // Subclasses should define:
  // static constexpr Atom Atoms[];

  virtual void emit(AsmPrinter *Asm) const = 0;

  static uint32_t hash(StringRef Buffer) { return djbHash(Buffer); }
};

/// The Data class implementation for DWARF v5 accelerator table. Unlike the
/// Apple Data classes, this class is just a DIE wrapper, and does not know how
/// to serialize itself. The complete serialization logic is in the
/// emitDWARF5AccelTable function.
class DWARF5AccelTableData : public AccelTableData {
public:
  static uint32_t hash(StringRef Name) { return caseFoldingDjbHash(Name); }

  DWARF5AccelTableData(const DIE &Die) : Die(Die) {}

#ifndef NDEBUG
  void print(raw_ostream &OS) const override;
#endif

  const DIE &getDie() const { return Die; }
  uint64_t getDieOffset() const { return Die.getOffset(); }
  unsigned getDieTag() const { return Die.getTag(); }

protected:
  const DIE &Die;

  uint64_t order() const override { return Die.getOffset(); }
};

class DWARF5AccelTableStaticData : public AccelTableData {
public:
  static uint32_t hash(StringRef Name) { return caseFoldingDjbHash(Name); }

  DWARF5AccelTableStaticData(uint64_t DieOffset, unsigned DieTag,
                             unsigned CUIndex)
      : DieOffset(DieOffset), DieTag(DieTag), CUIndex(CUIndex) {}

#ifndef NDEBUG
  void print(raw_ostream &OS) const override;
#endif

  uint64_t getDieOffset() const { return DieOffset; }
  unsigned getDieTag() const { return DieTag; }
  unsigned getCUIndex() const { return CUIndex; }

protected:
  uint64_t DieOffset;
  unsigned DieTag;
  unsigned CUIndex;

  uint64_t order() const override { return DieOffset; }
};

void emitAppleAccelTableImpl(AsmPrinter *Asm, AccelTableBase &Contents,
                             StringRef Prefix, const MCSymbol *SecBegin,
                             ArrayRef<AppleAccelTableData::Atom> Atoms);

/// Emit an Apple Accelerator Table consisting of entries in the specified
/// AccelTable. The DataT template parameter should be derived from
/// AppleAccelTableData.
template <typename DataT>
void emitAppleAccelTable(AsmPrinter *Asm, AccelTable<DataT> &Contents,
                         StringRef Prefix, const MCSymbol *SecBegin) {
  static_assert(std::is_convertible<DataT *, AppleAccelTableData *>::value);
  emitAppleAccelTableImpl(Asm, Contents, Prefix, SecBegin, DataT::Atoms);
}

void emitDWARF5AccelTable(AsmPrinter *Asm,
                          AccelTable<DWARF5AccelTableData> &Contents,
                          const DwarfDebug &DD,
                          ArrayRef<std::unique_ptr<DwarfCompileUnit>> CUs);

void emitDWARF5AccelTable(
    AsmPrinter *Asm, AccelTable<DWARF5AccelTableStaticData> &Contents,
    ArrayRef<MCSymbol *> CUs,
    llvm::function_ref<unsigned(const DWARF5AccelTableStaticData &)>
        getCUIndexForEntry);

/// Accelerator table data implementation for simple Apple accelerator tables
/// with just a DIE reference.
class AppleAccelTableOffsetData : public AppleAccelTableData {
public:
  AppleAccelTableOffsetData(const DIE &D) : Die(D) {}

  void emit(AsmPrinter *Asm) const override;

  static constexpr Atom Atoms[] = {
      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)};

#ifndef NDEBUG
  void print(raw_ostream &OS) const override;
#endif
protected:
  uint64_t order() const override { return Die.getOffset(); }

  const DIE &Die;
};

/// Accelerator table data implementation for Apple type accelerator tables.
class AppleAccelTableTypeData : public AppleAccelTableOffsetData {
public:
  AppleAccelTableTypeData(const DIE &D) : AppleAccelTableOffsetData(D) {}

  void emit(AsmPrinter *Asm) const override;

  static constexpr Atom Atoms[] = {
      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
      Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
      Atom(dwarf::DW_ATOM_type_flags, dwarf::DW_FORM_data1)};

#ifndef NDEBUG
  void print(raw_ostream &OS) const override;
#endif
};

/// Accelerator table data implementation for simple Apple accelerator tables
/// with a DIE offset but no actual DIE pointer.
class AppleAccelTableStaticOffsetData : public AppleAccelTableData {
public:
  AppleAccelTableStaticOffsetData(uint32_t Offset) : Offset(Offset) {}

  void emit(AsmPrinter *Asm) const override;

  static constexpr Atom Atoms[] = {
      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4)};

#ifndef NDEBUG
  void print(raw_ostream &OS) const override;
#endif
protected:
  uint64_t order() const override { return Offset; }

  uint32_t Offset;
};

/// Accelerator table data implementation for type accelerator tables with
/// a DIE offset but no actual DIE pointer.
class AppleAccelTableStaticTypeData : public AppleAccelTableStaticOffsetData {
public:
  AppleAccelTableStaticTypeData(uint32_t Offset, uint16_t Tag,
                                bool ObjCClassIsImplementation,
                                uint32_t QualifiedNameHash)
      : AppleAccelTableStaticOffsetData(Offset),
        QualifiedNameHash(QualifiedNameHash), Tag(Tag),
        ObjCClassIsImplementation(ObjCClassIsImplementation) {}

  void emit(AsmPrinter *Asm) const override;

  static constexpr Atom Atoms[] = {
      Atom(dwarf::DW_ATOM_die_offset, dwarf::DW_FORM_data4),
      Atom(dwarf::DW_ATOM_die_tag, dwarf::DW_FORM_data2),
      Atom(5, dwarf::DW_FORM_data1), Atom(6, dwarf::DW_FORM_data4)};

#ifndef NDEBUG
  void print(raw_ostream &OS) const override;
#endif
protected:
  uint64_t order() const override { return Offset; }

  uint32_t QualifiedNameHash;
  uint16_t Tag;
  bool ObjCClassIsImplementation;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_ACCELTABLE_H

//===- LiveRangeCalc.h - Calculate live ranges -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LiveRangeCalc class can be used to implement the computation of
// live ranges from scratch.
// It caches information about values in the CFG to speed up repeated
// operations on the same live range.  The cache can be shared by
// non-overlapping live ranges. SplitKit uses that when computing the live
// range of split products.
//
// A low-level interface is available to clients that know where a variable is
// live, but don't know which value it has at every point.  LiveRangeCalc will
// propagate values down the dominator tree, and even insert PHI-defs where
// needed. SplitKit uses this faster interface when possible.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_LIVERANGECALC_H
#define LLVM_CODEGEN_LIVERANGECALC_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include <utility>

namespace llvm {

template <class NodeT> class DomTreeNodeBase;
class MachineDominatorTree;
class MachineFunction;
class MachineRegisterInfo;

using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;

class LiveRangeCalc {
  const MachineFunction *MF = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  SlotIndexes *Indexes = nullptr;
  MachineDominatorTree *DomTree = nullptr;
  VNInfo::Allocator *Alloc = nullptr;

  /// LiveOutPair - A value and the block that defined it.  The domtree node is
  /// redundant, it can be computed as: MDT[Indexes.getMBBFromIndex(VNI->def)].
  using LiveOutPair = std::pair<VNInfo *, MachineDomTreeNode *>;

  /// LiveOutMap - Map basic blocks to the value leaving the block.
  using LiveOutMap = IndexedMap<LiveOutPair, MBB2NumberFunctor>;

  /// Bit vector of active entries in LiveOut, also used as a visited set by
  /// findReachingDefs.  One entry per basic block, indexed by block number.
  /// This is kept as a separate bit vector because it can be cleared quickly
  /// when switching live ranges.
  BitVector Seen;

  /// Map each LiveRange to two sets of blocks (represented by bit vectors):
  /// the blocks where the range is defined on entry, and the blocks where it
  /// is undefined on entry.
  /// A block is defined on entry if there is a path from at least one of
  /// the defs in the live range to the entry of the block, and conversely,
  /// a block is undefined on entry, if there is no such path (i.e. no
  /// definition reaches the entry of the block). A single LiveRangeCalc
  /// object is used to track live-out information for multiple registers
  /// in live range splitting (which is ok, since the live ranges of these
  /// registers do not overlap), but the defined/undefined information must
  /// be kept separate for each individual range.
  /// By convention, EntryInfoMap[&LR] = { Defined, Undefined }.
  using EntryInfoMap = DenseMap<LiveRange *, std::pair<BitVector, BitVector>>;
  EntryInfoMap EntryInfos;

  /// Map each basic block where a live range is live out to the live-out value
  /// and its defining block.
  ///
  /// For every basic block, MBB, one of these conditions shall be true:
  ///
  ///  1. !Seen.count(MBB->getNumber())
  ///     Blocks without a Seen bit are ignored.
  ///  2. LiveOut[MBB].second->getBlock() == MBB
  ///     The live-out value is defined in MBB.
  ///  3. forall P in preds(MBB): LiveOut[P] == LiveOut[MBB]
  ///     The live-out value passes through MBB. All predecessors must carry
  ///     the same value.
  ///
  /// The domtree node may be null; it can be recomputed when needed.
  ///
  /// The map can be shared by multiple live ranges as long as no two are
  /// live-out of the same block.
  LiveOutMap Map;

  /// LiveInBlock - Information about a basic block where a live range is known
  /// to be live-in, but the value has not yet been determined.
  struct LiveInBlock {
    // The live range set that is live-in to this block.  The algorithms can
    // handle multiple non-overlapping live ranges simultaneously.
    LiveRange &LR;

    // DomNode - Dominator tree node for the block.
    // Cleared when the final value has been determined and LI has been updated.
    MachineDomTreeNode *DomNode;

    // Position in block where the live-in range ends, or SlotIndex() if the
    // range passes through the block.  When the final value has been
    // determined, the range from the block start to Kill will be added to LI.
    SlotIndex Kill;

    // Live-in value filled in by updateSSA once it is known.
    VNInfo *Value = nullptr;

    LiveInBlock(LiveRange &LR, MachineDomTreeNode *node, SlotIndex kill)
        : LR(LR), DomNode(node), Kill(kill) {}
  };

  /// LiveIn - Work list of blocks where the live-in value has yet to be
  /// determined.  This list is typically computed by findReachingDefs() and
  /// used as a work list by updateSSA().  The low-level interface may also be
  /// used to add entries directly.
  SmallVector<LiveInBlock, 16> LiveIn;

  /// Check if the entry to block @p MBB can be reached by any of the defs
  /// in @p LR. Return true if none of the defs reach the entry to @p MBB.
  bool isDefOnEntry(LiveRange &LR, ArrayRef<SlotIndex> Undefs,
                    MachineBasicBlock &MBB, BitVector &DefOnEntry,
                    BitVector &UndefOnEntry);

  /// Find the set of defs that can reach @p Kill. @p Kill must belong to
  /// @p UseMBB.
  ///
  /// If exactly one def can reach @p UseMBB, and the def dominates @p Kill,
  /// all paths from the def to @p UseMBB are added to @p LR, and the function
  /// returns true.
  ///
  /// If multiple values can reach @p UseMBB, the blocks that need @p LR to be
  /// live in are added to the LiveIn array, and the function returns false.
  ///
  /// The array @p Undef provides the locations where the range @p LR becomes
  /// undefined by <def,read-undef> operands on other subranges. If @p Undef
  /// is non-empty and @p Kill is jointly dominated only by the entries of
  /// @p Undef, the function returns false.
  ///
  /// PhysReg, when set, is used to verify live-in lists on basic blocks.
  bool findReachingDefs(LiveRange &LR, MachineBasicBlock &UseMBB, SlotIndex Use,
                        unsigned PhysReg, ArrayRef<SlotIndex> Undefs);

  /// updateSSA - Compute the values that will be live in to all requested
  /// blocks in LiveIn.  Create PHI-def values as required to preserve SSA form.
  ///
  /// Every live-in block must be jointly dominated by the added live-out
  /// blocks.  No values are read from the live ranges.
  void updateSSA();

  /// Transfer information from the LiveIn vector to the live ranges and update
  /// the given @p LiveOuts.
  void updateFromLiveIns();

protected:
  /// Some getters to expose in a read-only way some private fields to
  /// subclasses.
  const MachineFunction *getMachineFunction() { return MF; }
  const MachineRegisterInfo *getRegInfo() const { return MRI; }
  SlotIndexes *getIndexes() { return Indexes; }
  MachineDominatorTree *getDomTree() { return DomTree; }
  VNInfo::Allocator *getVNAlloc() { return Alloc; }

  /// Reset Map and Seen fields.
  void resetLiveOutMap();

public:
  LiveRangeCalc() = default;

  //===--------------------------------------------------------------------===//
  // High-level interface.
  //===--------------------------------------------------------------------===//
  //
  // Calculate live ranges from scratch.
  //

  /// reset - Prepare caches for a new set of non-overlapping live ranges.  The
  /// caches must be reset before attempting calculations with a live range
  /// that may overlap a previously computed live range, and before the first
  /// live range in a function.  If live ranges are not known to be
  /// non-overlapping, call reset before each.
  void reset(const MachineFunction *mf, SlotIndexes *SI,
             MachineDominatorTree *MDT, VNInfo::Allocator *VNIA);

  //===--------------------------------------------------------------------===//
  // Mid-level interface.
  //===--------------------------------------------------------------------===//
  //
  // Modify existing live ranges.
  //

  /// Extend the live range of @p LR to reach @p Use.
  ///
  /// The existing values in @p LR must be live so they jointly dominate @p Use.
  /// If @p Use is not dominated by a single existing value, PHI-defs are
  /// inserted as required to preserve SSA form.
  ///
  /// PhysReg, when set, is used to verify live-in lists on basic blocks.
  void extend(LiveRange &LR, SlotIndex Use, unsigned PhysReg,
              ArrayRef<SlotIndex> Undefs);

  //===--------------------------------------------------------------------===//
  // Low-level interface.
  //===--------------------------------------------------------------------===//
  //
  // These functions can be used to compute live ranges where the live-in and
  // live-out blocks are already known, but the SSA value in each block is
  // unknown.
  //
  // After calling reset(), add known live-out values and known live-in blocks.
  // Then call calculateValues() to compute the actual value that is
  // live-in to each block, and add liveness to the live ranges.
  //
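  //
  // A minimal sketch of the whole sequence (everything other than the
  // LiveRangeCalc members is assumed to exist in the caller):
  //
  //   LiveRangeCalc LRC;
  //   LRC.reset(&MF, Indexes, DomTree, &VNIAlloc);
  //   LRC.setLiveOutValue(DefMBB, VNI);      // block with a known live-out
  //   LRC.addLiveInBlock(LR, DomTree->getNode(UseMBB), KillIdx);
  //   LRC.calculateValues();                 // fills in the live-in values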

  /// setLiveOutValue - Indicate that VNI is live out from MBB.  The
  /// calculateValues() function will not add liveness for MBB, the caller
  /// should take care of that.
  ///
  /// VNI may be null only if MBB is a live-through block also passed to
  /// addLiveInBlock().
  void setLiveOutValue(MachineBasicBlock *MBB, VNInfo *VNI) {
    Seen.set(MBB->getNumber());
    Map[MBB] = LiveOutPair(VNI, nullptr);
  }

  /// addLiveInBlock - Add a block with an unknown live-in value.  This
  /// function can only be called once per basic block.  Once the live-in value
  /// has been determined, calculateValues() will add liveness to LI.
  ///
  /// @param LR      The live range that is live-in to the block.
  /// @param DomNode The domtree node for the block.
  /// @param Kill    Index in block where LI is killed.  If the value is
  ///                live-through, set Kill = SlotIndex() and also call
  ///                setLiveOutValue(MBB, 0).
  void addLiveInBlock(LiveRange &LR, MachineDomTreeNode *DomNode,
                      SlotIndex Kill = SlotIndex()) {
    LiveIn.push_back(LiveInBlock(LR, DomNode, Kill));
  }

  /// calculateValues - Calculate the value that will be live-in to each block
  /// added with addLiveInBlock.  Add PHI-def values as needed to preserve SSA
  /// form.  Add liveness to all live-in blocks up to the Kill point, or the
  /// whole block for live-through blocks.
  ///
  /// Every predecessor of a live-in block must have been given a value with
  /// setLiveOutValue; the value may be null for live-through blocks.
  void calculateValues();

  /// A diagnostic function to check if the end of the block @p MBB is
  /// jointly dominated by the blocks corresponding to the slot indices
  /// in @p Defs. This function is mainly for use in self-verification
  /// checks.
  LLVM_ATTRIBUTE_UNUSED
  static bool isJointlyDominated(const MachineBasicBlock *MBB,
                                 ArrayRef<SlotIndex> Defs,
                                 const SlotIndexes &Indexes);
};

} // end namespace llvm

#endif // LLVM_CODEGEN_LIVERANGECALC_H

//===-- BitstreamRemarkParser.h - Bitstream parser --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an implementation of the remark parser using the LLVM
// Bitstream format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_BITSTREAMREMARKPARSER_H
#define LLVM_REMARKS_BITSTREAMREMARKPARSER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Error.h"
#include <array>
#include <cstdint>
#include <optional>

namespace llvm {
namespace remarks {

/// Helper to parse a META_BLOCK for a bitstream remark container.
struct BitstreamMetaParserHelper {
  /// The Bitstream reader.
  BitstreamCursor &Stream;
  /// Reference to the storage for the block info.
  BitstreamBlockInfo &BlockInfo;
  /// The parsed content: depending on the container type, some fields might be
  /// empty.
  std::optional<uint64_t> ContainerVersion;
  std::optional<uint8_t> ContainerType;
  std::optional<StringRef> StrTabBuf;
  std::optional<StringRef> ExternalFilePath;
  std::optional<uint64_t> RemarkVersion;

  /// Continue parsing with \p Stream. \p Stream is expected to contain an
  /// ENTER_SUBBLOCK to the META_BLOCK at the current position.
  /// \p Stream is expected to have a BLOCKINFO_BLOCK set.
  BitstreamMetaParserHelper(BitstreamCursor &Stream,
                            BitstreamBlockInfo &BlockInfo);

  /// Parse the META_BLOCK and fill the available entries.
  /// This helper does not check for the validity of the fields.
  Error parse();
};

/// Helper to parse a REMARK_BLOCK for a bitstream remark container.
struct BitstreamRemarkParserHelper {
  /// The Bitstream reader.
  BitstreamCursor &Stream;
  /// The parsed content: depending on the remark, some fields might be empty.
  std::optional<uint8_t> Type;
  std::optional<uint64_t> RemarkNameIdx;
  std::optional<uint64_t> PassNameIdx;
  std::optional<uint64_t> FunctionNameIdx;
  std::optional<uint64_t> SourceFileNameIdx;
  std::optional<uint32_t> SourceLine;
  std::optional<uint32_t> SourceColumn;
  std::optional<uint64_t> Hotness;
  struct Argument {
    std::optional<uint64_t> KeyIdx;
    std::optional<uint64_t> ValueIdx;
    std::optional<uint64_t> SourceFileNameIdx;
    std::optional<uint32_t> SourceLine;
    std::optional<uint32_t> SourceColumn;
  };
  std::optional<ArrayRef<Argument>> Args;
  /// Avoid re-allocating a vector every time.
  SmallVector<Argument, 8> TmpArgs;

  /// Continue parsing with \p Stream. \p Stream is expected to contain an
  /// ENTER_SUBBLOCK to the REMARK_BLOCK at the current position.
  /// \p Stream is expected to have a BLOCKINFO_BLOCK set and to have already
  /// parsed the META_BLOCK.
  BitstreamRemarkParserHelper(BitstreamCursor &Stream);

  /// Parse the REMARK_BLOCK and fill the available entries.
  /// This helper does not check for the validity of the fields.
  Error parse();
};

/// Helper to parse any bitstream remark container.
struct BitstreamParserHelper {
  /// The Bitstream reader.
  BitstreamCursor Stream;
  /// The block info block.
  BitstreamBlockInfo BlockInfo;
  /// Start parsing at \p Buffer.
  BitstreamParserHelper(StringRef Buffer);
  /// Parse the magic number.
  Expected<std::array<char, 4>> parseMagic();
  /// Parse the block info block containing all the abbrevs.
  /// This needs to be called before calling any other parsing function.
  Error parseBlockInfoBlock();
  /// Return true if the next block is a META_BLOCK. This function does not move
  /// the cursor.
  Expected<bool> isMetaBlock();
  /// Return true if the next block is a REMARK_BLOCK. This function does not
  /// move the cursor.
  Expected<bool> isRemarkBlock();
  /// Return true if the parser reached the end of the stream.
  bool atEndOfStream() { return Stream.AtEndOfStream(); }
  /// Jump to the end of the stream, skipping everything.
  void skipToEnd() { return Stream.skipToEnd(); }
};
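
// A rough driver sketch (assuming `Buffer` holds the raw remark section):
//
//   BitstreamParserHelper P(Buffer);
//   Expected<std::array<char, 4>> Magic = P.parseMagic();
//   if (!Magic)
//     return Magic.takeError();
//   // ... verify *Magic against the container magic ("RMRK") ...
//   if (Error E = P.parseBlockInfoBlock())
//     return std::move(E);
//   // Dispatch on isMetaBlock()/isRemarkBlock() until atEndOfStream().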

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_BITSTREAMREMARKPARSER_H

//===-- BitstreamRemarkContainer.h - Container for remarks --------------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides declarations for things used in the various types of
// remark containers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_BITSTREAMREMARKCONTAINER_H
#define LLVM_REMARKS_BITSTREAMREMARKCONTAINER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitCodes.h"
#include <cstdint>

namespace llvm {
namespace remarks {

/// The current version of the remark container.
/// Note: this is different from the version of the remark entry.
constexpr uint64_t CurrentContainerVersion = 0;
/// The magic number used for identifying remark blocks.
constexpr StringLiteral ContainerMagic("RMRK");

/// Type of the remark container.
/// The remark container has two modes:
/// * separate: the metadata is separate from the remarks and points to the
///             auxiliary file that contains the remarks.
/// * standalone: the metadata and the remarks are emitted together.
enum class BitstreamRemarkContainerType {
  /// The metadata emitted separately.
  /// This will contain the following:
  /// * Container version and type
  /// * String table
  /// * External file
  SeparateRemarksMeta,
  /// The remarks emitted separately.
  /// This will contain the following:
  /// * Container version and type
  /// * Remark version
  SeparateRemarksFile,
  /// Everything is emitted together.
  /// This will contain the following:
  /// * Container version and type
  /// * Remark version
  /// * String table
  Standalone,
  First = SeparateRemarksMeta,
  Last = Standalone,
};

/// The possible blocks that will be encountered in a bitstream remark
/// container.
enum BlockIDs {
  /// The metadata block is mandatory. It should always come after the
  /// BLOCKINFO_BLOCK, and contains metadata that should be used when parsing
  /// REMARK_BLOCKs.
  /// There should always be only one META_BLOCK.
  META_BLOCK_ID = bitc::FIRST_APPLICATION_BLOCKID,
  /// One remark entry is represented using a REMARK_BLOCK. There can be
  /// multiple REMARK_BLOCKs in the same file.
  REMARK_BLOCK_ID
};

constexpr StringRef MetaBlockName = StringRef("Meta", 4);
constexpr StringRef RemarkBlockName = StringRef("Remark", 6);

/// The possible records that can be encountered in the previously described
/// blocks.
enum RecordIDs {
  // Meta block records.
  RECORD_META_CONTAINER_INFO = 1,
  RECORD_META_REMARK_VERSION,
  RECORD_META_STRTAB,
  RECORD_META_EXTERNAL_FILE,
  // Remark block records.
  RECORD_REMARK_HEADER,
  RECORD_REMARK_DEBUG_LOC,
  RECORD_REMARK_HOTNESS,
  RECORD_REMARK_ARG_WITH_DEBUGLOC,
  RECORD_REMARK_ARG_WITHOUT_DEBUGLOC,
  // Helpers.
  RECORD_FIRST = RECORD_META_CONTAINER_INFO,
  RECORD_LAST = RECORD_REMARK_ARG_WITHOUT_DEBUGLOC
};

constexpr StringRef MetaContainerInfoName = StringRef("Container info", 14);
constexpr StringRef MetaRemarkVersionName = StringRef("Remark version", 14);
constexpr StringRef MetaStrTabName = StringRef("String table", 12);
constexpr StringRef MetaExternalFileName = StringRef("External File", 13);
constexpr StringRef RemarkHeaderName = StringRef("Remark header", 13);
constexpr StringRef RemarkDebugLocName = StringRef("Remark debug location", 21);
constexpr StringRef RemarkHotnessName = StringRef("Remark hotness", 14);
constexpr StringRef RemarkArgWithDebugLocName =
    StringRef("Argument with debug location", 28);
constexpr StringRef RemarkArgWithoutDebugLocName = StringRef("Argument", 8);

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_BITSTREAMREMARKCONTAINER_H

//===-- llvm/Remarks/RemarkFormat.h - The format of remarks -----*- C++/-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines utilities to deal with the format of remarks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARKFORMAT_H
#define LLVM_REMARKS_REMARKFORMAT_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace remarks {

constexpr StringLiteral Magic("REMARKS");

/// The format used for serializing/deserializing remarks.
enum class Format { Unknown, YAML, YAMLStrTab, Bitstream };

/// Parse and validate a string for the remark format.
Expected<Format> parseFormat(StringRef FormatStr);

/// Parse and validate a magic number to a remark format.
Expected<Format> magicToFormat(StringRef Magic);

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_REMARKFORMAT_H

//===-- YAMLRemarkSerializer.h - YAML Remark serialization ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for serializing remarks to YAML.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_YAMLREMARKSERIALIZER_H
#define LLVM_REMARKS_YAMLREMARKSERIALIZER_H

#include "llvm/Remarks/RemarkSerializer.h"
#include "llvm/Support/YAMLTraits.h"
#include <optional>

namespace llvm {
namespace remarks {

/// Serialize the remarks to YAML. One remark entry looks like this:
/// --- !<TYPE>
/// Pass:            <PASSNAME>
/// Name:            <REMARKNAME>
/// DebugLoc:        { File: <SOURCEFILENAME>, Line: <SOURCELINE>,
///                    Column: <SOURCECOLUMN> }
/// Function:        <FUNCTIONNAME>
/// Args:
///   - <KEY>: <VALUE>
///     DebugLoc:        { File: <FILE>, Line: <LINE>, Column: <COL> }
/// ...
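/// A concrete entry could look like this (values are illustrative):
/// --- !Missed
/// Pass:            inline
/// Name:            NoDefinition
/// DebugLoc:        { File: file.c, Line: 3, Column: 12 }
/// Function:        foo
/// Args:
///   - Callee:      bar
/// ...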
struct YAMLRemarkSerializer : public RemarkSerializer {
  /// The YAML streamer.
  yaml::Output YAMLOutput;

  YAMLRemarkSerializer(raw_ostream &OS, SerializerMode Mode,
                       std::optional<StringTable> StrTab = std::nullopt);

  void emit(const Remark &Remark) override;
  std::unique_ptr<MetaSerializer> metaSerializer(
      raw_ostream &OS,
      std::optional<StringRef> ExternalFilename = std::nullopt) override;

  static bool classof(const RemarkSerializer *S) {
    return S->SerializerFormat == Format::YAML;
  }

protected:
  YAMLRemarkSerializer(Format SerializerFormat, raw_ostream &OS,
                       SerializerMode Mode,
                       std::optional<StringTable> StrTab = std::nullopt);
};

struct YAMLMetaSerializer : public MetaSerializer {
  std::optional<StringRef> ExternalFilename;

  YAMLMetaSerializer(raw_ostream &OS, std::optional<StringRef> ExternalFilename)
      : MetaSerializer(OS), ExternalFilename(ExternalFilename) {}

  void emit() override;
};

/// Serialize the remarks to YAML using a string table. A remark entry looks
/// like a regular YAML remark, but instead of string entries it uses numbers
/// that map to indices in the string table.
struct YAMLStrTabRemarkSerializer : public YAMLRemarkSerializer {
  /// Whether we already emitted the metadata in standalone mode.
  /// This should be set to true after the first invocation of `emit`.
  bool DidEmitMeta = false;

  YAMLStrTabRemarkSerializer(raw_ostream &OS, SerializerMode Mode)
      : YAMLRemarkSerializer(Format::YAMLStrTab, OS, Mode) {
    // We always need a string table for this type of serializer.
    StrTab.emplace();
  }
  YAMLStrTabRemarkSerializer(raw_ostream &OS, SerializerMode Mode,
                             StringTable StrTab)
      : YAMLRemarkSerializer(Format::YAMLStrTab, OS, Mode, std::move(StrTab)) {}

  /// Override to emit the metadata if necessary.
  void emit(const Remark &Remark) override;

  std::unique_ptr<MetaSerializer> metaSerializer(
      raw_ostream &OS,
      std::optional<StringRef> ExternalFilename = std::nullopt) override;

  static bool classof(const RemarkSerializer *S) {
    return S->SerializerFormat == Format::YAMLStrTab;
  }
};

struct YAMLStrTabMetaSerializer : public YAMLMetaSerializer {
  /// The string table is part of the metadata.
  const StringTable &StrTab;

  YAMLStrTabMetaSerializer(raw_ostream &OS,
                           std::optional<StringRef> ExternalFilename,
                           const StringTable &StrTab)
      : YAMLMetaSerializer(OS, ExternalFilename), StrTab(StrTab) {}

  void emit() override;
};

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_YAMLREMARKSERIALIZER_H

//===-- RemarkStringTable.h - Serializing string table ----------*- C++/-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class is used to deduplicate and serialize a string table used for
// generating remarks.
//
// For parsing a string table, use ParsedStringTable in RemarkParser.h
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARKSTRINGTABLE_H
#define LLVM_REMARKS_REMARKSTRINGTABLE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
#include <vector>

namespace llvm {

class raw_ostream;
class StringRef;

namespace remarks {

struct ParsedStringTable;
struct Remark;

/// The string table used for serializing remarks.
/// This table can be for example serialized in a section to be consumed after
/// the compilation.
struct StringTable {
  /// The string table containing all the unique strings used in the output.
  /// It maps a string to a unique ID.
  StringMap<unsigned, BumpPtrAllocator> StrTab;
  /// Total size of the string table when serialized.
  size_t SerializedSize = 0;

  StringTable() = default;

  /// Disable copy.
  StringTable(const StringTable &) = delete;
  StringTable &operator=(const StringTable &) = delete;
  /// Should be movable.
  StringTable(StringTable &&) = default;
  StringTable &operator=(StringTable &&) = default;

  /// Construct a string table from a ParsedStringTable.
  StringTable(const ParsedStringTable &Other);

  /// Add a string to the table. Returns the unique ID of the string and a
  /// reference to its stored copy.
  std::pair<unsigned, StringRef> add(StringRef Str);
  /// Modify \p R to use strings from this string table. If the string table
  /// does not contain the strings, it adds them.
  void internalize(Remark &R);
  /// Serialize the string table to a stream. It is serialized as a little
  /// endian uint64 (the size of the table in bytes) followed by a sequence of
  /// NULL-terminated strings, where the N-th string is the string with the ID N
  /// in the StrTab map.
  void serialize(raw_ostream &OS) const;
  /// Serialize the string table to a vector. This allows users to do the actual
  /// writing to file/memory/other.
  /// The string with the ID == N should be the N-th element in the vector.
  std::vector<StringRef> serialize() const;
};
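
// A small usage sketch (IDs shown are illustrative; they are handed out in
// insertion order starting at 0):
//
//   remarks::StringTable Tab;
//   unsigned HelloID = Tab.add("hello").first; // 0
//   unsigned WorldID = Tab.add("world").first; // 1
//   Tab.add("hello");                          // deduplicated: still ID 0
//   Tab.serialize(OS); // size as little-endian uint64, then "hello\0world\0"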

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_REMARKSTRINGTABLE_H

//===-- llvm/Remarks/RemarkParser.h - The remark parser ---------*- C++/-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for parsing remarks in LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARKPARSER_H
#define LLVM_REMARKS_REMARKPARSER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Remarks/RemarkFormat.h"
#include "llvm/Support/Error.h"
#include <memory>
#include <optional>

namespace llvm {
namespace remarks {

struct Remark;

class EndOfFileError : public ErrorInfo<EndOfFileError> {
public:
  static char ID;

  EndOfFileError() = default;

  void log(raw_ostream &OS) const override { OS << "End of file reached."; }
  std::error_code convertToErrorCode() const override {
    return inconvertibleErrorCode();
  }
};

/// Parser used to parse a raw buffer to remarks::Remark objects.
struct RemarkParser {
  /// The format of the parser.
  Format ParserFormat;
  /// Path to prepend when opening an external remark file.
  std::string ExternalFilePrependPath;

  RemarkParser(Format ParserFormat) : ParserFormat(ParserFormat) {}

  /// If no error occurs, this returns a valid Remark object.
  /// If an error of type EndOfFileError occurs, it is safe to recover from it
  /// by stopping the parsing.
  /// If any other error occurs, it should be propagated to the user.
  /// The pointer should never be null.
  virtual Expected<std::unique_ptr<Remark>> next() = 0;

  virtual ~RemarkParser() = default;
};
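
// A typical consumption loop (sketch; `process` is a placeholder for user
// code):
//
//   while (true) {
//     Expected<std::unique_ptr<Remark>> Next = Parser->next();
//     if (Error E = Next.takeError()) {
//       if (E.isA<EndOfFileError>()) {
//         consumeError(std::move(E)); // clean end of the buffer
//         break;
//       }
//       return std::move(E); // real error: propagate to the user
//     }
//     process(**Next);
//   }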

/// In-memory representation of the string table parsed from a buffer (e.g. the
/// remarks section).
struct ParsedStringTable {
  /// The buffer mapped from the section contents.
  StringRef Buffer;
  /// This object has high chances of being std::move'd around, so don't use
  /// a SmallVector for once.
  std::vector<size_t> Offsets;

  ParsedStringTable(StringRef Buffer);
  /// Disable copy.
  ParsedStringTable(const ParsedStringTable &) = delete;
  ParsedStringTable &operator=(const ParsedStringTable &) = delete;
  /// Should be movable.
  ParsedStringTable(ParsedStringTable &&) = default;
  ParsedStringTable &operator=(ParsedStringTable &&) = default;

  size_t size() const { return Offsets.size(); }
  Expected<StringRef> operator[](size_t Index) const;
};

Expected<std::unique_ptr<RemarkParser>> createRemarkParser(Format ParserFormat,
                                                           StringRef Buf);

Expected<std::unique_ptr<RemarkParser>>
createRemarkParser(Format ParserFormat, StringRef Buf,
                   ParsedStringTable StrTab);

Expected<std::unique_ptr<RemarkParser>> createRemarkParserFromMeta(
    Format ParserFormat, StringRef Buf,
    std::optional<ParsedStringTable> StrTab = std::nullopt,
    std::optional<StringRef> ExternalFilePrependPath = std::nullopt);

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_REMARKPARSER_H

//===-- llvm/Remarks/Remark.h - The remark type -----------------*- C++/-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an abstraction for handling remarks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARK_H
#define LLVM_REMARKS_REMARK_H

#include "llvm-c/Remarks.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>
#include <string>

namespace llvm {
namespace remarks {

/// The current version of the remark entry.
constexpr uint64_t CurrentRemarkVersion = 0;

/// The debug location used to track a remark back to the source file.
struct RemarkLocation {
  /// Absolute path of the source file corresponding to this remark.
  StringRef SourceFilePath;
  unsigned SourceLine = 0;
  unsigned SourceColumn = 0;

  /// Implement operator<< on RemarkLocation.
  void print(raw_ostream &OS) const;
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(RemarkLocation, LLVMRemarkDebugLocRef)

/// A key-value pair with a debug location that is used to display the remarks
/// at the right place in the source.
struct Argument {
  StringRef Key;
  // FIXME: We might want to be able to store other types than strings here.
  StringRef Val;
  // If set, the debug location corresponding to the value.
  std::optional<RemarkLocation> Loc;

  /// Implement operator<< on Argument.
  void print(raw_ostream &OS) const;
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Argument, LLVMRemarkArgRef)

/// The type of the remark.
enum class Type {
  Unknown,
  Passed,
  Missed,
  Analysis,
  AnalysisFPCommute,
  AnalysisAliasing,
  Failure,
  First = Unknown,
  Last = Failure
};

inline StringRef typeToStr(Type Ty) {
  switch (Ty) {
  case Type::Unknown:
    return "Unknown";
  case Type::Missed:
    return "Missed";
  case Type::Passed:
    return "Passed";
  case Type::Analysis:
    return "Analysis";
  case Type::AnalysisFPCommute:
    return "AnalysisFPCommute";
  case Type::AnalysisAliasing:
    return "AnalysisAliasing";
  default:
    return "Failure";
  }
}

/// A remark type used for both emission and parsing.
struct Remark {
  /// The type of the remark.
  Type RemarkType = Type::Unknown;

  /// Name of the pass that triggers the emission of this remark.
  StringRef PassName;

  /// Textual identifier for the remark (single-word, camel-case). Can be used
  /// by external tools reading the output file for remarks to identify the
  /// remark.
  StringRef RemarkName;

  /// Mangled name of the function that triggers the emission of this remark.
  StringRef FunctionName;

  /// The location in the source file of the remark.
  std::optional<RemarkLocation> Loc;

  /// If profile information is available, this is the number of times the
  /// corresponding code was executed in a profile instrumentation run.
  std::optional<uint64_t> Hotness;

  /// Arguments collected via the streaming interface.
  SmallVector<Argument, 5> Args;

  Remark() = default;
  Remark(Remark &&) = default;
  Remark &operator=(Remark &&) = default;

  /// Return a message composed from the arguments as a string.
  std::string getArgsAsMsg() const;

  /// Clone this remark to explicitly ask for a copy.
  Remark clone() const { return *this; }

  /// Implement operator<< on Remark.
  void print(raw_ostream &OS) const;

private:
  /// In order to avoid unwanted copies, "delete" the copy constructor.
  /// If a copy is needed, it should be done through `Remark::clone()`.
  Remark(const Remark &) = default;
  Remark& operator=(const Remark &) = default;
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Remark, LLVMRemarkEntryRef)

/// Comparison operators for Remark objects and dependent objects.

template <typename T>
bool operator<(const std::optional<T> &LHS, const std::optional<T> &RHS) {
  // Sorting based on optionals should result in all `None` entries appearing
  // before the valid entries. For example, remarks with no debug location will
  // appear first.
  if (!LHS && !RHS)
    return false;
  if (!LHS && RHS)
    return true;
  if (LHS && !RHS)
    return false;
  return *LHS < *RHS;
}

inline bool operator==(const RemarkLocation &LHS, const RemarkLocation &RHS) {
  return LHS.SourceFilePath == RHS.SourceFilePath &&
         LHS.SourceLine == RHS.SourceLine &&
         LHS.SourceColumn == RHS.SourceColumn;
}

inline bool operator!=(const RemarkLocation &LHS, const RemarkLocation &RHS) {
  return !(LHS == RHS);
}

inline bool operator<(const RemarkLocation &LHS, const RemarkLocation &RHS) {
  return std::make_tuple(LHS.SourceFilePath, LHS.SourceLine, LHS.SourceColumn) <
         std::make_tuple(RHS.SourceFilePath, RHS.SourceLine, RHS.SourceColumn);
}

inline bool operator==(const Argument &LHS, const Argument &RHS) {
  return LHS.Key == RHS.Key && LHS.Val == RHS.Val && LHS.Loc == RHS.Loc;
}

inline bool operator!=(const Argument &LHS, const Argument &RHS) {
  return !(LHS == RHS);
}

inline bool operator<(const Argument &LHS, const Argument &RHS) {
  return std::make_tuple(LHS.Key, LHS.Val, LHS.Loc) <
         std::make_tuple(RHS.Key, RHS.Val, RHS.Loc);
}

inline bool operator==(const Remark &LHS, const Remark &RHS) {
  return LHS.RemarkType == RHS.RemarkType && LHS.PassName == RHS.PassName &&
         LHS.RemarkName == RHS.RemarkName &&
         LHS.FunctionName == RHS.FunctionName && LHS.Loc == RHS.Loc &&
         LHS.Hotness == RHS.Hotness && LHS.Args == RHS.Args;
}

inline bool operator!=(const Remark &LHS, const Remark &RHS) {
  return !(LHS == RHS);
}

inline bool operator<(const Remark &LHS, const Remark &RHS) {
  return std::make_tuple(LHS.RemarkType, LHS.PassName, LHS.RemarkName,
                         LHS.FunctionName, LHS.Loc, LHS.Hotness, LHS.Args) <
         std::make_tuple(RHS.RemarkType, RHS.PassName, RHS.RemarkName,
                         RHS.FunctionName, RHS.Loc, RHS.Hotness, RHS.Args);
}

inline raw_ostream &operator<<(raw_ostream &OS, const RemarkLocation &RLoc) {
  RLoc.print(OS);
  return OS;
}

inline raw_ostream &operator<<(raw_ostream &OS, const Argument &Arg) {
  Arg.print(OS);
  return OS;
}

inline raw_ostream &operator<<(raw_ostream &OS, const Remark &Remark) {
  Remark.print(OS);
  return OS;
}

} // end namespace remarks
} // end namespace llvm

#endif /* LLVM_REMARKS_REMARK_H */

//===-- RemarkSerializer.h - Remark serialization interface -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface for serializing remarks to different formats.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARKSERIALIZER_H
#define LLVM_REMARKS_REMARKSERIALIZER_H

#include "llvm/Remarks/RemarkFormat.h"
#include "llvm/Remarks/RemarkStringTable.h"
#include <optional>

namespace llvm {

class raw_ostream;

namespace remarks {

struct Remark;

enum class SerializerMode {
  Separate,  // A mode where the metadata is serialized separately from the
             // remarks. Typically, this is used when the remarks need to be
             // streamed to a side file and the metadata is embedded into the
             // final result of the compilation.
  Standalone // A mode where everything can be retrieved in the same
             // file/buffer. Typically, this is used for storing remarks for
             // later use.
};

struct MetaSerializer;

/// This is the base class for a remark serializer.
/// It includes support for using a string table while emitting.
struct RemarkSerializer {
  /// The format of the serializer.
  Format SerializerFormat;
  /// The open raw_ostream that the remark diagnostics are emitted to.
  raw_ostream &OS;
  /// The serialization mode.
  SerializerMode Mode;
  /// The string table containing all the unique strings used in the output.
  /// The table can be serialized to be consumed after the compilation.
  std::optional<StringTable> StrTab;

  RemarkSerializer(Format SerializerFormat, raw_ostream &OS,
                   SerializerMode Mode)
      : SerializerFormat(SerializerFormat), OS(OS), Mode(Mode) {}

  /// This is just an interface.
  virtual ~RemarkSerializer() = default;
  /// Emit a remark to the stream.
  virtual void emit(const Remark &Remark) = 0;
  /// Return the corresponding metadata serializer.
  virtual std::unique_ptr<MetaSerializer>
  metaSerializer(raw_ostream &OS,
                 std::optional<StringRef> ExternalFilename = std::nullopt) = 0;
};

/// This is the base class for a remark metadata serializer.
struct MetaSerializer {
  /// The open raw_ostream that the metadata is emitted to.
  raw_ostream &OS;

  MetaSerializer(raw_ostream &OS) : OS(OS) {}

  /// This is just an interface.
  virtual ~MetaSerializer() = default;
  virtual void emit() = 0;
};

/// Create a remark serializer.
Expected<std::unique_ptr<RemarkSerializer>>
createRemarkSerializer(Format RemarksFormat, SerializerMode Mode,
                       raw_ostream &OS);

/// Create a remark serializer that uses a pre-filled string table.
Expected<std::unique_ptr<RemarkSerializer>>
createRemarkSerializer(Format RemarksFormat, SerializerMode Mode,
                       raw_ostream &OS, remarks::StringTable StrTab);
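
// A minimal sketch of setting one up (assuming an open raw_ostream `OS` and
// a remark `R`):
//
//   Expected<std::unique_ptr<RemarkSerializer>> MaybeS =
//       createRemarkSerializer(Format::YAML, SerializerMode::Standalone, OS);
//   if (!MaybeS)
//     return MaybeS.takeError();
//   (*MaybeS)->emit(R);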

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_REMARKSERIALIZER_H

//===-- BitstreamRemarkSerializer.h - Bitstream serializer ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an implementation of the serializer using the LLVM
// Bitstream format.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_BITSTREAMREMARKSERIALIZER_H
#define LLVM_REMARKS_BITSTREAMREMARKSERIALIZER_H

#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Remarks/BitstreamRemarkContainer.h"
#include "llvm/Remarks/RemarkSerializer.h"
#include <optional>

namespace llvm {
namespace remarks {

struct Remarks;

/// Serialize the remarks to LLVM bitstream.
/// This class provides ways to emit remarks in the LLVM bitstream format and
/// its associated metadata.
///
/// * The separate model:
///   Separate meta:        | Container info
///                         | String table
///                         | External file
///
///   Separate remarks:     | Container info
///                         | Remark version
///                         | Remark0
///                         | Remark1
///                         | Remark2
///                         | ...
///
/// * The standalone model: | Container info
///                         | String table
///                         | Remark version
///                         | Remark0
///                         | Remark1
///                         | Remark2
///                         | ...
///
struct BitstreamRemarkSerializerHelper {
  /// Buffer used for encoding the bitstream before writing it to the final
  /// stream.
  SmallVector<char, 1024> Encoded;
  /// Buffer used to construct records and pass to the bitstream writer.
  SmallVector<uint64_t, 64> R;
  /// The Bitstream writer.
  BitstreamWriter Bitstream;
  /// The type of the container we are serializing.
  BitstreamRemarkContainerType ContainerType;

  /// Abbrev IDs initialized in the block info block.
  /// Note: depending on the container type, some IDs might be uninitialized.
  /// Warning: When adding more abbrev IDs, make sure to update the
  /// BlockCodeSize (in the call to EnterSubblock).
  uint64_t RecordMetaContainerInfoAbbrevID = 0;
  uint64_t RecordMetaRemarkVersionAbbrevID = 0;
  uint64_t RecordMetaStrTabAbbrevID = 0;
  uint64_t RecordMetaExternalFileAbbrevID = 0;
  uint64_t RecordRemarkHeaderAbbrevID = 0;
  uint64_t RecordRemarkDebugLocAbbrevID = 0;
  uint64_t RecordRemarkHotnessAbbrevID = 0;
  uint64_t RecordRemarkArgWithDebugLocAbbrevID = 0;
  uint64_t RecordRemarkArgWithoutDebugLocAbbrevID = 0;

  BitstreamRemarkSerializerHelper(BitstreamRemarkContainerType ContainerType);

  // Disable copy and move: Bitstream points to Encoded, which needs special
  // handling during copy/move, but moving the vectors is probably useless
  // anyway.
  BitstreamRemarkSerializerHelper(const BitstreamRemarkSerializerHelper &) =
      delete;
  BitstreamRemarkSerializerHelper &
  operator=(const BitstreamRemarkSerializerHelper &) = delete;
  BitstreamRemarkSerializerHelper(BitstreamRemarkSerializerHelper &&) = delete;
  BitstreamRemarkSerializerHelper &
  operator=(BitstreamRemarkSerializerHelper &&) = delete;

  /// Set up the necessary block info entries according to the container type.
  void setupBlockInfo();

  /// Set up the block info for the metadata block.
  void setupMetaBlockInfo();
  /// The remark version in the metadata block.
  void setupMetaRemarkVersion();
  void emitMetaRemarkVersion(uint64_t RemarkVersion);
  /// The strtab in the metadata block.
  void setupMetaStrTab();
  void emitMetaStrTab(const StringTable &StrTab);
  /// The external file in the metadata block.
  void setupMetaExternalFile();
  void emitMetaExternalFile(StringRef Filename);

  /// The block info for the remarks block.
  void setupRemarkBlockInfo();

  /// Emit the metadata for the remarks.
  void emitMetaBlock(uint64_t ContainerVersion,
                     std::optional<uint64_t> RemarkVersion,
                     std::optional<const StringTable *> StrTab = std::nullopt,
                     std::optional<StringRef> Filename = std::nullopt);

  /// Emit a remark block. The string table is required.
  void emitRemarkBlock(const Remark &Remark, StringTable &StrTab);
  /// Finalize the writing to \p OS.
  void flushToStream(raw_ostream &OS);
  /// Finalize the writing to a buffer.
  /// The contents of the buffer remain valid for the lifetime of the object.
  /// Any call to any other function in this class will invalidate the buffer.
  StringRef getBuffer();
};
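
// A standalone-emission sketch with the helper (assuming a populated
// `StrTab`, a remark `R`, and an open stream `OS`):
//
//   BitstreamRemarkSerializerHelper H(
//       BitstreamRemarkContainerType::Standalone);
//   H.setupBlockInfo();
//   H.emitMetaBlock(CurrentContainerVersion, CurrentRemarkVersion, &StrTab);
//   H.emitRemarkBlock(R, StrTab); // one REMARK_BLOCK per remark
//   H.flushToStream(OS);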

/// Implementation of the remark serializer using LLVM bitstream.
struct BitstreamRemarkSerializer : public RemarkSerializer {
  /// The file should contain:
  /// 1) The block info block that describes how to read the blocks.
  /// 2) The metadata block that contains various information about the remarks
  ///    in the file.
  /// 3) A number of remark blocks.

  /// We need to set up 1) and 2) first, so that we can emit 3) after. This flag
  /// is used to emit the first two blocks only once.
  bool DidSetUp = false;
  /// The helper to emit bitstream.
  BitstreamRemarkSerializerHelper Helper;

  /// Construct a serializer that will create its own string table.
  BitstreamRemarkSerializer(raw_ostream &OS, SerializerMode Mode);
  /// Construct a serializer with a pre-filled string table.
  BitstreamRemarkSerializer(raw_ostream &OS, SerializerMode Mode,
                            StringTable StrTab);

  /// Emit a remark to the stream. This also emits the metadata associated to
  /// the remarks based on the SerializerMode specified at construction.
  /// This writes the serialized output to the provided stream.
  void emit(const Remark &Remark) override;
  /// The metadata serializer associated with this remark serializer. Based on
  /// the container type of the current serializer, the container type of the
  /// metadata serializer will change.
  std::unique_ptr<MetaSerializer> metaSerializer(
      raw_ostream &OS,
      std::optional<StringRef> ExternalFilename = std::nullopt) override;

  static bool classof(const RemarkSerializer *S) {
    return S->SerializerFormat == Format::Bitstream;
  }
};

/// Serializer of metadata for bitstream remarks.
struct BitstreamMetaSerializer : public MetaSerializer {
  /// This class can be used with [1] a pre-constructed
  /// BitstreamRemarkSerializerHelper, or with [2] one that is owned by the meta
  /// serializer. In case of [1], we need to be able to store a reference to the
  /// object, while in case of [2] we need to store the whole object.
  std::optional<BitstreamRemarkSerializerHelper> TmpHelper;
  /// The actual helper, that can point to \p TmpHelper or to an external helper
  /// object.
  BitstreamRemarkSerializerHelper *Helper = nullptr;

  std::optional<const StringTable *> StrTab;
  std::optional<StringRef> ExternalFilename;

  /// Create a new meta serializer based on \p ContainerType.
  BitstreamMetaSerializer(
      raw_ostream &OS, BitstreamRemarkContainerType ContainerType,
      std::optional<const StringTable *> StrTab = std::nullopt,
      std::optional<StringRef> ExternalFilename = std::nullopt)
      : MetaSerializer(OS), TmpHelper(std::nullopt), Helper(nullptr),
        StrTab(StrTab), ExternalFilename(ExternalFilename) {
    TmpHelper.emplace(ContainerType);
    Helper = &*TmpHelper;
  }

  /// Create a new meta serializer based on a previously built \p Helper.
  BitstreamMetaSerializer(
      raw_ostream &OS, BitstreamRemarkSerializerHelper &Helper,
      std::optional<const StringTable *> StrTab = std::nullopt,
      std::optional<StringRef> ExternalFilename = std::nullopt)
      : MetaSerializer(OS), TmpHelper(std::nullopt), Helper(&Helper),
        StrTab(StrTab), ExternalFilename(ExternalFilename) {}

  void emit() override;
};

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_BITSTREAMREMARKSERIALIZER_H

//===- llvm/Remarks/RemarkStreamer.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the main interface for streaming remarks.
//
// This is used to stream any llvm::remarks::Remark to an open file taking
// advantage of all the serialization capabilities developed for remarks (e.g.
// metadata in a section, bitstream format, etc.).
//
// Typically, a specialized remark emitter should hold a reference to the main
// remark streamer set up in the LLVMContext, and should convert specialized
// diagnostics to llvm::remarks::Remark objects as they get emitted.
//
// Specialized remark emitters can be components like:
// * Remarks from LLVM (M)IR passes
// * Remarks from the frontend
// * Remarks from an intermediate IR
//
// This allows for composition between specialized remark emitters throughout
// the compilation pipeline, that end up in the same file, using the same format
// and serialization techniques.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARKSTREAMER_H
#define LLVM_REMARKS_REMARKSTREAMER_H

#include "llvm/Remarks/RemarkSerializer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Regex.h"
#include <memory>
#include <optional>

namespace llvm {

class raw_ostream;

namespace remarks {
class RemarkStreamer final {
  /// The regex used to filter remarks based on the passes that emit them.
  std::optional<Regex> PassFilter;
  /// The object used to serialize the remarks to a specific format.
  std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer;
  /// The filename that the remark diagnostics are emitted to.
  const std::optional<std::string> Filename;

public:
  RemarkStreamer(std::unique_ptr<remarks::RemarkSerializer> RemarkSerializer,
                 std::optional<StringRef> Filename = std::nullopt);

  /// Return the filename that the remark diagnostics are emitted to.
  std::optional<StringRef> getFilename() const {
    return Filename ? std::optional<StringRef>(*Filename) : std::nullopt;
  }
  /// Return stream that the remark diagnostics are emitted to.
  raw_ostream &getStream() { return RemarkSerializer->OS; }
  /// Return the serializer used for this stream.
  remarks::RemarkSerializer &getSerializer() { return *RemarkSerializer; }
  /// Set a pass filter based on a regex \p Filter.
  /// Returns an error if the regex is invalid.
  Error setFilter(StringRef Filter);
  /// Check whether the string matches the filter.
  bool matchesFilter(StringRef Str);
  /// Check if the remarks also need to have associated metadata in a section.
  bool needsSection() const;
};
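
// A filtering sketch (assuming a configured `Streamer` and a remark `R`):
//
//   if (Error E = Streamer.setFilter("inline")) // regex over pass names
//     return std::move(E);
//   if (Streamer.matchesFilter(R.PassName))
//     Streamer.getSerializer().emit(R);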
} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_REMARKSTREAMER_H

//===-- llvm/Remarks/RemarkLinker.h -----------------------------*- C++/-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an interface to link together multiple remark files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_REMARKLINKER_H
#define LLVM_REMARKS_REMARKLINKER_H

#include "llvm/Remarks/Remark.h"
#include "llvm/Remarks/RemarkFormat.h"
#include "llvm/Remarks/RemarkStringTable.h"
#include "llvm/Support/Error.h"
#include <memory>
#include <optional>
#include <set>

namespace llvm {

namespace object {
class ObjectFile;
}

namespace remarks {

struct RemarkLinker {
private:
  /// Compare through the pointers.
  struct RemarkPtrCompare {
    bool operator()(const std::unique_ptr<Remark> &LHS,
                    const std::unique_ptr<Remark> &RHS) const {
      assert(LHS && RHS && "Invalid pointers to compare.");
      return *LHS < *RHS;
    };
  };

  /// The main string table for the remarks.
  /// Note: all remarks should use the strings from this string table to avoid
  /// dangling references.
  StringTable StrTab;

  /// A set holding unique remarks.
  /// FIXME: std::set is probably not the most appropriate data structure here.
  /// Due to the limitation of having a move-only key, there isn't another
  /// obvious choice for now.
  std::set<std::unique_ptr<Remark>, RemarkPtrCompare> Remarks;

  /// A path to append before the external file path found in remark metadata.
  std::optional<std::string> PrependPath;

  /// If true, keep all remarks, otherwise only keep remarks with valid debug
  /// locations.
  bool KeepAllRemarks = true;

  /// Keep this remark. If it's already in the set, discard it.
  Remark &keep(std::unique_ptr<Remark> Remark);

  /// Returns true if \p R should be kept. If KeepAllRemarks is false, only
  /// return true if \p R has a valid debug location.
  bool shouldKeepRemark(const Remark &R) {
    return KeepAllRemarks ? true : R.Loc.has_value();
  }

public:
  /// Set a path to prepend to the external file path.
  void setExternalFilePrependPath(StringRef PrependPath);

  /// Set KeepAllRemarks to \p B.
  void setKeepAllRemarks(bool B) { KeepAllRemarks = B; }

  /// Link the remarks found in \p Buffer.
  /// If \p RemarkFormat is not provided, try to deduce it from the metadata in
  /// \p Buffer.
  /// \p Buffer can be either a standalone remark container or just
  /// metadata. This takes care of uniquing and merging the remarks.
  Error link(StringRef Buffer,
             std::optional<Format> RemarkFormat = std::nullopt);

  /// Link the remarks found in \p Obj by looking for the right section and
  /// calling the method above.
  Error link(const object::ObjectFile &Obj,
             std::optional<Format> RemarkFormat = std::nullopt);

  /// Serialize the linked remarks to the stream \p OS, using the format \p
  /// RemarkFormat.
  /// This clears internal state such as the string table.
  /// Note: this implies that the serialization mode is standalone.
  Error serialize(raw_ostream &OS, Format RemarksFormat) const;

  /// Check whether there are any remarks linked.
  bool empty() const { return Remarks.empty(); }

  /// Return a collection of the linked unique remarks to iterate on.
  /// Ex:
  /// for (const Remark &R : RL.remarks()) { [...] }
  using iterator = pointee_iterator<decltype(Remarks)::const_iterator>;

  iterator_range<iterator> remarks() const {
    return {Remarks.begin(), Remarks.end()};
  }
};

/// Returns a buffer with the contents of the remarks section depending on the
/// format of the file. If the section doesn't exist, this returns an empty
/// optional.
Expected<std::optional<StringRef>>
getRemarksSectionContents(const object::ObjectFile &Obj);

} // end namespace remarks
} // end namespace llvm

#endif // LLVM_REMARKS_REMARKLINKER_H
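
// Usage sketch (illustrative): merging two remark buffers and serializing
// the deduplicated set. The buffers' provenance and the YAML format choice
// are assumptions.
inline llvm::Error mergeRemarks(llvm::StringRef BufA, llvm::StringRef BufB,
                                llvm::raw_ostream &OS) {
  llvm::remarks::RemarkLinker RL;
  RL.setKeepAllRemarks(false); // keep only remarks with debug locations
  if (llvm::Error E = RL.link(BufA, llvm::remarks::Format::YAML))
    return E;
  if (llvm::Error E = RL.link(BufB, llvm::remarks::Format::YAML))
    return E;
  return RL.serialize(OS, llvm::remarks::Format::YAML); // standalone mode
}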
//===- HotnessThresholdParser.h - Parser for hotness threshold --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a simple parser to decode the command-line option for
/// the remarks hotness threshold, which accepts both an integer and the
/// special 'auto' value.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_REMARKS_HOTNESSTHRESHOLDPARSER_H
#define LLVM_REMARKS_HOTNESSTHRESHOLDPARSER_H

#include "llvm/Support/CommandLine.h"
#include <optional>

namespace llvm {
namespace remarks {

// Parse remarks hotness threshold argument value.
// Valid option values are
// 1. integer: manually specified threshold; or
// 2. string 'auto': automatically get threshold from profile summary.
//
// Returns std::nullopt if 'auto' is specified, indicating that the value will
// be filled in later from the profile summary (PSI).
inline Expected<std::optional<uint64_t>>
parseHotnessThresholdOption(StringRef Arg) {
  if (Arg == "auto")
    return std::nullopt;

  int64_t Val;
  if (Arg.getAsInteger(10, Val))
    return createStringError(llvm::inconvertibleErrorCode(),
                             "Not an integer: %s", Arg.data());

  // Negative integer effectively means no threshold
  return Val < 0 ? 0 : Val;
}

// A simple CL parser for '*-remarks-hotness-threshold='
class HotnessThresholdParser : public cl::parser<std::optional<uint64_t>> {
public:
  HotnessThresholdParser(cl::Option &O) : cl::parser<std::optional<uint64_t>>(O) {}

  bool parse(cl::Option &O, StringRef ArgName, StringRef Arg,
             std::optional<uint64_t> &V) {
    auto ResultOrErr = parseHotnessThresholdOption(Arg);
    if (!ResultOrErr)
      return O.error("Invalid argument '" + Arg +
                     "', only integer or 'auto' is supported.");

    V = *ResultOrErr;
    return false;
  }
};

} // namespace remarks
} // namespace llvm
#endif // LLVM_REMARKS_HOTNESSTHRESHOLDPARSER_H
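
// Usage sketch (illustrative): registering a command-line option that uses
// the parser above. The option name is an assumption, modeled on LLVM's
// "*-remarks-hotness-threshold" flags.
static llvm::cl::opt<std::optional<uint64_t>, false,
                     llvm::remarks::HotnessThresholdParser>
    ExampleRemarksHotnessThreshold(
        "example-remarks-hotness-threshold",
        llvm::cl::desc("Minimum remark hotness (integer), or 'auto' to take "
                       "the threshold from the profile summary"),
        llvm::cl::value_desc("uint or 'auto'"), llvm::cl::Optional);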
//===-- MCJIT.h - MC-Based Just-In-Time Execution Engine --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file forces the MCJIT engine to be linked in on certain operating
// systems (currently Windows).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_MCJIT_H
#define LLVM_EXECUTIONENGINE_MCJIT_H

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include <cstdlib>

extern "C" void LLVMLinkInMCJIT();

namespace {
  struct ForceMCJITLinking {
    ForceMCJITLinking() {
      // We must reference MCJIT in such a way that compilers will not
      // delete it all as dead code, even with whole program optimization,
      // yet is effectively a NO-OP. As the compiler isn't smart enough
      // to know that getenv() never returns -1, this will do the job.
      // This is so that globals in the translation units where these functions
      // are defined are forced to be initialized, populating various
      // registries.
      if (std::getenv("bar") != (char*) -1)
        return;

      LLVMLinkInMCJIT();
    }
  } ForceMCJITLinking;
}

#endif
//===- GenericValue.h - Represent any type of LLVM value --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The GenericValue class is used to represent an LLVM value of arbitrary type.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_GENERICVALUE_H
#define LLVM_EXECUTIONENGINE_GENERICVALUE_H

#include "llvm/ADT/APInt.h"
#include <vector>

namespace llvm {

using PointerTy = void *;

struct GenericValue {
  struct IntPair {
    unsigned int first;
    unsigned int second;
  };
  union {
    double DoubleVal;
    float FloatVal;
    PointerTy PointerVal;
    struct IntPair UIntPairVal;
    unsigned char Untyped[8];
  };
  APInt IntVal; // also used for long doubles.
  // For aggregate data types.
  std::vector<GenericValue> AggregateVal;

  // Zero-initializing the GenericValue here could be omitted to make the
  // code faster, but skipping it is risky: the GenericValue would then hold
  // garbage instead of zero.
  GenericValue() : IntVal(1, 0) {
    UIntPairVal.first = 0;
    UIntPairVal.second = 0;
  }
  explicit GenericValue(void *V) : PointerVal(V), IntVal(1, 0) {}
};

inline GenericValue PTOGV(void *P) { return GenericValue(P); }
inline void *GVTOP(const GenericValue &GV) { return GV.PointerVal; }

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_GENERICVALUE_H
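
// Usage sketch: packing arguments for an ExecutionEngine's runFunction. The
// helper names and the 64-bit width are illustrative assumptions.
inline llvm::GenericValue makeIntArg(uint64_t V) {
  llvm::GenericValue GV;
  GV.IntVal = llvm::APInt(/*numBits=*/64, V); // integers travel in IntVal
  return GV;
}

inline llvm::GenericValue makePtrArg(void *P) {
  return llvm::PTOGV(P); // stores P in the PointerVal union member
}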
//===-- ObjectCache.h - Class definition for the ObjectCache ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_OBJECTCACHE_H
#define LLVM_EXECUTIONENGINE_OBJECTCACHE_H

#include <memory>

namespace llvm {

class MemoryBuffer;
class MemoryBufferRef;
class Module;

/// This is the base ObjectCache type which can be provided to an
/// ExecutionEngine to avoid recompiling Modules for which an object file is
/// already available.
class ObjectCache {
  virtual void anchor();

public:
  ObjectCache() = default;

  virtual ~ObjectCache() = default;

  /// notifyObjectCompiled - Provides a pointer to compiled code for Module M.
  virtual void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj) = 0;

  /// Returns a pointer to a newly allocated MemoryBuffer that contains the
  /// object which corresponds with Module M, or nullptr if an object is not
  /// available.
  virtual std::unique_ptr<MemoryBuffer> getObject(const Module *M) = 0;
};

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_OBJECTCACHE_H
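
// Minimal sketch of an in-memory cache built on the interface above. Keying
// on the module identifier is an assumption; real caches often key on a
// hash of the module contents instead.
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"

class InMemoryObjectCache : public llvm::ObjectCache {
  llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> Cache;

public:
  void notifyObjectCompiled(const llvm::Module *M,
                            llvm::MemoryBufferRef Obj) override {
    // Copy the object so it survives after the JIT discards its buffer.
    Cache[M->getModuleIdentifier()] = llvm::MemoryBuffer::getMemBufferCopy(
        Obj.getBuffer(), Obj.getBufferIdentifier());
  }

  std::unique_ptr<llvm::MemoryBuffer> getObject(const llvm::Module *M) override {
    auto I = Cache.find(M->getModuleIdentifier());
    if (I == Cache.end())
      return nullptr; // not cached: the engine will compile M
    // Hand back a copy so the cache keeps its entry.
    return llvm::MemoryBuffer::getMemBufferCopy(I->second->getBuffer());
  }
};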
//===----- ELF_riscv.h - JIT link functions for ELF/riscv -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/riscv.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_RISCV_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_RISCV_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF/riscv relocatable object
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_riscv(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF riscv object file.
void link_ELF_riscv(std::unique_ptr<LinkGraph> G,
                    std::unique_ptr<JITLinkContext> Ctx);

/// Returns a pass that performs linker relaxation. Should be added to
/// PostAllocationPasses.
LinkGraphPassFunction createRelaxationPass_ELF_riscv();

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_RISCV_H
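
// Usage sketch: building (but not yet linking) a graph from an in-memory
// ELF/riscv object. Per the note above, the caller keeps ObjBuffer alive
// for the graph's lifetime. The function name is an assumption.
inline llvm::Expected<std::unique_ptr<llvm::jitlink::LinkGraph>>
buildRISCVGraph(llvm::MemoryBufferRef ObjBuffer) {
  auto G = llvm::jitlink::createLinkGraphFromELFObject_riscv(ObjBuffer);
  if (!G)
    return G.takeError();
  // To actually link, construct a JITLinkContext, optionally add
  // createRelaxationPass_ELF_riscv() to its PostAllocationPasses, then call
  // link_ELF_riscv(std::move(*G), std::move(Ctx)).
  return G;
}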
//= loongarch.h - Generic JITLink loongarch edge kinds, utilities -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing loongarch objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_LOONGARCH_H
#define LLVM_EXECUTIONENGINE_JITLINK_LOONGARCH_H

#include "TableManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"

namespace llvm {
namespace jitlink {
namespace loongarch {

/// Represents loongarch fixups.
enum EdgeKind_loongarch : Edge::Kind {
  /// A plain 64-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint64
  ///
  Pointer64 = Edge::FirstRelocation,

  /// A plain 32-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint32
  ///
  /// Errors:
  ///   - The target must reside in the low 32-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer32,

  /// A 26-bit PC-relative branch.
  ///
  /// Represents a PC-relative call or branch to a target within +/-128Mb. The
  /// target must be 4-byte aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int26
  ///
  /// Notes:
  ///   The '26' in the name refers to the number of operand bits and follows
  /// the
  /// naming convention used by the corresponding ELF relocations. Since the low
  /// two bits must be zero (because of the 4-byte alignment of the target) the
  /// operand is effectively a signed 28-bit number.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     4-byte aligned otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int26 otherwise an
  ///     out-of-range error will be returned.
  ///
  Branch26PCRel,

  /// A 32-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  Delta32,

  /// A 32-bit negative delta.
  ///
  /// Delta from the target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  NegDelta32,

  /// A 64-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int64
  ///
  Delta64,

  /// The signed 20-bit delta from the fixup page to the page containing the
  /// target.
  ///
  /// Fixup expression:
  ///   Fixup <- (((Target + Addend + ((Target + Addend) & 0x800)) & ~0xfff)
  ///             - (Fixup & ~0xfff)) >> 12 : int20
  ///
  /// Notes:
  ///   For PCALAU12I fixups.
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int20 otherwise an
  ///     out-of-range error will be returned.
  ///
  Page20,

  /// The 12-bit offset of the target within its page.
  ///
  /// Typically used to fix up ADDI/LD_W/LD_D immediates.
  ///
  /// Fixup expression:
  ///   Fixup <- ((Target + Addend) >> Shift) & 0xfff : int12
  ///
  PageOffset12,

  /// A GOT entry getter/constructor, transformed to Page20 pointing at the GOT
  /// entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Page20 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT/PLT builder pass inserted
  /// by default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPage20,

  /// A GOT entry getter/constructor, transformed to Pageoffset12 pointing at
  /// the GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend. A GOT entry for the target should be created if one does not
  /// already exist.
  ///
  /// Edges of this kind are usually handled by a GOT/PLT builder pass inserted
  /// by default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  RequestGOTAndTransformToPageOffset12,
};

/// Returns a string name for the given loongarch edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);

// Extracts bits Val[Hi:Lo].
inline uint32_t extractBits(uint32_t Val, unsigned Hi, unsigned Lo) {
  // Use a 64-bit intermediate so Hi == 31 does not shift past the width.
  return (Val & ((1ULL << (Hi + 1)) - 1)) >> Lo;
}
}
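// Example: extractBits(0xABCD, /*Hi=*/15, /*Lo=*/8) == 0xAB -- the mask keeps
// bits [15:0] and the shift then drops bits [7:0].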

/// Apply fixup expression for edge to block content.
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
  using namespace support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();
  uint64_t FixupAddress = (B.getAddress() + E.getOffset()).getValue();
  uint64_t TargetAddress = E.getTarget().getAddress().getValue();
  int64_t Addend = E.getAddend();

  switch (E.getKind()) {
  case Pointer64:
    *(ulittle64_t *)FixupPtr = TargetAddress + Addend;
    break;
  case Pointer32: {
    uint64_t Value = TargetAddress + Addend;
    if (Value > std::numeric_limits<uint32_t>::max())
      return makeTargetOutOfRangeError(G, B, E);
    *(ulittle32_t *)FixupPtr = Value;
    break;
  }
  case Branch26PCRel: {
    int64_t Value = TargetAddress - FixupAddress + Addend;

    if (!isInt<28>(Value))
      return makeTargetOutOfRangeError(G, B, E);

    if (!isShiftedInt<26, 2>(Value))
      return makeAlignmentError(orc::ExecutorAddr(FixupAddress), Value, 4, E);

    uint32_t RawInstr = *(little32_t *)FixupPtr;
    uint32_t Imm = static_cast<uint32_t>(Value >> 2);
    uint32_t Imm15_0 = extractBits(Imm, /*Hi=*/15, /*Lo=*/0) << 10;
    uint32_t Imm25_16 = extractBits(Imm, /*Hi=*/25, /*Lo=*/16);
    *(little32_t *)FixupPtr = RawInstr | Imm15_0 | Imm25_16;
    break;
  }
  case Delta32: {
    int64_t Value = TargetAddress - FixupAddress + Addend;

    if (!isInt<32>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    *(little32_t *)FixupPtr = Value;
    break;
  }
  case NegDelta32: {
    int64_t Value = FixupAddress - TargetAddress + Addend;
    if (!isInt<32>(Value))
      return makeTargetOutOfRangeError(G, B, E);
    *(little32_t *)FixupPtr = Value;
    break;
  }
  case Delta64:
    *(little64_t *)FixupPtr = TargetAddress - FixupAddress + Addend;
    break;
  case Page20: {
    uint64_t Target = TargetAddress + Addend;
    uint64_t TargetPage =
        (Target + (Target & 0x800)) & ~static_cast<uint64_t>(0xfff);
    uint64_t PCPage = FixupAddress & ~static_cast<uint64_t>(0xfff);

    int64_t PageDelta = TargetPage - PCPage;
    if (!isInt<32>(PageDelta))
      return makeTargetOutOfRangeError(G, B, E);

    uint32_t RawInstr = *(little32_t *)FixupPtr;
    uint32_t Imm31_12 = extractBits(PageDelta, /*Hi=*/31, /*Lo=*/12) << 5;
    *(little32_t *)FixupPtr = RawInstr | Imm31_12;
    break;
  }
  case PageOffset12: {
    uint64_t TargetOffset = (TargetAddress + Addend) & 0xfff;

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    uint32_t Imm11_0 = TargetOffset << 10;
    *(ulittle32_t *)FixupPtr = RawInstr | Imm11_0;
    break;
  }
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " unsupported edge kind " + getEdgeKindName(E.getKind()));
  }

  return Error::success();
}

/// loongarch null pointer content.
extern const char NullPointerContent[8];
inline ArrayRef<char> getGOTEntryBlockContent(LinkGraph &G) {
  return {reinterpret_cast<const char *>(NullPointerContent),
          G.getPointerSize()};
}

/// loongarch stub content.
///
/// Contains the instruction sequence for an indirect jump via an in-memory
/// pointer:
///   pcalau12i $t8, %page20(ptr)
///   ld.[w/d]  $t8, %pageoff12(ptr)
///   jr        $t8
constexpr size_t StubEntrySize = 12;
extern const uint8_t LA64StubContent[StubEntrySize];
extern const uint8_t LA32StubContent[StubEntrySize];
inline ArrayRef<char> getStubBlockContent(LinkGraph &G) {
  auto StubContent =
      G.getPointerSize() == 8 ? LA64StubContent : LA32StubContent;
  return {reinterpret_cast<const char *>(StubContent), StubEntrySize};
}

/// Creates a new pointer block in the given section and returns an
/// anonymous symbol pointing to it.
///
/// If InitialTarget is given then a Pointer64 relocation will be added to the
/// block pointing at InitialTarget.
///
/// The pointer block will have the following default values:
///   alignment: PointerSize
///   alignment-offset: 0
inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
                                      Symbol *InitialTarget = nullptr,
                                      uint64_t InitialAddend = 0) {
  auto &B = G.createContentBlock(PointerSection, getGOTEntryBlockContent(G),
                                 orc::ExecutorAddr(), G.getPointerSize(), 0);
  if (InitialTarget)
    B.addEdge(G.getPointerSize() == 8 ? Pointer64 : Pointer32, 0,
              *InitialTarget, InitialAddend);
  return G.addAnonymousSymbol(B, 0, G.getPointerSize(), false, false);
}

/// Create a jump stub that jumps via the pointer at the given symbol and
/// an anonymous symbol pointing to it. Return the anonymous symbol.
inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
                                              Section &StubSection,
                                              Symbol &PointerSymbol) {
  Block &StubContentBlock = G.createContentBlock(
      StubSection, getStubBlockContent(G), orc::ExecutorAddr(), 4, 0);
  StubContentBlock.addEdge(Page20, 0, PointerSymbol, 0);
  StubContentBlock.addEdge(PageOffset12, 4, PointerSymbol, 0);
  return G.addAnonymousSymbol(StubContentBlock, 0, StubEntrySize, true, false);
}

/// Global Offset Table Builder.
class GOTTableManager : public TableManager<GOTTableManager> {
public:
  static StringRef getSectionName() { return "$__GOT"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind KindToSet = Edge::Invalid;
    switch (E.getKind()) {
    case RequestGOTAndTransformToPage20:
      KindToSet = Page20;
      break;
    case RequestGOTAndTransformToPageOffset12:
      KindToSet = PageOffset12;
      break;
    default:
      return false;
    }
    assert(KindToSet != Edge::Invalid &&
           "Fell through switch, but no new kind to set");
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
             << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
             << formatv("{0:x}", E.getOffset()) << ")\n";
    });
    E.setKind(KindToSet);
    E.setTarget(getEntryForTarget(G, E.getTarget()));
    return true;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointer(G, getGOTSection(G), &Target);
  }

private:
  Section &getGOTSection(LinkGraph &G) {
    if (!GOTSection)
      GOTSection = &G.createSection(getSectionName(),
                                    orc::MemProt::Read | orc::MemProt::Exec);
    return *GOTSection;
  }

  Section *GOTSection = nullptr;
};

/// Procedure Linkage Table Builder.
class PLTTableManager : public TableManager<PLTTableManager> {
public:
  PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}

  static StringRef getSectionName() { return "$__STUBS"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getKind() == Branch26PCRel && !E.getTarget().isDefined()) {
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      E.setTarget(getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointerJumpStub(G, getStubsSection(G),
                                          GOT.getEntryForTarget(G, Target));
  }

public:
  Section &getStubsSection(LinkGraph &G) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    return *StubsSection;
  }

  GOTTableManager &GOT;
  Section *StubsSection = nullptr;
};

} // namespace loongarch
} // namespace jitlink
} // namespace llvm

#endif
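
// Usage sketch: wiring the two managers above into a single LinkGraph pass.
// visitExistingEdges comes from TableManager.h; the function name here is
// an assumption.
inline llvm::Error buildGOTAndStubs_loongarch(llvm::jitlink::LinkGraph &G) {
  llvm::jitlink::loongarch::GOTTableManager GOT;
  llvm::jitlink::loongarch::PLTTableManager PLT(GOT);
  // Rewrites RequestGOTAndTransformTo* edges to Page20/PageOffset12 GOT
  // references, and external Branch26PCRel edges to PLT stub calls, creating
  // entries on demand.
  llvm::jitlink::visitExistingEdges(G, GOT, PLT);
  return llvm::Error::success();
}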
//===--- COFF_x86_64.h - JIT link functions for COFF/x86-64 ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for COFF/x86-64.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_COFF_X86_64_H
#define LLVM_EXECUTIONENGINE_JITLINK_COFF_X86_64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from a COFF/x86-64 relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromCOFFObject_x86_64(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be a COFF x86-64 object file.
void link_COFF_x86_64(std::unique_ptr<LinkGraph> G,
                      std::unique_ptr<JITLinkContext> Ctx);

/// Return the string name of the given COFF x86-64 edge kind.
const char *getCOFFX86RelocationKindName(Edge::Kind R);
} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_COFF_X86_64_H
//===-- riscv.h - Generic JITLink riscv edge kinds, utilities ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing riscv objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_RISCV_H
#define LLVM_EXECUTIONENGINE_JITLINK_RISCV_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {
namespace riscv {

/// Represents riscv fixups. Ordered in the same way as the relocations in
/// include/llvm/BinaryFormat/ELFRelocs/RISCV.def.
enum EdgeKind_riscv : Edge::Kind {

  // TODO: Capture and replace with generic fixups.
  /// A plain 32-bit pointer value relocation
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint32
  ///
  R_RISCV_32 = Edge::FirstRelocation,

  /// A plain 64-bit pointer value relocation
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint64
  ///
  R_RISCV_64,

  /// PC-relative branch pointer value relocation
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend)
  ///
  R_RISCV_BRANCH,

  /// High 20 bits of PC-relative jump pointer value relocation
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend
  ///
  R_RISCV_JAL,

  /// PC relative call
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend)
  R_RISCV_CALL,

  /// PC relative call by PLT
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend)
  R_RISCV_CALL_PLT,

  /// PC relative GOT offset
  ///
  /// Fixup expression:
  ///   Fixup <- (GOT - Fixup + Addend) >> 12
  R_RISCV_GOT_HI20,

  /// High 20 bits of PC relative relocation
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend + 0x800) >> 12
  R_RISCV_PCREL_HI20,

  /// Low 12 bits of PC relative relocation, used by I type instruction format
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) & 0xFFF
  R_RISCV_PCREL_LO12_I,

  /// Low 12 bits of PC relative relocation, used by S type instruction format
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) & 0xFFF
  R_RISCV_PCREL_LO12_S,

  /// High 20 bits of 32-bit pointer value relocation
  ///
  /// Fixup expression
  ///   Fixup <- (Target + Addend + 0x800) >> 12
  R_RISCV_HI20,

  /// Low 12 bits of 32-bit pointer value relocation
  ///
  /// Fixup expression
  ///   Fixup <- (Target + Addend) & 0xFFF
  R_RISCV_LO12_I,

  /// Low 12 bits of 32-bit pointer value relocation, used by S type instruction
  /// format
  ///
  /// Fixup expression
  ///   Fixup <- (Target + Addend) & 0xFFF
  R_RISCV_LO12_S,

  /// 8 bits label addition
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{1}Fixup + Addend)
  R_RISCV_ADD8,

  /// 16 bits label addition
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{2}Fixup + Addend)
  R_RISCV_ADD16,

  /// 32 bits label addition
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - *{4}Fixup + Addend)
  R_RISCV_ADD32,

  /// 64 bits label addition
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - *{8}Fixup + Addend)
  R_RISCV_ADD64,

  /// 8 bits label subtraction
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{1}Fixup - Addend)
  R_RISCV_SUB8,

  /// 16 bits label subtraction
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{2}Fixup - Addend)
  R_RISCV_SUB16,

  /// 32 bits label subtraction
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{4}Fixup - Addend)
  R_RISCV_SUB32,

  /// 64 bits label subtraction
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{8}Fixup - Addend)
  R_RISCV_SUB64,

  /// 8-bit PC-relative branch offset
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend)
  R_RISCV_RVC_BRANCH,

  /// 11-bit PC-relative jump offset
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend)
  R_RISCV_RVC_JUMP,

  /// 6 bits label subtraction
  ///
  /// Fixup expression
  ///   Fixup <- (Target - *{1}Fixup - Addend)
  R_RISCV_SUB6,

  /// Local label assignment
  ///
  /// Fixup expression:
  ///   Fixup <- (Target + Addend)
  R_RISCV_SET6,

  /// Local label assignment
  ///
  /// Fixup expression:
  ///   Fixup <- (Target + Addend)
  R_RISCV_SET8,

  /// Local label assignment
  ///
  /// Fixup expression:
  ///   Fixup <- (Target + Addend)
  R_RISCV_SET16,

  /// Local label assignment
  ///
  /// Fixup expression:
  ///   Fixup <- (Target + Addend)
  R_RISCV_SET32,

  /// 32 bits PC relative relocation
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend)
  R_RISCV_32_PCREL,

  /// An auipc/jalr pair eligible for linker relaxation.
  ///
  /// Linker relaxation will replace this with R_RISCV_RVC_JUMP or R_RISCV_JAL
  /// if it succeeds, or with R_RISCV_CALL_PLT if it fails.
  CallRelaxable,

  /// Alignment requirement used by linker relaxation.
  ///
  /// Linker relaxation will use this to ensure all code sequences are properly
  /// aligned and then remove these edges from the graph.
  AlignRelaxable,
};

/// Returns a string name for the given riscv edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);
} // namespace riscv
} // namespace jitlink
} // namespace llvm

#endif
//===---- MachO_arm64.h - JIT link functions for MachO/arm64 ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for MachO/arm64.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
#define LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from a MachO/arm64 relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromMachOObject_arm64(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be a MachO arm64 object file.
///
/// If PrePrunePasses is empty then a default mark-live pass will be inserted
/// that will mark all exported atoms live. If PrePrunePasses is not empty, the
/// caller is responsible for including a pass to mark atoms as live.
///
/// If PostPrunePasses is empty then a default GOT-and-stubs insertion pass will
/// be inserted. If PostPrunePasses is not empty then the caller is responsible
/// for including a pass to insert GOT and stub edges.
void link_MachO_arm64(std::unique_ptr<LinkGraph> G,
                      std::unique_ptr<JITLinkContext> Ctx);

/// Returns a pass suitable for splitting __eh_frame sections in MachO/arm64
/// objects.
LinkGraphPassFunction createEHFrameSplitterPass_MachO_arm64();

/// Returns a pass suitable for fixing missing edges in an __eh_frame section
/// in a MachO/arm64 object.
LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_arm64();

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_MACHO_ARM64_H
//===--- ELF_i386.h - JIT link functions for ELF/i386 -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/i386.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_I386_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_I386_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF/i386 relocatable object
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_i386(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF i386 relocatable
/// object file.
void link_ELF_i386(std::unique_ptr<LinkGraph> G,
                   std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_I386_H
//===-------- MemoryFlags.h - Memory allocation flags -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines types and operations related to memory protection and allocation
// lifetimes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_MEMORYFLAGS_H
#define LLVM_EXECUTIONENGINE_JITLINK_MEMORYFLAGS_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace jitlink {

/// Describes Read/Write/Exec permissions for memory.
enum class MemProt {
  None = 0,
  Read = 1U << 0,
  Write = 1U << 1,
  Exec = 1U << 2,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Exec)
};

/// Print a MemProt as an RWX triple.
raw_ostream &operator<<(raw_ostream &OS, MemProt MP);

/// Convert a MemProt value to a corresponding sys::Memory::ProtectionFlags
/// value.
inline sys::Memory::ProtectionFlags toSysMemoryProtectionFlags(MemProt MP) {
  std::underlying_type_t<sys::Memory::ProtectionFlags> PF = 0;
  if ((MP & MemProt::Read) != MemProt::None)
    PF |= sys::Memory::MF_READ;
  if ((MP & MemProt::Write) != MemProt::None)
    PF |= sys::Memory::MF_WRITE;
  if ((MP & MemProt::Exec) != MemProt::None)
    PF |= sys::Memory::MF_EXEC;
  return static_cast<sys::Memory::ProtectionFlags>(PF);
}

/// Convert a sys::Memory::ProtectionFlags value to a corresponding MemProt
/// value.
inline MemProt fromSysMemoryProtectionFlags(sys::Memory::ProtectionFlags PF) {
  MemProt MP = MemProt::None;
  if (PF & sys::Memory::MF_READ)
    MP |= MemProt::Read;
  if (PF & sys::Memory::MF_WRITE)
    MP |= MemProt::Write;
  if (PF & sys::Memory::MF_EXEC)
    MP |= MemProt::Exec;
  return MP;
}

/// Describes a memory deallocation policy for memory to be allocated by a
/// JITLinkMemoryManager.
///
/// All memory allocated by a call to JITLinkMemoryManager::allocate should be
/// deallocated if a call is made to
/// JITLinkMemoryManager::InFlightAllocation::abandon. The policies below apply
/// to finalized allocations.
enum class MemDeallocPolicy {
  /// Standard memory should be deallocated when the deallocate method is called
  /// for the finalized allocation.
  Standard,

  /// Finalize memory should be overwritten and then deallocated after all
  /// finalization functions have been run.
  Finalize
};

/// Print a MemDeallocPolicy.
raw_ostream &operator<<(raw_ostream &OS, MemDeallocPolicy MDP);

/// A pair of memory protections and allocation policies.
///
/// Optimized for use as a small map key.
class AllocGroup {
  friend struct llvm::DenseMapInfo<AllocGroup>;

  using underlying_type = uint8_t;
  static constexpr unsigned BitsForProt = 3;
  static constexpr unsigned BitsForDeallocPolicy = 1;
  static constexpr unsigned MaxIdentifiers =
      1U << (BitsForProt + BitsForDeallocPolicy);

public:
  static constexpr unsigned NumGroups = MaxIdentifiers;

  /// Create a default AllocGroup. No memory protections, standard
  /// deallocation policy.
  AllocGroup() = default;

  /// Create an AllocGroup from a MemProt only -- uses
  /// MemDeallocPolicy::Standard.
  AllocGroup(MemProt MP) : Id(static_cast<underlying_type>(MP)) {}

  /// Create an AllocGroup from a MemProt and a MemDeallocPolicy.
  AllocGroup(MemProt MP, MemDeallocPolicy MDP)
      : Id(static_cast<underlying_type>(MP) |
           (static_cast<underlying_type>(MDP) << BitsForProt)) {}

  /// Returns the MemProt for this group.
  MemProt getMemProt() const {
    return static_cast<MemProt>(Id & ((1U << BitsForProt) - 1));
  }

  /// Returns the MemDeallocPolicy for this group.
  MemDeallocPolicy getMemDeallocPolicy() const {
    return static_cast<MemDeallocPolicy>(Id >> BitsForProt);
  }

  friend bool operator==(const AllocGroup &LHS, const AllocGroup &RHS) {
    return LHS.Id == RHS.Id;
  }

  friend bool operator!=(const AllocGroup &LHS, const AllocGroup &RHS) {
    return !(LHS == RHS);
  }

  friend bool operator<(const AllocGroup &LHS, const AllocGroup &RHS) {
    return LHS.Id < RHS.Id;
  }

private:
  AllocGroup(underlying_type RawId) : Id(RawId) {}
  underlying_type Id = 0;
};

/// A specialized small-map for AllocGroups.
///
/// Iteration order is guaranteed to match key ordering.
template <typename T> class AllocGroupSmallMap {
private:
  using ElemT = std::pair<AllocGroup, T>;
  using VectorTy = SmallVector<ElemT, 4>;

  static bool compareKey(const ElemT &E, const AllocGroup &G) {
    return E.first < G;
  }

public:
  using iterator = typename VectorTy::iterator;

  AllocGroupSmallMap() = default;
  AllocGroupSmallMap(std::initializer_list<std::pair<AllocGroup, T>> Inits)
      : Elems(Inits) {
    llvm::sort(Elems, llvm::less_first());
  }

  iterator begin() { return Elems.begin(); }
  iterator end() { return Elems.end(); }
  iterator find(AllocGroup G) {
    auto I = lower_bound(Elems, G, compareKey);
    // Guard against dereferencing end() when G is larger than every key.
    return (I != Elems.end() && I->first == G) ? I : end();
  }

  bool empty() const { return Elems.empty(); }
  size_t size() const { return Elems.size(); }

  T &operator[](AllocGroup G) {
    auto I = lower_bound(Elems, G, compareKey);
    if (I == Elems.end() || I->first != G)
      I = Elems.insert(I, std::make_pair(G, T()));
    return I->second;
  }

private:
  VectorTy Elems;
};

/// Print an AllocGroup.
raw_ostream &operator<<(raw_ostream &OS, AllocGroup AG);

} // end namespace jitlink

template <> struct DenseMapInfo<jitlink::MemProt> {
  static inline jitlink::MemProt getEmptyKey() {
    return jitlink::MemProt(~uint8_t(0));
  }
  static inline jitlink::MemProt getTombstoneKey() {
    return jitlink::MemProt(~uint8_t(0) - 1);
  }
  static unsigned getHashValue(const jitlink::MemProt &Val) {
    using UT = std::underlying_type_t<jitlink::MemProt>;
    return DenseMapInfo<UT>::getHashValue(static_cast<UT>(Val));
  }
  static bool isEqual(const jitlink::MemProt &LHS,
                      const jitlink::MemProt &RHS) {
    return LHS == RHS;
  }
};

template <> struct DenseMapInfo<jitlink::AllocGroup> {
  static inline jitlink::AllocGroup getEmptyKey() {
    return jitlink::AllocGroup(~uint8_t(0));
  }
  static inline jitlink::AllocGroup getTombstoneKey() {
    return jitlink::AllocGroup(~uint8_t(0) - 1);
  }
  static unsigned getHashValue(const jitlink::AllocGroup &Val) {
    return DenseMapInfo<jitlink::AllocGroup::underlying_type>::getHashValue(
        Val.Id);
  }
  static bool isEqual(const jitlink::AllocGroup &LHS,
                      const jitlink::AllocGroup &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_MEMORYFLAGS_H
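
// Usage sketch: AllocGroup packs protections and a dealloc policy into a
// single small key, and AllocGroupSmallMap aggregates per-group sizes. The
// function name and the segment sizes below are illustrative.
inline void exampleAllocGroups() {
  using namespace llvm::jitlink;
  AllocGroup RX(MemProt::Read | MemProt::Exec);
  AllocGroup RWFinalize(MemProt::Read | MemProt::Write,
                        MemDeallocPolicy::Finalize);

  AllocGroupSmallMap<uint64_t> SegSizes;
  SegSizes[RX] += 0x1000;        // code
  SegSizes[RWFinalize] += 0x200; // apply-time data, discarded after finalize

  for (auto &KV : SegSizes) // iteration order matches key ordering
    (void)KV;
}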
//===--- ppc64.h - Generic JITLink ppc64 edge kinds, utilities --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing 64-bit PowerPC objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_PPC64_H
#define LLVM_EXECUTIONENGINE_JITLINK_PPC64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITLink/TableManager.h"
#include "llvm/Support/Endian.h"

namespace llvm::jitlink::ppc64 {

/// Represents ppc64 fixups and other ppc64-specific edge kinds.
enum EdgeKind_ppc64 : Edge::Kind {
  Pointer64 = Edge::FirstRelocation,
  Pointer32,
  Delta64,
  Delta32,
  NegDelta32,
  Delta16,
  Delta16HA,
  Delta16LO,
  TOCDelta16HA,
  TOCDelta16LO,
  TOCDelta16DS,
  TOCDelta16LODS,
  CallBranchDelta,
  // Need to restore r2 after the bl, suggesting the bl is followed by a nop.
  CallBranchDeltaRestoreTOC,
  // Need PLT call stub using TOC, TOC pointer is not saved before branching.
  RequestPLTCallStub,
  // Need PLT call stub using TOC, TOC pointer is saved before branching.
  RequestPLTCallStubSaveTOC,
  // Need PLT call stub without using TOC.
  RequestPLTCallStubNoTOC,
};

enum PLTCallStubKind {
  LongBranch,
  LongBranchSaveR2,
  LongBranchNoTOC,
};

extern const char NullPointerContent[8];
extern const char PointerJumpStubContent_big[20];
extern const char PointerJumpStubContent_little[20];
extern const char PointerJumpStubNoTOCContent_big[32];
extern const char PointerJumpStubNoTOCContent_little[32];

struct PLTCallStubReloc {
  Edge::Kind K;
  size_t Offset;
  Edge::AddendT A;
};

struct PLTCallStubInfo {
  ArrayRef<char> Content;
  SmallVector<PLTCallStubReloc, 2> Relocs;
};

template <support::endianness Endianness>
inline PLTCallStubInfo pickStub(PLTCallStubKind StubKind) {
  constexpr bool isLE = Endianness == support::endianness::little;
  switch (StubKind) {
  case LongBranch: {
    ArrayRef<char> Content =
        isLE ? PointerJumpStubContent_little : PointerJumpStubContent_big;
    // Skip save r2.
    Content = Content.slice(4);
    return PLTCallStubInfo{
        Content,
        {{TOCDelta16HA, 0, 0}, {TOCDelta16LO, 4, 0}},
    };
  }
  case LongBranchSaveR2: {
    ArrayRef<char> Content =
        isLE ? PointerJumpStubContent_little : PointerJumpStubContent_big;
    return PLTCallStubInfo{
        Content,
        {{TOCDelta16HA, 4, 0}, {TOCDelta16LO, 8, 0}},
    };
  }
  case LongBranchNoTOC: {
    ArrayRef<char> Content = isLE ? PointerJumpStubNoTOCContent_little
                                  : PointerJumpStubNoTOCContent_big;
    return PLTCallStubInfo{
        Content,
        {{Delta16HA, 16, 8}, {Delta16LO, 20, 12}},
    };
  }
  }
  llvm_unreachable("Unknown PLTCallStubKind enum");
}

inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
                                      Symbol *InitialTarget = nullptr,
                                      uint64_t InitialAddend = 0) {
  assert(G.getPointerSize() == sizeof(NullPointerContent) &&
         "LinkGraph's pointer size should be consistent with size of "
         "NullPointerContent");
  Block &B = G.createContentBlock(PointerSection, NullPointerContent,
                                  orc::ExecutorAddr(), G.getPointerSize(), 0);
  if (InitialTarget)
    B.addEdge(Pointer64, 0, *InitialTarget, InitialAddend);
  return G.addAnonymousSymbol(B, 0, G.getPointerSize(), false, false);
}

template <support::endianness Endianness>
inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
                                              Section &StubSection,
                                              Symbol &PointerSymbol,
                                              PLTCallStubKind StubKind) {
  PLTCallStubInfo StubInfo = pickStub<Endianness>(StubKind);
  Block &B = G.createContentBlock(StubSection, StubInfo.Content,
                                  orc::ExecutorAddr(), 4, 0);
  for (auto const &Reloc : StubInfo.Relocs)
    B.addEdge(Reloc.K, Reloc.Offset, PointerSymbol, Reloc.A);
  return G.addAnonymousSymbol(B, 0, StubInfo.Content.size(), true, false);
}

template <support::endianness Endianness>
class TOCTableManager : public TableManager<TOCTableManager<Endianness>> {
public:
  // FIXME: `llvm-jitlink -check` relies on this name being $__GOT.
  static StringRef getSectionName() { return "$__GOT"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind K = E.getKind();
    switch (K) {
    case TOCDelta16HA:
    case TOCDelta16LO:
    case TOCDelta16DS:
    case TOCDelta16LODS:
    case CallBranchDeltaRestoreTOC:
    case RequestPLTCallStub:
    case RequestPLTCallStubSaveTOC:
      // Create TOC section if TOC relocation, PLT or GOT is used.
      getOrCreateTOCSection(G);
      return false;
    default:
      return false;
    }
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointer(G, getOrCreateTOCSection(G), &Target);
  }

private:
  Section &getOrCreateTOCSection(LinkGraph &G) {
    TOCSection = G.findSectionByName(getSectionName());
    if (!TOCSection)
      TOCSection = &G.createSection(getSectionName(), orc::MemProt::Read);
    return *TOCSection;
  }

  Section *TOCSection = nullptr;
};

template <support::endianness Endianness>
class PLTTableManager : public TableManager<PLTTableManager<Endianness>> {
public:
  PLTTableManager(TOCTableManager<Endianness> &TOC) : TOC(TOC) {}

  static StringRef getSectionName() { return "$__STUBS"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind K = E.getKind();
    if (K == ppc64::RequestPLTCallStubSaveTOC && E.getTarget().isExternal()) {
      E.setKind(ppc64::CallBranchDeltaRestoreTOC);
      this->StubKind = LongBranchSaveR2;
      E.setTarget(this->getEntryForTarget(G, E.getTarget()));
      return true;
    }
    if (K == ppc64::RequestPLTCallStubNoTOC && E.getTarget().isExternal()) {
      E.setKind(ppc64::CallBranchDelta);
      this->StubKind = LongBranchNoTOC;
      E.setTarget(this->getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointerJumpStub<Endianness>(
        G, getOrCreateStubsSection(G), TOC.getEntryForTarget(G, Target),
        this->StubKind);
  }

private:
  Section &getOrCreateStubsSection(LinkGraph &G) {
    PLTSection = G.findSectionByName(getSectionName());
    if (!PLTSection)
      PLTSection = &G.createSection(getSectionName(),
                                    orc::MemProt::Read | orc::MemProt::Exec);
    return *PLTSection;
  }

  TOCTableManager<Endianness> &TOC;
  Section *PLTSection = nullptr;
  PLTCallStubKind StubKind;
};

/// Returns a string name for the given ppc64 edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);

inline static uint16_t ha16(uint64_t x) { return (x + 0x8000) >> 16; }

inline static uint16_t lo16(uint64_t x) { return x & 0xffff; }
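
// Worked example: for X = 0x12348000, lo16(X) == 0x8000, which sign-extends
// to -0x8000 as a 16-bit immediate; ha16(X) == 0x1235 compensates, since
// (0x1235 << 16) + int16_t(0x8000) == 0x12348000.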

/// Apply fixup expression for edge to block content.
template <support::endianness Endianness>
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
                        const Symbol *TOCSymbol) {
  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();
  orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
  int64_t S = E.getTarget().getAddress().getValue();
  int64_t A = E.getAddend();
  int64_t P = FixupAddress.getValue();
  int64_t TOCBase = TOCSymbol ? TOCSymbol->getAddress().getValue() : 0;
  Edge::Kind K = E.getKind();

  DEBUG_WITH_TYPE("jitlink", {
    dbgs() << "    Applying fixup on " << G.getEdgeKindName(K)
           << " edge, (S, A, P, .TOC.) = (" << formatv("{0:x}", S) << ", "
           << formatv("{0:x}", A) << ", " << formatv("{0:x}", P) << ", "
           << formatv("{0:x}", TOCBase) << ")\n";
  });

  switch (K) {
  case Pointer64: {
    uint64_t Value = S + A;
    support::endian::write64<Endianness>(FixupPtr, Value);
    break;
  }
  case Delta16HA:
  case Delta16LO: {
    int64_t Value = S + A - P;
    if (LLVM_UNLIKELY(!isInt<32>(Value))) {
      return makeTargetOutOfRangeError(G, B, E);
    }
    if (K == Delta16LO)
      support::endian::write16<Endianness>(FixupPtr, lo16(Value));
    else
      support::endian::write16<Endianness>(FixupPtr, ha16(Value));
    break;
  }
  case TOCDelta16HA:
  case TOCDelta16LO: {
    int64_t Value = S + A - TOCBase;
    if (LLVM_UNLIKELY(!isInt<32>(Value))) {
      return makeTargetOutOfRangeError(G, B, E);
    }
    if (K == TOCDelta16LO)
      support::endian::write16<Endianness>(FixupPtr, lo16(Value));
    else
      support::endian::write16<Endianness>(FixupPtr, ha16(Value));
    break;
  }
  case TOCDelta16DS:
  case TOCDelta16LODS: {
    int64_t Value = S + A - TOCBase;
    if (LLVM_UNLIKELY(!isInt<32>(Value))) {
      return makeTargetOutOfRangeError(G, B, E);
    }
    if (K == TOCDelta16LODS)
      support::endian::write16<Endianness>(FixupPtr, lo16(Value) & ~3);
    else
      support::endian::write16<Endianness>(FixupPtr, Value & ~3);
    break;
  }
  case CallBranchDeltaRestoreTOC:
  case CallBranchDelta: {
    int64_t Value = S + A - P;
    if (LLVM_UNLIKELY(!isInt<26>(Value))) {
      return makeTargetOutOfRangeError(G, B, E);
    }
    uint32_t Inst = support::endian::read32<Endianness>(FixupPtr);
    support::endian::write32<Endianness>(FixupPtr, (Inst & 0xfc000003) |
                                                       (Value & 0x03fffffc));
    if (K == CallBranchDeltaRestoreTOC) {
      uint32_t NopInst = support::endian::read32<Endianness>(FixupPtr + 4);
      assert(NopInst == 0x60000000 &&
             "NOP should be placed here for restoring r2");
      (void)NopInst;
      // Restore r2 by instruction 0xe8410018 which is `ld r2, 24(r1)`.
      support::endian::write32<Endianness>(FixupPtr + 4, 0xe8410018);
    }
    break;
  }
  case Delta64: {
    int64_t Value = S + A - P;
    support::endian::write64<Endianness>(FixupPtr, Value);
    break;
  }
  case Delta32: {
    int64_t Value = S + A - P;
    if (LLVM_UNLIKELY(!isInt<32>(Value))) {
      return makeTargetOutOfRangeError(G, B, E);
    }
    support::endian::write32<Endianness>(FixupPtr, Value);
    break;
  }
  case NegDelta32: {
    int64_t Value = P - S + A;
    if (LLVM_UNLIKELY(!isInt<32>(Value))) {
      return makeTargetOutOfRangeError(G, B, E);
    }
    support::endian::write32<Endianness>(FixupPtr, Value);
    break;
  }
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " unsupported edge kind " + getEdgeKindName(E.getKind()));
  }
  return Error::success();
}

} // end namespace llvm::jitlink::ppc64

#endif // LLVM_EXECUTIONENGINE_JITLINK_PPC64_H
//=== aarch64.h - Generic JITLink aarch64 edge kinds, utilities -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing aarch64 objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
#define LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H

#include "TableManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"

namespace llvm {
namespace jitlink {
namespace aarch64 {

/// Represents aarch64 fixups and other aarch64-specific edge kinds.
enum EdgeKind_aarch64 : Edge::Kind {

  /// A plain 64-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint64
  ///
  Pointer64 = Edge::FirstRelocation,

  /// A plain 32-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint32
  ///
  /// Errors:
  ///   - The target must reside in the low 32-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer32,

  /// A 64-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int64
  ///
  Delta64,

  /// A 32-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  Delta32,

  /// A 64-bit negative delta.
  ///
  /// Delta from target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int64
  ///
  NegDelta64,

  /// A 32-bit negative delta.
  ///
  /// Delta from the target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  NegDelta32,

  /// A 26-bit PC-relative branch.
  ///
  /// Represents a PC-relative call or branch to a target within +/-128Mb. The
  /// target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int26
  ///
  /// Notes:
  ///   The '26' in the name refers to the number of operand bits and follows
  /// the
  /// naming convention used by the corresponding ELF and MachO relocations.
  /// Since the low two bits must be zero (because of the 32-bit alignment of
  /// the target) the operand is effectively a signed 28-bit number.
  ///
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int26 otherwise an
  ///     out-of-range error will be returned.
  Branch26PCRel,

  /// A 14-bit PC-relative test and branch.
  ///
  /// Represents a PC-relative test and branch to a target within +/-32Kb. The
  /// target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int14
  ///
  /// Notes:
  ///   The '14' in the name refers to the number of operand bits and follows
  /// the
  /// naming convention used by the corresponding ELF relocation.
  /// Since the low two bits must be zero (because of the 32-bit alignment of
  /// the target) the operand is effectively a signed 16-bit number.
  ///
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int14 otherwise an
  ///     out-of-range error will be returned.
  TestAndBranch14PCRel,

  /// A 19-bit PC-relative conditional branch.
  ///
  /// Represents a PC-relative conditional branch to a target within +/-1Mb. The
  /// target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int19
  ///
  /// Notes:
  ///   The '19' in the name refers to the number of operand bits and follows
  /// the
  /// naming convention used by the corresponding ELF relocation.
  /// Since the low two bits must be zero (because of the 32-bit alignment of
  /// the target) the operand is effectively a signed 21-bit number.
  ///
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int19 otherwise an
  ///     out-of-range error will be returned.
  CondBranch19PCRel,

  /// A 16-bit slice of the target address (which slice depends on the
  /// instruction at the fixup location).
  ///
  /// Used to fix up MOVK/MOVN/MOVZ instructions.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- (Target + Addend) >> Shift : uint16
  ///
  ///   where Shift is encoded in the instruction at the fixup location.
  ///
  MoveWide16,

  /// The signed 21-bit delta from the fixup to the target.
  ///
  /// Typically used to load a pointer at a PC-relative offset within +/-1Mb.
  /// The target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- (Target - Fixup) >> 2 : int19
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int19, otherwise
  ///     an out-of-range error will be returned.
  LDRLiteral19,

  /// The signed 21-bit delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- Target - Fixup + Addend : int21
  ///
  /// Notes:
  ///   For ADR fixups.
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int21 otherwise an
  ///     out-of-range error will be returned.
  ADRLiteral21,

  /// The signed 21-bit delta from the fixup page to the page containing the
  /// target.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- (((Target + Addend) & ~0xfff) - (Fixup & ~0xfff)) >> 12 : int21
  ///
  /// Notes:
  ///   For ADRP fixups.
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int21 otherwise an
  ///     out-of-range error will be returned.
  Page21,

  /// The 12-bit (potentially shifted) offset of the target within its page.
  ///
  /// Typically used to fix up LDR immediates.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- ((Target + Addend) >> Shift) & 0xfff : uint12
  ///
  ///   where Shift is encoded in the size field of the instruction.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     aligned otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into a uint12 otherwise an
  ///     out-of-range error will be returned.
  PageOffset12,

  /// A GOT entry getter/constructor, transformed to Page21 pointing at the GOT
  /// entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Page21 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPage21,

  /// A GOT entry getter/constructor, transformed to PageOffset12 pointing at
  /// the GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend. A GOT entry for the target should be created if one does not
  /// already exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPageOffset12,

  /// A GOT entry getter/constructor, transformed to Delta32 pointing at the GOT
  /// entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Delta32 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToDelta32,

  /// A TLVP entry getter/constructor, transformed to Page21.
  ///
  /// Indicates that this edge should be transformed into a Page21 targeting the
  /// TLVP entry for the edge's current target. A TLVP entry for the target
  /// should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLVPAndTransformToPage21,

  /// A TLVP entry getter/constructor, transformed to PageOffset12.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the TLVP entry for the edge's current target. A TLVP entry for
  /// the target should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLVPAndTransformToPageOffset12,

  /// A TLSDesc entry getter/constructor, transformed to Page21.
  ///
  /// Indicates that this edge should be transformed into a Page21 targeting the
  /// TLSDesc entry for the edge's current target. A TLSDesc entry for the
  /// target should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLSDescEntryAndTransformToPage21,

  /// A TLSDesc entry getter/constructor, transformed to PageOffset12.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the TLSDesc entry for the edge's current target. A TLSDesc entry
  /// for the target should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLSDescEntryAndTransformToPageOffset12,
};

/// Returns a string name for the given aarch64 edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);

// Returns whether the Instr is LD/ST (imm12)
inline bool isLoadStoreImm12(uint32_t Instr) {
  constexpr uint32_t LoadStoreImm12Mask = 0x3b000000;
  return (Instr & LoadStoreImm12Mask) == 0x39000000;
}

inline bool isTestAndBranchImm14(uint32_t Instr) {
  constexpr uint32_t TestAndBranchImm14Mask = 0x7e000000;
  return (Instr & TestAndBranchImm14Mask) == 0x36000000;
}

inline bool isCondBranchImm19(uint32_t Instr) {
  constexpr uint32_t CondBranchImm19Mask = 0xfe000000;
  return (Instr & CondBranchImm19Mask) == 0x54000000;
}

inline bool isCompAndBranchImm19(uint32_t Instr) {
  constexpr uint32_t CompAndBranchImm19Mask = 0x7e000000;
  return (Instr & CompAndBranchImm19Mask) == 0x34000000;
}

inline bool isADR(uint32_t Instr) {
  constexpr uint32_t ADRMask = 0x9f000000;
  return (Instr & ADRMask) == 0x10000000;
}

// Returns the amount the address operand of LD/ST (imm12)
// should be shifted right by.
//
// The shift value varies with the data size of the LD/ST instruction.
// For instance, an LDRH instruction needs the address to be shifted
// right by 1.
inline unsigned getPageOffset12Shift(uint32_t Instr) {
  constexpr uint32_t Vec128Mask = 0x04800000;

  if (isLoadStoreImm12(Instr)) {
    uint32_t ImplicitShift = Instr >> 30;
    if (ImplicitShift == 0)
      if ((Instr & Vec128Mask) == Vec128Mask)
        ImplicitShift = 4;

    return ImplicitShift;
  }

  return 0;
}
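
// Worked examples (illustrative; the encodings are hand-assembled for this
// note rather than taken from the original header):
//
//   getPageOffset12Shift(0xf9400100); // LDR  x0, [x8]  -> 3 (8-byte access)
//   getPageOffset12Shift(0x3dc00100); // LDR  q0, [x8]  -> 4 (16-byte access)
//   getPageOffset12Shift(0x39400100); // LDRB w0, [x8]  -> 0 (1-byte access)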

// Returns whether the Instr is MOVK/MOVZ (imm16) with a zero immediate field
inline bool isMoveWideImm16(uint32_t Instr) {
  constexpr uint32_t MoveWideImm16Mask = 0x5f9fffe0;
  return (Instr & MoveWideImm16Mask) == 0x52800000;
}

// Returns the amount the address operand of MOVK/MOVZ (imm16)
// should be shifted right by.
//
// The shift value is specified in the assembly as LSL #<shift>.
inline unsigned getMoveWide16Shift(uint32_t Instr) {
  if (isMoveWideImm16(Instr)) {
    uint32_t ImplicitShift = (Instr >> 21) & 0b11;
    return ImplicitShift << 4;
  }

  return 0;
}
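
// Worked example (illustrative; encoding hand-assembled for this note): for
// "MOVZ x0, #0, LSL #16" (0xd2a00000) the hw field in bits [22:21] is 0b01,
// so the shift is 1 << 4 = 16:
//
//   getMoveWide16Shift(0xd2a00000); // MOVZ x0, #0, LSL #16 -> 16
//   getMoveWide16Shift(0xd2800000); // MOVZ x0, #0          -> 0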

/// Apply fixup expression for edge to block content.
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
  using namespace support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();
  orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();

  switch (E.getKind()) {
  case Pointer64: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    *(ulittle64_t *)FixupPtr = Value;
    break;
  }
  case Pointer32: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (Value > std::numeric_limits<uint32_t>::max())
      return makeTargetOutOfRangeError(G, B, E);
    *(ulittle32_t *)FixupPtr = Value;
    break;
  }
  case Delta32:
  case Delta64:
  case NegDelta32:
  case NegDelta64: {
    int64_t Value;
    if (E.getKind() == Delta32 || E.getKind() == Delta64)
      Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
    else
      Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();

    if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
      if (Value < std::numeric_limits<int32_t>::min() ||
          Value > std::numeric_limits<int32_t>::max())
        return makeTargetOutOfRangeError(G, B, E);
      *(little32_t *)FixupPtr = Value;
    } else
      *(little64_t *)FixupPtr = Value;
    break;
  }
  case Branch26PCRel: {
    assert((FixupAddress.getValue() & 0x3) == 0 &&
           "Branch-inst is not 32-bit aligned");

    int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();

    if (static_cast<uint64_t>(Value) & 0x3)
      return make_error<JITLinkError>("BranchPCRel26 target is not 32-bit "
                                      "aligned");

    if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
      return makeTargetOutOfRangeError(G, B, E);

    uint32_t RawInstr = *(little32_t *)FixupPtr;
    assert((RawInstr & 0x7fffffff) == 0x14000000 &&
           "RawInstr isn't a B or BR immediate instruction");
    uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
    uint32_t FixedInstr = RawInstr | Imm;
    *(little32_t *)FixupPtr = FixedInstr;
    break;
  }
  case MoveWide16: {
    uint64_t TargetOffset =
        (E.getTarget().getAddress() + E.getAddend()).getValue();

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isMoveWideImm16(RawInstr) &&
           "RawInstr isn't a MOVK/MOVZ instruction");

    unsigned ImmShift = getMoveWide16Shift(RawInstr);
    uint32_t Imm = (TargetOffset >> ImmShift) & 0xffff;
    uint32_t FixedInstr = RawInstr | (Imm << 5);
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case LDRLiteral19: {
    assert((FixupAddress.getValue() & 0x3) == 0 && "LDR is not 32-bit aligned");
    assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
    int64_t Delta = E.getTarget().getAddress() - FixupAddress;
    if (Delta & 0x3)
      return make_error<JITLinkError>("LDR literal target is not 32-bit "
                                      "aligned");
    if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
      return makeTargetOutOfRangeError(G, B, E);

    uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case ADRLiteral21: {
    assert((FixupAddress.getValue() & 0x3) == 0 && "ADR is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isADR(RawInstr) && "RawInstr is not an ADR");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (!isInt<21>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    auto UDelta = static_cast<uint32_t>(Delta);
    uint32_t EncodedImmHi = ((UDelta >> 2) & 0x7ffff) << 5;
    uint32_t EncodedImmLo = (UDelta & 0x3) << 29;
    uint32_t FixedInstr = RawInstr | EncodedImmHi | EncodedImmLo;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case TestAndBranch14PCRel: {
    assert((FixupAddress.getValue() & 0x3) == 0 &&
           "Test and branch is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isTestAndBranchImm14(RawInstr) &&
           "RawInstr is not a test and branch");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (Delta & 0x3)
      return make_error<JITLinkError>(
          "Test and branch literal target is not 32-bit aligned");
    if (!isInt<16>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x3fff) << 5;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case CondBranch19PCRel: {
    assert((FixupAddress.getValue() & 0x3) == 0 &&
           "Conditional branch is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert((isCondBranchImm19(RawInstr) || isCompAndBranchImm19(RawInstr)) &&
           "RawInstr is not a conditional branch");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (Delta & 0x3)
      return make_error<JITLinkError>(
          "Conditional branch literal target is not 32-bit "
          "aligned");
    if (!isInt<21>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case Page21: {
    uint64_t TargetPage =
        (E.getTarget().getAddress().getValue() + E.getAddend()) &
        ~static_cast<uint64_t>(4096 - 1);
    uint64_t PCPage =
        FixupAddress.getValue() & ~static_cast<uint64_t>(4096 - 1);

    int64_t PageDelta = TargetPage - PCPage;
    if (!isInt<33>(PageDelta))
      return makeTargetOutOfRangeError(G, B, E);

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert((RawInstr & 0xffffffe0) == 0x90000000 &&
           "RawInstr isn't an ADRP instruction");
    uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
    uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
    uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case PageOffset12: {
    uint64_t TargetOffset =
        (E.getTarget().getAddress() + E.getAddend()).getValue() & 0xfff;

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    unsigned ImmShift = getPageOffset12Shift(RawInstr);

    if (TargetOffset & ((1 << ImmShift) - 1))
      return make_error<JITLinkError>("PAGEOFF12 target is not aligned");

    uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " unsupported edge kind " + getEdgeKindName(E.getKind()));
  }

  return Error::success();
}
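
// A minimal driver sketch (an assumption about usage, modeled on the shape of
// JITLink's generic fixup phase; applyAllFixups is a hypothetical name):
//
//   Error applyAllFixups(LinkGraph &G) {
//     for (auto *B : G.blocks())
//       for (auto &E : B->edges())
//         if (E.isRelocation())
//           if (auto Err = applyFixup(G, *B, E))
//             return Err;
//     return Error::success();
//   }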

/// aarch64 pointer size.
constexpr uint64_t PointerSize = 8;

/// AArch64 null pointer content.
extern const char NullPointerContent[PointerSize];

/// AArch64 pointer jump stub content.
///
/// Contains the instruction sequence for an indirect jump via an in-memory
/// pointer:
///   ADRP x16, ptr@page21
///   LDR  x16, [x16, ptr@pageoff12]
///   BR   x16
extern const char PointerJumpStubContent[12];

/// Creates a new pointer block in the given section and returns an
/// anonymous symbol pointing to it.
///
/// If InitialTarget is given then a Pointer64 relocation will be added to the
/// block pointing at InitialTarget.
///
/// The pointer block will have the following default values:
///   alignment: 64-bit
///   alignment-offset: 0
///   address: highest allowable (~7U)
inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
                                      Symbol *InitialTarget = nullptr,
                                      uint64_t InitialAddend = 0) {
  auto &B = G.createContentBlock(PointerSection, NullPointerContent,
                                 orc::ExecutorAddr(~uint64_t(7)), 8, 0);
  if (InitialTarget)
    B.addEdge(Pointer64, 0, *InitialTarget, InitialAddend);
  return G.addAnonymousSymbol(B, 0, 8, false, false);
}

/// Create a jump stub block that jumps via the pointer at the given symbol.
///
/// The stub block will have the following default values:
///   alignment: 32-bit
///   alignment-offset: 0
///   address: highest allowable (~11U)
inline Block &createPointerJumpStubBlock(LinkGraph &G, Section &StubSection,
                                         Symbol &PointerSymbol) {
  auto &B = G.createContentBlock(StubSection, PointerJumpStubContent,
                                 orc::ExecutorAddr(~uint64_t(11)), 1, 0);
  B.addEdge(Page21, 0, PointerSymbol, 0);
  B.addEdge(PageOffset12, 4, PointerSymbol, 0);
  return B;
}

/// Create a jump stub that jumps via the pointer at the given symbol, plus
/// an anonymous symbol pointing to the stub. Returns the anonymous symbol.
///
/// The stub block will be created by createPointerJumpStubBlock.
inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
                                              Section &StubSection,
                                              Symbol &PointerSymbol) {
  return G.addAnonymousSymbol(
      createPointerJumpStubBlock(G, StubSection, PointerSymbol), 0,
      sizeof(PointerJumpStubContent), true, false);
}
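
// Illustrative usage (a sketch; the section names are hypothetical and Target
// is assumed to be a Symbol already in the graph):
//
//   Section &PtrSec = G.createSection(
//       "$__ptrs", orc::MemProt::Read | orc::MemProt::Write);
//   Section &StubSec = G.createSection(
//       "$__stubs", orc::MemProt::Read | orc::MemProt::Exec);
//   Symbol &Ptr = createAnonymousPointer(G, PtrSec, &Target);
//   Symbol &Stub = createAnonymousPointerJumpStub(G, StubSec, Ptr);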

/// Global Offset Table Builder.
class GOTTableManager : public TableManager<GOTTableManager> {
public:
  static StringRef getSectionName() { return "$__GOT"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind KindToSet = Edge::Invalid;
    const char *BlockWorkingMem = B->getContent().data();
    const char *FixupPtr = BlockWorkingMem + E.getOffset();

    switch (E.getKind()) {
    case aarch64::RequestGOTAndTransformToPage21:
    case aarch64::RequestTLVPAndTransformToPage21: {
      KindToSet = aarch64::Page21;
      break;
    }
    case aarch64::RequestGOTAndTransformToPageOffset12:
    case aarch64::RequestTLVPAndTransformToPageOffset12: {
      KindToSet = aarch64::PageOffset12;
      uint32_t RawInstr = *(const support::ulittle32_t *)FixupPtr;
      (void)RawInstr;
      assert(E.getAddend() == 0 &&
             "GOTPageOffset12/TLVPageOffset12 with non-zero addend");
      assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
             "RawInstr isn't a 64-bit LDR immediate");
      break;
    }
    case aarch64::RequestGOTAndTransformToDelta32: {
      KindToSet = aarch64::Delta32;
      break;
    }
    default:
      return false;
    }
    assert(KindToSet != Edge::Invalid &&
           "Fell through switch, but no new kind to set");
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
             << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
             << formatv("{0:x}", E.getOffset()) << ")\n";
    });
    E.setKind(KindToSet);
    E.setTarget(getEntryForTarget(G, E.getTarget()));
    return true;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointer(G, getGOTSection(G), &Target);
  }

private:
  Section &getGOTSection(LinkGraph &G) {
    if (!GOTSection)
      GOTSection = &G.createSection(getSectionName(),
                                    orc::MemProt::Read | orc::MemProt::Exec);
    return *GOTSection;
  }

  Section *GOTSection = nullptr;
};

/// Procedure Linkage Table Builder.
class PLTTableManager : public TableManager<PLTTableManager> {
public:
  PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}

  static StringRef getSectionName() { return "$__STUBS"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getKind() == aarch64::Branch26PCRel && !E.getTarget().isDefined()) {
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      E.setTarget(getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointerJumpStub(G, getStubsSection(G),
                                          GOT.getEntryForTarget(G, Target));
  }

public:
  Section &getStubsSection(LinkGraph &G) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    return *StubsSection;
  }

  GOTTableManager &GOT;
  Section *StubsSection = nullptr;
};
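
// A wiring sketch (an assumption, modeled on how JITLink backends typically
// install these managers as a link-graph pass; visitExistingEdges is the
// edge-visitor helper declared in JITLink.h):
//
//   Error buildTables(LinkGraph &G) {
//     GOTTableManager GOT;
//     PLTTableManager PLT(GOT);
//     visitExistingEdges(G, GOT, PLT);
//     return Error::success();
//   }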

} // namespace aarch64
} // namespace jitlink
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
//===--- ELF_aarch64.h - JIT link functions for ELF/aarch64 --*- C++ -*----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/aarch64.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_AARCH64_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_AARCH64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF/aarch64 relocatable object
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_aarch64(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF aarch64 relocatable
/// object file.
void link_ELF_aarch64(std::unique_ptr<LinkGraph> G,
                      std::unique_ptr<JITLinkContext> Ctx);
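
// Typical usage (a sketch; ObjBuffer and Ctx are assumed to be supplied by
// the caller):
//
//   auto G = createLinkGraphFromELFObject_aarch64(ObjBuffer);
//   if (!G)
//     return G.takeError();
//   link_ELF_aarch64(std::move(*G), std::move(Ctx));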

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_AARCH64_H
//===---------------------- TableManager.h ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Fixes edges that need a table entry to reference their target symbol.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_TABLEMANAGER_H
#define LLVM_EXECUTIONENGINE_JITLINK_TABLEMANAGER_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/Debug.h"

namespace llvm {
namespace jitlink {

/// A CRTP base for tables that are built on demand, e.g. Global Offset Tables
/// and Procedure Linkage Tables.
/// The getEntryForTarget function returns the table entry corresponding to the
/// given target, calling down to the implementation class to build an entry if
/// one does not already exist.
template <typename TableManagerImplT> class TableManager {
public:
  /// Return the constructed entry
  ///
  /// Use parameter G to construct the entry for target symbol
  Symbol &getEntryForTarget(LinkGraph &G, Symbol &Target) {
    assert(Target.hasName() && "Edge cannot point to anonymous target");

    auto EntryI = Entries.find(Target.getName());

    // Build the entry if it doesn't exist.
    if (EntryI == Entries.end()) {
      auto &Entry = impl().createEntry(G, Target);
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "    Created " << impl().getSectionName() << " entry for "
               << Target.getName() << ": " << Entry << "\n";
      });
      EntryI = Entries.insert(std::make_pair(Target.getName(), &Entry)).first;
    }

    assert(EntryI != Entries.end() && "Could not get entry symbol");
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "    Using " << impl().getSectionName() << " entry "
             << *EntryI->second << "\n";
    });
    return *EntryI->second;
  }

  /// Register a pre-existing entry.
  ///
  /// Objects may include pre-existing table entries (e.g. for GOTs).
  /// This method can be used to register those entries so that they will not
  /// be duplicated by createEntry the first time that getEntryForTarget is
  /// called.
  bool registerPreExistingEntry(Symbol &Target, Symbol &Entry) {
    assert(Target.hasName() && "Edge cannot point to anonymous target");
    auto Res = Entries.insert({
        Target.getName(),
        &Entry,
    });
    return Res.second;
  }

private:
  TableManagerImplT &impl() { return static_cast<TableManagerImplT &>(*this); }
  DenseMap<StringRef, Symbol *> Entries;
};
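
// A minimal CRTP client sketch (hypothetical names; createEntry and
// getSectionName are the two hooks getEntryForTarget expects the
// implementation class to provide):
//
//   class MyTableManager : public TableManager<MyTableManager> {
//   public:
//     static StringRef getSectionName() { return "$__MY_TABLE"; }
//     Symbol &createEntry(LinkGraph &G, Symbol &Target) {
//       // Build and return a new table-entry symbol for Target here.
//     }
//   };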

} // namespace jitlink
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_TABLEMANAGER_H
//===-- ELF_loongarch.h - JIT link functions for ELF/loongarch -*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/loongarch.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_LOONGARCH_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_LOONGARCH_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF/loongarch relocatable object
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_loongarch(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF loongarch object
/// file.
void link_ELF_loongarch(std::unique_ptr<LinkGraph> G,
                        std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_LOONGARCH_H
//===-- JITLinkMemoryManager.h - JITLink mem manager interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains the JITLinkMemoryManager interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H

#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkDylib.h"
#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MSVCErrorWorkarounds.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/RecyclingAllocator.h"

#include <cstdint>
#include <future>
#include <mutex>

namespace llvm {
namespace jitlink {

class Block;
class LinkGraph;
class Section;

/// Manages allocations of JIT memory.
///
/// Instances of this class may be accessed concurrently from multiple threads
/// and their implementations should include any necessary synchronization.
class JITLinkMemoryManager {
public:

  /// Represents a finalized allocation.
  ///
  /// Finalized allocations must be passed to the
  /// JITLinkMemoryManager::deallocate method prior to being destroyed.
  ///
  /// The interpretation of the Address associated with the finalized allocation
  /// is up to the memory manager implementation. Common options are using the
  /// base address of the allocation, or the address of a memory management
  /// object that tracks the allocation.
  class FinalizedAlloc {
    friend class JITLinkMemoryManager;

    static constexpr auto InvalidAddr = ~uint64_t(0);

  public:
    FinalizedAlloc() = default;
    explicit FinalizedAlloc(orc::ExecutorAddr A) : A(A) {
      assert(A.getValue() != InvalidAddr &&
             "Explicitly creating an invalid allocation?");
    }
    FinalizedAlloc(const FinalizedAlloc &) = delete;
    FinalizedAlloc(FinalizedAlloc &&Other) : A(Other.A) {
      Other.A.setValue(InvalidAddr);
    }
    FinalizedAlloc &operator=(const FinalizedAlloc &) = delete;
    FinalizedAlloc &operator=(FinalizedAlloc &&Other) {
      assert(A.getValue() == InvalidAddr &&
             "Cannot overwrite active finalized allocation");
      std::swap(A, Other.A);
      return *this;
    }
    ~FinalizedAlloc() {
      assert(A.getValue() == InvalidAddr &&
             "Finalized allocation was not deallocated");
    }

    /// FinalizedAllocs convert to false if default-constructed, and to true
    /// otherwise. Default-constructed allocs need not be deallocated.
    explicit operator bool() const { return A.getValue() != InvalidAddr; }

    /// Returns the address associated with this finalized allocation.
    /// The allocation is unmodified.
    orc::ExecutorAddr getAddress() const { return A; }

    /// Returns the address associated with this finalized allocation and
    /// resets this object to the default state.
    /// This should only be used by allocators when deallocating memory.
    orc::ExecutorAddr release() {
      orc::ExecutorAddr Tmp = A;
      A.setValue(InvalidAddr);
      return Tmp;
    }

  private:
    orc::ExecutorAddr A{InvalidAddr};
  };

  /// Represents an allocation which has not been finalized yet.
  ///
  /// InFlightAllocs manage both executor memory allocations and working
  /// memory allocations.
  ///
  /// On finalization, the InFlightAlloc should transfer the content of
  /// working memory into executor memory, apply memory protections, and
  /// run any finalization functions.
  ///
  /// Working memory should be kept alive at least until one of the following
  /// happens: (1) the InFlightAlloc instance is destroyed, (2) the
  /// InFlightAlloc is abandoned, (3) finalized target memory is destroyed.
  ///
  /// If abandon is called then working memory and executor memory should both
  /// be freed.
  class InFlightAlloc {
  public:
    using OnFinalizedFunction = unique_function<void(Expected<FinalizedAlloc>)>;
    using OnAbandonedFunction = unique_function<void(Error)>;

    virtual ~InFlightAlloc();

    /// Called prior to finalization if the allocation should be abandoned.
    virtual void abandon(OnAbandonedFunction OnAbandoned) = 0;

    /// Called to transfer working memory to the target and apply finalization.
    virtual void finalize(OnFinalizedFunction OnFinalized) = 0;

    /// Synchronous convenience version of finalize.
    Expected<FinalizedAlloc> finalize() {
      std::promise<MSVCPExpected<FinalizedAlloc>> FinalizeResultP;
      auto FinalizeResultF = FinalizeResultP.get_future();
      finalize([&](Expected<FinalizedAlloc> Result) {
        FinalizeResultP.set_value(std::move(Result));
      });
      return FinalizeResultF.get();
    }
  };

  /// Typedef for the argument to be passed to OnAllocatedFunction.
  using AllocResult = Expected<std::unique_ptr<InFlightAlloc>>;

  /// Called when allocation has been completed.
  using OnAllocatedFunction = unique_function<void(AllocResult)>;

  /// Called when deallocation has completed.
  using OnDeallocatedFunction = unique_function<void(Error)>;

  virtual ~JITLinkMemoryManager();

  /// Start the allocation process.
  ///
  /// If the initial allocation is successful then the OnAllocated function will
  /// be called with a std::unique_ptr<InFlightAlloc> value. If the allocation
  /// is unsuccessful then the OnAllocated function will be called with an
  /// Error.
  virtual void allocate(const JITLinkDylib *JD, LinkGraph &G,
                        OnAllocatedFunction OnAllocated) = 0;

  /// Convenience function for blocking allocation.
  AllocResult allocate(const JITLinkDylib *JD, LinkGraph &G) {
    std::promise<MSVCPExpected<std::unique_ptr<InFlightAlloc>>> AllocResultP;
    auto AllocResultF = AllocResultP.get_future();
    allocate(JD, G, [&](AllocResult Alloc) {
      AllocResultP.set_value(std::move(Alloc));
    });
    return AllocResultF.get();
  }

  /// Deallocate a list of allocation objects.
  ///
  /// Dealloc actions will be run in reverse order (from the end of the vector
  /// to the start).
  virtual void deallocate(std::vector<FinalizedAlloc> Allocs,
                          OnDeallocatedFunction OnDeallocated) = 0;

  /// Convenience function for deallocation of a single alloc.
  void deallocate(FinalizedAlloc Alloc, OnDeallocatedFunction OnDeallocated) {
    std::vector<FinalizedAlloc> Allocs;
    Allocs.push_back(std::move(Alloc));
    deallocate(std::move(Allocs), std::move(OnDeallocated));
  }

  /// Convenience function for blocking deallocation.
  Error deallocate(std::vector<FinalizedAlloc> Allocs) {
    std::promise<MSVCPError> DeallocResultP;
    auto DeallocResultF = DeallocResultP.get_future();
    deallocate(std::move(Allocs),
               [&](Error Err) { DeallocResultP.set_value(std::move(Err)); });
    return DeallocResultF.get();
  }

  /// Convenience function for blocking deallocation of a single alloc.
  Error deallocate(FinalizedAlloc Alloc) {
    std::vector<FinalizedAlloc> Allocs;
    Allocs.push_back(std::move(Alloc));
    return deallocate(std::move(Allocs));
  }
};
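
// A blocking round-trip sketch using the convenience overloads above (assumes
// a LinkGraph G whose block content is already populated; materialize is a
// hypothetical name):
//
//   Error materialize(JITLinkMemoryManager &MemMgr, LinkGraph &G) {
//     auto Alloc = MemMgr.allocate(/*JD=*/nullptr, G);
//     if (!Alloc)
//       return Alloc.takeError();
//     // ... copy content into the working memory of each segment ...
//     auto FA = (*Alloc)->finalize();
//     if (!FA)
//       return FA.takeError();
//     return MemMgr.deallocate(std::move(*FA));
//   }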

/// BasicLayout simplifies the implementation of JITLinkMemoryManagers.
///
/// BasicLayout groups Sections into Segments based on their memory protection
/// and deallocation policies. JITLinkMemoryManagers can construct a BasicLayout
/// from a Graph, and then assign working memory and addresses to each of the
/// Segments. These addresses will be mapped back onto the Graph blocks in
/// the apply method.
class BasicLayout {
public:
  /// The Alignment, ContentSize and ZeroFillSize of each segment will be
  /// pre-filled from the Graph. Clients must set the Addr and WorkingMem fields
  /// prior to calling apply.
  //
  // FIXME: The C++98 initializer is an attempt to work around compile failures
  // due to http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1397.
  // We should be able to switch this back to member initialization once that
  // issue is fixed.
  class Segment {
    friend class BasicLayout;

  public:
    Segment()
        : ContentSize(0), ZeroFillSize(0), Addr(0), WorkingMem(nullptr),
          NextWorkingMemOffset(0) {}
    Align Alignment;
    size_t ContentSize;
    uint64_t ZeroFillSize;
    orc::ExecutorAddr Addr;
    char *WorkingMem = nullptr;

  private:
    size_t NextWorkingMemOffset;
    std::vector<Block *> ContentBlocks, ZeroFillBlocks;
  };

  /// A convenience class that further groups segments based on memory
  /// deallocation policy. This allows clients to make two slab allocations:
  /// one for all standard segments, and one for all finalize segments.
  struct ContiguousPageBasedLayoutSizes {
    uint64_t StandardSegs = 0;
    uint64_t FinalizeSegs = 0;

    uint64_t total() const { return StandardSegs + FinalizeSegs; }
  };

private:
  using SegmentMap = orc::AllocGroupSmallMap<Segment>;

public:
  BasicLayout(LinkGraph &G);

  /// Return a reference to the graph this allocation was created from.
  LinkGraph &getGraph() { return G; }

  /// Returns the total number of bytes required to allocate all segments (with each
  /// segment padded out to page size) for all standard segments, and all
  /// finalize segments.
  ///
  /// This is a convenience function for the common case where the segments will
  /// be allocated contiguously.
  ///
  /// This function will return an error if any segment has an alignment that
  /// is higher than a page.
  Expected<ContiguousPageBasedLayoutSizes>
  getContiguousPageBasedLayoutSizes(uint64_t PageSize);

  /// Returns an iterator over the segments of the layout.
  iterator_range<SegmentMap::iterator> segments() {
    return {Segments.begin(), Segments.end()};
  }

  /// Apply the layout to the graph.
  Error apply();

  /// Returns a reference to the AllocActions in the graph.
  /// This convenience function saves callers from having to #include
  /// LinkGraph.h if all they need are allocation actions.
  orc::shared::AllocActions &graphAllocActions();

private:
  LinkGraph &G;
  SegmentMap Segments;
};
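
// A sketch of the intended call sequence for memory-manager implementations
// (the address and working-memory assignments are placeholders the
// implementation must fill in):
//
//   BasicLayout BL(G);
//   auto Sizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
//   if (!Sizes)
//     return Sizes.takeError();
//   // Allocate Sizes->total() bytes, then hand each segment its slice:
//   for (auto &KV : BL.segments()) {
//     auto &Seg = KV.second;
//     Seg.Addr = ExecutorAddrForSegment;      // placeholder
//     Seg.WorkingMem = WorkingMemForSegment;  // placeholder
//   }
//   if (auto Err = BL.apply())
//     return Err;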

/// A utility class for making simple allocations using JITLinkMemoryManager.
///
/// SimpleSegmentAlloc takes a mapping of AllocGroups to Segments and uses
/// this to create a LinkGraph with one Section (containing one Block) per
/// Segment. Clients can obtain a pointer to the working memory and executor
/// address of that block using the Segment's AllocGroup. Once memory has been
/// populated, clients can call finalize to finalize the memory.
///
/// Note: Segments with MemLifetimePolicy::NoAlloc are not permitted, since
/// they would not be useful, and their presence is likely to indicate a bug.
class SimpleSegmentAlloc {
public:
  /// Describes a segment to be allocated.
  struct Segment {
    Segment() = default;
    Segment(size_t ContentSize, Align ContentAlign)
        : ContentSize(ContentSize), ContentAlign(ContentAlign) {}

    size_t ContentSize = 0;
    Align ContentAlign;
  };

  /// Describes the segment working memory and executor address.
  struct SegmentInfo {
    orc::ExecutorAddr Addr;
    MutableArrayRef<char> WorkingMem;
  };

  using SegmentMap = orc::AllocGroupSmallMap<Segment>;

  using OnCreatedFunction = unique_function<void(Expected<SimpleSegmentAlloc>)>;

  using OnFinalizedFunction =
      JITLinkMemoryManager::InFlightAlloc::OnFinalizedFunction;

  static void Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                     SegmentMap Segments, OnCreatedFunction OnCreated);

  static Expected<SimpleSegmentAlloc> Create(JITLinkMemoryManager &MemMgr,
                                             const JITLinkDylib *JD,
                                             SegmentMap Segments);

  SimpleSegmentAlloc(SimpleSegmentAlloc &&);
  SimpleSegmentAlloc &operator=(SimpleSegmentAlloc &&);
  ~SimpleSegmentAlloc();

  /// Returns the SegmentInfo for the given group.
  SegmentInfo getSegInfo(orc::AllocGroup AG);

  /// Finalize all groups (async version).
  void finalize(OnFinalizedFunction OnFinalized) {
    Alloc->finalize(std::move(OnFinalized));
  }

  /// Finalize all groups.
  Expected<JITLinkMemoryManager::FinalizedAlloc> finalize() {
    return Alloc->finalize();
  }

private:
  SimpleSegmentAlloc(
      std::unique_ptr<LinkGraph> G,
      orc::AllocGroupSmallMap<Block *> ContentBlocks,
      std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc);

  std::unique_ptr<LinkGraph> G;
  orc::AllocGroupSmallMap<Block *> ContentBlocks;
  std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc;
};
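
// Illustrative usage (a sketch; MemMgr, Code and CodeSize are assumed to be
// supplied by the caller, and MemProt is assumed to convert implicitly to
// AllocGroup):
//
//   auto RX = orc::MemProt::Read | orc::MemProt::Exec;
//   SimpleSegmentAlloc::SegmentMap Segs;
//   Segs[RX] = SimpleSegmentAlloc::Segment(CodeSize, Align(16));
//   auto Alloc = SimpleSegmentAlloc::Create(MemMgr, nullptr, std::move(Segs));
//   if (!Alloc)
//     return Alloc.takeError();
//   auto SegInfo = Alloc->getSegInfo(RX);
//   memcpy(SegInfo.WorkingMem.data(), Code, CodeSize);
//   auto FA = Alloc->finalize();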

/// A JITLinkMemoryManager that allocates in-process memory.
class InProcessMemoryManager : public JITLinkMemoryManager {
public:
  class IPInFlightAlloc;

  /// Attempts to auto-detect the host page size.
  static Expected<std::unique_ptr<InProcessMemoryManager>> Create();

  /// Create an instance using the given page size.
  InProcessMemoryManager(uint64_t PageSize) : PageSize(PageSize) {}

  void allocate(const JITLinkDylib *JD, LinkGraph &G,
                OnAllocatedFunction OnAllocated) override;

  // Use overloads from base class.
  using JITLinkMemoryManager::allocate;

  void deallocate(std::vector<FinalizedAlloc> Alloc,
                  OnDeallocatedFunction OnDeallocated) override;

  // Use overloads from base class.
  using JITLinkMemoryManager::deallocate;

private:
  // FIXME: Use an in-place array instead of a vector for DeallocActions.
  //        There shouldn't need to be a heap alloc for this.
  struct FinalizedAllocInfo {
    sys::MemoryBlock StandardSegments;
    std::vector<orc::shared::WrapperFunctionCall> DeallocActions;
  };

  FinalizedAlloc createFinalizedAlloc(
      sys::MemoryBlock StandardSegments,
      std::vector<orc::shared::WrapperFunctionCall> DeallocActions);

  uint64_t PageSize;
  std::mutex FinalizedAllocsMutex;
  RecyclingAllocator<BumpPtrAllocator, FinalizedAllocInfo> FinalizedAllocInfos;
};

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H
//===------- COFF.h - Generic JIT link function for COFF ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic jit-link functions for COFF.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_COFF_H
#define LLVM_EXECUTIONENGINE_JITLINK_COFF_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from a COFF relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromCOFFObject(MemoryBufferRef ObjectBuffer);

/// Link the given graph.
///
/// Uses conservative defaults for GOT and stub handling based on the target
/// platform.
void link_COFF(std::unique_ptr<LinkGraph> G,
               std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_COFF_H
//===------ ELF_ppc64.h - JIT link functions for ELF/ppc64 ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/ppc64{le}.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_PPC64_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_PPC64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm::jitlink {

/// Create a LinkGraph from an ELF/ppc64 relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
///
/// WARNING: The big-endian backend has not been tested yet.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_ppc64(MemoryBufferRef ObjectBuffer);

/// Create a LinkGraph from an ELF/ppc64le relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_ppc64le(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF ppc64 object file.
///
/// WARNING: The big-endian backend has not been tested yet.
void link_ELF_ppc64(std::unique_ptr<LinkGraph> G,
                    std::unique_ptr<JITLinkContext> Ctx);

/// jit-link the given object buffer, which must be an ELF ppc64le object file.
void link_ELF_ppc64le(std::unique_ptr<LinkGraph> G,
                      std::unique_ptr<JITLinkContext> Ctx);

} // end namespace llvm::jitlink

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_PPC64_H
//===-- x86_64.h - Generic JITLink x86-64 edge kinds, utilities -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing x86-64 objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_X86_64_H
#define LLVM_EXECUTIONENGINE_JITLINK_X86_64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITLink/TableManager.h"

namespace llvm {
namespace jitlink {
namespace x86_64 {

/// Represents x86-64 fixups and other x86-64-specific edge kinds.
enum EdgeKind_x86_64 : Edge::Kind {

  /// A plain 64-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint64
  ///
  Pointer64 = Edge::FirstRelocation,

  /// A plain 32-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint32
  ///
  /// Errors:
  ///   - The target must reside in the low 32-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer32,

  /// A signed 32-bit pointer value relocation
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : int32
  ///
  /// Errors:
  ///   - The target must reside in the signed 32-bit range ([-2**31, 2**31 - 1])
  ///     of the address space, otherwise an out-of-range error will be returned.
  Pointer32Signed,

  /// A plain 16-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint16
  ///
  /// Errors:
  ///   - The target must reside in the low 16-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer16,

  /// A plain 8-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint8
  ///
  /// Errors:
  ///   - The target must reside in the low 8-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer8,

  /// A 64-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int64
  ///
  Delta64,

  /// A 32-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  Delta32,

  /// A 64-bit negative delta.
  ///
  /// Delta from the target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int64
  ///
  NegDelta64,

  /// A 32-bit negative delta.
  ///
  /// Delta from the target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  NegDelta32,

  /// A 64-bit GOT delta.
  ///
  /// Delta from the global offset table to the target
  ///
  /// Fixup expression:
  ///   Fixup <- Target - GOTSymbol + Addend : int64
  ///
  /// Errors:
  ///   - *ASSERTION* GOTSymbol must be non-null (i.e. the GOT section symbol
  ///     must have been defined), otherwise an assert will be triggered.
  Delta64FromGOT,

  /// A 32-bit PC-relative branch.
  ///
  /// Represents a PC-relative call or branch to a target. This can be used to
  /// identify, record, and/or patch call sites.
  ///
  /// The fixup expression for this kind includes an implicit offset to account
  /// for the PC (unlike the Delta edges) so that a Branch32PCRel with a target
  /// T and addend zero is a call/branch to the start (offset zero) of T.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  BranchPCRel32,

  /// A 32-bit PC-relative relocation.
  ///
  /// Represents a data/control flow instruction using PC-relative addressing
  /// to a target.
  ///
  /// The fixup expression for this kind includes an implicit offset to account
  /// for the PC (unlike the Delta edges) so that a PCRel32 with a target
  /// T and addend zero is a call/branch to the start (offset zero) of T.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  PCRel32,

  /// A 32-bit PC-relative branch to a pointer jump stub.
  ///
  /// The target of this relocation should be a pointer jump stub of the form:
  ///
  /// \code{.s}
  ///   .text
  ///   jmpq *tgtptr(%rip)
  ///   ; ...
  ///
  ///   .data
  ///   tgtptr:
  ///     .quad 0
  /// \endcode
  ///
  /// This edge kind has the same fixup expression as BranchPCRel32, but further
  /// identifies the call/branch as being to a pointer jump stub. For edges of
  /// this kind the jump stub should not be bypassed (use
  /// BranchPCRel32ToPtrJumpStubBypassable for that), but the pointer location
  /// target may be recorded to allow manipulation at runtime.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend - 4 : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  BranchPCRel32ToPtrJumpStub,

  /// A relaxable version of BranchPCRel32ToPtrJumpStub.
  ///
  /// The edge kind has the same fixup expression as BranchPCRel32ToPtrJumpStub,
  /// but identifies the call/branch as being to a pointer jump stub that may be
  /// bypassed with a direct jump to the ultimate target if the ultimate target
  /// is within range of the fixup location.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend - 4 : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  BranchPCRel32ToPtrJumpStubBypassable,

  /// A GOT entry getter/constructor, transformed to Delta32 pointing at the GOT
  /// entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Delta32 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToDelta32,

  /// A GOT entry getter/constructor, transformed to Delta64 pointing at the GOT
  /// entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Delta64 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToDelta64,

  /// A GOT entry offset within GOT getter/constructor, transformed to
  /// Delta64FromGOT pointing at the GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Delta64FromGOT
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend. A GOT entry for the target should be created if one does
  /// not already exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  RequestGOTAndTransformToDelta64FromGOT,

  /// A PC-relative load of a GOT entry, relaxable if the GOT entry target is
  /// in-range of the fixup.
  ///
  /// TODO: Explain the optimization
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  PCRel32GOTLoadRelaxable,

  /// A PC-relative REX load of a GOT entry, relaxable if GOT entry target
  /// is in-range of the fixup.
  ///
  /// If the GOT entry target is in-range of the fixup then the load from the
  /// GOT may be replaced with a direct memory address calculation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  PCRel32GOTLoadREXRelaxable,

  /// A GOT entry getter/constructor, transformed to
  /// PCRel32GOTLoadREXRelaxable pointing at the GOT entry for the original
  /// target.
  ///
  /// Indicates that this edge should be lowered to a PCRel32GOTLoadREXRelaxable
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend. A GOT entry for the target should be created if one does not
  /// already exist.
  ///
  /// Edges of this kind are usually lowered by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable,

  /// A GOT entry getter/constructor, transformed to
  /// PCRel32GOTLoadRelaxable pointing at the GOT entry for the original
  /// target.
  ///
  /// Indicates that this edge should be lowered to a PCRel32GOTLoadRelaxable
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend. A GOT entry for the target should be created if one does not
  /// already exist.
  ///
  /// Edges of this kind are usually lowered by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPCRel32GOTLoadRelaxable,

  /// A PC-relative REX load of a Thread Local Variable Pointer (TLVP) entry,
  /// relaxable if the TLVP entry target is in-range of the fixup.
  ///
  /// If the TLVP entry target is in-range of the fixup then the load from the
  /// TLVP may be replaced with a direct memory address calculation.
  ///
  /// The target of this edge must be a thread local variable entry of the form
  ///   .quad <tlv getter thunk>
  ///   .quad <tlv key>
  ///   .quad <tlv initializer>
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///   - The target must be either external, or a TLV entry of the required
  ///     form, otherwise a malformed TLV entry error will be returned.
  ///
  PCRel32TLVPLoadREXRelaxable,

  /// A TLSDesc entry getter/constructor, transformed to Delta32.
  ///
  /// Indicates that this edge should be transformed into a Delta32 targeting
  /// the TLSDesc entry in the GOT for the edge's current target, maintaining
  /// the same addend. A TLSDesc entry for the target should be created if
  /// one does not already exist.
  RequestTLSDescInGOTAndTransformToDelta32,

  /// A TLVP entry getter/constructor, transformed to
  /// PCRel32TLVPLoadREXRelaxable.
  ///
  /// Indicates that this edge should be transformed into a
  /// PCRel32TLVPLoadREXRelaxable targeting the TLVP entry for the edge's
  /// current target. A TLVP entry for the target should be created if one does
  /// not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable,
  // First platform specific relocation.
  FirstPlatformRelocation
};

/// Returns a string name for the given x86-64 edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);

/// Apply fixup expression for edge to block content.
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
                        const Symbol *GOTSymbol) {
  using namespace support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();
  auto FixupAddress = B.getAddress() + E.getOffset();

  switch (E.getKind()) {

  case Pointer64: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    *(ulittle64_t *)FixupPtr = Value;
    break;
  }

  case Pointer32: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (LLVM_LIKELY(isUInt<32>(Value)))
      *(ulittle32_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }
  case Pointer32Signed: {
    int64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (LLVM_LIKELY(isInt<32>(Value)))
      *(little32_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case Pointer16: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (LLVM_LIKELY(isUInt<16>(Value)))
      *(ulittle16_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case Pointer8: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (LLVM_LIKELY(isUInt<8>(Value)))
      *(uint8_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case PCRel32:
  case BranchPCRel32:
  case BranchPCRel32ToPtrJumpStub:
  case BranchPCRel32ToPtrJumpStubBypassable:
  case PCRel32GOTLoadRelaxable:
  case PCRel32GOTLoadREXRelaxable:
  case PCRel32TLVPLoadREXRelaxable: {
    int64_t Value =
        E.getTarget().getAddress() - (FixupAddress + 4) + E.getAddend();
    if (LLVM_LIKELY(isInt<32>(Value)))
      *(little32_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case Delta64: {
    int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
    *(little64_t *)FixupPtr = Value;
    break;
  }

  case Delta32: {
    int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
    if (LLVM_LIKELY(isInt<32>(Value)))
      *(little32_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case NegDelta64: {
    int64_t Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
    *(little64_t *)FixupPtr = Value;
    break;
  }

  case NegDelta32: {
    int64_t Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
    if (LLVM_LIKELY(isInt<32>(Value)))
      *(little32_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }
  case Delta64FromGOT: {
    assert(GOTSymbol && "No GOT section symbol");
    int64_t Value =
        E.getTarget().getAddress() - GOTSymbol->getAddress() + E.getAddend();
    *(little64_t *)FixupPtr = Value;
    break;
  }

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " unsupported edge kind " + getEdgeKindName(E.getKind()));
  }

  return Error::success();
}
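
// A minimal usage sketch (hypothetical helper, not part of the upstream
// header): applyFixup is typically driven per block once all Request* edges
// have been lowered, stopping at the first error.
inline Error applyAllFixups(LinkGraph &G, Block &B, const Symbol *GOTSymbol) {
  for (const Edge &E : B.edges())
    if (Error Err = applyFixup(G, B, E, GOTSymbol))
      return Err;
  return Error::success();
}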

/// x86-64 pointer size.
constexpr uint64_t PointerSize = 8;

/// x86-64 null pointer content.
extern const char NullPointerContent[PointerSize];

/// x86-64 pointer jump stub content.
///
/// Contains the instruction sequence for an indirect jump via an in-memory
/// pointer:
///   jmpq *ptr(%rip)
extern const char PointerJumpStubContent[6];
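// (Illustrative note, not normative: the expected encoding is the 6-byte
// sequence 0xff 0x25 <disp32>, an indirect "jmpq *disp32(%rip)"; the disp32
// field is filled in via the Delta32 edge added by createPointerJumpStubBlock
// below.)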

/// Creates a new pointer block in the given section and returns an anonymous
/// symbol pointing to it.
///
/// If InitialTarget is given then a Pointer64 relocation will be added to the
/// block pointing at InitialTarget.
///
/// The pointer block will have the following default values:
///   alignment: 64-bit
///   alignment-offset: 0
///   address: highest allowable (~7U)
inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
                                      Symbol *InitialTarget = nullptr,
                                      uint64_t InitialAddend = 0) {
  auto &B = G.createContentBlock(PointerSection, NullPointerContent,
                                 orc::ExecutorAddr(~uint64_t(7)), 8, 0);
  if (InitialTarget)
    B.addEdge(Pointer64, 0, *InitialTarget, InitialAddend);
  return G.addAnonymousSymbol(B, 0, 8, false, false);
}

/// Create a jump stub block that jumps via the pointer at the given symbol.
///
/// The stub block will have the following default values:
///   alignment: 8-bit
///   alignment-offset: 0
///   address: highest allowable (~5U)
inline Block &createPointerJumpStubBlock(LinkGraph &G, Section &StubSection,
                                         Symbol &PointerSymbol) {
  auto &B = G.createContentBlock(StubSection, PointerJumpStubContent,
                                 orc::ExecutorAddr(~uint64_t(5)), 1, 0);
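  // The 32-bit displacement field lives at offset 2 of the 6-byte stub, and
  // RIP-relative addressing is relative to the end of the instruction, so
  // the -4 addend rebases the Delta32 to Target - (StubStart + 6).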
  B.addEdge(Delta32, 2, PointerSymbol, -4);
  return B;
}

/// Create a jump stub that jumps via the pointer at the given symbol and
/// an anonymous symbol pointing to it. Return the anonymous symbol.
///
/// The stub block will be created by createPointerJumpStubBlock.
inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
                                              Section &StubSection,
                                              Symbol &PointerSymbol) {
  return G.addAnonymousSymbol(
      createPointerJumpStubBlock(G, StubSection, PointerSymbol), 0, 6, true,
      false);
}

/// Global Offset Table Builder.
class GOTTableManager : public TableManager<GOTTableManager> {
public:
  static StringRef getSectionName() { return "$__GOT"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind KindToSet = Edge::Invalid;
    switch (E.getKind()) {
    case x86_64::Delta64FromGOT: {
      // we need to make sure that the GOT section exists, but don't otherwise
      // need to fix up this edge
      getGOTSection(G);
      return false;
    }
    case x86_64::RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable:
      KindToSet = x86_64::PCRel32GOTLoadREXRelaxable;
      break;
    case x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
      KindToSet = x86_64::PCRel32GOTLoadRelaxable;
      break;
    case x86_64::RequestGOTAndTransformToDelta64:
      KindToSet = x86_64::Delta64;
      break;
    case x86_64::RequestGOTAndTransformToDelta64FromGOT:
      KindToSet = x86_64::Delta64FromGOT;
      break;
    case x86_64::RequestGOTAndTransformToDelta32:
      KindToSet = x86_64::Delta32;
      break;
    default:
      return false;
    }
    assert(KindToSet != Edge::Invalid &&
           "Fell through switch, but no new kind to set");
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
             << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
             << formatv("{0:x}", E.getOffset()) << ")\n";
    });
    E.setKind(KindToSet);
    E.setTarget(getEntryForTarget(G, E.getTarget()));
    return true;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointer(G, getGOTSection(G), &Target);
  }

private:
  Section &getGOTSection(LinkGraph &G) {
    if (!GOTSection)
      GOTSection = &G.createSection(getSectionName(), orc::MemProt::Read);
    return *GOTSection;
  }

  Section *GOTSection = nullptr;
};

/// Procedure Linkage Table Builder.
class PLTTableManager : public TableManager<PLTTableManager> {
public:
  PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}

  static StringRef getSectionName() { return "$__STUBS"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getKind() == x86_64::BranchPCRel32 && !E.getTarget().isDefined()) {
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      // Set the edge kind to BranchPCRel32ToPtrJumpStubBypassable to enable
      // it to be optimized when the target is in-range.
      E.setKind(x86_64::BranchPCRel32ToPtrJumpStubBypassable);
      E.setTarget(getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointerJumpStub(G, getStubsSection(G),
                                          GOT.getEntryForTarget(G, Target));
  }

public:
  Section &getStubsSection(LinkGraph &G) {
    if (!PLTSection)
      PLTSection = &G.createSection(getSectionName(),
                                    orc::MemProt::Read | orc::MemProt::Exec);
    return *PLTSection;
  }

  GOTTableManager &GOT;
  Section *PLTSection = nullptr;
};
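
// A minimal sketch (hypothetical pass, not part of the upstream header) of
// how the two table managers above are typically driven: one walk over the
// graph's existing edges lets each manager claim the edge kinds it handles.
inline Error buildGOTAndStubs(LinkGraph &G) {
  GOTTableManager GOT;
  PLTTableManager PLT(GOT);
  visitExistingEdges(G, GOT, PLT);
  return Error::success();
}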

/// Optimize the GOT and stub relocations if the edge target address is in
/// range.
/// 1. PCRel32GOTLoadRelaxable. For this edge kind, if the target is in
///    range, replace the GOT load with a lea.
/// 2. BranchPCRel32ToPtrJumpStubBypassable. For this edge kind, if the
///    target is in range, replace the indirect jump via the PLT stub with a
///    direct jump to the target.
Error optimizeGOTAndStubAccesses(LinkGraph &G);

} // namespace x86_64
} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_X86_64_H
//===------ aarch32.h - Generic JITLink arm/thumb utilities -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing arm/thumb objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_AARCH32
#define LLVM_EXECUTIONENGINE_JITLINK_AARCH32

#include "TableManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace jitlink {
namespace aarch32 {

/// JITLink-internal AArch32 fixup kinds
enum EdgeKind_aarch32 : Edge::Kind {

  ///
  /// Relocations of class Data respect target endianness (unless otherwise
  /// specified)
  ///
  FirstDataRelocation = Edge::FirstRelocation,

  /// Relative 32-bit value relocation
  Data_Delta32 = FirstDataRelocation,

  /// Absolute 32-bit value relocation
  Data_Pointer32,

  LastDataRelocation = Data_Pointer32,

  ///
  /// Relocations of class Arm (covers fixed-width 4-byte instruction subset)
  ///
  FirstArmRelocation,

  /// TODO: Arm_Call is here only as a placeholder for now.
  Arm_Call = FirstArmRelocation,

  LastArmRelocation = Arm_Call,

  ///
  /// Relocations of class Thumb16 and Thumb32 (covers Thumb instruction subset)
  ///
  FirstThumbRelocation,

  /// Write immediate value for PC-relative branch with link (can bridge between
  /// Arm and Thumb).
  Thumb_Call = FirstThumbRelocation,

  /// Write immediate value for (unconditional) PC-relative branch without link.
  Thumb_Jump24,

  /// Write immediate value to the lower halfword of the destination register
  Thumb_MovwAbsNC,

  /// Write immediate value to the top halfword of the destination register
  Thumb_MovtAbs,

  LastThumbRelocation = Thumb_MovtAbs,
};

/// Flags enum for AArch32-specific symbol properties
enum TargetFlags_aarch32 : TargetFlagsType {
  ThumbSymbol = 1 << 0,
};

/// Human-readable name for a given CPU architecture kind
const char *getCPUArchName(ARMBuildAttrs::CPUArch K);

/// Get a human-readable name for the given AArch32 edge kind.
const char *getEdgeKindName(Edge::Kind K);

/// AArch32 uses stubs for a number of purposes, like branch range extension
/// or interworking between Arm and Thumb instruction subsets.
///
/// Stub implementations vary depending on CPU architecture (v4, v6, v7),
/// instruction subset and branch type (absolute/PC-relative).
///
/// For each kind of stub, the StubsFlavor defines one concrete form that is
/// used throughout the LinkGraph.
///
/// Stubs are often called "veneers" in the official docs and online.
///
enum StubsFlavor {
  Unsupported = 0,
  Thumbv7,
};

/// JITLink sub-arch configuration for Arm CPU models
struct ArmConfig {
  bool J1J2BranchEncoding = false;
  StubsFlavor Stubs = Unsupported;
};

/// Obtain the sub-arch configuration for a given Arm CPU model.
inline ArmConfig getArmConfigForCPUArch(ARMBuildAttrs::CPUArch CPUArch) {
  ArmConfig ArmCfg;
  switch (CPUArch) {
  case ARMBuildAttrs::v7:
  case ARMBuildAttrs::v8_A:
    ArmCfg.J1J2BranchEncoding = true;
    ArmCfg.Stubs = Thumbv7;
    break;
  default:
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "  Warning: ARM config not defined for CPU architecture "
             << getCPUArchName(CPUArch);
    });
    break;
  }
  return ArmCfg;
}
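
// A minimal sketch (hypothetical helper) of the intended use: resolve the
// config once up front and refuse to link when no stub flavor is defined for
// the CPU architecture.
inline Expected<ArmConfig>
getSupportedArmConfig(ARMBuildAttrs::CPUArch CPUArch) {
  ArmConfig ArmCfg = getArmConfigForCPUArch(CPUArch);
  if (ArmCfg.Stubs == Unsupported)
    return make_error<JITLinkError>(
        Twine("ARM config not defined for CPU architecture ") +
        getCPUArchName(CPUArch));
  return ArmCfg;
}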

/// Immutable pair of halfwords, Hi and Lo, with overflow check
struct HalfWords {
  constexpr HalfWords() : Hi(0), Lo(0) {}
  constexpr HalfWords(uint32_t Hi, uint32_t Lo) : Hi(Hi), Lo(Lo) {
    assert(isUInt<16>(Hi) && "Overflow in first half-word");
    assert(isUInt<16>(Lo) && "Overflow in second half-word");
  }
  const uint16_t Hi; // First halfword
  const uint16_t Lo; // Second halfword
};

/// Collection of named constants per fixup kind. It may contain but is not
/// limited to the following entries:
///
///   Opcode      - Values of the op-code bits in the instruction, with
///                 unaffected bits nulled
///   OpcodeMask  - Mask with all bits set that encode the op-code
///   ImmMask     - Mask with all bits set that encode the immediate value
///   RegMask     - Mask with all bits set that encode the register
///
template <EdgeKind_aarch32 Kind> struct FixupInfo {};

template <> struct FixupInfo<Thumb_Jump24> {
  static constexpr HalfWords Opcode{0xf000, 0x8000};
  static constexpr HalfWords OpcodeMask{0xf800, 0x8000};
  static constexpr HalfWords ImmMask{0x07ff, 0x2fff};
  static constexpr uint16_t LoBitConditional = 0x1000;
};

template <> struct FixupInfo<Thumb_Call> {
  static constexpr HalfWords Opcode{0xf000, 0xc000};
  static constexpr HalfWords OpcodeMask{0xf800, 0xc000};
  static constexpr HalfWords ImmMask{0x07ff, 0x2fff};
  static constexpr uint16_t LoBitH = 0x0001;
  static constexpr uint16_t LoBitNoBlx = 0x1000;
};

template <> struct FixupInfo<Thumb_MovtAbs> {
  static constexpr HalfWords Opcode{0xf2c0, 0x0000};
  static constexpr HalfWords OpcodeMask{0xfbf0, 0x8000};
  static constexpr HalfWords ImmMask{0x040f, 0x70ff};
  static constexpr HalfWords RegMask{0x0000, 0x0f00};
};

template <>
struct FixupInfo<Thumb_MovwAbsNC> : public FixupInfo<Thumb_MovtAbs> {
  static constexpr HalfWords Opcode{0xf240, 0x0000};
};

/// Helper function to read the initial addend for Data-class relocations.
Expected<int64_t> readAddendData(LinkGraph &G, Block &B, const Edge &E);

/// Helper function to read the initial addend for Arm-class relocations.
Expected<int64_t> readAddendArm(LinkGraph &G, Block &B, const Edge &E);

/// Helper function to read the initial addend for Thumb-class relocations.
Expected<int64_t> readAddendThumb(LinkGraph &G, Block &B, const Edge &E,
                                  const ArmConfig &ArmCfg);

/// Read the initial addend for a REL-type relocation. It's the value encoded
/// in the immediate field of the fixup location by the compiler.
inline Expected<int64_t> readAddend(LinkGraph &G, Block &B, const Edge &E,
                                    const ArmConfig &ArmCfg) {
  Edge::Kind Kind = E.getKind();
  if (Kind <= LastDataRelocation)
    return readAddendData(G, B, E);

  if (Kind <= LastArmRelocation)
    return readAddendArm(G, B, E);

  if (Kind <= LastThumbRelocation)
    return readAddendThumb(G, B, E, ArmCfg);

  llvm_unreachable("Relocation must be of class Data, Arm or Thumb");
}

/// Helper function to apply the fixup for Data-class relocations.
Error applyFixupData(LinkGraph &G, Block &B, const Edge &E);

/// Helper function to apply the fixup for Arm-class relocations.
Error applyFixupArm(LinkGraph &G, Block &B, const Edge &E);

/// Helper function to apply the fixup for Thumb-class relocations.
Error applyFixupThumb(LinkGraph &G, Block &B, const Edge &E,
                      const ArmConfig &ArmCfg);

/// Apply fixup expression for edge to block content.
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
                        const ArmConfig &ArmCfg) {
  Edge::Kind Kind = E.getKind();

  if (Kind <= LastDataRelocation)
    return applyFixupData(G, B, E);

  if (Kind <= LastArmRelocation)
    return applyFixupArm(G, B, E);

  if (Kind <= LastThumbRelocation)
    return applyFixupThumb(G, B, E, ArmCfg);

  llvm_unreachable("Relocation must be of class Data, Arm or Thumb");
}

/// Stubs builder for a specific StubsFlavor
///
/// Right now we only have one default stub kind, but we want to extend this
/// and allow creation of specific kinds in the future (e.g. branch range
/// extension or interworking).
///
/// Let's keep it simple for the moment and not wire this through a GOT.
///
template <StubsFlavor Flavor>
class StubsManager : public TableManager<StubsManager<Flavor>> {
public:
  StubsManager() = default;

  /// Name of the object file section that will contain all our stubs.
  static StringRef getSectionName() { return "__llvm_jitlink_STUBS"; }

  /// Implements link-graph traversal via visitExistingEdges().
  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getTarget().isDefined())
      return false;

    switch (E.getKind()) {
    case Thumb_Call:
    case Thumb_Jump24: {
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      E.setTarget(this->getEntryForTarget(G, E.getTarget()));
      return true;
    }
    }
    return false;
  }

  /// Create a branch range extension stub for the class's flavor.
  Symbol &createEntry(LinkGraph &G, Symbol &Target);

private:
  /// Create a new node in the link-graph for the given stub template.
  template <size_t Size>
  Block &addStub(LinkGraph &G, const uint8_t (&Code)[Size],
                 uint64_t Alignment) {
    ArrayRef<char> Template(reinterpret_cast<const char *>(Code), Size);
    return G.createContentBlock(getStubsSection(G), Template,
                                orc::ExecutorAddr(), Alignment, 0);
  }

  /// Get or create the object file section that will contain all our stubs.
  Section &getStubsSection(LinkGraph &G) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    return *StubsSection;
  }

  Section *StubsSection = nullptr;
};

/// Create a branch range extension stub with Thumb encoding for v7 CPUs.
template <>
Symbol &StubsManager<Thumbv7>::createEntry(LinkGraph &G, Symbol &Target);
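
// A minimal sketch (hypothetical pass) of how a stubs manager is driven over
// a graph, mirroring the table-manager pattern used by the x86 backends:
inline Error buildThumbv7Stubs(LinkGraph &G) {
  StubsManager<Thumbv7> Stubs;
  visitExistingEdges(G, Stubs);
  return Error::success();
}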

} // namespace aarch32
} // namespace jitlink
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_AARCH32
//===---- ELF_aarch32.h - JIT link functions for arm/thumb -----*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/aarch32.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_AARCH32
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_AARCH32

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITLink/aarch32.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF/arm relocatable object
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_aarch32(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF arm/thumb object
/// file.
void link_ELF_aarch32(std::unique_ptr<LinkGraph> G,
                      std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_AARCH32
//===------- MachO.h - Generic JIT link function for MachO ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic jit-link functions for MachO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_MACHO_H
#define LLVM_EXECUTIONENGINE_JITLINK_MACHO_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from a MachO relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromMachOObject(MemoryBufferRef ObjectBuffer);

/// jit-link the given ObjBuffer, which must be a MachO object file.
///
/// Uses conservative defaults for GOT and stub handling based on the target
/// platform.
void link_MachO(std::unique_ptr<LinkGraph> G,
                std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_MACHO_H
//===--- MachO_x86_64.h - JIT link functions for MachO/x86-64 ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for MachO/x86-64.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_MACHO_X86_64_H
#define LLVM_EXECUTIONENGINE_JITLINK_MACHO_X86_64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from a MachO/x86-64 relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromMachOObject_x86_64(MemoryBufferRef ObjectBuffer);

/// jit-link the given LinkGraph.
///
/// If PrePrunePasses is empty then a default mark-live pass will be inserted
/// that will mark all exported atoms live. If PrePrunePasses is not empty, the
/// caller is responsible for including a pass to mark atoms as live.
///
/// If PostPrunePasses is empty then a default GOT-and-stubs insertion pass will
/// be inserted. If PostPrunePasses is not empty then the caller is responsible
/// for including a pass to insert GOT and stub edges.
void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
                       std::unique_ptr<JITLinkContext> Ctx);

/// Returns a pass suitable for splitting __eh_frame sections in MachO/x86-64
/// objects.
LinkGraphPassFunction createEHFrameSplitterPass_MachO_x86_64();

/// Returns a pass suitable for fixing missing edges in an __eh_frame section
/// in a MachO/x86-64 object.
LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_x86_64();

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_MACHO_X86_64_H
//===--------- DWARFRecordSectionSplitter.h - JITLink -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_DWARFRECORDSECTIONSPLITTER_H
#define LLVM_EXECUTIONENGINE_JITLINK_DWARFRECORDSECTIONSPLITTER_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// A LinkGraph pass that splits blocks in a section that follows the DWARF
/// Record format into sub-blocks where each header gets its own block.
/// When splitting EHFrames, DWARFRecordSectionSplitter should not be run
/// without EHFrameEdgeFixer, which is responsible for adding FDE-to-CIE edges.
class DWARFRecordSectionSplitter {
public:
  DWARFRecordSectionSplitter(StringRef SectionName);
  Error operator()(LinkGraph &G);

private:
  Error processBlock(LinkGraph &G, Block &B, LinkGraph::SplitBlockCache &Cache);

  StringRef SectionName;
};

} // namespace jitlink
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_DWARFRECORDSECTIONSPLITTER_H
//=== i386.h - Generic JITLink i386 edge kinds, utilities -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing i386 objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_I386_H
#define LLVM_EXECUTIONENGINE_JITLINK_I386_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITLink/TableManager.h"

namespace llvm::jitlink::i386 {
/// Represents i386 fixups.
enum EdgeKind_i386 : Edge::Kind {

  /// None
  None = Edge::FirstRelocation,

  /// A plain 32-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint32
  ///
  /// Errors:
  ///   - The target must reside in the low 32-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer32,

  /// A 32-bit PC-relative relocation.
  ///
  /// Represents a data/control flow instruction using PC-relative addressing
  /// to a target.
  ///
  /// The fixup expression for this kind includes an implicit offset to account
  /// for the PC (unlike the Delta edges) so that a PCRel32 with a target
  /// T and addend zero is a call/branch to the start (offset zero) of T.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  PCRel32,

  /// A plain 16-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint16
  ///
  /// Errors:
  ///   - The target must reside in the low 16-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer16,

  /// A 16-bit PC-relative relocation.
  ///
  /// Represents a data/control flow instruction using PC-relative addressing
  /// to a target.
  ///
  /// The fixup expression for this kind includes an implicit offset to account
  /// for the PC (unlike the Delta edges) so that a PCRel16 with a target
  /// T and addend zero is a call/branch to the start (offset zero) of T.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int16
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int16, otherwise
  ///     an out-of-range error will be returned.
  ///
  PCRel16,

  /// A 32-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  Delta32,

  /// A 32-bit GOT delta.
  ///
  /// Delta from the global offset table to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - GOTSymbol + Addend : int32
  ///
  /// Errors:
  ///   - *ASSERTION* A null GOTSymbol (i.e. the GOT section symbol has not
  ///     been defined) will trigger an assertion failure.
  Delta32FromGOT,

  /// A GOT entry offset within GOT getter/constructor, transformed to
  /// Delta32FromGOT pointing at the GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Delta32FromGOT
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted
  /// by default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  RequestGOTAndTransformToDelta32FromGOT,

  /// A 32-bit PC-relative branch.
  ///
  /// Represents a PC-relative call or branch to a target. This can be used to
  /// identify, record, and/or patch call sites.
  ///
  /// The fixup expression for this kind includes an implicit offset to account
  /// for the PC (unlike the Delta edges) so that a Branch32PCRel with a target
  /// T and addend zero is a call/branch to the start (offset zero) of T.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - (Fixup + 4) + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  BranchPCRel32,

  /// A 32-bit PC-relative branch to a pointer jump stub.
  ///
  /// The target of this relocation should be a pointer jump stub of the form:
  ///
  /// \code{.s}
  ///   .text
  ///   jmp *tgtptr
  ///   ; ...
  ///
  ///   .data
  ///   tgtptr:
  ///     .quad 0
  /// \endcode
  ///
  /// This edge kind has the same fixup expression as BranchPCRel32, but further
  /// identifies the call/branch as being to a pointer jump stub. For edges of
  /// this kind the jump stub should not be bypassed (use
  /// BranchPCRel32ToPtrJumpStubBypassable for that), but the pointer location
  /// target may be recorded to allow manipulation at runtime.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend - 4 : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  BranchPCRel32ToPtrJumpStub,

  /// A relaxable version of BranchPCRel32ToPtrJumpStub.
  ///
  /// The edge kind has the same fixup expression as BranchPCRel32ToPtrJumpStub,
  /// but identifies the call/branch as being to a pointer jump stub that may be
  /// bypassed with a direct jump to the ultimate target if the ultimate target
  /// is within range of the fixup location.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend - 4 : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  BranchPCRel32ToPtrJumpStubBypassable,
};
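
// Worked example (illustrative addresses): a BranchPCRel32 fixup at address
// 0x401000 targeting 0x402000 with addend 0 stores
//   0x402000 - (0x401000 + 4) + 0 = 0xffc
// in the 32-bit field at the fixup location.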

/// Returns a string name for the given i386 edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);

/// Apply fixup expression for edge to block content.
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E,
                        const Symbol *GOTSymbol) {
  using namespace i386;
  using namespace llvm::support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();
  auto FixupAddress = B.getAddress() + E.getOffset();

  switch (E.getKind()) {
  case i386::None: {
    break;
  }

  case i386::Pointer32: {
    uint32_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    *(ulittle32_t *)FixupPtr = Value;
    break;
  }

  case i386::PCRel32: {
    int32_t Value =
        E.getTarget().getAddress() - (FixupAddress + 4) + E.getAddend();
    *(little32_t *)FixupPtr = Value;
    break;
  }

  case i386::Pointer16: {
    uint32_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (LLVM_LIKELY(isUInt<16>(Value)))
      *(ulittle16_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case i386::PCRel16: {
    int32_t Value =
        E.getTarget().getAddress() - (FixupAddress + 4) + E.getAddend();
    if (LLVM_LIKELY(isInt<16>(Value)))
      *(little16_t *)FixupPtr = Value;
    else
      return makeTargetOutOfRangeError(G, B, E);
    break;
  }

  case i386::Delta32: {
    int32_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
    *(little32_t *)FixupPtr = Value;
    break;
  }

  case i386::Delta32FromGOT: {
    assert(GOTSymbol && "No GOT section symbol");
    int32_t Value =
        E.getTarget().getAddress() - GOTSymbol->getAddress() + E.getAddend();
    *(little32_t *)FixupPtr = Value;
    break;
  }

  case i386::BranchPCRel32:
  case i386::BranchPCRel32ToPtrJumpStub:
  case i386::BranchPCRel32ToPtrJumpStubBypassable: {
    int32_t Value =
        E.getTarget().getAddress() - (FixupAddress + 4) + E.getAddend();
    *(little32_t *)FixupPtr = Value;
    break;
  }

  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " unsupported edge kind " + getEdgeKindName(E.getKind()));
  }

  return Error::success();
}

/// i386 pointer size.
constexpr uint32_t PointerSize = 4;

/// i386 null pointer content.
extern const char NullPointerContent[PointerSize];

/// i386 pointer jump stub content.
///
/// Contains the instruction sequence for an indirect jump via an in-memory
/// pointer:
///   jmp *ptr
extern const char PointerJumpStubContent[6];

/// Creates a new pointer block in the given section and returns an anonymous
/// symbol pointing to it.
///
/// If InitialTarget is given then a Pointer32 relocation will be added to the
/// block pointing at InitialTarget.
///
/// The pointer block will have the following default values:
///   alignment: 8 bytes
///   alignment-offset: 0
///   address: zero (assigned at allocation time)
inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
                                      Symbol *InitialTarget = nullptr,
                                      uint64_t InitialAddend = 0) {
  auto &B = G.createContentBlock(PointerSection, NullPointerContent,
                                 orc::ExecutorAddr(), 8, 0);
  if (InitialTarget)
    B.addEdge(Pointer32, 0, *InitialTarget, InitialAddend);
  return G.addAnonymousSymbol(B, 0, PointerSize, false, false);
}

/// Create a jump stub block that jumps via the pointer at the given symbol.
///
/// The stub block will have the following default values:
///   alignment: 8 bytes
///   alignment-offset: 0
///   address: zero (assigned at allocation time)
inline Block &createPointerJumpStubBlock(LinkGraph &G, Section &StubSection,
                                         Symbol &PointerSymbol) {
  auto &B = G.createContentBlock(StubSection, PointerJumpStubContent,
                                 orc::ExecutorAddr(), 8, 0);
  B.addEdge(Pointer32,
            // Offset is 2 because the first 2 bytes of the jump stub block
            // are {0xff, 0x25} -- an indirect absolute jump.
            2, PointerSymbol, 0);
  return B;
}

/// Create a jump stub that jumps via the pointer at the given symbol and
/// an anonymous symbol pointing to it. Return the anonymous symbol.
///
/// The stub block will be created by createPointerJumpStubBlock.
inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
                                              Section &StubSection,
                                              Symbol &PointerSymbol) {
  return G.addAnonymousSymbol(
      createPointerJumpStubBlock(G, StubSection, PointerSymbol), 0, 6, true,
      false);
}

/// Global Offset Table Builder.
class GOTTableManager : public TableManager<GOTTableManager> {
public:
  static StringRef getSectionName() { return "$__GOT"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind KindToSet = Edge::Invalid;
    switch (E.getKind()) {
    case i386::Delta32FromGOT: {
      // we need to make sure that the GOT section exists, but don't otherwise
      // need to fix up this edge
      getGOTSection(G);
      return false;
    }
    case i386::RequestGOTAndTransformToDelta32FromGOT:
      KindToSet = i386::Delta32FromGOT;
      break;
    default:
      return false;
    }
    assert(KindToSet != Edge::Invalid &&
           "Fell through switch, but no new kind to set");
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
             << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
             << formatv("{0:x}", E.getOffset()) << ")\n";
    });
    E.setKind(KindToSet);
    E.setTarget(getEntryForTarget(G, E.getTarget()));
    return true;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointer(G, getGOTSection(G), &Target);
  }

private:
  Section &getGOTSection(LinkGraph &G) {
    if (!GOTSection)
      GOTSection = &G.createSection(getSectionName(), orc::MemProt::Read);
    return *GOTSection;
  }

  Section *GOTSection = nullptr;
};

/// Procedure Linkage Table Builder.
class PLTTableManager : public TableManager<PLTTableManager> {
public:
  PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}

  static StringRef getSectionName() { return "$__STUBS"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getKind() == i386::BranchPCRel32 && !E.getTarget().isDefined()) {
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      // Set the edge kind to BranchPCRel32ToPtrJumpStubBypassable to enable
      // it to be optimized when the target is in-range.
      E.setKind(i386::BranchPCRel32ToPtrJumpStubBypassable);
      E.setTarget(getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointerJumpStub(G, getStubsSection(G),
                                          GOT.getEntryForTarget(G, Target));
  }

public:
  Section &getStubsSection(LinkGraph &G) {
    if (!PLTSection)
      PLTSection = &G.createSection(getSectionName(),
                                    orc::MemProt::Read | orc::MemProt::Exec);
    return *PLTSection;
  }

  GOTTableManager &GOT;
  Section *PLTSection = nullptr;
};

/// Optimize the GOT and stub relocations if the edge target address is in
/// range.
/// 1. PCRel32GOTLoadRelaxable. For this edge kind, if the target is in
///    range, replace the GOT load with a lea. (Currently unimplemented for
///    i386.)
/// 2. BranchPCRel32ToPtrJumpStubBypassable. For this edge kind, if the
///    target is in range, replace the indirect jump via the PLT stub with a
///    direct jump to the target.
Error optimizeGOTAndStubAccesses(LinkGraph &G);
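
// A minimal sketch (hypothetical pass ordering, not part of the upstream
// header) of how the pieces above combine: build GOT/stub tables after
// pruning, then run the relaxation optimizer before fixups are applied.
inline Error buildAndOptimizeGOTAndStubs(LinkGraph &G) {
  GOTTableManager GOT;
  PLTTableManager PLT(GOT);
  visitExistingEdges(G, GOT, PLT);
  return optimizeGOTAndStubAccesses(G);
}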

} // namespace llvm::jitlink::i386

#endif // LLVM_EXECUTIONENGINE_JITLINK_I386_H
//===-- JITLinkDylib.h - JITLink Dylib type ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the JITLinkDylib API.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_JITLINKDYLIB_H
#define LLVM_EXECUTIONENGINE_JITLINK_JITLINKDYLIB_H

#include <string>

namespace llvm {
namespace jitlink {

class JITLinkDylib {
public:
  JITLinkDylib(std::string Name) : Name(std::move(Name)) {}

  /// Get the name for this JITLinkDylib.
  const std::string &getName() const { return Name; }

private:
  std::string Name;
};

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_JITLINKDYLIB_H
//===--- ELF_x86_64.h - JIT link functions for ELF/x86-64 ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// jit-link functions for ELF/x86-64.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_X86_64_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_X86_64_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF/x86-64 relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject_x86_64(MemoryBufferRef ObjectBuffer);

/// jit-link the given object buffer, which must be an ELF/x86-64 object file.
void link_ELF_x86_64(std::unique_ptr<LinkGraph> G,
                     std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_X86_64_H
//===--------- EHFrameSupport.h - JITLink eh-frame utils --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// EHFrame registration support for JITLink.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORT_H
#define LLVM_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORT_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Support/Error.h"
#include "llvm/TargetParser/Triple.h"

namespace llvm {
namespace jitlink {

/// Inspect an eh-frame CFI record.
class EHFrameCFIBlockInspector {
public:
  /// Identify CFI record type and edges based on number and order of edges
  /// in the given block only. This assumes that the block contains one CFI
  /// record that has already been split out and fixed by the
  /// DWARFRecordSectionSplitter and EHFrameEdgeFixer passes.
  ///
  /// Zero or one outgoing edges: Record is CIE. If present, edge points to
  /// personality.
  ///
  /// Two or three outgoing edges: Record is an FDE. First edge points to CIE,
  /// second to PC-begin, third (if present) to LSDA.
  ///
  /// It is illegal to call this function on a block with four or more edges.
  static EHFrameCFIBlockInspector FromEdgeScan(Block &B);

  /// Returns true if this frame is an FDE, false for a CIE.
  bool isFDE() const { return CIEEdge != nullptr; }

  /// Returns true if this frame is a CIE, false for an FDE.
  bool isCIE() const { return CIEEdge == nullptr; }

  /// If this is a CIE record, returns the Edge pointing at the personality
  /// function, if any.
  /// It is illegal to call this method on FDE records.
  Edge *getPersonalityEdge() const {
    assert(isCIE() && "CFI record is not a CIE");
    return PersonalityEdge;
  }

  /// If this is an FDE record, returns the Edge pointing to the CIE.
  /// If this is a CIE record, returns null.
  ///
  /// The result is not valid if any modification has been made to the block
  /// after parsing.
  Edge *getCIEEdge() const { return CIEEdge; }

  /// If this is an FDE record, returns the Edge pointing at the PC-begin
  /// symbol.
  /// If this is a CIE record, returns null.
  Edge *getPCBeginEdge() const { return PCBeginEdge; }

  /// If this is an FDE record, returns the Edge pointing at the LSDA, if any.
  /// It is illegal to call this method on CIE records.
  Edge *getLSDAEdge() const {
    assert(isFDE() && "CFI record is not an FDE");
    return LSDAEdge;
  }

private:
  EHFrameCFIBlockInspector(Edge *PersonalityEdge);
  EHFrameCFIBlockInspector(Edge &CIEEdge, Edge &PCBeginEdge, Edge *LSDAEdge);

  Edge *CIEEdge = nullptr;
  Edge *PCBeginEdge = nullptr;
  union {
    Edge *PersonalityEdge;
    Edge *LSDAEdge;
  };
};

/// Supports registration/deregistration of EH-frames in a target process.
class EHFrameRegistrar {
public:
  virtual ~EHFrameRegistrar();
  virtual Error registerEHFrames(orc::ExecutorAddrRange EHFrameSection) = 0;
  virtual Error deregisterEHFrames(orc::ExecutorAddrRange EHFrameSection) = 0;
};

/// Registers / Deregisters EH-frames in the current process.
class InProcessEHFrameRegistrar final : public EHFrameRegistrar {
public:
  Error registerEHFrames(orc::ExecutorAddrRange EHFrameSection) override;

  Error deregisterEHFrames(orc::ExecutorAddrRange EHFrameSection) override;
};

using StoreFrameRangeFunction = std::function<void(
    orc::ExecutorAddr EHFrameSectionAddr, size_t EHFrameSectionSize)>;

/// Creates a pass that records the address and size of the EH frame section.
/// If no eh-frame section is found then the address and size will both be given
/// as zero.
///
/// Authors of JITLinkContexts can use this function to register a post-fixup
/// pass that records the range of the eh-frame section. This range can
/// be used after finalization to register and deregister the frame.
LinkGraphPassFunction
createEHFrameRecorderPass(const Triple &TT,
                          StoreFrameRangeFunction StoreFrameRange);
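
// A minimal usage sketch (assuming a PassConfiguration as defined in
// JITLink.h; the helper itself is hypothetical): capture the eh-frame range
// at post-fixup time so it can be registered after finalization.
inline void addEHFrameRecorder(PassConfiguration &Config, const Triple &TT,
                               orc::ExecutorAddr &Addr, size_t &Size) {
  Config.PostFixupPasses.push_back(createEHFrameRecorderPass(
      TT, [&Addr, &Size](orc::ExecutorAddr A, size_t S) {
        Addr = A;
        Size = S;
      }));
}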

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORT_H
//===------- ELF.h - Generic JIT link function for ELF ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic jit-link functions for ELF.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_ELF_H
#define LLVM_EXECUTIONENGINE_JITLINK_ELF_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

namespace llvm {
namespace jitlink {

/// Create a LinkGraph from an ELF relocatable object.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromELFObject(MemoryBufferRef ObjectBuffer);

/// Link the given graph.
///
/// Uses conservative defaults for GOT and stub handling based on the target
/// platform.
void link_ELF(std::unique_ptr<LinkGraph> G,
              std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_ELF_H
//===------------ JITLink.h - JIT linker functionality ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains generic JIT-linker types.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_JITLINK_H
#define LLVM_EXECUTIONENGINE_JITLINK_JITLINK_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorSymbolDef.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"
#include <optional>

#include <map>
#include <string>
#include <system_error>

namespace llvm {
namespace jitlink {

class LinkGraph;
class Symbol;
class Section;

/// Base class for errors originating in the JIT linker, e.g. missing
/// relocation support.
class JITLinkError : public ErrorInfo<JITLinkError> {
public:
  static char ID;

  JITLinkError(Twine ErrMsg) : ErrMsg(ErrMsg.str()) {}

  void log(raw_ostream &OS) const override;
  const std::string &getErrorMessage() const { return ErrMsg; }
  std::error_code convertToErrorCode() const override;

private:
  std::string ErrMsg;
};

/// Represents fixups and constraints in the LinkGraph.
class Edge {
public:
  using Kind = uint8_t;

  enum GenericEdgeKind : Kind {
    Invalid,                    // Invalid edge value.
    FirstKeepAlive,             // Keeps target alive. Offset/addend zero.
    KeepAlive = FirstKeepAlive, // Tag first edge kind that preserves liveness.
    FirstRelocation             // First architecture specific relocation.
  };

  using OffsetT = uint32_t;
  using AddendT = int64_t;

  Edge(Kind K, OffsetT Offset, Symbol &Target, AddendT Addend)
      : Target(&Target), Offset(Offset), Addend(Addend), K(K) {}

  OffsetT getOffset() const { return Offset; }
  void setOffset(OffsetT Offset) { this->Offset = Offset; }
  Kind getKind() const { return K; }
  void setKind(Kind K) { this->K = K; }
  bool isRelocation() const { return K >= FirstRelocation; }
  Kind getRelocation() const {
    assert(isRelocation() && "Not a relocation edge");
    return K - FirstRelocation;
  }
  bool isKeepAlive() const { return K >= FirstKeepAlive; }
  Symbol &getTarget() const { return *Target; }
  void setTarget(Symbol &Target) { this->Target = &Target; }
  AddendT getAddend() const { return Addend; }
  void setAddend(AddendT Addend) { this->Addend = Addend; }

private:
  Symbol *Target = nullptr;
  OffsetT Offset = 0;
  AddendT Addend = 0;
  Kind K = 0;
};

/// Returns the string name of the given generic edge kind, or "unknown" if
/// the kind is not recognized. Useful for debugging.
const char *getGenericEdgeKindName(Edge::Kind K);
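
// Illustrative example: a 32-bit PC-relative branch fixup 10 bytes into a
// block, with no addend, could be represented as
//   Edge E(x86_64::BranchPCRel32, 10, TargetSym, 0);
// where TargetSym is the Symbol the branch refers to.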

/// Base class for Addressable entities (externals, absolutes, blocks).
class Addressable {
  friend class LinkGraph;

protected:
  Addressable(orc::ExecutorAddr Address, bool IsDefined)
      : Address(Address), IsDefined(IsDefined), IsAbsolute(false) {}

  Addressable(orc::ExecutorAddr Address)
      : Address(Address), IsDefined(false), IsAbsolute(true) {
    assert(!(IsDefined && IsAbsolute) &&
           "Block cannot be both defined and absolute");
  }

public:
  Addressable(const Addressable &) = delete;
  Addressable &operator=(const Addressable &) = default;
  Addressable(Addressable &&) = delete;
  Addressable &operator=(Addressable &&) = default;

  orc::ExecutorAddr getAddress() const { return Address; }
  void setAddress(orc::ExecutorAddr Address) { this->Address = Address; }

  /// Returns true if this is a defined addressable, in which case you
  /// can downcast this to a Block.
  bool isDefined() const { return static_cast<bool>(IsDefined); }
  bool isAbsolute() const { return static_cast<bool>(IsAbsolute); }

private:
  void setAbsolute(bool IsAbsolute) {
    assert(!IsDefined && "Cannot change the Absolute flag on a defined block");
    this->IsAbsolute = IsAbsolute;
  }

  orc::ExecutorAddr Address;
  uint64_t IsDefined : 1;
  uint64_t IsAbsolute : 1;

protected:
  // bitfields for Block, allocated here to improve packing.
  uint64_t ContentMutable : 1;
  uint64_t P2Align : 5;
  uint64_t AlignmentOffset : 56;
};

using SectionOrdinal = unsigned;

/// An Addressable with content and edges.
class Block : public Addressable {
  friend class LinkGraph;

private:
  /// Create a zero-fill defined addressable.
  Block(Section &Parent, orc::ExecutorAddrDiff Size, orc::ExecutorAddr Address,
        uint64_t Alignment, uint64_t AlignmentOffset)
      : Addressable(Address, true), Parent(&Parent), Size(Size) {
    assert(isPowerOf2_64(Alignment) && "Alignment must be power of 2");
    assert(AlignmentOffset < Alignment &&
           "Alignment offset cannot exceed alignment");
    assert(AlignmentOffset <= MaxAlignmentOffset &&
           "Alignment offset exceeds maximum");
    ContentMutable = false;
    P2Align = Alignment ? llvm::countr_zero(Alignment) : 0;
    this->AlignmentOffset = AlignmentOffset;
  }

  /// Create a defined addressable for the given content.
  /// The Content is assumed to be non-writable, and will be copied when
  /// mutations are required.
  Block(Section &Parent, ArrayRef<char> Content, orc::ExecutorAddr Address,
        uint64_t Alignment, uint64_t AlignmentOffset)
      : Addressable(Address, true), Parent(&Parent), Data(Content.data()),
        Size(Content.size()) {
    assert(isPowerOf2_64(Alignment) && "Alignment must be power of 2");
    assert(AlignmentOffset < Alignment &&
           "Alignment offset cannot exceed alignment");
    assert(AlignmentOffset <= MaxAlignmentOffset &&
           "Alignment offset exceeds maximum");
    ContentMutable = false;
    P2Align = Alignment ? llvm::countr_zero(Alignment) : 0;
    this->AlignmentOffset = AlignmentOffset;
  }

  /// Create a defined addressable for the given content.
  /// The content is assumed to be writable, and the caller is responsible
  /// for ensuring that it lives for the duration of the Block's lifetime.
  /// The standard way to achieve this is to allocate it on the Graph's
  /// allocator.
  Block(Section &Parent, MutableArrayRef<char> Content,
        orc::ExecutorAddr Address, uint64_t Alignment, uint64_t AlignmentOffset)
      : Addressable(Address, true), Parent(&Parent), Data(Content.data()),
        Size(Content.size()) {
    assert(isPowerOf2_64(Alignment) && "Alignment must be power of 2");
    assert(AlignmentOffset < Alignment &&
           "Alignment offset cannot exceed alignment");
    assert(AlignmentOffset <= MaxAlignmentOffset &&
           "Alignment offset exceeds maximum");
    ContentMutable = true;
    P2Align = Alignment ? llvm::countr_zero(Alignment) : 0;
    this->AlignmentOffset = AlignmentOffset;
  }

public:
  using EdgeVector = std::vector<Edge>;
  using edge_iterator = EdgeVector::iterator;
  using const_edge_iterator = EdgeVector::const_iterator;

  Block(const Block &) = delete;
  Block &operator=(const Block &) = delete;
  Block(Block &&) = delete;
  Block &operator=(Block &&) = delete;

  /// Return the parent section for this block.
  Section &getSection() const { return *Parent; }

  /// Returns true if this is a zero-fill block.
  ///
  /// If true, getSize is callable but getContent is not (the content is
  /// defined to be a sequence of zero bytes of length Size).
  bool isZeroFill() const { return !Data; }

  /// Returns the size of this defined addressable.
  size_t getSize() const { return Size; }

  /// Returns the address range of this defined addressable.
  orc::ExecutorAddrRange getRange() const {
    return orc::ExecutorAddrRange(getAddress(), getSize());
  }

  /// Get the content for this block. Block must not be a zero-fill block.
  ArrayRef<char> getContent() const {
    assert(Data && "Block does not contain content");
    return ArrayRef<char>(Data, Size);
  }

  /// Set the content for this block.
  /// Caller is responsible for ensuring the underlying bytes are not
  /// deallocated while pointed to by this block.
  void setContent(ArrayRef<char> Content) {
    assert(Content.data() && "Setting null content");
    Data = Content.data();
    Size = Content.size();
    ContentMutable = false;
  }

  /// Get mutable content for this block.
  ///
  /// If this Block's content is not already mutable this will trigger a copy
  /// of the existing immutable content to a new, mutable buffer allocated using
  /// LinkGraph::allocateContent.
  MutableArrayRef<char> getMutableContent(LinkGraph &G);

  /// Get mutable content for this block.
  ///
  /// This block's content must already be mutable. It is a programmatic error
  /// to call this on a block with immutable content -- consider using
  /// getMutableContent instead.
  MutableArrayRef<char> getAlreadyMutableContent() {
    assert(Data && "Block does not contain content");
    assert(ContentMutable && "Content is not mutable");
    return MutableArrayRef<char>(const_cast<char *>(Data), Size);
  }

  /// Set mutable content for this block.
  ///
  /// The caller is responsible for ensuring that the memory pointed to by
  /// MutableContent is not deallocated while pointed to by this block.
  void setMutableContent(MutableArrayRef<char> MutableContent) {
    assert(MutableContent.data() && "Setting null content");
    Data = MutableContent.data();
    Size = MutableContent.size();
    ContentMutable = true;
  }

  /// Returns true if this block's content is mutable.
  ///
  /// This is primarily useful for asserting that a block is already in a
  /// mutable state prior to modifying the content. E.g. when applying
  /// fixups we expect the block to already be mutable as it should have been
  /// copied to working memory.
  bool isContentMutable() const { return ContentMutable; }

  /// Get the alignment for this content.
  uint64_t getAlignment() const { return 1ull << P2Align; }

  /// Set the alignment for this content.
  void setAlignment(uint64_t Alignment) {
    assert(isPowerOf2_64(Alignment) && "Alignment must be a power of two");
    P2Align = Alignment ? llvm::countr_zero(Alignment) : 0;
  }

  /// Get the alignment offset for this content.
  uint64_t getAlignmentOffset() const { return AlignmentOffset; }

  /// Set the alignment offset for this content.
  void setAlignmentOffset(uint64_t AlignmentOffset) {
    assert(AlignmentOffset < (1ull << P2Align) &&
           "Alignment offset can't exceed alignment");
    this->AlignmentOffset = AlignmentOffset;
  }

  /// Add an edge to this block.
  void addEdge(Edge::Kind K, Edge::OffsetT Offset, Symbol &Target,
               Edge::AddendT Addend) {
    assert((K == Edge::KeepAlive || !isZeroFill()) &&
           "Adding edge to zero-fill block?");
    Edges.push_back(Edge(K, Offset, Target, Addend));
  }

  /// Add an edge by copying an existing one. This is typically used when
  /// moving edges between blocks.
  void addEdge(const Edge &E) { Edges.push_back(E); }

  /// Return the list of edges attached to this content.
  iterator_range<edge_iterator> edges() {
    return make_range(Edges.begin(), Edges.end());
  }

  /// Returns the list of edges attached to this content.
  iterator_range<const_edge_iterator> edges() const {
    return make_range(Edges.begin(), Edges.end());
  }

  /// Return the size of the edges list.
  size_t edges_size() const { return Edges.size(); }

  /// Returns true if the list of edges is empty.
  bool edges_empty() const { return Edges.empty(); }

  /// Remove the edge pointed to by the given iterator.
  /// Returns an iterator to the new next element.
  edge_iterator removeEdge(edge_iterator I) { return Edges.erase(I); }

  /// Returns the address of the fixup for the given edge, which is equal to
  /// this block's address plus the edge's offset.
  orc::ExecutorAddr getFixupAddress(const Edge &E) const {
    return getAddress() + E.getOffset();
  }

private:
  static constexpr uint64_t MaxAlignmentOffset = (1ULL << 56) - 1;

  void setSection(Section &Parent) { this->Parent = &Parent; }

  Section *Parent;
  const char *Data = nullptr;
  size_t Size = 0;
  std::vector<Edge> Edges;
};
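
// Example (illustrative sketch, assuming an existing non-zero-fill Block B):
// visiting the fixup location of each edge attached to the block.
//
//   for (Edge &E : B.edges())
//     dbgs() << "fixup at " << B.getFixupAddress(E).getValue() << "\n";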

// Align an address to conform with block alignment requirements.
inline uint64_t alignToBlock(uint64_t Addr, const Block &B) {
  uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
  return Addr + Delta;
}

// Align an orc::ExecutorAddr to conform with block alignment requirements.
inline orc::ExecutorAddr alignToBlock(orc::ExecutorAddr Addr, const Block &B) {
  return orc::ExecutorAddr(alignToBlock(Addr.getValue(), B));
}
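
// Example (illustrative): for a block B with getAlignment() == 8 and
// getAlignmentOffset() == 2, alignToBlock(0x1000, B) returns 0x1002, the
// smallest address >= 0x1000 that is congruent to 2 modulo 8.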

// Returns true if the given block contains exactly one valid c-string.
// Zero-fill blocks of size 1 count as valid empty strings. Content blocks
// must end with a zero byte, and contain no zeros before the end.
bool isCStringBlock(Block &B);

/// Describes symbol linkage. This can be used to resolve definition clashes.
enum class Linkage : uint8_t {
  Strong,
  Weak,
};

/// Holds target-specific properties for a symbol.
using TargetFlagsType = uint8_t;

/// For errors and debugging output.
const char *getLinkageName(Linkage L);

/// Defines the scope in which this symbol should be visible:
///   Default -- Visible in the public interface of the linkage unit.
///   Hidden -- Visible within the linkage unit, but not exported from it.
///   Local -- Visible only within the LinkGraph.
enum class Scope : uint8_t {
  Default,
  Hidden,
  Local
};

/// For debugging output.
const char *getScopeName(Scope S);

raw_ostream &operator<<(raw_ostream &OS, const Block &B);

/// Symbol representation.
///
/// Symbols represent locations within Addressable objects.
/// They can be either Named or Anonymous.
/// Anonymous symbols have neither linkage nor visibility, and must point at
/// ContentBlocks.
/// Named symbols may be in one of four states:
///   - Null: Default initialized. Assignable, but otherwise unusable.
///   - Defined: Has both linkage and visibility, and points to a ContentBlock.
///   - Common: Has both linkage and visibility, points to a null Addressable.
///   - External: Has neither linkage nor visibility, points to an external
///     Addressable.
///
class Symbol {
  friend class LinkGraph;

private:
  Symbol(Addressable &Base, orc::ExecutorAddrDiff Offset, StringRef Name,
         orc::ExecutorAddrDiff Size, Linkage L, Scope S, bool IsLive,
         bool IsCallable)
      : Name(Name), Base(&Base), Offset(Offset), WeakRef(0), Size(Size) {
    assert(Offset <= MaxOffset && "Offset out of range");
    setLinkage(L);
    setScope(S);
    setLive(IsLive);
    setCallable(IsCallable);
    setTargetFlags(TargetFlagsType{});
  }

  static Symbol &constructExternal(BumpPtrAllocator &Allocator,
                                   Addressable &Base, StringRef Name,
                                   orc::ExecutorAddrDiff Size, Linkage L,
                                   bool WeaklyReferenced) {
    assert(!Base.isDefined() &&
           "Cannot create external symbol from defined block");
    assert(!Name.empty() && "External symbol name cannot be empty");
    auto *Sym = Allocator.Allocate<Symbol>();
    new (Sym) Symbol(Base, 0, Name, Size, L, Scope::Default, false, false);
    Sym->setWeaklyReferenced(WeaklyReferenced);
    return *Sym;
  }

  static Symbol &constructAbsolute(BumpPtrAllocator &Allocator,
                                   Addressable &Base, StringRef Name,
                                   orc::ExecutorAddrDiff Size, Linkage L,
                                   Scope S, bool IsLive) {
    assert(!Base.isDefined() &&
           "Cannot create absolute symbol from a defined block");
    auto *Sym = Allocator.Allocate<Symbol>();
    new (Sym) Symbol(Base, 0, Name, Size, L, S, IsLive, false);
    return *Sym;
  }

  static Symbol &constructAnonDef(BumpPtrAllocator &Allocator, Block &Base,
                                  orc::ExecutorAddrDiff Offset,
                                  orc::ExecutorAddrDiff Size, bool IsCallable,
                                  bool IsLive) {
    assert((Offset + Size) <= Base.getSize() &&
           "Symbol extends past end of block");
    auto *Sym = Allocator.Allocate<Symbol>();
    new (Sym) Symbol(Base, Offset, StringRef(), Size, Linkage::Strong,
                     Scope::Local, IsLive, IsCallable);
    return *Sym;
  }

  static Symbol &constructNamedDef(BumpPtrAllocator &Allocator, Block &Base,
                                   orc::ExecutorAddrDiff Offset, StringRef Name,
                                   orc::ExecutorAddrDiff Size, Linkage L,
                                   Scope S, bool IsLive, bool IsCallable) {
    assert((Offset + Size) <= Base.getSize() &&
           "Symbol extends past end of block");
    assert(!Name.empty() && "Name cannot be empty");
    auto *Sym = Allocator.Allocate<Symbol>();
    new (Sym) Symbol(Base, Offset, Name, Size, L, S, IsLive, IsCallable);
    return *Sym;
  }

public:
  /// Create a null Symbol. This allows Symbols to be default initialized for
  /// use in containers (e.g. as map values). Null symbols are only useful for
  /// assigning to.
  Symbol() = default;

  // Symbols are not movable or copyable.
  Symbol(const Symbol &) = delete;
  Symbol &operator=(const Symbol &) = delete;
  Symbol(Symbol &&) = delete;
  Symbol &operator=(Symbol &&) = delete;

  /// Returns true if this symbol has a name.
  bool hasName() const { return !Name.empty(); }

  /// Returns the name of this symbol (empty if the symbol is anonymous).
  StringRef getName() const {
    assert((!Name.empty() || getScope() == Scope::Local) &&
           "Anonymous symbol has non-local scope");
    return Name;
  }

  /// Rename this symbol. The client is responsible for updating scope and
  /// linkage if this name-change requires it.
  void setName(StringRef Name) { this->Name = Name; }

  /// Returns true if this Symbol has content (potentially) defined within this
  /// object file (i.e. is anything but an external or absolute symbol).
  bool isDefined() const {
    assert(Base && "Attempt to access null symbol");
    return Base->isDefined();
  }

  /// Returns true if this symbol is live (i.e. should be treated as a root for
  /// dead stripping).
  bool isLive() const {
    assert(Base && "Attempting to access null symbol");
    return IsLive;
  }

  /// Set this symbol's live bit.
  void setLive(bool IsLive) { this->IsLive = IsLive; }

  /// Returns true if this symbol is callable.
  bool isCallable() const { return IsCallable; }

  /// Set this symbol's callable bit.
  void setCallable(bool IsCallable) { this->IsCallable = IsCallable; }

  /// Returns true if the underlying addressable is an unresolved external.
  bool isExternal() const {
    assert(Base && "Attempt to access null symbol");
    return !Base->isDefined() && !Base->isAbsolute();
  }

  /// Returns true if the underlying addressable is an absolute symbol.
  bool isAbsolute() const {
    assert(Base && "Attempt to access null symbol");
    return Base->isAbsolute();
  }

  /// Return the addressable that this symbol points to.
  Addressable &getAddressable() {
    assert(Base && "Cannot get underlying addressable for null symbol");
    return *Base;
  }

  /// Return the addressable that this symbol points to.
  const Addressable &getAddressable() const {
    assert(Base && "Cannot get underlying addressable for null symbol");
    return *Base;
  }

  /// Return the Block for this Symbol (Symbol must be defined).
  Block &getBlock() {
    assert(Base && "Cannot get block for null symbol");
    assert(Base->isDefined() && "Not a defined symbol");
    return static_cast<Block &>(*Base);
  }

  /// Return the Block for this Symbol (Symbol must be defined).
  const Block &getBlock() const {
    assert(Base && "Cannot get block for null symbol");
    assert(Base->isDefined() && "Not a defined symbol");
    return static_cast<const Block &>(*Base);
  }

  /// Returns the offset for this symbol within the underlying addressable.
  orc::ExecutorAddrDiff getOffset() const { return Offset; }

  void setOffset(orc::ExecutorAddrDiff NewOffset) {
    assert(NewOffset < getBlock().getSize() && "Offset out of range");
    Offset = NewOffset;
  }

  /// Returns the address of this symbol.
  orc::ExecutorAddr getAddress() const { return Base->getAddress() + Offset; }

  /// Returns the size of this symbol.
  orc::ExecutorAddrDiff getSize() const { return Size; }

  /// Set the size of this symbol.
  void setSize(orc::ExecutorAddrDiff Size) {
    assert(Base && "Cannot set size for null Symbol");
    assert((Size == 0 || Base->isDefined()) &&
           "Non-zero size can only be set for defined symbols");
    assert((Offset + Size <= static_cast<const Block &>(*Base).getSize()) &&
           "Symbol size cannot extend past the end of its containing block");
    this->Size = Size;
  }

  /// Returns the address range of this symbol.
  orc::ExecutorAddrRange getRange() const {
    return orc::ExecutorAddrRange(getAddress(), getSize());
  }

  /// Returns true if this symbol is backed by a zero-fill block.
  /// This method may only be called on defined symbols.
  bool isSymbolZeroFill() const { return getBlock().isZeroFill(); }

  /// Returns the content in the underlying block covered by this symbol.
  /// This method may only be called on defined non-zero-fill symbols.
  ArrayRef<char> getSymbolContent() const {
    return getBlock().getContent().slice(Offset, Size);
  }

  /// Get the linkage for this Symbol.
  Linkage getLinkage() const { return static_cast<Linkage>(L); }

  /// Set the linkage for this Symbol.
  void setLinkage(Linkage L) {
    assert((L == Linkage::Strong || (!Base->isAbsolute() && !Name.empty())) &&
           "Linkage can only be applied to defined named symbols");
    this->L = static_cast<uint8_t>(L);
  }

  /// Get the visibility for this Symbol.
  Scope getScope() const { return static_cast<Scope>(S); }

  /// Set the visibility for this Symbol.
  void setScope(Scope S) {
    assert((!Name.empty() || S == Scope::Local) &&
           "Cannot set anonymous symbol to non-local scope");
    assert((S != Scope::Local || Base->isDefined() || Base->isAbsolute()) &&
           "Invalid visibility for symbol type");
    this->S = static_cast<uint8_t>(S);
  }

  /// Check whether the given target flags are set for this Symbol.
  bool hasTargetFlags(TargetFlagsType Flags) const {
    return static_cast<TargetFlagsType>(TargetFlags) & Flags;
  }

  /// Set the target flags for this Symbol.
  void setTargetFlags(TargetFlagsType Flags) {
    assert(Flags <= 1 && "Add more bits to store more than a single flag");
    TargetFlags = Flags;
  }

  /// Returns true if this is a weakly referenced external symbol.
  /// This method may only be called on external symbols.
  bool isWeaklyReferenced() const {
    assert(isExternal() && "isWeaklyReferenced called on non-external");
    return WeakRef;
  }

  /// Set the WeaklyReferenced value for this symbol.
  /// This method may only be called on external symbols.
  void setWeaklyReferenced(bool WeakRef) {
    assert(isExternal() && "setWeaklyReferenced called on non-external");
    this->WeakRef = WeakRef;
  }

private:
  void makeExternal(Addressable &A) {
    assert(!A.isDefined() && !A.isAbsolute() &&
           "Attempting to make external with defined or absolute block");
    Base = &A;
    Offset = 0;
    setScope(Scope::Default);
    IsLive = 0;
    // Note: Size, Linkage, and IsCallable fields are left unchanged.
  }

  void makeAbsolute(Addressable &A) {
    assert(!A.isDefined() && A.isAbsolute() &&
           "Attempting to make absolute with defined or external block");
    Base = &A;
    Offset = 0;
  }

  void setBlock(Block &B) { Base = &B; }

  // Must match the width of the Offset bitfield below.
  static constexpr uint64_t MaxOffset = (1ULL << 57) - 1;

  // FIXME: A char* or SymbolStringPtr may pack better.
  StringRef Name;
  Addressable *Base = nullptr;
  uint64_t Offset : 57;
  uint64_t L : 1;
  uint64_t S : 2;
  uint64_t IsLive : 1;
  uint64_t IsCallable : 1;
  uint64_t WeakRef : 1;
  uint64_t TargetFlags : 1;
  size_t Size = 0;
};

raw_ostream &operator<<(raw_ostream &OS, const Symbol &A);

void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
               StringRef EdgeKindName);

/// Represents an object file section.
class Section {
  friend class LinkGraph;

private:
  Section(StringRef Name, orc::MemProt Prot, SectionOrdinal SecOrdinal)
      : Name(Name), Prot(Prot), SecOrdinal(SecOrdinal) {}

  using SymbolSet = DenseSet<Symbol *>;
  using BlockSet = DenseSet<Block *>;

public:
  using symbol_iterator = SymbolSet::iterator;
  using const_symbol_iterator = SymbolSet::const_iterator;

  using block_iterator = BlockSet::iterator;
  using const_block_iterator = BlockSet::const_iterator;

  ~Section();

  // Sections are not movable or copyable.
  Section(const Section &) = delete;
  Section &operator=(const Section &) = delete;
  Section(Section &&) = delete;
  Section &operator=(Section &&) = delete;

  /// Returns the name of this section.
  StringRef getName() const { return Name; }

  /// Returns the protection flags for this section.
  orc::MemProt getMemProt() const { return Prot; }

  /// Set the protection flags for this section.
  void setMemProt(orc::MemProt Prot) { this->Prot = Prot; }

  /// Get the memory lifetime policy for this section.
  orc::MemLifetimePolicy getMemLifetimePolicy() const { return MLP; }

  /// Set the memory lifetime policy for this section.
  void setMemLifetimePolicy(orc::MemLifetimePolicy MLP) { this->MLP = MLP; }

  /// Returns the ordinal for this section.
  SectionOrdinal getOrdinal() const { return SecOrdinal; }

  /// Returns true if this section is empty (contains no blocks or symbols).
  bool empty() const { return Blocks.empty(); }

  /// Returns an iterator over the blocks defined in this section.
  iterator_range<block_iterator> blocks() {
    return make_range(Blocks.begin(), Blocks.end());
  }

  /// Returns an iterator over the blocks defined in this section.
  iterator_range<const_block_iterator> blocks() const {
    return make_range(Blocks.begin(), Blocks.end());
  }

  /// Returns the number of blocks in this section.
  BlockSet::size_type blocks_size() const { return Blocks.size(); }

  /// Returns an iterator over the symbols defined in this section.
  iterator_range<symbol_iterator> symbols() {
    return make_range(Symbols.begin(), Symbols.end());
  }

  /// Returns an iterator over the symbols defined in this section.
  iterator_range<const_symbol_iterator> symbols() const {
    return make_range(Symbols.begin(), Symbols.end());
  }

  /// Return the number of symbols in this section.
  SymbolSet::size_type symbols_size() const { return Symbols.size(); }

private:
  void addSymbol(Symbol &Sym) {
    assert(!Symbols.count(&Sym) && "Symbol is already in this section");
    Symbols.insert(&Sym);
  }

  void removeSymbol(Symbol &Sym) {
    assert(Symbols.count(&Sym) && "symbol is not in this section");
    Symbols.erase(&Sym);
  }

  void addBlock(Block &B) {
    assert(!Blocks.count(&B) && "Block is already in this section");
    Blocks.insert(&B);
  }

  void removeBlock(Block &B) {
    assert(Blocks.count(&B) && "Block is not in this section");
    Blocks.erase(&B);
  }

  void transferContentTo(Section &DstSection) {
    if (&DstSection == this)
      return;
    for (auto *S : Symbols)
      DstSection.addSymbol(*S);
    for (auto *B : Blocks)
      DstSection.addBlock(*B);
    Symbols.clear();
    Blocks.clear();
  }

  StringRef Name;
  orc::MemProt Prot;
  orc::MemLifetimePolicy MLP = orc::MemLifetimePolicy::Standard;
  SectionOrdinal SecOrdinal = 0;
  BlockSet Blocks;
  SymbolSet Symbols;
};

/// Represents a section address range via a pair of Block pointers
/// to the first and last Blocks in the section.
class SectionRange {
public:
  SectionRange() = default;
  SectionRange(const Section &Sec) {
    if (Sec.blocks().empty())
      return;
    First = Last = *Sec.blocks().begin();
    for (auto *B : Sec.blocks()) {
      if (B->getAddress() < First->getAddress())
        First = B;
      if (B->getAddress() > Last->getAddress())
        Last = B;
    }
  }
  Block *getFirstBlock() const {
    assert((!Last || First) && "First cannot be null if end is non-null");
    return First;
  }
  Block *getLastBlock() const {
    assert((First || !Last) && "Last cannot be null if start is non-null");
    return Last;
  }
  bool empty() const {
    assert((First || !Last) && "Last cannot be null if start is non-null");
    return !First;
  }
  orc::ExecutorAddr getStart() const {
    return First ? First->getAddress() : orc::ExecutorAddr();
  }
  orc::ExecutorAddr getEnd() const {
    return Last ? Last->getAddress() + Last->getSize() : orc::ExecutorAddr();
  }
  orc::ExecutorAddrDiff getSize() const { return getEnd() - getStart(); }

  orc::ExecutorAddrRange getRange() const {
    return orc::ExecutorAddrRange(getStart(), getEnd());
  }

private:
  Block *First = nullptr;
  Block *Last = nullptr;
};

class LinkGraph {
private:
  using SectionMap = DenseMap<StringRef, std::unique_ptr<Section>>;
  using ExternalSymbolSet = DenseSet<Symbol *>;
  using BlockSet = DenseSet<Block *>;

  template <typename... ArgTs>
  Addressable &createAddressable(ArgTs &&... Args) {
    Addressable *A =
        reinterpret_cast<Addressable *>(Allocator.Allocate<Addressable>());
    new (A) Addressable(std::forward<ArgTs>(Args)...);
    return *A;
  }

  void destroyAddressable(Addressable &A) {
    A.~Addressable();
    Allocator.Deallocate(&A);
  }

  template <typename... ArgTs> Block &createBlock(ArgTs &&... Args) {
    Block *B = reinterpret_cast<Block *>(Allocator.Allocate<Block>());
    new (B) Block(std::forward<ArgTs>(Args)...);
    B->getSection().addBlock(*B);
    return *B;
  }

  void destroyBlock(Block &B) {
    B.~Block();
    Allocator.Deallocate(&B);
  }

  void destroySymbol(Symbol &S) {
    S.~Symbol();
    Allocator.Deallocate(&S);
  }

  static iterator_range<Section::block_iterator> getSectionBlocks(Section &S) {
    return S.blocks();
  }

  static iterator_range<Section::const_block_iterator>
  getSectionConstBlocks(const Section &S) {
    return S.blocks();
  }

  static iterator_range<Section::symbol_iterator>
  getSectionSymbols(Section &S) {
    return S.symbols();
  }

  static iterator_range<Section::const_symbol_iterator>
  getSectionConstSymbols(const Section &S) {
    return S.symbols();
  }

  struct GetSectionMapEntryValue {
    Section &operator()(SectionMap::value_type &KV) const { return *KV.second; }
  };

  struct GetSectionMapEntryConstValue {
    const Section &operator()(const SectionMap::value_type &KV) const {
      return *KV.second;
    }
  };

public:
  using external_symbol_iterator = ExternalSymbolSet::iterator;

  using section_iterator =
      mapped_iterator<SectionMap::iterator, GetSectionMapEntryValue>;
  using const_section_iterator =
      mapped_iterator<SectionMap::const_iterator, GetSectionMapEntryConstValue>;

  template <typename OuterItrT, typename InnerItrT, typename T,
            iterator_range<InnerItrT> getInnerRange(
                typename OuterItrT::reference)>
  class nested_collection_iterator
      : public iterator_facade_base<
            nested_collection_iterator<OuterItrT, InnerItrT, T, getInnerRange>,
            std::forward_iterator_tag, T> {
  public:
    nested_collection_iterator() = default;

    nested_collection_iterator(OuterItrT OuterI, OuterItrT OuterE)
        : OuterI(OuterI), OuterE(OuterE),
          InnerI(getInnerBegin(OuterI, OuterE)) {
      moveToNonEmptyInnerOrEnd();
    }

    bool operator==(const nested_collection_iterator &RHS) const {
      return (OuterI == RHS.OuterI) && (InnerI == RHS.InnerI);
    }

    T operator*() const {
      assert(InnerI != getInnerRange(*OuterI).end() && "Dereferencing end?");
      return *InnerI;
    }

    nested_collection_iterator operator++() {
      ++InnerI;
      moveToNonEmptyInnerOrEnd();
      return *this;
    }

  private:
    static InnerItrT getInnerBegin(OuterItrT OuterI, OuterItrT OuterE) {
      return OuterI != OuterE ? getInnerRange(*OuterI).begin() : InnerItrT();
    }

    void moveToNonEmptyInnerOrEnd() {
      while (OuterI != OuterE && InnerI == getInnerRange(*OuterI).end()) {
        ++OuterI;
        InnerI = getInnerBegin(OuterI, OuterE);
      }
    }

    OuterItrT OuterI, OuterE;
    InnerItrT InnerI;
  };

  using defined_symbol_iterator =
      nested_collection_iterator<section_iterator, Section::symbol_iterator,
                                 Symbol *, getSectionSymbols>;

  using const_defined_symbol_iterator =
      nested_collection_iterator<const_section_iterator,
                                 Section::const_symbol_iterator, const Symbol *,
                                 getSectionConstSymbols>;

  using block_iterator =
      nested_collection_iterator<section_iterator, Section::block_iterator,
                                 Block *, getSectionBlocks>;

  using const_block_iterator =
      nested_collection_iterator<const_section_iterator,
                                 Section::const_block_iterator, const Block *,
                                 getSectionConstBlocks>;

  using GetEdgeKindNameFunction = const char *(*)(Edge::Kind);

  LinkGraph(std::string Name, const Triple &TT, SubtargetFeatures Features,
            unsigned PointerSize, support::endianness Endianness,
            GetEdgeKindNameFunction GetEdgeKindName)
      : Name(std::move(Name)), TT(TT), Features(std::move(Features)),
        PointerSize(PointerSize), Endianness(Endianness),
        GetEdgeKindName(std::move(GetEdgeKindName)) {}

  LinkGraph(std::string Name, const Triple &TT, unsigned PointerSize,
            support::endianness Endianness,
            GetEdgeKindNameFunction GetEdgeKindName)
      : LinkGraph(std::move(Name), TT, SubtargetFeatures(), PointerSize,
                  Endianness, GetEdgeKindName) {}

  LinkGraph(const LinkGraph &) = delete;
  LinkGraph &operator=(const LinkGraph &) = delete;
  LinkGraph(LinkGraph &&) = delete;
  LinkGraph &operator=(LinkGraph &&) = delete;

  /// Returns the name of this graph (usually the name of the original
  /// underlying MemoryBuffer).
  const std::string &getName() const { return Name; }

  /// Returns the target triple for this Graph.
  const Triple &getTargetTriple() const { return TT; }

  /// Return the subtarget features for this Graph.
  const SubtargetFeatures &getFeatures() const { return Features; }

  /// Returns the pointer size for use in this graph.
  unsigned getPointerSize() const { return PointerSize; }

  /// Returns the endianness of content in this graph.
  support::endianness getEndianness() const { return Endianness; }

  const char *getEdgeKindName(Edge::Kind K) const { return GetEdgeKindName(K); }

  /// Allocate a mutable buffer of the given size using the LinkGraph's
  /// allocator.
  MutableArrayRef<char> allocateBuffer(size_t Size) {
    return {Allocator.Allocate<char>(Size), Size};
  }

  /// Allocate a copy of the given content using the LinkGraph's allocator.
  /// This can be useful when renaming symbols or adding new content to the
  /// graph.
  MutableArrayRef<char> allocateContent(ArrayRef<char> Source) {
    auto *AllocatedBuffer = Allocator.Allocate<char>(Source.size());
    llvm::copy(Source, AllocatedBuffer);
    return MutableArrayRef<char>(AllocatedBuffer, Source.size());
  }

  /// Allocate a copy of the given string using the LinkGraph's allocator.
  /// This can be useful when renaming symbols or adding new content to the
  /// graph.
  ///
  /// Note: This Twine-based overload requires an extra string copy and an
  /// extra heap allocation for large strings. The ArrayRef<char> overload
  /// should be preferred where possible.
  MutableArrayRef<char> allocateContent(Twine Source) {
    SmallString<256> TmpBuffer;
    auto SourceStr = Source.toStringRef(TmpBuffer);
    auto *AllocatedBuffer = Allocator.Allocate<char>(SourceStr.size());
    llvm::copy(SourceStr, AllocatedBuffer);
    return MutableArrayRef<char>(AllocatedBuffer, SourceStr.size());
  }

  /// Allocate a copy of the given string using the LinkGraph's allocator.
  ///
  /// The allocated string will be terminated with a null character, and the
  /// returned MutableArrayRef will include this null character in the last
  /// position.
  MutableArrayRef<char> allocateCString(StringRef Source) {
    char *AllocatedBuffer = Allocator.Allocate<char>(Source.size() + 1);
    llvm::copy(Source, AllocatedBuffer);
    AllocatedBuffer[Source.size()] = '\0';
    return MutableArrayRef<char>(AllocatedBuffer, Source.size() + 1);
  }

  /// Allocate a copy of the given string using the LinkGraph's allocator.
  ///
  /// The allocated string will be terminated with a null character, and the
  /// returned MutableArrayRef will include this null character in the last
  /// position.
  ///
  /// Note: This Twine-based overload requires an extra string copy and an
  /// extra heap allocation for large strings. The StringRef overload should
  /// be preferred where possible.
  MutableArrayRef<char> allocateCString(Twine Source) {
    SmallString<256> TmpBuffer;
    auto SourceStr = Source.toStringRef(TmpBuffer);
    auto *AllocatedBuffer = Allocator.Allocate<char>(SourceStr.size() + 1);
    llvm::copy(SourceStr, AllocatedBuffer);
    AllocatedBuffer[SourceStr.size()] = '\0';
    return MutableArrayRef<char>(AllocatedBuffer, SourceStr.size() + 1);
  }
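
  // Example (illustrative, assuming a LinkGraph G): allocateCString copies
  // give graph-owned storage that can safely back block content.
  //
  //   auto CStr = G.allocateCString(StringRef("foo"));
  //   // CStr.size() == 4 and CStr.back() == '\0'.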

  /// Create a section with the given name and protection flags.
  Section &createSection(StringRef Name, orc::MemProt Prot) {
    assert(!Sections.count(Name) && "Duplicate section name");
    std::unique_ptr<Section> Sec(new Section(Name, Prot, Sections.size()));
    return *Sections.insert(std::make_pair(Name, std::move(Sec))).first->second;
  }

  /// Create a content block.
  Block &createContentBlock(Section &Parent, ArrayRef<char> Content,
                            orc::ExecutorAddr Address, uint64_t Alignment,
                            uint64_t AlignmentOffset) {
    return createBlock(Parent, Content, Address, Alignment, AlignmentOffset);
  }

  /// Create a content block with initially mutable data.
  Block &createMutableContentBlock(Section &Parent,
                                   MutableArrayRef<char> MutableContent,
                                   orc::ExecutorAddr Address,
                                   uint64_t Alignment,
                                   uint64_t AlignmentOffset) {
    return createBlock(Parent, MutableContent, Address, Alignment,
                       AlignmentOffset);
  }

  /// Create a content block with initially mutable data of the given size.
  /// Content will be allocated via the LinkGraph's allocateBuffer method.
  /// By default the memory will be zero-initialized. Passing false for
  /// ZeroInitialize will prevent this.
  Block &createMutableContentBlock(Section &Parent, size_t ContentSize,
                                   orc::ExecutorAddr Address,
                                   uint64_t Alignment, uint64_t AlignmentOffset,
                                   bool ZeroInitialize = true) {
    auto Content = allocateBuffer(ContentSize);
    if (ZeroInitialize)
      memset(Content.data(), 0, Content.size());
    return createBlock(Parent, Content, Address, Alignment, AlignmentOffset);
  }

  /// Create a zero-fill block.
  Block &createZeroFillBlock(Section &Parent, orc::ExecutorAddrDiff Size,
                             orc::ExecutorAddr Address, uint64_t Alignment,
                             uint64_t AlignmentOffset) {
    return createBlock(Parent, Size, Address, Alignment, AlignmentOffset);
  }

  /// Returns a BinaryStreamReader for the given block.
  BinaryStreamReader getBlockContentReader(Block &B) {
    ArrayRef<uint8_t> C(
        reinterpret_cast<const uint8_t *>(B.getContent().data()), B.getSize());
    return BinaryStreamReader(C, getEndianness());
  }

  /// Returns a BinaryStreamWriter for the given block.
  /// This will call getMutableContent to obtain mutable content for the block.
  BinaryStreamWriter getBlockContentWriter(Block &B) {
    MutableArrayRef<uint8_t> C(
        reinterpret_cast<uint8_t *>(B.getMutableContent(*this).data()),
        B.getSize());
    return BinaryStreamWriter(C, getEndianness());
  }
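
  // Example (illustrative sketch, assuming a LinkGraph G and a content block
  // B of at least 8 bytes): write a value using the graph's endianness.
  //
  //   BinaryStreamWriter Writer = G.getBlockContentWriter(B);
  //   if (auto Err = Writer.writeInteger<uint64_t>(0xdeadbeefULL))
  //     return Err;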

  /// Cache type for the splitBlock function.
  using SplitBlockCache = std::optional<SmallVector<Symbol *, 8>>;

  /// Splits block B at the given index, which must be greater than zero.
  /// If SplitIndex == B.getSize() then this function is a no-op and returns B.
  /// If SplitIndex < B.getSize() then this function returns a new block
  /// covering the range [ 0, SplitIndex ), and B is modified to cover the range
  /// [ SplitIndex, B.getSize() ).
  ///
  /// The optional Cache parameter can be used to speed up repeated calls to
  /// splitBlock for a single block. If the value is std::nullopt the cache
  /// will be treated as uninitialized and splitBlock will populate it.
  /// Otherwise it is assumed to contain the list of Symbols pointing at B,
  /// sorted in descending order of offset.
  ///
  /// Notes:
  ///
  /// 1. splitBlock must be used with care. Splitting a block may cause
  ///    incoming edges to become invalid if the edge target subexpression
  ///    points outside the bounds of the newly split target block (E.g. an
  ///    edge 'S + 10 : Pointer64' where S points to a newly split block
  ///    whose size is less than 10). No attempt is made to detect invalidation
  ///    of incoming edges, as in general this requires context that the
  ///    LinkGraph does not have. Clients are responsible for ensuring that
  ///    splitBlock is not used in a way that invalidates edges.
  ///
  /// 2. The newly introduced block will have a new ordinal which will be
  ///    higher than any other ordinals in the section. Clients are responsible
  ///    for re-assigning block ordinals to restore a compatible order if
  ///    needed.
  ///
  /// 3. The cache is not automatically updated if new symbols are introduced
  ///    between calls to splitBlock. Any newly introduced symbols may be
  ///    added to the cache manually (descending offset order must be
  ///    preserved), or the cache can be set to std::nullopt and rebuilt by
  ///    splitBlock on the next call.
  Block &splitBlock(Block &B, size_t SplitIndex,
                    SplitBlockCache *Cache = nullptr);
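
  // Example (illustrative sketch, assuming a Block B of at least 16 bytes in
  // graph G): repeated splits of one block can share a single cache.
  //
  //   LinkGraph::SplitBlockCache Cache = std::nullopt;
  //   Block &Head = G.splitBlock(B, 16, &Cache); // Head covers old [0, 16).
  //   // B now covers [16, oldSize); Cache holds the symbols pointing at B,
  //   // sorted in descending order of offset.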

  /// Add an external symbol.
  /// Some formats (e.g. ELF) allow Symbols to have sizes. For Symbols whose
  /// size is not known, you should substitute '0'.
  /// The IsWeaklyReferenced argument determines whether the symbol must be
  /// present during lookup: Externals that are strongly referenced must be
  /// found or an error will be emitted. Externals that are weakly referenced
  /// are permitted to be undefined, in which case they are assigned an address
  /// of 0.
  Symbol &addExternalSymbol(StringRef Name, orc::ExecutorAddrDiff Size,
                            bool IsWeaklyReferenced) {
    assert(llvm::count_if(ExternalSymbols,
                          [&](const Symbol *Sym) {
                            return Sym->getName() == Name;
                          }) == 0 &&
           "Duplicate external symbol");
    auto &Sym = Symbol::constructExternal(
        Allocator, createAddressable(orc::ExecutorAddr(), false), Name, Size,
        Linkage::Strong, IsWeaklyReferenced);
    ExternalSymbols.insert(&Sym);
    return Sym;
  }

  /// Add an absolute symbol.
  Symbol &addAbsoluteSymbol(StringRef Name, orc::ExecutorAddr Address,
                            orc::ExecutorAddrDiff Size, Linkage L, Scope S,
                            bool IsLive) {
    assert((S == Scope::Local || llvm::count_if(AbsoluteSymbols,
                                               [&](const Symbol *Sym) {
                                                 return Sym->getName() == Name;
                                               }) == 0) &&
                                    "Duplicate absolute symbol");
    auto &Sym = Symbol::constructAbsolute(Allocator, createAddressable(Address),
                                          Name, Size, L, S, IsLive);
    AbsoluteSymbols.insert(&Sym);
    return Sym;
  }

  /// Add an anonymous symbol.
  Symbol &addAnonymousSymbol(Block &Content, orc::ExecutorAddrDiff Offset,
                             orc::ExecutorAddrDiff Size, bool IsCallable,
                             bool IsLive) {
    auto &Sym = Symbol::constructAnonDef(Allocator, Content, Offset, Size,
                                         IsCallable, IsLive);
    Content.getSection().addSymbol(Sym);
    return Sym;
  }

  /// Add a named symbol.
  Symbol &addDefinedSymbol(Block &Content, orc::ExecutorAddrDiff Offset,
                           StringRef Name, orc::ExecutorAddrDiff Size,
                           Linkage L, Scope S, bool IsCallable, bool IsLive) {
    assert((S == Scope::Local || llvm::count_if(defined_symbols(),
                                                [&](const Symbol *Sym) {
                                                  return Sym->getName() == Name;
                                                }) == 0) &&
           "Duplicate defined symbol");
    auto &Sym = Symbol::constructNamedDef(Allocator, Content, Offset, Name,
                                          Size, L, S, IsLive, IsCallable);
    Content.getSection().addSymbol(Sym);
    return Sym;
  }
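
  // Example (illustrative sketch; the section and symbol names are
  // hypothetical): building a minimal graph with one defined symbol and one
  // external reference connected by an edge.
  //
  //   Section &Data =
  //       G.createSection("__data", orc::MemProt::Read | orc::MemProt::Write);
  //   Block &B =
  //       G.createMutableContentBlock(Data, 8, orc::ExecutorAddr(), 8, 0);
  //   G.addDefinedSymbol(B, 0, "foo", 8, Linkage::Strong, Scope::Default,
  //                      /*IsCallable=*/false, /*IsLive=*/true);
  //   Symbol &Bar = G.addExternalSymbol("bar", 0, /*IsWeaklyReferenced=*/false);
  //   B.addEdge(Edge::FirstRelocation, 0, Bar, 0); // Kind is target-specific.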

  iterator_range<section_iterator> sections() {
    return make_range(
        section_iterator(Sections.begin(), GetSectionMapEntryValue()),
        section_iterator(Sections.end(), GetSectionMapEntryValue()));
  }

  iterator_range<const_section_iterator> sections() const {
    return make_range(
        const_section_iterator(Sections.begin(),
                               GetSectionMapEntryConstValue()),
        const_section_iterator(Sections.end(), GetSectionMapEntryConstValue()));
  }

  size_t sections_size() const { return Sections.size(); }

  /// Returns the section with the given name if it exists, otherwise returns
  /// null.
  Section *findSectionByName(StringRef Name) {
    auto I = Sections.find(Name);
    if (I == Sections.end())
      return nullptr;
    return I->second.get();
  }

  iterator_range<block_iterator> blocks() {
    auto Secs = sections();
    return make_range(block_iterator(Secs.begin(), Secs.end()),
                      block_iterator(Secs.end(), Secs.end()));
  }

  iterator_range<const_block_iterator> blocks() const {
    auto Secs = sections();
    return make_range(const_block_iterator(Secs.begin(), Secs.end()),
                      const_block_iterator(Secs.end(), Secs.end()));
  }

  iterator_range<external_symbol_iterator> external_symbols() {
    return make_range(ExternalSymbols.begin(), ExternalSymbols.end());
  }

  iterator_range<external_symbol_iterator> absolute_symbols() {
    return make_range(AbsoluteSymbols.begin(), AbsoluteSymbols.end());
  }

  iterator_range<defined_symbol_iterator> defined_symbols() {
    auto Secs = sections();
    return make_range(defined_symbol_iterator(Secs.begin(), Secs.end()),
                      defined_symbol_iterator(Secs.end(), Secs.end()));
  }

  iterator_range<const_defined_symbol_iterator> defined_symbols() const {
    auto Secs = sections();
    return make_range(const_defined_symbol_iterator(Secs.begin(), Secs.end()),
                      const_defined_symbol_iterator(Secs.end(), Secs.end()));
  }

  /// Make the given symbol external (must not already be external).
  ///
  /// Symbol size, linkage, and callability will be left unchanged. Symbol scope
  /// will be set to Default, and offset will be reset to 0.
  void makeExternal(Symbol &Sym) {
    assert(!Sym.isExternal() && "Symbol is already external");
    if (Sym.isAbsolute()) {
      assert(AbsoluteSymbols.count(&Sym) &&
             "Sym is not in the absolute symbols set");
      assert(Sym.getOffset() == 0 && "Absolute not at offset 0");
      AbsoluteSymbols.erase(&Sym);
      auto &A = Sym.getAddressable();
      A.setAbsolute(false);
      A.setAddress(orc::ExecutorAddr());
    } else {
      assert(Sym.isDefined() && "Sym is not a defined symbol");
      Section &Sec = Sym.getBlock().getSection();
      Sec.removeSymbol(Sym);
      Sym.makeExternal(createAddressable(orc::ExecutorAddr(), false));
    }
    ExternalSymbols.insert(&Sym);
  }

  /// Make the given symbol absolute with the given address (must not already
  /// be absolute).
  ///
  /// The symbol's size, linkage, callability, and liveness will be left
  /// unchanged, and its offset will be reset to 0.
  ///
  /// If the symbol was external then its scope will be set to local, otherwise
  /// it will be left unchanged.
  void makeAbsolute(Symbol &Sym, orc::ExecutorAddr Address) {
    assert(!Sym.isAbsolute() && "Symbol is already absolute");
    if (Sym.isExternal()) {
      assert(ExternalSymbols.count(&Sym) &&
             "Sym is not in the external symbols set");
      assert(Sym.getOffset() == 0 && "External is not at offset 0");
      ExternalSymbols.erase(&Sym);
      auto &A = Sym.getAddressable();
      A.setAbsolute(true);
      A.setAddress(Address);
      Sym.setScope(Scope::Local);
    } else {
      assert(Sym.isDefined() && "Sym is not a defined symbol");
      Section &Sec = Sym.getBlock().getSection();
      Sec.removeSymbol(Sym);
      Sym.makeAbsolute(createAddressable(Address));
    }
    AbsoluteSymbols.insert(&Sym);
  }

  /// Turn an absolute or external symbol into a defined one by attaching it to
  /// a block. Symbol must not already be defined.
  void makeDefined(Symbol &Sym, Block &Content, orc::ExecutorAddrDiff Offset,
                   orc::ExecutorAddrDiff Size, Linkage L, Scope S,
                   bool IsLive) {
    assert(!Sym.isDefined() && "Sym is already a defined symbol");
    if (Sym.isAbsolute()) {
      assert(AbsoluteSymbols.count(&Sym) &&
             "Symbol is not in the absolutes set");
      AbsoluteSymbols.erase(&Sym);
    } else {
      assert(ExternalSymbols.count(&Sym) &&
             "Symbol is not in the externals set");
      ExternalSymbols.erase(&Sym);
    }
    Addressable &OldBase = *Sym.Base;
    Sym.setBlock(Content);
    Sym.setOffset(Offset);
    Sym.setSize(Size);
    Sym.setLinkage(L);
    Sym.setScope(S);
    Sym.setLive(IsLive);
    Content.getSection().addSymbol(Sym);
    destroyAddressable(OldBase);
  }

  /// Transfer a defined symbol from one block to another.
  ///
  /// The symbol's offset within DestBlock is set to NewOffset.
  ///
  /// If ExplicitNewSize is given as std::nullopt then the symbol's size will
  /// be checked and, if necessary, truncated so that it does not extend past
  /// the end of DestBlock (measured from the given offset).
  ///
  /// All other symbol attributes are unchanged.
  void
  transferDefinedSymbol(Symbol &Sym, Block &DestBlock,
                        orc::ExecutorAddrDiff NewOffset,
                        std::optional<orc::ExecutorAddrDiff> ExplicitNewSize) {
    auto &OldSection = Sym.getBlock().getSection();
    Sym.setBlock(DestBlock);
    Sym.setOffset(NewOffset);
    if (ExplicitNewSize)
      Sym.setSize(*ExplicitNewSize);
    else {
      auto RemainingBlockSize = DestBlock.getSize() - NewOffset;
      if (Sym.getSize() > RemainingBlockSize)
        Sym.setSize(RemainingBlockSize);
    }
    if (&DestBlock.getSection() != &OldSection) {
      OldSection.removeSymbol(Sym);
      DestBlock.getSection().addSymbol(Sym);
    }
  }

  /// Transfers the given Block and all Symbols pointing to it to the given
  /// Section.
  ///
  /// No attempt is made to check compatibility of the source and destination
  /// sections. Blocks may be moved between sections with incompatible
  /// permissions (e.g. from data to text). The client is responsible for
  /// ensuring that this is safe.
  void transferBlock(Block &B, Section &NewSection) {
    auto &OldSection = B.getSection();
    if (&OldSection == &NewSection)
      return;
    SmallVector<Symbol *> AttachedSymbols;
    for (auto *S : OldSection.symbols())
      if (&S->getBlock() == &B)
        AttachedSymbols.push_back(S);
    for (auto *S : AttachedSymbols) {
      OldSection.removeSymbol(*S);
      NewSection.addSymbol(*S);
    }
    OldSection.removeBlock(B);
    NewSection.addBlock(B);
  }

  /// Move all blocks and symbols from the source section to the destination
  /// section.
  ///
  /// If PreserveSrcSection is true (or SrcSection and DstSection are the same)
  /// then SrcSection is preserved, otherwise it is removed (the default).
  void mergeSections(Section &DstSection, Section &SrcSection,
                     bool PreserveSrcSection = false) {
    if (&DstSection == &SrcSection)
      return;
    for (auto *B : SrcSection.blocks())
      B->setSection(DstSection);
    SrcSection.transferContentTo(DstSection);
    if (!PreserveSrcSection)
      removeSection(SrcSection);
  }

  /// Removes an external symbol. Also removes the underlying Addressable.
  void removeExternalSymbol(Symbol &Sym) {
    assert(!Sym.isDefined() && !Sym.isAbsolute() &&
           "Sym is not an external symbol");
    assert(ExternalSymbols.count(&Sym) && "Symbol is not in the externals set");
    ExternalSymbols.erase(&Sym);
    Addressable &Base = *Sym.Base;
    assert(llvm::none_of(ExternalSymbols,
                         [&](Symbol *AS) { return AS->Base == &Base; }) &&
           "Base addressable still in use");
    destroySymbol(Sym);
    destroyAddressable(Base);
  }

  /// Remove an absolute symbol. Also removes the underlying Addressable.
  void removeAbsoluteSymbol(Symbol &Sym) {
    assert(!Sym.isDefined() && Sym.isAbsolute() &&
           "Sym is not an absolute symbol");
    assert(AbsoluteSymbols.count(&Sym) &&
           "Symbol is not in the absolute symbols set");
    AbsoluteSymbols.erase(&Sym);
    Addressable &Base = *Sym.Base;
    assert(llvm::none_of(AbsoluteSymbols,
                         [&](Symbol *AS) { return AS->Base == &Base; }) &&
           "Base addressable still in use");
    destroySymbol(Sym);
    destroyAddressable(Base);
  }

  /// Removes a defined symbol. Does not remove the underlying block.
  void removeDefinedSymbol(Symbol &Sym) {
    assert(Sym.isDefined() && "Sym is not a defined symbol");
    Sym.getBlock().getSection().removeSymbol(Sym);
    destroySymbol(Sym);
  }

  /// Remove a block. The block reference is defunct after calling this
  /// function and should no longer be used.
  void removeBlock(Block &B) {
    assert(llvm::none_of(B.getSection().symbols(),
                         [&](const Symbol *Sym) {
                           return &Sym->getBlock() == &B;
                         }) &&
           "Block still has symbols attached");
    B.getSection().removeBlock(B);
    destroyBlock(B);
  }

  /// Remove a section. The section reference is defunct after calling this
  /// function and should no longer be used.
  void removeSection(Section &Sec) {
    assert(Sections.count(Sec.getName()) && "Section not found");
    assert(Sections.find(Sec.getName())->second.get() == &Sec &&
           "Section map entry invalid");
    Sections.erase(Sec.getName());
  }

  /// Accessor for the AllocActions object for this graph. This can be used to
  /// register allocation action calls prior to finalization.
  ///
  /// Accessing this object after finalization will result in undefined
  /// behavior.
  orc::shared::AllocActions &allocActions() { return AAs; }

  /// Dump the graph.
  void dump(raw_ostream &OS);

private:
  // Put the BumpPtrAllocator first so that we don't free any of the underlying
  // memory until the Symbol/Addressable destructors have been run.
  BumpPtrAllocator Allocator;

  std::string Name;
  Triple TT;
  SubtargetFeatures Features;
  unsigned PointerSize;
  support::endianness Endianness;
  GetEdgeKindNameFunction GetEdgeKindName = nullptr;
  DenseMap<StringRef, std::unique_ptr<Section>> Sections;
  ExternalSymbolSet ExternalSymbols;
  ExternalSymbolSet AbsoluteSymbols;
  orc::shared::AllocActions AAs;
};

inline MutableArrayRef<char> Block::getMutableContent(LinkGraph &G) {
  if (!ContentMutable)
    setMutableContent(G.allocateContent({Data, Size}));
  return MutableArrayRef<char>(const_cast<char *>(Data), Size);
}

/// Enables easy lookup of blocks by addresses.
class BlockAddressMap {
public:
  using AddrToBlockMap = std::map<orc::ExecutorAddr, Block *>;
  using const_iterator = AddrToBlockMap::const_iterator;

  /// A block predicate that includes all blocks.
  static bool includeAllBlocks(const Block &B) { return true; }

  /// A block predicate that includes only blocks with non-null addresses.
  static bool includeNonNull(const Block &B) { return !!B.getAddress(); }

  BlockAddressMap() = default;

  /// Add a block to the map. Returns an error if the block overlaps with any
  /// existing block.
  template <typename PredFn = decltype(includeAllBlocks)>
  Error addBlock(Block &B, PredFn Pred = includeAllBlocks) {
    if (!Pred(B))
      return Error::success();

    auto I = AddrToBlock.upper_bound(B.getAddress());

    // If we're not at the end of the map, check for overlap with the next
    // element.
    if (I != AddrToBlock.end()) {
      if (B.getAddress() + B.getSize() > I->second->getAddress())
        return overlapError(B, *I->second);
    }

    // If we're not at the start of the map, check for overlap with the previous
    // element.
    if (I != AddrToBlock.begin()) {
      auto &PrevBlock = *std::prev(I)->second;
      if (PrevBlock.getAddress() + PrevBlock.getSize() > B.getAddress())
        return overlapError(B, PrevBlock);
    }

    AddrToBlock.insert(I, std::make_pair(B.getAddress(), &B));
    return Error::success();
  }

  /// Add a block to the map without checking for overlap with existing blocks.
  /// The client is responsible for ensuring that the block added does not
  /// overlap with any existing block.
  void addBlockWithoutChecking(Block &B) { AddrToBlock[B.getAddress()] = &B; }

  /// Add a range of blocks to the map. Returns an error if any block in the
  /// range overlaps with any other block in the range, or with any existing
  /// block in the map.
  template <typename BlockPtrRange,
            typename PredFn = decltype(includeAllBlocks)>
  Error addBlocks(BlockPtrRange &&Blocks, PredFn Pred = includeAllBlocks) {
    for (auto *B : Blocks)
      if (auto Err = addBlock(*B, Pred))
        return Err;
    return Error::success();
  }

  /// Add a range of blocks to the map without checking for overlap with
  /// existing blocks. The client is responsible for ensuring that the blocks
  /// added do not overlap with any existing blocks.
  template <typename BlockPtrRange>
  void addBlocksWithoutChecking(BlockPtrRange &&Blocks) {
    for (auto *B : Blocks)
      addBlockWithoutChecking(*B);
  }

  /// Iterates over (Address, Block*) pairs in ascending order of address.
  const_iterator begin() const { return AddrToBlock.begin(); }
  const_iterator end() const { return AddrToBlock.end(); }

  /// Returns the block starting at the given address, or nullptr if no such
  /// block exists.
  Block *getBlockAt(orc::ExecutorAddr Addr) const {
    auto I = AddrToBlock.find(Addr);
    if (I == AddrToBlock.end())
      return nullptr;
    return I->second;
  }

  /// Returns the block covering the given address, or nullptr if no such block
  /// exists.
  Block *getBlockCovering(orc::ExecutorAddr Addr) const {
    auto I = AddrToBlock.upper_bound(Addr);
    if (I == AddrToBlock.begin())
      return nullptr;
    auto *B = std::prev(I)->second;
    if (Addr < B->getAddress() + B->getSize())
      return B;
    return nullptr;
  }

private:
  Error overlapError(Block &NewBlock, Block &ExistingBlock) {
    auto NewBlockEnd = NewBlock.getAddress() + NewBlock.getSize();
    auto ExistingBlockEnd =
        ExistingBlock.getAddress() + ExistingBlock.getSize();
    return make_error<JITLinkError>(
        "Block at " +
        formatv("{0:x16} -- {1:x16}", NewBlock.getAddress().getValue(),
                NewBlockEnd.getValue()) +
        " overlaps " +
        formatv("{0:x16} -- {1:x16}", ExistingBlock.getAddress().getValue(),
                ExistingBlockEnd.getValue()));
  }

  AddrToBlockMap AddrToBlock;
};
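
// Example (illustrative sketch, assuming a populated LinkGraph G): index all
// blocks with non-null addresses, then query by address.
//
//   BlockAddressMap Map;
//   if (auto Err = Map.addBlocks(G.blocks(), BlockAddressMap::includeNonNull))
//     return Err; // Overlapping blocks are reported as errors.
//   if (Block *B = Map.getBlockCovering(orc::ExecutorAddr(0x1008)))
//     dbgs() << "covered by block at " << B->getAddress().getValue() << "\n";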

/// A map of addresses to Symbols.
class SymbolAddressMap {
public:
  using SymbolVector = SmallVector<Symbol *, 1>;

  /// Add a symbol to the SymbolAddressMap.
  void addSymbol(Symbol &Sym) {
    AddrToSymbols[Sym.getAddress()].push_back(&Sym);
  }

  /// Add all symbols in a given range to the SymbolAddressMap.
  template <typename SymbolPtrCollection>
  void addSymbols(SymbolPtrCollection &&Symbols) {
    for (auto *Sym : Symbols)
      addSymbol(*Sym);
  }

  /// Returns the list of symbols that start at the given address, or nullptr if
  /// no such symbols exist.
  const SymbolVector *getSymbolsAt(orc::ExecutorAddr Addr) const {
    auto I = AddrToSymbols.find(Addr);
    if (I == AddrToSymbols.end())
      return nullptr;
    return &I->second;
  }

private:
  std::map<orc::ExecutorAddr, SymbolVector> AddrToSymbols;
};

/// A function for mutating LinkGraphs.
using LinkGraphPassFunction = unique_function<Error(LinkGraph &)>;

/// A list of LinkGraph passes.
using LinkGraphPassList = std::vector<LinkGraphPassFunction>;

/// A LinkGraph pass configuration, consisting of lists of pre-prune,
/// post-prune, post-allocation, pre-fixup, and post-fixup passes.
struct PassConfiguration {

  /// Pre-prune passes.
  ///
  /// These passes are called on the graph after it is built, and before any
  /// symbols have been pruned. Graph nodes still have their original vmaddrs.
  ///
  /// Notable use cases: Marking symbols live or should-discard.
  LinkGraphPassList PrePrunePasses;

  /// Post-prune passes.
  ///
  /// These passes are called on the graph after dead stripping, but before
  /// memory is allocated or nodes assigned their final addresses.
  ///
  /// Notable use cases: Building GOT, stub, and TLV symbols.
  LinkGraphPassList PostPrunePasses;

  /// Post-allocation passes.
  ///
  /// These passes are called on the graph after memory has been allocated and
  /// defined nodes have been assigned their final addresses, but before the
  /// context has been notified of these addresses. At this point externals
  /// have not been resolved, and symbol content has not yet been copied into
  /// working memory.
  ///
  /// Notable use cases: Setting up data structures associated with addresses
  /// of defined symbols (e.g. a mapping of __dso_handle to JITDylib* for the
  /// JIT runtime) -- using a PostAllocationPass for this ensures that the
  /// data structures are in-place before any query for resolved symbols
  /// can complete.
  LinkGraphPassList PostAllocationPasses;

  /// Pre-fixup passes.
  ///
  /// These passes are called on the graph after memory has been allocated,
  /// content copied into working memory, and all nodes (including externals)
  /// have been assigned their final addresses, but before any fixups have been
  /// applied.
  ///
  /// Notable use cases: Late link-time optimizations like GOT and stub
  /// elimination.
  LinkGraphPassList PreFixupPasses;

  /// Post-fixup passes.
  ///
  /// These passes are called on the graph after block contents have been
  /// copied to working memory and fixups have been applied. Blocks have been
  /// updated to point to their fixed-up content.
  ///
  /// Notable use cases: Testing and validation.
  LinkGraphPassList PostFixupPasses;
};
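
// Example usage (illustrative sketch): registering a custom pass, e.g. from
// an override of JITLinkContext::modifyPassConfig. Any callable matching
// Error(LinkGraph &) can be used; the pass below just counts blocks.
//
//   Error modifyPassConfig(LinkGraph &G, PassConfiguration &Config) override {
//     Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
//       size_t NumBlocks =
//           std::distance(G.blocks().begin(), G.blocks().end());
//       dbgs() << G.getName() << ": " << NumBlocks << " blocks after prune\n";
//       return Error::success();
//     });
//     return Error::success();
//   }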

/// Flags for symbol lookup.
///
/// FIXME: These basically duplicate orc::SymbolLookupFlags -- We should merge
///        the two types once we have an OrcSupport library.
enum class SymbolLookupFlags { RequiredSymbol, WeaklyReferencedSymbol };

raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LF);

/// A map of symbol names to resolved addresses.
using AsyncLookupResult = DenseMap<StringRef, orc::ExecutorSymbolDef>;

/// A function object to call with a resolved symbol map (See AsyncLookupResult)
/// or an error if resolution failed.
class JITLinkAsyncLookupContinuation {
public:
  virtual ~JITLinkAsyncLookupContinuation() = default;
  virtual void run(Expected<AsyncLookupResult> LR) = 0;

private:
  virtual void anchor();
};

/// Create a lookup continuation from a function object.
template <typename Continuation>
std::unique_ptr<JITLinkAsyncLookupContinuation>
createLookupContinuation(Continuation Cont) {

  class Impl final : public JITLinkAsyncLookupContinuation {
  public:
    Impl(Continuation C) : C(std::move(C)) {}
    void run(Expected<AsyncLookupResult> LR) override { C(std::move(LR)); }

  private:
    Continuation C;
  };

  return std::make_unique<Impl>(std::move(Cont));
}
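
// Example usage (illustrative sketch): adapting a lambda into a lookup
// continuation, as an implementation of JITLinkContext::lookup might do.
// `Result` below stands for whatever Expected<AsyncLookupResult> the
// client's symbol resolution produced.
//
//   auto LC = createLookupContinuation(
//       [](Expected<AsyncLookupResult> LR) {
//         if (!LR) {
//           logAllUnhandledErrors(LR.takeError(), errs(), "lookup: ");
//           return;
//         }
//         for (auto &KV : *LR)
//           dbgs() << KV.first << " -> "
//                  << formatv("{0:x}", KV.second.getAddress().getValue())
//                  << "\n";
//       });
//   LC->run(std::move(Result));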

/// Holds context for a single jitLink invocation.
class JITLinkContext {
public:
  using LookupMap = DenseMap<StringRef, SymbolLookupFlags>;

  /// Create a JITLinkContext.
  JITLinkContext(const JITLinkDylib *JD) : JD(JD) {}

  /// Destroy a JITLinkContext.
  virtual ~JITLinkContext();

  /// Return the JITLinkDylib that this link is targeting, if any.
  const JITLinkDylib *getJITLinkDylib() const { return JD; }

  /// Return the MemoryManager to be used for this link.
  virtual JITLinkMemoryManager &getMemoryManager() = 0;

  /// Notify this context that linking failed.
  /// Called by JITLink if linking cannot be completed.
  virtual void notifyFailed(Error Err) = 0;

  /// Called by JITLink to resolve external symbols. This method is passed a
  /// lookup continuation which it must call with a result to continue the
  /// linking process.
  virtual void lookup(const LookupMap &Symbols,
                      std::unique_ptr<JITLinkAsyncLookupContinuation> LC) = 0;

  /// Called by JITLink once all defined symbols in the graph have been assigned
  /// their final memory locations in the target process. At this point the
  /// LinkGraph can be inspected to build a symbol table; however, the block
  /// content will not generally have been copied to the target location yet.
  ///
  /// If the client detects an error in the LinkGraph state (e.g. unexpected or
  /// missing symbols) it may return an error here. The error will be
  /// propagated to notifyFailed and the linker will bail out.
  virtual Error notifyResolved(LinkGraph &G) = 0;

  /// Called by JITLink to notify the context that the object has been
  /// finalized (i.e. emitted to memory and memory permissions set). If all of
  /// this object's dependencies have also been finalized then the code is ready
  /// to run.
  virtual void notifyFinalized(JITLinkMemoryManager::FinalizedAlloc Alloc) = 0;

  /// Called by JITLink prior to linking to determine whether default passes for
  /// the target should be added. The default implementation returns true.
  /// If subclasses override this method to return false for any target then
  /// they are required to fully configure the pass pipeline for that target.
  virtual bool shouldAddDefaultTargetPasses(const Triple &TT) const;

  /// Returns the mark-live pass to be used for this link. If no pass is
  /// returned (the default) then the target-specific linker implementation will
  /// choose a conservative default (usually marking all symbols live).
  /// This function is only called if shouldAddDefaultTargetPasses returns true,
  /// otherwise the JITLinkContext is responsible for adding a mark-live pass in
  /// modifyPassConfig.
  virtual LinkGraphPassFunction getMarkLivePass(const Triple &TT) const;

  /// Called by JITLink to modify the pass pipeline prior to linking.
  /// The default version performs no modification.
  virtual Error modifyPassConfig(LinkGraph &G, PassConfiguration &Config);

private:
  const JITLinkDylib *JD = nullptr;
};
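
// Example (illustrative sketch): a minimal in-process JITLinkContext that
// resolves every external to a null address and logs failures. A real
// context would consult its own symbol table in lookup() and keep the
// FinalizedAlloc around for eventual deallocation.
//
//   class TrivialLinkContext : public JITLinkContext {
//   public:
//     TrivialLinkContext(JITLinkMemoryManager &MemMgr)
//         : JITLinkContext(/*JD=*/nullptr), MemMgr(MemMgr) {}
//
//     JITLinkMemoryManager &getMemoryManager() override { return MemMgr; }
//
//     void notifyFailed(Error Err) override {
//       logAllUnhandledErrors(std::move(Err), errs(), "jitlink: ");
//     }
//
//     void lookup(const LookupMap &Symbols,
//                 std::unique_ptr<JITLinkAsyncLookupContinuation> LC) override {
//       AsyncLookupResult Result;
//       for (auto &KV : Symbols)
//         Result[KV.first] = orc::ExecutorSymbolDef(); // Null resolution.
//       LC->run(std::move(Result));
//     }
//
//     Error notifyResolved(LinkGraph &G) override { return Error::success(); }
//
//     void notifyFinalized(
//         JITLinkMemoryManager::FinalizedAlloc Alloc) override {
//       FA = std::move(Alloc);
//     }
//
//   private:
//     JITLinkMemoryManager &MemMgr;
//     JITLinkMemoryManager::FinalizedAlloc FA;
//   };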

/// Marks all symbols in a graph live. This can be used as a default,
/// conservative mark-live implementation.
Error markAllSymbolsLive(LinkGraph &G);

/// Create an out of range error for the given edge in the given block.
Error makeTargetOutOfRangeError(const LinkGraph &G, const Block &B,
                                const Edge &E);

Error makeAlignmentError(llvm::orc::ExecutorAddr Loc, uint64_t Value, int N,
                         const Edge &E);

/// Base case for edge-visitors where the visitor-list is empty.
inline void visitEdge(LinkGraph &G, Block *B, Edge &E) {}

/// Applies the first visitor in the list to the given edge. If the visitor's
/// visitEdge method returns true then we return immediately, otherwise we
/// apply the next visitor.
template <typename VisitorT, typename... VisitorTs>
void visitEdge(LinkGraph &G, Block *B, Edge &E, VisitorT &&V,
               VisitorTs &&...Vs) {
  if (!V.visitEdge(G, B, E))
    visitEdge(G, B, E, std::forward<VisitorTs>(Vs)...);
}

/// For each edge in the given graph, apply a list of visitors to the edge,
/// stopping when the first visitor's visitEdge method returns true.
///
/// Only visits edges that were in the graph at call time: if any visitor
/// adds new edges those will not be visited. Visitors are not allowed to
/// remove edges (though they can change their kind, target, and addend).
template <typename... VisitorTs>
void visitExistingEdges(LinkGraph &G, VisitorTs &&...Vs) {
  // We may add new blocks during this process, but we don't want to iterate
  // over them, so build a worklist.
  std::vector<Block *> Worklist(G.blocks().begin(), G.blocks().end());

  for (auto *B : Worklist)
    for (auto &E : B->edges())
      visitEdge(G, B, E, std::forward<VisitorTs>(Vs)...);
}
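
// Example usage (illustrative sketch): a visitor that dumps every edge and
// claims it (returning true stops later visitors in the list from seeing
// the edge). `G` (a LinkGraph) is assumed to be in scope.
//
//   struct DumpEdgeVisitor {
//     bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
//       dbgs() << G.getEdgeKindName(E.getKind()) << " edge at offset "
//              << E.getOffset() << " of block at "
//              << formatv("{0:x}", B->getAddress().getValue()) << "\n";
//       return true;
//     }
//   };
//
//   visitExistingEdges(G, DumpEdgeVisitor());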

/// Create a LinkGraph from the given object buffer.
///
/// Note: The graph does not take ownership of the underlying buffer, nor copy
/// its contents. The caller is responsible for ensuring that the object buffer
/// outlives the graph.
Expected<std::unique_ptr<LinkGraph>>
createLinkGraphFromObject(MemoryBufferRef ObjectBuffer);

/// Link the given graph.
void link(std::unique_ptr<LinkGraph> G, std::unique_ptr<JITLinkContext> Ctx);

} // end namespace jitlink
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_JITLINK_H

//===- SectionMemoryManager.h - Memory manager for MCJIT/RtDyld -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of a section-based memory manager used by
// the MCJIT execution engine and RuntimeDyld.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/Support/Memory.h"
#include <cstdint>
#include <string>
#include <system_error>

namespace llvm {

/// This is a simple memory manager which implements the methods called by
/// the RuntimeDyld class to allocate memory for section-based loading of
/// objects, usually those generated by the MCJIT execution engine.
///
/// This memory manager allocates all section memory as read-write.  The
/// RuntimeDyld will copy JITed section memory into these allocated blocks
/// and perform any necessary linking and relocations.
///
/// Any client using this memory manager MUST ensure that section-specific
/// page permissions have been applied before attempting to execute functions
/// in the JITed object.  Permissions can be applied either by calling
/// MCJIT::finalizeObject or by calling SectionMemoryManager::finalizeMemory
/// directly.  Clients of MCJIT should call MCJIT::finalizeObject.
class SectionMemoryManager : public RTDyldMemoryManager {
public:
  /// This enum describes the various reasons to allocate pages from
  /// allocateMappedMemory.
  enum class AllocationPurpose {
    Code,
    ROData,
    RWData,
  };

  /// Implementations of this interface are used by SectionMemoryManager to
  /// request pages from the operating system.
  class MemoryMapper {
  public:
    /// This method attempts to allocate \p NumBytes bytes of virtual memory for
    /// \p Purpose.  \p NearBlock may point to an existing allocation, in which
    /// case an attempt is made to allocate more memory near the existing block.
    /// The actual allocated address is not guaranteed to be near the requested
    /// address.  \p Flags is used to set the initial protection flags for the
    /// block of the memory.  \p EC [out] returns an object describing any error
    /// that occurs.
    ///
    /// This method may allocate more than the number of bytes requested.  The
    /// actual number of bytes allocated is indicated in the returned
    /// MemoryBlock.
    ///
    /// The start of the allocated block must be aligned with the system
    /// allocation granularity (64K on Windows, page size on Linux).  If the
    /// address following \p NearBlock is not so aligned, it will be rounded up
    /// to the next allocation granularity boundary.
    ///
    /// \returns a non-null MemoryBlock if the function was successful,
    /// otherwise a
    /// null MemoryBlock with \p EC describing the error.
    virtual sys::MemoryBlock
    allocateMappedMemory(AllocationPurpose Purpose, size_t NumBytes,
                         const sys::MemoryBlock *const NearBlock,
                         unsigned Flags, std::error_code &EC) = 0;

    /// This method sets the protection flags for a block of memory to the state
    /// specified by \p Flags.  The behavior is not specified if the memory was
    /// not allocated using the allocateMappedMemory method.
    /// \p Block describes the memory block to be protected.
    /// \p Flags specifies the new protection state to be assigned to the block.
    ///
    /// If \p Flags is MF_WRITE, the actual behavior varies with the operating
    /// system (i.e. MF_READ | MF_WRITE on Windows) and the target architecture
    /// (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
    ///
    /// \returns error_success if the function was successful, or an error_code
    /// describing the failure if an error occurred.
    virtual std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                                unsigned Flags) = 0;

    /// This method releases a block of memory that was allocated with the
    /// allocateMappedMemory method. It should not be used to release any memory
    /// block allocated any other way.
    /// \p Block describes the memory to be released.
    ///
    /// \returns error_success if the function was successful, or an error_code
    /// describing the failure if an error occurred.
    virtual std::error_code releaseMappedMemory(sys::MemoryBlock &Block) = 0;

    virtual ~MemoryMapper();
  };

  /// Creates a SectionMemoryManager instance with \p MM as the associated
  /// memory mapper.  If \p MM is nullptr then a default memory mapper is used
  /// that directly calls into the operating system.
  SectionMemoryManager(MemoryMapper *MM = nullptr);
  SectionMemoryManager(const SectionMemoryManager &) = delete;
  void operator=(const SectionMemoryManager &) = delete;
  ~SectionMemoryManager() override;

  /// Allocates a memory block of (at least) the given size suitable for
  /// executable code.
  ///
  /// The value of \p Alignment must be a power of two.  If \p Alignment is zero
  /// a default alignment of 16 will be used.
  uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
                               unsigned SectionID,
                               StringRef SectionName) override;

  /// Allocates a memory block of (at least) the given size suitable for data.
  ///
  /// The value of \p Alignment must be a power of two.  If \p Alignment is zero
  /// a default alignment of 16 will be used.
  uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
                               unsigned SectionID, StringRef SectionName,
                               bool isReadOnly) override;

  /// Update section-specific memory permissions and other attributes.
  ///
  /// This method is called when object loading is complete and section page
  /// permissions can be applied.  It is up to the memory manager implementation
  /// to decide whether or not to act on this method.  The memory manager will
  /// typically allocate all sections as read-write and then apply specific
  /// permissions when this method is called.  Code sections cannot be executed
  /// until this function has been called.  In addition, any cache coherency
  /// operations needed to reliably use the memory are also performed.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool finalizeMemory(std::string *ErrMsg = nullptr) override;

  /// Invalidate instruction cache for code sections.
  ///
  /// Some platforms with separate data cache and instruction cache require
  /// explicit cache flush, otherwise JIT code manipulations (like resolved
  /// relocations) will get to the data cache but not to the instruction cache.
  ///
  /// This method is called from finalizeMemory.
  virtual void invalidateInstructionCache();

private:
  struct FreeMemBlock {
    // The actual block of free memory
    sys::MemoryBlock Free;
    // If there is a pending allocation from the same reservation right before
    // this block, store its index in PendingMem so that the pending region
    // can be updated if part of this block is allocated, rather than having
    // to create a new one.
    unsigned PendingPrefixIndex;
  };

  struct MemoryGroup {
    // PendingMem contains all blocks of memory (subblocks of AllocatedMem)
    // which have not yet had their permissions applied, but have been given
    // out to the user. FreeMem contains all blocks of memory which have
    // neither had their permissions applied nor been given out to the user.
    SmallVector<sys::MemoryBlock, 16> PendingMem;
    SmallVector<FreeMemBlock, 16> FreeMem;

    // All memory blocks that have been requested from the system
    SmallVector<sys::MemoryBlock, 16> AllocatedMem;

    sys::MemoryBlock Near;
  };

  uint8_t *allocateSection(AllocationPurpose Purpose, uintptr_t Size,
                           unsigned Alignment);

  std::error_code applyMemoryGroupPermissions(MemoryGroup &MemGroup,
                                              unsigned Permissions);

  void anchor() override;

  MemoryGroup CodeMem;
  MemoryGroup RWDataMem;
  MemoryGroup RODataMem;
  MemoryMapper *MMapper;
  std::unique_ptr<MemoryMapper> OwnedMMapper;
};
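
// Example usage (illustrative sketch): standalone use with RuntimeDyld.
// `Obj` (an object::ObjectFile) is assumed to be loaded already; MCJIT
// clients should instead rely on MCJIT::finalizeObject.
//
//   SectionMemoryManager MemMgr;
//   RuntimeDyld Dyld(MemMgr, MemMgr); // Memory manager + symbol resolver.
//   Dyld.loadObject(Obj);
//   Dyld.resolveRelocations();
//   std::string ErrMsg;
//   if (MemMgr.finalizeMemory(&ErrMsg)) // true indicates an error.
//     errs() << "finalizeMemory failed: " << ErrMsg << "\n";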

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H

//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file defines an OProfileWrapper object that detects if the oprofile
// daemon is running, and provides wrappers for opagent functions used to
// communicate with the oprofile JIT interface. The dynamic library libopagent
// does not need to be linked directly as this object lazily loads the library
// when the first op_ function is called.
//
// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
// definition of the interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
#define LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H

#include "llvm/Support/DataTypes.h"
#include <opagent.h>

namespace llvm {


class OProfileWrapper {
  typedef  op_agent_t    (*op_open_agent_ptr_t)();
  typedef  int           (*op_close_agent_ptr_t)(op_agent_t);
  typedef  int           (*op_write_native_code_ptr_t)(op_agent_t,
                                                const char*,
                                                uint64_t,
                                                void const*,
                                                const unsigned int);
  typedef  int           (*op_write_debug_line_info_ptr_t)(op_agent_t,
                                                void const*,
                                                size_t,
                                                struct debug_line_info const*);
  typedef  int           (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);

  // Also used for op_minor_version function which has the same signature
  typedef  int           (*op_major_version_ptr_t)();

  // This is not a part of the opagent API, but is useful nonetheless
  typedef  bool          (*IsOProfileRunningPtrT)();


  op_agent_t                      Agent;
  op_open_agent_ptr_t             OpenAgentFunc;
  op_close_agent_ptr_t            CloseAgentFunc;
  op_write_native_code_ptr_t      WriteNativeCodeFunc;
  op_write_debug_line_info_ptr_t  WriteDebugLineInfoFunc;
  op_unload_native_code_ptr_t     UnloadNativeCodeFunc;
  op_major_version_ptr_t          MajorVersionFunc;
  op_major_version_ptr_t          MinorVersionFunc;
  IsOProfileRunningPtrT           IsOProfileRunningFunc;

  bool Initialized;

public:
  OProfileWrapper();

  // For testing with a mock opagent implementation, skips the dynamic load and
  // the function resolution.
  OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
                  op_close_agent_ptr_t CloseAgentImpl,
                  op_write_native_code_ptr_t WriteNativeCodeImpl,
                  op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
                  op_unload_native_code_ptr_t UnloadNativeCodeImpl,
                  op_major_version_ptr_t MajorVersionImpl,
                  op_major_version_ptr_t MinorVersionImpl,
                  IsOProfileRunningPtrT MockIsOProfileRunningImpl = nullptr)
  : OpenAgentFunc(OpenAgentImpl),
    CloseAgentFunc(CloseAgentImpl),
    WriteNativeCodeFunc(WriteNativeCodeImpl),
    WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
    UnloadNativeCodeFunc(UnloadNativeCodeImpl),
    MajorVersionFunc(MajorVersionImpl),
    MinorVersionFunc(MinorVersionImpl),
    IsOProfileRunningFunc(MockIsOProfileRunningImpl),
    Initialized(true)
  {
  }

  // Calls op_open_agent in the oprofile JIT library and saves the returned
  // op_agent_t handle internally so it can be used when calling all the other
  // op_* functions. Callers of this class do not need to keep track of
  // op_agent_t objects.
  bool op_open_agent();

  int op_close_agent();
  int op_write_native_code(const char* name,
                           uint64_t addr,
                           void const* code,
                           const unsigned int size);
  int op_write_debug_line_info(void const* code,
                               size_t num_entries,
                               struct debug_line_info const* info);
  int op_unload_native_code(uint64_t addr);
  int op_major_version();
  int op_minor_version();

  // Returns true if the oprofiled process is running, the opagent library is
  // loaded and a connection to the agent has been established, and false
  // otherwise.
  bool isAgentAvailable();

private:
  // Loads the libopagent library and initializes this wrapper if the oprofile
  // daemon is running
  bool initialize();

  // Searches /proc for the oprofile daemon and returns true if the process is
  // found, or false otherwise.
  bool checkForOProfileProcEntry();

  bool isOProfileRunning();
};
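
// Example usage (illustrative sketch): registering a JIT'd function with a
// running oprofile daemon. FnAddr, FnStart, and FnSize describe the JIT'd
// code and are assumed to be supplied by the client.
//
//   OProfileWrapper Wrapper;
//   if (Wrapper.isAgentAvailable() && Wrapper.op_open_agent()) {
//     Wrapper.op_write_native_code("my_jitted_fn", FnAddr, FnStart, FnSize);
//     // ... later, when the code is unloaded:
//     Wrapper.op_unload_native_code(FnAddr);
//     Wrapper.op_close_agent();
//   }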

} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H

//===-- SymbolStringPool.h -- Thread-safe pool for JIT symbols --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains a thread-safe string pool suitable for use with ORC.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H
#define LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
#include <atomic>
#include <mutex>

namespace llvm {

class raw_ostream;

namespace orc {

class SymbolStringPtrBase;
class SymbolStringPtr;
class NonOwningSymbolStringPtr;

/// String pool for symbol names used by the JIT.
class SymbolStringPool {
  friend class SymbolStringPoolTest;
  friend class SymbolStringPtrBase;

  // Implemented in DebugUtils.h.
  friend raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPool &SSP);

public:
  /// Destroy a SymbolStringPool.
  ~SymbolStringPool();

  /// Create a symbol string pointer from the given string.
  SymbolStringPtr intern(StringRef S);

  /// Remove from the pool any entries that are no longer referenced.
  void clearDeadEntries();

  /// Returns true if the pool is empty.
  bool empty() const;

private:
  size_t getRefCount(const SymbolStringPtrBase &S) const;

  using RefCountType = std::atomic<size_t>;
  using PoolMap = StringMap<RefCountType>;
  using PoolMapEntry = StringMapEntry<RefCountType>;
  mutable std::mutex PoolMutex;
  PoolMap Pool;
};

/// Base class for both owning and non-owning symbol-string ptrs.
///
/// All symbol-string ptrs are convertible to bool, dereferenceable and
/// comparable.
///
/// SymbolStringPtrBases are default-constructible and constructible
/// from nullptr to enable comparison with these values.
class SymbolStringPtrBase {
  friend class SymbolStringPool;
  friend struct DenseMapInfo<SymbolStringPtr>;
  friend struct DenseMapInfo<NonOwningSymbolStringPtr>;

public:
  SymbolStringPtrBase() = default;
  SymbolStringPtrBase(std::nullptr_t) {}

  explicit operator bool() const { return S; }

  StringRef operator*() const { return S->first(); }

  friend bool operator==(SymbolStringPtrBase LHS, SymbolStringPtrBase RHS) {
    return LHS.S == RHS.S;
  }

  friend bool operator!=(SymbolStringPtrBase LHS, SymbolStringPtrBase RHS) {
    return !(LHS == RHS);
  }

  friend bool operator<(SymbolStringPtrBase LHS, SymbolStringPtrBase RHS) {
    return LHS.S < RHS.S;
  }

#ifndef NDEBUG
  // Returns true if the pool entry's ref count is above zero (or if the entry
  // is an empty or tombstone value). Useful for debugging and testing -- this
  // method can be used to identify SymbolStringPtrs and
  // NonOwningSymbolStringPtrs that are pointing to abandoned pool entries.
  bool poolEntryIsAlive() const {
    return isRealPoolEntry(S) ? S->getValue() != 0 : true;
  }
#endif

protected:
  using PoolEntry = SymbolStringPool::PoolMapEntry;
  using PoolEntryPtr = PoolEntry *;

  SymbolStringPtrBase(PoolEntryPtr S) : S(S) {}

  constexpr static uintptr_t EmptyBitPattern =
      std::numeric_limits<uintptr_t>::max()
      << PointerLikeTypeTraits<PoolEntryPtr>::NumLowBitsAvailable;

  constexpr static uintptr_t TombstoneBitPattern =
      (std::numeric_limits<uintptr_t>::max() - 1)
      << PointerLikeTypeTraits<PoolEntryPtr>::NumLowBitsAvailable;

  constexpr static uintptr_t InvalidPtrMask =
      (std::numeric_limits<uintptr_t>::max() - 3)
      << PointerLikeTypeTraits<PoolEntryPtr>::NumLowBitsAvailable;

  // Returns false for null, empty, and tombstone values, true otherwise.
  static bool isRealPoolEntry(PoolEntryPtr P) {
    return ((reinterpret_cast<uintptr_t>(P) - 1) & InvalidPtrMask) !=
           InvalidPtrMask;
  }

  size_t getRefCount() const {
    return isRealPoolEntry(S) ? size_t(S->getValue()) : size_t(0);
  }

  PoolEntryPtr S = nullptr;
};

/// Pointer to a pooled string representing a symbol name.
class SymbolStringPtr : public SymbolStringPtrBase {
  friend class OrcV2CAPIHelper;
  friend class SymbolStringPool;
  friend struct DenseMapInfo<SymbolStringPtr>;

public:
  SymbolStringPtr() = default;
  SymbolStringPtr(std::nullptr_t) {}
  SymbolStringPtr(const SymbolStringPtr &Other) : SymbolStringPtrBase(Other.S) {
    incRef();
  }

  explicit SymbolStringPtr(NonOwningSymbolStringPtr Other);

  SymbolStringPtr& operator=(const SymbolStringPtr &Other) {
    decRef();
    S = Other.S;
    incRef();
    return *this;
  }

  SymbolStringPtr(SymbolStringPtr &&Other) { std::swap(S, Other.S); }

  SymbolStringPtr& operator=(SymbolStringPtr &&Other) {
    decRef();
    S = nullptr;
    std::swap(S, Other.S);
    return *this;
  }

  ~SymbolStringPtr() { decRef(); }

private:
  SymbolStringPtr(PoolEntryPtr S) : SymbolStringPtrBase(S) { incRef(); }

  void incRef() {
    if (isRealPoolEntry(S))
      ++S->getValue();
  }

  void decRef() {
    if (isRealPoolEntry(S)) {
      assert(S->getValue() && "Releasing SymbolStringPtr with zero ref count");
      --S->getValue();
    }
  }

  static SymbolStringPtr getEmptyVal() {
    return SymbolStringPtr(reinterpret_cast<PoolEntryPtr>(EmptyBitPattern));
  }

  static SymbolStringPtr getTombstoneVal() {
    return SymbolStringPtr(reinterpret_cast<PoolEntryPtr>(TombstoneBitPattern));
  }
};

/// Non-owning SymbolStringPool entry pointer. Instances are comparable with
/// SymbolStringPtr instances and guaranteed to have the same hash, but do not
/// affect the ref-count of the pooled string (and are therefore cheaper to
/// copy).
///
/// NonOwningSymbolStringPtrs are silently invalidated if the pool entry's
/// ref-count drops to zero, so they should only be used in contexts where a
/// corresponding SymbolStringPtr is known to exist (which will guarantee that
/// the ref-count stays above zero). E.g. in a graph where nodes are
/// represented by SymbolStringPtrs the edges can be represented by pairs of
/// NonOwningSymbolStringPtrs, and this will make the introduction and
/// deletion of edges cheaper.
class NonOwningSymbolStringPtr : public SymbolStringPtrBase {
  friend struct DenseMapInfo<orc::NonOwningSymbolStringPtr>;

public:
  NonOwningSymbolStringPtr() = default;
  explicit NonOwningSymbolStringPtr(const SymbolStringPtr &S)
      : SymbolStringPtrBase(S) {}

  using SymbolStringPtrBase::operator=;

private:
  NonOwningSymbolStringPtr(PoolEntryPtr S) : SymbolStringPtrBase(S) {}

  static NonOwningSymbolStringPtr getEmptyVal() {
    return NonOwningSymbolStringPtr(
        reinterpret_cast<PoolEntryPtr>(EmptyBitPattern));
  }

  static NonOwningSymbolStringPtr getTombstoneVal() {
    return NonOwningSymbolStringPtr(
        reinterpret_cast<PoolEntryPtr>(TombstoneBitPattern));
  }
};

inline SymbolStringPtr::SymbolStringPtr(NonOwningSymbolStringPtr Other)
    : SymbolStringPtrBase(Other) {
  assert(poolEntryIsAlive() &&
         "SymbolStringPtr constructed from invalid non-owning pointer.");

  if (isRealPoolEntry(S))
    ++S->getValue();
}

inline SymbolStringPool::~SymbolStringPool() {
#ifndef NDEBUG
  clearDeadEntries();
  assert(Pool.empty() && "Dangling references at pool destruction time");
#endif // NDEBUG
}

inline SymbolStringPtr SymbolStringPool::intern(StringRef S) {
  std::lock_guard<std::mutex> Lock(PoolMutex);
  PoolMap::iterator I;
  bool Added;
  std::tie(I, Added) = Pool.try_emplace(S, 0);
  return SymbolStringPtr(&*I);
}

inline void SymbolStringPool::clearDeadEntries() {
  std::lock_guard<std::mutex> Lock(PoolMutex);
  for (auto I = Pool.begin(), E = Pool.end(); I != E;) {
    auto Tmp = I++;
    if (Tmp->second == 0)
      Pool.erase(Tmp);
  }
}

inline bool SymbolStringPool::empty() const {
  std::lock_guard<std::mutex> Lock(PoolMutex);
  return Pool.empty();
}

inline size_t
SymbolStringPool::getRefCount(const SymbolStringPtrBase &S) const {
  return S.getRefCount();
}
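
// Example usage (illustrative sketch): interning and ref-counting behavior.
//
//   SymbolStringPool Pool;
//   {
//     SymbolStringPtr Foo = Pool.intern("foo");
//     SymbolStringPtr Foo2 = Pool.intern("foo"); // Same pool entry.
//     assert(Foo == Foo2 && "pointers compare by pool entry");
//     NonOwningSymbolStringPtr FooNO(Foo); // No ref-count bump.
//     assert(*FooNO == "foo");
//   } // Both owning pointers released here.
//   Pool.clearDeadEntries();
//   assert(Pool.empty());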

} // end namespace orc

template <>
struct DenseMapInfo<orc::SymbolStringPtr> {

  static orc::SymbolStringPtr getEmptyKey() {
    return orc::SymbolStringPtr::getEmptyVal();
  }

  static orc::SymbolStringPtr getTombstoneKey() {
    return orc::SymbolStringPtr::getTombstoneVal();
  }

  static unsigned getHashValue(const orc::SymbolStringPtrBase &V) {
    return DenseMapInfo<orc::SymbolStringPtr::PoolEntryPtr>::getHashValue(V.S);
  }

  static bool isEqual(const orc::SymbolStringPtrBase &LHS,
                      const orc::SymbolStringPtrBase &RHS) {
    return LHS.S == RHS.S;
  }
};

template <> struct DenseMapInfo<orc::NonOwningSymbolStringPtr> {

  static orc::NonOwningSymbolStringPtr getEmptyKey() {
    return orc::NonOwningSymbolStringPtr::getEmptyVal();
  }

  static orc::NonOwningSymbolStringPtr getTombstoneKey() {
    return orc::NonOwningSymbolStringPtr::getTombstoneVal();
  }

  static unsigned getHashValue(const orc::SymbolStringPtrBase &V) {
    return DenseMapInfo<
        orc::NonOwningSymbolStringPtr::PoolEntryPtr>::getHashValue(V.S);
  }

  static bool isEqual(const orc::SymbolStringPtrBase &LHS,
                      const orc::SymbolStringPtrBase &RHS) {
    return LHS.S == RHS.S;
  }
};
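
// Example usage (illustrative sketch): the specializations above allow pooled
// symbol names to be used directly as DenseMap keys. `Pool` is an assumed
// orc::SymbolStringPool instance.
//
//   DenseMap<orc::SymbolStringPtr, uint64_t> SymbolAddrs;
//   SymbolAddrs[Pool.intern("main")] = 0x1000;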

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SYMBOLSTRINGPOOL_H

//===---- EPCGenericRTDyldMemoryManager.h - EPC-based MemMgr ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines a RuntimeDyld::MemoryManager that uses EPC and the ORC runtime
// bootstrap functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCGENERICRTDYLDMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_ORC_EPCGENERICRTDYLDMEMORYMANAGER_H

#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"

#define DEBUG_TYPE "orc"

namespace llvm {
namespace orc {

/// Remote-mapped RuntimeDyld-compatible memory manager.
class EPCGenericRTDyldMemoryManager : public RuntimeDyld::MemoryManager {
public:
  /// Symbol addresses for memory access.
  struct SymbolAddrs {
    ExecutorAddr Instance;
    ExecutorAddr Reserve;
    ExecutorAddr Finalize;
    ExecutorAddr Deallocate;
    ExecutorAddr RegisterEHFrame;
    ExecutorAddr DeregisterEHFrame;
  };

  /// Create an EPCGenericRTDyldMemoryManager using the given EPC, looking up
  /// the default symbol names in the bootstrap symbol set.
  static Expected<std::unique_ptr<EPCGenericRTDyldMemoryManager>>
  CreateWithDefaultBootstrapSymbols(ExecutorProcessControl &EPC);

  /// Create an EPCGenericRTDyldMemoryManager using the given EPC and symbol
  /// addrs.
  EPCGenericRTDyldMemoryManager(ExecutorProcessControl &EPC, SymbolAddrs SAs);

  EPCGenericRTDyldMemoryManager(const EPCGenericRTDyldMemoryManager &) = delete;
  EPCGenericRTDyldMemoryManager &
  operator=(const EPCGenericRTDyldMemoryManager &) = delete;
  EPCGenericRTDyldMemoryManager(EPCGenericRTDyldMemoryManager &&) = delete;
  EPCGenericRTDyldMemoryManager &
  operator=(EPCGenericRTDyldMemoryManager &&) = delete;
  ~EPCGenericRTDyldMemoryManager();

  uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
                               unsigned SectionID,
                               StringRef SectionName) override;

  uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
                               unsigned SectionID, StringRef SectionName,
                               bool IsReadOnly) override;

  void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
                              uintptr_t RODataSize, Align RODataAlign,
                              uintptr_t RWDataSize, Align RWDataAlign) override;

  bool needsToReserveAllocationSpace() override;

  void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override;

  void deregisterEHFrames() override;

  void notifyObjectLoaded(RuntimeDyld &Dyld,
                          const object::ObjectFile &Obj) override;

  bool finalizeMemory(std::string *ErrMsg = nullptr) override;

private:
  struct SectionAlloc {
  public:
    SectionAlloc(uint64_t Size, unsigned Align)
        : Size(Size), Align(Align),
          Contents(std::make_unique<uint8_t[]>(Size + Align - 1)) {}

    uint64_t Size;
    unsigned Align;
    std::unique_ptr<uint8_t[]> Contents;
    ExecutorAddr RemoteAddr;
  };

  // Group of section allocations to be allocated together in the executor.
  // The start address of RemoteCode stands in as the id of the group for
  // deallocation purposes.
  struct SectionAllocGroup {
    SectionAllocGroup() = default;
    SectionAllocGroup(const SectionAllocGroup &) = delete;
    SectionAllocGroup &operator=(const SectionAllocGroup &) = delete;
    SectionAllocGroup(SectionAllocGroup &&) = default;
    SectionAllocGroup &operator=(SectionAllocGroup &&) = default;

    ExecutorAddrRange RemoteCode;
    ExecutorAddrRange RemoteROData;
    ExecutorAddrRange RemoteRWData;
    std::vector<ExecutorAddrRange> UnfinalizedEHFrames;
    std::vector<SectionAlloc> CodeAllocs, RODataAllocs, RWDataAllocs;
  };

  // Maps all allocations in SecAllocs to aligned blocks.
  void mapAllocsToRemoteAddrs(RuntimeDyld &Dyld,
                              std::vector<SectionAlloc> &SecAllocs,
                              ExecutorAddr NextAddr);

  ExecutorProcessControl &EPC;
  SymbolAddrs SAs;

  std::mutex M;
  std::vector<SectionAllocGroup> Unmapped;
  std::vector<SectionAllocGroup> Unfinalized;
  std::vector<ExecutorAddr> FinalizedAllocs;
  std::string ErrMsg;
};
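
// Example usage (illustrative sketch): creating the memory manager from an
// ExecutorProcessControl whose bootstrap symbol set contains the default
// ORC runtime entry points, then handing it to RuntimeDyld. `Resolver`
// (a JITSymbolResolver) is assumed to be provided by the client.
//
//   auto MemMgr =
//       EPCGenericRTDyldMemoryManager::CreateWithDefaultBootstrapSymbols(EPC);
//   if (!MemMgr)
//     return MemMgr.takeError();
//   RuntimeDyld Dyld(**MemMgr, Resolver);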

} // end namespace orc
} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_EXECUTIONENGINE_ORC_EPCGENERICRTDYLDMEMORYMANAGER_H

//===----- DebugUtils.h - Utilities for debugging ORC JITs ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for debugging ORC-based JITs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_DEBUGUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_DEBUGUTILS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <string>

namespace llvm {

class MemoryBuffer;

namespace orc {

// --raw_ostream operators for ORC types--

/// Render a SymbolStringPtr.
raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPtr &Sym);

/// Render a SymbolNameSet.
raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols);

/// Render a SymbolNameVector.
raw_ostream &operator<<(raw_ostream &OS, const SymbolNameVector &Symbols);

/// Render an array of SymbolStringPtrs.
raw_ostream &operator<<(raw_ostream &OS, ArrayRef<SymbolStringPtr> Symbols);

/// Render JITSymbolFlags.
raw_ostream &operator<<(raw_ostream &OS, const JITSymbolFlags &Flags);

/// Render a SymbolFlagsMap entry.
raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap::value_type &KV);

/// Render a SymbolMap entry.
raw_ostream &operator<<(raw_ostream &OS, const SymbolMap::value_type &KV);

/// Render a SymbolFlagsMap.
raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &SymbolFlags);

/// Render a SymbolMap.
raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols);

/// Render a SymbolDependenceMap entry.
raw_ostream &operator<<(raw_ostream &OS,
                        const SymbolDependenceMap::value_type &KV);

/// Render a SymbolDependenceMap.
raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps);

/// Render a MaterializationUnit.
raw_ostream &operator<<(raw_ostream &OS, const MaterializationUnit &MU);

/// Render a JITDylibLookupFlags instance.
raw_ostream &operator<<(raw_ostream &OS,
                        const JITDylibLookupFlags &JDLookupFlags);

/// Render a SymbolLookupFlags instance.
raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LookupFlags);

/// Render a SymbolLookupSet entry.
raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupSet::value_type &KV);

/// Render a SymbolLookupSet.
raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupSet &LookupSet);

/// Render a JITDylibSearchOrder.
raw_ostream &operator<<(raw_ostream &OS,
                        const JITDylibSearchOrder &SearchOrder);

/// Render a SymbolAliasMap.
raw_ostream &operator<<(raw_ostream &OS, const SymbolAliasMap &Aliases);

/// Render a SymbolState.
raw_ostream &operator<<(raw_ostream &OS, const SymbolState &S);

/// Render a LookupKind.
raw_ostream &operator<<(raw_ostream &OS, const LookupKind &K);

/// Dump a SymbolStringPool. Useful for debugging dangling-pointer crashes.
raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPool &SSP);

/// A function object that can be used as an ObjectTransformLayer transform
/// to dump object files to disk at a specified path.
class DumpObjects {
public:
  /// Construct a DumpObjects transform that will dump objects to disk.
  ///
  /// @param DumpDir specifies the path to write dumped objects to. DumpDir may
  /// be empty, in which case files will be dumped to the working directory. If
  /// DumpDir is non-empty then any trailing separators will be discarded.
  ///
  /// @param IdentifierOverride specifies a file name stem to use when dumping
  /// objects. If empty, each MemoryBuffer's identifier will be used (with a .o
  /// suffix added if not already present). If an identifier override is
  /// supplied it will be used instead (since all buffers will use the same
  /// identifier, the resulting files will be named <ident>.o, <ident>.2.o,
  /// <ident>.3.o, and so on). IdentifierOverride should not contain an
  /// extension, as a .o suffix will be added by DumpObjects.
  DumpObjects(std::string DumpDir = "", std::string IdentifierOverride = "");

  /// Dumps the given buffer to disk.
  Expected<std::unique_ptr<MemoryBuffer>>
  operator()(std::unique_ptr<MemoryBuffer> Obj);

private:
  StringRef getBufferIdentifier(MemoryBuffer &B);
  std::string DumpDir;
  std::string IdentifierOverride;
};
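
// Example usage (illustrative sketch): installing DumpObjects as an
// ObjectTransformLayer transform so that every object passing through the
// layer is also written to /tmp/jit-objs (an assumed path). `J` is an
// assumed LLJIT instance.
//
//   J.getObjTransformLayer().setTransform(DumpObjects("/tmp/jit-objs"));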

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_DEBUGUTILS_H

//===-- Speculation.h - Speculative Compilation --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains the definition to support speculative compilation when laziness is
// enabled.
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SPECULATION_H
#define LLVM_EXECUTIONENGINE_ORC_SPECULATION_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/Support/Debug.h"
#include <mutex>
#include <type_traits>
#include <utility>

namespace llvm {
namespace orc {

class Speculator;

// Tracks the implementations (JITDylib, Symbol) of symbols while lazy
// call-through trampolines are created. Operations are guarded by locks to
// ensure that the map stays in a consistent state across reads and writes.

class ImplSymbolMap {
  friend class Speculator;

public:
  using AliaseeDetails = std::pair<SymbolStringPtr, JITDylib *>;
  using Alias = SymbolStringPtr;
  using ImapTy = DenseMap<Alias, AliaseeDetails>;
  void trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD);

private:
  // FIXME: Find the right way to distinguish pre-compiled symbols, and update
  // the callsite.
  std::optional<AliaseeDetails> getImplFor(const SymbolStringPtr &StubSymbol) {
    std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
    auto Position = Maps.find(StubSymbol);
    if (Position != Maps.end())
      return Position->getSecond();
    else
      return std::nullopt;
  }

  std::mutex ConcurrentAccess;
  ImapTy Maps;
};

// Defines the Speculator concept.
class Speculator {
public:
  using TargetFAddr = ExecutorAddr;
  using FunctionCandidatesMap = DenseMap<SymbolStringPtr, SymbolNameSet>;
  using StubAddrLikelies = DenseMap<TargetFAddr, SymbolNameSet>;

private:
  void registerSymbolsWithAddr(TargetFAddr ImplAddr,
                               SymbolNameSet likelySymbols) {
    std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
    GlobalSpecMap.insert({ImplAddr, std::move(likelySymbols)});
  }

  void launchCompile(ExecutorAddr FAddr) {
    SymbolNameSet CandidateSet;
    // Copying CandidateSet is necessary to avoid unsynchronized access to
    // the data structure.
    {
      std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
      auto It = GlobalSpecMap.find(FAddr);
      if (It == GlobalSpecMap.end())
        return;
      CandidateSet = It->getSecond();
    }

    SymbolDependenceMap SpeculativeLookUpImpls;

    for (auto &Callee : CandidateSet) {
      auto ImplSymbol = AliaseeImplTable.getImplFor(Callee);
      // try to distinguish already compiled & library symbols
      if (!ImplSymbol)
        continue;
      const auto &ImplSymbolName = ImplSymbol->first;
      JITDylib *ImplJD = ImplSymbol->second;
      auto &SymbolsInJD = SpeculativeLookUpImpls[ImplJD];
      SymbolsInJD.insert(ImplSymbolName);
    }

    DEBUG_WITH_TYPE("orc", {
      for (auto &I : SpeculativeLookUpImpls) {
        llvm::dbgs() << "\n In " << I.first->getName() << " JITDylib ";
        for (auto &N : I.second)
          llvm::dbgs() << "\n Likely Symbol : " << N;
      }
    });

    // For a given symbol there may be no symbols qualified for speculative
    // compilation; try to fix this before reaching this code if possible.
    for (auto &LookupPair : SpeculativeLookUpImpls)
      ES.lookup(
          LookupKind::Static,
          makeJITDylibSearchOrder(LookupPair.first,
                                  JITDylibLookupFlags::MatchAllSymbols),
          SymbolLookupSet(LookupPair.second), SymbolState::Ready,
          [this](Expected<SymbolMap> Result) {
            if (auto Err = Result.takeError())
              ES.reportError(std::move(Err));
          },
          NoDependenciesToRegister);
  }

public:
  Speculator(ImplSymbolMap &Impl, ExecutionSession &ref)
      : AliaseeImplTable(Impl), ES(ref), GlobalSpecMap(0) {}
  Speculator(const Speculator &) = delete;
  Speculator(Speculator &&) = delete;
  Speculator &operator=(const Speculator &) = delete;
  Speculator &operator=(Speculator &&) = delete;

  /// Define symbols for this Speculator object (__orc_speculator) and the
  /// speculation runtime entry point symbol (__orc_speculate_for) in the
  /// given JITDylib.
  Error addSpeculationRuntime(JITDylib &JD, MangleAndInterner &Mangle);

  // Speculatively compile likely functions for the given Stub Address.
  // destination of __orc_speculate_for jump
  void speculateFor(TargetFAddr StubAddr) { launchCompile(StubAddr); }

  // FIXME : Register with Stub Address, after JITLink Fix.
  void registerSymbols(FunctionCandidatesMap Candidates, JITDylib *JD) {
    for (auto &SymPair : Candidates) {
      auto Target = SymPair.first;
      auto Likely = SymPair.second;

      auto OnReadyFixUp = [Likely, Target,
                           this](Expected<SymbolMap> ReadySymbol) {
        if (ReadySymbol) {
          auto RDef = (*ReadySymbol)[Target];
          registerSymbolsWithAddr(RDef.getAddress(), std::move(Likely));
        } else
          this->getES().reportError(ReadySymbol.takeError());
      };
      // Include non-exported symbols also.
      ES.lookup(
          LookupKind::Static,
          makeJITDylibSearchOrder(JD, JITDylibLookupFlags::MatchAllSymbols),
          SymbolLookupSet(Target, SymbolLookupFlags::WeaklyReferencedSymbol),
          SymbolState::Ready, OnReadyFixUp, NoDependenciesToRegister);
    }
  }

  ExecutionSession &getES() { return ES; }

private:
  static void speculateForEntryPoint(Speculator *Ptr, uint64_t StubId);
  std::mutex ConcurrentAccess;
  ImplSymbolMap &AliaseeImplTable;
  ExecutionSession &ES;
  StubAddrLikelies GlobalSpecMap;
};
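
// Example usage (illustrative sketch): wiring a Speculator into a JITDylib.
// ES (an ExecutionSession), JD (a JITDylib), and Mangle (a MangleAndInterner)
// are assumed to be set up already.
//
//   ImplSymbolMap SymMap;
//   Speculator Spec(SymMap, ES);
//   if (auto Err = Spec.addSpeculationRuntime(JD, Mangle))
//     ES.reportError(std::move(Err));
//   // JIT'd code can now call __orc_speculate_for, which ends up invoking
//   // Spec.speculateFor(StubAddr) for the stub being jumped through.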

class IRSpeculationLayer : public IRLayer {
public:
  using IRlikiesStrRef =
      std::optional<DenseMap<StringRef, DenseSet<StringRef>>>;
  using ResultEval = std::function<IRlikiesStrRef(Function &)>;
  using TargetAndLikelies = DenseMap<SymbolStringPtr, SymbolNameSet>;

  IRSpeculationLayer(ExecutionSession &ES, IRLayer &BaseLayer, Speculator &Spec,
                     MangleAndInterner &Mangle, ResultEval Interpreter)
      : IRLayer(ES, BaseLayer.getManglingOptions()), NextLayer(BaseLayer),
        S(Spec), Mangle(Mangle), QueryAnalysis(Interpreter) {}

  void emit(std::unique_ptr<MaterializationResponsibility> R,
            ThreadSafeModule TSM) override;

private:
  TargetAndLikelies
  internToJITSymbols(DenseMap<StringRef, DenseSet<StringRef>> IRNames) {
    assert(!IRNames.empty() && "No IRNames received to Intern?");
    TargetAndLikelies InternedNames;
    for (auto &NamePair : IRNames) {
      DenseSet<SymbolStringPtr> TargetJITNames;
      for (auto &TargetNames : NamePair.second)
        TargetJITNames.insert(Mangle(TargetNames));
      InternedNames[Mangle(NamePair.first)] = std::move(TargetJITNames);
    }
    return InternedNames;
  }

  IRLayer &NextLayer;
  Speculator &S;
  MangleAndInterner &Mangle;
  ResultEval QueryAnalysis;
};

} // namespace orc
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SPECULATION_H

//===-- MachOPlatform.h - Utilities for executing MachO in Orc --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for executing JIT'd MachO in Orc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_MACHOPLATFORM_H
#define LLVM_EXECUTIONENGINE_ORC_MACHOPLATFORM_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

#include <future>
#include <thread>
#include <vector>

namespace llvm {
namespace orc {

/// Mediates between MachO initialization and ExecutionSession state.
class MachOPlatform : public Platform {
public:
  // Used internally by MachOPlatform, but made public to enable serialization.
  struct MachOJITDylibDepInfo {
    bool Sealed = false;
    std::vector<ExecutorAddr> DepHeaders;
  };

  // Used internally by MachOPlatform, but made public to enable serialization.
  using MachOJITDylibDepInfoMap =
      std::vector<std::pair<ExecutorAddr, MachOJITDylibDepInfo>>;

  /// Try to create a MachOPlatform instance, adding the ORC runtime to the
  /// given JITDylib.
  ///
  /// The ORC runtime requires access to a number of symbols in libc++, and to
  /// symbols in libobjc and libswiftCore to support Objective-C and Swift
  /// code. It is up to the caller to ensure that the required symbols can be
  /// referenced by code added to PlatformJD. The
  /// standard way to achieve this is to first attach dynamic library search
  /// generators for either the given process, or for the specific required
  /// libraries, to PlatformJD, then to create the platform instance:
  ///
  /// \code{.cpp}
  ///   auto &PlatformJD = ES.createBareJITDylib("stdlib");
  ///   PlatformJD.addGenerator(
  ///     ExitOnErr(EPCDynamicLibrarySearchGenerator
  ///                 ::GetForTargetProcess(EPC)));
  ///   ES.setPlatform(
  ///     ExitOnErr(MachOPlatform::Create(ES, ObjLayer, EPC, PlatformJD,
  ///                                     "/path/to/orc/runtime")));
  /// \endcode
  ///
  /// Alternatively, these symbols could be added to another JITDylib that
  /// PlatformJD links against.
  ///
  /// Clients are also responsible for ensuring that any JIT'd code that
  /// depends on runtime functions (including any code using TLV or static
  /// destructors) can reference the runtime symbols. This is usually achieved
  /// by linking any JITDylibs containing regular code against
  /// PlatformJD.
  ///
  /// By default, MachOPlatform will add the set of aliases returned by the
  /// standardPlatformAliases function. This includes both required aliases
  /// (e.g. __cxa_atexit -> __orc_rt_macho_cxa_atexit for static destructor
  /// support), and optional aliases that provide JIT versions of common
  /// functions (e.g. dlopen -> __orc_rt_macho_jit_dlopen). Clients can
  /// override these defaults by passing a non-None value for the
  /// RuntimeAliases function, in which case the client is responsible for
  /// setting up all aliases (including the required ones).
  static Expected<std::unique_ptr<MachOPlatform>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         JITDylib &PlatformJD, std::unique_ptr<DefinitionGenerator> OrcRuntime,
         std::optional<SymbolAliasMap> RuntimeAliases = std::nullopt);

  /// Construct using a path to the ORC runtime.
  static Expected<std::unique_ptr<MachOPlatform>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         JITDylib &PlatformJD, const char *OrcRuntimePath,
         std::optional<SymbolAliasMap> RuntimeAliases = std::nullopt);

  ExecutionSession &getExecutionSession() const { return ES; }
  ObjectLinkingLayer &getObjectLinkingLayer() const { return ObjLinkingLayer; }

  Error setupJITDylib(JITDylib &JD) override;
  Error teardownJITDylib(JITDylib &JD) override;
  Error notifyAdding(ResourceTracker &RT,
                     const MaterializationUnit &MU) override;
  Error notifyRemoving(ResourceTracker &RT) override;

  /// Returns an AliasMap containing the default aliases for the MachOPlatform.
  /// This can be modified by clients when constructing the platform to add
  /// or remove aliases.
  static SymbolAliasMap standardPlatformAliases(ExecutionSession &ES);

  /// Returns the array of required CXX aliases.
  static ArrayRef<std::pair<const char *, const char *>> requiredCXXAliases();

  /// Returns the array of standard runtime utility aliases for MachO.
  static ArrayRef<std::pair<const char *, const char *>>
  standardRuntimeUtilityAliases();
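
  // Example (illustrative sketch): customizing the aliases before creating
  // the platform. ES, ObjLinkingLayer, and PlatformJD are assumed to be set
  // up as in the Create() example above.
  //
  //   auto Aliases = MachOPlatform::standardPlatformAliases(ES);
  //   // ... add or remove entries in Aliases here ...
  //   ES.setPlatform(
  //     ExitOnErr(MachOPlatform::Create(ES, ObjLinkingLayer, PlatformJD,
  //                                     "/path/to/orc/runtime",
  //                                     std::move(Aliases))));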

private:
  // Data needed for bootstrap only.
  struct BootstrapInfo {
    std::mutex Mutex;
    std::condition_variable CV;
    size_t ActiveGraphs = 0;
    shared::AllocActions DeferredAAs;
    ExecutorAddr MachOHeaderAddr;
  };

  // The MachOPlatformPlugin scans/modifies LinkGraphs to support MachO
  // platform features including initializers, exceptions, TLV, and language
  // runtime registration.
  class MachOPlatformPlugin : public ObjectLinkingLayer::Plugin {
  public:
    MachOPlatformPlugin(MachOPlatform &MP) : MP(MP) {}

    void modifyPassConfig(MaterializationResponsibility &MR,
                          jitlink::LinkGraph &G,
                          jitlink::PassConfiguration &Config) override;

    SyntheticSymbolDependenciesMap
    getSyntheticSymbolDependencies(MaterializationResponsibility &MR) override;

    // FIXME: We should be tentatively tracking scraped sections and discarding
    // if the MR fails.
    Error notifyFailed(MaterializationResponsibility &MR) override {
      return Error::success();
    }

    Error notifyRemovingResources(JITDylib &JD, ResourceKey K) override {
      return Error::success();
    }

    void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                     ResourceKey SrcKey) override {}

  private:
    using InitSymbolDepMap =
        DenseMap<MaterializationResponsibility *, JITLinkSymbolSet>;

    struct UnwindSections {
      SmallVector<ExecutorAddrRange> CodeRanges;
      ExecutorAddrRange DwarfSection;
      ExecutorAddrRange CompactUnwindSection;
    };

    struct ObjCImageInfo {
      uint32_t Version = 0;
      uint32_t Flags = 0;
    };

    Error bootstrapPipelineStart(jitlink::LinkGraph &G);
    Error bootstrapPipelineRecordRuntimeFunctions(jitlink::LinkGraph &G);
    Error bootstrapPipelineEnd(jitlink::LinkGraph &G);

    Error associateJITDylibHeaderSymbol(jitlink::LinkGraph &G,
                                        MaterializationResponsibility &MR);

    Error preserveImportantSections(jitlink::LinkGraph &G,
                                    MaterializationResponsibility &MR);

    Error processObjCImageInfo(jitlink::LinkGraph &G,
                               MaterializationResponsibility &MR);

    Error fixTLVSectionsAndEdges(jitlink::LinkGraph &G, JITDylib &JD);

    std::optional<UnwindSections> findUnwindSectionInfo(jitlink::LinkGraph &G);

    Error registerObjectPlatformSections(jitlink::LinkGraph &G, JITDylib &JD,
                                         bool InBootstrapPhase);

    Error createObjCRuntimeObject(jitlink::LinkGraph &G);
    Error populateObjCRuntimeObject(jitlink::LinkGraph &G,
                                    MaterializationResponsibility &MR);

    std::mutex PluginMutex;
    MachOPlatform &MP;

    // FIXME: ObjCImageInfos and HeaderAddrs need to be cleared when
    // JITDylibs are removed.
    DenseMap<JITDylib *, ObjCImageInfo> ObjCImageInfos;
    DenseMap<JITDylib *, ExecutorAddr> HeaderAddrs;
    InitSymbolDepMap InitSymbolDeps;
  };

  using GetJITDylibHeaderSendResultFn =
      unique_function<void(Expected<ExecutorAddr>)>;
  using GetJITDylibNameSendResultFn =
      unique_function<void(Expected<StringRef>)>;
  using PushInitializersSendResultFn =
      unique_function<void(Expected<MachOJITDylibDepInfoMap>)>;
  using SendSymbolAddressFn = unique_function<void(Expected<ExecutorAddr>)>;

  static bool supportedTarget(const Triple &TT);

  MachOPlatform(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
                JITDylib &PlatformJD,
                std::unique_ptr<DefinitionGenerator> OrcRuntimeGenerator,
                Error &Err);

  // Associate MachOPlatform JIT-side runtime support functions with handlers.
  Error associateRuntimeSupportFunctions();

  // Implements rt_pushInitializers by making repeat async lookups for
  // initializer symbols (each lookup may spawn more initializer symbols if
  // it pulls in new materializers, e.g. from objects in a static library).
  void pushInitializersLoop(PushInitializersSendResultFn SendResult,
                            JITDylibSP JD);

  // Handle requests from the ORC runtime to push MachO initializer info.
  void rt_pushInitializers(PushInitializersSendResultFn SendResult,
                           ExecutorAddr JDHeaderAddr);

  // Handle requests for symbol addresses from the ORC runtime.
  void rt_lookupSymbol(SendSymbolAddressFn SendResult, ExecutorAddr Handle,
                       StringRef SymbolName);

  // Call the ORC runtime to create a pthread key.
  Expected<uint64_t> createPThreadKey();

  ExecutionSession &ES;
  JITDylib &PlatformJD;
  ObjectLinkingLayer &ObjLinkingLayer;

  SymbolStringPtr MachOHeaderStartSymbol = ES.intern("___dso_handle");

  struct RuntimeFunction {
    RuntimeFunction(SymbolStringPtr Name) : Name(std::move(Name)) {}
    SymbolStringPtr Name;
    ExecutorAddr Addr;
  };

  RuntimeFunction PlatformBootstrap{
      ES.intern("___orc_rt_macho_platform_bootstrap")};
  RuntimeFunction PlatformShutdown{
      ES.intern("___orc_rt_macho_platform_shutdown")};
  RuntimeFunction RegisterEHFrameSection{
      ES.intern("___orc_rt_macho_register_ehframe_section")};
  RuntimeFunction DeregisterEHFrameSection{
      ES.intern("___orc_rt_macho_deregister_ehframe_section")};
  RuntimeFunction RegisterJITDylib{
      ES.intern("___orc_rt_macho_register_jitdylib")};
  RuntimeFunction DeregisterJITDylib{
      ES.intern("___orc_rt_macho_deregister_jitdylib")};
  RuntimeFunction RegisterObjectPlatformSections{
      ES.intern("___orc_rt_macho_register_object_platform_sections")};
  RuntimeFunction DeregisterObjectPlatformSections{
      ES.intern("___orc_rt_macho_deregister_object_platform_sections")};
  RuntimeFunction CreatePThreadKey{
      ES.intern("___orc_rt_macho_create_pthread_key")};
  RuntimeFunction RegisterObjCRuntimeObject{
      ES.intern("___orc_rt_macho_register_objc_runtime_object")};
  RuntimeFunction DeregisterObjCRuntimeObject{
      ES.intern("___orc_rt_macho_deregister_objc_runtime_object")};

  DenseMap<JITDylib *, SymbolLookupSet> RegisteredInitSymbols;

  std::mutex PlatformMutex;
  DenseMap<JITDylib *, ExecutorAddr> JITDylibToHeaderAddr;
  DenseMap<ExecutorAddr, JITDylib *> HeaderAddrToJITDylib;
  DenseMap<JITDylib *, uint64_t> JITDylibToPThreadKey;

  std::atomic<BootstrapInfo *> Bootstrap;
};

namespace shared {

using SPSNamedExecutorAddrRangeSequence =
    SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRange>>;

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_MACHOPLATFORM_H
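
// Usage sketch: installing MachOPlatform on an ExecutionSession. Assumes the
// MachOPlatform::Create overload (declared earlier in this header) that loads
// the ORC runtime from an archive; the archive path and JITDylib name below
// are illustrative.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Error setUpMachOPlatform(ExecutionSession &ES,
                                ObjectLinkingLayer &OLL) {
  // Create a bare JITDylib to host the ORC runtime and platform symbols.
  JITDylib &PlatformJD = ES.createBareJITDylib("Platform");
  auto P = MachOPlatform::Create(ES, OLL, PlatformJD,
                                 "/path/to/liborc_rt_osx.a");
  if (!P)
    return P.takeError();
  // The platform will now receive module/dylib events for this session.
  ES.setPlatform(std::move(*P));
  return Error::success();
}
} // end namespace example
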
// File: ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h
//===- EPCGenericJITLinkMemoryManager.h - EPC-based mem manager -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements JITLinkMemoryManager by making remote calls via
// ExecutorProcessControl::callWrapperAsync.
//
// This simplifies the implementation of new ExecutorProcessControl instances,
// as this implementation will always work (at the cost of some performance
// overhead for the calls).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCGENERICJITLINKMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_ORC_EPCGENERICJITLINKMEMORYMANAGER_H

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/Core.h"

namespace llvm {
namespace orc {

class EPCGenericJITLinkMemoryManager : public jitlink::JITLinkMemoryManager {
public:
  /// Function addresses for memory access.
  struct SymbolAddrs {
    ExecutorAddr Allocator;
    ExecutorAddr Reserve;
    ExecutorAddr Finalize;
    ExecutorAddr Deallocate;
  };

  /// Create an EPCGenericJITLinkMemoryManager instance from a given set of
  /// function addrs.
  EPCGenericJITLinkMemoryManager(ExecutorProcessControl &EPC, SymbolAddrs SAs)
      : EPC(EPC), SAs(SAs) {}

  void allocate(const jitlink::JITLinkDylib *JD, jitlink::LinkGraph &G,
                OnAllocatedFunction OnAllocated) override;

  // Use overloads from base class.
  using JITLinkMemoryManager::allocate;

  void deallocate(std::vector<FinalizedAlloc> Allocs,
                  OnDeallocatedFunction OnDeallocated) override;

  // Use overloads from base class.
  using JITLinkMemoryManager::deallocate;

private:
  class InFlightAlloc;

  void completeAllocation(ExecutorAddr AllocAddr, jitlink::BasicLayout BL,
                          OnAllocatedFunction OnAllocated);

  ExecutorProcessControl &EPC;
  SymbolAddrs SAs;
};

namespace shared {

/// FIXME: This specialization should be moved into TargetProcessControlTypes.h
///        (or wherever those types get merged to) once ORC depends on JITLink.
template <>
class SPSSerializationTraits<SPSExecutorAddr,
                             jitlink::JITLinkMemoryManager::FinalizedAlloc> {
public:
  static size_t size(const jitlink::JITLinkMemoryManager::FinalizedAlloc &FA) {
    return SPSArgList<SPSExecutorAddr>::size(ExecutorAddr(FA.getAddress()));
  }

  static bool
  serialize(SPSOutputBuffer &OB,
            const jitlink::JITLinkMemoryManager::FinalizedAlloc &FA) {
    return SPSArgList<SPSExecutorAddr>::serialize(
        OB, ExecutorAddr(FA.getAddress()));
  }

  static bool deserialize(SPSInputBuffer &IB,
                          jitlink::JITLinkMemoryManager::FinalizedAlloc &FA) {
    ExecutorAddr A;
    if (!SPSArgList<SPSExecutorAddr>::deserialize(IB, A))
      return false;
    FA = jitlink::JITLinkMemoryManager::FinalizedAlloc(A);
    return true;
  }
};

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCGENERICJITLINKMEMORYMANAGER_H
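
// Usage sketch: constructing an EPCGenericJITLinkMemoryManager from function
// addresses found in the executor's bootstrap symbol map (requires
// llvm/ExecutionEngine/Orc/ExecutorProcessControl.h). The symbol names below
// are those advertised by rt_bootstrap::SimpleExecutorMemoryManager; treat
// them as an assumption if your executor registers a different service.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Expected<std::unique_ptr<EPCGenericJITLinkMemoryManager>>
createEPCMemMgr(ExecutorProcessControl &EPC) {
  EPCGenericJITLinkMemoryManager::SymbolAddrs SAs;
  if (auto Err = EPC.getBootstrapSymbols(
          {{SAs.Allocator, "__llvm_orc_SimpleExecutorMemoryManager_Instance"},
           {SAs.Reserve,
            "__llvm_orc_SimpleExecutorMemoryManager_reserve_wrapper"},
           {SAs.Finalize,
            "__llvm_orc_SimpleExecutorMemoryManager_finalize_wrapper"},
           {SAs.Deallocate,
            "__llvm_orc_SimpleExecutorMemoryManager_deallocate_wrapper"}}))
    return std::move(Err);
  return std::make_unique<EPCGenericJITLinkMemoryManager>(EPC, SAs);
}
} // end namespace example
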
// File: ExecutionEngine/Orc/ThreadSafeModule.h
//===- ThreadSafeModule.h - Thread-safe Module/Context wrappers -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Thread safe wrappers and utilities for Module and LLVMContext.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_THREADSAFEMODULE_H
#define LLVM_EXECUTIONENGINE_ORC_THREADSAFEMODULE_H

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Compiler.h"

#include <functional>
#include <memory>
#include <mutex>

namespace llvm {
namespace orc {

/// An LLVMContext together with an associated mutex that can be used to lock
/// the context to prevent concurrent access by other threads.
class ThreadSafeContext {
private:
  struct State {
    State(std::unique_ptr<LLVMContext> Ctx) : Ctx(std::move(Ctx)) {}

    std::unique_ptr<LLVMContext> Ctx;
    std::recursive_mutex Mutex;
  };

public:
  // RAII based lock for ThreadSafeContext.
  class [[nodiscard]] Lock {
  public:
    Lock(std::shared_ptr<State> S) : S(std::move(S)), L(this->S->Mutex) {}

  private:
    std::shared_ptr<State> S;
    std::unique_lock<std::recursive_mutex> L;
  };

  /// Construct a null context.
  ThreadSafeContext() = default;

  /// Construct a ThreadSafeContext from the given LLVMContext.
  ThreadSafeContext(std::unique_ptr<LLVMContext> NewCtx)
      : S(std::make_shared<State>(std::move(NewCtx))) {
    assert(S->Ctx != nullptr &&
           "Can not construct a ThreadSafeContext from a nullptr");
  }

  /// Returns a pointer to the LLVMContext that was used to construct this
  /// instance, or null if the instance was default constructed.
  LLVMContext *getContext() { return S ? S->Ctx.get() : nullptr; }

  /// Returns a pointer to the LLVMContext that was used to construct this
  /// instance, or null if the instance was default constructed.
  const LLVMContext *getContext() const { return S ? S->Ctx.get() : nullptr; }

  Lock getLock() const {
    assert(S && "Can not lock an empty ThreadSafeContext");
    return Lock(S);
  }

private:
  std::shared_ptr<State> S;
};

/// An LLVM Module together with a shared ThreadSafeContext.
class ThreadSafeModule {
public:
  /// Default construct a ThreadSafeModule. This results in a null module and
  /// null context.
  ThreadSafeModule() = default;

  ThreadSafeModule(ThreadSafeModule &&Other) = default;

  ThreadSafeModule &operator=(ThreadSafeModule &&Other) {
    // We have to explicitly define this move operator to copy the fields in
    // reverse order (i.e. module first) to ensure the dependencies are
    // protected: The old module that is being overwritten must be destroyed
    // *before* the context that it depends on.
    // We also need to lock the context to make sure the module tear-down
    // does not overlap any other work on the context.
    if (M) {
      auto L = TSCtx.getLock();
      M = nullptr;
    }
    M = std::move(Other.M);
    TSCtx = std::move(Other.TSCtx);
    return *this;
  }

  /// Construct a ThreadSafeModule from a unique_ptr<Module> and a
  /// unique_ptr<LLVMContext>. This creates a new ThreadSafeContext from the
  /// given context.
  ThreadSafeModule(std::unique_ptr<Module> M, std::unique_ptr<LLVMContext> Ctx)
      : M(std::move(M)), TSCtx(std::move(Ctx)) {}

  /// Construct a ThreadSafeModule from a unique_ptr<Module> and an
  /// existing ThreadSafeContext.
  ThreadSafeModule(std::unique_ptr<Module> M, ThreadSafeContext TSCtx)
      : M(std::move(M)), TSCtx(std::move(TSCtx)) {}

  ~ThreadSafeModule() {
    // We need to lock the context while we destruct the module.
    if (M) {
      auto L = TSCtx.getLock();
      M = nullptr;
    }
  }

  /// Boolean conversion: This ThreadSafeModule will evaluate to true if it
  /// wraps a non-null module.
  explicit operator bool() const {
    if (M) {
      assert(TSCtx.getContext() &&
             "Non-null module must have non-null context");
      return true;
    }
    return false;
  }

  /// Locks the associated ThreadSafeContext and calls the given function
  /// on the contained Module.
  template <typename Func> decltype(auto) withModuleDo(Func &&F) {
    assert(M && "Can not call on null module");
    auto Lock = TSCtx.getLock();
    return F(*M);
  }

  /// Locks the associated ThreadSafeContext and calls the given function
  /// on the contained Module.
  template <typename Func> decltype(auto) withModuleDo(Func &&F) const {
    assert(M && "Can not call on null module");
    auto Lock = TSCtx.getLock();
    return F(*M);
  }

  /// Locks the associated ThreadSafeContext and calls the given function,
  /// passing the contained std::unique_ptr<Module>. The given function should
  /// consume the Module.
  template <typename Func> decltype(auto) consumingModuleDo(Func &&F) {
    auto Lock = TSCtx.getLock();
    return F(std::move(M));
  }

  /// Get a raw pointer to the contained module without locking the context.
  Module *getModuleUnlocked() { return M.get(); }

  /// Get a raw pointer to the contained module without locking the context.
  const Module *getModuleUnlocked() const { return M.get(); }

  /// Returns the context for this ThreadSafeModule.
  ThreadSafeContext getContext() const { return TSCtx; }

private:
  std::unique_ptr<Module> M;
  ThreadSafeContext TSCtx;
};

using GVPredicate = std::function<bool(const GlobalValue &)>;
using GVModifier = std::function<void(GlobalValue &)>;

/// Clones the given module on to a new context.
ThreadSafeModule
cloneToNewContext(const ThreadSafeModule &TSMW,
                  GVPredicate ShouldCloneDef = GVPredicate(),
                  GVModifier UpdateClonedDefSource = GVModifier());

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_THREADSAFEMODULE_H
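
// Usage sketch: creating a ThreadSafeModule on a fresh context and mutating
// the module under the context lock. The module name and data layout string
// are illustrative.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline ThreadSafeModule makeModule() {
  auto Ctx = std::make_unique<LLVMContext>();
  auto M = std::make_unique<Module>("demo", *Ctx);
  // The ThreadSafeModule takes ownership of both the module and its context.
  return ThreadSafeModule(std::move(M), std::move(Ctx));
}

inline void setDataLayout(ThreadSafeModule &TSM, StringRef DL) {
  // withModuleDo locks the underlying ThreadSafeContext for the duration of
  // the callback.
  TSM.withModuleDo([&](Module &M) { M.setDataLayout(DL); });
}
} // end namespace example
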
// File: ExecutionEngine/Orc/IRTransformLayer.h
//===- IRTransformLayer.h - Run all IR through a functor --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Run all IR passed in through a user supplied functor.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H

#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include <memory>

namespace llvm {
namespace orc {

/// A layer that applies a transform to emitted modules.
/// The transform function is responsible for locking the ThreadSafeContext
/// before operating on the module.
class IRTransformLayer : public IRLayer {
public:
  using TransformFunction = unique_function<Expected<ThreadSafeModule>(
      ThreadSafeModule, MaterializationResponsibility &R)>;

  IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
                   TransformFunction Transform = identityTransform);

  void setTransform(TransformFunction Transform) {
    this->Transform = std::move(Transform);
  }

  void emit(std::unique_ptr<MaterializationResponsibility> R,
            ThreadSafeModule TSM) override;

  static ThreadSafeModule identityTransform(ThreadSafeModule TSM,
                                            MaterializationResponsibility &R) {
    return TSM;
  }

private:
  IRLayer &BaseLayer;
  TransformFunction Transform;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
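
// Usage sketch: installing a transform. The body below just renames each
// module; a real transform would typically run an optimization pipeline over
// M before handing the module on to the base layer.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline void installTransform(IRTransformLayer &TL) {
  TL.setTransform([](ThreadSafeModule TSM, MaterializationResponsibility &)
                      -> Expected<ThreadSafeModule> {
    // Lock the context while touching the module.
    TSM.withModuleDo([](Module &M) { M.setModuleIdentifier("transformed"); });
    return std::move(TSM);
  });
}
} // end namespace example
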
// File: ExecutionEngine/Orc/ObjectLinkingLayer.h
//===-- ObjectLinkingLayer.h - JITLink-based jit linking layer --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for a JITLink-based, in-process object linking
// layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <list>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

namespace jitlink {
class EHFrameRegistrar;
class LinkGraph;
class Symbol;
} // namespace jitlink

namespace orc {

class ObjectLinkingLayerJITLinkContext;

/// An ObjectLayer implementation built on JITLink.
///
/// Clients can use this class to add relocatable object files to an
/// ExecutionSession, and it typically serves as the base layer (underneath
/// a compiling layer like IRCompileLayer) for the rest of the JIT.
class ObjectLinkingLayer : public RTTIExtends<ObjectLinkingLayer, ObjectLayer>,
                           private ResourceManager {
  friend class ObjectLinkingLayerJITLinkContext;

public:
  static char ID;

  /// Plugin instances can be added to the ObjectLinkingLayer to receive
  /// callbacks when code is loaded or emitted, and when JITLink is being
  /// configured.
  class Plugin {
  public:
    using JITLinkSymbolSet = DenseSet<jitlink::Symbol *>;
    using SyntheticSymbolDependenciesMap =
        DenseMap<SymbolStringPtr, JITLinkSymbolSet>;

    virtual ~Plugin();
    virtual void modifyPassConfig(MaterializationResponsibility &MR,
                                  jitlink::LinkGraph &G,
                                  jitlink::PassConfiguration &Config) {}

    // Deprecated. Don't use this in new code. There will be a proper mechanism
    // for capturing object buffers.
    virtual void notifyMaterializing(MaterializationResponsibility &MR,
                                     jitlink::LinkGraph &G,
                                     jitlink::JITLinkContext &Ctx,
                                     MemoryBufferRef InputObject) {}

    virtual void notifyLoaded(MaterializationResponsibility &MR) {}
    virtual Error notifyEmitted(MaterializationResponsibility &MR) {
      return Error::success();
    }
    virtual Error notifyFailed(MaterializationResponsibility &MR) = 0;
    virtual Error notifyRemovingResources(JITDylib &JD, ResourceKey K) = 0;
    virtual void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                             ResourceKey SrcKey) = 0;

    /// Return any dependencies that synthetic symbols (e.g. init symbols)
    /// have on symbols in the LinkGraph.
    /// This is used by the ObjectLinkingLayer to update the dependencies for
    /// the synthetic symbols.
    virtual SyntheticSymbolDependenciesMap
    getSyntheticSymbolDependencies(MaterializationResponsibility &MR) {
      return SyntheticSymbolDependenciesMap();
    }
  };

  using ReturnObjectBufferFunction =
      std::function<void(std::unique_ptr<MemoryBuffer>)>;

  /// Construct an ObjectLinkingLayer using the ExecutorProcessControl
  /// instance's memory manager.
  ObjectLinkingLayer(ExecutionSession &ES);

  /// Construct an ObjectLinkingLayer using a custom memory manager.
  ObjectLinkingLayer(ExecutionSession &ES,
                     jitlink::JITLinkMemoryManager &MemMgr);

  /// Construct an ObjectLinkingLayer. Takes ownership of the given
  /// JITLinkMemoryManager. This method is a temporary hack to simplify
  /// co-existence with RTDyldObjectLinkingLayer (which also owns its
  /// allocators).
  ObjectLinkingLayer(ExecutionSession &ES,
                     std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr);

  /// Destruct an ObjectLinkingLayer.
  ~ObjectLinkingLayer();

  /// Set an object buffer return function. By default object buffers are
  /// deleted once the JIT has linked them. If a return function is set then
  /// it will be called to transfer ownership of the buffer instead.
  void setReturnObjectBuffer(ReturnObjectBufferFunction ReturnObjectBuffer) {
    this->ReturnObjectBuffer = std::move(ReturnObjectBuffer);
  }

  /// Add a plugin to this layer.
  ObjectLinkingLayer &addPlugin(std::unique_ptr<Plugin> P) {
    std::lock_guard<std::mutex> Lock(LayerMutex);
    Plugins.push_back(std::move(P));
    return *this;
  }

  /// Add a LinkGraph to the JITDylib targeted by the given tracker.
  Error add(ResourceTrackerSP, std::unique_ptr<jitlink::LinkGraph> G);

  /// Add a LinkGraph to the given JITDylib.
  Error add(JITDylib &JD, std::unique_ptr<jitlink::LinkGraph> G) {
    return add(JD.getDefaultResourceTracker(), std::move(G));
  }

  // Un-hide ObjectLayer add methods.
  using ObjectLayer::add;

  /// Emit an object file.
  void emit(std::unique_ptr<MaterializationResponsibility> R,
            std::unique_ptr<MemoryBuffer> O) override;

  /// Emit a LinkGraph.
  void emit(std::unique_ptr<MaterializationResponsibility> R,
            std::unique_ptr<jitlink::LinkGraph> G);

  /// Instructs this ObjectLinkingLayer instance to override the symbol flags
  /// found in the LinkGraph with the flags supplied by the
  /// MaterializationResponsibility instance. This is a workaround to support
  /// symbol visibility in COFF, which does not use libObject's
  /// SF_Exported flag. Use only when generating / adding COFF object files.
  ///
  /// FIXME: We should be able to remove this if/when COFF properly tracks
  /// exported symbols.
  ObjectLinkingLayer &
  setOverrideObjectFlagsWithResponsibilityFlags(bool OverrideObjectFlags) {
    this->OverrideObjectFlags = OverrideObjectFlags;
    return *this;
  }

  /// If set, this ObjectLinkingLayer instance will claim responsibility
  /// for any symbols provided by a given object file that were not already in
  /// the MaterializationResponsibility instance. Setting this flag allows
  /// higher-level program representations (e.g. LLVM IR) to be added based on
  /// only a subset of the symbols they provide, without having to write
  /// intervening layers to scan and add the additional symbols. This trades
  /// diagnostic quality for convenience however: If all symbols are enumerated
  /// up-front then clashes can be detected and reported early (and usually
  /// deterministically). If this option is set, clashes for the additional
  /// symbols may not be detected until late, and detection may depend on
  /// the flow of control through JIT'd code. Use with care.
  ObjectLinkingLayer &
  setAutoClaimResponsibilityForObjectSymbols(bool AutoClaimObjectSymbols) {
    this->AutoClaimObjectSymbols = AutoClaimObjectSymbols;
    return *this;
  }

private:
  using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;

  void modifyPassConfig(MaterializationResponsibility &MR,
                        jitlink::LinkGraph &G,
                        jitlink::PassConfiguration &PassConfig);
  void notifyLoaded(MaterializationResponsibility &MR);
  Error notifyEmitted(MaterializationResponsibility &MR, FinalizedAlloc FA);

  Error handleRemoveResources(JITDylib &JD, ResourceKey K) override;
  void handleTransferResources(JITDylib &JD, ResourceKey DstKey,
                               ResourceKey SrcKey) override;

  mutable std::mutex LayerMutex;
  jitlink::JITLinkMemoryManager &MemMgr;
  std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgrOwnership;
  bool OverrideObjectFlags = false;
  bool AutoClaimObjectSymbols = false;
  ReturnObjectBufferFunction ReturnObjectBuffer;
  DenseMap<ResourceKey, std::vector<FinalizedAlloc>> Allocs;
  std::vector<std::unique_ptr<Plugin>> Plugins;
};

class EHFrameRegistrationPlugin : public ObjectLinkingLayer::Plugin {
public:
  EHFrameRegistrationPlugin(
      ExecutionSession &ES,
      std::unique_ptr<jitlink::EHFrameRegistrar> Registrar);
  void modifyPassConfig(MaterializationResponsibility &MR,
                        jitlink::LinkGraph &G,
                        jitlink::PassConfiguration &PassConfig) override;
  Error notifyEmitted(MaterializationResponsibility &MR) override;
  Error notifyFailed(MaterializationResponsibility &MR) override;
  Error notifyRemovingResources(JITDylib &JD, ResourceKey K) override;
  void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                   ResourceKey SrcKey) override;

private:
  std::mutex EHFramePluginMutex;
  ExecutionSession &ES;
  std::unique_ptr<jitlink::EHFrameRegistrar> Registrar;
  DenseMap<MaterializationResponsibility *, ExecutorAddrRange> InProcessLinks;
  DenseMap<ResourceKey, std::vector<ExecutorAddrRange>> EHFrameRanges;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
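
// Usage sketch: building an ObjectLinkingLayer with eh-frame registration for
// an in-process JIT. Assumes jitlink::InProcessEHFrameRegistrar from
// llvm/ExecutionEngine/JITLink/EHFrameSupport.h.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline std::unique_ptr<ObjectLinkingLayer>
createLinkingLayer(ExecutionSession &ES) {
  auto OLL = std::make_unique<ObjectLinkingLayer>(ES);
  // Register/deregister JIT'd eh-frames so C++ exceptions can unwind
  // through JIT'd code.
  OLL->addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
      ES, std::make_unique<jitlink::InProcessEHFrameRegistrar>()));
  return OLL;
}
} // end namespace example
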
// File: ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h
//===---- SimpleRemoteEPCServer.h - EPC over abstract channel ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// EPC over simple abstract channel.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEREMOTEEPCSERVER_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEREMOTEEPCSERVER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Error.h"

#include <condition_variable>
#include <future>
#include <memory>
#include <mutex>

namespace llvm {
namespace orc {

/// A simple EPC server implementation.
class SimpleRemoteEPCServer : public SimpleRemoteEPCTransportClient {
public:
  using ReportErrorFunction = unique_function<void(Error)>;

  /// Dispatches calls to runWrapper.
  class Dispatcher {
  public:
    virtual ~Dispatcher();
    virtual void dispatch(unique_function<void()> Work) = 0;
    virtual void shutdown() = 0;
  };

#if LLVM_ENABLE_THREADS
  class ThreadDispatcher : public Dispatcher {
  public:
    void dispatch(unique_function<void()> Work) override;
    void shutdown() override;

  private:
    std::mutex DispatchMutex;
    bool Running = true;
    size_t Outstanding = 0;
    std::condition_variable OutstandingCV;
  };
#endif

  class Setup {
    friend class SimpleRemoteEPCServer;

  public:
    SimpleRemoteEPCServer &server() { return S; }
    StringMap<std::vector<char>> &bootstrapMap() { return BootstrapMap; }
    template <typename T, typename SPSTagT>
    void setBootstrapMapValue(std::string Key, const T &Value) {
      std::vector<char> Buffer;
      Buffer.resize(shared::SPSArgList<SPSTagT>::size(Value));
      shared::SPSOutputBuffer OB(Buffer.data(), Buffer.size());
      bool Success = shared::SPSArgList<SPSTagT>::serialize(OB, Value);
      (void)Success;
      assert(Success && "Bootstrap map value serialization failed");
      BootstrapMap[std::move(Key)] = std::move(Buffer);
    }
    StringMap<ExecutorAddr> &bootstrapSymbols() { return BootstrapSymbols; }
    std::vector<std::unique_ptr<ExecutorBootstrapService>> &services() {
      return Services;
    }
    void setDispatcher(std::unique_ptr<Dispatcher> D) { S.D = std::move(D); }
    void setErrorReporter(unique_function<void(Error)> ReportError) {
      S.ReportError = std::move(ReportError);
    }

  private:
    Setup(SimpleRemoteEPCServer &S) : S(S) {}
    SimpleRemoteEPCServer &S;
    StringMap<std::vector<char>> BootstrapMap;
    StringMap<ExecutorAddr> BootstrapSymbols;
    std::vector<std::unique_ptr<ExecutorBootstrapService>> Services;
  };

  static StringMap<ExecutorAddr> defaultBootstrapSymbols();

  template <typename TransportT, typename... TransportTCtorArgTs>
  static Expected<std::unique_ptr<SimpleRemoteEPCServer>>
  Create(unique_function<Error(Setup &S)> SetupFunction,
         TransportTCtorArgTs &&...TransportTCtorArgs) {
    auto Server = std::make_unique<SimpleRemoteEPCServer>();
    Setup S(*Server);
    if (auto Err = SetupFunction(S))
      return std::move(Err);

    // Set ReportError up-front so that it can be used if the construction
    // process fails.
    if (!Server->ReportError)
      Server->ReportError = [](Error Err) {
        logAllUnhandledErrors(std::move(Err), errs(), "SimpleRemoteEPCServer ");
      };

    // Attempt to create transport.
    auto T = TransportT::Create(
        *Server, std::forward<TransportTCtorArgTs>(TransportTCtorArgs)...);
    if (!T)
      return T.takeError();
    Server->T = std::move(*T);
    if (auto Err = Server->T->start())
      return std::move(Err);

    // If transport creation succeeds then start up services.
    Server->Services = std::move(S.services());
    Server->Services.push_back(
        std::make_unique<rt_bootstrap::SimpleExecutorDylibManager>());
    for (auto &Service : Server->Services)
      Service->addBootstrapSymbols(S.bootstrapSymbols());

    if (auto Err = Server->sendSetupMessage(std::move(S.BootstrapMap),
                                            std::move(S.BootstrapSymbols)))
      return std::move(Err);
    return std::move(Server);
  }

  /// Set an error reporter for this server.
  void setErrorReporter(ReportErrorFunction ReportError) {
    this->ReportError = std::move(ReportError);
  }

  /// Call to handle an incoming message.
  ///
  /// Returns 'Disconnect' if the message is a 'detach' message from the
  /// remote; otherwise returns 'Continue'. If the server has moved to an
  /// error state, returns an error, which should be reported and treated as
  /// a 'Disconnect'.
  Expected<HandleMessageAction>
  handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo, ExecutorAddr TagAddr,
                SimpleRemoteEPCArgBytesVector ArgBytes) override;

  Error waitForDisconnect();

  void handleDisconnect(Error Err) override;

private:
  Error sendMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
                    ExecutorAddr TagAddr, ArrayRef<char> ArgBytes);

  Error sendSetupMessage(StringMap<std::vector<char>> BootstrapMap,
                         StringMap<ExecutorAddr> BootstrapSymbols);

  Error handleResult(uint64_t SeqNo, ExecutorAddr TagAddr,
                     SimpleRemoteEPCArgBytesVector ArgBytes);
  void handleCallWrapper(uint64_t RemoteSeqNo, ExecutorAddr TagAddr,
                         SimpleRemoteEPCArgBytesVector ArgBytes);

  shared::WrapperFunctionResult
  doJITDispatch(const void *FnTag, const char *ArgData, size_t ArgSize);

  static shared::CWrapperFunctionResult jitDispatchEntry(void *DispatchCtx,
                                                         const void *FnTag,
                                                         const char *ArgData,
                                                         size_t ArgSize);

  uint64_t getNextSeqNo() { return NextSeqNo++; }
  void releaseSeqNo(uint64_t) {}

  using PendingJITDispatchResultsMap =
      DenseMap<uint64_t, std::promise<shared::WrapperFunctionResult> *>;

  std::mutex ServerStateMutex;
  std::condition_variable ShutdownCV;
  enum { ServerRunning, ServerShuttingDown, ServerShutDown } RunState;
  Error ShutdownErr = Error::success();
  std::unique_ptr<SimpleRemoteEPCTransport> T;
  std::unique_ptr<Dispatcher> D;
  std::vector<std::unique_ptr<ExecutorBootstrapService>> Services;
  ReportErrorFunction ReportError;

  uint64_t NextSeqNo = 0;
  PendingJITDispatchResultsMap PendingJITDispatchResults;
  std::vector<sys::DynamicLibrary> Dylibs;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEREMOTEEPCSERVER_H
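
// Usage sketch: an executor-side server over a file-descriptor transport,
// modeled on LLVM's executor tools. Assumes FDSimpleRemoteEPCTransport from
// Shared/SimpleRemoteEPCUtils.h and rt_bootstrap::SimpleExecutorMemoryManager
// from TargetProcess/SimpleExecutorMemoryManager.h; ThreadDispatcher requires
// LLVM_ENABLE_THREADS. InFD/OutFD are an established pipe or socket pair.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Expected<std::unique_ptr<SimpleRemoteEPCServer>>
launchServer(int InFD, int OutFD) {
  return SimpleRemoteEPCServer::Create<FDSimpleRemoteEPCTransport>(
      [](SimpleRemoteEPCServer::Setup &S) -> Error {
        S.setDispatcher(
            std::make_unique<SimpleRemoteEPCServer::ThreadDispatcher>());
        S.bootstrapSymbols() = SimpleRemoteEPCServer::defaultBootstrapSymbols();
        S.services().push_back(
            std::make_unique<rt_bootstrap::SimpleExecutorMemoryManager>());
        return Error::success();
      },
      InFD, OutFD);
}
} // end namespace example
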
// File: ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h
//===- ExecutorBootstrapService.h - Bootstrap symbols to session -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provides an interface for services that supply a set of bootstrap symbols.
//
// FIXME: The functionality in this file should be moved to the ORC runtime.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORBOOTSTRAPSERVICE_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORBOOTSTRAPSERVICE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

namespace llvm {
namespace orc {

class ExecutorBootstrapService {
public:
  virtual ~ExecutorBootstrapService();

  virtual void
  addBootstrapSymbols(StringMap<ExecutorAddr> &BootstrapSymbols) = 0;
  virtual Error shutdown() = 0;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORBOOTSTRAPSERVICE_H
// File: ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h
//===----- RegisterEHFrames.h -- Register EH frame sections -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Support for dynamically registering and deregistering eh-frame sections
// in-process via libunwind.
//
// FIXME: The functionality in this file should be moved to the ORC runtime.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_REGISTEREHFRAMES_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_REGISTEREHFRAMES_H

#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace orc {

/// Register frames in the given eh-frame section with libunwind.
Error registerEHFrameSection(const void *EHFrameSectionAddr,
                             size_t EHFrameSectionSize);

/// Unregister frames in the given eh-frame section with libunwind.
Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
                               size_t EHFrameSectionSize);

} // end namespace orc
} // end namespace llvm

extern "C" llvm::orc::shared::CWrapperFunctionResult
llvm_orc_registerEHFrameSectionWrapper(const char *Data, uint64_t Size);

extern "C" llvm::orc::shared::CWrapperFunctionResult
llvm_orc_deregisterEHFrameSectionWrapper(const char *Data, uint64_t Size);

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_REGISTEREHFRAMES_H
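
// Usage sketch: registering a just-linked eh-frame section and deregistering
// it again before the underlying memory is released. Addr and Size are
// placeholders for the section's bounds.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Error withRegisteredFrames(const void *Addr, size_t Size) {
  if (Error Err = registerEHFrameSection(Addr, Size))
    return Err;
  // ... run code that may unwind through the registered frames ...
  return deregisterEHFrameSection(Addr, Size);
}
} // end namespace example
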
// File: ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
//===---------------- SimpleExecutorMemoryManager.h -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A simple allocator class suitable for basic remote-JIT use.
//
// FIXME: The functionality in this file should be moved to the ORC runtime.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEEXECUTORMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEEXECUTORMEMORYMANAGER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h"
#include "llvm/Support/Error.h"

#include <mutex>

namespace llvm {
namespace orc {
namespace rt_bootstrap {

/// Simple page-based allocator.
class SimpleExecutorMemoryManager : public ExecutorBootstrapService {
public:
  virtual ~SimpleExecutorMemoryManager();

  Expected<ExecutorAddr> allocate(uint64_t Size);
  Error finalize(tpctypes::FinalizeRequest &FR);
  Error deallocate(const std::vector<ExecutorAddr> &Bases);

  Error shutdown() override;
  void addBootstrapSymbols(StringMap<ExecutorAddr> &M) override;

private:
  struct Allocation {
    size_t Size = 0;
    std::vector<shared::WrapperFunctionCall> DeallocationActions;
  };

  using AllocationsMap = DenseMap<void *, Allocation>;

  Error deallocateImpl(void *Base, Allocation &A);

  static llvm::orc::shared::CWrapperFunctionResult
  reserveWrapper(const char *ArgData, size_t ArgSize);

  static llvm::orc::shared::CWrapperFunctionResult
  finalizeWrapper(const char *ArgData, size_t ArgSize);

  static llvm::orc::shared::CWrapperFunctionResult
  deallocateWrapper(const char *ArgData, size_t ArgSize);

  std::mutex M;
  AllocationsMap Allocations;
};

} // end namespace rt_bootstrap
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEEXECUTORMEMORYMANAGER_H
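
// Usage sketch: reserving and releasing a block in the executor process. A
// real client would call finalize() in between to apply protections and
// record deallocation actions; that step is omitted here.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Error roundTrip(rt_bootstrap::SimpleExecutorMemoryManager &MemMgr) {
  Expected<ExecutorAddr> Base = MemMgr.allocate(64 * 1024);
  if (!Base)
    return Base.takeError();
  // deallocate takes the base addresses of previously allocated blocks.
  return MemMgr.deallocate({*Base});
}
} // end namespace example
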
// File: ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h
//===- JITLoaderGDB.h - Register objects via GDB JIT interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Register objects for access by debuggers via the GDB JIT interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_JITLOADERGDB_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_JITLOADERGDB_H

#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include <cstdint>

extern "C" llvm::orc::shared::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBWrapper(const char *Data, uint64_t Size);

extern "C" llvm::orc::shared::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBAllocAction(const char *Data, size_t Size);

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_JITLOADERGDB_H
// File: ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
//===--------------- SimpleExecutorDylibManager.h ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A simple dynamic library management class. Allows dynamic libraries to be
// loaded and searched.
//
// FIXME: The functionality in this file should be moved to the ORC runtime.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEEXECUTORDYLIBMANAGER_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEEXECUTORDYLIBMANAGER_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Error.h"

#include <mutex>

namespace llvm {
namespace orc {
namespace rt_bootstrap {

/// Simple dynamic library management class.
class SimpleExecutorDylibManager : public ExecutorBootstrapService {
public:
  virtual ~SimpleExecutorDylibManager();

  Expected<tpctypes::DylibHandle> open(const std::string &Path, uint64_t Mode);
  Expected<std::vector<ExecutorAddr>> lookup(tpctypes::DylibHandle H,
                                             const RemoteSymbolLookupSet &L);

  Error shutdown() override;
  void addBootstrapSymbols(StringMap<ExecutorAddr> &M) override;

private:
  using DylibSet = DenseSet<void *>;

  static llvm::orc::shared::CWrapperFunctionResult
  openWrapper(const char *ArgData, size_t ArgSize);

  static llvm::orc::shared::CWrapperFunctionResult
  lookupWrapper(const char *ArgData, size_t ArgSize);

  std::mutex M;
  DylibSet Dylibs;
};

} // end namespace rt_bootstrap
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_SIMPLEEXECUTORDYLIBMANAGER_H
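
// Usage sketch: opening the host process itself (empty path, following
// sys::DynamicLibrary conventions -- an assumption worth verifying) and
// looking up one required symbol. RemoteSymbolLookupSet and its element
// layout {Name, Required} are assumed from Shared/SimpleRemoteEPCUtils.h.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Expected<ExecutorAddr>
findSymbol(rt_bootstrap::SimpleExecutorDylibManager &DM, StringRef Name) {
  auto H = DM.open("", 0);
  if (!H)
    return H.takeError();
  RemoteSymbolLookupSet Query;
  Query.push_back({Name.str(), /*Required=*/true});
  auto Addrs = DM.lookup(*H, Query);
  if (!Addrs)
    return Addrs.takeError();
  return (*Addrs)[0];
}
} // end namespace example
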
// File: ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h
//===-- TargetExecutionUtils.h - Utils for execution in target --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for execution in the target process.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_TARGETEXECUTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_TARGETEXECUTIONUTILS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <string>

namespace llvm {
namespace orc {

/// Run a main function, returning the result.
///
/// If the optional ProgramName argument is given then it will be inserted
/// before the strings in Args as the first argument to the called function.
///
/// It is legal to have an empty argument list and no program name; however,
/// many main functions expect at least a name argument and will fail if
/// none is provided.
int runAsMain(int (*Main)(int, char *[]), ArrayRef<std::string> Args,
              std::optional<StringRef> ProgramName = std::nullopt);

int runAsVoidFunction(int (*Func)(void));
int runAsIntFunction(int (*Func)(int), int Arg);

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_TARGETEXECUTIONUTILS_H
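
// Usage sketch: invoking a main-style entry point. runAsMain inserts the
// program name (if given) as argv[0] ahead of Args; the callee below simply
// returns argc, so the call returns 2.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline int demoMain(int Argc, char *[]) { return Argc; }

inline int runDemo() {
  std::vector<std::string> Args{"--verbose"};
  return runAsMain(&demoMain, Args, StringRef("demo"));
}
} // end namespace example
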
// File: ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
//===----------- ExecutorSharedMemoryMapperService.h ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE_H
#define LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
#include "llvm/ExecutionEngine/Orc/TargetProcess/ExecutorBootstrapService.h"

#include <atomic>
#include <mutex>

#if defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {
namespace rt_bootstrap {

class ExecutorSharedMemoryMapperService final
    : public ExecutorBootstrapService {
public:
  ~ExecutorSharedMemoryMapperService() {}

  Expected<std::pair<ExecutorAddr, std::string>> reserve(uint64_t Size);
  Expected<ExecutorAddr> initialize(ExecutorAddr Reservation,
                                    tpctypes::SharedMemoryFinalizeRequest &FR);

  Error deinitialize(const std::vector<ExecutorAddr> &Bases);
  Error release(const std::vector<ExecutorAddr> &Bases);

  Error shutdown() override;
  void addBootstrapSymbols(StringMap<ExecutorAddr> &M) override;

private:
  struct Allocation {
    std::vector<shared::WrapperFunctionCall> DeinitializationActions;
  };
  using AllocationMap = DenseMap<ExecutorAddr, Allocation>;

  struct Reservation {
    size_t Size;
    std::vector<ExecutorAddr> Allocations;
#if defined(_WIN32)
    HANDLE SharedMemoryFile;
#endif
  };
  using ReservationMap = DenseMap<void *, Reservation>;

  static llvm::orc::shared::CWrapperFunctionResult
  reserveWrapper(const char *ArgData, size_t ArgSize);

  static llvm::orc::shared::CWrapperFunctionResult
  initializeWrapper(const char *ArgData, size_t ArgSize);

  static llvm::orc::shared::CWrapperFunctionResult
  deinitializeWrapper(const char *ArgData, size_t ArgSize);

  static llvm::orc::shared::CWrapperFunctionResult
  releaseWrapper(const char *ArgData, size_t ArgSize);

#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  std::atomic<int> SharedMemoryCount{0};
#endif

  std::mutex Mutex;
  ReservationMap Reservations;
  AllocationMap Allocations;
};

} // namespace rt_bootstrap
} // namespace orc
} // namespace llvm
#endif // LLVM_EXECUTIONENGINE_ORC_TARGETPROCESS_EXECUTORSHAREDMEMORYMAPPERSERVICE_H
// File: ExecutionEngine/Orc/SimpleRemoteEPC.h
//===---- SimpleRemoteEPC.h - Simple remote executor control ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Simple remote executor process control.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SIMPLEREMOTEEPC_H
#define LLVM_EXECUTIONENGINE_ORC_SIMPLEREMOTEEPC_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h"
#include "llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h"
#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MSVCErrorWorkarounds.h"

#include <future>

namespace llvm {
namespace orc {

class SimpleRemoteEPC : public ExecutorProcessControl,
                        public SimpleRemoteEPCTransportClient {
public:
  /// A setup object containing callbacks to construct a memory manager and
  /// memory access object. Both are optional. If not specified,
  /// EPCGenericJITLinkMemoryManager and EPCGenericMemoryAccess will be used.
  struct Setup {
    using CreateMemoryManagerFn =
        Expected<std::unique_ptr<jitlink::JITLinkMemoryManager>>(
            SimpleRemoteEPC &);
    using CreateMemoryAccessFn =
        Expected<std::unique_ptr<MemoryAccess>>(SimpleRemoteEPC &);

    unique_function<CreateMemoryManagerFn> CreateMemoryManager;
    unique_function<CreateMemoryAccessFn> CreateMemoryAccess;
  };

  /// Create a SimpleRemoteEPC using the given transport type and args.
  template <typename TransportT, typename... TransportTCtorArgTs>
  static Expected<std::unique_ptr<SimpleRemoteEPC>>
  Create(std::unique_ptr<TaskDispatcher> D, Setup S,
         TransportTCtorArgTs &&...TransportTCtorArgs) {
    std::unique_ptr<SimpleRemoteEPC> SREPC(
        new SimpleRemoteEPC(std::make_shared<SymbolStringPool>(),
                            std::move(D)));
    auto T = TransportT::Create(
        *SREPC, std::forward<TransportTCtorArgTs>(TransportTCtorArgs)...);
    if (!T)
      return T.takeError();
    SREPC->T = std::move(*T);
    if (auto Err = SREPC->setup(std::move(S)))
      return joinErrors(std::move(Err), SREPC->disconnect());
    return std::move(SREPC);
  }

  SimpleRemoteEPC(const SimpleRemoteEPC &) = delete;
  SimpleRemoteEPC &operator=(const SimpleRemoteEPC &) = delete;
  SimpleRemoteEPC(SimpleRemoteEPC &&) = delete;
  SimpleRemoteEPC &operator=(SimpleRemoteEPC &&) = delete;
  ~SimpleRemoteEPC();

  Expected<tpctypes::DylibHandle> loadDylib(const char *DylibPath) override;

  Expected<std::vector<tpctypes::LookupResult>>
  lookupSymbols(ArrayRef<LookupRequest> Request) override;

  Expected<int32_t> runAsMain(ExecutorAddr MainFnAddr,
                              ArrayRef<std::string> Args) override;

  Expected<int32_t> runAsVoidFunction(ExecutorAddr VoidFnAddr) override;

  Expected<int32_t> runAsIntFunction(ExecutorAddr IntFnAddr, int Arg) override;

  void callWrapperAsync(ExecutorAddr WrapperFnAddr,
                        IncomingWFRHandler OnComplete,
                        ArrayRef<char> ArgBuffer) override;

  Error disconnect() override;

  Expected<HandleMessageAction>
  handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo, ExecutorAddr TagAddr,
                SimpleRemoteEPCArgBytesVector ArgBytes) override;

  void handleDisconnect(Error Err) override;

private:
  SimpleRemoteEPC(std::shared_ptr<SymbolStringPool> SSP,
                  std::unique_ptr<TaskDispatcher> D)
    : ExecutorProcessControl(std::move(SSP), std::move(D)) {}

  static Expected<std::unique_ptr<jitlink::JITLinkMemoryManager>>
  createDefaultMemoryManager(SimpleRemoteEPC &SREPC);
  static Expected<std::unique_ptr<MemoryAccess>>
  createDefaultMemoryAccess(SimpleRemoteEPC &SREPC);

  Error sendMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
                    ExecutorAddr TagAddr, ArrayRef<char> ArgBytes);

  Error handleSetup(uint64_t SeqNo, ExecutorAddr TagAddr,
                    SimpleRemoteEPCArgBytesVector ArgBytes);
  Error setup(Setup S);

  Error handleResult(uint64_t SeqNo, ExecutorAddr TagAddr,
                     SimpleRemoteEPCArgBytesVector ArgBytes);
  void handleCallWrapper(uint64_t RemoteSeqNo, ExecutorAddr TagAddr,
                         SimpleRemoteEPCArgBytesVector ArgBytes);
  Error handleHangup(SimpleRemoteEPCArgBytesVector ArgBytes);

  uint64_t getNextSeqNo() { return NextSeqNo++; }
  void releaseSeqNo(uint64_t SeqNo) {}

  using PendingCallWrapperResultsMap =
    DenseMap<uint64_t, IncomingWFRHandler>;

  std::mutex SimpleRemoteEPCMutex;
  std::condition_variable DisconnectCV;
  bool Disconnected = false;
  Error DisconnectErr = Error::success();

  std::unique_ptr<SimpleRemoteEPCTransport> T;
  std::unique_ptr<jitlink::JITLinkMemoryManager> OwnedMemMgr;
  std::unique_ptr<MemoryAccess> OwnedMemAccess;

  std::unique_ptr<EPCGenericDylibManager> DylibMgr;
  ExecutorAddr RunAsMainAddr;
  ExecutorAddr RunAsVoidFunctionAddr;
  ExecutorAddr RunAsIntFunctionAddr;

  uint64_t NextSeqNo = 0;
  PendingCallWrapperResultsMap PendingCallWrapperResults;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SIMPLEREMOTEEPC_H
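
// Usage sketch: controller-side construction over a file-descriptor
// transport. Assumes FDSimpleRemoteEPCTransport from
// Shared/SimpleRemoteEPCUtils.h and InPlaceTaskDispatcher from TaskDispatch.h.
// A default-constructed Setup selects EPCGenericJITLinkMemoryManager and
// EPCGenericMemoryAccess, as documented above.
namespace example {
using namespace llvm;
using namespace llvm::orc;

inline Expected<std::unique_ptr<SimpleRemoteEPC>>
connectToExecutor(int FromExecutorFD, int ToExecutorFD) {
  return SimpleRemoteEPC::Create<FDSimpleRemoteEPCTransport>(
      std::make_unique<InPlaceTaskDispatcher>(), SimpleRemoteEPC::Setup(),
      FromExecutorFD, ToExecutorFD);
}
} // end namespace example
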
// File: ExecutionEngine/Orc/OrcABISupport.h
//===- OrcABISupport.h - ABI support code -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ABI specific code for Orc, e.g. callback assembly.
//
// ABI classes should be part of the JIT *target* process, not the host
// process (except where you're doing hosted JITing and the two are one and the
// same).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
#define LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>

namespace llvm {
namespace orc {

struct IndirectStubsAllocationSizes {
  uint64_t StubBytes = 0;
  uint64_t PointerBytes = 0;
  unsigned NumStubs = 0;
};

template <typename ORCABI>
IndirectStubsAllocationSizes
getIndirectStubsBlockSizes(unsigned MinStubs, unsigned RoundToMultipleOf = 0) {
  assert(
      (RoundToMultipleOf == 0 || (RoundToMultipleOf % ORCABI::StubSize == 0)) &&
      "RoundToMultipleOf is not a multiple of stub size");
  uint64_t StubBytes = MinStubs * ORCABI::StubSize;
  if (RoundToMultipleOf)
    StubBytes = alignTo(StubBytes, RoundToMultipleOf);
  unsigned NumStubs = StubBytes / ORCABI::StubSize;
  uint64_t PointerBytes = NumStubs * ORCABI::PointerSize;
  return {StubBytes, PointerBytes, NumStubs};
}

/// Generic ORC ABI support.
///
/// This class can be substituted as the target architecture support class for
/// ORC templates that require one (e.g. IndirectStubsManagers). It does not
/// support lazy JITing, however, and any attempt to use that functionality
/// will result in execution of an llvm_unreachable.
class OrcGenericABI {
public:
  static constexpr unsigned PointerSize = sizeof(uintptr_t);
  static constexpr unsigned TrampolineSize = 1;
  static constexpr unsigned StubSize = 1;
  static constexpr unsigned StubToPointerMaxDisplacement = 1;
  static constexpr unsigned ResolverCodeSize = 1;

  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddr,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr) {
    llvm_unreachable("writeResolverCode is not supported by the generic host "
                     "support class");
  }

  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddr,
                               ExecutorAddr ResolverAddr,
                               unsigned NumTrampolines) {
    llvm_unreachable("writeTrampolines is not supported by the generic host "
                     "support class");
  }

  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs) {
    llvm_unreachable(
        "writeIndirectStubsBlock is not supported by the generic host "
        "support class");
  }
};

class OrcAArch64 {
public:
  static constexpr unsigned PointerSize = 8;
  static constexpr unsigned TrampolineSize = 12;
  static constexpr unsigned StubSize = 8;
  static constexpr unsigned StubToPointerMaxDisplacement = 1U << 27;
  static constexpr unsigned ResolverCodeSize = 0x120;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverAddr,
                               unsigned NumTrampolines);

  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};
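
// Usage sketch: computing indirect-stub block sizes with the
// getIndirectStubsBlockSizes helper above. For OrcAArch64 (StubSize = 8,
// PointerSize = 8), requesting at least 17 stubs rounded up to a 4096-byte
// block yields 512 stubs with matching 4096-byte stub and pointer blocks.
inline IndirectStubsAllocationSizes exampleStubSizes() {
  auto Sizes = getIndirectStubsBlockSizes<OrcAArch64>(
      /*MinStubs=*/17, /*RoundToMultipleOf=*/4096);
  // Sizes.StubBytes == 4096, Sizes.NumStubs == 512,
  // Sizes.PointerBytes == 512 * 8 == 4096.
  return Sizes;
}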

/// X86_64 code that's common to all ABIs.
///
/// X86_64 supports lazy JITing.
class OrcX86_64_Base {
public:
  static constexpr unsigned PointerSize = 8;
  static constexpr unsigned TrampolineSize = 8;
  static constexpr unsigned StubSize = 8;
  static constexpr unsigned StubToPointerMaxDisplacement = 1U << 31;

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverAddr,
                               unsigned NumTrampolines);

  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};

/// X86_64 support for SysV ABI (Linux, MacOSX).
///
/// X86_64_SysV supports lazy JITing.
class OrcX86_64_SysV : public OrcX86_64_Base {
public:
  static constexpr unsigned ResolverCodeSize = 0x6C;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);
};

/// X86_64 support for Win32.
///
/// X86_64_Win32 supports lazy JITing.
class OrcX86_64_Win32 : public OrcX86_64_Base {
public:
  static constexpr unsigned ResolverCodeSize = 0x74;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);
};

/// I386 support.
///
/// I386 supports lazy JITing.
class OrcI386 {
public:
  static constexpr unsigned PointerSize = 4;
  static constexpr unsigned TrampolineSize = 8;
  static constexpr unsigned StubSize = 8;
  static constexpr unsigned StubToPointerMaxDisplacement = 1 << 31;
  static constexpr unsigned ResolverCodeSize = 0x4a;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverAddr,
                               unsigned NumTrampolines);

  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};

/// Mips32 support.
///
/// Mips32 supports lazy JITing.
class OrcMips32_Base {
public:
  static constexpr unsigned PointerSize = 4;
  static constexpr unsigned TrampolineSize = 20;
  static constexpr unsigned StubSize = 8;
  static constexpr unsigned StubToPointerMaxDisplacement = 1 << 31;
  static constexpr unsigned ResolverCodeSize = 0xfc;

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverAddr,
                               unsigned NumTrampolines);

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverBlockWorkingMem,
                                ExecutorAddr ResolverBlockTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr, bool isBigEndian);
  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};

class OrcMips32Le : public OrcMips32_Base {
public:
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr) {
    OrcMips32_Base::writeResolverCode(ResolverWorkingMem, ResolverTargetAddress,
                                      ReentryFnAddr, ReentryCtxAddr, false);
  }
};

class OrcMips32Be : public OrcMips32_Base {
public:
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr) {
    OrcMips32_Base::writeResolverCode(ResolverWorkingMem, ResolverTargetAddress,
                                      ReentryFnAddr, ReentryCtxAddr, true);
  }
};

/// Mips64 support.
///
/// Mips64 supports lazy JITing.
class OrcMips64 {
public:
  static constexpr unsigned PointerSize = 8;
  static constexpr unsigned TrampolineSize = 40;
  static constexpr unsigned StubSize = 32;
  static constexpr unsigned StubToPointerMaxDisplacement = 1 << 31;
  static constexpr unsigned ResolverCodeSize = 0x120;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverFnAddr,
                               unsigned NumTrampolines);
  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};

/// RISC-V 64 support.
///
/// RISC-V 64 supports lazy JITing.
class OrcRiscv64 {
public:
  static constexpr unsigned PointerSize = 8;
  static constexpr unsigned TrampolineSize = 16;
  static constexpr unsigned StubSize = 16;
  static constexpr unsigned StubToPointerMaxDisplacement = 1 << 31;
  static constexpr unsigned ResolverCodeSize = 0x148;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverFnAddr,
                               unsigned NumTrampolines);
  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};

/// LoongArch 64 support.
///
/// LoongArch 64 supports lazy JITing.
class OrcLoongArch64 {
public:
  static constexpr unsigned PointerSize = 8;
  static constexpr unsigned TrampolineSize = 16;
  static constexpr unsigned StubSize = 16;
  static constexpr unsigned StubToPointerMaxDisplacement = 1 << 31;
  static constexpr unsigned ResolverCodeSize = 0xc8;

  /// Write the resolver code into the given memory. The user is
  /// responsible for allocating the memory and setting permissions.
  ///
  /// ReentryFnAddr should be the address of a function whose signature matches
  /// void* (*)(void *TrampolineAddr, void *ReentryCtxAddr). The ReentryCtxAddr
  /// argument of writeResolverCode will be passed as the second argument to
  /// the function at ReentryFnAddr.
  static void writeResolverCode(char *ResolverWorkingMem,
                                ExecutorAddr ResolverTargetAddress,
                                ExecutorAddr ReentryFnAddr,
                                ExecutorAddr ReentryCtxAddr);

  /// Write the requested number of trampolines into the given memory,
  /// which must be big enough to hold 1 pointer, plus NumTrampolines
  /// trampolines.
  static void writeTrampolines(char *TrampolineBlockWorkingMem,
                               ExecutorAddr TrampolineBlockTargetAddress,
                               ExecutorAddr ResolverFnAddr,
                               unsigned NumTrampolines);

  /// Write NumStubs indirect stubs to working memory at StubsBlockWorkingMem.
  /// Stubs will be written as if linked at StubsBlockTargetAddress, with the
  /// Nth stub using the Nth pointer in memory starting at
  /// PointersBlockTargetAddress.
  static void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                                      ExecutorAddr StubsBlockTargetAddress,
                                      ExecutorAddr PointersBlockTargetAddress,
                                      unsigned NumStubs);
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_ORCABISUPPORT_H
//===-- SpeculateAnalyses.h - Speculation Analyses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Contains the analyses and result interpretation used to select functions
/// that are likely to be called next, so that they can be speculatively
/// compiled before they are called. [Purely experimental]
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SPECULATEANALYSES_H
#define LLVM_EXECUTIONENGINE_ORC_SPECULATEANALYSES_H

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Speculation.h"

#include <vector>

namespace llvm {

namespace orc {

// Provides code common to the speculation queries below.
class SpeculateQuery {
protected:
  void findCallees(const BasicBlock *, DenseSet<StringRef> &);
  bool isStraightLine(const Function &F);

public:
  using ResultTy = std::optional<DenseMap<StringRef, DenseSet<StringRef>>>;
};

// Direct calls in high frequency basic blocks are extracted.
class BlockFreqQuery : public SpeculateQuery {
  size_t numBBToGet(size_t);

public:
  // Find likely next-executed functions based on IR block frequency.
  ResultTy operator()(Function &F);
};
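
// Example (illustrative sketch): run the query on a function and walk the
// resulting name -> likely-callees map. `F` is assumed to be a Function for
// which block frequency information can be computed.
//
// \code{.cpp}
//   BlockFreqQuery Q;
//   if (auto R = Q(F))
//     for (auto &KV : *R)
//       dbgs() << KV.first << " -> " << KV.second.size() << " callees\n";
// \endcode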

// This query generates a sequence of basic blocks that follows the order of
// execution: a handful of blocks with the highest block frequencies are
// taken, then paths to the entry and exit blocks are discovered by
// traversing the CFG up and down from them.
class SequenceBBQuery : public SpeculateQuery {
  struct WalkDirection {
    bool Upward = true, Downward = true;
    // True if the associated block contains a call.
    bool CallerBlock = false;
  };

public:
  using VisitedBlocksInfoTy = DenseMap<const BasicBlock *, WalkDirection>;
  using BlockListTy = SmallVector<const BasicBlock *, 8>;
  using BackEdgesInfoTy =
      SmallVector<std::pair<const BasicBlock *, const BasicBlock *>, 8>;
  using BlockFreqInfoTy =
      SmallVector<std::pair<const BasicBlock *, uint64_t>, 8>;

private:
  std::size_t getHottestBlocks(std::size_t TotalBlocks);
  BlockListTy rearrangeBB(const Function &, const BlockListTy &);
  BlockListTy queryCFG(Function &, const BlockListTy &);
  void traverseToEntryBlock(const BasicBlock *, const BlockListTy &,
                            const BackEdgesInfoTy &,
                            const BranchProbabilityInfo *,
                            VisitedBlocksInfoTy &);
  void traverseToExitBlock(const BasicBlock *, const BlockListTy &,
                           const BackEdgesInfoTy &,
                           const BranchProbabilityInfo *,
                           VisitedBlocksInfoTy &);

public:
  ResultTy operator()(Function &F);
};

} // namespace orc
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SPECULATEANALYSES_H
//===- EPCGenericMemoryAccess.h - Generic EPC MemoryAccess impl -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements ExecutorProcessControl::MemoryAccess by making calls to
// ExecutorProcessControl::callWrapperAsync.
//
// This simplifies the implementation of new ExecutorProcessControl instances,
// as this implementation will always work (at the cost of some performance
// overhead for the calls).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCGENERICMEMORYACCESS_H
#define LLVM_EXECUTIONENGINE_ORC_EPCGENERICMEMORYACCESS_H

#include "llvm/ExecutionEngine/Orc/Core.h"

namespace llvm {
namespace orc {

class EPCGenericMemoryAccess : public ExecutorProcessControl::MemoryAccess {
public:
  /// Function addresses for memory access.
  struct FuncAddrs {
    ExecutorAddr WriteUInt8s;
    ExecutorAddr WriteUInt16s;
    ExecutorAddr WriteUInt32s;
    ExecutorAddr WriteUInt64s;
    ExecutorAddr WriteBuffers;
  };

  /// Create an EPCGenericMemoryAccess instance from a given set of
  /// function addrs.
  EPCGenericMemoryAccess(ExecutorProcessControl &EPC, FuncAddrs FAs)
      : EPC(EPC), FAs(FAs) {}

  void writeUInt8sAsync(ArrayRef<tpctypes::UInt8Write> Ws,
                        WriteResultFn OnWriteComplete) override {
    using namespace shared;
    EPC.callSPSWrapperAsync<void(SPSSequence<SPSMemoryAccessUInt8Write>)>(
        FAs.WriteUInt8s, std::move(OnWriteComplete), Ws);
  }

  void writeUInt16sAsync(ArrayRef<tpctypes::UInt16Write> Ws,
                         WriteResultFn OnWriteComplete) override {
    using namespace shared;
    EPC.callSPSWrapperAsync<void(SPSSequence<SPSMemoryAccessUInt16Write>)>(
        FAs.WriteUInt16s, std::move(OnWriteComplete), Ws);
  }

  void writeUInt32sAsync(ArrayRef<tpctypes::UInt32Write> Ws,
                         WriteResultFn OnWriteComplete) override {
    using namespace shared;
    EPC.callSPSWrapperAsync<void(SPSSequence<SPSMemoryAccessUInt32Write>)>(
        FAs.WriteUInt32s, std::move(OnWriteComplete), Ws);
  }

  void writeUInt64sAsync(ArrayRef<tpctypes::UInt64Write> Ws,
                         WriteResultFn OnWriteComplete) override {
    using namespace shared;
    EPC.callSPSWrapperAsync<void(SPSSequence<SPSMemoryAccessUInt64Write>)>(
        FAs.WriteUInt64s, std::move(OnWriteComplete), Ws);
  }

  void writeBuffersAsync(ArrayRef<tpctypes::BufferWrite> Ws,
                         WriteResultFn OnWriteComplete) override {
    using namespace shared;
    EPC.callSPSWrapperAsync<void(SPSSequence<SPSMemoryAccessBufferWrite>)>(
        FAs.WriteBuffers, std::move(OnWriteComplete), Ws);
  }

private:
  ExecutorProcessControl &EPC;
  FuncAddrs FAs;
};
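
// Example (illustrative sketch; the wrapper-function symbol names below are
// hypothetical): populate FuncAddrs from the EPC's bootstrap symbols map,
// then construct the memory access object.
//
// \code{.cpp}
//   EPCGenericMemoryAccess::FuncAddrs FAs;
//   if (auto Err = EPC.getBootstrapSymbols(
//           {{FAs.WriteUInt8s, "__orc_rt_mem_write_uint8s"},
//            {FAs.WriteUInt16s, "__orc_rt_mem_write_uint16s"},
//            {FAs.WriteUInt32s, "__orc_rt_mem_write_uint32s"},
//            {FAs.WriteUInt64s, "__orc_rt_mem_write_uint64s"},
//            {FAs.WriteBuffers, "__orc_rt_mem_write_buffers"}}))
//     return Err;
//   auto MemAccess = std::make_unique<EPCGenericMemoryAccess>(EPC, FAs);
// \endcode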

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCGENERICMEMORYACCESS_H
//===- ExecutorProcessControl.h - Executor process control APIs -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for interacting with the executor processes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTORPROCESSCONTROL_H
#define LLVM_EXECUTIONENGINE_ORC_EXECUTORPROCESSCONTROL_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/ExecutionEngine/Orc/SymbolStringPool.h"
#include "llvm/ExecutionEngine/Orc/TaskDispatch.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/MSVCErrorWorkarounds.h"
#include "llvm/TargetParser/Triple.h"

#include <future>
#include <mutex>
#include <vector>

namespace llvm {
namespace orc {

class ExecutionSession;
class SymbolLookupSet;

/// ExecutorProcessControl supports interaction with a JIT target process.
class ExecutorProcessControl {
  friend class ExecutionSession;
public:

  /// A handler for incoming WrapperFunctionResults -- either return values
  /// from callWrapper* calls, or incoming JIT-dispatch requests.
  ///
  /// IncomingWFRHandlers are constructible from
  /// unique_function<void(shared::WrapperFunctionResult)>s using a RunInPlace
  /// or RunAsTask object.
  class IncomingWFRHandler {
    friend class ExecutorProcessControl;
  public:
    IncomingWFRHandler() = default;
    explicit operator bool() const { return !!H; }
    void operator()(shared::WrapperFunctionResult WFR) { H(std::move(WFR)); }
  private:
    template <typename FnT> IncomingWFRHandler(FnT &&Fn)
      : H(std::forward<FnT>(Fn)) {}

    unique_function<void(shared::WrapperFunctionResult)> H;
  };

  /// Constructs an IncomingWFRHandler from a function object that is callable
  /// as void(shared::WrapperFunctionResult). The function object will be called
  /// directly. This should be used with care as it may block listener threads
  /// in remote EPCs. It is only suitable for simple tasks (e.g. setting a
  /// future), or for performing some quick analysis before dispatching "real"
  /// work as a Task.
  class RunInPlace {
  public:
    template <typename FnT>
    IncomingWFRHandler operator()(FnT &&Fn) {
      return IncomingWFRHandler(std::forward<FnT>(Fn));
    }
  };

  /// Constructs an IncomingWFRHandler from a function object by creating a new
  /// function object that dispatches the original using a TaskDispatcher,
  /// wrapping the original as a GenericNamedTask.
  ///
  /// This is the default approach for running WFR handlers.
  class RunAsTask {
  public:
    RunAsTask(TaskDispatcher &D) : D(D) {}

    template <typename FnT>
    IncomingWFRHandler operator()(FnT &&Fn) {
      return IncomingWFRHandler(
          [&D = this->D, Fn = std::move(Fn)]
          (shared::WrapperFunctionResult WFR) mutable {
              D.dispatch(
                makeGenericNamedTask(
                    [Fn = std::move(Fn), WFR = std::move(WFR)]() mutable {
                      Fn(std::move(WFR));
                    }, "WFR handler task"));
          });
    }
  private:
    TaskDispatcher &D;
  };
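
  // Example (illustrative sketch): constructing IncomingWFRHandlers with each
  // policy. `D` is assumed to be a live TaskDispatcher.
  //
  // \code{.cpp}
  //   auto Inline = RunInPlace()(
  //       [](shared::WrapperFunctionResult R) { /* quick work only */ });
  //   auto Dispatched = RunAsTask(D)(
  //       [](shared::WrapperFunctionResult R) { /* heavier work */ });
  // \endcode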

  /// APIs for manipulating memory in the target process.
  class MemoryAccess {
  public:
    /// Callback function for asynchronous writes.
    using WriteResultFn = unique_function<void(Error)>;

    virtual ~MemoryAccess();

    virtual void writeUInt8sAsync(ArrayRef<tpctypes::UInt8Write> Ws,
                                  WriteResultFn OnWriteComplete) = 0;

    virtual void writeUInt16sAsync(ArrayRef<tpctypes::UInt16Write> Ws,
                                   WriteResultFn OnWriteComplete) = 0;

    virtual void writeUInt32sAsync(ArrayRef<tpctypes::UInt32Write> Ws,
                                   WriteResultFn OnWriteComplete) = 0;

    virtual void writeUInt64sAsync(ArrayRef<tpctypes::UInt64Write> Ws,
                                   WriteResultFn OnWriteComplete) = 0;

    virtual void writeBuffersAsync(ArrayRef<tpctypes::BufferWrite> Ws,
                                   WriteResultFn OnWriteComplete) = 0;

    Error writeUInt8s(ArrayRef<tpctypes::UInt8Write> Ws) {
      std::promise<MSVCPError> ResultP;
      auto ResultF = ResultP.get_future();
      writeUInt8sAsync(Ws,
                       [&](Error Err) { ResultP.set_value(std::move(Err)); });
      return ResultF.get();
    }

    Error writeUInt16s(ArrayRef<tpctypes::UInt16Write> Ws) {
      std::promise<MSVCPError> ResultP;
      auto ResultF = ResultP.get_future();
      writeUInt16sAsync(Ws,
                        [&](Error Err) { ResultP.set_value(std::move(Err)); });
      return ResultF.get();
    }

    Error writeUInt32s(ArrayRef<tpctypes::UInt32Write> Ws) {
      std::promise<MSVCPError> ResultP;
      auto ResultF = ResultP.get_future();
      writeUInt32sAsync(Ws,
                        [&](Error Err) { ResultP.set_value(std::move(Err)); });
      return ResultF.get();
    }

    Error writeUInt64s(ArrayRef<tpctypes::UInt64Write> Ws) {
      std::promise<MSVCPError> ResultP;
      auto ResultF = ResultP.get_future();
      writeUInt64sAsync(Ws,
                        [&](Error Err) { ResultP.set_value(std::move(Err)); });
      return ResultF.get();
    }

    Error writeBuffers(ArrayRef<tpctypes::BufferWrite> Ws) {
      std::promise<MSVCPError> ResultP;
      auto ResultF = ResultP.get_future();
      writeBuffersAsync(Ws,
                        [&](Error Err) { ResultP.set_value(std::move(Err)); });
      return ResultF.get();
    }
  };
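
  // Example (illustrative sketch): a blocking write of a single uint64_t into
  // the target process. `MA` is assumed to be the MemoryAccess for a live
  // executor and `Addr` a writable address within it.
  //
  // \code{.cpp}
  //   if (auto Err = MA.writeUInt64s({{Addr, 0xdeadbeefULL}}))
  //     logAllUnhandledErrors(std::move(Err), errs(), "write failed: ");
  // \endcode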

  /// A pair of a dylib and a set of symbols to be looked up.
  struct LookupRequest {
    LookupRequest(tpctypes::DylibHandle Handle, const SymbolLookupSet &Symbols)
        : Handle(Handle), Symbols(Symbols) {}
    tpctypes::DylibHandle Handle;
    const SymbolLookupSet &Symbols;
  };

  /// Contains the address of the dispatch function and context that the ORC
  /// runtime can use to call functions in the JIT.
  struct JITDispatchInfo {
    ExecutorAddr JITDispatchFunction;
    ExecutorAddr JITDispatchContext;
  };

  ExecutorProcessControl(std::shared_ptr<SymbolStringPool> SSP,
                         std::unique_ptr<TaskDispatcher> D)
    : SSP(std::move(SSP)), D(std::move(D)) {}

  virtual ~ExecutorProcessControl();

  /// Return the ExecutionSession associated with this instance.
  /// Not callable until the ExecutionSession has been associated.
  ExecutionSession &getExecutionSession() {
    assert(ES && "No ExecutionSession associated yet");
    return *ES;
  }

  /// Intern a symbol name in the SymbolStringPool.
  SymbolStringPtr intern(StringRef SymName) { return SSP->intern(SymName); }

  /// Return a shared pointer to the SymbolStringPool for this instance.
  std::shared_ptr<SymbolStringPool> getSymbolStringPool() const { return SSP; }

  TaskDispatcher &getDispatcher() { return *D; }

  /// Return the Triple for the target process.
  const Triple &getTargetTriple() const { return TargetTriple; }

  /// Get the page size for the target process.
  unsigned getPageSize() const { return PageSize; }

  /// Get the JIT dispatch function and context address for the executor.
  const JITDispatchInfo &getJITDispatchInfo() const { return JDI; }

  /// Return a MemoryAccess object for the target process.
  MemoryAccess &getMemoryAccess() const {
    assert(MemAccess && "No MemAccess object set.");
    return *MemAccess;
  }

  /// Return a JITLinkMemoryManager for the target process.
  jitlink::JITLinkMemoryManager &getMemMgr() const {
    assert(MemMgr && "No MemMgr object set");
    return *MemMgr;
  }

  /// Returns the bootstrap map.
  const StringMap<std::vector<char>> &getBootstrapMap() const {
    return BootstrapMap;
  }

  /// Look up and SPS-deserialize a bootstrap map value. If no value is stored
  /// for Key then Val is set to std::nullopt and success is returned.
  template <typename T, typename SPSTagT>
  Error getBootstrapMapValue(StringRef Key, std::optional<T> &Val) const {
    Val = std::nullopt;

    auto I = BootstrapMap.find(Key);
    if (I == BootstrapMap.end())
      return Error::success();

    T Tmp;
    shared::SPSInputBuffer IB(I->second.data(), I->second.size());
    if (!shared::SPSArgList<SPSTagT>::deserialize(IB, Tmp))
      return make_error<StringError>("Could not deserialize value for key " +
                                         Key,
                                     inconvertibleErrorCode());

    Val = std::move(Tmp);
    return Error::success();
  }
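
  // Example (illustrative sketch; the key "page-size" is hypothetical):
  // deserialize an optional uint64_t recorded by the executor.
  //
  // \code{.cpp}
  //   std::optional<uint64_t> RemotePageSize;
  //   if (auto Err = EPC.getBootstrapMapValue<uint64_t, uint64_t>(
  //           "page-size", RemotePageSize))
  //     return Err;
  // \endcode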

  /// Returns the bootstrap symbol map.
  const StringMap<ExecutorAddr> &getBootstrapSymbolsMap() const {
    return BootstrapSymbols;
  }

  /// For each (ExecutorAddr&, StringRef) pair, looks up the string in the
  /// bootstrap symbols map and writes its address to the ExecutorAddr if
  /// found. If any symbol is not found then the function returns an error.
  Error getBootstrapSymbols(
      ArrayRef<std::pair<ExecutorAddr &, StringRef>> Pairs) const {
    for (const auto &KV : Pairs) {
      auto I = BootstrapSymbols.find(KV.second);
      if (I == BootstrapSymbols.end())
        return make_error<StringError>("Symbol \"" + KV.second +
                                           "\" not found "
                                           "in bootstrap symbols map",
                                       inconvertibleErrorCode());

      KV.first = I->second;
    }
    return Error::success();
  }

  /// Load the dynamic library at the given path and return a handle to it.
  /// If DylibPath is null this function will return the global handle for
  /// the target process.
  virtual Expected<tpctypes::DylibHandle> loadDylib(const char *DylibPath) = 0;

  /// Search for symbols in the target process.
  ///
  /// The result of the lookup is a two-dimensional array of target addresses
  /// that corresponds to the lookup order. If a required symbol is not
  /// found then this method will return an error. If a weakly referenced
  /// symbol is not found then it will be assigned a '0' value.
  virtual Expected<std::vector<tpctypes::LookupResult>>
  lookupSymbols(ArrayRef<LookupRequest> Request) = 0;

  /// Run a function with a main-like signature.
  virtual Expected<int32_t> runAsMain(ExecutorAddr MainFnAddr,
                                      ArrayRef<std::string> Args) = 0;

  // TODO: move this to ORC runtime.
  /// Run a function with an int (*)(void) signature.
  virtual Expected<int32_t> runAsVoidFunction(ExecutorAddr VoidFnAddr) = 0;

  // TODO: move this to ORC runtime.
  /// Run a function with an int (*)(int) signature.
  virtual Expected<int32_t> runAsIntFunction(ExecutorAddr IntFnAddr,
                                             int Arg) = 0;

  /// Run a wrapper function in the executor. The given WFRHandler will be
  /// called on the result when it is returned.
  ///
  /// The wrapper function should be callable as:
  ///
  /// \code{.cpp}
  ///   CWrapperFunctionResult fn(uint8_t *Data, uint64_t Size);
  /// \endcode
  virtual void callWrapperAsync(ExecutorAddr WrapperFnAddr,
                                IncomingWFRHandler OnComplete,
                                ArrayRef<char> ArgBuffer) = 0;

  /// Run a wrapper function in the executor using the given Runner to dispatch
  /// OnComplete when the result is ready.
  template <typename RunPolicyT, typename FnT>
  void callWrapperAsync(RunPolicyT &&Runner, ExecutorAddr WrapperFnAddr,
                        FnT &&OnComplete, ArrayRef<char> ArgBuffer) {
    callWrapperAsync(
        WrapperFnAddr, Runner(std::forward<FnT>(OnComplete)), ArgBuffer);
  }

  /// Run a wrapper function in the executor. OnComplete will be dispatched
  /// as a GenericNamedTask using this instance's TaskDispatch object.
  template <typename FnT>
  void callWrapperAsync(ExecutorAddr WrapperFnAddr, FnT &&OnComplete,
                        ArrayRef<char> ArgBuffer) {
    callWrapperAsync(RunAsTask(*D), WrapperFnAddr,
                     std::forward<FnT>(OnComplete), ArgBuffer);
  }

  /// Run a wrapper function in the executor. The wrapper function should be
  /// callable as:
  ///
  /// \code{.cpp}
  ///   CWrapperFunctionResult fn(uint8_t *Data, uint64_t Size);
  /// \endcode
  shared::WrapperFunctionResult callWrapper(ExecutorAddr WrapperFnAddr,
                                            ArrayRef<char> ArgBuffer) {
    std::promise<shared::WrapperFunctionResult> RP;
    auto RF = RP.get_future();
    callWrapperAsync(
        RunInPlace(), WrapperFnAddr,
        [&](shared::WrapperFunctionResult R) {
          RP.set_value(std::move(R));
        }, ArgBuffer);
    return RF.get();
  }

  /// Run a wrapper function using SPS to serialize the arguments and
  /// deserialize the results.
  template <typename SPSSignature, typename RunPolicyT, typename SendResultT,
            typename... ArgTs>
  void callSPSWrapperAsync(RunPolicyT &&Runner, ExecutorAddr WrapperFnAddr,
                           SendResultT &&SendResult, const ArgTs &...Args) {
    shared::WrapperFunction<SPSSignature>::callAsync(
        [this, WrapperFnAddr, Runner = std::move(Runner)]
        (auto &&SendResult, const char *ArgData, size_t ArgSize) mutable {
          this->callWrapperAsync(std::move(Runner), WrapperFnAddr,
                                 std::move(SendResult),
                                 ArrayRef<char>(ArgData, ArgSize));
        },
        std::forward<SendResultT>(SendResult), Args...);
  }

  /// Run a wrapper function using SPS to serialize the arguments and
  /// deserialize the results.
  template <typename SPSSignature, typename SendResultT, typename... ArgTs>
  void callSPSWrapperAsync(ExecutorAddr WrapperFnAddr, SendResultT &&SendResult,
                           const ArgTs &...Args) {
    callSPSWrapperAsync<SPSSignature>(RunAsTask(*D), WrapperFnAddr,
                                      std::forward<SendResultT>(SendResult),
                                      Args...);
  }

  /// Run a wrapper function using SPS to serialize the arguments and
  /// deserialize the results.
  ///
  /// If SPSSignature is a non-void function signature then the second argument
  /// (the first in the Args list) should be a reference to a return value.
  template <typename SPSSignature, typename... WrapperCallArgTs>
  Error callSPSWrapper(ExecutorAddr WrapperFnAddr,
                       WrapperCallArgTs &&...WrapperCallArgs) {
    return shared::WrapperFunction<SPSSignature>::call(
        [this, WrapperFnAddr](const char *ArgData, size_t ArgSize) {
          return callWrapper(WrapperFnAddr, ArrayRef<char>(ArgData, ArgSize));
        },
        std::forward<WrapperCallArgTs>(WrapperCallArgs)...);
  }
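
  // Example (illustrative sketch): a blocking SPS call to a wrapper with an
  // int32_t(int32_t) SPS signature. Per the comment above, the argument after
  // the wrapper address receives the deserialized result. `EPC` and `FnAddr`
  // are assumed to exist.
  //
  // \code{.cpp}
  //   int32_t Result = 0;
  //   if (auto Err = EPC.callSPSWrapper<int32_t(int32_t)>(FnAddr, Result, 42))
  //     return Err;
  // \endcode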

  /// Disconnect from the target process.
  ///
  /// This should be called after the JIT session is shut down.
  virtual Error disconnect() = 0;

protected:

  std::shared_ptr<SymbolStringPool> SSP;
  std::unique_ptr<TaskDispatcher> D;
  ExecutionSession *ES = nullptr;
  Triple TargetTriple;
  unsigned PageSize = 0;
  JITDispatchInfo JDI;
  MemoryAccess *MemAccess = nullptr;
  jitlink::JITLinkMemoryManager *MemMgr = nullptr;
  StringMap<std::vector<char>> BootstrapMap;
  StringMap<ExecutorAddr> BootstrapSymbols;
};

/// An ExecutorProcessControl instance that asserts if any of its methods are
/// used. Suitable for use in unit tests, and by ORC clients who haven't moved
/// to ExecutorProcessControl-based APIs yet.
class UnsupportedExecutorProcessControl : public ExecutorProcessControl {
public:
  UnsupportedExecutorProcessControl(
      std::shared_ptr<SymbolStringPool> SSP = nullptr,
      std::unique_ptr<TaskDispatcher> D = nullptr,
      const std::string &TT = "", unsigned PageSize = 0)
      : ExecutorProcessControl(SSP ? std::move(SSP)
                               : std::make_shared<SymbolStringPool>(),
                               D ? std::move(D)
                               : std::make_unique<InPlaceTaskDispatcher>()) {
    this->TargetTriple = Triple(TT);
    this->PageSize = PageSize;
  }

  Expected<tpctypes::DylibHandle> loadDylib(const char *DylibPath) override {
    llvm_unreachable("Unsupported");
  }

  Expected<std::vector<tpctypes::LookupResult>>
  lookupSymbols(ArrayRef<LookupRequest> Request) override {
    llvm_unreachable("Unsupported");
  }

  Expected<int32_t> runAsMain(ExecutorAddr MainFnAddr,
                              ArrayRef<std::string> Args) override {
    llvm_unreachable("Unsupported");
  }

  Expected<int32_t> runAsVoidFunction(ExecutorAddr VoidFnAddr) override {
    llvm_unreachable("Unsupported");
  }

  Expected<int32_t> runAsIntFunction(ExecutorAddr IntFnAddr, int Arg) override {
    llvm_unreachable("Unsupported");
  }

  void callWrapperAsync(ExecutorAddr WrapperFnAddr,
                        IncomingWFRHandler OnComplete,
                        ArrayRef<char> ArgBuffer) override {
    llvm_unreachable("Unsupported");
  }

  Error disconnect() override { return Error::success(); }
};

/// An ExecutorProcessControl implementation targeting the current process.
class SelfExecutorProcessControl
    : public ExecutorProcessControl,
      private ExecutorProcessControl::MemoryAccess {
public:
  SelfExecutorProcessControl(
      std::shared_ptr<SymbolStringPool> SSP, std::unique_ptr<TaskDispatcher> D,
      Triple TargetTriple, unsigned PageSize,
      std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr);

  /// Create a SelfExecutorProcessControl with the given symbol string pool and
  /// memory manager.
  /// If no symbol string pool is given then one will be created.
  /// If no memory manager is given a jitlink::InProcessMemoryManager will
  /// be created and used by default.
  static Expected<std::unique_ptr<SelfExecutorProcessControl>>
  Create(std::shared_ptr<SymbolStringPool> SSP = nullptr,
         std::unique_ptr<TaskDispatcher> D = nullptr,
         std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr = nullptr);

  Expected<tpctypes::DylibHandle> loadDylib(const char *DylibPath) override;

  Expected<std::vector<tpctypes::LookupResult>>
  lookupSymbols(ArrayRef<LookupRequest> Request) override;

  Expected<int32_t> runAsMain(ExecutorAddr MainFnAddr,
                              ArrayRef<std::string> Args) override;

  Expected<int32_t> runAsVoidFunction(ExecutorAddr VoidFnAddr) override;

  Expected<int32_t> runAsIntFunction(ExecutorAddr IntFnAddr, int Arg) override;

  void callWrapperAsync(ExecutorAddr WrapperFnAddr,
                        IncomingWFRHandler OnComplete,
                        ArrayRef<char> ArgBuffer) override;

  Error disconnect() override;

private:
  void writeUInt8sAsync(ArrayRef<tpctypes::UInt8Write> Ws,
                        WriteResultFn OnWriteComplete) override;

  void writeUInt16sAsync(ArrayRef<tpctypes::UInt16Write> Ws,
                         WriteResultFn OnWriteComplete) override;

  void writeUInt32sAsync(ArrayRef<tpctypes::UInt32Write> Ws,
                         WriteResultFn OnWriteComplete) override;

  void writeUInt64sAsync(ArrayRef<tpctypes::UInt64Write> Ws,
                         WriteResultFn OnWriteComplete) override;

  void writeBuffersAsync(ArrayRef<tpctypes::BufferWrite> Ws,
                         WriteResultFn OnWriteComplete) override;

  static shared::CWrapperFunctionResult
  jitDispatchViaWrapperFunctionManager(void *Ctx, const void *FnTag,
                                       const char *Data, size_t Size);

  std::unique_ptr<jitlink::JITLinkMemoryManager> OwnedMemMgr;
  char GlobalManglingPrefix = 0;
};
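
// Example (illustrative sketch): create an in-process EPC with default
// settings and hand it to an ExecutionSession.
//
// \code{.cpp}
//   auto EPC = SelfExecutorProcessControl::Create();
//   if (!EPC)
//     return EPC.takeError();
//   ExecutionSession ES(std::move(*EPC));
// \endcode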

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTORPROCESSCONTROL_H
//===--------- TaskDispatch.h - ORC task dispatch utils ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Task and TaskDispatch classes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_TASKDISPATCH_H
#define LLVM_EXECUTIONENGINE_ORC_TASKDISPATCH_H

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "llvm/Support/raw_ostream.h"

#include <cassert>
#include <string>

#if LLVM_ENABLE_THREADS
#include <condition_variable>
#include <mutex>
#include <thread>
#endif

namespace llvm {
namespace orc {

/// Represents an abstract task for ORC to run.
class Task : public RTTIExtends<Task, RTTIRoot> {
public:
  static char ID;

  virtual ~Task() = default;

  /// Description of the task to be performed. Used for logging.
  virtual void printDescription(raw_ostream &OS) = 0;

  /// Run the task.
  virtual void run() = 0;

private:
  void anchor() override;
};

/// Base class for generic tasks.
class GenericNamedTask : public RTTIExtends<GenericNamedTask, Task> {
public:
  static char ID;
  static const char *DefaultDescription;
};

/// Generic task implementation.
template <typename FnT> class GenericNamedTaskImpl : public GenericNamedTask {
public:
  GenericNamedTaskImpl(FnT &&Fn, std::string DescBuffer)
      : Fn(std::forward<FnT>(Fn)), Desc(DescBuffer.c_str()),
        DescBuffer(std::move(DescBuffer)) {}
  GenericNamedTaskImpl(FnT &&Fn, const char *Desc)
      : Fn(std::forward<FnT>(Fn)), Desc(Desc) {
    assert(Desc && "Description cannot be null");
  }
  void printDescription(raw_ostream &OS) override { OS << Desc; }
  void run() override { Fn(); }

private:
  FnT Fn;
  const char *Desc;
  std::string DescBuffer;
};

/// Create a generic named task from a std::string description.
template <typename FnT>
std::unique_ptr<GenericNamedTask> makeGenericNamedTask(FnT &&Fn,
                                                       std::string Desc) {
  return std::make_unique<GenericNamedTaskImpl<FnT>>(std::forward<FnT>(Fn),
                                                     std::move(Desc));
}

/// Create a generic named task from a const char * description.
template <typename FnT>
std::unique_ptr<GenericNamedTask>
makeGenericNamedTask(FnT &&Fn, const char *Desc = nullptr) {
  if (!Desc)
    Desc = GenericNamedTask::DefaultDescription;
  return std::make_unique<GenericNamedTaskImpl<FnT>>(std::forward<FnT>(Fn),
                                                     Desc);
}
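
// Example (illustrative sketch): wrap a lambda as a named task and hand it to
// a dispatcher (see TaskDispatcher below). `Dispatcher` is assumed to exist.
//
// \code{.cpp}
//   auto T = makeGenericNamedTask([] { dbgs() << "hello from task\n"; },
//                                 "greeting task");
//   Dispatcher.dispatch(std::move(T));
// \endcode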

/// Abstract base for classes that dispatch ORC Tasks.
class TaskDispatcher {
public:
  virtual ~TaskDispatcher();

  /// Run the given task.
  virtual void dispatch(std::unique_ptr<Task> T) = 0;

  /// Called by ExecutionSession. Waits until all tasks have completed.
  virtual void shutdown() = 0;
};

/// Runs all tasks on the current thread.
class InPlaceTaskDispatcher : public TaskDispatcher {
public:
  void dispatch(std::unique_ptr<Task> T) override;
  void shutdown() override;
};

#if LLVM_ENABLE_THREADS

class DynamicThreadPoolTaskDispatcher : public TaskDispatcher {
public:
  void dispatch(std::unique_ptr<Task> T) override;
  void shutdown() override;
private:
  std::mutex DispatchMutex;
  bool Running = true;
  size_t Outstanding = 0;
  std::condition_variable OutstandingCV;
};

#endif // LLVM_ENABLE_THREADS

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_TASKDISPATCH_H
//===- EPCGenericDylibManager.h -- Generic EPC Dylib management -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements dylib loading and searching by making calls to
// ExecutorProcessControl::callWrapper.
//
// This simplifies the implementation of new ExecutorProcessControl instances,
// as this implementation will always work (at the cost of some performance
// overhead for the calls).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCGENERICDYLIBMANAGER_H
#define LLVM_EXECUTIONENGINE_ORC_EPCGENERICDYLIBMANAGER_H

#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"

namespace llvm {
namespace orc {

class SymbolLookupSet;

class EPCGenericDylibManager {
public:
  /// Symbol addresses for dylib management.
  struct SymbolAddrs {
    ExecutorAddr Instance;
    ExecutorAddr Open;
    ExecutorAddr Lookup;
  };

  /// Create an EPCGenericDylibManager instance, looking up the required
  /// symbol addresses in EPC's bootstrap symbols map.
  static Expected<EPCGenericDylibManager>
  CreateWithDefaultBootstrapSymbols(ExecutorProcessControl &EPC);

  /// Create an EPCGenericDylibManager instance from a given set of
  /// symbol addrs.
  EPCGenericDylibManager(ExecutorProcessControl &EPC, SymbolAddrs SAs)
      : EPC(EPC), SAs(SAs) {}

  /// Loads the dylib with the given name.
  Expected<tpctypes::DylibHandle> open(StringRef Path, uint64_t Mode);

  /// Looks up symbols within the given dylib.
  Expected<std::vector<ExecutorAddr>> lookup(tpctypes::DylibHandle H,
                                             const SymbolLookupSet &Lookup);

  /// Looks up symbols within the given dylib.
  Expected<std::vector<ExecutorAddr>>
  lookup(tpctypes::DylibHandle H, const RemoteSymbolLookupSet &Lookup);

private:
  ExecutorProcessControl &EPC;
  SymbolAddrs SAs;
};
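
// Example (illustrative sketch): open a dylib in the executor and look up one
// symbol. `EPC` is assumed to be connected and `Name` to be an interned
// SymbolStringPtr for the (mangled) symbol of interest.
//
// \code{.cpp}
//   auto DM = EPCGenericDylibManager::CreateWithDefaultBootstrapSymbols(EPC);
//   if (!DM)
//     return DM.takeError();
//   auto H = DM->open("/usr/lib/libm.dylib", 0);
//   if (!H)
//     return H.takeError();
//   auto Addrs = DM->lookup(*H, SymbolLookupSet(Name));
// \endcode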

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCGENERICDYLIBMANAGER_H
//===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// JIT layer for breaking up modules and inserting callbacks to allow
// individual functions to be compiled on demand.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
#include "llvm/ExecutionEngine/Orc/Speculation.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <set>
#include <utility>
#include <vector>

namespace llvm {
namespace orc {

class CompileOnDemandLayer : public IRLayer {
  friend class PartitioningIRMaterializationUnit;

public:
  /// Builder for IndirectStubsManagers.
  using IndirectStubsManagerBuilder =
      std::function<std::unique_ptr<IndirectStubsManager>()>;

  using GlobalValueSet = std::set<const GlobalValue *>;

  /// Partitioning function.
  using PartitionFunction =
      std::function<std::optional<GlobalValueSet>(GlobalValueSet Requested)>;

  /// Off-the-shelf partitioning which compiles all requested symbols (usually
  /// a single function at a time).
  static std::optional<GlobalValueSet>
  compileRequested(GlobalValueSet Requested);

  /// Off-the-shelf partitioning which compiles whole modules whenever any
  /// symbol in them is requested.
  static std::optional<GlobalValueSet>
  compileWholeModule(GlobalValueSet Requested);

  /// Construct a CompileOnDemandLayer.
  CompileOnDemandLayer(ExecutionSession &ES, IRLayer &BaseLayer,
                        LazyCallThroughManager &LCTMgr,
                        IndirectStubsManagerBuilder BuildIndirectStubsManager);

  /// Sets the partition function.
  void setPartitionFunction(PartitionFunction Partition);

  /// Sets the ImplSymbolMap
  void setImplMap(ImplSymbolMap *Imp);

  /// Emits the given module. This should not be called by clients: it will be
  /// called by the JIT when a definition added via the add method is requested.
  void emit(std::unique_ptr<MaterializationResponsibility> R,
            ThreadSafeModule TSM) override;

private:
  struct PerDylibResources {
  public:
    PerDylibResources(JITDylib &ImplD,
                      std::unique_ptr<IndirectStubsManager> ISMgr)
        : ImplD(ImplD), ISMgr(std::move(ISMgr)) {}
    JITDylib &getImplDylib() { return ImplD; }
    IndirectStubsManager &getISManager() { return *ISMgr; }

  private:
    JITDylib &ImplD;
    std::unique_ptr<IndirectStubsManager> ISMgr;
  };

  using PerDylibResourcesMap = std::map<const JITDylib *, PerDylibResources>;

  PerDylibResources &getPerDylibResources(JITDylib &TargetD);

  void cleanUpModule(Module &M);

  void expandPartition(GlobalValueSet &Partition);

  void emitPartition(std::unique_ptr<MaterializationResponsibility> R,
                     ThreadSafeModule TSM,
                     IRMaterializationUnit::SymbolNameToDefinitionMap Defs);

  mutable std::mutex CODLayerMutex;

  IRLayer &BaseLayer;
  LazyCallThroughManager &LCTMgr;
  IndirectStubsManagerBuilder BuildIndirectStubsManager;
  PerDylibResourcesMap DylibResources;
  PartitionFunction Partition = compileRequested;
  SymbolLinkagePromoter PromoteSymbols;
  ImplSymbolMap *AliaseeImpls = nullptr;
};
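
// Example (illustrative sketch): switch a CompileOnDemandLayer `COD` from the
// default per-symbol partitioning to whole-module compilation, or install a
// custom partition function.
//
// \code{.cpp}
//   COD.setPartitionFunction(CompileOnDemandLayer::compileWholeModule);
//
//   COD.setPartitionFunction(
//       [](CompileOnDemandLayer::GlobalValueSet Requested)
//           -> std::optional<CompileOnDemandLayer::GlobalValueSet> {
//         return std::move(Requested); // same behavior as compileRequested
//       });
// \endcode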

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
//===----- LLJIT.h -- An ORC-based JIT for compiling LLVM IR ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An ORC-based JIT for compiling LLVM IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_LLJIT_H
#define LLVM_EXECUTIONENGINE_ORC_LLJIT_H

#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ThreadPool.h"
#include <variant>

namespace llvm {
namespace orc {

class LLJITBuilderState;
class LLLazyJITBuilderState;
class ObjectTransformLayer;
class ExecutorProcessControl;

/// A pre-fabricated ORC JIT stack that can serve as an alternative to MCJIT.
///
/// Create instances using LLJITBuilder.
class LLJIT {
  template <typename, typename, typename> friend class LLJITBuilderSetters;

  friend Expected<JITDylibSP> setUpGenericLLVMIRPlatform(LLJIT &J);

public:
  /// Initializer support for LLJIT.
  class PlatformSupport {
  public:
    virtual ~PlatformSupport();

    virtual Error initialize(JITDylib &JD) = 0;

    virtual Error deinitialize(JITDylib &JD) = 0;

  protected:
    static void setInitTransform(LLJIT &J,
                                 IRTransformLayer::TransformFunction T);
  };

  /// Destruct this instance. If a multi-threaded instance, waits for all
  /// compile threads to complete.
  virtual ~LLJIT();

  /// Returns the ExecutionSession for this instance.
  ExecutionSession &getExecutionSession() { return *ES; }

  /// Returns a reference to the triple for this instance.
  const Triple &getTargetTriple() const { return TT; }

  /// Returns a reference to the DataLayout for this instance.
  const DataLayout &getDataLayout() const { return DL; }

  /// Returns a reference to the JITDylib representing the JIT'd main program.
  JITDylib &getMainJITDylib() { return *Main; }

  /// Returns the ProcessSymbols JITDylib, which by default reflects non-JIT'd
  /// symbols in the host process.
  ///
  /// Note: JIT'd code should not be added to the ProcessSymbols JITDylib. Use
  /// the main JITDylib or a custom JITDylib instead.
  JITDylibSP getProcessSymbolsJITDylib();

  /// Returns the Platform JITDylib, which will contain the ORC runtime (if
  /// given) and any platform symbols.
  ///
  /// Note: JIT'd code should not be added to the Platform JITDylib. Use the
  /// main JITDylib or a custom JITDylib instead.
  JITDylibSP getPlatformJITDylib();

  /// Returns the JITDylib with the given name, or nullptr if no JITDylib with
  /// that name exists.
  JITDylib *getJITDylibByName(StringRef Name) {
    return ES->getJITDylibByName(Name);
  }

  /// Load a (real) dynamic library and make its symbols available through a
  /// new JITDylib with the same name.
  ///
  /// If the given *executor* path contains a valid platform dynamic library
  /// then that library will be loaded, and a new bare JITDylib whose name is
  /// the given path will be created to make the library's symbols available to
  /// JIT'd code.
  Expected<JITDylib &> loadPlatformDynamicLibrary(const char *Path);

  /// Link a static library into the given JITDylib.
  ///
  /// If the given MemoryBuffer contains a valid static archive (or a universal
  /// binary with an archive slice that fits the LLJIT instance's platform /
  /// architecture) then it will be added to the given JITDylib using a
  /// StaticLibraryDefinitionGenerator.
  Error linkStaticLibraryInto(JITDylib &JD,
                              std::unique_ptr<MemoryBuffer> LibBuffer);

  /// Link a static library into the given JITDylib.
  ///
  /// If the given *host* path contains a valid static archive (or a universal
  /// binary with an archive slice that fits the LLJIT instance's platform /
  /// architecture) then it will be added to the given JITDylib using a
  /// StaticLibraryDefinitionGenerator.
  Error linkStaticLibraryInto(JITDylib &JD, const char *Path);

  /// Create a new JITDylib with the given name and return a reference to it.
  ///
  /// JITDylib names must be unique. If the given name is derived from user
  /// input or elsewhere in the environment then the client should check
  /// (e.g. by calling getJITDylibByName) that the given name is not already in
  /// use.
  Expected<JITDylib &> createJITDylib(std::string Name);

  /// Returns the default link order for this LLJIT instance. This link order
  /// will be appended to the link order of JITDylibs created by LLJIT's
  /// createJITDylib method.
  JITDylibSearchOrder defaultLinkOrder() { return DefaultLinks; }

  /// Adds an IR module with the given ResourceTracker.
  Error addIRModule(ResourceTrackerSP RT, ThreadSafeModule TSM);

  /// Adds an IR module to the given JITDylib.
  Error addIRModule(JITDylib &JD, ThreadSafeModule TSM);

  /// Adds an IR module to the Main JITDylib.
  Error addIRModule(ThreadSafeModule TSM) {
    return addIRModule(*Main, std::move(TSM));
  }

  /// Adds an object file to the given JITDylib.
  Error addObjectFile(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> Obj);

  /// Adds an object file to the given JITDylib.
  Error addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj);

  /// Adds an object file to the given JITDylib.
  Error addObjectFile(std::unique_ptr<MemoryBuffer> Obj) {
    return addObjectFile(*Main, std::move(Obj));
  }

  /// Look up a symbol in JITDylib JD by the symbol's linker-mangled name (to
  /// look up symbols based on their IR name use the lookup function instead).
  Expected<ExecutorAddr> lookupLinkerMangled(JITDylib &JD,
                                             SymbolStringPtr Name);

  /// Look up a symbol in JITDylib JD by the symbol's linker-mangled name (to
  /// look up symbols based on their IR name use the lookup function instead).
  Expected<ExecutorAddr> lookupLinkerMangled(JITDylib &JD,
                                             StringRef Name) {
    return lookupLinkerMangled(JD, ES->intern(Name));
  }

  /// Look up a symbol in the main JITDylib by the symbol's linker-mangled name
  /// (to look up symbols based on their IR name use the lookup function
  /// instead).
  Expected<ExecutorAddr> lookupLinkerMangled(StringRef Name) {
    return lookupLinkerMangled(*Main, Name);
  }

  /// Look up a symbol in JITDylib JD based on its IR symbol name.
  Expected<ExecutorAddr> lookup(JITDylib &JD, StringRef UnmangledName) {
    return lookupLinkerMangled(JD, mangle(UnmangledName));
  }

  /// Look up a symbol in the main JITDylib based on its IR symbol name.
  Expected<ExecutorAddr> lookup(StringRef UnmangledName) {
    return lookup(*Main, UnmangledName);
  }
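
  // Example (illustrative sketch): add an IR module to the main JITDylib and
  // look up a function by its IR name. `J` is an LLJIT instance and `TSM` is
  // assumed to contain a function named "entry".
  //
  // \code{.cpp}
  //   if (auto Err = J.addIRModule(std::move(TSM)))
  //     return Err;
  //   auto EntryAddr = J.lookup("entry");
  //   if (!EntryAddr)
  //     return EntryAddr.takeError();
  //   int Result = EntryAddr->toPtr<int (*)()>()();
  // \endcode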

  /// Set the PlatformSupport instance.
  void setPlatformSupport(std::unique_ptr<PlatformSupport> PS) {
    this->PS = std::move(PS);
  }

  /// Get the PlatformSupport instance.
  PlatformSupport *getPlatformSupport() { return PS.get(); }

  /// Run the initializers for the given JITDylib.
  Error initialize(JITDylib &JD) {
    DEBUG_WITH_TYPE("orc", {
      dbgs() << "LLJIT running initializers for JITDylib \"" << JD.getName()
             << "\"\n";
    });
    assert(PS && "PlatformSupport must be set to run initializers.");
    return PS->initialize(JD);
  }

  /// Run the deinitializers for the given JITDylib.
  Error deinitialize(JITDylib &JD) {
    DEBUG_WITH_TYPE("orc", {
      dbgs() << "LLJIT running deinitializers for JITDylib \"" << JD.getName()
             << "\"\n";
    });
    assert(PS && "PlatformSupport must be set to run deinitializers.");
    return PS->deinitialize(JD);
  }

  /// Returns a reference to the ObjLinkingLayer
  ObjectLayer &getObjLinkingLayer() { return *ObjLinkingLayer; }

  /// Returns a reference to the object transform layer.
  ObjectTransformLayer &getObjTransformLayer() { return *ObjTransformLayer; }

  /// Returns a reference to the IR transform layer.
  IRTransformLayer &getIRTransformLayer() { return *TransformLayer; }

  /// Returns a reference to the IR compile layer.
  IRCompileLayer &getIRCompileLayer() { return *CompileLayer; }

  /// Returns a linker-mangled version of UnmangledName.
  std::string mangle(StringRef UnmangledName) const;

  /// Returns an interned, linker-mangled version of UnmangledName.
  SymbolStringPtr mangleAndIntern(StringRef UnmangledName) const {
    return ES->intern(mangle(UnmangledName));
  }

protected:
  static Expected<std::unique_ptr<ObjectLayer>>
  createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES);

  static Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
  createCompileFunction(LLJITBuilderState &S, JITTargetMachineBuilder JTMB);

  /// Create an LLJIT instance with a single compile thread.
  LLJIT(LLJITBuilderState &S, Error &Err);

  Error applyDataLayout(Module &M);

  void recordCtorDtors(Module &M);

  std::unique_ptr<ExecutionSession> ES;
  std::unique_ptr<PlatformSupport> PS;

  JITDylib *ProcessSymbols = nullptr;
  JITDylib *Platform = nullptr;
  JITDylib *Main = nullptr;

  JITDylibSearchOrder DefaultLinks;

  DataLayout DL;
  Triple TT;
  std::unique_ptr<ThreadPool> CompileThreads;

  std::unique_ptr<ObjectLayer> ObjLinkingLayer;
  std::unique_ptr<ObjectTransformLayer> ObjTransformLayer;
  std::unique_ptr<IRCompileLayer> CompileLayer;
  std::unique_ptr<IRTransformLayer> TransformLayer;
  std::unique_ptr<IRTransformLayer> InitHelperTransformLayer;
};

/// An extended version of LLJIT that supports lazy function-at-a-time
/// compilation of LLVM IR.
class LLLazyJIT : public LLJIT {
  template <typename, typename, typename> friend class LLJITBuilderSetters;

public:

  /// Sets the partition function.
  void
  setPartitionFunction(CompileOnDemandLayer::PartitionFunction Partition) {
    CODLayer->setPartitionFunction(std::move(Partition));
  }

  /// Returns a reference to the on-demand layer.
  CompileOnDemandLayer &getCompileOnDemandLayer() { return *CODLayer; }

  /// Add a module to be lazily compiled to JITDylib JD.
  Error addLazyIRModule(JITDylib &JD, ThreadSafeModule M);

  /// Add a module to be lazily compiled to the main JITDylib.
  Error addLazyIRModule(ThreadSafeModule M) {
    return addLazyIRModule(*Main, std::move(M));
  }

private:

  // Create a single-threaded LLLazyJIT instance.
  LLLazyJIT(LLLazyJITBuilderState &S, Error &Err);

  std::unique_ptr<LazyCallThroughManager> LCTMgr;
  std::unique_ptr<CompileOnDemandLayer> CODLayer;
};

class LLJITBuilderState {
public:
  using ObjectLinkingLayerCreator =
      std::function<Expected<std::unique_ptr<ObjectLayer>>(ExecutionSession &,
                                                           const Triple &)>;

  using CompileFunctionCreator =
      std::function<Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>(
          JITTargetMachineBuilder JTMB)>;

  using ProcessSymbolsJITDylibSetupFunction =
      std::function<Error(JITDylib &JD)>;

  using PlatformSetupFunction = unique_function<Expected<JITDylibSP>(LLJIT &J)>;

  std::unique_ptr<ExecutorProcessControl> EPC;
  std::unique_ptr<ExecutionSession> ES;
  std::optional<JITTargetMachineBuilder> JTMB;
  std::optional<DataLayout> DL;
  bool LinkProcessSymbolsByDefault = true;
  ProcessSymbolsJITDylibSetupFunction SetupProcessSymbolsJITDylib;
  ObjectLinkingLayerCreator CreateObjectLinkingLayer;
  CompileFunctionCreator CreateCompileFunction;
  PlatformSetupFunction SetUpPlatform;
  unsigned NumCompileThreads = 0;
  bool EnableDebuggerSupport = false;

  /// Called prior to JIT class construction to fix up defaults.
  Error prepareForConstruction();
};

template <typename JITType, typename SetterImpl, typename State>
class LLJITBuilderSetters {
public:
  /// Set an ExecutorProcessControl object for this instance.
  /// This should not be called if an ExecutionSession has already been set.
  SetterImpl &
  setExecutorProcessControl(std::unique_ptr<ExecutorProcessControl> EPC) {
    assert(
        !impl().ES &&
        "setExecutorProcessControl should not be called if an ExecutionSession "
        "has already been set");
    impl().EPC = std::move(EPC);
    return impl();
  }

  /// Set an ExecutionSession for this instance.
  SetterImpl &setExecutionSession(std::unique_ptr<ExecutionSession> ES) {
    assert(
        !impl().EPC &&
        "setExecutionSession should not be called if an ExecutorProcessControl "
        "object has already been set");
    impl().ES = std::move(ES);
    return impl();
  }

  /// Set the JITTargetMachineBuilder for this instance.
  ///
  /// If this method is not called, JITTargetMachineBuilder::detectHost will be
  /// used to construct a default target machine builder for the host platform.
  SetterImpl &setJITTargetMachineBuilder(JITTargetMachineBuilder JTMB) {
    impl().JTMB = std::move(JTMB);
    return impl();
  }

  /// Return a reference to the JITTargetMachineBuilder.
  ///
  std::optional<JITTargetMachineBuilder> &getJITTargetMachineBuilder() {
    return impl().JTMB;
  }

  /// Set a DataLayout for this instance. If no data layout is specified then
  /// the target's default data layout will be used.
  SetterImpl &setDataLayout(std::optional<DataLayout> DL) {
    impl().DL = std::move(DL);
    return impl();
  }

  /// The LinkProcessSymbolsByDefault flag determines whether the "Process"
  /// JITDylib will be added to the default link order at LLJIT construction
  /// time. If true, the Process JITDylib will be added as the last item in the
  /// default link order. If false (or if the Process JITDylib is disabled via
  /// setProcessSymbolsJITDylibSetup) then the Process JITDylib will not appear
  /// in the default link order.
  SetterImpl &setLinkProcessSymbolsByDefault(bool LinkProcessSymbolsByDefault) {
    impl().LinkProcessSymbolsByDefault = LinkProcessSymbolsByDefault;
    return impl();
  }

  /// Set a setup function for the process symbols dylib. If not provided,
  /// but LinkProcessSymbolsByDefault is true, then the process-symbols
  /// JITDylib will be configured with a DynamicLibrarySearchGenerator with a
  /// default symbol filter.
  SetterImpl &setProcessSymbolsJITDylibSetup(
      LLJITBuilderState::ProcessSymbolsJITDylibSetupFunction
          SetupProcessSymbolsJITDylib) {
    impl().SetupProcessSymbolsJITDylib = std::move(SetupProcessSymbolsJITDylib);
    return impl();
  }

  /// Set an ObjectLinkingLayer creation function.
  ///
  /// If this method is not called, a default creation function will be used
  /// that will construct an RTDyldObjectLinkingLayer.
  SetterImpl &setObjectLinkingLayerCreator(
      LLJITBuilderState::ObjectLinkingLayerCreator CreateObjectLinkingLayer) {
    impl().CreateObjectLinkingLayer = std::move(CreateObjectLinkingLayer);
    return impl();
  }

  /// Set a CompileFunctionCreator.
  ///
  /// If this method is not called, a default creation function will be used
  /// that will construct a basic IR compile function that is compatible with
  /// the selected number of threads (SimpleCompiler for '0' compile threads,
  /// ConcurrentIRCompiler otherwise).
  SetterImpl &setCompileFunctionCreator(
      LLJITBuilderState::CompileFunctionCreator CreateCompileFunction) {
    impl().CreateCompileFunction = std::move(CreateCompileFunction);
    return impl();
  }

  /// Set a PlatformSetupFunction.
  ///
  /// If this method is not called then setUpGenericLLVMIRPlatform
  /// will be used to configure the JIT's platform support.
  SetterImpl &
  setPlatformSetUp(LLJITBuilderState::PlatformSetupFunction SetUpPlatform) {
    impl().SetUpPlatform = std::move(SetUpPlatform);
    return impl();
  }

  /// Set the number of compile threads to use.
  ///
  /// If set to zero, compilation will be performed on the execution thread when
  /// JITing in-process. If set to any other number N, a thread pool of N
  /// threads will be created for compilation.
  ///
  /// If this method is not called, behavior will be as if it were called with
  /// a zero argument.
  SetterImpl &setNumCompileThreads(unsigned NumCompileThreads) {
    impl().NumCompileThreads = NumCompileThreads;
    return impl();
  }

  /// Enable / disable debugger support (off by default).
  SetterImpl &setEnableDebuggerSupport(bool EnableDebuggerSupport) {
    impl().EnableDebuggerSupport = EnableDebuggerSupport;
    return impl();
  }

  /// Create an instance of the JIT.
  Expected<std::unique_ptr<JITType>> create() {
    if (auto Err = impl().prepareForConstruction())
      return std::move(Err);

    Error Err = Error::success();
    std::unique_ptr<JITType> J(new JITType(impl(), Err));
    if (Err)
      return std::move(Err);
    return std::move(J);
  }

protected:
  SetterImpl &impl() { return static_cast<SetterImpl &>(*this); }
};

/// Constructs LLJIT instances.
class LLJITBuilder
    : public LLJITBuilderState,
      public LLJITBuilderSetters<LLJIT, LLJITBuilder, LLJITBuilderState> {};
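
// Example (illustrative sketch, not part of the API): building an LLJIT
// instance, adding IR, and calling a JIT'd function. Assumes the native
// target and asm printer have been initialized and that TSM is a
// ThreadSafeModule defining a function "add"; cantFail is used for brevity.
//
//   auto J = cantFail(LLJITBuilder().create());
//   cantFail(J->addIRModule(std::move(TSM)));
//   auto AddSym = cantFail(J->lookup("add"));      // IR-name lookup
//   auto *Add = AddSym.toPtr<int (*)(int, int)>();
//   int Result = Add(1, 2);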

class LLLazyJITBuilderState : public LLJITBuilderState {
  friend class LLLazyJIT;

public:
  using IndirectStubsManagerBuilderFunction =
      std::function<std::unique_ptr<IndirectStubsManager>()>;

  Triple TT;
  ExecutorAddr LazyCompileFailureAddr;
  std::unique_ptr<LazyCallThroughManager> LCTMgr;
  IndirectStubsManagerBuilderFunction ISMBuilder;

  Error prepareForConstruction();
};

template <typename JITType, typename SetterImpl, typename State>
class LLLazyJITBuilderSetters
    : public LLJITBuilderSetters<JITType, SetterImpl, State> {
public:
  /// Set the address in the target address space to call if a lazy compile
  /// fails.
  ///
  /// If this method is not called then the value will default to 0.
  SetterImpl &setLazyCompileFailureAddr(ExecutorAddr Addr) {
    this->impl().LazyCompileFailureAddr = Addr;
    return this->impl();
  }

  /// Set the lazy-callthrough manager.
  ///
  /// If this method is not called then a default, in-process lazy callthrough
  /// manager for the host platform will be used.
  SetterImpl &
  setLazyCallthroughManager(std::unique_ptr<LazyCallThroughManager> LCTMgr) {
    this->impl().LCTMgr = std::move(LCTMgr);
    return this->impl();
  }

  /// Set the IndirectStubsManager builder function.
  ///
  /// If this method is not called then a default, in-process
  /// IndirectStubsManager builder for the host platform will be used.
  SetterImpl &setIndirectStubsManagerBuilder(
      LLLazyJITBuilderState::IndirectStubsManagerBuilderFunction ISMBuilder) {
    this->impl().ISMBuilder = std::move(ISMBuilder);
    return this->impl();
  }
};

/// Constructs LLLazyJIT instances.
class LLLazyJITBuilder
    : public LLLazyJITBuilderState,
      public LLLazyJITBuilderSetters<LLLazyJIT, LLLazyJITBuilder,
                                     LLLazyJITBuilderState> {};
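
// Example (illustrative sketch): as above, but deferring compilation until
// first call via LLLazyJIT. Assumes TSM is a ThreadSafeModule defining a
// function "entry"; the function body is only compiled when the call is made
// through its stub.
//
//   auto J = cantFail(LLLazyJITBuilder().create());
//   cantFail(J->addLazyIRModule(std::move(TSM)));
//   auto Entry = cantFail(J->lookup("entry")).toPtr<void (*)()>();
//   Entry();   // triggers compilation of "entry" on first call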

/// Configure the LLJIT instance to use the ORC runtime. This overload
/// assumes that the client has manually configured a Platform object.
Error setUpOrcPlatformManually(LLJIT &J);

/// Configure the LLJIT instance to use the ORC runtime and the detected
/// native target for the executor.
class ExecutorNativePlatform {
public:
  /// Set up using a path to the ORC runtime.
  ExecutorNativePlatform(std::string OrcRuntimePath)
      : OrcRuntime(std::move(OrcRuntimePath)) {}

  /// Set up using the given memory buffer.
  ExecutorNativePlatform(std::unique_ptr<MemoryBuffer> OrcRuntimeMB)
      : OrcRuntime(std::move(OrcRuntimeMB)) {}

  // TODO: add compiler-rt.

  /// Add a path to the VC runtime.
  ExecutorNativePlatform &addVCRuntime(std::string VCRuntimePath,
                                       bool StaticVCRuntime) {
    VCRuntime = {std::move(VCRuntimePath), StaticVCRuntime};
    return *this;
  }

  Expected<JITDylibSP> operator()(LLJIT &J);

private:
  std::variant<std::string, std::unique_ptr<MemoryBuffer>> OrcRuntime;
  std::optional<std::pair<std::string, bool>> VCRuntime;
};

/// Configure the LLJIT instance to scrape modules for llvm.global_ctors and
/// llvm.global_dtors variables and (if present) build initialization and
/// deinitialization functions. Platform specific initialization configurations
/// should be preferred where available.
Expected<JITDylibSP> setUpGenericLLVMIRPlatform(LLJIT &J);

/// Configure the LLJIT instance to disable platform support explicitly. This is
/// useful in two cases: for platforms that don't have such requirements, and
/// for platforms that we have no explicit support for yet and that don't work
/// well with the generic IR platform.
Expected<JITDylibSP> setUpInactivePlatform(LLJIT &J);
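
// Example (illustrative sketch): selecting a platform setup function at build
// time. The runtime archive path below is hypothetical.
//
//   auto J1 = cantFail(
//       LLJITBuilder().setPlatformSetUp(setUpInactivePlatform).create());
//
//   auto J2 = cantFail(LLJITBuilder()
//                          .setPlatformSetUp(
//                              ExecutorNativePlatform("/path/to/liborc_rt.a"))
//                          .create());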

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_LLJIT_H
//===-- ObjectFileInterface.h - MU interface utils for objects --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for building MaterializationUnit::Interface objects from
// object files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTFILEINTERFACE_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTFILEINTERFACE_H

#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/Support/MemoryBuffer.h"

namespace llvm {
namespace orc {

/// Adds an initializer symbol to the given MU interface.
/// The init symbol's name is guaranteed to be unique within I, and will be of
/// the form $.<ObjFileName>.__inits.<N>, where N is some integer.
void addInitSymbol(MaterializationUnit::Interface &I, ExecutionSession &ES,
                   StringRef ObjFileName);

/// Returns a MaterializationUnit::Interface for the object file contained in
/// the given buffer, or an error if the buffer does not contain a valid object
/// file.
Expected<MaterializationUnit::Interface>
getObjectFileInterface(ExecutionSession &ES, MemoryBufferRef ObjBuffer);
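
// Example (illustrative sketch): inspecting the interface of an object file.
// "foo.o" is a hypothetical path; ES is an existing ExecutionSession.
//
//   auto ObjBuf =
//       cantFail(errorOrToExpected(MemoryBuffer::getFile("foo.o")));
//   auto I = cantFail(getObjectFileInterface(ES, ObjBuf->getMemBufferRef()));
//   for (auto &[Name, Flags] : I.SymbolFlags)
//     dbgs() << *Name << "\n";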

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTFILEINTERFACE_H
//===- ExecutionUtils.h - Utilities for executing code in Orc ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for executing code in Orc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Mangling.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Object/Archive.h"
#include "llvm/Support/DynamicLibrary.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {

class ConstantArray;
class GlobalVariable;
class Function;
class Module;
class Value;

namespace object {
class MachOUniversalBinary;
}

namespace orc {

class ObjectLayer;

/// This iterator provides a convenient way to iterate over the elements
///        of an llvm.global_ctors/llvm.global_dtors instance.
///
///   The easiest way to get hold of instances of this class is to use the
/// getConstructors/getDestructors functions.
class CtorDtorIterator {
public:
  /// Accessor for an element of the global_ctors/global_dtors array.
  ///
  ///   This class provides a read-only view of the element with any casts on
  /// the function stripped away.
  struct Element {
    Element(unsigned Priority, Function *Func, Value *Data)
      : Priority(Priority), Func(Func), Data(Data) {}

    unsigned Priority;
    Function *Func;
    Value *Data;
  };

  /// Construct an iterator instance. If End is true then this iterator
  ///        acts as the end of the range, otherwise it is the beginning.
  CtorDtorIterator(const GlobalVariable *GV, bool End);

  /// Test iterators for equality.
  bool operator==(const CtorDtorIterator &Other) const;

  /// Test iterators for inequality.
  bool operator!=(const CtorDtorIterator &Other) const;

  /// Pre-increment iterator.
  CtorDtorIterator& operator++();

  /// Post-increment iterator.
  CtorDtorIterator operator++(int);

  /// Dereference iterator. The resulting value provides a read-only view
  ///        of this element of the global_ctors/global_dtors list.
  Element operator*() const;

private:
  const ConstantArray *InitList;
  unsigned I;
};

/// Create an iterator range over the entries of the llvm.global_ctors
///        array.
iterator_range<CtorDtorIterator> getConstructors(const Module &M);

/// Create an iterator range over the entries of the llvm.global_dtors
///        array.
iterator_range<CtorDtorIterator> getDestructors(const Module &M);
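
// Example (illustrative sketch): walking the constructors of a Module M and
// printing their priorities.
//
//   for (CtorDtorIterator::Element E : getConstructors(M))
//     dbgs() << "ctor priority " << E.Priority << ": "
//            << (E.Func ? E.Func->getName() : StringRef("<null>")) << "\n";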

/// This iterator provides a convenient way to iterate over GlobalValues that
/// have initialization effects.
class StaticInitGVIterator {
public:
  StaticInitGVIterator() = default;

  StaticInitGVIterator(Module &M)
      : I(M.global_values().begin()), E(M.global_values().end()),
        ObjFmt(Triple(M.getTargetTriple()).getObjectFormat()) {
    if (I != E) {
      if (!isStaticInitGlobal(*I))
        moveToNextStaticInitGlobal();
    } else
      I = E = Module::global_value_iterator();
  }

  bool operator==(const StaticInitGVIterator &O) const { return I == O.I; }
  bool operator!=(const StaticInitGVIterator &O) const { return I != O.I; }

  StaticInitGVIterator &operator++() {
    assert(I != E && "Increment past end of range");
    moveToNextStaticInitGlobal();
    return *this;
  }

  GlobalValue &operator*() { return *I; }

private:
  bool isStaticInitGlobal(GlobalValue &GV);
  void moveToNextStaticInitGlobal() {
    ++I;
    while (I != E && !isStaticInitGlobal(*I))
      ++I;
    if (I == E)
      I = E = Module::global_value_iterator();
  }

  Module::global_value_iterator I, E;
  Triple::ObjectFormatType ObjFmt;
};

/// Create an iterator range over the GlobalValues that contribute to static
/// initialization.
inline iterator_range<StaticInitGVIterator> getStaticInitGVs(Module &M) {
  return make_range(StaticInitGVIterator(M), StaticInitGVIterator());
}

class CtorDtorRunner {
public:
  CtorDtorRunner(JITDylib &JD) : JD(JD) {}
  void add(iterator_range<CtorDtorIterator> CtorDtors);
  Error run();

private:
  using CtorDtorList = std::vector<SymbolStringPtr>;
  using CtorDtorPriorityMap = std::map<unsigned, CtorDtorList>;

  JITDylib &JD;
  CtorDtorPriorityMap CtorDtorsByPriority;
};
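
// Example (illustrative sketch): recording a module's constructors before it
// is moved into the JIT, then running them once the module has been added.
// J, MainJD, and TSM are assumed to exist.
//
//   CtorDtorRunner CtorRunner(MainJD);
//   CtorRunner.add(getConstructors(*TSM.getModuleUnlocked()));
//   cantFail(J.addIRModule(std::move(TSM)));
//   cantFail(CtorRunner.run());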

/// Support class for static dtor execution. For hosted (in-process) JITs
///        only!
///
///   If a __cxa_atexit function isn't found, C++ programs that use static
/// destructors will fail to link. However, we don't want to use the host
/// process's __cxa_atexit, because it will schedule JIT'd destructors to run
/// after the JIT has been torn down, which is no good. This class makes it easy
/// to override __cxa_atexit (and the related __dso_handle).
///
///   To use, clients should manually call searchOverrides from their symbol
/// resolver. This should generally be done after attempting symbol resolution
/// inside the JIT, but before searching the host process's symbol table. When
/// the client determines that destructors should be run (generally at JIT
/// teardown or after a return from main), the runDestructors method should be
/// called.
class LocalCXXRuntimeOverridesBase {
public:
  /// Run any destructors recorded by the overridden __cxa_atexit function
  /// (CXAAtExitOverride).
  void runDestructors();

protected:
  using DestructorPtr = void (*)(void *);
  using CXXDestructorDataPair = std::pair<DestructorPtr, void *>;
  using CXXDestructorDataPairList = std::vector<CXXDestructorDataPair>;
  CXXDestructorDataPairList DSOHandleOverride;
  static int CXAAtExitOverride(DestructorPtr Destructor, void *Arg,
                               void *DSOHandle);
};

class LocalCXXRuntimeOverrides : public LocalCXXRuntimeOverridesBase {
public:
  Error enable(JITDylib &JD, MangleAndInterner &Mangler);
};
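
// Example (illustrative sketch): overriding __cxa_atexit for a hosted JIT.
// MainJD is a JITDylib and Mangle a MangleAndInterner (both assumed).
//
//   LocalCXXRuntimeOverrides CXXRuntimeOverrides;
//   cantFail(CXXRuntimeOverrides.enable(MainJD, Mangle));
//   // ... run JIT'd code that registers destructors via __cxa_atexit ...
//   CXXRuntimeOverrides.runDestructors();   // run before JIT teardown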

/// An interface for Itanium __cxa_atexit interposer implementations.
class ItaniumCXAAtExitSupport {
public:
  struct AtExitRecord {
    void (*F)(void *);
    void *Ctx;
  };

  void registerAtExit(void (*F)(void *), void *Ctx, void *DSOHandle);
  void runAtExits(void *DSOHandle);

private:
  std::mutex AtExitsMutex;
  DenseMap<void *, std::vector<AtExitRecord>> AtExitRecords;
};

/// A utility class to expose symbols found via dlsym to the JIT.
///
/// If an instance of this class is attached to a JITDylib as a fallback
/// definition generator, then any symbol found in the given DynamicLibrary that
/// passes the 'Allow' predicate will be added to the JITDylib.
class DynamicLibrarySearchGenerator : public DefinitionGenerator {
public:
  using SymbolPredicate = std::function<bool(const SymbolStringPtr &)>;

  /// Create a DynamicLibrarySearchGenerator that searches for symbols in the
  /// given sys::DynamicLibrary.
  ///
  /// If the Allow predicate is given then only symbols matching the predicate
  /// will be searched for. If the predicate is not given then all symbols will
  /// be searched for.
  DynamicLibrarySearchGenerator(sys::DynamicLibrary Dylib, char GlobalPrefix,
                                SymbolPredicate Allow = SymbolPredicate());

  /// Permanently loads the library at the given path and, on success, returns
  /// a DynamicLibrarySearchGenerator that will search it for symbol
  /// definitions. On failure returns the reason the library failed to load.
  static Expected<std::unique_ptr<DynamicLibrarySearchGenerator>>
  Load(const char *FileName, char GlobalPrefix,
       SymbolPredicate Allow = SymbolPredicate());

  /// Creates a DynamicLibrarySearchGenerator that searches for symbols in
  /// the current process.
  static Expected<std::unique_ptr<DynamicLibrarySearchGenerator>>
  GetForCurrentProcess(char GlobalPrefix,
                       SymbolPredicate Allow = SymbolPredicate()) {
    return Load(nullptr, GlobalPrefix, std::move(Allow));
  }

  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags,
                      const SymbolLookupSet &Symbols) override;

private:
  sys::DynamicLibrary Dylib;
  SymbolPredicate Allow;
  char GlobalPrefix;
};
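
// Example (illustrative sketch): making the host process's own symbols
// visible to a JITDylib JD. DL is the DataLayout in use (assumed).
//
//   JD.addGenerator(cantFail(
//       DynamicLibrarySearchGenerator::GetForCurrentProcess(
//           DL.getGlobalPrefix())));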

/// A utility class to expose symbols from a static library.
///
/// If an instance of this class is attached to a JITDylib as a fallback
/// definition generator, then any symbol found in the archive will result in
/// the containing object being added to the JITDylib.
class StaticLibraryDefinitionGenerator : public DefinitionGenerator {
public:
  /// Interface builder function for objects loaded from this archive.
  using GetObjectFileInterface =
      unique_function<Expected<MaterializationUnit::Interface>(
          ExecutionSession &ES, MemoryBufferRef ObjBuffer)>;

  /// Try to create a StaticLibraryDefinitionGenerator from the given path.
  ///
  /// This call will succeed if the file at the given path is a static library
  /// or a MachO universal binary containing a static library that is compatible
  /// with the ExecutionSession's triple. Otherwise it will return an error.
  static Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
  Load(ObjectLayer &L, const char *FileName,
       GetObjectFileInterface GetObjFileInterface = GetObjectFileInterface());

  /// Try to create a StaticLibraryDefinitionGenerator from the given memory
  /// buffer and Archive object.
  static Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
  Create(ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
         std::unique_ptr<object::Archive> Archive,
         GetObjectFileInterface GetObjFileInterface = GetObjectFileInterface());

  /// Try to create a StaticLibraryDefinitionGenerator from the given memory
  /// buffer.
  ///
  /// This call will succeed if the buffer contains a valid static library or a
  /// MachO universal binary containing a static library that is compatible
  /// with the ExecutionSession's triple. Otherwise it will return an error.
  static Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
  Create(ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
         GetObjectFileInterface GetObjFileInterface = GetObjectFileInterface());

  /// Returns a list of filenames of dynamic libraries that this archive has
  /// imported. This class does not load these libraries by itself. The user is
  /// responsible for making sure these libraries are available to the JITDylib.
  const std::set<std::string> &getImportedDynamicLibraries() const {
    return ImportedDynamicLibraries;
  }

  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags,
                      const SymbolLookupSet &Symbols) override;

private:
  StaticLibraryDefinitionGenerator(ObjectLayer &L,
                                   std::unique_ptr<MemoryBuffer> ArchiveBuffer,
                                   std::unique_ptr<object::Archive> Archive,
                                   GetObjectFileInterface GetObjFileInterface,
                                   Error &Err);
  Error buildObjectFilesMap();

  static Expected<std::pair<size_t, size_t>>
  getSliceRangeForArch(object::MachOUniversalBinary &UB, const Triple &TT);

  ObjectLayer &L;
  GetObjectFileInterface GetObjFileInterface;
  std::set<std::string> ImportedDynamicLibraries;
  std::unique_ptr<MemoryBuffer> ArchiveBuffer;
  std::unique_ptr<object::Archive> Archive;
  DenseMap<SymbolStringPtr, MemoryBufferRef> ObjectFilesMap;
};
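
// Example (illustrative sketch): lazily pulling objects out of a static
// archive. "libfoo.a" is a hypothetical path; ObjLayer and JD are assumed.
//
//   if (auto G = StaticLibraryDefinitionGenerator::Load(ObjLayer, "libfoo.a"))
//     JD.addGenerator(std::move(*G));
//   else
//     logAllUnhandledErrors(G.takeError(), errs(), "archive load failed: ");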

/// A utility class to create COFF dllimport GOT symbols (__imp_*) and PLT
/// stubs.
///
/// If an instance of this class is attached to a JITDylib as a fallback
/// definition generator, PLT stubs and dllimport __imp_ symbols will be
/// generated for external symbols found outside the given JITDylib. Currently
/// only the x86_64 architecture is supported.
class DLLImportDefinitionGenerator : public DefinitionGenerator {
public:
  /// Creates a DLLImportDefinitionGenerator instance.
  static std::unique_ptr<DLLImportDefinitionGenerator>
  Create(ExecutionSession &ES, ObjectLinkingLayer &L);

  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags,
                      const SymbolLookupSet &Symbols) override;

private:
  DLLImportDefinitionGenerator(ExecutionSession &ES, ObjectLinkingLayer &L)
      : ES(ES), L(L) {}

  static Expected<unsigned> getTargetPointerSize(const Triple &TT);
  static Expected<support::endianness> getTargetEndianness(const Triple &TT);
  Expected<std::unique_ptr<jitlink::LinkGraph>>
  createStubsGraph(const SymbolMap &Resolved);

  static StringRef getImpPrefix() { return "__imp_"; }

  static StringRef getSectionName() { return "$__DLLIMPORT_STUBS"; }

  ExecutionSession &ES;
  ObjectLinkingLayer &L;
};
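
// Example (illustrative sketch): attaching the generator so that __imp_
// symbols and PLT stubs are synthesized on demand. ES, ObjLinkingLayer, and
// JD are assumed to exist.
//
//   JD.addGenerator(DLLImportDefinitionGenerator::Create(ES, ObjLinkingLayer));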

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
//===--- EPCIndirectionUtils.h - EPC based indirection utils ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Indirection utilities (stubs, trampolines, lazy call-throughs) that use the
// ExecutorProcessControl API to interact with the executor process.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCINDIRECTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_EPCINDIRECTIONUTILS_H

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/LazyReexports.h"

#include <mutex>

namespace llvm {
namespace orc {

class ExecutorProcessControl;

/// Provides ExecutorProcessControl based indirect stubs, trampoline pool and
/// lazy call through manager.
class EPCIndirectionUtils {
  friend class EPCIndirectionUtilsAccess;

public:
  /// ABI support base class. Used to write resolver, stub, and trampoline
  /// blocks.
  class ABISupport {
  protected:
    ABISupport(unsigned PointerSize, unsigned TrampolineSize, unsigned StubSize,
               unsigned StubToPointerMaxDisplacement, unsigned ResolverCodeSize)
        : PointerSize(PointerSize), TrampolineSize(TrampolineSize),
          StubSize(StubSize),
          StubToPointerMaxDisplacement(StubToPointerMaxDisplacement),
          ResolverCodeSize(ResolverCodeSize) {}

  public:
    virtual ~ABISupport();

    unsigned getPointerSize() const { return PointerSize; }
    unsigned getTrampolineSize() const { return TrampolineSize; }
    unsigned getStubSize() const { return StubSize; }
    unsigned getStubToPointerMaxDisplacement() const {
      return StubToPointerMaxDisplacement;
    }
    unsigned getResolverCodeSize() const { return ResolverCodeSize; }

    virtual void writeResolverCode(char *ResolverWorkingMem,
                                   ExecutorAddr ResolverTargetAddr,
                                   ExecutorAddr ReentryFnAddr,
                                   ExecutorAddr ReentryCtxAddr) const = 0;

    virtual void writeTrampolines(char *TrampolineBlockWorkingMem,
                                  ExecutorAddr TrampolineBlockTargetAddr,
                                  ExecutorAddr ResolverAddr,
                                  unsigned NumTrampolines) const = 0;

    virtual void writeIndirectStubsBlock(
        char *StubsBlockWorkingMem, ExecutorAddr StubsBlockTargetAddress,
        ExecutorAddr PointersBlockTargetAddress, unsigned NumStubs) const = 0;

  private:
    unsigned PointerSize = 0;
    unsigned TrampolineSize = 0;
    unsigned StubSize = 0;
    unsigned StubToPointerMaxDisplacement = 0;
    unsigned ResolverCodeSize = 0;
  };

  /// Create using the given ABI class.
  template <typename ORCABI>
  static std::unique_ptr<EPCIndirectionUtils>
  CreateWithABI(ExecutorProcessControl &EPC);

  /// Create based on the ExecutorProcessControl triple.
  static Expected<std::unique_ptr<EPCIndirectionUtils>>
  Create(ExecutorProcessControl &EPC);

  /// Create based on the ExecutionSession's ExecutorProcessControl triple.
  static Expected<std::unique_ptr<EPCIndirectionUtils>>
  Create(ExecutionSession &ES) {
    return Create(ES.getExecutorProcessControl());
  }

  /// Return a reference to the ExecutorProcessControl object.
  ExecutorProcessControl &getExecutorProcessControl() const { return EPC; }

  /// Return a reference to the ABISupport object for this instance.
  ABISupport &getABISupport() const { return *ABI; }

  /// Release memory for resources held by this instance. This *must* be called
  /// prior to destruction of the class.
  Error cleanup();

  /// Write resolver code to the executor process and return its address.
  /// This must be called before any call to createTrampolinePool or
  /// createLazyCallThroughManager.
  Expected<ExecutorAddr> writeResolverBlock(ExecutorAddr ReentryFnAddr,
                                            ExecutorAddr ReentryCtxAddr);

  /// Returns the address of the Resolver block. Returns zero if the
  /// writeResolverBlock method has not previously been called.
  ExecutorAddr getResolverBlockAddress() const { return ResolverBlockAddr; }

  /// Create an IndirectStubsManager for the executor process.
  std::unique_ptr<IndirectStubsManager> createIndirectStubsManager();

  /// Get the TrampolinePool for the executor process, creating it if
  /// necessary.
  TrampolinePool &getTrampolinePool();

  /// Create a LazyCallThroughManager.
  /// This function should only be called once.
  LazyCallThroughManager &
  createLazyCallThroughManager(ExecutionSession &ES,
                               ExecutorAddr ErrorHandlerAddr);

  /// Get the LazyCallThroughManager for the executor process.
  LazyCallThroughManager &getLazyCallThroughManager() {
    assert(LCTM && "createLazyCallThroughManager must be called first");
    return *LCTM;
  }

private:
  using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;

  struct IndirectStubInfo {
    IndirectStubInfo() = default;
    IndirectStubInfo(ExecutorAddr StubAddress, ExecutorAddr PointerAddress)
        : StubAddress(StubAddress), PointerAddress(PointerAddress) {}
    ExecutorAddr StubAddress;
    ExecutorAddr PointerAddress;
  };

  using IndirectStubInfoVector = std::vector<IndirectStubInfo>;

  /// Create an EPCIndirectionUtils instance.
  EPCIndirectionUtils(ExecutorProcessControl &EPC,
                      std::unique_ptr<ABISupport> ABI);

  Expected<IndirectStubInfoVector> getIndirectStubs(unsigned NumStubs);

  std::mutex EPCUIMutex;
  ExecutorProcessControl &EPC;
  std::unique_ptr<ABISupport> ABI;
  ExecutorAddr ResolverBlockAddr;
  FinalizedAlloc ResolverBlock;
  std::unique_ptr<TrampolinePool> TP;
  std::unique_ptr<LazyCallThroughManager> LCTM;

  std::vector<IndirectStubInfo> AvailableIndirectStubs;
  std::vector<FinalizedAlloc> IndirectStubAllocs;
};

/// This will call writeResolverBlock on the given EPCIndirectionUtils instance
/// to set up re-entry via a function that will directly return the trampoline
/// landing address.
///
/// The EPCIndirectionUtils' LazyCallThroughManager must have been previously
/// created via EPCIndirectionUtils::createLazyCallThroughManager.
///
/// The EPCIndirectionUtils' writeResolverBlock method must not have been
/// previously
/// called.
///
/// This function is experimental and likely subject to revision.
Error setUpInProcessLCTMReentryViaEPCIU(EPCIndirectionUtils &EPCIU);
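
// Example (illustrative sketch): typical in-process setup order. Passing a
// null ExecutorAddr as the error-handler address is an assumption made for
// brevity; cleanup() must run before the instance is destroyed.
//
//   auto EPCIU = cantFail(EPCIndirectionUtils::Create(EPC));
//   auto &LCTM = EPCIU->createLazyCallThroughManager(ES, ExecutorAddr());
//   cantFail(setUpInProcessLCTMReentryViaEPCIU(*EPCIU));
//   // ... use LCTM / createIndirectStubsManager() ...
//   cantFail(EPCIU->cleanup());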

namespace detail {

template <typename ORCABI>
class ABISupportImpl : public EPCIndirectionUtils::ABISupport {
public:
  ABISupportImpl()
      : ABISupport(ORCABI::PointerSize, ORCABI::TrampolineSize,
                   ORCABI::StubSize, ORCABI::StubToPointerMaxDisplacement,
                   ORCABI::ResolverCodeSize) {}

  void writeResolverCode(char *ResolverWorkingMem,
                         ExecutorAddr ResolverTargetAddr,
                         ExecutorAddr ReentryFnAddr,
                         ExecutorAddr ReentryCtxAddr) const override {
    ORCABI::writeResolverCode(ResolverWorkingMem, ResolverTargetAddr,
                              ReentryFnAddr, ReentryCtxAddr);
  }

  void writeTrampolines(char *TrampolineBlockWorkingMem,
                        ExecutorAddr TrampolineBlockTargetAddr,
                        ExecutorAddr ResolverAddr,
                        unsigned NumTrampolines) const override {
    ORCABI::writeTrampolines(TrampolineBlockWorkingMem,
                             TrampolineBlockTargetAddr, ResolverAddr,
                             NumTrampolines);
  }

  void writeIndirectStubsBlock(char *StubsBlockWorkingMem,
                               ExecutorAddr StubsBlockTargetAddress,
                               ExecutorAddr PointersBlockTargetAddress,
                               unsigned NumStubs) const override {
    ORCABI::writeIndirectStubsBlock(StubsBlockWorkingMem,
                                    StubsBlockTargetAddress,
                                    PointersBlockTargetAddress, NumStubs);
  }
};

} // end namespace detail

template <typename ORCABI>
std::unique_ptr<EPCIndirectionUtils>
EPCIndirectionUtils::CreateWithABI(ExecutorProcessControl &EPC) {
  return std::unique_ptr<EPCIndirectionUtils>(new EPCIndirectionUtils(
      EPC, std::make_unique<detail::ABISupportImpl<ORCABI>>()));
}

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCINDIRECTIONUTILS_H
//===------------ EPCDynamicLibrarySearchGenerator.h ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Support loading and searching of dynamic libraries in an executor process
// via the ExecutorProcessControl class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCDYNAMICLIBRARYSEARCHGENERATOR_H
#define LLVM_EXECUTIONENGINE_ORC_EPCDYNAMICLIBRARYSEARCHGENERATOR_H

#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ExecutionEngine/Orc/Core.h"

namespace llvm {
namespace orc {

class ExecutorProcessControl;

class EPCDynamicLibrarySearchGenerator : public DefinitionGenerator {
public:
  using SymbolPredicate = unique_function<bool(const SymbolStringPtr &)>;

  /// Create an EPCDynamicLibrarySearchGenerator that searches for symbols in the
  /// library with the given handle.
  ///
  /// If the Allow predicate is given then only symbols matching the predicate
  /// will be searched for. If the predicate is not given then all symbols will
  /// be searched for.
  EPCDynamicLibrarySearchGenerator(ExecutionSession &ES,
                                   tpctypes::DylibHandle H,
                                   SymbolPredicate Allow = SymbolPredicate())
      : EPC(ES.getExecutorProcessControl()), H(H), Allow(std::move(Allow)) {}

  /// Permanently loads the library at the given path and, on success, returns
  /// an EPCDynamicLibrarySearchGenerator that will search it for symbol
  /// definitions. On failure returns the reason the library failed to load.
  static Expected<std::unique_ptr<EPCDynamicLibrarySearchGenerator>>
  Load(ExecutionSession &ES, const char *LibraryPath,
       SymbolPredicate Allow = SymbolPredicate());

  /// Creates an EPCDynamicLibrarySearchGenerator that searches for symbols in
  /// the target process.
  static Expected<std::unique_ptr<EPCDynamicLibrarySearchGenerator>>
  GetForTargetProcess(ExecutionSession &ES,
                      SymbolPredicate Allow = SymbolPredicate()) {
    return Load(ES, nullptr, std::move(Allow));
  }

  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags,
                      const SymbolLookupSet &Symbols) override;

private:
  ExecutorProcessControl &EPC;
  tpctypes::DylibHandle H;
  SymbolPredicate Allow;
};
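
// Example (illustrative sketch): exposing the executor process's symbols to
// a JITDylib JD via the ExecutorProcessControl attached to ES.
//
//   JD.addGenerator(cantFail(
//       EPCDynamicLibrarySearchGenerator::GetForTargetProcess(ES)));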

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCDYNAMICLIBRARYSEARCHGENERATOR_H
//===----- COFFVCRuntimeSupport.h -- VC runtime support in ORC --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for loading and initializing the VC runtime in Orc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_COFFVCRUNTIMESUPPORT_H
#define LLVM_EXECUTIONENGINE_ORC_COFFVCRUNTIMESUPPORT_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

#include <future>
#include <memory>
#include <thread>
#include <vector>

namespace llvm {
namespace orc {

/// Bootstraps the VC runtime within JITDylibs.
class COFFVCRuntimeBootstrapper {
public:
  /// Try to create a COFFVCRuntimeBootstrapper instance. An optional
  /// RuntimePath can be given to specify the location of the directory that
  /// contains the VC runtime library files such as ucrt.lib and msvcrt.lib. If
  /// no path is given, it will try to search the MSVC toolchain and Windows
  /// SDK installations and use the library files found there automatically.
  ///
  /// Note that depending on the build settings, different library files must
  /// be used. In general, if the VC runtime was statically linked into the
  /// object file that is to be jit-linked, LoadStaticVCRuntime and
  /// InitializeStaticVCRuntime must be used with libcmt.lib, libucrt.lib, and
  /// libvcruntime.lib. If the VC runtime was dynamically linked,
  /// LoadDynamicVCRuntime must be used along with msvcrt.lib, ucrt.lib, and
  /// vcruntime.lib.
  ///
  /// More information is on:
  /// https://docs.microsoft.com/en-us/cpp/c-runtime-library/crt-library-features
  static Expected<std::unique_ptr<COFFVCRuntimeBootstrapper>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         const char *RuntimePath = nullptr);

  /// Adds symbol definitions for the static version of the MSVC runtime
  /// libraries.
  Expected<std::vector<std::string>>
  loadStaticVCRuntime(JITDylib &JD, bool DebugVersion = false);

  /// Runs the initializers of the static version of the MSVC runtime
  /// libraries. This must be called before calling any functions requiring the
  /// C runtime (e.g. printf) within the JIT session. Note that proper
  /// initialization of the VC runtime requires the ability to run static
  /// initializers; consider setting up COFFPlatform.
  Error initializeStaticVCRuntime(JITDylib &JD);

  /// Adds symbol definitions for the dynamic version of the MSVC runtime
  /// libraries.
  Expected<std::vector<std::string>>
  loadDynamicVCRuntime(JITDylib &JD, bool DebugVersion = false);

private:
  COFFVCRuntimeBootstrapper(ExecutionSession &ES,
                            ObjectLinkingLayer &ObjLinkingLayer,
                            const char *RuntimePath);

  ExecutionSession &ES;
  ObjectLinkingLayer &ObjLinkingLayer;
  std::string RuntimePath;

  struct MSVCToolchainPath {
    SmallString<256> VCToolchainLib;
    SmallString<256> UCRTSdkLib;
  };

  static Expected<MSVCToolchainPath> getMSVCToolchainPath();
  Error loadVCRuntime(JITDylib &JD, std::vector<std::string> &ImportedLibraries,
                      ArrayRef<StringRef> VCLibs, ArrayRef<StringRef> UCRTLibs);
};
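
// Example (illustrative sketch): bootstrapping the static VC runtime into
// MainJD. ES, ObjLinkingLayer, and MainJD are assumed; note that
// initializeStaticVCRuntime requires the ability to run static initializers.
//
//   auto VCRT =
//       cantFail(COFFVCRuntimeBootstrapper::Create(ES, ObjLinkingLayer));
//   auto ImportedLibs = cantFail(VCRT->loadStaticVCRuntime(MainJD));
//   cantFail(VCRT->initializeStaticVCRuntime(MainJD));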

} // namespace orc
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_COFFVCRUNTIMESUPPORT_H
//===- JITTargetMachineBuilder.h - Build TargetMachines for JIT -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A utility for building TargetMachines for JITs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_JITTARGETMACHINEBUILDER_H
#define LLVM_EXECUTIONENGINE_ORC_JITTARGETMACHINEBUILDER_H

#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Error.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace llvm {

class raw_ostream;

namespace orc {

/// A utility class for building TargetMachines for JITs.
class JITTargetMachineBuilder {
#ifndef NDEBUG
  friend class JITTargetMachineBuilderPrinter;
#endif
public:
  /// Create a JITTargetMachineBuilder based on the given triple.
  ///
  /// Note: TargetOptions is default-constructed, then EmulatedTLS is set to
  /// true. If EmulatedTLS is not required, this value should be reset before
  /// calling createTargetMachine.
  JITTargetMachineBuilder(Triple TT);

  /// Create a JITTargetMachineBuilder for the host system.
  ///
  /// Note: TargetOptions is default-constructed, then EmulatedTLS is set to
  /// true. If EmulatedTLS is not required, this value should be reset before
  /// calling createTargetMachine.
  static Expected<JITTargetMachineBuilder> detectHost();

  /// Create a TargetMachine.
  ///
  /// This operation will fail if the requested target is not registered,
  /// in which case see llvm/Support/TargetSelect.h. To JIT IR the Target and
  /// the target's AsmPrinter must both be registered. To JIT assembly
  /// (including inline and module level assembly) the target's AsmParser must
  /// also be registered.
  Expected<std::unique_ptr<TargetMachine>> createTargetMachine();

  /// Get the default DataLayout for the target.
  ///
  /// Note: This is reasonably expensive, as it creates a temporary
  /// TargetMachine instance under the hood. It is only suitable for use during
  /// JIT setup.
  Expected<DataLayout> getDefaultDataLayoutForTarget() {
    auto TM = createTargetMachine();
    if (!TM)
      return TM.takeError();
    return (*TM)->createDataLayout();
  }

  /// Set the CPU string.
  JITTargetMachineBuilder &setCPU(std::string CPU) {
    this->CPU = std::move(CPU);
    return *this;
  }

  /// Returns the CPU string.
  const std::string &getCPU() const { return CPU; }

  /// Set the relocation model.
  JITTargetMachineBuilder &setRelocationModel(std::optional<Reloc::Model> RM) {
    this->RM = std::move(RM);
    return *this;
  }

  /// Get the relocation model.
  const std::optional<Reloc::Model> &getRelocationModel() const { return RM; }

  /// Set the code model.
  JITTargetMachineBuilder &setCodeModel(std::optional<CodeModel::Model> CM) {
    this->CM = std::move(CM);
    return *this;
  }

  /// Get the code model.
  const std::optional<CodeModel::Model> &getCodeModel() const { return CM; }

  /// Set the LLVM CodeGen optimization level.
  JITTargetMachineBuilder &setCodeGenOptLevel(CodeGenOpt::Level OptLevel) {
    this->OptLevel = OptLevel;
    return *this;
  }

  /// Set subtarget features.
  JITTargetMachineBuilder &setFeatures(StringRef FeatureString) {
    Features = SubtargetFeatures(FeatureString);
    return *this;
  }

  /// Add subtarget features.
  JITTargetMachineBuilder &
  addFeatures(const std::vector<std::string> &FeatureVec);

  /// Access subtarget features.
  SubtargetFeatures &getFeatures() { return Features; }

  /// Access subtarget features.
  const SubtargetFeatures &getFeatures() const { return Features; }

  /// Set TargetOptions.
  ///
  /// Note: This operation will overwrite any previously configured options,
  /// including EmulatedTLS and UseInitArray which the JITTargetMachineBuilder
  /// sets by default. Clients are responsible for re-enabling these overwritten
  /// options.
  JITTargetMachineBuilder &setOptions(TargetOptions Options) {
    this->Options = std::move(Options);
    return *this;
  }

  /// Access TargetOptions.
  TargetOptions &getOptions() { return Options; }

  /// Access TargetOptions.
  const TargetOptions &getOptions() const { return Options; }

  /// Access Triple.
  Triple &getTargetTriple() { return TT; }

  /// Access Triple.
  const Triple &getTargetTriple() const { return TT; }

private:
  Triple TT;
  std::string CPU;
  SubtargetFeatures Features;
  TargetOptions Options;
  std::optional<Reloc::Model> RM;
  std::optional<CodeModel::Model> CM;
  CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
};
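
// Example (illustrative sketch): detecting the host and customizing the
// builder. The CPU and feature strings below are placeholders.
//
//   auto JTMB = cantFail(JITTargetMachineBuilder::detectHost());
//   JTMB.setCPU("generic");                     // hypothetical CPU name
//   JTMB.getFeatures().AddFeature("sse4.2");    // illustrative feature
//   auto TM = cantFail(JTMB.createTargetMachine());
//   DataLayout DL = cantFail(JTMB.getDefaultDataLayoutForTarget());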

#ifndef NDEBUG
class JITTargetMachineBuilderPrinter {
public:
  JITTargetMachineBuilderPrinter(JITTargetMachineBuilder &JTMB,
                                 StringRef Indent)
      : JTMB(JTMB), Indent(Indent) {}
  void print(raw_ostream &OS) const;

  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const JITTargetMachineBuilderPrinter &JTMBP) {
    JTMBP.print(OS);
    return OS;
  }

private:
  JITTargetMachineBuilder &JTMB;
  StringRef Indent;
};
#endif // NDEBUG

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_JITTARGETMACHINEBUILDER_H
//===-- EPCEHFrameRegistrar.h - EPC based eh-frame registration -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ExecutorProcessControl based eh-frame registration.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCEHFRAMEREGISTRAR_H
#define LLVM_EXECUTIONENGINE_ORC_EPCEHFRAMEREGISTRAR_H

#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

namespace llvm {
namespace orc {

class ExecutionSession;

/// Registers/deregisters EH frames in a remote process via an
/// ExecutorProcessControl instance.
class EPCEHFrameRegistrar : public jitlink::EHFrameRegistrar {
public:
  /// Create from an ExecutorProcessControl instance alone. This will use
  /// the EPC's lookupSymbols method to find the registration/deregistration
  /// function addresses by name.
  ///
  /// If RegistrationFunctionsDylib is given then it will be searched to find
  /// the registration functions. If it is not given then the process dylib
  /// will be loaded and searched for the registration functions.
  static Expected<std::unique_ptr<EPCEHFrameRegistrar>>
  Create(ExecutionSession &ES,
         std::optional<ExecutorAddr> RegistrationFunctionsDylib = std::nullopt);

  /// Create an EPCEHFrameRegistrar with the given ExecutionSession and
  /// registration/deregistration function addresses.
  EPCEHFrameRegistrar(ExecutionSession &ES,
                      ExecutorAddr RegisterEHFrameWrapperFnAddr,
                      ExecutorAddr DeregisterEHFrameWrapperFnAddr)
      : ES(ES), RegisterEHFrameWrapperFnAddr(RegisterEHFrameWrapperFnAddr),
        DeregisterEHFrameWrapperFnAddr(DeregisterEHFrameWrapperFnAddr) {}

  Error registerEHFrames(ExecutorAddrRange EHFrameSection) override;
  Error deregisterEHFrames(ExecutorAddrRange EHFrameSection) override;

private:
  ExecutionSession &ES;
  ExecutorAddr RegisterEHFrameWrapperFnAddr;
  ExecutorAddr DeregisterEHFrameWrapperFnAddr;
};
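
// Example (illustrative sketch): registering JIT'd eh-frames via an
// EHFrameRegistrationPlugin (declared in ObjectLinkingLayer.h). ES and an
// orc::ObjectLinkingLayer ObjLinkingLayer are assumed.
//
//   ObjLinkingLayer.addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
//       ES, cantFail(EPCEHFrameRegistrar::Create(ES))));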

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCEHFRAMEREGISTRAR_H
//===- RTDyldObjectLinkingLayer.h - RTDyld-based jit linking  ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for an RTDyld-based, in-process object linking layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <list>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {
namespace orc {

class RTDyldObjectLinkingLayer
    : public RTTIExtends<RTDyldObjectLinkingLayer, ObjectLayer>,
      private ResourceManager {
public:
  static char ID;

  /// Functor for receiving object-loaded notifications.
  using NotifyLoadedFunction = std::function<void(
      MaterializationResponsibility &R, const object::ObjectFile &Obj,
      const RuntimeDyld::LoadedObjectInfo &)>;

  /// Functor for receiving finalization notifications.
  using NotifyEmittedFunction = std::function<void(
      MaterializationResponsibility &R, std::unique_ptr<MemoryBuffer>)>;

  using GetMemoryManagerFunction =
      unique_function<std::unique_ptr<RuntimeDyld::MemoryManager>()>;

  /// Construct an RTDyldObjectLinkingLayer with the given GetMemoryManager
  ///        function, used to create a memory manager for each emitted object.
  RTDyldObjectLinkingLayer(ExecutionSession &ES,
                           GetMemoryManagerFunction GetMemoryManager);

  ~RTDyldObjectLinkingLayer();

  /// Emit the object.
  void emit(std::unique_ptr<MaterializationResponsibility> R,
            std::unique_ptr<MemoryBuffer> O) override;

  /// Set the NotifyLoaded callback.
  RTDyldObjectLinkingLayer &setNotifyLoaded(NotifyLoadedFunction NotifyLoaded) {
    this->NotifyLoaded = std::move(NotifyLoaded);
    return *this;
  }

  /// Set the NotifyEmitted callback.
  RTDyldObjectLinkingLayer &
  setNotifyEmitted(NotifyEmittedFunction NotifyEmitted) {
    this->NotifyEmitted = std::move(NotifyEmitted);
    return *this;
  }

  /// Set the 'ProcessAllSections' flag.
  ///
  /// If set to true, all sections in each object file will be allocated using
  /// the memory manager, rather than just the sections required for execution.
  ///
  /// This is kludgy, and may be removed in the future.
  RTDyldObjectLinkingLayer &setProcessAllSections(bool ProcessAllSections) {
    this->ProcessAllSections = ProcessAllSections;
    return *this;
  }

  /// Instructs this RTDyldObjectLinkingLayer instance to override the symbol
  /// flags
  /// returned by RuntimeDyld for any given object file with the flags supplied
  /// by the MaterializationResponsibility instance. This is a workaround to
  /// support symbol visibility in COFF, which does not use the libObject's
  /// SF_Exported flag. Use only when generating / adding COFF object files.
  ///
  /// FIXME: We should be able to remove this if/when COFF properly tracks
  /// exported symbols.
  RTDyldObjectLinkingLayer &
  setOverrideObjectFlagsWithResponsibilityFlags(bool OverrideObjectFlags) {
    this->OverrideObjectFlags = OverrideObjectFlags;
    return *this;
  }

  /// If set, this RTDyldObjectLinkingLayer instance will claim responsibility
  /// for any symbols provided by a given object file that were not already in
  /// the MaterializationResponsibility instance. Setting this flag allows
  /// higher-level program representations (e.g. LLVM IR) to be added based on
  /// only a subset of the symbols they provide, without having to write
  /// intervening layers to scan and add the additional symbols. This trades
  /// diagnostic quality for convenience, however: if all symbols are enumerated
  /// up-front then clashes can be detected and reported early (and usually
  /// deterministically). If this option is set, clashes for the additional
  /// symbols may not be detected until late, and detection may depend on
  /// the flow of control through JIT'd code. Use with care.
  RTDyldObjectLinkingLayer &
  setAutoClaimResponsibilityForObjectSymbols(bool AutoClaimObjectSymbols) {
    this->AutoClaimObjectSymbols = AutoClaimObjectSymbols;
    return *this;
  }

  /// Register a JITEventListener.
  void registerJITEventListener(JITEventListener &L);

  /// Unregister a JITEventListener.
  void unregisterJITEventListener(JITEventListener &L);

private:
  using MemoryManagerUP = std::unique_ptr<RuntimeDyld::MemoryManager>;

  Error onObjLoad(MaterializationResponsibility &R,
                  const object::ObjectFile &Obj,
                  RuntimeDyld::MemoryManager &MemMgr,
                  RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
                  std::map<StringRef, JITEvaluatedSymbol> Resolved,
                  std::set<StringRef> &InternalSymbols);

  void onObjEmit(MaterializationResponsibility &R,
                 object::OwningBinary<object::ObjectFile> O,
                 std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
                 std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
                 Error Err);

  Error handleRemoveResources(JITDylib &JD, ResourceKey K) override;
  void handleTransferResources(JITDylib &JD, ResourceKey DstKey,
                               ResourceKey SrcKey) override;

  mutable std::mutex RTDyldLayerMutex;
  GetMemoryManagerFunction GetMemoryManager;
  NotifyLoadedFunction NotifyLoaded;
  NotifyEmittedFunction NotifyEmitted;
  bool ProcessAllSections = false;
  bool OverrideObjectFlags = false;
  bool AutoClaimObjectSymbols = false;
  DenseMap<ResourceKey, std::vector<MemoryManagerUP>> MemMgrs;
  std::vector<JITEventListener *> EventListeners;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_RTDYLDOBJECTLINKINGLAYER_H
//===--------------- OrcError.h - Orc Error Types ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Define an error category, error codes, and helper utilities for Orc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_ORCERROR_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_ORCERROR_H

#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
#include <system_error>

namespace llvm {
namespace orc {

enum class OrcErrorCode : int {
  // RPC Errors
  UnknownORCError = 1,
  DuplicateDefinition,
  JITSymbolNotFound,
  RemoteAllocatorDoesNotExist,
  RemoteAllocatorIdAlreadyInUse,
  RemoteMProtectAddrUnrecognized,
  RemoteIndirectStubsOwnerDoesNotExist,
  RemoteIndirectStubsOwnerIdAlreadyInUse,
  RPCConnectionClosed,
  RPCCouldNotNegotiateFunction,
  RPCResponseAbandoned,
  UnexpectedRPCCall,
  UnexpectedRPCResponse,
  UnknownErrorCodeFromRemote,
  UnknownResourceHandle,
  MissingSymbolDefinitions,
  UnexpectedSymbolDefinitions,
};

std::error_code orcError(OrcErrorCode ErrCode);

class DuplicateDefinition : public ErrorInfo<DuplicateDefinition> {
public:
  static char ID;

  DuplicateDefinition(std::string SymbolName);
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  const std::string &getSymbolName() const;
private:
  std::string SymbolName;
};

class JITSymbolNotFound : public ErrorInfo<JITSymbolNotFound> {
public:
  static char ID;

  JITSymbolNotFound(std::string SymbolName);
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  const std::string &getSymbolName() const;
private:
  std::string SymbolName;
};

} // End namespace orc.
} // End namespace llvm.

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_ORCERROR_H
//===--------- ExecutorSymbolDef.h - (Addr, Flags) pair ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Represents a defining location for a JIT symbol.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORSYMBOLDEF_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORSYMBOLDEF_H

#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

namespace llvm {
namespace orc {

/// Represents a defining location for a JIT symbol.
class ExecutorSymbolDef {
public:
  ExecutorSymbolDef() = default;
  ExecutorSymbolDef(ExecutorAddr Addr, JITSymbolFlags Flags)
    : Addr(Addr), Flags(Flags) {}

  const ExecutorAddr &getAddress() const { return Addr; }

  const JITSymbolFlags &getFlags() const { return Flags; }

  void setFlags(JITSymbolFlags Flags) { this->Flags = Flags; }

  friend bool operator==(const ExecutorSymbolDef &LHS,
                         const ExecutorSymbolDef &RHS) {
    return LHS.getAddress() == RHS.getAddress() &&
           LHS.getFlags() == RHS.getFlags();
  }

  friend bool operator!=(const ExecutorSymbolDef &LHS,
                         const ExecutorSymbolDef &RHS) {
    return !(LHS == RHS);
  }

private:
  ExecutorAddr Addr;
  JITSymbolFlags Flags;
};

} // End namespace orc.
} // End namespace llvm.

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORSYMBOLDEF_H
//===- AllocationActions.h -- JITLink allocation support calls  -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Structures for making memory allocation support calls.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_ALLOCATIONACTIONS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_ALLOCATIONACTIONS_H

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/Support/Memory.h"

#include <vector>

namespace llvm {
namespace orc {
namespace shared {

/// A pair of WrapperFunctionCalls, one to be run at finalization time, one to
/// be run at deallocation time.
///
/// AllocActionCallPairs should be constructed for paired operations (e.g.
/// __register_ehframe and __deregister_ehframe for eh-frame registration).
/// See comments for AllocActions for execution ordering.
///
/// For unpaired operations one or the other member can be left unused, as
/// AllocationActionCalls with an FnAddr of zero will be skipped.
struct AllocActionCallPair {
  WrapperFunctionCall Finalize;
  WrapperFunctionCall Dealloc;
};
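
// Example (illustrative sketch): constructing a pair for eh-frame
// registration. Assumes WrapperFunctionCall::Create<SPSSig>(FnAddr, Args...)
// from WrapperFunctionUtils.h and register/deregister functions in the
// executor taking an SPSExecutorAddrRange argument; 'RegisterFnAddr',
// 'DeregisterFnAddr', and 'EHFrameRange' are hypothetical values.
//
//   AllocActionCallPair EHFramePair = {
//       cantFail(WrapperFunctionCall::Create<
//                SPSArgList<SPSExecutorAddrRange>>(RegisterFnAddr,
//                                                  EHFrameRange)),
//       cantFail(WrapperFunctionCall::Create<
//                SPSArgList<SPSExecutorAddrRange>>(DeregisterFnAddr,
//                                                  EHFrameRange))};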

/// A vector of allocation actions to be run for this allocation.
///
/// Finalize allocations will be run in order at finalize time. Dealloc
/// actions will be run in reverse order at deallocation time.
using AllocActions = std::vector<AllocActionCallPair>;

/// Returns the number of deallocation actions in the given AllocActions
/// array.
///
/// This can be useful if clients want to pre-allocate room for deallocation
/// actions with the rest of their memory.
inline size_t numDeallocActions(const AllocActions &AAs) {
  return llvm::count_if(
      AAs, [](const AllocActionCallPair &P) { return !!P.Dealloc; });
}

/// Run finalize actions.
///
/// If any finalize action fails then the corresponding dealloc actions will be
/// run in reverse order (not including the deallocation action for the failed
/// finalize action), and the error for the failing action will be returned.
///
/// If all finalize actions succeed then a vector of deallocation actions will
/// be returned. The dealloc actions should be run by calling
/// runDeallocActions. If this function succeeds then the AAs argument will
/// be cleared before the function returns.
Expected<std::vector<WrapperFunctionCall>>
runFinalizeActions(AllocActions &AAs);

/// Run deallocation actions.
/// Dealloc actions will be run in reverse order (from last element of DAs to
/// first).
Error runDeallocActions(ArrayRef<WrapperFunctionCall> DAs);
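
// Example flow (illustrative): running finalize actions for an allocation,
// then the corresponding dealloc actions at teardown.
//
//   if (auto DeallocActions = runFinalizeActions(Actions)) {
//     // ... use the finalized allocation ...
//     if (auto Err = runDeallocActions(*DeallocActions))
//       ; // ... report Err ...
//   } else {
//     // ... report DeallocActions.takeError() ...
//   }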

using SPSAllocActionCallPair =
    SPSTuple<SPSWrapperFunctionCall, SPSWrapperFunctionCall>;

template <>
class SPSSerializationTraits<SPSAllocActionCallPair,
                             AllocActionCallPair> {
  using AL = SPSAllocActionCallPair::AsArgList;

public:
  static size_t size(const AllocActionCallPair &AAP) {
    return AL::size(AAP.Finalize, AAP.Dealloc);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const AllocActionCallPair &AAP) {
    return AL::serialize(OB, AAP.Finalize, AAP.Dealloc);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          AllocActionCallPair &AAP) {
    return AL::deserialize(IB, AAP.Finalize, AAP.Dealloc);
  }
};

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_ALLOCATIONACTIONS_H
//===---- SimplePackedSerialization.h - simple serialization ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The behavior of the utilities in this header must be synchronized with the
// behavior of the utilities in
// compiler-rt/lib/orc/simple_packed_serialization.h.
//
// The Simple Packed Serialization (SPS) utilities are used to generate
// argument and return buffers for wrapper functions using the following
// serialization scheme:
//
// Primitives (signed types should be two's complement):
//   bool, char, int8_t, uint8_t -- 8-bit (0=false, 1=true)
//   int16_t, uint16_t           -- 16-bit little endian
//   int32_t, uint32_t           -- 32-bit little endian
//   int64_t, uint64_t           -- 64-bit little endian
//
// Sequence<T>:
//   Serialized as the sequence length (as a uint64_t) followed by the
//   serialization of each of the elements without padding.
//
// Tuple<T1, ..., TN>:
//   Serialized as each of the element types from T1 to TN without padding.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEPACKEDSERIALIZATION_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEPACKEDSERIALIZATION_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/SwapByteOrder.h"

#include <cassert>
#include <cstring>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

namespace llvm {
namespace orc {
namespace shared {

/// Output char buffer with overflow check.
class SPSOutputBuffer {
public:
  SPSOutputBuffer(char *Buffer, size_t Remaining)
      : Buffer(Buffer), Remaining(Remaining) {}
  bool write(const char *Data, size_t Size) {
    assert(Data && "Data must not be null");
    if (Size > Remaining)
      return false;
    memcpy(Buffer, Data, Size);
    Buffer += Size;
    Remaining -= Size;
    return true;
  }

private:
  char *Buffer = nullptr;
  size_t Remaining = 0;
};

/// Input char buffer with underflow check.
class SPSInputBuffer {
public:
  SPSInputBuffer() = default;
  SPSInputBuffer(const char *Buffer, size_t Remaining)
      : Buffer(Buffer), Remaining(Remaining) {}
  bool read(char *Data, size_t Size) {
    if (Size > Remaining)
      return false;
    memcpy(Data, Buffer, Size);
    Buffer += Size;
    Remaining -= Size;
    return true;
  }

  const char *data() const { return Buffer; }
  bool skip(size_t Size) {
    if (Size > Remaining)
      return false;
    Buffer += Size;
    Remaining -= Size;
    return true;
  }

private:
  const char *Buffer = nullptr;
  size_t Remaining = 0;
};

/// Specialize to describe how to serialize/deserialize to/from the given
/// concrete type.
template <typename SPSTagT, typename ConcreteT, typename _ = void>
class SPSSerializationTraits;

/// A utility class for serializing to a blob from a variadic list.
template <typename... ArgTs> class SPSArgList;

// Empty list specialization for SPSArgList.
template <> class SPSArgList<> {
public:
  static size_t size() { return 0; }

  static bool serialize(SPSOutputBuffer &OB) { return true; }
  static bool deserialize(SPSInputBuffer &IB) { return true; }

  static bool serializeToSmallVector(SmallVectorImpl<char> &V) { return true; }

  static bool deserializeFromSmallVector(const SmallVectorImpl<char> &V) {
    return true;
  }
};

// Non-empty list specialization for SPSArgList.
template <typename SPSTagT, typename... SPSTagTs>
class SPSArgList<SPSTagT, SPSTagTs...> {
public:
  // FIXME: This typedef is here to enable SPS arg serialization from
  // JITLink. It can be removed once JITLink can access SPS directly.
  using OutputBuffer = SPSOutputBuffer;

  template <typename ArgT, typename... ArgTs>
  static size_t size(const ArgT &Arg, const ArgTs &...Args) {
    return SPSSerializationTraits<SPSTagT, ArgT>::size(Arg) +
           SPSArgList<SPSTagTs...>::size(Args...);
  }

  template <typename ArgT, typename... ArgTs>
  static bool serialize(SPSOutputBuffer &OB, const ArgT &Arg,
                        const ArgTs &...Args) {
    return SPSSerializationTraits<SPSTagT, ArgT>::serialize(OB, Arg) &&
           SPSArgList<SPSTagTs...>::serialize(OB, Args...);
  }

  template <typename ArgT, typename... ArgTs>
  static bool deserialize(SPSInputBuffer &IB, ArgT &Arg, ArgTs &...Args) {
    return SPSSerializationTraits<SPSTagT, ArgT>::deserialize(IB, Arg) &&
           SPSArgList<SPSTagTs...>::deserialize(IB, Args...);
  }
};

/// SPS serialization for integral types, bool, and char.
template <typename SPSTagT>
class SPSSerializationTraits<
    SPSTagT, SPSTagT,
    std::enable_if_t<std::is_same<SPSTagT, bool>::value ||
                     std::is_same<SPSTagT, char>::value ||
                     std::is_same<SPSTagT, int8_t>::value ||
                     std::is_same<SPSTagT, int16_t>::value ||
                     std::is_same<SPSTagT, int32_t>::value ||
                     std::is_same<SPSTagT, int64_t>::value ||
                     std::is_same<SPSTagT, uint8_t>::value ||
                     std::is_same<SPSTagT, uint16_t>::value ||
                     std::is_same<SPSTagT, uint32_t>::value ||
                     std::is_same<SPSTagT, uint64_t>::value>> {
public:
  static size_t size(const SPSTagT &Value) { return sizeof(SPSTagT); }

  static bool serialize(SPSOutputBuffer &OB, const SPSTagT &Value) {
    SPSTagT Tmp = Value;
    if (sys::IsBigEndianHost)
      sys::swapByteOrder(Tmp);
    return OB.write(reinterpret_cast<const char *>(&Tmp), sizeof(Tmp));
  }

  static bool deserialize(SPSInputBuffer &IB, SPSTagT &Value) {
    SPSTagT Tmp;
    if (!IB.read(reinterpret_cast<char *>(&Tmp), sizeof(Tmp)))
      return false;
    if (sys::IsBigEndianHost)
      sys::swapByteOrder(Tmp);
    Value = Tmp;
    return true;
  }
};

/// An empty placeholder suitable as a substitute for void when deserializing.
class SPSEmpty {};

/// SPS tag type for tuples.
///
/// A blob tuple should be serialized by serializing each of the elements in
/// sequence.
template <typename... SPSTagTs> class SPSTuple {
public:
  /// Convenience typedef of the corresponding arg list.
  typedef SPSArgList<SPSTagTs...> AsArgList;
};

/// SPS tag type for optionals.
///
/// SPSOptionals should be serialized as a bool with true indicating that an
/// SPSTagT value is present, and false indicating that there is no value.
/// If the boolean is true then the serialized SPSTagT will follow immediately
/// after it.
template <typename SPSTagT> class SPSOptional {};
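
// Example (illustrative): via SPSOptional<int32_t>, std::optional<int32_t>(7)
// serializes to the five bytes 01 07 00 00 00, while std::nullopt serializes
// to the single byte 00.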

/// SPS tag type for sequences.
///
/// SPSSequences should be serialized as a uint64_t sequence length,
/// followed by the serialization of each of the elements.
template <typename SPSElementTagT> class SPSSequence;

/// SPS tag type for strings, which are equivalent to sequences of chars.
using SPSString = SPSSequence<char>;

/// SPS tag type for maps.
///
/// SPS maps are just sequences of (Key, Value) tuples.
template <typename SPSTagT1, typename SPSTagT2>
using SPSMap = SPSSequence<SPSTuple<SPSTagT1, SPSTagT2>>;

/// Serialization for SPSEmpty type.
template <> class SPSSerializationTraits<SPSEmpty, SPSEmpty> {
public:
  static size_t size(const SPSEmpty &EP) { return 0; }
  static bool serialize(SPSOutputBuffer &OB, const SPSEmpty &BE) {
    return true;
  }
  static bool deserialize(SPSInputBuffer &IB, SPSEmpty &BE) { return true; }
};

/// Specialize this to implement 'trivial' sequence serialization for
/// a concrete sequence type.
///
/// Trivial sequence serialization uses the sequence's 'size' member to get the
/// length of the sequence, and uses a range-based for loop to iterate over the
/// elements.
///
/// Specializing this template class means that you do not need to provide a
/// specialization of SPSSerializationTraits for your type.
template <typename SPSElementTagT, typename ConcreteSequenceT>
class TrivialSPSSequenceSerialization {
public:
  static constexpr bool available = false;
};

/// Specialize this to implement 'trivial' sequence deserialization for
/// a concrete sequence type.
///
/// Trivial deserialization calls a static 'reserve(SequenceT&)' method on your
/// specialization (you must implement this) to reserve space, and then calls
/// a static 'append(SequenceT&, ElementT&) method to append each of the
/// deserialized elements.
///
/// Specializing this template class means that you do not need to provide a
/// specialization of SPSSerializationTraits for your type.
template <typename SPSElementTagT, typename ConcreteSequenceT>
class TrivialSPSSequenceDeserialization {
public:
  static constexpr bool available = false;
};
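
// Example (illustrative): enabling trivial sequence (de)serialization for a
// hypothetical std::vector-like container 'MyVec<T>' (mirrors the std::vector
// specializations below):
//
//   template <typename SPSElementTagT, typename T>
//   class TrivialSPSSequenceSerialization<SPSElementTagT, MyVec<T>> {
//   public:
//     static constexpr bool available = true;
//   };
//
//   template <typename SPSElementTagT, typename T>
//   class TrivialSPSSequenceDeserialization<SPSElementTagT, MyVec<T>> {
//   public:
//     static constexpr bool available = true;
//     using element_type = T;
//     static void reserve(MyVec<T> &V, uint64_t Size) { V.reserve(Size); }
//     static bool append(MyVec<T> &V, T E) {
//       V.push_back(std::move(E));
//       return true;
//     }
//   };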

/// Trivial std::string -> SPSSequence<char> serialization.
template <> class TrivialSPSSequenceSerialization<char, std::string> {
public:
  static constexpr bool available = true;
};

/// Trivial SPSSequence<char> -> std::string deserialization.
template <> class TrivialSPSSequenceDeserialization<char, std::string> {
public:
  static constexpr bool available = true;

  using element_type = char;

  static void reserve(std::string &S, uint64_t Size) { S.reserve(Size); }
  static bool append(std::string &S, char C) {
    S.push_back(C);
    return true;
  }
};

/// Trivial std::vector<T> -> SPSSequence<SPSElementTagT> serialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceSerialization<SPSElementTagT, std::vector<T>> {
public:
  static constexpr bool available = true;
};

/// Trivial SPSSequence<SPSElementTagT> -> std::vector<T> deserialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceDeserialization<SPSElementTagT, std::vector<T>> {
public:
  static constexpr bool available = true;

  using element_type = typename std::vector<T>::value_type;

  static void reserve(std::vector<T> &V, uint64_t Size) { V.reserve(Size); }
  static bool append(std::vector<T> &V, T E) {
    V.push_back(std::move(E));
    return true;
  }
};

/// Trivial SmallVectorImpl<T> -> SPSSequence<SPSElementTagT> serialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceSerialization<SPSElementTagT, SmallVectorImpl<T>> {
public:
  static constexpr bool available = true;
};

/// Trivial SPSSequence<SPSElementTagT> -> SmallVectorImpl<T> deserialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceDeserialization<SPSElementTagT, SmallVectorImpl<T>> {
public:
  static constexpr bool available = true;

  using element_type = typename SmallVectorImpl<T>::value_type;

  static void reserve(SmallVectorImpl<T> &V, uint64_t Size) { V.reserve(Size); }
  static bool append(SmallVectorImpl<T> &V, T E) {
    V.push_back(std::move(E));
    return true;
  }
};

/// Trivial SmallVector<T, N> -> SPSSequence<SPSElementTagT> serialization.
template <typename SPSElementTagT, typename T, unsigned N>
class TrivialSPSSequenceSerialization<SPSElementTagT, SmallVector<T, N>>
    : public TrivialSPSSequenceSerialization<SPSElementTagT,
                                             SmallVectorImpl<T>> {};

/// Trivial SPSSequence<SPSElementTagT> -> SmallVector<T, N> deserialization.
template <typename SPSElementTagT, typename T, unsigned N>
class TrivialSPSSequenceDeserialization<SPSElementTagT, SmallVector<T, N>>
    : public TrivialSPSSequenceDeserialization<SPSElementTagT,
                                               SmallVectorImpl<T>> {};

/// Trivial ArrayRef<T> -> SPSSequence<SPSElementTagT> serialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceSerialization<SPSElementTagT, ArrayRef<T>> {
public:
  static constexpr bool available = true;
};

/// Specialized SPSSequence<char> -> ArrayRef<char> serialization.
///
/// On deserialize, points directly into the input buffer.
template <> class SPSSerializationTraits<SPSSequence<char>, ArrayRef<char>> {
public:
  static size_t size(const ArrayRef<char> &A) {
    return SPSArgList<uint64_t>::size(static_cast<uint64_t>(A.size())) +
           A.size();
  }

  static bool serialize(SPSOutputBuffer &OB, const ArrayRef<char> &A) {
    if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(A.size())))
      return false;
    if (A.empty()) // Empty ArrayRef may have null data, so bail out early.
      return true;
    return OB.write(A.data(), A.size());
  }

  static bool deserialize(SPSInputBuffer &IB, ArrayRef<char> &A) {
    uint64_t Size;
    if (!SPSArgList<uint64_t>::deserialize(IB, Size))
      return false;
    if (Size > std::numeric_limits<size_t>::max())
      return false;
    A = {Size ? IB.data() : nullptr, static_cast<size_t>(Size)};
    return IB.skip(Size);
  }
};

/// 'Trivial' sequence serialization: Sequence is serialized as a uint64_t size
/// followed by a for-each loop over the elements of the sequence to serialize
/// each of them.
template <typename SPSElementTagT, typename SequenceT>
class SPSSerializationTraits<SPSSequence<SPSElementTagT>, SequenceT,
                             std::enable_if_t<TrivialSPSSequenceSerialization<
                                 SPSElementTagT, SequenceT>::available>> {
public:
  static size_t size(const SequenceT &S) {
    size_t Size = SPSArgList<uint64_t>::size(static_cast<uint64_t>(S.size()));
    for (const auto &E : S)
      Size += SPSArgList<SPSElementTagT>::size(E);
    return Size;
  }

  static bool serialize(SPSOutputBuffer &OB, const SequenceT &S) {
    if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(S.size())))
      return false;
    for (const auto &E : S)
      if (!SPSArgList<SPSElementTagT>::serialize(OB, E))
        return false;
    return true;
  }

  static bool deserialize(SPSInputBuffer &IB, SequenceT &S) {
    using TBSD = TrivialSPSSequenceDeserialization<SPSElementTagT, SequenceT>;
    uint64_t Size;
    if (!SPSArgList<uint64_t>::deserialize(IB, Size))
      return false;
    TBSD::reserve(S, Size);
    for (size_t I = 0; I != Size; ++I) {
      typename TBSD::element_type E;
      if (!SPSArgList<SPSElementTagT>::deserialize(IB, E))
        return false;
      if (!TBSD::append(S, std::move(E)))
        return false;
    }
    return true;
  }
};

/// SPSTuple serialization for std::tuple.
template <typename... SPSTagTs, typename... Ts>
class SPSSerializationTraits<SPSTuple<SPSTagTs...>, std::tuple<Ts...>> {
private:
  using TupleArgList = typename SPSTuple<SPSTagTs...>::AsArgList;
  using ArgIndices = std::make_index_sequence<sizeof...(Ts)>;

  template <std::size_t... I>
  static size_t size(const std::tuple<Ts...> &T, std::index_sequence<I...>) {
    return TupleArgList::size(std::get<I>(T)...);
  }

  template <std::size_t... I>
  static bool serialize(SPSOutputBuffer &OB, const std::tuple<Ts...> &T,
                        std::index_sequence<I...>) {
    return TupleArgList::serialize(OB, std::get<I>(T)...);
  }

  template <std::size_t... I>
  static bool deserialize(SPSInputBuffer &IB, std::tuple<Ts...> &T,
                          std::index_sequence<I...>) {
    return TupleArgList::deserialize(IB, std::get<I>(T)...);
  }

public:
  static size_t size(const std::tuple<Ts...> &T) {
    return size(T, ArgIndices{});
  }

  static bool serialize(SPSOutputBuffer &OB, const std::tuple<Ts...> &T) {
    return serialize(OB, T, ArgIndices{});
  }

  static bool deserialize(SPSInputBuffer &IB, std::tuple<Ts...> &T) {
    return deserialize(IB, T, ArgIndices{});
  }
};

/// SPSTuple serialization for std::pair.
template <typename SPSTagT1, typename SPSTagT2, typename T1, typename T2>
class SPSSerializationTraits<SPSTuple<SPSTagT1, SPSTagT2>, std::pair<T1, T2>> {
public:
  static size_t size(const std::pair<T1, T2> &P) {
    return SPSArgList<SPSTagT1>::size(P.first) +
           SPSArgList<SPSTagT2>::size(P.second);
  }

  static bool serialize(SPSOutputBuffer &OB, const std::pair<T1, T2> &P) {
    return SPSArgList<SPSTagT1>::serialize(OB, P.first) &&
           SPSArgList<SPSTagT2>::serialize(OB, P.second);
  }

  static bool deserialize(SPSInputBuffer &IB, std::pair<T1, T2> &P) {
    return SPSArgList<SPSTagT1>::deserialize(IB, P.first) &&
           SPSArgList<SPSTagT2>::deserialize(IB, P.second);
  }
};

/// SPSOptional serialization for std::optional.
template <typename SPSTagT, typename T>
class SPSSerializationTraits<SPSOptional<SPSTagT>, std::optional<T>> {
public:
  static size_t size(const std::optional<T> &Value) {
    size_t Size = SPSArgList<bool>::size(!!Value);
    if (Value)
      Size += SPSArgList<SPSTagT>::size(*Value);
    return Size;
  }

  static bool serialize(SPSOutputBuffer &OB, const std::optional<T> &Value) {
    if (!SPSArgList<bool>::serialize(OB, !!Value))
      return false;
    if (Value)
      return SPSArgList<SPSTagT>::serialize(OB, *Value);
    return true;
  }

  static bool deserialize(SPSInputBuffer &IB, std::optional<T> &Value) {
    bool HasValue;
    if (!SPSArgList<bool>::deserialize(IB, HasValue))
      return false;
    if (HasValue) {
      Value = T();
      return SPSArgList<SPSTagT>::deserialize(IB, *Value);
    } else
      Value = std::optional<T>();
    return true;
  }
};

/// Serialization for StringRefs.
///
/// Serialization is as for regular strings. Deserialization points directly
/// into the blob.
template <> class SPSSerializationTraits<SPSString, StringRef> {
public:
  static size_t size(const StringRef &S) {
    return SPSArgList<uint64_t>::size(static_cast<uint64_t>(S.size())) +
           S.size();
  }

  static bool serialize(SPSOutputBuffer &OB, StringRef S) {
    if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(S.size())))
      return false;
    if (S.empty()) // Empty StringRef may have null data, so bail out early.
      return true;
    return OB.write(S.data(), S.size());
  }

  static bool deserialize(SPSInputBuffer &IB, StringRef &S) {
    const char *Data = nullptr;
    uint64_t Size;
    if (!SPSArgList<uint64_t>::deserialize(IB, Size))
      return false;
    Data = IB.data();
    if (!IB.skip(Size))
      return false;
    S = StringRef(Size ? Data : nullptr, Size);
    return true;
  }
};

/// Serialization for StringMap<ValueT>s.
template <typename SPSValueT, typename ValueT>
class SPSSerializationTraits<SPSSequence<SPSTuple<SPSString, SPSValueT>>,
                             StringMap<ValueT>> {
public:
  static size_t size(const StringMap<ValueT> &M) {
    size_t Sz = SPSArgList<uint64_t>::size(static_cast<uint64_t>(M.size()));
    for (auto &E : M)
      Sz += SPSArgList<SPSString, SPSValueT>::size(E.first(), E.second);
    return Sz;
  }

  static bool serialize(SPSOutputBuffer &OB, const StringMap<ValueT> &M) {
    if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(M.size())))
      return false;

    for (auto &E : M)
      if (!SPSArgList<SPSString, SPSValueT>::serialize(OB, E.first(), E.second))
        return false;

    return true;
  }

  static bool deserialize(SPSInputBuffer &IB, StringMap<ValueT> &M) {
    uint64_t Size;
    assert(M.empty() && "M already contains elements");

    if (!SPSArgList<uint64_t>::deserialize(IB, Size))
      return false;

    while (Size--) {
      StringRef S;
      ValueT V;
      if (!SPSArgList<SPSString, SPSValueT>::deserialize(IB, S, V))
        return false;
      if (!M.insert(std::make_pair(S, V)).second)
        return false;
    }

    return true;
  }
};

/// SPS tag type for errors.
class SPSError;

/// SPS tag type for expecteds, which are either a T or a string representing
/// an error.
template <typename SPSTagT> class SPSExpected;

namespace detail {

/// Helper type for serializing Errors.
///
/// llvm::Errors are move-only, and not inspectable except by consuming them.
/// This makes them unsuitable for direct serialization via
/// SPSSerializationTraits, which needs to inspect values twice (once to
/// determine the amount of space to reserve, and then again to serialize).
///
/// The SPSSerializableError type is a helper that can be
/// constructed from an llvm::Error, but inspected more than once.
struct SPSSerializableError {
  bool HasError = false;
  std::string ErrMsg;
};

/// Helper type for serializing Expected<T>s.
///
/// See SPSSerializableError for more details.
///
// FIXME: Use std::variant for storage now that C++17 is available.
template <typename T> struct SPSSerializableExpected {
  bool HasValue = false;
  T Value{};
  std::string ErrMsg;
};

inline SPSSerializableError toSPSSerializable(Error Err) {
  if (Err)
    return {true, toString(std::move(Err))};
  return {false, {}};
}

inline Error fromSPSSerializable(SPSSerializableError BSE) {
  if (BSE.HasError)
    return make_error<StringError>(BSE.ErrMsg, inconvertibleErrorCode());
  return Error::success();
}
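
// Example (illustrative): round-tripping an Error through the helper type so
// that it can be sized first and then serialized (two inspections):
//
//   auto SE = toSPSSerializable(
//       make_error<StringError>("boom", inconvertibleErrorCode()));
//   size_t Bytes = SPSArgList<SPSError>::size(SE);        // first inspection
//   // ... serialize via SPSArgList<SPSError>::serialize(OB, SE) ...
//   Error E = fromSPSSerializable(std::move(SE));         // reconstruct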

template <typename T>
SPSSerializableExpected<T> toSPSSerializable(Expected<T> E) {
  if (E)
    return {true, std::move(*E), {}};
  else
    return {false, T(), toString(E.takeError())};
}

template <typename T>
Expected<T> fromSPSSerializable(SPSSerializableExpected<T> BSE) {
  if (BSE.HasValue)
    return std::move(BSE.Value);
  else
    return make_error<StringError>(BSE.ErrMsg, inconvertibleErrorCode());
}

} // end namespace detail

/// Serialize to a SPSError from a detail::SPSSerializableError.
template <>
class SPSSerializationTraits<SPSError, detail::SPSSerializableError> {
public:
  static size_t size(const detail::SPSSerializableError &BSE) {
    size_t Size = SPSArgList<bool>::size(BSE.HasError);
    if (BSE.HasError)
      Size += SPSArgList<SPSString>::size(BSE.ErrMsg);
    return Size;
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const detail::SPSSerializableError &BSE) {
    if (!SPSArgList<bool>::serialize(OB, BSE.HasError))
      return false;
    if (BSE.HasError)
      if (!SPSArgList<SPSString>::serialize(OB, BSE.ErrMsg))
        return false;
    return true;
  }

  static bool deserialize(SPSInputBuffer &IB,
                          detail::SPSSerializableError &BSE) {
    if (!SPSArgList<bool>::deserialize(IB, BSE.HasError))
      return false;

    if (!BSE.HasError)
      return true;

    return SPSArgList<SPSString>::deserialize(IB, BSE.ErrMsg);
  }
};

/// Serialize to a SPSExpected<SPSTagT> from a
/// detail::SPSSerializableExpected<T>.
template <typename SPSTagT, typename T>
class SPSSerializationTraits<SPSExpected<SPSTagT>,
                             detail::SPSSerializableExpected<T>> {
public:
  static size_t size(const detail::SPSSerializableExpected<T> &BSE) {
    size_t Size = SPSArgList<bool>::size(BSE.HasValue);
    if (BSE.HasValue)
      Size += SPSArgList<SPSTagT>::size(BSE.Value);
    else
      Size += SPSArgList<SPSString>::size(BSE.ErrMsg);
    return Size;
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const detail::SPSSerializableExpected<T> &BSE) {
    if (!SPSArgList<bool>::serialize(OB, BSE.HasValue))
      return false;

    if (BSE.HasValue)
      return SPSArgList<SPSTagT>::serialize(OB, BSE.Value);

    return SPSArgList<SPSString>::serialize(OB, BSE.ErrMsg);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          detail::SPSSerializableExpected<T> &BSE) {
    if (!SPSArgList<bool>::deserialize(IB, BSE.HasValue))
      return false;

    if (BSE.HasValue)
      return SPSArgList<SPSTagT>::deserialize(IB, BSE.Value);

    return SPSArgList<SPSString>::deserialize(IB, BSE.ErrMsg);
  }
};

/// Serialize to a SPSExpected<SPSTagT> from a detail::SPSSerializableError.
template <typename SPSTagT>
class SPSSerializationTraits<SPSExpected<SPSTagT>,
                             detail::SPSSerializableError> {
public:
  static size_t size(const detail::SPSSerializableError &BSE) {
    assert(BSE.HasError && "Cannot serialize expected from a success value");
    return SPSArgList<bool>::size(false) +
           SPSArgList<SPSString>::size(BSE.ErrMsg);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const detail::SPSSerializableError &BSE) {
    assert(BSE.HasError && "Cannot serialize expected from a success value");
    if (!SPSArgList<bool>::serialize(OB, false))
      return false;
    return SPSArgList<SPSString>::serialize(OB, BSE.ErrMsg);
  }
};

/// Serialize to a SPSExpected<SPSTagT> from a T.
template <typename SPSTagT, typename T>
class SPSSerializationTraits<SPSExpected<SPSTagT>, T> {
public:
  static size_t size(const T &Value) {
    return SPSArgList<bool>::size(true) + SPSArgList<SPSTagT>::size(Value);
  }

  static bool serialize(SPSOutputBuffer &OB, const T &Value) {
    if (!SPSArgList<bool>::serialize(OB, true))
      return false;
    return SPSArgList<SPSTagT>::serialize(OB, Value);
  }
};

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEPACKEDSERIALIZATION_H
//===------ ExecutorAddress.h - Executing process address -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Represents an address in the executing program.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORADDRESS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORADDRESS_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/identity.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

#include <cassert>
#include <type_traits>

namespace llvm {
namespace orc {

using ExecutorAddrDiff = uint64_t;

/// Represents an address in the executor process.
class ExecutorAddr {
public:
  /// A wrap/unwrap function that leaves pointers unmodified.
  template <typename T> using rawPtr = llvm::identity<T *>;

  /// Default wrap function to use on this host.
  template <typename T> using defaultWrap = rawPtr<T>;

  /// Default unwrap function to use on this host.
  template <typename T> using defaultUnwrap = rawPtr<T>;

  /// Merges a tag into the raw address value:
  ///   P' = P | (TagValue << TagOffset).
  class Tag {
  public:
    constexpr Tag(uintptr_t TagValue, uintptr_t TagOffset)
        : TagMask(TagValue << TagOffset) {}

    template <typename T> constexpr T *operator()(T *P) {
      return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(P) | TagMask);
    }

  private:
    uintptr_t TagMask;
  };

  /// Strips a tag of the given length from the given offset within the pointer:
  /// P' = P & ~(((1 << TagLen) -1) << TagOffset)
  class Untag {
  public:
    constexpr Untag(uintptr_t TagLen, uintptr_t TagOffset)
        : UntagMask(~(((uintptr_t(1) << TagLen) - 1) << TagOffset)) {}

    template <typename T> constexpr T *operator()(T *P) {
      return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(P) & UntagMask);
    }

  private:
    uintptr_t UntagMask;
  };
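
  // Example (illustrative; in-process JITing on a 64-bit host only): tagging
  // the top byte of a pointer and stripping it again.
  //
  //   int X = 0;
  //   ExecutorAddr A = ExecutorAddr::fromPtr(&X);
  //   int *Tagged = A.toPtr<int *>(Tag(0x5A, 56));
  //   int *Clean =
  //       ExecutorAddr::fromPtr(Tagged).toPtr<int *>(Untag(8, 56)); // == &X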

  ExecutorAddr() = default;

  /// Create an ExecutorAddr from the given value.
  explicit constexpr ExecutorAddr(uint64_t Addr) : Addr(Addr) {}

  /// Create an ExecutorAddr from the given pointer.
  /// Warning: This should only be used when JITing in-process.
  template <typename T, typename UnwrapFn = defaultUnwrap<T>>
  static ExecutorAddr fromPtr(T *Ptr, UnwrapFn &&Unwrap = UnwrapFn()) {
    return ExecutorAddr(
        static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Unwrap(Ptr))));
  }

  /// Cast this ExecutorAddr to a pointer of the given type.
  /// Warning: This should only be used when JITing in-process.
  template <typename T, typename WrapFn = defaultWrap<std::remove_pointer_t<T>>>
  std::enable_if_t<std::is_pointer<T>::value, T>
  toPtr(WrapFn &&Wrap = WrapFn()) const {
    uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
    assert(IntPtr == Addr && "ExecutorAddr value out of range for uintptr_t");
    return Wrap(reinterpret_cast<T>(IntPtr));
  }

  /// Cast this ExecutorAddr to a pointer of the given function type.
  /// Warning: This should only be used when JITing in-process.
  template <typename T, typename WrapFn = defaultWrap<T>>
  std::enable_if_t<std::is_function<T>::value, T *>
  toPtr(WrapFn &&Wrap = WrapFn()) const {
    uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
    assert(IntPtr == Addr && "ExecutorAddr value out of range for uintptr_t");
    return Wrap(reinterpret_cast<T *>(IntPtr));
  }

  uint64_t getValue() const { return Addr; }
  void setValue(uint64_t Addr) { this->Addr = Addr; }
  bool isNull() const { return Addr == 0; }

  explicit operator bool() const { return Addr != 0; }

  friend bool operator==(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
    return LHS.Addr == RHS.Addr;
  }

  friend bool operator!=(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
    return LHS.Addr != RHS.Addr;
  }

  friend bool operator<(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
    return LHS.Addr < RHS.Addr;
  }

  friend bool operator<=(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
    return LHS.Addr <= RHS.Addr;
  }

  friend bool operator>(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
    return LHS.Addr > RHS.Addr;
  }

  friend bool operator>=(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
    return LHS.Addr >= RHS.Addr;
  }

  ExecutorAddr &operator++() {
    ++Addr;
    return *this;
  }
  ExecutorAddr &operator--() {
    --Addr;
    return *this;
  }
  ExecutorAddr operator++(int) { return ExecutorAddr(Addr++); }
  ExecutorAddr operator--(int) { return ExecutorAddr(Addr--); }

  ExecutorAddr &operator+=(const ExecutorAddrDiff &Delta) {
    Addr += Delta;
    return *this;
  }

  ExecutorAddr &operator-=(const ExecutorAddrDiff &Delta) {
    Addr -= Delta;
    return *this;
  }

private:
  uint64_t Addr = 0;
};

/// Subtracting two addresses yields an offset.
inline ExecutorAddrDiff operator-(const ExecutorAddr &LHS,
                                  const ExecutorAddr &RHS) {
  return ExecutorAddrDiff(LHS.getValue() - RHS.getValue());
}

/// Adding an offset and an address yields an address.
inline ExecutorAddr operator+(const ExecutorAddr &LHS,
                              const ExecutorAddrDiff &RHS) {
  return ExecutorAddr(LHS.getValue() + RHS);
}

/// Adding an address and an offset yields an address.
inline ExecutorAddr operator+(const ExecutorAddrDiff &LHS,
                              const ExecutorAddr &RHS) {
  return ExecutorAddr(LHS + RHS.getValue());
}

/// Subtracting an offset from an address yields an address.
inline ExecutorAddr operator-(const ExecutorAddr &LHS,
                              const ExecutorAddrDiff &RHS) {
  return ExecutorAddr(LHS.getValue() - RHS);
}

/// Taking the modulus of an address and a diff yields a diff.
inline ExecutorAddrDiff operator%(const ExecutorAddr &LHS,
                                  const ExecutorAddrDiff &RHS) {
  return ExecutorAddrDiff(LHS.getValue() % RHS);
}

/// Represents an address range in the executor process.
struct ExecutorAddrRange {
  ExecutorAddrRange() = default;
  ExecutorAddrRange(ExecutorAddr Start, ExecutorAddr End)
      : Start(Start), End(End) {}
  ExecutorAddrRange(ExecutorAddr Start, ExecutorAddrDiff Size)
      : Start(Start), End(Start + Size) {}

  bool empty() const { return Start == End; }
  ExecutorAddrDiff size() const { return End - Start; }

  friend bool operator==(const ExecutorAddrRange &LHS,
                         const ExecutorAddrRange &RHS) {
    return LHS.Start == RHS.Start && LHS.End == RHS.End;
  }
  friend bool operator!=(const ExecutorAddrRange &LHS,
                         const ExecutorAddrRange &RHS) {
    return !(LHS == RHS);
  }
  friend bool operator<(const ExecutorAddrRange &LHS,
                        const ExecutorAddrRange &RHS) {
    return LHS.Start < RHS.Start ||
           (LHS.Start == RHS.Start && LHS.End < RHS.End);
  }
  friend bool operator<=(const ExecutorAddrRange &LHS,
                         const ExecutorAddrRange &RHS) {
    return LHS.Start < RHS.Start ||
           (LHS.Start == RHS.Start && LHS.End <= RHS.End);
  }
  friend bool operator>(const ExecutorAddrRange &LHS,
                        const ExecutorAddrRange &RHS) {
    return LHS.Start > RHS.Start ||
           (LHS.Start == RHS.Start && LHS.End > RHS.End);
  }
  friend bool operator>=(const ExecutorAddrRange &LHS,
                         const ExecutorAddrRange &RHS) {
    return LHS.Start > RHS.Start ||
           (LHS.Start == RHS.Start && LHS.End >= RHS.End);
  }

  bool contains(ExecutorAddr Addr) const { return Start <= Addr && Addr < End; }
  bool overlaps(const ExecutorAddrRange &Other) const {
    return !(Other.End <= Start || End <= Other.Start);
  }

  ExecutorAddr Start;
  ExecutorAddr End;
};

inline raw_ostream &operator<<(raw_ostream &OS, const ExecutorAddr &A) {
  return OS << formatv("{0:x}", A.getValue());
}

inline raw_ostream &operator<<(raw_ostream &OS, const ExecutorAddrRange &R) {
  return OS << formatv("{0:x} -- {1:x}", R.Start.getValue(), R.End.getValue());
}

namespace shared {

class SPSExecutorAddr {};

/// SPS serializer for ExecutorAddr.
template <> class SPSSerializationTraits<SPSExecutorAddr, ExecutorAddr> {
public:
  static size_t size(const ExecutorAddr &EA) {
    return SPSArgList<uint64_t>::size(EA.getValue());
  }

  static bool serialize(SPSOutputBuffer &BOB, const ExecutorAddr &EA) {
    return SPSArgList<uint64_t>::serialize(BOB, EA.getValue());
  }

  static bool deserialize(SPSInputBuffer &BIB, ExecutorAddr &EA) {
    uint64_t Tmp;
    if (!SPSArgList<uint64_t>::deserialize(BIB, Tmp))
      return false;
    EA = ExecutorAddr(Tmp);
    return true;
  }
};

using SPSExecutorAddrRange = SPSTuple<SPSExecutorAddr, SPSExecutorAddr>;

/// Serialization traits for address ranges.
template <>
class SPSSerializationTraits<SPSExecutorAddrRange, ExecutorAddrRange> {
public:
  static size_t size(const ExecutorAddrRange &Value) {
    return SPSArgList<SPSExecutorAddr, SPSExecutorAddr>::size(Value.Start,
                                                              Value.End);
  }

  static bool serialize(SPSOutputBuffer &BOB, const ExecutorAddrRange &Value) {
    return SPSArgList<SPSExecutorAddr, SPSExecutorAddr>::serialize(
        BOB, Value.Start, Value.End);
  }

  static bool deserialize(SPSInputBuffer &BIB, ExecutorAddrRange &Value) {
    return SPSArgList<SPSExecutorAddr, SPSExecutorAddr>::deserialize(
        BIB, Value.Start, Value.End);
  }
};

using SPSExecutorAddrRangeSequence = SPSSequence<SPSExecutorAddrRange>;

} // End namespace shared.
} // End namespace orc.

// Provide DenseMapInfo for ExecutorAddrs.
template <> struct DenseMapInfo<orc::ExecutorAddr> {
  static inline orc::ExecutorAddr getEmptyKey() {
    return orc::ExecutorAddr(DenseMapInfo<uint64_t>::getEmptyKey());
  }
  static inline orc::ExecutorAddr getTombstoneKey() {
    return orc::ExecutorAddr(DenseMapInfo<uint64_t>::getTombstoneKey());
  }

  static unsigned getHashValue(const orc::ExecutorAddr &Addr) {
    return DenseMapInfo<uint64_t>::getHashValue(Addr.getValue());
  }

  static bool isEqual(const orc::ExecutorAddr &LHS,
                      const orc::ExecutorAddr &RHS) {
    return DenseMapInfo<uint64_t>::isEqual(LHS.getValue(), RHS.getValue());
  }
};

} // End namespace llvm.

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_EXECUTORADDRESS_H
//===------ ObjectFormats.h - Object format details for ORC -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ORC-specific object format details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_OBJECTFORMATS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_OBJECTFORMATS_H

#include "llvm/ADT/StringRef.h"

namespace llvm {
namespace orc {

// MachO section names.

extern StringRef MachODataCommonSectionName;
extern StringRef MachODataDataSectionName;
extern StringRef MachOEHFrameSectionName;
extern StringRef MachOCompactUnwindInfoSectionName;
extern StringRef MachOModInitFuncSectionName;
extern StringRef MachOObjCCatListSectionName;
extern StringRef MachOObjCCatList2SectionName;
extern StringRef MachOObjCClassListSectionName;
extern StringRef MachOObjCClassNameSectionName;
extern StringRef MachOObjCClassRefsSectionName;
extern StringRef MachOObjCConstSectionName;
extern StringRef MachOObjCDataSectionName;
extern StringRef MachOObjCImageInfoSectionName;
extern StringRef MachOObjCMethNameSectionName;
extern StringRef MachOObjCMethTypeSectionName;
extern StringRef MachOObjCNLCatListSectionName;
extern StringRef MachOObjCSelRefsSectionName;
extern StringRef MachOSwift5ProtoSectionName;
extern StringRef MachOSwift5ProtosSectionName;
extern StringRef MachOSwift5TypesSectionName;
extern StringRef MachOSwift5TypeRefSectionName;
extern StringRef MachOSwift5FieldMetadataSectionName;
extern StringRef MachOSwift5EntrySectionName;
extern StringRef MachOThreadBSSSectionName;
extern StringRef MachOThreadDataSectionName;
extern StringRef MachOThreadVarsSectionName;

extern StringRef MachOInitSectionNames[19];

// ELF section names.
extern StringRef ELFEHFrameSectionName;
extern StringRef ELFInitArrayFuncSectionName;

extern StringRef ELFThreadBSSSectionName;
extern StringRef ELFThreadDataSectionName;

bool isMachOInitializerSection(StringRef SegName, StringRef SecName);
bool isMachOInitializerSection(StringRef QualifiedName);

bool isELFInitializerSection(StringRef SecName);

bool isCOFFInitializerSection(StringRef Name);

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_OBJECTFORMATS_H
//===--- SimpleRemoteEPCUtils.h - Utils for Simple Remote EPC ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Message definitions and other utilities for SimpleRemoteEPC and
// SimpleRemoteEPCServer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEREMOTEEPCUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEREMOTEEPCUTILS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
#include "llvm/Support/Error.h"

#include <atomic>
#include <mutex>
#include <string>
#include <thread>

namespace llvm {
namespace orc {

namespace SimpleRemoteEPCDefaultBootstrapSymbolNames {
extern const char *ExecutorSessionObjectName;
extern const char *DispatchFnName;
} // end namespace SimpleRemoteEPCDefaultBootstrapSymbolNames

enum class SimpleRemoteEPCOpcode : uint8_t {
  Setup,
  Hangup,
  Result,
  CallWrapper,
  LastOpC = CallWrapper
};

struct SimpleRemoteEPCExecutorInfo {
  std::string TargetTriple;
  uint64_t PageSize;
  StringMap<std::vector<char>> BootstrapMap;
  StringMap<ExecutorAddr> BootstrapSymbols;
};

using SimpleRemoteEPCArgBytesVector = SmallVector<char, 128>;

class SimpleRemoteEPCTransportClient {
public:
  enum HandleMessageAction { ContinueSession, EndSession };

  virtual ~SimpleRemoteEPCTransportClient();

  /// Handle receipt of a message.
  ///
  /// Returns an Error if the message cannot be handled, 'EndSession' if the
  /// client will not accept any further messages, and 'ContinueSession'
  /// otherwise.
  virtual Expected<HandleMessageAction>
  handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo, ExecutorAddr TagAddr,
                SimpleRemoteEPCArgBytesVector ArgBytes) = 0;

  /// Handle a disconnection from the underlying transport. No further messages
  /// should be sent to handleMessage after this is called.
  /// Err may contain an Error value indicating unexpected disconnection. This
  /// allows clients to log such errors, but no attempt should be made at
  /// recovery (which should be handled inside the transport class, if it is
  /// supported at all).
  virtual void handleDisconnect(Error Err) = 0;
};

class SimpleRemoteEPCTransport {
public:
  virtual ~SimpleRemoteEPCTransport();

  /// Called during setup of the client to indicate that the client is ready
  /// to receive messages.
  ///
  /// Transport objects should not access the client until this method is
  /// called.
  virtual Error start() = 0;

  /// Send a SimpleRemoteEPC message.
  ///
  /// This function may be called concurrently. Subclasses should implement
  /// locking if required for the underlying transport.
  virtual Error sendMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
                            ExecutorAddr TagAddr, ArrayRef<char> ArgBytes) = 0;

  /// Trigger disconnection from the transport. The implementation should
  /// respond by calling handleDisconnect on the client once disconnection
  /// is complete. May be called more than once and from different threads.
  virtual void disconnect() = 0;
};

/// Uses read/write on FileDescriptors for transport.
class FDSimpleRemoteEPCTransport : public SimpleRemoteEPCTransport {
public:
  /// Create a FDSimpleRemoteEPCTransport using the given FDs for
  /// reading (InFD) and writing (OutFD).
  static Expected<std::unique_ptr<FDSimpleRemoteEPCTransport>>
  Create(SimpleRemoteEPCTransportClient &C, int InFD, int OutFD);

  /// Create a FDSimpleRemoteEPCTransport using the given FD for both
  /// reading and writing.
  static Expected<std::unique_ptr<FDSimpleRemoteEPCTransport>>
  Create(SimpleRemoteEPCTransportClient &C, int FD) {
    return Create(C, FD, FD);
  }

  ~FDSimpleRemoteEPCTransport() override;

  Error start() override;

  Error sendMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
                    ExecutorAddr TagAddr, ArrayRef<char> ArgBytes) override;

  void disconnect() override;

private:
  FDSimpleRemoteEPCTransport(SimpleRemoteEPCTransportClient &C, int InFD,
                             int OutFD)
      : C(C), InFD(InFD), OutFD(OutFD) {}

  Error readBytes(char *Dst, size_t Size, bool *IsEOF = nullptr);
  int writeBytes(const char *Src, size_t Size);
  void listenLoop();

  std::mutex M;
  SimpleRemoteEPCTransportClient &C;
  std::thread ListenerThread;
  int InFD, OutFD;
  std::atomic<bool> Disconnected{false};
};
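
// Example (illustrative): creating and starting a transport that talks to an
// executor over a single socket file descriptor 'SockFD', on behalf of a
// SimpleRemoteEPCTransportClient 'Client':
//
//   auto T = FDSimpleRemoteEPCTransport::Create(Client, SockFD);
//   if (!T)
//     ; // ... handle T.takeError() ...
//   else if (Error Err = (*T)->start())
//     ; // ... handle Err ...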

struct RemoteSymbolLookupSetElement {
  std::string Name;
  bool Required;
};

using RemoteSymbolLookupSet = std::vector<RemoteSymbolLookupSetElement>;

struct RemoteSymbolLookup {
  uint64_t H;
  RemoteSymbolLookupSet Symbols;
};

namespace shared {

using SPSRemoteSymbolLookupSetElement = SPSTuple<SPSString, bool>;

using SPSRemoteSymbolLookupSet = SPSSequence<SPSRemoteSymbolLookupSetElement>;

using SPSRemoteSymbolLookup = SPSTuple<uint64_t, SPSRemoteSymbolLookupSet>;

/// Tuple containing target triple, page size, bootstrap map, and bootstrap
/// symbols.
using SPSSimpleRemoteEPCExecutorInfo =
    SPSTuple<SPSString, uint64_t,
             SPSSequence<SPSTuple<SPSString, SPSSequence<char>>>,
             SPSSequence<SPSTuple<SPSString, SPSExecutorAddr>>>;

template <>
class SPSSerializationTraits<SPSRemoteSymbolLookupSetElement,
                             RemoteSymbolLookupSetElement> {
public:
  static size_t size(const RemoteSymbolLookupSetElement &V) {
    return SPSArgList<SPSString, bool>::size(V.Name, V.Required);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const RemoteSymbolLookupSetElement &V) {
    return SPSArgList<SPSString, bool>::serialize(OB, V.Name, V.Required);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          RemoteSymbolLookupSetElement &V) {
    return SPSArgList<SPSString, bool>::deserialize(IB, V.Name, V.Required);
  }
};

template <>
class SPSSerializationTraits<SPSRemoteSymbolLookup, RemoteSymbolLookup> {
public:
  static size_t size(const RemoteSymbolLookup &V) {
    return SPSArgList<uint64_t, SPSRemoteSymbolLookupSet>::size(V.H, V.Symbols);
  }

  static bool serialize(SPSOutputBuffer &OB, const RemoteSymbolLookup &V) {
    return SPSArgList<uint64_t, SPSRemoteSymbolLookupSet>::serialize(OB, V.H,
                                                                     V.Symbols);
  }

  static bool deserialize(SPSInputBuffer &IB, RemoteSymbolLookup &V) {
    return SPSArgList<uint64_t, SPSRemoteSymbolLookupSet>::deserialize(
        IB, V.H, V.Symbols);
  }
};

template <>
class SPSSerializationTraits<SPSSimpleRemoteEPCExecutorInfo,
                             SimpleRemoteEPCExecutorInfo> {
public:
  static size_t size(const SimpleRemoteEPCExecutorInfo &SI) {
    return SPSSimpleRemoteEPCExecutorInfo::AsArgList::size(
        SI.TargetTriple, SI.PageSize, SI.BootstrapMap, SI.BootstrapSymbols);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const SimpleRemoteEPCExecutorInfo &SI) {
    return SPSSimpleRemoteEPCExecutorInfo::AsArgList::serialize(
        OB, SI.TargetTriple, SI.PageSize, SI.BootstrapMap, SI.BootstrapSymbols);
  }

  static bool deserialize(SPSInputBuffer &IB, SimpleRemoteEPCExecutorInfo &SI) {
    return SPSSimpleRemoteEPCExecutorInfo::AsArgList::deserialize(
        IB, SI.TargetTriple, SI.PageSize, SI.BootstrapMap, SI.BootstrapSymbols);
  }
};

using SPSLoadDylibSignature = SPSExpected<SPSExecutorAddr>(SPSExecutorAddr,
                                                           SPSString, uint64_t);

using SPSLookupSymbolsSignature =
    SPSExpected<SPSSequence<SPSSequence<SPSExecutorAddr>>>(
        SPSExecutorAddr, SPSSequence<SPSRemoteSymbolLookup>);

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_SIMPLEREMOTEEPCUTILS_H
//===--- TargetProcessControlTypes.h -- Shared Core/TPC types ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TargetProcessControl types that are used by both the Orc and
// OrcTargetProcess libraries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_TARGETPROCESSCONTROLTYPES_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_TARGETPROCESSCONTROLTYPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/Support/Memory.h"

#include <vector>

namespace llvm {
namespace orc {
namespace tpctypes {

struct RemoteAllocGroup {
  RemoteAllocGroup() = default;
  RemoteAllocGroup(MemProt Prot) : Prot(Prot) {}
  RemoteAllocGroup(MemProt Prot, bool FinalizeLifetime)
      : Prot(Prot), FinalizeLifetime(FinalizeLifetime) {}
  RemoteAllocGroup(const AllocGroup &AG) : Prot(AG.getMemProt()) {
    assert(AG.getMemLifetimePolicy() != orc::MemLifetimePolicy::NoAlloc &&
           "Cannot use no-alloc memory in a remote alloc request");
    FinalizeLifetime =
        AG.getMemLifetimePolicy() == orc::MemLifetimePolicy::Finalize;
  }

  MemProt Prot;
  bool FinalizeLifetime = false;
};

struct SegFinalizeRequest {
  RemoteAllocGroup RAG;
  ExecutorAddr Addr;
  uint64_t Size;
  ArrayRef<char> Content;
};

struct FinalizeRequest {
  std::vector<SegFinalizeRequest> Segments;
  shared::AllocActions Actions;
};

struct SharedMemorySegFinalizeRequest {
  RemoteAllocGroup RAG;
  ExecutorAddr Addr;
  uint64_t Size;
};

struct SharedMemoryFinalizeRequest {
  std::vector<SharedMemorySegFinalizeRequest> Segments;
  shared::AllocActions Actions;
};

template <typename T> struct UIntWrite {
  UIntWrite() = default;
  UIntWrite(ExecutorAddr Addr, T Value) : Addr(Addr), Value(Value) {}

  ExecutorAddr Addr;
  T Value = 0;
};

/// Describes a write to a uint8_t.
using UInt8Write = UIntWrite<uint8_t>;

/// Describes a write to a uint16_t.
using UInt16Write = UIntWrite<uint16_t>;

/// Describes a write to a uint32_t.
using UInt32Write = UIntWrite<uint32_t>;

/// Describes a write to a uint64_t.
using UInt64Write = UIntWrite<uint64_t>;

/// Describes a write to a buffer.
/// For use with TargetProcessControl::MemoryAccess objects.
struct BufferWrite {
  BufferWrite() = default;
  BufferWrite(ExecutorAddr Addr, StringRef Buffer)
      : Addr(Addr), Buffer(Buffer) {}

  ExecutorAddr Addr;
  StringRef Buffer;
};
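
// Illustrative sketch (not part of the original header): queueing typed
// memory writes for a MemoryAccess implementation. The addresses and values
// below are hypothetical.
//
//   std::vector<UInt32Write> Ws;
//   Ws.push_back({ExecutorAddr(0x1000), 42});
//   Ws.push_back({ExecutorAddr(0x1004), 7});
//   BufferWrite BW(ExecutorAddr(0x2000), StringRef("hello", 5));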

/// A handle used to represent a loaded dylib in the target process.
using DylibHandle = ExecutorAddr;

using LookupResult = std::vector<ExecutorAddr>;

} // end namespace tpctypes

namespace shared {

class SPSRemoteAllocGroup;

using SPSSegFinalizeRequest =
    SPSTuple<SPSRemoteAllocGroup, SPSExecutorAddr, uint64_t, SPSSequence<char>>;

using SPSFinalizeRequest = SPSTuple<SPSSequence<SPSSegFinalizeRequest>,
                                    SPSSequence<SPSAllocActionCallPair>>;

using SPSSharedMemorySegFinalizeRequest =
    SPSTuple<SPSRemoteAllocGroup, SPSExecutorAddr, uint64_t>;

using SPSSharedMemoryFinalizeRequest =
    SPSTuple<SPSSequence<SPSSharedMemorySegFinalizeRequest>,
             SPSSequence<SPSAllocActionCallPair>>;

template <typename T>
using SPSMemoryAccessUIntWrite = SPSTuple<SPSExecutorAddr, T>;

using SPSMemoryAccessUInt8Write = SPSMemoryAccessUIntWrite<uint8_t>;
using SPSMemoryAccessUInt16Write = SPSMemoryAccessUIntWrite<uint16_t>;
using SPSMemoryAccessUInt32Write = SPSMemoryAccessUIntWrite<uint32_t>;
using SPSMemoryAccessUInt64Write = SPSMemoryAccessUIntWrite<uint64_t>;

using SPSMemoryAccessBufferWrite = SPSTuple<SPSExecutorAddr, SPSSequence<char>>;

template <>
class SPSSerializationTraits<SPSRemoteAllocGroup, tpctypes::RemoteAllocGroup> {
  enum WireBits {
    ReadBit = 1 << 0,
    WriteBit = 1 << 1,
    ExecBit = 1 << 2,
    FinalizeBit = 1 << 3
  };

public:
  static size_t size(const tpctypes::RemoteAllocGroup &RAG) {
    // All RemoteAllocGroup values encode to the same size.
    return SPSArgList<uint8_t>::size(uint8_t(0));
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const tpctypes::RemoteAllocGroup &RAG) {
    uint8_t WireValue = 0;
    if ((RAG.Prot & MemProt::Read) != MemProt::None)
      WireValue |= ReadBit;
    if ((RAG.Prot & MemProt::Write) != MemProt::None)
      WireValue |= WriteBit;
    if ((RAG.Prot & MemProt::Exec) != MemProt::None)
      WireValue |= ExecBit;
    if (RAG.FinalizeLifetime)
      WireValue |= FinalizeBit;
    return SPSArgList<uint8_t>::serialize(OB, WireValue);
  }

  static bool deserialize(SPSInputBuffer &IB, tpctypes::RemoteAllocGroup &RAG) {
    uint8_t Val;
    if (!SPSArgList<uint8_t>::deserialize(IB, Val))
      return false;
    MemProt MP = MemProt::None;
    if (Val & ReadBit)
      MP |= MemProt::Read;
    if (Val & WriteBit)
      MP |= MemProt::Write;
    if (Val & ExecBit)
      MP |= MemProt::Exec;
    bool FinalizeLifetime = (Val & FinalizeBit) != 0;
    RAG = {MP, FinalizeLifetime};
    return true;
  }
};
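
// Illustrative sketch (not part of the original header): a RemoteAllocGroup
// travels as a single byte, e.g. read+write with finalize lifetime encodes
// as ReadBit | WriteBit | FinalizeBit (0b1011). A round-trip through the SPS
// buffers looks like:
//
//   tpctypes::RemoteAllocGroup RAG(MemProt::Read | MemProt::Write,
//                                  /*FinalizeLifetime=*/true);
//   std::vector<char> Buf(SPSArgList<SPSRemoteAllocGroup>::size(RAG));
//   SPSOutputBuffer OB(Buf.data(), Buf.size());
//   bool SerializedOK = SPSArgList<SPSRemoteAllocGroup>::serialize(OB, RAG);
//
//   tpctypes::RemoteAllocGroup RAG2;
//   SPSInputBuffer IB(Buf.data(), Buf.size());
//   bool DeserializedOK =
//       SPSArgList<SPSRemoteAllocGroup>::deserialize(IB, RAG2);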

template <>
class SPSSerializationTraits<SPSSegFinalizeRequest,
                             tpctypes::SegFinalizeRequest> {
  using SFRAL = SPSSegFinalizeRequest::AsArgList;

public:
  static size_t size(const tpctypes::SegFinalizeRequest &SFR) {
    return SFRAL::size(SFR.RAG, SFR.Addr, SFR.Size, SFR.Content);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const tpctypes::SegFinalizeRequest &SFR) {
    return SFRAL::serialize(OB, SFR.RAG, SFR.Addr, SFR.Size, SFR.Content);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          tpctypes::SegFinalizeRequest &SFR) {
    return SFRAL::deserialize(IB, SFR.RAG, SFR.Addr, SFR.Size, SFR.Content);
  }
};

template <>
class SPSSerializationTraits<SPSFinalizeRequest, tpctypes::FinalizeRequest> {
  using FRAL = SPSFinalizeRequest::AsArgList;

public:
  static size_t size(const tpctypes::FinalizeRequest &FR) {
    return FRAL::size(FR.Segments, FR.Actions);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const tpctypes::FinalizeRequest &FR) {
    return FRAL::serialize(OB, FR.Segments, FR.Actions);
  }

  static bool deserialize(SPSInputBuffer &IB, tpctypes::FinalizeRequest &FR) {
    return FRAL::deserialize(IB, FR.Segments, FR.Actions);
  }
};

template <>
class SPSSerializationTraits<SPSSharedMemorySegFinalizeRequest,
                             tpctypes::SharedMemorySegFinalizeRequest> {
  using SFRAL = SPSSharedMemorySegFinalizeRequest::AsArgList;

public:
  static size_t size(const tpctypes::SharedMemorySegFinalizeRequest &SFR) {
    return SFRAL::size(SFR.RAG, SFR.Addr, SFR.Size);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const tpctypes::SharedMemorySegFinalizeRequest &SFR) {
    return SFRAL::serialize(OB, SFR.RAG, SFR.Addr, SFR.Size);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          tpctypes::SharedMemorySegFinalizeRequest &SFR) {
    return SFRAL::deserialize(IB, SFR.RAG, SFR.Addr, SFR.Size);
  }
};

template <>
class SPSSerializationTraits<SPSSharedMemoryFinalizeRequest,
                             tpctypes::SharedMemoryFinalizeRequest> {
  using FRAL = SPSSharedMemoryFinalizeRequest::AsArgList;

public:
  static size_t size(const tpctypes::SharedMemoryFinalizeRequest &FR) {
    return FRAL::size(FR.Segments, FR.Actions);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const tpctypes::SharedMemoryFinalizeRequest &FR) {
    return FRAL::serialize(OB, FR.Segments, FR.Actions);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          tpctypes::SharedMemoryFinalizeRequest &FR) {
    return FRAL::deserialize(IB, FR.Segments, FR.Actions);
  }
};

template <typename T>
class SPSSerializationTraits<SPSMemoryAccessUIntWrite<T>,
                             tpctypes::UIntWrite<T>> {
public:
  static size_t size(const tpctypes::UIntWrite<T> &W) {
    return SPSTuple<SPSExecutorAddr, T>::AsArgList::size(W.Addr, W.Value);
  }

  static bool serialize(SPSOutputBuffer &OB, const tpctypes::UIntWrite<T> &W) {
    return SPSTuple<SPSExecutorAddr, T>::AsArgList::serialize(OB, W.Addr,
                                                              W.Value);
  }

  static bool deserialize(SPSInputBuffer &IB, tpctypes::UIntWrite<T> &W) {
    return SPSTuple<SPSExecutorAddr, T>::AsArgList::deserialize(IB, W.Addr,
                                                                W.Value);
  }
};

template <>
class SPSSerializationTraits<SPSMemoryAccessBufferWrite,
                             tpctypes::BufferWrite> {
public:
  static size_t size(const tpctypes::BufferWrite &W) {
    return SPSTuple<SPSExecutorAddr, SPSSequence<char>>::AsArgList::size(
        W.Addr, W.Buffer);
  }

  static bool serialize(SPSOutputBuffer &OB, const tpctypes::BufferWrite &W) {
    return SPSTuple<SPSExecutorAddr, SPSSequence<char>>::AsArgList::serialize(
        OB, W.Addr, W.Buffer);
  }

  static bool deserialize(SPSInputBuffer &IB, tpctypes::BufferWrite &W) {
    return SPSTuple<SPSExecutorAddr, SPSSequence<char>>::AsArgList::
        deserialize(IB, W.Addr, W.Buffer);
  }
};

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_TARGETPROCESSCONTROLTYPES_H

//===---- OrcRTBridge.h -- Utils for interacting with orc-rt ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares types and symbol names provided by the ORC runtime.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_ORCRTBRIDGE_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_ORCRTBRIDGE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"

namespace llvm {
namespace orc {
namespace rt {

extern const char *SimpleExecutorDylibManagerInstanceName;
extern const char *SimpleExecutorDylibManagerOpenWrapperName;
extern const char *SimpleExecutorDylibManagerLookupWrapperName;

extern const char *SimpleExecutorMemoryManagerInstanceName;
extern const char *SimpleExecutorMemoryManagerReserveWrapperName;
extern const char *SimpleExecutorMemoryManagerFinalizeWrapperName;
extern const char *SimpleExecutorMemoryManagerDeallocateWrapperName;

extern const char *ExecutorSharedMemoryMapperServiceInstanceName;
extern const char *ExecutorSharedMemoryMapperServiceReserveWrapperName;
extern const char *ExecutorSharedMemoryMapperServiceInitializeWrapperName;
extern const char *ExecutorSharedMemoryMapperServiceDeinitializeWrapperName;
extern const char *ExecutorSharedMemoryMapperServiceReleaseWrapperName;

extern const char *MemoryWriteUInt8sWrapperName;
extern const char *MemoryWriteUInt16sWrapperName;
extern const char *MemoryWriteUInt32sWrapperName;
extern const char *MemoryWriteUInt64sWrapperName;
extern const char *MemoryWriteBuffersWrapperName;

extern const char *RegisterEHFrameSectionWrapperName;
extern const char *DeregisterEHFrameSectionWrapperName;

extern const char *RunAsMainWrapperName;
extern const char *RunAsVoidFunctionWrapperName;
extern const char *RunAsIntFunctionWrapperName;

using SPSSimpleExecutorDylibManagerOpenSignature =
    shared::SPSExpected<shared::SPSExecutorAddr>(shared::SPSExecutorAddr,
                                                 shared::SPSString, uint64_t);

using SPSSimpleExecutorDylibManagerLookupSignature =
    shared::SPSExpected<shared::SPSSequence<shared::SPSExecutorAddr>>(
        shared::SPSExecutorAddr, shared::SPSExecutorAddr,
        shared::SPSRemoteSymbolLookupSet);

using SPSSimpleExecutorMemoryManagerReserveSignature =
    shared::SPSExpected<shared::SPSExecutorAddr>(shared::SPSExecutorAddr,
                                                 uint64_t);
using SPSSimpleExecutorMemoryManagerFinalizeSignature =
    shared::SPSError(shared::SPSExecutorAddr, shared::SPSFinalizeRequest);
using SPSSimpleExecutorMemoryManagerDeallocateSignature = shared::SPSError(
    shared::SPSExecutorAddr, shared::SPSSequence<shared::SPSExecutorAddr>);

// ExecutorSharedMemoryMapperService
using SPSExecutorSharedMemoryMapperServiceReserveSignature =
    shared::SPSExpected<
        shared::SPSTuple<shared::SPSExecutorAddr, shared::SPSString>>(
        shared::SPSExecutorAddr, uint64_t);
using SPSExecutorSharedMemoryMapperServiceInitializeSignature =
    shared::SPSExpected<shared::SPSExecutorAddr>(
        shared::SPSExecutorAddr, shared::SPSExecutorAddr,
        shared::SPSSharedMemoryFinalizeRequest);
using SPSExecutorSharedMemoryMapperServiceDeinitializeSignature =
    shared::SPSError(shared::SPSExecutorAddr,
                     shared::SPSSequence<shared::SPSExecutorAddr>);
using SPSExecutorSharedMemoryMapperServiceReleaseSignature = shared::SPSError(
    shared::SPSExecutorAddr, shared::SPSSequence<shared::SPSExecutorAddr>);

using SPSRunAsMainSignature = int64_t(shared::SPSExecutorAddr,
                                      shared::SPSSequence<shared::SPSString>);
using SPSRunAsVoidFunctionSignature = int32_t(shared::SPSExecutorAddr);
using SPSRunAsIntFunctionSignature = int32_t(shared::SPSExecutorAddr, int32_t);
} // end namespace rt
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_ORCRTBRIDGE_H

//===-------- MemoryFlags.h - Memory allocation flags -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines types and operations related to memory protection and allocation
// lifetimes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_MEMORYFLAGS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_MEMORYFLAGS_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace orc {

/// Describes Read/Write/Exec permissions for memory.
enum class MemProt {
  None = 0,
  Read = 1U << 0,
  Write = 1U << 1,
  Exec = 1U << 2,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Exec)
};

/// Print a MemProt as an RWX triple.
inline raw_ostream &operator<<(raw_ostream &OS, MemProt MP) {
  return OS << (((MP & MemProt::Read) != MemProt::None) ? 'R' : '-')
            << (((MP & MemProt::Write) != MemProt::None) ? 'W' : '-')
            << (((MP & MemProt::Exec) != MemProt::None) ? 'X' : '-');
}

/// Convert a MemProt value to a corresponding sys::Memory::ProtectionFlags
/// value.
inline sys::Memory::ProtectionFlags toSysMemoryProtectionFlags(MemProt MP) {
  std::underlying_type_t<sys::Memory::ProtectionFlags> PF = 0;
  if ((MP & MemProt::Read) != MemProt::None)
    PF |= sys::Memory::MF_READ;
  if ((MP & MemProt::Write) != MemProt::None)
    PF |= sys::Memory::MF_WRITE;
  if ((MP & MemProt::Exec) != MemProt::None)
    PF |= sys::Memory::MF_EXEC;
  return static_cast<sys::Memory::ProtectionFlags>(PF);
}

/// Convert a sys::Memory::ProtectionFlags value to a corresponding MemProt
/// value.
inline MemProt fromSysMemoryProtectionFlags(sys::Memory::ProtectionFlags PF) {
  MemProt MP = MemProt::None;
  if (PF & sys::Memory::MF_READ)
    MP |= MemProt::Read;
  if (PF & sys::Memory::MF_WRITE)
    MP |= MemProt::Write;
  if (PF & sys::Memory::MF_EXEC)
    MP |= MemProt::Exec;
  return MP;
}
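
// Illustrative sketch (not part of the original header): MemProt is a
// bitmask enum, so protections compose with |, and the conversions above
// round-trip:
//
//   MemProt RW = MemProt::Read | MemProt::Write;
//   sys::Memory::ProtectionFlags PF = toSysMemoryProtectionFlags(RW);
//   assert(fromSysMemoryProtectionFlags(PF) == RW);
//   errs() << RW << '\n'; // Prints "RW-".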

/// Describes a memory lifetime policy for memory to be allocated by a
/// JITLinkMemoryManager.
///
/// All memory allocated by a call to JITLinkMemoryManager::allocate should be
/// deallocated if a call is made to
/// JITLinkMemoryManager::InFlightAllocation::abandon. The policies below apply
/// to finalized allocations.
enum class MemLifetimePolicy {
  /// Standard memory should be allocated by the allocator and then deallocated
  /// when the deallocate method is called for the finalized allocation.
  Standard,

  /// Finalize memory should be allocated by the allocator, and then be
  /// overwritten and deallocated after all finalization functions have been
  /// run.
  Finalize,

  /// NoAlloc memory should not be allocated by the JITLinkMemoryManager at
  /// all. It is used for sections that don't need to be transferred to the
  /// executor process, typically metadata sections.
  NoAlloc
};

/// Print a MemLifetimePolicy.
inline raw_ostream &operator<<(raw_ostream &OS, MemLifetimePolicy MLP) {
  switch (MLP) {
  case MemLifetimePolicy::Standard:
    OS << "standard";
    break;
  case MemLifetimePolicy::Finalize:
    OS << "finalize";
    break;
  case MemLifetimePolicy::NoAlloc:
    OS << "noalloc";
    break;
  }
  return OS;
}

/// A pair of memory protections and allocation policies.
///
/// Optimized for use as a small map key.
class AllocGroup {
  friend struct llvm::DenseMapInfo<AllocGroup>;

  using underlying_type = uint8_t;
  static constexpr unsigned BitsForProt = 3;
  static constexpr unsigned BitsForLifetimePolicy = 2;
  static constexpr unsigned MaxIdentifiers =
      1U << (BitsForProt + BitsForLifetimePolicy);

public:
  static constexpr unsigned NumGroups = MaxIdentifiers;

  /// Create a default AllocGroup. No memory protections, standard
  /// lifetime policy.
  AllocGroup() = default;

  /// Create an AllocGroup from a MemProt only -- uses
  /// MemLifetimePolicy::Standard.
  AllocGroup(MemProt MP) : Id(static_cast<underlying_type>(MP)) {}

  /// Create an AllocGroup from a MemProt and a MemLifetimePolicy.
  AllocGroup(MemProt MP, MemLifetimePolicy MLP)
      : Id(static_cast<underlying_type>(MP) |
           (static_cast<underlying_type>(MLP) << BitsForProt)) {}

  /// Returns the MemProt for this group.
  MemProt getMemProt() const {
    return static_cast<MemProt>(Id & ((1U << BitsForProt) - 1));
  }

  /// Returns the MemLifetimePolicy for this group.
  MemLifetimePolicy getMemLifetimePolicy() const {
    return static_cast<MemLifetimePolicy>(Id >> BitsForProt);
  }

  friend bool operator==(const AllocGroup &LHS, const AllocGroup &RHS) {
    return LHS.Id == RHS.Id;
  }

  friend bool operator!=(const AllocGroup &LHS, const AllocGroup &RHS) {
    return !(LHS == RHS);
  }

  friend bool operator<(const AllocGroup &LHS, const AllocGroup &RHS) {
    return LHS.Id < RHS.Id;
  }

private:
  AllocGroup(underlying_type RawId) : Id(RawId) {}
  underlying_type Id = 0;
};
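
// Illustrative sketch (not part of the original header): an AllocGroup packs
// both fields into a single byte (3 protection bits, 2 lifetime-policy
// bits), which is what makes it cheap to use as a map key:
//
//   AllocGroup AG(MemProt::Read | MemProt::Exec, MemLifetimePolicy::Finalize);
//   assert(AG.getMemProt() == (MemProt::Read | MemProt::Exec));
//   assert(AG.getMemLifetimePolicy() == MemLifetimePolicy::Finalize);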

/// A specialized small-map for AllocGroups.
///
/// Iteration order is guaranteed to match key ordering.
template <typename T> class AllocGroupSmallMap {
private:
  using ElemT = std::pair<AllocGroup, T>;
  using VectorTy = SmallVector<ElemT, 4>;

  static bool compareKey(const ElemT &E, const AllocGroup &G) {
    return E.first < G;
  }

public:
  using iterator = typename VectorTy::iterator;

  AllocGroupSmallMap() = default;
  AllocGroupSmallMap(std::initializer_list<std::pair<AllocGroup, T>> Inits)
      : Elems(Inits) {
    llvm::sort(Elems, llvm::less_first());
  }

  iterator begin() { return Elems.begin(); }
  iterator end() { return Elems.end(); }
  iterator find(AllocGroup G) {
    auto I = lower_bound(Elems, G, compareKey);
    return (I != end() && I->first == G) ? I : end();
  }

  bool empty() const { return Elems.empty(); }
  size_t size() const { return Elems.size(); }

  T &operator[](AllocGroup G) {
    auto I = lower_bound(Elems, G, compareKey);
    if (I == Elems.end() || I->first != G)
      I = Elems.insert(I, std::make_pair(G, T()));
    return I->second;
  }

private:
  VectorTy Elems;
};
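
// Illustrative sketch (not part of the original header): operator[] inserts
// a default-constructed value on first use, and iteration visits keys in
// sorted order:
//
//   AllocGroupSmallMap<uint64_t> SegSizes;
//   SegSizes[AllocGroup(MemProt::Read)] += 128;
//   SegSizes[AllocGroup(MemProt::Read | MemProt::Exec)] += 4096;
//   for (auto &KV : SegSizes)
//     errs() << KV.first << " -> " << KV.second << '\n';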

/// Print an AllocGroup.
inline raw_ostream &operator<<(raw_ostream &OS, AllocGroup AG) {
  return OS << '(' << AG.getMemProt() << ", " << AG.getMemLifetimePolicy()
            << ')';
}

} // end namespace orc

template <> struct DenseMapInfo<orc::MemProt> {
  static inline orc::MemProt getEmptyKey() { return orc::MemProt(~uint8_t(0)); }
  static inline orc::MemProt getTombstoneKey() {
    return orc::MemProt(~uint8_t(0) - 1);
  }
  static unsigned getHashValue(const orc::MemProt &Val) {
    using UT = std::underlying_type_t<orc::MemProt>;
    return DenseMapInfo<UT>::getHashValue(static_cast<UT>(Val));
  }
  static bool isEqual(const orc::MemProt &LHS, const orc::MemProt &RHS) {
    return LHS == RHS;
  }
};

template <> struct DenseMapInfo<orc::AllocGroup> {
  static inline orc::AllocGroup getEmptyKey() {
    return orc::AllocGroup(~uint8_t(0));
  }
  static inline orc::AllocGroup getTombstoneKey() {
    return orc::AllocGroup(~uint8_t(0) - 1);
  }
  static unsigned getHashValue(const orc::AllocGroup &Val) {
    return DenseMapInfo<orc::AllocGroup::underlying_type>::getHashValue(Val.Id);
  }
  static bool isEqual(const orc::AllocGroup &LHS, const orc::AllocGroup &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_MEMORYFLAGS_H

//===- WrapperFunctionUtils.h - Utilities for wrapper functions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for calling and handling wrapper functions, including a
// result-buffer type for serialized values.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_SHARED_WRAPPERFUNCTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_SHARED_WRAPPERFUNCTIONUTILS_H

#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
#include "llvm/Support/Error.h"

#include <type_traits>

namespace llvm {
namespace orc {
namespace shared {

// Must be kept in sync with compiler-rt/lib/orc/c-api.h.
union CWrapperFunctionResultDataUnion {
  char *ValuePtr;
  char Value[sizeof(ValuePtr)];
};

// Must be kept in sync with compiler-rt/lib/orc/c-api.h.
typedef struct {
  CWrapperFunctionResultDataUnion Data;
  size_t Size;
} CWrapperFunctionResult;

/// C++ wrapper function result: Same as CWrapperFunctionResult but
/// auto-releases memory.
class WrapperFunctionResult {
public:
  /// Create a default WrapperFunctionResult.
  WrapperFunctionResult() { init(R); }

  /// Create a WrapperFunctionResult by taking ownership of a
  /// CWrapperFunctionResult.
  ///
  /// Warning: This should only be used by clients writing wrapper-function
  /// caller utilities (like TargetProcessControl).
  WrapperFunctionResult(CWrapperFunctionResult R) : R(R) {
    // Reset R.
    init(R);
  }

  WrapperFunctionResult(const WrapperFunctionResult &) = delete;
  WrapperFunctionResult &operator=(const WrapperFunctionResult &) = delete;

  WrapperFunctionResult(WrapperFunctionResult &&Other) {
    init(R);
    std::swap(R, Other.R);
  }

  WrapperFunctionResult &operator=(WrapperFunctionResult &&Other) {
    WrapperFunctionResult Tmp(std::move(Other));
    std::swap(R, Tmp.R);
    return *this;
  }

  ~WrapperFunctionResult() {
    if ((R.Size > sizeof(R.Data.Value)) ||
        (R.Size == 0 && R.Data.ValuePtr != nullptr))
      free(R.Data.ValuePtr);
  }

  /// Release ownership of the contained CWrapperFunctionResult.
  /// Warning: Do not use -- this method will be removed in the future. It only
  /// exists to temporarily support some code that will eventually be moved to
  /// the ORC runtime.
  CWrapperFunctionResult release() {
    CWrapperFunctionResult Tmp;
    init(Tmp);
    std::swap(R, Tmp);
    return Tmp;
  }

  /// Get a pointer to the data contained in this instance.
  char *data() {
    assert((R.Size != 0 || R.Data.ValuePtr == nullptr) &&
           "Cannot get data for out-of-band error value");
    return R.Size > sizeof(R.Data.Value) ? R.Data.ValuePtr : R.Data.Value;
  }

  /// Get a const pointer to the data contained in this instance.
  const char *data() const {
    assert((R.Size != 0 || R.Data.ValuePtr == nullptr) &&
           "Cannot get data for out-of-band error value");
    return R.Size > sizeof(R.Data.Value) ? R.Data.ValuePtr : R.Data.Value;
  }

  /// Returns the size of the data contained in this instance.
  size_t size() const {
    assert((R.Size != 0 || R.Data.ValuePtr == nullptr) &&
           "Cannot get data for out-of-band error value");
    return R.Size;
  }

  /// Returns true if this value is equivalent to a default-constructed
  /// WrapperFunctionResult.
  bool empty() const { return R.Size == 0 && R.Data.ValuePtr == nullptr; }

  /// Create a WrapperFunctionResult with the given size and return a pointer
  /// to the underlying memory.
  static WrapperFunctionResult allocate(size_t Size) {
    // Reset.
    WrapperFunctionResult WFR;
    WFR.R.Size = Size;
    if (WFR.R.Size > sizeof(WFR.R.Data.Value))
      WFR.R.Data.ValuePtr = (char *)malloc(WFR.R.Size);
    return WFR;
  }

  /// Copy from the given char range.
  static WrapperFunctionResult copyFrom(const char *Source, size_t Size) {
    auto WFR = allocate(Size);
    memcpy(WFR.data(), Source, Size);
    return WFR;
  }

  /// Copy from the given null-terminated string (includes the null-terminator).
  static WrapperFunctionResult copyFrom(const char *Source) {
    return copyFrom(Source, strlen(Source) + 1);
  }

  /// Copy from the given std::string (includes the null terminator).
  static WrapperFunctionResult copyFrom(const std::string &Source) {
    return copyFrom(Source.c_str());
  }

  /// Create an out-of-band error by copying the given string.
  static WrapperFunctionResult createOutOfBandError(const char *Msg) {
    // Reset.
    WrapperFunctionResult WFR;
    char *Tmp = (char *)malloc(strlen(Msg) + 1);
    strcpy(Tmp, Msg);
    WFR.R.Data.ValuePtr = Tmp;
    return WFR;
  }

  /// Create an out-of-band error by copying the given string.
  static WrapperFunctionResult createOutOfBandError(const std::string &Msg) {
    return createOutOfBandError(Msg.c_str());
  }

  /// If this value is an out-of-band error then this returns the error message,
  /// otherwise returns nullptr.
  const char *getOutOfBandError() const {
    return R.Size == 0 ? R.Data.ValuePtr : nullptr;
  }

private:
  static void init(CWrapperFunctionResult &R) {
    R.Data.ValuePtr = nullptr;
    R.Size = 0;
  }

  CWrapperFunctionResult R;
};
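
// Illustrative sketch (not part of the original header): small results are
// stored in the inline Value buffer, larger ones on the heap, and both are
// freed by the destructor. A zero Size with a non-null ValuePtr marks an
// out-of-band error:
//
//   auto WFR = WrapperFunctionResult::copyFrom("ok"); // 3 bytes, inline.
//   assert(!WFR.getOutOfBandError());
//   auto OOB = WrapperFunctionResult::createOutOfBandError("bad call");
//   assert(OOB.getOutOfBandError() != nullptr);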

namespace detail {

template <typename SPSArgListT, typename... ArgTs>
WrapperFunctionResult
serializeViaSPSToWrapperFunctionResult(const ArgTs &...Args) {
  auto Result = WrapperFunctionResult::allocate(SPSArgListT::size(Args...));
  SPSOutputBuffer OB(Result.data(), Result.size());
  if (!SPSArgListT::serialize(OB, Args...))
    return WrapperFunctionResult::createOutOfBandError(
        "Error serializing arguments to blob in call");
  return Result;
}

template <typename RetT> class WrapperFunctionHandlerCaller {
public:
  template <typename HandlerT, typename ArgTupleT, std::size_t... I>
  static decltype(auto) call(HandlerT &&H, ArgTupleT &Args,
                             std::index_sequence<I...>) {
    return std::forward<HandlerT>(H)(std::get<I>(Args)...);
  }
};

template <> class WrapperFunctionHandlerCaller<void> {
public:
  template <typename HandlerT, typename ArgTupleT, std::size_t... I>
  static SPSEmpty call(HandlerT &&H, ArgTupleT &Args,
                       std::index_sequence<I...>) {
    std::forward<HandlerT>(H)(std::get<I>(Args)...);
    return SPSEmpty();
  }
};

template <typename WrapperFunctionImplT,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionHandlerHelper
    : public WrapperFunctionHandlerHelper<
          decltype(&std::remove_reference_t<WrapperFunctionImplT>::operator()),
          ResultSerializer, SPSTagTs...> {};

template <typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                   SPSTagTs...> {
public:
  using ArgTuple = std::tuple<std::decay_t<ArgTs>...>;
  using ArgIndices = std::make_index_sequence<std::tuple_size<ArgTuple>::value>;

  template <typename HandlerT>
  static WrapperFunctionResult apply(HandlerT &&H, const char *ArgData,
                                     size_t ArgSize) {
    ArgTuple Args;
    if (!deserialize(ArgData, ArgSize, Args, ArgIndices{}))
      return WrapperFunctionResult::createOutOfBandError(
          "Could not deserialize arguments for wrapper function call");

    auto HandlerResult = WrapperFunctionHandlerCaller<RetT>::call(
        std::forward<HandlerT>(H), Args, ArgIndices{});

    return ResultSerializer<decltype(HandlerResult)>::serialize(
        std::move(HandlerResult));
  }

private:
  template <std::size_t... I>
  static bool deserialize(const char *ArgData, size_t ArgSize, ArgTuple &Args,
                          std::index_sequence<I...>) {
    SPSInputBuffer IB(ArgData, ArgSize);
    return SPSArgList<SPSTagTs...>::deserialize(IB, std::get<I>(Args)...);
  }
};

// Map function pointers to function types.
template <typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionHandlerHelper<RetT (*)(ArgTs...), ResultSerializer,
                                   SPSTagTs...>
    : public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                          SPSTagTs...> {};

// Map non-const member function types to function types.
template <typename ClassT, typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionHandlerHelper<RetT (ClassT::*)(ArgTs...), ResultSerializer,
                                   SPSTagTs...>
    : public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                          SPSTagTs...> {};

// Map const member function types to function types.
template <typename ClassT, typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionHandlerHelper<RetT (ClassT::*)(ArgTs...) const,
                                   ResultSerializer, SPSTagTs...>
    : public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                          SPSTagTs...> {};

template <typename WrapperFunctionImplT,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionAsyncHandlerHelper
    : public WrapperFunctionAsyncHandlerHelper<
          decltype(&std::remove_reference_t<WrapperFunctionImplT>::operator()),
          ResultSerializer, SPSTagTs...> {};

template <typename RetT, typename SendResultT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionAsyncHandlerHelper<RetT(SendResultT, ArgTs...),
                                        ResultSerializer, SPSTagTs...> {
public:
  using ArgTuple = std::tuple<std::decay_t<ArgTs>...>;
  using ArgIndices = std::make_index_sequence<std::tuple_size<ArgTuple>::value>;

  template <typename HandlerT, typename SendWrapperFunctionResultT>
  static void applyAsync(HandlerT &&H,
                         SendWrapperFunctionResultT &&SendWrapperFunctionResult,
                         const char *ArgData, size_t ArgSize) {
    ArgTuple Args;
    if (!deserialize(ArgData, ArgSize, Args, ArgIndices{})) {
      SendWrapperFunctionResult(WrapperFunctionResult::createOutOfBandError(
          "Could not deserialize arguments for wrapper function call"));
      return;
    }

    auto SendResult =
        [SendWFR = std::move(SendWrapperFunctionResult)](auto Result) mutable {
          using ResultT = decltype(Result);
          SendWFR(ResultSerializer<ResultT>::serialize(std::move(Result)));
        };

    callAsync(std::forward<HandlerT>(H), std::move(SendResult), std::move(Args),
              ArgIndices{});
  }

private:
  template <std::size_t... I>
  static bool deserialize(const char *ArgData, size_t ArgSize, ArgTuple &Args,
                          std::index_sequence<I...>) {
    SPSInputBuffer IB(ArgData, ArgSize);
    return SPSArgList<SPSTagTs...>::deserialize(IB, std::get<I>(Args)...);
  }

  template <typename HandlerT, typename SerializeAndSendResultT,
            typename ArgTupleT, std::size_t... I>
  static void callAsync(HandlerT &&H,
                        SerializeAndSendResultT &&SerializeAndSendResult,
                        ArgTupleT Args, std::index_sequence<I...>) {
    (void)Args; // Silence a buggy GCC warning.
    return std::forward<HandlerT>(H)(std::move(SerializeAndSendResult),
                                     std::move(std::get<I>(Args))...);
  }
};

// Map function pointers to function types.
template <typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionAsyncHandlerHelper<RetT (*)(ArgTs...), ResultSerializer,
                                        SPSTagTs...>
    : public WrapperFunctionAsyncHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                               SPSTagTs...> {};

// Map non-const member function types to function types.
template <typename ClassT, typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionAsyncHandlerHelper<RetT (ClassT::*)(ArgTs...),
                                        ResultSerializer, SPSTagTs...>
    : public WrapperFunctionAsyncHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                               SPSTagTs...> {};

// Map const member function types to function types.
template <typename ClassT, typename RetT, typename... ArgTs,
          template <typename> class ResultSerializer, typename... SPSTagTs>
class WrapperFunctionAsyncHandlerHelper<RetT (ClassT::*)(ArgTs...) const,
                                        ResultSerializer, SPSTagTs...>
    : public WrapperFunctionAsyncHandlerHelper<RetT(ArgTs...), ResultSerializer,
                                               SPSTagTs...> {};

template <typename SPSRetTagT, typename RetT> class ResultSerializer {
public:
  static WrapperFunctionResult serialize(RetT Result) {
    return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
        Result);
  }
};

template <typename SPSRetTagT> class ResultSerializer<SPSRetTagT, Error> {
public:
  static WrapperFunctionResult serialize(Error Err) {
    return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
        toSPSSerializable(std::move(Err)));
  }
};

template <typename SPSRetTagT>
class ResultSerializer<SPSRetTagT, ErrorSuccess> {
public:
  static WrapperFunctionResult serialize(ErrorSuccess Err) {
    return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
        toSPSSerializable(std::move(Err)));
  }
};

template <typename SPSRetTagT, typename T>
class ResultSerializer<SPSRetTagT, Expected<T>> {
public:
  static WrapperFunctionResult serialize(Expected<T> E) {
    return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
        toSPSSerializable(std::move(E)));
  }
};

template <typename SPSRetTagT, typename RetT> class ResultDeserializer {
public:
  static RetT makeValue() { return RetT(); }
  static void makeSafe(RetT &Result) {}

  static Error deserialize(RetT &Result, const char *ArgData, size_t ArgSize) {
    SPSInputBuffer IB(ArgData, ArgSize);
    if (!SPSArgList<SPSRetTagT>::deserialize(IB, Result))
      return make_error<StringError>(
          "Error deserializing return value from blob in call",
          inconvertibleErrorCode());
    return Error::success();
  }
};

template <> class ResultDeserializer<SPSError, Error> {
public:
  static Error makeValue() { return Error::success(); }
  static void makeSafe(Error &Err) { cantFail(std::move(Err)); }

  static Error deserialize(Error &Err, const char *ArgData, size_t ArgSize) {
    SPSInputBuffer IB(ArgData, ArgSize);
    SPSSerializableError BSE;
    if (!SPSArgList<SPSError>::deserialize(IB, BSE))
      return make_error<StringError>(
          "Error deserializing return value from blob in call",
          inconvertibleErrorCode());
    Err = fromSPSSerializable(std::move(BSE));
    return Error::success();
  }
};

template <typename SPSTagT, typename T>
class ResultDeserializer<SPSExpected<SPSTagT>, Expected<T>> {
public:
  static Expected<T> makeValue() { return T(); }
  static void makeSafe(Expected<T> &E) { cantFail(E.takeError()); }

  static Error deserialize(Expected<T> &E, const char *ArgData,
                           size_t ArgSize) {
    SPSInputBuffer IB(ArgData, ArgSize);
    SPSSerializableExpected<T> BSE;
    if (!SPSArgList<SPSExpected<SPSTagT>>::deserialize(IB, BSE))
      return make_error<StringError>(
          "Error deserializing return value from blob in call",
          inconvertibleErrorCode());
    E = fromSPSSerializable(std::move(BSE));
    return Error::success();
  }
};

template <typename SPSRetTagT, typename RetT> class AsyncCallResultHelper {
  // Did you forget to use Error / Expected in your handler?
};

} // end namespace detail

template <typename SPSSignature> class WrapperFunction;

template <typename SPSRetTagT, typename... SPSTagTs>
class WrapperFunction<SPSRetTagT(SPSTagTs...)> {
private:
  template <typename RetT>
  using ResultSerializer = detail::ResultSerializer<SPSRetTagT, RetT>;

public:
  /// Call a wrapper function. Caller should be callable as
  /// WrapperFunctionResult Fn(const char *ArgData, size_t ArgSize);
  template <typename CallerFn, typename RetT, typename... ArgTs>
  static Error call(const CallerFn &Caller, RetT &Result,
                    const ArgTs &...Args) {

    // RetT might be an Error or Expected value. Set the checked flag now:
    // we don't want the user to have to check the unused result if this
    // operation fails.
    detail::ResultDeserializer<SPSRetTagT, RetT>::makeSafe(Result);

    auto ArgBuffer =
        detail::serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSTagTs...>>(
            Args...);
    if (const char *ErrMsg = ArgBuffer.getOutOfBandError())
      return make_error<StringError>(ErrMsg, inconvertibleErrorCode());

    WrapperFunctionResult ResultBuffer =
        Caller(ArgBuffer.data(), ArgBuffer.size());
    if (auto ErrMsg = ResultBuffer.getOutOfBandError())
      return make_error<StringError>(ErrMsg, inconvertibleErrorCode());

    return detail::ResultDeserializer<SPSRetTagT, RetT>::deserialize(
        Result, ResultBuffer.data(), ResultBuffer.size());
  }

  /// Call an async wrapper function.
  /// Caller should be callable as
  /// void Fn(unique_function<void(WrapperFunctionResult)> SendResult,
  ///         const char *ArgData, size_t ArgSize);
  template <typename AsyncCallerFn, typename SendDeserializedResultFn,
            typename... ArgTs>
  static void callAsync(AsyncCallerFn &&Caller,
                        SendDeserializedResultFn &&SendDeserializedResult,
                        const ArgTs &...Args) {
    using RetT = typename std::tuple_element<
        1, typename detail::WrapperFunctionHandlerHelper<
               std::remove_reference_t<SendDeserializedResultFn>,
               ResultSerializer, SPSRetTagT>::ArgTuple>::type;

    auto ArgBuffer =
        detail::serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSTagTs...>>(
            Args...);
    if (auto *ErrMsg = ArgBuffer.getOutOfBandError()) {
      SendDeserializedResult(
          make_error<StringError>(ErrMsg, inconvertibleErrorCode()),
          detail::ResultDeserializer<SPSRetTagT, RetT>::makeValue());
      return;
    }

    auto SendSerializedResult = [SDR = std::move(SendDeserializedResult)](
                                    WrapperFunctionResult R) mutable {
      RetT RetVal = detail::ResultDeserializer<SPSRetTagT, RetT>::makeValue();
      detail::ResultDeserializer<SPSRetTagT, RetT>::makeSafe(RetVal);

      if (auto *ErrMsg = R.getOutOfBandError()) {
        SDR(make_error<StringError>(ErrMsg, inconvertibleErrorCode()),
            std::move(RetVal));
        return;
      }

      if (auto Err = detail::ResultDeserializer<SPSRetTagT, RetT>::deserialize(
              RetVal, R.data(), R.size())) {
        SDR(std::move(Err), std::move(RetVal));
        return;
      }

      SDR(Error::success(), std::move(RetVal));
    };

    Caller(std::move(SendSerializedResult), ArgBuffer.data(), ArgBuffer.size());
  }

  /// Handle a call to a wrapper function.
  template <typename HandlerT>
  static WrapperFunctionResult handle(const char *ArgData, size_t ArgSize,
                                      HandlerT &&Handler) {
    using WFHH =
        detail::WrapperFunctionHandlerHelper<std::remove_reference_t<HandlerT>,
                                             ResultSerializer, SPSTagTs...>;
    return WFHH::apply(std::forward<HandlerT>(Handler), ArgData, ArgSize);
  }

  /// Handle a call to an async wrapper function.
  template <typename HandlerT, typename SendResultT>
  static void handleAsync(const char *ArgData, size_t ArgSize,
                          HandlerT &&Handler, SendResultT &&SendResult) {
    using WFAHH = detail::WrapperFunctionAsyncHandlerHelper<
        std::remove_reference_t<HandlerT>, ResultSerializer, SPSTagTs...>;
    WFAHH::applyAsync(std::forward<HandlerT>(Handler),
                      std::forward<SendResultT>(SendResult), ArgData, ArgSize);
  }

private:
  template <typename T> static const T &makeSerializable(const T &Value) {
    return Value;
  }

  static detail::SPSSerializableError makeSerializable(Error Err) {
    return detail::toSPSSerializable(std::move(Err));
  }

  template <typename T>
  static detail::SPSSerializableExpected<T> makeSerializable(Expected<T> E) {
    return detail::toSPSSerializable(std::move(E));
  }
};
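
// Illustrative sketch (not part of the original header): a matched
// call/handle pair for a hypothetical in-process "add" wrapper using the SPS
// signature int32_t(int32_t, int32_t):
//
//   static CWrapperFunctionResult addWrapper(const char *ArgData,
//                                            size_t ArgSize) {
//     return WrapperFunction<int32_t(int32_t, int32_t)>::handle(
//                ArgData, ArgSize,
//                [](int32_t A, int32_t B) -> int32_t { return A + B; })
//         .release();
//   }
//
//   int32_t Result = 0;
//   auto Caller = [](const char *ArgData, size_t ArgSize) {
//     return WrapperFunctionResult(addWrapper(ArgData, ArgSize));
//   };
//   if (auto Err = WrapperFunction<int32_t(int32_t, int32_t)>::call(
//           Caller, Result, int32_t(2), int32_t(3)))
//     logAllUnhandledErrors(std::move(Err), errs(), "add failed: ");
//   // Result is now 5.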

template <typename... SPSTagTs>
class WrapperFunction<void(SPSTagTs...)>
    : private WrapperFunction<SPSEmpty(SPSTagTs...)> {

public:
  template <typename CallerFn, typename... ArgTs>
  static Error call(const CallerFn &Caller, const ArgTs &...Args) {
    SPSEmpty BE;
    return WrapperFunction<SPSEmpty(SPSTagTs...)>::call(Caller, BE, Args...);
  }

  template <typename AsyncCallerFn, typename SendDeserializedResultFn,
            typename... ArgTs>
  static void callAsync(AsyncCallerFn &&Caller,
                        SendDeserializedResultFn &&SendDeserializedResult,
                        const ArgTs &...Args) {
    WrapperFunction<SPSEmpty(SPSTagTs...)>::callAsync(
        std::forward<AsyncCallerFn>(Caller),
        [SDR = std::move(SendDeserializedResult)](Error SerializeErr,
                                                  SPSEmpty E) mutable {
          SDR(std::move(SerializeErr));
        },
        Args...);
  }

  using WrapperFunction<SPSEmpty(SPSTagTs...)>::handle;
  using WrapperFunction<SPSEmpty(SPSTagTs...)>::handleAsync;
};

/// A function object that takes an ExecutorAddr as its first argument,
/// casts that address to a ClassT*, then calls the given method on that
/// pointer passing in the remaining function arguments. This utility
/// removes some of the boilerplate from writing wrappers for method calls.
///
///   @code{.cpp}
///   class MyClass {
///   public:
///     void myMethod(uint32_t, bool) { ... }
///   };
///
///   // SPS Method signature -- note MyClass object address as first argument.
///   using SPSMyMethodWrapperSignature =
///     SPSTuple<SPSExecutorAddr, uint32_t, bool>;
///
///   WrapperFunctionResult
///   myMethodCallWrapper(const char *ArgData, size_t ArgSize) {
///     return WrapperFunction<SPSMyMethodWrapperSignature>::handle(
///        ArgData, ArgSize, makeMethodWrapperHandler(&MyClass::myMethod));
///   }
///   @endcode
///
template <typename RetT, typename ClassT, typename... ArgTs>
class MethodWrapperHandler {
public:
  using MethodT = RetT (ClassT::*)(ArgTs...);
  MethodWrapperHandler(MethodT M) : M(M) {}
  RetT operator()(ExecutorAddr ObjAddr, ArgTs &...Args) {
    return (ObjAddr.toPtr<ClassT*>()->*M)(std::forward<ArgTs>(Args)...);
  }

private:
  MethodT M;
};

/// Create a MethodWrapperHandler object from the given method pointer.
template <typename RetT, typename ClassT, typename... ArgTs>
MethodWrapperHandler<RetT, ClassT, ArgTs...>
makeMethodWrapperHandler(RetT (ClassT::*Method)(ArgTs...)) {
  return MethodWrapperHandler<RetT, ClassT, ArgTs...>(Method);
}

/// Represents a serialized wrapper function call.
/// Serializing calls themselves allows us to batch them: We can make one
/// "run-wrapper-functions" utility and send it a list of calls to run.
///
/// The motivating use-case for this API is JITLink allocation actions, where
/// we want to run multiple functions to finalize linked memory without having
/// to make separate IPC calls for each one.
class WrapperFunctionCall {
public:
  using ArgDataBufferType = SmallVector<char, 24>;

  /// Create a WrapperFunctionCall using the given SPS serializer to serialize
  /// the arguments.
  template <typename SPSSerializer, typename... ArgTs>
  static Expected<WrapperFunctionCall> Create(ExecutorAddr FnAddr,
                                              const ArgTs &...Args) {
    ArgDataBufferType ArgData;
    ArgData.resize(SPSSerializer::size(Args...));
    SPSOutputBuffer OB(ArgData.empty() ? nullptr : ArgData.data(),
                       ArgData.size());
    if (SPSSerializer::serialize(OB, Args...))
      return WrapperFunctionCall(FnAddr, std::move(ArgData));
    return make_error<StringError>("Cannot serialize arguments for "
                                   "AllocActionCall",
                                   inconvertibleErrorCode());
  }

  WrapperFunctionCall() = default;

  /// Create a WrapperFunctionCall from a target function and arg buffer.
  WrapperFunctionCall(ExecutorAddr FnAddr, ArgDataBufferType ArgData)
      : FnAddr(FnAddr), ArgData(std::move(ArgData)) {}

  /// Returns the address to be called.
  const ExecutorAddr &getCallee() const { return FnAddr; }

  /// Returns the argument data.
  const ArgDataBufferType &getArgData() const { return ArgData; }

  /// WrapperFunctionCalls convert to true if the callee is non-null.
  explicit operator bool() const { return !!FnAddr; }

  /// Run call returning raw WrapperFunctionResult.
  shared::WrapperFunctionResult run() const {
    using FnTy =
        shared::CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
    return shared::WrapperFunctionResult(
        FnAddr.toPtr<FnTy *>()(ArgData.data(), ArgData.size()));
  }

  /// Run call and deserialize result using SPS.
  template <typename SPSRetT, typename RetT>
  std::enable_if_t<!std::is_same<SPSRetT, void>::value, Error>
  runWithSPSRet(RetT &RetVal) const {
    auto WFR = run();
    if (const char *ErrMsg = WFR.getOutOfBandError())
      return make_error<StringError>(ErrMsg, inconvertibleErrorCode());
    shared::SPSInputBuffer IB(WFR.data(), WFR.size());
    if (!shared::SPSSerializationTraits<SPSRetT, RetT>::deserialize(IB, RetVal))
      return make_error<StringError>("Could not deserialize result from "
                                     "serialized wrapper function call",
                                     inconvertibleErrorCode());
    return Error::success();
  }

  /// Overload for SPS functions returning void.
  template <typename SPSRetT>
  std::enable_if_t<std::is_same<SPSRetT, void>::value, Error>
  runWithSPSRet() const {
    shared::SPSEmpty E;
    return runWithSPSRet<shared::SPSEmpty>(E);
  }

  /// Run call and deserialize an SPSError result. A returned SPSError value
  /// and any deserialization failure are merged into the returned Error.
  Error runWithSPSRetErrorMerged() const {
    detail::SPSSerializableError RetErr;
    if (auto Err = runWithSPSRet<SPSError>(RetErr))
      return Err;
    return detail::fromSPSSerializable(std::move(RetErr));
  }

private:
  orc::ExecutorAddr FnAddr;
  ArgDataBufferType ArgData;
};
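
// Illustrative sketch (not part of the original header): creating and
// running a serialized call in the current process. The callee and its
// address are hypothetical; the callee must have C wrapper-function form,
// i.e. CWrapperFunctionResult(const char *ArgData, size_t ArgSize).
//
//   auto FnAddr = ExecutorAddr::fromPtr(&myRegistrationWrapper);
//   auto WFC = cantFail(
//       WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
//           FnAddr, ExecutorAddrRange(SectionBase, SectionSize)));
//   if (auto Err = WFC.runWithSPSRetErrorMerged())
//     logAllUnhandledErrors(std::move(Err), errs(), "action failed: ");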

using SPSWrapperFunctionCall = SPSTuple<SPSExecutorAddr, SPSSequence<char>>;

template <>
class SPSSerializationTraits<SPSWrapperFunctionCall, WrapperFunctionCall> {
public:
  static size_t size(const WrapperFunctionCall &WFC) {
    return SPSWrapperFunctionCall::AsArgList::size(WFC.getCallee(),
                                                   WFC.getArgData());
  }

  static bool serialize(SPSOutputBuffer &OB, const WrapperFunctionCall &WFC) {
    return SPSWrapperFunctionCall::AsArgList::serialize(OB, WFC.getCallee(),
                                                        WFC.getArgData());
  }

  static bool deserialize(SPSInputBuffer &IB, WrapperFunctionCall &WFC) {
    ExecutorAddr FnAddr;
    WrapperFunctionCall::ArgDataBufferType ArgData;
    if (!SPSWrapperFunctionCall::AsArgList::deserialize(IB, FnAddr, ArgData))
      return false;
    WFC = WrapperFunctionCall(FnAddr, std::move(ArgData));
    return true;
  }
};

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_SHARED_WRAPPERFUNCTIONUTILS_H

//===---------------- Layer.h -- Layer interfaces --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Layer interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_LAYER_H
#define LLVM_EXECUTIONENGINE_ORC_LAYER_H

#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Mangling.h"
#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "llvm/Support/MemoryBuffer.h"

namespace llvm {
namespace orc {

/// IRMaterializationUnit is a convenient base class for MaterializationUnits
/// wrapping LLVM IR. Represents materialization responsibility for all symbols
/// in the given module. If symbols are overridden by other definitions, then
/// their linkage is changed to available-externally.
class IRMaterializationUnit : public MaterializationUnit {
public:
  using SymbolNameToDefinitionMap = std::map<SymbolStringPtr, GlobalValue *>;

  /// Create an IRMaterializationUnit. Scans the module to build the
  /// SymbolFlags and SymbolToDefinition maps.
  IRMaterializationUnit(ExecutionSession &ES,
                        const IRSymbolMapper::ManglingOptions &MO,
                        ThreadSafeModule TSM);

  /// Create an IRMaterializationUnit from a module, and pre-existing
  /// SymbolFlags and SymbolToDefinition maps. The maps must provide
  /// entries for each definition in M.
  /// This constructor is useful for delegating work from one
  /// IRMaterializationUnit to another.
  IRMaterializationUnit(ThreadSafeModule TSM, Interface I,
                        SymbolNameToDefinitionMap SymbolToDefinition);

  /// Return the ModuleIdentifier as the name for this MaterializationUnit.
  StringRef getName() const override;

  /// Return a reference to the contained ThreadSafeModule.
  const ThreadSafeModule &getModule() const { return TSM; }

protected:
  ThreadSafeModule TSM;
  SymbolNameToDefinitionMap SymbolToDefinition;

private:
  static SymbolStringPtr getInitSymbol(ExecutionSession &ES,
                                       const ThreadSafeModule &TSM);

  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
};

/// Interface for layers that accept LLVM IR.
class IRLayer {
public:
  IRLayer(ExecutionSession &ES, const IRSymbolMapper::ManglingOptions *&MO)
      : ES(ES), MO(MO) {}

  virtual ~IRLayer();

  /// Returns the ExecutionSession for this layer.
  ExecutionSession &getExecutionSession() { return ES; }

  /// Get the mangling options for this layer.
  const IRSymbolMapper::ManglingOptions *&getManglingOptions() const {
    return MO;
  }

  /// Sets the CloneToNewContextOnEmit flag (false by default).
  ///
  /// When set, IR modules added to this layer will be cloned on to a new
  /// context before emit is called. This can be used by clients who want
  /// to load all IR using one LLVMContext (to save memory via type and
  /// constant uniquing), but want to move Modules to fresh contexts before
  /// compiling them to enable concurrent compilation.
  /// Single threaded clients, or clients who load every module on a new
  /// context, need not set this.
  void setCloneToNewContextOnEmit(bool CloneToNewContextOnEmit) {
    this->CloneToNewContextOnEmit = CloneToNewContextOnEmit;
  }

  /// Returns the current value of the CloneToNewContextOnEmit flag.
  bool getCloneToNewContextOnEmit() const { return CloneToNewContextOnEmit; }

  /// Add a MaterializationUnit representing the given IR to the JITDylib
  /// targeted by the given tracker.
  virtual Error add(ResourceTrackerSP RT, ThreadSafeModule TSM);

  /// Adds a MaterializationUnit representing the given IR to the given
  /// JITDylib, using the JITDylib's default resource tracker.
  Error add(JITDylib &JD, ThreadSafeModule TSM) {
    return add(JD.getDefaultResourceTracker(), std::move(TSM));
  }

  /// Emit should materialize the given IR.
  virtual void emit(std::unique_ptr<MaterializationResponsibility> R,
                    ThreadSafeModule TSM) = 0;

private:
  bool CloneToNewContextOnEmit = false;
  ExecutionSession &ES;
  const IRSymbolMapper::ManglingOptions *&MO;
};
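
// Illustrative sketch (not part of the original header): adding IR to an
// IRLayer-derived layer. CompileLayer, JD and ES are assumed to exist in the
// surrounding JIT setup.
//
//   auto Ctx = std::make_unique<LLVMContext>();
//   auto M = std::make_unique<Module>("m", *Ctx);
//   // ... populate M ...
//   ThreadSafeModule TSM(std::move(M), std::move(Ctx));
//   if (auto Err = CompileLayer.add(JD, std::move(TSM)))
//     ES.reportError(std::move(Err));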

/// MaterializationUnit that materializes modules by calling the 'emit' method
/// on the given IRLayer.
class BasicIRLayerMaterializationUnit : public IRMaterializationUnit {
public:
  BasicIRLayerMaterializationUnit(IRLayer &L,
                                  const IRSymbolMapper::ManglingOptions &MO,
                                  ThreadSafeModule TSM);

private:
  void materialize(std::unique_ptr<MaterializationResponsibility> R) override;

  IRLayer &L;
};

/// Interface for Layers that accept object files.
class ObjectLayer : public RTTIExtends<ObjectLayer, RTTIRoot> {
public:
  static char ID;

  ObjectLayer(ExecutionSession &ES);
  virtual ~ObjectLayer();

  /// Returns the execution session for this layer.
  ExecutionSession &getExecutionSession() { return ES; }

  /// Adds a MaterializationUnit for the object file in the given memory buffer
  /// to the JITDylib for the given ResourceTracker.
  virtual Error add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O,
                    MaterializationUnit::Interface I);

  /// Adds a MaterializationUnit for the object file in the given memory buffer
  /// to the JITDylib for the given ResourceTracker. The interface for the
  /// object will be built using the default object interface builder.
  Error add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O);

  /// Adds a MaterializationUnit for the object file in the given memory buffer
  /// to the given JITDylib.
  Error add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O,
            MaterializationUnit::Interface I) {
    return add(JD.getDefaultResourceTracker(), std::move(O), std::move(I));
  }

  /// Adds a MaterializationUnit for the object file in the given memory buffer
  /// to the given JITDylib. The interface for the object will be built using
  /// the default object interface builder.
  Error add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O);

  /// Emit should materialize the given object.
  virtual void emit(std::unique_ptr<MaterializationResponsibility> R,
                    std::unique_ptr<MemoryBuffer> O) = 0;

private:
  ExecutionSession &ES;
};

/// Materializes the given object file (represented by a MemoryBuffer
/// instance) by calling 'emit' on the given ObjectLayer.
class BasicObjectLayerMaterializationUnit : public MaterializationUnit {
public:
  /// Create using the default object interface builder function.
  static Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
  Create(ObjectLayer &L, std::unique_ptr<MemoryBuffer> O);

  BasicObjectLayerMaterializationUnit(ObjectLayer &L,
                                      std::unique_ptr<MemoryBuffer> O,
                                      Interface I);

  /// Return the buffer's identifier as the name for this MaterializationUnit.
  StringRef getName() const override;

private:
  void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;

  ObjectLayer &L;
  std::unique_ptr<MemoryBuffer> O;
};

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_LAYER_H

//===--- COFFPlatform.h -- Utilities for executing COFF in Orc --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for executing JIT'd COFF in Orc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_COFFPLATFORM_H
#define LLVM_EXECUTIONENGINE_ORC_COFFPLATFORM_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/Orc/COFFVCRuntimeSupport.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

#include <future>
#include <memory>
#include <thread>
#include <vector>

namespace llvm {
namespace orc {

/// Mediates between COFF initialization and ExecutionSession state.
class COFFPlatform : public Platform {
public:
  /// A function that will be called with the name of a DLL file that must be
  /// loaded.
  using LoadDynamicLibrary =
      unique_function<Error(JITDylib &JD, StringRef DLLFileName)>;

  /// Try to create a COFFPlatform instance, adding the ORC runtime to the
  /// given JITDylib.
  static Expected<std::unique_ptr<COFFPlatform>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         JITDylib &PlatformJD,
         std::unique_ptr<MemoryBuffer> OrcRuntimeArchiveBuffer,
         LoadDynamicLibrary LoadDynLibrary, bool StaticVCRuntime = false,
         const char *VCRuntimePath = nullptr,
         std::optional<SymbolAliasMap> RuntimeAliases = std::nullopt);

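  /// Try to create a COFFPlatform instance, loading the ORC runtime from the
  /// given path. For illustration only, a hedged sketch of setup (the runtime
  /// path and the load-DLL callback body are hypothetical):
  /// \code
  ///   auto P = COFFPlatform::Create(
  ///       ES, ObjLinkingLayer, PlatformJD, "path/to/orc_rt-coff.a",
  ///       [](JITDylib &JD, StringRef DLLName) -> Error {
  ///         // Load DLLName into the executor process here (sketch).
  ///         return Error::success();
  ///       });
  ///   if (!P)
  ///     return P.takeError();
  ///   ES.setPlatform(std::move(*P));
  /// \endcode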
  static Expected<std::unique_ptr<COFFPlatform>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         JITDylib &PlatformJD, const char *OrcRuntimePath,
         LoadDynamicLibrary LoadDynLibrary, bool StaticVCRuntime = false,
         const char *VCRuntimePath = nullptr,
         std::optional<SymbolAliasMap> RuntimeAliases = std::nullopt);

  ExecutionSession &getExecutionSession() const { return ES; }
  ObjectLinkingLayer &getObjectLinkingLayer() const { return ObjLinkingLayer; }

  Error setupJITDylib(JITDylib &JD) override;
  Error teardownJITDylib(JITDylib &JD) override;
  Error notifyAdding(ResourceTracker &RT,
                     const MaterializationUnit &MU) override;
  Error notifyRemoving(ResourceTracker &RT) override;

  /// Returns an AliasMap containing the default aliases for the COFFPlatform.
  /// This can be modified by clients when constructing the platform to add
  /// or remove aliases.
  static SymbolAliasMap standardPlatformAliases(ExecutionSession &ES);

  /// Returns the array of required CXX aliases.
  static ArrayRef<std::pair<const char *, const char *>> requiredCXXAliases();

  /// Returns the array of standard runtime utility aliases for COFF.
  static ArrayRef<std::pair<const char *, const char *>>
  standardRuntimeUtilityAliases();

  static StringRef getSEHFrameSectionName() { return ".pdata"; }

private:
  using COFFJITDylibDepInfo = std::vector<ExecutorAddr>;
  using COFFJITDylibDepInfoMap =
      std::vector<std::pair<ExecutorAddr, COFFJITDylibDepInfo>>;
  using COFFObjectSectionsMap =
      SmallVector<std::pair<std::string, ExecutorAddrRange>>;
  using PushInitializersSendResultFn =
      unique_function<void(Expected<COFFJITDylibDepInfoMap>)>;
  using SendSymbolAddressFn = unique_function<void(Expected<ExecutorAddr>)>;
  using JITDylibDepMap = DenseMap<JITDylib *, SmallVector<JITDylib *>>;

  // The COFFPlatformPlugin scans/modifies LinkGraphs to support COFF
  // platform features including initializers, exceptions, and language
  // runtime registration.
  class COFFPlatformPlugin : public ObjectLinkingLayer::Plugin {
  public:
    COFFPlatformPlugin(COFFPlatform &CP) : CP(CP) {}

    void modifyPassConfig(MaterializationResponsibility &MR,
                          jitlink::LinkGraph &G,
                          jitlink::PassConfiguration &Config) override;

    SyntheticSymbolDependenciesMap
    getSyntheticSymbolDependencies(MaterializationResponsibility &MR) override;

    // FIXME: We should be tentatively tracking scraped sections and discarding
    // if the MR fails.
    Error notifyFailed(MaterializationResponsibility &MR) override {
      return Error::success();
    }

    Error notifyRemovingResources(JITDylib &JD, ResourceKey K) override {
      return Error::success();
    }

    void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                     ResourceKey SrcKey) override {}

  private:
    using InitSymbolDepMap =
        DenseMap<MaterializationResponsibility *, JITLinkSymbolSet>;

    Error associateJITDylibHeaderSymbol(jitlink::LinkGraph &G,
                                        MaterializationResponsibility &MR,
                                        bool Bootstrap);

    Error preserveInitializerSections(jitlink::LinkGraph &G,
                                      MaterializationResponsibility &MR);
    Error registerObjectPlatformSections(jitlink::LinkGraph &G, JITDylib &JD);
    Error registerObjectPlatformSectionsInBootstrap(jitlink::LinkGraph &G,
                                                    JITDylib &JD);

    std::mutex PluginMutex;
    COFFPlatform &CP;
    InitSymbolDepMap InitSymbolDeps;
  };

  struct JDBootstrapState {
    JITDylib *JD = nullptr;
    std::string JDName;
    ExecutorAddr HeaderAddr;
    std::list<COFFObjectSectionsMap> ObjectSectionsMaps;
    SmallVector<std::pair<std::string, ExecutorAddr>> Initializers;
  };

  static bool supportedTarget(const Triple &TT);

  COFFPlatform(
      ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
      JITDylib &PlatformJD,
      std::unique_ptr<StaticLibraryDefinitionGenerator> OrcRuntimeGenerator,
      std::unique_ptr<MemoryBuffer> OrcRuntimeArchiveBuffer,
      std::unique_ptr<object::Archive> OrcRuntimeArchive,
      LoadDynamicLibrary LoadDynLibrary, bool StaticVCRuntime,
      const char *VCRuntimePath, Error &Err);

  // Associate COFFPlatform JIT-side runtime support functions with handlers.
  Error associateRuntimeSupportFunctions(JITDylib &PlatformJD);

  // Records the addresses of runtime symbols used by the platform.
  Error bootstrapCOFFRuntime(JITDylib &PlatformJD);

  // Run a specific void function if it exists.
  Error runSymbolIfExists(JITDylib &PlatformJD, StringRef SymbolName);

  // Run collected initializers in the bootstrap stage.
  Error runBootstrapInitializers(JDBootstrapState &BState);
  Error runBootstrapSubsectionInitializers(JDBootstrapState &BState,
                                           StringRef Start, StringRef End);

  // Build the dependency graph of a JITDylib.
  Expected<JITDylibDepMap> buildJDDepMap(JITDylib &JD);

  Expected<MemoryBufferRef> getPerJDObjectFile();

  // Implements rt_pushInitializers by making repeat async lookups for
  // initializer symbols (each lookup may spawn more initializer symbols if
  // it pulls in new materializers, e.g. from objects in a static library).
  void pushInitializersLoop(PushInitializersSendResultFn SendResult,
                            JITDylibSP JD, JITDylibDepMap &JDDepMap);

  void rt_pushInitializers(PushInitializersSendResultFn SendResult,
                           ExecutorAddr JDHeaderAddr);

  void rt_lookupSymbol(SendSymbolAddressFn SendResult, ExecutorAddr Handle,
                       StringRef SymbolName);

  ExecutionSession &ES;
  ObjectLinkingLayer &ObjLinkingLayer;

  LoadDynamicLibrary LoadDynLibrary;
  std::unique_ptr<COFFVCRuntimeBootstrapper> VCRuntimeBootstrap;
  std::unique_ptr<MemoryBuffer> OrcRuntimeArchiveBuffer;
  std::unique_ptr<object::Archive> OrcRuntimeArchive;
  bool StaticVCRuntime;

  SymbolStringPtr COFFHeaderStartSymbol;

  // State of bootstrap in progress
  std::map<JITDylib *, JDBootstrapState> JDBootstrapStates;
  std::atomic<bool> Bootstrapping;

  ExecutorAddr orc_rt_coff_platform_bootstrap;
  ExecutorAddr orc_rt_coff_platform_shutdown;
  ExecutorAddr orc_rt_coff_register_object_sections;
  ExecutorAddr orc_rt_coff_deregister_object_sections;
  ExecutorAddr orc_rt_coff_register_jitdylib;
  ExecutorAddr orc_rt_coff_deregister_jitdylib;

  DenseMap<JITDylib *, ExecutorAddr> JITDylibToHeaderAddr;
  DenseMap<ExecutorAddr, JITDylib *> HeaderAddrToJITDylib;

  DenseMap<JITDylib *, SymbolLookupSet> RegisteredInitSymbols;

  std::set<std::string> DylibsToPreload;

  std::mutex PlatformMutex;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_COFFPLATFORM_H
//===- EPCDebugObjectRegistrar.h - EPC-based debug registration -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ExecutorProcessControl based registration of debug objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_EPCDEBUGOBJECTREGISTRAR_H
#define LLVM_EXECUTIONENGINE_ORC_EPCDEBUGOBJECTREGISTRAR_H

#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Memory.h"

#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {
namespace orc {

class ExecutionSession;

/// Abstract interface for registering debug objects in the executor process.
class DebugObjectRegistrar {
public:
  virtual Error registerDebugObject(ExecutorAddrRange TargetMem,
                                    bool AutoRegisterCode) = 0;
  virtual ~DebugObjectRegistrar() = default;
};

/// Use ExecutorProcessControl to register debug objects locally or in a remote
/// executor process.
class EPCDebugObjectRegistrar : public DebugObjectRegistrar {
public:
  EPCDebugObjectRegistrar(ExecutionSession &ES, ExecutorAddr RegisterFn)
      : ES(ES), RegisterFn(RegisterFn) {}

  Error registerDebugObject(ExecutorAddrRange TargetMem,
                            bool AutoRegisterCode) override;

private:
  ExecutionSession &ES;
  ExecutorAddr RegisterFn;
};

/// Create an ExecutorProcessControl-based DebugObjectRegistrar that emits debug
/// objects to the GDB JIT interface. This will use the EPC's lookupSymbols
/// method to find the registration/deregistration function addresses by name.
///
/// If RegistrationFunctionDylib is provided then it will be searched to find
/// the registration functions. If it is std::nullopt then the process dylib
/// will be searched instead.
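///
/// For illustration, a hedged sketch (ES is an existing ExecutionSession):
/// \code
///   auto Registrar = createJITLoaderGDBRegistrar(ES);
///   if (!Registrar)
///     return Registrar.takeError();
///   // *Registrar can now be handed to a debug-object plugin (e.g. a
///   // DebugObjectManagerPlugin), which calls registerDebugObject on emit.
/// \endcode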
Expected<std::unique_ptr<EPCDebugObjectRegistrar>> createJITLoaderGDBRegistrar(
    ExecutionSession &ES,
    std::optional<ExecutorAddr> RegistrationFunctionDylib = std::nullopt);

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_EPCDEBUGOBJECTREGISTRAR_H
//===- ObjectTransformLayer.h - Run all objects through functor -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Run all objects passed in through a user supplied functor.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
#define LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H

#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include <algorithm>
#include <memory>

namespace llvm {
namespace orc {

class ObjectTransformLayer
    : public RTTIExtends<ObjectTransformLayer, ObjectLayer> {
public:
  static char ID;

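  /// A transform function takes ownership of the incoming object buffer and
  /// returns the (possibly rewritten) buffer. For illustration, a hedged
  /// sketch of a pass-through transform:
  /// \code
  ///   OTL.setTransform([](std::unique_ptr<MemoryBuffer> O)
  ///                        -> Expected<std::unique_ptr<MemoryBuffer>> {
  ///     // Inspect or rewrite O here; returning it unchanged is valid.
  ///     return std::move(O);
  ///   });
  /// \endcode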
  using TransformFunction =
      std::function<Expected<std::unique_ptr<MemoryBuffer>>(
          std::unique_ptr<MemoryBuffer>)>;

  ObjectTransformLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
                       TransformFunction Transform = TransformFunction());

  void emit(std::unique_ptr<MaterializationResponsibility> R,
            std::unique_ptr<MemoryBuffer> O) override;

  void setTransform(TransformFunction Transform) {
    this->Transform = std::move(Transform);
  }

private:
  ObjectLayer &BaseLayer;
  TransformFunction Transform;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
//===- CompileUtils.h - Utilities for compiling IR in the JIT ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for compiling IR to object files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H

#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include <memory>

namespace llvm {

class MemoryBuffer;
class Module;
class ObjectCache;
class TargetMachine;

namespace orc {

IRSymbolMapper::ManglingOptions
irManglingOptionsFromTargetOptions(const TargetOptions &Opts);

/// Simple compile functor: Takes a single IR module and returns an ObjectFile.
/// This compiler supports a single compilation thread and LLVMContext only.
/// For multithreaded compilation, use ConcurrentIRCompiler below.
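///
/// For illustration, a hedged sketch (TM and M are an existing TargetMachine
/// and Module):
/// \code
///   SimpleCompiler Compile(TM);
///   Expected<SimpleCompiler::CompileResult> Obj = Compile(M);
///   if (!Obj)
///     return Obj.takeError();
/// \endcode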
class SimpleCompiler : public IRCompileLayer::IRCompiler {
public:
  using CompileResult = std::unique_ptr<MemoryBuffer>;

  /// Construct a simple compile functor with the given target.
  SimpleCompiler(TargetMachine &TM, ObjectCache *ObjCache = nullptr)
      : IRCompiler(irManglingOptionsFromTargetOptions(TM.Options)), TM(TM),
        ObjCache(ObjCache) {}

  /// Set an ObjectCache to query before compiling.
  void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }

  /// Compile a Module to an ObjectFile.
  Expected<CompileResult> operator()(Module &M) override;

private:
  IRSymbolMapper::ManglingOptions
  manglingOptionsForTargetMachine(const TargetMachine &TM);

  CompileResult tryToLoadFromObjectCache(const Module &M);
  void notifyObjectCompiled(const Module &M, const MemoryBuffer &ObjBuffer);

  TargetMachine &TM;
  ObjectCache *ObjCache = nullptr;
};

/// A SimpleCompiler that owns its TargetMachine.
///
/// This is convenient for clients who don't want to own their TargetMachines,
/// e.g. LLJIT.
class TMOwningSimpleCompiler : public SimpleCompiler {
public:
  TMOwningSimpleCompiler(std::unique_ptr<TargetMachine> TM,
                         ObjectCache *ObjCache = nullptr)
      : SimpleCompiler(*TM, ObjCache), TM(std::move(TM)) {}

private:
  // FIXME: shared because std::functions (and consequently
  // IRCompileLayer::CompileFunction) are not moveable.
  std::shared_ptr<llvm::TargetMachine> TM;
};

/// A thread-safe version of SimpleCompiler.
///
/// This class creates a new TargetMachine and SimpleCompiler instance for each
/// compile.
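///
/// For illustration, a hedged sketch of construction from the host target:
/// \code
///   auto JTMB = JITTargetMachineBuilder::detectHost();
///   if (!JTMB)
///     return JTMB.takeError();
///   ConcurrentIRCompiler Compile(std::move(*JTMB));
/// \endcode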
class ConcurrentIRCompiler : public IRCompileLayer::IRCompiler {
public:
  ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
                       ObjectCache *ObjCache = nullptr);

  void setObjectCache(ObjectCache *ObjCache) { this->ObjCache = ObjCache; }

  Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) override;

private:
  JITTargetMachineBuilder JTMB;
  ObjectCache *ObjCache = nullptr;
};

} // end namespace orc

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
//===------ LazyReexports.h -- Utilities for lazy reexports -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lazy re-exports are similar to normal re-exports, except that for callable
// symbols the definitions are replaced with trampolines that will look up and
// call through to the re-exported symbol at runtime. This can be used to
// enable lazy compilation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYREEXPORTS_H
#define LLVM_EXECUTIONENGINE_ORC_LAZYREEXPORTS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/Speculation.h"

namespace llvm {

class Triple;

namespace orc {

/// Manages a set of 'lazy call-through' trampolines. These are compiler
/// re-entry trampolines that are pre-bound to look up a given symbol in a given
/// JITDylib, then jump to that address. Since compilation of symbols is
/// triggered on first lookup, these call-through trampolines can be used to
/// implement lazy compilation.
///
/// The easiest way to construct these call-throughs is using the lazyReexport
/// function.
class LazyCallThroughManager {
public:
  using NotifyResolvedFunction =
      unique_function<Error(ExecutorAddr ResolvedAddr)>;

  LazyCallThroughManager(ExecutionSession &ES, ExecutorAddr ErrorHandlerAddr,
                         TrampolinePool *TP);

  // Return a free call-through trampoline and bind it to look up and call
  // through to the given symbol.
  Expected<ExecutorAddr>
  getCallThroughTrampoline(JITDylib &SourceJD, SymbolStringPtr SymbolName,
                           NotifyResolvedFunction NotifyResolved);

  void resolveTrampolineLandingAddress(
      ExecutorAddr TrampolineAddr,
      TrampolinePool::NotifyLandingResolvedFunction NotifyLandingResolved);

  virtual ~LazyCallThroughManager() = default;

protected:
  using NotifyLandingResolvedFunction =
      TrampolinePool::NotifyLandingResolvedFunction;

  struct ReexportsEntry {
    JITDylib *SourceJD;
    SymbolStringPtr SymbolName;
  };

  ExecutorAddr reportCallThroughError(Error Err);
  Expected<ReexportsEntry> findReexport(ExecutorAddr TrampolineAddr);
  Error notifyResolved(ExecutorAddr TrampolineAddr, ExecutorAddr ResolvedAddr);
  void setTrampolinePool(TrampolinePool &TP) { this->TP = &TP; }

private:
  using ReexportsMap = std::map<ExecutorAddr, ReexportsEntry>;

  using NotifiersMap = std::map<ExecutorAddr, NotifyResolvedFunction>;

  std::mutex LCTMMutex;
  ExecutionSession &ES;
  ExecutorAddr ErrorHandlerAddr;
  TrampolinePool *TP = nullptr;
  ReexportsMap Reexports;
  NotifiersMap Notifiers;
};

/// A lazy call-through manager that builds trampolines in the current process.
class LocalLazyCallThroughManager : public LazyCallThroughManager {
private:
  using NotifyTargetResolved = unique_function<void(ExecutorAddr)>;

  LocalLazyCallThroughManager(ExecutionSession &ES,
                              ExecutorAddr ErrorHandlerAddr)
      : LazyCallThroughManager(ES, ErrorHandlerAddr, nullptr) {}

  template <typename ORCABI> Error init() {
    auto TP = LocalTrampolinePool<ORCABI>::Create(
        [this](ExecutorAddr TrampolineAddr,
               TrampolinePool::NotifyLandingResolvedFunction
                   NotifyLandingResolved) {
          resolveTrampolineLandingAddress(TrampolineAddr,
                                          std::move(NotifyLandingResolved));
        });

    if (!TP)
      return TP.takeError();

    this->TP = std::move(*TP);
    setTrampolinePool(*this->TP);
    return Error::success();
  }

  std::unique_ptr<TrampolinePool> TP;

public:
  /// Create a LocalLazyCallThroughManager using the given ABI. See
  /// createLocalLazyCallThroughManager.
  template <typename ORCABI>
  static Expected<std::unique_ptr<LocalLazyCallThroughManager>>
  Create(ExecutionSession &ES, ExecutorAddr ErrorHandlerAddr) {
    auto LLCTM = std::unique_ptr<LocalLazyCallThroughManager>(
        new LocalLazyCallThroughManager(ES, ErrorHandlerAddr));

    if (auto Err = LLCTM->init<ORCABI>())
      return std::move(Err);

    return std::move(LLCTM);
  }
};

/// Create a LocalLazyCallThroughManager from the given triple and execution
/// session.
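///
/// For illustration, a hedged sketch (T is the target triple; a null
/// ExecutorAddr is passed as the error handler address):
/// \code
///   auto LCTM = createLocalLazyCallThroughManager(T, ES, ExecutorAddr());
///   if (!LCTM)
///     return LCTM.takeError();
/// \endcode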
Expected<std::unique_ptr<LazyCallThroughManager>>
createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
                                  ExecutorAddr ErrorHandlerAddr);

/// A materialization unit that builds lazy re-exports. These are callable
/// entry points that call through to the given symbols.
/// Unlike a 'true' re-export, the address of the lazy re-export will not
/// match the address of the re-exported symbol, but calling it will behave
/// the same as calling the re-exported symbol.
class LazyReexportsMaterializationUnit : public MaterializationUnit {
public:
  LazyReexportsMaterializationUnit(LazyCallThroughManager &LCTManager,
                                   IndirectStubsManager &ISManager,
                                   JITDylib &SourceJD,
                                   SymbolAliasMap CallableAliases,
                                   ImplSymbolMap *SrcJDLoc);

  StringRef getName() const override;

private:
  void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
  static MaterializationUnit::Interface
  extractFlags(const SymbolAliasMap &Aliases);

  LazyCallThroughManager &LCTManager;
  IndirectStubsManager &ISManager;
  JITDylib &SourceJD;
  SymbolAliasMap CallableAliases;
  ImplSymbolMap *AliaseeTable;
};

/// Define lazy-reexports based on the given SymbolAliasMap. Each lazy re-export
/// is a callable symbol that will look up and dispatch to the given aliasee on
/// first call. All subsequent calls will go directly to the aliasee.
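///
/// For illustration, a hedged sketch that makes "foo" lazily call through to
/// "foo_body" in ImplJD (Mangle, LCTM, ISM, MainJD, and ImplJD are assumed to
/// exist; the symbol names are hypothetical):
/// \code
///   SymbolAliasMap Aliases;
///   Aliases[Mangle("foo")] = {Mangle("foo_body"),
///                             JITSymbolFlags::Exported |
///                                 JITSymbolFlags::Callable};
///   if (auto Err = MainJD.define(
///           lazyReexports(LCTM, ISM, ImplJD, std::move(Aliases))))
///     return Err;
/// \endcode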
inline std::unique_ptr<LazyReexportsMaterializationUnit>
lazyReexports(LazyCallThroughManager &LCTManager,
              IndirectStubsManager &ISManager, JITDylib &SourceJD,
              SymbolAliasMap CallableAliases,
              ImplSymbolMap *SrcJDLoc = nullptr) {
  return std::make_unique<LazyReexportsMaterializationUnit>(
      LCTManager, ISManager, SourceJD, std::move(CallableAliases), SrcJDLoc);
}

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_LAZYREEXPORTS_H
//===- IndirectionUtils.h - Utilities for adding indirections ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains utilities for adding indirections and breaking up modules.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/Process.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <future>
#include <map>
#include <memory>
#include <system_error>
#include <utility>
#include <vector>

namespace llvm {

class Constant;
class Function;
class FunctionType;
class GlobalAlias;
class GlobalVariable;
class Module;
class PointerType;
class Triple;
class Twine;
class Value;
class MCDisassembler;
class MCInstrAnalysis;

namespace jitlink {
class LinkGraph;
class Symbol;
} // namespace jitlink

namespace orc {

/// Base class for pools of compiler re-entry trampolines.
/// These trampolines are callable addresses that save all register state
/// before calling a supplied function to return the trampoline landing
/// address, then restore all state before jumping to that address. They
/// are used by various ORC APIs to support lazy compilation.
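///
/// For illustration, a hedged sketch of pool use (TP is some TrampolinePool
/// instance):
/// \code
///   auto Trampoline = TP.getTrampoline();
///   if (!Trampoline)
///     return Trampoline.takeError();
///   // ...publish *Trampoline as a callable address...
///   TP.releaseTrampoline(*Trampoline); // Return it to the pool when done.
/// \endcode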
class TrampolinePool {
public:
  using NotifyLandingResolvedFunction =
      unique_function<void(ExecutorAddr) const>;

  using ResolveLandingFunction = unique_function<void(
      ExecutorAddr TrampolineAddr,
      NotifyLandingResolvedFunction OnLandingResolved) const>;

  virtual ~TrampolinePool();

  /// Get an available trampoline address.
  /// Returns an error if no trampoline can be created.
  Expected<ExecutorAddr> getTrampoline() {
    std::lock_guard<std::mutex> Lock(TPMutex);
    if (AvailableTrampolines.empty()) {
      if (auto Err = grow())
        return std::move(Err);
    }
    assert(!AvailableTrampolines.empty() && "Failed to grow trampoline pool");
    auto TrampolineAddr = AvailableTrampolines.back();
    AvailableTrampolines.pop_back();
    return TrampolineAddr;
  }

  /// Returns the given trampoline to the pool for re-use.
  void releaseTrampoline(ExecutorAddr TrampolineAddr) {
    std::lock_guard<std::mutex> Lock(TPMutex);
    AvailableTrampolines.push_back(TrampolineAddr);
  }

protected:
  virtual Error grow() = 0;

  std::mutex TPMutex;
  std::vector<ExecutorAddr> AvailableTrampolines;
};

/// A trampoline pool for trampolines within the current process.
template <typename ORCABI> class LocalTrampolinePool : public TrampolinePool {
public:
  /// Creates a LocalTrampolinePool with the given ResolveLanding function.
  /// Returns an error if this function is unable to correctly allocate, write
  /// and protect the resolver code block.
  static Expected<std::unique_ptr<LocalTrampolinePool>>
  Create(ResolveLandingFunction ResolveLanding) {
    Error Err = Error::success();

    auto LTP = std::unique_ptr<LocalTrampolinePool>(
        new LocalTrampolinePool(std::move(ResolveLanding), Err));

    if (Err)
      return std::move(Err);
    return std::move(LTP);
  }

private:
  static JITTargetAddress reenter(void *TrampolinePoolPtr, void *TrampolineId) {
    LocalTrampolinePool<ORCABI> *TrampolinePool =
        static_cast<LocalTrampolinePool *>(TrampolinePoolPtr);

    std::promise<ExecutorAddr> LandingAddressP;
    auto LandingAddressF = LandingAddressP.get_future();

    TrampolinePool->ResolveLanding(ExecutorAddr::fromPtr(TrampolineId),
                                   [&](ExecutorAddr LandingAddress) {
                                     LandingAddressP.set_value(LandingAddress);
                                   });
    return LandingAddressF.get().getValue();
  }

  LocalTrampolinePool(ResolveLandingFunction ResolveLanding, Error &Err)
      : ResolveLanding(std::move(ResolveLanding)) {

    ErrorAsOutParameter _(&Err);

    // Try to set up the resolver block.
    std::error_code EC;
    ResolverBlock = sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
        ORCABI::ResolverCodeSize, nullptr,
        sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
    if (EC) {
      Err = errorCodeToError(EC);
      return;
    }

    ORCABI::writeResolverCode(static_cast<char *>(ResolverBlock.base()),
                              ExecutorAddr::fromPtr(ResolverBlock.base()),
                              ExecutorAddr::fromPtr(&reenter),
                              ExecutorAddr::fromPtr(this));

    EC = sys::Memory::protectMappedMemory(ResolverBlock.getMemoryBlock(),
                                          sys::Memory::MF_READ |
                                              sys::Memory::MF_EXEC);
    if (EC) {
      Err = errorCodeToError(EC);
      return;
    }
  }

  Error grow() override {
    assert(AvailableTrampolines.empty() && "Growing prematurely?");

    std::error_code EC;
    auto TrampolineBlock =
        sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
            sys::Process::getPageSizeEstimate(), nullptr,
            sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
    if (EC)
      return errorCodeToError(EC);

    unsigned NumTrampolines =
        (sys::Process::getPageSizeEstimate() - ORCABI::PointerSize) /
        ORCABI::TrampolineSize;

    char *TrampolineMem = static_cast<char *>(TrampolineBlock.base());
    ORCABI::writeTrampolines(
        TrampolineMem, ExecutorAddr::fromPtr(TrampolineMem),
        ExecutorAddr::fromPtr(ResolverBlock.base()), NumTrampolines);

    for (unsigned I = 0; I < NumTrampolines; ++I)
      AvailableTrampolines.push_back(
          ExecutorAddr::fromPtr(TrampolineMem + (I * ORCABI::TrampolineSize)));

    if (auto EC = sys::Memory::protectMappedMemory(
                    TrampolineBlock.getMemoryBlock(),
                    sys::Memory::MF_READ | sys::Memory::MF_EXEC))
      return errorCodeToError(EC);

    TrampolineBlocks.push_back(std::move(TrampolineBlock));
    return Error::success();
  }

  ResolveLandingFunction ResolveLanding;

  sys::OwningMemoryBlock ResolverBlock;
  std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
};

/// Target-independent base class for compile callback management.
class JITCompileCallbackManager {
public:
  using CompileFunction = std::function<ExecutorAddr()>;

  virtual ~JITCompileCallbackManager() = default;

  /// Reserve a compile callback.
  Expected<ExecutorAddr> getCompileCallback(CompileFunction Compile);

  /// Execute the callback for the given trampoline id. Called by the JIT
  ///        to compile functions on demand.
  ExecutorAddr executeCompileCallback(ExecutorAddr TrampolineAddr);

protected:
  /// Construct a JITCompileCallbackManager.
  JITCompileCallbackManager(std::unique_ptr<TrampolinePool> TP,
                            ExecutionSession &ES,
                            ExecutorAddr ErrorHandlerAddress)
      : TP(std::move(TP)), ES(ES),
        CallbacksJD(ES.createBareJITDylib("<Callbacks>")),
        ErrorHandlerAddress(ErrorHandlerAddress) {}

  void setTrampolinePool(std::unique_ptr<TrampolinePool> TP) {
    this->TP = std::move(TP);
  }

private:
  std::mutex CCMgrMutex;
  std::unique_ptr<TrampolinePool> TP;
  ExecutionSession &ES;
  JITDylib &CallbacksJD;
  ExecutorAddr ErrorHandlerAddress;
  std::map<ExecutorAddr, SymbolStringPtr> AddrToSymbol;
  size_t NextCallbackId = 0;
};

/// Manage compile callbacks for in-process JITs.
template <typename ORCABI>
class LocalJITCompileCallbackManager : public JITCompileCallbackManager {
public:
  /// Create a new LocalJITCompileCallbackManager.
  static Expected<std::unique_ptr<LocalJITCompileCallbackManager>>
  Create(ExecutionSession &ES, ExecutorAddr ErrorHandlerAddress) {
    Error Err = Error::success();
    auto CCMgr = std::unique_ptr<LocalJITCompileCallbackManager>(
        new LocalJITCompileCallbackManager(ES, ErrorHandlerAddress, Err));
    if (Err)
      return std::move(Err);
    return std::move(CCMgr);
  }

private:
  /// Construct a LocalJITCompileCallbackManager.
  /// @param ErrorHandlerAddress The address of an error handler in the target
  ///                            process to be used if a compile callback fails.
  LocalJITCompileCallbackManager(ExecutionSession &ES,
                                 ExecutorAddr ErrorHandlerAddress, Error &Err)
      : JITCompileCallbackManager(nullptr, ES, ErrorHandlerAddress) {
    using NotifyLandingResolvedFunction =
        TrampolinePool::NotifyLandingResolvedFunction;

    ErrorAsOutParameter _(&Err);
    auto TP = LocalTrampolinePool<ORCABI>::Create(
        [this](ExecutorAddr TrampolineAddr,
               NotifyLandingResolvedFunction NotifyLandingResolved) {
          NotifyLandingResolved(executeCompileCallback(TrampolineAddr));
        });

    if (!TP) {
      Err = TP.takeError();
      return;
    }

    setTrampolinePool(std::move(*TP));
  }
};

/// Base class for managing collections of named indirect stubs.
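///
/// For illustration, a hedged sketch (the stub name and target addresses are
/// hypothetical):
/// \code
///   if (auto Err = ISM.createStub("foo", FooBodyAddr,
///                                 JITSymbolFlags::Exported))
///     return Err;
///   // Later, re-point the stub at a new implementation:
///   if (auto Err = ISM.updatePointer("foo", NewFooBodyAddr))
///     return Err;
/// \endcode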
class IndirectStubsManager {
public:
  /// Map type for initializing the manager. See createStubs.
  using StubInitsMap = StringMap<std::pair<ExecutorAddr, JITSymbolFlags>>;

  virtual ~IndirectStubsManager() = default;

  /// Create a single stub with the given name, target address and flags.
  virtual Error createStub(StringRef StubName, ExecutorAddr StubAddr,
                           JITSymbolFlags StubFlags) = 0;

  /// Create StubInits.size() stubs with the given names, target
  ///        addresses, and flags.
  virtual Error createStubs(const StubInitsMap &StubInits) = 0;

  /// Find the stub with the given name. If ExportedStubsOnly is true,
  ///        this will only return a result if the stub's flags indicate that it
  ///        is exported.
  virtual ExecutorSymbolDef findStub(StringRef Name,
                                     bool ExportedStubsOnly) = 0;

  /// Find the implementation-pointer for the stub.
  virtual ExecutorSymbolDef findPointer(StringRef Name) = 0;

  /// Change the value of the implementation pointer for the stub.
  virtual Error updatePointer(StringRef Name, ExecutorAddr NewAddr) = 0;

private:
  virtual void anchor();
};

template <typename ORCABI> class LocalIndirectStubsInfo {
public:
  LocalIndirectStubsInfo(unsigned NumStubs, sys::OwningMemoryBlock StubsMem)
      : NumStubs(NumStubs), StubsMem(std::move(StubsMem)) {}

  static Expected<LocalIndirectStubsInfo> create(unsigned MinStubs,
                                                 unsigned PageSize) {
    auto ISAS = getIndirectStubsBlockSizes<ORCABI>(MinStubs, PageSize);

    assert((ISAS.StubBytes % PageSize == 0) &&
           "StubBytes is not a page size multiple");
    uint64_t PointerAlloc = alignTo(ISAS.PointerBytes, PageSize);

    // Allocate memory for stubs and pointers in one call.
    std::error_code EC;
    auto StubsAndPtrsMem =
        sys::OwningMemoryBlock(sys::Memory::allocateMappedMemory(
            ISAS.StubBytes + PointerAlloc, nullptr,
            sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC));
    if (EC)
      return errorCodeToError(EC);

    sys::MemoryBlock StubsBlock(StubsAndPtrsMem.base(), ISAS.StubBytes);
    auto StubsBlockMem = static_cast<char *>(StubsAndPtrsMem.base());
    auto PtrBlockAddress =
        ExecutorAddr::fromPtr(StubsBlockMem) + ISAS.StubBytes;

    ORCABI::writeIndirectStubsBlock(StubsBlockMem,
                                    ExecutorAddr::fromPtr(StubsBlockMem),
                                    PtrBlockAddress, ISAS.NumStubs);

    if (auto EC = sys::Memory::protectMappedMemory(
            StubsBlock, sys::Memory::MF_READ | sys::Memory::MF_EXEC))
      return errorCodeToError(EC);

    return LocalIndirectStubsInfo(ISAS.NumStubs, std::move(StubsAndPtrsMem));
  }

  unsigned getNumStubs() const { return NumStubs; }

  void *getStub(unsigned Idx) const {
    return static_cast<char *>(StubsMem.base()) + Idx * ORCABI::StubSize;
  }

  void **getPtr(unsigned Idx) const {
    char *PtrsBase =
        static_cast<char *>(StubsMem.base()) + NumStubs * ORCABI::StubSize;
    return reinterpret_cast<void **>(PtrsBase) + Idx;
  }

private:
  unsigned NumStubs = 0;
  sys::OwningMemoryBlock StubsMem;
};

/// IndirectStubsManager implementation for the host architecture, e.g.
///        OrcX86_64. (See OrcABISupport.h).
template <typename TargetT>
class LocalIndirectStubsManager : public IndirectStubsManager {
public:
  Error createStub(StringRef StubName, ExecutorAddr StubAddr,
                   JITSymbolFlags StubFlags) override {
    std::lock_guard<std::mutex> Lock(StubsMutex);
    if (auto Err = reserveStubs(1))
      return Err;

    createStubInternal(StubName, StubAddr, StubFlags);

    return Error::success();
  }

  Error createStubs(const StubInitsMap &StubInits) override {
    std::lock_guard<std::mutex> Lock(StubsMutex);
    if (auto Err = reserveStubs(StubInits.size()))
      return Err;

    for (const auto &Entry : StubInits)
      createStubInternal(Entry.first(), Entry.second.first,
                         Entry.second.second);

    return Error::success();
  }

  ExecutorSymbolDef findStub(StringRef Name, bool ExportedStubsOnly) override {
    std::lock_guard<std::mutex> Lock(StubsMutex);
    auto I = StubIndexes.find(Name);
    if (I == StubIndexes.end())
      return ExecutorSymbolDef();
    auto Key = I->second.first;
    void *StubPtr = IndirectStubsInfos[Key.first].getStub(Key.second);
    assert(StubPtr && "Missing stub address");
    auto StubAddr = ExecutorAddr::fromPtr(StubPtr);
    auto StubSymbol = ExecutorSymbolDef(StubAddr, I->second.second);
    if (ExportedStubsOnly && !StubSymbol.getFlags().isExported())
      return ExecutorSymbolDef();
    return StubSymbol;
  }

  ExecutorSymbolDef findPointer(StringRef Name) override {
    std::lock_guard<std::mutex> Lock(StubsMutex);
    auto I = StubIndexes.find(Name);
    if (I == StubIndexes.end())
      return ExecutorSymbolDef();
    auto Key = I->second.first;
    void *PtrPtr = IndirectStubsInfos[Key.first].getPtr(Key.second);
    assert(PtrPtr && "Missing pointer address");
    auto PtrAddr = ExecutorAddr::fromPtr(PtrPtr);
    return ExecutorSymbolDef(PtrAddr, I->second.second);
  }

  Error updatePointer(StringRef Name, ExecutorAddr NewAddr) override {
    using AtomicIntPtr = std::atomic<uintptr_t>;

    std::lock_guard<std::mutex> Lock(StubsMutex);
    auto I = StubIndexes.find(Name);
    assert(I != StubIndexes.end() && "No stub pointer for symbol");
    auto Key = I->second.first;
    AtomicIntPtr *AtomicStubPtr = reinterpret_cast<AtomicIntPtr *>(
        IndirectStubsInfos[Key.first].getPtr(Key.second));
    *AtomicStubPtr = static_cast<uintptr_t>(NewAddr.getValue());
    return Error::success();
  }

private:
  Error reserveStubs(unsigned NumStubs) {
    if (NumStubs <= FreeStubs.size())
      return Error::success();

    unsigned NewStubsRequired = NumStubs - FreeStubs.size();
    unsigned NewBlockId = IndirectStubsInfos.size();
    auto ISI =
        LocalIndirectStubsInfo<TargetT>::create(NewStubsRequired, PageSize);
    if (!ISI)
      return ISI.takeError();
    for (unsigned I = 0; I < ISI->getNumStubs(); ++I)
      FreeStubs.push_back(std::make_pair(NewBlockId, I));
    IndirectStubsInfos.push_back(std::move(*ISI));
    return Error::success();
  }

  void createStubInternal(StringRef StubName, ExecutorAddr InitAddr,
                          JITSymbolFlags StubFlags) {
    auto Key = FreeStubs.back();
    FreeStubs.pop_back();
    *IndirectStubsInfos[Key.first].getPtr(Key.second) =
        InitAddr.toPtr<void *>();
    StubIndexes[StubName] = std::make_pair(Key, StubFlags);
  }

  unsigned PageSize = sys::Process::getPageSizeEstimate();
  std::mutex StubsMutex;
  std::vector<LocalIndirectStubsInfo<TargetT>> IndirectStubsInfos;
  using StubKey = std::pair<uint16_t, uint16_t>;
  std::vector<StubKey> FreeStubs;
  StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
};

/// Create a local compile callback manager.
///
/// The given target triple will determine the ABI, and the given
/// ErrorHandlerAddress will be used by the resulting compile callback
/// manager if a compile callback fails.
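///
/// For illustration, a hedged sketch (T and ES exist; a null error-handler
/// address is used, and handleLazyCompile is a hypothetical function whose
/// address is returned as the compile result):
/// \code
///   auto CCMgr = createLocalCompileCallbackManager(T, ES, ExecutorAddr());
///   if (!CCMgr)
///     return CCMgr.takeError();
///   auto CallbackAddr = (*CCMgr)->getCompileCallback(
///       [] { return ExecutorAddr::fromPtr(&handleLazyCompile); });
/// \endcode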
Expected<std::unique_ptr<JITCompileCallbackManager>>
createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
                                  ExecutorAddr ErrorHandlerAddress);

/// Create a local indirect stubs manager builder.
///
/// The given target triple will determine the ABI.
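///
/// For illustration (hedged):
/// \code
///   auto ISMBuilder = createLocalIndirectStubsManagerBuilder(T);
///   std::unique_ptr<IndirectStubsManager> ISM = ISMBuilder();
/// \endcode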
std::function<std::unique_ptr<IndirectStubsManager>()>
createLocalIndirectStubsManagerBuilder(const Triple &T);

/// Build a function pointer of FunctionType with the given constant
///        address.
///
///   Usage example: Turn a trampoline address into a function pointer constant
/// for use in a stub.
Constant *createIRTypedAddress(FunctionType &FT, ExecutorAddr Addr);

/// Create a function pointer with the given type, name, and initializer
///        in the given Module.
GlobalVariable *createImplPointer(PointerType &PT, Module &M, const Twine &Name,
                                  Constant *Initializer);

/// Turn a function declaration into a stub function that makes an
///        indirect call using the given function pointer.
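///
/// For illustration, a hedged sketch combining createImplPointer and makeStub
/// (F is a function declaration in module M; the pointer name is
/// hypothetical):
/// \code
///   auto *ImplPtr = createImplPointer(*F.getType(), M,
///                                     F.getName() + "$impl_ptr", nullptr);
///   makeStub(F, *ImplPtr); // F's body now calls through ImplPtr.
/// \endcode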
void makeStub(Function &F, Value &ImplPointer);

/// Promotes private symbols to global hidden, and renames to prevent clashes
/// with other promoted symbols. The same SymbolPromoter instance should be
/// used for all symbols to be added to a single JITDylib.
class SymbolLinkagePromoter {
public:
  /// Promote symbols in the given module. Returns the set of global values
  /// that have been renamed/promoted.
  std::vector<GlobalValue *> operator()(Module &M);

private:
  unsigned NextId = 0;
};

/// Clone a function declaration into a new module.
///
///   This function can be used as the first step towards creating a callback
/// stub (see makeStub).
///
///   If the VMap argument is non-null, a mapping will be added between F and
/// the new declaration, and between each of F's arguments and the new
/// declaration's arguments. This map can then be passed in to moveFunction to
/// move the function body if required. Note: When moving functions between
/// modules with these utilities, all decls should be cloned (and added to a
/// single VMap) before any bodies are moved. This will ensure that references
/// between functions all refer to the versions in the new module.
Function *cloneFunctionDecl(Module &Dst, const Function &F,
                            ValueToValueMapTy *VMap = nullptr);

/// Clone a global variable declaration into a new module.
GlobalVariable *cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
                                        ValueToValueMapTy *VMap = nullptr);

/// Clone a global alias declaration into a new module.
GlobalAlias *cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
                                  ValueToValueMapTy &VMap);

/// Introduce relocations to \p Sym in its own definition if there are any
/// pointers formed via PC-relative address that do not already have a
/// relocation.
///
/// This is useful when introducing indirection via a stub function at link time
/// without compiler support. If a function pointer is formed without a
/// relocation, e.g. in the definition of \c foo
///
/// \code
/// _foo:
///   leaq -7(%rip), rax # form pointer to _foo without relocation
/// _bar:
///   leaq (%rip), %rax  # uses X86_64_RELOC_SIGNED to '_foo'
/// \endcode
///
/// the pointer to \c _foo computed by \c _foo and \c _bar may differ if we
/// introduce a stub for _foo. If the pointer is used as a key, this may be
/// observable to the program. This pass will attempt to introduce the missing
/// "self-relocation" on the leaq instruction.
///
/// This is based on disassembly and should be considered "best effort". It may
/// silently fail to add relocations.
Error addFunctionPointerRelocationsToCurrentSymbol(jitlink::Symbol &Sym,
                                                   jitlink::LinkGraph &G,
                                                   MCDisassembler &Disassembler,
                                                   MCInstrAnalysis &MIA);

} // end namespace orc

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
//===-- LookupAndRecordAddrs.h - Symbol lookup support utility --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Record the addresses of a set of symbols into ExecutorAddr objects.
//
// This can be used to avoid repeated lookup (via ExecutionSession::lookup) of
// the given symbols.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_LOOKUPANDRECORDADDRS_H
#define LLVM_EXECUTIONENGINE_ORC_LOOKUPANDRECORDADDRS_H

#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

#include <vector>

namespace llvm {
namespace orc {

/// Record addresses of the given symbols in the given ExecutorAddrs.
///
/// Useful for making permanent records of symbol addresses to call or
/// access in the executor (e.g. runtime support functions in Platform
/// subclasses).
///
/// By default the symbols are looked up using
/// SymbolLookupFlags::RequiredSymbol, and an error will be generated if any of
/// the requested symbols are not defined.
///
/// If SymbolLookupFlags::WeaklyReferencedSymbol is used then any missing
/// symbols will have their corresponding address objects set to zero, and
/// this function will never generate an error (the caller will need to check
/// addresses before using them).
///
/// Asynchronous version.
void lookupAndRecordAddrs(
    unique_function<void(Error)> OnRecorded, ExecutionSession &ES, LookupKind K,
    const JITDylibSearchOrder &SearchOrder,
    std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
    SymbolLookupFlags LookupFlags = SymbolLookupFlags::RequiredSymbol);

/// Record addresses of the given symbols in the given ExecutorAddrs.
///
/// Blocking version.
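///
/// For illustration, a hedged sketch (the symbol names and PlatformJD are
/// hypothetical):
/// \code
///   ExecutorAddr BootstrapFn, ShutdownFn;
///   if (auto Err = lookupAndRecordAddrs(
///           ES, LookupKind::Static, makeJITDylibSearchOrder(&PlatformJD),
///           {{ES.intern("__orc_rt_example_bootstrap"), &BootstrapFn},
///            {ES.intern("__orc_rt_example_shutdown"), &ShutdownFn}}))
///     return Err;
/// \endcode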
Error lookupAndRecordAddrs(
    ExecutionSession &ES, LookupKind K, const JITDylibSearchOrder &SearchOrder,
    std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
    SymbolLookupFlags LookupFlags = SymbolLookupFlags::RequiredSymbol);

/// Record addresses of given symbols in the given ExecutorAddrs.
///
/// ExecutorProcessControl lookup version. Lookups are always implicitly
/// weak.
Error lookupAndRecordAddrs(
    ExecutorProcessControl &EPC, tpctypes::DylibHandle H,
    std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
    SymbolLookupFlags LookupFlags = SymbolLookupFlags::RequiredSymbol);

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_LOOKUPANDRECORDADDRS_H
//===--------------- MapperJITLinkMemoryManager.h -*- C++ -*---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements JITLinkMemoryManager using MemoryMapper
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_MAPPERJITLINKMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_ORC_MAPPERJITLINKMEMORYMANAGER_H

#include "llvm/ADT/IntervalMap.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

namespace llvm {
namespace orc {

class MapperJITLinkMemoryManager : public jitlink::JITLinkMemoryManager {
public:
  MapperJITLinkMemoryManager(size_t ReservationGranularity,
                             std::unique_ptr<MemoryMapper> Mapper);

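  /// Create a MapperJITLinkMemoryManager with a mapper of the given type.
  /// For illustration, a hedged sketch using an in-process mapper (the
  /// reservation granularity below is arbitrary):
  /// \code
  ///   auto MemMgr = MapperJITLinkMemoryManager::CreateWithMapper<
  ///       InProcessMemoryMapper>(16 * 1024 * 1024);
  ///   if (!MemMgr)
  ///     return MemMgr.takeError();
  /// \endcode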
  template <class MemoryMapperType, class... Args>
  static Expected<std::unique_ptr<MapperJITLinkMemoryManager>>
  CreateWithMapper(size_t ReservationGranularity, Args &&...A) {
    auto Mapper = MemoryMapperType::Create(std::forward<Args>(A)...);
    if (!Mapper)
      return Mapper.takeError();

    return std::make_unique<MapperJITLinkMemoryManager>(ReservationGranularity,
                                                        std::move(*Mapper));
  }

  void allocate(const jitlink::JITLinkDylib *JD, jitlink::LinkGraph &G,
                OnAllocatedFunction OnAllocated) override;
  // synchronous overload
  using JITLinkMemoryManager::allocate;

  void deallocate(std::vector<FinalizedAlloc> Allocs,
                  OnDeallocatedFunction OnDeallocated) override;
  // synchronous overload
  using JITLinkMemoryManager::deallocate;

private:
  class InFlightAlloc;

  std::mutex Mutex;

  // We reserve multiples of this from the executor address space
  size_t ReservationUnits;

  // Ranges that have been reserved in executor but not yet allocated
  using AvailableMemoryMap = IntervalMap<ExecutorAddr, bool>;
  AvailableMemoryMap::Allocator AMAllocator;
  IntervalMap<ExecutorAddr, bool> AvailableMemory;

  // Ranges that have been reserved in executor and already allocated
  DenseMap<ExecutorAddr, ExecutorAddrDiff> UsedMemory;

  std::unique_ptr<MemoryMapper> Mapper;
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_MAPPERJITLINKMEMORYMANAGER_H
//===------ Core.h -- Core ORC APIs (Layer, JITDylib, etc.) -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains core ORC APIs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_CORE_H
#define LLVM_EXECUTIONENGINE_ORC_CORE_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkDylib.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorSymbolDef.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/ExecutionEngine/Orc/TaskDispatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ExtensibleRTTI.h"

#include <atomic>
#include <future>
#include <memory>
#include <vector>

namespace llvm {
namespace orc {

// Forward declare some classes.
class AsynchronousSymbolQuery;
class ExecutionSession;
class MaterializationUnit;
class MaterializationResponsibility;
class JITDylib;
class ResourceTracker;
class InProgressLookupState;

enum class SymbolState : uint8_t;

using ResourceTrackerSP = IntrusiveRefCntPtr<ResourceTracker>;
using JITDylibSP = IntrusiveRefCntPtr<JITDylib>;

using ResourceKey = uintptr_t;

/// API to remove / transfer ownership of JIT resources.
class ResourceTracker : public ThreadSafeRefCountedBase<ResourceTracker> {
private:
  friend class ExecutionSession;
  friend class JITDylib;
  friend class MaterializationResponsibility;

public:
  ResourceTracker(const ResourceTracker &) = delete;
  ResourceTracker &operator=(const ResourceTracker &) = delete;
  ResourceTracker(ResourceTracker &&) = delete;
  ResourceTracker &operator=(ResourceTracker &&) = delete;

  ~ResourceTracker();

  /// Return the JITDylib targeted by this tracker.
  JITDylib &getJITDylib() const {
    return *reinterpret_cast<JITDylib *>(JDAndFlag.load() &
                                         ~static_cast<uintptr_t>(1));
  }

  /// Runs the given callback under the session lock, passing in the associated
  /// ResourceKey. This is the safe way to associate resources with trackers.
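  ///
  /// For illustration, a hedged sketch (AllocsByKey is a hypothetical
  /// container owned by a ResourceManager):
  /// \code
  ///   if (auto Err = RT->withResourceKeyDo([&](ResourceKey K) {
  ///         AllocsByKey[K].push_back(std::move(NewAlloc));
  ///       }))
  ///     return Err;
  /// \endcode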
  template <typename Func> Error withResourceKeyDo(Func &&F);

  /// Remove all resources associated with this key.
  Error remove();

  /// Transfer all resources associated with this key to the given
  /// tracker, which must target the same JITDylib as this one.
  void transferTo(ResourceTracker &DstRT);

  /// Return true if this tracker has become defunct.
  bool isDefunct() const { return JDAndFlag.load() & 0x1; }

  /// Returns the key associated with this tracker.
  /// This method should not be used except for debug logging: there is no
  /// guarantee that the returned value will remain valid.
  ResourceKey getKeyUnsafe() const { return reinterpret_cast<uintptr_t>(this); }

private:
  ResourceTracker(JITDylibSP JD);

  void makeDefunct();

  std::atomic_uintptr_t JDAndFlag;
};

/// Listens for ResourceTracker operations.
class ResourceManager {
public:
  virtual ~ResourceManager();
  virtual Error handleRemoveResources(JITDylib &JD, ResourceKey K) = 0;
  virtual void handleTransferResources(JITDylib &JD, ResourceKey DstK,
                                       ResourceKey SrcK) = 0;
};

/// A set of symbol names (represented by SymbolStringPtrs for efficiency).
using SymbolNameSet = DenseSet<SymbolStringPtr>;

/// A vector of symbol names.
using SymbolNameVector = std::vector<SymbolStringPtr>;

/// A map from symbol names (as SymbolStringPtrs) to JITSymbols
/// (address/flags pairs).
using SymbolMap = DenseMap<SymbolStringPtr, ExecutorSymbolDef>;

/// A map from symbol names (as SymbolStringPtrs) to JITSymbolFlags.
using SymbolFlagsMap = DenseMap<SymbolStringPtr, JITSymbolFlags>;

/// A map from JITDylibs to sets of symbols.
using SymbolDependenceMap = DenseMap<JITDylib *, SymbolNameSet>;

/// Lookup flags that apply to each dylib in the search order for a lookup.
///
/// If MatchExportedSymbolsOnly is used (the default) for a given dylib, then
/// only symbols in that dylib's interface will be searched. If MatchAllSymbols
/// is used then symbols with hidden visibility will match as well.
enum class JITDylibLookupFlags { MatchExportedSymbolsOnly, MatchAllSymbols };

/// Lookup flags that apply to each symbol in a lookup.
///
/// If RequiredSymbol is used (the default) for a given symbol then that symbol
/// must be found during the lookup or the lookup will fail returning a
/// SymbolNotFound error. If WeaklyReferencedSymbol is used and the given
/// symbol is not found then the query will continue, and no result for the
/// missing symbol will be present in the result (assuming the rest of the
/// lookup succeeds).
enum class SymbolLookupFlags { RequiredSymbol, WeaklyReferencedSymbol };

/// Describes the kind of lookup being performed. The lookup kind is passed to
/// symbol generators (if they're invoked) to help them determine what
/// definitions to generate.
///
/// Static -- Lookup is being performed as-if at static link time (e.g.
///           generators representing static archives should pull in new
///           definitions).
///
/// DLSym -- Lookup is being performed as-if at runtime (e.g. generators
///          representing static archives should not pull in new definitions).
enum class LookupKind { Static, DLSym };

/// A list of (JITDylib*, JITDylibLookupFlags) pairs to be used as a search
/// order during symbol lookup.
using JITDylibSearchOrder =
    std::vector<std::pair<JITDylib *, JITDylibLookupFlags>>;

/// Convenience function for creating a search order from an ArrayRef of
/// JITDylib*, all with the same flags.
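///
/// For illustration (hedged; the JITDylibs are hypothetical):
/// \code
///   auto SO = makeJITDylibSearchOrder({&MainJD, &PlatformJD},
///                                     JITDylibLookupFlags::MatchAllSymbols);
/// \endcode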
inline JITDylibSearchOrder makeJITDylibSearchOrder(
    ArrayRef<JITDylib *> JDs,
    JITDylibLookupFlags Flags = JITDylibLookupFlags::MatchExportedSymbolsOnly) {
  JITDylibSearchOrder O;
  O.reserve(JDs.size());
  for (auto *JD : JDs)
    O.push_back(std::make_pair(JD, Flags));
  return O;
}

/// A set of symbols to look up, each associated with a SymbolLookupFlags
/// value.
///
/// This class is backed by a vector and optimized for fast insertion,
/// deletion and iteration. It does not guarantee a stable order between
/// operations, and will not automatically detect duplicate elements (they
/// can be manually checked by calling the validate method).
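///
/// For illustration, a hedged sketch (the symbol names are hypothetical):
/// \code
///   SymbolLookupSet LS;
///   LS.add(ES.intern("main"))
///     .add(ES.intern("atexit"), SymbolLookupFlags::WeaklyReferencedSymbol);
/// \endcode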
class SymbolLookupSet {
public:
  using value_type = std::pair<SymbolStringPtr, SymbolLookupFlags>;
  using UnderlyingVector = std::vector<value_type>;
  using iterator = UnderlyingVector::iterator;
  using const_iterator = UnderlyingVector::const_iterator;

  SymbolLookupSet() = default;

  explicit SymbolLookupSet(
      SymbolStringPtr Name,
      SymbolLookupFlags Flags = SymbolLookupFlags::RequiredSymbol) {
    add(std::move(Name), Flags);
  }

  /// Construct a SymbolLookupSet from an initializer list of SymbolStringPtrs.
  explicit SymbolLookupSet(
      std::initializer_list<SymbolStringPtr> Names,
      SymbolLookupFlags Flags = SymbolLookupFlags::RequiredSymbol) {
    Symbols.reserve(Names.size());
    for (const auto &Name : Names)
      add(std::move(Name), Flags);
  }

  /// Construct a SymbolLookupSet from a SymbolNameSet with the given
  /// Flags used for each value.
  explicit SymbolLookupSet(
      const SymbolNameSet &Names,
      SymbolLookupFlags Flags = SymbolLookupFlags::RequiredSymbol) {
    Symbols.reserve(Names.size());
    for (const auto &Name : Names)
      add(Name, Flags);
  }

  /// Construct a SymbolLookupSet from a vector of symbols with the given Flags
  /// used for each value.
  /// If the ArrayRef contains duplicates it is up to the client to remove these
  /// before using this instance for lookup.
  explicit SymbolLookupSet(
      ArrayRef<SymbolStringPtr> Names,
      SymbolLookupFlags Flags = SymbolLookupFlags::RequiredSymbol) {
    Symbols.reserve(Names.size());
    for (const auto &Name : Names)
      add(Name, Flags);
  }

  /// Construct a SymbolLookupSet from DenseMap keys.
  template <typename KeyT>
  static SymbolLookupSet
  fromMapKeys(const DenseMap<SymbolStringPtr, KeyT> &M,
              SymbolLookupFlags Flags = SymbolLookupFlags::RequiredSymbol) {
    SymbolLookupSet Result;
    Result.Symbols.reserve(M.size());
    for (const auto &KV : M)
      Result.add(KV.first, Flags);
    return Result;
  }

  /// Add an element to the set. The client is responsible for checking that
  /// duplicates are not added.
  SymbolLookupSet &
  add(SymbolStringPtr Name,
      SymbolLookupFlags Flags = SymbolLookupFlags::RequiredSymbol) {
    Symbols.push_back(std::make_pair(std::move(Name), Flags));
    return *this;
  }

  /// Quickly append one lookup set to another.
  SymbolLookupSet &append(SymbolLookupSet Other) {
    Symbols.reserve(Symbols.size() + Other.size());
    for (auto &KV : Other)
      Symbols.push_back(std::move(KV));
    return *this;
  }

  bool empty() const { return Symbols.empty(); }
  UnderlyingVector::size_type size() const { return Symbols.size(); }
  iterator begin() { return Symbols.begin(); }
  iterator end() { return Symbols.end(); }
  const_iterator begin() const { return Symbols.begin(); }
  const_iterator end() const { return Symbols.end(); }

  /// Removes the Ith element of the vector, replacing it with the last element.
  void remove(UnderlyingVector::size_type I) {
    std::swap(Symbols[I], Symbols.back());
    Symbols.pop_back();
  }

  /// Removes the element pointed to by the given iterator. This iterator and
  /// all subsequent ones (including end()) are invalidated.
  void remove(iterator I) { remove(I - begin()); }

  /// Removes all elements matching the given predicate, which must be callable
  /// as bool(const SymbolStringPtr &, SymbolLookupFlags Flags).
  template <typename PredFn> void remove_if(PredFn &&Pred) {
    UnderlyingVector::size_type I = 0;
    while (I != Symbols.size()) {
      const auto &Name = Symbols[I].first;
      auto Flags = Symbols[I].second;
      if (Pred(Name, Flags))
        remove(I);
      else
        ++I;
    }
  }

  /// Loop over the elements of this SymbolLookupSet, applying the Body function
  /// to each one. Body must be callable as
  /// bool(const SymbolStringPtr &, SymbolLookupFlags).
  /// If Body returns true then the element just passed in is removed from the
  /// set. If Body returns false then the element is retained.
  template <typename BodyFn>
  auto forEachWithRemoval(BodyFn &&Body) -> std::enable_if_t<
      std::is_same<decltype(Body(std::declval<const SymbolStringPtr &>(),
                                 std::declval<SymbolLookupFlags>())),
                   bool>::value> {
    UnderlyingVector::size_type I = 0;
    while (I != Symbols.size()) {
      const auto &Name = Symbols[I].first;
      auto Flags = Symbols[I].second;
      if (Body(Name, Flags))
        remove(I);
      else
        ++I;
    }
  }

  /// Loop over the elements of this SymbolLookupSet, applying the Body function
  /// to each one. Body must be callable as
  /// Expected<bool>(const SymbolStringPtr &, SymbolLookupFlags).
  /// If Body returns a failure value, the loop exits immediately. If Body
  /// returns true then the element just passed in is removed from the set. If
  /// Body returns false then the element is retained.
  template <typename BodyFn>
  auto forEachWithRemoval(BodyFn &&Body) -> std::enable_if_t<
      std::is_same<decltype(Body(std::declval<const SymbolStringPtr &>(),
                                 std::declval<SymbolLookupFlags>())),
                   Expected<bool>>::value,
      Error> {
    UnderlyingVector::size_type I = 0;
    while (I != Symbols.size()) {
      const auto &Name = Symbols[I].first;
      auto Flags = Symbols[I].second;
      auto Remove = Body(Name, Flags);
      if (!Remove)
        return Remove.takeError();
      if (*Remove)
        remove(I);
      else
        ++I;
    }
    return Error::success();
  }

  /// Construct a SymbolNameVector from this instance by dropping the Flags
  /// values.
  SymbolNameVector getSymbolNames() const {
    SymbolNameVector Names;
    Names.reserve(Symbols.size());
    for (const auto &KV : Symbols)
      Names.push_back(KV.first);
    return Names;
  }

  /// Sort the lookup set by pointer value. This sort is fast but sensitive to
  /// allocation order and so should not be used where a consistent order is
  /// required.
  void sortByAddress() { llvm::sort(Symbols, llvm::less_first()); }

  /// Sort the lookup set lexicographically. This sort is slow but the order
  /// is unaffected by allocation order.
  void sortByName() {
    llvm::sort(Symbols, [](const value_type &LHS, const value_type &RHS) {
      return *LHS.first < *RHS.first;
    });
  }

  /// Remove any duplicate elements. If a SymbolLookupSet is not duplicate-free
  /// by construction, this method can be used to turn it into a proper set.
  void removeDuplicates() {
    sortByAddress();
    auto LastI = std::unique(Symbols.begin(), Symbols.end());
    Symbols.erase(LastI, Symbols.end());
  }

#ifndef NDEBUG
  /// Returns true if this set contains any duplicates. This should only be used
  /// in assertions.
  bool containsDuplicates() {
    if (Symbols.size() < 2)
      return false;
    sortByAddress();
    for (UnderlyingVector::size_type I = 1; I != Symbols.size(); ++I)
      if (Symbols[I].first == Symbols[I - 1].first)
        return true;
    return false;
  }
#endif

private:
  UnderlyingVector Symbols;
};
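
// Example (sketch): build a SymbolLookupSet with one required and one
// weakly-referenced symbol, then deduplicate before use. ES is assumed to be
// an ExecutionSession, used here only to intern the names.
//
//   SymbolLookupSet LS;
//   LS.add(ES.intern("foo"))
//     .add(ES.intern("bar"), SymbolLookupFlags::WeaklyReferencedSymbol);
//   LS.removeDuplicates(); // Needed only if duplicates may have been added.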

struct SymbolAliasMapEntry {
  SymbolAliasMapEntry() = default;
  SymbolAliasMapEntry(SymbolStringPtr Aliasee, JITSymbolFlags AliasFlags)
      : Aliasee(std::move(Aliasee)), AliasFlags(AliasFlags) {}

  SymbolStringPtr Aliasee;
  JITSymbolFlags AliasFlags;
};

/// A map of symbol names to (aliasee, flags) pairs.
using SymbolAliasMap = DenseMap<SymbolStringPtr, SymbolAliasMapEntry>;

/// Callback to notify client that symbols have been resolved.
using SymbolsResolvedCallback = unique_function<void(Expected<SymbolMap>)>;

/// Callback to register the dependencies for a given query.
using RegisterDependenciesFunction =
    std::function<void(const SymbolDependenceMap &)>;

/// This can be used as the value for a RegisterDependenciesFunction if there
/// are no dependants to register with.
extern RegisterDependenciesFunction NoDependenciesToRegister;

class ResourceTrackerDefunct : public ErrorInfo<ResourceTrackerDefunct> {
public:
  static char ID;

  ResourceTrackerDefunct(ResourceTrackerSP RT);
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;

private:
  ResourceTrackerSP RT;
};

/// Used to notify a JITDylib that the given set of symbols failed to
/// materialize.
class FailedToMaterialize : public ErrorInfo<FailedToMaterialize> {
public:
  static char ID;

  FailedToMaterialize(std::shared_ptr<SymbolStringPool> SSP,
                      std::shared_ptr<SymbolDependenceMap> Symbols);
  ~FailedToMaterialize();
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  const SymbolDependenceMap &getSymbols() const { return *Symbols; }

private:
  std::shared_ptr<SymbolStringPool> SSP;
  std::shared_ptr<SymbolDependenceMap> Symbols;
};

/// Used to notify clients when symbols cannot be found during a lookup.
class SymbolsNotFound : public ErrorInfo<SymbolsNotFound> {
public:
  static char ID;

  SymbolsNotFound(std::shared_ptr<SymbolStringPool> SSP, SymbolNameSet Symbols);
  SymbolsNotFound(std::shared_ptr<SymbolStringPool> SSP,
                  SymbolNameVector Symbols);
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  std::shared_ptr<SymbolStringPool> getSymbolStringPool() { return SSP; }
  const SymbolNameVector &getSymbols() const { return Symbols; }

private:
  std::shared_ptr<SymbolStringPool> SSP;
  SymbolNameVector Symbols;
};

/// Used to notify clients that a set of symbols could not be removed.
class SymbolsCouldNotBeRemoved : public ErrorInfo<SymbolsCouldNotBeRemoved> {
public:
  static char ID;

  SymbolsCouldNotBeRemoved(std::shared_ptr<SymbolStringPool> SSP,
                           SymbolNameSet Symbols);
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  std::shared_ptr<SymbolStringPool> getSymbolStringPool() { return SSP; }
  const SymbolNameSet &getSymbols() const { return Symbols; }

private:
  std::shared_ptr<SymbolStringPool> SSP;
  SymbolNameSet Symbols;
};

/// Errors of this type should be returned if a module fails to include
/// definitions that are claimed by the module's associated
/// MaterializationResponsibility. If this error is returned it is indicative of
/// a broken transformation / compiler / object cache.
class MissingSymbolDefinitions : public ErrorInfo<MissingSymbolDefinitions> {
public:
  static char ID;

  MissingSymbolDefinitions(std::shared_ptr<SymbolStringPool> SSP,
                           std::string ModuleName, SymbolNameVector Symbols)
      : SSP(std::move(SSP)), ModuleName(std::move(ModuleName)),
        Symbols(std::move(Symbols)) {}
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  std::shared_ptr<SymbolStringPool> getSymbolStringPool() { return SSP; }
  const std::string &getModuleName() const { return ModuleName; }
  const SymbolNameVector &getSymbols() const { return Symbols; }
private:
  std::shared_ptr<SymbolStringPool> SSP;
  std::string ModuleName;
  SymbolNameVector Symbols;
};

/// Errors of this type should be returned if a module contains definitions for
/// symbols that are not claimed by the module's associated
/// MaterializationResponsibility. If this error is returned it is indicative of
/// a broken transformation / compiler / object cache.
class UnexpectedSymbolDefinitions : public ErrorInfo<UnexpectedSymbolDefinitions> {
public:
  static char ID;

  UnexpectedSymbolDefinitions(std::shared_ptr<SymbolStringPool> SSP,
                              std::string ModuleName, SymbolNameVector Symbols)
      : SSP(std::move(SSP)), ModuleName(std::move(ModuleName)),
        Symbols(std::move(Symbols)) {}
  std::error_code convertToErrorCode() const override;
  void log(raw_ostream &OS) const override;
  std::shared_ptr<SymbolStringPool> getSymbolStringPool() { return SSP; }
  const std::string &getModuleName() const { return ModuleName; }
  const SymbolNameVector &getSymbols() const { return Symbols; }
private:
  std::shared_ptr<SymbolStringPool> SSP;
  std::string ModuleName;
  SymbolNameVector Symbols;
};

/// Tracks responsibility for materialization, and mediates interactions
/// between MaterializationUnits and JITDylibs.
///
/// An instance of this class is passed to MaterializationUnits when their
/// materialize method is called. It allows MaterializationUnits to resolve and
/// emit symbols, or abandon materialization by notifying any unmaterialized
/// symbols of an error.
class MaterializationResponsibility {
  friend class ExecutionSession;
  friend class JITDylib;

public:
  MaterializationResponsibility(MaterializationResponsibility &&) = delete;
  MaterializationResponsibility &
  operator=(MaterializationResponsibility &&) = delete;

  /// Destruct a MaterializationResponsibility instance. In debug mode this
  /// asserts that all symbols being tracked have been either emitted or
  /// notified of an error.
  ~MaterializationResponsibility();

  /// Runs the given callback under the session lock, passing in the associated
  /// ResourceKey. This is the safe way to associate resources with trackers.
  template <typename Func> Error withResourceKeyDo(Func &&F) const {
    return RT->withResourceKeyDo(std::forward<Func>(F));
  }

  /// Returns the target JITDylib that these symbols are being materialized
  /// into.
  JITDylib &getTargetJITDylib() const { return JD; }

  /// Returns the ExecutionSession for this instance.
  ExecutionSession &getExecutionSession() const;

  /// Returns the symbol flags map for this responsibility instance.
  /// Note: The returned flags may have transient flags (Lazy, Materializing)
  /// set. These should be stripped with JITSymbolFlags::stripTransientFlags
  /// before using.
  const SymbolFlagsMap &getSymbols() const { return SymbolFlags; }

  /// Returns the initialization pseudo-symbol, if any. This symbol will also
  /// be present in the SymbolFlagsMap for this MaterializationResponsibility
  /// object.
  const SymbolStringPtr &getInitializerSymbol() const { return InitSymbol; }

  /// Returns the names of any symbols covered by this
  /// MaterializationResponsibility object that have queries pending. This
  /// information can be used to return responsibility for unrequested symbols
  /// back to the JITDylib via the delegate method.
  SymbolNameSet getRequestedSymbols() const;

  /// Notifies the target JITDylib that the given symbols have been resolved.
  /// This will update the given symbols' addresses in the JITDylib, and notify
  /// any pending queries on the given symbols of their resolution. The given
  /// symbols must be ones covered by this MaterializationResponsibility
  /// instance. Individual calls to this method may resolve a subset of the
  /// symbols, but all symbols must have been resolved prior to calling emit.
  ///
  /// This method will return an error if any symbols being resolved have been
  /// moved to the error state due to the failure of a dependency. If this
  /// method returns an error then clients should log it and call
  /// failMaterialization. If no dependencies have been registered for the
  /// symbols covered by this MaterializationResponsibility then this method
  /// is guaranteed to return Error::success() and can be wrapped with cantFail.
  Error notifyResolved(const SymbolMap &Symbols);

  /// Notifies the target JITDylib (and any pending queries on that JITDylib)
  /// that all symbols covered by this MaterializationResponsibility instance
  /// have been emitted.
  ///
  /// This method will return an error if any symbols being resolved have been
  /// moved to the error state due to the failure of a dependency. If this
  /// method returns an error then clients should log it and call
  /// failMaterialization. If no dependencies have been registered for the
  /// symbols covered by this MaterializationResponsibility then this method
  /// is guaranteed to return Error::success() and can be wrapped with cantFail.
  Error notifyEmitted();

  /// Attempt to claim responsibility for new definitions. This method can be
  /// used to claim responsibility for symbols that are added to a
  /// materialization unit during the compilation process (e.g. literal pool
  /// symbols). Symbol linkage rules are the same as for symbols that are
  /// defined up front: duplicate strong definitions will result in errors.
  /// Duplicate weak definitions will be discarded (in which case they will
  /// not be added to this responsibility instance).
  ///
  /// This method can be used by materialization units that want to add
  /// additional symbols at materialization time (e.g. stubs, compile
  /// callbacks, metadata).
  Error defineMaterializing(SymbolFlagsMap SymbolFlags);

  /// Notify all not-yet-emitted symbols covered by this
  /// MaterializationResponsibility instance that an error has occurred.
  /// This will remove all symbols covered by this MaterializationResponsibility
  /// from the target JITDylib, and send an error to any queries waiting on
  /// these symbols.
  void failMaterialization();

  /// Transfers responsibility to the given MaterializationUnit for all
  /// symbols defined by that MaterializationUnit. This allows
  /// materializers to break up work based on run-time information (e.g.
  /// by introspecting which symbols have actually been looked up and
  /// materializing only those).
  Error replace(std::unique_ptr<MaterializationUnit> MU);

  /// Delegates responsibility for the given symbols to the returned
  /// materialization responsibility. Useful for breaking up work between
  /// threads, or different kinds of materialization processes.
  Expected<std::unique_ptr<MaterializationResponsibility>>
  delegate(const SymbolNameSet &Symbols);

  void addDependencies(const SymbolStringPtr &Name,
                       const SymbolDependenceMap &Dependencies);

  /// Add dependencies that apply to all symbols covered by this instance.
  void addDependenciesForAll(const SymbolDependenceMap &Dependencies);

private:
  /// Create a MaterializationResponsibility for the given JITDylib and
  /// initial symbols.
  MaterializationResponsibility(ResourceTrackerSP RT,
                                SymbolFlagsMap SymbolFlags,
                                SymbolStringPtr InitSymbol)
      : JD(RT->getJITDylib()), RT(std::move(RT)),
        SymbolFlags(std::move(SymbolFlags)), InitSymbol(std::move(InitSymbol)) {
    assert(!this->SymbolFlags.empty() && "Materializing nothing?");
  }

  JITDylib &JD;
  ResourceTrackerSP RT;
  SymbolFlagsMap SymbolFlags;
  SymbolStringPtr InitSymbol;
};
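
// Example (sketch): the typical happy path for a materializer that already
// knows its symbol's definition. Name and Sym are assumed to be the (only)
// symbol covered by R and its definition.
//
//   void materializeKnownSymbol(MaterializationResponsibility &R,
//                               SymbolStringPtr Name, ExecutorSymbolDef Sym) {
//     if (auto Err = R.notifyResolved({{Name, Sym}})) {
//       R.getExecutionSession().reportError(std::move(Err));
//       R.failMaterialization();
//       return;
//     }
//     if (auto Err = R.notifyEmitted()) {
//       R.getExecutionSession().reportError(std::move(Err));
//       R.failMaterialization();
//     }
//   }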

/// A MaterializationUnit represents a set of symbol definitions that can
/// be materialized as a group, or individually discarded (when overriding
/// definitions are encountered).
///
/// MaterializationUnits are used when providing lazy definitions of symbols to
/// JITDylibs. The JITDylib will call materialize when the address of a symbol
/// is requested via the lookup method. The JITDylib will call discard if a
/// stronger definition is added or already present.
class MaterializationUnit {
  friend class ExecutionSession;
  friend class JITDylib;

public:
  static char ID;

  struct Interface {
    Interface() = default;
    Interface(SymbolFlagsMap InitialSymbolFlags, SymbolStringPtr InitSymbol)
        : SymbolFlags(std::move(InitialSymbolFlags)),
          InitSymbol(std::move(InitSymbol)) {
      assert((!this->InitSymbol || this->SymbolFlags.count(this->InitSymbol)) &&
             "If set, InitSymbol should appear in InitialSymbolFlags map");
    }

    SymbolFlagsMap SymbolFlags;
    SymbolStringPtr InitSymbol;
  };

  MaterializationUnit(Interface I)
      : SymbolFlags(std::move(I.SymbolFlags)),
        InitSymbol(std::move(I.InitSymbol)) {}
  virtual ~MaterializationUnit() = default;

  /// Return the name of this materialization unit. Useful for debugging
  /// output.
  virtual StringRef getName() const = 0;

  /// Return the set of symbols that this source provides.
  const SymbolFlagsMap &getSymbols() const { return SymbolFlags; }

  /// Returns the initialization symbol for this MaterializationUnit (if any).
  const SymbolStringPtr &getInitializerSymbol() const { return InitSymbol; }

  /// Implementations of this method should materialize all symbols
  /// in the materialization unit, except for those that have been
  /// previously discarded.
  virtual void
  materialize(std::unique_ptr<MaterializationResponsibility> R) = 0;

  /// Called by JITDylibs to notify MaterializationUnits that the given symbol
  /// has been overridden.
  void doDiscard(const JITDylib &JD, const SymbolStringPtr &Name) {
    SymbolFlags.erase(Name);
    if (InitSymbol == Name) {
      DEBUG_WITH_TYPE("orc", {
        dbgs() << "In " << getName() << ": discarding init symbol \""
               << *Name << "\"\n";
      });
      InitSymbol = nullptr;
    }
    discard(JD, Name);
  }

protected:
  SymbolFlagsMap SymbolFlags;
  SymbolStringPtr InitSymbol;

private:
  virtual void anchor();

  /// Implementations of this method should discard the given symbol
  /// from the source (e.g. if the source is an LLVM IR Module and the
  /// symbol is a function, delete the function body or mark it available
  /// externally).
  virtual void discard(const JITDylib &JD, const SymbolStringPtr &Name) = 0;
};
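
// Example (sketch): a minimal MaterializationUnit that defines a single
// symbol whose definition is known up front. Hypothetical, for illustration.
//
//   class KnownSymbolMU : public MaterializationUnit {
//   public:
//     KnownSymbolMU(SymbolStringPtr Name, ExecutorSymbolDef Def)
//         : MaterializationUnit(
//               Interface({{Name, Def.getFlags()}}, SymbolStringPtr())),
//           Name(std::move(Name)), Def(Def) {}
//     StringRef getName() const override { return "KnownSymbolMU"; }
//     void materialize(
//         std::unique_ptr<MaterializationResponsibility> R) override {
//       // No dependencies are registered, so resolution and emission cannot
//       // fail here and cantFail is safe (see notifyResolved docs above).
//       cantFail(R->notifyResolved({{Name, Def}}));
//       cantFail(R->notifyEmitted());
//     }
//   private:
//     void discard(const JITDylib &, const SymbolStringPtr &) override {}
//     SymbolStringPtr Name;
//     ExecutorSymbolDef Def;
//   };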

/// A MaterializationUnit implementation for pre-existing absolute symbols.
///
/// All symbols will be resolved and marked ready as soon as the unit is
/// materialized.
class AbsoluteSymbolsMaterializationUnit : public MaterializationUnit {
public:
  AbsoluteSymbolsMaterializationUnit(SymbolMap Symbols);

  StringRef getName() const override;

private:
  void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
  static MaterializationUnit::Interface extractFlags(const SymbolMap &Symbols);

  SymbolMap Symbols;
};

/// Create an AbsoluteSymbolsMaterializationUnit with the given symbols.
/// Useful for inserting absolute symbols into a JITDylib. E.g.:
/// \code{.cpp}
///   JITDylib &JD = ...;
///   SymbolStringPtr Foo = ...;
///   ExecutorSymbolDef FooSym = ...;
///   if (auto Err = JD.define(absoluteSymbols({{Foo, FooSym}})))
///     return Err;
/// \endcode
///
inline std::unique_ptr<AbsoluteSymbolsMaterializationUnit>
absoluteSymbols(SymbolMap Symbols) {
  return std::make_unique<AbsoluteSymbolsMaterializationUnit>(
      std::move(Symbols));
}

/// A materialization unit for symbol aliases. Allows existing symbols to be
/// aliased with alternate flags.
class ReExportsMaterializationUnit : public MaterializationUnit {
public:
  /// SourceJD is allowed to be nullptr, in which case the source JITDylib is
  /// taken to be whatever JITDylib these definitions are materialized in (and
  /// MatchNonExported has no effect). This is useful for defining aliases
  /// within a JITDylib.
  ///
  /// Note: Care must be taken that no sets of aliases form a cycle, as such
  ///       a cycle will result in a deadlock when any symbol in the cycle is
  ///       resolved.
  ReExportsMaterializationUnit(JITDylib *SourceJD,
                               JITDylibLookupFlags SourceJDLookupFlags,
                               SymbolAliasMap Aliases);

  StringRef getName() const override;

private:
  void materialize(std::unique_ptr<MaterializationResponsibility> R) override;
  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override;
  static MaterializationUnit::Interface
  extractFlags(const SymbolAliasMap &Aliases);

  JITDylib *SourceJD = nullptr;
  JITDylibLookupFlags SourceJDLookupFlags;
  SymbolAliasMap Aliases;
};

/// Create a ReExportsMaterializationUnit with the given aliases.
/// Useful for defining symbol aliases. E.g., given a JITDylib JD containing
/// symbols "foo" and "bar", we can define aliases "baz" (for "foo") and "qux"
/// (for "bar") with: \code{.cpp}
///   SymbolStringPtr Foo = ...;
///   SymbolStringPtr Bar = ...;
///   SymbolStringPtr Baz = ...;
///   SymbolStringPtr Qux = ...;
///   if (auto Err = JD.define(symbolAliases({
///       {Baz, { Foo, JITSymbolFlags::Exported }},
///       {Qux, { Bar, JITSymbolFlags::Weak }}})))
///     return Err;
/// \endcode
inline std::unique_ptr<ReExportsMaterializationUnit>
symbolAliases(SymbolAliasMap Aliases) {
  return std::make_unique<ReExportsMaterializationUnit>(
      nullptr, JITDylibLookupFlags::MatchAllSymbols, std::move(Aliases));
}

/// Create a materialization unit for re-exporting symbols from another JITDylib
/// with alternative names/flags.
/// SourceJD will be searched using the given JITDylibLookupFlags.
inline std::unique_ptr<ReExportsMaterializationUnit>
reexports(JITDylib &SourceJD, SymbolAliasMap Aliases,
          JITDylibLookupFlags SourceJDLookupFlags =
              JITDylibLookupFlags::MatchExportedSymbolsOnly) {
  return std::make_unique<ReExportsMaterializationUnit>(
      &SourceJD, SourceJDLookupFlags, std::move(Aliases));
}
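
// Example (sketch): re-export "foo" from SourceJD into TargetJD under the
// name "foo_alias". ES, SourceJD and TargetJD are assumed to exist.
//
//   SymbolAliasMap Aliases;
//   Aliases[ES.intern("foo_alias")] = {ES.intern("foo"),
//                                      JITSymbolFlags::Exported};
//   if (auto Err = TargetJD.define(reexports(SourceJD, std::move(Aliases))))
//     return Err;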

/// Build a SymbolAliasMap for the common case where you want to re-export
/// symbols from another JITDylib with the same linkage/flags.
Expected<SymbolAliasMap>
buildSimpleReexportsAliasMap(JITDylib &SourceJD, const SymbolNameSet &Symbols);

/// Represents the state that a symbol has reached during materialization.
enum class SymbolState : uint8_t {
  Invalid,       ///< No symbol should be in this state.
  NeverSearched, ///< Added to the symbol table, never queried.
  Materializing, ///< Queried, materialization begun.
  Resolved,      ///< Assigned address, still materializing.
  Emitted,       ///< Emitted to memory, but waiting on transitive dependencies.
  Ready = 0x3f   ///< Ready and safe for clients to access.
};

/// A symbol query that returns results via a callback once all queried
/// symbols are available.
class AsynchronousSymbolQuery {
  friend class ExecutionSession;
  friend class InProgressFullLookupState;
  friend class JITDylib;
  friend class JITSymbolResolverAdapter;
  friend class MaterializationResponsibility;

public:
  /// Create a query for the given symbols. The NotifyComplete
  /// callback will be called once all queried symbols reach the given
  /// minimum state.
  AsynchronousSymbolQuery(const SymbolLookupSet &Symbols,
                          SymbolState RequiredState,
                          SymbolsResolvedCallback NotifyComplete);

  /// Notify the query that a requested symbol has reached the required state.
  void notifySymbolMetRequiredState(const SymbolStringPtr &Name,
                                    ExecutorSymbolDef Sym);

  /// Returns true if all symbols covered by this query have been
  /// resolved.
  bool isComplete() const { return OutstandingSymbolsCount == 0; }

private:
  void handleComplete(ExecutionSession &ES);

  SymbolState getRequiredState() { return RequiredState; }

  void addQueryDependence(JITDylib &JD, SymbolStringPtr Name);

  void removeQueryDependence(JITDylib &JD, const SymbolStringPtr &Name);

  void dropSymbol(const SymbolStringPtr &Name);

  void handleFailed(Error Err);

  void detach();

  SymbolsResolvedCallback NotifyComplete;
  SymbolDependenceMap QueryRegistrations;
  SymbolMap ResolvedSymbols;
  size_t OutstandingSymbolsCount;
  SymbolState RequiredState;
};

/// Wraps state for a lookup-in-progress.
/// DefinitionGenerators can optionally take ownership of a LookupState object
/// to suspend a lookup-in-progress while they search for definitions.
class LookupState {
  friend class OrcV2CAPIHelper;
  friend class ExecutionSession;

public:
  LookupState();
  LookupState(LookupState &&);
  LookupState &operator=(LookupState &&);
  ~LookupState();

  /// Continue the lookup. This can be called by DefinitionGenerators
  /// to re-start a captured query-application operation.
  void continueLookup(Error Err);

private:
  LookupState(std::unique_ptr<InProgressLookupState> IPLS);

  // For C API.
  void reset(InProgressLookupState *IPLS);

  std::unique_ptr<InProgressLookupState> IPLS;
};
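
// Example (sketch): a generator may suspend a lookup by moving the
// LookupState aside, then resume it later. PendingLookups is a hypothetical
// std::vector<LookupState> owned by the generator.
//
//   PendingLookups.push_back(std::move(LS));
//   // ... later, once definitions have been generated (or generation has
//   // failed):
//   LookupState Pending = std::move(PendingLookups.back());
//   PendingLookups.pop_back();
//   Pending.continueLookup(Error::success());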

/// Definition generators can be attached to JITDylibs to generate new
/// definitions for otherwise unresolved symbols during lookup.
class DefinitionGenerator {
public:
  virtual ~DefinitionGenerator();

  /// DefinitionGenerators should override this method to insert new
  /// definitions into the parent JITDylib. K specifies the kind of this
  /// lookup. JD specifies the target JITDylib being searched, and
  /// JDLookupFlags specifies whether the search should match against
  /// hidden symbols. Finally, Symbols describes the set of unresolved
  /// symbols and their associated lookup flags.
  virtual Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                              JITDylibLookupFlags JDLookupFlags,
                              const SymbolLookupSet &LookupSet) = 0;
};
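
// Example (sketch): a generator that defines every symbol it is asked for as
// a null absolute symbol. Hypothetical; real generators (e.g.
// DynamicLibrarySearchGenerator) search external sources for definitions.
//
//   class NullDefinitionGenerator : public DefinitionGenerator {
//   public:
//     Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
//                         JITDylibLookupFlags JDLookupFlags,
//                         const SymbolLookupSet &LookupSet) override {
//       SymbolMap NewDefs;
//       for (auto &KV : LookupSet)
//         NewDefs[KV.first] = {ExecutorAddr(), JITSymbolFlags::Exported};
//       return JD.define(absoluteSymbols(std::move(NewDefs)));
//     }
//   };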

/// Represents a JIT'd dynamic library.
///
/// This class aims to mimic the behavior of a regular dylib or shared object,
/// but without requiring the contained program representations to be compiled
/// up-front. The JITDylib's content is defined by adding MaterializationUnits,
/// and contained MaterializationUnits will typically rely on the JITDylib's
/// links-against order to resolve external references (similar to a regular
/// dylib).
///
/// The JITDylib object is a thin wrapper that references state held by the
/// ExecutionSession. JITDylibs can be removed, clearing this underlying state
/// and leaving the JITDylib object in a defunct state. In this state the
/// JITDylib's name is guaranteed to remain accessible. If the ExecutionSession
/// is still alive then other operations are callable but will return an Error
/// or null result (depending on the API). It is illegal to call any operation
/// other than getName on a JITDylib after the ExecutionSession has been torn
/// down.
///
/// JITDylibs cannot be moved or copied. Their address is stable, and useful as
/// a key in some JIT data structures.
class JITDylib : public ThreadSafeRefCountedBase<JITDylib>,
                 public jitlink::JITLinkDylib {
  friend class AsynchronousSymbolQuery;
  friend class ExecutionSession;
  friend class Platform;
  friend class MaterializationResponsibility;
public:

  JITDylib(const JITDylib &) = delete;
  JITDylib &operator=(const JITDylib &) = delete;
  JITDylib(JITDylib &&) = delete;
  JITDylib &operator=(JITDylib &&) = delete;
  ~JITDylib();

  /// Get a reference to the ExecutionSession for this JITDylib.
  ///
  /// It is legal to call this method on a defunct JITDylib, however the
  /// result will only be usable if the ExecutionSession is still alive. If
  /// this JITDylib is held by an error that may have torn down the JIT then
  /// the result should not be used.
  ExecutionSession &getExecutionSession() const { return ES; }

  /// Dump current JITDylib state to OS.
  ///
  /// It is legal to call this method on a defunct JITDylib.
  void dump(raw_ostream &OS);

  /// Calls remove on all trackers currently associated with this JITDylib.
  /// Does not run static deinits.
  ///
  /// Note that removal happens outside the session lock, so new code may be
  /// added concurrently while the clear is underway, and the newly added
  /// code will *not* be cleared. Adding new code concurrently with a clear
  /// is usually a bug and should be avoided.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  Error clear();

  /// Get the default resource tracker for this JITDylib.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  ResourceTrackerSP getDefaultResourceTracker();

  /// Create a resource tracker for this JITDylib.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  ResourceTrackerSP createResourceTracker();

  /// Adds a definition generator to this JITDylib and returns a reference to
  /// it.
  ///
  /// When JITDylibs are searched during lookup, if no existing definition of
  /// a symbol is found, then any generators that have been added are run (in
  /// the order that they were added) to potentially generate a definition.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  template <typename GeneratorT>
  GeneratorT &addGenerator(std::unique_ptr<GeneratorT> DefGenerator);

  /// Remove a definition generator from this JITDylib.
  ///
  /// The given generator must exist in this JITDylib's generators list (i.e.
  /// have been added and not yet removed).
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  void removeGenerator(DefinitionGenerator &G);

  /// Set the link order to be used when fixing up definitions in this JITDylib.
  /// This will replace the previous link order, and apply to any symbol
  /// resolutions made for definitions in this JITDylib after the call to
  /// setLinkOrder (even if the definition itself was added before the
  /// call).
  ///
  /// If LinkAgainstThisJITDylibFirst is true (the default) then this JITDylib
  /// will add itself to the beginning of the LinkOrder (Clients should not
  /// put this JITDylib in the list in this case, to avoid redundant lookups).
  ///
  /// If LinkAgainstThisJITDylibFirst is false then the link order will be used
  /// as-is. The primary motivation for this feature is to support deliberate
  /// shadowing of symbols in this JITDylib by a facade JITDylib. For example,
  /// the facade may resolve function names to stubs, and the stubs may compile
  /// lazily by looking up symbols in this dylib. Adding the facade dylib
  /// as the first in the link order (instead of this dylib) ensures that
  /// definitions within this dylib resolve to the lazy-compiling stubs,
  /// rather than immediately materializing the definitions in this dylib.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  void setLinkOrder(JITDylibSearchOrder NewSearchOrder,
                    bool LinkAgainstThisJITDylibFirst = true);

  /// Append the given JITDylibSearchOrder to the link order for this
  /// JITDylib (discarding any elements already present in this JITDylib's
  /// link order).
  void addToLinkOrder(const JITDylibSearchOrder &NewLinks);

  /// Add the given JITDylib to the link order for definitions in this
  /// JITDylib.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  void addToLinkOrder(JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags =
                          JITDylibLookupFlags::MatchExportedSymbolsOnly);

  /// Replace OldJD with NewJD in the link order if OldJD is present.
  /// Otherwise this operation is a no-op.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  void replaceInLinkOrder(JITDylib &OldJD, JITDylib &NewJD,
                          JITDylibLookupFlags JDLookupFlags =
                              JITDylibLookupFlags::MatchExportedSymbolsOnly);

  /// Remove the given JITDylib from the link order for this JITDylib if it is
  /// present. Otherwise this operation is a no-op.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  void removeFromLinkOrder(JITDylib &JD);

  /// Do something with the link order (run under the session lock).
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  template <typename Func>
  auto withLinkOrderDo(Func &&F)
      -> decltype(F(std::declval<const JITDylibSearchOrder &>()));

  /// Define all symbols provided by the materialization unit to be part of this
  /// JITDylib.
  ///
  /// If RT is not specified then the default resource tracker will be used.
  ///
  /// This overload always takes ownership of the MaterializationUnit. If any
  /// errors occur, the MaterializationUnit is consumed.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  template <typename MaterializationUnitType>
  Error define(std::unique_ptr<MaterializationUnitType> &&MU,
               ResourceTrackerSP RT = nullptr);

  /// Define all symbols provided by the materialization unit to be part of this
  /// JITDylib.
  ///
  /// This overload only takes ownership of the MaterializationUnit if no error
  /// is generated. If an error occurs, ownership remains with the caller. This
  /// may allow the caller to modify the MaterializationUnit to correct the
  /// issue, then re-call define.
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  template <typename MaterializationUnitType>
  Error define(std::unique_ptr<MaterializationUnitType> &MU,
               ResourceTrackerSP RT = nullptr);

  /// Tries to remove the given symbols.
  ///
  /// If any symbols are not defined in this JITDylib this method will return
  /// a SymbolsNotFound error covering the missing symbols.
  ///
  /// If all symbols are found but some symbols are in the process of being
  /// materialized this method will return a SymbolsCouldNotBeRemoved error.
  ///
  /// On success, all symbols are removed. On failure, the JITDylib state is
  /// left unmodified (no symbols are removed).
  ///
  /// It is illegal to call this method on a defunct JITDylib and the client
  /// is responsible for ensuring that they do not do so.
  Error remove(const SymbolNameSet &Names);

  /// Returns the given JITDylibs and all of their transitive dependencies in
  /// DFS order (based on linkage relationships). Each JITDylib will appear
  /// only once.
  ///
  /// If any JITDylib in the order is defunct then this method will return an
  /// error, otherwise returns the order.
  static Expected<std::vector<JITDylibSP>>
  getDFSLinkOrder(ArrayRef<JITDylibSP> JDs);

  /// Returns the given JITDylibs and all of their transitive dependencies in
  /// reverse DFS order (based on linkage relationships). Each JITDylib will
  /// appear only once.
  ///
  /// If any JITDylib in the order is defunct then this method will return an
  /// error, otherwise returns the order.
  static Expected<std::vector<JITDylibSP>>
  getReverseDFSLinkOrder(ArrayRef<JITDylibSP> JDs);

  /// Return this JITDylib and its transitive dependencies in DFS order
  /// based on linkage relationships.
  ///
  /// If any JITDylib in the order is defunct then this method will return an
  /// error, otherwise returns the order.
  Expected<std::vector<JITDylibSP>> getDFSLinkOrder();

  /// Return this JITDylib and its transitive dependencies in reverse DFS order
  /// based on linkage relationships.
  ///
  /// If any JITDylib in the order is defunct then this method will return an
  /// error, otherwise returns the order.
  Expected<std::vector<JITDylibSP>> getReverseDFSLinkOrder();

private:
  using AsynchronousSymbolQuerySet =
    std::set<std::shared_ptr<AsynchronousSymbolQuery>>;

  using AsynchronousSymbolQueryList =
      std::vector<std::shared_ptr<AsynchronousSymbolQuery>>;

  struct UnmaterializedInfo {
    UnmaterializedInfo(std::unique_ptr<MaterializationUnit> MU,
                       ResourceTracker *RT)
        : MU(std::move(MU)), RT(RT) {}

    std::unique_ptr<MaterializationUnit> MU;
    ResourceTracker *RT;
  };

  using UnmaterializedInfosMap =
      DenseMap<SymbolStringPtr, std::shared_ptr<UnmaterializedInfo>>;

  using UnmaterializedInfosList =
      std::vector<std::shared_ptr<UnmaterializedInfo>>;

  struct MaterializingInfo {
    SymbolDependenceMap Dependants;
    SymbolDependenceMap UnemittedDependencies;

    void addQuery(std::shared_ptr<AsynchronousSymbolQuery> Q);
    void removeQuery(const AsynchronousSymbolQuery &Q);
    AsynchronousSymbolQueryList takeQueriesMeeting(SymbolState RequiredState);
    AsynchronousSymbolQueryList takeAllPendingQueries() {
      return std::move(PendingQueries);
    }
    bool hasQueriesPending() const { return !PendingQueries.empty(); }
    const AsynchronousSymbolQueryList &pendingQueries() const {
      return PendingQueries;
    }
  private:
    AsynchronousSymbolQueryList PendingQueries;
  };

  using MaterializingInfosMap = DenseMap<SymbolStringPtr, MaterializingInfo>;

  class SymbolTableEntry {
  public:
    SymbolTableEntry() = default;
    SymbolTableEntry(JITSymbolFlags Flags)
        : Flags(Flags), State(static_cast<uint8_t>(SymbolState::NeverSearched)),
          MaterializerAttached(false), PendingRemoval(false) {}

    ExecutorAddr getAddress() const { return Addr; }
    JITSymbolFlags getFlags() const { return Flags; }
    SymbolState getState() const { return static_cast<SymbolState>(State); }

    bool hasMaterializerAttached() const { return MaterializerAttached; }
    bool isPendingRemoval() const { return PendingRemoval; }

    void setAddress(ExecutorAddr Addr) { this->Addr = Addr; }
    void setFlags(JITSymbolFlags Flags) { this->Flags = Flags; }
    void setState(SymbolState State) {
      assert(static_cast<uint8_t>(State) < (1 << 6) &&
             "State does not fit in bitfield");
      this->State = static_cast<uint8_t>(State);
    }

    void setMaterializerAttached(bool MaterializerAttached) {
      this->MaterializerAttached = MaterializerAttached;
    }

    void setPendingRemoval(bool PendingRemoval) {
      this->PendingRemoval = PendingRemoval;
    }

    ExecutorSymbolDef getSymbol() const { return {Addr, Flags}; }

  private:
    ExecutorAddr Addr;
    JITSymbolFlags Flags;
    uint8_t State : 6;
    uint8_t MaterializerAttached : 1;
    uint8_t PendingRemoval : 1;
  };

  using SymbolTable = DenseMap<SymbolStringPtr, SymbolTableEntry>;

  JITDylib(ExecutionSession &ES, std::string Name);

  std::pair<AsynchronousSymbolQuerySet, std::shared_ptr<SymbolDependenceMap>>
  removeTracker(ResourceTracker &RT);

  void transferTracker(ResourceTracker &DstRT, ResourceTracker &SrcRT);

  Error defineImpl(MaterializationUnit &MU);

  void installMaterializationUnit(std::unique_ptr<MaterializationUnit> MU,
                                  ResourceTracker &RT);

  void detachQueryHelper(AsynchronousSymbolQuery &Q,
                         const SymbolNameSet &QuerySymbols);

  void transferEmittedNodeDependencies(MaterializingInfo &DependantMI,
                                       const SymbolStringPtr &DependantName,
                                       MaterializingInfo &EmittedMI);

  Expected<SymbolFlagsMap>
  defineMaterializing(MaterializationResponsibility &FromMR,
                      SymbolFlagsMap SymbolFlags);

  Error replace(MaterializationResponsibility &FromMR,
                std::unique_ptr<MaterializationUnit> MU);

  Expected<std::unique_ptr<MaterializationResponsibility>>
  delegate(MaterializationResponsibility &FromMR, SymbolFlagsMap SymbolFlags,
           SymbolStringPtr InitSymbol);

  SymbolNameSet getRequestedSymbols(const SymbolFlagsMap &SymbolFlags) const;

  void addDependencies(const SymbolStringPtr &Name,
                       const SymbolDependenceMap &Dependants);

  Error resolve(MaterializationResponsibility &MR, const SymbolMap &Resolved);

  Error emit(MaterializationResponsibility &MR, const SymbolFlagsMap &Emitted);

  void unlinkMaterializationResponsibility(MaterializationResponsibility &MR);

  using FailedSymbolsWorklist =
      std::vector<std::pair<JITDylib *, SymbolStringPtr>>;

  static std::pair<AsynchronousSymbolQuerySet,
                   std::shared_ptr<SymbolDependenceMap>>
      failSymbols(FailedSymbolsWorklist);

  ExecutionSession &ES;
  enum { Open, Closing, Closed } State = Open;
  std::mutex GeneratorsMutex;
  SymbolTable Symbols;
  UnmaterializedInfosMap UnmaterializedInfos;
  MaterializingInfosMap MaterializingInfos;
  std::vector<std::shared_ptr<DefinitionGenerator>> DefGenerators;
  JITDylibSearchOrder LinkOrder;
  ResourceTrackerSP DefaultTracker;

  // Map trackers to sets of symbols tracked.
  DenseMap<ResourceTracker *, SymbolNameVector> TrackerSymbols;
  DenseMap<ResourceTracker *, DenseSet<MaterializationResponsibility *>>
      TrackerMRs;
};
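
// Example (sketch): typical JITDylib setup. ES is an ExecutionSession;
// MyGenerator is a hypothetical DefinitionGenerator; Name and Sym are an
// interned name and its definition; OtherJD is another JITDylib to link
// against.
//
//   JITDylib &JD = ES.createBareJITDylib("main");
//   JD.addGenerator(std::make_unique<MyGenerator>());
//   JD.addToLinkOrder(OtherJD);
//   if (auto Err = JD.define(absoluteSymbols({{Name, Sym}})))
//     return Err;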

/// Platforms set up standard symbols and mediate interactions between dynamic
/// initializers (e.g. C++ static constructors) and ExecutionSession state.
/// Note that Platforms do not automatically run initializers: clients are still
/// responsible for doing this.
class Platform {
public:
  virtual ~Platform();

  /// This method will be called outside the session lock each time a JITDylib
  /// is created (unless it is created with EmptyJITDylib set) to allow the
  /// Platform to install any JITDylib-specific standard symbols (e.g.
  /// __dso_handle).
  virtual Error setupJITDylib(JITDylib &JD) = 0;

  /// This method will be called outside the session lock each time a JITDylib
  /// is removed to allow the Platform to remove any JITDylib-specific data.
  virtual Error teardownJITDylib(JITDylib &JD) = 0;

  /// This method will be called under the ExecutionSession lock each time a
  /// MaterializationUnit is added to a JITDylib.
  virtual Error notifyAdding(ResourceTracker &RT,
                             const MaterializationUnit &MU) = 0;

  /// This method will be called under the ExecutionSession lock when a
  /// ResourceTracker is removed.
  virtual Error notifyRemoving(ResourceTracker &RT) = 0;

  /// A utility function for looking up initializer symbols. Performs a blocking
  /// lookup for the given symbols in each of the given JITDylibs.
  ///
  /// Note: This function is deprecated and will be removed in the near future.
  static Expected<DenseMap<JITDylib *, SymbolMap>>
  lookupInitSymbols(ExecutionSession &ES,
                    const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms);

  /// Performs an async lookup for the given symbols in each of the given
  /// JITDylibs, calling the given handler once all lookups have completed.
  static void
  lookupInitSymbolsAsync(unique_function<void(Error)> OnComplete,
                         ExecutionSession &ES,
                         const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms);
};
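
// Example (sketch): a minimal no-op Platform. Real platforms (e.g.
// MachOPlatform) install runtime support code and symbols here.
//
//   class NullPlatform : public Platform {
//   public:
//     Error setupJITDylib(JITDylib &JD) override { return Error::success(); }
//     Error teardownJITDylib(JITDylib &JD) override {
//       return Error::success();
//     }
//     Error notifyAdding(ResourceTracker &RT,
//                        const MaterializationUnit &MU) override {
//       return Error::success();
//     }
//     Error notifyRemoving(ResourceTracker &RT) override {
//       return Error::success();
//     }
//   };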

/// A materialization task.
class MaterializationTask : public RTTIExtends<MaterializationTask, Task> {
public:
  static char ID;

  MaterializationTask(std::unique_ptr<MaterializationUnit> MU,
                      std::unique_ptr<MaterializationResponsibility> MR)
      : MU(std::move(MU)), MR(std::move(MR)) {}
  void printDescription(raw_ostream &OS) override;
  void run() override;

private:
  std::unique_ptr<MaterializationUnit> MU;
  std::unique_ptr<MaterializationResponsibility> MR;
};

/// An ExecutionSession represents a running JIT program.
class ExecutionSession {
  friend class InProgressLookupFlagsState;
  friend class InProgressFullLookupState;
  friend class JITDylib;
  friend class LookupState;
  friend class MaterializationResponsibility;
  friend class ResourceTracker;

public:
  /// For reporting errors.
  using ErrorReporter = std::function<void(Error)>;

  /// Send a result to the remote.
  using SendResultFunction =
      unique_function<void(shared::WrapperFunctionResult)>;

  /// For dispatching ORC tasks (typically materialization tasks).
  using DispatchTaskFunction = unique_function<void(std::unique_ptr<Task> T)>;

  /// An asynchronous wrapper-function callable from the executor via
  /// jit-dispatch.
  using JITDispatchHandlerFunction = unique_function<void(
      SendResultFunction SendResult,
      const char *ArgData, size_t ArgSize)>;

  /// A map associating tag names with asynchronous wrapper function
  /// implementations in the JIT.
  using JITDispatchHandlerAssociationMap =
      DenseMap<SymbolStringPtr, JITDispatchHandlerFunction>;

  /// Construct an ExecutionSession with the given ExecutorProcessControl
  /// object.
  ExecutionSession(std::unique_ptr<ExecutorProcessControl> EPC);

  /// Destroy an ExecutionSession. Verifies that endSession was called prior to
  /// destruction.
  ~ExecutionSession();

  /// End the session. Closes all JITDylibs and disconnects from the
  /// executor. Clients must call this method before destroying the session.
  Error endSession();

  /// Get the ExecutorProcessControl object associated with this
  /// ExecutionSession.
  ExecutorProcessControl &getExecutorProcessControl() { return *EPC; }

  /// Return the triple for the executor.
  const Triple &getTargetTriple() const { return EPC->getTargetTriple(); }

  /// Get the SymbolStringPool for this instance.
  std::shared_ptr<SymbolStringPool> getSymbolStringPool() {
    return EPC->getSymbolStringPool();
  }

  /// Add a symbol name to the SymbolStringPool and return a pointer to it.
  SymbolStringPtr intern(StringRef SymName) { return EPC->intern(SymName); }

  /// Set the Platform for this ExecutionSession.
  void setPlatform(std::unique_ptr<Platform> P) { this->P = std::move(P); }

  /// Get the Platform for this session.
  /// Will return null if no Platform has been set for this ExecutionSession.
  Platform *getPlatform() { return P.get(); }

  /// Run the given lambda with the session mutex locked.
  template <typename Func> decltype(auto) runSessionLocked(Func &&F) {
    std::lock_guard<std::recursive_mutex> Lock(SessionMutex);
    return F();
  }

  /// Register the given ResourceManager with this ExecutionSession.
  /// Managers will be notified of events in reverse order of registration.
  void registerResourceManager(ResourceManager &RM);

  /// Deregister the given ResourceManager from this ExecutionSession.
  /// Manager must have been previously registered.
  void deregisterResourceManager(ResourceManager &RM);

  /// Return a pointer to the JITDylib with the given name.
  /// Ownership of the JITDylib remains with the ExecutionSession.
  JITDylib *getJITDylibByName(StringRef Name);

  /// Add a new bare JITDylib to this ExecutionSession.
  ///
  /// The JITDylib Name is required to be unique. Clients should verify that
  /// names are not being re-used (e.g. by calling getJITDylibByName) if names
  /// are based on user input.
  ///
  /// This call does not install any library code or symbols into the newly
  /// created JITDylib. The client is responsible for all configuration.
  JITDylib &createBareJITDylib(std::string Name);

  /// Add a new JITDylib to this ExecutionSession.
  ///
  /// The JITDylib Name is required to be unique. Clients should verify that
  /// names are not being re-used (e.g. by calling getJITDylibByName) if names
  /// are based on user input.
  ///
  /// If a Platform is attached then Platform::setupJITDylib will be called to
  /// install standard platform symbols (e.g. standard library interposes).
  /// If no Platform is attached this call is equivalent to createBareJITDylib.
  Expected<JITDylib &> createJITDylib(std::string Name);

  /// Closes the given JITDylib.
  ///
  /// This method clears all resources held for the JITDylib, puts it in the
  /// closed state, and clears all references held by the ExecutionSession and
  /// other JITDylibs. No further code can be added to the JITDylib, and the
  /// object will be freed once any remaining JITDylibSPs to it are destroyed.
  ///
  /// This method does *not* run static destructors.
  ///
  /// This method can only be called once for each JITDylib.
  Error removeJITDylib(JITDylib &JD);

  /// Set the error reporter function.
  ExecutionSession &setErrorReporter(ErrorReporter ReportError) {
    this->ReportError = std::move(ReportError);
    return *this;
  }

  /// Report an error for this execution session.
  ///
  /// Unhandled errors can be sent here to log them.
  void reportError(Error Err) { ReportError(std::move(Err)); }

  /// Set the task dispatch function.
  ExecutionSession &setDispatchTask(DispatchTaskFunction DispatchTask) {
    this->DispatchTask = std::move(DispatchTask);
    return *this;
  }

  /// Search the given JITDylibs to find the flags associated with each of the
  /// given symbols.
  void lookupFlags(LookupKind K, JITDylibSearchOrder SearchOrder,
                   SymbolLookupSet Symbols,
                   unique_function<void(Expected<SymbolFlagsMap>)> OnComplete);

  /// Blocking version of lookupFlags.
  Expected<SymbolFlagsMap> lookupFlags(LookupKind K,
                                       JITDylibSearchOrder SearchOrder,
                                       SymbolLookupSet Symbols);

  /// Search the given JITDylibs for the given symbols.
  ///
  /// SearchOrder lists the JITDylibs to search. For each dylib, the associated
  /// boolean indicates whether the search should match against non-exported
  /// (hidden visibility) symbols in that dylib (true means match against
  /// non-exported symbols, false means do not match).
  ///
  /// The NotifyComplete callback will be called once all requested symbols
  /// reach the required state.
  ///
  /// If all symbols are found, the RegisterDependencies function will be called
  /// while the session lock is held. This gives clients a chance to register
  /// dependencies on the queried symbols for any symbols they are
  /// materializing (if a MaterializationResponsibility instance is present,
  /// this can be implemented by calling
  /// MaterializationResponsibility::addDependencies). If there are no
  /// dependent symbols for this query (e.g. it is being made by a top level
  /// client to get an address to call) then the value NoDependenciesToRegister
  /// can be used.
  void lookup(LookupKind K, const JITDylibSearchOrder &SearchOrder,
              SymbolLookupSet Symbols, SymbolState RequiredState,
              SymbolsResolvedCallback NotifyComplete,
              RegisterDependenciesFunction RegisterDependencies);

  /// Blocking version of lookup above. Returns the resolved symbol map.
  /// If WaitUntilReady is true (the default), will not return until all
  /// requested symbols are ready (or an error occurs). If WaitUntilReady is
  /// false, will return as soon as all requested symbols are resolved,
  /// or an error occurs. If WaitUntilReady is false and an error occurs
  /// after resolution, the function will return a success value, but the
  /// error will be reported via reportError.
  Expected<SymbolMap> lookup(const JITDylibSearchOrder &SearchOrder,
                             SymbolLookupSet Symbols,
                             LookupKind K = LookupKind::Static,
                             SymbolState RequiredState = SymbolState::Ready,
                             RegisterDependenciesFunction RegisterDependencies =
                                 NoDependenciesToRegister);

  /// Convenience version of blocking lookup.
  /// Searches each of the JITDylibs in the search order in turn for the given
  /// symbol.
  Expected<ExecutorSymbolDef>
  lookup(const JITDylibSearchOrder &SearchOrder, SymbolStringPtr Symbol,
         SymbolState RequiredState = SymbolState::Ready);

  /// Convenience version of blocking lookup.
  /// Searches each of the JITDylibs in the search order in turn for the given
  /// symbol. The search will not find non-exported symbols.
  Expected<ExecutorSymbolDef>
  lookup(ArrayRef<JITDylib *> SearchOrder, SymbolStringPtr Symbol,
         SymbolState RequiredState = SymbolState::Ready);

  /// Convenience version of blocking lookup.
  /// Searches each of the JITDylibs in the search order in turn for the given
  /// symbol. The search will not find non-exported symbols.
  Expected<ExecutorSymbolDef>
  lookup(ArrayRef<JITDylib *> SearchOrder, StringRef Symbol,
         SymbolState RequiredState = SymbolState::Ready);
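
  // Example (sketch): blocking lookup of a single symbol in JD, waiting until
  // it is Ready. ES is this session, and JD is assumed to be one of its
  // JITDylibs.
  //
  //   if (auto Sym = ES.lookup({&JD}, "main"))
  //     ExecutorAddr MainAddr = Sym->getAddress();
  //   else
  //     return Sym.takeError();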

  /// Dispatch the given task using the configured task dispatcher.
  void dispatchTask(std::unique_ptr<Task> T) {
    assert(T && "T must be non-null");
    DEBUG_WITH_TYPE("orc", dumpDispatchInfo(*T));
    DispatchTask(std::move(T));
  }

  /// Run a wrapper function in the executor.
  ///
  /// The wrapper function should be callable as:
  ///
  /// \code{.cpp}
  ///   CWrapperFunctionResult fn(uint8_t *Data, uint64_t Size);
  /// \endcode
  ///
  /// The given OnComplete function will be called to return the result.
  template <typename... ArgTs>
  void callWrapperAsync(ArgTs &&... Args) {
    EPC->callWrapperAsync(std::forward<ArgTs>(Args)...);
  }

  /// Run a wrapper function in the executor. The wrapper function should be
  /// callable as:
  ///
  /// \code{.cpp}
  ///   CWrapperFunctionResult fn(uint8_t *Data, uint64_t Size);
  /// \endcode
  shared::WrapperFunctionResult callWrapper(ExecutorAddr WrapperFnAddr,
                                            ArrayRef<char> ArgBuffer) {
    return EPC->callWrapper(WrapperFnAddr, ArgBuffer);
  }

  /// Run a wrapper function using SPS to serialize the arguments and
  /// deserialize the results.
  template <typename SPSSignature, typename SendResultT, typename... ArgTs>
  void callSPSWrapperAsync(ExecutorAddr WrapperFnAddr, SendResultT &&SendResult,
                           const ArgTs &...Args) {
    EPC->callSPSWrapperAsync<SPSSignature, SendResultT, ArgTs...>(
        WrapperFnAddr, std::forward<SendResultT>(SendResult), Args...);
  }

  /// Run a wrapper function using SPS to serialize the arguments and
  /// deserialize the results.
  ///
  /// If SPSSignature is a non-void function signature then the second argument
  /// (the first in the Args list) should be a reference to a return value.
  template <typename SPSSignature, typename... WrapperCallArgTs>
  Error callSPSWrapper(ExecutorAddr WrapperFnAddr,
                       WrapperCallArgTs &&...WrapperCallArgs) {
    return EPC->callSPSWrapper<SPSSignature, WrapperCallArgTs...>(
        WrapperFnAddr, std::forward<WrapperCallArgTs>(WrapperCallArgs)...);
  }

  /// Wrap a handler that takes concrete argument types (and a sender for a
  /// concrete return type) to produce an AsyncHandlerWrapperFunction. Uses SPS
  /// to unpack the arguments and pack the result.
  ///
  /// This function is intended to support easy construction of
  /// AsyncHandlerWrapperFunctions that can be associated with a tag
  /// (using registerJITDispatchHandler) and called from the executor.
  template <typename SPSSignature, typename HandlerT>
  static JITDispatchHandlerFunction wrapAsyncWithSPS(HandlerT &&H) {
    return [H = std::forward<HandlerT>(H)](
               SendResultFunction SendResult,
               const char *ArgData, size_t ArgSize) mutable {
      shared::WrapperFunction<SPSSignature>::handleAsync(ArgData, ArgSize, H,
                                                         std::move(SendResult));
    };
  }

  /// Wrap a class method that takes concrete argument types (and a sender for
  /// a concrete return type) to produce an AsyncHandlerWrapperFunction. Uses
  /// SPS to unpack the arguments and pack the result.
  ///
  /// This function is intended to support easy construction of
  /// AsyncHandlerWrapperFunctions that can be associated with a tag
  /// (using registerJITDispatchHandler) and called from the executor.
  template <typename SPSSignature, typename ClassT, typename... MethodArgTs>
  static JITDispatchHandlerFunction
  wrapAsyncWithSPS(ClassT *Instance, void (ClassT::*Method)(MethodArgTs...)) {
    return wrapAsyncWithSPS<SPSSignature>(
        [Instance, Method](MethodArgTs &&...MethodArgs) {
          (Instance->*Method)(std::forward<MethodArgTs>(MethodArgs)...);
        });
  }

  /// For each tag symbol name, associate the corresponding
  /// AsyncHandlerWrapperFunction with the address of that symbol. The
  /// handler becomes callable from the executor using the ORC runtime
  /// __orc_rt_jit_dispatch function and the given tag.
  ///
  /// Tag symbols will be looked up in JD using LookupKind::Static,
  /// JITDylibLookupFlags::MatchAllSymbols (hidden tags will be found), and
  /// LookupFlags::WeaklyReferencedSymbol. Missing tag definitions will not
  /// cause an error; the corresponding handler will simply be dropped.
  Error registerJITDispatchHandlers(JITDylib &JD,
                                    JITDispatchHandlerAssociationMap WFs);
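
  // A minimal sketch, following the method-pointer wrapAsyncWithSPS pattern
  // above (hypothetical names: MyPlatform, rt_getInfo, and the
  // "__orc_rt_myplatform_get_info_tag" tag symbol, assumed defined in JD):
  //
  //   JITDispatchHandlerAssociationMap WFs;
  //   WFs[ES.intern("__orc_rt_myplatform_get_info_tag")] =
  //       ES.wrapAsyncWithSPS<shared::SPSExpected<shared::SPSString>(
  //           shared::SPSString)>(this, &MyPlatform::rt_getInfo);
  //   if (auto Err = ES.registerJITDispatchHandlers(JD, std::move(WFs)))
  //     return Err;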

  /// Run a registered jit-side wrapper function.
  /// This should be called by the ExecutorProcessControl instance in response
  /// to incoming jit-dispatch requests from the executor.
  void runJITDispatchHandler(SendResultFunction SendResult,
                             ExecutorAddr HandlerFnTagAddr,
                             ArrayRef<char> ArgBuffer);

  /// Dump the state of all the JITDylibs in this session.
  void dump(raw_ostream &OS);

private:
  static void logErrorsToStdErr(Error Err) {
    logAllUnhandledErrors(std::move(Err), errs(), "JIT session error: ");
  }

  static void runOnCurrentThread(std::unique_ptr<Task> T) { T->run(); }

  void dispatchOutstandingMUs();

  static std::unique_ptr<MaterializationResponsibility>
  createMaterializationResponsibility(ResourceTracker &RT,
                                      SymbolFlagsMap Symbols,
                                      SymbolStringPtr InitSymbol) {
    auto &JD = RT.getJITDylib();
    std::unique_ptr<MaterializationResponsibility> MR(
        new MaterializationResponsibility(&RT, std::move(Symbols),
                                          std::move(InitSymbol)));
    JD.TrackerMRs[&RT].insert(MR.get());
    return MR;
  }

  Error removeResourceTracker(ResourceTracker &RT);
  void transferResourceTracker(ResourceTracker &DstRT, ResourceTracker &SrcRT);
  void destroyResourceTracker(ResourceTracker &RT);

  // State machine functions for query application.

  /// IL_updateCandidatesFor is called to remove already-defined symbols that
  /// match a given query from the set of candidate symbols to generate
  /// definitions for (no need to generate a definition if one already exists).
  Error IL_updateCandidatesFor(JITDylib &JD, JITDylibLookupFlags JDLookupFlags,
                               SymbolLookupSet &Candidates,
                               SymbolLookupSet *NonCandidates);

  /// OL_applyQueryPhase1 is an optionally re-startable loop for triggering
  /// definition generation. It is called when a lookup is performed, and again
  /// each time that LookupState::continueLookup is called.
  void OL_applyQueryPhase1(std::unique_ptr<InProgressLookupState> IPLS,
                           Error Err);

  /// OL_completeLookup is run once phase 1 successfully completes for a lookup
  /// call. It attempts to attach the symbol to all symbol table entries and
  /// collect all MaterializationUnits to dispatch. If this method fails then
  /// all MaterializationUnits will be left un-materialized.
  void OL_completeLookup(std::unique_ptr<InProgressLookupState> IPLS,
                         std::shared_ptr<AsynchronousSymbolQuery> Q,
                         RegisterDependenciesFunction RegisterDependencies);

  /// OL_completeLookupFlags is run once phase 1 successfully completes for a
  /// lookupFlags call.
  void OL_completeLookupFlags(
      std::unique_ptr<InProgressLookupState> IPLS,
      unique_function<void(Expected<SymbolFlagsMap>)> OnComplete);

  // State machine functions for MaterializationResponsibility.
  void OL_destroyMaterializationResponsibility(
      MaterializationResponsibility &MR);
  SymbolNameSet OL_getRequestedSymbols(const MaterializationResponsibility &MR);
  Error OL_notifyResolved(MaterializationResponsibility &MR,
                          const SymbolMap &Symbols);
  Error OL_notifyEmitted(MaterializationResponsibility &MR);
  Error OL_defineMaterializing(MaterializationResponsibility &MR,
                               SymbolFlagsMap SymbolFlags);
  void OL_notifyFailed(MaterializationResponsibility &MR);
  Error OL_replace(MaterializationResponsibility &MR,
                   std::unique_ptr<MaterializationUnit> MU);
  Expected<std::unique_ptr<MaterializationResponsibility>>
  OL_delegate(MaterializationResponsibility &MR, const SymbolNameSet &Symbols);
  void OL_addDependencies(MaterializationResponsibility &MR,
                          const SymbolStringPtr &Name,
                          const SymbolDependenceMap &Dependencies);
  void OL_addDependenciesForAll(MaterializationResponsibility &MR,
                                const SymbolDependenceMap &Dependencies);

#ifndef NDEBUG
  void dumpDispatchInfo(Task &T);
#endif // NDEBUG

  mutable std::recursive_mutex SessionMutex;
  bool SessionOpen = true;
  std::unique_ptr<ExecutorProcessControl> EPC;
  std::unique_ptr<Platform> P;
  ErrorReporter ReportError = logErrorsToStdErr;
  DispatchTaskFunction DispatchTask = runOnCurrentThread;

  std::vector<ResourceManager *> ResourceManagers;

  std::vector<JITDylibSP> JDs;

  // FIXME: Remove this (and runOutstandingMUs) once the linking layer works
  //        with callbacks from asynchronous queries.
  mutable std::recursive_mutex OutstandingMUsMutex;
  std::vector<std::pair<std::unique_ptr<MaterializationUnit>,
                        std::unique_ptr<MaterializationResponsibility>>>
      OutstandingMUs;

  mutable std::mutex JITDispatchHandlersMutex;
  DenseMap<ExecutorAddr, std::shared_ptr<JITDispatchHandlerFunction>>
      JITDispatchHandlers;
};

template <typename Func> Error ResourceTracker::withResourceKeyDo(Func &&F) {
  return getJITDylib().getExecutionSession().runSessionLocked([&]() -> Error {
    if (isDefunct())
      return make_error<ResourceTrackerDefunct>(this);
    F(getKeyUnsafe());
    return Error::success();
  });
}
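
// A minimal usage sketch (RT is assumed to be a live ResourceTrackerSP, and
// ResourcesByKey a hypothetical client-side map from ResourceKeys to
// resources):
//
//   if (auto Err = RT->withResourceKeyDo(
//           [&](ResourceKey K) { ResourcesByKey[K].push_back(NewResource); }))
//     return Err; // RT was already defunct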

inline ExecutionSession &
MaterializationResponsibility::getExecutionSession() const {
  return JD.getExecutionSession();
}

template <typename GeneratorT>
GeneratorT &JITDylib::addGenerator(std::unique_ptr<GeneratorT> DefGenerator) {
  auto &G = *DefGenerator;
  ES.runSessionLocked([&] {
    assert(State == Open && "Cannot add generator to closed JITDylib");
    DefGenerators.push_back(std::move(DefGenerator));
  });
  return G;
}
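
// A minimal usage sketch: attach a generator that falls back to symbols in
// the host process (DynamicLibrarySearchGenerator is declared in
// ExecutionUtils.h; a JITDylib JD and the module's DataLayout DL are assumed):
//
//   JD.addGenerator(cantFail(
//       DynamicLibrarySearchGenerator::GetForCurrentProcess(
//           DL.getGlobalPrefix())));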

template <typename Func>
auto JITDylib::withLinkOrderDo(Func &&F)
    -> decltype(F(std::declval<const JITDylibSearchOrder &>())) {
  assert(State == Open && "Cannot use link order of closed JITDylib");
  return ES.runSessionLocked([&]() { return F(LinkOrder); });
}
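
// A minimal usage sketch: snapshot JD's current link order under the session
// lock.
//
//   auto LinkOrderCopy =
//       JD.withLinkOrderDo([](const JITDylibSearchOrder &LO) { return LO; });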

template <typename MaterializationUnitType>
Error JITDylib::define(std::unique_ptr<MaterializationUnitType> &&MU,
                       ResourceTrackerSP RT) {
  assert(MU && "Can not define with a null MU");

  if (MU->getSymbols().empty()) {
    // Empty MUs are allowable but pathological, so issue a warning.
    DEBUG_WITH_TYPE("orc", {
      dbgs() << "Warning: Discarding empty MU " << MU->getName() << " for "
             << getName() << "\n";
    });
    return Error::success();
  } else
    DEBUG_WITH_TYPE("orc", {
      dbgs() << "Defining MU " << MU->getName() << " for " << getName()
             << " (tracker: ";
      if (RT == getDefaultResourceTracker())
        dbgs() << "default)";
      else if (RT)
        dbgs() << RT.get() << ")\n";
      else
        dbgs() << "0x0, default will be used)\n";
    });

  return ES.runSessionLocked([&, this]() -> Error {
    assert(State == Open && "JD is defunct");

    if (auto Err = defineImpl(*MU))
      return Err;

    if (!RT)
      RT = getDefaultResourceTracker();

    if (auto *P = ES.getPlatform()) {
      if (auto Err = P->notifyAdding(*RT, *MU))
        return Err;
    }

    installMaterializationUnit(std::move(MU), *RT);
    return Error::success();
  });
}

template <typename MaterializationUnitType>
Error JITDylib::define(std::unique_ptr<MaterializationUnitType> &MU,
                       ResourceTrackerSP RT) {
  assert(MU && "Can not define with a null MU");

  if (MU->getSymbols().empty()) {
    // Empty MUs are allowable but pathological, so issue a warning.
    DEBUG_WITH_TYPE("orc", {
      dbgs() << "Warning: Discarding empty MU " << MU->getName() << getName()
             << "\n";
    });
    return Error::success();
  } else
    DEBUG_WITH_TYPE("orc", {
      dbgs() << "Defining MU " << MU->getName() << " for " << getName()
             << " (tracker: ";
      if (RT == getDefaultResourceTracker())
        dbgs() << "default)";
      else if (RT)
        dbgs() << RT.get() << ")\n";
      else
        dbgs() << "0x0, default will be used)\n";
    });

  return ES.runSessionLocked([&, this]() -> Error {
    assert(State == Open && "JD is defunct");

    if (auto Err = defineImpl(*MU))
      return Err;

    if (!RT)
      RT = getDefaultResourceTracker();

    if (auto *P = ES.getPlatform()) {
      if (auto Err = P->notifyAdding(*RT, *MU))
        return Err;
    }

    installMaterializationUnit(std::move(MU), *RT);
    return Error::success();
  });
}

/// ReexportsGenerator can be used with JITDylib::addGenerator to automatically
/// re-export a subset of the source JITDylib's symbols in the target.
class ReexportsGenerator : public DefinitionGenerator {
public:
  using SymbolPredicate = std::function<bool(SymbolStringPtr)>;

  /// Create a reexports generator. If an Allow predicate is passed, only
  /// symbols for which the predicate returns true will be reexported. If no
  /// Allow predicate is passed, all symbols will be exported.
  ReexportsGenerator(JITDylib &SourceJD,
                     JITDylibLookupFlags SourceJDLookupFlags,
                     SymbolPredicate Allow = SymbolPredicate());

  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags,
                      const SymbolLookupSet &LookupSet) override;

private:
  JITDylib &SourceJD;
  JITDylibLookupFlags SourceJDLookupFlags;
  SymbolPredicate Allow;
};
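
// A minimal usage sketch: re-export every exported symbol of SourceJD from
// TargetJD (both JITDylibs are assumed to exist):
//
//   TargetJD.addGenerator(std::make_unique<ReexportsGenerator>(
//       SourceJD, JITDylibLookupFlags::MatchExportedSymbolsOnly));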

// --------------- IMPLEMENTATION --------------
// Implementations for inline functions/methods.
// ---------------------------------------------

inline MaterializationResponsibility::~MaterializationResponsibility() {
  getExecutionSession().OL_destroyMaterializationResponsibility(*this);
}

inline SymbolNameSet MaterializationResponsibility::getRequestedSymbols() const {
  return getExecutionSession().OL_getRequestedSymbols(*this);
}

inline Error MaterializationResponsibility::notifyResolved(
    const SymbolMap &Symbols) {
  return getExecutionSession().OL_notifyResolved(*this, Symbols);
}

inline Error MaterializationResponsibility::notifyEmitted() {
  return getExecutionSession().OL_notifyEmitted(*this);
}

inline Error MaterializationResponsibility::defineMaterializing(
    SymbolFlagsMap SymbolFlags) {
  return getExecutionSession().OL_defineMaterializing(*this,
                                                      std::move(SymbolFlags));
}

inline void MaterializationResponsibility::failMaterialization() {
  getExecutionSession().OL_notifyFailed(*this);
}

inline Error MaterializationResponsibility::replace(
    std::unique_ptr<MaterializationUnit> MU) {
  return getExecutionSession().OL_replace(*this, std::move(MU));
}

inline Expected<std::unique_ptr<MaterializationResponsibility>>
MaterializationResponsibility::delegate(const SymbolNameSet &Symbols) {
  return getExecutionSession().OL_delegate(*this, Symbols);
}

inline void MaterializationResponsibility::addDependencies(
    const SymbolStringPtr &Name, const SymbolDependenceMap &Dependencies) {
  getExecutionSession().OL_addDependencies(*this, Name, Dependencies);
}

inline void MaterializationResponsibility::addDependenciesForAll(
    const SymbolDependenceMap &Dependencies) {
  getExecutionSession().OL_addDependenciesForAll(*this, Dependencies);
}

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_CORE_H
//===--- DebuggerSupportPlugin.h -- Utils for debugger support --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generates debug objects and registers them using the jit-loader-gdb protocol.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORTPLUGIN_H
#define LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORTPLUGIN_H

#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"

namespace llvm {
namespace orc {

/// For each object containing debug info, installs JITLink passes to synthesize
/// a debug object and then register it via the GDB JIT-registration interface.
///
/// Currently MachO only. For ELF use DebugObjectManagerPlugin. These two
/// plugins will be merged in the near future.
class GDBJITDebugInfoRegistrationPlugin : public ObjectLinkingLayer::Plugin {
public:
  class DebugSectionSynthesizer {
  public:
    virtual ~DebugSectionSynthesizer() = default;
    virtual Error startSynthesis() = 0;
    virtual Error completeSynthesisAndRegister() = 0;
  };

  static Expected<std::unique_ptr<GDBJITDebugInfoRegistrationPlugin>>
  Create(ExecutionSession &ES, JITDylib &ProcessJD, const Triple &TT);

  GDBJITDebugInfoRegistrationPlugin(ExecutorAddr RegisterActionAddr)
      : RegisterActionAddr(RegisterActionAddr) {}

  Error notifyFailed(MaterializationResponsibility &MR) override;
  Error notifyRemovingResources(JITDylib &JD, ResourceKey K) override;

  void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                   ResourceKey SrcKey) override;

  void modifyPassConfig(MaterializationResponsibility &MR,
                        jitlink::LinkGraph &LG,
                        jitlink::PassConfiguration &PassConfig) override;

private:
  void modifyPassConfigForMachO(MaterializationResponsibility &MR,
                                jitlink::LinkGraph &LG,
                                jitlink::PassConfiguration &PassConfig);

  ExecutorAddr RegisterActionAddr;
};

} // namespace orc
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_DEBUGGERSUPPORTPLUGIN_H
//===- IRCompileLayer.h -- Eagerly compile IR for JIT -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Contains the definition for a basic, eagerly compiling layer of the JIT.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Layer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include <functional>
#include <memory>
#include <mutex>

namespace llvm {

class Module;

namespace orc {

class IRCompileLayer : public IRLayer {
public:
  class IRCompiler {
  public:
    IRCompiler(IRSymbolMapper::ManglingOptions MO) : MO(std::move(MO)) {}
    virtual ~IRCompiler();
    const IRSymbolMapper::ManglingOptions &getManglingOptions() const {
      return MO;
    }
    virtual Expected<std::unique_ptr<MemoryBuffer>> operator()(Module &M) = 0;

  protected:
    IRSymbolMapper::ManglingOptions &manglingOptions() { return MO; }

  private:
    IRSymbolMapper::ManglingOptions MO;
  };

  using NotifyCompiledFunction = std::function<void(
      MaterializationResponsibility &R, ThreadSafeModule TSM)>;

  IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
                 std::unique_ptr<IRCompiler> Compile);

  IRCompiler &getCompiler() { return *Compile; }

  void setNotifyCompiled(NotifyCompiledFunction NotifyCompiled);
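
  // A minimal usage sketch (CompileLayer is assumed to be an IRCompileLayer
  // instance):
  //
  //   CompileLayer.setNotifyCompiled(
  //       [](MaterializationResponsibility &R, ThreadSafeModule TSM) {
  //         dbgs() << "Compiled a module for "
  //                << R.getTargetJITDylib().getName() << "\n";
  //       });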

  void emit(std::unique_ptr<MaterializationResponsibility> R,
            ThreadSafeModule TSM) override;

private:
  mutable std::mutex IRLayerMutex;
  ObjectLayer &BaseLayer;
  std::unique_ptr<IRCompiler> Compile;
  const IRSymbolMapper::ManglingOptions *ManglingOpts;
  NotifyCompiledFunction NotifyCompiled = NotifyCompiledFunction();
};

} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
//===-- ELFNixPlatform.h -- Utilities for executing ELF in Orc --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Linux/BSD support for executing JIT'd ELF in Orc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_ELFNIXPLATFORM_H
#define LLVM_EXECUTIONENGINE_ORC_ELFNIXPLATFORM_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"

#include <future>
#include <thread>
#include <vector>

namespace llvm {
namespace orc {

struct ELFPerObjectSectionsToRegister {
  ExecutorAddrRange EHFrameSection;
  ExecutorAddrRange ThreadDataSection;
};

struct ELFNixJITDylibInitializers {
  using SectionList = std::vector<ExecutorAddrRange>;

  ELFNixJITDylibInitializers(std::string Name, ExecutorAddr DSOHandleAddress)
      : Name(std::move(Name)), DSOHandleAddress(std::move(DSOHandleAddress)) {}

  std::string Name;
  ExecutorAddr DSOHandleAddress;

  StringMap<SectionList> InitSections;
};

class ELFNixJITDylibDeinitializers {};

using ELFNixJITDylibInitializerSequence =
    std::vector<ELFNixJITDylibInitializers>;

using ELFNixJITDylibDeinitializerSequence =
    std::vector<ELFNixJITDylibDeinitializers>;

/// Mediates between ELFNix initialization and ExecutionSession state.
class ELFNixPlatform : public Platform {
public:
  /// Try to create an ELFNixPlatform instance, adding the ORC runtime to the
  /// given JITDylib.
  ///
  /// The ORC runtime requires access to a number of symbols in
  /// libc++. It is up to the caller to ensure that the required
  /// symbols can be referenced by code added to PlatformJD. The
  /// standard way to achieve this is to first attach dynamic library
  /// search generators for either the given process, or for the
  /// specific required libraries, to PlatformJD, then to create the
  /// platform instance:
  ///
  /// \code{.cpp}
  ///   auto &PlatformJD = ES.createBareJITDylib("stdlib");
  ///   PlatformJD.addGenerator(
  ///     ExitOnErr(EPCDynamicLibrarySearchGenerator
  ///                 ::GetForTargetProcess(EPC)));
  ///   ES.setPlatform(
  ///     ExitOnErr(ELFNixPlatform::Create(ES, ObjLayer, PlatformJD,
  ///                                      "/path/to/orc/runtime")));
  /// \endcode
  ///
  /// Alternatively, these symbols could be added to another JITDylib that
  /// PlatformJD links against.
  ///
  /// Clients are also responsible for ensuring that any JIT'd code that
  /// depends on runtime functions (including any code using TLV or static
  /// destructors) can reference the runtime symbols. This is usually achieved
  /// by linking any JITDylibs containing regular code against
  /// PlatformJD.
  ///
  /// By default, ELFNixPlatform will add the set of aliases returned by the
  /// standardPlatformAliases function. This includes both required aliases
  /// (e.g. __cxa_atexit -> __orc_rt_elf_cxa_atexit for static destructor
  /// support), and optional aliases that provide JIT versions of common
  /// functions (e.g. dlopen -> __orc_rt_elf_jit_dlopen). Clients can
  /// override these defaults by passing a value other than std::nullopt for
  /// the RuntimeAliases argument, in which case the client is responsible for
  /// setting up all aliases (including the required ones).
  static Expected<std::unique_ptr<ELFNixPlatform>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         JITDylib &PlatformJD, std::unique_ptr<DefinitionGenerator> OrcRuntime,
         std::optional<SymbolAliasMap> RuntimeAliases = std::nullopt);

  /// Construct using a path to the ORC runtime.
  static Expected<std::unique_ptr<ELFNixPlatform>>
  Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
         JITDylib &PlatformJD, const char *OrcRuntimePath,
         std::optional<SymbolAliasMap> RuntimeAliases = std::nullopt);

  ExecutionSession &getExecutionSession() const { return ES; }
  ObjectLinkingLayer &getObjectLinkingLayer() const { return ObjLinkingLayer; }

  Error setupJITDylib(JITDylib &JD) override;
  Error teardownJITDylib(JITDylib &JD) override;
  Error notifyAdding(ResourceTracker &RT,
                     const MaterializationUnit &MU) override;
  Error notifyRemoving(ResourceTracker &RT) override;

  /// Returns an AliasMap containing the default aliases for the ELFNixPlatform.
  /// This can be modified by clients when constructing the platform to add
  /// or remove aliases.
  static Expected<SymbolAliasMap> standardPlatformAliases(ExecutionSession &ES,
                                                          JITDylib &PlatformJD);

  /// Returns the array of required CXX aliases.
  static ArrayRef<std::pair<const char *, const char *>> requiredCXXAliases();

  /// Returns the array of standard runtime utility aliases for ELF.
  static ArrayRef<std::pair<const char *, const char *>>
  standardRuntimeUtilityAliases();

private:
  // The ELFNixPlatformPlugin scans/modifies LinkGraphs to support ELF
  // platform features including initializers, exceptions, TLV, and language
  // runtime registration.
  class ELFNixPlatformPlugin : public ObjectLinkingLayer::Plugin {
  public:
    ELFNixPlatformPlugin(ELFNixPlatform &MP) : MP(MP) {}

    void modifyPassConfig(MaterializationResponsibility &MR,
                          jitlink::LinkGraph &G,
                          jitlink::PassConfiguration &Config) override;

    SyntheticSymbolDependenciesMap
    getSyntheticSymbolDependencies(MaterializationResponsibility &MR) override;

    // FIXME: We should be tentatively tracking scraped sections and discarding
    // if the MR fails.
    Error notifyFailed(MaterializationResponsibility &MR) override {
      return Error::success();
    }

    Error notifyRemovingResources(JITDylib &JD, ResourceKey K) override {
      return Error::success();
    }

    void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                     ResourceKey SrcKey) override {}

  private:
    using InitSymbolDepMap =
        DenseMap<MaterializationResponsibility *, JITLinkSymbolSet>;

    void addInitializerSupportPasses(MaterializationResponsibility &MR,
                                     jitlink::PassConfiguration &Config);

    void addDSOHandleSupportPasses(MaterializationResponsibility &MR,
                                   jitlink::PassConfiguration &Config);

    void addEHAndTLVSupportPasses(MaterializationResponsibility &MR,
                                  jitlink::PassConfiguration &Config);

    Error preserveInitSections(jitlink::LinkGraph &G,
                               MaterializationResponsibility &MR);

    Error registerInitSections(jitlink::LinkGraph &G, JITDylib &JD);

    Error fixTLVSectionsAndEdges(jitlink::LinkGraph &G, JITDylib &JD);

    std::mutex PluginMutex;
    ELFNixPlatform &MP;
    InitSymbolDepMap InitSymbolDeps;
  };

  using SendInitializerSequenceFn =
      unique_function<void(Expected<ELFNixJITDylibInitializerSequence>)>;

  using SendDeinitializerSequenceFn =
      unique_function<void(Expected<ELFNixJITDylibDeinitializerSequence>)>;

  using SendSymbolAddressFn = unique_function<void(Expected<ExecutorAddr>)>;

  static bool supportedTarget(const Triple &TT);

  ELFNixPlatform(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
                 JITDylib &PlatformJD,
                 std::unique_ptr<DefinitionGenerator> OrcRuntimeGenerator,
                 Error &Err);

  // Associate ELFNixPlatform JIT-side runtime support functions with handlers.
  Error associateRuntimeSupportFunctions(JITDylib &PlatformJD);

  void getInitializersBuildSequencePhase(SendInitializerSequenceFn SendResult,
                                         JITDylib &JD,
                                         std::vector<JITDylibSP> DFSLinkOrder);

  void getInitializersLookupPhase(SendInitializerSequenceFn SendResult,
                                  JITDylib &JD);

  void rt_getInitializers(SendInitializerSequenceFn SendResult,
                          StringRef JDName);

  void rt_getDeinitializers(SendDeinitializerSequenceFn SendResult,
                            ExecutorAddr Handle);

  void rt_lookupSymbol(SendSymbolAddressFn SendResult, ExecutorAddr Handle,
                       StringRef SymbolName);

  // Records the addresses of runtime symbols used by the platform.
  Error bootstrapELFNixRuntime(JITDylib &PlatformJD);

  Error registerInitInfo(JITDylib &JD,
                         ArrayRef<jitlink::Section *> InitSections);

  Error registerPerObjectSections(const ELFPerObjectSectionsToRegister &POSR);

  Expected<uint64_t> createPThreadKey();

  ExecutionSession &ES;
  ObjectLinkingLayer &ObjLinkingLayer;

  SymbolStringPtr DSOHandleSymbol;
  std::atomic<bool> RuntimeBootstrapped{false};

  ExecutorAddr orc_rt_elfnix_platform_bootstrap;
  ExecutorAddr orc_rt_elfnix_platform_shutdown;
  ExecutorAddr orc_rt_elfnix_register_object_sections;
  ExecutorAddr orc_rt_elfnix_create_pthread_key;

  DenseMap<JITDylib *, SymbolLookupSet> RegisteredInitSymbols;

  // InitSeqs gets its own mutex to avoid locking the whole session when
  // aggregating data from JITLink.
  std::mutex PlatformMutex;
  DenseMap<JITDylib *, ELFNixJITDylibInitializers> InitSeqs;
  std::vector<ELFPerObjectSectionsToRegister> BootstrapPOSRs;

  DenseMap<ExecutorAddr, JITDylib *> HandleAddrToJITDylib;
  DenseMap<JITDylib *, uint64_t> JITDylibToPThreadKey;
};

namespace shared {

using SPSELFPerObjectSectionsToRegister =
    SPSTuple<SPSExecutorAddrRange, SPSExecutorAddrRange>;

template <>
class SPSSerializationTraits<SPSELFPerObjectSectionsToRegister,
                             ELFPerObjectSectionsToRegister> {

public:
  static size_t size(const ELFPerObjectSectionsToRegister &MOPOSR) {
    return SPSELFPerObjectSectionsToRegister::AsArgList::size(
        MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const ELFPerObjectSectionsToRegister &MOPOSR) {
    return SPSELFPerObjectSectionsToRegister::AsArgList::serialize(
        OB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          ELFPerObjectSectionsToRegister &MOPOSR) {
    return SPSELFPerObjectSectionsToRegister::AsArgList::deserialize(
        IB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
  }
};

using SPSNamedExecutorAddrRangeSequenceMap =
    SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRangeSequence>>;

using SPSELFNixJITDylibInitializers =
    SPSTuple<SPSString, SPSExecutorAddr, SPSNamedExecutorAddrRangeSequenceMap>;

using SPSELFNixJITDylibInitializerSequence =
    SPSSequence<SPSELFNixJITDylibInitializers>;

/// Serialization traits for ELFNixJITDylibInitializers.
template <>
class SPSSerializationTraits<SPSELFNixJITDylibInitializers,
                             ELFNixJITDylibInitializers> {
public:
  static size_t size(const ELFNixJITDylibInitializers &MOJDIs) {
    return SPSELFNixJITDylibInitializers::AsArgList::size(
        MOJDIs.Name, MOJDIs.DSOHandleAddress, MOJDIs.InitSections);
  }

  static bool serialize(SPSOutputBuffer &OB,
                        const ELFNixJITDylibInitializers &MOJDIs) {
    return SPSELFNixJITDylibInitializers::AsArgList::serialize(
        OB, MOJDIs.Name, MOJDIs.DSOHandleAddress, MOJDIs.InitSections);
  }

  static bool deserialize(SPSInputBuffer &IB,
                          ELFNixJITDylibInitializers &MOJDIs) {
    return SPSELFNixJITDylibInitializers::AsArgList::deserialize(
        IB, MOJDIs.Name, MOJDIs.DSOHandleAddress, MOJDIs.InitSections);
  }
};

using SPSELFJITDylibDeinitializers = SPSEmpty;

using SPSELFJITDylibDeinitializerSequence =
    SPSSequence<SPSELFJITDylibDeinitializers>;

template <>
class SPSSerializationTraits<SPSELFJITDylibDeinitializers,
                             ELFNixJITDylibDeinitializers> {
public:
  static size_t size(const ELFNixJITDylibDeinitializers &MOJDDs) { return 0; }

  static bool serialize(SPSOutputBuffer &OB,
                        const ELFNixJITDylibDeinitializers &MOJDDs) {
    return true;
  }

  static bool deserialize(SPSInputBuffer &IB,
                          ELFNixJITDylibDeinitializers &MOJDDs) {
    MOJDDs = ELFNixJITDylibDeinitializers();
    return true;
  }
};

} // end namespace shared
} // end namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_ELFNIXPLATFORM_H
//===------ Mangling.h -- Name Mangling Utilities for ORC -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Name mangling utilities for ORC.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_MANGLING_H
#define LLVM_EXECUTIONENGINE_ORC_MANGLING_H

#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"

namespace llvm {
namespace orc {

/// Mangles symbol names then uniques them in the context of an
/// ExecutionSession.
class MangleAndInterner {
public:
  MangleAndInterner(ExecutionSession &ES, const DataLayout &DL);
  SymbolStringPtr operator()(StringRef Name);

private:
  ExecutionSession &ES;
  const DataLayout &DL;
};
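
// A minimal usage sketch (assumes an ExecutionSession ES and a DataLayout DL):
//
//   MangleAndInterner Mangle(ES, DL);
//   SymbolStringPtr MainSym = Mangle("main"); // mangled and interned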

/// Maps IR global values to their linker symbol names / flags.
///
/// This utility can be used when adding new IR globals in the JIT.
class IRSymbolMapper {
public:
  struct ManglingOptions {
    bool EmulatedTLS = false;
  };

  using SymbolNameToDefinitionMap = std::map<SymbolStringPtr, GlobalValue *>;

  /// Add mangled symbols for the given GlobalValues to SymbolFlags.
  /// If a SymbolToDefinitionMap pointer is supplied then it will be populated
  /// with Name-to-GlobalValue* mappings. Note that this mapping is not
  /// necessarily one-to-one: thread-local GlobalValues, for example, may
  /// produce more than one symbol, in which case the map will contain duplicate
  /// values.
  static void add(ExecutionSession &ES, const ManglingOptions &MO,
                  ArrayRef<GlobalValue *> GVs, SymbolFlagsMap &SymbolFlags,
                  SymbolNameToDefinitionMap *SymbolToDefinition = nullptr);
};
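
// A minimal usage sketch (GV is assumed to be a GlobalValue* from a module
// being added to the JIT):
//
//   SymbolFlagsMap SymbolFlags;
//   IRSymbolMapper::SymbolNameToDefinitionMap SymbolToDef;
//   IRSymbolMapper::add(ES, IRSymbolMapper::ManglingOptions(), {GV},
//                       SymbolFlags, &SymbolToDef);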

} // End namespace orc
} // End namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_MANGLING_H
//===---- DebugObjectManagerPlugin.h - JITLink debug objects ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// ObjectLinkingLayer plugin for emitting debug objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_DEBUGOBJECTMANAGERPLUGIN_H
#define LLVM_EXECUTIONENGINE_ORC_DEBUGOBJECTMANAGERPLUGIN_H

#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/TargetParser/Triple.h"

#include <functional>
#include <map>
#include <memory>
#include <mutex>

namespace llvm {
namespace orc {

class DebugObject;

/// Creates and manages DebugObjects for JITLink artifacts.
///
/// DebugObjects are created when linking for a MaterializationResponsibility
/// starts. They are pending as long as materialization is in progress.
///
/// There can only be one pending DebugObject per MaterializationResponsibility.
/// If materialization fails, pending DebugObjects are discarded.
///
/// Once executable code for the MaterializationResponsibility is emitted, the
/// corresponding DebugObject is finalized to target memory and the provided
/// DebugObjectRegistrar is notified. Ownership of DebugObjects remains with the
/// plugin.
///
class DebugObjectManagerPlugin : public ObjectLinkingLayer::Plugin {
public:
  // DEPRECATED - Please specify options explicitly
  DebugObjectManagerPlugin(ExecutionSession &ES,
                           std::unique_ptr<DebugObjectRegistrar> Target);

  /// Create the plugin to submit DebugObjects for JITLink artifacts. For all
  /// options the recommended setting is true.
  ///
  /// RequireDebugSections:
  ///   Submit debug objects to the executor only if they contain actual debug
  ///   info. Turning this off may allow minimal debugging based on raw symbol
  ///   names. Note that this may cause significant memory and transport
  ///   overhead for objects built with a release configuration.
  ///
  /// AutoRegisterCode:
  ///   Notify the debugger for each new debug object. This is a good default
  ///   mode, but it may cause significant overhead when adding many modules in
  ///   sequence. When turning this off, the user has to issue the call to
  ///   __jit_debug_register_code() on the executor side manually.
  ///
  DebugObjectManagerPlugin(ExecutionSession &ES,
                           std::unique_ptr<DebugObjectRegistrar> Target,
                           bool RequireDebugSections, bool AutoRegisterCode);
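
  // A minimal usage sketch (assumes an ObjectLinkingLayer ObjLayer; the
  // registrar is created via createJITLoaderGDBRegistrar from
  // EPCDebugObjectRegistrar.h):
  //
  //   ObjLayer.addPlugin(std::make_unique<DebugObjectManagerPlugin>(
  //       ES, cantFail(createJITLoaderGDBRegistrar(ES)),
  //       /*RequireDebugSections=*/true, /*AutoRegisterCode=*/true));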
  ~DebugObjectManagerPlugin();

  void notifyMaterializing(MaterializationResponsibility &MR,
                           jitlink::LinkGraph &G, jitlink::JITLinkContext &Ctx,
                           MemoryBufferRef InputObject) override;

  Error notifyEmitted(MaterializationResponsibility &MR) override;
  Error notifyFailed(MaterializationResponsibility &MR) override;
  Error notifyRemovingResources(JITDylib &JD, ResourceKey K) override;

  void notifyTransferringResources(JITDylib &JD, ResourceKey DstKey,
                                   ResourceKey SrcKey) override;

  void modifyPassConfig(MaterializationResponsibility &MR,
                        jitlink::LinkGraph &LG,
                        jitlink::PassConfiguration &PassConfig) override;

private:
  ExecutionSession &ES;

  using OwnedDebugObject = std::unique_ptr<DebugObject>;
  std::map<MaterializationResponsibility *, OwnedDebugObject> PendingObjs;
  std::map<ResourceKey, std::vector<OwnedDebugObject>> RegisteredObjs;

  std::mutex PendingObjsLock;
  std::mutex RegisteredObjsLock;

  std::unique_ptr<DebugObjectRegistrar> Target;
  bool RequireDebugSections;
  bool AutoRegisterCode;
};

} // namespace orc
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_DEBUGOBJECTMANAGERPLUGIN_H
//===- MemoryMapper.h - Cross-process memory mapper -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Cross-process (and in-process) memory mapping and transfer
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_ORC_MEMORYMAPPER_H
#define LLVM_EXECUTIONENGINE_ORC_MEMORYMAPPER_H

#include "llvm/ExecutionEngine/Orc/Core.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
#include "llvm/Support/Process.h"

#include <mutex>

namespace llvm {
namespace orc {

/// Manages mapping, content transfer and protections for JIT memory
class MemoryMapper {
public:
  /// Represents a single allocation containing multiple segments and
  /// initialization and deinitialization actions
  struct AllocInfo {
    struct SegInfo {
      ExecutorAddrDiff Offset;
      const char *WorkingMem;
      size_t ContentSize;
      size_t ZeroFillSize;
      AllocGroup AG;
    };

    ExecutorAddr MappingBase;
    std::vector<SegInfo> Segments;
    shared::AllocActions Actions;
  };

  using OnReservedFunction = unique_function<void(Expected<ExecutorAddrRange>)>;

  /// Page size of the target process
  virtual unsigned int getPageSize() = 0;

  /// Reserves address space in the executor process
  virtual void reserve(size_t NumBytes, OnReservedFunction OnReserved) = 0;

  /// Provides working memory
  virtual char *prepare(ExecutorAddr Addr, size_t ContentSize) = 0;

  using OnInitializedFunction = unique_function<void(Expected<ExecutorAddr>)>;

  /// Ensures that executor memory is synchronized with the working copy
  /// memory, sends functions to be called after initialization and before
  /// deinitialization, and applies memory protections.
  ///
  /// Returns a unique address identifying the allocation. This address should
  /// be passed to deinitialize to run deallocation actions (and reset
  /// permissions where possible).
  virtual void initialize(AllocInfo &AI,
                          OnInitializedFunction OnInitialized) = 0;

  using OnDeinitializedFunction = unique_function<void(Error)>;

  /// Runs previously specified deinitialization actions.
  /// The executor addresses returned by initialize should be passed in.
  virtual void deinitialize(ArrayRef<ExecutorAddr> Allocations,
                            OnDeinitializedFunction OnDeInitialized) = 0;

  using OnReleasedFunction = unique_function<void(Error)>;

  /// Release address space acquired through reserve()
  virtual void release(ArrayRef<ExecutorAddr> Reservations,
                       OnReleasedFunction OnRelease) = 0;

  virtual ~MemoryMapper();
};
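
// The expected lifecycle of a mapping is: reserve() once per region, then
// prepare()/initialize() per allocation, deinitialize() when an allocation is
// no longer needed, and release() to return the reserved address space. A
// minimal sketch (error handling elided; Mapper, AI, and the sizes are
// assumed):
//
//   Mapper->reserve(TotalSize, [&](Expected<ExecutorAddrRange> R) {
//     char *WorkingMem = Mapper->prepare(R->Start, ContentSize);
//     // ... copy segment contents into WorkingMem, fill in AllocInfo AI ...
//     Mapper->initialize(AI, [&](Expected<ExecutorAddr> Base) {
//       // *Base identifies this allocation for a later deinitialize() call.
//     });
//   });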

class InProcessMemoryMapper : public MemoryMapper {
public:
  InProcessMemoryMapper(size_t PageSize);

  static Expected<std::unique_ptr<InProcessMemoryMapper>> Create();

  unsigned int getPageSize() override { return PageSize; }

  void reserve(size_t NumBytes, OnReservedFunction OnReserved) override;

  void initialize(AllocInfo &AI, OnInitializedFunction OnInitialized) override;

  char *prepare(ExecutorAddr Addr, size_t ContentSize) override;

  void deinitialize(ArrayRef<ExecutorAddr> Allocations,
                    OnDeinitializedFunction OnDeInitialized) override;

  void release(ArrayRef<ExecutorAddr> Reservations,
               OnReleasedFunction OnRelease) override;

  ~InProcessMemoryMapper() override;

private:
  struct Allocation {
    size_t Size;
    std::vector<shared::WrapperFunctionCall> DeinitializationActions;
  };
  using AllocationMap = DenseMap<ExecutorAddr, Allocation>;

  struct Reservation {
    size_t Size;
    std::vector<ExecutorAddr> Allocations;
  };
  using ReservationMap = DenseMap<void *, Reservation>;

  std::mutex Mutex;
  ReservationMap Reservations;
  AllocationMap Allocations;

  size_t PageSize;
};

class SharedMemoryMapper final : public MemoryMapper {
public:
  struct SymbolAddrs {
    ExecutorAddr Instance;
    ExecutorAddr Reserve;
    ExecutorAddr Initialize;
    ExecutorAddr Deinitialize;
    ExecutorAddr Release;
  };

  SharedMemoryMapper(ExecutorProcessControl &EPC, SymbolAddrs SAs,
                     size_t PageSize);

  static Expected<std::unique_ptr<SharedMemoryMapper>>
  Create(ExecutorProcessControl &EPC, SymbolAddrs SAs);

  unsigned int getPageSize() override { return PageSize; }

  void reserve(size_t NumBytes, OnReservedFunction OnReserved) override;

  char *prepare(ExecutorAddr Addr, size_t ContentSize) override;

  void initialize(AllocInfo &AI, OnInitializedFunction OnInitialized) override;

  void deinitialize(ArrayRef<ExecutorAddr> Allocations,
                    OnDeinitializedFunction OnDeInitialized) override;

  void release(ArrayRef<ExecutorAddr> Reservations,
               OnReleasedFunction OnRelease) override;

  ~SharedMemoryMapper() override;

private:
  struct Reservation {
    void *LocalAddr;
    size_t Size;
  };

  ExecutorProcessControl &EPC;
  SymbolAddrs SAs;

  std::mutex Mutex;

  std::map<ExecutorAddr, Reservation> Reservations;

  size_t PageSize;
};

} // namespace orc
} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_ORC_MEMORYMAPPER_H
//===- JITEventListener.h - Exposes events from JIT compilation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the JITEventListener interface, which lets users get
// callbacks when significant events happen during the JIT compilation process.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
#define LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H

#include "llvm-c/ExecutionEngine.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/CBindingWrapping.h"
#include <cstdint>

namespace llvm {

class IntelJITEventsWrapper;
class OProfileWrapper;

namespace object {

class ObjectFile;

} // end namespace object

/// JITEventListener - Abstract interface for use by the JIT to notify clients
/// about significant events during compilation. For example, to notify
/// profilers and debuggers that need to know where functions have been emitted.
///
/// The default implementation of each method does nothing.
class JITEventListener {
public:
  using ObjectKey = uint64_t;

  JITEventListener() = default;
  virtual ~JITEventListener() = default;

  /// notifyObjectLoaded - Called after an object has had its sections allocated
  /// and addresses assigned to all symbols. Note: Section memory will not have
  /// been relocated yet. notifyFunctionLoaded will not be called for
  /// individual functions in the object.
  ///
  /// ELF-specific information: the ObjectImage contains the generated object
  /// image with section headers updated to reflect the address at which
  /// sections were loaded and with relocations performed in-place on debug
  /// sections.
  virtual void notifyObjectLoaded(ObjectKey K, const object::ObjectFile &Obj,
                                  const RuntimeDyld::LoadedObjectInfo &L) {}

  /// notifyFreeingObject - Called just before the memory associated with
  /// a previously emitted object is released.
  virtual void notifyFreeingObject(ObjectKey K) {}

  // Get a pointer to the GDB debugger registration listener.
  static JITEventListener *createGDBRegistrationListener();

#if LLVM_USE_INTEL_JITEVENTS
  // Construct an IntelJITEventListener
  static JITEventListener *createIntelJITEventListener();

  // Construct an IntelJITEventListener with a test Intel JIT API implementation
  static JITEventListener *createIntelJITEventListener(
                                      IntelJITEventsWrapper* AlternativeImpl);
#else
  static JITEventListener *createIntelJITEventListener() { return nullptr; }

  static JITEventListener *createIntelJITEventListener(
                                      IntelJITEventsWrapper* AlternativeImpl) {
    return nullptr;
  }
#endif // LLVM_USE_INTEL_JITEVENTS

#if LLVM_USE_OPROFILE
  // Construct an OProfileJITEventListener
  static JITEventListener *createOProfileJITEventListener();

  // Construct an OProfileJITEventListener with a test opagent implementation
  static JITEventListener *createOProfileJITEventListener(
                                      OProfileWrapper* AlternativeImpl);
#else
  static JITEventListener *createOProfileJITEventListener() { return nullptr; }

  static JITEventListener *createOProfileJITEventListener(
                                      OProfileWrapper* AlternativeImpl) {
    return nullptr;
  }
#endif // LLVM_USE_OPROFILE

#if LLVM_USE_PERF
  static JITEventListener *createPerfJITEventListener();
#else
  static JITEventListener *createPerfJITEventListener()
  {
    return nullptr;
  }
#endif // LLVM_USE_PERF

private:
  virtual void anchor();
};
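
// A minimal sketch of a client-defined listener (hypothetical class name):
//
//   class LoggingListener : public JITEventListener {
//     void notifyObjectLoaded(ObjectKey K, const object::ObjectFile &Obj,
//                             const RuntimeDyld::LoadedObjectInfo &L) override {
//       errs() << "JIT loaded object with key " << K << "\n";
//     }
//     void notifyFreeingObject(ObjectKey K) override {
//       errs() << "JIT freeing object with key " << K << "\n";
//     }
//   };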

DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITEventListener, LLVMJITEventListenerRef)

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
//===- ExecutionEngine.h - Abstract Execution Engine Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the abstract interface that implements execution support
// for LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
#define LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H

#include "llvm-c/ExecutionEngine.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace llvm {

class Constant;
class Function;
struct GenericValue;
class GlobalValue;
class GlobalVariable;
class JITEventListener;
class MCJITMemoryManager;
class ObjectCache;
class RTDyldMemoryManager;
class Triple;
class Type;

namespace object {

class Archive;
class ObjectFile;

} // end namespace object

/// Helper class for helping synchronize access to the global address map
/// table.  Access to this class should be serialized under a mutex.
class ExecutionEngineState {
public:
  using GlobalAddressMapTy = StringMap<uint64_t>;

private:
  /// GlobalAddressMap - A mapping from LLVM global symbol names to the
  /// addresses at which they have been actualized.
  GlobalAddressMapTy GlobalAddressMap;

  /// GlobalAddressReverseMap - This is the reverse mapping of GlobalAddressMap,
  /// used to convert raw addresses into the LLVM global value that is emitted
  /// at the address.  This map is not computed unless getGlobalValueAtAddress
  /// is called at some point.
  std::map<uint64_t, std::string> GlobalAddressReverseMap;

public:
  GlobalAddressMapTy &getGlobalAddressMap() {
    return GlobalAddressMap;
  }

  std::map<uint64_t, std::string> &getGlobalAddressReverseMap() {
    return GlobalAddressReverseMap;
  }

  /// Erase an entry from the mapping table.
  ///
  /// \returns The address that \p Name was mapped to.
  uint64_t RemoveMapping(StringRef Name);
};

using FunctionCreator = std::function<void *(const std::string &)>;

/// Abstract interface for implementations that execute LLVM modules,
/// designed to support both interpreter and just-in-time (JIT) compiler
/// implementations.
class ExecutionEngine {
  /// The state object holding the global address mapping, which must be
  /// accessed synchronously.
  //
  // FIXME: There is no particular need for the entire map to be
  // synchronized.  Wouldn't a reader-writer design be better here?
  ExecutionEngineState EEState;

  /// The target data for the platform for which execution is being performed.
  ///
  /// Note: the DataLayout is LLVMContext-specific because it has an
  /// internal cache based on type pointers. This makes it unsafe to reuse an
  /// ExecutionEngine across contexts; we don't enforce this rule, but
  /// undefined behavior can occur if the user tries to do so.
  const DataLayout DL;

  /// Whether lazy JIT compilation is enabled.
  bool CompilingLazily;

  /// Whether JIT compilation of external global variables is allowed.
  bool GVCompilationDisabled;

  /// Whether the JIT should perform lookups of external symbols (e.g.,
  /// using dlsym).
  bool SymbolSearchingDisabled;

  /// Whether the JIT should verify IR modules during compilation.
  bool VerifyModules;

  friend class EngineBuilder;  // To allow access to JITCtor and InterpCtor.

protected:
  /// The list of Modules that we are JIT'ing from.  We use a SmallVector to
  /// optimize for the case where there is only one module.
  SmallVector<std::unique_ptr<Module>, 1> Modules;

  /// getMemoryForGV - Allocate memory for a global variable.
  virtual char *getMemoryForGV(const GlobalVariable *GV);

  static ExecutionEngine *(*MCJITCtor)(
      std::unique_ptr<Module> M, std::string *ErrorStr,
      std::shared_ptr<MCJITMemoryManager> MM,
      std::shared_ptr<LegacyJITSymbolResolver> SR,
      std::unique_ptr<TargetMachine> TM);

  static ExecutionEngine *(*InterpCtor)(std::unique_ptr<Module> M,
                                        std::string *ErrorStr);

  /// LazyFunctionCreator - If an unknown function is needed, this function
  /// pointer is invoked to create it.  If this returns null, the JIT will
  /// abort.
  FunctionCreator LazyFunctionCreator;

  /// getMangledName - Get mangled name.
  std::string getMangledName(const GlobalValue *GV);

  std::string ErrMsg;

public:
  /// lock - This lock protects the ExecutionEngine and MCJIT classes. It must
  /// be held while changing the internal state of any of those classes.
  sys::Mutex lock;

  //===--------------------------------------------------------------------===//
  //  ExecutionEngine Startup
  //===--------------------------------------------------------------------===//

  virtual ~ExecutionEngine();

  /// Add a Module to the list of modules that we can JIT from.
  virtual void addModule(std::unique_ptr<Module> M) {
    Modules.push_back(std::move(M));
  }

  /// addObjectFile - Add an ObjectFile to the execution engine.
  ///
  /// This method is only supported by MCJIT.  MCJIT will immediately load the
  /// object into memory and add its symbols to the list used to resolve
  /// external symbols while preparing other objects for execution.
  ///
  /// Objects added using this function will not be made executable until
  /// needed by another object.
  ///
  /// MCJIT will take ownership of the ObjectFile.
  virtual void addObjectFile(std::unique_ptr<object::ObjectFile> O);
  virtual void addObjectFile(object::OwningBinary<object::ObjectFile> O);

  /// addArchive - Add an Archive to the execution engine.
  ///
  /// This method is only supported by MCJIT.  MCJIT will use the archive to
  /// resolve external symbols in objects it is loading.  If a symbol is found
  /// in the Archive the contained object file will be extracted (in memory)
  /// and loaded for possible execution.
  virtual void addArchive(object::OwningBinary<object::Archive> A);

  //===--------------------------------------------------------------------===//

  const DataLayout &getDataLayout() const { return DL; }

  /// removeModule - Removes a Module from the list of modules, but does not
  /// free the module's memory. Returns true if M is found, in which case the
  /// caller assumes responsibility for deleting the module.
  //
  // FIXME: This stealth ownership transfer is horrible. This will probably be
  //        fixed by deleting ExecutionEngine.
  virtual bool removeModule(Module *M);

  /// FindFunctionNamed - Search all of the active modules to find the function
  /// that defines FnName.  This is a very slow operation and shouldn't be used
  /// for general code.
  virtual Function *FindFunctionNamed(StringRef FnName);

  /// FindGlobalVariableNamed - Search all of the active modules to find the
  /// global variable that defines Name.  This is a very slow operation and
  /// shouldn't be used for general code.
  virtual GlobalVariable *FindGlobalVariableNamed(StringRef Name, bool AllowInternal = false);

  /// runFunction - Execute the specified function with the specified arguments,
  /// and return the result.
  ///
  /// For MCJIT execution engines, clients are encouraged to use the
  /// "GetFunctionAddress" method (rather than runFunction) and cast the
  /// returned uint64_t to the desired function pointer type. However, for
  /// backwards compatibility MCJIT's implementation can execute 'main-like'
  /// function (i.e. those returning void or int, and taking either no
  /// arguments or (int, char*[])).
  virtual GenericValue runFunction(Function *F,
                                   ArrayRef<GenericValue> ArgValues) = 0;

  /// getPointerToNamedFunction - This method returns the address of the
  /// specified function by using the dlsym function call.  As such it is only
  /// useful for resolving library symbols, not code generated symbols.
  ///
  /// If AbortOnFailure is false and no function with the given name is
  /// found, this function silently returns a null pointer. Otherwise,
  /// it prints a message to stderr and aborts.
  ///
  /// This function is deprecated for the MCJIT execution engine.
  virtual void *getPointerToNamedFunction(StringRef Name,
                                          bool AbortOnFailure = true) = 0;

  /// mapSectionAddress - map a section to its target address space value.
  /// Map the address of a JIT section as returned from the memory manager
  /// to the address in the target process as the running code will see it.
  /// This is the address which will be used for relocation resolution.
  virtual void mapSectionAddress(const void *LocalAddress,
                                 uint64_t TargetAddress) {
    llvm_unreachable("Re-mapping of section addresses not supported with this "
                     "EE!");
  }

  /// generateCodeForModule - Run code generation for the specified module and
  /// load it into memory.
  ///
  /// When this function has completed, all code and data for the specified
  /// module, and any module on which this module depends, will be generated
  /// and loaded into memory, but relocations will not yet have been applied
  /// and all memory will be readable and writable but not executable.
  ///
  /// This function is primarily useful when generating code for an external
  /// target, allowing the client an opportunity to remap section addresses
  /// before relocations are applied.  Clients that intend to execute code
  /// locally can use the getFunctionAddress call, which will generate code
  /// and apply final preparations all in one step.
  ///
  /// This method has no effect for the interpreter.
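  ///
  /// Illustrative remote-target sketch (assumes a Module *M already added to
  /// EE; LocalTextBase and RemoteTextBase are hypothetical, client-chosen
  /// addresses; not part of the original header):
  /// \code
  ///   EE->generateCodeForModule(M);   // codegen; relocations not yet applied
  ///   EE->mapSectionAddress(LocalTextBase, RemoteTextBase);
  ///   EE->finalizeObject();           // apply relocations with the new map
  /// \endcode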
  virtual void generateCodeForModule(Module *M) {}

  /// finalizeObject - ensure the module is fully processed and is usable.
  ///
  /// It is the user-level function for completing the process of making the
  /// object usable for execution.  It should be called after sections within an
  /// object have been relocated using mapSectionAddress.  When this method is
  /// called the MCJIT execution engine will reapply relocations for a loaded
  /// object.  This method has no effect for the interpreter.
  ///
  /// Any error is recorded internally; check hasError() and retrieve the
  /// message with getErrorMessage().
  virtual void finalizeObject() {}

  /// Returns true if an error has been recorded.
  bool hasError() const { return !ErrMsg.empty(); }

  /// Clear the error message.
  void clearErrorMessage() { ErrMsg.clear(); }

  /// Returns the most recent error message.
  const std::string &getErrorMessage() const { return ErrMsg; }

  /// runStaticConstructorsDestructors - This method is used to execute all of
  /// the static constructors or destructors for a program.
  ///
  /// \param isDtors - Run the destructors instead of constructors.
  virtual void runStaticConstructorsDestructors(bool isDtors);

  /// This method is used to execute all of the static constructors or
  /// destructors for a particular module.
  ///
  /// \param isDtors - Run the destructors instead of constructors.
  void runStaticConstructorsDestructors(Module &module, bool isDtors);

  /// runFunctionAsMain - This is a helper function which wraps runFunction to
  /// handle the common task of starting up main with the specified argc, argv,
  /// and envp parameters.
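  ///
  /// For example (illustrative; assumes a Function *MainFn and a
  /// null-terminated envp array):
  /// \code
  ///   std::vector<std::string> Args = {"prog", "-v"};
  ///   int RC = EE->runFunctionAsMain(MainFn, Args, envp);
  /// \endcode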
  int runFunctionAsMain(Function *Fn, const std::vector<std::string> &argv,
                        const char * const * envp);

  /// addGlobalMapping - Tell the execution engine that the specified global is
  /// at the specified location.  This is used internally as functions are JIT'd
  /// and as global variables are laid out in memory.  It can and should also be
  /// used by clients of the EE that want an LLVM global to overlay existing
  /// data in memory. Values to be mapped should be named, and have
  /// external or weak linkage. Mappings are automatically removed when their
  /// GlobalValue is destroyed.
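  ///
  /// Illustrative sketch (maps a host function over a hypothetical external
  /// declaration named "host_log"; not part of the original header):
  /// \code
  ///   extern "C" void host_log(const char *Msg);
  ///   EE->addGlobalMapping("host_log", (uint64_t)(uintptr_t)&host_log);
  /// \endcode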
  void addGlobalMapping(const GlobalValue *GV, void *Addr);
  void addGlobalMapping(StringRef Name, uint64_t Addr);

  /// clearAllGlobalMappings - Clear all global mappings and start over again,
  /// for use in dynamic compilation scenarios to move globals.
  void clearAllGlobalMappings();

  /// clearGlobalMappingsFromModule - Clear all global mappings that came from a
  /// particular module, because it has been removed from the JIT.
  void clearGlobalMappingsFromModule(Module *M);

  /// updateGlobalMapping - Replace an existing mapping for GV with a new
  /// address.  This updates both maps as required.  If "Addr" is null, the
  /// entry for the global is removed from the mappings.  This returns the old
  /// value of the pointer, or null if it was not in the map.
  uint64_t updateGlobalMapping(const GlobalValue *GV, void *Addr);
  uint64_t updateGlobalMapping(StringRef Name, uint64_t Addr);

  /// getAddressToGlobalIfAvailable - This returns the address of the specified
  /// global symbol.
  uint64_t getAddressToGlobalIfAvailable(StringRef S);

  /// getPointerToGlobalIfAvailable - This returns the address of the specified
  /// global value if it has already been codegen'd, otherwise it returns
  /// null.
  void *getPointerToGlobalIfAvailable(StringRef S);
  void *getPointerToGlobalIfAvailable(const GlobalValue *GV);

  /// getPointerToGlobal - This returns the address of the specified global
  /// value. This may involve code generation if it's a function.
  ///
  /// This function is deprecated for the MCJIT execution engine.  Use
  /// getGlobalValueAddress instead.
  void *getPointerToGlobal(const GlobalValue *GV);

  /// getPointerToFunction - The different EE's represent function bodies in
  /// different ways.  They should each implement this to say what a function
  /// pointer should look like.  When F is destroyed, the ExecutionEngine will
  /// remove its global mapping and free any machine code.  Be sure no threads
  /// are running inside F when that happens.
  ///
  /// This function is deprecated for the MCJIT execution engine.  Use
  /// getFunctionAddress instead.
  virtual void *getPointerToFunction(Function *F) = 0;

  /// getPointerToFunctionOrStub - If the specified function has been
  /// code-gen'd, return a pointer to the function.  If not, compile it, or use
  /// a stub to implement lazy compilation if available.  See
  /// getPointerToFunction for the requirements on destroying F.
  ///
  /// This function is deprecated for the MCJIT execution engine.  Use
  /// getFunctionAddress instead.
  virtual void *getPointerToFunctionOrStub(Function *F) {
    // Default implementation, just codegen the function.
    return getPointerToFunction(F);
  }

  /// getGlobalValueAddress - Return the address of the specified global
  /// value. This may involve code generation.
  ///
  /// This function should not be called with the interpreter engine.
  virtual uint64_t getGlobalValueAddress(const std::string &Name) {
    // Default implementation for the interpreter.  MCJIT will override this.
    // JIT and interpreter clients should use getPointerToGlobal instead.
    return 0;
  }

  /// getFunctionAddress - Return the address of the specified function.
  /// This may involve code generation.
  virtual uint64_t getFunctionAddress(const std::string &Name) {
    // Default implementation for the interpreter.  MCJIT will override this.
    // Interpreter clients should use getPointerToFunction instead.
    return 0;
  }

  /// getGlobalValueAtAddress - Return the LLVM global value object that starts
  /// at the specified address.
  ///
  const GlobalValue *getGlobalValueAtAddress(void *Addr);

  /// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr.
  /// Ptr is the address of the memory at which to store Val, cast to
  /// GenericValue *.  It is not a pointer to a GenericValue containing the
  /// address at which to store Val.
  void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
                          Type *Ty);

  void InitializeMemory(const Constant *Init, void *Addr);

  /// getOrEmitGlobalVariable - Return the address of the specified global
  /// variable, possibly emitting it to memory if needed.  This is used by the
  /// Emitter.
  ///
  /// This function is deprecated for the MCJIT execution engine.  Use
  /// getGlobalValueAddress instead.
  virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) {
    return getPointerToGlobal((const GlobalValue *)GV);
  }

  /// Registers a listener to be called back on various events within
  /// the JIT.  See JITEventListener.h for more details.  Does not
  /// take ownership of the argument.  The argument may be NULL, in
  /// which case these functions do nothing.
  virtual void RegisterJITEventListener(JITEventListener *) {}
  virtual void UnregisterJITEventListener(JITEventListener *) {}

  /// Sets the pre-compiled object cache.  The ownership of the ObjectCache is
  /// not changed.  Supported by MCJIT but not the interpreter.
  virtual void setObjectCache(ObjectCache *) {
    llvm_unreachable("No support for an object cache");
  }

  /// setProcessAllSections (MCJIT Only): By default, only sections that are
  /// "required for execution" are passed to the RTDyldMemoryManager, and other
  /// sections are discarded. Passing 'true' to this method will cause
  /// RuntimeDyld to pass all sections to its RTDyldMemoryManager regardless
  /// of whether they are "required to execute" in the usual sense.
  ///
  /// Rationale: Some MCJIT clients want to be able to inspect metadata
  /// sections (e.g. Dwarf, Stack-maps) to enable functionality or analyze
  /// performance. Passing these sections to the memory manager allows the
  /// client to make policy about the relevant sections, rather than having
  /// MCJIT do it.
  virtual void setProcessAllSections(bool ProcessAllSections) {
    llvm_unreachable("No support for ProcessAllSections option");
  }

  /// Return the target machine (if available).
  virtual TargetMachine *getTargetMachine() { return nullptr; }

  /// DisableLazyCompilation - When lazy compilation is off (the default), the
  /// JIT will eagerly compile every function reachable from the argument to
  /// getPointerToFunction.  If lazy compilation is turned on, the JIT will only
  /// compile the one function and emit stubs to compile the rest when they're
  /// first called.  If lazy compilation is turned off again while some lazy
  /// stubs are still around, and one of those stubs is called, the program will
  /// abort.
  ///
  /// In order to safely compile lazily in a threaded program, the user must
  /// ensure that 1) only one thread at a time can call any particular lazy
  /// stub, and 2) any thread modifying LLVM IR must hold the JIT's lock
  /// (ExecutionEngine::lock) or otherwise ensure that no other thread calls a
  /// lazy stub.  See http://llvm.org/PR5184 for details.
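  ///
  /// For example (illustrative):
  /// \code
  ///   EE->DisableLazyCompilation(false);  // opt in to lazy stubs
  ///   assert(EE->isCompilingLazily());
  /// \endcode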
  void DisableLazyCompilation(bool Disabled = true) {
    CompilingLazily = !Disabled;
  }
  bool isCompilingLazily() const {
    return CompilingLazily;
  }

  /// DisableGVCompilation - If called, the JIT will abort if it's asked to
  /// allocate space and populate a GlobalVariable that is not internal to
  /// the module.
  void DisableGVCompilation(bool Disabled = true) {
    GVCompilationDisabled = Disabled;
  }
  bool isGVCompilationDisabled() const {
    return GVCompilationDisabled;
  }

  /// DisableSymbolSearching - If called, the JIT will not try to look up unknown
  /// symbols with dlsym.  A client can still use InstallLazyFunctionCreator to
  /// resolve symbols in a custom way.
  void DisableSymbolSearching(bool Disabled = true) {
    SymbolSearchingDisabled = Disabled;
  }
  bool isSymbolSearchingDisabled() const {
    return SymbolSearchingDisabled;
  }

  /// Enable/Disable IR module verification.
  ///
  /// Note: Module verification is enabled by default in Debug builds, and
  /// disabled by default in Release. Use this method to override the default.
  void setVerifyModules(bool Verify) {
    VerifyModules = Verify;
  }
  bool getVerifyModules() const {
    return VerifyModules;
  }

  /// InstallLazyFunctionCreator - If an unknown function is needed, the
  /// specified function pointer is invoked to create it.  If it returns null,
  /// the JIT will abort.
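  ///
  /// Illustrative sketch (my_hook is a hypothetical host function):
  /// \code
  ///   EE->InstallLazyFunctionCreator([](const std::string &Name) -> void * {
  ///     if (Name == "my_hook")
  ///       return (void *)&my_hook;
  ///     return nullptr;  // null => the JIT aborts
  ///   });
  /// \endcode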
  void InstallLazyFunctionCreator(FunctionCreator C) {
    LazyFunctionCreator = std::move(C);
  }

protected:
  ExecutionEngine(DataLayout DL) : DL(std::move(DL)) {}
  explicit ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M);
  explicit ExecutionEngine(std::unique_ptr<Module> M);

  void emitGlobals();

  void emitGlobalVariable(const GlobalVariable *GV);

  GenericValue getConstantValue(const Constant *C);
  void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr,
                           Type *Ty);

private:
  void Init(std::unique_ptr<Module> M);
};

namespace EngineKind {

  // These are actually bitmasks that get or-ed together.
  enum Kind {
    JIT         = 0x1,
    Interpreter = 0x2
  };
  const static Kind Either = (Kind)(JIT | Interpreter);

} // end namespace EngineKind

/// Builder class for ExecutionEngines. Use this by stack-allocating a builder,
/// chaining the various set* methods, and terminating it with a .create()
/// call.
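///
/// Illustrative sketch (assumes a std::unique_ptr<Module> M and a linked-in
/// MCJIT; not part of the original header):
/// \code
///   std::string Err;
///   ExecutionEngine *EE = EngineBuilder(std::move(M))
///                             .setEngineKind(EngineKind::JIT)
///                             .setErrorStr(&Err)
///                             .create();
///   if (!EE)
///     errs() << "Failed to create ExecutionEngine: " << Err << "\n";
/// \endcode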
class EngineBuilder {
private:
  std::unique_ptr<Module> M;
  EngineKind::Kind WhichEngine;
  std::string *ErrorStr;
  CodeGenOpt::Level OptLevel;
  std::shared_ptr<MCJITMemoryManager> MemMgr;
  std::shared_ptr<LegacyJITSymbolResolver> Resolver;
  TargetOptions Options;
  std::optional<Reloc::Model> RelocModel;
  std::optional<CodeModel::Model> CMModel;
  std::string MArch;
  std::string MCPU;
  SmallVector<std::string, 4> MAttrs;
  bool VerifyModules;
  bool EmulatedTLS = true;

public:
  /// Default constructor for EngineBuilder.
  EngineBuilder();

  /// Constructor for EngineBuilder.
  EngineBuilder(std::unique_ptr<Module> M);

  // Out-of-line since we don't have the def'n of RTDyldMemoryManager here.
  ~EngineBuilder();

  /// setEngineKind - Controls whether the user wants the interpreter, the JIT,
  /// or whichever engine works.  This option defaults to EngineKind::Either.
  EngineBuilder &setEngineKind(EngineKind::Kind w) {
    WhichEngine = w;
    return *this;
  }

  /// setMCJITMemoryManager - Sets the MCJIT memory manager to use. This allows
  /// clients to customize their memory allocation policies for the MCJIT. This
  /// is only appropriate for the MCJIT; setting this and configuring the builder
  /// to create anything other than MCJIT will cause a runtime error. If create()
  /// is called and is successful, the created engine takes ownership of the
  /// memory manager. This option defaults to NULL.
  EngineBuilder &setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager> mcjmm);

  EngineBuilder&
  setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);

  EngineBuilder &setSymbolResolver(std::unique_ptr<LegacyJITSymbolResolver> SR);

  /// setErrorStr - Set the error string to write to on error.  This option
  /// defaults to NULL.
  EngineBuilder &setErrorStr(std::string *e) {
    ErrorStr = e;
    return *this;
  }

  /// setOptLevel - Set the optimization level for the JIT.  This option
  /// defaults to CodeGenOpt::Default.
  EngineBuilder &setOptLevel(CodeGenOpt::Level l) {
    OptLevel = l;
    return *this;
  }

  /// setTargetOptions - Set the target options that the ExecutionEngine
  /// target is using. Defaults to TargetOptions().
  EngineBuilder &setTargetOptions(const TargetOptions &Opts) {
    Options = Opts;
    return *this;
  }

  /// setRelocationModel - Set the relocation model that the ExecutionEngine
  /// target is using. Defaults to target specific default "Reloc::Default".
  EngineBuilder &setRelocationModel(Reloc::Model RM) {
    RelocModel = RM;
    return *this;
  }

  /// setCodeModel - Set the CodeModel that the ExecutionEngine target
  /// data is using. Defaults to target specific default
  /// "CodeModel::JITDefault".
  EngineBuilder &setCodeModel(CodeModel::Model M) {
    CMModel = M;
    return *this;
  }

  /// setMArch - Override the architecture set by the Module's triple.
  EngineBuilder &setMArch(StringRef march) {
    MArch.assign(march.begin(), march.end());
    return *this;
  }

  /// setMCPU - Target a specific cpu type.
  EngineBuilder &setMCPU(StringRef mcpu) {
    MCPU.assign(mcpu.begin(), mcpu.end());
    return *this;
  }

  /// setVerifyModules - Set whether the JIT implementation should verify
  /// IR modules during compilation.
  EngineBuilder &setVerifyModules(bool Verify) {
    VerifyModules = Verify;
    return *this;
  }

  /// setMAttrs - Set cpu-specific attributes.
  template<typename StringSequence>
  EngineBuilder &setMAttrs(const StringSequence &mattrs) {
    MAttrs.clear();
    MAttrs.append(mattrs.begin(), mattrs.end());
    return *this;
  }

  void setEmulatedTLS(bool EmulatedTLS) {
    this->EmulatedTLS = EmulatedTLS;
  }

  TargetMachine *selectTarget();

  /// selectTarget - Pick a target either via -march or by guessing the native
  /// arch.  Add any CPU features specified via -mcpu or -mattr.
  TargetMachine *selectTarget(const Triple &TargetTriple,
                              StringRef MArch,
                              StringRef MCPU,
                              const SmallVectorImpl<std::string>& MAttrs);

  ExecutionEngine *create() {
    return create(selectTarget());
  }

  ExecutionEngine *create(TargetMachine *TM);
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionEngine, LLVMExecutionEngineRef)

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H

//===- RuntimeDyld.h - Run-time dynamic linker for MC-JIT -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Interface for the runtime dynamic linker facilities of the MC-JIT.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
#define LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H

#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <system_error>

namespace llvm {

namespace object {

template <typename T> class OwningBinary;

} // end namespace object

/// Base class for errors originating in RuntimeDyld, e.g. missing relocation
/// support.
class RuntimeDyldError : public ErrorInfo<RuntimeDyldError> {
public:
  static char ID;

  RuntimeDyldError(std::string ErrMsg) : ErrMsg(std::move(ErrMsg)) {}

  void log(raw_ostream &OS) const override;
  const std::string &getErrorMessage() const { return ErrMsg; }
  std::error_code convertToErrorCode() const override;

private:
  std::string ErrMsg;
};

class RuntimeDyldImpl;

class RuntimeDyld {
public:
  // Change the address associated with a section when resolving relocations.
  // Any relocations already associated with the symbol will be re-resolved.
  void reassignSectionAddress(unsigned SectionID, uint64_t Addr);

  using NotifyStubEmittedFunction = std::function<void(
      StringRef FileName, StringRef SectionName, StringRef SymbolName,
      unsigned SectionID, uint32_t StubOffset)>;

  /// Information about the loaded object.
  class LoadedObjectInfo : public llvm::LoadedObjectInfo {
    friend class RuntimeDyldImpl;

  public:
    using ObjSectionToIDMap = std::map<object::SectionRef, unsigned>;

    LoadedObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
        : RTDyld(RTDyld), ObjSecToIDMap(std::move(ObjSecToIDMap)) {}

    virtual object::OwningBinary<object::ObjectFile>
    getObjectForDebug(const object::ObjectFile &Obj) const = 0;

    uint64_t
    getSectionLoadAddress(const object::SectionRef &Sec) const override;

  protected:
    virtual void anchor();

    RuntimeDyldImpl &RTDyld;
    ObjSectionToIDMap ObjSecToIDMap;
  };

  /// Memory Management.
  class MemoryManager {
    friend class RuntimeDyld;

  public:
    MemoryManager() = default;
    virtual ~MemoryManager() = default;

    /// Allocate a memory block of (at least) the given size suitable for
    /// executable code. The SectionID is a unique identifier assigned by the
    /// RuntimeDyld instance, and optionally recorded by the memory manager to
    /// access a loaded section.
    virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
                                         unsigned SectionID,
                                         StringRef SectionName) = 0;

    /// Allocate a memory block of (at least) the given size suitable for data.
    /// The SectionID is a unique identifier assigned by the JIT engine, and
    /// optionally recorded by the memory manager to access a loaded section.
    virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
                                         unsigned SectionID,
                                         StringRef SectionName,
                                         bool IsReadOnly) = 0;

    /// An allocated TLS section
    struct TLSSection {
      /// The pointer to the initialization image
      uint8_t *InitializationImage;
      /// The TLS offset
      intptr_t Offset;
    };

    /// Allocate a memory block of (at least) the given size to be used for
    /// thread-local storage (TLS).
    virtual TLSSection allocateTLSSection(uintptr_t Size, unsigned Alignment,
                                          unsigned SectionID,
                                          StringRef SectionName);

    /// Inform the memory manager about the total amount of memory required to
    /// allocate all sections to be loaded:
    /// \p CodeSize - the total size of all code sections
    /// \p DataSizeRO - the total size of all read-only data sections
    /// \p DataSizeRW - the total size of all read-write data sections
    ///
    /// Note that by default the callback is disabled. To enable it
    /// redefine the method needsToReserveAllocationSpace to return true.
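    ///
    /// A subclass opting in might look like this (illustrative sketch; Slab
    /// and reserveSlab are hypothetical members/helpers):
    /// \code
    ///   bool needsToReserveAllocationSpace() override { return true; }
    ///   void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
    ///                               uintptr_t RODataSize, Align RODataAlign,
    ///                               uintptr_t RWDataSize,
    ///                               Align RWDataAlign) override {
    ///     // Reserve one contiguous slab big enough for every section.
    ///     Slab = reserveSlab(CodeSize + RODataSize + RWDataSize);
    ///   }
    /// \endcode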
    virtual void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
                                        uintptr_t RODataSize, Align RODataAlign,
                                        uintptr_t RWDataSize,
                                        Align RWDataAlign) {}

    /// Override to return true to enable the reserveAllocationSpace callback.
    virtual bool needsToReserveAllocationSpace() { return false; }

    /// Override to return false to tell LLVM no stub space will be needed.
    /// This requires some guarantees depending on the architecture, but when
    /// you know what you are doing it saves allocated space.
    virtual bool allowStubAllocation() const { return true; }

    /// Register the EH frames with the runtime so that c++ exceptions work.
    ///
    /// \p Addr parameter provides the local address of the EH frame section
    /// data, while \p LoadAddr provides the address of the data in the target
    /// address space.  If the section has not been remapped (which will usually
    /// be the case for local execution) these two values will be the same.
    virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
                                  size_t Size) = 0;
    virtual void deregisterEHFrames() = 0;

    /// This method is called when object loading is complete and section page
    /// permissions can be applied.  It is up to the memory manager implementation
    /// to decide whether or not to act on this method.  The memory manager will
    /// typically allocate all sections as read-write and then apply specific
    /// permissions when this method is called.  Code sections cannot be executed
    /// until this function has been called.  In addition, any cache coherency
    /// operations needed to reliably use the memory are also performed.
    ///
    /// Returns true if an error occurred, false otherwise.
    virtual bool finalizeMemory(std::string *ErrMsg = nullptr) = 0;

    /// This method is called after an object has been loaded into memory but
    /// before relocations are applied to the loaded sections.
    ///
    /// Memory managers which are preparing code for execution in an external
    /// address space can use this call to remap the section addresses for the
    /// newly loaded object.
    ///
    /// For clients that do not need access to an ExecutionEngine instance this
    /// method should be preferred to its cousin
    /// MCJITMemoryManager::notifyObjectLoaded as this method is compatible with
    /// ORC JIT stacks.
    virtual void notifyObjectLoaded(RuntimeDyld &RTDyld,
                                    const object::ObjectFile &Obj) {}

  private:
    virtual void anchor();

    bool FinalizationLocked = false;
  };

  /// Construct a RuntimeDyld instance.
  RuntimeDyld(MemoryManager &MemMgr, JITSymbolResolver &Resolver);
  RuntimeDyld(const RuntimeDyld &) = delete;
  RuntimeDyld &operator=(const RuntimeDyld &) = delete;
  ~RuntimeDyld();

  /// Add the referenced object file to the list of objects to be loaded and
  /// relocated.
  std::unique_ptr<LoadedObjectInfo> loadObject(const object::ObjectFile &O);

  /// Get the address of our local copy of the symbol. This may or may not
  /// be the address used for relocation (clients can copy the data around
  /// and resolve relocations based on where they put it).
  void *getSymbolLocalAddress(StringRef Name) const;

  /// Get the section ID for the section containing the given symbol.
  unsigned getSymbolSectionID(StringRef Name) const;

  /// Get the target address and flags for the named symbol.
  /// This address is the one used for relocation.
  JITEvaluatedSymbol getSymbol(StringRef Name) const;

  /// Returns a copy of the symbol table. This can be used by on-finalized
  /// callbacks to extract the symbol table before throwing away the
  /// RuntimeDyld instance. Because the map keys (StringRefs) are backed by
  /// strings inside the RuntimeDyld instance, the map should be processed
  /// before the RuntimeDyld instance is discarded.
  std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const;

  /// Resolve the relocations for all symbols we currently know about.
  void resolveRelocations();

  /// Map a section to its target address space value.
  /// Map the address of a JIT section as returned from the memory manager
  /// to the address in the target process as the running code will see it.
  /// This is the address which will be used for relocation resolution.
  void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);

  /// Returns the section's working memory.
  StringRef getSectionContent(unsigned SectionID) const;

  /// If the section was loaded, return the section's load address,
  /// otherwise return 0.
  uint64_t getSectionLoadAddress(unsigned SectionID) const;

  /// Set the NotifyStubEmitted callback. This is used for debugging
  /// purposes. A callback is made for each stub that is generated.
  void setNotifyStubEmitted(NotifyStubEmittedFunction NotifyStubEmitted) {
    this->NotifyStubEmitted = std::move(NotifyStubEmitted);
  }

  /// Register any EH frame sections that have been loaded but not previously
  /// registered with the memory manager.  Note, RuntimeDyld is responsible
  /// for identifying the EH frame and calling the memory manager with the
  /// EH frame section data.  However, the memory manager itself will handle
  /// the actual target-specific EH frame registration.
  void registerEHFrames();

  void deregisterEHFrames();

  bool hasError();
  StringRef getErrorString();

  /// By default, only sections that are "required for execution" are passed to
  /// the RTDyldMemoryManager, and other sections are discarded. Passing 'true'
  /// to this method will cause RuntimeDyld to pass all sections to its
  /// memory manager regardless of whether they are "required to execute" in the
  /// usual sense. This is useful for inspecting metadata sections that may not
  /// contain relocations, E.g. Debug info, stackmaps.
  ///
  /// Must be called before the first object file is loaded.
  void setProcessAllSections(bool ProcessAllSections) {
    assert(!Dyld && "setProcessAllSections must be called before loadObject.");
    this->ProcessAllSections = ProcessAllSections;
  }

  /// Perform all actions needed to make the code owned by this RuntimeDyld
  /// instance executable:
  ///
  /// 1) Apply relocations.
  /// 2) Register EH frames.
  /// 3) Update memory permissions*.
  ///
  /// * Finalization is potentially recursive**, and the 3rd step will only be
  ///   applied by the outermost call to finalize. This allows different
  ///   RuntimeDyld instances to share a memory manager without the innermost
  ///   finalization locking the memory and causing relocation fixup errors in
  ///   outer instances.
  ///
  /// ** Recursive finalization occurs when one RuntimeDyld instance needs the
  ///   address of a symbol owned by some other instance in order to apply
  ///   relocations.
  ///
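  /// Illustrative end-to-end sketch (assumes concrete MemoryManager and
  /// JITSymbolResolver implementations MM and R, and an object::ObjectFile
  /// Obj; not part of the original header):
  /// \code
  ///   RuntimeDyld Dyld(MM, R);
  ///   auto Info = Dyld.loadObject(Obj);
  ///   Dyld.finalizeWithMemoryManagerLocking();  // relocate, register EH
  ///                                             // frames, set permissions
  ///   JITEvaluatedSymbol Sym = Dyld.getSymbol("foo");
  /// \endcode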
  void finalizeWithMemoryManagerLocking();

private:
  friend void jitLinkForORC(
      object::OwningBinary<object::ObjectFile> O,
      RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver,
      bool ProcessAllSections,
      unique_function<Error(const object::ObjectFile &Obj, LoadedObjectInfo &,
                            std::map<StringRef, JITEvaluatedSymbol>)>
          OnLoaded,
      unique_function<void(object::OwningBinary<object::ObjectFile> O,
                           std::unique_ptr<LoadedObjectInfo>, Error)>
          OnEmitted);

  // RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
  // interface.
  std::unique_ptr<RuntimeDyldImpl> Dyld;
  MemoryManager &MemMgr;
  JITSymbolResolver &Resolver;
  bool ProcessAllSections;
  NotifyStubEmittedFunction NotifyStubEmitted;
};

// Asynchronous JIT link for ORC.
//
// Warning: This API is experimental and probably should not be used by anyone
// but ORC's RTDyldObjectLinkingLayer2. Internally it constructs a RuntimeDyld
// instance and uses continuation passing to perform the fix-up and finalize
// steps asynchronously.
void jitLinkForORC(
    object::OwningBinary<object::ObjectFile> O,
    RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver,
    bool ProcessAllSections,
    unique_function<Error(const object::ObjectFile &Obj,
                          RuntimeDyld::LoadedObjectInfo &,
                          std::map<StringRef, JITEvaluatedSymbol>)>
        OnLoaded,
    unique_function<void(object::OwningBinary<object::ObjectFile>,
                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
        OnEmitted);

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H

//===-- RTDyldMemoryManager.h - Memory manager for MC-JIT -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Interface of the runtime dynamic memory manager base class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H

#include "llvm-c/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Support/CBindingWrapping.h"
#include <cstddef>
#include <cstdint>
#include <string>

namespace llvm {

class ExecutionEngine;

namespace object {
  class ObjectFile;
} // end namespace object

class MCJITMemoryManager : public RuntimeDyld::MemoryManager {
public:
  // Don't hide the notifyObjectLoaded method from RuntimeDyld::MemoryManager.
  using RuntimeDyld::MemoryManager::notifyObjectLoaded;

  /// This method is called after an object has been loaded into memory but
  /// before relocations are applied to the loaded sections.  The object load
  /// may have been initiated by MCJIT to resolve an external symbol for another
  /// object that is being finalized.  In that case, the object about which
  /// the memory manager is being notified will be finalized immediately after
  /// the memory manager returns from this call.
  ///
  /// Memory managers which are preparing code for execution in an external
  /// address space can use this call to remap the section addresses for the
  /// newly loaded object.
  virtual void notifyObjectLoaded(ExecutionEngine *EE,
                                  const object::ObjectFile &) {}

private:
  void anchor() override;
};

// RuntimeDyld clients often want to handle the memory management of
// what gets placed where. For JIT clients, this is the subset of
// JITMemoryManager required for dynamic loading of binaries.
//
// FIXME: As the RuntimeDyld fills out, additional routines will be needed
//        for the varying types of objects to be allocated.
class RTDyldMemoryManager : public MCJITMemoryManager,
                            public LegacyJITSymbolResolver {
public:
  RTDyldMemoryManager() = default;
  RTDyldMemoryManager(const RTDyldMemoryManager&) = delete;
  void operator=(const RTDyldMemoryManager&) = delete;
  ~RTDyldMemoryManager() override;

  /// Register EH frames in the current process.
  static void registerEHFramesInProcess(uint8_t *Addr, size_t Size);

  /// Deregister EH frames in the current process.
  static void deregisterEHFramesInProcess(uint8_t *Addr, size_t Size);

  void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override;
  void deregisterEHFrames() override;

  /// This method returns the address of the specified function or variable in
  /// the current process.
  static uint64_t getSymbolAddressInProcess(const std::string &Name);

  /// Legacy symbol lookup - DEPRECATED! Please override findSymbol instead.
  ///
  /// This method returns the address of the specified function or variable.
  /// It is used to resolve symbols during module linking.
  virtual uint64_t getSymbolAddress(const std::string &Name) {
    return getSymbolAddressInProcess(Name);
  }

  /// This method returns a RuntimeDyld::SymbolInfo for the specified function
  /// or variable. It is used to resolve symbols during module linking.
  ///
  /// By default this falls back on the legacy lookup method:
  /// 'getSymbolAddress'. The address returned by getSymbolAddress is treated as
  /// a strong, exported symbol, consistent with historical treatment by
  /// RuntimeDyld.
  ///
  /// Clients writing custom RTDyldMemoryManagers are encouraged to override
  /// this method and return a SymbolInfo with the flags set correctly. This is
  /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
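  ///
  /// An override with explicit flags might look like this (illustrative
  /// sketch; lookupInMyTable is a hypothetical helper):
  /// \code
  ///   JITSymbol findSymbol(const std::string &Name) override {
  ///     if (uint64_t Addr = lookupInMyTable(Name))
  ///       return JITSymbol(Addr,
  ///                        JITSymbolFlags::Exported | JITSymbolFlags::Weak);
  ///     return nullptr;
  ///   }
  /// \endcode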
  JITSymbol findSymbol(const std::string &Name) override {
    return JITSymbol(getSymbolAddress(Name), JITSymbolFlags::Exported);
  }

  /// Legacy symbol lookup -- DEPRECATED! Please override
  /// findSymbolInLogicalDylib instead.
  ///
  /// Default to treating all modules as separate.
  virtual uint64_t getSymbolAddressInLogicalDylib(const std::string &Name) {
    return 0;
  }

  /// Default to treating all modules as separate.
  ///
  /// By default this falls back on the legacy lookup method:
  /// 'getSymbolAddressInLogicalDylib'. The address returned by
  /// getSymbolAddressInLogicalDylib is treated as a strong, exported symbol,
  /// consistent with historical treatment by RuntimeDyld.
  ///
  /// Clients writing custom RTDyldMemoryManagers are encouraged to override
  /// this method and return a SymbolInfo with the flags set correctly. This is
  /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
  JITSymbol
  findSymbolInLogicalDylib(const std::string &Name) override {
    return JITSymbol(getSymbolAddressInLogicalDylib(Name),
                     JITSymbolFlags::Exported);
  }

  /// This method returns the address of the specified function. As such it is
  /// only useful for resolving library symbols, not code generated symbols.
  ///
  /// If \p AbortOnFailure is false and no function with the given name is
  /// found, this function returns a null pointer. Otherwise, it prints a
  /// message to stderr and aborts.
  ///
  /// This function is deprecated for memory managers to be used with
  /// MCJIT or RuntimeDyld.  Use getSymbolAddress instead.
  virtual void *getPointerToNamedFunction(const std::string &Name,
                                          bool AbortOnFailure = true);

protected:
  struct EHFrame {
    uint8_t *Addr;
    size_t Size;
  };
  typedef std::vector<EHFrame> EHFrameInfos;
  EHFrameInfos EHFrames;

private:
  void anchor() override;
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(
    RTDyldMemoryManager, LLVMMCJITMemoryManagerRef)

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H

//===---- RuntimeDyldChecker.h - RuntimeDyld tester framework -----*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Support/Endian.h"
#include <optional>

#include <cstdint>
#include <memory>
#include <string>
#include <utility>

namespace llvm {

class StringRef;
class MCDisassembler;
class MemoryBuffer;
class MCInstPrinter;
class RuntimeDyld;
class RuntimeDyldCheckerImpl;
class raw_ostream;

/// RuntimeDyld invariant checker for verifying that RuntimeDyld has
///        correctly applied relocations.
///
/// The RuntimeDyldChecker class evaluates expressions against an attached
/// RuntimeDyld instance to verify that relocations have been applied
/// correctly.
///
/// The expression language supports basic pointer arithmetic and bit-masking,
/// and has limited disassembler integration for accessing instruction
/// operands and the next PC (program counter) address for each instruction.
///
/// The language syntax is:
///
/// check = expr '=' expr
///
/// expr = binary_expr
///      | sliceable_expr
///
/// sliceable_expr = '*{' number '}' load_addr_expr [slice]
///                | '(' expr ')' [slice]
///                | ident_expr [slice]
///                | number [slice]
///
/// slice = '[' high-bit-index ':' low-bit-index ']'
///
/// load_addr_expr = symbol
///                | '(' symbol '+' number ')'
///                | '(' symbol '-' number ')'
///
/// ident_expr = 'decode_operand' '(' symbol ',' operand-index ')'
///            | 'next_pc'        '(' symbol ')'
///            | 'stub_addr' '(' stub-container-name ',' symbol ')'
///            | 'got_addr' '(' stub-container-name ',' symbol ')'
///            | 'section_addr' '(' stub-container-name ',' symbol ')'
///            | symbol
///
/// binary_expr = expr '+' expr
///             | expr '-' expr
///             | expr '&' expr
///             | expr '|' expr
///             | expr '<<' expr
///             | expr '>>' expr
///
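/// For example, a test object might carry a rule like the following
/// (illustrative; the symbol names are hypothetical), which
/// checkAllRulesInBuffer extracts via its RulePrefix and evaluates with
/// check():
///
///   # rtdyld-check: decode_operand(foo_call, 0) = foo - next_pc(foo_call)
///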
class RuntimeDyldChecker {
public:
  class MemoryRegionInfo {
  public:
    MemoryRegionInfo() = default;

    /// Constructor for symbols/sections with content.
    MemoryRegionInfo(ArrayRef<char> Content, JITTargetAddress TargetAddress)
        : ContentPtr(Content.data()), Size(Content.size()),
          TargetAddress(TargetAddress) {}

    /// Constructor for zero-fill symbols/sections.
    MemoryRegionInfo(uint64_t Size, JITTargetAddress TargetAddress)
        : Size(Size), TargetAddress(TargetAddress) {}

    /// Returns true if this is a zero-fill symbol/section.
    bool isZeroFill() const {
      assert(Size && "setContent/setZeroFill must be called first");
      return !ContentPtr;
    }

    /// Set the content for this memory region.
    void setContent(ArrayRef<char> Content) {
      assert(!ContentPtr && !Size && "Content/zero-fill already set");
      ContentPtr = Content.data();
      Size = Content.size();
    }

    /// Set a zero-fill length for this memory region.
    void setZeroFill(uint64_t Size) {
      assert(!ContentPtr && !this->Size && "Content/zero-fill already set");
      this->Size = Size;
    }

    /// Returns the content for this section if there is any.
    ArrayRef<char> getContent() const {
      assert(!isZeroFill() && "Can't get content for a zero-fill section");
      return {ContentPtr, static_cast<size_t>(Size)};
    }

    /// Returns the zero-fill length for this section.
    uint64_t getZeroFillLength() const {
      assert(isZeroFill() && "Can't get zero-fill length for content section");
      return Size;
    }

    /// Set the target address for this region.
    void setTargetAddress(JITTargetAddress TargetAddress) {
      assert(!this->TargetAddress && "TargetAddress already set");
      this->TargetAddress = TargetAddress;
    }

    /// Return the target address for this region.
    JITTargetAddress getTargetAddress() const { return TargetAddress; }

  private:
    const char *ContentPtr = nullptr;
    uint64_t Size = 0;
    JITTargetAddress TargetAddress = 0;
  };

  using IsSymbolValidFunction = std::function<bool(StringRef Symbol)>;
  using GetSymbolInfoFunction =
      std::function<Expected<MemoryRegionInfo>(StringRef SymbolName)>;
  using GetSectionInfoFunction = std::function<Expected<MemoryRegionInfo>(
      StringRef FileName, StringRef SectionName)>;
  using GetStubInfoFunction = std::function<Expected<MemoryRegionInfo>(
      StringRef StubContainer, StringRef TargetName)>;
  using GetGOTInfoFunction = std::function<Expected<MemoryRegionInfo>(
      StringRef GOTContainer, StringRef TargetName)>;

  RuntimeDyldChecker(IsSymbolValidFunction IsSymbolValid,
                     GetSymbolInfoFunction GetSymbolInfo,
                     GetSectionInfoFunction GetSectionInfo,
                     GetStubInfoFunction GetStubInfo,
                     GetGOTInfoFunction GetGOTInfo,
                     support::endianness Endianness,
                     MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
                     raw_ostream &ErrStream);
  ~RuntimeDyldChecker();

  /// Check a single expression against the attached RuntimeDyld
  ///        instance.
  bool check(StringRef CheckExpr) const;

  /// Scan the given memory buffer for lines beginning with the string
  ///        in RulePrefix. The remainder of the line is passed to the check
  ///        method to be evaluated as an expression.
  bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;

  /// Returns the address of the requested section (or an error message
  ///        in the second element of the pair if the address cannot be found).
  ///
  /// If 'LocalAddress' is true, this returns the address of the section
  /// within the linker's memory. If 'LocalAddress' is false it returns the
  /// address within the target process (i.e. the load address).
  std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
                                                  StringRef SectionName,
                                                  bool LocalAddress);

  /// If there is a section at the given local address, return its load
  /// address, otherwise return std::nullopt.
  std::optional<uint64_t> getSectionLoadAddress(void *LocalAddress) const;

private:
  std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
};

} // end namespace llvm

#endif

//===- JITSymbol.h - JIT symbol abstraction ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Abstraction for target process addresses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITSYMBOL_H
#define LLVM_EXECUTIONENGINE_JITSYMBOL_H

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <set>
#include <string>

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"

namespace llvm {

class GlobalValue;
class GlobalValueSummary;

namespace object {

class SymbolRef;

} // end namespace object

/// Represents an address in the target process's address space.
using JITTargetAddress = uint64_t;

/// Convert a JITTargetAddress to a pointer.
///
/// Note: This is a raw cast of the address bit pattern to the given pointer
/// type. When casting to a function pointer in order to execute JIT'd code
/// jitTargetAddressToFunction should be preferred, as it will also perform
/// pointer signing on targets that require it.
template <typename T> T jitTargetAddressToPointer(JITTargetAddress Addr) {
  static_assert(std::is_pointer<T>::value, "T must be a pointer type");
  uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
  assert(IntPtr == Addr && "JITTargetAddress value out of range for uintptr_t");
  return reinterpret_cast<T>(IntPtr);
}

/// Convert a JITTargetAddress to a callable function pointer.
///
/// Casts the given address to a callable function pointer. This operation
/// will perform pointer signing for platforms that require it (e.g. arm64e).
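///
/// Illustrative use (assumes Addr holds the JITTargetAddress of a JIT'd
/// function taking no arguments and returning int):
/// \code
///   auto *Entry = jitTargetAddressToFunction<int (*)()>(Addr);
///   int Result = Entry();
/// \endcode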
template <typename T> T jitTargetAddressToFunction(JITTargetAddress Addr) {
  static_assert(std::is_pointer<T>::value &&
                    std::is_function<std::remove_pointer_t<T>>::value,
                "T must be a function pointer type");
  return jitTargetAddressToPointer<T>(Addr);
}

/// Convert a pointer to a JITTargetAddress.
template <typename T> JITTargetAddress pointerToJITTargetAddress(T *Ptr) {
  return static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(Ptr));
}

/// Flags for symbols in the JIT.
class JITSymbolFlags {
public:
  using UnderlyingType = uint8_t;
  using TargetFlagsType = uint8_t;

  enum FlagNames : UnderlyingType {
    None = 0,
    HasError = 1U << 0,
    Weak = 1U << 1,
    Common = 1U << 2,
    Absolute = 1U << 3,
    Exported = 1U << 4,
    Callable = 1U << 5,
    MaterializationSideEffectsOnly = 1U << 6,
    LLVM_MARK_AS_BITMASK_ENUM( // LargestValue =
        MaterializationSideEffectsOnly)
  };

  /// Default-construct a JITSymbolFlags instance.
  JITSymbolFlags() = default;

  /// Construct a JITSymbolFlags instance from the given flags.
  JITSymbolFlags(FlagNames Flags) : Flags(Flags) {}

  /// Construct a JITSymbolFlags instance from the given flags and target
  ///        flags.
  JITSymbolFlags(FlagNames Flags, TargetFlagsType TargetFlags)
      : TargetFlags(TargetFlags), Flags(Flags) {}

  /// Implicitly convert to bool. Returns true if any flag is set.
  explicit operator bool() const { return Flags != None || TargetFlags != 0; }

  /// Compare for equality.
  bool operator==(const JITSymbolFlags &RHS) const {
    return Flags == RHS.Flags && TargetFlags == RHS.TargetFlags;
  }

  /// Bitwise AND-assignment for FlagNames.
  JITSymbolFlags &operator&=(const FlagNames &RHS) {
    Flags &= RHS;
    return *this;
  }

  /// Bitwise OR-assignment for FlagNames.
  JITSymbolFlags &operator|=(const FlagNames &RHS) {
    Flags |= RHS;
    return *this;
  }

  /// Return true if there was an error retrieving this symbol.
  bool hasError() const {
    return (Flags & HasError) == HasError;
  }

  /// Returns true if the Weak flag is set.
  bool isWeak() const {
    return (Flags & Weak) == Weak;
  }

  /// Returns true if the Common flag is set.
  bool isCommon() const {
    return (Flags & Common) == Common;
  }

  /// Returns true if the symbol isn't weak or common.
  bool isStrong() const {
    return !isWeak() && !isCommon();
  }

  /// Returns true if the Exported flag is set.
  bool isExported() const {
    return (Flags & Exported) == Exported;
  }

  /// Returns true if the given symbol is known to be callable.
  bool isCallable() const { return (Flags & Callable) == Callable; }

  /// Returns true if this symbol is a materialization-side-effects-only
  /// symbol. Such symbols do not have a real address. They exist to trigger
  /// and support synchronization of materialization side effects, e.g. for
  /// collecting initialization information. These symbols will vanish from
  /// the symbol table immediately upon reaching the ready state, and will
  /// appear to queries as if they were never defined (except that query
  /// callback execution will be delayed until they reach the ready state).
  /// MaterializationSideEffectsOnly symbols should only be queried using the
  /// SymbolLookupFlags::WeaklyReferencedSymbol flag (see
  /// llvm/include/llvm/ExecutionEngine/Orc/Core.h).
  bool hasMaterializationSideEffectsOnly() const {
    return (Flags & MaterializationSideEffectsOnly) ==
           MaterializationSideEffectsOnly;
  }

  /// Get the underlying flags value as an integer.
  UnderlyingType getRawFlagsValue() const {
    return static_cast<UnderlyingType>(Flags);
  }

  /// Return a reference to the target-specific flags.
  TargetFlagsType& getTargetFlags() { return TargetFlags; }

  /// Return a reference to the target-specific flags.
  const TargetFlagsType& getTargetFlags() const { return TargetFlags; }

  /// Construct a JITSymbolFlags value based on the flags of the given global
  /// value.
  static JITSymbolFlags fromGlobalValue(const GlobalValue &GV);

  /// Construct a JITSymbolFlags value based on the flags of the given global
  /// value summary.
  static JITSymbolFlags fromSummary(GlobalValueSummary *S);

  /// Construct a JITSymbolFlags value based on the flags of the given libobject
  /// symbol.
  static Expected<JITSymbolFlags>
  fromObjectSymbol(const object::SymbolRef &Symbol);

private:
  TargetFlagsType TargetFlags = 0;
  FlagNames Flags = None;
};

inline JITSymbolFlags operator&(const JITSymbolFlags &LHS,
                                const JITSymbolFlags::FlagNames &RHS) {
  JITSymbolFlags Tmp = LHS;
  Tmp &= RHS;
  return Tmp;
}

inline JITSymbolFlags operator|(const JITSymbolFlags &LHS,
                                const JITSymbolFlags::FlagNames &RHS) {
  JITSymbolFlags Tmp = LHS;
  Tmp |= RHS;
  return Tmp;
}

/// ARM-specific JIT symbol flags.
/// FIXME: This should be moved into a target-specific header.
class ARMJITSymbolFlags {
public:
  ARMJITSymbolFlags() = default;

  enum FlagNames {
    None = 0,
    Thumb = 1 << 0
  };

  operator JITSymbolFlags::TargetFlagsType&() { return Flags; }

  static ARMJITSymbolFlags fromObjectSymbol(const object::SymbolRef &Symbol);

private:
  JITSymbolFlags::TargetFlagsType Flags = 0;
};

/// Represents a symbol that has been evaluated to an address already.
class JITEvaluatedSymbol {
public:
  JITEvaluatedSymbol() = default;

  /// Create a 'null' symbol.
  JITEvaluatedSymbol(std::nullptr_t) {}

  /// Create a symbol for the given address and flags.
  JITEvaluatedSymbol(JITTargetAddress Address, JITSymbolFlags Flags)
      : Address(Address), Flags(Flags) {}

  /// Create a symbol from the given pointer with the given flags.
  template <typename T>
  static JITEvaluatedSymbol
  fromPointer(T *P, JITSymbolFlags Flags = JITSymbolFlags::Exported) {
    return JITEvaluatedSymbol(pointerToJITTargetAddress(P), Flags);
  }

  /// An evaluated symbol converts to 'true' if its address is non-zero.
  explicit operator bool() const { return Address != 0; }

  /// Return the address of this symbol.
  JITTargetAddress getAddress() const { return Address; }

  /// Return the flags for this symbol.
  JITSymbolFlags getFlags() const { return Flags; }

  /// Set the flags for this symbol.
  void setFlags(JITSymbolFlags Flags) { this->Flags = std::move(Flags); }

private:
  JITTargetAddress Address = 0;
  JITSymbolFlags Flags;
};

/// Represents a symbol in the JIT.
class JITSymbol {
public:
  using GetAddressFtor = unique_function<Expected<JITTargetAddress>()>;

  /// Create a 'null' symbol, used to represent a "symbol not found"
  ///        result from a successful (non-erroneous) lookup.
  JITSymbol(std::nullptr_t)
      : CachedAddr(0) {}

  /// Create a JITSymbol representing an error in the symbol lookup
  ///        process (e.g. a network failure during a remote lookup).
  JITSymbol(Error Err)
    : Err(std::move(Err)), Flags(JITSymbolFlags::HasError) {}

  /// Create a symbol for a definition with a known address.
  JITSymbol(JITTargetAddress Addr, JITSymbolFlags Flags)
      : CachedAddr(Addr), Flags(Flags) {}

  /// Construct a JITSymbol from a JITEvaluatedSymbol.
  JITSymbol(JITEvaluatedSymbol Sym)
      : CachedAddr(Sym.getAddress()), Flags(Sym.getFlags()) {}

  /// Create a symbol for a definition that doesn't have a known address
  ///        yet.
  /// @param GetAddress A functor to materialize a definition (fixing the
  ///        address) on demand.
  ///
  ///   This constructor allows a JIT layer to provide a reference to a symbol
  /// definition without actually materializing the definition up front. The
  /// user can materialize the definition at any time by calling the getAddress
  /// method.
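  ///
  /// Illustrative sketch (compileFoo is a hypothetical function that
  /// materializes the definition and returns a pointer to it):
  /// \code
  ///   JITSymbol Sym(
  ///       []() -> Expected<JITTargetAddress> {
  ///         return pointerToJITTargetAddress(compileFoo());
  ///       },
  ///       JITSymbolFlags::Exported);
  /// \endcode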
  JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
      : GetAddress(std::move(GetAddress)), CachedAddr(0), Flags(Flags) {}

  JITSymbol(const JITSymbol&) = delete;
  JITSymbol& operator=(const JITSymbol&) = delete;

  JITSymbol(JITSymbol &&Other)
    : GetAddress(std::move(Other.GetAddress)), Flags(std::move(Other.Flags)) {
    if (Flags.hasError())
      Err = std::move(Other.Err);
    else
      CachedAddr = std::move(Other.CachedAddr);
  }

  JITSymbol& operator=(JITSymbol &&Other) {
    GetAddress = std::move(Other.GetAddress);
    Flags = std::move(Other.Flags);
    if (Flags.hasError())
      Err = std::move(Other.Err);
    else
      CachedAddr = std::move(Other.CachedAddr);
    return *this;
  }

  ~JITSymbol() {
    if (Flags.hasError())
      Err.~Error();
    else
      CachedAddr.~JITTargetAddress();
  }

  /// Returns true if the symbol exists, false otherwise.
  explicit operator bool() const {
    return !Flags.hasError() && (CachedAddr || GetAddress);
  }

  /// Move the error field value out of this JITSymbol.
  Error takeError() {
    if (Flags.hasError())
      return std::move(Err);
    return Error::success();
  }

  /// Get the address of the symbol in the target address space. Returns
  ///        '0' if the symbol does not exist.
  Expected<JITTargetAddress> getAddress() {
    assert(!Flags.hasError() && "getAddress called on error value");
    if (GetAddress) {
      if (auto CachedAddrOrErr = GetAddress()) {
        GetAddress = nullptr;
        CachedAddr = *CachedAddrOrErr;
        assert(CachedAddr && "Symbol could not be materialized.");
      } else
        return CachedAddrOrErr.takeError();
    }
    return CachedAddr;
  }

  JITSymbolFlags getFlags() const { return Flags; }

private:
  GetAddressFtor GetAddress;
  union {
    JITTargetAddress CachedAddr;
    Error Err;
  };
  JITSymbolFlags Flags;
};

/// Symbol resolution interface.
///
/// Allows symbol flags and addresses to be looked up by name.
/// Symbol queries are done in bulk (i.e. you request resolution of a set of
/// symbols, rather than a single one) to reduce IPC overhead in the case of
/// remote JITing, and expose opportunities for parallel compilation.
class JITSymbolResolver {
public:
  using LookupSet = std::set<StringRef>;
  using LookupResult = std::map<StringRef, JITEvaluatedSymbol>;
  using OnResolvedFunction = unique_function<void(Expected<LookupResult>)>;

  virtual ~JITSymbolResolver() = default;

  /// Returns the fully resolved address and flags for each of the given
  ///        symbols.
  ///
  /// This method will return an error if any of the given symbols can not be
  /// resolved, or if the resolution process itself triggers an error.
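  ///
  /// Illustrative bulk query (assumes a concrete resolver R):
  /// \code
  ///   R.lookup({"foo", "bar"}, [](Expected<LookupResult> Result) {
  ///     if (!Result)
  ///       logAllUnhandledErrors(Result.takeError(), errs(), "lookup: ");
  ///     else
  ///       for (auto &KV : *Result)
  ///         errs() << KV.first << " @ " << KV.second.getAddress() << "\n";
  ///   });
  /// \endcode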
  virtual void lookup(const LookupSet &Symbols,
                      OnResolvedFunction OnResolved) = 0;

  /// Returns the subset of the given symbols that should be materialized by
  /// the caller. Only weak/common symbols should be looked up, as strong
  /// definitions are implicitly always part of the caller's responsibility.
  virtual Expected<LookupSet>
  getResponsibilitySet(const LookupSet &Symbols) = 0;

  /// Specify if this resolver can return valid symbols with zero value.
  virtual bool allowsZeroSymbols() { return false; }

private:
  virtual void anchor();
};

/// Legacy symbol resolution interface.
class LegacyJITSymbolResolver : public JITSymbolResolver {
public:
  /// Performs lookup by, for each symbol, first calling
  ///        findSymbolInLogicalDylib and if that fails calling
  ///        findSymbol.
  void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) final;

  /// Performs flags lookup by calling findSymbolInLogicalDylib and
  ///        returning the flags value for that symbol.
  Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) final;

  /// This method returns the address of the specified symbol if it exists
  /// within the logical dynamic library represented by this JITSymbolResolver.
  /// Unlike findSymbol, queries through this interface should return addresses
  /// for hidden symbols.
  ///
  /// This is of particular importance for the Orc JIT APIs, which support lazy
  /// compilation by breaking up modules: Each of those broken out modules
  /// must be able to resolve hidden symbols provided by the others. Clients
  /// writing memory managers for MCJIT can usually ignore this method.
  ///
  /// This method will be queried by RuntimeDyld when checking for previous
  /// definitions of common symbols.
  virtual JITSymbol findSymbolInLogicalDylib(const std::string &Name) = 0;

  /// This method returns the address of the specified function or variable.
  /// It is used to resolve symbols during module linking.
  ///
  /// If the returned symbol's address is equal to ~0ULL then RuntimeDyld will
  /// skip all relocations for that symbol, and the client will be responsible
  /// for handling them manually.
  virtual JITSymbol findSymbol(const std::string &Name) = 0;

private:
  void anchor() override;
};

} // end namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITSYMBOL_H
//===-- Interpreter.h - Abstract Execution Engine Interface -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file forces the interpreter to link in on certain operating systems
// (Windows).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_INTERPRETER_H
#define LLVM_EXECUTIONENGINE_INTERPRETER_H

#include "llvm/ExecutionEngine/ExecutionEngine.h"

extern "C" void LLVMLinkInInterpreter();

namespace {
  struct ForceInterpreterLinking {
    ForceInterpreterLinking() { LLVMLinkInInterpreter(); }
  } ForceInterpreterLinking;
}
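
// Usage note (added commentary, not part of the original header): a tool that
// needs the interpreter includes this header once in its main source file;
// constructing the ForceInterpreterLinking object above is what pulls the
// interpreter's object file into the final link.
//
// \code
//   // In the tool's main .cpp:
//   #include "llvm/ExecutionEngine/Interpreter.h"
// \endcode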

#endif
//===----- LinkAllIR.h - Reference All VMCore Code --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file pulls in all the object modules of the VMCore library so
// that tools like llc, opt, and lli can ensure they are linked with all symbols
// from libVMCore.a. It should only be used from a tool's main program.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LINKALLIR_H
#define LLVM_LINKALLIR_H

#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/Signals.h"
#include <cstdlib>

namespace {
  struct ForceVMCoreLinking {
    ForceVMCoreLinking() {
      // We must reference VMCore in such a way that compilers will not
      // delete it all as dead code, even with whole program optimization,
      // yet is effectively a NO-OP. As the compiler isn't smart enough
      // to know that getenv() never returns -1, this will do the job.
      // This is so that globals in the translation units where these functions
      // are defined are forced to be initialized, populating various
      // registries.
      if (std::getenv("bar") != (char*) -1)
        return;
      llvm::LLVMContext Context;
      (void)new llvm::Module("", Context);
      (void)new llvm::UnreachableInst(Context);
      (void)    llvm::createVerifierPass();
    }
  } ForceVMCoreLinking;
}

#endif
//===- DomTreeUpdater.h - DomTree/Post DomTree Updater ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DomTreeUpdater class, which provides a uniform way to
// update dominator tree related data structures.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DOMTREEUPDATER_H
#define LLVM_ANALYSIS_DOMTREEUPDATER_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Compiler.h"
#include <cstddef>
#include <functional>
#include <vector>

namespace llvm {
class PostDominatorTree;

class DomTreeUpdater {
public:
  enum class UpdateStrategy : unsigned char { Eager = 0, Lazy = 1 };

  explicit DomTreeUpdater(UpdateStrategy Strategy_) : Strategy(Strategy_) {}
  DomTreeUpdater(DominatorTree &DT_, UpdateStrategy Strategy_)
      : DT(&DT_), Strategy(Strategy_) {}
  DomTreeUpdater(DominatorTree *DT_, UpdateStrategy Strategy_)
      : DT(DT_), Strategy(Strategy_) {}
  DomTreeUpdater(PostDominatorTree &PDT_, UpdateStrategy Strategy_)
      : PDT(&PDT_), Strategy(Strategy_) {}
  DomTreeUpdater(PostDominatorTree *PDT_, UpdateStrategy Strategy_)
      : PDT(PDT_), Strategy(Strategy_) {}
  DomTreeUpdater(DominatorTree &DT_, PostDominatorTree &PDT_,
                 UpdateStrategy Strategy_)
      : DT(&DT_), PDT(&PDT_), Strategy(Strategy_) {}
  DomTreeUpdater(DominatorTree *DT_, PostDominatorTree *PDT_,
                 UpdateStrategy Strategy_)
      : DT(DT_), PDT(PDT_), Strategy(Strategy_) {}

  ~DomTreeUpdater() { flush(); }

  /// Returns true if the current strategy is Lazy.
  bool isLazy() const { return Strategy == UpdateStrategy::Lazy; }

  /// Returns true if the current strategy is Eager.
  bool isEager() const { return Strategy == UpdateStrategy::Eager; }

  /// Returns true if it holds a DominatorTree.
  bool hasDomTree() const { return DT != nullptr; }

  /// Returns true if it holds a PostDominatorTree.
  bool hasPostDomTree() const { return PDT != nullptr; }

  /// Returns true if there is a BasicBlock awaiting deletion.
  /// The deletion will only happen once a flush event occurs and
  /// all available trees are up-to-date.
  /// Returns false under Eager UpdateStrategy.
  bool hasPendingDeletedBB() const { return !DeletedBBs.empty(); }

  /// Returns true if DelBB is awaiting deletion.
  /// Returns false under Eager UpdateStrategy.
  bool isBBPendingDeletion(BasicBlock *DelBB) const;

  /// Returns true if either DT or PDT is valid and the tree has at
  /// least one update pending. If DT or PDT is nullptr, it is treated
  /// as having no pending updates. This function does not check
  /// whether there is a BasicBlock awaiting deletion.
  /// Returns false under Eager UpdateStrategy.
  bool hasPendingUpdates() const;

  /// Returns true if there are DominatorTree updates queued.
  /// Returns false under Eager UpdateStrategy or if DT is nullptr.
  bool hasPendingDomTreeUpdates() const;

  /// Returns true if there are PostDominatorTree updates queued.
  /// Returns false under Eager UpdateStrategy or if PDT is nullptr.
  bool hasPendingPostDomTreeUpdates() const;

  ///@{
  /// \name Mutation APIs
  ///
  /// These methods provide APIs for submitting updates to the DominatorTree and
  /// the PostDominatorTree.
  ///
  /// Note: There are two strategies to update the DominatorTree and the
  /// PostDominatorTree:
  /// 1. Eager UpdateStrategy: Updates are submitted and then flushed
  /// immediately.
  /// 2. Lazy UpdateStrategy: Updates are submitted but only flushed when you
  /// explicitly call the Flush APIs. This strategy is recommended when you
  /// submit updates in several batches that can add up to a large number of
  /// updates between two queries on the DominatorTree. Depending on the number
  /// of updates, the incremental updater can reschedule them or decide to
  /// recalculate the dominator tree in order to speed up the updating process.
  ///
  /// Although GenericDomTree provides several update primitives,
  /// it is not encouraged to use these APIs directly.

  /// Submit updates to all available trees.
  /// The Eager Strategy flushes updates immediately while the Lazy Strategy
  /// queues the updates.
  ///
  /// Note: The "existence" of an edge in a CFG refers to the CFG which DTU is
  /// in sync with + all updates before that single update.
  ///
  /// CAUTION!
  /// 1. It is required for the state of the LLVM IR to be updated
  /// *before* submitting the updates because the internal update routine will
  /// analyze the current state of the CFG to determine whether an update
  /// is valid.
  /// 2. It is illegal to submit any update that has already been submitted,
  /// i.e., you must not insert an edge that already exists or delete an
  /// edge that does not exist.
  void applyUpdates(ArrayRef<DominatorTree::UpdateType> Updates);
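
  // Illustrative sketch (added commentary, not part of the original header):
  // submitting updates after redirecting a branch. The IR is changed first,
  // then the matching edge updates follow; `FromBB`, `OldSucc` and `NewSucc`
  // are hypothetical blocks.
  //
  // \code
  //   DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Lazy);
  //   // ... rewrite the terminator of FromBB: OldSucc -> NewSucc ...
  //   DTU.applyUpdates({{DominatorTree::Delete, FromBB, OldSucc},
  //                     {DominatorTree::Insert, FromBB, NewSucc}});
  //   // Under the Lazy strategy the updates are flushed on
  //   // getDomTree()/getPostDomTree()/flush().
  // \endcode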

  /// Submit updates to all available trees. It will also
  /// 1. discard duplicated updates,
  /// 2. remove invalid updates. (An invalid update is the deletion of an edge
  /// that still exists or the insertion of an edge that does not exist.)
  /// The Eager Strategy flushes updates immediately while the Lazy Strategy
  /// queues the updates.
  ///
  /// Note: The "existence" of an edge in a CFG refers to the CFG which DTU is
  /// in sync with + all updates before that single update.
  ///
  /// CAUTION!
  /// 1. It is required for the state of the LLVM IR to be updated
  /// *before* submitting the updates because the internal update routine will
  /// analyze the current state of the CFG to determine whether an update
  /// is valid.
  /// 2. It is illegal to submit any update that has already been submitted,
  /// i.e., you must not insert an edge that already exists or delete an
  /// edge that does not exist.
  /// 3. It is only legal to submit updates to an edge in the order CFG changes
  /// are made. The order you submit updates on different edges is not
  /// restricted.
  void applyUpdatesPermissive(ArrayRef<DominatorTree::UpdateType> Updates);

  /// Notify DTU that the entry block was replaced.
  /// Recalculate all available trees and flush all BasicBlocks
  /// awaiting deletion immediately.
  void recalculate(Function &F);

  /// Delete DelBB. DelBB will be removed from its Parent and
  /// erased from available trees if it exists and finally get deleted.
  /// Under Eager UpdateStrategy, DelBB will be processed immediately.
  /// Under Lazy UpdateStrategy, DelBB will be queued until a flush event
  /// occurs and all available trees are up-to-date. Assert if any instruction
  /// of DelBB is modified while awaiting deletion. When both DT and PDT are
  /// null, DelBB will be queued until flush() is called.
  void deleteBB(BasicBlock *DelBB);

  /// Delete DelBB. DelBB will be removed from its Parent and
  /// erased from available trees if it exists. Then the callback will
  /// be called. Finally, DelBB will be deleted.
  /// Under Eager UpdateStrategy, DelBB will be processed immediately.
  /// Under Lazy UpdateStrategy, DelBB will be queued until a flush event
  /// occurs and all available trees are up-to-date. Assert if any instruction
  /// of DelBB is modified while awaiting deletion. Multiple callbacks can be
  /// queued for one DelBB under Lazy UpdateStrategy.
  void callbackDeleteBB(BasicBlock *DelBB,
                        std::function<void(BasicBlock *)> Callback);

  ///@}

  ///@{
  /// \name Flush APIs
  ///
  /// CAUTION! By the moment these flush APIs are called, the current CFG needs
  /// to be the same as the CFG which DTU is in sync with + all updates
  /// submitted.

  /// Flush DomTree updates and return DomTree.
  /// It flushes Deleted BBs if both trees are up-to-date.
  /// It must only be called when it has a DomTree.
  DominatorTree &getDomTree();

  /// Flush PostDomTree updates and return PostDomTree.
  /// It flushes Deleted BBs if both trees are up-to-date.
  /// It must only be called when it has a PostDomTree.
  PostDominatorTree &getPostDomTree();

  /// Apply all pending updates to available trees and flush all BasicBlocks
  /// awaiting deletion.
  void flush();

  ///@}

  /// Debug method to help view the internal state of this class.
  LLVM_DUMP_METHOD void dump() const;

private:
  class CallBackOnDeletion final : public CallbackVH {
  public:
    CallBackOnDeletion(BasicBlock *V,
                       std::function<void(BasicBlock *)> Callback)
        : CallbackVH(V), DelBB(V), Callback_(Callback) {}

  private:
    BasicBlock *DelBB = nullptr;
    std::function<void(BasicBlock *)> Callback_;

    void deleted() override {
      Callback_(DelBB);
      CallbackVH::deleted();
    }
  };

  SmallVector<DominatorTree::UpdateType, 16> PendUpdates;
  size_t PendDTUpdateIndex = 0;
  size_t PendPDTUpdateIndex = 0;
  DominatorTree *DT = nullptr;
  PostDominatorTree *PDT = nullptr;
  const UpdateStrategy Strategy;
  SmallPtrSet<BasicBlock *, 8> DeletedBBs;
  std::vector<CallBackOnDeletion> Callbacks;
  bool IsRecalculatingDomTree = false;
  bool IsRecalculatingPostDomTree = false;

  /// First remove all the instructions of DelBB, then make sure DelBB has a
  /// valid terminator instruction. This is necessary because DelBB must stay
  /// inside its parent Function while awaiting deletion under the Lazy
  /// UpdateStrategy, and other routines would otherwise assert that the state
  /// of the IR is inconsistent. Assert if DelBB is nullptr or has predecessors.
  void validateDeleteBB(BasicBlock *DelBB);

  /// Returns true if at least one BasicBlock is deleted.
  bool forceFlushDeletedBB();

  /// Helper function to apply all pending DomTree updates.
  void applyDomTreeUpdates();

  /// Helper function to apply all pending PostDomTree updates.
  void applyPostDomTreeUpdates();

  /// Helper function to flush deleted BasicBlocks if all available
  /// trees are up-to-date.
  void tryFlushDeletedBB();

  /// Drop all updates applied by all available trees and delete BasicBlocks if
  /// all available trees are up-to-date.
  void dropOutOfDateUpdates();

  /// Erase Basic Block node that has been unlinked from Function
  /// in the DomTree and PostDomTree.
  void eraseDelBBNode(BasicBlock *DelBB);

  /// Returns true if the update appears in the LLVM IR.
  /// It is used to check whether an update is valid in
  /// insertEdge/deleteEdge or is unnecessary in the batch update.
  bool isUpdateValid(DominatorTree::UpdateType Update) const;

  /// Returns true if the update is self dominance.
  bool isSelfDominance(DominatorTree::UpdateType Update) const;
};
} // namespace llvm

#endif // LLVM_ANALYSIS_DOMTREEUPDATER_H
//=- FunctionPropertiesAnalysis.h - Function Properties Analysis --*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the FunctionPropertiesInfo and FunctionPropertiesAnalysis
// classes used to extract function properties.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H
#define LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class DominatorTree;
class Function;
class LoopInfo;

class FunctionPropertiesInfo {
  friend class FunctionPropertiesUpdater;
  void updateForBB(const BasicBlock &BB, int64_t Direction);
  void updateAggregateStats(const Function &F, const LoopInfo &LI);
  void reIncludeBB(const BasicBlock &BB);

public:
  static FunctionPropertiesInfo
  getFunctionPropertiesInfo(const Function &F, const DominatorTree &DT,
                            const LoopInfo &LI);

  static FunctionPropertiesInfo
  getFunctionPropertiesInfo(Function &F, FunctionAnalysisManager &FAM);

  bool operator==(const FunctionPropertiesInfo &FPI) const {
    return std::memcmp(this, &FPI, sizeof(FunctionPropertiesInfo)) == 0;
  }

  bool operator!=(const FunctionPropertiesInfo &FPI) const {
    return !(*this == FPI);
  }

  void print(raw_ostream &OS) const;

  /// Number of basic blocks
  int64_t BasicBlockCount = 0;

  /// Number of blocks reached from a conditional instruction, or that are
  /// 'cases' of a SwitchInstr.
  // FIXME: We may want to replace this with a more meaningful metric, like
  // number of conditionally executed blocks:
  // 'if (a) s();' would be counted here as 2 blocks, just like
  // 'if (a) s(); else s2(); s3();' would.
  int64_t BlocksReachedFromConditionalInstruction = 0;

  /// Number of uses of this function, plus 1 if the function is callable
  /// outside the module.
  int64_t Uses = 0;

  /// Number of direct calls made from this function to other functions
  /// defined in this module.
  int64_t DirectCallsToDefinedFunctions = 0;

  // Load Instruction Count
  int64_t LoadInstCount = 0;

  // Store Instruction Count
  int64_t StoreInstCount = 0;

  // Maximum Loop Depth in the Function
  int64_t MaxLoopDepth = 0;

  // Number of Top Level Loops in the Function
  int64_t TopLevelLoopCount = 0;

  // All non-debug instructions
  int64_t TotalInstructionCount = 0;
};

// Analysis pass
class FunctionPropertiesAnalysis
    : public AnalysisInfoMixin<FunctionPropertiesAnalysis> {

public:
  static AnalysisKey Key;

  using Result = const FunctionPropertiesInfo;

  FunctionPropertiesInfo run(Function &F, FunctionAnalysisManager &FAM);
};
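
// Illustrative sketch (added commentary, not part of the original header):
// fetching the computed properties for a Function `F` through a
// FunctionAnalysisManager `FAM` that has this analysis registered (e.g. via
// PassBuilder::registerFunctionAnalyses):
//
// \code
//   const FunctionPropertiesInfo &FPI =
//       FAM.getResult<FunctionPropertiesAnalysis>(F);
//   if (FPI.MaxLoopDepth > 0)
//     FPI.print(errs());
// \endcode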

/// Printer pass for the FunctionPropertiesAnalysis results.
class FunctionPropertiesPrinterPass
    : public PassInfoMixin<FunctionPropertiesPrinterPass> {
  raw_ostream &OS;

public:
  explicit FunctionPropertiesPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Correctly update FunctionPropertiesInfo post-inlining. A
/// FunctionPropertiesUpdater keeps the state necessary for tracking the changes
/// llvm::InlineFunction makes. The idea is that inlining will at most modify
/// a few BBs of the Caller (maybe the entry BB and definitely the callsite BB)
/// and potentially affect exception handling BBs in the case of invoke
/// inlining.
class FunctionPropertiesUpdater {
public:
  FunctionPropertiesUpdater(FunctionPropertiesInfo &FPI, CallBase &CB);

  void finish(FunctionAnalysisManager &FAM) const;
  bool finishAndTest(FunctionAnalysisManager &FAM) const {
    finish(FAM);
    return isUpdateValid(Caller, FPI, FAM);
  }

private:
  FunctionPropertiesInfo &FPI;
  BasicBlock &CallSiteBB;
  Function &Caller;

  static bool isUpdateValid(Function &F, const FunctionPropertiesInfo &FPI,
                            FunctionAnalysisManager &FAM);

  DenseSet<const BasicBlock *> Successors;
};
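
// Illustrative sketch (added commentary, not part of the original header):
// keeping FPI accurate across inlining. `CB` is the call site about to be
// inlined and `IFI` a hypothetical InlineFunctionInfo; InlineFunction is
// declared in llvm/Transforms/Utils/Cloning.h. The updater must be
// constructed *before* the call site is destroyed by inlining.
//
// \code
//   FunctionPropertiesUpdater FPU(FPI, CB);
//   if (InlineFunction(CB, IFI).isSuccess())
//     FPU.finish(FAM);
// \endcode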
} // namespace llvm
#endif // LLVM_ANALYSIS_FUNCTIONPROPERTIESANALYSIS_H
//=- CFLAliasAnalysisUtils.h - Utilities for CFL Alias Analysis ----*- C++-*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// \file
// These are the utilities/helpers used by the CFL Alias Analyses available in
// tree, i.e. Steensgaard's and Andersen's.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H
#define LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H

#include "llvm/IR/Argument.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"

namespace llvm {

namespace cflaa {

template <typename AAResult> struct FunctionHandle final : public CallbackVH {
  FunctionHandle(Function *Fn, AAResult *Result)
      : CallbackVH(Fn), Result(Result) {
    assert(Fn != nullptr);
    assert(Result != nullptr);
  }

  void deleted() override { removeSelfFromCache(); }
  void allUsesReplacedWith(Value *) override { removeSelfFromCache(); }

private:
  AAResult *Result;

  void removeSelfFromCache() {
    assert(Result != nullptr);
    auto *Val = getValPtr();
    Result->evict(cast<Function>(Val));
    setValPtr(nullptr);
  }
};

static inline const Function *parentFunctionOfValue(const Value *Val) {
  if (auto *Inst = dyn_cast<Instruction>(Val)) {
    auto *Bb = Inst->getParent();
    return Bb->getParent();
  }

  if (auto *Arg = dyn_cast<Argument>(Val))
    return Arg->getParent();
  return nullptr;
}
} // namespace cflaa
} // namespace llvm

#endif // LLVM_ANALYSIS_CFLALIASANALYSISUTILS_H
//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/InstructionCost.h"
#include <functional>
#include <optional>
#include <utility>

namespace llvm {

namespace Intrinsic {
typedef unsigned ID;
}

class AllocaInst;
class AssumptionCache;
class BlockFrequencyInfo;
class DominatorTree;
class BranchInst;
class CallBase;
class Function;
class GlobalValue;
class InstCombiner;
class OptimizationRemarkEmitter;
class InterleavedAccessInfo;
class IntrinsicInst;
class LoadInst;
class Loop;
class LoopInfo;
class LoopVectorizationLegality;
class ProfileSummaryInfo;
class RecurrenceDescriptor;
class SCEV;
class ScalarEvolution;
class StoreInst;
class SwitchInst;
class TargetLibraryInfo;
class Type;
class User;
class Value;
class VPIntrinsic;
struct KnownBits;

/// Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};

/// Attributes of a target dependent hardware loop.
struct HardwareLoopInfo {
  HardwareLoopInfo() = delete;
  HardwareLoopInfo(Loop *L);
  Loop *L = nullptr;
  BasicBlock *ExitBlock = nullptr;
  BranchInst *ExitBranch = nullptr;
  const SCEV *ExitCount = nullptr;
  IntegerType *CountType = nullptr;
  Value *LoopDecrement = nullptr; // Decrement the loop counter by this
                                  // value in every iteration.
  bool IsNestingLegal = false;    // Can a hardware loop be a parent to
                                  // another hardware loop?
  bool CounterInReg = false;      // Should loop counter be updated in
                                  // the loop via a phi?
  bool PerformEntryTest = false;  // Generate the intrinsic which also performs
                                  // icmp ne zero on the loop counter value and
                                  // produces an i1 to guard the loop entry.
  bool isHardwareLoopCandidate(ScalarEvolution &SE, LoopInfo &LI,
                               DominatorTree &DT, bool ForceNestedLoop = false,
                               bool ForceHardwareLoopPHI = false);
  bool canAnalyze(LoopInfo &LI);
};

class IntrinsicCostAttributes {
  const IntrinsicInst *II = nullptr;
  Type *RetTy = nullptr;
  Intrinsic::ID IID;
  SmallVector<Type *, 4> ParamTys;
  SmallVector<const Value *, 4> Arguments;
  FastMathFlags FMF;
  // If ScalarizationCost is invalid (the default), the cost of scalarizing the
  // arguments and the return value will be computed based on types.
  InstructionCost ScalarizationCost = InstructionCost::getInvalid();

public:
  IntrinsicCostAttributes(
      Intrinsic::ID Id, const CallBase &CI,
      InstructionCost ScalarCost = InstructionCost::getInvalid(),
      bool TypeBasedOnly = false);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<Type *> Tys,
      FastMathFlags Flags = FastMathFlags(), const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                          ArrayRef<const Value *> Args);

  IntrinsicCostAttributes(
      Intrinsic::ID Id, Type *RTy, ArrayRef<const Value *> Args,
      ArrayRef<Type *> Tys, FastMathFlags Flags = FastMathFlags(),
      const IntrinsicInst *I = nullptr,
      InstructionCost ScalarCost = InstructionCost::getInvalid());

  Intrinsic::ID getID() const { return IID; }
  const IntrinsicInst *getInst() const { return II; }
  Type *getReturnType() const { return RetTy; }
  FastMathFlags getFlags() const { return FMF; }
  InstructionCost getScalarizationCost() const { return ScalarizationCost; }
  const SmallVectorImpl<const Value *> &getArgs() const { return Arguments; }
  const SmallVectorImpl<Type *> &getArgTypes() const { return ParamTys; }

  bool isTypeBasedOnly() const {
    return Arguments.empty();
  }

  bool skipScalarizationCost() const { return ScalarizationCost.isValid(); }
};
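
// Illustrative sketch (added commentary, not part of the original header):
// building a type-based cost query for an intrinsic and feeding it to
// getIntrinsicInstrCost (declared further down in this header). `VecTy` is a
// hypothetical vector type and `TTI` a TargetTransformInfo instance:
//
// \code
//   IntrinsicCostAttributes ICA(Intrinsic::fmuladd, VecTy,
//                               {VecTy, VecTy, VecTy});
//   InstructionCost Cost = TTI.getIntrinsicInstrCost(
//       ICA, TargetTransformInfo::TCK_RecipThroughput);
// \endcode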

enum class TailFoldingStyle {
  /// Don't use tail folding
  None,
  /// Use predicate only to mask operations on data in the loop.
  /// When the VL is not known to be a power-of-2, this method requires a
  /// runtime overflow check for the i + VL in the loop because it compares the
  /// scalar induction variable against the tripcount rounded up by VL which may
  /// overflow. When the VL is a power-of-2, both the increment and uprounded
  /// tripcount will overflow to 0, which does not require a runtime check
  /// since the loop is exited when the loop induction variable equals the
  /// uprounded trip-count, which are both 0.
  Data,
  /// Same as Data, but avoids using the get.active.lane.mask intrinsic to
  /// calculate the mask and instead implements this with a
  /// splat/stepvector/cmp.
  /// FIXME: Can this kind be removed now that SelectionDAGBuilder expands the
  /// active.lane.mask intrinsic when it is not natively supported?
  DataWithoutLaneMask,
  /// Use predicate to control both data and control flow.
  /// This method always requires a runtime overflow check for the i + VL
  /// increment inside the loop, because it uses the result directly in the
  /// active.lane.mask to calculate the mask for the next iteration. If the
  /// increment overflows, the mask is no longer correct.
  DataAndControlFlow,
  /// Use predicate to control both data and control flow, but modify
  /// the trip count so that a runtime overflow check can be avoided
  /// and such that the scalar epilogue loop can always be removed.
  DataAndControlFlowWithoutRuntimeCheck
};

struct TailFoldingInfo {
  TargetLibraryInfo *TLI;
  LoopVectorizationLegality *LVL;
  InterleavedAccessInfo *IAI;
  TailFoldingInfo(TargetLibraryInfo *TLI, LoopVectorizationLegality *LVL,
                  InterleavedAccessInfo *IAI)
      : TLI(TLI), LVL(LVL), IAI(IAI) {}
};

class TargetTransformInfo;
typedef TargetTransformInfo TTI;

/// This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// The kind of cost model.
  ///
  /// There are several different cost models that can be customized by the
  /// target. The normalization of each cost model may be target specific.
  /// e.g. TCK_SizeAndLatency should be comparable to target thresholds such as
  /// those derived from MCSchedModel::LoopMicroOpBufferSize etc.
  enum TargetCostKind {
    TCK_RecipThroughput, ///< Reciprocal throughput.
    TCK_Latency,         ///< The latency of instruction.
    TCK_CodeSize,        ///< Instruction code size.
    TCK_SizeAndLatency   ///< The weighted sum of size and latency.
  };

  /// Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of code-size
  /// cost and execution cost. A free instruction is typically one that folds
  /// into another instruction. For example, reg-to-reg moves can often be
  /// skipped by renaming the registers in the CPU, but they still are encoded
  /// and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };

  /// Estimate the cost of a GEP operation when lowered.
  ///
  /// \p PointeeType is the source element type of the GEP.
  /// \p Ptr is the base pointer operand.
  /// \p Operands is the list of indices following the base pointer.
  ///
  /// \p AccessType is a hint as to what type of memory might be accessed by
  /// users of the GEP. getGEPCost will use it to determine if the GEP can be
  /// folded into the addressing mode of a load/store. If AccessType is null,
  /// then the resulting target type based off of PointeeType will be used as an
  /// approximation.
  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands, Type *AccessType = nullptr,
             TargetCostKind CostKind = TCK_SizeAndLatency) const;

  /// Describe known properties for a set of pointers.
  struct PointersChainInfo {
    /// All the GEPs in a set have the same base address.
    unsigned IsSameBaseAddress : 1;
    /// These properties are only valid if IsSameBaseAddress is set.
    /// True if all pointers are separated by a unit stride.
    unsigned IsUnitStride : 1;
    /// True if the distance between any two neighbouring pointers is a known
    /// value.
    unsigned IsKnownStride : 1;
    unsigned Reserved : 29;

    bool isSameBase() const { return IsSameBaseAddress; }
    bool isUnitStride() const { return IsSameBaseAddress && IsUnitStride; }
    bool isKnownStride() const { return IsSameBaseAddress && IsKnownStride; }

    static PointersChainInfo getUnitStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/1,
              /*IsKnownStride=*/1, 0};
    }
    static PointersChainInfo getKnownStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
              /*IsKnownStride=*/1, 0};
    }
    static PointersChainInfo getUnknownStride() {
      return {/*IsSameBaseAddress=*/1, /*IsUnitStride=*/0,
              /*IsKnownStride=*/0, 0};
    }
  };
  static_assert(sizeof(PointersChainInfo) == 4, "Was size increase justified?");

  /// Estimate the cost of a chain of pointers (typically pointer operands of a
  /// chain of loads or stores within same block) operations set when lowered.
  /// \p AccessTy is the type of the loads/stores that will ultimately use the
  /// \p Ptrs.
  InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const PointersChainInfo &Info, Type *AccessTy,
                       TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;
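
  // Illustrative sketch (added commentary, not part of the original header):
  // costing the pointer operands of a chain of consecutive loads, where
  // `Ptrs` holds GEPs off a common `Base` and `AccessTy` is the loaded type;
  // `TTI` is a TargetTransformInfo instance.
  //
  // \code
  //   InstructionCost C = TTI.getPointersChainCost(
  //       Ptrs, Base, TTI::PointersChainInfo::getUnitStride(), AccessTy);
  // \endcode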

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument.  Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  /// \returns A value to be added to the inlining threshold.
  unsigned adjustInliningThreshold(const CallBase *CB) const;

  /// \returns The cost of having an Alloca in the caller if not inlined, to be
  /// added to the threshold
  unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const;

  /// \returns Vector bonus in percent.
  ///
  /// Vector bonuses: We want to more aggressively inline vector-dense kernels
  /// and apply this bonus based on the percentage of vector instructions. A
  /// bonus is applied if the vector instructions exceed 50% and half that
  /// amount is applied if it exceeds 10%. Note that these bonuses are somewhat
  /// arbitrary and evolved over time by accident as much as because they are
  /// principled bonuses.
  /// FIXME: It would be nice to base the bonus values on something more
  /// scientific. A target may have no bonus on vector instructions.
  int getInlinerVectorBonusPercent() const;

  /// \return the expected cost of a memcpy, which could e.g. depend on the
  /// source/destination type and alignment and the number of bytes copied.
  InstructionCost getMemcpyCost(const Instruction *I) const;

  /// Returns the maximum memset / memcpy size in bytes that still makes it
  /// profitable to inline the call.
  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const;

  /// \return The estimated number of case clusters when lowering \p 'SI'.
  /// \p JTSize is set to the jump table size only when \p SI is suitable for a
  /// jump table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const;

  /// Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered.
  ///
  /// \p Operands is a list of operands which can be a result of transformations
  /// of the current operands. The number of operands on the list must be equal
  /// to the number of operands the IR user currently has, and their order on
  /// the list must match the order of the user's current operands.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  InstructionCost getInstructionCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     TargetCostKind CostKind) const;

  /// This is a helper function which calls the three-argument
  /// getInstructionCost with \p Operands which are the current operands U has.
  InstructionCost getInstructionCost(const User *U,
                                     TargetCostKind CostKind) const {
    SmallVector<const Value *, 4> Operands(U->operand_values());
    return getInstructionCost(U, Operands, CostKind);
  }
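
  // Illustrative sketch (added commentary, not part of the original header):
  // querying the size-and-latency cost of an instruction `I` and testing it
  // against the TargetCostConstants boundary values; `TTI` is a
  // TargetTransformInfo instance.
  //
  // \code
  //   InstructionCost Cost =
  //       TTI.getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
  //   if (Cost.isValid() && *Cost.getValue() == TargetTransformInfo::TCC_Free)
  //     ; // expected to fold away in lowering
  // \endcode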

  /// If a branch or a select condition is skewed in one direction by more than
  /// this factor, it is very likely to be predicted correctly.
  BranchProbability getPredictableBranchThreshold() const;

  /// Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to conditional
  /// branches.
  ///
  /// If \p F is passed, provides a context function. If \p F is known to only
  /// execute in a single threaded environment, the target may choose to skip
  /// uniformity analysis and assume all values are uniform.
  bool hasBranchDivergence(const Function *F = nullptr) const;

  /// Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent UniformityAnalysis.
  bool isSourceOfDivergence(const Value *V) const;

  /// Returns true for the target-specific set of operations which produce a
  /// uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Query the target whether the specified address space cast from FromAS to
  /// ToAS is valid.
  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  /// Return false if a \p AS0 address cannot possibly alias a \p AS1 address.
  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory
  /// with different address spaces. Access of a memory location through a
  /// pointer with this address space is expected to be legal but slower
  /// compared to the same memory location accessed through a pointer with a
  /// different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is converted
  /// to this address space, optimizations should attempt to replace the access
  /// with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// Return any intrinsic address operand indexes which may be rewritten if
  /// they use a flat address space pointer.
  ///
  /// \returns true if the intrinsic was handled.
  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const;

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;

  /// Return true if globals in this address space can have initializers other
  /// than `undef`.
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const;

  unsigned getAssumedAddrSpace(const Value *V) const;

  bool isSingleThreaded() const;

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const;

  /// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
  /// NewV, which has a different address space. This should happen for every
  /// operand index that collectFlatAddressOperands returned for the intrinsic.
  /// \returns nullptr if the intrinsic was not handled. Otherwise, returns the
  /// new value (which may be the original \p II with modified operands).
  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const;

  /// Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getInstructionCost values returned by this API, and the expectation is
    /// that the unrolled loop's instructions when run through that interface
    /// should not exceed this cost. However, this is only an estimate. Also,
    /// specific loops may be unrolled even with a cost above this threshold if
    /// deemed profitable. Set this to UINT_MAX to disable the loop body cost
    /// restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (The value should be no less than 100).
    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                    MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current cost
    /// threshold and other factors.
    unsigned Count;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to fall
    /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents number of instructions optimized when "back edge"
    // becomes "fall through" in unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when computing
    /// the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow unrolling of all the iterations of the runtime loop remainder.
    bool UnrollRemainder;
    /// Allow unroll and jam. Used to enable unroll and jam for the target.
    bool UnrollAndJam;
    /// Threshold for unroll and jam, for inner loop size. The 'Threshold'
    /// value above is used during unroll and jam for the outer loop size.
    /// This value is used in the same manner to limit the size of the inner
    /// loop.
    unsigned UnrollAndJamInnerLoopThreshold;
    /// Don't allow loop unrolling to simulate more than this number of
    /// iterations when checking full unroll profitability
    unsigned MaxIterationsCountToAnalyze;
    /// Don't disable runtime unroll for the loops which were vectorized.
    bool UnrollVectorizedLoop = false;
  };

  /// Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) const;
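
  // Illustrative sketch (added commentary, not part of the original header):
  // a target's TTI implementation (here a hypothetical MyTTIImpl) tightening
  // the target-independent defaults it was handed:
  //
  // \code
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  //                                           TTI::UnrollingPreferences &UP,
  //                                           OptimizationRemarkEmitter *ORE) {
  //     UP.Partial = true; // allow partial unrolling ...
  //     UP.MaxCount = 4;   // ... with at most 4 concatenated bodies
  //   }
  // \endcode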

  /// Query the target whether it would be profitable to convert the given loop
  /// into a hardware loop.
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const;

  /// Query the target whether it would be preferable to create a predicated
  /// vector loop, which can avoid the need to emit a scalar epilogue loop.
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const;

  /// Query the target what the preferred style of tail folding is.
  /// \param IVUpdateMayOverflow Tells whether it is known if the IV update
  /// may (or will never) overflow for the suggested VF/UF in the given loop.
  /// Targets can use this information to select a more optimal tail folding
  /// style. The value conservatively defaults to true, such that no assumptions
  /// are made on overflow.
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const;

  // Parameters that control the loop peeling transformation
  struct PeelingPreferences {
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, a
    /// peeling factor is selected based on profile information and other
    /// factors.
    unsigned PeelCount;
    /// Allow peeling off loop iterations.
    bool AllowPeeling;
    /// Allow peeling off loop iterations for loop nests.
    bool AllowLoopNestsPeeling;
    /// Allow peeling based on profile. Used to enable peeling off all
    /// iterations based on the provided profile.
    /// If the value is true, the peeling cost model can decide to peel only
    /// some iterations, in which case it will set this to false.
    bool PeelProfiledIterations;
  };

  /// Get target-customized preferences for the generic loop peeling
  /// transformation. The caller will initialize \p PP with the current
  /// target-independent defaults with information from \p L and \p SE.
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) const;

  /// Targets can implement their own combinations for target-specific
  /// intrinsics. This function will be called from the InstCombine pass every
  /// time a target-specific intrinsic is encountered.
  ///
  /// \returns std::nullopt to not do anything target specific or a value that
  /// will be returned from the InstCombiner. It is possible to return null and
  /// stop further processing of the intrinsic by returning nullptr.
  std::optional<Instruction *> instCombineIntrinsic(InstCombiner & IC,
                                                    IntrinsicInst & II) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
      InstCombiner & IC, IntrinsicInst & II, APInt DemandedMask,
      KnownBits & Known, bool &KnownBitsComputed) const;
  /// Can be used to implement target-specific instruction combining.
  /// \see instCombineIntrinsic
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner & IC, IntrinsicInst & II, APInt DemandedElts,
      APInt & UndefElts, APInt & UndefElts2, APInt & UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const;
  /// @}

  /// \name Scalar Target Information
  /// @{

  /// Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. A HW
  /// support is considered as "Fast" if it can outperform, or is on a par
  /// with, SW implementation when the population is sparse; otherwise, it is
  /// considered as "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// Return true if the specified immediate is a legal add immediate, that
  /// is, the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// If the target returns true in LSRWithInstrQueries(), I may be valid.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0,
                             Instruction *I = nullptr) const;

  /// Return true if LSR cost of C1 is lower than C2.
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const;

  /// Return true if LSR major cost is number of registers. Targets which
  /// implement their own isLSRCostLess and unset number of registers as major
  /// cost should return false, otherwise return true.
  bool isNumRegsMajorCostOfLSR() const;

  /// \returns true if LSR should not optimize a chain that includes \p I.
  bool isProfitableLSRChainElement(Instruction *I) const;

  /// Return true if the target can fuse a compare and branch.
  /// Loop-strength-reduction (LSR) uses that knowledge to adjust its cost
  /// calculation for the instructions in a loop.
  bool canMacroFuseCmp() const;

  /// Return true if the target can save a compare for loop count, for example
  /// hardware loop saves a compare.
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const;

  enum AddressingModeKind {
    AMK_PreIndexed,
    AMK_PostIndexed,
    AMK_None
  };

  /// Return the preferred addressing mode LSR should make efforts to generate.
  AddressingModeKind getPreferredAddressingMode(const Loop *L,
                                                ScalarEvolution *SE) const;

  /// Return true if the target supports masked store.
  bool isLegalMaskedStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked load.
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) const;

  /// Return true if the target supports nontemporal store.
  bool isLegalNTStore(Type *DataType, Align Alignment) const;
  /// Return true if the target supports nontemporal load.
  bool isLegalNTLoad(Type *DataType, Align Alignment) const;

  /// \returns true if the target supports broadcasting a load to a vector of
  /// type <NumElements x ElementTy>.
  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const;

  /// Return true if the target supports masked scatter.
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const;
  /// Return true if the target supports masked gather.
  bool isLegalMaskedGather(Type *DataType, Align Alignment) const;
  /// Return true if the target forces scalarizing of llvm.masked.gather
  /// intrinsics.
  bool forceScalarizeMaskedGather(VectorType *Type, Align Alignment) const;
  /// Return true if the target forces scalarizing of llvm.masked.scatter
  /// intrinsics.
  bool forceScalarizeMaskedScatter(VectorType *Type, Align Alignment) const;

  /// Return true if the target supports masked compress store.
  bool isLegalMaskedCompressStore(Type *DataType) const;
  /// Return true if the target supports masked expand load.
  bool isLegalMaskedExpandLoad(Type *DataType) const;

  /// Return true if this is an alternating opcode pattern that can be lowered
  /// to a single instruction on the target. In X86 this is for the addsub
  /// instruction which corresponds to a Shuffle + FAdd + FSub pattern in IR.
  /// This function expects two opcodes, \p Opcode0 and \p Opcode1, selected
  /// by \p OpcodeMask. The mask contains one bit per lane and is a `0` when
  /// \p Opcode0 is selected and a `1` when \p Opcode1 is selected.
  /// \p VecTy is the vector type of the instruction to be generated.
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const;
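
  // Illustrative usage sketch (not part of the LLVM sources): for a 4-lane
  // addsub pattern (FAdd in even lanes, FSub in odd lanes) the mask has bits
  // 1 and 3 set, selecting Opcode1 in those lanes. `TTI` and `V4F32Ty` are
  // assumed to exist in the caller:
  //
  //   SmallBitVector OpcodeMask(4); // one bit per lane, 0 selects Opcode0
  //   OpcodeMask.set(1);
  //   OpcodeMask.set(3);
  //   bool OK = TTI.isLegalAltInstr(V4F32Ty, Instruction::FAdd,
  //                                 Instruction::FSub, OpcodeMask);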

  /// Return true if we should be enabling ordered reductions for the target.
  bool enableOrderedReductions() const;

  /// Return true if the target has a unified operation to calculate division
  /// and remainder. If so, the additional implicit multiplication and
  /// subtraction required to calculate a remainder from division are free. This
  /// can enable more aggressive transformations for division and remainder than
  /// would typically be allowed using throughput or size cost models.
  bool hasDivRemOp(Type *DataType, bool IsSigned) const;

  /// Return true if the given instruction (assumed to be a memory access
  /// instruction) has a volatile variant. If that's the case then we can avoid
  /// addrspacecast to generic AS for volatile loads/stores. Default
  /// implementation returns false, which prevents address space inference for
  /// volatile loads/stores.
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const;

  /// Return true if the target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace = 0) const;

  /// Return true if the loop strength reduce pass should make
  /// Instruction* based TTI queries to isLegalAddressingMode(). This is
  /// needed on SystemZ, where e.g. a memcpy can only have a 12 bit unsigned
  /// immediate offset and no index register.
  bool LSRWithInstrQueries() const;

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// Return true if it is profitable to hoist instructions in the
  /// then/else blocks to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  bool useAA() const;

  /// Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// Returns the estimated number of registers required to represent \p Ty.
  unsigned getRegUsageForType(Type *Ty) const;

  /// Return true if switches should be turned into lookup tables for the
  /// target.
  bool shouldBuildLookupTables() const;

  /// Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// Return true if lookup tables should be turned into relative lookup tables.
  bool shouldBuildRelLookupTables() const;

  /// Return true if the input function, which is cold at all call sites,
  /// should use the coldcc calling convention.
  bool useColdCCForColdCall(Function &F) const;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the demanded result elements need to be inserted and/or
  /// extracted from vectors.
  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind) const;
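
  // Illustrative usage sketch (not part of the LLVM sources): costing the
  // insertion of only the two low demanded lanes of a 4-element vector,
  // where `TTI` and `V4I32Ty` come from the surrounding code:
  //
  //   APInt DemandedElts = APInt::getLowBitsSet(4, 2); // lanes 0 and 1
  //   InstructionCost C = TTI.getScalarizationOverhead(
  //       V4I32Ty, DemandedElts, /*Insert=*/true, /*Extract=*/false,
  //       TTI::TCK_RecipThroughput);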

  /// Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The (potentially vector) types to use for each
  /// argument are passed via Tys.
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) const;

  /// If the target has efficient vector element load/store instructions, it
  /// can return true here so that insertion/extraction costs are not added to
  /// the scalarization cost of a load/store.
  bool supportsEfficientVectorElementLoadStore() const;

  /// If the target supports tail calls.
  bool supportsTailCalls() const;

  /// If the target supports tail calls on \p CB.
  bool supportsTailCallFor(const CallBase *CB) const;

  /// Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// Returns options for expansion of memcmp. IsZeroCmp is
  /// true if this is the expansion of memcmp(p1, p2, s) == 0.
  struct MemCmpExpansionOptions {
    // Return true if memcmp expansion is enabled.
    operator bool() const { return MaxNumLoads > 0; }

    // Maximum number of load operations.
    unsigned MaxNumLoads = 0;

    // The list of available load sizes (in bytes), sorted in decreasing order.
    SmallVector<unsigned, 8> LoadSizes;

    // For memcmp expansion when the memcmp result is only compared equal or
    // not-equal to 0, allow up to this number of load pairs per block. As an
    // example, this may allow 'memcmp(a, b, 3) == 0' in a single block:
    //   a0 = load2bytes &a[0]
    //   b0 = load2bytes &b[0]
    //   a2 = load1byte  &a[2]
    //   b2 = load1byte  &b[2]
    //   r  = cmp eq (a0 ^ b0 | a2 ^ b2), 0
    unsigned NumLoadsPerBlock = 1;

    // Set to true to allow overlapping loads. For example, 7-byte compares can
    // be done with two 4-byte compares instead of 4+2+1-byte compares. This
    // requires all loads in LoadSizes to be doable in an unaligned way.
    bool AllowOverlappingLoads = false;
  };
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const;
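
  // Illustrative usage sketch (not part of the LLVM sources): a caller such
  // as a memcmp-expansion pass would query and then consume the options
  // roughly like this (expandMemCmp is a hypothetical helper):
  //
  //   if (auto Options = TTI.enableMemCmpExpansion(/*OptSize=*/false,
  //                                                /*IsZeroCmp=*/true)) {
  //     // Options.LoadSizes lists legal load widths, largest first, and
  //     // Options.MaxNumLoads bounds the total size of the expansion.
  //     expandMemCmp(MemCmpCall, Options);
  //   }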

  /// Should the Select Optimization pass be enabled and run.
  bool enableSelectOptimize() const;

  /// Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// Enable matching of interleaved access groups that contain predicated
  /// accesses or gaps and are therefore vectorized using masked
  /// vector loads/stores.
  bool enableMaskedInterleavedAccessVectorization() const;

  /// Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because the semantics of vector and scalar
  /// floating-point operations may differ. For example, ARM NEON v7 SIMD math
  /// does not support IEEE-754 denormal numbers, while depending on the
  /// platform, scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      Align Alignment = Align(1),
                                      unsigned *Fast = nullptr) const;

  /// Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// Return true if the cost of the instruction is too high to speculatively
  /// execute and should be kept behind a branch.
  /// This normally just wraps around a getInstructionCost() call, but some
  /// targets might report a low TCK_SizeAndLatency value that is incompatible
  /// with the fixed TCC_Expensive value.
  /// NOTE: This assumes the instruction passes isSafeToSpeculativelyExecute().
  bool isExpensiveToSpeculativelyExecute(const Instruction *I) const;

  /// Return true if it is faster to check if a floating-point value is NaN
  /// (or not-NaN) versus a comparison against a constant FP zero value.
  /// Targets should override this if materializing a 0.0 for comparison is
  /// generally as cheap as checking for ordered/unordered.
  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const;

  /// Return the expected cost of supporting the floating point operation
  /// of the specified type.
  InstructionCost getFPOpCost(Type *Ty) const;

  /// Return the expected cost of materializing for the given integer
  /// immediate of the specified type.
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) const;

  /// Return the expected cost of materialization for the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const;
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) const;

  /// Return the expected cost for the given integer when optimising
  /// for size. This differs from the other integer immediate cost functions
  /// in that it is subtarget agnostic. This is useful when you e.g. target
  /// one ISA such as AArch32 but smaller encodings could be possible with
  /// another such as Thumb. This return value is used as a penalty when the
  /// total cost of a constant is calculated (the bigger the cost, the more
  /// beneficial constant hoisting is).
  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) const;
  /// @}

  /// \name Vector Target Information
  /// @{

  /// The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,        ///< Broadcast element 0 to all other elements.
    SK_Reverse,          ///< Reverse the order of the vector.
    SK_Select,           ///< Selects elements from the corresponding lane of
                         ///< either source operand. This is equivalent to a
                         ///< vector select with a constant condition operand.
    SK_Transpose,        ///< Transpose two vectors.
    SK_InsertSubvector,  ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector, ///< ExtractSubvector. Index indicates start offset.
    SK_PermuteTwoSrc,    ///< Merge elements from two source vectors into one
                         ///< with any shuffle mask.
    SK_PermuteSingleSrc, ///< Shuffle elements of single source vector with any
                         ///< shuffle mask.
    SK_Splice            ///< Concatenates elements from the first input vector
                         ///< with elements of the second input vector,
                         ///< returning a vector of the same type as the input
                         ///< vectors. Index indicates start offset in the
                         ///< first input vector.
  };

  /// Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is uniform constant.
    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
  };

  /// Additional properties of an operand's values.
  enum OperandValueProperties {
    OP_None = 0,
    OP_PowerOf2 = 1,
    OP_NegatedPowerOf2 = 2,
  };

  // Describe the values an operand can take.  We're in the process
  // of migrating uses of OperandValueKind and OperandValueProperties
  // to use this class, and then will change the internal representation.
  struct OperandValueInfo {
    OperandValueKind Kind = OK_AnyValue;
    OperandValueProperties Properties = OP_None;

    bool isConstant() const {
      return Kind == OK_UniformConstantValue || Kind == OK_NonUniformConstantValue;
    }
    bool isUniform() const {
      return Kind == OK_UniformConstantValue || Kind == OK_UniformValue;
    }
    bool isPowerOf2() const {
      return Properties == OP_PowerOf2;
    }
    bool isNegatedPowerOf2() const {
      return Properties == OP_NegatedPowerOf2;
    }

    OperandValueInfo getNoProps() const {
      return {Kind, OP_None};
    }
  };
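
  // Illustrative usage sketch (not part of the LLVM sources): describing a
  // divisor known to be a uniform (splat) power-of-2 constant, which lets a
  // target report a cheaper cost, e.g. a udiv lowered to a vector shift:
  //
  //   TTI::OperandValueInfo DivisorInfo = {TTI::OK_UniformConstantValue,
  //                                        TTI::OP_PowerOf2};
  //   assert(DivisorInfo.isConstant() && DivisorInfo.isPowerOf2());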

  /// \return the number of registers in the target-provided register class.
  unsigned getNumberOfRegisters(unsigned ClassID) const;

  /// \return the target-provided register class ID for the provided type,
  /// accounting for type promotion and other type-legalization techniques that
  /// the target might apply. However, it specifically does not account for the
  /// scalarization or splitting of vector types. Should a vector type require
  /// scalarization or splitting into multiple underlying vector registers, that
  /// type should be mapped to a register class containing no registers.
  /// Specifically, this is designed to provide a simple, high-level view of the
  /// register allocation later performed by the backend. These register classes
  /// don't necessarily map onto the register classes used by the backend.
  /// FIXME: It's not currently possible to determine how many registers
  /// are used by the provided type.
  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;

  /// \return the target-provided register class name
  const char *getRegisterClassName(unsigned ClassID) const;

  enum RegisterKind { RGK_Scalar, RGK_FixedWidthVector, RGK_ScalableVector };

  /// \return The width of the largest scalar or vector register type.
  TypeSize getRegisterBitWidth(RegisterKind K) const;

  /// \return The width of the smallest vector register type.
  unsigned getMinVectorRegisterBitWidth() const;

  /// \return The maximum value of vscale if the target specifies an
  ///  architectural maximum vector length, and std::nullopt otherwise.
  std::optional<unsigned> getMaxVScale() const;

  /// \return the value of vscale to tune the cost model for.
  std::optional<unsigned> getVScaleForTuning() const;

  /// \return true if vscale is known to be a power of 2
  bool isVScaleKnownToBeAPowerOfTwo() const;

  /// \return True if the vectorization factor should be chosen to
  /// make the vector of the smallest element type match the size of a
  /// vector register. For wider element types, this could result in
  /// creating vectors that span multiple vector registers.
  /// If false, the vectorization factor will be chosen based on the
  /// size of the widest element type.
  /// \p K Register Kind for vectorization.
  bool shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const;

  /// \return The minimum vectorization factor for types of given element
  /// bit width, or 0 if there is no minimum VF. The returned value only
  /// applies when shouldMaximizeVectorBandwidth returns true.
  /// If IsScalable is true, the returned ElementCount must be a scalable VF.
  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const;

  /// \return The maximum vectorization factor for types of given element
  /// bit width and opcode, or 0 if there is no maximum VF.
  /// Currently only used by the SLP vectorizer.
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const;

  /// \return The minimum vectorization factor for the store instruction. Given
  /// the initial estimate of the minimum vector factor and the store value
  /// type, it tries to find the lowest possible VF that might still be
  /// profitable for vectorization.
  /// \param VF Initial estimation of the minimum vector factor.
  /// \param ScalarMemTy Scalar memory type of the store operation.
  /// \param ScalarValTy Scalar type of the stored value.
  /// Currently only used by the SLP vectorizer.
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const;

  /// \return True if \p I should be considered for address type promotion.
  /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
  /// profitable without finding other extensions fed by the same input.
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;

  /// \return The size of a cache line in bytes.
  unsigned getCacheLineSize() const;

  /// The possible cache levels
  enum class CacheLevel {
    L1D, // The L1 data cache
    L2D, // The L2 data cache

    // We currently do not model L3 caches, as their sizes differ widely between
    // microarchitectures. Also, we currently do not have a use for L3 cache
    // size modeling yet.
  };

  /// \return The size of the cache level in bytes, if available.
  std::optional<unsigned> getCacheSize(CacheLevel Level) const;

  /// \return The associativity of the cache level, if available.
  std::optional<unsigned> getCacheAssociativity(CacheLevel Level) const;

  /// \return How much before a load we should place the prefetch
  /// instruction.  This is currently measured in number of
  /// instructions.
  unsigned getPrefetchDistance() const;

  /// Some HW prefetchers can handle accesses up to a certain constant stride.
  /// Sometimes prefetching is beneficial even below the HW prefetcher limit,
  /// and the arguments provided are meant to serve as a basis for deciding this
  /// for a particular loop.
  ///
  /// \param NumMemAccesses        Number of memory accesses in the loop.
  /// \param NumStridedMemAccesses Number of the memory accesses that
  ///                              ScalarEvolution could find a known stride
  ///                              for.
  /// \param NumPrefetches         Number of software prefetches that will be
  ///                              emitted as determined by the addresses
  ///                              involved and the cache line size.
  /// \param HasCall               True if the loop contains a call.
  ///
  /// \return This is the minimum stride in bytes where it makes sense to start
  ///         adding SW prefetches. The default is 1, i.e. prefetch with any
  ///         stride.
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches, bool HasCall) const;

  /// \return The maximum number of iterations to prefetch ahead.  If
  /// the required number of iterations is more than this number, no
  /// prefetching is performed.
  unsigned getMaxPrefetchIterationsAhead() const;

  /// \return True if prefetching should also be done for writes.
  bool enableWritePrefetching() const;

  /// \return True if the target wants to issue a prefetch in address space
  /// \p AS.
  bool shouldPrefetchAddressSpace(unsigned AS) const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(ElementCount VF) const;

  /// Collect properties of V used in cost analysis, e.g. OP_PowerOf2.
  static OperandValueInfo getOperandInfo(const Value *V);

  /// This is an approximation of reciprocal throughput of a math/logic op.
  /// A higher cost indicates less expected throughput.
  /// From Agner Fog's guides, reciprocal throughput is "the average number of
  /// clock cycles per instruction when the instructions are not part of a
  /// limiting dependency chain."
  /// Therefore, costs should be scaled to account for multiple execution units
  /// on the target that can process this type of instruction. For example, if
  /// there are 5 scalar integer units and 2 vector integer units that can
  /// calculate an 'add' in a single cycle, this model should indicate that the
  /// cost of the vector add instruction is 2.5 times the cost of the scalar
  /// add instruction.
  /// \p Args is an optional argument which holds the instruction operands
  /// values so the TTI can analyze those values searching for special
  /// cases or optimizations based on those values.
  /// \p CxtI is the optional original context instruction, if one exists, to
  /// provide even more information.
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      TTI::OperandValueInfo Opd1Info = {TTI::OK_AnyValue, TTI::OP_None},
      TTI::OperandValueInfo Opd2Info = {TTI::OK_AnyValue, TTI::OP_None},
      ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
      const Instruction *CxtI = nullptr) const;
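
  // Illustrative usage sketch (not part of the LLVM sources): costing a
  // vector udiv whose second operand is a uniform power-of-2 constant, with
  // `TTI` and `V4I32Ty` provided by the caller:
  //
  //   InstructionCost C = TTI.getArithmeticInstrCost(
  //       Instruction::UDiv, V4I32Ty, TTI::TCK_RecipThroughput,
  //       {TTI::OK_AnyValue, TTI::OP_None},
  //       {TTI::OK_UniformConstantValue, TTI::OP_PowerOf2});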

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The exact mask may be passed as Mask, or else the array will be empty.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds to show the insert/extract point and the type of
  /// the subvector being inserted/extracted. The operands of the shuffle can be
  /// passed through \p Args, which helps improve the cost estimation in some
  /// cases, like in broadcast loads.
  /// NOTE: For subvector extractions Tp represents the source type.
  InstructionCost
  getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                 ArrayRef<int> Mask = std::nullopt,
                 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                 int Index = 0, VectorType *SubTp = nullptr,
                 ArrayRef<const Value *> Args = std::nullopt) const;
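
  // Illustrative usage sketch (not part of the LLVM sources): costing a
  // broadcast of lane 0 across a 4-element vector; the explicit mask is
  // optional. For SK_ExtractSubvector, Tp would instead be the wider source
  // type and Index the extraction offset:
  //
  //   int MaskVals[] = {0, 0, 0, 0};
  //   InstructionCost C =
  //       TTI.getShuffleCost(TTI::SK_Broadcast, V4I32Ty, MaskVals);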

  /// Represents a hint about the context in which a cast is used.
  ///
  /// For zext/sext, the context of the cast is the operand, which must be a
  /// load of some kind. For trunc, the context of the cast is the single
  /// user of the instruction, which must be a store of some kind.
  ///
  /// This enum allows the vectorizer to give getCastInstrCost an idea of the
  /// type of cast it's dealing with, as not every cast is equal. For instance,
  /// the zext of a load may be free, but the zext of an interleaved load can
  /// be (very) expensive!
  ///
  /// See \c getCastContextHint to compute a CastContextHint from a cast
  /// Instruction*. Callers can use it if they don't need to override the
  /// context and just want it to be calculated from the instruction.
  ///
  /// FIXME: This handles the types of load/store that the vectorizer can
  /// produce, which are the cases where the context instruction is most
  /// likely to be incorrect. There are other situations where that can happen
  /// too, which might be handled here but in the long run a more general
  /// solution of costing multiple instructions at the same time may be better.
  enum class CastContextHint : uint8_t {
    None,          ///< The cast is not used with a load/store of any kind.
    Normal,        ///< The cast is used with a normal load/store.
    Masked,        ///< The cast is used with a masked load/store.
    GatherScatter, ///< The cast is used with a gather/scatter.
    Interleave,    ///< The cast is used with an interleaved load/store.
    Reversed,      ///< The cast is used with a reversed load/store.
  };

  /// Calculates a CastContextHint from \p I.
  /// This should be used by callers of getCastInstrCost if they wish to
  /// determine the context from some instruction.
  /// \returns the CastContextHint for ZExt/SExt/Trunc, None if \p I is nullptr,
  /// or if it's another type of cast.
  static CastContextHint getCastContextHint(const Instruction *I);

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// zext, etc. If there is an existing instruction that holds Opcode, it
  /// may be passed in the 'I' parameter.
  InstructionCost
  getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                   TTI::CastContextHint CCH,
                   TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
                   const Instruction *I = nullptr) const;
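
  // Illustrative usage sketch (not part of the LLVM sources): costing the
  // zext of a load, deriving the context hint from the existing instruction.
  // `TTI`, `ZExtI`, `I32Ty`, and `I64Ty` are assumed to exist in the caller:
  //
  //   TTI::CastContextHint CCH =
  //       TargetTransformInfo::getCastContextHint(ZExtI);
  //   InstructionCost C = TTI.getCastInstrCost(
  //       Instruction::ZExt, I64Ty, I32Ty, CCH, TTI::TCK_RecipThroughput,
  //       ZExtI);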

  /// \return The expected cost of a sign- or zero-extended vector extract. Use
  /// Index = -1 to indicate that there is no information about the index value.
  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy,
                                           unsigned Index) const;

  /// \return The expected cost of control-flow related instructions such as
  /// Phi, Ret, Br, Switch.
  InstructionCost
  getCFInstrCost(unsigned Opcode,
                 TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency,
                 const Instruction *I = nullptr) const;

  /// \returns The expected cost of compare and select instructions. If there
  /// is an existing instruction that holds Opcode, it may be passed in the
  /// 'I' parameter. The \p VecPred parameter can be used to indicate the select
  /// is using a compare with the specified predicate as condition. When vector
  /// types are passed, \p VecPred must be used for all lanes.
  InstructionCost
  getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                     CmpInst::Predicate VecPred,
                     TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                     const Instruction *I = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// Use -1 to indicate that there is no information on the index value.
  /// This is used when the instruction is not available; a typical use
  /// case is to provision the cost of vectorization/scalarization in
  /// vectorizer passes.
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index = -1, Value *Op0 = nullptr,
                                     Value *Op1 = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// This is used when the instruction is available; the implementation
  /// asserts that 'I' is not nullptr.
  ///
  /// A typical suitable use case is cost estimation when vector instruction
  /// exists (e.g., from basic blocks during transformation).
  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index = -1) const;

  /// \return The cost of replication shuffle of \p VF elements typed \p EltTy
  /// \p ReplicationFactor times.
  ///
  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
  ///   <0,0,0,1,1,1,2,2,2,3,3,3>
  InstructionCost getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
                                            int VF,
                                            const APInt &DemandedDstElts,
                                            TTI::TargetCostKind CostKind);

  /// \return The cost of Load and Store instructions.
  InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                  unsigned AddressSpace,
                  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                  OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
                  const Instruction *I = nullptr) const;

  /// \return The cost of VP Load and Store instructions.
  InstructionCost
  getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                    unsigned AddressSpace,
                    TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
                    const Instruction *I = nullptr) const;

  /// \return The cost of masked Load and Store instructions.
  InstructionCost getMaskedMemoryOpCost(
      unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// \return The cost of Gather or Scatter operation
  /// \p Opcode - is a type of memory access Load or Store
  /// \p DataTy - a vector type of the data to be loaded or stored
  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
  /// \p VariableMask - true when the memory access is predicated with a mask
  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of single element
  /// \p I - the optional original context instruction, if one exists, e.g. the
  ///        load/store to transform or the call to the gather/scatter intrinsic
  InstructionCost getGatherScatterOpCost(
      unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
      Align Alignment, TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      const Instruction *I = nullptr) const;

  /// \return The cost of the interleaved memory operation.
  /// \p Opcode is the memory operation code
  /// \p VecTy is the vector type of the interleaved access.
  /// \p Factor is the interleave factor
  /// \p Indices is the indices for interleaved load members (as interleaved
  ///    load allows gaps)
  /// \p Alignment is the alignment of the memory operation
  /// \p AddressSpace is address space of the pointer.
  /// \p UseMaskForCond indicates if the memory access is predicated.
  /// \p UseMaskForGaps indicates if gaps should be masked.
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) const;

  /// A helper function to determine the type of reduction algorithm used
  /// for a given \p Opcode and set of FastMathFlags \p FMF.
  static bool requiresOrderedReduction(std::optional<FastMathFlags> FMF) {
    return FMF && !(*FMF).allowReassoc();
  }
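
  // Illustrative sketch (not part of the LLVM sources): the fast-math flags
  // select between the ordered and tree-wise FP reduction schemes described
  // below:
  //
  //   FastMathFlags FMF;
  //   assert(TargetTransformInfo::requiresOrderedReduction(FMF));
  //   FMF.setAllowReassoc();
  //   assert(!TargetTransformInfo::requiresOrderedReduction(FMF));
  //   assert(!TargetTransformInfo::requiresOrderedReduction(std::nullopt));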

  /// Calculate the cost of vector reduction intrinsics.
  ///
  /// This is the cost of reducing the vector value of type \p Ty to a scalar
  /// value using the operation denoted by \p Opcode. The FastMathFlags
  /// parameter \p FMF indicates what type of reduction we are performing:
  ///   1. Tree-wise. This is the typical 'fast' reduction performed that
  ///   involves successively splitting a vector into half and doing the
  ///   operation on the pair of halves until you have a scalar value. For
  ///   example:
  ///     (v0, v1, v2, v3)
  ///     ((v0+v2), (v1+v3), undef, undef)
  ///     ((v0+v2+v1+v3), undef, undef, undef)
  ///   This is the default behaviour for integer operations, whereas for
  ///   floating point we only do this if \p FMF indicates that
  ///   reassociation is allowed.
  ///   2. Ordered. For a vector with N elements this involves performing N
  ///   operations in lane order, starting with an initial scalar value, i.e.
  ///     result = InitVal + v0
  ///     result = result + v1
  ///     result = result + v2
  ///     result = result + v3
  ///   This is only the case for FP operations and when reassociation is not
  ///   allowed.
  ///
  InstructionCost getArithmeticReductionCost(
      unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  InstructionCost getMinMaxReductionCost(
      Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF = FastMathFlags(),
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// Calculate the cost of an extended reduction pattern, similar to
  /// getArithmeticReductionCost of an Add reduction with multiply and optional
  /// extensions. This is the cost of:
  ///   ResTy vecreduce.add(mul(A, B)), or
  ///   ResTy vecreduce.add(mul(ext(Ty A), ext(Ty B))).
  InstructionCost getMulAccReductionCost(
      bool IsUnsigned, Type *ResTy, VectorType *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// Calculate the cost of an extended reduction pattern, similar to
  /// getArithmeticReductionCost of a reduction with an extension.
  /// This is the cost of:
  ///   ResTy vecreduce.opcode(ext(Ty A)).
  InstructionCost getExtendedReductionCost(
      unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
      FastMathFlags FMF,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) const;

  /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
  /// Three cases are handled: 1. a scalar instruction, 2. a vector
  /// instruction, and 3. a scalar instruction which is to be vectorized.
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) const;

  /// \returns The cost of Call instructions.
  InstructionCost getCallInstrCost(
      Function *F, Type *RetTy, ArrayRef<Type *> Tys,
      TTI::TargetCostKind CostKind = TTI::TCK_SizeAndLatency) const;

  /// \returns The number of pieces into which the provided type must be
  /// split during legalization. Zero is returned when the answer is unknown.
  unsigned getNumberOfParts(Type *Tp) const;

  /// \returns The cost of the address computation. For most targets this can
  /// be merged into the instruction indexing mode. Some targets might want to
  /// distinguish between address computation for memory operations on vector
  /// types and scalar types. Such targets should override this function.
  /// The 'SE' parameter holds a pointer to the scalar evolution object, which
  /// is used to get the step value of 'Ptr' in the case of a constant stride.
  /// The 'Ptr' parameter holds the SCEV of the access pointer.
  InstructionCost getAddressComputationCost(Type *Ty,
                                            ScalarEvolution *SE = nullptr,
                                            const SCEV *Ptr = nullptr) const;

  /// \returns The cost, if any, of keeping values of the given types alive
  /// over a callsite.
  ///
  /// Some types may require the use of register classes that do not have
  /// any callee-saved registers, so would require a spill and fill.
  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;

  /// \returns True if the intrinsic is a supported memory intrinsic.  Info
  /// will contain additional information - whether the intrinsic may read
  /// or write memory, volatility, and the pointer.  Info is undefined
  /// if false is returned.
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  /// \returns The maximum element size, in bytes, for an element
  /// unordered-atomic memory intrinsic.
  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  /// \returns A value which is the result of the given memory intrinsic.  New
  /// instructions may be created to extract the result from the given intrinsic
  /// memory operation.  Returns nullptr if the target cannot create a result
  /// from the given intrinsic.
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const;

  /// \returns The type to use in a loop expansion of a memcpy call.
  Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicElementSize = std::nullopt) const;

  /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
  /// \param RemainingBytes The number of bytes to copy.
  ///
  /// Calculates the operand types to use when copying \p RemainingBytes of
  /// memory, where source and destination alignments are \p SrcAlign and
  /// \p DestAlign respectively.
  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicCpySize = std::nullopt) const;

  /// \returns True if the two functions have compatible attributes for inlining
  /// purposes.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \returns True if the caller and callee agree on how \p Types will be
  /// passed to or returned from the callee.
  /// \param Types List of types to check.
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const;

  /// The type of load/store indexing.
  enum MemIndexedMode {
    MIM_Unindexed, ///< No indexing.
    MIM_PreInc,    ///< Pre-incrementing.
    MIM_PreDec,    ///< Pre-decrementing.
    MIM_PostInc,   ///< Post-incrementing.
    MIM_PostDec    ///< Post-decrementing.
  };

  /// \returns True if the specified indexed load for the given type is legal.
  bool isIndexedLoadLegal(enum MemIndexedMode Mode, Type *Ty) const;

  /// \returns True if the specified indexed store for the given type is legal.
  bool isIndexedStoreLegal(enum MemIndexedMode Mode, Type *Ty) const;
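
  // Illustrative usage sketch (not part of the LLVM sources): a pass
  // considering a post-increment rewrite of a load would first check, with
  // `TTI` and `I32Ty` coming from the surrounding code:
  //
  //   if (TTI.isIndexedLoadLegal(TTI::MIM_PostInc, I32Ty))
  //     ; // fold the pointer increment into the load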

  /// \returns The bitwidth of the largest vector type that should be used to
  /// load/store in the given address space.
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  /// \returns True if the load instruction is legal to vectorize.
  bool isLegalToVectorizeLoad(LoadInst *LI) const;

  /// \returns True if the store instruction is legal to vectorize.
  bool isLegalToVectorizeStore(StoreInst *SI) const;

  /// \returns True if it is legal to vectorize the given load chain.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given store chain.
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given reduction kind.
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const;

  /// \returns True if the given type is supported for scalable vectors
  bool isElementTypeLegalForScalableVector(Type *Ty) const;

  /// \returns The new vector factor value if the target doesn't support
  /// \p ChainSizeInBytes loads or has a better vector factor.
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;

  /// \returns The new vector factor value if the target doesn't support
  /// \p ChainSizeInBytes stores or has a better vector factor.
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;

  /// Flags describing the kind of vector reduction.
  struct ReductionFlags {
    ReductionFlags() = default;
    bool IsMaxOp =
        false; ///< If the op is a min/max kind, true if it's a max operation.
    bool IsSigned = false; ///< Whether the operation is a signed int reduction.
    bool NoNaN =
        false; ///< If op is an fp min/max, whether NaNs may be present.
  };

  /// \returns True if the target prefers reductions to be performed in the
  /// loop.
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const;

  /// \returns True if the target prefers the reduction select to be kept in
  /// the loop when tail folding, i.e.
  /// loop:
  ///   p = phi (0, s)
  ///   a = add (p, x)
  ///   s = select (mask, a, p)
  /// vecreduce.add(s)
  ///
  /// As opposed to the normal scheme of p = phi (0, a) which allows the select
  /// to be pulled out of the loop. If the select(.., add, ..) can be predicated
  /// by the target, this can lead to cleaner code generation.
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const;

  /// Return true if the loop vectorizer should consider vectorizing an
  /// otherwise scalar epilogue loop.
  bool preferEpilogueVectorization() const;

  /// \returns True if the target wants to expand the given reduction intrinsic
  /// into a shuffle sequence.
  bool shouldExpandReduction(const IntrinsicInst *II) const;

  /// \returns the size cost of rematerializing a GlobalValue address relative
  /// to a stack reload.
  unsigned getGISelRematGlobalCost() const;

  /// \returns the lower bound of a trip count to decide on vectorization
  /// while tail-folding.
  unsigned getMinTripCountTailFoldingThreshold() const;

  /// \returns True if the target supports scalable vectors.
  bool supportsScalableVectors() const;

  /// \return true when scalable vectorization is preferred.
  bool enableScalableVectorization() const;

  /// \name Vector Predication Information
  /// @{
  /// Whether the target supports the %evl parameter of VP intrinsics
  /// efficiently in hardware, for the given opcode and type/alignment (see
  /// the LLVM Language Reference - "Vector Predication Intrinsics").
  /// Use of %evl is discouraged when that is not the case.
  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                             Align Alignment) const;

  struct VPLegalization {
    enum VPTransform {
      // keep the predicating parameter
      Legal = 0,
      // where legal, discard the predicate parameter
      Discard = 1,
      // transform into something else that is also predicating
      Convert = 2
    };

    // How to transform the EVL parameter.
    // Legal:   keep the EVL parameter as it is.
    // Discard: Ignore the EVL parameter where it is safe to do so.
    // Convert: Fold the EVL into the mask parameter.
    VPTransform EVLParamStrategy;

    // How to transform the operator.
    // Legal:   The target supports this operator.
    // Convert: Convert this to a non-VP operation.
    // The 'Discard' strategy is invalid.
    VPTransform OpStrategy;

    bool shouldDoNothing() const {
      return (EVLParamStrategy == Legal) && (OpStrategy == Legal);
    }
    VPLegalization(VPTransform EVLParamStrategy, VPTransform OpStrategy)
        : EVLParamStrategy(EVLParamStrategy), OpStrategy(OpStrategy) {}
  };
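
  // Illustrative sketch (not part of the LLVM sources): a target without
  // native %evl support might ask for the EVL to be folded into the mask
  // while keeping the operation in predicated form:
  //
  //   TTI::VPLegalization Strategy(TTI::VPLegalization::Convert, // EVL param
  //                                TTI::VPLegalization::Legal);  // operator
  //   if (!Strategy.shouldDoNothing())
  //     ; // rewrite the VP intrinsic accordingly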

  /// \returns How the target needs this vector-predicated operation to be
  /// transformed.
  VPLegalization getVPLegalizationStrategy(const VPIntrinsic &PI) const;
  /// @}

  /// \returns Whether a 32-bit branch instruction is available in Arm or Thumb
  /// state.
  ///
  /// Used by the LowerTypeTests pass, which constructs an IR inline assembler
  /// node containing a jump table in a format suitable for the target, so it
  /// needs to know what format of jump table it can legally use.
  ///
  /// For non-Arm targets, this function isn't used. It defaults to returning
  /// false, but it shouldn't matter what it returns anyway.
  bool hasArmWideBranch(bool Thumb) const;

  /// \return The maximum number of function arguments the target supports.
  unsigned getMaxNumArgs() const;

  /// @}

private:
  /// The abstract base class used to type erase specific TTI
  /// implementations.
  class Concept;

  /// The template model for the base class which wraps a concrete
  /// implementation in a type erased interface.
  template <typename T> class Model;

  std::unique_ptr<Concept> TTIImpl;
};

class TargetTransformInfo::Concept {
public:
  virtual ~Concept() = 0;
  virtual const DataLayout &getDataLayout() const = 0;
  virtual InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                                     ArrayRef<const Value *> Operands,
                                     Type *AccessType,
                                     TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost
  getPointersChainCost(ArrayRef<const Value *> Ptrs, const Value *Base,
                       const TTI::PointersChainInfo &Info, Type *AccessTy,
                       TTI::TargetCostKind CostKind) = 0;
  virtual unsigned getInliningThresholdMultiplier() const = 0;
  virtual unsigned adjustInliningThreshold(const CallBase *CB) = 0;
  virtual int getInlinerVectorBonusPercent() const = 0;
  virtual unsigned getCallerAllocaCost(const CallBase *CB,
                                       const AllocaInst *AI) const = 0;
  virtual InstructionCost getMemcpyCost(const Instruction *I) = 0;
  virtual uint64_t getMaxMemIntrinsicInlineSizeThreshold() const = 0;
  virtual unsigned
  getEstimatedNumberOfCaseClusters(const SwitchInst &SI, unsigned &JTSize,
                                   ProfileSummaryInfo *PSI,
                                   BlockFrequencyInfo *BFI) = 0;
  virtual InstructionCost getInstructionCost(const User *U,
                                             ArrayRef<const Value *> Operands,
                                             TargetCostKind CostKind) = 0;
  virtual BranchProbability getPredictableBranchThreshold() = 0;
  virtual bool hasBranchDivergence(const Function *F = nullptr) = 0;
  virtual bool isSourceOfDivergence(const Value *V) = 0;
  virtual bool isAlwaysUniform(const Value *V) = 0;
  virtual bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
  virtual bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const = 0;
  virtual unsigned getFlatAddressSpace() = 0;
  virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                          Intrinsic::ID IID) const = 0;
  virtual bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const = 0;
  virtual bool
  canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const = 0;
  virtual unsigned getAssumedAddrSpace(const Value *V) const = 0;
  virtual bool isSingleThreaded() const = 0;
  virtual std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const = 0;
  virtual Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                  Value *OldV,
                                                  Value *NewV) const = 0;
  virtual bool isLoweredToCall(const Function *F) = 0;
  virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                                       UnrollingPreferences &UP,
                                       OptimizationRemarkEmitter *ORE) = 0;
  virtual void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                     PeelingPreferences &PP) = 0;
  virtual bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                        AssumptionCache &AC,
                                        TargetLibraryInfo *LibInfo,
                                        HardwareLoopInfo &HWLoopInfo) = 0;
  virtual bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) = 0;
  virtual TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) = 0;
  virtual std::optional<Instruction *> instCombineIntrinsic(
      InstCombiner &IC, IntrinsicInst &II) = 0;
  virtual std::optional<Value *> simplifyDemandedUseBitsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask,
      KnownBits & Known, bool &KnownBitsComputed) = 0;
  virtual std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts,
      APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) = 0;
  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale, unsigned AddrSpace,
                                     Instruction *I) = 0;
  virtual bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                             const TargetTransformInfo::LSRCost &C2) = 0;
  virtual bool isNumRegsMajorCostOfLSR() = 0;
  virtual bool isProfitableLSRChainElement(Instruction *I) = 0;
  virtual bool canMacroFuseCmp() = 0;
  virtual bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE,
                          LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
                          TargetLibraryInfo *LibInfo) = 0;
  virtual AddressingModeKind
    getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const = 0;
  virtual bool isLegalMaskedStore(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalMaskedLoad(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalBroadcastLoad(Type *ElementTy,
                                    ElementCount NumElements) const = 0;
  virtual bool isLegalMaskedScatter(Type *DataType, Align Alignment) = 0;
  virtual bool isLegalMaskedGather(Type *DataType, Align Alignment) = 0;
  virtual bool forceScalarizeMaskedGather(VectorType *DataType,
                                          Align Alignment) = 0;
  virtual bool forceScalarizeMaskedScatter(VectorType *DataType,
                                           Align Alignment) = 0;
  virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
  virtual bool isLegalMaskedExpandLoad(Type *DataType) = 0;
  virtual bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
                               unsigned Opcode1,
                               const SmallBitVector &OpcodeMask) const = 0;
  virtual bool enableOrderedReductions() = 0;
  virtual bool hasDivRemOp(Type *DataType, bool IsSigned) = 0;
  virtual bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) = 0;
  virtual bool prefersVectorizedAddressing() = 0;
  virtual InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                               int64_t BaseOffset,
                                               bool HasBaseReg, int64_t Scale,
                                               unsigned AddrSpace) = 0;
  virtual bool LSRWithInstrQueries() = 0;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
  virtual bool isProfitableToHoist(Instruction *I) = 0;
  virtual bool useAA() = 0;
  virtual bool isTypeLegal(Type *Ty) = 0;
  virtual unsigned getRegUsageForType(Type *Ty) = 0;
  virtual bool shouldBuildLookupTables() = 0;
  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
  virtual bool shouldBuildRelLookupTables() = 0;
  virtual bool useColdCCForColdCall(Function &F) = 0;
  virtual InstructionCost getScalarizationOverhead(VectorType *Ty,
                                                   const APInt &DemandedElts,
                                                   bool Insert, bool Extract,
                                                   TargetCostKind CostKind) = 0;
  virtual InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TargetCostKind CostKind) = 0;
  virtual bool supportsEfficientVectorElementLoadStore() = 0;
  virtual bool supportsTailCalls() = 0;
  virtual bool supportsTailCallFor(const CallBase *CB) = 0;
  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
  virtual MemCmpExpansionOptions
  enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const = 0;
  virtual bool enableSelectOptimize() = 0;
  virtual bool enableInterleavedAccessVectorization() = 0;
  virtual bool enableMaskedInterleavedAccessVectorization() = 0;
  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                              unsigned BitWidth,
                                              unsigned AddressSpace,
                                              Align Alignment,
                                              unsigned *Fast) = 0;
  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
  virtual bool haveFastSqrt(Type *Ty) = 0;
  virtual bool isExpensiveToSpeculativelyExecute(const Instruction *I) = 0;
  virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
  virtual InstructionCost getFPOpCost(Type *Ty) = 0;
  virtual InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                                const APInt &Imm, Type *Ty) = 0;
  virtual InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                        TargetCostKind CostKind) = 0;
  virtual InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                            const APInt &Imm, Type *Ty,
                                            TargetCostKind CostKind,
                                            Instruction *Inst = nullptr) = 0;
  virtual InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TargetCostKind CostKind) = 0;
  virtual unsigned getNumberOfRegisters(unsigned ClassID) const = 0;
  virtual unsigned getRegisterClassForType(bool Vector,
                                           Type *Ty = nullptr) const = 0;
  virtual const char *getRegisterClassName(unsigned ClassID) const = 0;
  virtual TypeSize getRegisterBitWidth(RegisterKind K) const = 0;
  virtual unsigned getMinVectorRegisterBitWidth() const = 0;
  virtual std::optional<unsigned> getMaxVScale() const = 0;
  virtual std::optional<unsigned> getVScaleForTuning() const = 0;
  virtual bool isVScaleKnownToBeAPowerOfTwo() const = 0;
  virtual bool
  shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const = 0;
  virtual ElementCount getMinimumVF(unsigned ElemWidth,
                                    bool IsScalable) const = 0;
  virtual unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const = 0;
  virtual unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                                     Type *ScalarValTy) const = 0;
  virtual bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
  virtual unsigned getCacheLineSize() const = 0;
  virtual std::optional<unsigned> getCacheSize(CacheLevel Level) const = 0;
  virtual std::optional<unsigned> getCacheAssociativity(CacheLevel Level)
      const = 0;

  /// \return How much before a load we should place the prefetch
  /// instruction.  This is currently measured in number of
  /// instructions.
  virtual unsigned getPrefetchDistance() const = 0;

  /// \return Some HW prefetchers can handle accesses up to a certain
  /// constant stride.  This is the minimum stride in bytes where it
  /// makes sense to start adding SW prefetches.  The default is 1,
  /// i.e. prefetch with any stride.  Sometimes prefetching is beneficial
  /// even below the HW prefetcher limit, and the arguments provided are
  /// meant to serve as a basis for deciding this for a particular loop.
  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const = 0;

  /// \return The maximum number of iterations to prefetch ahead.  If
  /// the required number of iterations is more than this number, no
  /// prefetching is performed.
  virtual unsigned getMaxPrefetchIterationsAhead() const = 0;

  /// \return True if prefetching should also be done for writes.
  virtual bool enableWritePrefetching() const = 0;

  /// \return True if the target wants to issue a prefetch in address space
  /// \p AS.
  virtual bool shouldPrefetchAddressSpace(unsigned AS) const = 0;

  virtual unsigned getMaxInterleaveFactor(ElementCount VF) = 0;
  virtual InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      OperandValueInfo Opd1Info, OperandValueInfo Opd2Info,
      ArrayRef<const Value *> Args, const Instruction *CxtI = nullptr) = 0;

  virtual InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                         ArrayRef<int> Mask,
                                         TTI::TargetCostKind CostKind,
                                         int Index, VectorType *SubTp,
                                         ArrayRef<const Value *> Args) = 0;
  virtual InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst,
                                           Type *Src, CastContextHint CCH,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) = 0;
  virtual InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                                   VectorType *VecTy,
                                                   unsigned Index) = 0;
  virtual InstructionCost getCFInstrCost(unsigned Opcode,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) = 0;
  virtual InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                             Type *CondTy,
                                             CmpInst::Predicate VecPred,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) = 0;
  virtual InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                             TTI::TargetCostKind CostKind,
                                             unsigned Index, Value *Op0,
                                             Value *Op1) = 0;
  virtual InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                             TTI::TargetCostKind CostKind,
                                             unsigned Index) = 0;

  virtual InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) = 0;

  virtual InstructionCost
  getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                  unsigned AddressSpace, TTI::TargetCostKind CostKind,
                  OperandValueInfo OpInfo, const Instruction *I) = 0;
  virtual InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src,
                                            Align Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) = 0;
  virtual InstructionCost
  getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                        unsigned AddressSpace,
                        TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) = 0;

  virtual InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond = false, bool UseMaskForGaps = false) = 0;
  virtual InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost getExtendedReductionCost(
      unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
      FastMathFlags FMF,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
  virtual InstructionCost getMulAccReductionCost(
      bool IsUnsigned, Type *ResTy, VectorType *Ty,
      TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput) = 0;
  virtual InstructionCost
  getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                        TTI::TargetCostKind CostKind) = 0;
  virtual InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                           ArrayRef<Type *> Tys,
                                           TTI::TargetCostKind CostKind) = 0;
  virtual unsigned getNumberOfParts(Type *Tp) = 0;
  virtual InstructionCost
  getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) = 0;
  virtual InstructionCost
  getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                  MemIntrinsicInfo &Info) = 0;
  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                   Type *ExpectedType) = 0;
  virtual Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicElementSize) const = 0;

  virtual void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicCpySize) const = 0;
  virtual bool areInlineCompatible(const Function *Caller,
                                   const Function *Callee) const = 0;
  virtual bool areTypesABICompatible(const Function *Caller,
                                     const Function *Callee,
                                     const ArrayRef<Type *> &Types) const = 0;
  virtual bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const = 0;
  virtual bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const = 0;
  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                           Align Alignment,
                                           unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                            Align Alignment,
                                            unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                           ElementCount VF) const = 0;
  virtual bool isElementTypeLegalForScalableVector(Type *Ty) const = 0;
  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                       unsigned ChainSizeInBytes,
                                       VectorType *VecTy) const = 0;
  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                        unsigned ChainSizeInBytes,
                                        VectorType *VecTy) const = 0;
  virtual bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                                     ReductionFlags) const = 0;
  virtual bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                               ReductionFlags) const = 0;
  virtual bool preferEpilogueVectorization() const = 0;

  virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
  virtual unsigned getGISelRematGlobalCost() const = 0;
  virtual unsigned getMinTripCountTailFoldingThreshold() const = 0;
  virtual bool enableScalableVectorization() const = 0;
  virtual bool supportsScalableVectors() const = 0;
  virtual bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                                     Align Alignment) const = 0;
  virtual VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const = 0;
  virtual bool hasArmWideBranch(bool Thumb) const = 0;
  virtual unsigned getMaxNumArgs() const = 0;
};

template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
  T Impl;

public:
  Model(T Impl) : Impl(std::move(Impl)) {}
  ~Model() override = default;

  const DataLayout &getDataLayout() const override {
    return Impl.getDataLayout();
  }

  InstructionCost
  getGEPCost(Type *PointeeType, const Value *Ptr,
             ArrayRef<const Value *> Operands, Type *AccessType,
             TargetTransformInfo::TargetCostKind CostKind) override {
    return Impl.getGEPCost(PointeeType, Ptr, Operands, AccessType, CostKind);
  }
  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TargetCostKind CostKind) override {
    return Impl.getPointersChainCost(Ptrs, Base, Info, AccessTy, CostKind);
  }
  unsigned getInliningThresholdMultiplier() const override {
    return Impl.getInliningThresholdMultiplier();
  }
  unsigned adjustInliningThreshold(const CallBase *CB) override {
    return Impl.adjustInliningThreshold(CB);
  }
  int getInlinerVectorBonusPercent() const override {
    return Impl.getInlinerVectorBonusPercent();
  }
  unsigned getCallerAllocaCost(const CallBase *CB,
                               const AllocaInst *AI) const override {
    return Impl.getCallerAllocaCost(CB, AI);
  }
  InstructionCost getMemcpyCost(const Instruction *I) override {
    return Impl.getMemcpyCost(I);
  }

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const override {
    return Impl.getMaxMemIntrinsicInlineSizeThreshold();
  }

  InstructionCost getInstructionCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     TargetCostKind CostKind) override {
    return Impl.getInstructionCost(U, Operands, CostKind);
  }
  BranchProbability getPredictableBranchThreshold() override {
    return Impl.getPredictableBranchThreshold();
  }
  bool hasBranchDivergence(const Function *F = nullptr) override {
    return Impl.hasBranchDivergence(F);
  }
  bool isSourceOfDivergence(const Value *V) override {
    return Impl.isSourceOfDivergence(V);
  }

  bool isAlwaysUniform(const Value *V) override {
    return Impl.isAlwaysUniform(V);
  }

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
    return Impl.isValidAddrSpaceCast(FromAS, ToAS);
  }

  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const override {
    return Impl.addrspacesMayAlias(AS0, AS1);
  }

  unsigned getFlatAddressSpace() override { return Impl.getFlatAddressSpace(); }

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const override {
    return Impl.collectFlatAddressOperands(OpIndexes, IID);
  }

  bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const override {
    return Impl.isNoopAddrSpaceCast(FromAS, ToAS);
  }

  bool
  canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const override {
    return Impl.canHaveNonUndefGlobalInitializerInAddressSpace(AS);
  }

  unsigned getAssumedAddrSpace(const Value *V) const override {
    return Impl.getAssumedAddrSpace(V);
  }

  bool isSingleThreaded() const override { return Impl.isSingleThreaded(); }

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const override {
    return Impl.getPredicatedAddrSpace(V);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const override {
    return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
  }

  bool isLoweredToCall(const Function *F) override {
    return Impl.isLoweredToCall(F);
  }
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               UnrollingPreferences &UP,
                               OptimizationRemarkEmitter *ORE) override {
    return Impl.getUnrollingPreferences(L, SE, UP, ORE);
  }
  void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                             PeelingPreferences &PP) override {
    return Impl.getPeelingPreferences(L, SE, PP);
  }
  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) override {
    return Impl.isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo);
  }
  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) override {
    return Impl.preferPredicateOverEpilogue(TFI);
  }
  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) override {
    return Impl.getPreferredTailFoldingStyle(IVUpdateMayOverflow);
  }
  std::optional<Instruction *>
  instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) override {
    return Impl.instCombineIntrinsic(IC, II);
  }
  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) override {
    return Impl.simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known,
                                                 KnownBitsComputed);
  }
  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) override {
    return Impl.simplifyDemandedVectorEltsIntrinsic(
        IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3,
        SimplifyAndSetOp);
  }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                                      AddrSpace, I);
  }
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) override {
    return Impl.isLSRCostLess(C1, C2);
  }
  bool isNumRegsMajorCostOfLSR() override {
    return Impl.isNumRegsMajorCostOfLSR();
  }
  bool isProfitableLSRChainElement(Instruction *I) override {
    return Impl.isProfitableLSRChainElement(I);
  }
  bool canMacroFuseCmp() override { return Impl.canMacroFuseCmp(); }
  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) override {
    return Impl.canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
  }
  AddressingModeKind
  getPreferredAddressingMode(const Loop *L,
                             ScalarEvolution *SE) const override {
    return Impl.getPreferredAddressingMode(L, SE);
  }
  bool isLegalMaskedStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedStore(DataType, Alignment);
  }
  bool isLegalMaskedLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedLoad(DataType, Alignment);
  }
  bool isLegalNTStore(Type *DataType, Align Alignment) override {
    return Impl.isLegalNTStore(DataType, Alignment);
  }
  bool isLegalNTLoad(Type *DataType, Align Alignment) override {
    return Impl.isLegalNTLoad(DataType, Alignment);
  }
  bool isLegalBroadcastLoad(Type *ElementTy,
                            ElementCount NumElements) const override {
    return Impl.isLegalBroadcastLoad(ElementTy, NumElements);
  }
  bool isLegalMaskedScatter(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedScatter(DataType, Alignment);
  }
  bool isLegalMaskedGather(Type *DataType, Align Alignment) override {
    return Impl.isLegalMaskedGather(DataType, Alignment);
  }
  bool forceScalarizeMaskedGather(VectorType *DataType,
                                  Align Alignment) override {
    return Impl.forceScalarizeMaskedGather(DataType, Alignment);
  }
  bool forceScalarizeMaskedScatter(VectorType *DataType,
                                   Align Alignment) override {
    return Impl.forceScalarizeMaskedScatter(DataType, Alignment);
  }
  bool isLegalMaskedCompressStore(Type *DataType) override {
    return Impl.isLegalMaskedCompressStore(DataType);
  }
  bool isLegalMaskedExpandLoad(Type *DataType) override {
    return Impl.isLegalMaskedExpandLoad(DataType);
  }
  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const override {
    return Impl.isLegalAltInstr(VecTy, Opcode0, Opcode1, OpcodeMask);
  }
  bool enableOrderedReductions() override {
    return Impl.enableOrderedReductions();
  }
  bool hasDivRemOp(Type *DataType, bool IsSigned) override {
    return Impl.hasDivRemOp(DataType, IsSigned);
  }
  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) override {
    return Impl.hasVolatileVariant(I, AddrSpace);
  }
  bool prefersVectorizedAddressing() override {
    return Impl.prefersVectorizedAddressing();
  }
  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                                     AddrSpace);
  }
  bool LSRWithInstrQueries() override { return Impl.LSRWithInstrQueries(); }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool useAA() override { return Impl.useAA(); }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getRegUsageForType(Type *Ty) override {
    return Impl.getRegUsageForType(Ty);
  }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool shouldBuildLookupTablesForConstant(Constant *C) override {
    return Impl.shouldBuildLookupTablesForConstant(C);
  }
  bool shouldBuildRelLookupTables() override {
    return Impl.shouldBuildRelLookupTables();
  }
  bool useColdCCForColdCall(Function &F) override {
    return Impl.useColdCCForColdCall(F);
  }

  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TargetCostKind CostKind) override {
    return Impl.getScalarizationOverhead(Ty, DemandedElts, Insert, Extract,
                                         CostKind);
  }
  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TargetCostKind CostKind) override {
    return Impl.getOperandsScalarizationOverhead(Args, Tys, CostKind);
  }

  bool supportsEfficientVectorElementLoadStore() override {
    return Impl.supportsEfficientVectorElementLoadStore();
  }

  bool supportsTailCalls() override { return Impl.supportsTailCalls(); }
  bool supportsTailCallFor(const CallBase *CB) override {
    return Impl.supportsTailCallFor(CB);
  }

  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                               bool IsZeroCmp) const override {
    return Impl.enableMemCmpExpansion(OptSize, IsZeroCmp);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  bool enableSelectOptimize() override {
    return Impl.enableSelectOptimize();
  }
  bool enableMaskedInterleavedAccessVectorization() override {
    return Impl.enableMaskedInterleavedAccessVectorization();
  }
  bool isFPVectorizationPotentiallyUnsafe() override {
    return Impl.isFPVectorizationPotentiallyUnsafe();
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) override {
    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  bool isExpensiveToSpeculativelyExecute(const Instruction* I) override {
    return Impl.isExpensiveToSpeculativelyExecute(I);
  }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) override {
    return Impl.isFCmpOrdCheaperThanFCmpZero(Ty);
  }

  InstructionCost getFPOpCost(Type *Ty) override {
    return Impl.getFPOpCost(Ty);
  }

  InstructionCost getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                        const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
  }
  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TargetCostKind CostKind) override {
    return Impl.getIntImmCost(Imm, Ty, CostKind);
  }
  InstructionCost getIntImmCostInst(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) override {
    return Impl.getIntImmCostInst(Opc, Idx, Imm, Ty, CostKind, Inst);
  }
  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TargetCostKind CostKind) override {
    return Impl.getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
  }
  unsigned getNumberOfRegisters(unsigned ClassID) const override {
    return Impl.getNumberOfRegisters(ClassID);
  }
  unsigned getRegisterClassForType(bool Vector,
                                   Type *Ty = nullptr) const override {
    return Impl.getRegisterClassForType(Vector, Ty);
  }
  const char *getRegisterClassName(unsigned ClassID) const override {
    return Impl.getRegisterClassName(ClassID);
  }
  TypeSize getRegisterBitWidth(RegisterKind K) const override {
    return Impl.getRegisterBitWidth(K);
  }
  unsigned getMinVectorRegisterBitWidth() const override {
    return Impl.getMinVectorRegisterBitWidth();
  }
  std::optional<unsigned> getMaxVScale() const override {
    return Impl.getMaxVScale();
  }
  std::optional<unsigned> getVScaleForTuning() const override {
    return Impl.getVScaleForTuning();
  }
  bool isVScaleKnownToBeAPowerOfTwo() const override {
    return Impl.isVScaleKnownToBeAPowerOfTwo();
  }
  bool shouldMaximizeVectorBandwidth(
      TargetTransformInfo::RegisterKind K) const override {
    return Impl.shouldMaximizeVectorBandwidth(K);
  }
  ElementCount getMinimumVF(unsigned ElemWidth,
                            bool IsScalable) const override {
    return Impl.getMinimumVF(ElemWidth, IsScalable);
  }
  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const override {
    return Impl.getMaximumVF(ElemWidth, Opcode);
  }
  unsigned getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
                             Type *ScalarValTy) const override {
    return Impl.getStoreMinimumVF(VF, ScalarMemTy, ScalarValTy);
  }
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
    return Impl.shouldConsiderAddressTypePromotion(
        I, AllowPromotionWithoutCommonHeader);
  }
  unsigned getCacheLineSize() const override { return Impl.getCacheLineSize(); }
  std::optional<unsigned> getCacheSize(CacheLevel Level) const override {
    return Impl.getCacheSize(Level);
  }
  std::optional<unsigned>
  getCacheAssociativity(CacheLevel Level) const override {
    return Impl.getCacheAssociativity(Level);
  }

  /// Return the preferred prefetch distance in terms of instructions.
  ///
  unsigned getPrefetchDistance() const override {
    return Impl.getPrefetchDistance();
  }

  /// Return the minimum stride necessary to trigger software
  /// prefetching.
  ///
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches,
                                bool HasCall) const override {
    return Impl.getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses,
                                     NumPrefetches, HasCall);
  }

  /// Return the maximum prefetch distance in terms of loop
  /// iterations.
  ///
  unsigned getMaxPrefetchIterationsAhead() const override {
    return Impl.getMaxPrefetchIterationsAhead();
  }

  /// \return True if prefetching should also be done for writes.
  bool enableWritePrefetching() const override {
    return Impl.enableWritePrefetching();
  }

  /// \return True if the target wants to issue a prefetch in address
  /// space \p AS.
  bool shouldPrefetchAddressSpace(unsigned AS) const override {
    return Impl.shouldPrefetchAddressSpace(AS);
  }

  unsigned getMaxInterleaveFactor(ElementCount VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) override {
    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
  }
  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      OperandValueInfo Opd1Info, OperandValueInfo Opd2Info,
      ArrayRef<const Value *> Args,
      const Instruction *CxtI = nullptr) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
                                       Args, CxtI);
  }

  InstructionCost getShuffleCost(ShuffleKind Kind, VectorType *Tp,
                                 ArrayRef<int> Mask,
                                 TTI::TargetCostKind CostKind, int Index,
                                 VectorType *SubTp,
                                 ArrayRef<const Value *> Args) override {
    return Impl.getShuffleCost(Kind, Tp, Mask, CostKind, Index, SubTp, Args);
  }
  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
  }
  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy,
                                           unsigned Index) override {
    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  }
  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) override {
    return Impl.getCFInstrCost(Opcode, CostKind, I);
  }
  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
  }
  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0,
                                     Value *Op1) override {
    return Impl.getVectorInstrCost(Opcode, Val, CostKind, Index, Op0, Op1);
  }
  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) override {
    return Impl.getVectorInstrCost(I, Val, CostKind, Index);
  }
  InstructionCost
  getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                            const APInt &DemandedDstElts,
                            TTI::TargetCostKind CostKind) override {
    return Impl.getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
                                          DemandedDstElts, CostKind);
  }
  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  OperandValueInfo OpInfo,
                                  const Instruction *I) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind,
                                OpInfo, I);
  }
  InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) override {
    return Impl.getVPMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);
  }
  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
  }
  InstructionCost
  getGatherScatterOpCost(unsigned Opcode, Type *DataTy, const Value *Ptr,
                         bool VariableMask, Align Alignment,
                         TTI::TargetCostKind CostKind,
                         const Instruction *I = nullptr) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
  }
  InstructionCost getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond, bool UseMaskForGaps) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
  }
  InstructionCost
  getArithmeticReductionCost(unsigned Opcode, VectorType *Ty,
                             std::optional<FastMathFlags> FMF,
                             TTI::TargetCostKind CostKind) override {
    return Impl.getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
  }
  InstructionCost
  getMinMaxReductionCost(Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMinMaxReductionCost(IID, Ty, FMF, CostKind);
  }
  InstructionCost
  getExtendedReductionCost(unsigned Opcode, bool IsUnsigned, Type *ResTy,
                           VectorType *Ty, FastMathFlags FMF,
                           TTI::TargetCostKind CostKind) override {
    return Impl.getExtendedReductionCost(Opcode, IsUnsigned, ResTy, Ty, FMF,
                                         CostKind);
  }
  InstructionCost
  getMulAccReductionCost(bool IsUnsigned, Type *ResTy, VectorType *Ty,
                         TTI::TargetCostKind CostKind) override {
    return Impl.getMulAccReductionCost(IsUnsigned, ResTy, Ty, CostKind);
  }
  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) override {
    return Impl.getIntrinsicInstrCost(ICA, CostKind);
  }
  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) override {
    return Impl.getCallInstrCost(F, RetTy, Tys, CostKind);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  InstructionCost getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                            const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
    return Impl.getAtomicMemIntrinsicMaxElementSize();
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  Type *getMemcpyLoopLoweringType(
      LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
      unsigned DestAddrSpace, unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicElementSize) const override {
    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAddrSpace,
                                          DestAddrSpace, SrcAlign, DestAlign,
                                          AtomicElementSize);
  }
  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicCpySize) const override {
    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                           SrcAddrSpace, DestAddrSpace,
                                           SrcAlign, DestAlign, AtomicCpySize);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const override {
    return Impl.areTypesABICompatible(Caller, Callee, Types);
  }
  bool isIndexedLoadLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedLoadLegal(Mode, Ty, getDataLayout());
  }
  bool isIndexedStoreLegal(MemIndexedMode Mode, Type *Ty) const override {
    return Impl.isIndexedStoreLegal(Mode, Ty, getDataLayout());
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const override {
    return Impl.isLegalToVectorizeReduction(RdxDesc, VF);
  }
  bool isElementTypeLegalForScalableVector(Type *Ty) const override {
    return Impl.isElementTypeLegalForScalableVector(Ty);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.preferInLoopReduction(Opcode, Ty, Flags);
  }
  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       ReductionFlags Flags) const override {
    return Impl.preferPredicatedReductionSelect(Opcode, Ty, Flags);
  }
  bool preferEpilogueVectorization() const override {
    return Impl.preferEpilogueVectorization();
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }

  unsigned getGISelRematGlobalCost() const override {
    return Impl.getGISelRematGlobalCost();
  }

  unsigned getMinTripCountTailFoldingThreshold() const override {
    return Impl.getMinTripCountTailFoldingThreshold();
  }

  bool supportsScalableVectors() const override {
    return Impl.supportsScalableVectors();
  }

  bool enableScalableVectorization() const override {
    return Impl.enableScalableVectorization();
  }

  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                             Align Alignment) const override {
    return Impl.hasActiveVectorLength(Opcode, DataType, Alignment);
  }

  VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const override {
    return Impl.getVPLegalizationStrategy(PI);
  }

  bool hasArmWideBranch(bool Thumb) const override {
    return Impl.hasArmWideBranch(Thumb);
  }

  unsigned getMaxNumArgs() const override {
    return Impl.getMaxNumArgs();
  }
};

template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}
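
// The constructor above implements type erasure: any type providing the
// operations named in Concept can be wrapped in a Model<T> and used through
// the virtual interface. A minimal sketch (MyTTIImpl is hypothetical; in-tree
// targets instead derive from TargetTransformInfoImplBase or BasicTTIImplBase,
// which supply conservative defaults):
//
//   struct MyTTIImpl {
//     const DataLayout &DL;
//     const DataLayout &getDataLayout() const { return DL; }
//     // ...definitions for the remaining Concept operations...
//   };
//   TargetTransformInfo TTI(MyTTIImpl{DL}); // stored as Model<MyTTIImpl>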

/// Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget-specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};
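
// Example (a sketch): with the new pass manager, the analysis is typically
// registered with a callback that derives the TTI from a TargetMachine.
// `TM` here is assumed to be a valid TargetMachine pointer.
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([&] {
//     return TargetIRAnalysis(
//         [&](const Function &F) { return TM->getTargetTransformInfo(F); });
//   });
//   // Later, inside a pass:
//   //   auto &TTI = FAM.getResult<TargetIRAnalysis>(F);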

/// Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  std::optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// We must provide a default constructor for the pass, but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
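
// Example (a sketch): a legacy pass declares a dependency on the wrapper
// pass and queries the per-function TTI from it.
//
//   void getAnalysisUsage(AnalysisUsage &AU) const override {
//     AU.addRequired<TargetTransformInfoWrapperPass>();
//   }
//   bool runOnFunction(Function &F) override {
//     TargetTransformInfo &TTI =
//         getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//     unsigned CacheLine = TTI.getCacheLineSize(); // illustrative query
//     (void)CacheLine;
//     return false; // analysis-style pass: IR unchanged
//   }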

} // namespace llvm

#endif
//===- ConstraintSystem.h - A system of linear constraints. ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
#define LLVM_ANALYSIS_CONSTRAINTSYSTEM_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"

#include <string>

namespace llvm {

class Value;
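
/// A system of linear constraints over integer variables. Supports
/// satisfiability queries (\c mayHaveSolution) via Fourier-Motzkin
/// variable elimination.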
class ConstraintSystem {
  struct Entry {
    int64_t Coefficient;
    uint16_t Id;

    Entry(int64_t Coefficient, uint16_t Id)
        : Coefficient(Coefficient), Id(Id) {}
  };

  static int64_t getConstPart(const Entry &E) {
    if (E.Id == 0)
      return E.Coefficient;
    return 0;
  }

  static int64_t getLastCoefficient(ArrayRef<Entry> Row, uint16_t Id) {
    if (Row.empty())
      return 0;
    if (Row.back().Id == Id)
      return Row.back().Coefficient;
    return 0;
  }

  size_t NumVariables = 0;

  /// Current linear constraints in the system.
  /// An entry of the form c0, c1, ... cn represents the following constraint:
  ///   c0 >= v0 * c1 + ... + v{n-1} * cn
  SmallVector<SmallVector<Entry, 8>, 4> Constraints;

  /// A map of variables (IR values) to their corresponding index in the
  /// constraint system.
  DenseMap<Value *, unsigned> Value2Index;

  /// Current greatest common divisor for all coefficients in the system.
  uint32_t GCD = 1;

  // Eliminate constraints from the system using Fourier–Motzkin elimination.
  bool eliminateUsingFM();

  /// Returns true if there may be a solution for the constraints in the system.
  bool mayHaveSolutionImpl();

  /// Get list of variable names from the Value2Index map.
  SmallVector<std::string> getVarNamesList() const;

public:
  ConstraintSystem() = default;
  ConstraintSystem(ArrayRef<Value *> FunctionArgs) {
    NumVariables += FunctionArgs.size();
    for (auto *Arg : FunctionArgs) {
      Value2Index.insert({Arg, Value2Index.size() + 1});
    }
  }
  ConstraintSystem(const DenseMap<Value *, unsigned> &Value2Index)
      : NumVariables(Value2Index.size()), Value2Index(Value2Index) {}

  bool addVariableRow(ArrayRef<int64_t> R) {
    assert(Constraints.empty() || R.size() == NumVariables);
    // If all variable coefficients are 0, the constraint does not provide any
    // usable information.
    if (all_of(ArrayRef(R).drop_front(1), [](int64_t C) { return C == 0; }))
      return false;

    SmallVector<Entry, 4> NewRow;
    for (const auto &[Idx, C] : enumerate(R)) {
      if (C == 0)
        continue;
      auto A = std::abs(C);
      GCD = APIntOps::GreatestCommonDivisor({32, (uint32_t)A}, {32, GCD})
                .getZExtValue();

      NewRow.emplace_back(C, Idx);
    }
    if (Constraints.empty())
      NumVariables = R.size();
    Constraints.push_back(std::move(NewRow));
    return true;
  }
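
  // Worked example (illustrative): with two variables x0 and x1, the
  // constraint  x0 + 2 * x1 <= 10  matches the row form
  //   c0 >= v0 * c1 + v1 * c2
  // and is added as
  //   CS.addVariableRow({10, 1, 2}); // 10 >= 1 * x0 + 2 * x1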

  DenseMap<Value *, unsigned> &getValue2Index() { return Value2Index; }
  const DenseMap<Value *, unsigned> &getValue2Index() const {
    return Value2Index;
  }

  bool addVariableRowFill(ArrayRef<int64_t> R) {
    // If all variable coefficients are 0, the constraint does not provide any
    // usable information.
    if (all_of(ArrayRef(R).drop_front(1), [](int64_t C) { return C == 0; }))
      return false;

    NumVariables = std::max(R.size(), NumVariables);
    return addVariableRow(R);
  }

  /// Returns true if there may be a solution for the constraints in the system.
  bool mayHaveSolution();

  static SmallVector<int64_t, 8> negate(SmallVector<int64_t, 8> R) {
    // The negated constraint of R is obtained by adding 1 to the constant
    // and then multiplying by -1.
    R[0] += 1;
    return negateOrEqual(R);
  }
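
  // For example (illustrative): negating  x0 <= 10  ({10, 1}) first forms
  // {11, 1} and then flips the signs, yielding {-11, -1}, i.e. -11 >= -x0,
  // which is x0 >= 11, exactly the negation of x0 <= 10 over the integers.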

  /// Multiplies each coefficient in the given vector by -1. Does not modify the
  /// original vector.
  ///
  /// \param R The vector of coefficients to be negated.
  static SmallVector<int64_t, 8> negateOrEqual(SmallVector<int64_t, 8> R) {
    // The negated constraint R is obtained by multiplying by -1.
    for (auto &C : R)
      if (MulOverflow(C, int64_t(-1), C))
        return {};
    return R;
  }

  /// Converts the given vector to form a strict less than inequality. Does not
  /// modify the original vector.
  ///
  /// \param R The vector of coefficients to be converted.
  static SmallVector<int64_t, 8> toStrictLessThan(SmallVector<int64_t, 8> R) {
    // The strict less than is obtained by subtracting 1 from the constant.
    if (SubOverflow(R[0], int64_t(1), R[0])) {
      return {};
    }
    return R;
  }
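
  // For example (illustrative): applied to  x0 <= 10  ({10, 1}) this yields
  // {9, 1}, i.e. x0 <= 9, which is the strict form x0 < 10 over the integers.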

  bool isConditionImplied(SmallVector<int64_t, 8> R) const;

  SmallVector<int64_t> getLastConstraint() const {
    assert(!Constraints.empty() && "Constraint system is empty");
    SmallVector<int64_t> Result(NumVariables, 0);
    for (auto &Entry : Constraints.back())
      Result[Entry.Id] = Entry.Coefficient;
    return Result;
  }

  void popLastConstraint() { Constraints.pop_back(); }
  void popLastNVariables(unsigned N) {
    assert(NumVariables > N);
    NumVariables -= N;
  }

  /// Returns the number of rows in the constraint system.
  unsigned size() const { return Constraints.size(); }

  /// Print the constraints in the system.
  void dump() const;
};
} // namespace llvm

#endif // LLVM_ANALYSIS_CONSTRAINTSYSTEM_H
//===- llvm/Analysis/ProfileSummaryInfo.h - profile summary ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that provides access to profile summary
// information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_PROFILESUMMARYINFO_H
#define LLVM_ANALYSIS_PROFILESUMMARYINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/Pass.h"
#include <memory>
#include <optional>

namespace llvm {
class BasicBlock;
class CallBase;
class MachineFunction;

/// Analysis providing profile information.
///
/// This is an immutable analysis pass that provides the ability to query
/// global (program-level) profile information. The main APIs are isHotCount
/// and isColdCount, which tell whether a given profile count is considered
/// hot/cold based on the profile summary. This also provides convenience
/// methods to check whether a function is hot or cold.

// FIXME: Provide convenience methods to determine hotness/coldness of other IR
// units. This would require making this depend on BFI.
class ProfileSummaryInfo {
private:
  const Module *M;
  std::unique_ptr<ProfileSummary> Summary;
  void computeThresholds();
  // Count thresholds to answer isHotCount and isColdCount queries.
  std::optional<uint64_t> HotCountThreshold, ColdCountThreshold;
  // True if the working set size of the code is considered huge,
  // because the number of profile counts required to reach the hot
  // percentile is above a huge threshold.
  std::optional<bool> HasHugeWorkingSetSize;
  // True if the working set size of the code is considered large,
  // because the number of profile counts required to reach the hot
  // percentile is above a large threshold.
  std::optional<bool> HasLargeWorkingSetSize;
  // Compute the threshold for a given cutoff.
  std::optional<uint64_t> computeThreshold(int PercentileCutoff) const;
  // The map that caches the threshold values. The keys are the percentile
  // cutoff values and the values are the corresponding threshold values.
  mutable DenseMap<int, uint64_t> ThresholdCache;

public:
  ProfileSummaryInfo(const Module &M) : M(&M) { refresh(); }
  ProfileSummaryInfo(ProfileSummaryInfo &&Arg) = default;

  /// If no summary is present, attempt to refresh.
  void refresh();

  /// Returns true if profile summary is available.
  bool hasProfileSummary() const { return Summary != nullptr; }

  /// Returns true if module \c M has sample profile.
  bool hasSampleProfile() const {
    return hasProfileSummary() &&
           Summary->getKind() == ProfileSummary::PSK_Sample;
  }

  /// Returns true if module \c M has instrumentation profile.
  bool hasInstrumentationProfile() const {
    return hasProfileSummary() &&
           Summary->getKind() == ProfileSummary::PSK_Instr;
  }

  /// Returns true if module \c M has context sensitive instrumentation profile.
  bool hasCSInstrumentationProfile() const {
    return hasProfileSummary() &&
           Summary->getKind() == ProfileSummary::PSK_CSInstr;
  }

  /// Handle the invalidation of this information.
  ///
  /// When used as a result of \c ProfileSummaryAnalysis this method will be
  /// called when the module this was computed for changes. Since profile
  /// summary is immutable after it is annotated on the module, we return false
  /// here.
  bool invalidate(Module &, const PreservedAnalyses &,
                  ModuleAnalysisManager::Invalidator &) {
    return false;
  }

  /// Returns the profile count for \p CallInst.
  std::optional<uint64_t> getProfileCount(const CallBase &CallInst,
                                          BlockFrequencyInfo *BFI,
                                          bool AllowSynthetic = false) const;
  /// Returns true if module \c M has partial-profile sample profile.
  bool hasPartialSampleProfile() const;
  /// Returns true if the working set size of the code is considered huge.
  bool hasHugeWorkingSetSize() const;
  /// Returns true if the working set size of the code is considered large.
  bool hasLargeWorkingSetSize() const;
  /// Returns true if \p F has hot function entry. If it returns false, it
  /// either means it is not hot or it is unknown whether it is hot or not (for
  /// example, no profile data is available).
  template <typename FuncT> bool isFunctionEntryHot(const FuncT *F) const {
    if (!F || !hasProfileSummary())
      return false;
    std::optional<Function::ProfileCount> FunctionCount = getEntryCount(F);
    // FIXME: The heuristic used below for determining hotness is based on
    // preliminary SPEC tuning for inliner. This will eventually be a
    // convenience method that calls isHotCount.
    return FunctionCount && isHotCount(FunctionCount->getCount());
  }

  /// Returns true if \p F contains hot code.
  template <typename FuncT, typename BFIT>
  bool isFunctionHotInCallGraph(const FuncT *F, BFIT &BFI) const {
    if (!F || !hasProfileSummary())
      return false;
    if (auto FunctionCount = getEntryCount(F))
      if (isHotCount(FunctionCount->getCount()))
        return true;

    if (auto TotalCallCount = getTotalCallCount(F))
      if (isHotCount(*TotalCallCount))
        return true;

    for (const auto &BB : *F)
      if (isHotBlock(&BB, &BFI))
        return true;
    return false;
  }
  /// Returns true if \p F has cold function entry.
  bool isFunctionEntryCold(const Function *F) const;
  /// Returns true if \p F contains only cold code.
  template <typename FuncT, typename BFIT>
  bool isFunctionColdInCallGraph(const FuncT *F, BFIT &BFI) const {
    if (!F || !hasProfileSummary())
      return false;
    if (auto FunctionCount = getEntryCount(F))
      if (!isColdCount(FunctionCount->getCount()))
        return false;

    if (auto TotalCallCount = getTotalCallCount(F))
      if (!isColdCount(*TotalCallCount))
        return false;

    for (const auto &BB : *F)
      if (!isColdBlock(&BB, &BFI))
        return false;
    return true;
  }
  /// Returns true if the hotness of \p F is unknown.
  bool isFunctionHotnessUnknown(const Function &F) const;
  /// Returns true if \p F contains hot code with regard to a given hot
  /// percentile cutoff value.
  template <typename FuncT, typename BFIT>
  bool isFunctionHotInCallGraphNthPercentile(int PercentileCutoff,
                                             const FuncT *F, BFIT &BFI) const {
    return isFunctionHotOrColdInCallGraphNthPercentile<true, FuncT, BFIT>(
        PercentileCutoff, F, BFI);
  }
  /// Returns true if \p F contains cold code with regard to a given cold
  /// percentile cutoff value.
  template <typename FuncT, typename BFIT>
  bool isFunctionColdInCallGraphNthPercentile(int PercentileCutoff,
                                              const FuncT *F, BFIT &BFI) const {
    return isFunctionHotOrColdInCallGraphNthPercentile<false, FuncT, BFIT>(
        PercentileCutoff, F, BFI);
  }
  /// Returns true if count \p C is considered hot.
  bool isHotCount(uint64_t C) const;
  /// Returns true if count \p C is considered cold.
  bool isColdCount(uint64_t C) const;
  /// Returns true if count \p C is considered hot with regard to a given
  /// hot percentile cutoff value.
  /// PercentileCutoff is encoded as a 6 digit decimal fixed point number, where
  /// the first two digits are the whole part. E.g. 995000 for 99.5 percentile.
  bool isHotCountNthPercentile(int PercentileCutoff, uint64_t C) const;
  /// Returns true if count \p C is considered cold with regard to a given
  /// cold percentile cutoff value.
  /// PercentileCutoff is encoded as a 6 digit decimal fixed point number, where
  /// the first two digits are the whole part. E.g. 995000 for 99.5 percentile.
  bool isColdCountNthPercentile(int PercentileCutoff, uint64_t C) const;
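
  // A minimal sketch of the percentile encoding, assuming `PSI` is a
  // ProfileSummaryInfo reference and `Count` a profile count obtained
  // elsewhere:
  //   int PercentileCutoff = 995000; // == 99.5 * 10000, i.e. 99.5th percentile
  //   bool Hot = PSI.isHotCountNthPercentile(PercentileCutoff, Count);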

  /// Returns true if BasicBlock \p BB is considered hot.
  template <typename BBType, typename BFIT>
  bool isHotBlock(const BBType *BB, BFIT *BFI) const {
    auto Count = BFI->getBlockProfileCount(BB);
    return Count && isHotCount(*Count);
  }

  /// Returns true if BasicBlock \p BB is considered cold.
  template <typename BBType, typename BFIT>
  bool isColdBlock(const BBType *BB, BFIT *BFI) const {
    auto Count = BFI->getBlockProfileCount(BB);
    return Count && isColdCount(*Count);
  }
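
  // An illustrative sketch, assuming `PSI` is this ProfileSummaryInfo, `F` a
  // Function, and `BFI` its BlockFrequencyInfo (all obtained elsewhere):
  //   unsigned NumHotBlocks = 0;
  //   for (const BasicBlock &BB : F)
  //     if (PSI.isHotBlock(&BB, &BFI))
  //       ++NumHotBlocks;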

  template <typename BFIT>
  bool isColdBlock(BlockFrequency BlockFreq, const BFIT *BFI) const {
    auto Count = BFI->getProfileCountFromFreq(BlockFreq.getFrequency());
    return Count && isColdCount(*Count);
  }

  template <typename BBType, typename BFIT>
  bool isHotBlockNthPercentile(int PercentileCutoff, const BBType *BB,
                               BFIT *BFI) const {
    return isHotOrColdBlockNthPercentile<true, BBType, BFIT>(PercentileCutoff,
                                                             BB, BFI);
  }

  template <typename BFIT>
  bool isHotBlockNthPercentile(int PercentileCutoff, BlockFrequency BlockFreq,
                               BFIT *BFI) const {
    return isHotOrColdBlockNthPercentile<true, BFIT>(PercentileCutoff,
                                                     BlockFreq, BFI);
  }

  /// Returns true if BasicBlock \p BB is considered cold with regard to a given
  /// cold percentile cutoff value.
  /// PercentileCutoff is encoded as a 6-digit decimal fixed-point number, where
  /// the first two digits are the whole part, e.g. 995000 for the 99.5th
  /// percentile.
  template <typename BBType, typename BFIT>
  bool isColdBlockNthPercentile(int PercentileCutoff, const BBType *BB,
                                BFIT *BFI) const {
    return isHotOrColdBlockNthPercentile<false, BBType, BFIT>(PercentileCutoff,
                                                              BB, BFI);
  }
  template <typename BFIT>
  bool isColdBlockNthPercentile(int PercentileCutoff, BlockFrequency BlockFreq,
                                BFIT *BFI) const {
    return isHotOrColdBlockNthPercentile<false, BFIT>(PercentileCutoff,
                                                      BlockFreq, BFI);
  }
  /// Returns true if the call site \p CB is considered hot.
  bool isHotCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
  /// Returns true if call site \p CB is considered cold.
  bool isColdCallSite(const CallBase &CB, BlockFrequencyInfo *BFI) const;
  /// Returns HotCountThreshold if set. Recompute HotCountThreshold
  /// if not set.
  uint64_t getOrCompHotCountThreshold() const;
  /// Returns ColdCountThreshold if set. Recompute ColdCountThreshold
  /// if not set.
  uint64_t getOrCompColdCountThreshold() const;
  /// Returns HotCountThreshold if set.
  uint64_t getHotCountThreshold() const {
    return HotCountThreshold.value_or(0);
  }
  /// Returns ColdCountThreshold if set.
  uint64_t getColdCountThreshold() const {
    return ColdCountThreshold.value_or(0);
  }

private:
  template <typename FuncT>
  std::optional<uint64_t> getTotalCallCount(const FuncT *F) const {
    return std::nullopt;
  }

  template <bool isHot, typename FuncT, typename BFIT>
  bool isFunctionHotOrColdInCallGraphNthPercentile(int PercentileCutoff,
                                                   const FuncT *F,
                                                   BFIT &FI) const {
    if (!F || !hasProfileSummary())
      return false;
    if (auto FunctionCount = getEntryCount(F)) {
      if (isHot &&
          isHotCountNthPercentile(PercentileCutoff, FunctionCount->getCount()))
        return true;
      if (!isHot && !isColdCountNthPercentile(PercentileCutoff,
                                              FunctionCount->getCount()))
        return false;
    }
    if (auto TotalCallCount = getTotalCallCount(F)) {
      if (isHot && isHotCountNthPercentile(PercentileCutoff, *TotalCallCount))
        return true;
      if (!isHot &&
          !isColdCountNthPercentile(PercentileCutoff, *TotalCallCount))
        return false;
    }
    for (const auto &BB : *F) {
      if (isHot && isHotBlockNthPercentile(PercentileCutoff, &BB, &FI))
        return true;
      if (!isHot && !isColdBlockNthPercentile(PercentileCutoff, &BB, &FI))
        return false;
    }
    return !isHot;
  }

  template <bool isHot>
  bool isHotOrColdCountNthPercentile(int PercentileCutoff, uint64_t C) const;

  template <bool isHot, typename BBType, typename BFIT>
  bool isHotOrColdBlockNthPercentile(int PercentileCutoff, const BBType *BB,
                                     BFIT *BFI) const {
    auto Count = BFI->getBlockProfileCount(BB);
    if (isHot)
      return Count && isHotCountNthPercentile(PercentileCutoff, *Count);
    else
      return Count && isColdCountNthPercentile(PercentileCutoff, *Count);
  }

  template <bool isHot, typename BFIT>
  bool isHotOrColdBlockNthPercentile(int PercentileCutoff,
                                     BlockFrequency BlockFreq,
                                     BFIT *BFI) const {
    auto Count = BFI->getProfileCountFromFreq(BlockFreq.getFrequency());
    if (isHot)
      return Count && isHotCountNthPercentile(PercentileCutoff, *Count);
    else
      return Count && isColdCountNthPercentile(PercentileCutoff, *Count);
  }

  template <typename FuncT>
  std::optional<Function::ProfileCount> getEntryCount(const FuncT *F) const {
    return F->getEntryCount();
  }
};

template <>
inline std::optional<uint64_t>
ProfileSummaryInfo::getTotalCallCount<Function>(const Function *F) const {
  if (!hasSampleProfile())
    return std::nullopt;
  uint64_t TotalCallCount = 0;
  for (const auto &BB : *F)
    for (const auto &I : BB)
      if (isa<CallInst>(I) || isa<InvokeInst>(I))
        if (auto CallCount = getProfileCount(cast<CallBase>(I), nullptr))
          TotalCallCount += *CallCount;
  return TotalCallCount;
}

// Declare template specialization for llvm::MachineFunction. Do not implement
// it here, because we cannot include the MachineFunction header here; that
// would break dependency rules.
template <>
std::optional<Function::ProfileCount>
ProfileSummaryInfo::getEntryCount<MachineFunction>(
    const MachineFunction *F) const;

/// An analysis pass based on the legacy pass manager to deliver ProfileSummaryInfo.
class ProfileSummaryInfoWrapperPass : public ImmutablePass {
  std::unique_ptr<ProfileSummaryInfo> PSI;

public:
  static char ID;
  ProfileSummaryInfoWrapperPass();

  ProfileSummaryInfo &getPSI() { return *PSI; }
  const ProfileSummaryInfo &getPSI() const { return *PSI; }

  bool doInitialization(Module &M) override;
  bool doFinalization(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

/// An analysis pass based on the new PM to deliver ProfileSummaryInfo.
class ProfileSummaryAnalysis
    : public AnalysisInfoMixin<ProfileSummaryAnalysis> {
public:
  typedef ProfileSummaryInfo Result;

  Result run(Module &M, ModuleAnalysisManager &);

private:
  friend AnalysisInfoMixin<ProfileSummaryAnalysis>;
  static AnalysisKey Key;
};
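
// A minimal usage sketch for the analysis above; `ReportHotFunctions` is a
// hypothetical pass, and only the analysis lookup and the ProfileSummaryInfo
// queries come from this header:
//
//   PreservedAnalyses ReportHotFunctions::run(Module &M,
//                                             ModuleAnalysisManager &AM) {
//     ProfileSummaryInfo &PSI = AM.getResult<ProfileSummaryAnalysis>(M);
//     for (Function &F : M)
//       if (PSI.isFunctionEntryHot(&F))
//         errs() << F.getName() << " has a hot entry\n";
//     return PreservedAnalyses::all();
//   }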

/// Printer pass that uses \c ProfileSummaryAnalysis.
class ProfileSummaryPrinterPass
    : public PassInfoMixin<ProfileSummaryPrinterPass> {
  raw_ostream &OS;

public:
  explicit ProfileSummaryPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif

//===- RegionInfo.h - SESE region analysis ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Calculate a program structure tree built out of single entry single exit
// regions.
// The basic ideas are taken from "The Program Structure Tree - Richard Johnson,
// David Pearson, Keshav Pingali - 1994", however enriched with ideas from "The
// Refined Process Structure Tree - Jussi Vanhatalo, Hagen Voelzer, Jana
// Koehler - 2009".
// The algorithm to calculate these data structures, however, is completely
// different, as it takes advantage of existing information already available
// in (Post)dominance tree and dominance frontier passes. This leads to a
// simpler and in practice hopefully better performing algorithm. The runtimes
// of the algorithms described in the papers above are both linear in graph
// size, O(V+E), whereas this algorithm is not, as the dominance frontier
// information itself is not; in practice, though, the runtime seems to be on
// the order of magnitude of dominance tree calculation.
//
// WARNING: LLVM is generally very concerned about compile time such that
//          the use of additional analysis passes in the default
//          optimization sequence is avoided as much as possible.
//          Specifically, if you do not need RegionInfo and dominance
//          information would be sufficient, please base your work only on
//          the dominator tree. Most passes maintain it, such that using
//          it has often near zero cost. In contrast RegionInfo is by
//          default not available, is not maintained by existing
//          transformations and there is no intention to do so.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_REGIONINFO_H
#define LLVM_ANALYSIS_REGIONINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cassert>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <type_traits>
#include <vector>

namespace llvm {

class BasicBlock;
class DominanceFrontier;
class Loop;
class LoopInfo;
class PostDominatorTree;
class Region;
template <class RegionTr> class RegionBase;
class RegionInfo;
template <class RegionTr> class RegionInfoBase;
class RegionNode;
class raw_ostream;

// Class to be specialized for different users of RegionInfo
// (i.e. BasicBlocks or MachineBasicBlocks). This is only to avoid needing to
// pass around an unreasonable number of template parameters.
template <class FuncT_>
struct RegionTraits {
  // FuncT
  // BlockT
  // RegionT
  // RegionNodeT
  // RegionInfoT
  using BrokenT = typename FuncT_::UnknownRegionTypeError;
};

template <>
struct RegionTraits<Function> {
  using FuncT = Function;
  using BlockT = BasicBlock;
  using RegionT = Region;
  using RegionNodeT = RegionNode;
  using RegionInfoT = RegionInfo;
  using DomTreeT = DominatorTree;
  using DomTreeNodeT = DomTreeNode;
  using DomFrontierT = DominanceFrontier;
  using PostDomTreeT = PostDominatorTree;
  using InstT = Instruction;
  using LoopT = Loop;
  using LoopInfoT = LoopInfo;

  static unsigned getNumSuccessors(BasicBlock *BB) {
    return BB->getTerminator()->getNumSuccessors();
  }
};

/// Marker class to iterate over the elements of a Region in flat mode.
///
/// Passing this marker selects Flat mode iteration; omitting it selects the
/// default (non-flat) iteration.  During a Flat mode iteration all Regions are
/// entered and the iteration returns every BasicBlock.  If Flat mode is not
/// selected, just one RegionNode containing each subregion is returned.
template <class GraphType>
class FlatIt {};

/// A RegionNode represents a subregion or a BasicBlock that is part of a
/// Region.
template <class Tr>
class RegionNodeBase {
  friend class RegionBase<Tr>;

public:
  using BlockT = typename Tr::BlockT;
  using RegionT = typename Tr::RegionT;

private:
  /// This is the entry basic block that starts this region node.  If this is a
  /// BasicBlock RegionNode, then entry is just the basic block that this
  /// RegionNode represents.  Otherwise it is the entry of this (Sub)RegionNode.
  ///
  /// In the BBtoRegionNode map of the parent of this node, BB will always map
  /// to this node no matter which kind of node this one is.
  ///
  /// The node can hold either a Region or a BasicBlock.
  /// Use one bit to record whether this RegionNode is a subregion or a
  /// BasicBlock RegionNode.
  PointerIntPair<BlockT *, 1, bool> entry;

  /// The parent Region of this RegionNode.
  /// @see getParent()
  RegionT *parent;

protected:
  /// Create a RegionNode.
  ///
  /// @param Parent      The parent of this RegionNode.
  /// @param Entry       The entry BasicBlock of the RegionNode.  If this
  ///                    RegionNode represents a BasicBlock, this is the
  ///                    BasicBlock itself.  If it represents a subregion, this
  ///                    is the entry BasicBlock of the subregion.
  /// @param isSubRegion If this RegionNode represents a SubRegion.
  inline RegionNodeBase(RegionT *Parent, BlockT *Entry,
                        bool isSubRegion = false)
      : entry(Entry, isSubRegion), parent(Parent) {}

public:
  RegionNodeBase(const RegionNodeBase &) = delete;
  RegionNodeBase &operator=(const RegionNodeBase &) = delete;

  /// Get the parent Region of this RegionNode.
  ///
  /// The parent Region is the Region this RegionNode belongs to. If, for
  /// example, a BasicBlock is an element of two Regions, there exist two
  /// RegionNodes for this BasicBlock, each with getParent() pointing to the
  /// Region that RegionNode belongs to.
  ///
  /// @return Get the parent Region of this RegionNode.
  inline RegionT *getParent() const { return parent; }

  /// Get the entry BasicBlock of this RegionNode.
  ///
  /// If this RegionNode represents a BasicBlock this is just the BasicBlock
  /// itself, otherwise we return the entry BasicBlock of the Subregion.
  ///
  /// @return The entry BasicBlock of this RegionNode.
  inline BlockT *getEntry() const { return entry.getPointer(); }

  /// Get the content of this RegionNode.
  ///
  /// This can be either a BasicBlock or a subregion. Before calling getNodeAs()
  /// check the type of the content with the isSubRegion() function call.
  ///
  /// @return The content of this RegionNode.
  template <class T> inline T *getNodeAs() const;
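
  // An illustrative sketch of the intended call pattern for the Function
  // instantiation, where `Node` is assumed to point at some RegionNode:
  //   if (Node->isSubRegion())
  //     Region *R = Node->getNodeAs<Region>();
  //   else
  //     BasicBlock *BB = Node->getNodeAs<BasicBlock>();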

  /// Is this RegionNode a subregion?
  ///
  /// @return True if it contains a subregion. False if it contains a
  ///         BasicBlock.
  inline bool isSubRegion() const { return entry.getInt(); }
};

//===----------------------------------------------------------------------===//
/// A single entry single exit Region.
///
/// A Region is a connected subgraph of a control flow graph that has exactly
/// two connections to the remaining graph. It can be used to analyze or
/// optimize parts of the control flow graph.
///
/// A <em> simple Region </em> is connected to the remaining graph by just two
/// edges. One edge entering the Region and another one leaving the Region.
///
/// An <em> extended Region </em> (or just Region) is a subgraph that can be
/// transformed into a simple Region. The transformation is done by adding
/// BasicBlocks that merge several entry or exit edges so that after the merge
/// just one entry and one exit edge exists.
///
/// The \e Entry of a Region is the first BasicBlock that is passed after
/// entering the Region. It is an element of the Region. The entry BasicBlock
/// dominates all BasicBlocks in the Region.
///
/// The \e Exit of a Region is the first BasicBlock that is passed after
/// leaving the Region. It is not an element of the Region. The exit BasicBlock
/// postdominates all BasicBlocks in the Region.
///
/// A <em> canonical Region </em> cannot be constructed by combining smaller
/// Regions.
///
/// Region A is the \e parent of Region B, if B is completely contained in A.
///
/// Two canonical Regions either do not intersect at all or one is
/// the parent of the other.
///
/// The <em> Program Structure Tree</em> is a graph (V, E) where V is the set of
/// Regions in the control flow graph and E is the \e parent relation of these
/// Regions.
///
/// Example:
///
/// \verbatim
/// A simple control flow graph, that contains two regions.
///
///        1
///       / |
///      2   |
///     / \   3
///    4   5  |
///    |   |  |
///    6   7  8
///     \  | /
///      \ |/       Region A: 1 -> 9 {1,2,3,4,5,6,7,8}
///        9        Region B: 2 -> 9 {2,4,5,6,7}
/// \endverbatim
///
/// You can obtain more examples by either calling
///
/// <tt> "opt -passes='print<regions>' anyprogram.ll" </tt>
/// or
/// <tt> "opt -view-regions-only anyprogram.ll" </tt>
///
/// on any LLVM file you are interested in.
///
/// The first call returns a textual representation of the program structure
/// tree, the second one creates a graphical representation using graphviz.
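///
/// A minimal programmatic sketch, assuming a \c Region pointer \c R obtained
/// from RegionInfo; it prints the region and all of its subregions:
///
/// \code
///   void printRegionTree(const Region *R, unsigned Depth = 0) {
///     errs().indent(2 * Depth) << R->getNameStr() << "\n";
///     for (const std::unique_ptr<Region> &SubR : *R)
///       printRegionTree(SubR.get(), Depth + 1);
///   }
/// \endcode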
template <class Tr>
class RegionBase : public RegionNodeBase<Tr> {
  friend class RegionInfoBase<Tr>;

  using FuncT = typename Tr::FuncT;
  using BlockT = typename Tr::BlockT;
  using RegionInfoT = typename Tr::RegionInfoT;
  using RegionT = typename Tr::RegionT;
  using RegionNodeT = typename Tr::RegionNodeT;
  using DomTreeT = typename Tr::DomTreeT;
  using LoopT = typename Tr::LoopT;
  using LoopInfoT = typename Tr::LoopInfoT;
  using InstT = typename Tr::InstT;

  using BlockTraits = GraphTraits<BlockT *>;
  using InvBlockTraits = GraphTraits<Inverse<BlockT *>>;
  using SuccIterTy = typename BlockTraits::ChildIteratorType;
  using PredIterTy = typename InvBlockTraits::ChildIteratorType;

  // Information necessary to manage this Region.
  RegionInfoT *RI;
  DomTreeT *DT;

  // The exit BasicBlock of this region.
  // (The entry BasicBlock is part of RegionNode)
  BlockT *exit;

  using RegionSet = std::vector<std::unique_ptr<RegionT>>;

  // The subregions of this region.
  RegionSet children;

  using BBNodeMapT = std::map<BlockT *, std::unique_ptr<RegionNodeT>>;

  // Save the BasicBlock RegionNodes that are element of this Region.
  mutable BBNodeMapT BBNodeMap;

  /// Check if a BB is in this Region. This check also works
  /// if the region is incorrectly built. (EXPENSIVE!)
  void verifyBBInRegion(BlockT *BB) const;

  /// Walk over all the BBs of the region starting from BB and
  /// verify that all reachable basic blocks are elements of the region.
  /// (EXPENSIVE!)
  void verifyWalk(BlockT *BB, std::set<BlockT *> *visitedBB) const;

  /// Verify if the region and its children are valid regions (EXPENSIVE!)
  void verifyRegionNest() const;

public:
  /// Create a new region.
  ///
  /// @param Entry  The entry basic block of the region.
  /// @param Exit   The exit basic block of the region.
  /// @param RI     The region info object that is managing this region.
  /// @param DT     The dominator tree of the current function.
  /// @param Parent The surrounding region or NULL if this is a top level
  ///               region.
  RegionBase(BlockT *Entry, BlockT *Exit, RegionInfoT *RI, DomTreeT *DT,
             RegionT *Parent = nullptr);

  RegionBase(const RegionBase &) = delete;
  RegionBase &operator=(const RegionBase &) = delete;

  /// Delete the Region and all its subregions.
  ~RegionBase();

  /// Get the entry BasicBlock of the Region.
  /// @return The entry BasicBlock of the region.
  BlockT *getEntry() const {
    return RegionNodeBase<Tr>::getEntry();
  }

  /// Replace the entry basic block of the region with the new basic
  ///        block.
  ///
  /// @param BB  The new entry basic block of the region.
  void replaceEntry(BlockT *BB);

  /// Replace the exit basic block of the region with the new basic
  ///        block.
  ///
  /// @param BB  The new exit basic block of the region.
  void replaceExit(BlockT *BB);

  /// Recursively replace the entry basic block of the region.
  ///
  /// This function replaces the entry basic block with a new basic block. It
  /// also updates all child regions that have the same entry basic block as
  /// this region.
  ///
  /// @param NewEntry The new entry basic block.
  void replaceEntryRecursive(BlockT *NewEntry);

  /// Recursively replace the exit basic block of the region.
  ///
  /// This function replaces the exit basic block with a new basic block. It
  /// also updates all child regions that have the same exit basic block as
  /// this region.
  ///
  /// @param NewExit The new exit basic block.
  void replaceExitRecursive(BlockT *NewExit);

  /// Get the exit BasicBlock of the Region.
  /// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
  ///         Region.
  BlockT *getExit() const { return exit; }

  /// Get the parent of the Region.
  /// @return The parent of the Region or NULL if this is a top level
  ///         Region.
  RegionT *getParent() const {
    return RegionNodeBase<Tr>::getParent();
  }

  /// Get the RegionNode representing the current Region.
  /// @return The RegionNode representing the current Region.
  RegionNodeT *getNode() const {
    return const_cast<RegionNodeT *>(
        reinterpret_cast<const RegionNodeT *>(this));
  }

  /// Get the nesting level of this Region.
  ///
  /// A toplevel Region has depth 0.
  ///
  /// @return The depth of the region.
  unsigned getDepth() const;

  /// Check if a Region is the TopLevel region.
  ///
  /// The toplevel region represents the whole function.
  bool isTopLevelRegion() const { return exit == nullptr; }

  /// Return a new (non-canonical) region that is obtained by joining
  ///        this region with its predecessors.
  ///
  /// @return A region also starting at getEntry(), but reaching to the next
  ///         basic block that forms with getEntry() a (non-canonical) region.
  ///         NULL if such a basic block does not exist.
  RegionT *getExpandedRegion() const;

  /// Return the first block of this region's single entry edge,
  ///        if it exists.
  ///
  /// @return The BasicBlock starting this region's single entry edge,
  ///         else NULL.
  BlockT *getEnteringBlock() const;

  /// Return the first block of this region's single exit edge,
  ///        if it exists.
  ///
  /// @return The BasicBlock starting this region's single exit edge,
  ///         else NULL.
  BlockT *getExitingBlock() const;

  /// Collect all blocks of this region's single exit edge, if it exists.
  ///
  /// @return True if this region contains all the predecessors of the exit.
  bool getExitingBlocks(SmallVectorImpl<BlockT *> &Exitings) const;

  /// Is this a simple region?
  ///
  /// A region is simple if it has exactly one exit and one entry edge.
  ///
  /// @return True if the Region is simple.
  bool isSimple() const;

  /// Returns the name of the Region.
  /// @return The Name of the Region.
  std::string getNameStr() const;

  /// Return the RegionInfo object, that belongs to this Region.
  RegionInfoT *getRegionInfo() const { return RI; }

  /// PrintStyle - Print region in different ways.
  enum PrintStyle { PrintNone, PrintBB, PrintRN };

  /// Print the region.
  ///
  /// @param OS The output stream the Region is printed to.
  /// @param printTree Print also the tree of subregions.
  /// @param level The indentation level used for printing.
  void print(raw_ostream &OS, bool printTree = true, unsigned level = 0,
             PrintStyle Style = PrintNone) const;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  /// Print the region to stderr.
  void dump() const;
#endif

  /// Check if the region contains a BasicBlock.
  ///
  /// @param BB The BasicBlock that might be contained in this Region.
  /// @return True if the block is contained in the region otherwise false.
  bool contains(const BlockT *BB) const;

  /// Check if the region contains another region.
  ///
  /// @param SubRegion The region that might be contained in this Region.
  /// @return True if SubRegion is contained in the region otherwise false.
  bool contains(const RegionT *SubRegion) const {
    // Toplevel Region.
    if (!getExit())
      return true;

    return contains(SubRegion->getEntry()) &&
           (contains(SubRegion->getExit()) ||
            SubRegion->getExit() == getExit());
  }

  /// Check if the region contains an Instruction.
  ///
  /// @param Inst The Instruction that might be contained in this region.
  /// @return True if the Instruction is contained in the region otherwise
  /// false.
  bool contains(const InstT *Inst) const { return contains(Inst->getParent()); }

  /// Check if the region contains a loop.
  ///
  /// @param L The loop that might be contained in this region.
  /// @return True if the loop is contained in the region otherwise false.
  ///         In case a NULL pointer is passed to this function the result
  ///         is false, except for the region that describes the whole function.
  ///         In that case true is returned.
  bool contains(const LoopT *L) const;

  /// Get the outermost loop in the region that contains a loop.
  ///
  /// For a Loop L, find the outermost loop OuterL that is a parent loop of L
  /// and is itself contained in the region.
  ///
  /// @param L The loop at which the lookup starts.
  /// @return The outermost loop in the region, NULL if such a loop does not
  ///         exist or if the region describes the whole function.
  LoopT *outermostLoopInRegion(LoopT *L) const;

  /// Get the outermost loop in the region that contains a basic block.
  ///
  /// For a basic block BB, find the outermost loop L that contains BB and is
  /// itself contained in the region.
  ///
  /// @param LI A pointer to a LoopInfo analysis.
  /// @param BB The basic block surrounded by the loop.
  /// @return The outermost loop in the region, NULL if such a loop does not
  ///         exist or if the region describes the whole function.
  LoopT *outermostLoopInRegion(LoopInfoT *LI, BlockT *BB) const;

  /// Get the subregion that starts at a BasicBlock
  ///
  /// @param BB The BasicBlock at which the subregion should start.
  /// @return The Subregion if available, otherwise NULL.
  RegionT *getSubRegionNode(BlockT *BB) const;

  /// Get the RegionNode for a BasicBlock
  ///
  /// @param BB The BasicBlock at which the RegionNode should start.
  /// @return If available, the RegionNode that represents the subregion
  ///         starting at BB. If no subregion starts at BB, the RegionNode
  ///         representing BB.
  RegionNodeT *getNode(BlockT *BB) const;

  /// Get the BasicBlock RegionNode for a BasicBlock
  ///
  /// @param BB The BasicBlock for which the RegionNode is requested.
  /// @return The RegionNode representing the BB.
  RegionNodeT *getBBNode(BlockT *BB) const;

  /// Add a new subregion to this Region.
  ///
  /// @param SubRegion The new subregion that will be added.
  /// @param moveChildren Move the children of this region that are also
  ///                     contained in SubRegion into SubRegion.
  void addSubRegion(RegionT *SubRegion, bool moveChildren = false);

  /// Remove a subregion from this Region.
  ///
  /// The subregion is not deleted, as it will probably be inserted into another
  /// region.
  /// @param SubRegion The SubRegion that will be removed.
  RegionT *removeSubRegion(RegionT *SubRegion);

  /// Move all direct child nodes of this Region to another Region.
  ///
  /// @param To The Region the child nodes will be transferred to.
  void transferChildrenTo(RegionT *To);

  /// Verify if the region is a correct region.
  ///
  /// Check if this is a correctly built Region. This is an expensive check, as
  /// the complete CFG of the Region will be walked.
  void verifyRegion() const;

  /// Clear the cache for BB RegionNodes.
  ///
  /// After calling this function the BasicBlock RegionNodes will be stored at
  /// different memory locations. RegionNodes obtained before this function is
  /// called are therefore not comparable to RegionNodes obtained afterwards.
  void clearNodeCache();

  /// @name Subregion Iterators
  ///
  /// These iterators iterate over all subregions of this Region.
  //@{
  using iterator = typename RegionSet::iterator;
  using const_iterator = typename RegionSet::const_iterator;

  iterator begin() { return children.begin(); }
  iterator end() { return children.end(); }

  const_iterator begin() const { return children.begin(); }
  const_iterator end() const { return children.end(); }
  //@}

  /// @name BasicBlock Iterators
  ///
  /// These iterators iterate over all BasicBlocks that are contained in this
  /// Region. The iterator also iterates over BasicBlocks that are elements of
  /// a subregion of this Region. It is therefore called a flat iterator.
  //@{
  template <bool IsConst>
  class block_iterator_wrapper
      : public df_iterator<
            std::conditional_t<IsConst, const BlockT, BlockT> *> {
    using super =
        df_iterator<std::conditional_t<IsConst, const BlockT, BlockT> *>;

  public:
    using Self = block_iterator_wrapper<IsConst>;
    using value_type = typename super::value_type;

    // Construct the begin iterator.
    block_iterator_wrapper(value_type Entry, value_type Exit)
        : super(df_begin(Entry)) {
      // Mark the exit of the region as visited, so that the children of the
      // exit and the exit itself, i.e. the blocks outside the region, will
      // never be visited.
      super::Visited.insert(Exit);
    }

    // Construct the end iterator.
    block_iterator_wrapper() : super(df_end<value_type>((BlockT *)nullptr)) {}

    /*implicit*/ block_iterator_wrapper(super I) : super(I) {}

    // FIXME: Even a const_iterator returns a non-const BasicBlock pointer.
    //        This was introduced for backwards compatibility, but should
    //        be removed as soon as all users are fixed.
    BlockT *operator*() const {
      return const_cast<BlockT *>(super::operator*());
    }
  };

  using block_iterator = block_iterator_wrapper<false>;
  using const_block_iterator = block_iterator_wrapper<true>;

  block_iterator block_begin() { return block_iterator(getEntry(), getExit()); }

  block_iterator block_end() { return block_iterator(); }

  const_block_iterator block_begin() const {
    return const_block_iterator(getEntry(), getExit());
  }
  const_block_iterator block_end() const { return const_block_iterator(); }

  using block_range = iterator_range<block_iterator>;
  using const_block_range = iterator_range<const_block_iterator>;

  /// Returns a range view of the basic blocks in the region.
  inline block_range blocks() {
    return block_range(block_begin(), block_end());
  }

  /// Returns a range view of the basic blocks in the region.
  ///
  /// This is the 'const' version of the range view.
  inline const_block_range blocks() const {
    return const_block_range(block_begin(), block_end());
  }
  //@}
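
  // An illustrative sketch for the Function instantiation (where BlockT is
  // BasicBlock); `R` is assumed to be a Region pointer obtained elsewhere:
  //   for (BasicBlock *BB : R->blocks())
  //     errs() << BB->getName() << "\n";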

  /// @name Element Iterators
  ///
  /// These iterators iterate over all BasicBlock and subregion RegionNodes that
  /// are direct children of this Region. It does not iterate over any
  /// RegionNodes that are also element of a subregion of this Region.
  //@{
  using element_iterator =
      df_iterator<RegionNodeT *, df_iterator_default_set<RegionNodeT *>, false,
                  GraphTraits<RegionNodeT *>>;

  using const_element_iterator =
      df_iterator<const RegionNodeT *,
                  df_iterator_default_set<const RegionNodeT *>, false,
                  GraphTraits<const RegionNodeT *>>;

  element_iterator element_begin();
  element_iterator element_end();
  iterator_range<element_iterator> elements() {
    return make_range(element_begin(), element_end());
  }

  const_element_iterator element_begin() const;
  const_element_iterator element_end() const;
  iterator_range<const_element_iterator> elements() const {
    return make_range(element_begin(), element_end());
  }
  //@}
};

/// Print a RegionNode.
template <class Tr>
inline raw_ostream &operator<<(raw_ostream &OS, const RegionNodeBase<Tr> &Node);

//===----------------------------------------------------------------------===//
/// Analysis that detects all canonical Regions.
///
/// The RegionInfo pass detects all canonical regions in a function. The Regions
/// are connected using the parent relation. This builds a Program Structure
/// Tree.
template <class Tr>
class RegionInfoBase {
  friend class RegionInfo;
  friend class MachineRegionInfo;

  using BlockT = typename Tr::BlockT;
  using FuncT = typename Tr::FuncT;
  using RegionT = typename Tr::RegionT;
  using RegionInfoT = typename Tr::RegionInfoT;
  using DomTreeT = typename Tr::DomTreeT;
  using DomTreeNodeT = typename Tr::DomTreeNodeT;
  using PostDomTreeT = typename Tr::PostDomTreeT;
  using DomFrontierT = typename Tr::DomFrontierT;
  using BlockTraits = GraphTraits<BlockT *>;
  using InvBlockTraits = GraphTraits<Inverse<BlockT *>>;
  using SuccIterTy = typename BlockTraits::ChildIteratorType;
  using PredIterTy = typename InvBlockTraits::ChildIteratorType;

  using BBtoBBMap = DenseMap<BlockT *, BlockT *>;
  using BBtoRegionMap = DenseMap<BlockT *, RegionT *>;

  RegionInfoBase();

  RegionInfoBase(RegionInfoBase &&Arg)
    : DT(std::move(Arg.DT)), PDT(std::move(Arg.PDT)), DF(std::move(Arg.DF)),
      TopLevelRegion(std::move(Arg.TopLevelRegion)),
      BBtoRegion(std::move(Arg.BBtoRegion)) {
    Arg.wipe();
  }

  RegionInfoBase &operator=(RegionInfoBase &&RHS) {
    DT = std::move(RHS.DT);
    PDT = std::move(RHS.PDT);
    DF = std::move(RHS.DF);
    TopLevelRegion = std::move(RHS.TopLevelRegion);
    BBtoRegion = std::move(RHS.BBtoRegion);
    RHS.wipe();
    return *this;
  }

  virtual ~RegionInfoBase();

  DomTreeT *DT;
  PostDomTreeT *PDT;
  DomFrontierT *DF;

  /// The top level region.
  RegionT *TopLevelRegion = nullptr;

  /// Map every BB to the smallest region that contains BB.
  BBtoRegionMap BBtoRegion;

protected:
  /// Update references to a RegionInfoT held by the RegionT managed here.
  ///
  /// This is a post-move helper. Regions hold references to the owning
  /// RegionInfo object. After a move these need to be fixed.
  template<typename TheRegionT>
  void updateRegionTree(RegionInfoT &RI, TheRegionT *R) {
    if (!R)
      return;
    R->RI = &RI;
    for (auto &SubR : *R)
      updateRegionTree(RI, SubR.get());
  }

private:
  /// Wipe this region tree's state without releasing any resources.
  ///
  /// This is essentially a post-move helper only. It leaves the object in an
  /// assignable and destroyable state, but otherwise invalid.
  void wipe() {
    DT = nullptr;
    PDT = nullptr;
    DF = nullptr;
    TopLevelRegion = nullptr;
    BBtoRegion.clear();
  }

  // Check whether the entries of BBtoRegion for the BBs of region
  // SR are correct. Triggers an assertion if not. Calls itself recursively for
  // subregions.
  void verifyBBMap(const RegionT *SR) const;

  // Returns true if BB is in the dominance frontier of
  // entry, because it was inherited from exit. Otherwise there is an
  // edge going from entry to BB without passing through exit.
  bool isCommonDomFrontier(BlockT *BB, BlockT *entry, BlockT *exit) const;

  // Check if entry and exit surround a valid region, based on
  // dominance tree and dominance frontier.
  bool isRegion(BlockT *entry, BlockT *exit) const;

  // Saves a shortcut pointing from entry to exit.
  // This function may extend this shortcut if possible.
  void insertShortCut(BlockT *entry, BlockT *exit, BBtoBBMap *ShortCut) const;

  // Returns the next BB that postdominates N, while skipping
  // all post dominators that cannot finish a canonical region.
  DomTreeNodeT *getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const;

  // A region is trivial if it contains only one BB.
  bool isTrivialRegion(BlockT *entry, BlockT *exit) const;

  // Creates a single entry single exit region.
  RegionT *createRegion(BlockT *entry, BlockT *exit);

  // Detect all regions starting with BB 'entry'.
  void findRegionsWithEntry(BlockT *entry, BBtoBBMap *ShortCut);

  // Detects regions in F.
  void scanForRegions(FuncT &F, BBtoBBMap *ShortCut);

  // Get the topmost parent with the same entry block.
  RegionT *getTopMostParent(RegionT *region);

  // Build the region hierarchy after all regions have been detected.
  void buildRegionsTree(DomTreeNodeT *N, RegionT *region);

  // Update statistics about created regions.
  virtual void updateStatistics(RegionT *R) = 0;

  // Detect all regions in function and build the region tree.
  void calculate(FuncT &F);

public:
  RegionInfoBase(const RegionInfoBase &) = delete;
  RegionInfoBase &operator=(const RegionInfoBase &) = delete;

  static bool VerifyRegionInfo;
  static typename RegionT::PrintStyle printStyle;

  void print(raw_ostream &OS) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const;
#endif

  void releaseMemory();

  /// Get the smallest region that contains a BasicBlock.
  ///
  /// @param BB The basic block.
  /// @return The smallest region that contains BB, or NULL if there is no
  /// region containing BB.
  RegionT *getRegionFor(BlockT *BB) const;

  ///  Set the smallest region that surrounds a basic block.
  ///
  /// @param BB The basic block surrounded by a region.
  /// @param R The smallest region that surrounds BB.
  void setRegionFor(BlockT *BB, RegionT *R);

  /// A shortcut for getRegionFor().
  ///
  /// @param BB The basic block.
  /// @return The smallest region that contains BB, or NULL if there is no
  /// region containing BB.
  RegionT *operator[](BlockT *BB) const;

  /// Return the exit of the maximal refined region that starts at a
  /// BasicBlock.
  ///
  /// @param BB The BasicBlock at which the refined region starts.
  BlockT *getMaxRegionExit(BlockT *BB) const;

  /// Find the smallest region that contains two regions.
  ///
  /// @param A The first region.
  /// @param B The second region.
  /// @return The smallest region containing A and B.
  RegionT *getCommonRegion(RegionT *A, RegionT *B) const;

  /// Find the smallest region that contains two basic blocks.
  ///
  /// @param A The first basic block.
  /// @param B The second basic block.
  /// @return The smallest region that contains A and B.
  RegionT *getCommonRegion(BlockT *A, BlockT *B) const {
    return getCommonRegion(getRegionFor(A), getRegionFor(B));
  }

  /// Find the smallest region that contains a set of regions.
  ///
  /// @param Regions A vector of regions.
  /// @return The smallest region that contains all regions in Regions.
  RegionT *getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const;

  /// Find the smallest region that contains a set of basic blocks.
  ///
  /// @param BBs A vector of basic blocks.
  /// @return The smallest region that contains all basic blocks in BBs.
  RegionT *getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const;

  RegionT *getTopLevelRegion() const { return TopLevelRegion; }

  /// Clear the Node Cache for all Regions.
  ///
  /// @see Region::clearNodeCache()
  void clearNodeCache() {
    if (TopLevelRegion)
      TopLevelRegion->clearNodeCache();
  }

  void verifyAnalysis() const;
};

class RegionNode : public RegionNodeBase<RegionTraits<Function>> {
public:
  inline RegionNode(Region *Parent, BasicBlock *Entry, bool isSubRegion = false)
      : RegionNodeBase<RegionTraits<Function>>(Parent, Entry, isSubRegion) {}

  bool operator==(const Region &RN) const {
    return this == reinterpret_cast<const RegionNode *>(&RN);
  }
};

class Region : public RegionBase<RegionTraits<Function>> {
public:
  Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo *RI, DominatorTree *DT,
         Region *Parent = nullptr);
  ~Region();

  bool operator==(const RegionNode &RN) const {
    return &RN == reinterpret_cast<const RegionNode *>(this);
  }
};

class RegionInfo : public RegionInfoBase<RegionTraits<Function>> {
public:
  using Base = RegionInfoBase<RegionTraits<Function>>;

  explicit RegionInfo();

  RegionInfo(RegionInfo &&Arg) : Base(std::move(static_cast<Base &>(Arg))) {
    updateRegionTree(*this, TopLevelRegion);
  }

  RegionInfo &operator=(RegionInfo &&RHS) {
    Base::operator=(std::move(static_cast<Base &>(RHS)));
    updateRegionTree(*this, TopLevelRegion);
    return *this;
  }

  ~RegionInfo() override;

  /// Handle invalidation explicitly.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);

  // updateStatistics - Update statistics about created regions.
  void updateStatistics(Region *R) final;

  void recalculate(Function &F, DominatorTree *DT, PostDominatorTree *PDT,
                   DominanceFrontier *DF);

#ifndef NDEBUG
  /// Opens a viewer to show the GraphViz visualization of the regions.
  ///
  /// Useful during debugging as an alternative to dump().
  void view();

  /// Opens a viewer to show the GraphViz visualization of this region
  /// without instructions in the BasicBlocks.
  ///
  /// Useful during debugging as an alternative to dump().
  void viewOnly();
#endif
};

class RegionInfoPass : public FunctionPass {
  RegionInfo RI;

public:
  static char ID;

  explicit RegionInfoPass();
  ~RegionInfoPass() override;

  RegionInfo &getRegionInfo() { return RI; }

  const RegionInfo &getRegionInfo() const { return RI; }

  /// @name FunctionPass interface
  //@{
  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void verifyAnalysis() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void print(raw_ostream &OS, const Module *) const override;
  void dump() const;
  //@}
};
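
// A minimal legacy-PM usage sketch; `MyPass` is hypothetical, while
// RegionInfoPass and its accessors are the ones declared above:
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<RegionInfoPass>();
//     AU.setPreservesAll();
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     RegionInfo &RI = getAnalysis<RegionInfoPass>().getRegionInfo();
//     Region *TopLevel = RI.getTopLevelRegion();
//     (void)TopLevel;
//     return false; // Analysis only; the IR is not modified.
//   }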

/// Analysis pass that exposes the \c RegionInfo for a function.
class RegionInfoAnalysis : public AnalysisInfoMixin<RegionInfoAnalysis> {
  friend AnalysisInfoMixin<RegionInfoAnalysis>;

  static AnalysisKey Key;

public:
  using Result = RegionInfo;

  RegionInfo run(Function &F, FunctionAnalysisManager &AM);
};
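
// The new-PM counterpart as a sketch; the surrounding function pass is
// hypothetical:
//
//   RegionInfo &RI = AM.getResult<RegionInfoAnalysis>(F);
//   if (Region *R = RI.getRegionFor(&F.getEntryBlock()))
//     errs() << "entry block lives in " << R->getNameStr() << "\n";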

/// Printer pass for the \c RegionInfo.
class RegionInfoPrinterPass : public PassInfoMixin<RegionInfoPrinterPass> {
  raw_ostream &OS;

public:
  explicit RegionInfoPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for the \c RegionInfo.
struct RegionInfoVerifierPass : PassInfoMixin<RegionInfoVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

template <>
template <>
inline BasicBlock *
RegionNodeBase<RegionTraits<Function>>::getNodeAs<BasicBlock>() const {
  assert(!isSubRegion() && "This is not a BasicBlock RegionNode!");
  return getEntry();
}

template <>
template <>
inline Region *
RegionNodeBase<RegionTraits<Function>>::getNodeAs<Region>() const {
  assert(isSubRegion() && "This is not a subregion RegionNode!");
  auto Unconst = const_cast<RegionNodeBase<RegionTraits<Function>> *>(this);
  return reinterpret_cast<Region *>(Unconst);
}

template <class Tr>
inline raw_ostream &operator<<(raw_ostream &OS,
                               const RegionNodeBase<Tr> &Node) {
  using BlockT = typename Tr::BlockT;
  using RegionT = typename Tr::RegionT;

  if (Node.isSubRegion())
    return OS << Node.template getNodeAs<RegionT>()->getNameStr();
  else
    return OS << Node.template getNodeAs<BlockT>()->getName();
}

extern template class RegionBase<RegionTraits<Function>>;
extern template class RegionNodeBase<RegionTraits<Function>>;
extern template class RegionInfoBase<RegionTraits<Function>>;

} // end namespace llvm

#endif // LLVM_ANALYSIS_REGIONINFO_H

//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Implements a lazy call graph analysis and related passes for the new pass
/// manager.
///
/// NB: This is *not* a traditional call graph! It is a graph which models both
/// the current calls and potential calls. As a consequence there are many
/// edges in this call graph that do not correspond to a 'call' or 'invoke'
/// instruction.
///
/// The primary use case of this graph analysis is to facilitate iterating
/// across the functions of a module in ways that ensure all callees are
/// visited prior to a caller (given any SCC constraints), or vice versa. As
/// such it is particularly well suited to organizing CGSCC optimizations such
/// as inlining, outlining, argument promotion, etc. That is its primary use
/// case and motivates the design. It may not be appropriate for other
/// purposes. The use graph of functions or some other conservative analysis of
/// call instructions may be interesting for optimizations and subsequent
/// analyses which don't work in the context of an overly specified
/// potential-call-edge graph.
///
/// To understand the specific rules and nature of this call graph analysis,
/// see the documentation of the \c LazyCallGraph below.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
#define LLVM_ANALYSIS_LAZYCALLGRAPH_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <optional>
#include <string>
#include <utility>

namespace llvm {

class Constant;
class Function;
template <class GraphType> struct GraphTraits;
class Module;
class TargetLibraryInfo;
class Value;

/// A lazily constructed view of the call graph of a module.
///
/// With the edges of this graph, the motivating constraint that we are
/// attempting to maintain is that function-local optimization, CGSCC-local
/// optimizations, and optimizations transforming a pair of functions connected
/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
/// DAG. That is, no optimizations will delete, remove, or add an edge such
/// that functions already visited in a bottom-up order of the SCC DAG are no
/// longer valid to have visited, or such that functions not yet visited in
/// a bottom-up order of the SCC DAG are not required to have already been
/// visited.
///
/// Within this constraint, the desire is to minimize the merge points of the
/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
/// in the SCC DAG, the more independence there is in optimizing within it.
/// There is a strong desire to enable parallelization of optimizations over
/// the call graph, and both limited fanout and merge points will (artificially
/// in some cases) limit the scaling of such an effort.
///
/// To this end, the graph represents both direct and any potential resolution to
/// an indirect call edge. Another way to think about it is that it represents
/// both the direct call edges and any direct call edges that might be formed
/// through static optimizations. Specifically, it considers taking the address
/// of a function to be an edge in the call graph because this might be
/// forwarded to become a direct call by some subsequent function-local
/// optimization. The result is that the graph closely follows the use-def
/// edges for functions. Walking "up" the graph can be done by looking at all
/// of the uses of a function.
///
/// The roots of the call graph are the external functions and functions
/// escaped into global variables. Those functions can be called from outside
/// of the module or via unknowable means in the IR -- we may not be able to
/// form even a potential call edge from a function body which may dynamically
/// load the function and call it.
///
/// This analysis still requires updates to remain valid after optimizations
/// which could potentially change the set of potential callees. The
/// constraints it operates under only make the traversal order remain valid.
///
/// The entire analysis must be re-computed if full interprocedural
/// optimizations run at any point. For example, globalopt completely
/// invalidates the information in this analysis.
///
/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
/// it from the existing CallGraph. At some point, it is expected that this
/// will be the only call graph and it will be renamed accordingly.
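///
/// A bottom-up traversal sketch, assuming a graph \c CG obtained from the
/// corresponding analysis pass; it walks RefSCCs in post-order and visits
/// every SCC and node within them:
///
/// \code
///   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
///     for (LazyCallGraph::SCC &C : RC)
///       for (LazyCallGraph::Node &N : C)
///         errs() << "visiting " << N.getName() << "\n";
/// \endcode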
class LazyCallGraph {
public:
  class Node;
  class EdgeSequence;
  class SCC;
  class RefSCC;

  /// A class used to represent edges in the call graph.
  ///
  /// The lazy call graph models both *call* edges and *reference* edges. Call
  /// edges are much what you would expect, and exist when there is a 'call' or
  /// 'invoke' instruction of some function. Reference edges are also tracked
  /// alongside these, and exist whenever any instruction (transitively
  /// through its operands) references a function. All call edges are
  /// inherently reference edges, and so the reference graph forms a superset
  /// of the formal call graph.
  ///
  /// All of these forms of edges are fundamentally represented as outgoing
  /// edges. The edges are stored in the source node and point at the target
  /// node. This allows the edge structure itself to be a very compact data
  /// structure: essentially a tagged pointer.
  class Edge {
  public:
    /// The kind of edge in the graph.
    enum Kind : bool { Ref = false, Call = true };

    Edge();
    explicit Edge(Node &N, Kind K);

    /// Test whether the edge is null.
    ///
    /// This happens when an edge has been deleted. We leave the edge objects
    /// around but clear them.
    explicit operator bool() const;

    /// Returns the \c Kind of the edge.
    Kind getKind() const;

    /// Test whether the edge represents a direct call to a function.
    ///
    /// This requires that the edge is not null.
    bool isCall() const;

    /// Get the call graph node referenced by this edge.
    ///
    /// This requires that the edge is not null.
    Node &getNode() const;

    /// Get the function referenced by this edge.
    ///
    /// This requires that the edge is not null.
    Function &getFunction() const;

  private:
    friend class LazyCallGraph::EdgeSequence;
    friend class LazyCallGraph::RefSCC;

    PointerIntPair<Node *, 1, Kind> Value;

    void setKind(Kind K) { Value.setInt(K); }
  };

  /// The edge sequence object.
  ///
  /// This typically exists entirely within the node but is exposed as
  /// a separate type because a node doesn't initially have edges. An explicit
  /// population step is required to produce this sequence at first and it is
  /// then cached in the node. It is also used to represent edges entering the
  /// graph from outside the module to model the graph's roots.
  ///
  /// The sequence itself is both iterable and indexable. The indexes remain
  /// stable even as the sequence mutates (including removal).
  class EdgeSequence {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;
    friend class LazyCallGraph::RefSCC;

    using VectorT = SmallVector<Edge, 4>;
    using VectorImplT = SmallVectorImpl<Edge>;

  public:
    /// An iterator used for the edges to both entry nodes and child nodes.
    class iterator
        : public iterator_adaptor_base<iterator, VectorImplT::iterator,
                                       std::forward_iterator_tag> {
      friend class LazyCallGraph;
      friend class LazyCallGraph::Node;

      VectorImplT::iterator E;

      // Build the iterator for a specific position in the edge list.
      iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
          : iterator_adaptor_base(BaseI), E(E) {
        while (I != E && !*I)
          ++I;
      }

    public:
      iterator() = default;

      using iterator_adaptor_base::operator++;
      iterator &operator++() {
        do {
          ++I;
        } while (I != E && !*I);
        return *this;
      }
    };

    /// An iterator over specifically call edges.
    ///
    /// This has the same iteration properties as the \c iterator, but
    /// restricts itself to edges which represent actual calls.
    class call_iterator
        : public iterator_adaptor_base<call_iterator, VectorImplT::iterator,
                                       std::forward_iterator_tag> {
      friend class LazyCallGraph;
      friend class LazyCallGraph::Node;

      VectorImplT::iterator E;

      /// Advance the iterator to the next valid, call edge.
      void advanceToNextEdge() {
        while (I != E && (!*I || !I->isCall()))
          ++I;
      }

      // Build the iterator for a specific position in the edge list.
      call_iterator(VectorImplT::iterator BaseI, VectorImplT::iterator E)
          : iterator_adaptor_base(BaseI), E(E) {
        advanceToNextEdge();
      }

    public:
      call_iterator() = default;

      using iterator_adaptor_base::operator++;
      call_iterator &operator++() {
        ++I;
        advanceToNextEdge();
        return *this;
      }
    };

    iterator begin() { return iterator(Edges.begin(), Edges.end()); }
    iterator end() { return iterator(Edges.end(), Edges.end()); }

    Edge &operator[](Node &N) {
      assert(EdgeIndexMap.contains(&N) && "No such edge!");
      auto &E = Edges[EdgeIndexMap.find(&N)->second];
      assert(E && "Dead or null edge!");
      return E;
    }

    Edge *lookup(Node &N) {
      auto EI = EdgeIndexMap.find(&N);
      if (EI == EdgeIndexMap.end())
        return nullptr;
      auto &E = Edges[EI->second];
      return E ? &E : nullptr;
    }

    call_iterator call_begin() {
      return call_iterator(Edges.begin(), Edges.end());
    }
    call_iterator call_end() { return call_iterator(Edges.end(), Edges.end()); }

    iterator_range<call_iterator> calls() {
      return make_range(call_begin(), call_end());
    }

    bool empty() {
      for (auto &E : Edges)
        if (E)
          return false;

      return true;
    }

  private:
    VectorT Edges;
    DenseMap<Node *, int> EdgeIndexMap;

    EdgeSequence() = default;

    /// Internal helper to insert an edge to a node.
    void insertEdgeInternal(Node &ChildN, Edge::Kind EK);

    /// Internal helper to change an edge kind.
    void setEdgeKind(Node &ChildN, Edge::Kind EK);

    /// Internal helper to remove the edge to the given function.
    bool removeEdgeInternal(Node &ChildN);
  };

  /// A node in the call graph.
  ///
  /// This represents a single node. Its primary roles are to cache the list of
  /// callees, de-duplicate and provide fast testing of whether a function is a
  /// callee, and facilitate iteration of child nodes in the graph.
  ///
  /// The node works much like an optional in order to lazily populate the
  /// edges of each node. Until populated, there are no edges. Once populated,
  /// you can access the edges by dereferencing the node or using the `->`
  /// operator as if the node was an `std::optional<EdgeSequence>`.
  class Node {
    friend class LazyCallGraph;
    friend class LazyCallGraph::RefSCC;

  public:
    LazyCallGraph &getGraph() const { return *G; }

    Function &getFunction() const { return *F; }

    StringRef getName() const { return F->getName(); }

    /// Equality is defined as address equality.
    bool operator==(const Node &N) const { return this == &N; }
    bool operator!=(const Node &N) const { return !operator==(N); }

    /// Tests whether the node has been populated with edges.
    bool isPopulated() const { return Edges.has_value(); }

    /// Tests whether this is actually a dead node and no longer valid.
    ///
    /// Users rarely interact with nodes in this state and other methods are
    /// invalid. This is used to model a node in an edge list where the
    /// function has been completely removed.
    bool isDead() const {
      assert(!G == !F &&
             "Both graph and function pointers should be null or non-null.");
      return !G;
    }

    // We allow accessing the edges by dereferencing or using the arrow
    // operator, essentially wrapping the internal optional.
    EdgeSequence &operator*() const {
      // Rip const off because the node itself isn't changing here.
      return const_cast<EdgeSequence &>(*Edges);
    }
    EdgeSequence *operator->() const { return &**this; }

    /// Populate the edges of this node if necessary.
    ///
    /// The first time this is called it will populate the edges for this node
    /// in the graph. It does this by scanning the underlying function, so once
    /// this is done, any changes to that function must be explicitly reflected
    /// in updates to the graph.
    ///
    /// \returns the populated \c EdgeSequence to simplify walking it.
    ///
    /// This will not update or re-scan anything if called repeatedly. Instead,
    /// the edge sequence is cached and returned immediately on subsequent
    /// calls.
    EdgeSequence &populate() {
      if (Edges)
        return *Edges;

      return populateSlow();
    }

  private:
    LazyCallGraph *G;
    Function *F;

    // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
    // stored directly within the node. These are both '-1' when nodes are part
    // of an SCC (or RefSCC), or '0' when not yet reached in a DFS walk.
    int DFSNumber = 0;
    int LowLink = 0;

    std::optional<EdgeSequence> Edges;

    /// Basic constructor. The actual scan of F into Edges and EdgeIndexMap
    /// happens lazily in populateSlow().
    Node(LazyCallGraph &G, Function &F) : G(&G), F(&F) {}

    /// Implementation of the scan when populating.
    EdgeSequence &populateSlow();

    /// Internal helper to directly replace the function with a new one.
    ///
    /// This is used to facilitate transformations which need to replace the
    /// formal Function object but directly move the body and users from one to
    /// the other.
    void replaceFunction(Function &NewF);

    void clear() { Edges.reset(); }

    /// Print the name of this node's function.
    friend raw_ostream &operator<<(raw_ostream &OS, const Node &N) {
      return OS << N.F->getName();
    }

    /// Dump the name of this node's function to stderr.
    void dump() const;
  };
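
  // Example (illustrative sketch): lazily populating a node and walking its
  // outgoing edges. `CG` and `F` are assumed to be a live LazyCallGraph and a
  // Function in its module; dbgs() is used purely for illustration.
  //
  //   LazyCallGraph::Node &N = CG.get(F);
  //   LazyCallGraph::EdgeSequence &Edges = N.populate();
  //   for (LazyCallGraph::Edge &E : Edges)
  //     if (E.isCall())
  //       dbgs() << N << " calls " << E.getFunction().getName() << "\n";
  //   // Repeated populate() calls return the cached sequence; F is scanned
  //   // only once.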

  /// An SCC of the call graph.
  ///
  /// This represents a Strongly Connected Component of the direct call graph
  /// -- ignoring indirect calls and function references. It stores this as
  /// a collection of call graph nodes. While the order of nodes in the SCC is
  /// stable, it is not any particular order.
  ///
  /// The SCCs are nested within a \c RefSCC, see below for details about that
  /// outer structure. SCCs do not support mutation of the call graph, that
  /// must be done through the containing \c RefSCC in order to fully reason
  /// about the ordering and connections of the graph.
  class LLVM_EXTERNAL_VISIBILITY SCC {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    RefSCC *OuterRefSCC;
    SmallVector<Node *, 1> Nodes;

    template <typename NodeRangeT>
    SCC(RefSCC &OuterRefSCC, NodeRangeT &&Nodes)
        : OuterRefSCC(&OuterRefSCC), Nodes(std::forward<NodeRangeT>(Nodes)) {}

    void clear() {
      OuterRefSCC = nullptr;
      Nodes.clear();
    }

    /// Print a short description useful for debugging or logging.
    ///
    /// We print the function names in the SCC wrapped in '()'s, skipping the
    /// middle functions if there are a large number of them.
    //
    // Note: this is defined inline to dodge issues with GCC's interpretation
    // of enclosing namespaces for friend function declarations.
    friend raw_ostream &operator<<(raw_ostream &OS, const SCC &C) {
      OS << '(';
      int I = 0;
      for (LazyCallGraph::Node &N : C) {
        if (I > 0)
          OS << ", ";
        // Elide the inner elements if there are too many.
        if (I > 8) {
          OS << "..., " << *C.Nodes.back();
          break;
        }
        OS << N;
        ++I;
      }
      OS << ')';
      return OS;
    }

    /// Dump a short description of this SCC to stderr.
    void dump() const;

#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
    /// Verify invariants about the SCC.
    ///
    /// This will attempt to validate all of the basic invariants within an
    /// SCC, but not that it is a strongly connected component per se.
    /// Primarily useful while building and updating the graph to check that
    /// basic properties are in place rather than having inexplicable crashes
    /// later.
    void verify();
#endif

  public:
    using iterator = pointee_iterator<SmallVectorImpl<Node *>::const_iterator>;

    iterator begin() const { return Nodes.begin(); }
    iterator end() const { return Nodes.end(); }

    int size() const { return Nodes.size(); }

    RefSCC &getOuterRefSCC() const { return *OuterRefSCC; }

    /// Test if this SCC is a parent of \a C.
    ///
    /// Note that this is linear in the number of edges departing the current
    /// SCC.
    bool isParentOf(const SCC &C) const;

    /// Test if this SCC is an ancestor of \a C.
    ///
    /// Note that in the worst case this is linear in the number of edges
    /// departing the current SCC and every SCC in the entire graph reachable
    /// from this SCC. Thus this very well may walk every edge in the entire
    /// call graph! Do not call this in a tight loop!
    bool isAncestorOf(const SCC &C) const;

    /// Test if this SCC is a child of \a C.
    ///
    /// See the comments for \c isParentOf for detailed notes about the
    /// complexity of this routine.
    bool isChildOf(const SCC &C) const { return C.isParentOf(*this); }

    /// Test if this SCC is a descendant of \a C.
    ///
    /// See the comments for \c isParentOf for detailed notes about the
    /// complexity of this routine.
    bool isDescendantOf(const SCC &C) const { return C.isAncestorOf(*this); }

    /// Provide a short name by printing this SCC to a std::string.
    ///
    /// This copes with the fact that we don't have a name per se for an SCC
    /// while still making the use of this in debugging and logging useful.
    std::string getName() const {
      std::string Name;
      raw_string_ostream OS(Name);
      OS << *this;
      OS.flush();
      return Name;
    }
  };
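
  // Example (hypothetical sketch): mapping a Function back to its SCC and
  // enclosing RefSCC once the postorder walk has assigned them. `CG` and `F`
  // are assumed as above.
  //
  //   if (LazyCallGraph::Node *N = CG.lookup(F))
  //     if (LazyCallGraph::SCC *C = CG.lookupSCC(*N))
  //       dbgs() << "SCC " << C->getName() << " in RefSCC "
  //              << C->getOuterRefSCC().getName() << "\n";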

  /// A RefSCC of the call graph.
  ///
  /// This models a Strongly Connected Component of function reference edges in
  /// the call graph. As opposed to actual SCCs, these can be used to scope
  /// subgraphs of the module which are independent from other subgraphs of the
  /// module because they do not reference it in any way. This is also the unit
  /// where we do mutation of the graph in order to restrict mutations to those
  /// which don't violate this independence.
  ///
  /// A RefSCC contains a DAG of actual SCCs. All the nodes within the RefSCC
  /// are necessarily within some actual SCC that nests within it. Since
  /// a direct call *is* a reference, there will always be at least one RefSCC
  /// around any SCC.
  ///
  /// Spurious ref edges, meaning ref edges that still exist in the call graph
  /// even though the corresponding IR reference no longer exists, are allowed.
  /// This is mostly to support argument promotion, which can modify a caller to
  /// no longer pass a function. The only place that needs to specially handle
  /// this is deleting a dead function/node, otherwise the dead ref edges are
  /// automatically removed when visiting the function/node no longer containing
  /// the ref edge.
  class RefSCC {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    LazyCallGraph *G;

    /// A postorder list of the inner SCCs.
    SmallVector<SCC *, 4> SCCs;

    /// A map from SCC to index in the postorder list.
    SmallDenseMap<SCC *, int, 4> SCCIndices;

    /// Fast-path constructor. RefSCCs should instead be constructed by calling
    /// formRefSCCFast on the graph itself.
    RefSCC(LazyCallGraph &G);

    void clear() {
      SCCs.clear();
      SCCIndices.clear();
    }

    /// Print a short description useful for debugging or logging.
    ///
    /// We print the SCCs wrapped in '[]'s, skipping the middle SCCs if there
    /// are a large number of them.
    //
    // Note: this is defined inline to dodge issues with GCC's interpretation
    // of enclosing namespaces for friend function declarations.
    friend raw_ostream &operator<<(raw_ostream &OS, const RefSCC &RC) {
      OS << '[';
      int I = 0;
      for (LazyCallGraph::SCC &C : RC) {
        if (I > 0)
          OS << ", ";
        // Elide the inner elements if there are too many.
        if (I > 4) {
          OS << "..., " << *RC.SCCs.back();
          break;
        }
        OS << C;
        ++I;
      }
      OS << ']';
      return OS;
    }

    /// Dump a short description of this RefSCC to stderr.
    void dump() const;

#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
    /// Verify invariants about the RefSCC and all its SCCs.
    ///
    /// This will attempt to validate all of the invariants *within* the
    /// RefSCC, but not that it is a strongly connected component of the larger
    /// graph. This makes it useful even when partially through an update.
    ///
    /// Invariants checked:
    /// - SCCs and their indices match.
    /// - The SCCs list is in fact in post-order.
    void verify();
#endif

  public:
    using iterator = pointee_iterator<SmallVectorImpl<SCC *>::const_iterator>;
    using range = iterator_range<iterator>;
    using parent_iterator =
        pointee_iterator<SmallPtrSetImpl<RefSCC *>::const_iterator>;

    iterator begin() const { return SCCs.begin(); }
    iterator end() const { return SCCs.end(); }

    ssize_t size() const { return SCCs.size(); }

    SCC &operator[](int Idx) { return *SCCs[Idx]; }

    iterator find(SCC &C) const {
      return SCCs.begin() + SCCIndices.find(&C)->second;
    }

    /// Test if this RefSCC is a parent of \a RC.
    ///
    /// CAUTION: This method walks every edge in the \c RefSCC, it can be very
    /// expensive.
    bool isParentOf(const RefSCC &RC) const;

    /// Test if this RefSCC is an ancestor of \a RC.
    ///
    /// CAUTION: This method walks the directed graph of edges as far as
    /// necessary to find a possible path to the argument. In the worst case
    /// this may walk the entire graph and can be extremely expensive.
    bool isAncestorOf(const RefSCC &RC) const;

    /// Test if this RefSCC is a child of \a RC.
    ///
    /// CAUTION: This method walks every edge in the argument \c RefSCC, it can
    /// be very expensive.
    bool isChildOf(const RefSCC &RC) const { return RC.isParentOf(*this); }

    /// Test if this RefSCC is a descendant of \a RC.
    ///
    /// CAUTION: This method walks the directed graph of edges as far as
    /// necessary to find a possible path from the argument. In the worst case
    /// this may walk the entire graph and can be extremely expensive.
    bool isDescendantOf(const RefSCC &RC) const {
      return RC.isAncestorOf(*this);
    }

    /// Provide a short name by printing this RefSCC to a std::string.
    ///
    /// This copes with the fact that we don't have a name per se for a RefSCC
    /// while still making the use of this in debugging and logging useful.
    std::string getName() const {
      std::string Name;
      raw_string_ostream OS(Name);
      OS << *this;
      OS.flush();
      return Name;
    }

    ///@{
    /// \name Mutation API
    ///
    /// These methods provide the core API for updating the call graph in the
    /// presence of (potentially still in-flight) DFS-found RefSCCs and SCCs.
    ///
    /// Note that these methods sometimes have complex runtimes, so be careful
    /// how you call them.

    /// Make an existing internal ref edge into a call edge.
    ///
    /// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
    /// If that happens, the optional callback \p MergedCB will be invoked (if
    /// provided) on the SCCs being merged away prior to actually performing
    /// the merge. Note that this will never include the target SCC as that
    /// will be the SCC functions are merged into to resolve the cycle. Once
    /// this function returns, these merged SCCs are not in a valid state but
    /// the pointers will remain valid until destruction of the parent graph
    /// instance for the purpose of clearing cached information. As a
    /// convenience, this function also returns 'true' if a cycle was formed
    /// and some SCCs were merged away.
    ///
    /// After this operation, both SourceN's SCC and TargetN's SCC may move
    /// position within this RefSCC's postorder list. Any SCCs merged are
    /// merged into the TargetN's SCC in order to preserve reachability analyses
    /// which took place on that SCC.
    bool switchInternalEdgeToCall(
        Node &SourceN, Node &TargetN,
        function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});

    /// Make an existing internal call edge between separate SCCs into a ref
    /// edge.
    ///
    /// If SourceN and TargetN are in separate SCCs within this RefSCC, changing
    /// the call edge between them to a ref edge is a trivial operation that
    /// does not require any structural changes to the call graph.
    void switchTrivialInternalEdgeToRef(Node &SourceN, Node &TargetN);

    /// Make an existing internal call edge within a single SCC into a ref
    /// edge.
    ///
    /// Since SourceN and TargetN are part of a single SCC, this SCC may be
    /// split up due to breaking a cycle in the call edges that formed it. If
    /// that happens, then this routine will insert new SCCs into the postorder
    /// list *before* the SCC of TargetN (previously the SCC of both). This
    /// preserves postorder as the TargetN can reach all of the other nodes by
    /// definition of previously being in a single SCC formed by the cycle from
    /// SourceN to TargetN.
    ///
    /// The newly formed SCCs are inserted *immediately* and contiguously
    /// prior to the TargetN SCC, and the returned range covers the new SCCs
    /// in the RefSCC's postorder sequence. You can iterate the returned range
    /// directly to observe all of the new SCCs in postorder.
    ///
    /// Note that if SourceN and TargetN are in separate SCCs, the simpler
    /// routine `switchTrivialInternalEdgeToRef` should be used instead.
    iterator_range<iterator> switchInternalEdgeToRef(Node &SourceN,
                                                     Node &TargetN);

    /// Make an existing outgoing ref edge into a call edge.
    ///
    /// Note that this is trivial as there are no cyclic impacts and there
    /// remains a reference edge.
    void switchOutgoingEdgeToCall(Node &SourceN, Node &TargetN);

    /// Make an existing outgoing call edge into a ref edge.
    ///
    /// This is trivial as there are no cyclic impacts and there remains
    /// a reference edge.
    void switchOutgoingEdgeToRef(Node &SourceN, Node &TargetN);

    /// Insert a ref edge from one node in this RefSCC to another in this
    /// RefSCC.
    ///
    /// This is always a trivial operation as it doesn't change any part of the
    /// graph structure besides connecting the two nodes.
    ///
    /// Note that we don't support directly inserting internal *call* edges
    /// because that could change the graph structure and requires returning
    /// information about what became invalid. As a consequence, the pattern
    /// should be to first insert the necessary ref edge, and then to switch it
    /// to a call edge if needed and handle any invalidation that results. See
    /// the \c switchInternalEdgeToCall routine for details.
    void insertInternalRefEdge(Node &SourceN, Node &TargetN);

    /// Insert an edge whose parent is in this RefSCC and child is in some
    /// child RefSCC.
    ///
    /// There must be an existing path from the \p SourceN to the \p TargetN.
    /// This operation is inexpensive and does not change the set of SCCs and
    /// RefSCCs in the graph.
    void insertOutgoingEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);

    /// Insert an edge whose source is in a descendant RefSCC and target is in
    /// this RefSCC.
    ///
    /// There must be an existing path from the target to the source in this
    /// case.
    ///
    /// NB! This has the potential to be a very expensive function. It
    /// inherently forms a cycle in the prior RefSCC DAG and we have to merge
    /// RefSCCs to resolve that cycle. But finding all of the RefSCCs which
    /// participate in the cycle can in the worst case require traversing every
    /// RefSCC in the graph. Every attempt is made to avoid that, but passes
    /// must still exercise caution calling this routine repeatedly.
    ///
    /// Also note that this can only insert ref edges. In order to insert
    /// a call edge, first insert a ref edge and then switch it to a call edge.
    /// These are intentionally kept as separate interfaces because each step
    /// of the operation invalidates a different set of data structures.
    ///
    /// This returns all the RefSCCs which were merged into this RefSCC
    /// (the target's). This allows callers to invalidate any cached
    /// information.
    ///
    /// FIXME: We could possibly optimize this quite a bit for cases where the
    /// caller and callee are very nearby in the graph. See comments in the
    /// implementation for details, but that use case might impact users.
    SmallVector<RefSCC *, 1> insertIncomingRefEdge(Node &SourceN,
                                                   Node &TargetN);

    /// Remove an edge whose source is in this RefSCC and target is *not*.
    ///
    /// This removes an inter-RefSCC edge. All inter-RefSCC edges originating
    /// from this RefSCC have been fully explored by any in-flight DFS graph
    /// formation, so this is always safe to call once you have the source
    /// RefSCC.
    ///
    /// This operation does not change the cyclic structure of the graph and so
    /// is very inexpensive. It may change the connectivity graph of the SCCs
    /// though, so be careful calling this while iterating over them.
    void removeOutgoingEdge(Node &SourceN, Node &TargetN);

    /// Remove a list of ref edges which are entirely within this RefSCC.
    ///
    /// Both the \a SourceN and all of the \a TargetNs must be within this
    /// RefSCC. Removing these edges may break cycles that form this RefSCC and
    /// thus this operation may change the RefSCC graph significantly. In
    /// particular, this operation will re-form new RefSCCs based on the
    /// remaining connectivity of the graph. The following invariants are
    /// guaranteed to hold after calling this method:
    ///
    /// 1) If a ref-cycle remains after removal, it leaves this RefSCC intact
    ///    and in the graph. No new RefSCCs are built.
    /// 2) Otherwise, this RefSCC will be dead after this call and no longer in
    ///    the graph or the postorder traversal of the call graph. Any iterator
    ///    pointing at this RefSCC will become invalid.
    /// 3) All newly formed RefSCCs will be returned and the order of the
    ///    RefSCCs returned will be a valid postorder traversal of the new
    ///    RefSCCs.
    /// 4) No RefSCC other than this RefSCC has its member set changed (this is
    ///    inherent in the definition of removing such an edge).
    ///
    /// These invariants are very important to ensure that we can build
    /// optimization pipelines on top of the CGSCC pass manager which
    /// intelligently update the RefSCC graph without invalidating other parts
    /// of the RefSCC graph.
    ///
    /// Note that we provide no routine to remove a *call* edge. Instead, you
    /// must first switch it to a ref edge using \c switchInternalEdgeToRef.
    /// This split API is intentional as each of these two steps can invalidate
    /// a different aspect of the graph structure and needs to have the
    /// invalidation handled independently.
    ///
    /// The runtime complexity of this method is, in the worst case, O(V+E)
    /// where V is the number of nodes in this RefSCC and E is the number of
    /// edges leaving the nodes in this RefSCC. Note that E includes both edges
    /// within this RefSCC and edges from this RefSCC to child RefSCCs. Some
    /// effort has been made to minimize the overhead of common cases such as
    /// self-edges and edge removals which result in a spanning tree with no
    /// more cycles.
    [[nodiscard]] SmallVector<RefSCC *, 1>
    removeInternalRefEdge(Node &SourceN, ArrayRef<Node *> TargetNs);

    /// A convenience wrapper around the above to handle trivial cases of
    /// inserting a new call edge.
    ///
    /// This is trivial whenever the target is in the same SCC as the source or
    /// the edge is an outgoing edge to some descendant SCC. In these cases
    /// there is no change to the cyclic structure of SCCs or RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialCallEdge(Node &SourceN, Node &TargetN);

    /// A convenience wrapper around the above to handle trivial cases of
    /// inserting a new ref edge.
    ///
    /// This is trivial whenever the target is in the same RefSCC as the source
    /// or the edge is an outgoing edge to some descendant RefSCC. In these
    /// cases there is no change to the cyclic structure of the RefSCCs.
    ///
    /// To further make calling this convenient, it also handles inserting
    /// already existing edges.
    void insertTrivialRefEdge(Node &SourceN, Node &TargetN);

    /// Directly replace a node's function with a new function.
    ///
    /// This should be used when moving the body and users of a function to
    /// a new formal function object but not otherwise changing the call graph
    /// structure in any way.
    ///
    /// It requires that the old function in the provided node have zero uses
    /// and the new function must have calls and references to it establishing
    /// an equivalent graph.
    void replaceNodeFunction(Node &N, Function &NewF);

    ///@}
  };
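
  // Example (a sketch under assumed update-safe conditions): the documented
  // two-step pattern for introducing a new internal call edge. `RC` is the
  // RefSCC containing both `SourceN` and `TargetN`; what the merge callback
  // should invalidate depends on the caller's cached analyses.
  //
  //   RC.insertInternalRefEdge(SourceN, TargetN);
  //   bool FormedCycle = RC.switchInternalEdgeToCall(
  //       SourceN, TargetN, [](ArrayRef<LazyCallGraph::SCC *> MergedSCCs) {
  //         // Drop cached results for each SCC merged into TargetN's SCC.
  //       });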

  /// A post-order depth-first RefSCC iterator over the call graph.
  ///
  /// This iterator walks the cached post-order sequence of RefSCCs. However,
  /// it trades stability for flexibility. It is restricted to a forward
  /// iterator but will survive mutations which insert new RefSCCs and continue
  /// to point to the same RefSCC even if it moves in the post-order sequence.
  class postorder_ref_scc_iterator
      : public iterator_facade_base<postorder_ref_scc_iterator,
                                    std::forward_iterator_tag, RefSCC> {
    friend class LazyCallGraph;
    friend class LazyCallGraph::Node;

    /// Nonce type to select the constructor for the end iterator.
    struct IsAtEndT {};

    LazyCallGraph *G;
    RefSCC *RC = nullptr;

    /// Build the begin iterator for a node.
    postorder_ref_scc_iterator(LazyCallGraph &G) : G(&G), RC(getRC(G, 0)) {
      incrementUntilNonEmptyRefSCC();
    }

    /// Build the end iterator for a node. This is selected purely by overload.
    postorder_ref_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/) : G(&G) {}

    /// Get the post-order RefSCC at the given index of the postorder walk,
    /// populating it if necessary.
    static RefSCC *getRC(LazyCallGraph &G, int Index) {
      if (Index == (int)G.PostOrderRefSCCs.size())
        // We're at the end.
        return nullptr;

      return G.PostOrderRefSCCs[Index];
    }

    // Keep incrementing until RC is non-empty (or null).
    void incrementUntilNonEmptyRefSCC() {
      while (RC && RC->size() == 0)
        increment();
    }

    void increment() {
      assert(RC && "Cannot increment the end iterator!");
      RC = getRC(*G, G->RefSCCIndices.find(RC)->second + 1);
    }

  public:
    bool operator==(const postorder_ref_scc_iterator &Arg) const {
      return G == Arg.G && RC == Arg.RC;
    }

    reference operator*() const { return *RC; }

    using iterator_facade_base::operator++;
    postorder_ref_scc_iterator &operator++() {
      increment();
      incrementUntilNonEmptyRefSCC();
      return *this;
    }
  };

  /// Construct a graph for the given module.
  ///
  /// This sets up the graph and computes all of the entry points of the graph.
  /// No function definitions are scanned until their nodes in the graph are
  /// requested during traversal.
  LazyCallGraph(Module &M,
                function_ref<TargetLibraryInfo &(Function &)> GetTLI);

  LazyCallGraph(LazyCallGraph &&G);
  LazyCallGraph &operator=(LazyCallGraph &&RHS);

  bool invalidate(Module &, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &);

  EdgeSequence::iterator begin() { return EntryEdges.begin(); }
  EdgeSequence::iterator end() { return EntryEdges.end(); }

  void buildRefSCCs();

  postorder_ref_scc_iterator postorder_ref_scc_begin() {
    if (!EntryEdges.empty())
      assert(!PostOrderRefSCCs.empty() &&
             "Must form RefSCCs before iterating them!");
    return postorder_ref_scc_iterator(*this);
  }
  postorder_ref_scc_iterator postorder_ref_scc_end() {
    if (!EntryEdges.empty())
      assert(!PostOrderRefSCCs.empty() &&
             "Must form RefSCCs before iterating them!");
    return postorder_ref_scc_iterator(*this,
                                      postorder_ref_scc_iterator::IsAtEndT());
  }

  iterator_range<postorder_ref_scc_iterator> postorder_ref_sccs() {
    return make_range(postorder_ref_scc_begin(), postorder_ref_scc_end());
  }
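
  // Example (illustrative): the canonical traversal. Note that buildRefSCCs()
  // must run before the postorder sequence is iterated (see the asserts
  // above); `CG` is assumed to be a constructed LazyCallGraph.
  //
  //   CG.buildRefSCCs();
  //   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
  //     for (LazyCallGraph::SCC &C : RC)
  //       for (LazyCallGraph::Node &N : C)
  //         dbgs() << N.getFunction().getName() << "\n";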

  /// Lookup a function in the graph which has already been scanned and added.
  Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }

  /// Lookup a function's SCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned an SCC via the RefSCC
  /// iterator walk.
  SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }

  /// Lookup a function's RefSCC in the graph.
  ///
  /// \returns null if the function hasn't been assigned a RefSCC via the
  /// RefSCC iterator walk.
  RefSCC *lookupRefSCC(Node &N) const {
    if (SCC *C = lookupSCC(N))
      return &C->getOuterRefSCC();

    return nullptr;
  }

  /// Get a graph node for a given function, scanning it to populate the graph
  /// data as necessary.
  Node &get(Function &F) {
    Node *&N = NodeMap[&F];
    if (N)
      return *N;

    return insertInto(F, N);
  }

  /// Get the sequence of known and defined library functions.
  ///
  /// These functions, because they are known to LLVM, can have calls
  /// introduced out of thin air from arbitrary IR.
  ArrayRef<Function *> getLibFunctions() const {
    return LibFunctions.getArrayRef();
  }

  /// Test whether a function is a known and defined library function tracked by
  /// the call graph.
  ///
  /// Because these functions are known to LLVM, they are specially modeled in
  /// the call graph and remain active and reachable even when all IR-level
  /// references have been removed.
  bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }

  ///@{
  /// \name Pre-SCC Mutation API
  ///
  /// These methods are only valid to call prior to forming any SCCs for this
  /// call graph. They can be used to update the core node-graph during
  /// a node-based inorder traversal that precedes any SCC-based traversal.
  ///
  /// Once you begin manipulating a call graph's SCCs, most mutation of the
  /// graph must be performed via a RefSCC method. There are some exceptions
  /// below.

  /// Update the call graph after inserting a new edge.
  void insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK);

  /// Update the call graph after inserting a new edge.
  void insertEdge(Function &Source, Function &Target, Edge::Kind EK) {
    return insertEdge(get(Source), get(Target), EK);
  }

  /// Update the call graph after deleting an edge.
  void removeEdge(Node &SourceN, Node &TargetN);

  /// Update the call graph after deleting an edge.
  void removeEdge(Function &Source, Function &Target) {
    return removeEdge(get(Source), get(Target));
  }

  ///@}

  ///@{
  /// \name General Mutation API
  ///
  /// There are a very limited set of mutations allowed on the graph as a whole
  /// once SCCs have started to be formed. These routines have strict contracts
  /// but may be called at any point.

  /// Remove a dead function from the call graph (typically to delete it).
  ///
  /// Note that the function must have an empty use list, and the call graph
  /// must be up-to-date prior to calling this. That means it is by itself in
  /// a maximal SCC which is by itself in a maximal RefSCC, etc. No structural
  /// changes result from calling this routine other than potentially removing
  /// entry points into the call graph.
  ///
  /// If SCC formation has begun, this function must not be part of the current
  /// DFS in order to call this safely. Typically, the function will have been
  /// fully visited by the DFS prior to calling this routine.
  void removeDeadFunction(Function &F);

  /// Add a new function split/outlined from an existing function.
  ///
  /// The new function may only reference other functions that the original
  /// function did.
  ///
  /// The original function must reference (either directly or indirectly) the
  /// new function.
  ///
  /// The new function may also reference the original function.
  /// It may end up in a parent SCC in the case that the original function's
  /// edge to the new function is a ref edge, and the edge back is a call edge.
  void addSplitFunction(Function &OriginalFunction, Function &NewFunction);

  /// Add new ref-recursive functions split/outlined from an existing function.
  ///
  /// The new functions may only reference other functions that the original
  /// function did. The new functions may reference (not call) the original
  /// function.
  ///
  /// The original function must reference (not call) all new functions.
  /// All new functions must reference (not call) each other.
  void addSplitRefRecursiveFunctions(Function &OriginalFunction,
                                     ArrayRef<Function *> NewFunctions);

  ///@}

  ///@{
  /// \name Static helpers for code doing updates to the call graph.
  ///
  /// These helpers are used to implement parts of the call graph but are also
  /// useful to code doing updates or otherwise wanting to walk the IR in the
  /// same patterns as when we build the call graph.

  /// Recursively visits the defined functions whose address is reachable from
  /// every constant in the \p Worklist.
  ///
  /// Doesn't recurse through any constants already in the \p Visited set, and
  /// updates that set with every constant visited.
  ///
  /// For each defined function, calls \p Callback with that function.
  static void visitReferences(SmallVectorImpl<Constant *> &Worklist,
                              SmallPtrSetImpl<Constant *> &Visited,
                              function_ref<void(Function &)> Callback);
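
  // Example (hedged sketch): collecting the defined functions reachable from
  // a global's initializer. `GV` is an assumed GlobalVariable that has an
  // initializer.
  //
  //   SmallVector<Constant *, 16> Worklist;
  //   SmallPtrSet<Constant *, 16> Visited;
  //   if (Visited.insert(GV.getInitializer()).second)
  //     Worklist.push_back(GV.getInitializer());
  //   LazyCallGraph::visitReferences(Worklist, Visited, [](Function &F) {
  //     dbgs() << "references " << F.getName() << "\n";
  //   });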

  ///@}

private:
  using node_stack_iterator = SmallVectorImpl<Node *>::reverse_iterator;
  using node_stack_range = iterator_range<node_stack_iterator>;

  /// Allocator that holds all the call graph nodes.
  SpecificBumpPtrAllocator<Node> BPA;

  /// Maps function->node for fast lookup.
  DenseMap<const Function *, Node *> NodeMap;

  /// The entry edges into the graph.
  ///
  /// These edges are from "external" sources. Put another way, they
  /// escape at the module scope.
  EdgeSequence EntryEdges;

  /// Allocator that holds all the call graph SCCs.
  SpecificBumpPtrAllocator<SCC> SCCBPA;

  /// Maps Function -> SCC for fast lookup.
  DenseMap<Node *, SCC *> SCCMap;

  /// Allocator that holds all the call graph RefSCCs.
  SpecificBumpPtrAllocator<RefSCC> RefSCCBPA;

  /// The post-order sequence of RefSCCs.
  ///
  /// This list is lazily formed the first time we walk the graph.
  SmallVector<RefSCC *, 16> PostOrderRefSCCs;

  /// A map from RefSCC to the index for it in the postorder sequence of
  /// RefSCCs.
  DenseMap<RefSCC *, int> RefSCCIndices;

  /// Defined functions that are also known library functions which the
  /// optimizer can reason about and therefore might introduce calls to out of
  /// thin air.
  SmallSetVector<Function *, 4> LibFunctions;

  /// Helper to insert a new function, with an already looked-up entry in
  /// the NodeMap.
  Node &insertInto(Function &F, Node *&MappedN);

  /// Helper to initialize a new node created outside of creating SCCs and add
  /// it to the NodeMap if necessary. For example, useful when a function is
  /// split.
  Node &initNode(Function &F);

  /// Helper to update pointers back to the graph object during moves.
  void updateGraphPtrs();

  /// Allocates an SCC and constructs it using the graph allocator.
  ///
  /// The arguments are forwarded to the constructor.
  template <typename... Ts> SCC *createSCC(Ts &&...Args) {
    return new (SCCBPA.Allocate()) SCC(std::forward<Ts>(Args)...);
  }

  /// Allocates a RefSCC and constructs it using the graph allocator.
  ///
  /// The arguments are forwarded to the constructor.
  template <typename... Ts> RefSCC *createRefSCC(Ts &&...Args) {
    return new (RefSCCBPA.Allocate()) RefSCC(std::forward<Ts>(Args)...);
  }

  /// Common logic for building SCCs from a sequence of roots.
  ///
  /// This is a very generic implementation of the depth-first walk and SCC
  /// formation algorithm. It uses a generic sequence of roots and generic
  /// callbacks for each step. This is designed to be used to implement both
  /// the RefSCC formation and SCC formation with shared logic.
  ///
  /// Currently this is a relatively naive implementation of Tarjan's DFS
  /// algorithm to form the SCCs.
  ///
  /// FIXME: We should consider newer variants such as Nuutila.
  template <typename RootsT, typename GetBeginT, typename GetEndT,
            typename GetNodeT, typename FormSCCCallbackT>
  static void buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
                               GetEndT &&GetEnd, GetNodeT &&GetNode,
                               FormSCCCallbackT &&FormSCC);

  /// Build the SCCs for a RefSCC out of a list of nodes.
  void buildSCCs(RefSCC &RC, node_stack_range Nodes);

  /// Get the index of a RefSCC within the postorder traversal.
  ///
  /// Requires that this RefSCC is a valid one in the (perhaps partial)
  /// postorder traversed part of the graph.
  int getRefSCCIndex(RefSCC &RC) {
    auto IndexIt = RefSCCIndices.find(&RC);
    assert(IndexIt != RefSCCIndices.end() && "RefSCC doesn't have an index!");
    assert(PostOrderRefSCCs[IndexIt->second] == &RC &&
           "Index does not point back at RC!");
    return IndexIt->second;
  }
};

inline LazyCallGraph::Edge::Edge() = default;
inline LazyCallGraph::Edge::Edge(Node &N, Kind K) : Value(&N, K) {}

inline LazyCallGraph::Edge::operator bool() const {
  return Value.getPointer() && !Value.getPointer()->isDead();
}

inline LazyCallGraph::Edge::Kind LazyCallGraph::Edge::getKind() const {
  assert(*this && "Queried a null edge!");
  return Value.getInt();
}

inline bool LazyCallGraph::Edge::isCall() const {
  assert(*this && "Queried a null edge!");
  return getKind() == Call;
}

inline LazyCallGraph::Node &LazyCallGraph::Edge::getNode() const {
  assert(*this && "Queried a null edge!");
  return *Value.getPointer();
}

inline Function &LazyCallGraph::Edge::getFunction() const {
  assert(*this && "Queried a null edge!");
  return getNode().getFunction();
}

// Provide GraphTraits specializations for call graphs.
template <> struct GraphTraits<LazyCallGraph::Node *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};
template <> struct GraphTraits<LazyCallGraph *> {
  using NodeRef = LazyCallGraph::Node *;
  using ChildIteratorType = LazyCallGraph::EdgeSequence::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return (*N)->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return (*N)->end(); }
};

/// An analysis pass which computes the call graph for a module.
class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
  friend AnalysisInfoMixin<LazyCallGraphAnalysis>;

  static AnalysisKey Key;

public:
  /// Inform generic clients of the result type.
  using Result = LazyCallGraph;

  /// Compute the \c LazyCallGraph for the module \c M.
  ///
  /// This just builds the set of entry points to the call graph. The rest is
  /// built lazily as it is walked.
  LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
    auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
      return FAM.getResult<TargetLibraryAnalysis>(F);
    };
    return LazyCallGraph(M, GetTLI);
  }
};
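
// Example (sketch): consuming the analysis from a module pass; the pass name
// `MyCGWalkPass` is hypothetical.
//
//   PreservedAnalyses MyCGWalkPass::run(Module &M, ModuleAnalysisManager &AM) {
//     LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
//     CG.buildRefSCCs();
//     for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
//       dbgs() << RC.getName() << "\n";
//     return PreservedAnalyses::all();
//   }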

/// A pass which prints the call graph to a \c raw_ostream.
///
/// This is primarily useful for testing the analysis.
class LazyCallGraphPrinterPass
    : public PassInfoMixin<LazyCallGraphPrinterPass> {
  raw_ostream &OS;

public:
  explicit LazyCallGraphPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// A pass which prints the call graph as a DOT file to a \c raw_ostream.
///
/// This is primarily useful for visualization purposes.
class LazyCallGraphDOTPrinterPass
    : public PassInfoMixin<LazyCallGraphDOTPrinterPass> {
  raw_ostream &OS;

public:
  explicit LazyCallGraphDOTPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_LAZYCALLGRAPH_H
//===- InstCount.h - Collects the count of all instructions -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass collects the count of all instructions and reports them
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INSTCOUNT_H
#define LLVM_ANALYSIS_INSTCOUNT_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct InstCountPass : PassInfoMixin<InstCountPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};
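
// Example (illustrative): the pass has no result of its own; it is typically
// scheduled for its statistics side effect, e.g.:
//
//   FunctionPassManager FPM;
//   FPM.addPass(InstCountPass());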

} // end namespace llvm

#endif // LLVM_ANALYSIS_INSTCOUNT_H
//===-- HeatUtils.h - Utility for printing heat colors ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utility for printing heat colors based on profiling information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_HEATUTILS_H
#define LLVM_ANALYSIS_HEATUTILS_H

#include <cstdint>
#include <string>

namespace llvm {

class BlockFrequencyInfo;
class Function;

// Returns the number of calls to calledFunction made by callerFunction.
uint64_t
getNumOfCalls(Function &callerFunction, Function &calledFunction);

// Returns the maximum frequency of a BB in a function.
uint64_t getMaxFreq(const Function &F, const BlockFrequencyInfo *BFI);

// Calculates heat color based on current and maximum frequencies.
std::string getHeatColor(uint64_t freq, uint64_t maxFreq);

// Calculates heat color based on percent of "hotness".
std::string getHeatColor(double percent);
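
// Example (hedged sketch): deriving a DOT fill color for a basic block from
// its relative hotness. `F`, `BFI`, and `BB` are assumed to come from the
// caller.
//
//   uint64_t MaxFreq = getMaxFreq(F, BFI);
//   uint64_t Freq = BFI->getBlockFreq(&BB).getFrequency();
//   std::string Color = getHeatColor(Freq, MaxFreq);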

} // namespace llvm

#endif // LLVM_ANALYSIS_HEATUTILS_H
//===- InlineAdvisor.h - Inlining decision making abstraction -*- C++ ---*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_INLINEADVISOR_H
#define LLVM_ANALYSIS_INLINEADVISOR_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/PassManager.h"
#include <memory>

namespace llvm {
class BasicBlock;
class CallBase;
class Function;
class Module;
class OptimizationRemark;
class ImportedFunctionsInliningStatistics;
class OptimizationRemarkEmitter;
struct ReplayInlinerSettings;

/// There are 4 scenarios in which we can use the InlineAdvisor:
/// - Default - use manual heuristics.
///
/// - Release mode, the expected mode for production, day to day deployments.
/// In this mode, when building the compiler, we also compile a pre-trained ML
/// model to native code, and link it as a static library. This mode has low
/// overhead and no additional dependencies for the compiler runtime.
///
/// - Development mode, for training new models.
/// In this mode, we trade off runtime performance for flexibility. This mode
/// requires the full C Tensorflow API library, and evaluates models
/// dynamically. This mode also permits generating training logs, for offline
/// training.
///
/// - Dynamically load an advisor via a plugin (PluginInlineAdvisorAnalysis)
enum class InliningAdvisorMode : int { Default, Release, Development };

// Each entry represents an inline driver.
enum class InlinePass : int {
  AlwaysInliner,
  CGSCCInliner,
  EarlyInliner,
  ModuleInliner,
  MLInliner,
  ReplayCGSCCInliner,
  ReplaySampleProfileInliner,
  SampleProfileInliner,
};

/// Provides context on when an inline advisor is constructed in the pipeline
/// (e.g., link phase, inline driver).
struct InlineContext {
  ThinOrFullLTOPhase LTOPhase;

  InlinePass Pass;
};

std::string AnnotateInlinePassName(InlineContext IC);

class InlineAdvisor;
/// Capture state between an inlining decision having been made, and
/// its impact being observable. When collecting model training data, this
/// allows recording features/decisions/partial reward data sets.
///
/// Derivations of this type are expected to be tightly coupled with their
/// InliningAdvisors. The base type implements the minimal contractual
/// obligations.
class InlineAdvice {
public:
  InlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
               OptimizationRemarkEmitter &ORE, bool IsInliningRecommended);

  InlineAdvice(InlineAdvice &&) = delete;
  InlineAdvice(const InlineAdvice &) = delete;
  virtual ~InlineAdvice() {
    assert(Recorded && "InlineAdvice should have been informed of the "
                       "inliner's decision in all cases");
  }

  /// Exactly one of the record* APIs must be called. Implementers may extend
  /// behavior by implementing the corresponding record*Impl.
  ///
  /// Call after inlining succeeded, and did not result in deleting the callee.
  void recordInlining();

  /// Call after inlining succeeded and resulted in the callee being
  /// deletable, meaning it has no more users and will be cleaned up
  /// subsequently.
  void recordInliningWithCalleeDeleted();

  /// Call after the decision for a call site was to not inline.
  void recordUnsuccessfulInlining(const InlineResult &Result) {
    markRecorded();
    recordUnsuccessfulInliningImpl(Result);
  }

  /// Call to indicate inlining was not attempted.
  void recordUnattemptedInlining() {
    markRecorded();
    recordUnattemptedInliningImpl();
  }

  /// Get the inlining recommendation.
  bool isInliningRecommended() const { return IsInliningRecommended; }
  const DebugLoc &getOriginalCallSiteDebugLoc() const { return DLoc; }
  const BasicBlock *getOriginalCallSiteBasicBlock() const { return Block; }

protected:
  virtual void recordInliningImpl() {}
  virtual void recordInliningWithCalleeDeletedImpl() {}
  virtual void recordUnsuccessfulInliningImpl(const InlineResult &Result) {}
  virtual void recordUnattemptedInliningImpl() {}

  InlineAdvisor *const Advisor;
  /// Caller and Callee are pre-inlining.
  Function *const Caller;
  Function *const Callee;

  // Capture the context of CB before inlining, as a successful inlining may
  // change that context, and we want to report success or failure in the
  // original context.
  const DebugLoc DLoc;
  const BasicBlock *const Block;
  OptimizationRemarkEmitter &ORE;
  const bool IsInliningRecommended;

private:
  void markRecorded() {
    assert(!Recorded && "Recording should happen exactly once");
    Recorded = true;
  }
  void recordInlineStatsIfNeeded();

  bool Recorded = false;
};
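
// Example (a sketch of the contract, not a real inliner loop): exactly one
// record* call must follow each piece of advice. `Advisor`, `CB`, and `IFI`
// are assumed to be in scope; InlineFunction comes from
// llvm/Transforms/Utils/Cloning.h.
//
//   std::unique_ptr<InlineAdvice> Advice = Advisor.getAdvice(CB);
//   if (!Advice->isInliningRecommended()) {
//     Advice->recordUnattemptedInlining();
//   } else {
//     InlineResult Result = InlineFunction(CB, IFI);
//     if (Result.isSuccess())
//       Advice->recordInlining();
//     else
//       Advice->recordUnsuccessfulInlining(Result);
//   }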

class DefaultInlineAdvice : public InlineAdvice {
public:
  DefaultInlineAdvice(InlineAdvisor *Advisor, CallBase &CB,
                      std::optional<InlineCost> OIC,
                      OptimizationRemarkEmitter &ORE, bool EmitRemarks = true)
      : InlineAdvice(Advisor, CB, ORE, OIC.has_value()), OriginalCB(&CB),
        OIC(OIC), EmitRemarks(EmitRemarks) {}

private:
  void recordUnsuccessfulInliningImpl(const InlineResult &Result) override;
  void recordInliningWithCalleeDeletedImpl() override;
  void recordInliningImpl() override;

private:
  CallBase *const OriginalCB;
  std::optional<InlineCost> OIC;
  bool EmitRemarks;
};

/// Interface for deciding whether to inline a call site or not.
class InlineAdvisor {
public:
  InlineAdvisor(InlineAdvisor &&) = delete;
  virtual ~InlineAdvisor();

  /// Get an InlineAdvice containing a recommendation on whether to
  /// inline or not. \p CB is assumed to be a direct call. \p FAM is assumed to
  /// be up-to-date wrt previous inlining decisions. \p MandatoryOnly indicates
/// that only mandatory (always-inline) call sites should be recommended; this
/// allows the InlineAdvisor to track such inlinings.
  /// Returns:
  /// - An InlineAdvice with the inlining recommendation.
  /// - Null when no recommendation is made (https://reviews.llvm.org/D110658).
  /// TODO: Consider removing the Null return scenario by incorporating the
  /// SampleProfile inliner into an InlineAdvisor
  std::unique_ptr<InlineAdvice> getAdvice(CallBase &CB,
                                          bool MandatoryOnly = false);

  /// This must be called when the Inliner pass is entered, to allow the
  /// InlineAdvisor update internal state, as result of function passes run
  /// between Inliner pass runs (for the same module).
  virtual void onPassEntry(LazyCallGraph::SCC *SCC = nullptr) {}

  /// This must be called when the Inliner pass is exited, as function passes
  /// may be run subsequently. This allows an implementation of InlineAdvisor
  /// to prepare for a partial update, based on the optional SCC.
  virtual void onPassExit(LazyCallGraph::SCC *SCC = nullptr) {}

  /// Support for printer pass
  virtual void print(raw_ostream &OS) const {
    OS << "Unimplemented InlineAdvisor print\n";
  }

  /// Note: the pass name is annotated only when the inline advisor
  /// constructor provides an InlineContext.
  const char *getAnnotatedInlinePassName() const {
    return AnnotatedInlinePassName.c_str();
  }

protected:
  InlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
                std::optional<InlineContext> IC = std::nullopt);
  virtual std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) = 0;
  virtual std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
                                                           bool Advice);

  Module &M;
  FunctionAnalysisManager &FAM;
  const std::optional<InlineContext> IC;
  const std::string AnnotatedInlinePassName;
  std::unique_ptr<ImportedFunctionsInliningStatistics> ImportedFunctionsStats;

  enum class MandatoryInliningKind { NotMandatory, Always, Never };

  static MandatoryInliningKind getMandatoryKind(CallBase &CB,
                                                FunctionAnalysisManager &FAM,
                                                OptimizationRemarkEmitter &ORE);

  OptimizationRemarkEmitter &getCallerORE(CallBase &CB);

private:
  friend class InlineAdvice;
};

/// The default (manual heuristics) implementation of the InlineAdvisor. This
/// implementation does not need to keep state between inliner pass runs, and is
/// reusable as-is for inliner pass test scenarios, as well as for regular use.
class DefaultInlineAdvisor : public InlineAdvisor {
public:
  DefaultInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
                       InlineParams Params, InlineContext IC)
      : InlineAdvisor(M, FAM, IC), Params(Params) {}

private:
  std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;

  InlineParams Params;
};

/// Used for dynamically registering InlineAdvisors as plugins
///
/// An advisor plugin adds a new advisor at runtime by registering an instance
/// of PluginInlineAdvisorAnalysis in the current ModuleAnalysisManager.
/// For example, the following code dynamically registers a
/// DefaultInlineAdvisor:
///
/// namespace {
///
/// InlineAdvisor *defaultAdvisorFactory(Module &M, FunctionAnalysisManager
/// &FAM,
///                                      InlineParams Params, InlineContext IC)
///                                      {
///   return new DefaultInlineAdvisor(M, FAM, Params, IC);
/// }
///
/// struct DefaultDynamicAdvisor : PassInfoMixin<DefaultDynamicAdvisor> {
///   PreservedAnalyses run(Module &, ModuleAnalysisManager &MAM) {
///     PluginInlineAdvisorAnalysis PA(defaultAdvisorFactory);
///     MAM.registerPass([&] { return PA; });
///     return PreservedAnalyses::all();
///   }
/// };
///
/// } // namespace
///
/// extern "C" LLVM_ATTRIBUTE_WEAK ::llvm::PassPluginLibraryInfo
/// llvmGetPassPluginInfo() {
///   return {LLVM_PLUGIN_API_VERSION, "DynamicDefaultAdvisor",
///   LLVM_VERSION_STRING,
///           [](PassBuilder &PB) {
///             PB.registerPipelineStartEPCallback(
///                 [](ModulePassManager &MPM, OptimizationLevel Level) {
///                   MPM.addPass(DefaultDynamicAdvisor());
///                 });
///           }};
/// }
///
/// A plugin must implement an AdvisorFactory and register it with a
/// PluginInlineAdvisorAnalysis in the provided ModuleAnalysisManager.
///
/// If such a plugin has been registered,
/// InlineAdvisorAnalysis::Result::tryCreate will return the dynamically loaded
/// advisor.
///
class PluginInlineAdvisorAnalysis
    : public AnalysisInfoMixin<PluginInlineAdvisorAnalysis> {
public:
  static AnalysisKey Key;
  static bool HasBeenRegistered;

  typedef InlineAdvisor *(*AdvisorFactory)(Module &M,
                                           FunctionAnalysisManager &FAM,
                                           InlineParams Params,
                                           InlineContext IC);

  PluginInlineAdvisorAnalysis(AdvisorFactory Factory) : Factory(Factory) {
    HasBeenRegistered = true;
    assert(Factory != nullptr &&
           "The plugin advisor factory should not be a null pointer.");
  }

  struct Result {
    AdvisorFactory Factory;
  };

  Result run(Module &M, ModuleAnalysisManager &MAM) { return {Factory}; }
  Result getResult() { return {Factory}; }

private:
  AdvisorFactory Factory;
};

/// The InlineAdvisorAnalysis is a module analysis because the InlineAdvisor
/// needs to capture state right before inlining commences over a module.
class InlineAdvisorAnalysis : public AnalysisInfoMixin<InlineAdvisorAnalysis> {
public:
  static AnalysisKey Key;
  InlineAdvisorAnalysis() = default;
  struct Result {
    Result(Module &M, ModuleAnalysisManager &MAM) : M(M), MAM(MAM) {}
    bool invalidate(Module &, const PreservedAnalyses &PA,
                    ModuleAnalysisManager::Invalidator &) {
      // Check whether the analysis has been explicitly invalidated. Otherwise,
      // it's stateless and remains preserved.
      auto PAC = PA.getChecker<InlineAdvisorAnalysis>();
      return !PAC.preservedWhenStateless();
    }
    bool tryCreate(InlineParams Params, InliningAdvisorMode Mode,
                   const ReplayInlinerSettings &ReplaySettings,
                   InlineContext IC);
    InlineAdvisor *getAdvisor() const { return Advisor.get(); }

  private:
    Module &M;
    ModuleAnalysisManager &MAM;
    std::unique_ptr<InlineAdvisor> Advisor;
  };

  Result run(Module &M, ModuleAnalysisManager &MAM) { return Result(M, MAM); }
};
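
// Example (illustrative sketch): obtaining an advisor through the analysis.
// `Params`, `Mode`, `ReplaySettings`, and `IC` are assumed to be configured
// by the caller.
//
//   auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
//   if (IAA.tryCreate(Params, Mode, ReplaySettings, IC))
//     if (InlineAdvisor *Advisor = IAA.getAdvisor())
//       Advisor->onPassEntry();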

/// Printer pass for the InlineAdvisorAnalysis results.
class InlineAdvisorAnalysisPrinterPass
    : public PassInfoMixin<InlineAdvisorAnalysisPrinterPass> {
  raw_ostream &OS;

public:
  explicit InlineAdvisorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);

  PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
};

std::unique_ptr<InlineAdvisor>
getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
                      std::function<bool(CallBase &)> GetDefaultAdvice);

std::unique_ptr<InlineAdvisor>
getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
                          std::function<bool(CallBase &)> GetDefaultAdvice);

// Default (manual policy) decision making helper APIs. Shared with the legacy
// pass manager inliner.

/// Return the cost only if the inliner should attempt to inline at the given
/// CallSite. If we return the cost, we will emit an optimization remark later
/// using that cost, so we won't do so from this function. Return std::nullopt
/// if inlining should not be attempted.
std::optional<InlineCost>
shouldInline(CallBase &CB, function_ref<InlineCost(CallBase &CB)> GetInlineCost,
             OptimizationRemarkEmitter &ORE, bool EnableDeferral = true);
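
// Example (hedged sketch): a minimal use of shouldInline with a trivial cost
// callback; real callers compute a cost, e.g. via getInlineCost from
// InlineCost.h. `CB` and `ORE` are assumed to be in scope.
//
//   auto GetInlineCost = [](CallBase &CB) {
//     return InlineCost::getAlways("illustration only");
//   };
//   if (std::optional<InlineCost> OIC = shouldInline(CB, GetInlineCost, ORE))
//     dbgs() << "attempting inline: " << inlineCostStr(*OIC) << "\n";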

/// Emit ORE message.
void emitInlinedInto(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
                     const BasicBlock *Block, const Function &Callee,
                     const Function &Caller, bool IsMandatory,
                     function_ref<void(OptimizationRemark &)> ExtraContext = {},
                     const char *PassName = nullptr);

/// Emit an ORE message based on cost (default heuristic).
void emitInlinedIntoBasedOnCost(OptimizationRemarkEmitter &ORE, DebugLoc DLoc,
                                const BasicBlock *Block, const Function &Callee,
                                const Function &Caller, const InlineCost &IC,
                                bool ForProfileContext = false,
                                const char *PassName = nullptr);

/// Add location info to ORE message.
void addLocationToRemarks(OptimizationRemark &Remark, DebugLoc DLoc);

/// Set the inline-remark attribute.
void setInlineRemark(CallBase &CB, StringRef Message);

/// Utility for extracting the inline cost message to a string.
std::string inlineCostStr(const InlineCost &IC);
} // namespace llvm
#endif // LLVM_ANALYSIS_INLINEADVISOR_H
//===- llvm/Analysis/DDGPrinter.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
//
// This file defines the DOT printer for the Data-Dependence Graph (DDG).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DDGPRINTER_H
#define LLVM_ANALYSIS_DDGPRINTER_H

#include "llvm/Analysis/DDG.h"
#include "llvm/Support/DOTGraphTraits.h"

namespace llvm {
class LPMUpdater;
class Loop;

//===--------------------------------------------------------------------===//
// Implementation of DDG DOT Printer for a loop.
//===--------------------------------------------------------------------===//
class DDGDotPrinterPass : public PassInfoMixin<DDGDotPrinterPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

//===--------------------------------------------------------------------===//
// Specialization of DOTGraphTraits.
//===--------------------------------------------------------------------===//
template <>
struct DOTGraphTraits<const DataDependenceGraph *>
    : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}

  /// Generate a title for the graph in DOT format
  std::string getGraphName(const DataDependenceGraph *G) {
    assert(G && "expected a valid pointer to the graph.");
    return "DDG for '" + std::string(G->getName()) + "'";
  }

  /// Print a DDG node either in concise form (-ddg-dot-only) or
  /// verbose mode (-ddg-dot).
  std::string getNodeLabel(const DDGNode *Node,
                           const DataDependenceGraph *Graph);

  /// Print attributes of an edge in the DDG graph. If the edge
  /// is a MemoryDependence edge, then detailed dependence info
  /// available from DependenceAnalysis is displayed.
  std::string
  getEdgeAttributes(const DDGNode *Node,
                    GraphTraits<const DDGNode *>::ChildIteratorType I,
                    const DataDependenceGraph *G);

  /// Do not print nodes that are part of a pi-block separately. They
  /// will be printed when their containing pi-block is being printed.
  bool isNodeHidden(const DDGNode *Node, const DataDependenceGraph *G);

private:
  /// Print a DDG node in concise form.
  static std::string getSimpleNodeLabel(const DDGNode *Node,
                                        const DataDependenceGraph *G);

  /// Print a DDG node with more information including containing instructions
  /// and detailed information about the dependence edges.
  static std::string getVerboseNodeLabel(const DDGNode *Node,
                                         const DataDependenceGraph *G);

  /// Print a DDG edge in concise form.
  static std::string getSimpleEdgeAttributes(const DDGNode *Src,
                                             const DDGEdge *Edge,
                                             const DataDependenceGraph *G);

  /// Print a DDG edge with more information including detailed information
  /// about the dependence edges.
  static std::string getVerboseEdgeAttributes(const DDGNode *Src,
                                              const DDGEdge *Edge,
                                              const DataDependenceGraph *G);
};

using DDGDotGraphTraits = DOTGraphTraits<const DataDependenceGraph *>;

} // namespace llvm

#endif // LLVM_ANALYSIS_DDGPRINTER_H
//===- llvm/Analysis/ScalarEvolution.h - Scalar Evolution -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The ScalarEvolution class is an LLVM pass which can be used to analyze and
// categorize scalar expressions in loops.  It specializes in recognizing
// general induction variables, representing them with the abstract and opaque
// SCEV class.  Given this analysis, trip counts of loops and other important
// properties can be obtained.
//
// This analysis is primarily useful for induction variable substitution and
// strength reduction.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
#define LLVM_ANALYSIS_SCALAREVOLUTION_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>

namespace llvm {

class OverflowingBinaryOperator;
class AssumptionCache;
class BasicBlock;
class Constant;
class ConstantInt;
class DataLayout;
class DominatorTree;
class Function;
class GEPOperator;
class Instruction;
class LLVMContext;
class Loop;
class LoopInfo;
class raw_ostream;
class ScalarEvolution;
class SCEVAddRecExpr;
class SCEVUnknown;
class StructType;
class TargetLibraryInfo;
class Type;
class Value;
enum SCEVTypes : unsigned short;

extern bool VerifySCEV;

/// This class represents an analyzed expression in the program.  These are
/// opaque objects that the client is not allowed to do much with directly.
///
class SCEV : public FoldingSetNode {
  friend struct FoldingSetTrait<SCEV>;

  /// A reference to an Interned FoldingSetNodeID for this node.  The
  /// ScalarEvolution's BumpPtrAllocator holds the data.
  FoldingSetNodeIDRef FastID;

  // The SCEV baseclass this node corresponds to
  const SCEVTypes SCEVType;

protected:
  // Estimated complexity of this node's expression tree size.
  const unsigned short ExpressionSize;

  /// This field is initialized to zero and may be used in subclasses to store
  /// miscellaneous information.
  unsigned short SubclassData = 0;

public:
  /// NoWrapFlags are bitfield indices into SubclassData.
  ///
  /// Add and Mul expressions may have no-unsigned-wrap <NUW> or
  /// no-signed-wrap <NSW> properties, which are derived from the IR
  /// operator. NSW is a misnomer that we use to mean no signed overflow or
  /// underflow.
  ///
  /// AddRec expressions may have a no-self-wraparound <NW> property if, in
  /// the integer domain, abs(step) * max-iteration(loop) <=
  /// unsigned-max(bitwidth).  This means that the recurrence will never reach
  /// its start value if the step is non-zero.  Computing the same value on
  /// each iteration is not considered wrapping, and recurrences with step = 0
  /// are trivially <NW>.  <NW> is independent of the sign of step and the
  /// value the add recurrence starts with.
  ///
  /// Note that NUW and NSW are also valid properties of a recurrence, and
  /// either implies NW. For convenience, NW will be set for a recurrence
  /// whenever either NUW or NSW are set.
  ///
  /// We require that the flag on a SCEV apply to the entire scope in which
  /// that SCEV is defined.  A SCEV's scope is the set of locations dominated by
  /// a defining location, which is in turn described by the following rules:
  /// * A SCEVUnknown is at the point of definition of the Value.
  /// * A SCEVConstant is defined at all points.
  /// * A SCEVAddRec is defined starting with the header of the associated
  ///   loop.
  /// * All other SCEVs are defined at the earliest point all operands are
  ///   defined.
  ///
  /// The above rules describe a maximally hoisted form (without regard to
  /// potential control dependence).  A SCEV is defined anywhere a
  /// corresponding instruction could be defined in said maximally hoisted
  /// form.  Note that SCEVUDivExpr (currently the only expression type which
  /// can trap) can be defined per these rules in regions where it would trap
  /// at runtime.  A SCEV being defined does not require the existence of any
  /// instruction within the defined scope.
  enum NoWrapFlags {
    FlagAnyWrap = 0,    // No guarantee.
    FlagNW = (1 << 0),  // No self-wrap.
    FlagNUW = (1 << 1), // No unsigned wrap.
    FlagNSW = (1 << 2), // No signed wrap.
    NoWrapMask = (1 << 3) - 1
  };

  explicit SCEV(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                unsigned short ExpressionSize)
      : FastID(ID), SCEVType(SCEVTy), ExpressionSize(ExpressionSize) {}
  SCEV(const SCEV &) = delete;
  SCEV &operator=(const SCEV &) = delete;

  SCEVTypes getSCEVType() const { return SCEVType; }

  /// Return the LLVM type of this SCEV expression.
  Type *getType() const;

  /// Return operands of this SCEV expression.
  ArrayRef<const SCEV *> operands() const;

  /// Return true if the expression is a constant zero.
  bool isZero() const;

  /// Return true if the expression is a constant one.
  bool isOne() const;

  /// Return true if the expression is a constant all-ones value.
  bool isAllOnesValue() const;

  /// Return true if the specified scev is negated, but not a constant.
  bool isNonConstantNegative() const;

  // Returns the estimated size of the mathematical expression represented by
  // this SCEV. It is computed as follows:
  // 1) The size of a SCEV without operands (such as constants and
  //    SCEVUnknown) is 1;
  // 2) The size of a SCEV with operands Op1, Op2, ..., OpN is
  //    (1 + Size(Op1) + ... + Size(OpN)).
  // This value estimates the time needed to traverse the SCEV and all its
  // operands recursively, and may be used to avoid performing heavy
  // transformations on SCEVs of excessive size in order to save compilation
  // time.
  unsigned short getExpressionSize() const {
    return ExpressionSize;
  }
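
  // Worked example (illustrative): for the expression %a + %b * %c, where
  // each unknown has size 1, Size(%b * %c) = 1 + 1 + 1 = 3 and the total is
  // Size(%a + (%b * %c)) = 1 + Size(%a) + 3 = 5.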

  /// Print out the internal representation of this scalar to the specified
  /// stream.  This should really only be used for debugging purposes.
  void print(raw_ostream &OS) const;

  /// This method is used for debugging.
  void dump() const;
};

// Specialize FoldingSetTrait for SCEV to avoid needing to compute
// temporary FoldingSetNodeID values.
template <> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
  static void Profile(const SCEV &X, FoldingSetNodeID &ID) { ID = X.FastID; }

  static bool Equals(const SCEV &X, const FoldingSetNodeID &ID, unsigned IDHash,
                     FoldingSetNodeID &TempID) {
    return ID == X.FastID;
  }

  static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
    return X.FastID.ComputeHash();
  }
};

inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
  S.print(OS);
  return OS;
}

/// An object of this class is returned by queries that could not be answered.
/// For example, if you ask for the number of iterations of a linked-list
/// traversal loop, you will get one of these.  None of the standard SCEV
/// operations are valid on this class, it is just a marker.
struct SCEVCouldNotCompute : public SCEV {
  SCEVCouldNotCompute();

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S);
};

/// This class represents an assumption made using SCEV expressions which can
/// be checked at run-time.
class SCEVPredicate : public FoldingSetNode {
  friend struct FoldingSetTrait<SCEVPredicate>;

  /// A reference to an Interned FoldingSetNodeID for this node.  The
  /// ScalarEvolution's BumpPtrAllocator holds the data.
  FoldingSetNodeIDRef FastID;

public:
  enum SCEVPredicateKind { P_Union, P_Compare, P_Wrap };

protected:
  SCEVPredicateKind Kind;
  ~SCEVPredicate() = default;
  SCEVPredicate(const SCEVPredicate &) = default;
  SCEVPredicate &operator=(const SCEVPredicate &) = default;

public:
  SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);

  SCEVPredicateKind getKind() const { return Kind; }

  /// Returns the estimated complexity of this predicate.  This is roughly
  /// measured in the number of run-time checks required.
  virtual unsigned getComplexity() const { return 1; }

  /// Returns true if the predicate is always true. This means that no
  /// assumptions were made and nothing needs to be checked at run-time.
  virtual bool isAlwaysTrue() const = 0;

  /// Returns true if this predicate implies \p N.
  virtual bool implies(const SCEVPredicate *N) const = 0;

  /// Prints a textual representation of this predicate with an indentation of
  /// \p Depth.
  virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
};

inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
  P.print(OS);
  return OS;
}

// Specialize FoldingSetTrait for SCEVPredicate to avoid needing to compute
// temporary FoldingSetNodeID values.
template <>
struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
  static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
    ID = X.FastID;
  }

  static bool Equals(const SCEVPredicate &X, const FoldingSetNodeID &ID,
                     unsigned IDHash, FoldingSetNodeID &TempID) {
    return ID == X.FastID;
  }

  static unsigned ComputeHash(const SCEVPredicate &X,
                              FoldingSetNodeID &TempID) {
    return X.FastID.ComputeHash();
  }
};

/// This class represents an assumption that the expression LHS Pred RHS
/// evaluates to true, and this can be checked at run-time.
class SCEVComparePredicate final : public SCEVPredicate {
  /// We assume that LHS Pred RHS is true.
  const ICmpInst::Predicate Pred;
  const SCEV *LHS;
  const SCEV *RHS;

public:
  SCEVComparePredicate(const FoldingSetNodeIDRef ID,
                       const ICmpInst::Predicate Pred,
                       const SCEV *LHS, const SCEV *RHS);

  /// Implementation of the SCEVPredicate interface
  bool implies(const SCEVPredicate *N) const override;
  void print(raw_ostream &OS, unsigned Depth = 0) const override;
  bool isAlwaysTrue() const override;

  ICmpInst::Predicate getPredicate() const { return Pred; }

  /// Returns the left hand side of the predicate.
  const SCEV *getLHS() const { return LHS; }

  /// Returns the right hand side of the predicate.
  const SCEV *getRHS() const { return RHS; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEVPredicate *P) {
    return P->getKind() == P_Compare;
  }
};

/// This class represents an assumption made on an AddRec expression. Given an
/// affine AddRec expression {a,+,b}, we assume that it has the nssw or nusw
/// flags (defined below) in the first X iterations of the loop, where X is a
/// SCEV expression returned by getPredicatedBackedgeTakenCount.
///
/// Note that this does not imply that X is equal to the backedge taken
/// count. This means that if we have a nusw predicate for i32 {0,+,1} with a
/// predicated backedge taken count of X, we only guarantee that {0,+,1} has
/// nusw in the first X iterations. {0,+,1} may still wrap in the loop if we
/// have more than X iterations.
class SCEVWrapPredicate final : public SCEVPredicate {
public:
  /// Similar to SCEV::NoWrapFlags, but with slightly different semantics
  /// for FlagNUSW. The increment is considered to be signed, and a + b
  /// (where b is the increment) is considered to wrap if:
  ///    zext(a + b) != zext(a) + sext(b)
  ///
  /// If Signed is a function that takes an n-bit tuple and maps it to the
  /// integer domain as the tuple's value interpreted as two's complement,
  /// and Unsigned is a function that takes an n-bit tuple and maps it to the
  /// integer domain as the base-two value of the input tuple, then a + b
  /// has IncrementNUSW iff:
  ///
  /// 0 <= Unsigned(a) + Signed(b) < 2^n
  ///
  /// The IncrementNSSW flag has identical semantics to SCEV::FlagNSW.
  ///
  /// Note that the IncrementNUSW flag is not commutative: if base + inc
  /// has IncrementNUSW, then inc + base doesn't necessarily have this
  /// property. The reason for this is that this is used for sign/zero
  /// extending affine AddRec SCEV expressions when a SCEVWrapPredicate is
  /// assumed. A {base,+,inc} expression is already non-commutative with
  /// regard to base and inc, since it is interpreted as:
  ///     (((base + inc) + inc) + inc) ...
  enum IncrementWrapFlags {
    IncrementAnyWrap = 0,     // No guarantee.
    IncrementNUSW = (1 << 0), // No unsigned with signed increment wrap.
    IncrementNSSW = (1 << 1), // No signed with signed increment wrap
                              // (equivalent with SCEV::NSW)
    IncrementNoWrapMask = (1 << 2) - 1
  };

  /// Convenient IncrementWrapFlags manipulation methods.
  [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
  clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
             SCEVWrapPredicate::IncrementWrapFlags OffFlags) {
    assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
    assert((OffFlags & IncrementNoWrapMask) == OffFlags &&
           "Invalid flags value!");
    return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & ~OffFlags);
  }

  [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
  maskFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, int Mask) {
    assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
    assert((Mask & IncrementNoWrapMask) == Mask && "Invalid mask value!");

    return (SCEVWrapPredicate::IncrementWrapFlags)(Flags & Mask);
  }

  [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
  setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags,
           SCEVWrapPredicate::IncrementWrapFlags OnFlags) {
    assert((Flags & IncrementNoWrapMask) == Flags && "Invalid flags value!");
    assert((OnFlags & IncrementNoWrapMask) == OnFlags &&
           "Invalid flags value!");

    return (SCEVWrapPredicate::IncrementWrapFlags)(Flags | OnFlags);
  }

  /// Returns the set of SCEVWrapPredicate no wrap flags implied by a
  /// SCEVAddRecExpr.
  [[nodiscard]] static SCEVWrapPredicate::IncrementWrapFlags
  getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE);
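
  // Illustrative sketch (AR and SE are assumed to be in scope): the helpers
  // above compose as plain bit operations over IncrementWrapFlags, e.g.
  //
  //   IncrementWrapFlags F = getImpliedFlags(AR, SE);
  //   F = setFlags(F, IncrementNSSW);   // additionally assume no signed wrap
  //   F = clearFlags(F, IncrementNUSW); // drop the unsigned-wrap claim
  //   bool HasNSSW = maskFlags(F, IncrementNSSW) == IncrementNSSW;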

private:
  const SCEVAddRecExpr *AR;
  IncrementWrapFlags Flags;

public:
  explicit SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                             const SCEVAddRecExpr *AR,
                             IncrementWrapFlags Flags);

  /// Returns the set of assumed no-overflow flags.
  IncrementWrapFlags getFlags() const { return Flags; }

  /// Implementation of the SCEVPredicate interface
  const SCEVAddRecExpr *getExpr() const;
  bool implies(const SCEVPredicate *N) const override;
  void print(raw_ostream &OS, unsigned Depth = 0) const override;
  bool isAlwaysTrue() const override;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEVPredicate *P) {
    return P->getKind() == P_Wrap;
  }
};

/// This class represents a composition of other SCEV predicates, and is the
/// class that most clients will interact with.  This is equivalent to a
/// logical "AND" of all the predicates in the union.
///
/// NB! Unlike other SCEVPredicate sub-classes this class does not live in the
/// ScalarEvolution::Preds folding set.  This is why the \c add function is sound.
class SCEVUnionPredicate final : public SCEVPredicate {
private:
  using PredicateMap =
      DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>;

  /// Vector with references to all predicates in this union.
  SmallVector<const SCEVPredicate *, 16> Preds;

  /// Adds a predicate to this union.
  void add(const SCEVPredicate *N);

public:
  SCEVUnionPredicate(ArrayRef<const SCEVPredicate *> Preds);

  const SmallVectorImpl<const SCEVPredicate *> &getPredicates() const {
    return Preds;
  }

  /// Implementation of the SCEVPredicate interface
  bool isAlwaysTrue() const override;
  bool implies(const SCEVPredicate *N) const override;
  void print(raw_ostream &OS, unsigned Depth) const override;

  /// We estimate the complexity of a union predicate as the number of
  /// predicates in the union.
  unsigned getComplexity() const override { return Preds.size(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEVPredicate *P) {
    return P->getKind() == P_Union;
  }
};

/// The main scalar evolution driver. Because client code (intentionally)
/// can't do much with the SCEV objects directly, they must ask this class
/// for services.
class ScalarEvolution {
  friend class ScalarEvolutionsTest;

public:
  /// An enum describing the relationship between a SCEV and a loop.
  enum LoopDisposition {
    LoopVariant,   ///< The SCEV is loop-variant (unknown).
    LoopInvariant, ///< The SCEV is loop-invariant.
    LoopComputable ///< The SCEV varies predictably with the loop.
  };

  /// An enum describing the relationship between a SCEV and a basic block.
  enum BlockDisposition {
    DoesNotDominateBlock,  ///< The SCEV does not dominate the block.
    DominatesBlock,        ///< The SCEV dominates the block.
    ProperlyDominatesBlock ///< The SCEV properly dominates the block.
  };

  /// Convenient NoWrapFlags manipulation that hides enum casts and is
  /// visible in the ScalarEvolution name space.
  [[nodiscard]] static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags,
                                                   int Mask) {
    return (SCEV::NoWrapFlags)(Flags & Mask);
  }
  [[nodiscard]] static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags,
                                                  SCEV::NoWrapFlags OnFlags) {
    return (SCEV::NoWrapFlags)(Flags | OnFlags);
  }
  [[nodiscard]] static SCEV::NoWrapFlags
  clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
    return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
  }
  [[nodiscard]] static bool hasFlags(SCEV::NoWrapFlags Flags,
                                     SCEV::NoWrapFlags TestFlags) {
    return TestFlags == maskFlags(Flags, TestFlags);
  };
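
  // Illustrative sketch (a minimal usage example, not part of the API): the
  // helpers above are plain bit operations over SCEV::NoWrapFlags, e.g.
  //
  //   SCEV::NoWrapFlags F = SCEV::FlagAnyWrap;
  //   F = ScalarEvolution::setFlags(F, SCEV::FlagNSW);   // F == FlagNSW
  //   assert(ScalarEvolution::hasFlags(F, SCEV::FlagNSW));
  //   F = ScalarEvolution::clearFlags(F, SCEV::FlagNSW); // F == FlagAnyWrap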

  ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
                  DominatorTree &DT, LoopInfo &LI);
  ScalarEvolution(ScalarEvolution &&Arg);
  ~ScalarEvolution();

  LLVMContext &getContext() const { return F.getContext(); }

  /// Test if values of the given type are analyzable within the SCEV
  /// framework. This primarily includes integer types, and it can optionally
  /// include pointer types if the ScalarEvolution class has access to
  /// target-specific information.
  bool isSCEVable(Type *Ty) const;

  /// Return the size in bits of the specified type, for which isSCEVable must
  /// return true.
  uint64_t getTypeSizeInBits(Type *Ty) const;

  /// Return a type with the same bitwidth as the given type and which
  /// represents how SCEV will treat the given type, for which isSCEVable must
  /// return true. For pointer types, this is the pointer-sized integer type.
  Type *getEffectiveSCEVType(Type *Ty) const;

  // Returns the wider type among {Ty1, Ty2}.
  Type *getWiderType(Type *Ty1, Type *Ty2) const;

  /// Return true if there exists a point in the program at which both
  /// A and B could be operands to the same instruction.
  /// SCEV expressions are generally assumed to correspond to instructions
  /// which could exist in IR.  In general, this requires that there exists
  /// a use point in the program where all operands dominate the use.
  ///
  /// Example:
  /// loop {
  ///   if
  ///     loop { v1 = load @global1; }
  ///   else
  ///     loop { v2 = load @global2; }
  /// }
  /// No SCEV with operands v1 and v2 can exist in this program.
  bool instructionCouldExistWitthOperands(const SCEV *A, const SCEV *B);

  /// Return true if the SCEV is a scAddRecExpr or it contains
  /// scAddRecExpr. The result will be cached in HasRecMap.
  bool containsAddRecurrence(const SCEV *S);

  /// Does the operation \p BinOp between \p LHS and \p RHS provably have no
  /// signed/unsigned overflow (\p Signed)? If \p CtxI is specified, the
  /// no-overflow fact should be true in the context of this instruction.
  bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
                       const SCEV *LHS, const SCEV *RHS,
                       const Instruction *CtxI = nullptr);

  /// Parse NSW/NUW flags from add/sub/mul IR binary operation \p Op into
  /// SCEV no-wrap flags, and deduce flag[s] that aren't known yet.
  /// Does not mutate the original instruction. Returns std::nullopt if it could
  /// not deduce more precise flags than the instruction already has, otherwise
  /// returns proven flags.
  std::optional<SCEV::NoWrapFlags>
  getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO);

  /// Notify this ScalarEvolution that \p User directly uses SCEVs in \p Ops.
  void registerUser(const SCEV *User, ArrayRef<const SCEV *> Ops);

  /// Return true if the SCEV expression contains an undef value.
  bool containsUndefs(const SCEV *S) const;

  /// Return true if the SCEV expression contains a Value that has been
  /// optimized out and is now a nullptr.
  bool containsErasedValue(const SCEV *S) const;

  /// Return a SCEV expression for the full generality of the specified
  /// expression.
  const SCEV *getSCEV(Value *V);

  /// Return an existing SCEV for V if there is one, otherwise return nullptr.
  const SCEV *getExistingSCEV(Value *V);

  const SCEV *getConstant(ConstantInt *V);
  const SCEV *getConstant(const APInt &Val);
  const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
  const SCEV *getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth = 0);
  const SCEV *getPtrToIntExpr(const SCEV *Op, Type *Ty);
  const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
  const SCEV *getVScale(Type *Ty);
  const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
  const SCEV *getZeroExtendExprImpl(const SCEV *Op, Type *Ty,
                                    unsigned Depth = 0);
  const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth = 0);
  const SCEV *getSignExtendExprImpl(const SCEV *Op, Type *Ty,
                                    unsigned Depth = 0);
  const SCEV *getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty);
  const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
  const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                         unsigned Depth = 0);
  const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                         unsigned Depth = 0) {
    SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
    return getAddExpr(Ops, Flags, Depth);
  }
  const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                         unsigned Depth = 0) {
    SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
    return getAddExpr(Ops, Flags, Depth);
  }
  const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                         unsigned Depth = 0);
  const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                         unsigned Depth = 0) {
    SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
    return getMulExpr(Ops, Flags, Depth);
  }
  const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
                         SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                         unsigned Depth = 0) {
    SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
    return getMulExpr(Ops, Flags, Depth);
  }
  const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
  const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
  const SCEV *getURemExpr(const SCEV *LHS, const SCEV *RHS);
  const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L,
                            SCEV::NoWrapFlags Flags);
  const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                            const Loop *L, SCEV::NoWrapFlags Flags);
  const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
                            const Loop *L, SCEV::NoWrapFlags Flags) {
    SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
    return getAddRecExpr(NewOp, L, Flags);
  }
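
  // Illustrative sketch (SE, L, and an integer type I64Ty are assumed to be
  // in scope): building the expression {0,+,4}<L> plus a constant offset 8.
  //
  //   const SCEV *Start = SE.getZero(I64Ty);
  //   const SCEV *Step = SE.getConstant(I64Ty, 4);
  //   const SCEV *Rec = SE.getAddRecExpr(Start, Step, L, SCEV::FlagNUW);
  //   const SCEV *E = SE.getAddExpr(Rec, SE.getConstant(I64Ty, 8));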

  /// Checks if \p SymbolicPHI can be rewritten as an AddRecExpr under some
  /// Predicates. If successful, returns the pair <AddRecExpr, Predicates>.
  /// The function is intended to be called from PSCEV (the caller will decide
  /// whether to actually add the predicates and carry out the rewrites).
  std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
  createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);

  /// Returns an expression for a GEP
  ///
  /// \p GEP The GEP. The indices contained in the GEP itself are ignored,
  /// instead we use IndexExprs.
  /// \p IndexExprs The expressions for the indices.
  const SCEV *getGEPExpr(GEPOperator *GEP,
                         const SmallVectorImpl<const SCEV *> &IndexExprs);
  const SCEV *getAbsExpr(const SCEV *Op, bool IsNSW);
  const SCEV *getMinMaxExpr(SCEVTypes Kind,
                            SmallVectorImpl<const SCEV *> &Operands);
  const SCEV *getSequentialMinMaxExpr(SCEVTypes Kind,
                                      SmallVectorImpl<const SCEV *> &Operands);
  const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
  const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
  const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
  const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
  const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
  const SCEV *getSMinExpr(SmallVectorImpl<const SCEV *> &Operands);
  const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS,
                          bool Sequential = false);
  const SCEV *getUMinExpr(SmallVectorImpl<const SCEV *> &Operands,
                          bool Sequential = false);
  const SCEV *getUnknown(Value *V);
  const SCEV *getCouldNotCompute();

  /// Return a SCEV for the constant 0 of a specific type.
  const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }

  /// Return a SCEV for the constant 1 of a specific type.
  const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }

  /// Return a SCEV for the constant \p Power of two.
  const SCEV *getPowerOfTwo(Type *Ty, unsigned Power) {
    assert(Power < getTypeSizeInBits(Ty) && "Power out of range");
    return getConstant(APInt::getOneBitSet(getTypeSizeInBits(Ty), Power));
  }

  /// Return a SCEV for the constant -1 of a specific type.
  const SCEV *getMinusOne(Type *Ty) {
    return getConstant(Ty, -1, /*isSigned=*/true);
  }
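
  // For example (illustrative; I32Ty is an assumed 32-bit integer type):
  //
  //   const SCEV *Eight = SE.getPowerOfTwo(I32Ty, 3); // 1 << 3 == 8
  //   const SCEV *MinusOne = SE.getMinusOne(I32Ty);   // 0xFFFFFFFF, i.e. -1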

  /// Return an expression for a TypeSize.
  const SCEV *getSizeOfExpr(Type *IntTy, TypeSize Size);

  /// Return an expression for the alloc size of AllocTy that is of type IntTy
  const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);

  /// Return an expression for the store size of StoreTy that is of type IntTy
  const SCEV *getStoreSizeOfExpr(Type *IntTy, Type *StoreTy);

  /// Return an expression for offsetof on the given field with type IntTy
  const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);

  /// Return the SCEV object corresponding to -V.
  const SCEV *getNegativeSCEV(const SCEV *V,
                              SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);

  /// Return the SCEV object corresponding to ~V.
  const SCEV *getNotSCEV(const SCEV *V);

  /// Return LHS-RHS.  Minus is represented in SCEV as A+B*-1.
  ///
  /// If the LHS and RHS are pointers which don't share a common base
  /// (according to getPointerBase()), this returns a SCEVCouldNotCompute.
  /// To compute the difference between two unrelated pointers, you can
  /// explicitly convert the arguments using getPtrToIntExpr(), for pointer
  /// types that support it.
  const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                           SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
                           unsigned Depth = 0);

  /// Compute ceil(N / D). N and D are treated as unsigned values.
  ///
  /// Since SCEV doesn't have native ceiling division, this generates a
  /// SCEV expression of the following form:
  ///
  /// umin(N, 1) + floor((N - umin(N, 1)) / D)
  ///
  /// A denominator of zero or poison is handled the same way as getUDivExpr().
  const SCEV *getUDivCeilSCEV(const SCEV *N, const SCEV *D);
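
  // Worked example (illustrative): for N = 7, D = 2 the formula gives
  // umin(7, 1) + floor((7 - umin(7, 1)) / 2) = 1 + floor(6 / 2) = 4, which
  // is ceil(7 / 2); for N = 0 it gives 0 + floor(0 / D) = 0, so the
  // expression never needs the overflow-prone (N + D - 1) / D form.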

  /// Return a SCEV corresponding to a conversion of the input value to the
  /// specified type.  If the type must be extended, it is zero extended.
  const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                      unsigned Depth = 0);

  /// Return a SCEV corresponding to a conversion of the input value to the
  /// specified type.  If the type must be extended, it is sign extended.
  const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                      unsigned Depth = 0);

  /// Return a SCEV corresponding to a conversion of the input value to the
  /// specified type.  If the type must be extended, it is zero extended.  The
  /// conversion must not be narrowing.
  const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);

  /// Return a SCEV corresponding to a conversion of the input value to the
  /// specified type.  If the type must be extended, it is sign extended.  The
  /// conversion must not be narrowing.
  const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);

  /// Return a SCEV corresponding to a conversion of the input value to the
  /// specified type. If the type must be extended, it is extended with
  /// unspecified bits. The conversion must not be narrowing.
  const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);

  /// Return a SCEV corresponding to a conversion of the input value to the
  /// specified type.  The conversion must not be widening.
  const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);

  /// Promote the operands to the wider of the types using zero-extension, and
  /// then perform a umax operation with them.
  const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS);

  /// Promote the operands to the wider of the types using zero-extension, and
  /// then perform a umin operation with them.
  const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS,
                                         bool Sequential = false);

  /// Promote the operands to the wider of the types using zero-extension, and
  /// then perform a umin operation with them. N-ary function.
  const SCEV *getUMinFromMismatchedTypes(SmallVectorImpl<const SCEV *> &Ops,
                                         bool Sequential = false);

  /// Transitively follow the chain of pointer-type operands until reaching a
  /// SCEV that does not have a single pointer operand. This returns a
  /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
  /// cases do exist.
  const SCEV *getPointerBase(const SCEV *V);

  /// Compute an expression equivalent to S - getPointerBase(S).
  const SCEV *removePointerBase(const SCEV *S);

  /// Return a SCEV expression for the specified value at the specified scope
  /// in the program.  The L value specifies the loop nest in which to
  /// evaluate the expression: null means the top level (outside all loops),
  /// while a non-null loop means the point immediately inside that loop.
  ///
  /// This method can be used to compute the exit value for a variable defined
  /// in a loop by querying what the value will hold in the parent loop.
  ///
  /// In the case that a relevant loop exit value cannot be computed, the
  /// original value V is returned.
  const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);

  /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
  const SCEV *getSCEVAtScope(Value *V, const Loop *L);

  /// Test whether entry to the loop is protected by a conditional between LHS
  /// and RHS.  This is used to help avoid max expressions in loop trip
  /// counts, and to eliminate casts.
  bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
                                const SCEV *LHS, const SCEV *RHS);

  /// Test whether entry to the basic block is protected by a conditional
  /// between LHS and RHS.
  bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
                                      ICmpInst::Predicate Pred, const SCEV *LHS,
                                      const SCEV *RHS);

  /// Test whether the backedge of the loop is protected by a conditional
  /// between LHS and RHS.  This is used to eliminate casts.
  bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
                                   const SCEV *LHS, const SCEV *RHS);

  /// A version of getTripCountFromExitCount below which always picks an
  /// evaluation type which cannot result in overflow.
  const SCEV *getTripCountFromExitCount(const SCEV *ExitCount);

  /// Convert from an "exit count" (i.e. "backedge taken count") to a "trip
  /// count".  A "trip count" is the number of times the header of the loop
  /// will execute if an exit is taken after the specified number of backedges
  /// have been taken.  (e.g. TripCount = ExitCount + 1.)  Note that the
  /// expression can overflow if ExitCount = UINT_MAX.  If EvalTy is not wide
  /// enough to hold the result without overflow, the result wraps unsigned
  /// with 2's-complement semantics, e.g. EC = 255 (i8) gives TC = 0 (i8).
  const SCEV *getTripCountFromExitCount(const SCEV *ExitCount, Type *EvalTy,
                                        const Loop *L);

  /// Returns the exact trip count of the loop if we can compute it, and
  /// the result is a small constant.  '0' is used to represent an unknown
  /// or non-constant trip count.  Note that a trip count is simply one more
  /// than the backedge taken count for the loop.
  unsigned getSmallConstantTripCount(const Loop *L);
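
  // For example (illustrative): in
  //
  //   for (int i = 0; i < 128; ++i)
  //     body();
  //
  // the backedge is taken 127 times, so the trip count returned here is 128.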

  /// Return the exact trip count for this loop if we exit through ExitingBlock.
  /// '0' is used to represent an unknown or non-constant trip count.  Note
  /// that a trip count is simply one more than the backedge taken count for
  /// the same exit.
  /// This "trip count" assumes that control exits via ExitingBlock. More
  /// precisely, it is the number of times that control will reach ExitingBlock
  /// before taking the branch. For loops with multiple exits, it may not be
  /// the number of times that the loop header executes if the loop exits
  /// prematurely via another branch.
  unsigned getSmallConstantTripCount(const Loop *L,
                                     const BasicBlock *ExitingBlock);

  /// Returns the upper bound of the loop trip count as a normal unsigned
  /// value.
  /// Returns 0 if the trip count is unknown or not constant.
  unsigned getSmallConstantMaxTripCount(const Loop *L);

  /// Returns the largest constant divisor of the trip count as a normal
  /// unsigned value, if possible. This means that the actual trip count is
  /// always a multiple of the returned value. Returns 1 if the trip count is
  /// unknown or not guaranteed to be a multiple of a constant. Will also
  /// return 1 if the trip count is very large (>= 2^32).
  /// Note that the argument is an exit count for loop L, NOT a trip count.
  unsigned getSmallConstantTripMultiple(const Loop *L,
                                        const SCEV *ExitCount);

  /// Returns the largest constant divisor of the trip count of the
  /// loop.  Will return 1 if no trip count could be computed, or if a
  /// divisor could not be found.
  unsigned getSmallConstantTripMultiple(const Loop *L);

  /// Returns the largest constant divisor of the trip count of this loop as a
  /// normal unsigned value, if possible. This means that the actual trip
  /// count is always a multiple of the returned value (don't forget the trip
  /// count could very well be zero as well!). As explained in the comments
  /// for getSmallConstantTripCount, this assumes that control exits the loop
  /// via ExitingBlock.
  unsigned getSmallConstantTripMultiple(const Loop *L,
                                        const BasicBlock *ExitingBlock);

  /// The terms "backedge taken count" and "exit count" are used
  /// interchangeably to refer to the number of times the backedge of a loop 
  /// has executed before the loop is exited.
  enum ExitCountKind {
    /// An expression exactly describing the number of times the backedge has
    /// executed when a loop is exited.
    Exact,
    /// A constant which provides an upper bound on the exact trip count.
    ConstantMaximum,
    /// An expression which provides an upper bound on the exact trip count.
    SymbolicMaximum,
  };

  /// Return the number of times the backedge executes before the given exit
  /// would be taken; if not exactly computable, return SCEVCouldNotCompute. 
  /// For a single exit loop, this value is equivalent to the result of
  /// getBackedgeTakenCount.  The loop is guaranteed to exit (via *some* exit)
  /// before the backedge is executed (ExitCount + 1) times.  Note that there
  /// is no guarantee about *which* exit is taken on the exiting iteration.
  const SCEV *getExitCount(const Loop *L, const BasicBlock *ExitingBlock,
                           ExitCountKind Kind = Exact);

  /// If the specified loop has a predictable backedge-taken count, return it,
  /// otherwise return a SCEVCouldNotCompute object. The backedge-taken count is
  /// the number of times the loop header will be branched to from within the
  /// loop, assuming there are no abnormal exits like exception throws. This is
  /// one less than the trip count of the loop, since it doesn't count the first
  /// iteration, when the header is branched to from outside the loop.
  ///
  /// Note that it is not valid to call this method on a loop without a
  /// loop-invariant backedge-taken count (see
  /// hasLoopInvariantBackedgeTakenCount).
  const SCEV *getBackedgeTakenCount(const Loop *L, ExitCountKind Kind = Exact);

  /// Similar to getBackedgeTakenCount, except it will add a set of
  /// SCEV predicates to Predicates that are required to be true in order for
  /// the answer to be correct. Predicates can be checked with run-time
  /// checks and can be used to perform loop versioning.
  const SCEV *getPredicatedBackedgeTakenCount(const Loop *L,
                                              SmallVector<const SCEVPredicate *, 4> &Predicates);

  /// When successful, this returns a SCEVConstant that is greater than or
  /// equal to (i.e. a "conservative over-approximation" of) the value returned
  /// by getBackedgeTakenCount.  If such a value cannot be computed, it returns
  /// the SCEVCouldNotCompute object.
  const SCEV *getConstantMaxBackedgeTakenCount(const Loop *L) {
    return getBackedgeTakenCount(L, ConstantMaximum);
  }

  /// When successful, this returns a SCEV that is greater than or equal to
  /// (i.e. a "conservative over-approximation" of) the value returned by
  /// getBackedgeTakenCount.  If such a value cannot be computed, it returns
  /// the SCEVCouldNotCompute object.
  const SCEV *getSymbolicMaxBackedgeTakenCount(const Loop *L) {
    return getBackedgeTakenCount(L, SymbolicMaximum);
  }

  /// Return true if the backedge taken count is either the value returned by
  /// getConstantMaxBackedgeTakenCount or zero.
  bool isBackedgeTakenCountMaxOrZero(const Loop *L);

  /// Return true if the specified loop has an analyzable loop-invariant
  /// backedge-taken count.
  bool hasLoopInvariantBackedgeTakenCount(const Loop *L);

  // This method should be called by the client when it made any change that
  // would invalidate SCEV's answers, and the client wants to remove all loop
  // information held internally by ScalarEvolution. This is intended to be used
  // when the alternative to forget a loop is too expensive (i.e. large loop
  // bodies).
  void forgetAllLoops();

  /// This method should be called by the client when it has changed a loop in
  /// a way that may affect ScalarEvolution's ability to compute a trip count,
  /// or if the loop is deleted.  This call is potentially expensive for large
  /// loop bodies.
  void forgetLoop(const Loop *L);

  // This method invokes forgetLoop for the outermost loop of the given loop
  // \p L, making ScalarEvolution forget the whole subtree. This needs to
  // be done whenever we make a transform that may affect the parameters of the
  // outer loop, such as exit counts for branches.
  void forgetTopmostLoop(const Loop *L);

  /// This method should be called by the client when it has changed a value
  /// in a way that may affect its value, or which may disconnect it from a
  /// def-use chain linking it to a loop.
  void forgetValue(Value *V);

  /// Called when the client has changed the disposition of values in
  /// this loop.
  ///
  /// We don't have a way to invalidate per-loop dispositions. Clear and
  /// recompute is simpler.
  void forgetLoopDispositions();

  /// Called when the client has changed the disposition of values in
  /// a loop or block.
  ///
  /// We don't have a way to invalidate per-loop/per-block dispositions. Clear
  /// and recompute is simpler.
  void forgetBlockAndLoopDispositions(Value *V = nullptr);

  /// Determine the minimum number of zero bits that S is guaranteed to end in
  /// (at every loop iteration).  It is, at the same time, the minimum number
  /// of times S is divisible by 2.  For example, given {4,+,8} it returns 2.
  /// If S is guaranteed to be 0, it returns the bitwidth of S.
  uint32_t getMinTrailingZeros(const SCEV *S);

  /// Returns the max constant multiple of S.
  APInt getConstantMultiple(const SCEV *S);

  // Returns the max constant multiple of S. If S is exactly 0, return 1.
  APInt getNonZeroConstantMultiple(const SCEV *S);

  /// Determine the unsigned range for a particular SCEV.
  /// NOTE: This returns a copy of the reference returned by getRangeRef.
  ConstantRange getUnsignedRange(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_UNSIGNED);
  }

  /// Determine the min of the unsigned range for a particular SCEV.
  APInt getUnsignedRangeMin(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMin();
  }

  /// Determine the max of the unsigned range for a particular SCEV.
  APInt getUnsignedRangeMax(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMax();
  }

  /// Determine the signed range for a particular SCEV.
  /// NOTE: This returns a copy of the reference returned by getRangeRef.
  ConstantRange getSignedRange(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_SIGNED);
  }

  /// Determine the min of the signed range for a particular SCEV.
  APInt getSignedRangeMin(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMin();
  }

  /// Determine the max of the signed range for a particular SCEV.
  APInt getSignedRangeMax(const SCEV *S) {
    return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMax();
  }
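
  // Illustrative sketch (SE and a SCEV *S are assumed to be in scope): the
  // helpers above are thin wrappers over getRangeRef, e.g.
  //
  //   ConstantRange UR = SE.getUnsignedRange(S);
  //   if (SE.getUnsignedRangeMax(S).ult(100))
  //     ; // every value S can take is < 100 as an unsigned quantity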

  /// Test if the given expression is known to be negative.
  bool isKnownNegative(const SCEV *S);

  /// Test if the given expression is known to be positive.
  bool isKnownPositive(const SCEV *S);

  /// Test if the given expression is known to be non-negative.
  bool isKnownNonNegative(const SCEV *S);

  /// Test if the given expression is known to be non-positive.
  bool isKnownNonPositive(const SCEV *S);

  /// Test if the given expression is known to be non-zero.
  bool isKnownNonZero(const SCEV *S);

  /// Splits SCEV expression \p S into two SCEVs. One of them is obtained from
  /// \p S by substitution of all AddRec sub-expression related to loop \p L
  /// with initial value of that SCEV. The second is obtained from \p S by
  /// substitution of all AddRec sub-expressions related to loop \p L with post
  /// increment of this AddRec in the loop \p L. In both cases all other AddRec
  /// sub-expressions (not related to \p L) remain the same.
  /// If \p S contains a non-invariant unknown SCEV, the function returns
  /// the CouldNotCompute SCEV in both elements of the pair.
  /// For example, for SCEV S={0, +, 1}<L1> + {0, +, 1}<L2> and loop L=L1
  /// the function returns pair:
  /// first = {0, +, 1}<L2>
  /// second = {1, +, 1}<L1> + {0, +, 1}<L2>
  /// We can see that the first AddRec sub-expression was replaced with 0
  /// (its initial value) in the first element and with {1, +, 1}<L1> (its
  /// post-increment value) in the second one. In both cases the AddRec
  /// expression related to L2 remains the same.
  std::pair<const SCEV *, const SCEV *> SplitIntoInitAndPostInc(const Loop *L,
                                                                const SCEV *S);

  /// We'd like to check the predicate on every iteration of the most dominated
  /// loop between loops used in LHS and RHS.
  /// To do this we use the following list of steps:
  /// 1. Collect the set S of all loops on which either LHS or RHS depends.
  /// 2. If S is non-empty
  /// a. Let PD be the element of S which is dominated by all other elements.
  /// b. Let E(LHS) be the value of LHS on entry to PD.
  ///    To get E(LHS), we should just take LHS and replace all AddRecs that
  ///    are attached to PD with their entry values.
  ///    Define E(RHS) in the same way.
  /// c. Let B(LHS) be the value of LHS on the backedge of PD.
  ///    To get B(LHS), we should just take LHS and replace all AddRecs that
  ///    are attached to PD with their backedge values.
  ///    Define B(RHS) in the same way.
  /// d. Note that E(LHS) and E(RHS) are automatically available on entry of PD,
  ///    so we can assert on that.
  /// e. Return true if isLoopEntryGuardedByCond(Pred, E(LHS), E(RHS)) &&
  ///                   isLoopBackedgeGuardedByCond(Pred, B(LHS), B(RHS))
  bool isKnownViaInduction(ICmpInst::Predicate Pred, const SCEV *LHS,
                           const SCEV *RHS);

  /// Test if the given expression is known to satisfy the condition described
  /// by Pred, LHS, and RHS.
  bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
                        const SCEV *RHS);

  /// Check whether the condition described by Pred, LHS, and RHS is true or
  /// false. If we know it, return the evaluation of this condition. If neither
  /// is proved, return std::nullopt.
  std::optional<bool> evaluatePredicate(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS);
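
  // Illustrative sketch (SE, A, and B are assumed to be in scope; foldBranch
  // is a hypothetical helper): evaluatePredicate distinguishes "known true",
  // "known false", and "unknown".
  //
  //   if (std::optional<bool> R =
  //           SE.evaluatePredicate(ICmpInst::ICMP_ULT, A, B))
  //     foldBranch(*R); // the condition's truth value is known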

  /// Test if the given expression is known to satisfy the condition described
  /// by Pred, LHS, and RHS in the given Context.
  bool isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
                          const SCEV *RHS, const Instruction *CtxI);

  /// Check whether the condition described by Pred, LHS, and RHS is true or
  /// false in the given \p Context. If we know it, return the evaluation of
  /// this condition. If neither is proved, return std::nullopt.
  std::optional<bool> evaluatePredicateAt(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS,
                                          const Instruction *CtxI);

  /// Test if the condition described by Pred, LHS, RHS is known to be true on
  /// every iteration of the loop of the recurrence LHS.
  bool isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                               const SCEVAddRecExpr *LHS, const SCEV *RHS);

  /// Information about the number of loop iterations for which a loop exit's
  /// branch condition evaluates to the not-taken path.  This is a temporary
  /// pair of exact and max expressions that are eventually summarized in
  /// ExitNotTakenInfo and BackedgeTakenInfo.
  struct ExitLimit {
    const SCEV *ExactNotTaken; // The exit is not taken exactly this many times
    const SCEV *ConstantMaxNotTaken; // The exit is not taken at most this many
                                     // times
    const SCEV *SymbolicMaxNotTaken;

    // Not taken either exactly ConstantMaxNotTaken or zero times
    bool MaxOrZero = false;

    /// A set of predicate guards for this ExitLimit. The result is only valid
    /// if all of the predicates in \c Predicates evaluate to 'true' at
    /// run-time.
    SmallPtrSet<const SCEVPredicate *, 4> Predicates;

    void addPredicate(const SCEVPredicate *P) {
      assert(!isa<SCEVUnionPredicate>(P) && "Only add leaf predicates here!");
      Predicates.insert(P);
    }

    /// Construct either an exact exit limit from a constant, or an unknown
    /// one from a SCEVCouldNotCompute.  No other types of SCEVs are allowed
    /// as arguments and asserts enforce that internally.
    /*implicit*/ ExitLimit(const SCEV *E);

    ExitLimit(
        const SCEV *E, const SCEV *ConstantMaxNotTaken,
        const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
        ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList =
            std::nullopt);

    ExitLimit(const SCEV *E, const SCEV *ConstantMaxNotTaken,
              const SCEV *SymbolicMaxNotTaken, bool MaxOrZero,
              const SmallPtrSetImpl<const SCEVPredicate *> &PredSet);

    /// Test whether this ExitLimit contains any computed information, or
    /// whether it's all SCEVCouldNotCompute values.
    bool hasAnyInfo() const {
      return !isa<SCEVCouldNotCompute>(ExactNotTaken) ||
             !isa<SCEVCouldNotCompute>(ConstantMaxNotTaken);
    }

    /// Test whether this ExitLimit contains all information.
    bool hasFullInfo() const {
      return !isa<SCEVCouldNotCompute>(ExactNotTaken);
    }
  };

  /// Compute the number of times the backedge of the specified loop will
  /// execute if its exit condition were a conditional branch of ExitCond.
  ///
  /// \p ControlsOnlyExit is true if ExitCond directly controls the only exit
  /// branch. In this case, we can assume that the loop exits only if the
  /// condition is true and can infer that failing to meet the condition prior
  /// to integer wraparound results in undefined behavior.
  ///
  /// If \p AllowPredicates is set, this call will try to use a minimal set of
  /// SCEV predicates in order to return an exact answer.
  ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond,
                                     bool ExitIfTrue, bool ControlsOnlyExit,
                                     bool AllowPredicates = false);

  /// A predicate is said to be monotonically increasing if it may go from
  /// being false to being true as the loop iterates, but never the other way
  /// around.  A predicate is said to be monotonically decreasing if it may go
  /// from being true to being false as the loop iterates, but never the other
  /// way around.
  enum MonotonicPredicateType {
    MonotonicallyIncreasing,
    MonotonicallyDecreasing
  };

  /// If, for all loop invariant X, the predicate "LHS `Pred` X" is
  /// monotonically increasing or decreasing, returns
  /// MonotonicallyIncreasing or MonotonicallyDecreasing respectively. If we
  /// could not prove either of these facts, returns std::nullopt.
  std::optional<MonotonicPredicateType>
  getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
                            ICmpInst::Predicate Pred);

  struct LoopInvariantPredicate {
    ICmpInst::Predicate Pred;
    const SCEV *LHS;
    const SCEV *RHS;

    LoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
                           const SCEV *RHS)
        : Pred(Pred), LHS(LHS), RHS(RHS) {}
  };
  /// If the result of the predicate LHS `Pred` RHS is loop invariant with
  /// respect to L, return a LoopInvariantPredicate with LHS and RHS being
  /// invariants, available at L's entry. Otherwise, return std::nullopt.
  std::optional<LoopInvariantPredicate>
  getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
                            const SCEV *RHS, const Loop *L,
                            const Instruction *CtxI = nullptr);

  /// If the result of the predicate LHS `Pred` RHS is loop invariant with
  /// respect to L at the given Context during at least the first MaxIter
  /// iterations, return a LoopInvariantPredicate with LHS and RHS being
  /// invariants, available at L's entry. Otherwise, return std::nullopt. The
  /// predicate should be the loop's exit condition.
  std::optional<LoopInvariantPredicate>
  getLoopInvariantExitCondDuringFirstIterations(ICmpInst::Predicate Pred,
                                                const SCEV *LHS,
                                                const SCEV *RHS, const Loop *L,
                                                const Instruction *CtxI,
                                                const SCEV *MaxIter);

  std::optional<LoopInvariantPredicate>
  getLoopInvariantExitCondDuringFirstIterationsImpl(
      ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
      const Instruction *CtxI, const SCEV *MaxIter);

  /// Simplify LHS and RHS in a comparison with predicate Pred. Return true
  /// iff any changes were made. If the operands are provably equal or
  /// unequal, LHS and RHS are set to the same value and Pred is set to either
  /// ICMP_EQ or ICMP_NE.
  bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS,
                            const SCEV *&RHS, unsigned Depth = 0);

  /// Return the "disposition" of the given SCEV with respect to the given
  /// loop.
  LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);

  /// Return true if the value of the given SCEV is unchanging in the
  /// specified loop.
  bool isLoopInvariant(const SCEV *S, const Loop *L);

  /// Determine if the SCEV can be evaluated at the loop's entry. It is true
  /// if it doesn't depend on a SCEVUnknown of an instruction which is
  /// dominated by the header of loop L.
  bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L);

  /// Return true if the given SCEV changes value in a known way in the
  /// specified loop.  This property being true implies that the value is
  /// variant in the loop AND that we can emit an expression to compute the
  /// value of the expression at any particular loop iteration.
  bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);

  /// Return the "disposition" of the given SCEV with respect to the given
  /// block.
  BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);

  /// Return true if the elements that make up the given SCEV dominate the
  /// specified basic block.
  bool dominates(const SCEV *S, const BasicBlock *BB);

  /// Return true if the elements that make up the given SCEV properly
  /// dominate the specified basic block.
  bool properlyDominates(const SCEV *S, const BasicBlock *BB);

  /// Test whether the given SCEV has Op as a direct or indirect operand.
  bool hasOperand(const SCEV *S, const SCEV *Op) const;

  /// Return the size of an element read or written by Inst.
  const SCEV *getElementSize(Instruction *Inst);

  void print(raw_ostream &OS) const;
  void verify() const;
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

  /// Return the DataLayout associated with the module this SCEV instance is
  /// operating on.
  const DataLayout &getDataLayout() const {
    return F.getParent()->getDataLayout();
  }

  const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
  const SCEVPredicate *getComparePredicate(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS);

  const SCEVPredicate *
  getWrapPredicate(const SCEVAddRecExpr *AR,
                   SCEVWrapPredicate::IncrementWrapFlags AddedFlags);

  /// Re-writes the SCEV according to the Predicates in \p A.
  const SCEV *rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                    const SCEVPredicate &A);
  /// Tries to convert the \p S expression to an AddRec expression,
  /// adding additional predicates to \p Preds as required.
  const SCEVAddRecExpr *convertSCEVToAddRecWithPredicates(
      const SCEV *S, const Loop *L,
      SmallPtrSetImpl<const SCEVPredicate *> &Preds);

  /// Compute \p LHS - \p RHS and return the result as an APInt if it is a
  /// constant, and std::nullopt if it isn't.
  ///
  /// This is intended to be a cheaper version of getMinusSCEV.  We can be
  /// frugal here since we just bail out of actually constructing and
  /// canonicalizing an expression in the cases where the result isn't going
  /// to be a constant.
  std::optional<APInt> computeConstantDifference(const SCEV *LHS,
                                                 const SCEV *RHS);
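
  // For example, a sketch (X is any SCEV of integer type Ty):
  //
  //   const SCEV *XPlus3 = SE.getAddExpr(X, SE.getConstant(Ty, 3));
  //   std::optional<APInt> D = SE.computeConstantDifference(XPlus3, X);
  //   // D holds the value 3; for unrelated operands it is std::nullopt.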

  /// Update no-wrap flags of an AddRec. This may drop the cached info about
  /// this AddRec (such as range info) if the new flags may potentially
  /// sharpen it.
  void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags);

  /// Try to apply information from loop guards for \p L to \p Expr.
  const SCEV *applyLoopGuards(const SCEV *Expr, const Loop *L);

  /// Return true if the loop has no abnormal exits. That is, if the loop
  /// is not infinite, it must exit through an explicit edge in the CFG.
  /// (As opposed to either a) throwing out of the function or b) entering a
  /// well defined infinite loop in some callee.)
  bool loopHasNoAbnormalExits(const Loop *L) {
    return getLoopProperties(L).HasNoAbnormalExits;
  }

  /// Return true if this loop is finite by assumption.  That is,
  /// to be infinite, it must also be undefined.
  bool loopIsFiniteByAssumption(const Loop *L);

  class FoldID {
    const SCEV *Op = nullptr;
    const Type *Ty = nullptr;
    unsigned short C;

  public:
    FoldID(SCEVTypes C, const SCEV *Op, const Type *Ty) : Op(Op), Ty(Ty), C(C) {
      assert(Op);
      assert(Ty);
    }

    FoldID(unsigned short C) : C(C) {}

    unsigned computeHash() const {
      return detail::combineHashValue(
          C, detail::combineHashValue(reinterpret_cast<uintptr_t>(Op),
                                      reinterpret_cast<uintptr_t>(Ty)));
    }

    bool operator==(const FoldID &RHS) const {
      return std::tie(Op, Ty, C) == std::tie(RHS.Op, RHS.Ty, RHS.C);
    }
  };

private:
  /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
  /// Value is deleted.
  class SCEVCallbackVH final : public CallbackVH {
    ScalarEvolution *SE;

    void deleted() override;
    void allUsesReplacedWith(Value *New) override;

  public:
    SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
  };

  friend class SCEVCallbackVH;
  friend class SCEVExpander;
  friend class SCEVUnknown;

  /// The function we are analyzing.
  Function &F;

  /// Does the module have any calls to the llvm.experimental.guard intrinsic
  /// at all?  If this is false, we avoid doing work that will only help if
  /// there are guards present in the IR.
  bool HasGuards;

  /// The target library information for the target we are targeting.
  TargetLibraryInfo &TLI;

  /// The tracker for \@llvm.assume intrinsics in this function.
  AssumptionCache &AC;

  /// The dominator tree.
  DominatorTree &DT;

  /// The loop information for the function we are currently analyzing.
  LoopInfo &LI;

  /// This SCEV is used to represent unknown trip counts and other unknown
  /// quantities.
  std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;

  /// The type for HasRecMap.
  using HasRecMapType = DenseMap<const SCEV *, bool>;

  /// This is a cache to record whether a SCEV contains any scAddRecExpr.
  HasRecMapType HasRecMap;

  /// The type for ExprValueMap.
  using ValueSetVector = SmallSetVector<Value *, 4>;
  using ExprValueMapType = DenseMap<const SCEV *, ValueSetVector>;

  /// ExprValueMap -- This map records the original values from which
  /// the SCEV expr is generated.
  ExprValueMapType ExprValueMap;

  /// The type for ValueExprMap.
  using ValueExprMapType =
      DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *>>;

  /// This is a cache of the values we have analyzed so far.
  ValueExprMapType ValueExprMap;

  /// This is a cache for expressions that got folded to a different existing
  /// SCEV.
  DenseMap<FoldID, const SCEV *> FoldCache;
  DenseMap<const SCEV *, SmallVector<FoldID, 2>> FoldCacheUser;

  /// Mark predicate values currently being processed by isImpliedCond.
  SmallPtrSet<const Value *, 6> PendingLoopPredicates;

  /// Mark SCEVUnknown Phis currently being processed by getRangeRef.
  SmallPtrSet<const PHINode *, 6> PendingPhiRanges;

  /// Mark SCEVUnknown Phis currently being processed by getRangeRefIter.
  SmallPtrSet<const PHINode *, 6> PendingPhiRangesIter;

  // Mark SCEVUnknown Phis currently being processed by isImpliedViaMerge.
  SmallPtrSet<const PHINode *, 6> PendingMerges;

  /// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
  /// conditions dominating the backedge of a loop.
  bool WalkingBEDominatingConds = false;

  /// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
  /// predicate by splitting it into a set of independent predicates.
  bool ProvingSplitPredicate = false;

  /// Memoized values for getConstantMultiple.
  DenseMap<const SCEV *, APInt> ConstantMultipleCache;

  /// Return the Value set from which the SCEV expr is generated.
  ArrayRef<Value *> getSCEVValues(const SCEV *S);

  /// Private helper method for the getConstantMultiple method.
  APInt getConstantMultipleImpl(const SCEV *S);

  /// Information about the number of times a particular loop exit may be
  /// reached before exiting the loop.
  struct ExitNotTakenInfo {
    PoisoningVH<BasicBlock> ExitingBlock;
    const SCEV *ExactNotTaken;
    const SCEV *ConstantMaxNotTaken;
    const SCEV *SymbolicMaxNotTaken;
    SmallPtrSet<const SCEVPredicate *, 4> Predicates;

    explicit ExitNotTakenInfo(
        PoisoningVH<BasicBlock> ExitingBlock, const SCEV *ExactNotTaken,
        const SCEV *ConstantMaxNotTaken, const SCEV *SymbolicMaxNotTaken,
        const SmallPtrSet<const SCEVPredicate *, 4> &Predicates)
        : ExitingBlock(ExitingBlock), ExactNotTaken(ExactNotTaken),
          ConstantMaxNotTaken(ConstantMaxNotTaken),
          SymbolicMaxNotTaken(SymbolicMaxNotTaken), Predicates(Predicates) {}

    bool hasAlwaysTruePredicate() const {
      return Predicates.empty();
    }
  };

  /// Information about the backedge-taken count of a loop. This currently
  /// includes an exact count and a maximum count.
  ///
  class BackedgeTakenInfo {
    friend class ScalarEvolution;

    /// A list of computable exits and their not-taken counts.  Loops almost
    /// never have more than one computable exit.
    SmallVector<ExitNotTakenInfo, 1> ExitNotTaken;

    /// Expression indicating the least constant maximum backedge-taken count of
    /// the loop that is known, or a SCEVCouldNotCompute. This expression is
    /// only valid if the predicates associated with all loop exits are true.
    const SCEV *ConstantMax = nullptr;

    /// Indicating if \c ExitNotTaken has an element for every exiting block in
    /// the loop.
    bool IsComplete = false;

    /// Expression indicating the least maximum backedge-taken count of the loop
    /// that is known, or a SCEVCouldNotCompute. Lazily computed on first query.
    const SCEV *SymbolicMax = nullptr;

    /// True iff the backedge is taken either exactly Max or zero times.
    bool MaxOrZero = false;

    bool isComplete() const { return IsComplete; }
    const SCEV *getConstantMax() const { return ConstantMax; }

  public:
    BackedgeTakenInfo() = default;
    BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
    BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;

    using EdgeExitInfo = std::pair<BasicBlock *, ExitLimit>;

    /// Initialize BackedgeTakenInfo from a list of exact exit counts.
    BackedgeTakenInfo(ArrayRef<EdgeExitInfo> ExitCounts, bool IsComplete,
                      const SCEV *ConstantMax, bool MaxOrZero);

    /// Test whether this BackedgeTakenInfo contains any computed information,
    /// or whether it's all SCEVCouldNotCompute values.
    bool hasAnyInfo() const {
      return !ExitNotTaken.empty() ||
             !isa<SCEVCouldNotCompute>(getConstantMax());
    }

    /// Test whether this BackedgeTakenInfo contains complete information.
    bool hasFullInfo() const { return isComplete(); }

    /// Return an expression indicating the exact *backedge-taken*
    /// count of the loop if it is known or SCEVCouldNotCompute
    /// otherwise.  If execution makes it to the backedge on every
    /// iteration (i.e. there are no abnormal exits like exception
    /// throws and thread exits) then this is the number of times the
    /// loop header will execute minus one.
    ///
    /// If the SCEV predicate associated with the answer can be different
    /// from AlwaysTrue, we must add a (non-null) Predicates argument.
    /// The SCEV predicate associated with the answer will be added to
    /// Predicates. A run-time check needs to be emitted for the SCEV
    /// predicate in order for the answer to be valid.
    ///
    /// Note that we should always know if we need to pass a predicate
    /// argument or not from the way the ExitCounts vector was computed.
    /// If we allowed SCEV predicates to be generated when populating this
    /// vector, this information can contain them and therefore a
    /// SCEVPredicate argument should be added to getExact.
    const SCEV *getExact(const Loop *L, ScalarEvolution *SE,
                         SmallVector<const SCEVPredicate *, 4> *Predicates = nullptr) const;

    /// Return the number of times this loop exit may fall through to the back
    /// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
    /// this block before this number of iterations, but may exit via another
    /// block.
    const SCEV *getExact(const BasicBlock *ExitingBlock,
                         ScalarEvolution *SE) const;

    /// Get the constant max backedge taken count for the loop.
    const SCEV *getConstantMax(ScalarEvolution *SE) const;

    /// Get the constant max backedge taken count for the particular loop exit.
    const SCEV *getConstantMax(const BasicBlock *ExitingBlock,
                               ScalarEvolution *SE) const;

    /// Get the symbolic max backedge taken count for the loop.
    const SCEV *getSymbolicMax(const Loop *L, ScalarEvolution *SE);

    /// Get the symbolic max backedge taken count for the particular loop exit.
    const SCEV *getSymbolicMax(const BasicBlock *ExitingBlock,
                               ScalarEvolution *SE) const;

    /// Return true if the number of times this backedge is taken is either the
    /// value returned by getConstantMax or zero.
    bool isConstantMaxOrZero(ScalarEvolution *SE) const;
  };
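
  // To illustrate the terminology used by BackedgeTakenInfo (a sketch, not
  // tied to a specific caller):
  //
  //   for (unsigned i = 0; i != n; ++i) // backedge taken exactly n times;
  //     body();                         // the loop header runs n + 1 times
  //
  // When only a bound is known, getConstantMax may still return a constant
  // upper bound even though getExact is SCEVCouldNotCompute.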

  /// Cache the backedge-taken count of the loops for this function as they
  /// are computed.
  DenseMap<const Loop *, BackedgeTakenInfo> BackedgeTakenCounts;

  /// Cache the predicated backedge-taken count of the loops for this
  /// function as they are computed.
  DenseMap<const Loop *, BackedgeTakenInfo> PredicatedBackedgeTakenCounts;

  /// Loops whose backedge taken counts directly use this non-constant SCEV.
  DenseMap<const SCEV *, SmallPtrSet<PointerIntPair<const Loop *, 1, bool>, 4>>
      BECountUsers;

  /// This map contains entries for all of the PHI instructions that we
  /// attempt to compute constant evolutions for.  This allows us to avoid
  /// potentially expensive recomputation of these properties.  An instruction
  /// maps to null if we are unable to compute its exit value.
  DenseMap<PHINode *, Constant *> ConstantEvolutionLoopExitValue;

  /// This map contains entries for all the expressions that we attempt to
  /// compute getSCEVAtScope information for, which can be expensive in
  /// extreme cases.
  DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
      ValuesAtScopes;

  /// Reverse map for invalidation purposes: stores which SCEV and which loop
  /// this is the value-at-scope of.
  DenseMap<const SCEV *, SmallVector<std::pair<const Loop *, const SCEV *>, 2>>
      ValuesAtScopesUsers;

  /// Memoized computeLoopDisposition results.
  DenseMap<const SCEV *,
           SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
      LoopDispositions;

  struct LoopProperties {
    /// Set to true if the loop contains no instruction that can abnormally exit
    /// the loop (i.e. via throwing an exception, by terminating the thread
    /// cleanly or by infinite looping in a called function).  Strictly
    /// speaking, the last one is not leaving the loop, but is identical to
    /// leaving the loop for reasoning about undefined behavior.
    bool HasNoAbnormalExits;

    /// Set to true if the loop contains no instruction that can have side
    /// effects (i.e. via throwing an exception, volatile or atomic access).
    bool HasNoSideEffects;
  };

  /// Cache for \c getLoopProperties.
  DenseMap<const Loop *, LoopProperties> LoopPropertiesCache;

  /// Return a \c LoopProperties instance for \p L, creating one if necessary.
  LoopProperties getLoopProperties(const Loop *L);

  bool loopHasNoSideEffects(const Loop *L) {
    return getLoopProperties(L).HasNoSideEffects;
  }

  /// Compute a LoopDisposition value.
  LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);

  /// Memoized computeBlockDisposition results.
  DenseMap<
      const SCEV *,
      SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
      BlockDispositions;

  /// Compute a BlockDisposition value.
  BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);

  /// Stores all SCEVs that use a given SCEV as a direct operand.
  DenseMap<const SCEV *, SmallPtrSet<const SCEV *, 8>> SCEVUsers;

  /// Memoized results from getRange
  DenseMap<const SCEV *, ConstantRange> UnsignedRanges;

  /// Memoized results from getRange
  DenseMap<const SCEV *, ConstantRange> SignedRanges;

  /// Used to parameterize getRange
  enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };

  /// Set the memoized range for the given SCEV.
  const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
                                ConstantRange CR) {
    DenseMap<const SCEV *, ConstantRange> &Cache =
        Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;

    auto Pair = Cache.try_emplace(S, std::move(CR));
    if (!Pair.second)
      Pair.first->second = std::move(CR);
    return Pair.first->second;
  }

  /// Determine the range for a particular SCEV.
  /// NOTE: This returns a reference to an entry in a cache. It must be
  /// copied if it is needed for longer.
  const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint,
                                   unsigned Depth = 0);

  /// Determine the range for a particular SCEV, evaluating ranges for its
  /// operands iteratively first.
  const ConstantRange &getRangeRefIter(const SCEV *S, RangeSignHint Hint);

  /// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Step}.
  /// Helper for \c getRange.
  ConstantRange getRangeForAffineAR(const SCEV *Start, const SCEV *Step,
                                    const APInt &MaxBECount);

  /// Determines the range for the affine non-self-wrapping SCEVAddRecExpr {\p
  /// Start,+,\p Step}<nw>.
  ConstantRange getRangeForAffineNoSelfWrappingAR(const SCEVAddRecExpr *AddRec,
                                                  const SCEV *MaxBECount,
                                                  unsigned BitWidth,
                                                  RangeSignHint SignHint);

  /// Try to compute a range for the affine SCEVAddRecExpr {\p Start,+,\p
  /// Step} by "factoring out" a ternary expression from the add recurrence.
  /// Helper called by \c getRange.
  ConstantRange getRangeViaFactoring(const SCEV *Start, const SCEV *Step,
                                     const APInt &MaxBECount);

  /// If the unknown expression U corresponds to a simple recurrence, return
  /// a constant range which represents the entire recurrence.  Note that
  /// *add* recurrences with loop invariant steps aren't represented by
  /// SCEVUnknowns and thus don't use this mechanism.
  ConstantRange getRangeForUnknownRecurrence(const SCEVUnknown *U);

  /// We know that there is no SCEV for the specified value.  Analyze the
  /// expression recursively.
  const SCEV *createSCEV(Value *V);

  /// We know that there is no SCEV for the specified value. Create a new SCEV
  /// for \p V iteratively.
  const SCEV *createSCEVIter(Value *V);
  /// Collect operands of \p V for which SCEV expressions should be constructed
  /// first. Returns a SCEV directly if it can be constructed trivially for \p
  /// V.
  const SCEV *getOperandsToCreate(Value *V, SmallVectorImpl<Value *> &Ops);

  /// Provide the special handling we need to analyze PHI SCEVs.
  const SCEV *createNodeForPHI(PHINode *PN);

  /// Helper function called from createNodeForPHI.
  const SCEV *createAddRecFromPHI(PHINode *PN);

  /// A helper function for createAddRecFromPHI to handle simple cases.
  const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
                                       Value *StartValueV);

  /// Helper function called from createNodeForPHI.
  const SCEV *createNodeFromSelectLikePHI(PHINode *PN);

  /// Provide special handling for a select-like instruction (currently this
  /// is either a select instruction or a phi node).  \p Ty is the type of the
  /// instruction being processed, which is assumed equivalent to
  /// "Cond ? TrueVal : FalseVal".
  std::optional<const SCEV *>
  createNodeForSelectOrPHIInstWithICmpInstCond(Type *Ty, ICmpInst *Cond,
                                               Value *TrueVal, Value *FalseVal);

  /// See if we can model this select-like instruction via umin_seq expression.
  const SCEV *createNodeForSelectOrPHIViaUMinSeq(Value *I, Value *Cond,
                                                 Value *TrueVal,
                                                 Value *FalseVal);

  /// Given a value \p V, which is a select-like instruction (currently this is
  /// either a select instruction or a phi node), which is assumed equivalent to
  ///   Cond ? TrueVal : FalseVal
  /// see if we can model it as a SCEV expression.
  const SCEV *createNodeForSelectOrPHI(Value *V, Value *Cond, Value *TrueVal,
                                       Value *FalseVal);
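
  // For instance (a sketch of the intended modeling; x and y are integer
  // values):
  //
  //   r = (x < y) ? x : y;  // signed compare: modeled as (smin x, y)
  //   r = (x > y) ? x : y;  // signed compare: modeled as (smax x, y)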

  /// Provide the special handling we need to analyze GEP SCEVs.
  const SCEV *createNodeForGEP(GEPOperator *GEP);

  /// Implementation code for getSCEVAtScope; called at most once for each
  /// SCEV+Loop pair.
  const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);

  /// Return the BackedgeTakenInfo for the given loop, lazily computing new
  /// values if the loop hasn't been analyzed yet. The returned result is
  /// guaranteed not to be predicated.
  BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);

  /// Similar to getBackedgeTakenInfo, but will add predicates as required
  /// with the purpose of returning complete information.
  const BackedgeTakenInfo &getPredicatedBackedgeTakenInfo(const Loop *L);

  /// Compute the number of times the specified loop will iterate.
  /// If AllowPredicates is set, we will create new SCEV predicates as
  /// necessary in order to return an exact answer.
  BackedgeTakenInfo computeBackedgeTakenCount(const Loop *L,
                                              bool AllowPredicates = false);

  /// Compute the number of times the backedge of the specified loop will
  /// execute if it exits via the specified block. If AllowPredicates is set,
  /// this call will try to use a minimal set of SCEV predicates in order to
  /// return an exact answer.
  ExitLimit computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                             bool AllowPredicates = false);

  /// Return a symbolic upper bound for the backedge taken count of the loop.
  /// This is more general than getConstantMaxBackedgeTakenCount as it returns
  /// an arbitrary expression as opposed to only constants.
  const SCEV *computeSymbolicMaxBackedgeTakenCount(const Loop *L);

  // Helper functions for computeExitLimitFromCond to avoid exponential time
  // complexity.

  class ExitLimitCache {
    // It may look like we need to key on the whole (L, ExitIfTrue,
    // ControlsOnlyExit, AllowPredicates) tuple, but recursive calls to
    // computeExitLimitFromCondCached from computeExitLimitFromCondImpl only
    // vary the \c ExitCond and \c ControlsOnlyExit parameters.  We remember
    // the initial values of the other parameters to assert our assumption.
    SmallDenseMap<PointerIntPair<Value *, 1>, ExitLimit> TripCountMap;

    const Loop *L;
    bool ExitIfTrue;
    bool AllowPredicates;

  public:
    ExitLimitCache(const Loop *L, bool ExitIfTrue, bool AllowPredicates)
        : L(L), ExitIfTrue(ExitIfTrue), AllowPredicates(AllowPredicates) {}

    std::optional<ExitLimit> find(const Loop *L, Value *ExitCond,
                                  bool ExitIfTrue, bool ControlsOnlyExit,
                                  bool AllowPredicates);

    void insert(const Loop *L, Value *ExitCond, bool ExitIfTrue,
                bool ControlsOnlyExit, bool AllowPredicates,
                const ExitLimit &EL);
  };

  using ExitLimitCacheTy = ExitLimitCache;

  ExitLimit computeExitLimitFromCondCached(ExitLimitCacheTy &Cache,
                                           const Loop *L, Value *ExitCond,
                                           bool ExitIfTrue,
                                           bool ControlsOnlyExit,
                                           bool AllowPredicates);
  ExitLimit computeExitLimitFromCondImpl(ExitLimitCacheTy &Cache, const Loop *L,
                                         Value *ExitCond, bool ExitIfTrue,
                                         bool ControlsOnlyExit,
                                         bool AllowPredicates);
  std::optional<ScalarEvolution::ExitLimit> computeExitLimitFromCondFromBinOp(
      ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
      bool ControlsOnlyExit, bool AllowPredicates);

  /// Compute the number of times the backedge of the specified loop will
  /// execute if its exit condition were a conditional branch of the ICmpInst
  /// ExitCond and ExitIfTrue. If AllowPredicates is set, this call will try
  /// to use a minimal set of SCEV predicates in order to return an exact
  /// answer.
  ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst *ExitCond,
                                     bool ExitIfTrue,
                                     bool IsSubExpr,
                                     bool AllowPredicates = false);

  /// Variant of previous which takes the components representing an ICmp
  /// as opposed to the ICmpInst itself.  Note that the prior version can
  /// return more precise results in some cases and is preferred when the
  /// caller has a materialized ICmp.
  ExitLimit computeExitLimitFromICmp(const Loop *L, ICmpInst::Predicate Pred,
                                     const SCEV *LHS, const SCEV *RHS,
                                     bool IsSubExpr,
                                     bool AllowPredicates = false);

  /// Compute the number of times the backedge of the specified loop will
  /// execute if its exit condition were a switch with a single exiting case
  /// to ExitingBB.
  ExitLimit computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                 SwitchInst *Switch,
                                                 BasicBlock *ExitingBB,
                                                 bool IsSubExpr);

  /// Compute the exit limit of a loop that is controlled by a
  /// "(IV >> 1) != 0" type comparison.  We cannot compute the exact trip
  /// count in these cases (since SCEV has no way of expressing them), but we
  /// can still sometimes compute an upper bound.
  ///
  /// Return an ExitLimit for a loop whose backedge is guarded by `LHS Pred
  /// RHS`.
  ExitLimit computeShiftCompareExitLimit(Value *LHS, Value *RHS, const Loop *L,
                                         ICmpInst::Predicate Pred);

  /// If the loop is known to execute a constant number of times (the
  /// condition evolves only from constants), try to evaluate a few iterations
  /// of the loop until the exit condition gets a value of ExitWhen
  /// (true or false).  If we cannot evaluate the exit count of the loop,
  /// return CouldNotCompute.
  const SCEV *computeExitCountExhaustively(const Loop *L, Value *Cond,
                                           bool ExitWhen);

  /// Return the number of times an exit condition comparing the specified
  /// value to zero will execute.  If not computable, return CouldNotCompute.
  /// If AllowPredicates is set, this call will try to use a minimal set of
  /// SCEV predicates in order to return an exact answer.
  ExitLimit howFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr,
                         bool AllowPredicates = false);

  /// Return the number of times an exit condition checking the specified
  /// value for nonzero will execute.  If not computable, return
  /// CouldNotCompute.
  ExitLimit howFarToNonZero(const SCEV *V, const Loop *L);

  /// Return the number of times an exit condition containing the specified
  /// less-than comparison will execute.  If not computable, return
  /// CouldNotCompute.
  ///
  /// \p isSigned specifies whether the less-than is signed.
  ///
  /// \p ControlsOnlyExit is true when the LHS < RHS condition directly controls
  /// the branch (the loop exits only if the condition is true). In this case,
  /// we can use NoWrapFlags to skip overflow checks.
  ///
  /// If \p AllowPredicates is set, this call will try to use a minimal set of
  /// SCEV predicates in order to return an exact answer.
  ExitLimit howManyLessThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
                             bool isSigned, bool ControlsOnlyExit,
                             bool AllowPredicates = false);

  ExitLimit howManyGreaterThans(const SCEV *LHS, const SCEV *RHS, const Loop *L,
                                bool isSigned, bool IsSubExpr,
                                bool AllowPredicates = false);

  /// Return a predecessor of BB (which may not be an immediate predecessor)
  /// which has exactly one successor from which BB is reachable, paired with
  /// that successor, or a pair of null pointers if no such block is found.
  std::pair<const BasicBlock *, const BasicBlock *>
  getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB) const;

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the given FoundCondValue value evaluates to true in the given
  /// Context. If Context is nullptr, then the found predicate is true
  /// everywhere. LHS and FoundLHS may have different type widths.
  bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
                     const Value *FoundCondValue, bool Inverse,
                     const Instruction *Context = nullptr);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by FoundPred, FoundLHS, and FoundRHS
  /// is true in the given Context. If Context is nullptr, then the found
  /// predicate is true everywhere. LHS and FoundLHS must have the same type
  /// width.
  bool isImpliedCondBalancedTypes(ICmpInst::Predicate Pred, const SCEV *LHS,
                                  const SCEV *RHS,
                                  ICmpInst::Predicate FoundPred,
                                  const SCEV *FoundLHS, const SCEV *FoundRHS,
                                  const Instruction *CtxI);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
  /// true in the given Context. If Context is nullptr, then the found
  /// predicate is true everywhere.
  bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
                     ICmpInst::Predicate FoundPred, const SCEV *FoundLHS,
                     const SCEV *FoundRHS,
                     const Instruction *Context = nullptr);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true in the given Context. If Context is nullptr, then the found
  /// predicate is true everywhere.
  bool isImpliedCondOperands(ICmpInst::Predicate Pred, const SCEV *LHS,
                             const SCEV *RHS, const SCEV *FoundLHS,
                             const SCEV *FoundRHS,
                             const Instruction *Context = nullptr);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true. Here LHS is an operation that includes FoundLHS as one of its
  /// arguments.
  bool isImpliedViaOperations(ICmpInst::Predicate Pred,
                              const SCEV *LHS, const SCEV *RHS,
                              const SCEV *FoundLHS, const SCEV *FoundRHS,
                              unsigned Depth = 0);

  /// Test whether the condition described by Pred, LHS, and RHS is true.
  /// Use only simple non-recursive types of checks, such as range analysis.
  bool isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true.
  bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred, const SCEV *LHS,
                                   const SCEV *RHS, const SCEV *FoundLHS,
                                   const SCEV *FoundRHS);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true.  Utility function used by isImpliedCondOperands.  Tries to handle
  /// cases like "X `sgt` 0 => X - 1 `sgt` -1".
  bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred, const SCEV *LHS,
                                      const SCEV *RHS, const SCEV *FoundLHS,
                                      const SCEV *FoundRHS);

  /// Return true if the condition denoted by \p LHS \p Pred \p RHS is implied
  /// by a call to @llvm.experimental.guard in \p BB.
  bool isImpliedViaGuard(const BasicBlock *BB, ICmpInst::Predicate Pred,
                         const SCEV *LHS, const SCEV *RHS);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true.
  ///
  /// This routine tries to rule out certain kinds of integer overflow, and
  /// then tries to reason about arithmetic properties of the predicates.
  bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS,
                                          const SCEV *FoundLHS,
                                          const SCEV *FoundRHS);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true.
  ///
  /// This routine tries to weaken the known condition based on the fact that
  /// FoundLHS is an AddRec.
  bool isImpliedCondOperandsViaAddRecStart(ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS,
                                           const SCEV *FoundLHS,
                                           const SCEV *FoundRHS,
                                           const Instruction *CtxI);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true.
  ///
  /// This routine tries to figure out the predicate for Phis which are
  /// SCEVUnknown, if it is true for every possible incoming value from their
  /// respective basic blocks.
  bool isImpliedViaMerge(ICmpInst::Predicate Pred,
                         const SCEV *LHS, const SCEV *RHS,
                         const SCEV *FoundLHS, const SCEV *FoundRHS,
                         unsigned Depth);

  /// Test whether the condition described by Pred, LHS, and RHS is true
  /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
  /// true.
  ///
  /// This routine tries to reason about shifts.
  bool isImpliedCondOperandsViaShift(ICmpInst::Predicate Pred, const SCEV *LHS,
                                     const SCEV *RHS, const SCEV *FoundLHS,
                                     const SCEV *FoundRHS);

  /// If we know that the specified Phi is in the header of its containing
  /// loop, that the loop executes a constant number of times, and that the
  /// PHI node is just a recurrence involving constants, fold it.
  Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt &BEs,
                                              const Loop *L);

  /// Test if the given expression is known to satisfy the condition described
  /// by Pred and the known constant ranges of LHS and RHS.
  bool isKnownPredicateViaConstantRanges(ICmpInst::Predicate Pred,
                                         const SCEV *LHS, const SCEV *RHS);

  /// Try to prove the condition described by "LHS Pred RHS" by ruling out
  /// integer overflow.
  ///
  /// For instance, this will return true for "A s< (A + C)<nsw>" if C is
  /// positive.
  bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred, const SCEV *LHS,
                                     const SCEV *RHS);

  /// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
  /// prove them individually.
  bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS);

  /// Try to match the Expr as "(L + R)<Flags>".
  bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
                      SCEV::NoWrapFlags &Flags);

  /// Forget predicated/non-predicated backedge taken counts for the given loop.
  void forgetBackedgeTakenCounts(const Loop *L, bool Predicated);

  /// Drop memoized information for all \p SCEVs.
  void forgetMemoizedResults(ArrayRef<const SCEV *> SCEVs);

  /// Helper for forgetMemoizedResults.
  void forgetMemoizedResultsImpl(const SCEV *S);

  /// Iterate over instructions in \p Worklist and their users. Erase entries
  /// from ValueExprMap and collect SCEV expressions in \p ToForget.
  void visitAndClearUsers(SmallVectorImpl<Instruction *> &Worklist,
                          SmallPtrSetImpl<Instruction *> &Visited,
                          SmallVectorImpl<const SCEV *> &ToForget);

  /// Erase Value from ValueExprMap and ExprValueMap.
  void eraseValueFromMap(Value *V);

  /// Insert a V -> S mapping into ValueExprMap and ExprValueMap.
  void insertValueToMap(Value *V, const SCEV *S);

  /// Return false iff the given SCEV contains a SCEVUnknown with a null
  /// value pointer.
  bool checkValidity(const SCEV *S) const;

  /// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
  /// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}.  This is
  /// equivalent to proving no signed (resp. unsigned) wrap in
  /// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
  /// (resp. `SCEVZeroExtendExpr`).
  template <typename ExtendOpTy>
  bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
                                 const Loop *L);

  /// Try to prove NSW or NUW on \p AR relying on ConstantRange manipulation.
  SCEV::NoWrapFlags proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR);

  /// Try to prove NSW on \p AR by proving facts about conditions known on
  /// entry and backedge.
  SCEV::NoWrapFlags proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR);

  /// Try to prove NUW on \p AR by proving facts about conditions known on
  /// entry and backedge.
  SCEV::NoWrapFlags proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR);

  std::optional<MonotonicPredicateType>
  getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
                                ICmpInst::Predicate Pred);

  /// Return SCEV no-wrap flags that can be proven based on reasoning about
  /// how poison produced from no-wrap flags on this value (e.g. a nuw add)
  /// would trigger undefined behavior on overflow.
  SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V);

  /// Return a scope which provides an upper bound on the defining scope of
  /// 'S'. Specifically, return the first instruction in said bounding scope.
  /// Return nullptr if the scope is trivial (function entry).
  /// (See scope definition rules associated with flag discussion above)
  const Instruction *getNonTrivialDefiningScopeBound(const SCEV *S);

  /// Return a scope which provides an upper bound on the defining scope for
  /// a SCEV with the operands in Ops.  The outparam Precise is set if the
  /// bound found is a precise bound (i.e. must be the defining scope.)
  const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops,
                                           bool &Precise);

  /// Wrapper around the above for cases which don't care if the bound
  /// is precise.
  const Instruction *getDefiningScopeBound(ArrayRef<const SCEV *> Ops);

  /// Given two instructions in the same function, return true if we can
  /// prove B must execute given A executes.
  bool isGuaranteedToTransferExecutionTo(const Instruction *A,
                                         const Instruction *B);

  /// Return true if the SCEV corresponding to \p I is never poison.  Proving
  /// this is more complex than proving that just \p I is never poison, since
  /// SCEV commons expressions across control flow, and you can have cases
  /// like:
  ///
  ///   idx0 = a + b;
  ///   ptr[idx0] = 100;
  ///   if (<condition>) {
  ///     idx1 = a +nsw b;
  ///     ptr[idx1] = 200;
  ///   }
  ///
  /// where the SCEV expression (+ a b) is guaranteed to not be poison (and
  /// hence not sign-overflow) only if "<condition>" is true.  Since both
  /// `idx0` and `idx1` will be mapped to the same SCEV expression, (+ a b),
  /// it is not okay to annotate (+ a b) with <nsw> in the above example.
  bool isSCEVExprNeverPoison(const Instruction *I);

  /// This is like \c isSCEVExprNeverPoison but it specifically works for
  /// instructions that will get mapped to SCEV add recurrences.  Return true
  /// if \p I will never generate poison under the assumption that \p I is an
  /// add recurrence on the loop \p L.
  bool isAddRecNeverPoison(const Instruction *I, const Loop *L);

  /// Similar to createAddRecFromPHI, but with the additional flexibility of
  /// suggesting runtime overflow checks in case casts are encountered.
  /// If successful, the analysis records that for this loop, \p SymbolicPHI,
  /// which is the UnknownSCEV currently representing the PHI, can be rewritten
  /// into an AddRec, assuming some predicates; the function then returns the
  /// AddRec and the predicates as a pair, and caches this pair in
  /// PredicatedSCEVRewrites.
  /// If the analysis is not successful, a mapping from the \p SymbolicPHI to
  /// itself (with no predicates) is recorded, and a nullptr with an empty
  /// predicates vector is returned as a pair.
  std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
  createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI);

  /// Compute the maximum backedge count based on the range of values
  /// permitted by Start, End, and Stride. This is for loops of the form
  /// {Start, +, Stride} LT End.
  ///
  /// Preconditions:
  /// * the induction variable is known to be positive.
  /// * the induction variable is assumed not to overflow (i.e. either it
  ///   actually doesn't, or we'd have to immediately execute UB)
  /// We *don't* assert these preconditions so please be careful.
  const SCEV *computeMaxBECountForLT(const SCEV *Start, const SCEV *Stride,
                                     const SCEV *End, unsigned BitWidth,
                                     bool IsSigned);

  /// Verify if a linear IV with positive stride can overflow when in a
  /// less-than comparison, knowing the invariant term of the comparison and
  /// the stride.
  bool canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);

  /// Verify if a linear IV with negative stride can overflow when in a
  /// greater-than comparison, knowing the invariant term of the comparison
  /// and the stride.
  bool canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned);

  /// Get add expr already created or create a new one.
  const SCEV *getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                 SCEV::NoWrapFlags Flags);

  /// Get mul expr already created or create a new one.
  const SCEV *getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                 SCEV::NoWrapFlags Flags);

  // Get addrec expr already created or create a new one.
  const SCEV *getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                    const Loop *L, SCEV::NoWrapFlags Flags);

  /// Return x if \p Val is f(x) where f is a 1-1 function.
  const SCEV *stripInjectiveFunctions(const SCEV *Val) const;

  /// Find all of the loops transitively used in \p S, and fill \p LoopsUsed.
  /// A loop is considered "used" by an expression if it contains
  /// an add rec on said loop.
  void getUsedLoops(const SCEV *S, SmallPtrSetImpl<const Loop *> &LoopsUsed);

  /// Try to match the pattern generated by getURemExpr(A, B). If successful,
  /// assign A and B to LHS and RHS, respectively.
  bool matchURem(const SCEV *Expr, const SCEV *&LHS, const SCEV *&RHS);

  /// Look for a SCEV expression with type `SCEVType` and operands `Ops` in
  /// `UniqueSCEVs`.  Return it if found, else nullptr.
  SCEV *findExistingSCEVInCache(SCEVTypes SCEVType, ArrayRef<const SCEV *> Ops);

  /// Get reachable blocks in this function, making limited use of SCEV
  /// reasoning about conditions.
  void getReachableBlocks(SmallPtrSetImpl<BasicBlock *> &Reachable,
                          Function &F);

  /// Return the given SCEV expression with a new set of operands.
  /// This preserves the original nowrap flags.
  const SCEV *getWithOperands(const SCEV *S,
                              SmallVectorImpl<const SCEV *> &NewOps);

  FoldingSet<SCEV> UniqueSCEVs;
  FoldingSet<SCEVPredicate> UniquePreds;
  BumpPtrAllocator SCEVAllocator;

  /// This maps loops to a list of addrecs that directly use said loop.
  DenseMap<const Loop *, SmallVector<const SCEVAddRecExpr *, 4>> LoopUsers;

  /// Cache tentative mappings from UnknownSCEVs in a Loop, to a SCEV expression
  /// they can be rewritten into under certain predicates.
  DenseMap<std::pair<const SCEVUnknown *, const Loop *>,
           std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      PredicatedSCEVRewrites;

  /// Set of AddRecs for which proving NUW via an induction has already been
  /// tried.
  SmallPtrSet<const SCEVAddRecExpr *, 16> UnsignedWrapViaInductionTried;

  /// Set of AddRecs for which proving NSW via an induction has already been
  /// tried.
  SmallPtrSet<const SCEVAddRecExpr *, 16> SignedWrapViaInductionTried;

  /// The head of a linked list of all SCEVUnknown values that have been
  /// allocated. This is used by releaseMemory to locate them all and call
  /// their destructors.
  SCEVUnknown *FirstUnknown = nullptr;
};

/// Analysis pass that exposes the \c ScalarEvolution for a function.
class ScalarEvolutionAnalysis
    : public AnalysisInfoMixin<ScalarEvolutionAnalysis> {
  friend AnalysisInfoMixin<ScalarEvolutionAnalysis>;

  static AnalysisKey Key;

public:
  using Result = ScalarEvolution;

  ScalarEvolution run(Function &F, FunctionAnalysisManager &AM);
};
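
// A typical way to obtain the result in a new-pass-manager pass (a minimal
// sketch; MyPass is a hypothetical pass):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
//     (void)SE; // query SE here
//     return PreservedAnalyses::all();
//   }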

/// Verifier pass for the \c ScalarEvolutionAnalysis results.
class ScalarEvolutionVerifierPass
    : public PassInfoMixin<ScalarEvolutionVerifierPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for the \c ScalarEvolutionAnalysis results.
class ScalarEvolutionPrinterPass
    : public PassInfoMixin<ScalarEvolutionPrinterPass> {
  raw_ostream &OS;

public:
  explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

class ScalarEvolutionWrapperPass : public FunctionPass {
  std::unique_ptr<ScalarEvolution> SE;

public:
  static char ID;

  ScalarEvolutionWrapperPass();

  ScalarEvolution &getSE() { return *SE; }
  const ScalarEvolution &getSE() const { return *SE; }

  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void print(raw_ostream &OS, const Module * = nullptr) const override;
  void verifyAnalysis() const override;
};

/// An interface layer with SCEV used to manage how we see SCEV expressions
/// for values in the context of existing predicates. We can add new
/// predicates, but we cannot remove them.
///
/// This layer has multiple purposes:
///   - provides a simple interface for SCEV versioning.
///   - guarantees that the order of transformations applied on a SCEV
///     expression for a single Value is consistent across two different
///     getSCEV calls. This means that, for example, once we've obtained
///     an AddRec expression for a certain value through expression
///     rewriting, we will continue to get an AddRec expression for that
///     Value.
///   - reduces the number of expression rewrites.
class PredicatedScalarEvolution {
public:
  PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L);

  const SCEVPredicate &getPredicate() const;

  /// Returns the SCEV expression of V, in the context of the current SCEV
  /// predicate.  The order of transformations applied on the expression of V
  /// returned by ScalarEvolution is guaranteed to be preserved, even when
  /// adding new predicates.
  const SCEV *getSCEV(Value *V);

  /// Get the (predicated) backedge count for the analyzed loop.
  const SCEV *getBackedgeTakenCount();

  /// Adds a new predicate.
  void addPredicate(const SCEVPredicate &Pred);

  /// Attempts to produce an AddRecExpr for V by adding additional SCEV
  /// predicates. If we can't transform the expression into an AddRecExpr,
  /// we return nullptr and do not add additional SCEV predicates to the
  /// current context.
  const SCEVAddRecExpr *getAsAddRec(Value *V);

  /// Proves that V doesn't overflow by adding a SCEV predicate.
  void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);

  /// Returns true if we've proved that V doesn't wrap by means of a SCEV
  /// predicate.
  bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags);

  /// Returns the ScalarEvolution analysis used.
  ScalarEvolution *getSE() const { return &SE; }

  /// We need to explicitly define the copy constructor because of FlagsMap.
  PredicatedScalarEvolution(const PredicatedScalarEvolution &);

  /// Print the SCEV mappings done by the Predicated Scalar Evolution.
  /// The printed text is indented by \p Depth.
  void print(raw_ostream &OS, unsigned Depth) const;

  /// Check if \p AR1 and \p AR2 are equal, while taking into account
  /// Equal predicates in Preds.
  bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1,
                                const SCEVAddRecExpr *AR2) const;

private:
  /// Increments the version number of the predicate.  This needs to be called
  /// every time the SCEV predicate changes.
  void updateGeneration();

  /// Holds a SCEV and the version number of the SCEV predicate used to
  /// perform the rewrite of the expression.
  using RewriteEntry = std::pair<unsigned, const SCEV *>;

  /// Maps a SCEV to the rewrite result of that SCEV at a certain version
  /// number. If this number doesn't match the current Generation, we will
  /// need to do a rewrite. To preserve the transformation order of previous
  /// rewrites, we will rewrite the previous result instead of the original
  /// SCEV.
  DenseMap<const SCEV *, RewriteEntry> RewriteMap;

  /// Records what NoWrap flags we've added to a Value *.
  ValueMap<Value *, SCEVWrapPredicate::IncrementWrapFlags> FlagsMap;

  /// The ScalarEvolution analysis.
  ScalarEvolution &SE;

  /// The analyzed Loop.
  const Loop &L;

  /// The SCEVPredicate that forms our context. We will rewrite all
  /// expressions assuming that this predicate is true.
  std::unique_ptr<SCEVUnionPredicate> Preds;

  /// Marks the version of the SCEV predicate used. When rewriting a SCEV
  /// expression we mark it with the version of the predicate. We use this to
  /// figure out if the predicate has changed from the last rewrite of the
  /// SCEV. If so, we need to perform a new rewrite.
  unsigned Generation = 0;

  /// The backedge taken count.
  const SCEV *BackedgeCount = nullptr;
};
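
// Example client usage (a hedged sketch; SE, L and Phi are assumed to be
// available in the caller):
//
//   PredicatedScalarEvolution PSE(SE, L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Phi)) {
//     // AR is only valid under PSE.getPredicate(); a run-time check for
//     // that predicate must be emitted before relying on AR.
//   }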

template <> struct DenseMapInfo<ScalarEvolution::FoldID> {
  static inline ScalarEvolution::FoldID getEmptyKey() {
    ScalarEvolution::FoldID ID(0);
    return ID;
  }
  static inline ScalarEvolution::FoldID getTombstoneKey() {
    ScalarEvolution::FoldID ID(1);
    return ID;
  }

  static unsigned getHashValue(const ScalarEvolution::FoldID &Val) {
    return Val.computeHash();
  }

  static bool isEqual(const ScalarEvolution::FoldID &LHS,
                      const ScalarEvolution::FoldID &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_SCALAREVOLUTION_H

//===-- InstructionPrecedenceTracking.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Implements a class that is able to define some instructions as "special"
// (e.g. as having implicit control flow, or writing memory, or having another
// interesting property) and then efficiently answers queries of the
// following types:
// 1. Are there any special instructions in the block of interest?
// 2. Return the first of the special instructions in the given block;
// 3. Check if the given instruction is preceded by the first special
//    instruction in the same block.
// The class provides caching that allows these queries to be answered
// quickly. The user must make sure that the cached data is invalidated
// properly whenever the contents of some tracked block are changed.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
#define LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H

#include "llvm/ADT/DenseMap.h"

namespace llvm {

class BasicBlock;
class Instruction;

class InstructionPrecedenceTracking {
  // Maps a block to the topmost special instruction in it. If the value is
  // nullptr, it means that it is known that this block does not contain any
  // special instructions.
  DenseMap<const BasicBlock *, const Instruction *> FirstSpecialInsts;

  // Fills information about the given block's special instructions.
  void fill(const BasicBlock *BB);

#ifndef NDEBUG
  /// Asserts that the cached info for \p BB is up-to-date. This helps to catch
  /// the usage error of accessing a block without properly invalidating after a
  /// previous transform.
  void validate(const BasicBlock *BB) const;

  /// Asserts that the contents of this tracking are up-to-date. This
  /// helps to catch the usage error of accessing a block without properly
  /// invalidating after a previous transform.
  void validateAll() const;
#endif

protected:
  /// Returns the topmost special instruction from the block \p BB. Returns
  /// nullptr if there are no special instructions in the block.
  const Instruction *getFirstSpecialInstruction(const BasicBlock *BB);

  /// Returns true iff at least one instruction from the basic block \p BB is
  /// special.
  bool hasSpecialInstructions(const BasicBlock *BB);

  /// Returns true iff the first special instruction of \p Insn's block exists
  /// and dominates \p Insn.
  bool isPreceededBySpecialInstruction(const Instruction *Insn);

  /// A predicate that defines whether or not the instruction \p Insn is
  /// considered special and needs to be tracked. Implementing this method in
  /// subclasses allows tracking of implicit control flow, memory-writing
  /// instructions, or any other kind of instruction we might be interested
  /// in.
  virtual bool isSpecialInstruction(const Instruction *Insn) const = 0;

  virtual ~InstructionPrecedenceTracking() = default;

public:
  /// Notifies this tracking that we are going to insert a new instruction \p
  /// Inst into the basic block \p BB. It makes all necessary updates to
  /// internal caches to keep them consistent.
  void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);

  /// Notifies this tracking that we are going to remove the instruction \p
  /// Inst. It makes all necessary updates to internal caches to keep them
  /// consistent.
  void removeInstruction(const Instruction *Inst);

  /// Notifies this tracking that we are going to replace all uses of \p Inst.
  /// It makes all necessary updates to internal caches to keep them consistent.
  /// Should typically be called before a RAUW.
  void removeUsersOf(const Instruction *Inst);

  /// Invalidates all information from this tracking.
  void clear();
};

/// This class allows keeping track of instructions with implicit control flow.
/// These are instructions that may not pass execution to their successors. For
/// example, throwing calls and guards do not always do this. If we need to know
/// for sure that some instruction is guaranteed to execute if the given block
/// is reached, then we need to make sure that there is no implicit control flow
/// instruction (ICFI) preceding it. For example, this check is required if we
/// perform PRE, moving a non-speculatable instruction to another place.
class ImplicitControlFlowTracking : public InstructionPrecedenceTracking {
public:
  /// Returns the topmost instruction with implicit control flow from the given
  /// basic block. Returns nullptr if there is no such instruction in the block.
  const Instruction *getFirstICFI(const BasicBlock *BB) {
    return getFirstSpecialInstruction(BB);
  }

  /// Returns true if at least one instruction from the given basic block has
  /// implicit control flow.
  bool hasICF(const BasicBlock *BB) {
    return hasSpecialInstructions(BB);
  }

  /// Returns true if the first ICFI of Insn's block exists and dominates Insn.
  bool isDominatedByICFIFromSameBlock(const Instruction *Insn) {
    return isPreceededBySpecialInstruction(Insn);
  }

  bool isSpecialInstruction(const Instruction *Insn) const override;
};

class MemoryWriteTracking : public InstructionPrecedenceTracking {
public:
  /// Returns the topmost instruction that may write memory from the given
  /// basic block. Returns nullptr if there is no such instruction in the block.
  const Instruction *getFirstMemoryWrite(const BasicBlock *BB) {
    return getFirstSpecialInstruction(BB);
  }

  /// Returns true if at least one instruction from the given basic block may
  /// write memory.
  bool mayWriteToMemory(const BasicBlock *BB) {
    return hasSpecialInstructions(BB);
  }

  /// Returns true if the first memory writing instruction of Insn's block
  /// exists and dominates Insn.
  bool isDominatedByMemoryWriteFromSameBlock(const Instruction *Insn) {
    return isPreceededBySpecialInstruction(Insn);
  }

  bool isSpecialInstruction(const Instruction *Insn) const override;
};
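
// A minimal usage sketch (not part of this header; the move below is a
// hypothetical transform). A client keeps one tracking object per function
// and must notify it about every mutation it performs:
//
//   ImplicitControlFlowTracking ICF;
//   // Hoisting Insn to its block's entry is only safe if no instruction
//   // with implicit control flow precedes it within the block.
//   if (!ICF.isDominatedByICFIFromSameBlock(Insn)) {
//     ICF.removeInstruction(Insn);       // about to move it
//     Insn->moveBefore(&*BB->getFirstInsertionPt());
//     ICF.insertInstructionTo(Insn, BB); // re-register at the new position
//   }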

} // end namespace llvm

#endif // LLVM_ANALYSIS_INSTRUCTIONPRECEDENCETRACKING_H
//===- llvm/Analysis/DemandedBits.h - Determine demanded bits ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements a demanded bits analysis. A demanded bit is one that
// contributes to a result; bits that are not demanded can be either zero or
// one without affecting control or data flow. For example in this sequence:
//
//   %1 = add i32 %x, %y
//   %2 = trunc i32 %1 to i16
//
// Only the lowest 16 bits of %1 are demanded; the rest are removed by the
// trunc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DEMANDEDBITS_H
#define LLVM_ANALYSIS_DEMANDEDBITS_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/PassManager.h"
#include <optional>

namespace llvm {

class AssumptionCache;
class DominatorTree;
class Function;
class Instruction;
struct KnownBits;
class raw_ostream;

class DemandedBits {
public:
  DemandedBits(Function &F, AssumptionCache &AC, DominatorTree &DT) :
    F(F), AC(AC), DT(DT) {}

  /// Return the bits demanded from instruction I.
  ///
  /// For vector instructions individual vector elements are not distinguished:
  /// A bit is demanded if it is demanded for any of the vector elements. The
  /// size of the return value corresponds to the type size in bits of the
  /// scalar type.
  ///
  /// Instructions that do not have integer or vector of integer type are
  /// accepted, but will always produce a mask with all bits set.
  APInt getDemandedBits(Instruction *I);

  /// Return the bits demanded from use U.
  APInt getDemandedBits(Use *U);

  /// Return true if, during analysis, I could not be reached.
  bool isInstructionDead(Instruction *I);

  /// Return whether this use is dead by means of not having any demanded bits.
  bool isUseDead(Use *U);

  void print(raw_ostream &OS);

  /// Compute alive bits of one addition operand from alive output and known
  /// operand bits
  static APInt determineLiveOperandBitsAdd(unsigned OperandNo,
                                           const APInt &AOut,
                                           const KnownBits &LHS,
                                           const KnownBits &RHS);

  /// Compute alive bits of one subtraction operand from alive output and known
  /// operand bits
  static APInt determineLiveOperandBitsSub(unsigned OperandNo,
                                           const APInt &AOut,
                                           const KnownBits &LHS,
                                           const KnownBits &RHS);

private:
  void performAnalysis();
  void determineLiveOperandBits(const Instruction *UserI,
    const Value *Val, unsigned OperandNo,
    const APInt &AOut, APInt &AB,
    KnownBits &Known, KnownBits &Known2, bool &KnownBitsComputed);

  Function &F;
  AssumptionCache &AC;
  DominatorTree &DT;

  bool Analyzed = false;

  // The set of visited instructions (non-integer-typed only).
  SmallPtrSet<Instruction*, 32> Visited;
  DenseMap<Instruction *, APInt> AliveBits;
  // Uses with no demanded bits. If the user also has no demanded bits, the use
  // might not be stored explicitly in this map, to save memory during analysis.
  SmallPtrSet<Use *, 16> DeadUses;
};
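
// A minimal usage sketch (assumes F, AC, and DT are available in the caller,
// and that llvm/IR/InstIterator.h is included for instructions()):
//
//   DemandedBits DB(F, AC, DT);
//   for (Instruction &I : instructions(F)) {
//     APInt Demanded = DB.getDemandedBits(&I);
//     if (DB.isInstructionDead(&I))
//       ; // I was never reached during analysis: a removal candidate
//   }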

/// An analysis that produces \c DemandedBits for a function.
class DemandedBitsAnalysis : public AnalysisInfoMixin<DemandedBitsAnalysis> {
  friend AnalysisInfoMixin<DemandedBitsAnalysis>;

  static AnalysisKey Key;

public:
  /// Provide the result type for this analysis pass.
  using Result = DemandedBits;

  /// Run the analysis pass over a function and produce demanded bits
  /// information.
  DemandedBits run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for DemandedBits
class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
  raw_ostream &OS;

public:
  explicit DemandedBitsPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_DEMANDEDBITS_H
//===- OptimizationRemarkEmitter.h - Optimization Diagnostic ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Optimization diagnostic interfaces.  It's packaged as an analysis pass so
// that passes using this service become dependent on BFI as well.  BFI is
// used to compute the "hotness" of the diagnostic message.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
#define LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H

#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <optional>

namespace llvm {
class Function;
class Value;

/// The optimization diagnostic interface.
///
/// It allows reporting when optimizations are performed and when they are not
/// along with the reasons for it.  Hotness information of the corresponding
/// code region can be included in the remark if DiagnosticsHotnessRequested is
/// enabled in the LLVM context.
class OptimizationRemarkEmitter {
public:
  OptimizationRemarkEmitter(const Function *F, BlockFrequencyInfo *BFI)
      : F(F), BFI(BFI) {}

  /// This variant can be used to generate ORE on demand (without the
  /// analysis pass).
  ///
  /// Note that this ctor has a very different cost depending on whether
  /// F->getContext().getDiagnosticsHotnessRequested() is on or not.  If it's
  /// off, the operation is free.
  ///
  /// Whereas if DiagnosticsHotnessRequested is on, it is a fairly expensive
  /// operation since BFI and all its required analyses are computed.  This is
  /// for example useful for CGSCC passes that can't use function analysis
  /// passes in the old PM.
  OptimizationRemarkEmitter(const Function *F);

  OptimizationRemarkEmitter(OptimizationRemarkEmitter &&Arg)
      : F(Arg.F), BFI(Arg.BFI) {}

  OptimizationRemarkEmitter &operator=(OptimizationRemarkEmitter &&RHS) {
    F = RHS.F;
    BFI = RHS.BFI;
    return *this;
  }

  /// Handle invalidation events in the new pass manager.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

  /// Return true iff at least *some* remarks are enabled.
  bool enabled() const {
    return F->getContext().getLLVMRemarkStreamer() ||
           F->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled();
  }

  /// Output the remark via the diagnostic handler and to the
  /// optimization record file.
  void emit(DiagnosticInfoOptimizationBase &OptDiag);

  /// Take a lambda that returns a remark which will be emitted.  The second
  /// argument is only used to restrict this overload to callable types.
  template <typename T>
  void emit(T RemarkBuilder, decltype(RemarkBuilder()) * = nullptr) {
    // Avoid building the remark unless we know there are at least *some*
    // remarks enabled. We can't currently check whether remarks are requested
    // for the calling pass since that requires actually building the remark.

    if (enabled()) {
      auto R = RemarkBuilder();
      static_assert(
          std::is_base_of<DiagnosticInfoOptimizationBase, decltype(R)>::value,
          "the lambda passed to emit() must return a remark");
      emit((DiagnosticInfoOptimizationBase &)R);
    }
  }

  /// Whether we allow for extra compile-time budget to perform more
  /// analysis to produce fewer false positives.
  ///
  /// This is useful when reporting missed optimizations.  In this case we can
  /// use the extra analysis (1) to filter trivial false positives or (2) to
  /// provide more context so that non-trivial false positives can be quickly
  /// detected by the user.
  bool allowExtraAnalysis(StringRef PassName) const {
    return OptimizationRemarkEmitter::allowExtraAnalysis(*F, PassName);
  }
  static bool allowExtraAnalysis(const Function &F, StringRef PassName) {
    return allowExtraAnalysis(F.getContext(), PassName);
  }
  static bool allowExtraAnalysis(LLVMContext &Ctx, StringRef PassName) {
    return Ctx.getLLVMRemarkStreamer() ||
           Ctx.getDiagHandlerPtr()->isAnyRemarkEnabled(PassName);
  }

private:
  const Function *F;

  BlockFrequencyInfo *BFI;

  /// If we generate BFI on demand, we need to free it when ORE is freed.
  std::unique_ptr<BlockFrequencyInfo> OwnedBFI;

  /// Compute hotness from IR value (currently assumed to be a block) if PGO is
  /// available.
  std::optional<uint64_t> computeHotness(const Value *V);

  /// Similar but use value from \p OptDiag and update hotness there.
  void computeHotness(DiagnosticInfoIROptimization &OptDiag);

  /// Only allow verbose messages if we know we're filtering by hotness
  /// (BFI is only set in this case).
  bool shouldEmitVerbose() { return BFI != nullptr; }

  OptimizationRemarkEmitter(const OptimizationRemarkEmitter &) = delete;
  void operator=(const OptimizationRemarkEmitter &) = delete;
};

/// Add a small namespace to avoid name clashes with the classes used in
/// the streaming interface.  We want these to be short for better
/// write/readability.
namespace ore {
using NV = DiagnosticInfoOptimizationBase::Argument;
using setIsVerbose = DiagnosticInfoOptimizationBase::setIsVerbose;
using setExtraArgs = DiagnosticInfoOptimizationBase::setExtraArgs;
} // end namespace ore
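
// Example of the lambda-based emit() above (a sketch; "my-pass", the remark
// name, Inst, and Reason are assumptions, not part of this header):
//
//   ORE.emit([&]() {
//     return OptimizationRemarkMissed("my-pass", "NotVectorized", Inst)
//            << "loop not vectorized: " << ore::NV("Reason", Reason);
//   });
//
// The lambda keeps remark construction off the hot path: it only runs when
// enabled() is true.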

/// OptimizationRemarkEmitter legacy analysis pass
///
/// Note that this pass shouldn't generally be marked as preserved by other
/// passes.  It's holding onto BFI, so if the pass does not preserve BFI, BFI
/// could be freed.
class OptimizationRemarkEmitterWrapperPass : public FunctionPass {
  std::unique_ptr<OptimizationRemarkEmitter> ORE;

public:
  OptimizationRemarkEmitterWrapperPass();

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  OptimizationRemarkEmitter &getORE() {
    assert(ORE && "pass not run yet");
    return *ORE;
  }

  static char ID;
};

class OptimizationRemarkEmitterAnalysis
    : public AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis> {
  friend AnalysisInfoMixin<OptimizationRemarkEmitterAnalysis>;
  static AnalysisKey Key;

public:
  /// Provide the result typedef for this analysis pass.
  typedef OptimizationRemarkEmitter Result;

  /// Run the analysis pass over a function and produce an
  /// OptimizationRemarkEmitter.
  Result run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm
#endif // LLVM_ANALYSIS_OPTIMIZATIONREMARKEMITTER_H
//===- llvm/Analysis/Interval.h - Interval Class Declaration ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Interval class, which
// represents a set of CFG nodes and is a portion of an interval partition.
//
// Intervals have some interesting and useful properties, including the
// following:
//    1. The header node of an interval dominates all of the elements of the
//       interval
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INTERVAL_H
#define LLVM_ANALYSIS_INTERVAL_H

#include "llvm/ADT/GraphTraits.h"
#include <vector>

namespace llvm {

class BasicBlock;
class raw_ostream;

//===----------------------------------------------------------------------===//
//
/// Interval Class - An Interval is a set of nodes defined such that every node
/// in the interval has all of its predecessors in the interval (except for the
/// header)
///
class Interval {
  /// HeaderNode - The header BasicBlock, which dominates all BasicBlocks in this
  /// interval.  Also, any loops in this interval must go through the HeaderNode.
  ///
  BasicBlock *HeaderNode;

public:
  using succ_iterator = std::vector<BasicBlock*>::iterator;
  using pred_iterator = std::vector<BasicBlock*>::iterator;
  using node_iterator = std::vector<BasicBlock*>::iterator;

  inline Interval(BasicBlock *Header) : HeaderNode(Header) {
    Nodes.push_back(Header);
  }

  inline BasicBlock *getHeaderNode() const { return HeaderNode; }

  /// Nodes - The basic blocks in this interval.
  std::vector<BasicBlock*> Nodes;

  /// Successors - List of BasicBlocks that are reachable directly from nodes in
  /// this interval, but are not in the interval themselves.
  /// These nodes are necessarily header nodes for other intervals.
  std::vector<BasicBlock*> Successors;

  /// Predecessors - List of BasicBlocks that have this Interval's header block
  /// as one of their successors.
  std::vector<BasicBlock*> Predecessors;

  /// contains - Find out if a basic block is in this interval
  inline bool contains(BasicBlock *BB) const {
    for (BasicBlock *Node : Nodes)
      if (Node == BB)
        return true;
    return false;
    // I don't want the dependency on <algorithm>
    //return find(Nodes.begin(), Nodes.end(), BB) != Nodes.end();
  }

  /// isSuccessor - find out if a basic block is a successor of this Interval
  inline bool isSuccessor(BasicBlock *BB) const {
    for (BasicBlock *Successor : Successors)
      if (Successor == BB)
        return true;
    return false;
    // I don't want the dependency on <algorithm>
    //return find(Successors.begin(), Successors.end(), BB) != Successors.end();
  }

  /// Equality operator.  It is only valid to compare two intervals from the
  /// same partition; because of this, all we have to check is the header node
  /// for equality.
  inline bool operator==(const Interval &I) const {
    return HeaderNode == I.HeaderNode;
  }

  /// print - Show contents in human readable format...
  void print(raw_ostream &O) const;
};

/// succ_begin/succ_end - define methods so that Intervals may be used
/// just like BasicBlocks can with the succ_* functions, and *::succ_iterator.
///
inline Interval::succ_iterator succ_begin(Interval *I) {
  return I->Successors.begin();
}
inline Interval::succ_iterator succ_end(Interval *I)   {
  return I->Successors.end();
}

/// pred_begin/pred_end - define methods so that Intervals may be used
/// just like BasicBlocks can with the pred_* functions, and *::pred_iterator.
///
inline Interval::pred_iterator pred_begin(Interval *I) {
  return I->Predecessors.begin();
}
inline Interval::pred_iterator pred_end(Interval *I)   {
  return I->Predecessors.end();
}

template <> struct GraphTraits<Interval*> {
  using NodeRef = Interval *;
  using ChildIteratorType = Interval::succ_iterator;

  static NodeRef getEntryNode(Interval *I) { return I; }

  /// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
  static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};

template <> struct GraphTraits<Inverse<Interval*>> {
  using NodeRef = Interval *;
  using ChildIteratorType = Interval::pred_iterator;

  static NodeRef getEntryNode(Inverse<Interval *> G) { return G.Graph; }
  static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
  static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
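
// With these specializations the generic graph utilities apply to intervals.
// Note that the children of an Interval node are successor *BasicBlocks*
// (the header blocks of other intervals). A small sketch (I is assumed):
//
//   for (BasicBlock *SuccHeader : children<Interval *>(I))
//     ; // headers of the intervals reachable from I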

} // end namespace llvm

#endif // LLVM_ANALYSIS_INTERVAL_H
//==- llvm/Analysis/MemoryBuiltins.h - Calls to memory builtins --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
#define LLVM_ANALYSIS_MEMORYBUILTINS_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/ValueHandle.h"
#include <cstdint>
#include <optional>
#include <utility>

namespace llvm {

class AllocaInst;
class AAResults;
class Argument;
class ConstantPointerNull;
class DataLayout;
class ExtractElementInst;
class ExtractValueInst;
class GEPOperator;
class GlobalAlias;
class GlobalVariable;
class Instruction;
class IntegerType;
class IntrinsicInst;
class IntToPtrInst;
class LLVMContext;
class LoadInst;
class PHINode;
class SelectInst;
class Type;
class UndefValue;
class Value;

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI);
bool isAllocationFn(const Value *V,
                    function_ref<const TargetLibraryInfo &(Function &)> GetTLI);

/// Tests if a value is a call or invoke to a library function that
/// allocates memory via new.
bool isNewLikeFn(const Value *V, const TargetLibraryInfo *TLI);

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI);

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI);

/// Tests if a function is a library function that reallocates memory (e.g.,
/// realloc).
bool isReallocLikeFn(const Function *F);

/// If this is a call to a realloc function, return the reallocated operand.
Value *getReallocatedOperand(const CallBase *CB);

//===----------------------------------------------------------------------===//
//  free Call Utility Functions.
//

/// isLibFreeFunction - Returns true if the function is a builtin free()
bool isLibFreeFunction(const Function *F, const LibFunc TLIFn);

/// If this is a call to a free function, return the freed operand.
Value *getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI);

//===----------------------------------------------------------------------===//
//  Properties of allocation functions
//

/// Return true if this is a call to an allocation function that does not have
/// side effects that we are required to preserve beyond the effect of
/// allocating a new object.
/// Ex: If our allocation routine has a counter for the number of objects
/// allocated, and the program prints it on exit, can the value change due
/// to optimization? Answer is highly language dependent.
/// Note: *Removable* really does mean removable; it does not mean observable.
/// A language (e.g. C++) can allow removing allocations without allowing
/// insertion or speculative execution of allocation routines.
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI);

/// Gets the alignment argument for an aligned_alloc-like function, using either
/// built-in knowledge based on function names/signatures or allocalign
/// attributes. Note: the Value returned may not indicate a valid alignment, per
/// the definition of the allocalign attribute.
Value *getAllocAlignment(const CallBase *V, const TargetLibraryInfo *TLI);

/// Return the size of the requested allocation. With a trivial mapper, this is
/// similar to calling getObjectSize(..., Exact), but without looking through
/// calls that return their argument. A mapper function can be used to replace
/// one Value* (operand to the allocation) with another. This is useful when
/// doing abstract interpretation.
std::optional<APInt> getAllocSize(
    const CallBase *CB, const TargetLibraryInfo *TLI,
    function_ref<const Value *(const Value *)> Mapper = [](const Value *V) {
      return V;
    });

/// If this is a call to an allocation function that initializes memory to a
/// fixed value, return said value in the requested type.  Otherwise, return
/// nullptr.
Constant *getInitialValueOfAllocation(const Value *V,
                                      const TargetLibraryInfo *TLI,
                                      Type *Ty);

/// If a function is part of an allocation family (e.g.
/// malloc/realloc/calloc/free), return the identifier for its family
/// of functions.
std::optional<StringRef> getAllocationFamily(const Value *I,
                                             const TargetLibraryInfo *TLI);

//===----------------------------------------------------------------------===//
//  Utility functions to compute size of objects.
//

/// Various options to control the behavior of getObjectSize.
struct ObjectSizeOpts {
  /// Controls how we handle conditional statements with unknown conditions.
  enum class Mode : uint8_t {
    /// All branches must be known and have the same size, starting from the
    /// offset, to be merged.
    ExactSizeFromOffset,
    /// All branches must be known and have the same underlying size and offset
    /// to be merged.
    ExactUnderlyingSizeAndOffset,
    /// Evaluate all branches of an unknown condition. If all evaluations
    /// succeed, pick the minimum size.
    Min,
    /// Same as Min, except we pick the maximum size of all of the branches.
    Max,
  };

  /// How we want to evaluate this object's size.
  Mode EvalMode = Mode::ExactSizeFromOffset;
  /// Whether to round the result up to the alignment of allocas, byval
  /// arguments, and global variables.
  bool RoundToAlign = false;
  /// If this is true, null pointers in address space 0 will be treated as
  /// though they can't be evaluated. Otherwise, null is always considered to
  /// point to a 0 byte region of memory.
  bool NullIsUnknownSize = false;
  /// If set, used for more accurate evaluation
  AAResults *AA = nullptr;
};

/// Compute the size of the object pointed to by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise. In this context,
/// "object" means the region of memory starting at Ptr to the end of the
/// underlying object pointed to by Ptr.
///
/// WARNING: The object size returned is the allocation size.  This does not
/// imply dereferenceability at the site of use, since the object may be freed
/// in between.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                   const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {});
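
// A minimal sketch of a typical query (Ptr, DL, and TLI come from the caller;
// the option values are purely illustrative):
//
//   ObjectSizeOpts Opts;
//   Opts.NullIsUnknownSize = true; // treat null as unevaluable
//   uint64_t Size;
//   if (getObjectSize(Ptr, Size, DL, TLI, Opts))
//     ; // the object behind Ptr is known to span Size bytes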

/// Try to turn a call to \@llvm.objectsize into an integer value of the given
/// Type. Returns null on failure. If MustSucceed is true, this function will
/// not return null, and may return conservative values governed by the second
/// argument of the call to objectsize.
Value *lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL,
                           const TargetLibraryInfo *TLI, bool MustSucceed);
Value *lowerObjectSizeCall(
    IntrinsicInst *ObjectSize, const DataLayout &DL,
    const TargetLibraryInfo *TLI, AAResults *AA, bool MustSucceed,
    SmallVectorImpl<Instruction *> *InsertedInstructions = nullptr);

using SizeOffsetType = std::pair<APInt, APInt>;

/// Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
  : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  ObjectSizeOpts Options;
  unsigned IntTyBits;
  APInt Zero;
  SmallPtrSet<Instruction *, 8> SeenInsts;

  APInt align(APInt Size, MaybeAlign Align);

  SizeOffsetType unknown() {
    return std::make_pair(APInt(), APInt());
  }

public:
  ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
                          LLVMContext &Context, ObjectSizeOpts Options = {});

  SizeOffsetType compute(Value *V);

  static bool knownSize(const SizeOffsetType &SizeOffset) {
    return SizeOffset.first.getBitWidth() > 1;
  }

  static bool knownOffset(const SizeOffsetType &SizeOffset) {
    return SizeOffset.second.getBitWidth() > 1;
  }

  static bool bothKnown(const SizeOffsetType &SizeOffset) {
    return knownSize(SizeOffset) && knownOffset(SizeOffset);
  }

  // These are "private", except they can't actually be made private. Only
  // compute() should be used by external users.
  SizeOffsetType visitAllocaInst(AllocaInst &I);
  SizeOffsetType visitArgument(Argument &A);
  SizeOffsetType visitCallBase(CallBase &CB);
  SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
  SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
  SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
  SizeOffsetType visitGlobalAlias(GlobalAlias &GA);
  SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
  SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
  SizeOffsetType visitLoadInst(LoadInst &I);
  SizeOffsetType visitPHINode(PHINode&);
  SizeOffsetType visitSelectInst(SelectInst &I);
  SizeOffsetType visitUndefValue(UndefValue&);
  SizeOffsetType visitInstruction(Instruction &I);

private:
  SizeOffsetType findLoadSizeOffset(
      LoadInst &LoadFrom, BasicBlock &BB, BasicBlock::iterator From,
      SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
      unsigned &ScannedInstCount);
  SizeOffsetType combineSizeOffset(SizeOffsetType LHS, SizeOffsetType RHS);
  SizeOffsetType computeImpl(Value *V);
  bool CheckedZextOrTrunc(APInt &I);
};
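
// A minimal sketch (DL, TLI, and Ctx are assumed). Unknown sizes and offsets
// are encoded as APInts of bit width <= 1, which is what knownSize() and
// knownOffset() test for:
//
//   ObjectSizeOffsetVisitor Visitor(DL, TLI, Ctx);
//   SizeOffsetType SO = Visitor.compute(V);
//   if (ObjectSizeOffsetVisitor::bothKnown(SO)) {
//     APInt BytesToEnd = SO.first - SO.second; // size minus offset into it
//   }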

using SizeOffsetEvalType = std::pair<Value *, Value *>;

/// Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
  : public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  using WeakEvalType = std::pair<WeakTrackingVH, WeakTrackingVH>;
  using CacheMapTy = DenseMap<const Value *, WeakEvalType>;
  using PtrSetTy = SmallPtrSet<const Value *, 8>;

  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  LLVMContext &Context;
  BuilderTy Builder;
  IntegerType *IntTy;
  Value *Zero;
  CacheMapTy CacheMap;
  PtrSetTy SeenVals;
  ObjectSizeOpts EvalOpts;
  SmallPtrSet<Instruction *, 8> InsertedInstructions;

  SizeOffsetEvalType compute_(Value *V);

public:
  static SizeOffsetEvalType unknown() {
    return std::make_pair(nullptr, nullptr);
  }

  ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
                            LLVMContext &Context, ObjectSizeOpts EvalOpts = {});

  SizeOffsetEvalType compute(Value *V);

  bool knownSize(SizeOffsetEvalType SizeOffset) {
    return SizeOffset.first;
  }

  bool knownOffset(SizeOffsetEvalType SizeOffset) {
    return SizeOffset.second;
  }

  bool anyKnown(SizeOffsetEvalType SizeOffset) {
    return knownSize(SizeOffset) || knownOffset(SizeOffset);
  }

  bool bothKnown(SizeOffsetEvalType SizeOffset) {
    return knownSize(SizeOffset) && knownOffset(SizeOffset);
  }

  // The individual instruction visitors should be treated as private.
  SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
  SizeOffsetEvalType visitCallBase(CallBase &CB);
  SizeOffsetEvalType visitExtractElementInst(ExtractElementInst &I);
  SizeOffsetEvalType visitExtractValueInst(ExtractValueInst &I);
  SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
  SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
  SizeOffsetEvalType visitLoadInst(LoadInst &I);
  SizeOffsetEvalType visitPHINode(PHINode &PHI);
  SizeOffsetEvalType visitSelectInst(SelectInst &I);
  SizeOffsetEvalType visitInstruction(Instruction &I);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYBUILTINS_H
//===-- TargetLibraryInfo.h - Library information ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
#define LLVM_ANALYSIS_TARGETLIBRARYINFO_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/TargetParser/Triple.h"
#include <optional>

namespace llvm {

template <typename T> class ArrayRef;
class Function;
class Module;
class Triple;

/// Describes a possible vectorization of a function.
/// Function 'VectorFnName' is equivalent to 'ScalarFnName' vectorized
/// by a factor 'VectorizationFactor'.
struct VecDesc {
  StringRef ScalarFnName;
  StringRef VectorFnName;
  ElementCount VectorizationFactor;
  bool Masked;
};

  enum LibFunc : unsigned {
#define TLI_DEFINE_ENUM
#include "llvm/Analysis/TargetLibraryInfo.def"

    NumLibFuncs,
    NotLibFunc
  };

/// Implementation of the target library information.
///
/// This class constructs tables that hold the target library information and
/// make it available. However, it is somewhat expensive to compute and only
/// depends on the triple. So users typically interact with the \c
/// TargetLibraryInfo wrapper below.
class TargetLibraryInfoImpl {
  friend class TargetLibraryInfo;

  unsigned char AvailableArray[(NumLibFuncs+3)/4];
  DenseMap<unsigned, std::string> CustomNames;
  static StringLiteral const StandardNames[NumLibFuncs];
  bool ShouldExtI32Param, ShouldExtI32Return, ShouldSignExtI32Param, ShouldSignExtI32Return;
  unsigned SizeOfInt;

  enum AvailabilityState {
    StandardName = 3, // (memset to all ones)
    CustomName = 1,
    Unavailable = 0  // (memset to all zeros)
  };
  void setState(LibFunc F, AvailabilityState State) {
    AvailableArray[F/4] &= ~(3 << 2*(F&3));
    AvailableArray[F/4] |= State << 2*(F&3);
  }
  AvailabilityState getState(LibFunc F) const {
    return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
  }

  /// Vectorization descriptors - sorted by ScalarFnName.
  std::vector<VecDesc> VectorDescs;
  /// Scalarization descriptors - same content as VectorDescs but sorted based
  /// on VectorFnName rather than ScalarFnName.
  std::vector<VecDesc> ScalarDescs;

  /// Return true if the function type FTy is valid for the library function
  /// F, regardless of whether the function is available.
  bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
                              const Module &M) const;

public:
  /// List of known vector-functions libraries.
  ///
  /// The vector-functions library defines which functions are vectorizable
  /// and with which factor. The library can be specified by either the
  /// frontend or a command-line option, and is then used by
  /// addVectorizableFunctionsFromVecLib to fill the tables of
  /// vectorizable functions.
  enum VectorLibrary {
    NoLibrary,        // Don't use any vector library.
    Accelerate,       // Use Accelerate framework.
    DarwinLibSystemM, // Use Darwin's libsystem_m.
    LIBMVEC_X86,      // GLIBC Vector Math library.
    MASSV,            // IBM MASS vector library.
    SVML,             // Intel short vector math library.
    SLEEFGNUABI, // SLEEF - SIMD Library for Evaluating Elementary Functions.
    ArmPL        // Arm Performance Libraries.
  };

  TargetLibraryInfoImpl();
  explicit TargetLibraryInfoImpl(const Triple &T);

  // Provide value semantics.
  TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
  TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
  TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
  TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);

  /// Searches for a particular function name.
  ///
  /// If it is one of the known library functions, return true and set F to the
  /// corresponding value.
  bool getLibFunc(StringRef funcName, LibFunc &F) const;

  /// Searches for a particular function name, also checking that its type is
  /// valid for the library function matching that name.
  ///
  /// If it is one of the known library functions, return true and set F to the
  /// corresponding value.
  ///
  /// FDecl is assumed to have a parent Module when using this function.
  bool getLibFunc(const Function &FDecl, LibFunc &F) const;

  /// Forces a function to be marked as unavailable.
  void setUnavailable(LibFunc F) {
    setState(F, Unavailable);
  }

  /// Forces a function to be marked as available.
  void setAvailable(LibFunc F) {
    setState(F, StandardName);
  }

  /// Forces a function to be marked as available, providing an alternate name
  /// that must be used.
  void setAvailableWithName(LibFunc F, StringRef Name) {
    if (StandardNames[F] != Name) {
      setState(F, CustomName);
      CustomNames[F] = std::string(Name);
      assert(CustomNames.contains(F));
    } else {
      setState(F, StandardName);
    }
  }

  /// Disables all builtins.
  ///
  /// This can be used for options like -fno-builtin.
  void disableAllFunctions();

  /// Add a set of scalar -> vector mappings, queryable via
  /// getVectorizedFunction and getScalarizedFunction.
  void addVectorizableFunctions(ArrayRef<VecDesc> Fns);

  /// Calls addVectorizableFunctions with a known preset of functions for the
  /// given vector library.
  void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib,
                                          const llvm::Triple &TargetTriple);

  /// Return true if the function F has a vector equivalent with vectorization
  /// factor VF.
  bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const {
    return !(getVectorizedFunction(F, VF, false).empty() &&
             getVectorizedFunction(F, VF, true).empty());
  }

  /// Return true if the function F has a vector equivalent with any
  /// vectorization factor.
  bool isFunctionVectorizable(StringRef F) const;

  /// Return the name of the equivalent of F, vectorized with factor VF. If no
  /// such mapping exists, return the empty string.
  StringRef getVectorizedFunction(StringRef F, const ElementCount &VF,
                                  bool Masked) const;

  /// Set to true iff i32 parameters to library functions should have signext
  /// or zeroext attributes if they correspond to C-level int or unsigned int,
  /// respectively.
  void setShouldExtI32Param(bool Val) {
    ShouldExtI32Param = Val;
  }

  /// Set to true iff i32 results from library functions should have signext
  /// or zeroext attributes if they correspond to C-level int or unsigned int,
  /// respectively.
  void setShouldExtI32Return(bool Val) {
    ShouldExtI32Return = Val;
  }

  /// Set to true iff i32 parameters to library functions should have signext
  /// attribute if they correspond to C-level int or unsigned int.
  void setShouldSignExtI32Param(bool Val) {
    ShouldSignExtI32Param = Val;
  }

  /// Set to true iff i32 results from library functions should have signext
  /// attribute if they correspond to C-level int or unsigned int.
  void setShouldSignExtI32Return(bool Val) {
    ShouldSignExtI32Return = Val;
  }

  /// Returns the size of the wchar_t type in bytes or 0 if the size is unknown.
  /// This queries the 'wchar_size' metadata.
  unsigned getWCharSize(const Module &M) const;

  /// Returns the size of the size_t type in bits.
  unsigned getSizeTSize(const Module &M) const;

  /// Get size of a C-level int or unsigned int, in bits.
  unsigned getIntSize() const {
    return SizeOfInt;
  }

  /// Initialize the C-level size of an integer.
  void setIntSize(unsigned Bits) {
    SizeOfInt = Bits;
  }

  /// Returns the largest vectorization factor used in the list of
  /// vector functions.
  void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
                   ElementCount &Scalable) const;

  /// Returns true if call site / callee has cdecl-compatible calling
  /// conventions.
  static bool isCallingConvCCompatible(CallBase *CI);
  static bool isCallingConvCCompatible(Function *Callee);
};

/// Provides information about what library functions are available for
/// the current target.
///
/// This both allows optimizations to handle them specially and frontends to
/// disable such optimizations through -fno-builtin etc.
class TargetLibraryInfo {
  friend class TargetLibraryAnalysis;
  friend class TargetLibraryInfoWrapperPass;

  /// The global (module level) TLI info.
  const TargetLibraryInfoImpl *Impl;

  /// Support for -fno-builtin* options as function attributes, overrides
  /// information in global TargetLibraryInfoImpl.
  BitVector OverrideAsUnavailable;

public:
  explicit TargetLibraryInfo(const TargetLibraryInfoImpl &Impl,
                             std::optional<const Function *> F = std::nullopt)
      : Impl(&Impl), OverrideAsUnavailable(NumLibFuncs) {
    if (!F)
      return;
    if ((*F)->hasFnAttribute("no-builtins"))
      disableAllFunctions();
    else {
      // Disable individual libc/libm calls in TargetLibraryInfo.
      LibFunc LF;
      AttributeSet FnAttrs = (*F)->getAttributes().getFnAttrs();
      for (const Attribute &Attr : FnAttrs) {
        if (!Attr.isStringAttribute())
          continue;
        auto AttrStr = Attr.getKindAsString();
        if (!AttrStr.consume_front("no-builtin-"))
          continue;
        if (getLibFunc(AttrStr, LF))
          setUnavailable(LF);
      }
    }
  }

  // Provide value semantics.
  TargetLibraryInfo(const TargetLibraryInfo &TLI) = default;
  TargetLibraryInfo(TargetLibraryInfo &&TLI)
      : Impl(TLI.Impl), OverrideAsUnavailable(TLI.OverrideAsUnavailable) {}
  TargetLibraryInfo &operator=(const TargetLibraryInfo &TLI) = default;
  TargetLibraryInfo &operator=(TargetLibraryInfo &&TLI) {
    Impl = TLI.Impl;
    OverrideAsUnavailable = TLI.OverrideAsUnavailable;
    return *this;
  }

  /// Determine whether a callee with the given TLI can be inlined into
  /// caller with this TLI, based on 'nobuiltin' attributes. When requested,
  /// allow inlining into a caller with a superset of the callee's nobuiltin
  /// attributes, which is conservatively correct.
  bool areInlineCompatible(const TargetLibraryInfo &CalleeTLI,
                           bool AllowCallerSuperset) const {
    if (!AllowCallerSuperset)
      return OverrideAsUnavailable == CalleeTLI.OverrideAsUnavailable;
    BitVector B = OverrideAsUnavailable;
    B |= CalleeTLI.OverrideAsUnavailable;
    // We can inline if the union of the caller and callee's nobuiltin
    // attributes is no stricter than the caller's nobuiltin attributes.
    return B == OverrideAsUnavailable;
  }

  /// Return true if the function type FTy is valid for the library function
  /// F, regardless of whether the function is available.
  bool isValidProtoForLibFunc(const FunctionType &FTy, LibFunc F,
                              const Module &M) const {
    return Impl->isValidProtoForLibFunc(FTy, F, M);
  }

  /// Searches for a particular function name.
  ///
  /// If it is one of the known library functions, return true and set F to the
  /// corresponding value.
  bool getLibFunc(StringRef funcName, LibFunc &F) const {
    return Impl->getLibFunc(funcName, F);
  }

  bool getLibFunc(const Function &FDecl, LibFunc &F) const {
    return Impl->getLibFunc(FDecl, F);
  }

  /// If a callbase does not have the 'nobuiltin' attribute, return if the
  /// called function is a known library function and set F to that function.
  bool getLibFunc(const CallBase &CB, LibFunc &F) const {
    return !CB.isNoBuiltin() && CB.getCalledFunction() &&
           getLibFunc(*(CB.getCalledFunction()), F);
  }
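
  // A minimal sketch of the usual query pattern (CB is an assumed CallBase):
  //
  //   LibFunc LF;
  //   if (TLI.getLibFunc(CB, LF) && TLI.has(LF) && LF == LibFunc_malloc)
  //     ; // CB really is malloc on this target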

  /// Disables all builtins.
  ///
  /// This can be used for options like -fno-builtin.
  void disableAllFunctions() LLVM_ATTRIBUTE_UNUSED {
    OverrideAsUnavailable.set();
  }

  /// Forces a function to be marked as unavailable.
  void setUnavailable(LibFunc F) LLVM_ATTRIBUTE_UNUSED {
    OverrideAsUnavailable.set(F);
  }

  TargetLibraryInfoImpl::AvailabilityState getState(LibFunc F) const {
    if (OverrideAsUnavailable[F])
      return TargetLibraryInfoImpl::Unavailable;
    return Impl->getState(F);
  }

  /// Tests whether a library function is available.
  bool has(LibFunc F) const {
    return getState(F) != TargetLibraryInfoImpl::Unavailable;
  }
  bool isFunctionVectorizable(StringRef F, const ElementCount &VF) const {
    return Impl->isFunctionVectorizable(F, VF);
  }
  bool isFunctionVectorizable(StringRef F) const {
    return Impl->isFunctionVectorizable(F);
  }
  StringRef getVectorizedFunction(StringRef F, const ElementCount &VF,
                                  bool Masked = false) const {
    return Impl->getVectorizedFunction(F, VF, Masked);
  }

  /// Tests if the function is both available and a candidate for optimized code
  /// generation.
  bool hasOptimizedCodeGen(LibFunc F) const {
    if (getState(F) == TargetLibraryInfoImpl::Unavailable)
      return false;
    switch (F) {
    default: break;
    case LibFunc_copysign:     case LibFunc_copysignf:  case LibFunc_copysignl:
    case LibFunc_fabs:         case LibFunc_fabsf:      case LibFunc_fabsl:
    case LibFunc_sin:          case LibFunc_sinf:       case LibFunc_sinl:
    case LibFunc_cos:          case LibFunc_cosf:       case LibFunc_cosl:
    case LibFunc_sqrt:         case LibFunc_sqrtf:      case LibFunc_sqrtl:
    case LibFunc_sqrt_finite:  case LibFunc_sqrtf_finite:
                                                   case LibFunc_sqrtl_finite:
    case LibFunc_fmax:         case LibFunc_fmaxf:      case LibFunc_fmaxl:
    case LibFunc_fmin:         case LibFunc_fminf:      case LibFunc_fminl:
    case LibFunc_floor:        case LibFunc_floorf:     case LibFunc_floorl:
    case LibFunc_nearbyint:    case LibFunc_nearbyintf: case LibFunc_nearbyintl:
    case LibFunc_ceil:         case LibFunc_ceilf:      case LibFunc_ceill:
    case LibFunc_rint:         case LibFunc_rintf:      case LibFunc_rintl:
    case LibFunc_round:        case LibFunc_roundf:     case LibFunc_roundl:
    case LibFunc_trunc:        case LibFunc_truncf:     case LibFunc_truncl:
    case LibFunc_log2:         case LibFunc_log2f:      case LibFunc_log2l:
    case LibFunc_exp2:         case LibFunc_exp2f:      case LibFunc_exp2l:
    case LibFunc_ldexp:        case LibFunc_ldexpf:     case LibFunc_ldexpl:
    case LibFunc_memcpy:       case LibFunc_memset:     case LibFunc_memmove:
    case LibFunc_memcmp:       case LibFunc_bcmp:       case LibFunc_strcmp:
    case LibFunc_strcpy:       case LibFunc_stpcpy:     case LibFunc_strlen:
    case LibFunc_strnlen:      case LibFunc_memchr:     case LibFunc_mempcpy:
      return true;
    }
    return false;
  }

  StringRef getName(LibFunc F) const {
    auto State = getState(F);
    if (State == TargetLibraryInfoImpl::Unavailable)
      return StringRef();
    if (State == TargetLibraryInfoImpl::StandardName)
      return Impl->StandardNames[F];
    assert(State == TargetLibraryInfoImpl::CustomName);
    return Impl->CustomNames.find(F)->second;
  }

  static void initExtensionsForTriple(bool &ShouldExtI32Param,
                                      bool &ShouldExtI32Return,
                                      bool &ShouldSignExtI32Param,
                                      bool &ShouldSignExtI32Return,
                                      const Triple &T) {
    ShouldExtI32Param     = ShouldExtI32Return     = false;
    ShouldSignExtI32Param = ShouldSignExtI32Return = false;

    // PowerPC64, Sparc64, SystemZ need signext/zeroext on i32 parameters and
    // returns corresponding to C-level ints and unsigned ints.
    if (T.isPPC64() || T.getArch() == Triple::sparcv9 ||
        T.getArch() == Triple::systemz) {
      ShouldExtI32Param = true;
      ShouldExtI32Return = true;
    }
    // LoongArch, Mips, and riscv64, on the other hand, need signext on i32
    // parameters corresponding to both signed and unsigned ints.
    if (T.isLoongArch() || T.isMIPS() || T.isRISCV64()) {
      ShouldSignExtI32Param = true;
    }
    // LoongArch and riscv64 need signext on i32 returns corresponding to both
    // signed and unsigned ints.
    if (T.isLoongArch() || T.isRISCV64()) {
      ShouldSignExtI32Return = true;
    }
  }

  /// Returns extension attribute kind to be used for i32 parameters
  /// corresponding to C-level int or unsigned int.  May be zeroext, signext,
  /// or none.
private:
  static Attribute::AttrKind getExtAttrForI32Param(bool ShouldExtI32Param_,
                                                   bool ShouldSignExtI32Param_,
                                                   bool Signed = true) {
    if (ShouldExtI32Param_)
      return Signed ? Attribute::SExt : Attribute::ZExt;
    if (ShouldSignExtI32Param_)
      return Attribute::SExt;
    return Attribute::None;
  }

public:
  static Attribute::AttrKind getExtAttrForI32Param(const Triple &T,
                                                   bool Signed = true) {
    bool ShouldExtI32Param, ShouldExtI32Return;
    bool ShouldSignExtI32Param, ShouldSignExtI32Return;
    initExtensionsForTriple(ShouldExtI32Param, ShouldExtI32Return,
                            ShouldSignExtI32Param, ShouldSignExtI32Return, T);
    return getExtAttrForI32Param(ShouldExtI32Param, ShouldSignExtI32Param,
                                 Signed);
  }

  Attribute::AttrKind getExtAttrForI32Param(bool Signed = true) const {
    return getExtAttrForI32Param(Impl->ShouldExtI32Param,
                                 Impl->ShouldSignExtI32Param, Signed);
  }

  /// Returns extension attribute kind to be used for i32 return values
  /// corresponding to C-level int or unsigned int.  May be zeroext, signext,
  /// or none.
private:
  static Attribute::AttrKind getExtAttrForI32Return(bool ShouldExtI32Return_,
                                                    bool ShouldSignExtI32Return_,
                                                    bool Signed) {
    if (ShouldExtI32Return_)
      return Signed ? Attribute::SExt : Attribute::ZExt;
    if (ShouldSignExtI32Return_)
      return Attribute::SExt;
    return Attribute::None;
  }

public:
  static Attribute::AttrKind getExtAttrForI32Return(const Triple &T,
                                                   bool Signed = true) {
    bool ShouldExtI32Param, ShouldExtI32Return;
    bool ShouldSignExtI32Param, ShouldSignExtI32Return;
    initExtensionsForTriple(ShouldExtI32Param, ShouldExtI32Return,
                            ShouldSignExtI32Param, ShouldSignExtI32Return, T);
    return getExtAttrForI32Return(ShouldExtI32Return, ShouldSignExtI32Return,
                                  Signed);
  }

  Attribute::AttrKind getExtAttrForI32Return(bool Signed = true) const {
    return getExtAttrForI32Return(Impl->ShouldExtI32Return,
                                  Impl->ShouldSignExtI32Return, Signed);
  }

  // Helper to create an AttributeList for args (and ret val) which all have
  // the same signedness. Attributes in AL may be passed in to include them
  // as well in the returned AttributeList.
  AttributeList getAttrList(LLVMContext *C, ArrayRef<unsigned> ArgNos,
                            bool Signed, bool Ret = false,
                            AttributeList AL = AttributeList()) const {
    if (auto AK = getExtAttrForI32Param(Signed))
      for (auto ArgNo : ArgNos)
        AL = AL.addParamAttribute(*C, ArgNo, AK);
    if (Ret)
      if (auto AK = getExtAttrForI32Return(Signed))
        AL = AL.addRetAttribute(*C, AK);
    return AL;
  }
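
  // For example, marking the first two i32 parameters and the i32 return of
  // an emitted libcall (a sketch; Ctx is an assumed LLVMContext*):
  //
  //   AttributeList AL =
  //       TLI.getAttrList(Ctx, /*ArgNos=*/{0, 1}, /*Signed=*/true,
  //                       /*Ret=*/true);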

  /// \copydoc TargetLibraryInfoImpl::getWCharSize()
  unsigned getWCharSize(const Module &M) const {
    return Impl->getWCharSize(M);
  }

  /// \copydoc TargetLibraryInfoImpl::getSizeTSize()
  unsigned getSizeTSize(const Module &M) const { return Impl->getSizeTSize(M); }

  /// \copydoc TargetLibraryInfoImpl::getIntSize()
  unsigned getIntSize() const {
    return Impl->getIntSize();
  }

  /// Handle invalidation from the pass manager.
  ///
  /// If we try to invalidate this info, just return false. It cannot become
  /// invalid even if the module or function changes.
  bool invalidate(Module &, const PreservedAnalyses &,
                  ModuleAnalysisManager::Invalidator &) {
    return false;
  }
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }
  /// Returns the largest vectorization factor used in the list of
  /// vector functions.
  void getWidestVF(StringRef ScalarF, ElementCount &FixedVF,
                   ElementCount &ScalableVF) const {
    Impl->getWidestVF(ScalarF, FixedVF, ScalableVF);
  }

  /// Check if the function "F" is listed in a library known to LLVM.
  bool isKnownVectorFunctionInLibrary(StringRef F) const {
    return this->isFunctionVectorizable(F);
  }
};

/// Analysis pass providing the \c TargetLibraryInfo.
///
/// Note that this pass's result cannot be invalidated; it is immutable for the
/// life of the module.
class TargetLibraryAnalysis : public AnalysisInfoMixin<TargetLibraryAnalysis> {
public:
  typedef TargetLibraryInfo Result;

  /// Default construct the library analysis.
  ///
  /// This will use the module's triple to construct the library info for that
  /// module.
  TargetLibraryAnalysis() = default;

  /// Construct a library analysis with baseline Module-level info.
  ///
  /// This will be supplemented with Function-specific info in the Result.
  TargetLibraryAnalysis(TargetLibraryInfoImpl BaselineInfoImpl)
      : BaselineInfoImpl(std::move(BaselineInfoImpl)) {}

  TargetLibraryInfo run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetLibraryAnalysis>;
  static AnalysisKey Key;

  std::optional<TargetLibraryInfoImpl> BaselineInfoImpl;
};

class TargetLibraryInfoWrapperPass : public ImmutablePass {
  TargetLibraryAnalysis TLA;
  std::optional<TargetLibraryInfo> TLI;

  virtual void anchor();

public:
  static char ID;
  TargetLibraryInfoWrapperPass();
  explicit TargetLibraryInfoWrapperPass(const Triple &T);
  explicit TargetLibraryInfoWrapperPass(const TargetLibraryInfoImpl &TLI);

  TargetLibraryInfo &getTLI(const Function &F) {
    FunctionAnalysisManager DummyFAM;
    TLI = TLA.run(F, DummyFAM);
    return *TLI;
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_TARGETLIBRARYINFO_H
//====- TargetFolder.h - Constant folding helper ---------------*- C++ -*-====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the TargetFolder class, a helper for IRBuilder.
// It provides IRBuilder with a set of methods for creating constants with
// target dependent folding, in addition to the same target-independent
// folding that the ConstantFolder class provides.  For general constant
// creation and folding, use ConstantExpr and the routines in
// llvm/Analysis/ConstantFolding.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETFOLDER_H
#define LLVM_ANALYSIS_TARGETFOLDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilderFolder.h"
#include "llvm/IR/Operator.h"

namespace llvm {

class Constant;
class DataLayout;
class Type;

/// TargetFolder - Create constants with target dependent folding.
class TargetFolder final : public IRBuilderFolder {
  const DataLayout &DL;

  /// Fold - Fold the constant using target specific information.
  Constant *Fold(Constant *C) const {
    return ConstantFoldConstant(C, DL);
  }

  virtual void anchor();

public:
  explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
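
  // A typical use is as the folder parameter of an IRBuilder (a sketch; BB is
  // an assumed insertion block and DL its module's DataLayout):
  //
  //   IRBuilder<TargetFolder> Builder(BB, TargetFolder(DL));
  //   Value *V = Builder.CreateAdd(L, R); // folds when L and R are constants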

  //===--------------------------------------------------------------------===//
  // Value-based folders.
  //
  // Return an existing value or a constant if the operation can be simplified.
  // Otherwise return nullptr.
  //===--------------------------------------------------------------------===//

  Value *FoldBinOp(Instruction::BinaryOps Opc, Value *LHS,
                   Value *RHS) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC) {
      if (ConstantExpr::isDesirableBinOp(Opc))
        return Fold(ConstantExpr::get(Opc, LC, RC));
      return ConstantFoldBinaryOpOperands(Opc, LC, RC, DL);
    }
    return nullptr;
  }

  Value *FoldExactBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                        bool IsExact) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC) {
      if (ConstantExpr::isDesirableBinOp(Opc))
        return Fold(ConstantExpr::get(
            Opc, LC, RC, IsExact ? PossiblyExactOperator::IsExact : 0));
      return ConstantFoldBinaryOpOperands(Opc, LC, RC, DL);
    }
    return nullptr;
  }

  Value *FoldNoWrapBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                         bool HasNUW, bool HasNSW) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC) {
      if (ConstantExpr::isDesirableBinOp(Opc)) {
        unsigned Flags = 0;
        if (HasNUW)
          Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
        if (HasNSW)
          Flags |= OverflowingBinaryOperator::NoSignedWrap;
        return Fold(ConstantExpr::get(Opc, LC, RC, Flags));
      }
      return ConstantFoldBinaryOpOperands(Opc, LC, RC, DL);
    }
    return nullptr;
  }

  Value *FoldBinOpFMF(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                      FastMathFlags FMF) const override {
    return FoldBinOp(Opc, LHS, RHS);
  }

  Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC)
      return Fold(ConstantExpr::getCompare(P, LC, RC));
    return nullptr;
  }

  Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
                      FastMathFlags FMF) const override {
    if (Constant *C = dyn_cast<Constant>(V))
      return ConstantFoldUnaryOpOperand(Opc, C, DL);
    return nullptr;
  }

  Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                 bool IsInBounds = false) const override {
    if (!ConstantExpr::isSupportedGetElementPtr(Ty))
      return nullptr;

    if (auto *PC = dyn_cast<Constant>(Ptr)) {
      // Every index must be constant.
      if (any_of(IdxList, [](Value *V) { return !isa<Constant>(V); }))
        return nullptr;
      if (IsInBounds)
        return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, PC, IdxList));
      else
        return Fold(ConstantExpr::getGetElementPtr(Ty, PC, IdxList));
    }
    return nullptr;
  }

  Value *FoldSelect(Value *C, Value *True, Value *False) const override {
    auto *CC = dyn_cast<Constant>(C);
    auto *TC = dyn_cast<Constant>(True);
    auto *FC = dyn_cast<Constant>(False);
    if (CC && TC && FC)
      return ConstantFoldSelectInstruction(CC, TC, FC);

    return nullptr;
  }

  Value *FoldExtractValue(Value *Agg,
                          ArrayRef<unsigned> IdxList) const override {
    if (auto *CAgg = dyn_cast<Constant>(Agg))
      return ConstantFoldExtractValueInstruction(CAgg, IdxList);
    return nullptr;
  }

  Value *FoldInsertValue(Value *Agg, Value *Val,
                         ArrayRef<unsigned> IdxList) const override {
    auto *CAgg = dyn_cast<Constant>(Agg);
    auto *CVal = dyn_cast<Constant>(Val);
    if (CAgg && CVal)
      return ConstantFoldInsertValueInstruction(CAgg, CVal, IdxList);
    return nullptr;
  }

  Value *FoldExtractElement(Value *Vec, Value *Idx) const override {
    auto *CVec = dyn_cast<Constant>(Vec);
    auto *CIdx = dyn_cast<Constant>(Idx);
    if (CVec && CIdx)
      return Fold(ConstantExpr::getExtractElement(CVec, CIdx));
    return nullptr;
  }

  Value *FoldInsertElement(Value *Vec, Value *NewElt,
                           Value *Idx) const override {
    auto *CVec = dyn_cast<Constant>(Vec);
    auto *CNewElt = dyn_cast<Constant>(NewElt);
    auto *CIdx = dyn_cast<Constant>(Idx);
    if (CVec && CNewElt && CIdx)
      return Fold(ConstantExpr::getInsertElement(CVec, CNewElt, CIdx));
    return nullptr;
  }

  Value *FoldShuffleVector(Value *V1, Value *V2,
                           ArrayRef<int> Mask) const override {
    auto *C1 = dyn_cast<Constant>(V1);
    auto *C2 = dyn_cast<Constant>(V2);
    if (C1 && C2)
      return Fold(ConstantExpr::getShuffleVector(C1, C2, Mask));
    return nullptr;
  }

  //===--------------------------------------------------------------------===//
  // Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  Constant *CreateCast(Instruction::CastOps Op, Constant *C,
                       Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getCast(Op, C, DestTy));
  }
  Constant *CreateIntCast(Constant *C, Type *DestTy,
                          bool isSigned) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
  }
  Constant *CreatePointerCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getPointerCast(C, DestTy));
  }
  Constant *CreateFPCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getFPCast(C, DestTy));
  }
  Constant *CreateBitCast(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::BitCast, C, DestTy);
  }
  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::IntToPtr, C, DestTy);
  }
  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::PtrToInt, C, DestTy);
  }
  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getZExtOrBitCast(C, DestTy));
  }
  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getSExtOrBitCast(C, DestTy));
  }
  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy));
  }

  Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
                                                Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
  }

  //===--------------------------------------------------------------------===//
  // Compare Instructions
  //===--------------------------------------------------------------------===//

  Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
                       Constant *RHS) const override {
    return Fold(ConstantExpr::getCompare(P, LHS, RHS));
  }
};
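
// Example (illustrative sketch, assuming a Module `M` and LLVMContext `Ctx`
// are in scope): plugging TargetFolder into IRBuilder makes the builder fold
// newly created constant expressions with DataLayout-aware rules instead of
// the default folder:
//
//   TargetFolder Folder(M.getDataLayout());
//   IRBuilder<TargetFolder> Builder(Ctx, Folder);
//   // Folded immediately to `i32 5`; no add instruction is emitted.
//   Value *Five = Builder.CreateAdd(Builder.getInt32(2), Builder.getInt32(3));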

} // namespace llvm

#endif
//===- InlineCost.h - Cost analysis for inliner -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements heuristics for inlining decisions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INLINECOST_H
#define LLVM_ANALYSIS_INLINECOST_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/IR/PassManager.h"
#include <cassert>
#include <climits>
#include <optional>

namespace llvm {
class AssumptionCache;
class OptimizationRemarkEmitter;
class BlockFrequencyInfo;
class CallBase;
class DataLayout;
class Function;
class ProfileSummaryInfo;
class TargetTransformInfo;
class TargetLibraryInfo;

namespace InlineConstants {
// Various thresholds used by inline cost analysis.
/// Use when optsize (-Os) is specified.
const int OptSizeThreshold = 50;

/// Use when minsize (-Oz) is specified.
const int OptMinSizeThreshold = 5;

/// Use when -O3 is specified.
const int OptAggressiveThreshold = 250;

// Various magic constants used to adjust heuristics.
int getInstrCost();
const int IndirectCallThreshold = 100;
const int LoopPenalty = 25;
const int LastCallToStaticBonus = 15000;
const int ColdccPenalty = 2000;
/// Do not inline functions which allocate this many bytes on the stack
/// when the caller is recursive.
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
/// Do not inline dynamic allocas that have been constant propagated to be
/// static allocas above this amount in bytes.
const uint64_t MaxSimplifiedDynamicAllocaToInline = 65536;

const char FunctionInlineCostMultiplierAttributeName[] =
    "function-inline-cost-multiplier";

const char MaxInlineStackSizeAttributeName[] = "inline-max-stacksize";
} // namespace InlineConstants

// The cost-benefit pair computed by cost-benefit analysis.
class CostBenefitPair {
public:
  CostBenefitPair(APInt Cost, APInt Benefit) : Cost(Cost), Benefit(Benefit) {}

  const APInt &getCost() const { return Cost; }

  const APInt &getBenefit() const { return Benefit; }

private:
  APInt Cost;
  APInt Benefit;
};

/// Represents the cost of inlining a function.
///
/// This supports special values for functions which should "always" or
/// "never" be inlined. Otherwise, the cost represents a unitless amount;
/// smaller values increase the likelihood of the function being inlined.
///
/// Objects of this type also provide the adjusted threshold for inlining
/// based on the information available for a particular callsite. They can be
/// directly tested to determine if inlining should occur given the cost and
/// threshold for this cost metric.
class InlineCost {
  enum SentinelValues { AlwaysInlineCost = INT_MIN, NeverInlineCost = INT_MAX };

  /// The estimated cost of inlining this callsite.
  int Cost = 0;

  /// The adjusted threshold against which this cost was computed.
  int Threshold = 0;

  /// The amount of StaticBonus that has been applied.
  int StaticBonusApplied = 0;

  /// Must be set for Always and Never instances.
  const char *Reason = nullptr;

  /// The cost-benefit pair computed by cost-benefit analysis.
  std::optional<CostBenefitPair> CostBenefit;

  // Trivial constructor, interesting logic in the factory functions below.
  InlineCost(int Cost, int Threshold, int StaticBonusApplied,
             const char *Reason = nullptr,
             std::optional<CostBenefitPair> CostBenefit = std::nullopt)
      : Cost(Cost), Threshold(Threshold),
        StaticBonusApplied(StaticBonusApplied), Reason(Reason),
        CostBenefit(CostBenefit) {
    assert((isVariable() || Reason) &&
           "Reason must be provided for Never or Always");
  }

public:
  static InlineCost get(int Cost, int Threshold, int StaticBonus = 0) {
    assert(Cost > AlwaysInlineCost && "Cost crosses sentinel value");
    assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
    return InlineCost(Cost, Threshold, StaticBonus);
  }
  static InlineCost
  getAlways(const char *Reason,
            std::optional<CostBenefitPair> CostBenefit = std::nullopt) {
    return InlineCost(AlwaysInlineCost, 0, 0, Reason, CostBenefit);
  }
  static InlineCost
  getNever(const char *Reason,
           std::optional<CostBenefitPair> CostBenefit = std::nullopt) {
    return InlineCost(NeverInlineCost, 0, 0, Reason, CostBenefit);
  }

  /// Test whether the inline cost is low enough for inlining.
  explicit operator bool() const { return Cost < Threshold; }

  bool isAlways() const { return Cost == AlwaysInlineCost; }
  bool isNever() const { return Cost == NeverInlineCost; }
  bool isVariable() const { return !isAlways() && !isNever(); }

  /// Get the inline cost estimate.
  /// It is an error to call this on an "always" or "never" InlineCost.
  int getCost() const {
    assert(isVariable() && "Invalid access of InlineCost");
    return Cost;
  }

  /// Get the threshold against which the cost was computed
  int getThreshold() const {
    assert(isVariable() && "Invalid access of InlineCost");
    return Threshold;
  }

  /// Get the amount of StaticBonus applied.
  int getStaticBonusApplied() const {
    assert(isVariable() && "Invalid access of InlineCost");
    return StaticBonusApplied;
  }

  /// Get the cost-benefit pair which was computed by cost-benefit analysis
  std::optional<CostBenefitPair> getCostBenefit() const { return CostBenefit; }

  /// Get the reason of Always or Never.
  const char *getReason() const {
    assert((Reason || isVariable()) &&
           "InlineCost reason must be set for Always or Never");
    return Reason;
  }

  /// Get the cost delta from the threshold for inlining.
  /// Only valid if the cost is of the variable kind. Returns a negative
  /// value if the cost is too high to inline.
  int getCostDelta() const { return Threshold - getCost(); }
};
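
// Example (illustrative sketch; `Call`, `Params`, `CalleeTTI`, and the
// analysis callbacks are assumed to be in scope): an InlineCost converts to
// true exactly when Cost < Threshold, so a typical decision reads:
//
//   InlineCost IC = getInlineCost(Call, Params, CalleeTTI, GetAC, GetTLI);
//   if (IC) {
//     // Profitable: cost is below the adjusted threshold.
//   } else if (IC.isNever()) {
//     errs() << "never inline: " << IC.getReason() << "\n";
//   } else {
//     errs() << "too costly by " << -IC.getCostDelta() << "\n";
//   }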

/// InlineResult is basically true or false. For false results the message
/// describes a reason.
class InlineResult {
  const char *Message = nullptr;
  InlineResult(const char *Message = nullptr) : Message(Message) {}

public:
  static InlineResult success() { return {}; }
  static InlineResult failure(const char *Reason) {
    return InlineResult(Reason);
  }
  bool isSuccess() const { return Message == nullptr; }
  const char *getFailureReason() const {
    assert(!isSuccess() &&
           "getFailureReason should only be called in failure cases");
    return Message;
  }
};
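
// Example (illustrative sketch; `Callee` is an assumed Function*): a failed
// InlineResult carries a static reason string that callers typically surface
// in remarks or debug output:
//
//   InlineResult IR = isInlineViable(*Callee);
//   if (!IR.isSuccess())
//     errs() << "cannot inline: " << IR.getFailureReason() << "\n";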

/// Thresholds to tune inline cost analysis. The inline cost analysis decides
/// which threshold condition applies and uses the corresponding threshold;
/// otherwise, DefaultThreshold is used. If a threshold is optional, it is
/// applied only when it has a valid value. Typically, users of inline cost
/// analysis obtain an InlineParams object through one of the
/// \c getInlineParams methods and pass it to \c getInlineCost. Some
/// specialized versions of the inliner (such as the pre-inliner) might have
/// custom logic to compute an \c InlineParams object.

struct InlineParams {
  /// The default threshold to start with for a callee.
  int DefaultThreshold = -1;

  /// Threshold to use for callees with inline hint.
  std::optional<int> HintThreshold;

  /// Threshold to use for cold callees.
  std::optional<int> ColdThreshold;

  /// Threshold to use when the caller is optimized for size.
  std::optional<int> OptSizeThreshold;

  /// Threshold to use when the caller is optimized for minsize.
  std::optional<int> OptMinSizeThreshold;

  /// Threshold to use when the callsite is considered hot.
  std::optional<int> HotCallSiteThreshold;

  /// Threshold to use when the callsite is considered hot relative to function
  /// entry.
  std::optional<int> LocallyHotCallSiteThreshold;

  /// Threshold to use when the callsite is considered cold.
  std::optional<int> ColdCallSiteThreshold;

  /// Compute inline cost even when the cost has exceeded the threshold.
  std::optional<bool> ComputeFullInlineCost;

  /// Indicate whether we should allow inline deferral.
  std::optional<bool> EnableDeferral;

  /// Indicate whether we allow inlining for recursive call.
  std::optional<bool> AllowRecursiveCall = false;
};

std::optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind);

/// Generate the parameters to tune the inline cost analysis based only on the
/// commandline options.
InlineParams getInlineParams();

/// Generate the parameters to tune the inline cost analysis based on command
/// line options. If -inline-threshold option is not explicitly passed,
/// \p Threshold is used as the default threshold.
InlineParams getInlineParams(int Threshold);

/// Generate the parameters to tune the inline cost analysis based on command
/// line options. If -inline-threshold option is not explicitly passed,
/// the default threshold is computed from \p OptLevel and \p SizeOptLevel.
/// An \p OptLevel value above 3 is considered an aggressive optimization mode.
/// \p SizeOptLevel of 1 corresponds to the -Os flag and 2 corresponds to
/// the -Oz flag.
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
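
// Example (illustrative sketch): derive thresholds for an -O2/-Os style
// pipeline, then tighten one knob; the value below is a hypothetical tuning,
// not a recommendation:
//
//   InlineParams Params = getInlineParams(/*OptLevel=*/2, /*SizeOptLevel=*/1);
//   Params.ColdThreshold = 5; // hypothetical: nearly never inline cold callees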

/// Return the cost associated with a callsite, including parameter passing
/// and the call/return instruction.
int getCallsiteCost(const CallBase &Call, const DataLayout &DL);

/// Get an InlineCost object representing the cost of inlining this
/// callsite.
///
/// Note that a default threshold is passed into this function. This threshold
/// could be modified based on callsite's properties and only costs below this
/// new threshold are computed with any accuracy. The new threshold can be
/// used to bound the computation necessary to determine whether the cost is
/// sufficiently low to warrant inlining.
///
/// Also note that calling this function *dynamically* computes the cost of
/// inlining the callsite. It is an expensive, heavyweight call.
InlineCost
getInlineCost(CallBase &Call, const InlineParams &Params,
              TargetTransformInfo &CalleeTTI,
              function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
              function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
              function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
              ProfileSummaryInfo *PSI = nullptr,
              OptimizationRemarkEmitter *ORE = nullptr);
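
// Example (illustrative sketch, assuming a FunctionAnalysisManager `FAM`, a
// call site `CB`, and `Params`/`CalleeTTI` are in scope): the function_ref
// parameters are typically satisfied with small lambdas that fetch
// per-function results from the analysis manager:
//
//   auto GetAC = [&](Function &F) -> AssumptionCache & {
//     return FAM.getResult<AssumptionAnalysis>(F);
//   };
//   auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
//     return FAM.getResult<TargetLibraryAnalysis>(F);
//   };
//   InlineCost IC = getInlineCost(CB, Params, CalleeTTI, GetAC, GetTLI);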

/// Get an InlineCost with the callee explicitly specified.
/// This allows you to calculate the cost of inlining a function via a
/// pointer. This behaves exactly as the version with no explicit callee
/// parameter in all other respects.
InlineCost
getInlineCost(CallBase &Call, Function *Callee, const InlineParams &Params,
              TargetTransformInfo &CalleeTTI,
              function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
              function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
              function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
              ProfileSummaryInfo *PSI = nullptr,
              OptimizationRemarkEmitter *ORE = nullptr);

/// Returns InlineResult::success() if the call site should be always inlined
/// because of user directives, and the inlining is viable. Returns
/// InlineResult::failure() if the inlining may never happen because of user
/// directives or incompatibilities detectable without needing callee traversal.
/// Otherwise returns std::nullopt, meaning that inlining should be decided
/// based on other criteria (e.g. cost modeling).
std::optional<InlineResult> getAttributeBasedInliningDecision(
    CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI);

/// Get the cost estimate ignoring thresholds. This is similar to getInlineCost
/// when passed InlineParams::ComputeFullInlineCost, or a non-null ORE. It
/// uses default InlineParams otherwise.
/// Contrary to getInlineCost, which makes a threshold-based final evaluation of
/// should/shouldn't inline, captured in InlineResult, getInliningCostEstimate
/// returns:
/// - std::nullopt, if the inlining cannot happen (is illegal)
/// - an integer, representing the cost.
std::optional<int> getInliningCostEstimate(
    CallBase &Call, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
    ProfileSummaryInfo *PSI = nullptr,
    OptimizationRemarkEmitter *ORE = nullptr);

/// Get the expanded cost features. The features are returned unconditionally,
/// even if inlining is impossible.
std::optional<InlineCostFeatures> getInliningCostFeatures(
    CallBase &Call, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
    ProfileSummaryInfo *PSI = nullptr,
    OptimizationRemarkEmitter *ORE = nullptr);

/// Minimal filter to detect invalid constructs for inlining.
InlineResult isInlineViable(Function &Callee);

// This pass is used to annotate instructions during the inline process for
// debugging and analysis. The main purpose of the pass is to observe and test
// the inliner's decisions when developing new optimizations for InlineCost.
struct InlineCostAnnotationPrinterPass
    : PassInfoMixin<InlineCostAnnotationPrinterPass> {
  raw_ostream &OS;

public:
  explicit InlineCostAnnotationPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
} // namespace llvm

#endif
//===--------- LoopIterator.h - Iterate over loop blocks --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines iterators to visit the basic blocks within a loop.
//
// These iterators currently visit blocks within subloops as well.
// Unfortunately we have no efficient way of summarizing loop exits which would
// allow skipping subloops during traversal.
//
// If you want to visit all blocks in a loop and don't need an ordered traversal,
// use Loop::block_begin() instead.
//
// This is intentionally designed to work with ill-formed loops in which the
// backedge has been deleted. The only prerequisite is that all blocks
// contained within the loop according to the most recent LoopInfo analysis are
// reachable from the loop header.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPITERATOR_H
#define LLVM_ANALYSIS_LOOPITERATOR_H

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/LoopInfo.h"

namespace llvm {

class LoopBlocksTraversal;

// A traits type that is intended to be used in graph algorithms. The graph
// traversal starts at the loop header and visits the BasicBlocks that are in
// the loop body, but not the loop header. Since the loop header is skipped,
// the back edges are excluded.
//
// TODO: Explore the possibility to implement LoopBlocksTraversal in terms of
//       LoopBodyTraits, so that insertEdge doesn't have to be specialized.
struct LoopBodyTraits {
  using NodeRef = std::pair<const Loop *, BasicBlock *>;

  // This wraps a const Loop * into the iterator, so we know which edges to
  // filter out.
  class WrappedSuccIterator
      : public iterator_adaptor_base<
            WrappedSuccIterator, succ_iterator,
            typename std::iterator_traits<succ_iterator>::iterator_category,
            NodeRef, std::ptrdiff_t, NodeRef *, NodeRef> {
    using BaseT = iterator_adaptor_base<
        WrappedSuccIterator, succ_iterator,
        typename std::iterator_traits<succ_iterator>::iterator_category,
        NodeRef, std::ptrdiff_t, NodeRef *, NodeRef>;

    const Loop *L;

  public:
    WrappedSuccIterator(succ_iterator Begin, const Loop *L)
        : BaseT(Begin), L(L) {}

    NodeRef operator*() const { return {L, *I}; }
  };

  struct LoopBodyFilter {
    bool operator()(NodeRef N) const {
      const Loop *L = N.first;
      return N.second != L->getHeader() && L->contains(N.second);
    }
  };

  using ChildIteratorType =
      filter_iterator<WrappedSuccIterator, LoopBodyFilter>;

  static NodeRef getEntryNode(const Loop &G) { return {&G, G.getHeader()}; }

  static ChildIteratorType child_begin(NodeRef Node) {
    return make_filter_range(make_range<WrappedSuccIterator>(
                                 {succ_begin(Node.second), Node.first},
                                 {succ_end(Node.second), Node.first}),
                             LoopBodyFilter{})
        .begin();
  }

  static ChildIteratorType child_end(NodeRef Node) {
    return make_filter_range(make_range<WrappedSuccIterator>(
                                 {succ_begin(Node.second), Node.first},
                                 {succ_end(Node.second), Node.first}),
                             LoopBodyFilter{})
        .end();
  }
};

/// Store the result of a depth first search within basic blocks contained by a
/// single loop.
///
/// TODO: This could be generalized for any CFG region, or the entire CFG.
class LoopBlocksDFS {
public:
  /// Postorder list iterators.
  typedef std::vector<BasicBlock*>::const_iterator POIterator;
  typedef std::vector<BasicBlock*>::const_reverse_iterator RPOIterator;

  friend class LoopBlocksTraversal;

private:
  Loop *L;

  /// Map each block to its postorder number. A block is only mapped after it is
  /// preorder visited by DFS. Its postorder number is initially zero and set
  /// to nonzero after it is finished by postorder traversal.
  DenseMap<BasicBlock*, unsigned> PostNumbers;
  std::vector<BasicBlock*> PostBlocks;

public:
  LoopBlocksDFS(Loop *Container) :
    L(Container), PostNumbers(NextPowerOf2(Container->getNumBlocks())) {
    PostBlocks.reserve(Container->getNumBlocks());
  }

  Loop *getLoop() const { return L; }

  /// Traverse the loop blocks and store the DFS result.
  void perform(LoopInfo *LI);

  /// Return true if postorder numbers are assigned to all loop blocks.
  bool isComplete() const { return PostBlocks.size() == L->getNumBlocks(); }

  /// Iterate over the cached postorder blocks.
  POIterator beginPostorder() const {
    assert(isComplete() && "bad loop DFS");
    return PostBlocks.begin();
  }
  POIterator endPostorder() const { return PostBlocks.end(); }

  /// Reverse iterate over the cached postorder blocks.
  RPOIterator beginRPO() const {
    assert(isComplete() && "bad loop DFS");
    return PostBlocks.rbegin();
  }
  RPOIterator endRPO() const { return PostBlocks.rend(); }

  /// Return true if this block has been preorder visited.
  bool hasPreorder(BasicBlock *BB) const { return PostNumbers.count(BB); }

  /// Return true if this block has a postorder number.
  bool hasPostorder(BasicBlock *BB) const {
    DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
    return I != PostNumbers.end() && I->second;
  }

  /// Get a block's postorder number.
  unsigned getPostorder(BasicBlock *BB) const {
    DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
    assert(I != PostNumbers.end() && "block not visited by DFS");
    assert(I->second && "block not finished by DFS");
    return I->second;
  }

  /// Get a block's reverse postorder number.
  unsigned getRPO(BasicBlock *BB) const {
    return 1 + PostBlocks.size() - getPostorder(BB);
  }

  void clear() {
    PostNumbers.clear();
    PostBlocks.clear();
  }
};

/// Wrapper class to LoopBlocksDFS that provides a standard begin()/end()
/// interface for the DFS reverse post-order traversal of blocks in a loop body.
class LoopBlocksRPO {
private:
  LoopBlocksDFS DFS;

public:
  LoopBlocksRPO(Loop *Container) : DFS(Container) {}

  /// Traverse the loop blocks and store the DFS result.
  void perform(LoopInfo *LI) {
    DFS.perform(LI);
  }

  /// Reverse iterate over the cached postorder blocks.
  LoopBlocksDFS::RPOIterator begin() const { return DFS.beginRPO(); }
  LoopBlocksDFS::RPOIterator end() const { return DFS.endRPO(); }
};
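
// Example (illustrative sketch, assuming `Loop *L` and `LoopInfo *LI` are in
// scope): run the DFS once, then visit the loop body in reverse post-order:
//
//   LoopBlocksRPO RPOT(L);
//   RPOT.perform(LI);
//   for (BasicBlock *BB : RPOT)
//     visit(BB); // `visit` is a hypothetical per-block action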

/// Specialize po_iterator_storage to record postorder numbers.
template<> class po_iterator_storage<LoopBlocksTraversal, true> {
  LoopBlocksTraversal &LBT;
public:
  po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
  // These functions are defined below.
  bool insertEdge(std::optional<BasicBlock *> From, BasicBlock *To);
  void finishPostorder(BasicBlock *BB);
};

/// Traverse the blocks in a loop using a depth-first search.
class LoopBlocksTraversal {
public:
  /// Graph traversal iterator.
  typedef po_iterator<BasicBlock*, LoopBlocksTraversal, true> POTIterator;

private:
  LoopBlocksDFS &DFS;
  LoopInfo *LI;

public:
  LoopBlocksTraversal(LoopBlocksDFS &Storage, LoopInfo *LInfo) :
    DFS(Storage), LI(LInfo) {}

  /// Postorder traversal over the graph. This only needs to be done once.
  /// po_iterator "automatically" calls back to visitPreorder and
  /// finishPostorder to record the DFS result.
  POTIterator begin() {
    assert(DFS.PostBlocks.empty() && "Need clear DFS result before traversing");
    assert(DFS.L->getNumBlocks() && "po_iterator cannot handle an empty graph");
    return po_ext_begin(DFS.L->getHeader(), *this);
  }
  POTIterator end() {
    // po_ext_end interface requires a basic block, but ignores its value.
    return po_ext_end(DFS.L->getHeader(), *this);
  }

  /// Called by po_iterator upon reaching a block via a CFG edge. If this block
  /// is contained in the loop and has not been visited, then mark it preorder
  /// visited and return true.
  ///
  /// TODO: If anyone is interested, we could record preorder numbers here.
  bool visitPreorder(BasicBlock *BB) {
    if (!DFS.L->contains(LI->getLoopFor(BB)))
      return false;

    return DFS.PostNumbers.insert(std::make_pair(BB, 0)).second;
  }

  /// Called by po_iterator each time it advances, indicating a block's
  /// postorder.
  void finishPostorder(BasicBlock *BB) {
    assert(DFS.PostNumbers.count(BB) && "Loop DFS skipped preorder");
    DFS.PostBlocks.push_back(BB);
    DFS.PostNumbers[BB] = DFS.PostBlocks.size();
  }
};

inline bool po_iterator_storage<LoopBlocksTraversal, true>::insertEdge(
    std::optional<BasicBlock *> From, BasicBlock *To) {
  return LBT.visitPreorder(To);
}

inline void po_iterator_storage<LoopBlocksTraversal, true>::
finishPostorder(BasicBlock *BB) {
  LBT.finishPostorder(BB);
}
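
// Example (illustrative sketch): driving the traversal once populates the
// LoopBlocksDFS result, which mirrors what LoopBlocksDFS::perform does:
//
//   LoopBlocksDFS DFS(L);
//   LoopBlocksTraversal Traversal(DFS, LI);
//   for (BasicBlock *BB : Traversal)
//     (void)BB; // visitPreorder/finishPostorder record the DFS as a side effect
//   assert(DFS.isComplete());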

} // End namespace llvm

#endif
//===-- llvm/Analysis/DependenceAnalysis.h -------------------- -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// DependenceAnalysis is an LLVM pass that analyses dependences between memory
// accesses. Currently, it is an implementation of the approach described in
//
//            Practical Dependence Testing
//            Goff, Kennedy, Tseng
//            PLDI 1991
//
// There's a single entry point that analyzes the dependence between a pair
// of memory references in a function, returning either NULL, for no dependence,
// or a more-or-less detailed description of the dependence between them.
//
// This pass exists to support the DependenceGraph pass. There are two separate
// passes because there's a useful separation of concerns. A dependence exists
// if two conditions are met:
//
//    1) Two instructions reference the same memory location, and
//    2) There is a flow of control leading from one instruction to the other.
//
// DependenceAnalysis attacks the first condition; DependenceGraph will attack
// the second (it's not yet ready).
//
// Please note that this is work in progress and the interface is subject to
// change.
//
// Plausible changes:
//    Return a set of more precise dependences instead of just one dependence
//    summarizing all.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
#define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"

namespace llvm {
  class AAResults;
  template <typename T> class ArrayRef;
  class Loop;
  class LoopInfo;
  class ScalarEvolution;
  class SCEV;
  class SCEVConstant;
  class raw_ostream;

  /// Dependence - This class represents a dependence between two memory
  /// references in a function. It contains minimal information and
  /// is used in the very common situation where the compiler is unable to
  /// determine anything beyond the existence of a dependence; that is, it
  /// represents a confused dependence (see also FullDependence). In most
  /// cases (for output, flow, and anti dependences), the dependence implies
  /// an ordering, where the source must precede the destination; in contrast,
  /// input dependences are unordered.
  ///
  /// When a dependence graph is built, each Dependence will be a member of
  /// the set of predecessor edges for its destination instruction and the set
  /// of successor edges for its source instruction. These sets are represented
  /// as singly-linked lists, with the "next" fields stored in the dependence
  /// itself.
  class Dependence {
  protected:
    Dependence(Dependence &&) = default;
    Dependence &operator=(Dependence &&) = default;

  public:
    Dependence(Instruction *Source, Instruction *Destination)
        : Src(Source), Dst(Destination) {}
    virtual ~Dependence() = default;

    /// Dependence::DVEntry - Each level in the distance/direction vector
    /// has a direction (or perhaps a union of several directions), and
    /// perhaps a distance.
    struct DVEntry {
      enum : unsigned char {
        NONE = 0,
        LT = 1,
        EQ = 2,
        LE = 3,
        GT = 4,
        NE = 5,
        GE = 6,
        ALL = 7
      };
      unsigned char Direction : 3; // Init to ALL, then refine.
      bool Scalar    : 1; // Init to true.
      bool PeelFirst : 1; // Peeling the first iteration will break dependence.
      bool PeelLast  : 1; // Peeling the last iteration will break the dependence.
      bool Splitable : 1; // Splitting the loop will break dependence.
      const SCEV *Distance = nullptr; // NULL implies no distance available.
      DVEntry()
          : Direction(ALL), Scalar(true), PeelFirst(false), PeelLast(false),
            Splitable(false) {}
    };

    /// getSrc - Returns the source instruction for this dependence.
    ///
    Instruction *getSrc() const { return Src; }

    /// getDst - Returns the destination instruction for this dependence.
    ///
    Instruction *getDst() const { return Dst; }

    /// isInput - Returns true if this is an input dependence.
    ///
    bool isInput() const;

    /// isOutput - Returns true if this is an output dependence.
    ///
    bool isOutput() const;

    /// isFlow - Returns true if this is a flow (aka true) dependence.
    ///
    bool isFlow() const;

    /// isAnti - Returns true if this is an anti dependence.
    ///
    bool isAnti() const;

    /// isOrdered - Returns true if dependence is Output, Flow, or Anti
    ///
    bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }

    /// isUnordered - Returns true if dependence is Input
    ///
    bool isUnordered() const { return isInput(); }

    /// isLoopIndependent - Returns true if this is a loop-independent
    /// dependence.
    virtual bool isLoopIndependent() const { return true; }

    /// isConfused - Returns true if this dependence is confused
    /// (the compiler understands nothing and makes worst-case
    /// assumptions).
    virtual bool isConfused() const { return true; }

    /// isConsistent - Returns true if this dependence is consistent
    /// (occurs every time the source and destination are executed).
    virtual bool isConsistent() const { return false; }

    /// getLevels - Returns the number of common loops surrounding the
    /// source and destination of the dependence.
    virtual unsigned getLevels() const { return 0; }

    /// getDirection - Returns the direction associated with a particular
    /// level.
    virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }

    /// getDistance - Returns the distance (or NULL) associated with a
    /// particular level.
    virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }

    /// Check if the direction vector is negative. A negative direction
    /// vector means Src and Dst are reversed in the actual program.
    virtual bool isDirectionNegative() const { return false; }

    /// If the direction vector is negative, normalize the direction
    /// vector to make it non-negative. Normalization is done by reversing
    /// Src and Dst, plus reversing the dependence directions and distances
    /// in the vector.
    virtual bool normalize(ScalarEvolution *SE) { return false; }

    /// isPeelFirst - Returns true if peeling the first iteration from
    /// this loop will break this dependence.
    virtual bool isPeelFirst(unsigned Level) const { return false; }

    /// isPeelLast - Returns true if peeling the last iteration from
    /// this loop will break this dependence.
    virtual bool isPeelLast(unsigned Level) const { return false; }

    /// isSplitable - Returns true if splitting this loop will break
    /// the dependence.
    virtual bool isSplitable(unsigned Level) const { return false; }

    /// isScalar - Returns true if a particular level is scalar; that is,
    /// if no subscript in the source or destination mentions the induction
    /// variable associated with the loop at this level.
    virtual bool isScalar(unsigned Level) const;

    /// getNextPredecessor - Returns the value of the NextPredecessor
    /// field.
    const Dependence *getNextPredecessor() const { return NextPredecessor; }

    /// getNextSuccessor - Returns the value of the NextSuccessor
    /// field.
    const Dependence *getNextSuccessor() const { return NextSuccessor; }

    /// setNextPredecessor - Sets the value of the NextPredecessor
    /// field.
    void setNextPredecessor(const Dependence *pred) { NextPredecessor = pred; }

    /// setNextSuccessor - Sets the value of the NextSuccessor
    /// field.
    void setNextSuccessor(const Dependence *succ) { NextSuccessor = succ; }

    /// dump - For debugging purposes, dumps a dependence to OS.
    ///
    void dump(raw_ostream &OS) const;

  protected:
    Instruction *Src, *Dst;

  private:
    const Dependence *NextPredecessor = nullptr, *NextSuccessor = nullptr;
    friend class DependenceInfo;
  };

  /// FullDependence - This class represents a dependence between two memory
  /// references in a function. It contains detailed information about the
  /// dependence (direction vectors, etc.) and is used when the compiler is
  /// able to accurately analyze the interaction of the references; that is,
  /// it is not a confused dependence (see Dependence). In most cases
  /// (for output, flow, and anti dependences), the dependence implies an
  /// ordering, where the source must precede the destination; in contrast,
  /// input dependences are unordered.
  class FullDependence final : public Dependence {
  public:
    FullDependence(Instruction *Src, Instruction *Dst, bool LoopIndependent,
                   unsigned Levels);

    /// isLoopIndependent - Returns true if this is a loop-independent
    /// dependence.
    bool isLoopIndependent() const override { return LoopIndependent; }

    /// isConfused - Returns true if this dependence is confused
    /// (the compiler understands nothing and makes worst-case
    /// assumptions).
    bool isConfused() const override { return false; }

    /// isConsistent - Returns true if this dependence is consistent
    /// (occurs every time the source and destination are executed).
    bool isConsistent() const override { return Consistent; }

    /// getLevels - Returns the number of common loops surrounding the
    /// source and destination of the dependence.
    unsigned getLevels() const override { return Levels; }

    /// getDirection - Returns the direction associated with a particular
    /// level.
    unsigned getDirection(unsigned Level) const override;

    /// getDistance - Returns the distance (or NULL) associated with a
    /// particular level.
    const SCEV *getDistance(unsigned Level) const override;

    /// Check if the direction vector is negative. A negative direction
    /// vector means Src and Dst are reversed in the actual program.
    bool isDirectionNegative() const override;

    /// If the direction vector is negative, normalize the direction
    /// vector to make it non-negative. Normalization is done by reversing
    /// Src and Dst, plus reversing the dependence directions and distances
    /// in the vector.
    bool normalize(ScalarEvolution *SE) override;

    /// isPeelFirst - Returns true if peeling the first iteration from
    /// this loop will break this dependence.
    bool isPeelFirst(unsigned Level) const override;

    /// isPeelLast - Returns true if peeling the last iteration from
    /// this loop will break this dependence.
    bool isPeelLast(unsigned Level) const override;

    /// isSplitable - Returns true if splitting the loop will break
    /// the dependence.
    bool isSplitable(unsigned Level) const override;

    /// isScalar - Returns true if a particular level is scalar; that is,
    /// if no subscript in the source or destination mentions the induction
    /// variable associated with the loop at this level.
    bool isScalar(unsigned Level) const override;

  private:
    unsigned short Levels;
    bool LoopIndependent;
    bool Consistent; // Init to true, then refine.
    std::unique_ptr<DVEntry[]> DV;
    friend class DependenceInfo;
  };

  /// DependenceInfo - This class is the main dependence-analysis driver.
  ///
  class DependenceInfo {
  public:
    DependenceInfo(Function *F, AAResults *AA, ScalarEvolution *SE,
                   LoopInfo *LI)
        : AA(AA), SE(SE), LI(LI), F(F) {}

    /// Handle transitive invalidation when the cached analysis results go away.
    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv);

    /// depends - Tests for a dependence between the Src and Dst instructions.
    /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
    /// FullDependence) with as much information as can be gleaned.
    /// The flag PossiblyLoopIndependent should be set by the caller
    /// if it appears that control flow can reach from Src to Dst
    /// without traversing a loop back edge.
    std::unique_ptr<Dependence> depends(Instruction *Src,
                                        Instruction *Dst,
                                        bool PossiblyLoopIndependent);
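
    // Example (illustrative sketch, assuming a DependenceInfo `DI` and two
    // memory instructions `Src` and `Dst` are in scope): a null result means
    // no dependence; otherwise per-level directions can be inspected through
    // the DVEntry bit encoding:
    //
    //   if (auto D = DI.depends(Src, Dst, /*PossiblyLoopIndependent=*/true)) {
    //     for (unsigned Level = 1; Level <= D->getLevels(); ++Level)
    //       if (D->getDirection(Level) & Dependence::DVEntry::LT)
    //         ; // a '<' direction is possible at this level
    //   }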

    /// getSplitIteration - Give a dependence that's splittable at some
    /// particular level, return the iteration that should be used to split
    /// the loop.
    ///
    /// Generally, the dependence analyzer will be used to build
    /// a dependence graph for a function (basically a map from instructions
    /// to dependences). Looking for cycles in the graph shows us loops
    /// that cannot be trivially vectorized/parallelized.
    ///
    /// We can try to improve the situation by examining all the dependences
    /// that make up the cycle, looking for ones we can break.
    /// Sometimes, peeling the first or last iteration of a loop will break
    /// dependences, and there are flags for those possibilities.
    /// Sometimes, splitting a loop at some other iteration will do the trick,
    /// and we've got a flag for that case. Rather than waste the space to
    /// record the exact iteration (since we rarely know), we provide
    /// a method that calculates the iteration. It's a drag that it must work
    /// from scratch, but wonderful in that it's possible.
    ///
    /// Here's an example:
    ///
    ///    for (i = 0; i < 10; i++)
    ///        A[i] = ...
    ///        ... = A[11 - i]
    ///
    /// There's a loop-carried flow dependence from the store to the load,
    /// found by the weak-crossing SIV test. The dependence will have a flag,
    /// indicating that the dependence can be broken by splitting the loop.
    /// Calling getSplitIteration will return 5.
    /// Splitting the loop breaks the dependence, like so:
    ///
    ///    for (i = 0; i <= 5; i++)
    ///        A[i] = ...
    ///        ... = A[11 - i]
    ///    for (i = 6; i < 10; i++)
    ///        A[i] = ...
    ///        ... = A[11 - i]
    ///
    /// breaks the dependence and allows us to vectorize/parallelize
    /// both loops.
    const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);

    Function *getFunction() const { return F; }

  private:
    AAResults *AA;
    ScalarEvolution *SE;
    LoopInfo *LI;
    Function *F;

    /// Subscript - This private struct represents a pair of subscripts from
    /// a pair of potentially multi-dimensional array references. We use a
    /// vector of them to guide subscript partitioning.
    struct Subscript {
      const SCEV *Src;
      const SCEV *Dst;
      enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
      SmallBitVector Loops;
      SmallBitVector GroupLoops;
      SmallBitVector Group;
    };

    struct CoefficientInfo {
      const SCEV *Coeff;
      const SCEV *PosPart;
      const SCEV *NegPart;
      const SCEV *Iterations;
    };

    struct BoundInfo {
      const SCEV *Iterations;
      const SCEV *Upper[8];
      const SCEV *Lower[8];
      unsigned char Direction;
      unsigned char DirSet;
    };

    /// Constraint - This private class represents a constraint, as defined
    /// in the paper
    ///
    ///           Practical Dependence Testing
    ///           Goff, Kennedy, Tseng
    ///           PLDI 1991
    ///
    /// There are 5 kinds of constraint, in a hierarchy.
    ///   1) Any - indicates no constraint, any dependence is possible.
    ///   2) Line - A line ax + by = c, where a, b, and c are parameters,
    ///             representing the dependence equation.
    ///   3) Distance - The value d of the dependence distance.
    ///   4) Point - A point <x, y> representing the dependence from
    ///              iteration x to iteration y.
    ///   5) Empty - No dependence is possible.
    class Constraint {
    private:
      enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
      ScalarEvolution *SE;
      const SCEV *A;
      const SCEV *B;
      const SCEV *C;
      const Loop *AssociatedLoop;

    public:
      /// isEmpty - Return true if the constraint is of kind Empty.
      bool isEmpty() const { return Kind == Empty; }

      /// isPoint - Return true if the constraint is of kind Point.
      bool isPoint() const { return Kind == Point; }

      /// isDistance - Return true if the constraint is of kind Distance.
      bool isDistance() const { return Kind == Distance; }

      /// isLine - Return true if the constraint is of kind Line.
      /// Since Distances can also be represented as Lines, we also return
      /// true if the constraint is of kind Distance.
      bool isLine() const { return Kind == Line || Kind == Distance; }

      /// isAny - Return true if the constraint is of kind Any.
      bool isAny() const { return Kind == Any; }

      /// getX - If constraint is a point <X, Y>, returns X.
      /// Otherwise assert.
      const SCEV *getX() const;

      /// getY - If constraint is a point <X, Y>, returns Y.
      /// Otherwise assert.
      const SCEV *getY() const;

      /// getA - If constraint is a line AX + BY = C, returns A.
      /// Otherwise assert.
      const SCEV *getA() const;

      /// getB - If constraint is a line AX + BY = C, returns B.
      /// Otherwise assert.
      const SCEV *getB() const;

      /// getC - If constraint is a line AX + BY = C, returns C.
      /// Otherwise assert.
      const SCEV *getC() const;

      /// getD - If constraint is a distance, returns D.
      /// Otherwise assert.
      const SCEV *getD() const;

      /// getAssociatedLoop - Returns the loop associated with this constraint.
      const Loop *getAssociatedLoop() const;

      /// setPoint - Change a constraint to Point.
      void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);

      /// setLine - Change a constraint to Line.
      void setLine(const SCEV *A, const SCEV *B,
                   const SCEV *C, const Loop *CurrentLoop);

      /// setDistance - Change a constraint to Distance.
      void setDistance(const SCEV *D, const Loop *CurrentLoop);

      /// setEmpty - Change a constraint to Empty.
      void setEmpty();

      /// setAny - Change a constraint to Any.
      void setAny(ScalarEvolution *SE);

      /// dump - For debugging purposes. Dumps the constraint
      /// out to OS.
      void dump(raw_ostream &OS) const;
    };

    /// establishNestingLevels - Examines the loop nesting of the Src and Dst
    /// instructions and establishes their shared loops. Sets the variables
    /// CommonLevels, SrcLevels, and MaxLevels.
    /// The source and destination instructions needn't be contained in the same
    /// loop. The routine establishNestingLevels finds the level of the most
    /// deeply nested loop that contains them both, CommonLevels. An instruction
    /// that's not contained in a loop is at level = 0. MaxLevels is equal to
    /// the level of the source plus the level of the destination, minus
    /// CommonLevels.
    /// This lets us allocate vectors MaxLevels in length, with room for every
    /// distinct loop referenced in both the source and destination subscripts.
    /// The variable SrcLevels is the nesting depth of the source instruction.
    /// It's used to help calculate distinct loops referenced by the destination.
    /// Here's the map from loops to levels:
    ///            0 - unused
    ///            1 - outermost common loop
    ///          ... - other common loops
    /// CommonLevels - innermost common loop
    ///          ... - loops containing Src but not Dst
    ///    SrcLevels - innermost loop containing Src but not Dst
    ///          ... - loops containing Dst but not Src
    ///    MaxLevels - innermost loop containing Dst but not Src
    /// Consider the following code fragment:
    ///    for (a = ...) {
    ///      for (b = ...) {
    ///        for (c = ...) {
    ///          for (d = ...) {
    ///            A[] = ...;
    ///          }
    ///        }
    ///        for (e = ...) {
    ///          for (f = ...) {
    ///            for (g = ...) {
    ///              ... = A[];
    ///            }
    ///          }
    ///        }
    ///      }
    ///    }
    /// If we're looking at the possibility of a dependence between the store
    /// to A (the Src) and the load from A (the Dst), we'll note that they
    /// have 2 loops in common, so CommonLevels will equal 2 and the direction
    /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
    /// A map from loop names to level indices would look like
    ///     a - 1
    ///     b - 2 = CommonLevels
    ///     c - 3
    ///     d - 4 = SrcLevels
    ///     e - 5
    ///     f - 6
    ///     g - 7 = MaxLevels
    void establishNestingLevels(const Instruction *Src,
                                const Instruction *Dst);

    unsigned CommonLevels, SrcLevels, MaxLevels;

    /// mapSrcLoop - Given one of the loops containing the source, return
    /// its level index in our numbering scheme.
    unsigned mapSrcLoop(const Loop *SrcLoop) const;

    /// mapDstLoop - Given one of the loops containing the destination,
    /// return its level index in our numbering scheme.
    unsigned mapDstLoop(const Loop *DstLoop) const;

    /// isLoopInvariant - Returns true if Expression is loop invariant
    /// in LoopNest.
    bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;

    /// Makes sure all subscript pairs share the same integer type by
    /// sign-extending as necessary.
    /// Sign-extending a subscript is safe because getelementptr assumes the
    /// array subscripts are signed.
    void unifySubscriptType(ArrayRef<Subscript *> Pairs);

    /// removeMatchingExtensions - Examines a subscript pair.
    /// If the source and destination are identically sign (or zero)
    /// extended, it strips off the extension in an effort to
    /// simplify the actual analysis.
    void removeMatchingExtensions(Subscript *Pair);

    /// collectCommonLoops - Finds the set of loops from the LoopNest that
    /// have a level <= CommonLevels and are referred to by the SCEV Expression.
    void collectCommonLoops(const SCEV *Expression,
                            const Loop *LoopNest,
                            SmallBitVector &Loops) const;

    /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
    /// linear. Collect the set of loops mentioned by Src.
    bool checkSrcSubscript(const SCEV *Src,
                           const Loop *LoopNest,
                           SmallBitVector &Loops);

    /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
    /// linear. Collect the set of loops mentioned by Dst.
    bool checkDstSubscript(const SCEV *Dst,
                           const Loop *LoopNest,
                           SmallBitVector &Loops);

    /// isKnownPredicate - Compare X and Y using the predicate Pred.
    /// Basically a wrapper for SCEV::isKnownPredicate,
    /// but tries harder, especially in the presence of sign and zero
    /// extensions and symbolics.
    bool isKnownPredicate(ICmpInst::Predicate Pred,
                          const SCEV *X,
                          const SCEV *Y) const;

    /// isKnownLessThan - Compare to see if S is less than Size.
    /// Another wrapper for isKnownNegative(S - max(Size, 1)), with some extra
    /// checking when S is an AddRec and we can prove less-than using the loop
    /// bounds.
    bool isKnownLessThan(const SCEV *S, const SCEV *Size) const;

    /// isKnownNonNegative - Compare to see if S is known not to be negative.
    /// Uses the fact that S comes from Ptr, which may be an inbounds GEP,
    /// proving there is no wrapping going on.
    bool isKnownNonNegative(const SCEV *S, const Value *Ptr) const;

    /// collectUpperBound - All subscripts are the same type (on my machine,
    /// an i64). The loop bound may be a smaller type. collectUpperBound
    /// finds the bound, if available, and zero extends it to the type T.
    /// (I zero extend since the bound should always be >= 0.)
    /// If no upper bound is available, return NULL.
    const SCEV *collectUpperBound(const Loop *l, Type *T) const;

    /// collectConstantUpperBound - Calls collectUpperBound(), then
    /// attempts to cast it to SCEVConstant. If the cast fails,
    /// returns NULL.
    const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;

    /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
    /// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
    /// Collects the associated loops in a set.
    Subscript::ClassificationKind classifyPair(const SCEV *Src,
                                           const Loop *SrcLoopNest,
                                           const SCEV *Dst,
                                           const Loop *DstLoopNest,
                                           SmallBitVector &Loops);

    /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// If the dependence isn't proven to exist,
    /// marks the Result as inconsistent.
    bool testZIV(const SCEV *Src,
                 const SCEV *Dst,
                 FullDependence &Result) const;

    /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
    /// Things of the form [c1 + a1*i] and [c2 + a2*j], where
    /// i and j are induction variables, c1 and c2 are loop invariant,
    /// and a1 and a2 are constant.
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Sets appropriate direction vector entry and, when possible,
    /// the distance vector entry.
    /// If the dependence isn't proven to exist,
    /// marks the Result as inconsistent.
    bool testSIV(const SCEV *Src,
                 const SCEV *Dst,
                 unsigned &Level,
                 FullDependence &Result,
                 Constraint &NewConstraint,
                 const SCEV *&SplitIter) const;

    /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
    /// Things of the form [c1 + a1*i] and [c2 + a2*j]
    /// where i and j are induction variables, c1 and c2 are loop invariant,
    /// and a1 and a2 are constant.
    /// With minor algebra, this test can also be used for things like
    /// [c1 + a1*i + a2*j][c2].
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Marks the Result as inconsistent.
    bool testRDIV(const SCEV *Src,
                  const SCEV *Dst,
                  FullDependence &Result) const;

    /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
    /// Returns true if dependence disproved.
    /// Can sometimes refine direction vectors.
    bool testMIV(const SCEV *Src,
                 const SCEV *Dst,
                 const SmallBitVector &Loops,
                 FullDependence &Result) const;

    /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
    /// for dependence.
    /// Things of the form [c1 + a*i] and [c2 + a*i],
    /// where i is an induction variable, c1 and c2 are loop invariant,
    /// and a is a constant
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Sets appropriate direction and distance.
    bool strongSIVtest(const SCEV *Coeff,
                       const SCEV *SrcConst,
                       const SCEV *DstConst,
                       const Loop *CurrentLoop,
                       unsigned Level,
                       FullDependence &Result,
                       Constraint &NewConstraint) const;

    /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
    /// (Src and Dst) for dependence.
    /// Things of the form [c1 + a*i] and [c2 - a*i],
    /// where i is an induction variable, c1 and c2 are loop invariant,
    /// and a is a constant.
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Sets appropriate direction entry.
    /// Set consistent to false.
    /// Marks the dependence as splitable.
    bool weakCrossingSIVtest(const SCEV *SrcCoeff,
                             const SCEV *SrcConst,
                             const SCEV *DstConst,
                             const Loop *CurrentLoop,
                             unsigned Level,
                             FullDependence &Result,
                             Constraint &NewConstraint,
                             const SCEV *&SplitIter) const;

    /// exactSIVtest - Tests the exact SIV subscript pair
    /// (Src and Dst) for dependence.
    /// Things of the form [c1 + a1*i] and [c2 + a2*i],
    /// where i is an induction variable, c1 and c2 are loop invariant,
    /// and a1 and a2 are constant.
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Sets appropriate direction entry.
    /// Sets consistent to false.
    bool exactSIVtest(const SCEV *SrcCoeff,
                      const SCEV *DstCoeff,
                      const SCEV *SrcConst,
                      const SCEV *DstConst,
                      const Loop *CurrentLoop,
                      unsigned Level,
                      FullDependence &Result,
                      Constraint &NewConstraint) const;

    /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
    /// (Src and Dst) for dependence.
    /// Things of the form [c1] and [c2 + a*i],
    /// where i is an induction variable, c1 and c2 are loop invariant,
    /// and a is a constant. See also weakZeroDstSIVtest.
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Sets appropriate direction entry.
    /// Sets consistent to false.
    /// If loop peeling will break the dependence, mark appropriately.
    bool weakZeroSrcSIVtest(const SCEV *DstCoeff,
                            const SCEV *SrcConst,
                            const SCEV *DstConst,
                            const Loop *CurrentLoop,
                            unsigned Level,
                            FullDependence &Result,
                            Constraint &NewConstraint) const;

    /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
    /// (Src and Dst) for dependence.
    /// Things of the form [c1 + a*i] and [c2],
    /// where i is an induction variable, c1 and c2 are loop invariant,
    /// and a is a constant. See also weakZeroSrcSIVtest.
    /// Returns true if any possible dependence is disproved.
    /// If there might be a dependence, returns false.
    /// Sets appropriate direction entry.
    /// Sets consistent to false.
    /// If loop peeling will break the dependence, mark appropriately.
    bool weakZeroDstSIVtest(const SCEV *SrcCoeff,
                            const SCEV *SrcConst,
                            const SCEV *DstConst,
                            const Loop *CurrentLoop,
                            unsigned Level,
                            FullDependence &Result,
                            Constraint &NewConstraint) const;

    /// exactRDIVtest - Tests the RDIV subscript pair for dependence.
    /// Things of the form [c1 + a*i] and [c2 + b*j],
    /// where i and j are induction variables, c1 and c2 are loop invariant,
    /// and a and b are constants.
    /// Returns true if any possible dependence is disproved.
    /// Marks the result as inconsistent.
    /// Works in some cases that symbolicRDIVtest doesn't,
    /// and vice versa.
    bool exactRDIVtest(const SCEV *SrcCoeff,
                       const SCEV *DstCoeff,
                       const SCEV *SrcConst,
                       const SCEV *DstConst,
                       const Loop *SrcLoop,
                       const Loop *DstLoop,
                       FullDependence &Result) const;

    /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
    /// Things of the form [c1 + a*i] and [c2 + b*j],
    /// where i and j are induction variables, c1 and c2 are loop invariant,
    /// and a and b are constants.
    /// Returns true if any possible dependence is disproved.
    /// Marks the result as inconsistent.
    /// Works in some cases that exactRDIVtest doesn't,
    /// and vice versa. Can also be used as a backup for
    /// ordinary SIV tests.
    bool symbolicRDIVtest(const SCEV *SrcCoeff,
                          const SCEV *DstCoeff,
                          const SCEV *SrcConst,
                          const SCEV *DstConst,
                          const Loop *SrcLoop,
                          const Loop *DstLoop) const;

    /// gcdMIVtest - Tests an MIV subscript pair for dependence.
    /// Returns true if any possible dependence is disproved.
    /// Marks the result as inconsistent.
    /// Can sometimes disprove the equal direction for 1 or more loops.
    /// Can handle some symbolics that even the SIV tests don't get,
    /// so we use it as a backup for everything.
    bool gcdMIVtest(const SCEV *Src,
                    const SCEV *Dst,
                    FullDependence &Result) const;

    /// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
    /// Returns true if any possible dependence is disproved.
    /// Marks the result as inconsistent.
    /// Computes directions.
    bool banerjeeMIVtest(const SCEV *Src,
                         const SCEV *Dst,
                         const SmallBitVector &Loops,
                         FullDependence &Result) const;

    /// collectCoeffInfo - Walks through the subscript,
    /// collecting each coefficient, the associated loop bounds,
    /// and recording its positive and negative parts for later use.
    CoefficientInfo *collectCoeffInfo(const SCEV *Subscript,
                                      bool SrcFlag,
                                      const SCEV *&Constant) const;

    /// getPositivePart - X^+ = max(X, 0).
    ///
    const SCEV *getPositivePart(const SCEV *X) const;

    /// getNegativePart - X^- = min(X, 0).
    ///
    const SCEV *getNegativePart(const SCEV *X) const;

    /// getLowerBound - Looks through all the bounds info and
    /// computes the lower bound given the current direction settings
    /// at each level.
    const SCEV *getLowerBound(BoundInfo *Bound) const;

    /// getUpperBound - Looks through all the bounds info and
    /// computes the upper bound given the current direction settings
    /// at each level.
    const SCEV *getUpperBound(BoundInfo *Bound) const;

    /// exploreDirections - Hierarchically expands the direction vector
    /// search space, combining the directions of discovered dependences
    /// in the DirSet field of Bound. Returns the number of distinct
    /// dependences discovered. If the dependence is disproved,
    /// it will return 0.
    unsigned exploreDirections(unsigned Level,
                               CoefficientInfo *A,
                               CoefficientInfo *B,
                               BoundInfo *Bound,
                               const SmallBitVector &Loops,
                               unsigned &DepthExpanded,
                               const SCEV *Delta) const;

    /// testBounds - Returns true iff the current bounds are plausible.
    bool testBounds(unsigned char DirKind,
                    unsigned Level,
                    BoundInfo *Bound,
                    const SCEV *Delta) const;

    /// findBoundsALL - Computes the upper and lower bounds for level K
    /// using the * direction. Records them in Bound.
    void findBoundsALL(CoefficientInfo *A,
                       CoefficientInfo *B,
                       BoundInfo *Bound,
                       unsigned K) const;

    /// findBoundsLT - Computes the upper and lower bounds for level K
    /// using the < direction. Records them in Bound.
    void findBoundsLT(CoefficientInfo *A,
                      CoefficientInfo *B,
                      BoundInfo *Bound,
                      unsigned K) const;

    /// findBoundsGT - Computes the upper and lower bounds for level K
    /// using the > direction. Records them in Bound.
    void findBoundsGT(CoefficientInfo *A,
                      CoefficientInfo *B,
                      BoundInfo *Bound,
                      unsigned K) const;

    /// findBoundsEQ - Computes the upper and lower bounds for level K
    /// using the = direction. Records them in Bound.
    void findBoundsEQ(CoefficientInfo *A,
                      CoefficientInfo *B,
                      BoundInfo *Bound,
                      unsigned K) const;

    /// intersectConstraints - Updates X with the intersection
    /// of the Constraints X and Y. Returns true if X has changed.
    bool intersectConstraints(Constraint *X,
                              const Constraint *Y);

    /// propagate - Review the constraints, looking for opportunities
    /// to simplify a subscript pair (Src and Dst).
    /// Return true if some simplification occurs.
    /// If the simplification isn't exact (that is, if it is conservative
    /// in terms of dependence), set consistent to false.
    bool propagate(const SCEV *&Src,
                   const SCEV *&Dst,
                   SmallBitVector &Loops,
                   SmallVectorImpl<Constraint> &Constraints,
                   bool &Consistent);

    /// propagateDistance - Attempt to propagate a distance
    /// constraint into a subscript pair (Src and Dst).
    /// Return true if some simplification occurs.
    /// If the simplification isn't exact (that is, if it is conservative
    /// in terms of dependence), set consistent to false.
    bool propagateDistance(const SCEV *&Src,
                           const SCEV *&Dst,
                           Constraint &CurConstraint,
                           bool &Consistent);

    /// propagatePoint - Attempt to propagate a point
    /// constraint into a subscript pair (Src and Dst).
    /// Return true if some simplification occurs.
    bool propagatePoint(const SCEV *&Src,
                        const SCEV *&Dst,
                        Constraint &CurConstraint);

    /// propagateLine - Attempt to propagate a line
    /// constraint into a subscript pair (Src and Dst).
    /// Return true if some simplification occurs.
    /// If the simplification isn't exact (that is, if it is conservative
    /// in terms of dependence), set consistent to false.
    bool propagateLine(const SCEV *&Src,
                       const SCEV *&Dst,
                       Constraint &CurConstraint,
                       bool &Consistent);

    /// findCoefficient - Given a linear SCEV,
    /// return the coefficient corresponding to specified loop.
    /// If there isn't one, return the SCEV constant 0.
    /// For example, given a*i + b*j + c*k, returning the coefficient
    /// corresponding to the j loop would yield b.
    const SCEV *findCoefficient(const SCEV *Expr,
                                const Loop *TargetLoop) const;

    /// zeroCoefficient - Given a linear SCEV,
    /// return the SCEV given by zeroing out the coefficient
    /// corresponding to the specified loop.
    /// For example, given a*i + b*j + c*k, zeroing the coefficient
    /// corresponding to the j loop would yield a*i + c*k.
    const SCEV *zeroCoefficient(const SCEV *Expr,
                                const Loop *TargetLoop) const;

    /// addToCoefficient - Given a linear SCEV Expr,
    /// return the SCEV given by adding some Value to the
    /// coefficient corresponding to the specified TargetLoop.
    /// For example, given a*i + b*j + c*k, adding 1 to the coefficient
    /// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
    const SCEV *addToCoefficient(const SCEV *Expr,
                                 const Loop *TargetLoop,
                                 const SCEV *Value) const;

    /// updateDirection - Update direction vector entry
    /// based on the current constraint.
    void updateDirection(Dependence::DVEntry &Level,
                         const Constraint &CurConstraint) const;

    /// Given a linear access function, tries to recover subscripts
    /// for each dimension of the array element access.
    bool tryDelinearize(Instruction *Src, Instruction *Dst,
                        SmallVectorImpl<Subscript> &Pair);

    /// Tries to delinearize \p Src and \p Dst access functions for a fixed size
    /// multi-dimensional array. Calls tryDelinearizeFixedSizeImpl() to
    /// delinearize \p Src and \p Dst separately.
    bool tryDelinearizeFixedSize(Instruction *Src, Instruction *Dst,
                                 const SCEV *SrcAccessFn,
                                 const SCEV *DstAccessFn,
                                 SmallVectorImpl<const SCEV *> &SrcSubscripts,
                                 SmallVectorImpl<const SCEV *> &DstSubscripts);

    /// Tries to delinearize access function for a multi-dimensional array with
    /// symbolic runtime sizes.
    /// Returns true upon success and false otherwise.
    bool tryDelinearizeParametricSize(
        Instruction *Src, Instruction *Dst, const SCEV *SrcAccessFn,
        const SCEV *DstAccessFn, SmallVectorImpl<const SCEV *> &SrcSubscripts,
        SmallVectorImpl<const SCEV *> &DstSubscripts);

    /// checkSubscript - Helper function for checkSrcSubscript and
    /// checkDstSubscript to avoid duplicate code.
    bool checkSubscript(const SCEV *Expr, const Loop *LoopNest,
                        SmallBitVector &Loops, bool IsSrc);
  }; // class DependenceInfo
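
  // Illustrative (non-normative) examples of the subscript classes the tests
  // above distinguish, with induction variables i and j, loop-invariant
  // values c1 and c2, and constants a1 and a2:
  //   ZIV:  A[c1]              vs A[c2]           -- no induction variable
  //   SIV:  A[c1 + a1*i]       vs A[c2 + a2*i]    -- a single induction variable
  //   RDIV: A[c1 + a1*i]       vs A[c2 + a2*j]    -- IVs from different loops
  //   MIV:  A[c1 + a1*i + a2*j] and similar       -- multiple induction variables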

  /// AnalysisPass to compute dependence information in a function
  class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
  public:
    typedef DependenceInfo Result;
    Result run(Function &F, FunctionAnalysisManager &FAM);

  private:
    static AnalysisKey Key;
    friend struct AnalysisInfoMixin<DependenceAnalysis>;
  }; // class DependenceAnalysis
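
  // Minimal usage sketch (illustrative; MyPass, Src, and Dst are placeholder
  // names): obtain the DependenceInfo result from the new pass manager and
  // query a pair of instructions via DependenceInfo::depends(), declared
  // earlier in this file.
  //
  //   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &FAM) {
  //     DependenceInfo &DI = FAM.getResult<DependenceAnalysis>(F);
  //     if (auto D = DI.depends(&Src, &Dst, /*PossiblyLoopIndependent=*/true))
  //       ; // dependence not disproved; inspect D->isConfused(), levels, etc.
  //     return PreservedAnalyses::all();
  //   }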

  /// Printer pass to dump DA results.
  struct DependenceAnalysisPrinterPass
      : public PassInfoMixin<DependenceAnalysisPrinterPass> {
    DependenceAnalysisPrinterPass(raw_ostream &OS,
                                  bool NormalizeResults = false)
        : OS(OS), NormalizeResults(NormalizeResults) {}

    PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);

  private:
    raw_ostream &OS;
    bool NormalizeResults;
  }; // class DependenceAnalysisPrinterPass

  /// Legacy pass manager pass to access dependence information
  class DependenceAnalysisWrapperPass : public FunctionPass {
  public:
    static char ID; // Class identification, replacement for typeinfo
    DependenceAnalysisWrapperPass();

    bool runOnFunction(Function &F) override;
    void releaseMemory() override;
    void getAnalysisUsage(AnalysisUsage &) const override;
    void print(raw_ostream &, const Module * = nullptr) const override;
    DependenceInfo &getDI() const;

  private:
    std::unique_ptr<DependenceInfo> info;
  }; // class DependenceAnalysisWrapperPass

  /// createDependenceAnalysisPass - This creates an instance of the
  /// DependenceAnalysis wrapper pass.
  FunctionPass *createDependenceAnalysisWrapperPass();
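
  // Legacy pass manager sketch (illustrative; MyLegacyPass is a placeholder):
  // require the wrapper pass, then pull the DependenceInfo out of it.
  //
  //   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  //     AU.addRequired<DependenceAnalysisWrapperPass>();
  //     AU.setPreservesAll();
  //   }
  //
  //   bool MyLegacyPass::runOnFunction(Function &F) {
  //     DependenceInfo &DI =
  //         getAnalysis<DependenceAnalysisWrapperPass>().getDI();
  //     return false; // analysis only; the IR is not modified
  //   }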

} // namespace llvm

#endif
// ===== Analysis/DominanceFrontierImpl.h =====
//===- llvm/Analysis/DominanceFrontierImpl.h - Dominator Frontiers -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the generic implementation of the DominanceFrontier class, which
// calculates and holds the dominance frontier for a function.
//
// This should be considered deprecated, don't add any more uses of this data
// structure.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
#define LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <set>
#include <utility>
#include <vector>

namespace llvm {

template <class BlockT>
class DFCalculateWorkObject {
public:
  using DomTreeNodeT = DomTreeNodeBase<BlockT>;

  DFCalculateWorkObject(BlockT *B, BlockT *P, const DomTreeNodeT *N,
                        const DomTreeNodeT *PN)
      : currentBB(B), parentBB(P), Node(N), parentNode(PN) {}

  BlockT *currentBB;
  BlockT *parentBB;
  const DomTreeNodeT *Node;
  const DomTreeNodeT *parentNode;
};

template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::removeBlock(BlockT *BB) {
  assert(find(BB) != end() && "Block is not in DominanceFrontier!");
  for (iterator I = begin(), E = end(); I != E; ++I)
    I->second.erase(BB);
  Frontiers.erase(BB);
}

template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::addToFrontier(iterator I,
                                                             BlockT *Node) {
  assert(I != end() && "BB is not in DominanceFrontier!");
  assert(!I->second.count(Node) && "Node is already in DominanceFrontier of BB");
  I->second.insert(Node);
}

template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::removeFromFrontier(
    iterator I, BlockT *Node) {
  assert(I != end() && "BB is not in DominanceFrontier!");
  assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
  I->second.erase(Node);
}

template <class BlockT, bool IsPostDom>
bool DominanceFrontierBase<BlockT, IsPostDom>::compareDomSet(
    DomSetType &DS1, const DomSetType &DS2) const {
  std::set<BlockT *> tmpSet;
  for (BlockT *BB : DS2)
    tmpSet.insert(BB);

  for (typename DomSetType::const_iterator I = DS1.begin(), E = DS1.end();
       I != E;) {
    BlockT *Node = *I++;

    if (tmpSet.erase(Node) == 0)
      // Node is in DS1 but not in DS2.
      return true;
  }

  if (!tmpSet.empty()) {
    // There are nodes that are in DS2 but not in DS1.
    return true;
  }

  // DS1 and DS2 match.
  return false;
}

template <class BlockT, bool IsPostDom>
bool DominanceFrontierBase<BlockT, IsPostDom>::compare(
    DominanceFrontierBase<BlockT, IsPostDom> &Other) const {
  DomSetMapType tmpFrontiers;
  for (typename DomSetMapType::const_iterator I = Other.begin(),
                                              E = Other.end();
       I != E; ++I)
    tmpFrontiers.insert(std::make_pair(I->first, I->second));

  for (typename DomSetMapType::iterator I = tmpFrontiers.begin(),
                                        E = tmpFrontiers.end();
       I != E;) {
    BlockT *Node = I->first;
    const_iterator DFI = find(Node);
    if (DFI == end())
      return true;

    if (compareDomSet(I->second, DFI->second))
      return true;

    ++I;
    tmpFrontiers.erase(Node);
  }

  if (!tmpFrontiers.empty())
    return true;

  return false;
}

template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::print(raw_ostream &OS) const {
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    OS << "  DomFrontier for BB ";
    if (I->first)
      I->first->printAsOperand(OS, false);
    else
      OS << " <<exit node>>";
    OS << " is:\t";

    const std::set<BlockT *> &BBs = I->second;

    for (const BlockT *BB : BBs) {
      OS << ' ';
      if (BB)
        BB->printAsOperand(OS, false);
      else
        OS << "<<exit node>>";
    }
    OS << '\n';
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::dump() const {
  print(dbgs());
}
#endif

template <class BlockT>
const typename ForwardDominanceFrontierBase<BlockT>::DomSetType &
ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT,
                                                const DomTreeNodeT *Node) {
  BlockT *BB = Node->getBlock();
  DomSetType *Result = nullptr;

  std::vector<DFCalculateWorkObject<BlockT>> workList;
  SmallPtrSet<BlockT *, 32> visited;

  workList.push_back(DFCalculateWorkObject<BlockT>(BB, nullptr, Node, nullptr));
  do {
    DFCalculateWorkObject<BlockT> *currentW = &workList.back();
    assert(currentW && "Missing work object.");

    BlockT *currentBB = currentW->currentBB;
    BlockT *parentBB = currentW->parentBB;
    const DomTreeNodeT *currentNode = currentW->Node;
    const DomTreeNodeT *parentNode = currentW->parentNode;
    assert(currentBB && "Invalid work object. Missing current Basic Block");
    assert(currentNode && "Invalid work object. Missing current Node");
    DomSetType &S = this->Frontiers[currentBB];

    // Visit each block only once.
    if (visited.insert(currentBB).second) {
      // Loop over CFG successors to calculate DFlocal[currentNode]
      for (const auto Succ : children<BlockT *>(currentBB)) {
        // Does Node immediately dominate this successor?
        if (DT[Succ]->getIDom() != currentNode)
          S.insert(Succ);
      }
    }

    // At this point, S is DFlocal.  Now we union in DFup's of our children...
    // Loop through and visit the nodes that Node immediately dominates (Node's
    // children in the IDomTree)
    bool visitChild = false;
    for (typename DomTreeNodeT::const_iterator NI = currentNode->begin(),
                                               NE = currentNode->end();
         NI != NE; ++NI) {
      DomTreeNodeT *IDominee = *NI;
      BlockT *childBB = IDominee->getBlock();
      if (visited.count(childBB) == 0) {
        workList.push_back(DFCalculateWorkObject<BlockT>(
            childBB, currentBB, IDominee, currentNode));
        visitChild = true;
      }
    }

    // If no unvisited child was pushed, this node is finished: merge its
    // frontier into the parent's and pop it from the workList.
    if (!visitChild) {
      if (!parentBB) {
        Result = &S;
        break;
      }

      typename DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
      DomSetType &parentSet = this->Frontiers[parentBB];
      for (; CDFI != CDFE; ++CDFI) {
        if (!DT.properlyDominates(parentNode, DT[*CDFI]))
          parentSet.insert(*CDFI);
      }
      workList.pop_back();
    }

  } while (!workList.empty());

  return *Result;
}
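
// Usage sketch (illustrative): the concrete DominanceFrontier analysis
// declared in DominanceFrontier.h drives calculate() through its analyze()
// entry point, assuming a valid DominatorTree for the function.
//
//   DominatorTree DT(F);
//   DominanceFrontier DF;
//   DF.analyze(DT);
//   for (auto &Entry : DF)                      // one frontier set per block
//     for (BasicBlock *FrontierBB : Entry.second)
//       ...;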

} // end namespace llvm

#endif // LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
// ===== Analysis/IVDescriptors.h =====
//===- llvm/Analysis/IVDescriptors.h - IndVar Descriptors -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file "describes" induction and recurrence variables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_IVDESCRIPTORS_H
#define LLVM_ANALYSIS_IVDESCRIPTORS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"

namespace llvm {

class AssumptionCache;
class DemandedBits;
class DominatorTree;
class Instruction;
class Loop;
class PredicatedScalarEvolution;
class ScalarEvolution;
class SCEV;
class StoreInst;

/// These are the kinds of recurrences that we support.
enum class RecurKind {
  None,       ///< Not a recurrence.
  Add,        ///< Sum of integers.
  Mul,        ///< Product of integers.
  Or,         ///< Bitwise or logical OR of integers.
  And,        ///< Bitwise or logical AND of integers.
  Xor,        ///< Bitwise or logical XOR of integers.
  SMin,       ///< Signed integer min implemented in terms of select(cmp()).
  SMax,       ///< Signed integer max implemented in terms of select(cmp()).
  UMin,       ///< Unsigned integer min implemented in terms of select(cmp()).
  UMax,       ///< Unsigned integer max implemented in terms of select(cmp()).
  FAdd,       ///< Sum of floats.
  FMul,       ///< Product of floats.
  FMin,       ///< FP min implemented in terms of select(cmp()).
  FMax,       ///< FP max implemented in terms of select(cmp()).
  FMinimum,   ///< FP min with llvm.minimum semantics
  FMaximum,   ///< FP max with llvm.maximum semantics
  FMulAdd,    ///< Fused multiply-add of floats (a * b + c).
  SelectICmp, ///< Integer select(icmp(),x,y) where one of (x,y) is loop
              ///< invariant
  SelectFCmp  ///< Integer select(fcmp(),x,y) where one of (x,y) is loop
              ///< invariant
};
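
// For illustration, a loop of the following shape would be recognized as
// RecurKind::SMin, a signed integer min expressed through select(cmp()):
//
//   int M = INT_MAX;
//   for (int i = 0; i < n; i++)
//     M = (a[i] < M) ? a[i] : M;   // select(icmp slt a[i], M)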

/// The RecurrenceDescriptor is used to identify recurrence variables in a
/// loop. Reduction is a special case of recurrence that has uses of the
/// recurrence variable outside the loop. The method isReductionPHI identifies
/// reductions that are basic recurrences.
///
/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
/// array[i]; } is a summation of array elements. Basic recurrences are a
/// special case of chains of recurrences (CR). See ScalarEvolution for CR
/// references.

/// This struct holds information about recurrence variables.
class RecurrenceDescriptor {
public:
  RecurrenceDescriptor() = default;

  RecurrenceDescriptor(Value *Start, Instruction *Exit, StoreInst *Store,
                       RecurKind K, FastMathFlags FMF, Instruction *ExactFP,
                       Type *RT, bool Signed, bool Ordered,
                       SmallPtrSetImpl<Instruction *> &CI,
                       unsigned MinWidthCastToRecurTy)
      : IntermediateStore(Store), StartValue(Start), LoopExitInstr(Exit),
        Kind(K), FMF(FMF), ExactFPMathInst(ExactFP), RecurrenceType(RT),
        IsSigned(Signed), IsOrdered(Ordered),
        MinWidthCastToRecurrenceType(MinWidthCastToRecurTy) {
    CastInsts.insert(CI.begin(), CI.end());
  }

  /// This POD struct holds information about a potential recurrence operation.
  class InstDesc {
  public:
    InstDesc(bool IsRecur, Instruction *I, Instruction *ExactFP = nullptr)
        : IsRecurrence(IsRecur), PatternLastInst(I),
          RecKind(RecurKind::None), ExactFPMathInst(ExactFP) {}

    InstDesc(Instruction *I, RecurKind K, Instruction *ExactFP = nullptr)
        : IsRecurrence(true), PatternLastInst(I), RecKind(K),
          ExactFPMathInst(ExactFP) {}

    bool isRecurrence() const { return IsRecurrence; }

    bool needsExactFPMath() const { return ExactFPMathInst != nullptr; }

    Instruction *getExactFPMathInst() const { return ExactFPMathInst; }

    RecurKind getRecKind() const { return RecKind; }

    Instruction *getPatternInst() const { return PatternLastInst; }

  private:
    // Is this instruction a recurrence candidate.
    bool IsRecurrence;
    // The last instruction in a min/max pattern (select of the select(icmp())
    // pattern), or the current recurrence instruction otherwise.
    Instruction *PatternLastInst;
    // If this is a min/max pattern.
    RecurKind RecKind;
    // Recurrence does not allow floating-point reassociation.
    Instruction *ExactFPMathInst;
  };

  /// Returns a struct describing if the instruction 'I' can be a recurrence
  /// variable of type 'Kind' for a Loop \p L and reduction PHI \p Phi.
  /// If the recurrence is a min/max pattern of select(icmp()) this function
  /// advances the instruction pointer 'I' from the compare instruction to the
  /// select instruction and stores this pointer in 'PatternLastInst' member of
  /// the returned struct.
  static InstDesc isRecurrenceInstr(Loop *L, PHINode *Phi, Instruction *I,
                                    RecurKind Kind, InstDesc &Prev,
                                    FastMathFlags FuncFMF);

  /// Returns true if instruction I has multiple uses in Insts
  static bool hasMultipleUsesOf(Instruction *I,
                                SmallPtrSetImpl<Instruction *> &Insts,
                                unsigned MaxNumUses);

  /// Returns true if all uses of the instruction I are within the Set.
  static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);

  /// Returns a struct describing if the instruction is a llvm.(s/u)(min/max),
  /// llvm.minnum/maxnum or a Select(ICmp(X, Y), X, Y) pair of instructions
  /// corresponding to a min(X, Y) or max(X, Y), matching the recurrence kind \p
  /// Kind. \p Prev specifies the description of an already processed select
  /// instruction, so its corresponding cmp can be matched to it.
  static InstDesc isMinMaxPattern(Instruction *I, RecurKind Kind,
                                  const InstDesc &Prev);

  /// Returns a struct describing whether the instruction is either a
  ///   Select(ICmp(A, B), X, Y), or
  ///   Select(FCmp(A, B), X, Y)
  /// where one of (X, Y) is a loop invariant integer and the other is a PHI
  /// value. \p Prev specifies the description of an already processed select
  /// instruction, so its corresponding cmp can be matched to it.
  static InstDesc isSelectCmpPattern(Loop *Loop, PHINode *OrigPhi,
                                     Instruction *I, InstDesc &Prev);

  /// Returns a struct describing if the instruction is a
  /// Select(FCmp(X, Y), (Z = X op PHINode), PHINode) instruction pattern.
  static InstDesc isConditionalRdxPattern(RecurKind Kind, Instruction *I);

  /// Returns identity corresponding to the RecurrenceKind.
  Value *getRecurrenceIdentity(RecurKind K, Type *Tp, FastMathFlags FMF) const;

  /// Returns the opcode corresponding to the RecurrenceKind.
  static unsigned getOpcode(RecurKind Kind);

  /// Returns true if Phi is a reduction of type Kind and adds it to the
  /// RecurrenceDescriptor. If either \p DB is non-null or \p AC and \p DT are
  /// non-null, the minimal bit width needed to compute the reduction will be
  /// computed.
  static bool
  AddReductionVar(PHINode *Phi, RecurKind Kind, Loop *TheLoop,
                  FastMathFlags FuncFMF, RecurrenceDescriptor &RedDes,
                  DemandedBits *DB = nullptr, AssumptionCache *AC = nullptr,
                  DominatorTree *DT = nullptr, ScalarEvolution *SE = nullptr);

  /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor
  /// is returned in RedDes. If either \p DB is non-null or \p AC and \p DT are
  /// non-null, the minimal bit width needed to compute the reduction will be
  /// computed. If \p SE is non-null, store instructions to loop invariant
  /// addresses are processed.
  static bool
  isReductionPHI(PHINode *Phi, Loop *TheLoop, RecurrenceDescriptor &RedDes,
                 DemandedBits *DB = nullptr, AssumptionCache *AC = nullptr,
                 DominatorTree *DT = nullptr, ScalarEvolution *SE = nullptr);

  /// Returns true if Phi is a fixed-order recurrence. A fixed-order recurrence
  /// is a non-reduction recurrence relation in which the value of the
  /// recurrence in the current loop iteration equals a value defined in a
  /// previous iteration (e.g. if the value is defined in the previous
  /// iteration, we refer to it as first-order recurrence, if it is defined in
  /// the iteration before the previous, we refer to it as second-order
  /// recurrence and so on). Note that this function optimistically assumes that
  /// uses of the recurrence can be re-ordered if necessary and users need to
  /// check and perform the re-ordering.
  static bool isFixedOrderRecurrence(PHINode *Phi, Loop *TheLoop,
                                     DominatorTree *DT);

  RecurKind getRecurrenceKind() const { return Kind; }

  unsigned getOpcode() const { return getOpcode(getRecurrenceKind()); }

  FastMathFlags getFastMathFlags() const { return FMF; }

  TrackingVH<Value> getRecurrenceStartValue() const { return StartValue; }

  Instruction *getLoopExitInstr() const { return LoopExitInstr; }

  /// Returns true if the recurrence has floating-point math that requires
  /// precise (ordered) operations.
  bool hasExactFPMath() const { return ExactFPMathInst != nullptr; }

  /// Returns 1st non-reassociative FP instruction in the PHI node's use-chain.
  Instruction *getExactFPMathInst() const { return ExactFPMathInst; }

  /// Returns true if the recurrence kind is an integer kind.
  static bool isIntegerRecurrenceKind(RecurKind Kind);

  /// Returns true if the recurrence kind is a floating point kind.
  static bool isFloatingPointRecurrenceKind(RecurKind Kind);

  /// Returns true if the recurrence kind is an integer min/max kind.
  static bool isIntMinMaxRecurrenceKind(RecurKind Kind) {
    return Kind == RecurKind::UMin || Kind == RecurKind::UMax ||
           Kind == RecurKind::SMin || Kind == RecurKind::SMax;
  }

  /// Returns true if the recurrence kind is a floating-point min/max kind.
  static bool isFPMinMaxRecurrenceKind(RecurKind Kind) {
    return Kind == RecurKind::FMin || Kind == RecurKind::FMax ||
           Kind == RecurKind::FMinimum || Kind == RecurKind::FMaximum;
  }

  /// Returns true if the recurrence kind is any min/max kind.
  static bool isMinMaxRecurrenceKind(RecurKind Kind) {
    return isIntMinMaxRecurrenceKind(Kind) || isFPMinMaxRecurrenceKind(Kind);
  }

  /// Returns true if the recurrence kind is of the form
  ///   select(cmp(),x,y) where one of (x,y) is loop invariant.
  static bool isSelectCmpRecurrenceKind(RecurKind Kind) {
    return Kind == RecurKind::SelectICmp || Kind == RecurKind::SelectFCmp;
  }

  /// Returns the type of the recurrence. This type can be narrower than the
  /// actual type of the Phi if the recurrence has been type-promoted.
  Type *getRecurrenceType() const { return RecurrenceType; }

  /// Returns a reference to the instructions used for type-promoting the
  /// recurrence.
  const SmallPtrSet<Instruction *, 8> &getCastInsts() const { return CastInsts; }

  /// Returns the minimum width used by the recurrence in bits.
  unsigned getMinWidthCastToRecurrenceTypeInBits() const {
    return MinWidthCastToRecurrenceType;
  }

  /// Returns true if all source operands of the recurrence are SExtInsts.
  bool isSigned() const { return IsSigned; }

  /// Expose an ordered FP reduction to the instance users.
  bool isOrdered() const { return IsOrdered; }

  /// Attempts to find a chain of operations from Phi to LoopExitInst that can
  /// be treated as a set of reductions instructions for in-loop reductions.
  SmallVector<Instruction *, 4> getReductionOpChain(PHINode *Phi,
                                                    Loop *L) const;

  /// Returns true if the instruction is a call to the llvm.fmuladd intrinsic.
  static bool isFMulAddIntrinsic(Instruction *I) {
    return isa<IntrinsicInst>(I) &&
           cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::fmuladd;
  }

  /// Reductions may store a temporary or final result to an invariant address.
  /// If there is such a store in the loop then, after a successful run of the
  /// AddReductionVar method, this field will be assigned the last store
  /// encountered.
  StoreInst *IntermediateStore = nullptr;

private:
  // The starting value of the recurrence.
  // It does not have to be zero!
  TrackingVH<Value> StartValue;
  // The instruction whose value is used outside the loop.
  Instruction *LoopExitInstr = nullptr;
  // The kind of the recurrence.
  RecurKind Kind = RecurKind::None;
  // The fast-math flags on the recurrent instructions.  We propagate these
  // fast-math flags into the vectorized FP instructions we generate.
  FastMathFlags FMF;
  // First instance of non-reassociative floating-point in the PHI's use-chain.
  Instruction *ExactFPMathInst = nullptr;
  // The type of the recurrence.
  Type *RecurrenceType = nullptr;
  // True if all source operands of the recurrence are SExtInsts.
  bool IsSigned = false;
  // True if this recurrence can be treated as an in-order reduction.
  // Currently only a non-reassociative FAdd can be considered in-order,
  // if it is also the only FAdd in the PHI's use chain.
  bool IsOrdered = false;
  // Instructions used for type-promoting the recurrence.
  SmallPtrSet<Instruction *, 8> CastInsts;
  // The minimum width used by the recurrence.
  unsigned MinWidthCastToRecurrenceType;
};
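
// Usage sketch (illustrative): classify a loop-header PHI as a reduction and
// inspect the resulting descriptor.
//
//   RecurrenceDescriptor RedDes;
//   if (RecurrenceDescriptor::isReductionPHI(Phi, TheLoop, RedDes)) {
//     RecurKind Kind = RedDes.getRecurrenceKind();
//     Instruction *ExitInst = RedDes.getLoopExitInstr();
//     bool InOrder = RedDes.isOrdered(); // must the reduction stay in order?
//   }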

/// A struct for saving information about induction variables.
class InductionDescriptor {
public:
  /// This enum represents the kinds of inductions that we support.
  enum InductionKind {
    IK_NoInduction,  ///< Not an induction variable.
    IK_IntInduction, ///< Integer induction variable. Step = C.
    IK_PtrInduction, ///< Pointer induction var. Step = C.
    IK_FpInduction   ///< Floating point induction variable.
  };

public:
  /// Default constructor - creates an invalid induction.
  InductionDescriptor() = default;

  Value *getStartValue() const { return StartValue; }
  InductionKind getKind() const { return IK; }
  const SCEV *getStep() const { return Step; }
  BinaryOperator *getInductionBinOp() const { return InductionBinOp; }
  ConstantInt *getConstIntStepValue() const;

  /// Returns true if \p Phi is an induction in the loop \p L. If \p Phi is an
  /// induction, the induction descriptor \p D will contain the data describing
  /// this induction. Since Induction Phis can only be present inside loop
  /// headers, the function will assert if it is passed a Phi whose parent is
  /// not the loop header. If by some other means the caller has a better SCEV
  /// expression for \p Phi than the one returned by the ScalarEvolution
  /// analysis, it can be passed through \p Expr. If the def-use chain
  /// associated with the phi includes casts (that we know we can ignore
  /// under proper runtime checks), they are passed through \p CastsToIgnore.
  static bool
  isInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
                 InductionDescriptor &D, const SCEV *Expr = nullptr,
                 SmallVectorImpl<Instruction *> *CastsToIgnore = nullptr);

  /// Returns true if \p Phi is a floating point induction in the loop \p L.
  /// If \p Phi is an induction, the induction descriptor \p D will contain
  /// the data describing this induction.
  static bool isFPInductionPHI(PHINode *Phi, const Loop *L, ScalarEvolution *SE,
                               InductionDescriptor &D);

  /// Returns true if \p Phi is a loop \p L induction, in the context associated
  /// with the run-time predicate of PSE. If \p Assume is true, this can add
  /// further SCEV predicates to \p PSE in order to prove that \p Phi is an
  /// induction.
  /// If \p Phi is an induction, \p D will contain the data describing this
  /// induction.
  static bool isInductionPHI(PHINode *Phi, const Loop *L,
                             PredicatedScalarEvolution &PSE,
                             InductionDescriptor &D, bool Assume = false);

  /// Returns floating-point induction operator that does not allow
  /// reassociation (transforming the induction requires an override of normal
  /// floating-point rules).
  Instruction *getExactFPMathInst() {
    if (IK == IK_FpInduction && InductionBinOp &&
        !InductionBinOp->hasAllowReassoc())
      return InductionBinOp;
    return nullptr;
  }

  /// Returns binary opcode of the induction operator.
  Instruction::BinaryOps getInductionOpcode() const {
    return InductionBinOp ? InductionBinOp->getOpcode()
                          : Instruction::BinaryOpsEnd;
  }

  /// Returns a reference to the type cast instructions in the induction
  /// update chain, that are redundant when guarded with a runtime
  /// SCEV overflow check.
  const SmallVectorImpl<Instruction *> &getCastInsts() const {
    return RedundantCasts;
  }

private:
  /// Private constructor - used by \c isInductionPHI.
  InductionDescriptor(Value *Start, InductionKind K, const SCEV *Step,
                      BinaryOperator *InductionBinOp = nullptr,
                      SmallVectorImpl<Instruction *> *Casts = nullptr);

  /// Start value.
  TrackingVH<Value> StartValue;
  /// Induction kind.
  InductionKind IK = IK_NoInduction;
  /// Step value.
  const SCEV *Step = nullptr;
  // Instruction that advances induction variable.
  BinaryOperator *InductionBinOp = nullptr;
  // Instructions used for type-casts of the induction variable,
  // that are redundant when guarded with a runtime SCEV overflow check.
  SmallVector<Instruction *, 2> RedundantCasts;
};
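
// Usage sketch (illustrative): recognize an induction PHI and read its step.
// Phi, L, and SE are placeholders for a loop-header PHI, its loop, and a
// ScalarEvolution instance.
//
//   InductionDescriptor ID;
//   if (InductionDescriptor::isInductionPHI(Phi, L, &SE, ID)) {
//     const SCEV *Step = ID.getStep();
//     if (ConstantInt *C = ID.getConstIntStepValue())
//       ...; // constant-stepped induction, e.g. i += 1
//   }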

} // end namespace llvm

#endif // LLVM_ANALYSIS_IVDESCRIPTORS_H
// ===== Analysis/CFGPrinter.h =====
//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a 'dot-cfg' analysis pass, which emits the
// cfg.<fnname>.dot file for each function in the program, with a graph of the
// CFG for that function.
//
// This file defines external functions that can be called to explicitly
// instantiate the CFG printer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CFGPRINTER_H
#define LLVM_ANALYSIS_CFGPRINTER_H

#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/HeatUtils.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/FormatVariadic.h"

namespace llvm {
template <class GraphType> struct GraphTraits;
class CFGViewerPass : public PassInfoMixin<CFGViewerPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

class CFGOnlyViewerPass : public PassInfoMixin<CFGOnlyViewerPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

class CFGPrinterPass : public PassInfoMixin<CFGPrinterPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

class CFGOnlyPrinterPass : public PassInfoMixin<CFGOnlyPrinterPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

class DOTFuncInfo {
private:
  const Function *F;
  const BlockFrequencyInfo *BFI;
  const BranchProbabilityInfo *BPI;
  uint64_t MaxFreq;
  bool ShowHeat;
  bool EdgeWeights;
  bool RawWeights;

public:
  DOTFuncInfo(const Function *F) : DOTFuncInfo(F, nullptr, nullptr, 0) {}

  DOTFuncInfo(const Function *F, const BlockFrequencyInfo *BFI,
              const BranchProbabilityInfo *BPI, uint64_t MaxFreq)
      : F(F), BFI(BFI), BPI(BPI), MaxFreq(MaxFreq) {
    ShowHeat = false;
    EdgeWeights = !!BPI; // Print EdgeWeights when BPI is available.
    RawWeights = !!BFI;  // Print RawWeights when BFI is available.
  }

  const BlockFrequencyInfo *getBFI() const { return BFI; }

  const BranchProbabilityInfo *getBPI() const { return BPI; }

  const Function *getFunction() const { return this->F; }

  uint64_t getMaxFreq() const { return MaxFreq; }

  uint64_t getFreq(const BasicBlock *BB) const {
    return BFI->getBlockFreq(BB).getFrequency();
  }

  void setHeatColors(bool ShowHeat) { this->ShowHeat = ShowHeat; }

  bool showHeatColors() { return ShowHeat; }

  void setRawEdgeWeights(bool RawWeights) { this->RawWeights = RawWeights; }

  bool useRawEdgeWeights() { return RawWeights; }

  void setEdgeWeights(bool EdgeWeights) { this->EdgeWeights = EdgeWeights; }

  bool showEdgeWeights() { return EdgeWeights; }
};
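
// Construction sketch (illustrative; assumes getMaxFreq() as declared in
// HeatUtils.h, which is included above):
//
//   DOTFuncInfo CFGInfo(&F, &BFI, &BPI, getMaxFreq(F, &BFI));
//   CFGInfo.setHeatColors(true);      // color nodes by block frequency
//   CFGInfo.setRawEdgeWeights(false); // print percentages, not raw weights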

template <>
struct GraphTraits<DOTFuncInfo *> : public GraphTraits<const BasicBlock *> {
  static NodeRef getEntryNode(DOTFuncInfo *CFGInfo) {
    return &(CFGInfo->getFunction()->getEntryBlock());
  }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<Function::const_iterator>;

  static nodes_iterator nodes_begin(DOTFuncInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->begin());
  }

  static nodes_iterator nodes_end(DOTFuncInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->end());
  }

  static size_t size(DOTFuncInfo *CFGInfo) {
    return CFGInfo->getFunction()->size();
  }
};

template <typename BasicBlockT>
std::string SimpleNodeLabelString(const BasicBlockT *Node) {
  if (!Node->getName().empty())
    return Node->getName().str();

  std::string Str;
  raw_string_ostream OS(Str);

  Node->printAsOperand(OS, false);
  return OS.str();
}

template <typename BasicBlockT>
std::string CompleteNodeLabelString(
    const BasicBlockT *Node,
    function_ref<void(raw_string_ostream &, const BasicBlockT &)>
        HandleBasicBlock,
    function_ref<void(std::string &, unsigned &, unsigned)>
        HandleComment) {

  enum { MaxColumns = 80 };
  std::string Str;
  raw_string_ostream OS(Str);

  if (Node->getName().empty()) {
    Node->printAsOperand(OS, false);
    OS << ':';
  }

  HandleBasicBlock(OS, *Node);
  std::string OutStr = OS.str();
  if (OutStr[0] == '\n')
    OutStr.erase(OutStr.begin());

  unsigned ColNum = 0;
  unsigned LastSpace = 0;
  for (unsigned i = 0; i != OutStr.length(); ++i) {
    if (OutStr[i] == '\n') { // Left justify
      OutStr[i] = '\\';
      OutStr.insert(OutStr.begin() + i + 1, 'l');
      ColNum = 0;
      LastSpace = 0;
    } else if (OutStr[i] == ';') {             // Delete comments!
      unsigned Idx = OutStr.find('\n', i + 1); // Find end of line
      HandleComment(OutStr, i, Idx);
    } else if (ColNum == MaxColumns) { // Wrap lines.
      // Wrap very long names even though we can't find a space.
      if (!LastSpace)
        LastSpace = i;
      OutStr.insert(LastSpace, "\\l...");
      ColNum = i - LastSpace;
      LastSpace = 0;
      i += 3; // The loop will advance 'i' again.
    } else
      ++ColNum;
    if (OutStr[i] == ' ')
      LastSpace = i;
  }
  return OutStr;
}
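
// Note on the escaping above: in Graphviz DOT, "\l" terminates a
// left-justified line within a node label, so CompleteNodeLabelString turns
// each IR line into its own left-aligned row and wraps rows that exceed
// MaxColumns (80) characters.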

template <>
struct DOTGraphTraits<DOTFuncInfo *> : public DefaultDOTGraphTraits {

  // Cache for the isNodeHidden property.
  DenseMap<const BasicBlock *, bool> isOnDeoptOrUnreachablePath;

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static void eraseComment(std::string &OutStr, unsigned &I, unsigned Idx) {
    OutStr.erase(OutStr.begin() + I, OutStr.begin() + Idx);
    --I;
  }

  static std::string getGraphName(DOTFuncInfo *CFGInfo) {
    return "CFG for '" + CFGInfo->getFunction()->getName().str() + "' function";
  }

  static std::string getSimpleNodeLabel(const BasicBlock *Node, DOTFuncInfo *) {
    return SimpleNodeLabelString(Node);
  }

  static std::string getCompleteNodeLabel(
      const BasicBlock *Node, DOTFuncInfo *,
      function_ref<void(raw_string_ostream &, const BasicBlock &)>
          HandleBasicBlock = [](raw_string_ostream &OS,
                                const BasicBlock &Node) -> void { OS << Node; },
      function_ref<void(std::string &, unsigned &, unsigned)>
          HandleComment = eraseComment) {
    return CompleteNodeLabelString(Node, HandleBasicBlock, HandleComment);
  }

  std::string getNodeLabel(const BasicBlock *Node, DOTFuncInfo *CFGInfo) {

    if (isSimple())
      return getSimpleNodeLabel(Node, CFGInfo);
    else
      return getCompleteNodeLabel(Node, CFGInfo);
  }

  static std::string getEdgeSourceLabel(const BasicBlock *Node,
                                        const_succ_iterator I) {
    // Label source of conditional branches with "T" or "F"
    if (const BranchInst *BI = dyn_cast<BranchInst>(Node->getTerminator()))
      if (BI->isConditional())
        return (I == succ_begin(Node)) ? "T" : "F";

    // Label source of switch edges with the associated value.
    if (const SwitchInst *SI = dyn_cast<SwitchInst>(Node->getTerminator())) {
      unsigned SuccNo = I.getSuccessorIndex();

      if (SuccNo == 0)
        return "def";

      std::string Str;
      raw_string_ostream OS(Str);
      auto Case = *SwitchInst::ConstCaseIt::fromSuccessorIndex(SI, SuccNo);
      OS << Case.getCaseValue()->getValue();
      return OS.str();
    }
    return "";
  }

  /// Display the raw branch weights from PGO.
  std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
                                DOTFuncInfo *CFGInfo) {
    if (!CFGInfo->showEdgeWeights())
      return "";

    const Instruction *TI = Node->getTerminator();
    if (TI->getNumSuccessors() == 1)
      return "penwidth=2";

    unsigned OpNo = I.getSuccessorIndex();

    if (OpNo >= TI->getNumSuccessors())
      return "";

    BasicBlock *SuccBB = TI->getSuccessor(OpNo);
    auto BranchProb = CFGInfo->getBPI()->getEdgeProbability(Node, SuccBB);
    double WeightPercent = ((double)BranchProb.getNumerator()) /
                           ((double)BranchProb.getDenominator());
    double Width = 1 + WeightPercent;

    if (!CFGInfo->useRawEdgeWeights())
      return formatv("label=\"{0:P}\" penwidth={1}", WeightPercent, Width)
          .str();

    // Prepend a 'W' to indicate that this is a weight rather than the actual
    // profile count (due to scaling).

    uint64_t Freq = CFGInfo->getFreq(Node);
    std::string Attrs = formatv("label=\"W:{0}\" penwidth={1}",
                                (uint64_t)(Freq * WeightPercent), Width);
    if (Attrs.size())
      return Attrs;

    MDNode *WeightsNode = getBranchWeightMDNode(*TI);
    if (!WeightsNode)
      return "";

    OpNo = I.getSuccessorIndex() + 1;
    if (OpNo >= WeightsNode->getNumOperands())
      return "";
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(OpNo));
    if (!Weight)
      return "";
    return ("label=\"W:" + std::to_string(Weight->getZExtValue()) +
            "\" penwidth=" + std::to_string(Width));
  }

  std::string getNodeAttributes(const BasicBlock *Node, DOTFuncInfo *CFGInfo) {

    if (!CFGInfo->showHeatColors())
      return "";

    uint64_t Freq = CFGInfo->getFreq(Node);
    std::string Color = getHeatColor(Freq, CFGInfo->getMaxFreq());
    std::string EdgeColor = (Freq <= (CFGInfo->getMaxFreq() / 2))
                                ? (getHeatColor(0))
                                : (getHeatColor(1));

    std::string Attrs = "color=\"" + EdgeColor + "ff\", style=filled," +
                        " fillcolor=\"" + Color + "70\"";
    return Attrs;
  }
  bool isNodeHidden(const BasicBlock *Node, const DOTFuncInfo *CFGInfo);
  void computeDeoptOrUnreachablePaths(const Function *F);
};
} // End llvm namespace

namespace llvm {
class FunctionPass;
FunctionPass *createCFGPrinterLegacyPassPass();
FunctionPass *createCFGOnlyPrinterLegacyPassPass();
} // End llvm namespace
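
// Usage sketch: these passes are normally driven from 'opt' with the new
// pass manager, e.g.
//
//   opt -passes=dot-cfg      foo.ll -disable-output  // full node labels
//   opt -passes=dot-cfg-only foo.ll -disable-output  // graph shape only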

#endif
// ===== Analysis/LazyValueInfo.h =====
//===- LazyValueInfo.h - Value constraint analysis --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for lazy computation of value constraint
// information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LAZYVALUEINFO_H
#define LLVM_ANALYSIS_LAZYVALUEINFO_H

#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"

namespace llvm {
  class AssumptionCache;
  class Constant;
  class ConstantRange;
  class DataLayout;
  class DominatorTree;
  class Instruction;
  class TargetLibraryInfo;
  class Value;

/// This pass computes, caches, and vends lazy value constraint information.
class LazyValueInfo {
  friend class LazyValueInfoWrapperPass;
  AssumptionCache *AC = nullptr;
  const DataLayout *DL = nullptr;
  class TargetLibraryInfo *TLI = nullptr;
  void *PImpl = nullptr;
  LazyValueInfo(const LazyValueInfo&) = delete;
  void operator=(const LazyValueInfo&) = delete;
public:
  ~LazyValueInfo();
  LazyValueInfo() = default;
  LazyValueInfo(AssumptionCache *AC_, const DataLayout *DL_,
                TargetLibraryInfo *TLI_)
      : AC(AC_), DL(DL_), TLI(TLI_) {}
  LazyValueInfo(LazyValueInfo &&Arg)
      : AC(Arg.AC), DL(Arg.DL), TLI(Arg.TLI), PImpl(Arg.PImpl) {
    Arg.PImpl = nullptr;
  }
  LazyValueInfo &operator=(LazyValueInfo &&Arg) {
    releaseMemory();
    AC = Arg.AC;
    DL = Arg.DL;
    TLI = Arg.TLI;
    PImpl = Arg.PImpl;
    Arg.PImpl = nullptr;
    return *this;
  }

  /// This is used to return true/false/dunno results.
  enum Tristate {
    Unknown = -1, False = 0, True = 1
  };

  // Public query interface.

  /// Determine whether the specified value comparison with a constant is known
  /// to be true or false on the specified CFG edge.
  /// Pred is a CmpInst predicate.
  Tristate getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
                              BasicBlock *FromBB, BasicBlock *ToBB,
                              Instruction *CxtI = nullptr);

  /// Determine whether the specified value comparison with a constant is known
  /// to be true or false at the specified instruction.
  /// \p Pred is a CmpInst predicate. If \p UseBlockValue is true, the block
  /// value is also taken into account.
  Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
                          Instruction *CxtI, bool UseBlockValue);

  /// Determine whether the specified value comparison is known to be true
  /// or false at the specified instruction. While this takes two Value's,
  /// it still requires that one of them is a constant.
  /// \p Pred is a CmpInst predicate.
  /// If \p UseBlockValue is true, the block value is also taken into account.
  Tristate getPredicateAt(unsigned Pred, Value *LHS, Value *RHS,
                          Instruction *CxtI, bool UseBlockValue);

  /// Determine whether the specified value is known to be a constant at the
  /// specified instruction. Return null if not.
  Constant *getConstant(Value *V, Instruction *CxtI);

  /// Return the ConstantRange constraint that is known to hold for the
  /// specified value at the specified instruction. This may only be called
  /// on integer-typed Values.
  ConstantRange getConstantRange(Value *V, Instruction *CxtI,
                                 bool UndefAllowed = true);

  /// Return the ConstantRange constraint that is known to hold for the value
  /// at a specific use-site.
  ConstantRange getConstantRangeAtUse(const Use &U, bool UndefAllowed = true);

  /// Determine whether the specified value is known to be a
  /// constant on the specified edge.  Return null if not.
  Constant *getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
                              Instruction *CxtI = nullptr);

  /// Return the ConstantRange constraint that is known to hold for the
  /// specified value on the specified edge. This may only be called
  /// on integer-typed Values.
  ConstantRange getConstantRangeOnEdge(Value *V, BasicBlock *FromBB,
                                       BasicBlock *ToBB,
                                       Instruction *CxtI = nullptr);

  /// Inform the analysis cache that we have threaded an edge from
  /// PredBB to OldSucc to be from PredBB to NewSucc instead.
  void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);

  /// Remove information related to this value from the cache.
  void forgetValue(Value *V);

  /// Inform the analysis cache that we have erased a block.
  void eraseBlock(BasicBlock *BB);

  /// Completely flush all previously computed values.
  void clear(const Module *M);

  /// Print the \c LazyValueInfo analysis.
  /// We pass in the DominatorTree that is required for identifying which
  /// basic blocks we can solve/print for in the LVIPrinter.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS);

  // For old PM pass. Delete once LazyValueInfoWrapperPass is gone.
  void releaseMemory();

  /// Handle invalidation events in the new pass manager.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);
};
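
// Usage sketch (illustrative; V, C, and CxtI are placeholders for a value,
// a constant, and a context instruction):
//
//   LazyValueInfo &LVI = FAM.getResult<LazyValueAnalysis>(F);
//   ConstantRange CR = LVI.getConstantRange(V, CxtI);
//   if (LVI.getPredicateAt(CmpInst::ICMP_SLT, V, C, CxtI,
//                          /*UseBlockValue=*/true) == LazyValueInfo::True)
//     ...; // V < C is known to hold at CxtI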

/// Analysis to compute lazy value information.
class LazyValueAnalysis : public AnalysisInfoMixin<LazyValueAnalysis> {
public:
  typedef LazyValueInfo Result;
  Result run(Function &F, FunctionAnalysisManager &FAM);

private:
  static AnalysisKey Key;
  friend struct AnalysisInfoMixin<LazyValueAnalysis>;
};

/// Wrapper around LazyValueInfo.
class LazyValueInfoWrapperPass : public FunctionPass {
  LazyValueInfoWrapperPass(const LazyValueInfoWrapperPass&) = delete;
  void operator=(const LazyValueInfoWrapperPass&) = delete;
public:
  static char ID;
  LazyValueInfoWrapperPass();
  ~LazyValueInfoWrapperPass() override {
    assert(!Info.PImpl && "releaseMemory not called");
  }

  LazyValueInfo &getLVI();

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  bool runOnFunction(Function &F) override;
private:
  LazyValueInfo Info;
};
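
// A minimal usage sketch, assuming a Function &F and an already-populated
// FunctionAnalysisManager FAM are in scope (V, C, and CxtI name an arbitrary
// Value, Constant, and context Instruction):
//
//   LazyValueInfo &LVI = FAM.getResult<LazyValueAnalysis>(F);
//   if (Constant *Known = LVI.getConstant(V, CxtI))
//     ; // V is known to be the constant Known at CxtI.
//   ConstantRange CR = LVI.getConstantRange(V, CxtI);
//   LazyValueInfo::Tristate T =
//       LVI.getPredicateAt(CmpInst::ICMP_EQ, V, C, CxtI,
//                          /*UseBlockValue=*/true);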

}  // end namespace llvm

#endif

//===- llvm/Analysis/MemoryProfileInfo.h - memory profile info ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utilities to analyze memory profile information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYPROFILEINFO_H
#define LLVM_ANALYSIS_MEMORYPROFILEINFO_H

#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include <map>

namespace llvm {
namespace memprof {

/// Return the allocation type for a given set of memory profile values.
AllocationType getAllocType(uint64_t TotalLifetimeAccessDensity,
                            uint64_t AllocCount, uint64_t TotalLifetime);

/// Build callstack metadata from the provided list of call stack ids. Returns
/// the resulting metadata node.
MDNode *buildCallstackMetadata(ArrayRef<uint64_t> CallStack, LLVMContext &Ctx);

/// Returns the stack node from an MIB metadata node.
MDNode *getMIBStackNode(const MDNode *MIB);

/// Returns the allocation type from an MIB metadata node.
AllocationType getMIBAllocType(const MDNode *MIB);

/// Returns the string to use in attributes with the given type.
std::string getAllocTypeAttributeString(AllocationType Type);

/// True if the AllocTypes bitmask contains just a single type.
bool hasSingleAllocType(uint8_t AllocTypes);

/// Class to build a trie of call stack contexts for a particular profiled
/// allocation call, along with their associated allocation types.
/// The allocation will be at the root of the trie, which is then used to
/// compute the minimum lists of context ids needed to associate a call context
/// with a single allocation type.
class CallStackTrie {
private:
  struct CallStackTrieNode {
    // Allocation types for call contexts sharing the context prefix at this
    // node.
    uint8_t AllocTypes;
    // Map of caller stack id to the corresponding child Trie node.
    std::map<uint64_t, CallStackTrieNode *> Callers;
    CallStackTrieNode(AllocationType Type)
        : AllocTypes(static_cast<uint8_t>(Type)) {}
  };

  // The node for the allocation at the root.
  CallStackTrieNode *Alloc = nullptr;
  // The allocation's leaf stack id.
  uint64_t AllocStackId = 0;

  void deleteTrieNode(CallStackTrieNode *Node) {
    if (!Node)
      return;
    for (auto C : Node->Callers)
      deleteTrieNode(C.second);
    delete Node;
  }

  // Recursive helper to trim contexts and create metadata nodes.
  bool buildMIBNodes(CallStackTrieNode *Node, LLVMContext &Ctx,
                     std::vector<uint64_t> &MIBCallStack,
                     std::vector<Metadata *> &MIBNodes,
                     bool CalleeHasAmbiguousCallerContext);

public:
  CallStackTrie() = default;
  ~CallStackTrie() { deleteTrieNode(Alloc); }

  bool empty() const { return Alloc == nullptr; }

  /// Add a call stack context with the given allocation type to the Trie.
  /// The context is represented by the list of stack ids (computed during
  /// matching via a debug location hash), expected to be in order from the
  /// allocation call down to the bottom of the call stack (i.e. callee to
  /// caller order).
  void addCallStack(AllocationType AllocType, ArrayRef<uint64_t> StackIds);

  /// Add the call stack context along with its allocation type from the MIB
  /// metadata to the Trie.
  void addCallStack(MDNode *MIB);

  /// Build and attach the minimal necessary MIB metadata. If the alloc has a
  /// single allocation type, add a function attribute instead. The reason for
  /// adding an attribute in this case is that it matches how the behavior for
  /// allocation calls will be communicated to lib call simplification after
  /// cloning or another optimization to distinguish the allocation types,
  /// which is lower overhead and more direct than maintaining this metadata.
  /// Returns true if memprof metadata was attached, false if not (attribute
  /// added).
  bool buildAndAttachMIBMetadata(CallBase *CI);
};
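
// A minimal usage sketch, assuming AllocCall is the profiled allocation call
// (the stack ids below are illustrative; AllocationType comes from
// ModuleSummaryIndex.h):
//
//   CallStackTrie Trie;
//   Trie.addCallStack(AllocationType::Cold, {/*Alloc=*/0x1, /*Caller=*/0x2});
//   Trie.addCallStack(AllocationType::NotCold, {/*Alloc=*/0x1, /*Caller=*/0x3});
//   // Attaches multiple MIB metadata nodes here, since the two contexts have
//   // different allocation types.
//   bool AttachedMD = Trie.buildAndAttachMIBMetadata(AllocCall);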

/// Helper class to iterate through stack ids in both metadata (memprof MIB and
/// callsite) and the corresponding ThinLTO summary data structures
/// (CallsiteInfo and MIBInfo). This simplifies implementation of client code
/// which doesn't need to worry about whether we are operating with IR (Regular
/// LTO), or summary (ThinLTO).
template <class NodeT, class IteratorT> class CallStack {
public:
  CallStack(const NodeT *N = nullptr) : N(N) {}

  // Implement minimum required methods for range-based for loop.
  // The default implementation assumes we are operating on ThinLTO data
  // structures, which have a vector of StackIdIndices. There are specialized
  // versions provided to iterate through metadata.
  struct CallStackIterator {
    const NodeT *N = nullptr;
    IteratorT Iter;
    CallStackIterator(const NodeT *N, bool End);
    uint64_t operator*();
    bool operator==(const CallStackIterator &rhs) { return Iter == rhs.Iter; }
    bool operator!=(const CallStackIterator &rhs) { return !(*this == rhs); }
    void operator++() { ++Iter; }
  };

  bool empty() const { return N == nullptr; }

  CallStackIterator begin() const;
  CallStackIterator end() const { return CallStackIterator(N, /*End*/ true); }
  CallStackIterator beginAfterSharedPrefix(CallStack &Other);
  uint64_t back() const;

private:
  const NodeT *N = nullptr;
};

template <class NodeT, class IteratorT>
CallStack<NodeT, IteratorT>::CallStackIterator::CallStackIterator(
    const NodeT *N, bool End)
    : N(N) {
  if (!N) {
    Iter = nullptr;
    return;
  }
  Iter = End ? N->StackIdIndices.end() : N->StackIdIndices.begin();
}

template <class NodeT, class IteratorT>
uint64_t CallStack<NodeT, IteratorT>::CallStackIterator::operator*() {
  assert(Iter != N->StackIdIndices.end());
  return *Iter;
}

template <class NodeT, class IteratorT>
uint64_t CallStack<NodeT, IteratorT>::back() const {
  assert(N);
  return N->StackIdIndices.back();
}

template <class NodeT, class IteratorT>
typename CallStack<NodeT, IteratorT>::CallStackIterator
CallStack<NodeT, IteratorT>::begin() const {
  return CallStackIterator(N, /*End*/ false);
}

template <class NodeT, class IteratorT>
typename CallStack<NodeT, IteratorT>::CallStackIterator
CallStack<NodeT, IteratorT>::beginAfterSharedPrefix(CallStack &Other) {
  CallStackIterator Cur = begin();
  for (CallStackIterator OtherCur = Other.begin();
       Cur != end() && OtherCur != Other.end(); ++Cur, ++OtherCur)
    assert(*Cur == *OtherCur);
  return Cur;
}

/// Specializations for iterating through IR metadata stack contexts.
template <>
CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::CallStackIterator(
    const MDNode *N, bool End);
template <>
uint64_t CallStack<MDNode, MDNode::op_iterator>::CallStackIterator::operator*();
template <> uint64_t CallStack<MDNode, MDNode::op_iterator>::back() const;
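
// A minimal iteration sketch, assuming MIBStackNode is the MDNode returned by
// getMIBStackNode() above:
//
//   CallStack<MDNode, MDNode::op_iterator> StackContext(MIBStackNode);
//   for (uint64_t StackId : StackContext)
//     ; // Ids are visited from the allocation outwards (callee to caller).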

} // end namespace memprof
} // end namespace llvm

#endif
//===- RegionPass.h - RegionPass class --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegionPass class. All region based analysis,
// optimization and transformation passes are derived from RegionPass.
// This class is implemented following some of the ideas of LoopPass.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_REGIONPASS_H
#define LLVM_ANALYSIS_REGIONPASS_H

#include "llvm/IR/LegacyPassManagers.h"
#include "llvm/Pass.h"
#include <deque>

namespace llvm {
class Function;
class RGPassManager;
class Region;
class RegionInfo;

//===----------------------------------------------------------------------===//
/// A pass that runs on each Region in a function.
///
/// RegionPass is managed by RGPassManager.
class RegionPass : public Pass {
public:
  explicit RegionPass(char &pid) : Pass(PT_Region, pid) {}

  //===--------------------------------------------------------------------===//
  /// @name To be implemented by every RegionPass
  ///
  //@{
  /// Run the pass on a specific Region
  ///
  /// Accessing regions not contained in the current region is not allowed.
  ///
  /// @param R The region this pass is run on.
  /// @param RGM The RegionPassManager that manages this Pass.
  ///
  /// @return True if the pass modifies this Region.
  virtual bool runOnRegion(Region *R, RGPassManager &RGM) = 0;

  /// Get a pass to print the LLVM IR in the region.
  ///
  /// @param O      The output stream to print the Region.
  /// @param Banner The banner to separate different printed passes.
  ///
  /// @return The pass to print the LLVM IR in the region.
  Pass *createPrinterPass(raw_ostream &O,
                          const std::string &Banner) const override;

  using llvm::Pass::doInitialization;
  using llvm::Pass::doFinalization;

  virtual bool doInitialization(Region *R, RGPassManager &RGM) { return false; }
  virtual bool doFinalization() { return false; }
  //@}

  //===--------------------------------------------------------------------===//
  /// @name PassManager API
  ///
  //@{
  void preparePassManager(PMStack &PMS) override;

  void assignPassManager(PMStack &PMS,
                         PassManagerType PMT = PMT_RegionPassManager) override;

  PassManagerType getPotentialPassManagerType() const override {
    return PMT_RegionPassManager;
  }
  //@}

protected:
  /// Optional passes call this function to check whether the pass should be
  /// skipped. This is the case when optimization bisect is over the limit.
  bool skipRegion(Region &R) const;
};
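
// A minimal subclass sketch (the pass name and body are illustrative):
//
//   struct PrintRegionEntry : public RegionPass {
//     static char ID;
//     PrintRegionEntry() : RegionPass(ID) {}
//     bool runOnRegion(Region *R, RGPassManager &RGM) override {
//       if (skipRegion(*R))
//         return false;
//       // Inspect only blocks contained in R; nothing is modified.
//       return false;
//     }
//   };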

/// The pass manager to schedule RegionPasses.
class RGPassManager : public FunctionPass, public PMDataManager {
  std::deque<Region*> RQ;
  RegionInfo *RI;
  Region *CurrentRegion;

public:
  static char ID;
  explicit RGPassManager();

  /// Execute all of the passes scheduled for execution.
  ///
  /// @return True if any of the passes modifies the function.
  bool runOnFunction(Function &F) override;

  /// Pass Manager itself does not invalidate any analysis info.
  /// RGPassManager needs RegionInfo.
  void getAnalysisUsage(AnalysisUsage &Info) const override;

  StringRef getPassName() const override { return "Region Pass Manager"; }

  PMDataManager *getAsPMDataManager() override { return this; }
  Pass *getAsPass() override { return this; }

  /// Print passes managed by this manager.
  void dumpPassStructure(unsigned Offset) override;

  /// Get passes contained by this manager.
  Pass *getContainedPass(unsigned N) {
    assert(N < PassVector.size() && "Pass number out of range!");
    Pass *FP = static_cast<Pass *>(PassVector[N]);
    return FP;
  }

  PassManagerType getPassManagerType() const override {
    return PMT_RegionPassManager;
  }
};

} // End llvm namespace

#endif
//===-- Analysis/EHUtils.h - Exception handling related utils ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_EHUTILS_H
#define LLVM_ANALYSIS_EHUTILS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"

namespace llvm {

/// Compute the set of basic blocks that are only reachable via EH paths.
template <typename FunctionT, typename BlockT>
static void computeEHOnlyBlocks(FunctionT &F, DenseSet<BlockT *> &EHBlocks) {
  // A block is Unknown if it is not reachable from anywhere,
  // EH if it is only reachable from the start block via some path through EH
  // pads, and NonEH if it is reachable from non-EH blocks as well.
  enum Status { Unknown = 0, EH = 1, NonEH = 2 };
  DenseSet<BlockT *> WorkList;
  DenseMap<BlockT *, Status> Statuses;

  auto GetStatus = [&](BlockT *BB) {
    auto It = Statuses.find(BB);
    return It != Statuses.end() ? It->second : Unknown;
  };

  auto CheckPredecessors = [&](BlockT *BB, Status Stat) {
    for (auto *PredBB : predecessors(BB)) {
      Status PredStatus = GetStatus(PredBB);
      // If the status of a predecessor block is higher than the current
      // status, upgrade the current block's status.
      if (PredStatus > Stat)
        Stat = PredStatus;
    }
    return Stat;
  };

  auto AddSuccessors = [&](BlockT *BB) {
    for (auto *SuccBB : successors(BB)) {
      if (!SuccBB->isEHPad())
        WorkList.insert(SuccBB);
    }
  };

  // Seed the worklist with the successors of the start block and of the EH
  // pads.
  BlockT *StartBlock = &F.front();
  Statuses[StartBlock] = NonEH;
  AddSuccessors(StartBlock);

  for (auto &BB : F) {
    if (BB.isEHPad()) {
      AddSuccessors(&BB);
      Statuses[&BB] = EH;
    }
  }

  // Worklist iterative algorithm.
  while (!WorkList.empty()) {
    auto *BB = *WorkList.begin();
    WorkList.erase(BB);

    Status OldStatus = GetStatus(BB);

    // Recompute the status from the predecessors.
    Status NewStatus = CheckPredecessors(BB, OldStatus);

    // If the status changed, record it and revisit the successors.
    if (OldStatus != NewStatus) {
      AddSuccessors(BB);
      Statuses[BB] = NewStatus;
    }
  }

  EHBlocks.clear();
  for (auto Entry : Statuses) {
    if (Entry.second == EH)
      EHBlocks.insert(Entry.first);
  }
}
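
// A minimal usage sketch over an llvm::Function (BasicBlock provides the
// isEHPad(), predecessors() and successors() operations the template needs):
//
//   DenseSet<BasicBlock *> EHBlocks;
//   computeEHOnlyBlocks(F, EHBlocks);
//   for (BasicBlock *BB : EHBlocks)
//     ; // BB is reachable only along exception-handling paths.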
} // namespace llvm

#endif
//===- CallGraphSCCPass.h - Pass that operates BU on call graph -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the CallGraphSCCPass class, which is used for passes which
// are implemented as bottom-up traversals on the call graph.  Because there may
// be cycles in the call graph, passes of this type operate on the call-graph in
// SCC order: that is, they process functions bottom-up, except for recursive
// functions, which they process all at once.
//
// These passes are inherently interprocedural, and are required to keep the
// call graph up-to-date if they do anything which could modify it.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
#define LLVM_ANALYSIS_CALLGRAPHSCCPASS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Pass.h"
#include <vector>

namespace llvm {

class CallGraph;
class CallGraphNode;
class CallGraphSCC;
class PMStack;

class CallGraphSCCPass : public Pass {
public:
  explicit CallGraphSCCPass(char &pid) : Pass(PT_CallGraphSCC, pid) {}

  /// createPrinterPass - Get a pass that prints the Module
  /// corresponding to a CallGraph.
  Pass *createPrinterPass(raw_ostream &OS,
                          const std::string &Banner) const override;

  using llvm::Pass::doInitialization;
  using llvm::Pass::doFinalization;

  /// doInitialization - This method is called before the SCCs of the program
  /// have been processed, allowing the pass to do initialization as necessary.
  virtual bool doInitialization(CallGraph &CG) {
    return false;
  }

  /// runOnSCC - This method should be implemented by the subclass to perform
  /// whatever action is necessary for the specified SCC.  Note that
  /// non-recursive (or only self-recursive) functions will have an SCC size of
  /// 1, whereas recursive portions of the call graph will have SCC size > 1.
  ///
  /// SCC passes that add or delete functions to the SCC are required to update
  /// the SCC list, otherwise stale pointers may be dereferenced.
  virtual bool runOnSCC(CallGraphSCC &SCC) = 0;

  /// doFinalization - This method is called after the SCCs of the program
  /// have been processed, allowing the pass to do final cleanup as necessary.
  virtual bool doFinalization(CallGraph &CG) {
    return false;
  }

  /// Assign the pass manager to manage this pass.
  void assignPassManager(PMStack &PMS, PassManagerType PMT) override;

  ///  Return what kind of Pass Manager can manage this pass.
  PassManagerType getPotentialPassManagerType() const override {
    return PMT_CallGraphPassManager;
  }

  /// getAnalysisUsage - For this class, we declare that we require and preserve
  /// the call graph.  If the derived class implements this method, it should
  /// always explicitly call the implementation here.
  void getAnalysisUsage(AnalysisUsage &Info) const override;

protected:
  /// Optional passes call this function to check whether the pass should be
  /// skipped. This is the case when optimization bisect is over the limit.
  bool skipSCC(CallGraphSCC &SCC) const;
};
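
// A minimal subclass sketch (the pass name and body are illustrative):
//
//   struct CountSCCFunctions : public CallGraphSCCPass {
//     static char ID;
//     CountSCCFunctions() : CallGraphSCCPass(ID) {}
//     bool runOnSCC(CallGraphSCC &SCC) override {
//       if (skipSCC(SCC))
//         return false;
//       for (CallGraphNode *Node : SCC)
//         ; // Node->getFunction() may be null for external nodes.
//       return false; // The call graph was not modified.
//     }
//   };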

/// CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
class CallGraphSCC {
  const CallGraph &CG; // The call graph for this SCC.
  void *Context; // The CGPassManager object that is vending this.
  std::vector<CallGraphNode *> Nodes;

public:
  CallGraphSCC(CallGraph &cg, void *context) : CG(cg), Context(context) {}

  void initialize(ArrayRef<CallGraphNode *> NewNodes) {
    Nodes.assign(NewNodes.begin(), NewNodes.end());
  }

  bool isSingular() const { return Nodes.size() == 1; }
  unsigned size() const { return Nodes.size(); }

  /// ReplaceNode - This informs the SCC and the pass manager that the specified
  /// Old node has been deleted, and New is to be used in its place.
  void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);

  /// DeleteNode - This informs the SCC and the pass manager that the specified
  /// Old node has been deleted.
  void DeleteNode(CallGraphNode *Old);

  using iterator = std::vector<CallGraphNode *>::const_iterator;

  iterator begin() const { return Nodes.begin(); }
  iterator end() const { return Nodes.end(); }

  const CallGraph &getCallGraph() { return CG; }
};

void initializeDummyCGSCCPassPass(PassRegistry &);

/// This pass is required by interprocedural register allocation. It forces
/// codegen to follow bottom up order on call graph.
class DummyCGSCCPass : public CallGraphSCCPass {
public:
  static char ID;

  DummyCGSCCPass() : CallGraphSCCPass(ID) {
    PassRegistry &Registry = *PassRegistry::getPassRegistry();
    initializeDummyCGSCCPassPass(Registry);
  }

  bool runOnSCC(CallGraphSCC &SCC) override { return false; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
//===-- ConstantFolding.h - Fold instructions into constants ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares routines for folding instructions into constants when all
// operands are constants, for example "sub i32 1, 0" -> "1".
//
// Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file declares some additional folding routines that can make use of
// DataLayout information. These functions cannot go in VMCore due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
#define LLVM_ANALYSIS_CONSTANTFOLDING_H

#include <stdint.h>

namespace llvm {
class APInt;
template <typename T> class ArrayRef;
class CallBase;
class Constant;
class DSOLocalEquivalent;
class DataLayout;
class Function;
class GlobalValue;
class GlobalVariable;
class Instruction;
class TargetLibraryInfo;
class Type;

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
/// If the global is part of a dso_local_equivalent constant, return it through
/// `DSOEquiv` if it is provided.
bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset,
                                const DataLayout &DL,
                                DSOLocalEquivalent **DSOEquiv = nullptr);

/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant.  Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                  const TargetLibraryInfo *TLI = nullptr);

/// ConstantFoldConstant - Fold the constant using the specified DataLayout.
/// This function always returns a non-null constant: Either the folding result,
/// or the original constant if further folding is not possible.
Constant *ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                               const TargetLibraryInfo *TLI = nullptr);

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified operands.  If successful, the constant result is returned, if not,
/// null is returned.  Note that this function can fail when attempting to
/// fold instructions like loads and stores, which have no constant expression
/// form.
///
Constant *ConstantFoldInstOperands(Instruction *I, ArrayRef<Constant *> Ops,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo *TLI = nullptr);

/// Attempt to constant fold a compare instruction (icmp/fcmp) with the
/// specified operands. Returns null or a constant expression of the specified
/// operands on failure.
/// Denormal inputs may be flushed based on the denormal handling mode.
Constant *ConstantFoldCompareInstOperands(
    unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL,
    const TargetLibraryInfo *TLI = nullptr, const Instruction *I = nullptr);

/// Attempt to constant fold a unary operation with the specified operand.
/// Returns null on failure.
Constant *ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                     const DataLayout &DL);

/// Attempt to constant fold a binary operation with the specified operands.
/// Returns null or a constant expression of the specified operands on failure.
Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                       Constant *RHS, const DataLayout &DL);
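
// A minimal folding sketch, assuming I32Ty is an i32 IntegerType* and DL is
// the module's DataLayout:
//
//   Constant *LHS = ConstantInt::get(I32Ty, 2);
//   Constant *RHS = ConstantInt::get(I32Ty, 3);
//   if (Constant *Sum =
//           ConstantFoldBinaryOpOperands(Instruction::Add, LHS, RHS, DL))
//     ; // Sum is the i32 constant 5.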

/// Attempt to constant fold a floating point binary operation with the
/// specified operands, applying the denormal handling mode to the operands.
/// Returns null or a constant expression of the specified operands on failure.
Constant *ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS,
                                     Constant *RHS, const DataLayout &DL,
                                     const Instruction *I);

/// Attempt to flush a floating-point constant according to the denormal mode
/// set in the instruction's parent function attributes. If it flushes, return
/// a zero with the correct sign, otherwise return the original constant.
/// Inputs and outputs to floating-point instructions can have their modes set
/// separately, so the direction is also needed.
///
/// If the calling function's "denormal-fp-math" input mode is "dynamic" for the
/// floating-point type, returns nullptr for denormal inputs.
Constant *FlushFPConstant(Constant *Operand, const Instruction *I,
                          bool IsOutput);

/// Attempt to constant fold a select instruction with the specified
/// operands. The constant result is returned if successful; if not, null is
/// returned.
Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
                                        Constant *V2);

/// Attempt to constant fold a cast with the specified operand.  If it
/// fails, it returns a constant expression of the specified operand.
Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
                                  const DataLayout &DL);

/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
/// instruction with the specified operands and indices.  The constant result is
/// returned if successful; if not, null is returned.
Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
                                             ArrayRef<unsigned> Idxs);

/// Attempt to constant fold an extractvalue instruction with the
/// specified operands and indices.  The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
                                              ArrayRef<unsigned> Idxs);

/// Attempt to constant fold an insertelement instruction with the
/// specified operands and indices.  The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldInsertElementInstruction(Constant *Val,
                                               Constant *Elt,
                                               Constant *Idx);

/// Attempt to constant fold an extractelement instruction with the
/// specified operands and indices.  The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);

/// Attempt to constant fold a shufflevector instruction with the
/// specified operands and mask.  See class ShuffleVectorInst for a description
/// of the mask representation. The constant result is returned if successful;
/// if not, null is returned.
Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
                                               ArrayRef<int> Mask);

/// Extract value of C at the given Offset reinterpreted as Ty. If bits past
/// the end of C are accessed, they are assumed to be poison.
Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
                                    const DataLayout &DL);

/// Extract value of C reinterpreted as Ty. Same as previous API with zero
/// offset.
Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                    const DataLayout &DL);

/// Return the value that a load from C with offset Offset would produce if it
/// is constant and determinable. If this is not determinable, return null.
Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset,
                                       const DataLayout &DL);

/// Return the value that a load from C would produce if it is constant and
/// determinable. If this is not determinable, return null.
Constant *ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                       const DataLayout &DL);

/// If C is a uniform value where all bits are the same (either all zero, all
/// ones, all undef or all poison), return the corresponding uniform value in
/// the new type. If the value is not uniform or the result cannot be
/// represented, return null.
Constant *ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty);

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool canConstantFoldCallTo(const CallBase *Call, const Function *F);

/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(const CallBase *Call, Function *F,
                           ArrayRef<Constant *> Operands,
                           const TargetLibraryInfo *TLI = nullptr);

/// ConstantFoldLoadThroughBitcast - try to cast the constant to the
/// destination type, returning null if unsuccessful. Can cast pointer to
/// pointer, or pointer to integer and vice versa, if their sizes are equal.
Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                         const DataLayout &DL);

/// Check whether the given call has no side-effects.
/// Specifically checks for math routines which sometimes set errno.
bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI);

Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset);
} // end namespace llvm

#endif
//===- IntervalPartition.h - Interval partition Calculation -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the IntervalPartition class, which
// calculates and represents the interval partition of a function, or a
// preexisting interval partition.
//
// In this way, the interval partition may be used to reduce a flow graph down
// to its degenerate single node interval partition (unless it is irreducible).
//
// TODO: The IntervalPartition class should take a bool parameter that tells
// whether it should add the "tails" of an interval to an interval itself or if
// they should be represented as distinct intervals.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INTERVALPARTITION_H
#define LLVM_ANALYSIS_INTERVALPARTITION_H

#include "llvm/Pass.h"
#include <map>
#include <vector>

namespace llvm {

class BasicBlock;
class Interval;

//===----------------------------------------------------------------------===//
//
// IntervalPartition - This class builds and holds an "interval partition" for
// a function.  This partition divides the control flow graph into a set of
// maximal intervals, as defined with the properties above.  Intuitively, an
// interval is a (possibly nonexistent) loop with a "tail" of non-looping
// nodes following it.
//
class IntervalPartition : public FunctionPass {
  using IntervalMapTy = std::map<BasicBlock *, Interval *>;
  IntervalMapTy IntervalMap;

  using IntervalListTy = std::vector<Interval *>;
  Interval *RootInterval = nullptr;
  std::vector<Interval *> Intervals;

public:
  static char ID; // Pass identification, replacement for typeid

  IntervalPartition();

  // run - Calculate the interval partition for this function
  bool runOnFunction(Function &F) override;

  // IntervalPartition ctor - Build a reduced interval partition from an
  // existing interval graph.  This takes an additional boolean parameter to
  // distinguish it from a copy constructor.  Always pass in false for now.
  IntervalPartition(IntervalPartition &I, bool);

  // print - Show contents in human readable format...
  void print(raw_ostream &O, const Module* = nullptr) const override;

  // getRootInterval() - Return the root interval that contains the starting
  // block of the function.
  inline Interval *getRootInterval() { return RootInterval; }

  // isDegeneratePartition() - Returns true if the interval partition contains
  // a single interval, and thus cannot be simplified anymore.
  bool isDegeneratePartition() { return Intervals.size() == 1; }

  // TODO: isIrreducible - look for triangle graph.

  // getBlockInterval - Return the interval that a basic block exists in.
  inline Interval *getBlockInterval(BasicBlock *BB) {
    IntervalMapTy::iterator I = IntervalMap.find(BB);
    return I != IntervalMap.end() ? I->second : nullptr;
  }

  // getAnalysisUsage - Implement the Pass API
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  // Interface to Intervals vector...
  const std::vector<Interval*> &getIntervals() const { return Intervals; }

  // releaseMemory - Reset state back to before function was analyzed
  void releaseMemory() override;

private:
  // addIntervalToPartition - Add an interval to the internal list of intervals,
  // and then add mappings from all of the basic blocks in the interval to the
  // interval itself (in the IntervalMap).
  void addIntervalToPartition(Interval *I);

  // updatePredecessors - Interval generation only sets the successor fields of
  // the interval data structures.  After interval generation is complete,
  // run through all of the intervals and propagate successor info as
  // predecessor info.
  void updatePredecessors(Interval *Int);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_INTERVALPARTITION_H
//===-- Analysis/CFG.h - BasicBlock Analyses --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs analyses on basic blocks, and instructions
// contained within basic blocks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CFG_H
#define LLVM_ANALYSIS_CFG_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <utility>

namespace llvm {

class BasicBlock;
class DominatorTree;
class Function;
class Instruction;
class LoopInfo;
template <typename T> class SmallVectorImpl;

/// Analyze the specified function to find all of the loop backedges in the
/// function and return them.  This is a relatively cheap (compared to
/// computing dominators and loop info) analysis.
///
/// The output is added to Result, as pairs of <from,to> edge info.
void FindFunctionBackedges(
    const Function &F,
    SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
        Result);

/// Search for the specified successor of basic block BB and return its position
/// in the terminator instruction's list of successors.  It is an error to call
/// this with a block that is not a successor.
unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);

/// Return true if the specified edge is a critical edge. Critical edges are
/// edges from a block with multiple successors to a block with multiple
/// predecessors.
///
bool isCriticalEdge(const Instruction *TI, unsigned SuccNum,
                    bool AllowIdenticalEdges = false);
bool isCriticalEdge(const Instruction *TI, const BasicBlock *Succ,
                    bool AllowIdenticalEdges = false);

/// Determine whether instruction 'To' is reachable from 'From', without passing
/// through any blocks in ExclusionSet, returning true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
/// Returns false only if we can prove that once 'From' has been executed then
/// 'To' can not be executed. Conservatively returns true.
///
/// This function is linear with respect to the number of blocks in the CFG,
/// walking down successors from From to reach To, with a fixed threshold.
/// Using DT or LI allows us to answer more quickly. LI reduces the cost of
/// an entire loop of any number of blocks to be the same as the cost of a
/// single block. DT reduces the cost by allowing the search to terminate when
/// we find a block that dominates the block containing 'To'. DT is most useful
/// on branchy code but not loops, and LI is most useful on code with loops but
/// does not help on branchy code outside loops.
bool isPotentiallyReachable(
    const Instruction *From, const Instruction *To,
    const SmallPtrSetImpl<BasicBlock *> *ExclusionSet = nullptr,
    const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
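
// A conservative-query sketch (DT and LI may be omitted; passing them only
// makes the answer faster and more precise):
//
//   if (!isPotentiallyReachable(FromI, ToI, /*ExclusionSet=*/nullptr, &DT,
//                               &LI))
//     ; // Proven: ToI can never execute once FromI has executed.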

/// Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
/// Returns false only if we can prove that once 'From' has been reached then
/// 'To' can not be executed. Conservatively returns true.
bool isPotentiallyReachable(
    const BasicBlock *From, const BasicBlock *To,
    const SmallPtrSetImpl<BasicBlock *> *ExclusionSet = nullptr,
    const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);

/// Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB' without passing through any blocks in
/// 'ExclusionSet', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
/// StopBB within a single function without passing through any of the blocks
/// in 'ExclusionSet'. Returns false only if we can prove that once any block
/// in 'Worklist' has been reached then 'StopBB' can not be executed.
/// Conservatively returns true.
bool isPotentiallyReachableFromMany(
    SmallVectorImpl<BasicBlock *> &Worklist, const BasicBlock *StopBB,
    const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
    const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);

/// Return true if the control flow in \p RPOTraversal is irreducible.
///
/// This is a generic implementation to detect CFG irreducibility based on loop
/// info analysis. It can be used for any kind of CFG (Loop, MachineLoop,
/// Function, MachineFunction, etc.) by providing an RPO traversal (\p
/// RPOTraversal) and the loop info analysis (\p LI) of the CFG. This utility
/// function is only recommended when loop info analysis is available. If loop
/// info analysis isn't available, please, don't compute it explicitly for this
/// purpose. There are more efficient ways to detect CFG irreducibility that
/// don't require recomputing loop info analysis (e.g., T1/T2 or Tarjan's
/// algorithm).
///
/// Requirements:
///   1) GraphTraits must be implemented for NodeT type. It is used to access
///      NodeT successors.
///   2) \p RPOTraversal must be a valid reverse post-order traversal of the
///      target CFG with begin()/end() iterator interfaces.
///   3) \p LI must be a valid LoopInfoBase that contains up-to-date loop
///      analysis information of the CFG.
///
/// This algorithm uses the information about reducible loop back-edges already
/// computed in \p LI. When a back-edge is found during the RPO traversal, the
/// algorithm checks whether the back-edge is one of the reducible back-edges in
/// loop info. If it isn't, the CFG is irreducible. For example, for the CFG
/// below (canonical irreducible graph) loop info won't contain any loop, so the
/// algorithm will return that the CFG is irreducible when checking the B <-
/// -> C back-edge.
///
/// (A->B, A->C, B->C, C->B, C->D)
///    A
///  /   \
/// B<- ->C
///       |
///       D
///
template <class NodeT, class RPOTraversalT, class LoopInfoT,
          class GT = GraphTraits<NodeT>>
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI) {
  /// Check whether the edge (\p Src, \p Dst) is a reducible loop backedge
  /// according to LI. I.e., check if there exists a loop that contains Src and
  /// where Dst is the loop header.
  auto isProperBackedge = [&](NodeT Src, NodeT Dst) {
    for (const auto *Lp = LI.getLoopFor(Src); Lp; Lp = Lp->getParentLoop()) {
      if (Lp->getHeader() == Dst)
        return true;
    }
    return false;
  };

  SmallPtrSet<NodeT, 32> Visited;
  for (NodeT Node : RPOTraversal) {
    Visited.insert(Node);
    for (NodeT Succ : make_range(GT::child_begin(Node), GT::child_end(Node))) {
      // Succ hasn't been visited yet
      if (!Visited.count(Succ))
        continue;
      // We already visited Succ, thus Node->Succ must be a backedge. Check that
      // the head matches what we have in the loop information. Otherwise, we
      // have an irreducible graph.
      if (!isProperBackedge(Node, Succ))
        return true;
    }
  }

  return false;
}
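
// A minimal usage sketch over an llvm::Function, assuming LI is an up-to-date
// LoopInfo (ReversePostOrderTraversal comes from
// llvm/ADT/PostOrderIterator.h):
//
//   ReversePostOrderTraversal<Function *> RPOT(&F);
//   if (containsIrreducibleCFG<const BasicBlock *>(RPOT, LI))
//     ; // The CFG contains a cycle that is not a natural loop.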
} // End llvm namespace

#endif
//===- TensorSpec.h - type descriptor for a tensor --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_TENSORSPEC_H
#define LLVM_ANALYSIS_TENSORSPEC_H

#include "llvm/Config/llvm-config.h"

#include "llvm/ADT/StringMap.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/JSON.h"

#include <memory>
#include <optional>
#include <vector>

namespace llvm {
/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
/// for supported types), its name and port (see "TensorFlow: Large-Scale
/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
///
/// Known tensor types. The left part is the C type, the right is a name we
/// can use to identify the type (to implement TensorSpec equality checks), and
/// to use, if needed, when mapping to an underlying evaluator's type system.
/// The main requirement is that the C type we use has the same size and
/// encoding (e.g. endian-ness) as the one used by the evaluator.
#define SUPPORTED_TENSOR_TYPES(M)                                              \
  M(float, Float)                                                              \
  M(double, Double)                                                            \
  M(int8_t, Int8)                                                              \
  M(uint8_t, UInt8)                                                            \
  M(int16_t, Int16)                                                            \
  M(uint16_t, UInt16)                                                          \
  M(int32_t, Int32)                                                            \
  M(uint32_t, UInt32)                                                          \
  M(int64_t, Int64)                                                            \
  M(uint64_t, UInt64)

enum class TensorType {
  Invalid,
#define _TENSOR_TYPE_ENUM_MEMBERS(_, Name) Name,
  SUPPORTED_TENSOR_TYPES(_TENSOR_TYPE_ENUM_MEMBERS)
#undef _TENSOR_TYPE_ENUM_MEMBERS
      Total
};

class TensorSpec final {
public:
  template <typename T>
  static TensorSpec createSpec(const std::string &Name,
                               const std::vector<int64_t> &Shape,
                               int Port = 0) {
    return TensorSpec(Name, Port, getDataType<T>(), sizeof(T), Shape);
  }

  const std::string &name() const { return Name; }
  int port() const { return Port; }
  TensorType type() const { return Type; }
  const std::vector<int64_t> &shape() const { return Shape; }

  bool operator==(const TensorSpec &Other) const {
    return Name == Other.Name && Port == Other.Port && Type == Other.Type &&
           Shape == Other.Shape;
  }

  bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }

  /// Get the number of elements in a tensor with this shape.
  size_t getElementCount() const { return ElementCount; }
  /// Get the size, in bytes, of one element.
  size_t getElementByteSize() const { return ElementSize; }
  /// Get the total size of a memory buffer needed to store the whole tensor.
  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }

  template <typename T> bool isElementType() const {
    return getDataType<T>() == Type;
  }

  TensorSpec(const std::string &NewName, const TensorSpec &Other)
      : TensorSpec(NewName, Other.Port, Other.Type, Other.ElementSize,
                   Other.Shape) {}

  void toJSON(json::OStream &OS) const;

private:
  TensorSpec(const std::string &Name, int Port, TensorType Type,
             size_t ElementSize, const std::vector<int64_t> &Shape);

  template <typename T> static TensorType getDataType();

  std::string Name;
  int Port = 0;
  TensorType Type = TensorType::Invalid;
  std::vector<int64_t> Shape;
  size_t ElementCount = 0;
  size_t ElementSize = 0;
};
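
// A minimal construction sketch (the tensor name and shape are illustrative):
//
//   TensorSpec Spec = TensorSpec::createSpec<float>("input", {2, 5});
//   assert(Spec.getElementCount() == 10);
//   assert(Spec.isElementType<float>() && !Spec.isElementType<double>());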

/// For debugging.
std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);

/// Construct a TensorSpec from a JSON dictionary of the form:
/// { "name": <string>,
///   "port": <int>,
///   "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
///   "shape": <array of ints> }
/// For the "type" field, see the C++ primitive types used in
/// TFUTILS_SUPPORTED_TYPES.
std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                                const json::Value &Value);

#define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
  template <> TensorType TensorSpec::getDataType<T>();
SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_DEF)

#undef TFUTILS_GETDATATYPE_DEF
} // namespace llvm

#endif // LLVM_ANALYSIS_TENSORSPEC_H
//===-- GuardUtils.h - Utils for work with guards ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Utils that are used to perform analyses related to guards and their
// conditions.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_GUARDUTILS_H
#define LLVM_ANALYSIS_GUARDUTILS_H

namespace llvm {

class BasicBlock;
class Use;
class User;
class Value;

/// Returns true iff \p U has semantics of a guard expressed in a form of call
/// of llvm.experimental.guard intrinsic.
bool isGuard(const User *U);

/// Returns true iff \p U is a widenable branch (that is, parseWidenableBranch
/// returns true).
bool isWidenableBranch(const User *U);

/// Returns true iff \p U has semantics of a guard expressed in a form of a
/// widenable conditional branch to deopt block.
bool isGuardAsWidenableBranch(const User *U);

/// If U is a widenable branch looking like:
///   %cond = ...
///   %wc = call i1 @llvm.experimental.widenable.condition()
///   %branch_cond = and i1 %cond, %wc
///   br i1 %branch_cond, label %if_true_bb, label %if_false_bb ; <--- U
/// The function returns true, and the values %cond and %wc and the blocks
/// %if_true_bb and %if_false_bb are returned in the parameters (Condition,
/// WidenableCondition, IfTrueBB and IfFalseBB) respectively. If \p U does not
/// match this pattern, return false.
bool parseWidenableBranch(const User *U, Value *&Condition,
                          Value *&WidenableCondition, BasicBlock *&IfTrueBB,
                          BasicBlock *&IfFalseBB);

/// Analogous to the above, but return the Uses so that they can be modified.
/// Unlike the previous version, Condition is optional and may be null.
bool parseWidenableBranch(User *U, Use *&Cond, Use *&WC, BasicBlock *&IfTrueBB,
                          BasicBlock *&IfFalseBB);
  
} // end namespace llvm

#endif // LLVM_ANALYSIS_GUARDUTILS_H
//===- MemoryLocation.h - Memory location descriptions ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides utility analysis objects describing memory locations.
/// These are used both by the Alias Analysis infrastructure and more
/// specialized memory analysis layers.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
#define LLVM_ANALYSIS_MEMORYLOCATION_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/TypeSize.h"

#include <optional>

namespace llvm {

class CallBase;
class Instruction;
class LoadInst;
class StoreInst;
class MemTransferInst;
class MemIntrinsic;
class AtomicCmpXchgInst;
class AtomicMemTransferInst;
class AtomicMemIntrinsic;
class AtomicRMWInst;
class AnyMemTransferInst;
class AnyMemIntrinsic;
class TargetLibraryInfo;
class VAArgInst;
class Value;

// Represents the size of a MemoryLocation. Logically, it's an
// std::optional<uint64_t> that also carries a bit to represent whether the
// integer it contains, N, is 'precise'. Precise, in this context, means that we
// know that the area of storage referenced by the given MemoryLocation must be
// precisely N bytes. An imprecise value is formed as the union of two or more
// precise values, and can conservatively represent all of the values unioned
// into it. Importantly, imprecise values are an *upper-bound* on the size of a
// MemoryLocation.
//
// Concretely, a precise MemoryLocation is (%p, 4) in
// store i32 0, i32* %p
//
// Since we know that %p must be at least 4 bytes large at this point.
// Otherwise, we have UB. An example of an imprecise MemoryLocation is (%p, 4)
// at the memcpy in
//
//   %n = select i1 %foo, i64 1, i64 4
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p, i8* %baz, i64 %n, i32 1,
//                                        i1 false)
//
// ...Since we'll copy *up to* 4 bytes into %p, but we can't guarantee that
// we'll ever actually do so.
//
// If asked to represent a pathologically large value, this will degrade to
// std::nullopt.
class LocationSize {
  enum : uint64_t {
    BeforeOrAfterPointer = ~uint64_t(0),
    AfterPointer = BeforeOrAfterPointer - 1,
    MapEmpty = BeforeOrAfterPointer - 2,
    MapTombstone = BeforeOrAfterPointer - 3,
    ImpreciseBit = uint64_t(1) << 63,

    // The maximum value we can represent without falling back to 'unknown'.
    MaxValue = (MapTombstone - 1) & ~ImpreciseBit,
  };

  uint64_t Value;

  // Hack to support implicit construction. This should disappear when the
  // public LocationSize ctor goes away.
  enum DirectConstruction { Direct };

  constexpr LocationSize(uint64_t Raw, DirectConstruction): Value(Raw) {}

  static_assert(AfterPointer & ImpreciseBit,
                "AfterPointer is imprecise by definition.");
  static_assert(BeforeOrAfterPointer & ImpreciseBit,
                "BeforeOrAfterPointer is imprecise by definition.");

public:
  // FIXME: Migrate all users to construct via either `precise` or `upperBound`,
  // to make it more obvious at the callsite the kind of size that they're
  // providing.
  //
  // Since the overwhelming majority of users of this provide precise values,
  // this assumes the provided value is precise.
  constexpr LocationSize(uint64_t Raw)
      : Value(Raw > MaxValue ? AfterPointer : Raw) {}

  static LocationSize precise(uint64_t Value) { return LocationSize(Value); }
  static LocationSize precise(TypeSize Value) {
    if (Value.isScalable())
      return afterPointer();
    return precise(Value.getFixedValue());
  }

  static LocationSize upperBound(uint64_t Value) {
    // You can't go lower than 0, so give a precise result.
    if (LLVM_UNLIKELY(Value == 0))
      return precise(0);
    if (LLVM_UNLIKELY(Value > MaxValue))
      return afterPointer();
    return LocationSize(Value | ImpreciseBit, Direct);
  }
  static LocationSize upperBound(TypeSize Value) {
    if (Value.isScalable())
      return afterPointer();
    return upperBound(Value.getFixedValue());
  }

  /// Any location after the base pointer (but still within the underlying
  /// object).
  constexpr static LocationSize afterPointer() {
    return LocationSize(AfterPointer, Direct);
  }

  /// Any location before or after the base pointer (but still within the
  /// underlying object).
  constexpr static LocationSize beforeOrAfterPointer() {
    return LocationSize(BeforeOrAfterPointer, Direct);
  }

  // Sentinel values, generally used for maps.
  constexpr static LocationSize mapTombstone() {
    return LocationSize(MapTombstone, Direct);
  }
  constexpr static LocationSize mapEmpty() {
    return LocationSize(MapEmpty, Direct);
  }

  // Returns a LocationSize that can correctly represent either `*this` or
  // `Other`.
  LocationSize unionWith(LocationSize Other) const {
    if (Other == *this)
      return *this;

    if (Value == BeforeOrAfterPointer || Other.Value == BeforeOrAfterPointer)
      return beforeOrAfterPointer();
    if (Value == AfterPointer || Other.Value == AfterPointer)
      return afterPointer();

    return upperBound(std::max(getValue(), Other.getValue()));
  }

  bool hasValue() const {
    return Value != AfterPointer && Value != BeforeOrAfterPointer;
  }
  uint64_t getValue() const {
    assert(hasValue() && "Getting value from an unknown LocationSize!");
    return Value & ~ImpreciseBit;
  }

  // Returns whether or not this value is precise. Note that if a value is
  // precise, it's guaranteed to not be unknown.
  bool isPrecise() const {
    return (Value & ImpreciseBit) == 0;
  }

  // Convenience method to check if this LocationSize's value is 0.
  bool isZero() const { return hasValue() && getValue() == 0; }

  /// Whether accesses before the base pointer are possible.
  bool mayBeBeforePointer() const { return Value == BeforeOrAfterPointer; }

  bool operator==(const LocationSize &Other) const {
    return Value == Other.Value;
  }

  bool operator!=(const LocationSize &Other) const {
    return !(*this == Other);
  }

  // Ordering operators are not provided, since it's unclear if there's only one
  // reasonable way to compare:
  // - values that don't exist against values that do, and
  // - precise values to imprecise values

  void print(raw_ostream &OS) const;

  // Returns an opaque value that represents this LocationSize. Cannot be
  // reliably converted back into a LocationSize.
  uint64_t toRaw() const { return Value; }
};
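
// A minimal sketch of how precision behaves under unionWith:
//
//   LocationSize A = LocationSize::precise(4);
//   LocationSize B = LocationSize::upperBound(8);
//   LocationSize U = A.unionWith(B);
//   // U.hasValue() && U.getValue() == 8 && !U.isPrecise()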

inline raw_ostream &operator<<(raw_ostream &OS, LocationSize Size) {
  Size.print(OS);
  return OS;
}

/// Representation for a specific memory location.
///
/// This abstraction can be used to represent a specific location in memory.
/// The goal of the location is to represent enough information to describe
/// abstract aliasing, modification, and reference behaviors of whatever
/// value(s) are stored in memory at the particular location.
///
/// The primary user of this interface is LLVM's Alias Analysis, but other
/// memory analyses such as MemoryDependence can use it as well.
class MemoryLocation {
public:
  /// UnknownSize - This is a special value which can be used with the
  /// size arguments in alias queries to indicate that the caller does not
  /// know the sizes of the potential memory references.
  enum : uint64_t { UnknownSize = ~UINT64_C(0) };

  /// The address of the start of the location.
  const Value *Ptr;

  /// The maximum size of the location, in address-units, or
  /// UnknownSize if the size is not known.
  ///
  /// Note that an unknown size does not mean the pointer aliases the entire
  /// virtual address space, because there are restrictions on stepping out of
  /// one object and into another. See
  /// http://llvm.org/docs/LangRef.html#pointeraliasing
  LocationSize Size;

  /// The metadata nodes which describe the aliasing of the location (each
  /// member is null if that kind of information is unavailable).
  AAMDNodes AATags;

  void print(raw_ostream &OS) const { OS << *Ptr << " " << Size << "\n"; }

  /// Return a location with information about the memory referenced by the
  /// given instruction.
  static MemoryLocation get(const LoadInst *LI);
  static MemoryLocation get(const StoreInst *SI);
  static MemoryLocation get(const VAArgInst *VI);
  static MemoryLocation get(const AtomicCmpXchgInst *CXI);
  static MemoryLocation get(const AtomicRMWInst *RMWI);
  static MemoryLocation get(const Instruction *Inst) {
    return *MemoryLocation::getOrNone(Inst);
  }
  static std::optional<MemoryLocation> getOrNone(const Instruction *Inst);

  /// Return a location representing the source of a memory transfer.
  static MemoryLocation getForSource(const MemTransferInst *MTI);
  static MemoryLocation getForSource(const AtomicMemTransferInst *MTI);
  static MemoryLocation getForSource(const AnyMemTransferInst *MTI);

  /// Return a location representing the destination of a memory set or
  /// transfer.
  static MemoryLocation getForDest(const MemIntrinsic *MI);
  static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
  static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
  static std::optional<MemoryLocation> getForDest(const CallBase *CI,
                                                  const TargetLibraryInfo &TLI);

  /// Return a location representing a particular argument of a call.
  static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                       const TargetLibraryInfo *TLI);
  static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
                                       const TargetLibraryInfo &TLI) {
    return getForArgument(Call, ArgIdx, &TLI);
  }

  /// Return a location that may access any location after Ptr, while remaining
  /// within the underlying object.
  static MemoryLocation getAfter(const Value *Ptr,
                                 const AAMDNodes &AATags = AAMDNodes()) {
    return MemoryLocation(Ptr, LocationSize::afterPointer(), AATags);
  }

  /// Return a location that may access any location before or after Ptr, while
  /// remaining within the underlying object.
  static MemoryLocation
  getBeforeOrAfter(const Value *Ptr, const AAMDNodes &AATags = AAMDNodes()) {
    return MemoryLocation(Ptr, LocationSize::beforeOrAfterPointer(), AATags);
  }

  // Return the exact size if the exact size is known at compile time,
  // otherwise return MemoryLocation::UnknownSize.
  static uint64_t getSizeOrUnknown(const TypeSize &T) {
    return T.isScalable() ? UnknownSize : T.getFixedValue();
  }

  MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}

  explicit MemoryLocation(const Value *Ptr, LocationSize Size,
                          const AAMDNodes &AATags = AAMDNodes())
      : Ptr(Ptr), Size(Size), AATags(AATags) {}

  MemoryLocation getWithNewPtr(const Value *NewPtr) const {
    MemoryLocation Copy(*this);
    Copy.Ptr = NewPtr;
    return Copy;
  }

  MemoryLocation getWithNewSize(LocationSize NewSize) const {
    MemoryLocation Copy(*this);
    Copy.Size = NewSize;
    return Copy;
  }

  MemoryLocation getWithoutAATags() const {
    MemoryLocation Copy(*this);
    Copy.AATags = AAMDNodes();
    return Copy;
  }

  bool operator==(const MemoryLocation &Other) const {
    return Ptr == Other.Ptr && Size == Other.Size && AATags == Other.AATags;
  }
};
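
// Example (editorial sketch, not part of the original header): building a
// location for a load and widening its size bound; describeLoad is a
// hypothetical helper, not an LLVM API.
//
// \code
//   #include "llvm/Analysis/MemoryLocation.h"
//   #include "llvm/IR/Instructions.h"
//   using namespace llvm;
//
//   MemoryLocation describeLoad(const LoadInst *LI) {
//     MemoryLocation Loc = MemoryLocation::get(LI); // precise size of the load
//     // Keep the pointer and AA tags, but weaken the size to "at most 64".
//     return Loc.getWithNewSize(LocationSize::upperBound(64));
//   }
// \endcode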

// Specialize DenseMapInfo.
template <> struct DenseMapInfo<LocationSize> {
  static inline LocationSize getEmptyKey() {
    return LocationSize::mapEmpty();
  }
  static inline LocationSize getTombstoneKey() {
    return LocationSize::mapTombstone();
  }
  static unsigned getHashValue(const LocationSize &Val) {
    return DenseMapInfo<uint64_t>::getHashValue(Val.toRaw());
  }
  static bool isEqual(const LocationSize &LHS, const LocationSize &RHS) {
    return LHS == RHS;
  }
};

template <> struct DenseMapInfo<MemoryLocation> {
  static inline MemoryLocation getEmptyKey() {
    return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(),
                          DenseMapInfo<LocationSize>::getEmptyKey());
  }
  static inline MemoryLocation getTombstoneKey() {
    return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(),
                          DenseMapInfo<LocationSize>::getTombstoneKey());
  }
  static unsigned getHashValue(const MemoryLocation &Val) {
    return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
           DenseMapInfo<LocationSize>::getHashValue(Val.Size) ^
           DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
  }
  static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
    return LHS == RHS;
  }
};
}

#endif
//===----- llvm/Analysis/CaptureTracking.h - Pointer capture ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help determine which pointers are captured.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
#define LLVM_ANALYSIS_CAPTURETRACKING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLFunctionalExtras.h"

namespace llvm {

  class Value;
  class Use;
  class DataLayout;
  class Instruction;
  class DominatorTree;
  class LoopInfo;
  class Function;
  template <typename T> class SmallPtrSetImpl;

  /// getDefaultMaxUsesToExploreForCaptureTracking - Return the default value
  /// of the maximal number of uses to explore before giving up. It is used by
  /// the PointerMayBeCaptured family of analyses.
  unsigned getDefaultMaxUsesToExploreForCaptureTracking();

  /// PointerMayBeCaptured - Return true if this pointer value may be captured
  /// by the enclosing function (which is required to exist).  This routine can
  /// be expensive, so consider caching the results.  The boolean ReturnCaptures
  /// specifies whether returning the value (or part of it) from the function
  /// counts as capturing it or not.  The boolean StoreCaptures specifies
  /// whether storing the value (or part of it) into memory anywhere
  /// automatically counts as capturing it or not.
  /// MaxUsesToExplore specifies how many uses the analysis should explore for
  /// one value before giving up due to "too many uses". If MaxUsesToExplore
  /// is zero, a default value is assumed.
  bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures,
                            bool StoreCaptures, unsigned MaxUsesToExplore = 0);
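
  // Example (editorial sketch, not part of the original header): the most
  // common escape query, assuming AI is a pointer value such as an alloca.
  //
  // \code
  //   if (!PointerMayBeCaptured(AI, /*ReturnCaptures=*/true,
  //                             /*StoreCaptures=*/true)) {
  //     // No use of AI (transitively) lets the pointer escape the function.
  //   }
  // \endcode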

  /// Variant of the above function which accepts a set of Values that are
  /// ephemeral and cannot cause pointers to escape.
  bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures,
                            bool StoreCaptures,
                            const SmallPtrSetImpl<const Value *> &EphValues,
                            unsigned MaxUsesToExplore = 0);

  /// PointerMayBeCapturedBefore - Return true if this pointer value may be
  /// captured by the enclosing function (which is required to exist). If a
  /// DominatorTree is provided, only captures which happen before the given
  /// instruction are considered. This routine can be expensive, so consider
  /// caching the results.  The boolean ReturnCaptures specifies whether
  /// returning the value (or part of it) from the function counts as capturing
  /// it or not.  The boolean StoreCaptures specifies whether storing the value
  /// (or part of it) into memory anywhere automatically counts as capturing it
  /// or not. Captures by the provided instruction are considered if the
  /// final parameter is true.
  /// MaxUsesToExplore specifies how many uses the analysis should explore for
  /// one value before giving up due to "too many uses". If MaxUsesToExplore
  /// is zero, a default value is assumed.
  bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
                                  bool StoreCaptures, const Instruction *I,
                                  const DominatorTree *DT,
                                  bool IncludeI = false,
                                  unsigned MaxUsesToExplore = 0,
                                  const LoopInfo *LI = nullptr);

  // Returns the 'earliest' instruction that captures \p V in \p F. An
  // instruction A is considered earlier than instruction B if A dominates B.
  // If two escapes
  // do not dominate each other, the terminator of the common dominator is
  // chosen. If not all uses can be analyzed, the earliest escape is set to
  // the first instruction in the function entry block. If \p V does not escape,
  // nullptr is returned. Note that the caller of the function has to ensure
  // that the instruction the result value is compared against is not in a
  // cycle.
  Instruction *
  FindEarliestCapture(const Value *V, Function &F, bool ReturnCaptures,
                      bool StoreCaptures, const DominatorTree &DT,
                      const SmallPtrSetImpl<const Value *> &EphValues,
                      unsigned MaxUsesToExplore = 0);

  /// This callback is used in conjunction with PointerMayBeCaptured. In
  /// addition to the interface here, you'll need to provide your own getters
  /// to see whether anything was captured.
  struct CaptureTracker {
    virtual ~CaptureTracker();

    /// tooManyUses - The depth of traversal has breached a limit. There may be
    /// capturing instructions that will not be passed into captured().
    virtual void tooManyUses() = 0;

    /// shouldExplore - This is the use of a value derived from the pointer.
    /// To prune the search (i.e., assume that none of its users could possibly
    /// capture) return false. To search it, return true.
    ///
    /// U->getUser() is always an Instruction.
    virtual bool shouldExplore(const Use *U);

    /// captured - Information about the pointer was captured by the user of
    /// use U. Return true to stop the traversal or false to continue looking
    /// for more capturing instructions.
    virtual bool captured(const Use *U) = 0;

    /// isDereferenceableOrNull - Overload to allow clients with additional
    /// knowledge about pointer dereferenceability to provide it and thereby
    /// avoid conservative responses when a pointer is compared to null.
    virtual bool isDereferenceableOrNull(Value *O, const DataLayout &DL);
  };
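
  // Example (editorial sketch, not part of the original header): a minimal
  // tracker that records whether any capture was seen, for use with the
  // CaptureTracker-based PointerMayBeCaptured overload declared below.
  //
  // \code
  //   struct AnyCaptureTracker : CaptureTracker {
  //     bool Captured = false;
  //     void tooManyUses() override { Captured = true; } // be conservative
  //     bool captured(const Use *U) override {
  //       Captured = true;
  //       return true; // stop the traversal at the first capture
  //     }
  //   };
  //
  //   AnyCaptureTracker CT;
  //   PointerMayBeCaptured(V, &CT);
  //   bool Escapes = CT.Captured;
  // \endcode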

  /// Types of use capture kinds, see \p DetermineUseCaptureKind.
  enum class UseCaptureKind {
    NO_CAPTURE,
    MAY_CAPTURE,
    PASSTHROUGH,
  };

  /// Determine what kind of capture behaviour \p U may exhibit.
  ///
  /// A use can be no-capture, a use can potentially capture, or a use can be
  /// passthrough such that the uses of the user or \p U should be inspected.
  /// The \p IsDereferenceableOrNull callback is used to rule out capturing for
  /// certain comparisons.
  UseCaptureKind
  DetermineUseCaptureKind(const Use &U,
                          llvm::function_ref<bool(Value *, const DataLayout &)>
                              IsDereferenceableOrNull);

  /// PointerMayBeCaptured - Visit the value and the values derived from it and
  /// find values which appear to be capturing the pointer value. This feeds
  /// results into and is controlled by the CaptureTracker object.
  /// MaxUsesToExplore specifies how many uses the analysis should explore for
  /// one value before giving up due too "too many uses". If MaxUsesToExplore
  /// is zero, a default value is assumed.
  void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker,
                            unsigned MaxUsesToExplore = 0);

  /// Returns true if the pointer is to a function-local object that never
  /// escapes from the function.
  bool isNonEscapingLocalObject(
      const Value *V,
      SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr);
} // end namespace llvm

#endif
//===- UniformityAnalysis.h ---------------------*- C++ -*-----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief LLVM IR instance of the generic uniformity analysis
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_UNIFORMITYANALYSIS_H
#define LLVM_ANALYSIS_UNIFORMITYANALYSIS_H

#include "llvm/ADT/GenericUniformityInfo.h"
#include "llvm/Analysis/CycleAnalysis.h"

namespace llvm {

extern template class GenericUniformityInfo<SSAContext>;
using UniformityInfo = GenericUniformityInfo<SSAContext>;

/// Analysis pass which computes \ref UniformityInfo.
class UniformityInfoAnalysis
    : public AnalysisInfoMixin<UniformityInfoAnalysis> {
  friend AnalysisInfoMixin<UniformityInfoAnalysis>;
  static AnalysisKey Key;

public:
  /// Provide the result typedef for this analysis pass.
  using Result = UniformityInfo;

  /// Run the analysis pass over a function and produce uniformity information.
  UniformityInfo run(Function &F, FunctionAnalysisManager &);

  // TODO: verify analysis
};
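
// Example (editorial sketch, not part of the original header): querying the
// analysis from a function pass; FAM and F are assumed to be provided by the
// caller, and isUniform is the query offered by GenericUniformityInfo.
//
// \code
//   UniformityInfo &UI = FAM.getResult<UniformityInfoAnalysis>(F);
//   bool Uniform = UI.isUniform(&SomeInstruction);
// \endcode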

/// Printer pass for the \c UniformityInfo.
class UniformityInfoPrinterPass
    : public PassInfoMixin<UniformityInfoPrinterPass> {
  raw_ostream &OS;

public:
  explicit UniformityInfoPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes a \ref UniformityInfo.
class UniformityInfoWrapperPass : public FunctionPass {
  Function *m_function = nullptr;
  UniformityInfo m_uniformityInfo;

public:
  static char ID;

  UniformityInfoWrapperPass();

  UniformityInfo &getUniformityInfo() { return m_uniformityInfo; }
  const UniformityInfo &getUniformityInfo() const { return m_uniformityInfo; }

  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

  // TODO: verify analysis
};

} // namespace llvm

#endif // LLVM_ANALYSIS_UNIFORMITYANALYSIS_H
//===- InlineOrder.h - Inlining order abstraction -*- C++ ---*-------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_INLINEORDER_H
#define LLVM_ANALYSIS_INLINEORDER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Analysis/InlineCost.h"
#include <utility>

namespace llvm {
class CallBase;

template <typename T> class InlineOrder {
public:
  virtual ~InlineOrder() = default;

  virtual size_t size() = 0;

  virtual void push(const T &Elt) = 0;

  virtual T pop() = 0;

  virtual void erase_if(function_ref<bool(T)> Pred) = 0;

  bool empty() { return !size(); }
};

std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
getDefaultInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params,
                      ModuleAnalysisManager &MAM, Module &M);

std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
getInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params,
               ModuleAnalysisManager &MAM, Module &M);
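
// Example (editorial sketch, not part of the original header): draining an
// inline order; FAM, Params, MAM, and M are assumed to be set up by the
// caller.
//
// \code
//   auto Order = getDefaultInlineOrder(FAM, Params, MAM, M);
//   while (!Order->empty()) {
//     auto [CB, Level] = Order->pop(); // next call site by priority
//     // ... attempt to inline *CB ...
//   }
// \endcode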

/// Used for dynamically loading instances of InlineOrder as plugins
///
/// Plugins must implement an InlineOrderFactory, for an example refer to:
/// llvm/unittests/Analysis/InlineOrderPlugin/InlineOrderPlugin.cpp
///
/// If a PluginInlineOrderAnalysis has been registered with the
/// current ModuleAnalysisManager, llvm::getInlineOrder returns an
/// InlineOrder created by the PluginInlineOrderAnalysis' Factory.
///
class PluginInlineOrderAnalysis
    : public AnalysisInfoMixin<PluginInlineOrderAnalysis> {
public:
  static AnalysisKey Key;

  typedef std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>> (
      *InlineOrderFactory)(FunctionAnalysisManager &FAM,
                           const InlineParams &Params,
                           ModuleAnalysisManager &MAM, Module &M);

  PluginInlineOrderAnalysis(InlineOrderFactory Factory) : Factory(Factory) {
    HasBeenRegistered = true;
    assert(Factory != nullptr &&
           "The plugin inline order factory should not be a null pointer.");
  }

  struct Result {
    InlineOrderFactory Factory;
  };

  Result run(Module &, ModuleAnalysisManager &) { return {Factory}; }
  Result getResult() { return {Factory}; }

  static bool isRegistered() { return HasBeenRegistered; }
  static void unregister() { HasBeenRegistered = false; }

private:
  static bool HasBeenRegistered;
  InlineOrderFactory Factory;
};

} // namespace llvm
#endif // LLVM_ANALYSIS_INLINEORDER_H
//===-- ScalarFuncs.def - Library information ----------*- C++ -*----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// This .def file creates a mapping from standard IEEE math functions to
// their corresponding entries in the IBM MASS (scalar) library.
// LLVM intrinsic math functions will be handled in PPCISelLowering to
// allow existing optimizations like pow(x,0.5) --> sqrt(x).

#if defined(TLI_DEFINE_SCALAR_MASS_FUNCS)
#define TLI_DEFINE_SCALAR_MASS_FUNC(SCAL, MASSENTRY) {SCAL, MASSENTRY},
#endif
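
// Example (editorial sketch, not part of this .def file): the typical X-macro
// consumption pattern. A consumer defines the guard macro and then includes
// this file, so each entry below expands into an initializer:
//
// \code
//   static const std::pair<const char *, const char *> ScalarMassFuncs[] = {
//   #define TLI_DEFINE_SCALAR_MASS_FUNCS
//   #include "llvm/Analysis/ScalarFuncs.def"
//   };
// \endcode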

TLI_DEFINE_SCALAR_MASS_FUNC("acosf", "__xl_acosf")
TLI_DEFINE_SCALAR_MASS_FUNC("__acosf_finite", "__xl_acosf")
TLI_DEFINE_SCALAR_MASS_FUNC("acos", "__xl_acos")
TLI_DEFINE_SCALAR_MASS_FUNC("__acos_finite", "__xl_acos")

TLI_DEFINE_SCALAR_MASS_FUNC("acoshf", "__xl_acoshf")
TLI_DEFINE_SCALAR_MASS_FUNC("__acoshf_finite", "__xl_acoshf")
TLI_DEFINE_SCALAR_MASS_FUNC("acosh", "__xl_acosh")
TLI_DEFINE_SCALAR_MASS_FUNC("__acosh_finite", "__xl_acosh")

TLI_DEFINE_SCALAR_MASS_FUNC("asinf", "__xl_asinf")
TLI_DEFINE_SCALAR_MASS_FUNC("__asinf_finite", "__xl_asinf")
TLI_DEFINE_SCALAR_MASS_FUNC("asin", "__xl_asin")
TLI_DEFINE_SCALAR_MASS_FUNC("__asin_finite", "__xl_asin")

TLI_DEFINE_SCALAR_MASS_FUNC("asinhf", "__xl_asinhf")
TLI_DEFINE_SCALAR_MASS_FUNC("asinh", "__xl_asinh")

TLI_DEFINE_SCALAR_MASS_FUNC("atanf", "__xl_atanf")
TLI_DEFINE_SCALAR_MASS_FUNC("atan", "__xl_atan")

TLI_DEFINE_SCALAR_MASS_FUNC("atan2f", "__xl_atan2f")
TLI_DEFINE_SCALAR_MASS_FUNC("__atan2f_finite", "__xl_atan2f")
TLI_DEFINE_SCALAR_MASS_FUNC("atan2", "__xl_atan2")
TLI_DEFINE_SCALAR_MASS_FUNC("__atan2_finite", "__xl_atan2")

TLI_DEFINE_SCALAR_MASS_FUNC("atanhf", "__xl_atanhf")
TLI_DEFINE_SCALAR_MASS_FUNC("__atanhf_finite", "__xl_atanhf")
TLI_DEFINE_SCALAR_MASS_FUNC("atanh", "__xl_atanh")
TLI_DEFINE_SCALAR_MASS_FUNC("__atanh_finite", "__xl_atanh")

TLI_DEFINE_SCALAR_MASS_FUNC("cbrtf", "__xl_cbrtf")
TLI_DEFINE_SCALAR_MASS_FUNC("cbrt", "__xl_cbrt")

TLI_DEFINE_SCALAR_MASS_FUNC("cosf", "__xl_cosf")
TLI_DEFINE_SCALAR_MASS_FUNC("cos", "__xl_cos")

TLI_DEFINE_SCALAR_MASS_FUNC("coshf", "__xl_coshf")
TLI_DEFINE_SCALAR_MASS_FUNC("__coshf_finite", "__xl_coshf")
TLI_DEFINE_SCALAR_MASS_FUNC("cosh", "__xl_cosh")
TLI_DEFINE_SCALAR_MASS_FUNC("__cosh_finite", "__xl_cosh")

TLI_DEFINE_SCALAR_MASS_FUNC("erff", "__xl_erff")
TLI_DEFINE_SCALAR_MASS_FUNC("erf", "__xl_erf")

TLI_DEFINE_SCALAR_MASS_FUNC("erfcf", "__xl_erfcf")
TLI_DEFINE_SCALAR_MASS_FUNC("erfc", "__xl_erfc")

TLI_DEFINE_SCALAR_MASS_FUNC("expf", "__xl_expf")
TLI_DEFINE_SCALAR_MASS_FUNC("__expf_finite", "__xl_expf")
TLI_DEFINE_SCALAR_MASS_FUNC("exp", "__xl_exp")
TLI_DEFINE_SCALAR_MASS_FUNC("__exp_finite", "__xl_exp")

TLI_DEFINE_SCALAR_MASS_FUNC("expm1f", "__xl_expm1f")
TLI_DEFINE_SCALAR_MASS_FUNC("expm1", "__xl_expm1")

TLI_DEFINE_SCALAR_MASS_FUNC("hypotf", "__xl_hypotf")
TLI_DEFINE_SCALAR_MASS_FUNC("hypot", "__xl_hypot")

TLI_DEFINE_SCALAR_MASS_FUNC("lgammaf", "__xl_lgammaf")
TLI_DEFINE_SCALAR_MASS_FUNC("lgamma", "__xl_lgamma")

TLI_DEFINE_SCALAR_MASS_FUNC("logf", "__xl_logf")
TLI_DEFINE_SCALAR_MASS_FUNC("__logf_finite", "__xl_logf")
TLI_DEFINE_SCALAR_MASS_FUNC("log", "__xl_log")
TLI_DEFINE_SCALAR_MASS_FUNC("__log_finite", "__xl_log")

TLI_DEFINE_SCALAR_MASS_FUNC("log10f", "__xl_log10f")
TLI_DEFINE_SCALAR_MASS_FUNC("__log10f_finite", "__xl_log10f")
TLI_DEFINE_SCALAR_MASS_FUNC("log10", "__xl_log10")
TLI_DEFINE_SCALAR_MASS_FUNC("__log10_finite", "__xl_log10")

TLI_DEFINE_SCALAR_MASS_FUNC("log1pf", "__xl_log1pf")
TLI_DEFINE_SCALAR_MASS_FUNC("log1p", "__xl_log1p")

TLI_DEFINE_SCALAR_MASS_FUNC("powf", "__xl_powf")
TLI_DEFINE_SCALAR_MASS_FUNC("__powf_finite", "__xl_powf")
TLI_DEFINE_SCALAR_MASS_FUNC("pow", "__xl_pow")
TLI_DEFINE_SCALAR_MASS_FUNC("__pow_finite", "__xl_pow")

TLI_DEFINE_SCALAR_MASS_FUNC("rsqrt", "__xl_rsqrt")

TLI_DEFINE_SCALAR_MASS_FUNC("sinf", "__xl_sinf")
TLI_DEFINE_SCALAR_MASS_FUNC("sin", "__xl_sin")

TLI_DEFINE_SCALAR_MASS_FUNC("sinhf", "__xl_sinhf")
TLI_DEFINE_SCALAR_MASS_FUNC("__sinhf_finite", "__xl_sinhf")
TLI_DEFINE_SCALAR_MASS_FUNC("sinh", "__xl_sinh")
TLI_DEFINE_SCALAR_MASS_FUNC("__sinh_finite", "__xl_sinh")

TLI_DEFINE_SCALAR_MASS_FUNC("sqrt", "__xl_sqrt")

TLI_DEFINE_SCALAR_MASS_FUNC("tanf", "__xl_tanf")
TLI_DEFINE_SCALAR_MASS_FUNC("tan", "__xl_tan")

TLI_DEFINE_SCALAR_MASS_FUNC("tanhf", "__xl_tanhf")
TLI_DEFINE_SCALAR_MASS_FUNC("tanh", "__xl_tanh")

#undef TLI_DEFINE_SCALAR_MASS_FUNCS
#undef TLI_DEFINE_SCALAR_MASS_FUNC
//===- llvm/Analysis/LoopNestAnalysis.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the interface for the loop nest analysis.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPNESTANALYSIS_H
#define LLVM_ANALYSIS_LOOPNESTANALYSIS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"

namespace llvm {

using LoopVectorTy = SmallVector<Loop *, 8>;

class LPMUpdater;

/// This class represents a loop nest and can be used to query its properties.
class LLVM_EXTERNAL_VISIBILITY LoopNest {
public:
  using InstrVectorTy = SmallVector<const Instruction *>;

  /// Construct a loop nest rooted by loop \p Root.
  LoopNest(Loop &Root, ScalarEvolution &SE);

  LoopNest() = delete;

  /// Construct a LoopNest object.
  static std::unique_ptr<LoopNest> getLoopNest(Loop &Root, ScalarEvolution &SE);

  /// Return true if the given loops \p OuterLoop and \p InnerLoop are
  /// perfectly nested with respect to each other, and false otherwise.
  /// Example:
  /// \code
  ///   for(i)
  ///     for(j)
  ///       for(k)
  /// \endcode
  /// arePerfectlyNested(loop_i, loop_j, SE) would return true.
  /// arePerfectlyNested(loop_j, loop_k, SE) would return true.
  /// arePerfectlyNested(loop_i, loop_k, SE) would return false.
  static bool arePerfectlyNested(const Loop &OuterLoop, const Loop &InnerLoop,
                                 ScalarEvolution &SE);

  /// Return a vector of instructions that prevent the LoopNest given
  /// by loops \p OuterLoop and \p InnerLoop from being perfect.
  static InstrVectorTy getInterveningInstructions(const Loop &OuterLoop,
                                                  const Loop &InnerLoop,
                                                  ScalarEvolution &SE);

  /// Return the maximum nesting depth of the loop nest rooted by loop \p Root.
  /// For example given the loop nest:
  /// \code
  ///   for(i)     // loop at level 1 and Root of the nest
  ///     for(j)   // loop at level 2
  ///       <code>
  ///       for(k) // loop at level 3
  /// \endcode
  /// getMaxPerfectDepth(Loop_i) would return 2.
  static unsigned getMaxPerfectDepth(const Loop &Root, ScalarEvolution &SE);

  /// Recursively traverse all empty 'single successor' basic blocks of \p From
  /// (if there are any). When \p CheckUniquePred is set to true, check if
  /// each of the empty single successors has a unique predecessor. Return
  /// the last basic block found or \p End if it was reached during the search.
  static const BasicBlock &skipEmptyBlockUntil(const BasicBlock *From,
                                               const BasicBlock *End,
                                               bool CheckUniquePred = false);

  /// Return the outermost loop in the loop nest.
  Loop &getOutermostLoop() const { return *Loops.front(); }

  /// Return the innermost loop in the loop nest if the nest has only one
  /// innermost loop, and nullptr otherwise.
  /// Note: the innermost loop returned is not necessarily perfectly nested.
  Loop *getInnermostLoop() const {
    if (Loops.size() == 1)
      return Loops.back();

    // The loops in the 'Loops' vector have been collected in breadth first
    // order, therefore if the last 2 loops in it have the same nesting depth
    // there isn't a unique innermost loop in the nest.
    Loop *LastLoop = Loops.back();
    auto SecondLastLoopIter = ++Loops.rbegin();
    return (LastLoop->getLoopDepth() == (*SecondLastLoopIter)->getLoopDepth())
               ? nullptr
               : LastLoop;
  }

  /// Return the loop at the given \p Index.
  Loop *getLoop(unsigned Index) const {
    assert(Index < Loops.size() && "Index is out of bounds");
    return Loops[Index];
  }

  /// Get the loop index of the given loop \p L.
  unsigned getLoopIndex(const Loop &L) const {
    for (unsigned I = 0; I < getNumLoops(); ++I)
      if (getLoop(I) == &L)
        return I;
    llvm_unreachable("Loop not in the loop nest");
  }

  /// Return the number of loops in the nest.
  size_t getNumLoops() const { return Loops.size(); }

  /// Get the loops in the nest.
  ArrayRef<Loop *> getLoops() const { return Loops; }

  /// Get the loops in the nest at the given \p Depth.
  LoopVectorTy getLoopsAtDepth(unsigned Depth) const {
    assert(Depth >= Loops.front()->getLoopDepth() &&
           Depth <= Loops.back()->getLoopDepth() && "Invalid depth");
    LoopVectorTy Result;
    for (unsigned I = 0; I < getNumLoops(); ++I) {
      Loop *L = getLoop(I);
      if (L->getLoopDepth() == Depth)
        Result.push_back(L);
      else if (L->getLoopDepth() > Depth)
        break;
    }
    return Result;
  }

  /// Retrieve a vector of perfect loop nests contained in the current loop
  /// nest. For example, given the following nest containing 4 loops, this
  /// member function would return {{L1,L2},{L3,L4}}.
  /// \code
  ///   for(i) // L1
  ///     for(j) // L2
  ///       <code>
  ///       for(k) // L3
  ///         for(l) // L4
  /// \endcode
  SmallVector<LoopVectorTy, 4> getPerfectLoops(ScalarEvolution &SE) const;

  /// Return the loop nest depth (i.e. the loop depth of the 'deepest' loop)
  /// For example given the loop nest:
  /// \code
  ///   for(i)      // loop at level 1 and Root of the nest
  ///     for(j1)   // loop at level 2
  ///       for(k)  // loop at level 3
  ///     for(j2)   // loop at level 2
  /// \endcode
  /// getNestDepth() would return 3.
  unsigned getNestDepth() const {
    int NestDepth =
        Loops.back()->getLoopDepth() - Loops.front()->getLoopDepth() + 1;
    assert(NestDepth > 0 && "Expecting NestDepth to be at least 1");
    return NestDepth;
  }

  /// Return the maximum perfect nesting depth.
  unsigned getMaxPerfectDepth() const { return MaxPerfectDepth; }

  /// Return true if all loops in the loop nest are in simplify form.
  bool areAllLoopsSimplifyForm() const {
    return all_of(Loops, [](const Loop *L) { return L->isLoopSimplifyForm(); });
  }

  /// Return true if all loops in the loop nest are in rotated form.
  bool areAllLoopsRotatedForm() const {
    return all_of(Loops, [](const Loop *L) { return L->isRotatedForm(); });
  }

  /// Return the function to which the loop-nest belongs.
  Function *getParent() const {
    return Loops.front()->getHeader()->getParent();
  }

  StringRef getName() const { return Loops.front()->getName(); }

protected:
  const unsigned MaxPerfectDepth; // maximum perfect nesting depth level.
  LoopVectorTy Loops; // the loops in the nest (in breadth first order).

private:
  enum LoopNestEnum {
    PerfectLoopNest,
    ImperfectLoopNest,
    InvalidLoopStructure,
    OuterLoopLowerBoundUnknown
  };
  static LoopNestEnum analyzeLoopNestForPerfectNest(const Loop &OuterLoop,
                                                    const Loop &InnerLoop,
                                                    ScalarEvolution &SE);
};

raw_ostream &operator<<(raw_ostream &, const LoopNest &);

/// This analysis provides information for a loop nest. The analysis runs on
/// demand and can be initiated via AM.getResult<LoopNestAnalysis>.
class LoopNestAnalysis : public AnalysisInfoMixin<LoopNestAnalysis> {
  friend AnalysisInfoMixin<LoopNestAnalysis>;
  static AnalysisKey Key;

public:
  using Result = LoopNest;
  Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
};
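
// Example (editorial sketch, not part of the original header): requesting the
// loop nest from a loop pass, following the AM.getResult usage noted above.
//
// \code
//   PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
//                         LoopStandardAnalysisResults &AR, LPMUpdater &U) {
//     const LoopNest &LN = AM.getResult<LoopNestAnalysis>(L, AR);
//     if (LN.areAllLoopsSimplifyForm() && LN.getMaxPerfectDepth() >= 2) {
//       // ... transform the nest rooted at LN.getOutermostLoop() ...
//     }
//     return PreservedAnalyses::all();
//   }
// \endcode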

/// Printer pass for the \c LoopNest results.
class LoopNestPrinterPass : public PassInfoMixin<LoopNestPrinterPass> {
  raw_ostream &OS;

public:
  explicit LoopNestPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // namespace llvm

#endif // LLVM_ANALYSIS_LOOPNESTANALYSIS_H
//===- AssumeBundleQueries.h - utils to query assume bundles ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contain tools to query into assume bundles. assume bundles can be
// built using utilities from Transform/Utils/AssumeBundleBuilder.h
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_ASSUMEBUNDLEQUERIES_H
#define LLVM_ANALYSIS_ASSUMEBUNDLEQUERIES_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/IntrinsicInst.h"

namespace llvm {
class AssumptionCache;
class DominatorTree;
class Instruction;
class Value;

/// Index of elements in the operand bundle.
/// If the element exists, it is guaranteed to be what is specified in this
/// enum, but it may not exist.
enum AssumeBundleArg {
  ABA_WasOn = 0,
  ABA_Argument = 1,
};

/// Query the operand bundle of an llvm.assume to find a single attribute of
/// the specified kind applied on a specified Value.
///
/// This has a non-constant complexity. It should only be used when a single
/// attribute is going to be queried.
///
/// Return true iff the queried attribute was found.
/// If ArgVal is set, the argument will be stored to ArgVal.
bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn, StringRef AttrName,
                          uint64_t *ArgVal = nullptr);
inline bool hasAttributeInAssume(AssumeInst &Assume, Value *IsOn,
                                 Attribute::AttrKind Kind,
                                 uint64_t *ArgVal = nullptr) {
  return hasAttributeInAssume(Assume, IsOn,
                              Attribute::getNameFromAttrKind(Kind), ArgVal);
}

template<> struct DenseMapInfo<Attribute::AttrKind> {
  static Attribute::AttrKind getEmptyKey() {
    return Attribute::EmptyKey;
  }
  static Attribute::AttrKind getTombstoneKey() {
    return Attribute::TombstoneKey;
  }
  static unsigned getHashValue(Attribute::AttrKind AK) {
    return hash_combine(AK);
  }
  static bool isEqual(Attribute::AttrKind LHS, Attribute::AttrKind RHS) {
    return LHS == RHS;
  }
};

/// The map Key contains the Value on which the attribute is valid and
/// the Attribute that is valid for that value.
/// If the Attribute is not on any value, the Value is nullptr.
using RetainedKnowledgeKey = std::pair<Value *, Attribute::AttrKind>;

struct MinMax {
  uint64_t Min;
  uint64_t Max;
};

/// A mapping from intrinsics (=`llvm.assume` calls) to a value range
/// (=knowledge) that is encoded in them. How the value range is interpreted
/// depends on the RetainedKnowledgeKey that was used to get this out of the
/// RetainedKnowledgeMap.
using Assume2KnowledgeMap = DenseMap<AssumeInst *, MinMax>;

using RetainedKnowledgeMap =
    DenseMap<RetainedKnowledgeKey, Assume2KnowledgeMap>;

/// Insert into the map all the information contained in the operand bundles of
/// the llvm.assume. This should be used instead of hasAttributeInAssume when
/// many queries are going to be made on the same llvm.assume.
/// String attributes are not inserted in the map.
/// If the IR changes the map will be outdated.
void fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result);

/// Represent one piece of information held inside an operand bundle of an
/// llvm.assume. AttrKind is the property that holds.
/// WasOn, if not null, is the Value for which AttrKind holds.
/// ArgValue is optionally an argument of the attribute.
/// For example if we know that %P has an alignment of at least four:
///  - AttrKind will be Attribute::Alignment.
///  - WasOn will be %P.
///  - ArgValue will be 4.
struct RetainedKnowledge {
  Attribute::AttrKind AttrKind = Attribute::None;
  uint64_t ArgValue = 0;
  Value *WasOn = nullptr;
  bool operator==(RetainedKnowledge Other) const {
    return AttrKind == Other.AttrKind && WasOn == Other.WasOn &&
           ArgValue == Other.ArgValue;
  }
  bool operator!=(RetainedKnowledge Other) const { return !(*this == Other); }
  /// This is only intended for use in std::min/std::max between attributes that
  /// only differ in ArgValue.
  bool operator<(RetainedKnowledge Other) const {
    assert(((AttrKind == Other.AttrKind && WasOn == Other.WasOn) ||
            AttrKind == Attribute::None || Other.AttrKind == Attribute::None) &&
           "This is only intend for use in min/max to select the best for "
           "RetainedKnowledge that is otherwise equal");
    return ArgValue < Other.ArgValue;
  }
  operator bool() const { return AttrKind != Attribute::None; }
  static RetainedKnowledge none() { return RetainedKnowledge{}; }
};
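
// Example (editorial sketch, not part of the original header): recovering the
// alignment knowledge from the example above via the getKnowledgeForValue
// query declared later in this header; P and AC are assumed to be a Value and
// an AssumptionCache available to the caller.
//
// \code
//   RetainedKnowledge RK =
//       getKnowledgeForValue(P, {Attribute::Alignment}, &AC);
//   if (RK && RK.ArgValue >= 4) {
//     // P is known to be at least 4-byte aligned at some llvm.assume.
//   }
// \endcode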

/// Retrieve the information held by Assume on the operand at index Idx.
/// Assume should be an llvm.assume and Idx should be in the operand bundle.
RetainedKnowledge getKnowledgeFromOperandInAssume(AssumeInst &Assume,
                                                  unsigned Idx);

/// Retrieve the information held by the Use U of an llvm.assume. The use
/// should be in the operand bundle.
inline RetainedKnowledge getKnowledgeFromUseInAssume(const Use *U) {
  return getKnowledgeFromOperandInAssume(*cast<AssumeInst>(U->getUser()),
                                         U->getOperandNo());
}

/// Tag in operand bundle indicating that this bundle should be ignored.
constexpr StringRef IgnoreBundleTag = "ignore";

/// Return true iff the operand bundles of the provided llvm.assume don't
/// contain any valuable information. This is true when:
///  - The operand bundle is empty
///  - The operand bundle only contains information about dropped values or
///    constant folded values.
///
/// The argument to the call of llvm.assume may still be useful even if this
/// function returns true.
bool isAssumeWithEmptyBundle(AssumeInst &Assume);

/// Return a valid Knowledge associated to the Use U if its Attribute kind is
/// in AttrKinds.
RetainedKnowledge getKnowledgeFromUse(const Use *U,
                                      ArrayRef<Attribute::AttrKind> AttrKinds);

/// Return a valid Knowledge associated to the Value V if its Attribute kind is
/// in AttrKinds and it matches the Filter.
RetainedKnowledge getKnowledgeForValue(
    const Value *V, ArrayRef<Attribute::AttrKind> AttrKinds,
    AssumptionCache *AC = nullptr,
    function_ref<bool(RetainedKnowledge, Instruction *,
                            const CallBase::BundleOpInfo *)>
        Filter = [](auto...) { return true; });

/// Return a valid Knowledge associated to the Value V if its Attribute kind is
/// in AttrKinds and the knowledge is suitable to be used in the context of
/// CtxI.
RetainedKnowledge getKnowledgeValidInContext(
    const Value *V, ArrayRef<Attribute::AttrKind> AttrKinds,
    const Instruction *CtxI, const DominatorTree *DT = nullptr,
    AssumptionCache *AC = nullptr);

/// This extracts the Knowledge from an element of an operand bundle.
/// This is mostly for use in the assume builder.
RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume,
                                         const CallBase::BundleOpInfo &BOI);

} // namespace llvm

#endif
//===- StackSafetyAnalysis.h - Stack memory safety analysis -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Stack Safety Analysis detects allocas and arguments with safe access.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_STACKSAFETYANALYSIS_H
#define LLVM_ANALYSIS_STACKSAFETYANALYSIS_H

#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"

namespace llvm {

class AllocaInst;
class ScalarEvolution;

/// Interface to access stack safety analysis results for a single function.
class StackSafetyInfo {
public:
  struct InfoTy;

private:
  Function *F = nullptr;
  std::function<ScalarEvolution &()> GetSE;
  mutable std::unique_ptr<InfoTy> Info;

public:
  StackSafetyInfo();
  StackSafetyInfo(Function *F, std::function<ScalarEvolution &()> GetSE);
  StackSafetyInfo(StackSafetyInfo &&);
  StackSafetyInfo &operator=(StackSafetyInfo &&);
  ~StackSafetyInfo();

  const InfoTy &getInfo() const;

  // TODO: Add methods useful for clients.
  void print(raw_ostream &O) const;

  /// Parameters used for a FunctionSummary.
  /// Collects access information for all pointer parameters of the function.
  /// The information includes the range of direct accesses to a parameter by
  /// the function and all call sites the parameter is passed to.
  /// StackSafety assumes that missing parameter information means possibility
  /// of access to the parameter with any offset, so we can correctly link
  /// code without StackSafety information, e.g. non-ThinLTO.
  std::vector<FunctionSummary::ParamAccess>
  getParamAccesses(ModuleSummaryIndex &Index) const;
};

class StackSafetyGlobalInfo {
public:
  struct InfoTy;

private:
  Module *M = nullptr;
  std::function<const StackSafetyInfo &(Function &F)> GetSSI;
  const ModuleSummaryIndex *Index = nullptr;
  mutable std::unique_ptr<InfoTy> Info;
  const InfoTy &getInfo() const;

public:
  StackSafetyGlobalInfo();
  StackSafetyGlobalInfo(
      Module *M, std::function<const StackSafetyInfo &(Function &F)> GetSSI,
      const ModuleSummaryIndex *Index);
  StackSafetyGlobalInfo(StackSafetyGlobalInfo &&);
  StackSafetyGlobalInfo &operator=(StackSafetyGlobalInfo &&);
  ~StackSafetyGlobalInfo();

  // Whether we can prove that all accesses to this Alloca are in-range and
  // during its lifetime.
  bool isSafe(const AllocaInst &AI) const;

  // Returns true if the instruction can be proven to do only two types of
  // memory accesses:
  //  (1) live stack locations in-bounds or
  //  (2) non-stack locations.
  bool stackAccessIsSafe(const Instruction &I) const;
  void print(raw_ostream &O) const;
  void dump() const;
};
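
// Example (editorial sketch, not part of the original header): querying the
// interprocedural result for an alloca; MAM, M, and AI are assumed to be
// provided by the caller.
//
// \code
//   const StackSafetyGlobalInfo &SSGI =
//       MAM.getResult<StackSafetyGlobalAnalysis>(M);
//   if (SSGI.isSafe(AI)) {
//     // Every access to AI is provably in-range and within its lifetime.
//   }
// \endcode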

/// StackSafetyInfo wrapper for the new pass manager.
class StackSafetyAnalysis : public AnalysisInfoMixin<StackSafetyAnalysis> {
  friend AnalysisInfoMixin<StackSafetyAnalysis>;
  static AnalysisKey Key;

public:
  using Result = StackSafetyInfo;
  StackSafetyInfo run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for the \c StackSafetyAnalysis results.
class StackSafetyPrinterPass : public PassInfoMixin<StackSafetyPrinterPass> {
  raw_ostream &OS;

public:
  explicit StackSafetyPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// StackSafetyInfo wrapper for the legacy pass manager
class StackSafetyInfoWrapperPass : public FunctionPass {
  StackSafetyInfo SSI;

public:
  static char ID;
  StackSafetyInfoWrapperPass();

  const StackSafetyInfo &getResult() const { return SSI; }

  void print(raw_ostream &O, const Module *M) const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnFunction(Function &F) override;
};

/// This pass performs the global (interprocedural) stack safety analysis (new
/// pass manager).
class StackSafetyGlobalAnalysis
    : public AnalysisInfoMixin<StackSafetyGlobalAnalysis> {
  friend AnalysisInfoMixin<StackSafetyGlobalAnalysis>;
  static AnalysisKey Key;

public:
  using Result = StackSafetyGlobalInfo;
  Result run(Module &M, ModuleAnalysisManager &AM);
};

/// Printer pass for the \c StackSafetyGlobalAnalysis results.
class StackSafetyGlobalPrinterPass
    : public PassInfoMixin<StackSafetyGlobalPrinterPass> {
  raw_ostream &OS;

public:
  explicit StackSafetyGlobalPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// This pass performs the global (interprocedural) stack safety analysis
/// (legacy pass manager).
class StackSafetyGlobalInfoWrapperPass : public ModulePass {
  StackSafetyGlobalInfo SSGI;

public:
  static char ID;

  StackSafetyGlobalInfoWrapperPass();
  ~StackSafetyGlobalInfoWrapperPass();

  const StackSafetyGlobalInfo &getResult() const { return SSGI; }

  void print(raw_ostream &O, const Module *M) const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnModule(Module &M) override;
};

bool needsParamAccessSummary(const Module &M);

void generateParamAccessSummary(ModuleSummaryIndex &Index);

} // end namespace llvm

#endif // LLVM_ANALYSIS_STACKSAFETYANALYSIS_H
//===- IntervalIterator.h - Interval Iterator Declaration -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an iterator that enumerates the intervals in a control flow
// graph of some sort.  This iterator is parametric, allowing iteration over the
// following types of graphs:
//
//  1. A Function* object, composed of BasicBlock nodes.
//  2. An IntervalPartition& object, composed of Interval nodes.
//
// This iterator is defined to walk the control flow graph, returning intervals
// in depth first order.  These intervals are completely filled in except for
// the predecessor fields (the successor information is filled in however).
//
// By default, the intervals created by this iterator are deleted after they
// are no longer of any use to the iterator.  This behavior can be changed by
// passing a false value into the intervals_begin() function. This clears the
// IOwnMem member, so the intervals are not deleted.
//
// It is only safe to use this if all of the intervals are deleted by the caller
// and all of the intervals are processed.  However, the user of the iterator is
// not allowed to modify or delete the intervals until after the iterator has
// been used completely.  The IntervalPartition class uses this functionality.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INTERVALITERATOR_H
#define LLVM_ANALYSIS_INTERVALITERATOR_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/Analysis/Interval.h"
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/IR/CFG.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <set>
#include <utility>
#include <vector>

namespace llvm {

class BasicBlock;
class Function;

// getNodeHeader - Given a source graph node and the source graph, return the
// BasicBlock that is the header node.  This is the opposite of
// getSourceGraphNode.
inline BasicBlock *getNodeHeader(BasicBlock *BB) { return BB; }
inline BasicBlock *getNodeHeader(Interval *I) { return I->getHeaderNode(); }

// getSourceGraphNode - Given a BasicBlock and the source graph, return the
// source graph node that corresponds to the BasicBlock.  This is the opposite
// of getNodeHeader.
inline BasicBlock *getSourceGraphNode(Function *, BasicBlock *BB) {
  return BB;
}
inline Interval *getSourceGraphNode(IntervalPartition *IP, BasicBlock *BB) {
  return IP->getBlockInterval(BB);
}

// addNodeToInterval - This method exists to assist the generic ProcessNode
// with the task of adding a node to the new interval, depending on the
// type of the source node.  In the case of a CFG source graph (BasicBlock
// case), the BasicBlock itself is added to the interval.
inline void addNodeToInterval(Interval *Int, BasicBlock *BB) {
  Int->Nodes.push_back(BB);
}

// addNodeToInterval - This method exists to assist the generic ProcessNode
// with the task of adding a node to the new interval, depending on the
// type of the source node.  In the case of a CFG source graph (BasicBlock
// case), the BasicBlock itself is added to the interval.  In the case of
// an IntervalPartition source graph (Interval case), all of the member
// BasicBlocks are added to the interval.
inline void addNodeToInterval(Interval *Int, Interval *I) {
  // Add all of the nodes in I as new nodes in Int.
  llvm::append_range(Int->Nodes, I->Nodes);
}

template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy *>,
         class IGT = GraphTraits<Inverse<NodeTy *>>>
class IntervalIterator {
  std::vector<std::pair<Interval *, typename Interval::succ_iterator>> IntStack;
  std::set<BasicBlock *> Visited;
  OrigContainer_t *OrigContainer;
  bool IOwnMem;     // If True, delete intervals when done with them
                    // See file header for conditions of use

public:
  using iterator_category = std::forward_iterator_tag;

  IntervalIterator() = default; // End iterator, empty stack

  IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) {
    OrigContainer = M;
    if (!ProcessInterval(&M->front())) {
      llvm_unreachable("ProcessInterval should never fail for first interval!");
    }
  }

  IntervalIterator(IntervalIterator &&x)
      : IntStack(std::move(x.IntStack)), Visited(std::move(x.Visited)),
        OrigContainer(x.OrigContainer), IOwnMem(x.IOwnMem) {
    x.IOwnMem = false;
  }

  IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) {
    OrigContainer = &IP;
    if (!ProcessInterval(IP.getRootInterval())) {
      llvm_unreachable("ProcessInterval should never fail for first interval!");
    }
  }

  ~IntervalIterator() {
    if (IOwnMem)
      while (!IntStack.empty()) {
        delete operator*();
        IntStack.pop_back();
      }
  }

  bool operator==(const IntervalIterator &x) const {
    return IntStack == x.IntStack;
  }
  bool operator!=(const IntervalIterator &x) const { return !(*this == x); }

  const Interval *operator*() const { return IntStack.back().first; }
  Interval *operator*() { return IntStack.back().first; }
  const Interval *operator->() const { return operator*(); }
  Interval *operator->() { return operator*(); }

  IntervalIterator &operator++() { // Preincrement
    assert(!IntStack.empty() && "Attempting to use interval iterator at end!");
    do {
      // All of the intervals on the stack have been visited.  Try visiting
      // their successors now.
      Interval::succ_iterator &SuccIt = IntStack.back().second,
                                EndIt = succ_end(IntStack.back().first);
      while (SuccIt != EndIt) {                 // Loop over all interval succs
        bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
        ++SuccIt;                               // Increment iterator
        if (Done) return *this;                 // Found a new interval! Use it!
      }

      // Free interval memory... if necessary
      if (IOwnMem) delete IntStack.back().first;

      // We ran out of successors for this interval... pop off the stack
      IntStack.pop_back();
    } while (!IntStack.empty());

    return *this;
  }

  IntervalIterator operator++(int) { // Postincrement
    IntervalIterator tmp = *this;
    ++*this;
    return tmp;
  }

private:
  // ProcessInterval - This method is used during the construction of the
  // interval graph.  It walks through the source graph, recursively creating
  // an interval per invocation until the entire graph is covered.  This uses
  // the ProcessNode method to add all of the nodes to the interval.
  //
  // This method is templated because it may operate on two different source
  // graphs: a basic block graph, or a preexisting interval graph.
  bool ProcessInterval(NodeTy *Node) {
    BasicBlock *Header = getNodeHeader(Node);
    if (!Visited.insert(Header).second)
      return false;

    Interval *Int = new Interval(Header);

    // Check all of our successors to see if they are in the interval...
    for (typename GT::ChildIteratorType I = GT::child_begin(Node),
           E = GT::child_end(Node); I != E; ++I)
      ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));

    IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
    return true;
  }

  // ProcessNode - This method is called by ProcessInterval to add nodes to the
  // interval being constructed, and it is also called recursively as it walks
  // the source graph.  A node is added to the current interval only if all of
  // its predecessors are already in the graph.  This also takes care of keeping
  // the successor set of an interval up to date.
  //
  // This method is templated because it may operate on two different source
  // graphs: a basic block graph, or a preexisting interval graph.
  void ProcessNode(Interval *Int, NodeTy *Node) {
    assert(Int && "Null interval == bad!");
    assert(Node && "Null Node == bad!");

    BasicBlock *NodeHeader = getNodeHeader(Node);

    if (Visited.count(NodeHeader)) {     // Node already been visited?
      if (Int->contains(NodeHeader)) {   // Already in this interval...
        return;
      } else {                           // In other interval, add as successor
        if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
          Int->Successors.push_back(NodeHeader);
      }
    } else {                             // Otherwise, not in interval yet
      for (typename IGT::ChildIteratorType I = IGT::child_begin(Node),
             E = IGT::child_end(Node); I != E; ++I) {
        if (!Int->contains(*I)) {        // If pred not in interval, we can't be
          if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
            Int->Successors.push_back(NodeHeader);
          return;                        // See you later
        }
      }

      // If we get here, then all of the predecessors of BB are in the interval
      // already.  In this case, we must add BB to the interval!
      addNodeToInterval(Int, Node);
      Visited.insert(NodeHeader);     // The node has now been visited!

      if (Int->isSuccessor(NodeHeader)) {
        // If we were in the successor list from before... remove from succ list
        llvm::erase_value(Int->Successors, NodeHeader);
      }

      // Now that we have discovered that Node is in the interval, perhaps some
      // of its successors are as well?
      for (typename GT::ChildIteratorType It = GT::child_begin(Node),
             End = GT::child_end(Node); It != End; ++It)
        ProcessNode(Int, getSourceGraphNode(OrigContainer, *It));
    }
  }
};

using function_interval_iterator = IntervalIterator<BasicBlock, Function>;
using interval_part_interval_iterator =
    IntervalIterator<Interval, IntervalPartition>;

inline function_interval_iterator intervals_begin(Function *F,
                                                  bool DeleteInts = true) {
  return function_interval_iterator(F, DeleteInts);
}
inline function_interval_iterator intervals_end(Function *) {
  return function_interval_iterator();
}

inline interval_part_interval_iterator
   intervals_begin(IntervalPartition &IP, bool DeleteIntervals = true) {
  return interval_part_interval_iterator(IP, DeleteIntervals);
}

inline interval_part_interval_iterator intervals_end(IntervalPartition &IP) {
  return interval_part_interval_iterator();
}
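
// Example (editorial sketch, not part of the original header): walking the
// intervals of a function F in depth-first order, letting the iterator own
// (and free) the Interval objects it creates.
//
// \code
//   for (function_interval_iterator I = intervals_begin(&F),
//                                   E = intervals_end(&F);
//        I != E; ++I) {
//     Interval *Int = *I;
//     BasicBlock *Header = Int->getHeaderNode();
//     // ... inspect Int->Nodes and Int->Successors ...
//   }
// \endcode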

} // end namespace llvm

#endif // LLVM_ANALYSIS_INTERVALITERATOR_H
//===- SparsePropagation.h - Sparse Conditional Property Propagation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an abstract sparse conditional propagation algorithm,
// modeled after SCCP, but with a customizable lattice function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SPARSEPROPAGATION_H
#define LLVM_ANALYSIS_SPARSEPROPAGATION_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include <set>

#define DEBUG_TYPE "sparseprop"

namespace llvm {

/// A template for translating between LLVM Values and LatticeKeys. Clients must
/// provide a specialization of LatticeKeyInfo for their LatticeKey type.
template <class LatticeKey> struct LatticeKeyInfo {
  // static inline Value *getValueFromLatticeKey(LatticeKey Key);
  // static inline LatticeKey getLatticeKeyFromValue(Value *V);
};
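
// Example (editorial sketch, not part of the original header): the required
// specialization for the common case where the lattice key is the Value
// itself.
//
// \code
//   template <> struct LatticeKeyInfo<Value *> {
//     static inline Value *getValueFromLatticeKey(Value *V) { return V; }
//     static inline Value *getLatticeKeyFromValue(Value *V) { return V; }
//   };
// \endcode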

template <class LatticeKey, class LatticeVal,
          class KeyInfo = LatticeKeyInfo<LatticeKey>>
class SparseSolver;

/// AbstractLatticeFunction - This class is implemented by the dataflow instance
/// to specify what the lattice values are and how they handle merges etc.  This
/// gives the client the power to compute lattice values from instructions,
/// constants, etc.  The current requirement is that lattice values must be
/// copyable.  At the moment, nothing tries to avoid copying.  Additionally,
/// lattice keys must be able to be used as keys of a mapping data structure.
/// Internally, the generic solver currently uses a DenseMap to map lattice keys
/// to lattice values.  If the lattice key is a non-standard type, a
/// specialization of DenseMapInfo must be provided.
template <class LatticeKey, class LatticeVal> class AbstractLatticeFunction {
private:
  LatticeVal UndefVal, OverdefinedVal, UntrackedVal;

public:
  AbstractLatticeFunction(LatticeVal undefVal, LatticeVal overdefinedVal,
                          LatticeVal untrackedVal) {
    UndefVal = undefVal;
    OverdefinedVal = overdefinedVal;
    UntrackedVal = untrackedVal;
  }

  virtual ~AbstractLatticeFunction() = default;

  LatticeVal getUndefVal()       const { return UndefVal; }
  LatticeVal getOverdefinedVal() const { return OverdefinedVal; }
  LatticeVal getUntrackedVal()   const { return UntrackedVal; }

  /// IsUntrackedValue - If the specified LatticeKey is obviously uninteresting
  /// to the analysis (i.e., it would always return UntrackedVal), this
  /// function can return true to avoid pointless work.
  virtual bool IsUntrackedValue(LatticeKey Key) { return false; }

  /// ComputeLatticeVal - Compute and return a LatticeVal corresponding to the
  /// given LatticeKey.
  virtual LatticeVal ComputeLatticeVal(LatticeKey Key) {
    return getOverdefinedVal();
  }

  /// IsSpecialCasedPHI - Given a PHI node, determine whether this PHI node is
  /// one that we want to handle through ComputeInstructionState.
  virtual bool IsSpecialCasedPHI(PHINode *PN) { return false; }

  /// MergeValues - Compute and return the merge of the two specified lattice
  /// values.  Merging should only move one direction down the lattice to
  /// guarantee convergence (toward overdefined).
  virtual LatticeVal MergeValues(LatticeVal X, LatticeVal Y) {
    return getOverdefinedVal(); // always safe, never useful.
  }

  /// ComputeInstructionState - Compute the LatticeKeys that change as a result
  /// of executing instruction \p I. Their associated LatticeVals are stored in
  /// \p ChangedValues.
  virtual void
  ComputeInstructionState(Instruction &I,
                          DenseMap<LatticeKey, LatticeVal> &ChangedValues,
                          SparseSolver<LatticeKey, LatticeVal> &SS) = 0;

  /// PrintLatticeVal - Render the given LatticeVal to the specified stream.
  virtual void PrintLatticeVal(LatticeVal LV, raw_ostream &OS);

  /// PrintLatticeKey - Render the given LatticeKey to the specified stream.
  virtual void PrintLatticeKey(LatticeKey Key, raw_ostream &OS);

  /// GetValueFromLatticeVal - If the given LatticeVal is representable as an
  /// LLVM value, return it; otherwise, return nullptr. If a type is given, the
  /// returned value must have the same type. This function is used by the
  /// generic solver in attempting to resolve branch and switch conditions.
  virtual Value *GetValueFromLatticeVal(LatticeVal LV, Type *Ty = nullptr) {
    return nullptr;
  }
};
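
// Illustrative sketch (hypothetical, not part of this interface): a minimal
// concrete lattice function over Value* keys with states encoded as an enum.
// Its transfer function is maximally conservative: every visited instruction
// is immediately sent to overdefined.
#if 0
enum DemoLattice { DemoUndef, DemoConstant, DemoOverdefined, DemoUntracked };

struct DemoLatticeFunction
    : public AbstractLatticeFunction<Value *, DemoLattice> {
  DemoLatticeFunction()
      : AbstractLatticeFunction(DemoUndef, DemoOverdefined, DemoUntracked) {}

  void ComputeInstructionState(
      Instruction &I, DenseMap<Value *, DemoLattice> &ChangedValues,
      SparseSolver<Value *, DemoLattice> &SS) override {
    // Conservative transfer function: every result becomes overdefined.
    ChangedValues[&I] = DemoOverdefined;
  }
};
#endif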

/// SparseSolver - This class is a general purpose solver for Sparse Conditional
/// Propagation with a programmable lattice function.
template <class LatticeKey, class LatticeVal, class KeyInfo>
class SparseSolver {

  /// LatticeFunc - This is the object that knows the lattice and how to
  /// compute transfer functions.
  AbstractLatticeFunction<LatticeKey, LatticeVal> *LatticeFunc;

  /// ValueState - Holds the LatticeVals associated with LatticeKeys.
  DenseMap<LatticeKey, LatticeVal> ValueState;

  /// BBExecutable - Holds the basic blocks that are executable.
  SmallPtrSet<BasicBlock *, 16> BBExecutable;

  /// ValueWorkList - Holds values that should be processed.
  SmallVector<Value *, 64> ValueWorkList;

  /// BBWorkList - Holds basic blocks that should be processed.
  SmallVector<BasicBlock *, 64> BBWorkList;

  using Edge = std::pair<BasicBlock *, BasicBlock *>;

  /// KnownFeasibleEdges - Entries in this set are edges which have already had
  /// PHI nodes retriggered.
  std::set<Edge> KnownFeasibleEdges;

public:
  explicit SparseSolver(
      AbstractLatticeFunction<LatticeKey, LatticeVal> *Lattice)
      : LatticeFunc(Lattice) {}
  SparseSolver(const SparseSolver &) = delete;
  SparseSolver &operator=(const SparseSolver &) = delete;

  /// Solve - Solve for constants and executable blocks.
  void Solve();

  void Print(raw_ostream &OS) const;

  /// getExistingValueState - Return the LatticeVal object corresponding to the
  /// given value from the ValueState map. If the value is not in the map,
  /// UntrackedVal is returned; unlike getValueState, this never initializes
  /// the value's state.
  LatticeVal getExistingValueState(LatticeKey Key) const {
    auto I = ValueState.find(Key);
    return I != ValueState.end() ? I->second : LatticeFunc->getUntrackedVal();
  }

  /// getValueState - Return the LatticeVal object corresponding to the given
  /// value from the ValueState map. If the value is not in the map, its state
  /// is initialized.
  LatticeVal getValueState(LatticeKey Key);

  /// isEdgeFeasible - Return true if the control flow edge from the 'From'
  /// basic block to the 'To' basic block is currently feasible.  If
  /// AggressiveUndef is true, then this treats values with unknown lattice
  /// values as undefined.  This is generally only useful when solving the
  /// lattice, not when querying it.
  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To,
                      bool AggressiveUndef = false);

  /// isBlockExecutable - Return true if there are any known feasible
  /// edges into the basic block.  This is generally only useful when
  /// querying the lattice.
  bool isBlockExecutable(BasicBlock *BB) const {
    return BBExecutable.count(BB);
  }

  /// MarkBlockExecutable - This method can be used by clients to mark a block
  /// that is known to be intrinsically live in the processed unit. Call it for
  /// each entry point before invoking Solve.
  void MarkBlockExecutable(BasicBlock *BB);

private:
  /// UpdateState - When the state of some LatticeKey is potentially updated to
  /// the given LatticeVal, this function notices and adds the LLVM value
  /// corresponding to the key to the work list, if needed.
  void UpdateState(LatticeKey Key, LatticeVal LV);

  /// markEdgeExecutable - Mark the edge from Source to Dest as feasible,
  /// marking Dest executable (and adding it to the BB work list) if it was not
  /// already.
  void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);

  /// getFeasibleSuccessors - Return a vector of booleans to indicate which
  /// successors are reachable from a given terminator instruction.
  void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs,
                             bool AggressiveUndef);

  void visitInst(Instruction &I);
  void visitPHINode(PHINode &I);
  void visitTerminator(Instruction &TI);
};
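
// Illustrative driver (hypothetical client code): wire a lattice function to
// the solver, seed the entry block, run to a fixed point, and then query the
// results. DemoLatticeFunction is the sketch above.
#if 0
void runDemoSolve(Function &F) {
  DemoLatticeFunction LF;
  SparseSolver<Value *, DemoLattice> Solver(&LF);
  Solver.MarkBlockExecutable(&F.getEntryBlock());
  Solver.Solve();
  for (BasicBlock &BB : F)
    if (!Solver.isBlockExecutable(&BB))
      ; // BB was proven unreachable under this lattice; act on it here.
}
#endif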

//===----------------------------------------------------------------------===//
//                  AbstractLatticeFunction Implementation
//===----------------------------------------------------------------------===//

template <class LatticeKey, class LatticeVal>
void AbstractLatticeFunction<LatticeKey, LatticeVal>::PrintLatticeVal(
    LatticeVal V, raw_ostream &OS) {
  if (V == UndefVal)
    OS << "undefined";
  else if (V == OverdefinedVal)
    OS << "overdefined";
  else if (V == UntrackedVal)
    OS << "untracked";
  else
    OS << "unknown lattice value";
}

template <class LatticeKey, class LatticeVal>
void AbstractLatticeFunction<LatticeKey, LatticeVal>::PrintLatticeKey(
    LatticeKey Key, raw_ostream &OS) {
  OS << "unknown lattice key";
}

//===----------------------------------------------------------------------===//
//                          SparseSolver Implementation
//===----------------------------------------------------------------------===//

template <class LatticeKey, class LatticeVal, class KeyInfo>
LatticeVal
SparseSolver<LatticeKey, LatticeVal, KeyInfo>::getValueState(LatticeKey Key) {
  auto I = ValueState.find(Key);
  if (I != ValueState.end())
    return I->second; // Common case, in the map

  if (LatticeFunc->IsUntrackedValue(Key))
    return LatticeFunc->getUntrackedVal();
  LatticeVal LV = LatticeFunc->ComputeLatticeVal(Key);

  // If this value is untracked, don't add it to the map.
  if (LV == LatticeFunc->getUntrackedVal())
    return LV;
  return ValueState[Key] = std::move(LV);
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::UpdateState(LatticeKey Key,
                                                                LatticeVal LV) {
  auto I = ValueState.find(Key);
  if (I != ValueState.end() && I->second == LV)
    return; // No change.

  // Update the state of the given LatticeKey and add its corresponding LLVM
  // value to the work list.
  ValueState[Key] = std::move(LV);
  if (Value *V = KeyInfo::getValueFromLatticeKey(Key))
    ValueWorkList.push_back(V);
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::MarkBlockExecutable(
    BasicBlock *BB) {
  if (!BBExecutable.insert(BB).second)
    return;
  LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << "\n");
  BBWorkList.push_back(BB); // Add the block to the work list!
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::markEdgeExecutable(
    BasicBlock *Source, BasicBlock *Dest) {
  if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
    return; // This edge is already known to be executable!

  LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
                    << " -> " << Dest->getName() << "\n");

  if (BBExecutable.count(Dest)) {
    // The destination is already executable, but we just made an edge
    // feasible that wasn't before.  Revisit the PHI nodes in the block
    // because they have potentially new operands.
    for (BasicBlock::iterator I = Dest->begin(); isa<PHINode>(I); ++I)
      visitPHINode(*cast<PHINode>(I));
  } else {
    MarkBlockExecutable(Dest);
  }
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::getFeasibleSuccessors(
    Instruction &TI, SmallVectorImpl<bool> &Succs, bool AggressiveUndef) {
  Succs.resize(TI.getNumSuccessors());
  if (TI.getNumSuccessors() == 0)
    return;

  if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) {
    if (BI->isUnconditional()) {
      Succs[0] = true;
      return;
    }

    LatticeVal BCValue;
    if (AggressiveUndef)
      BCValue =
          getValueState(KeyInfo::getLatticeKeyFromValue(BI->getCondition()));
    else
      BCValue = getExistingValueState(
          KeyInfo::getLatticeKeyFromValue(BI->getCondition()));

    if (BCValue == LatticeFunc->getOverdefinedVal() ||
        BCValue == LatticeFunc->getUntrackedVal()) {
      // Overdefined condition variables can branch either way.
      Succs[0] = Succs[1] = true;
      return;
    }

    // If undefined, neither is feasible yet.
    if (BCValue == LatticeFunc->getUndefVal())
      return;

    Constant *C =
        dyn_cast_or_null<Constant>(LatticeFunc->GetValueFromLatticeVal(
            std::move(BCValue), BI->getCondition()->getType()));
    if (!C || !isa<ConstantInt>(C)) {
      // Non-constant values can go either way.
      Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    Succs[C->isNullValue()] = true;
    return;
  }

  if (!isa<SwitchInst>(TI)) {
    // Unknown terminator; assume all successors are feasible.
    Succs.assign(Succs.size(), true);
    return;
  }

  SwitchInst &SI = cast<SwitchInst>(TI);
  LatticeVal SCValue;
  if (AggressiveUndef)
    SCValue = getValueState(KeyInfo::getLatticeKeyFromValue(SI.getCondition()));
  else
    SCValue = getExistingValueState(
        KeyInfo::getLatticeKeyFromValue(SI.getCondition()));

  if (SCValue == LatticeFunc->getOverdefinedVal() ||
      SCValue == LatticeFunc->getUntrackedVal()) {
    // All destinations are executable!
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  // If undefined, no destination is feasible yet.
  if (SCValue == LatticeFunc->getUndefVal())
    return;

  Constant *C = dyn_cast_or_null<Constant>(LatticeFunc->GetValueFromLatticeVal(
      std::move(SCValue), SI.getCondition()->getType()));
  if (!C || !isa<ConstantInt>(C)) {
    // All destinations are executable!
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }
  SwitchInst::CaseHandle Case = *SI.findCaseValue(cast<ConstantInt>(C));
  Succs[Case.getSuccessorIndex()] = true;
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
bool SparseSolver<LatticeKey, LatticeVal, KeyInfo>::isEdgeFeasible(
    BasicBlock *From, BasicBlock *To, bool AggressiveUndef) {
  SmallVector<bool, 16> SuccFeasible;
  Instruction *TI = From->getTerminator();
  getFeasibleSuccessors(*TI, SuccFeasible, AggressiveUndef);

  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    if (TI->getSuccessor(i) == To && SuccFeasible[i])
      return true;

  return false;
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitTerminator(
    Instruction &TI) {
  SmallVector<bool, 16> SuccFeasible;
  getFeasibleSuccessors(TI, SuccFeasible, true);

  BasicBlock *BB = TI.getParent();

  // Mark all feasible successors executable...
  for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
    if (SuccFeasible[i])
      markEdgeExecutable(BB, TI.getSuccessor(i));
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitPHINode(PHINode &PN) {
  // The lattice function may store more information on a PHINode than could be
  // computed from its incoming values.  For example, SSI form stores its sigma
  // functions as PHINodes with a single incoming value.
  if (LatticeFunc->IsSpecialCasedPHI(&PN)) {
    DenseMap<LatticeKey, LatticeVal> ChangedValues;
    LatticeFunc->ComputeInstructionState(PN, ChangedValues, *this);
    for (auto &ChangedValue : ChangedValues)
      if (ChangedValue.second != LatticeFunc->getUntrackedVal())
        UpdateState(std::move(ChangedValue.first),
                    std::move(ChangedValue.second));
    return;
  }

  LatticeKey Key = KeyInfo::getLatticeKeyFromValue(&PN);
  LatticeVal PNIV = getValueState(Key);
  LatticeVal Overdefined = LatticeFunc->getOverdefinedVal();

  // If this value is already overdefined (common) just return.
  if (PNIV == Overdefined || PNIV == LatticeFunc->getUntrackedVal())
    return; // Quick exit

  // Super-extra-high-degree PHI nodes are unlikely to ever be interesting,
  // and slow us down a lot.  Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64) {
    UpdateState(Key, Overdefined);
    return;
  }

  // Look at all of the executable operands of the PHI node.  If any of them
  // are overdefined, the PHI becomes overdefined as well.  Otherwise, ask the
  // transfer function to give us the merge of the incoming values.
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    // If the edge is not yet known to be feasible, it doesn't impact the PHI.
    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent(), true))
      continue;

    // Merge in this value.
    LatticeVal OpVal =
        getValueState(KeyInfo::getLatticeKeyFromValue(PN.getIncomingValue(i)));
    if (OpVal != PNIV)
      PNIV = LatticeFunc->MergeValues(PNIV, OpVal);

    if (PNIV == Overdefined)
      break; // Rest of input values don't matter.
  }

  // Update the PHI with the computed value, which is the merge of the inputs.
  UpdateState(Key, PNIV);
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::visitInst(Instruction &I) {
  // PHIs are handled by the propagation logic; they are never passed into the
  // transfer functions.
  if (PHINode *PN = dyn_cast<PHINode>(&I))
    return visitPHINode(*PN);

  // Otherwise, ask the transfer function what the result is.  If this is
  // something that we care about, remember it.
  DenseMap<LatticeKey, LatticeVal> ChangedValues;
  LatticeFunc->ComputeInstructionState(I, ChangedValues, *this);
  for (auto &ChangedValue : ChangedValues)
    if (ChangedValue.second != LatticeFunc->getUntrackedVal())
      UpdateState(ChangedValue.first, ChangedValue.second);

  if (I.isTerminator())
    visitTerminator(I);
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Solve() {
  // Process the work lists until they are empty!
  while (!BBWorkList.empty() || !ValueWorkList.empty()) {
    // Process the value work list.
    while (!ValueWorkList.empty()) {
      Value *V = ValueWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off V-WL: " << *V << "\n");

      // "V" got into the work list because it made a transition. See if any
      // users are both live and in need of updating.
      for (User *U : V->users())
        if (Instruction *Inst = dyn_cast<Instruction>(U))
          if (BBExecutable.count(Inst->getParent())) // Inst is executable?
            visitInst(*Inst);
    }

    // Process the basic block work list.
    while (!BBWorkList.empty()) {
      BasicBlock *BB = BBWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB);

      // Notify all instructions in this basic block that they are newly
      // executable.
      for (Instruction &I : *BB)
        visitInst(I);
    }
  }
}

template <class LatticeKey, class LatticeVal, class KeyInfo>
void SparseSolver<LatticeKey, LatticeVal, KeyInfo>::Print(
    raw_ostream &OS) const {
  if (ValueState.empty())
    return;

  LatticeKey Key;
  LatticeVal LV;

  OS << "ValueState:\n";
  for (auto &Entry : ValueState) {
    std::tie(Key, LV) = Entry;
    if (LV == LatticeFunc->getUntrackedVal())
      continue;
    OS << "\t";
    LatticeFunc->PrintLatticeVal(LV, OS);
    OS << ": ";
    LatticeFunc->PrintLatticeKey(Key, OS);
    OS << "\n";
  }
}
} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_ANALYSIS_SPARSEPROPAGATION_H

//==- CFLAndersAliasAnalysis.h - Inclusion-based Alias Analysis ---*- C++-*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for LLVM's inclusion-based alias analysis
/// implemented with CFL graph reachability.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H
#define LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFLAliasAnalysisUtils.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <forward_list>
#include <memory>

namespace llvm {

template <typename T> class Optional;
class Function;
class MemoryLocation;
class TargetLibraryInfo;

namespace cflaa {

struct AliasSummary;

} // end namespace cflaa

class CFLAndersAAResult : public AAResultBase<CFLAndersAAResult> {
  friend AAResultBase<CFLAndersAAResult>;

  class FunctionInfo;

public:
  explicit CFLAndersAAResult(
      std::function<const TargetLibraryInfo &(Function &F)> GetTLI);
  CFLAndersAAResult(CFLAndersAAResult &&RHS);
  ~CFLAndersAAResult();

  /// Handle invalidation events from the new pass manager.
  /// By definition, this result is stateless and so remains valid.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }

  /// Evict the given function from the cache.
  void evict(const Function *Fn);

  /// Get the alias summary for the given function.
  /// Returns nullptr if the summary is not found or not available.
  const cflaa::AliasSummary *getAliasSummary(const Function &);

  AliasResult query(const MemoryLocation &, const MemoryLocation &);
  AliasResult alias(const MemoryLocation &, const MemoryLocation &,
                    AAQueryInfo &);

private:
  /// Ensures that the given function is available in the cache.
  /// Returns the appropriate entry from the cache.
  const Optional<FunctionInfo> &ensureCached(const Function &);

  /// Inserts the given Function into the cache.
  void scan(const Function &);

  /// Build summary for a given function
  FunctionInfo buildInfoFrom(const Function &);

  std::function<const TargetLibraryInfo &(Function &F)> GetTLI;

  /// Cached mapping of Functions to their StratifiedSets.
  /// If a function's sets are currently being built, it is marked
  /// in the cache as an Optional without a value. This way, if we
  /// have any kind of recursion, it is discernable from a function
  /// that simply has empty sets.
  DenseMap<const Function *, Optional<FunctionInfo>> Cache;

  std::forward_list<cflaa::FunctionHandle<CFLAndersAAResult>> Handles;
};

/// Analysis pass providing a never-invalidated alias analysis result.
///
/// FIXME: We really should refactor CFL to use the analysis more heavily, and
/// in particular to leverage invalidation to trigger re-computation.
class CFLAndersAA : public AnalysisInfoMixin<CFLAndersAA> {
  friend AnalysisInfoMixin<CFLAndersAA>;

  static AnalysisKey Key;

public:
  using Result = CFLAndersAAResult;

  CFLAndersAAResult run(Function &F, FunctionAnalysisManager &AM);
};
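
// Illustrative query (hypothetical client code): under the new pass manager,
// the result is obtained through a FunctionAnalysisManager and queried with a
// pair of memory locations.
#if 0
AliasResult demoAliasQuery(Function &F, FunctionAnalysisManager &AM,
                           const MemoryLocation &A, const MemoryLocation &B) {
  CFLAndersAAResult &AA = AM.getResult<CFLAndersAA>(F);
  return AA.query(A, B);
}
#endif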

/// Legacy wrapper pass to provide the CFLAndersAAResult object.
class CFLAndersAAWrapperPass : public ImmutablePass {
  std::unique_ptr<CFLAndersAAResult> Result;

public:
  static char ID;

  CFLAndersAAWrapperPass();

  CFLAndersAAResult &getResult() { return *Result; }
  const CFLAndersAAResult &getResult() const { return *Result; }

  void initializePass() override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

// createCFLAndersAAWrapperPass - This pass implements a set-based approach to
// alias analysis.
ImmutablePass *createCFLAndersAAWrapperPass();

} // end namespace llvm

#endif // LLVM_ANALYSIS_CFLANDERSALIASANALYSIS_H

//===- llvm/Analysis/LoopInfoImpl.h - Natural Loop Calculator ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the generic implementation of LoopInfo used for both Loops and
// MachineLoops.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPINFOIMPL_H
#define LLVM_ANALYSIS_LOOPINFOIMPL_H

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"

namespace llvm {

//===----------------------------------------------------------------------===//
// APIs for simple analysis of the loop. See header notes.

/// getExitingBlocks - Return all blocks inside the loop that have successors
/// outside of the loop.  These are the blocks _inside of the current loop_
/// which branch out.  The returned list is always unique.
///
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitingBlocks(
    SmallVectorImpl<BlockT *> &ExitingBlocks) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  for (const auto BB : blocks())
    for (auto *Succ : children<BlockT *>(BB))
      if (!contains(Succ)) {
        // Not in current loop? It must be an exit block.
        ExitingBlocks.push_back(BB);
        break;
      }
}

/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  auto notInLoop = [&](BlockT *BB) { return !contains(BB); };
  auto isExitBlock = [&](BlockT *BB, bool AllowRepeats) -> BlockT * {
    assert(!AllowRepeats && "Unexpected parameter value.");
    // Child not in current loop?  It must be an exit block.
    return any_of(children<BlockT *>(BB), notInLoop) ? BB : nullptr;
  };

  return find_singleton<BlockT>(blocks(), isExitBlock);
}

/// getExitBlocks - Return all of the successor blocks of this loop.  These
/// are the blocks _outside of the current loop_ which are branched to.
///
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitBlocks(
    SmallVectorImpl<BlockT *> &ExitBlocks) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  for (const auto BB : blocks())
    for (auto *Succ : children<BlockT *>(BB))
      if (!contains(Succ))
        // Not in current loop? It must be an exit block.
        ExitBlocks.push_back(Succ);
}

/// getExitBlockHelper - If the loop has exactly one exit block (or, when
/// Unique is true, exactly one unique exit block), return it; otherwise return
/// null. The bool in the returned pair indicates that more than one candidate
/// was found.
template <class BlockT, class LoopT>
std::pair<BlockT *, bool> getExitBlockHelper(const LoopBase<BlockT, LoopT> *L,
                                             bool Unique) {
  assert(!L->isInvalid() && "Loop not in a valid state!");
  auto notInLoop = [&](BlockT *BB,
                       bool AllowRepeats) -> std::pair<BlockT *, bool> {
    assert(AllowRepeats == Unique && "Unexpected parameter value.");
    return {!L->contains(BB) ? BB : nullptr, false};
  };
  auto singleExitBlock = [&](BlockT *BB,
                             bool AllowRepeats) -> std::pair<BlockT *, bool> {
    assert(AllowRepeats == Unique && "Unexpected parameter value.");
    return find_singleton_nested<BlockT>(children<BlockT *>(BB), notInLoop,
                                         AllowRepeats);
  };
  return find_singleton_nested<BlockT>(L->blocks(), singleExitBlock, Unique);
}

template <class BlockT, class LoopT>
bool LoopBase<BlockT, LoopT>::hasNoExitBlocks() const {
  auto RC = getExitBlockHelper(this, false);
  if (RC.second)
    // found multiple exit blocks
    return false;
  // return true if there is no exit block
  return !RC.first;
}

/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
  return getExitBlockHelper(this, false).first;
}

template <class BlockT, class LoopT>
bool LoopBase<BlockT, LoopT>::hasDedicatedExits() const {
  // Each predecessor of each exit block of a normal loop is contained
  // within the loop.
  SmallVector<BlockT *, 4> UniqueExitBlocks;
  getUniqueExitBlocks(UniqueExitBlocks);
  for (BlockT *EB : UniqueExitBlocks)
    for (BlockT *Predecessor : children<Inverse<BlockT *>>(EB))
      if (!contains(Predecessor))
        return false;
  // All the requirements are met.
  return true;
}

// Helper function to get unique loop exits. Pred is a predicate pointing to
// BasicBlocks in a loop which should be considered to find loop exits.
template <class BlockT, class LoopT, typename PredicateT>
void getUniqueExitBlocksHelper(const LoopT *L,
                               SmallVectorImpl<BlockT *> &ExitBlocks,
                               PredicateT Pred) {
  assert(!L->isInvalid() && "Loop not in a valid state!");
  SmallPtrSet<BlockT *, 32> Visited;
  auto Filtered = make_filter_range(L->blocks(), Pred);
  for (BlockT *BB : Filtered)
    for (BlockT *Successor : children<BlockT *>(BB))
      if (!L->contains(Successor))
        if (Visited.insert(Successor).second)
          ExitBlocks.push_back(Successor);
}

template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getUniqueExitBlocks(
    SmallVectorImpl<BlockT *> &ExitBlocks) const {
  getUniqueExitBlocksHelper(this, ExitBlocks,
                            [](const BlockT *BB) { return true; });
}

template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getUniqueNonLatchExitBlocks(
    SmallVectorImpl<BlockT *> &ExitBlocks) const {
  const BlockT *Latch = getLoopLatch();
  assert(Latch && "Latch block must exist");
  getUniqueExitBlocksHelper(this, ExitBlocks,
                            [Latch](const BlockT *BB) { return BB != Latch; });
}

template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getUniqueExitBlock() const {
  return getExitBlockHelper(this, true).first;
}

/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitEdges(
    SmallVectorImpl<Edge> &ExitEdges) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  for (const auto BB : blocks())
    for (auto *Succ : children<BlockT *>(BB))
      if (!contains(Succ))
        // Not in current loop? It must be an exit block.
        ExitEdges.emplace_back(BB, Succ);
}

/// getLoopPreheader - If there is a preheader for this loop, return it.  A
/// loop has a preheader if there is only one edge to the header of the loop
/// from outside of the loop and it is legal to hoist instructions into the
/// predecessor. If this is the case, the block branching to the header of the
/// loop is the preheader node.
///
/// This method returns null if there is no preheader for the loop.
///
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  // Keep track of nodes outside the loop branching to the header...
  BlockT *Out = getLoopPredecessor();
  if (!Out)
    return nullptr;

  // Make sure we are allowed to hoist instructions into the predecessor.
  if (!Out->isLegalToHoistInto())
    return nullptr;

  // Make sure there is only one exit out of the preheader.
  typedef GraphTraits<BlockT *> BlockTraits;
  typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
  ++SI;
  if (SI != BlockTraits::child_end(Out))
    return nullptr; // Multiple exits from the block, must not be a preheader.

  // The predecessor has exactly one successor, so it is a preheader.
  return Out;
}

/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  // Keep track of nodes outside the loop branching to the header...
  BlockT *Out = nullptr;

  // Loop over the predecessors of the header node...
  BlockT *Header = getHeader();
  for (const auto Pred : children<Inverse<BlockT *>>(Header)) {
    if (!contains(Pred)) { // If the block is not in the loop...
      if (Out && Out != Pred)
        return nullptr; // Multiple predecessors outside the loop
      Out = Pred;
    }
  }

  return Out;
}

/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
  assert(!isInvalid() && "Loop not in a valid state!");
  BlockT *Header = getHeader();
  BlockT *Latch = nullptr;
  for (const auto Pred : children<Inverse<BlockT *>>(Header)) {
    if (contains(Pred)) {
      if (Latch)
        return nullptr;
      Latch = Pred;
    }
  }

  return Latch;
}
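
// Illustrative use of the queries above (hypothetical client code): a loop
// that has a preheader, a single latch, and dedicated exits is in the shape
// most loop transformations expect.
#if 0
template <class BlockT, class LoopT>
bool isDemoCanonicalLoop(const LoopBase<BlockT, LoopT> &L) {
  return L.getLoopPreheader() != nullptr && L.getLoopLatch() != nullptr &&
         L.hasDedicatedExits();
}
#endif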

//===----------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
//

/// addBasicBlockToLoop - This method is used by other analyses to update loop
/// information.  NewBB is set to be a new member of the current loop.
/// Because of this, it is added as a member of all parent loops, and is added
/// to the specified LoopInfo object as being in the current loop.  It
/// is not valid to replace the loop header with this method.
///
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::addBasicBlockToLoop(
    BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
  assert(!isInvalid() && "Loop not in a valid state!");
#ifndef NDEBUG
  if (!Blocks.empty()) {
    auto SameHeader = LIB[getHeader()];
    assert(contains(SameHeader) && getHeader() == SameHeader->getHeader() &&
           "Incorrect LI specified for this loop!");
  }
#endif
  assert(NewBB && "Cannot add a null basic block to the loop!");
  assert(!LIB[NewBB] && "BasicBlock already in the loop!");

  LoopT *L = static_cast<LoopT *>(this);

  // Add the loop mapping to the LoopInfo object...
  LIB.BBMap[NewBB] = L;

  // Add the basic block to this loop and all parent loops...
  while (L) {
    L->addBlockEntry(NewBB);
    L = L->getParentLoop();
  }
}

/// replaceChildLoopWith - This is used when splitting loops up.  It replaces
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::replaceChildLoopWith(LoopT *OldChild,
                                                   LoopT *NewChild) {
  assert(!isInvalid() && "Loop not in a valid state!");
  assert(OldChild->ParentLoop == this && "This loop is already broken!");
  assert(!NewChild->ParentLoop && "NewChild already has a parent!");
  typename std::vector<LoopT *>::iterator I = find(SubLoops, OldChild);
  assert(I != SubLoops.end() && "OldChild not in loop!");
  *I = NewChild;
  OldChild->ParentLoop = nullptr;
  NewChild->ParentLoop = static_cast<LoopT *>(this);
}

/// verifyLoop - Verify loop structure
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoop() const {
  assert(!isInvalid() && "Loop not in a valid state!");
#ifndef NDEBUG
  assert(!Blocks.empty() && "Loop header is missing");

  // Setup for using a depth-first iterator to visit every block in the loop.
  SmallVector<BlockT *, 8> ExitBBs;
  getExitBlocks(ExitBBs);
  df_iterator_default_set<BlockT *> VisitSet;
  VisitSet.insert(ExitBBs.begin(), ExitBBs.end());

  // Keep track of the BBs visited.
  SmallPtrSet<BlockT *, 8> VisitedBBs;

  // Check the individual blocks.
  for (BlockT *BB : depth_first_ext(getHeader(), VisitSet)) {
    assert(std::any_of(GraphTraits<BlockT *>::child_begin(BB),
                       GraphTraits<BlockT *>::child_end(BB),
                       [&](BlockT *B) { return contains(B); }) &&
           "Loop block has no in-loop successors!");

    assert(std::any_of(GraphTraits<Inverse<BlockT *>>::child_begin(BB),
                       GraphTraits<Inverse<BlockT *>>::child_end(BB),
                       [&](BlockT *B) { return contains(B); }) &&
           "Loop block has no in-loop predecessors!");

    SmallVector<BlockT *, 2> OutsideLoopPreds;
    for (BlockT *B :
         llvm::make_range(GraphTraits<Inverse<BlockT *>>::child_begin(BB),
                          GraphTraits<Inverse<BlockT *>>::child_end(BB)))
      if (!contains(B))
        OutsideLoopPreds.push_back(B);

    if (BB == getHeader()) {
      assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
    } else if (!OutsideLoopPreds.empty()) {
      // A non-header loop block shouldn't be reachable from outside the loop,
      // though it is permitted if the predecessor is not itself actually
      // reachable.
      BlockT *EntryBB = &BB->getParent()->front();
      for (BlockT *CB : depth_first(EntryBB))
        for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
          assert(CB != OutsideLoopPreds[i] &&
                 "Loop has multiple entry points!");
    }
    assert(BB != &getHeader()->getParent()->front() &&
           "Loop contains function entry block!");

    VisitedBBs.insert(BB);
  }

  if (VisitedBBs.size() != getNumBlocks()) {
    dbgs() << "The following blocks are unreachable in the loop: ";
    for (auto *BB : Blocks) {
      if (!VisitedBBs.count(BB)) {
        dbgs() << *BB << "\n";
      }
    }
    assert(false && "Unreachable block in loop");
  }

  // Check the subloops.
  for (iterator I = begin(), E = end(); I != E; ++I)
    // Each block in each subloop should be contained within this loop.
    for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
         BI != BE; ++BI) {
      assert(contains(*BI) &&
             "Loop does not contain all the blocks of a subloop!");
    }

  // Check the parent loop pointer.
  if (ParentLoop) {
    assert(is_contained(*ParentLoop, this) &&
           "Loop is not a subloop of its parent!");
  }
#endif
}

/// verifyLoopNest - Verify loop structure of this loop and all nested loops.
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoopNest(
    DenseSet<const LoopT *> *Loops) const {
  assert(!isInvalid() && "Loop not in a valid state!");
  Loops->insert(static_cast<const LoopT *>(this));
  // Verify this loop.
  verifyLoop();
  // Verify the subloops.
  for (iterator I = begin(), E = end(); I != E; ++I)
    (*I)->verifyLoopNest(Loops);
}

template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, bool Verbose,
                                    bool PrintNested, unsigned Depth) const {
  OS.indent(Depth * 2);
  if (static_cast<const LoopT *>(this)->isAnnotatedParallel())
    OS << "Parallel ";
  OS << "Loop at depth " << getLoopDepth() << " containing: ";

  BlockT *H = getHeader();
  for (unsigned i = 0; i < getBlocks().size(); ++i) {
    BlockT *BB = getBlocks()[i];
    if (!Verbose) {
      if (i)
        OS << ",";
      BB->printAsOperand(OS, false);
    } else
      OS << "\n";

    if (BB == H)
      OS << "<header>";
    if (isLoopLatch(BB))
      OS << "<latch>";
    if (isLoopExiting(BB))
      OS << "<exiting>";
    if (Verbose)
      BB->print(OS);
  }

  if (PrintNested) {
    OS << "\n";

    for (iterator I = begin(), E = end(); I != E; ++I)
      (*I)->print(OS, /*Verbose*/ false, PrintNested, Depth + 2);
  }
}

//===----------------------------------------------------------------------===//
/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
/// result does not depend on use list (block predecessor) order.
///

/// Discover a subloop with the specified backedges such that: all blocks within
/// this loop are mapped to this loop or a subloop, and all subloops within this
/// loop have their parent loop set to this loop or a subloop.
template <class BlockT, class LoopT>
static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT *> Backedges,
                                  LoopInfoBase<BlockT, LoopT> *LI,
                                  const DomTreeBase<BlockT> &DomTree) {
  typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;

  unsigned NumBlocks = 0;
  unsigned NumSubloops = 0;

  // Perform a backward CFG traversal using a worklist.
  std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
  while (!ReverseCFGWorklist.empty()) {
    BlockT *PredBB = ReverseCFGWorklist.back();
    ReverseCFGWorklist.pop_back();

    LoopT *Subloop = LI->getLoopFor(PredBB);
    if (!Subloop) {
      if (!DomTree.isReachableFromEntry(PredBB))
        continue;

      // This is an undiscovered block. Map it to the current loop.
      LI->changeLoopFor(PredBB, L);
      ++NumBlocks;
      if (PredBB == L->getHeader())
        continue;
      // Push all block predecessors on the worklist.
      ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
                                InvBlockTraits::child_begin(PredBB),
                                InvBlockTraits::child_end(PredBB));
    } else {
      // This is a discovered block. Find its outermost discovered loop.
      Subloop = Subloop->getOutermostLoop();

      // If it is already discovered to be a subloop of this loop, continue.
      if (Subloop == L)
        continue;

      // Discover a subloop of this loop.
      Subloop->setParentLoop(L);
      ++NumSubloops;
      NumBlocks += Subloop->getBlocksVector().capacity();
      PredBB = Subloop->getHeader();
      // Continue traversal along predecessors that are not loop-back edges from
      // within this subloop tree itself. Note that a predecessor may directly
      // reach another subloop that is not yet discovered to be a subloop of
      // this loop, which we must traverse.
      for (const auto Pred : children<Inverse<BlockT *>>(PredBB)) {
        if (LI->getLoopFor(Pred) != Subloop)
          ReverseCFGWorklist.push_back(Pred);
      }
    }
  }
  L->getSubLoopsVector().reserve(NumSubloops);
  L->reserveBlocks(NumBlocks);
}

/// Populate all loop data in a stable order during a single forward DFS.
template <class BlockT, class LoopT> class PopulateLoopsDFS {
  typedef GraphTraits<BlockT *> BlockTraits;
  typedef typename BlockTraits::ChildIteratorType SuccIterTy;

  LoopInfoBase<BlockT, LoopT> *LI;

public:
  PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li) : LI(li) {}

  void traverse(BlockT *EntryBlock);

protected:
  void insertIntoLoop(BlockT *Block);
};

/// Top-level driver for the forward DFS within the loop.
template <class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
  for (BlockT *BB : post_order(EntryBlock))
    insertIntoLoop(BB);
}

/// Add a single Block to its ancestor loops in PostOrder. If the block is a
/// subloop header, add the subloop to its parent in PostOrder, then reverse the
/// Block and Subloop vectors of the now complete subloop to achieve RPO.
template <class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
  LoopT *Subloop = LI->getLoopFor(Block);
  if (Subloop && Block == Subloop->getHeader()) {
    // We reach this point once per subloop after processing all the blocks in
    // the subloop.
    if (!Subloop->isOutermost())
      Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
    else
      LI->addTopLevelLoop(Subloop);

    // For convenience, Blocks and Subloops are inserted in postorder. Reverse
    // the lists, except for the loop header, which is always at the beginning.
    Subloop->reverseBlock(1);
    std::reverse(Subloop->getSubLoopsVector().begin(),
                 Subloop->getSubLoopsVector().end());

    Subloop = Subloop->getParentLoop();
  }
  for (; Subloop; Subloop = Subloop->getParentLoop())
    Subloop->addBlockEntry(Block);
}

/// analyze - Discover loops during a postorder DominatorTree traversal
/// interleaved with backward CFG traversals within each subloop
/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
/// this part of the algorithm is linear in the number of CFG edges. Subloop and
/// Block vectors are then populated during a single forward CFG traversal
/// (PopulateLoopsDFS).
///
/// During the two CFG traversals each block is seen three times:
/// 1) Discovered and mapped by a reverse CFG traversal.
/// 2) Visited during a forward DFS CFG traversal.
/// 3) Reverse-inserted in the loop in postorder following forward DFS.
///
/// The Block vectors are inclusive, so step 3 requires loop-depth number of
/// insertions per block.
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::analyze(const DomTreeBase<BlockT> &DomTree) {
  // Postorder traversal of the dominator tree.
  const DomTreeNodeBase<BlockT> *DomRoot = DomTree.getRootNode();
  for (auto DomNode : post_order(DomRoot)) {

    BlockT *Header = DomNode->getBlock();
    SmallVector<BlockT *, 4> Backedges;

    // Check each predecessor of the potential loop header.
    for (const auto Backedge : children<Inverse<BlockT *>>(Header)) {
      // If Header dominates predBB, this is a new loop. Collect the backedges.
      if (DomTree.dominates(Header, Backedge) &&
          DomTree.isReachableFromEntry(Backedge)) {
        Backedges.push_back(Backedge);
      }
    }
    // Perform a backward CFG traversal to discover and map blocks in this loop.
    if (!Backedges.empty()) {
      LoopT *L = AllocateLoop(Header);
      discoverAndMapSubloop(L, ArrayRef<BlockT *>(Backedges), this, DomTree);
    }
  }
  // Perform a single forward CFG traversal to populate block and subloop
  // vectors for all loops.
  PopulateLoopsDFS<BlockT, LoopT> DFS(this);
  DFS.traverse(DomRoot->getBlock());
}

template <class BlockT, class LoopT>
SmallVector<LoopT *, 4>
LoopInfoBase<BlockT, LoopT>::getLoopsInPreorder() const {
  SmallVector<LoopT *, 4> PreOrderLoops, PreOrderWorklist;
  // The outer-most loop actually goes into the result in the same relative
  // order as we walk it. But LoopInfo stores the top level loops in reverse
  // program order so for here we reverse it to get forward program order.
  // FIXME: If we change the order of LoopInfo we will want to remove the
  // reverse here.
  for (LoopT *RootL : reverse(*this)) {
    auto PreOrderLoopsInRootL = RootL->getLoopsInPreorder();
    PreOrderLoops.append(PreOrderLoopsInRootL.begin(),
                         PreOrderLoopsInRootL.end());
  }

  return PreOrderLoops;
}

template <class BlockT, class LoopT>
SmallVector<LoopT *, 4>
LoopInfoBase<BlockT, LoopT>::getLoopsInReverseSiblingPreorder() const {
  SmallVector<LoopT *, 4> PreOrderLoops, PreOrderWorklist;
  // The outer-most loop actually goes into the result in the same relative
  // order as we walk it. LoopInfo stores the top level loops in reverse
  // program order so we walk in order here.
  // FIXME: If we change the order of LoopInfo we will want to add a reverse
  // here.
  for (LoopT *RootL : *this) {
    assert(PreOrderWorklist.empty() &&
           "Must start with an empty preorder walk worklist.");
    PreOrderWorklist.push_back(RootL);
    do {
      LoopT *L = PreOrderWorklist.pop_back_val();
      // Sub-loops are stored in forward program order, but we will process the
      // worklist backwards, so we can just append them in order.
      PreOrderWorklist.append(L->begin(), L->end());
      PreOrderLoops.push_back(L);
    } while (!PreOrderWorklist.empty());
  }

  return PreOrderLoops;
}
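
// Illustrative traversal (hypothetical client code): visit every loop in the
// forest in preorder and report its depth.
#if 0
template <class BlockT, class LoopT>
void printDemoLoopDepths(const LoopInfoBase<BlockT, LoopT> &LI,
                         raw_ostream &OS) {
  for (const LoopT *L : LI.getLoopsInPreorder())
    OS << "loop at depth " << L->getLoopDepth() << "\n";
}
#endif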

// Debugging
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
  for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
    TopLevelLoops[i]->print(OS);
#if 0
  for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
         E = BBMap.end(); I != E; ++I)
    OS << "BB '" << I->first->getName() << "' level = "
       << I->second->getLoopDepth() << "\n";
#endif
}

template <typename T>
bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
  llvm::sort(BB1);
  llvm::sort(BB2);
  return BB1 == BB2;
}

template <class BlockT, class LoopT>
void addInnerLoopsToHeadersMap(DenseMap<BlockT *, const LoopT *> &LoopHeaders,
                               const LoopInfoBase<BlockT, LoopT> &LI,
                               const LoopT &L) {
  LoopHeaders[L.getHeader()] = &L;
  for (LoopT *SL : L)
    addInnerLoopsToHeadersMap(LoopHeaders, LI, *SL);
}

#ifndef NDEBUG
template <class BlockT, class LoopT>
static void compareLoops(const LoopT *L, const LoopT *OtherL,
                         DenseMap<BlockT *, const LoopT *> &OtherLoopHeaders) {
  BlockT *H = L->getHeader();
  BlockT *OtherH = OtherL->getHeader();
  assert(H == OtherH &&
         "Mismatched headers even though found in the same map entry!");

  assert(L->getLoopDepth() == OtherL->getLoopDepth() &&
         "Mismatched loop depth!");
  const LoopT *ParentL = L, *OtherParentL = OtherL;
  do {
    assert(ParentL->getHeader() == OtherParentL->getHeader() &&
           "Mismatched parent loop headers!");
    ParentL = ParentL->getParentLoop();
    OtherParentL = OtherParentL->getParentLoop();
  } while (ParentL);

  for (const LoopT *SubL : *L) {
    BlockT *SubH = SubL->getHeader();
    const LoopT *OtherSubL = OtherLoopHeaders.lookup(SubH);
    assert(OtherSubL && "Inner loop is missing in computed loop info!");
    OtherLoopHeaders.erase(SubH);
    compareLoops(SubL, OtherSubL, OtherLoopHeaders);
  }

  std::vector<BlockT *> BBs = L->getBlocks();
  std::vector<BlockT *> OtherBBs = OtherL->getBlocks();
  assert(compareVectors(BBs, OtherBBs) &&
         "Mismatched basic blocks in the loops!");

  const SmallPtrSetImpl<const BlockT *> &BlocksSet = L->getBlocksSet();
  const SmallPtrSetImpl<const BlockT *> &OtherBlocksSet =
      OtherL->getBlocksSet();
  assert(BlocksSet.size() == OtherBlocksSet.size() &&
         llvm::set_is_subset(BlocksSet, OtherBlocksSet) &&
         "Mismatched basic blocks in BlocksSets!");
}
#endif

template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::verify(
    const DomTreeBase<BlockT> &DomTree) const {
  DenseSet<const LoopT *> Loops;
  for (iterator I = begin(), E = end(); I != E; ++I) {
    assert((*I)->isOutermost() && "Top-level loop has a parent!");
    (*I)->verifyLoopNest(&Loops);
  }

// Verify that blocks are mapped to valid loops.
#ifndef NDEBUG
  for (auto &Entry : BBMap) {
    const BlockT *BB = Entry.first;
    LoopT *L = Entry.second;
    assert(Loops.count(L) && "orphaned loop");
    assert(L->contains(BB) && "orphaned block");
    for (LoopT *ChildLoop : *L)
      assert(!ChildLoop->contains(BB) &&
             "BBMap should point to the innermost loop containing BB");
  }

  // Recompute LoopInfo to verify loops structure.
  LoopInfoBase<BlockT, LoopT> OtherLI;
  OtherLI.analyze(DomTree);

  // Build a map we can use to move from our LI to the computed one. This
  // allows us to ignore the particular order in any layer of the loop forest
  // while still comparing the structure.
  DenseMap<BlockT *, const LoopT *> OtherLoopHeaders;
  for (LoopT *L : OtherLI)
    addInnerLoopsToHeadersMap(OtherLoopHeaders, OtherLI, *L);

  // Walk the top level loops and ensure there is a corresponding top-level
  // loop in the computed version and then recursively compare those loop
  // nests.
  for (LoopT *L : *this) {
    BlockT *Header = L->getHeader();
    const LoopT *OtherL = OtherLoopHeaders.lookup(Header);
    assert(OtherL && "Top level loop is missing in computed loop info!");
    // Now that we've matched this loop, erase its header from the map.
    OtherLoopHeaders.erase(Header);
    // And recursively compare these loops.
    compareLoops(L, OtherL, OtherLoopHeaders);
  }

  // Any remaining entries in the map are loops which were found when computing
  // a fresh LoopInfo but not present in the current one.
  if (!OtherLoopHeaders.empty()) {
    for (const auto &HeaderAndLoop : OtherLoopHeaders)
      dbgs() << "Found new loop: " << *HeaderAndLoop.second << "\n";
    llvm_unreachable("Found new loops when recomputing LoopInfo!");
  }
#endif
}

} // End llvm namespace

#endif

//===- InlineModelFeatureMaps.h - common model runner defs ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
#define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H

#include "llvm/Analysis/TensorSpec.h"

#include <array>
#include <string>
#include <vector>

namespace llvm {

// List of cost features. A "cost" feature is a summand of the heuristic-based
// inline cost, and we define them separately to preserve the original heuristic
// behavior.
#define INLINE_COST_FEATURE_ITERATOR(M)                                        \
  M(int64_t, {1}, sroa_savings,                                                \
    "Savings from SROA (scalar replacement of aggregates)")                    \
  M(int64_t, {1}, sroa_losses,                                                 \
    "Losses from SROA (scalar replacement of aggregates)")                     \
  M(int64_t, {1}, load_elimination, "Cost of load elimination in the call")    \
  M(int64_t, {1}, call_penalty,                                                \
    "Accumulation of penalty applied to call sites when inlining")             \
  M(int64_t, {1}, call_argument_setup,                                         \
    "Accumulation of call argument setup costs")                               \
  M(int64_t, {1}, load_relative_intrinsic,                                     \
    "Accumulation of costs of loading relative intrinsics")                    \
  M(int64_t, {1}, lowered_call_arg_setup,                                      \
    "Accumulation of cost of lowered call argument setups")                    \
  M(int64_t, {1}, indirect_call_penalty,                                       \
    "Accumulation of costs for indirect calls")                                \
  M(int64_t, {1}, jump_table_penalty, "Accumulation of costs for jump tables") \
  M(int64_t, {1}, case_cluster_penalty,                                        \
    "Accumulation of costs for case clusters")                                 \
  M(int64_t, {1}, switch_penalty,                                              \
    "Accumulation of costs for switch statements")                             \
  M(int64_t, {1}, unsimplified_common_instructions,                            \
    "Costs from unsimplified common instructions")                             \
  M(int64_t, {1}, num_loops, "Number of loops in the caller")                  \
  M(int64_t, {1}, dead_blocks, "Number of dead blocks in the caller")          \
  M(int64_t, {1}, simplified_instructions,                                     \
    "Number of simplified instructions")                                       \
  M(int64_t, {1}, constant_args,                                               \
    "Number of constant arguments in the call site")                           \
  M(int64_t, {1}, constant_offset_ptr_args,                                    \
    "Number of constant offset pointer args in the call site")                 \
  M(int64_t, {1}, callsite_cost, "Estimated cost of the call site")            \
  M(int64_t, {1}, cold_cc_penalty, "Penalty for a cold calling convention")    \
  M(int64_t, {1}, last_call_to_static_bonus,                                   \
    "Bonus for being the last call to static")                                 \
  M(int64_t, {1}, is_multiple_blocks,                                          \
    "Boolean; is the Callee multiple blocks")                                  \
  M(int64_t, {1}, nested_inlines,                                              \
    "Would the default inliner perfom nested inlining")                        \
  M(int64_t, {1}, nested_inline_cost_estimate,                                 \
    "Estimate of the accumulated cost of nested inlines")                      \
  M(int64_t, {1}, threshold, "Threshold for the heuristic inliner")

// clang-format off
enum class InlineCostFeatureIndex : size_t {
#define POPULATE_INDICES(DTYPE, SHAPE, NAME, DOC) NAME,
  INLINE_COST_FEATURE_ITERATOR(POPULATE_INDICES)
#undef POPULATE_INDICES

  NumberOfFeatures
};
// clang-format on
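
// Illustrative use of the X-macro (hypothetical helper, not part of this
// header): stamp out a parallel table of feature names, e.g. for debugging.
#if 0
static const char *const InlineCostFeatureNames[] = {
#define POPULATE_NAMES(DTYPE, SHAPE, NAME, DOC) #NAME,
    INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
#undef POPULATE_NAMES
};
#endif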

using InlineCostFeatures =
    std::array<int,
               static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures)>;

constexpr bool isHeuristicInlineCostFeature(InlineCostFeatureIndex Feature) {
  return Feature != InlineCostFeatureIndex::sroa_savings &&
         Feature != InlineCostFeatureIndex::is_multiple_blocks &&
         Feature != InlineCostFeatureIndex::dead_blocks &&
         Feature != InlineCostFeatureIndex::simplified_instructions &&
         Feature != InlineCostFeatureIndex::constant_args &&
         Feature != InlineCostFeatureIndex::constant_offset_ptr_args &&
         Feature != InlineCostFeatureIndex::nested_inlines &&
         Feature != InlineCostFeatureIndex::nested_inline_cost_estimate &&
         Feature != InlineCostFeatureIndex::threshold;
}

// List of features. Each feature is defined through a tuple:
// - an element type and shape
// - the name of an enum member, which will be the feature index; it doubles as
// the textual name used for TensorFlow model binding (so it needs to match the
// names used by the TensorFlow model)
// - a documentation description. Currently, that is not used anywhere
// programmatically, and serves as a workaround for the inability to insert
// comments in macros.
#define INLINE_FEATURE_ITERATOR(M)                                             \
  M(int64_t, {1}, callee_basic_block_count,                                    \
    "number of basic blocks of the callee")                                    \
  M(int64_t, {1}, callsite_height,                                             \
    "position of the call site in the original call graph - measured from "    \
    "the farthest SCC")                                                        \
  M(int64_t, {1}, node_count,                                                  \
    "total current number of defined functions in the module")                 \
  M(int64_t, {1}, nr_ctant_params,                                             \
    "number of parameters in the call site that are constants")                \
  M(int64_t, {1}, cost_estimate, "total cost estimate (threshold - free)")     \
  M(int64_t, {1}, edge_count, "total number of calls in the module")           \
  M(int64_t, {1}, caller_users,                                                \
    "number of module-internal users of the caller, +1 if the caller is "      \
    "exposed externally")                                                      \
  M(int64_t, {1}, caller_conditionally_executed_blocks,                        \
    "number of blocks reached from a conditional instruction, in the caller")  \
  M(int64_t, {1}, caller_basic_block_count,                                    \
    "number of basic blocks in the caller")                                    \
  M(int64_t, {1}, callee_conditionally_executed_blocks,                        \
    "number of blocks reached from a conditional instruction, in the callee")  \
  M(int64_t, {1}, callee_users,                                                \
    "number of module-internal users of the callee, +1 if the callee is "      \
    "exposed externally")

// clang-format off
enum class FeatureIndex : size_t {
#define POPULATE_INDICES(DTYPE, SHAPE, NAME, COMMENT) NAME,
// InlineCost features - these must come first
  INLINE_COST_FEATURE_ITERATOR(POPULATE_INDICES)

// Non-cost features
  INLINE_FEATURE_ITERATOR(POPULATE_INDICES)
#undef POPULATE_INDICES

  NumberOfFeatures
};
// clang-format on

constexpr FeatureIndex
inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) {
  return static_cast<FeatureIndex>(static_cast<size_t>(Feature));
}

constexpr size_t NumberOfFeatures =
    static_cast<size_t>(FeatureIndex::NumberOfFeatures);
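
// Illustrative sketch (client code, not part of the original header): filling
// an InlineCostFeatures array and translating a cost-feature index into the
// combined FeatureIndex space. Since the cost features occupy the leading
// indices of FeatureIndex, the translation is a plain index cast.
// \code
//   InlineCostFeatures Costs{}; // zero-initialized, one slot per cost feature
//   Costs[static_cast<size_t>(InlineCostFeatureIndex::sroa_savings)] = 4;
//   FeatureIndex Combined =
//       inlineCostFeatureToMlFeature(InlineCostFeatureIndex::sroa_savings);
//   assert(static_cast<size_t>(Combined) < NumberOfFeatures);
// \endcode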

extern const std::vector<TensorSpec> FeatureMap;

extern const char *const DecisionName;
extern const TensorSpec InlineDecisionSpec;
extern const char *const DefaultDecisionName;
extern const TensorSpec DefaultDecisionSpec;
extern const char *const RewardName;

using InlineFeatures = std::vector<int64_t>;

} // namespace llvm
#endif // LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
// File: Analysis/ValueLatticeUtils.h
//===-- ValueLatticeUtils.h - Utils for solving lattices --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares common functions useful for performing data-flow analyses
// that propagate values across function boundaries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUELATTICEUTILS_H
#define LLVM_ANALYSIS_VALUELATTICEUTILS_H

namespace llvm {

class Function;
class GlobalVariable;

/// Determine if the values of the given function's arguments can be tracked
/// interprocedurally. The value of an argument can be tracked if the function
/// has local linkage and its address is not taken.
bool canTrackArgumentsInterprocedurally(Function *F);

/// Determine if the values of the given function's returns can be tracked
/// interprocedurally. Return values can be tracked if the function has an
/// exact definition and it doesn't have the "naked" attribute. Naked functions
/// may contain assembly code that returns untrackable values.
bool canTrackReturnsInterprocedurally(Function *F);

/// Determine if the value maintained in the given global variable can be
/// tracked interprocedurally. A value can be tracked if the global variable
/// has local linkage and is only used by non-volatile loads and stores.
bool canTrackGlobalVariableInterprocedurally(GlobalVariable *GV);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUELATTICEUTILS_H
// File: Analysis/ObjCARCAnalysisUtils.h
//===- ObjCARCAnalysisUtils.h - ObjC ARC Analysis Utilities -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines common analysis utilities used by the ObjC ARC Optimizer.
/// ARC stands for Automatic Reference Counting and is a system for managing
/// reference counts for objects in Objective C.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_OBJCARCANALYSISUTILS_H
#define LLVM_ANALYSIS_OBJCARCANALYSISUTILS_H

#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include <optional>

namespace llvm {

class AAResults;

namespace objcarc {

/// A handy option to enable/disable all ARC Optimizations.
extern bool EnableARCOpts;

/// Test if the given module looks interesting to run ARC optimization
/// on.
inline bool ModuleHasARC(const Module &M) {
  return
    M.getNamedValue("llvm.objc.retain") ||
    M.getNamedValue("llvm.objc.release") ||
    M.getNamedValue("llvm.objc.autorelease") ||
    M.getNamedValue("llvm.objc.retainAutoreleasedReturnValue") ||
    M.getNamedValue("llvm.objc.unsafeClaimAutoreleasedReturnValue") ||
    M.getNamedValue("llvm.objc.retainBlock") ||
    M.getNamedValue("llvm.objc.autoreleaseReturnValue") ||
    M.getNamedValue("llvm.objc.autoreleasePoolPush") ||
    M.getNamedValue("llvm.objc.loadWeakRetained") ||
    M.getNamedValue("llvm.objc.loadWeak") ||
    M.getNamedValue("llvm.objc.destroyWeak") ||
    M.getNamedValue("llvm.objc.storeWeak") ||
    M.getNamedValue("llvm.objc.initWeak") ||
    M.getNamedValue("llvm.objc.moveWeak") ||
    M.getNamedValue("llvm.objc.copyWeak") ||
    M.getNamedValue("llvm.objc.retainedObject") ||
    M.getNamedValue("llvm.objc.unretainedObject") ||
    M.getNamedValue("llvm.objc.unretainedPointer") ||
    M.getNamedValue("llvm.objc.clang.arc.use");
}
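
// Illustrative usage sketch (not from the original header): ARC passes
// typically combine this check with the EnableARCOpts flag as an early exit
// before doing any ARC-specific work.
// \code
//   static bool shouldRunARCOpts(const Module &M) {
//     return EnableARCOpts && ModuleHasARC(M);
//   }
// \endcode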

/// This is a wrapper around getUnderlyingObject which also knows how to
/// look through objc_retain and objc_autorelease calls, which we know to return
/// their argument verbatim.
inline const Value *GetUnderlyingObjCPtr(const Value *V) {
  for (;;) {
    V = getUnderlyingObject(V);
    if (!IsForwarding(GetBasicARCInstKind(V)))
      break;
    V = cast<CallInst>(V)->getArgOperand(0);
  }

  return V;
}

/// A wrapper for GetUnderlyingObjCPtr used for results memoization.
inline const Value *GetUnderlyingObjCPtrCached(
    const Value *V,
    DenseMap<const Value *, std::pair<WeakVH, WeakTrackingVH>> &Cache) {
  // The entry is invalid if either value handle is null.
  auto InCache = Cache.lookup(V);
  if (InCache.first && InCache.second)
    return InCache.second;

  const Value *Computed = GetUnderlyingObjCPtr(V);
  Cache[V] =
      std::make_pair(const_cast<Value *>(V), const_cast<Value *>(Computed));
  return Computed;
}
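
// Illustrative sketch: a pass would typically keep one cache per function and
// pass it to every query (the variable names here are hypothetical).
// \code
//   DenseMap<const Value *, std::pair<WeakVH, WeakTrackingVH>> UOCache;
//   const Value *Underlying = GetUnderlyingObjCPtrCached(V, UOCache);
// \endcode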

/// The RCIdentity root of a value \p V is a dominating value U for which
/// retaining or releasing U is equivalent to retaining or releasing V. In other
/// words, ARC operations on \p V are equivalent to ARC operations on \p U.
///
/// We use this in the ARC optimizer to make it easier to match up ARC
/// operations by always mapping ARC operations to RCIdentityRoots instead of
/// pointers themselves.
///
/// The two ways that we see RCIdentical values in ObjC are via:
///
///   1. PointerCasts
///   2. Forwarding Calls that return their argument verbatim.
///
/// Thus this function strips off pointer casts and forwarding calls. *NOTE*
/// This implies that two RCIdentical values must alias.
inline const Value *GetRCIdentityRoot(const Value *V) {
  for (;;) {
    V = V->stripPointerCasts();
    if (!IsForwarding(GetBasicARCInstKind(V)))
      break;
    V = cast<CallInst>(V)->getArgOperand(0);
  }
  return V;
}

/// Helper which calls const Value *GetRCIdentityRoot(const Value *V) and just
/// casts away the const of the result. For documentation about what an
/// RCIdentityRoot is (and, by extension, what GetRCIdentityRoot does), see
/// that function.
inline Value *GetRCIdentityRoot(Value *V) {
  return const_cast<Value *>(GetRCIdentityRoot((const Value *)V));
}

/// Assuming the given instruction is one of the special calls such as
/// objc_retain or objc_release, return the RCIdentity root of the argument of
/// the call.
inline Value *GetArgRCIdentityRoot(Value *Inst) {
  return GetRCIdentityRoot(cast<CallInst>(Inst)->getArgOperand(0));
}
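
// Illustrative sketch: because two RCIdentical values share a root, ARC
// operations can be matched up by comparing roots instead of raw pointers
// (the helper name below is hypothetical).
// \code
//   static bool haveSameRCIdentityRoot(const Value *A, const Value *B) {
//     return GetRCIdentityRoot(A) == GetRCIdentityRoot(B);
//   }
// \endcode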

inline bool IsNullOrUndef(const Value *V) {
  return isa<ConstantPointerNull>(V) || isa<UndefValue>(V);
}

inline bool IsNoopInstruction(const Instruction *I) {
  return isa<BitCastInst>(I) ||
    (isa<GetElementPtrInst>(I) &&
     cast<GetElementPtrInst>(I)->hasAllZeroIndices());
}

/// Test whether the given value is possibly a retainable object pointer.
inline bool IsPotentialRetainableObjPtr(const Value *Op) {
  // Pointers to static or stack storage are not valid retainable object
  // pointers.
  if (isa<Constant>(Op) || isa<AllocaInst>(Op))
    return false;
  // Special arguments cannot be valid retainable object pointers.
  if (const Argument *Arg = dyn_cast<Argument>(Op))
    if (Arg->hasPassPointeeByValueCopyAttr() || Arg->hasNestAttr() ||
        Arg->hasStructRetAttr())
      return false;
  // Only consider values with pointer types.
  //
  // It seems intuitive to exclude function pointer types as well, since
  // functions are never retainable object pointers; however, clang
  // occasionally bitcasts retainable object pointers to function-pointer
  // types temporarily.
  PointerType *Ty = dyn_cast<PointerType>(Op->getType());
  if (!Ty)
    return false;
  // Conservatively assume anything else is a potential retainable object
  // pointer.
  return true;
}

bool IsPotentialRetainableObjPtr(const Value *Op, AAResults &AA);

/// Helper for GetARCInstKind. Determines what kind of construct \p CB is.
inline ARCInstKind GetCallSiteClass(const CallBase &CB) {
  for (const Use &U : CB.args())
    if (IsPotentialRetainableObjPtr(U))
      return CB.onlyReadsMemory() ? ARCInstKind::User : ARCInstKind::CallOrUser;

  return CB.onlyReadsMemory() ? ARCInstKind::None : ARCInstKind::Call;
}

/// Return true if this value refers to a distinct and identifiable
/// object.
///
/// This is similar to AliasAnalysis's isIdentifiedObject, except that it uses
/// special knowledge of ObjC conventions.
inline bool IsObjCIdentifiedObject(const Value *V) {
  // Assume that call results and arguments have their own "provenance".
  // Constants (including GlobalVariables) and Allocas are never
  // reference-counted.
  if (isa<CallInst>(V) || isa<InvokeInst>(V) ||
      isa<Argument>(V) || isa<Constant>(V) ||
      isa<AllocaInst>(V))
    return true;

  if (const LoadInst *LI = dyn_cast<LoadInst>(V)) {
    const Value *Pointer =
      GetRCIdentityRoot(LI->getPointerOperand());
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Pointer)) {
      // A constant pointer can't be pointing to an object on the heap. It may
      // be reference-counted, but it won't be deleted.
      if (GV->isConstant())
        return true;
      StringRef Name = GV->getName();
      // These special variables are known to hold values which are not
      // reference-counted pointers.
      if (Name.startswith("\01l_objc_msgSend_fixup_"))
        return true;

      StringRef Section = GV->getSection();
      if (Section.contains("__message_refs") ||
          Section.contains("__objc_classrefs") ||
          Section.contains("__objc_superrefs") ||
          Section.contains("__objc_methname") || Section.contains("__cstring"))
        return true;
    }
  }

  return false;
}

enum class ARCMDKindID {
  ImpreciseRelease,
  CopyOnEscape,
  NoObjCARCExceptions,
};

/// A cache of MDKinds used by various ARC optimizations.
class ARCMDKindCache {
  Module *M;

  /// The Metadata Kind for clang.imprecise_release metadata.
  std::optional<unsigned> ImpreciseReleaseMDKind;

  /// The Metadata Kind for clang.arc.copy_on_escape metadata.
  std::optional<unsigned> CopyOnEscapeMDKind;

  /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
  std::optional<unsigned> NoObjCARCExceptionsMDKind;

public:
  void init(Module *Mod) {
    M = Mod;
    ImpreciseReleaseMDKind = std::nullopt;
    CopyOnEscapeMDKind = std::nullopt;
    NoObjCARCExceptionsMDKind = std::nullopt;
  }

  unsigned get(ARCMDKindID ID) {
    switch (ID) {
    case ARCMDKindID::ImpreciseRelease:
      if (!ImpreciseReleaseMDKind)
        ImpreciseReleaseMDKind =
            M->getContext().getMDKindID("clang.imprecise_release");
      return *ImpreciseReleaseMDKind;
    case ARCMDKindID::CopyOnEscape:
      if (!CopyOnEscapeMDKind)
        CopyOnEscapeMDKind =
            M->getContext().getMDKindID("clang.arc.copy_on_escape");
      return *CopyOnEscapeMDKind;
    case ARCMDKindID::NoObjCARCExceptions:
      if (!NoObjCARCExceptionsMDKind)
        NoObjCARCExceptionsMDKind =
            M->getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
      return *NoObjCARCExceptionsMDKind;
    }
    llvm_unreachable("Covered switch isn't covered?!");
  }
};
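
// Illustrative sketch (ReleaseCall is a hypothetical CallInst*): attaching
// clang.imprecise_release metadata through the cache.
// \code
//   ARCMDKindCache MDCache;
//   MDCache.init(&M);
//   unsigned KindID = MDCache.get(ARCMDKindID::ImpreciseRelease);
//   ReleaseCall->setMetadata(KindID, MDNode::get(M.getContext(), {}));
// \endcode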

} // end namespace objcarc
} // end namespace llvm

#endif
// File: Analysis/IndirectCallVisitor.h
//===-- IndirectCallVisitor.h - indirect call visitor ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a visitor class and a helper function that find all
// indirect call sites in a function.

#ifndef LLVM_ANALYSIS_INDIRECTCALLVISITOR_H
#define LLVM_ANALYSIS_INDIRECTCALLVISITOR_H

#include "llvm/IR/InstVisitor.h"
#include <vector>

namespace llvm {
// Visitor class that finds all indirect calls.
struct PGOIndirectCallVisitor : public InstVisitor<PGOIndirectCallVisitor> {
  std::vector<CallBase *> IndirectCalls;
  PGOIndirectCallVisitor() = default;

  void visitCallBase(CallBase &Call) {
    if (Call.isIndirectCall())
      IndirectCalls.push_back(&Call);
  }
};

// Helper function that finds all indirect call sites.
inline std::vector<CallBase *> findIndirectCalls(Function &F) {
  PGOIndirectCallVisitor ICV;
  ICV.visit(F);
  return ICV.IndirectCalls;
}
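
// Illustrative usage sketch: gather and iterate the indirect call sites of a
// function.
// \code
//   for (CallBase *CB : findIndirectCalls(F))
//     CB->print(errs());
// \endcode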
} // namespace llvm

#endif
// File: Analysis/CFGSCCPrinter.h
//===-- CFGSCCPrinter.h ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CFGSCCPRINTER_H
#define LLVM_ANALYSIS_CFGSCCPRINTER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class CFGSCCPrinterPass : public PassInfoMixin<CFGSCCPrinterPass> {
  raw_ostream &OS;

public:
  explicit CFGSCCPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
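
// Illustrative sketch of scheduling the printer with the new pass manager:
// \code
//   FunctionPassManager FPM;
//   FPM.addPass(CFGSCCPrinterPass(errs()));
// \endcode
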
} // namespace llvm

#endif
// File: Analysis/InstructionSimplify.h
//===-- InstructionSimplify.h - Fold instrs into simpler forms --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares routines for folding instructions into simpler forms
// that do not require creating new instructions.  This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x").  If the simplification is also an instruction
// then it dominates the original instruction.
//
// These routines implicitly resolve undef uses. The easiest way to be safe when
// using these routines to obtain simplified values for existing instructions is
// to always replace all uses of the instructions with the resulting simplified
// values. This will prevent other code from seeing the same undef uses and
// resolving them to different values.
//
// They require that all the IR that they encounter be valid and inserted into a
// parent function.
//
// Additionally, these routines can't simplify to the instructions that are not
// def-reachable, meaning we can't just scan the basic block for instructions
// to simplify to.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H

#include "llvm/IR/PatternMatch.h"

namespace llvm {

template <typename T, typename... TArgs> class AnalysisManager;
template <class T> class ArrayRef;
class AssumptionCache;
class BinaryOperator;
class CallBase;
class DataLayout;
class DominatorTree;
class Function;
class Instruction;
struct LoopStandardAnalysisResults;
class MDNode;
class Pass;
template <class T, unsigned n> class SmallSetVector;
class TargetLibraryInfo;
class Type;
class Value;

/// InstrInfoQuery provides an interface to query additional information about
/// instructions, such as metadata or flags like nsw. It returns conservative
/// results unless the user specifies that it is safe to use this information.
struct InstrInfoQuery {
  InstrInfoQuery(bool UMD) : UseInstrInfo(UMD) {}
  InstrInfoQuery() = default;
  bool UseInstrInfo = true;

  MDNode *getMetadata(const Instruction *I, unsigned KindID) const {
    if (UseInstrInfo)
      return I->getMetadata(KindID);
    return nullptr;
  }

  template <class InstT> bool hasNoUnsignedWrap(const InstT *Op) const {
    if (UseInstrInfo)
      return Op->hasNoUnsignedWrap();
    return false;
  }

  template <class InstT> bool hasNoSignedWrap(const InstT *Op) const {
    if (UseInstrInfo)
      return Op->hasNoSignedWrap();
    return false;
  }

  bool isExact(const BinaryOperator *Op) const {
    if (UseInstrInfo && isa<PossiblyExactOperator>(Op))
      return cast<PossiblyExactOperator>(Op)->isExact();
    return false;
  }

  template <class InstT> bool hasNoSignedZeros(const InstT *Op) const {
    if (UseInstrInfo)
      return Op->hasNoSignedZeros();
    return false;
  }
};

struct SimplifyQuery {
  const DataLayout &DL;
  const TargetLibraryInfo *TLI = nullptr;
  const DominatorTree *DT = nullptr;
  AssumptionCache *AC = nullptr;
  const Instruction *CxtI = nullptr;

  // Wrapper to query additional information about instructions, such as
  // metadata or flags like nsw, which provides conservative results if that
  // information cannot be safely used.
  const InstrInfoQuery IIQ;

  /// Controls whether simplifications are allowed to constrain the range of
  /// possible values for uses of undef. If it is false, simplifications are not
  /// allowed to assume a particular value for a use of undef for example.
  bool CanUseUndef = true;

  SimplifyQuery(const DataLayout &DL, const Instruction *CXTI = nullptr)
      : DL(DL), CxtI(CXTI) {}

  SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
                const DominatorTree *DT = nullptr,
                AssumptionCache *AC = nullptr,
                const Instruction *CXTI = nullptr, bool UseInstrInfo = true,
                bool CanUseUndef = true)
      : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI), IIQ(UseInstrInfo),
        CanUseUndef(CanUseUndef) {}
  SimplifyQuery getWithInstruction(Instruction *I) const {
    SimplifyQuery Copy(*this);
    Copy.CxtI = I;
    return Copy;
  }
  SimplifyQuery getWithoutUndef() const {
    SimplifyQuery Copy(*this);
    Copy.CanUseUndef = false;
    return Copy;
  }

  /// If CanUseUndef is true, returns whether \p V is undef.
  /// Otherwise, always returns false.
  bool isUndefValue(Value *V) const {
    if (!CanUseUndef)
      return false;

    using namespace PatternMatch;
    return match(V, m_Undef());
  }
};
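
// Illustrative sketch (not part of the original header): building a query
// from analyses a pass already has (TLI, DT, and AC are hypothetical local
// variables; all three are optional) and using it to fold an instruction.
// \code
//   const DataLayout &DL = F.getParent()->getDataLayout();
//   SimplifyQuery Q(DL, &TLI, &DT, &AC);
//   if (Value *V = simplifyInstruction(&I, Q.getWithInstruction(&I)))
//     I.replaceAllUsesWith(V);
// \endcode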

// NOTE: the explicit multiple argument versions of these functions are
// deprecated.
// Please use the SimplifyQuery versions in new code.

/// Given operands for an Add, fold the result or return null.
Value *simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
                       const SimplifyQuery &Q);

/// Given operands for a Sub, fold the result or return null.
Value *simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
                       const SimplifyQuery &Q);

/// Given operands for a Mul, fold the result or return null.
Value *simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW,
                       const SimplifyQuery &Q);

/// Given operands for an SDiv, fold the result or return null.
Value *simplifySDivInst(Value *LHS, Value *RHS, bool IsExact,
                        const SimplifyQuery &Q);

/// Given operands for a UDiv, fold the result or return null.
Value *simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact,
                        const SimplifyQuery &Q);

/// Given operands for an SRem, fold the result or return null.
Value *simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);

/// Given operands for a URem, fold the result or return null.
Value *simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);

/// Given operand for an FNeg, fold the result or return null.
Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q);


/// Given operands for an FAdd, fold the result or return null.
Value *
simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                 const SimplifyQuery &Q,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven);

/// Given operands for an FSub, fold the result or return null.
Value *
simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                 const SimplifyQuery &Q,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven);

/// Given operands for an FMul, fold the result or return null.
Value *
simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                 const SimplifyQuery &Q,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven);

/// Given operands for the multiplication of an FMA, fold the result or return
/// null. In contrast to simplifyFMulInst, this function will not perform
/// simplifications whose unrounded results differ when rounded to the argument
/// type.
Value *simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF,
                       const SimplifyQuery &Q,
                       fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                       RoundingMode Rounding = RoundingMode::NearestTiesToEven);

/// Given operands for an FDiv, fold the result or return null.
Value *
simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                 const SimplifyQuery &Q,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven);

/// Given operands for an FRem, fold the result or return null.
Value *
simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
                 const SimplifyQuery &Q,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven);

/// Given operands for a Shl, fold the result or return null.
Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                       const SimplifyQuery &Q);

/// Given operands for a LShr, fold the result or return null.
Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                        const SimplifyQuery &Q);

/// Given operands for an AShr, fold the result or return null.
Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                        const SimplifyQuery &Q);

/// Given operands for an And, fold the result or return null.
Value *simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);

/// Given operands for an Or, fold the result or return null.
Value *simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);

/// Given operands for an Xor, fold the result or return null.
Value *simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);

/// Given operands for an ICmpInst, fold the result or return null.
Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                        const SimplifyQuery &Q);

/// Given operands for an FCmpInst, fold the result or return null.
Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                        FastMathFlags FMF, const SimplifyQuery &Q);

/// Given operands for a SelectInst, fold the result or return null.
Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                          const SimplifyQuery &Q);

/// Given operands for a GetElementPtrInst, fold the result or return null.
Value *simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
                       bool InBounds, const SimplifyQuery &Q);

/// Given operands for an InsertValueInst, fold the result or return null.
Value *simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                               const SimplifyQuery &Q);

/// Given operands for an InsertElement, fold the result or return null.
Value *simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx,
                                 const SimplifyQuery &Q);

/// Given operands for an ExtractValueInst, fold the result or return null.
Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                const SimplifyQuery &Q);

/// Given operands for an ExtractElementInst, fold the result or return null.
Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
                                  const SimplifyQuery &Q);

/// Given operands for a CastInst, fold the result or return null.
Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
                        const SimplifyQuery &Q);

/// Given operands for a ShuffleVectorInst, fold the result or return null.
/// See class ShuffleVectorInst for a description of the mask representation.
Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef<int> Mask,
                                 Type *RetTy, const SimplifyQuery &Q);

//=== Helper functions for higher up the class hierarchy.

/// Given operands for a CmpInst, fold the result or return null.
Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q);

/// Given operand for a UnaryOperator, fold the result or return null.
Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q);

/// Given operand for a UnaryOperator, fold the result or return null.
/// Try to use FastMathFlags when folding the result.
Value *simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
                    const SimplifyQuery &Q);

/// Given operands for a BinaryOperator, fold the result or return null.
Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                     const SimplifyQuery &Q);

/// Given operands for a BinaryOperator, fold the result or return null.
/// Try to use FastMathFlags when folding the result.
Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, FastMathFlags FMF,
                     const SimplifyQuery &Q);

/// Given a callsite, callee, and arguments, fold the result or return null.
Value *simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
                    const SimplifyQuery &Q);

/// Given a constrained FP intrinsic call, tries to compute its simplified
/// version. Returns a simplified result or null.
///
/// This function provides an additional contract: it guarantees that if
/// simplification succeeds, the intrinsic is side-effect free. As a result,
/// successful simplification can be used to delete the intrinsic, not just
/// replace its result.
Value *simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q);

/// Given an operand for a Freeze, see if we can fold the result.
/// If not, this returns null.
Value *simplifyFreezeInst(Value *Op, const SimplifyQuery &Q);

/// Given a load instruction and its pointer operand, fold the result or return
/// null.
Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q);

/// See if we can compute a simplified version of this instruction. If not,
/// return null.
Value *simplifyInstruction(Instruction *I, const SimplifyQuery &Q);

/// Like \p simplifyInstruction but the operands of \p I are replaced with
/// \p NewOps. Returns a simplified value, or null if none was found.
Value *
simplifyInstructionWithOperands(Instruction *I, ArrayRef<Value *> NewOps,
                                const SimplifyQuery &Q);

/// See if V simplifies when its operand Op is replaced with RepOp. If not,
/// return null.
/// AllowRefinement specifies whether the simplification can be a refinement
/// (e.g. 0 instead of poison), or whether it needs to be strictly identical.
/// Op and RepOp can be assumed to not be poison when determining refinement.
Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                              const SimplifyQuery &Q, bool AllowRefinement);

/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
///
/// This first performs a normal RAUW of I with SimpleV. It then recursively
/// attempts to simplify those users updated by the operation. The 'I'
/// instruction must not be equal to the simplified value 'SimpleV'.
/// If UnsimplifiedUsers is provided, instructions that could not be simplified
/// are added to it.
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI = nullptr,
    const DominatorTree *DT = nullptr, AssumptionCache *AC = nullptr,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr);

// These helper functions return a SimplifyQuery structure that contains as
// many of the optional analyses we use as are currently valid.  This is the
// strongly preferred way of constructing SimplifyQuery in passes.
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
                                         Function &);
const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
                                         const DataLayout &);
} // end namespace llvm

#endif
// File: Analysis/SyncDependenceAnalysis.h
//===- SyncDependenceAnalysis.h - Divergent Branch Dependence -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file defines the SyncDependenceAnalysis class, which computes for
// every divergent branch the set of phi nodes that the branch will make
// divergent.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SYNCDEPENDENCEANALYSIS_H
#define LLVM_ANALYSIS_SYNCDEPENDENCEANALYSIS_H

#include "llvm/ADT/SmallPtrSet.h"
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace llvm {

class BasicBlock;
class DominatorTree;
class Instruction;
class LoopInfo;
class PostDominatorTree;

using ConstBlockSet = SmallPtrSet<const BasicBlock *, 4>;
struct ControlDivergenceDesc {
  // Join points of divergent disjoint paths.
  ConstBlockSet JoinDivBlocks;
  // Divergent loop exits
  ConstBlockSet LoopDivBlocks;
};

struct ModifiedPO {
  std::vector<const BasicBlock *> LoopPO;
  std::unordered_map<const BasicBlock *, unsigned> POIndex;
  void appendBlock(const BasicBlock &BB) {
    POIndex[&BB] = LoopPO.size();
    LoopPO.push_back(&BB);
  }
  unsigned getIndexOf(const BasicBlock &BB) const {
    return POIndex.find(&BB)->second;
  }
  unsigned size() const { return LoopPO.size(); }
  const BasicBlock *getBlockAt(unsigned Idx) const { return LoopPO[Idx]; }
};

/// \brief Relates points of divergent control to join points in
/// reducible CFGs.
///
/// This analysis relates points of divergent control to points of converging
/// divergent control. The analysis requires all loops to be reducible.
class SyncDependenceAnalysis {
public:
  ~SyncDependenceAnalysis();
  SyncDependenceAnalysis(const DominatorTree &DT, const PostDominatorTree &PDT,
                         const LoopInfo &LI);

  /// \brief Computes divergent join points and loop exits caused by branch
  /// divergence in \p Term.
  ///
  /// Returns the set of blocks which are reachable by disjoint paths from
  /// \p Term. The set also contains loop exits if there are two disjoint
  /// paths: one from \p Term to the loop exit and another from \p Term to the
  /// loop header. Those exit blocks are added to the returned set.
  /// If L is the parent loop of \p Term and an exit of L is in the returned
  /// set, then L is a divergent loop.
  const ControlDivergenceDesc &getJoinBlocks(const Instruction &Term);

private:
  static ControlDivergenceDesc EmptyDivergenceDesc;

  ModifiedPO LoopPO;

  const DominatorTree &DT;
  const PostDominatorTree &PDT;
  const LoopInfo &LI;

  std::map<const Instruction *, std::unique_ptr<ControlDivergenceDesc>>
      CachedControlDivDescs;
};
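
// Illustrative sketch (Term is a hypothetical divergent terminator, and
// markPhisDivergent a hypothetical consumer):
// \code
//   SyncDependenceAnalysis SDA(DT, PDT, LI);
//   const ControlDivergenceDesc &Desc = SDA.getJoinBlocks(*Term);
//   for (const BasicBlock *Join : Desc.JoinDivBlocks)
//     markPhisDivergent(*Join);
// \endcode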

} // namespace llvm

#endif // LLVM_ANALYSIS_SYNCDEPENDENCEANALYSIS_H
// File: Analysis/Utils/Local.h
//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_UTILS_LOCAL_H
#define LLVM_ANALYSIS_UTILS_LOCAL_H

namespace llvm {

class DataLayout;
class IRBuilderBase;
class User;
class Value;

/// Given a getelementptr instruction/constantexpr, emit the code necessary to
/// compute the offset from the base pointer (without adding in the base
/// pointer). Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions that the index computations do
/// not overflow are made.
Value *emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP,
                     bool NoAssumptions = false);
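
// Illustrative sketch (GEP is a hypothetical GetElementPtrInst*; IRBuilder<>
// comes from llvm/IR/IRBuilder.h): materialize the byte offset of a GEP right
// before it.
// \code
//   IRBuilder<> Builder(GEP);
//   Value *Offset = emitGEPOffset(&Builder, DL, GEP);
// \endcode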

} // namespace llvm

#endif // LLVM_ANALYSIS_UTILS_LOCAL_H
// File: Analysis/Utils/TFUtils.h
//===- TFUtils.h - utilities for tensorflow C API ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H
#define LLVM_ANALYSIS_UTILS_TFUTILS_H

#include "llvm/Config/llvm-config.h"

#ifdef LLVM_HAVE_TFLITE
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/JSON.h"

#include <memory>
#include <vector>

namespace llvm {

/// Load a SavedModel, find the given inputs and outputs, and set up storage
/// for input tensors. The user is responsible for correctly dimensioning the
/// input tensors and setting their values before calling evaluate().
/// To initialize:
/// - construct the object
/// - initialize the input tensors using initInput. Indices must correspond to
///   indices in the InputNames used at construction.
/// To use:
/// - set input values by using getInput to get each input tensor, and then
///   setting internal scalars, for all dimensions (tensors are row-major:
///   https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/c/c_api.h#L205)
/// - call evaluate. The input tensors' values are not consumed after this, and
///   may still be read.
/// - use the outputs in the output vector
class TFModelEvaluatorImpl;
class EvaluationResultImpl;

class TFModelEvaluator final {
public:
  /// The result of a model evaluation. Handles the lifetime of the output
  /// tensors, which means that their values need to be used before
  /// the EvaluationResult's dtor is called.
  class EvaluationResult {
  public:
    EvaluationResult(const EvaluationResult &) = delete;
    EvaluationResult &operator=(const EvaluationResult &Other) = delete;

    EvaluationResult(EvaluationResult &&Other);
    EvaluationResult &operator=(EvaluationResult &&Other);

    ~EvaluationResult();

    /// Get a (const) pointer to the first element of the tensor at Index.
    template <typename T> T *getTensorValue(size_t Index) {
      return static_cast<T *>(getUntypedTensorValue(Index));
    }

    template <typename T> const T *getTensorValue(size_t Index) const {
      return static_cast<T *>(getUntypedTensorValue(Index));
    }

    /// Get a (const) pointer to the untyped data of the tensor.
    void *getUntypedTensorValue(size_t Index);
    const void *getUntypedTensorValue(size_t Index) const;

  private:
    friend class TFModelEvaluator;
    EvaluationResult(std::unique_ptr<EvaluationResultImpl> Impl);
    std::unique_ptr<EvaluationResultImpl> Impl;
  };

  TFModelEvaluator(StringRef SavedModelPath,
                   const std::vector<TensorSpec> &InputSpecs,
                   const std::vector<TensorSpec> &OutputSpecs,
                   const char *Tags = "serve");

  ~TFModelEvaluator();
  TFModelEvaluator(const TFModelEvaluator &) = delete;
  TFModelEvaluator(TFModelEvaluator &&) = delete;

  /// Evaluate the model, assuming it is valid. Returns std::nullopt if the
  /// evaluation fails or the model is invalid, or an EvaluationResult
  /// otherwise. The inputs are assumed to have been already provided via
  /// getInput(). When returning std::nullopt, it also invalidates this object.
  std::optional<EvaluationResult> evaluate();

  /// Provides access to the input vector.
  template <typename T> T *getInput(size_t Index) {
    return static_cast<T *>(getUntypedInput(Index));
  }

  /// Returns true if the tensorflow model was loaded successfully, false
  /// otherwise.
  bool isValid() const { return !!Impl; }

  /// Untyped access to input.
  void *getUntypedInput(size_t Index);

private:
  std::unique_ptr<TFModelEvaluatorImpl> Impl;
};
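
// Illustrative sketch (the model path and tensor names are assumptions for
// illustration only):
// \code
//   std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("x", {1})};
//   std::vector<TensorSpec> Outputs{TensorSpec::createSpec<int64_t>("y", {1})};
//   TFModelEvaluator Evaluator("path/to/saved_model", Inputs, Outputs);
//   if (Evaluator.isValid()) {
//     *Evaluator.getInput<int64_t>(0) = 42;
//     if (auto Result = Evaluator.evaluate()) {
//       int64_t Y = *Result->getTensorValue<int64_t>(0);
//       (void)Y;
//     }
//   }
// \endcode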

} // namespace llvm

#endif // LLVM_HAVE_TFLITE
#endif // LLVM_ANALYSIS_UTILS_TFUTILS_H
// File: Analysis/Utils/TrainingLogger.h
//===- TrainingLogger.h - mlgo feature/reward logging  ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The design goals of the logger are:
// - no dependencies that llvm doesn't already have.
// - support streaming, so that we don't need to buffer data during compilation
// - 0-decoding tensor values. Tensor values are potentially very large buffers
// of scalars. Because of their potentially large size, avoiding
// serialization/deserialization overhead is preferred.
//
// The simple logger produces an output of the form (each item on its own line):
// - header: a json object describing the data that will follow.
// - context: e.g. function name, for regalloc, or "default" for module-wide
// optimizations like the inliner. This is the context to which the subsequent
// data corresponds.
// - observation number.
// - tensor values - raw bytes of the tensors, in the order given in the header.
// The values are in succession, i.e. no separator is found between successive
// tensor values. At the end, there is a new line character.
// - [score] - this is optional, and is present if it was present in the header.
// Currently, for final rewards, we output "0" scores after each observation,
// except for the last one.
// <repeat>
// The file should be read as binary, but the reason we use newlines is mostly
// ease of debugging: the log can be opened in a text editor and, while tensor
// values are inscrutable, at least the sequence of data can be easily observed.
// Of course, the buffer of tensor values could contain '\n' bytes. A reader
// should use the header information to know how much data to read for the
// tensor values, and not use line information for that.
//
// An example reader, used for test, is available at
// Analysis/models/log_reader.py
//
// Example:
// {"features":[list of TensorSpecs], "score":<a tensor spec>}
// {"context": "aFunction"}
// {"observation": 0}
// <bytes>
// {"outcome": 0}
// <bytes for the tensor corresponding to the "score" spec in the header>
// {"observation": 1}
// ...
// {"context": "anotherFunction"}
// {"observation": 0}
// ...
//

#ifndef LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H
#define LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H

#include "llvm/Config/llvm-config.h"

#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/JSON.h"

#include <memory>
#include <optional>
#include <vector>

namespace llvm {

/// Logging utility - given an ordered specification of features, and assuming
/// a scalar reward, allows logging feature values and rewards.
/// The assumption is that, for an event to be logged (i.e. a set of feature
/// values and a reward), the user calls the log* API for each feature exactly
/// once, providing the index matching the position in the feature spec list
/// provided at construction. The example assumes the first feature's element
/// type is float, the second is int64, and the reward is float:
///
/// event 0:
///   logFloatValue(0, ...)
///   logInt64Value(1, ...)
///   ...
///   logFloatReward(...)
/// event 1:
///   logFloatValue(0, ...)
///   logInt64Value(1, ...)
///   ...
///   logFloatReward(...)
///
/// At the end, call print to generate the log.
/// Alternatively, don't call logReward at the end of each event, just
/// log{Float|Int32|Int64}FinalReward at the end.
class Logger final {
  std::unique_ptr<raw_ostream> OS;
  const std::vector<TensorSpec> FeatureSpecs;
  const TensorSpec RewardSpec;
  const bool IncludeReward;
  StringMap<size_t> ObservationIDs;
  std::string CurrentContext;

  void writeHeader(std::optional<TensorSpec> AdviceSpec);
  void writeTensor(const TensorSpec &Spec, const char *RawData) {
    OS->write(RawData, Spec.getTotalTensorBufferSize());
  }
  void logRewardImpl(const char *RawData);

public:
  /// Construct a Logger. If IncludeReward is false, then logReward or
  /// logFinalReward shouldn't be called, and the reward feature won't be
  /// printed out.
  /// NOTE: the FeatureSpecs are expected to be in the same order (i.e. have
  /// corresponding indices) as any MLModelRunner implementations
  /// corresponding to the model being trained/logged.
  Logger(std::unique_ptr<raw_ostream> OS,
         const std::vector<TensorSpec> &FeatureSpecs,
         const TensorSpec &RewardSpec, bool IncludeReward,
         std::optional<TensorSpec> AdviceSpec = std::nullopt);

  void switchContext(StringRef Name);
  void startObservation();
  void endObservation();
  void flush() { OS->flush(); }

  const std::string &currentContext() const { return CurrentContext; }

  /// Check if there is at least an observation for `currentContext()`.
  bool hasObservationInProgress() const {
    return hasAnyObservationForContext(CurrentContext);
  }

  /// Check if there is at least an observation for the context `Ctx`.
  bool hasAnyObservationForContext(StringRef Ctx) const {
    return ObservationIDs.contains(Ctx);
  }

  template <typename T> void logReward(T Value) {
    logRewardImpl(reinterpret_cast<const char *>(&Value));
  }

  void logTensorValue(size_t FeatureID, const char *RawData) {
    writeTensor(FeatureSpecs[FeatureID], RawData);
  }
};
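
// Illustrative sketch of the intended call sequence (assumes feature 0 is a
// single int64 tensor and the reward is a float, matching the specs passed at
// construction):
// \code
//   Logger Log(std::move(OS), FeatureSpecs, RewardSpec, /*IncludeReward=*/true);
//   Log.switchContext("myFunction");
//   Log.startObservation();
//   int64_t Val = 42;
//   Log.logTensorValue(0, reinterpret_cast<const char *>(&Val));
//   Log.endObservation();
//   Log.logReward(1.0f);
// \endcode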

} // namespace llvm
#endif // LLVM_ANALYSIS_UTILS_TRAININGLOGGER_H
// File: Analysis/Utils/ImportedFunctionsInliningStatistics.h
//===-- ImportedFunctionsInliningStatistics.h -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Generating inliner statistics for imported functions, mostly useful for
// ThinLTO.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
#define LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <memory>
#include <vector>

namespace llvm {
class Module;
class Function;
/// Calculate and dump ThinLTO specific inliner stats.
/// The main statistics are:
/// (1) Number of inlined imported functions,
/// (2) Number of imported functions inlined into importing module (indirect),
/// (3) Number of non imported functions inlined into importing module
/// (indirect).
/// The difference between the first and the second is that the first stat
/// counts all performed inlines of imported functions, but the second counts
/// only the functions that have eventually been inlined into a function in the
/// importing module (by a chain of inlines). Because llvm uses a bottom-up
/// inliner, it is possible to e.g. import functions `A` and `B`, and then
/// inline `B` into `A`; after this, `A` might be too big to be inlined into
/// some other function that calls it. The statistic is calculated by building
/// a graph, where the nodes are functions and the edges are performed inlines,
/// and then marking the edges starting from non-imported functions.
///
/// If `Verbose` is set to true, then it also dumps statistics for each inlined
/// function, sorted by the greatest inline count:
/// - number of performed inlines
/// - number of performed inlines into the importing module
class ImportedFunctionsInliningStatistics {
private:
  /// InlineGraphNode represents node in graph of inlined functions.
  struct InlineGraphNode {
    // Default-constructible and movable.
    InlineGraphNode() = default;
    InlineGraphNode(InlineGraphNode &&) = default;
    InlineGraphNode &operator=(InlineGraphNode &&) = default;

    llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
    /// Incremented every direct inline.
    int32_t NumberOfInlines = 0;
    /// Number of inlines into non imported function (possibly indirect via
    /// intermediate inlines). Computed based on graph search.
    int32_t NumberOfRealInlines = 0;
    bool Imported = false;
    bool Visited = false;
  };

public:
  ImportedFunctionsInliningStatistics() = default;
  ImportedFunctionsInliningStatistics(
      const ImportedFunctionsInliningStatistics &) = delete;

  /// Set information like AllFunctions, ImportedFunctions, ModuleName.
  void setModuleInfo(const Module &M);
  /// Record inline of @param Callee to @param Caller for statistics.
  void recordInline(const Function &Caller, const Function &Callee);
  /// Dump stats computed with InlinerStatistics class.
  /// If @param Verbose is true then separate statistics for every inlined
  /// function will be printed.
  void dump(bool Verbose);

private:
  /// Creates a new node in NodeMap and sets attributes, or returns the
  /// existing one.
  InlineGraphNode &createInlineGraphNode(const Function &);
  void calculateRealInlines();
  void dfs(InlineGraphNode &GraphNode);

  using NodesMapTy =
      llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
  using SortedNodesTy =
      std::vector<const NodesMapTy::MapEntryTy*>;
  /// Returns vector of elements sorted by
  /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
  SortedNodesTy getSortedNodes();

private:
  /// This map manages the lifetime of all InlineGraphNodes. A unique pointer
  /// to InlineGraphNode is used because the node pointers are also saved in
  /// the InlinedCallees vector. If the map stored InlineGraphNode by value,
  /// the address of a node would not be invariant.
  NodesMapTy NodesMap;
  /// Non-external functions that have had some other function inlined into
  /// them.
  std::vector<StringRef> NonImportedCallers;
  int AllFunctions = 0;
  int ImportedFunctions = 0;
  StringRef ModuleName;
};
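
// Illustrative usage sketch, mirroring how an inliner would drive this class:
// \code
//   ImportedFunctionsInliningStatistics Stats;
//   Stats.setModuleInfo(M);
//   Stats.recordInline(Caller, Callee); // once per performed inline
//   Stats.dump(/*Verbose=*/true);
// \endcode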

enum class InlinerFunctionImportStatsOpts {
  No = 0,
  Basic = 1,
  Verbose = 2,
};

} // namespace llvm

#endif // LLVM_ANALYSIS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
// File: Analysis/ObjCARCUtil.h
//===- ObjCARCUtil.h - ObjC ARC Utility Functions ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ARC utility functions which are used by various parts of
/// the compiler.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_OBJCARCUTIL_H
#define LLVM_ANALYSIS_OBJCARCUTIL_H

#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"

namespace llvm {
namespace objcarc {

inline const char *getRVMarkerModuleFlagStr() {
  return "clang.arc.retainAutoreleasedReturnValueMarker";
}

inline bool hasAttachedCallOpBundle(const CallBase *CB) {
  // Ignore the bundle if the return type is void. Global optimization passes
  // can turn the called function's return type to void. That should happen only
  // if the call doesn't return and the call to @llvm.objc.clang.arc.noop.use
  // no longer consumes the function return or is deleted. In that case, it's
  // not necessary to emit the marker instruction or calls to the ARC runtime
  // functions.
  return !CB->getFunctionType()->getReturnType()->isVoidTy() &&
         CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall)
             .has_value();
}

/// This function returns operand bundle clang_arc_attachedcall's argument,
/// which is the address of the ARC runtime function.
inline std::optional<Function *> getAttachedARCFunction(const CallBase *CB) {
  auto B = CB->getOperandBundle(LLVMContext::OB_clang_arc_attachedcall);
  if (!B)
    return std::nullopt;

  return cast<Function>(B->Inputs[0]);
}

/// Check whether the function is retainRV/unsafeClaimRV.
inline bool isRetainOrClaimRV(ARCInstKind Kind) {
  return Kind == ARCInstKind::RetainRV || Kind == ARCInstKind::UnsafeClaimRV;
}

/// This function returns the ARCInstKind of the function attached to operand
/// bundle clang_arc_attachedcall. It returns ARCInstKind::None if the call
/// doesn't have the operand bundle or the operand is null. Otherwise it
/// returns either RetainRV or UnsafeClaimRV.
inline ARCInstKind getAttachedARCFunctionKind(const CallBase *CB) {
  std::optional<Function *> Fn = getAttachedARCFunction(CB);
  if (!Fn)
    return ARCInstKind::None;
  auto FnClass = GetFunctionClass(*Fn);
  assert(isRetainOrClaimRV(FnClass) && "unexpected ARC runtime function");
  return FnClass;
}
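
// Illustrative sketch: dispatching on the ARC runtime function attached to a
// call site (CB is a hypothetical const CallBase*).
// \code
//   if (hasAttachedCallOpBundle(CB)) {
//     switch (getAttachedARCFunctionKind(CB)) {
//     case ARCInstKind::RetainRV:
//       // objc_retainAutoreleasedReturnValue semantics.
//       break;
//     case ARCInstKind::UnsafeClaimRV:
//       // objc_unsafeClaimAutoreleasedReturnValue semantics.
//       break;
//     default:
//       break;
//     }
//   }
// \endcode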

} // end namespace objcarc
} // end namespace llvm

#endif
// File: Analysis/InlineSizeEstimatorAnalysis.h
//===- InlineSizeEstimatorAnalysis.h - ML size estimator --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H
#define LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;

class TFModelEvaluator;
class InlineSizeEstimatorAnalysis
    : public AnalysisInfoMixin<InlineSizeEstimatorAnalysis> {
public:
  InlineSizeEstimatorAnalysis();
  InlineSizeEstimatorAnalysis(InlineSizeEstimatorAnalysis &&);
  ~InlineSizeEstimatorAnalysis();

  static AnalysisKey Key;
  using Result = std::optional<size_t>;
  Result run(const Function &F, FunctionAnalysisManager &FAM);
  static bool isEvaluatorRequested();

private:
  std::unique_ptr<TFModelEvaluator> Evaluator;
};
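
// Illustrative sketch: querying the size estimate from a pass (the result is
// std::nullopt when no model evaluator is available).
// \code
//   auto SizeEstimate = FAM.getResult<InlineSizeEstimatorAnalysis>(F);
//   if (SizeEstimate)
//     errs() << "estimated size: " << *SizeEstimate << "\n";
// \endcode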

class InlineSizeEstimatorAnalysisPrinterPass
    : public PassInfoMixin<InlineSizeEstimatorAnalysisPrinterPass> {
  raw_ostream &OS;

public:
  explicit InlineSizeEstimatorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm
#endif // LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H
// File: Analysis/MustExecute.h
//===- MustExecute.h - Is an instruction known to execute--------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Contains a collection of routines for determining if a given instruction is
/// guaranteed to execute if a given point in control flow is reached. The most
/// common example is an instruction within a loop being provably executed if we
/// branch to the header of its containing loop.
///
/// There are two interfaces available to determine if an instruction is
/// executed once a given point in the control flow is reached:
/// 1) A loop-centric one derived from LoopSafetyInfo.
/// 2) A "must be executed context"-based one implemented in the
///    MustBeExecutedContextExplorer.
/// Please refer to the class comments for more information.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MUSTEXECUTE_H
#define LLVM_ANALYSIS_MUSTEXECUTE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Analysis/InstructionPrecedenceTracking.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

namespace {
template <typename T> using GetterTy = std::function<T *(const Function &F)>;
}

class BasicBlock;
class DominatorTree;
class Instruction;
class Loop;
class LoopInfo;
class PostDominatorTree;
class raw_ostream;

/// Captures loop safety information.
/// It keeps track of whether the loop's blocks may throw an exception or
/// otherwise exit abnormally on any iteration of the loop which might actually
/// execute at runtime.  The primary way to consume this information is via
/// isGuaranteedToExecute below, but some callers bail out or fall back to
/// alternate reasoning if a loop contains any implicit control flow.
/// NOTE: LoopSafetyInfo contains cached information regarding loops and their
/// particular blocks. This information is only dropped on invocation of
/// computeLoopSafetyInfo. If the loop or any of its blocks is deleted, or if
/// any thrower instructions have been added or removed from them, or if the
/// control flow has changed, or in case of other meaningful modifications, the
/// LoopSafetyInfo needs to be recomputed. If meaningful modifications to the
/// loop were made and the info wasn't recomputed properly, the behavior of all
/// methods except for computeLoopSafetyInfo is undefined.
class LoopSafetyInfo {
  // Used to update funclet bundle operands.
  DenseMap<BasicBlock *, ColorVector> BlockColors;

protected:
  /// Computes block colors.
  void computeBlockColors(const Loop *CurLoop);

public:
  /// Returns block colors map that is used to update funclet operand bundles.
  const DenseMap<BasicBlock *, ColorVector> &getBlockColors() const;

  /// Copy colors of block \p Old into the block \p New.
  void copyColors(BasicBlock *New, BasicBlock *Old);

  /// Returns true iff the block \p BB may potentially throw an exception. It
  /// can be a false positive in cases when we want to avoid complex analysis.
  virtual bool blockMayThrow(const BasicBlock *BB) const = 0;

  /// Returns true iff any block of the loop for which this info is computed
  /// contains an instruction that may throw or otherwise exit abnormally.
  virtual bool anyBlockMayThrow() const = 0;

  /// Return true if we must reach the block \p BB under assumption that the
  /// loop \p CurLoop is entered.
  bool allLoopPathsLeadToBlock(const Loop *CurLoop, const BasicBlock *BB,
                               const DominatorTree *DT) const;

  /// Computes safety information for a loop, checking the loop body and header
  /// for the possibility of instructions that may throw. It takes the loop as
  /// argument and updates the safety information held by this LoopSafetyInfo.
  /// Note: This is defined to clear and reinitialize an already initialized
  /// LoopSafetyInfo.  Some callers rely on this fact.
  virtual void computeLoopSafetyInfo(const Loop *CurLoop) = 0;

  /// Returns true if the instruction in a loop is guaranteed to execute at
  /// least once (under the assumption that the loop is entered).
  virtual bool isGuaranteedToExecute(const Instruction &Inst,
                                     const DominatorTree *DT,
                                     const Loop *CurLoop) const = 0;

  LoopSafetyInfo() = default;

  virtual ~LoopSafetyInfo() = default;
};

/// Simple and conservative implementation of LoopSafetyInfo that can give
/// false-positive answers to its queries in order to avoid complicated
/// analysis.
class SimpleLoopSafetyInfo: public LoopSafetyInfo {
  bool MayThrow = false;       // The current loop contains an instruction which
                               // may throw.
  bool HeaderMayThrow = false; // Same as previous, but specific to loop header

public:
  bool blockMayThrow(const BasicBlock *BB) const override;

  bool anyBlockMayThrow() const override;

  void computeLoopSafetyInfo(const Loop *CurLoop) override;

  bool isGuaranteedToExecute(const Instruction &Inst,
                             const DominatorTree *DT,
                             const Loop *CurLoop) const override;
};
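
// Example usage of SimpleLoopSafetyInfo (a minimal sketch; assumes a Loop *L
// and a DominatorTree *DT are available in the surrounding pass):
//
//   SimpleLoopSafetyInfo SafetyInfo;
//   SafetyInfo.computeLoopSafetyInfo(L);
//   for (BasicBlock *BB : L->blocks())
//     for (Instruction &I : *BB)
//       if (SafetyInfo.isGuaranteedToExecute(I, DT, L))
//         ; // I executes at least once whenever L is entered.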

/// This implementation of LoopSafetyInfo uses ImplicitControlFlowTracking to
/// give precise answers to "may throw" queries. It keeps a cache that must be
/// kept up to date by calling the methods insertInstructionTo and
/// removeInstruction whenever we modify a basic block's contents by adding or
/// removing instructions.
class ICFLoopSafetyInfo: public LoopSafetyInfo {
  bool MayThrow = false;       // The current loop contains an instruction which
                               // may throw.
  // Contains information about implicit control flow in this loop's blocks.
  mutable ImplicitControlFlowTracking ICF;
  // Contains information about instruction that may possibly write memory.
  mutable MemoryWriteTracking MW;

public:
  bool blockMayThrow(const BasicBlock *BB) const override;

  bool anyBlockMayThrow() const override;

  void computeLoopSafetyInfo(const Loop *CurLoop) override;

  bool isGuaranteedToExecute(const Instruction &Inst,
                             const DominatorTree *DT,
                             const Loop *CurLoop) const override;

  /// Returns true if no memory-modifying instruction can be executed before we
  /// enter \p BB, under the assumption that \p CurLoop is entered.
  bool doesNotWriteMemoryBefore(const BasicBlock *BB, const Loop *CurLoop)
      const;

  /// Returns true if no memory-modifying instruction can be executed before we
  /// execute \p I, under the assumption that \p CurLoop is entered.
  bool doesNotWriteMemoryBefore(const Instruction &I, const Loop *CurLoop)
      const;

  /// Inform the safety info that we are planning to insert a new instruction
  /// \p Inst into the basic block \p BB. It will make all cache updates needed
  /// to keep the info correct after this insertion.
  void insertInstructionTo(const Instruction *Inst, const BasicBlock *BB);

  /// Inform the safety info that we are planning to remove the instruction
  /// \p Inst from its block. It will make all cache updates needed to keep the
  /// info correct after this removal.
  void removeInstruction(const Instruction *Inst);
};
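
// Example of keeping an ICFLoopSafetyInfo cache in sync while moving an
// instruction (a minimal sketch; Inst, TargetBB, and CurLoop are assumed to
// come from the surrounding transformation, and the call order mirrors how a
// sinking transform might use this API):
//
//   ICFLoopSafetyInfo SafetyInfo;
//   SafetyInfo.computeLoopSafetyInfo(CurLoop);
//   SafetyInfo.removeInstruction(Inst);             // Leaving the old block.
//   SafetyInfo.insertInstructionTo(Inst, TargetBB); // Entering the new block.
//   Inst->moveBefore(TargetBB->getTerminator());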

bool mayContainIrreducibleControl(const Function &F, const LoopInfo *LI);

struct MustBeExecutedContextExplorer;

/// Enum that allows us to spell out the direction.
enum class ExplorationDirection {
  BACKWARD = 0,
  FORWARD = 1,
};

/// Must be executed iterators visit stretches of instructions that are
/// guaranteed to be executed together, potentially with other instructions
/// executed in-between.
///
/// Given the following code, and assuming all statements are single
/// instructions which transfer execution to the successor (see
/// isGuaranteedToTransferExecutionToSuccessor), there are two possible
/// outcomes. If we start the iterator at A, B, or E, we will visit only A, B,
/// and E. If we start at C or D, we will visit all instructions A-E.
///
/// \code
///   A;
///   B;
///   if (...) {
///     C;
///     D;
///   }
///   E;
/// \endcode
///
///
/// Below is the example extended with instructions F and G. Now we assume F
/// might not transfer execution to its successor G. As a result we get the
/// following visit sets:
///
/// Start Instruction   | Visit Set
/// A                   | A, B,       E, F
///    B                | A, B,       E, F
///       C             | A, B, C, D, E, F
///          D          | A, B, C, D, E, F
///             E       | A, B,       E, F
///                F    | A, B,       E, F
///                   G | A, B,       E, F, G
///
///
/// \code
///   A;
///   B;
///   if (...) {
///     C;
///     D;
///   }
///   E;
///   F;  // Might not transfer execution to its successor G.
///   G;
/// \endcode
///
///
/// A more complex example involving conditionals, loops, break, and continue
/// is shown below. We again assume all instructions will transfer control to
/// their successor and we assume we can prove the inner loop to be finite. We
/// omit non-trivial branch conditions as the exploration is oblivious to them.
/// Constant branches are assumed to be unconditional in the CFG. The resulting
/// visit sets are shown in the table below.
///
/// \code
///   A;
///   while (true) {
///     B;
///     if (...)
///       C;
///     if (...)
///       continue;
///     D;
///     if (...)
///       break;
///     do {
///       if (...)
///         continue;
///       E;
///     } while (...);
///     F;
///   }
///   G;
/// \endcode
///
/// Start Instruction    | Visit Set
/// A                    | A, B
///    B                 | A, B
///       C              | A, B, C
///          D           | A, B,    D
///             E        | A, B,    D, E, F
///                F     | A, B,    D,    F
///                   G  | A, B,    D,       G
///
///
/// Note that the examples show optimal visit sets, not necessarily the ones
/// derived by the explorer, which depend on the available CFG analyses (see
/// MustBeExecutedContextExplorer). Also note that, depending on the options,
/// the visit set can contain instructions from other functions.
struct MustBeExecutedIterator {
  /// Type declarations that make this class an input iterator.
  ///{
  typedef const Instruction *value_type;
  typedef std::ptrdiff_t difference_type;
  typedef const Instruction **pointer;
  typedef const Instruction *&reference;
  typedef std::input_iterator_tag iterator_category;
  ///}

  using ExplorerTy = MustBeExecutedContextExplorer;

  MustBeExecutedIterator(const MustBeExecutedIterator &Other) = default;

  MustBeExecutedIterator(MustBeExecutedIterator &&Other)
      : Visited(std::move(Other.Visited)), Explorer(Other.Explorer),
        CurInst(Other.CurInst), Head(Other.Head), Tail(Other.Tail) {}

  MustBeExecutedIterator &operator=(MustBeExecutedIterator &&Other) {
    if (this != &Other) {
      std::swap(Visited, Other.Visited);
      std::swap(CurInst, Other.CurInst);
      std::swap(Head, Other.Head);
      std::swap(Tail, Other.Tail);
    }
    return *this;
  }

  ~MustBeExecutedIterator() = default;

  /// Pre- and post-increment operators.
  ///{
  MustBeExecutedIterator &operator++() {
    CurInst = advance();
    return *this;
  }

  MustBeExecutedIterator operator++(int) {
    MustBeExecutedIterator tmp(*this);
    operator++();
    return tmp;
  }
  ///}

  /// Equality and inequality operators. Note that we ignore the history here.
  ///{
  bool operator==(const MustBeExecutedIterator &Other) const {
    return CurInst == Other.CurInst && Head == Other.Head && Tail == Other.Tail;
  }

  bool operator!=(const MustBeExecutedIterator &Other) const {
    return !(*this == Other);
  }
  ///}

  /// Return the underlying instruction.
  const Instruction *&operator*() { return CurInst; }
  const Instruction *getCurrentInst() const { return CurInst; }

  /// Return true if \p I was encountered by this iterator already.
  bool count(const Instruction *I) const {
    return Visited.count({I, ExplorationDirection::FORWARD}) ||
           Visited.count({I, ExplorationDirection::BACKWARD});
  }

private:
  using VisitedSetTy =
      DenseSet<PointerIntPair<const Instruction *, 1, ExplorationDirection>>;

  /// Private constructors.
  MustBeExecutedIterator(ExplorerTy &Explorer, const Instruction *I);

  /// Reset the iterator to its initial state pointing at \p I.
  void reset(const Instruction *I);

  /// Reset the iterator to point at \p I, keep cached state.
  void resetInstruction(const Instruction *I);

  /// Try to advance one of the underlying positions (Head or Tail).
  ///
  /// \return The next instruction in the must be executed context, or nullptr
  ///         if none was found.
  const Instruction *advance();

  /// A set to track the visited instructions in order to deal with endless
  /// loops and recursion.
  VisitedSetTy Visited;

  /// A reference to the explorer that created this iterator.
  ExplorerTy &Explorer;

  /// The instruction we are currently exposing to the user. There is always an
  /// instruction that we know is executed together with the given program
  /// point, initially the program point itself.
  const Instruction *CurInst;

  /// Two positions that mark the program points where this iterator will look
  /// for the next instruction. Note that the current instruction is either the
  /// one pointed to by Head, Tail, or both.
  const Instruction *Head, *Tail;

  friend struct MustBeExecutedContextExplorer;
};

/// A "must be executed context" for a given program point PP is the set of
/// instructions, potentially before and after PP, that are always executed
/// when PP is reached. The MustBeExecutedContextExplorer offers an interface
/// to explore "must be executed contexts" in a module through the use of
/// MustBeExecutedIterator.
///
/// The explorer exposes "must be executed iterators" that traverse the must be
/// executed context. There is little information sharing between iterators as
/// the expected use case involves few iterators for "far apart" instructions.
/// If that changes, we should consider caching more intermediate results.
struct MustBeExecutedContextExplorer {

  /// In the description of the parameters we use PP to denote a program point
  /// for which the must be executed context is explored, or put differently,
  /// for which the MustBeExecutedIterator is created.
  ///
  /// \param ExploreInterBlock    Flag to indicate if instructions in blocks
  ///                             other than the parent of PP should be
  ///                             explored.
  /// \param ExploreCFGForward    Flag to indicate if instructions located after
  ///                             PP in the CFG, e.g., post-dominating PP,
  ///                             should be explored.
  /// \param ExploreCFGBackward   Flag to indicate if instructions located
  ///                             before PP in the CFG, e.g., dominating PP,
  ///                             should be explored.
  MustBeExecutedContextExplorer(
      bool ExploreInterBlock, bool ExploreCFGForward, bool ExploreCFGBackward,
      GetterTy<const LoopInfo> LIGetter =
          [](const Function &) { return nullptr; },
      GetterTy<const DominatorTree> DTGetter =
          [](const Function &) { return nullptr; },
      GetterTy<const PostDominatorTree> PDTGetter =
          [](const Function &) { return nullptr; })
      : ExploreInterBlock(ExploreInterBlock),
        ExploreCFGForward(ExploreCFGForward),
        ExploreCFGBackward(ExploreCFGBackward), LIGetter(LIGetter),
        DTGetter(DTGetter), PDTGetter(PDTGetter), EndIterator(*this, nullptr) {}

  /// Iterator-based interface. \see MustBeExecutedIterator.
  ///{
  using iterator = MustBeExecutedIterator;
  using const_iterator = const MustBeExecutedIterator;

  /// Return an iterator to explore the context around \p PP.
  iterator &begin(const Instruction *PP) {
    auto &It = InstructionIteratorMap[PP];
    if (!It)
      It.reset(new iterator(*this, PP));
    return *It;
  }

  /// Return an iterator to explore the cached context around \p PP.
  const_iterator &begin(const Instruction *PP) const {
    return *InstructionIteratorMap.find(PP)->second;
  }

  /// Return a universal end iterator.
  ///{
  iterator &end() { return EndIterator; }
  iterator &end(const Instruction *) { return EndIterator; }

  const_iterator &end() const { return EndIterator; }
  const_iterator &end(const Instruction *) const { return EndIterator; }
  ///}

  /// Return an iterator range to explore the context around \p PP.
  llvm::iterator_range<iterator> range(const Instruction *PP) {
    return llvm::make_range(begin(PP), end(PP));
  }

  /// Return an iterator range to explore the cached context around \p PP.
  llvm::iterator_range<const_iterator> range(const Instruction *PP) const {
    return llvm::make_range(begin(PP), end(PP));
  }
  ///}

  /// Check \p Pred on all instructions in the context.
  ///
  /// This method will evaluate \p Pred on every instruction in the context and
  /// return true only if \p Pred holds for all of them.
  bool checkForAllContext(const Instruction *PP,
                          function_ref<bool(const Instruction *)> Pred) {
    for (auto EIt = begin(PP), EEnd = end(PP); EIt != EEnd; ++EIt)
      if (!Pred(*EIt))
        return false;
    return true;
  }
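
  // For example, one can check that no instruction known to execute with PP
  // may write memory (a minimal sketch; Explorer and PP are assumed to exist
  // in the surrounding code):
  //
  //   bool NoWrites =
  //       Explorer.checkForAllContext(PP, [](const Instruction *I) {
  //         return !I->mayWriteToMemory();
  //       });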

  /// Helper to look for \p I in the context of \p PP.
  ///
  /// The context is expanded until \p I is found or no more expansion is
  /// possible.
  ///
  /// \returns True iff \p I was found.
  bool findInContextOf(const Instruction *I, const Instruction *PP) {
    auto EIt = begin(PP), EEnd = end(PP);
    return findInContextOf(I, EIt, EEnd);
  }

  /// Helper to look for \p I in the context defined by \p EIt and \p EEnd.
  ///
  /// The context is expanded until \p I is found or no more expansion is
  /// possible.
  ///
  /// \returns True iff \p I was found.
  bool findInContextOf(const Instruction *I, iterator &EIt, iterator &EEnd) {
    bool Found = EIt.count(I);
    while (!Found && EIt != EEnd)
      Found = (++EIt).getCurrentInst() == I;
    return Found;
  }

  /// Return the next instruction that is guaranteed to be executed after \p PP.
  ///
  /// \param It              The iterator that is used to traverse the must be
  ///                        executed context.
  /// \param PP              The program point for which the next instruction
  ///                        that is guaranteed to execute is determined.
  const Instruction *
  getMustBeExecutedNextInstruction(MustBeExecutedIterator &It,
                                   const Instruction *PP);
  /// Return the previous instr. that is guaranteed to be executed before \p PP.
  ///
  /// \param It              The iterator that is used to traverse the must be
  ///                        executed context.
  /// \param PP              The program point for which the previous instr.
  ///                        that is guaranteed to execute is determined.
  const Instruction *
  getMustBeExecutedPrevInstruction(MustBeExecutedIterator &It,
                                   const Instruction *PP);

  /// Find the next join point from \p InitBB in forward direction.
  const BasicBlock *findForwardJoinPoint(const BasicBlock *InitBB);

  /// Find the next join point from \p InitBB in backward direction.
  const BasicBlock *findBackwardJoinPoint(const BasicBlock *InitBB);

  /// Parameters that limit the performed exploration. See the constructor for
  /// their meaning.
  ///{
  const bool ExploreInterBlock;
  const bool ExploreCFGForward;
  const bool ExploreCFGBackward;
  ///}

private:
  /// Getters for common CFG analyses: LoopInfo, DominatorTree, and
  /// PostDominatorTree.
  ///{
  GetterTy<const LoopInfo> LIGetter;
  GetterTy<const DominatorTree> DTGetter;
  GetterTy<const PostDominatorTree> PDTGetter;
  ///}

  /// Map to cache isGuaranteedToTransferExecutionToSuccessor results.
  DenseMap<const BasicBlock *, std::optional<bool>> BlockTransferMap;

  /// Map to cache containsIrreducibleCFG results.
  DenseMap<const Function *, std::optional<bool>> IrreducibleControlMap;

  /// Map from instructions to associated must be executed iterators.
  DenseMap<const Instruction *, std::unique_ptr<MustBeExecutedIterator>>
      InstructionIteratorMap;

  /// A unique end iterator.
  MustBeExecutedIterator EndIterator;
};
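
// Example usage of the explorer (a minimal sketch; PP is assumed to be a
// const Instruction * denoting the program point of interest):
//
//   MustBeExecutedContextExplorer Explorer(/* ExploreInterBlock */ true,
//                                          /* ExploreCFGForward */ true,
//                                          /* ExploreCFGBackward */ true);
//   for (const Instruction *I : Explorer.range(PP))
//     ; // I is guaranteed to execute whenever PP is reached.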

class MustExecutePrinterPass : public PassInfoMixin<MustExecutePrinterPass> {
  raw_ostream &OS;

public:
  MustExecutePrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

class MustBeExecutedContextPrinterPass
    : public PassInfoMixin<MustBeExecutedContextPrinterPass> {
  raw_ostream &OS;

public:
  MustBeExecutedContextPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // namespace llvm

#endif
//===- BasicAliasAnalysis.h - Stateless, local Alias Analysis ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for LLVM's primary stateless and local alias analysis.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_BASICALIASANALYSIS_H
#define LLVM_ANALYSIS_BASICALIASANALYSIS_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>
#include <optional>
#include <utility>

namespace llvm {

class AssumptionCache;
class DataLayout;
class DominatorTree;
class Function;
class GEPOperator;
class PHINode;
class SelectInst;
class TargetLibraryInfo;
class Value;

/// This is the AA result object for the basic, local, and stateless alias
/// analysis. It implements the AA query interface in an entirely stateless
/// manner. As one consequence, it is never invalidated due to IR changes.
/// While it does retain some storage, that is used as an optimization and not
/// to preserve information from query to query. However, it does retain
/// handles to various other analyses and must be recomputed when those
/// analyses are.
class BasicAAResult : public AAResultBase {
  const DataLayout &DL;
  const Function &F;
  const TargetLibraryInfo &TLI;
  AssumptionCache &AC;
  DominatorTree *DT;

public:
  BasicAAResult(const DataLayout &DL, const Function &F,
                const TargetLibraryInfo &TLI, AssumptionCache &AC,
                DominatorTree *DT = nullptr)
      : DL(DL), F(F), TLI(TLI), AC(AC), DT(DT) {}

  BasicAAResult(const BasicAAResult &Arg)
      : AAResultBase(Arg), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI), AC(Arg.AC),
        DT(Arg.DT) {}
  BasicAAResult(BasicAAResult &&Arg)
      : AAResultBase(std::move(Arg)), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI),
        AC(Arg.AC), DT(Arg.DT) {}

  /// Handle invalidation events in the new pass manager.
  bool invalidate(Function &Fn, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI);

  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);

  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                           AAQueryInfo &AAQI);

  /// Returns a bitmask that should be unconditionally applied to the ModRef
  /// info of a memory location. This allows us to eliminate Mod and/or Ref
  /// from the ModRef info based on the knowledge that the memory location
  /// points to constant and/or locally-invariant memory.
  ///
  /// If IgnoreLocals is true, then this method returns NoModRef for memory
  /// that points to a local alloca.
  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                               bool IgnoreLocals = false);

  /// Get the location associated with a pointer argument of a callsite.
  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);

  /// Returns the behavior when calling the given call site.
  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);

  /// Returns the behavior when calling the given function. For use when the
  /// call site is not known.
  MemoryEffects getMemoryEffects(const Function *Fn);

private:
  struct DecomposedGEP;

  /// Tracks instructions visited by pointsToConstantMemory.
  SmallPtrSet<const Value *, 16> Visited;

  static DecomposedGEP
  DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                         AssumptionCache *AC, DominatorTree *DT);

  /// A heuristic for aliasGEP that searches for a constant offset
  /// between the variables.
  ///
  /// GetLinearExpression has some limitations, as generally zext(%x + 1)
  /// != zext(%x) + zext(1) if the arithmetic overflows. GetLinearExpression
  /// will therefore conservatively refuse to decompose these expressions.
  /// However, we know that, for all %x, zext(%x) != zext(%x + 1), even if
  /// the addition overflows.
  bool constantOffsetHeuristic(const DecomposedGEP &GEP, LocationSize V1Size,
                               LocationSize V2Size, AssumptionCache *AC,
                               DominatorTree *DT, const AAQueryInfo &AAQI);

  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2,
                                     const AAQueryInfo &AAQI);

  void subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                              const DecomposedGEP &SrcGEP,
                              const AAQueryInfo &AAQI);

  AliasResult aliasGEP(const GEPOperator *V1, LocationSize V1Size,
                       const Value *V2, LocationSize V2Size,
                       const Value *UnderlyingV1, const Value *UnderlyingV2,
                       AAQueryInfo &AAQI);

  AliasResult aliasPHI(const PHINode *PN, LocationSize PNSize,
                       const Value *V2, LocationSize V2Size, AAQueryInfo &AAQI);

  AliasResult aliasSelect(const SelectInst *SI, LocationSize SISize,
                          const Value *V2, LocationSize V2Size,
                          AAQueryInfo &AAQI);

  AliasResult aliasCheck(const Value *V1, LocationSize V1Size, const Value *V2,
                         LocationSize V2Size, AAQueryInfo &AAQI,
                         const Instruction *CtxI);

  AliasResult aliasCheckRecursive(const Value *V1, LocationSize V1Size,
                                  const Value *V2, LocationSize V2Size,
                                  AAQueryInfo &AAQI, const Value *O1,
                                  const Value *O2);
};

/// Analysis pass providing a never-invalidated alias analysis result.
class BasicAA : public AnalysisInfoMixin<BasicAA> {
  friend AnalysisInfoMixin<BasicAA>;

  static AnalysisKey Key;

public:
  using Result = BasicAAResult;

  BasicAAResult run(Function &F, FunctionAnalysisManager &AM);
};
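
// Example of obtaining the result in the new pass manager (a minimal sketch;
// MyPass is a hypothetical function pass):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     BasicAAResult &BAA = AM.getResult<BasicAA>(F);
//     // ... issue alias queries through BAA or an AAResults aggregation ...
//     return PreservedAnalyses::all();
//   }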

/// Legacy wrapper pass to provide the BasicAAResult object.
class BasicAAWrapperPass : public FunctionPass {
  std::unique_ptr<BasicAAResult> Result;

  virtual void anchor();

public:
  static char ID;

  BasicAAWrapperPass();

  BasicAAResult &getResult() { return *Result; }
  const BasicAAResult &getResult() const { return *Result; }

  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

FunctionPass *createBasicAAWrapperPass();

} // end namespace llvm

#endif // LLVM_ANALYSIS_BASICALIASANALYSIS_H
//===-- CmpInstAnalysis.h - Utils to help fold compare insts ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file holds routines to help analyse compare instructions
// and fold them into constants or other compare instructions
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CMPINSTANALYSIS_H
#define LLVM_ANALYSIS_CMPINSTANALYSIS_H

#include "llvm/IR/InstrTypes.h"

namespace llvm {
  class Type;
  class Value;

  /// Encode an icmp predicate into a three-bit mask. These bits are carefully
  /// arranged to allow folding of expressions such as:
  ///
  ///      (A < B) | (A > B) --> (A != B)
  ///
  /// Note that this is only valid if the first and second predicates have the
  /// same sign. It is illegal to do: (A u< B) | (A s> B)
  ///
  /// Three bits are used to represent the condition, as follows:
  ///   0  A > B
  ///   1  A == B
  ///   2  A < B
  ///
  /// <=>  Value  Definition
  /// 000     0   Always false
  /// 001     1   A >  B
  /// 010     2   A == B
  /// 011     3   A >= B
  /// 100     4   A <  B
  /// 101     5   A != B
  /// 110     6   A <= B
  /// 111     7   Always true
  ///
  unsigned getICmpCode(CmpInst::Predicate Pred);

  /// This is the complement of getICmpCode. It turns a predicate code into
  /// either a constant true or false or the predicate for a new ICmp.
  /// The sign is passed in to determine which kind of predicate to use in the
  /// new ICmp instruction.
  /// A non-null return value will be a true or false constant.
  /// A null return means a new ICmp is needed; the predicate is output in Pred.
  Constant *getPredForICmpCode(unsigned Code, bool Sign, Type *OpTy,
                               CmpInst::Predicate &Pred);
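
  // Worked example for the two functions above: folding (A s< B) | (A s> B).
  // getICmpCode(ICmpInst::ICMP_SLT) == 4 (binary 100) and
  // getICmpCode(ICmpInst::ICMP_SGT) == 1 (binary 001). OR-ing the codes gives
  // 5 (binary 101), which the table maps to A != B, so
  // getPredForICmpCode(5, /*Sign=*/true, Ty, Pred) returns nullptr and sets
  // Pred to ICmpInst::ICMP_NE.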

  /// Return true if both predicates match sign or if at least one of them is an
  /// equality comparison (which is signless).
  bool predicatesFoldable(CmpInst::Predicate P1, CmpInst::Predicate P2);

  /// Similar to getICmpCode but for FCmpInst. This encodes an fcmp predicate
  /// into a four-bit mask.
  inline unsigned getFCmpCode(CmpInst::Predicate CC) {
    assert(CmpInst::FCMP_FALSE <= CC && CC <= CmpInst::FCMP_TRUE &&
           "Unexpected FCmp predicate!");
    // Take advantage of the bit pattern of CmpInst::Predicate here.
    //                                          U L G E
    static_assert(CmpInst::FCMP_FALSE == 0); // 0 0 0 0
    static_assert(CmpInst::FCMP_OEQ == 1);   // 0 0 0 1
    static_assert(CmpInst::FCMP_OGT == 2);   // 0 0 1 0
    static_assert(CmpInst::FCMP_OGE == 3);   // 0 0 1 1
    static_assert(CmpInst::FCMP_OLT == 4);   // 0 1 0 0
    static_assert(CmpInst::FCMP_OLE == 5);   // 0 1 0 1
    static_assert(CmpInst::FCMP_ONE == 6);   // 0 1 1 0
    static_assert(CmpInst::FCMP_ORD == 7);   // 0 1 1 1
    static_assert(CmpInst::FCMP_UNO == 8);   // 1 0 0 0
    static_assert(CmpInst::FCMP_UEQ == 9);   // 1 0 0 1
    static_assert(CmpInst::FCMP_UGT == 10);  // 1 0 1 0
    static_assert(CmpInst::FCMP_UGE == 11);  // 1 0 1 1
    static_assert(CmpInst::FCMP_ULT == 12);  // 1 1 0 0
    static_assert(CmpInst::FCMP_ULE == 13);  // 1 1 0 1
    static_assert(CmpInst::FCMP_UNE == 14);  // 1 1 1 0
    static_assert(CmpInst::FCMP_TRUE == 15); // 1 1 1 1
    return CC;
  }

  /// This is the complement of getFCmpCode. It turns a predicate code into
  /// either a constant true or false or the predicate for a new FCmp.
  /// A non-null return value will be a true or false constant.
  /// A null return means a new FCmp is needed; the predicate is output in Pred.
  Constant *getPredForFCmpCode(unsigned Code, Type *OpTy,
                               CmpInst::Predicate &Pred);

  /// Decompose an icmp into the form ((X & Mask) pred 0) if possible. The
  /// returned predicate is either == or !=. Returns false if decomposition
  /// fails.
  bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
                            Value *&X, APInt &Mask,
                            bool LookThroughTrunc = true);
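
  // For example, (X s< 0) is a sign-bit test, so it can decompose to
  // ((X & SignMask) != 0): the call returns true with Pred == ICmpInst::ICMP_NE
  // and Mask set to the sign bit of X's type.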

} // end namespace llvm

#endif
//===- ReplayInlineAdvisor.h - Replay Inline Advisor interface -*- C++ --*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
#define LLVM_ANALYSIS_REPLAYINLINEADVISOR_H

#include "llvm/ADT/StringSet.h"
#include "llvm/Analysis/InlineAdvisor.h"

namespace llvm {
class CallBase;
class Function;
class LLVMContext;
class Module;

struct CallSiteFormat {
  enum class Format : int {
    Line,
    LineColumn,
    LineDiscriminator,
    LineColumnDiscriminator
  };

  bool outputColumn() const {
    return OutputFormat == Format::LineColumn ||
           OutputFormat == Format::LineColumnDiscriminator;
  }

  bool outputDiscriminator() const {
    return OutputFormat == Format::LineDiscriminator ||
           OutputFormat == Format::LineColumnDiscriminator;
  }

  Format OutputFormat;
};

/// Replay Inliner Setup
struct ReplayInlinerSettings {
  enum class Scope : int { Function, Module };
  enum class Fallback : int { Original, AlwaysInline, NeverInline };

  StringRef ReplayFile;
  Scope ReplayScope;
  Fallback ReplayFallback;
  CallSiteFormat ReplayFormat;
};
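
// Example setup (a minimal sketch; the remarks file name is hypothetical):
//
//   ReplayInlinerSettings Settings;
//   Settings.ReplayFile = "inline-replay.txt";
//   Settings.ReplayScope = ReplayInlinerSettings::Scope::Module;
//   Settings.ReplayFallback = ReplayInlinerSettings::Fallback::Original;
//   Settings.ReplayFormat.OutputFormat = CallSiteFormat::Format::LineColumn;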

/// Get call site location as a string with the given format
std::string formatCallSiteLocation(DebugLoc DLoc, const CallSiteFormat &Format);

std::unique_ptr<InlineAdvisor>
getReplayInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
                       LLVMContext &Context,
                       std::unique_ptr<InlineAdvisor> OriginalAdvisor,
                       const ReplayInlinerSettings &ReplaySettings,
                       bool EmitRemarks, InlineContext IC);

/// Replay inline advisor that uses optimization remarks from the inlining of a
/// previous build to guide current inlining. This is useful for inliner tuning.
class ReplayInlineAdvisor : public InlineAdvisor {
public:
  ReplayInlineAdvisor(Module &M, FunctionAnalysisManager &FAM,
                      LLVMContext &Context,
                      std::unique_ptr<InlineAdvisor> OriginalAdvisor,
                      const ReplayInlinerSettings &ReplaySettings,
                      bool EmitRemarks, InlineContext IC);
  std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
  bool areReplayRemarksLoaded() const { return HasReplayRemarks; }

private:
  bool hasInlineAdvice(Function &F) const {
    return (ReplaySettings.ReplayScope ==
            ReplayInlinerSettings::Scope::Module) ||
           CallersToReplay.contains(F.getName());
  }
  std::unique_ptr<InlineAdvisor> OriginalAdvisor;
  bool HasReplayRemarks = false;
  const ReplayInlinerSettings ReplaySettings;
  bool EmitRemarks = false;

  StringMap<bool> InlineSitesFromRemarks;
  StringSet<> CallersToReplay;
};
} // namespace llvm
#endif // LLVM_ANALYSIS_REPLAYINLINEADVISOR_H
//===- llvm/Analysis/DDG.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Data-Dependence Graph (DDG).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DDG_H
#define LLVM_ANALYSIS_DDG_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DirectedGraph.h"
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/DependenceGraphBuilder.h"
#include "llvm/Analysis/LoopAnalysisManager.h"

namespace llvm {
class Function;
class Loop;
class LoopInfo;
class DDGNode;
class DDGEdge;
using DDGNodeBase = DGNode<DDGNode, DDGEdge>;
using DDGEdgeBase = DGEdge<DDGNode, DDGEdge>;
using DDGBase = DirectedGraph<DDGNode, DDGEdge>;
class LPMUpdater;

/// Data Dependence Graph Node
/// The graph can represent the following types of nodes:
/// 1. Single instruction node containing just one instruction.
/// 2. Multiple instruction node where two or more instructions from
///    the same basic block are merged into one node.
/// 3. Pi-block node which is a group of other DDG nodes that are part of a
///    strongly-connected component of the graph.
///    A pi-block node contains more than one single or multiple instruction
///    nodes. The root node cannot be part of a pi-block.
/// 4. Root node is a special node that connects to all components such that
///    there is always a path from it to any node in the graph.
class DDGNode : public DDGNodeBase {
public:
  using InstructionListType = SmallVectorImpl<Instruction *>;

  enum class NodeKind {
    Unknown,
    SingleInstruction,
    MultiInstruction,
    PiBlock,
    Root,
  };

  DDGNode() = delete;
  DDGNode(const NodeKind K) : Kind(K) {}
  DDGNode(const DDGNode &N) = default;
  DDGNode(DDGNode &&N) : DDGNodeBase(std::move(N)), Kind(N.Kind) {}
  virtual ~DDGNode() = 0;

  DDGNode &operator=(const DDGNode &N) {
    DGNode::operator=(N);
    Kind = N.Kind;
    return *this;
  }

  DDGNode &operator=(DDGNode &&N) {
    DGNode::operator=(std::move(N));
    Kind = N.Kind;
    return *this;
  }

  /// Getter for the kind of this node.
  NodeKind getKind() const { return Kind; }

  /// Collect a list of instructions, in \p IList, for which predicate \p Pred
  /// evaluates to true when iterating over instructions of this node. Return
  /// true if at least one instruction was collected, and false otherwise.
  bool collectInstructions(llvm::function_ref<bool(Instruction *)> const &Pred,
                           InstructionListType &IList) const;
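
  // For example, to collect the memory accesses contained in a node N
  // (a minimal sketch):
  //
  //   SmallVector<Instruction *, 8> MemInsts;
  //   N.collectInstructions(
  //       [](Instruction *I) { return I->mayReadOrWriteMemory(); }, MemInsts);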

protected:
  /// Setter for the kind of this node.
  void setKind(NodeKind K) { Kind = K; }

private:
  NodeKind Kind;
};

/// Subclass of DDGNode representing the root node of the graph.
/// There should only be one such node in a given graph.
class RootDDGNode : public DDGNode {
public:
  RootDDGNode() : DDGNode(NodeKind::Root) {}
  RootDDGNode(const RootDDGNode &N) = delete;
  RootDDGNode(RootDDGNode &&N) : DDGNode(std::move(N)) {}
  ~RootDDGNode() = default;

  /// Define classof to be able to use isa<>, cast<>, dyn_cast<>, etc.
  static bool classof(const DDGNode *N) {
    return N->getKind() == NodeKind::Root;
  }
  static bool classof(const RootDDGNode *N) { return true; }
};

/// Subclass of DDGNode representing single or multi-instruction nodes.
class SimpleDDGNode : public DDGNode {
  friend class DDGBuilder;

public:
  SimpleDDGNode() = delete;
  SimpleDDGNode(Instruction &I);
  SimpleDDGNode(const SimpleDDGNode &N);
  SimpleDDGNode(SimpleDDGNode &&N);
  ~SimpleDDGNode();

  SimpleDDGNode &operator=(const SimpleDDGNode &N) = default;

  SimpleDDGNode &operator=(SimpleDDGNode &&N) {
    DDGNode::operator=(std::move(N));
    InstList = std::move(N.InstList);
    return *this;
  }

  /// Get the list of instructions in this node.
  const InstructionListType &getInstructions() const {
    assert(!InstList.empty() && "Instruction List is empty.");
    return InstList;
  }
  InstructionListType &getInstructions() {
    return const_cast<InstructionListType &>(
        static_cast<const SimpleDDGNode *>(this)->getInstructions());
  }

  /// Get the first/last instruction in the node.
  Instruction *getFirstInstruction() const { return getInstructions().front(); }
  Instruction *getLastInstruction() const { return getInstructions().back(); }

  /// Define classof to be able to use isa<>, cast<>, dyn_cast<>, etc.
  static bool classof(const DDGNode *N) {
    return N->getKind() == NodeKind::SingleInstruction ||
           N->getKind() == NodeKind::MultiInstruction;
  }
  static bool classof(const SimpleDDGNode *N) { return true; }

private:
  /// Append the list of instructions in \p Input to this node.
  void appendInstructions(const InstructionListType &Input) {
    setKind((InstList.size() == 0 && Input.size() == 1)
                ? NodeKind::SingleInstruction
                : NodeKind::MultiInstruction);
    llvm::append_range(InstList, Input);
  }
  void appendInstructions(const SimpleDDGNode &Input) {
    appendInstructions(Input.getInstructions());
  }

  /// List of instructions associated with a single or multi-instruction node.
  SmallVector<Instruction *, 2> InstList;
};

/// Subclass of DDGNode representing a pi-block. A pi-block represents a group
/// of DDG nodes that are part of a strongly-connected component of the graph.
/// Replacing all the SCCs with pi-blocks results in an acyclic representation
/// of the DDG. For example if we have:
/// {a -> b}, {b -> c, d}, {c -> a}
/// the cycle a -> b -> c -> a is abstracted into a pi-block "p" as follows:
/// {p -> d} with "p" containing: {a -> b}, {b -> c}, {c -> a}
class PiBlockDDGNode : public DDGNode {
public:
  using PiNodeList = SmallVector<DDGNode *, 4>;

  PiBlockDDGNode() = delete;
  PiBlockDDGNode(const PiNodeList &List);
  PiBlockDDGNode(const PiBlockDDGNode &N);
  PiBlockDDGNode(PiBlockDDGNode &&N);
  ~PiBlockDDGNode();

  PiBlockDDGNode &operator=(const PiBlockDDGNode &N) = default;

  PiBlockDDGNode &operator=(PiBlockDDGNode &&N) {
    DDGNode::operator=(std::move(N));
    NodeList = std::move(N.NodeList);
    return *this;
  }

  /// Get the list of nodes in this pi-block.
  const PiNodeList &getNodes() const {
    assert(!NodeList.empty() && "Node list is empty.");
    return NodeList;
  }
  PiNodeList &getNodes() {
    return const_cast<PiNodeList &>(
        static_cast<const PiBlockDDGNode *>(this)->getNodes());
  }

  /// Define classof to be able to use isa<>, cast<>, dyn_cast<>, etc.
  static bool classof(const DDGNode *N) {
    return N->getKind() == NodeKind::PiBlock;
  }

private:
  /// List of nodes in this pi-block.
  PiNodeList NodeList;
};

/// Data Dependency Graph Edge.
/// An edge in the DDG can represent a def-use relationship or
/// a memory dependence based on the result of DependenceAnalysis.
/// A rooted edge connects the root node to one of the components
/// of the graph.
class DDGEdge : public DDGEdgeBase {
public:
  /// The kind of edge in the DDG
  enum class EdgeKind {
    Unknown,
    RegisterDefUse,
    MemoryDependence,
    Rooted,
    Last = Rooted // Must be equal to the largest enum value.
  };

  explicit DDGEdge(DDGNode &N) = delete;
  DDGEdge(DDGNode &N, EdgeKind K) : DDGEdgeBase(N), Kind(K) {}
  DDGEdge(const DDGEdge &E) : DDGEdgeBase(E), Kind(E.getKind()) {}
  DDGEdge(DDGEdge &&E) : DDGEdgeBase(std::move(E)), Kind(E.Kind) {}
  DDGEdge &operator=(const DDGEdge &E) = default;

  DDGEdge &operator=(DDGEdge &&E) {
    DDGEdgeBase::operator=(std::move(E));
    Kind = E.Kind;
    return *this;
  }

  /// Get the edge kind
  EdgeKind getKind() const { return Kind; };

  /// Return true if this is a def-use edge, and false otherwise.
  bool isDefUse() const { return Kind == EdgeKind::RegisterDefUse; }

  /// Return true if this is a memory dependence edge, and false otherwise.
  bool isMemoryDependence() const { return Kind == EdgeKind::MemoryDependence; }

  /// Return true if this is an edge stemming from the root node, and false
  /// otherwise.
  bool isRooted() const { return Kind == EdgeKind::Rooted; }

private:
  EdgeKind Kind;
};

/// Encapsulate some common data and functionality needed for different
/// variations of data dependence graphs.
template <typename NodeType> class DependenceGraphInfo {
public:
  using DependenceList = SmallVector<std::unique_ptr<Dependence>, 1>;

  DependenceGraphInfo() = delete;
  DependenceGraphInfo(const DependenceGraphInfo &G) = delete;
  DependenceGraphInfo(const std::string &N, const DependenceInfo &DepInfo)
      : Name(N), DI(DepInfo), Root(nullptr) {}
  DependenceGraphInfo(DependenceGraphInfo &&G)
      : Name(std::move(G.Name)), DI(std::move(G.DI)), Root(G.Root) {}
  virtual ~DependenceGraphInfo() = default;

  /// Return the label that is used to name this graph.
  StringRef getName() const { return Name; }

  /// Return the root node of the graph.
  NodeType &getRoot() const {
    assert(Root && "Root node is not available yet. Graph construction may "
                   "still be in progress\n");
    return *Root;
  }

  /// Collect all the data dependency infos coming from any pair of memory
  /// accesses from \p Src to \p Dst, and store them into \p Deps. Return true
  /// if a dependence exists, and false otherwise.
  bool getDependencies(const NodeType &Src, const NodeType &Dst,
                       DependenceList &Deps) const;

  /// Return a string representing the type of dependence that the dependence
  /// analysis identified between the two given nodes. This function assumes
  /// that there is a memory dependence between the given two nodes.
  std::string getDependenceString(const NodeType &Src,
                                  const NodeType &Dst) const;

protected:
  // Name of the graph.
  std::string Name;

  // Store a copy of DependenceInfo in the graph, so that individual memory
  // dependencies don't need to be stored. Instead, when a dependence is
  // queried, it is recomputed using @DI.
  const DependenceInfo DI;

  // A special node in the graph that has an edge to every connected component of
  // the graph, to ensure all nodes are reachable in a graph walk.
  NodeType *Root = nullptr;
};

using DDGInfo = DependenceGraphInfo<DDGNode>;

/// Data Dependency Graph
class DataDependenceGraph : public DDGBase, public DDGInfo {
  friend AbstractDependenceGraphBuilder<DataDependenceGraph>;
  friend class DDGBuilder;

public:
  using NodeType = DDGNode;
  using EdgeType = DDGEdge;

  DataDependenceGraph() = delete;
  DataDependenceGraph(const DataDependenceGraph &G) = delete;
  DataDependenceGraph(DataDependenceGraph &&G)
      : DDGBase(std::move(G)), DDGInfo(std::move(G)) {}
  DataDependenceGraph(Function &F, DependenceInfo &DI);
  DataDependenceGraph(Loop &L, LoopInfo &LI, DependenceInfo &DI);
  ~DataDependenceGraph();

  /// If node \p N belongs to a pi-block return a pointer to the pi-block,
  /// otherwise return null.
  const PiBlockDDGNode *getPiBlock(const NodeType &N) const;

protected:
  /// Add node \p N to the graph, if it's not added yet, and keep track of the
  /// root node as well as pi-blocks and their members. Return true if node is
  /// successfully added.
  bool addNode(NodeType &N);

private:
  using PiBlockMapType = DenseMap<const NodeType *, const PiBlockDDGNode *>;

  /// Mapping from graph nodes to their containing pi-blocks. If a node is not
  /// part of a pi-block, it will not appear in this map.
  PiBlockMapType PiBlockMap;
};
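
// Example of building and walking a loop DDG (a minimal sketch; L, LI, and DI
// are assumed to be provided by the surrounding pass):
//
//   DataDependenceGraph G(L, LI, DI);
//   for (DDGNode *N : G)
//     if (const PiBlockDDGNode *Pi = G.getPiBlock(*N))
//       ; // N is part of the SCC abstracted by the pi-block Pi.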

/// Concrete implementation of a pure data dependence graph builder. This class
/// provides custom implementation for the pure-virtual functions used in the
/// generic dependence graph build algorithm.
///
/// For information about time complexity of the build algorithm see the
/// comments near the declaration of AbstractDependenceGraphBuilder.
class DDGBuilder : public AbstractDependenceGraphBuilder<DataDependenceGraph> {
public:
  DDGBuilder(DataDependenceGraph &G, DependenceInfo &D,
             const BasicBlockListType &BBs)
      : AbstractDependenceGraphBuilder(G, D, BBs) {}
  DDGNode &createRootNode() final {
    auto *RN = new RootDDGNode();
    assert(RN && "Failed to allocate memory for DDG root node.");
    Graph.addNode(*RN);
    return *RN;
  }
  DDGNode &createFineGrainedNode(Instruction &I) final {
    auto *SN = new SimpleDDGNode(I);
    assert(SN && "Failed to allocate memory for simple DDG node.");
    Graph.addNode(*SN);
    return *SN;
  }
  DDGNode &createPiBlock(const NodeListType &L) final {
    auto *Pi = new PiBlockDDGNode(L);
    assert(Pi && "Failed to allocate memory for pi-block node.");
    Graph.addNode(*Pi);
    return *Pi;
  }
  DDGEdge &createDefUseEdge(DDGNode &Src, DDGNode &Tgt) final {
    auto *E = new DDGEdge(Tgt, DDGEdge::EdgeKind::RegisterDefUse);
    assert(E && "Failed to allocate memory for edge");
    Graph.connect(Src, Tgt, *E);
    return *E;
  }
  DDGEdge &createMemoryEdge(DDGNode &Src, DDGNode &Tgt) final {
    auto *E = new DDGEdge(Tgt, DDGEdge::EdgeKind::MemoryDependence);
    assert(E && "Failed to allocate memory for edge");
    Graph.connect(Src, Tgt, *E);
    return *E;
  }
  DDGEdge &createRootedEdge(DDGNode &Src, DDGNode &Tgt) final {
    auto *E = new DDGEdge(Tgt, DDGEdge::EdgeKind::Rooted);
    assert(E && "Failed to allocate memory for edge");
    assert(isa<RootDDGNode>(Src) && "Expected root node");
    Graph.connect(Src, Tgt, *E);
    return *E;
  }

  const NodeListType &getNodesInPiBlock(const DDGNode &N) final {
    auto *PiNode = dyn_cast<const PiBlockDDGNode>(&N);
    assert(PiNode && "Expected a pi-block node.");
    return PiNode->getNodes();
  }

  /// Return true if the two nodes \p Src and \p Tgt are both simple nodes and
  /// the consecutive instructions after merging belong to the same basic block.
  bool areNodesMergeable(const DDGNode &Src, const DDGNode &Tgt) const final;
  void mergeNodes(DDGNode &Src, DDGNode &Tgt) final;
  bool shouldSimplify() const final;
  bool shouldCreatePiBlocks() const final;
};

raw_ostream &operator<<(raw_ostream &OS, const DDGNode &N);
raw_ostream &operator<<(raw_ostream &OS, const DDGNode::NodeKind K);
raw_ostream &operator<<(raw_ostream &OS, const DDGEdge &E);
raw_ostream &operator<<(raw_ostream &OS, const DDGEdge::EdgeKind K);
raw_ostream &operator<<(raw_ostream &OS, const DataDependenceGraph &G);

//===--------------------------------------------------------------------===//
// DDG Analysis Passes
//===--------------------------------------------------------------------===//

/// Analysis pass that builds the DDG for a loop.
class DDGAnalysis : public AnalysisInfoMixin<DDGAnalysis> {
public:
  using Result = std::unique_ptr<DataDependenceGraph>;
  Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);

private:
  friend AnalysisInfoMixin<DDGAnalysis>;
  static AnalysisKey Key;
};

/// Textual printer pass for the DDG of a loop.
class DDGAnalysisPrinterPass : public PassInfoMixin<DDGAnalysisPrinterPass> {
public:
  explicit DDGAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);

private:
  raw_ostream &OS;
};

//===--------------------------------------------------------------------===//
// DependenceGraphInfo Implementation
//===--------------------------------------------------------------------===//

template <typename NodeType>
bool DependenceGraphInfo<NodeType>::getDependencies(
    const NodeType &Src, const NodeType &Dst, DependenceList &Deps) const {
  assert(Deps.empty() && "Expected empty output list at the start.");

  // List of memory access instructions from src and dst nodes.
  SmallVector<Instruction *, 8> SrcIList, DstIList;
  auto isMemoryAccess = [](const Instruction *I) {
    return I->mayReadOrWriteMemory();
  };
  Src.collectInstructions(isMemoryAccess, SrcIList);
  Dst.collectInstructions(isMemoryAccess, DstIList);

  for (auto *SrcI : SrcIList)
    for (auto *DstI : DstIList)
      if (auto Dep =
              const_cast<DependenceInfo *>(&DI)->depends(SrcI, DstI, true))
        Deps.push_back(std::move(Dep));

  return !Deps.empty();
}

template <typename NodeType>
std::string
DependenceGraphInfo<NodeType>::getDependenceString(const NodeType &Src,
                                                   const NodeType &Dst) const {
  std::string Str;
  raw_string_ostream OS(Str);
  DependenceList Deps;
  if (!getDependencies(Src, Dst, Deps))
    return OS.str();
  interleaveComma(Deps, OS, [&](const std::unique_ptr<Dependence> &D) {
    D->dump(OS);
    // Remove the extra new-line character printed by the dump
    // method
    if (OS.str().back() == '\n')
      OS.str().pop_back();
  });

  return OS.str();
}

//===--------------------------------------------------------------------===//
// GraphTraits specializations for the DDG
//===--------------------------------------------------------------------===//

/// Non-const versions of the graph trait specializations for DDG.
template <> struct GraphTraits<DDGNode *> {
  using NodeRef = DDGNode *;

  static DDGNode *DDGGetTargetNode(DGEdge<DDGNode, DDGEdge> *P) {
    return &P->getTargetNode();
  }

  // Provide a mapped iterator so that the GraphTrait-based implementations can
  // find the target nodes without having to explicitly go through the edges.
  using ChildIteratorType =
      mapped_iterator<DDGNode::iterator, decltype(&DDGGetTargetNode)>;
  using ChildEdgeIteratorType = DDGNode::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N->begin(), &DDGGetTargetNode);
  }
  static ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType(N->end(), &DDGGetTargetNode);
  }

  static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
    return N->begin();
  }
  static ChildEdgeIteratorType child_edge_end(NodeRef N) { return N->end(); }
};

template <>
struct GraphTraits<DataDependenceGraph *> : public GraphTraits<DDGNode *> {
  using nodes_iterator = DataDependenceGraph::iterator;
  static NodeRef getEntryNode(DataDependenceGraph *DG) {
    return &DG->getRoot();
  }
  static nodes_iterator nodes_begin(DataDependenceGraph *DG) {
    return DG->begin();
  }
  static nodes_iterator nodes_end(DataDependenceGraph *DG) { return DG->end(); }
};
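
// With the specializations above in place, the generic graph algorithms apply
// to the DDG, e.g. (a minimal sketch, assuming llvm/ADT/DepthFirstIterator.h
// is included):
//
//   for (DDGNode *N : depth_first(&G))
//     ; // Visits nodes reachable from the root in depth-first order.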

/// Const versions of the graph trait specializations for DDG.
template <> struct GraphTraits<const DDGNode *> {
  using NodeRef = const DDGNode *;

  static const DDGNode *DDGGetTargetNode(const DGEdge<DDGNode, DDGEdge> *P) {
    return &P->getTargetNode();
  }

  // Provide a mapped iterator so that the GraphTrait-based implementations can
  // find the target nodes without having to explicitly go through the edges.
  using ChildIteratorType =
      mapped_iterator<DDGNode::const_iterator, decltype(&DDGGetTargetNode)>;
  using ChildEdgeIteratorType = DDGNode::const_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N->begin(), &DDGGetTargetNode);
  }
  static ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType(N->end(), &DDGGetTargetNode);
  }

  static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
    return N->begin();
  }
  static ChildEdgeIteratorType child_edge_end(NodeRef N) { return N->end(); }
};

template <>
struct GraphTraits<const DataDependenceGraph *>
    : public GraphTraits<const DDGNode *> {
  using nodes_iterator = DataDependenceGraph::const_iterator;
  static NodeRef getEntryNode(const DataDependenceGraph *DG) {
    return &DG->getRoot();
  }
  static nodes_iterator nodes_begin(const DataDependenceGraph *DG) {
    return DG->begin();
  }
  static nodes_iterator nodes_end(const DataDependenceGraph *DG) {
    return DG->end();
  }
};

} // namespace llvm

#endif // LLVM_ANALYSIS_DDG_H
//===-- llvm/Analysis/Lint.h - LLVM IR Lint ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines lint interfaces that can be used for some validation of
// input to the system, and for checking that transformations haven't done
// something bad. In contrast to the Verifier, the Lint checker checks for
// undefined behavior or constructions with likely unintended behavior.
//
// To see what specifically is checked, look at Lint.cpp
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LINT_H
#define LLVM_ANALYSIS_LINT_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;
class Function;

/// Lint a module.
///
/// This should only be used for debugging, because it plays games with
/// PassManagers and other internals.
void lintModule(const Module &M);

/// Lint a function.
void lintFunction(const Function &F);
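
// For example, while debugging a transformation one might call
//
//   lintFunction(F);
//
// right after mutating F to flag likely-undefined constructs early.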

class LintPass : public PassInfoMixin<LintPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // namespace llvm

#endif // LLVM_ANALYSIS_LINT_H
//==- CFLSteensAliasAnalysis.h - Unification-based Alias Analysis -*- C++-*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for LLVM's unification-based alias analysis
/// implemented with CFL graph reachability.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
#define LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFLAliasAnalysisUtils.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <forward_list>
#include <memory>

namespace llvm {

class Function;
class TargetLibraryInfo;

namespace cflaa {

struct AliasSummary;

} // end namespace cflaa

class CFLSteensAAResult : public AAResultBase<CFLSteensAAResult> {
  friend AAResultBase<CFLSteensAAResult>;

  class FunctionInfo;

public:
  explicit CFLSteensAAResult(
      std::function<const TargetLibraryInfo &(Function &)> GetTLI);
  CFLSteensAAResult(CFLSteensAAResult &&Arg);
  ~CFLSteensAAResult();

  /// Handle invalidation events from the new pass manager.
  ///
  /// By definition, this result is stateless and so remains valid.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }

  /// Inserts the given Function into the cache.
  void scan(Function *Fn);

  void evict(Function *Fn);

  /// Ensures that the given function is available in the cache.
  /// Returns the appropriate entry from the cache.
  const Optional<FunctionInfo> &ensureCached(Function *Fn);

  /// Get the alias summary for the given function
  /// Return nullptr if the summary is not found or not available
  const cflaa::AliasSummary *getAliasSummary(Function &Fn);

  AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI) {
    if (LocA.Ptr == LocB.Ptr)
      return AliasResult::MustAlias;

    // Comparisons between global variables and other constants should be
    // handled by BasicAA.
    // CFLSteensAA may report NoAlias when comparing a GlobalValue and
    // ConstantExpr, but every query needs to have at least one Value tied to a
    // Function, and neither GlobalValues nor ConstantExprs are.
    if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr))
      return AAResultBase::alias(LocA, LocB, AAQI);

    AliasResult QueryResult = query(LocA, LocB);
    if (QueryResult == AliasResult::MayAlias)
      return AAResultBase::alias(LocA, LocB, AAQI);

    return QueryResult;
  }

private:
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Cached mapping of Functions to their StratifiedSets.
  /// If a function's sets are currently being built, it is marked
  /// in the cache as an Optional without a value. This way, if we
  /// have any kind of recursion, it is discernible from a function
  /// that simply has empty sets.
  DenseMap<Function *, Optional<FunctionInfo>> Cache;
  std::forward_list<cflaa::FunctionHandle<CFLSteensAAResult>> Handles;

  FunctionInfo buildSetsFrom(Function *F);
};

/// Analysis pass providing a never-invalidated alias analysis result.
///
/// FIXME: We really should refactor CFL to use the analysis more heavily, and
/// in particular to leverage invalidation to trigger re-computation of sets.
class CFLSteensAA : public AnalysisInfoMixin<CFLSteensAA> {
  friend AnalysisInfoMixin<CFLSteensAA>;

  static AnalysisKey Key;

public:
  using Result = CFLSteensAAResult;

  CFLSteensAAResult run(Function &F, FunctionAnalysisManager &AM);
};
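
// Example (illustrative sketch): a new-pass-manager pass could fetch the
// CFL-Steensgaard result and issue an alias query. `ExamplePass`, `P0`, and
// `P1` are hypothetical; only the CFLSteensAA and MemoryLocation APIs are
// assumed.
//
// \code
//   PreservedAnalyses ExamplePass::run(Function &F,
//                                      FunctionAnalysisManager &AM) {
//     CFLSteensAAResult &AA = AM.getResult<CFLSteensAA>(F);
//     AliasResult AR = AA.query(MemoryLocation::getBeforeOrAfter(P0),
//                               MemoryLocation::getBeforeOrAfter(P1));
//     if (AR == AliasResult::NoAlias) {
//       // P0 and P1 are provably disjoint under this analysis.
//     }
//     return PreservedAnalyses::all();
//   }
// \endcode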

/// Legacy wrapper pass to provide the CFLSteensAAResult object.
class CFLSteensAAWrapperPass : public ImmutablePass {
  std::unique_ptr<CFLSteensAAResult> Result;

public:
  static char ID;

  CFLSteensAAWrapperPass();

  CFLSteensAAResult &getResult() { return *Result; }
  const CFLSteensAAResult &getResult() const { return *Result; }

  void initializePass() override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

// createCFLSteensAAWrapperPass - This pass implements a set-based approach to
// alias analysis.
ImmutablePass *createCFLSteensAAWrapperPass();

} // end namespace llvm

#endif // LLVM_ANALYSIS_CFLSTEENSALIASANALYSIS_H
//===- GlobalsModRef.h - Simple Mod/Ref AA for Globals ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for a simple mod/ref and alias analysis over globals.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_GLOBALSMODREF_H
#define LLVM_ANALYSIS_GLOBALSMODREF_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <list>

namespace llvm {
class CallGraph;
class Function;

/// An alias analysis result set for globals.
///
/// This focuses on handling aliasing properties of globals and interprocedural
/// function call mod/ref information.
class GlobalsAAResult : public AAResultBase {
  class FunctionInfo;

  const DataLayout &DL;
  std::function<const TargetLibraryInfo &(Function &F)> GetTLI;

  /// The globals that do not have their addresses taken.
  SmallPtrSet<const GlobalValue *, 8> NonAddressTakenGlobals;

  /// Whether there are functions with local linkage that may modify globals.
  bool UnknownFunctionsWithLocalLinkage = false;

  /// IndirectGlobals - The memory pointed to by this global is known to be
  /// 'owned' by the global.
  SmallPtrSet<const GlobalValue *, 8> IndirectGlobals;

  /// AllocsForIndirectGlobals - If an instruction allocates memory for an
  /// indirect global, this map indicates which one.
  DenseMap<const Value *, const GlobalValue *> AllocsForIndirectGlobals;

  /// For each function, keep track of what globals are modified or read.
  DenseMap<const Function *, FunctionInfo> FunctionInfos;

  /// A map of functions to SCC. The SCCs are described by a simple integer
  /// ID that is only useful for comparing for equality (are two functions
  /// in the same SCC or not?)
  DenseMap<const Function *, unsigned> FunctionToSCCMap;

  /// Handle to clear this analysis on deletion of values.
  struct DeletionCallbackHandle final : CallbackVH {
    GlobalsAAResult *GAR;
    std::list<DeletionCallbackHandle>::iterator I;

    DeletionCallbackHandle(GlobalsAAResult &GAR, Value *V)
        : CallbackVH(V), GAR(&GAR) {}

    void deleted() override;
  };

  /// List of callbacks for globals being tracked by this analysis. Note that
  /// these objects are quite large, but we only anticipate having one per
  /// global tracked by this analysis. There are numerous optimizations we
  /// could perform to the memory utilization here if this becomes a problem.
  std::list<DeletionCallbackHandle> Handles;

  explicit GlobalsAAResult(
      const DataLayout &DL,
      std::function<const TargetLibraryInfo &(Function &F)> GetTLI);

  friend struct RecomputeGlobalsAAPass;

public:
  GlobalsAAResult(GlobalsAAResult &&Arg);
  ~GlobalsAAResult();

  bool invalidate(Module &M, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &);

  static GlobalsAAResult
  analyzeModule(Module &M,
                std::function<const TargetLibraryInfo &(Function &F)> GetTLI,
                CallGraph &CG);

  //------------------------------------------------
  // Implement the AliasAnalysis API
  //
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI);

  using AAResultBase::getModRefInfo;
  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);

  using AAResultBase::getMemoryEffects;
  /// getMemoryEffects - Return the behavior of the specified function if
  /// called from the specified call site.  The call site may be null in which
  /// case the most generic behavior of this function should be returned.
  MemoryEffects getMemoryEffects(const Function *F);

private:
  FunctionInfo *getFunctionInfo(const Function *F);

  void AnalyzeGlobals(Module &M);
  void AnalyzeCallGraph(CallGraph &CG, Module &M);
  bool AnalyzeUsesOfPointer(Value *V,
                            SmallPtrSetImpl<Function *> *Readers = nullptr,
                            SmallPtrSetImpl<Function *> *Writers = nullptr,
                            GlobalValue *OkayStoreDest = nullptr);
  bool AnalyzeIndirectGlobalMemory(GlobalVariable *GV);
  void CollectSCCMembership(CallGraph &CG);

  bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
  ModRefInfo getModRefInfoForArgument(const CallBase *Call,
                                      const GlobalValue *GV, AAQueryInfo &AAQI);
};

/// Analysis pass providing a never-invalidated alias analysis result.
class GlobalsAA : public AnalysisInfoMixin<GlobalsAA> {
  friend AnalysisInfoMixin<GlobalsAA>;
  static AnalysisKey Key;

public:
  typedef GlobalsAAResult Result;

  GlobalsAAResult run(Module &M, ModuleAnalysisManager &AM);
};
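
// Example (illustrative sketch): retrieving GlobalsAA from a module pass and
// checking whether a function only reads memory. `ExamplePass` is
// hypothetical; GlobalsAA, GlobalsAAResult, and MemoryEffects are the APIs
// declared above.
//
// \code
//   PreservedAnalyses ExamplePass::run(Module &M,
//                                      ModuleAnalysisManager &MAM) {
//     GlobalsAAResult &GAR = MAM.getResult<GlobalsAA>(M);
//     for (Function &F : M) {
//       MemoryEffects ME = GAR.getMemoryEffects(&F);
//       if (ME.onlyReadsMemory()) {
//         // Calls to F can be treated as readonly by clients of this result.
//       }
//     }
//     return PreservedAnalyses::all();
//   }
// \endcode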

struct RecomputeGlobalsAAPass : PassInfoMixin<RecomputeGlobalsAAPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// Legacy wrapper pass to provide the GlobalsAAResult object.
class GlobalsAAWrapperPass : public ModulePass {
  std::unique_ptr<GlobalsAAResult> Result;

public:
  static char ID;

  GlobalsAAWrapperPass();

  GlobalsAAResult &getResult() { return *Result; }
  const GlobalsAAResult &getResult() const { return *Result; }

  bool runOnModule(Module &M) override;
  bool doFinalization(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

//===--------------------------------------------------------------------===//
//
// createGlobalsAAWrapperPass - This pass provides alias and mod/ref info for
// global values that do not have their addresses taken.
//
ModulePass *createGlobalsAAWrapperPass();
}

#endif
//===- ObjCARCInstKind.h - ARC instruction equivalence classes --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_OBJCARCINSTKIND_H
#define LLVM_ANALYSIS_OBJCARCINSTKIND_H

#include "llvm/IR/Instructions.h"

namespace llvm {
namespace objcarc {

/// \enum ARCInstKind
///
/// Equivalence classes of instructions in the ARC Model.
///
/// Since we do not have "instructions" to represent ARC concepts in LLVM IR,
/// we instead operate on equivalence classes of instructions.
///
/// TODO: This should be split into two enums: a runtime entry point enum
/// (possibly united with the ARCRuntimeEntrypoint class) and an enum that deals
/// with effects of instructions in the ARC model (which would handle the notion
/// of a User or CallOrUser).
enum class ARCInstKind {
  Retain,                   ///< objc_retain
  RetainRV,                 ///< objc_retainAutoreleasedReturnValue
  UnsafeClaimRV,            ///< objc_unsafeClaimAutoreleasedReturnValue
  RetainBlock,              ///< objc_retainBlock
  Release,                  ///< objc_release
  Autorelease,              ///< objc_autorelease
  AutoreleaseRV,            ///< objc_autoreleaseReturnValue
  AutoreleasepoolPush,      ///< objc_autoreleasePoolPush
  AutoreleasepoolPop,       ///< objc_autoreleasePoolPop
  NoopCast,                 ///< objc_retainedObject, etc.
  FusedRetainAutorelease,   ///< objc_retainAutorelease
  FusedRetainAutoreleaseRV, ///< objc_retainAutoreleaseReturnValue
  LoadWeakRetained,         ///< objc_loadWeakRetained (primitive)
  StoreWeak,                ///< objc_storeWeak (primitive)
  InitWeak,                 ///< objc_initWeak (derived)
  LoadWeak,                 ///< objc_loadWeak (derived)
  MoveWeak,                 ///< objc_moveWeak (derived)
  CopyWeak,                 ///< objc_copyWeak (derived)
  DestroyWeak,              ///< objc_destroyWeak (derived)
  StoreStrong,              ///< objc_storeStrong (derived)
  IntrinsicUser,            ///< llvm.objc.clang.arc.use
  CallOrUser,               ///< could call objc_release and/or "use" pointers
  Call,                     ///< could call objc_release
  User,                     ///< could "use" a pointer
  None                      ///< anything that is inert from an ARC perspective.
};

raw_ostream &operator<<(raw_ostream &OS, const ARCInstKind Class);

/// Test if the given class is a kind of user.
bool IsUser(ARCInstKind Class);

/// Test if the given class is objc_retain or equivalent.
bool IsRetain(ARCInstKind Class);

/// Test if the given class is objc_autorelease or equivalent.
bool IsAutorelease(ARCInstKind Class);

/// Test if the given class represents instructions which return their
/// argument verbatim.
bool IsForwarding(ARCInstKind Class);

/// Test if the given class represents instructions which do nothing if
/// passed a null pointer.
bool IsNoopOnNull(ARCInstKind Class);

/// Test if the given class represents instructions which do nothing if
/// passed a global variable.
bool IsNoopOnGlobal(ARCInstKind Class);

/// Test if the given class represents instructions which are always safe
/// to mark with the "tail" keyword.
bool IsAlwaysTail(ARCInstKind Class);

/// Test if the given class represents instructions which are never safe
/// to mark with the "tail" keyword.
bool IsNeverTail(ARCInstKind Class);

/// Test if the given class represents instructions which are always safe
/// to mark with the nounwind attribute.
bool IsNoThrow(ARCInstKind Class);

/// Test whether the given instruction can autorelease any pointer or cause an
/// autoreleasepool pop.
bool CanInterruptRV(ARCInstKind Class);

/// Determine if F is one of the special known Functions.  If it isn't,
/// return ARCInstKind::CallOrUser.
ARCInstKind GetFunctionClass(const Function *F);

/// Determine which objc runtime call instruction class V belongs to.
///
/// This is similar to GetARCInstKind except that it only detects objc
/// runtime calls. This allows it to be faster.
///
inline ARCInstKind GetBasicARCInstKind(const Value *V) {
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (const Function *F = CI->getCalledFunction())
      return GetFunctionClass(F);
    // Otherwise, be conservative.
    return ARCInstKind::CallOrUser;
  }

  // Otherwise, be conservative.
  return isa<InvokeInst>(V) ? ARCInstKind::CallOrUser : ARCInstKind::User;
}
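
// Example (illustrative sketch): a quick scan that counts objc_retain calls
// in a function using the fast classifier above. Requires
// "llvm/IR/InstIterator.h" for instructions(); `F` is a hypothetical
// Function reference.
//
// \code
//   unsigned NumRetains = 0;
//   for (const Instruction &I : instructions(F))
//     if (GetBasicARCInstKind(&I) == ARCInstKind::Retain)
//       ++NumRetains;
// \endcode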

/// Map V to its ARCInstKind equivalence class.
ARCInstKind GetARCInstKind(const Value *V);

/// Returns false if conservatively we can prove that any instruction mapped to
/// this kind can not decrement ref counts. Returns true otherwise.
bool CanDecrementRefCount(ARCInstKind Kind);

} // end namespace objcarc
} // end namespace llvm

#endif
//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class Operator;
class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
struct SimplifyQuery;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      bool UseInstrInfo = true);

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the demanded elements in the vector.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);
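
// Example (illustrative sketch): using the value-returning overload to prove
// alignment-style facts about a value. `V` and `DL` are hypothetical; only
// computeKnownBits and the KnownBits helpers are assumed.
//
// \code
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.isNonNegative()) {
//     // The sign bit of V is known to be zero.
//   }
//   unsigned MinTZ = Known.countMinTrailingZeros();
//   // V is known to be a multiple of (1 << MinTZ).
// \endcode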

/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
/// \p KnownOne the set of bits that are known to be one
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Merge bits known from assumes into Known.
void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                unsigned Depth, const SimplifyQuery &Q);

/// Using the KnownBits of LHS and RHS, compute the known bits for a logic op
/// (and/xor/or).
KnownBits analyzeKnownBitsFromAndXorOr(
    const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
    unsigned Depth, const DataLayout &DL, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true);

/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                         const DataLayout &DL, AssumptionCache *AC = nullptr,
                         const Instruction *CxtI = nullptr,
                         const DominatorTree *DT = nullptr,
                         bool UseInstrInfo = true);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                    AssumptionCache *AC = nullptr,
                    const Instruction *CxtI = nullptr,
                    const DominatorTree *DT = nullptr,
                    bool UseInstrInfo = true);

/// Return true if the two given values are negations of each other.
/// Currently recognizes these Value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const DataLayout &DL,
                        unsigned Depth = 0, AssumptionCache *AC = nullptr,
                        const Instruction *CxtI = nullptr,
                        const DominatorTree *DT = nullptr,
                        bool UseInstrInfo = true);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const DataLayout &DL,
                       unsigned Depth = 0, AssumptionCache *AC = nullptr,
                       const Instruction *CxtI = nullptr,
                       const DominatorTree *DT = nullptr,
                       bool UseInstrInfo = true);

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);

/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e.  x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID.  Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
///
/// If \p LookThroughSrc is true, consider the input value when computing the
/// mask.
///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                Value *RHS,
                                                bool LookThroughSrc = true);
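
// Example (illustrative sketch): folding an fcmp into a class-test form.
// `FC` is a hypothetical FCmpInst* and `F` its parent function; the returned
// pair corresponds to an equivalent llvm.is.fpclass(Src, Mask) query.
//
// \code
//   auto [Src, Mask] = fcmpToClassTest(FC->getPredicate(), F,
//                                      FC->getOperand(0), FC->getOperand(1));
//   if (Src) {
//     // FC is equivalent to testing Src against the FPClassTest Mask.
//   }
// \endcode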

struct KnownFPClass {
  /// Floating-point classes the value could be one of.
  FPClassTest KnownFPClasses = fcAllFlags;

  /// std::nullopt if the sign bit is unknown, true if the sign bit is
  /// definitely set or false if the sign bit is definitely unset.
  std::optional<bool> SignBit;

  /// Return true if it's known this can never be one of the mask entries.
  bool isKnownNever(FPClassTest Mask) const {
    return (KnownFPClasses & Mask) == fcNone;
  }

  bool isUnknown() const {
    return KnownFPClasses == fcAllFlags && !SignBit;
  }

  /// Return true if it's known this can never be a nan.
  bool isKnownNeverNaN() const {
    return isKnownNever(fcNan);
  }

  /// Return true if it's known this can never be an infinity.
  bool isKnownNeverInfinity() const {
    return isKnownNever(fcInf);
  }

  /// Return true if it's known this can never be +infinity.
  bool isKnownNeverPosInfinity() const {
    return isKnownNever(fcPosInf);
  }

  /// Return true if it's known this can never be -infinity.
  bool isKnownNeverNegInfinity() const {
    return isKnownNever(fcNegInf);
  }

  /// Return true if it's known this can never be a subnormal
  bool isKnownNeverSubnormal() const {
    return isKnownNever(fcSubnormal);
  }

  /// Return true if it's known this can never be a positive subnormal
  bool isKnownNeverPosSubnormal() const {
    return isKnownNever(fcPosSubnormal);
  }

  /// Return true if it's known this can never be a negative subnormal
  bool isKnownNeverNegSubnormal() const {
    return isKnownNever(fcNegSubnormal);
  }

  /// Return true if it's known this can never be a zero. This means a literal
  /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
  bool isKnownNeverZero() const {
    return isKnownNever(fcZero);
  }

  /// Return true if it's known this can never be a literal positive zero.
  bool isKnownNeverPosZero() const {
    return isKnownNever(fcPosZero);
  }

  /// Return true if it's known this can never be a negative zero. This means a
  /// literal -0 and does not include denormal inputs implicitly treated as -0.
  bool isKnownNeverNegZero() const {
    return isKnownNever(fcNegZero);
  }

  /// Return true if it's known this can never be interpreted as a zero. This
  /// extends isKnownNeverZero to cover the case where the assumed
  /// floating-point mode for the function interprets denormals as zero.
  bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
  bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
  bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;

  static constexpr FPClassTest OrderedLessThanZeroMask =
      fcNegSubnormal | fcNegNormal | fcNegInf;
  static constexpr FPClassTest OrderedGreaterThanZeroMask =
      fcPosSubnormal | fcPosNormal | fcPosInf;

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never less than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> true
  ///   x < -0 --> false
  bool cannotBeOrderedLessThanZero() const {
    return isKnownNever(OrderedLessThanZeroMask);
  }

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never greater than -0.0.
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> false
  ///   x < -0 --> true
  bool cannotBeOrderedGreaterThanZero() const {
    return isKnownNever(OrderedGreaterThanZeroMask);
  }

  KnownFPClass &operator|=(const KnownFPClass &RHS) {
    KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;

    if (SignBit != RHS.SignBit)
      SignBit = std::nullopt;
    return *this;
  }

  void knownNot(FPClassTest RuleOut) {
    KnownFPClasses = KnownFPClasses & ~RuleOut;
  }

  void fneg() {
    KnownFPClasses = llvm::fneg(KnownFPClasses);
    if (SignBit)
      SignBit = !*SignBit;
  }

  void fabs() {
    if (KnownFPClasses & fcNegZero)
      KnownFPClasses |= fcPosZero;

    if (KnownFPClasses & fcNegInf)
      KnownFPClasses |= fcPosInf;

    if (KnownFPClasses & fcNegSubnormal)
      KnownFPClasses |= fcPosSubnormal;

    if (KnownFPClasses & fcNegNormal)
      KnownFPClasses |= fcPosNormal;

    signBitMustBeZero();
  }

  /// Return true if the sign bit must be 0, ignoring the sign of nans.
  bool signBitIsZeroOrNaN() const {
    return isKnownNever(fcNegative);
  }

  /// Assume the sign bit is zero.
  void signBitMustBeZero() {
    KnownFPClasses &= (fcPositive | fcNan);
    SignBit = false;
  }

  void copysign(const KnownFPClass &Sign) {
    // Don't know anything about the sign of the source. Expand the possible set
    // to its opposite sign pair.
    if (KnownFPClasses & fcZero)
      KnownFPClasses |= fcZero;
    if (KnownFPClasses & fcSubnormal)
      KnownFPClasses |= fcSubnormal;
    if (KnownFPClasses & fcNormal)
      KnownFPClasses |= fcNormal;
    if (KnownFPClasses & fcInf)
      KnownFPClasses |= fcInf;

    // Sign bit is exactly preserved even for nans.
    SignBit = Sign.SignBit;

    // Clear sign bits based on the input sign mask.
    if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
      KnownFPClasses &= (fcNegative | fcNan);
    if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
      KnownFPClasses &= (fcPositive | fcNan);
  }

  // Propagate knowledge that a non-NaN source implies the result can also not
  // be a NaN. For unconstrained operations, signaling nans are not guaranteed
  // to be quieted but cannot be introduced.
  void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
    if (Src.isKnownNever(fcNan)) {
      knownNot(fcNan);
      if (PreserveSign)
        SignBit = Src.SignBit;
    } else if (Src.isKnownNever(fcSNan))
      knownNot(fcSNan);
  }

  /// Propagate knowledge from a source value that could be a denormal or
  /// zero. We have to be conservative since output flushing is not guaranteed,
  /// so known-never-zero may not hold.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);

  /// Report known classes if \p Src is evaluated through a potentially
  /// canonicalizing operation. We can assume signaling nans will not be
  /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
                                  Type *Ty);

  void resetAll() { *this = KnownFPClass(); }
};

inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
  LHS |= RHS;
  return LHS;
}

inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
  RHS |= LHS;
  return std::move(RHS);
}
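
// Example (illustrative sketch): merging class information at a control-flow
// join, e.g. for a phi of two incoming values A and B. `A`, `B`, and `DL`
// are hypothetical; computeKnownFPClass is declared below.
//
// \code
//   KnownFPClass KnownA = computeKnownFPClass(A, DL);
//   KnownFPClass KnownB = computeKnownFPClass(B, DL);
//   KnownFPClass Joined = KnownA | KnownB;
//   if (Joined.isKnownNeverNaN()) {
//     // Neither incoming value can be a NaN, so neither can the phi.
//   }
// \endcode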

/// Determine which floating-point classes are valid for \p V, and return them
/// in KnownFPClass bit sets.
///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
/// \p InterestedClasses is a compile-time optimization hint for which
/// floating-point classes should be queried. Results for classes not listed in
/// \p InterestedClasses are still reliable if they happen to be determined
/// during the query.
KnownFPClass computeKnownFPClass(
    const Value *V, const APInt &DemandedElts, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true);

KnownFPClass computeKnownFPClass(
    const Value *V, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0. Users should use caution when considering PreserveSign
/// denormal-fp-math.
inline bool cannotBeNegativeZero(const Value *V, const DataLayout &DL,
                                 const TargetLibraryInfo *TLI = nullptr,
                                 unsigned Depth = 0,
                                 AssumptionCache *AC = nullptr,
                                 const Instruction *CtxI = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcNegZero, Depth, TLI, AC,
                                           CtxI, DT, UseInstrInfo);
  return Known.isKnownNeverNegZero();
}

bool CannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
                                 const TargetLibraryInfo *TLI);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
inline bool cannotBeOrderedLessThanZero(const Value *V, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI = nullptr,
                                        unsigned Depth = 0,
                                        AssumptionCache *AC = nullptr,
                                        const Instruction *CtxI = nullptr,
                                        const DominatorTree *DT = nullptr,
                                        bool UseInstrInfo = true) {
  KnownFPClass Known =
      computeKnownFPClass(V, DL, KnownFPClass::OrderedLessThanZeroMask, Depth,
                          TLI, AC, CtxI, DT, UseInstrInfo);
  return Known.cannotBeOrderedLessThanZero();
}

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
inline bool isKnownNeverInfinity(const Value *V, const DataLayout &DL,
                                 const TargetLibraryInfo *TLI = nullptr,
                                 unsigned Depth = 0,
                                 AssumptionCache *AC = nullptr,
                                 const Instruction *CtxI = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcInf, Depth, TLI, AC, CtxI,
                                           DT, UseInstrInfo);
  return Known.isKnownNeverInfinity();
}

/// Return true if the floating-point value can never contain a NaN or infinity.
inline bool isKnownNeverInfOrNaN(
    const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI,
    unsigned Depth = 0, AssumptionCache *AC = nullptr,
    const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcInf | fcNan, Depth, TLI, AC,
                                           CtxI, DT, UseInstrInfo);
  return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
inline bool isKnownNeverNaN(const Value *V, const DataLayout &DL,
                            const TargetLibraryInfo *TLI, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CtxI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true) {
  KnownFPClass Known = computeKnownFPClass(V, DL, fcNan, Depth, TLI, AC, CtxI,
                                           DT, UseInstrInfo);
  return Known.isKnownNeverNaN();
}

/// Return true if we can prove that the specified FP value's sign bit is 0.
///
///      NaN --> true/false (depending on the NaN's sign bit)
///       +0 --> true
///       -0 --> false
///   x > +0 --> true
///   x < -0 --> false
bool SignBitMustBeZero(const Value *V, const DataLayout &DL,
                       const TargetLibraryInfo *TLI);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                         Instruction *InsertBefore = nullptr);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}
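
// Example (illustrative sketch): decomposing a pointer into base + constant
// byte offset. `Ptr` and `DL` are hypothetical.
//
// \code
//   int64_t Offset = 0;
//   const Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
//   // If any constant offsets were stripped, Base plus Offset bytes equals
//   // Ptr; otherwise Base == Ptr and Offset == 0.
// \endcode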

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);
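
// Example (illustrative sketch): reading the contents of a constant C string,
// e.g. the first argument of a printf-like call. `V` is a hypothetical
// Value*.
//
// \code
//   StringRef Str;
//   if (getConstantStringInfo(V, Str)) {
//     // Str now holds the string contents, without the trailing NUL.
//   }
// \endcode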

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'.  If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that aliasing rules
/// consider the same as the returned pointer. You CAN'T use it to replace one
/// value with another. If \p MustPreserveNullness is true, the call must
/// preserve the nullness of the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered as capturing. The arguments are not marked as returned either,
/// because that would make the attribute useless. If \p MustPreserveNullness
/// is true, the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
/// that the returned value has pointer type if the specified value does. If
/// the MaxLookup value is non-zero, it limits the number of instructions to
/// be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
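
// Example (illustrative sketch): stripping GEPs and casts to find the
// allocation a pointer is based on. `Ptr` is a hypothetical Value*.
//
// \code
//   const Value *UO = getUnderlyingObject(Ptr);
//   if (isa<AllocaInst>(UO)) {
//     // Ptr is derived from a stack allocation.
//   }
// \endcode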

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed.  If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object thus
/// it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer.  If not suppressed,
/// dereferenceability and alignment must be proven separately.  Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);
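
// Example (illustrative sketch): a hoisting legality check. `I` and
// `InsertPt` are hypothetical instructions, with InsertPt assumed to dominate
// I and the operands of I assumed to be available at InsertPt.
//
// \code
//   if (isSafeToSpeculativelyExecute(&I))
//     I.moveBefore(InsertPt); // Execute I unconditionally at InsertPt.
// \endcode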

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (eg, as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (eg, as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behaviour. Memory dependent
///   instructions generally cannot be reordered with respect to other memory
///   dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT,
                                           bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT,
                                             bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const DataLayout &DL,
                                           AssumptionCache *AC = nullptr,
                                           const Instruction *CxtI = nullptr,
                                           const DominatorTree *DT = nullptr);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             AssumptionCache *AC,
                                             const Instruction *CxtI,
                                             const DominatorTree *DT);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const DataLayout &DL,
                                           AssumptionCache *AC,
                                           const Instruction *CxtI,
                                           const DominatorTree *DT);
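
// Example (illustrative sketch): proving that an unsigned add cannot wrap
// before tagging it `nuw`. `Add` is a hypothetical BinaryOperator* whose
// opcode is Instruction::Add; `AC` and `DT` are assumed in-scope analysis
// objects.
//
// \code
//   OverflowResult OR = computeOverflowForUnsignedAdd(
//       Add->getOperand(0), Add->getOperand(1), DL, &AC, Add, &DT);
//   if (OR == OverflowResult::NeverOverflows)
//     Add->setHasNoUnsignedWrap(true);
// \endcode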

/// Returns true if the arithmetic part of the result of \p WO is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of vscale with the given bit width,
/// based on the vscale_range function attribute.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOp.
bool propagatesPoison(const Use &PoisonOp);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallPtrSetImpl<const Value *> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
/// metadata on the instruction are considered.  This can be used to see if the
/// instruction could still introduce undef or poison even without poison
/// generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result.  Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison.  This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist.  Note that a false result conveys no information.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                   Instruction *OnPathTo,
                                   DominatorTree *DT);

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc, SExt, ZExt) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}
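
// Illustrative usage sketch (the select value `SomeSelect` is hypothetical):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(SomeSelect, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor)) {
//     CmpInst::Predicate Pred = getMinMaxPred(SPR.Flavor, SPR.Ordered);
//     ...; // SomeSelect computes min/max(LHS, RHS) under predicate Pred
//   }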

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant.  In math terms, it can
///   be a free variable.  We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms.  This
///   results in some odd recurrence structures.  Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code.  (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);
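
// Illustrative sketch (assumes a `const PHINode *PN` in scope):
//
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(PN, BO, Start, Step)) {
//     // PN = phi [Start, entry], [BO, backedge], where BO is
//     // binop(PN, Step) or binop(Step, PN).
//   }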

/// Return true if RHS is known to be implied true by LHS.  Return false if
/// RHS is known to be implied false by LHS.  Otherwise, return std::nullopt if
/// no implication can be made. LHS and RHS must be i1 (boolean) values or a
/// vector of such values. Note that the truth table for implication is the
/// same as <=u on i1 values (but not <=s!).  The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS,
                                       CmpInst::Predicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
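
// For example (illustrative): with LHS = `icmp ugt i32 %x, 10` and
// RHS = `icmp ugt i32 %x, 5`, LHS being true implies RHS is true:
//
//   std::optional<bool> Implied = isImpliedCondition(LHS, RHS, DL);
//   if (Implied && *Implied)
//     ...; // RHS is known true whenever LHS is true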

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                            const Value *LHS, const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H
//===- InteractiveModelRunner.h ---- "gym" ML model runner  -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H
#define LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H

#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Analysis/Utils/TrainingLogger.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>

namespace llvm {

/// A MLModelRunner that asks for advice from an external agent, or host. It
/// uses 2 files - ideally named pipes - one to send data to that agent, and
/// one to receive advice.
/// The data exchange uses the training logger (Utils/TrainingLogger.h) format.
/// Specifically, the compiler will send the log header, set the context, and
/// send observations; the host is expected to reply with a tensor value after
/// each observation, as a binary buffer conforming to the shape of the
/// advice. Interleaved, the data closely resembles the training log for a
/// run where we don't capture the reward signal.
///
/// Note that the correctness of the received data is the responsibility of the
/// host. In particular, if insufficient data were sent, the compiler will block
/// while waiting for advice.
///
/// Note that the host can either open the pipes RW, or open first the pipe to
/// the compiler - i.e. the "Inbound" - and then the "Outbound", to avoid
/// deadlock. This is because the compiler first tries to open the inbound
/// (which will hang until there's a writer on the other end).
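///
/// A minimal construction sketch (illustrative; the pipe paths, tensor names
/// and shapes are assumptions, and `Ctx` is an LLVMContext in scope):
/// \code
///   std::vector<TensorSpec> Inputs{
///       TensorSpec::createSpec<int64_t>("feature", {1})};
///   TensorSpec Advice = TensorSpec::createSpec<int64_t>("advice", {1});
///   InteractiveModelRunner Runner(Ctx, Inputs, Advice,
///                                 "/tmp/compiler_to_host.pipe",
///                                 "/tmp/host_to_compiler.pipe");
/// \endcode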
class InteractiveModelRunner : public MLModelRunner {
public:
  InteractiveModelRunner(LLVMContext &Ctx,
                         const std::vector<TensorSpec> &Inputs,
                         const TensorSpec &Advice, StringRef OutboundName,
                         StringRef InboundName);

  static bool classof(const MLModelRunner *R) {
    return R->getKind() == MLModelRunner::Kind::Interactive;
  }
  void switchContext(StringRef Name) override {
    Log->switchContext(Name);
    Log->flush();
  }

  virtual ~InteractiveModelRunner();

private:
  void *evaluateUntyped() override;
  // This must be declared before InEC if we want to initialize it in the
  // ctor initializer list.
  int Inbound = -1;
  const std::vector<TensorSpec> InputSpecs;
  const TensorSpec OutputSpec;
  std::error_code OutEC;
  std::error_code InEC;
  std::vector<char> OutputBuffer;
  std::unique_ptr<Logger> Log;
};
} // namespace llvm
#endif // LLVM_ANALYSIS_INTERACTIVEMODELRUNNER_H
//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides helpers for the implementation of
/// a TargetTransformInfo-conforming class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include <optional>
#include <utility>

namespace llvm {

class Function;

/// Base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
class TargetTransformInfoImplBase {
protected:
  typedef TargetTransformInfo TTI;

  const DataLayout &DL;

  explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg) = default;
  TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}

  const DataLayout &getDataLayout() const { return DL; }

  InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                             ArrayRef<const Value *> Operands, Type *AccessType,
                             TTI::TargetCostKind CostKind) const {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
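    // e.g. (illustrative) `getelementptr i32, ptr @g, i64 1` has all-constant
    // operands and is assumed free, while any variable index costs TCC_Basic.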
    for (const Value *Operand : Operands)
      if (!isa<Constant>(Operand))
        return TTI::TCC_Basic;

    return TTI::TCC_Free;
  }

  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize,
                                            ProfileSummaryInfo *PSI,
                                            BlockFrequencyInfo *BFI) const {
    (void)PSI;
    (void)BFI;
    JTSize = 0;
    return SI.getNumCases();
  }

  unsigned getInliningThresholdMultiplier() const { return 1; }
  unsigned adjustInliningThreshold(const CallBase *CB) const { return 0; }
  unsigned getCallerAllocaCost(const CallBase *CB, const AllocaInst *AI) const {
    return 0;
  }

  int getInlinerVectorBonusPercent() const { return 150; }

  InstructionCost getMemcpyCost(const Instruction *I) const {
    return TTI::TCC_Expensive;
  }

  uint64_t getMaxMemIntrinsicInlineSizeThreshold() const {
    return 64;
  }

  // Although this default value is arbitrary, it is not random. It is assumed
  // that a condition that evaluates the same way by a higher percentage than
  // this is best represented as control flow. Therefore, the default value N
  // should be set such that the win from N% correct executions is greater than
  // the loss from (100 - N)% mispredicted executions for the majority of
  // intended targets.
  BranchProbability getPredictableBranchThreshold() const {
    return BranchProbability(99, 100);
  }

  bool hasBranchDivergence(const Function *F = nullptr) const { return false; }

  bool isSourceOfDivergence(const Value *V) const { return false; }

  bool isAlwaysUniform(const Value *V) const { return false; }

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
    return false;
  }

  bool addrspacesMayAlias(unsigned AS0, unsigned AS1) const {
    return true;
  }

  unsigned getFlatAddressSpace() const { return -1; }

  bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                  Intrinsic::ID IID) const {
    return false;
  }

  bool isNoopAddrSpaceCast(unsigned, unsigned) const { return false; }
  bool canHaveNonUndefGlobalInitializerInAddressSpace(unsigned AS) const {
    return AS == 0;
  }

  unsigned getAssumedAddrSpace(const Value *V) const { return -1; }

  bool isSingleThreaded() const { return false; }

  std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const {
    return std::make_pair(nullptr, -1);
  }

  Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
                                          Value *NewV) const {
    return nullptr;
  }

  bool isLoweredToCall(const Function *F) const {
    assert(F && "A concrete function must be provided to this routine.");

    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "fmin" || Name == "fminf" || Name == "fminl" ||
        Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" ||
        Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
        Name == "llabs")
      return false;

    return true;
  }

  bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                AssumptionCache &AC, TargetLibraryInfo *LibInfo,
                                HardwareLoopInfo &HWLoopInfo) const {
    return false;
  }

  bool preferPredicateOverEpilogue(TailFoldingInfo *TFI) const { return false; }

  TailFoldingStyle
  getPreferredTailFoldingStyle(bool IVUpdateMayOverflow = true) const {
    return TailFoldingStyle::DataWithoutLaneMask;
  }

  std::optional<Instruction *> instCombineIntrinsic(InstCombiner &IC,
                                                    IntrinsicInst &II) const {
    return std::nullopt;
  }

  std::optional<Value *>
  simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, IntrinsicInst &II,
                                   APInt DemandedMask, KnownBits &Known,
                                   bool &KnownBitsComputed) const {
    return std::nullopt;
  }

  std::optional<Value *> simplifyDemandedVectorEltsIntrinsic(
      InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp) const {
    return std::nullopt;
  }

  void getUnrollingPreferences(Loop *, ScalarEvolution &,
                               TTI::UnrollingPreferences &,
                               OptimizationRemarkEmitter *) const {}

  void getPeelingPreferences(Loop *, ScalarEvolution &,
                             TTI::PeelingPreferences &) const {}

  bool isLegalAddImmediate(int64_t Imm) const { return false; }

  bool isLegalICmpImmediate(int64_t Imm) const { return false; }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale, unsigned AddrSpace,
                             Instruction *I = nullptr) const {
    // Guess that only reg and reg+reg addressing is allowed. This heuristic is
    // taken from the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
  }

  bool isLSRCostLess(const TTI::LSRCost &C1, const TTI::LSRCost &C2) const {
    return std::tie(C1.NumRegs, C1.AddRecCost, C1.NumIVMuls, C1.NumBaseAdds,
                    C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.AddRecCost, C2.NumIVMuls, C2.NumBaseAdds,
                    C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }

  bool isNumRegsMajorCostOfLSR() const { return true; }

  bool isProfitableLSRChainElement(Instruction *I) const { return false; }

  bool canMacroFuseCmp() const { return false; }

  bool canSaveCmp(Loop *L, BranchInst **BI, ScalarEvolution *SE, LoopInfo *LI,
                  DominatorTree *DT, AssumptionCache *AC,
                  TargetLibraryInfo *LibInfo) const {
    return false;
  }

  TTI::AddressingModeKind
  getPreferredAddressingMode(const Loop *L, ScalarEvolution *SE) const {
    return TTI::AMK_None;
  }

  bool isLegalMaskedStore(Type *DataType, Align Alignment) const {
    return false;
  }

  bool isLegalMaskedLoad(Type *DataType, Align Alignment) const {
    return false;
  }

  bool isLegalNTStore(Type *DataType, Align Alignment) const {
    // By default, assume nontemporal memory stores are available for stores
    // that are aligned and have a size that is a power of 2.
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    return Alignment >= DataSize && isPowerOf2_32(DataSize);
  }

  bool isLegalNTLoad(Type *DataType, Align Alignment) const {
    // By default, assume nontemporal memory loads are available for loads that
    // are aligned and have a size that is a power of 2.
    unsigned DataSize = DL.getTypeStoreSize(DataType);
    return Alignment >= DataSize && isPowerOf2_32(DataSize);
  }
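
  // For example (illustrative): under this default, a 16-byte <4 x float>
  // store with align 16 is treated as a legal nontemporal store, while the
  // same store with align 8 is not.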

  bool isLegalBroadcastLoad(Type *ElementTy, ElementCount NumElements) const {
    return false;
  }

  bool isLegalMaskedScatter(Type *DataType, Align Alignment) const {
    return false;
  }

  bool isLegalMaskedGather(Type *DataType, Align Alignment) const {
    return false;
  }

  bool forceScalarizeMaskedGather(VectorType *DataType, Align Alignment) const {
    return false;
  }

  bool forceScalarizeMaskedScatter(VectorType *DataType,
                                   Align Alignment) const {
    return false;
  }

  bool isLegalMaskedCompressStore(Type *DataType) const { return false; }

  bool isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
                       const SmallBitVector &OpcodeMask) const {
    return false;
  }

  bool isLegalMaskedExpandLoad(Type *DataType) const { return false; }

  bool enableOrderedReductions() const { return false; }

  bool hasDivRemOp(Type *DataType, bool IsSigned) const { return false; }

  bool hasVolatileVariant(Instruction *I, unsigned AddrSpace) const {
    return false;
  }

  bool prefersVectorizedAddressing() const { return true; }

  InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                       int64_t BaseOffset, bool HasBaseReg,
                                       int64_t Scale,
                                       unsigned AddrSpace) const {
    // Guess that all legal addressing mode are free.
    if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg, Scale,
                              AddrSpace))
      return 0;
    return -1;
  }

  bool LSRWithInstrQueries() const { return false; }

  bool isTruncateFree(Type *Ty1, Type *Ty2) const { return false; }

  bool isProfitableToHoist(Instruction *I) const { return true; }

  bool useAA() const { return false; }

  bool isTypeLegal(Type *Ty) const { return false; }

  unsigned getRegUsageForType(Type *Ty) const { return 1; }

  bool shouldBuildLookupTables() const { return true; }

  bool shouldBuildLookupTablesForConstant(Constant *C) const { return true; }

  bool shouldBuildRelLookupTables() const { return false; }

  bool useColdCCForColdCall(Function &F) const { return false; }

  InstructionCost getScalarizationOverhead(VectorType *Ty,
                                           const APInt &DemandedElts,
                                           bool Insert, bool Extract,
                                           TTI::TargetCostKind CostKind) const {
    return 0;
  }

  InstructionCost
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) const {
    return 0;
  }

  bool supportsEfficientVectorElementLoadStore() const { return false; }

  bool supportsTailCalls() const { return true; }

  bool supportsTailCallFor(const CallBase *CB) const {
    return supportsTailCalls();
  }

  bool enableAggressiveInterleaving(bool LoopHasReductions) const {
    return false;
  }

  TTI::MemCmpExpansionOptions enableMemCmpExpansion(bool OptSize,
                                                    bool IsZeroCmp) const {
    return {};
  }

  bool enableSelectOptimize() const { return true; }

  bool enableInterleavedAccessVectorization() const { return false; }

  bool enableMaskedInterleavedAccessVectorization() const { return false; }

  bool isFPVectorizationPotentiallyUnsafe() const { return false; }

  bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
                                      unsigned AddressSpace, Align Alignment,
                                      unsigned *Fast) const {
    return false;
  }

  TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const {
    return TTI::PSK_Software;
  }

  bool haveFastSqrt(Type *Ty) const { return false; }

  bool isExpensiveToSpeculativelyExecute(const Instruction *I) { return true; }

  bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }

  InstructionCost getFPOpCost(Type *Ty) const {
    return TargetTransformInfo::TCC_Basic;
  }

  InstructionCost getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
                                        const APInt &Imm, Type *Ty) const {
    return 0;
  }

  InstructionCost getIntImmCost(const APInt &Imm, Type *Ty,
                                TTI::TargetCostKind CostKind) const {
    return TTI::TCC_Basic;
  }

  InstructionCost getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind,
                                    Instruction *Inst = nullptr) const {
    return TTI::TCC_Free;
  }

  InstructionCost getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind) const {
    return TTI::TCC_Free;
  }

  unsigned getNumberOfRegisters(unsigned ClassID) const { return 8; }

  unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const {
    return Vector ? 1 : 0;
  }

  const char *getRegisterClassName(unsigned ClassID) const {
    switch (ClassID) {
    default:
      return "Generic::Unknown Register Class";
    case 0:
      return "Generic::ScalarRC";
    case 1:
      return "Generic::VectorRC";
    }
  }

  TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
    return TypeSize::getFixed(32);
  }

  unsigned getMinVectorRegisterBitWidth() const { return 128; }

  std::optional<unsigned> getMaxVScale() const { return std::nullopt; }
  std::optional<unsigned> getVScaleForTuning() const { return std::nullopt; }
  bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  bool
  shouldMaximizeVectorBandwidth(TargetTransformInfo::RegisterKind K) const {
    return false;
  }

  ElementCount getMinimumVF(unsigned ElemWidth, bool IsScalable) const {
    return ElementCount::get(0, IsScalable);
  }

  unsigned getMaximumVF(unsigned ElemWidth, unsigned Opcode) const { return 0; }
  unsigned getStoreMinimumVF(unsigned VF, Type *, Type *) const { return VF; }

  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const {
    AllowPromotionWithoutCommonHeader = false;
    return false;
  }

  unsigned getCacheLineSize() const { return 0; }
  std::optional<unsigned>
  getCacheSize(TargetTransformInfo::CacheLevel Level) const {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      [[fallthrough]];
    case TargetTransformInfo::CacheLevel::L2D:
      return std::nullopt;
    }
    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  std::optional<unsigned>
  getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const {
    switch (Level) {
    case TargetTransformInfo::CacheLevel::L1D:
      [[fallthrough]];
    case TargetTransformInfo::CacheLevel::L2D:
      return std::nullopt;
    }

    llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
  }

  unsigned getPrefetchDistance() const { return 0; }
  unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                unsigned NumStridedMemAccesses,
                                unsigned NumPrefetches, bool HasCall) const {
    return 1;
  }
  unsigned getMaxPrefetchIterationsAhead() const { return UINT_MAX; }
  bool enableWritePrefetching() const { return false; }
  bool shouldPrefetchAddressSpace(unsigned AS) const { return !AS; }

  unsigned getMaxInterleaveFactor(ElementCount VF) const { return 1; }

  InstructionCost getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
      TTI::OperandValueInfo Opd1Info, TTI::OperandValueInfo Opd2Info,
      ArrayRef<const Value *> Args,
      const Instruction *CxtI = nullptr) const {
    // Widenable conditions will eventually lower into constants, so some
    // operations with them will be trivially optimized away.
    auto IsWidenableCondition = [](const Value *V) {
      if (auto *II = dyn_cast<IntrinsicInst>(V))
        if (II->getIntrinsicID() == Intrinsic::experimental_widenable_condition)
          return true;
      return false;
    };
    // FIXME: A number of transformation tests seem to require these values
    // which seems a little odd for how arbitrary they are.
    switch (Opcode) {
    default:
      break;
    case Instruction::FDiv:
    case Instruction::FRem:
    case Instruction::SDiv:
    case Instruction::SRem:
    case Instruction::UDiv:
    case Instruction::URem:
      // FIXME: Unlikely to be true for CodeSize.
      return TTI::TCC_Expensive;
    case Instruction::And:
    case Instruction::Or:
      if (any_of(Args, IsWidenableCondition))
        return TTI::TCC_Free;
      break;
    }

    // Assume a 3cy latency for fp arithmetic ops.
    if (CostKind == TTI::TCK_Latency)
      if (Ty->getScalarType()->isFloatingPointTy())
        return 3;

    return 1;
  }

  InstructionCost
  getShuffleCost(TTI::ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
                 TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
                 ArrayRef<const Value *> Args = std::nullopt) const {
    return 1;
  }

  InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                   TTI::CastContextHint CCH,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) const {
    switch (Opcode) {
    default:
      break;
    case Instruction::IntToPtr: {
      unsigned SrcSize = Src->getScalarSizeInBits();
      if (DL.isLegalInteger(SrcSize) &&
          SrcSize <= DL.getPointerTypeSizeInBits(Dst))
        return 0;
      break;
    }
    case Instruction::PtrToInt: {
      unsigned DstSize = Dst->getScalarSizeInBits();
      if (DL.isLegalInteger(DstSize) &&
          DstSize >= DL.getPointerTypeSizeInBits(Src))
        return 0;
      break;
    }
    case Instruction::BitCast:
      if (Dst == Src || (Dst->isPointerTy() && Src->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return 0;
      break;
    case Instruction::Trunc: {
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      TypeSize DstSize = DL.getTypeSizeInBits(Dst);
      if (!DstSize.isScalable() && DL.isLegalInteger(DstSize.getFixedValue()))
        return 0;
      break;
    }
    }
    return 1;
  }

  InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                           VectorType *VecTy,
                                           unsigned Index) const {
    return 1;
  }

  InstructionCost getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind,
                                 const Instruction *I = nullptr) const {
    // A phi would be free, unless we're costing the throughput because it
    // will require a register.
    if (Opcode == Instruction::PHI && CostKind != TTI::TCK_RecipThroughput)
      return 0;
    return 1;
  }

  InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                     CmpInst::Predicate VecPred,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) const {
    return 1;
  }

  InstructionCost getVectorInstrCost(unsigned Opcode, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index, Value *Op0,
                                     Value *Op1) const {
    return 1;
  }

  InstructionCost getVectorInstrCost(const Instruction &I, Type *Val,
                                     TTI::TargetCostKind CostKind,
                                     unsigned Index) const {
    return 1;
  }

  unsigned getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, int VF,
                                     const APInt &DemandedDstElts,
                                     TTI::TargetCostKind CostKind) {
    return 1;
  }

  InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind,
                                  TTI::OperandValueInfo OpInfo,
                                  const Instruction *I) const {
    return 1;
  }

  InstructionCost getVPMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) const {
    return 1;
  }

  InstructionCost getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                        Align Alignment, unsigned AddressSpace,
                                        TTI::TargetCostKind CostKind) const {
    return 1;
  }

  InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                         const Value *Ptr, bool VariableMask,
                                         Align Alignment,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I = nullptr) const {
    return 1;
  }

  unsigned getInterleavedMemoryOpCost(
      unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
      Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
      bool UseMaskForCond, bool UseMaskForGaps) const {
    return 1;
  }

  InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                        TTI::TargetCostKind CostKind) const {
    switch (ICA.getID()) {
    default:
      break;
    case Intrinsic::annotation:
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::arithmetic_fence:
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
    case Intrinsic::is_constant:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
    case Intrinsic::experimental_gc_result:
    case Intrinsic::experimental_gc_relocate:
    case Intrinsic::coro_alloc:
    case Intrinsic::coro_begin:
    case Intrinsic::coro_free:
    case Intrinsic::coro_end:
    case Intrinsic::coro_frame:
    case Intrinsic::coro_size:
    case Intrinsic::coro_align:
    case Intrinsic::coro_suspend:
    case Intrinsic::coro_subfn_addr:
    case Intrinsic::threadlocal_address:
    case Intrinsic::experimental_widenable_condition:
      // These intrinsics don't actually represent code after lowering.
      return 0;
    }
    return 1;
  }

  InstructionCost getCallInstrCost(Function *F, Type *RetTy,
                                   ArrayRef<Type *> Tys,
                                   TTI::TargetCostKind CostKind) const {
    return 1;
  }

  // Assume that we have a register of the right size for the type.
  unsigned getNumberOfParts(Type *Tp) const { return 1; }

  InstructionCost getAddressComputationCost(Type *Tp, ScalarEvolution *,
                                            const SCEV *) const {
    return 0;
  }

  InstructionCost getArithmeticReductionCost(unsigned, VectorType *,
                                             std::optional<FastMathFlags> FMF,
                                             TTI::TargetCostKind) const {
    return 1;
  }

  InstructionCost getMinMaxReductionCost(Intrinsic::ID IID, VectorType *,
                                         FastMathFlags,
                                         TTI::TargetCostKind) const {
    return 1;
  }

  InstructionCost getExtendedReductionCost(unsigned Opcode, bool IsUnsigned,
                                           Type *ResTy, VectorType *Ty,
                                           FastMathFlags FMF,
                                           TTI::TargetCostKind CostKind) const {
    return 1;
  }

  InstructionCost getMulAccReductionCost(bool IsUnsigned, Type *ResTy,
                                         VectorType *Ty,
                                         TTI::TargetCostKind CostKind) const {
    return 1;
  }

  InstructionCost getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
    return 0;
  }

  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const {
    return false;
  }

  unsigned getAtomicMemIntrinsicMaxElementSize() const {
    // Note for overrides: You must ensure for all element unordered-atomic
    // memory intrinsics that all power-of-2 element sizes up to, and
    // including, the return value of this method have a corresponding
    // runtime lib call. These runtime lib call definitions can be found
    // in RuntimeLibcalls.h
    return 0;
  }

  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const {
    return nullptr;
  }

  Type *
  getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                            unsigned SrcAddrSpace, unsigned DestAddrSpace,
                            unsigned SrcAlign, unsigned DestAlign,
                            std::optional<uint32_t> AtomicElementSize) const {
    return AtomicElementSize ? Type::getIntNTy(Context, *AtomicElementSize * 8)
                             : Type::getInt8Ty(Context);
  }

  void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
      unsigned SrcAlign, unsigned DestAlign,
      std::optional<uint32_t> AtomicCpySize) const {
    unsigned OpSizeInBytes = AtomicCpySize ? *AtomicCpySize : 1;
    Type *OpType = Type::getIntNTy(Context, OpSizeInBytes * 8);
    for (unsigned i = 0; i != RemainingBytes; i += OpSizeInBytes)
      OpsOut.push_back(OpType);
  }

  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool areTypesABICompatible(const Function *Caller, const Function *Callee,
                             const ArrayRef<Type *> &Types) const {
    return (Caller->getFnAttribute("target-cpu") ==
            Callee->getFnAttribute("target-cpu")) &&
           (Caller->getFnAttribute("target-features") ==
            Callee->getFnAttribute("target-features"));
  }

  bool isIndexedLoadLegal(TTI::MemIndexedMode Mode, Type *Ty,
                          const DataLayout &DL) const {
    return false;
  }

  bool isIndexedStoreLegal(TTI::MemIndexedMode Mode, Type *Ty,
                           const DataLayout &DL) const {
    return false;
  }

  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const { return 128; }

  bool isLegalToVectorizeLoad(LoadInst *LI) const { return true; }

  bool isLegalToVectorizeStore(StoreInst *SI) const { return true; }

  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes, Align Alignment,
                                   unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes, Align Alignment,
                                    unsigned AddrSpace) const {
    return true;
  }

  bool isLegalToVectorizeReduction(const RecurrenceDescriptor &RdxDesc,
                                   ElementCount VF) const {
    return true;
  }

  bool isElementTypeLegalForScalableVector(Type *Ty) const { return true; }

  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const {
    return VF;
  }

  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const {
    return VF;
  }

  bool preferInLoopReduction(unsigned Opcode, Type *Ty,
                             TTI::ReductionFlags Flags) const {
    return false;
  }

  bool preferPredicatedReductionSelect(unsigned Opcode, Type *Ty,
                                       TTI::ReductionFlags Flags) const {
    return false;
  }

  bool preferEpilogueVectorization() const {
    return true;
  }

  bool shouldExpandReduction(const IntrinsicInst *II) const { return true; }

  unsigned getGISelRematGlobalCost() const { return 1; }

  unsigned getMinTripCountTailFoldingThreshold() const { return 0; }

  bool supportsScalableVectors() const { return false; }

  bool enableScalableVectorization() const { return false; }

  bool hasActiveVectorLength(unsigned Opcode, Type *DataType,
                             Align Alignment) const {
    return false;
  }

  TargetTransformInfo::VPLegalization
  getVPLegalizationStrategy(const VPIntrinsic &PI) const {
    return TargetTransformInfo::VPLegalization(
        /* EVLParamStrategy */ TargetTransformInfo::VPLegalization::Discard,
        /* OperatorStrategy */ TargetTransformInfo::VPLegalization::Convert);
  }

  bool hasArmWideBranch(bool) const { return false; }

  unsigned getMaxNumArgs() const { return UINT_MAX; }

protected:
  // Obtain the minimum required size to hold the value (without the sign).
  // In case of a vector it returns the min required size for one element.
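  // Worked example (illustrative): for the constant vector <i32 3, i32 -4>,
  // getSignificantBits() is 3 for both elements ("011" and "100"), so each
  // contributes 3 - 1 == 2; the function returns 2 with isSigned == true
  // because one element is negative.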
  unsigned minRequiredElementSize(const Value *Val, bool &isSigned) const {
    if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
      const auto *VectorValue = cast<Constant>(Val);

      // In case of a vector we need to pick the max of the min required
      // sizes of the elements.
      auto *VT = cast<FixedVectorType>(Val->getType());

      // Assume unsigned elements
      isSigned = false;

      // The max required size is the size of the vector element type
      unsigned MaxRequiredSize =
          VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();

      unsigned MinRequiredSize = 0;
      for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
        if (auto *IntElement =
                dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
          bool signedElement = IntElement->getValue().isNegative();
          // Get the element min required size.
          unsigned ElementMinRequiredSize =
              IntElement->getValue().getSignificantBits() - 1;
          // In case one element is signed then all the vector is signed.
          isSigned |= signedElement;
          // Save the max required bit size between all the elements.
          MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
        } else {
          // not an int constant element
          return MaxRequiredSize;
        }
      }
      return MinRequiredSize;
    }

    if (const auto *CI = dyn_cast<ConstantInt>(Val)) {
      isSigned = CI->getValue().isNegative();
      return CI->getValue().getSignificantBits() - 1;
    }

    if (const auto *Cast = dyn_cast<SExtInst>(Val)) {
      isSigned = true;
      return Cast->getSrcTy()->getScalarSizeInBits() - 1;
    }

    if (const auto *Cast = dyn_cast<ZExtInst>(Val)) {
      isSigned = false;
      return Cast->getSrcTy()->getScalarSizeInBits();
    }

    isSigned = false;
    return Val->getType()->getScalarSizeInBits();
  }

  bool isStridedAccess(const SCEV *Ptr) const {
    return Ptr && isa<SCEVAddRecExpr>(Ptr);
  }

  const SCEVConstant *getConstantStrideStep(ScalarEvolution *SE,
                                            const SCEV *Ptr) const {
    if (!isStridedAccess(Ptr))
      return nullptr;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ptr);
    return dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(*SE));
  }

  bool isConstantStridedAccessLessThan(ScalarEvolution *SE, const SCEV *Ptr,
                                       int64_t MergeDistance) const {
    const SCEVConstant *Step = getConstantStrideStep(SE, Ptr);
    if (!Step)
      return false;
    APInt StrideVal = Step->getAPInt();
    if (StrideVal.getBitWidth() > 64)
      return false;
    // FIXME: Need to take absolute value for negative stride case.
    return StrideVal.getSExtValue() < MergeDistance;
  }
};

/// CRTP base class for use as a mix-in that aids implementing
/// a TargetTransformInfo-compatible class.
template <typename T>
class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
private:
  typedef TargetTransformInfoImplBase BaseT;

protected:
  explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}

public:
  using BaseT::getGEPCost;

  InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
                             ArrayRef<const Value *> Operands, Type *AccessType,
                             TTI::TargetCostKind CostKind) {
    assert(PointeeType && Ptr && "can't get GEPCost of nullptr");
    auto *BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
    bool HasBaseReg = (BaseGV == nullptr);

    auto PtrSizeBits = DL.getPointerTypeSizeInBits(Ptr->getType());
    APInt BaseOffset(PtrSizeBits, 0);
    int64_t Scale = 0;

    auto GTI = gep_type_begin(PointeeType, Operands);
    Type *TargetType = nullptr;

    // Handle the case where the GEP instruction has a single operand,
    // the base pointer, and therefore TargetType is a nullptr.
    if (Operands.empty())
      return !BaseGV ? TTI::TCC_Free : TTI::TCC_Basic;

    for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
      TargetType = GTI.getIndexedType();
      // We assume that the cost of Scalar GEP with constant index and the
      // cost of Vector GEP with splat constant index are the same.
      const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
      if (!ConstIdx)
        if (auto Splat = getSplatValue(*I))
          ConstIdx = dyn_cast<ConstantInt>(Splat);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For structures the index is always splat or scalar constant
        assert(ConstIdx && "Unexpected GEP index");
        uint64_t Field = ConstIdx->getZExtValue();
        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
      } else {
        // If this operand is a scalable type, bail out early.
        // TODO: handle scalable vectors
        if (isa<ScalableVectorType>(TargetType))
          return TTI::TCC_Basic;
        int64_t ElementSize =
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedValue();
        if (ConstIdx) {
          BaseOffset +=
              ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
        } else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
        }
      }
    }

    // If we haven't been provided a hint, use the target type for now.
    //
    // TODO: Take a look at potentially removing this: This is *slightly* wrong
    // as it's possible to have a GEP with a foldable target type but a memory
    // access that isn't foldable. For example, this load isn't foldable on
    // RISC-V:
    //
    // %p = getelementptr i32, ptr %base, i32 42
    // %x = load <2 x i32>, ptr %p
    if (!AccessType)
      AccessType = TargetType;

    // If the final address of the GEP is a legal addressing mode for the given
    // access type, then we can fold it into its users.
    if (static_cast<T *>(this)->isLegalAddressingMode(
            AccessType, const_cast<GlobalValue *>(BaseGV),
            BaseOffset.sextOrTrunc(64).getSExtValue(), HasBaseReg, Scale,
            Ptr->getType()->getPointerAddressSpace()))
      return TTI::TCC_Free;

    // TODO: Instead of returning TCC_Basic here, we should use
    // getArithmeticInstrCost. Or better yet, provide a hook to let the target
    // model it.
    return TTI::TCC_Basic;
  }
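
  // Worked example (illustrative): for `getelementptr i32, ptr %p, i64 4`,
  // BaseOffset accumulates 4 * sizeof(i32) == 16 and Scale stays 0; if the
  // target reports base + 16 as a legal addressing mode for an i32 access,
  // the GEP is TTI::TCC_Free, otherwise TTI::TCC_Basic.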

  InstructionCost getPointersChainCost(ArrayRef<const Value *> Ptrs,
                                       const Value *Base,
                                       const TTI::PointersChainInfo &Info,
                                       Type *AccessTy,
                                       TTI::TargetCostKind CostKind) {
    InstructionCost Cost = TTI::TCC_Free;
    // In the basic model we only take GEP instructions into account (although
    // a chain may also contain alloca instructions, values, constants and/or
    // constant expressions, PHIs, bitcasts ... anything allowed to be used as
    // a pointer). Typically, if Base is not a GEP instruction and all the
    // pointers are relative to the same base address, the rest are either GEP
    // instructions, PHIs, bitcasts or constants. When the bases are the same,
    // we just calculate the cost of each non-Base GEP as an ADD operation if
    // any of its indices is a non-constant.
    // If there are no known dependencies between the pointers, the cost is
    // calculated as a sum of the costs of the GEP instructions.
    for (const Value *V : Ptrs) {
      const auto *GEP = dyn_cast<GetElementPtrInst>(V);
      if (!GEP)
        continue;
      if (Info.isSameBase() && V != Base) {
        if (GEP->hasAllConstantIndices())
          continue;
        Cost += static_cast<T *>(this)->getArithmeticInstrCost(
            Instruction::Add, GEP->getType(), CostKind,
            {TTI::OK_AnyValue, TTI::OP_None}, {TTI::OK_AnyValue, TTI::OP_None},
            std::nullopt);
      } else {
        SmallVector<const Value *> Indices(GEP->indices());
        Cost += static_cast<T *>(this)->getGEPCost(GEP->getSourceElementType(),
                                                   GEP->getPointerOperand(),
                                                   Indices, AccessTy, CostKind);
      }
    }
    return Cost;
  }
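
  // Illustrative: with a same-base chain, a non-Base GEP whose indices are
  // all constants adds nothing, one with a variable index is costed as a
  // single Add, and without the same-base guarantee each GEP is costed in
  // full via getGEPCost.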

  InstructionCost getInstructionCost(const User *U,
                                     ArrayRef<const Value *> Operands,
                                     TTI::TargetCostKind CostKind) {
    using namespace llvm::PatternMatch;

    auto *TargetTTI = static_cast<T *>(this);
    // Handle non-intrinsic calls, invokes, and callbr.
    // FIXME: Unlikely to be true for anything but CodeSize.
    auto *CB = dyn_cast<CallBase>(U);
    if (CB && !isa<IntrinsicInst>(U)) {
      if (const Function *F = CB->getCalledFunction()) {
        if (!TargetTTI->isLoweredToCall(F))
          return TTI::TCC_Basic; // Give a basic cost if it will be lowered

        return TTI::TCC_Basic * (F->getFunctionType()->getNumParams() + 1);
      }
      // For indirect or other calls, scale cost by number of arguments.
      return TTI::TCC_Basic * (CB->arg_size() + 1);
    }

    Type *Ty = U->getType();
    unsigned Opcode = Operator::getOpcode(U);
    auto *I = dyn_cast<Instruction>(U);
    switch (Opcode) {
    default:
      break;
    case Instruction::Call: {
      assert(isa<IntrinsicInst>(U) && "Unexpected non-intrinsic call");
      auto *Intrinsic = cast<IntrinsicInst>(U);
      IntrinsicCostAttributes CostAttrs(Intrinsic->getIntrinsicID(), *CB);
      return TargetTTI->getIntrinsicInstrCost(CostAttrs, CostKind);
    }
    case Instruction::Br:
    case Instruction::Ret:
    case Instruction::PHI:
    case Instruction::Switch:
      return TargetTTI->getCFInstrCost(Opcode, CostKind, I);
    case Instruction::ExtractValue:
    case Instruction::Freeze:
      return TTI::TCC_Free;
    case Instruction::Alloca:
      if (cast<AllocaInst>(U)->isStaticAlloca())
        return TTI::TCC_Free;
      break;
    case Instruction::GetElementPtr: {
      const auto *GEP = cast<GEPOperator>(U);
      Type *AccessType = nullptr;
      // For now, only provide the AccessType in the simple case where the GEP
      // only has one user.
      if (GEP->hasOneUser() && I)
        AccessType = I->user_back()->getAccessType();

      return TargetTTI->getGEPCost(GEP->getSourceElementType(),
                                   Operands.front(), Operands.drop_front(),
                                   AccessType, CostKind);
    }
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FNeg: {
      const TTI::OperandValueInfo Op1Info = TTI::getOperandInfo(Operands[0]);
      TTI::OperandValueInfo Op2Info;
      if (Opcode != Instruction::FNeg)
        Op2Info = TTI::getOperandInfo(Operands[1]);
      return TargetTTI->getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                               Op2Info, Operands, I);
    }
    case Instruction::IntToPtr:
    case Instruction::PtrToInt:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::Trunc:
    case Instruction::FPTrunc:
    case Instruction::BitCast:
    case Instruction::FPExt:
    case Instruction::SExt:
    case Instruction::ZExt:
    case Instruction::AddrSpaceCast: {
      Type *OpTy = Operands[0]->getType();
      return TargetTTI->getCastInstrCost(
          Opcode, Ty, OpTy, TTI::getCastContextHint(I), CostKind, I);
    }
    case Instruction::Store: {
      auto *SI = cast<StoreInst>(U);
      Type *ValTy = Operands[0]->getType();
      TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(Operands[0]);
      return TargetTTI->getMemoryOpCost(Opcode, ValTy, SI->getAlign(),
                                        SI->getPointerAddressSpace(), CostKind,
                                        OpInfo, I);
    }
    case Instruction::Load: {
      // FIXME: Arbitrary cost which could come from the backend.
      if (CostKind == TTI::TCK_Latency)
        return 4;
      auto *LI = cast<LoadInst>(U);
      Type *LoadType = U->getType();
      // If the load is of a non-register-sized type, the cost estimate may
      // expand it into several instructions that load into multiple registers
      // on the target.  But if the load's only use is a trunc to a
      // register-sized type, the instruction selector can combine the pair
      // into a single load.  In that case, use the trunc's destination type
      // rather than the loaded type to estimate the cost accurately.
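      // Illustrative sketch (hypothetical IR): when computing code size for
      //   %wide = load i64, ptr %p
      //   %v = trunc i64 %wide to i32
      // the load's cost is modeled on i32, the trunc's destination type,
      // rather than on the wider i64.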
      if (CostKind == TTI::TCK_CodeSize && LI->hasOneUse() &&
          !LoadType->isVectorTy()) {
        if (const TruncInst *TI = dyn_cast<TruncInst>(*LI->user_begin()))
          LoadType = TI->getDestTy();
      }
      return TargetTTI->getMemoryOpCost(Opcode, LoadType, LI->getAlign(),
                                        LI->getPointerAddressSpace(), CostKind,
                                        {TTI::OK_AnyValue, TTI::OP_None}, I);
    }
    case Instruction::Select: {
      const Value *Op0, *Op1;
      if (match(U, m_LogicalAnd(m_Value(Op0), m_Value(Op1))) ||
          match(U, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
        // select x, y, false --> x & y
        // select x, true, y --> x | y
        const auto Op1Info = TTI::getOperandInfo(Op0);
        const auto Op2Info = TTI::getOperandInfo(Op1);
        assert(Op0->getType()->getScalarSizeInBits() == 1 &&
               Op1->getType()->getScalarSizeInBits() == 1);

        SmallVector<const Value *, 2> Operands{Op0, Op1};
        return TargetTTI->getArithmeticInstrCost(
            match(U, m_LogicalOr()) ? Instruction::Or : Instruction::And, Ty,
            CostKind, Op1Info, Op2Info, Operands, I);
      }
      Type *CondTy = Operands[0]->getType();
      return TargetTTI->getCmpSelInstrCost(Opcode, U->getType(), CondTy,
                                           CmpInst::BAD_ICMP_PREDICATE,
                                           CostKind, I);
    }
    case Instruction::ICmp:
    case Instruction::FCmp: {
      Type *ValTy = Operands[0]->getType();
      // TODO: Also handle ICmp/FCmp constant expressions.
      return TargetTTI->getCmpSelInstrCost(Opcode, ValTy, U->getType(),
                                           I ? cast<CmpInst>(I)->getPredicate()
                                             : CmpInst::BAD_ICMP_PREDICATE,
                                           CostKind, I);
    }
    case Instruction::InsertElement: {
      auto *IE = dyn_cast<InsertElementInst>(U);
      if (!IE)
        return TTI::TCC_Basic; // FIXME
      unsigned Idx = -1;
      if (auto *CI = dyn_cast<ConstantInt>(Operands[2]))
        if (CI->getValue().getActiveBits() <= 32)
          Idx = CI->getZExtValue();
      return TargetTTI->getVectorInstrCost(*IE, Ty, CostKind, Idx);
    }
    case Instruction::ShuffleVector: {
      auto *Shuffle = dyn_cast<ShuffleVectorInst>(U);
      if (!Shuffle)
        return TTI::TCC_Basic; // FIXME

      auto *VecTy = cast<VectorType>(U->getType());
      auto *VecSrcTy = cast<VectorType>(Operands[0]->getType());
      int NumSubElts, SubIndex;

      if (Shuffle->changesLength()) {
        // Treat a 'subvector widening' as a free shuffle.
        if (Shuffle->increasesLength() && Shuffle->isIdentityWithPadding())
          return 0;

        if (Shuffle->isExtractSubvectorMask(SubIndex))
          return TargetTTI->getShuffleCost(TTI::SK_ExtractSubvector, VecSrcTy,
                                           Shuffle->getShuffleMask(), CostKind,
                                           SubIndex, VecTy, Operands);

        if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
          return TargetTTI->getShuffleCost(
              TTI::SK_InsertSubvector, VecTy, Shuffle->getShuffleMask(),
              CostKind, SubIndex,
              FixedVectorType::get(VecTy->getScalarType(), NumSubElts),
              Operands);

        int ReplicationFactor, VF;
        if (Shuffle->isReplicationMask(ReplicationFactor, VF)) {
          APInt DemandedDstElts =
              APInt::getZero(Shuffle->getShuffleMask().size());
          for (auto I : enumerate(Shuffle->getShuffleMask())) {
            if (I.value() != PoisonMaskElem)
              DemandedDstElts.setBit(I.index());
          }
          return TargetTTI->getReplicationShuffleCost(
              VecSrcTy->getElementType(), ReplicationFactor, VF,
              DemandedDstElts, CostKind);
        }

        return CostKind == TTI::TCK_RecipThroughput ? -1 : 1;
      }

      if (Shuffle->isIdentity())
        return 0;

      if (Shuffle->isReverse())
        return TargetTTI->getShuffleCost(TTI::SK_Reverse, VecTy,
                                         Shuffle->getShuffleMask(), CostKind, 0,
                                         nullptr, Operands);

      if (Shuffle->isSelect())
        return TargetTTI->getShuffleCost(TTI::SK_Select, VecTy,
                                         Shuffle->getShuffleMask(), CostKind, 0,
                                         nullptr, Operands);

      if (Shuffle->isTranspose())
        return TargetTTI->getShuffleCost(TTI::SK_Transpose, VecTy,
                                         Shuffle->getShuffleMask(), CostKind, 0,
                                         nullptr, Operands);

      if (Shuffle->isZeroEltSplat())
        return TargetTTI->getShuffleCost(TTI::SK_Broadcast, VecTy,
                                         Shuffle->getShuffleMask(), CostKind, 0,
                                         nullptr, Operands);

      if (Shuffle->isSingleSource())
        return TargetTTI->getShuffleCost(TTI::SK_PermuteSingleSrc, VecTy,
                                         Shuffle->getShuffleMask(), CostKind, 0,
                                         nullptr, Operands);

      if (Shuffle->isInsertSubvectorMask(NumSubElts, SubIndex))
        return TargetTTI->getShuffleCost(
            TTI::SK_InsertSubvector, VecTy, Shuffle->getShuffleMask(), CostKind,
            SubIndex, FixedVectorType::get(VecTy->getScalarType(), NumSubElts),
            Operands);

      if (Shuffle->isSplice(SubIndex))
        return TargetTTI->getShuffleCost(TTI::SK_Splice, VecTy,
                                         Shuffle->getShuffleMask(), CostKind,
                                         SubIndex, nullptr, Operands);

      return TargetTTI->getShuffleCost(TTI::SK_PermuteTwoSrc, VecTy,
                                       Shuffle->getShuffleMask(), CostKind, 0,
                                       nullptr, Operands);
    }
    case Instruction::ExtractElement: {
      auto *EEI = dyn_cast<ExtractElementInst>(U);
      if (!EEI)
        return TTI::TCC_Basic; // FIXME
      unsigned Idx = -1;
      if (auto *CI = dyn_cast<ConstantInt>(Operands[1]))
        if (CI->getValue().getActiveBits() <= 32)
          Idx = CI->getZExtValue();
      Type *DstTy = Operands[0]->getType();
      return TargetTTI->getVectorInstrCost(*EEI, DstTy, CostKind, Idx);
    }
    }

    // By default, just classify everything as 'basic', or -1 to indicate that
    // we don't know the throughput cost.
    return CostKind == TTI::TCK_RecipThroughput ? -1 : TTI::TCC_Basic;
  }

  bool isExpensiveToSpeculativelyExecute(const Instruction *I) {
    auto *TargetTTI = static_cast<T *>(this);
    SmallVector<const Value *, 4> Ops(I->operand_values());
    InstructionCost Cost = TargetTTI->getInstructionCost(
        I, Ops, TargetTransformInfo::TCK_SizeAndLatency);
    return Cost >= TargetTransformInfo::TCC_Expensive;
  }
};
} // namespace llvm

#endif
Analysis/LoopUnrollAnalyzer.h

//===- llvm/Analysis/LoopUnrollAnalyzer.h - Loop Unroll Analyzer-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements UnrolledInstAnalyzer class. It's used for predicting
// potential effects that loop unrolling might have, such as enabling constant
// propagation and other optimizations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPUNROLLANALYZER_H
#define LLVM_ANALYSIS_LOOPUNROLLANALYZER_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/InstVisitor.h"

// This class is used to get an estimate of the optimization effects that we
// could get from complete loop unrolling. It comes from the fact that some
// loads might be replaced with concrete constant values and that could trigger
// a chain of instruction simplifications.
//
// E.g. we might have:
//   int a[] = {0, 1, 0};
//   v = 0;
//   for (i = 0; i < 3; i++)
//     v += b[i]*a[i];
// If we completely unroll the loop, we would get:
//   v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2]
// Which then will be simplified to:
//   v = b[0]*0 + b[1]*1 + b[2]*0
// And finally:
//   v = b[1]
namespace llvm {
class Instruction;

class UnrolledInstAnalyzer : private InstVisitor<UnrolledInstAnalyzer, bool> {
  typedef InstVisitor<UnrolledInstAnalyzer, bool> Base;
  friend class InstVisitor<UnrolledInstAnalyzer, bool>;
  struct SimplifiedAddress {
    Value *Base = nullptr;
    ConstantInt *Offset = nullptr;
  };

public:
  UnrolledInstAnalyzer(unsigned Iteration,
                       DenseMap<Value *, Value *> &SimplifiedValues,
                       ScalarEvolution &SE, const Loop *L)
      : SimplifiedValues(SimplifiedValues), SE(SE), L(L) {
    IterationNumber = SE.getConstant(APInt(64, Iteration));
  }

  // Allow access to the initial visit method.
  using Base::visit;

private:
  /// A cache of pointer bases and constant-folded offsets corresponding
  /// to GEP (or derived from GEP) instructions.
  ///
  /// In order to find the base pointer one needs to perform non-trivial
  /// traversal of the corresponding SCEV expression, so it's good to have the
  /// results saved.
  DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses;

  /// SCEV expression corresponding to number of currently simulated
  /// iteration.
  const SCEV *IterationNumber;

  /// While we walk the loop instructions, we build up and maintain a mapping
  /// of simplified values specific to this iteration.  The idea is to propagate
  /// any special information we have about loads that can be replaced with
  /// constants after complete unrolling, and account for likely simplifications
  /// post-unrolling.
  DenseMap<Value *, Value *> &SimplifiedValues;

  ScalarEvolution &SE;
  const Loop *L;

  bool simplifyInstWithSCEV(Instruction *I);

  bool visitInstruction(Instruction &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitCastInst(CastInst &I);
  bool visitCmpInst(CmpInst &I);
  bool visitPHINode(PHINode &PN);
};
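
// Illustrative usage sketch, not part of this header: `Inst`, `SE`, and `L`
// are assumed to exist in the caller.
//
// \code
//   DenseMap<Value *, Value *> SimplifiedValues;
//   UnrolledInstAnalyzer Analyzer(/*Iteration=*/0, SimplifiedValues, SE, L);
//   // Returns true if the instruction is expected to simplify away on this
//   // simulated iteration.
//   bool Simplified = Analyzer.visit(Inst);
// \endcode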
} // end namespace llvm
#endif
Analysis/IndirectCallPromotionAnalysis.h

//===- IndirectCallPromotionAnalysis.h - Indirect call analysis -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Interface to identify indirect call promotion candidates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INDIRECTCALLPROMOTIONANALYSIS_H
#define LLVM_ANALYSIS_INDIRECTCALLPROMOTIONANALYSIS_H

#include "llvm/ProfileData/InstrProf.h"

namespace llvm {

class Instruction;

// Class for identifying profitable indirect call promotion candidates when
// the indirect-call value profile metadata is available.
class ICallPromotionAnalysis {
private:
  // Allocate space to read the profile annotation.
  std::unique_ptr<InstrProfValueData[]> ValueDataArray;

  // Count is the call count for the direct-call target.
  // TotalCount is the total call count for the indirect-call callsite.
  // RemainingCount is TotalCount minus the already-promoted direct-call count.
  // Returns true if we should promote this indirect-call target.
  bool isPromotionProfitable(uint64_t Count, uint64_t TotalCount,
                             uint64_t RemainingCount);

  // Returns the number of profitable candidates to promote for the
  // current ValueDataArray and the given \p Inst.
  uint32_t getProfitablePromotionCandidates(const Instruction *Inst,
                                            uint32_t NumVals,
                                            uint64_t TotalCount);

  // Noncopyable
  ICallPromotionAnalysis(const ICallPromotionAnalysis &other) = delete;
  ICallPromotionAnalysis &
  operator=(const ICallPromotionAnalysis &other) = delete;

public:
  ICallPromotionAnalysis();

  /// Returns reference to array of InstrProfValueData for the given
  /// instruction \p I.
  ///
  /// The \p NumVals, \p TotalCount and \p NumCandidates
  /// are set to the number of values in the array, the total profile count
  /// of the indirect call \p I, and the number of profitable candidates
  /// in the given array (which is sorted in reverse order of profitability).
  ///
  /// The returned array space is owned by this class, and overwritten on
  /// subsequent calls.
  ArrayRef<InstrProfValueData>
  getPromotionCandidatesForInstruction(const Instruction *I, uint32_t &NumVals,
                                       uint64_t &TotalCount,
                                       uint32_t &NumCandidates);
};
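
// Illustrative usage sketch, not part of this header: `Inst` is assumed to be
// an indirect call with value-profile metadata.
//
// \code
//   ICallPromotionAnalysis ICA;
//   uint32_t NumVals = 0, NumCandidates = 0;
//   uint64_t TotalCount = 0;
//   ArrayRef<InstrProfValueData> Candidates =
//       ICA.getPromotionCandidatesForInstruction(Inst, NumVals, TotalCount,
//                                                NumCandidates);
//   // The first NumCandidates entries are the profitable targets.
//   for (const InstrProfValueData &VD : Candidates.take_front(NumCandidates))
//     ; // consider promoting the target VD.Value with count VD.Count
// \endcode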

} // end namespace llvm

#endif
Analysis/BlockFrequencyInfoImpl.h

//==- BlockFrequencyInfoImpl.h - Block Frequency Implementation --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Shared implementation of BlockFrequency for IR and Machine Instructions.
// See the documentation below for BlockFrequencyInfoImpl for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
#define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/ScaledNumber.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iterator>
#include <limits>
#include <list>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "block-freq"

namespace llvm {
extern llvm::cl::opt<bool> CheckBFIUnknownBlockQueries;

extern llvm::cl::opt<bool> UseIterativeBFIInference;
extern llvm::cl::opt<unsigned> IterativeBFIMaxIterationsPerBlock;
extern llvm::cl::opt<double> IterativeBFIPrecision;

class BranchProbabilityInfo;
class Function;
class Loop;
class LoopInfo;
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineLoop;
class MachineLoopInfo;

namespace bfi_detail {

struct IrreducibleGraph;

// This is part of a workaround for a GCC 4.7 crash on lambdas.
template <class BT> struct BlockEdgesAdder;

/// Mass of a block.
///
/// This class implements a sort of fixed-point fraction always between 0.0 and
/// 1.0.  getMass() == std::numeric_limits<uint64_t>::max() indicates a value of
/// 1.0.
///
/// Masses can be added and subtracted.  Simple saturation arithmetic is used,
/// so arithmetic operations never overflow or underflow.
///
/// Masses can be multiplied.  Multiplication treats full mass as 1.0 and uses
/// an inexpensive floating-point algorithm that's off-by-one (almost, but not
/// quite, maximum precision).
///
/// Masses can be scaled by \a BranchProbability at maximum precision.
class BlockMass {
  uint64_t Mass = 0;

public:
  BlockMass() = default;
  explicit BlockMass(uint64_t Mass) : Mass(Mass) {}

  static BlockMass getEmpty() { return BlockMass(); }

  static BlockMass getFull() {
    return BlockMass(std::numeric_limits<uint64_t>::max());
  }

  uint64_t getMass() const { return Mass; }

  bool isFull() const { return Mass == std::numeric_limits<uint64_t>::max(); }
  bool isEmpty() const { return !Mass; }

  bool operator!() const { return isEmpty(); }

  /// Add another mass.
  ///
  /// Adds another mass, saturating at \a isFull() rather than overflowing.
  BlockMass &operator+=(BlockMass X) {
    uint64_t Sum = Mass + X.Mass;
    Mass = Sum < Mass ? std::numeric_limits<uint64_t>::max() : Sum;
    return *this;
  }

  /// Subtract another mass.
  ///
  /// Subtracts another mass, saturating at \a isEmpty() rather than
  /// underflowing.
  BlockMass &operator-=(BlockMass X) {
    uint64_t Diff = Mass - X.Mass;
    Mass = Diff > Mass ? 0 : Diff;
    return *this;
  }

  BlockMass &operator*=(BranchProbability P) {
    Mass = P.scale(Mass);
    return *this;
  }

  bool operator==(BlockMass X) const { return Mass == X.Mass; }
  bool operator!=(BlockMass X) const { return Mass != X.Mass; }
  bool operator<=(BlockMass X) const { return Mass <= X.Mass; }
  bool operator>=(BlockMass X) const { return Mass >= X.Mass; }
  bool operator<(BlockMass X) const { return Mass < X.Mass; }
  bool operator>(BlockMass X) const { return Mass > X.Mass; }

  /// Convert to scaled number.
  ///
  /// Convert to \a ScaledNumber.  \a isFull() gives 1.0, while \a isEmpty()
  /// gives slightly above 0.0.
  ScaledNumber<uint64_t> toScaled() const;

  void dump() const;
  raw_ostream &print(raw_ostream &OS) const;
};

inline BlockMass operator+(BlockMass L, BlockMass R) {
  return BlockMass(L) += R;
}
inline BlockMass operator-(BlockMass L, BlockMass R) {
  return BlockMass(L) -= R;
}
inline BlockMass operator*(BlockMass L, BranchProbability R) {
  return BlockMass(L) *= R;
}
inline BlockMass operator*(BranchProbability L, BlockMass R) {
  return BlockMass(R) *= L;
}

inline raw_ostream &operator<<(raw_ostream &OS, BlockMass X) {
  return X.print(OS);
}
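
// Illustrative sketch of the saturation behaviour described above:
//
// \code
//   BlockMass Half(std::numeric_limits<uint64_t>::max() / 2);
//   BlockMass Sum = Half + Half + Half; // saturates rather than overflowing
//   assert(Sum.isFull());
//   BlockMass Zero = BlockMass::getEmpty() - Half; // saturates at empty
//   assert(Zero.isEmpty());
// \endcode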

} // end namespace bfi_detail

/// Base class for BlockFrequencyInfoImpl
///
/// BlockFrequencyInfoImplBase has supporting data structures and some
/// algorithms for BlockFrequencyInfoImpl.  Only algorithms that depend on
/// the block type (or that call such algorithms) are skipped here.
///
/// Nevertheless, the majority of the overall algorithm documentation lives with
/// BlockFrequencyInfoImpl.  See there for details.
class BlockFrequencyInfoImplBase {
public:
  using Scaled64 = ScaledNumber<uint64_t>;
  using BlockMass = bfi_detail::BlockMass;

  /// Representative of a block.
  ///
  /// This is a simple wrapper around an index into the reverse-post-order
  /// traversal of the blocks.
  ///
  /// Unlike a block pointer, its order has meaning (location in the
  /// topological sort) and its class is the same regardless of block type.
  struct BlockNode {
    using IndexType = uint32_t;

    IndexType Index;

    BlockNode() : Index(std::numeric_limits<uint32_t>::max()) {}
    BlockNode(IndexType Index) : Index(Index) {}

    bool operator==(const BlockNode &X) const { return Index == X.Index; }
    bool operator!=(const BlockNode &X) const { return Index != X.Index; }
    bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
    bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
    bool operator<(const BlockNode &X) const { return Index < X.Index; }
    bool operator>(const BlockNode &X) const { return Index > X.Index; }

    bool isValid() const { return Index <= getMaxIndex(); }

    static size_t getMaxIndex() {
      return std::numeric_limits<uint32_t>::max() - 1;
    }
  };

  /// Stats about a block itself.
  struct FrequencyData {
    Scaled64 Scaled;
    uint64_t Integer;
  };

  /// Data about a loop.
  ///
  /// Contains the data necessary to represent a loop as a pseudo-node once it's
  /// packaged.
  struct LoopData {
    using ExitMap = SmallVector<std::pair<BlockNode, BlockMass>, 4>;
    using NodeList = SmallVector<BlockNode, 4>;
    using HeaderMassList = SmallVector<BlockMass, 1>;

    LoopData *Parent;            ///< The parent loop.
    bool IsPackaged = false;     ///< Whether this has been packaged.
    uint32_t NumHeaders = 1;     ///< Number of headers.
    ExitMap Exits;               ///< Successor edges (and weights).
    NodeList Nodes;              ///< Header and the members of the loop.
    HeaderMassList BackedgeMass; ///< Mass returned to each loop header.
    BlockMass Mass;
    Scaled64 Scale;

    LoopData(LoopData *Parent, const BlockNode &Header)
      : Parent(Parent), Nodes(1, Header), BackedgeMass(1) {}

    template <class It1, class It2>
    LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
             It2 LastOther)
        : Parent(Parent), Nodes(FirstHeader, LastHeader) {
      NumHeaders = Nodes.size();
      Nodes.insert(Nodes.end(), FirstOther, LastOther);
      BackedgeMass.resize(NumHeaders);
    }

    bool isHeader(const BlockNode &Node) const {
      if (isIrreducible())
        return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
                                  Node);
      return Node == Nodes[0];
    }

    BlockNode getHeader() const { return Nodes[0]; }
    bool isIrreducible() const { return NumHeaders > 1; }

    HeaderMassList::difference_type getHeaderIndex(const BlockNode &B) {
      assert(isHeader(B) && "this is only valid on loop header blocks");
      if (isIrreducible())
        return std::lower_bound(Nodes.begin(), Nodes.begin() + NumHeaders, B) -
               Nodes.begin();
      return 0;
    }

    NodeList::const_iterator members_begin() const {
      return Nodes.begin() + NumHeaders;
    }

    NodeList::const_iterator members_end() const { return Nodes.end(); }
    iterator_range<NodeList::const_iterator> members() const {
      return make_range(members_begin(), members_end());
    }
  };

  /// Index of loop information.
  struct WorkingData {
    BlockNode Node;           ///< This node.
    LoopData *Loop = nullptr; ///< The loop this block is inside.
    BlockMass Mass;           ///< Mass distribution from the entry block.

    WorkingData(const BlockNode &Node) : Node(Node) {}

    bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }

    bool isDoubleLoopHeader() const {
      return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
             Loop->Parent->isHeader(Node);
    }

    LoopData *getContainingLoop() const {
      if (!isLoopHeader())
        return Loop;
      if (!isDoubleLoopHeader())
        return Loop->Parent;
      return Loop->Parent->Parent;
    }

    /// Resolve a node to its representative.
    ///
    /// Get the node currently representing Node, which could be a containing
    /// loop.
    ///
    /// This function should only be called when distributing mass.  As long as
    /// there are no irreducible edges to Node, then it will have complexity
    /// O(1) in this context.
    ///
    /// In general, the complexity is O(L), where L is the number of loop
    /// headers Node has been packaged into.  Since this method is called in
    /// the context of distributing mass, L will be the number of loop headers
    /// an early exit edge jumps out of.
    BlockNode getResolvedNode() const {
      auto *L = getPackagedLoop();
      return L ? L->getHeader() : Node;
    }

    LoopData *getPackagedLoop() const {
      if (!Loop || !Loop->IsPackaged)
        return nullptr;
      auto *L = Loop;
      while (L->Parent && L->Parent->IsPackaged)
        L = L->Parent;
      return L;
    }

    /// Get the appropriate mass for a node.
    ///
    /// Get appropriate mass for Node.  If Node is a loop-header (whose loop
    /// has been packaged), returns the mass of its pseudo-node.  If it's a
    /// node inside a packaged loop, it returns the loop's mass.
    BlockMass &getMass() {
      if (!isAPackage())
        return Mass;
      if (!isADoublePackage())
        return Loop->Mass;
      return Loop->Parent->Mass;
    }

    /// Has ContainingLoop been packaged up?
    bool isPackaged() const { return getResolvedNode() != Node; }

    /// Has Loop been packaged up?
    bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }

    /// Has Loop been packaged up twice?
    bool isADoublePackage() const {
      return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
    }
  };

  /// Unscaled probability weight.
  ///
  /// Probability weight for an edge in the graph (including the
  /// successor/target node).
  ///
  /// All edges in the original function are 32-bit.  However, exit edges from
  /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
  /// space in general.
  ///
  /// In addition to the raw weight amount, Weight stores the type of the edge
  /// in the current context (i.e., the context of the loop being processed).
  /// Is this a local edge within the loop, an exit from the loop, or a
  /// backedge to the loop header?
  struct Weight {
    enum DistType { Local, Exit, Backedge };
    DistType Type = Local;
    BlockNode TargetNode;
    uint64_t Amount = 0;

    Weight() = default;
    Weight(DistType Type, BlockNode TargetNode, uint64_t Amount)
        : Type(Type), TargetNode(TargetNode), Amount(Amount) {}
  };

  /// Distribution of unscaled probability weight.
  ///
  /// Distribution of unscaled probability weight to a set of successors.
  ///
  /// This class collates the successor edge weights for later processing.
  ///
  /// \a DidOverflow indicates whether \a Total did overflow while adding to
  /// the distribution.  It should never overflow twice.
  struct Distribution {
    using WeightList = SmallVector<Weight, 4>;

    WeightList Weights;       ///< Individual successor weights.
    uint64_t Total = 0;       ///< Sum of all weights.
    bool DidOverflow = false; ///< Whether \a Total did overflow.

    Distribution() = default;

    void addLocal(const BlockNode &Node, uint64_t Amount) {
      add(Node, Amount, Weight::Local);
    }

    void addExit(const BlockNode &Node, uint64_t Amount) {
      add(Node, Amount, Weight::Exit);
    }

    void addBackedge(const BlockNode &Node, uint64_t Amount) {
      add(Node, Amount, Weight::Backedge);
    }

    /// Normalize the distribution.
    ///
    /// Combines multiple edges to the same \a Weight::TargetNode and scales
    /// down so that \a Total fits into 32-bits.
    ///
    /// This is linear in the size of \a Weights.  For the vast majority of
    /// cases, adjacent edge weights are combined by sorting WeightList and
    /// combining adjacent weights.  However, for very large edge lists an
    /// auxiliary hash table is used.
    void normalize();

  private:
    void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
  };
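
  // Illustrative sketch of collating successor weights (the node indices and
  // amounts are hypothetical):
  //
  // \code
  //   Distribution Dist;
  //   Dist.addLocal(BlockNode(3), 100);   // edge to a node within the loop
  //   Dist.addExit(BlockNode(7), 25);     // edge leaving the loop
  //   Dist.addBackedge(BlockNode(1), 75); // edge back to the loop header
  //   Dist.normalize(); // coalesce duplicates; scale Total to fit in 32 bits
  // \endcode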

  /// Data about each block.  This is used downstream.
  std::vector<FrequencyData> Freqs;

  /// Whether each block is an irreducible loop header.
  /// This is used downstream.
  SparseBitVector<> IsIrrLoopHeader;

  /// Loop data: see initializeLoops().
  std::vector<WorkingData> Working;

  /// Indexed information about loops.
  std::list<LoopData> Loops;

  /// Virtual destructor.
  ///
  /// Need a virtual destructor to mask the compiler warning about
  /// getBlockName().
  virtual ~BlockFrequencyInfoImplBase() = default;

  /// Add all edges out of a packaged loop to the distribution.
  ///
  /// Adds all edges from LocalLoopHead to Dist.  Calls addToDist() to add each
  /// successor edge.
  ///
  /// \return \c true unless there's an irreducible backedge.
  bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
                               Distribution &Dist);

  /// Add an edge to the distribution.
  ///
  /// Adds an edge to Succ to Dist.  If \c LoopHead.isValid(), then whether the
  /// edge is local/exit/backedge is in the context of LoopHead.  Otherwise,
  /// every edge should be a local edge (since all the loops are packaged up).
  ///
  /// \return \c true unless aborted due to an irreducible backedge.
  bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
                 const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);

  /// Analyze irreducible SCCs.
  ///
  /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
  /// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
  /// Insert them into \a Loops before \c Insert.
  ///
  /// \return the \c LoopData nodes representing the irreducible SCCs.
  iterator_range<std::list<LoopData>::iterator>
  analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
                     std::list<LoopData>::iterator Insert);

  /// Update a loop after packaging irreducible SCCs inside of it.
  ///
  /// Update \c OuterLoop.  Before finding irreducible control flow, it was
  /// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
  /// LoopData::BackedgeMass need to be reset.  Also, nodes that were packaged
  /// up need to be removed from \a OuterLoop::Nodes.
  void updateLoopWithIrreducible(LoopData &OuterLoop);

  /// Distribute mass according to a distribution.
  ///
  /// Distributes the mass in Source according to Dist.  If LoopHead.isValid(),
  /// backedges and exits are stored in its entry in Loops.
  ///
  /// Mass is distributed in parallel from two copies of the source mass.
  void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
                      Distribution &Dist);

  /// Compute the loop scale for a loop.
  void computeLoopScale(LoopData &Loop);

  /// Adjust the mass of all headers in an irreducible loop.
  ///
  /// Initially, irreducible loops are assumed to distribute their mass
  /// equally among their headers. This can lead to wrong frequency estimates
  /// since some headers may be executed more frequently than others.
  ///
  /// This adjusts header mass distribution so it matches the weights of
  /// the backedges going into each of the loop headers.
  void adjustLoopHeaderMass(LoopData &Loop);

  void distributeIrrLoopHeaderMass(Distribution &Dist);

  /// Package up a loop.
  void packageLoop(LoopData &Loop);

  /// Unwrap loops.
  void unwrapLoops();

  /// Finalize frequency metrics.
  ///
  /// Calculates final frequencies and cleans up no-longer-needed data
  /// structures.
  void finalizeMetrics();

  /// Clear all memory.
  void clear();

  virtual std::string getBlockName(const BlockNode &Node) const;
  std::string getLoopName(const LoopData &Loop) const;

  virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
  void dump() const { print(dbgs()); }

  Scaled64 getFloatingBlockFreq(const BlockNode &Node) const;

  BlockFrequency getBlockFreq(const BlockNode &Node) const;
  std::optional<uint64_t>
  getBlockProfileCount(const Function &F, const BlockNode &Node,
                       bool AllowSynthetic = false) const;
  std::optional<uint64_t>
  getProfileCountFromFreq(const Function &F, uint64_t Freq,
                          bool AllowSynthetic = false) const;
  bool isIrrLoopHeader(const BlockNode &Node);

  void setBlockFreq(const BlockNode &Node, uint64_t Freq);

  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
  raw_ostream &printBlockFreq(raw_ostream &OS,
                              const BlockFrequency &Freq) const;

  uint64_t getEntryFreq() const {
    assert(!Freqs.empty());
    return Freqs[0].Integer;
  }
};

namespace bfi_detail {

template <class BlockT> struct TypeMap {};
template <> struct TypeMap<BasicBlock> {
  using BlockT = BasicBlock;
  using BlockKeyT = AssertingVH<const BasicBlock>;
  using FunctionT = Function;
  using BranchProbabilityInfoT = BranchProbabilityInfo;
  using LoopT = Loop;
  using LoopInfoT = LoopInfo;
};
template <> struct TypeMap<MachineBasicBlock> {
  using BlockT = MachineBasicBlock;
  using BlockKeyT = const MachineBasicBlock *;
  using FunctionT = MachineFunction;
  using BranchProbabilityInfoT = MachineBranchProbabilityInfo;
  using LoopT = MachineLoop;
  using LoopInfoT = MachineLoopInfo;
};

template <class BlockT, class BFIImplT>
class BFICallbackVH;

/// Get the name of a MachineBasicBlock.
///
/// Get the name of a MachineBasicBlock.  It's templated so that including from
/// CodeGen is unnecessary (that would be a layering issue).
///
/// This is used mainly for debug output.  The name is similar to
/// MachineBasicBlock::getFullName(), but skips the name of the function.
template <class BlockT> std::string getBlockName(const BlockT *BB) {
  assert(BB && "Unexpected nullptr");
  auto MachineName = "BB" + Twine(BB->getNumber());
  if (BB->getBasicBlock())
    return (MachineName + "[" + BB->getName() + "]").str();
  return MachineName.str();
}
/// Get the name of a BasicBlock.
template <> inline std::string getBlockName(const BasicBlock *BB) {
  assert(BB && "Unexpected nullptr");
  return BB->getName().str();
}
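
// For example, a MachineBasicBlock numbered 3 whose originating IR block is
// named "entry" prints as "BB3[entry]"; without an IR block it prints as
// plain "BB3".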

/// Graph of irreducible control flow.
///
/// This graph is used for determining the SCCs in a loop (or top-level
/// function) that has irreducible control flow.
///
/// During the block frequency algorithm, the local graphs are defined in a
/// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
/// graphs for most edges, but getting others from \a LoopData::ExitMap.  The
/// latter only has successor information.
///
/// \a IrreducibleGraph makes this graph explicit.  It's in a form that can use
/// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
/// and it explicitly lists predecessors and successors.  The initialization
/// that relies on \c MachineBasicBlock is defined in the header.
struct IrreducibleGraph {
  using BFIBase = BlockFrequencyInfoImplBase;

  BFIBase &BFI;

  using BlockNode = BFIBase::BlockNode;
  struct IrrNode {
    BlockNode Node;
    unsigned NumIn = 0;
    std::deque<const IrrNode *> Edges;

    IrrNode(const BlockNode &Node) : Node(Node) {}

    using iterator = std::deque<const IrrNode *>::const_iterator;

    iterator pred_begin() const { return Edges.begin(); }
    iterator succ_begin() const { return Edges.begin() + NumIn; }
    iterator pred_end() const { return succ_begin(); }
    iterator succ_end() const { return Edges.end(); }
  };
  BlockNode Start;
  const IrrNode *StartIrr = nullptr;
  std::vector<IrrNode> Nodes;
  SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;

  /// Construct an explicit graph containing irreducible control flow.
  ///
  /// Construct an explicit graph of the control flow in \c OuterLoop (or the
  /// top-level function, if \c OuterLoop is \c nullptr).  Uses \c
  /// addBlockEdges to add block successors that have not been packaged into
  /// loops.
  ///
  /// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
  /// user of this.
  template <class BlockEdgesAdder>
  IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
                   BlockEdgesAdder addBlockEdges) : BFI(BFI) {
    initialize(OuterLoop, addBlockEdges);
  }

  template <class BlockEdgesAdder>
  void initialize(const BFIBase::LoopData *OuterLoop,
                  BlockEdgesAdder addBlockEdges);
  void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
  void addNodesInFunction();

  void addNode(const BlockNode &Node) {
    Nodes.emplace_back(Node);
    BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
  }

  void indexNodes();
  template <class BlockEdgesAdder>
  void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
                BlockEdgesAdder addBlockEdges);
  void addEdge(IrrNode &Irr, const BlockNode &Succ,
               const BFIBase::LoopData *OuterLoop);
};

template <class BlockEdgesAdder>
void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
                                  BlockEdgesAdder addBlockEdges) {
  if (OuterLoop) {
    addNodesInLoop(*OuterLoop);
    for (auto N : OuterLoop->Nodes)
      addEdges(N, OuterLoop, addBlockEdges);
  } else {
    addNodesInFunction();
    for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
      addEdges(Index, OuterLoop, addBlockEdges);
  }
  StartIrr = Lookup[Start.Index];
}

template <class BlockEdgesAdder>
void IrreducibleGraph::addEdges(const BlockNode &Node,
                                const BFIBase::LoopData *OuterLoop,
                                BlockEdgesAdder addBlockEdges) {
  auto L = Lookup.find(Node.Index);
  if (L == Lookup.end())
    return;
  IrrNode &Irr = *L->second;
  const auto &Working = BFI.Working[Node.Index];

  if (Working.isAPackage())
    for (const auto &I : Working.Loop->Exits)
      addEdge(Irr, I.first, OuterLoop);
  else
    addBlockEdges(*this, Irr, OuterLoop);
}

} // end namespace bfi_detail

/// Shared implementation for block frequency analysis.
///
/// This is a shared implementation of BlockFrequencyInfo and
/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
/// blocks.
///
/// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
/// which is called the header.  A given loop, L, can have sub-loops, which are
/// loops within the subgraph of L that exclude its header.  (A "trivial" SCC
/// consists of a single block that does not have a self-edge.)
///
/// In addition to loops, this algorithm has limited support for irreducible
/// SCCs, which are SCCs with multiple entry blocks.  Irreducible SCCs are
/// discovered on the fly, and modelled as loops with multiple headers.
///
/// The headers of an irreducible sub-SCC consist of its entry blocks and all
/// nodes that are targets of a backedge within it (excluding backedges within
/// true sub-loops).  Block frequency calculations act as if a block is
/// inserted that intercepts all the edges to the headers.  All backedges and
/// entries point to this block.  Its successors are the headers, which split
/// the frequency evenly.
///
/// This algorithm leverages BlockMass and ScaledNumber to maintain precision,
/// separates mass distribution from loop scaling, and dithers to eliminate
/// probability mass loss.
///
/// The implementation is split between BlockFrequencyInfoImpl, which knows the
/// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
/// BlockFrequencyInfoImplBase, which doesn't.  The base class uses \a
/// BlockNode, a wrapper around a uint32_t.  BlockNode is numbered from 0 in
/// reverse-post order.  This gives two advantages:  it's easy to compare the
/// relative ordering of two nodes, and maps keyed on BlockT can be represented
/// by vectors.
///
/// This algorithm is O(V+E), unless there is irreducible control flow, in
/// which case it's O(V*E) in the worst case.
///
/// These are the main stages:
///
///  0. Reverse post-order traversal (\a initializeRPOT()).
///
///     Run a single post-order traversal and save it (in reverse) in RPOT.
///     All other stages make use of this ordering.  Save a lookup from BlockT
///     to BlockNode (the index into RPOT) in Nodes.
///
///  1. Loop initialization (\a initializeLoops()).
///
///     Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
///     the algorithm.  In particular, store the immediate members of each loop
///     in reverse post-order.
///
///  2. Calculate mass and scale in loops (\a computeMassInLoops()).
///
///     For each loop (bottom-up), distribute mass through the DAG resulting
///     from ignoring backedges and treating sub-loops as a single pseudo-node.
///     Track the backedge mass distributed to the loop header, and use it to
///     calculate the loop scale (number of loop iterations).  Immediate
///     members that represent sub-loops will already have been visited and
///     packaged into a pseudo-node.
///
///     Distributing mass in a loop is a reverse-post-order traversal through
///     the loop.  Start by assigning full mass to the Loop header.  For each
///     node in the loop:
///
///         - Fetch and categorize the weight distribution for its successors.
///           If this is a packaged-subloop, the weight distribution is stored
///           in \a LoopData::Exits.  Otherwise, fetch it from
///           BranchProbabilityInfo.
///
///         - Each successor is categorized as \a Weight::Local, a local edge
///           within the current loop, \a Weight::Backedge, a backedge to the
///           loop header, or \a Weight::Exit, any successor outside the loop.
///           The weight, the successor, and its category are stored in \a
///           Distribution.  There can be multiple edges to each successor.
///
///         - If there's a backedge to a non-header, there's an irreducible SCC.
///           The usual flow is temporarily aborted.  \a
///           computeIrreducibleMass() finds the irreducible SCCs within the
///           loop, packages them up, and restarts the flow.
///
///         - Normalize the distribution:  scale weights down so that their sum
///           is 32-bits, and coalesce multiple edges to the same node.
///
///         - Distribute the mass accordingly, dithering to minimize mass loss,
///           as described in \a distributeMass().
///
///     In the case of irreducible loops, instead of a single loop header,
///     there will be several. The computation of backedge masses is similar
///     but instead of having a single backedge mass, there will be one
///     backedge per loop header. In these cases, each backedge will carry
///     a mass proportional to the edge weights along the corresponding
///     path.
///
///     At the end of propagation, the full mass assigned to the loop will be
///     distributed among the loop headers proportionally according to the
///     mass flowing through their backedges.
///
///     Finally, calculate the loop scale from the accumulated backedge mass.
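///
///     For example (illustrative numbers): if the header is assigned full
///     mass 1.0 and the backedges return a total mass of 0.75, the exit mass
///     is 0.25 and the loop scale is 1.0 / 0.25 = 4, i.e. the loop body is
///     expected to execute four times per entry.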
///
///  3. Distribute mass in the function (\a computeMassInFunction()).
///
///     Finally, distribute mass through the DAG resulting from packaging all
///     loops in the function.  This uses the same algorithm as distributing
///     mass in a loop, except that there are no exit or backedge edges.
///
///  4. Unpackage loops (\a unwrapLoops()).
///
///     Initialize each block's frequency to a floating point representation of
///     its mass.
///
///     Visit loops top-down, scaling the frequencies of its immediate members
///     by the loop's pseudo-node's frequency.
///
///  5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
///
///     Using the min and max frequencies as a guide, translate floating point
///     frequencies to an appropriate range in uint64_t.
///
/// It has some known flaws.
///
///   - The model of irreducible control flow is a rough approximation.
///
///     Modelling irreducible control flow exactly involves setting up and
///     solving a group of infinite geometric series.  Such precision is
///     unlikely to be worthwhile, since most of our algorithms give up on
///     irreducible control flow anyway.
///
///     Nevertheless, we might find that we need to get closer.  Here's a sort
///     of TODO list for the model with diminishing returns, to be completed as
///     necessary.
///
///       - The headers for the \a LoopData representing an irreducible SCC
///         include non-entry blocks.  When these extra blocks exist, they
///         indicate a self-contained irreducible sub-SCC.  We could treat them
///         as sub-loops, rather than arbitrarily shoving the problematic
///         blocks into the headers of the main irreducible SCC.
///
///       - Entry frequencies are assumed to be evenly split between the
///         headers of a given irreducible SCC, which is the only option if we
///         need to compute mass in the SCC before its parent loop.  Instead,
///         we could partially compute mass in the parent loop, and stop when
///         we get to the SCC.  Here, we have the correct ratio of entry
///         masses, which we can use to adjust their relative frequencies.
///         Compute mass in the SCC, and then continue propagation in the
///         parent.
///
///       - We can propagate mass iteratively through the SCC, for some fixed
///         number of iterations.  Each iteration starts by assigning the entry
///         blocks their backedge mass from the prior iteration.  The final
///         mass for each block (and each exit, and the total backedge mass
///         used for computing loop scale) is the sum of all iterations.
///         (Running this until fixed point would "solve" the geometric
///         series by simulation.)
template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
  // This is part of a workaround for a GCC 4.7 crash on lambdas.
  friend struct bfi_detail::BlockEdgesAdder<BT>;

  using BlockT = typename bfi_detail::TypeMap<BT>::BlockT;
  using BlockKeyT = typename bfi_detail::TypeMap<BT>::BlockKeyT;
  using FunctionT = typename bfi_detail::TypeMap<BT>::FunctionT;
  using BranchProbabilityInfoT =
      typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT;
  using LoopT = typename bfi_detail::TypeMap<BT>::LoopT;
  using LoopInfoT = typename bfi_detail::TypeMap<BT>::LoopInfoT;
  using Successor = GraphTraits<const BlockT *>;
  using Predecessor = GraphTraits<Inverse<const BlockT *>>;
  using BFICallbackVH =
      bfi_detail::BFICallbackVH<BlockT, BlockFrequencyInfoImpl>;

  const BranchProbabilityInfoT *BPI = nullptr;
  const LoopInfoT *LI = nullptr;
  const FunctionT *F = nullptr;

  // All blocks in reverse postorder.
  std::vector<const BlockT *> RPOT;
  DenseMap<BlockKeyT, std::pair<BlockNode, BFICallbackVH>> Nodes;

  using rpot_iterator = typename std::vector<const BlockT *>::const_iterator;

  rpot_iterator rpot_begin() const { return RPOT.begin(); }
  rpot_iterator rpot_end() const { return RPOT.end(); }

  size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }

  BlockNode getNode(const rpot_iterator &I) const {
    return BlockNode(getIndex(I));
  }

  BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB).first; }

  const BlockT *getBlock(const BlockNode &Node) const {
    assert(Node.Index < RPOT.size());
    return RPOT[Node.Index];
  }

  /// Run (and save) a post-order traversal.
  ///
  /// Saves a reverse post-order traversal of all the nodes in \a F.
  void initializeRPOT();

  /// Initialize loop data.
  ///
  /// Build up \a Loops using \a LoopInfo.  \a LoopInfo gives us a mapping from
  /// each block to the deepest loop it's in, but we need the inverse.  For each
  /// loop, we store in reverse post-order its "immediate" members, defined as
  /// the header, the headers of immediate sub-loops, and all other blocks in
  /// the loop that are not in sub-loops.
  void initializeLoops();

  /// Propagate to a block's successors.
  ///
  /// In the context of distributing mass through \c OuterLoop, divide the mass
  /// currently assigned to \c Node between its successors.
  ///
  /// \return \c true unless there's an irreducible backedge.
  bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);

  /// Compute mass in a particular loop.
  ///
  /// Assign mass to \c Loop's header, and then for each block in \c Loop in
  /// reverse post-order, distribute mass to its successors.  Only visits nodes
  /// that have not been packaged into sub-loops.
  ///
  /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
  /// \return \c true unless there's an irreducible backedge.
  bool computeMassInLoop(LoopData &Loop);

  /// Try to compute mass in the top-level function.
  ///
  /// Assign mass to the entry block, and then for each block in reverse
  /// post-order, distribute mass to its successors.  Skips nodes that have
  /// been packaged into loops.
  ///
  /// \pre \a computeMassInLoops() has been called.
  /// \return \c true unless there's an irreducible backedge.
  bool tryToComputeMassInFunction();

  /// Compute mass in (and package up) irreducible SCCs.
  ///
  /// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
  /// of \c Insert), and call \a computeMassInLoop() on each of them.
  ///
  /// If \c OuterLoop is \c nullptr, it refers to the top-level function.
  ///
  /// \pre \a computeMassInLoop() has been called for each subloop of \c
  /// OuterLoop.
  /// \pre \c Insert points at the last loop successfully processed by \a
  /// computeMassInLoop().
  /// \pre \c OuterLoop has irreducible SCCs.
  void computeIrreducibleMass(LoopData *OuterLoop,
                              std::list<LoopData>::iterator Insert);

  /// Compute mass in all loops.
  ///
  /// For each loop bottom-up, call \a computeMassInLoop().
  ///
  /// \a computeMassInLoop() aborts (and returns \c false) on loops that
  /// contain irreducible sub-SCCs.  Use \a computeIrreducibleMass() and then
  /// re-enter \a computeMassInLoop().
  ///
  /// \post \a computeMassInLoop() has returned \c true for every loop.
  void computeMassInLoops();

  /// Compute mass in the top-level function.
  ///
  /// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
  /// compute mass in the top-level function.
  ///
  /// \post \a tryToComputeMassInFunction() has returned \c true.
  void computeMassInFunction();

  std::string getBlockName(const BlockNode &Node) const override {
    return bfi_detail::getBlockName(getBlock(Node));
  }

  /// The current implementation for computing relative block frequencies does
  /// not correctly handle control-flow graphs containing irreducible loops. To
  /// resolve the problem, we apply a post-processing step that iteratively
  /// updates block frequencies based on the frequencies of their predecessors.
  /// This corresponds to finding the stationary point of the Markov chain by
  /// an iterative method, aka "PageRank computation".
  /// The algorithm takes at most O(|E| * IterativeBFIMaxIterationsPerBlock)
  /// steps but typically converges faster.
  ///
  /// Decide whether we want to apply iterative inference for a given function.
  bool needIterativeInference() const;

  /// Apply an iterative post-processing to infer correct counts for irr loops.
  void applyIterativeInference();

  using ProbMatrixType = std::vector<std::vector<std::pair<size_t, Scaled64>>>;

  /// Run iterative inference for a probability matrix and initial frequencies.
  void iterativeInference(const ProbMatrixType &ProbMatrix,
                          std::vector<Scaled64> &Freq) const;

  /// Find all blocks to apply inference on, that is, reachable from the entry
  /// and backward reachable from exits along edges with positive probability.
  void findReachableBlocks(std::vector<const BlockT *> &Blocks) const;

  /// Build a matrix of probabilities with transitions (edges) between the
  /// blocks: ProbMatrix[I] holds pairs (J, P), where Pr[J -> I | J] = P
  void initTransitionProbabilities(
      const std::vector<const BlockT *> &Blocks,
      const DenseMap<const BlockT *, size_t> &BlockIndex,
      ProbMatrixType &ProbMatrix) const;
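
  // Illustrative sketch (hypothetical indices): if block 0 branches to block 1
  // with probability 0.6 and to block 2 with probability 0.4, then
  // ProbMatrix[1] contains the pair (0, 0.6) and ProbMatrix[2] contains the
  // pair (0, 0.4).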

#ifndef NDEBUG
  /// Compute the discrepancy between current block frequencies and the
  /// probability matrix.
  Scaled64 discrepancy(const ProbMatrixType &ProbMatrix,
                       const std::vector<Scaled64> &Freq) const;
#endif

public:
  BlockFrequencyInfoImpl() = default;

  const FunctionT *getFunction() const { return F; }

  void calculate(const FunctionT &F, const BranchProbabilityInfoT &BPI,
                 const LoopInfoT &LI);

  using BlockFrequencyInfoImplBase::getEntryFreq;

  BlockFrequency getBlockFreq(const BlockT *BB) const {
    return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
  }

  std::optional<uint64_t>
  getBlockProfileCount(const Function &F, const BlockT *BB,
                       bool AllowSynthetic = false) const {
    return BlockFrequencyInfoImplBase::getBlockProfileCount(F, getNode(BB),
                                                            AllowSynthetic);
  }

  std::optional<uint64_t>
  getProfileCountFromFreq(const Function &F, uint64_t Freq,
                          bool AllowSynthetic = false) const {
    return BlockFrequencyInfoImplBase::getProfileCountFromFreq(F, Freq,
                                                               AllowSynthetic);
  }

  bool isIrrLoopHeader(const BlockT *BB) {
    return BlockFrequencyInfoImplBase::isIrrLoopHeader(getNode(BB));
  }

  void setBlockFreq(const BlockT *BB, uint64_t Freq);

  void forgetBlock(const BlockT *BB) {
    // We don't erase the corresponding entries from `Freqs`, `RPOT`, and other
    // structures, to avoid invalidating indices. Doing so would save some
    // memory, but it's not worth it.
    Nodes.erase(BB);
  }

  Scaled64 getFloatingBlockFreq(const BlockT *BB) const {
    return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
  }

  const BranchProbabilityInfoT &getBPI() const { return *BPI; }

  /// Print the frequencies for the current function.
  ///
  /// Prints the frequencies for the blocks in the current function.
  ///
  /// Blocks are printed in the natural iteration order of the function, rather
  /// than reverse post-order.  This provides two advantages:  writing -analyze
  /// tests is easier (since blocks come out in source order), and even
  /// unreachable blocks are printed.
  ///
  /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
  /// we need to override it here.
  raw_ostream &print(raw_ostream &OS) const override;

  using BlockFrequencyInfoImplBase::dump;
  using BlockFrequencyInfoImplBase::printBlockFreq;

  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
    return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
  }

  void verifyMatch(BlockFrequencyInfoImpl<BT> &Other) const;
};

namespace bfi_detail {

template <class BFIImplT>
class BFICallbackVH<BasicBlock, BFIImplT> : public CallbackVH {
  BFIImplT *BFIImpl;

public:
  BFICallbackVH() = default;

  BFICallbackVH(const BasicBlock *BB, BFIImplT *BFIImpl)
      : CallbackVH(BB), BFIImpl(BFIImpl) {}

  virtual ~BFICallbackVH() = default;

  void deleted() override {
    BFIImpl->forgetBlock(cast<BasicBlock>(getValPtr()));
  }
};

/// Dummy implementation since MachineBasicBlocks aren't Values, so ValueHandles
/// don't apply to them.
template <class BFIImplT>
class BFICallbackVH<MachineBasicBlock, BFIImplT> {
public:
  BFICallbackVH() = default;
  BFICallbackVH(const MachineBasicBlock *, BFIImplT *) {}
};

} // end namespace bfi_detail

template <class BT>
void BlockFrequencyInfoImpl<BT>::calculate(const FunctionT &F,
                                           const BranchProbabilityInfoT &BPI,
                                           const LoopInfoT &LI) {
  // Save the parameters.
  this->BPI = &BPI;
  this->LI = &LI;
  this->F = &F;

  // Clean up left-over data structures.
  BlockFrequencyInfoImplBase::clear();
  RPOT.clear();
  Nodes.clear();

  // Initialize.
  LLVM_DEBUG(dbgs() << "\nblock-frequency: " << F.getName()
                    << "\n================="
                    << std::string(F.getName().size(), '=') << "\n");
  initializeRPOT();
  initializeLoops();

  // Visit loops in post-order to find the local mass distribution, and then do
  // the full function.
  computeMassInLoops();
  computeMassInFunction();
  unwrapLoops();
  // Apply a post-processing step improving computed frequencies for functions
  // with irreducible loops.
  if (needIterativeInference())
    applyIterativeInference();
  finalizeMetrics();

  if (CheckBFIUnknownBlockQueries) {
    // To detect BFI queries for unknown blocks, add entries for unreachable
    // blocks, if any. This is to distinguish between known/existing unreachable
    // blocks and unknown blocks.
    for (const BlockT &BB : F)
      if (!Nodes.count(&BB))
        setBlockFreq(&BB, 0);
  }
}
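
// Example (a hedged sketch, not part of the interface): a typical driver for
// the IR instantiation, assuming BPI and LI have been computed for F:
//
//   BlockFrequencyInfoImpl<BasicBlock> BFI;
//   BFI.calculate(F, BPI, LI);
//   for (const BasicBlock &BB : F)
//     dbgs() << BFI.getBlockFreq(&BB).getFrequency() << "\n";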

template <class BT>
void BlockFrequencyInfoImpl<BT>::setBlockFreq(const BlockT *BB, uint64_t Freq) {
  if (Nodes.count(BB))
    BlockFrequencyInfoImplBase::setBlockFreq(getNode(BB), Freq);
  else {
    // If BB is a newly added block after BFI is done, we need to create a new
    // BlockNode for it assigned with a new index. The index can be determined
    // by the size of Freqs.
    BlockNode NewNode(Freqs.size());
    Nodes[BB] = {NewNode, BFICallbackVH(BB, this)};
    Freqs.emplace_back();
    BlockFrequencyInfoImplBase::setBlockFreq(NewNode, Freq);
  }
}

template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
  const BlockT *Entry = &F->front();
  RPOT.reserve(F->size());
  std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
  std::reverse(RPOT.begin(), RPOT.end());

  assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
         "More nodes in function than Block Frequency Info supports");

  LLVM_DEBUG(dbgs() << "reverse-post-order-traversal\n");
  for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
    BlockNode Node = getNode(I);
    LLVM_DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node)
                      << "\n");
    Nodes[*I] = {Node, BFICallbackVH(*I, this)};
  }

  Working.reserve(RPOT.size());
  for (size_t Index = 0; Index < RPOT.size(); ++Index)
    Working.emplace_back(Index);
  Freqs.resize(RPOT.size());
}

template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
  LLVM_DEBUG(dbgs() << "loop-detection\n");
  if (LI->empty())
    return;

  // Visit loops top down and assign them an index.
  std::deque<std::pair<const LoopT *, LoopData *>> Q;
  for (const LoopT *L : *LI)
    Q.emplace_back(L, nullptr);
  while (!Q.empty()) {
    const LoopT *Loop = Q.front().first;
    LoopData *Parent = Q.front().second;
    Q.pop_front();

    BlockNode Header = getNode(Loop->getHeader());
    assert(Header.isValid());

    Loops.emplace_back(Parent, Header);
    Working[Header.Index].Loop = &Loops.back();
    LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");

    for (const LoopT *L : *Loop)
      Q.emplace_back(L, &Loops.back());
  }

  // Visit nodes in reverse post-order and add them to their deepest containing
  // loop.
  for (size_t Index = 0; Index < RPOT.size(); ++Index) {
    // Loop headers have already been mostly mapped.
    if (Working[Index].isLoopHeader()) {
      LoopData *ContainingLoop = Working[Index].getContainingLoop();
      if (ContainingLoop)
        ContainingLoop->Nodes.push_back(Index);
      continue;
    }

    const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
    if (!Loop)
      continue;

    // Add this node to its containing loop's member list.
    BlockNode Header = getNode(Loop->getHeader());
    assert(Header.isValid());
    const auto &HeaderData = Working[Header.Index];
    assert(HeaderData.isLoopHeader());

    Working[Index].Loop = HeaderData.Loop;
    HeaderData.Loop->Nodes.push_back(Index);
    LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header)
                      << ": member = " << getBlockName(Index) << "\n");
  }
}

template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
  // Visit loops with the deepest first, and the top-level loops last.
  for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
    if (computeMassInLoop(*L))
      continue;
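    // Irreducible control flow inside the loop: package the irreducible
    // region(s) as new sub-loops (inserted at L.base()), reposition L on the
    // same loop, and retry the mass computation.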
    auto Next = std::next(L);
    computeIrreducibleMass(&*L, L.base());
    L = std::prev(Next);
    if (computeMassInLoop(*L))
      continue;
    llvm_unreachable("unhandled irreducible control flow");
  }
}

template <class BT>
bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
  // Compute mass in loop.
  LLVM_DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");

  if (Loop.isIrreducible()) {
    LLVM_DEBUG(dbgs() << "isIrreducible = true\n");
    Distribution Dist;
    unsigned NumHeadersWithWeight = 0;
    std::optional<uint64_t> MinHeaderWeight;
    DenseSet<uint32_t> HeadersWithoutWeight;
    HeadersWithoutWeight.reserve(Loop.NumHeaders);
    for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
      auto &HeaderNode = Loop.Nodes[H];
      const BlockT *Block = getBlock(HeaderNode);
      IsIrrLoopHeader.set(Loop.Nodes[H].Index);
      std::optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
      if (!HeaderWeight) {
        LLVM_DEBUG(dbgs() << "Missing irr loop header metadata on "
                          << getBlockName(HeaderNode) << "\n");
        HeadersWithoutWeight.insert(H);
        continue;
      }
      LLVM_DEBUG(dbgs() << getBlockName(HeaderNode)
                        << " has irr loop header weight " << *HeaderWeight
                        << "\n");
      NumHeadersWithWeight++;
      uint64_t HeaderWeightValue = *HeaderWeight;
      if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
        MinHeaderWeight = HeaderWeightValue;
      if (HeaderWeightValue) {
        Dist.addLocal(HeaderNode, HeaderWeightValue);
      }
    }
    // As a heuristic, if some headers don't have a weight, give them the
    // minimum weight seen among the other headers. Staying within the general
    // range of the existing weights avoids disrupting the trends too much, and
    // the minimum seems to perform better than the average.
    // FIXME: better to update the weights in the passes that drop them.
    // If no headers have a weight, give every header an even weight (weight 1).
    if (!MinHeaderWeight)
      MinHeaderWeight = 1;
    for (uint32_t H : HeadersWithoutWeight) {
      auto &HeaderNode = Loop.Nodes[H];
      assert(!getBlock(HeaderNode)->getIrrLoopHeaderWeight() &&
             "Shouldn't have a weight metadata");
      uint64_t MinWeight = *MinHeaderWeight;
      LLVM_DEBUG(dbgs() << "Giving weight " << MinWeight << " to "
                        << getBlockName(HeaderNode) << "\n");
      if (MinWeight)
        Dist.addLocal(HeaderNode, MinWeight);
    }
    distributeIrrLoopHeaderMass(Dist);
    for (const BlockNode &M : Loop.Nodes)
      if (!propagateMassToSuccessors(&Loop, M))
        llvm_unreachable("unhandled irreducible control flow");
    if (NumHeadersWithWeight == 0)
      // No headers have weight metadata. Adjust header mass.
      adjustLoopHeaderMass(Loop);
  } else {
    Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
    if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
      llvm_unreachable("irreducible control flow to loop header!?");
    for (const BlockNode &M : Loop.members())
      if (!propagateMassToSuccessors(&Loop, M))
        // Irreducible backedge.
        return false;
  }

  computeLoopScale(Loop);
  packageLoop(Loop);
  return true;
}

template <class BT>
bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
  // Compute mass in function.
  LLVM_DEBUG(dbgs() << "compute-mass-in-function\n");
  assert(!Working.empty() && "no blocks in function");
  assert(!Working[0].isLoopHeader() && "entry block is a loop header");

  Working[0].getMass() = BlockMass::getFull();
  for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
    // Check for nodes that have been packaged.
    BlockNode Node = getNode(I);
    if (Working[Node.Index].isPackaged())
      continue;

    if (!propagateMassToSuccessors(nullptr, Node))
      return false;
  }
  return true;
}

template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
  if (tryToComputeMassInFunction())
    return;
  computeIrreducibleMass(nullptr, Loops.begin());
  if (tryToComputeMassInFunction())
    return;
  llvm_unreachable("unhandled irreducible control flow");
}

template <class BT>
bool BlockFrequencyInfoImpl<BT>::needIterativeInference() const {
  if (!UseIterativeBFIInference)
    return false;
  if (!F->getFunction().hasProfileData())
    return false;
  // Apply iterative inference only if the function contains irreducible loops;
  // otherwise, computed block frequencies are reasonably correct.
  for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
    if (L->isIrreducible())
      return true;
  }
  return false;
}

template <class BT> void BlockFrequencyInfoImpl<BT>::applyIterativeInference() {
  // Extract blocks for processing: a block is considered for inference iff it
  // can be reached from the entry by edges with a positive probability.
  // Non-processed blocks are assigned a zero frequency and are ignored in the
  // computation.
  std::vector<const BlockT *> ReachableBlocks;
  findReachableBlocks(ReachableBlocks);
  if (ReachableBlocks.empty())
    return;

  // The map is used to index the successors/predecessors of reachable blocks
  // in the ReachableBlocks vector
  DenseMap<const BlockT *, size_t> BlockIndex;
  // Extract initial frequencies for the reachable blocks
  auto Freq = std::vector<Scaled64>(ReachableBlocks.size());
  Scaled64 SumFreq;
  for (size_t I = 0; I < ReachableBlocks.size(); I++) {
    const BlockT *BB = ReachableBlocks[I];
    BlockIndex[BB] = I;
    Freq[I] = getFloatingBlockFreq(BB);
    SumFreq += Freq[I];
  }
  assert(!SumFreq.isZero() && "empty initial block frequencies");

  LLVM_DEBUG(dbgs() << "Applying iterative inference for " << F->getName()
                    << " with " << ReachableBlocks.size() << " blocks\n");

  // Normalizing frequencies so they sum up to 1.0
  for (auto &Value : Freq) {
    Value /= SumFreq;
  }

  // Setting up edge probabilities using sparse matrix representation:
  // ProbMatrix[I] holds a vector of pairs (J, P) where Pr[J -> I | J] = P
  ProbMatrixType ProbMatrix;
  initTransitionProbabilities(ReachableBlocks, BlockIndex, ProbMatrix);

  // Run the propagation
  iterativeInference(ProbMatrix, Freq);

  // Assign computed frequency values
  for (const BlockT &BB : *F) {
    auto Node = getNode(&BB);
    if (!Node.isValid())
      continue;
    if (BlockIndex.count(&BB)) {
      Freqs[Node.Index].Scaled = Freq[BlockIndex[&BB]];
    } else {
      Freqs[Node.Index].Scaled = Scaled64::getZero();
    }
  }
}

template <class BT>
void BlockFrequencyInfoImpl<BT>::iterativeInference(
    const ProbMatrixType &ProbMatrix, std::vector<Scaled64> &Freq) const {
  assert(0.0 < IterativeBFIPrecision && IterativeBFIPrecision < 1.0 &&
         "incorrectly specified precision");
  // Convert double precision to Scaled64
  const auto Precision =
      Scaled64::getInverse(static_cast<uint64_t>(1.0 / IterativeBFIPrecision));
  const size_t MaxIterations = IterativeBFIMaxIterationsPerBlock * Freq.size();

#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "  Initial discrepancy = "
                    << discrepancy(ProbMatrix, Freq).toString() << "\n");
#endif

  // Successors[I] holds the unique successors of the I-th block
  auto Successors = std::vector<std::vector<size_t>>(Freq.size());
  for (size_t I = 0; I < Freq.size(); I++) {
    for (const auto &Jump : ProbMatrix[I]) {
      Successors[Jump.first].push_back(I);
    }
  }

  // To speed up the computation, we maintain a set of "active" blocks whose
  // frequencies need to be updated based on the incoming edges.
  // The set is dynamic and changes after every update. Initially, all blocks
  // with a positive frequency are active.
  auto IsActive = BitVector(Freq.size(), false);
  std::queue<size_t> ActiveSet;
  for (size_t I = 0; I < Freq.size(); I++) {
    if (Freq[I] > 0) {
      ActiveSet.push(I);
      IsActive[I] = true;
    }
  }

  // Iterate over the blocks propagating frequencies
  size_t It = 0;
  while (It++ < MaxIterations && !ActiveSet.empty()) {
    size_t I = ActiveSet.front();
    ActiveSet.pop();
    IsActive[I] = false;

    // Compute a new frequency for the block: NewFreq := Freq \times ProbMatrix.
    // Special care is taken for self-edges, whose contribution is handled by
    // scaling the result by (1.0 - SelfProb), where SelfProb is the total
    // probability of the block's self-edges.
    Scaled64 NewFreq;
    Scaled64 OneMinusSelfProb = Scaled64::getOne();
    for (const auto &Jump : ProbMatrix[I]) {
      if (Jump.first == I) {
        OneMinusSelfProb -= Jump.second;
      } else {
        NewFreq += Freq[Jump.first] * Jump.second;
      }
    }
    if (OneMinusSelfProb != Scaled64::getOne())
      NewFreq /= OneMinusSelfProb;
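
    // E.g., if a block receives mass 0.2 from its other predecessors and has a
    // self-edge of probability 0.5, its fixed point satisfies f = 0.2 + 0.5*f,
    // i.e., f = 0.2 / (1.0 - 0.5) = 0.4, which is what the division computes.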

    // If the block's frequency has changed enough, make sure the block and
    // its successors are in the active set.
    auto Change = Freq[I] >= NewFreq ? Freq[I] - NewFreq : NewFreq - Freq[I];
    if (Change > Precision) {
      ActiveSet.push(I);
      IsActive[I] = true;
      for (size_t Succ : Successors[I]) {
        if (!IsActive[Succ]) {
          ActiveSet.push(Succ);
          IsActive[Succ] = true;
        }
      }
    }

    // Update the frequency for the block
    Freq[I] = NewFreq;
  }

  LLVM_DEBUG(dbgs() << "  Completed " << It << " inference iterations"
                    << format(" (%0.0f per block)", double(It) / Freq.size())
                    << "\n");
#ifndef NDEBUG
  LLVM_DEBUG(dbgs() << "  Final   discrepancy = "
                    << discrepancy(ProbMatrix, Freq).toString() << "\n");
#endif
}

template <class BT>
void BlockFrequencyInfoImpl<BT>::findReachableBlocks(
    std::vector<const BlockT *> &Blocks) const {
  // Find all blocks to apply inference on, that is, blocks reachable from the
  // entry along edges with non-zero probabilities
  std::queue<const BlockT *> Queue;
  SmallPtrSet<const BlockT *, 8> Reachable;
  const BlockT *Entry = &F->front();
  Queue.push(Entry);
  Reachable.insert(Entry);
  while (!Queue.empty()) {
    const BlockT *SrcBB = Queue.front();
    Queue.pop();
    for (const BlockT *DstBB : children<const BlockT *>(SrcBB)) {
      auto EP = BPI->getEdgeProbability(SrcBB, DstBB);
      if (EP.isZero())
        continue;
      if (Reachable.insert(DstBB).second)
        Queue.push(DstBB);
    }
  }

  // Find all blocks to apply inference on, that is, blocks backward-reachable
  // from an exit block along (backward) edges with non-zero probabilities
  SmallPtrSet<const BlockT *, 8> InverseReachable;
  for (const BlockT &BB : *F) {
    // An exit block is a block without any successors
    bool HasSucc = GraphTraits<const BlockT *>::child_begin(&BB) !=
                   GraphTraits<const BlockT *>::child_end(&BB);
    if (!HasSucc && Reachable.count(&BB)) {
      Queue.push(&BB);
      InverseReachable.insert(&BB);
    }
  }
  while (!Queue.empty()) {
    const BlockT *SrcBB = Queue.front();
    Queue.pop();
    for (const BlockT *DstBB : children<Inverse<const BlockT *>>(SrcBB)) {
      auto EP = BPI->getEdgeProbability(DstBB, SrcBB);
      if (EP.isZero())
        continue;
      if (InverseReachable.insert(DstBB).second)
        Queue.push(DstBB);
    }
  }

  // Collect the result
  Blocks.reserve(F->size());
  for (const BlockT &BB : *F) {
    if (Reachable.count(&BB) && InverseReachable.count(&BB)) {
      Blocks.push_back(&BB);
    }
  }
}

template <class BT>
void BlockFrequencyInfoImpl<BT>::initTransitionProbabilities(
    const std::vector<const BlockT *> &Blocks,
    const DenseMap<const BlockT *, size_t> &BlockIndex,
    ProbMatrixType &ProbMatrix) const {
  const size_t NumBlocks = Blocks.size();
  auto Succs = std::vector<std::vector<std::pair<size_t, Scaled64>>>(NumBlocks);
  auto SumProb = std::vector<Scaled64>(NumBlocks);

  // Find unique successors and corresponding probabilities for every block
  for (size_t Src = 0; Src < NumBlocks; Src++) {
    const BlockT *BB = Blocks[Src];
    SmallPtrSet<const BlockT *, 2> UniqueSuccs;
    for (const auto SI : children<const BlockT *>(BB)) {
      // Ignore cold blocks
      if (BlockIndex.find(SI) == BlockIndex.end())
        continue;
      // Ignore parallel edges between BB and SI blocks
      if (!UniqueSuccs.insert(SI).second)
        continue;
      // Ignore jumps with zero probability
      auto EP = BPI->getEdgeProbability(BB, SI);
      if (EP.isZero())
        continue;

      auto EdgeProb =
          Scaled64::getFraction(EP.getNumerator(), EP.getDenominator());
      size_t Dst = BlockIndex.find(SI)->second;
      Succs[Src].push_back(std::make_pair(Dst, EdgeProb));
      SumProb[Src] += EdgeProb;
    }
  }

  // Add transitions for every jump with positive branch probability
  ProbMatrix = ProbMatrixType(NumBlocks);
  for (size_t Src = 0; Src < NumBlocks; Src++) {
    // Ignore blocks w/o successors
    if (Succs[Src].empty())
      continue;

    assert(!SumProb[Src].isZero() && "Zero sum probability of non-exit block");
    for (auto &Jump : Succs[Src]) {
      size_t Dst = Jump.first;
      Scaled64 Prob = Jump.second;
      ProbMatrix[Dst].push_back(std::make_pair(Src, Prob / SumProb[Src]));
    }
  }

  // Add transitions from sinks to the source
  size_t EntryIdx = BlockIndex.find(&F->front())->second;
  for (size_t Src = 0; Src < NumBlocks; Src++) {
    if (Succs[Src].empty()) {
      ProbMatrix[EntryIdx].push_back(std::make_pair(Src, Scaled64::getOne()));
    }
  }
}
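
// Note: after the normalization above and the added sink-to-entry transitions,
// every processed block's outgoing probabilities sum to one, so ProbMatrix
// encodes (column-wise) a Markov chain over the reachable blocks; the
// iterative inference effectively searches for its stationary distribution.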

#ifndef NDEBUG
template <class BT>
BlockFrequencyInfoImplBase::Scaled64 BlockFrequencyInfoImpl<BT>::discrepancy(
    const ProbMatrixType &ProbMatrix, const std::vector<Scaled64> &Freq) const {
  assert(Freq[0] > 0 && "Incorrectly computed frequency of the entry block");
  Scaled64 Discrepancy;
  for (size_t I = 0; I < ProbMatrix.size(); I++) {
    Scaled64 Sum;
    for (const auto &Jump : ProbMatrix[I]) {
      Sum += Freq[Jump.first] * Jump.second;
    }
    Discrepancy += Freq[I] >= Sum ? Freq[I] - Sum : Sum - Freq[I];
  }
  // Normalizing by the frequency of the entry block
  return Discrepancy / Freq[0];
}
#endif

/// \note This should be a lambda, but that crashes GCC 4.7.
namespace bfi_detail {

template <class BT> struct BlockEdgesAdder {
  using BlockT = BT;
  using LoopData = BlockFrequencyInfoImplBase::LoopData;
  using Successor = GraphTraits<const BlockT *>;

  const BlockFrequencyInfoImpl<BT> &BFI;

  explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
      : BFI(BFI) {}

  void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
                  const LoopData *OuterLoop) {
    const BlockT *BB = BFI.RPOT[Irr.Node.Index];
    for (const auto *Succ : children<const BlockT *>(BB))
      G.addEdge(Irr, BFI.getNode(Succ), OuterLoop);
  }
};

} // end namespace bfi_detail

template <class BT>
void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
    LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
  LLVM_DEBUG(dbgs() << "analyze-irreducible-in-";
             if (OuterLoop) dbgs()
             << "loop: " << getLoopName(*OuterLoop) << "\n";
             else dbgs() << "function\n");

  using namespace bfi_detail;

  // Ideally, addBlockEdges() would be declared here as a lambda, but that
  // crashes GCC 4.7.
  BlockEdgesAdder<BT> addBlockEdges(*this);
  IrreducibleGraph G(*this, OuterLoop, addBlockEdges);

  for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
    computeMassInLoop(L);

  if (!OuterLoop)
    return;
  updateLoopWithIrreducible(*OuterLoop);
}

// A helper function that converts a branch probability into a weight.
inline uint32_t getWeightFromBranchProb(const BranchProbability Prob) {
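  // BranchProbability has a fixed power-of-two denominator, so the numerator
  // alone is proportional to the probability and can serve as an integer
  // weight.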
  return Prob.getNumerator();
}

template <class BT>
bool
BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
                                                      const BlockNode &Node) {
  LLVM_DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
  // Calculate probability for successors.
  Distribution Dist;
  if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
    assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
    if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
      // Irreducible backedge.
      return false;
  } else {
    const BlockT *BB = getBlock(Node);
    for (auto SI = GraphTraits<const BlockT *>::child_begin(BB),
              SE = GraphTraits<const BlockT *>::child_end(BB);
         SI != SE; ++SI)
      if (!addToDist(
              Dist, OuterLoop, Node, getNode(*SI),
              getWeightFromBranchProb(BPI->getEdgeProbability(BB, SI))))
        // Irreducible backedge.
        return false;
  }

  // Distribute mass to successors, saving exit and backedge data in the
  // loop header.
  distributeMass(Node, OuterLoop, Dist);
  return true;
}

template <class BT>
raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
  if (!F)
    return OS;
  OS << "block-frequency-info: " << F->getName() << "\n";
  for (const BlockT &BB : *F) {
    OS << " - " << bfi_detail::getBlockName(&BB) << ": float = ";
    getFloatingBlockFreq(&BB).print(OS, 5)
        << ", int = " << getBlockFreq(&BB).getFrequency();
    if (std::optional<uint64_t> ProfileCount =
        BlockFrequencyInfoImplBase::getBlockProfileCount(
            F->getFunction(), getNode(&BB)))
      OS << ", count = " << *ProfileCount;
    if (std::optional<uint64_t> IrrLoopHeaderWeight =
            BB.getIrrLoopHeaderWeight())
      OS << ", irr_loop_header_weight = " << *IrrLoopHeaderWeight;
    OS << "\n";
  }

  // Add an extra newline for readability.
  OS << "\n";
  return OS;
}

template <class BT>
void BlockFrequencyInfoImpl<BT>::verifyMatch(
    BlockFrequencyInfoImpl<BT> &Other) const {
  bool Match = true;
  DenseMap<const BlockT *, BlockNode> ValidNodes;
  DenseMap<const BlockT *, BlockNode> OtherValidNodes;
  for (auto &Entry : Nodes) {
    const BlockT *BB = Entry.first;
    if (BB) {
      ValidNodes[BB] = Entry.second.first;
    }
  }
  for (auto &Entry : Other.Nodes) {
    const BlockT *BB = Entry.first;
    if (BB) {
      OtherValidNodes[BB] = Entry.second.first;
    }
  }
  unsigned NumValidNodes = ValidNodes.size();
  unsigned NumOtherValidNodes = OtherValidNodes.size();
  if (NumValidNodes != NumOtherValidNodes) {
    Match = false;
    dbgs() << "Number of blocks mismatch: " << NumValidNodes << " vs "
           << NumOtherValidNodes << "\n";
  } else {
    for (auto &Entry : ValidNodes) {
      const BlockT *BB = Entry.first;
      BlockNode Node = Entry.second;
      if (OtherValidNodes.count(BB)) {
        BlockNode OtherNode = OtherValidNodes[BB];
        const auto &Freq = Freqs[Node.Index];
        const auto &OtherFreq = Other.Freqs[OtherNode.Index];
        if (Freq.Integer != OtherFreq.Integer) {
          Match = false;
          dbgs() << "Freq mismatch: " << bfi_detail::getBlockName(BB) << " "
                 << Freq.Integer << " vs " << OtherFreq.Integer << "\n";
        }
      } else {
        Match = false;
        dbgs() << "Block " << bfi_detail::getBlockName(BB) << " index "
               << Node.Index << " does not exist in Other.\n";
      }
    }
    // If there's a valid node in OtherValidNodes that's not in ValidNodes,
    // either the count check above or the per-block lookup will fail.
  }
  if (!Match) {
    dbgs() << "This\n";
    print(dbgs());
    dbgs() << "Other\n";
    Other.print(dbgs());
  }
  assert(Match && "BFI mismatch");
}

// Graph trait base class for block frequency information graph
// viewer.

enum GVDAGType { GVDT_None, GVDT_Fraction, GVDT_Integer, GVDT_Count };

template <class BlockFrequencyInfoT, class BranchProbabilityInfoT>
struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {
  using GTraits = GraphTraits<BlockFrequencyInfoT *>;
  using NodeRef = typename GTraits::NodeRef;
  using EdgeIter = typename GTraits::ChildIteratorType;
  using NodeIter = typename GTraits::nodes_iterator;

  uint64_t MaxFrequency = 0;

  explicit BFIDOTGraphTraitsBase(bool isSimple = false)
      : DefaultDOTGraphTraits(isSimple) {}

  static StringRef getGraphName(const BlockFrequencyInfoT *G) {
    return G->getFunction()->getName();
  }

  std::string getNodeAttributes(NodeRef Node, const BlockFrequencyInfoT *Graph,
                                unsigned HotPercentThreshold = 0) {
    std::string Result;
    if (!HotPercentThreshold)
      return Result;

    // Compute MaxFrequency on the fly:
    if (!MaxFrequency) {
      for (NodeIter I = GTraits::nodes_begin(Graph),
                    E = GTraits::nodes_end(Graph);
           I != E; ++I) {
        NodeRef N = *I;
        MaxFrequency =
            std::max(MaxFrequency, Graph->getBlockFreq(N).getFrequency());
      }
    }
    BlockFrequency Freq = Graph->getBlockFreq(Node);
    BlockFrequency HotFreq =
        (BlockFrequency(MaxFrequency) *
         BranchProbability::getBranchProbability(HotPercentThreshold, 100));

    if (Freq < HotFreq)
      return Result;

    raw_string_ostream OS(Result);
    OS << "color=\"red\"";
    OS.flush();
    return Result;
  }

  std::string getNodeLabel(NodeRef Node, const BlockFrequencyInfoT *Graph,
                           GVDAGType GType, int layout_order = -1) {
    std::string Result;
    raw_string_ostream OS(Result);

    if (layout_order != -1)
      OS << Node->getName() << "[" << layout_order << "] : ";
    else
      OS << Node->getName() << " : ";
    switch (GType) {
    case GVDT_Fraction:
      Graph->printBlockFreq(OS, Node);
      break;
    case GVDT_Integer:
      OS << Graph->getBlockFreq(Node).getFrequency();
      break;
    case GVDT_Count: {
      auto Count = Graph->getBlockProfileCount(Node);
      if (Count)
        OS << *Count;
      else
        OS << "Unknown";
      break;
    }
    case GVDT_None:
      llvm_unreachable("If we are not supposed to render a graph we should "
                       "never reach this point.");
    }
    return Result;
  }

  std::string getEdgeAttributes(NodeRef Node, EdgeIter EI,
                                const BlockFrequencyInfoT *BFI,
                                const BranchProbabilityInfoT *BPI,
                                unsigned HotPercentThreshold = 0) {
    std::string Str;
    if (!BPI)
      return Str;

    BranchProbability BP = BPI->getEdgeProbability(Node, EI);
    uint32_t N = BP.getNumerator();
    uint32_t D = BP.getDenominator();
    double Percent = 100.0 * N / D;
    raw_string_ostream OS(Str);
    OS << format("label=\"%.1f%%\"", Percent);

    if (HotPercentThreshold) {
      BlockFrequency EFreq = BFI->getBlockFreq(Node) * BP;
      BlockFrequency HotFreq = BlockFrequency(MaxFrequency) *
                               BranchProbability(HotPercentThreshold, 100);

      if (EFreq >= HotFreq) {
        OS << ",color=\"red\"";
      }
    }

    OS.flush();
    return Str;
  }
};

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
Analysis/CodeMetrics.h
//===- CodeMetrics.h - Code cost measurements -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements various weight measurements for code, helping
// the Inliner and other passes decide whether to duplicate its contents.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CODEMETRICS_H
#define LLVM_ANALYSIS_CODEMETRICS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/InstructionCost.h"

namespace llvm {
class AssumptionCache;
class BasicBlock;
class Loop;
class Function;
template <class T> class SmallPtrSetImpl;
class TargetTransformInfo;
class Value;

/// Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.
struct CodeMetrics {
  /// True if this function contains a call to setjmp or other functions
  /// with attribute "returns twice" without having the attribute itself.
  bool exposesReturnsTwice = false;

  /// True if this function calls itself.
  bool isRecursive = false;

  /// True if this function cannot be duplicated.
  ///
  /// True if this function contains one or more indirect branches, or it contains
  /// one or more 'noduplicate' instructions.
  bool notDuplicatable = false;

  /// True if this function contains a call to a convergent function.
  bool convergent = false;

  /// True if this function calls alloca (in the C sense).
  bool usesDynamicAlloca = false;

  /// Code size cost of the analyzed blocks.
  InstructionCost NumInsts = 0;

  /// Number of analyzed blocks.
  unsigned NumBlocks = 0;

  /// Keeps track of basic block code size estimates.
  DenseMap<const BasicBlock *, InstructionCost> NumBBInsts;

  /// Keep track of the number of calls to 'big' functions.
  unsigned NumCalls = 0;

  /// The number of calls to internal functions with a single caller.
  ///
  /// These are likely targets for future inlining, likely exposed by
  /// interleaved devirtualization.
  unsigned NumInlineCandidates = 0;

  /// How many instructions produce vector values.
  ///
  /// The inliner is more aggressive with inlining vector kernels.
  unsigned NumVectorInsts = 0;

  /// How many 'ret' instructions the blocks contain.
  unsigned NumRets = 0;

  /// Add information about a block to the current state.
  void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
                         const SmallPtrSetImpl<const Value *> &EphValues,
                         bool PrepareForLTO = false);

  /// Collect a loop's ephemeral values (those used only by an assume
  /// or similar intrinsics in the loop).
  static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
                                     SmallPtrSetImpl<const Value *> &EphValues);

  /// Collect a function's ephemeral values (those used only by an
  /// assume or similar intrinsics in the function).
  static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
                                     SmallPtrSetImpl<const Value *> &EphValues);
};
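
// Example (a hedged sketch): accumulating metrics over a loop's blocks,
// assuming `L`, `AC`, and `TTI` are available from the surrounding pass:
//
//   SmallPtrSet<const Value *, 32> EphValues;
//   CodeMetrics::collectEphemeralValues(L, &AC, EphValues);
//   CodeMetrics Metrics;
//   for (BasicBlock *BB : L->blocks())
//     Metrics.analyzeBasicBlock(BB, TTI, EphValues);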

} // end namespace llvm

#endif // LLVM_ANALYSIS_CODEMETRICS_H
Analysis/MLModelRunner.h
//===- MLModelRunner.h ---- ML model runner interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_MLMODELRUNNER_H
#define LLVM_ANALYSIS_MLMODELRUNNER_H

#include "llvm/Analysis/TensorSpec.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LLVMContext;

/// MLModelRunner interface: abstraction of a mechanism for evaluating a
/// TensorFlow "saved model".
/// NOTE: feature indices are expected to be consistent across all
/// MLModelRunners (pertaining to the same model), and also Loggers (see
/// TFUtils.h)
class MLModelRunner {
public:
  // Disallows copy and assign.
  MLModelRunner(const MLModelRunner &) = delete;
  MLModelRunner &operator=(const MLModelRunner &) = delete;
  virtual ~MLModelRunner() = default;

  template <typename T> T evaluate() {
    return *reinterpret_cast<T *>(evaluateUntyped());
  }

  template <typename T, typename I> T *getTensor(I FeatureID) {
    return reinterpret_cast<T *>(
        getTensorUntyped(static_cast<size_t>(FeatureID)));
  }

  template <typename T, typename I> const T *getTensor(I FeatureID) const {
    return reinterpret_cast<const T *>(
        getTensorUntyped(static_cast<size_t>(FeatureID)));
  }

  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }
  const void *getTensorUntyped(size_t Index) const {
    return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
  }

  enum class Kind : int { Unknown, Release, Development, NoOp, Interactive };
  Kind getKind() const { return Type; }
  virtual void switchContext(StringRef Name) {}

protected:
  MLModelRunner(LLVMContext &Ctx, Kind Type, size_t NrInputs)
      : Ctx(Ctx), Type(Type), InputBuffers(NrInputs) {
    assert(Type != Kind::Unknown);
  }
  virtual void *evaluateUntyped() = 0;

  void setUpBufferForTensor(size_t Index, const TensorSpec &Spec,
                            void *Buffer) {
    if (!Buffer) {
      OwnedBuffers.emplace_back(Spec.getTotalTensorBufferSize());
      Buffer = OwnedBuffers.back().data();
    }
    InputBuffers[Index] = Buffer;
  }

  LLVMContext &Ctx;
  const Kind Type;

private:
  std::vector<void *> InputBuffers;
  std::vector<std::vector<char>> OwnedBuffers;
};
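
// Example (a hedged sketch): a concrete runner is typically driven as below,
// assuming a project-specific FeatureIndex enum and a Runner instance:
//
//   *Runner.getTensor<int64_t>(FeatureIndex::SomeFeature) = FeatureValue;
//   int64_t Advice = Runner.evaluate<int64_t>();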
} // namespace llvm

#endif // LLVM_ANALYSIS_MLMODELRUNNER_H
Analysis/ModuleSummaryAnalysis.h
//===- ModuleSummaryAnalysis.h - Module summary index builder ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface to build a ModuleSummaryIndex for a module.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
#define LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H

#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <functional>
#include <optional>

namespace llvm {

class BlockFrequencyInfo;
class Function;
class Module;
class ProfileSummaryInfo;
class StackSafetyInfo;

/// Direct function to compute a \c ModuleSummaryIndex from a given module.
///
/// If operating within a pass manager which has defined ways to compute the \c
/// BlockFrequencyInfo for a given function, that can be provided via
/// a std::function callback. Otherwise, this routine will manually construct
/// that information.
ModuleSummaryIndex buildModuleSummaryIndex(
    const Module &M,
    std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
    ProfileSummaryInfo *PSI,
    std::function<const StackSafetyInfo *(const Function &F)> GetSSICallback =
        [](const Function &F) -> const StackSafetyInfo * { return nullptr; });
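
// Example (a hedged sketch): building an index without a pass-manager-provided
// BFI callback, assuming a ProfileSummaryInfo PSI is at hand:
//
//   ModuleSummaryIndex Index =
//       buildModuleSummaryIndex(M, /*GetBFICallback=*/nullptr, &PSI);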

/// Analysis pass to provide the ModuleSummaryIndex object.
class ModuleSummaryIndexAnalysis
    : public AnalysisInfoMixin<ModuleSummaryIndexAnalysis> {
  friend AnalysisInfoMixin<ModuleSummaryIndexAnalysis>;

  static AnalysisKey Key;

public:
  using Result = ModuleSummaryIndex;

  Result run(Module &M, ModuleAnalysisManager &AM);
};

/// Legacy wrapper pass to provide the ModuleSummaryIndex object.
class ModuleSummaryIndexWrapperPass : public ModulePass {
  std::optional<ModuleSummaryIndex> Index;

public:
  static char ID;

  ModuleSummaryIndexWrapperPass();

  /// Get the index built by the pass.
  ModuleSummaryIndex &getIndex() { return *Index; }
  const ModuleSummaryIndex &getIndex() const { return *Index; }

  bool runOnModule(Module &M) override;
  bool doFinalization(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

//===--------------------------------------------------------------------===//
//
// createModuleSummaryIndexWrapperPass - This pass builds a ModuleSummaryIndex
// object for the module, to be written to bitcode or LLVM assembly.
//
ModulePass *createModuleSummaryIndexWrapperPass();

/// Legacy wrapper pass to provide an externally constructed ModuleSummaryIndex
/// object.
class ImmutableModuleSummaryIndexWrapperPass : public ImmutablePass {
  const ModuleSummaryIndex *Index;

public:
  static char ID;

  ImmutableModuleSummaryIndexWrapperPass(
      const ModuleSummaryIndex *Index = nullptr);
  const ModuleSummaryIndex *getIndex() const { return Index; }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

//===--------------------------------------------------------------------===//
//
// ImmutableModuleSummaryIndexWrapperPass - This pass wraps a provided
// ModuleSummaryIndex object for the module, to be used by other passes.
//
ImmutablePass *
createImmutableModuleSummaryIndexWrapperPass(const ModuleSummaryIndex *Index);

/// Returns true if the instruction could have memprof metadata, used to ensure
/// consistency between summary analysis and the ThinLTO backend processing.
bool mayHaveMemprofSummary(const CallBase *CB);

} // end namespace llvm

#endif // LLVM_ANALYSIS_MODULESUMMARYANALYSIS_H
Analysis/VecFuncs.def
//===-- VecFuncs.def - Library information -------------*- C++ -*-----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// This .def file creates mappings from scalar math functions to vector
// functions along with their vectorization factor. The current support
// includes such mappings for the Accelerate framework, the MASS vector
// library, and the SVML library. This .def file also allows creating an array
// of the vector functions supported by the specified framework or library.

#if defined(TLI_DEFINE_MASSV_VECFUNCS_NAMES)
#define TLI_DEFINE_MASSV_VECFUNCS
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF) VEC,
#endif

#define FIXED(NL) ElementCount::getFixed(NL)
#define SCALABLE(NL) ElementCount::getScalable(NL)
#define NOMASK false
#define MASKED true

#if !(defined(TLI_DEFINE_VECFUNC))
#define TLI_DEFINE_VECFUNC(SCAL, VEC, VF) {SCAL, VEC, VF, NOMASK},
#endif
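
// Example (a hedged sketch): a consumer typically materializes a table by
// defining one of the library macros before including this file, e.g.:
//
//   #define TLI_DEFINE_MASSV_VECFUNCS
//   static const VecDesc VecFuncs[] = {
//   #include "llvm/Analysis/VecFuncs.def"
//   };
//
// where VecDesc is the consumer's record type for
// {scalar-name, vector-name, VF, masked}.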

#if defined(TLI_DEFINE_ACCELERATE_VECFUNCS)
// Accelerate framework's Vector Functions

// Floating-Point Arithmetic and Auxiliary Functions
TLI_DEFINE_VECFUNC("ceilf", "vceilf", FIXED(4))
TLI_DEFINE_VECFUNC("fabsf", "vfabsf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.fabs.f32", "vfabsf", FIXED(4))
TLI_DEFINE_VECFUNC("floorf", "vfloorf", FIXED(4))
TLI_DEFINE_VECFUNC("sqrtf", "vsqrtf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sqrt.f32", "vsqrtf", FIXED(4))

// Exponential and Logarithmic Functions
TLI_DEFINE_VECFUNC("expf", "vexpf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp.f32", "vexpf", FIXED(4))
TLI_DEFINE_VECFUNC("expm1f", "vexpm1f", FIXED(4))
TLI_DEFINE_VECFUNC("logf", "vlogf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log.f32", "vlogf", FIXED(4))
TLI_DEFINE_VECFUNC("log1pf", "vlog1pf", FIXED(4))
TLI_DEFINE_VECFUNC("log10f", "vlog10f", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log10.f32", "vlog10f", FIXED(4))
TLI_DEFINE_VECFUNC("logbf", "vlogbf", FIXED(4))

// Trigonometric Functions
TLI_DEFINE_VECFUNC("sinf", "vsinf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sin.f32", "vsinf", FIXED(4))
TLI_DEFINE_VECFUNC("cosf", "vcosf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.cos.f32", "vcosf", FIXED(4))
TLI_DEFINE_VECFUNC("tanf", "vtanf", FIXED(4))
TLI_DEFINE_VECFUNC("asinf", "vasinf", FIXED(4))
TLI_DEFINE_VECFUNC("acosf", "vacosf", FIXED(4))
TLI_DEFINE_VECFUNC("atanf", "vatanf", FIXED(4))

// Hyperbolic Functions
TLI_DEFINE_VECFUNC("sinhf", "vsinhf", FIXED(4))
TLI_DEFINE_VECFUNC("coshf", "vcoshf", FIXED(4))
TLI_DEFINE_VECFUNC("tanhf", "vtanhf", FIXED(4))
TLI_DEFINE_VECFUNC("asinhf", "vasinhf", FIXED(4))
TLI_DEFINE_VECFUNC("acoshf", "vacoshf", FIXED(4))
TLI_DEFINE_VECFUNC("atanhf", "vatanhf", FIXED(4))

#elif defined(TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS)
// Darwin libsystem_m vector functions.

// Exponential and Logarithmic Functions
TLI_DEFINE_VECFUNC("exp", "_simd_exp_d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.exp.f64", "_simd_exp_d2", FIXED(2))
TLI_DEFINE_VECFUNC("expf", "_simd_exp_f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp.f32", "_simd_exp_f4", FIXED(4))

// Trigonometric Functions
TLI_DEFINE_VECFUNC("acos", "_simd_acos_d2", FIXED(2))
TLI_DEFINE_VECFUNC("acosf", "_simd_acos_f4", FIXED(4))
TLI_DEFINE_VECFUNC("asin", "_simd_asin_d2", FIXED(2))
TLI_DEFINE_VECFUNC("asinf", "_simd_asin_f4", FIXED(4))

TLI_DEFINE_VECFUNC("atan", "_simd_atan_d2", FIXED(2))
TLI_DEFINE_VECFUNC("atanf", "_simd_atan_f4", FIXED(4))
TLI_DEFINE_VECFUNC("atan2", "_simd_atan2_d2", FIXED(2))
TLI_DEFINE_VECFUNC("atan2f", "_simd_atan2_f4", FIXED(4))

TLI_DEFINE_VECFUNC("cos", "_simd_cos_d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.cos.f64", "_simd_cos_d2", FIXED(2))
TLI_DEFINE_VECFUNC("cosf", "_simd_cos_f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.cos.f32", "_simd_cos_f4", FIXED(4))

TLI_DEFINE_VECFUNC("sin", "_simd_sin_d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.sin.f64", "_simd_sin_d2", FIXED(2))
TLI_DEFINE_VECFUNC("sinf", "_simd_sin_f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sin.f32", "_simd_sin_f4", FIXED(4))

// Floating-Point Arithmetic and Auxiliary Functions
TLI_DEFINE_VECFUNC("cbrt", "_simd_cbrt_d2", FIXED(2))
TLI_DEFINE_VECFUNC("cbrtf", "_simd_cbrt_f4", FIXED(4))
TLI_DEFINE_VECFUNC("erf", "_simd_erf_d2", FIXED(2))
TLI_DEFINE_VECFUNC("erff", "_simd_erf_f4", FIXED(4))
TLI_DEFINE_VECFUNC("pow", "_simd_pow_d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.pow.f64", "_simd_pow_d2", FIXED(2))
TLI_DEFINE_VECFUNC("powf", "_simd_pow_f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.pow.f32", "_simd_pow_f4", FIXED(4))

// Hyperbolic Functions
TLI_DEFINE_VECFUNC("sinh", "_simd_sinh_d2", FIXED(2))
TLI_DEFINE_VECFUNC("sinhf", "_simd_sinh_f4", FIXED(4))
TLI_DEFINE_VECFUNC("cosh", "_simd_cosh_d2", FIXED(2))
TLI_DEFINE_VECFUNC("coshf", "_simd_cosh_f4", FIXED(4))
TLI_DEFINE_VECFUNC("tanh", "_simd_tanh_d2", FIXED(2))
TLI_DEFINE_VECFUNC("tanhf", "_simd_tanh_f4", FIXED(4))
TLI_DEFINE_VECFUNC("asinh", "_simd_asinh_d2", FIXED(2))
TLI_DEFINE_VECFUNC("asinhf", "_simd_asinh_f4", FIXED(4))
TLI_DEFINE_VECFUNC("acosh", "_simd_acosh_d2", FIXED(2))
TLI_DEFINE_VECFUNC("acoshf", "_simd_acosh_f4", FIXED(4))
TLI_DEFINE_VECFUNC("atanh", "_simd_atanh_d2", FIXED(2))
TLI_DEFINE_VECFUNC("atanhf", "_simd_atanh_f4", FIXED(4))

#elif defined(TLI_DEFINE_LIBMVEC_X86_VECFUNCS)
// GLIBC Vector math Functions

TLI_DEFINE_VECFUNC("sin", "_ZGVbN2v_sin", FIXED(2))
TLI_DEFINE_VECFUNC("sin", "_ZGVdN4v_sin", FIXED(4))

TLI_DEFINE_VECFUNC("sinf", "_ZGVbN4v_sinf", FIXED(4))
TLI_DEFINE_VECFUNC("sinf", "_ZGVdN8v_sinf", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVbN2v_sin", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVdN4v_sin", FIXED(4))

TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVbN4v_sinf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVdN8v_sinf", FIXED(8))

TLI_DEFINE_VECFUNC("cos", "_ZGVbN2v_cos", FIXED(2))
TLI_DEFINE_VECFUNC("cos", "_ZGVdN4v_cos", FIXED(4))

TLI_DEFINE_VECFUNC("cosf", "_ZGVbN4v_cosf", FIXED(4))
TLI_DEFINE_VECFUNC("cosf", "_ZGVdN8v_cosf", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVbN2v_cos", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVdN4v_cos", FIXED(4))

TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVbN4v_cosf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVdN8v_cosf", FIXED(8))

TLI_DEFINE_VECFUNC("pow", "_ZGVbN2vv_pow", FIXED(2))
TLI_DEFINE_VECFUNC("pow", "_ZGVdN4vv_pow", FIXED(4))

TLI_DEFINE_VECFUNC("powf", "_ZGVbN4vv_powf", FIXED(4))
TLI_DEFINE_VECFUNC("powf", "_ZGVdN8vv_powf", FIXED(8))

TLI_DEFINE_VECFUNC("__pow_finite", "_ZGVbN2vv___pow_finite", FIXED(2))
TLI_DEFINE_VECFUNC("__pow_finite", "_ZGVdN4vv___pow_finite", FIXED(4))

TLI_DEFINE_VECFUNC("__powf_finite", "_ZGVbN4vv___powf_finite", FIXED(4))
TLI_DEFINE_VECFUNC("__powf_finite", "_ZGVdN8vv___powf_finite", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVbN2vv_pow", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVdN4vv_pow", FIXED(4))

TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVbN4vv_powf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVdN8vv_powf", FIXED(8))

TLI_DEFINE_VECFUNC("exp", "_ZGVbN2v_exp", FIXED(2))
TLI_DEFINE_VECFUNC("exp", "_ZGVdN4v_exp", FIXED(4))

TLI_DEFINE_VECFUNC("expf", "_ZGVbN4v_expf", FIXED(4))
TLI_DEFINE_VECFUNC("expf", "_ZGVdN8v_expf", FIXED(8))

TLI_DEFINE_VECFUNC("__exp_finite", "_ZGVbN2v___exp_finite", FIXED(2))
TLI_DEFINE_VECFUNC("__exp_finite", "_ZGVdN4v___exp_finite", FIXED(4))

TLI_DEFINE_VECFUNC("__expf_finite", "_ZGVbN4v___expf_finite", FIXED(4))
TLI_DEFINE_VECFUNC("__expf_finite", "_ZGVdN8v___expf_finite", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVbN2v_exp", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVdN4v_exp", FIXED(4))

TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVbN4v_expf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVdN8v_expf", FIXED(8))

TLI_DEFINE_VECFUNC("log", "_ZGVbN2v_log", FIXED(2))
TLI_DEFINE_VECFUNC("log", "_ZGVdN4v_log", FIXED(4))

TLI_DEFINE_VECFUNC("logf", "_ZGVbN4v_logf", FIXED(4))
TLI_DEFINE_VECFUNC("logf", "_ZGVdN8v_logf", FIXED(8))

TLI_DEFINE_VECFUNC("__log_finite", "_ZGVbN2v___log_finite", FIXED(2))
TLI_DEFINE_VECFUNC("__log_finite", "_ZGVdN4v___log_finite", FIXED(4))

TLI_DEFINE_VECFUNC("__logf_finite", "_ZGVbN4v___logf_finite", FIXED(4))
TLI_DEFINE_VECFUNC("__logf_finite", "_ZGVdN8v___logf_finite", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVbN2v_log", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVdN4v_log", FIXED(4))

TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVbN4v_logf", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVdN8v_logf", FIXED(8))

#elif defined(TLI_DEFINE_MASSV_VECFUNCS)
// IBM MASS library's vector Functions

// Floating-Point Arithmetic and Auxiliary Functions
TLI_DEFINE_VECFUNC("cbrt", "__cbrtd2", FIXED(2))
TLI_DEFINE_VECFUNC("cbrtf", "__cbrtf4", FIXED(4))
TLI_DEFINE_VECFUNC("pow", "__powd2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.pow.f64", "__powd2", FIXED(2))
TLI_DEFINE_VECFUNC("powf", "__powf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.pow.f32", "__powf4", FIXED(4))

// Exponential and Logarithmic Functions
TLI_DEFINE_VECFUNC("exp", "__expd2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.exp.f64", "__expd2", FIXED(2))
TLI_DEFINE_VECFUNC("expf", "__expf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp.f32", "__expf4", FIXED(4))
TLI_DEFINE_VECFUNC("exp2", "__exp2d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.exp2.f64", "__exp2d2", FIXED(2))
TLI_DEFINE_VECFUNC("exp2f", "__exp2f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp2.f32", "__exp2f4", FIXED(4))
TLI_DEFINE_VECFUNC("expm1", "__expm1d2", FIXED(2))
TLI_DEFINE_VECFUNC("expm1f", "__expm1f4", FIXED(4))
TLI_DEFINE_VECFUNC("log", "__logd2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log.f64", "__logd2", FIXED(2))
TLI_DEFINE_VECFUNC("logf", "__logf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log.f32", "__logf4", FIXED(4))
TLI_DEFINE_VECFUNC("log1p", "__log1pd2", FIXED(2))
TLI_DEFINE_VECFUNC("log1pf", "__log1pf4", FIXED(4))
TLI_DEFINE_VECFUNC("log10", "__log10d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log10.f64", "__log10d2", FIXED(2))
TLI_DEFINE_VECFUNC("log10f", "__log10f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log10.f32", "__log10f4", FIXED(4))
TLI_DEFINE_VECFUNC("log2", "__log2d2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log2.f64", "__log2d2", FIXED(2))
TLI_DEFINE_VECFUNC("log2f", "__log2f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log2.f32", "__log2f4", FIXED(4))

// Trigonometric Functions
TLI_DEFINE_VECFUNC("sin", "__sind2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.sin.f64", "__sind2", FIXED(2))
TLI_DEFINE_VECFUNC("sinf", "__sinf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sin.f32", "__sinf4", FIXED(4))
TLI_DEFINE_VECFUNC("cos", "__cosd2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.cos.f64", "__cosd2", FIXED(2))
TLI_DEFINE_VECFUNC("cosf", "__cosf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.cos.f32", "__cosf4", FIXED(4))
TLI_DEFINE_VECFUNC("tan", "__tand2", FIXED(2))
TLI_DEFINE_VECFUNC("tanf", "__tanf4", FIXED(4))
TLI_DEFINE_VECFUNC("asin", "__asind2", FIXED(2))
TLI_DEFINE_VECFUNC("asinf", "__asinf4", FIXED(4))
TLI_DEFINE_VECFUNC("acos", "__acosd2", FIXED(2))
TLI_DEFINE_VECFUNC("acosf", "__acosf4", FIXED(4))
TLI_DEFINE_VECFUNC("atan", "__atand2", FIXED(2))
TLI_DEFINE_VECFUNC("atanf", "__atanf4", FIXED(4))
TLI_DEFINE_VECFUNC("atan2", "__atan2d2", FIXED(2))
TLI_DEFINE_VECFUNC("atan2f", "__atan2f4", FIXED(4))

// Hyperbolic Functions
TLI_DEFINE_VECFUNC("sinh", "__sinhd2", FIXED(2))
TLI_DEFINE_VECFUNC("sinhf", "__sinhf4", FIXED(4))
TLI_DEFINE_VECFUNC("cosh", "__coshd2", FIXED(2))
TLI_DEFINE_VECFUNC("coshf", "__coshf4", FIXED(4))
TLI_DEFINE_VECFUNC("tanh", "__tanhd2", FIXED(2))
TLI_DEFINE_VECFUNC("tanhf", "__tanhf4", FIXED(4))
TLI_DEFINE_VECFUNC("asinh", "__asinhd2", FIXED(2))
TLI_DEFINE_VECFUNC("asinhf", "__asinhf4", FIXED(4))
TLI_DEFINE_VECFUNC("acosh", "__acoshd2", FIXED(2))
TLI_DEFINE_VECFUNC("acoshf", "__acoshf4", FIXED(4))
TLI_DEFINE_VECFUNC("atanh", "__atanhd2", FIXED(2))
TLI_DEFINE_VECFUNC("atanhf", "__atanhf4", FIXED(4))


#elif defined(TLI_DEFINE_SVML_VECFUNCS)
// Intel SVM library's Vector Functions

TLI_DEFINE_VECFUNC("sin", "__svml_sin2", FIXED(2))
TLI_DEFINE_VECFUNC("sin", "__svml_sin4", FIXED(4))
TLI_DEFINE_VECFUNC("sin", "__svml_sin8", FIXED(8))

TLI_DEFINE_VECFUNC("sinf", "__svml_sinf4", FIXED(4))
TLI_DEFINE_VECFUNC("sinf", "__svml_sinf8", FIXED(8))
TLI_DEFINE_VECFUNC("sinf", "__svml_sinf16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.sin.f64", "__svml_sin2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.sin.f64", "__svml_sin4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sin.f64", "__svml_sin8", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.sin.f32", "__svml_sinf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.sin.f32", "__svml_sinf8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.sin.f32", "__svml_sinf16", FIXED(16))

TLI_DEFINE_VECFUNC("cos", "__svml_cos2", FIXED(2))
TLI_DEFINE_VECFUNC("cos", "__svml_cos4", FIXED(4))
TLI_DEFINE_VECFUNC("cos", "__svml_cos8", FIXED(8))

TLI_DEFINE_VECFUNC("cosf", "__svml_cosf4", FIXED(4))
TLI_DEFINE_VECFUNC("cosf", "__svml_cosf8", FIXED(8))
TLI_DEFINE_VECFUNC("cosf", "__svml_cosf16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.cos.f64", "__svml_cos2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.cos.f64", "__svml_cos4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.cos.f64", "__svml_cos8", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.cos.f32", "__svml_cosf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.cos.f32", "__svml_cosf8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.cos.f32", "__svml_cosf16", FIXED(16))

TLI_DEFINE_VECFUNC("pow", "__svml_pow2", FIXED(2))
TLI_DEFINE_VECFUNC("pow", "__svml_pow4", FIXED(4))
TLI_DEFINE_VECFUNC("pow", "__svml_pow8", FIXED(8))

TLI_DEFINE_VECFUNC("powf", "__svml_powf4", FIXED(4))
TLI_DEFINE_VECFUNC("powf", "__svml_powf8", FIXED(8))
TLI_DEFINE_VECFUNC("powf", "__svml_powf16", FIXED(16))

TLI_DEFINE_VECFUNC("__pow_finite", "__svml_pow2", FIXED(2))
TLI_DEFINE_VECFUNC("__pow_finite", "__svml_pow4", FIXED(4))
TLI_DEFINE_VECFUNC("__pow_finite", "__svml_pow8", FIXED(8))

TLI_DEFINE_VECFUNC("__powf_finite", "__svml_powf4", FIXED(4))
TLI_DEFINE_VECFUNC("__powf_finite", "__svml_powf8", FIXED(8))
TLI_DEFINE_VECFUNC("__powf_finite", "__svml_powf16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.pow.f64", "__svml_pow2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.pow.f64", "__svml_pow4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.pow.f64", "__svml_pow8", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.pow.f32", "__svml_powf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.pow.f32", "__svml_powf8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.pow.f32", "__svml_powf16", FIXED(16))

TLI_DEFINE_VECFUNC("exp", "__svml_exp2", FIXED(2))
TLI_DEFINE_VECFUNC("exp", "__svml_exp4", FIXED(4))
TLI_DEFINE_VECFUNC("exp", "__svml_exp8", FIXED(8))

TLI_DEFINE_VECFUNC("expf", "__svml_expf4", FIXED(4))
TLI_DEFINE_VECFUNC("expf", "__svml_expf8", FIXED(8))
TLI_DEFINE_VECFUNC("expf", "__svml_expf16", FIXED(16))

TLI_DEFINE_VECFUNC("__exp_finite", "__svml_exp2", FIXED(2))
TLI_DEFINE_VECFUNC("__exp_finite", "__svml_exp4", FIXED(4))
TLI_DEFINE_VECFUNC("__exp_finite", "__svml_exp8", FIXED(8))

TLI_DEFINE_VECFUNC("__expf_finite", "__svml_expf4", FIXED(4))
TLI_DEFINE_VECFUNC("__expf_finite", "__svml_expf8", FIXED(8))
TLI_DEFINE_VECFUNC("__expf_finite", "__svml_expf16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.exp.f64", "__svml_exp2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.exp.f64", "__svml_exp4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp.f64", "__svml_exp8", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.exp.f32", "__svml_expf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp.f32", "__svml_expf8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.exp.f32", "__svml_expf16", FIXED(16))

TLI_DEFINE_VECFUNC("log", "__svml_log2", FIXED(2))
TLI_DEFINE_VECFUNC("log", "__svml_log4", FIXED(4))
TLI_DEFINE_VECFUNC("log", "__svml_log8", FIXED(8))

TLI_DEFINE_VECFUNC("logf", "__svml_logf4", FIXED(4))
TLI_DEFINE_VECFUNC("logf", "__svml_logf8", FIXED(8))
TLI_DEFINE_VECFUNC("logf", "__svml_logf16", FIXED(16))

TLI_DEFINE_VECFUNC("__log_finite", "__svml_log2", FIXED(2))
TLI_DEFINE_VECFUNC("__log_finite", "__svml_log4", FIXED(4))
TLI_DEFINE_VECFUNC("__log_finite", "__svml_log8", FIXED(8))

TLI_DEFINE_VECFUNC("__logf_finite", "__svml_logf4", FIXED(4))
TLI_DEFINE_VECFUNC("__logf_finite", "__svml_logf8", FIXED(8))
TLI_DEFINE_VECFUNC("__logf_finite", "__svml_logf16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.log.f64", "__svml_log2", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log.f64", "__svml_log4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log.f64", "__svml_log8", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.log.f32", "__svml_logf4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log.f32", "__svml_logf8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.log.f32", "__svml_logf16", FIXED(16))

TLI_DEFINE_VECFUNC("log2", "__svml_log22", FIXED(2))
TLI_DEFINE_VECFUNC("log2", "__svml_log24", FIXED(4))
TLI_DEFINE_VECFUNC("log2", "__svml_log28", FIXED(8))

TLI_DEFINE_VECFUNC("log2f", "__svml_log2f4", FIXED(4))
TLI_DEFINE_VECFUNC("log2f", "__svml_log2f8", FIXED(8))
TLI_DEFINE_VECFUNC("log2f", "__svml_log2f16", FIXED(16))

TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log22", FIXED(2))
TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log24", FIXED(4))
TLI_DEFINE_VECFUNC("__log2_finite", "__svml_log28", FIXED(8))

TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f4", FIXED(4))
TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f8", FIXED(8))
TLI_DEFINE_VECFUNC("__log2f_finite", "__svml_log2f16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log22", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log24", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log2.f64", "__svml_log28", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.log2.f32", "__svml_log2f16", FIXED(16))

TLI_DEFINE_VECFUNC("log10", "__svml_log102", FIXED(2))
TLI_DEFINE_VECFUNC("log10", "__svml_log104", FIXED(4))
TLI_DEFINE_VECFUNC("log10", "__svml_log108", FIXED(8))

TLI_DEFINE_VECFUNC("log10f", "__svml_log10f4", FIXED(4))
TLI_DEFINE_VECFUNC("log10f", "__svml_log10f8", FIXED(8))
TLI_DEFINE_VECFUNC("log10f", "__svml_log10f16", FIXED(16))

TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log102", FIXED(2))
TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log104", FIXED(4))
TLI_DEFINE_VECFUNC("__log10_finite", "__svml_log108", FIXED(8))

TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f4", FIXED(4))
TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f8", FIXED(8))
TLI_DEFINE_VECFUNC("__log10f_finite", "__svml_log10f16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log102", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log104", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log10.f64", "__svml_log108", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.log10.f32", "__svml_log10f16", FIXED(16))

TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt2", FIXED(2))
TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt4", FIXED(4))
TLI_DEFINE_VECFUNC("sqrt", "__svml_sqrt8", FIXED(8))

TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf4", FIXED(4))
TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf8", FIXED(8))
TLI_DEFINE_VECFUNC("sqrtf", "__svml_sqrtf16", FIXED(16))

TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt2", FIXED(2))
TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt4", FIXED(4))
TLI_DEFINE_VECFUNC("__sqrt_finite", "__svml_sqrt8", FIXED(8))

TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf4", FIXED(4))
TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf8", FIXED(8))
TLI_DEFINE_VECFUNC("__sqrtf_finite", "__svml_sqrtf16", FIXED(16))

TLI_DEFINE_VECFUNC("exp2", "__svml_exp22", FIXED(2))
TLI_DEFINE_VECFUNC("exp2", "__svml_exp24", FIXED(4))
TLI_DEFINE_VECFUNC("exp2", "__svml_exp28", FIXED(8))

TLI_DEFINE_VECFUNC("exp2f", "__svml_exp2f4", FIXED(4))
TLI_DEFINE_VECFUNC("exp2f", "__svml_exp2f8", FIXED(8))
TLI_DEFINE_VECFUNC("exp2f", "__svml_exp2f16", FIXED(16))

TLI_DEFINE_VECFUNC("llvm.exp2.f64", "__svml_exp22", FIXED(2))
TLI_DEFINE_VECFUNC("llvm.exp2.f64", "__svml_exp24", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp2.f64", "__svml_exp28", FIXED(8))

TLI_DEFINE_VECFUNC("llvm.exp2.f32", "__svml_exp2f4", FIXED(4))
TLI_DEFINE_VECFUNC("llvm.exp2.f32", "__svml_exp2f8", FIXED(8))
TLI_DEFINE_VECFUNC("llvm.exp2.f32", "__svml_exp2f16", FIXED(16))

TLI_DEFINE_VECFUNC("__exp2_finite", "__svml_exp22", FIXED(2))
TLI_DEFINE_VECFUNC("__exp2_finite", "__svml_exp24", FIXED(4))
TLI_DEFINE_VECFUNC("__exp2_finite", "__svml_exp28", FIXED(8))

TLI_DEFINE_VECFUNC("__exp2f_finite", "__svml_exp2f4", FIXED(4))
TLI_DEFINE_VECFUNC("__exp2f_finite", "__svml_exp2f8", FIXED(8))
TLI_DEFINE_VECFUNC("__exp2f_finite", "__svml_exp2f16", FIXED(16))

#elif defined(TLI_DEFINE_SLEEFGNUABI_VF2_VECFUNCS)

TLI_DEFINE_VECFUNC( "acos", "_ZGVnN2v_acos", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.acos.f64", "_ZGVnN2v_acos", FIXED(2))

TLI_DEFINE_VECFUNC( "asin", "_ZGVnN2v_asin", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.asin.f64", "_ZGVnN2v_asin", FIXED(2))

TLI_DEFINE_VECFUNC( "atan", "_ZGVnN2v_atan", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.atan.f64", "_ZGVnN2v_atan", FIXED(2))

TLI_DEFINE_VECFUNC( "atan2", "_ZGVnN2vv_atan2", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.atan2.f64", "_ZGVnN2vv_atan2", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.atan2.v2f64", "_ZGVnN2vv_atan2", FIXED(2))

TLI_DEFINE_VECFUNC( "atanh", "_ZGVnN2v_atanh", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.atanh.f64", "_ZGVnN2v_atanh", FIXED(2))

TLI_DEFINE_VECFUNC( "cos", "_ZGVnN2v_cos", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.cos.f64", "_ZGVnN2v_cos", FIXED(2))

TLI_DEFINE_VECFUNC( "cosh", "_ZGVnN2v_cosh", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.cosh.f64", "_ZGVnN2v_cosh", FIXED(2))

TLI_DEFINE_VECFUNC( "exp", "_ZGVnN2v_exp", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.exp.f64", "_ZGVnN2v_exp", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.exp.v2f64", "_ZGVnN2v_exp", FIXED(2))

TLI_DEFINE_VECFUNC( "exp2", "_ZGVnN2v_exp2", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.exp2.f64", "_ZGVnN2v_exp2", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.exp2.v2f64", "_ZGVnN2v_exp2", FIXED(2))

TLI_DEFINE_VECFUNC( "exp10", "_ZGVnN2v_exp10", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.exp10.f64", "_ZGVnN2v_exp10", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.exp10.v2f64", "_ZGVnN2v_exp10", FIXED(2))

TLI_DEFINE_VECFUNC( "lgamma", "_ZGVnN2v_lgamma", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.lgamma.f64", "_ZGVnN2v_lgamma", FIXED(2))

TLI_DEFINE_VECFUNC( "log", "_ZGVnN2v_log", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.log.f64", "_ZGVnN2v_log", FIXED(2))

TLI_DEFINE_VECFUNC( "log2", "_ZGVnN2v_log2", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.log2.f64", "_ZGVnN2v_log2", FIXED(2))

TLI_DEFINE_VECFUNC( "log10", "_ZGVnN2v_log10", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.log10.f64", "_ZGVnN2v_log10", FIXED(2))

TLI_DEFINE_VECFUNC( "pow", "_ZGVnN2vv_pow", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.pow.f64", "_ZGVnN2vv_pow", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.pow.v2f64", "_ZGVnN2vv_pow", FIXED(2))

TLI_DEFINE_VECFUNC( "sin", "_ZGVnN2v_sin", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.sin.f64", "_ZGVnN2v_sin", FIXED(2))

TLI_DEFINE_VECFUNC( "sinh", "_ZGVnN2v_sinh", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.sinh.f64", "_ZGVnN2v_sinh", FIXED(2))

TLI_DEFINE_VECFUNC( "sqrt", "_ZGVnN2v_sqrt", FIXED(2))

TLI_DEFINE_VECFUNC( "tan", "_ZGVnN2v_tan", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.tan.f64", "_ZGVnN2v_tan", FIXED(2))

TLI_DEFINE_VECFUNC( "tanh", "_ZGVnN2v_tanh", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.tanh.f64", "_ZGVnN2v_tanh", FIXED(2))

TLI_DEFINE_VECFUNC( "tgamma", "_ZGVnN2v_tgamma", FIXED(2))
TLI_DEFINE_VECFUNC( "llvm.tgamma.f64", "_ZGVnN2v_tgamma", FIXED(2))

#elif defined(TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS)

TLI_DEFINE_VECFUNC( "acosf", "_ZGVnN4v_acosf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.acos.f32", "_ZGVnN4v_acosf", FIXED(4))

TLI_DEFINE_VECFUNC( "asinf", "_ZGVnN4v_asinf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.asin.f32", "_ZGVnN4v_asinf", FIXED(4))

TLI_DEFINE_VECFUNC( "atanf", "_ZGVnN4v_atanf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.atan.f32", "_ZGVnN4v_atanf", FIXED(4))

TLI_DEFINE_VECFUNC( "atan2f", "_ZGVnN4vv_atan2f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.atan2.f32", "_ZGVnN4vv_atan2f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.atan2.v4f32", "_ZGVnN4vv_atan2f", FIXED(4))

TLI_DEFINE_VECFUNC( "atanhf", "_ZGVnN4v_atanhf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.atanh.f32", "_ZGVnN4v_atanhf", FIXED(4))

TLI_DEFINE_VECFUNC( "cosf", "_ZGVnN4v_cosf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.cos.f32", "_ZGVnN4v_cosf", FIXED(4))

TLI_DEFINE_VECFUNC( "coshf", "_ZGVnN4v_coshf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.cosh.f32", "_ZGVnN4v_coshf", FIXED(4))

TLI_DEFINE_VECFUNC( "expf", "_ZGVnN4v_expf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.exp.f32", "_ZGVnN4v_expf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.exp.v4f32", "_ZGVnN4v_expf", FIXED(4))

TLI_DEFINE_VECFUNC( "exp2f", "_ZGVnN4v_exp2f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.exp2.f32", "_ZGVnN4v_exp2f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.exp2.v4f32", "_ZGVnN4v_exp2f", FIXED(4))

TLI_DEFINE_VECFUNC( "exp10f", "_ZGVnN4v_exp10f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.exp10.f32", "_ZGVnN4v_exp10f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.exp10.v4f32", "_ZGVnN4v_exp10f", FIXED(4))

TLI_DEFINE_VECFUNC( "lgammaf", "_ZGVnN4v_lgammaf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.lgamma.f32", "_ZGVnN4v_lgammaf", FIXED(4))

TLI_DEFINE_VECFUNC( "logf", "_ZGVnN4v_logf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.log.f32", "_ZGVnN4v_logf", FIXED(4))

TLI_DEFINE_VECFUNC( "log2f", "_ZGVnN4v_log2f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.log2.f32", "_ZGVnN4v_log2f", FIXED(4))

TLI_DEFINE_VECFUNC( "log10f", "_ZGVnN4v_log10f", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.log10.f32", "_ZGVnN4v_log10f", FIXED(4))

TLI_DEFINE_VECFUNC( "powf", "_ZGVnN4vv_powf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.pow.f32", "_ZGVnN4vv_powf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.pow.v4f32", "_ZGVnN4vv_powf", FIXED(4))

TLI_DEFINE_VECFUNC( "sinf", "_ZGVnN4v_sinf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.sin.f32", "_ZGVnN4v_sinf", FIXED(4))

TLI_DEFINE_VECFUNC( "sinhf", "_ZGVnN4v_sinhf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.sinh.f32", "_ZGVnN4v_sinhf", FIXED(4))

TLI_DEFINE_VECFUNC( "sqrtf", "_ZGVnN4v_sqrtf", FIXED(4))

TLI_DEFINE_VECFUNC( "tanf", "_ZGVnN4v_tanf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.tan.f32", "_ZGVnN4v_tanf", FIXED(4))

TLI_DEFINE_VECFUNC( "tanhf", "_ZGVnN4v_tanhf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.tanh.f32", "_ZGVnN4v_tanhf", FIXED(4))

TLI_DEFINE_VECFUNC( "tgammaf", "_ZGVnN4v_tgammaf", FIXED(4))
TLI_DEFINE_VECFUNC( "llvm.tgamma.f32", "_ZGVnN4v_tgammaf", FIXED(4))

#elif defined(TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS)

TLI_DEFINE_VECFUNC("acos", "_ZGVsMxv_acos",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("acosf", "_ZGVsMxv_acosf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("asin", "_ZGVsMxv_asin",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("asinf", "_ZGVsMxv_asinf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("atan", "_ZGVsMxv_atan",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("atanf", "_ZGVsMxv_atanf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("atan2", "_ZGVsMxvv_atan2",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("atan2f", "_ZGVsMxvv_atan2f", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("atanh", "_ZGVsMxv_atanh",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("atanhf", "_ZGVsMxv_atanhf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("cos", "_ZGVsMxv_cos",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("cosf", "_ZGVsMxv_cosf", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.cos.f64", "_ZGVsMxv_cos", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.cos.f32", "_ZGVsMxv_cosf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("cosh", "_ZGVsMxv_cosh",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("coshf", "_ZGVsMxv_coshf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("exp", "_ZGVsMxv_exp",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("expf", "_ZGVsMxv_expf", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.exp.f64", "_ZGVsMxv_exp", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.exp.f32", "_ZGVsMxv_expf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("exp2", "_ZGVsMxv_exp2",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("exp2f", "_ZGVsMxv_exp2f", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.exp2.f64", "_ZGVsMxv_exp2", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.exp2.f32", "_ZGVsMxv_exp2f", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("exp10", "_ZGVsMxv_exp10",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("exp10f", "_ZGVsMxv_exp10f", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("lgamma", "_ZGVsMxv_lgamma",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("lgammaf", "_ZGVsMxv_lgammaf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("log", "_ZGVsMxv_log",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("logf", "_ZGVsMxv_logf", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.log.f64", "_ZGVsMxv_log", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.log.f32", "_ZGVsMxv_logf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC( "log2", "_ZGVsMxv_log2", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC( "log2f", "_ZGVsMxv_log2f", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC( "llvm.log2.f64", "_ZGVsMxv_log2", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC( "llvm.log2.f32", "_ZGVsMxv_log2f", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("log10", "_ZGVsMxv_log10",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("log10f", "_ZGVsMxv_log10f", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.log10.f64", "_ZGVsMxv_log10", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.log10.f32", "_ZGVsMxv_log10f", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("pow", "_ZGVsMxvv_pow", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("powf", "_ZGVsMxvv_powf", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.pow.f64", "_ZGVsMxvv_pow", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.pow.f32", "_ZGVsMxvv_powf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sin", "_ZGVsMxv_sin",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sinf", "_ZGVsMxv_sinf", SCALABLE(4), MASKED)
TLI_DEFINE_VECFUNC("llvm.sin.f64", "_ZGVsMxv_sin", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.sin.f32", "_ZGVsMxv_sinf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sinh", "_ZGVsMxv_sinh",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sinhf", "_ZGVsMxv_sinhf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sqrt", "_ZGVsMxv_sqrt",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sqrtf", "_ZGVsMxv_sqrtf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("tan", "_ZGVsMxv_tan",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("tanf", "_ZGVsMxv_tanf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("tanh", "_ZGVsMxv_tanh",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("tanhf", "_ZGVsMxv_tanhf", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("tgamma", "_ZGVsMxv_tgamma",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("tgammaf", "_ZGVsMxv_tgammaf", SCALABLE(4), MASKED)

#elif defined(TLI_DEFINE_ARMPL_VECFUNCS)

TLI_DEFINE_VECFUNC("acos", "armpl_vacosq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("acosf", "armpl_vacosq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("acos", "armpl_svacos_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("acosf", "armpl_svacos_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("acosh", "armpl_vacoshq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("acoshf", "armpl_vacoshq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("acosh", "armpl_svacosh_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("acoshf", "armpl_svacosh_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("asin", "armpl_vasinq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("asinf", "armpl_vasinq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("asin", "armpl_svasin_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("asinf", "armpl_svasin_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("asinh", "armpl_vasinhq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("asinhf", "armpl_vasinhq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("asinh", "armpl_svasinh_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("asinhf", "armpl_svasinh_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("atan", "armpl_vatanq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("atanf", "armpl_vatanq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("atan", "armpl_svatan_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("atanf", "armpl_svatan_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("atan2", "armpl_vatan2q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("atan2f", "armpl_vatan2q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("atan2", "armpl_svatan2_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("atan2f", "armpl_svatan2_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("atanh", "armpl_vatanhq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("atanhf", "armpl_vatanhq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("atanh", "armpl_svatanh_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("atanhf", "armpl_svatanh_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("cbrt", "armpl_vcbrtq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("cbrtf", "armpl_vcbrtq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("cbrt", "armpl_svcbrt_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("cbrtf", "armpl_svcbrt_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("copysign", "armpl_vcopysignq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("copysignf", "armpl_vcopysignq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("copysign", "armpl_svcopysign_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("copysignf", "armpl_svcopysign_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("cos", "armpl_vcosq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("cosf", "armpl_vcosq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("cos", "armpl_svcos_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("cosf", "armpl_svcos_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.cos.f64", "armpl_vcosq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.cos.f32", "armpl_vcosq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.cos.f64", "armpl_svcos_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.cos.f32", "armpl_svcos_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("cosh", "armpl_vcoshq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("coshf", "armpl_vcoshq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("cosh", "armpl_svcosh_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("coshf", "armpl_svcosh_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("erf", "armpl_verfq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("erff", "armpl_verfq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("erf", "armpl_sverf_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("erff", "armpl_sverf_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("erfc", "armpl_verfcq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("erfcf", "armpl_verfcq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("erfc", "armpl_sverfc_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("erfcf", "armpl_sverfc_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("exp", "armpl_vexpq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("expf", "armpl_vexpq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("exp", "armpl_svexp_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("expf", "armpl_svexp_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.exp.f64", "armpl_vexpq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.exp.f32", "armpl_vexpq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.exp.f64", "armpl_svexp_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.exp.f32", "armpl_svexp_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("exp2", "armpl_vexp2q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("exp2f", "armpl_vexp2q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("exp2", "armpl_svexp2_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("exp2f", "armpl_svexp2_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.exp2.f64", "armpl_vexp2q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.exp2.f32", "armpl_vexp2q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.exp2.f64", "armpl_svexp2_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.exp2.f32", "armpl_svexp2_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("exp10", "armpl_vexp10q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("exp10f", "armpl_vexp10q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("exp10", "armpl_svexp10_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("exp10f", "armpl_svexp10_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("expm1", "armpl_vexpm1q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("expm1f", "armpl_vexpm1q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("expm1", "armpl_svexpm1_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("expm1f", "armpl_svexpm1_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("fdim", "armpl_vfdimq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("fdimf", "armpl_vfdimq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("fdim", "armpl_svfdim_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("fdimf", "armpl_svfdim_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("fma", "armpl_vfmaq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("fmaf", "armpl_vfmaq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("fma", "armpl_svfma_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("fmaf", "armpl_svfma_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("fmin", "armpl_vfminq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("fminf", "armpl_vfminq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("fmin", "armpl_svfmin_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("fminf", "armpl_svfmin_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("fmod", "armpl_vfmodq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("fmodf", "armpl_vfmodq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("fmod", "armpl_svfmod_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("fmodf", "armpl_svfmod_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("hypot", "armpl_vhypotq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("hypotf", "armpl_vhypotq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("hypot", "armpl_svhypot_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("hypotf", "armpl_svhypot_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("lgamma", "armpl_vlgammaq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("lgammaf", "armpl_vlgammaq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("lgamma", "armpl_svlgamma_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("lgammaf", "armpl_svlgamma_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("log", "armpl_vlogq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("logf", "armpl_vlogq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("log", "armpl_svlog_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("logf", "armpl_svlog_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.log.f64", "armpl_vlogq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.log.f32", "armpl_vlogq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.log.f64", "armpl_svlog_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.log.f32", "armpl_svlog_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("log1p", "armpl_vlog1pq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("log1pf", "armpl_vlog1pq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("log1p", "armpl_svlog1p_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("log1pf", "armpl_svlog1p_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("log2", "armpl_vlog2q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("log2f", "armpl_vlog2q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("log2", "armpl_svlog2_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("log2f", "armpl_svlog2_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.log2.f64", "armpl_vlog2q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.log2.f32", "armpl_vlog2q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.log2.f64", "armpl_svlog2_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.log2.f32", "armpl_svlog2_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("log10", "armpl_vlog10q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("log10f", "armpl_vlog10q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("log10", "armpl_svlog10_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("log10f", "armpl_svlog10_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.log10.f64", "armpl_vlog10q_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.log10.f32", "armpl_vlog10q_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.log10.f64", "armpl_svlog10_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.log10.f32", "armpl_svlog10_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("nextafter", "armpl_vnextafterq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("nextafterf", "armpl_vnextafterq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("nextafter", "armpl_svnextafter_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("nextafterf", "armpl_svnextafter_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("pow", "armpl_vpowq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("powf", "armpl_vpowq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("pow", "armpl_svpow_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("powf", "armpl_svpow_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.pow.f64", "armpl_vpowq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.pow.f32", "armpl_vpowq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.pow.f64", "armpl_svpow_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.pow.f32", "armpl_svpow_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sin", "armpl_vsinq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("sinf", "armpl_vsinq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("sin", "armpl_svsin_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sinf", "armpl_svsin_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("llvm.sin.f64", "armpl_vsinq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("llvm.sin.f32", "armpl_vsinq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("llvm.sin.f64", "armpl_svsin_f64_x", SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("llvm.sin.f32", "armpl_svsin_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sinh", "armpl_vsinhq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("sinhf", "armpl_vsinhq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("sinh", "armpl_svsinh_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sinhf", "armpl_svsinh_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sinpi", "armpl_vsinpiq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("sinpif", "armpl_vsinpiq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("sinpi", "armpl_svsinpi_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sinpif", "armpl_svsinpi_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("sqrt", "armpl_vsqrtq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("sqrtf", "armpl_vsqrtq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("sqrt", "armpl_svsqrt_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("sqrtf", "armpl_svsqrt_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("tan", "armpl_vtanq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("tanf", "armpl_vtanq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("tan", "armpl_svtan_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("tanf", "armpl_svtan_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("tanh", "armpl_vtanhq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("tanhf", "armpl_vtanhq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("tanh", "armpl_svtanh_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("tanhf", "armpl_svtanh_f32_x", SCALABLE(4), MASKED)

TLI_DEFINE_VECFUNC("tgamma", "armpl_vtgammaq_f64", FIXED(2), NOMASK)
TLI_DEFINE_VECFUNC("tgammaf", "armpl_vtgammaq_f32", FIXED(4), NOMASK)
TLI_DEFINE_VECFUNC("tgamma", "armpl_svtgamma_f64_x",  SCALABLE(2), MASKED)
TLI_DEFINE_VECFUNC("tgammaf", "armpl_svtgamma_f32_x", SCALABLE(4), MASKED)

#else
#error "Must choose which vector library functions are to be defined."
#endif

#undef MASKED
#undef NOMASK
#undef SCALABLE
#undef FIXED

#undef TLI_DEFINE_VECFUNC
#undef TLI_DEFINE_ACCELERATE_VECFUNCS
#undef TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
#undef TLI_DEFINE_LIBMVEC_X86_VECFUNCS
#undef TLI_DEFINE_MASSV_VECFUNCS
#undef TLI_DEFINE_SVML_VECFUNCS
#undef TLI_DEFINE_SLEEFGNUABI_VF2_VECFUNCS
#undef TLI_DEFINE_SLEEFGNUABI_VF4_VECFUNCS
#undef TLI_DEFINE_SLEEFGNUABI_SCALABLE_VECFUNCS
#undef TLI_DEFINE_MASSV_VECFUNCS_NAMES
#undef TLI_DEFINE_ARMPL_VECFUNCS
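
// Illustrative sketch (hypothetical consumer, not part of this file): a
// client typically expands these entries into a table by defining
// TLI_DEFINE_VECFUNC and one library selector before including this file.
// The VecEntry struct and names below are assumptions for illustration.
//
//   struct VecEntry { const char *ScalarName; const char *VectorName; };
//   #define TLI_DEFINE_SVML_VECFUNCS
//   #define TLI_DEFINE_VECFUNC(SCAL, VEC, VF, ...) {SCAL, VEC},
//   static const VecEntry SVMLEntries[] = {
//   #include "llvm/Analysis/VecFuncs.def"
//   };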
//===- llvm/Analysis/ScalarEvolutionExpressions.h - SCEV Exprs --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to represent and build scalar expressions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>

namespace llvm {

class APInt;
class Constant;
class ConstantInt;
class ConstantRange;
class Loop;
class Type;
class Value;

enum SCEVTypes : unsigned short {
  // These should be ordered in terms of increasing complexity to make the
  // folders simpler.
  scConstant,
  scVScale,
  scTruncate,
  scZeroExtend,
  scSignExtend,
  scAddExpr,
  scMulExpr,
  scUDivExpr,
  scAddRecExpr,
  scUMaxExpr,
  scSMaxExpr,
  scUMinExpr,
  scSMinExpr,
  scSequentialUMinExpr,
  scPtrToInt,
  scUnknown,
  scCouldNotCompute
};

/// This class represents a constant integer value.
class SCEVConstant : public SCEV {
  friend class ScalarEvolution;

  ConstantInt *V;

  SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v)
      : SCEV(ID, scConstant, 1), V(v) {}

public:
  ConstantInt *getValue() const { return V; }
  const APInt &getAPInt() const { return getValue()->getValue(); }

  Type *getType() const { return V->getType(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scConstant; }
};

/// This class represents the value of vscale, as used when defining the length
/// of a scalable vector or returned by the llvm.vscale() intrinsic.
class SCEVVScale : public SCEV {
  friend class ScalarEvolution;

  SCEVVScale(const FoldingSetNodeIDRef ID, Type *ty)
      : SCEV(ID, scVScale, 0), Ty(ty) {}

  Type *Ty;

public:
  Type *getType() const { return Ty; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scVScale; }
};

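/// Compute the size of a SCEV expression as 1 (for the node itself) plus the
/// sizes of its operands, using 16-bit saturating arithmetic so that very
/// large expressions clamp at the maximum value instead of wrapping.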
inline unsigned short computeExpressionSize(ArrayRef<const SCEV *> Args) {
  APInt Size(16, 1);
  for (const auto *Arg : Args)
    Size = Size.uadd_sat(APInt(16, Arg->getExpressionSize()));
  return (unsigned short)Size.getZExtValue();
}

/// This is the base class for unary cast operator classes.
class SCEVCastExpr : public SCEV {
protected:
  const SCEV *Op;
  Type *Ty;

  SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op,
               Type *ty);

public:
  const SCEV *getOperand() const { return Op; }
  const SCEV *getOperand(unsigned i) const {
    assert(i == 0 && "Operand index out of range!");
    return Op;
  }
  ArrayRef<const SCEV *> operands() const { return Op; }
  size_t getNumOperands() const { return 1; }
  Type *getType() const { return Ty; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scPtrToInt || S->getSCEVType() == scTruncate ||
           S->getSCEVType() == scZeroExtend || S->getSCEVType() == scSignExtend;
  }
};

/// This class represents a cast from a pointer to a pointer-sized integer
/// value.
class SCEVPtrToIntExpr : public SCEVCastExpr {
  friend class ScalarEvolution;

  SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op, Type *ITy);

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scPtrToInt; }
};

/// This is the base class for unary integral cast operator classes.
class SCEVIntegralCastExpr : public SCEVCastExpr {
protected:
  SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                       const SCEV *op, Type *ty);

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scTruncate || S->getSCEVType() == scZeroExtend ||
           S->getSCEVType() == scSignExtend;
  }
};

/// This class represents a truncation of an integer value to a
/// smaller integer value.
class SCEVTruncateExpr : public SCEVIntegralCastExpr {
  friend class ScalarEvolution;

  SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scTruncate; }
};

/// This class represents a zero extension of a small integer value
/// to a larger integer value.
class SCEVZeroExtendExpr : public SCEVIntegralCastExpr {
  friend class ScalarEvolution;

  SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scZeroExtend;
  }
};

/// This class represents a sign extension of a small integer value
/// to a larger integer value.
class SCEVSignExtendExpr : public SCEVIntegralCastExpr {
  friend class ScalarEvolution;

  SCEVSignExtendExpr(const FoldingSetNodeIDRef ID, const SCEV *op, Type *ty);

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scSignExtend;
  }
};

/// This node is a base class providing common functionality for
/// n'ary operators.
class SCEVNAryExpr : public SCEV {
protected:
  // Since SCEVs are immutable, ScalarEvolution allocates operand
  // arrays with its SCEVAllocator, so this class just needs a simple
  // pointer rather than a more elaborate vector-like data structure.
  // This also avoids the need for a non-trivial destructor.
  const SCEV *const *Operands;
  size_t NumOperands;

  SCEVNAryExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
               const SCEV *const *O, size_t N)
      : SCEV(ID, T, computeExpressionSize(ArrayRef(O, N))), Operands(O),
        NumOperands(N) {}

public:
  size_t getNumOperands() const { return NumOperands; }

  const SCEV *getOperand(unsigned i) const {
    assert(i < NumOperands && "Operand index out of range!");
    return Operands[i];
  }

  ArrayRef<const SCEV *> operands() const {
    return ArrayRef(Operands, NumOperands);
  }

  NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
    return (NoWrapFlags)(SubclassData & Mask);
  }

  bool hasNoUnsignedWrap() const {
    return getNoWrapFlags(FlagNUW) != FlagAnyWrap;
  }

  bool hasNoSignedWrap() const {
    return getNoWrapFlags(FlagNSW) != FlagAnyWrap;
  }

  bool hasNoSelfWrap() const { return getNoWrapFlags(FlagNW) != FlagAnyWrap; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr ||
           S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
           S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr ||
           S->getSCEVType() == scSequentialUMinExpr ||
           S->getSCEVType() == scAddRecExpr;
  }
};

/// This node is the base class for n'ary commutative operators.
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
  SCEVCommutativeExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
                      const SCEV *const *O, size_t N)
      : SCEVNAryExpr(ID, T, O, N) {}

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr ||
           S->getSCEVType() == scSMaxExpr || S->getSCEVType() == scUMaxExpr ||
           S->getSCEVType() == scSMinExpr || S->getSCEVType() == scUMinExpr;
  }

  /// Set flags for a non-recurrence without clearing previously set flags.
  void setNoWrapFlags(NoWrapFlags Flags) { SubclassData |= Flags; }
};

/// This node represents an addition of some number of SCEVs.
class SCEVAddExpr : public SCEVCommutativeExpr {
  friend class ScalarEvolution;

  Type *Ty;

  SCEVAddExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
      : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
    auto *FirstPointerTypedOp = find_if(operands(), [](const SCEV *Op) {
      return Op->getType()->isPointerTy();
    });
    if (FirstPointerTypedOp != operands().end())
      Ty = (*FirstPointerTypedOp)->getType();
    else
      Ty = getOperand(0)->getType();
  }

public:
  Type *getType() const { return Ty; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr; }
};

/// This node represents multiplication of some number of SCEVs.
class SCEVMulExpr : public SCEVCommutativeExpr {
  friend class ScalarEvolution;

  SCEVMulExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
      : SCEVCommutativeExpr(ID, scMulExpr, O, N) {}

public:
  Type *getType() const { return getOperand(0)->getType(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scMulExpr; }
};

/// This class represents a binary unsigned division operation.
class SCEVUDivExpr : public SCEV {
  friend class ScalarEvolution;

  std::array<const SCEV *, 2> Operands;

  SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
      : SCEV(ID, scUDivExpr, computeExpressionSize({lhs, rhs})) {
    Operands[0] = lhs;
    Operands[1] = rhs;
  }

public:
  const SCEV *getLHS() const { return Operands[0]; }
  const SCEV *getRHS() const { return Operands[1]; }
  size_t getNumOperands() const { return 2; }
  const SCEV *getOperand(unsigned i) const {
    assert((i == 0 || i == 1) && "Operand index out of range!");
    return i == 0 ? getLHS() : getRHS();
  }

  ArrayRef<const SCEV *> operands() const { return Operands; }

  Type *getType() const {
    // In most cases the types of LHS and RHS will be the same, but in some
    // unusual cases one or the other may be a pointer. ScalarEvolution doesn't
    // depend on the type for correctness, but handling types carefully can
    // avoid extra casts in the SCEVExpander. The LHS is more likely to be
    // a pointer type than the RHS, so use the RHS' type here.
    return getRHS()->getType();
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scUDivExpr; }
};

/// This node represents a polynomial recurrence on the trip count
/// of the specified loop.  This is the primary focus of the
/// ScalarEvolution framework; all the other SCEV subclasses are
/// mostly just supporting infrastructure to allow SCEVAddRecExpr
/// expressions to be created and analyzed.
///
/// All operands of an AddRec are required to be loop invariant.
///
class SCEVAddRecExpr : public SCEVNAryExpr {
  friend class ScalarEvolution;

  const Loop *L;

  SCEVAddRecExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N,
                 const Loop *l)
      : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}

public:
  Type *getType() const { return getStart()->getType(); }
  const SCEV *getStart() const { return Operands[0]; }
  const Loop *getLoop() const { return L; }

  /// Constructs and returns the recurrence indicating how much this
  /// expression steps by.  If this is a polynomial of degree N, it
  /// returns a chrec of degree N-1.  We cannot determine whether
  /// the step recurrence has self-wraparound.
  const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
    if (isAffine())
      return getOperand(1);
    return SE.getAddRecExpr(
        SmallVector<const SCEV *, 3>(operands().drop_front()), getLoop(),
        FlagAnyWrap);
  }

  /// Return true if this represents an expression A + B*x where A
  /// and B are loop invariant values.
  bool isAffine() const {
    // We know that the start value is invariant.  This expression is thus
    // affine iff the step is also invariant.
    return getNumOperands() == 2;
  }

  /// Return true if this represents an expression A + B*x + C*x^2
  /// where A, B and C are loop invariant values.  This corresponds
  /// to an addrec of the form {L,+,M,+,N}
  bool isQuadratic() const { return getNumOperands() == 3; }

  /// Set flags for a recurrence without clearing any previously set flags.
  /// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
  /// to make it easier to propagate flags.
  void setNoWrapFlags(NoWrapFlags Flags) {
    if (Flags & (FlagNUW | FlagNSW))
      Flags = ScalarEvolution::setFlags(Flags, FlagNW);
    SubclassData |= Flags;
  }

  /// Return the value of this chain of recurrences at the specified
  /// iteration number.
  const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;

  /// Return the value of this chain of recurrences at the specified iteration
  /// number. Takes an explicit list of operands to represent an AddRec.
  static const SCEV *evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                         const SCEV *It, ScalarEvolution &SE);

  /// Return the number of iterations of this loop that produce
  /// values in the specified constant range.  Another way of
  /// looking at this is that it returns the first iteration number
  /// where the value is not in the range, thus computing the
  /// exit count.  If the iteration count can't be computed, an
  /// instance of SCEVCouldNotCompute is returned.
  const SCEV *getNumIterationsInRange(const ConstantRange &Range,
                                      ScalarEvolution &SE) const;

  /// Return an expression representing the value of this expression
  /// one iteration of the loop ahead.
  const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scAddRecExpr;
  }
};
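
// Worked example (illustrative): for the affine addrec {0,+,4}<%loop>,
// getStart() is 0, getStepRecurrence(SE) is 4, and evaluateAtIteration(i, SE)
// yields 4*i.  For the quadratic {A,+,B,+,C}<%loop>, getStepRecurrence(SE)
// returns the degree-1 chrec {B,+,C}<%loop>.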

/// This node is the base class for min/max selections.
class SCEVMinMaxExpr : public SCEVCommutativeExpr {
  friend class ScalarEvolution;

  static bool isMinMaxType(enum SCEVTypes T) {
    return T == scSMaxExpr || T == scUMaxExpr || T == scSMinExpr ||
           T == scUMinExpr;
  }

protected:
  /// Note: Constructing subclasses via this constructor is allowed
  SCEVMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
                 const SCEV *const *O, size_t N)
      : SCEVCommutativeExpr(ID, T, O, N) {
    assert(isMinMaxType(T));
    // Min and max never overflow
    setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
  }

public:
  Type *getType() const { return getOperand(0)->getType(); }

  static bool classof(const SCEV *S) { return isMinMaxType(S->getSCEVType()); }

  static enum SCEVTypes negate(enum SCEVTypes T) {
    switch (T) {
    case scSMaxExpr:
      return scSMinExpr;
    case scSMinExpr:
      return scSMaxExpr;
    case scUMaxExpr:
      return scUMinExpr;
    case scUMinExpr:
      return scUMaxExpr;
    default:
      llvm_unreachable("Not a min or max SCEV type!");
    }
  }
};

/// This class represents a signed maximum selection.
class SCEVSMaxExpr : public SCEVMinMaxExpr {
  friend class ScalarEvolution;

  SCEVSMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
      : SCEVMinMaxExpr(ID, scSMaxExpr, O, N) {}

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scSMaxExpr; }
};

/// This class represents an unsigned maximum selection.
class SCEVUMaxExpr : public SCEVMinMaxExpr {
  friend class ScalarEvolution;

  SCEVUMaxExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
      : SCEVMinMaxExpr(ID, scUMaxExpr, O, N) {}

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scUMaxExpr; }
};

/// This class represents a signed minimum selection.
class SCEVSMinExpr : public SCEVMinMaxExpr {
  friend class ScalarEvolution;

  SCEVSMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
      : SCEVMinMaxExpr(ID, scSMinExpr, O, N) {}

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scSMinExpr; }
};

/// This class represents an unsigned minimum selection.
class SCEVUMinExpr : public SCEVMinMaxExpr {
  friend class ScalarEvolution;

  SCEVUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O, size_t N)
      : SCEVMinMaxExpr(ID, scUMinExpr, O, N) {}

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scUMinExpr; }
};

/// This node is the base class for sequential/in-order min/max selections.
/// Note that their fundamental difference from SCEVMinMaxExpr is that they
/// return early upon reaching the saturation point.
/// For example, given `0 umin_seq poison` the result is `0`,
/// while the result of `0 umin poison` is `poison`.
class SCEVSequentialMinMaxExpr : public SCEVNAryExpr {
  friend class ScalarEvolution;

  static bool isSequentialMinMaxType(enum SCEVTypes T) {
    return T == scSequentialUMinExpr;
  }

  /// Set flags for a non-recurrence without clearing previously set flags.
  void setNoWrapFlags(NoWrapFlags Flags) { SubclassData |= Flags; }

protected:
  /// Note: Constructing subclasses via this constructor is allowed
  SCEVSequentialMinMaxExpr(const FoldingSetNodeIDRef ID, enum SCEVTypes T,
                           const SCEV *const *O, size_t N)
      : SCEVNAryExpr(ID, T, O, N) {
    assert(isSequentialMinMaxType(T));
    // Min and max never overflow
    setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
  }

public:
  Type *getType() const { return getOperand(0)->getType(); }

  static SCEVTypes getEquivalentNonSequentialSCEVType(SCEVTypes Ty) {
    assert(isSequentialMinMaxType(Ty));
    switch (Ty) {
    case scSequentialUMinExpr:
      return scUMinExpr;
    default:
      llvm_unreachable("Not a sequential min/max type.");
    }
  }

  SCEVTypes getEquivalentNonSequentialSCEVType() const {
    return getEquivalentNonSequentialSCEVType(getSCEVType());
  }

  static bool classof(const SCEV *S) {
    return isSequentialMinMaxType(S->getSCEVType());
  }
};

/// This class represents a sequential/in-order unsigned minimum selection.
class SCEVSequentialUMinExpr : public SCEVSequentialMinMaxExpr {
  friend class ScalarEvolution;

  SCEVSequentialUMinExpr(const FoldingSetNodeIDRef ID, const SCEV *const *O,
                         size_t N)
      : SCEVSequentialMinMaxExpr(ID, scSequentialUMinExpr, O, N) {}

public:
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) {
    return S->getSCEVType() == scSequentialUMinExpr;
  }
};

/// This means that we are dealing with an entirely unknown SCEV
/// value, and only represent it as its LLVM Value.  This is the
/// "bottom" value for the analysis.
class SCEVUnknown final : public SCEV, private CallbackVH {
  friend class ScalarEvolution;

  /// The parent ScalarEvolution value. This is used to update the
  /// parent's maps when the value associated with a SCEVUnknown is
  /// deleted or RAUW'd.
  ScalarEvolution *SE;

  /// The next pointer in the linked list of all SCEVUnknown
  /// instances owned by a ScalarEvolution.
  SCEVUnknown *Next;

  SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V, ScalarEvolution *se,
              SCEVUnknown *next)
      : SCEV(ID, scUnknown, 1), CallbackVH(V), SE(se), Next(next) {}

  // Implement CallbackVH.
  void deleted() override;
  void allUsesReplacedWith(Value *New) override;

public:
  Value *getValue() const { return getValPtr(); }

  Type *getType() const { return getValPtr()->getType(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const SCEV *S) { return S->getSCEVType() == scUnknown; }
};

/// This class defines a simple visitor that may be used for
/// various SCEV analysis purposes.
template <typename SC, typename RetVal = void> struct SCEVVisitor {
  RetVal visit(const SCEV *S) {
    switch (S->getSCEVType()) {
    case scConstant:
      return ((SC *)this)->visitConstant((const SCEVConstant *)S);
    case scVScale:
      return ((SC *)this)->visitVScale((const SCEVVScale *)S);
    case scPtrToInt:
      return ((SC *)this)->visitPtrToIntExpr((const SCEVPtrToIntExpr *)S);
    case scTruncate:
      return ((SC *)this)->visitTruncateExpr((const SCEVTruncateExpr *)S);
    case scZeroExtend:
      return ((SC *)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr *)S);
    case scSignExtend:
      return ((SC *)this)->visitSignExtendExpr((const SCEVSignExtendExpr *)S);
    case scAddExpr:
      return ((SC *)this)->visitAddExpr((const SCEVAddExpr *)S);
    case scMulExpr:
      return ((SC *)this)->visitMulExpr((const SCEVMulExpr *)S);
    case scUDivExpr:
      return ((SC *)this)->visitUDivExpr((const SCEVUDivExpr *)S);
    case scAddRecExpr:
      return ((SC *)this)->visitAddRecExpr((const SCEVAddRecExpr *)S);
    case scSMaxExpr:
      return ((SC *)this)->visitSMaxExpr((const SCEVSMaxExpr *)S);
    case scUMaxExpr:
      return ((SC *)this)->visitUMaxExpr((const SCEVUMaxExpr *)S);
    case scSMinExpr:
      return ((SC *)this)->visitSMinExpr((const SCEVSMinExpr *)S);
    case scUMinExpr:
      return ((SC *)this)->visitUMinExpr((const SCEVUMinExpr *)S);
    case scSequentialUMinExpr:
      return ((SC *)this)
          ->visitSequentialUMinExpr((const SCEVSequentialUMinExpr *)S);
    case scUnknown:
      return ((SC *)this)->visitUnknown((const SCEVUnknown *)S);
    case scCouldNotCompute:
      return ((SC *)this)->visitCouldNotCompute((const SCEVCouldNotCompute *)S);
    }
    llvm_unreachable("Unknown SCEV kind!");
  }

  RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
    llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
  }
};
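
/// Illustrative sketch (hypothetical, not part of this header): a visitor
/// returning unsigned must supply a visit* overload for every SCEVTypes
/// kind, since visit() references all of them.
/// \code
///   struct NodeKindTally : SCEVVisitor<NodeKindTally, unsigned> {
///     unsigned visitConstant(const SCEVConstant *) { return 1; }
///     unsigned visitVScale(const SCEVVScale *) { return 1; }
///     // ... one overload per remaining SCEVTypes kind ...
///   };
/// \endcode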

/// Visit all nodes in the expression tree using worklist traversal.
///
/// Visitor implements:
///   // return true to follow this node.
///   bool follow(const SCEV *S);
///   // return true to terminate the search.
///   bool isDone();
template <typename SV> class SCEVTraversal {
  SV &Visitor;
  SmallVector<const SCEV *, 8> Worklist;
  SmallPtrSet<const SCEV *, 8> Visited;

  void push(const SCEV *S) {
    if (Visited.insert(S).second && Visitor.follow(S))
      Worklist.push_back(S);
  }

public:
  SCEVTraversal(SV &V) : Visitor(V) {}

  void visitAll(const SCEV *Root) {
    push(Root);
    while (!Worklist.empty() && !Visitor.isDone()) {
      const SCEV *S = Worklist.pop_back_val();

      switch (S->getSCEVType()) {
      case scConstant:
      case scVScale:
      case scUnknown:
        continue;
      case scPtrToInt:
      case scTruncate:
      case scZeroExtend:
      case scSignExtend:
      case scAddExpr:
      case scMulExpr:
      case scUDivExpr:
      case scSMaxExpr:
      case scUMaxExpr:
      case scSMinExpr:
      case scUMinExpr:
      case scSequentialUMinExpr:
      case scAddRecExpr:
        for (const auto *Op : S->operands()) {
          push(Op);
          if (Visitor.isDone())
            break;
        }
        continue;
      case scCouldNotCompute:
        llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
      }
      llvm_unreachable("Unknown SCEV kind!");
    }
  }
};

/// Use SCEVTraversal to visit all nodes in the given expression tree.
template <typename SV> void visitAll(const SCEV *Root, SV &Visitor) {
  SCEVTraversal<SV> T(Visitor);
  T.visitAll(Root);
}

/// Return true if any node in \p Root satisfies the predicate \p Pred.
template <typename PredTy>
bool SCEVExprContains(const SCEV *Root, PredTy Pred) {
  struct FindClosure {
    bool Found = false;
    PredTy Pred;

    FindClosure(PredTy Pred) : Pred(Pred) {}

    bool follow(const SCEV *S) {
      if (!Pred(S))
        return true;

      Found = true;
      return false;
    }

    bool isDone() const { return Found; }
  };

  FindClosure FC(Pred);
  visitAll(Root, FC);
  return FC.Found;
}
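
/// For example, the following returns true if Root contains any add
/// recurrence (Root is assumed to be a valid SCEV from the caller's context):
/// \code
///   bool HasAddRec = SCEVExprContains(
///       Root, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
/// \endcode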

/// This visitor recursively visits a SCEV expression and re-writes it.
/// The result from each visit is cached, so it will return the same
/// SCEV for the same input.
template <typename SC>
class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
protected:
  ScalarEvolution &SE;
  // Memoize the result of each visit so that we only compute once for
  // the same input SCEV. This is to avoid redundant computations when
  // a SCEV is referenced by multiple SCEVs. Without memoization, this
  // visit algorithm would have exponential time complexity in the worst
  // case, causing the compiler to hang on certain tests.
  SmallDenseMap<const SCEV *, const SCEV *> RewriteResults;

public:
  SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}

  const SCEV *visit(const SCEV *S) {
    auto It = RewriteResults.find(S);
    if (It != RewriteResults.end())
      return It->second;
    auto *Visited = SCEVVisitor<SC, const SCEV *>::visit(S);
    auto Result = RewriteResults.try_emplace(S, Visited);
    assert(Result.second && "Should insert a new entry");
    return Result.first->second;
  }

  const SCEV *visitConstant(const SCEVConstant *Constant) { return Constant; }

  const SCEV *visitVScale(const SCEVVScale *VScale) { return VScale; }

  const SCEV *visitPtrToIntExpr(const SCEVPtrToIntExpr *Expr) {
    const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
    return Operand == Expr->getOperand()
               ? Expr
               : SE.getPtrToIntExpr(Operand, Expr->getType());
  }

  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
    const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
    return Operand == Expr->getOperand()
               ? Expr
               : SE.getTruncateExpr(Operand, Expr->getType());
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
    return Operand == Expr->getOperand()
               ? Expr
               : SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = ((SC *)this)->visit(Expr->getOperand());
    return Operand == Expr->getOperand()
               ? Expr
               : SE.getSignExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getAddExpr(Operands);
  }

  const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getMulExpr(Operands);
  }

  const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
    auto *LHS = ((SC *)this)->visit(Expr->getLHS());
    auto *RHS = ((SC *)this)->visit(Expr->getRHS());
    bool Changed = LHS != Expr->getLHS() || RHS != Expr->getRHS();
    return !Changed ? Expr : SE.getUDivExpr(LHS, RHS);
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr
                    : SE.getAddRecExpr(Operands, Expr->getLoop(),
                                       Expr->getNoWrapFlags());
  }

  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getSMaxExpr(Operands);
  }

  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getUMaxExpr(Operands);
  }

  const SCEV *visitSMinExpr(const SCEVSMinExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getSMinExpr(Operands);
  }

  const SCEV *visitUMinExpr(const SCEVUMinExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getUMinExpr(Operands);
  }

  const SCEV *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    bool Changed = false;
    for (const auto *Op : Expr->operands()) {
      Operands.push_back(((SC *)this)->visit(Op));
      Changed |= Op != Operands.back();
    }
    return !Changed ? Expr : SE.getUMinExpr(Operands, /*Sequential=*/true);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) { return Expr; }

  const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
    return Expr;
  }
};

using ValueToValueMap = DenseMap<const Value *, Value *>;
using ValueToSCEVMapTy = DenseMap<const Value *, const SCEV *>;

/// The SCEVParameterRewriter takes a scalar evolution expression and updates
/// the SCEVUnknown components following the Map (Value -> SCEV).
class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
public:
  static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
                             ValueToSCEVMapTy &Map) {
    SCEVParameterRewriter Rewriter(SE, Map);
    return Rewriter.visit(Scev);
  }

  SCEVParameterRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }

private:
  ValueToSCEVMapTy &Map;
};
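
/// Example (illustrative; N and Ty are assumed to be a Value* and an integer
/// Type* from the caller's context):
/// \code
///   ValueToSCEVMapTy Map;
///   Map[N] = SE.getConstant(Ty, 16);
///   const SCEV *Rewritten = SCEVParameterRewriter::rewrite(Expr, SE, Map);
/// \endcode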

using LoopToScevMapT = DenseMap<const Loop *, const SCEV *>;

/// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
/// the Map (Loop -> SCEV) to all AddRecExprs.
class SCEVLoopAddRecRewriter
    : public SCEVRewriteVisitor<SCEVLoopAddRecRewriter> {
public:
  SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
                             ScalarEvolution &SE) {
    SCEVLoopAddRecRewriter Rewriter(SE, Map);
    return Rewriter.visit(Scev);
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    SmallVector<const SCEV *, 2> Operands;
    for (const SCEV *Op : Expr->operands())
      Operands.push_back(visit(Op));

    const Loop *L = Expr->getLoop();
    if (!Map.count(L))
      return SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());

    return SCEVAddRecExpr::evaluateAtIteration(Operands, Map[L], SE);
  }

private:
  LoopToScevMapT &Map;
};
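
/// Example (illustrative; It is assumed to be a SCEV for the iteration
/// number of loop L):
/// \code
///   LoopToScevMapT Map;
///   Map[L] = It;
///   const SCEV *AtIt = SCEVLoopAddRecRewriter::rewrite(Expr, Map, SE);
/// \endcode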

} // end namespace llvm

#endif // LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
//===- SyntheticCountsUtils.h - utilities for count propagation--*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines utilities for synthetic counts propagation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SYNTHETICCOUNTSUTILS_H
#define LLVM_ANALYSIS_SYNTHETICCOUNTSUTILS_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Support/ScaledNumber.h"

namespace llvm {

/// Class with methods to propagate synthetic entry counts.
///
/// This class is templated on the type of the call graph and designed to work
/// with the traditional per-module callgraph and the summary callgraphs used in
/// ThinLTO. This contains only static methods and alias templates.
template <typename CallGraphType> class SyntheticCountsUtils {
public:
  using Scaled64 = ScaledNumber<uint64_t>;
  using CGT = GraphTraits<CallGraphType>;
  using NodeRef = typename CGT::NodeRef;
  using EdgeRef = typename CGT::EdgeRef;
  using SccTy = std::vector<NodeRef>;

  // Not all EdgeRefs carry information about the source of the edge, so the
  // NodeRef corresponding to the source of the EdgeRef is passed explicitly.
  using GetProfCountTy =
      function_ref<std::optional<Scaled64>(NodeRef, EdgeRef)>;
  using AddCountTy = function_ref<void(NodeRef, Scaled64)>;

  static void propagate(const CallGraphType &CG, GetProfCountTy GetProfCount,
                        AddCountTy AddCount);

private:
  static void propagateFromSCC(const SccTy &SCC, GetProfCountTy GetProfCount,
                               AddCountTy AddCount);
};
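
// Example (illustrative sketch): propagating synthetic counts over the
// module call graph.  `CG` is assumed to be a const CallGraph *, and
// `getEdgeWeight` / `Counts` are hypothetical helpers supplied by the caller.
//
//   using CGUtils = SyntheticCountsUtils<const CallGraph *>;
//   CGUtils::propagate(
//       CG,
//       [&](CGUtils::NodeRef N, CGUtils::EdgeRef E)
//           -> std::optional<CGUtils::Scaled64> {
//         return getEdgeWeight(N, E); // profile count of edge E from node N
//       },
//       [&](CGUtils::NodeRef N, CGUtils::Scaled64 C) { Counts[N] += C; });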
} // namespace llvm

#endif // LLVM_ANALYSIS_SYNTHETICCOUNTSUTILS_H
//===- LoopPass.h - LoopPass class ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LoopPass class. All loop optimization
// and transformation passes are derived from LoopPass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPPASS_H
#define LLVM_ANALYSIS_LOOPPASS_H

#include "llvm/IR/LegacyPassManagers.h"
#include "llvm/Pass.h"
#include <deque>

namespace llvm {

class Loop;
class LoopInfo;
class LPPassManager;
class Function;

class LoopPass : public Pass {
public:
  explicit LoopPass(char &pid) : Pass(PT_Loop, pid) {}

  /// createPrinterPass - Get a pass to print the function corresponding
  /// to a Loop.
  Pass *createPrinterPass(raw_ostream &O,
                          const std::string &Banner) const override;

  // runOnLoop - This method should be implemented by the subclass to perform
  // whatever action is necessary for the specified Loop.
  virtual bool runOnLoop(Loop *L, LPPassManager &LPM) = 0;

  using llvm::Pass::doInitialization;
  using llvm::Pass::doFinalization;

  // Initialization and finalization hooks.
  virtual bool doInitialization(Loop *L, LPPassManager &LPM) {
    return false;
  }

  // The finalization hook does not supply a Loop because, by that time, the
  // loop nest may be completely different.
  virtual bool doFinalization() { return false; }

  // Check if this pass is suitable for the current LPPassManager, if
  // available. A pass P is not suitable for an LPPassManager if P does not
  // preserve the higher-level analysis info used by other LPPassManager
  // passes. In such a case, pop the LPPassManager from the stack. This
  // forces assignPassManager() to create a new LPPassManager, as expected.
  void preparePassManager(PMStack &PMS) override;

  /// Assign pass manager to manage this pass
  void assignPassManager(PMStack &PMS, PassManagerType PMT) override;

  ///  Return what kind of Pass Manager can manage this pass.
  PassManagerType getPotentialPassManagerType() const override {
    return PMT_LoopPassManager;
  }

protected:
  /// Optional passes call this function to check whether the pass should be
  /// skipped. This is the case when Attribute::OptimizeNone is set or when
  /// optimization bisect is over the limit.
  bool skipLoop(const Loop *L) const;
};
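
// Example (illustrative sketch): a minimal legacy-PM loop pass built on this
// interface.  `MyLoopPass` is hypothetical; its `ID` definition and pass
// registration boilerplate are assumed to live in a .cpp file.
//
//   struct MyLoopPass : public LoopPass {
//     static char ID;
//     MyLoopPass() : LoopPass(ID) {}
//     bool runOnLoop(Loop *L, LPPassManager &LPM) override {
//       if (skipLoop(L)) // honor OptimizeNone / opt-bisect
//         return false;
//       // ... transform L; return true if anything was modified.
//       return false;
//     }
//   };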

class LPPassManager : public FunctionPass, public PMDataManager {
public:
  static char ID;
  explicit LPPassManager();

  /// run - Execute all of the passes scheduled for execution.  Keep track of
  /// whether any of the passes modifies the module, and if so, return true.
  bool runOnFunction(Function &F) override;

  /// Pass Manager itself does not invalidate any analysis info.
  // LPPassManager needs LoopInfo.
  void getAnalysisUsage(AnalysisUsage &Info) const override;

  StringRef getPassName() const override { return "Loop Pass Manager"; }

  PMDataManager *getAsPMDataManager() override { return this; }
  Pass *getAsPass() override { return this; }

  /// Print passes managed by this manager
  void dumpPassStructure(unsigned Offset) override;

  LoopPass *getContainedPass(unsigned N) {
    assert(N < PassVector.size() && "Pass number out of range!");
    LoopPass *LP = static_cast<LoopPass *>(PassVector[N]);
    return LP;
  }

  PassManagerType getPassManagerType() const override {
    return PMT_LoopPassManager;
  }

public:
  // Add a new loop into the loop queue.
  void addLoop(Loop &L);

  // Mark \p L as deleted.
  void markLoopAsDeleted(Loop &L);

private:
  std::deque<Loop *> LQ;
  LoopInfo *LI;
  Loop *CurrentLoop;
  bool CurrentLoopDeleted;
};

// This pass is required by the LCSSA transformation. It is used inside
// LPPassManager to check if the current pass preserves LCSSA form, and if it
// does, the pass manager calls LCSSA verification for the current loop.
struct LCSSAVerificationPass : public FunctionPass {
  static char ID;
  LCSSAVerificationPass();

  bool runOnFunction(Function &F) override { return false; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

} // End llvm namespace

#endif // LLVM_ANALYSIS_LOOPPASS_H
//===- ModuleDebugInfoPrinter.h -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
#define LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H

#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class raw_ostream;

class ModuleDebugInfoPrinterPass
    : public PassInfoMixin<ModuleDebugInfoPrinterPass> {
  DebugInfoFinder Finder;
  raw_ostream &OS;

public:
  explicit ModuleDebugInfoPrinterPass(raw_ostream &OS);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_ANALYSIS_MODULEDEBUGINFOPRINTER_H
//===- NoInferenceModelRunner.h ---- noop ML model runner -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H

#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Config/llvm-config.h"
namespace llvm {
/// A pseudo model runner. We use it to store feature values when collecting
/// logs for the default policy, in 'development' mode, but never ask it to
/// 'run'.
class NoInferenceModelRunner : public MLModelRunner {
public:
  NoInferenceModelRunner(LLVMContext &Ctx,
                         const std::vector<TensorSpec> &Inputs);

  static bool classof(const MLModelRunner *R) {
    return R->getKind() == MLModelRunner::Kind::NoOp;
  }

private:
  void *evaluateUntyped() override {
    llvm_unreachable("We shouldn't call run on this model runner.");
  }
};
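
// Example (illustrative sketch): creating a runner that only buffers feature
// values for logging.  `Ctx` is assumed to be an LLVMContext, and the single
// int64 feature spec is a made-up placeholder for the model's real inputs.
//
//   std::vector<TensorSpec> Inputs{
//       TensorSpec::createSpec<int64_t>("feature0", {1})};
//   NoInferenceModelRunner Runner(Ctx, Inputs);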
} // namespace llvm
#endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDefs/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by taking the MemoryUse associated with it and walking from use
/// to def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
///
/// In addition to the def/use graph described above, MemoryDefs also contain
/// an "optimized" definition use.  The "optimized" use points to some def
/// reachable through the memory def chain.  The optimized def *may* (but is
/// not required to) alias the original MemoryDef, but no def *closer* to the
/// source def may alias it.  As the name implies, the purpose of the optimized
/// use is to allow caching of clobber searches for memory defs.  The optimized
/// def may be nullptr, in which case clients must walk the defining access
/// chain.
///
/// When iterating the uses of a MemoryDef, both defining uses and optimized
/// uses will be encountered.  If only one type is needed, the client must
/// filter the use walk.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

template <class GraphType> struct GraphTraits;
class BasicBlock;
class Function;
class Instruction;
class LLVMContext;
class MemoryAccess;
class MemorySSAWalker;
class Module;
class Use;
class Value;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID()
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  /// Do we have an optimized use?
  inline bool isOptimized() const;
  /// Return the MemoryAccess associated with the optimized use, or nullptr.
  inline MemoryAccess *getOptimized() const;
  /// Sets the optimized use for a MemoryDef.
  inline void setOptimized(MemoryAccess *);

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
                 unsigned NumOperands)
      : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
        MemoryInstruction(MI) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
  }

private:
  Instruction *MemoryInstruction;
};

/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
                       /*NumOperands=*/1) {}

  // allocate space for exactly one operand
  void *operator new(size_t S) { return User::operator new(S, 1); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  /// Whether the MemoryUse is optimized. If ensureOptimizedUses() was called,
  /// uses will usually be optimized, but this is not guaranteed (e.g. due to
  /// invalidation and optimization limits).
  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
                       /*NumOperands=*/2),
        ID(Ver) {}

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    setOperand(1, MA);
    OptimizedID = MA->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(getOperand(1));
  }

  bool isOptimized() const {
    return getOptimized() && OptimizedID == getOptimized()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
    setOperand(1, nullptr);
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {
  static Use *op_begin(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_begin(MU);
    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  }

  static Use *op_end(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_end(MU);
    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  }

  static unsigned operands(const MemoryUseOrDef *MUD) {
    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::operands(MU);
    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  }
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number @p i.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming may be changed.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Cannot remove incoming values from MemoryPhis with "
                     "fewer than 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation.  This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};
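
// Example (illustrative sketch): inspecting the MemoryPhi of a block,
// assuming `MSSA` is a MemorySSA instance and `BB` a BasicBlock with a phi.
//
//   if (MemoryPhi *MP = MSSA.getMemoryAccess(BB))
//     for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I)
//       errs() << *MP->getIncomingValue(I) << " comes from "
//              << MP->getIncomingBlock(I)->getName() << "\n";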

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);

  // MemorySSA must remain where it's constructed; Walkers it creates store
  // pointers to it.
  MemorySSA(MemorySSA &&) = delete;

  ~MemorySSA();

  MemorySSAWalker *getWalker();
  MemorySSAWalker *getSkipSelfWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, this gets the memory
  /// phi node that exists for that block, if there is one. Otherwise, this
  /// will get a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  }

  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  }

  DominatorTree &getDomTree() const { return *DT; }

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, own and delete pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag.  If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  enum class VerificationLevel { Fast, Full };
  /// Verify that MemorySSA is self-consistent (i.e., definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA(VerificationLevel = VerificationLevel::Fast) const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End, BeforeTerminator };

  /// By default, uses are *not* optimized during MemorySSA construction.
  /// Calling this method will attempt to optimize all MemoryUses, if this has
  /// not happened yet for this MemorySSA instance. This should be done if you
  /// plan to query the clobbering access for most uses, or if you walk the
  /// def-use chain of uses.
  void ensureOptimizedUses();

  AliasAnalysis &getAA() { return *AA; }

protected:
  // Used by Memory SSA dumpers and wrapper pass
  friend class MemorySSAUpdater;

  void verifyOrderingDominationAndDefUses(
      Function &F, VerificationLevel = VerificationLevel::Fast) const;
  void verifyDominationNumbers(const Function &F) const;
  void verifyPrevDefInPhis(Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
                                      const MemoryUseOrDef *Template = nullptr,
                                      bool CreationMustSucceed = true);

private:
  class ClobberWalkerBase;
  class CachingWalker;
  class SkipSelfWalker;
  class OptimizeUses;

  CachingWalker *getWalkerImpl();
  void buildMemorySSA(BatchAAResults &BAA);

  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  template <typename AliasAnalysisType>
  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
                                  const MemoryUseOrDef *Template = nullptr);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;
  AliasAnalysis *AA = nullptr;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<ClobberWalkerBase> WalkerBase;
  std::unique_ptr<CachingWalker> Walker;
  std::unique_ptr<SkipSelfWalker> SkipWalker;
  unsigned NextID = 0;
  bool IsOptimized = false;
};
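
// Example (illustrative sketch): basic MemorySSA queries, assuming `MSSA`
// was built for the enclosing function and `I` is an Instruction * that
// touches memory.
//
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(I)) {
//     MemoryAccess *Def = MA->getDefiningAccess();
//     if (MSSA.isLiveOnEntryDef(Def))
//       ; // I reads/writes state that is live on entry to the function.
//   }
//   if (const MemorySSA::AccessList *AL =
//           MSSA.getBlockAccesses(I->getParent()))
//     for (const MemoryAccess &Acc : *AL)
//       errs() << Acc << "\n";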

/// Enables verification of MemorySSA.
///
/// The checks which this flag enables are expensive and disabled by default
/// unless `EXPENSIVE_CHECKS` is defined.  The flag `-verify-memoryssa` can be
/// used to selectively enable the verification without recompilation.
extern bool VerifyMemorySSA;

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction.  Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;

    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv);
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};
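
// Example (illustrative sketch): obtaining MemorySSA inside a hypothetical
// new-PM function pass `MyPass`.
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     MSSA.ensureOptimizedUses(); // if most uses will be queried
//     // ... use MSSA ...
//     return PreservedAnalyses::all();
//   }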

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;
  bool EnsureOptimizedUses;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS, bool EnsureOptimizedUses)
      : OS(OS), EnsureOptimizedUses(EnsureOptimizedUses) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA via the walker.
class MemorySSAWalkerPrinterPass
    : public PassInfoMixin<MemorySSAWalkerPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAWalkerPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I,
                                          BatchAAResults &AA) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA, AA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  BatchAAResults &AA) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &,
                                                  BatchAAResults &AA) = 0;

  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(I, BAA);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(MA, BAA);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) {
    BatchAAResults BAA(MSSA->getAA());
    return getClobberingMemoryAccess(MA, Loc, BAA);
  }

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation.  This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};
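
// Example (illustrative sketch): finding the nearest dominating clobber of a
// load, assuming `MSSA` is a MemorySSA instance and `LI` is a LoadInst *.
//
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
//   if (MSSA.isLiveOnEntryDef(Clobber))
//     ; // nothing in this function clobbers the loaded location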

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          BatchAAResults &) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &,
                                          BatchAAResults &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename std::iterator_traits<BaseT>::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first), DT(DT) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  typename std::iterator_traits<BaseT>::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
  /// loop. In particular, this guarantees that it only references a single
  /// MemoryLocation during execution of the containing function.
  bool IsGuaranteedLoopInvariant(const Value *Ptr) const;

  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    CurrentPair.second = Location;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);

      if (Value *Addr =
              Translator.translateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), DT, true))
        if (Addr != CurrentPair.second.Ptr)
          CurrentPair.second = CurrentPair.second.getWithNewPtr(Addr);

      // Mark size as unknown, if the location is not guaranteed to be
      // loop-invariant for any possible loop in the function. Setting the size
      // to unknown guarantees that any memory accesses that access locations
      // after the pointer are considered as clobbers, which is important to
      // catch loop carried dependences.
      if (!IsGuaranteedLoopInvariant(CurrentPair.second.Ptr))
        CurrentPair.second = CurrentPair.second.getWithNewSize(
            LocationSize::beforeOrAfterPointer());
    }
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  DominatorTree *DT = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator
upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return upward_defs_iterator(Pair, &DT);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
}
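
// Example (illustrative sketch): walking upwards from a load's use while
// phi-translating the location.  `MU` is assumed to be the MemoryUse of a
// load `LI`, `DT` the function's DominatorTree, and `process` a hypothetical
// callback.
//
//   MemoryAccessPair Start(MU->getDefiningAccess(), MemoryLocation::get(LI));
//   for (const MemoryAccessPair &P : upward_defs(Start, DT))
//     process(P.first, P.second); // access plus (possibly translated) location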

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node.  The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}
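
// Example (illustrative sketch): counting the MemoryDefs between an access
// `Start` and liveOnEntry by walking its def chain.
//
//   unsigned NumDefs = 0;
//   for (MemoryAccess *MA : def_chain(Start))
//     if (isa<MemoryDef>(MA))
//       ++NumDefs;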

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H
//===- AliasAnalysisEvaluator.h - Alias Analysis Accuracy Evaluator -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements a simple N^2 alias analysis accuracy evaluator. The
/// analysis result is a set of statistics of how many times the AA
/// infrastructure provides each kind of alias result and mod/ref result when
/// queried with all pairs of pointers in the function.
///
/// It can be used to evaluate a change in an alias analysis implementation,
/// algorithm, or the AA pipeline infrastructure itself. It acts like a stable
/// and easily tested consumer of all AA information exposed.
///
/// This is inspired and adapted from code by: Naveen Neelakantam, Francesco
/// Spadini, and Wojciech Stryjewski.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
#define LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class AAResults;
class Function;
class FunctionPass;

class AAEvaluator : public PassInfoMixin<AAEvaluator> {
  int64_t FunctionCount = 0;
  int64_t NoAliasCount = 0, MayAliasCount = 0, PartialAliasCount = 0;
  int64_t MustAliasCount = 0;
  int64_t NoModRefCount = 0, ModCount = 0, RefCount = 0, ModRefCount = 0;

public:
  AAEvaluator() = default;
  AAEvaluator(AAEvaluator &&Arg)
      : FunctionCount(Arg.FunctionCount), NoAliasCount(Arg.NoAliasCount),
        MayAliasCount(Arg.MayAliasCount),
        PartialAliasCount(Arg.PartialAliasCount),
        MustAliasCount(Arg.MustAliasCount), NoModRefCount(Arg.NoModRefCount),
        ModCount(Arg.ModCount), RefCount(Arg.RefCount),
        ModRefCount(Arg.ModRefCount) {
    Arg.FunctionCount = 0;
  }
  ~AAEvaluator();

  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

private:
  // Allow the legacy pass to run this using an internal API.
  friend class AAEvalLegacyPass;

  void runInternal(Function &F, AAResults &AA);
};

/// Create a wrapper of the above for the legacy pass manager.
FunctionPass *createAAEvalPass();
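
// A minimal usage sketch under the new pass manager (assuming `F` is a
// Function and `FAM` a configured FunctionAnalysisManager; both names are
// illustrative):
//
//   FunctionPassManager FPM;
//   FPM.addPass(AAEvaluator());
//   FPM.run(F, FAM);   // the collected statistics are printed when the
//                      // evaluator object is destroyed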

} // end namespace llvm

#endif // LLVM_ANALYSIS_ALIASANALYSISEVALUATOR_H
//===- ValueLattice.h - Value constraint analysis ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUELATTICE_H
#define LLVM_ANALYSIS_VALUELATTICE_H

#include "llvm/IR/Constants.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instructions.h"

//===----------------------------------------------------------------------===//
//                               ValueLatticeElement
//===----------------------------------------------------------------------===//

namespace llvm {

class Constant;

/// This class represents lattice values for constants.
///
/// FIXME: This is basically just for bringup; it can be made much richer in
/// the future.
///
class ValueLatticeElement {
  enum ValueLatticeElementTy {
    /// This Value has no known value yet.  As a result, this implies the
    /// producing instruction is dead.  Caution: We use this as the starting
    /// state in our local meet rules.  In this usage, it's taken to mean
    /// "nothing known yet".
    /// Transition to any other state allowed.
    unknown,

    /// This Value is an UndefValue constant or produces undef. Undefined values
    /// can be merged with constants (or single element constant ranges),
    /// assuming all uses of the result will be replaced.
    /// Transition allowed to the following states:
    ///  constant
    ///  constantrange_including_undef
    ///  overdefined
    undef,

    /// This Value has a specific constant value.  The constant cannot be undef.
    /// (For constant integers, constantrange is used instead. Integer typed
    /// constantexprs can appear as constant.) Note that the constant state
    /// can be reached by merging undef & constant states.
    /// Transition allowed to the following states:
    ///  overdefined
    constant,

    /// This Value is known to not have the specified value. (For constant
    /// integers, constantrange is used instead.  As above, integer typed
    /// constantexprs can appear here.)
    /// Transition allowed to the following states:
    ///  overdefined
    notconstant,

    /// The Value falls within this range. (Used only for integer typed values.)
    /// Transition allowed to the following states:
    ///  constantrange (new range must be a superset of the existing range)
    ///  constantrange_including_undef
    ///  overdefined
    constantrange,

    /// This Value falls within this range, but also may be undef.
    /// Merging it with other constant ranges results in
    /// constantrange_including_undef.
    /// Transition allowed to the following states:
    ///  overdefined
    constantrange_including_undef,

    /// We can not precisely model the dynamic values this value might take.
    /// No transitions are allowed after reaching overdefined.
    overdefined,
  };

  ValueLatticeElementTy Tag : 8;
  /// Number of times a constant range has been extended with widening enabled.
  unsigned NumRangeExtensions : 8;

  /// The union either stores a pointer to a constant or a constant range,
  /// associated to the lattice element. We have to ensure that Range is
  /// initialized or destroyed when changing state to or from constantrange.
  union {
    Constant *ConstVal;
    ConstantRange Range;
  };

  /// Destroy contents of lattice value, without destructing the object.
  void destroy() {
    switch (Tag) {
    case overdefined:
    case unknown:
    case undef:
    case constant:
    case notconstant:
      break;
    case constantrange_including_undef:
    case constantrange:
      Range.~ConstantRange();
      break;
    }
  }

public:
  /// Struct to control some aspects related to merging constant ranges.
  struct MergeOptions {
    /// The merge value may include undef.
    bool MayIncludeUndef;

    /// Handle repeatedly extending a range by going to overdefined after a
    /// number of steps.
    bool CheckWiden;

    /// The number of allowed widening steps (including setting the range
    /// initially).
    unsigned MaxWidenSteps;

    MergeOptions() : MergeOptions(false, false) {}

    MergeOptions(bool MayIncludeUndef, bool CheckWiden,
                 unsigned MaxWidenSteps = 1)
        : MayIncludeUndef(MayIncludeUndef), CheckWiden(CheckWiden),
          MaxWidenSteps(MaxWidenSteps) {}

    MergeOptions &setMayIncludeUndef(bool V = true) {
      MayIncludeUndef = V;
      return *this;
    }

    MergeOptions &setCheckWiden(bool V = true) {
      CheckWiden = V;
      return *this;
    }

    MergeOptions &setMaxWidenSteps(unsigned Steps = 1) {
      CheckWiden = true;
      MaxWidenSteps = Steps;
      return *this;
    }
  };
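
  // For example, a merge that may introduce undef and allows up to four
  // widening steps could be configured as (an illustrative sketch, not a
  // prescribed usage):
  //
  //   MergeOptions().setMayIncludeUndef().setMaxWidenSteps(4)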

  // ConstVal and Range are initialized on-demand.
  ValueLatticeElement() : Tag(unknown), NumRangeExtensions(0) {}

  ~ValueLatticeElement() { destroy(); }

  ValueLatticeElement(const ValueLatticeElement &Other)
      : Tag(Other.Tag), NumRangeExtensions(0) {
    switch (Other.Tag) {
    case constantrange:
    case constantrange_including_undef:
      new (&Range) ConstantRange(Other.Range);
      NumRangeExtensions = Other.NumRangeExtensions;
      break;
    case constant:
    case notconstant:
      ConstVal = Other.ConstVal;
      break;
    case overdefined:
    case unknown:
    case undef:
      break;
    }
  }

  ValueLatticeElement(ValueLatticeElement &&Other)
      : Tag(Other.Tag), NumRangeExtensions(0) {
    switch (Other.Tag) {
    case constantrange:
    case constantrange_including_undef:
      new (&Range) ConstantRange(std::move(Other.Range));
      NumRangeExtensions = Other.NumRangeExtensions;
      break;
    case constant:
    case notconstant:
      ConstVal = Other.ConstVal;
      break;
    case overdefined:
    case unknown:
    case undef:
      break;
    }
    Other.Tag = unknown;
  }

  ValueLatticeElement &operator=(const ValueLatticeElement &Other) {
    destroy();
    new (this) ValueLatticeElement(Other);
    return *this;
  }

  ValueLatticeElement &operator=(ValueLatticeElement &&Other) {
    destroy();
    new (this) ValueLatticeElement(std::move(Other));
    return *this;
  }

  static ValueLatticeElement get(Constant *C) {
    ValueLatticeElement Res;
    if (isa<UndefValue>(C))
      Res.markUndef();
    else
      Res.markConstant(C);
    return Res;
  }
  static ValueLatticeElement getNot(Constant *C) {
    ValueLatticeElement Res;
    assert(!isa<UndefValue>(C) && "!= undef is not supported");
    Res.markNotConstant(C);
    return Res;
  }
  static ValueLatticeElement getRange(ConstantRange CR,
                                      bool MayIncludeUndef = false) {
    if (CR.isFullSet())
      return getOverdefined();

    if (CR.isEmptySet()) {
      ValueLatticeElement Res;
      if (MayIncludeUndef)
        Res.markUndef();
      return Res;
    }

    ValueLatticeElement Res;
    Res.markConstantRange(std::move(CR),
                          MergeOptions().setMayIncludeUndef(MayIncludeUndef));
    return Res;
  }
  static ValueLatticeElement getOverdefined() {
    ValueLatticeElement Res;
    Res.markOverdefined();
    return Res;
  }

  bool isUndef() const { return Tag == undef; }
  bool isUnknown() const { return Tag == unknown; }
  bool isUnknownOrUndef() const { return Tag == unknown || Tag == undef; }
  bool isConstant() const { return Tag == constant; }
  bool isNotConstant() const { return Tag == notconstant; }
  bool isConstantRangeIncludingUndef() const {
    return Tag == constantrange_including_undef;
  }
  /// Returns true if this value is a constant range. Use \p UndefAllowed to
  /// exclude non-singleton constant ranges that may also be undef. Note that
  /// this function also returns true if the range may include undef, but only
  /// contains a single element. In that case, it can be replaced by a constant.
  bool isConstantRange(bool UndefAllowed = true) const {
    return Tag == constantrange || (Tag == constantrange_including_undef &&
                                    (UndefAllowed || Range.isSingleElement()));
  }
  bool isOverdefined() const { return Tag == overdefined; }

  Constant *getConstant() const {
    assert(isConstant() && "Cannot get the constant of a non-constant!");
    return ConstVal;
  }

  Constant *getNotConstant() const {
    assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
    return ConstVal;
  }

  /// Returns the constant range for this value. Use \p UndefAllowed to exclude
  /// non-singleton constant ranges that may also be undef. Note that this
  /// function also returns a range if the range may include undef, but only
  /// contains a single element. In that case, it can be replaced by a constant.
  const ConstantRange &getConstantRange(bool UndefAllowed = true) const {
    assert(isConstantRange(UndefAllowed) &&
           "Cannot get the constant-range of a non-constant-range!");
    return Range;
  }

  std::optional<APInt> asConstantInteger() const {
    if (isConstant() && isa<ConstantInt>(getConstant())) {
      return cast<ConstantInt>(getConstant())->getValue();
    } else if (isConstantRange() && getConstantRange().isSingleElement()) {
      return *getConstantRange().getSingleElement();
    }
    return std::nullopt;
  }

  bool markOverdefined() {
    if (isOverdefined())
      return false;
    destroy();
    Tag = overdefined;
    return true;
  }

  bool markUndef() {
    if (isUndef())
      return false;

    assert(isUnknown());
    Tag = undef;
    return true;
  }

  bool markConstant(Constant *V, bool MayIncludeUndef = false) {
    if (isa<UndefValue>(V))
      return markUndef();

    if (isConstant()) {
      assert(getConstant() == V && "Marking constant with different value");
      return false;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
      return markConstantRange(
          ConstantRange(CI->getValue()),
          MergeOptions().setMayIncludeUndef(MayIncludeUndef));

    assert(isUnknown() || isUndef());
    Tag = constant;
    ConstVal = V;
    return true;
  }

  bool markNotConstant(Constant *V) {
    assert(V && "Marking constant with NULL");
    if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
      return markConstantRange(
          ConstantRange(CI->getValue() + 1, CI->getValue()));

    if (isa<UndefValue>(V))
      return false;

    if (isNotConstant()) {
      assert(getNotConstant() == V && "Marking !constant with different value");
      return false;
    }

    assert(isUnknown());
    Tag = notconstant;
    ConstVal = V;
    return true;
  }

  /// Mark the object as a constant range with \p NewR. If the object is
  /// already a constant range, nothing changes if the existing range and tag
  /// both match \p NewR and the new tag. Otherwise \p NewR must be a superset
  /// of the existing range, or the object must be undef. The tag is set to
  /// constantrange_including_undef if either the existing value or the new
  /// range may include undef.
  bool markConstantRange(ConstantRange NewR,
                         MergeOptions Opts = MergeOptions()) {
    assert(!NewR.isEmptySet() && "should only be called for non-empty sets");

    if (NewR.isFullSet())
      return markOverdefined();

    ValueLatticeElementTy OldTag = Tag;
    ValueLatticeElementTy NewTag =
        (isUndef() || isConstantRangeIncludingUndef() || Opts.MayIncludeUndef)
            ? constantrange_including_undef
            : constantrange;
    if (isConstantRange()) {
      Tag = NewTag;
      if (getConstantRange() == NewR)
        return Tag != OldTag;

      // Simple form of widening. If a range is extended multiple times, go to
      // overdefined.
      if (Opts.CheckWiden && ++NumRangeExtensions > Opts.MaxWidenSteps)
        return markOverdefined();

      assert(NewR.contains(getConstantRange()) &&
             "Existing range must be a subset of NewR");
      Range = std::move(NewR);
      return true;
    }

    assert(isUnknown() || isUndef());

    NumRangeExtensions = 0;
    Tag = NewTag;
    new (&Range) ConstantRange(std::move(NewR));
    return true;
  }

  /// Updates this object to approximate both this object and RHS. Returns
  /// true if this object has been changed.
  bool mergeIn(const ValueLatticeElement &RHS,
               MergeOptions Opts = MergeOptions()) {
    if (RHS.isUnknown() || isOverdefined())
      return false;
    if (RHS.isOverdefined()) {
      markOverdefined();
      return true;
    }

    if (isUndef()) {
      assert(!RHS.isUnknown());
      if (RHS.isUndef())
        return false;
      if (RHS.isConstant())
        return markConstant(RHS.getConstant(), true);
      if (RHS.isConstantRange())
        return markConstantRange(RHS.getConstantRange(true),
                                 Opts.setMayIncludeUndef());
      return markOverdefined();
    }

    if (isUnknown()) {
      assert(!RHS.isUnknown() && "Unknown RHS should be handled earlier");
      *this = RHS;
      return true;
    }

    if (isConstant()) {
      if (RHS.isConstant() && getConstant() == RHS.getConstant())
        return false;
      if (RHS.isUndef())
        return false;
      markOverdefined();
      return true;
    }

    if (isNotConstant()) {
      if (RHS.isNotConstant() && getNotConstant() == RHS.getNotConstant())
        return false;
      markOverdefined();
      return true;
    }

    auto OldTag = Tag;
    assert(isConstantRange() && "New ValueLattice type?");
    if (RHS.isUndef()) {
      Tag = constantrange_including_undef;
      return OldTag != Tag;
    }

    if (!RHS.isConstantRange()) {
      // We can get here if we've encountered a constantexpr of integer type
      // and merge it with a constantrange.
      markOverdefined();
      return true;
    }

    ConstantRange NewR = getConstantRange().unionWith(RHS.getConstantRange());
    return markConstantRange(
        std::move(NewR),
        Opts.setMayIncludeUndef(RHS.isConstantRangeIncludingUndef()));
  }

  /// Compares this symbolic value with Other using Pred and returns either
  /// true, false or undef constants, or nullptr if the comparison cannot be
  /// evaluated.
  Constant *getCompare(CmpInst::Predicate Pred, Type *Ty,
                       const ValueLatticeElement &Other,
                       const DataLayout &DL) const;

  unsigned getNumRangeExtensions() const { return NumRangeExtensions; }
  void setNumRangeExtensions(unsigned N) { NumRangeExtensions = N; }
};

static_assert(sizeof(ValueLatticeElement) <= 40,
              "size of ValueLatticeElement changed unexpectedly");

raw_ostream &operator<<(raw_ostream &OS, const ValueLatticeElement &Val);
} // end namespace llvm
#endif // LLVM_ANALYSIS_VALUELATTICE_H
//===- llvm/Analysis/AliasSetTracker.h - Build Alias Sets -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines two classes: AliasSetTracker and AliasSet. These interfaces
// are used to classify a collection of pointer references into a maximal number
// of disjoint sets. Each AliasSet object constructed by the AliasSetTracker
// object refers to memory disjoint from the other sets.
//
// An AliasSetTracker can only be used on immutable IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
#define LLVM_ANALYSIS_ALIASSETTRACKER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <vector>

namespace llvm {

class AliasResult;
class AliasSetTracker;
class AnyMemSetInst;
class AnyMemTransferInst;
class BasicBlock;
class BatchAAResults;
class LoadInst;
enum class ModRefInfo : uint8_t;
class raw_ostream;
class StoreInst;
class VAArgInst;
class Value;

class AliasSet : public ilist_node<AliasSet> {
  friend class AliasSetTracker;

  class PointerRec {
    Value *Val;  // The pointer this record corresponds to.
    PointerRec **PrevInList = nullptr;
    PointerRec *NextInList = nullptr;
    AliasSet *AS = nullptr;
    LocationSize Size = LocationSize::mapEmpty();
    AAMDNodes AAInfo;

    // Whether the size for this record has been set at all. This makes no
    // guarantees about the size being known.
    bool isSizeSet() const { return Size != LocationSize::mapEmpty(); }

  public:
    PointerRec(Value *V)
      : Val(V), AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}

    Value *getValue() const { return Val; }

    PointerRec *getNext() const { return NextInList; }
    bool hasAliasSet() const { return AS != nullptr; }

    PointerRec** setPrevInList(PointerRec **PIL) {
      PrevInList = PIL;
      return &NextInList;
    }

    bool updateSizeAndAAInfo(LocationSize NewSize, const AAMDNodes &NewAAInfo) {
      bool SizeChanged = false;
      if (NewSize != Size) {
        LocationSize OldSize = Size;
        Size = isSizeSet() ? Size.unionWith(NewSize) : NewSize;
        SizeChanged = OldSize != Size;
      }

      if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
        // We don't have an AAInfo yet. Set it to NewAAInfo.
        AAInfo = NewAAInfo;
      else {
        AAMDNodes Intersection(AAInfo.intersect(NewAAInfo));
        SizeChanged |= Intersection != AAInfo;
        AAInfo = Intersection;
      }
      return SizeChanged;
    }

    LocationSize getSize() const {
      assert(isSizeSet() && "Getting an unset size!");
      return Size;
    }

    /// Return the AAInfo, or null if there is no information or conflicting
    /// information.
    AAMDNodes getAAInfo() const {
      // If we have missing or conflicting AAInfo, return null.
      if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey() ||
          AAInfo == DenseMapInfo<AAMDNodes>::getTombstoneKey())
        return AAMDNodes();
      return AAInfo;
    }

    AliasSet *getAliasSet(AliasSetTracker &AST) {
      assert(AS && "No AliasSet yet!");
      if (AS->Forward) {
        AliasSet *OldAS = AS;
        AS = OldAS->getForwardedTarget(AST);
        AS->addRef();
        OldAS->dropRef(AST);
      }
      return AS;
    }

    void setAliasSet(AliasSet *as) {
      assert(!AS && "Already have an alias set!");
      AS = as;
    }

    void eraseFromList() {
      if (NextInList) NextInList->PrevInList = PrevInList;
      *PrevInList = NextInList;
      if (AS->PtrListEnd == &NextInList) {
        AS->PtrListEnd = PrevInList;
        assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
      }
      delete this;
    }
  };

  // Doubly linked list of nodes.
  PointerRec *PtrList = nullptr;
  PointerRec **PtrListEnd;
  // Forwarding pointer.
  AliasSet *Forward = nullptr;

  /// All instructions without a specific address in this alias set.
  std::vector<AssertingVH<Instruction>> UnknownInsts;

  /// Number of nodes pointing to this AliasSet plus the number of AliasSets
  /// forwarding to it.
  unsigned RefCount : 27;

  // Signifies that this set should be considered to alias any pointer.
  // Use when the tracker holding this set is saturated.
  unsigned AliasAny : 1;

  /// The kinds of access this alias set models.
  ///
  /// We keep track of whether this alias set merely refers to the locations of
  /// memory (and not any particular access), whether it modifies or references
  /// the memory, or whether it does both. The lattice goes from "NoAccess" to
  /// either RefAccess or ModAccess, then to ModRefAccess as necessary.
  enum AccessLattice {
    NoAccess = 0,
    RefAccess = 1,
    ModAccess = 2,
    ModRefAccess = RefAccess | ModAccess
  };
  unsigned Access : 2;

  /// The kind of alias relationship between pointers of the set.
  ///
  /// These represent conservatively correct alias results between any members
  /// of the set. We represent these independently of the values of alias
  /// results in order to pack it into a single bit. Lattice goes from
  /// MustAlias to MayAlias.
  enum AliasLattice {
    SetMustAlias = 0, SetMayAlias = 1
  };
  unsigned Alias : 1;

  unsigned SetSize = 0;

  void addRef() { ++RefCount; }

  void dropRef(AliasSetTracker &AST) {
    assert(RefCount >= 1 && "Invalid reference count detected!");
    if (--RefCount == 0)
      removeFromTracker(AST);
  }

public:
  AliasSet(const AliasSet &) = delete;
  AliasSet &operator=(const AliasSet &) = delete;

  /// Accessors...
  bool isRef() const { return Access & RefAccess; }
  bool isMod() const { return Access & ModAccess; }
  bool isMustAlias() const { return Alias == SetMustAlias; }
  bool isMayAlias()  const { return Alias == SetMayAlias; }

  /// Return true if this alias set should be ignored as part of the
  /// AliasSetTracker object.
  bool isForwardingAliasSet() const { return Forward; }

  /// Merge the specified alias set into this alias set.
  void mergeSetIn(AliasSet &AS, AliasSetTracker &AST, BatchAAResults &BatchAA);

  // Alias Set iteration - Allow access to all of the pointers which are part of
  // this alias set.
  class iterator;
  iterator begin() const { return iterator(PtrList); }
  iterator end()   const { return iterator(); }
  bool empty() const { return PtrList == nullptr; }

  // Unfortunately, ilist::size() is linear, so we have to add code to keep
  // track of the list's exact size.
  unsigned size() { return SetSize; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// Define an iterator for alias sets... this is just a forward iterator.
  class iterator {
    PointerRec *CurNode;

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = PointerRec;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}

    bool operator==(const iterator& x) const {
      return CurNode == x.CurNode;
    }
    bool operator!=(const iterator& x) const { return !operator==(x); }

    value_type &operator*() const {
      assert(CurNode && "Dereferencing AliasSet.end()!");
      return *CurNode;
    }
    value_type *operator->() const { return &operator*(); }

    Value *getPointer() const { return CurNode->getValue(); }
    LocationSize getSize() const { return CurNode->getSize(); }
    AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }

    iterator& operator++() {                // Preincrement
      assert(CurNode && "Advancing past AliasSet.end()!");
      CurNode = CurNode->getNext();
      return *this;
    }
    iterator operator++(int) { // Postincrement
      iterator tmp = *this; ++*this; return tmp;
    }
  };

private:
  // Can only be created by AliasSetTracker.
  AliasSet()
      : PtrListEnd(&PtrList), RefCount(0), AliasAny(false), Access(NoAccess),
        Alias(SetMustAlias) {}

  PointerRec *getSomePointer() const {
    return PtrList;
  }

  /// Return the real alias set this represents. If this has been merged with
  /// another set and is forwarding, return the ultimate destination set. This
  /// also implements the union-find collapsing as well.
  AliasSet *getForwardedTarget(AliasSetTracker &AST) {
    if (!Forward) return this;

    AliasSet *Dest = Forward->getForwardedTarget(AST);
    if (Dest != Forward) {
      Dest->addRef();
      Forward->dropRef(AST);
      Forward = Dest;
    }
    return Dest;
  }

  void removeFromTracker(AliasSetTracker &AST);

  void addPointer(AliasSetTracker &AST, PointerRec &Entry, LocationSize Size,
                  const AAMDNodes &AAInfo, bool KnownMustAlias = false,
                  bool SkipSizeUpdate = false);
  void addUnknownInst(Instruction *I, BatchAAResults &AA);

public:
  /// If the specified pointer "may" (or must) alias one of the members in the
  /// set return the appropriate AliasResult. Otherwise return NoAlias.
  AliasResult aliasesPointer(const Value *Ptr, LocationSize Size,
                             const AAMDNodes &AAInfo, BatchAAResults &AA) const;
  ModRefInfo aliasesUnknownInst(const Instruction *Inst,
                                BatchAAResults &AA) const;
};

inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
  AS.print(OS);
  return OS;
}

class AliasSetTracker {
  BatchAAResults &AA;
  ilist<AliasSet> AliasSets;

  using PointerMapType = DenseMap<AssertingVH<Value>, AliasSet::PointerRec *>;

  // Map from pointers to their node
  PointerMapType PointerMap;

public:
  /// Create an empty collection of AliasSets, and use the specified alias
  /// analysis object to disambiguate load and store addresses.
  explicit AliasSetTracker(BatchAAResults &AA) : AA(AA) {}
  ~AliasSetTracker() { clear(); }

  /// These methods are used to add different types of instructions to the alias
  /// sets. Adding a new instruction can result in one of three actions
  /// happening:
  ///
  ///   1. If the instruction doesn't alias any other sets, create a new set.
  ///   2. If the instruction aliases exactly one set, add it to the set
  ///   2. If the instruction aliases exactly one set, add it to that set.
  ///      the instruction to the result.
  ///
  /// These methods return true if inserting the instruction resulted in the
  /// addition of a new alias set (i.e., the pointer did not alias anything).
  ///
  void add(Value *Ptr, LocationSize Size, const AAMDNodes &AAInfo); // Add a loc
  void add(LoadInst *LI);
  void add(StoreInst *SI);
  void add(VAArgInst *VAAI);
  void add(AnyMemSetInst *MSI);
  void add(AnyMemTransferInst *MTI);
  void add(Instruction *I);       // Dispatch to one of the other add methods...
  void add(BasicBlock &BB);       // Add all instructions in basic block
  void add(const AliasSetTracker &AST); // Add alias relations from another AST
  void addUnknown(Instruction *I);

  void clear();

  /// Return the alias sets that are active.
  const ilist<AliasSet> &getAliasSets() const { return AliasSets; }

  /// Return the alias set that contains the specified memory location.  If
  /// the memory location aliases two or more existing alias sets, this has
  /// the effect of merging those alias sets before the single resulting alias
  /// set is returned.
  AliasSet &getAliasSetFor(const MemoryLocation &MemLoc);

  /// Return the underlying alias analysis object used by this tracker.
  BatchAAResults &getAliasAnalysis() const { return AA; }

  using iterator = ilist<AliasSet>::iterator;
  using const_iterator = ilist<AliasSet>::const_iterator;

  const_iterator begin() const { return AliasSets.begin(); }
  const_iterator end()   const { return AliasSets.end(); }

  iterator begin() { return AliasSets.begin(); }
  iterator end()   { return AliasSets.end(); }

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  friend class AliasSet;

  // The total number of pointers contained in all "may" alias sets.
  unsigned TotalMayAliasSetSize = 0;

  // A non-null value signifies this AST is saturated. A saturated AST lumps
  // all pointers into a single "May" set.
  AliasSet *AliasAnyAS = nullptr;

  void removeAliasSet(AliasSet *AS);

  /// Just like operator[] on the map, except that it creates an entry for the
  /// pointer if it doesn't already exist.
  AliasSet::PointerRec &getEntryFor(Value *V) {
    AliasSet::PointerRec *&Entry = PointerMap[V];
    if (!Entry)
      Entry = new AliasSet::PointerRec(V);
    return *Entry;
  }

  AliasSet &addPointer(MemoryLocation Loc, AliasSet::AccessLattice E);
  AliasSet *mergeAliasSetsForPointer(const Value *Ptr, LocationSize Size,
                                     const AAMDNodes &AAInfo,
                                     bool &MustAliasAll);

  /// Merge all alias sets into a single set that is considered to alias any
  /// pointer.
  AliasSet &mergeAllAliasSets();

  AliasSet *findAliasSetForUnknownInst(Instruction *Inst);
};
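
// A minimal usage sketch (assuming `F` is a Function and `AAR` an AAResults
// instance; both names are illustrative):
//
//   BatchAAResults BatchAA(AAR);
//   AliasSetTracker AST(BatchAA);
//   for (BasicBlock &BB : F)
//     AST.add(BB);
//   AST.print(errs());   // one AliasSet per disjoint region of memory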

inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
  AST.print(OS);
  return OS;
}

class AliasSetsPrinterPass : public PassInfoMixin<AliasSetsPrinterPass> {
  raw_ostream &OS;

public:
  explicit AliasSetsPrinterPass(raw_ostream &OS);
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_ALIASSETTRACKER_H
//===- InstSimplifyFolder.h - InstSimplify folding helper --------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the InstSimplifyFolder class, a helper for IRBuilder.
// It provides IRBuilder with a set of methods for folding operations to
// existing values using InstructionSimplify. At the moment, only a subset of
// the implementation uses InstructionSimplify. The rest of the implementation
// only folds constants.
//
// The folder also applies target-specific constant folding.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
#define LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/IRBuilderFolder.h"
#include "llvm/IR/Instruction.h"

namespace llvm {
class Constant;

/// InstSimplifyFolder - Use InstructionSimplify to fold operations to existing
/// values. Also applies target-specific constant folding when not using
/// InstructionSimplify.
class InstSimplifyFolder final : public IRBuilderFolder {
  TargetFolder ConstFolder;
  SimplifyQuery SQ;

  virtual void anchor();

public:
  InstSimplifyFolder(const DataLayout &DL) : ConstFolder(DL), SQ(DL) {}

  //===--------------------------------------------------------------------===//
  // Value-based folders.
  //
  // Return an existing value or a constant if the operation can be simplified.
  // Otherwise return nullptr.
  //===--------------------------------------------------------------------===//

  Value *FoldBinOp(Instruction::BinaryOps Opc, Value *LHS,
                   Value *RHS) const override {
    return simplifyBinOp(Opc, LHS, RHS, SQ);
  }

  Value *FoldExactBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                        bool IsExact) const override {
    return simplifyBinOp(Opc, LHS, RHS, SQ);
  }

  Value *FoldNoWrapBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                         bool HasNUW, bool HasNSW) const override {
    return simplifyBinOp(Opc, LHS, RHS, SQ);
  }

  Value *FoldBinOpFMF(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                      FastMathFlags FMF) const override {
    return simplifyBinOp(Opc, LHS, RHS, FMF, SQ);
  }

  Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
                     FastMathFlags FMF) const override {
    return simplifyUnOp(Opc, V, FMF, SQ);
  }

  Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
    return simplifyICmpInst(P, LHS, RHS, SQ);
  }

  Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                 bool IsInBounds = false) const override {
    return simplifyGEPInst(Ty, Ptr, IdxList, IsInBounds, SQ);
  }

  Value *FoldSelect(Value *C, Value *True, Value *False) const override {
    return simplifySelectInst(C, True, False, SQ);
  }

  Value *FoldExtractValue(Value *Agg,
                          ArrayRef<unsigned> IdxList) const override {
    return simplifyExtractValueInst(Agg, IdxList, SQ);
  }

  Value *FoldInsertValue(Value *Agg, Value *Val,
                         ArrayRef<unsigned> IdxList) const override {
    return simplifyInsertValueInst(Agg, Val, IdxList, SQ);
  }

  Value *FoldExtractElement(Value *Vec, Value *Idx) const override {
    return simplifyExtractElementInst(Vec, Idx, SQ);
  }

  Value *FoldInsertElement(Value *Vec, Value *NewElt,
                           Value *Idx) const override {
    return simplifyInsertElementInst(Vec, NewElt, Idx, SQ);
  }

  Value *FoldShuffleVector(Value *V1, Value *V2,
                           ArrayRef<int> Mask) const override {
    Type *RetTy = VectorType::get(
        cast<VectorType>(V1->getType())->getElementType(), Mask.size(),
        isa<ScalableVectorType>(V1->getType()));
    return simplifyShuffleVectorInst(V1, V2, Mask, RetTy, SQ);
  }

  //===--------------------------------------------------------------------===//
  // Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  Value *CreateCast(Instruction::CastOps Op, Constant *C,
                    Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreateCast(Op, C, DestTy);
  }
  Value *CreateIntCast(Constant *C, Type *DestTy,
                       bool isSigned) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreateIntCast(C, DestTy, isSigned);
  }
  Value *CreatePointerCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreatePointerCast(C, DestTy);
  }
  Value *CreateFPCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreateFPCast(C, DestTy);
  }
  Value *CreateBitCast(Constant *C, Type *DestTy) const override {
    return ConstFolder.CreateBitCast(C, DestTy);
  }
  Value *CreateIntToPtr(Constant *C, Type *DestTy) const override {
    return ConstFolder.CreateIntToPtr(C, DestTy);
  }
  Value *CreatePtrToInt(Constant *C, Type *DestTy) const override {
    return ConstFolder.CreatePtrToInt(C, DestTy);
  }
  Value *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreateZExtOrBitCast(C, DestTy);
  }
  Value *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreateSExtOrBitCast(C, DestTy);
  }
  Value *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreateTruncOrBitCast(C, DestTy);
  }

  Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
                                             Type *DestTy) const override {
    if (C->getType() == DestTy)
      return C; // avoid calling Fold
    return ConstFolder.CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
  }

  //===--------------------------------------------------------------------===//
  // Compare Instructions
  //===--------------------------------------------------------------------===//

  Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
                    Constant *RHS) const override {
    return ConstFolder.CreateFCmp(P, LHS, RHS);
  }
};
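
// A minimal usage sketch (assuming `BB` is a BasicBlock in module `M`, and
// `X`/`Zero` are Values; all names are illustrative):
//
//   InstSimplifyFolder Folder(M.getDataLayout());
//   IRBuilder<InstSimplifyFolder> Builder(&BB, Folder);
//   Value *V = Builder.CreateAdd(X, Zero);   // folds to X, no new instruction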

} // end namespace llvm

#endif // LLVM_ANALYSIS_INSTSIMPLIFYFOLDER_H
//===---- Delinearization.h - MultiDimensional Index Delinearization ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements an analysis pass that tries to delinearize all GEP
// instructions in all loops using the SCEV analysis functionality. This pass is
// only used for testing purposes: if your pass needs delinearization, please
// use the on-demand SCEVAddRecExpr::delinearize() function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DELINEARIZATION_H
#define LLVM_ANALYSIS_DELINEARIZATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class raw_ostream;
template <typename T> class SmallVectorImpl;
class GetElementPtrInst;
class Instruction;
class ScalarEvolution;
class SCEV;

/// Compute the array dimensions Sizes from the set of Terms extracted from
/// the memory access function of this SCEVAddRecExpr (second step of
/// delinearization).
void findArrayDimensions(ScalarEvolution &SE,
                         SmallVectorImpl<const SCEV *> &Terms,
                         SmallVectorImpl<const SCEV *> &Sizes,
                         const SCEV *ElementSize);

/// Collect parametric terms occurring in step expressions (first step of
/// delinearization).
void collectParametricTerms(ScalarEvolution &SE, const SCEV *Expr,
                            SmallVectorImpl<const SCEV *> &Terms);

/// Return in Subscripts the access functions for each dimension in Sizes
/// (third step of delinearization).
void computeAccessFunctions(ScalarEvolution &SE, const SCEV *Expr,
                            SmallVectorImpl<const SCEV *> &Subscripts,
                            SmallVectorImpl<const SCEV *> &Sizes);
/// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
/// subscripts and sizes of an array access.
///
/// The delinearization is a 3 step process: the first two steps compute the
/// sizes of each subscript and the third step computes the access functions
/// for the delinearized array:
///
/// 1. Find the terms in the step functions
/// 2. Compute the array size
/// 3. Compute the access function: divide the SCEV by the array size
///    starting with the innermost dimensions found in step 2. The Quotient
///    is the SCEV to be divided in the next step of the recursion. The
///    Remainder is the subscript of the innermost dimension. Loop over all
///    array dimensions computed in step 2.
///
/// To compute a uniform array size for several memory accesses to the same
/// object, one can collect in step 1 all the step terms for all the memory
/// accesses, and compute in step 2 a unique array shape. This guarantees
/// that the array shape will be the same across all memory accesses.
///
/// FIXME: We could derive the result of steps 1 and 2 from a description of
/// the array shape given in metadata.
///
/// Example:
///
/// A[][n][m]
///
/// for i
///   for j
///     for k
///       A[j+k][2i][5i] =
///
/// The initial SCEV:
///
/// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
///
/// 1. Find the different terms in the step functions:
/// -> [2*m, 5, n*m, n*m]
///
/// 2. Compute the array size: sort and unique them
/// -> [n*m, 2*m, 5]
/// find the GCD of all the terms = 1
/// divide by the GCD and erase constant terms
/// -> [n*m, 2*m]
/// GCD = m
/// divide by GCD -> [n, 2]
/// remove constant terms
/// -> [n]
/// size of the array is A[unknown][n][m]
///
/// 3. Compute the access function
/// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
/// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
/// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
/// The remainder is the subscript of the innermost array dimension: [5i].
///
/// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
/// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
/// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
/// The Remainder is the subscript of the next array dimension: [2i].
///
/// The subscript of the outermost dimension is the Quotient: [j+k].
///
/// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
void delinearize(ScalarEvolution &SE, const SCEV *Expr,
                 SmallVectorImpl<const SCEV *> &Subscripts,
                 SmallVectorImpl<const SCEV *> &Sizes, const SCEV *ElementSize);
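
// A minimal usage sketch (assuming `SE` is the ScalarEvolution result, `Inst`
// a load or store, and `AccessExpr` the SCEV for its pointer operand; all
// names are illustrative):
//
//   SmallVector<const SCEV *, 4> Subscripts, Sizes;
//   delinearize(SE, AccessExpr, Subscripts, Sizes, SE.getElementSize(Inst));
//   // On success, Subscripts holds one access function per dimension found.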

/// Gathers the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
/// array. If this is actually true, this function returns a list of array
/// subscript expressions in \p Subscripts and a list of integers describing
/// the size of the individual array dimensions in \p Sizes. Both lists have
/// either equal length or the size list is one element shorter in case there
/// is no known size available for the outermost array dimension. Returns true
/// if successful and false otherwise.
bool getIndexExpressionsFromGEP(ScalarEvolution &SE,
                                const GetElementPtrInst *GEP,
                                SmallVectorImpl<const SCEV *> &Subscripts,
                                SmallVectorImpl<int> &Sizes);

/// Implementation of fixed size array delinearization. Try to delinearize
/// access function for a fixed size multi-dimensional array, by deriving
/// subscripts from GEP instructions. Returns true upon success and false
/// otherwise. \p Inst is the load/store instruction whose pointer operand is
/// the one we want to delinearize. \p AccessFn is its corresponding SCEV
/// expression w.r.t. the surrounding loop.
bool tryDelinearizeFixedSizeImpl(ScalarEvolution *SE, Instruction *Inst,
                                 const SCEV *AccessFn,
                                 SmallVectorImpl<const SCEV *> &Subscripts,
                                 SmallVectorImpl<int> &Sizes);

struct DelinearizationPrinterPass
    : public PassInfoMixin<DelinearizationPrinterPass> {
  explicit DelinearizationPrinterPass(raw_ostream &OS);
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

private:
  raw_ostream &OS;
};
} // namespace llvm

#endif // LLVM_ANALYSIS_DELINEARIZATION_H
//===- TypeBasedAliasAnalysis.h - Type-Based Alias Analysis -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the interface for a metadata-based TBAA. See the source file for
/// details on the algorithm.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
#define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>

namespace llvm {

class CallBase;
class Function;
class MDNode;
class MemoryLocation;

/// A simple AA result that uses TBAA metadata to answer queries.
class TypeBasedAAResult : public AAResultBase {
public:
  /// Handle invalidation events from the new pass manager.
  ///
  /// By definition, this result is stateless and so remains valid.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI);
  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                               bool IgnoreLocals);

  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);
  MemoryEffects getMemoryEffects(const Function *F);
  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                           AAQueryInfo &AAQI);

private:
  bool Aliases(const MDNode *A, const MDNode *B) const;
};

/// Analysis pass providing a never-invalidated alias analysis result.
class TypeBasedAA : public AnalysisInfoMixin<TypeBasedAA> {
  friend AnalysisInfoMixin<TypeBasedAA>;

  static AnalysisKey Key;

public:
  using Result = TypeBasedAAResult;

  TypeBasedAAResult run(Function &F, FunctionAnalysisManager &AM);
};
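
// A minimal sketch of adding TBAA to an AAManager pipeline (assuming `FAM` is
// the FunctionAnalysisManager being configured; the name is illustrative):
//
//   AAManager AA;
//   AA.registerFunctionAnalysis<TypeBasedAA>();
//   FAM.registerPass([&] { return std::move(AA); });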

/// Legacy wrapper pass to provide the TypeBasedAAResult object.
class TypeBasedAAWrapperPass : public ImmutablePass {
  std::unique_ptr<TypeBasedAAResult> Result;

public:
  static char ID;

  TypeBasedAAWrapperPass();

  TypeBasedAAResult &getResult() { return *Result; }
  const TypeBasedAAResult &getResult() const { return *Result; }

  bool doInitialization(Module &M) override;
  bool doFinalization(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

//===--------------------------------------------------------------------===//
//
// createTypeBasedAAWrapperPass - This pass implements metadata-based
// type-based alias analysis.
//
ImmutablePass *createTypeBasedAAWrapperPass();

} // end namespace llvm

#endif // LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
//===- llvm/Analysis/ScalarEvolutionNormalization.h - See below -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines utilities for working with "normalized" ScalarEvolution
// expressions.
//
// The following example illustrates post-increment uses and how normalized
// expressions help.
//
//   for (i=0; i!=n; ++i) {
//     ...
//   }
//   use(i);
//
// While the expression for most uses of i inside the loop is {0,+,1}<%L>, the
// expression for the use of i outside the loop is {1,+,1}<%L>, since i is
// incremented at the end of the loop body. This is inconvenient, since it
// suggests that we need two different induction variables, one that starts
// at 0 and one that starts at 1. We'd prefer to be able to think of these as
// the same induction variable, with uses inside the loop using the
// "pre-incremented" value, and uses after the loop using the
// "post-incremented" value.
//
// Expressions for post-incremented uses are represented as an expression
// paired with a set of loops for which the expression is in "post-increment"
// mode (there may be multiple loops).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallPtrSet.h"

namespace llvm {

class Loop;
class ScalarEvolution;
class SCEV;
class SCEVAddRecExpr;

typedef SmallPtrSet<const Loop *, 2> PostIncLoopSet;

typedef function_ref<bool(const SCEVAddRecExpr *)> NormalizePredTy;

/// Normalize \p S to be post-increment for all loops present in \p
/// Loops. Returns nullptr if the result is not invertible and \p
/// CheckInvertible is true.
const SCEV *normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
                                   ScalarEvolution &SE,
                                   bool CheckInvertible = true);

/// Normalize \p S for all add recurrence sub-expressions for which \p
/// Pred returns true.
const SCEV *normalizeForPostIncUseIf(const SCEV *S, NormalizePredTy Pred,
                                     ScalarEvolution &SE);

/// Denormalize \p S to be post-increment for all loops present in \p
/// Loops.
const SCEV *denormalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops,
                                     ScalarEvolution &SE);
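
// A minimal usage sketch (assuming `AR` is a SCEVAddRecExpr in loop `L` and
// `SE` the ScalarEvolution result; all names are illustrative):
//
//   PostIncLoopSet Loops;
//   Loops.insert(L);
//   const SCEV *Post = normalizeForPostIncUse(AR, Loops, SE);
//   const SCEV *Pre = denormalizeForPostIncUse(Post, Loops, SE);  // == AR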
} // namespace llvm

#endif // LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
//===- llvm/Analysis/LoopInfo.h - Natural Loop Calculator -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares a GenericLoopInfo instantiation for LLVM IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPINFO_H
#define LLVM_ANALYSIS_LOOPINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/GenericLoopInfo.h"
#include <algorithm>
#include <optional>
#include <utility>

namespace llvm {

class DominatorTree;
class InductionDescriptor;
class Instruction;
class LoopInfo;
class Loop;
class MDNode;
class MemorySSAUpdater;
class ScalarEvolution;
class raw_ostream;

// Implementation in Support/GenericLoopInfoImpl.h
extern template class LoopBase<BasicBlock, Loop>;

/// Represents a single loop in the control flow graph.  Note that not all SCCs
/// in the CFG are necessarily loops.
class LLVM_EXTERNAL_VISIBILITY Loop : public LoopBase<BasicBlock, Loop> {
public:
  /// A range representing the start and end location of a loop.
  class LocRange {
    DebugLoc Start;
    DebugLoc End;

  public:
    LocRange() = default;
    LocRange(DebugLoc Start) : Start(Start), End(Start) {}
    LocRange(DebugLoc Start, DebugLoc End)
        : Start(std::move(Start)), End(std::move(End)) {}

    const DebugLoc &getStart() const { return Start; }
    const DebugLoc &getEnd() const { return End; }

    /// Check for null.
    ///
    explicit operator bool() const { return Start && End; }
  };

  /// Return true if the specified value is loop invariant.
  bool isLoopInvariant(const Value *V) const;

  /// Return true if all the operands of the specified instruction are loop
  /// invariant.
  bool hasLoopInvariantOperands(const Instruction *I) const;

  /// If the given value is an instruction inside of the loop and it can be
  /// hoisted, do so to make it trivially loop-invariant.
  /// Return true if \c V is already loop-invariant, and false if \c V can't
  /// be made loop-invariant. If \c V is made loop-invariant, \c Changed is
  /// set to true. This function can be used as a slightly more aggressive
  /// replacement for isLoopInvariant.
  ///
  /// If InsertPt is specified, it is the point to hoist instructions to.
  /// If null, the terminator of the loop preheader is used.
  ///
  bool makeLoopInvariant(Value *V, bool &Changed,
                         Instruction *InsertPt = nullptr,
                         MemorySSAUpdater *MSSAU = nullptr,
                         ScalarEvolution *SE = nullptr) const;

  /// If the given instruction is inside of the loop and it can be hoisted, do
  /// so to make it trivially loop-invariant.
  /// Return true if \c I is already loop-invariant, and false if \c I can't
  /// be made loop-invariant. If \c I is made loop-invariant, \c Changed is
  /// set to true. This function can be used as a slightly more aggressive
  /// replacement for isLoopInvariant.
  ///
  /// If InsertPt is specified, it is the point to hoist instructions to.
  /// If null, the terminator of the loop preheader is used.
  ///
  bool makeLoopInvariant(Instruction *I, bool &Changed,
                         Instruction *InsertPt = nullptr,
                         MemorySSAUpdater *MSSAU = nullptr,
                         ScalarEvolution *SE = nullptr) const;

  /// Check to see if the loop has a canonical induction variable: an integer
  /// recurrence that starts at 0 and increments by one each time through the
  /// loop. If so, return the phi node that corresponds to it.
  ///
  /// The IndVarSimplify pass transforms loops to have a canonical induction
  /// variable.
  ///
  PHINode *getCanonicalInductionVariable() const;

  /// Get the latch condition instruction.
  ICmpInst *getLatchCmpInst() const;

  /// Obtain the unique incoming and back edge. Return false if they are
  /// non-unique or the loop is dead; otherwise, return true.
  bool getIncomingAndBackEdge(BasicBlock *&Incoming,
                              BasicBlock *&Backedge) const;

  /// Below are some utilities to get the loop guard, loop bounds and induction
  /// variable, and to check if a given phinode is an auxiliary induction
  /// variable, if the loop is guarded, and if the loop is canonical.
  ///
  /// Here is an example:
  /// \code
  /// for (int i = lb; i < ub; i+=step)
  ///   <loop body>
  /// --- pseudo LLVMIR ---
  /// beforeloop:
  ///   guardcmp = (lb < ub)
  ///   if (guardcmp) goto preheader; else goto afterloop
  /// preheader:
  /// loop:
  ///   i_1 = phi[{lb, preheader}, {i_2, latch}]
  ///   <loop body>
  ///   i_2 = i_1 + step
  /// latch:
  ///   cmp = (i_2 < ub)
  ///   if (cmp) goto loop
  /// exit:
  /// afterloop:
  /// \endcode
  ///
  /// - getBounds
  ///   - getInitialIVValue      --> lb
  ///   - getStepInst            --> i_2 = i_1 + step
  ///   - getStepValue           --> step
  ///   - getFinalIVValue        --> ub
  ///   - getCanonicalPredicate  --> '<'
  ///   - getDirection           --> Increasing
  ///
  /// - getInductionVariable            --> i_1
  /// - isAuxiliaryInductionVariable(x) --> true if x == i_1
  /// - getLoopGuardBranch()
  ///                 --> `if (guardcmp) goto preheader; else goto afterloop`
  /// - isGuarded()                     --> true
  /// - isCanonical                     --> false
  struct LoopBounds {
    /// Return the LoopBounds object if
    /// - the given \p IndVar is an induction variable
    /// - the initial value of the induction variable can be found
    /// - the step instruction of the induction variable can be found
    /// - the final value of the induction variable can be found
    ///
    /// Else std::nullopt.
    static std::optional<Loop::LoopBounds>
    getBounds(const Loop &L, PHINode &IndVar, ScalarEvolution &SE);

    /// Get the initial value of the loop induction variable.
    Value &getInitialIVValue() const { return InitialIVValue; }

    /// Get the instruction that updates the loop induction variable.
    Instruction &getStepInst() const { return StepInst; }

    /// Get the step that the loop induction variable gets updated by in each
    /// loop iteration. Return nullptr if not found.
    Value *getStepValue() const { return StepValue; }

    /// Get the final value of the loop induction variable.
    Value &getFinalIVValue() const { return FinalIVValue; }

    /// Return the canonical predicate for the latch compare instruction, if
    /// it can be calculated. Else BAD_ICMP_PREDICATE.
    ///
    /// A predicate is considered canonical if the requirements below are all
    /// satisfied:
    /// 1. The first successor of the latch branch is the loop header
    ///    If not, invert the predicate.
    /// 2. One of the operands of the latch comparison is StepInst
    ///    If not, and
    ///    - if the currently calculated predicate is not ne or eq, flip the
    ///      predicate.
    ///    - else if the loop is increasing, return slt
    ///      (notice that it is safe to change from ne or eq to sign compare)
    ///    - else if the loop is decreasing, return sgt
    ///      (notice that it is safe to change from ne or eq to sign compare)
    ///
    /// Here is an example when both (1) and (2) are not satisfied:
    /// \code
    /// loop.header:
    ///  %iv = phi [%initialiv, %loop.preheader], [%inc, %loop.header]
    ///  %inc = add %iv, %step
    ///  %cmp = slt %iv, %finaliv
    ///  br %cmp, %loop.exit, %loop.header
    /// loop.exit:
    /// \endcode
    /// - The second successor of the latch branch is the loop header instead
    ///   of the first successor (slt -> sge)
    /// - The first operand of the latch comparison (%cmp) is the IndVar (%iv)
    ///   instead of the StepInst (%inc) (sge -> sgt)
    ///
    /// The predicate would be sgt if both (1) and (2) are satisfied.
    /// getCanonicalPredicate() returns sgt for this example.
    /// Note: The IR is not changed.
    ICmpInst::Predicate getCanonicalPredicate() const;

    /// An enum for the direction of the loop
    /// - for (int i = 0; i < ub; ++i)  --> Increasing
    /// - for (int i = ub; i > 0; --i)  --> Decreasing
    /// - for (int i = x; i != y; i+=z) --> Unknown
    enum class Direction { Increasing, Decreasing, Unknown };

    /// Get the direction of the loop.
    Direction getDirection() const;

  private:
    LoopBounds(const Loop &Loop, Value &I, Instruction &SI, Value *SV, Value &F,
               ScalarEvolution &SE)
        : L(Loop), InitialIVValue(I), StepInst(SI), StepValue(SV),
          FinalIVValue(F), SE(SE) {}

    const Loop &L;

    // The initial value of the loop induction variable
    Value &InitialIVValue;

    // The instruction that updates the loop induction variable
    Instruction &StepInst;

    // The value that the loop induction variable gets updated by in each loop
    // iteration
    Value *StepValue;

    // The final value of the loop induction variable
    Value &FinalIVValue;

    ScalarEvolution &SE;
  };

  /// Return the struct LoopBounds collected if all struct members are found,
  /// else std::nullopt.
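  ///
  /// For illustration, a minimal sketch of a client query, assuming a Loop *L
  /// and ScalarEvolution &SE are available:
  /// \code
  /// if (std::optional<Loop::LoopBounds> B = L->getBounds(SE)) {
  ///   Value &Init = B->getInitialIVValue();
  ///   Value &Final = B->getFinalIVValue();
  ///   if (B->getDirection() == Loop::LoopBounds::Direction::Increasing) {
  ///     // e.g. reason about the range [Init, Final) here.
  ///   }
  /// }
  /// \endcode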
  std::optional<LoopBounds> getBounds(ScalarEvolution &SE) const;

  /// Return the loop induction variable if found, else return nullptr.
  /// An instruction is considered the loop induction variable if
  /// - it is an induction variable of the loop; and
  /// - it is used to determine the condition of the branch in the loop latch
  ///
  /// Note: the induction variable doesn't need to be canonical, i.e. starts at
  /// zero and increments by one each time through the loop (but it can be).
  PHINode *getInductionVariable(ScalarEvolution &SE) const;

  /// Get the loop induction descriptor for the loop induction variable. Return
  /// true if the loop induction variable is found.
  bool getInductionDescriptor(ScalarEvolution &SE,
                              InductionDescriptor &IndDesc) const;

  /// Return true if the given PHINode \p AuxIndVar is
  /// - in the loop header
  /// - not used outside of the loop
  /// - incremented by a loop-invariant step in each loop iteration
  /// - updated by a step instruction whose opcode is add or sub
  /// Note: an auxiliary induction variable is not required to be used in the
  ///       conditional branch in the loop latch (but it can be).
  bool isAuxiliaryInductionVariable(PHINode &AuxIndVar,
                                    ScalarEvolution &SE) const;

  /// Return the loop guard branch, if it exists.
  ///
  /// This currently only works on simplified loops, as it requires a preheader
  /// and a latch to identify the guard. It will work on loops of the form:
  /// \code
  /// GuardBB:
  ///   br cond1, Preheader, ExitSucc <== GuardBranch
  /// Preheader:
  ///   br Header
  /// Header:
  ///  ...
  ///   br Latch
  /// Latch:
  ///   br cond2, Header, ExitBlock
  /// ExitBlock:
  ///   br ExitSucc
  /// ExitSucc:
  /// \endcode
  BranchInst *getLoopGuardBranch() const;

  /// Return true iff the loop is
  /// - in simplified and rotated form, and
  /// - guarded by a loop guard branch.
  bool isGuarded() const { return (getLoopGuardBranch() != nullptr); }

  /// Return true if the loop is in rotated form.
  ///
  /// This does not check whether the loop was rotated by the loop rotation
  /// pass; it only checks that the loop is in rotated form (i.e., it has a
  /// valid latch that exits the loop).
  bool isRotatedForm() const {
    assert(!isInvalid() && "Loop not in a valid state!");
    BasicBlock *Latch = getLoopLatch();
    return Latch && isLoopExiting(Latch);
  }

  /// Return true if the loop induction variable starts at zero and increments
  /// by one each time through the loop.
  bool isCanonical(ScalarEvolution &SE) const;

  /// Return true if the Loop is in LCSSA form. If \p IgnoreTokens is set to
  /// true, token values defined inside the loop are allowed to violate LCSSA
  /// form.
  bool isLCSSAForm(const DominatorTree &DT, bool IgnoreTokens = true) const;

  /// Return true if this Loop and all inner subloops are in LCSSA form. If \p
  /// IgnoreTokens is set to true, token values defined inside the loop are
  /// allowed to violate LCSSA form.
  bool isRecursivelyLCSSAForm(const DominatorTree &DT, const LoopInfo &LI,
                              bool IgnoreTokens = true) const;

  /// Return true if the Loop is in the form that the LoopSimplify pass
  /// transforms loops to, which is sometimes called normal form.
  bool isLoopSimplifyForm() const;

  /// Return true if the loop body is safe to clone in practice.
  bool isSafeToClone() const;

  /// Returns true if the loop is annotated parallel.
  ///
  /// A parallel loop can be assumed to not contain any dependencies between
  /// iterations by the compiler. That is, any loop-carried dependency checking
  /// can be skipped completely when parallelizing the loop on the target
  /// machine. Thus, if the parallel loop information originates from the
  /// programmer, e.g. via the OpenMP parallel for pragma, it is the
  /// programmer's responsibility to ensure there are no loop-carried
  /// dependencies. The final execution order of the instructions across
  /// iterations is not guaranteed, thus, the end result might or might not
  /// implement actual concurrent execution of instructions across multiple
  /// iterations.
  bool isAnnotatedParallel() const;

  /// Return the llvm.loop loop id metadata node for this loop if it is present.
  ///
  /// If this loop contains the same llvm.loop metadata on each branch to the
  /// header then the node is returned. If any latch instruction does not
  /// contain the llvm.loop metadata, or if multiple latches contain different
  /// nodes, then nullptr is returned.
  MDNode *getLoopID() const;
  /// Set the llvm.loop loop id metadata for this loop.
  ///
  /// The LoopID metadata node will be added to each terminator instruction in
  /// the loop that branches to the loop header.
  ///
  /// The LoopID metadata node should have one or more operands and the first
  /// operand should be the node itself.
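  ///
  /// For illustration, a hedged sketch of stripping unroll attributes after a
  /// transformation via makePostTransformationMetadata (declared later in this
  /// header); Ctx and L are hypothetical:
  /// \code
  /// MDNode *NewID = makePostTransformationMetadata(
  ///     Ctx, L.getLoopID(), {"llvm.loop.unroll."}, {});
  /// L.setLoopID(NewID);
  /// \endcode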
  void setLoopID(MDNode *LoopID) const;

  /// Add llvm.loop.unroll.disable to this loop's loop id metadata.
  ///
  /// Remove existing unroll metadata and add unroll disable metadata to
  /// indicate the loop has already been unrolled.  This prevents a loop
  /// from being unrolled more than is directed by a pragma if the loop
  /// unrolling pass is run more than once (which it generally is).
  void setLoopAlreadyUnrolled();

  /// Add llvm.loop.mustprogress to this loop's loop id metadata.
  void setLoopMustProgress();

  void dump() const;
  void dumpVerbose() const;

  /// Return the debug location of the start of this loop.
  /// This looks for a BB terminating instruction with a known debug
  /// location by looking at the preheader and header blocks. If it
  /// cannot find a terminating instruction with location information,
  /// it returns an unknown location.
  DebugLoc getStartLoc() const;

  /// Return the source code span of the loop.
  LocRange getLocRange() const;

  StringRef getName() const {
    if (BasicBlock *Header = getHeader())
      if (Header->hasName())
        return Header->getName();
    return "<unnamed loop>";
  }

private:
  Loop() = default;

  friend class LoopInfoBase<BasicBlock, Loop>;
  friend class LoopBase<BasicBlock, Loop>;
  explicit Loop(BasicBlock *BB) : LoopBase<BasicBlock, Loop>(BB) {}
  ~Loop() = default;
};

// Implementation in Support/GenericLoopInfoImpl.h
extern template class LoopInfoBase<BasicBlock, Loop>;

class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
  typedef LoopInfoBase<BasicBlock, Loop> BaseT;

  friend class LoopBase<BasicBlock, Loop>;

  void operator=(const LoopInfo &) = delete;
  LoopInfo(const LoopInfo &) = delete;

public:
  LoopInfo() = default;
  explicit LoopInfo(const DominatorTreeBase<BasicBlock, false> &DomTree);

  LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
  LoopInfo &operator=(LoopInfo &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    return *this;
  }

  /// Handle invalidation explicitly.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);

  // Most of the public interface is provided via LoopInfoBase.

  /// Update LoopInfo after removing the last backedge from a loop. This updates
  /// the loop forest and parent loops for each block so that \c L is no longer
  /// referenced, but does not actually delete \c L immediately. The pointer
  /// will remain valid until this LoopInfo's memory is released.
  void erase(Loop *L);

  /// Returns true if replacing From with To everywhere is guaranteed to
  /// preserve LCSSA form.
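  ///
  /// For illustration, a hedged sketch (FromInst and ToVal are hypothetical):
  /// \code
  /// if (LI.replacementPreservesLCSSAForm(FromInst, ToVal))
  ///   FromInst->replaceAllUsesWith(ToVal);
  /// \endcode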
  bool replacementPreservesLCSSAForm(Instruction *From, Value *To) {
    // Preserving LCSSA form is only problematic if the replacing value is an
    // instruction.
    Instruction *I = dyn_cast<Instruction>(To);
    if (!I)
      return true;
    // If both instructions are defined in the same basic block then replacement
    // cannot break LCSSA form.
    if (I->getParent() == From->getParent())
      return true;
    // If the instruction is not defined in a loop then it can safely replace
    // anything.
    Loop *ToLoop = getLoopFor(I->getParent());
    if (!ToLoop)
      return true;
    // If the replacing instruction is defined in the same loop as the original
    // instruction, or in a loop that contains it as an inner loop, then using
    // it as a replacement will not break LCSSA form.
    return ToLoop->contains(getLoopFor(From->getParent()));
  }

  /// Checks if moving a specific instruction can break LCSSA in any loop.
  ///
  /// Return true if moving \p Inst to before \p NewLoc will not break LCSSA,
  /// assuming that the function containing \p Inst and \p NewLoc is currently
  /// in LCSSA form.
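  ///
  /// For illustration, a hedged hoisting sketch; I and PreheaderTerm (the
  /// preheader's terminator) are hypothetical:
  /// \code
  /// if (LI.movementPreservesLCSSAForm(I, PreheaderTerm))
  ///   I->moveBefore(PreheaderTerm);
  /// \endcode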
  bool movementPreservesLCSSAForm(Instruction *Inst, Instruction *NewLoc) {
    assert(Inst->getFunction() == NewLoc->getFunction() &&
           "Can't reason about IPO!");

    auto *OldBB = Inst->getParent();
    auto *NewBB = NewLoc->getParent();

    // Movement within the same loop does not break LCSSA (the equality check is
    // to avoid doing a hashtable lookup in case of intra-block movement).
    if (OldBB == NewBB)
      return true;

    auto *OldLoop = getLoopFor(OldBB);
    auto *NewLoop = getLoopFor(NewBB);

    if (OldLoop == NewLoop)
      return true;

    // Check if Outer contains Inner; with the null loop counting as the
    // "outermost" loop.
    auto Contains = [](const Loop *Outer, const Loop *Inner) {
      return !Outer || Outer->contains(Inner);
    };

    // To check that the movement of Inst to before NewLoc does not break
    // LCSSA, we need to check two sets of uses for possible LCSSA violations
    // at NewLoc: the users of Inst and the operands of Inst, both considered
    // at its new location.

    // If we know we're hoisting Inst out of an inner loop to an outer loop,
    // then the uses *of* Inst don't need to be checked.

    if (!Contains(NewLoop, OldLoop)) {
      for (Use &U : Inst->uses()) {
        auto *UI = cast<Instruction>(U.getUser());
        auto *UBB = isa<PHINode>(UI) ? cast<PHINode>(UI)->getIncomingBlock(U)
                                     : UI->getParent();
        if (UBB != NewBB && getLoopFor(UBB) != NewLoop)
          return false;
      }
    }

    // If we know we're sinking Inst from an outer loop into an inner loop, then
    // the *operands* of Inst don't need to be checked.

    if (!Contains(OldLoop, NewLoop)) {
      // See below on why we can't handle phi nodes here.
      if (isa<PHINode>(Inst))
        return false;

      for (Use &U : Inst->operands()) {
        auto *DefI = dyn_cast<Instruction>(U.get());
        if (!DefI)
          return false;

        // This would need adjustment if we allow Inst to be a phi node -- the
        // new use block won't simply be NewBB.

        auto *DefBlock = DefI->getParent();
        if (DefBlock != NewBB && getLoopFor(DefBlock) != NewLoop)
          return false;
      }
    }

    return true;
  }

  // Return true if a new use of V added in ExitBB would require an LCSSA PHI
  // to be inserted at the beginning of the block.  Note that V is assumed to
  // dominate ExitBB, and ExitBB must be the exit block of some loop.  The
  // IR is assumed to be in LCSSA form before the planned insertion.
  bool wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
                                         const BasicBlock *ExitBB) const;
};

/// Enable verification of loop info.
///
/// The flag enables checks which are expensive and are disabled by default
/// unless the `EXPENSIVE_CHECKS` macro is defined.  The `-verify-loop-info`
/// flag allows the checks to be enabled selectively without re-compilation.
extern bool VerifyLoopInfo;

// Allow clients to walk the list of nested loops...
template <> struct GraphTraits<const Loop *> {
  typedef const Loop *NodeRef;
  typedef LoopInfo::iterator ChildIteratorType;

  static NodeRef getEntryNode(const Loop *L) { return L; }
  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};

template <> struct GraphTraits<Loop *> {
  typedef Loop *NodeRef;
  typedef LoopInfo::iterator ChildIteratorType;

  static NodeRef getEntryNode(Loop *L) { return L; }
  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};

/// Analysis pass that exposes the \c LoopInfo for a function.
class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
  friend AnalysisInfoMixin<LoopAnalysis>;
  static AnalysisKey Key;

public:
  typedef LoopInfo Result;

  LoopInfo run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for the \c LoopAnalysis results.
class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
  raw_ostream &OS;

public:
  explicit LoopPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for the \c LoopAnalysis results.
struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// The legacy pass manager's analysis pass to compute loop information.
class LoopInfoWrapperPass : public FunctionPass {
  LoopInfo LI;

public:
  static char ID; // Pass identification, replacement for typeid

  LoopInfoWrapperPass();

  LoopInfo &getLoopInfo() { return LI; }
  const LoopInfo &getLoopInfo() const { return LI; }

  /// Calculate the natural loop information for a given function.
  bool runOnFunction(Function &F) override;

  void verifyAnalysis() const override;

  void releaseMemory() override { LI.releaseMemory(); }

  void print(raw_ostream &O, const Module *M = nullptr) const override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

/// Function to print a loop's contents as LLVM's text IR assembly.
void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");

/// Find and return the loop attribute node for the attribute @p Name in
/// @p LoopID. Return nullptr if there is no such attribute.
MDNode *findOptionMDForLoopID(MDNode *LoopID, StringRef Name);

/// Find string metadata for a loop.
///
/// Returns the MDNode where the first operand is the metadata's name. The
/// following operands are the metadata's values. If no metadata with @p Name is
/// found, return nullptr.
MDNode *findOptionMDForLoop(const Loop *TheLoop, StringRef Name);

std::optional<bool> getOptionalBoolLoopAttribute(const Loop *TheLoop,
                                                 StringRef Name);

/// Returns true if Name is applied to TheLoop and enabled.
bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);

/// Find named metadata for a loop with an integer value.
std::optional<int> getOptionalIntLoopAttribute(const Loop *TheLoop,
                                               StringRef Name);

/// Find named metadata for a loop with an integer value. Return \p Default if
/// not set.
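///
/// For illustration, a hedged sketch of reading an unroll-count pragma
/// (TheLoop is a hypothetical Loop *):
/// \code
/// int Count = getIntLoopAttribute(TheLoop, "llvm.loop.unroll.count");
/// \endcode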
int getIntLoopAttribute(const Loop *TheLoop, StringRef Name, int Default = 0);

/// Find string metadata for loop
///
/// If it has a value (e.g. {"llvm.distribute", 1}), return the value as an
/// operand, or null otherwise. If the string metadata is not found, return
/// std::nullopt.
std::optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
                                                           StringRef Name);

/// Look for the loop attribute that requires progress within the loop.
/// Note: Most consumers probably want "isMustProgress" which checks
/// the containing function attribute too.
bool hasMustProgress(const Loop *L);

/// Return true if this loop can be assumed to make progress.  (i.e. can't
/// be infinite without side effects without also being undefined)
bool isMustProgress(const Loop *L);

/// Return true if this loop can be assumed to run for a finite number of
/// iterations.
bool isFinite(const Loop *L);

/// Return whether an MDNode might represent an access group.
///
/// Access group metadata nodes have to be distinct and empty. Being
/// always-empty ensures that it never needs to be changed (which -- because
/// MDNodes are designed immutable -- would require creating a new MDNode). Note
/// that this is not a sufficient condition: not every distinct and empty MDNode
/// represents an access group.
bool isValidAsAccessGroup(MDNode *AccGroup);

/// Create a new LoopID after the loop has been transformed.
///
/// This can be used when no follow-up loop attributes are defined
/// (llvm::makeFollowupLoopID returning std::nullopt) to stop transformations
/// from being applied again.
///
/// @param Context        The LLVMContext in which to create the new LoopID.
/// @param OrigLoopID     The original LoopID; can be nullptr if the original
///                       loop has no LoopID.
/// @param RemovePrefixes Remove all loop attributes that have these prefixes.
///                       Use to remove metadata of the transformation that has
///                       been applied.
/// @param AddAttrs       Add these loop attributes to the new LoopID.
///
/// @return A new LoopID that can be applied using Loop::setLoopID().
llvm::MDNode *
makePostTransformationMetadata(llvm::LLVMContext &Context, MDNode *OrigLoopID,
                               llvm::ArrayRef<llvm::StringRef> RemovePrefixes,
                               llvm::ArrayRef<llvm::MDNode *> AddAttrs);

} // namespace llvm

#endif
//===- PhiValues.h - Phi Value Analysis -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PhiValues class, and associated passes, which can be
// used to find the underlying values of the phis in a function, i.e. the
// non-phi values that can be found by traversing the phi graph.
//
// This information is computed lazily and cached. If new phis are added to the
// function they are handled correctly, but if an existing phi has its operands
// modified PhiValues has to be notified by calling invalidateValue.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_PHIVALUES_H
#define LLVM_ANALYSIS_PHIVALUES_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"

namespace llvm {

class Value;
class PHINode;
class Function;

/// Class for calculating and caching the underlying values of phis in a
/// function.
///
/// Initially the PhiValues is empty, and gets incrementally populated whenever
/// it is queried.
class PhiValues {
public:
  using ValueSet = SmallSetVector<Value *, 4>;

  /// Construct an empty PhiValues.
  PhiValues(const Function &F) : F(F) {}

  /// Get the underlying values of a phi.
  ///
  /// This returns the cached value if PN has previously been processed,
  /// otherwise it processes it first.
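  ///
  /// For illustration, a hedged sketch (PV and PN are hypothetical):
  /// \code
  /// for (Value *V : PV.getValuesForPhi(PN))
  ///   ; // V is a non-phi value reachable through PN's phi graph
  /// \endcode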
  const ValueSet &getValuesForPhi(const PHINode *PN);

  /// Notify PhiValues that the cached information using V is no longer valid
  ///
  /// Whenever a phi has its operands modified the cached values for that phi
  /// (and the phis that use that phi) become invalid. A user of PhiValues has
  /// to notify it of this by calling invalidateValue on either the operand or
  /// the phi, which will then clear the relevant cached information.
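  ///
  /// For illustration, a hedged sketch (PV, PN, and NewV are hypothetical):
  /// \code
  /// PN->setIncomingValue(0, NewV); // operands changed behind the cache
  /// PV.invalidateValue(PN);        // drop the now-stale results
  /// \endcode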
  void invalidateValue(const Value *V);

  /// Free the memory used by this class.
  void releaseMemory();

  /// Print out the values currently in the cache.
  void print(raw_ostream &OS) const;

  /// Handle invalidation events in the new pass manager.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &);

private:
  using ConstValueSet = SmallSetVector<const Value *, 4>;

  /// The next depth number to be used by processPhi.
  unsigned int NextDepthNumber = 1;

  /// Depth numbers of phis. Phis with the same depth number are part of the
  /// same strongly connected component.
  DenseMap<const PHINode *, unsigned int> DepthMap;

  /// Non-phi values reachable from each component.
  DenseMap<unsigned int, ValueSet> NonPhiReachableMap;

  /// All values reachable from each component.
  DenseMap<unsigned int, ConstValueSet> ReachableMap;

  /// A CallbackVH to notify PhiValues when a value is deleted or replaced, so
  /// that the cached information for that value can be cleared to avoid
  /// dangling pointers to invalid values.
  class PhiValuesCallbackVH final : public CallbackVH {
    PhiValues *PV;
    void deleted() override;
    void allUsesReplacedWith(Value *New) override;

  public:
    PhiValuesCallbackVH(Value *V, PhiValues *PV = nullptr)
        : CallbackVH(V), PV(PV) {}
  };

  /// A set of callbacks to the values that processPhi has seen.
  DenseSet<PhiValuesCallbackVH, DenseMapInfo<Value *>> TrackedValues;

  /// The function that the PhiValues is for.
  const Function &F;

  /// Process a phi so that its entries in the depth and reachable maps are
  /// fully populated.
  void processPhi(const PHINode *PN, SmallVectorImpl<const PHINode *> &Stack);
};

/// The analysis pass which yields a PhiValues
///
/// The analysis does nothing by itself, and just returns an empty PhiValues
/// which will get filled in as it's used.
class PhiValuesAnalysis : public AnalysisInfoMixin<PhiValuesAnalysis> {
  friend AnalysisInfoMixin<PhiValuesAnalysis>;
  static AnalysisKey Key;

public:
  using Result = PhiValues;
  PhiValues run(Function &F, FunctionAnalysisManager &);
};

/// A pass for printing the PhiValues for a function.
///
/// This pass doesn't print whatever information the PhiValues happens to hold,
/// but instead first uses the PhiValues to analyze all the phis in the function
/// so the complete information is printed.
class PhiValuesPrinterPass : public PassInfoMixin<PhiValuesPrinterPass> {
  raw_ostream &OS;

public:
  explicit PhiValuesPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Wrapper pass for the legacy pass manager
class PhiValuesWrapperPass : public FunctionPass {
  std::unique_ptr<PhiValues> Result;

public:
  static char ID;
  PhiValuesWrapperPass();

  PhiValues &getResult() { return *Result; }
  const PhiValues &getResult() const { return *Result; }

  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

} // namespace llvm

#endif
//===- llvm/Analysis/LegacyDivergenceAnalysis.h - KernelDivergence Analysis -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The kernel divergence analysis is an LLVM pass which can be used to find out
// whether a branch instruction in a GPU program (kernel) is divergent. It can
// help branch optimizations such as jump threading and loop unswitching to
// make better decisions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LEGACYDIVERGENCEANALYSIS_H
#define LLVM_ANALYSIS_LEGACYDIVERGENCEANALYSIS_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>

namespace llvm {
class DivergenceInfo;
class Function;
class Module;
class raw_ostream;
class TargetTransformInfo;
class Use;
class Value;

class LegacyDivergenceAnalysisImpl {
public:
  // Returns true if V is divergent at its definition.
  bool isDivergent(const Value *V) const;

  // Returns true if U is divergent. Uses of a uniform value can be divergent.
  bool isDivergentUse(const Use *U) const;

  // Returns true if V is uniform/non-divergent.
  bool isUniform(const Value *V) const { return !isDivergent(V); }

  // Returns true if U is uniform/non-divergent. Uses of a uniform value can be
  // divergent.
  bool isUniformUse(const Use *U) const { return !isDivergentUse(U); }
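
  // For illustration, a hedged sketch of a client query after run(); DA and
  // Br are hypothetical:
  //
  //   if (Br->isConditional() && DA.isDivergent(Br->getCondition()))
  //     ; // the branch may be divergent across threads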

  // Keep the analysis results up to date by removing an erased value.
  void removeValue(const Value *V) { DivergentValues.erase(V); }

  // Print all divergent branches in the function.
  void print(raw_ostream &OS, const Module *) const;

  // Whether analysis should be performed by GPUDivergenceAnalysis.
  bool shouldUseGPUDivergenceAnalysis(const Function &F,
                                      const TargetTransformInfo &TTI,
                                      const LoopInfo &LI);

  void run(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
           PostDominatorTree &PDT, const LoopInfo &LI);

protected:
  // (optional) handle to new DivergenceAnalysis
  std::unique_ptr<DivergenceInfo> gpuDA;

  // Stores all divergent values.
  DenseSet<const Value *> DivergentValues;

  // Stores divergent uses of possibly uniform values.
  DenseSet<const Use *> DivergentUses;
};

class LegacyDivergenceAnalysis : public FunctionPass,
                                 public LegacyDivergenceAnalysisImpl {
public:
  static char ID;

  LegacyDivergenceAnalysis();
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

class LegacyDivergenceAnalysisPass
    : public PassInfoMixin<LegacyDivergenceAnalysisPass>,
      public LegacyDivergenceAnalysisImpl {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

private:
  // (optional) handle to new DivergenceAnalysis
  std::unique_ptr<DivergenceInfo> gpuDA;

  // Stores all divergent values.
  DenseSet<const Value *> DivergentValues;

  // Stores divergent uses of possibly uniform values.
  DenseSet<const Use *> DivergentUses;
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_LEGACYDIVERGENCEANALYSIS_H
//===-- DOTGraphTraitsPass.h - Print/View dotty graphs-----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Templates to create dotty viewer and printer passes for GraphTraits graphs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
#define LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H

#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"

namespace llvm {

/// Default traits class for extracting a graph from an analysis pass.
///
/// This assumes that 'GraphT' is 'AnalysisT::Result *', and passes it through.
template <typename Result, typename GraphT = Result *>
struct DefaultAnalysisGraphTraits {
  static GraphT getGraph(Result R) { return &R; }
};

template <typename GraphT>
void viewGraphForFunction(Function &F, GraphT Graph, StringRef Name,
                          bool IsSimple) {
  std::string GraphName = DOTGraphTraits<GraphT *>::getGraphName(&Graph);

  ViewGraph(Graph, Name, IsSimple,
            GraphName + " for '" + F.getName() + "' function");
}

template <typename AnalysisT, bool IsSimple,
          typename GraphT = typename AnalysisT::Result *,
          typename AnalysisGraphTraitsT =
              DefaultAnalysisGraphTraits<typename AnalysisT::Result &, GraphT>>
struct DOTGraphTraitsViewer
    : PassInfoMixin<DOTGraphTraitsViewer<AnalysisT, IsSimple, GraphT,
                                         AnalysisGraphTraitsT>> {
  DOTGraphTraitsViewer(StringRef GraphName) : Name(GraphName) {}

  /// Return true if this function should be processed.
  ///
  /// An implementation of this class may override this function to indicate that
  /// only certain functions should be viewed.
  ///
  /// @param Result The current analysis result for this function.
  virtual bool processFunction(Function &F,
                               const typename AnalysisT::Result &Result) {
    return true;
  }

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    auto &Result = FAM.getResult<AnalysisT>(F);
    if (!processFunction(F, Result))
      return PreservedAnalyses::all();

    GraphT Graph = AnalysisGraphTraitsT::getGraph(Result);
    viewGraphForFunction(F, Graph, Name, IsSimple);

    return PreservedAnalyses::all();
  };

protected:
  /// Avoid compiler warning "has virtual functions but non-virtual destructor
  /// [-Wnon-virtual-dtor]" in derived classes.
  ///
  /// DOTGraphTraitsViewer is also used as a mixin for avoiding repeated
  /// implementation of viewer passes, i.e. there should be no
  /// runtime-polymorphism/downcasting involving this class and hence no
  /// virtual destructor is needed. Making this dtor protected stops accidental
  /// invocation when the derived class destructor should have been called.
  /// Such derived classes should be marked final to avoid the warning.
  ~DOTGraphTraitsViewer() {}

private:
  StringRef Name;
};
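
// For illustration, a minimal sketch of a concrete viewer pass built on the
// template above; MyAnalysis is a hypothetical analysis whose Result has a
// DOTGraphTraits specialization:
//
//   struct MyGraphViewer final
//       : DOTGraphTraitsViewer<MyAnalysis, /*IsSimple=*/false> {
//     MyGraphViewer()
//         : DOTGraphTraitsViewer<MyAnalysis, false>("mygraph") {}
//   };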

template <typename GraphT>
void printGraphForFunction(Function &F, GraphT Graph, StringRef Name,
                           bool IsSimple) {
  std::string Filename = Name.str() + "." + F.getName().str() + ".dot";
  std::error_code EC;

  errs() << "Writing '" << Filename << "'...";

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);

  if (!EC)
    WriteGraph(File, Graph, IsSimple,
               GraphName + " for '" + F.getName() + "' function");
  else
    errs() << "  error opening file for writing!";
  errs() << "\n";
}

template <typename AnalysisT, bool IsSimple,
          typename GraphT = typename AnalysisT::Result *,
          typename AnalysisGraphTraitsT =
              DefaultAnalysisGraphTraits<typename AnalysisT::Result &, GraphT>>
struct DOTGraphTraitsPrinter
    : PassInfoMixin<DOTGraphTraitsPrinter<AnalysisT, IsSimple, GraphT,
                                          AnalysisGraphTraitsT>> {
  DOTGraphTraitsPrinter(StringRef GraphName) : Name(GraphName) {}

  /// Return true if this function should be processed.
  ///
  /// An implementation of this class may override this function to indicate that
  /// only certain functions should be viewed.
  ///
  /// @param Result The current analysis result for this function.
  virtual bool processFunction(Function &F,
                               const typename AnalysisT::Result &Result) {
    return true;
  }

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM) {
    auto &Result = FAM.getResult<AnalysisT>(F);
    if (!processFunction(F, Result))
      return PreservedAnalyses::all();

    GraphT Graph = AnalysisGraphTraitsT::getGraph(Result);

    printGraphForFunction(F, Graph, Name, IsSimple);

    return PreservedAnalyses::all();
  };

protected:
  /// Avoid compiler warning "has virtual functions but non-virtual destructor
  /// [-Wnon-virtual-dtor]" in derived classes.
  ///
  /// DOTGraphTraitsPrinter is also used as a mixin for avoiding repeated
  /// implementation of printer passes, i.e. there should be no
  /// runtime-polymorphism/downcasting involving this class and hence no
  /// virtual destructor is needed. Making this dtor protected stops accidental
  /// invocation when the derived class destructor should have been called.
  /// Such derived classes should be marked final to avoid the warning.
  ~DOTGraphTraitsPrinter() {}

private:
  StringRef Name;
};

/// Default traits class for extracting a graph from an analysis pass.
///
/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
template <typename AnalysisT, typename GraphT = AnalysisT *>
struct LegacyDefaultAnalysisGraphTraits {
  static GraphT getGraph(AnalysisT *A) { return A; }
};

template <typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
          typename AnalysisGraphTraitsT =
              LegacyDefaultAnalysisGraphTraits<AnalysisT, GraphT>>
class DOTGraphTraitsViewerWrapperPass : public FunctionPass {
public:
  DOTGraphTraitsViewerWrapperPass(StringRef GraphName, char &ID)
      : FunctionPass(ID), Name(GraphName) {}

  /// Return true if this function should be processed.
  ///
  /// An implementation of this class may override this function to indicate that
  /// only certain functions should be viewed.
  ///
  /// @param Analysis The current analysis result for this function.
  virtual bool processFunction(Function &F, AnalysisT &Analysis) {
    return true;
  }

  bool runOnFunction(Function &F) override {
    auto &Analysis = getAnalysis<AnalysisT>();

    if (!processFunction(F, Analysis))
      return false;

    GraphT Graph = AnalysisGraphTraitsT::getGraph(&Analysis);
    viewGraphForFunction(F, Graph, Name, IsSimple);

    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AnalysisT>();
  }

private:
  std::string Name;
};

template <typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
          typename AnalysisGraphTraitsT =
              LegacyDefaultAnalysisGraphTraits<AnalysisT, GraphT>>
class DOTGraphTraitsPrinterWrapperPass : public FunctionPass {
public:
  DOTGraphTraitsPrinterWrapperPass(StringRef GraphName, char &ID)
      : FunctionPass(ID), Name(GraphName) {}

  /// Return true if this function should be processed.
  ///
  /// An implementation of this class may override this function to indicate that
  /// only certain functions should be printed.
  ///
  /// @param Analysis The current analysis result for this function.
  virtual bool processFunction(Function &F, AnalysisT &Analysis) {
    return true;
  }

  bool runOnFunction(Function &F) override {
    auto &Analysis = getAnalysis<AnalysisT>();

    if (!processFunction(F, Analysis))
      return false;

    GraphT Graph = AnalysisGraphTraitsT::getGraph(&Analysis);
    printGraphForFunction(F, Graph, Name, IsSimple);

    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AnalysisT>();
  }

private:
  std::string Name;
};

template <typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
          typename AnalysisGraphTraitsT =
              LegacyDefaultAnalysisGraphTraits<AnalysisT, GraphT>>
class DOTGraphTraitsModuleViewerWrapperPass : public ModulePass {
public:
  DOTGraphTraitsModuleViewerWrapperPass(StringRef GraphName, char &ID)
      : ModulePass(ID), Name(GraphName) {}

  bool runOnModule(Module &M) override {
    GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
    std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);

    ViewGraph(Graph, Name, IsSimple, Title);

    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AnalysisT>();
  }

private:
  std::string Name;
};

template <typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
          typename AnalysisGraphTraitsT =
              LegacyDefaultAnalysisGraphTraits<AnalysisT, GraphT>>
class DOTGraphTraitsModulePrinterWrapperPass : public ModulePass {
public:
  DOTGraphTraitsModulePrinterWrapperPass(StringRef GraphName, char &ID)
      : ModulePass(ID), Name(GraphName) {}

  bool runOnModule(Module &M) override {
    GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
    std::string Filename = Name + ".dot";
    std::error_code EC;

    errs() << "Writing '" << Filename << "'...";

    raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
    std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);

    if (!EC)
      WriteGraph(File, Graph, IsSimple, Title);
    else
      errs() << "  error opening file for writing!";
    errs() << "\n";

    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
    AU.addRequired<AnalysisT>();
  }

private:
  std::string Name;
};

template <typename GraphT>
void WriteDOTGraphToFile(Function &F, GraphT &&Graph,
                         std::string FileNamePrefix, bool IsSimple) {
  std::string Filename = FileNamePrefix + "." + F.getName().str() + ".dot";
  std::error_code EC;

  errs() << "Writing '" << Filename << "'...";

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
  std::string Title = GraphName + " for '" + F.getName().str() + "' function";

  if (!EC)
    WriteGraph(File, Graph, IsSimple, Title);
  else
    errs() << "  error opening file for writing!";
  errs() << "\n";
}

} // end namespace llvm

#endif
//===- RegionIterator.h - Iterators to iterate over Regions -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file defines the iterators to iterate over the elements of a Region.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_REGIONITERATOR_H
#define LLVM_ANALYSIS_REGIONITERATOR_H

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/Analysis/RegionInfo.h"
#include <cassert>
#include <iterator>
#include <type_traits>

namespace llvm {

class BasicBlock;
class RegionInfo;

//===----------------------------------------------------------------------===//
/// Hierarchical RegionNode successor iterator.
///
/// This iterator iterates over all successors of a RegionNode.
///
/// For a BasicBlock RegionNode it skips all BasicBlocks that are not part of
/// the parent Region.  Furthermore for BasicBlocks that start a subregion, a
/// RegionNode representing the subregion is returned.
///
/// For a subregion RegionNode there is just one successor: the RegionNode
/// representing the exit of the subregion.
template <class NodeRef, class BlockT, class RegionT> class RNSuccIterator {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = NodeRef;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

private:
  using BlockTraits = GraphTraits<BlockT *>;
  using SuccIterTy = typename BlockTraits::ChildIteratorType;

  // The iterator works in two modes, bb mode or region mode.
  enum ItMode {
    // In BB mode it returns all successors of this BasicBlock as its
    // successors.
    ItBB,
    // In region mode there is only one successor: the RegionNode mapping
    // to the exit block of the RegionNode.
    ItRgBegin, // At the beginning of the regionnode successor.
    ItRgEnd    // At the end of the regionnode successor.
  };

  static_assert(std::is_pointer<NodeRef>::value,
                "FIXME: Currently RNSuccIterator only supports NodeRef as "
                "pointers due to the use of pointer-specific data structures "
                "(e.g. PointerIntPair and SmallPtrSet) internally. Generalize "
                "it to support non-pointer types");

  // Use two bits to represent the iterator mode.
  PointerIntPair<NodeRef, 2, ItMode> Node;

  // The block successor iterator.
  SuccIterTy BItor;

  // advanceRegionSucc - A region node has only one successor. It reaches end
  // once we advance it.
  void advanceRegionSucc() {
    assert(Node.getInt() == ItRgBegin && "Cannot advance region successor!");
    Node.setInt(ItRgEnd);
  }

  NodeRef getNode() const { return Node.getPointer(); }

  // isRegionMode - Is the current iterator in region mode?
  bool isRegionMode() const { return Node.getInt() != ItBB; }

  // Get the immediate successor. This function may return a Basic Block
  // RegionNode or a subregion RegionNode.
  NodeRef getISucc(BlockT *BB) const {
    NodeRef succ;
    succ = getNode()->getParent()->getNode(BB);
    assert(succ && "BB not in Region or entered subregion!");
    return succ;
  }

  // getRegionSucc - Return the successor basic block of a SubRegion RegionNode.
  inline BlockT* getRegionSucc() const {
    assert(Node.getInt() == ItRgBegin && "Cannot get the region successor!");
    return getNode()->template getNodeAs<RegionT>()->getExit();
  }

  // isExit - Is this the exit BB of the Region?
  inline bool isExit(BlockT* BB) const {
    return getNode()->getParent()->getExit() == BB;
  }

public:
  using Self = RNSuccIterator<NodeRef, BlockT, RegionT>;

  /// Create begin iterator of a RegionNode.
  inline RNSuccIterator(NodeRef node)
      : Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
        BItor(BlockTraits::child_begin(node->getEntry())) {
    // Skip the exit block
    if (!isRegionMode())
      while (BlockTraits::child_end(node->getEntry()) != BItor && isExit(*BItor))
        ++BItor;

    if (isRegionMode() && isExit(getRegionSucc()))
      advanceRegionSucc();
  }

  /// Create an end iterator.
  inline RNSuccIterator(NodeRef node, bool)
      : Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
        BItor(BlockTraits::child_end(node->getEntry())) {}

  inline bool operator==(const Self& x) const {
    assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
    if (isRegionMode())
      return Node.getInt() == x.Node.getInt();
    else
      return BItor == x.BItor;
  }

  inline bool operator!=(const Self& x) const { return !operator==(x); }

  inline value_type operator*() const {
    BlockT *BB = isRegionMode() ? getRegionSucc() : *BItor;
    assert(!isExit(BB) && "Iterator out of range!");
    return getISucc(BB);
  }

  inline Self& operator++() {
    if(isRegionMode()) {
      // The Region only has 1 successor.
      advanceRegionSucc();
    } else {
      // Skip the exit.
      do
        ++BItor;
      while (BItor != BlockTraits::child_end(getNode()->getEntry())
          && isExit(*BItor));
    }
    return *this;
  }

  inline Self operator++(int) {
    Self tmp = *this;
    ++*this;
    return tmp;
  }
};

//===----------------------------------------------------------------------===//
/// Flat RegionNode iterator.
///
/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
/// are contained in the Region and its subregions. This is close to a virtual
/// control flow graph of the Region.
template <class NodeRef, class BlockT, class RegionT>
class RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT> {
  using BlockTraits = GraphTraits<BlockT *>;
  using SuccIterTy = typename BlockTraits::ChildIteratorType;

  NodeRef Node;
  SuccIterTy Itor;

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = NodeRef;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  using Self = RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>;

  /// Create the iterator from a RegionNode.
  ///
  /// Note that the incoming node must be a bb node, otherwise it will trigger
  /// an assertion when we try to get a BasicBlock.
  inline RNSuccIterator(NodeRef node)
      : Node(node), Itor(BlockTraits::child_begin(node->getEntry())) {
    assert(!Node->isSubRegion() &&
           "Subregion node not allowed in flat iterating mode!");
    assert(Node->getParent() && "A BB node must have a parent!");

    // Skip the exit block of the iterating region.
    while (BlockTraits::child_end(Node->getEntry()) != Itor &&
           Node->getParent()->getExit() == *Itor)
      ++Itor;
  }

  /// Create an end iterator
  inline RNSuccIterator(NodeRef node, bool)
      : Node(node), Itor(BlockTraits::child_end(node->getEntry())) {
    assert(!Node->isSubRegion() &&
           "Subregion node not allowed in flat iterating mode!");
  }

  inline bool operator==(const Self& x) const {
    assert(Node->getParent() == x.Node->getParent()
           && "Cannot compare iterators of different regions!");

    return Itor == x.Itor && Node == x.Node;
  }

  inline bool operator!=(const Self& x) const { return !operator==(x); }

  inline value_type operator*() const {
    BlockT *BB = *Itor;

    // Get the iterating region.
    RegionT *Parent = Node->getParent();

    // The only case in which a successor reaches outside the region is when
    // it reaches the exit of the region.
    assert(Parent->getExit() != BB && "iterator out of range!");

    return Parent->getBBNode(BB);
  }

  inline Self& operator++() {
    // Skip the exit block of the iterating region.
    do
      ++Itor;
    while (Itor != succ_end(Node->getEntry())
        && Node->getParent()->getExit() == *Itor);

    return *this;
  }

  inline Self operator++(int) {
    Self tmp = *this;
    ++*this;
    return tmp;
  }
};

template <class NodeRef, class BlockT, class RegionT>
inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_begin(NodeRef Node) {
  return RNSuccIterator<NodeRef, BlockT, RegionT>(Node);
}

template <class NodeRef, class BlockT, class RegionT>
inline RNSuccIterator<NodeRef, BlockT, RegionT> succ_end(NodeRef Node) {
  return RNSuccIterator<NodeRef, BlockT, RegionT>(Node, true);
}

//===--------------------------------------------------------------------===//
// RegionNode GraphTraits specialization so the BBs in the region can be
// iterated over by generic graph iterators.
//
// NodeT can either be region node or const region node, otherwise child_begin
// and child_end fail.

#define RegionNodeGraphTraits(NodeT, BlockT, RegionT)                          \
  template <> struct GraphTraits<NodeT *> {                                    \
    using NodeRef = NodeT *;                                                   \
    using ChildIteratorType = RNSuccIterator<NodeRef, BlockT, RegionT>;        \
    static NodeRef getEntryNode(NodeRef N) { return N; }                       \
    static inline ChildIteratorType child_begin(NodeRef N) {                   \
      return RNSuccIterator<NodeRef, BlockT, RegionT>(N);                      \
    }                                                                          \
    static inline ChildIteratorType child_end(NodeRef N) {                     \
      return RNSuccIterator<NodeRef, BlockT, RegionT>(N, true);                \
    }                                                                          \
  };                                                                           \
  template <> struct GraphTraits<FlatIt<NodeT *>> {                            \
    using NodeRef = NodeT *;                                                   \
    using ChildIteratorType =                                                  \
        RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>;                      \
    static NodeRef getEntryNode(NodeRef N) { return N; }                       \
    static inline ChildIteratorType child_begin(NodeRef N) {                   \
      return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N);              \
    }                                                                          \
    static inline ChildIteratorType child_end(NodeRef N) {                     \
      return RNSuccIterator<FlatIt<NodeRef>, BlockT, RegionT>(N, true);        \
    }                                                                          \
  }

#define RegionGraphTraits(RegionT, NodeT)                                      \
  template <> struct GraphTraits<RegionT *> : public GraphTraits<NodeT *> {    \
    using nodes_iterator = df_iterator<NodeRef>;                               \
    static NodeRef getEntryNode(RegionT *R) {                                  \
      return R->getNode(R->getEntry());                                        \
    }                                                                          \
    static nodes_iterator nodes_begin(RegionT *R) {                            \
      return nodes_iterator::begin(getEntryNode(R));                           \
    }                                                                          \
    static nodes_iterator nodes_end(RegionT *R) {                              \
      return nodes_iterator::end(getEntryNode(R));                             \
    }                                                                          \
  };                                                                           \
  template <>                                                                  \
  struct GraphTraits<FlatIt<RegionT *>>                                        \
      : public GraphTraits<FlatIt<NodeT *>> {                                  \
    using nodes_iterator =                                                     \
        df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,          \
                    GraphTraits<FlatIt<NodeRef>>>;                             \
    static NodeRef getEntryNode(RegionT *R) {                                  \
      return R->getBBNode(R->getEntry());                                      \
    }                                                                          \
    static nodes_iterator nodes_begin(RegionT *R) {                            \
      return nodes_iterator::begin(getEntryNode(R));                           \
    }                                                                          \
    static nodes_iterator nodes_end(RegionT *R) {                              \
      return nodes_iterator::end(getEntryNode(R));                             \
    }                                                                          \
  }

RegionNodeGraphTraits(RegionNode, BasicBlock, Region);
RegionNodeGraphTraits(const RegionNode, BasicBlock, Region);

RegionGraphTraits(Region, RegionNode);
RegionGraphTraits(const Region, const RegionNode);
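
// For illustration, a hedged sketch of the generic iteration these
// specializations enable (R is a hypothetical Region *):
//
//   for (RegionNode *N : nodes<Region *>(R))             // depth-first walk
//     for (RegionNode *Succ : children<RegionNode *>(N)) // intra-region succs
//       (void)Succ;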

template <> struct GraphTraits<RegionInfo*>
  : public GraphTraits<FlatIt<RegionNode*>> {
  using nodes_iterator =
      df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
                  GraphTraits<FlatIt<NodeRef>>>;

  static NodeRef getEntryNode(RegionInfo *RI) {
    return GraphTraits<FlatIt<Region*>>::getEntryNode(RI->getTopLevelRegion());
  }

  static nodes_iterator nodes_begin(RegionInfo* RI) {
    return nodes_iterator::begin(getEntryNode(RI));
  }

  static nodes_iterator nodes_end(RegionInfo *RI) {
    return nodes_iterator::end(getEntryNode(RI));
  }
};

template <> struct GraphTraits<RegionInfoPass*>
  : public GraphTraits<RegionInfo *> {
  using nodes_iterator =
      df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
                  GraphTraits<FlatIt<NodeRef>>>;

  static NodeRef getEntryNode(RegionInfoPass *RI) {
    return GraphTraits<RegionInfo*>::getEntryNode(&RI->getRegionInfo());
  }

  static nodes_iterator nodes_begin(RegionInfoPass* RI) {
    return GraphTraits<RegionInfo*>::nodes_begin(&RI->getRegionInfo());
  }

  static nodes_iterator nodes_end(RegionInfoPass *RI) {
    return GraphTraits<RegionInfo*>::nodes_end(&RI->getRegionInfo());
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_REGIONITERATOR_H
//===- ScopedNoAliasAA.h - Scoped No-Alias Alias Analysis -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the interface for a metadata-based scoped no-alias analysis.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SCOPEDNOALIASAA_H
#define LLVM_ANALYSIS_SCOPEDNOALIASAA_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <memory>

namespace llvm {

class Function;
class MDNode;
class MemoryLocation;

/// A simple AA result which uses scoped-noalias metadata to answer queries.
class ScopedNoAliasAAResult : public AAResultBase {
public:
  /// Handle invalidation events from the new pass manager.
  ///
  /// By definition, this result is stateless and so remains valid.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI);
  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                           AAQueryInfo &AAQI);

private:
  bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
};

/// Analysis pass providing a never-invalidated alias analysis result.
class ScopedNoAliasAA : public AnalysisInfoMixin<ScopedNoAliasAA> {
  friend AnalysisInfoMixin<ScopedNoAliasAA>;

  static AnalysisKey Key;

public:
  using Result = ScopedNoAliasAAResult;

  ScopedNoAliasAAResult run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy wrapper pass to provide the ScopedNoAliasAAResult object.
class ScopedNoAliasAAWrapperPass : public ImmutablePass {
  std::unique_ptr<ScopedNoAliasAAResult> Result;

public:
  static char ID;

  ScopedNoAliasAAWrapperPass();

  ScopedNoAliasAAResult &getResult() { return *Result; }
  const ScopedNoAliasAAResult &getResult() const { return *Result; }

  bool doInitialization(Module &M) override;
  bool doFinalization(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

//===--------------------------------------------------------------------===//
//
// createScopedNoAliasAAWrapperPass - This pass implements metadata-based
// scoped noalias analysis.
//
ImmutablePass *createScopedNoAliasAAWrapperPass();
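
// Illustrative sketch (not part of this header): making the wrapper pass
// available to a hypothetical legacy pass `ExampleLegacyPass`.
//
//   void ExampleLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<ScopedNoAliasAAWrapperPass>();
//     AU.setPreservesAll();
//   }
//   // ...later, inside runOnFunction():
//   ScopedNoAliasAAResult &R =
//       getAnalysis<ScopedNoAliasAAWrapperPass>().getResult();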

} // end namespace llvm

#endif // LLVM_ANALYSIS_SCOPEDNOALIASAA_H
//===-- CallPrinter.h - Call graph printer external interface ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the call graph printer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CALLPRINTER_H
#define LLVM_ANALYSIS_CALLPRINTER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class ModulePass;

/// Pass for printing the call graph to a dot file
class CallGraphDOTPrinterPass : public PassInfoMixin<CallGraphDOTPrinterPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// Pass for viewing the call graph
class CallGraphViewerPass : public PassInfoMixin<CallGraphViewerPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

ModulePass *createCallGraphViewerPass();
ModulePass *createCallGraphDOTPrinterPass();
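
// Illustrative sketch (not part of this header): scheduling the printer in a
// new-pass-manager pipeline.  `MPM` is assumed to be run over a module later.
//
//   ModulePassManager MPM;
//   MPM.addPass(CallGraphDOTPrinterPass()); // emits the call graph as .dot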

} // end namespace llvm

#endif
//===- llvm/Analysis/MemoryDependenceAnalysis.h - Memory Deps ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MemoryDependenceAnalysis analysis pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
#define LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerSumType.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <optional>

namespace llvm {

class AAResults;
class AssumptionCache;
class BatchAAResults;
class DominatorTree;
class PHITransAddr;

/// A memory dependence query can return one of three different answers.
class MemDepResult {
  enum DepType {
    /// Clients of MemDep never see this.
    ///
    /// Entries with this marker occur in a LocalDeps map or NonLocalDeps map
    /// when the instruction they previously referenced was removed from
    /// MemDep.  In either case, the entry may include an instruction pointer.
    /// If so, the pointer is an instruction in the block where scanning can
    /// start from, saving some work.
    ///
    /// In a default-constructed MemDepResult object, the type will be Invalid
    /// and the instruction pointer will be null.
    Invalid = 0,

    /// This is a dependence on the specified instruction which clobbers the
    /// desired value.  The pointer member of the MemDepResult pair holds the
    /// instruction that clobbers the memory.  For example, this occurs when we
    /// see a may-aliased store to the memory location we care about.
    ///
    /// There are several cases that may be interesting here:
    ///   1. Loads are clobbered by may-alias stores.
    ///   2. Loads are considered clobbered by partially-aliased loads.  The
    ///      client may choose to analyze deeper into these cases.
    Clobber,

    /// This is a dependence on the specified instruction which defines or
    /// produces the desired memory location.  The pointer member of the
    /// MemDepResult pair holds the instruction that defines the memory.
    ///
    /// Cases of interest:
    ///   1. This could be a load or store for dependence queries on
    ///      load/store.  The value loaded or stored is the produced value.
    ///      Note that the pointer operand may be different from that of the
    ///      queried pointer due to must aliases and phi translation. Note
    ///      that the def may not be the same type as the query; the pointers
    ///      may just be must aliases.
    ///   2. For loads and stores, this could be an allocation instruction. In
    ///      this case, the load is loading an undef value or a store is the
    ///      first store to (that part of) the allocation.
    ///   3. Dependence queries on calls return Def only when they are readonly
    ///      calls or memory use intrinsics with identical callees and no
    ///      intervening clobbers.  No validation is done that the operands to
    ///      the calls are the same.
    ///   4. For loads and stores, this could be a select instruction that
    ///      defines a pointer to this memory location. In this case, users can
    ///      find non-clobbered Defs for both select values that are reaching
    ///      the desired memory location (there is still a guarantee that there
    ///      are no clobbers between the analyzed memory location and the
    ///      select).
    Def,

    /// This marker indicates that the query has no known dependency in the
    /// specified block.
    ///
    /// More detailed state info is encoded in the upper part of the pair (i.e.
    /// the Instruction*).
    Other
  };

  /// If DepType is "Other", the upper part of the sum type is an encoding of
  /// the following more detailed type information.
  enum OtherType {
    /// This marker indicates that the query has no dependency in the specified
    /// block.
    ///
    /// To find out more, the client should query other predecessor blocks.
    NonLocal = 1,
    /// This marker indicates that the query has no dependency in the specified
    /// function.
    NonFuncLocal,
    /// This marker indicates that the query dependency is unknown.
    Unknown
  };

  using ValueTy = PointerSumType<
      DepType, PointerSumTypeMember<Invalid, Instruction *>,
      PointerSumTypeMember<Clobber, Instruction *>,
      PointerSumTypeMember<Def, Instruction *>,
      PointerSumTypeMember<Other, PointerEmbeddedInt<OtherType, 3>>>;
  ValueTy Value;

  explicit MemDepResult(ValueTy V) : Value(V) {}

public:
  MemDepResult() = default;

  /// get methods: These are static ctor methods for creating various
  /// MemDepResult kinds.
  static MemDepResult getDef(Instruction *Inst) {
    assert(Inst && "Def requires inst");
    return MemDepResult(ValueTy::create<Def>(Inst));
  }
  static MemDepResult getClobber(Instruction *Inst) {
    assert(Inst && "Clobber requires inst");
    return MemDepResult(ValueTy::create<Clobber>(Inst));
  }
  static MemDepResult getNonLocal() {
    return MemDepResult(ValueTy::create<Other>(NonLocal));
  }
  static MemDepResult getNonFuncLocal() {
    return MemDepResult(ValueTy::create<Other>(NonFuncLocal));
  }
  static MemDepResult getUnknown() {
    return MemDepResult(ValueTy::create<Other>(Unknown));
  }

  /// Tests if this MemDepResult represents a query that is an instruction
  /// clobber dependency.
  bool isClobber() const { return Value.is<Clobber>(); }

  /// Tests if this MemDepResult represents a query that is an instruction
  /// definition dependency.
  bool isDef() const { return Value.is<Def>(); }

  /// Tests if this MemDepResult represents a valid local query (Clobber/Def).
  bool isLocal() const { return isClobber() || isDef(); }

  /// Tests if this MemDepResult represents a query that is transparent to the
  /// start of the block, but where a non-local query hasn't been done.
  bool isNonLocal() const {
    return Value.is<Other>() && Value.cast<Other>() == NonLocal;
  }

  /// Tests if this MemDepResult represents a query that is transparent to the
  /// start of the function.
  bool isNonFuncLocal() const {
    return Value.is<Other>() && Value.cast<Other>() == NonFuncLocal;
  }

  /// Tests if this MemDepResult represents a query which cannot and/or will
  /// not be computed.
  bool isUnknown() const {
    return Value.is<Other>() && Value.cast<Other>() == Unknown;
  }

  /// If this is a normal dependency, returns the instruction that is depended
  /// on.  Otherwise, returns null.
  Instruction *getInst() const {
    switch (Value.getTag()) {
    case Invalid:
      return Value.cast<Invalid>();
    case Clobber:
      return Value.cast<Clobber>();
    case Def:
      return Value.cast<Def>();
    case Other:
      return nullptr;
    }
    llvm_unreachable("Unknown discriminant!");
  }

  bool operator==(const MemDepResult &M) const { return Value == M.Value; }
  bool operator!=(const MemDepResult &M) const { return Value != M.Value; }
  bool operator<(const MemDepResult &M) const { return Value < M.Value; }
  bool operator>(const MemDepResult &M) const { return Value > M.Value; }

private:
  friend class MemoryDependenceResults;

  /// Tests if this is a MemDepResult in its dirty/invalid state.
  bool isDirty() const { return Value.is<Invalid>(); }

  static MemDepResult getDirty(Instruction *Inst) {
    return MemDepResult(ValueTy::create<Invalid>(Inst));
  }
};
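
// Illustrative sketch (not part of this header): interpreting a MemDepResult
// returned by MemoryDependenceResults::getDependency (declared further down).
// `MD` and the memory instruction `QueryInst` are hypothetical.
//
//   MemDepResult Dep = MD.getDependency(QueryInst);
//   if (Dep.isDef()) {
//     Instruction *DefInst = Dep.getInst();     // defines the queried memory
//   } else if (Dep.isClobber()) {
//     Instruction *ClobberInst = Dep.getInst(); // may overwrite it
//   } else if (Dep.isNonLocal()) {
//     // No dependency in this block; use the non-local query interface.
//   }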

/// This is an entry in the NonLocalDepInfo cache.
///
/// For each BasicBlock (the BB entry) it keeps a MemDepResult.
class NonLocalDepEntry {
  BasicBlock *BB;
  MemDepResult Result;

public:
  NonLocalDepEntry(BasicBlock *BB, MemDepResult Result)
      : BB(BB), Result(Result) {}

  // This is used for searches.
  NonLocalDepEntry(BasicBlock *BB) : BB(BB) {}

  // BB is the sort key, it can't be changed.
  BasicBlock *getBB() const { return BB; }

  void setResult(const MemDepResult &R) { Result = R; }

  const MemDepResult &getResult() const { return Result; }

  bool operator<(const NonLocalDepEntry &RHS) const { return BB < RHS.BB; }
};
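
// Illustrative sketch (not part of this header): because entries order by
// their BasicBlock pointer, a cache kept as a sorted
// std::vector<NonLocalDepEntry> can be probed with binary search.  `Cache`
// and `BB` are hypothetical.
//
//   auto It = std::upper_bound(Cache.begin(), Cache.end(),
//                              NonLocalDepEntry(BB));
//   if (It != Cache.begin() && std::prev(It)->getBB() == BB) {
//     const MemDepResult &R = std::prev(It)->getResult(); // cached result
//   }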

/// This is a result from a NonLocal dependence query.
///
/// For each BasicBlock (the BB entry) it keeps a MemDepResult and the
/// (potentially phi translated) address that was live in the block.
class NonLocalDepResult {
  NonLocalDepEntry Entry;
  Value *Address;

public:
  NonLocalDepResult(BasicBlock *BB, MemDepResult Result, Value *Address)
      : Entry(BB, Result), Address(Address) {}

  // BB is the sort key, it can't be changed.
  BasicBlock *getBB() const { return Entry.getBB(); }

  void setResult(const MemDepResult &R, Value *Addr) {
    Entry.setResult(R);
    Address = Addr;
  }

  const MemDepResult &getResult() const { return Entry.getResult(); }

  /// Returns the address of this pointer in this block.
  ///
  /// This can be different than the address queried for the non-local result
  /// because of phi translation.  This returns null if the address was not
  /// available in a block (i.e. because phi translation failed) or if this is
  /// a cached result and that address was deleted.
  ///
  /// The address is always null for a non-local 'call' dependence.
  Value *getAddress() const { return Address; }
};

/// Provides a lazy, caching interface for making common memory aliasing
/// information queries, backed by LLVM's alias analysis passes.
///
/// The dependency information returned is somewhat unusual, but is pragmatic.
/// If queried about a store or call that might modify memory, the analysis
/// will return the instruction[s] that may either load from that memory or
/// store to it.  If queried with a load or call that can never modify memory,
/// the analysis will return calls and stores that might modify the pointer,
/// but generally does not return loads unless a) they are volatile, or
/// b) they load from *must-aliased* pointers.  Returning a dependence on
/// must-alias'd pointers instead of all pointers interacts well with the
/// internal caching mechanism.
class MemoryDependenceResults {
  // A map from instructions to their dependency.
  using LocalDepMapType = DenseMap<Instruction *, MemDepResult>;
  LocalDepMapType LocalDeps;

public:
  using NonLocalDepInfo = std::vector<NonLocalDepEntry>;

private:
  /// A pair<Value*, bool> where the bool is true if the dependence is a read
  /// only dependence, false if read/write.
  using ValueIsLoadPair = PointerIntPair<const Value *, 1, bool>;

  /// This pair is used when caching information for a block.
  ///
  /// If the pointer is null, the cache value is not a full query that starts
  /// at the specified block.  If non-null, the bool indicates whether or not
  /// the contents of the block was skipped.
  using BBSkipFirstBlockPair = PointerIntPair<BasicBlock *, 1, bool>;

  /// This record is the information kept for each (value, is load) pair.
  struct NonLocalPointerInfo {
    /// The pair of the block and the skip-first-block flag.
    BBSkipFirstBlockPair Pair;
    /// The results of the query for each relevant block.
    NonLocalDepInfo NonLocalDeps;
    /// The maximum size of the dereferences of the pointer.
    ///
    /// May be UnknownSize if the sizes are unknown.
    LocationSize Size = LocationSize::afterPointer();
    /// The AA tags associated with dereferences of the pointer.
    ///
    /// The members may be null if there are no tags or conflicting tags.
    AAMDNodes AATags;

    NonLocalPointerInfo() = default;
  };

  /// Cache storing a single nonlocal def found for the instruction.
  /// It is set when a nonlocal def is found by a query that would otherwise
  /// return only local dependencies.
  DenseMap<AssertingVH<const Value>, NonLocalDepResult> NonLocalDefsCache;
  using ReverseNonLocalDefsCacheTy =
    DenseMap<Instruction *, SmallPtrSet<const Value*, 4>>;
  ReverseNonLocalDefsCacheTy ReverseNonLocalDefsCache;

  /// This map stores the cached results of doing a pointer lookup at the
  /// bottom of a block.
  ///
  /// The key of this map is the pointer+isload bit, the value is a list of
  /// <bb->result> mappings.
  using CachedNonLocalPointerInfo =
      DenseMap<ValueIsLoadPair, NonLocalPointerInfo>;
  CachedNonLocalPointerInfo NonLocalPointerDeps;

  // A map from instructions to their non-local pointer dependencies.
  using ReverseNonLocalPtrDepTy =
      DenseMap<Instruction *, SmallPtrSet<ValueIsLoadPair, 4>>;
  ReverseNonLocalPtrDepTy ReverseNonLocalPtrDeps;

  /// This is the set of cached non-local dependence results we keep for each
  /// queried instruction.
  ///
  /// The vector holds the per-block results and the bool indicates whether
  /// any entries in the set are dirty.
  using PerInstNLInfo = std::pair<NonLocalDepInfo, bool>;

  // A map from instructions to their non-local dependencies.
  using NonLocalDepMapType = DenseMap<Instruction *, PerInstNLInfo>;

  NonLocalDepMapType NonLocalDepsMap;

  // A reverse mapping from dependencies to the dependees.  This is
  // used when removing instructions to keep the cache coherent.
  using ReverseDepMapType =
      DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>>;
  ReverseDepMapType ReverseLocalDeps;

  // A reverse mapping from dependencies to the non-local dependees.
  ReverseDepMapType ReverseNonLocalDeps;

  /// Current AA implementation, just a cache.
  AAResults &AA;
  AssumptionCache &AC;
  const TargetLibraryInfo &TLI;
  DominatorTree &DT;
  PredIteratorCache PredCache;

  unsigned DefaultBlockScanLimit;

  /// Offsets to dependent clobber loads.
  using ClobberOffsetsMapType = DenseMap<LoadInst *, int32_t>;
  ClobberOffsetsMapType ClobberOffsets;

public:
  MemoryDependenceResults(AAResults &AA, AssumptionCache &AC,
                          const TargetLibraryInfo &TLI, DominatorTree &DT,
                          unsigned DefaultBlockScanLimit)
      : AA(AA), AC(AC), TLI(TLI), DT(DT),
        DefaultBlockScanLimit(DefaultBlockScanLimit) {}

  /// Handle invalidation in the new PM.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

  /// Some methods limit the number of instructions they will examine.
  /// The return value of this method is the default limit that will be
  /// used if no limit is explicitly passed in.
  unsigned getDefaultBlockScanLimit() const;

  /// Returns the instruction on which a memory operation depends.
  ///
  /// See the class comment for more details. It is illegal to call this on
  /// non-memory instructions.
  MemDepResult getDependency(Instruction *QueryInst);

  /// Perform a full dependency query for the specified call, returning the set
  /// of blocks that the value is potentially live across.
  ///
  /// The returned set of results will include a "NonLocal" result for all
  /// blocks where the value is live across.
  ///
  /// This method assumes the instruction returns a "NonLocal" dependency
  /// within its own block.
  ///
  /// This returns a reference to an internal data structure that may be
  /// invalidated on the next non-local query or when an instruction is
  /// removed.  Clients must copy this data if they want it around longer than
  /// that.
  const NonLocalDepInfo &getNonLocalCallDependency(CallBase *QueryCall);

  /// Perform a full dependency query for an access to the QueryInst's
  /// specified memory location, returning the set of instructions that either
  /// define or clobber the value.
  ///
  /// Warning: For a volatile query instruction, the dependencies will be
  /// accurate, and thus usable for reordering, but it is never legal to
  /// remove the query instruction.
  ///
  /// This method assumes the pointer has a "NonLocal" dependency within
  /// QueryInst's parent basic block.
  void getNonLocalPointerDependency(Instruction *QueryInst,
                                    SmallVectorImpl<NonLocalDepResult> &Result);
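
  // Illustrative sketch (not part of this header): collecting the non-local
  // defs/clobbers of a load whose local dependence is NonLocal.  `MD` (a
  // MemoryDependenceResults) and `LI` (a LoadInst) are hypothetical.
  //
  //   SmallVector<NonLocalDepResult, 8> Deps;
  //   MD.getNonLocalPointerDependency(LI, Deps);
  //   for (const NonLocalDepResult &D : Deps) {
  //     BasicBlock *BB = D.getBB();            // block the result applies to
  //     const MemDepResult &R = D.getResult(); // Def/Clobber/NonLocal/...
  //     Value *Addr = D.getAddress();          // may differ: phi translation
  //   }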

  /// Removes an instruction from the dependence analysis, updating the
  /// dependence of instructions that previously depended on it.
  void removeInstruction(Instruction *InstToRemove);

  /// Invalidates cached information about the specified pointer, because it
  /// may be too conservative in memdep.
  ///
  /// This is an optional call that can be used when the client detects an
  /// equivalence between the pointer and some other value and replaces the
  /// other value with Ptr. This can make Ptr available in more places than
  /// the cached info necessarily reflects.
  void invalidateCachedPointerInfo(Value *Ptr);

  /// Clears the PredIteratorCache info.
  ///
  /// This needs to be done when the CFG changes, e.g., due to splitting
  /// critical edges.
  void invalidateCachedPredecessors();

  /// Returns the instruction on which a memory location depends.
  ///
  /// If isLoad is true, this routine ignores may-aliases with read-only
  /// operations.  If isLoad is false, this routine ignores may-aliases
  /// with reads from read-only locations. If possible, pass the query
  /// instruction as well; this function may take advantage of the metadata
  /// annotated to the query instruction to refine the result. \p Limit
  /// can be used to set the maximum number of instructions that will be
  /// examined to find the pointer dependency. On return, it will be set to
  /// the number of instructions left to examine. If a null pointer is passed
  /// in, the limit will default to the value of -memdep-block-scan-limit.
  ///
  /// Note that this is an uncached query, and thus may be inefficient.
  MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad,
                                        BasicBlock::iterator ScanIt,
                                        BasicBlock *BB,
                                        Instruction *QueryInst = nullptr,
                                        unsigned *Limit = nullptr);

  MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc, bool isLoad,
                                        BasicBlock::iterator ScanIt,
                                        BasicBlock *BB,
                                        Instruction *QueryInst,
                                        unsigned *Limit,
                                        BatchAAResults &BatchAA);

  MemDepResult
  getSimplePointerDependencyFrom(const MemoryLocation &MemLoc, bool isLoad,
                                 BasicBlock::iterator ScanIt, BasicBlock *BB,
                                 Instruction *QueryInst, unsigned *Limit,
                                 BatchAAResults &BatchAA);

  /// This analysis looks for other loads and stores with invariant.group
  /// metadata and the same pointer operand. Returns Unknown if it does not
  /// find anything, Def if it can be assumed that the two instructions load
  /// or store the same value, and NonLocal if a non-local Def was found,
  /// which can be retrieved by calling getNonLocalPointerDependency with the
  /// same queried instruction.
  MemDepResult getInvariantGroupPointerDependency(LoadInst *LI, BasicBlock *BB);

  /// Release memory in caches.
  void releaseMemory();

  /// Return the clobber offset to the dependent instruction.
  std::optional<int32_t> getClobberOffset(LoadInst *DepInst) const {
    const auto Off = ClobberOffsets.find(DepInst);
    if (Off != ClobberOffsets.end())
      return Off->getSecond();
    return std::nullopt;
  }

private:
  MemDepResult getCallDependencyFrom(CallBase *Call, bool isReadOnlyCall,
                                     BasicBlock::iterator ScanIt,
                                     BasicBlock *BB);
  bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
                                   const PHITransAddr &Pointer,
                                   const MemoryLocation &Loc, bool isLoad,
                                   BasicBlock *BB,
                                   SmallVectorImpl<NonLocalDepResult> &Result,
                                   DenseMap<BasicBlock *, Value *> &Visited,
                                   bool SkipFirstBlock = false,
                                   bool IsIncomplete = false);
  MemDepResult getNonLocalInfoForBlock(Instruction *QueryInst,
                                       const MemoryLocation &Loc, bool isLoad,
                                       BasicBlock *BB, NonLocalDepInfo *Cache,
                                       unsigned NumSortedEntries,
                                       BatchAAResults &BatchAA);

  void removeCachedNonLocalPointerDependencies(ValueIsLoadPair P);

  void verifyRemoved(Instruction *Inst) const;
};

/// An analysis that produces \c MemoryDependenceResults for a function.
///
/// This is essentially a no-op because the results are computed entirely
/// lazily.
class MemoryDependenceAnalysis
    : public AnalysisInfoMixin<MemoryDependenceAnalysis> {
  friend AnalysisInfoMixin<MemoryDependenceAnalysis>;

  static AnalysisKey Key;

  unsigned DefaultBlockScanLimit;

public:
  using Result = MemoryDependenceResults;

  MemoryDependenceAnalysis();
  MemoryDependenceAnalysis(unsigned DefaultBlockScanLimit)
      : DefaultBlockScanLimit(DefaultBlockScanLimit) {}

  MemoryDependenceResults run(Function &F, FunctionAnalysisManager &AM);
};
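
// Illustrative sketch (not part of this header): obtaining the lazily
// computed results in a new-pass-manager pass.  `ExamplePass` is
// hypothetical.
//
//   PreservedAnalyses ExamplePass::run(Function &F,
//                                      FunctionAnalysisManager &FAM) {
//     MemoryDependenceResults &MD =
//         FAM.getResult<MemoryDependenceAnalysis>(F);
//     (void)MD; // call MD.getDependency(...) on memory instructions as needed
//     return PreservedAnalyses::all();
//   }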

/// A wrapper analysis pass for the legacy pass manager that exposes a \c
/// MemoryDependenceResults instance.
class MemoryDependenceWrapperPass : public FunctionPass {
  std::optional<MemoryDependenceResults> MemDep;

public:
  static char ID;

  MemoryDependenceWrapperPass();
  ~MemoryDependenceWrapperPass() override;

  /// Pass Implementation stuff.  This doesn't do any analysis eagerly.
  bool runOnFunction(Function &) override;

  /// Clean up memory in between runs
  void releaseMemory() override;

  /// Does not modify anything.  It uses Value Numbering and Alias Analysis.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  MemoryDependenceResults &getMemDep() { return *MemDep; }
};
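
// Illustrative sketch (not part of this header): reaching the results from a
// legacy FunctionPass that declared
// `AU.addRequired<MemoryDependenceWrapperPass>()` in its getAnalysisUsage.
//
//   MemoryDependenceResults &MD =
//       getAnalysis<MemoryDependenceWrapperPass>().getMemDep();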

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the generic AliasAnalysis interface, which is used as the
// common interface used by all clients of alias analysis information, and
// implemented by all alias analysis implementations.  Mod/Ref information is
// also captured by this interface.
//
// Implementations of this interface must implement the various virtual
// methods; doing so automatically provides functionality for the entire suite
// of client APIs.
//
// This API identifies memory regions with the MemoryLocation class. The pointer
// component specifies the base memory address of the region. The Size specifies
// the maximum size (in address units) of the memory region, or
// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
// identifies the "type" of the memory reference; see the
// TypeBasedAliasAnalysis class for details.
//
// Some non-obvious details include:
//  - Pointers that point to two completely different objects in memory never
//    alias, regardless of the value of the Size component.
//  - NoAlias doesn't imply that the pointers are unequal. The most obvious
//    example is two pointers to constant memory. Even if they are equal,
//    constant memory is never stored to, so there will never be any
//    dependencies.
//    In this and other situations, the pointers may be both NoAlias and
//    MustAlias at the same time. The current API can only return one result,
//    though this is rarely a problem in practice.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
#define LLVM_ANALYSIS_ALIASANALYSIS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/ModRef.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <vector>

namespace llvm {

class AnalysisUsage;
class AtomicCmpXchgInst;
class BasicBlock;
class CatchPadInst;
class CatchReturnInst;
class DominatorTree;
class FenceInst;
class Function;
class LoopInfo;
class PreservedAnalyses;
class TargetLibraryInfo;
class Value;
template <typename> class SmallPtrSetImpl;

/// The possible results of an alias query.
///
/// These results are always computed between two MemoryLocation objects as
/// a query to some alias analysis.
///
/// Note that these are unscoped enumerations because we would like to support
/// implicitly testing a result for the existence of any possible aliasing with
/// a conversion to bool, but an "enum class" doesn't support this. The
/// canonical names from the literature are suffixed and unique anyway, and so
/// they serve as global constants in LLVM for these results.
///
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
class AliasResult {
private:
  static const int OffsetBits = 23;
  static const int AliasBits = 8;
  static_assert(AliasBits + 1 + OffsetBits <= 32,
                "AliasResult size is intended to be 4 bytes!");

  unsigned int Alias : AliasBits;
  unsigned int HasOffset : 1;
  signed int Offset : OffsetBits;

public:
  enum Kind : uint8_t {
    /// The two locations do not alias at all.
    ///
    /// This value is arranged to convert to false, while all other values
    /// convert to true. This allows a boolean context to convert the result to
    /// a binary flag indicating whether there is the possibility of aliasing.
    NoAlias = 0,
    /// The two locations may or may not alias. This is the least precise
    /// result.
    MayAlias,
    /// The two locations alias, but only due to a partial overlap.
    PartialAlias,
    /// The two locations precisely alias each other.
    MustAlias,
  };
  static_assert(MustAlias < (1 << AliasBits),
                "Not enough bit field size for the enum!");

  explicit AliasResult() = delete;
  constexpr AliasResult(const Kind &Alias)
      : Alias(Alias), HasOffset(false), Offset(0) {}

  operator Kind() const { return static_cast<Kind>(Alias); }

  bool operator==(const AliasResult &Other) const {
    return Alias == Other.Alias && HasOffset == Other.HasOffset &&
           Offset == Other.Offset;
  }
  bool operator!=(const AliasResult &Other) const { return !(*this == Other); }

  bool operator==(Kind K) const { return Alias == K; }
  bool operator!=(Kind K) const { return !(*this == K); }

  constexpr bool hasOffset() const { return HasOffset; }
  constexpr int32_t getOffset() const {
    assert(HasOffset && "No offset!");
    return Offset;
  }
  void setOffset(int32_t NewOffset) {
    if (isInt<OffsetBits>(NewOffset)) {
      HasOffset = true;
      Offset = NewOffset;
    }
  }

  /// Helper for processing AliasResult for swapped memory location pairs.
  void swap(bool DoSwap = true) {
    if (DoSwap && hasOffset())
      setOffset(-getOffset());
  }
};

static_assert(sizeof(AliasResult) == 4,
              "AliasResult size is intended to be 4 bytes!");

/// << operator for AliasResult.
raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
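
// Illustrative sketch (not part of this header): AliasResult converts to
// bool (NoAlias is false, everything else is true) and compares directly
// against Kind values.  `AA`, `LocA` and `LocB` are hypothetical.
//
//   AliasResult AR = AA.alias(LocA, LocB);
//   if (!AR) {
//     // Provably NoAlias: the two locations never overlap.
//   } else if (AR == AliasResult::PartialAlias && AR.hasOffset()) {
//     int32_t Off = AR.getOffset(); // known byte offset between locations
//   }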

/// Virtual base class for providers of capture information.
struct CaptureInfo {
  virtual ~CaptureInfo() = 0;
  virtual bool isNotCapturedBeforeOrAt(const Value *Object,
                                       const Instruction *I) = 0;
};

/// Context-free CaptureInfo provider, which computes and caches whether an
/// object is captured in the function at all, but does not distinguish whether
/// it was captured before or after the context instruction.
class SimpleCaptureInfo final : public CaptureInfo {
  SmallDenseMap<const Value *, bool, 8> IsCapturedCache;

public:
  bool isNotCapturedBeforeOrAt(const Value *Object,
                               const Instruction *I) override;
};

/// Context-sensitive CaptureInfo provider, which computes and caches the
/// earliest common dominator closure of all captures. It provides a good
/// approximation to a precise "captures before" analysis.
class EarliestEscapeInfo final : public CaptureInfo {
  DominatorTree &DT;
  const LoopInfo &LI;

  /// Map from identified local object to an instruction before which it does
  /// not escape, or nullptr if it never escapes. The "earliest" instruction
  /// may be a conservative approximation, e.g. the first instruction in the
  /// function is always a legal choice.
  DenseMap<const Value *, Instruction *> EarliestEscapes;

  /// Reverse map from instruction to the objects it is the earliest escape for.
  /// This is used for cache invalidation purposes.
  DenseMap<Instruction *, TinyPtrVector<const Value *>> Inst2Obj;

  const SmallPtrSetImpl<const Value *> &EphValues;

public:
  EarliestEscapeInfo(DominatorTree &DT, const LoopInfo &LI,
                     const SmallPtrSetImpl<const Value *> &EphValues)
      : DT(DT), LI(LI), EphValues(EphValues) {}

  bool isNotCapturedBeforeOrAt(const Value *Object,
                               const Instruction *I) override;

  void removeInstruction(Instruction *I);
};

/// Cache key for BasicAA results. It only includes the pointer and size from
/// MemoryLocation, as BasicAA is AATags independent. Additionally, it includes
/// the value of MayBeCrossIteration, which may affect BasicAA results.
struct AACacheLoc {
  using PtrTy = PointerIntPair<const Value *, 1, bool>;
  PtrTy Ptr;
  LocationSize Size;

  AACacheLoc(PtrTy Ptr, LocationSize Size) : Ptr(Ptr), Size(Size) {}
  AACacheLoc(const Value *Ptr, LocationSize Size, bool MayBeCrossIteration)
      : Ptr(Ptr, MayBeCrossIteration), Size(Size) {}
};

template <> struct DenseMapInfo<AACacheLoc> {
  static inline AACacheLoc getEmptyKey() {
    return {DenseMapInfo<AACacheLoc::PtrTy>::getEmptyKey(),
            DenseMapInfo<LocationSize>::getEmptyKey()};
  }
  static inline AACacheLoc getTombstoneKey() {
    return {DenseMapInfo<AACacheLoc::PtrTy>::getTombstoneKey(),
            DenseMapInfo<LocationSize>::getTombstoneKey()};
  }
  static unsigned getHashValue(const AACacheLoc &Val) {
    return DenseMapInfo<AACacheLoc::PtrTy>::getHashValue(Val.Ptr) ^
           DenseMapInfo<LocationSize>::getHashValue(Val.Size);
  }
  static bool isEqual(const AACacheLoc &LHS, const AACacheLoc &RHS) {
    return LHS.Ptr == RHS.Ptr && LHS.Size == RHS.Size;
  }
};

class AAResults;

/// This class stores info we want to provide to or retain within an alias
/// query. By default, the root query is stateless and starts with a freshly
/// constructed info object. Specific alias analyses can use this query info to
/// store per-query state that is important for recursive or nested queries to
/// avoid recomputing. To enable preserving this state across multiple queries
/// where safe (due to the IR not changing), use a `BatchAAResults` wrapper.
/// The information stored in an `AAQueryInfo` is currently limited to the
/// caches used by BasicAA, but can further be extended to fit other AA needs.
class AAQueryInfo {
public:
  using LocPair = std::pair<AACacheLoc, AACacheLoc>;
  struct CacheEntry {
    AliasResult Result;
    /// Number of times a NoAlias assumption has been used.
    /// 0 for assumptions that have not been used, -1 for definitive results.
    int NumAssumptionUses;
    /// Whether this is a definitive (non-assumption) result.
    bool isDefinitive() const { return NumAssumptionUses < 0; }
  };

  // Alias analysis result aggregation with which this query is performed.
  // Can be used to perform recursive queries.
  AAResults &AAR;

  using AliasCacheT = SmallDenseMap<LocPair, CacheEntry, 8>;
  AliasCacheT AliasCache;

  CaptureInfo *CI;

  /// Query depth used to distinguish recursive queries.
  unsigned Depth = 0;

  /// How many active NoAlias assumption uses there are.
  int NumAssumptionUses = 0;

  /// Location pairs for which an assumption based result is currently stored.
  /// Used to remove all potentially incorrect results from the cache if an
  /// assumption is disproven.
  SmallVector<AAQueryInfo::LocPair, 4> AssumptionBasedResults;

  /// Tracks whether the accesses may be on different cycle iterations.
  ///
  /// When interpreting "Value" pointer equality as value equality, we need to make
  /// sure that the "Value" is not part of a cycle. Otherwise, two uses could
  /// come from different "iterations" of a cycle and see different values for
  /// the same "Value" pointer.
  ///
  /// The following example shows the problem:
  ///   %p = phi(%alloca1, %addr2)
  ///   %l = load %ptr
  ///   %addr1 = gep, %alloca2, 0, %l
  ///   %addr2 = gep  %alloca2, 0, (%l + 1)
  ///      alias(%p, %addr1) -> MayAlias !
  ///   store %l, ...
  bool MayBeCrossIteration = false;

  AAQueryInfo(AAResults &AAR, CaptureInfo *CI) : AAR(AAR), CI(CI) {}
};

/// AAQueryInfo that uses SimpleCaptureInfo.
class SimpleAAQueryInfo : public AAQueryInfo {
  SimpleCaptureInfo CI;

public:
  SimpleAAQueryInfo(AAResults &AAR) : AAQueryInfo(AAR, &CI) {}
};

class BatchAAResults;

class AAResults {
public:
  // Make these results default constructible and movable. We have to spell
  // these out because MSVC won't synthesize them.
  AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
  AAResults(AAResults &&Arg);
  ~AAResults();

  /// Register a specific AA result.
  template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
    // FIXME: We should use a much lighter weight system than the usual
    // polymorphic pattern because we don't own AAResult. It should
    // ideally involve two pointers and no separate allocation.
    AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
  }

  /// Register a function analysis ID that the results aggregation depends on.
  ///
  /// This is used in the new pass manager to implement the invalidation logic
  /// where we must invalidate the results aggregation if any of our component
  /// analyses become invalid.
  void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); }

  /// Handle invalidation events in the new pass manager.
  ///
  /// The aggregation is invalidated if any of the underlying analyses is
  /// invalidated.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

  //===--------------------------------------------------------------------===//
  /// \name Alias Queries
  /// @{

  /// The main low level interface to the alias analysis implementation.
  /// Returns an AliasResult indicating whether the two pointers are aliased to
  /// each other. This is the interface that must be implemented by specific
  /// alias analysis implementations.
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);

  /// A convenience wrapper around the primary \c alias interface.
  AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
                    LocationSize V2Size) {
    return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
  }

  /// A convenience wrapper around the primary \c alias interface.
  AliasResult alias(const Value *V1, const Value *V2) {
    return alias(MemoryLocation::getBeforeOrAfter(V1),
                 MemoryLocation::getBeforeOrAfter(V2));
  }

  /// A trivial helper function to check to see if the specified pointers are
  /// no-alias.
  bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
    return alias(LocA, LocB) == AliasResult::NoAlias;
  }

  /// A convenience wrapper around the \c isNoAlias helper interface.
  bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2,
                 LocationSize V2Size) {
    return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
  }

  /// A convenience wrapper around the \c isNoAlias helper interface.
  bool isNoAlias(const Value *V1, const Value *V2) {
    return isNoAlias(MemoryLocation::getBeforeOrAfter(V1),
                     MemoryLocation::getBeforeOrAfter(V2));
  }

  /// A trivial helper function to check to see if the specified pointers are
  /// must-alias.
  bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
    return alias(LocA, LocB) == AliasResult::MustAlias;
  }

  /// A convenience wrapper around the \c isMustAlias helper interface.
  bool isMustAlias(const Value *V1, const Value *V2) {
    return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) ==
           AliasResult::MustAlias;
  }

  /// Checks whether the given location points to constant memory, or if
  /// \p OrLocal is true whether it points to a local alloca.
  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) {
    return isNoModRef(getModRefInfoMask(Loc, OrLocal));
  }

  /// A convenience wrapper around the primary \c pointsToConstantMemory
  /// interface.
  bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
    return pointsToConstantMemory(MemoryLocation::getBeforeOrAfter(P), OrLocal);
  }
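
  // Illustrative sketch (not part of this header): the wrappers above let a
  // client ask coarse questions without building MemoryLocations by hand.
  // `AA` (an AAResults) and the pointers `P1`/`P2` are hypothetical.
  //
  //   if (AA.isNoAlias(P1, P2) || AA.pointsToConstantMemory(P2)) {
  //     // Stores through P2 cannot affect memory reached through P1.
  //   }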

  /// @}
  //===--------------------------------------------------------------------===//
  /// \name Simple mod/ref information
  /// @{

  /// Returns a bitmask that should be unconditionally applied to the ModRef
  /// info of a memory location. This allows us to eliminate Mod and/or Ref
  /// from the ModRef info based on the knowledge that the memory location
  /// points to constant and/or locally-invariant memory.
  ///
  /// If IgnoreLocals is true, then this method returns NoModRef for memory
  /// that points to a local alloca.
  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
                               bool IgnoreLocals = false);

  /// A convenience wrapper around the primary \c getModRefInfoMask
  /// interface.
  ModRefInfo getModRefInfoMask(const Value *P, bool IgnoreLocals = false) {
    return getModRefInfoMask(MemoryLocation::getBeforeOrAfter(P), IgnoreLocals);
  }

  /// Get the ModRef info associated with a pointer argument of a call. The
  /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
  /// that these bits do not necessarily account for the overall behavior of
  /// the function, but rather only provide additional per-argument
  /// information.
  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);

  /// Return the behavior of the given call site.
  MemoryEffects getMemoryEffects(const CallBase *Call);

  /// Return the behavior when calling the given function.
  MemoryEffects getMemoryEffects(const Function *F);

  /// Checks if the specified call is known to never read or write memory.
  ///
  /// Note that if the call only reads from known-constant memory, it is also
  /// legal to return true. Also, calls that unwind the stack are legal for
  /// this predicate.
  ///
  /// Many optimizations (such as CSE and LICM) can be performed on such calls
  /// without worrying about aliasing properties, and many calls have this
  /// property (e.g. calls to 'sin' and 'cos').
  ///
  /// This property corresponds to the GCC 'const' attribute.
  bool doesNotAccessMemory(const CallBase *Call) {
    return getMemoryEffects(Call).doesNotAccessMemory();
  }

  /// Checks if the specified function is known to never read or write memory.
  ///
  /// Note that if the function only reads from known-constant memory, it is
  /// also legal to return true. Also, functions that unwind the stack are legal
  /// for this predicate.
  ///
  /// Many optimizations (such as CSE and LICM) can be performed on such calls
  /// to such functions without worrying about aliasing properties, and many
  /// functions have this property (e.g. 'sin' and 'cos').
  ///
  /// This property corresponds to the GCC 'const' attribute.
  bool doesNotAccessMemory(const Function *F) {
    return getMemoryEffects(F).doesNotAccessMemory();
  }

  /// Checks if the specified call is known to only read from non-volatile
  /// memory (or not access memory at all).
  ///
  /// Calls that unwind the stack are legal for this predicate.
  ///
  /// This property allows many common optimizations to be performed in the
  /// absence of interfering store instructions, such as CSE of strlen calls.
  ///
  /// This property corresponds to the GCC 'pure' attribute.
  bool onlyReadsMemory(const CallBase *Call) {
    return getMemoryEffects(Call).onlyReadsMemory();
  }

  /// Checks if the specified function is known to only read from non-volatile
  /// memory (or not access memory at all).
  ///
  /// Functions that unwind the stack are legal for this predicate.
  ///
  /// This property allows many common optimizations to be performed in the
  /// absence of interfering store instructions, such as CSE of strlen calls.
  ///
  /// This property corresponds to the GCC 'pure' attribute.
  bool onlyReadsMemory(const Function *F) {
    return getMemoryEffects(F).onlyReadsMemory();
  }
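
  // Illustrative sketch (not part of this header): a CSE-style guard using
  // the predicates above.  `AA` and the call sites `C1`/`C2` (identical
  // calls with no intervening stores) are hypothetical.
  //
  //   if (AA.onlyReadsMemory(C1) && C1->isIdenticalTo(C2)) {
  //     // C2 can reuse C1's result if no stores occur between them.
  //   }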

  /// Check whether or not an instruction may read or write the optionally
  /// specified memory location.
  ///
  /// An instruction that doesn't read or write memory may be trivially LICM'd,
  /// for example.
  ///
  /// For function calls, this delegates to the alias-analysis specific
  /// call-site mod-ref behavior queries. Otherwise it delegates to the specific
  /// helpers above.
  ModRefInfo getModRefInfo(const Instruction *I,
                           const std::optional<MemoryLocation> &OptLoc) {
    SimpleAAQueryInfo AAQIP(*this);
    return getModRefInfo(I, OptLoc, AAQIP);
  }

  /// A convenience wrapper for constructing the memory location.
  ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
                           LocationSize Size) {
    return getModRefInfo(I, MemoryLocation(P, Size));
  }
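
  // Illustrative sketch (not part of this header): asking whether an
  // instruction may write the memory a store writes.  `AA`, `I` and `SI`
  // (a StoreInst) are hypothetical.
  //
  //   ModRefInfo MRI = AA.getModRefInfo(I, MemoryLocation::get(SI));
  //   if (isModSet(MRI)) {
  //     // I may modify the stored-to location; do not reorder freely.
  //   }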

  /// Return information about whether a call and an instruction may refer to
  /// the same memory locations.
  ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call);

  /// Return information about whether a particular call site modifies
  /// or reads the specified memory location \p MemLoc before instruction \p I
  /// in a BasicBlock.
  ModRefInfo callCapturesBefore(const Instruction *I,
                                const MemoryLocation &MemLoc,
                                DominatorTree *DT) {
    SimpleAAQueryInfo AAQIP(*this);
    return callCapturesBefore(I, MemLoc, DT, AAQIP);
  }

  /// A convenience wrapper to synthesize a memory location.
  ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
                                LocationSize Size, DominatorTree *DT) {
    return callCapturesBefore(I, MemoryLocation(P, Size), DT);
  }

  /// @}
  //===--------------------------------------------------------------------===//
  /// \name Higher level methods for querying mod/ref information.
  /// @{

  /// Check if it is possible for execution of the specified basic block to
  /// modify the location Loc.
  bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);

  /// A convenience wrapper synthesizing a memory location.
  bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
                           LocationSize Size) {
    return canBasicBlockModify(BB, MemoryLocation(P, Size));
  }

  /// Check if it is possible for the execution of the specified instructions
  /// to mod/ref (according to the mode) the location Loc.
  ///
  /// The instructions to consider are all of the instructions in the range of
  /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
  bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
                                 const MemoryLocation &Loc,
                                 const ModRefInfo Mode);

  /// A convenience wrapper synthesizing a memory location.
  bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
                                 const Value *Ptr, LocationSize Size,
                                 const ModRefInfo Mode) {
    return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
  }

  // CtxI can be nullptr, in which case the query is whether or not the aliasing
  // relationship holds through the entire function.
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI = nullptr);

  bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                              bool OrLocal = false);
  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                               bool IgnoreLocals = false);
  ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call2,
                           AAQueryInfo &AAQIP);
  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
                           const MemoryLocation &Loc, AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
  ModRefInfo getModRefInfo(const Instruction *I,
                           const std::optional<MemoryLocation> &OptLoc,
                           AAQueryInfo &AAQIP);
  ModRefInfo callCapturesBefore(const Instruction *I,
                                const MemoryLocation &MemLoc, DominatorTree *DT,
                                AAQueryInfo &AAQIP);
  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI);

private:
  class Concept;

  template <typename T> class Model;

  friend class AAResultBase;

  const TargetLibraryInfo &TLI;

  std::vector<std::unique_ptr<Concept>> AAs;

  std::vector<AnalysisKey *> AADeps;

  friend class BatchAAResults;
};

/// This class is a wrapper over an AAResults, and it is intended to be used
/// only when there are no IR changes in between queries. BatchAAResults
/// reuses the same `AAQueryInfo` to preserve the state across queries,
/// essentially making AA work in "batch mode". The internal state cannot be
/// cleared, so to go "out-of-batch-mode", the user must either use AAResults,
/// or create a new BatchAAResults.
class BatchAAResults {
  AAResults &AA;
  AAQueryInfo AAQI;
  SimpleCaptureInfo SimpleCI;

public:
  BatchAAResults(AAResults &AAR) : AA(AAR), AAQI(AAR, &SimpleCI) {}
  BatchAAResults(AAResults &AAR, CaptureInfo *CI) : AA(AAR), AAQI(AAR, CI) {}

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
    return AA.alias(LocA, LocB, AAQI);
  }
  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) {
    return AA.pointsToConstantMemory(Loc, AAQI, OrLocal);
  }
  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
                               bool IgnoreLocals = false) {
    return AA.getModRefInfoMask(Loc, AAQI, IgnoreLocals);
  }
  ModRefInfo getModRefInfo(const Instruction *I,
                           const std::optional<MemoryLocation> &OptLoc) {
    return AA.getModRefInfo(I, OptLoc, AAQI);
  }
  ModRefInfo getModRefInfo(const Instruction *I, const CallBase *Call2) {
    return AA.getModRefInfo(I, Call2, AAQI);
  }
  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
    return AA.getArgModRefInfo(Call, ArgIdx);
  }
  MemoryEffects getMemoryEffects(const CallBase *Call) {
    return AA.getMemoryEffects(Call, AAQI);
  }
  bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
    return alias(LocA, LocB) == AliasResult::MustAlias;
  }
  bool isMustAlias(const Value *V1, const Value *V2) {
    return alias(MemoryLocation(V1, LocationSize::precise(1)),
                 MemoryLocation(V2, LocationSize::precise(1))) ==
           AliasResult::MustAlias;
  }
  ModRefInfo callCapturesBefore(const Instruction *I,
                                const MemoryLocation &MemLoc,
                                DominatorTree *DT) {
    return AA.callCapturesBefore(I, MemLoc, DT, AAQI);
  }

  /// Assume that values may come from different cycle iterations.
  void enableCrossIterationMode() {
    AAQI.MayBeCrossIteration = true;
  }
};
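
// Illustrative sketch (not part of this header): batching many queries over
// IR that is guaranteed not to change, so per-query caches are shared.
// `AA` (an AAResults) and `F` are hypothetical.
//
//   BatchAAResults BatchAA(AA);
//   for (BasicBlock &BB : F)
//     for (Instruction &I : BB)
//       if (auto *LI = dyn_cast<LoadInst>(&I))
//         if (BatchAA.pointsToConstantMemory(MemoryLocation::get(LI))) {
//           // LI reads memory that is never written.
//         }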

/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
/// pointer or reference.
using AliasAnalysis = AAResults;

/// A private abstract base class describing the concept of an individual alias
/// analysis implementation.
///
/// This interface is implemented by any \c Model instantiation. It is also the
/// interface which a type used to instantiate the model must provide.
///
/// All of these methods model methods of the same name in the \c
/// AAResults class. Only the differences, and the specifics of how the
/// implementations are called, are documented here.
class AAResults::Concept {
public:
  virtual ~Concept() = 0;

  //===--------------------------------------------------------------------===//
  /// \name Alias Queries
  /// @{

  /// The main low level interface to the alias analysis implementation.
  /// Returns an AliasResult indicating whether the two pointers are aliased to
  /// each other. This is the interface that must be implemented by specific
  /// alias analysis implementations.
  virtual AliasResult alias(const MemoryLocation &LocA,
                            const MemoryLocation &LocB, AAQueryInfo &AAQI,
                            const Instruction *CtxI) = 0;

  /// @}
  //===--------------------------------------------------------------------===//
  /// \name Simple mod/ref information
  /// @{

  /// Returns a bitmask that should be unconditionally applied to the ModRef
  /// info of a memory location. This allows us to eliminate Mod and/or Ref from
  /// the ModRef info based on the knowledge that the memory location points to
  /// constant and/or locally-invariant memory.
  virtual ModRefInfo getModRefInfoMask(const MemoryLocation &Loc,
                                       AAQueryInfo &AAQI,
                                       bool IgnoreLocals) = 0;

  /// Get the ModRef info associated with a pointer argument of a callsite. The
  /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
  /// that these bits do not necessarily account for the overall behavior of
  /// the function, but rather only provide additional per-argument
  /// information.
  virtual ModRefInfo getArgModRefInfo(const CallBase *Call,
                                      unsigned ArgIdx) = 0;

  /// Return the behavior of the given call site.
  virtual MemoryEffects getMemoryEffects(const CallBase *Call,
                                         AAQueryInfo &AAQI) = 0;

  /// Return the behavior when calling the given function.
  virtual MemoryEffects getMemoryEffects(const Function *F) = 0;

  /// getModRefInfo (for call sites) - Return information about whether
  /// a particular call site modifies or reads the specified memory location.
  virtual ModRefInfo getModRefInfo(const CallBase *Call,
                                   const MemoryLocation &Loc,
                                   AAQueryInfo &AAQI) = 0;

  /// Return information about whether two call sites may refer to the same set
  /// of memory locations. See the AA documentation for details:
  ///   http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
  virtual ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                                   AAQueryInfo &AAQI) = 0;

  /// @}
};

/// A private class template which derives from \c Concept and wraps some other
/// type.
///
/// This models the concept by directly forwarding each interface point to the
/// wrapped type which must implement a compatible interface. This provides
/// a type erased binding.
template <typename AAResultT> class AAResults::Model final : public Concept {
  AAResultT &Result;

public:
  explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) {}
  ~Model() override = default;

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI) override {
    return Result.alias(LocA, LocB, AAQI, CtxI);
  }

  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                               bool IgnoreLocals) override {
    return Result.getModRefInfoMask(Loc, AAQI, IgnoreLocals);
  }

  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
    return Result.getArgModRefInfo(Call, ArgIdx);
  }

  MemoryEffects getMemoryEffects(const CallBase *Call,
                                 AAQueryInfo &AAQI) override {
    return Result.getMemoryEffects(Call, AAQI);
  }

  MemoryEffects getMemoryEffects(const Function *F) override {
    return Result.getMemoryEffects(F);
  }

  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI) override {
    return Result.getModRefInfo(Call, Loc, AAQI);
  }

  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                           AAQueryInfo &AAQI) override {
    return Result.getModRefInfo(Call1, Call2, AAQI);
  }
};

/// A base class to help implement the function alias analysis results concept.
///
/// Because of the nature of many alias analysis implementations, they often
/// only implement a subset of the interface. This base class will attempt to
/// implement the remaining portions of the interface in terms of simpler forms
/// of the interface where possible, and otherwise provide conservatively
/// correct fallback implementations.
///
/// Implementors of an alias analysis should derive from this class, and then
/// override specific methods that they wish to customize. There is no need to
/// use virtual anywhere.
class AAResultBase {
protected:
  explicit AAResultBase() = default;

  // Provide all the copy and move constructors so that derived types aren't
  // constrained.
  AAResultBase(const AAResultBase &Arg) {}
  AAResultBase(AAResultBase &&Arg) {}

public:
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *I) {
    return AliasResult::MayAlias;
  }

  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                               bool IgnoreLocals) {
    return ModRefInfo::ModRef;
  }

  ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
    return ModRefInfo::ModRef;
  }

  MemoryEffects getMemoryEffects(const CallBase *Call, AAQueryInfo &AAQI) {
    return MemoryEffects::unknown();
  }

  MemoryEffects getMemoryEffects(const Function *F) {
    return MemoryEffects::unknown();
  }

  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI) {
    return ModRefInfo::ModRef;
  }

  ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
                           AAQueryInfo &AAQI) {
    return ModRefInfo::ModRef;
  }
};
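
// Illustrative sketch (not part of the original header): an alias analysis
// derives from AAResultBase and shadows only the queries it can improve;
// everything else falls back to the conservative defaults above. The class
// name below is a hypothetical example.
//
//   class ExampleAAResult : public AAResultBase {
//   public:
//     AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
//                       AAQueryInfo &AAQI, const Instruction *CtxI) {
//       if (LocA.Ptr == LocB.Ptr && LocA.Size == LocB.Size)
//         return AliasResult::MustAlias; // trivially the same location
//       return AAResultBase::alias(LocA, LocB, AAQI, CtxI); // conservative
//     }
//   };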

/// Return true if this pointer is returned by a noalias function.
bool isNoAliasCall(const Value *V);

/// Return true if this pointer refers to a distinct and identifiable object.
/// This returns true for:
///    Global Variables and Functions (but not Global Aliases)
///    Allocas
///    ByVal and NoAlias Arguments
///    NoAlias returns (e.g. calls to malloc)
///
bool isIdentifiedObject(const Value *V);

/// Return true if V is unambiguously identified at the function level.
/// Different IdentifiedFunctionLocals can't alias.
/// Further, an IdentifiedFunctionLocal cannot alias with any function
/// arguments other than itself, which is not necessarily true for
/// IdentifiedObjects.
bool isIdentifiedFunctionLocal(const Value *V);

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
bool isEscapeSource(const Value *V);

/// Return true if Object memory is not visible after an unwind, in the sense
/// that program semantics cannot depend on Object containing any particular
/// value on unwind. If the RequiresNoCaptureBeforeUnwind out parameter is set
/// to true, then the memory is only not visible if the object has not been
/// captured prior to the unwind. Otherwise it is not visible even if captured.
bool isNotVisibleOnUnwind(const Value *Object,
                          bool &RequiresNoCaptureBeforeUnwind);

/// A manager for alias analyses.
///
/// This class can have analyses registered with it, and when run, it will run
/// all of them and aggregate their results into a single AA results interface
/// that dispatches across all of the alias analysis results available.
///
/// Note that the order in which analyses are registered is very significant.
/// That is the order in which the results will be aggregated and queried.
///
/// This manager effectively wraps the AnalysisManager for registering alias
/// analyses. When you register your alias analysis with this manager, it will
/// ensure the analysis itself is registered with its AnalysisManager.
///
/// The result of this analysis is only invalidated if one of the particular
/// aggregated AA results ends up being invalidated. This removes the need to
/// explicitly preserve the results of `AAManager`. Note that analyses should no
/// longer be registered once the `AAManager` is run.
class AAManager : public AnalysisInfoMixin<AAManager> {
public:
  using Result = AAResults;

  /// Register a specific AA result.
  template <typename AnalysisT> void registerFunctionAnalysis() {
    ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
  }

  /// Register a specific AA result.
  template <typename AnalysisT> void registerModuleAnalysis() {
    ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
  }

  Result run(Function &F, FunctionAnalysisManager &AM);

private:
  friend AnalysisInfoMixin<AAManager>;

  static AnalysisKey Key;

  SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM,
                       AAResults &AAResults),
              4> ResultGetters;

  template <typename AnalysisT>
  static void getFunctionAAResultImpl(Function &F,
                                      FunctionAnalysisManager &AM,
                                      AAResults &AAResults) {
    AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
    AAResults.addAADependencyID(AnalysisT::ID());
  }

  template <typename AnalysisT>
  static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM,
                                    AAResults &AAResults) {
    auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
    if (auto *R =
            MAMProxy.template getCachedResult<AnalysisT>(*F.getParent())) {
      AAResults.addAAResult(*R);
      MAMProxy
          .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>();
    }
  }
};
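
// Illustrative sketch (assumes the new-pass-manager setup and the analyses
// from BasicAliasAnalysis.h / ScalarEvolutionAliasAnalysis.h): register AA
// implementations in priority order, then hand the AAManager to the
// FunctionAnalysisManager before running any passes.
//
//   AAManager AA;
//   AA.registerFunctionAnalysis<BasicAA>(); // queried first
//   AA.registerFunctionAnalysis<SCEVAA>();  // queried second
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([&] { return std::move(AA); });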

/// A wrapper pass to provide the legacy pass manager access to a suitably
/// prepared AAResults object.
class AAResultsWrapperPass : public FunctionPass {
  std::unique_ptr<AAResults> AAR;

public:
  static char ID;

  AAResultsWrapperPass();

  AAResults &getAAResults() { return *AAR; }
  const AAResults &getAAResults() const { return *AAR; }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;
};
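
// Illustrative sketch for a legacy pass that wants AA (assumes a FunctionPass
// subclass): declare the dependency in getAnalysisUsage and fetch the results
// in runOnFunction.
//
//   void getAnalysisUsage(AnalysisUsage &AU) const override {
//     AU.addRequired<AAResultsWrapperPass>();
//   }
//   bool runOnFunction(Function &F) override {
//     AAResults &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
//     // ... query AA here ...
//     return false;
//   }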

/// A wrapper pass for external alias analyses. This just squirrels away the
/// callback used to run any analyses and register their results.
struct ExternalAAWrapperPass : ImmutablePass {
  using CallbackT = std::function<void(Pass &, Function &, AAResults &)>;

  CallbackT CB;

  static char ID;

  ExternalAAWrapperPass();

  explicit ExternalAAWrapperPass(CallbackT CB);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

/// A wrapper pass around a callback which can be used to populate the
/// AAResults in the AAResultsWrapperPass from an external AA.
///
/// The callback provided here will be used each time we prepare an AAResults
/// object, and will receive a reference to the function wrapper pass, the
/// function, and the AAResults object to populate. This should be used when
/// setting up a custom pass pipeline to inject a hook into the AA results.
ImmutablePass *createExternalAAWrapperPass(
    std::function<void(Pass &, Function &, AAResults &)> Callback);
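
// Illustrative sketch: install the hook in a legacy pipeline before the passes
// that will query AAResultsWrapperPass (the callback body is a placeholder).
//
//   legacy::PassManager PM;
//   PM.add(createExternalAAWrapperPass(
//       [](Pass &P, Function &F, AAResults &AAR) {
//         // e.g. AAR.addAAResult(MyResult); MyResult must outlive the run
//       }));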

} // end namespace llvm

#endif // LLVM_ANALYSIS_ALIASANALYSIS_H

Analysis/CallGraph.h

//===- CallGraph.h - Build a Module's call graph ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides interfaces used to build and manipulate a call graph,
/// which is a very useful tool for interprocedural optimization.
///
/// Every function in a module is represented as a node in the call graph.  The
/// callgraph node keeps track of which functions are called by the function
/// corresponding to the node.
///
/// A call graph may contain nodes where the function that they correspond to
/// is null.  These 'external' nodes are used to represent control flow that is
/// not represented (or analyzable) in the module.  In particular, this
/// analysis builds one external node such that:
///   1. All functions in the module without internal linkage will have edges
///      from this external node, indicating that they could be called by
///      functions outside of the module.
///   2. All functions whose address is used for something more than a direct
///      call, for example being stored into a memory location, will also have
///      an edge from this external node.  Since they may be called by an
///      unknown caller later, they must be tracked as such.
///
/// There is a second external node added for calls that leave this module.
/// Functions have a call edge to the external node iff:
///   1. The function is external, reflecting the fact that it could call
///      anything without internal linkage or that has its address taken.
///   2. The function contains an indirect function call.
///
/// As an extension in the future, there may be multiple nodes with a null
/// function.  These will be used when we can prove (through pointer analysis)
/// that an indirect call site can call only a specific set of functions.
///
/// Because of these properties, the CallGraph captures a conservative superset
/// of all of the caller-callee relationships, which is useful for
/// transformations.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CALLGRAPH_H
#define LLVM_ANALYSIS_CALLGRAPH_H

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <cassert>
#include <map>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

template <class GraphType> struct GraphTraits;
class CallGraphNode;
class Function;
class Module;
class raw_ostream;

/// The basic data container for the call graph of a \c Module of IR.
///
/// This class exposes the interface to the call graph for a module of IR.
///
/// The core call graph itself can also be updated to reflect changes to the IR.
class CallGraph {
  Module &M;

  using FunctionMapTy =
      std::map<const Function *, std::unique_ptr<CallGraphNode>>;

  /// A map from \c Function* to \c CallGraphNode*.
  FunctionMapTy FunctionMap;

  /// This node has edges to all external functions and those internal
  /// functions that have their address taken.
  CallGraphNode *ExternalCallingNode;

  /// This node has edges to it from all functions making indirect calls
  /// or calling an external function.
  std::unique_ptr<CallGraphNode> CallsExternalNode;

public:
  explicit CallGraph(Module &M);
  CallGraph(CallGraph &&Arg);
  ~CallGraph();

  void print(raw_ostream &OS) const;
  void dump() const;

  using iterator = FunctionMapTy::iterator;
  using const_iterator = FunctionMapTy::const_iterator;

  /// Returns the module the call graph corresponds to.
  Module &getModule() const { return M; }

  bool invalidate(Module &, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &);

  inline iterator begin() { return FunctionMap.begin(); }
  inline iterator end() { return FunctionMap.end(); }
  inline const_iterator begin() const { return FunctionMap.begin(); }
  inline const_iterator end() const { return FunctionMap.end(); }

  /// Returns the call graph node for the provided function.
  inline const CallGraphNode *operator[](const Function *F) const {
    const_iterator I = FunctionMap.find(F);
    assert(I != FunctionMap.end() && "Function not in callgraph!");
    return I->second.get();
  }

  /// Returns the call graph node for the provided function.
  inline CallGraphNode *operator[](const Function *F) {
    const_iterator I = FunctionMap.find(F);
    assert(I != FunctionMap.end() && "Function not in callgraph!");
    return I->second.get();
  }

  /// Returns the \c CallGraphNode which is used to represent
  /// undetermined calls into the callgraph.
  CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }

  CallGraphNode *getCallsExternalNode() const {
    return CallsExternalNode.get();
  }

  /// Old node has been deleted, and New is to be used in its place; update the
  /// ExternalCallingNode accordingly.
  void ReplaceExternalCallEdge(CallGraphNode *Old, CallGraphNode *New);

  //===---------------------------------------------------------------------
  // Functions to keep a call graph up to date with a function that has been
  // modified.
  //

  /// Unlink the function from this module, returning it.
  ///
  /// Because this removes the function from the module, the call graph node is
  /// destroyed.  This is only valid if the function does not call any other
  /// functions (i.e., there are no edges in its CGN).  The easiest way to do
  /// this is to call dropAllReferences() before calling this.
  Function *removeFunctionFromModule(CallGraphNode *CGN);

  /// Similar to operator[], but this will insert a new CallGraphNode for
  /// \c F if one does not already exist.
  CallGraphNode *getOrInsertFunction(const Function *F);

  /// Populate \p CGN based on the calls inside the associated function.
  void populateCallGraphNode(CallGraphNode *CGN);

  /// Add a function to the call graph, and link the node to all of the
  /// functions that it calls.
  void addToCallGraph(Function *F);
};
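
// Illustrative sketch: build the graph for a module and walk each node's call
// edges (assumes a Module &M in scope).
//
//   CallGraph CG(M);
//   for (const auto &Entry : CG) {
//     const Function *Caller = Entry.first; // null for the external node
//     for (const auto &CallEdge : *Entry.second)
//       if (Function *Callee = CallEdge.second->getFunction())
//         ; // Caller (or an unknown external caller) calls Callee
//   }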

/// A node in the call graph for a module.
///
/// Typically represents a function in the call graph. There are also special
/// "null" nodes used to represent theoretical entries in the call graph.
class CallGraphNode {
public:
  /// A pair of the calling instruction (a call or invoke)
  /// and the call graph node being called.
  /// A call graph node may have two types of call records, each representing
  /// an edge in the call graph: a reference edge or a call edge. Reference
  /// edges are not associated with any call instruction and are created with
  /// the first field set to `std::nullopt`, while real call edges carry the
  /// call instruction in this field. Therefore, all real call edges are
  /// expected to have a value in the first field, and it is not supposed to be
  /// `nullptr`.
  /// Reference edges are used, for example, for connecting a broker function
  /// caller to the callback function for callback call sites.
  using CallRecord = std::pair<std::optional<WeakTrackingVH>, CallGraphNode *>;

  using CalledFunctionsVector = std::vector<CallRecord>;

  /// Creates a node for the specified function.
  inline CallGraphNode(CallGraph *CG, Function *F) : CG(CG), F(F) {}

  CallGraphNode(const CallGraphNode &) = delete;
  CallGraphNode &operator=(const CallGraphNode &) = delete;

  ~CallGraphNode() {
    assert(NumReferences == 0 && "Node deleted while references remain");
  }

  using iterator = std::vector<CallRecord>::iterator;
  using const_iterator = std::vector<CallRecord>::const_iterator;

  /// Returns the function that this call graph node represents.
  Function *getFunction() const { return F; }

  inline iterator begin() { return CalledFunctions.begin(); }
  inline iterator end() { return CalledFunctions.end(); }
  inline const_iterator begin() const { return CalledFunctions.begin(); }
  inline const_iterator end() const { return CalledFunctions.end(); }
  inline bool empty() const { return CalledFunctions.empty(); }
  inline unsigned size() const { return (unsigned)CalledFunctions.size(); }

  /// Returns the number of other CallGraphNodes in this CallGraph that
  /// reference this node in their callee list.
  unsigned getNumReferences() const { return NumReferences; }

  /// Returns the i'th called function.
  CallGraphNode *operator[](unsigned i) const {
    assert(i < CalledFunctions.size() && "Invalid index");
    return CalledFunctions[i].second;
  }

  /// Print out this call graph node.
  void dump() const;
  void print(raw_ostream &OS) const;

  //===---------------------------------------------------------------------
  // Methods to keep a call graph up to date with a function that has been
  // modified
  //

  /// Removes all edges from this CallGraphNode to any functions it
  /// calls.
  void removeAllCalledFunctions() {
    while (!CalledFunctions.empty()) {
      CalledFunctions.back().second->DropRef();
      CalledFunctions.pop_back();
    }
  }

  /// Moves all the callee information from N to this node.
  void stealCalledFunctionsFrom(CallGraphNode *N) {
    assert(CalledFunctions.empty() &&
           "Cannot steal callsite information if I already have some");
    std::swap(CalledFunctions, N->CalledFunctions);
  }

  /// Adds a function to the list of functions called by this one.
  void addCalledFunction(CallBase *Call, CallGraphNode *M) {
    CalledFunctions.emplace_back(Call ? std::optional<WeakTrackingVH>(Call)
                                      : std::optional<WeakTrackingVH>(),
                                 M);
    M->AddRef();
  }

  void removeCallEdge(iterator I) {
    I->second->DropRef();
    *I = CalledFunctions.back();
    CalledFunctions.pop_back();
  }

  /// Removes the edge in the node for the specified call site.
  ///
  /// Note that this method takes linear time, so it should be used sparingly.
  void removeCallEdgeFor(CallBase &Call);

  /// Removes all call edges from this node to the specified callee
  /// function.
  ///
  /// This takes more time to execute than removeCallEdgeTo, so it should not
  /// be used unless necessary.
  void removeAnyCallEdgeTo(CallGraphNode *Callee);

  /// Removes one edge associated with a null callsite from this node to
  /// the specified callee function.
  void removeOneAbstractEdgeTo(CallGraphNode *Callee);

  /// Replaces the edge in the node for the specified call site with a
  /// new one.
  ///
  /// Note that this method takes linear time, so it should be used sparingly.
  void replaceCallEdge(CallBase &Call, CallBase &NewCall,
                       CallGraphNode *NewNode);

private:
  friend class CallGraph;

  CallGraph *CG;
  Function *F;

  std::vector<CallRecord> CalledFunctions;

  /// The number of times that this CallGraphNode occurs in the
  /// CalledFunctions array of this or other CallGraphNodes.
  unsigned NumReferences = 0;

  void DropRef() { --NumReferences; }
  void AddRef() { ++NumReferences; }

  /// A special function that should only be used by the CallGraph class.
  void allReferencesDropped() { NumReferences = 0; }
};
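
// Illustrative sketch: updating edges while rewriting IR (assumes a direct
// call being replaced, with CallBase &OldCall / &NewCall in scope and CG the
// owning CallGraph).
//
//   CallGraphNode *CallerNode = CG[OldCall.getFunction()];
//   CallerNode->removeCallEdgeFor(OldCall);
//   CallerNode->addCalledFunction(&NewCall,
//                                 CG[NewCall.getCalledFunction()]);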

/// An analysis pass to compute the \c CallGraph for a \c Module.
///
/// This class implements the concept of an analysis pass used by the \c
/// ModuleAnalysisManager to run an analysis over a module and cache the
/// resulting data.
class CallGraphAnalysis : public AnalysisInfoMixin<CallGraphAnalysis> {
  friend AnalysisInfoMixin<CallGraphAnalysis>;

  static AnalysisKey Key;

public:
  /// A formulaic type to inform clients of the result type.
  using Result = CallGraph;

  /// Compute the \c CallGraph for the module \c M.
  ///
  /// The real work here is done in the \c CallGraph constructor.
  CallGraph run(Module &M, ModuleAnalysisManager &) { return CallGraph(M); }
};

/// Printer pass for the \c CallGraphAnalysis results.
class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
  raw_ostream &OS;

public:
  explicit CallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// Printer pass for the summarized \c CallGraphAnalysis results.
class CallGraphSCCsPrinterPass
    : public PassInfoMixin<CallGraphSCCsPrinterPass> {
  raw_ostream &OS;

public:
  explicit CallGraphSCCsPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// The \c ModulePass which wraps up a \c CallGraph and the logic to
/// build it.
///
/// This class exposes both the interface to the call graph container and the
/// module pass which runs over a module of IR and produces the call graph. The
/// call graph interface is entirely a wrapper around a \c CallGraph object
/// which is stored internally for each module.
class CallGraphWrapperPass : public ModulePass {
  std::unique_ptr<CallGraph> G;

public:
  static char ID; // Class identification, replacement for typeinfo

  CallGraphWrapperPass();
  ~CallGraphWrapperPass() override;

  /// The internal \c CallGraph around which the rest of this interface
  /// is wrapped.
  const CallGraph &getCallGraph() const { return *G; }
  CallGraph &getCallGraph() { return *G; }

  using iterator = CallGraph::iterator;
  using const_iterator = CallGraph::const_iterator;

  /// Returns the module the call graph corresponds to.
  Module &getModule() const { return G->getModule(); }

  inline iterator begin() { return G->begin(); }
  inline iterator end() { return G->end(); }
  inline const_iterator begin() const { return G->begin(); }
  inline const_iterator end() const { return G->end(); }

  /// Returns the call graph node for the provided function.
  inline const CallGraphNode *operator[](const Function *F) const {
    return (*G)[F];
  }

  /// Returns the call graph node for the provided function.
  inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }

  /// Returns the \c CallGraphNode which is used to represent
  /// undetermined calls into the callgraph.
  CallGraphNode *getExternalCallingNode() const {
    return G->getExternalCallingNode();
  }

  CallGraphNode *getCallsExternalNode() const {
    return G->getCallsExternalNode();
  }

  //===---------------------------------------------------------------------
  // Functions to keep a call graph up to date with a function that has been
  // modified.
  //

  /// Unlink the function from this module, returning it.
  ///
  /// Because this removes the function from the module, the call graph node is
  /// destroyed.  This is only valid if the function does not call any other
  /// functions (i.e., there are no edges in its CGN).  The easiest way to do
  /// this is to call dropAllReferences() before calling this.
  Function *removeFunctionFromModule(CallGraphNode *CGN) {
    return G->removeFunctionFromModule(CGN);
  }

  /// Similar to operator[], but this will insert a new CallGraphNode for
  /// \c F if one does not already exist.
  CallGraphNode *getOrInsertFunction(const Function *F) {
    return G->getOrInsertFunction(F);
  }

  //===---------------------------------------------------------------------
  // Implementation of the ModulePass interface needed here.
  //

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnModule(Module &M) override;
  void releaseMemory() override;

  void print(raw_ostream &o, const Module *) const override;
  void dump() const;
};

//===----------------------------------------------------------------------===//
// GraphTraits specializations for call graphs so that they can be treated as
// graphs by the generic graph algorithms.
//

// Provide graph traits for traversing call graphs using standard graph
// traversals.
template <> struct GraphTraits<CallGraphNode *> {
  using NodeRef = CallGraphNode *;
  using CGNPairTy = CallGraphNode::CallRecord;

  static NodeRef getEntryNode(CallGraphNode *CGN) { return CGN; }
  static CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }

  using ChildIteratorType =
      mapped_iterator<CallGraphNode::iterator, decltype(&CGNGetValue)>;

  static ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N->begin(), &CGNGetValue);
  }

  static ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType(N->end(), &CGNGetValue);
  }
};

template <> struct GraphTraits<const CallGraphNode *> {
  using NodeRef = const CallGraphNode *;
  using CGNPairTy = CallGraphNode::CallRecord;
  using EdgeRef = const CallGraphNode::CallRecord &;

  static NodeRef getEntryNode(const CallGraphNode *CGN) { return CGN; }
  static const CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }

  using ChildIteratorType =
      mapped_iterator<CallGraphNode::const_iterator, decltype(&CGNGetValue)>;
  using ChildEdgeIteratorType = CallGraphNode::const_iterator;

  static ChildIteratorType child_begin(NodeRef N) {
    return ChildIteratorType(N->begin(), &CGNGetValue);
  }

  static ChildIteratorType child_end(NodeRef N) {
    return ChildIteratorType(N->end(), &CGNGetValue);
  }

  static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
    return N->begin();
  }
  static ChildEdgeIteratorType child_edge_end(NodeRef N) { return N->end(); }

  static NodeRef edge_dest(EdgeRef E) { return E.second; }
};

template <>
struct GraphTraits<CallGraph *> : public GraphTraits<CallGraphNode *> {
  using PairTy =
      std::pair<const Function *const, std::unique_ptr<CallGraphNode>>;

  static NodeRef getEntryNode(CallGraph *CGN) {
    return CGN->getExternalCallingNode(); // Start at the external node!
  }

  static CallGraphNode *CGGetValuePtr(const PairTy &P) {
    return P.second.get();
  }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator =
      mapped_iterator<CallGraph::iterator, decltype(&CGGetValuePtr)>;

  static nodes_iterator nodes_begin(CallGraph *CG) {
    return nodes_iterator(CG->begin(), &CGGetValuePtr);
  }

  static nodes_iterator nodes_end(CallGraph *CG) {
    return nodes_iterator(CG->end(), &CGGetValuePtr);
  }
};

template <>
struct GraphTraits<const CallGraph *> : public GraphTraits<
                                            const CallGraphNode *> {
  using PairTy =
      std::pair<const Function *const, std::unique_ptr<CallGraphNode>>;

  static NodeRef getEntryNode(const CallGraph *CGN) {
    return CGN->getExternalCallingNode(); // Start at the external node!
  }

  static const CallGraphNode *CGGetValuePtr(const PairTy &P) {
    return P.second.get();
  }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator =
      mapped_iterator<CallGraph::const_iterator, decltype(&CGGetValuePtr)>;

  static nodes_iterator nodes_begin(const CallGraph *CG) {
    return nodes_iterator(CG->begin(), &CGGetValuePtr);
  }

  static nodes_iterator nodes_end(const CallGraph *CG) {
    return nodes_iterator(CG->end(), &CGGetValuePtr);
  }
};
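
// Illustrative sketch: with the traits above, the generic graph algorithms
// apply directly, e.g. visiting strongly connected components bottom-up
// (assumes llvm/ADT/SCCIterator.h and a CallGraph CG).
//
//   for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I)
//     for (CallGraphNode *N : *I)
//       ; // process one SCC member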

} // end namespace llvm

#endif // LLVM_ANALYSIS_CALLGRAPH_H

Analysis/RegionPrinter.h

//===-- RegionPrinter.h - Region printer external interface -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the region printer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_REGIONPRINTER_H
#define LLVM_ANALYSIS_REGIONPRINTER_H

#include "llvm/Analysis/DOTGraphTraitsPass.h"
#include "llvm/Analysis/RegionInfo.h"

namespace llvm {
  class FunctionPass;
  class Function;
  class RegionInfo;

  FunctionPass *createRegionViewerPass();
  FunctionPass *createRegionOnlyViewerPass();
  FunctionPass *createRegionPrinterPass();
  FunctionPass *createRegionOnlyPrinterPass();

  template <>
  struct DOTGraphTraits<RegionNode *> : public DefaultDOTGraphTraits {
    DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

    std::string getNodeLabel(RegionNode *Node, RegionNode *Graph);
  };

#ifndef NDEBUG
  /// Open a viewer to display the GraphViz visualization of the analysis
  /// result.
  ///
  /// Useful to call in the debugger.
  /// Includes the instructions in each BasicBlock.
  ///
  /// @param RI The analysis to display.
  void viewRegion(llvm::RegionInfo *RI);

  /// Analyze the regions of a function and open its GraphViz
  /// visualization in a viewer.
  ///
  /// Useful to call in the debugger.
  /// Includes the instructions in each BasicBlock.
  /// The result of a new analysis may differ from the RegionInfo the pass
  /// manager currently holds.
  ///
  /// @param F Function to analyze.
  void viewRegion(const llvm::Function *F);

  /// Open a viewer to display the GraphViz visualization of the analysis
  /// result.
  ///
  /// Useful to call in the debugger.
  /// Shows only the BasicBlock names without their instructions.
  ///
  /// @param RI The analysis to display.
  void viewRegionOnly(llvm::RegionInfo *RI);

  /// Analyze the regions of a function and open its GraphViz
  /// visualization in a viewer.
  ///
  /// Useful to call in the debugger.
  /// Shows only the BasicBlock names without their instructions.
  /// The result of a new analysis may differ from the RegionInfo the pass
  /// manager currently holds.
  ///
  /// @param F Function to analyze.
  void viewRegionOnly(const llvm::Function *F);
#endif
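
// Illustrative sketch: in a debug build, the viewers above can be invoked
// directly on an analysis result from ad-hoc debugging code (they are
// compiled out under NDEBUG).
//
//   #ifndef NDEBUG
//   llvm::viewRegionOnly(&RI); // RI is the RegionInfo computed for a function
//   #endif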
} // End llvm namespace

#endif // LLVM_ANALYSIS_REGIONPRINTER_H

Analysis/BranchProbabilityInfo.h

//===- BranchProbabilityInfo.h - Branch Probability Analysis ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <utility>

namespace llvm {

class Function;
class Loop;
class LoopInfo;
class raw_ostream;
class DominatorTree;
class PostDominatorTree;
class TargetLibraryInfo;
class Value;

/// Analysis providing branch probability information.
///
/// This is a function analysis which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
/// defined by a pair (PredBlock and an index in the successors). The
/// probability of an edge from one block is always relative to the
/// probabilities of other edges from the block. The probabilities of all edges
/// from a block sum to exactly one (100%).
/// We use a pair (PredBlock and an index in the successors) to uniquely
/// identify an edge, since we can have multiple edges from Src to Dst.
/// As an example, we can have a switch which jumps to Dst with value 0 and
/// value 10.
///
/// The process of computing branch probabilities can be logically viewed as a
/// three-step process:
///
///   First, if there is profile information associated with the branch then
/// it is trivially translated to branch probabilities. There is one exception
/// to this rule, though. Probabilities for edges leading to "unreachable"
/// blocks (blocks with the estimated weight not greater than
/// UNREACHABLE_WEIGHT) are evaluated according to static estimation and
/// override profile information. If no branch probabilities were calculated
/// in this step then take the next one.
///
///   Second, estimate absolute execution weights for each block based on
/// statically known information. Roots of such information are "cold",
/// "unreachable", "noreturn" and "unwind" blocks. Those blocks get their
/// weights set to BlockExecWeight::COLD, BlockExecWeight::UNREACHABLE,
/// BlockExecWeight::NORETURN and BlockExecWeight::UNWIND respectively. Then the
/// weights are propagated to the other blocks up the domination line. In
/// addition, if all successors have estimated weights set then the maximum of
/// these weights is assigned to the block itself (while this is not an ideal
/// heuristic in theory, it's simple and works reasonably well in most cases)
/// and the process repeats. Once the process of weights propagation converges,
/// branch probabilities are set for all such branches that have at least one
/// successor with the weight set. The default execution weight
/// (BlockExecWeight::DEFAULT) is used for any successor which doesn't have its
/// weight set. For loop back branches we use their weights scaled by a loop
/// trip count equal to
/// 'LBH_TAKEN_WEIGHT/LBH_NOTTAKEN_WEIGHT'.
///
/// Here is a simple example demonstrating how the described algorithm works.
///
///          BB1
///         /   \
///        v     v
///      BB2     BB3
///     /   \
///    v     v
///  ColdBB  UnreachBB
///
/// Initially, ColdBB is associated with COLD_WEIGHT and UnreachBB with
/// UNREACHABLE_WEIGHT. COLD_WEIGHT is set on BB2 as the maximum over its
/// successors. BB1 and BB3 have no explicit estimated weights and are assumed
/// to have DEFAULT_WEIGHT. Based on the assigned weights, branches will have the
/// following probabilities:
/// P(BB1->BB2) = COLD_WEIGHT/(COLD_WEIGHT + DEFAULT_WEIGHT) =
///   0xffff / (0xffff + 0xfffff) = 0.0588(5.9%)
/// P(BB1->BB3) = DEFAULT_WEIGHT/(COLD_WEIGHT + DEFAULT_WEIGHT) =
///          0xfffff / (0xffff + 0xfffff) = 0.941(94.1%)
/// P(BB2->ColdBB) = COLD_WEIGHT/(COLD_WEIGHT + UNREACHABLE_WEIGHT) = 1(100%)
/// P(BB2->UnreachBB) =
///   UNREACHABLE_WEIGHT/(COLD_WEIGHT+UNREACHABLE_WEIGHT) = 0(0%)
///
/// If no branch probabilities were calculated in this step then take the next
/// one.
///
///   Third, apply different kinds of local heuristics for each individual
/// branch until the first match. For example, the probability of a pointer
/// being null is estimated as PH_TAKEN_WEIGHT/(PH_TAKEN_WEIGHT +
/// PH_NONTAKEN_WEIGHT). If no local heuristic matches then the branch is left
/// with no explicit probability set and assumed to have the default
/// probability.
class BranchProbabilityInfo {
public:
  BranchProbabilityInfo() = default;

  BranchProbabilityInfo(const Function &F, const LoopInfo &LI,
                        const TargetLibraryInfo *TLI = nullptr,
                        DominatorTree *DT = nullptr,
                        PostDominatorTree *PDT = nullptr) {
    calculate(F, LI, TLI, DT, PDT);
  }

  BranchProbabilityInfo(BranchProbabilityInfo &&Arg)
      : Probs(std::move(Arg.Probs)), LastF(Arg.LastF),
        EstimatedBlockWeight(std::move(Arg.EstimatedBlockWeight)) {}

  BranchProbabilityInfo(const BranchProbabilityInfo &) = delete;
  BranchProbabilityInfo &operator=(const BranchProbabilityInfo &) = delete;

  BranchProbabilityInfo &operator=(BranchProbabilityInfo &&RHS) {
    releaseMemory();
    Probs = std::move(RHS.Probs);
    EstimatedBlockWeight = std::move(RHS.EstimatedBlockWeight);
    return *this;
  }

  bool invalidate(Function &, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);

  void releaseMemory();

  void print(raw_ostream &OS) const;

  /// Get an edge's probability, relative to other out-edges of the Src.
  ///
  /// This routine provides access to the fractional probability between zero
  /// (0%) and one (100%) of this edge executing, relative to other edges
  /// leaving the 'Src' block. The returned probability is never zero, and can
  /// only be one if the source block has only one successor.
  BranchProbability getEdgeProbability(const BasicBlock *Src,
                                       unsigned IndexInSuccessors) const;

  /// Get the probability of going from Src to Dst.
  ///
  /// It returns the sum of all probabilities for edges from Src to Dst.
  BranchProbability getEdgeProbability(const BasicBlock *Src,
                                       const BasicBlock *Dst) const;

  BranchProbability getEdgeProbability(const BasicBlock *Src,
                                       const_succ_iterator Dst) const;

  /// Test if an edge is hot relative to other out-edges of the Src.
  ///
  /// Check whether this edge out of the source block is 'hot'. We define hot
  /// as having a relative probability >= 80%.
  bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;

  /// Print an edge's probability.
  ///
  /// Retrieves an edge's probability similarly to \see getEdgeProbability, but
  /// then prints that probability to the provided stream. That stream is then
  /// returned.
  raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
                                    const BasicBlock *Dst) const;

public:
  /// Set the raw probabilities for all edges from the given block.
  ///
  /// This allows a pass to explicitly set edge probabilities for a block. It
  /// can be used when updating the CFG to update the branch probability
  /// information.
  void setEdgeProbability(const BasicBlock *Src,
                          const SmallVectorImpl<BranchProbability> &Probs);

  /// Copy outgoing edge probabilities from \p Src to \p Dst.
  ///
  /// This allows keeping probabilities unset for the destination if they were
  /// unset for the source.
  void copyEdgeProbabilities(BasicBlock *Src, BasicBlock *Dst);

  /// Swap the outgoing edge probabilities for \p Src's branch terminator.
  void swapSuccEdgesProbabilities(const BasicBlock *Src);

  static BranchProbability getBranchProbStackProtector(bool IsLikely) {
    static const BranchProbability LikelyProb((1u << 20) - 1, 1u << 20);
    return IsLikely ? LikelyProb : LikelyProb.getCompl();
  }

  void calculate(const Function &F, const LoopInfo &LI,
                 const TargetLibraryInfo *TLI, DominatorTree *DT,
                 PostDominatorTree *PDT);

  /// Forget analysis results for the given basic block.
  void eraseBlock(const BasicBlock *BB);

  // Data structure to track SCCs for handling irreducible loops.
  class SccInfo {
    // Enum of types to classify basic blocks in an SCC. A basic block
    // belonging to an SCC is 'Inner' until it is either 'Header' or 'Exiting'.
    // Note that a basic block can be 'Header' and 'Exiting' at the same time.
    enum SccBlockType {
      Inner = 0x0,
      Header = 0x1,
      Exiting = 0x2,
    };
    // Map of basic blocks to the IDs of the SCCs they belong to. If a basic
    // block doesn't belong to any SCC, it is not in the map.
    using SccMap = DenseMap<const BasicBlock *, int>;
    // Each basic block in an SCC is attributed with one or several types from
    // SccBlockType. The map value has uint32_t type (instead of SccBlockType)
    // since a basic block may be, for example, "Header" and "Exiting" at the
    // same time, and we need to be able to keep more than one value from
    // SccBlockType.
    using SccBlockTypeMap = DenseMap<const BasicBlock *, uint32_t>;
    // Vector containing the classification of basic blocks for all SCCs, where
    // the i'th vector element corresponds to the SCC with ID equal to i.
    using SccBlockTypeMaps = std::vector<SccBlockTypeMap>;

    SccMap SccNums;
    SccBlockTypeMaps SccBlocks;

  public:
    explicit SccInfo(const Function &F);

    /// If \p BB belongs to some SCC then ID of that SCC is returned, otherwise
    /// -1 is returned. If \p BB belongs to more than one SCC at the same time
    /// result is undefined.
    int getSCCNum(const BasicBlock *BB) const;
    /// Returns true if \p BB is a 'header' block in SCC with \p SccNum ID,
    /// false otherwise.
    bool isSCCHeader(const BasicBlock *BB, int SccNum) const {
      return getSccBlockType(BB, SccNum) & Header;
    }
    /// Returns true if \p BB is an 'exiting' block in SCC with \p SccNum ID,
    /// false otherwise.
    bool isSCCExitingBlock(const BasicBlock *BB, int SccNum) const {
      return getSccBlockType(BB, SccNum) & Exiting;
    }
    /// Fills in \p Enters vector with all such blocks that don't belong to
    /// SCC with \p SccNum ID but there is an edge to a block belonging to the
    /// SCC.
    void getSccEnterBlocks(int SccNum,
                           SmallVectorImpl<BasicBlock *> &Enters) const;
    /// Fills in \p Exits vector with all such blocks that don't belong to
    /// SCC with \p SccNum ID but there is an edge from a block belonging to the
    /// SCC.
    void getSccExitBlocks(int SccNum,
                          SmallVectorImpl<BasicBlock *> &Exits) const;

  private:
    /// Returns \p BB's type according to the classification given by the
    /// SccBlockType enum. Please note that \p BB must belong to the SCC with
    /// \p SccNum ID.
    uint32_t getSccBlockType(const BasicBlock *BB, int SccNum) const;
    /// Calculates \p BB's type and stores it in internal data structures for
    /// future use. Please note that \p BB must belong to the SCC with
    /// \p SccNum ID.
    void calculateSccBlockType(const BasicBlock *BB, int SccNum);
  };
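
  // Illustrative sketch: SccInfo can be queried directly once built for a
  // function (assumes Function &F and a BasicBlock &BB inside it).
  //
  //   SccInfo SccI(F);
  //   int SccNum = SccI.getSCCNum(&BB); // -1 if BB is in no SCC
  //   bool IsHeader = SccNum != -1 && SccI.isSCCHeader(&BB, SccNum);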

private:
  // We need to store CallbackVH's in order to correctly handle basic block
  // removal.
  class BasicBlockCallbackVH final : public CallbackVH {
    BranchProbabilityInfo *BPI;

    void deleted() override {
      assert(BPI != nullptr);
      BPI->eraseBlock(cast<BasicBlock>(getValPtr()));
    }

  public:
    BasicBlockCallbackVH(const Value *V, BranchProbabilityInfo *BPI = nullptr)
        : CallbackVH(const_cast<Value *>(V)), BPI(BPI) {}
  };

  /// Pair of Loop and SCC ID number. Used to unify handling of normal and
  /// SCC based loop representations.
  using LoopData = std::pair<Loop *, int>;
  /// Helper class to keep basic block along with its loop data information.
  class LoopBlock {
  public:
    explicit LoopBlock(const BasicBlock *BB, const LoopInfo &LI,
                       const SccInfo &SccI);

    const BasicBlock *getBlock() const { return BB; }
    BasicBlock *getBlock() { return const_cast<BasicBlock *>(BB); }
    LoopData getLoopData() const { return LD; }
    Loop *getLoop() const { return LD.first; }
    int getSccNum() const { return LD.second; }

    bool belongsToLoop() const { return getLoop() || getSccNum() != -1; }
    bool belongsToSameLoop(const LoopBlock &LB) const {
      return (LB.getLoop() && getLoop() == LB.getLoop()) ||
             (LB.getSccNum() != -1 && getSccNum() == LB.getSccNum());
    }

  private:
    const BasicBlock *const BB = nullptr;
    LoopData LD = {nullptr, -1};
  };

  // Pair of LoopBlocks representing an edge from first to second block.
  using LoopEdge = std::pair<const LoopBlock &, const LoopBlock &>;

  DenseSet<BasicBlockCallbackVH, DenseMapInfo<Value*>> Handles;

  // Since we allow duplicate edges from one basic block to another, we use
  // a pair (PredBlock and an index in the successors) to specify an edge.
  using Edge = std::pair<const BasicBlock *, unsigned>;

  DenseMap<Edge, BranchProbability> Probs;

  /// Track the last function we run over for printing.
  const Function *LastF = nullptr;

  const LoopInfo *LI = nullptr;

  /// Keeps information about all SCCs in a function.
  std::unique_ptr<const SccInfo> SccI;

  /// Keeps mapping of a basic block to its estimated weight.
  SmallDenseMap<const BasicBlock *, uint32_t> EstimatedBlockWeight;

  /// Keeps mapping of a loop to estimated weight to enter the loop.
  SmallDenseMap<LoopData, uint32_t> EstimatedLoopWeight;

  /// Helper to construct LoopBlock for \p BB.
  LoopBlock getLoopBlock(const BasicBlock *BB) const {
    return LoopBlock(BB, *LI, *SccI.get());
  }

  /// Returns true if the destination block belongs to some loop and the source
  /// block either doesn't belong to any loop or belongs to a loop which is not
  /// inner relative to the destination block.
  bool isLoopEnteringEdge(const LoopEdge &Edge) const;
  /// Returns true if the source block belongs to some loop and the destination
  /// block either doesn't belong to any loop or belongs to a loop which is not
  /// inner relative to the source block.
  bool isLoopExitingEdge(const LoopEdge &Edge) const;
  /// Returns true if \p Edge either enters or exits some loop, false in all
  /// other cases.
  bool isLoopEnteringExitingEdge(const LoopEdge &Edge) const;
  /// Returns true if the source and destination blocks belong to the same loop
  /// and the destination block is the loop header.
  bool isLoopBackEdge(const LoopEdge &Edge) const;
  // Fills in \p Enters vector with all "enter" blocks to a loop \p LB
  // belongs to.
  void getLoopEnterBlocks(const LoopBlock &LB,
                          SmallVectorImpl<BasicBlock *> &Enters) const;
  // Fills in \p Exits vector with all "exit" blocks from a loop \p LB
  // belongs to.
  void getLoopExitBlocks(const LoopBlock &LB,
                         SmallVectorImpl<BasicBlock *> &Exits) const;

  /// Returns estimated weight for \p BB. std::nullopt if \p BB has no estimated
  /// weight.
  std::optional<uint32_t> getEstimatedBlockWeight(const BasicBlock *BB) const;

  /// Returns the estimated weight to enter \p L. In other words, it is the
  /// weight of the loop's header block not scaled by trip count. Returns
  /// std::nullopt if \p L has no estimated weight.
  std::optional<uint32_t> getEstimatedLoopWeight(const LoopData &L) const;

  /// Returns the estimated weight for \p Edge. Returns std::nullopt if the
  /// estimated weight is unknown.
  std::optional<uint32_t> getEstimatedEdgeWeight(const LoopEdge &Edge) const;

  /// Iterates over all edges leading from \p SrcBB to \p Successors and
  /// returns the maximum of all estimated weights. If at least one edge has an
  /// unknown estimated weight, std::nullopt is returned.
  template <class IterT>
  std::optional<uint32_t>
  getMaxEstimatedEdgeWeight(const LoopBlock &SrcBB,
                            iterator_range<IterT> Successors) const;

  /// If \p LoopBB has no estimated weight then set it to \p BBWeight and
  /// return true. Otherwise \p LoopBB's weight remains unchanged and false is
  /// returned. In addition, all blocks/loops that might need their weight to
  /// be re-estimated are put into BlockWorkList/LoopWorkList.
  bool updateEstimatedBlockWeight(LoopBlock &LoopBB, uint32_t BBWeight,
                                  SmallVectorImpl<BasicBlock *> &BlockWorkList,
                                  SmallVectorImpl<LoopBlock> &LoopWorkList);

  /// Starting from \p LoopBB (including \p LoopBB itself) propagate \p BBWeight
  /// up the domination tree.
  void propagateEstimatedBlockWeight(const LoopBlock &LoopBB, DominatorTree *DT,
                                     PostDominatorTree *PDT, uint32_t BBWeight,
                                     SmallVectorImpl<BasicBlock *> &WorkList,
                                     SmallVectorImpl<LoopBlock> &LoopWorkList);

  /// Returns block's weight encoded in the IR.
  std::optional<uint32_t> getInitialEstimatedBlockWeight(const BasicBlock *BB);

  // Computes estimated weights for all blocks in \p F.
  void computeEestimateBlockWeight(const Function &F, DominatorTree *DT,
                                   PostDominatorTree *PDT);

  /// Based on the weights computed by \p computeEestimateBlockWeight, set
  /// probabilities on branches.
  bool calcEstimatedHeuristics(const BasicBlock *BB);
  bool calcMetadataWeights(const BasicBlock *BB);
  bool calcPointerHeuristics(const BasicBlock *BB);
  bool calcZeroHeuristics(const BasicBlock *BB, const TargetLibraryInfo *TLI);
  bool calcFloatingPointHeuristics(const BasicBlock *BB);
};
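
// Illustrative sketch: computing and querying probabilities for a function
// (assumes a LoopInfo LI already computed for F).
//
//   BranchProbabilityInfo BPI(F, LI);
//   for (const BasicBlock &BB : F) {
//     const Instruction *TI = BB.getTerminator();
//     for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I) {
//       BranchProbability P = BPI.getEdgeProbability(&BB, I);
//       bool Hot = BPI.isEdgeHot(&BB, TI->getSuccessor(I)); // >= 80% ?
//     }
//   }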

/// Analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityAnalysis
    : public AnalysisInfoMixin<BranchProbabilityAnalysis> {
  friend AnalysisInfoMixin<BranchProbabilityAnalysis>;

  static AnalysisKey Key;

public:
  /// Provide the result type for this analysis pass.
  using Result = BranchProbabilityInfo;

  /// Run the analysis pass over a function and produce BPI.
  BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for the \c BranchProbabilityAnalysis results.
class BranchProbabilityPrinterPass
    : public PassInfoMixin<BranchProbabilityPrinterPass> {
  raw_ostream &OS;

public:
  explicit BranchProbabilityPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityInfoWrapperPass : public FunctionPass {
  BranchProbabilityInfo BPI;

public:
  static char ID;

  BranchProbabilityInfoWrapperPass();

  BranchProbabilityInfo &getBPI() { return BPI; }
  const BranchProbabilityInfo &getBPI() const { return BPI; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H

Analysis/ScalarEvolutionAliasAnalysis.h

//===- ScalarEvolutionAliasAnalysis.h - SCEV-based AA -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This is the interface for a SCEV-based alias analysis.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Pass.h"

namespace llvm {

class Function;
class ScalarEvolution;
class SCEV;

/// A simple alias analysis implementation that uses ScalarEvolution to answer
/// queries.
class SCEVAAResult : public AAResultBase {
  ScalarEvolution &SE;

public:
  explicit SCEVAAResult(ScalarEvolution &SE) : SE(SE) {}
  SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI);

  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

private:
  Value *GetBaseValue(const SCEV *S);
};

/// Analysis pass providing a never-invalidated alias analysis result.
class SCEVAA : public AnalysisInfoMixin<SCEVAA> {
  friend AnalysisInfoMixin<SCEVAA>;
  static AnalysisKey Key;

public:
  typedef SCEVAAResult Result;

  SCEVAAResult run(Function &F, FunctionAnalysisManager &AM);
};
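
// Illustrative sketch: SCEVAA is normally registered as one layer of an
// AAManager aggregation rather than used on its own (see AliasAnalysis.h).
//
//   AAManager AA;
//   AA.registerFunctionAnalysis<BasicAA>(); // precise base layer first
//   AA.registerFunctionAnalysis<SCEVAA>();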

/// Legacy wrapper pass to provide the SCEVAAResult object.
class SCEVAAWrapperPass : public FunctionPass {
  std::unique_ptr<SCEVAAResult> Result;

public:
  static char ID;

  SCEVAAWrapperPass();

  SCEVAAResult &getResult() { return *Result; }
  const SCEVAAResult &getResult() const { return *Result; }

  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

/// Creates an instance of \c SCEVAAWrapperPass.
FunctionPass *createSCEVAAWrapperPass();

} // end namespace llvm

#endif // LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H

Analysis/TargetLibraryInfo.def

//===-- TargetLibraryInfo.def - Library information -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

// This .def file will fill in the enum definition, the string representation
// array, or the signature table for TargetLibraryInfo. Which one is produced
// depends on whether TLI_DEFINE_ENUM, TLI_DEFINE_STRING, or TLI_DEFINE_SIG is
// defined. Exactly one should be defined at a time.

// NOTE: The nofree attribute is added to Libfuncs which are not
// listed as free or realloc functions in MemoryBuiltins.cpp
//
// When adding a function which frees memory include the LibFunc
// in lib/Analysis/MemoryBuiltins.cpp "isLibFreeFunction".
//
// When adding a LibFunc which reallocates memory include the LibFunc
// in lib/Analysis/MemoryBuiltins.cpp "AllocationFnData[]".
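
// For reference, a typical consumption pattern (an illustrative sketch): a
// client defines exactly one of the TLI_DEFINE_* macros and then includes this
// file, letting the entries below expand into the desired definition, e.g.
//
//   enum LibFunc : unsigned {
//   #define TLI_DEFINE_ENUM
//   #include "llvm/Analysis/TargetLibraryInfo.def"
//     NumLibFuncs
//   };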

#if (defined(TLI_DEFINE_ENUM) +                 \
     defined(TLI_DEFINE_STRING) +               \
     defined(TLI_DEFINE_SIG) != 1)
#error "Must define exactly one of TLI_DEFINE_ENUM, TLI_DEFINE_STRING, or TLI_DEFINE_SIG for TLI .def."
#else
// Exactly one of TLI_DEFINE_ENUM/STRING/SIG is defined.

#if defined(TLI_DEFINE_ENUM)
#define TLI_DEFINE_ENUM_INTERNAL(enum_variant) LibFunc_##enum_variant,
#define TLI_DEFINE_STRING_INTERNAL(string_repr)
#define TLI_DEFINE_SIG_INTERNAL(...)
#elif defined(TLI_DEFINE_STRING)
#define TLI_DEFINE_ENUM_INTERNAL(enum_variant)
#define TLI_DEFINE_STRING_INTERNAL(string_repr) string_repr,
#define TLI_DEFINE_SIG_INTERNAL(...)
#else
#define TLI_DEFINE_ENUM_INTERNAL(enum_variant)
#define TLI_DEFINE_STRING_INTERNAL(string_repr)
#define TLI_DEFINE_SIG_INTERNAL(...) { __VA_ARGS__ },
#endif

/// void *operator new(unsigned int);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_int)
TLI_DEFINE_STRING_INTERNAL("??2@YAPAXI@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int)

/// void *operator new(unsigned int, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_int_nothrow)
TLI_DEFINE_STRING_INTERNAL("??2@YAPAXIABUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Ptr)

/// void *operator new(unsigned long long);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_longlong)
TLI_DEFINE_STRING_INTERNAL("??2@YAPEAX_K@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, LLong)

/// void *operator new(unsigned long long, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_longlong_nothrow)
TLI_DEFINE_STRING_INTERNAL("??2@YAPEAX_KAEBUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, LLong, Ptr)

/// void operator delete(void*);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32)
TLI_DEFINE_STRING_INTERNAL("??3@YAXPAX@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void operator delete(void*, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32_nothrow)
TLI_DEFINE_STRING_INTERNAL("??3@YAXPAXABUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// void operator delete(void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32_int)
TLI_DEFINE_STRING_INTERNAL("??3@YAXPAXI@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int)

/// void operator delete(void*);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64)
TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAX@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void operator delete(void*, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64_nothrow)
TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAXAEBUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// void operator delete(void*, unsigned long long);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64_longlong)
TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAX_K@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, LLong)

/// void *operator new[](unsigned int);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_int)
TLI_DEFINE_STRING_INTERNAL("??_U@YAPAXI@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int)

/// void *operator new[](unsigned int, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_int_nothrow)
TLI_DEFINE_STRING_INTERNAL("??_U@YAPAXIABUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Ptr)

/// void *operator new[](unsigned long long);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_longlong)
TLI_DEFINE_STRING_INTERNAL("??_U@YAPEAX_K@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, LLong)

/// void *operator new[](unsigned long long, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_longlong_nothrow)
TLI_DEFINE_STRING_INTERNAL("??_U@YAPEAX_KAEBUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Ptr, LLong, Ptr)

/// void operator delete[](void*);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32)
TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAX@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void operator delete[](void*, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32_nothrow)
TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAXABUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// void operator delete[](void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32_int)
TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAXI@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int)

/// void operator delete[](void*);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64)
TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAX@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void operator delete[](void*, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64_nothrow)
TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAXAEBUnothrow_t@std@@@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// void operator delete[](void*, unsigned long long);
TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64_longlong)
TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAX_K@Z")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, LLong)

/// int _IO_getc(_IO_FILE * __fp);
TLI_DEFINE_ENUM_INTERNAL(under_IO_getc)
TLI_DEFINE_STRING_INTERNAL("_IO_getc")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int _IO_putc(int __c, _IO_FILE * __fp);
TLI_DEFINE_ENUM_INTERNAL(under_IO_putc)
TLI_DEFINE_STRING_INTERNAL("_IO_putc")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// void operator delete[](void*);
TLI_DEFINE_ENUM_INTERNAL(ZdaPv)
TLI_DEFINE_STRING_INTERNAL("_ZdaPv")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void operator delete[](void*, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// void operator delete[](void*, std::align_val_t);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, IntPlus)

/// void operator delete[](void*, std::align_val_t, const std::nothrow_t&)
TLI_DEFINE_ENUM_INTERNAL(ZdaPvSt11align_val_tRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvSt11align_val_tRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, IntPlus, Ptr)

/// void operator delete[](void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvj)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvj")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int)

/// void operator delete[](void*, unsigned int, std::align_val_t);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvjSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvjSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int, Int)

/// void operator delete[](void*, unsigned long);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvm)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvm")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Long)

/// void operator delete[](void*, unsigned long, std::align_val_t);
TLI_DEFINE_ENUM_INTERNAL(ZdaPvmSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZdaPvmSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Long, Long)

/// void operator delete(void*);
TLI_DEFINE_ENUM_INTERNAL(ZdlPv)
TLI_DEFINE_STRING_INTERNAL("_ZdlPv")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void operator delete(void*, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// void operator delete(void*, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZdlPvSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, IntPlus)

/// void operator delete(void*, std::align_val_t, const std::nothrow_t&)
TLI_DEFINE_ENUM_INTERNAL(ZdlPvSt11align_val_tRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvSt11align_val_tRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, IntPlus, Ptr)

/// void operator delete(void*, unsigned int);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvj)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvj")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int)

/// void operator delete(void*, unsigned int, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZdlPvjSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvjSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Int, Int)

/// void operator delete(void*, unsigned long);
TLI_DEFINE_ENUM_INTERNAL(ZdlPvm)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvm")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Long)

/// void operator delete(void*, unsigned long, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZdlPvmSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZdlPvmSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Long, Long)

/// void *operator new[](unsigned int);
TLI_DEFINE_ENUM_INTERNAL(Znaj)
TLI_DEFINE_STRING_INTERNAL("_Znaj")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int)

/// void *operator new[](unsigned int, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(ZnajRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnajRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Ptr)

/// void *operator new[](unsigned int, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZnajSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZnajSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Int)

/// void *operator new[](unsigned int, std::align_val_t, const std::nothrow_t&)
TLI_DEFINE_ENUM_INTERNAL(ZnajSt11align_val_tRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnajSt11align_val_tRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Int, Ptr)

/// void *operator new[](unsigned long);
TLI_DEFINE_ENUM_INTERNAL(Znam)
TLI_DEFINE_STRING_INTERNAL("_Znam")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long)

/// void *operator new[](unsigned long, __hot_cold_t)
/// Currently this and other operator new interfaces that take a __hot_cold_t
/// hint are supported by the open source version of tcmalloc, see:
/// https://github.com/google/tcmalloc/blob/master/tcmalloc/new_extension.h
/// and for the definition of the __hot_cold_t parameter see:
/// https://github.com/google/tcmalloc/blob/master/tcmalloc/malloc_extension.h
TLI_DEFINE_ENUM_INTERNAL(Znam12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_Znam12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Bool)
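
// A minimal usage sketch (assuming tcmalloc's extension headers linked
// above; the hint value 0 is illustrative only):
//
//   enum class __hot_cold_t : uint8_t;                // access-frequency hint
//   void *operator new[](std::size_t, __hot_cold_t);  // declared by tcmalloc
//   char *p = new (static_cast<__hot_cold_t>(0)) char[4096];  // "cold" buffer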

/// void *operator new[](unsigned long, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(ZnamRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Ptr)

/// void *operator new[](unsigned long, const std::nothrow_t&, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(ZnamRKSt9nothrow_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamRKSt9nothrow_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Ptr, Bool)

/// void *operator new[](unsigned long, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZnamSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long)

/// void *operator new[](unsigned long, std::align_val_t, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(ZnamSt11align_val_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamSt11align_val_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Bool)

/// void *operator new[](unsigned long, std::align_val_t, const std::nothrow_t&)
TLI_DEFINE_ENUM_INTERNAL(ZnamSt11align_val_tRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamSt11align_val_tRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Ptr)

/// void *operator new[](unsigned long, std::align_val_t, const std::nothrow_t&, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnamSt11align_val_tRKSt9nothrow_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Ptr, Bool)

/// void *operator new(unsigned int);
TLI_DEFINE_ENUM_INTERNAL(Znwj)
TLI_DEFINE_STRING_INTERNAL("_Znwj")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int)

/// void *operator new(unsigned int, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(ZnwjRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwjRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Ptr)

/// void *operator new(unsigned int, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZnwjSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwjSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Int)

/// void *operator new(unsigned int, std::align_val_t, const std::nothrow_t&)
TLI_DEFINE_ENUM_INTERNAL(ZnwjSt11align_val_tRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwjSt11align_val_tRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Int, Ptr)

/// void *operator new(unsigned long);
TLI_DEFINE_ENUM_INTERNAL(Znwm)
TLI_DEFINE_STRING_INTERNAL("_Znwm")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long)

/// void *operator new(unsigned long, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(Znwm12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_Znwm12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Bool)

/// void *operator new(unsigned long, const std::nothrow_t&);
TLI_DEFINE_ENUM_INTERNAL(ZnwmRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Ptr)

/// void *operator new(unsigned long, const std::nothrow_t&, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(ZnwmRKSt9nothrow_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmRKSt9nothrow_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Ptr, Bool)

/// void *operator new(unsigned long, std::align_val_t)
TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long)

/// void *operator new(unsigned long, std::align_val_t, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Bool)

/// void *operator new(unsigned long, std::align_val_t, const std::nothrow_t&)
TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_tRKSt9nothrow_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_tRKSt9nothrow_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Ptr)

/// void *operator new(unsigned long, std::align_val_t, const std::nothrow_t&, __hot_cold_t)
TLI_DEFINE_ENUM_INTERNAL(ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t)
TLI_DEFINE_STRING_INTERNAL("_ZnwmSt11align_val_tRKSt9nothrow_t12__hot_cold_t")
TLI_DEFINE_SIG_INTERNAL(Ptr, Long, Long, Ptr, Bool)

/// double __acos_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(acos_finite)
TLI_DEFINE_STRING_INTERNAL("__acos_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __acosf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(acosf_finite)
TLI_DEFINE_STRING_INTERNAL("__acosf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double __acosh_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(acosh_finite)
TLI_DEFINE_STRING_INTERNAL("__acosh_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __acoshf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(acoshf_finite)
TLI_DEFINE_STRING_INTERNAL("__acoshf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __acoshl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(acoshl_finite)
TLI_DEFINE_STRING_INTERNAL("__acoshl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double __acosl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(acosl_finite)
TLI_DEFINE_STRING_INTERNAL("__acosl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __asin_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(asin_finite)
TLI_DEFINE_STRING_INTERNAL("__asin_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __asinf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(asinf_finite)
TLI_DEFINE_STRING_INTERNAL("__asinf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __asinl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(asinl_finite)
TLI_DEFINE_STRING_INTERNAL("__asinl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __atan2_finite(double y, double x);
TLI_DEFINE_ENUM_INTERNAL(atan2_finite)
TLI_DEFINE_STRING_INTERNAL("__atan2_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float __atan2f_finite(float y, float x);
TLI_DEFINE_ENUM_INTERNAL(atan2f_finite)
TLI_DEFINE_STRING_INTERNAL("__atan2f_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double __atan2l_finite(long double y, long double x);
TLI_DEFINE_ENUM_INTERNAL(atan2l_finite)
TLI_DEFINE_STRING_INTERNAL("__atan2l_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// double __atanh_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(atanh_finite)
TLI_DEFINE_STRING_INTERNAL("__atanh_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __atanhf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(atanhf_finite)
TLI_DEFINE_STRING_INTERNAL("__atanhf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __atanhl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(atanhl_finite)
TLI_DEFINE_STRING_INTERNAL("__atanhl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// void __atomic_load(size_t size, void *mptr, void *vptr, int smodel);
TLI_DEFINE_ENUM_INTERNAL(atomic_load)
TLI_DEFINE_STRING_INTERNAL("__atomic_load")
TLI_DEFINE_SIG_INTERNAL(Void, SizeT, Ptr, Ptr, Int)

/// void __atomic_store(size_t size, void *mptr, void *vptr, int smodel);
TLI_DEFINE_ENUM_INTERNAL(atomic_store)
TLI_DEFINE_STRING_INTERNAL("__atomic_store")
TLI_DEFINE_SIG_INTERNAL(Void, SizeT, Ptr, Ptr, Int)
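
// Illustrative call pattern for the two libatomic entry points above (a
// sketch; the final argument is a memory-order constant such as
// __ATOMIC_SEQ_CST, and vptr is a local buffer of the same size as the
// atomic object):
//
//   struct T local;
//   __atomic_load(sizeof(struct T), &shared, &local, __ATOMIC_SEQ_CST);
//   __atomic_store(sizeof(struct T), &shared, &local, __ATOMIC_SEQ_CST);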

/// double __cosh_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(cosh_finite)
TLI_DEFINE_STRING_INTERNAL("__cosh_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __coshf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(coshf_finite)
TLI_DEFINE_STRING_INTERNAL("__coshf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __coshl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(coshl_finite)
TLI_DEFINE_STRING_INTERNAL("__coshl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __cospi(double x);
TLI_DEFINE_ENUM_INTERNAL(cospi)
TLI_DEFINE_STRING_INTERNAL("__cospi")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __cospif(float x);
TLI_DEFINE_ENUM_INTERNAL(cospif)
TLI_DEFINE_STRING_INTERNAL("__cospif")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
TLI_DEFINE_ENUM_INTERNAL(cxa_atexit)
TLI_DEFINE_STRING_INTERNAL("__cxa_atexit")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// void __cxa_guard_abort(guard_t *guard);
/// guard_t is int64_t in the Itanium ABI or int32_t on ARM EABI.
TLI_DEFINE_ENUM_INTERNAL(cxa_guard_abort)
TLI_DEFINE_STRING_INTERNAL("__cxa_guard_abort")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// int __cxa_guard_acquire(guard_t *guard);
TLI_DEFINE_ENUM_INTERNAL(cxa_guard_acquire)
TLI_DEFINE_STRING_INTERNAL("__cxa_guard_acquire")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// void __cxa_guard_release(guard_t *guard);
TLI_DEFINE_ENUM_INTERNAL(cxa_guard_release)
TLI_DEFINE_STRING_INTERNAL("__cxa_guard_release")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)
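
// Together, the three __cxa_guard_* functions implement the Itanium C++ ABI
// protocol for thread-safe initialization of function-local statics; the
// compiler emits code along these lines around the initializer:
//
//   if (__cxa_guard_acquire(&guard)) {  // nonzero => initialization needed
//     // ... run the constructor; on exception, call __cxa_guard_abort ...
//     __cxa_guard_release(&guard);      // mark the object initialized
//   }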

/// double __exp10_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(exp10_finite)
TLI_DEFINE_STRING_INTERNAL("__exp10_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __exp10f_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(exp10f_finite)
TLI_DEFINE_STRING_INTERNAL("__exp10f_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __exp10l_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(exp10l_finite)
TLI_DEFINE_STRING_INTERNAL("__exp10l_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __exp2_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(exp2_finite)
TLI_DEFINE_STRING_INTERNAL("__exp2_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __exp2f_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(exp2f_finite)
TLI_DEFINE_STRING_INTERNAL("__exp2f_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __exp2l_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(exp2l_finite)
TLI_DEFINE_STRING_INTERNAL("__exp2l_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __exp_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(exp_finite)
TLI_DEFINE_STRING_INTERNAL("__exp_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __expf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(expf_finite)
TLI_DEFINE_STRING_INTERNAL("__expf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __expl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(expl_finite)
TLI_DEFINE_STRING_INTERNAL("__expl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int __isoc99_scanf(const char *format, ...)
TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_scanf)
TLI_DEFINE_STRING_INTERNAL("__isoc99_scanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ellip)

/// int __isoc99_sscanf(const char *s, const char *format, ...)
TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_sscanf)
TLI_DEFINE_STRING_INTERNAL("__isoc99_sscanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)
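
// glibc redirects the scanf family to these __isoc99_* entry points when
// C99-conforming behavior is requested, so that "%a" is parsed as the hex
// float conversion rather than the legacy GNU allocation modifier.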

/// void* __kmpc_alloc_shared(size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(__kmpc_alloc_shared)
TLI_DEFINE_STRING_INTERNAL("__kmpc_alloc_shared")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT)

/// void __kmpc_free_shared(void *ptr, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(__kmpc_free_shared)
TLI_DEFINE_STRING_INTERNAL("__kmpc_free_shared")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, SizeT)

/// double __log10_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(log10_finite)
TLI_DEFINE_STRING_INTERNAL("__log10_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __log10f_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(log10f_finite)
TLI_DEFINE_STRING_INTERNAL("__log10f_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __log10l_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(log10l_finite)
TLI_DEFINE_STRING_INTERNAL("__log10l_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __log2_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(log2_finite)
TLI_DEFINE_STRING_INTERNAL("__log2_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __log2f_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(log2f_finite)
TLI_DEFINE_STRING_INTERNAL("__log2f_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __log2l_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(log2l_finite)
TLI_DEFINE_STRING_INTERNAL("__log2l_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __log_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(log_finite)
TLI_DEFINE_STRING_INTERNAL("__log_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __logf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(logf_finite)
TLI_DEFINE_STRING_INTERNAL("__logf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __logl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(logl_finite)
TLI_DEFINE_STRING_INTERNAL("__logl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// void *__memccpy_chk(void *dst, const void *src, int c, size_t n,
///                     size_t dstsize)
TLI_DEFINE_ENUM_INTERNAL(memccpy_chk)
TLI_DEFINE_STRING_INTERNAL("__memccpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, Int, SizeT, SizeT)

/// void *__memcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__memcpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT, SizeT)

/// void *__memmove_chk(void *s1, const void *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memmove_chk)
TLI_DEFINE_STRING_INTERNAL("__memmove_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT, SizeT)

/// void *__mempcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(mempcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__mempcpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT, SizeT)

/// void *__memset_chk(void *s, int v, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(memset_chk)
TLI_DEFINE_STRING_INTERNAL("__memset_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int, SizeT, SizeT)
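
// The __mem*_chk functions above are the glibc _FORTIFY_SOURCE variants: the
// extra trailing size argument is the destination object size. A sketch of
// the lowering performed by fortified headers:
//
//   memcpy(dst, src, n);
//   // becomes, when the destination size is known at compile time:
//   __memcpy_chk(dst, src, n, __builtin_object_size(dst, 0));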

/// int __nvvm_reflect(const char *);
TLI_DEFINE_ENUM_INTERNAL(nvvm_reflect)
TLI_DEFINE_STRING_INTERNAL("__nvvm_reflect")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// double __pow_finite(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(pow_finite)
TLI_DEFINE_STRING_INTERNAL("__pow_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float __powf_finite(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(powf_finite)
TLI_DEFINE_STRING_INTERNAL("__powf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double __powl_finite(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(powl_finite)
TLI_DEFINE_STRING_INTERNAL("__powl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// double __sincospi_stret(double x);
TLI_DEFINE_ENUM_INTERNAL(sincospi_stret)
TLI_DEFINE_STRING_INTERNAL("__sincospi_stret")
TLI_DEFINE_SIG_INTERNAL(/* Checked manually. */)

/// float __sincospif_stret(float x);
TLI_DEFINE_ENUM_INTERNAL(sincospif_stret)
TLI_DEFINE_STRING_INTERNAL("__sincospif_stret")
TLI_DEFINE_SIG_INTERNAL(/* Checked manually. */)
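
// The *_stret variants return sin(pi*x) and cos(pi*x) together in a
// two-element struct ("stret" = struct return), so their signatures depend
// on the target ABI and are checked in code rather than described here.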

/// double __sinh_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(sinh_finite)
TLI_DEFINE_STRING_INTERNAL("__sinh_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __sinhf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(sinhf_finite)
TLI_DEFINE_STRING_INTERNAL("__sinhf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __sinhl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(sinhl_finite)
TLI_DEFINE_STRING_INTERNAL("__sinhl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double __sinpi(double x);
TLI_DEFINE_ENUM_INTERNAL(sinpi)
TLI_DEFINE_STRING_INTERNAL("__sinpi")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __sinpif(float x);
TLI_DEFINE_ENUM_INTERNAL(sinpif)
TLI_DEFINE_STRING_INTERNAL("__sinpif")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// int __small_fprintf(FILE *stream, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(small_fprintf)
TLI_DEFINE_STRING_INTERNAL("__small_fprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int __small_printf(const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(small_printf)
TLI_DEFINE_STRING_INTERNAL("__small_printf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ellip)

/// int __small_sprintf(char *str, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(small_sprintf)
TLI_DEFINE_STRING_INTERNAL("__small_sprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int __snprintf_chk(char *s, size_t n, int flags, size_t slen,
///                    const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(snprintf_chk)
TLI_DEFINE_STRING_INTERNAL("__snprintf_chk")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, SizeT, Int, SizeT, Ptr, Ellip)

/// int __sprintf_chk(char *str, int flags, size_t str_len,
///                   const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(sprintf_chk)
TLI_DEFINE_STRING_INTERNAL("__sprintf_chk")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Int, SizeT, Ptr, Ellip)

/// double __sqrt_finite(double x);
TLI_DEFINE_ENUM_INTERNAL(sqrt_finite)
TLI_DEFINE_STRING_INTERNAL("__sqrt_finite")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float __sqrtf_finite(float x);
TLI_DEFINE_ENUM_INTERNAL(sqrtf_finite)
TLI_DEFINE_STRING_INTERNAL("__sqrtf_finite")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double __sqrtl_finite(long double x);
TLI_DEFINE_ENUM_INTERNAL(sqrtl_finite)
TLI_DEFINE_STRING_INTERNAL("__sqrtl_finite")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// char *__stpcpy_chk(char *s1, const char *s2, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(stpcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__stpcpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// char *__stpncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(stpncpy_chk)
TLI_DEFINE_STRING_INTERNAL("__stpncpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT, SizeT)

/// char *__strcat_chk(char *s1, const char *s2, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strcat_chk)
TLI_DEFINE_STRING_INTERNAL("__strcat_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// char *__strcpy_chk(char *s1, const char *s2, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__strcpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// char * __strdup(const char *s);
TLI_DEFINE_ENUM_INTERNAL(dunder_strdup)
TLI_DEFINE_STRING_INTERNAL("__strdup")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// size_t __strlcat_chk(char *dst, const char *src, size_t size,
///                      size_t dstsize);
TLI_DEFINE_ENUM_INTERNAL(strlcat_chk)
TLI_DEFINE_STRING_INTERNAL("__strlcat_chk")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr, SizeT, SizeT)

/// size_t __strlcpy_chk(char *dst, const char *src, size_t size,
///                      size_t dstsize);
TLI_DEFINE_ENUM_INTERNAL(strlcpy_chk)
TLI_DEFINE_STRING_INTERNAL("__strlcpy_chk")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr, SizeT, SizeT)

/// size_t __strlen_chk(const char *s1, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strlen_chk)
TLI_DEFINE_STRING_INTERNAL("__strlen_chk")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, SizeT)

/// char *__strncat_chk(char *s1, const char *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strncat_chk)
TLI_DEFINE_STRING_INTERNAL("__strncat_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT, SizeT)

/// char *__strncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
TLI_DEFINE_ENUM_INTERNAL(strncpy_chk)
TLI_DEFINE_STRING_INTERNAL("__strncpy_chk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT, SizeT)

/// char *__strndup(const char *s, size_t n);
TLI_DEFINE_ENUM_INTERNAL(dunder_strndup)
TLI_DEFINE_STRING_INTERNAL("__strndup")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, SizeT)

/// char * __strtok_r(char *s, const char *delim, char **save_ptr);
TLI_DEFINE_ENUM_INTERNAL(dunder_strtok_r)
TLI_DEFINE_STRING_INTERNAL("__strtok_r")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, Ptr)

/// int __vsnprintf_chk(char *s, size_t n, int flags, size_t slen,
///                     const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vsnprintf_chk)
TLI_DEFINE_STRING_INTERNAL("__vsnprintf_chk")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, SizeT, Int, SizeT, Ptr, Ptr)

/// int __vsprintf_chk(char *s, int flags, size_t slen, const char *format,
///                    va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vsprintf_chk)
TLI_DEFINE_STRING_INTERNAL("__vsprintf_chk")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Int, SizeT, Ptr, Ptr)

/// int abs(int j);
TLI_DEFINE_ENUM_INTERNAL(abs)
TLI_DEFINE_STRING_INTERNAL("abs")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// int access(const char *path, int amode);
TLI_DEFINE_ENUM_INTERNAL(access)
TLI_DEFINE_STRING_INTERNAL("access")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Int)

/// double acos(double x);
TLI_DEFINE_ENUM_INTERNAL(acos)
TLI_DEFINE_STRING_INTERNAL("acos")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float acosf(float x);
TLI_DEFINE_ENUM_INTERNAL(acosf)
TLI_DEFINE_STRING_INTERNAL("acosf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double acosh(double x);
TLI_DEFINE_ENUM_INTERNAL(acosh)
TLI_DEFINE_STRING_INTERNAL("acosh")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float acoshf(float x);
TLI_DEFINE_ENUM_INTERNAL(acoshf)
TLI_DEFINE_STRING_INTERNAL("acoshf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double acoshl(long double x);
TLI_DEFINE_ENUM_INTERNAL(acoshl)
TLI_DEFINE_STRING_INTERNAL("acoshl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double acosl(long double x);
TLI_DEFINE_ENUM_INTERNAL(acosl)
TLI_DEFINE_STRING_INTERNAL("acosl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// void *aligned_alloc(size_t alignment, size_t size);
TLI_DEFINE_ENUM_INTERNAL(aligned_alloc)
TLI_DEFINE_STRING_INTERNAL("aligned_alloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT, SizeT)

/// double asin(double x);
TLI_DEFINE_ENUM_INTERNAL(asin)
TLI_DEFINE_STRING_INTERNAL("asin")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float asinf(float x);
TLI_DEFINE_ENUM_INTERNAL(asinf)
TLI_DEFINE_STRING_INTERNAL("asinf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double asinh(double x);
TLI_DEFINE_ENUM_INTERNAL(asinh)
TLI_DEFINE_STRING_INTERNAL("asinh")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float asinhf(float x);
TLI_DEFINE_ENUM_INTERNAL(asinhf)
TLI_DEFINE_STRING_INTERNAL("asinhf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double asinhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(asinhl)
TLI_DEFINE_STRING_INTERNAL("asinhl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double asinl(long double x);
TLI_DEFINE_ENUM_INTERNAL(asinl)
TLI_DEFINE_STRING_INTERNAL("asinl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double atan(double x);
TLI_DEFINE_ENUM_INTERNAL(atan)
TLI_DEFINE_STRING_INTERNAL("atan")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// double atan2(double y, double x);
TLI_DEFINE_ENUM_INTERNAL(atan2)
TLI_DEFINE_STRING_INTERNAL("atan2")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float atan2f(float y, float x);
TLI_DEFINE_ENUM_INTERNAL(atan2f)
TLI_DEFINE_STRING_INTERNAL("atan2f")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double atan2l(long double y, long double x);
TLI_DEFINE_ENUM_INTERNAL(atan2l)
TLI_DEFINE_STRING_INTERNAL("atan2l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// float atanf(float x);
TLI_DEFINE_ENUM_INTERNAL(atanf)
TLI_DEFINE_STRING_INTERNAL("atanf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double atanh(double x);
TLI_DEFINE_ENUM_INTERNAL(atanh)
TLI_DEFINE_STRING_INTERNAL("atanh")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float atanhf(float x);
TLI_DEFINE_ENUM_INTERNAL(atanhf)
TLI_DEFINE_STRING_INTERNAL("atanhf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double atanhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(atanhl)
TLI_DEFINE_STRING_INTERNAL("atanhl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double atanl(long double x);
TLI_DEFINE_ENUM_INTERNAL(atanl)
TLI_DEFINE_STRING_INTERNAL("atanl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double atof(const char *str);
TLI_DEFINE_ENUM_INTERNAL(atof)
TLI_DEFINE_STRING_INTERNAL("atof")
TLI_DEFINE_SIG_INTERNAL(Dbl, Ptr)

/// int atoi(const char *str);
TLI_DEFINE_ENUM_INTERNAL(atoi)
TLI_DEFINE_STRING_INTERNAL("atoi")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// long atol(const char *str);
TLI_DEFINE_ENUM_INTERNAL(atol)
TLI_DEFINE_STRING_INTERNAL("atol")
TLI_DEFINE_SIG_INTERNAL(Long, Ptr)

/// long long atoll(const char *nptr);
TLI_DEFINE_ENUM_INTERNAL(atoll)
TLI_DEFINE_STRING_INTERNAL("atoll")
TLI_DEFINE_SIG_INTERNAL(LLong, Ptr)

/// int bcmp(const void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(bcmp)
TLI_DEFINE_STRING_INTERNAL("bcmp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, SizeT)

/// void bcopy(const void *s1, void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(bcopy)
TLI_DEFINE_STRING_INTERNAL("bcopy")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr, SizeT)

/// void bzero(void *s, size_t n);
TLI_DEFINE_ENUM_INTERNAL(bzero)
TLI_DEFINE_STRING_INTERNAL("bzero")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, SizeT)

/// double cabs(double complex z)
TLI_DEFINE_ENUM_INTERNAL(cabs)
TLI_DEFINE_STRING_INTERNAL("cabs")
TLI_DEFINE_SIG_INTERNAL(/* Checked manually. */)

/// float cabsf(float complex z)
TLI_DEFINE_ENUM_INTERNAL(cabsf)
TLI_DEFINE_STRING_INTERNAL("cabsf")
TLI_DEFINE_SIG_INTERNAL(/* Checked manually. */)

/// long double cabsl(long double complex z)
TLI_DEFINE_ENUM_INTERNAL(cabsl)
TLI_DEFINE_STRING_INTERNAL("cabsl")
TLI_DEFINE_SIG_INTERNAL(/* Checked manually. */)
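
// A _Complex argument may be passed as two scalars, as a struct, or by
// pointer depending on the target ABI, so the cabs* signatures are likewise
// checked in code rather than described here.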

/// void *calloc(size_t count, size_t size);
TLI_DEFINE_ENUM_INTERNAL(calloc)
TLI_DEFINE_STRING_INTERNAL("calloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT, SizeT)

/// double cbrt(double x);
TLI_DEFINE_ENUM_INTERNAL(cbrt)
TLI_DEFINE_STRING_INTERNAL("cbrt")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float cbrtf(float x);
TLI_DEFINE_ENUM_INTERNAL(cbrtf)
TLI_DEFINE_STRING_INTERNAL("cbrtf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double cbrtl(long double x);
TLI_DEFINE_ENUM_INTERNAL(cbrtl)
TLI_DEFINE_STRING_INTERNAL("cbrtl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double ceil(double x);
TLI_DEFINE_ENUM_INTERNAL(ceil)
TLI_DEFINE_STRING_INTERNAL("ceil")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float ceilf(float x);
TLI_DEFINE_ENUM_INTERNAL(ceilf)
TLI_DEFINE_STRING_INTERNAL("ceilf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double ceill(long double x);
TLI_DEFINE_ENUM_INTERNAL(ceill)
TLI_DEFINE_STRING_INTERNAL("ceill")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int chmod(const char *path, mode_t mode);
TLI_DEFINE_ENUM_INTERNAL(chmod)
TLI_DEFINE_STRING_INTERNAL("chmod")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, IntX)

/// int chown(const char *path, uid_t owner, gid_t group);
TLI_DEFINE_ENUM_INTERNAL(chown)
TLI_DEFINE_STRING_INTERNAL("chown")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, IntX, IntX)

/// void clearerr(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(clearerr)
TLI_DEFINE_STRING_INTERNAL("clearerr")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// int closedir(DIR *dirp);
TLI_DEFINE_ENUM_INTERNAL(closedir)
TLI_DEFINE_STRING_INTERNAL("closedir")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// double copysign(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(copysign)
TLI_DEFINE_STRING_INTERNAL("copysign")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float copysignf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(copysignf)
TLI_DEFINE_STRING_INTERNAL("copysignf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double copysignl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(copysignl)
TLI_DEFINE_STRING_INTERNAL("copysignl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// double cos(double x);
TLI_DEFINE_ENUM_INTERNAL(cos)
TLI_DEFINE_STRING_INTERNAL("cos")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float cosf(float x);
TLI_DEFINE_ENUM_INTERNAL(cosf)
TLI_DEFINE_STRING_INTERNAL("cosf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double cosh(double x);
TLI_DEFINE_ENUM_INTERNAL(cosh)
TLI_DEFINE_STRING_INTERNAL("cosh")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float coshf(float x);
TLI_DEFINE_ENUM_INTERNAL(coshf)
TLI_DEFINE_STRING_INTERNAL("coshf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double coshl(long double x);
TLI_DEFINE_ENUM_INTERNAL(coshl)
TLI_DEFINE_STRING_INTERNAL("coshl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double cosl(long double x);
TLI_DEFINE_ENUM_INTERNAL(cosl)
TLI_DEFINE_STRING_INTERNAL("cosl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// char *ctermid(char *s);
TLI_DEFINE_ENUM_INTERNAL(ctermid)
TLI_DEFINE_STRING_INTERNAL("ctermid")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// int execl(const char *path, const char *arg, ...);
TLI_DEFINE_ENUM_INTERNAL(execl)
TLI_DEFINE_STRING_INTERNAL("execl")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int execle(const char *file, const char *arg, ..., char * const envp[]);
TLI_DEFINE_ENUM_INTERNAL(execle)
TLI_DEFINE_STRING_INTERNAL("execle")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int execlp(const char *file, const char *arg, ...);
TLI_DEFINE_ENUM_INTERNAL(execlp)
TLI_DEFINE_STRING_INTERNAL("execlp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int execv(const char *path, char *const argv[]);
TLI_DEFINE_ENUM_INTERNAL(execv)
TLI_DEFINE_STRING_INTERNAL("execv")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int execvP(const char *file, const char *search_path, char *const argv[]);
TLI_DEFINE_ENUM_INTERNAL(execvP)
TLI_DEFINE_STRING_INTERNAL("execvP")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// int execve(const char *filename, char *const argv[], char *const envp[]);
TLI_DEFINE_ENUM_INTERNAL(execve)
TLI_DEFINE_STRING_INTERNAL("execve")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// int execvp(const char *file, char *const argv[]);
TLI_DEFINE_ENUM_INTERNAL(execvp)
TLI_DEFINE_STRING_INTERNAL("execvp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int execvpe(const char *file, char *const argv[], char *const envp[]);
TLI_DEFINE_ENUM_INTERNAL(execvpe)
TLI_DEFINE_STRING_INTERNAL("execvpe")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// double exp(double x);
TLI_DEFINE_ENUM_INTERNAL(exp)
TLI_DEFINE_STRING_INTERNAL("exp")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// double exp10(double x);
TLI_DEFINE_ENUM_INTERNAL(exp10)
TLI_DEFINE_STRING_INTERNAL("exp10")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float exp10f(float x);
TLI_DEFINE_ENUM_INTERNAL(exp10f)
TLI_DEFINE_STRING_INTERNAL("exp10f")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double exp10l(long double x);
TLI_DEFINE_ENUM_INTERNAL(exp10l)
TLI_DEFINE_STRING_INTERNAL("exp10l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double exp2(double x);
TLI_DEFINE_ENUM_INTERNAL(exp2)
TLI_DEFINE_STRING_INTERNAL("exp2")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float exp2f(float x);
TLI_DEFINE_ENUM_INTERNAL(exp2f)
TLI_DEFINE_STRING_INTERNAL("exp2f")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double exp2l(long double x);
TLI_DEFINE_ENUM_INTERNAL(exp2l)
TLI_DEFINE_STRING_INTERNAL("exp2l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// float expf(float x);
TLI_DEFINE_ENUM_INTERNAL(expf)
TLI_DEFINE_STRING_INTERNAL("expf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double expl(long double x);
TLI_DEFINE_ENUM_INTERNAL(expl)
TLI_DEFINE_STRING_INTERNAL("expl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double expm1(double x);
TLI_DEFINE_ENUM_INTERNAL(expm1)
TLI_DEFINE_STRING_INTERNAL("expm1")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float expm1f(float x);
TLI_DEFINE_ENUM_INTERNAL(expm1f)
TLI_DEFINE_STRING_INTERNAL("expm1f")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double expm1l(long double x);
TLI_DEFINE_ENUM_INTERNAL(expm1l)
TLI_DEFINE_STRING_INTERNAL("expm1l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double fabs(double x);
TLI_DEFINE_ENUM_INTERNAL(fabs)
TLI_DEFINE_STRING_INTERNAL("fabs")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float fabsf(float x);
TLI_DEFINE_ENUM_INTERNAL(fabsf)
TLI_DEFINE_STRING_INTERNAL("fabsf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double fabsl(long double x);
TLI_DEFINE_ENUM_INTERNAL(fabsl)
TLI_DEFINE_STRING_INTERNAL("fabsl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int fclose(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fclose)
TLI_DEFINE_STRING_INTERNAL("fclose")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// FILE *fdopen(int fildes, const char *mode);
TLI_DEFINE_ENUM_INTERNAL(fdopen)
TLI_DEFINE_STRING_INTERNAL("fdopen")
TLI_DEFINE_SIG_INTERNAL(Ptr, Int, Ptr)

/// int feof(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(feof)
TLI_DEFINE_STRING_INTERNAL("feof")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int ferror(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ferror)
TLI_DEFINE_STRING_INTERNAL("ferror")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int fflush(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fflush)
TLI_DEFINE_STRING_INTERNAL("fflush")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int ffs(int i);
TLI_DEFINE_ENUM_INTERNAL(ffs)
TLI_DEFINE_STRING_INTERNAL("ffs")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// int ffsl(long int i);
TLI_DEFINE_ENUM_INTERNAL(ffsl)
TLI_DEFINE_STRING_INTERNAL("ffsl")
TLI_DEFINE_SIG_INTERNAL(Int, Long)

/// int ffsll(long long int i);
TLI_DEFINE_ENUM_INTERNAL(ffsll)
TLI_DEFINE_STRING_INTERNAL("ffsll")
TLI_DEFINE_SIG_INTERNAL(Int, LLong)

/// int fgetc(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgetc)
TLI_DEFINE_STRING_INTERNAL("fgetc")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int fgetc_unlocked(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgetc_unlocked)
TLI_DEFINE_STRING_INTERNAL("fgetc_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int fgetpos(FILE *stream, fpos_t *pos);
TLI_DEFINE_ENUM_INTERNAL(fgetpos)
TLI_DEFINE_STRING_INTERNAL("fgetpos")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// char *fgets(char *s, int n, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgets)
TLI_DEFINE_STRING_INTERNAL("fgets")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int, Ptr)

/// char *fgets_unlocked(char *s, int n, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fgets_unlocked)
TLI_DEFINE_STRING_INTERNAL("fgets_unlocked")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int, Ptr)

/// int fileno(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fileno)
TLI_DEFINE_STRING_INTERNAL("fileno")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int fiprintf(FILE *stream, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(fiprintf)
TLI_DEFINE_STRING_INTERNAL("fiprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// void flockfile(FILE *file);
TLI_DEFINE_ENUM_INTERNAL(flockfile)
TLI_DEFINE_STRING_INTERNAL("flockfile")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// double floor(double x);
TLI_DEFINE_ENUM_INTERNAL(floor)
TLI_DEFINE_STRING_INTERNAL("floor")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float floorf(float x);
TLI_DEFINE_ENUM_INTERNAL(floorf)
TLI_DEFINE_STRING_INTERNAL("floorf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double floorl(long double x);
TLI_DEFINE_ENUM_INTERNAL(floorl)
TLI_DEFINE_STRING_INTERNAL("floorl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int fls(int i);
TLI_DEFINE_ENUM_INTERNAL(fls)
TLI_DEFINE_STRING_INTERNAL("fls")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// int flsl(long int i);
TLI_DEFINE_ENUM_INTERNAL(flsl)
TLI_DEFINE_STRING_INTERNAL("flsl")
TLI_DEFINE_SIG_INTERNAL(Int, Long)

/// int flsll(long long int i);
TLI_DEFINE_ENUM_INTERNAL(flsll)
TLI_DEFINE_STRING_INTERNAL("flsll")
TLI_DEFINE_SIG_INTERNAL(Int, LLong)
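
// ffs* return the 1-based index of the least significant set bit (0 for a
// zero argument); the BSD fls* return the 1-based index of the most
// significant set bit. For example, ffs(0x18) == 4 and fls(0x18) == 5.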

// Calls to the fmax and fmin library functions expand to the llvm.maxnum and
// llvm.minnum intrinsics with the correct parameter types for the arguments
// (all types must match).
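// For example, a call such as
//   %r = call double @fmax(double %x, double %y)
// may be rewritten as
//   %r = call double @llvm.maxnum.f64(double %x, double %y)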
/// double fmax(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(fmax)
TLI_DEFINE_STRING_INTERNAL("fmax")
TLI_DEFINE_SIG_INTERNAL(Floating, Same, Same)

/// float fmaxf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(fmaxf)
TLI_DEFINE_STRING_INTERNAL("fmaxf")
TLI_DEFINE_SIG_INTERNAL(Floating, Same, Same)

/// long double fmaxl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(fmaxl)
TLI_DEFINE_STRING_INTERNAL("fmaxl")
TLI_DEFINE_SIG_INTERNAL(Floating, Same, Same)

/// double fmin(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(fmin)
TLI_DEFINE_STRING_INTERNAL("fmin")
TLI_DEFINE_SIG_INTERNAL(Floating, Same, Same)

/// float fminf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(fminf)
TLI_DEFINE_STRING_INTERNAL("fminf")
TLI_DEFINE_SIG_INTERNAL(Floating, Same, Same)

/// long double fminl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(fminl)
TLI_DEFINE_STRING_INTERNAL("fminl")
TLI_DEFINE_SIG_INTERNAL(Floating, Same, Same)

/// double fmod(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(fmod)
TLI_DEFINE_STRING_INTERNAL("fmod")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float fmodf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(fmodf)
TLI_DEFINE_STRING_INTERNAL("fmodf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double fmodl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(fmodl)
TLI_DEFINE_STRING_INTERNAL("fmodl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// FILE *fopen(const char *filename, const char *mode);
TLI_DEFINE_ENUM_INTERNAL(fopen)
TLI_DEFINE_STRING_INTERNAL("fopen")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// FILE *fopen64(const char *filename, const char *opentype)
TLI_DEFINE_ENUM_INTERNAL(fopen64)
TLI_DEFINE_STRING_INTERNAL("fopen64")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// int fork();
TLI_DEFINE_ENUM_INTERNAL(fork)
TLI_DEFINE_STRING_INTERNAL("fork")
TLI_DEFINE_SIG_INTERNAL(Int)

/// int fprintf(FILE *stream, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(fprintf)
TLI_DEFINE_STRING_INTERNAL("fprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int fputc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputc)
TLI_DEFINE_STRING_INTERNAL("fputc")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int fputc_unlocked(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputc_unlocked)
TLI_DEFINE_STRING_INTERNAL("fputc_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int fputs(const char *s, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputs)
TLI_DEFINE_STRING_INTERNAL("fputs")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int fputs_unlocked(const char *s, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fputs_unlocked)
TLI_DEFINE_STRING_INTERNAL("fputs_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// size_t fread(void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fread)
TLI_DEFINE_STRING_INTERNAL("fread")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, SizeT, SizeT, Ptr)

/// size_t fread_unlocked(void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fread_unlocked)
TLI_DEFINE_STRING_INTERNAL("fread_unlocked")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, SizeT, SizeT, Ptr)

/// void free(void *ptr);
TLI_DEFINE_ENUM_INTERNAL(free)
TLI_DEFINE_STRING_INTERNAL("free")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// double frexp(double num, int *exp);
TLI_DEFINE_ENUM_INTERNAL(frexp)
TLI_DEFINE_STRING_INTERNAL("frexp")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Ptr)

/// float frexpf(float num, int *exp);
TLI_DEFINE_ENUM_INTERNAL(frexpf)
TLI_DEFINE_STRING_INTERNAL("frexpf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Ptr)

/// long double frexpl(long double num, int *exp);
TLI_DEFINE_ENUM_INTERNAL(frexpl)
TLI_DEFINE_STRING_INTERNAL("frexpl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, Ptr)

/// int fscanf(FILE *stream, const char *format, ... );
TLI_DEFINE_ENUM_INTERNAL(fscanf)
TLI_DEFINE_STRING_INTERNAL("fscanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int fseek(FILE *stream, long offset, int whence);
TLI_DEFINE_ENUM_INTERNAL(fseek)
TLI_DEFINE_STRING_INTERNAL("fseek")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Long, Int)

/// int fseeko(FILE *stream, off_t offset, int whence);
TLI_DEFINE_ENUM_INTERNAL(fseeko)
TLI_DEFINE_STRING_INTERNAL("fseeko")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, IntX, Int)

/// int fseeko64(FILE *stream, off64_t offset, int whence)
TLI_DEFINE_ENUM_INTERNAL(fseeko64)
TLI_DEFINE_STRING_INTERNAL("fseeko64")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Int64, Int)

/// int fsetpos(FILE *stream, const fpos_t *pos);
TLI_DEFINE_ENUM_INTERNAL(fsetpos)
TLI_DEFINE_STRING_INTERNAL("fsetpos")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int fstat(int fildes, struct stat *buf);
TLI_DEFINE_ENUM_INTERNAL(fstat)
TLI_DEFINE_STRING_INTERNAL("fstat")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int fstat64(int filedes, struct stat64 *buf)
TLI_DEFINE_ENUM_INTERNAL(fstat64)
TLI_DEFINE_STRING_INTERNAL("fstat64")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int fstatvfs(int fildes, struct statvfs *buf);
TLI_DEFINE_ENUM_INTERNAL(fstatvfs)
TLI_DEFINE_STRING_INTERNAL("fstatvfs")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int fstatvfs64(int fildes, struct statvfs64 *buf);
TLI_DEFINE_ENUM_INTERNAL(fstatvfs64)
TLI_DEFINE_STRING_INTERNAL("fstatvfs64")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// long ftell(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ftell)
TLI_DEFINE_STRING_INTERNAL("ftell")
TLI_DEFINE_SIG_INTERNAL(Long, Ptr)

/// off_t ftello(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ftello)
TLI_DEFINE_STRING_INTERNAL("ftello")
TLI_DEFINE_SIG_INTERNAL(IntPlus, Ptr)

/// off64_t ftello64(FILE *stream)
TLI_DEFINE_ENUM_INTERNAL(ftello64)
TLI_DEFINE_STRING_INTERNAL("ftello64")
TLI_DEFINE_SIG_INTERNAL(Int64, Ptr)

/// int ftrylockfile(FILE *file);
TLI_DEFINE_ENUM_INTERNAL(ftrylockfile)
TLI_DEFINE_STRING_INTERNAL("ftrylockfile")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// void funlockfile(FILE *file);
TLI_DEFINE_ENUM_INTERNAL(funlockfile)
TLI_DEFINE_STRING_INTERNAL("funlockfile")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// size_t fwrite(const void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fwrite)
TLI_DEFINE_STRING_INTERNAL("fwrite")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, SizeT, SizeT, Ptr)

/// size_t fwrite_unlocked(const void *ptr, size_t size, size_t nitems, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(fwrite_unlocked)
TLI_DEFINE_STRING_INTERNAL("fwrite_unlocked")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, SizeT, SizeT, Ptr)

/// int getc(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(getc)
TLI_DEFINE_STRING_INTERNAL("getc")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int getc_unlocked(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(getc_unlocked)
TLI_DEFINE_STRING_INTERNAL("getc_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int getchar(void);
TLI_DEFINE_ENUM_INTERNAL(getchar)
TLI_DEFINE_STRING_INTERNAL("getchar")
TLI_DEFINE_SIG_INTERNAL(Int)

/// int getchar_unlocked(void);
TLI_DEFINE_ENUM_INTERNAL(getchar_unlocked)
TLI_DEFINE_STRING_INTERNAL("getchar_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int)

/// char *getenv(const char *name);
TLI_DEFINE_ENUM_INTERNAL(getenv)
TLI_DEFINE_STRING_INTERNAL("getenv")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// int getitimer(int which, struct itimerval *value);
TLI_DEFINE_ENUM_INTERNAL(getitimer)
TLI_DEFINE_STRING_INTERNAL("getitimer")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int getlogin_r(char *name, size_t namesize);
TLI_DEFINE_ENUM_INTERNAL(getlogin_r)
TLI_DEFINE_STRING_INTERNAL("getlogin_r")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, SizeT)

/// struct passwd *getpwnam(const char *name);
TLI_DEFINE_ENUM_INTERNAL(getpwnam)
TLI_DEFINE_STRING_INTERNAL("getpwnam")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// char *gets(char *s);
TLI_DEFINE_ENUM_INTERNAL(gets)
TLI_DEFINE_STRING_INTERNAL("gets")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// int gettimeofday(struct timeval *tp, void *tzp);
TLI_DEFINE_ENUM_INTERNAL(gettimeofday)
TLI_DEFINE_STRING_INTERNAL("gettimeofday")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// uint32_t htonl(uint32_t hostlong);
TLI_DEFINE_ENUM_INTERNAL(htonl)
TLI_DEFINE_STRING_INTERNAL("htonl")
TLI_DEFINE_SIG_INTERNAL(Int32, Int32)

/// uint16_t htons(uint16_t hostshort);
TLI_DEFINE_ENUM_INTERNAL(htons)
TLI_DEFINE_STRING_INTERNAL("htons")
TLI_DEFINE_SIG_INTERNAL(Int16, Int16)
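
// htonl/htons (and the inverse ntohl/ntohs below) convert between host and
// network (big-endian) byte order: on a little-endian host
// htonl(0x01020304) returns 0x04030201, while on a big-endian host all four
// functions are the identity.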

/// int iprintf(const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(iprintf)
TLI_DEFINE_STRING_INTERNAL("iprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ellip)

/// int isascii(int c);
TLI_DEFINE_ENUM_INTERNAL(isascii)
TLI_DEFINE_STRING_INTERNAL("isascii")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// int isdigit(int c);
TLI_DEFINE_ENUM_INTERNAL(isdigit)
TLI_DEFINE_STRING_INTERNAL("isdigit")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// long int labs(long int j);
TLI_DEFINE_ENUM_INTERNAL(labs)
TLI_DEFINE_STRING_INTERNAL("labs")
TLI_DEFINE_SIG_INTERNAL(Long, Long)

/// int lchown(const char *path, uid_t owner, gid_t group);
TLI_DEFINE_ENUM_INTERNAL(lchown)
TLI_DEFINE_STRING_INTERNAL("lchown")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, IntX, IntX)

/// double ldexp(double x, int n);
TLI_DEFINE_ENUM_INTERNAL(ldexp)
TLI_DEFINE_STRING_INTERNAL("ldexp")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Int)

/// float ldexpf(float x, int n);
TLI_DEFINE_ENUM_INTERNAL(ldexpf)
TLI_DEFINE_STRING_INTERNAL("ldexpf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Int)

/// long double ldexpl(long double x, int n);
TLI_DEFINE_ENUM_INTERNAL(ldexpl)
TLI_DEFINE_STRING_INTERNAL("ldexpl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, Int)

/// long long int llabs(long long int j);
TLI_DEFINE_ENUM_INTERNAL(llabs)
TLI_DEFINE_STRING_INTERNAL("llabs")
TLI_DEFINE_SIG_INTERNAL(LLong, LLong)

/// double log(double x);
TLI_DEFINE_ENUM_INTERNAL(log)
TLI_DEFINE_STRING_INTERNAL("log")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// double log10(double x);
TLI_DEFINE_ENUM_INTERNAL(log10)
TLI_DEFINE_STRING_INTERNAL("log10")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float log10f(float x);
TLI_DEFINE_ENUM_INTERNAL(log10f)
TLI_DEFINE_STRING_INTERNAL("log10f")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double log10l(long double x);
TLI_DEFINE_ENUM_INTERNAL(log10l)
TLI_DEFINE_STRING_INTERNAL("log10l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double log1p(double x);
TLI_DEFINE_ENUM_INTERNAL(log1p)
TLI_DEFINE_STRING_INTERNAL("log1p")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float log1pf(float x);
TLI_DEFINE_ENUM_INTERNAL(log1pf)
TLI_DEFINE_STRING_INTERNAL("log1pf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double log1pl(long double x);
TLI_DEFINE_ENUM_INTERNAL(log1pl)
TLI_DEFINE_STRING_INTERNAL("log1pl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double log2(double x);
TLI_DEFINE_ENUM_INTERNAL(log2)
TLI_DEFINE_STRING_INTERNAL("log2")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float log2f(float x);
TLI_DEFINE_ENUM_INTERNAL(log2f)
TLI_DEFINE_STRING_INTERNAL("log2f")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double log2l(long double x);
TLI_DEFINE_ENUM_INTERNAL(log2l)
TLI_DEFINE_STRING_INTERNAL("log2l")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// double logb(double x);
TLI_DEFINE_ENUM_INTERNAL(logb)
TLI_DEFINE_STRING_INTERNAL("logb")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float logbf(float x);
TLI_DEFINE_ENUM_INTERNAL(logbf)
TLI_DEFINE_STRING_INTERNAL("logbf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double logbl(long double x);
TLI_DEFINE_ENUM_INTERNAL(logbl)
TLI_DEFINE_STRING_INTERNAL("logbl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// float logf(float x);
TLI_DEFINE_ENUM_INTERNAL(logf)
TLI_DEFINE_STRING_INTERNAL("logf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double logl(long double x);
TLI_DEFINE_ENUM_INTERNAL(logl)
TLI_DEFINE_STRING_INTERNAL("logl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int lstat(const char *path, struct stat *buf);
TLI_DEFINE_ENUM_INTERNAL(lstat)
TLI_DEFINE_STRING_INTERNAL("lstat")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int lstat64(const char *path, struct stat64 *buf);
TLI_DEFINE_ENUM_INTERNAL(lstat64)
TLI_DEFINE_STRING_INTERNAL("lstat64")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// void *malloc(size_t size);
TLI_DEFINE_ENUM_INTERNAL(malloc)
TLI_DEFINE_STRING_INTERNAL("malloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT)

/// void *memalign(size_t boundary, size_t size);
TLI_DEFINE_ENUM_INTERNAL(memalign)
TLI_DEFINE_STRING_INTERNAL("memalign")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT, SizeT)

/// void *memccpy(void *s1, const void *s2, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memccpy)
TLI_DEFINE_STRING_INTERNAL("memccpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, Int, SizeT)

/// void *memchr(const void *s, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memchr)
TLI_DEFINE_STRING_INTERNAL("memchr")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int, SizeT)

/// int memcmp(const void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memcmp)
TLI_DEFINE_STRING_INTERNAL("memcmp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, SizeT)

/// void *memcpy(void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memcpy)
TLI_DEFINE_STRING_INTERNAL("memcpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// void *memmove(void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memmove)
TLI_DEFINE_STRING_INTERNAL("memmove")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// void *mempcpy(void *s1, const void *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(mempcpy)
TLI_DEFINE_STRING_INTERNAL("mempcpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// void *memrchr(const void *s, int c, size_t n);
TLI_DEFINE_ENUM_INTERNAL(memrchr)
TLI_DEFINE_STRING_INTERNAL("memrchr")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int, SizeT)

/// void *memset(void *b, int c, size_t len);
TLI_DEFINE_ENUM_INTERNAL(memset)
TLI_DEFINE_STRING_INTERNAL("memset")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int, SizeT)

/// void memset_pattern16(void *b, const void *pattern16, size_t len);
TLI_DEFINE_ENUM_INTERNAL(memset_pattern16)
TLI_DEFINE_STRING_INTERNAL("memset_pattern16")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr, SizeT)

/// void memset_pattern4(void *b, const void *pattern4, size_t len);
TLI_DEFINE_ENUM_INTERNAL(memset_pattern4)
TLI_DEFINE_STRING_INTERNAL("memset_pattern4")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr, SizeT)

/// void memset_pattern8(void *b, const void *pattern8, size_t len);
TLI_DEFINE_ENUM_INTERNAL(memset_pattern8)
TLI_DEFINE_STRING_INTERNAL("memset_pattern8")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr, SizeT)
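
// The memset_pattern* functions are Darwin libc extensions that fill a
// buffer by repeating a 4-, 8-, or 16-byte pattern (a trailing partial
// pattern is written when len is not a multiple of the pattern size). An
// illustrative call:
//
//   static const uint32_t pat[4] = {0xDEADBEEF, 0xDEADBEEF,
//                                   0xDEADBEEF, 0xDEADBEEF};
//   memset_pattern16(buf, pat, buf_len);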

/// int mkdir(const char *path, mode_t mode);
TLI_DEFINE_ENUM_INTERNAL(mkdir)
TLI_DEFINE_STRING_INTERNAL("mkdir")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, IntX)

/// time_t mktime(struct tm *timeptr);
TLI_DEFINE_ENUM_INTERNAL(mktime)
TLI_DEFINE_STRING_INTERNAL("mktime")
TLI_DEFINE_SIG_INTERNAL(IntPlus, Ptr)

/// double modf(double x, double *iptr);
TLI_DEFINE_ENUM_INTERNAL(modf)
TLI_DEFINE_STRING_INTERNAL("modf")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Ptr)

/// float modff(float value, float *iptr);
TLI_DEFINE_ENUM_INTERNAL(modff)
TLI_DEFINE_STRING_INTERNAL("modff")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Ptr)

/// long double modfl(long double value, long double *iptr);
TLI_DEFINE_ENUM_INTERNAL(modfl)
TLI_DEFINE_STRING_INTERNAL("modfl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, Ptr)

/// double nearbyint(double x);
TLI_DEFINE_ENUM_INTERNAL(nearbyint)
TLI_DEFINE_STRING_INTERNAL("nearbyint")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float nearbyintf(float x);
TLI_DEFINE_ENUM_INTERNAL(nearbyintf)
TLI_DEFINE_STRING_INTERNAL("nearbyintf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double nearbyintl(long double x);
TLI_DEFINE_ENUM_INTERNAL(nearbyintl)
TLI_DEFINE_STRING_INTERNAL("nearbyintl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// uint32_t ntohl(uint32_t netlong);
TLI_DEFINE_ENUM_INTERNAL(ntohl)
TLI_DEFINE_STRING_INTERNAL("ntohl")
TLI_DEFINE_SIG_INTERNAL(Int32, Int32)

/// uint16_t ntohs(uint16_t netshort);
TLI_DEFINE_ENUM_INTERNAL(ntohs)
TLI_DEFINE_STRING_INTERNAL("ntohs")
TLI_DEFINE_SIG_INTERNAL(Int16, Int16)

/// int open(const char *path, int oflag, ... );
TLI_DEFINE_ENUM_INTERNAL(open)
TLI_DEFINE_STRING_INTERNAL("open")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Int, Ellip)

/// int open64(const char *filename, int flags[, mode_t mode])
TLI_DEFINE_ENUM_INTERNAL(open64)
TLI_DEFINE_STRING_INTERNAL("open64")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Int, Ellip)

/// DIR *opendir(const char *dirname);
TLI_DEFINE_ENUM_INTERNAL(opendir)
TLI_DEFINE_STRING_INTERNAL("opendir")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// int pclose(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(pclose)
TLI_DEFINE_STRING_INTERNAL("pclose")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// void perror(const char *s);
TLI_DEFINE_ENUM_INTERNAL(perror)
TLI_DEFINE_STRING_INTERNAL("perror")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// FILE *popen(const char *command, const char *mode);
TLI_DEFINE_ENUM_INTERNAL(popen)
TLI_DEFINE_STRING_INTERNAL("popen")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// int posix_memalign(void **memptr, size_t alignment, size_t size);
TLI_DEFINE_ENUM_INTERNAL(posix_memalign)
TLI_DEFINE_STRING_INTERNAL("posix_memalign")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, SizeT, SizeT)
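
// Illustrative use (alignment must be a power of two that is a multiple of
// sizeof(void*); on success *memptr is released with free()):
//
//   void *p = NULL;
//   if (posix_memalign(&p, 64, nbytes) == 0) {
//     /* ... use the 64-byte-aligned buffer ... */
//     free(p);
//   }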

/// double pow(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(pow)
TLI_DEFINE_STRING_INTERNAL("pow")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float powf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(powf)
TLI_DEFINE_STRING_INTERNAL("powf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double powl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(powl)
TLI_DEFINE_STRING_INTERNAL("powl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset);
TLI_DEFINE_ENUM_INTERNAL(pread)
TLI_DEFINE_STRING_INTERNAL("pread")
TLI_DEFINE_SIG_INTERNAL(SSizeT, Int, Ptr, SizeT, IntPlus)

/// int printf(const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(printf)
TLI_DEFINE_STRING_INTERNAL("printf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ellip)

/// int putc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(putc)
TLI_DEFINE_STRING_INTERNAL("putc")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int putc_unlocked(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(putc_unlocked)
TLI_DEFINE_STRING_INTERNAL("putc_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int putchar(int c);
TLI_DEFINE_ENUM_INTERNAL(putchar)
TLI_DEFINE_STRING_INTERNAL("putchar")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// int putchar_unlocked(int c);
TLI_DEFINE_ENUM_INTERNAL(putchar_unlocked)
TLI_DEFINE_STRING_INTERNAL("putchar_unlocked")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// int puts(const char *s);
TLI_DEFINE_ENUM_INTERNAL(puts)
TLI_DEFINE_STRING_INTERNAL("puts")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
TLI_DEFINE_ENUM_INTERNAL(pwrite)
TLI_DEFINE_STRING_INTERNAL("pwrite")
TLI_DEFINE_SIG_INTERNAL(SSizeT, Int, Ptr, SizeT, IntPlus)

/// void qsort(void *base, size_t nel, size_t width,
///            int (*compar)(const void *, const void *));
TLI_DEFINE_ENUM_INTERNAL(qsort)
TLI_DEFINE_STRING_INTERNAL("qsort")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, SizeT, SizeT, Ptr)
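
// The trailing Ptr above is the comparison callback. An illustrative
// comparator and call:
//
//   static int cmp_int(const void *a, const void *b) {
//     int x = *(const int *)a, y = *(const int *)b;
//     return (x > y) - (x < y);
//   }
//   qsort(values, nvalues, sizeof(int), cmp_int);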

/// ssize_t read(int fildes, void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(read)
TLI_DEFINE_STRING_INTERNAL("read")
TLI_DEFINE_SIG_INTERNAL(SSizeT, Int, Ptr, SizeT)

/// ssize_t readlink(const char *path, char *buf, size_t bufsize);
TLI_DEFINE_ENUM_INTERNAL(readlink)
TLI_DEFINE_STRING_INTERNAL("readlink")
TLI_DEFINE_SIG_INTERNAL(SSizeT, Ptr, Ptr, SizeT)

/// void *realloc(void *ptr, size_t size);
TLI_DEFINE_ENUM_INTERNAL(realloc)
TLI_DEFINE_STRING_INTERNAL("realloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, SizeT)

/// void *reallocf(void *ptr, size_t size);
TLI_DEFINE_ENUM_INTERNAL(reallocf)
TLI_DEFINE_STRING_INTERNAL("reallocf")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, SizeT)

/// char *realpath(const char *file_name, char *resolved_name);
TLI_DEFINE_ENUM_INTERNAL(realpath)
TLI_DEFINE_STRING_INTERNAL("realpath")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// double remainder(double x, double y);
TLI_DEFINE_ENUM_INTERNAL(remainder)
TLI_DEFINE_STRING_INTERNAL("remainder")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl, Dbl)

/// float remainderf(float x, float y);
TLI_DEFINE_ENUM_INTERNAL(remainderf)
TLI_DEFINE_STRING_INTERNAL("remainderf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt, Flt)

/// long double remainderl(long double x, long double y);
TLI_DEFINE_ENUM_INTERNAL(remainderl)
TLI_DEFINE_STRING_INTERNAL("remainderl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl, LDbl)

/// int remove(const char *path);
TLI_DEFINE_ENUM_INTERNAL(remove)
TLI_DEFINE_STRING_INTERNAL("remove")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int rename(const char *old, const char *new);
TLI_DEFINE_ENUM_INTERNAL(rename)
TLI_DEFINE_STRING_INTERNAL("rename")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// void rewind(FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(rewind)
TLI_DEFINE_STRING_INTERNAL("rewind")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// double rint(double x);
TLI_DEFINE_ENUM_INTERNAL(rint)
TLI_DEFINE_STRING_INTERNAL("rint")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float rintf(float x);
TLI_DEFINE_ENUM_INTERNAL(rintf)
TLI_DEFINE_STRING_INTERNAL("rintf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double rintl(long double x);
TLI_DEFINE_ENUM_INTERNAL(rintl)
TLI_DEFINE_STRING_INTERNAL("rintl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int rmdir(const char *path);
TLI_DEFINE_ENUM_INTERNAL(rmdir)
TLI_DEFINE_STRING_INTERNAL("rmdir")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// double round(double x);
TLI_DEFINE_ENUM_INTERNAL(round)
TLI_DEFINE_STRING_INTERNAL("round")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// double roundeven(double x);
TLI_DEFINE_ENUM_INTERNAL(roundeven)
TLI_DEFINE_STRING_INTERNAL("roundeven")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float roundevenf(float x);
TLI_DEFINE_ENUM_INTERNAL(roundevenf)
TLI_DEFINE_STRING_INTERNAL("roundevenf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double roundevenl(long double x);
TLI_DEFINE_ENUM_INTERNAL(roundevenl)
TLI_DEFINE_STRING_INTERNAL("roundevenl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// float roundf(float x);
TLI_DEFINE_ENUM_INTERNAL(roundf)
TLI_DEFINE_STRING_INTERNAL("roundf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double roundl(long double x);
TLI_DEFINE_ENUM_INTERNAL(roundl)
TLI_DEFINE_STRING_INTERNAL("roundl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int scanf(const char *restrict format, ... );
TLI_DEFINE_ENUM_INTERNAL(scanf)
TLI_DEFINE_STRING_INTERNAL("scanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ellip)

/// void setbuf(FILE *stream, char *buf);
TLI_DEFINE_ENUM_INTERNAL(setbuf)
TLI_DEFINE_STRING_INTERNAL("setbuf")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr, Ptr)

/// int setitimer(int which, const struct itimerval *value,
///               struct itimerval *ovalue);
TLI_DEFINE_ENUM_INTERNAL(setitimer)
TLI_DEFINE_STRING_INTERNAL("setitimer")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr, Ptr)

/// int setvbuf(FILE *stream, char *buf, int type, size_t size);
TLI_DEFINE_ENUM_INTERNAL(setvbuf)
TLI_DEFINE_STRING_INTERNAL("setvbuf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Int, SizeT)

/// double sin(double x);
TLI_DEFINE_ENUM_INTERNAL(sin)
TLI_DEFINE_STRING_INTERNAL("sin")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float sinf(float x);
TLI_DEFINE_ENUM_INTERNAL(sinf)
TLI_DEFINE_STRING_INTERNAL("sinf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double sinh(double x);
TLI_DEFINE_ENUM_INTERNAL(sinh)
TLI_DEFINE_STRING_INTERNAL("sinh")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float sinhf(float x);
TLI_DEFINE_ENUM_INTERNAL(sinhf)
TLI_DEFINE_STRING_INTERNAL("sinhf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double sinhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(sinhl)
TLI_DEFINE_STRING_INTERNAL("sinhl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double sinl(long double x);
TLI_DEFINE_ENUM_INTERNAL(sinl)
TLI_DEFINE_STRING_INTERNAL("sinl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int siprintf(char *str, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(siprintf)
TLI_DEFINE_STRING_INTERNAL("siprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int snprintf(char *s, size_t n, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(snprintf)
TLI_DEFINE_STRING_INTERNAL("snprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, SizeT, Ptr, Ellip)

/// int sprintf(char *str, const char *format, ...);
TLI_DEFINE_ENUM_INTERNAL(sprintf)
TLI_DEFINE_STRING_INTERNAL("sprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// double sqrt(double x);
TLI_DEFINE_ENUM_INTERNAL(sqrt)
TLI_DEFINE_STRING_INTERNAL("sqrt")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float sqrtf(float x);
TLI_DEFINE_ENUM_INTERNAL(sqrtf)
TLI_DEFINE_STRING_INTERNAL("sqrtf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double sqrtl(long double x);
TLI_DEFINE_ENUM_INTERNAL(sqrtl)
TLI_DEFINE_STRING_INTERNAL("sqrtl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int sscanf(const char *s, const char *format, ... );
TLI_DEFINE_ENUM_INTERNAL(sscanf)
TLI_DEFINE_STRING_INTERNAL("sscanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ellip)

/// int stat(const char *path, struct stat *buf);
TLI_DEFINE_ENUM_INTERNAL(stat)
TLI_DEFINE_STRING_INTERNAL("stat")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int stat64(const char *path, struct stat64 *buf);
TLI_DEFINE_ENUM_INTERNAL(stat64)
TLI_DEFINE_STRING_INTERNAL("stat64")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int statvfs(const char *path, struct statvfs *buf);
TLI_DEFINE_ENUM_INTERNAL(statvfs)
TLI_DEFINE_STRING_INTERNAL("statvfs")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int statvfs64(const char *path, struct statvfs64 *buf)
TLI_DEFINE_ENUM_INTERNAL(statvfs64)
TLI_DEFINE_STRING_INTERNAL("statvfs64")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// char *stpcpy(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(stpcpy)
TLI_DEFINE_STRING_INTERNAL("stpcpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// char *stpncpy(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(stpncpy)
TLI_DEFINE_STRING_INTERNAL("stpncpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// int strcasecmp(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcasecmp)
TLI_DEFINE_STRING_INTERNAL("strcasecmp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// char *strcat(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcat)
TLI_DEFINE_STRING_INTERNAL("strcat")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// char *strchr(const char *s, int c);
TLI_DEFINE_ENUM_INTERNAL(strchr)
TLI_DEFINE_STRING_INTERNAL("strchr")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int)

/// int strcmp(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcmp)
TLI_DEFINE_STRING_INTERNAL("strcmp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int strcoll(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcoll)
TLI_DEFINE_STRING_INTERNAL("strcoll")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// char *strcpy(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcpy)
TLI_DEFINE_STRING_INTERNAL("strcpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// size_t strcspn(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strcspn)
TLI_DEFINE_STRING_INTERNAL("strcspn")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr)

/// char *strdup(const char *s1);
TLI_DEFINE_ENUM_INTERNAL(strdup)
TLI_DEFINE_STRING_INTERNAL("strdup")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr)

/// size_t strlcat(char *dst, const char *src, size_t size);
TLI_DEFINE_ENUM_INTERNAL(strlcat)
TLI_DEFINE_STRING_INTERNAL("strlcat")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr, SizeT)

/// size_t strlcpy(char *dst, const char *src, size_t size);
TLI_DEFINE_ENUM_INTERNAL(strlcpy)
TLI_DEFINE_STRING_INTERNAL("strlcpy")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr, SizeT)

/// size_t strlen(const char *s);
TLI_DEFINE_ENUM_INTERNAL(strlen)
TLI_DEFINE_STRING_INTERNAL("strlen")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr)

/// int strncasecmp(const char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncasecmp)
TLI_DEFINE_STRING_INTERNAL("strncasecmp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, SizeT)

/// char *strncat(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncat)
TLI_DEFINE_STRING_INTERNAL("strncat")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// int strncmp(const char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncmp)
TLI_DEFINE_STRING_INTERNAL("strncmp")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, SizeT)

/// char *strncpy(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strncpy)
TLI_DEFINE_STRING_INTERNAL("strncpy")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, SizeT)

/// char *strndup(const char *s1, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strndup)
TLI_DEFINE_STRING_INTERNAL("strndup")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, SizeT)

/// size_t strnlen(const char *s, size_t maxlen);
TLI_DEFINE_ENUM_INTERNAL(strnlen)
TLI_DEFINE_STRING_INTERNAL("strnlen")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, SizeT)

/// char *strpbrk(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strpbrk)
TLI_DEFINE_STRING_INTERNAL("strpbrk")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// char *strrchr(const char *s, int c);
TLI_DEFINE_ENUM_INTERNAL(strrchr)
TLI_DEFINE_STRING_INTERNAL("strrchr")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Int)

/// size_t strspn(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strspn)
TLI_DEFINE_STRING_INTERNAL("strspn")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr)

/// char *strstr(const char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strstr)
TLI_DEFINE_STRING_INTERNAL("strstr")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// double strtod(const char *nptr, char **endptr);
TLI_DEFINE_ENUM_INTERNAL(strtod)
TLI_DEFINE_STRING_INTERNAL("strtod")
TLI_DEFINE_SIG_INTERNAL(Dbl, Ptr, Ptr)

/// float strtof(const char *nptr, char **endptr);
TLI_DEFINE_ENUM_INTERNAL(strtof)
TLI_DEFINE_STRING_INTERNAL("strtof")
TLI_DEFINE_SIG_INTERNAL(Flt, Ptr, Ptr)

/// char *strtok(char *s1, const char *s2);
TLI_DEFINE_ENUM_INTERNAL(strtok)
TLI_DEFINE_STRING_INTERNAL("strtok")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr)

/// char *strtok_r(char *s, const char *sep, char **lasts);
TLI_DEFINE_ENUM_INTERNAL(strtok_r)
TLI_DEFINE_STRING_INTERNAL("strtok_r")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, Ptr, Ptr)

/// long int strtol(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtol)
TLI_DEFINE_STRING_INTERNAL("strtol")
TLI_DEFINE_SIG_INTERNAL(Long, Ptr, Ptr, Int)

/// long double strtold(const char *nptr, char **endptr);
TLI_DEFINE_ENUM_INTERNAL(strtold)
TLI_DEFINE_STRING_INTERNAL("strtold")
TLI_DEFINE_SIG_INTERNAL(LDbl, Ptr, Ptr)

/// long long int strtoll(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtoll)
TLI_DEFINE_STRING_INTERNAL("strtoll")
TLI_DEFINE_SIG_INTERNAL(LLong, Ptr, Ptr, Int)

/// unsigned long int strtoul(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtoul)
TLI_DEFINE_STRING_INTERNAL("strtoul")
TLI_DEFINE_SIG_INTERNAL(Long, Ptr, Ptr, Int)

/// unsigned long long int strtoull(const char *nptr, char **endptr, int base);
TLI_DEFINE_ENUM_INTERNAL(strtoull)
TLI_DEFINE_STRING_INTERNAL("strtoull")
TLI_DEFINE_SIG_INTERNAL(LLong, Ptr, Ptr, Int)

/// size_t strxfrm(char *s1, const char *s2, size_t n);
TLI_DEFINE_ENUM_INTERNAL(strxfrm)
TLI_DEFINE_STRING_INTERNAL("strxfrm")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr, Ptr, SizeT)

/// int system(const char *command);
TLI_DEFINE_ENUM_INTERNAL(system)
TLI_DEFINE_STRING_INTERNAL("system")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// double tan(double x);
TLI_DEFINE_ENUM_INTERNAL(tan)
TLI_DEFINE_STRING_INTERNAL("tan")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float tanf(float x);
TLI_DEFINE_ENUM_INTERNAL(tanf)
TLI_DEFINE_STRING_INTERNAL("tanf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// double tanh(double x);
TLI_DEFINE_ENUM_INTERNAL(tanh)
TLI_DEFINE_STRING_INTERNAL("tanh")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float tanhf(float x);
TLI_DEFINE_ENUM_INTERNAL(tanhf)
TLI_DEFINE_STRING_INTERNAL("tanhf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double tanhl(long double x);
TLI_DEFINE_ENUM_INTERNAL(tanhl)
TLI_DEFINE_STRING_INTERNAL("tanhl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// long double tanl(long double x);
TLI_DEFINE_ENUM_INTERNAL(tanl)
TLI_DEFINE_STRING_INTERNAL("tanl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// clock_t times(struct tms *buffer);
TLI_DEFINE_ENUM_INTERNAL(times)
TLI_DEFINE_STRING_INTERNAL("times")
TLI_DEFINE_SIG_INTERNAL(IntPlus, Ptr)

/// FILE *tmpfile(void);
TLI_DEFINE_ENUM_INTERNAL(tmpfile)
TLI_DEFINE_STRING_INTERNAL("tmpfile")
TLI_DEFINE_SIG_INTERNAL(Ptr)

/// FILE *tmpfile64(void)
TLI_DEFINE_ENUM_INTERNAL(tmpfile64)
TLI_DEFINE_STRING_INTERNAL("tmpfile64")
TLI_DEFINE_SIG_INTERNAL(Ptr)

/// int toascii(int c);
TLI_DEFINE_ENUM_INTERNAL(toascii)
TLI_DEFINE_STRING_INTERNAL("toascii")
TLI_DEFINE_SIG_INTERNAL(Int, Int)

/// double trunc(double x);
TLI_DEFINE_ENUM_INTERNAL(trunc)
TLI_DEFINE_STRING_INTERNAL("trunc")
TLI_DEFINE_SIG_INTERNAL(Dbl, Dbl)

/// float truncf(float x);
TLI_DEFINE_ENUM_INTERNAL(truncf)
TLI_DEFINE_STRING_INTERNAL("truncf")
TLI_DEFINE_SIG_INTERNAL(Flt, Flt)

/// long double truncl(long double x);
TLI_DEFINE_ENUM_INTERNAL(truncl)
TLI_DEFINE_STRING_INTERNAL("truncl")
TLI_DEFINE_SIG_INTERNAL(LDbl, LDbl)

/// int uname(struct utsname *name);
TLI_DEFINE_ENUM_INTERNAL(uname)
TLI_DEFINE_STRING_INTERNAL("uname")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int ungetc(int c, FILE *stream);
TLI_DEFINE_ENUM_INTERNAL(ungetc)
TLI_DEFINE_STRING_INTERNAL("ungetc")
TLI_DEFINE_SIG_INTERNAL(Int, Int, Ptr)

/// int unlink(const char *path);
TLI_DEFINE_ENUM_INTERNAL(unlink)
TLI_DEFINE_STRING_INTERNAL("unlink")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int unsetenv(const char *name);
TLI_DEFINE_ENUM_INTERNAL(unsetenv)
TLI_DEFINE_STRING_INTERNAL("unsetenv")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr)

/// int utime(const char *path, const struct utimbuf *times);
TLI_DEFINE_ENUM_INTERNAL(utime)
TLI_DEFINE_STRING_INTERNAL("utime")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int utimes(const char *path, const struct timeval times[2]);
TLI_DEFINE_ENUM_INTERNAL(utimes)
TLI_DEFINE_STRING_INTERNAL("utimes")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// void *valloc(size_t size);
TLI_DEFINE_ENUM_INTERNAL(valloc)
TLI_DEFINE_STRING_INTERNAL("valloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT)

/// void *vec_calloc(size_t count, size_t size);
TLI_DEFINE_ENUM_INTERNAL(vec_calloc)
TLI_DEFINE_STRING_INTERNAL("vec_calloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT, SizeT)

/// void vec_free(void *ptr);
TLI_DEFINE_ENUM_INTERNAL(vec_free)
TLI_DEFINE_STRING_INTERNAL("vec_free")
TLI_DEFINE_SIG_INTERNAL(Void, Ptr)

/// void *vec_malloc(size_t size);
TLI_DEFINE_ENUM_INTERNAL(vec_malloc)
TLI_DEFINE_STRING_INTERNAL("vec_malloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, SizeT)

/// void *vec_realloc(void *ptr, size_t size);
TLI_DEFINE_ENUM_INTERNAL(vec_realloc)
TLI_DEFINE_STRING_INTERNAL("vec_realloc")
TLI_DEFINE_SIG_INTERNAL(Ptr, Ptr, SizeT)

/// int vfprintf(FILE *stream, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vfprintf)
TLI_DEFINE_STRING_INTERNAL("vfprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// int vfscanf(FILE *stream, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vfscanf)
TLI_DEFINE_STRING_INTERNAL("vfscanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// int vprintf(const char *restrict format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vprintf)
TLI_DEFINE_STRING_INTERNAL("vprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int vscanf(const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vscanf)
TLI_DEFINE_STRING_INTERNAL("vscanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr)

/// int vsnprintf(char *s, size_t n, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vsnprintf)
TLI_DEFINE_STRING_INTERNAL("vsnprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, SizeT, Ptr, Ptr)

/// int vsprintf(char *s, const char *format, va_list ap);
TLI_DEFINE_ENUM_INTERNAL(vsprintf)
TLI_DEFINE_STRING_INTERNAL("vsprintf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// int vsscanf(const char *s, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vsscanf)
TLI_DEFINE_STRING_INTERNAL("vsscanf")
TLI_DEFINE_SIG_INTERNAL(Int, Ptr, Ptr, Ptr)

/// size_t wcslen (const wchar_t* wcs);
TLI_DEFINE_ENUM_INTERNAL(wcslen)
TLI_DEFINE_STRING_INTERNAL("wcslen")
TLI_DEFINE_SIG_INTERNAL(SizeT, Ptr)

/// ssize_t write(int fildes, const void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(write)
TLI_DEFINE_STRING_INTERNAL("write")
TLI_DEFINE_SIG_INTERNAL(SSizeT, Int, Ptr, SizeT)

#undef TLI_DEFINE_ENUM_INTERNAL
#undef TLI_DEFINE_STRING_INTERNAL
#undef TLI_DEFINE_SIG_INTERNAL
#endif  // One of TLI_DEFINE_ENUM/STRING is defined.

#undef TLI_DEFINE_ENUM
#undef TLI_DEFINE_STRING
#undef TLI_DEFINE_SIG
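
// A minimal sketch of the X-macro pattern this file is designed for: a
// client defines one of TLI_DEFINE_ENUM / TLI_DEFINE_STRING / TLI_DEFINE_SIG
// before including the file, and each entry above then expands accordingly.
// For example, building a table of the recognized library function names:
//
//   static const char *const StandardNames[] = {
//   #define TLI_DEFINE_STRING
//   #include "llvm/Analysis/TargetLibraryInfo.def"
//   };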

//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the loop memory dependence framework that
// was originally developed for the Loop Vectorizer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H

#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Pass.h"
#include <optional>

namespace llvm {

class AAResults;
class DataLayout;
class Loop;
class LoopAccessInfo;
class raw_ostream;
class SCEV;
class SCEVUnionPredicate;
class Value;

/// Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
  /// Maximum SIMD width.
  static const unsigned MaxVectorWidth;

  /// VF as overridden by the user.
  static unsigned VectorizationFactor;
  /// Interleave factor as overridden by the user.
  static unsigned VectorizationInterleave;
  /// True if force-vector-interleave was specified by the user.
  static bool isInterleaveForced();

  /// When performing memory disambiguation checks at runtime, do not
  /// make more than this number of comparisons.
  static unsigned RuntimeMemoryCheckThreshold;
};

/// Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
/// Note: This class will compute a conservative dependence for access to
/// different underlying pointers. Clients, such as the loop vectorizer, will
/// sometimes deal with these potential dependencies by emitting runtime checks.
///
/// We use the ScalarEvolution framework to symbolically evaluate access
/// function pairs. Since we currently don't restructure the loop we can rely
/// on the program order of memory accesses to determine their safety.
/// At the moment we will only deem accesses as safe for:
///  * A negative constant distance assuming program order.
///
///      Safe: tmp = a[i + 1];     OR     a[i + 1] = x;
///            a[i] = tmp;                y = a[i];
///
///   The latter case is safe because later checks guarantee that there can't
///   be a cycle through a phi node (that is, we check that "x" and "y" are not
///   the same variable: a header phi can only be an induction or a reduction, a
///   reduction can't have a memory sink, an induction can't have a memory
///   source). This is important and must not be violated (or we have to
///   resort to checking for cycles through memory).
///
///  * A positive constant distance assuming program order that is bigger
///    than the biggest memory access.
///
///     tmp = a[i]        OR              b[i] = x
///     a[i+2] = tmp                      y = b[i+2];
///
///     Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
///
///  * Zero distances and all accesses have the same size.
///
class MemoryDepChecker {
public:
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
  /// Set of potential dependent memory accesses.
  typedef EquivalenceClasses<MemAccessInfo> DepCandidates;

  /// Type to keep track of the status of the dependence check. The order of
  /// the elements is important and has to be from most permissive to least
  /// permissive.
  enum class VectorizationSafetyStatus {
    // Can vectorize safely without RT checks. All dependences are known to be
    // safe.
    Safe,
    // Can possibly vectorize with RT checks to overcome unknown dependencies.
    PossiblySafeWithRtChecks,
    // Cannot vectorize due to known unsafe dependencies.
    Unsafe,
  };

  /// Dependence between memory access instructions.
  struct Dependence {
    /// The type of the dependence.
    enum DepType {
      // No dependence.
      NoDep,
      // We couldn't determine the direction or the distance.
      Unknown,
      // Lexically forward.
      //
      // FIXME: If we only have loop-independent forward dependences (e.g. a
      // read and write of A[i]), LAA will locally deem the dependence "safe"
      // without querying the MemoryDepChecker.  Therefore we can miss
      // enumerating loop-independent forward dependences in
      // getDependences.  Note that as soon as there are different
      // indices used to access the same array, the MemoryDepChecker *is*
      // queried and the dependence list is complete.
      Forward,
      // Forward, but if vectorized, is likely to prevent store-to-load
      // forwarding.
      ForwardButPreventsForwarding,
      // Lexically backward.
      Backward,
      // Backward, but the distance allows a vectorization factor of
      // MaxSafeDepDistBytes.
      BackwardVectorizable,
      // Same, but may prevent store-to-load forwarding.
      BackwardVectorizableButPreventsForwarding
    };

    /// String version of the types.
    static const char *DepName[];

    /// Index of the source of the dependence in the InstMap vector.
    unsigned Source;
    /// Index of the destination of the dependence in the InstMap vector.
    unsigned Destination;
    /// The type of the dependence.
    DepType Type;

    Dependence(unsigned Source, unsigned Destination, DepType Type)
        : Source(Source), Destination(Destination), Type(Type) {}

    /// Return the source instruction of the dependence.
    Instruction *getSource(const LoopAccessInfo &LAI) const;
    /// Return the destination instruction of the dependence.
    Instruction *getDestination(const LoopAccessInfo &LAI) const;

    /// Dependence types that don't prevent vectorization.
    static VectorizationSafetyStatus isSafeForVectorization(DepType Type);

    /// Lexically forward dependence.
    bool isForward() const;
    /// Lexically backward dependence.
    bool isBackward() const;

    /// May be a lexically backward dependence type (includes Unknown).
    bool isPossiblyBackward() const;

    /// Print the dependence.  \p Instr is used to map the instruction
    /// indices to instructions.
    void print(raw_ostream &OS, unsigned Depth,
               const SmallVectorImpl<Instruction *> &Instrs) const;
  };

  MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
      : PSE(PSE), InnermostLoop(L) {}

  /// Register the location (instructions are given increasing numbers)
  /// of a write access.
  void addAccess(StoreInst *SI);

  /// Register the location (instructions are given increasing numbers)
  /// of a read access.
  void addAccess(LoadInst *LI);

  /// Check whether the dependencies between the accesses are safe.
  ///
  /// Only checks sets with elements in \p CheckDeps.
  bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
                   const DenseMap<Value *, const SCEV *> &Strides);
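
  // A minimal driving sketch, assuming \c PSE and the innermost loop \c L,
  // and that \c AccessSets, \c CheckDeps and \c Strides have been built up
  // front (in LAA proper this is done by the internal AccessAnalysis
  // helper).  Accesses must be registered in program order:
  //
  //   MemoryDepChecker DepChecker(PSE, L);
  //   for (BasicBlock *BB : L->blocks())
  //     for (Instruction &I : *BB) {
  //       if (auto *LD = dyn_cast<LoadInst>(&I))
  //         DepChecker.addAccess(LD);
  //       else if (auto *ST = dyn_cast<StoreInst>(&I))
  //         DepChecker.addAccess(ST);
  //     }
  //   bool Safe = DepChecker.areDepsSafe(AccessSets, CheckDeps, Strides);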

  /// No memory dependence was encountered that would inhibit
  /// vectorization.
  bool isSafeForVectorization() const {
    return Status == VectorizationSafetyStatus::Safe;
  }

  /// Return true if the number of elements that are safe to operate on
  /// simultaneously is not bounded.
  bool isSafeForAnyVectorWidth() const {
    return MaxSafeVectorWidthInBits == UINT_MAX;
  }

  /// The maximum number of bytes of a vector register we can vectorize
  /// the accesses safely with.
  uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }

  /// Return the number of elements that are safe to operate on
  /// simultaneously, multiplied by the size of the element in bits.
  uint64_t getMaxSafeVectorWidthInBits() const {
    return MaxSafeVectorWidthInBits;
  }

  /// In some cases when the dependency check fails we can still
  /// vectorize the loop with a dynamic array access check.
  bool shouldRetryWithRuntimeCheck() const {
    return FoundNonConstantDistanceDependence &&
           Status == VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  }

  /// Returns the memory dependences.  If null is returned we exceeded
  /// the MaxDependences threshold and this information is not
  /// available.
  const SmallVectorImpl<Dependence> *getDependences() const {
    return RecordDependences ? &Dependences : nullptr;
  }
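
  // Continuing the sketch above: the recorded dependences can be enumerated
  // as follows, assuming the MaxDependences threshold was not exceeded.
  //
  //   if (const auto *Deps = DepChecker.getDependences())
  //     for (const MemoryDepChecker::Dependence &Dep : *Deps)
  //       Dep.print(llvm::errs(), /*Depth=*/2,
  //                 DepChecker.getMemoryInstructions());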

  void clearDependences() { Dependences.clear(); }

  /// The vector of memory access instructions.  The indices are used as
  /// instruction identifiers in the Dependence class.
  const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
    return InstMap;
  }

  /// Generate a mapping between the memory instructions and their
  /// indices according to program order.
  DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
    DenseMap<Instruction *, unsigned> OrderMap;

    for (unsigned I = 0; I < InstMap.size(); ++I)
      OrderMap[InstMap[I]] = I;

    return OrderMap;
  }

  /// Find the set of instructions that read or write via \p Ptr.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const;

  /// Return the program order indices for the access location (Ptr, IsWrite).
  /// Returns an empty ArrayRef if there are no accesses for the location.
  ArrayRef<unsigned> getOrderForAccess(Value *Ptr, bool IsWrite) const {
    auto I = Accesses.find({Ptr, IsWrite});
    if (I != Accesses.end())
      return I->second;
    return {};
  }

  const Loop *getInnermostLoop() const { return InnermostLoop; }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
  /// applies dynamic knowledge to simplify SCEV expressions and convert them
  /// to a more usable form. We need this in case assumptions about SCEV
  /// expressions need to be made in order to avoid unknown dependences. For
  /// example we might assume a unit stride for a pointer in order to prove
  /// that a memory access is strided and doesn't wrap.
  PredicatedScalarEvolution &PSE;
  const Loop *InnermostLoop;

  /// Maps access locations (ptr, read/write) to program order.
  DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;

  /// Memory access instructions in program order.
  SmallVector<Instruction *, 16> InstMap;

  /// The program order index to be used for the next instruction.
  unsigned AccessIdx = 0;

  // We can access this many bytes in parallel safely.
  uint64_t MaxSafeDepDistBytes = 0;

  /// Number of elements (from consecutive iterations) that are safe to
  /// operate on simultaneously, multiplied by the size of the element in bits.
  /// The size of the element is taken from the memory access that is most
  /// restrictive.
  uint64_t MaxSafeVectorWidthInBits = -1U;

  /// If we see a non-constant dependence distance we can still try to
  /// vectorize this loop with runtime checks.
  bool FoundNonConstantDistanceDependence = false;

  /// Result of the dependence checks, indicating whether the checked
  /// dependences are safe for vectorization, require RT checks or are known to
  /// be unsafe.
  VectorizationSafetyStatus Status = VectorizationSafetyStatus::Safe;

  /// True if Dependences reflects the dependences in the
  /// loop.  If false we exceeded MaxDependences and
  /// Dependences is invalid.
  bool RecordDependences = true;

  /// Memory dependences collected during the analysis.  Only valid if
  /// RecordDependences is true.
  SmallVector<Dependence, 8> Dependences;

  /// Check whether there is a plausible dependence between the two
  /// accesses.
  ///
  /// Access \p A must happen before \p B in program order. The two indices
  /// identify the index into the program order map.
  ///
  /// This function checks whether there is a plausible dependence (or the
  /// absence of such can't be proved) between the two accesses. If there is a
  /// plausible dependence but the dependence distance is bigger than one
  /// element access it records this distance in \p MaxSafeDepDistBytes (if this
  /// distance is smaller than any other distance encountered so far).
  /// Otherwise, it returns a dependence type signaling a possible dependence.
  Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
                                  const MemAccessInfo &B, unsigned BIdx,
                                  const DenseMap<Value *, const SCEV *> &Strides);

  /// Check whether the data dependence could prevent store-load
  /// forwarding.
  ///
  /// \return false if we shouldn't vectorize at all or avoid larger
  /// vectorization factors by limiting MaxSafeDepDistBytes.
  bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);

  /// Updates the current safety status with \p S. We can go from Safe to
  /// either PossiblySafeWithRtChecks or Unsafe and from
  /// PossiblySafeWithRtChecks to Unsafe.
  void mergeInStatus(VectorizationSafetyStatus S);
};

class RuntimePointerChecking;
/// A grouping of pointers. A single memcheck is required between
/// two groups.
struct RuntimeCheckingPtrGroup {
  /// Create a new pointer checking group containing a single
  /// pointer, with index \p Index in RtCheck.
  RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck);

  /// Tries to add the pointer recorded in RtCheck at index
  /// \p Index to this pointer checking group. We can only add a pointer
  /// to a checking group if we will still be able to get
  /// the upper and lower bounds of the check. Returns true in case
  /// of success, false otherwise.
  bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck);
  bool addPointer(unsigned Index, const SCEV *Start, const SCEV *End,
                  unsigned AS, bool NeedsFreeze, ScalarEvolution &SE);

  /// The SCEV expression which represents the upper bound of all the
  /// pointers in this group.
  const SCEV *High;
  /// The SCEV expression which represents the lower bound of all the
  /// pointers in this group.
  const SCEV *Low;
  /// Indices of all the pointers that constitute this grouping.
  SmallVector<unsigned, 2> Members;
  /// Address space of the involved pointers.
  unsigned AddressSpace;
  /// Whether the pointer needs to be frozen after expansion, e.g. because it
  /// may be poison outside the loop.
  bool NeedsFreeze = false;
};

/// A memcheck which is made up of a pair of grouped pointers.
typedef std::pair<const RuntimeCheckingPtrGroup *,
                  const RuntimeCheckingPtrGroup *>
    RuntimePointerCheck;

struct PointerDiffInfo {
  const SCEV *SrcStart;
  const SCEV *SinkStart;
  unsigned AccessSize;
  bool NeedsFreeze;

  PointerDiffInfo(const SCEV *SrcStart, const SCEV *SinkStart,
                  unsigned AccessSize, bool NeedsFreeze)
      : SrcStart(SrcStart), SinkStart(SinkStart), AccessSize(AccessSize),
        NeedsFreeze(NeedsFreeze) {}
};

/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
  friend struct RuntimeCheckingPtrGroup;

public:
  struct PointerInfo {
    /// Holds the pointer value that we need to check.
    TrackingVH<Value> PointerValue;
    /// Holds the smallest byte address accessed by the pointer throughout all
    /// iterations of the loop.
    const SCEV *Start;
    /// Holds the largest byte address accessed by the pointer throughout all
    /// iterations of the loop, plus 1.
    const SCEV *End;
    /// Holds the information if this pointer is used for writing to memory.
    bool IsWritePtr;
    /// Holds the id of the set of pointers that could be dependent because of a
    /// shared underlying object.
    unsigned DependencySetId;
    /// Holds the id of the disjoint alias set to which this pointer belongs.
    unsigned AliasSetId;
    /// SCEV for the access.
    const SCEV *Expr;
    /// True if the pointer expressions needs to be frozen after expansion.
    bool NeedsFreeze;

    PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
                bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
                const SCEV *Expr, bool NeedsFreeze)
        : PointerValue(PointerValue), Start(Start), End(End),
          IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
          AliasSetId(AliasSetId), Expr(Expr), NeedsFreeze(NeedsFreeze) {}
  };

  RuntimePointerChecking(MemoryDepChecker &DC, ScalarEvolution *SE)
      : DC(DC), SE(SE) {}

  /// Reset the state of the pointer runtime information.
  void reset() {
    Need = false;
    Pointers.clear();
    Checks.clear();
  }

  /// Insert a pointer and calculate the start and end SCEVs.
  /// We need \p PSE in order to compute the SCEV expression of the pointer
  /// according to the assumptions that we've made during the analysis.
  /// The method might also version the pointer stride according to \p Strides,
  /// and add new predicates to \p PSE.
  void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy,
              bool WritePtr, unsigned DepSetId, unsigned ASId,
              PredicatedScalarEvolution &PSE, bool NeedsFreeze);

  /// No run-time memory checking is necessary.
  bool empty() const { return Pointers.empty(); }

  /// Generate the checks and store it.  This also performs the grouping
  /// of pointers to reduce the number of memchecks necessary.
  void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
                      bool UseDependencies);

  /// Returns the checks that generateChecks created. They can be used to ensure
  /// no read/write accesses overlap across all loop iterations.
  const SmallVectorImpl<RuntimePointerCheck> &getChecks() const {
    return Checks;
  }

  // Returns an optional list of (pointer-difference expressions, access size)
  // pairs that can be used to prove that there are no vectorization-preventing
  // dependencies at runtime. There is a vectorization-preventing dependency
  // if any pointer-difference is <u VF * InterleaveCount * access size. Returns
  // std::nullopt if pointer-difference checks cannot be used.
  std::optional<ArrayRef<PointerDiffInfo>> getDiffChecks() const {
    if (!CanUseDiffCheck)
      return std::nullopt;
    return {DiffChecks};
  }
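
  // A minimal sketch of consuming the diff checks, assuming a populated
  // RuntimePointerChecking \c RtChecking, a vectorization factor \c VF and
  // interleave count \c IC chosen by the client, and a hypothetical helper
  // \c emitDistanceCheck that materializes the comparison in IR:
  //
  //   if (auto Diffs = RtChecking.getDiffChecks())
  //     for (const PointerDiffInfo &C : *Diffs)
  //       emitDistanceCheck(C.SrcStart, C.SinkStart,
  //                         VF * IC * C.AccessSize, C.NeedsFreeze);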

  /// Decide if we need to add a check between two groups of pointers,
  /// according to needsChecking.
  bool needsChecking(const RuntimeCheckingPtrGroup &M,
                     const RuntimeCheckingPtrGroup &N) const;

  /// Returns the number of run-time checks required according to
  /// needsChecking.
  unsigned getNumberOfChecks() const { return Checks.size(); }

  /// Print the list of run-time memory checks necessary.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// Print \p Checks.
  void printChecks(raw_ostream &OS,
                   const SmallVectorImpl<RuntimePointerCheck> &Checks,
                   unsigned Depth = 0) const;

  /// This flag indicates if we need to add the runtime check.
  bool Need = false;

  /// Information about the pointers that may require checking.
  SmallVector<PointerInfo, 2> Pointers;

  /// Holds a partitioning of pointers into "check groups".
  SmallVector<RuntimeCheckingPtrGroup, 2> CheckingGroups;

  /// Check if pointers are in the same partition
  ///
  /// \p PtrToPartition contains the partition number for pointers (-1 if the
  /// pointer belongs to multiple partitions).
  static bool
  arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
                             unsigned PtrIdx1, unsigned PtrIdx2);

  /// Decide whether we need to issue a run-time check for pointer at
  /// index \p I and \p J to prove their independence.
  bool needsChecking(unsigned I, unsigned J) const;

  /// Return PointerInfo for pointer at index \p PtrIdx.
  const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
    return Pointers[PtrIdx];
  }

  ScalarEvolution *getSE() const { return SE; }

private:
  /// Groups pointers such that a single memcheck is required
  /// between two different groups. This will clear the CheckingGroups vector
  /// and re-compute it. We will only group dependencies if \p UseDependencies
  /// is true, otherwise we will create a separate group for each pointer.
  void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
                   bool UseDependencies);

  /// Generate the checks and return them.
  SmallVector<RuntimePointerCheck, 4> generateChecks();

  /// Try to add a new (pointer-difference, access size) pair to
  /// DiffCheck for checking groups \p CGI and \p CGJ. If pointer-difference
  /// checks cannot be used for the groups, set CanUseDiffCheck to false.
  void tryToCreateDiffCheck(const RuntimeCheckingPtrGroup &CGI,
                            const RuntimeCheckingPtrGroup &CGJ);

  MemoryDepChecker &DC;

  /// Holds a pointer to the ScalarEvolution analysis.
  ScalarEvolution *SE;

  /// Set of run-time checks required to establish independence of
  /// otherwise may-aliasing pointers in the loop.
  SmallVector<RuntimePointerCheck, 4> Checks;

  /// Flag indicating if pointer-difference checks can be used
  bool CanUseDiffCheck = true;

  /// A list of (pointer-difference, access size) pairs that can be used to
  /// prove that there are no vectorization-preventing dependencies.
  SmallVector<PointerDiffInfo> DiffChecks;
};

/// Drive the analysis of memory accesses in the loop
///
/// This class is responsible for analyzing the memory accesses of a loop.  It
/// collects the accesses and then its main helper, the AccessAnalysis class,
/// finds and categorizes the dependences in buildDependenceSets.
///
/// For memory dependences that can be analyzed at compile time, it determines
/// whether the dependence is part of a cycle inhibiting vectorization.  This
/// work is delegated to the MemoryDepChecker class.
///
/// For memory dependences that cannot be determined at compile time, it
/// generates run-time checks to prove independence.  This is done by
/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
/// RuntimePointerCheck class.
///
/// If pointers can wrap or can't be expressed as affine AddRec expressions by
/// ScalarEvolution, we will generate run-time checks by emitting a
/// SCEVUnionPredicate.
///
/// Checks for both memory dependences and the SCEV predicates contained in the
/// PSE must be emitted in order for the results of this analysis to be valid.
class LoopAccessInfo {
public:
  LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI,
                 AAResults *AA, DominatorTree *DT, LoopInfo *LI);

  /// Return true if we can analyze the memory accesses in the loop and there are
  /// no memory dependence cycles.
  bool canVectorizeMemory() const { return CanVecMem; }

  /// Return true if there is a convergent operation in the loop. There may
  /// still be reported runtime pointer checks that would be required, but it is
  /// not legal to insert them.
  bool hasConvergentOp() const { return HasConvergentOp; }

  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return PtrRtChecking.get();
  }

  /// Number of memchecks required to prove independence of otherwise
  /// may-alias pointers.
  unsigned getNumRuntimePointerChecks() const {
    return PtrRtChecking->getNumberOfChecks();
  }

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                    DominatorTree *DT);

  /// Returns true if value \p V is loop invariant.
  bool isInvariant(Value *V) const;

  unsigned getNumStores() const { return NumStores; }
  unsigned getNumLoads() const { return NumLoads;}

  /// The diagnostics report generated for the analysis.  E.g. why we
  /// couldn't analyze the loop.
  const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }

  /// The Memory Dependence Checker, which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  const MemoryDepChecker &getDepChecker() const { return *DepChecker; }

  /// Return the list of instructions that use \p Ptr to read or write
  /// memory.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const {
    return DepChecker->getInstructionsForAccess(Ptr, isWrite);
  }

  /// If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  const DenseMap<Value *, const SCEV *> &getSymbolicStrides() const {
    return SymbolicStrides;
  }

  /// Print the information about the memory accesses in the loop.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// If the loop has memory dependence involving an invariant address, i.e. two
  /// stores or a store and a load, then return true, else return false.
  bool hasDependenceInvolvingLoopInvariantAddress() const {
    return HasDependenceInvolvingLoopInvariantAddress;
  }

  /// Return the list of stores to invariant addresses.
  ArrayRef<StoreInst *> getStoresToInvariantAddresses() const {
    return StoresToInvariantAddresses;
  }

  /// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
  /// them to a more usable form.  All SCEV expressions during the analysis
  /// should be re-written (and therefore simplified) according to PSE.
  /// A user of LoopAccessAnalysis will need to emit the runtime checks
  /// associated with this predicate.
  const PredicatedScalarEvolution &getPSE() const { return *PSE; }

private:
  /// Analyze the loop.
  void analyzeLoop(AAResults *AA, LoopInfo *LI,
                   const TargetLibraryInfo *TLI, DominatorTree *DT);

  /// Check if the structure of the loop allows it to be analyzed by this
  /// pass.
  bool canAnalyzeLoop();

  /// Save the analysis remark.
  ///
  /// LAA does not directly emit the remarks.  Instead it stores them so that
  /// the client can retrieve and present them as its own analysis
  /// (e.g. -Rpass-analysis=loop-vectorize).
  OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
                                             Instruction *Instr = nullptr);

  /// Collect memory access with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  // Emits a remark for the first unsafe memory dependence in a loop.
  // Emits nothing if there are no unsafe dependences
  // or if the dependences were not recorded.
  void emitUnsafeDependenceRemark();

  std::unique_ptr<PredicatedScalarEvolution> PSE;

  /// We need to check that all of the pointers in this list are disjoint
  /// at runtime. Using std::unique_ptr to make using move ctor simpler.
  std::unique_ptr<RuntimePointerChecking> PtrRtChecking;

  /// The Memory Dependence Checker, which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  std::unique_ptr<MemoryDepChecker> DepChecker;

  Loop *TheLoop;

  unsigned NumLoads = 0;
  unsigned NumStores = 0;

  uint64_t MaxSafeDepDistBytes = -1;

  /// Cache the result of analyzeLoop.
  bool CanVecMem = false;
  bool HasConvergentOp = false;

  /// Indicator that there are non-vectorizable stores to a uniform address.
  bool HasDependenceInvolvingLoopInvariantAddress = false;

  /// List of stores to invariant addresses.
  SmallVector<StoreInst *> StoresToInvariantAddresses;

  /// The diagnostics report generated for the analysis.  E.g. why we
  /// couldn't analyze the loop.
  std::unique_ptr<OptimizationRemarkAnalysis> Report;

  /// If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  DenseMap<Value *, const SCEV *> SymbolicStrides;
};

/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
///
/// \p PtrToStride provides the mapping between the pointer value and its
/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
const SCEV *
replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                          const DenseMap<Value *, const SCEV *> &PtrToStride,
                          Value *Ptr);

/// If the pointer has a constant stride return it in units of the access type
/// size.  Otherwise return std::nullopt.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
/// associated with \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
/// The \p Assume parameter indicates if we are allowed to make additional
/// run-time assumptions.
///
/// Note that the analysis results are defined if-and-only-if the original
/// memory access was defined.  If that access was dead, or UB, then the
/// result of this function is undefined.
std::optional<int64_t>
getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr,
             const Loop *Lp,
             const DenseMap<Value *, const SCEV *> &StridesMap = DenseMap<Value *, const SCEV *>(),
             bool Assume = false, bool ShouldCheckWrap = true);
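
// A minimal usage sketch, assuming a load \c LD inside loop \c Lp and a
// PredicatedScalarEvolution \c PSE: a stride of 1, in units of the access
// type size, indicates a consecutive forward access.
//
//   std::optional<int64_t> Stride =
//       getPtrStride(PSE, LD->getType(), LD->getPointerOperand(), Lp);
//   bool ConsecutiveForward = Stride && *Stride == 1;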

/// Returns the distance between the pointers \p PtrA and \p PtrB iff they are
/// compatible and it is possible to calculate the distance between them. This
/// is a simple API that does not depend on the analysis pass.
/// \param StrictCheck Ensure that the calculated distance matches the
/// type-based one after stripping all bitcasts from the provided pointers.
std::optional<int> getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
                                   Value *PtrB, const DataLayout &DL,
                                   ScalarEvolution &SE,
                                   bool StrictCheck = false,
                                   bool CheckType = true);

/// Attempt to sort the pointers in \p VL and return the sorted indices
/// in \p SortedIndices, if reordering is required.
///
/// Returns 'true' if sorting is legal, otherwise returns 'false'.
///
/// For example, for a given \p VL of memory accesses in program order, a[i+4],
/// a[i+0], a[i+1] and a[i+7], this function will sort the accesses as a[i+0],
/// a[i+1], a[i+4], a[i+7] and save the positions of the accesses within
/// \p VL in \p SortedIndices as <1,2,0,3>.
bool sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy, const DataLayout &DL,
                     ScalarEvolution &SE,
                     SmallVectorImpl<unsigned> &SortedIndices);
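
// A minimal usage sketch for the example above, assuming \c VL holds the
// pointer operands of a[i+4], a[i+0], a[i+1] and a[i+7] in program order:
//
//   SmallVector<unsigned, 4> SortedIndices;
//   if (sortPtrAccesses(VL, ElemTy, DL, SE, SortedIndices))
//     ; // SortedIndices now holds <1,2,0,3>: the position within VL of
//       // each access in increasing-address order.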

/// Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                         ScalarEvolution &SE, bool CheckType = true);
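
// A minimal sketch relating the two helpers above, assuming loads \c LA and
// \c LB of element type \c ElemTy: isConsecutiveAccess succeeds essentially
// when a getPointersDiff query yields a distance of exactly one element.
//
//   std::optional<int> Diff =
//       getPointersDiff(ElemTy, getLoadStorePointerOperand(LA), ElemTy,
//                       getLoadStorePointerOperand(LB), DL, SE,
//                       /*StrictCheck=*/true);
//   bool Consecutive = isConsecutiveAccess(LA, LB, DL, SE);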

class LoopAccessInfoManager {
  /// The cache.
  DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;

  // The used analysis passes.
  ScalarEvolution &SE;
  AAResults &AA;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLibraryInfo *TLI = nullptr;

public:
  LoopAccessInfoManager(ScalarEvolution &SE, AAResults &AA, DominatorTree &DT,
                        LoopInfo &LI, const TargetLibraryInfo *TLI)
      : SE(SE), AA(AA), DT(DT), LI(LI), TLI(TLI) {}

  const LoopAccessInfo &getInfo(Loop &L);

  void clear() { LoopAccessInfoMap.clear(); }

  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);
};

/// This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand.  This can be initiated by
/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult returns a LoopAccessInfoManager; see the LoopAccessInfo class
/// for the specifics of what information is provided per loop.
class LoopAccessAnalysis
    : public AnalysisInfoMixin<LoopAccessAnalysis> {
  friend AnalysisInfoMixin<LoopAccessAnalysis>;
  static AnalysisKey Key;

public:
  typedef LoopAccessInfoManager Result;

  Result run(Function &F, FunctionAnalysisManager &AM);
};
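
// A minimal usage sketch from a function pass under the new pass manager,
// assuming a FunctionAnalysisManager \c AM and a loop \c L of \c F:
//
//   LoopAccessInfoManager &LAIs = AM.getResult<LoopAccessAnalysis>(F);
//   const LoopAccessInfo &LAI = LAIs.getInfo(*L);
//   if (LAI.canVectorizeMemory() && LAI.getNumRuntimePointerChecks() == 0)
//     ; // Accesses are provably independent without runtime checks.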

inline Instruction *MemoryDepChecker::Dependence::getSource(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Source];
}

inline Instruction *MemoryDepChecker::Dependence::getDestination(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Destination];
}

} // End llvm namespace

#endif

//===- llvm/Analysis/LoopCacheAnalysis.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the interface for the loop cache analysis.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPCACHEANALYSIS_H
#define LLVM_ANALYSIS_LOOPCACHEANALYSIS_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"
#include <optional>

namespace llvm {

class AAResults;
class DependenceInfo;
class Instruction;
class LPMUpdater;
class raw_ostream;
class LoopInfo;
class Loop;
class ScalarEvolution;
class SCEV;
class TargetTransformInfo;

using CacheCostTy = int64_t;
using LoopVectorTy = SmallVector<Loop *, 8>;

/// Represents a memory reference as a base pointer and a set of indexing
/// operations. For example given the array reference A[i][2j+1][3k+2] in a
/// 3-dim loop nest:
///   for(i=0;i<n;++i)
///     for(j=0;j<m;++j)
///       for(k=0;k<o;++k)
///         ... A[i][2j+1][3k+2] ...
/// We expect:
///   BasePointer -> A
///   Subscripts -> [{0,+,1}<%for.i>][{1,+,2}<%for.j>][{2,+,3}<%for.k>]
///   Sizes -> [m][o][4]
class IndexedReference {
  friend raw_ostream &operator<<(raw_ostream &OS, const IndexedReference &R);

public:
  /// Construct an indexed reference given a \p StoreOrLoadInst instruction.
  IndexedReference(Instruction &StoreOrLoadInst, const LoopInfo &LI,
                   ScalarEvolution &SE);

  bool isValid() const { return IsValid; }
  const SCEV *getBasePointer() const { return BasePointer; }
  size_t getNumSubscripts() const { return Subscripts.size(); }
  const SCEV *getSubscript(unsigned SubNum) const {
    assert(SubNum < getNumSubscripts() && "Invalid subscript number");
    return Subscripts[SubNum];
  }
  const SCEV *getFirstSubscript() const {
    assert(!Subscripts.empty() && "Expecting non-empty container");
    return Subscripts.front();
  }
  const SCEV *getLastSubscript() const {
    assert(!Subscripts.empty() && "Expecting non-empty container");
    return Subscripts.back();
  }

  /// Return true/false if the current object and the indexed reference \p Other
  /// are/aren't in the same cache line of size \p CLS. Two references are in
  /// the same cache line iff the distance between them in the innermost
  /// dimension is less than the cache line size. Return std::nullopt if unsure.
  std::optional<bool> hasSpacialReuse(const IndexedReference &Other,
                                      unsigned CLS, AAResults &AA) const;

  /// Return true if the current object and the indexed reference \p Other
  /// have distance smaller than \p MaxDistance in the dimension associated with
  /// the given loop \p L. Return false if the distance is not smaller than \p
  /// MaxDistance and std::nullopt if unsure.
  std::optional<bool> hasTemporalReuse(const IndexedReference &Other,
                                       unsigned MaxDistance, const Loop &L,
                                       DependenceInfo &DI, AAResults &AA) const;

  /// Compute the cost of the reference w.r.t. the given loop \p L when it is
  /// considered in the innermost position in the loop nest.
  /// The cost is defined as:
  ///   - equal to one if the reference is loop invariant, or
  ///   - equal to '(TripCount * stride) / cache_line_size' if:
  ///     + the reference stride is less than the cache line size, and
  ///     + the coefficient of this loop's index variable used in all other
  ///       subscripts is zero
  ///   - or otherwise equal to 'TripCount'.
  CacheCostTy computeRefCost(const Loop &L, unsigned CLS) const;
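
  // A worked example under assumed numbers: a reference A[i] with 4-byte
  // elements, a trip count of 100 and a 64-byte cache line has stride
  // 4 < CLS, so its cost is (100 * 4) / 64, i.e. roughly 6 cache lines.
  // A loop-invariant reference would instead cost 1, and a reference with a
  // stride of at least CLS would cost the full trip count of 100.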

private:
  /// Attempt to delinearize the indexed reference.
  bool delinearize(const LoopInfo &LI);

  /// Attempt to delinearize \p AccessFn for fixed-size arrays.
  bool tryDelinearizeFixedSize(const SCEV *AccessFn,
                               SmallVectorImpl<const SCEV *> &Subscripts);

  /// Return true if the index reference is invariant with respect to loop \p L.
  bool isLoopInvariant(const Loop &L) const;

  /// Return true if the indexed reference is 'consecutive' in loop \p L.
  /// An indexed reference is 'consecutive' if the only coefficient that uses
  /// the loop induction variable is the rightmost one, and the access stride is
  /// smaller than the cache line size \p CLS. Provide a valid \p Stride value
  /// if the indexed reference is 'consecutive'.
  bool isConsecutive(const Loop &L, const SCEV *&Stride, unsigned CLS) const;

  /// Retrieve the index of the subscript corresponding to the given loop \p
  /// L. Return a zero-based positive index if the subscript index is
  /// successfully located and a negative value otherwise. For example given the
  /// indexed reference 'A[i][2j+1][3k+2]', the call
  /// 'getSubscriptIndex(loop-k)' would return value 2.
  int getSubscriptIndex(const Loop &L) const;

  /// Return the coefficient used in the rightmost dimension.
  const SCEV *getLastCoefficient() const;

  /// Return true if the coefficient corresponding to induction variable of
  /// loop \p L in the given \p Subscript is zero or is loop invariant in \p L.
  bool isCoeffForLoopZeroOrInvariant(const SCEV &Subscript,
                                     const Loop &L) const;

  /// Verify that the given \p Subscript is 'well formed' (must be a simple add
  /// recurrence).
  bool isSimpleAddRecurrence(const SCEV &Subscript, const Loop &L) const;

  /// Return true if the given reference \p Other is definitely aliased with
  /// the indexed reference represented by this class.
  bool isAliased(const IndexedReference &Other, AAResults &AA) const;

private:
  /// True if the reference can be delinearized, false otherwise.
  bool IsValid = false;

  /// Represent the memory reference instruction.
  Instruction &StoreOrLoadInst;

  /// The base pointer of the memory reference.
  const SCEV *BasePointer = nullptr;

  /// The subscripts (indexes) of the memory reference.
  SmallVector<const SCEV *, 3> Subscripts;

  /// The dimensions of the memory reference.
  SmallVector<const SCEV *, 3> Sizes;

  ScalarEvolution &SE;
};

/// A reference group represents a set of memory references that exhibit
/// temporal or spatial reuse. Two references belong to the same
/// reference group with respect to an inner loop L iff:
/// 1. they have a loop independent dependency, or
/// 2. they have a loop carried dependence with a small dependence distance
///    (e.g. less than 2) carried by the inner loop, or
/// 3. they refer to the same array, and the subscript in their innermost
///    dimension is less than or equal to 'd' (where 'd' is less than the cache
///    line size)
///
/// Intuitively a reference group represents memory references that access
/// the same cache line. Conditions 1,2 above account for temporal reuse, while
/// contition 3 accounts for spacial reuse.
using ReferenceGroupTy = SmallVector<std::unique_ptr<IndexedReference>, 8>;
using ReferenceGroupsTy = SmallVector<ReferenceGroupTy, 8>;
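
// Illustrative sketch (hypothetical source): w.r.t. loop 'i' below, the two
// 'A[i]' references exhibit temporal reuse (condition 1), and 'A[i]' and
// 'A[i + 1]' typically exhibit spatial reuse within a cache line
// (condition 3), so all three would land in the same reference group:
//
//   for (int i = 0; i < N; ++i)
//     B[i] = A[i] + A[i + 1] + A[i];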

/// \c CacheCost represents the estimated cost of an inner loop as the number
/// of cache lines used by the memory references it contains.
/// The 'cache cost' of a loop 'L' in a loop nest 'LN' is computed as the sum of
/// the cache costs of all of its reference groups when the loop is considered
/// to be in the innermost position in the nest.
/// A reference group represents memory references that fall into the same
/// cache line. Each reference group is analyzed with respect to the innermost
/// loop in a loop nest. The cost of a reference is defined as follows:
///  - one if it is loop invariant w.r.t. the innermost loop,
///  - equal to '(TripCount * RefStride) / CLS' (i.e. TripCount / (CLS /
///    RefStride)) if the reference stride 'RefStride' is less than the cache
///    line size 'CLS', and the coefficient of this loop's index variable used
///    in all other subscripts is zero, or
///  - equal to the innermost loop trip count if the reference stride is
///    greater than or equal to the cache line size CLS.
class CacheCost {
  friend raw_ostream &operator<<(raw_ostream &OS, const CacheCost &CC);
  using LoopTripCountTy = std::pair<const Loop *, unsigned>;
  using LoopCacheCostTy = std::pair<const Loop *, CacheCostTy>;

public:
  static CacheCostTy constexpr InvalidCost = -1;

  /// Construct a CacheCost object for the loop nest described by \p Loops.
  /// The optional parameter \p TRT can be used to specify the max. distance
  /// between array elements accessed in a loop so that the elements are
  /// classified to have temporal reuse.
  CacheCost(const LoopVectorTy &Loops, const LoopInfo &LI, ScalarEvolution &SE,
            TargetTransformInfo &TTI, AAResults &AA, DependenceInfo &DI,
            std::optional<unsigned> TRT = std::nullopt);

  /// Create a CacheCost for the loop nest rooted by \p Root.
  /// The optional parameter \p TRT can be used to specify the max. distance
  /// between array elements accessed in a loop so that the elements are
  /// classified to have temporal reuse.
  static std::unique_ptr<CacheCost>
  getCacheCost(Loop &Root, LoopStandardAnalysisResults &AR, DependenceInfo &DI,
               std::optional<unsigned> TRT = std::nullopt);
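
  // Minimal usage sketch (assumes a loop-pass context where 'Root', 'AR' and
  // 'DI' are available):
  //
  //   if (std::unique_ptr<CacheCost> CC =
  //           CacheCost::getCacheCost(Root, AR, DI))
  //     for (const auto &[L, Cost] : CC->getLoopCosts())
  //       dbgs() << L->getName() << ": " << Cost << "\n";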

  /// Return the estimated cost of loop \p L if the given loop is part of the
  /// loop nest associated with this object. Return InvalidCost (-1) otherwise.
  CacheCostTy getLoopCost(const Loop &L) const {
    auto IT = llvm::find_if(LoopCosts, [&L](const LoopCacheCostTy &LCC) {
      return LCC.first == &L;
    });
    return (IT != LoopCosts.end()) ? (*IT).second : InvalidCost;
  }

  /// Return the estimated ordered loop costs.
  ArrayRef<LoopCacheCostTy> getLoopCosts() const { return LoopCosts; }

private:
  /// Calculate the cache footprint of each loop in the nest (when it is
  /// considered to be in the innermost position).
  void calculateCacheFootprint();

  /// Partition store/load instructions in the loop nest into reference groups.
  /// Two or more memory accesses belong in the same reference group if they
  /// share the same cache line.
  bool populateReferenceGroups(ReferenceGroupsTy &RefGroups) const;

  /// Calculate the cost of the given loop \p L assuming it is the innermost
  /// loop in the nest.
  CacheCostTy computeLoopCacheCost(const Loop &L,
                                   const ReferenceGroupsTy &RefGroups) const;

  /// Compute the cost of a representative reference in reference group \p RG
  /// when the given loop \p L is considered as the innermost loop in the nest.
  /// The computed cost is an estimate for the number of cache lines used by the
  /// reference group. The representative reference cost is defined as:
  ///   - equal to one if the reference is loop invariant, or
  ///   - equal to '(TripCount * stride) / cache_line_size' if (a) loop \p L's
  ///     induction variable is used only in the reference subscript associated
  ///     with loop \p L, and (b) the reference stride is less than the cache
  ///     line size, or
  ///   - TripCount otherwise
  CacheCostTy computeRefGroupCacheCost(const ReferenceGroupTy &RG,
                                       const Loop &L) const;

  /// Sort the LoopCosts vector by decreasing cache cost.
  void sortLoopCosts() {
    stable_sort(LoopCosts,
                [](const LoopCacheCostTy &A, const LoopCacheCostTy &B) {
                  return A.second > B.second;
                });
  }

private:
  /// Loops in the loop nest associated with this object.
  LoopVectorTy Loops;

  /// Trip counts for the loops in the loop nest associated with this object.
  SmallVector<LoopTripCountTy, 3> TripCounts;

  /// Cache costs for the loops in the loop nest associated with this object.
  SmallVector<LoopCacheCostTy, 3> LoopCosts;

  /// The max. distance between array elements accessed in a loop so that the
  /// elements are classified to have temporal reuse.
  std::optional<unsigned> TRT;

  const LoopInfo &LI;
  ScalarEvolution &SE;
  TargetTransformInfo &TTI;
  AAResults &AA;
  DependenceInfo &DI;
};

raw_ostream &operator<<(raw_ostream &OS, const IndexedReference &R);
raw_ostream &operator<<(raw_ostream &OS, const CacheCost &CC);

/// Printer pass for the \c CacheCost results.
class LoopCachePrinterPass : public PassInfoMixin<LoopCachePrinterPass> {
  raw_ostream &OS;

public:
  explicit LoopCachePrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
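
// The printer can be exercised from the command line via the new pass
// manager; the pass name below is assumed to match the registry entry:
//
//   opt -passes='print<loop-cache-cost>' -disable-output input.ll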

} // namespace llvm

#endif // LLVM_ANALYSIS_LOOPCACHEANALYSIS_H
//===- llvm/Analysis/Trace.h - Represent one trace of LLVM code -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class represents a single trace of LLVM basic blocks.  A trace is a
// single entry, multiple exit, region of code that is often hot.  Trace-based
// optimizations treat traces almost like they are a large, strange, basic
// block: because the trace path is assumed to be hot, optimizations for the
// fall-through path are made at the expense of the non-fall-through paths.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TRACE_H
#define LLVM_ANALYSIS_TRACE_H

#include <cassert>
#include <vector>

namespace llvm {

class BasicBlock;
class Function;
class Module;
class raw_ostream;

class Trace {
  using BasicBlockListType = std::vector<BasicBlock *>;

  BasicBlockListType BasicBlocks;

public:
  /// Trace ctor - Make a new trace from a vector of basic blocks,
  /// residing in the function which is the parent of the first
  /// basic block in the vector.
  Trace(const std::vector<BasicBlock *> &vBB) : BasicBlocks (vBB) {}

  /// getEntryBasicBlock - Return the entry basic block (first block)
  /// of the trace.
  BasicBlock *getEntryBasicBlock () const { return BasicBlocks[0]; }

  /// operator[]/getBlock - Return basic block N in the trace.
  BasicBlock *operator[](unsigned i) const { return BasicBlocks[i]; }
  BasicBlock *getBlock(unsigned i)   const { return BasicBlocks[i]; }

  /// getFunction - Return this trace's parent function.
  Function *getFunction () const;

  /// getModule - Return the Module that contains this trace's parent
  /// function.
  Module *getModule () const;

  /// getBlockIndex - Return the index of the specified basic block in the
  /// trace, or -1 if it is not in the trace.
  int getBlockIndex(const BasicBlock *X) const {
    for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
      if (BasicBlocks[i] == X)
        return i;
    return -1;
  }

  /// contains - Returns true if this trace contains the given basic
  /// block.
  bool contains(const BasicBlock *X) const {
    return getBlockIndex(X) != -1;
  }

  /// Returns true if B1 occurs before B2 in the trace, or if it is the same
  /// block as B2.  Both blocks must be in the trace.
  bool dominates(const BasicBlock *B1, const BasicBlock *B2) const {
    int B1Idx = getBlockIndex(B1), B2Idx = getBlockIndex(B2);
    assert(B1Idx != -1 && B2Idx != -1 && "Block is not in the trace!");
    return B1Idx <= B2Idx;
  }

  // BasicBlock iterators...
  using iterator = BasicBlockListType::iterator;
  using const_iterator = BasicBlockListType::const_iterator;
  using reverse_iterator = std::reverse_iterator<iterator>;
  using const_reverse_iterator = std::reverse_iterator<const_iterator>;

  iterator                begin()       { return BasicBlocks.begin(); }
  const_iterator          begin() const { return BasicBlocks.begin(); }
  iterator                end  ()       { return BasicBlocks.end();   }
  const_iterator          end  () const { return BasicBlocks.end();   }

  reverse_iterator       rbegin()       { return BasicBlocks.rbegin(); }
  const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
  reverse_iterator       rend  ()       { return BasicBlocks.rend();   }
  const_reverse_iterator rend  () const { return BasicBlocks.rend();   }

  unsigned                 size() const { return BasicBlocks.size(); }
  bool                    empty() const { return BasicBlocks.empty(); }

  iterator erase(iterator q)               { return BasicBlocks.erase (q); }
  iterator erase(iterator q1, iterator q2) { return BasicBlocks.erase (q1, q2); }

  /// print - Write trace to output stream.
  void print(raw_ostream &O) const;

  /// dump - Debugger convenience method; writes trace to standard error
  /// output stream.
  void dump() const;
};
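
// Minimal usage sketch (assumes 'BB0' and 'BB1' are basic blocks on a known
// hot path, listed in order):
//
//   Trace T({BB0, BB1});
//   assert(T.getEntryBasicBlock() == BB0);
//   if (T.dominates(BB0, BB1))
//     ; // BB0 occurs no later than BB1 in the trace.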

} // end namespace llvm

#endif // LLVM_ANALYSIS_TRACE_H
//===- IteratedDominanceFrontier.h - Calculate IDF --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_ITERATEDDOMINANCEFRONTIER_H
#define LLVM_ANALYSIS_ITERATEDDOMINANCEFRONTIER_H

#include "llvm/Support/CFGDiff.h"
#include "llvm/Support/GenericIteratedDominanceFrontier.h"

namespace llvm {

class BasicBlock;

namespace IDFCalculatorDetail {

/// Specialization for BasicBlock for the optional use of GraphDiff.
template <bool IsPostDom> struct ChildrenGetterTy<BasicBlock, IsPostDom> {
  using NodeRef = BasicBlock *;
  using ChildrenTy = SmallVector<BasicBlock *, 8>;

  ChildrenGetterTy() = default;
  ChildrenGetterTy(const GraphDiff<BasicBlock *, IsPostDom> *GD) : GD(GD) {
    assert(GD);
  }

  ChildrenTy get(const NodeRef &N);

  const GraphDiff<BasicBlock *, IsPostDom> *GD = nullptr;
};

} // end of namespace IDFCalculatorDetail

template <bool IsPostDom>
class IDFCalculator final : public IDFCalculatorBase<BasicBlock, IsPostDom> {
public:
  using IDFCalculatorBase =
      typename llvm::IDFCalculatorBase<BasicBlock, IsPostDom>;
  using ChildrenGetterTy = typename IDFCalculatorBase::ChildrenGetterTy;

  IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
      : IDFCalculatorBase(DT) {}

  IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT,
                const GraphDiff<BasicBlock *, IsPostDom> *GD)
      : IDFCalculatorBase(DT, ChildrenGetterTy(GD)) {
    assert(GD);
  }
};

using ForwardIDFCalculator = IDFCalculator<false>;
using ReverseIDFCalculator = IDFCalculator<true>;
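
// Minimal usage sketch for PHI-placement style queries (assumes 'DT' is an
// up-to-date DominatorTree and 'DefBlocks' is a SmallPtrSet of blocks that
// define the value of interest):
//
//   ForwardIDFCalculator IDF(DT);
//   IDF.setDefiningBlocks(DefBlocks);
//   SmallVector<BasicBlock *, 32> PHIBlocks;
//   IDF.calculate(PHIBlocks); // Blocks that would need a PHI node.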

//===----------------------------------------------------------------------===//
// Implementation.
//===----------------------------------------------------------------------===//

namespace IDFCalculatorDetail {

template <bool IsPostDom>
typename ChildrenGetterTy<BasicBlock, IsPostDom>::ChildrenTy
ChildrenGetterTy<BasicBlock, IsPostDom>::get(const NodeRef &N) {

  using OrderedNodeTy =
      typename IDFCalculatorBase<BasicBlock, IsPostDom>::OrderedNodeTy;

  if (!GD) {
    auto Children = children<OrderedNodeTy>(N);
    return {Children.begin(), Children.end()};
  }

  return GD->template getChildren<IsPostDom>(N);
}

} // end of namespace IDFCalculatorDetail

} // end of namespace llvm

#endif
//===- llvm/Analysis/VectorUtils.h - Vector utilities -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some vectorizer utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VECTORUTILS_H
#define LLVM_ANALYSIS_VECTORUTILS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/CheckedArithmetic.h"

namespace llvm {
class TargetLibraryInfo;

/// Describes the type of Parameters
enum class VFParamKind {
  Vector,            // No semantic information.
  OMP_Linear,        // declare simd linear(i)
  OMP_LinearRef,     // declare simd linear(ref(i))
  OMP_LinearVal,     // declare simd linear(val(i))
  OMP_LinearUVal,    // declare simd linear(uval(i))
  OMP_LinearPos,     // declare simd linear(i:c) uniform(c)
  OMP_LinearValPos,  // declare simd linear(val(i:c)) uniform(c)
  OMP_LinearRefPos,  // declare simd linear(ref(i:c)) uniform(c)
  OMP_LinearUValPos, // declare simd linear(uval(i:c)) uniform(c)
  OMP_Uniform,       // declare simd uniform(i)
  GlobalPredicate,   // Global logical predicate that acts on all lanes
                     // of the input and output mask concurrently. For
                     // example, it is implied by the `M` token in the
                     // Vector Function ABI mangled name.
  Unknown
};

/// Describes the type of Instruction Set Architecture
enum class VFISAKind {
  AdvancedSIMD, // AArch64 Advanced SIMD (NEON)
  SVE,          // AArch64 Scalable Vector Extension
  SSE,          // x86 SSE
  AVX,          // x86 AVX
  AVX2,         // x86 AVX2
  AVX512,       // x86 AVX512
  LLVM,         // LLVM internal ISA for functions that are not
                // attached to an existing ABI via name mangling.
  Unknown       // Unknown ISA
};

/// Encapsulates information needed to describe a parameter.
///
/// The description of the parameter is not linked directly to
/// OpenMP or any other vector function description. This structure
/// is extendible to handle other paradigms that describe vector
/// functions and their parameters.
struct VFParameter {
  unsigned ParamPos;         // Parameter Position in Scalar Function.
  VFParamKind ParamKind;     // Kind of Parameter.
  int LinearStepOrPos = 0;   // Step or Position of the Parameter.
  Align Alignment = Align(); // Optional alignment in bytes, defaulted to 1.

  // Comparison operator.
  bool operator==(const VFParameter &Other) const {
    return std::tie(ParamPos, ParamKind, LinearStepOrPos, Alignment) ==
           std::tie(Other.ParamPos, Other.ParamKind, Other.LinearStepOrPos,
                    Other.Alignment);
  }
};

/// Contains the information about the kind of vectorization
/// available.
///
/// This object is independent of the paradigm used to
/// represent vector functions. In particular, it is not attached to
/// any target-specific ABI.
struct VFShape {
  ElementCount VF;                        // Vectorization factor.
  SmallVector<VFParameter, 8> Parameters; // List of parameter information.
  // Comparison operator.
  bool operator==(const VFShape &Other) const {
    return std::tie(VF, Parameters) == std::tie(Other.VF, Other.Parameters);
  }

  /// Update the parameter in position P.ParamPos to P.
  void updateParam(VFParameter P) {
    assert(P.ParamPos < Parameters.size() && "Invalid parameter position.");
    Parameters[P.ParamPos] = P;
    assert(hasValidParameterList() && "Invalid parameter list");
  }

  // Retrieve the VFShape that can be used to map a (scalar) function to itself,
  // with VF = 1.
  static VFShape getScalarShape(const CallInst &CI) {
    return VFShape::get(CI, ElementCount::getFixed(1),
                        /*HasGlobalPredicate*/ false);
  }

  // Retrieve the basic vectorization shape of the function, where all
  // parameters are mapped to VFParamKind::Vector with \p EC
  // lanes. Specifies whether the function has a Global Predicate
  // argument via \p HasGlobalPred.
  static VFShape get(const CallInst &CI, ElementCount EC, bool HasGlobalPred) {
    SmallVector<VFParameter, 8> Parameters;
    for (unsigned I = 0; I < CI.arg_size(); ++I)
      Parameters.push_back(VFParameter({I, VFParamKind::Vector}));
    if (HasGlobalPred)
      Parameters.push_back(
          VFParameter({CI.arg_size(), VFParamKind::GlobalPredicate}));

    return {EC, Parameters};
  }
  /// Validation check on the Parameters in the VFShape.
  bool hasValidParameterList() const;
};
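
// Illustrative sketch: build the shape of a 4-lane, unpredicated variant of a
// call 'CI' (assumed to have at least two arguments), then mark parameter 1
// as uniform:
//
//   VFShape Shape = VFShape::get(CI, ElementCount::getFixed(4),
//                                /*HasGlobalPred=*/false);
//   Shape.updateParam({1, VFParamKind::OMP_Uniform});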

/// Holds the VFShape for a specific scalar to vector function mapping.
struct VFInfo {
  VFShape Shape;          /// Classification of the vector function.
  std::string ScalarName; /// Scalar Function Name.
  std::string VectorName; /// Vector Function Name associated to this VFInfo.
  VFISAKind ISA;          /// Instruction Set Architecture.

  /// Returns the index of the first parameter with the kind 'GlobalPredicate',
  /// if any exist.
  std::optional<unsigned> getParamIndexForOptionalMask() const {
    unsigned ParamCount = Shape.Parameters.size();
    for (unsigned i = 0; i < ParamCount; ++i)
      if (Shape.Parameters[i].ParamKind == VFParamKind::GlobalPredicate)
        return i;

    return std::nullopt;
  }

  /// Returns true if at least one of the operands to the vectorized function
  /// has the kind 'GlobalPredicate'.
  bool isMasked() const { return getParamIndexForOptionalMask().has_value(); }
};

namespace VFABI {
/// LLVM Internal VFABI ISA token for vector functions.
static constexpr char const *_LLVM_ = "_LLVM_";
/// Prefix for internal name redirection for vector function that
/// tells the compiler to scalarize the call using the scalar name
/// of the function. For example, a mangled name like
/// `_ZGV_LLVM_N2v_foo(_LLVM_Scalarize_foo)` would tell the
/// vectorizer to vectorize the scalar call `foo`, and to scalarize
/// it once vectorization is done.
static constexpr char const *_LLVM_Scalarize_ = "_LLVM_Scalarize_";

/// Function to construct a VFInfo out of a mangled name in the
/// following format:
///
/// <VFABI_name>{(<redirection>)}
///
/// where <VFABI_name> is the name of the vector function, mangled according
/// to the rules described in the Vector Function ABI of the target vector
/// extension (or <isa> from now on). The <VFABI_name> is in the following
/// format:
///
/// _ZGV<isa><mask><vlen><parameters>_<scalarname>[(<redirection>)]
///
/// This method supports demangling rules for the following <isa>:
///
/// * AArch64: https://developer.arm.com/docs/101129/latest
///
/// * x86 (libmvec): https://sourceware.org/glibc/wiki/libmvec and
///  https://sourceware.org/glibc/wiki/libmvec?action=AttachFile&do=view&target=VectorABI.txt
///
/// \param MangledName -> input string in the format
/// _ZGV<isa><mask><vlen><parameters>_<scalarname>[(<redirection>)].
/// \param M -> Module used to retrieve information about the vector
/// function that are not possible to retrieve from the mangled
/// name. At the moment, this parameter is needed only to retrieve the
/// Vectorization Factor of scalable vector functions from their
/// respective IR declarations.
std::optional<VFInfo> tryDemangleForVFABI(StringRef MangledName,
                                          const Module &M);

/// This routine mangles the given VectorName according to the LangRef
/// specification for vector-function-abi-variant attribute and is specific to
/// the TLI mappings. It is the responsibility of the caller to make sure that
/// this is only used if all parameters in the vector function are vector type.
/// This returned string holds scalar-to-vector mapping:
///    _ZGV<isa><mask><vlen><vparams>_<scalarname>(<vectorname>)
///
/// where:
///
/// <isa> = "_LLVM_"
/// <mask> = "M" if masked, "N" if no mask.
/// <vlen> = Number of concurrent lanes, stored in the `VectorizationFactor`
///          field of the `VecDesc` struct. If the number of lanes is scalable
///          then 'x' is printed instead.
/// <vparams> = "v", repeated numArgs times.
/// <scalarname> = the name of the scalar function.
/// <vectorname> = the name of the vector function.
std::string mangleTLIVectorName(StringRef VectorName, StringRef ScalarName,
                                unsigned numArgs, ElementCount VF,
                                bool Masked = false);
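
// For example (illustrative), mangling a 4-lane unmasked mapping of 'sinf' to
// a vector variant named 'vec_sinf' with one vector parameter would produce:
//
//   _ZGV_LLVM_N4v_sinf(vec_sinf)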

/// Retrieve the `VFParamKind` from a string token.
VFParamKind getVFParamKindFromString(const StringRef Token);

// Name of the attribute where the variant mappings are stored.
static constexpr char const *MappingsAttrName = "vector-function-abi-variant";

/// Populates a set of strings representing the Vector Function ABI variants
/// associated to the CallInst CI. If the CI does not contain the
/// vector-function-abi-variant attribute, we return without populating
/// VariantMappings, i.e. callers of getVectorVariantNames need not check for
/// the presence of the attribute (see InjectTLIMappings).
void getVectorVariantNames(const CallInst &CI,
                           SmallVectorImpl<std::string> &VariantMappings);
} // end namespace VFABI

/// The Vector Function Database.
///
/// Helper class used to find the vector functions associated to a
/// scalar CallInst.
class VFDatabase {
  /// The Module of the CallInst CI.
  const Module *M;
  /// The CallInst instance being queried for scalar to vector mappings.
  const CallInst &CI;
  /// List of vector functions descriptors associated to the call
  /// instruction.
  const SmallVector<VFInfo, 8> ScalarToVectorMappings;

  /// Retrieve the scalar-to-vector mappings associated to the rules of
  /// the Vector Function ABI.
  static void getVFABIMappings(const CallInst &CI,
                               SmallVectorImpl<VFInfo> &Mappings) {
    if (!CI.getCalledFunction())
      return;

    const StringRef ScalarName = CI.getCalledFunction()->getName();

    SmallVector<std::string, 8> ListOfStrings;
    // The check for the vector-function-abi-variant attribute is done when
    // retrieving the vector variant names here.
    VFABI::getVectorVariantNames(CI, ListOfStrings);
    if (ListOfStrings.empty())
      return;
    for (const auto &MangledName : ListOfStrings) {
      const std::optional<VFInfo> Shape =
          VFABI::tryDemangleForVFABI(MangledName, *(CI.getModule()));
      // A match is found via scalar and vector names, and also by
      // ensuring that the variant described in the attribute has a
      // corresponding definition or declaration of the vector
      // function in the Module M.
      if (Shape && (Shape->ScalarName == ScalarName)) {
        assert(CI.getModule()->getFunction(Shape->VectorName) &&
               "Vector function is missing.");
        Mappings.push_back(*Shape);
      }
    }
  }

public:
  /// Retrieve all the VFInfo instances associated to the CallInst CI.
  static SmallVector<VFInfo, 8> getMappings(const CallInst &CI) {
    SmallVector<VFInfo, 8> Ret;

    // Get mappings from the Vector Function ABI variants.
    getVFABIMappings(CI, Ret);

    // Other non-VFABI variants should be retrieved here.

    return Ret;
  }

  static bool hasMaskedVariant(const CallInst &CI,
                               std::optional<ElementCount> VF = std::nullopt) {
    // Check whether we have at least one masked vector version of a scalar
    // function. If no VF is specified then we check for any masked variant,
    // otherwise we look for one that matches the supplied VF.
    auto Mappings = VFDatabase::getMappings(CI);
    for (VFInfo Info : Mappings)
      if (!VF || Info.Shape.VF == *VF)
        if (Info.isMasked())
          return true;

    return false;
  }

  /// Constructor, requires a CallInst instance.
  VFDatabase(CallInst &CI)
      : M(CI.getModule()), CI(CI),
        ScalarToVectorMappings(VFDatabase::getMappings(CI)) {}
  /// \defgroup VFDatabase query interface.
  ///
  /// @{
  /// Retrieve the Function with VFShape \p Shape.
  Function *getVectorizedFunction(const VFShape &Shape) const {
    if (Shape == VFShape::getScalarShape(CI))
      return CI.getCalledFunction();

    for (const auto &Info : ScalarToVectorMappings)
      if (Info.Shape == Shape)
        return M->getFunction(Info.VectorName);

    return nullptr;
  }
  /// @}
};
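
// Minimal usage sketch (assumes 'CI' is a call to a scalar function that
// carries VFABI variant attributes):
//
//   VFDatabase DB(CI);
//   VFShape Shape = VFShape::get(CI, ElementCount::getFixed(4),
//                                /*HasGlobalPred=*/false);
//   if (Function *VecF = DB.getVectorizedFunction(Shape))
//     ; // A 4-lane vector variant is available.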

template <typename T> class ArrayRef;
class DemandedBits;
template <typename InstTy> class InterleaveGroup;
class IRBuilderBase;
class Loop;
class ScalarEvolution;
class TargetTransformInfo;
class Type;
class Value;

namespace Intrinsic {
typedef unsigned ID;
}

/// A helper function for converting Scalar types to vector types. If
/// the incoming type is void or metadata, we return it unchanged. If \p EC
/// represents a scalar, we return the scalar type.
inline Type *ToVectorTy(Type *Scalar, ElementCount EC) {
  if (Scalar->isVoidTy() || Scalar->isMetadataTy() || EC.isScalar())
    return Scalar;
  return VectorType::get(Scalar, EC);
}

inline Type *ToVectorTy(Type *Scalar, unsigned VF) {
  return ToVectorTy(Scalar, ElementCount::getFixed(VF));
}
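
// For example (illustrative): ToVectorTy(i32, 4) yields <4 x i32>, while
// ToVectorTy(i32, 1) and ToVectorTy(void, 4) return the scalar type
// unchanged.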

/// Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all scalars
/// for the scalar form of the intrinsic and all vectors (or scalars handled by
/// isVectorIntrinsicWithScalarOpAtArg) for the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);

/// Identifies if the vector form of the intrinsic has a scalar operand.
bool isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
                                        unsigned ScalarOpdIdx);

/// Identifies if the vector form of the intrinsic is overloaded on the type of
/// the operand at index \p OpdIdx, or on the return type if \p OpdIdx is -1.
bool isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID, int OpdIdx);

/// Returns intrinsic ID for call.
/// For the input call instruction it finds the mapping intrinsic and returns
/// its intrinsic ID; if no mapping is found, it returns not_intrinsic.
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI,
                                          const TargetLibraryInfo *TLI);

/// Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
Value *findScalarElement(Value *V, unsigned EltNo);

/// If all non-negative \p Mask elements are the same value, return that value.
/// If all elements are negative (undefined) or \p Mask contains different
/// non-negative values, return -1.
int getSplatIndex(ArrayRef<int> Mask);

/// Get splat value if the input is a splat vector or return nullptr.
/// The value may be extracted from a splat constants vector or from
/// a sequence of instructions that broadcast a single value into a vector.
Value *getSplatValue(const Value *V);

/// Return true if each element of the vector value \p V is poisoned or equal to
/// every other non-poisoned element. If an index element is specified, either
/// every element of the vector is poisoned or the element at that index is not
/// poisoned and equal to every other non-poisoned element.
/// This may be more powerful than the related getSplatValue() because it is
/// not limited by finding a scalar source value to a splatted vector.
bool isSplatValue(const Value *V, int Index = -1, unsigned Depth = 0);

/// Transform a shuffle mask's output demanded element mask into demanded
/// element masks for the 2 operands, returns false if the mask isn't valid.
/// Both \p DemandedLHS and \p DemandedRHS are initialised to [SrcWidth].
/// \p AllowUndefElts permits "-1" indices to be treated as undef.
bool getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
                            const APInt &DemandedElts, APInt &DemandedLHS,
                            APInt &DemandedRHS, bool AllowUndefElts = false);

/// Replace each shuffle mask index with the scaled sequential indices for an
/// equivalent mask of narrowed elements. Mask elements that are less than 0
/// (sentinel values) are repeated in the output mask.
///
/// Example with Scale = 4:
///   <4 x i32> <3, 2, 0, -1> -->
///   <16 x i8> <12, 13, 14, 15, 8, 9, 10, 11, 0, 1, 2, 3, -1, -1, -1, -1>
///
/// This is the reverse process of widening shuffle mask elements, but it always
/// succeeds because the indexes can always be multiplied (scaled up) to map to
/// narrower vector elements.
void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                           SmallVectorImpl<int> &ScaledMask);

/// Try to transform a shuffle mask by replacing elements with the scaled index
/// for an equivalent mask of widened elements. If all mask elements that would
/// map to a wider element of the new mask are the same negative number
/// (sentinel value), that element of the new mask is the same value. If any
/// element in a given slice is negative and some other element in that slice is
/// not the same value, return false (partial matches with sentinel values are
/// not allowed).
///
/// Example with Scale = 4:
///   <16 x i8> <12, 13, 14, 15, 8, 9, 10, 11, 0, 1, 2, 3, -1, -1, -1, -1> -->
///   <4 x i32> <3, 2, 0, -1>
///
/// This is the reverse process of narrowing shuffle mask elements if it
/// succeeds. This transform is not always possible because indexes may not
/// divide evenly (scale down) to map to wider vector elements.
bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
                          SmallVectorImpl<int> &ScaledMask);

/// Repetitively apply `widenShuffleMaskElts()` for as long as it succeeds,
/// to get the shuffle mask with widest possible elements.
void getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
                                  SmallVectorImpl<int> &ScaledMask);

/// Splits and processes a shuffle mask depending on the number of input and
/// output registers. The function does 2 main things: 1) splits the
/// source/destination vectors into real registers; 2) does the mask analysis
/// to identify which real registers are permuted. Then the function processes
/// the resulting registers mask using the provided action items. If no input
/// register is defined, \p NoInputAction is used. If only 1 input register is
/// used, \p SingleInputAction is used, otherwise \p ManyInputsAction is used
/// to process 2 or more input registers and masks.
/// \param Mask Original shuffle mask.
/// \param NumOfSrcRegs Number of source registers.
/// \param NumOfDestRegs Number of destination registers.
/// \param NumOfUsedRegs Number of actually used destination registers.
void processShuffleMasks(
    ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
    unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
    function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction);

/// Compute a map of integer instructions to their minimum legal type
/// size.
///
/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
/// type (e.g. i32) whenever arithmetic is performed on them.
///
/// For targets with native i8 or i16 operations, usually InstCombine can shrink
/// the arithmetic type down again. However InstCombine refuses to create
/// illegal types, so for targets without i8 or i16 registers, the lengthening
/// and shrinking remains.
///
/// Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
/// their scalar equivalents do not, so during vectorization it is important to
/// remove these extensions and truncations when deciding the profitability of
/// vectorization.
///
/// This function analyzes the given range of instructions and determines the
/// minimum type size each can be converted to. It attempts to remove or
/// minimize type size changes across each def-use chain, so for example in the
/// following code:
///
///   %1 = load i8, i8*
///   %2 = add i8 %1, 2
///   %3 = load i16, i16*
///   %4 = zext i8 %2 to i32
///   %5 = zext i16 %3 to i32
///   %6 = add i32 %4, %5
///   %7 = trunc i32 %6 to i16
///
/// Instruction %6 must be done at least in i16, so computeMinimumValueSizes
/// will return: {%1: 16, %2: 16, %3: 16, %4: 16, %5: 16, %6: 16, %7: 16}.
///
/// If the optional TargetTransformInfo is provided, this function tries harder
/// to do less work by only looking at illegal types.
MapVector<Instruction*, uint64_t>
computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
                         DemandedBits &DB,
                         const TargetTransformInfo *TTI=nullptr);
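
// Minimal usage sketch (assumes 'Blocks' spans the loop body, 'DB' is the
// DemandedBits analysis result, and 'TTI' is available):
//
//   MapVector<Instruction *, uint64_t> MinBWs =
//       computeMinimumValueSizes(Blocks, DB, &TTI);
//   for (const auto &[I, BW] : MinBWs)
//     ; // 'I' can legally be computed in 'BW' bits.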

/// Compute the union of two access-group lists.
///
/// If the list contains just one access group, it is returned directly. If the
/// list is empty, returns nullptr.
MDNode *uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2);

/// Compute the access-group list of access groups that @p Inst1 and @p Inst2
/// are both in. If either instruction does not access memory at all, it is
/// considered to be in every list.
///
/// If the list contains just one access group, it is returned directly. If the
/// list is empty, returns nullptr.
MDNode *intersectAccessGroups(const Instruction *Inst1,
                              const Instruction *Inst2);

/// Specifically, let Kinds = [MD_tbaa, MD_alias_scope, MD_noalias, MD_fpmath,
/// MD_nontemporal, MD_access_group].
/// For K in Kinds, we get the MDNode for K from each of the
/// elements of VL, compute their "intersection" (i.e., the most generic
/// metadata value that covers all of the individual values), and set I's
/// metadata for M equal to the intersection value.
///
/// This function always sets a (possibly null) value for each K in Kinds.
Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL);

/// Create a mask that filters the members of an interleave group where there
/// are gaps.
///
/// For example, the mask for \p Group with interleave-factor 3
/// and \p VF 4, that has only its first member present is:
///
///   <1,0,0,1,0,0,1,0,0,1,0,0>
///
/// Note: The result is a mask of 0's and 1's, as opposed to the other
/// create[*]Mask() utilities which create a shuffle mask (mask that
/// consists of indices).
Constant *createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
                               const InterleaveGroup<Instruction> &Group);

/// Create a mask with replicated elements.
///
/// This function creates a shuffle mask for replicating each of the \p VF
/// elements in a vector \p ReplicationFactor times. It can be used to
/// transform a mask of \p VF elements into a mask of
/// \p VF * \p ReplicationFactor elements used by a predicated
/// interleaved-group of loads/stores whose Interleaved-factor ==
/// \p ReplicationFactor.
///
/// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
///
///   <0,0,0,1,1,1,2,2,2,3,3,3>
llvm::SmallVector<int, 16> createReplicatedMask(unsigned ReplicationFactor,
                                                unsigned VF);

/// Create an interleave shuffle mask.
///
/// This function creates a shuffle mask for interleaving \p NumVecs vectors of
/// vectorization factor \p VF into a single wide vector. The mask is of the
/// form:
///
///   <0, VF, VF * 2, ..., VF * (NumVecs - 1), 1, VF + 1, VF * 2 + 1, ...>
///
/// For example, the mask for VF = 4 and NumVecs = 2 is:
///
///   <0, 4, 1, 5, 2, 6, 3, 7>.
llvm::SmallVector<int, 16> createInterleaveMask(unsigned VF, unsigned NumVecs);

/// Create a stride shuffle mask.
///
/// This function creates a shuffle mask whose elements begin at \p Start and
/// are incremented by \p Stride. The mask can be used to deinterleave an
/// interleaved vector into separate vectors of vectorization factor \p VF. The
/// mask is of the form:
///
///   <Start, Start + Stride, ..., Start + Stride * (VF - 1)>
///
/// For example, the mask for Start = 0, Stride = 2, and VF = 4 is:
///
///   <0, 2, 4, 6>
llvm::SmallVector<int, 16> createStrideMask(unsigned Start, unsigned Stride,
                                            unsigned VF);

/// Create a sequential shuffle mask.
///
/// This function creates shuffle mask whose elements are sequential and begin
/// at \p Start.  The mask contains \p NumInts integers and is padded with \p
/// NumUndefs undef values. The mask is of the form:
///
///   <Start, Start + 1, ... Start + NumInts - 1, undef_1, ... undef_NumUndefs>
///
/// For example, the mask for Start = 0, NumInts = 4, and NumUndefs = 4 is:
///
///   <0, 1, 2, 3, undef, undef, undef, undef>
llvm::SmallVector<int, 16>
createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs);

/// Given a shuffle mask for a binary shuffle, create the equivalent shuffle
/// mask assuming both operands are identical. This assumes that the unary
/// shuffle will use elements from operand 0 (operand 1 will be unused).
llvm::SmallVector<int, 16> createUnaryMask(ArrayRef<int> Mask,
                                           unsigned NumElts);

/// Concatenate a list of vectors.
///
/// This function generates code that concatenates the vectors in \p Vecs into a
/// single large vector. The number of vectors should be greater than one, and
/// their element types should be the same. The number of elements in the
/// vectors should also be the same; however, if the last vector has fewer
/// elements, it will be padded with undefs.
Value *concatenateVectors(IRBuilderBase &Builder, ArrayRef<Value *> Vecs);

/// Given a mask vector of i1, return true if all of the elements of this
/// predicate mask are known to be false or undef.  That is, return true if all
/// lanes can be assumed inactive.
bool maskIsAllZeroOrUndef(Value *Mask);

/// Given a mask vector of i1, return true if all of the elements of this
/// predicate mask are known to be true or undef.  That is, return true if all
/// lanes can be assumed active.
bool maskIsAllOneOrUndef(Value *Mask);

/// Given a mask vector of the form <Y x i1>, return an APInt (of bitwidth Y)
/// for each lane which may be active.
APInt possiblyDemandedEltsInMask(Value *Mask);

/// A group of interleaved loads/stores sharing the same stride and located
/// close to each other.
///
/// Each member in this group has an index starting from 0, and the largest
/// index should be less than the interleave factor, which is equal to the
/// absolute value of the access's stride.
///
/// E.g. An interleaved load group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          a = A[i];                           // Member of index 0
///          b = A[i+1];                         // Member of index 1
///          d = A[i+3];                         // Member of index 3
///          ...
///        }
///
///      An interleaved store group of factor 4:
///        for (unsigned i = 0; i < 1024; i+=4) {
///          ...
///          A[i]   = a;                         // Member of index 0
///          A[i+1] = b;                         // Member of index 1
///          A[i+2] = c;                         // Member of index 2
///          A[i+3] = d;                         // Member of index 3
///        }
///
/// Note: the interleaved load group could have gaps (missing members), but
/// the interleaved store group doesn't allow gaps.
template <typename InstTy> class InterleaveGroup {
public:
  InterleaveGroup(uint32_t Factor, bool Reverse, Align Alignment)
      : Factor(Factor), Reverse(Reverse), Alignment(Alignment),
        InsertPos(nullptr) {}

  InterleaveGroup(InstTy *Instr, int32_t Stride, Align Alignment)
      : Alignment(Alignment), InsertPos(Instr) {
    Factor = std::abs(Stride);
    assert(Factor > 1 && "Invalid interleave factor");

    Reverse = Stride < 0;
    Members[0] = Instr;
  }

  bool isReverse() const { return Reverse; }
  uint32_t getFactor() const { return Factor; }
  Align getAlign() const { return Alignment; }
  uint32_t getNumMembers() const { return Members.size(); }

  /// Try to insert a new member \p Instr with index \p Index and
  /// alignment \p NewAlign. The index is relative to the leader and it could
  /// be negative if it is the new leader.
  ///
  /// \returns false if the instruction doesn't belong to the group.
  bool insertMember(InstTy *Instr, int32_t Index, Align NewAlign) {
    // Make sure the key fits in an int32_t.
    std::optional<int32_t> MaybeKey = checkedAdd(Index, SmallestKey);
    if (!MaybeKey)
      return false;
    int32_t Key = *MaybeKey;

    // Skip if the key is used for either the tombstone or empty special values.
    if (DenseMapInfo<int32_t>::getTombstoneKey() == Key ||
        DenseMapInfo<int32_t>::getEmptyKey() == Key)
      return false;

    // Skip if there is already a member with the same index.
    if (Members.find(Key) != Members.end())
      return false;

    if (Key > LargestKey) {
      // The largest index is always less than the interleave factor.
      if (Index >= static_cast<int32_t>(Factor))
        return false;

      LargestKey = Key;
    } else if (Key < SmallestKey) {

      // Make sure the largest index fits in an int32_t.
      std::optional<int32_t> MaybeLargestIndex = checkedSub(LargestKey, Key);
      if (!MaybeLargestIndex)
        return false;

      // The largest index is always less than the interleave factor.
      if (*MaybeLargestIndex >= static_cast<int64_t>(Factor))
        return false;

      SmallestKey = Key;
    }

    // It's always safe to select the minimum alignment.
    Alignment = std::min(Alignment, NewAlign);
    Members[Key] = Instr;
    return true;
  }

  /// Get the member with the given index \p Index
  ///
  /// \returns nullptr if the group contains no such member.
  InstTy *getMember(uint32_t Index) const {
    int32_t Key = SmallestKey + Index;
    return Members.lookup(Key);
  }

  /// Get the index for the given member. Unlike the key in the member
  /// map, the index starts from 0.
  uint32_t getIndex(const InstTy *Instr) const {
    for (auto I : Members) {
      if (I.second == Instr)
        return I.first - SmallestKey;
    }

    llvm_unreachable("InterleaveGroup contains no such member");
  }

  InstTy *getInsertPos() const { return InsertPos; }
  void setInsertPos(InstTy *Inst) { InsertPos = Inst; }

  /// Add metadata (e.g. alias info) from the instructions in this group to \p
  /// NewInst.
  ///
  /// FIXME: this function currently does not add noalias metadata a la
  /// addNewMetadata.  To do that we need to compute the intersection of the
  /// noalias info from all members.
  void addMetadata(InstTy *NewInst) const;

  /// Returns true if this Group requires a scalar iteration to handle gaps.
  bool requiresScalarEpilogue() const {
    // If the last member of the Group exists, then a scalar epilog is not
    // needed for this group.
    if (getMember(getFactor() - 1))
      return false;

    // We have a group with gaps. It therefore can't be a reversed access,
    // because such groups get invalidated (TODO).
    assert(!isReverse() && "Group should have been invalidated");

    // This is a group of loads, with gaps, and without a last member.
    return true;
  }

private:
  uint32_t Factor; // Interleave Factor.
  bool Reverse;
  Align Alignment;
  DenseMap<int32_t, InstTy *> Members;
  int32_t SmallestKey = 0;
  int32_t LargestKey = 0;

  // To avoid breaking dependences, vectorized instructions of an interleave
  // group should be inserted at either the first load or the last store in
  // program order.
  //
  // E.g. %even = load i32             // Insert Position
  //      %add = add i32 %even         // Use of %even
  //      %odd = load i32
  //
  //      store i32 %even
  //      %odd = add i32               // Def of %odd
  //      store i32 %odd               // Insert Position
  InstTy *InsertPos;
};
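
// Illustrative sketch (hypothetical instructions): build a factor-2 group
// from two adjacent loads, with 'LoadA' as the leader at index 0 and 'LoadB'
// one element after it:
//
//   InterleaveGroup<Instruction> Group(LoadA, /*Stride=*/2, Align(4));
//   bool Inserted = Group.insertMember(LoadB, /*Index=*/1, Align(4));
//   assert(Inserted && Group.getFactor() == 2);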

/// Drive the analysis of interleaved memory accesses in the loop.
///
/// Use this class to analyze interleaved accesses only when we can vectorize
/// a loop. Otherwise the analysis is meaningless, as vectorizing interleaved
/// accesses would be unsafe.
///
/// The analysis collects interleave groups and records the relationships
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
  InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
                        DominatorTree *DT, LoopInfo *LI,
                        const LoopAccessInfo *LAI)
      : PSE(PSE), TheLoop(L), DT(DT), LI(LI), LAI(LAI) {}

  ~InterleavedAccessInfo() { invalidateGroups(); }

  /// Analyze the interleaved accesses and collect them in interleave
  /// groups. Substitute symbolic strides using \p Strides.
  /// Consider also predicated loads/stores in the analysis if
  /// \p EnableMaskedInterleavedGroup is true.
  void analyzeInterleaving(bool EnableMaskedInterleavedGroup);

  /// Invalidate groups, e.g., in case all blocks in the loop will be
  /// predicated, contrary to the original assumption. Although we currently
  /// prevent group
  /// formation for predicated accesses, we may be able to relax this limitation
  /// in the future once we handle more complicated blocks. Returns true if any
  /// groups were invalidated.
  bool invalidateGroups() {
    if (InterleaveGroups.empty()) {
      assert(
          !RequiresScalarEpilogue &&
          "RequiresScalarEpilog should not be set without interleave groups");
      return false;
    }

    InterleaveGroupMap.clear();
    for (auto *Ptr : InterleaveGroups)
      delete Ptr;
    InterleaveGroups.clear();
    RequiresScalarEpilogue = false;
    return true;
  }

  /// Check if \p Instr belongs to any interleave group.
  bool isInterleaved(Instruction *Instr) const {
    return InterleaveGroupMap.contains(Instr);
  }

  /// Get the interleave group that \p Instr belongs to.
  ///
  /// \returns nullptr if \p Instr does not belong to any group.
  InterleaveGroup<Instruction> *
  getInterleaveGroup(const Instruction *Instr) const {
    return InterleaveGroupMap.lookup(Instr);
  }

  iterator_range<SmallPtrSetIterator<llvm::InterleaveGroup<Instruction> *>>
  getInterleaveGroups() {
    return make_range(InterleaveGroups.begin(), InterleaveGroups.end());
  }

  /// Returns true if an interleaved group that may access memory
  /// out-of-bounds requires a scalar epilogue iteration for correctness.
  bool requiresScalarEpilogue() const { return RequiresScalarEpilogue; }

  /// Invalidate groups that require a scalar epilogue (due to gaps). This can
  /// happen when optimizing for size forbids a scalar epilogue, and the gap
  /// cannot be filtered by masking the load/store.
  void invalidateGroupsRequiringScalarEpilogue();

  /// Returns true if we have any interleave groups.
  bool hasGroups() const { return !InterleaveGroups.empty(); }

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
  /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
  /// The interleaved access analysis can also add new predicates (for example
  /// by versioning strides of pointers).
  PredicatedScalarEvolution &PSE;

  Loop *TheLoop;
  DominatorTree *DT;
  LoopInfo *LI;
  const LoopAccessInfo *LAI;

  /// True if the loop may contain non-reversed interleaved groups with
  /// out-of-bounds accesses. We ensure we don't speculatively access memory
  /// out-of-bounds by executing at least one scalar epilogue iteration.
  bool RequiresScalarEpilogue = false;

  /// Holds the relationships between the members and the interleave group.
  DenseMap<Instruction *, InterleaveGroup<Instruction> *> InterleaveGroupMap;

  SmallPtrSet<InterleaveGroup<Instruction> *, 4> InterleaveGroups;

  /// Holds dependences among the memory accesses in the loop. It maps a source
  /// access to a set of dependent sink accesses.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 2>> Dependences;

  /// The descriptor for a strided memory access.
  struct StrideDescriptor {
    StrideDescriptor() = default;
    StrideDescriptor(int64_t Stride, const SCEV *Scev, uint64_t Size,
                     Align Alignment)
        : Stride(Stride), Scev(Scev), Size(Size), Alignment(Alignment) {}

    // The access's stride. It is negative for a reverse access.
    int64_t Stride = 0;

    // The scalar expression of this access.
    const SCEV *Scev = nullptr;

    // The size of the memory object.
    uint64_t Size = 0;

    // The alignment of this access.
    Align Alignment;
  };

  /// A type for holding instructions and their stride descriptors.
  using StrideEntry = std::pair<Instruction *, StrideDescriptor>;

  /// Create a new interleave group with the given instruction \p Instr,
  /// stride \p Stride and alignment \p Alignment.
  ///
  /// \returns the newly created interleave group.
  InterleaveGroup<Instruction> *
  createInterleaveGroup(Instruction *Instr, int Stride, Align Alignment) {
    assert(!InterleaveGroupMap.count(Instr) &&
           "Already in an interleaved access group");
    InterleaveGroupMap[Instr] =
        new InterleaveGroup<Instruction>(Instr, Stride, Alignment);
    InterleaveGroups.insert(InterleaveGroupMap[Instr]);
    return InterleaveGroupMap[Instr];
  }

  /// Release the group and remove all the relationships.
  void releaseGroup(InterleaveGroup<Instruction> *Group) {
    for (unsigned i = 0; i < Group->getFactor(); i++)
      if (Instruction *Member = Group->getMember(i))
        InterleaveGroupMap.erase(Member);

    InterleaveGroups.erase(Group);
    delete Group;
  }

  /// Collect all the accesses with a constant stride in program order.
  void collectConstStrideAccesses(
      MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
      const DenseMap<Value *, const SCEV *> &Strides);

  /// Returns true if \p Stride is allowed in an interleaved group.
  static bool isStrided(int Stride);

  /// Returns true if \p BB is a predicated block.
  bool isPredicated(BasicBlock *BB) const {
    return LoopAccessInfo::blockNeedsPredication(BB, TheLoop, DT);
  }

  /// Returns true if LoopAccessInfo can be used for dependence queries.
  bool areDependencesValid() const {
    return LAI && LAI->getDepChecker().getDependences();
  }

  /// Returns true if memory accesses \p A and \p B can be reordered, if
  /// necessary, when constructing interleaved groups.
  ///
  /// \p A must precede \p B in program order. We return true if reordering is
  /// legal or unnecessary, and false if \p A and \p B may be dependent and
  /// reordering could therefore be unsafe.
  bool canReorderMemAccessesForInterleavedGroups(StrideEntry *A,
                                                 StrideEntry *B) const {
    // Code motion for interleaved accesses can potentially hoist strided loads
    // and sink strided stores. The code below checks the legality of the
    // following two conditions:
    //
    // 1. Potentially moving a strided load (B) before any store (A) that
    //    precedes B, or
    //
    // 2. Potentially moving a strided store (A) after any load or store (B)
    //    that A precedes.
    //
    // It's legal to reorder A and B if we know there isn't a dependence from A
    // to B. Note that this determination is conservative since some
    // dependences could potentially be reordered safely.

    // A is potentially the source of a dependence.
    auto *Src = A->first;
    auto SrcDes = A->second;

    // B is potentially the sink of a dependence.
    auto *Sink = B->first;
    auto SinkDes = B->second;

    // Code motion for interleaved accesses can't violate WAR dependences.
    // Thus, reordering is legal if the source isn't a write.
    if (!Src->mayWriteToMemory())
      return true;

    // At least one of the accesses must be strided.
    if (!isStrided(SrcDes.Stride) && !isStrided(SinkDes.Stride))
      return true;

    // If dependence information is not available from LoopAccessInfo,
    // conservatively assume the instructions can't be reordered.
    if (!areDependencesValid())
      return false;

    // If we know there is a dependence from source to sink, assume the
    // instructions can't be reordered. Otherwise, reordering is legal.
    return !Dependences.contains(Src) || !Dependences.lookup(Src).count(Sink);
  }

  /// Collect the dependences from LoopAccessInfo.
  ///
  /// We process the dependences once during the interleaved access analysis to
  /// enable constant-time dependence queries.
  void collectDependences() {
    if (!areDependencesValid())
      return;
    auto *Deps = LAI->getDepChecker().getDependences();
    for (auto Dep : *Deps)
      Dependences[Dep.getSource(*LAI)].insert(Dep.getDestination(*LAI));
  }
};
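
// Minimal usage sketch (assumes the standard analyses for loop 'L' are
// available):
//
//   InterleavedAccessInfo IAI(PSE, L, DT, LI, LAI);
//   IAI.analyzeInterleaving(/*EnableMaskedInterleavedGroup=*/true);
//   for (auto *Group : IAI.getInterleaveGroups())
//     ; // Each group aggregates accesses sharing one stride.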

} // llvm namespace

#endif
//===- PtrUseVisitor.h - InstVisitors over a pointer's uses -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a collection of visitors which walk the (instruction)
/// uses of a pointer. These visitors all provide the same essential behavior
/// as an InstVisitor with similar template-based flexibility and
/// implementation strategies.
///
/// These can be used, for example, to quickly analyze the uses of an alloca,
/// global variable, or function argument.
///
/// FIXME: Provide a variant which doesn't track offsets and is cheaper.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_PTRUSEVISITOR_H
#define LLVM_ANALYSIS_PTRUSEVISITOR_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include <cassert>
#include <type_traits>

namespace llvm {
class DataLayout;
class Use;

namespace detail {

/// Implementation of non-dependent functionality for \c PtrUseVisitor.
///
/// See \c PtrUseVisitor for the public interface and detailed comments about
/// usage. This class is just a helper base class which is not templated and
/// contains all common code to be shared between different instantiations of
/// PtrUseVisitor.
class PtrUseVisitorBase {
public:
  /// This class provides information about the result of a visit.
  ///
  /// After walking all the users (recursively) of a pointer, the basic
  /// infrastructure records some commonly useful information such as whether
  /// the pointer escaped and whether the visit completed or aborted early.
  class PtrInfo {
  public:
    PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}

    /// Reset the pointer info, clearing all state.
    void reset() {
      AbortedInfo.setPointer(nullptr);
      AbortedInfo.setInt(false);
      EscapedInfo.setPointer(nullptr);
      EscapedInfo.setInt(false);
    }

    /// Did we abort the visit early?
    bool isAborted() const { return AbortedInfo.getInt(); }

    /// Is the pointer escaped at some point?
    bool isEscaped() const { return EscapedInfo.getInt(); }

    /// Get the instruction causing the visit to abort.
    /// \returns a pointer to the instruction causing the abort if one is
    /// available; otherwise returns null.
    Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }

    /// Get the instruction causing the pointer to escape.
    /// \returns a pointer to the instruction which escapes the pointer if one
    /// is available; otherwise returns null.
    Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }

    /// Mark the visit as aborted. Intended for use in a void return.
    /// \param I The instruction which caused the visit to abort, if available.
    void setAborted(Instruction *I = nullptr) {
      AbortedInfo.setInt(true);
      AbortedInfo.setPointer(I);
    }

    /// Mark the pointer as escaped. Intended for use in a void return.
    /// \param I The instruction which escapes the pointer, if available.
    void setEscaped(Instruction *I = nullptr) {
      EscapedInfo.setInt(true);
      EscapedInfo.setPointer(I);
    }

    /// Mark the pointer as escaped, and the visit as aborted. Intended
    /// for use in a void return.
    /// \param I The instruction which both escapes the pointer and aborts the
    /// visit, if available.
    void setEscapedAndAborted(Instruction *I = nullptr) {
      setEscaped(I);
      setAborted(I);
    }

  private:
    PointerIntPair<Instruction *, 1, bool> AbortedInfo, EscapedInfo;
  };

protected:
  const DataLayout &DL;

  /// \name Visitation infrastructure
  /// @{

  /// The info collected about the pointer being visited thus far.
  PtrInfo PI;

  /// A struct of the data needed to visit a particular use.
  ///
  /// This is used to maintain a worklist of to-visit uses, which makes the
  /// visit iterative rather than recursive.
  struct UseToVisit {
    using UseAndIsOffsetKnownPair = PointerIntPair<Use *, 1, bool>;

    UseAndIsOffsetKnownPair UseAndIsOffsetKnown;
    APInt Offset;
  };

  /// The worklist of to-visit uses.
  SmallVector<UseToVisit, 8> Worklist;

  /// A set of visited uses to break cycles in unreachable code.
  SmallPtrSet<Use *, 8> VisitedUses;

  /// @}

  /// \name Per-visit state
  /// This state is reset for each instruction visited.
  /// @{

  /// The use currently being visited.
  Use *U;

  /// True if we have a known constant offset for the use currently
  /// being visited.
  bool IsOffsetKnown;

  /// The constant offset of the use if that is known.
  APInt Offset;

  /// @}

  /// Note that the constructor is protected because this class must be used as
  /// a base class; instances of it cannot be created directly.
  PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}

  /// Enqueue the users of this instruction in the visit worklist.
  ///
  /// This will visit the users with the same offset as the current visit
  /// (including an unknown offset if that is the current state).
  void enqueueUsers(Instruction &I);

  /// Walk the operands of a GEP and adjust the offset as appropriate.
  ///
  /// This routine does the heavy lifting of the pointer walk by computing
  /// offsets and looking through GEPs.
  bool adjustOffsetForGEP(GetElementPtrInst &GEPI);
};

} // end namespace detail

/// A base class for visitors over the uses of a pointer value.
///
/// Once constructed, a user can call \c visit on a pointer value, and this
/// will walk its uses and visit each instruction using an InstVisitor. It also
/// provides visit methods which will recurse through any pointer-to-pointer
/// transformations such as GEPs and bitcasts.
///
/// During the visit, the current Use* being visited is available to the
/// subclass, as well as the current offset from the original base pointer if
/// known.
///
/// The recursive visit of uses is accomplished with a worklist, so the only
/// ordering guarantee is that an instruction is visited before any uses of it
/// are visited. Note that this does *not* mean before any of its users are
/// visited! This is because users can be visited multiple times due to
/// multiple, different uses of pointers derived from the same base.
///
/// A particular Use will only be visited once, but a User may be visited
/// multiple times, once per Use. These visits may notably have different
/// offsets.
///
/// All visit methods on the underlying InstVisitor return a boolean. This
/// return value short-circuits the visit, stopping it immediately.
///
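/// A subclassing sketch (hypothetical names; a sketch based only on the
/// interfaces above):
///
/// \code
/// struct EscapeFinder : PtrUseVisitor<EscapeFinder> {
///   EscapeFinder(const DataLayout &DL) : PtrUseVisitor<EscapeFinder>(DL) {}
///   // The inherited handlers already walk GEPs/casts and flag escapes.
/// };
/// auto Info = EscapeFinder(DL).visitPtr(*Alloca);
/// if (Info.isEscaped())
///   report(Info.getEscapingInst()); // hypothetical reporting helper
/// \endcode
///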
/// FIXME: Generalize this for all values rather than just instructions.
template <typename DerivedT>
class PtrUseVisitor : protected InstVisitor<DerivedT>,
                      public detail::PtrUseVisitorBase {
  friend class InstVisitor<DerivedT>;

  using Base = InstVisitor<DerivedT>;

public:
  PtrUseVisitor(const DataLayout &DL) : PtrUseVisitorBase(DL) {
    static_assert(std::is_base_of<PtrUseVisitor, DerivedT>::value,
                  "Must pass the derived type to this template!");
  }

  /// Recursively visit the uses of the given pointer.
  /// \returns An info struct about the pointer. See \c PtrInfo for details.
  PtrInfo visitPtr(Instruction &I) {
    // This must be a pointer type. Get an integer type suitable to hold
    // offsets on this pointer.
    // FIXME: Support a vector of pointers.
    assert(I.getType()->isPointerTy());
    IntegerType *IntIdxTy = cast<IntegerType>(DL.getIndexType(I.getType()));
    IsOffsetKnown = true;
    Offset = APInt(IntIdxTy->getBitWidth(), 0);
    PI.reset();

    // Enqueue the uses of this pointer.
    enqueueUsers(I);

    // Visit all the uses off the worklist until it is empty.
    while (!Worklist.empty()) {
      UseToVisit ToVisit = Worklist.pop_back_val();
      U = ToVisit.UseAndIsOffsetKnown.getPointer();
      IsOffsetKnown = ToVisit.UseAndIsOffsetKnown.getInt();
      if (IsOffsetKnown)
        Offset = std::move(ToVisit.Offset);

      Instruction *I = cast<Instruction>(U->getUser());
      static_cast<DerivedT*>(this)->visit(I);
      if (PI.isAborted())
        break;
    }
    return PI;
  }

protected:
  void visitStoreInst(StoreInst &SI) {
    if (SI.getValueOperand() == U->get())
      PI.setEscaped(&SI);
  }

  void visitBitCastInst(BitCastInst &BC) {
    enqueueUsers(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    enqueueUsers(ASC);
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    PI.setEscaped(&I);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    if (GEPI.use_empty())
      return;

    // If we can't walk the GEP, clear the offset.
    if (!adjustOffsetForGEP(GEPI)) {
      IsOffsetKnown = false;
      Offset = APInt();
    }

    // Enqueue the users now that the offset has been adjusted.
    enqueueUsers(GEPI);
  }

  // No-op intrinsics which we know don't escape the pointer to logic in
  // some other function.
  void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {}
  void visitMemIntrinsic(MemIntrinsic &I) {}
  void visitIntrinsicInst(IntrinsicInst &II) {
    switch (II.getIntrinsicID()) {
    default:
      return Base::visitIntrinsicInst(II);

    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return; // No-op intrinsics.
    }
  }

  // Generically, arguments to calls and invokes escape the pointer to some
  // other function. Mark that.
  void visitCallBase(CallBase &CB) {
    PI.setEscaped(&CB);
    Base::visitCallBase(CB);
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_PTRUSEVISITOR_H
//===-- DomPrinter.h - Dom printer external interface ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines external functions that can be called to explicitly
// instantiate the dominance tree printer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DOMPRINTER_H
#define LLVM_ANALYSIS_DOMPRINTER_H

#include "llvm/Analysis/DOTGraphTraitsPass.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

template <>
struct DOTGraphTraits<DomTreeNode *> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  std::string getNodeLabel(DomTreeNode *Node, DomTreeNode *Graph) {

    BasicBlock *BB = Node->getBlock();

    if (!BB)
      return "Post dominance root node";

    if (isSimple())
      return DOTGraphTraits<DOTFuncInfo *>::getSimpleNodeLabel(BB, nullptr);

    return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(BB, nullptr);
  }
};

template <>
struct DOTGraphTraits<DominatorTree *>
    : public DOTGraphTraits<DomTreeNode *> {

  DOTGraphTraits(bool isSimple = false)
      : DOTGraphTraits<DomTreeNode *>(isSimple) {}

  static std::string getGraphName(DominatorTree *DT) {
    return "Dominator tree";
  }

  std::string getNodeLabel(DomTreeNode *Node, DominatorTree *G) {
    return DOTGraphTraits<DomTreeNode *>::getNodeLabel(Node, G->getRootNode());
  }
};

template <>
struct DOTGraphTraits<PostDominatorTree *>
    : public DOTGraphTraits<DomTreeNode *> {

  DOTGraphTraits(bool isSimple = false)
      : DOTGraphTraits<DomTreeNode *>(isSimple) {}

  static std::string getGraphName(PostDominatorTree *DT) {
    return "Post dominator tree";
  }

  std::string getNodeLabel(DomTreeNode *Node, PostDominatorTree *G) {
    return DOTGraphTraits<DomTreeNode *>::getNodeLabel(Node, G->getRootNode());
  }
};

struct DomViewer final : DOTGraphTraitsViewer<DominatorTreeAnalysis, false> {
  DomViewer() : DOTGraphTraitsViewer<DominatorTreeAnalysis, false>("dom") {}
};

struct DomOnlyViewer final : DOTGraphTraitsViewer<DominatorTreeAnalysis, true> {
  DomOnlyViewer()
      : DOTGraphTraitsViewer<DominatorTreeAnalysis, true>("domonly") {}
};

struct PostDomViewer final
    : DOTGraphTraitsViewer<PostDominatorTreeAnalysis, false> {
  PostDomViewer()
      : DOTGraphTraitsViewer<PostDominatorTreeAnalysis, false>("postdom") {}
};

struct PostDomOnlyViewer final
    : DOTGraphTraitsViewer<PostDominatorTreeAnalysis, true> {
  PostDomOnlyViewer()
      : DOTGraphTraitsViewer<PostDominatorTreeAnalysis, true>("postdomonly") {}
};

struct DomPrinter final : DOTGraphTraitsPrinter<DominatorTreeAnalysis, false> {
  DomPrinter() : DOTGraphTraitsPrinter<DominatorTreeAnalysis, false>("dom") {}
};

struct DomOnlyPrinter final
    : DOTGraphTraitsPrinter<DominatorTreeAnalysis, true> {
  DomOnlyPrinter()
      : DOTGraphTraitsPrinter<DominatorTreeAnalysis, true>("domonly") {}
};

struct PostDomPrinter final
    : DOTGraphTraitsPrinter<PostDominatorTreeAnalysis, false> {
  PostDomPrinter()
      : DOTGraphTraitsPrinter<PostDominatorTreeAnalysis, false>("postdom") {}
};

struct PostDomOnlyPrinter final
    : DOTGraphTraitsPrinter<PostDominatorTreeAnalysis, true> {
  PostDomOnlyPrinter()
      : DOTGraphTraitsPrinter<PostDominatorTreeAnalysis, true>("postdomonly") {}
};
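
// A pipeline sketch with the new pass manager (a sketch; assumes an
// otherwise-configured FunctionPassManager and analysis registration):
//
//   FunctionPassManager FPM;
//   FPM.addPass(DomPrinter());    // write the dominator tree to a .dot file
//   FPM.addPass(PostDomViewer()); // display the post-dominator tree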
} // namespace llvm

namespace llvm {
  class FunctionPass;
  FunctionPass *createDomPrinterWrapperPassPass();
  FunctionPass *createDomOnlyPrinterWrapperPassPass();
  FunctionPass *createDomViewerWrapperPassPass();
  FunctionPass *createDomOnlyViewerWrapperPassPass();
  FunctionPass *createPostDomPrinterWrapperPassPass();
  FunctionPass *createPostDomOnlyPrinterWrapperPassPass();
  FunctionPass *createPostDomViewerWrapperPassPass();
  FunctionPass *createPostDomOnlyViewerWrapperPassPass();
} // End llvm namespace

#endif
//===- llvm/Analysis/AssumptionCache.h - Track @llvm.assume -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that keeps track of @llvm.assume intrinsics in
// the functions of a module (allowing assumptions within any function to be
// found cheaply by other parts of the optimizer).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
#define LLVM_ANALYSIS_ASSUMPTIONCACHE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include <memory>

namespace llvm {

class AssumeInst;
class Function;
class raw_ostream;
class TargetTransformInfo;
class Value;

/// A cache of \@llvm.assume calls within a function.
///
/// This cache provides fast lookup of assumptions within a function by caching
/// them and amortizing the cost of scanning for them across all queries. Passes
/// that create new assumptions are required to call registerAssumption() to
/// register any new \@llvm.assume calls that they create. Deletions of
/// \@llvm.assume calls do not require special handling.
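///
/// A lookup sketch (hypothetical names; AC is an AssumptionCache for the
/// enclosing function):
///
/// \code
/// for (auto &Elem : AC.assumptionsFor(V)) {
///   if (!Elem.Assume)
///     continue; // weak handles may be nulled when an assume is deleted
///   auto *Assume = cast<AssumeInst>(Elem);
///   // ... use Assume and Elem.Index ...
/// }
/// \endcode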
class AssumptionCache {
public:
  /// Value of ResultElem::Index indicating that the knowledge comes from the
  /// condition argument of the call to llvm.assume, not from an operand
  /// bundle.
  enum : unsigned { ExprResultIdx = std::numeric_limits<unsigned>::max() };

  struct ResultElem {
    WeakVH Assume;

    /// Contains either ExprResultIdx or the index of the operand bundle
    /// containing the knowledge.
    unsigned Index;
    operator Value *() const { return Assume; }
  };

private:
  /// The function for which this cache is handling assumptions.
  ///
  /// We track this to lazily populate our assumptions.
  Function &F;

  TargetTransformInfo *TTI;

  /// Vector of weak value handles to calls of the \@llvm.assume
  /// intrinsic.
  SmallVector<ResultElem, 4> AssumeHandles;

  class AffectedValueCallbackVH final : public CallbackVH {
    AssumptionCache *AC;

    void deleted() override;
    void allUsesReplacedWith(Value *) override;

  public:
    using DMI = DenseMapInfo<Value *>;

    AffectedValueCallbackVH(Value *V, AssumptionCache *AC = nullptr)
        : CallbackVH(V), AC(AC) {}
  };

  friend AffectedValueCallbackVH;

  /// A map of values about which an assumption might be providing
  /// information to the relevant set of assumptions.
  using AffectedValuesMap =
      DenseMap<AffectedValueCallbackVH, SmallVector<ResultElem, 1>,
               AffectedValueCallbackVH::DMI>;
  AffectedValuesMap AffectedValues;

  /// Get the vector of assumptions which affect a value from the cache.
  SmallVector<ResultElem, 1> &getOrInsertAffectedValues(Value *V);

  /// Move affected values in the cache for OV to be affected values for NV.
  void transferAffectedValuesInCache(Value *OV, Value *NV);

  /// Flag tracking whether we have scanned the function yet.
  ///
  /// We want to be as lazy about this as possible, and so we scan the function
  /// at the last moment.
  bool Scanned = false;

  /// Scan the function for assumptions and add them to the cache.
  void scanFunction();

public:
  /// Construct an AssumptionCache from a function by scanning all of
  /// its instructions.
  AssumptionCache(Function &F, TargetTransformInfo *TTI = nullptr)
      : F(F), TTI(TTI) {}

  /// This cache is designed to be self-updating and so it should never be
  /// invalidated.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }

  /// Add an \@llvm.assume intrinsic to this function's cache.
  ///
  /// The call passed in must be an instruction within this function and must
  /// not already be in the cache.
  void registerAssumption(AssumeInst *CI);

  /// Remove an \@llvm.assume intrinsic from this function's cache if it has
  /// been added to the cache earlier.
  void unregisterAssumption(AssumeInst *CI);

  /// Update the cache of values being affected by this assumption (i.e.
  /// the values about which this assumption provides information).
  void updateAffectedValues(AssumeInst *CI);

  /// Clear the cache of \@llvm.assume intrinsics for a function.
  ///
  /// It will be re-scanned the next time it is requested.
  void clear() {
    AssumeHandles.clear();
    AffectedValues.clear();
    Scanned = false;
  }

  /// Access the list of assumption handles currently tracked for this
  /// function.
  ///
  /// Note that these produce weak handles that may be null. The caller must
  /// handle that case.
  /// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
  /// when we can write that to filter out the null values. Then caller code
  /// will become simpler.
  MutableArrayRef<ResultElem> assumptions() {
    if (!Scanned)
      scanFunction();
    return AssumeHandles;
  }

  /// Access the list of assumptions which affect this value.
  MutableArrayRef<ResultElem> assumptionsFor(const Value *V) {
    if (!Scanned)
      scanFunction();

    auto AVI = AffectedValues.find_as(const_cast<Value *>(V));
    if (AVI == AffectedValues.end())
      return MutableArrayRef<ResultElem>();

    return AVI->second;
  }
};

/// A function analysis which provides an \c AssumptionCache.
///
/// This analysis is intended for use with the new pass manager and will vend
/// assumption caches for a given function.
class AssumptionAnalysis : public AnalysisInfoMixin<AssumptionAnalysis> {
  friend AnalysisInfoMixin<AssumptionAnalysis>;

  static AnalysisKey Key;

public:
  using Result = AssumptionCache;

  AssumptionCache run(Function &F, FunctionAnalysisManager &);
};

/// Printer pass for the \c AssumptionAnalysis results.
class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
  raw_ostream &OS;

public:
  explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// An immutable pass that tracks lazily created \c AssumptionCache
/// objects.
///
/// This is essentially a workaround for the legacy pass manager's weaknesses.
/// It associates each assumption cache with a Function and clears the cache
/// when that function is deleted. The AssumptionCache itself is never
/// invalidated by any changes to the function body and so this is sufficient
/// to be conservatively correct.
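///
/// A legacy-pass usage sketch (hypothetical pass name; a sketch based on the
/// interface below):
///
/// \code
/// void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
///   AU.addRequired<AssumptionCacheTracker>();
/// }
/// // ... later, inside MyLegacyPass::runOnFunction(F):
/// AssumptionCache &AC =
///     getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
/// \endcode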
class AssumptionCacheTracker : public ImmutablePass {
  /// A callback value handle applied to function objects, which we use to
  /// delete our cache of intrinsics for a function when it is deleted.
  class FunctionCallbackVH final : public CallbackVH {
    AssumptionCacheTracker *ACT;

    void deleted() override;

  public:
    using DMI = DenseMapInfo<Value *>;

    FunctionCallbackVH(Value *V, AssumptionCacheTracker *ACT = nullptr)
        : CallbackVH(V), ACT(ACT) {}
  };

  friend FunctionCallbackVH;

  using FunctionCallsMap =
      DenseMap<FunctionCallbackVH, std::unique_ptr<AssumptionCache>,
               FunctionCallbackVH::DMI>;

  FunctionCallsMap AssumptionCaches;

public:
  /// Get the cached assumptions for a function.
  ///
  /// If no assumptions are cached, this will scan the function. Otherwise, the
  /// existing cache will be returned.
  AssumptionCache &getAssumptionCache(Function &F);

  /// Return the cached assumptions for a function if it has already been
  /// scanned. Otherwise return nullptr.
  AssumptionCache *lookupAssumptionCache(Function &F);

  AssumptionCacheTracker();
  ~AssumptionCacheTracker() override;

  void releaseMemory() override {
    verifyAnalysis();
    AssumptionCaches.shrink_and_clear();
  }

  void verifyAnalysis() const override;

  bool doFinalization(Module &) override {
    verifyAnalysis();
    return false;
  }

  static char ID; // Pass identification, replacement for typeid
};

template<> struct simplify_type<AssumptionCache::ResultElem> {
  using SimpleType = Value *;

  static SimpleType getSimplifiedValue(AssumptionCache::ResultElem &Val) {
    return Val;
  }
};
template<> struct simplify_type<const AssumptionCache::ResultElem> {
  using SimpleType = /*const*/ Value *;

  static SimpleType getSimplifiedValue(const AssumptionCache::ResultElem &Val) {
    return Val;
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_ASSUMPTIONCACHE_H
//===- CycleAnalysis.h - Cycle Info for LLVM IR -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file declares an analysis pass that computes CycleInfo for
/// LLVM IR, specialized from GenericCycleInfo.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CYCLEANALYSIS_H
#define LLVM_ANALYSIS_CYCLEANALYSIS_H

#include "llvm/IR/CycleInfo.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/SSAContext.h"
#include "llvm/Pass.h"

namespace llvm {
extern template class GenericCycleInfo<SSAContext>;
extern template class GenericCycle<SSAContext>;

using CycleInfo = GenericCycleInfo<SSAContext>;
using Cycle = CycleInfo::CycleT;

/// Legacy analysis pass which computes a \ref CycleInfo.
class CycleInfoWrapperPass : public FunctionPass {
  Function *F = nullptr;
  CycleInfo CI;

public:
  static char ID;

  CycleInfoWrapperPass();

  CycleInfo &getResult() { return CI; }
  const CycleInfo &getResult() const { return CI; }

  bool runOnFunction(Function &F) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

  // TODO: verify analysis?
};

/// Analysis pass which computes a \ref CycleInfo.
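///
/// A query sketch with the new pass manager (a sketch; assumes a populated
/// FunctionAnalysisManager FAM):
///
/// \code
/// CycleInfo &CI = FAM.getResult<CycleAnalysis>(F);
/// for (const Cycle *C : CI.toplevel_cycles())
///   C->print(errs());
/// \endcode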
class CycleAnalysis : public AnalysisInfoMixin<CycleAnalysis> {
  friend AnalysisInfoMixin<CycleAnalysis>;
  static AnalysisKey Key;

public:
  /// Provide the result typedef for this analysis pass.
  using Result = CycleInfo;

  using LegacyWrapper = CycleInfoWrapperPass;

  /// Run the analysis pass over a function and produce cycle info.
  CycleInfo run(Function &F, FunctionAnalysisManager &);

  // TODO: verify analysis?
};

/// Printer pass for the \c CycleInfo.
class CycleInfoPrinterPass : public PassInfoMixin<CycleInfoPrinterPass> {
  raw_ostream &OS;

public:
  explicit CycleInfoPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_CYCLEANALYSIS_H
//===- Loads.h - Local load analysis --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOADS_H
#define LLVM_ANALYSIS_LOADS_H

#include "llvm/IR/BasicBlock.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {

class AAResults;
class AssumptionCache;
class DataLayout;
class DominatorTree;
class Instruction;
class LoadInst;
class Loop;
class MemoryLocation;
class ScalarEvolution;
class TargetLibraryInfo;

/// Return true if this is always a dereferenceable pointer. If the context
/// instruction is specified, performs context-sensitive analysis and returns
/// true if the pointer is dereferenceable at the specified instruction.
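///
/// A query sketch (hypothetical names):
///
/// \code
/// const DataLayout &DL = F.getParent()->getDataLayout();
/// if (isDereferenceablePointer(Ptr, LoadTy, DL, &CtxInst))
///   ...; // a LoadTy-sized access through Ptr cannot fault at CtxInst
/// \endcode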
bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL,
                              const Instruction *CtxI = nullptr,
                              AssumptionCache *AC = nullptr,
                              const DominatorTree *DT = nullptr,
                              const TargetLibraryInfo *TLI = nullptr);

/// Returns true if V is always a dereferenceable pointer with alignment
/// greater than or equal to that requested. If the context instruction is
/// specified, performs context-sensitive analysis and returns true if the
/// pointer is dereferenceable at the specified instruction.
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                        Align Alignment, const DataLayout &DL,
                                        const Instruction *CtxI = nullptr,
                                        AssumptionCache *AC = nullptr,
                                        const DominatorTree *DT = nullptr,
                                        const TargetLibraryInfo *TLI = nullptr);

/// Returns true if V is always dereferenceable for Size bytes with alignment
/// greater than or equal to that requested. If the context instruction is
/// specified, performs context-sensitive analysis and returns true if the
/// pointer is dereferenceable at the specified instruction.
bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment,
                                        const APInt &Size, const DataLayout &DL,
                                        const Instruction *CtxI = nullptr,
                                        AssumptionCache *AC = nullptr,
                                        const DominatorTree *DT = nullptr,
                                        const TargetLibraryInfo *TLI = nullptr);

/// Return true if we know that executing a load from this value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do a
/// quick local scan of the basic block containing ScanFrom, to determine if
/// the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                 const DataLayout &DL,
                                 Instruction *ScanFrom = nullptr,
                                 AssumptionCache *AC = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 const TargetLibraryInfo *TLI = nullptr);

/// Return true if we can prove that the given load (which is assumed to be
/// within the specified loop) would access only dereferenceable memory, and
/// be properly aligned on every iteration of the specified loop regardless of
/// its placement within the loop. (i.e. does not require predication beyond
/// that required by the header itself and could be hoisted into the header
/// if desired.)  This is more powerful than the variants above when the
/// address loaded from is analyzable by SCEV.
bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                       ScalarEvolution &SE, DominatorTree &DT,
                                       AssumptionCache *AC = nullptr);

/// Return true if we know that executing a load from this value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do a
/// quick local scan of the basic block containing ScanFrom, to determine if
/// the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                 const DataLayout &DL,
                                 Instruction *ScanFrom = nullptr,
                                 AssumptionCache *AC = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 const TargetLibraryInfo *TLI = nullptr);

/// The default number of maximum instructions to scan in the block, used by
/// FindAvailableLoadedValue().
extern cl::opt<unsigned> DefMaxInstsToScan;

/// Scan backwards to see if we have the value of the given load available
/// locally within a small number of instructions.
///
/// You can use this function to scan across multiple blocks: after you call
/// this function, if ScanFrom points at the beginning of the block, it's safe
/// to continue scanning the predecessors.
///
/// Note that performing load CSE requires special care to make sure the
/// metadata is set appropriately.  In particular, aliasing metadata needs
/// to be merged.  (This doesn't matter for store-to-load forwarding because
/// the only relevant load gets deleted.)
///
/// \param Load The load we want to replace.
/// \param ScanBB The basic block to scan.
/// \param [in,out] ScanFrom The location to start scanning from. When this
/// function returns, it points at the last instruction scanned.
/// \param MaxInstsToScan The maximum number of instructions to scan. If this
/// is zero, the whole block will be scanned.
/// \param AA Optional pointer to alias analysis, to make the scan more
/// precise.
/// \param [out] IsLoadCSE Whether the returned value is a load from the same
/// location in memory, as opposed to the value operand of a store.
///
/// \returns The found value, or nullptr if no value is found.
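///
/// A same-block scan sketch (hypothetical names; Load is the LoadInst we hope
/// to replace):
///
/// \code
/// BasicBlock::iterator ScanFrom = Load->getIterator();
/// bool IsLoadCSE = false;
/// if (Value *V = FindAvailableLoadedValue(Load, Load->getParent(), ScanFrom,
///                                         DefMaxInstsToScan, /*AA=*/nullptr,
///                                         &IsLoadCSE))
///   ...; // V can stand in for Load; merge metadata when IsLoadCSE is true
/// \endcode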
Value *FindAvailableLoadedValue(LoadInst *Load,
                                BasicBlock *ScanBB,
                                BasicBlock::iterator &ScanFrom,
                                unsigned MaxInstsToScan = DefMaxInstsToScan,
                                AAResults *AA = nullptr,
                                bool *IsLoadCSE = nullptr,
                                unsigned *NumScanedInst = nullptr);

/// This overload provides a more efficient implementation of
/// FindAvailableLoadedValue() for the case where we are not interested in
/// finding the closest clobbering instruction if no available load is found.
/// This overload cannot be used to scan across multiple blocks.
Value *FindAvailableLoadedValue(LoadInst *Load, AAResults &AA, bool *IsLoadCSE,
                                unsigned MaxInstsToScan = DefMaxInstsToScan);

/// Scan backwards to see if we have the value of the given pointer available
/// locally within a small number of instructions.
///
/// You can use this function to scan across multiple blocks: after you call
/// this function, if ScanFrom points at the beginning of the block, it's safe
/// to continue scanning the predecessors.
///
/// \param Loc The location we want the load and store to originate from.
/// \param AccessTy The access type of the pointer.
/// \param AtLeastAtomic Are we looking for an at-least-atomic load/store? If
/// false, we can return an atomic or non-atomic load or store. If true, we
/// must return an atomic load or store.
/// \param ScanBB The basic block to scan.
/// \param [in,out] ScanFrom The location to start scanning from. When this
/// function returns, it points at the last instruction scanned.
/// \param MaxInstsToScan The maximum number of instructions to scan. If this
/// is zero, the whole block will be scanned.
/// \param AA Optional pointer to alias analysis, to make the scan more
/// precise.
/// \param [out] IsLoadCSE Whether the returned value is a load from the same
/// location in memory, as opposed to the value operand of a store.
///
/// \returns The found value, or nullptr if no value is found.
Value *findAvailablePtrLoadStore(const MemoryLocation &Loc, Type *AccessTy,
                                 bool AtLeastAtomic, BasicBlock *ScanBB,
                                 BasicBlock::iterator &ScanFrom,
                                 unsigned MaxInstsToScan, AAResults *AA,
                                 bool *IsLoadCSE, unsigned *NumScanedInst);

/// Returns true if a pointer value \p A can be replaced with another pointer
/// value \p B if they are deemed equal through some means (e.g. information
/// from conditions).
/// NOTE: The current implementation is incomplete and unsound. It does not yet
/// reject all invalid cases, and will be made stricter in the future. In
/// particular, returning true does not guarantee that the replacement is safe.
bool canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
                               Instruction *CtxI);
} // end namespace llvm

#endif
//===- MemDerefPrinter.h - Printer for isDereferenceablePointer -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMDEREFPRINTER_H
#define LLVM_ANALYSIS_MEMDEREFPRINTER_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class MemDerefPrinterPass : public PassInfoMixin<MemDerefPrinterPass> {
  raw_ostream &OS;

public:
  MemDerefPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm

#endif // LLVM_ANALYSIS_MEMDEREFPRINTER_H
//===- IRSimilarityIdentifier.h - Find similarity in a module --------------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// Interface file for the IRSimilarityIdentifier for identifying similarities in
// IR including the IRInstructionMapper, which maps an Instruction to unsigned
// integers.
//
// Two sequences of instructions are called "similar" if they perform the same
// series of operations for all inputs.
//
// \code
// %1 = add i32 %a, 10
// %2 = add i32 %a, %1
// %3 = icmp slt i32 %1, %2
// \endcode
//
// and
//
// \code
// %1 = add i32 11, %a
// %2 = sub i32 %a, %1
// %3 = icmp sgt i32 %2, %1
// \endcode
//
// ultimately have the same result, even if the inputs and structure are
// slightly different.
//
// For instructions, we do not worry about operands that do not have fixed
// semantic meaning to the program.  We consider the opcode that the instruction
// has, the types, parameters, and extra information such as the function name,
// or comparison predicate.  These are used to create a hash to map instructions
// to integers to be used in similarity matching in sequences of instructions.
//
// Terminology:
// An IRSimilarityCandidate is a region of IRInstructionData (wrapped
// Instructions), usually used to denote a region of similarity has been found.
//
// A SimilarityGroup is a set of IRSimilarityCandidates that are structurally
// similar to one another.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
#define LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H

#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include <optional>

namespace llvm {
class Module;

namespace IRSimilarity {

struct IRInstructionDataList;

/// This represents what is and is not supported when finding similarity in
/// Instructions.
///
/// Legal Instructions are considered when looking at similarity between
/// Instructions.
///
/// Illegal Instructions cannot be considered when looking for similarity
/// between Instructions. They act as boundaries between similarity regions.
///
/// Invisible Instructions are skipped over during analysis.
// TODO: Shared with MachineOutliner
enum InstrType { Legal, Illegal, Invisible };

/// This provides the utilities for hashing an Instruction to an unsigned
/// integer. Two IRInstructionDatas produce the same hash value when their
/// underlying Instructions perform the same operation (even if they don't have
/// the same input operands.)
/// As a more concrete example, consider the following:
///
/// \code
/// %add1 = add i32 %a, %b
/// %add2 = add i32 %c, %d
/// %add3 = add i64 %e, %f
/// \endcode
///
/// Then the IRInstructionData wrappers for these Instructions may be hashed like
/// so:
///
/// \code
/// ; These two adds have the same types and operand types, so they hash to the
/// ; same number.
/// %add1 = add i32 %a, %b ; Hash: 1
/// %add2 = add i32 %c, %d ; Hash: 1
/// ; This add produces an i64. This differentiates it from %add1 and %add2. So,
/// ; it hashes to a different number.
/// %add3 = add i64 %e, %f; Hash: 2
/// \endcode
///
///
/// This hashing scheme will be used to represent the program as a very long
/// string. This string can then be placed in a data structure which can be used
/// for similarity queries.
///
/// TODO: Handle types of Instructions which can be equal even with different
/// operands. (E.g. comparisons with swapped predicates.)
/// TODO: Handle CallInsts, which are only checked for function type
/// by \ref isSameOperationAs.
/// TODO: Handle GetElementPtrInsts, as some of the operands have to be the
/// exact same, and some do not.
struct IRInstructionData
    : ilist_node<IRInstructionData, ilist_sentinel_tracking<true>> {

  /// The source Instruction that is being wrapped.
  Instruction *Inst = nullptr;
  /// The values of the operands in the Instruction.
  SmallVector<Value *, 4> OperVals;
  /// The legality of the wrapped instruction. This is informed by InstrType,
  /// and is used when checking whether two instructions are considered similar.
  /// If either instruction is not legal, the instructions are automatically not
  /// considered similar.
  bool Legal = false;

  /// This is only relevant if we are wrapping a CmpInst where we needed to
  /// change the predicate of a compare instruction from a greater than form
  /// to a less than form.  It is std::nullopt otherwise.
  std::optional<CmpInst::Predicate> RevisedPredicate;

  /// This is only relevant if we are wrapping a CallInst. If we are requiring
  /// that the function calls have matching names as well as types, and the
  /// call is not an indirect call, this will hold the name of the function.  If
  /// it is an indirect call, it will be the empty string.  However, if this
  /// requirement is not in place it will be the empty string regardless of the
  /// function call type.  The value held here is used to create the hash of the
  /// instruction, and check to make sure two instructions are close to one
  /// another.
  std::optional<std::string> CalleeName;

  /// This structure holds the distances of how far "ahead of" or "behind" the
  /// target blocks of a branch, or the incoming blocks of a phi node, are.
  /// If the value is negative, it means that the block was registered before
  /// the block of this instruction in terms of blocks in the function.
  /// Code Example:
  /// \code
  /// block_1:
  ///   br i1 %0, label %block_2, label %block_3
  /// block_2:
  ///   br i1 %1, label %block_1, label %block_2
  /// block_3:
  ///   br i1 %2, label %block_2, label %block_1
  /// ; Replacing the labels with relative values, this becomes:
  /// block_1:
  ///   br i1 %0, distance 1, distance 2
  /// block_2:
  ///   br i1 %1, distance -1, distance 0
  /// block_3:
  ///   br i1 %2, distance -1, distance -2
  /// \endcode
  /// Taking block_2 as our example, block_1 is "behind" block_2, and block_3
  /// is "ahead" of block_2.
  SmallVector<int, 4> RelativeBlockLocations;

  /// Gather the information that is difficult to gather for an Instruction, or
  /// is subject to change, i.e. the operands of an Instruction and the Types
  /// of those operands. This extra information allows similarity matching to
  /// make assertions that allow for more flexibility when checking whether an
  /// Instruction performs the same operation.
  IRInstructionData(Instruction &I, bool Legality, IRInstructionDataList &IDL);
  IRInstructionData(IRInstructionDataList &IDL);

  /// Fills data structures for IRInstructionData when it is constructed from a
  /// reference or a pointer.
  void initializeInstruction();

  /// Get the predicate that the compare instruction is using for hashing the
  /// instruction. The IRInstructionData must be wrapping a CmpInst.
  CmpInst::Predicate getPredicate() const;

  /// Get the callee name that the call instruction is using for hashing the
  /// instruction. The IRInstructionData must be wrapping a CallInst.
  StringRef getCalleeName() const;

  /// A function that swaps the predicates to their less than form if they are
  /// in a greater than form. Otherwise, the predicate is unchanged.
  ///
  /// \param CI - The comparison operation to find a consistent predicate for.
  /// \return the consistent comparison predicate.
  static CmpInst::Predicate predicateForConsistency(CmpInst *CI);

  /// For an IRInstructionData containing a branch, finds the
  /// relative distances from the source basic block to the target by taking
  /// the difference of the number assigned to the current basic block and the
  /// target basic block of the branch.
  ///
  /// \param BasicBlockToInteger - The mapping of basic blocks to their location
  /// in the module.
  void
  setBranchSuccessors(DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger);

  /// For an IRInstructionData containing a CallInst, set the function name
  /// appropriately.  This will be an empty string if it is an indirect call,
  /// or we are not matching by name of the called function.  It will be the
  /// name of the function if \p MatchByName is true and it is not an indirect
  /// call.  We may decide not to match by name in order to expand the
  /// size of the regions we can match.  If two functions have the same type
  /// signature but different names, the regions of code are still almost the
  /// same.  Since function names can be treated as constants, the name itself
  /// could be extrapolated away.  However, matching by name provides a
  /// specificity and more "identical" code than not matching by name.
  ///
  /// \param MatchByName - A flag to mark whether we are using the called
  /// function name as a differentiating parameter.
  void setCalleeName(bool MatchByName = true);

  /// For an IRInstructionData containing a PHINode, finds the
  /// relative distances from the incoming basic block to the current block by
  /// taking the difference of the number assigned to the current basic block
  /// and the incoming basic block of the branch.
  ///
  /// \param BasicBlockToInteger - The mapping of basic blocks to their location
  /// in the module.
  void
  setPHIPredecessors(DenseMap<BasicBlock *, unsigned> &BasicBlockToInteger);

  /// Get the BasicBlock based operands for PHINodes and BranchInsts.
  ///
  /// \returns A list of relevant BasicBlocks.
  ArrayRef<Value *> getBlockOperVals();

  /// Hashes \p ID based on its opcode, types, and operand types.
  /// Two IRInstructionData instances produce the same hash when they perform
  /// the same operation.
  ///
  /// As a simple example, consider the following instructions.
  ///
  /// \code
  /// %add1 = add i32 %x1, %y1
  /// %add2 = add i32 %x2, %y2
  ///
  /// %sub = sub i32 %x1, %y1
  ///
  /// %add_i64 = add i64 %x2, %y2
  /// \endcode
  ///
  /// Because the first two adds operate on the same types and perform the
  /// same action, they will be hashed to the same value.
  ///
  /// However, the subtraction instruction is not the same as an addition, and
  /// will be hashed to a different value.
  ///
  /// Finally, the last add has a different type compared to the first two add
  /// instructions, so it will also be hashed to a different value than any of
  /// the previous instructions.
  ///
  /// \param [in] ID - The IRInstructionData instance to be hashed.
  /// \returns A hash_value of the IRInstructionData.
  friend hash_code hash_value(const IRInstructionData &ID) {
    SmallVector<Type *, 4> OperTypes;
    for (Value *V : ID.OperVals)
      OperTypes.push_back(V->getType());

    if (isa<CmpInst>(ID.Inst))
      return llvm::hash_combine(
          llvm::hash_value(ID.Inst->getOpcode()),
          llvm::hash_value(ID.Inst->getType()),
          llvm::hash_value(ID.getPredicate()),
          llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(ID.Inst)) {
      // To hash intrinsics, we use the opcode, and types like the other
      // instructions, but also, the Intrinsic ID, and the Name of the
      // intrinsic.
      Intrinsic::ID IntrinsicID = II->getIntrinsicID();
      return llvm::hash_combine(
          llvm::hash_value(ID.Inst->getOpcode()),
          llvm::hash_value(ID.Inst->getType()), llvm::hash_value(IntrinsicID),
          llvm::hash_value(*ID.CalleeName),
          llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
    }

    if (isa<CallInst>(ID.Inst)) {
      std::string FunctionName = *ID.CalleeName;
      return llvm::hash_combine(
          llvm::hash_value(ID.Inst->getOpcode()),
          llvm::hash_value(ID.Inst->getType()), llvm::hash_value(FunctionName),
          llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
    }

    return llvm::hash_combine(
        llvm::hash_value(ID.Inst->getOpcode()),
        llvm::hash_value(ID.Inst->getType()),
        llvm::hash_combine_range(OperTypes.begin(), OperTypes.end()));
  }

  IRInstructionDataList *IDL = nullptr;
};

struct IRInstructionDataList
    : simple_ilist<IRInstructionData, ilist_sentinel_tracking<true>> {};

/// Compare one IRInstructionData class to another IRInstructionData class for
/// whether they are performing the same operation and can be mapped to the
/// same value. For regular instructions, if the hash value is the same, they
/// will also be close.
///
/// \param A - The first IRInstructionData class to compare
/// \param B - The second IRInstructionData class to compare
/// \returns true if \p A and \p B are similar enough to be mapped to the same
/// value.
bool isClose(const IRInstructionData &A, const IRInstructionData &B);

struct IRInstructionDataTraits : DenseMapInfo<IRInstructionData *> {
  static inline IRInstructionData *getEmptyKey() { return nullptr; }
  static inline IRInstructionData *getTombstoneKey() {
    return reinterpret_cast<IRInstructionData *>(-1);
  }

  static unsigned getHashValue(const IRInstructionData *E) {
    using llvm::hash_value;
    assert(E && "IRInstructionData is a nullptr?");
    return hash_value(*E);
  }

  static bool isEqual(const IRInstructionData *LHS,
                      const IRInstructionData *RHS) {
    if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
        LHS == getEmptyKey() || LHS == getTombstoneKey())
      return LHS == RHS;

    assert(LHS && RHS && "nullptr should have been caught by getEmptyKey?");
    return isClose(*LHS, *RHS);
  }
};

/// Helper struct for converting the Instructions in a Module into a vector of
/// unsigned integers. This vector of unsigned integers can be thought of as a
/// "numeric string". This numeric string can then be queried by, for example,
/// data structures that find repeated substrings.
///
/// This hashing is done per BasicBlock in the module. To hash Instructions
/// based off of their operations, each Instruction is wrapped in an
/// IRInstructionData struct. The unsigned integer for an IRInstructionData
/// depends on:
/// - The hash provided by the IRInstructionData.
/// - Which member of InstrType the IRInstructionData is classified as.
/// See InstrType for more details on the possible classifications, and how they
/// manifest in the numeric string.
///
/// The numeric string for an individual BasicBlock is terminated by a unique
/// unsigned integer. This prevents data structures which rely on repetition
/// from matching across BasicBlocks. (For example, the SuffixTree.)
/// As a concrete example, if we have the following two BasicBlocks:
/// \code
/// bb0:
/// %add1 = add i32 %a, %b
/// %add2 = add i32 %c, %d
/// %add3 = add i64 %e, %f
/// bb1:
/// %sub = sub i32 %c, %d
/// %add4 = add i32 %c, %d
/// \endcode
/// We may hash the Instructions like this (via IRInstructionData):
/// \code
/// bb0:
/// %add1 = add i32 %a, %b ; Hash: 1
/// %add2 = add i32 %c, %d; Hash: 1
/// %add3 = add i64 %e, %f; Hash: 2
/// bb1:
/// %sub = sub i32 %c, %d; Hash: 3
/// %add4 = add i32 %c, %d ; Hash: 1
/// \endcode
/// And produce a "numeric string representation" like so:
/// 1, 1, 2, unique_integer_1, 3, 1, unique_integer_2
///
/// TODO: This is very similar to the MachineOutliner, and should be
/// consolidated into the same interface.
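///
/// A driver sketch (hypothetical names; a sketch based on the interface
/// below):
///
/// \code
/// SpecificBumpPtrAllocator<IRInstructionData> InstDataAlloc;
/// SpecificBumpPtrAllocator<IRInstructionDataList> IDLAlloc;
/// IRInstructionMapper Mapper(&InstDataAlloc, &IDLAlloc);
/// Mapper.initializeForBBs(M);
/// std::vector<IRInstructionData *> InstrList;
/// std::vector<unsigned> IntegerMapping;
/// for (Function &F : M)
///   for (BasicBlock &BB : F)
///     Mapper.convertToUnsignedVec(BB, InstrList, IntegerMapping);
/// \endcode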
struct IRInstructionMapper {
  /// The starting illegal instruction number to map to.
  ///
  /// Set to -3 for compatibility with DenseMapInfo<unsigned>.
  unsigned IllegalInstrNumber = static_cast<unsigned>(-3);

  /// The next available integer to assign to a legal Instruction to.
  unsigned LegalInstrNumber = 0;

  /// Correspondence from IRInstructionData to unsigned integers.
  DenseMap<IRInstructionData *, unsigned, IRInstructionDataTraits>
      InstructionIntegerMap;

  /// A mapping for a basic block in a module to its assigned number/location
  /// in the module.
  DenseMap<BasicBlock *, unsigned> BasicBlockToInteger;

  /// Set if we added an illegal number in the previous step.
  /// Since each illegal number is unique, we only need one of them between
  /// each range of legal numbers. This lets us make sure we don't add more
  /// than one illegal number per range.
  bool AddedIllegalLastTime = false;

  /// Marks whether we found an illegal instruction in the previous step.
  bool CanCombineWithPrevInstr = false;

  /// Marks whether we have found a set of instructions that is long enough
  /// to be considered for similarity.
  bool HaveLegalRange = false;

  /// Marks whether we should use exact function names, as well as types to
  /// find similarity between calls.
  bool EnableMatchCallsByName = false;

  /// This allocator pointer is in charge of holding on to the IRInstructionData
  /// so it is not deallocated until whatever external tool is using it is done
  /// with the information.
  SpecificBumpPtrAllocator<IRInstructionData> *InstDataAllocator = nullptr;

  /// This allocator pointer is in charge of creating the IRInstructionDataList
  /// so it is not deallocated until whatever external tool is using it is done
  /// with the information.
  SpecificBumpPtrAllocator<IRInstructionDataList> *IDLAllocator = nullptr;

  /// Get an allocated IRInstructionData struct using the InstDataAllocator.
  ///
  /// \param I - The Instruction to wrap with IRInstructionData.
  /// \param Legality - A boolean value that is true if the instruction is to
  /// be considered for similarity, and false if not.
  /// \param IDL - The InstructionDataList that the IRInstructionData is
  /// inserted into.
  /// \returns An allocated IRInstructionData struct.
  IRInstructionData *allocateIRInstructionData(Instruction &I, bool Legality,
                                               IRInstructionDataList &IDL);

  /// Get an empty allocated IRInstructionData struct using the
  /// InstDataAllocator.
  ///
  /// \param IDL - The InstructionDataList that the IRInstructionData is
  /// inserted into.
  /// \returns An allocated IRInstructionData struct.
  IRInstructionData *allocateIRInstructionData(IRInstructionDataList &IDL);

  /// Get an allocated IRInstructionDataList object using the IDLAllocator.
  ///
  /// \returns An allocated IRInstructionDataList object.
  IRInstructionDataList *allocateIRInstructionDataList();

  IRInstructionDataList *IDL = nullptr;

  /// Assigns values to all the basic blocks in function \p F starting from
  /// integer \p BBNumber.
  ///
  /// \param F - The function containing the basic blocks to assign numbers to.
  /// \param BBNumber - The number to start from.
  void initializeForBBs(Function &F, unsigned &BBNumber) {
    for (BasicBlock &BB : F)
      BasicBlockToInteger.insert(std::make_pair(&BB, BBNumber++));
  }

  /// Assigns values to all the basic blocks in Module \p M.
  /// \param M - The module containing the basic blocks to assign numbers to.
  void initializeForBBs(Module &M) {
    unsigned BBNumber = 0;
    for (Function &F : M)
      initializeForBBs(F, BBNumber);
  }

  /// Maps the Instructions in a BasicBlock \p BB to legal or illegal integers
  /// determined by \p InstrType. Two Instructions are mapped to the same value
  /// if they are close as defined by the InstructionData class above.
  ///
  /// \param [in] BB - The BasicBlock to be mapped to integers.
  /// \param [in,out] InstrList - Vector of IRInstructionData to append to.
  /// \param [in,out] IntegerMapping - Vector of unsigned integers to append to.
  void convertToUnsignedVec(BasicBlock &BB,
                            std::vector<IRInstructionData *> &InstrList,
                            std::vector<unsigned> &IntegerMapping);

  /// Maps an Instruction to a legal integer.
  ///
  /// \param [in] It - The Instruction to be mapped to an integer.
  /// \param [in,out] IntegerMappingForBB - Vector of unsigned integers to
  /// append to.
  /// \param [in,out] InstrListForBB - Vector of InstructionData to append to.
  /// \returns The integer \p It was mapped to.
  unsigned mapToLegalUnsigned(BasicBlock::iterator &It,
                              std::vector<unsigned> &IntegerMappingForBB,
                              std::vector<IRInstructionData *> &InstrListForBB);

  /// Maps an Instruction to an illegal integer.
  ///
  /// \param [in] It - The \p Instruction to be mapped to an integer.
  /// \param [in,out] IntegerMappingForBB - Vector of unsigned integers to
  /// append to.
  /// \param [in,out] InstrListForBB - Vector of IRInstructionData to append to.
  /// \param End - true if creating a dummy IRInstructionData at the end of a
  /// basic block.
  /// \returns The integer \p It was mapped to.
  unsigned mapToIllegalUnsigned(
      BasicBlock::iterator &It, std::vector<unsigned> &IntegerMappingForBB,
      std::vector<IRInstructionData *> &InstrListForBB, bool End = false);

  IRInstructionMapper(SpecificBumpPtrAllocator<IRInstructionData> *IDA,
                      SpecificBumpPtrAllocator<IRInstructionDataList> *IDLA)
      : InstDataAllocator(IDA), IDLAllocator(IDLA) {
    // Make sure that the implementation of DenseMapInfo<unsigned> hasn't
    // changed.
    assert(DenseMapInfo<unsigned>::getEmptyKey() == static_cast<unsigned>(-1) &&
           "DenseMapInfo<unsigned>'s empty key isn't -1!");
    assert(DenseMapInfo<unsigned>::getTombstoneKey() ==
               static_cast<unsigned>(-2) &&
           "DenseMapInfo<unsigned>'s tombstone key isn't -2!");

    IDL = new (IDLAllocator->Allocate()) IRInstructionDataList();
  }

  /// Custom InstVisitor to classify different instructions for whether it can
  /// be analyzed for similarity.
  struct InstructionClassification
      : public InstVisitor<InstructionClassification, InstrType> {
    InstructionClassification() = default;

    // TODO: Determine a scheme to resolve when the label is similar enough.
    InstrType visitBranchInst(BranchInst &BI) {
      if (EnableBranches)
        return Legal;
      return Illegal;
    }
    InstrType visitPHINode(PHINode &PN) { 
      if (EnableBranches)
        return Legal;
      return Illegal;
    }
    // TODO: Handle allocas.
    InstrType visitAllocaInst(AllocaInst &AI) { return Illegal; }
    // We exclude variable argument instructions since variable arguments
    // requires extra checking of the argument list.
    InstrType visitVAArgInst(VAArgInst &VI) { return Illegal; }
    // We exclude all exception handling cases since they are so context
    // dependent.
    InstrType visitLandingPadInst(LandingPadInst &LPI) { return Illegal; }
    InstrType visitFuncletPadInst(FuncletPadInst &FPI) { return Illegal; }
    // DebugInfo should be included in the regions, but should not be
    // analyzed for similarity as it has no bearing on the outcome of the
    // program.
    InstrType visitDbgInfoIntrinsic(DbgInfoIntrinsic &DII) { return Invisible; }
    InstrType visitIntrinsicInst(IntrinsicInst &II) {
      // These are disabled due to complications in the CodeExtractor when
      // outlining these instructions.  For instance, it is unclear what we
      // should do when moving only the start or end lifetime instruction into
      // an outlined function. Also, assume-like intrinsics could be removed
      // from the region, removing arguments, causing discrepancies in the
      // number of inputs between different regions.
      if (II.isAssumeLikeIntrinsic())
        return Illegal;
      return EnableIntrinsics ? Legal : Illegal;
    }
    // We only allow call instructions where the called function has a name,
    // or indirect calls when they are explicitly enabled.
    InstrType visitCallInst(CallInst &CI) {
      Function *F = CI.getCalledFunction();
      bool IsIndirectCall = CI.isIndirectCall();
      if (IsIndirectCall && !EnableIndirectCalls)
        return Illegal;
      if (!F && !IsIndirectCall)
        return Illegal;
      // Functions marked with the swifttailcc and tailcc calling conventions
      // require special handling when outlining musttail functions.  The
      // calling convention must be passed down to the outlined function as
      // well. Further, there is special handling for musttail calls as well,
      // requiring a return call directly after.  For now, the outliner does not
      // support this, so we do not handle matching this case either.
      if ((CI.getCallingConv() == CallingConv::SwiftTail ||
           CI.getCallingConv() == CallingConv::Tail) &&
          !EnableMustTailCalls)
        return Illegal;
      if (CI.isMustTailCall() && !EnableMustTailCalls)
        return Illegal;
      return Legal;
    }
    // TODO: We do not currently handle similarity that changes the control
    // flow.
    InstrType visitInvokeInst(InvokeInst &II) { return Illegal; }
    // TODO: We do not currently handle similarity that changes the control
    // flow.
    InstrType visitCallBrInst(CallBrInst &CBI) { return Illegal; }
    // TODO: Handle interblock similarity.
    InstrType visitTerminator(Instruction &I) { return Illegal; }
    InstrType visitInstruction(Instruction &I) { return Legal; }

    // The flag variable that lets the classifier know whether we should
    // allow branches to be checked for similarity.
    bool EnableBranches = false;

    // The flag variable that lets the classifier know whether we should
    // allow indirect calls to be considered legal instructions.
    bool EnableIndirectCalls = false;

    // Flag that lets the classifier know whether we should allow intrinsics to
    // be checked for similarity.
    bool EnableIntrinsics = false;
  
    // Flag that lets the classifier know whether we should allow tail calls to
    // be checked for similarity.
    bool EnableMustTailCalls = false;
  };

  /// Maps an Instruction to a member of InstrType.
  InstructionClassification InstClassifier;
};
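
// A minimal usage sketch of the mapper (assumptions for illustration: a
// `BasicBlock &BB` is in scope, and convertToUnsignedVec is declared earlier
// in this header).  The allocators must outlive the mapper, since each
// IRInstructionData is bump-allocated:
//
//   SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
//   SpecificBumpPtrAllocator<IRInstructionDataList> InstDataListAllocator;
//   IRInstructionMapper Mapper(&InstDataAllocator, &InstDataListAllocator);
//   Mapper.InstClassifier.EnableBranches = true; // Opt in to branch matching.
//   std::vector<IRInstructionData *> InstrList;
//   std::vector<unsigned> IntegerMapping;
//   Mapper.convertToUnsignedVec(BB, InstrList, IntegerMapping);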

/// This is a class that wraps a range of IRInstructionData from one point to
/// another in the vector of IRInstructionData, which is a region of the
/// program.  It is also responsible for defining the structure within this
/// region of instructions.
///
/// The structure of a region is defined through a value numbering system
/// assigned to each unique value in a region at the creation of the
/// IRSimilarityCandidate.
///
/// For example, for each Instruction we add a mapping for each new
/// value seen in that Instruction.
/// IR:                    Mapping Added:
/// %add1 = add i32 %a, c1    %add1 -> 3, %a -> 1, c1 -> 2
/// %add2 = add i32 %a, c1    %add2 -> 4
/// %add3 = add i32 c2, c1    %add3 -> 6, c2 -> 5
///
/// We can compare IRSimilarityCandidates against one another.
/// The \ref isSimilar function compares each IRInstructionData against one
/// another and if we have the same sequences of IRInstructionData that would
/// create the same hash, we have similar IRSimilarityCandidates.
///
/// We can also compare the structure of IRSimilarityCandidates. If we can
/// create a mapping of registers in the region contained by one
/// IRSimilarityCandidate to the region contained by different
/// IRSimilarityCandidate, they can be considered structurally similar.
///
/// IRSimilarityCandidate1:   IRSimilarityCandidate2:
/// %add1 = add i32 %a, %b    %add1 = add i32 %d, %e
/// %add2 = add i32 %a, %c    %add2 = add i32 %d, %f
/// %add3 = add i32 c1, c2    %add3 = add i32 c3, c4
///
/// Can have the following mapping from candidate to candidate of:
/// %a -> %d, %b -> %e, %c -> %f, c1 -> c3, c2 -> c4
/// and can be considered similar.
///
/// IRSimilarityCandidate1:   IRSimilarityCandidate2:
/// %add1 = add i32 %a, %b    %add1 = add i32 %d, c4
/// %add2 = add i32 %a, %c    %add2 = add i32 %d, %f
/// %add3 = add i32 c1, c2    %add3 = add i32 c3, c4
///
/// We cannot create the same mapping, since c4 is not used in the same way as
/// %b and c2 are.
class IRSimilarityCandidate {
private:
  /// The start index of this IRSimilarityCandidate in the instruction list.
  unsigned StartIdx = 0;

  /// The number of instructions in this IRSimilarityCandidate.
  unsigned Len = 0;

  /// The first instruction in this IRSimilarityCandidate.
  IRInstructionData *FirstInst = nullptr;

  /// The last instruction in this IRSimilarityCandidate.
  IRInstructionData *LastInst = nullptr;

  /// Global Value Numbering structures
  /// @{
  /// Stores the mapping of the value to the number assigned to it in the
  /// IRSimilarityCandidate.
  DenseMap<Value *, unsigned> ValueToNumber;
  /// Stores the mapping of the number to the value assigned this number.
  DenseMap<unsigned, Value *> NumberToValue;
  /// Stores the mapping of a value's number to canonical numbering in the
  /// candidate's respective similarity group.
  DenseMap<unsigned, unsigned> NumberToCanonNum;
  /// Stores the mapping of canonical number in the candidate's respective
  /// similarity group to a value number.
  DenseMap<unsigned, unsigned> CanonNumToNumber;
  /// @}

public:
  /// \param StartIdx - The starting location of the region.
  /// \param Len - The length of the region.
  /// \param FirstInstIt - The starting IRInstructionData of the region.
  /// \param LastInstIt - The ending IRInstructionData of the region.
  IRSimilarityCandidate(unsigned StartIdx, unsigned Len,
                        IRInstructionData *FirstInstIt,
                        IRInstructionData *LastInstIt);

  /// \param A - The first IRInstructionCandidate to compare.
  /// \param B - The second IRInstructionCandidate to compare.
  /// \returns True when every IRInstructionData in \p A is similar to every
  /// IRInstructionData in \p B.
  static bool isSimilar(const IRSimilarityCandidate &A,
                        const IRSimilarityCandidate &B);

  /// \param [in] A - The first IRInstructionCandidate to compare.
  /// \param [in] B - The second IRInstructionCandidate to compare.
  /// \returns True when every IRInstructionData in \p A is structurally similar
  /// to \p B.
  static bool compareStructure(const IRSimilarityCandidate &A,
                               const IRSimilarityCandidate &B);

  /// \param [in] A - The first IRInstructionCandidate to compare.
  /// \param [in] B - The second IRInstructionCandidate to compare.
  /// \param [in,out] ValueNumberMappingA - A mapping of value numbers from
  /// candidate \p A to candidate \p B.
  /// \param [in,out] ValueNumberMappingB - A mapping of value numbers from
  /// candidate \p B to candidate \p A.
  /// \returns True when every IRInstructionData in \p A is structurally similar
  /// to \p B.
  static bool
  compareStructure(const IRSimilarityCandidate &A,
                   const IRSimilarityCandidate &B,
                   DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingA,
                   DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingB);
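
  // One possible flow (a sketch; `CandA` and `CandB` are hypothetical
  // candidates): check structural similarity, then derive a canonical
  // numbering for `CandB` from the mappings the comparison discovered.
  //
  //   DenseMap<unsigned, DenseSet<unsigned>> MapA, MapB;
  //   if (IRSimilarityCandidate::compareStructure(CandA, CandB, MapA, MapB)) {
  //     IRSimilarityCandidate::createCanonicalMappingFor(CandA);
  //     CandB.createCanonicalRelationFrom(CandA, /*ToSourceMapping=*/MapB,
  //                                       /*FromSourceMapping=*/MapA);
  //   }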

  struct OperandMapping {
    /// The IRSimilarityCandidate that holds the instruction the OperVals were
    /// pulled from.
    const IRSimilarityCandidate &IRSC;

    /// The operand values to be analyzed.
    ArrayRef<Value *> &OperVals;

    /// The current mapping of global value numbers from one IRSimilarityCandidate
    /// to another IRSimilarityCandidate.
    DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMapping;
  };

  /// A helper struct to hold the candidate, for a branch instruction, the
  /// relative location of a label, and the label itself.  This is mostly to
  /// group the values together before passing them as a bundle to a function.
  struct RelativeLocMapping {
    /// The IRSimilarityCandidate that holds the instruction the relative
    /// location was pulled from.
    const IRSimilarityCandidate &IRSC;

    /// The relative location to be analyzed.
    int RelativeLocation;

    /// The corresponding value.
    Value *OperVal;
  };

  /// Compare the operands in \p A and \p B and check that the current mapping
  /// of global value numbers from \p A to \p B and \p B to \p A is consistent.
  ///
  /// \param A - The first IRInstructionCandidate, operand values, and current
  /// operand mappings to compare.
  /// \param B - The second IRInstructionCandidate, operand values, and current
  /// operand mappings to compare.
  /// \returns true if the IRSimilarityCandidates operands are compatible.
  static bool compareNonCommutativeOperandMapping(OperandMapping A,
                                                  OperandMapping B);

  /// Compare the operands in \p A and \p B and check that the current mapping
  /// of global value numbers from \p A to \p B and \p B to \p A is consistent
  /// given that the operands are commutative.
  ///
  /// \param A - The first IRInstructionCandidate, operand values, and current
  /// operand mappings to compare.
  /// \param B - The second IRInstructionCandidate, operand values, and current
  /// operand mappings to compare.
  /// \returns true if the IRSimilarityCandidates operands are compatible.
  static bool compareCommutativeOperandMapping(OperandMapping A,
                                               OperandMapping B);

  /// Compare the GVN of the assignment value in corresponding instructions in
  /// IRSimilarityCandidates \p A and \p B and check that there exists a mapping
  /// between the values, replacing the mapping with a one-to-one value if
  /// needed.
  ///
  /// \param InstValA - The assignment GVN from the first IRSimilarityCandidate.
  /// \param InstValB - The assignment GVN from the second
  /// IRSimilarityCandidate.
  /// \param [in,out] ValueNumberMappingA - A mapping of value numbers from
  /// candidate \p A to candidate \p B.
  /// \param [in,out] ValueNumberMappingB - A mapping of value numbers from
  /// candidate \p B to candidate \p A.
  /// \returns true if the IRSimilarityCandidates assignments are compatible.
  static bool compareAssignmentMapping(
      const unsigned InstValA, const unsigned &InstValB,
      DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingA,
      DenseMap<unsigned, DenseSet<unsigned>> &ValueNumberMappingB);

  /// Compare the relative locations in \p A and \p B and check that the
  /// distances match if both locations are contained in the region, and that
  /// the branches both point outside the region if they do not.
  /// Example Region:
  /// \code
  /// entry:
  ///   br i1 %0, label %block_1, label %block_3
  /// block_0:
  ///   br i1 %0, label %block_1, label %block_2
  /// block_1:
  ///   br i1 %0, label %block_2, label %block_3
  /// block_2:
  ///   br i1 %1, label %block_1, label %block_4
  /// block_3:
  ///   br i1 %2, label %block_2, label %block_5
  /// \endcode
  /// If we compare the branches in block_0 and block_1 the relative values are
  /// 1 and 2 for both, so we consider this a match.
  ///
  /// If we compare the branches in entry and block_0 the relative values are
  /// 2 and 3, and 1 and 2 respectively.  Since these are not the same we do not
  /// consider them a match.
  ///
  /// If we compare the branches in block_1 and block_2 the relative values are
  /// 1 and 2, and -1 and None respectively.  As a result we do not consider
  /// these to be the same.
  ///
  /// If we compare the branches in block_2 and block_3 the relative values are
  /// -1 and None for both.  We do consider these to be a match.
  ///
  /// \param A - The first IRInstructionCandidate, relative location value,
  /// and incoming block.
  /// \param B - The second IRInstructionCandidate, relative location value,
  /// and incoming block.
  /// \returns true if the relative locations match.
  static bool checkRelativeLocations(RelativeLocMapping A,
                                     RelativeLocMapping B);

  /// Create a mapping from the value numbering to a different separate set of
  /// numbers. This will serve as a guide for relating one candidate to another.
  /// The canonical number gives us the ability to identify which global value
  /// number in one candidate relates to the global value number in the other.
  ///
  /// \param [in, out] CurrCand - The IRSimilarityCandidate to create a
  /// canonical numbering for.
  static void createCanonicalMappingFor(IRSimilarityCandidate &CurrCand);

  /// Create a mapping for the value numbering of the calling
  /// IRSimilarityCandidate, to a different separate set of numbers, based on
  /// the canonical ordering in \p SourceCand. These are defined based on the
  /// found mappings in \p ToSourceMapping and \p FromSourceMapping.  Both of
  /// these relationships should have the same information, just in opposite
  /// directions.
  ///
  /// \param [in, out] SourceCand - The IRSimilarityCandidate to create a
  /// canonical numbering from.
  /// \param ToSourceMapping - The mapping of value numbers from this candidate
  /// to \p SourceCand.
  /// \param FromSourceMapping - The mapping of value numbers from \p SourceCand
  /// to this candidate.
  void createCanonicalRelationFrom(
      IRSimilarityCandidate &SourceCand,
      DenseMap<unsigned, DenseSet<unsigned>> &ToSourceMapping,
      DenseMap<unsigned, DenseSet<unsigned>> &FromSourceMapping);
  
  /// Create a mapping for the value numbering of the calling
  /// IRSimilarityCandidate, to a different separate set of numbers, based on
  /// the canonical ordering in \p SourceCand. These are defined based on the
  /// found mappings in \p ToSourceMapping and \p FromSourceMapping.  Both of
  /// these relationships should have the same information, just in opposite
  /// directions.  Uses the \p OneToOne mapping from target candidate to \p
  /// SourceCand GVNs to determine the mapping first for values with multiple
  /// mappings.  This mapping is created by the ordering of operands in the
  /// instruction they are first seen in the candidates.
  ///
  /// \param [in, out] SourceCand - The IRSimilarityCandidate to create a
  /// canonical numbering from.
  /// \param [in,out] OneToOne - A mapping of value numbers from candidate
  /// \p A to candidate \p B using the structure of the original instructions.
  /// \param ToSourceMapping - The mapping of value numbers from this candidate
  /// to \p SourceCand.
  /// \param FromSourceMapping - The mapping of value numbers from \p SourceCand
  /// to this candidate.
  void createCanonicalRelationFrom(
      IRSimilarityCandidate &SourceCand,
      DenseMap<unsigned, unsigned> &OneToOne,
      DenseMap<unsigned, DenseSet<unsigned>> &ToSourceMapping,
      DenseMap<unsigned, DenseSet<unsigned>> &FromSourceMapping);
  
  /// Create a mapping for the value numbering of the calling
  /// IRSimilarityCandidate, to a different separate set of numbers, based on
  /// the canonical ordering in \p SourceCand. These are defined based on the
  /// canonical mapping defined between \p SourceCandLarge and
  /// \p TargetCandLarge.  These IRSimilarityCandidates are already structurally
  /// similar, and fully encapsulate the IRSimilarityCandidates in question.
  /// These are used as a "bridge" from the \p SourceCand to the target.
  ///
  /// \param [in, out] SourceCand - The IRSimilarityCandidate to create a
  /// canonical numbering from.
  /// \param SourceCandLarge - The IRSimilarityCandidate fully containing
  /// \p SourceCand.
  /// \param TargetCandLarge - The IRSimilarityCandidate fully containing
  /// this Candidate.
  void createCanonicalRelationFrom(
      IRSimilarityCandidate &SourceCand,
      IRSimilarityCandidate &SourceCandLarge,
      IRSimilarityCandidate &TargetCandLarge);

  /// \param [in,out] BBSet - The set to track the basic blocks.
  void getBasicBlocks(DenseSet<BasicBlock *> &BBSet) const {
    for (IRInstructionData &ID : *this) {
      BasicBlock *BB = ID.Inst->getParent();
      BBSet.insert(BB);
    }
  }

  /// \param [in,out] BBSet - The set to track the basic blocks.
  /// \param [in,out] BBList - A list in order of use to track the basic blocks.
  void getBasicBlocks(DenseSet<BasicBlock *> &BBSet,
                      SmallVector<BasicBlock *> &BBList) const {
    for (IRInstructionData &ID : *this) {
      BasicBlock *BB = ID.Inst->getParent();
      if (BBSet.insert(BB).second)
        BBList.push_back(BB);
    }
  }

  /// Compare the start and end indices of the two IRSimilarityCandidates for
  /// whether they overlap. If the start index of one IRSimilarityCandidate is
  /// less than the end index of the other, and its start index is greater than
  /// the start index of the other, they overlap.
  ///
  /// \returns true if the IRSimilarityCandidates have overlapping
  /// instructions.
  static bool overlap(const IRSimilarityCandidate &A,
                      const IRSimilarityCandidate &B);

  /// \returns the number of instructions in this Candidate.
  unsigned getLength() const { return Len; }

  /// \returns the start index of this IRSimilarityCandidate.
  unsigned getStartIdx() const { return StartIdx; }

  /// \returns the end index of this IRSimilarityCandidate.
  unsigned getEndIdx() const { return StartIdx + Len - 1; }

  /// \returns The first IRInstructionData.
  IRInstructionData *front() const { return FirstInst; }
  /// \returns The last IRInstructionData.
  IRInstructionData *back() const { return LastInst; }

  /// \returns The first Instruction.
  Instruction *frontInstruction() { return FirstInst->Inst; }
  /// \returns The last Instruction.
  Instruction *backInstruction() { return LastInst->Inst; }

  /// \returns The BasicBlock the IRSimilarityCandidate starts in.
  BasicBlock *getStartBB() { return FirstInst->Inst->getParent(); }
  /// \returns The BasicBlock the IRSimilarityCandidate ends in.
  BasicBlock *getEndBB() { return LastInst->Inst->getParent(); }

  /// \returns The Function that the IRSimilarityCandidate is located in.
  Function *getFunction() { return getStartBB()->getParent(); }

  /// Finds the positive number associated with \p V if it has been mapped.
  /// \param [in] V - the Value to find.
  /// \returns The positive number corresponding to the value.
  /// \returns std::nullopt if not present.
  std::optional<unsigned> getGVN(Value *V) {
    assert(V != nullptr && "Value is a nullptr?");
    DenseMap<Value *, unsigned>::iterator VNIt = ValueToNumber.find(V);
    if (VNIt == ValueToNumber.end())
      return std::nullopt;
    return VNIt->second;
  }

  /// Finds the Value associated with \p Num if it exists.
  /// \param [in] Num - the number to find.
  /// \returns The Value associated with the number.
  /// \returns std::nullopt if not present.
  std::optional<Value *> fromGVN(unsigned Num) {
    DenseMap<unsigned, Value *>::iterator VNIt = NumberToValue.find(Num);
    if (VNIt == NumberToValue.end())
      return std::nullopt;
    assert(VNIt->second != nullptr && "Found value is a nullptr!");
    return VNIt->second;
  }

  /// Find the canonical number from the global value number \p N stored in the
  /// candidate.
  ///
  /// \param N - The global value number to find the canonical number for.
  /// \returns An optional containing the value, and std::nullopt if it could
  /// not be found.
  std::optional<unsigned> getCanonicalNum(unsigned N) {
    DenseMap<unsigned, unsigned>::iterator NCIt = NumberToCanonNum.find(N);
    if (NCIt == NumberToCanonNum.end())
      return std::nullopt;
    return NCIt->second;
  }

  /// Find the global value number from the canonical number \p N stored in the
  /// candidate.
  ///
  /// \param N - The canonical number to find the global value number for.
  /// \returns An optional containing the value, and std::nullopt if it could
  /// not be found.
  std::optional<unsigned> fromCanonicalNum(unsigned N) {
    DenseMap<unsigned, unsigned>::iterator CNIt = CanonNumToNumber.find(N);
    if (CNIt == CanonNumToNumber.end())
      return std::nullopt;
    return CNIt->second;
  }

  /// \param RHS - The IRSimilarityCandidate to compare against.
  /// \returns true if this IRSimilarityCandidate occurs after \p RHS in the
  /// program.
  bool operator<(const IRSimilarityCandidate &RHS) const {
    return getStartIdx() > RHS.getStartIdx();
  }

  using iterator = IRInstructionDataList::iterator;
  iterator begin() const { return iterator(front()); }
  iterator end() const { return std::next(iterator(back())); }
};
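
// A small sketch of querying a candidate (`Cand` and `V` are hypothetical):
//
//   for (IRInstructionData &ID : Cand)
//     ID.Inst->print(llvm::errs());
//   if (std::optional<unsigned> GVN = Cand.getGVN(V))
//     if (std::optional<unsigned> Canon = Cand.getCanonicalNum(*GVN))
//       // Round-trip: canonical number -> GVN -> original Value.
//       Value *Mapped = *Cand.fromGVN(*Cand.fromCanonicalNum(*Canon));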

typedef DenseMap<IRSimilarityCandidate *,
                 DenseMap<unsigned, DenseSet<unsigned>>>
    CandidateGVNMapping;
typedef std::vector<IRSimilarityCandidate> SimilarityGroup;
typedef std::vector<SimilarityGroup> SimilarityGroupList;

/// This class puts all the pieces of the IRInstructionData,
/// IRInstructionMapper, IRSimilarityCandidate together.
///
/// It first feeds the Module or vector of Modules into the IRInstructionMapper,
/// and puts all the mapped instructions into a single long list of
/// IRInstructionData.
///
/// The list of unsigned integers is given to the Suffix Tree or similar data
/// structure to find repeated subsequences.  We construct an
/// IRSimilarityCandidate for each instance of the subsequence.  We compare them
/// against one another, since these repeated subsequences can have different
/// structure.  For each different kind of structure found, we create a
/// similarity group.
///
/// If we had four IRSimilarityCandidates A, B, C, and D, where A, B, and D
/// are structurally similar to one another while C is different, we would
/// have two SimilarityGroups:
///
/// SimilarityGroup 1:  SimilarityGroup 2
/// A, B, D             C
///
/// A list of the different similarity groups is then returned after
/// analyzing the module.
class IRSimilarityIdentifier {
public:
  IRSimilarityIdentifier(bool MatchBranches = true,
                         bool MatchIndirectCalls = true,
                         bool MatchCallsWithName = false,
                         bool MatchIntrinsics = true,
                         bool MatchMustTailCalls = true)
      : Mapper(&InstDataAllocator, &InstDataListAllocator),
        EnableBranches(MatchBranches), EnableIndirectCalls(MatchIndirectCalls),
        EnableMatchingCallsByName(MatchCallsWithName),
        EnableIntrinsics(MatchIntrinsics),
        EnableMustTailCalls(MatchMustTailCalls) {}

private:
  /// Map the instructions in the module to unsigned integers, using mapping
  /// already present in the Mapper if possible.
  ///
  /// \param [in] M - The Module to map to integers.
  /// \param [in,out] InstrList - The vector to append IRInstructionData to.
  /// \param [in,out] IntegerMapping - The vector to append integers to.
  void populateMapper(Module &M, std::vector<IRInstructionData *> &InstrList,
                      std::vector<unsigned> &IntegerMapping);

  /// Map the instructions in the modules vector to unsigned integers, using
  /// mapping already present in the mapper if possible.
  ///
  /// \param [in] Modules - The list of modules to use to populate the mapper.
  /// \param [in,out] InstrList - The vector to append IRInstructionData to.
  /// \param [in,out] IntegerMapping - The vector to append integers to.
  void populateMapper(ArrayRef<std::unique_ptr<Module>> &Modules,
                      std::vector<IRInstructionData *> &InstrList,
                      std::vector<unsigned> &IntegerMapping);

  /// Find the similarity candidates in \p InstrList and corresponding
  /// \p IntegerMapping.
  ///
  /// \param [in,out] InstrList - The vector to append IRInstructionData to.
  /// \param [in,out] IntegerMapping - The vector to append integers to.
  void findCandidates(std::vector<IRInstructionData *> &InstrList,
                      std::vector<unsigned> &IntegerMapping);

public:
  // Find the IRSimilarityCandidates in the \p Modules and group them by
  // structural similarity in a SimilarityGroup; each group is returned in a
  // SimilarityGroupList.
  //
  // \param [in] Modules - the modules to analyze.
  // \returns The groups of similarity ranges found in the modules.
  SimilarityGroupList &
  findSimilarity(ArrayRef<std::unique_ptr<Module>> Modules);

  // Find the IRSimilarityCandidates in the given Module grouped by structural
  // similarity in a SimilarityGroup, contained inside a SimilarityGroupList.
  //
  // \param [in] M - the module to analyze.
  // \returns The groups of similarity ranges found in the module.
  SimilarityGroupList &findSimilarity(Module &M);

  // Clears \ref SimilarityCandidates if it is already filled by a previous run.
  void resetSimilarityCandidates() {
    // If we've already analyzed a Module or set of Modules, we must clear
    // the SimilarityCandidates to make sure we do not have stale values
    // hanging around.
    if (SimilarityCandidates)
      SimilarityCandidates->clear();
    else
      SimilarityCandidates = SimilarityGroupList();
  }

  // \returns The groups of similarity ranges found in the most recently passed
  // set of modules.
  std::optional<SimilarityGroupList> &getSimilarity() {
    return SimilarityCandidates;
  }

private:
  /// The allocator for IRInstructionData.
  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;

  /// The allocator for IRInstructionDataLists.
  SpecificBumpPtrAllocator<IRInstructionDataList> InstDataListAllocator;

  /// Map Instructions to unsigned integers and wraps the Instruction in an
  /// instance of IRInstructionData.
  IRInstructionMapper Mapper;

  /// The flag variable that marks whether we should check branches for
  /// similarity, or only look within basic blocks.
  bool EnableBranches = true;

  /// The flag variable that marks whether we allow indirect calls to be checked
  /// for similarity, or exclude them as a legal instruction.
  bool EnableIndirectCalls = true;

  /// The flag variable that marks whether we allow calls to be marked as
  /// similar if they do not have the same name, only the same calling
  /// convention, attributes and type signature.
  bool EnableMatchingCallsByName = true;

  /// The flag variable that marks whether we should check intrinsics for
  /// similarity.
  bool EnableIntrinsics = true;

  /// The flag variable that marks whether we should allow tail calls
  /// to be checked for similarity.
  bool EnableMustTailCalls = false;

  /// The SimilarityGroups found with the most recent run of \ref
  /// findSimilarity. std::nullopt if there is no recent run.
  std::optional<SimilarityGroupList> SimilarityCandidates;
};
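
// Typical usage (a sketch; `M` is a Module the caller owns):
//
//   IRSimilarityIdentifier Identifier;
//   SimilarityGroupList &Groups = Identifier.findSimilarity(M);
//   for (SimilarityGroup &Group : Groups)
//     for (IRSimilarityCandidate &Cand : Group)
//       ; // Each group holds structurally similar regions of the program.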

} // end namespace IRSimilarity

/// An analysis pass based on the legacy pass manager that runs and returns
/// the IRSimilarityIdentifier run on the Module.
class IRSimilarityIdentifierWrapperPass : public ModulePass {
  std::unique_ptr<IRSimilarity::IRSimilarityIdentifier> IRSI;

public:
  static char ID;
  IRSimilarityIdentifierWrapperPass();

  IRSimilarity::IRSimilarityIdentifier &getIRSI() { return *IRSI; }
  const IRSimilarity::IRSimilarityIdentifier &getIRSI() const { return *IRSI; }

  bool doInitialization(Module &M) override;
  bool doFinalization(Module &M) override;
  bool runOnModule(Module &M) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};

/// An analysis pass that runs and returns the IRSimilarityIdentifier run on the
/// Module.
class IRSimilarityAnalysis : public AnalysisInfoMixin<IRSimilarityAnalysis> {
public:
  typedef IRSimilarity::IRSimilarityIdentifier Result;

  Result run(Module &M, ModuleAnalysisManager &);

private:
  friend AnalysisInfoMixin<IRSimilarityAnalysis>;
  static AnalysisKey Key;
};
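
// With the new pass manager, the result is obtained through the analysis
// manager (a sketch; assumes `MAM` was registered via a PassBuilder and that
// run() has populated the identifier):
//
//   IRSimilarity::IRSimilarityIdentifier &IRSI =
//       MAM.getResult<IRSimilarityAnalysis>(M);
//   std::optional<IRSimilarity::SimilarityGroupList> &Groups =
//       IRSI.getSimilarity();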

/// Printer pass that uses \c IRSimilarityAnalysis.
class IRSimilarityAnalysisPrinterPass
    : public PassInfoMixin<IRSimilarityAnalysisPrinterPass> {
  raw_ostream &OS;

public:
  explicit IRSimilarityAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_IRSIMILARITYIDENTIFIER_H
//===- CostModel.h - --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_COSTMODEL_H
#define LLVM_ANALYSIS_COSTMODEL_H

#include "llvm/IR/PassManager.h"

namespace llvm {
/// Printer pass for cost modeling results.
class CostModelPrinterPass : public PassInfoMixin<CostModelPrinterPass> {
  raw_ostream &OS;

public:
  explicit CostModelPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_ANALYSIS_COSTMODEL_H
//===- StackLifetime.h - Alloca Lifetime Analysis --------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_STACKLIFETIME_H
#define LLVM_ANALYSIS_STACKLIFETIME_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>

namespace llvm {

class AllocaInst;
class BasicBlock;
class Function;
class Instruction;
class IntrinsicInst;

/// Compute live ranges of allocas.
/// Live ranges are represented as sets of "interesting" instructions, which are
/// defined as instructions that may start or end an alloca's lifetime. These
/// are:
/// * lifetime.start and lifetime.end intrinsics
/// * first instruction of any basic block
/// Interesting instructions are numbered in the depth-first walk of the CFG,
/// and in the program order inside each basic block.
class StackLifetime {
  /// A class representing liveness information for a single basic block.
  /// Each bit in the BitVector represents the liveness property
  /// for a different stack slot.
  struct BlockLifetimeInfo {
    explicit BlockLifetimeInfo(unsigned Size)
        : Begin(Size), End(Size), LiveIn(Size), LiveOut(Size) {}

    /// Which slots BEGINs in each basic block.
    BitVector Begin;

    /// Which slots ENDs in each basic block.
    BitVector End;

    /// Which slots are marked as LIVE_IN, coming into each basic block.
    BitVector LiveIn;

    /// Which slots are marked as LIVE_OUT, coming out of each basic block.
    BitVector LiveOut;
  };

public:
  class LifetimeAnnotationWriter;

  /// This class represents a set of interesting instructions where an alloca is
  /// live.
  class LiveRange {
    BitVector Bits;
    friend raw_ostream &operator<<(raw_ostream &OS,
                                   const StackLifetime::LiveRange &R);

  public:
    LiveRange(unsigned Size, bool Set = false) : Bits(Size, Set) {}
    void addRange(unsigned Start, unsigned End) { Bits.set(Start, End); }

    bool overlaps(const LiveRange &Other) const {
      return Bits.anyCommon(Other.Bits);
    }

    void join(const LiveRange &Other) { Bits |= Other.Bits; }

    bool test(unsigned Idx) const { return Bits.test(Idx); }
  };

  // Controls what is considered "alive" when control flow may reach the
  // instruction with different liveness of the alloca along different paths.
  enum class LivenessType {
    May,  // May be alive on some path.
    Must, // Must be alive on every path.
  };

private:
  const Function &F;
  LivenessType Type;

  /// Maps active slots (per bit) for each basic block.
  using LivenessMap = DenseMap<const BasicBlock *, BlockLifetimeInfo>;
  LivenessMap BlockLiveness;

  /// Interesting instructions. Instructions of the same block are adjacent
  /// and preserve in-block order.
  SmallVector<const IntrinsicInst *, 64> Instructions;

  /// A range [Start, End) of instruction ids for each basic block.
  /// Instructions inside each BB have monotonic and consecutive ids.
  DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;

  ArrayRef<const AllocaInst *> Allocas;
  unsigned NumAllocas;
  DenseMap<const AllocaInst *, unsigned> AllocaNumbering;

  /// LiveRange for allocas.
  SmallVector<LiveRange, 8> LiveRanges;

  /// The set of allocas that have at least one lifetime.start. All other
  /// allocas get a LiveRange that corresponds to the entire function.
  BitVector InterestingAllocas;

  struct Marker {
    unsigned AllocaNo;
    bool IsStart;
  };

  /// List of {InstNo, {AllocaNo, IsStart}} for each BB, ordered by InstNo.
  DenseMap<const BasicBlock *, SmallVector<std::pair<unsigned, Marker>, 4>>
      BBMarkers;

  bool HasUnknownLifetimeStartOrEnd = false;

  void dumpAllocas() const;
  void dumpBlockLiveness() const;
  void dumpLiveRanges() const;

  void collectMarkers();
  void calculateLocalLiveness();
  void calculateLiveIntervals();

public:
  StackLifetime(const Function &F, ArrayRef<const AllocaInst *> Allocas,
                LivenessType Type);

  void run();

  iterator_range<
      filter_iterator<ArrayRef<const IntrinsicInst *>::const_iterator,
                      std::function<bool(const IntrinsicInst *)>>>
  getMarkers() const {
    std::function<bool(const IntrinsicInst *)> NotNull(
        [](const IntrinsicInst *I) -> bool { return I; });
    return make_filter_range(Instructions, NotNull);
  }

  /// Returns a set of "interesting" instructions where the given alloca is
  /// live. Not all instructions in a function are interesting: we pick a set
  /// that is large enough for LiveRange::overlaps to be correct.
  const LiveRange &getLiveRange(const AllocaInst *AI) const;

  /// Returns true if instruction is reachable from entry.
  bool isReachable(const Instruction *I) const;

  /// Returns true if the alloca is alive after the instruction.
  bool isAliveAfter(const AllocaInst *AI, const Instruction *I) const;

  /// Returns a live range that represents an alloca that is live throughout the
  /// entire function.
  LiveRange getFullLiveRange() const {
    return LiveRange(Instructions.size(), true);
  }

  void print(raw_ostream &O);
};
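
// Example usage (a sketch; `F` is a Function, and instructions() requires
// llvm/IR/InstIterator.h):
//
//   SmallVector<const AllocaInst *, 8> Allocas;
//   for (const Instruction &I : instructions(F))
//     if (auto *AI = dyn_cast<AllocaInst>(&I))
//       Allocas.push_back(AI);
//   StackLifetime SL(F, Allocas, StackLifetime::LivenessType::May);
//   SL.run();
//   bool Disjoint = !SL.getLiveRange(Allocas[0])
//                        .overlaps(SL.getLiveRange(Allocas[1]));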

static inline raw_ostream &operator<<(raw_ostream &OS, const BitVector &V) {
  OS << "{";
  ListSeparator LS;
  for (int Idx = V.find_first(); Idx >= 0; Idx = V.find_next(Idx))
    OS << LS << Idx;
  OS << "}";
  return OS;
}

inline raw_ostream &operator<<(raw_ostream &OS,
                               const StackLifetime::LiveRange &R) {
  return OS << R.Bits;
}

/// Printer pass for testing.
class StackLifetimePrinterPass
    : public PassInfoMixin<StackLifetimePrinterPass> {
  StackLifetime::LivenessType Type;
  raw_ostream &OS;

public:
  StackLifetimePrinterPass(raw_ostream &OS, StackLifetime::LivenessType Type)
      : Type(Type), OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_STACKLIFETIME_H
//===- ModelUnderTrainingRunner.h -- 'development' mode runner --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//

#ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
#define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Config/llvm-config.h"

#ifdef LLVM_HAVE_TFLITE
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
/// to dynamically load and evaluate a TF SavedModel
/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
/// sacrificed for ease of use while training.
class ModelUnderTrainingRunner final : public MLModelRunner {
public:
  // Disallows copy and assign.
  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
  ModelUnderTrainingRunner &
  operator=(const ModelUnderTrainingRunner &) = delete;

  const std::vector<TensorSpec> &extraOutputsForLoggingSpecs() const {
    return ExtraOutputsForLogging;
  }

  const void *getUntypedExtraOutputValue(size_t ExtraOutputIndex) const {
    return lastEvaluationResult()->getUntypedTensorValue(ExtraOutputIndex + 1);
  }

  const std::optional<TFModelEvaluator::EvaluationResult> &
  lastEvaluationResult() const {
    return LastEvaluationResult;
  }
  static bool classof(const MLModelRunner *R) {
    return R->getKind() == MLModelRunner::Kind::Development;
  }

  static std::unique_ptr<ModelUnderTrainingRunner>
  createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
                       StringRef DecisionName,
                       const std::vector<TensorSpec> &InputSpecs,
                       StringRef OutputSpecsPathOverride = "");

  ModelUnderTrainingRunner(
      LLVMContext &Ctx, const std::string &ModelPath,
      const std::vector<TensorSpec> &InputSpecs,
      const std::vector<TensorSpec> &OutputSpecs,
      const std::vector<TensorSpec> &ExtraOutputsForLogging = {});

  bool isValid() const { return !!Evaluator; }

private:
  std::unique_ptr<TFModelEvaluator> Evaluator;
  const std::vector<TensorSpec> OutputSpecs;
  const std::vector<TensorSpec> ExtraOutputsForLogging;
  std::optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
  void *evaluateUntyped() override;
};

} // namespace llvm
#endif // defined(LLVM_HAVE_TFLITE)
#endif // LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
//=- llvm/Analysis/PostDominators.h - Post Dominator Calculation --*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes interfaces to post dominance information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_POSTDOMINATORS_H
#define LLVM_ANALYSIS_POSTDOMINATORS_H

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"

namespace llvm {

class Function;
class raw_ostream;

/// PostDominatorTree Class - Concrete subclass of DominatorTree that is used to
/// compute the post-dominator tree.
class PostDominatorTree : public PostDomTreeBase<BasicBlock> {
public:
  using Base = PostDomTreeBase<BasicBlock>;

  PostDominatorTree() = default;
  explicit PostDominatorTree(Function &F) { recalculate(F); }
  /// Handle invalidation explicitly.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);

  // Ensure base-class overloads are visible.
  using Base::dominates;

  /// Return true if \p I1 dominates \p I2. This checks if \p I2 comes before
  /// \p I1 if they belong to the same basic block.
  bool dominates(const Instruction *I1, const Instruction *I2) const;
};
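
// Example (a sketch; `F`, `I1`, and `I2` are hypothetical):
//
//   PostDominatorTree PDT(F);
//   if (PDT.dominates(I1, I2))
//     ; // Every path from I2 to a function exit passes through I1.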

/// Analysis pass which computes a \c PostDominatorTree.
class PostDominatorTreeAnalysis
    : public AnalysisInfoMixin<PostDominatorTreeAnalysis> {
  friend AnalysisInfoMixin<PostDominatorTreeAnalysis>;

  static AnalysisKey Key;

public:
  /// Provide the result type for this analysis pass.
  using Result = PostDominatorTree;

  /// Run the analysis pass over a function and produce a post dominator
  ///        tree.
  PostDominatorTree run(Function &F, FunctionAnalysisManager &);
};

/// Printer pass for the \c PostDominatorTree.
class PostDominatorTreePrinterPass
    : public PassInfoMixin<PostDominatorTreePrinterPass> {
  raw_ostream &OS;

public:
  explicit PostDominatorTreePrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

struct PostDominatorTreeWrapperPass : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PostDominatorTree DT;

  PostDominatorTreeWrapperPass();

  PostDominatorTree &getPostDomTree() { return DT; }
  const PostDominatorTree &getPostDomTree() const { return DT; }

  bool runOnFunction(Function &F) override;

  void verifyAnalysis() const override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  void releaseMemory() override { DT.reset(); }

  void print(raw_ostream &OS, const Module*) const override;
};

FunctionPass* createPostDomTree();

template <> struct GraphTraits<PostDominatorTree*>
  : public GraphTraits<DomTreeNode*> {
  static NodeRef getEntryNode(PostDominatorTree *DT) {
    return DT->getRootNode();
  }

  static nodes_iterator nodes_begin(PostDominatorTree *N) {
    return df_begin(getEntryNode(N));
  }

  static nodes_iterator nodes_end(PostDominatorTree *N) {
    return df_end(getEntryNode(N));
  }
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_POSTDOMINATORS_H
//===- EHPersonalities.h - Compute EH-related information -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_EHPERSONALITIES_H
#define LLVM_ANALYSIS_EHPERSONALITIES_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TinyPtrVector.h"

namespace llvm {
class BasicBlock;
class Function;
class Triple;
class Value;

enum class EHPersonality {
  Unknown,
  GNU_Ada,
  GNU_C,
  GNU_C_SjLj,
  GNU_CXX,
  GNU_CXX_SjLj,
  GNU_ObjC,
  MSVC_X86SEH,
  MSVC_TableSEH,
  MSVC_CXX,
  CoreCLR,
  Rust,
  Wasm_CXX,
  XL_CXX
};

/// See if the given exception handling personality function is one
/// that we understand.  If so, return a description of it; otherwise return
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);

StringRef getEHPersonalityName(EHPersonality Pers);

EHPersonality getDefaultEHPersonality(const Triple &T);
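
// A short sketch of classifying a function's personality before applying
// EH-specific logic (`F` is a hypothetical Function):
//
//   if (F.hasPersonalityFn()) {
//     EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
//     if (isFuncletEHPersonality(Pers))
//       ; // Funclet-based EH (e.g. MSVC C++, CoreCLR).
//   }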

/// Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
  // The two SEH personality functions can catch asynchronous exceptions. We
  // assume unknown personalities don't catch asynchronous exceptions.
  switch (Pers) {
  case EHPersonality::MSVC_X86SEH:
  case EHPersonality::MSVC_TableSEH:
    return true;
  default:
    return false;
  }
  llvm_unreachable("invalid enum");
}

/// Returns true if this is a personality function that invokes
/// handler funclets (which must return to it).
inline bool isFuncletEHPersonality(EHPersonality Pers) {
  switch (Pers) {
  case EHPersonality::MSVC_CXX:
  case EHPersonality::MSVC_X86SEH:
  case EHPersonality::MSVC_TableSEH:
  case EHPersonality::CoreCLR:
    return true;
  default:
    return false;
  }
  llvm_unreachable("invalid enum");
}

/// Returns true if this personality uses scope-style EH IR instructions:
/// catchswitch, catchpad/ret, and cleanuppad/ret.
inline bool isScopedEHPersonality(EHPersonality Pers) {
  switch (Pers) {
  case EHPersonality::MSVC_CXX:
  case EHPersonality::MSVC_X86SEH:
  case EHPersonality::MSVC_TableSEH:
  case EHPersonality::CoreCLR:
  case EHPersonality::Wasm_CXX:
    return true;
  default:
    return false;
  }
  llvm_unreachable("invalid enum");
}

/// Return true if this personality may be safely removed if there
/// are no invoke instructions remaining in the current function.
inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
  switch (Pers) {
  case EHPersonality::Unknown:
    return false;
  // All known personalities currently have this behavior
  default:
    return true;
  }
  llvm_unreachable("invalid enum");
}

bool canSimplifyInvokeNoUnwind(const Function *F);

typedef TinyPtrVector<BasicBlock *> ColorVector;

/// If an EH funclet personality is in use (see isFuncletEHPersonality),
/// this will recompute which blocks are in which funclet. It is possible that
/// some blocks are in multiple funclets. Consider this analysis to be
/// expensive.
DenseMap<BasicBlock *, ColorVector> colorEHFunclets(Function &F);
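
// Sketch: recompute funclet membership and look up the colors of a block
// (`F` and `BB` are hypothetical):
//
//   DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(F);
//   for (BasicBlock *FuncletEntry : BlockColors[&BB])
//     ; // BB belongs to the funclet rooted at FuncletEntry.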

} // end namespace llvm

#endif
//===-- OverflowInstAnalysis.h - Utils to fold overflow insts ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file holds routines to help analyse overflow instructions
// and fold them into constants or other overflow instructions
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_OVERFLOWINSTANALYSIS_H
#define LLVM_ANALYSIS_OVERFLOWINSTANALYSIS_H

namespace llvm {
class Use;
class Value;

/// Match one of the patterns up to the select/logic op:
///   %Op0 = icmp ne i4 %X, 0
///   %Agg = call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %Y)
///   %Op1 = extractvalue { i4, i1 } %Agg, 1
///   %ret = select i1 %Op0, i1 %Op1, i1 false / %ret = and i1 %Op0, %Op1
///
///   %Op0 = icmp eq i4 %X, 0
///   %Agg = call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %Y)
///   %NotOp1 = extractvalue { i4, i1 } %Agg, 1
///   %Op1 = xor i1 %NotOp1, true
///   %ret = select i1 %Op0, i1 true, i1 %Op1 / %ret = or i1 %Op0, %Op1
///
/// Callers are expected to align that with the operands of the select/logic.
/// IsAnd is set to true if the Op0 and Op1 are used as the first pattern.
/// If Op0 and Op1 match one of the patterns above, return true and fill Y's
/// use.

bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd,
                                      Use *&Y);
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd);
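
// Usage sketch (`Op0`/`Op1` are the operands of the matched select or
// logic op):
//
//   Use *YUse = nullptr;
//   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, /*IsAnd=*/true, YUse))
//     ; // YUse now points at Y's use inside the overflow intrinsic call.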
} // end namespace llvm

#endif
//===- ObjCARCAliasAnalysis.h - ObjC ARC Alias Analysis ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file declares a simple ARC-aware AliasAnalysis using special knowledge
/// of Objective C to enhance other optimization passes which rely on the Alias
/// Analysis infrastructure.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_OBJCARCALIASANALYSIS_H
#define LLVM_ANALYSIS_OBJCARCALIASANALYSIS_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Pass.h"

namespace llvm {
namespace objcarc {

/// This is a simple alias analysis implementation that uses knowledge
/// of ARC constructs to answer queries.
///
/// TODO: This class could be generalized to know about other ObjC-specific
/// tricks, such as knowing that ivars in the non-fragile ABI are non-aliasing
/// even though their offsets are dynamic.
class ObjCARCAAResult : public AAResultBase {
  const DataLayout &DL;

public:
  explicit ObjCARCAAResult(const DataLayout &DL) : DL(DL) {}
  ObjCARCAAResult(ObjCARCAAResult &&Arg)
      : AAResultBase(std::move(Arg)), DL(Arg.DL) {}

  /// Handle invalidation events from the new pass manager.
  ///
  /// By definition, this result is stateless and so remains valid.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    return false;
  }

  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI, const Instruction *CtxI);
  ModRefInfo getModRefInfoMask(const MemoryLocation &Loc, AAQueryInfo &AAQI,
                               bool IgnoreLocals);

  using AAResultBase::getMemoryEffects;
  MemoryEffects getMemoryEffects(const Function *F);

  using AAResultBase::getModRefInfo;
  ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
                           AAQueryInfo &AAQI);
};

/// Analysis pass providing a never-invalidated alias analysis result.
class ObjCARCAA : public AnalysisInfoMixin<ObjCARCAA> {
  friend AnalysisInfoMixin<ObjCARCAA>;
  static AnalysisKey Key;

public:
  typedef ObjCARCAAResult Result;

  ObjCARCAAResult run(Function &F, FunctionAnalysisManager &AM);
};

} // namespace objcarc
} // namespace llvm

#endif
//===- RegionInfoImpl.h - SESE region detection analysis --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Detects single entry single exit regions in the control flow graph.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_REGIONINFOIMPL_H
#define LLVM_ANALYSIS_REGIONINFOIMPL_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <set>
#include <string>
#include <type_traits>
#include <vector>

#define DEBUG_TYPE "region"

namespace llvm {
class raw_ostream;

//===----------------------------------------------------------------------===//
/// RegionBase Implementation
template <class Tr>
RegionBase<Tr>::RegionBase(BlockT *Entry, BlockT *Exit,
                           typename Tr::RegionInfoT *RInfo, DomTreeT *dt,
                           RegionT *Parent)
    : RegionNodeBase<Tr>(Parent, Entry, 1), RI(RInfo), DT(dt), exit(Exit) {}

template <class Tr>
RegionBase<Tr>::~RegionBase() {
  // Only clean the cache for this Region. Caches of child Regions will be
  // cleaned when the child Regions are deleted.
  BBNodeMap.clear();
}

template <class Tr>
void RegionBase<Tr>::replaceEntry(BlockT *BB) {
  this->entry.setPointer(BB);
}

template <class Tr>
void RegionBase<Tr>::replaceExit(BlockT *BB) {
  assert(exit && "No exit to replace!");
  exit = BB;
}

template <class Tr>
void RegionBase<Tr>::replaceEntryRecursive(BlockT *NewEntry) {
  std::vector<RegionT *> RegionQueue;
  BlockT *OldEntry = getEntry();

  RegionQueue.push_back(static_cast<RegionT *>(this));
  while (!RegionQueue.empty()) {
    RegionT *R = RegionQueue.back();
    RegionQueue.pop_back();

    R->replaceEntry(NewEntry);
    for (std::unique_ptr<RegionT> &Child : *R) {
      if (Child->getEntry() == OldEntry)
        RegionQueue.push_back(Child.get());
    }
  }
}

template <class Tr>
void RegionBase<Tr>::replaceExitRecursive(BlockT *NewExit) {
  std::vector<RegionT *> RegionQueue;
  BlockT *OldExit = getExit();

  RegionQueue.push_back(static_cast<RegionT *>(this));
  while (!RegionQueue.empty()) {
    RegionT *R = RegionQueue.back();
    RegionQueue.pop_back();

    R->replaceExit(NewExit);
    for (std::unique_ptr<RegionT> &Child : *R) {
      if (Child->getExit() == OldExit)
        RegionQueue.push_back(Child.get());
    }
  }
}

template <class Tr>
bool RegionBase<Tr>::contains(const BlockT *B) const {
  BlockT *BB = const_cast<BlockT *>(B);

  if (!DT->getNode(BB))
    return false;

  BlockT *entry = getEntry(), *exit = getExit();

  // Toplevel region.
  if (!exit)
    return true;

  return (DT->dominates(entry, BB) &&
          !(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
}

template <class Tr>
bool RegionBase<Tr>::contains(const LoopT *L) const {
  // BBs that are not part of any loop are elements of the loop described by
  // the NULL pointer. This loop is not part of any region, except if the
  // region describes the whole function.
  if (!L)
    return getExit() == nullptr;

  if (!contains(L->getHeader()))
    return false;

  SmallVector<BlockT *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (BlockT *BB : ExitingBlocks) {
    if (!contains(BB))
      return false;
  }

  return true;
}

template <class Tr>
typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopT *L) const {
  if (!contains(L))
    return nullptr;

  while (L && contains(L->getParentLoop())) {
    L = L->getParentLoop();
  }

  return L;
}

template <class Tr>
typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopInfoT *LI,
                                                          BlockT *BB) const {
  assert(LI && BB && "LI and BB cannot be null!");
  LoopT *L = LI->getLoopFor(BB);
  return outermostLoopInRegion(L);
}

template <class Tr>
typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getEnteringBlock() const {
  auto isEnteringBlock = [&](BlockT *Pred, bool AllowRepeats) -> BlockT * {
    assert(!AllowRepeats && "Unexpected parameter value.");
    return DT->getNode(Pred) && !contains(Pred) ? Pred : nullptr;
  };
  BlockT *entry = getEntry();
  return find_singleton<BlockT>(make_range(InvBlockTraits::child_begin(entry),
                                           InvBlockTraits::child_end(entry)),
                                isEnteringBlock);
}

template <class Tr>
bool RegionBase<Tr>::getExitingBlocks(
    SmallVectorImpl<BlockT *> &Exitings) const {
  bool CoverAll = true;

  if (!exit)
    return CoverAll;

  for (PredIterTy PI = InvBlockTraits::child_begin(exit),
                  PE = InvBlockTraits::child_end(exit);
       PI != PE; ++PI) {
    BlockT *Pred = *PI;
    if (contains(Pred)) {
      Exitings.push_back(Pred);
      continue;
    }

    CoverAll = false;
  }

  return CoverAll;
}

template <class Tr>
typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getExitingBlock() const {
  BlockT *exit = getExit();
  if (!exit)
    return nullptr;

  auto isContained = [&](BlockT *Pred, bool AllowRepeats) -> BlockT * {
    assert(!AllowRepeats && "Unexpected parameter value.");
    return contains(Pred) ? Pred : nullptr;
  };
  return find_singleton<BlockT>(make_range(InvBlockTraits::child_begin(exit),
                                           InvBlockTraits::child_end(exit)),
                                isContained);
}

template <class Tr>
bool RegionBase<Tr>::isSimple() const {
  return !isTopLevelRegion() && getEnteringBlock() && getExitingBlock();
}

template <class Tr>
std::string RegionBase<Tr>::getNameStr() const {
  std::string exitName;
  std::string entryName;

  if (getEntry()->getName().empty()) {
    raw_string_ostream OS(entryName);

    getEntry()->printAsOperand(OS, false);
  } else
    entryName = std::string(getEntry()->getName());

  if (getExit()) {
    if (getExit()->getName().empty()) {
      raw_string_ostream OS(exitName);

      getExit()->printAsOperand(OS, false);
    } else
      exitName = std::string(getExit()->getName());
  } else
    exitName = "<Function Return>";

  return entryName + " => " + exitName;
}

template <class Tr>
void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
  if (!contains(BB))
    report_fatal_error("Broken region found: enumerated BB not in region!");

  BlockT *entry = getEntry(), *exit = getExit();

  for (BlockT *Succ :
       make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
    if (!contains(Succ) && exit != Succ)
      report_fatal_error("Broken region found: edges leaving the region must go "
                         "to the exit node!");
  }

  if (entry != BB) {
    for (BlockT *Pred : make_range(InvBlockTraits::child_begin(BB),
                                   InvBlockTraits::child_end(BB))) {
      // Allow predecessors that are unreachable, as these are ignored during
      // region analysis.
      if (!contains(Pred) && DT->isReachableFromEntry(Pred))
        report_fatal_error("Broken region found: edges entering the region must "
                           "go to the entry node!");
    }
  }
}

template <class Tr>
void RegionBase<Tr>::verifyWalk(BlockT *BB, std::set<BlockT *> *visited) const {
  BlockT *exit = getExit();

  visited->insert(BB);

  verifyBBInRegion(BB);

  for (BlockT *Succ :
       make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
    if (Succ != exit && visited->find(Succ) == visited->end())
      verifyWalk(Succ, visited);
  }
}

template <class Tr>
void RegionBase<Tr>::verifyRegion() const {
  // Only do verification when the user requests it; otherwise this expensive
  // check will be invoked by PMDataManager::verifyPreservedAnalysis whenever
  // a region pass (marked PreservedAll) finishes.
  if (!RegionInfoBase<Tr>::VerifyRegionInfo)
    return;

  std::set<BlockT *> visited;
  verifyWalk(getEntry(), &visited);
}

template <class Tr>
void RegionBase<Tr>::verifyRegionNest() const {
  for (const std::unique_ptr<RegionT> &R : *this)
    R->verifyRegionNest();

  verifyRegion();
}

template <class Tr>
typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_begin() {
  return GraphTraits<RegionT *>::nodes_begin(static_cast<RegionT *>(this));
}

template <class Tr>
typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_end() {
  return GraphTraits<RegionT *>::nodes_end(static_cast<RegionT *>(this));
}

template <class Tr>
typename RegionBase<Tr>::const_element_iterator
RegionBase<Tr>::element_begin() const {
  return GraphTraits<const RegionT *>::nodes_begin(
      static_cast<const RegionT *>(this));
}

template <class Tr>
typename RegionBase<Tr>::const_element_iterator
RegionBase<Tr>::element_end() const {
  return GraphTraits<const RegionT *>::nodes_end(
      static_cast<const RegionT *>(this));
}

template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::getSubRegionNode(BlockT *BB) const {
  using RegionT = typename Tr::RegionT;

  RegionT *R = RI->getRegionFor(BB);

  if (!R || R == this)
    return nullptr;

  // If we pass the BB out of this region, that means our code is broken.
  assert(contains(R) && "BB not in current region!");

  while (contains(R->getParent()) && R->getParent() != this)
    R = R->getParent();

  if (R->getEntry() != BB)
    return nullptr;

  return R;
}

template <class Tr>
typename Tr::RegionNodeT *RegionBase<Tr>::getBBNode(BlockT *BB) const {
  assert(contains(BB) && "Can get BB node out of this region!");

  typename BBNodeMapT::const_iterator at = BBNodeMap.find(BB);

  if (at == BBNodeMap.end()) {
    auto Deconst = const_cast<RegionBase<Tr> *>(this);
    typename BBNodeMapT::value_type V = {
        BB,
        std::make_unique<RegionNodeT>(static_cast<RegionT *>(Deconst), BB)};
    at = BBNodeMap.insert(std::move(V)).first;
  }
  return at->second.get();
}

template <class Tr>
typename Tr::RegionNodeT *RegionBase<Tr>::getNode(BlockT *BB) const {
  assert(contains(BB) && "Can get BB node out of this region!");
  if (RegionT *Child = getSubRegionNode(BB))
    return Child->getNode();

  return getBBNode(BB);
}

template <class Tr>
void RegionBase<Tr>::transferChildrenTo(RegionT *To) {
  for (std::unique_ptr<RegionT> &R : *this) {
    R->parent = To;
    To->children.push_back(std::move(R));
  }
  children.clear();
}

template <class Tr>
void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
  assert(!SubRegion->parent && "SubRegion already has a parent!");
  assert(llvm::none_of(*this,
                       [&](const std::unique_ptr<RegionT> &R) {
                         return R.get() == SubRegion;
                       }) &&
         "Subregion already exists!");

  SubRegion->parent = static_cast<RegionT *>(this);
  children.push_back(std::unique_ptr<RegionT>(SubRegion));

  if (!moveChildren)
    return;

  assert(SubRegion->children.empty() &&
         "SubRegions that contain children are not supported");

  for (RegionNodeT *Element : elements()) {
    if (!Element->isSubRegion()) {
      BlockT *BB = Element->template getNodeAs<BlockT>();

      if (SubRegion->contains(BB))
        RI->setRegionFor(BB, SubRegion);
    }
  }

  std::vector<std::unique_ptr<RegionT>> Keep;
  for (std::unique_ptr<RegionT> &R : *this) {
    if (SubRegion->contains(R.get()) && R.get() != SubRegion) {
      R->parent = SubRegion;
      SubRegion->children.push_back(std::move(R));
    } else
      Keep.push_back(std::move(R));
  }

  children.clear();
  children.insert(
      children.begin(),
      std::move_iterator<typename RegionSet::iterator>(Keep.begin()),
      std::move_iterator<typename RegionSet::iterator>(Keep.end()));
}

template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::removeSubRegion(RegionT *Child) {
  assert(Child->parent == this && "Child is not a child of this region!");
  Child->parent = nullptr;
  typename RegionSet::iterator I =
      llvm::find_if(children, [&](const std::unique_ptr<RegionT> &R) {
        return R.get() == Child;
      });
  assert(I != children.end() && "Region does not exist. Unable to remove.");
  children.erase(children.begin() + (I - begin()));
  return Child;
}

template <class Tr>
unsigned RegionBase<Tr>::getDepth() const {
  unsigned Depth = 0;

  for (RegionT *R = getParent(); R != nullptr; R = R->getParent())
    ++Depth;

  return Depth;
}

template <class Tr>
typename Tr::RegionT *RegionBase<Tr>::getExpandedRegion() const {
  unsigned NumSuccessors = Tr::getNumSuccessors(exit);

  if (NumSuccessors == 0)
    return nullptr;

  RegionT *R = RI->getRegionFor(exit);

  if (R->getEntry() != exit) {
    for (BlockT *Pred : make_range(InvBlockTraits::child_begin(getExit()),
                                   InvBlockTraits::child_end(getExit())))
      if (!contains(Pred))
        return nullptr;
    if (Tr::getNumSuccessors(exit) == 1)
      return new RegionT(getEntry(), *BlockTraits::child_begin(exit), RI, DT);
    return nullptr;
  }

  while (R->getParent() && R->getParent()->getEntry() == exit)
    R = R->getParent();

  for (BlockT *Pred : make_range(InvBlockTraits::child_begin(getExit()),
                                 InvBlockTraits::child_end(getExit()))) {
    if (!(contains(Pred) || R->contains(Pred)))
      return nullptr;
  }

  return new RegionT(getEntry(), R->getExit(), RI, DT);
}

template <class Tr>
void RegionBase<Tr>::print(raw_ostream &OS, bool print_tree, unsigned level,
                           PrintStyle Style) const {
  if (print_tree)
    OS.indent(level * 2) << '[' << level << "] " << getNameStr();
  else
    OS.indent(level * 2) << getNameStr();

  OS << '\n';

  if (Style != PrintNone) {
    OS.indent(level * 2) << "{\n";
    OS.indent(level * 2 + 2);

    if (Style == PrintBB) {
      for (const auto *BB : blocks())
        OS << BB->getName() << ", "; // TODO: remove the last ","
    } else if (Style == PrintRN) {
      for (const RegionNodeT *Element : elements()) {
        OS << *Element << ", "; // TODO: remove the last ","
      }
    }

    OS << '\n';
  }

  if (print_tree) {
    for (const std::unique_ptr<RegionT> &R : *this)
      R->print(OS, print_tree, level + 1, Style);
  }

  if (Style != PrintNone)
    OS.indent(level * 2) << "} \n";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class Tr>
void RegionBase<Tr>::dump() const {
  print(dbgs(), true, getDepth(), RegionInfoBase<Tr>::printStyle);
}
#endif

template <class Tr>
void RegionBase<Tr>::clearNodeCache() {
  BBNodeMap.clear();
  for (std::unique_ptr<RegionT> &R : *this)
    R->clearNodeCache();
}

//===----------------------------------------------------------------------===//
// RegionInfoBase implementation
//

template <class Tr>
RegionInfoBase<Tr>::RegionInfoBase() = default;

template <class Tr>
RegionInfoBase<Tr>::~RegionInfoBase() {
  releaseMemory();
}

template <class Tr>
void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R) const {
  assert(R && "Re must be non-null");
  for (const typename Tr::RegionNodeT *Element : R->elements()) {
    if (Element->isSubRegion()) {
      const RegionT *SR = Element->template getNodeAs<RegionT>();
      verifyBBMap(SR);
    } else {
      BlockT *BB = Element->template getNodeAs<BlockT>();
      if (getRegionFor(BB) != R)
        report_fatal_error("BB map does not match region nesting");
    }
  }
}

template <class Tr>
bool RegionInfoBase<Tr>::isCommonDomFrontier(BlockT *BB, BlockT *entry,
                                             BlockT *exit) const {
  for (BlockT *P : make_range(InvBlockTraits::child_begin(BB),
                              InvBlockTraits::child_end(BB))) {
    if (DT->dominates(entry, P) && !DT->dominates(exit, P))
      return false;
  }

  return true;
}

template <class Tr>
bool RegionInfoBase<Tr>::isRegion(BlockT *entry, BlockT *exit) const {
  assert(entry && exit && "entry and exit must not be null!");

  using DST = typename DomFrontierT::DomSetType;

  DST *entrySuccs = &DF->find(entry)->second;

  // Exit is the header of a loop that contains the entry. In this case,
  // the dominance frontier must only contain the exit.
  if (!DT->dominates(entry, exit)) {
    for (BlockT *successor : *entrySuccs) {
      if (successor != exit && successor != entry)
        return false;
    }

    return true;
  }

  DST *exitSuccs = &DF->find(exit)->second;

  // Do not allow edges leaving the region.
  for (BlockT *Succ : *entrySuccs) {
    if (Succ == exit || Succ == entry)
      continue;
    if (exitSuccs->find(Succ) == exitSuccs->end())
      return false;
    if (!isCommonDomFrontier(Succ, entry, exit))
      return false;
  }

  // Do not allow edges pointing into the region.
  for (BlockT *Succ : *exitSuccs) {
    if (DT->properlyDominates(entry, Succ) && Succ != exit)
      return false;
  }

  return true;
}
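
// For intuition, in the diamond CFG below (illustrative, not part of this
// header), isRegion(entry, exit) holds: every edge leaving {entry, a, b}
// targets exit, and no edge from outside the set enters a or b.
//
//        entry
//        /   \
//       a     b
//        \   /
//        exit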

template <class Tr>
void RegionInfoBase<Tr>::insertShortCut(BlockT *entry, BlockT *exit,
                                        BBtoBBMap *ShortCut) const {
  assert(entry && exit && "entry and exit must not be null!");

  typename BBtoBBMap::iterator e = ShortCut->find(exit);

  if (e == ShortCut->end())
    // No further region at exit available.
    (*ShortCut)[entry] = exit;
  else {
    // We found a region e that starts at exit. Therefore (entry, e->second)
    // is also a region, one that is larger than (entry, exit). Insert the
    // larger one.
    BlockT *BB = e->second;
    (*ShortCut)[entry] = BB;
  }
}

template <class Tr>
typename Tr::DomTreeNodeT *
RegionInfoBase<Tr>::getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const {
  typename BBtoBBMap::iterator e = ShortCut->find(N->getBlock());

  if (e == ShortCut->end())
    return N->getIDom();

  return PDT->getNode(e->second)->getIDom();
}

template <class Tr>
bool RegionInfoBase<Tr>::isTrivialRegion(BlockT *entry, BlockT *exit) const {
  assert(entry && exit && "entry and exit must not be null!");

  unsigned num_successors =
      BlockTraits::child_end(entry) - BlockTraits::child_begin(entry);

  // Require exactly one successor: with zero successors, dereferencing
  // child_begin(entry) below would be invalid.
  if (num_successors == 1 && exit == *(BlockTraits::child_begin(entry)))
    return true;

  return false;
}

template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::createRegion(BlockT *entry,
                                                       BlockT *exit) {
  assert(entry && exit && "entry and exit must not be null!");

  if (isTrivialRegion(entry, exit))
    return nullptr;

  RegionT *region =
      new RegionT(entry, exit, static_cast<RegionInfoT *>(this), DT);
  BBtoRegion.insert({entry, region});

#ifdef EXPENSIVE_CHECKS
  region->verifyRegion();
#else
  LLVM_DEBUG(region->verifyRegion());
#endif

  updateStatistics(region);
  return region;
}

template <class Tr>
void RegionInfoBase<Tr>::findRegionsWithEntry(BlockT *entry,
                                              BBtoBBMap *ShortCut) {
  assert(entry);

  DomTreeNodeT *N = PDT->getNode(entry);
  if (!N)
    return;

  RegionT *lastRegion = nullptr;
  BlockT *lastExit = entry;

  // As only a BasicBlock that postdominates entry can finish a region, walk the
  // post dominance tree upwards.
  while ((N = getNextPostDom(N, ShortCut))) {
    BlockT *exit = N->getBlock();

    if (!exit)
      break;

    if (isRegion(entry, exit)) {
      RegionT *newRegion = createRegion(entry, exit);

      if (lastRegion)
        newRegion->addSubRegion(lastRegion);

      lastRegion = newRegion;
      lastExit = exit;
    }

    // This can never be a region, so stop the search.
    if (!DT->dominates(entry, exit))
      break;
  }

  // Tried to create regions from entry to lastExit.  Next time take a
  // shortcut from entry to lastExit.
  if (lastExit != entry)
    insertShortCut(entry, lastExit, ShortCut);
}

template <class Tr>
void RegionInfoBase<Tr>::scanForRegions(FuncT &F, BBtoBBMap *ShortCut) {
  using FuncPtrT = std::add_pointer_t<FuncT>;

  BlockT *entry = GraphTraits<FuncPtrT>::getEntryNode(&F);
  DomTreeNodeT *N = DT->getNode(entry);

  // Iterate over the dominance tree in post order to start with the small
  // regions from the bottom of the dominance tree.  If the small regions are
  // detected first, detection of bigger regions is faster, as we can jump
  // over the small regions.
  for (auto DomNode : post_order(N))
    findRegionsWithEntry(DomNode->getBlock(), ShortCut);
}

template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getTopMostParent(RegionT *region) {
  while (region->getParent())
    region = region->getParent();

  return region;
}

template <class Tr>
void RegionInfoBase<Tr>::buildRegionsTree(DomTreeNodeT *N, RegionT *region) {
  BlockT *BB = N->getBlock();

  // Passed region exit
  while (BB == region->getExit())
    region = region->getParent();

  typename BBtoRegionMap::iterator it = BBtoRegion.find(BB);

  // This basic block is a start block of a region. It is already in the
  // BBtoRegion relation. Only the child basic blocks have to be updated.
  if (it != BBtoRegion.end()) {
    RegionT *newRegion = it->second;
    region->addSubRegion(getTopMostParent(newRegion));
    region = newRegion;
  } else {
    BBtoRegion[BB] = region;
  }

  for (DomTreeNodeBase<BlockT> *C : *N) {
    buildRegionsTree(C, region);
  }
}

#ifdef EXPENSIVE_CHECKS
template <class Tr>
bool RegionInfoBase<Tr>::VerifyRegionInfo = true;
#else
template <class Tr>
bool RegionInfoBase<Tr>::VerifyRegionInfo = false;
#endif

template <class Tr>
typename Tr::RegionT::PrintStyle RegionInfoBase<Tr>::printStyle =
    RegionBase<Tr>::PrintNone;

template <class Tr>
void RegionInfoBase<Tr>::print(raw_ostream &OS) const {
  OS << "Region tree:\n";
  TopLevelRegion->print(OS, true, 0, printStyle);
  OS << "End region tree\n";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class Tr>
void RegionInfoBase<Tr>::dump() const { print(dbgs()); }
#endif

template <class Tr> void RegionInfoBase<Tr>::releaseMemory() {
  BBtoRegion.clear();
  if (TopLevelRegion) {
    delete TopLevelRegion;
    TopLevelRegion = nullptr;
  }
}

template <class Tr>
void RegionInfoBase<Tr>::verifyAnalysis() const {
  // Only verify regions if explicitly activated using EXPENSIVE_CHECKS or
  // -verify-region-info.
  if (!RegionInfoBase<Tr>::VerifyRegionInfo)
    return;

  TopLevelRegion->verifyRegionNest();

  verifyBBMap(TopLevelRegion);
}

// Region pass manager support.
template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getRegionFor(BlockT *BB) const {
  return BBtoRegion.lookup(BB);
}

template <class Tr>
void RegionInfoBase<Tr>::setRegionFor(BlockT *BB, RegionT *R) {
  BBtoRegion[BB] = R;
}

template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::operator[](BlockT *BB) const {
  return getRegionFor(BB);
}

template <class Tr>
typename RegionInfoBase<Tr>::BlockT *
RegionInfoBase<Tr>::getMaxRegionExit(BlockT *BB) const {
  BlockT *Exit = nullptr;

  while (true) {
    // Get largest region that starts at BB.
    RegionT *R = getRegionFor(BB);
    while (R && R->getParent() && R->getParent()->getEntry() == BB)
      R = R->getParent();

    // Get the single exit of BB.
    if (R && R->getEntry() == BB)
      Exit = R->getExit();
    else if (++BlockTraits::child_begin(BB) == BlockTraits::child_end(BB))
      Exit = *BlockTraits::child_begin(BB);
    else // No single exit exists.
      return Exit;

    // Get largest region that starts at Exit.
    RegionT *ExitR = getRegionFor(Exit);
    while (ExitR && ExitR->getParent() &&
           ExitR->getParent()->getEntry() == Exit)
      ExitR = ExitR->getParent();

    for (BlockT *Pred : make_range(InvBlockTraits::child_begin(Exit),
                                   InvBlockTraits::child_end(Exit))) {
      if (!R->contains(Pred) && !ExitR->contains(Pred))
        break;
    }

    // This stops infinite cycles.
    if (DT->dominates(Exit, BB))
      break;

    BB = Exit;
  }

  return Exit;
}

template <class Tr>
typename Tr::RegionT *RegionInfoBase<Tr>::getCommonRegion(RegionT *A,
                                                          RegionT *B) const {
  assert(A && B && "One of the Regions is NULL");

  if (A->contains(B))
    return A;

  while (!B->contains(A))
    B = B->getParent();

  return B;
}

template <class Tr>
typename Tr::RegionT *
RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const {
  RegionT *ret = Regions.pop_back_val();

  for (RegionT *R : Regions)
    ret = getCommonRegion(ret, R);

  return ret;
}

template <class Tr>
typename Tr::RegionT *
RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const {
  RegionT *ret = getRegionFor(BBs.back());
  BBs.pop_back();

  for (BlockT *BB : BBs)
    ret = getCommonRegion(ret, getRegionFor(BB));

  return ret;
}
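
// A minimal usage sketch (illustrative; RI names a computed RegionInfo and
// BB1/BB2 are two basic blocks of the same function):
//
//   Region *Common =
//       RI.getCommonRegion(RI.getRegionFor(BB1), RI.getRegionFor(BB2));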

template <class Tr>
void RegionInfoBase<Tr>::calculate(FuncT &F) {
  using FuncPtrT = std::add_pointer_t<FuncT>;

  // ShortCut maps, for every BB, the exit of the largest region starting at
  // that BB. These regions can be treated as single BBs. This improves
  // performance on linear CFGs.
  BBtoBBMap ShortCut;

  scanForRegions(F, &ShortCut);
  BlockT *BB = GraphTraits<FuncPtrT>::getEntryNode(&F);
  buildRegionsTree(DT->getNode(BB), TopLevelRegion);
}

} // end namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_ANALYSIS_REGIONINFOIMPL_H
//===- ReleaseModeModelRunner.h - Fast, precompiled model runner  ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a model runner wrapping an AOT compiled ML model.
// Only inference is supported.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
#define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H

#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Support/ErrorHandling.h"

#include <memory>
#include <vector>

namespace llvm {

/// ReleaseModeModelRunner - production mode implementation of the
/// MLModelRunner. It uses an AOT-compiled SavedModel for efficient execution.
template <class TGen>
class ReleaseModeModelRunner final : public MLModelRunner {
public:
  /// InputSpec's type should be an indexed collection of TensorSpec, like
  /// std::array or std::vector, that has a size() method.
  template <class FType>
  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &InputSpec,
                         StringRef DecisionName, StringRef FeedPrefix = "feed_",
                         StringRef FetchPrefix = "fetch_")
      : MLModelRunner(Ctx, MLModelRunner::Kind::Release, InputSpec.size()),
        CompiledModel(std::make_unique<TGen>()) {
    assert(CompiledModel && "The CompiledModel should be valid");

    for (size_t I = 0; I < InputSpec.size(); ++I) {
      const int Index =
          CompiledModel->LookupArgIndex(FeedPrefix.str() + InputSpec[I].name());
      void *Buffer = nullptr;
      if (Index >= 0)
        Buffer = CompiledModel->arg_data(Index);
      setUpBufferForTensor(I, InputSpec[I], Buffer);
    }

    ResultIndex = CompiledModel->LookupResultIndex(FetchPrefix.str() +
                                                   DecisionName.str());
    assert(ResultIndex >= 0 && "Cannot find DecisionName in inlining model");
  }

  virtual ~ReleaseModeModelRunner() = default;

  static bool classof(const MLModelRunner *R) {
    return R->getKind() == MLModelRunner::Kind::Release;
  }

private:
  void *evaluateUntyped() override {
    CompiledModel->Run();
    return CompiledModel->result_data(ResultIndex);
  }

  int32_t ResultIndex = -1;
  std::unique_ptr<TGen> CompiledModel;
};
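
// A minimal construction sketch (illustrative; `MyAOTModel` is a hypothetical
// AOT-compiled model class and the tensor spec below is made up):
//
//   std::vector<TensorSpec> Inputs{
//       TensorSpec::createSpec<int64_t>("some_feature", {1})};
//   ReleaseModeModelRunner<MyAOTModel> Runner(Ctx, Inputs, "some_decision");
//   *Runner.getTensor<int64_t>(0) = 42; // fill the input buffer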

/// A mock class satisfying the interface expected by ReleaseModeModelRunner for
/// its `TGen` parameter. Useful to avoid conditional compilation complexity, as
/// a compile-time replacement for a real AOT-ed model.
class NoopSavedModelImpl final {
#define NOOP_MODEL_ERRMSG                                                      \
  "The mock AOT-ed saved model is a compile-time stub and should not be "      \
  "called."

public:
  NoopSavedModelImpl() = default;
  int LookupArgIndex(const std::string &) { llvm_unreachable(NOOP_MODEL_ERRMSG); }
  int LookupResultIndex(const std::string &) { llvm_unreachable(NOOP_MODEL_ERRMSG); }
  void Run() { llvm_unreachable(NOOP_MODEL_ERRMSG); }
  void *result_data(int) { llvm_unreachable(NOOP_MODEL_ERRMSG); }
  void *arg_data(int) { llvm_unreachable(NOOP_MODEL_ERRMSG); }
#undef NOOP_MODEL_ERRMSG
};

template <class T> bool isEmbeddedModelEvaluatorValid() { return true; }

template <> inline bool isEmbeddedModelEvaluatorValid<NoopSavedModelImpl>() {
  return false;
}
} // namespace llvm

#endif // LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
//===- MemorySSAUpdater.h - Memory SSA Updater-------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// An automatic updater for MemorySSA that handles arbitrary insertion,
// deletion, and moves.  It performs phi insertion where necessary, and
// automatically updates the MemorySSA IR to be correct.
// While updating loads or removing instructions is often easy enough to not
// need this, updating stores should generally not be attempted outside this
// API.
//
// Basic API usage:
// Create the memory access you want for the instruction (this is mainly so
// we know where it is, without having to duplicate the entire set of create
// functions MemorySSA supports).
// Call insertDef or insertUse depending on whether it's a MemoryUse or a
// MemoryDef.
// That's it.
//
// For moving, first, move the instruction itself using the normal SSA
// instruction moving API, then just call moveBefore, moveAfter, or moveTo with
// the right arguments.
//
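// A minimal sketch of that flow (hypothetical values; assumes MSSA is a
// computed MemorySSA and NewStore is a freshly inserted store instruction):
//
//   MemorySSAUpdater Updater(&MSSA);
//   // Illustrative clobber choice: liveOnEntry, i.e. nothing above NewStore
//   // writes the same memory.
//   MemoryAccess *NewAccess = Updater.createMemoryAccessInBB(
//       NewStore, MSSA.getLiveOnEntryDef(), NewStore->getParent(),
//       MemorySSA::BeforeTerminator);
//   Updater.insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
//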
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSAUPDATER_H
#define LLVM_ANALYSIS_MEMORYSSAUPDATER_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CFGDiff.h"

namespace llvm {

class BasicBlock;
class DominatorTree;
class Instruction;
class LoopBlocksRPO;
template <typename T, unsigned int N> class SmallSetVector;

using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;
using PhiToDefMap = SmallDenseMap<MemoryPhi *, MemoryAccess *>;
using CFGUpdate = cfg::Update<BasicBlock *>;

class MemorySSAUpdater {
private:
  MemorySSA *MSSA;

  /// We use WeakVH rather than a costly deletion to deal with dangling pointers.
  /// MemoryPhis are created eagerly and sometimes get zapped shortly afterwards.
  SmallVector<WeakVH, 16> InsertedPHIs;

  SmallPtrSet<BasicBlock *, 8> VisitedBlocks;
  SmallSet<AssertingVH<MemoryPhi>, 8> NonOptPhis;

public:
  MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}

  /// Insert a definition into the MemorySSA IR.  RenameUses will rename any use
  /// below the new def block (and any inserted phis).  RenameUses should be set
  /// to true if the definition may cause new aliases for loads below it.  This
  /// is not the case for hoisting or sinking or other forms of code *movement*.
  /// It *is* the case for straight code insertion.
  /// For example:
  /// store a
  /// if (foo) { }
  /// load a
  ///
  /// Moving the store into the if block, and calling insertDef, does not
  /// require RenameUses.
  /// However, changing it to:
  /// store a
  /// if (foo) { store b }
  /// load a
  /// where a may alias b, *does* require RenameUses to be set to true.
  void insertDef(MemoryDef *Def, bool RenameUses = false);
  void insertUse(MemoryUse *Use, bool RenameUses = false);
  /// Update the MemoryPhi in `To` following an edge deletion between `From` and
  /// `To`. If `To` becomes unreachable, a call to removeBlocks should be made.
  void removeEdge(BasicBlock *From, BasicBlock *To);
  /// Update the MemoryPhi in `To` to have a single incoming edge from `From`,
  /// following a CFG change that replaced multiple edges (switch) with a direct
  /// branch.
  void removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                      const BasicBlock *To);
  /// Update MemorySSA when inserting a unique backedge block for a loop.
  void updatePhisWhenInsertingUniqueBackedgeBlock(BasicBlock *LoopHeader,
                                                  BasicBlock *LoopPreheader,
                                                  BasicBlock *BackedgeBlock);
  /// Update MemorySSA after a loop was cloned, given the blocks in RPO order,
  /// the exit blocks and a 1:1 mapping of all blocks and instructions
  /// cloned. This involves duplicating all defs and uses in the cloned blocks.
  /// Updating phi nodes in exit block successors is done separately.
  void updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                           ArrayRef<BasicBlock *> ExitBlocks,
                           const ValueToValueMapTy &VM,
                           bool IgnoreIncomingWithNoClones = false);
  // Block BB was fully or partially cloned into its predecessor P1. Map
  // contains the 1:1 mapping of instructions cloned and VM[BB]=P1.
  void updateForClonedBlockIntoPred(BasicBlock *BB, BasicBlock *P1,
                                    const ValueToValueMapTy &VM);
  /// Update phi nodes in exit block successors following cloning. Exit blocks
  /// that were not cloned don't have additional predecessors added.
  void updateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
                                     const ValueToValueMapTy &VMap,
                                     DominatorTree &DT);
  void updateExitBlocksForClonedLoop(
      ArrayRef<BasicBlock *> ExitBlocks,
      ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT);

  /// Apply CFG updates, analogous with the DT edge updates. By default, the
  /// DT is assumed to be already up to date. If UpdateDTFirst is true, first
  /// update the DT with the same updates.
  void applyUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT,
                    bool UpdateDTFirst = false);
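  // A minimal sketch of applying one CFG insertion (hypothetical names;
  // MSSAU is a MemorySSAUpdater, and the edge Pred -> Succ was just added
  // to the IR while the DominatorTree is already up to date):
  //
  //   SmallVector<CFGUpdate, 1> Updates;
  //   Updates.push_back({cfg::UpdateKind::Insert, Pred, Succ});
  //   MSSAU.applyUpdates(Updates, DT);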
  /// Apply CFG insert updates, analogous with the DT edge updates.
  void applyInsertUpdates(ArrayRef<CFGUpdate> Updates, DominatorTree &DT);

  void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
  void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
  void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                   MemorySSA::InsertionPlace Where);
  /// `From` block was spliced into `From` and `To`. There is a CFG edge from
  /// `From` to `To`. Move all accesses from `From` to `To` starting at
  /// instruction `Start`. `To` is a newly created BB, so it is empty of
  /// MemorySSA::MemoryAccesses. Edges are already updated, so successors of
  /// `To` that have MPhi nodes need their incoming block updated.
  /// |------|        |------|
  /// | From |        | From |
  /// |      |        |------|
  /// |      |           ||
  /// |      |   =>      \/
  /// |      |        |------|  <- Start
  /// |      |        |  To  |
  /// |------|        |------|
  void moveAllAfterSpliceBlocks(BasicBlock *From, BasicBlock *To,
                                Instruction *Start);
  /// `From` block was merged into `To`. There is a CFG edge from `To` to
  /// `From`. `To` still branches to `From`, but all instructions were moved and
  /// `From` is now an empty block; `From` is about to be deleted. Move all
  /// accesses from `From` to `To` starting at instruction `Start`. `To` may
  /// have multiple successors, `From` has a single predecessor. `From` may have
  /// successors with MPhi nodes, replace their incoming block with `To`.
  /// |------|        |------|
  /// |  To  |        |  To  |
  /// |------|        |      |
  ///    ||      =>   |      |
  ///    \/           |      |
  /// |------|        |      |  <- Start
  /// | From |        |      |
  /// |------|        |------|
  void moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                               Instruction *Start);
  /// A new empty BasicBlock (New) now branches directly to Old. Some of
  /// Old's predecessors (Preds) are now branching to New instead of Old.
  /// If New is the only predecessor, move Old's Phi, if present, to New.
  /// Otherwise, add a new Phi in New with appropriate incoming values, and
  /// update the incoming values in Old's Phi node too, if present.
  void wireOldPredecessorsToNewImmediatePredecessor(
      BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
      bool IdenticalEdgesWereMerged = true);
  // The below are utility functions. Other than creation of accesses to pass
  // to insertDef, and removeAccess to remove accesses, you should generally
  // not attempt to update memoryssa yourself. It is very non-trivial to get
  // the edge cases right, and the above calls already operate in near-optimal
  // time bounds.

  /// Create a MemoryAccess in MemorySSA at a specified point in a block,
  /// with a specified clobbering definition.
  ///
  /// Returns the new MemoryAccess.
  /// This should be called when a memory instruction is created that is being
  /// used to replace an existing memory instruction. It will *not* create PHI
  /// nodes, or verify the clobbering definition. The insertion place is used
  /// solely to determine where in the memoryssa access lists the instruction
  /// will be placed. The caller is expected to keep ordering the same as
  /// instructions.
  /// It will return the new MemoryAccess.
  /// Note: If a MemoryAccess already exists for I, this function will make it
  /// inaccessible and it *must* have removeMemoryAccess called on it.
  MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
                                       const BasicBlock *BB,
                                       MemorySSA::InsertionPlace Point);

  /// Create a MemoryAccess in MemorySSA before or after an existing
  /// MemoryAccess.
  ///
  /// Returns the new MemoryAccess.
  /// This should be called when a memory instruction is created that is being
  /// used to replace an existing memory instruction. It will *not* create PHI
  /// nodes, or verify the clobbering definition.
  ///
  /// Note: If a MemoryAccess already exists for I, this function will make it
  /// inaccessible and it *must* have removeMemoryAccess called on it.
  MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
                                           MemoryAccess *Definition,
                                           MemoryUseOrDef *InsertPt);
  MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
                                          MemoryAccess *Definition,
                                          MemoryAccess *InsertPt);

  /// Remove a MemoryAccess from MemorySSA, including updating all
  /// definitions and uses.
  /// This should be called when a memory instruction that has a MemoryAccess
  /// associated with it is erased from the program.  For example, if a store or
  /// load is simply erased (not replaced), removeMemoryAccess should be called
  /// on the MemoryAccess for that store/load.
  void removeMemoryAccess(MemoryAccess *, bool OptimizePhis = false);

  /// Remove MemoryAccess for a given instruction, if a MemoryAccess exists.
  /// This should be called when an instruction (load/store) is deleted from
  /// the program.
  void removeMemoryAccess(const Instruction *I, bool OptimizePhis = false) {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      removeMemoryAccess(MA, OptimizePhis);
  }

  /// Remove all MemoryAccesses in a set of BasicBlocks about to be deleted.
  /// Assumption we make here: all uses of deleted defs and phis must either
  /// occur in blocks about to be deleted (thus will be deleted as well), or
  /// they occur in phis that will simply lose an incoming value.
  /// Deleted blocks still have successor info, but their predecessor edges and
  /// Phi nodes may already be updated. Instructions in DeadBlocks should be
  /// deleted after this call.
  void removeBlocks(const SmallSetVector<BasicBlock *, 8> &DeadBlocks);

  /// Instruction I will be changed to an unreachable. Remove all accesses in
  /// I's block that follow I (inclusive), and update the Phis in the blocks'
  /// successors.
  void changeToUnreachable(const Instruction *I);

  /// Get handle on MemorySSA.
  MemorySSA* getMemorySSA() const { return MSSA; }

private:
  // Move What before Where in the MemorySSA IR.
  template <class WhereType>
  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, WhereType Where);
  // Move all memory accesses from `From` to `To` starting at `Start`.
  // Restrictions apply, see public wrappers of this method.
  void moveAllAccesses(BasicBlock *From, BasicBlock *To, Instruction *Start);
  MemoryAccess *getPreviousDef(MemoryAccess *);
  MemoryAccess *getPreviousDefInBlock(MemoryAccess *);
  MemoryAccess *
  getPreviousDefFromEnd(BasicBlock *,
                        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &);
  MemoryAccess *
  getPreviousDefRecursive(BasicBlock *,
                          DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &);
  MemoryAccess *recursePhi(MemoryAccess *Phi);
  MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi);
  template <class RangeType>
  MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
  void tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs);
  void fixupDefs(const SmallVectorImpl<WeakVH> &);
  // Clone all uses and defs from BB to NewBB given a 1:1 map of all
  // instructions and blocks cloned, and a map of MemoryPhi : Definition
  // (MemoryAccess Phi or Def). VMap maps old instructions to cloned
  // instructions and old blocks to cloned blocks. MPhiMap is created in the
  // caller of this private method, and maps existing MemoryPhis to new
  // definitions that new MemoryAccesses must point to. These definitions may
  // not necessarily be MemoryPhis themselves, they may be MemoryDefs. As such,
  // the map is between MemoryPhis and MemoryAccesses, where the MemoryAccesses
  // may be MemoryPhis or MemoryDefs and not MemoryUses.
  // If CloneWasSimplified = true, the clone was exact. Otherwise, assume that
  // the clone involved simplifications that may have: (1) turned a MemoryUse
  // into an instruction that MemorySSA has no representation for, or (2) turned
  // a MemoryDef into a MemoryUse or an instruction that MemorySSA has no
  // representation for. No other cases are supported.
  void cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                        const ValueToValueMapTy &VMap, PhiToDefMap &MPhiMap,
                        bool CloneWasSimplified = false);
  template <typename Iter>
  void privateUpdateExitBlocksForClonedLoop(ArrayRef<BasicBlock *> ExitBlocks,
                                            Iter ValuesBegin, Iter ValuesEnd,
                                            DominatorTree &DT);
  void applyInsertUpdates(ArrayRef<CFGUpdate>, DominatorTree &DT,
                          const GraphDiff<BasicBlock *> *GD);
};
} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSAUPDATER_H
//===- TypeMetadataUtils.h - Utilities related to type metadata --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains functions that make it easier to manipulate type metadata
// for devirtualization.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TYPEMETADATAUTILS_H
#define LLVM_ANALYSIS_TYPEMETADATAUTILS_H

#include <cstdint>

namespace llvm {

template <typename T> class SmallVectorImpl;
class CallBase;
class CallInst;
class Constant;
class Function;
class DominatorTree;
class Instruction;
class Module;

/// The type of CFI jumptable needed for a function.
enum CfiFunctionLinkage {
  CFL_Definition = 0,
  CFL_Declaration = 1,
  CFL_WeakDeclaration = 2
};

/// A call site that could be devirtualized.
struct DevirtCallSite {
  /// The offset from the address point to the virtual function.
  uint64_t Offset;
  /// The call site itself.
  CallBase &CB;
};

/// Given a call to the intrinsic \@llvm.type.test, find all devirtualizable
/// call sites based on the call and return them in DevirtCalls.
void findDevirtualizableCallsForTypeTest(
    SmallVectorImpl<DevirtCallSite> &DevirtCalls,
    SmallVectorImpl<CallInst *> &Assumes, const CallInst *CI,
    DominatorTree &DT);
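
// A minimal call sketch (illustrative; assumes CI is a CallInst to the
// intrinsic @llvm.type.test and DT is the enclosing function's dominator
// tree):
//
//   SmallVector<DevirtCallSite, 1> DevirtCalls;
//   SmallVector<CallInst *, 1> Assumes;
//   findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI, DT);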

/// Given a call to the intrinsic \@llvm.type.checked.load, find all
/// devirtualizable call sites based on the call and return them in DevirtCalls.
void findDevirtualizableCallsForTypeCheckedLoad(
    SmallVectorImpl<DevirtCallSite> &DevirtCalls,
    SmallVectorImpl<Instruction *> &LoadedPtrs,
    SmallVectorImpl<Instruction *> &Preds, bool &HasNonCallUses,
    const CallInst *CI, DominatorTree &DT);

/// Processes a Constant recursively looking into elements of arrays, structs
/// and expressions to find a trivial pointer element that is located at the
/// given offset (relative to the beginning of the whole outer Constant).
///
/// Used for example from GlobalDCE to find an entry in a C++ vtable that
/// matches a vcall offset.
///
/// To support Swift vtables, getPointerAtOffset can see through "relative
/// pointers", i.e. (sub-)expressions of the form of:
///
/// @symbol = ... {
///   i32 trunc (i64 sub (
///     i64 ptrtoint (<type> @target to i64), i64 ptrtoint (... @symbol to i64)
///   ) to i32)
/// }
///
/// For such (sub-)expressions, getPointerAtOffset returns the @target pointer.
Constant *getPointerAtOffset(Constant *I, uint64_t Offset, Module &M,
                             Constant *TopLevelGlobal = nullptr);

/// Finds the same "relative pointer" pattern as described above, where the
/// target is `F`, and replaces the entire pattern with a constant zero.
void replaceRelativePointerUsersWithZero(Function *F);

} // namespace llvm

#endif
//===-- llvm/Analysis/Passes.h - Constructors for analyses ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the analysis libraries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_PASSES_H
#define LLVM_ANALYSIS_PASSES_H

namespace llvm {
  class FunctionPass;
  class ImmutablePass;
  class ModulePass;

  //===--------------------------------------------------------------------===//
  //
  /// createLazyValueInfoPass - This creates an instance of the LazyValueInfo
  /// pass.
  FunctionPass *createLazyValueInfoPass();

  //===--------------------------------------------------------------------===//
  //
  // createDependenceAnalysisWrapperPass - This creates an instance of the
  // DependenceAnalysisWrapper pass.
  //
  FunctionPass *createDependenceAnalysisWrapperPass();

  //===--------------------------------------------------------------------===//
  //
  // createCostModelAnalysisPass - This creates an instance of the
  // CostModelAnalysis pass.
  //
  FunctionPass *createCostModelAnalysisPass();

  //===--------------------------------------------------------------------===//
  //
  // createDelinearizationPass - This pass attempts to restore
  // multidimensional array indices from linearized expressions.
  //
  FunctionPass *createDelinearizationPass();

  //===--------------------------------------------------------------------===//
  //
  // Minor pass prototypes, allowing us to expose them through bugpoint and
  // analyze.
  FunctionPass *createInstCountPass();

  //===--------------------------------------------------------------------===//
  //
  // createRegionInfoPass - This pass finds all single entry single exit regions
  // in a function and builds the region hierarchy.
  //
  FunctionPass *createRegionInfoPass();
}

#endif
//===- llvm/Analysis/DependenceGraphBuilder.h -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a builder interface that can be used to populate dependence
// graphs such as DDG and PDG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DEPENDENCEGRAPHBUILDER_H
#define LLVM_ANALYSIS_DEPENDENCEGRAPHBUILDER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {

class BasicBlock;
class DependenceInfo;
class Instruction;

/// This abstract builder class defines a set of high-level steps for creating
/// DDG-like graphs. The client code is expected to inherit from this class and
/// define concrete implementation for each of the pure virtual functions used
/// in the high-level algorithm.
template <class GraphType> class AbstractDependenceGraphBuilder {
protected:
  using BasicBlockListType = SmallVectorImpl<BasicBlock *>;

private:
  using NodeType = typename GraphType::NodeType;
  using EdgeType = typename GraphType::EdgeType;

public:
  using ClassesType = EquivalenceClasses<BasicBlock *>;
  using NodeListType = SmallVector<NodeType *, 4>;

  AbstractDependenceGraphBuilder(GraphType &G, DependenceInfo &D,
                                 const BasicBlockListType &BBs)
      : Graph(G), DI(D), BBList(BBs) {}
  virtual ~AbstractDependenceGraphBuilder() = default;

  /// The main entry to the graph construction algorithm. It starts by
  /// creating nodes in increasing order of granularity and then
  /// adds def-use and memory edges. As one of the final stages, it
  /// also creates pi-block nodes to facilitate codegen in transformations
  /// that use dependence graphs.
  ///
  /// The algorithmic complexity of this implementation is O(V^2 * I^2), where V
  /// is the number of vertices (nodes) and I is the number of instructions in
  /// each node. The total number of instructions, N, is equal to V * I, so the
  /// worst-case time complexity is O(N^2). On average roughly half of the
  /// node pairs are examined, which is still O(N^2).
  void populate() {
    computeInstructionOrdinals();
    createFineGrainedNodes();
    createDefUseEdges();
    createMemoryDependencyEdges();
    simplify();
    createAndConnectRootNode();
    createPiBlocks();
    sortNodesTopologically();
  }

  /// Compute ordinal numbers for each instruction and store them in a map for
  /// future look up. These ordinals are used to compute node ordinals which are
  /// in turn used to order nodes that are part of a cycle.
  /// Instruction ordinals are assigned based on lexical program order.
  void computeInstructionOrdinals();

  /// Create fine grained nodes. These are typically atomic nodes that
  /// consist of a single instruction.
  void createFineGrainedNodes();

  /// Analyze the def-use chains and create edges from the nodes containing
  /// definitions to the nodes containing the uses.
  void createDefUseEdges();

  /// Analyze data dependencies that exist between memory loads or stores,
  /// in the graph nodes and create edges between them.
  void createMemoryDependencyEdges();

  /// Create a root node and add edges such that each node in the graph is
  /// reachable from the root.
  void createAndConnectRootNode();

  /// Apply graph abstraction to groups of nodes that belong to a strongly
  /// connected component of the graph to create larger compound nodes
  /// called pi-blocks. The purpose of this abstraction is to isolate sets of
  /// program elements that need to stay together during codegen and turn
  /// the dependence graph into an acyclic graph.
  void createPiBlocks();

  /// Go through all the nodes in the graph and collapse any two nodes
  /// 'a' and 'b' if all of the following are true:
  ///   - the only edge from 'a' is a def-use edge to 'b' and
  ///   - the only edge to 'b' is a def-use edge from 'a' and
  ///   - there is no cyclic edge from 'b' to 'a' and
  ///   - all instructions in 'a' and 'b' belong to the same basic block and
  ///   - both 'a' and 'b' are simple (single or multi instruction) nodes.
  void simplify();

  /// Topologically sort the graph nodes.
  void sortNodesTopologically();

protected:
  /// Create the root node of the graph.
  virtual NodeType &createRootNode() = 0;

  /// Create an atomic node in the graph given a single instruction.
  virtual NodeType &createFineGrainedNode(Instruction &I) = 0;

  /// Create a pi-block node in the graph representing a group of nodes in an
  /// SCC of the graph.
  virtual NodeType &createPiBlock(const NodeListType &L) = 0;

  /// Create a def-use edge going from \p Src to \p Tgt.
  virtual EdgeType &createDefUseEdge(NodeType &Src, NodeType &Tgt) = 0;

  /// Create a memory dependence edge going from \p Src to \p Tgt.
  virtual EdgeType &createMemoryEdge(NodeType &Src, NodeType &Tgt) = 0;

  /// Create a rooted edge going from \p Src to \p Tgt .
  virtual EdgeType &createRootedEdge(NodeType &Src, NodeType &Tgt) = 0;

  /// Given a pi-block node, return a vector of all the nodes contained within
  /// it.
  virtual const NodeListType &getNodesInPiBlock(const NodeType &N) = 0;

  /// Deallocate memory of edge \p E.
  virtual void destroyEdge(EdgeType &E) { delete &E; }

  /// Deallocate memory of node \p N.
  virtual void destroyNode(NodeType &N) { delete &N; }

  /// Return true if creation of pi-blocks are supported and desired,
  /// and false otherwise.
  virtual bool shouldCreatePiBlocks() const { return true; }

  /// Return true if graph simplification step is requested, and false
  /// otherwise.
  virtual bool shouldSimplify() const { return true; }

  /// Return true if it's safe to merge the two nodes.
  virtual bool areNodesMergeable(const NodeType &A,
                                 const NodeType &B) const = 0;

  /// Append the content of node \p B into node \p A and remove \p B and
  /// the edge between \p A and \p B from the graph.
  virtual void mergeNodes(NodeType &A, NodeType &B) = 0;

  /// Given an instruction \p I return its associated ordinal number.
  size_t getOrdinal(Instruction &I) {
    assert(InstOrdinalMap.contains(&I) &&
           "No ordinal computed for this instruction.");
    return InstOrdinalMap[&I];
  }

  /// Given a node \p N return its associated ordinal number.
  size_t getOrdinal(NodeType &N) {
    assert(NodeOrdinalMap.contains(&N) &&
           "No ordinal computed for this node.");
    return NodeOrdinalMap[&N];
  }

  /// Map types to map instructions to nodes used when populating the graph.
  using InstToNodeMap = DenseMap<Instruction *, NodeType *>;

  /// Map Types to map instruction/nodes to an ordinal number.
  using InstToOrdinalMap = DenseMap<Instruction *, size_t>;
  using NodeToOrdinalMap = DenseMap<NodeType *, size_t>;

  /// Reference to the graph that gets built by a concrete implementation of
  /// this builder.
  GraphType &Graph;

  /// Dependence information used to create memory dependence edges in the
  /// graph.
  DependenceInfo &DI;

  /// The list of basic blocks to consider when building the graph.
  const BasicBlockListType &BBList;

  /// A mapping from instructions to the corresponding nodes in the graph.
  InstToNodeMap IMap;

  /// A mapping from each instruction to an ordinal number. This map is used to
  /// populate the \p NodeOrdinalMap.
  InstToOrdinalMap InstOrdinalMap;

  /// A mapping from nodes to an ordinal number. This map is used to sort nodes
  /// in a pi-block based on program order.
  NodeToOrdinalMap NodeOrdinalMap;
};

} // namespace llvm

#endif // LLVM_ANALYSIS_DEPENDENCEGRAPHBUILDER_H
//===- llvm/Analysis/DivergenceAnalysis.h - Divergence Analysis -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// The divergence analysis determines which instructions and branches are
// divergent given a set of divergent source instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DIVERGENCEANALYSIS_H
#define LLVM_ANALYSIS_DIVERGENCEANALYSIS_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/Analysis/SyncDependenceAnalysis.h"
#include "llvm/IR/PassManager.h"
#include <vector>

namespace llvm {
class Function;
class Instruction;
class Loop;
class raw_ostream;
class TargetTransformInfo;
class Value;

/// \brief Generic divergence analysis for reducible CFGs.
///
/// This analysis propagates divergence in a data-parallel context from sources
/// of divergence to all users. It requires reducible CFGs. All assignments
/// should be in SSA form.
class DivergenceAnalysisImpl {
public:
  /// \brief This instance will analyze the whole function \p F or the loop \p
  /// RegionLoop.
  ///
  /// \param RegionLoop if non-null the analysis is restricted to \p RegionLoop.
  /// Otherwise the whole function is analyzed.
  /// \param IsLCSSAForm whether the analysis may assume that the IR in the
  /// region is in LCSSA form.
  DivergenceAnalysisImpl(const Function &F, const Loop *RegionLoop,
                         const DominatorTree &DT, const LoopInfo &LI,
                         SyncDependenceAnalysis &SDA, bool IsLCSSAForm);

  /// \brief The loop that defines the analyzed region (if any).
  const Loop *getRegionLoop() const { return RegionLoop; }
  const Function &getFunction() const { return F; }

  /// \brief Whether \p BB is part of the region.
  bool inRegion(const BasicBlock &BB) const;
  /// \brief Whether \p I is part of the region.
  bool inRegion(const Instruction &I) const;

  /// \brief Mark \p UniVal as a value that is always uniform.
  void addUniformOverride(const Value &UniVal);

  /// \brief Mark \p DivVal as a value that is always divergent. Will not do so
  /// if `isAlwaysUniform(DivVal)`.
  /// \returns Whether the tracked divergence state of \p DivVal changed.
  bool markDivergent(const Value &DivVal);

  /// \brief Propagate divergence to all instructions in the region.
  /// Divergence is seeded by calls to \p markDivergent.
  void compute();

  /// \brief Whether any value was marked or analyzed to be divergent.
  bool hasDetectedDivergence() const { return !DivergentValues.empty(); }

  /// \brief Whether \p Val will always return a uniform value regardless of its
  /// operands
  bool isAlwaysUniform(const Value &Val) const;

  /// \brief Whether \p Val is divergent at its definition.
  bool isDivergent(const Value &Val) const;

  /// \brief Whether \p U is divergent. Uses of a uniform value can be
  /// divergent.
  bool isDivergentUse(const Use &U) const;

private:
  /// \brief Mark \p Term as divergent and push all Instructions that become
  /// divergent as a result on the worklist.
  void analyzeControlDivergence(const Instruction &Term);
  /// \brief Mark all phi nodes in \p JoinBlock as divergent and push them on
  /// the worklist.
  void taintAndPushPhiNodes(const BasicBlock &JoinBlock);

  /// \brief Identify all Instructions that become divergent because \p DivExit
  /// is a divergent loop exit of \p DivLoop. Mark those instructions as
  /// divergent and push them on the worklist.
  void propagateLoopExitDivergence(const BasicBlock &DivExit,
                                   const Loop &DivLoop);

  /// \brief Internal implementation function for propagateLoopExitDivergence.
  void analyzeLoopExitDivergence(const BasicBlock &DivExit,
                                 const Loop &OuterDivLoop);

  /// \brief Mark all instruction as divergent that use a value defined in \p
  /// OuterDivLoop. Push their users on the worklist.
  void analyzeTemporalDivergence(const Instruction &I,
                                 const Loop &OuterDivLoop);

  /// \brief Push all users of \p Val (in the region) to the worklist.
  void pushUsers(const Value &I);

  /// \brief Whether \p Val is divergent when read in \p ObservingBlock.
  bool isTemporalDivergent(const BasicBlock &ObservingBlock,
                           const Value &Val) const;

private:
  const Function &F;
  // If RegionLoop != nullptr, analysis is only performed within \p RegionLoop.
  // Otherwise, the whole function is analyzed.
  const Loop *RegionLoop;

  const DominatorTree &DT;
  const LoopInfo &LI;

  // Recognized divergent loops
  DenseSet<const Loop *> DivergentLoops;

  // The SDA links divergent branches to divergent control-flow joins.
  SyncDependenceAnalysis &SDA;

  // Use simplified code path for LCSSA form.
  bool IsLCSSAForm;

  // Set of known-uniform values.
  DenseSet<const Value *> UniformOverrides;

  // Detected/marked divergent values.
  DenseSet<const Value *> DivergentValues;

  // Internal worklist for divergence propagation.
  std::vector<const Instruction *> Worklist;
};

class DivergenceInfo {
  Function &F;

  // If the function contains an irreducible region the divergence
  // analysis can run indefinitely. We set ContainsIrreducible and no
  // analysis is actually performed on the function. All values in
  // this function are conservatively reported as divergent instead.
  bool ContainsIrreducible = false;
  std::unique_ptr<SyncDependenceAnalysis> SDA;
  std::unique_ptr<DivergenceAnalysisImpl> DA;

public:
  DivergenceInfo(Function &F, const DominatorTree &DT,
                 const PostDominatorTree &PDT, const LoopInfo &LI,
                 const TargetTransformInfo &TTI, bool KnownReducible);

  /// Whether any divergence was detected.
  bool hasDivergence() const {
    return ContainsIrreducible || DA->hasDetectedDivergence();
  }

  /// The GPU kernel this analysis result is for.
  const Function &getFunction() const { return F; }

  /// Whether \p V is divergent at its definition.
  bool isDivergent(const Value &V) const {
    return ContainsIrreducible || DA->isDivergent(V);
  }

  /// Whether \p U is divergent. Uses of a uniform value can be divergent.
  bool isDivergentUse(const Use &U) const {
    return ContainsIrreducible || DA->isDivergentUse(U);
  }

  /// Whether \p V is uniform/non-divergent.
  bool isUniform(const Value &V) const { return !isDivergent(V); }

  /// Whether \p U is uniform/non-divergent. Uses of a uniform value can be
  /// divergent.
  bool isUniformUse(const Use &U) const { return !isDivergentUse(U); }
};

/// \brief Divergence analysis frontend for GPU kernels.
class DivergenceAnalysis : public AnalysisInfoMixin<DivergenceAnalysis> {
  friend AnalysisInfoMixin<DivergenceAnalysis>;

  static AnalysisKey Key;

public:
  using Result = DivergenceInfo;

  /// Runs the divergence analysis on \p F, a GPU kernel.
  Result run(Function &F, FunctionAnalysisManager &AM);
};
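
// A minimal usage sketch of this frontend (hypothetical client code, assuming
// a FunctionAnalysisManager with the standard analyses registered, and
// llvm/IR/InstIterator.h for instructions()):
//
//   DivergenceInfo &DI = FAM.getResult<DivergenceAnalysis>(F);
//   for (Instruction &I : instructions(F))
//     if (DI.isDivergent(I))
//       errs() << "divergent: " << I << '\n';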

/// Printer pass to dump divergence analysis results.
struct DivergenceAnalysisPrinterPass
    : public PassInfoMixin<DivergenceAnalysisPrinterPass> {
  DivergenceAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);

private:
  raw_ostream &OS;
}; // class DivergenceAnalysisPrinterPass

} // namespace llvm

#endif // LLVM_ANALYSIS_DIVERGENCEANALYSIS_H
//===- LoopAnalysisManager.h - Loop analysis management ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing per-loop analyses. These are
/// typically used as part of a loop pass pipeline over the loop nests of
/// a function.
///
/// Loop analyses are allowed to make some simplifying assumptions:
/// 1) Loops are, where possible, in simplified form.
/// 2) Loops are *always* in LCSSA form.
/// 3) A collection of analysis results are available:
///    - LoopInfo
///    - DominatorTree
///    - ScalarEvolution
///    - AAManager
///
/// The primary mechanism to provide these invariants is the loop pass manager,
/// but they can also be manually provided in order to reason about a loop from
/// outside of a dedicated pass manager.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
#define LLVM_ANALYSIS_LOOPANALYSISMANAGER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class AAResults;
class AssumptionCache;
class DominatorTree;
class Function;
class Loop;
class LoopInfo;
class MemorySSA;
class ScalarEvolution;
class TargetLibraryInfo;
class TargetTransformInfo;

/// The adaptor from a function pass to a loop pass computes these analyses and
/// makes them available to the loop passes "for free". Each loop pass is
/// expected to update these analyses if necessary to ensure they're
/// valid after it runs.
struct LoopStandardAnalysisResults {
  AAResults &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  LoopInfo &LI;
  ScalarEvolution &SE;
  TargetLibraryInfo &TLI;
  TargetTransformInfo &TTI;
  BlockFrequencyInfo *BFI;
  BranchProbabilityInfo *BPI;
  MemorySSA *MSSA;
};
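
// A hedged sketch of assembling these results by hand from a function pass,
// outside of the loop pass manager (the analysis names are the standard
// new-PM ones; the optional BFI/BPI/MSSA slots are left null here):
//
//   LoopStandardAnalysisResults AR = {AM.getResult<AAManager>(F),
//                                     AM.getResult<AssumptionAnalysis>(F),
//                                     AM.getResult<DominatorTreeAnalysis>(F),
//                                     AM.getResult<LoopAnalysis>(F),
//                                     AM.getResult<ScalarEvolutionAnalysis>(F),
//                                     AM.getResult<TargetLibraryAnalysis>(F),
//                                     AM.getResult<TargetIRAnalysis>(F),
//                                     /*BFI=*/nullptr, /*BPI=*/nullptr,
//                                     /*MSSA=*/nullptr};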

/// Extern template declaration for the analysis set for this IR unit.
extern template class AllAnalysesOn<Loop>;

extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
/// The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for detailed
/// documentation. This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
typedef AnalysisManager<Loop, LoopStandardAnalysisResults &>
    LoopAnalysisManager;

/// A proxy from a \c LoopAnalysisManager to a \c Function.
typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
    LoopAnalysisManagerFunctionProxy;

/// A specialized result for the \c LoopAnalysisManagerFunctionProxy which
/// retains a \c LoopInfo reference.
///
/// This allows it to collect loop objects for which analysis results may be
/// cached in the \c LoopAnalysisManager.
template <> class LoopAnalysisManagerFunctionProxy::Result {
public:
  explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
      : InnerAM(&InnerAM), LI(&LI) {}
  Result(Result &&Arg)
      : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI), MSSAUsed(Arg.MSSAUsed) {
    // We have to null out the analysis manager in the moved-from state
    // because we are taking ownership of the responsibility to clear the
    // analysis state.
    Arg.InnerAM = nullptr;
  }
  Result &operator=(Result &&RHS) {
    InnerAM = RHS.InnerAM;
    LI = RHS.LI;
    MSSAUsed = RHS.MSSAUsed;
    // We have to null out the analysis manager in the moved-from state
    // because we are taking ownership of the responsibility to clear the
    // analysis state.
    RHS.InnerAM = nullptr;
    return *this;
  }
  ~Result() {
    // InnerAM is cleared in a moved-from state where there is nothing to do.
    if (!InnerAM)
      return;

    // Clear out the analysis manager if we're being destroyed -- it means we
    // didn't even see an invalidate call when we got invalidated.
    InnerAM->clear();
  }

  /// Mark MemorySSA as used so we can invalidate self if MSSA is invalidated.
  void markMSSAUsed() { MSSAUsed = true; }

  /// Accessor for the analysis manager.
  LoopAnalysisManager &getManager() { return *InnerAM; }

  /// Handler for invalidation of the proxy for a particular function.
  ///
  /// If the proxy, \c LoopInfo, and associated analyses are preserved, this
  /// will merely forward the invalidation event to any cached loop analysis
  /// results for loops within this function.
  ///
  /// If the necessary loop infrastructure is not preserved, this will forcibly
  /// clear all of the cached analysis results that are keyed on the \c
  /// LoopInfo for this function.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &Inv);

private:
  LoopAnalysisManager *InnerAM;
  LoopInfo *LI;
  bool MSSAUsed = false;
};

/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
/// so it can pass the \c LoopInfo to the result.
template <>
LoopAnalysisManagerFunctionProxy::Result
LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);

// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;

extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
                                                LoopStandardAnalysisResults &>;
/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
                                  LoopStandardAnalysisResults &>
    FunctionAnalysisManagerLoopProxy;

/// Returns the minimum set of Analyses that all loop passes must preserve.
PreservedAnalyses getLoopPassPreservedAnalyses();
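
// For example, a loop transform would typically end its run method with a
// sketch like this (simplifyBody is a hypothetical helper; LPMUpdater comes
// from LoopPassManager.h):
//
//   PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
//                         LoopStandardAnalysisResults &AR, LPMUpdater &U) {
//     if (!simplifyBody(L))
//       return PreservedAnalyses::all();
//     return getLoopPassPreservedAnalyses();
//   }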
} // end namespace llvm

#endif // LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
//===- CGSCCPassManager.h - Call graph pass management ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing passes over SCCs of the call
/// graph. These passes form an important component of LLVM's interprocedural
/// optimizations. Because they operate on the SCCs of the call graph, and they
/// traverse the graph in post-order, they can effectively do pair-wise
/// interprocedural optimizations for all call edges in the program while
/// incrementally refining it and improving the context of these pair-wise
/// optimizations. At each call site edge, the callee has already been
/// optimized as much as is possible. This in turn allows very accurate
/// analysis of it for IPO.
///
/// A secondary more general goal is to be able to isolate optimization on
/// unrelated parts of the IR module. This is useful to ensure our
/// optimizations are principled and don't miss opportunities where refinement
/// of one part of the module influences transformations in another part of the
/// module. But this is also useful if we want to parallelize the optimizations
/// across common large module graph shapes which tend to be very wide and have
/// large regions of unrelated cliques.
///
/// To satisfy these goals, we use the LazyCallGraph which provides two graphs
/// nested inside each other (and built lazily from the bottom-up): the call
/// graph proper, and a reference graph. The reference graph is super set of
/// the call graph and is a conservative approximation of what could through
/// scalar or CGSCC transforms *become* the call graph. Using this allows us to
/// ensure we optimize functions prior to them being introduced into the call
/// graph by devirtualization or other technique, and thus ensures that
/// subsequent pair-wise interprocedural optimizations observe the optimized
/// form of these functions. The (potentially transitive) reference
/// reachability used by the reference graph is a conservative approximation
/// that still allows us to have independent regions of the graph.
///
/// FIXME: There is one major drawback of the reference graph: in its naive
/// form it is quadratic because it contains a distinct edge for each
/// (potentially indirect) reference, even if they are all through some common
/// global table of function pointers. This can be fixed in a number of ways
/// that essentially preserve enough of the normalization. While it isn't
/// expected to completely preclude the usability of this, it will need to be
/// addressed.
///
/// All of these issues are made substantially more complex in the face of
/// mutations to the call graph while optimization passes are being run. When
/// mutations to the call graph occur we want to achieve two different things:
///
/// - We need to update the call graph in-flight and invalidate analyses
///   cached on entities in the graph. Because of the cache-based analysis
///   design of the pass manager, it is essential to have stable identities for
///   the elements of the IR that passes traverse, and to invalidate any
///   analyses cached on these elements as the mutations take place.
///
/// - We want to preserve the incremental and post-order traversal of the
///   graph even as it is refined and mutated. This means we want optimization
///   to observe the most refined form of the call graph and to do so in
///   post-order.
///
/// To address this, the CGSCC manager uses both worklists that can be expanded
/// by passes which transform the IR, and provides invalidation tests to skip
/// entries that become dead. This extra data is provided to every SCC pass so
/// that it can carefully update the manager's traversal as the call graph
/// mutates.
///
/// We also provide support for running function passes within the CGSCC walk,
/// and there we provide automatic update of the call graph including of the
/// pass manager to reflect call graph changes that fall out naturally as part
/// of scalar transformations.
///
/// The patterns used to ensure the goals of post-order visitation of the fully
/// refined graph:
///
/// 1) Sink toward the "bottom" as the graph is refined. This means that any
///    iteration continues in some valid post-order sequence after the mutation
///    has altered the structure.
///
/// 2) Enqueue in post-order, including the current entity. If the current
///    entity's shape changes, it and everything after it in post-order needs
///    to be visited to observe that shape.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
#define LLVM_ANALYSIS_CGSCCPASSMANAGER_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <utility>

namespace llvm {

class Function;
class Value;
template <typename T, unsigned int N> class SmallPriorityWorklist;
struct CGSCCUpdateResult;

class Module;

// Allow debug logging in this inline function.
#define DEBUG_TYPE "cgscc"

/// Extern template declaration for the analysis set for this IR unit.
extern template class AllAnalysesOn<LazyCallGraph::SCC>;

extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;

/// The CGSCC analysis manager.
///
/// See the documentation for the AnalysisManager template for detailed
/// documentation. This type serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
using CGSCCAnalysisManager =
    AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;

// Explicit specialization and instantiation declarations for the pass manager.
// See the comments on the definition of the specialization for details on how
// it differs from the primary template.
template <>
PreservedAnalyses
PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                      CGSCCAnalysisManager &AM,
                                      LazyCallGraph &G, CGSCCUpdateResult &UR);
extern template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                                  LazyCallGraph &, CGSCCUpdateResult &>;

/// The CGSCC pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of SCC passes over each SCC that the manager is run over. This
/// type serves as a convenient way to refer to this construct.
using CGSCCPassManager =
    PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
                CGSCCUpdateResult &>;

/// An explicit specialization of the require analysis template pass.
template <typename AnalysisT>
struct RequireAnalysisPass<AnalysisT, LazyCallGraph::SCC, CGSCCAnalysisManager,
                           LazyCallGraph &, CGSCCUpdateResult &>
    : PassInfoMixin<RequireAnalysisPass<AnalysisT, LazyCallGraph::SCC,
                                        CGSCCAnalysisManager, LazyCallGraph &,
                                        CGSCCUpdateResult &>> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &) {
    (void)AM.template getResult<AnalysisT>(C, CG);
    return PreservedAnalyses::all();
  }
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    auto ClassName = AnalysisT::name();
    auto PassName = MapClassName2PassName(ClassName);
    OS << "require<" << PassName << '>';
  }
};

/// A proxy from a \c CGSCCAnalysisManager to a \c Module.
using CGSCCAnalysisManagerModuleProxy =
    InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;

/// We need a specialized result for the \c CGSCCAnalysisManagerModuleProxy so
/// it can have access to the call graph in order to walk all the SCCs when
/// invalidating things.
template <> class CGSCCAnalysisManagerModuleProxy::Result {
public:
  explicit Result(CGSCCAnalysisManager &InnerAM, LazyCallGraph &G)
      : InnerAM(&InnerAM), G(&G) {}

  /// Accessor for the analysis manager.
  CGSCCAnalysisManager &getManager() { return *InnerAM; }

  /// Handler for invalidation of the Module.
  ///
  /// If the proxy analysis itself is preserved, then we assume that the set of
  /// SCCs in the Module hasn't changed. Thus any pointers to SCCs in the
  /// CGSCCAnalysisManager are still valid, and we don't need to call \c clear
  /// on the CGSCCAnalysisManager.
  ///
  /// Regardless of whether this analysis is marked as preserved, all of the
  /// analyses in the \c CGSCCAnalysisManager are potentially invalidated based
  /// on the set of preserved analyses.
  bool invalidate(Module &M, const PreservedAnalyses &PA,
                  ModuleAnalysisManager::Invalidator &Inv);

private:
  CGSCCAnalysisManager *InnerAM;
  LazyCallGraph *G;
};

/// Provide a specialized run method for the \c CGSCCAnalysisManagerModuleProxy
/// so it can pass the lazy call graph to the result.
template <>
CGSCCAnalysisManagerModuleProxy::Result
CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM);

// Ensure the \c CGSCCAnalysisManagerModuleProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;

extern template class OuterAnalysisManagerProxy<
    ModuleAnalysisManager, LazyCallGraph::SCC, LazyCallGraph &>;

/// A proxy from a \c ModuleAnalysisManager to an \c SCC.
using ModuleAnalysisManagerCGSCCProxy =
    OuterAnalysisManagerProxy<ModuleAnalysisManager, LazyCallGraph::SCC,
                              LazyCallGraph &>;

/// Support structure for SCC passes to communicate updates to the call graph
/// back to the CGSCC pass manager infrastructure.
///
/// The CGSCC pass manager runs SCC passes which are allowed to update the call
/// graph and SCC structures. This means the structure the pass manager works
/// on is mutating underneath it. In order to support that, there needs to be
/// careful communication about the precise nature and ramifications of these
/// updates to the pass management infrastructure.
///
/// All SCC passes will have to accept a reference to the management layer's
/// update result struct and use it to reflect the results of any CG updates
/// performed.
///
/// Passes which do not change the call graph structure in any way can just
/// ignore this argument to their run method.
struct CGSCCUpdateResult {
  /// Worklist of the RefSCCs queued for processing.
  ///
  /// When a pass refines the graph and creates new RefSCCs or causes them to
  /// have a different shape or set of component SCCs it should add the RefSCCs
  /// to this worklist so that we visit them in the refined form.
  ///
  /// This worklist is in reverse post-order, as we pop off the back in order
  /// to observe RefSCCs in post-order. When adding RefSCCs, clients should add
  /// them in reverse post-order.
  SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> &RCWorklist;

  /// Worklist of the SCCs queued for processing.
  ///
  /// When a pass refines the graph and creates new SCCs or causes them to have
  /// a different shape or set of component functions it should add the SCCs to
  /// this worklist so that we visit them in the refined form.
  ///
  /// Note that if the SCCs are part of a RefSCC that is added to the \c
  /// RCWorklist, they don't need to be added here as visiting the RefSCC will
  /// be sufficient to re-visit the SCCs within it.
  ///
  /// This worklist is in reverse post-order, as we pop off the back in order
  /// to observe SCCs in post-order. When adding SCCs, clients should add them
  /// in reverse post-order.
  SmallPriorityWorklist<LazyCallGraph::SCC *, 1> &CWorklist;

  /// The set of invalidated RefSCCs which should be skipped if they are found
  /// in \c RCWorklist.
  ///
  /// This is used to quickly prune out RefSCCs when they get deleted and
  /// happen to already be on the worklist. We use this primarily to avoid
  /// scanning the list and removing entries from it.
  SmallPtrSetImpl<LazyCallGraph::RefSCC *> &InvalidatedRefSCCs;

  /// The set of invalidated SCCs which should be skipped if they are found
  /// in \c CWorklist.
  ///
  /// This is used to quickly prune out SCCs when they get deleted and happen
  /// to already be on the worklist. We use this primarily to avoid scanning
  /// the list and removing entries from it.
  SmallPtrSetImpl<LazyCallGraph::SCC *> &InvalidatedSCCs;

  /// If non-null, the updated current \c SCC being processed.
  ///
  /// This is set when a graph refinement takes place and the "current" point
  /// in the graph moves "down" or earlier in the post-order walk. This will
  /// often cause the "current" SCC to be a newly created SCC object and the
  /// old one to be added to the above worklist. When that happens, this
  /// pointer is non-null and can be used to continue processing the "top" of
  /// the post-order walk.
  LazyCallGraph::SCC *UpdatedC;

  /// Preserved analyses across SCCs.
  ///
  /// We specifically want to allow CGSCC passes to mutate ancestor IR
  /// (changing both the CG structure and the function IR itself). However,
  /// this means we need to take special care to correctly mark what analyses
  /// are preserved *across* SCCs. We have to track this out-of-band here
  /// because within the main `PassManager` infrastructure we need to mark
  /// everything within an SCC as preserved in order to avoid repeatedly
  /// invalidating the same analyses as we unnest pass managers and adaptors.
  /// So we track the cross-SCC version of the preserved analyses here from any
  /// code that does direct invalidation of SCC analyses, and then use it
  /// whenever we move forward in the post-order walk of SCCs before running
  /// passes over the new SCC.
  PreservedAnalyses CrossSCCPA;

  /// A hacky area where the inliner can retain history about inlining
  /// decisions that mutated the call graph's SCC structure in order to avoid
  /// infinite inlining. See the comments in the inliner's CG update logic.
  ///
  /// FIXME: Keeping this here seems like a big layering issue, we should look
  /// for a better technique.
  SmallDenseSet<std::pair<LazyCallGraph::Node *, LazyCallGraph::SCC *>, 4>
      &InlinedInternalEdges;

  /// Weak VHs to keep track of indirect calls for the purposes of detecting
  /// devirtualization.
  ///
  /// This is a map to avoid having duplicate entries. If a Value is
  /// deallocated, its corresponding WeakTrackingVH will be nulled out. When
  /// checking if a Value is in the map or not, also check if the corresponding
  /// WeakTrackingVH is null to avoid issues with a new Value sharing the same
  /// address as a deallocated one.
  SmallMapVector<Value *, WeakTrackingVH, 16> IndirectVHs;
};
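
// A sketch of the run signature an SCC pass sees; a pass that does not change
// the call graph can simply ignore UR (hypothetical no-op pass):
//
//   struct NoOpCGSCCPass : PassInfoMixin<NoOpCGSCCPass> {
//     PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
//                           LazyCallGraph &CG, CGSCCUpdateResult &UR) {
//       return PreservedAnalyses::all();
//     }
//   };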

/// The core module pass which does a post-order walk of the SCCs and
/// runs a CGSCC pass over each one.
///
/// Designed to allow composition of a CGSCCPass(Manager) and
/// a ModulePassManager. Note that this pass must be run with a module analysis
/// manager as it uses the LazyCallGraph analysis. It will also run the
/// \c CGSCCAnalysisManagerModuleProxy analysis prior to running the CGSCC
/// pass over the module to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
class ModuleToPostOrderCGSCCPassAdaptor
    : public PassInfoMixin<ModuleToPostOrderCGSCCPassAdaptor> {
public:
  using PassConceptT =
      detail::PassConcept<LazyCallGraph::SCC, CGSCCAnalysisManager,
                          LazyCallGraph &, CGSCCUpdateResult &>;

  explicit ModuleToPostOrderCGSCCPassAdaptor(std::unique_ptr<PassConceptT> Pass)
      : Pass(std::move(Pass)) {}

  ModuleToPostOrderCGSCCPassAdaptor(ModuleToPostOrderCGSCCPassAdaptor &&Arg)
      : Pass(std::move(Arg.Pass)) {}

  friend void swap(ModuleToPostOrderCGSCCPassAdaptor &LHS,
                   ModuleToPostOrderCGSCCPassAdaptor &RHS) {
    std::swap(LHS.Pass, RHS.Pass);
  }

  ModuleToPostOrderCGSCCPassAdaptor &
  operator=(ModuleToPostOrderCGSCCPassAdaptor RHS) {
    swap(*this, RHS);
    return *this;
  }

  /// Runs the CGSCC pass across every SCC in the module.
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    OS << "cgscc(";
    Pass->printPipeline(OS, MapClassName2PassName);
    OS << ')';
  }

  static bool isRequired() { return true; }

private:
  std::unique_ptr<PassConceptT> Pass;
};

/// A function to deduce a CGSCC pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
ModuleToPostOrderCGSCCPassAdaptor
createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT &&Pass) {
  using PassModelT = detail::PassModel<LazyCallGraph::SCC, CGSCCPassT,
                                       PreservedAnalyses, CGSCCAnalysisManager,
                                       LazyCallGraph &, CGSCCUpdateResult &>;
  // Do not use make_unique, it causes too many template instantiations,
  // causing terrible compile times.
  return ModuleToPostOrderCGSCCPassAdaptor(
      std::unique_ptr<ModuleToPostOrderCGSCCPassAdaptor::PassConceptT>(
          new PassModelT(std::forward<CGSCCPassT>(Pass))));
}
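
// A hedged composition sketch: nest a CGSCC pipeline inside a module pipeline
// (InlinerPass is just an illustrative CGSCC pass):
//
//   ModulePassManager MPM;
//   CGSCCPassManager CGPM;
//   CGPM.addPass(InlinerPass());
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));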

/// A proxy from a \c FunctionAnalysisManager to an \c SCC.
///
/// When a module pass runs and triggers invalidation, both the CGSCC and
/// Function analysis manager proxies on the module get an invalidation event.
/// We don't want to fully duplicate responsibility for most of the
/// invalidation logic. Instead, this layer is only responsible for SCC-local
/// invalidation events. We work with the module's FunctionAnalysisManager to
/// invalidate function analyses.
class FunctionAnalysisManagerCGSCCProxy
    : public AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy> {
public:
  class Result {
  public:
    explicit Result() : FAM(nullptr) {}
    explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}

    void updateFAM(FunctionAnalysisManager &FAM) { this->FAM = &FAM; }
    /// Accessor for the analysis manager.
    FunctionAnalysisManager &getManager() {
      assert(FAM);
      return *FAM;
    }

    bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
                    CGSCCAnalysisManager::Invalidator &Inv);

  private:
    FunctionAnalysisManager *FAM;
  };

  /// Computes the \c FunctionAnalysisManager and stores it in the result proxy.
  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &);

private:
  friend AnalysisInfoMixin<FunctionAnalysisManagerCGSCCProxy>;

  static AnalysisKey Key;
};

extern template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;

/// A proxy from a \c CGSCCAnalysisManager to a \c Function.
using CGSCCAnalysisManagerFunctionProxy =
    OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;

/// Helper to update the call graph after running a function pass.
///
/// Function passes can only mutate the call graph in specific ways. This
/// routine provides a helper that updates the call graph in those ways
/// including returning whether any changes were made and populating a CG
/// update result struct for the overall CGSCC walk.
LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM);

/// Helper to update the call graph after running a CGSCC pass.
///
/// CGSCC passes can only mutate the call graph in specific ways. This
/// routine provides a helper that updates the call graph in those ways
/// including returning whether any changes were made and populating a CG
/// update result struct for the overall CGSCC walk.
LazyCallGraph::SCC &updateCGAndAnalysisManagerForCGSCCPass(
    LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM);

/// Adaptor that maps from an SCC to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
/// to a \c CGSCCAnalysisManager it will run the
/// \c FunctionAnalysisManagerCGSCCProxy analysis prior to running the function
/// pass over the SCC to enable a \c FunctionAnalysisManager to be used
/// within this run safely.
class CGSCCToFunctionPassAdaptor
    : public PassInfoMixin<CGSCCToFunctionPassAdaptor> {
public:
  using PassConceptT = detail::PassConcept<Function, FunctionAnalysisManager>;

  explicit CGSCCToFunctionPassAdaptor(std::unique_ptr<PassConceptT> Pass,
                                      bool EagerlyInvalidate, bool NoRerun)
      : Pass(std::move(Pass)), EagerlyInvalidate(EagerlyInvalidate),
        NoRerun(NoRerun) {}

  CGSCCToFunctionPassAdaptor(CGSCCToFunctionPassAdaptor &&Arg)
      : Pass(std::move(Arg.Pass)), EagerlyInvalidate(Arg.EagerlyInvalidate),
        NoRerun(Arg.NoRerun) {}

  friend void swap(CGSCCToFunctionPassAdaptor &LHS,
                   CGSCCToFunctionPassAdaptor &RHS) {
    std::swap(LHS.Pass, RHS.Pass);
  }

  CGSCCToFunctionPassAdaptor &operator=(CGSCCToFunctionPassAdaptor RHS) {
    swap(*this, RHS);
    return *this;
  }

  /// Runs the function pass across every function in the SCC.
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    OS << "function";
    if (EagerlyInvalidate || NoRerun) {
      OS << "<";
      if (EagerlyInvalidate)
        OS << "eager-inv";
      if (EagerlyInvalidate && NoRerun)
        OS << ";";
      if (NoRerun)
        OS << "no-rerun";
      OS << ">";
    }
    OS << '(';
    Pass->printPipeline(OS, MapClassName2PassName);
    OS << ')';
  }

  static bool isRequired() { return true; }

private:
  std::unique_ptr<PassConceptT> Pass;
  bool EagerlyInvalidate;
  bool NoRerun;
};

/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
CGSCCToFunctionPassAdaptor
createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass,
                                 bool EagerlyInvalidate = false,
                                 bool NoRerun = false) {
  using PassModelT =
      detail::PassModel<Function, FunctionPassT, PreservedAnalyses,
                        FunctionAnalysisManager>;
  // Do not use make_unique, it causes too many template instantiations,
  // causing terrible compile times.
  return CGSCCToFunctionPassAdaptor(
      std::unique_ptr<CGSCCToFunctionPassAdaptor::PassConceptT>(
          new PassModelT(std::forward<FunctionPassT>(Pass))),
      EagerlyInvalidate, NoRerun);
}
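
// Likewise, function passes can be nested inside a CGSCC pipeline; a sketch
// using SimplifyCFGPass for illustration:
//
//   CGSCCPassManager CGPM;
//   FunctionPassManager FPM;
//   FPM.addPass(SimplifyCFGPass());
//   CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM),
//                                                 /*EagerlyInvalidate=*/true,
//                                                 /*NoRerun=*/false));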

// A marker to determine if function passes should be run on a function within a
// CGSCCToFunctionPassAdaptor. This is used to prevent running an expensive
// function pass (manager) on a function multiple times if SCC mutations cause a
// function to be visited multiple times and the function is not modified by
// other SCC passes.
class ShouldNotRunFunctionPassesAnalysis
    : public AnalysisInfoMixin<ShouldNotRunFunctionPassesAnalysis> {
public:
  static AnalysisKey Key;
  struct Result {};

  Result run(Function &F, FunctionAnalysisManager &FAM) { return Result(); }
};

/// A helper that repeats an SCC pass each time an indirect call is refined to
/// a direct call by that pass.
///
/// While the CGSCC pass manager works to re-visit SCCs and RefSCCs as they
/// change shape, we may also want to repeat an SCC pass if it simply refines
/// an indirect call to a direct call, even if doing so does not alter the
/// shape of the graph. Note that this only pertains to direct calls to
/// functions where IPO across the SCC may be able to compute more precise
/// results. For intrinsics, we assume scalar optimizations can already fully
/// reason about them.
///
/// However, this repetition has the potential to be very large, as each
/// iteration might refine only a single call site. As a consequence, in
/// practice we use an upper bound on the number of repetitions to limit
/// things.
class DevirtSCCRepeatedPass : public PassInfoMixin<DevirtSCCRepeatedPass> {
public:
  using PassConceptT =
      detail::PassConcept<LazyCallGraph::SCC, CGSCCAnalysisManager,
                          LazyCallGraph &, CGSCCUpdateResult &>;

  explicit DevirtSCCRepeatedPass(std::unique_ptr<PassConceptT> Pass,
                                 int MaxIterations)
      : Pass(std::move(Pass)), MaxIterations(MaxIterations) {}

  /// Runs the wrapped pass up to \c MaxIterations on the SCC, iterating
  /// whenever an indirect call is refined.
  PreservedAnalyses run(LazyCallGraph::SCC &InitialC, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    OS << "devirt<" << MaxIterations << ">(";
    Pass->printPipeline(OS, MapClassName2PassName);
    OS << ')';
  }

private:
  std::unique_ptr<PassConceptT> Pass;
  int MaxIterations;
};

/// A function to deduce a CGSCC pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
DevirtSCCRepeatedPass createDevirtSCCRepeatedPass(CGSCCPassT &&Pass,
                                                  int MaxIterations) {
  using PassModelT = detail::PassModel<LazyCallGraph::SCC, CGSCCPassT,
                                       PreservedAnalyses, CGSCCAnalysisManager,
                                       LazyCallGraph &, CGSCCUpdateResult &>;
  // Do not use make_unique, it causes too many template instantiations,
  // causing terrible compile times.
  return DevirtSCCRepeatedPass(
      std::unique_ptr<DevirtSCCRepeatedPass::PassConceptT>(
          new PassModelT(std::forward<CGSCCPassT>(Pass))),
      MaxIterations);
}
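
// A sketch of wrapping an SCC pass so it repeats on devirtualization, with an
// illustrative iteration cap of 4 (InlinerPass again just for illustration):
//
//   CGPM.addPass(createDevirtSCCRepeatedPass(InlinerPass(),
//                                            /*MaxIterations=*/4));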

// Clear out the debug logging macro.
#undef DEBUG_TYPE

} // end namespace llvm

#endif // LLVM_ANALYSIS_CGSCCPASSMANAGER_H
//===- llvm/Analysis/IVUsers.h - Induction Variable Users -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements bookkeeping for "interesting" users of expressions
// computed from induction variables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_IVUSERS_H
#define LLVM_ANALYSIS_IVUSERS_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/IR/ValueHandle.h"

namespace llvm {

class AssumptionCache;
class DominatorTree;
class ScalarEvolution;
class SCEV;
class IVUsers;

/// IVStrideUse - Keep track of one use of a strided induction variable.
/// User is the actual user instruction of the operand, and
/// 'OperandValToReplace' is the operand of the User that is the use; the
/// expression itself can be obtained from IVUsers::getExpr.
class IVStrideUse final : public CallbackVH, public ilist_node<IVStrideUse> {
  friend class IVUsers;
public:
  IVStrideUse(IVUsers *P, Instruction* U, Value *O)
    : CallbackVH(U), Parent(P), OperandValToReplace(O) {
  }

  /// getUser - Return the user instruction for this use.
  Instruction *getUser() const {
    return cast<Instruction>(getValPtr());
  }

  /// setUser - Assign a new user instruction for this use.
  void setUser(Instruction *NewUser) {
    setValPtr(NewUser);
  }

  /// getOperandValToReplace - Return the Value of the operand in the user
  /// instruction that this IVStrideUse is representing.
  Value *getOperandValToReplace() const {
    return OperandValToReplace;
  }

  /// setOperandValToReplace - Assign a new Value as the operand value
  /// to replace.
  void setOperandValToReplace(Value *Op) {
    OperandValToReplace = Op;
  }

  /// getPostIncLoops - Return the set of loops for which the expression has
  /// been adjusted to use post-inc mode.
  const PostIncLoopSet &getPostIncLoops() const {
    return PostIncLoops;
  }

  /// transformToPostInc - Transform the expression to post-inc form for the
  /// given loop.
  void transformToPostInc(const Loop *L);

private:
  /// Parent - a pointer to the IVUsers that owns this IVStrideUse.
  IVUsers *Parent;

  /// OperandValToReplace - The Value of the operand in the user instruction
  /// that this IVStrideUse is representing.
  WeakTrackingVH OperandValToReplace;

  /// PostIncLoops - The set of loops for which Expr has been adjusted to
  /// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
  PostIncLoopSet PostIncLoops;

  /// Deleted - Implementation of CallbackVH virtual function to
  /// receive notification when the User is deleted.
  void deleted() override;
};

class IVUsers {
  friend class IVStrideUse;
  Loop *L;
  AssumptionCache *AC;
  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  SmallPtrSet<Instruction*, 16> Processed;

  /// IVUses - A list of all tracked IV uses of induction variable expressions
  /// we are interested in.
  ilist<IVStrideUse> IVUses;

  // Ephemeral values used by @llvm.assume in this function.
  SmallPtrSet<const Value *, 32> EphValues;

public:
  IVUsers(Loop *L, AssumptionCache *AC, LoopInfo *LI, DominatorTree *DT,
          ScalarEvolution *SE);

  IVUsers(IVUsers &&X)
      : L(std::move(X.L)), AC(std::move(X.AC)), LI(std::move(X.LI)),
        DT(std::move(X.DT)), SE(std::move(X.SE)),
        Processed(std::move(X.Processed)), IVUses(std::move(X.IVUses)),
        EphValues(std::move(X.EphValues)) {
    for (IVStrideUse &U : IVUses)
      U.Parent = this;
  }
  IVUsers(const IVUsers &) = delete;
  IVUsers &operator=(IVUsers &&) = delete;
  IVUsers &operator=(const IVUsers &) = delete;

  Loop *getLoop() const { return L; }

  /// AddUsersIfInteresting - Inspect the specified Instruction.  If it is a
  /// reducible SCEV, recursively add its users to the IVUses list and
  /// return true.  Otherwise, return false.
  bool AddUsersIfInteresting(Instruction *I);

  IVStrideUse &AddUser(Instruction *User, Value *Operand);

  /// getReplacementExpr - Return a SCEV expression which computes the
  /// value of the OperandValToReplace of the given IVStrideUse.
  const SCEV *getReplacementExpr(const IVStrideUse &IU) const;

  /// getExpr - Return the expression for the use. Returns nullptr if the result
  /// is not invertible.
  const SCEV *getExpr(const IVStrideUse &IU) const;

  const SCEV *getStride(const IVStrideUse &IU, const Loop *L) const;

  typedef ilist<IVStrideUse>::iterator iterator;
  typedef ilist<IVStrideUse>::const_iterator const_iterator;
  iterator begin() { return IVUses.begin(); }
  iterator end()   { return IVUses.end(); }
  const_iterator begin() const { return IVUses.begin(); }
  const_iterator end() const   { return IVUses.end(); }
  bool empty() const { return IVUses.empty(); }

  bool isIVUserOrOperand(Instruction *Inst) const {
    return Processed.count(Inst);
  }

  void releaseMemory();

  void print(raw_ostream &OS, const Module * = nullptr) const;

  /// dump - This method is used for debugging.
  void dump() const;
};

Pass *createIVUsersPass();

class IVUsersWrapperPass : public LoopPass {
  std::unique_ptr<IVUsers> IU;

public:
  static char ID;

  IVUsersWrapperPass();

  IVUsers &getIU() { return *IU; }
  const IVUsers &getIU() const { return *IU; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;

  void releaseMemory() override;

  void print(raw_ostream &OS, const Module * = nullptr) const override;
};

/// Analysis pass that exposes the \c IVUsers for a loop.
class IVUsersAnalysis : public AnalysisInfoMixin<IVUsersAnalysis> {
  friend AnalysisInfoMixin<IVUsersAnalysis>;
  static AnalysisKey Key;

public:
  typedef IVUsers Result;

  IVUsers run(Loop &L, LoopAnalysisManager &AM,
              LoopStandardAnalysisResults &AR);
};
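
// A hedged sketch of consuming IVUsers from a loop pass (AM and AR as
// provided by the loop pass pipeline):
//
//   IVUsers &IU = AM.getResult<IVUsersAnalysis>(L, AR);
//   for (IVStrideUse &U : IU)
//     if (const SCEV *Expr = IU.getReplacementExpr(U))
//       errs() << *U.getUser() << " computes " << *Expr << '\n';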

} // end namespace llvm

#endif // LLVM_ANALYSIS_IVUSERS_H
//===- PHITransAddr.h - PHI Translation for Addresses -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the PHITransAddr class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_PHITRANSADDR_H
#define LLVM_ANALYSIS_PHITRANSADDR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"

namespace llvm {
class AssumptionCache;
class DominatorTree;
class DataLayout;
class TargetLibraryInfo;

/// PHITransAddr - An address value which tracks and handles phi translation.
/// As we walk "up" the CFG through predecessors, we need to ensure that the
/// address we're tracking is kept up to date.  For example, if we're analyzing
/// an address of "&A[i]" and walk through the definition of 'i' which is a PHI
/// node, we *must* phi translate i to get "&A[j]" or else we will analyze an
/// incorrect pointer in the predecessor block.
///
/// This is designed to be a relatively small object that lives on the stack and
/// is copyable.
///
class PHITransAddr {
  /// Addr - The actual address we're analyzing.
  Value *Addr;

  /// The DataLayout we are playing with.
  const DataLayout &DL;

  /// TLI - The target library info if known, otherwise null.
  const TargetLibraryInfo *TLI = nullptr;

  /// A cache of \@llvm.assume calls used by SimplifyInstruction.
  AssumptionCache *AC;

  /// InstInputs - The inputs for our symbolic address.
  SmallVector<Instruction*, 4> InstInputs;

public:
  PHITransAddr(Value *Addr, const DataLayout &DL, AssumptionCache *AC)
      : Addr(Addr), DL(DL), AC(AC) {
    // If the address is an instruction, the whole thing is considered an input.
    addAsInput(Addr);
  }

  Value *getAddr() const { return Addr; }

  /// needsPHITranslationFromBlock - Return true if moving from the specified
  /// BasicBlock to its predecessors requires PHI translation.
  bool needsPHITranslationFromBlock(BasicBlock *BB) const {
    // We do need translation if one of our input instructions is defined in
    // this block.
    return any_of(InstInputs, [BB](const auto &InstInput) {
      return InstInput->getParent() == BB;
    });
  }

  /// isPotentiallyPHITranslatable - If this needs PHI translation, return true
  /// if we have some hope of doing it.  This should be used as a filter to
  /// avoid calling PHITranslateValue in hopeless situations.
  bool isPotentiallyPHITranslatable() const;

  /// translateValue - PHI translate the current address up the CFG from
  /// CurBB to Pred, updating our state to reflect any needed changes.  If
  /// 'MustDominate' is true, the translated value must dominate PredBB.
  Value *translateValue(BasicBlock *CurBB, BasicBlock *PredBB,
                        const DominatorTree *DT, bool MustDominate);

  /// translateWithInsertion - PHI translate this value into the specified
  /// predecessor block, inserting a computation of the value if it is
  /// unavailable.
  ///
  /// All newly created instructions are added to the NewInsts list.  This
  /// returns null on failure.
  ///
  Value *translateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
                                const DominatorTree &DT,
                                SmallVectorImpl<Instruction *> &NewInsts);

  void dump() const;

  /// verify - Check internal consistency of this data structure.  If the
  /// structure is valid, it returns true.  If invalid, it prints errors and
  /// returns false.
  bool verify() const;

private:
  Value *translateSubExpr(Value *V, BasicBlock *CurBB, BasicBlock *PredBB,
                          const DominatorTree *DT);

  /// insertTranslatedSubExpr - Insert a computation of the PHI translated
  /// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
  /// block.  All newly created instructions are added to the NewInsts list.
  /// This returns null on failure.
  ///
  Value *insertTranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
                                 BasicBlock *PredBB, const DominatorTree &DT,
                                 SmallVectorImpl<Instruction *> &NewInsts);

  /// addAsInput - If the specified value is an instruction, add it as an input.
  Value *addAsInput(Value *V) {
    // If V is an instruction, it is now an input.
    if (Instruction *VI = dyn_cast<Instruction>(V))
      InstInputs.push_back(VI);
    return V;
  }
};
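
// A minimal translation sketch (Ptr, BB, Pred, and DT are hypothetical client
// values; DT may be null when the MustDominate check is not required):
//
//   PHITransAddr Addr(Ptr, DL, AC);
//   if (Addr.needsPHITranslationFromBlock(BB) &&
//       Addr.isPotentiallyPHITranslatable())
//     if (Value *V = Addr.translateValue(BB, Pred, DT, /*MustDominate=*/false))
//       ; // V is the address as seen in the predecessor block Pred.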

} // end namespace llvm

#endif // LLVM_ANALYSIS_PHITRANSADDR_H
//===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DominanceFrontier class, which calculates and holds the
// dominance frontier for a function.
//
// This should be considered deprecated, don't add any more uses of this data
// structure.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIER_H
#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/GenericDomTree.h"
#include <cassert>
#include <map>
#include <set>
#include <utility>

namespace llvm {

class Function;
class raw_ostream;

//===----------------------------------------------------------------------===//
/// DominanceFrontierBase - Common base class for computing forward and inverse
/// dominance frontiers for a function.
///
template <class BlockT, bool IsPostDom>
class DominanceFrontierBase {
public:
  using DomSetType = std::set<BlockT *>;                // Dom set for a bb
  using DomSetMapType = std::map<BlockT *, DomSetType>; // Dom set map

protected:
  using BlockTraits = GraphTraits<BlockT *>;

  DomSetMapType Frontiers;
  // Postdominators can have multiple roots.
  SmallVector<BlockT *, IsPostDom ? 4 : 1> Roots;
  static constexpr bool IsPostDominators = IsPostDom;

public:
  DominanceFrontierBase() = default;

  /// getRoots - Return the root blocks of the current CFG.  This may include
  /// multiple blocks if we are computing post dominators.  For forward
  /// dominators, this will always be a single block (the entry node).
  const SmallVectorImpl<BlockT *> &getRoots() const { return Roots; }

  BlockT *getRoot() const {
    assert(Roots.size() == 1 && "Should always have entry node!");
    return Roots[0];
  }

  /// isPostDominator - Returns true if this analysis is based on postdominators.
  bool isPostDominator() const {
    return IsPostDominators;
  }

  void releaseMemory() {
    Frontiers.clear();
  }

  // Accessor interface:
  using iterator = typename DomSetMapType::iterator;
  using const_iterator = typename DomSetMapType::const_iterator;

  iterator begin() { return Frontiers.begin(); }
  const_iterator begin() const { return Frontiers.begin(); }
  iterator end() { return Frontiers.end(); }
  const_iterator end() const { return Frontiers.end(); }
  iterator find(BlockT *B) { return Frontiers.find(B); }
  const_iterator find(BlockT *B) const { return Frontiers.find(B); }

  iterator addBasicBlock(BlockT *BB, const DomSetType &frontier) {
    assert(find(BB) == end() && "Block already in DominanceFrontier!");
    return Frontiers.insert(std::make_pair(BB, frontier)).first;
  }

  /// removeBlock - Remove basic block BB's frontier.
  void removeBlock(BlockT *BB);

  void addToFrontier(iterator I, BlockT *Node);

  void removeFromFrontier(iterator I, BlockT *Node);

  /// compareDomSet - Return false if two domsets match. Otherwise
  /// return true.
  bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const;

  /// compare - Return true if the other dominance frontier base matches
  /// this dominance frontier base. Otherwise return false.
  bool compare(DominanceFrontierBase &Other) const;

  /// print - Convert to human readable form
  ///
  void print(raw_ostream &OS) const;

  /// dump - Dump the dominance frontier to dbgs().
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const;
#endif
};

//===-------------------------------------
/// ForwardDominanceFrontierBase - Concrete subclass of DominanceFrontierBase
/// that is used to compute forward dominance frontiers.
///
template <class BlockT>
class ForwardDominanceFrontierBase
    : public DominanceFrontierBase<BlockT, false> {
private:
  using BlockTraits = GraphTraits<BlockT *>;

public:
  using DomTreeT = DomTreeBase<BlockT>;
  using DomTreeNodeT = DomTreeNodeBase<BlockT>;
  using DomSetType = typename DominanceFrontierBase<BlockT, false>::DomSetType;

  void analyze(DomTreeT &DT) {
    assert(DT.root_size() == 1 &&
           "Only one entry block for forward domfronts!");
    this->Roots = {DT.getRoot()};
    calculate(DT, DT[this->Roots[0]]);
  }

  const DomSetType &calculate(const DomTreeT &DT, const DomTreeNodeT *Node);
};

class DominanceFrontier : public ForwardDominanceFrontierBase<BasicBlock> {
public:
  using DomTreeT = DomTreeBase<BasicBlock>;
  using DomTreeNodeT = DomTreeNodeBase<BasicBlock>;
  using DomSetType = DominanceFrontierBase<BasicBlock, false>::DomSetType;
  using iterator = DominanceFrontierBase<BasicBlock, false>::iterator;
  using const_iterator =
      DominanceFrontierBase<BasicBlock, false>::const_iterator;

  /// Handle invalidation explicitly.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);
};

class DominanceFrontierWrapperPass : public FunctionPass {
  DominanceFrontier DF;

public:
  static char ID; // Pass ID, replacement for typeid

  DominanceFrontierWrapperPass();

  DominanceFrontier &getDominanceFrontier() { return DF; }
  const DominanceFrontier &getDominanceFrontier() const { return DF;  }

  void releaseMemory() override;

  bool runOnFunction(Function &) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void print(raw_ostream &OS, const Module * = nullptr) const override;

  void dump() const;
};

extern template class DominanceFrontierBase<BasicBlock, false>;
extern template class DominanceFrontierBase<BasicBlock, true>;
extern template class ForwardDominanceFrontierBase<BasicBlock>;

/// Analysis pass which computes a \c DominanceFrontier.
class DominanceFrontierAnalysis
    : public AnalysisInfoMixin<DominanceFrontierAnalysis> {
  friend AnalysisInfoMixin<DominanceFrontierAnalysis>;

  static AnalysisKey Key;

public:
  /// Provide the result type for this analysis pass.
  using Result = DominanceFrontier;

  /// Run the analysis pass over a function and produce a dominance frontier.
  DominanceFrontier run(Function &F, FunctionAnalysisManager &AM);
};
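
// A hedged lookup sketch: compute the frontier for a function and query one
// block's frontier set (FAM and BB are assumed client values):
//
//   DominanceFrontier &DF = FAM.getResult<DominanceFrontierAnalysis>(F);
//   auto It = DF.find(&BB);
//   if (It != DF.end())
//     for (BasicBlock *FrontierBB : It->second)
//       errs() << FrontierBB->getName() << '\n';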

/// Printer pass for the \c DominanceFrontier.
class DominanceFrontierPrinterPass
    : public PassInfoMixin<DominanceFrontierPrinterPass> {
  raw_ostream &OS;

public:
  explicit DominanceFrontierPrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_DOMINANCEFRONTIER_H
//===- LazyBranchProbabilityInfo.h - Lazy Branch Probability ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is an alternative analysis pass to BranchProbabilityInfoWrapperPass.
// The difference is that with this pass the branch probabilities are not
// computed when the analysis pass is executed but rather when the BPI result
// is explicitly requested by the analysis client.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
#define LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Pass.h"

namespace llvm {
class AnalysisUsage;
class Function;
class LoopInfo;
class TargetLibraryInfo;

/// This is an alternative analysis pass to
/// BranchProbabilityInfoWrapperPass.  The difference is that with this pass the
/// branch probabilities are not computed when the analysis pass is executed but
/// rather when the BPI result is explicitly requested by the analysis client.
///
/// There are some additional requirements for any client pass that wants to use
/// the analysis:
///
/// 1. The pass needs to initialize dependent passes with:
///
///   INITIALIZE_PASS_DEPENDENCY(LazyBPIPass)
///
/// 2. Similarly, getAnalysisUsage should call:
///
///   LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AU)
///
/// 3. The computed BPI should be requested with
///    getAnalysis<LazyBranchProbabilityInfoPass>().getBPI() before LoopInfo
///    could be invalidated, for example by changing the CFG (see the usage
///    sketch after this class).
///
/// Note that it is expected that we wouldn't need this functionality for the
/// new PM since with the new PM, analyses are executed on demand.
class LazyBranchProbabilityInfoPass : public FunctionPass {

  /// Wraps a BPI to allow lazy computation of the branch probabilities.
  ///
  /// A pass that only conditionally uses BPI can unconditionally require the
  /// analysis without paying for the overhead if BPI doesn't end up being used.
  class LazyBranchProbabilityInfo {
  public:
    LazyBranchProbabilityInfo(const Function *F, const LoopInfo *LI,
                              const TargetLibraryInfo *TLI)
        : F(F), LI(LI), TLI(TLI) {}

    /// Retrieve the BPI with the branch probabilities computed.
    BranchProbabilityInfo &getCalculated() {
      if (!Calculated) {
        assert(F && LI && "call setAnalysis");
        BPI.calculate(*F, *LI, TLI, nullptr, nullptr);
        Calculated = true;
      }
      return BPI;
    }

    const BranchProbabilityInfo &getCalculated() const {
      return const_cast<LazyBranchProbabilityInfo *>(this)->getCalculated();
    }

  private:
    BranchProbabilityInfo BPI;
    bool Calculated = false;
    const Function *F;
    const LoopInfo *LI;
    const TargetLibraryInfo *TLI;
  };

  std::unique_ptr<LazyBranchProbabilityInfo> LBPI;

public:
  static char ID;

  LazyBranchProbabilityInfoPass();

  /// Compute and return the branch probabilities.
  BranchProbabilityInfo &getBPI() { return LBPI->getCalculated(); }

  /// Compute and return the branch probabilities.
  const BranchProbabilityInfo &getBPI() const { return LBPI->getCalculated(); }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Helper for client passes to set up the analysis usage on behalf of this
  /// pass.
  static void getLazyBPIAnalysisUsage(AnalysisUsage &AU);

  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M) const override;
};
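
// Putting requirements (1)-(3) together, a client legacy pass might look like
// this sketch (MyPass is hypothetical):
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AU);
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     BranchProbabilityInfo &BPI =
//         getAnalysis<LazyBranchProbabilityInfoPass>().getBPI();
//     return false; // Query BPI before anything invalidates LoopInfo.
//   }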

/// Helper for client passes to initialize dependent passes for LBPI.
void initializeLazyBPIPassPass(PassRegistry &Registry);

/// Simple trait class that provides a mapping between BPI passes and the
/// corresponding BPInfo.
template <typename PassT> struct BPIPassTrait {
  static PassT &getBPI(PassT *P) { return *P; }
};

template <> struct BPIPassTrait<LazyBranchProbabilityInfoPass> {
  static BranchProbabilityInfo &getBPI(LazyBranchProbabilityInfoPass *P) {
    return P->getBPI();
  }
};
} // end namespace llvm

#endif // LLVM_ANALYSIS_LAZYBRANCHPROBABILITYINFO_H
//===- BlockFrequencyInfo.h - Block Frequency Analysis ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
#define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H

#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include <cstdint>
#include <memory>
#include <optional>

namespace llvm {

class BasicBlock;
class BranchProbabilityInfo;
class Function;
class LoopInfo;
class Module;
class raw_ostream;
template <class BlockT> class BlockFrequencyInfoImpl;

enum PGOViewCountsType { PGOVCT_None, PGOVCT_Graph, PGOVCT_Text };

/// BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to
/// estimate IR basic block frequencies.
class BlockFrequencyInfo {
  using ImplType = BlockFrequencyInfoImpl<BasicBlock>;

  std::unique_ptr<ImplType> BFI;

public:
  BlockFrequencyInfo();
  BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
                     const LoopInfo &LI);
  BlockFrequencyInfo(const BlockFrequencyInfo &) = delete;
  BlockFrequencyInfo &operator=(const BlockFrequencyInfo &) = delete;
  BlockFrequencyInfo(BlockFrequencyInfo &&Arg);
  BlockFrequencyInfo &operator=(BlockFrequencyInfo &&RHS);
  ~BlockFrequencyInfo();

  /// Handle invalidation explicitly.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);

  const Function *getFunction() const;
  const BranchProbabilityInfo *getBPI() const;
  void view(StringRef = "BlockFrequencyDAGs") const;

  /// getBlockFreq - Return the block frequency, or 0 if we don't have the
  /// information. Note that the entry block's frequency is ENTRY_FREQ; the
  /// value is meaningful only in comparison to other block frequencies, not
  /// on its own. We do this to avoid floating point arithmetic.
  BlockFrequency getBlockFreq(const BasicBlock *BB) const;

  /// Returns the estimated profile count of \p BB.
  /// This computes the relative block frequency of \p BB and multiplies it by
  /// the enclosing function's count (if available) and returns the value.
  std::optional<uint64_t>
  getBlockProfileCount(const BasicBlock *BB, bool AllowSynthetic = false) const;

  /// Returns the estimated profile count of \p Freq.
  /// This uses the frequency \p Freq and multiplies it by
  /// the enclosing function's count (if available) and returns the value.
  std::optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;

  /// Returns true if \p BB is an irreducible loop header
  /// block. Otherwise false.
  bool isIrrLoopHeader(const BasicBlock *BB);

  // Set the frequency of the given basic block.
  void setBlockFreq(const BasicBlock *BB, uint64_t Freq);

  /// Set the frequency of \p ReferenceBB to \p Freq and scale the frequencies
  /// of the blocks in \p BlocksToScale such that their frequencies relative
  /// to \p ReferenceBB remain unchanged.
  void setBlockFreqAndScale(const BasicBlock *ReferenceBB, uint64_t Freq,
                            SmallPtrSetImpl<BasicBlock *> &BlocksToScale);

  /// calculate - compute block frequency info for the given function.
  void calculate(const Function &F, const BranchProbabilityInfo &BPI,
                 const LoopInfo &LI);

  // Print the block frequency Freq to OS using the current function's entry
  // frequency to convert Freq into a relative decimal form.
  raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;

  // Convenience method that attempts to look up the frequency associated with
  // BB and print it to OS.
  raw_ostream &printBlockFreq(raw_ostream &OS, const BasicBlock *BB) const;

  uint64_t getEntryFreq() const;
  void releaseMemory();
  void print(raw_ostream &OS) const;

  // Compare to the other BFI and verify they match.
  void verifyMatch(BlockFrequencyInfo &Other) const;
};
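
// A minimal sketch of interpreting the result (getRelativeFreq is a
// hypothetical helper): since only ratios are meaningful, scale by the
// entry frequency.
//
//   double getRelativeFreq(const BlockFrequencyInfo &BFI,
//                          const BasicBlock *BB) {
//     // A block twice as hot as the entry block yields 2.0.
//     return double(BFI.getBlockFreq(BB).getFrequency()) / BFI.getEntryFreq();
//   }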

/// Analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyAnalysis
    : public AnalysisInfoMixin<BlockFrequencyAnalysis> {
  friend AnalysisInfoMixin<BlockFrequencyAnalysis>;

  static AnalysisKey Key;

public:
  /// Provide the result type for this analysis pass.
  using Result = BlockFrequencyInfo;

  /// Run the analysis pass over a function and produce BFI.
  Result run(Function &F, FunctionAnalysisManager &AM);
};
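
// Under the new pass manager the result is obtained on demand; a minimal
// sketch (MyPass is a hypothetical new-PM pass):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     auto &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
//     ...
//     return PreservedAnalyses::all();
//   }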

/// Printer pass for the \c BlockFrequencyInfo results.
class BlockFrequencyPrinterPass
    : public PassInfoMixin<BlockFrequencyPrinterPass> {
  raw_ostream &OS;

public:
  explicit BlockFrequencyPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyInfoWrapperPass : public FunctionPass {
  BlockFrequencyInfo BFI;

public:
  static char ID;

  BlockFrequencyInfoWrapperPass();
  ~BlockFrequencyInfoWrapperPass() override;

  BlockFrequencyInfo &getBFI() { return BFI; }
  const BlockFrequencyInfo &getBFI() const { return BFI; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M) const override;
};

} // end namespace llvm

#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
//===- LazyBlockFrequencyInfo.h - Lazy Block Frequency Analysis -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.  The
// difference is that with this pass the block frequencies are not computed when
// the analysis pass is executed but rather when the BFI result is explicitly
// requested by the analysis client.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H
#define LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H

#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/LazyBranchProbabilityInfo.h"
#include "llvm/Pass.h"

namespace llvm {
class AnalysisUsage;
class Function;
class LoopInfo;

/// Wraps a BFI to allow lazy computation of the block frequencies.
///
/// A pass that only conditionally uses BFI can unconditionally require the
/// analysis without paying for the overhead if BFI doesn't end up being used.
template <typename FunctionT, typename BranchProbabilityInfoPassT,
          typename LoopInfoT, typename BlockFrequencyInfoT>
class LazyBlockFrequencyInfo {
public:
  LazyBlockFrequencyInfo() = default;

  /// Set up the per-function input.
  void setAnalysis(const FunctionT *F, BranchProbabilityInfoPassT *BPIPass,
                   const LoopInfoT *LI) {
    this->F = F;
    this->BPIPass = BPIPass;
    this->LI = LI;
  }

  /// Retrieve the BFI with the block frequencies computed.
  BlockFrequencyInfoT &getCalculated() {
    if (!Calculated) {
      assert(F && BPIPass && LI && "call setAnalysis");
      BFI.calculate(
          *F, BPIPassTrait<BranchProbabilityInfoPassT>::getBPI(BPIPass), *LI);
      Calculated = true;
    }
    return BFI;
  }

  const BlockFrequencyInfoT &getCalculated() const {
    return const_cast<LazyBlockFrequencyInfo *>(this)->getCalculated();
  }

  void releaseMemory() {
    BFI.releaseMemory();
    Calculated = false;
    setAnalysis(nullptr, nullptr, nullptr);
  }

private:
  BlockFrequencyInfoT BFI;
  bool Calculated = false;
  const FunctionT *F = nullptr;
  BranchProbabilityInfoPassT *BPIPass = nullptr;
  const LoopInfoT *LI = nullptr;
};
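
// Expected lifecycle, sketched (normally driven by the wrapper pass below):
//
//   LazyBlockFrequencyInfo<Function, LazyBranchProbabilityInfoPass, LoopInfo,
//                          BlockFrequencyInfo> LBFI;
//   LBFI.setAnalysis(&F, BPIPass, &LI); // per function, in runOnFunction
//   // ... only if BFI turns out to be needed:
//   BlockFrequencyInfo &BFI = LBFI.getCalculated(); // computed on first call
//   // ... between functions:
//   LBFI.releaseMemory();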

/// This is an alternative analysis pass to
/// BlockFrequencyInfoWrapperPass.  The difference is that with this pass the
/// block frequencies are not computed when the analysis pass is executed but
/// rather when the BFI result is explicitly requested by the analysis client.
///
/// There are some additional requirements for any client pass that wants to use
/// the analysis:
///
/// 1. The pass needs to initialize dependent passes with:
///
///   INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
///
/// 2. Similarly, getAnalysisUsage should call:
///
///   LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU)
///
/// 3. The computed BFI should be requested with
///    getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() before either LoopInfo
///    or BPI could be invalidated for example by changing the CFG.
///
/// Note that this functionality is not expected to be needed with the new PM,
/// since there analyses are executed on demand.
///
/// A usage sketch follows the class definition below.
class LazyBlockFrequencyInfoPass : public FunctionPass {
private:
  LazyBlockFrequencyInfo<Function, LazyBranchProbabilityInfoPass, LoopInfo,
                         BlockFrequencyInfo>
      LBFI;

public:
  static char ID;

  LazyBlockFrequencyInfoPass();

  /// Compute and return the block frequencies.
  BlockFrequencyInfo &getBFI() { return LBFI.getCalculated(); }

  /// Compute and return the block frequencies.
  const BlockFrequencyInfo &getBFI() const { return LBFI.getCalculated(); }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Helper for client passes to set up the analysis usage on behalf of this
  /// pass.
  static void getLazyBFIAnalysisUsage(AnalysisUsage &AU);

  bool runOnFunction(Function &F) override;
  void releaseMemory() override;
  void print(raw_ostream &OS, const Module *M) const override;
};
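
// The usage sketch referenced above (MyPass is a hypothetical client pass;
// initialization lives in its .cpp file):
//
//   INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
//   }
//
//   bool MyPass::runOnFunction(Function &F) {
//     // Request BFI before the CFG (and thus LoopInfo/BPI) can change.
//     BlockFrequencyInfo &BFI =
//         getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
//     ...
//   }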

/// Helper for client passes to initialize dependent passes for LBFI.
void initializeLazyBFIPassPass(PassRegistry &Registry);
} // end namespace llvm
#endif // LLVM_ANALYSIS_LAZYBLOCKFREQUENCYINFO_H
//===- MLInlineAdvisor.h - ML-based InlineAdvisor factories -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MLINLINEADVISOR_H
#define LLVM_ANALYSIS_MLINLINEADVISOR_H

#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/IR/PassManager.h"

#include <deque>
#include <map>
#include <memory>
#include <optional>

namespace llvm {
class DiagnosticInfoOptimizationBase;
class Module;
class MLInlineAdvice;

class MLInlineAdvisor : public InlineAdvisor {
public:
  MLInlineAdvisor(Module &M, ModuleAnalysisManager &MAM,
                  std::unique_ptr<MLModelRunner> ModelRunner,
                  std::function<bool(CallBase &)> GetDefaultAdvice);

  virtual ~MLInlineAdvisor() = default;

  void onPassEntry(LazyCallGraph::SCC *SCC) override;
  void onPassExit(LazyCallGraph::SCC *SCC) override;

  int64_t getIRSize(Function &F) const {
    return getCachedFPI(F).TotalInstructionCount;
  }
  void onSuccessfulInlining(const MLInlineAdvice &Advice,
                            bool CalleeWasDeleted);

  bool isForcedToStop() const { return ForceStop; }
  int64_t getLocalCalls(Function &F);
  const MLModelRunner &getModelRunner() const { return *ModelRunner.get(); }
  FunctionPropertiesInfo &getCachedFPI(Function &) const;

protected:
  std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;

  std::unique_ptr<InlineAdvice> getMandatoryAdvice(CallBase &CB,
                                                   bool Advice) override;

  virtual std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB);

  virtual std::unique_ptr<MLInlineAdvice>
  getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE);

  // Get the initial 'level' of the function, or 0 if the function has been
  // introduced afterwards.
  // TODO: should we keep this updated?
  unsigned getInitialFunctionLevel(const Function &F) const;

  std::unique_ptr<MLModelRunner> ModelRunner;
  std::function<bool(CallBase &)> GetDefaultAdvice;

private:
  int64_t getModuleIRSize() const;
  std::unique_ptr<InlineAdvice>
  getSkipAdviceIfUnreachableCallsite(CallBase &CB);
  void print(raw_ostream &OS) const override;

  // Use std::map because insertions do not invalidate its iterators or
  // references, so results from multiple `getCachedFPI` calls can be used
  // together without copying to guard against invalidation.
  mutable std::map<const Function *, FunctionPropertiesInfo> FPICache;

  LazyCallGraph &CG;

  int64_t NodeCount = 0;
  int64_t EdgeCount = 0;
  int64_t EdgesOfLastSeenNodes = 0;

  std::map<const LazyCallGraph::Node *, unsigned> FunctionLevels;
  const int32_t InitialIRSize = 0;
  int32_t CurrentIRSize = 0;
  llvm::SmallPtrSet<const LazyCallGraph::Node *, 1> NodesInLastSCC;
  DenseSet<const LazyCallGraph::Node *> AllNodes;
  bool ForceStop = false;
};
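
// A minimal subclass sketch (MyAdvisor is a hypothetical name): concrete
// advisors typically override getAdviceFromModel to consult ModelRunner;
// here the default-advice callback stands in for a real model query.
//
//   class MyAdvisor : public MLInlineAdvisor {
//     using MLInlineAdvisor::MLInlineAdvisor;
//     std::unique_ptr<MLInlineAdvice>
//     getAdviceFromModel(CallBase &CB,
//                        OptimizationRemarkEmitter &ORE) override {
//       bool Recommend = GetDefaultAdvice(CB);
//       return std::make_unique<MLInlineAdvice>(this, CB, ORE, Recommend);
//     }
//   };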

/// InlineAdvice that tracks changes post inlining. For that reason, it only
/// overrides the "successful inlining" extension points.
class MLInlineAdvice : public InlineAdvice {
public:
  MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                 OptimizationRemarkEmitter &ORE, bool Recommendation);
  virtual ~MLInlineAdvice() = default;

  void recordInliningImpl() override;
  void recordInliningWithCalleeDeletedImpl() override;
  void recordUnsuccessfulInliningImpl(const InlineResult &Result) override;
  void recordUnattemptedInliningImpl() override;

  Function *getCaller() const { return Caller; }
  Function *getCallee() const { return Callee; }

  const int64_t CallerIRSize;
  const int64_t CalleeIRSize;
  const int64_t CallerAndCalleeEdges;
  void updateCachedCallerFPI(FunctionAnalysisManager &FAM) const;

private:
  void reportContextForRemark(DiagnosticInfoOptimizationBase &OR);
  MLInlineAdvisor *getAdvisor() const {
    return static_cast<MLInlineAdvisor *>(Advisor);
  };
  // Make a copy of the FPI of the caller right before inlining. If inlining
  // fails, we can just update the cache with that value.
  const FunctionPropertiesInfo PreInlineCallerFPI;
  std::optional<FunctionPropertiesUpdater> FPU;
};

} // namespace llvm

#endif // LLVM_ANALYSIS_MLINLINEADVISOR_H
//===- llvm/Analysis/ScalarEvolutionDivision.h - See below ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the class that knows how to divide SCEV's.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONDIVISION_H
#define LLVM_ANALYSIS_SCALAREVOLUTIONDIVISION_H

#include "llvm/Analysis/ScalarEvolutionExpressions.h"

namespace llvm {

class SCEV;

class ScalarEvolution;

struct SCEVCouldNotCompute;

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder);

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator; the following visitors therefore have empty
  // implementations.
  void visitPtrToIntExpr(const SCEVPtrToIntExpr *Numerator) {}
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
  void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
  void visitSequentialUMinExpr(const SCEVSequentialUMinExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator);

  void visitVScale(const SCEVVScale *Numerator);

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator);

  void visitAddExpr(const SCEVAddExpr *Numerator);

  void visitMulExpr(const SCEVMulExpr *Numerator);

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator);

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator);

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};
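
// A minimal usage sketch (FourTimesI and Two are hypothetical SCEVs for
// 4 * %i and the constant 2; the concrete results depend on the IR):
//
//   const SCEV *Quotient, *Remainder;
//   SCEVDivision::divide(SE, /*Numerator=*/FourTimesI, /*Denominator=*/Two,
//                        &Quotient, &Remainder);
//   // Here Quotient would be 2 * %i and Remainder 0; when division is not
//   // possible, cannotDivide sets Quotient = 0 and Remainder = Numerator.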

} // end namespace llvm

#endif // LLVM_ANALYSIS_SCALAREVOLUTIONDIVISION_H
//===- BitstreamReader.h - Low-level bitstream reader interface -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines the BitstreamReader class.  This class can be used to
// read an arbitrary bitstream, regardless of its contents.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITSTREAM_BITSTREAMREADER_H
#define LLVM_BITSTREAM_BITSTREAMREADER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Bitstream/BitCodes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

/// This class maintains the abbreviations read from a block info block.
class BitstreamBlockInfo {
public:
  /// This contains information emitted to BLOCKINFO_BLOCK blocks. These
  /// describe abbreviations that all blocks of the specified ID inherit.
  struct BlockInfo {
    unsigned BlockID = 0;
    std::vector<std::shared_ptr<BitCodeAbbrev>> Abbrevs;
    std::string Name;
    std::vector<std::pair<unsigned, std::string>> RecordNames;
  };

private:
  std::vector<BlockInfo> BlockInfoRecords;

public:
  /// If there is block info for the specified ID, return it, otherwise return
  /// null.
  const BlockInfo *getBlockInfo(unsigned BlockID) const {
    // Common case, the most recent entry matches BlockID.
    if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID)
      return &BlockInfoRecords.back();

    for (const BlockInfo &BI : BlockInfoRecords)
      if (BI.BlockID == BlockID)
        return &BI;
    return nullptr;
  }

  BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
    if (const BlockInfo *BI = getBlockInfo(BlockID))
      return *const_cast<BlockInfo*>(BI);

    // Otherwise, add a new record.
    BlockInfoRecords.emplace_back();
    BlockInfoRecords.back().BlockID = BlockID;
    return BlockInfoRecords.back();
  }
};

/// This represents a position within a bitstream. There may be multiple
/// independent cursors reading within one bitstream, each maintaining its
/// own local state.
class SimpleBitstreamCursor {
  ArrayRef<uint8_t> BitcodeBytes;
  size_t NextChar = 0;

public:
  /// This is the current data we have pulled from the stream but have not
  /// returned to the client. This is specifically and intentionally defined to
  /// follow the word size of the host machine for efficiency. We use word_t in
  /// places that are aware of this to make it perfectly explicit what is going
  /// on.
  using word_t = size_t;

private:
  word_t CurWord = 0;

  /// This is the number of bits in CurWord that are valid. This is always from
  /// [0...bits_of(size_t)-1] inclusive.
  unsigned BitsInCurWord = 0;

public:
  SimpleBitstreamCursor() = default;
  explicit SimpleBitstreamCursor(ArrayRef<uint8_t> BitcodeBytes)
      : BitcodeBytes(BitcodeBytes) {}
  explicit SimpleBitstreamCursor(StringRef BitcodeBytes)
      : BitcodeBytes(arrayRefFromStringRef(BitcodeBytes)) {}
  explicit SimpleBitstreamCursor(MemoryBufferRef BitcodeBytes)
      : SimpleBitstreamCursor(BitcodeBytes.getBuffer()) {}

  bool canSkipToPos(size_t pos) const {
    // pos can be skipped to if it is a valid address or one byte past the end.
    return pos <= BitcodeBytes.size();
  }

  bool AtEndOfStream() {
    return BitsInCurWord == 0 && BitcodeBytes.size() <= NextChar;
  }

  /// Return the bit # of the bit we are reading.
  uint64_t GetCurrentBitNo() const {
    return NextChar*CHAR_BIT - BitsInCurWord;
  }

  // Return the byte # of the current bit.
  uint64_t getCurrentByteNo() const { return GetCurrentBitNo() / 8; }

  ArrayRef<uint8_t> getBitcodeBytes() const { return BitcodeBytes; }

  /// Reset the stream to the specified bit number.
  Error JumpToBit(uint64_t BitNo) {
    size_t ByteNo = size_t(BitNo/8) & ~(sizeof(word_t)-1);
    unsigned WordBitNo = unsigned(BitNo & (sizeof(word_t)*8-1));
    assert(canSkipToPos(ByteNo) && "Invalid location");

    // Move the cursor to the right word.
    NextChar = ByteNo;
    BitsInCurWord = 0;

    // Skip over any bits that are already consumed.
    if (WordBitNo) {
      if (Expected<word_t> Res = Read(WordBitNo))
        return Error::success();
      else
        return Res.takeError();
    }

    return Error::success();
  }

  /// Get a pointer into the bitstream at the specified byte offset.
  const uint8_t *getPointerToByte(uint64_t ByteNo, uint64_t NumBytes) {
    return BitcodeBytes.data() + ByteNo;
  }

  /// Get a pointer into the bitstream at the specified bit offset.
  ///
  /// The bit offset must be on a byte boundary.
  const uint8_t *getPointerToBit(uint64_t BitNo, uint64_t NumBytes) {
    assert(!(BitNo % 8) && "Expected bit on byte boundary");
    return getPointerToByte(BitNo / 8, NumBytes);
  }

  Error fillCurWord() {
    if (NextChar >= BitcodeBytes.size())
      return createStringError(std::errc::io_error,
                               "Unexpected end of file reading %u of %u bytes",
                               NextChar, BitcodeBytes.size());

    // Read the next word from the stream.
    const uint8_t *NextCharPtr = BitcodeBytes.data() + NextChar;
    unsigned BytesRead;
    if (BitcodeBytes.size() >= NextChar + sizeof(word_t)) {
      BytesRead = sizeof(word_t);
      CurWord =
          support::endian::read<word_t, support::little, support::unaligned>(
              NextCharPtr);
    } else {
      // Short read.
      BytesRead = BitcodeBytes.size() - NextChar;
      CurWord = 0;
      for (unsigned B = 0; B != BytesRead; ++B)
        CurWord |= uint64_t(NextCharPtr[B]) << (B * 8);
    }
    NextChar += BytesRead;
    BitsInCurWord = BytesRead * 8;
    return Error::success();
  }

  Expected<word_t> Read(unsigned NumBits) {
    static const unsigned BitsInWord = sizeof(word_t) * 8;

    assert(NumBits && NumBits <= BitsInWord &&
           "Cannot return zero or more than BitsInWord bits!");

    static const unsigned Mask = sizeof(word_t) > 4 ? 0x3f : 0x1f;

    // If the field is fully contained by CurWord, return it quickly.
    if (BitsInCurWord >= NumBits) {
      word_t R = CurWord & (~word_t(0) >> (BitsInWord - NumBits));

      // Use a mask to avoid undefined behavior.
      CurWord >>= (NumBits & Mask);

      BitsInCurWord -= NumBits;
      return R;
    }

    word_t R = BitsInCurWord ? CurWord : 0;
    unsigned BitsLeft = NumBits - BitsInCurWord;

    if (Error fillResult = fillCurWord())
      return std::move(fillResult);

    // If we run out of data, abort.
    if (BitsLeft > BitsInCurWord)
      return createStringError(std::errc::io_error,
                               "Unexpected end of file reading %u of %u bits",
                               BitsInCurWord, BitsLeft);

    word_t R2 = CurWord & (~word_t(0) >> (BitsInWord - BitsLeft));

    // Use a mask to avoid undefined behavior.
    CurWord >>= (BitsLeft & Mask);

    BitsInCurWord -= BitsLeft;

    R |= R2 << (NumBits - BitsLeft);

    return R;
  }

  Expected<uint32_t> ReadVBR(const unsigned NumBits) {
    Expected<unsigned> MaybeRead = Read(NumBits);
    if (!MaybeRead)
      return MaybeRead;
    uint32_t Piece = MaybeRead.get();

    assert(NumBits <= 32 && NumBits >= 1 && "Invalid NumBits value");
    const uint32_t MaskBitOrder = (NumBits - 1);
    const uint32_t Mask = 1UL << MaskBitOrder;

    if ((Piece & Mask) == 0)
      return Piece;

    uint32_t Result = 0;
    unsigned NextBit = 0;
    while (true) {
      Result |= (Piece & (Mask - 1)) << NextBit;

      if ((Piece & Mask) == 0)
        return Result;

      NextBit += NumBits-1;
      if (NextBit >= 32)
        return createStringError(std::errc::illegal_byte_sequence,
                                 "Unterminated VBR");

      MaybeRead = Read(NumBits);
      if (!MaybeRead)
        return MaybeRead;
      Piece = MaybeRead.get();
    }
  }

  // Read a VBR that may have a value up to 64-bits in size. The chunk size of
  // the VBR must still be <= 32 bits though.
  Expected<uint64_t> ReadVBR64(const unsigned NumBits) {
    Expected<uint64_t> MaybeRead = Read(NumBits);
    if (!MaybeRead)
      return MaybeRead;
    uint32_t Piece = MaybeRead.get();
    assert(NumBits <= 32 && NumBits >= 1 && "Invalid NumBits value");
    const uint32_t MaskBitOrder = (NumBits - 1);
    const uint32_t Mask = 1UL << MaskBitOrder;

    if ((Piece & Mask) == 0)
      return uint64_t(Piece);

    uint64_t Result = 0;
    unsigned NextBit = 0;
    while (true) {
      Result |= uint64_t(Piece & (Mask - 1)) << NextBit;

      if ((Piece & Mask) == 0)
        return Result;

      NextBit += NumBits-1;
      if (NextBit >= 64)
        return createStringError(std::errc::illegal_byte_sequence,
                                 "Unterminated VBR");

      MaybeRead = Read(NumBits);
      if (!MaybeRead)
        return MaybeRead;
      Piece = MaybeRead.get();
    }
  }
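
  // Worked example of the VBR encoding decoded above (illustrative, VBR6,
  // i.e. NumBits == 6): each chunk carries NumBits-1 payload bits, low bits
  // first, and the top bit of each chunk is a continuation flag.
  //
  //   value 300 = 0b1'0010'1100
  //     chunk 0: low 5 bits = 0b01100 (12), continue -> emit 0b101100 (44)
  //     chunk 1: next 5 bits = 0b01001 (9), stop     -> emit 0b001001 (9)
  //
  // ReadVBR(6) reassembles 12 | (9 << 5) == 300.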

  void SkipToFourByteBoundary() {
    // If word_t is 64-bits and if we've read less than 32 bits, just dump
    // the bits we have up to the next 32-bit boundary.
    if (sizeof(word_t) > 4 &&
        BitsInCurWord >= 32) {
      CurWord >>= BitsInCurWord-32;
      BitsInCurWord = 32;
      return;
    }

    BitsInCurWord = 0;
  }

  /// Return the size of the stream in bytes.
  size_t SizeInBytes() const { return BitcodeBytes.size(); }

  /// Skip to the end of the file.
  void skipToEnd() { NextChar = BitcodeBytes.size(); }

  /// Check whether a reservation of Size elements is plausible.
  bool isSizePlausible(size_t Size) const {
    // Don't allow reserving more elements than the number of bits, assuming
    // at least one bit is needed to encode an element.
    return Size < BitcodeBytes.size() * 8;
  }
};
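
// A minimal read sketch (Bytes is a hypothetical ArrayRef<uint8_t> buffer;
// every Expected<> result must be checked before use):
//
//   SimpleBitstreamCursor Cursor(Bytes);
//   Expected<SimpleBitstreamCursor::word_t> MaybeMagic = Cursor.Read(32);
//   if (!MaybeMagic)
//     return MaybeMagic.takeError();
//   Expected<uint32_t> MaybeCount = Cursor.ReadVBR(6);
//   if (!MaybeCount)
//     return MaybeCount.takeError();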

/// When advancing through a bitstream cursor, each advance can discover a few
/// different kinds of entries:
struct BitstreamEntry {
  enum {
    Error,    // Malformed bitcode was found.
    EndBlock, // We've reached the end of the current block, (or the end of the
              // file, which is treated like a series of EndBlock records.
    SubBlock, // This is the start of a new subblock of a specific ID.
    Record    // This is a record with a specific AbbrevID.
  } Kind;

  unsigned ID;

  static BitstreamEntry getError() {
    BitstreamEntry E; E.Kind = Error; return E;
  }

  static BitstreamEntry getEndBlock() {
    BitstreamEntry E; E.Kind = EndBlock; return E;
  }

  static BitstreamEntry getSubBlock(unsigned ID) {
    BitstreamEntry E; E.Kind = SubBlock; E.ID = ID; return E;
  }

  static BitstreamEntry getRecord(unsigned AbbrevID) {
    BitstreamEntry E; E.Kind = Record; E.ID = AbbrevID; return E;
  }
};

/// This represents a position within a bitcode file, implemented on top of a
/// SimpleBitstreamCursor.
///
/// Unlike iterators, BitstreamCursors are heavy-weight objects that should not
/// be passed by value.
class BitstreamCursor : SimpleBitstreamCursor {
  // This is the declared size of code values used for the current block, in
  // bits.
  unsigned CurCodeSize = 2;

  /// Abbrevs installed in this block.
  std::vector<std::shared_ptr<BitCodeAbbrev>> CurAbbrevs;

  struct Block {
    unsigned PrevCodeSize;
    std::vector<std::shared_ptr<BitCodeAbbrev>> PrevAbbrevs;

    explicit Block(unsigned PCS) : PrevCodeSize(PCS) {}
  };

  /// This tracks the codesize of parent blocks.
  SmallVector<Block, 8> BlockScope;

  BitstreamBlockInfo *BlockInfo = nullptr;

public:
  static const size_t MaxChunkSize = 32;

  BitstreamCursor() = default;
  explicit BitstreamCursor(ArrayRef<uint8_t> BitcodeBytes)
      : SimpleBitstreamCursor(BitcodeBytes) {}
  explicit BitstreamCursor(StringRef BitcodeBytes)
      : SimpleBitstreamCursor(BitcodeBytes) {}
  explicit BitstreamCursor(MemoryBufferRef BitcodeBytes)
      : SimpleBitstreamCursor(BitcodeBytes) {}

  using SimpleBitstreamCursor::AtEndOfStream;
  using SimpleBitstreamCursor::canSkipToPos;
  using SimpleBitstreamCursor::fillCurWord;
  using SimpleBitstreamCursor::getBitcodeBytes;
  using SimpleBitstreamCursor::GetCurrentBitNo;
  using SimpleBitstreamCursor::getCurrentByteNo;
  using SimpleBitstreamCursor::getPointerToByte;
  using SimpleBitstreamCursor::JumpToBit;
  using SimpleBitstreamCursor::Read;
  using SimpleBitstreamCursor::ReadVBR;
  using SimpleBitstreamCursor::ReadVBR64;
  using SimpleBitstreamCursor::SizeInBytes;
  using SimpleBitstreamCursor::skipToEnd;

  /// Return the number of bits used to encode an abbrev #.
  unsigned getAbbrevIDWidth() const { return CurCodeSize; }

  /// Flags that modify the behavior of advance().
  enum {
    /// If this flag is used, the advance() method does not automatically pop
    /// the block scope when the end of a block is reached.
    AF_DontPopBlockAtEnd = 1,

    /// If this flag is used, abbrev entries are returned just like normal
    /// records.
    AF_DontAutoprocessAbbrevs = 2
  };

  /// Advance the current bitstream, returning the next entry in the stream.
  Expected<BitstreamEntry> advance(unsigned Flags = 0) {
    while (true) {
      if (AtEndOfStream())
        return BitstreamEntry::getError();

      Expected<unsigned> MaybeCode = ReadCode();
      if (!MaybeCode)
        return MaybeCode.takeError();
      unsigned Code = MaybeCode.get();

      if (Code == bitc::END_BLOCK) {
        // Pop the end of the block unless Flags tells us not to.
        if (!(Flags & AF_DontPopBlockAtEnd) && ReadBlockEnd())
          return BitstreamEntry::getError();
        return BitstreamEntry::getEndBlock();
      }

      if (Code == bitc::ENTER_SUBBLOCK) {
        if (Expected<unsigned> MaybeSubBlock = ReadSubBlockID())
          return BitstreamEntry::getSubBlock(MaybeSubBlock.get());
        else
          return MaybeSubBlock.takeError();
      }

      if (Code == bitc::DEFINE_ABBREV &&
          !(Flags & AF_DontAutoprocessAbbrevs)) {
        // We read and accumulate abbrevs; the client can't do anything with
        // them anyway.
        if (Error Err = ReadAbbrevRecord())
          return std::move(Err);
        continue;
      }

      return BitstreamEntry::getRecord(Code);
    }
  }

  /// This is a convenience function for clients that don't expect any
  /// subblocks. This just skips over them automatically.
  Expected<BitstreamEntry> advanceSkippingSubblocks(unsigned Flags = 0) {
    while (true) {
      // If we found a normal entry, return it.
      Expected<BitstreamEntry> MaybeEntry = advance(Flags);
      if (!MaybeEntry)
        return MaybeEntry;
      BitstreamEntry Entry = MaybeEntry.get();

      if (Entry.Kind != BitstreamEntry::SubBlock)
        return Entry;

      // If we found a sub-block, just skip over it and check the next entry.
      if (Error Err = SkipBlock())
        return std::move(Err);
    }
  }

  Expected<unsigned> ReadCode() { return Read(CurCodeSize); }

  // Block header:
  //    [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]

  /// Having read the ENTER_SUBBLOCK code, read the BlockID for the block.
  Expected<unsigned> ReadSubBlockID() { return ReadVBR(bitc::BlockIDWidth); }

  /// Having read the ENTER_SUBBLOCK abbrevid and a BlockID, skip over the body
  /// of this block.
  Error SkipBlock() {
    // Read and ignore the codelen value.
    if (Expected<uint32_t> Res = ReadVBR(bitc::CodeLenWidth))
      ; // Since we are skipping this block, we don't care what code widths are
        // used inside of it.
    else
      return Res.takeError();

    SkipToFourByteBoundary();
    Expected<unsigned> MaybeNum = Read(bitc::BlockSizeWidth);
    if (!MaybeNum)
      return MaybeNum.takeError();
    size_t NumFourBytes = MaybeNum.get();

    // Check that the block wasn't partially defined, and that the offset isn't
    // bogus.
    size_t SkipTo = GetCurrentBitNo() + NumFourBytes * 4 * 8;
    if (AtEndOfStream())
      return createStringError(std::errc::illegal_byte_sequence,
                               "can't skip block: already at end of stream");
    if (!canSkipToPos(SkipTo / 8))
      return createStringError(std::errc::illegal_byte_sequence,
                               "can't skip to bit %zu from %" PRIu64, SkipTo,
                               GetCurrentBitNo());

    if (Error Res = JumpToBit(SkipTo))
      return Res;

    return Error::success();
  }

  /// Having read the ENTER_SUBBLOCK abbrevid, enter the block.
  Error EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = nullptr);

  bool ReadBlockEnd() {
    if (BlockScope.empty()) return true;

    // Block tail:
    //    [END_BLOCK, <align4bytes>]
    SkipToFourByteBoundary();

    popBlockScope();
    return false;
  }

private:
  void popBlockScope() {
    CurCodeSize = BlockScope.back().PrevCodeSize;

    CurAbbrevs = std::move(BlockScope.back().PrevAbbrevs);
    BlockScope.pop_back();
  }

  //===--------------------------------------------------------------------===//
  // Record Processing
  //===--------------------------------------------------------------------===//

public:
  /// Return the abbreviation for the specified AbbrevId.
  Expected<const BitCodeAbbrev *> getAbbrev(unsigned AbbrevID) {
    unsigned AbbrevNo = AbbrevID - bitc::FIRST_APPLICATION_ABBREV;
    if (AbbrevNo >= CurAbbrevs.size())
      return createStringError(
          std::errc::illegal_byte_sequence, "Invalid abbrev number");
    return CurAbbrevs[AbbrevNo].get();
  }

  /// Read the current record and discard it, returning the code for the record.
  Expected<unsigned> skipRecord(unsigned AbbrevID);

  Expected<unsigned> readRecord(unsigned AbbrevID,
                                SmallVectorImpl<uint64_t> &Vals,
                                StringRef *Blob = nullptr);

  //===--------------------------------------------------------------------===//
  // Abbrev Processing
  //===--------------------------------------------------------------------===//
  Error ReadAbbrevRecord();

  /// Read and return a block info block from the bitstream. If an error was
  /// encountered, return std::nullopt.
  ///
  /// \param ReadBlockInfoNames Whether to read block/record name information in
  /// the BlockInfo block. Only llvm-bcanalyzer uses this.
  Expected<std::optional<BitstreamBlockInfo>>
  ReadBlockInfoBlock(bool ReadBlockInfoNames = false);

  /// Set the block info to be used by this BitstreamCursor to interpret
  /// abbreviated records.
  void setBlockInfo(BitstreamBlockInfo *BI) { BlockInfo = BI; }
};
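
// A typical top-level scan over a bitstream, sketched (Bytes is a
// hypothetical buffer; error paths elided to '...'):
//
//   BitstreamCursor Stream(Bytes);
//   while (!Stream.AtEndOfStream()) {
//     Expected<BitstreamEntry> MaybeEntry = Stream.advance();
//     if (!MaybeEntry) ...
//     switch (MaybeEntry.get().Kind) {
//     case BitstreamEntry::SubBlock:   // descend with EnterSubBlock, or
//       if (Error Err = Stream.SkipBlock()) ...  // skip the whole block
//       break;
//     case BitstreamEntry::Record: {
//       SmallVector<uint64_t, 64> Record;
//       Expected<unsigned> MaybeCode =
//           Stream.readRecord(MaybeEntry.get().ID, Record);
//       ...
//       break;
//     }
//     case BitstreamEntry::EndBlock:
//     case BitstreamEntry::Error:
//       ...
//     }
//   }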

} // end namespace llvm

#endif // LLVM_BITSTREAM_BITSTREAMREADER_H
//===- BitCodeEnums.h - Core enums for the bitstream format -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines "core" bitstream enum values.
// It has been separated from the other header that defines bitstream enum
// values, BitCodes.h, to allow tools to track changes to the various
// bitstream and bitcode enums without needing to fully or partially build
// LLVM itself.
//
// The enum values defined in this file should be considered permanent.  If
// new features are added, they should have values added at the end of the
// respective lists.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITSTREAM_BITCODEENUMS_H
#define LLVM_BITSTREAM_BITCODEENUMS_H

namespace llvm {
/// Offsets of the 32-bit fields of bitstream wrapper header.
enum BitstreamWrapperHeader : unsigned {
  BWH_MagicField = 0 * 4,
  BWH_VersionField = 1 * 4,
  BWH_OffsetField = 2 * 4,
  BWH_SizeField = 3 * 4,
  BWH_CPUTypeField = 4 * 4,
  BWH_HeaderSize = 5 * 4
};

namespace bitc {
enum StandardWidths {
  BlockIDWidth = 8,   // We use VBR-8 for block IDs.
  CodeLenWidth = 4,   // Codelen are VBR-4.
  BlockSizeWidth = 32 // BlockSize up to 2^32 32-bit words = 16GB per block.
};

// The standard abbrev namespace always has a way to exit a block, enter a
// nested block, define abbrevs, and define an unabbreviated record.
enum FixedAbbrevIDs {
  END_BLOCK = 0, // Must be zero to guarantee termination for broken bitcode.
  ENTER_SUBBLOCK = 1,

  /// DEFINE_ABBREV - Defines an abbrev for the current block.  It consists
  /// of a vbr5 for # operand infos.  Each operand info is emitted with a
  /// single bit to indicate if it is a literal encoding.  If so, the value is
  /// emitted with a vbr8.  If not, the encoding is emitted as 3 bits followed
  /// by the info value as a vbr5 if needed.
  DEFINE_ABBREV = 2,

  // UNABBREV_RECORDs are emitted with a vbr6 for the record code, followed by
  // a vbr6 for the # operands, followed by vbr6's for each operand.
  UNABBREV_RECORD = 3,

  // This is not a code, this is a marker for the first abbrev assignment.
  FIRST_APPLICATION_ABBREV = 4
};

/// StandardBlockIDs - All bitcode files can optionally include a BLOCKINFO
/// block, which contains metadata about other blocks in the file.
enum StandardBlockIDs {
  /// BLOCKINFO_BLOCK is used to define metadata about blocks, for example,
  /// standard abbrevs that should be available to all blocks of a specified
  /// ID.
  BLOCKINFO_BLOCK_ID = 0,

  // Block IDs 1-7 are reserved for future expansion.
  FIRST_APPLICATION_BLOCKID = 8
};

/// BlockInfoCodes - The blockinfo block contains metadata about user-defined
/// blocks.
enum BlockInfoCodes {
  // DEFINE_ABBREV has magic semantics here, applying to the current SETBID'd
  // block, instead of the BlockInfo block.

  BLOCKINFO_CODE_SETBID = 1,       // SETBID: [blockid#]
  BLOCKINFO_CODE_BLOCKNAME = 2,    // BLOCKNAME: [name]
  BLOCKINFO_CODE_SETRECORDNAME = 3 // BLOCKINFO_CODE_SETRECORDNAME:
                                   //                             [id, name]
};

} // namespace bitc
} // namespace llvm

#endif // LLVM_BITSTREAM_BITCODEENUMS_H
//===- BitstreamWriter.h - Low-level bitstream writer interface -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines the BitstreamWriter class.  This class can be used to
// write an arbitrary bitstream, regardless of its contents.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITSTREAM_BITSTREAMWRITER_H
#define LLVM_BITSTREAM_BITSTREAMWRITER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitCodes.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <optional>
#include <vector>

namespace llvm {

class BitstreamWriter {
  /// Out - The buffer that keeps unflushed bytes.
  SmallVectorImpl<char> &Out;

  /// FS - The file stream that Out flushes to. If FS is nullptr, or does not
  /// support read and seek, Out cannot be flushed until all data are written.
  raw_fd_stream *FS;

  /// FlushThreshold - If FS is valid, this is the threshold, in bytes, at
  /// which to flush to FS.
  const uint64_t FlushThreshold;

  /// CurBit - Always between 0 and 31 inclusive, specifies the next bit to use.
  unsigned CurBit;

  /// CurValue - The current value. Only bits < CurBit are valid.
  uint32_t CurValue;

  /// CurCodeSize - This is the declared size of code values used for the
  /// current block, in bits.
  unsigned CurCodeSize;

  /// BlockInfoCurBID - When emitting a BLOCKINFO_BLOCK, this is the currently
  /// selected BLOCK ID.
  unsigned BlockInfoCurBID;

  /// CurAbbrevs - Abbrevs installed in this block.
  std::vector<std::shared_ptr<BitCodeAbbrev>> CurAbbrevs;

  struct Block {
    unsigned PrevCodeSize;
    size_t StartSizeWord;
    std::vector<std::shared_ptr<BitCodeAbbrev>> PrevAbbrevs;
    Block(unsigned PCS, size_t SSW) : PrevCodeSize(PCS), StartSizeWord(SSW) {}
  };

  /// BlockScope - This tracks the current blocks that we have entered.
  std::vector<Block> BlockScope;

  /// BlockInfo - This contains information emitted to BLOCKINFO_BLOCK blocks.
  /// These describe abbreviations that all blocks of the specified ID inherit.
  struct BlockInfo {
    unsigned BlockID;
    std::vector<std::shared_ptr<BitCodeAbbrev>> Abbrevs;
  };
  std::vector<BlockInfo> BlockInfoRecords;

  void WriteWord(unsigned Value) {
    Value = support::endian::byte_swap<uint32_t, support::little>(Value);
    Out.append(reinterpret_cast<const char *>(&Value),
               reinterpret_cast<const char *>(&Value + 1));
  }

  uint64_t GetNumOfFlushedBytes() const { return FS ? FS->tell() : 0; }

  size_t GetBufferOffset() const { return Out.size() + GetNumOfFlushedBytes(); }

  size_t GetWordIndex() const {
    size_t Offset = GetBufferOffset();
    assert((Offset & 3) == 0 && "Not 32-bit aligned");
    return Offset / 4;
  }

  /// If the related file stream supports reading, seeking and writing, flush
  /// the buffer if its size is above a threshold.
  void FlushToFile() {
    if (!FS)
      return;
    if (Out.size() < FlushThreshold)
      return;
    FS->write((char *)&Out.front(), Out.size());
    Out.clear();
  }

public:
  /// Create a BitstreamWriter that writes to Buffer \p O.
  ///
  /// \p FS is the file stream that \p O flushes to incrementally. If \p FS is
  /// null, \p O does not flush incrementally, but writes to disk at the end.
  ///
  /// \p FlushThreshold is the threshold (in MiB) at which to flush \p O if
  /// \p FS is valid. Flushing only occurs at (sub)block boundaries.
  BitstreamWriter(SmallVectorImpl<char> &O, raw_fd_stream *FS = nullptr,
                  uint32_t FlushThreshold = 512)
      : Out(O), FS(FS), FlushThreshold(FlushThreshold << 20), CurBit(0),
        CurValue(0), CurCodeSize(2) {}

  ~BitstreamWriter() {
    assert(CurBit == 0 && "Unflushed data remaining");
    assert(BlockScope.empty() && CurAbbrevs.empty() && "Block imbalance");
  }

  /// Retrieve the current position in the stream, in bits.
  uint64_t GetCurrentBitNo() const { return GetBufferOffset() * 8 + CurBit; }

  /// Retrieve the number of bits currently used to encode an abbrev ID.
  unsigned GetAbbrevIDWidth() const { return CurCodeSize; }

  //===--------------------------------------------------------------------===//
  // Basic Primitives for emitting bits to the stream.
  //===--------------------------------------------------------------------===//

  /// Backpatch a 32-bit word in the output at the given bit offset
  /// with the specified value.
  void BackpatchWord(uint64_t BitNo, unsigned NewWord) {
    using namespace llvm::support;
    uint64_t ByteNo = BitNo / 8;
    uint64_t StartBit = BitNo & 7;
    uint64_t NumOfFlushedBytes = GetNumOfFlushedBytes();

    if (ByteNo >= NumOfFlushedBytes) {
      assert((!endian::readAtBitAlignment<uint32_t, little, unaligned>(
                 &Out[ByteNo - NumOfFlushedBytes], StartBit)) &&
             "Expected to be patching over 0-value placeholders");
      endian::writeAtBitAlignment<uint32_t, little, unaligned>(
          &Out[ByteNo - NumOfFlushedBytes], NewWord, StartBit);
      return;
    }

    // If the byte offset to backpatch is flushed, use seek to backfill data.
    // First, save the file position to restore later.
    uint64_t CurPos = FS->tell();

    // Copy data to update into Bytes from the file FS and the buffer Out.
    char Bytes[9]; // Use one more byte to silence a warning from Visual C++.
    size_t BytesNum = StartBit ? 8 : 4;
    size_t BytesFromDisk =
        std::min(static_cast<uint64_t>(BytesNum), NumOfFlushedBytes - ByteNo);
    size_t BytesFromBuffer = BytesNum - BytesFromDisk;

    // When unaligned, copy existing data into Bytes from the file FS and the
    // buffer Out so that it can be updated before writing. For debug builds
    // read bytes unconditionally in order to check that the existing value is 0
    // as expected.
#ifdef NDEBUG
    if (StartBit)
#endif
    {
      FS->seek(ByteNo);
      ssize_t BytesRead = FS->read(Bytes, BytesFromDisk);
      (void)BytesRead; // silence warning
      assert(BytesRead >= 0 && static_cast<size_t>(BytesRead) == BytesFromDisk);
      for (size_t i = 0; i < BytesFromBuffer; ++i)
        Bytes[BytesFromDisk + i] = Out[i];
      assert((!endian::readAtBitAlignment<uint32_t, little, unaligned>(
                 Bytes, StartBit)) &&
             "Expected to be patching over 0-value placeholders");
    }

    // Update Bytes in terms of bit offset and value.
    endian::writeAtBitAlignment<uint32_t, little, unaligned>(Bytes, NewWord,
                                                             StartBit);

    // Copy updated data back to the file FS and the buffer Out.
    FS->seek(ByteNo);
    FS->write(Bytes, BytesFromDisk);
    for (size_t i = 0; i < BytesFromBuffer; ++i)
      Out[i] = Bytes[BytesFromDisk + i];

    // Restore the file position.
    FS->seek(CurPos);
  }

  void BackpatchWord64(uint64_t BitNo, uint64_t Val) {
    BackpatchWord(BitNo, (uint32_t)Val);
    BackpatchWord(BitNo + 32, (uint32_t)(Val >> 32));
  }

  void Emit(uint32_t Val, unsigned NumBits) {
    assert(NumBits && NumBits <= 32 && "Invalid value size!");
    assert((Val & ~(~0U >> (32-NumBits))) == 0 && "High bits set!");
    CurValue |= Val << CurBit;
    if (CurBit + NumBits < 32) {
      CurBit += NumBits;
      return;
    }

    // Add the current word.
    WriteWord(CurValue);

    if (CurBit)
      CurValue = Val >> (32-CurBit);
    else
      CurValue = 0;
    CurBit = (CurBit+NumBits) & 31;
  }

  void FlushToWord() {
    if (CurBit) {
      WriteWord(CurValue);
      CurBit = 0;
      CurValue = 0;
    }
  }

  void EmitVBR(uint32_t Val, unsigned NumBits) {
    assert(NumBits <= 32 && "Too many bits to emit!");
    uint32_t Threshold = 1U << (NumBits-1);

    // Emit the bits with VBR encoding, NumBits-1 bits at a time.
    while (Val >= Threshold) {
      Emit((Val & ((1 << (NumBits-1))-1)) | (1 << (NumBits-1)), NumBits);
      Val >>= NumBits-1;
    }

    Emit(Val, NumBits);
  }

  void EmitVBR64(uint64_t Val, unsigned NumBits) {
    assert(NumBits <= 32 && "Too many bits to emit!");
    if ((uint32_t)Val == Val)
      return EmitVBR((uint32_t)Val, NumBits);

    uint32_t Threshold = 1U << (NumBits-1);

    // Emit the bits with VBR encoding, NumBits-1 bits at a time.
    while (Val >= Threshold) {
      Emit(((uint32_t)Val & ((1 << (NumBits - 1)) - 1)) | (1 << (NumBits - 1)),
           NumBits);
      Val >>= NumBits-1;
    }

    Emit((uint32_t)Val, NumBits);
  }

  /// EmitCode - Emit the specified code.
  void EmitCode(unsigned Val) {
    Emit(Val, CurCodeSize);
  }

  //===--------------------------------------------------------------------===//
  // Block Manipulation
  //===--------------------------------------------------------------------===//

  /// getBlockInfo - If there is block info for the specified ID, return it,
  /// otherwise return null.
  BlockInfo *getBlockInfo(unsigned BlockID) {
    // Common case, the most recent entry matches BlockID.
    if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID)
      return &BlockInfoRecords.back();

    for (BlockInfo &BI : BlockInfoRecords)
      if (BI.BlockID == BlockID)
        return &BI;
    return nullptr;
  }

  void EnterSubblock(unsigned BlockID, unsigned CodeLen) {
    // Block header:
    //    [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]
    EmitCode(bitc::ENTER_SUBBLOCK);
    EmitVBR(BlockID, bitc::BlockIDWidth);
    EmitVBR(CodeLen, bitc::CodeLenWidth);
    FlushToWord();

    size_t BlockSizeWordIndex = GetWordIndex();
    unsigned OldCodeSize = CurCodeSize;

    // Emit a placeholder, which will be replaced when the block is popped.
    Emit(0, bitc::BlockSizeWidth);

    CurCodeSize = CodeLen;

    // Push the outer block's abbrev set onto the stack; start out with an
    // empty abbrev set.
    BlockScope.emplace_back(OldCodeSize, BlockSizeWordIndex);
    BlockScope.back().PrevAbbrevs.swap(CurAbbrevs);

    // If there is a blockinfo for this BlockID, add all the predefined abbrevs
    // to the abbrev list.
    if (BlockInfo *Info = getBlockInfo(BlockID))
      append_range(CurAbbrevs, Info->Abbrevs);
  }

  void ExitBlock() {
    assert(!BlockScope.empty() && "Block scope imbalance!");
    const Block &B = BlockScope.back();

    // Block tail:
    //    [END_BLOCK, <align4bytes>]
    EmitCode(bitc::END_BLOCK);
    FlushToWord();

    // Compute the size of the block, in words, not counting the size field.
    size_t SizeInWords = GetWordIndex() - B.StartSizeWord - 1;
    uint64_t BitNo = uint64_t(B.StartSizeWord) * 32;

    // Update the block size field in the header of this sub-block.
    BackpatchWord(BitNo, SizeInWords);

    // Restore the enclosing block's code size and abbrev table.
    CurCodeSize = B.PrevCodeSize;
    CurAbbrevs = std::move(B.PrevAbbrevs);
    BlockScope.pop_back();
    FlushToFile();
  }
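
  // A minimal emission sketch using the block protocol above (MY_BLOCK_ID
  // and MY_CODE are hypothetical application-level IDs):
  //
  //   Stream.EnterSubblock(MY_BLOCK_ID, /*CodeLen=*/3); // writes a 0-word
  //                                                     // length placeholder
  //   Stream.EmitRecord(MY_CODE, Vals);                 // records in block
  //   Stream.ExitBlock(); // backpatches the real word count into the header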

  //===--------------------------------------------------------------------===//
  // Record Emission
  //===--------------------------------------------------------------------===//

private:
  /// EmitAbbreviatedLiteral - Emit a literal value according to its abbrev
  /// record.  This is a no-op, since the abbrev specifies the literal to use.
  template<typename uintty>
  void EmitAbbreviatedLiteral(const BitCodeAbbrevOp &Op, uintty V) {
    assert(Op.isLiteral() && "Not a literal");
    // If the abbrev specifies the literal value to use, don't emit
    // anything.
    assert(V == Op.getLiteralValue() &&
           "Invalid abbrev for record!");
  }

  /// EmitAbbreviatedField - Emit a single scalar field value with the specified
  /// encoding.
  template<typename uintty>
  void EmitAbbreviatedField(const BitCodeAbbrevOp &Op, uintty V) {
    assert(!Op.isLiteral() && "Literals should use EmitAbbreviatedLiteral!");

    // Encode the value as we are commanded.
    switch (Op.getEncoding()) {
    default: llvm_unreachable("Unknown encoding!");
    case BitCodeAbbrevOp::Fixed:
      if (Op.getEncodingData())
        Emit((unsigned)V, (unsigned)Op.getEncodingData());
      break;
    case BitCodeAbbrevOp::VBR:
      if (Op.getEncodingData())
        EmitVBR64(V, (unsigned)Op.getEncodingData());
      break;
    case BitCodeAbbrevOp::Char6:
      Emit(BitCodeAbbrevOp::EncodeChar6((char)V), 6);
      break;
    }
  }

  /// EmitRecordWithAbbrevImpl - This is the core implementation of the record
  /// emission code.  If BlobData is non-null, then it specifies an array of
  /// data that should be emitted as part of the Blob or Array operand that is
  /// known to exist at the end of the record. If Code is specified, then
  /// it is the record code to emit before the Vals, which must not contain
  /// the code.
  template <typename uintty>
  void EmitRecordWithAbbrevImpl(unsigned Abbrev, ArrayRef<uintty> Vals,
                                StringRef Blob, std::optional<unsigned> Code) {
    const char *BlobData = Blob.data();
    unsigned BlobLen = (unsigned) Blob.size();
    unsigned AbbrevNo = Abbrev-bitc::FIRST_APPLICATION_ABBREV;
    assert(AbbrevNo < CurAbbrevs.size() && "Invalid abbrev #!");
    const BitCodeAbbrev *Abbv = CurAbbrevs[AbbrevNo].get();

    EmitCode(Abbrev);

    unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos());
    if (Code) {
      assert(e && "Expected non-empty abbreviation");
      const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);

      if (Op.isLiteral())
        EmitAbbreviatedLiteral(Op, *Code);
      else {
        assert(Op.getEncoding() != BitCodeAbbrevOp::Array &&
               Op.getEncoding() != BitCodeAbbrevOp::Blob &&
               "Expected literal or scalar");
        EmitAbbreviatedField(Op, *Code);
      }
    }

    unsigned RecordIdx = 0;
    for (; i != e; ++i) {
      const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i);
      if (Op.isLiteral()) {
        assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
        EmitAbbreviatedLiteral(Op, Vals[RecordIdx]);
        ++RecordIdx;
      } else if (Op.getEncoding() == BitCodeAbbrevOp::Array) {
        // Array case.
        assert(i + 2 == e && "array op not second to last?");
        const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i);

        // If this record has blob data, emit it, otherwise we must have record
        // entries to encode this way.
        if (BlobData) {
          assert(RecordIdx == Vals.size() &&
                 "Blob data and record entries specified for array!");
          // Emit a vbr6 to indicate the number of elements present.
          EmitVBR(static_cast<uint32_t>(BlobLen), 6);

          // Emit each field.
          for (unsigned i = 0; i != BlobLen; ++i)
            EmitAbbreviatedField(EltEnc, (unsigned char)BlobData[i]);

          // Know that blob data is consumed for assertion below.
          BlobData = nullptr;
        } else {
          // Emit a vbr6 to indicate the number of elements present.
          EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6);

          // Emit each field.
          for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx)
            EmitAbbreviatedField(EltEnc, Vals[RecordIdx]);
        }
      } else if (Op.getEncoding() == BitCodeAbbrevOp::Blob) {
        // If this record has blob data, emit it, otherwise we must have record
        // entries to encode this way.

        if (BlobData) {
          assert(RecordIdx == Vals.size() &&
                 "Blob data and record entries specified for blob operand!");

          assert(Blob.data() == BlobData && "BlobData got moved");
          assert(Blob.size() == BlobLen && "BlobLen got changed");
          emitBlob(Blob);
          BlobData = nullptr;
        } else {
          emitBlob(Vals.slice(RecordIdx));
        }
      } else {  // Single scalar field.
        assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
        EmitAbbreviatedField(Op, Vals[RecordIdx]);
        ++RecordIdx;
      }
    }
    assert(RecordIdx == Vals.size() && "Not all record operands emitted!");
    assert(BlobData == nullptr &&
           "Blob data specified for record that doesn't use it!");
  }

public:
  /// Emit a blob, including flushing before and tail-padding.
  template <class UIntTy>
  void emitBlob(ArrayRef<UIntTy> Bytes, bool ShouldEmitSize = true) {
    // Emit a vbr6 to indicate the number of elements present.
    if (ShouldEmitSize)
      EmitVBR(static_cast<uint32_t>(Bytes.size()), 6);

    // Flush to a 32-bit alignment boundary.
    FlushToWord();

    // Emit literal bytes.
    assert(llvm::all_of(Bytes, [](UIntTy B) { return isUInt<8>(B); }));
    Out.append(Bytes.begin(), Bytes.end());

    // Align end to 32-bits.
    while (GetBufferOffset() & 3)
      Out.push_back(0);
  }
  void emitBlob(StringRef Bytes, bool ShouldEmitSize = true) {
    emitBlob(ArrayRef((const uint8_t *)Bytes.data(), Bytes.size()),
             ShouldEmitSize);
  }

  /// EmitRecord - Emit the specified record to the stream, using an abbrev if
  /// we have one to compress the output.
  template <typename Container>
  void EmitRecord(unsigned Code, const Container &Vals, unsigned Abbrev = 0) {
    if (!Abbrev) {
      // If we don't have an abbrev to use, emit this in its fully unabbreviated
      // form.
      auto Count = static_cast<uint32_t>(std::size(Vals));
      EmitCode(bitc::UNABBREV_RECORD);
      EmitVBR(Code, 6);
      EmitVBR(Count, 6);
      for (unsigned i = 0, e = Count; i != e; ++i)
        EmitVBR64(Vals[i], 6);
      return;
    }

    EmitRecordWithAbbrevImpl(Abbrev, ArrayRef(Vals), StringRef(), Code);
  }
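
  // Illustrative sketch (caller side): `Stream`, `Code`, and `AbbrevID` are
  // hypothetical names for a configured BitstreamWriter, a record code, and a
  // previously registered abbreviation ID.
  //   SmallVector<uint64_t, 4> Vals = {17, 42};
  //   Stream.EmitRecord(Code, Vals);           // fully unabbreviated form
  //   Stream.EmitRecord(Code, Vals, AbbrevID); // compressed via abbreviation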

  /// EmitRecordWithAbbrev - Emit a record with the specified abbreviation.
  /// Unlike EmitRecord, the code for the record should be included in Vals as
  /// the first entry.
  template <typename Container>
  void EmitRecordWithAbbrev(unsigned Abbrev, const Container &Vals) {
    EmitRecordWithAbbrevImpl(Abbrev, ArrayRef(Vals), StringRef(), std::nullopt);
  }

  /// EmitRecordWithBlob - Emit the specified record to the stream, using an
  /// abbrev that includes a blob at the end.  The blob data to emit is
  /// specified by the pointer and length specified at the end.  In contrast to
  /// EmitRecord, this routine expects that the first entry in Vals is the code
  /// of the record.
  template <typename Container>
  void EmitRecordWithBlob(unsigned Abbrev, const Container &Vals,
                          StringRef Blob) {
    EmitRecordWithAbbrevImpl(Abbrev, ArrayRef(Vals), Blob, std::nullopt);
  }
  template <typename Container>
  void EmitRecordWithBlob(unsigned Abbrev, const Container &Vals,
                          const char *BlobData, unsigned BlobLen) {
    return EmitRecordWithAbbrevImpl(Abbrev, ArrayRef(Vals),
                                    StringRef(BlobData, BlobLen), std::nullopt);
  }

  /// EmitRecordWithArray - Just like EmitRecordWithBlob, works with records
  /// that end with an array.
  template <typename Container>
  void EmitRecordWithArray(unsigned Abbrev, const Container &Vals,
                           StringRef Array) {
    EmitRecordWithAbbrevImpl(Abbrev, ArrayRef(Vals), Array, std::nullopt);
  }
  template <typename Container>
  void EmitRecordWithArray(unsigned Abbrev, const Container &Vals,
                           const char *ArrayData, unsigned ArrayLen) {
    return EmitRecordWithAbbrevImpl(
        Abbrev, ArrayRef(Vals), StringRef(ArrayData, ArrayLen), std::nullopt);
  }

  //===--------------------------------------------------------------------===//
  // Abbrev Emission
  //===--------------------------------------------------------------------===//

private:
  // Emit the abbreviation as a DEFINE_ABBREV record.
  void EncodeAbbrev(const BitCodeAbbrev &Abbv) {
    EmitCode(bitc::DEFINE_ABBREV);
    EmitVBR(Abbv.getNumOperandInfos(), 5);
    for (unsigned i = 0, e = static_cast<unsigned>(Abbv.getNumOperandInfos());
         i != e; ++i) {
      const BitCodeAbbrevOp &Op = Abbv.getOperandInfo(i);
      Emit(Op.isLiteral(), 1);
      if (Op.isLiteral()) {
        EmitVBR64(Op.getLiteralValue(), 8);
      } else {
        Emit(Op.getEncoding(), 3);
        if (Op.hasEncodingData())
          EmitVBR64(Op.getEncodingData(), 5);
      }
    }
  }
public:

  /// Emits the abbreviation \p Abbv to the stream.
  unsigned EmitAbbrev(std::shared_ptr<BitCodeAbbrev> Abbv) {
    EncodeAbbrev(*Abbv);
    CurAbbrevs.push_back(std::move(Abbv));
    return static_cast<unsigned>(CurAbbrevs.size())-1 +
      bitc::FIRST_APPLICATION_ABBREV;
  }
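
  // Illustrative sketch: build and register an abbreviation for records whose
  // code is the literal 4 followed by an array of Char6 characters (`Stream`
  // is a hypothetical BitstreamWriter).
  //   auto Abbv = std::make_shared<BitCodeAbbrev>();
  //   Abbv->Add(BitCodeAbbrevOp(4));                      // literal record code
  //   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); // array of ...
  //   Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6)); // ... Char6 elements
  //   unsigned AbbrevID = Stream.EmitAbbrev(std::move(Abbv));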

  //===--------------------------------------------------------------------===//
  // BlockInfo Block Emission
  //===--------------------------------------------------------------------===//

  /// EnterBlockInfoBlock - Start emitting the BLOCKINFO_BLOCK.
  void EnterBlockInfoBlock() {
    EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, 2);
    BlockInfoCurBID = ~0U;
    BlockInfoRecords.clear();
  }
private:
  /// SwitchToBlockID - If we aren't already talking about the specified block
  /// ID, emit a BLOCKINFO_CODE_SETBID record.
  void SwitchToBlockID(unsigned BlockID) {
    if (BlockInfoCurBID == BlockID) return;
    SmallVector<unsigned, 2> V;
    V.push_back(BlockID);
    EmitRecord(bitc::BLOCKINFO_CODE_SETBID, V);
    BlockInfoCurBID = BlockID;
  }

  BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
    if (BlockInfo *BI = getBlockInfo(BlockID))
      return *BI;

    // Otherwise, add a new record.
    BlockInfoRecords.emplace_back();
    BlockInfoRecords.back().BlockID = BlockID;
    return BlockInfoRecords.back();
  }

public:

  /// EmitBlockInfoAbbrev - Emit a DEFINE_ABBREV record for the specified
  /// BlockID.
  unsigned EmitBlockInfoAbbrev(unsigned BlockID, std::shared_ptr<BitCodeAbbrev> Abbv) {
    SwitchToBlockID(BlockID);
    EncodeAbbrev(*Abbv);

    // Add the abbrev to the specified block record.
    BlockInfo &Info = getOrCreateBlockInfo(BlockID);
    Info.Abbrevs.push_back(std::move(Abbv));

    return Info.Abbrevs.size()-1+bitc::FIRST_APPLICATION_ABBREV;
  }
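
  // Illustrative sketch: registering an abbreviation through the BLOCKINFO
  // block instead makes it available in every block with the given ID
  // (bitc::VALUE_SYMTAB_BLOCK_ID is just an example ID; `Abbv` as above).
  //   Stream.EnterBlockInfoBlock();
  //   unsigned A =
  //       Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv);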
};


} // End llvm namespace

#endif
//===- BitCodes.h - Enum values for the bitstream format --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines bitstream enum values.
//
// The enum values defined in this file should be considered permanent.  If
// new features are added, they should have values added at the end of the
// respective lists.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITSTREAM_BITCODES_H
#define LLVM_BITSTREAM_BITCODES_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitstream/BitCodeEnums.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>

namespace llvm {
/// BitCodeAbbrevOp - This describes one or more operands in an abbreviation.
/// This is actually a union of two different things:
///   1. It could be a literal integer value ("the operand is always 17").
///   2. It could be an encoding specification ("this operand encoded like so").
///
class BitCodeAbbrevOp {
  uint64_t Val;           // A literal value or data for an encoding.
  bool IsLiteral : 1;     // Indicate whether this is a literal value or not.
  unsigned Enc   : 3;     // The encoding to use.
public:
  enum Encoding {
    Fixed = 1,  // A fixed width field, Val specifies number of bits.
    VBR   = 2,  // A VBR field where Val specifies the width of each chunk.
    Array = 3,  // A sequence of fields; the next field specifies the element encoding.
    Char6 = 4,  // A 6-bit fixed field which maps to [a-zA-Z0-9._].
    Blob  = 5   // 32-bit aligned array of 8-bit characters.
  };

  static bool isValidEncoding(uint64_t E) {
    return E >= 1 && E <= 5;
  }

  explicit BitCodeAbbrevOp(uint64_t V) :  Val(V), IsLiteral(true) {}
  explicit BitCodeAbbrevOp(Encoding E, uint64_t Data = 0)
    : Val(Data), IsLiteral(false), Enc(E) {}

  bool isLiteral() const  { return IsLiteral; }
  bool isEncoding() const { return !IsLiteral; }

  // Accessors for literals.
  uint64_t getLiteralValue() const { assert(isLiteral()); return Val; }

  // Accessors for encoding info.
  Encoding getEncoding() const { assert(isEncoding()); return (Encoding)Enc; }
  uint64_t getEncodingData() const {
    assert(isEncoding() && hasEncodingData());
    return Val;
  }

  bool hasEncodingData() const { return hasEncodingData(getEncoding()); }
  static bool hasEncodingData(Encoding E) {
    switch (E) {
    case Fixed:
    case VBR:
      return true;
    case Array:
    case Char6:
    case Blob:
      return false;
    }
    report_fatal_error("Invalid encoding");
  }

  /// isChar6 - Return true if this character is legal in the Char6 encoding.
  static bool isChar6(char C) { return isAlnum(C) || C == '.' || C == '_'; }
  static unsigned EncodeChar6(char C) {
    if (C >= 'a' && C <= 'z') return C-'a';
    if (C >= 'A' && C <= 'Z') return C-'A'+26;
    if (C >= '0' && C <= '9') return C-'0'+26+26;
    if (C == '.')             return 62;
    if (C == '_')             return 63;
    llvm_unreachable("Not a value Char6 character!");
  }

  static char DecodeChar6(unsigned V) {
    assert((V & ~63) == 0 && "Not a Char6 encoded character!");
    return "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._"
        [V];
  }

};
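
// Illustrative sketch: Char6 values round-trip through the encode/decode pair.
//   unsigned V = BitCodeAbbrevOp::EncodeChar6('z'); // == 25
//   char C = BitCodeAbbrevOp::DecodeChar6(V);       // == 'z'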

/// BitCodeAbbrev - This class represents an abbreviation record.  An
/// abbreviation allows a complex record that has redundancy to be stored in a
/// specialized format instead of the fully-general, fully-vbr, format.
class BitCodeAbbrev {
  SmallVector<BitCodeAbbrevOp, 32> OperandList;

public:
  BitCodeAbbrev() = default;

  explicit BitCodeAbbrev(std::initializer_list<BitCodeAbbrevOp> OperandList)
      : OperandList(OperandList) {}

  unsigned getNumOperandInfos() const {
    return static_cast<unsigned>(OperandList.size());
  }
  const BitCodeAbbrevOp &getOperandInfo(unsigned N) const {
    return OperandList[N];
  }

  void Add(const BitCodeAbbrevOp &OpInfo) {
    OperandList.push_back(OpInfo);
  }
};
} // namespace llvm

#endif
//===- COFFObjcopy.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_COFF_COFFOBJCOPY_H
#define LLVM_OBJCOPY_COFF_COFFOBJCOPY_H

namespace llvm {
class Error;
class raw_ostream;

namespace object {
class COFFObjectFile;
} // end namespace object

namespace objcopy {
struct CommonConfig;
struct COFFConfig;

namespace coff {

/// Apply the transformations described by \p Config and \p COFFConfig
/// to \p In and write the result into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnBinary(const CommonConfig &Config, const COFFConfig &,
                             object::COFFObjectFile &In, raw_ostream &Out);

} // end namespace coff
} // end namespace objcopy
} // end namespace llvm

#endif // LLVM_OBJCOPY_COFF_COFFOBJCOPY_H
//===- COFFConfig.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_COFF_COFFCONFIG_H
#define LLVM_OBJCOPY_COFF_COFFCONFIG_H

#include <optional>

namespace llvm {
namespace objcopy {

// Coff specific configuration for copying/stripping a single file.
struct COFFConfig {
  std::optional<unsigned> Subsystem;
  std::optional<unsigned> MajorSubsystemVersion;
  std::optional<unsigned> MinorSubsystemVersion;
};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_COFF_COFFCONFIG_H
//===- ELFObjcopy.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_ELF_ELFOBJCOPY_H
#define LLVM_OBJCOPY_ELF_ELFOBJCOPY_H

namespace llvm {
class Error;
class MemoryBuffer;
class raw_ostream;

namespace object {
class ELFObjectFileBase;
} // end namespace object

namespace objcopy {
struct CommonConfig;
struct ELFConfig;

namespace elf {
/// Apply the transformations described by \p Config and \p ELFConfig to
/// \p In, which must represent an IHex file, and write the result
/// into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnIHex(const CommonConfig &Config,
                           const ELFConfig &ELFConfig, MemoryBuffer &In,
                           raw_ostream &Out);

/// Apply the transformations described by \p Config and \p ELFConfig to
/// \p In, which is treated as a raw binary input, and write the result
/// into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnRawBinary(const CommonConfig &Config,
                                const ELFConfig &ELFConfig, MemoryBuffer &In,
                                raw_ostream &Out);

/// Apply the transformations described by \p Config and \p ELFConfig to
/// \p In and write the result into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnBinary(const CommonConfig &Config,
                             const ELFConfig &ELFConfig,
                             object::ELFObjectFileBase &In, raw_ostream &Out);

} // end namespace elf
} // end namespace objcopy
} // end namespace llvm

#endif // LLVM_OBJCOPY_ELF_ELFOBJCOPY_H
//===- ELFConfig.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_ELF_ELFCONFIG_H
#define LLVM_OBJCOPY_ELF_ELFCONFIG_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Object/ELFTypes.h"
#include <vector>

namespace llvm {
namespace objcopy {

// ELF specific configuration for copying/stripping a single file.
struct ELFConfig {
  uint8_t NewSymbolVisibility = (uint8_t)ELF::STV_DEFAULT;

  // ELF entry point address expression. The input parameter is an entry point
  // address in the input ELF file. The entry address in the output file is
  // calculated with EntryExpr(input_address), when either --set-start or
  // --change-start is used.
  std::function<uint64_t(uint64_t)> EntryExpr;

  bool AllowBrokenLinks = false;
  bool KeepFileSymbols = false;
  bool LocalizeHidden = false;
};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_ELF_ELFCONFIG_H
//===- WasmConfig.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_WASM_WASMCONFIG_H
#define LLVM_OBJCOPY_WASM_WASMCONFIG_H

namespace llvm {
namespace objcopy {

// Wasm specific configuration for copying/stripping a single file.
struct WasmConfig {};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_WASM_WASMCONFIG_H
//===- WasmObjcopy.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_WASM_WASMOBJCOPY_H
#define LLVM_OBJCOPY_WASM_WASMOBJCOPY_H

namespace llvm {
class Error;
class raw_ostream;

namespace object {
class WasmObjectFile;
} // end namespace object

namespace objcopy {
struct CommonConfig;
struct WasmConfig;

namespace wasm {
/// Apply the transformations described by \p Config and \p WasmConfig
/// to \p In and write the result into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnBinary(const CommonConfig &Config, const WasmConfig &,
                             object::WasmObjectFile &In, raw_ostream &Out);

} // end namespace wasm
} // end namespace objcopy
} // end namespace llvm

#endif // LLVM_OBJCOPY_WASM_WASMOBJCOPY_H
//===- ObjCopy.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_OBJCOPY_H
#define LLVM_OBJCOPY_OBJCOPY_H

#include "llvm/Support/Error.h"

namespace llvm {
class raw_ostream;

namespace object {
class Archive;
class Binary;
} // end namespace object

namespace objcopy {
class MultiFormatConfig;

/// Applies the transformations described by \p Config to
/// each member in archive \p Ar.
/// Writes the result to the file specified by \p Config.OutputFilename.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnArchive(const MultiFormatConfig &Config,
                              const object::Archive &Ar);

/// Applies the transformations described by \p Config to \p In and writes
/// the result into \p Out. This function does the dispatch based on the
/// format of the input binary (COFF, ELF, MachO or wasm).
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnBinary(const MultiFormatConfig &Config,
                             object::Binary &In, raw_ostream &Out);

} // end namespace objcopy
} // end namespace llvm

#endif // LLVM_OBJCOPY_OBJCOPY_H
//===- XCOFFObjcopy.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_XCOFF_XCOFFOBJCOPY_H
#define LLVM_OBJCOPY_XCOFF_XCOFFOBJCOPY_H

namespace llvm {
class Error;
class raw_ostream;

namespace object {
class XCOFFObjectFile;
} // end namespace object

namespace objcopy {
struct CommonConfig;
struct XCOFFConfig;

namespace xcoff {
/// Apply the transformations described by \p Config and \p XCOFFConfig
/// to \p In and write the result into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnBinary(const CommonConfig &Config, const XCOFFConfig &,
                             object::XCOFFObjectFile &In, raw_ostream &Out);

} // end namespace xcoff
} // end namespace objcopy
} // end namespace llvm

#endif // LLVM_OBJCOPY_XCOFF_XCOFFOBJCOPY_H
//===- XCOFFConfig.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_XCOFF_XCOFFCONFIG_H
#define LLVM_OBJCOPY_XCOFF_XCOFFCONFIG_H

namespace llvm {
namespace objcopy {

// XCOFF specific configuration for copying/stripping a single file.
struct XCOFFConfig {};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_XCOFF_XCOFFCONFIG_H
//===- MachOConfig.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_MACHO_MACHOCONFIG_H
#define LLVM_OBJCOPY_MACHO_MACHOCONFIG_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include <optional>
#include <vector>

namespace llvm {
namespace objcopy {

// Mach-O specific configuration for copying/stripping a single file.
struct MachOConfig {
  // Repeated options
  std::vector<StringRef> RPathToAdd;
  std::vector<StringRef> RPathToPrepend;
  DenseMap<StringRef, StringRef> RPathsToUpdate;
  DenseMap<StringRef, StringRef> InstallNamesToUpdate;
  DenseSet<StringRef> RPathsToRemove;

  // install-name-tool's id option
  std::optional<StringRef> SharedLibId;

  // Segments to remove if they are empty
  DenseSet<StringRef> EmptySegmentsToRemove;

  // Boolean options
  bool StripSwiftSymbols = false;
  bool KeepUndefined = false;

  // install-name-tool's --delete_all_rpaths
  bool RemoveAllRpaths = false;
};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_MACHO_MACHOCONFIG_H
//===- MachOObjcopy.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_MACHO_MACHOOBJCOPY_H
#define LLVM_OBJCOPY_MACHO_MACHOOBJCOPY_H

namespace llvm {
class Error;
class raw_ostream;

namespace object {
class MachOObjectFile;
class MachOUniversalBinary;
} // end namespace object

namespace objcopy {
struct CommonConfig;
struct MachOConfig;
class MultiFormatConfig;

namespace macho {
/// Apply the transformations described by \p Config and \p MachOConfig to
/// \p In and write the result into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnBinary(const CommonConfig &Config,
                             const MachOConfig &MachOConfig,
                             object::MachOObjectFile &In, raw_ostream &Out);

/// Apply the transformations described by \p Config and \p MachOConfig to
/// \p In and write the result into \p Out.
/// \returns any Error encountered whilst performing the operation.
Error executeObjcopyOnMachOUniversalBinary(
    const MultiFormatConfig &Config, const object::MachOUniversalBinary &In,
    raw_ostream &Out);

} // end namespace macho
} // end namespace objcopy
} // end namespace llvm

#endif // LLVM_OBJCOPY_MACHO_MACHOOBJCOPY_H
//===- MultiFormatConfig.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_MULTIFORMATCONFIG_H
#define LLVM_OBJCOPY_MULTIFORMATCONFIG_H

#include "llvm/Support/Error.h"

namespace llvm {
namespace objcopy {

struct CommonConfig;
struct ELFConfig;
struct COFFConfig;
struct MachOConfig;
struct WasmConfig;
struct XCOFFConfig;

class MultiFormatConfig {
public:
  virtual ~MultiFormatConfig() {}

  virtual const CommonConfig &getCommonConfig() const = 0;
  virtual Expected<const ELFConfig &> getELFConfig() const = 0;
  virtual Expected<const COFFConfig &> getCOFFConfig() const = 0;
  virtual Expected<const MachOConfig &> getMachOConfig() const = 0;
  virtual Expected<const WasmConfig &> getWasmConfig() const = 0;
  virtual Expected<const XCOFFConfig &> getXCOFFConfig() const = 0;
};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_MULTIFORMATCONFIG_H
//===- CommonConfig.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_COMMONCONFIG_H
#define LLVM_OBJCOPY_COMMONCONFIG_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Support/GlobPattern.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Regex.h"
// Necessary for llvm::DebugCompressionType::None
#include "llvm/Target/TargetOptions.h"
#include <optional>
#include <vector>

namespace llvm {
namespace objcopy {

enum class FileFormat {
  Unspecified,
  ELF,
  Binary,
  IHex,
};

// This type keeps track of the machine info for various architectures. This
// lets us map architecture names to ELF types and the e_machine value of the
// ELF file.
struct MachineInfo {
  MachineInfo(uint16_t EM, uint8_t ABI, bool Is64, bool IsLittle)
      : EMachine(EM), OSABI(ABI), Is64Bit(Is64), IsLittleEndian(IsLittle) {}
  // Alternative constructor that defaults to NONE for OSABI.
  MachineInfo(uint16_t EM, bool Is64, bool IsLittle)
      : MachineInfo(EM, ELF::ELFOSABI_NONE, Is64, IsLittle) {}
  // Default constructor for unset fields.
  MachineInfo() : MachineInfo(0, 0, false, false) {}
  uint16_t EMachine;
  uint8_t OSABI;
  bool Is64Bit;
  bool IsLittleEndian;
};
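
// Illustrative sketch: machine info for little-endian 64-bit x86-64, via the
// OSABI-defaulting constructor.
//   MachineInfo MI(ELF::EM_X86_64, /*Is64=*/true, /*IsLittle=*/true);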

// Flags set by --set-section-flags or --rename-section. Interpretation of these
// is format-specific and not all flags are meaningful for all object file
// formats. This is a bitmask; many section flags may be set.
enum SectionFlag {
  SecNone = 0,
  SecAlloc = 1 << 0,
  SecLoad = 1 << 1,
  SecNoload = 1 << 2,
  SecReadonly = 1 << 3,
  SecDebug = 1 << 4,
  SecCode = 1 << 5,
  SecData = 1 << 6,
  SecRom = 1 << 7,
  SecMerge = 1 << 8,
  SecStrings = 1 << 9,
  SecContents = 1 << 10,
  SecShare = 1 << 11,
  SecExclude = 1 << 12,
  LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/SecExclude)
};
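
// Illustrative sketch: LLVM_MARK_AS_BITMASK_ENUM enables the bitwise
// operators, so flags combine directly.
//   SectionFlag Flags = SecAlloc | SecReadonly;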

struct SectionRename {
  StringRef OriginalName;
  StringRef NewName;
  std::optional<SectionFlag> NewFlags;
};

struct SectionFlagsUpdate {
  StringRef Name;
  SectionFlag NewFlags;
};

enum class DiscardType {
  None,   // Default
  All,    // --discard-all (-x)
  Locals, // --discard-locals (-X)
};

enum class MatchStyle {
  Literal,  // Default for symbols.
  Wildcard, // Default for sections, or enabled with --wildcard (-w).
  Regex,    // Enabled with --regex.
};

class NameOrPattern {
  StringRef Name;
  // Regex is shared between multiple CommonConfig instances.
  std::shared_ptr<Regex> R;
  std::shared_ptr<GlobPattern> G;
  bool IsPositiveMatch = true;

  NameOrPattern(StringRef N) : Name(N) {}
  NameOrPattern(std::shared_ptr<Regex> R) : R(R) {}
  NameOrPattern(std::shared_ptr<GlobPattern> G, bool IsPositiveMatch)
      : G(G), IsPositiveMatch(IsPositiveMatch) {}

public:
  // ErrorCallback is used to handle recoverable errors. An Error returned
  // by the callback aborts the parsing and is then returned by this function.
  static Expected<NameOrPattern>
  create(StringRef Pattern, MatchStyle MS,
         llvm::function_ref<Error(Error)> ErrorCallback);

  bool isPositiveMatch() const { return IsPositiveMatch; }
  std::optional<StringRef> getName() const {
    if (!R && !G)
      return Name;
    return std::nullopt;
  }
  bool operator==(StringRef S) const {
    return R ? R->match(S) : G ? G->match(S) : Name == S;
  }
  bool operator!=(StringRef S) const { return !operator==(S); }
};

// Matcher that checks symbol or section names against the command line flags
// provided for that option.
class NameMatcher {
  DenseSet<CachedHashStringRef> PosNames;
  std::vector<NameOrPattern> PosPatterns;
  std::vector<NameOrPattern> NegMatchers;

public:
  Error addMatcher(Expected<NameOrPattern> Matcher) {
    if (!Matcher)
      return Matcher.takeError();
    if (Matcher->isPositiveMatch()) {
      if (std::optional<StringRef> MaybeName = Matcher->getName())
        PosNames.insert(CachedHashStringRef(*MaybeName));
      else
        PosPatterns.push_back(std::move(*Matcher));
    } else {
      NegMatchers.push_back(std::move(*Matcher));
    }
    return Error::success();
  }
  bool matches(StringRef S) const {
    return (PosNames.contains(CachedHashStringRef(S)) ||
            is_contained(PosPatterns, S)) &&
           !is_contained(NegMatchers, S);
  }
  bool empty() const {
    return PosNames.empty() && PosPatterns.empty() && NegMatchers.empty();
  }
};
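
// Illustrative sketch: keep ".text*" sections except ".text.cold" (the "!"
// prefix marks a negative wildcard match; `onMatchErr` is a hypothetical
// error callback).
//   NameMatcher M;
//   if (Error E = M.addMatcher(NameOrPattern::create(
//           ".text*", MatchStyle::Wildcard, onMatchErr)))
//     return E;
//   if (Error E = M.addMatcher(NameOrPattern::create(
//           "!.text.cold", MatchStyle::Wildcard, onMatchErr)))
//     return E;
//   M.matches(".text.hot");  // true
//   M.matches(".text.cold"); // false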

enum class SymbolFlag {
  Global,
  Local,
  Weak,
  Default,
  Hidden,
  Protected,
  File,
  Section,
  Object,
  Function,
  IndirectFunction,
  Debug,
  Constructor,
  Warning,
  Indirect,
  Synthetic,
  UniqueObject,
};

// Symbol info specified by --add-symbol option. Symbol flags not supported
// by a concrete format should be ignored.
struct NewSymbolInfo {
  StringRef SymbolName;
  StringRef SectionName;
  uint64_t Value = 0;
  std::vector<SymbolFlag> Flags;
  std::vector<StringRef> BeforeSyms;
};

// Specify section name and section body for newly added or updated section.
struct NewSectionInfo {
  NewSectionInfo() = default;
  NewSectionInfo(StringRef Name, std::unique_ptr<MemoryBuffer> &&Buffer)
      : SectionName(Name), SectionData(std::move(Buffer)) {}

  StringRef SectionName;
  std::shared_ptr<MemoryBuffer> SectionData;
};

// Configuration for copying/stripping a single file.
struct CommonConfig {
  // Main input/output options
  StringRef InputFilename;
  FileFormat InputFormat = FileFormat::Unspecified;
  StringRef OutputFilename;
  FileFormat OutputFormat = FileFormat::Unspecified;

  // Only applicable when --output-format!=binary (e.g. elf64-x86-64).
  std::optional<MachineInfo> OutputArch;

  // Advanced options
  StringRef AddGnuDebugLink;
  // Cached CRC32 of the gnu_debuglink target.
  uint32_t GnuDebugLinkCRC32;
  std::optional<StringRef> ExtractPartition;
  StringRef SplitDWO;
  StringRef SymbolsPrefix;
  StringRef AllocSectionsPrefix;
  DiscardType DiscardMode = DiscardType::None;

  // Repeated options
  std::vector<NewSectionInfo> AddSection;
  std::vector<StringRef> DumpSection;
  std::vector<NewSectionInfo> UpdateSection;

  // Section matchers
  NameMatcher KeepSection;
  NameMatcher OnlySection;
  NameMatcher ToRemove;

  // Symbol matchers
  NameMatcher SymbolsToGlobalize;
  NameMatcher SymbolsToKeep;
  NameMatcher SymbolsToLocalize;
  NameMatcher SymbolsToRemove;
  NameMatcher UnneededSymbolsToRemove;
  NameMatcher SymbolsToWeaken;
  NameMatcher SymbolsToKeepGlobal;

  // Map options
  StringMap<SectionRename> SectionsToRename;
  StringMap<uint64_t> SetSectionAlignment;
  StringMap<SectionFlagsUpdate> SetSectionFlags;
  StringMap<uint64_t> SetSectionType;
  StringMap<StringRef> SymbolsToRename;

  // Symbol info specified by --add-symbol option.
  std::vector<NewSymbolInfo> SymbolsToAdd;

  // Boolean options
  bool DeterministicArchives = true;
  bool ExtractDWO = false;
  bool ExtractMainPartition = false;
  bool OnlyKeepDebug = false;
  bool PreserveDates = false;
  bool StripAll = false;
  bool StripAllGNU = false;
  bool StripDWO = false;
  bool StripDebug = false;
  bool StripNonAlloc = false;
  bool StripSections = false;
  bool StripUnneeded = false;
  bool Weaken = false;
  bool DecompressDebugSections = false;

  DebugCompressionType CompressionType = DebugCompressionType::None;
};

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_COMMONCONFIG_H
//===- ConfigManager.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJCOPY_CONFIGMANAGER_H
#define LLVM_OBJCOPY_CONFIGMANAGER_H

#include "llvm/ObjCopy/COFF/COFFConfig.h"
#include "llvm/ObjCopy/CommonConfig.h"
#include "llvm/ObjCopy/ELF/ELFConfig.h"
#include "llvm/ObjCopy/MachO/MachOConfig.h"
#include "llvm/ObjCopy/MultiFormatConfig.h"
#include "llvm/ObjCopy/wasm/WasmConfig.h"
#include "llvm/ObjCopy/XCOFF/XCOFFConfig.h"

namespace llvm {
namespace objcopy {

struct ConfigManager : public MultiFormatConfig {
  virtual ~ConfigManager() {}

  const CommonConfig &getCommonConfig() const override { return Common; }

  Expected<const ELFConfig &> getELFConfig() const override { return ELF; }

  Expected<const COFFConfig &> getCOFFConfig() const override;

  Expected<const MachOConfig &> getMachOConfig() const override;

  Expected<const WasmConfig &> getWasmConfig() const override;

  Expected<const XCOFFConfig &> getXCOFFConfig() const override;

  // All configs.
  CommonConfig Common;
  ELFConfig ELF;
  COFFConfig COFF;
  MachOConfig MachO;
  WasmConfig Wasm;
  XCOFFConfig XCOFF;
};
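
// Illustrative sketch: strip debug sections from an object (`InObj` and `OS`
// are assumed to be a loaded object::Binary and an output raw_ostream).
//   ConfigManager CM;
//   CM.Common.InputFilename = "in.o";
//   CM.Common.OutputFilename = "out.o";
//   CM.Common.StripDebug = true;
//   if (Error E = executeObjcopyOnBinary(CM, InObj, OS))
//     ...; // handle the error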

} // namespace objcopy
} // namespace llvm

#endif // LLVM_OBJCOPY_CONFIGMANAGER_H
/*===- llvm/Config/Disassemblers.def - LLVM Disassemblers -------*- C++ -*-===*\
|*                                                                            *|
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
|* Exceptions.                                                                *|
|* See https://llvm.org/LICENSE.txt for license information.                  *|
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
|*                                                                            *|
|*===----------------------------------------------------------------------===*|
|*                                                                            *|
|* This file enumerates all of the disassemblers                              *|
|* supported by this build of LLVM. Clients of this file should define        *|
|* the LLVM_DISASSEMBLER macro to be a function-like macro with a             *|
|* single parameter (the name of the target whose machine code can be         *|
|* disassembled); including this file will then enumerate all of the          *|
|* targets with disassemblers.                                                *|
|*                                                                            *|
|* The set of targets supported by LLVM is generated at configuration         *|
|* time, at which point this header is generated. Do not modify this          *|
|* header directly.                                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_DISASSEMBLER
#  error Please define the macro LLVM_DISASSEMBLER(TargetName)
#endif
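
/* Illustrative client usage (the canonical pattern for these .def files):
   expand each enumerated target into a declaration of its disassembler
   initializer.

     #define LLVM_DISASSEMBLER(TargetName) \
       void LLVMInitialize##TargetName##Disassembler(void);
     #include "llvm/Config/Disassemblers.def"
*/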

LLVM_DISASSEMBLER(X86)
LLVM_DISASSEMBLER(AMDGPU)
LLVM_DISASSEMBLER(PowerPC)
LLVM_DISASSEMBLER(SystemZ)
LLVM_DISASSEMBLER(AArch64)
LLVM_DISASSEMBLER(ARM)
LLVM_DISASSEMBLER(Mips)
LLVM_DISASSEMBLER(BPF)
LLVM_DISASSEMBLER(WebAssembly)


#undef LLVM_DISASSEMBLER
/*===----- llvm/Config/TargetExegesis.def - LLVM Target Exegesis-*- C++ -*-===*\
|*                                                                            *|
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
|* Exceptions.                                                                *|
|* See https://llvm.org/LICENSE.txt for license information.                  *|
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
|*                                                                            *|
|*===----------------------------------------------------------------------===*|
|*                                                                            *|
|* This file enumerates all of the targets of llvm-exegesis                   *|
|* supported by this build of LLVM. Clients of this file should define        *|
|* the LLVM_EXEGESIS macro to be a function-like macro with a                 *|
|* single parameter (the name of the target whose assembly can be             *|
|* generated); including this file will then enumerate all of the             *|
|* targets with llvm-exegesis support.                                        *|
|*                                                                            *|
|* The set of targets supported by LLVM is generated at configuration         *|
|* time, at which point this header is generated. Do not modify this          *|
|* header directly.                                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_EXEGESIS
#  error Please define the macro LLVM_EXEGESIS(TargetName)
#endif

LLVM_EXEGESIS(X86)
LLVM_EXEGESIS(PowerPC)
LLVM_EXEGESIS(AArch64)
LLVM_EXEGESIS(Mips)


#undef LLVM_EXEGESIS
/* Config/llvm-config-x86_64.h */
/*===------- llvm/Config/llvm-config.h - llvm configuration -------*- C -*-===*/
/*                                                                            */
/* Part of the LLVM Project, under the Apache License v2.0 with LLVM          */
/* Exceptions.                                                                */
/* See https://llvm.org/LICENSE.txt for license information.                  */
/* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    */
/*                                                                            */
/*===----------------------------------------------------------------------===*/

/* This file enumerates variables from the LLVM configuration so that they
   can be in exported headers and won't override package specific directives.
   This is a C header that can be included in the llvm-c headers. */

#ifndef LLVM_CONFIG_H
#define LLVM_CONFIG_H

/* Define if LLVM_ENABLE_DUMP is enabled */
/* #undef LLVM_ENABLE_DUMP */

/* Target triple LLVM will generate code for by default */
/* Doesn't use `cmakedefine` because it is allowed to be empty. */
#define LLVM_DEFAULT_TARGET_TRIPLE "x86_64-redhat-linux-gnu"

/* Define if threads enabled */
#define LLVM_ENABLE_THREADS 1

/* Has gcc/MSVC atomic intrinsics */
#define LLVM_HAS_ATOMICS 1

/* Host triple LLVM will be executed on */
#define LLVM_HOST_TRIPLE "x86_64-unknown-linux-gnu"

/* LLVM architecture name for the native architecture, if available */
#define LLVM_NATIVE_ARCH X86

/* LLVM name for the native AsmParser init function, if available */
#define LLVM_NATIVE_ASMPARSER LLVMInitializeX86AsmParser

/* LLVM name for the native AsmPrinter init function, if available */
#define LLVM_NATIVE_ASMPRINTER LLVMInitializeX86AsmPrinter

/* LLVM name for the native Disassembler init function, if available */
#define LLVM_NATIVE_DISASSEMBLER LLVMInitializeX86Disassembler

/* LLVM name for the native Target init function, if available */
#define LLVM_NATIVE_TARGET LLVMInitializeX86Target

/* LLVM name for the native TargetInfo init function, if available */
#define LLVM_NATIVE_TARGETINFO LLVMInitializeX86TargetInfo

/* LLVM name for the native target MC init function, if available */
#define LLVM_NATIVE_TARGETMC LLVMInitializeX86TargetMC

/* LLVM name for the native target MCA init function, if available */
/* #undef LLVM_NATIVE_TARGETMCA */

/* Define if this is Unixish platform */
#define LLVM_ON_UNIX 1

/* Define if we have the Intel JIT API runtime support library */
#define LLVM_USE_INTEL_JITEVENTS 0

/* Define if we have the oprofile JIT-support library */
#define LLVM_USE_OPROFILE 0

/* Define if we have the perf JIT-support library */
#define LLVM_USE_PERF 1

/* Major version of the LLVM API */
#define LLVM_VERSION_MAJOR 17

/* Minor version of the LLVM API */
#define LLVM_VERSION_MINOR 0

/* Patch version of the LLVM API */
#define LLVM_VERSION_PATCH 6

/* LLVM version string */
#define LLVM_VERSION_STRING "17.0.6"

/* Whether LLVM records statistics for use with GetStatistics(),
 * PrintStatistics() or PrintStatisticsJSON()
 */
#define LLVM_FORCE_ENABLE_STATS 0

/* Define if we have z3 and want to build it */
/* #undef LLVM_WITH_Z3 */

/* Define if we have curl and want to use it */
/* #undef LLVM_ENABLE_CURL */

/* Define if we have cpp-httplib and want to use it */
/* #undef LLVM_ENABLE_HTTPLIB */

/* Define if zlib compression is available */
#define LLVM_ENABLE_ZLIB 1

/* Define if zstd compression is available */
#define LLVM_ENABLE_ZSTD 0

/* Define if LLVM is using tflite instead of libtensorflow */
/* #undef LLVM_HAVE_TFLITE */

/* Define to 1 if you have the <sysexits.h> header file. */
#define HAVE_SYSEXITS_H 1

/* Define if the xar_open() function is supported on this platform. */
/* #undef LLVM_HAVE_LIBXAR */

/* Define if building libLLVM shared library */
#define LLVM_BUILD_LLVM_DYLIB

/* Define if building LLVM with BUILD_SHARED_LIBS */
/* #undef LLVM_BUILD_SHARED_LIBS */

/* Define if building LLVM with LLVM_FORCE_USE_OLD_TOOLCHAIN_LIBS */
/* #undef LLVM_FORCE_USE_OLD_TOOLCHAIN */

/* Define if llvm_unreachable should be optimized with undefined behavior
 * in non assert builds */
#define LLVM_UNREACHABLE_OPTIMIZE 1

/* Define to 1 if you have the DIA SDK installed, and to 0 if you don't. */
#define LLVM_ENABLE_DIA_SDK 0

/* Define if plugins enabled */
#define LLVM_ENABLE_PLUGINS

#endif
/*===------- llvm/Config/abi-breaking.h - llvm configuration -------*- C -*-===*/
/*                                                                            */
/* Part of the LLVM Project, under the Apache License v2.0 with LLVM          */
/* Exceptions.                                                                */
/* See https://llvm.org/LICENSE.txt for license information.                  */
/* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    */
/*                                                                            */
/*===----------------------------------------------------------------------===*/

/* This file controls the C++ ABI break introduced in LLVM public header. */

#ifndef LLVM_ABI_BREAKING_CHECKS_H
#define LLVM_ABI_BREAKING_CHECKS_H

/* Define to enable checks that alter the LLVM C++ ABI */
#define LLVM_ENABLE_ABI_BREAKING_CHECKS 0

/* Define to enable reverse iteration of unordered llvm containers */
#define LLVM_ENABLE_REVERSE_ITERATION 0

/* Allow selectively disabling link-time mismatch checking so that header-only
   ADT content from LLVM can be used without linking libSupport. */
#if !defined(LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING) || !LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING

// ABI_BREAKING_CHECKS protection: provides a link-time failure when a client's
// build configuration mismatches LLVM's.
#if defined(_MSC_VER)
// Use pragma with MSVC
#define LLVM_XSTR(s) LLVM_STR(s)
#define LLVM_STR(s) #s
#pragma detect_mismatch("LLVM_ENABLE_ABI_BREAKING_CHECKS", LLVM_XSTR(LLVM_ENABLE_ABI_BREAKING_CHECKS))
#undef LLVM_XSTR
#undef LLVM_STR
#elif defined(_WIN32) || defined(__CYGWIN__) // Win32 w/o #pragma detect_mismatch
// FIXME: Implement checks without weak.
#elif defined(__cplusplus)
#if !(defined(_AIX) && defined(__GNUC__) && !defined(__clang__))
#define LLVM_HIDDEN_VISIBILITY __attribute__ ((visibility("hidden")))
#else
// GCC on AIX does not support visibility attributes. Symbols are not
// exported by default on AIX.
#define LLVM_HIDDEN_VISIBILITY
#endif
namespace llvm {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
extern int EnableABIBreakingChecks;
LLVM_HIDDEN_VISIBILITY
__attribute__((weak)) int *VerifyEnableABIBreakingChecks =
    &EnableABIBreakingChecks;
#else
extern int DisableABIBreakingChecks;
LLVM_HIDDEN_VISIBILITY
__attribute__((weak)) int *VerifyDisableABIBreakingChecks =
    &DisableABIBreakingChecks;
#endif
}
#undef LLVM_HIDDEN_VISIBILITY
#endif // _MSC_VER

#endif // LLVM_DISABLE_ABI_BREAKING_CHECKS_ENFORCING

#endif
/*===- llvm/Config/AsmParsers.def - LLVM Assembly Parsers -------*- C++ -*-===*\
|*                                                                            *|
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
|* Exceptions.                                                                *|
|* See https://llvm.org/LICENSE.txt for license information.                  *|
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
|*                                                                            *|
|*===----------------------------------------------------------------------===*|
|*                                                                            *|
|* This file enumerates all of the assembly-language parsers                  *|
|* supported by this build of LLVM. Clients of this file should define        *|
|* the LLVM_ASM_PARSER macro to be a function-like macro with a               *|
|* single parameter (the name of the target whose assembly can be             *|
|* generated); including this file will then enumerate all of the             *|
|* targets with assembly parsers.                                             *|
|*                                                                            *|
|* The set of targets supported by LLVM is generated at configuration         *|
|* time, at which point this header is generated. Do not modify this          *|
|* header directly.                                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_ASM_PARSER
#  error Please define the macro LLVM_ASM_PARSER(TargetName)
#endif

LLVM_ASM_PARSER(X86)
LLVM_ASM_PARSER(AMDGPU)
LLVM_ASM_PARSER(PowerPC)
LLVM_ASM_PARSER(SystemZ)
LLVM_ASM_PARSER(AArch64)
LLVM_ASM_PARSER(ARM)
LLVM_ASM_PARSER(Mips)
LLVM_ASM_PARSER(BPF)
LLVM_ASM_PARSER(WebAssembly)


#undef LLVM_ASM_PARSER
/*===- llvm/Config/Targets.def - LLVM Target Architectures ------*- C++ -*-===*\
|*                                                                            *|
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
|* Exceptions.                                                                *|
|* See https://llvm.org/LICENSE.txt for license information.                  *|
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
|*                                                                            *|
|*===----------------------------------------------------------------------===*|
|*                                                                            *|
|* This file enumerates all of the target architectures supported by          *|
|* this build of LLVM. Clients of this file should define the                 *|
|* LLVM_TARGET macro to be a function-like macro with a single                *|
|* parameter (the name of the target); including this file will then          *|
|* enumerate all of the targets.                                              *|
|*                                                                            *|
|* The set of targets supported by LLVM is generated at configuration         *|
|* time, at which point this header is generated. Do not modify this          *|
|* header directly.                                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_TARGET
#  error Please define the macro LLVM_TARGET(TargetName)
#endif

LLVM_TARGET(X86)
LLVM_TARGET(AMDGPU)
LLVM_TARGET(PowerPC)
LLVM_TARGET(NVPTX)
LLVM_TARGET(SystemZ)
LLVM_TARGET(AArch64)
LLVM_TARGET(ARM)
LLVM_TARGET(Mips)
LLVM_TARGET(BPF)
LLVM_TARGET(WebAssembly)


#undef LLVM_TARGET
/* Config/llvm-config.h */
/*
 * Kluge to support multilib installation of both 32- and 64-bit RPMS:
 * we need to arrange that header files that appear in both RPMs are
 * identical.  Hence, this file is architecture-independent and calls
 * in an arch-dependent file that will appear in just one RPM.
 *
 * To avoid breaking arches not explicitly supported by Red Hat, we
 * use this indirection file *only* on known multilib arches.
 *
 * We pay attention to include _only_ the original multilib-unclean
 * header file.  Including any other system-header file could cause
 * unpredictable include-ordering issues (rhbz#1412274, comment #16).
 *
 * Note: this may well fail if user tries to use gcc's -I- option.
 * But that option is deprecated anyway.
 */
#if defined(__x86_64__)
#include "llvm-config-x86_64.h"
#elif defined(__i386__)
#include "llvm-config-i386.h"
#elif defined(__ppc64__) || defined(__powerpc64__)
#include "llvm-config-ppc64.h"
#elif defined(__ppc__) || defined(__powerpc__)
#include "llvm-config-ppc.h"
#elif defined(__s390x__)
#include "llvm-config-s390x.h"
#elif defined(__s390__)
#include "llvm-config-s390.h"
#elif defined(__sparc__) && defined(__arch64__)
#include "llvm-config-sparc64.h"
#elif defined(__sparc__)
#include "llvm-config-sparc.h"
#endif
/*===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===*\
|*                                                                            *|
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
|* Exceptions.                                                                *|
|* See https://llvm.org/LICENSE.txt for license information.                  *|
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
|*                                                                            *|
|*===----------------------------------------------------------------------===*|
|*                                                                            *|
|* This file enumerates all of the assembly-language printers                 *|
|* supported by this build of LLVM. Clients of this file should define        *|
|* the LLVM_ASM_PRINTER macro to be a function-like macro with a              *|
|* single parameter (the name of the target whose assembly can be             *|
|* generated); including this file will then enumerate all of the             *|
|* targets with assembly printers.                                            *|
|*                                                                            *|
|* The set of targets supported by LLVM is generated at configuration         *|
|* time, at which point this header is generated. Do not modify this          *|
|* header directly.                                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_ASM_PRINTER
#  error Please define the macro LLVM_ASM_PRINTER(TargetName)
#endif

LLVM_ASM_PRINTER(X86)
LLVM_ASM_PRINTER(AMDGPU)
LLVM_ASM_PRINTER(PowerPC)
LLVM_ASM_PRINTER(NVPTX)
LLVM_ASM_PRINTER(SystemZ)
LLVM_ASM_PRINTER(AArch64)
LLVM_ASM_PRINTER(ARM)
LLVM_ASM_PRINTER(Mips)
LLVM_ASM_PRINTER(BPF)
LLVM_ASM_PRINTER(WebAssembly)


#undef LLVM_ASM_PRINTER
/*===------ llvm/Config/TargetMCAs.def - LLVM Target MCAs -------*- C++ -*-===*\
|*                                                                            *|
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM          *|
|* Exceptions.                                                                *|
|* See https://llvm.org/LICENSE.txt for license information.                  *|
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception                    *|
|*                                                                            *|
|*===----------------------------------------------------------------------===*|
|*                                                                            *|
|* This file enumerates all of the target MCAs                                *|
|* supported by this build of LLVM. Clients of this file should define        *|
|* the LLVM_TARGETMCA macro to be a function-like macro with a                *|
|* single parameter (the name of the target whose assembly can be             *|
|* generated); including this file will then enumerate all of the             *|
|* targets with target MCAs.                                                  *|
|*                                                                            *|
|* The set of targets supported by LLVM is generated at configuration         *|
|* time, at which point this header is generated. Do not modify this          *|
|* header directly.                                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_TARGETMCA
#  error Please define the macro LLVM_TARGETMCA(TargetName)
#endif

LLVM_TARGETMCA(X86)
LLVM_TARGETMCA(AMDGPU)


#undef LLVM_TARGETMCA
//===- Linker.h - Module Linker Interface -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LINKER_LINKER_H
#define LLVM_LINKER_LINKER_H

#include "llvm/ADT/StringSet.h"
#include "llvm/Linker/IRMover.h"

namespace llvm {
class Module;

/// This class provides the core functionality of linking in LLVM. It keeps a
/// pointer to the merged module so far. It doesn't take ownership of the
/// module since it is assumed that the user of this class will want to do
/// something with it after the linking.
class Linker {
  IRMover Mover;

public:
  enum Flags {
    None = 0,
    OverrideFromSrc = (1 << 0),
    LinkOnlyNeeded = (1 << 1),
  };

  Linker(Module &M);

  /// Link \p Src into the composite.
  ///
  /// Passing the OverrideFromSrc flag will have symbols from Src
  /// shadow those in Dest.
  ///
  /// Passing InternalizeCallback will have the linker call the function with
  /// the new module and a list of global value names to be internalized by the
  /// callback.
  ///
  /// Returns true on error.
  bool linkInModule(std::unique_ptr<Module> Src, unsigned Flags = Flags::None,
                    std::function<void(Module &, const StringSet<> &)>
                        InternalizeCallback = {});

  static bool linkModules(Module &Dest, std::unique_ptr<Module> Src,
                          unsigned Flags = Flags::None,
                          std::function<void(Module &, const StringSet<> &)>
                              InternalizeCallback = {});
};
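
// Illustrative sketch: link `Src` into `Dest` (both assumed to live in the
// same LLVMContext); linkInModule returns true on error.
//   Linker L(Dest);
//   if (L.linkInModule(std::move(Src)))
//     ...; // handle the error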

} // End llvm namespace

#endif
//===- IRMover.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LINKER_IRMOVER_H
#define LLVM_LINKER_IRMOVER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FunctionExtras.h"
#include <functional>

namespace llvm {
class Error;
class GlobalValue;
class Metadata;
class Module;
class StructType;
class TrackingMDRef;
class Type;

class IRMover {
  struct StructTypeKeyInfo {
    struct KeyTy {
      ArrayRef<Type *> ETypes;
      bool IsPacked;
      KeyTy(ArrayRef<Type *> E, bool P);
      KeyTy(const StructType *ST);
      bool operator==(const KeyTy &that) const;
      bool operator!=(const KeyTy &that) const;
    };
    static StructType *getEmptyKey();
    static StructType *getTombstoneKey();
    static unsigned getHashValue(const KeyTy &Key);
    static unsigned getHashValue(const StructType *ST);
    static bool isEqual(const KeyTy &LHS, const StructType *RHS);
    static bool isEqual(const StructType *LHS, const StructType *RHS);
  };

  /// Type of the Metadata map in \a ValueToValueMapTy.
  typedef DenseMap<const Metadata *, TrackingMDRef> MDMapT;

public:
  class IdentifiedStructTypeSet {
    // The set of opaque types in the composite module.
    DenseSet<StructType *> OpaqueStructTypes;

    // The set of identified but non-opaque structures in the composite module.
    DenseSet<StructType *, StructTypeKeyInfo> NonOpaqueStructTypes;

  public:
    void addNonOpaque(StructType *Ty);
    void switchToNonOpaque(StructType *Ty);
    void addOpaque(StructType *Ty);
    StructType *findNonOpaque(ArrayRef<Type *> ETypes, bool IsPacked);
    bool hasType(StructType *Ty);
  };

  IRMover(Module &M);

  typedef std::function<void(GlobalValue &)> ValueAdder;
  using LazyCallback =
      llvm::unique_function<void(GlobalValue &GV, ValueAdder Add)>;

  /// Move in the provided values in \p ValuesToLink from \p Src.
  ///
  /// - \p AddLazyFor is a callback that the IRMover will call when a global
  ///   value is referenced by one of the ValuesToLink (transitively) but was
  ///   not present in ValuesToLink. The GlobalValue and a ValueAdder callback
  ///   are passed as arguments, and the callback is expected to be called
  ///   if the GlobalValue needs to be added to the \p ValuesToLink and linked.
  ///   Pass nullptr if there's no work to be done in such cases.
  /// - \p IsPerformingImport is true when this IR link is to perform ThinLTO
  ///   function importing from Src.
  Error move(std::unique_ptr<Module> Src, ArrayRef<GlobalValue *> ValuesToLink,
             LazyCallback AddLazyFor, bool IsPerformingImport);
  Module &getModule() { return Composite; }

private:
  Module &Composite;
  IdentifiedStructTypeSet IdentifiedStructTypes;
  MDMapT SharedMDs; ///< A Metadata map to use for all calls to \a move().
};

} // End llvm namespace

#endif
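
// --- Usage sketch (illustrative; not part of IRMover.h) ---
// Moves every function defined in Src into Dst. Passing a null LazyCallback
// means referenced-but-unlisted globals get no lazy handling, as documented
// above. The setup of Dst/Src is assumed to happen elsewhere.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Module.h"
#include "llvm/Linker/IRMover.h"

llvm::Error moveAllFunctions(llvm::Module &Dst,
                             std::unique_ptr<llvm::Module> Src) {
  llvm::SmallVector<llvm::GlobalValue *, 16> ValuesToLink;
  for (llvm::Function &F : *Src)
    if (!F.isDeclaration())
      ValuesToLink.push_back(&F);

  llvm::IRMover Mover(Dst);
  return Mover.move(std::move(Src), ValuesToLink,
                    /*AddLazyFor=*/nullptr, /*IsPerformingImport=*/false);
}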

MCA/InstrBuilder.h
//===--------------------- InstrBuilder.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// A builder class for instructions that are statically analyzed by llvm-mca.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_INSTRBUILDER_H
#define LLVM_MCA_INSTRBUILDER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MCA/CustomBehaviour.h"
#include "llvm/MCA/Instruction.h"
#include "llvm/MCA/Support.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace mca {

class RecycledInstErr : public ErrorInfo<RecycledInstErr> {
  Instruction *RecycledInst;

public:
  static char ID;

  explicit RecycledInstErr(Instruction *Inst) : RecycledInst(Inst) {}
  // Always need to carry an Instruction
  RecycledInstErr() = delete;

  Instruction *getInst() const { return RecycledInst; }

  void log(raw_ostream &OS) const override {
    OS << "Instruction is recycled\n";
  }

  std::error_code convertToErrorCode() const override {
    return llvm::inconvertibleErrorCode();
  }
};

/// A builder class that knows how to construct Instruction objects.
///
/// Every llvm-mca Instruction is described by an object of class InstrDesc.
/// An InstrDesc describes which registers are read/written by the instruction,
/// as well as the instruction latency and hardware resources consumed.
///
/// This class is used by the tool to construct Instructions and instruction
/// descriptors (i.e. InstrDesc objects).
/// Information from the machine scheduling model is used to identify processor
/// resources that are consumed by an instruction.
class InstrBuilder {
  const MCSubtargetInfo &STI;
  const MCInstrInfo &MCII;
  const MCRegisterInfo &MRI;
  const MCInstrAnalysis *MCIA;
  const InstrumentManager &IM;
  SmallVector<uint64_t, 8> ProcResourceMasks;

  // The key is an (MCInst opcode, SchedClassID) pair that identifies the
  // InstrDesc value.
  DenseMap<std::pair<unsigned short, unsigned>,
           std::unique_ptr<const InstrDesc>>
      Descriptors;

  // The key is an (MCInst pointer, SchedClassID) pair that identifies the
  // variant InstrDesc value.
  DenseMap<std::pair<const MCInst *, unsigned>,
           std::unique_ptr<const InstrDesc>>
      VariantDescriptors;

  bool FirstCallInst;
  bool FirstReturnInst;

  using InstRecycleCallback =
      llvm::function_ref<Instruction *(const InstrDesc &)>;
  InstRecycleCallback InstRecycleCB;

  Expected<const InstrDesc &>
  createInstrDescImpl(const MCInst &MCI, const SmallVector<Instrument *> &IVec);
  Expected<const InstrDesc &>
  getOrCreateInstrDesc(const MCInst &MCI,
                       const SmallVector<Instrument *> &IVec);

  InstrBuilder(const InstrBuilder &) = delete;
  InstrBuilder &operator=(const InstrBuilder &) = delete;

  void populateWrites(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
  void populateReads(InstrDesc &ID, const MCInst &MCI, unsigned SchedClassID);
  Error verifyInstrDesc(const InstrDesc &ID, const MCInst &MCI) const;

public:
  InstrBuilder(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
               const MCRegisterInfo &RI, const MCInstrAnalysis *IA,
               const InstrumentManager &IM);

  void clear() {
    Descriptors.clear();
    VariantDescriptors.clear();
    FirstCallInst = true;
    FirstReturnInst = true;
  }

  /// Set a callback which is invoked to retrieve a recycled mca::Instruction
  /// or null if there isn't any.
  void setInstRecycleCallback(InstRecycleCallback CB) { InstRecycleCB = CB; }

  Expected<std::unique_ptr<Instruction>>
  createInstruction(const MCInst &MCI, const SmallVector<Instrument *> &IVec);
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_INSTRBUILDER_H
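
// --- Usage sketch (illustrative; not part of InstrBuilder.h) ---
// Lowers a sequence of MCInsts into mca::Instructions. The InstrBuilder is
// assumed to be constructed from the usual MC objects (STI, MCII, MRI, MCIA,
// InstrumentManager); the empty IVec means no instruments apply. <vector> is
// assumed to be included.

llvm::Expected<std::vector<std::unique_ptr<llvm::mca::Instruction>>>
lowerToMCA(llvm::mca::InstrBuilder &IB, llvm::ArrayRef<llvm::MCInst> Insts) {
  std::vector<std::unique_ptr<llvm::mca::Instruction>> Out;
  llvm::SmallVector<llvm::mca::Instrument *> IVec;
  for (const llvm::MCInst &MCI : Insts) {
    auto InstOrErr = IB.createInstruction(MCI, IVec);
    if (!InstOrErr) // with a recycle callback set, this may be RecycledInstErr
      return InstOrErr.takeError();
    Out.push_back(std::move(*InstOrErr));
  }
  return std::move(Out);
}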

MCA/IncrementalSourceMgr.h
//===---------------- IncrementalSourceMgr.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file contains IncrementalSourceMgr, an implementation of SourceMgr
/// that allows users to add new instructions incrementally / dynamically.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_INCREMENTALSOURCEMGR_H
#define LLVM_MCA_INCREMENTALSOURCEMGR_H

#include "llvm/MCA/SourceMgr.h"
#include <deque>

namespace llvm {
namespace mca {

/// An implementation of \a SourceMgr that allows users to add new instructions
/// incrementally / dynamically.
/// Note that this SourceMgr takes ownership of all \a mca::Instruction.
class IncrementalSourceMgr : public SourceMgr {
  /// Owner of all mca::Instruction instances. Note that we use std::deque
  /// here for better throughput than std::vector or llvm::SmallVector, which
  /// usually pay a higher re-allocation cost when there is a large number of
  /// instructions.
  std::deque<UniqueInst> InstStorage;

  /// Instructions that are ready to be used. Each of them is a pointer to an
  /// \a UniqueInst inside InstStorage.
  std::deque<Instruction *> Staging;

  /// Current instruction index.
  unsigned TotalCounter = 0U;

  /// End-of-stream flag.
  bool EOS = false;

  /// Called when an instruction is no longer needed.
  using InstFreedCallback = llvm::function_ref<void(Instruction *)>;
  InstFreedCallback InstFreedCB;

public:
  IncrementalSourceMgr() = default;

  void clear();

  /// Set a callback that is invoked when a mca::Instruction is
  /// no longer needed. This is usually used for recycling the
  /// instruction.
  void setOnInstFreedCallback(InstFreedCallback CB) { InstFreedCB = CB; }

  ArrayRef<UniqueInst> getInstructions() const override {
    llvm_unreachable("Not applicable");
  }

  bool hasNext() const override { return !Staging.empty(); }
  bool isEnd() const override { return EOS; }

  SourceRef peekNext() const override {
    assert(hasNext());
    return SourceRef(TotalCounter, *Staging.front());
  }

  /// Add a new instruction.
  void addInst(UniqueInst &&Inst) {
    InstStorage.emplace_back(std::move(Inst));
    Staging.push_back(InstStorage.back().get());
  }

  /// Add a recycled instruction.
  void addRecycledInst(Instruction *Inst) { Staging.push_back(Inst); }

  void updateNext() override;

  /// Mark the end of instruction stream.
  void endOfStream() { EOS = true; }

#ifndef NDEBUG
  /// Print statistics about instruction recycling.
  void printStatistic(raw_ostream &OS);
#endif
};

} // end namespace mca
} // end namespace llvm

#endif // LLVM_MCA_INCREMENTALSOURCEMGR_H
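
// --- Usage sketch (illustrative; not part of IncrementalSourceMgr.h) ---
// Drives the source manager dynamically. MakeInst stands in for any producer
// of instructions (e.g. InstrBuilder) and is an assumption of this example.

void produce(llvm::mca::IncrementalSourceMgr &ISM,
             llvm::function_ref<llvm::mca::SourceMgr::UniqueInst()> MakeInst) {
  for (unsigned I = 0; I < 4; ++I)
    ISM.addInst(MakeInst()); // the source manager takes ownership
  ISM.endOfStream();         // signal that no further instructions arrive
}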

MCA/SourceMgr.h
//===--------------------- SourceMgr.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file contains abstract class SourceMgr and the default implementation,
/// CircularSourceMgr.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_SOURCEMGR_H
#define LLVM_MCA_SOURCEMGR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MCA/Instruction.h"

namespace llvm {
namespace mca {

// MSVC >= 19.15, < 19.20 need to see the definition of class Instruction to
// prevent compiler error C2139 about intrinsic type trait '__is_assignable'.
typedef std::pair<unsigned, const Instruction &> SourceRef;

/// Abstracts the input code sequence (a sequence of MCInst) and assigns
/// unique identifiers to every instruction in the sequence.
struct SourceMgr {
  using UniqueInst = std::unique_ptr<Instruction>;

  /// Provides a fixed range of \a UniqueInst to iterate.
  virtual ArrayRef<UniqueInst> getInstructions() const = 0;

  /// (Fixed) Number of \a UniqueInst. Returns the size of
  /// \a getInstructions by default.
  virtual size_t size() const { return getInstructions().size(); }

  /// Whether there is any \a SourceRef to inspect / peek next.
  /// Note that returning false from this doesn't mean the instruction
  /// stream has ended.
  virtual bool hasNext() const = 0;

  /// Whether the instruction stream has ended.
  virtual bool isEnd() const = 0;

  /// The next \a SourceRef.
  virtual SourceRef peekNext() const = 0;

  /// Advance to the next \a SourceRef.
  virtual void updateNext() = 0;

  virtual ~SourceMgr() {}
};

/// The default implementation of \a SourceMgr. It always takes a fixed number
/// of instructions and provides an option to loop the given sequence for a
/// certain number of iterations.
class CircularSourceMgr : public SourceMgr {
  ArrayRef<UniqueInst> Sequence;
  unsigned Current;
  const unsigned Iterations;
  static const unsigned DefaultIterations = 100;

public:
  CircularSourceMgr(ArrayRef<UniqueInst> S, unsigned Iter)
      : Sequence(S), Current(0U), Iterations(Iter ? Iter : DefaultIterations) {}

  ArrayRef<UniqueInst> getInstructions() const override { return Sequence; }

  unsigned getNumIterations() const { return Iterations; }
  bool hasNext() const override {
    return Current < (Iterations * Sequence.size());
  }
  bool isEnd() const override { return !hasNext(); }

  SourceRef peekNext() const override {
    assert(hasNext() && "Already at end of sequence!");
    return SourceRef(Current, *Sequence[Current % Sequence.size()]);
  }

  void updateNext() override { ++Current; }
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_SOURCEMGR_H
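
// --- Usage sketch (illustrative; not part of SourceMgr.h) ---
// The canonical consumption loop. For CircularSourceMgr, hasNext() stays
// true until the sequence has been replayed Iterations times (100 when the
// constructor receives Iter == 0); for other implementations, hasNext()
// returning false does not by itself mean the stream has ended.

void drain(llvm::mca::SourceMgr &SM) {
  while (SM.hasNext()) {
    llvm::mca::SourceRef SR = SM.peekNext();
    // SR.first is the unique instruction identifier assigned by the source
    // manager; SR.second is the Instruction itself.
    (void)SR;
    SM.updateNext();
  }
}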

MCA/HWEventListener.h
//===----------------------- HWEventListener.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the main interface for hardware event listeners.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HWEVENTLISTENER_H
#define LLVM_MCA_HWEVENTLISTENER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MCA/Instruction.h"
#include "llvm/MCA/Support.h"

namespace llvm {
namespace mca {

// An HWInstructionEvent represents state changes of instructions that
// listeners might be interested in. Listeners can choose to ignore any event
// they are not interested in.
class HWInstructionEvent {
public:
  // This is the list of event types that are shared by all targets, that
  // generic subtarget-agnostic classes (e.g., Pipeline, HWInstructionEvent,
  // ...) and generic Views can manipulate.
  // Subtargets are free to define additional event types, that are going to be
  // handled by generic components as opaque values, but can still be
  // emitted by subtarget-specific pipeline stages (e.g., ExecuteStage,
  // DispatchStage, ...) and interpreted by subtarget-specific EventListener
  // implementations.
  enum GenericEventType {
    Invalid = 0,
    // Events generated by the Retire Control Unit.
    Retired,
    // Events generated by the Scheduler.
    Pending,
    Ready,
    Issued,
    Executed,
    // Events generated by the Dispatch logic.
    Dispatched,

    LastGenericEventType,
  };

  HWInstructionEvent(unsigned type, const InstRef &Inst)
      : Type(type), IR(Inst) {}

  // The event type. The exact meaning depends on the subtarget.
  const unsigned Type;

  // The instruction this event was generated for.
  const InstRef &IR;
};

// ResourceRef::first is the index of the associated Resource.
// ResourceRef::second is a bitmask of the referenced sub-unit of the resource.
using ResourceRef = std::pair<uint64_t, uint64_t>;

using ResourceUse = std::pair<ResourceRef, ResourceCycles>;

class HWInstructionIssuedEvent : public HWInstructionEvent {
public:
  HWInstructionIssuedEvent(const InstRef &IR, ArrayRef<ResourceUse> UR)
      : HWInstructionEvent(HWInstructionEvent::Issued, IR), UsedResources(UR) {}

  ArrayRef<ResourceUse> UsedResources;
};

class HWInstructionDispatchedEvent : public HWInstructionEvent {
public:
  HWInstructionDispatchedEvent(const InstRef &IR, ArrayRef<unsigned> Regs,
                               unsigned UOps)
      : HWInstructionEvent(HWInstructionEvent::Dispatched, IR),
        UsedPhysRegs(Regs), MicroOpcodes(UOps) {}
  // Number of physical registers allocated for this instruction. There is one
  // entry per register file.
  ArrayRef<unsigned> UsedPhysRegs;
  // Number of micro opcodes dispatched.
  // This field is often set to the total number of micro-opcodes specified by
  // the instruction descriptor of IR.
  // The only exception is when IR declares a number of micro opcodes
  // which exceeds the processor DispatchWidth, and - by construction - it
  // requires multiple cycles to be fully dispatched. In that particular case,
  // the dispatch logic would generate more than one dispatch event (one per
  // cycle), and each event would declare how many micro opcodes have
  // effectively been dispatched to the schedulers.
  unsigned MicroOpcodes;
};

class HWInstructionRetiredEvent : public HWInstructionEvent {
public:
  HWInstructionRetiredEvent(const InstRef &IR, ArrayRef<unsigned> Regs)
      : HWInstructionEvent(HWInstructionEvent::Retired, IR),
        FreedPhysRegs(Regs) {}
  // Number of register writes that have been architecturally committed. There
  // is one entry per register file.
  ArrayRef<unsigned> FreedPhysRegs;
};

// A HWStallEvent represents a pipeline stall caused by the lack of hardware
// resources.
class HWStallEvent {
public:
  enum GenericEventType {
    Invalid = 0,
    // Generic stall events generated by the DispatchStage.
    RegisterFileStall,
    RetireControlUnitStall,
    // Generic stall events generated by the Scheduler.
    DispatchGroupStall,
    SchedulerQueueFull,
    LoadQueueFull,
    StoreQueueFull,
    CustomBehaviourStall,
    LastGenericEvent
  };

  HWStallEvent(unsigned type, const InstRef &Inst) : Type(type), IR(Inst) {}

  // The exact meaning of the stall event type depends on the subtarget.
  const unsigned Type;

  // The instruction this event was generated for.
  const InstRef &IR;
};

// A HWPressureEvent describes an increase in backend pressure caused by
// the presence of data dependencies or unavailability of pipeline resources.
class HWPressureEvent {
public:
  enum GenericReason {
    INVALID = 0,
    // Scheduler was unable to issue all the ready instructions because some
    // pipeline resources were unavailable.
    RESOURCES,
    // Instructions could not be issued because of register data dependencies.
    REGISTER_DEPS,
    // Instructions could not be issued because of memory dependencies.
    MEMORY_DEPS
  };

  HWPressureEvent(GenericReason reason, ArrayRef<InstRef> Insts,
                  uint64_t Mask = 0)
      : Reason(reason), AffectedInstructions(Insts), ResourceMask(Mask) {}

  // Reason for this increase in backend pressure.
  GenericReason Reason;

  // Instructions affected (i.e. delayed) by this increase in backend pressure.
  ArrayRef<InstRef> AffectedInstructions;

  // A mask of unavailable processor resources.
  const uint64_t ResourceMask;
};

class HWEventListener {
public:
  // Generic events generated by the pipeline.
  virtual void onCycleBegin() {}
  virtual void onCycleEnd() {}

  virtual void onEvent(const HWInstructionEvent &Event) {}
  virtual void onEvent(const HWStallEvent &Event) {}
  virtual void onEvent(const HWPressureEvent &Event) {}

  virtual void onResourceAvailable(const ResourceRef &RRef) {}

  // Events generated by the Scheduler when buffered resources are
  // consumed/freed for an instruction.
  virtual void onReservedBuffers(const InstRef &Inst,
                                 ArrayRef<unsigned> Buffers) {}
  virtual void onReleasedBuffers(const InstRef &Inst,
                                 ArrayRef<unsigned> Buffers) {}

  virtual ~HWEventListener() = default;

private:
  virtual void anchor();
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_HWEVENTLISTENER_H
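
// --- Usage sketch (illustrative; not part of HWEventListener.h) ---
// A minimal listener in the style of llvm-mca's Views: it counts cycles and
// retired instructions and derives instructions-per-cycle from them. Only
// the overridden hooks come from HWEventListener above.

class RetireCounter : public llvm::mca::HWEventListener {
  unsigned Cycles = 0;
  unsigned Retired = 0;

public:
  void onCycleEnd() override { ++Cycles; }
  void onEvent(const llvm::mca::HWInstructionEvent &E) override {
    if (E.Type == llvm::mca::HWInstructionEvent::Retired)
      ++Retired;
  }
  double getIPC() const { return Cycles ? double(Retired) / Cycles : 0.0; }
};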

MCA/Stages/RetireStage.h
//===---------------------- RetireStage.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the retire stage of a default instruction pipeline.
/// The RetireStage represents the process logic that interacts with the
/// simulated RetireControlUnit hardware.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_RETIRESTAGE_H
#define LLVM_MCA_STAGES_RETIRESTAGE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MCA/HardwareUnits/LSUnit.h"
#include "llvm/MCA/HardwareUnits/RegisterFile.h"
#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
#include "llvm/MCA/Stages/Stage.h"

namespace llvm {
namespace mca {

class RetireStage final : public Stage {
  // Owner will go away when we move listeners/eventing to the stages.
  RetireControlUnit &RCU;
  RegisterFile &PRF;
  LSUnitBase &LSU;

  RetireStage(const RetireStage &Other) = delete;
  RetireStage &operator=(const RetireStage &Other) = delete;

public:
  RetireStage(RetireControlUnit &R, RegisterFile &F, LSUnitBase &LS)
      : RCU(R), PRF(F), LSU(LS) {}

  bool hasWorkToComplete() const override { return !RCU.isEmpty(); }
  Error cycleStart() override;
  Error cycleEnd() override;
  Error execute(InstRef &IR) override;
  void notifyInstructionRetired(const InstRef &IR) const;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_RETIRESTAGE_H

MCA/Stages/InstructionTables.h
//===--------------------- InstructionTables.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements a custom stage to generate instruction tables.
/// See the description of command-line flag -instruction-tables in
/// docs/CommandGuide/llvm-mca.rst
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_INSTRUCTIONTABLES_H
#define LLVM_MCA_STAGES_INSTRUCTIONTABLES_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/MCA/Stages/Stage.h"
#include "llvm/MCA/Support.h"

namespace llvm {
namespace mca {

class InstructionTables final : public Stage {
  const MCSchedModel &SM;
  SmallVector<ResourceUse, 4> UsedResources;
  SmallVector<uint64_t, 8> Masks;

public:
  InstructionTables(const MCSchedModel &Model)
      : SM(Model), Masks(Model.getNumProcResourceKinds()) {
    computeProcResourceMasks(Model, Masks);
  }

  bool hasWorkToComplete() const override { return false; }
  Error execute(InstRef &IR) override;
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_INSTRUCTIONTABLES_H

MCA/Stages/EntryStage.h
//===---------------------- EntryStage.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the Entry stage of an instruction pipeline.  Its sole
/// purpose in life is to pick instructions in sequence and move them to the
/// next pipeline stage.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_ENTRYSTAGE_H
#define LLVM_MCA_STAGES_ENTRYSTAGE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MCA/SourceMgr.h"
#include "llvm/MCA/Stages/Stage.h"

namespace llvm {
namespace mca {

class EntryStage final : public Stage {
  InstRef CurrentInstruction;
  SmallVector<std::unique_ptr<Instruction>, 16> Instructions;
  SourceMgr &SM;
  unsigned NumRetired;

  // Updates the program counter, and sets 'CurrentInstruction'.
  Error getNextInstruction();

  EntryStage(const EntryStage &Other) = delete;
  EntryStage &operator=(const EntryStage &Other) = delete;

public:
  EntryStage(SourceMgr &SM) : SM(SM), NumRetired(0) {}

  bool isAvailable(const InstRef &IR) const override;
  bool hasWorkToComplete() const override;
  Error execute(InstRef &IR) override;
  Error cycleStart() override;
  Error cycleResume() override;
  Error cycleEnd() override;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_ENTRYSTAGE_H

MCA/Stages/ExecuteStage.h
//===---------------------- ExecuteStage.h ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the execution stage of a default instruction pipeline.
///
/// The ExecuteStage is responsible for managing the hardware scheduler
/// and issuing notifications that an instruction has been executed.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_EXECUTESTAGE_H
#define LLVM_MCA_STAGES_EXECUTESTAGE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/MCA/Instruction.h"
#include "llvm/MCA/Stages/Stage.h"

namespace llvm {
namespace mca {

class ExecuteStage final : public Stage {
  Scheduler &HWS;

  unsigned NumDispatchedOpcodes;
  unsigned NumIssuedOpcodes;

  // True if this stage should notify listeners of HWPressureEvents.
  bool EnablePressureEvents;

  Error issueInstruction(InstRef &IR);

  // Called at the beginning of each cycle to issue already dispatched
  // instructions to the underlying pipelines.
  Error issueReadyInstructions();

  // Used to notify instructions eliminated at register renaming stage.
  Error handleInstructionEliminated(InstRef &IR);

  ExecuteStage(const ExecuteStage &Other) = delete;
  ExecuteStage &operator=(const ExecuteStage &Other) = delete;

public:
  ExecuteStage(Scheduler &S) : ExecuteStage(S, false) {}
  ExecuteStage(Scheduler &S, bool ShouldPerformBottleneckAnalysis)
      : HWS(S), NumDispatchedOpcodes(0), NumIssuedOpcodes(0),
        EnablePressureEvents(ShouldPerformBottleneckAnalysis) {}

  // This stage works under the assumption that the Pipeline will eventually
  // execute a retire stage. We don't need to check if pipelines and/or
  // schedulers have instructions to process, because those instructions are
  // also tracked by the retire control unit. That means,
  // RetireControlUnit::hasWorkToComplete() is responsible for checking if there
  // are still instructions in-flight in the out-of-order backend.
  bool hasWorkToComplete() const override { return false; }
  bool isAvailable(const InstRef &IR) const override;

  // Notifies the scheduler that a new cycle just started.
  //
  // This method is also responsible for notifying listeners about instruction
  // state changes, and about processor resources freed by the scheduler.
  // Instructions that transitioned to the 'Executed' state are automatically
  // moved to the next stage (i.e. RetireStage).
  Error cycleStart() override;
  Error cycleEnd() override;
  Error execute(InstRef &IR) override;

  void notifyInstructionIssued(const InstRef &IR,
                               MutableArrayRef<ResourceUse> Used) const;
  void notifyInstructionExecuted(const InstRef &IR) const;
  void notifyInstructionPending(const InstRef &IR) const;
  void notifyInstructionReady(const InstRef &IR) const;
  void notifyResourceAvailable(const ResourceRef &RR) const;

  // Notify listeners that buffered resources have been consumed or freed.
  void notifyReservedOrReleasedBuffers(const InstRef &IR, bool Reserved) const;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_EXECUTESTAGE_H

MCA/Stages/MicroOpQueueStage.h
//===---------------------- MicroOpQueueStage.h -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines a stage that implements a queue of micro opcodes.
/// It can be used to simulate a hardware micro-op queue that serves opcodes to
/// the out of order backend.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_MICROOPQUEUESTAGE_H
#define LLVM_MCA_STAGES_MICROOPQUEUESTAGE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MCA/Stages/Stage.h"

namespace llvm {
namespace mca {

/// A stage that simulates a queue of instruction opcodes.
class MicroOpQueueStage : public Stage {
  SmallVector<InstRef, 8> Buffer;
  unsigned NextAvailableSlotIdx;
  unsigned CurrentInstructionSlotIdx;

  // Limits the number of instructions that can be written to this buffer every
  // cycle. A value of zero means that there is no limit to the input
  // instruction throughput.
  const unsigned MaxIPC;
  unsigned CurrentIPC;

  // Number of entries that are available during this cycle.
  unsigned AvailableEntries;

  // True if instructions dispatched to this stage don't need to wait for the
  // next cycle before moving to the next stage.
  // False if this buffer acts as a one cycle delay in the execution pipeline.
  bool IsZeroLatencyStage;

  MicroOpQueueStage(const MicroOpQueueStage &Other) = delete;
  MicroOpQueueStage &operator=(const MicroOpQueueStage &Other) = delete;

  // By default, an instruction consumes a number of buffer entries equal to its
  // number of micro opcodes (see field `InstrDesc::NumMicroOpcodes`).  The
  // number of entries consumed by an instruction is normalized to the
  // minimum value between NumMicroOpcodes and the buffer size. This is to avoid
  // problems with (microcoded) instructions that generate a number of micro
  // opcodes that doesn't fit in the buffer.
  unsigned getNormalizedOpcodes(const InstRef &IR) const {
    unsigned NormalizedOpcodes =
        std::min(static_cast<unsigned>(Buffer.size()),
                 IR.getInstruction()->getDesc().NumMicroOps);
    return NormalizedOpcodes ? NormalizedOpcodes : 1U;
  }

  Error moveInstructions();

public:
  MicroOpQueueStage(unsigned Size, unsigned IPC = 0,
                    bool ZeroLatencyStage = true);

  bool isAvailable(const InstRef &IR) const override {
    if (MaxIPC && CurrentIPC == MaxIPC)
      return false;
    unsigned NormalizedOpcodes = getNormalizedOpcodes(IR);
    if (NormalizedOpcodes > AvailableEntries)
      return false;
    return true;
  }

  bool hasWorkToComplete() const override {
    return AvailableEntries != Buffer.size();
  }

  Error execute(InstRef &IR) override;
  Error cycleStart() override;
  Error cycleEnd() override;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_MICROOPQUEUESTAGE_H

MCA/Stages/Stage.h
//===---------------------- Stage.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines a stage.
/// A chain of stages compose an instruction pipeline.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_STAGE_H
#define LLVM_MCA_STAGES_STAGE_H

#include "llvm/MCA/HWEventListener.h"
#include "llvm/Support/Error.h"
#include <set>

namespace llvm {
namespace mca {

class InstRef;

class Stage {
  Stage *NextInSequence = nullptr;
  std::set<HWEventListener *> Listeners;

  Stage(const Stage &Other) = delete;
  Stage &operator=(const Stage &Other) = delete;

protected:
  const std::set<HWEventListener *> &getListeners() const { return Listeners; }

public:
  Stage() = default;
  virtual ~Stage();

  /// Returns true if it can execute IR during this cycle.
  virtual bool isAvailable(const InstRef &IR) const { return true; }

  /// Returns true if some instructions are still executing this stage.
  virtual bool hasWorkToComplete() const = 0;

  /// Called once at the start of each cycle.  This can be used as a setup
  /// phase to prepare for the executions during the cycle.
  virtual Error cycleStart() { return ErrorSuccess(); }

  /// Called after the pipeline is resumed from pausing state.
  virtual Error cycleResume() { return ErrorSuccess(); }

  /// Called once at the end of each cycle.
  virtual Error cycleEnd() { return ErrorSuccess(); }

  /// The primary action that this stage performs on instruction IR.
  virtual Error execute(InstRef &IR) = 0;

  void setNextInSequence(Stage *NextStage) {
    assert(!NextInSequence && "This stage already has a NextInSequence!");
    NextInSequence = NextStage;
  }

  bool checkNextStage(const InstRef &IR) const {
    return NextInSequence && NextInSequence->isAvailable(IR);
  }

  /// Called when an instruction is ready to move to the next pipeline stage.
  ///
  /// Stages are responsible for moving instructions to their immediate
  /// successor stages.
  Error moveToTheNextStage(InstRef &IR) {
    assert(checkNextStage(IR) && "Next stage is not ready!");
    return NextInSequence->execute(IR);
  }

  /// Add a listener to receive callbacks during the execution of this stage.
  void addListener(HWEventListener *Listener);

  /// Notify listeners of a particular hardware event.
  template <typename EventT> void notifyEvent(const EventT &Event) const {
    for (HWEventListener *Listener : Listeners)
      Listener->onEvent(Event);
  }
};

/// This is actually not an error but a marker to indicate that
/// the instruction stream is paused.
struct InstStreamPause : public ErrorInfo<InstStreamPause> {
  static char ID;

  std::error_code convertToErrorCode() const override {
    return llvm::inconvertibleErrorCode();
  }
  void log(raw_ostream &OS) const override { OS << "Stream is paused"; }
};
} // namespace mca
} // namespace llvm
#endif // LLVM_MCA_STAGES_STAGE_H
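
// --- Usage sketch (illustrative; not part of Stage.h) ---
// The smallest useful Stage: a pass-through that forwards each instruction
// to its successor, using the checkNextStage()/moveToTheNextStage() pattern
// followed by the concrete stages in this directory.

class PassThroughStage final : public llvm::mca::Stage {
public:
  bool hasWorkToComplete() const override { return false; }
  bool isAvailable(const llvm::mca::InstRef &IR) const override {
    return checkNextStage(IR);
  }
  llvm::Error execute(llvm::mca::InstRef &IR) override {
    return moveToTheNextStage(IR);
  }
};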

MCA/Stages/DispatchStage.h
//===----------------------- DispatchStage.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file models the dispatch component of an instruction pipeline.
///
/// The DispatchStage is responsible for updating instruction dependencies
/// and communicating to the simulated instruction scheduler that an instruction
/// is ready to be scheduled for execution.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_DISPATCHSTAGE_H
#define LLVM_MCA_STAGES_DISPATCHSTAGE_H

#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MCA/HardwareUnits/RegisterFile.h"
#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
#include "llvm/MCA/Instruction.h"
#include "llvm/MCA/Stages/Stage.h"

namespace llvm {
namespace mca {

// Implements the hardware dispatch logic.
//
// This class is responsible for the dispatch stage, in which instructions are
// dispatched in groups to the Scheduler.  An instruction can be dispatched if
// the following conditions are met:
//  1) There are enough entries in the reorder buffer (see class
//     RetireControlUnit) to write the opcodes associated with the instruction.
//  2) There are enough physical registers to rename output register operands.
//  3) There are enough entries available in the used buffered resource(s).
//
// The number of micro opcodes that can be dispatched in one cycle is limited by
// the value of field 'DispatchWidth'. A "dynamic dispatch stall" occurs when
// processor resources are not available. Dispatch stall events are counted
// during the entire execution of the code, and displayed by the performance
// report when flag '-dispatch-stats' is specified.
//
// If the number of micro opcodes exceeds DispatchWidth, then the instruction
// is dispatched in multiple cycles.
class DispatchStage final : public Stage {
  unsigned DispatchWidth;
  unsigned AvailableEntries;
  unsigned CarryOver;
  InstRef CarriedOver;
  const MCSubtargetInfo &STI;
  RetireControlUnit &RCU;
  RegisterFile &PRF;

  bool checkRCU(const InstRef &IR) const;
  bool checkPRF(const InstRef &IR) const;
  bool canDispatch(const InstRef &IR) const;
  Error dispatch(InstRef IR);

  void notifyInstructionDispatched(const InstRef &IR,
                                   ArrayRef<unsigned> UsedPhysRegs,
                                   unsigned uOps) const;

public:
  DispatchStage(const MCSubtargetInfo &Subtarget, const MCRegisterInfo &MRI,
                unsigned MaxDispatchWidth, RetireControlUnit &R,
                RegisterFile &F);

  bool isAvailable(const InstRef &IR) const override;

  // The dispatch logic internally doesn't buffer instructions. So there is
  // never work to do at the beginning of every cycle.
  bool hasWorkToComplete() const override { return false; }
  Error cycleStart() override;
  Error execute(InstRef &IR) override;

#ifndef NDEBUG
  void dump() const;
#endif
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_DISPATCHSTAGE_H

MCA/Stages/InOrderIssueStage.h
//===---------------------- InOrderIssueStage.h -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// InOrderIssueStage implements an in-order execution pipeline.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_STAGES_INORDERISSUESTAGE_H
#define LLVM_MCA_STAGES_INORDERISSUESTAGE_H

#include "llvm/MCA/CustomBehaviour.h"
#include "llvm/MCA/HardwareUnits/ResourceManager.h"
#include "llvm/MCA/SourceMgr.h"
#include "llvm/MCA/Stages/Stage.h"

namespace llvm {
namespace mca {
class LSUnit;
class RegisterFile;

struct StallInfo {
  enum class StallKind {
    DEFAULT,
    REGISTER_DEPS,
    DISPATCH,
    DELAY,
    LOAD_STORE,
    CUSTOM_STALL
  };

  InstRef IR;
  unsigned CyclesLeft = 0;
  StallKind Kind = StallKind::DEFAULT;

  StallInfo() = default;

  StallKind getStallKind() const { return Kind; }
  unsigned getCyclesLeft() const { return CyclesLeft; }
  const InstRef &getInstruction() const { return IR; }
  InstRef &getInstruction() { return IR; }

  bool isValid() const { return (bool)IR; }
  void clear();
  void update(const InstRef &Inst, unsigned Cycles, StallKind SK);
  void cycleEnd();
};

class InOrderIssueStage final : public Stage {
  const MCSubtargetInfo &STI;
  RegisterFile &PRF;
  ResourceManager RM;
  CustomBehaviour &CB;
  LSUnit &LSU;

  /// Instructions that were issued, but not executed yet.
  SmallVector<InstRef, 4> IssuedInst;

  /// Number of instructions issued in the current cycle.
  unsigned NumIssued;

  StallInfo SI;

  /// Instruction that is issued in more than 1 cycle.
  InstRef CarriedOver;
  /// Number of CarriedOver uops left to issue.
  unsigned CarryOver;

  /// Number of instructions that can be issued in the current cycle.
  unsigned Bandwidth;

  /// Number of cycles (counted from the current cycle) until the last write is
  /// committed. This is taken into account to ensure that writes commit in the
  /// program order.
  unsigned LastWriteBackCycle;

  InOrderIssueStage(const InOrderIssueStage &Other) = delete;
  InOrderIssueStage &operator=(const InOrderIssueStage &Other) = delete;

  /// Returns true if IR can execute during this cycle.
  /// In case of stall, it updates SI with information about the stalled
  /// instruction and the stall reason.
  bool canExecute(const InstRef &IR);

  /// Issue the instruction, or update the StallInfo.
  Error tryIssue(InstRef &IR);

  /// Update status of instructions from IssuedInst.
  void updateIssuedInst();

  /// Continue to issue the CarriedOver instruction.
  void updateCarriedOver();

  /// Notifies a stall event to the Stage listener. Stall information is
  /// obtained from the internal StallInfo field.
  void notifyStallEvent();

  void notifyInstructionIssued(const InstRef &IR,
                               ArrayRef<ResourceUse> UsedRes);
  void notifyInstructionDispatched(const InstRef &IR, unsigned Ops,
                                   ArrayRef<unsigned> UsedRegs);
  void notifyInstructionExecuted(const InstRef &IR);
  void notifyInstructionRetired(const InstRef &IR,
                                ArrayRef<unsigned> FreedRegs);

  /// Retire instruction once it is executed.
  void retireInstruction(InstRef &IR);

public:
  InOrderIssueStage(const MCSubtargetInfo &STI, RegisterFile &PRF,
                    CustomBehaviour &CB, LSUnit &LSU);

  unsigned getIssueWidth() const;
  bool isAvailable(const InstRef &) const override;
  bool hasWorkToComplete() const override;
  Error execute(InstRef &IR) override;
  Error cycleStart() override;
  Error cycleEnd() override;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_STAGES_INORDERISSUESTAGE_H

MCA/View.h
//===----------------------- View.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the main interface for Views. Each view contributes a
/// portion of the final report generated by the tool.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_VIEW_H
#define LLVM_MCA_VIEW_H

#include "llvm/MC/MCInstPrinter.h"
#include "llvm/MCA/HWEventListener.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {
namespace mca {

class View : public HWEventListener {
public:
  virtual ~View() = default;

  virtual void printView(llvm::raw_ostream &OS) const = 0;
  virtual StringRef getNameAsString() const = 0;

  virtual json::Value toJSON() const { return "not implemented"; }
  virtual bool isSerializable() const { return true; }

  void anchor() override;
};
} // namespace mca
} // namespace llvm

#endif

MCA/Support.h
//===--------------------- Support.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Helper functions used by various pipeline components.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_SUPPORT_H
#define LLVM_MCA_SUPPORT_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"

namespace llvm {
namespace mca {

template <typename T>
class InstructionError : public ErrorInfo<InstructionError<T>> {
public:
  static char ID;
  std::string Message;
  const T &Inst;

  InstructionError(std::string M, const T &MCI)
      : Message(std::move(M)), Inst(MCI) {}

  void log(raw_ostream &OS) const override { OS << Message; }

  std::error_code convertToErrorCode() const override {
    return inconvertibleErrorCode();
  }
};

template <typename T> char InstructionError<T>::ID;

/// This class represents the number of cycles per resource (fractions of
/// cycles).  That quantity is managed here as a ratio, and accessed via the
/// double cast-operator below.  The two quantities, number of cycles and
/// number of resources, are kept separate.  This is used by the
/// ResourcePressureView to calculate the average resource cycles
/// per instruction/iteration.
class ResourceCycles {
  unsigned Numerator, Denominator;

public:
  ResourceCycles() : Numerator(0), Denominator(1) {}
  ResourceCycles(unsigned Cycles, unsigned ResourceUnits = 1)
      : Numerator(Cycles), Denominator(ResourceUnits) {}

  operator double() const {
    assert(Denominator && "Invalid denominator (must be non-zero).");
    return (Denominator == 1) ? Numerator : (double)Numerator / Denominator;
  }

  unsigned getNumerator() const { return Numerator; }
  unsigned getDenominator() const { return Denominator; }

  // Add the components of RHS to this instance.  Instead of calculating
  // the final value here, we keep track of the numerator and denominator
  // separately, to reduce floating point error.
  ResourceCycles &operator+=(const ResourceCycles &RHS);
};

/// Populates vector Masks with processor resource masks.
///
/// The number of bits set in a mask depends on the processor resource type.
/// Each processor resource mask has at least one bit set. For groups, the
/// number of bits set in the mask is equal to the cardinality of the group plus
/// one. Excluding the most significant bit, the remaining bits in the mask
/// identify processor resources that are part of the group.
///
/// Example:
///
///  ResourceA  -- Mask: 0b001
///  ResourceB  -- Mask: 0b010
///  ResourceAB -- Mask: 0b100 U (ResourceA::Mask | ResourceB::Mask) == 0b111
///
/// ResourceAB is a processor resource group containing ResourceA and ResourceB.
/// Each resource mask uniquely identifies a resource; both ResourceA and
/// ResourceB only have one bit set.
/// ResourceAB is a group; excluding the most significant bit in the mask, the
/// remaining bits identify the composition of the group.
///
/// Resource masks are used by the ResourceManager to solve set membership
/// problems with simple bit manipulation operations.
void computeProcResourceMasks(const MCSchedModel &SM,
                              MutableArrayRef<uint64_t> Masks);

// Returns the index of the highest bit set. For resource masks, the position of
// the highest bit set can be used to construct a resource mask identifier.
inline unsigned getResourceStateIndex(uint64_t Mask) {
  assert(Mask && "Processor Resource Mask cannot be zero!");
  return llvm::Log2_64(Mask);
}

/// Compute the reciprocal block throughput from a set of processor resource
/// cycles. The reciprocal block throughput is computed as the MAX between:
///  - NumMicroOps / DispatchWidth
///  - ProcResourceCycles / #ProcResourceUnits  (for every consumed resource).
double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
                               unsigned NumMicroOps,
                               ArrayRef<unsigned> ProcResourceUsage);
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_SUPPORT_H
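
// --- Usage sketch (illustrative; not part of Support.h) ---
// Ties the helpers above together: build the resource masks for a scheduling
// model, map each mask to its dense state index, and demonstrate the
// exact-ratio arithmetic of ResourceCycles (3 cycles over 2 units reads back
// as 1.5).

void inspectResources(const llvm::MCSchedModel &SM) {
  llvm::SmallVector<uint64_t, 8> Masks(SM.getNumProcResourceKinds());
  llvm::mca::computeProcResourceMasks(SM, Masks);
  for (uint64_t Mask : Masks)
    if (Mask) // entry 0 is the invalid resource and stays zero
      (void)llvm::mca::getResourceStateIndex(Mask);

  llvm::mca::ResourceCycles RC(/*Cycles=*/3, /*ResourceUnits=*/2);
  double AvgCyclesPerUnit = RC; // 1.5, via the double conversion operator
  (void)AvgCyclesPerUnit;
}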

MCA/HardwareUnits/RegisterFile.h
//===--------------------- RegisterFile.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines a register mapping file class.  This class is responsible
/// for managing hardware register files and the tracking of data dependencies
/// between registers.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HARDWAREUNITS_REGISTERFILE_H
#define LLVM_MCA_HARDWAREUNITS_REGISTERFILE_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MCA/HardwareUnits/HardwareUnit.h"

namespace llvm {
namespace mca {

class ReadState;
class WriteState;
class Instruction;

/// A reference to a register write.
///
/// This class is mainly used by the register file to describe register
/// mappings. It correlates a register write to the source index of the
/// defining instruction.
class WriteRef {
  unsigned IID;
  unsigned WriteBackCycle;
  unsigned WriteResID;
  MCPhysReg RegisterID;
  WriteState *Write;

  static const unsigned INVALID_IID;

public:
  WriteRef()
      : IID(INVALID_IID), WriteBackCycle(), WriteResID(), RegisterID(),
        Write() {}
  WriteRef(unsigned SourceIndex, WriteState *WS);

  unsigned getSourceIndex() const { return IID; }
  unsigned getWriteBackCycle() const;

  const WriteState *getWriteState() const { return Write; }
  WriteState *getWriteState() { return Write; }
  unsigned getWriteResourceID() const;
  MCPhysReg getRegisterID() const;

  void commit();
  void notifyExecuted(unsigned Cycle);

  bool hasKnownWriteBackCycle() const;
  bool isWriteZero() const;
  bool isValid() const { return getSourceIndex() != INVALID_IID; }

  /// Returns true if this register write has been executed, and the new
  /// register value is therefore available to users.
  bool isAvailable() const { return hasKnownWriteBackCycle(); }

  bool operator==(const WriteRef &Other) const {
    return Write && Other.Write && Write == Other.Write;
  }

#ifndef NDEBUG
  void dump() const;
#endif
};

/// Manages hardware register files, and tracks register definitions for
/// register renaming purposes.
class RegisterFile : public HardwareUnit {
  const MCRegisterInfo &MRI;

  // class RegisterMappingTracker is a physical register file (PRF) descriptor.
  // There is one RegisterMappingTracker for every PRF definition in the
  // scheduling model.
  //
  // An instance of RegisterMappingTracker tracks the number of physical
  // registers available for renaming. It also tracks the number of register
  // moves eliminated per cycle.
  struct RegisterMappingTracker {
    // The total number of physical registers that are available in this
    // register file for register renaming purposes. A value of zero for this
    // field means: this register file has an unbounded number of physical
    // registers.
    const unsigned NumPhysRegs;
    // Number of physical registers that are currently in use.
    unsigned NumUsedPhysRegs;

    // Maximum number of register moves that can be eliminated by this PRF every
    // cycle. A value of zero means that there is no limit in the number of
    // moves which can be eliminated every cycle.
    const unsigned MaxMoveEliminatedPerCycle;

    // Number of register moves eliminated during this cycle.
    //
    // This value is increased by one every time a register move is eliminated.
    // Every new cycle, this value is reset to zero.
    // A move can be eliminated only if MaxMoveEliminatedPerCycle is zero, or if
    // NumMoveEliminated is less than MaxMoveEliminatedPerCycle.
    unsigned NumMoveEliminated;

    // If set, move elimination is restricted to zero-register moves only.
    bool AllowZeroMoveEliminationOnly;

    RegisterMappingTracker(unsigned NumPhysRegisters,
                           unsigned MaxMoveEliminated = 0U,
                           bool AllowZeroMoveElimOnly = false)
        : NumPhysRegs(NumPhysRegisters), NumUsedPhysRegs(0),
          MaxMoveEliminatedPerCycle(MaxMoveEliminated), NumMoveEliminated(0U),
          AllowZeroMoveEliminationOnly(AllowZeroMoveElimOnly) {}
  };

  // A vector of register file descriptors.  This set always contains at least
  // one entry. Entry at index #0 is reserved.  That entry describes a register
  // file with an unbounded number of physical registers that "sees" all the
  // hardware registers declared by the target (i.e. all the register
  // definitions in the target specific `XYZRegisterInfo.td` - where `XYZ` is
  // the target name).
  //
  // Users can limit the number of physical registers that are available in
  // register file #0 specifying command line flag `-register-file-size=<uint>`.
  SmallVector<RegisterMappingTracker, 4> RegisterFiles;

  // This type is used to propagate information about the owner of a register,
  // and the cost of allocating it in the PRF. Register cost is defined as the
  // number of physical registers consumed by the PRF to allocate a user
  // register.
  //
  // For example: on X86 BtVer2, a YMM register consumes 2 128-bit physical
  // registers. So, the cost of allocating a YMM register in BtVer2 is 2.
  using IndexPlusCostPairTy = std::pair<unsigned, unsigned>;

  // Struct RegisterRenamingInfo is used to map logical registers to register
  // files.
  //
  // There is a RegisterRenamingInfo object for every logical register defined
  // by the target. RegisterRenamingInfo objects are stored into vector
  // `RegisterMappings`, and MCPhysReg IDs can be used to reference
  // elements in that vector.
  //
  // Each RegisterRenamingInfo is owned by a PRF, and field `IndexPlusCost`
  // specifies both the owning PRF, as well as the number of physical registers
  // consumed at register renaming stage.
  //
  // Field `AllowMoveElimination` is set for registers that are used as
  // destination by optimizable register moves.
  //
  // Field `AliasRegID` is set by writes from register moves that have been
  // eliminated at register renaming stage. A move eliminated at register
  // renaming stage is effectively bypassed, and its write aliases the source
  // register definition.
  struct RegisterRenamingInfo {
    IndexPlusCostPairTy IndexPlusCost;
    MCPhysReg RenameAs;
    MCPhysReg AliasRegID;
    bool AllowMoveElimination;
    RegisterRenamingInfo()
        : IndexPlusCost(std::make_pair(0U, 1U)), RenameAs(0U), AliasRegID(0U),
          AllowMoveElimination(false) {}
  };

  // RegisterMapping objects are mainly used to track physical register
  // definitions and resolve data dependencies.
  //
  // Every register declared by the Target is associated with an instance of
  // RegisterMapping. RegisterMapping objects keep track of writes to a logical
  // register.  That information is used by class RegisterFile to resolve data
  // dependencies, and correctly set latencies for register uses.
  //
  // This implementation does not allow overlapping register files. The only
  // register file that is allowed to overlap with other register files is
  // register file #0. If we exclude register #0, every register is "owned" by
  // at most one register file.
  using RegisterMapping = std::pair<WriteRef, RegisterRenamingInfo>;

  // There is one entry per each register defined by the target.
  std::vector<RegisterMapping> RegisterMappings;

  // Used to track zero registers. There is one bit for each register defined by
  // the target. Bits are set for registers that are known to be zero.
  APInt ZeroRegisters;

  unsigned CurrentCycle;

  // This method creates a new register file descriptor.
  // The new register file owns all of the registers declared by register
  // classes in the 'RegisterClasses' set.
  //
  // Processor models allow the definition of RegisterFile(s) via tablegen. For
  // example, this is a tablegen definition for an x86 register file for
  // XMM[0-15] and YMM[0-15], that allows up to 60 renames (each rename costs 1
  // physical register).
  //
  //    def FPRegisterFile : RegisterFile<60, [VR128RegClass, VR256RegClass]>
  //
  // Here FPRegisterFile contains all the registers defined by register class
  // VR128RegClass and VR256RegClass. FPRegisterFile implements 60
  // registers which can be used for register renaming purposes.
  void addRegisterFile(const MCRegisterFileDesc &RF,
                       ArrayRef<MCRegisterCostEntry> Entries);

  // Consumes physical registers in each register file specified by the
  // `IndexPlusCostPairTy`. This method is called from `addRegisterMapping()`.
  void allocatePhysRegs(const RegisterRenamingInfo &Entry,
                        MutableArrayRef<unsigned> UsedPhysRegs);

  // Releases previously allocated physical registers from the register file(s).
  // This method is called from `invalidateRegisterMapping()`.
  void freePhysRegs(const RegisterRenamingInfo &Entry,
                    MutableArrayRef<unsigned> FreedPhysRegs);

  // Create an instance of RegisterMappingTracker for every register file
  // specified by the processor model.
  // If no register file is specified, then this method creates a default
  // register file with an unbounded number of physical registers.
  void initialize(const MCSchedModel &SM, unsigned NumRegs);

public:
  RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
               unsigned NumRegs = 0);

  // Collects writes that are in a RAW dependency with RS.
  void collectWrites(const MCSubtargetInfo &STI, const ReadState &RS,
                     SmallVectorImpl<WriteRef> &Writes,
                     SmallVectorImpl<WriteRef> &CommittedWrites) const;
  struct RAWHazard {
    MCPhysReg RegisterID = 0;
    int CyclesLeft = 0;

    RAWHazard() = default;
    bool isValid() const { return RegisterID; }
    bool hasUnknownCycles() const { return CyclesLeft < 0; }
  };

  RAWHazard checkRAWHazards(const MCSubtargetInfo &STI,
                            const ReadState &RS) const;

  // This method updates the register mappings inserting a new register
  // definition. This method is also responsible for updating the number of
  // allocated physical registers in each register file modified by the write.
  // No physical register is allocated if this write is from a zero-idiom.
  void addRegisterWrite(WriteRef Write, MutableArrayRef<unsigned> UsedPhysRegs);

  // Collects writes that are in a data dependency with RS, and updates the
  // internal state of RS.
  void addRegisterRead(ReadState &RS, const MCSubtargetInfo &STI) const;

  // Removes write WS from the register mappings.
  // Physical registers may be released to reflect this update.
  // No registers are released if this write is from a zero-idiom.
  void removeRegisterWrite(const WriteState &WS,
                           MutableArrayRef<unsigned> FreedPhysRegs);

  // Returns true if the PRF at index `PRFIndex` can eliminate a move from RS to
  // WS.
  bool canEliminateMove(const WriteState &WS, const ReadState &RS,
                        unsigned PRFIndex) const;

  // Returns true if this instruction can be fully eliminated at the register
  // renaming stage. On success, this method updates the internal state of each
  // WriteState by setting flag `WS.isEliminated`, and by propagating the zero
  // flag for known zero registers. It internally uses `canEliminateMove` to
  // determine if a read/write pair can be eliminated. By default, it assumes a
  // register swap if there is more than one register definition.
  bool tryEliminateMoveOrSwap(MutableArrayRef<WriteState> Writes,
                              MutableArrayRef<ReadState> Reads);
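
  // A hedged sketch (illustrative only): the dispatch logic may attempt
  // elimination before allocating physical registers for a register move.
  //
  //   if (PRF.tryEliminateMoveOrSwap(Writes, Reads))
  //     ... // Eliminated: no physical registers were consumed.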

  // Checks if there are enough physical registers in the register files.
  // Returns a "response mask" where each bit represents the response from a
  // different register file.  A mask of all zeroes means that all register
  // files are available.  Otherwise, the mask can be used to identify which
  // register file was busy.  These semantics allow us to classify dispatch
  // stalls caused by a lack of register file resources.
  //
  // Current implementation can simulate up to 32 register files (including the
  // special register file at index #0).
  unsigned isAvailable(ArrayRef<MCPhysReg> Regs) const;
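
  // A hedged usage sketch (illustrative only; `PRF` is a RegisterFile and
  // `Regs` the registers consumed by an instruction): the response mask can
  // be decoded bit by bit to find the busy register files.
  //
  //   unsigned Mask = PRF.isAvailable(Regs);
  //   for (unsigned I = 0; Mask; ++I, Mask >>= 1)
  //     if (Mask & 1)
  //       ... // Register file I ran out of physical registers.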

  // Returns the number of PRFs implemented by this processor.
  unsigned getNumRegisterFiles() const { return RegisterFiles.size(); }

  unsigned getElapsedCyclesFromWriteBack(const WriteRef &WR) const;

  void onInstructionExecuted(Instruction *IS);

  // Notify each PRF that a new cycle just started.
  void cycleStart();

  void cycleEnd() { ++CurrentCycle; }

#ifndef NDEBUG
  void dump() const;
#endif
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_HARDWAREUNITS_REGISTERFILE_H
//===------------------------- LSUnit.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// A Load/Store unit class that models load/store queues and that implements
/// a simple weak memory consistency model.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HARDWAREUNITS_LSUNIT_H
#define LLVM_MCA_HARDWAREUNITS_LSUNIT_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
#include "llvm/MCA/Instruction.h"

namespace llvm {
namespace mca {

/// A node of a memory dependency graph. A MemoryGroup describes a set of
/// instructions with the same memory dependencies.
///
/// By construction, instructions of a MemoryGroup don't depend on each other.
/// At the dispatch stage, instructions are mapped by the LSUnit to
/// MemoryGroups. A memory group identifier is then stored as a "token" in
/// field Instruction::LSUTokenID of each dispatched instruction. That token is used
/// internally by the LSUnit to track memory dependencies.
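///
/// A hedged sketch of how that token is typically consumed (illustrative
/// only; `LSU` stands for an LSUnitBase-derived instance):
///
///   unsigned GroupID = IR.getInstruction()->getLSUTokenID();
///   const MemoryGroup &Group = LSU.getGroup(GroupID);
///   if (Group.isReady())
///     ... // IR has no outstanding memory dependencies.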
class MemoryGroup {
  unsigned NumPredecessors = 0;
  unsigned NumExecutingPredecessors = 0;
  unsigned NumExecutedPredecessors = 0;

  unsigned NumInstructions = 0;
  unsigned NumExecuting = 0;
  unsigned NumExecuted = 0;
  // Successors that are in an order dependency with this group.
  SmallVector<MemoryGroup *, 4> OrderSucc;
  // Successors that are in a data dependency with this group.
  SmallVector<MemoryGroup *, 4> DataSucc;

  CriticalDependency CriticalPredecessor;
  InstRef CriticalMemoryInstruction;

  MemoryGroup(const MemoryGroup &) = delete;
  MemoryGroup &operator=(const MemoryGroup &) = delete;

public:
  MemoryGroup() = default;
  MemoryGroup(MemoryGroup &&) = default;

  size_t getNumSuccessors() const {
    return OrderSucc.size() + DataSucc.size();
  }
  unsigned getNumPredecessors() const { return NumPredecessors; }
  unsigned getNumExecutingPredecessors() const {
    return NumExecutingPredecessors;
  }
  unsigned getNumExecutedPredecessors() const {
    return NumExecutedPredecessors;
  }
  unsigned getNumInstructions() const { return NumInstructions; }
  unsigned getNumExecuting() const { return NumExecuting; }
  unsigned getNumExecuted() const { return NumExecuted; }

  const InstRef &getCriticalMemoryInstruction() const {
    return CriticalMemoryInstruction;
  }
  const CriticalDependency &getCriticalPredecessor() const {
    return CriticalPredecessor;
  }

  void addSuccessor(MemoryGroup *Group, bool IsDataDependent) {
    // There is no need to add a dependency if there is no data
    // dependency, and all instructions from this group have already been
    // issued.
    if (!IsDataDependent && isExecuting())
      return;

    Group->NumPredecessors++;
    assert(!isExecuted() && "Should have been removed!");
    if (isExecuting())
      Group->onGroupIssued(CriticalMemoryInstruction, IsDataDependent);

    if (IsDataDependent)
      DataSucc.emplace_back(Group);
    else
      OrderSucc.emplace_back(Group);
  }

  bool isWaiting() const {
    return NumPredecessors >
           (NumExecutingPredecessors + NumExecutedPredecessors);
  }
  bool isPending() const {
    return NumExecutingPredecessors &&
           ((NumExecutedPredecessors + NumExecutingPredecessors) ==
            NumPredecessors);
  }
  bool isReady() const { return NumExecutedPredecessors == NumPredecessors; }
  bool isExecuting() const {
    return NumExecuting && (NumExecuting == (NumInstructions - NumExecuted));
  }
  bool isExecuted() const { return NumInstructions == NumExecuted; }

  void onGroupIssued(const InstRef &IR, bool ShouldUpdateCriticalDep) {
    assert(!isReady() && "Unexpected group-start event!");
    NumExecutingPredecessors++;

    if (!ShouldUpdateCriticalDep)
      return;

    unsigned Cycles = IR.getInstruction()->getCyclesLeft();
    if (CriticalPredecessor.Cycles < Cycles) {
      CriticalPredecessor.IID = IR.getSourceIndex();
      CriticalPredecessor.Cycles = Cycles;
    }
  }

  void onGroupExecuted() {
    assert(!isReady() && "Inconsistent state found!");
    NumExecutingPredecessors--;
    NumExecutedPredecessors++;
  }

  void onInstructionIssued(const InstRef &IR) {
    assert(!isExecuting() && "Invalid internal state!");
    ++NumExecuting;

    // Update the critical memory instruction for this group.
    const Instruction &IS = *IR.getInstruction();
    if ((bool)CriticalMemoryInstruction) {
      const Instruction &OtherIS = *CriticalMemoryInstruction.getInstruction();
      if (OtherIS.getCyclesLeft() < IS.getCyclesLeft())
        CriticalMemoryInstruction = IR;
    } else {
      CriticalMemoryInstruction = IR;
    }

    if (!isExecuting())
      return;

    // Notify successors that this group started execution.
    for (MemoryGroup *MG : OrderSucc) {
      MG->onGroupIssued(CriticalMemoryInstruction, false);
      // Release the order dependency with this group.
      MG->onGroupExecuted();
    }

    for (MemoryGroup *MG : DataSucc)
      MG->onGroupIssued(CriticalMemoryInstruction, true);
  }

  void onInstructionExecuted(const InstRef &IR) {
    assert(isReady() && !isExecuted() && "Invalid internal state!");
    --NumExecuting;
    ++NumExecuted;

    if (CriticalMemoryInstruction &&
        CriticalMemoryInstruction.getSourceIndex() == IR.getSourceIndex()) {
      CriticalMemoryInstruction.invalidate();
    }

    if (!isExecuted())
      return;

    // Notify data dependent successors that this group has finished execution.
    for (MemoryGroup *MG : DataSucc)
      MG->onGroupExecuted();
  }

  void addInstruction() {
    assert(!getNumSuccessors() && "Cannot add instructions to this group!");
    ++NumInstructions;
  }

  void cycleEvent() {
    if (isWaiting() && CriticalPredecessor.Cycles)
      CriticalPredecessor.Cycles--;
  }
};

/// Abstract base interface for LS (load/store) units in llvm-mca.
class LSUnitBase : public HardwareUnit {
  /// Load queue size.
  ///
  /// A value of zero for this field means that the load queue is unbounded.
  /// Processor models can declare the size of a load queue via tablegen (see
  /// the definition of tablegen class LoadQueue in
  /// llvm/Target/TargetSchedule.td).
  unsigned LQSize;

  /// Load queue size.
  ///
  /// A value of zero for this field means that the store queue is unbounded.
  /// Processor models can declare the size of a store queue via tablegen (see
  /// the definition of tablegen class StoreQueue in
  /// llvm/Target/TargetSchedule.td).
  unsigned SQSize;

  unsigned UsedLQEntries;
  unsigned UsedSQEntries;

  /// True if loads don't alias with stores.
  ///
  /// By default, the LS unit assumes that loads and stores don't alias with
  /// each other. If this field is set to false, then loads are always assumed
  /// to alias with stores.
  const bool NoAlias;

  /// Used to map group identifiers to MemoryGroups.
  DenseMap<unsigned, std::unique_ptr<MemoryGroup>> Groups;
  unsigned NextGroupID;

public:
  LSUnitBase(const MCSchedModel &SM, unsigned LoadQueueSize,
             unsigned StoreQueueSize, bool AssumeNoAlias);

  virtual ~LSUnitBase();

  /// Returns the total number of entries in the load queue.
  unsigned getLoadQueueSize() const { return LQSize; }

  /// Returns the total number of entries in the store queue.
  unsigned getStoreQueueSize() const { return SQSize; }

  unsigned getUsedLQEntries() const { return UsedLQEntries; }
  unsigned getUsedSQEntries() const { return UsedSQEntries; }
  void acquireLQSlot() { ++UsedLQEntries; }
  void acquireSQSlot() { ++UsedSQEntries; }
  void releaseLQSlot() { --UsedLQEntries; }
  void releaseSQSlot() { --UsedSQEntries; }

  bool assumeNoAlias() const { return NoAlias; }

  enum Status {
    LSU_AVAILABLE = 0,
    LSU_LQUEUE_FULL, // Load Queue unavailable
    LSU_SQUEUE_FULL  // Store Queue unavailable
  };

  /// This method checks the availability of the load/store buffers.
  ///
  /// Returns LSU_AVAILABLE if there are enough load/store queue entries to
  /// accommodate instruction IR. By default, LSU_AVAILABLE is returned if IR is
  /// not a memory operation.
  virtual Status isAvailable(const InstRef &IR) const = 0;

  /// Allocates LS resources for instruction IR.
  ///
  /// This method assumes that a previous call to `isAvailable(IR)` succeeded
  /// with a LSUnitBase::Status value of LSU_AVAILABLE.
  /// Returns the GroupID associated with this instruction. That value will be
  /// used to set the LSUTokenID field in class Instruction.
  virtual unsigned dispatch(const InstRef &IR) = 0;
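
  /// A hedged sketch of the expected call sequence (illustrative only;
  /// `LSU` is a concrete LSUnitBase implementation, and the setLSUTokenID()
  /// setter on class Instruction is assumed here):
  ///
  ///   if (LSU.isAvailable(IR) == LSUnitBase::LSU_AVAILABLE) {
  ///     unsigned TokenID = LSU.dispatch(IR);
  ///     IR.getInstruction()->setLSUTokenID(TokenID);
  ///   }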

  bool isSQEmpty() const { return !UsedSQEntries; }
  bool isLQEmpty() const { return !UsedLQEntries; }
  bool isSQFull() const { return SQSize && SQSize == UsedSQEntries; }
  bool isLQFull() const { return LQSize && LQSize == UsedLQEntries; }

  bool isValidGroupID(unsigned Index) const {
    return Index && Groups.contains(Index);
  }

  /// Check if a previously dispatched instruction IR is now ready for execution.
  bool isReady(const InstRef &IR) const {
    unsigned GroupID = IR.getInstruction()->getLSUTokenID();
    const MemoryGroup &Group = getGroup(GroupID);
    return Group.isReady();
  }

  /// Check if instruction IR only depends on memory instructions that are
  /// currently executing.
  bool isPending(const InstRef &IR) const {
    unsigned GroupID = IR.getInstruction()->getLSUTokenID();
    const MemoryGroup &Group = getGroup(GroupID);
    return Group.isPending();
  }

  /// Check if instruction IR is still waiting on memory operations, and the
  /// wait time is still unknown.
  bool isWaiting(const InstRef &IR) const {
    unsigned GroupID = IR.getInstruction()->getLSUTokenID();
    const MemoryGroup &Group = getGroup(GroupID);
    return Group.isWaiting();
  }

  bool hasDependentUsers(const InstRef &IR) const {
    unsigned GroupID = IR.getInstruction()->getLSUTokenID();
    const MemoryGroup &Group = getGroup(GroupID);
    return !Group.isExecuted() && Group.getNumSuccessors();
  }

  const MemoryGroup &getGroup(unsigned Index) const {
    assert(isValidGroupID(Index) && "Group doesn't exist!");
    return *Groups.find(Index)->second;
  }

  MemoryGroup &getGroup(unsigned Index) {
    assert(isValidGroupID(Index) && "Group doesn't exist!");
    return *Groups.find(Index)->second;
  }

  unsigned createMemoryGroup() {
    Groups.insert(
        std::make_pair(NextGroupID, std::make_unique<MemoryGroup>()));
    return NextGroupID++;
  }
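
  // A hedged sketch (illustrative only): a derived LS unit typically creates
  // a fresh group at dispatch when an instruction cannot join an existing one.
  //
  //   unsigned GroupID = createMemoryGroup();
  //   getGroup(GroupID).addInstruction();
  //   // GroupID is then returned by dispatch() and stored as the LSU token.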

  virtual void onInstructionExecuted(const InstRef &IR);

  // Loads are tracked by the LDQ (load queue) from dispatch until completion.
  // Stores are tracked by the STQ (store queue) from dispatch until commitment.
  // By default we conservatively assume that the LDQ receives a load at
  // dispatch. Loads leave the LDQ at the retirement stage.
  virtual void onInstructionRetired(const InstRef &IR);

  virtual void onInstructionIssued(const InstRef &IR) {
    unsigned GroupID = IR.getInstruction()->getLSUTokenID();
    Groups[GroupID]->onInstructionIssued(IR);
  }

  virtual void cycleEvent();

#ifndef NDEBUG
  void dump() const;
#endif
};

/// Default Load/Store Unit (LS Unit) for simulated processors.
///
/// Each load (or store) consumes one entry in the load (or store) queue.
///
/// Rules are:
/// 1) A younger load is allowed to pass an older load only if there are no
///    stores nor barriers in between the two loads.
/// 2) A younger store is not allowed to pass an older store.
/// 3) A younger store is not allowed to pass an older load.
/// 4) A younger load is allowed to pass an older store only if the load does
///    not alias with the store.
///
/// This class optimistically assumes that loads don't alias with store
/// operations. Under this assumption, younger loads are always allowed to pass
/// older stores (this only affects rule 4).
/// Essentially, this class doesn't perform any sort of alias analysis to
/// identify aliasing loads and stores.
///
/// To enforce aliasing between loads and stores, flag `AssumeNoAlias` must be
/// set to `false` by the constructor of LSUnit.
///
/// Note that this class doesn't know about the existence of different memory
/// types for memory operations (example: write-through, write-combining, etc.).
/// Derived classes are responsible for implementing that extra knowledge, and
/// provide different sets of rules for loads and stores by overriding method
/// `isReady()`.
/// To emulate a write-combining memory type, rule 2 must be relaxed in a
/// derived class to enable the reordering of non-aliasing store operations.
///
/// No assumptions are made by this class on the size of the store buffer.  This
/// class doesn't know how to identify cases where store-to-load forwarding may
/// occur.
///
/// LSUnit doesn't attempt to predict whether a load or store hits or misses
/// the L1 cache. To be more specific, LSUnit doesn't know anything about
/// cache hierarchy and memory types.
/// It only knows if an instruction "mayLoad" and/or "mayStore". For loads, the
/// scheduling model provides an "optimistic" load-to-use latency (which usually
/// matches the load-to-use latency for when there is a hit in the L1D).
/// Derived classes may expand this knowledge.
///
/// Class MCInstrDesc in LLVM doesn't know about serializing operations, nor
/// memory-barrier like instructions.
/// LSUnit conservatively assumes that an instruction which `mayLoad` and has
/// `unmodeled side effects` behaves like a "soft" load-barrier. That means it
/// serializes loads without forcing a flush of the load queue.
/// Similarly, instructions that both `mayStore` and have `unmodeled side
/// effects` are treated like store barriers. A full memory
/// barrier is a 'mayLoad' and 'mayStore' instruction with unmodeled side
/// effects. This is obviously inaccurate, but this is the best that we can do
/// at the moment.
///
/// Each load/store barrier consumes one entry in the load/store queue. A
/// load/store barrier enforces ordering of loads/stores:
///  - A younger load cannot pass a load barrier.
///  - A younger store cannot pass a store barrier.
///
/// A younger load has to wait for the memory load barrier to execute.
/// A load/store barrier is "executed" when it becomes the oldest entry in
/// the load/store queue(s). That also means that all the older loads/stores have
/// already been executed.
class LSUnit : public LSUnitBase {
  // This class doesn't know about the latency of a load instruction. So, it
  // conservatively/pessimistically assumes that the latency of a load opcode
  // matches the instruction latency.
  //
  // FIXME: In the absence of cache misses (i.e. L1I/L1D/iTLB/dTLB hits/misses),
  // and load/store conflicts, the latency of a load is determined by the depth
  // of the load pipeline. So, we could use field `LoadLatency` in the
  // MCSchedModel to model that latency.
  // Field `LoadLatency` often matches the so-called 'load-to-use' latency from
  // L1D, and it usually already accounts for any extra latency due to data
  // forwarding.
  // When doing throughput analysis, `LoadLatency` is likely to
  // be a better predictor of load latency than instruction latency. This is
  // particularly true when simulating code with temporal/spatial locality of
  // memory accesses.
  // Using `LoadLatency` (instead of the instruction latency) is also expected
  // to improve the load queue allocation for long latency instructions with
  // folded memory operands (See PR39829).
  //
  // FIXME: On some processors, load/store operations are split into multiple
  // uOps. For example, X86 AMD Jaguar natively supports 128-bit data types, but
  // not 256-bit data types. So, a 256-bit load is effectively split into two
  // 128-bit loads, and each split load consumes one 'LoadQueue' entry. For
  // simplicity, this class optimistically assumes that a load instruction only
  // consumes one entry in the LoadQueue.  Similarly, store instructions only
  // consume a single entry in the StoreQueue.
  // In future, we should reassess the quality of this design, and consider
  // alternative approaches that let instructions specify the number of
  // load/store queue entries which they consume at dispatch stage (See
  // PR39830).
  //
  // An instruction that both 'mayStore' and 'HasUnmodeledSideEffects' is
  // conservatively treated as a store barrier. It forces older stores to be
  // executed before newer stores are issued.
  //
  // An instruction that both 'mayLoad' and 'HasUnmodeledSideEffects' is
  // conservatively treated as a load barrier. It forces older loads to execute
  // before newer loads are issued.
  unsigned CurrentLoadGroupID;
  unsigned CurrentLoadBarrierGroupID;
  unsigned CurrentStoreGroupID;
  unsigned CurrentStoreBarrierGroupID;

public:
  LSUnit(const MCSchedModel &SM)
      : LSUnit(SM, /* LQSize */ 0, /* SQSize */ 0, /* NoAlias */ false) {}
  LSUnit(const MCSchedModel &SM, unsigned LQ, unsigned SQ)
      : LSUnit(SM, LQ, SQ, /* NoAlias */ false) {}
  LSUnit(const MCSchedModel &SM, unsigned LQ, unsigned SQ, bool AssumeNoAlias)
      : LSUnitBase(SM, LQ, SQ, AssumeNoAlias), CurrentLoadGroupID(0),
        CurrentLoadBarrierGroupID(0), CurrentStoreGroupID(0),
        CurrentStoreBarrierGroupID(0) {}

  /// Returns LSU_AVAILABLE if there are enough load/store queue entries to
  /// accommodate instruction IR.
  Status isAvailable(const InstRef &IR) const override;

  /// Allocates LS resources for instruction IR.
  ///
  /// This method assumes that a previous call to `isAvailable(IR)` succeeded
  /// returning LSU_AVAILABLE.
  ///
  /// By default, the rules are:
  /// 1. A store may not pass a previous store.
  /// 2. A load may not pass a previous store unless flag 'NoAlias' is set.
  /// 3. A load may pass a previous load.
  /// 4. A store may not pass a previous load (regardless of flag 'NoAlias').
  /// 5. A load has to wait until an older load barrier is fully executed.
  /// 6. A store has to wait until an older store barrier is fully executed.
  unsigned dispatch(const InstRef &IR) override;

  void onInstructionExecuted(const InstRef &IR) override;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_HARDWAREUNITS_LSUNIT_H
//===--------------------- Scheduler.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// A scheduler for Processor Resource Units and Processor Resource Groups.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HARDWAREUNITS_SCHEDULER_H
#define LLVM_MCA_HARDWAREUNITS_SCHEDULER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
#include "llvm/MCA/HardwareUnits/LSUnit.h"
#include "llvm/MCA/HardwareUnits/ResourceManager.h"
#include "llvm/MCA/Support.h"

namespace llvm {
namespace mca {

class SchedulerStrategy {
public:
  SchedulerStrategy() = default;
  virtual ~SchedulerStrategy();

  /// Returns true if Lhs should take priority over Rhs.
  ///
  /// This method is used by class Scheduler to select the "best" ready
  /// instruction to issue to the underlying pipelines.
  virtual bool compare(const InstRef &Lhs, const InstRef &Rhs) const = 0;
};

/// Default instruction selection strategy used by class Scheduler.
class DefaultSchedulerStrategy : public SchedulerStrategy {
  /// This method ranks instructions based on their age, and the number of known
  /// users. The lower the rank value, the better.
  int computeRank(const InstRef &Lhs) const {
    return Lhs.getSourceIndex() - Lhs.getInstruction()->getNumUsers();
  }

public:
  DefaultSchedulerStrategy() = default;
  virtual ~DefaultSchedulerStrategy();

  bool compare(const InstRef &Lhs, const InstRef &Rhs) const override {
    int LhsRank = computeRank(Lhs);
    int RhsRank = computeRank(Rhs);

    /// Prioritize older instructions over younger instructions to minimize the
    /// pressure on the reorder buffer.
    if (LhsRank == RhsRank)
      return Lhs.getSourceIndex() < Rhs.getSourceIndex();
    return LhsRank < RhsRank;
  }
};
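
/// A hedged sketch of a custom strategy (illustrative only; preferring
/// instructions with fewer cycles left is a made-up policy, not one that
/// llvm-mca provides):
///
///   struct ShortestJobFirstStrategy : public SchedulerStrategy {
///     bool compare(const InstRef &Lhs, const InstRef &Rhs) const override {
///       return Lhs.getInstruction()->getCyclesLeft() <
///              Rhs.getInstruction()->getCyclesLeft();
///     }
///   };
///
/// Such a strategy can be passed to the Scheduler constructor overload that
/// takes a std::unique_ptr<SchedulerStrategy>.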

/// Class Scheduler is responsible for issuing instructions to pipeline
/// resources.
///
/// Internally, it delegates to a ResourceManager the management of processor
/// resources. This class is also responsible for tracking the progress of
/// instructions from the dispatch stage, until the write-back stage.
///
class Scheduler : public HardwareUnit {
  LSUnitBase &LSU;

  // Instruction selection strategy for this Scheduler.
  std::unique_ptr<SchedulerStrategy> Strategy;

  // Hardware resources that are managed by this scheduler.
  std::unique_ptr<ResourceManager> Resources;

  // Instructions dispatched to the Scheduler are internally classified based on
  // the instruction stage (see Instruction::InstrStage).
  //
  // An Instruction dispatched to the Scheduler is added to the WaitSet if not
  // all its register operands are available, and at least one latency is
  // unknown.  By construction, the WaitSet only contains instructions that are
  // in the IS_DISPATCHED stage.
  //
  // An Instruction transitions from the WaitSet to the PendingSet if the
  // instruction is not ready yet, but the latency of every register read is
  // known.  Instructions in the PendingSet can only be in the IS_PENDING or
  // IS_READY stage.  Only IS_READY instructions that are waiting on memory
  // dependencies can be added to the PendingSet.
  //
  // Instructions in the PendingSet are immediately dominated only by
  // instructions that have already been issued to the underlying pipelines.  In
  // the presence of bottlenecks caused by data dependencies, the PendingSet can
  // be inspected to identify problematic data dependencies between
  // instructions.
  //
  // An instruction is moved to the ReadySet when all register operands become
  // available, and all memory dependencies are met.  Instructions that are
  // moved from the PendingSet to the ReadySet must transition to the 'IS_READY'
  // stage.
  //
  // On every cycle, the Scheduler checks if it can promote instructions from the
  // PendingSet to the ReadySet.
  //
  // An Instruction is moved from the ReadySet to the `IssuedSet` when it starts
  // execution. This event also causes an instruction state transition (i.e. from
  // state IS_READY, to state IS_EXECUTING). An Instruction leaves the IssuedSet
  // only when it reaches the write-back stage.
  std::vector<InstRef> WaitSet;
  std::vector<InstRef> PendingSet;
  std::vector<InstRef> ReadySet;
  std::vector<InstRef> IssuedSet;

  // A mask of busy resource units. It defaults to the empty set (i.e. a zero
  // mask), and it is cleared at the beginning of every cycle.
  // It is updated every time the scheduler fails to issue an instruction from
  // the ready set due to unavailable pipeline resources.
  // Each bit of the mask represents an unavailable resource.
  uint64_t BusyResourceUnits;

  // Counts the number of instructions in the pending set that were dispatched
  // during this cycle.
  unsigned NumDispatchedToThePendingSet;

  // True if the previous pipeline Stage was unable to dispatch a full group of
  // opcodes because scheduler buffers (or LS queues) were unavailable.
  bool HadTokenStall;

  /// Verify the given selection strategy and set the Strategy member
  /// accordingly.  If no strategy is provided, the DefaultSchedulerStrategy is
  /// used.
  void initializeStrategy(std::unique_ptr<SchedulerStrategy> S);

  /// Issue an instruction without updating the ready queue.
  void issueInstructionImpl(
      InstRef &IR,
      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);

  // Identify instructions that have finished executing, and remove them from
  // the IssuedSet. References to executed instructions are added to input
  // vector 'Executed'.
  void updateIssuedSet(SmallVectorImpl<InstRef> &Executed);

  // Try to promote instructions from the PendingSet to the ReadySet.
  // Add promoted instructions to the 'Ready' vector in input.
  // Returns true if at least one instruction was promoted.
  bool promoteToReadySet(SmallVectorImpl<InstRef> &Ready);

  // Try to promote instructions from the WaitSet to the PendingSet.
  // Add promoted instructions to the 'Pending' vector in input.
  // Returns true if at least one instruction was promoted.
  bool promoteToPendingSet(SmallVectorImpl<InstRef> &Pending);

public:
  Scheduler(const MCSchedModel &Model, LSUnitBase &Lsu)
      : Scheduler(Model, Lsu, nullptr) {}

  Scheduler(const MCSchedModel &Model, LSUnitBase &Lsu,
            std::unique_ptr<SchedulerStrategy> SelectStrategy)
      : Scheduler(std::make_unique<ResourceManager>(Model), Lsu,
                  std::move(SelectStrategy)) {}

  Scheduler(std::unique_ptr<ResourceManager> RM, LSUnitBase &Lsu,
            std::unique_ptr<SchedulerStrategy> SelectStrategy)
      : LSU(Lsu), Resources(std::move(RM)), BusyResourceUnits(0),
        NumDispatchedToThePendingSet(0), HadTokenStall(false) {
    initializeStrategy(std::move(SelectStrategy));
  }

  // Stalls generated by the scheduler.
  enum Status {
    SC_AVAILABLE,
    SC_LOAD_QUEUE_FULL,
    SC_STORE_QUEUE_FULL,
    SC_BUFFERS_FULL,
    SC_DISPATCH_GROUP_STALL,
  };

  /// Check if the instruction in 'IR' can be dispatched during this cycle.
  /// Return SC_AVAILABLE if both scheduler and LS resources are available.
  ///
  /// This method is also responsible for setting field HadTokenStall if
  /// IR cannot be dispatched to the Scheduler due to unavailable resources.
  Status isAvailable(const InstRef &IR);

  /// Reserves buffer and LSUnit queue resources that are necessary to issue
  /// this instruction.
  ///
  /// Returns true if instruction IR is ready to be issued to the underlying
  /// pipelines. Note that this operation cannot fail; it assumes that a
  /// previous call to method `isAvailable(IR)` returned `SC_AVAILABLE`.
  ///
  /// If IR is a memory operation, then the Scheduler queries the LS unit to
  /// obtain a LS token. An LS token is used internally to track memory
  /// dependencies.
  bool dispatch(InstRef &IR);

  /// Issues an instruction and populates a vector of used pipeline resources,
  /// and a vector of instructions that transitioned to the ready state as a
  /// result of this event.
  void issueInstruction(
      InstRef &IR,
      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Used,
      SmallVectorImpl<InstRef> &Pending,
      SmallVectorImpl<InstRef> &Ready);

  /// Returns true if IR has to be issued immediately, or if IR is a zero
  /// latency instruction.
  bool mustIssueImmediately(const InstRef &IR) const;

  /// This routine notifies the Scheduler that a new cycle just started.
  ///
  /// It notifies the underlying ResourceManager that a new cycle just started.
  /// Vector `Freed` is populated with ResourceRef entries for resources that
  /// have changed state and are now available to new instructions.
  /// Executed instructions are added to vector Executed, while vector Ready is
  /// populated with instructions that have become ready in this new cycle.
  /// Vector Pending is populated with instructions that have transitioned
  /// through the pending state during this cycle. The Pending and Ready sets
  /// may not be disjoint. An instruction is allowed to transition from the WAIT
  /// state to the READY state (going through the PENDING state) within a single
  /// cycle. That means instructions may appear in both the Pending and Ready
  /// sets.
  void cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
                  SmallVectorImpl<InstRef> &Executed,
                  SmallVectorImpl<InstRef> &Pending,
                  SmallVectorImpl<InstRef> &Ready);
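
  /// A hedged sketch of how a stage might drive this event (illustrative
  /// only; `S` is a Scheduler, and the vectors are locals of the caller):
  ///
  ///   SmallVector<ResourceRef, 8> Freed;
  ///   SmallVector<InstRef, 4> Executed, Pending, Ready;
  ///   S.cycleEvent(Freed, Executed, Pending, Ready);
  ///   while (InstRef IR = S.select()) {
  ///     ... // Issue IR, then notify any event listeners.
  ///   }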

  /// Convert a resource mask into a valid llvm processor resource identifier.
  ///
  /// Only the most significant bit of the Mask is used by this method to
  /// identify the processor resource.
  unsigned getResourceID(uint64_t Mask) const {
    return Resources->resolveResourceMask(Mask);
  }

  /// Select the next instruction to issue from the ReadySet. Returns an invalid
  /// instruction reference if there are no ready instructions, or if processor
  /// resources are not available.
  InstRef select();

  bool isReadySetEmpty() const { return ReadySet.empty(); }
  bool isWaitSetEmpty() const { return WaitSet.empty(); }

  /// This method is called by the ExecuteStage at the end of each cycle to
  /// identify bottlenecks caused by data dependencies. Vector RegDeps is
  /// populated with instructions that were not issued because of unresolved
  /// register dependencies.  Vector MemDeps is populated with instructions
  /// that were not issued because of unresolved memory dependencies.
  void analyzeDataDependencies(SmallVectorImpl<InstRef> &RegDeps,
                               SmallVectorImpl<InstRef> &MemDeps);

  /// Returns a mask of busy resources, and populates vector Insts with
  /// instructions that could not be issued to the underlying pipelines because
  /// not all pipeline resources were available.
  uint64_t analyzeResourcePressure(SmallVectorImpl<InstRef> &Insts);

  // Returns true if the dispatch logic couldn't dispatch a full group due to
  // unavailable scheduler and/or LS resources.
  bool hadTokenStall() const { return HadTokenStall; }

#ifndef NDEBUG
  // Dumps the internal state of this Scheduler (including its queues).
  void dump() const;

  // This routine performs a basic correctness check.  This routine should only
  // be called when we know that 'IR' is not in the scheduler's instruction
  // queues.
  void instructionCheck(const InstRef &IR) const {
    assert(!is_contained(WaitSet, IR) && "Already in the wait set!");
    assert(!is_contained(ReadySet, IR) && "Already in the ready set!");
    assert(!is_contained(IssuedSet, IR) && "Already executing!");
  }
#endif // !NDEBUG
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_HARDWAREUNITS_SCHEDULER_H
//===---------------------- RetireControlUnit.h ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file simulates the hardware responsible for retiring instructions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HARDWAREUNITS_RETIRECONTROLUNIT_H
#define LLVM_MCA_HARDWAREUNITS_RETIRECONTROLUNIT_H

#include "llvm/MC/MCSchedule.h"
#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
#include "llvm/MCA/Instruction.h"
#include <vector>

namespace llvm {
namespace mca {

/// This class tracks which instructions are in-flight (i.e., dispatched but not
/// retired) in the OoO backend.
///
/// This class checks on every cycle if/which instructions can be retired.
/// Instructions are retired in program order.
/// In the event of an instruction being retired, the pipeline that owns
/// this RetireControlUnit (RCU) gets notified.
///
/// When an instruction is retired, its register updates are all
/// architecturally committed, and any physical registers previously allocated
/// to it are freed.
struct RetireControlUnit : public HardwareUnit {
  // A RUToken is created by the RCU for every instruction dispatched to the
  // schedulers.  These "tokens" are managed by the RCU in its token Queue.
  //
  // On every cycle ('cycleEvent'), the RCU iterates through the token queue
  // looking for any token with its 'Executed' flag set.  If a token has that
  // flag set, then the instruction has reached the write-back stage and will
  // be retired by the RCU.
  //
  // 'NumSlots' represents the number of entries consumed by the instruction in
  // the reorder buffer. Those entries will become available again once the
  // instruction is retired.
  //
  // Note that the size of the reorder buffer is defined by the scheduling
  // model via field 'NumMicroOpBufferSize'.
  struct RUToken {
    InstRef IR;
    unsigned NumSlots; // Slots reserved to this instruction.
    bool Executed;     // True if the instruction is past the WB stage.
  };

private:
  unsigned NextAvailableSlotIdx;
  unsigned CurrentInstructionSlotIdx;
  unsigned NumROBEntries;
  unsigned AvailableEntries;
  unsigned MaxRetirePerCycle; // 0 means no limit.
  std::vector<RUToken> Queue;

  unsigned normalizeQuantity(unsigned Quantity) const {
    // Some instructions may declare a number of uOps which exceeds the size
    // of the reorder buffer. To avoid problems, cap the number of slots to
    // the size of the reorder buffer.
    Quantity = std::min(Quantity, NumROBEntries);

    // Further normalize the number of micro opcodes for instructions that
    // declare zero micro opcodes. This should match the behavior of method
    // reserveSlot().
    return std::max(Quantity, 1U);
  }

  unsigned computeNextSlotIdx() const;

public:
  RetireControlUnit(const MCSchedModel &SM);

  bool isEmpty() const { return AvailableEntries == NumROBEntries; }

  bool isAvailable(unsigned Quantity = 1) const {
    return AvailableEntries >= normalizeQuantity(Quantity);
  }

  unsigned getMaxRetirePerCycle() const { return MaxRetirePerCycle; }

  // Reserves a number of slots, and returns a new token reference.
  unsigned dispatch(const InstRef &IS);

  // Return the current token from the RCU's circular token queue.
  const RUToken &getCurrentToken() const;

  const RUToken &peekNextToken() const;

  // Advance the pointer to the next token in the circular token queue.
  void consumeCurrentToken();
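
  // A hedged sketch of a retire loop (illustrative only; `RCU` is an
  // instance of this class, driven once per cycle by a retire stage):
  //
  //   unsigned NumRetired = 0;
  //   while (!RCU.isEmpty()) {
  //     if (RCU.getMaxRetirePerCycle() &&
  //         NumRetired == RCU.getMaxRetirePerCycle())
  //       break;
  //     const RetireControlUnit::RUToken &Token = RCU.getCurrentToken();
  //     if (!Token.Executed)
  //       break;
  //     ... // Notify listeners that Token.IR has retired.
  //     RCU.consumeCurrentToken();
  //     ++NumRetired;
  //   }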

  // Update the RCU token to represent the executed state.
  void onInstructionExecuted(unsigned TokenID);

#ifndef NDEBUG
  void dump() const;
#endif

  // Assigned to instructions that are not handled by the RCU.
  static const unsigned UnhandledTokenID = ~0U;
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_HARDWAREUNITS_RETIRECONTROLUNIT_H
//===--------------------- ResourceManager.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// The classes here represent processor resource units and their management
/// strategy.  These classes are managed by the Scheduler.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HARDWAREUNITS_RESOURCEMANAGER_H
#define LLVM_MCA_HARDWAREUNITS_RESOURCEMANAGER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/MCA/Instruction.h"
#include "llvm/MCA/Support.h"

namespace llvm {
namespace mca {

/// Used to describe the internal state of a processor resource.
///
/// A processor resource is available if it is not reserved, and there are
/// available slots in the buffer.  A processor resource is unavailable if it
/// is either reserved, or the associated buffer is full. A processor resource
/// with a buffer size of -1 is always available if it is not reserved.
///
/// Values of type ResourceStateEvent are returned by method
/// ResourceManager::canBeDispatched()
///
/// The naming convention for resource state events is:
///  * Event names start with prefix RS_
///  * Prefix RS_ is followed by a string describing the actual resource state.
enum ResourceStateEvent {
  RS_BUFFER_AVAILABLE,
  RS_BUFFER_UNAVAILABLE,
  RS_RESERVED
};

/// Resource allocation strategy used by hardware scheduler resources.
class ResourceStrategy {
  ResourceStrategy(const ResourceStrategy &) = delete;
  ResourceStrategy &operator=(const ResourceStrategy &) = delete;

public:
  ResourceStrategy() = default;
  virtual ~ResourceStrategy();

  /// Selects a processor resource unit from a ReadyMask.
  virtual uint64_t select(uint64_t ReadyMask) = 0;

  /// Called by the ResourceManager when a processor resource group, or a
  /// processor resource with multiple units has become unavailable.
  ///
  /// The default strategy uses this information to bias its selection logic.
  virtual void used(uint64_t ResourceMask) {}
};

/// Default resource allocation strategy used by processor resource groups and
/// processor resources with multiple units.
class DefaultResourceStrategy final : public ResourceStrategy {
  /// A Mask of resource unit identifiers.
  ///
  /// There is one bit set for every available resource unit.
  /// It defaults to the value of field ResourceSizeMask in ResourceState.
  const uint64_t ResourceUnitMask;

  /// A simple round-robin selector for processor resource units.
  /// Each bit of this mask identifies a sub resource within a group.
  ///
  /// As an example, lets assume that this is a default policy for a
  /// processor resource group composed by the following three units:
  ///   ResourceA -- 0b001
  ///   ResourceB -- 0b010
  ///   ResourceC -- 0b100
  ///
  /// Field NextInSequenceMask is used to select the next unit from the set of
  /// resource units. It defaults to the value of field `ResourceUnitMask` (in
  /// this example, it defaults to mask '0b111').
  ///
  /// The round-robin selector would first select 'ResourceC', then
  /// 'ResourceB', and eventually 'ResourceA'.  When a resource R is used, the
  /// corresponding bit in NextInSequenceMask is cleared.  For example, if
  /// 'ResourceC' is selected, then the new value of NextInSequenceMask becomes
  /// 0b011.
  ///
  /// When NextInSequenceMask becomes zero, it is automatically reset to the
  /// default value (i.e. ResourceUnitMask).
  uint64_t NextInSequenceMask;

  /// This field is used to track resource units that are used (i.e. selected)
  /// by groups other than the one associated with this strategy object.
  ///
  /// In LLVM, processor resource groups are allowed to partially (or fully)
  /// overlap. That means the same unit may be visible to multiple groups.
  /// This field keeps track of uses that have originated from outside of
  /// this group. The idea is to bias the selection strategy, so that resources
  /// that haven't been used by other groups get prioritized.
  ///
  /// The end goal is to (try to) keep the resource distribution as uniform as
  /// possible. By construction, this mask only tracks one level of resource
  /// usage. Therefore, this strategy is expected to be less accurate when the
  /// same units are used multiple times by other groups within a single round
  /// of select.
  ///
  /// Note: an LRU selector would have better accuracy at the cost of being
  /// slightly more expensive (mostly in terms of runtime cost). Methods
  /// 'select' and 'used' are always in the hot execution path of llvm-mca.
  /// Therefore, a slow implementation of 'select' would have a negative impact
  /// on the overall performance of the tool.
  uint64_t RemovedFromNextInSequence;
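
  /// A hedged sketch of the selection step described above (illustrative
  /// only, not the exact implementation):
  ///
  ///   uint64_t Candidates = ReadyMask & NextInSequenceMask;
  ///   if (!Candidates)
  ///     Candidates = ReadyMask & ResourceUnitMask;
  ///   // Pick the highest candidate bit; 'used' later clears it from
  ///   // NextInSequenceMask.
  ///   uint64_t Selected = llvm::bit_floor(Candidates);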

public:
  DefaultResourceStrategy(uint64_t UnitMask)
      : ResourceUnitMask(UnitMask), NextInSequenceMask(UnitMask),
        RemovedFromNextInSequence(0) {}
  virtual ~DefaultResourceStrategy() = default;

  uint64_t select(uint64_t ReadyMask) override;
  void used(uint64_t Mask) override;
};

/// A processor resource descriptor.
///
/// There is an instance of this class for every processor resource defined by
/// the machine scheduling model.
/// Objects of class ResourceState dynamically track the usage of processor
/// resource units.
class ResourceState {
  /// An index to the MCProcResourceDesc entry in the processor model.
  const unsigned ProcResourceDescIndex;
  /// A resource mask. This is generated by the tool with the help of
  /// function `mca::computeProcResourceMasks' (see Support.h).
  ///
  /// Field ResourceMask only has one bit set if this resource state describes a
  /// processor resource unit (i.e. this is not a group). That means we can
  /// quickly check if a resource is a group by simply counting the number of
  /// bits that are set in the mask.
  ///
  /// The most significant bit of a mask (MSB) uniquely identifies a resource.
  /// Remaining bits are used to describe the composition of a group (Group).
  ///
  /// Example (little endian):
  ///            Resource |  Mask      |  MSB       |  Group
  ///            ---------+------------+------------+------------
  ///            A        |  0b000001  |  0b000001  |  0b000000
  ///                     |            |            |
  ///            B        |  0b000010  |  0b000010  |  0b000000
  ///                     |            |            |
  ///            C        |  0b010000  |  0b010000  |  0b000000
  ///                     |            |            |
  ///            D        |  0b110010  |  0b100000  |  0b010010
  ///
  /// In this example, resources A, B and C are processor resource units.
  /// Only resource D is a group resource, and it contains resources B and C.
  /// That is because MSB(B) and MSB(C) are both contained within Group(D).
  const uint64_t ResourceMask;

  /// A ProcResource can have multiple units.
  ///
  /// For processor resource groups this field is a mask of contained resource
  /// units. It is obtained from ResourceMask by clearing the highest set bit.
  /// The number of resource units in a group can be simply computed as the
  /// population count of this field.
  ///
  /// For normal (i.e. non-group) resources, the number of bits set in this mask
  /// is equivalent to the number of units declared by the processor model (see
  /// field 'NumUnits' in 'ProcResourceUnits').
  uint64_t ResourceSizeMask;

  /// A mask of ready units.
  uint64_t ReadyMask;

  /// Buffered resources will have this field set to a positive number.
  /// A buffered resource behaves like a reservation station
  /// implementing its own buffer for out-of-order execution.
  ///
  /// A BufferSize of 1 is used by scheduler resources that force in-order
  /// execution.
  ///
  /// A BufferSize of 0 is used to model in-order issue/dispatch resources.
  /// Since in-order issue/dispatch resources don't implement buffers, dispatch
  /// events coincide with issue events.
  /// Also, no other instruction can be dispatched/issued while this resource is
  /// in use. Only when all the "resource cycles" are consumed (after the issue
  /// event) can a new instruction be dispatched.
  const int BufferSize;

  /// Available slots in the buffer (zero, if this is not a buffered resource).
  unsigned AvailableSlots;

  /// This field is set if this resource is currently reserved.
  ///
  /// Resources can be reserved for a number of cycles.
  /// Instructions can still be dispatched to reserved resources. However,
  /// instructions dispatched to a reserved resource cannot be issued to the
  /// underlying units (i.e. pipelines) until the resource is released.
  bool Unavailable;

  const bool IsAGroup;

  /// Checks for the availability of unit 'SubResMask' in the group.
  bool isSubResourceReady(uint64_t SubResMask) const {
    return ReadyMask & SubResMask;
  }

public:
  ResourceState(const MCProcResourceDesc &Desc, unsigned Index, uint64_t Mask);

  unsigned getProcResourceID() const { return ProcResourceDescIndex; }
  uint64_t getResourceMask() const { return ResourceMask; }
  uint64_t getReadyMask() const { return ReadyMask; }
  int getBufferSize() const { return BufferSize; }

  bool isBuffered() const { return BufferSize > 0; }
  bool isInOrder() const { return BufferSize == 1; }

  /// Returns true if this is an in-order dispatch/issue resource.
  bool isADispatchHazard() const { return BufferSize == 0; }
  bool isReserved() const { return Unavailable; }

  void setReserved() { Unavailable = true; }
  void clearReserved() { Unavailable = false; }

  /// Returns true if this resource is not reserved, and if there are at least
  /// `NumUnits` available units.
  bool isReady(unsigned NumUnits = 1) const;

  bool isAResourceGroup() const { return IsAGroup; }

  bool containsResource(uint64_t ID) const { return ResourceMask & ID; }

  void markSubResourceAsUsed(uint64_t ID) {
    assert(isSubResourceReady(ID));
    ReadyMask ^= ID;
  }

  void releaseSubResource(uint64_t ID) {
    assert(!isSubResourceReady(ID));
    ReadyMask ^= ID;
  }

  unsigned getNumUnits() const {
    return isAResourceGroup() ? 1U : llvm::popcount(ResourceSizeMask);
  }

  /// Checks if there is an available slot in the resource buffer.
  ///
  /// Returns RS_BUFFER_AVAILABLE if this is not a buffered resource, or if
  /// there is a slot available.
  ///
  /// Returns RS_RESERVED if this buffered resource is a dispatch hazard, and it
  /// is reserved.
  ///
  /// Returns RS_BUFFER_UNAVAILABLE if there are no available slots.
  ResourceStateEvent isBufferAvailable() const;

  /// Reserve a buffer slot.
  ///
  /// Returns true if the buffer is not full.
  /// It always returns true if BufferSize is set to zero.
  bool reserveBuffer() {
    if (BufferSize <= 0)
      return true;

    --AvailableSlots;
    assert(AvailableSlots <= static_cast<unsigned>(BufferSize));
    return AvailableSlots;
  }

  /// Releases a slot in the buffer.
  void releaseBuffer() {
    // Ignore dispatch hazards or invalid buffer sizes.
    if (BufferSize <= 0)
      return;

    ++AvailableSlots;
    assert(AvailableSlots <= static_cast<unsigned>(BufferSize));
  }

#ifndef NDEBUG
  void dump() const;
#endif
};

/// A resource unit identifier.
///
/// This is used to identify a specific processor resource unit using a pair
/// of indices where the 'first' index is a processor resource mask, and the
/// 'second' index is an index for a "sub-resource" (i.e. unit).
typedef std::pair<uint64_t, uint64_t> ResourceRef;

// First: a MCProcResourceDesc index identifying a buffered resource.
// Second: max number of buffer entries used in this resource.
typedef std::pair<unsigned, unsigned> BufferUsageEntry;

/// A resource manager for processor resource units and groups.
///
/// This class owns all the ResourceState objects, and it is responsible for
/// acting on requests from a Scheduler by updating the internal state of
/// ResourceState objects.
/// This class doesn't know about instruction itineraries and functional units.
/// In the future, it could be extended to support itineraries too through the
/// same public interface.
class ResourceManager {
  // Set of resources available on the subtarget.
  //
  // There is an instance of ResourceState for every resource declared by the
  // target scheduling model.
  //
  // Elements of this vector are ordered by resource kind. In particular,
  // resource units take precedence over resource groups.
  //
  // The index of a processor resource in this vector depends on the value of
  // its mask (see the description of field ResourceState::ResourceMask).  In
  // particular, it is computed as the position of the most significant bit set
  // (MSB) in the mask plus one (since we want to ignore the invalid resource
  // descriptor at index zero).
  //
  // Example (little endian):
  //
  //             Resource | Mask    |  MSB    | Index
  //             ---------+---------+---------+-------
  //                 A    | 0b00001 | 0b00001 |   1
  //                      |         |         |
  //                 B    | 0b00100 | 0b00100 |   3
  //                      |         |         |
  //                 C    | 0b10010 | 0b10000 |   5
  //
  //
  // The same index is also used to address elements within vector `Strategies`
  // and vector `Resource2Groups`.
  std::vector<std::unique_ptr<ResourceState>> Resources;
  std::vector<std::unique_ptr<ResourceStrategy>> Strategies;

  // Used to quickly identify groups that own a particular resource unit.
  std::vector<uint64_t> Resource2Groups;

  // A table that maps processor resource IDs to processor resource masks.
  SmallVector<uint64_t, 8> ProcResID2Mask;

  // A table that maps resource indices to actual processor resource IDs in the
  // scheduling model.
  SmallVector<unsigned, 8> ResIndex2ProcResID;

  // Keeps track of which resources are busy, and how many cycles are left
  // before those become usable again.
  SmallDenseMap<ResourceRef, unsigned> BusyResources;

  // Set of processor resource units available on the target.
  uint64_t ProcResUnitMask;

  // Set of processor resource units that are available during this cycle.
  uint64_t AvailableProcResUnits;

  // Set of processor resources that are currently reserved.
  uint64_t ReservedResourceGroups;

  // Set of buffered resources that still have slots available. This is used
  // internally to speed up `canBeDispatched()` queries.

  // Set of dispatch hazard buffer resources that are currently unavailable.
  uint64_t ReservedBuffers;

  // Returns the actual resource unit that will be used.
  ResourceRef selectPipe(uint64_t ResourceID);

  void use(const ResourceRef &RR);
  void release(const ResourceRef &RR);

  unsigned getNumUnits(uint64_t ResourceID) const;

  // Overrides the selection strategy for the processor resource with the given
  // mask.
  void setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
                             uint64_t ResourceMask);

public:
  ResourceManager(const MCSchedModel &SM);
  virtual ~ResourceManager() = default;

  // Overrides the selection strategy for the resource at index ResourceID in
  // the MCProcResourceDesc table.
  void setCustomStrategy(std::unique_ptr<ResourceStrategy> S,
                         unsigned ResourceID) {
    assert(ResourceID < ProcResID2Mask.size() &&
           "Invalid resource index in input!");
    return setCustomStrategyImpl(std::move(S), ProcResID2Mask[ResourceID]);
  }

  // Returns RS_BUFFER_AVAILABLE if buffered resources are not reserved, and if
  // there are enough available slots in the buffers.
  ResourceStateEvent canBeDispatched(uint64_t ConsumedBuffers) const;

  // Returns the processor resource identifier associated with this Mask.
  unsigned resolveResourceMask(uint64_t Mask) const;

  // Acquires a slot from every buffered resource in mask `ConsumedBuffers`.
  // Units that are dispatch hazards (i.e. BufferSize=0) are marked as reserved.
  void reserveBuffers(uint64_t ConsumedBuffers);

  // Releases a slot from every buffered resource in mask `ConsumedBuffers`.
  // ConsumedBuffers is a bitmask of previously acquired buffers (using method
  // `reserveBuffers`). Units that are dispatch hazards (i.e. BufferSize=0) are
  // not automatically unreserved by this method.
  void releaseBuffers(uint64_t ConsumedBuffers);

  // Reserve a processor resource. A reserved resource is not available for
  // instruction issue until it is released.
  void reserveResource(uint64_t ResourceID);

  // Release a previously reserved processor resource.
  void releaseResource(uint64_t ResourceID);

  // Returns a zero mask if resources requested by Desc are all available during
  // this cycle. It returns a non-zero mask value only if there are unavailable
  // processor resources; each bit set in the mask represents a busy processor
  // resource unit or a reserved processor resource group.
  uint64_t checkAvailability(const InstrDesc &Desc) const;
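
  // A hedged dispatch sketch (illustrative only; `RM` is a ResourceManager,
  // and `Desc` an InstrDesc whose `UsedBuffers` mask names the consumed
  // buffered resources):
  //
  //   if (RM.canBeDispatched(Desc.UsedBuffers) == RS_BUFFER_AVAILABLE &&
  //       !RM.checkAvailability(Desc)) {
  //     RM.reserveBuffers(Desc.UsedBuffers);
  //     ... // Later, on issue: RM.issueInstruction(Desc, Pipes);
  //   }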

  uint64_t getProcResUnitMask() const { return ProcResUnitMask; }
  uint64_t getAvailableProcResUnits() const { return AvailableProcResUnits; }

  void issueInstruction(
      const InstrDesc &Desc,
      SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes);

  void cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed);

#ifndef NDEBUG
  void dump() const {
    for (const std::unique_ptr<ResourceState> &Resource : Resources)
      Resource->dump();
  }
#endif
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_HARDWAREUNITS_RESOURCEMANAGER_H
//===-------------------------- HardwareUnit.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines a base class for describing a simulated hardware
/// unit.  These units are used to construct a simulated backend.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_HARDWAREUNITS_HARDWAREUNIT_H
#define LLVM_MCA_HARDWAREUNITS_HARDWAREUNIT_H

namespace llvm {
namespace mca {

class HardwareUnit {
  HardwareUnit(const HardwareUnit &H) = delete;
  HardwareUnit &operator=(const HardwareUnit &H) = delete;

public:
  HardwareUnit() = default;
  virtual ~HardwareUnit();
};

} // namespace mca
} // namespace llvm
#endif // LLVM_MCA_HARDWAREUNITS_HARDWAREUNIT_H
//===---------------------- CustomBehaviour.h -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the base class CustomBehaviour which can be inherited from
/// by specific targets (ex. llvm/tools/llvm-mca/lib/X86CustomBehaviour.h).
/// CustomBehaviour is designed to enforce custom behaviour and dependencies
/// within the llvm-mca pipeline simulation that llvm-mca isn't already capable
/// of extracting from the Scheduling Models.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_CUSTOMBEHAVIOUR_H
#define LLVM_MCA_CUSTOMBEHAVIOUR_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MCA/SourceMgr.h"
#include "llvm/MCA/View.h"

namespace llvm {
namespace mca {

/// Class which can be overridden by targets to modify the
/// mca::Instruction objects before the pipeline starts.
/// A common usage of this class is to add immediate operands to certain
/// instructions or to remove Defs/Uses from an instruction where the
/// scheduling model is incorrect.
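/// \code
///   // Illustrative sketch only; MyTargetPostProcess is a hypothetical
///   // target subclass, not part of LLVM.
///   class MyTargetPostProcess : public InstrPostProcess {
///   public:
///     using InstrPostProcess::InstrPostProcess;
///     void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
///                                 const MCInst &MCI) override {
///       // e.g. drop a spurious side-effect flag for a known opcode.
///       Inst->setHasSideEffects(false);
///     }
///   };
/// \endcode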
class InstrPostProcess {
protected:
  const MCSubtargetInfo &STI;
  const MCInstrInfo &MCII;

public:
  InstrPostProcess(const MCSubtargetInfo &STI, const MCInstrInfo &MCII)
      : STI(STI), MCII(MCII) {}

  virtual ~InstrPostProcess() = default;

  /// This method can be overridden by targets to modify the mca::Instruction
  /// object after it has been lowered from the MCInst.
  /// This is generally a less disruptive alternative to modifying the
  /// scheduling model.
  virtual void postProcessInstruction(std::unique_ptr<Instruction> &Inst,
                                      const MCInst &MCI) {}

  // The resetState() method gets invoked at the beginning of each code region
  // so that targets that override this function can clear any state that they
  // have left from the previous code region.
  virtual void resetState() {}
};

/// Class which can be overridden by targets to enforce instruction
/// dependencies and behaviours that aren't expressed well enough
/// within the scheduling model for mca to automatically simulate
/// them properly.
/// If you implement this class for your target, make sure to also implement
/// a target specific InstrPostProcess class as well.
class CustomBehaviour {
protected:
  const MCSubtargetInfo &STI;
  const mca::SourceMgr &SrcMgr;
  const MCInstrInfo &MCII;

public:
  CustomBehaviour(const MCSubtargetInfo &STI, const mca::SourceMgr &SrcMgr,
                  const MCInstrInfo &MCII)
      : STI(STI), SrcMgr(SrcMgr), MCII(MCII) {}

  virtual ~CustomBehaviour();

  /// Before the llvm-mca pipeline dispatches an instruction, it first checks
  /// for any register or resource dependencies / hazards. If it doesn't find
  /// any, this method will be invoked to determine if there are any custom
  /// hazards that the instruction needs to wait for.
  /// The return value of this method is the number of cycles that the
  /// instruction needs to wait for.
  /// It's safe to underestimate the number of cycles to wait for since these
  /// checks will be invoked again before the instruction gets dispatched.
  /// However, it's not safe (accurate) to overestimate the number of cycles
  /// to wait for since the instruction will wait for AT LEAST that number of
  /// cycles before attempting to be dispatched again.
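  /// \code
  ///   // A hedged sketch (MyCB is a hypothetical subclass): stall for one
  ///   // cycle while any instruction is still in flight.
  ///   unsigned MyCB::checkCustomHazard(ArrayRef<InstRef> IssuedInst,
  ///                                    const InstRef &IR) {
  ///     return IssuedInst.empty() ? 0 : 1;
  ///   }
  /// \endcode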
  virtual unsigned checkCustomHazard(ArrayRef<InstRef> IssuedInst,
                                     const InstRef &IR);

  // Functions that target CBs can override to return a list of
  // target-specific Views that need to live within /lib/Target/ so that
  // they can benefit from the target CB or from backend functionality that is
  // not already exposed through MC-layer classes. Note that these functions
  // are called within llvm-mca.cpp, and each returned unique_ptr<View> is
  // passed into PipelinePrinter::addView(), which std::moves the View into
  // its own vector of Views. Any CB that overrides these functions must
  // therefore not rely on the current address or reference of the View
  // unique_ptrs. If the CB and a View need to communicate with each other,
  // consider giving the View a reference or pointer to the CB when the View
  // is constructed; the View can then query the CB for information when it
  // needs it.
  /// Return a vector of Views that will be added before all other Views.
  virtual std::vector<std::unique_ptr<View>>
  getStartViews(llvm::MCInstPrinter &IP, llvm::ArrayRef<llvm::MCInst> Insts);
  /// Return a vector of Views that will be added after the InstructionInfoView.
  virtual std::vector<std::unique_ptr<View>>
  getPostInstrInfoViews(llvm::MCInstPrinter &IP,
                        llvm::ArrayRef<llvm::MCInst> Insts);
  /// Return a vector of Views that will be added after all other Views.
  virtual std::vector<std::unique_ptr<View>>
  getEndViews(llvm::MCInstPrinter &IP, llvm::ArrayRef<llvm::MCInst> Insts);
};

class Instrument {
  /// The description of Instrument kind
  const StringRef Desc;

  /// The instrumentation data
  const StringRef Data;

public:
  Instrument(StringRef Desc, StringRef Data) : Desc(Desc), Data(Data) {}

  Instrument() : Instrument("", "") {}

  virtual ~Instrument() = default;

  StringRef getDesc() const { return Desc; }
  StringRef getData() const { return Data; }
};

using UniqueInstrument = std::unique_ptr<Instrument>;

/// This class allows targets to optionally customize the logic that resolves
/// scheduling class IDs. Targets can use information encoded in Instrument
/// objects to make more informed scheduling decisions.
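/// \code
///   // A hedged sketch; MyInstrumentManager and the "CFG" kind are
///   // illustrative assumptions, not part of LLVM.
///   struct MyInstrumentManager : public InstrumentManager {
///     using InstrumentManager::InstrumentManager;
///     bool shouldIgnoreInstruments() const override { return false; }
///     bool supportsInstrumentType(StringRef Type) const override {
///       return Type == "CFG";
///     }
///   };
/// \endcode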
class InstrumentManager {
protected:
  const MCSubtargetInfo &STI;
  const MCInstrInfo &MCII;

public:
  InstrumentManager(const MCSubtargetInfo &STI, const MCInstrInfo &MCII)
      : STI(STI), MCII(MCII) {}

  virtual ~InstrumentManager() = default;

  /// Returns true if llvm-mca should ignore instruments.
  virtual bool shouldIgnoreInstruments() const { return true; }

  // Returns true if this manager supports processing an Instrument whose
  // Instrument.Desc equals Type.
  virtual bool supportsInstrumentType(StringRef Type) const { return false; }

  /// Allocate an Instrument, and return a unique pointer to it. This function
  /// may be useful to create instruments coming from comments in the assembly.
  /// See createInstruments to create Instruments from an MCInst.
  virtual UniqueInstrument createInstrument(StringRef Desc, StringRef Data);

  /// Return a list of unique pointers to Instruments, where each Instrument
  /// is allocated by this function. See createInstrument to create an Instrument
  /// from a description and data.
  virtual SmallVector<UniqueInstrument> createInstruments(const MCInst &Inst);

  /// Given an MCInst and a vector of Instrument, a target can
  /// return a SchedClassID. This can be used by a subtarget to return a
  /// PseudoInstruction SchedClassID instead of the one that belongs to the
  /// BaseInstruction. This can be useful when a BaseInstruction does not convey
  /// the correct scheduling information without additional data. By default,
  /// it returns the SchedClassID that belongs to MCI.
  virtual unsigned getSchedClassID(const MCInstrInfo &MCII, const MCInst &MCI,
                                   const SmallVector<Instrument *> &IVec) const;
};

} // namespace mca
} // namespace llvm

#endif /* LLVM_MCA_CUSTOMBEHAVIOUR_H */
//===--------------------- CodeEmitter.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// A utility class used to compute instruction encodings. It buffers encodings
/// for later usage. It exposes a simple API to compute and get the encodings as
/// StringRef.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_CODEEMITTER_H
#define LLVM_MCA_CODEEMITTER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"

namespace llvm {
namespace mca {

/// A utility class used to compute instruction encodings for a code region.
///
/// It provides a simple API to compute and return instruction encodings as
/// strings. Encodings are cached internally for later usage.
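/// \code
///   // Minimal usage sketch; STI, MAB, MCE and Insts are assumed to be
///   // initialized elsewhere.
///   CodeEmitter CE(STI, MAB, MCE, Insts);
///   StringRef Encoding = CE.getEncoding(/*MCID=*/0);
///   size_t SizeInBytes = Encoding.size();
/// \endcode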
class CodeEmitter {
  const MCSubtargetInfo &STI;
  const MCAsmBackend &MAB;
  const MCCodeEmitter &MCE;

  SmallString<256> Code;
  ArrayRef<MCInst> Sequence;

  // An EncodingInfo pair stores <base, length> information.  Base (i.e. first)
  // is an index to the `Code`. Length (i.e. second) is the encoding size.
  using EncodingInfo = std::pair<unsigned, unsigned>;

  // A cache of encodings.
  SmallVector<EncodingInfo, 16> Encodings;

  EncodingInfo getOrCreateEncodingInfo(unsigned MCID);

public:
  CodeEmitter(const MCSubtargetInfo &ST, const MCAsmBackend &AB,
              const MCCodeEmitter &CE, ArrayRef<MCInst> S)
      : STI(ST), MAB(AB), MCE(CE), Sequence(S), Encodings(S.size()) {}

  StringRef getEncoding(unsigned MCID) {
    EncodingInfo EI = getOrCreateEncodingInfo(MCID);
    return StringRef(&Code[EI.first], EI.second);
  }
};

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_CODEEMITTER_H
//===---------------------------- Context.h ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines a class for holding ownership of various simulated
/// hardware units.  A Context also provides a utility routine for constructing
/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
/// stages.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_CONTEXT_H
#define LLVM_MCA_CONTEXT_H

#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MCA/CustomBehaviour.h"
#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
#include "llvm/MCA/Pipeline.h"
#include "llvm/MCA/SourceMgr.h"
#include <memory>

namespace llvm {
namespace mca {

/// This is a convenience struct to hold the parameters necessary for creating
/// the pre-built "default" out-of-order pipeline.
struct PipelineOptions {
  PipelineOptions(unsigned UOPQSize, unsigned DecThr, unsigned DW, unsigned RFS,
                  unsigned LQS, unsigned SQS, bool NoAlias,
                  bool ShouldEnableBottleneckAnalysis = false)
      : MicroOpQueueSize(UOPQSize), DecodersThroughput(DecThr),
        DispatchWidth(DW), RegisterFileSize(RFS), LoadQueueSize(LQS),
        StoreQueueSize(SQS), AssumeNoAlias(NoAlias),
        EnableBottleneckAnalysis(ShouldEnableBottleneckAnalysis) {}
  unsigned MicroOpQueueSize;
  unsigned DecodersThroughput; // Instructions per cycle.
  unsigned DispatchWidth;
  unsigned RegisterFileSize;
  unsigned LoadQueueSize;
  unsigned StoreQueueSize;
  bool AssumeNoAlias;
  bool EnableBottleneckAnalysis;
};

class Context {
  SmallVector<std::unique_ptr<HardwareUnit>, 4> Hardware;
  const MCRegisterInfo &MRI;
  const MCSubtargetInfo &STI;

public:
  Context(const MCRegisterInfo &R, const MCSubtargetInfo &S) : MRI(R), STI(S) {}
  Context(const Context &C) = delete;
  Context &operator=(const Context &C) = delete;

  const MCRegisterInfo &getMCRegisterInfo() const { return MRI; }
  const MCSubtargetInfo &getMCSubtargetInfo() const { return STI; }

  void addHardwareUnit(std::unique_ptr<HardwareUnit> H) {
    Hardware.push_back(std::move(H));
  }

  /// Construct a basic pipeline for simulating an out-of-order pipeline.
  /// This pipeline consists of Fetch, Dispatch, Execute, and Retire stages.
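  /// \code
  ///   // A sketch of typical use; MRI, STI, Opts, SrcMgr and CB are assumed
  ///   // to be constructed elsewhere.
  ///   mca::Context Ctx(MRI, STI);
  ///   std::unique_ptr<Pipeline> P = Ctx.createDefaultPipeline(Opts, SrcMgr, CB);
  ///   Expected<unsigned> Cycles = P->run();
  /// \endcode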
  std::unique_ptr<Pipeline> createDefaultPipeline(const PipelineOptions &Opts,
                                                  SourceMgr &SrcMgr,
                                                  CustomBehaviour &CB);

  /// Construct a basic pipeline for simulating an in-order pipeline.
  /// This pipeline consists of Fetch, InOrderIssue, and Retire stages.
  std::unique_ptr<Pipeline> createInOrderPipeline(const PipelineOptions &Opts,
                                                  SourceMgr &SrcMgr,
                                                  CustomBehaviour &CB);
};

} // namespace mca
} // namespace llvm
#endif // LLVM_MCA_CONTEXT_H
//===--------------------- Instruction.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines abstractions used by the Pipeline to model register reads,
/// register writes and instructions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_INSTRUCTION_H
#define LLVM_MCA_INSTRUCTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCRegister.h" // definition of MCPhysReg.
#include "llvm/Support/MathExtras.h"

#ifndef NDEBUG
#include "llvm/Support/raw_ostream.h"
#endif

#include <memory>

namespace llvm {

namespace mca {

constexpr int UNKNOWN_CYCLES = -512;

/// A representation of an mca::Instruction operand
/// for use in mca::CustomBehaviour.
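/// \code
///   // e.g. an immediate that was operand 3 of the original MCInst:
///   MCAOperand Op = MCAOperand::createImm(42);
///   Op.setIndex(3);
///   assert(Op.isImm() && Op.getImm() == 42);
/// \endcode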
class MCAOperand {
  // This class is mostly copied from MCOperand within
  // MCInst.h except that we don't keep track of
  // expressions or sub-instructions.
  enum MCAOperandType : unsigned char {
    kInvalid,   ///< Uninitialized, Relocatable immediate, or Sub-instruction.
    kRegister,  ///< Register operand.
    kImmediate, ///< Immediate operand.
    kSFPImmediate, ///< Single-floating-point immediate operand.
    kDFPImmediate, ///< Double-floating-point immediate operand.
  };
  MCAOperandType Kind;

  union {
    unsigned RegVal;
    int64_t ImmVal;
    uint32_t SFPImmVal;
    uint64_t FPImmVal;
  };

  // We only store specific operands for specific instructions
  // so an instruction's operand 3 may be stored within the list
  // of MCAOperand as element 0. This Index attribute keeps track
  // of the original index (3 for this example).
  unsigned Index;

public:
  MCAOperand() : Kind(kInvalid), FPImmVal(), Index() {}

  bool isValid() const { return Kind != kInvalid; }
  bool isReg() const { return Kind == kRegister; }
  bool isImm() const { return Kind == kImmediate; }
  bool isSFPImm() const { return Kind == kSFPImmediate; }
  bool isDFPImm() const { return Kind == kDFPImmediate; }

  /// Returns the register number.
  unsigned getReg() const {
    assert(isReg() && "This is not a register operand!");
    return RegVal;
  }

  int64_t getImm() const {
    assert(isImm() && "This is not an immediate");
    return ImmVal;
  }

  uint32_t getSFPImm() const {
    assert(isSFPImm() && "This is not an SFP immediate");
    return SFPImmVal;
  }

  uint64_t getDFPImm() const {
    assert(isDFPImm() && "This is not an FP immediate");
    return FPImmVal;
  }

  void setIndex(const unsigned Idx) { Index = Idx; }

  unsigned getIndex() const { return Index; }

  static MCAOperand createReg(unsigned Reg) {
    MCAOperand Op;
    Op.Kind = kRegister;
    Op.RegVal = Reg;
    return Op;
  }

  static MCAOperand createImm(int64_t Val) {
    MCAOperand Op;
    Op.Kind = kImmediate;
    Op.ImmVal = Val;
    return Op;
  }

  static MCAOperand createSFPImm(uint32_t Val) {
    MCAOperand Op;
    Op.Kind = kSFPImmediate;
    Op.SFPImmVal = Val;
    return Op;
  }

  static MCAOperand createDFPImm(uint64_t Val) {
    MCAOperand Op;
    Op.Kind = kDFPImmediate;
    Op.FPImmVal = Val;
    return Op;
  }

  static MCAOperand createInvalid() {
    MCAOperand Op;
    Op.Kind = kInvalid;
    Op.FPImmVal = 0;
    return Op;
  }
};

/// A register write descriptor.
struct WriteDescriptor {
  // Operand index. The index is negative for implicit writes only.
  // For implicit writes, the actual operand index is computed by performing
  // a bitwise not of the OpIndex.
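  // For example, OpIndex == ~1 (i.e. -2) encodes implicit-write index 1.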
  int OpIndex;
  // Write latency. Number of cycles before write-back stage.
  unsigned Latency;
  // This field is set to a value different than zero only if this
  // is an implicit definition.
  MCPhysReg RegisterID;
  // Instruction itineraries would set this field to the SchedClass ID.
  // Otherwise, it defaults to the WriteResourceID from the MCWriteLatencyEntry
  // element associated to this write.
  // When computing read latencies, this value is matched against the
  // "ReadAdvance" information. The hardware backend may implement
  // dedicated forwarding paths to quickly propagate write results to dependent
  // instructions waiting in the reservation station (effectively bypassing the
  // write-back stage).
  unsigned SClassOrWriteResourceID;
  // True only if this is a write obtained from an optional definition.
  // Optional definitions are allowed to reference regID zero (i.e. "no
  // register").
  bool IsOptionalDef;

  bool isImplicitWrite() const { return OpIndex < 0; }
};

/// A register read descriptor.
struct ReadDescriptor {
  // An MCOperand index. This is used by the Dispatch logic to identify register
  // reads. Implicit reads have negative indices. The actual operand index of an
  // implicit read is the bitwise not of field OpIndex.
  int OpIndex;
  // The actual "UseIdx". This is used to query the ReadAdvance table. Explicit
  // uses always come first in the sequence of uses.
  unsigned UseIndex;
  // This field is only set if this is an implicit read.
  MCPhysReg RegisterID;
  // Scheduling Class Index. It is used to query the scheduling model for the
  // MCSchedClassDesc object.
  unsigned SchedClassID;

  bool isImplicitRead() const { return OpIndex < 0; }
};

class ReadState;

/// A critical data dependency descriptor.
///
/// Field RegID is set to the invalid register for memory dependencies.
struct CriticalDependency {
  unsigned IID;
  MCPhysReg RegID;
  unsigned Cycles;
};

/// Tracks uses of a register definition (e.g. register write).
///
/// Each implicit/explicit register write is associated with an instance of
/// this class. A WriteState object tracks the dependent users of a
/// register write. It also tracks how many cycles are left before the write
/// back stage.
class WriteState {
  const WriteDescriptor *WD;
  // On instruction issue, this field is set equal to the write latency.
  // Before instruction issue, this field defaults to -512, a special
  // value that represents an "unknown" number of cycles.
  int CyclesLeft;

  // Actual register defined by this write. This field is only used
  // to speedup queries on the register file.
  // For implicit writes, this field always matches the value of
  // field RegisterID from WD.
  MCPhysReg RegisterID;

  // Physical register file that serves register RegisterID.
  unsigned PRFID;

  // True if this write implicitly clears the upper portion of RegisterID's
  // super-registers.
  bool ClearsSuperRegs;

  // True if this write is from a dependency breaking zero-idiom instruction.
  bool WritesZero;

  // True if this write has been eliminated at register renaming stage.
  // Example: a register move doesn't consume scheduler/pipeline resources if
  // it is eliminated at register renaming stage. It still consumes
  // decode bandwidth, and ROB entries.
  bool IsEliminated;

  // This field is set if this is a partial register write, and it has a false
  // dependency on any previous write of the same register (or a portion of it).
  // DependentWrite must be able to complete before this write completes, so
  // that we don't break the WAW, and the two writes can be merged together.
  const WriteState *DependentWrite;

  // A partial write that is in a false dependency with this write.
  WriteState *PartialWrite;
  unsigned DependentWriteCyclesLeft;

  // Critical register dependency for this write.
  CriticalDependency CRD;

  // The set of dependent reads (users) of this write. A dependent read is
  // added to the set only if CyclesLeft is "unknown". As soon as CyclesLeft
  // becomes known, each user in the set gets notified with the actual value.
  //
  // The 'second' element of each pair is a "ReadAdvance" number of cycles.
  SmallVector<std::pair<ReadState *, int>, 4> Users;

public:
  WriteState(const WriteDescriptor &Desc, MCPhysReg RegID,
             bool clearsSuperRegs = false, bool writesZero = false)
      : WD(&Desc), CyclesLeft(UNKNOWN_CYCLES), RegisterID(RegID), PRFID(0),
        ClearsSuperRegs(clearsSuperRegs), WritesZero(writesZero),
        IsEliminated(false), DependentWrite(nullptr), PartialWrite(nullptr),
        DependentWriteCyclesLeft(0), CRD() {}

  WriteState(const WriteState &Other) = default;
  WriteState &operator=(const WriteState &Other) = default;

  int getCyclesLeft() const { return CyclesLeft; }
  unsigned getWriteResourceID() const { return WD->SClassOrWriteResourceID; }
  MCPhysReg getRegisterID() const { return RegisterID; }
  void setRegisterID(const MCPhysReg RegID) { RegisterID = RegID; }
  unsigned getRegisterFileID() const { return PRFID; }
  unsigned getLatency() const { return WD->Latency; }
  unsigned getDependentWriteCyclesLeft() const {
    return DependentWriteCyclesLeft;
  }
  const WriteState *getDependentWrite() const { return DependentWrite; }
  const CriticalDependency &getCriticalRegDep() const { return CRD; }

  // This method adds Use to the set of data dependent reads. IID is the
  // instruction identifier associated with this write. ReadAdvance is the
  // number of cycles to subtract from the latency of this data dependency.
  // Use is in a RAW dependency with this write.
  void addUser(unsigned IID, ReadState *Use, int ReadAdvance);

  // Use is a younger register write that is in a false dependency with this
  // write. IID is the instruction identifier associated with this write.
  void addUser(unsigned IID, WriteState *Use);

  unsigned getNumUsers() const {
    unsigned NumUsers = Users.size();
    if (PartialWrite)
      ++NumUsers;
    return NumUsers;
  }

  bool clearsSuperRegisters() const { return ClearsSuperRegs; }
  bool isWriteZero() const { return WritesZero; }
  bool isEliminated() const { return IsEliminated; }

  bool isReady() const {
    if (DependentWrite)
      return false;
    unsigned CyclesLeft = getDependentWriteCyclesLeft();
    return !CyclesLeft || CyclesLeft < getLatency();
  }

  bool isExecuted() const {
    return CyclesLeft != UNKNOWN_CYCLES && CyclesLeft <= 0;
  }

  void setDependentWrite(const WriteState *Other) { DependentWrite = Other; }
  void writeStartEvent(unsigned IID, MCPhysReg RegID, unsigned Cycles);
  void setWriteZero() { WritesZero = true; }
  void setEliminated() {
    assert(Users.empty() && "Write is in an inconsistent state.");
    CyclesLeft = 0;
    IsEliminated = true;
  }

  void setPRF(unsigned PRF) { PRFID = PRF; }

  // On every cycle, update CyclesLeft and notify dependent users.
  void cycleEvent();
  void onInstructionIssued(unsigned IID);

#ifndef NDEBUG
  void dump() const;
#endif
};

/// Tracks register operand latency in cycles.
///
/// A read may be dependent on more than one write. This occurs when some
/// writes only partially update the register associated to this read.
class ReadState {
  const ReadDescriptor *RD;
  // Physical register identifier associated with this read.
  MCPhysReg RegisterID;
  // Physical register file that serves register RegisterID.
  unsigned PRFID;
  // Number of writes that contribute to the definition of RegisterID.
  // In the absence of partial register updates, the number of DependentWrites
  // cannot be more than one.
  unsigned DependentWrites;
  // Number of cycles left before RegisterID can be read. This value depends on
  // the latency of all the dependent writes. It defaults to UNKNOWN_CYCLES.
  // It gets set to the value of field TotalCycles only when the 'CyclesLeft' of
  // every dependent write is known.
  int CyclesLeft;
  // This field is updated on every writeStartEvent(). When the number of
  // dependent writes (i.e. field DependentWrites) is zero, this value is
  // propagated to field CyclesLeft.
  unsigned TotalCycles;
  // Longest register dependency.
  CriticalDependency CRD;
  // This field is set to true only if there are no dependent writes, and
  // there are no `CyclesLeft' to wait.
  bool IsReady;
  // True if this is a read from a known zero register.
  bool IsZero;
  // True if this register read is from a dependency-breaking instruction.
  bool IndependentFromDef;

public:
  ReadState(const ReadDescriptor &Desc, MCPhysReg RegID)
      : RD(&Desc), RegisterID(RegID), PRFID(0), DependentWrites(0),
        CyclesLeft(UNKNOWN_CYCLES), TotalCycles(0), CRD(), IsReady(true),
        IsZero(false), IndependentFromDef(false) {}

  const ReadDescriptor &getDescriptor() const { return *RD; }
  unsigned getSchedClass() const { return RD->SchedClassID; }
  MCPhysReg getRegisterID() const { return RegisterID; }
  unsigned getRegisterFileID() const { return PRFID; }
  const CriticalDependency &getCriticalRegDep() const { return CRD; }

  bool isPending() const { return !IndependentFromDef && CyclesLeft > 0; }
  bool isReady() const { return IsReady; }
  bool isImplicitRead() const { return RD->isImplicitRead(); }

  bool isIndependentFromDef() const { return IndependentFromDef; }
  void setIndependentFromDef() { IndependentFromDef = true; }

  void cycleEvent();
  void writeStartEvent(unsigned IID, MCPhysReg RegID, unsigned Cycles);
  void setDependentWrites(unsigned Writes) {
    DependentWrites = Writes;
    IsReady = !Writes;
  }

  bool isReadZero() const { return IsZero; }
  void setReadZero() { IsZero = true; }
  void setPRF(unsigned ID) { PRFID = ID; }
};

/// A sequence of cycles.
///
/// This class can be used as a building block to construct ranges of cycles.
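/// \code
///   CycleSegment A(0, 3), B(2, 5); // cycles [0, 3) and [2, 5)
///   assert(A.contains(2) && A.overlaps(B) && !A.startsAfter(B));
/// \endcode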
class CycleSegment {
  unsigned Begin; // Inclusive.
  unsigned End;   // Exclusive.
  bool Reserved;  // Resources associated to this segment must be reserved.

public:
  CycleSegment(unsigned StartCycle, unsigned EndCycle, bool IsReserved = false)
      : Begin(StartCycle), End(EndCycle), Reserved(IsReserved) {}

  bool contains(unsigned Cycle) const { return Cycle >= Begin && Cycle < End; }
  bool startsAfter(const CycleSegment &CS) const { return End <= CS.Begin; }
  bool endsBefore(const CycleSegment &CS) const { return Begin >= CS.End; }
  bool overlaps(const CycleSegment &CS) const {
    return !startsAfter(CS) && !endsBefore(CS);
  }
  bool isExecuting() const { return Begin == 0 && End != 0; }
  bool isExecuted() const { return End == 0; }
  bool operator<(const CycleSegment &Other) const {
    return Begin < Other.Begin;
  }
  CycleSegment &operator--() {
    if (Begin)
      Begin--;
    if (End)
      End--;
    return *this;
  }

  bool isValid() const { return Begin <= End; }
  unsigned size() const { return End - Begin; }
  void subtract(unsigned Cycles) {
    assert(End >= Cycles);
    End -= Cycles;
  }

  unsigned begin() const { return Begin; }
  unsigned end() const { return End; }
  void setEnd(unsigned NewEnd) { End = NewEnd; }
  bool isReserved() const { return Reserved; }
  void setReserved() { Reserved = true; }
};

/// Helper used by class InstrDesc to describe how hardware resources
/// are used.
///
/// This class describes how many resource units of a specific resource kind
/// (and how many cycles) are "used" by an instruction.
struct ResourceUsage {
  CycleSegment CS;
  unsigned NumUnits;
  ResourceUsage(CycleSegment Cycles, unsigned Units = 1)
      : CS(Cycles), NumUnits(Units) {}
  unsigned size() const { return CS.size(); }
  bool isReserved() const { return CS.isReserved(); }
  void setReserved() { CS.setReserved(); }
};

/// An instruction descriptor
struct InstrDesc {
  SmallVector<WriteDescriptor, 2> Writes; // Implicit writes are at the end.
  SmallVector<ReadDescriptor, 4> Reads;   // Implicit reads are at the end.

  // For every resource used by an instruction of this kind, this vector
  // reports the number of "consumed cycles".
  SmallVector<std::pair<uint64_t, ResourceUsage>, 4> Resources;

  // A bitmask of used hardware buffers.
  uint64_t UsedBuffers;

  // A bitmask of used processor resource units.
  uint64_t UsedProcResUnits;

  // A bitmask of used processor resource groups.
  uint64_t UsedProcResGroups;

  unsigned MaxLatency;
  // Number of MicroOps for this instruction.
  unsigned NumMicroOps;
  // SchedClassID used to construct this InstrDesc.
  // This information is currently used by views to do fast queries on the
  // subtarget when computing the reciprocal throughput.
  unsigned SchedClassID;

  // True if all buffered resources are in-order, and there is at least one
  // buffer which is a dispatch hazard (BufferSize = 0).
  unsigned MustIssueImmediately : 1;

  // True if the corresponding mca::Instruction can be recycled. Currently only
  // instructions that are neither variadic nor have any variant can be
  // recycled.
  unsigned IsRecyclable : 1;

  // True if some of the consumed group resources are partially overlapping.
  unsigned HasPartiallyOverlappingGroups : 1;

  // A zero latency instruction doesn't consume any scheduler resources.
  bool isZeroLatency() const { return !MaxLatency && Resources.empty(); }

  InstrDesc() = default;
  InstrDesc(const InstrDesc &Other) = delete;
  InstrDesc &operator=(const InstrDesc &Other) = delete;
};

/// Base class for instructions consumed by the simulation pipeline.
///
/// This class tracks data dependencies as well as generic properties
/// of the instruction.
class InstructionBase {
  const InstrDesc &Desc;

  // This field is set for instructions that are candidates for move
  // elimination. For more information about move elimination, see the
  // definition of RegisterMappingTracker in RegisterFile.h
  bool IsOptimizableMove;

  // Output dependencies.
  // One entry per each implicit and explicit register definition.
  SmallVector<WriteState, 2> Defs;

  // Input dependencies.
  // One entry per each implicit and explicit register use.
  SmallVector<ReadState, 4> Uses;

  // List of operands which can be used by mca::CustomBehaviour
  std::vector<MCAOperand> Operands;

  // Instruction opcode which can be used by mca::CustomBehaviour
  unsigned Opcode;

  // Flags used by the LSUnit.
  bool IsALoadBarrier : 1;
  bool IsAStoreBarrier : 1;
  // Flags copied from the InstrDesc and potentially modified by
  // CustomBehaviour or (more likely) InstrPostProcess.
  bool MayLoad : 1;
  bool MayStore : 1;
  bool HasSideEffects : 1;
  bool BeginGroup : 1;
  bool EndGroup : 1;
  bool RetireOOO : 1;

public:
  InstructionBase(const InstrDesc &D, const unsigned Opcode)
      : Desc(D), IsOptimizableMove(false), Operands(0), Opcode(Opcode),
        IsALoadBarrier(false), IsAStoreBarrier(false) {}

  SmallVectorImpl<WriteState> &getDefs() { return Defs; }
  ArrayRef<WriteState> getDefs() const { return Defs; }
  SmallVectorImpl<ReadState> &getUses() { return Uses; }
  ArrayRef<ReadState> getUses() const { return Uses; }
  const InstrDesc &getDesc() const { return Desc; }

  unsigned getLatency() const { return Desc.MaxLatency; }
  unsigned getNumMicroOps() const { return Desc.NumMicroOps; }
  unsigned getOpcode() const { return Opcode; }
  bool isALoadBarrier() const { return IsALoadBarrier; }
  bool isAStoreBarrier() const { return IsAStoreBarrier; }
  void setLoadBarrier(bool IsBarrier) { IsALoadBarrier = IsBarrier; }
  void setStoreBarrier(bool IsBarrier) { IsAStoreBarrier = IsBarrier; }

  /// Return the MCAOperand which corresponds to index Idx within the original
  /// MCInst.
  const MCAOperand *getOperand(const unsigned Idx) const {
    auto It = llvm::find_if(Operands, [&Idx](const MCAOperand &Op) {
      return Op.getIndex() == Idx;
    });
    if (It == Operands.end())
      return nullptr;
    return &(*It);
  }
  unsigned getNumOperands() const { return Operands.size(); }
  void addOperand(const MCAOperand Op) { Operands.push_back(Op); }

  bool hasDependentUsers() const {
    return any_of(Defs,
                  [](const WriteState &Def) { return Def.getNumUsers() > 0; });
  }

  unsigned getNumUsers() const {
    unsigned NumUsers = 0;
    for (const WriteState &Def : Defs)
      NumUsers += Def.getNumUsers();
    return NumUsers;
  }

  // Returns true if this instruction is a candidate for move elimination.
  bool isOptimizableMove() const { return IsOptimizableMove; }
  void setOptimizableMove() { IsOptimizableMove = true; }
  void clearOptimizableMove() { IsOptimizableMove = false; }
  bool isMemOp() const { return MayLoad || MayStore; }

  // Getters and setters for general instruction flags.
  void setMayLoad(bool newVal) { MayLoad = newVal; }
  void setMayStore(bool newVal) { MayStore = newVal; }
  void setHasSideEffects(bool newVal) { HasSideEffects = newVal; }
  void setBeginGroup(bool newVal) { BeginGroup = newVal; }
  void setEndGroup(bool newVal) { EndGroup = newVal; }
  void setRetireOOO(bool newVal) { RetireOOO = newVal; }

  bool getMayLoad() const { return MayLoad; }
  bool getMayStore() const { return MayStore; }
  bool getHasSideEffects() const { return HasSideEffects; }
  bool getBeginGroup() const { return BeginGroup; }
  bool getEndGroup() const { return EndGroup; }
  bool getRetireOOO() const { return RetireOOO; }
};

/// An instruction propagated through the simulated instruction pipeline.
///
/// This class is used to monitor changes to the internal state of instructions
/// that are sent to the various components of the simulated hardware pipeline.
class Instruction : public InstructionBase {
  enum InstrStage {
    IS_INVALID,    // Instruction in an invalid state.
    IS_DISPATCHED, // Instruction dispatched but operands are not ready.
    IS_PENDING,    // Instruction is not ready, but operand latency is known.
    IS_READY,      // Instruction dispatched and operands ready.
    IS_EXECUTING,  // Instruction issued.
    IS_EXECUTED,   // Instruction executed. Values are written back.
    IS_RETIRED     // Instruction retired.
  };

  // The current instruction stage.
  enum InstrStage Stage;

  // This value defaults to the instruction latency. This instruction is
  // considered executed when field CyclesLeft goes to zero.
  int CyclesLeft;

  // Retire Unit token ID for this instruction.
  unsigned RCUTokenID;

  // LS token ID for this instruction.
  // This field is set to the invalid null token if this is not a memory
  // operation.
  unsigned LSUTokenID;

  // A resource mask which identifies buffered resources consumed by this
  // instruction at dispatch stage. In the absence of macro-fusion, this value
  // should always match the value of field `UsedBuffers` from the instruction
  // descriptor (see field InstrBase::Desc).
  uint64_t UsedBuffers;

  // Critical register dependency.
  CriticalDependency CriticalRegDep;

  // Critical memory dependency.
  CriticalDependency CriticalMemDep;

  // A bitmask of busy processor resource units.
  // This field is set to zero only if execution is not delayed during this
  // cycle because of unavailable pipeline resources.
  uint64_t CriticalResourceMask;

  // True if this instruction has been optimized at register renaming stage.
  bool IsEliminated;

public:
  Instruction(const InstrDesc &D, const unsigned Opcode)
      : InstructionBase(D, Opcode), Stage(IS_INVALID),
        CyclesLeft(UNKNOWN_CYCLES), RCUTokenID(0), LSUTokenID(0),
        UsedBuffers(D.UsedBuffers), CriticalRegDep(), CriticalMemDep(),
        CriticalResourceMask(0), IsEliminated(false) {}

  void reset();

  unsigned getRCUTokenID() const { return RCUTokenID; }
  unsigned getLSUTokenID() const { return LSUTokenID; }
  void setLSUTokenID(unsigned LSUTok) { LSUTokenID = LSUTok; }

  uint64_t getUsedBuffers() const { return UsedBuffers; }
  void setUsedBuffers(uint64_t Mask) { UsedBuffers = Mask; }
  void clearUsedBuffers() { UsedBuffers = 0ULL; }

  int getCyclesLeft() const { return CyclesLeft; }

  // Transition to the dispatch stage, and assign a RCUToken to this
  // instruction. The RCUToken is used to track the completion of every
  // register write performed by this instruction.
  void dispatch(unsigned RCUTokenID);

  // Instruction issued. Transition to the IS_EXECUTING state, and update
  // all the register definitions.
  void execute(unsigned IID);

  // Force a transition from the IS_DISPATCHED state to the IS_READY or
  // IS_PENDING state. State transitions normally occur either at the beginning
  // of a new cycle (see method cycleEvent()), or as a result of another issue
  // event. This method is called every time the instruction might have
  // changed state. It internally delegates to methods updateDispatched() and
  // updatePending().
  void update();
  bool updateDispatched();
  bool updatePending();

  bool isInvalid() const { return Stage == IS_INVALID; }
  bool isDispatched() const { return Stage == IS_DISPATCHED; }
  bool isPending() const { return Stage == IS_PENDING; }
  bool isReady() const { return Stage == IS_READY; }
  bool isExecuting() const { return Stage == IS_EXECUTING; }
  bool isExecuted() const { return Stage == IS_EXECUTED; }
  bool isRetired() const { return Stage == IS_RETIRED; }
  bool isEliminated() const { return IsEliminated; }

  // Forces a transition from state IS_DISPATCHED to state IS_EXECUTED.
  void forceExecuted();
  void setEliminated() { IsEliminated = true; }

  void retire() {
    assert(isExecuted() && "Instruction is in an invalid state!");
    Stage = IS_RETIRED;
  }

  const CriticalDependency &getCriticalRegDep() const { return CriticalRegDep; }
  const CriticalDependency &getCriticalMemDep() const { return CriticalMemDep; }
  const CriticalDependency &computeCriticalRegDep();
  void setCriticalMemDep(const CriticalDependency &MemDep) {
    CriticalMemDep = MemDep;
  }

  uint64_t getCriticalResourceMask() const { return CriticalResourceMask; }
  void setCriticalResourceMask(uint64_t ResourceMask) {
    CriticalResourceMask = ResourceMask;
  }

  void cycleEvent();
};

/// An InstRef is a pair of a SourceMgr index and an Instruction.  The index
/// is used as a unique identifier for the instruction, and MCA uses it as a
/// key throughout the simulation.
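/// \code
///   // Sketch: I is an mca::Instruction assumed constructed elsewhere.
///   InstRef IR(/*Index=*/0, &I);
///   assert(IR && "reference is valid while the instruction pointer is set");
///   unsigned SourceIdx = IR.getSourceIndex();
///   IR.invalidate(); // bool(IR) is now false
/// \endcode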
class InstRef {
  std::pair<unsigned, Instruction *> Data;

public:
  InstRef() : Data(std::make_pair(0, nullptr)) {}
  InstRef(unsigned Index, Instruction *I) : Data(std::make_pair(Index, I)) {}

  bool operator==(const InstRef &Other) const { return Data == Other.Data; }
  bool operator!=(const InstRef &Other) const { return Data != Other.Data; }
  bool operator<(const InstRef &Other) const {
    return Data.first < Other.Data.first;
  }

  unsigned getSourceIndex() const { return Data.first; }
  Instruction *getInstruction() { return Data.second; }
  const Instruction *getInstruction() const { return Data.second; }

  /// Returns true if this references a valid instruction.
  explicit operator bool() const { return Data.second != nullptr; }

  /// Invalidate this reference.
  void invalidate() { Data.second = nullptr; }

#ifndef NDEBUG
  void print(raw_ostream &OS) const { OS << getSourceIndex(); }
#endif
};

#ifndef NDEBUG
inline raw_ostream &operator<<(raw_ostream &OS, const InstRef &IR) {
  IR.print(OS);
  return OS;
}
#endif

} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_INSTRUCTION_H
//===--------------------- Pipeline.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements an ordered container of stages that simulate the
/// pipeline of a hardware backend.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MCA_PIPELINE_H
#define LLVM_MCA_PIPELINE_H

#include "llvm/MCA/Stages/Stage.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace mca {

class HWEventListener;

/// A pipeline for a specific subtarget.
///
/// It emulates an out-of-order execution of instructions. Instructions are
/// fetched from a MCInst sequence managed by an initial 'Fetch' stage.
/// Instructions are first fetched, then dispatched to the schedulers, and
/// then executed.
///
/// This class tracks the lifetime of an instruction from the moment where
/// it gets dispatched to the schedulers, to the moment where it finishes
/// executing and register writes are architecturally committed.
/// In particular, it monitors changes in the state of every instruction
/// in flight.
///
/// Instructions are executed in a loop of iterations. The number of iterations
/// is defined by the SourceMgr object, which is managed by the initial stage
/// of the instruction pipeline.
///
/// The Pipeline entry point is method 'run()', which executes cycles in a
/// loop until there are no new instructions to dispatch and every instruction
/// has been retired.
///
/// Internally, the Pipeline collects statistical information in the form of
/// histograms. For example, it tracks how the dispatch group size changes
/// over time.
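/// \code
///   // A minimal sketch; MyStage is a hypothetical Stage subclass.
///   Pipeline P;
///   P.appendStage(std::make_unique<MyStage>());
///   Expected<unsigned> Cycles = P.run();
///   if (!Cycles)
///     return Cycles.takeError(); // propagate the simulation error
/// \endcode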
class Pipeline {
  Pipeline(const Pipeline &P) = delete;
  Pipeline &operator=(const Pipeline &P) = delete;

  enum class State {
    Created, // Pipeline was just created. The default state.
    Started, // Pipeline has started running.
    Paused   // Pipeline is paused.
  };
  State CurrentState = State::Created;

  /// An ordered list of stages that define this instruction pipeline.
  SmallVector<std::unique_ptr<Stage>, 8> Stages;
  std::set<HWEventListener *> Listeners;
  unsigned Cycles = 0;

  Error runCycle();
  bool hasWorkToProcess();
  void notifyCycleBegin();
  void notifyCycleEnd();

public:
  Pipeline() = default;
  void appendStage(std::unique_ptr<Stage> S);

  /// Returns the total number of simulated cycles.
  Expected<unsigned> run();

  void addEventListener(HWEventListener *Listener);

  /// Returns whether the pipeline is currently paused.
  bool isPaused() const { return CurrentState == State::Paused; }
};
} // namespace mca
} // namespace llvm

#endif // LLVM_MCA_PIPELINE_H
//===- llvm/PassInfo.h - Pass Info class ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines and implements the PassInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSINFO_H
#define LLVM_PASSINFO_H

#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <vector>

namespace llvm {

class Pass;

//===---------------------------------------------------------------------------
/// PassInfo class - An instance of this class exists for every pass known by
/// the system, and can be obtained from a live Pass by calling its
/// getPassInfo() method.  These objects are set up by the RegisterPass<>
/// template.
///
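/// \code
///   // Typical registration through RegisterPass<> (legacy pass manager);
///   // MyPass is a hypothetical pass class.
///   static RegisterPass<MyPass> X("mypass", "My Pass Description");
///   // A registered PassInfo can later produce a fresh pass instance:
///   Pass *P = PI->createPass(); // PI is a const PassInfo *
/// \endcode
///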
class PassInfo {
public:
  using NormalCtor_t = Pass* (*)();

private:
  StringRef PassName;     // Nice name for Pass
  StringRef PassArgument; // Command Line argument to run this pass
  const void *PassID;
  const bool IsCFGOnlyPass = false;      // Pass only looks at the CFG.
  const bool IsAnalysis;                 // True if an analysis pass.
  const bool IsAnalysisGroup;            // True if an analysis group.
  std::vector<const PassInfo *> ItfImpl; // Interfaces implemented by this pass
  NormalCtor_t NormalCtor = nullptr;

public:
  /// PassInfo ctor - Do not call this directly, this should only be invoked
  /// through RegisterPass.
  PassInfo(StringRef name, StringRef arg, const void *pi, NormalCtor_t normal,
           bool isCFGOnly, bool is_analysis)
      : PassName(name), PassArgument(arg), PassID(pi), IsCFGOnlyPass(isCFGOnly),
        IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) {}

  /// PassInfo ctor - Do not call this directly, this should only be invoked
  /// through RegisterPass. This version is for use by analysis groups; it
  /// does not auto-register the pass.
  PassInfo(StringRef name, const void *pi)
      : PassName(name), PassID(pi), IsAnalysis(false), IsAnalysisGroup(true) {}

  PassInfo(const PassInfo &) = delete;
  PassInfo &operator=(const PassInfo &) = delete;

  /// getPassName - Return the friendly name for the pass, never returns null
  StringRef getPassName() const { return PassName; }

  /// getPassArgument - Return the command line option that may be passed to
  /// 'opt' that will cause this pass to be run.  This will return null if there
  /// is no argument.
  StringRef getPassArgument() const { return PassArgument; }

  /// getTypeInfo - Return the id object for the pass...
  /// TODO : Rename
  const void *getTypeInfo() const { return PassID; }

  /// Return true if this PassID implements the specified ID pointer.
  bool isPassID(const void *IDPtr) const { return PassID == IDPtr; }

  /// isAnalysisGroup - Return true if this is an analysis group, not a normal
  /// pass.
  bool isAnalysisGroup() const { return IsAnalysisGroup; }
  bool isAnalysis() const { return IsAnalysis; }

  /// isCFGOnlyPass - return true if this pass only looks at the CFG for the
  /// function.
  bool isCFGOnlyPass() const { return IsCFGOnlyPass; }

  /// getNormalCtor - Return a pointer to a function, that when called, creates
  /// an instance of the pass and returns it.  This pointer may be null if there
  /// is no default constructor for the pass.
  NormalCtor_t getNormalCtor() const {
    return NormalCtor;
  }
  void setNormalCtor(NormalCtor_t Ctor) {
    NormalCtor = Ctor;
  }

  /// createPass() - Use this method to create an instance of this pass.
  Pass *createPass() const {
    assert((!isAnalysisGroup() || NormalCtor) &&
           "No default implementation found for analysis group!");
    assert(NormalCtor &&
           "Cannot call createPass on PassInfo without default ctor!");
    return NormalCtor();
  }

  /// addInterfaceImplemented - This method is called when this pass is
  /// registered as a member of an analysis group with the RegisterAnalysisGroup
  /// template.
  void addInterfaceImplemented(const PassInfo *ItfPI) {
    ItfImpl.push_back(ItfPI);
  }

  /// getInterfacesImplemented - Return a list of all of the analysis group
  /// interfaces implemented by this pass.
  const std::vector<const PassInfo*> &getInterfacesImplemented() const {
    return ItfImpl;
  }
};

} // end namespace llvm

#endif // LLVM_PASSINFO_H
//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/PGOOptions.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/Transforms/IPO/ModuleInliner.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>

namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;
class ModuleSummaryIndex;
template <typename T> class IntrusiveRefCntPtr;
namespace vfs {
class FileSystem;
} // namespace vfs

/// Tunable parameters for passes in the default pipelines.
class PipelineTuningOptions {
public:
  /// Constructor sets pipeline tuning defaults based on cl::opts. Each option
  /// can be set in the PassBuilder when using LLVM as a library.
  PipelineTuningOptions();

  /// Tuning option to set loop interleaving on/off, set based on opt level.
  bool LoopInterleaving;

  /// Tuning option to enable/disable loop vectorization, set based on opt
  /// level.
  bool LoopVectorization;

  /// Tuning option to enable/disable SLP vectorization, set based on opt
  /// level.
  bool SLPVectorization;

  /// Tuning option to enable/disable loop unrolling. Its default value is true.
  bool LoopUnrolling;

  /// Tuning option to forget all SCEV loops in LoopUnroll. Its default value
  /// is that of the flag: `-forget-scev-loop-unroll`.
  bool ForgetAllSCEVInLoopUnroll;

  /// Tuning option to cap the number of calls to retrieve clobbering accesses in
  /// MemorySSA, in LICM.
  unsigned LicmMssaOptCap;

  /// Tuning option to disable promotion to scalars in LICM with MemorySSA, if
  /// the number of accesses is too large.
  unsigned LicmMssaNoAccForPromotionCap;

  /// Tuning option to enable/disable call graph profile. Its default value is
  /// that of the flag: `-enable-npm-call-graph-profile`.
  bool CallGraphProfile;

  /// Tuning option to enable the unified LTO pipeline.
  bool UnifiedLTO;

  /// Tuning option to enable/disable function merging. Its default value is
  /// false.
  bool MergeFunctions;

  /// Tuning option to override the default inliner threshold.
  int InlinerThreshold;

  // Experimental option to eagerly invalidate more analyses. This has the
  // potential to decrease max memory usage in exchange for more compile time.
  // This may affect codegen due to either passes using analyses only when
  // cached, or invalidating and recalculating an analysis that was
  // stale/imprecise but still valid. Currently this invalidates all function
  // analyses after various module->function or cgscc->function adaptors in the
  // default pipelines.
  bool EagerlyInvalidateAnalyses;
};

/// This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
  TargetMachine *TM;
  PipelineTuningOptions PTO;
  std::optional<PGOOptions> PGOOpt;
  PassInstrumentationCallbacks *PIC;

public:
  /// A struct to capture parsed pass pipeline names.
  ///
  /// A pipeline is defined as a series of names, each of which may in itself
  /// recursively contain a nested pipeline. A name is either the name of a pass
  /// (e.g. "instcombine") or the name of a pipeline type (e.g. "cgscc"). If the
  /// name is the name of a pass, the InnerPipeline is empty, since passes
  /// cannot contain inner pipelines. See parsePassPipeline() for a more
  /// detailed description of the textual pipeline format.
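  ///
  /// For example, the textual pipeline "cgscc(inline,function(instcombine))"
  /// parses into a single "cgscc" element whose InnerPipeline holds an
  /// "inline" element and a nested "function" element wrapping "instcombine".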
  struct PipelineElement {
    StringRef Name;
    std::vector<PipelineElement> InnerPipeline;
  };

  explicit PassBuilder(TargetMachine *TM = nullptr,
                       PipelineTuningOptions PTO = PipelineTuningOptions(),
                       std::optional<PGOOptions> PGOOpt = std::nullopt,
                       PassInstrumentationCallbacks *PIC = nullptr);

  /// Cross register the analysis managers through their proxies.
  ///
  /// This is an interface that can be used to cross register each
  /// AnalysisManager with all the other analysis managers.
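  /// \code
  ///   // The documented setup pattern for the new pass manager (a sketch):
  ///   LoopAnalysisManager LAM;
  ///   FunctionAnalysisManager FAM;
  ///   CGSCCAnalysisManager CGAM;
  ///   ModuleAnalysisManager MAM;
  ///   PassBuilder PB;
  ///   PB.registerModuleAnalyses(MAM);
  ///   PB.registerCGSCCAnalyses(CGAM);
  ///   PB.registerFunctionAnalyses(FAM);
  ///   PB.registerLoopAnalyses(LAM);
  ///   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
  /// \endcode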
  void crossRegisterProxies(LoopAnalysisManager &LAM,
                            FunctionAnalysisManager &FAM,
                            CGSCCAnalysisManager &CGAM,
                            ModuleAnalysisManager &MAM);

  /// Registers all available module analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// ModuleAnalysisManager with all registered module analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerModuleAnalyses(ModuleAnalysisManager &MAM);

  /// Registers all available CGSCC analysis passes.
  ///
  /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
  /// with all registered CGSCC analyses. Callers can still manually register any
  /// additional analyses. Callers can also pre-register analyses and this will
  /// not override those.
  void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);

  /// Registers all available function analysis passes.
  ///
  /// This is an interface that can be used to populate a \c
  /// FunctionAnalysisManager with all registered function analyses. Callers can
  /// still manually register any additional analyses. Callers can also
  /// pre-register analyses and this will not override those.
  void registerFunctionAnalyses(FunctionAnalysisManager &FAM);

  /// Registers all available loop analysis passes.
  ///
  /// This is an interface that can be used to populate a \c LoopAnalysisManager
  /// with all registered loop analyses. Callers can still manually register any
  /// additional analyses.
  void registerLoopAnalyses(LoopAnalysisManager &LAM);

  /// Construct the core LLVM function canonicalization and simplification
  /// pipeline.
  ///
  /// This is a long pipeline and uses most of the per-function optimization
  /// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
  /// repeatedly over the IR and is not expected to destroy important
  /// information about the semantics of the IR.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  FunctionPassManager
  buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                      ThinOrFullLTOPhase Phase);

  /// Construct the core LLVM module canonicalization and simplification
  /// pipeline.
  ///
  /// This pipeline focuses on canonicalizing and simplifying the entire module
  /// of IR. Much like the function simplification pipeline above, it is
  /// suitable to run repeatedly over the IR and is not expected to destroy
  /// important information. It does, however, perform inlining and other
  /// heuristic based simplifications that are not strictly reversible.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ///
  /// \p Phase indicates the current ThinLTO phase.
  ModulePassManager buildModuleSimplificationPipeline(OptimizationLevel Level,
                                                      ThinOrFullLTOPhase Phase);

  /// Construct the module pipeline that performs inlining as well as
  /// the inlining-driven cleanups.
  ModuleInlinerWrapperPass buildInlinerPipeline(OptimizationLevel Level,
                                                ThinOrFullLTOPhase Phase);

  /// Construct the module pipeline that performs inlining with
  /// module inliner pass.
  ModulePassManager buildModuleInlinerPipeline(OptimizationLevel Level,
                                               ThinOrFullLTOPhase Phase);

  /// Construct the core LLVM module optimization pipeline.
  ///
  /// This pipeline focuses on optimizing the execution speed of the IR. It
  /// uses cost modeling and thresholds to balance code growth against runtime
  /// improvements. It includes vectorization and other information destroying
  /// transformations. It also cannot generally be run repeatedly on a module
  /// without potentially seriously regressing either runtime performance of
  /// the code or serious code size growth.
  ///
  /// Note that \p Level cannot be `O0` here. The pipelines produced are
  /// only intended for use when attempting to optimize code. If frontends
  /// require some transformations for semantic reasons, they should explicitly
  /// build them.
  ModulePassManager
  buildModuleOptimizationPipeline(OptimizationLevel Level,
                                  ThinOrFullLTOPhase LTOPhase);

  /// Build a per-module default optimization pipeline.
  ///
  /// This provides a good default optimization pipeline for per-module
  /// optimization and code generation without any link-time optimization. It
  /// typically corresponds to the frontend "-O[123]" options for optimization
  /// levels \c O1, \c O2, and \c O3, respectively.
  ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                  bool LTOPreLink = false);

  /// Build a fat object default optimization pipeline.
  ///
  /// This builds a pipeline that runs the LTO/ThinLTO pre-link pipeline, and
  /// emits a section containing the pre-link bitcode alongside the object code
  /// generated by running the PerModuleDefaultPipeline, used when compiling
  /// without LTO. It clones the module and runs the LTO/non-LTO pipelines
  /// separately to avoid any inconsistencies with an ad-hoc pipeline that tries
  /// to approximate the PerModuleDefaultPipeline from the pre-link LTO
  /// pipelines.
  ModulePassManager buildFatLTODefaultPipeline(OptimizationLevel Level,
                                               bool ThinLTO, bool EmitSummary);

  /// Build a pre-link, ThinLTO-targeting default optimization pipeline to
  /// a pass manager.
  ///
  /// This adds the pre-link optimizations tuned to prepare a module for
  /// a ThinLTO run. It works to minimize the IR which needs to be analyzed
  /// without making irreversible decisions which could be made better during
  /// the LTO run.
  ModulePassManager buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level);

  /// Build a ThinLTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildThinLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ModulePassManager
  buildThinLTODefaultPipeline(OptimizationLevel Level,
                              const ModuleSummaryIndex *ImportSummary);

  /// Build a pre-link, LTO-targeting default optimization pipeline to a pass
  /// manager.
  ///
  /// This adds the pre-link optimizations tuned to work well with a later LTO
  /// run. It works to minimize the IR which needs to be analyzed without
  /// making irreversible decisions which could be made better during the LTO
  /// run.
  ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level);

  /// Build an LTO default optimization pipeline to a pass manager.
  ///
  /// This provides a good default optimization pipeline for link-time
  /// optimization and code generation. It is particularly tuned to fit well
  /// when IR coming into the LTO phase was first run through \c
  /// buildLTOPreLinkDefaultPipeline, and the two coordinate closely.
  ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                            ModuleSummaryIndex *ExportSummary);

  /// Build an O0 pipeline with the minimal semantically required passes.
  ///
  /// This should only be used for non-LTO and LTO pre-link pipelines.
  ModulePassManager buildO0DefaultPipeline(OptimizationLevel Level,
                                           bool LTOPreLink = false);

  /// Build the default `AAManager` with the default alias analysis pipeline
  /// registered.
  ///
  /// This also adds target-specific alias analyses registered via
  /// TargetMachine::registerDefaultAliasAnalyses().
  AAManager buildDefaultAAPipeline();
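
  // E.g. (a sketch, reusing the FAM and PB from the sketch above): make the
  // default AA pipeline available to function analyses before running passes:
  //
  //   FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });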

  /// Parse a textual pass pipeline description into a \c
  /// ModulePassManager.
  ///
  /// The format of the textual pass pipeline description looks something like:
  ///
  ///   module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
  ///
  /// Pass managers have ()s describing the nesting structure of passes. All
  /// passes are comma separated. As a special shortcut, if the very first pass
  /// is not a module pass (the kind a module pass manager holds directly), this
  /// will automatically form the shortest stack of pass managers that allows
  /// inserting that first pass.
  /// So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and loop
  /// passes 'lpassN', all of these are valid:
  ///
  ///   fpass1,fpass2,fpass3
  ///   cgpass1,cgpass2,cgpass3
  ///   lpass1,lpass2,lpass3
  ///
  /// And they are equivalent to the following (respectively):
  ///
  ///   module(function(fpass1,fpass2,fpass3))
  ///   module(cgscc(cgpass1,cgpass2,cgpass3))
  ///   module(function(loop(lpass1,lpass2,lpass3)))
  ///
  /// This shortcut is especially useful for debugging and testing small pass
  /// combinations.
  ///
  /// The passes in the sequence are not necessarily all of the same kind. You
  /// can mix passes of different levels implicitly, provided adaptor passes
  /// are defined to make them work. For example,
  ///
  ///   mpass1,fpass1,fpass2,mpass2,lpass1
  ///
  /// This pipeline uses only one pass manager: the top-level module manager.
  /// fpass1, fpass2, and lpass1 are added to the top-level module manager
  /// using only adaptor passes; no nested function/loop pass managers are
  /// added. The purpose is to allow easy pass testing when the user
  /// specifically wants a pass to run directly under an adaptor. This is
  /// preferred when a pipeline is largely of one type, but one or just a few
  /// passes are of different types (see PassBuilder.cpp for examples).
  Error parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText);
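
  // For example (a sketch, assuming the analysis managers above are already
  // registered and cross-registered):
  //
  //   ModulePassManager MPM;
  //   if (Error Err = PB.parsePassPipeline(MPM, "function(instcombine,sroa)"))
  //     report_fatal_error(std::move(Err));
  //   MPM.run(M, MAM);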

  /// @{ Parse a textual pass pipeline description into a specific PassManager
  ///
  /// Automatic deduction of an appropriate pass manager stack is not supported.
  /// For example, to insert a loop pass 'lpass' into a FunctionPassManager,
  /// this is the valid pipeline text:
  ///
  ///   function(lpass)
  Error parsePassPipeline(CGSCCPassManager &CGPM, StringRef PipelineText);
  Error parsePassPipeline(FunctionPassManager &FPM, StringRef PipelineText);
  Error parsePassPipeline(LoopPassManager &LPM, StringRef PipelineText);
  /// @}

  /// Parse a textual alias analysis pipeline into the provided AA manager.
  ///
  /// The format of the textual AA pipeline is a comma separated list of AA
  /// pass names:
  ///
  ///   basic-aa,globals-aa,...
  ///
  /// The AA manager is set up such that the provided alias analyses are tried
  /// in the order specified. See the \c AAManager documentation for details
  /// about the logic used. This routine just provides the textual mapping
  /// between AA names and the analyses to register with the manager.
  ///
  /// Returns an error if the text cannot be parsed cleanly. The specific state
  /// of the \p AA manager is unspecified if such an error is encountered.
  Error parseAAPipeline(AAManager &AA, StringRef PipelineText);
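
  // For example (a sketch, using the FAM from the sketch above):
  //
  //   AAManager AA;
  //   if (Error Err = PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
  //     report_fatal_error(std::move(Err));
  //   FAM.registerPass([&] { return std::move(AA); });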

  /// Print pass names.
  void printPassNames(raw_ostream &OS);

  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding passes that perform peephole
  /// optimizations similar to the instruction combiner. These passes will be
  /// inserted after each instance of the instruction combiner pass.
  void registerPeepholeEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    PeepholeEPCallbacks.push_back(C);
  }
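
  // For instance, a frontend could add an extra peephole pass after each
  // instcombine run (a sketch; `MyPeepholePass` is a hypothetical pass):
  //
  //   PB.registerPeepholeEPCallback(
  //       [](FunctionPassManager &FPM, OptimizationLevel Level) {
  //         if (Level != OptimizationLevel::O0)
  //           FPM.addPass(MyPeepholePass());
  //       });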

  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding late loop canonicalization and
  /// simplification passes. This is the last point in the loop optimization
  /// pipeline before loop deletion. Each pass added
  /// here must be an instance of LoopPass.
  /// This is the place to add passes that can remove loops, such as target-
  /// specific loop idiom recognition.
  void registerLateLoopOptimizationsEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LateLoopOptimizationsEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding loop passes to the end of the loop
  /// optimizer.
  void registerLoopOptimizerEndEPCallback(
      const std::function<void(LoopPassManager &, OptimizationLevel)> &C) {
    LoopOptimizerEndEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding optimization passes after most of the
  /// main optimizations, but before the last cleanup-ish optimizations.
  void registerScalarOptimizerLateEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    ScalarOptimizerLateEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding CallGraphSCC passes at the end of the
  /// main CallGraphSCC passes and before any function simplification passes run
  /// by CGPassManager.
  void registerCGSCCOptimizerLateEPCallback(
      const std::function<void(CGSCCPassManager &, OptimizationLevel)> &C) {
    CGSCCOptimizerLateEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension
  /// point.
  ///
  /// This extension point allows adding optimization passes before the
  /// vectorizer and other highly target specific optimization passes are
  /// executed.
  void registerVectorizerStartEPCallback(
      const std::function<void(FunctionPassManager &, OptimizationLevel)> &C) {
    VectorizerStartEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization once at the start of the
  /// pipeline. This does not apply to 'backend' compiles (LTO and ThinLTO
  /// link-time pipelines).
  void registerPipelineStartEPCallback(
      const std::function<void(ModulePassManager &, OptimizationLevel)> &C) {
    PipelineStartEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimization right after passes that do
  /// basic simplification of the input IR.
  void registerPipelineEarlySimplificationEPCallback(
      const std::function<void(ModulePassManager &, OptimizationLevel)> &C) {
    PipelineEarlySimplificationEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations before the function
  /// optimization pipeline.
  void registerOptimizerEarlyEPCallback(
      const std::function<void(ModulePassManager &, OptimizationLevel)> &C) {
    OptimizerEarlyEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations at the very end of the
  /// function optimization pipeline.
  void registerOptimizerLastEPCallback(
      const std::function<void(ModulePassManager &, OptimizationLevel)> &C) {
    OptimizerLastEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations at the start of the full
  /// LTO pipeline.
  void registerFullLinkTimeOptimizationEarlyEPCallback(
      const std::function<void(ModulePassManager &, OptimizationLevel)> &C) {
    FullLinkTimeOptimizationEarlyEPCallbacks.push_back(C);
  }

  /// Register a callback for a default optimizer pipeline extension point.
  ///
  /// This extension point allows adding optimizations at the end of the full
  /// LTO pipeline.
  void registerFullLinkTimeOptimizationLastEPCallback(
      const std::function<void(ModulePassManager &, OptimizationLevel)> &C) {
    FullLinkTimeOptimizationLastEPCallbacks.push_back(C);
  }

  /// Register a callback for parsing an AliasAnalysis Name to populate
  /// the given AAManager \p AA
  void registerParseAACallback(
      const std::function<bool(StringRef Name, AAManager &AA)> &C) {
    AAParsingCallbacks.push_back(C);
  }

  /// @{ Register callbacks for analysis registration with this PassBuilder
  /// instance.
  /// Callees register their analyses with the given AnalysisManager objects.
  void registerAnalysisRegistrationCallback(
      const std::function<void(CGSCCAnalysisManager &)> &C) {
    CGSCCAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(FunctionAnalysisManager &)> &C) {
    FunctionAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(LoopAnalysisManager &)> &C) {
    LoopAnalysisRegistrationCallbacks.push_back(C);
  }
  void registerAnalysisRegistrationCallback(
      const std::function<void(ModuleAnalysisManager &)> &C) {
    ModuleAnalysisRegistrationCallbacks.push_back(C);
  }
  /// @}
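
  // For example, a plugin could expose a custom function analysis (a sketch;
  // `MyAnalysis` is a hypothetical analysis type):
  //
  //   PB.registerAnalysisRegistrationCallback(
  //       [](FunctionAnalysisManager &FAM) {
  //         FAM.registerPass([] { return MyAnalysis(); });
  //       });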

  /// @{ Register pipeline parsing callbacks with this pass builder instance.
  /// Using these callbacks, callers can parse both a single pass name, as well
  /// as entire sub-pipelines, and populate the PassManager instance
  /// accordingly.
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, CGSCCPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    CGSCCPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, FunctionPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    FunctionPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, LoopPassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    LoopPipelineParsingCallbacks.push_back(C);
  }
  void registerPipelineParsingCallback(
      const std::function<bool(StringRef Name, ModulePassManager &,
                               ArrayRef<PipelineElement>)> &C) {
    ModulePipelineParsingCallbacks.push_back(C);
  }
  /// @}
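
  // For example, a plugin could make "my-pass" parseable in textual pipelines
  // (a sketch; `MyPass` is a hypothetical function pass):
  //
  //   PB.registerPipelineParsingCallback(
  //       [](StringRef Name, FunctionPassManager &FPM,
  //          ArrayRef<PassBuilder::PipelineElement>) {
  //         if (Name == "my-pass") {
  //           FPM.addPass(MyPass());
  //           return true;
  //         }
  //         return false;
  //       });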

  /// Register a callback for a top-level pipeline entry.
  ///
  /// If the PassManager type is not given at the top level of the pipeline
  /// text, this callback should be used to determine the appropriate stack of
  /// PassManagers and populate the passed ModulePassManager.
  void registerParseTopLevelPipelineCallback(
      const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>)>
          &C);

  /// Add PGOInstrumentation passes for O0 only.
  void addPGOInstrPassesForO0(ModulePassManager &MPM, bool RunProfileGen,
                              bool IsCS, std::string ProfileFile,
                              std::string ProfileRemappingFile,
                              IntrusiveRefCntPtr<vfs::FileSystem> FS);

  /// Returns the \c PassInstrumentationCallbacks object. External libraries
  /// can use this to register pass instrumentation callbacks.
  PassInstrumentationCallbacks *getPassInstrumentationCallbacks() const {
    return PIC;
  }

  // Invoke the callbacks registered for the various extension points.
  // Custom pipelines should use these to invoke the callbacks registered
  // by TargetMachines and other clients.
  void invokePeepholeEPCallbacks(FunctionPassManager &FPM,
                                 OptimizationLevel Level);
  void invokeLateLoopOptimizationsEPCallbacks(LoopPassManager &LPM,
                                              OptimizationLevel Level);
  void invokeLoopOptimizerEndEPCallbacks(LoopPassManager &LPM,
                                         OptimizationLevel Level);
  void invokeScalarOptimizerLateEPCallbacks(FunctionPassManager &FPM,
                                            OptimizationLevel Level);
  void invokeCGSCCOptimizerLateEPCallbacks(CGSCCPassManager &CGPM,
                                           OptimizationLevel Level);
  void invokeVectorizerStartEPCallbacks(FunctionPassManager &FPM,
                                        OptimizationLevel Level);
  void invokeOptimizerEarlyEPCallbacks(ModulePassManager &MPM,
                                       OptimizationLevel Level);
  void invokeOptimizerLastEPCallbacks(ModulePassManager &MPM,
                                      OptimizationLevel Level);
  void invokeFullLinkTimeOptimizationEarlyEPCallbacks(ModulePassManager &MPM,
                                                      OptimizationLevel Level);
  void invokeFullLinkTimeOptimizationLastEPCallbacks(ModulePassManager &MPM,
                                                     OptimizationLevel Level);
  void invokePipelineStartEPCallbacks(ModulePassManager &MPM,
                                      OptimizationLevel Level);
  void invokePipelineEarlySimplificationEPCallbacks(ModulePassManager &MPM,
                                                    OptimizationLevel Level);

private:
  // O1 pass pipeline
  FunctionPassManager
  buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
                                        ThinOrFullLTOPhase Phase);

  void addRequiredLTOPreLinkPasses(ModulePassManager &MPM);

  void addVectorPasses(OptimizationLevel Level, FunctionPassManager &FPM,
                       bool IsFullLTO);

  static std::optional<std::vector<PipelineElement>>
  parsePipelineText(StringRef Text);

  Error parseModulePass(ModulePassManager &MPM, const PipelineElement &E);
  Error parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E);
  Error parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E);
  Error parseLoopPass(LoopPassManager &LPM, const PipelineElement &E);
  bool parseAAPassName(AAManager &AA, StringRef Name);

  Error parseLoopPassPipeline(LoopPassManager &LPM,
                              ArrayRef<PipelineElement> Pipeline);
  Error parseFunctionPassPipeline(FunctionPassManager &FPM,
                                  ArrayRef<PipelineElement> Pipeline);
  Error parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                               ArrayRef<PipelineElement> Pipeline);
  Error parseModulePassPipeline(ModulePassManager &MPM,
                                ArrayRef<PipelineElement> Pipeline);

  void addPGOInstrPasses(ModulePassManager &MPM, OptimizationLevel Level,
                         bool RunProfileGen, bool IsCS, std::string ProfileFile,
                         std::string ProfileRemappingFile,
                         ThinOrFullLTOPhase LTOPhase,
                         IntrusiveRefCntPtr<vfs::FileSystem> FS);

  // Extension Point callbacks
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      PeepholeEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LateLoopOptimizationsEPCallbacks;
  SmallVector<std::function<void(LoopPassManager &, OptimizationLevel)>, 2>
      LoopOptimizerEndEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      ScalarOptimizerLateEPCallbacks;
  SmallVector<std::function<void(CGSCCPassManager &, OptimizationLevel)>, 2>
      CGSCCOptimizerLateEPCallbacks;
  SmallVector<std::function<void(FunctionPassManager &, OptimizationLevel)>, 2>
      VectorizerStartEPCallbacks;
  // Module callbacks
  SmallVector<std::function<void(ModulePassManager &, OptimizationLevel)>, 2>
      OptimizerEarlyEPCallbacks;
  SmallVector<std::function<void(ModulePassManager &, OptimizationLevel)>, 2>
      OptimizerLastEPCallbacks;
  SmallVector<std::function<void(ModulePassManager &, OptimizationLevel)>, 2>
      FullLinkTimeOptimizationEarlyEPCallbacks;
  SmallVector<std::function<void(ModulePassManager &, OptimizationLevel)>, 2>
      FullLinkTimeOptimizationLastEPCallbacks;
  SmallVector<std::function<void(ModulePassManager &, OptimizationLevel)>, 2>
      PipelineStartEPCallbacks;
  SmallVector<std::function<void(ModulePassManager &, OptimizationLevel)>, 2>
      PipelineEarlySimplificationEPCallbacks;

  SmallVector<std::function<void(ModuleAnalysisManager &)>, 2>
      ModuleAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, ModulePassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      ModulePipelineParsingCallbacks;
  SmallVector<
      std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>)>, 2>
      TopLevelPipelineParsingCallbacks;
  // CGSCC callbacks
  SmallVector<std::function<void(CGSCCAnalysisManager &)>, 2>
      CGSCCAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, CGSCCPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      CGSCCPipelineParsingCallbacks;
  // Function callbacks
  SmallVector<std::function<void(FunctionAnalysisManager &)>, 2>
      FunctionAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, FunctionPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      FunctionPipelineParsingCallbacks;
  // Loop callbacks
  SmallVector<std::function<void(LoopAnalysisManager &)>, 2>
      LoopAnalysisRegistrationCallbacks;
  SmallVector<std::function<bool(StringRef, LoopPassManager &,
                                 ArrayRef<PipelineElement>)>,
              2>
      LoopPipelineParsingCallbacks;
  // AA callbacks
  SmallVector<std::function<bool(StringRef Name, AAManager &AA)>, 2>
      AAParsingCallbacks;
};

/// This utility template takes care of adding require<> and invalidate<>
/// passes for an analysis to a given \c PassManager. It is intended to be used
/// while parsing a single PipelineName of a pass pipeline.
/// When registering a new function analysis FancyAnalysis with the pass
/// pipeline name "fancy-analysis", a matching ParsePipelineCallback could look
/// like this:
///
/// static bool parseFunctionPipeline(StringRef Name, FunctionPassManager &FPM,
///                                   ArrayRef<PipelineElement> P) {
///   if (parseAnalysisUtilityPasses<FancyAnalysis>("fancy-analysis", Name,
///                                                 FPM))
///     return true;
///   return false;
/// }
template <typename AnalysisT, typename IRUnitT, typename AnalysisManagerT,
          typename... ExtraArgTs>
bool parseAnalysisUtilityPasses(
    StringRef AnalysisName, StringRef PipelineName,
    PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...> &PM) {
  if (!PipelineName.endswith(">"))
    return false;
  // See if this is an invalidate<> pass name
  if (PipelineName.startswith("invalidate<")) {
    PipelineName = PipelineName.substr(11, PipelineName.size() - 12);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(InvalidateAnalysisPass<AnalysisT>());
    return true;
  }

  // See if this is a require<> pass name
  if (PipelineName.startswith("require<")) {
    PipelineName = PipelineName.substr(8, PipelineName.size() - 9);
    if (PipelineName != AnalysisName)
      return false;
    PM.addPass(RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
                                   ExtraArgTs...>());
    return true;
  }

  return false;
}
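
// With a callback like the one above registered via
// registerPipelineParsingCallback, pipeline texts such as
// "require<fancy-analysis>" and "invalidate<fancy-analysis>" parse into the
// corresponding RequireAnalysisPass and InvalidateAnalysisPass instances.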
} // namespace llvm

#endif
// File: Passes/StandardInstrumentations.h
//===- StandardInstrumentations.h ------------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header defines a class that provides bookkeeping for all standard
/// (i.e., in-tree) pass instrumentations.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_STANDARDINSTRUMENTATIONS_H
#define LLVM_PASSES_STANDARDINSTRUMENTATIONS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/OptBisect.h"
#include "llvm/IR/PassTimingInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Transforms/IPO/SampleProfileProbe.h"

#include <string>
#include <utility>

namespace llvm {

class Module;
class Function;
class PassInstrumentationCallbacks;

/// Instrumentation to print IR before/after passes.
///
/// Needs state to be able to print the module after a pass that invalidates
/// the IR unit (typically a Loop or SCC).
class PrintIRInstrumentation {
public:
  ~PrintIRInstrumentation();

  void registerCallbacks(PassInstrumentationCallbacks &PIC);

private:
  void printBeforePass(StringRef PassID, Any IR);
  void printAfterPass(StringRef PassID, Any IR);
  void printAfterPassInvalidated(StringRef PassID);

  bool shouldPrintBeforePass(StringRef PassID);
  bool shouldPrintAfterPass(StringRef PassID);
  bool shouldPrintPassNumbers();
  bool shouldPrintAtPassNumber();

  using PrintModuleDesc = std::tuple<const Module *, std::string, StringRef>;

  void pushModuleDesc(StringRef PassID, Any IR);
  PrintModuleDesc popModuleDesc(StringRef PassID);

  PassInstrumentationCallbacks *PIC;
  /// Stack of Module descriptions, enough to print the module after a given
  /// pass.
  SmallVector<PrintModuleDesc, 2> ModuleDescStack;

  /// Used for print-at-pass-number
  unsigned CurrentPassNumber = 0;
};

class OptNoneInstrumentation {
public:
  OptNoneInstrumentation(bool DebugLogging) : DebugLogging(DebugLogging) {}
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

private:
  bool DebugLogging;
  bool shouldRun(StringRef PassID, Any IR);
};

class OptPassGateInstrumentation {
  LLVMContext &Context;
  bool HasWrittenIR = false;
public:
  OptPassGateInstrumentation(LLVMContext &Context) : Context(Context) {}
  bool shouldRun(StringRef PassName, Any IR);
  void registerCallbacks(PassInstrumentationCallbacks &PIC);
};

struct PrintPassOptions {
  /// Print adaptors and pass managers.
  bool Verbose = false;
  /// Don't print information for analyses.
  bool SkipAnalyses = false;
  /// Indent based on hierarchy.
  bool Indent = false;
};

// Debug logging for transformation and analysis passes.
class PrintPassInstrumentation {
  raw_ostream &print();

public:
  PrintPassInstrumentation(bool Enabled, PrintPassOptions Opts)
      : Enabled(Enabled), Opts(Opts) {}
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

private:
  bool Enabled;
  PrintPassOptions Opts;
  int Indent = 0;
};

class PreservedCFGCheckerInstrumentation {
public:
  // Keeps a sticky poisoned flag for the given basic block once it has been
  // deleted or RAUWed.
  struct BBGuard final : public CallbackVH {
    BBGuard(const BasicBlock *BB) : CallbackVH(BB) {}
    void deleted() override { CallbackVH::deleted(); }
    void allUsesReplacedWith(Value *) override { CallbackVH::deleted(); }
    bool isPoisoned() const { return !getValPtr(); }
  };

  // CFG is a map BB -> {(Succ, Multiplicity)}, where BB is a non-leaf basic
  // block and {(Succ, Multiplicity)} is the set of all pairs of the block's
  // successors and the multiplicity of the edge (BB->Succ). As the mapped sets
  // are unordered, the order of successors is not tracked by the CFG. In other
  // words, this allows basic block successors to be swapped by a pass without
  // reporting a CFG change. The CFG can be guarded by basic block tracking
  // pointers in the Graph (BBGuard). That is, if any of the blocks is deleted
  // or RAUWed, the CFG is treated as poisoned and no block pointer of the
  // Graph is used.
  struct CFG {
    std::optional<DenseMap<intptr_t, BBGuard>> BBGuards;
    DenseMap<const BasicBlock *, DenseMap<const BasicBlock *, unsigned>> Graph;

    CFG(const Function *F, bool TrackBBLifetime);

    bool operator==(const CFG &G) const {
      return !isPoisoned() && !G.isPoisoned() && Graph == G.Graph;
    }

    bool isPoisoned() const {
      return BBGuards && llvm::any_of(*BBGuards, [](const auto &BB) {
               return BB.second.isPoisoned();
             });
    }

    static void printDiff(raw_ostream &out, const CFG &Before,
                          const CFG &After);
    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &);
  };

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
  SmallVector<StringRef, 8> PassStack;
#endif

  void registerCallbacks(PassInstrumentationCallbacks &PIC,
                         ModuleAnalysisManager &MAM);
};

// Base class for classes that report changes to the IR.
// It presents an interface for such classes and provides calls
// on various events as the new pass manager transforms the IR.
// It also provides filtering of information based on hidden options
// specifying which functions are interesting.
// Calls are made for the following events/queries:
// 1.  The initial IR processed.
// 2.  To get the representation of the IR (of type \p T).
// 3.  When a pass does not change the IR.
// 4.  When a pass changes the IR (given both before and after representations
//         of type \p T).
// 5.  When an IR is invalidated.
// 6.  When a pass is run on an IR that is not interesting (based on options).
// 7.  When a pass is ignored (pass manager or adapter pass).
// 8.  To compare two IR representations (of type \p T).
template <typename IRUnitT> class ChangeReporter {
protected:
  ChangeReporter(bool RunInVerboseMode) : VerboseMode(RunInVerboseMode) {}

public:
  virtual ~ChangeReporter();

  // Determine if this pass/IR is interesting and, if so, save the IR;
  // otherwise it is left on the stack without data.
  void saveIRBeforePass(Any IR, StringRef PassID, StringRef PassName);
  // Compare the IR from before the pass with the IR after the pass.
  void handleIRAfterPass(Any IR, StringRef PassID, StringRef PassName);
  // Handle the situation where a pass is invalidated.
  void handleInvalidatedPass(StringRef PassID);

protected:
  // Register required callbacks.
  void registerRequiredCallbacks(PassInstrumentationCallbacks &PIC);

  // Called on the first IR processed.
  virtual void handleInitialIR(Any IR) = 0;
  // Called before and after a pass to get the representation of the IR.
  virtual void generateIRRepresentation(Any IR, StringRef PassID,
                                        IRUnitT &Output) = 0;
  // Called when the pass is not interesting.
  virtual void omitAfter(StringRef PassID, std::string &Name) = 0;
  // Called when an interesting IR has changed.
  virtual void handleAfter(StringRef PassID, std::string &Name,
                           const IRUnitT &Before, const IRUnitT &After,
                           Any) = 0;
  // Called when an interesting pass is invalidated.
  virtual void handleInvalidated(StringRef PassID) = 0;
  // Called when the IR or pass is not interesting.
  virtual void handleFiltered(StringRef PassID, std::string &Name) = 0;
  // Called when an ignored pass is encountered.
  virtual void handleIgnored(StringRef PassID, std::string &Name) = 0;

  // Stack of IRs before passes.
  std::vector<IRUnitT> BeforeStack;
  // Is this the first IR seen?
  bool InitialIR = true;

  // Run in verbose mode, printing everything?
  const bool VerboseMode;
};

// An abstract template base class that handles printing banners and
// reporting when things have not changed or are filtered out.
template <typename IRUnitT>
class TextChangeReporter : public ChangeReporter<IRUnitT> {
protected:
  TextChangeReporter(bool Verbose);

  // Print a module dump of the first IR that is changed.
  void handleInitialIR(Any IR) override;
  // Report that the IR was omitted because it did not change.
  void omitAfter(StringRef PassID, std::string &Name) override;
  // Report that the pass was invalidated.
  void handleInvalidated(StringRef PassID) override;
  // Report that the IR was filtered out.
  void handleFiltered(StringRef PassID, std::string &Name) override;
  // Report that the pass was ignored.
  void handleIgnored(StringRef PassID, std::string &Name) override;
  // Make substitutions in \p S suitable for reporting changes
  // after the pass and then print it.

  raw_ostream &Out;
};

// A change printer based on the string representation of the IR as created
// by unwrapAndPrint.  The string representation is stored in a std::string
// to preserve it as the IR changes in each pass.  Note that the banner is
// included in this representation but it is massaged before reporting.
class IRChangedPrinter : public TextChangeReporter<std::string> {
public:
  IRChangedPrinter(bool VerboseMode)
      : TextChangeReporter<std::string>(VerboseMode) {}
  ~IRChangedPrinter() override;
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

protected:
  // Called before and after a pass to get the representation of the IR.
  void generateIRRepresentation(Any IR, StringRef PassID,
                                std::string &Output) override;
  // Called when an interesting IR has changed.
  void handleAfter(StringRef PassID, std::string &Name,
                   const std::string &Before, const std::string &After,
                   Any) override;
};

class IRChangedTester : public IRChangedPrinter {
public:
  IRChangedTester() : IRChangedPrinter(true) {}
  ~IRChangedTester() override;
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

protected:
  void handleIR(const std::string &IR, StringRef PassID);

  // Check initial IR
  void handleInitialIR(Any IR) override;
  // Do nothing.
  void omitAfter(StringRef PassID, std::string &Name) override;
  // Do nothing.
  void handleInvalidated(StringRef PassID) override;
  // Do nothing.
  void handleFiltered(StringRef PassID, std::string &Name) override;
  // Do nothing.
  void handleIgnored(StringRef PassID, std::string &Name) override;

  // Call test as interesting IR has changed.
  void handleAfter(StringRef PassID, std::string &Name,
                   const std::string &Before, const std::string &After,
                   Any) override;
};

// Information that needs to be saved for a basic block in order to compare it
// before and after a pass and determine whether the pass changed it.
template <typename T> class BlockDataT {
public:
  BlockDataT(const BasicBlock &B) : Label(B.getName().str()), Data(B) {
    raw_string_ostream SS(Body);
    B.print(SS, nullptr, true, true);
  }

  bool operator==(const BlockDataT &That) const { return Body == That.Body; }
  bool operator!=(const BlockDataT &That) const { return Body != That.Body; }

  // Return the label of the represented basic block.
  StringRef getLabel() const { return Label; }
  // Return the string representation of the basic block.
  StringRef getBody() const { return Body; }

  // Return the associated data
  const T &getData() const { return Data; }

protected:
  std::string Label;
  std::string Body;

  // Extra data associated with a basic block
  T Data;
};

template <typename T> class OrderedChangedData {
public:
  // Return the names in the order they were saved
  std::vector<std::string> &getOrder() { return Order; }
  const std::vector<std::string> &getOrder() const { return Order; }

  // Return a map of names to saved representations
  StringMap<T> &getData() { return Data; }
  const StringMap<T> &getData() const { return Data; }

  bool operator==(const OrderedChangedData<T> &That) const {
    return Data == That.getData();
  }

  // Call the lambda \p HandlePair on each corresponding pair of data from
  // \p Before and \p After.  The order is based on the order in \p After
  // with ones that are only in \p Before interspersed based on where they
  // occur in \p Before.  This is used to present the output in an order
  // based on how the data is ordered in LLVM.
  static void report(const OrderedChangedData &Before,
                     const OrderedChangedData &After,
                     function_ref<void(const T *, const T *)> HandlePair);

protected:
  std::vector<std::string> Order;
  StringMap<T> Data;
};

// No extra information is needed for the patch-style change reporter.
class EmptyData {
public:
  EmptyData(const BasicBlock &) {}
};

// The data saved for comparing functions.
template <typename T>
class FuncDataT : public OrderedChangedData<BlockDataT<T>> {
public:
  FuncDataT(std::string S) : EntryBlockName(S) {}

  // Return the name of the entry block
  std::string getEntryBlockName() const { return EntryBlockName; }

protected:
  std::string EntryBlockName;
};

// The data saved for comparing IRs.
template <typename T>
class IRDataT : public OrderedChangedData<FuncDataT<T>> {};

// Abstract template base class for a class that compares two IRs.  The
// class is created with the 2 IRs to compare and then compare is called.
// The static function analyzeIR is used to build up the IR representation.
template <typename T> class IRComparer {
public:
  IRComparer(const IRDataT<T> &Before, const IRDataT<T> &After)
      : Before(Before), After(After) {}

  // Compare the 2 IRs. \p CompareFunc is called to handle the comparison of
  // each function. When \p InModule is set, the function is being compared as
  // part of comparing a module.
  void compare(
      bool CompareModule,
      std::function<void(bool InModule, unsigned Minor,
                         const FuncDataT<T> &Before, const FuncDataT<T> &After)>
          CompareFunc);

  // Analyze \p IR and build the IR representation in \p Data.
  static void analyzeIR(Any IR, IRDataT<T> &Data);

protected:
  // Generate the data for \p F into \p Data.
  static bool generateFunctionData(IRDataT<T> &Data, const Function &F);

  const IRDataT<T> &Before;
  const IRDataT<T> &After;
};

// A change printer that prints out in-line differences in the basic
// blocks.  It uses an IRComparer to do the comparison so it shows
// the differences prefixed with '-' and '+' for code that is removed
// and added, respectively.  Changes to the IR that do not affect basic
// blocks are not reported as having changed the IR.  The option
// -print-module-scope does not affect this change reporter.
class InLineChangePrinter : public TextChangeReporter<IRDataT<EmptyData>> {
public:
  InLineChangePrinter(bool VerboseMode, bool ColourMode)
      : TextChangeReporter<IRDataT<EmptyData>>(VerboseMode),
        UseColour(ColourMode) {}
  ~InLineChangePrinter() override;
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

protected:
  // Create a representation of the IR.
  void generateIRRepresentation(Any IR, StringRef PassID,
                                IRDataT<EmptyData> &Output) override;

  // Called when an interesting IR has changed.
  void handleAfter(StringRef PassID, std::string &Name,
                   const IRDataT<EmptyData> &Before,
                   const IRDataT<EmptyData> &After, Any) override;

  void handleFunctionCompare(StringRef Name, StringRef Prefix, StringRef PassID,
                             StringRef Divider, bool InModule, unsigned Minor,
                             const FuncDataT<EmptyData> &Before,
                             const FuncDataT<EmptyData> &After);

  bool UseColour;
};

class VerifyInstrumentation {
  bool DebugLogging;

public:
  VerifyInstrumentation(bool DebugLogging) : DebugLogging(DebugLogging) {}
  void registerCallbacks(PassInstrumentationCallbacks &PIC);
};

/// This class implements --time-trace functionality for the new pass manager.
/// It provides pass-instrumentation callbacks that measure a pass's execution
/// time and collect the time-tracing info via the TimeProfiler.
class TimeProfilingPassesHandler {
public:
  TimeProfilingPassesHandler();
  // We intend this to be unique per-compilation, thus no copies.
  TimeProfilingPassesHandler(const TimeProfilingPassesHandler &) = delete;
  void operator=(const TimeProfilingPassesHandler &) = delete;

  void registerCallbacks(PassInstrumentationCallbacks &PIC);

private:
  // Implementation of pass instrumentation callbacks.
  void runBeforePass(StringRef PassID, Any IR);
  void runAfterPass();
};

// Class that holds transitions between basic blocks.  The transitions
// are contained in a map of values to names of basic blocks.
class DCData {
public:
  // Fill the map with the transitions from basic block \p B.
  DCData(const BasicBlock &B);

  // Return an iterator to the names of the successor blocks.
  StringMap<std::string>::const_iterator begin() const {
    return Successors.begin();
  }
  StringMap<std::string>::const_iterator end() const {
    return Successors.end();
  }

  // Return the label of the basic block reached on a transition on \p S.
  StringRef getSuccessorLabel(StringRef S) const {
    assert(Successors.count(S) == 1 && "Expected to find successor.");
    return Successors.find(S)->getValue();
  }

protected:
  // Add a transition to \p Succ on \p Label
  void addSuccessorLabel(StringRef Succ, StringRef Label) {
    std::pair<std::string, std::string> SS{Succ.str(), Label.str()};
    Successors.insert(SS);
  }

  StringMap<std::string> Successors;
};

// A change reporter that builds a website with links to pdf files showing
// dot control flow graphs with changed instructions shown in colour.
class DotCfgChangeReporter : public ChangeReporter<IRDataT<DCData>> {
public:
  DotCfgChangeReporter(bool Verbose);
  ~DotCfgChangeReporter() override;
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

protected:
  // Initialize the HTML file and output the header.
  bool initializeHTML();

  // Called on the first IR processed.
  void handleInitialIR(Any IR) override;
  // Called before and after a pass to get the representation of the IR.
  void generateIRRepresentation(Any IR, StringRef PassID,
                                IRDataT<DCData> &Output) override;
  // Called when the pass is not interesting.
  void omitAfter(StringRef PassID, std::string &Name) override;
  // Called when an interesting IR has changed.
  void handleAfter(StringRef PassID, std::string &Name,
                   const IRDataT<DCData> &Before, const IRDataT<DCData> &After,
                   Any) override;
  // Called when an interesting pass is invalidated.
  void handleInvalidated(StringRef PassID) override;
  // Called when the IR or pass is not interesting.
  void handleFiltered(StringRef PassID, std::string &Name) override;
  // Called when an ignored pass is encountered.
  void handleIgnored(StringRef PassID, std::string &Name) override;

  // Generate the pdf file into \p Dir / \p PDFFileName using \p DotFile as
  // input and return the HTML <a> tag with \p Text as the content.
  static std::string genHTML(StringRef Text, StringRef DotFile,
                             StringRef PDFFileName);

  void handleFunctionCompare(StringRef Name, StringRef Prefix, StringRef PassID,
                             StringRef Divider, bool InModule, unsigned Minor,
                             const FuncDataT<DCData> &Before,
                             const FuncDataT<DCData> &After);

  unsigned N = 0;
  std::unique_ptr<raw_fd_ostream> HTML;
};

// Print IR on crash.
class PrintCrashIRInstrumentation {
public:
  PrintCrashIRInstrumentation()
      : SavedIR("*** Dump of IR Before Last Pass Unknown ***") {}
  ~PrintCrashIRInstrumentation();
  void registerCallbacks(PassInstrumentationCallbacks &PIC);
  void reportCrashIR();

protected:
  std::string SavedIR;

private:
  // The crash reporter that will report on a crash.
  static PrintCrashIRInstrumentation *CrashReporter;
  // Crash handler registered when print-on-crash is specified.
  static void SignalHandler(void *);
};

/// This class provides an interface to register all the standard pass
/// instrumentations and manages their state (if any).
class StandardInstrumentations {
  PrintIRInstrumentation PrintIR;
  PrintPassInstrumentation PrintPass;
  TimePassesHandler TimePasses;
  TimeProfilingPassesHandler TimeProfilingPasses;
  OptNoneInstrumentation OptNone;
  OptPassGateInstrumentation OptPassGate;
  PreservedCFGCheckerInstrumentation PreservedCFGChecker;
  IRChangedPrinter PrintChangedIR;
  PseudoProbeVerifier PseudoProbeVerification;
  InLineChangePrinter PrintChangedDiff;
  DotCfgChangeReporter WebsiteChangeReporter;
  PrintCrashIRInstrumentation PrintCrashIR;
  IRChangedTester ChangeTester;
  VerifyInstrumentation Verify;

  bool VerifyEach;

public:
  StandardInstrumentations(LLVMContext &Context, bool DebugLogging,
                           bool VerifyEach = false,
                           PrintPassOptions PrintPassOpts = PrintPassOptions());

  // Register all the standard instrumentation callbacks. If \p MAM is nullptr
  // then PreservedCFGChecker is not enabled.
  void registerCallbacks(PassInstrumentationCallbacks &PIC,
                         ModuleAnalysisManager *MAM = nullptr);

  TimePassesHandler &getTimePasses() { return TimePasses; }
};
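
// A typical wiring sketch (assumes a module `M` and the ModuleAnalysisManager
// `MAM` from a PassBuilder setup as in llvm/Passes/PassBuilder.h):
//
//   PassInstrumentationCallbacks PIC;
//   StandardInstrumentations SI(M.getContext(), /*DebugLogging=*/false);
//   SI.registerCallbacks(PIC, &MAM);
//   PassBuilder PB(/*TM=*/nullptr, PipelineTuningOptions(), std::nullopt,
//                  &PIC);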

extern template class ChangeReporter<std::string>;
extern template class TextChangeReporter<std::string>;

extern template class BlockDataT<EmptyData>;
extern template class FuncDataT<EmptyData>;
extern template class IRDataT<EmptyData>;
extern template class ChangeReporter<IRDataT<EmptyData>>;
extern template class TextChangeReporter<IRDataT<EmptyData>>;
extern template class IRComparer<EmptyData>;

} // namespace llvm

#endif
// File: Passes/OptimizationLevel.h
//===-------- LLVM-provided High-Level Optimization levels -*- C++ -*------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header enumerates the LLVM-provided high-level optimization levels.
/// Each level has a specific goal and rationale.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_OPTIMIZATIONLEVEL_H
#define LLVM_PASSES_OPTIMIZATIONLEVEL_H

#include <assert.h>

namespace llvm {

class OptimizationLevel final {
  unsigned SpeedLevel = 2;
  unsigned SizeLevel = 0;
  OptimizationLevel(unsigned SpeedLevel, unsigned SizeLevel)
      : SpeedLevel(SpeedLevel), SizeLevel(SizeLevel) {
    // Check that only valid combinations are passed.
    assert(SpeedLevel <= 3 &&
           "Optimization level for speed should be 0, 1, 2, or 3");
    assert(SizeLevel <= 2 &&
           "Optimization level for size should be 0, 1, or 2");
    assert((SizeLevel == 0 || SpeedLevel == 2) &&
           "Optimize for size should be encoded with speedup level == 2");
  }

public:
  OptimizationLevel() = default;
  /// Disable as many optimizations as possible. This doesn't completely
  /// disable the optimizer in all cases; for example, always_inline functions
  /// can be required to be inlined for correctness.
  static const OptimizationLevel O0;

  /// Optimize quickly without destroying debuggability.
  ///
  /// This level is tuned to produce a result from the optimizer as quickly
  /// as possible and to avoid destroying debuggability. This tends to result
  /// in a very good development mode where the compiled code will be
  /// immediately executed as part of testing. As a consequence, where
  /// possible, we would like to produce efficient-to-execute code, but not
  /// if it significantly slows down compilation or would prevent even basic
  /// debugging of the resulting binary.
  ///
  /// As an example, complex loop transformations such as versioning,
  /// vectorization, or fusion don't make sense here due to the degree to
  /// which the executed code differs from the source code, and the compile
  /// time cost.
  static const OptimizationLevel O1;
  /// Optimize for fast execution as much as possible without triggering
  /// significant incremental compile time or code size growth.
  ///
  /// The key idea is that optimizations at this level should "pay for
  /// themselves". So if an optimization increases compile time by 5% or
  /// increases code size by 5% for a particular benchmark, that benchmark
  /// should also be one which sees a 5% runtime improvement. If the compile
  /// time or code size penalties happen on average across a diverse range of
  /// LLVM users' benchmarks, then the improvements should as well.
  ///
  /// And no matter what, the compile time must not grow superlinearly with
  /// the size of the input to LLVM, so that users can control the runtime of
  /// the optimizer in this mode.
  ///
  /// This is expected to be a good default optimization level for the vast
  /// majority of users.
  static const OptimizationLevel O2;
  /// Optimize for fast execution as much as possible.
  ///
  /// This mode is significantly more aggressive in trading off compile time
  /// and code size to get execution time improvements. The core idea is that
  /// this mode should include any optimization that helps execution time on
  /// balance across a diverse collection of benchmarks, even if it increases
  /// code size or compile time for some benchmarks without corresponding
  /// improvements to execution time.
  ///
  /// Despite being willing to trade more compile time off to get improved
  /// execution time, this mode still tries to avoid superlinear growth in
  /// order to make even significantly slower compile times at least scale
  /// reasonably. This does not preclude very substantial constant factor
  /// costs though.
  static const OptimizationLevel O3;
  /// Similar to \c O2 but tries to optimize for small code size instead of
  /// fast execution without triggering significant incremental execution
  /// time slowdowns.
  ///
  /// The logic here is exactly the same as \c O2, but with code size and
  /// execution time metrics swapped.
  ///
  /// A consequence of the different core goal is that this should in general
  /// produce substantially smaller executables that still run in
  /// a reasonable amount of time.
  static const OptimizationLevel Os;
  /// A very specialized mode that will optimize for code size at any and all
  /// costs.
  ///
  /// This is useful primarily when there are absolute size limitations and
  /// any effort taken to reduce the size is worth it regardless of the
  /// execution time impact. You should expect this level to produce rather
  /// slow, but very small, code.
  static const OptimizationLevel Oz;

  bool isOptimizingForSpeed() const { return SizeLevel == 0 && SpeedLevel > 0; }

  bool isOptimizingForSize() const { return SizeLevel > 0; }

  bool operator==(const OptimizationLevel &Other) const {
    return SizeLevel == Other.SizeLevel && SpeedLevel == Other.SpeedLevel;
  }
  bool operator!=(const OptimizationLevel &Other) const {
    return SizeLevel != Other.SizeLevel || SpeedLevel != Other.SpeedLevel;
  }

  unsigned getSpeedupLevel() const { return SpeedLevel; }

  unsigned getSizeLevel() const { return SizeLevel; }
};
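
// For reference, the concrete encodings implied by the constructor assertions
// above are (SpeedLevel, SizeLevel): O0 = {0,0}, O1 = {1,0}, O2 = {2,0},
// O3 = {3,0}, Os = {2,1}, and Oz = {2,2}; that is, the size-optimizing levels
// keep SpeedLevel == 2.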
} // namespace llvm

#endif
// File: Passes/PassPlugin.h
//===- llvm/Passes/PassPlugin.h - Public Plugin API -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This defines the public entry point for new-PM pass plugins.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_PASSES_PASSPLUGIN_H
#define LLVM_PASSES_PASSPLUGIN_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <string>

namespace llvm {
class PassBuilder;

/// \macro LLVM_PLUGIN_API_VERSION
/// Identifies the API version understood by this plugin.
///
/// When a plugin is loaded, the driver will check its supported plugin version
/// against that of the plugin. A mismatch is an error. The supported version
/// will be incremented for ABI-breaking changes to the \c PassPluginLibraryInfo
/// struct, i.e. when callbacks are added, removed, or reordered.
#define LLVM_PLUGIN_API_VERSION 1

extern "C" {
/// Information about the plugin required to load its passes
///
/// This struct defines the core interface for pass plugins and is supposed to
/// be filled out by plugin implementors. LLVM-side users of a plugin are
/// expected to use the \c PassPlugin class below to interface with it.
struct PassPluginLibraryInfo {
  /// The API version understood by this plugin, usually \c
  /// LLVM_PLUGIN_API_VERSION
  uint32_t APIVersion;
  /// A meaningful name of the plugin.
  const char *PluginName;
  /// The version of the plugin.
  const char *PluginVersion;

  /// The callback for registering plugin passes with a \c PassBuilder
  /// instance
  void (*RegisterPassBuilderCallbacks)(PassBuilder &);
};
}

/// A loaded pass plugin.
///
/// An instance of this class wraps a loaded pass plugin and gives access to
/// its interface defined by the \c PassPluginLibraryInfo it exposes.
class PassPlugin {
public:
  /// Attempts to load a pass plugin from a given file.
  ///
  /// \returns an error if either the library cannot be found or loaded, there
  /// is no public entry point, or the plugin implements the wrong API version.
  static Expected<PassPlugin> Load(const std::string &Filename);

  /// Get the filename of the loaded plugin.
  StringRef getFilename() const { return Filename; }

  /// Get the plugin name
  StringRef getPluginName() const { return Info.PluginName; }

  /// Get the plugin version
  StringRef getPluginVersion() const { return Info.PluginVersion; }

  /// Get the plugin API version
  uint32_t getAPIVersion() const { return Info.APIVersion; }

  /// Invoke the PassBuilder callback registration
  void registerPassBuilderCallbacks(PassBuilder &PB) const {
    Info.RegisterPassBuilderCallbacks(PB);
  }

private:
  PassPlugin(const std::string &Filename, const sys::DynamicLibrary &Library)
      : Filename(Filename), Library(Library), Info() {}

  std::string Filename;
  sys::DynamicLibrary Library;
  PassPluginLibraryInfo Info;
};
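
// Loading a plugin at runtime might look like this (a sketch; assumes a
// configured PassBuilder `PB`):
//
//   if (Expected<PassPlugin> Plugin = PassPlugin::Load("MyPlugin.so"))
//     Plugin->registerPassBuilderCallbacks(PB);
//   else
//     report_fatal_error(Plugin.takeError());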
} // namespace llvm

/// The public entry point for a pass plugin.
///
/// When a plugin is loaded by the driver, it will call this entry point to
/// obtain information about this plugin and about how to register its passes.
/// This function needs to be implemented by the plugin, see the example below:
///
/// ```
/// extern "C" ::llvm::PassPluginLibraryInfo LLVM_ATTRIBUTE_WEAK
/// llvmGetPassPluginInfo() {
///   return {
///     LLVM_PLUGIN_API_VERSION, "MyPlugin", "v0.1", [](PassBuilder &PB) { ... }
///   };
/// }
/// ```
extern "C" ::llvm::PassPluginLibraryInfo LLVM_ATTRIBUTE_WEAK
llvmGetPassPluginInfo();

#endif /* LLVM_PASSES_PASSPLUGIN_H */
// File: IRPrinter/IRPrintingPasses.h
//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines passes to print out IR in various granularities. The
/// PrintModulePass pass simply prints out the entire module when it is
/// executed. The PrintFunctionPass class is designed to be pipelined with
/// other FunctionPasses, and prints out the functions of the module as they
/// are processed.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IRPRINTER_IRPRINTINGPASSES_H
#define LLVM_IRPRINTER_IRPRINTINGPASSES_H

#include "llvm/IR/PassManager.h"
#include <string>

namespace llvm {
class raw_ostream;
class Function;
class Module;
class Pass;

/// Pass (for the new pass manager) for printing a Module as
/// LLVM's text IR assembly.
class PrintModulePass : public PassInfoMixin<PrintModulePass> {
  raw_ostream &OS;
  std::string Banner;
  bool ShouldPreserveUseListOrder;
  bool EmitSummaryIndex;

public:
  PrintModulePass();
  PrintModulePass(raw_ostream &OS, const std::string &Banner = "",
                  bool ShouldPreserveUseListOrder = false,
                  bool EmitSummaryIndex = false);

  PreservedAnalyses run(Module &M, AnalysisManager<Module> &);
  static bool isRequired() { return true; }
};

/// Pass (for the new pass manager) for printing a Function as
/// LLVM's text IR assembly.
class PrintFunctionPass : public PassInfoMixin<PrintFunctionPass> {
  raw_ostream &OS;
  std::string Banner;

public:
  PrintFunctionPass();
  PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");

  PreservedAnalyses run(Function &F, AnalysisManager<Function> &);
  static bool isRequired() { return true; }
};
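
// A minimal scheduling sketch (assumes the usual new-pass-manager setup from
// llvm/IR/PassManager.h; the banner strings are illustrative):
//
//   llvm::ModulePassManager MPM;
//   MPM.addPass(llvm::PrintModulePass(llvm::errs(), "; module dump"));
//   llvm::FunctionPassManager FPM;
//   FPM.addPass(llvm::PrintFunctionPass(llvm::errs(), "; function dump"));
//   MPM.addPass(llvm::createModuleToFunctionPassAdaptor(std::move(FPM)));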

} // namespace llvm

#endif

// ---- File: InitializePasses.h ----
//===- llvm/InitializePasses.h - Initialize All Passes ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations for the pass initialization routines
// for the entire LLVM project.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_INITIALIZEPASSES_H
#define LLVM_INITIALIZEPASSES_H

namespace llvm {

class PassRegistry;

/// Initialize all passes linked into the Core library.
void initializeCore(PassRegistry&);

/// Initialize all passes linked into the TransformUtils library.
void initializeTransformUtils(PassRegistry&);

/// Initialize all passes linked into the ScalarOpts library.
void initializeScalarOpts(PassRegistry&);

/// Initialize all passes linked into the Vectorize library.
void initializeVectorization(PassRegistry&);

/// Initialize all passes linked into the InstCombine library.
void initializeInstCombine(PassRegistry&);

/// Initialize all passes linked into the IPO library.
void initializeIPO(PassRegistry&);

/// Initialize all passes linked into the Analysis library.
void initializeAnalysis(PassRegistry&);

/// Initialize all passes linked into the CodeGen library.
void initializeCodeGen(PassRegistry&);

/// Initialize all passes linked into the GlobalISel library.
void initializeGlobalISel(PassRegistry&);

/// Initialize all passes linked into the Target library.
void initializeTarget(PassRegistry&);
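
// A legacy-pass-manager host would typically run a few of these once at
// startup (sketch; initialize only the libraries actually linked in):
//
//   llvm::PassRegistry &Registry = *llvm::PassRegistry::getPassRegistry();
//   llvm::initializeCore(Registry);
//   llvm::initializeScalarOpts(Registry);
//   llvm::initializeCodeGen(Registry);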

void initializeAAEvalLegacyPassPass(PassRegistry&);
void initializeAAResultsWrapperPassPass(PassRegistry&);
void initializeAlwaysInlinerLegacyPassPass(PassRegistry&);
void initializeAssignmentTrackingAnalysisPass(PassRegistry &);
void initializeAssumeBuilderPassLegacyPassPass(PassRegistry &);
void initializeAssumptionCacheTrackerPass(PassRegistry&);
void initializeAtomicExpandPass(PassRegistry&);
void initializeBasicBlockSectionsProfileReaderPass(PassRegistry &);
void initializeBasicBlockSectionsPass(PassRegistry &);
void initializeBarrierNoopPass(PassRegistry&);
void initializeBasicAAWrapperPassPass(PassRegistry&);
void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry&);
void initializeBranchFolderPassPass(PassRegistry&);
void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&);
void initializeBranchRelaxationPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
void initializeBreakFalseDepsPass(PassRegistry&);
void initializeCanonicalizeFreezeInLoopsPass(PassRegistry &);
void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
void initializeCFGPrinterLegacyPassPass(PassRegistry&);
void initializeCFGSimplifyPassPass(PassRegistry&);
void initializeCFGuardPass(PassRegistry&);
void initializeCFGuardLongjmpPass(PassRegistry&);
void initializeCFGViewerLegacyPassPass(PassRegistry&);
void initializeCFIFixupPass(PassRegistry&);
void initializeCFIInstrInserterPass(PassRegistry&);
void initializeCallBrPreparePass(PassRegistry &);
void initializeCallGraphDOTPrinterPass(PassRegistry&);
void initializeCallGraphPrinterLegacyPassPass(PassRegistry&);
void initializeCallGraphViewerPass(PassRegistry&);
void initializeCallGraphWrapperPassPass(PassRegistry&);
void initializeCheckDebugMachineModulePass(PassRegistry &);
void initializeCodeGenPreparePass(PassRegistry&);
void initializeComplexDeinterleavingLegacyPassPass(PassRegistry&);
void initializeConstantHoistingLegacyPassPass(PassRegistry&);
void initializeCostModelAnalysisPass(PassRegistry&);
void initializeCycleInfoWrapperPassPass(PassRegistry &);
void initializeDAEPass(PassRegistry&);
void initializeDAHPass(PassRegistry&);
void initializeDCELegacyPassPass(PassRegistry&);
void initializeDeadMachineInstructionElimPass(PassRegistry&);
void initializeDebugifyMachineModulePass(PassRegistry &);
void initializeDelinearizationPass(PassRegistry&);
void initializeDependenceAnalysisWrapperPassPass(PassRegistry&);
void initializeDetectDeadLanesPass(PassRegistry&);
void initializeDomOnlyPrinterWrapperPassPass(PassRegistry &);
void initializeDomOnlyViewerWrapperPassPass(PassRegistry &);
void initializeDomPrinterWrapperPassPass(PassRegistry &);
void initializeDomViewerWrapperPassPass(PassRegistry &);
void initializeDominanceFrontierWrapperPassPass(PassRegistry&);
void initializeDominatorTreeWrapperPassPass(PassRegistry&);
void initializeDwarfEHPrepareLegacyPassPass(PassRegistry &);
void initializeEarlyCSELegacyPassPass(PassRegistry&);
void initializeEarlyCSEMemSSALegacyPassPass(PassRegistry&);
void initializeEarlyIfConverterPass(PassRegistry&);
void initializeEarlyIfPredicatorPass(PassRegistry &);
void initializeEarlyMachineLICMPass(PassRegistry&);
void initializeEarlyTailDuplicatePass(PassRegistry&);
void initializeEdgeBundlesPass(PassRegistry&);
void initializeEHContGuardCatchretPass(PassRegistry &);
void initializeExpandLargeFpConvertLegacyPassPass(PassRegistry&);
void initializeExpandLargeDivRemLegacyPassPass(PassRegistry&);
void initializeExpandMemCmpPassPass(PassRegistry&);
void initializeExpandPostRAPass(PassRegistry&);
void initializeExpandReductionsPass(PassRegistry&);
void initializeExpandVectorPredicationPass(PassRegistry &);
void initializeMakeGuardsExplicitLegacyPassPass(PassRegistry&);
void initializeExternalAAWrapperPassPass(PassRegistry&);
void initializeFEntryInserterPass(PassRegistry&);
void initializeFinalizeISelPass(PassRegistry&);
void initializeFinalizeMachineBundlesPass(PassRegistry&);
void initializeFixIrreduciblePass(PassRegistry &);
void initializeFixupStatepointCallerSavedPass(PassRegistry&);
void initializeFlattenCFGLegacyPassPass(PassRegistry &);
void initializeFuncletLayoutPass(PassRegistry&);
void initializeGCMachineCodeAnalysisPass(PassRegistry&);
void initializeGCModuleInfoPass(PassRegistry&);
void initializeGVNLegacyPassPass(PassRegistry&);
void initializeGlobalMergePass(PassRegistry&);
void initializeGlobalsAAWrapperPassPass(PassRegistry&);
void initializeGuardWideningLegacyPassPass(PassRegistry&);
void initializeHardwareLoopsLegacyPass(PassRegistry&);
void initializeMIRProfileLoaderPassPass(PassRegistry &);
void initializeIRSimilarityIdentifierWrapperPassPass(PassRegistry&);
void initializeIRTranslatorPass(PassRegistry&);
void initializeIVUsersWrapperPassPass(PassRegistry&);
void initializeIfConverterPass(PassRegistry&);
void initializeImmutableModuleSummaryIndexWrapperPassPass(PassRegistry&);
void initializeImplicitNullChecksPass(PassRegistry&);
void initializeIndirectBrExpandPassPass(PassRegistry&);
void initializeInferAddressSpacesPass(PassRegistry&);
void initializeInstCountLegacyPassPass(PassRegistry &);
void initializeInstSimplifyLegacyPassPass(PassRegistry &);
void initializeInstructionCombiningPassPass(PassRegistry&);
void initializeInstructionSelectPass(PassRegistry&);
void initializeInterleavedAccessPass(PassRegistry&);
void initializeInterleavedLoadCombinePass(PassRegistry &);
void initializeIntervalPartitionPass(PassRegistry&);
void initializeJMCInstrumenterPass(PassRegistry&);
void initializeKCFIPass(PassRegistry &);
void initializeLCSSAVerificationPassPass(PassRegistry&);
void initializeLCSSAWrapperPassPass(PassRegistry&);
void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
void initializeLazyMachineBlockFrequencyInfoPassPass(PassRegistry&);
void initializeLazyValueInfoPrinterPass(PassRegistry&);
void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
void initializeLegacyLICMPassPass(PassRegistry&);
void initializeLegacyLoopSinkPassPass(PassRegistry&);
void initializeLegalizerPass(PassRegistry&);
void initializeGISelCSEAnalysisWrapperPassPass(PassRegistry &);
void initializeGISelKnownBitsAnalysisPass(PassRegistry &);
void initializeLiveDebugValuesPass(PassRegistry&);
void initializeLiveDebugVariablesPass(PassRegistry&);
void initializeLiveIntervalsPass(PassRegistry&);
void initializeLiveRangeShrinkPass(PassRegistry&);
void initializeLiveRegMatrixPass(PassRegistry&);
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry &);
void initializeLoadStoreOptPass(PassRegistry &);
void initializeLoadStoreVectorizerLegacyPassPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLocalizerPass(PassRegistry&);
void initializeLoopDataPrefetchLegacyPassPass(PassRegistry&);
void initializeLoopExtractorLegacyPassPass(PassRegistry &);
void initializeLoopGuardWideningLegacyPassPass(PassRegistry&);
void initializeLoopInfoWrapperPassPass(PassRegistry&);
void initializeLoopInstSimplifyLegacyPassPass(PassRegistry&);
void initializeLoopPassPass(PassRegistry&);
void initializeLoopPredicationLegacyPassPass(PassRegistry&);
void initializeLoopRotateLegacyPassPass(PassRegistry&);
void initializeLoopSimplifyCFGLegacyPassPass(PassRegistry&);
void initializeLoopSimplifyPass(PassRegistry&);
void initializeLoopStrengthReducePass(PassRegistry&);
void initializeLoopUnrollPass(PassRegistry&);
void initializeLowerAtomicLegacyPassPass(PassRegistry&);
void initializeLowerConstantIntrinsicsPass(PassRegistry&);
void initializeLowerEmuTLSPass(PassRegistry&);
void initializeLowerExpectIntrinsicPass(PassRegistry&);
void initializeLowerGlobalDtorsLegacyPassPass(PassRegistry &);
void initializeLowerGuardIntrinsicLegacyPassPass(PassRegistry&);
void initializeLowerWidenableConditionLegacyPassPass(PassRegistry&);
void initializeLowerIntrinsicsPass(PassRegistry&);
void initializeLowerInvokeLegacyPassPass(PassRegistry&);
void initializeLowerSwitchLegacyPassPass(PassRegistry &);
void initializeMIRAddFSDiscriminatorsPass(PassRegistry &);
void initializeMIRCanonicalizerPass(PassRegistry &);
void initializeMIRNamerPass(PassRegistry &);
void initializeMIRPrintingPassPass(PassRegistry&);
void initializeMachineBlockFrequencyInfoPass(PassRegistry&);
void initializeMachineBlockPlacementPass(PassRegistry&);
void initializeMachineBlockPlacementStatsPass(PassRegistry&);
void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCFGPrinterPass(PassRegistry &);
void initializeMachineCSEPass(PassRegistry&);
void initializeMachineCombinerPass(PassRegistry&);
void initializeMachineCopyPropagationPass(PassRegistry&);
void initializeMachineCycleInfoPrinterPassPass(PassRegistry &);
void initializeMachineCycleInfoWrapperPassPass(PassRegistry &);
void initializeMachineDominanceFrontierPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
void initializeMachineFunctionPrinterPassPass(PassRegistry&);
void initializeMachineFunctionSplitterPass(PassRegistry &);
void initializeMachineLateInstrsCleanupPass(PassRegistry&);
void initializeMachineLICMPass(PassRegistry&);
void initializeMachineLoopInfoPass(PassRegistry&);
void initializeMachineModuleInfoWrapperPassPass(PassRegistry &);
void initializeMachineOptimizationRemarkEmitterPassPass(PassRegistry&);
void initializeMachineOutlinerPass(PassRegistry&);
void initializeMachinePipelinerPass(PassRegistry&);
void initializeMachinePostDominatorTreePass(PassRegistry&);
void initializeMachineRegionInfoPassPass(PassRegistry&);
void initializeMachineSanitizerBinaryMetadataPass(PassRegistry &);
void initializeMachineSchedulerPass(PassRegistry&);
void initializeMachineSinkingPass(PassRegistry&);
void initializeMachineTraceMetricsPass(PassRegistry&);
void initializeMachineUniformityInfoPrinterPassPass(PassRegistry &);
void initializeMachineUniformityAnalysisPassPass(PassRegistry &);
void initializeMachineVerifierPassPass(PassRegistry&);
void initializeMemoryDependenceWrapperPassPass(PassRegistry&);
void initializeMemorySSAWrapperPassPass(PassRegistry&);
void initializeMergeICmpsLegacyPassPass(PassRegistry &);
void initializeMergedLoadStoreMotionLegacyPassPass(PassRegistry&);
void initializeModuleSummaryIndexWrapperPassPass(PassRegistry&);
void initializeModuloScheduleTestPass(PassRegistry&);
void initializeNaryReassociateLegacyPassPass(PassRegistry&);
void initializeObjCARCContractLegacyPassPass(PassRegistry &);
void initializeOptimizationRemarkEmitterWrapperPassPass(PassRegistry&);
void initializeOptimizePHIsPass(PassRegistry&);
void initializePEIPass(PassRegistry&);
void initializePHIEliminationPass(PassRegistry&);
void initializePartiallyInlineLibCallsLegacyPassPass(PassRegistry&);
void initializePatchableFunctionPass(PassRegistry&);
void initializePeepholeOptimizerPass(PassRegistry&);
void initializePhiValuesWrapperPassPass(PassRegistry&);
void initializePhysicalRegisterUsageInfoPass(PassRegistry&);
void initializePlaceBackedgeSafepointsLegacyPassPass(PassRegistry &);
void initializePostDomOnlyPrinterWrapperPassPass(PassRegistry &);
void initializePostDomOnlyViewerWrapperPassPass(PassRegistry &);
void initializePostDomPrinterWrapperPassPass(PassRegistry &);
void initializePostDomViewerWrapperPassPass(PassRegistry &);
void initializePostDominatorTreeWrapperPassPass(PassRegistry&);
void initializePostMachineSchedulerPass(PassRegistry&);
void initializePostRAHazardRecognizerPass(PassRegistry&);
void initializePostRAMachineSinkingPass(PassRegistry&);
void initializePostRASchedulerPass(PassRegistry&);
void initializePreISelIntrinsicLoweringLegacyPassPass(PassRegistry&);
void initializePredicateInfoPrinterLegacyPassPass(PassRegistry&);
void initializePrintFunctionPassWrapperPass(PassRegistry&);
void initializePrintModulePassWrapperPass(PassRegistry&);
void initializeProcessImplicitDefsPass(PassRegistry&);
void initializeProfileSummaryInfoWrapperPassPass(PassRegistry&);
void initializePromoteLegacyPassPass(PassRegistry&);
void initializeRABasicPass(PassRegistry&);
void initializePseudoProbeInserterPass(PassRegistry &);
void initializeRAGreedyPass(PassRegistry&);
void initializeReachingDefAnalysisPass(PassRegistry&);
void initializeReassociateLegacyPassPass(PassRegistry&);
void initializeRedundantDbgInstEliminationPass(PassRegistry&);
void initializeRegAllocEvictionAdvisorAnalysisPass(PassRegistry &);
void initializeRegAllocFastPass(PassRegistry&);
void initializeRegAllocPriorityAdvisorAnalysisPass(PassRegistry &);
void initializeRegAllocScoringPass(PassRegistry &);
void initializeRegBankSelectPass(PassRegistry&);
void initializeRegToMemLegacyPass(PassRegistry&);
void initializeRegUsageInfoCollectorPass(PassRegistry&);
void initializeRegUsageInfoPropagationPass(PassRegistry&);
void initializeRegionInfoPassPass(PassRegistry&);
void initializeRegionOnlyPrinterPass(PassRegistry&);
void initializeRegionOnlyViewerPass(PassRegistry&);
void initializeRegionPrinterPass(PassRegistry&);
void initializeRegionViewerPass(PassRegistry&);
void initializeRegisterCoalescerPass(PassRegistry&);
void initializeRemoveRedundantDebugValuesPass(PassRegistry&);
void initializeRenameIndependentSubregsPass(PassRegistry&);
void initializeReplaceWithVeclibLegacyPass(PassRegistry &);
void initializeResetMachineFunctionPass(PassRegistry&);
void initializeSCEVAAWrapperPassPass(PassRegistry&);
void initializeSROALegacyPassPass(PassRegistry&);
void initializeSafeStackLegacyPassPass(PassRegistry&);
void initializeSafepointIRVerifierPass(PassRegistry&);
void initializeSelectOptimizePass(PassRegistry &);
void initializeScalarEvolutionWrapperPassPass(PassRegistry&);
void initializeScalarizeMaskedMemIntrinLegacyPassPass(PassRegistry &);
void initializeScalarizerLegacyPassPass(PassRegistry&);
void initializeScavengerTestPass(PassRegistry&);
void initializeScopedNoAliasAAWrapperPassPass(PassRegistry&);
void initializeSeparateConstOffsetFromGEPLegacyPassPass(PassRegistry &);
void initializeShadowStackGCLoweringPass(PassRegistry&);
void initializeShrinkWrapPass(PassRegistry&);
void initializeSimpleLoopUnswitchLegacyPassPass(PassRegistry&);
void initializeSingleLoopExtractorPass(PassRegistry&);
void initializeSinkingLegacyPassPass(PassRegistry&);
void initializeSjLjEHPreparePass(PassRegistry&);
void initializeSlotIndexesPass(PassRegistry&);
void initializeSpeculativeExecutionLegacyPassPass(PassRegistry&);
void initializeSpillPlacementPass(PassRegistry&);
void initializeStackColoringPass(PassRegistry&);
void initializeStackFrameLayoutAnalysisPassPass(PassRegistry &);
void initializeStackMapLivenessPass(PassRegistry&);
void initializeStackProtectorPass(PassRegistry&);
void initializeStackSafetyGlobalInfoWrapperPassPass(PassRegistry &);
void initializeStackSafetyInfoWrapperPassPass(PassRegistry &);
void initializeStackSlotColoringPass(PassRegistry&);
void initializeStraightLineStrengthReduceLegacyPassPass(PassRegistry &);
void initializeStripDebugMachineModulePass(PassRegistry &);
void initializeStripGCRelocatesLegacyPass(PassRegistry &);
void initializeStructurizeCFGLegacyPassPass(PassRegistry &);
void initializeTailCallElimPass(PassRegistry&);
void initializeTailDuplicatePass(PassRegistry&);
void initializeTargetLibraryInfoWrapperPassPass(PassRegistry&);
void initializeTargetPassConfigPass(PassRegistry&);
void initializeTargetTransformInfoWrapperPassPass(PassRegistry&);
void initializeTLSVariableHoistLegacyPassPass(PassRegistry &);
void initializeTwoAddressInstructionPassPass(PassRegistry&);
void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
void initializeTypePromotionLegacyPass(PassRegistry&);
void initializeUniformityInfoWrapperPassPass(PassRegistry &);
void initializeUnifyFunctionExitNodesLegacyPassPass(PassRegistry &);
void initializeUnifyLoopExitsLegacyPassPass(PassRegistry &);
void initializeUnpackMachineBundlesPass(PassRegistry&);
void initializeUnreachableBlockElimLegacyPassPass(PassRegistry&);
void initializeUnreachableMachineBlockElimPass(PassRegistry&);
void initializeVerifierLegacyPassPass(PassRegistry&);
void initializeVirtRegMapPass(PassRegistry&);
void initializeVirtRegRewriterPass(PassRegistry&);
void initializeWasmEHPreparePass(PassRegistry&);
void initializeWinEHPreparePass(PassRegistry&);
void initializeWriteBitcodePassPass(PassRegistry&);
void initializeXRayInstrumentationPass(PassRegistry&);

} // end namespace llvm

#endif // LLVM_INITIALIZEPASSES_H

// ---- File: DWARFLinker/DWARFStreamer.h ----
//===- DwarfStreamer.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKER_DWARFSTREAMER_H
#define LLVM_DWARFLINKER_DWARFSTREAMER_H

#include "llvm/BinaryFormat/Swift.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/DWARFLinker/DWARFLinker.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {
template <typename DataT> class AccelTable;

///   Users of DwarfStreamer should call the initialization code
///   for AsmPrinter before use:
///
///   InitializeAllTargetInfos();
///   InitializeAllTargetMCs();
///   InitializeAllTargets();
///   InitializeAllAsmPrinters();
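///
///   A construction sketch follows (assuming an already-opened
///   raw_pwrite_stream OutFile, a target Triple TheTriple, and no string
///   translation; the names are illustrative):
///
///   DwarfStreamer Streamer(DWARFLinker::OutputFileType::Object, OutFile,
///                          /*Translator=*/nullptr, /*Warning=*/nullptr);
///   if (Error Err =
///           Streamer.init(TheTriple, /*Swift5ReflectionSegmentName=*/""))
///     return Err;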

class MCCodeEmitter;
class DWARFDebugMacro;

/// The Dwarf streaming logic.
///
/// All interactions with the MC layer that is used to build the debug
/// information binary representation are handled in this class.
class DwarfStreamer : public DwarfEmitter {
public:
  DwarfStreamer(DWARFLinker::OutputFileType OutFileType,
                raw_pwrite_stream &OutFile,
                std::function<StringRef(StringRef Input)> Translator,
                DWARFLinker::messageHandler Warning)
      : OutFile(OutFile), OutFileType(OutFileType), Translator(Translator),
        WarningHandler(Warning) {}

  Error init(Triple TheTriple, StringRef Swift5ReflectionSegmentName);

  /// Dump the file to the disk.
  void finish() override;

  AsmPrinter &getAsmPrinter() const override { return *Asm; }

  /// Set the current output section to debug_info and change
  /// the MC Dwarf version to \p DwarfVersion.
  void switchToDebugInfoSection(unsigned DwarfVersion);

  /// Emit the compilation unit header for \p Unit in the
  /// debug_info section.
  ///
  /// As a side effect, this also switches the current Dwarf version
  /// of the MC layer to the one of U.getOrigUnit().
  void emitCompileUnitHeader(CompileUnit &Unit, unsigned DwarfVersion) override;

  /// Recursively emit the DIE tree rooted at \p Die.
  void emitDIE(DIE &Die) override;

  /// Emit the abbreviation table \p Abbrevs to the debug_abbrev section.
  void emitAbbrevs(const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs,
                   unsigned DwarfVersion) override;

  /// Emit DIE containing warnings.
  void emitPaperTrailWarningsDie(DIE &Die) override;

  /// Emit the contents \p SecData of the section named \p SecName.
  void emitSectionContents(StringRef SecData, StringRef SecName) override;

  /// Emit the string table described by \p Pool into .debug_str table.
  void emitStrings(const NonRelocatableStringpool &Pool) override;

  /// Emit the string table described by \p Pool into .debug_line_str table.
  void emitLineStrings(const NonRelocatableStringpool &Pool) override;

  /// Emit the swift_ast section stored in \p Buffer.
  void emitSwiftAST(StringRef Buffer) override;

  /// Emit the swift reflection section stored in \p Buffer.
  void emitSwiftReflectionSection(
      llvm::binaryformat::Swift5ReflectionSectionKind ReflSectionKind,
      StringRef Buffer, uint32_t Alignment, uint32_t Size) override;

  /// Emit debug ranges (.debug_ranges, .debug_rnglists) header.
  MCSymbol *emitDwarfDebugRangeListHeader(const CompileUnit &Unit) override;

  /// Emit debug ranges (.debug_ranges, .debug_rnglists) fragment.
  void emitDwarfDebugRangeListFragment(const CompileUnit &Unit,
                                       const AddressRanges &LinkedRanges,
                                       PatchLocation Patch) override;

  /// Emit debug ranges (.debug_ranges, .debug_rnglists) footer.
  void emitDwarfDebugRangeListFooter(const CompileUnit &Unit,
                                     MCSymbol *EndLabel) override;

  /// Emit debug locations (.debug_loc, .debug_loclists) header.
  MCSymbol *emitDwarfDebugLocListHeader(const CompileUnit &Unit) override;

  /// Emit .debug_addr header.
  MCSymbol *emitDwarfDebugAddrsHeader(const CompileUnit &Unit) override;

  /// Emit the addresses described by \p Addrs into .debug_addr table.
  void emitDwarfDebugAddrs(const SmallVector<uint64_t> &Addrs,
                           uint8_t AddrSize) override;

  /// Emit .debug_addr footer.
  void emitDwarfDebugAddrsFooter(const CompileUnit &Unit,
                                 MCSymbol *EndLabel) override;

  /// Emit debug locations (.debug_loc, .debug_loclists) fragment.
  void emitDwarfDebugLocListFragment(
      const CompileUnit &Unit,
      const DWARFLocationExpressionsVector &LinkedLocationExpression,
      PatchLocation Patch, DebugAddrPool &AddrPool) override;

  /// Emit debug locations (.debug_loc, .debug_loclists) footer.
  void emitDwarfDebugLocListFooter(const CompileUnit &Unit,
                                   MCSymbol *EndLabel) override;

  /// Emit .debug_aranges entries for \p Unit.
  void emitDwarfDebugArangesTable(const CompileUnit &Unit,
                                  const AddressRanges &LinkedRanges) override;

  uint64_t getRangesSectionSize() const override { return RangesSectionSize; }

  uint64_t getRngListsSectionSize() const override {
    return RngListsSectionSize;
  }

  /// Emit a .debug_line table entry for the specified \p LineTable.
  void emitLineTableForUnit(const DWARFDebugLine::LineTable &LineTable,
                            const CompileUnit &Unit,
                            OffsetsStringPool &DebugStrPool,
                            OffsetsStringPool &DebugLineStrPool) override;

  uint64_t getLineSectionSize() const override { return LineSectionSize; }

  /// Emit the .debug_pubnames contribution for \p Unit.
  void emitPubNamesForUnit(const CompileUnit &Unit) override;

  /// Emit the .debug_pubtypes contribution for \p Unit.
  void emitPubTypesForUnit(const CompileUnit &Unit) override;

  /// Emit a CIE.
  void emitCIE(StringRef CIEBytes) override;

  /// Emit an FDE with data \p Bytes.
  void emitFDE(uint32_t CIEOffset, uint32_t AddrSize, uint64_t Address,
               StringRef Bytes) override;

  /// Emit DWARF debug names.
  void emitDebugNames(AccelTable<DWARF5AccelTableStaticData> &Table) override;

  /// Emit Apple namespaces accelerator table.
  void emitAppleNamespaces(
      AccelTable<AppleAccelTableStaticOffsetData> &Table) override;

  /// Emit Apple names accelerator table.
  void
  emitAppleNames(AccelTable<AppleAccelTableStaticOffsetData> &Table) override;

  /// Emit Apple Objective-C accelerator table.
  void
  emitAppleObjc(AccelTable<AppleAccelTableStaticOffsetData> &Table) override;

  /// Emit Apple type accelerator table.
  void
  emitAppleTypes(AccelTable<AppleAccelTableStaticTypeData> &Table) override;

  uint64_t getFrameSectionSize() const override { return FrameSectionSize; }

  uint64_t getDebugInfoSectionSize() const override {
    return DebugInfoSectionSize;
  }

  uint64_t getDebugMacInfoSectionSize() const override {
    return MacInfoSectionSize;
  }

  uint64_t getDebugMacroSectionSize() const override {
    return MacroSectionSize;
  }

  uint64_t getLocListsSectionSize() const override {
    return LocListsSectionSize;
  }

  uint64_t getDebugAddrSectionSize() const override { return AddrSectionSize; }

  void emitMacroTables(DWARFContext *Context,
                       const Offset2UnitMap &UnitMacroMap,
                       OffsetsStringPool &StringPool) override;

private:
  inline void warn(const Twine &Warning, StringRef Context = "") {
    if (WarningHandler)
      WarningHandler(Warning, Context, nullptr);
  }

  void emitMacroTableImpl(const DWARFDebugMacro *MacroTable,
                          const Offset2UnitMap &UnitMacroMap,
                          OffsetsStringPool &StringPool, uint64_t &OutOffset);

  /// Emit piece of .debug_ranges for \p LinkedRanges.
  void emitDwarfDebugRangesTableFragment(const CompileUnit &Unit,
                                         const AddressRanges &LinkedRanges,
                                         PatchLocation Patch);

  /// Emit piece of .debug_rnglists for \p LinkedRanges.
  void emitDwarfDebugRngListsTableFragment(const CompileUnit &Unit,
                                           const AddressRanges &LinkedRanges,
                                           PatchLocation Patch);

  /// Emit piece of .debug_loc for \p LinkedRanges.
  void emitDwarfDebugLocTableFragment(
      const CompileUnit &Unit,
      const DWARFLocationExpressionsVector &LinkedLocationExpression,
      PatchLocation Patch);

  /// Emit piece of .debug_loclists for \p LinkedRanges.
  void emitDwarfDebugLocListsTableFragment(
      const CompileUnit &Unit,
      const DWARFLocationExpressionsVector &LinkedLocationExpression,
      PatchLocation Patch, DebugAddrPool &AddrPool);

  /// \defgroup LineTables Line table emission
  /// @{
  void emitLineTablePrologue(const DWARFDebugLine::Prologue &P,
                             OffsetsStringPool &DebugStrPool,
                             OffsetsStringPool &DebugLineStrPool);
  void emitLineTableString(const DWARFDebugLine::Prologue &P,
                           const DWARFFormValue &String,
                           OffsetsStringPool &DebugStrPool,
                           OffsetsStringPool &DebugLineStrPool);
  void emitLineTableProloguePayload(const DWARFDebugLine::Prologue &P,
                                    OffsetsStringPool &DebugStrPool,
                                    OffsetsStringPool &DebugLineStrPool);
  void emitLineTablePrologueV2IncludeAndFileTable(
      const DWARFDebugLine::Prologue &P, OffsetsStringPool &DebugStrPool,
      OffsetsStringPool &DebugLineStrPool);
  void emitLineTablePrologueV5IncludeAndFileTable(
      const DWARFDebugLine::Prologue &P, OffsetsStringPool &DebugStrPool,
      OffsetsStringPool &DebugLineStrPool);
  void emitLineTableRows(const DWARFDebugLine::LineTable &LineTable,
                         MCSymbol *LineEndSym, unsigned AddressByteSize);
  void emitIntOffset(uint64_t Offset, dwarf::DwarfFormat Format,
                     uint64_t &SectionSize);
  void emitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
                           dwarf::DwarfFormat Format, uint64_t &SectionSize);
  /// @}

  /// \defgroup MCObjects MC layer objects constructed by the streamer
  /// @{
  std::unique_ptr<MCRegisterInfo> MRI;
  std::unique_ptr<MCAsmInfo> MAI;
  std::unique_ptr<MCObjectFileInfo> MOFI;
  std::unique_ptr<MCContext> MC;
  MCAsmBackend *MAB; // Owned by MCStreamer
  std::unique_ptr<MCInstrInfo> MII;
  std::unique_ptr<MCSubtargetInfo> MSTI;
  MCInstPrinter *MIP; // Owned by AsmPrinter
  MCCodeEmitter *MCE; // Owned by MCStreamer
  MCStreamer *MS;     // Owned by AsmPrinter
  std::unique_ptr<TargetMachine> TM;
  std::unique_ptr<AsmPrinter> Asm;
  /// @}

  /// The output file we stream the linked Dwarf to.
  raw_pwrite_stream &OutFile;
  DWARFLinker::OutputFileType OutFileType = DWARFLinker::OutputFileType::Object;
  std::function<StringRef(StringRef Input)> Translator;

  uint64_t RangesSectionSize = 0;
  uint64_t RngListsSectionSize = 0;
  uint64_t LocSectionSize = 0;
  uint64_t LocListsSectionSize = 0;
  uint64_t LineSectionSize = 0;
  uint64_t FrameSectionSize = 0;
  uint64_t DebugInfoSectionSize = 0;
  uint64_t MacInfoSectionSize = 0;
  uint64_t MacroSectionSize = 0;
  uint64_t AddrSectionSize = 0;

  /// Keep track of emitted CUs and their Unique ID.
  struct EmittedUnit {
    unsigned ID;
    MCSymbol *LabelBegin;
  };
  std::vector<EmittedUnit> EmittedUnits;

  /// Emit the pubnames or pubtypes section contribution for \p
  /// Unit into \p Sec. The data is provided in \p Names.
  void emitPubSectionForUnit(MCSection *Sec, StringRef Name,
                             const CompileUnit &Unit,
                             const std::vector<CompileUnit::AccelInfo> &Names);

  DWARFLinker::messageHandler WarningHandler = nullptr;
};

} // end namespace llvm

#endif // LLVM_DWARFLINKER_DWARFSTREAMER_H

// ---- File: DWARFLinker/DWARFLinkerDeclContext.h ----
//===- DWARFLinkerDeclContext.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKER_DWARFLINKERDECLCONTEXT_H
#define LLVM_DWARFLINKER_DWARFLINKERDECLCONTEXT_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/NonRelocatableStringpool.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include <atomic>

namespace llvm {

class CompileUnit;
struct DeclMapInfo;

/// Small helper that resolves and caches file paths. This helps reduce the
/// number of calls to realpath, which is expensive. We assume the inputs are
/// files, and cache the realpath of their parent directory. This way we can
/// quickly resolve different files under the same path.
class CachedPathResolver {
public:
  /// Resolve a path by calling realpath and cache its result. The returned
  /// StringRef is interned in the given \p StringPool.
  StringRef resolve(const std::string &Path,
                    NonRelocatableStringpool &StringPool) {
    StringRef FileName = sys::path::filename(Path);
    StringRef ParentPath = sys::path::parent_path(Path);

    // If the ParentPath has not yet been resolved, resolve and cache it for
    // future look-ups.
    if (!ResolvedPaths.count(ParentPath)) {
      SmallString<256> RealPath;
      sys::fs::real_path(ParentPath, RealPath);
      ResolvedPaths.insert(
          {ParentPath, std::string(RealPath.c_str(), RealPath.size())});
    }

    // Join the file name again with the resolved path.
    SmallString<256> ResolvedPath(ResolvedPaths[ParentPath]);
    sys::path::append(ResolvedPath, FileName);
    return StringPool.internString(ResolvedPath);
  }

private:
  StringMap<std::string> ResolvedPaths;
};
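
// Example (sketch): two files under the same directory hit realpath only
// once, for the shared parent path; the paths are illustrative.
//
//   NonRelocatableStringpool Pool;
//   CachedPathResolver Resolver;
//   StringRef A = Resolver.resolve("/tmp/proj/a.c", Pool);
//   StringRef B = Resolver.resolve("/tmp/proj/b.c", Pool); // parent cached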

/// A DeclContext is a named program scope that is used for ODR uniquing of
/// types.
///
/// The set of DeclContexts for the ODR-subject parts of a Dwarf link is
/// expanded (and uniqued) with each new object file processed. We need to
/// determine the context of each DIE in a linked object file to see if the
/// corresponding type has already been emitted.
///
/// The contexts are conceptually organized as a tree (e.g. a function scope
/// is contained in a namespace scope that contains other scopes), but
/// storing/accessing them in an actual tree is too inefficient: we need to be
/// able to very quickly query a context for a given child context by name.
/// Storing a StringMap in each DeclContext would be too space inefficient.
///
/// The solution here is to give each DeclContext a link to its parent (this
/// allows walking up the tree), but to query the existence of a specific
/// DeclContext using a separate DenseMap keyed on the hash of the fully
/// qualified name of the context.
class DeclContext {
public:
  using Map = DenseSet<DeclContext *, DeclMapInfo>;

  DeclContext() : DefinedInClangModule(0), Parent(*this) {}

  DeclContext(unsigned Hash, uint32_t Line, uint32_t ByteSize, uint16_t Tag,
              StringRef Name, StringRef File, const DeclContext &Parent,
              DWARFDie LastSeenDIE = DWARFDie(), unsigned CUId = 0)
      : QualifiedNameHash(Hash), Line(Line), ByteSize(ByteSize), Tag(Tag),
        DefinedInClangModule(0), Name(Name), File(File), Parent(Parent),
        LastSeenDIE(LastSeenDIE), LastSeenCompileUnitID(CUId) {}

  uint32_t getQualifiedNameHash() const { return QualifiedNameHash; }

  bool setLastSeenDIE(CompileUnit &U, const DWARFDie &Die);

  void setHasCanonicalDIE() { HasCanonicalDIE = true; }

  bool hasCanonicalDIE() const { return HasCanonicalDIE; }

  uint32_t getCanonicalDIEOffset() const { return CanonicalDIEOffset; }
  void setCanonicalDIEOffset(uint32_t Offset) { CanonicalDIEOffset = Offset; }

  bool isDefinedInClangModule() const { return DefinedInClangModule; }
  void setDefinedInClangModule(bool Val) { DefinedInClangModule = Val; }

  uint16_t getTag() const { return Tag; }

private:
  friend DeclMapInfo;

  unsigned QualifiedNameHash = 0;
  uint32_t Line = 0;
  uint32_t ByteSize = 0;
  uint16_t Tag = dwarf::DW_TAG_compile_unit;
  unsigned DefinedInClangModule : 1;
  StringRef Name;
  StringRef File;
  const DeclContext &Parent;
  DWARFDie LastSeenDIE;
  uint32_t LastSeenCompileUnitID = 0;
  std::atomic<uint32_t> CanonicalDIEOffset = {0};
  bool HasCanonicalDIE = false;
};

/// This class gives a tree-like API to the DenseMap that stores the
/// DeclContext objects. It holds the BumpPtrAllocator where these objects will
/// be allocated.
class DeclContextTree {
public:
  /// Get the child of \a Context described by \a DIE in \a Unit. The
  /// required strings will be interned in \a StringPool.
  /// \returns The child DeclContext along with one bit that is set if
  /// this context is invalid.
  ///
  /// An invalid context means it shouldn't be considered for uniquing, but it
  /// is not returned as null, because some children of that context might be
  /// uniquing candidates.
  ///
  /// FIXME: The invalid bit along the return value is to emulate some
  /// dsymutil-classic functionality.
  PointerIntPair<DeclContext *, 1> getChildDeclContext(DeclContext &Context,
                                                       const DWARFDie &DIE,
                                                       CompileUnit &Unit,
                                                       bool InClangModule);
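
  // Consumption sketch for the returned pair (Tree, Ctxt, Die, and Unit are
  // caller-provided and named here only for illustration):
  //
  //   PointerIntPair<DeclContext *, 1> Res =
  //       Tree.getChildDeclContext(Ctxt, Die, Unit, /*InClangModule=*/false);
  //   DeclContext *Child = Res.getPointer();
  //   bool Invalid = Res.getInt();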

  DeclContext &getRoot() { return Root; }

private:
  BumpPtrAllocator Allocator;
  DeclContext Root;
  DeclContext::Map Contexts;

  /// Cached resolved paths from the line table.
  /// The key is <UniqueUnitID, FileIdx>.
  using ResolvedPathsMap = DenseMap<std::pair<unsigned, unsigned>, StringRef>;
  ResolvedPathsMap ResolvedPaths;

  /// Helper that resolves and caches fragments of file paths.
  CachedPathResolver PathResolver;

  /// String pool keeping real path bodies.
  NonRelocatableStringpool StringPool;

  StringRef getResolvedPath(CompileUnit &CU, unsigned FileNum,
                            const DWARFDebugLine::LineTable &LineTable);
};

/// Info type for the DenseMap storing the DeclContext pointers.
struct DeclMapInfo : private DenseMapInfo<DeclContext *> {
  using DenseMapInfo<DeclContext *>::getEmptyKey;
  using DenseMapInfo<DeclContext *>::getTombstoneKey;

  static unsigned getHashValue(const DeclContext *Ctxt) {
    return Ctxt->QualifiedNameHash;
  }

  static bool isEqual(const DeclContext *LHS, const DeclContext *RHS) {
    if (RHS == getEmptyKey() || RHS == getTombstoneKey())
      return RHS == LHS;
    return LHS->QualifiedNameHash == RHS->QualifiedNameHash &&
           LHS->Line == RHS->Line && LHS->ByteSize == RHS->ByteSize &&
           LHS->Name.data() == RHS->Name.data() &&
           LHS->File.data() == RHS->File.data() &&
           LHS->Parent.QualifiedNameHash == RHS->Parent.QualifiedNameHash;
  }
};

} // end namespace llvm

#endif // LLVM_DWARFLINKER_DWARFLINKERDECLCONTEXT_H

// ---- File: DWARFLinker/DWARFLinker.h ----
//===- DWARFLinker.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKER_DWARFLINKER_H
#define LLVM_DWARFLINKER_DWARFLINKER_H

#include "llvm/ADT/AddressRanges.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/AccelTable.h"
#include "llvm/CodeGen/NonRelocatableStringpool.h"
#include "llvm/DWARFLinker/DWARFLinkerCompileUnit.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFExpression.h"
#include <map>

namespace llvm {
class DWARFExpression;
class DWARFUnit;
class DataExtractor;
class DeclContextTree;
template <typename T> class SmallVectorImpl;

enum class DwarfLinkerClient { Dsymutil, LLD, General };

/// AddressesMap represents information about valid addresses used
/// by debug information. Valid addresses are those which point to
/// live code sections, i.e. relocations for these addresses point
/// into sections which are (or will be) placed into the resulting binary.
class AddressesMap {
public:
  virtual ~AddressesMap();

  /// Checks that there are valid relocations against a .debug_info
  /// section.
  virtual bool hasValidRelocs() = 0;

  /// Checks that the specified DWARF expression operand \p Op references live
  /// code section and returns the relocation adjustment value (to get the
  /// linked address this value might be added to the source expression operand
  /// address).
  /// \returns relocation adjustment value or std::nullopt if there is no
  /// corresponding live address.
  virtual std::optional<int64_t>
  getExprOpAddressRelocAdjustment(DWARFUnit &U,
                                  const DWARFExpression::Operation &Op,
                                  uint64_t StartOffset, uint64_t EndOffset) = 0;

  /// Checks that the specified subprogram \p DIE references the live code
  /// section and returns the relocation adjustment value (to get the linked
  /// address this value might be added to the source subprogram address).
  /// Allowed kinds of input DIE: DW_TAG_subprogram, DW_TAG_label.
  /// \returns relocation adjustment value or std::nullopt if there is no
  /// corresponding live address.
  virtual std::optional<int64_t>
  getSubprogramRelocAdjustment(const DWARFDie &DIE) = 0;

  /// Apply the valid relocations to the buffer \p Data, taking into
  /// account that Data is at \p BaseOffset in the .debug_info section.
  ///
  /// \returns true if any relocation has been applied.
  virtual bool applyValidRelocs(MutableArrayRef<char> Data, uint64_t BaseOffset,
                                bool IsLittleEndian) = 0;

  /// Erases all data.
  virtual void clear() = 0;
};
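
// A minimal stub implementation sketch (treats every address as dead; a real
// subclass would consult the object file's relocations):
//
//   struct EmptyAddressesMap : AddressesMap {
//     bool hasValidRelocs() override { return false; }
//     std::optional<int64_t> getExprOpAddressRelocAdjustment(
//         DWARFUnit &, const DWARFExpression::Operation &, uint64_t,
//         uint64_t) override {
//       return std::nullopt;
//     }
//     std::optional<int64_t>
//     getSubprogramRelocAdjustment(const DWARFDie &) override {
//       return std::nullopt;
//     }
//     bool applyValidRelocs(MutableArrayRef<char>, uint64_t, bool) override {
//       return false;
//     }
//     void clear() override {}
//   };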

using Offset2UnitMap = DenseMap<uint64_t, CompileUnit *>;

struct DebugAddrPool {
  DenseMap<uint64_t, uint64_t> AddrIndexMap;
  SmallVector<uint64_t> Addrs;

  uint64_t getAddrIndex(uint64_t Addr) {
    DenseMap<uint64_t, uint64_t>::iterator It = AddrIndexMap.find(Addr);
    if (It == AddrIndexMap.end()) {
      It = AddrIndexMap.insert(std::make_pair(Addr, Addrs.size())).first;
      Addrs.push_back(Addr);
    }
    return It->second;
  }

  void clear() {
    AddrIndexMap.clear();
    Addrs.clear();
  }
};
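
// Usage sketch: indices are handed out in first-seen order, so a repeated
// address maps back to its original .debug_addr slot.
//
//   DebugAddrPool Pool;
//   uint64_t I0 = Pool.getAddrIndex(0x1000); // 0
//   uint64_t I1 = Pool.getAddrIndex(0x2000); // 1
//   uint64_t I2 = Pool.getAddrIndex(0x1000); // 0 again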

/// DwarfEmitter presents an interface to generate all debug info tables.
class DwarfEmitter {
public:
  virtual ~DwarfEmitter();

  /// Emit DIE containing warnings.
  virtual void emitPaperTrailWarningsDie(DIE &Die) = 0;

  /// Emit the section named \p SecName with data \p SecData.
  virtual void emitSectionContents(StringRef SecData, StringRef SecName) = 0;

  /// Emit the abbreviation table \p Abbrevs to the .debug_abbrev section.
  virtual void
  emitAbbrevs(const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs,
              unsigned DwarfVersion) = 0;

  /// Emit the string table described by \p Pool into .debug_str table.
  virtual void emitStrings(const NonRelocatableStringpool &Pool) = 0;

  /// Emit the string table described by \p Pool into .debug_line_str table.
  virtual void emitLineStrings(const NonRelocatableStringpool &Pool) = 0;

  /// Emit DWARF debug names.
  virtual void
  emitDebugNames(AccelTable<DWARF5AccelTableStaticData> &Table) = 0;

  /// Emit Apple namespaces accelerator table.
  virtual void
  emitAppleNamespaces(AccelTable<AppleAccelTableStaticOffsetData> &Table) = 0;

  /// Emit Apple names accelerator table.
  virtual void
  emitAppleNames(AccelTable<AppleAccelTableStaticOffsetData> &Table) = 0;

  /// Emit Apple Objective-C accelerator table.
  virtual void
  emitAppleObjc(AccelTable<AppleAccelTableStaticOffsetData> &Table) = 0;

  /// Emit Apple type accelerator table.
  virtual void
  emitAppleTypes(AccelTable<AppleAccelTableStaticTypeData> &Table) = 0;

  /// Emit debug ranges (.debug_ranges, .debug_rnglists) header.
  virtual MCSymbol *emitDwarfDebugRangeListHeader(const CompileUnit &Unit) = 0;

  /// Emit debug ranges (.debug_ranges, .debug_rnglists) fragment.
  virtual void
  emitDwarfDebugRangeListFragment(const CompileUnit &Unit,
                                  const AddressRanges &LinkedRanges,
                                  PatchLocation Patch) = 0;

  /// Emit debug ranges (.debug_ranges, .debug_rnglists) footer.
  virtual void emitDwarfDebugRangeListFooter(const CompileUnit &Unit,
                                             MCSymbol *EndLabel) = 0;

  /// Emit debug locations (.debug_loc, .debug_loclists) header.
  virtual MCSymbol *emitDwarfDebugLocListHeader(const CompileUnit &Unit) = 0;

  /// Emit debug locations (.debug_loc, .debug_loclists) fragment.
  virtual void emitDwarfDebugLocListFragment(
      const CompileUnit &Unit,
      const DWARFLocationExpressionsVector &LinkedLocationExpression,
      PatchLocation Patch, DebugAddrPool &AddrPool) = 0;

  /// Emit debug locations (.debug_loc, .debug_loclists) footer.
  virtual void emitDwarfDebugLocListFooter(const CompileUnit &Unit,
                                           MCSymbol *EndLabel) = 0;

  /// Emit .debug_addr header.
  virtual MCSymbol *emitDwarfDebugAddrsHeader(const CompileUnit &Unit) = 0;

  /// Emit the addresses described by \p Addrs into the .debug_addr section.
  virtual void emitDwarfDebugAddrs(const SmallVector<uint64_t> &Addrs,
                                   uint8_t AddrSize) = 0;

  /// Emit .debug_addr footer.
  virtual void emitDwarfDebugAddrsFooter(const CompileUnit &Unit,
                                         MCSymbol *EndLabel) = 0;

  /// Emit .debug_aranges entries for \p Unit.
  virtual void
  emitDwarfDebugArangesTable(const CompileUnit &Unit,
                             const AddressRanges &LinkedRanges) = 0;

  /// Emit the specified \p LineTable into the .debug_line section.
  virtual void emitLineTableForUnit(const DWARFDebugLine::LineTable &LineTable,
                                    const CompileUnit &Unit,
                                    OffsetsStringPool &DebugStrPool,
                                    OffsetsStringPool &DebugLineStrPool) = 0;

  /// Emit the .debug_pubnames contribution for \p Unit.
  virtual void emitPubNamesForUnit(const CompileUnit &Unit) = 0;

  /// Emit the .debug_pubtypes contribution for \p Unit.
  virtual void emitPubTypesForUnit(const CompileUnit &Unit) = 0;

  /// Emit a CIE.
  virtual void emitCIE(StringRef CIEBytes) = 0;

  /// Emit an FDE with data \p Bytes.
  virtual void emitFDE(uint32_t CIEOffset, uint32_t AddrSize, uint64_t Address,
                       StringRef Bytes) = 0;

  /// Emit the compilation unit header for \p Unit in the
  /// .debug_info section.
  ///
  /// As a side effect, this also switches the current Dwarf version
  /// of the MC layer to the one of U.getOrigUnit().
  virtual void emitCompileUnitHeader(CompileUnit &Unit,
                                     unsigned DwarfVersion) = 0;

  /// Recursively emit the DIE tree rooted at \p Die.
  virtual void emitDIE(DIE &Die) = 0;

  /// Emit all available macro tables (DWARF v4 and DWARF v5).
  /// Use \p UnitMacroMap to get compilation unit by macro table offset.
  /// Side effects: Fill \p StringPool with macro strings, update
  /// DW_AT_macro_info, DW_AT_macros attributes for corresponding compile
  /// units.
  virtual void emitMacroTables(DWARFContext *Context,
                               const Offset2UnitMap &UnitMacroMap,
                               OffsetsStringPool &StringPool) = 0;

  /// Returns size of generated .debug_line section.
  virtual uint64_t getLineSectionSize() const = 0;

  /// Returns size of generated .debug_frame section.
  virtual uint64_t getFrameSectionSize() const = 0;

  /// Returns size of generated .debug_ranges section.
  virtual uint64_t getRangesSectionSize() const = 0;

  /// Returns size of generated .debug_rnglists section.
  virtual uint64_t getRngListsSectionSize() const = 0;

  /// Returns size of generated .debug_info section.
  virtual uint64_t getDebugInfoSectionSize() const = 0;

  /// Returns size of generated .debug_macinfo section.
  virtual uint64_t getDebugMacInfoSectionSize() const = 0;

  /// Returns size of generated .debug_macro section.
  virtual uint64_t getDebugMacroSectionSize() const = 0;

  /// Returns size of generated .debug_loclists section.
  virtual uint64_t getLocListsSectionSize() const = 0;

  /// Returns size of generated .debug_addr section.
  virtual uint64_t getDebugAddrSectionSize() const = 0;

  /// Dump the file to the disk.
  virtual void finish() = 0;

  /// Emit the swift_ast section stored in \p Buffer.
  virtual void emitSwiftAST(StringRef Buffer) = 0;

  /// Emit the swift reflection section stored in \p Buffer.
  virtual void emitSwiftReflectionSection(
      llvm::binaryformat::Swift5ReflectionSectionKind ReflSectionKind,
      StringRef Buffer, uint32_t Alignment, uint32_t Size) = 0;

  /// Returns underlying AsmPrinter.
  virtual AsmPrinter &getAsmPrinter() const = 0;
};

class DwarfStreamer;
using UnitListTy = std::vector<std::unique_ptr<CompileUnit>>;

/// This class represents the DWARF information for a source file
/// and its address map.
class DWARFFile {
public:
  DWARFFile(StringRef Name, std::unique_ptr<DWARFContext> Dwarf,
            std::unique_ptr<AddressesMap> Addresses,
            const std::vector<std::string> &Warnings)
      : FileName(Name), Dwarf(std::move(Dwarf)),
        Addresses(std::move(Addresses)), Warnings(Warnings) {}

  /// The object file name.
  StringRef FileName;

  /// The source DWARF information.
  std::unique_ptr<DWARFContext> Dwarf;

  /// Helpful address information (list of valid address ranges, relocations).
  std::unique_ptr<AddressesMap> Addresses;

  /// Warnings for this object file.
  const std::vector<std::string> &Warnings;
};
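
// Construction sketch (ObjFile, AddrMap, and Warnings are caller-provided;
// AddrMap is some concrete AddressesMap implementation, and "input.o" is an
// illustrative name):
//
//   std::vector<std::string> Warnings;
//   DWARFFile File("input.o", DWARFContext::create(*ObjFile),
//                  std::move(AddrMap), Warnings);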

typedef std::map<std::string, std::string> swiftInterfacesMap;
typedef std::map<std::string, std::string> objectPrefixMap;

typedef function_ref<void(const DWARFUnit &Unit)> CompileUnitHandler;

/// The core of the Dwarf linking logic.
///
/// The generation of the dwarf information from the object files will be
/// driven by the selection of 'root DIEs', which are DIEs that
/// describe variables or functions that resolve to corresponding
/// code sections (and thus have entries in the Addresses map). All the debug
/// information that will be generated (the DIEs, but also the line
/// tables, ranges, ...) is derived from that set of root DIEs.
///
/// The root DIEs are identified because they contain relocations that
/// point to a code section (the low_pc for a function, the location for
/// a variable). These relocations are called ValidRelocs in the
/// AddressesInfo and are gathered as a very first step when we start
/// processing an object file.
class DWARFLinker {
public:
  typedef std::function<void(const Twine &Warning, StringRef Context,
                             const DWARFDie *DIE)>
      messageHandler;
  DWARFLinker(messageHandler ErrorHandler, messageHandler WarningHandler,
              std::function<StringRef(StringRef)> StringsTranslator)
      : DwarfLinkerClientID(DwarfLinkerClient::Dsymutil),
        StringsTranslator(StringsTranslator), ErrorHandler(ErrorHandler),
        WarningHandler(WarningHandler) {}

  static std::unique_ptr<DWARFLinker> createLinker(
      messageHandler ErrorHandler, messageHandler WarningHandler,
      std::function<StringRef(StringRef)> StringsTranslator = nullptr) {
    return std::make_unique<DWARFLinker>(ErrorHandler, WarningHandler,
                                         StringsTranslator);
  }

  /// Type of output file.
  enum class OutputFileType {
    Object,
    Assembly,
  };

  /// The kind of accelerator tables we should emit.
  enum class AccelTableKind : uint8_t {
    Apple,     ///< .apple_names, .apple_namespaces, .apple_types, .apple_objc.
    Pub,       ///< .debug_pubnames, .debug_pubtypes
    DebugNames ///< .debug_names.
  };
  typedef std::function<void(const DWARFFile &File)> inputVerificationHandler;
  typedef std::function<ErrorOr<DWARFFile &>(StringRef ContainerName,
                                             StringRef Path)>
      objFileLoader;

  Error createEmitter(const Triple &TheTriple, OutputFileType FileType,
                      raw_pwrite_stream &OutFile);

  DwarfEmitter *getEmitter();

  /// Add an object file to be linked. Pre-load its compile unit DIEs. Call
  /// \p OnCUDieLoaded for each compile unit DIE. If the specified \p File
  /// has a reference to a Clang module, then that module is
  /// pre-loaded by \p Loader in the !Update case.
  ///
  /// \pre The NoODR and Update options should be set before calling
  /// addObjectFile.
  void addObjectFile(
      DWARFFile &File, objFileLoader Loader = nullptr,
      CompileUnitHandler OnCUDieLoaded = [](const DWARFUnit &) {});

  /// Link debug info for the added object files; they are all linked together.
  Error link();
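
  // End-to-end sketch (TheTriple, OutFile, File, and the handlers are caller
  // state; the names are illustrative):
  //
  //   auto Linker = DWARFLinker::createLinker(ErrHandler, WarnHandler);
  //   if (Error Err = Linker->createEmitter(
  //           TheTriple, DWARFLinker::OutputFileType::Object, OutFile))
  //     return Err;
  //   Linker->addObjectFile(File);
  //   if (Error Err = Linker->link())
  //     return Err;
  //   Linker->getEmitter()->finish();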

  /// A number of methods for setting various linking options:

  /// Enable logging of the linking process to the standard output.
  void setVerbosity(bool Verbose) { Options.Verbose = Verbose; }

  /// Print statistics to standard output.
  void setStatistics(bool Statistics) { Options.Statistics = Statistics; }

  /// Verify the input DWARF.
  void setVerifyInputDWARF(bool Verify) { Options.VerifyInputDWARF = Verify; }

  /// Do not unique types according to ODR.
  void setNoODR(bool NoODR) { Options.NoODR = NoODR; }

  /// Update index tables only (do not modify the rest of the DWARF).
  void setUpdateIndexTablesOnly(bool Update) { Options.Update = Update; }

  /// Allow generating valid, but non-deterministic output.
  void setAllowNonDeterministicOutput(bool) { /* Nothing to do. */ }

  /// Set whether to keep the enclosing function for a static variable.
  void setKeepFunctionForStatic(bool KeepFunctionForStatic) {
    Options.KeepFunctionForStatic = KeepFunctionForStatic;
  }

  /// Use the specified number of threads for linking files in parallel.
  void setNumThreads(unsigned NumThreads) { Options.Threads = NumThreads; }

  /// Add a kind of accelerator table to be generated.
  void addAccelTableKind(AccelTableKind Kind) {
    assert(!llvm::is_contained(Options.AccelTables, Kind));
    Options.AccelTables.emplace_back(Kind);
  }

  /// Set the prepend path for Clang modules.
  void setPrependPath(const std::string &Ppath) { Options.PrependPath = Ppath; }

  /// Set the estimated number of object files, for preliminary data allocation.
  void setEstimatedObjfilesAmount(unsigned ObjFilesNum) {
    ObjectContexts.reserve(ObjFilesNum);
  }

  /// Set the verification handler used to report verification
  /// errors.
  void setInputVerificationHandler(inputVerificationHandler Handler) {
    Options.InputVerificationHandler = Handler;
  }

  /// Set map for Swift interfaces.
  void setSwiftInterfacesMap(swiftInterfacesMap *Map) {
    Options.ParseableSwiftInterfaces = Map;
  }

  /// Set prefix map for objects.
  void setObjectPrefixMap(objectPrefixMap *Map) {
    Options.ObjectPrefixMap = Map;
  }

  /// Set target DWARF version.
  Error setTargetDWARFVersion(uint16_t TargetDWARFVersion) {
    if ((TargetDWARFVersion < 1) || (TargetDWARFVersion > 5))
      return createStringError(std::errc::invalid_argument,
                               "unsupported DWARF version: %d",
                               TargetDWARFVersion);

    Options.TargetDWARFVersion = TargetDWARFVersion;
    return Error::success();
  }

private:
  /// Flags passed to DWARFLinker::lookForDIEsToKeep.
  enum TraversalFlags {
    TF_Keep = 1 << 0,            ///< Mark the traversed DIEs as kept.
    TF_InFunctionScope = 1 << 1, ///< Current scope is a function scope.
    TF_DependencyWalk = 1 << 2,  ///< Walking the dependencies of a kept DIE.
    TF_ParentWalk = 1 << 3,      ///< Walking up the parents of a kept DIE.
    TF_ODR = 1 << 4,             ///< Use the ODR while keeping dependents.
    TF_SkipPC = 1 << 5,          ///< Skip all location attributes.
  };

  /// The distinct types of work performed by the work loop.
  enum class WorklistItemType {
    /// Given a DIE, look for DIEs to be kept.
    LookForDIEsToKeep,
    /// Given a DIE, look for children of this DIE to be kept.
    LookForChildDIEsToKeep,
    /// Given a DIE, look for DIEs referencing this DIE to be kept.
    LookForRefDIEsToKeep,
    /// Given a DIE, look for parent DIEs to be kept.
    LookForParentDIEsToKeep,
    /// Given a DIE, update its incompleteness based on whether its children are
    /// incomplete.
    UpdateChildIncompleteness,
    /// Given a DIE, update its incompleteness based on whether the DIEs it
    /// references are incomplete.
    UpdateRefIncompleteness,
    /// Given a DIE, mark it as ODR Canonical if applicable.
    MarkODRCanonicalDie,
  };

  /// This class represents an item in the work list. The type defines what kind
  /// of work needs to be performed when processing the current item. The flags
  /// and info fields are optional based on the type.
  struct WorklistItem {
    DWARFDie Die;
    WorklistItemType Type;
    CompileUnit &CU;
    unsigned Flags;
    union {
      const unsigned AncestorIdx;
      CompileUnit::DIEInfo *OtherInfo;
    };

    WorklistItem(DWARFDie Die, CompileUnit &CU, unsigned Flags,
                 WorklistItemType T = WorklistItemType::LookForDIEsToKeep)
        : Die(Die), Type(T), CU(CU), Flags(Flags), AncestorIdx(0) {}

    WorklistItem(DWARFDie Die, CompileUnit &CU, WorklistItemType T,
                 CompileUnit::DIEInfo *OtherInfo = nullptr)
        : Die(Die), Type(T), CU(CU), Flags(0), OtherInfo(OtherInfo) {}

    WorklistItem(unsigned AncestorIdx, CompileUnit &CU, unsigned Flags)
        : Type(WorklistItemType::LookForParentDIEsToKeep), CU(CU), Flags(Flags),
          AncestorIdx(AncestorIdx) {}
  };
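
  // Illustrative note: the active union member is selected by the constructor
  // used above. The AncestorIdx form belongs to LookForParentDIEsToKeep
  // items, while the OtherInfo form accompanies the incompleteness-updating
  // item types (UpdateChildIncompleteness, UpdateRefIncompleteness).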

  /// Verify the given DWARF file.
  void verifyInput(const DWARFFile &File);

  /// Returns true if we need to translate strings.
  bool needToTranslateStrings() { return StringsTranslator != nullptr; }

  void reportWarning(const Twine &Warning, const DWARFFile &File,
                     const DWARFDie *DIE = nullptr) const {
    if (WarningHandler != nullptr)
      WarningHandler(Warning, File.FileName, DIE);
  }

  void reportError(const Twine &Warning, const DWARFFile &File,
                   const DWARFDie *DIE = nullptr) const {
    if (ErrorHandler != nullptr)
      ErrorHandler(Warning, File.FileName, DIE);
  }

  /// Emit warnings as DWARF compile units to leave a trail after linking.
  bool emitPaperTrailWarnings(const DWARFFile &File,
                              OffsetsStringPool &StringPool);

  void copyInvariantDebugSection(DWARFContext &Dwarf);

  /// Keep information for referenced clang module: already loaded DWARF info
  /// of the clang module and a CompileUnit of the module.
  struct RefModuleUnit {
    RefModuleUnit(DWARFFile &File, std::unique_ptr<CompileUnit> Unit)
        : File(File), Unit(std::move(Unit)) {}
    RefModuleUnit(RefModuleUnit &&Other)
        : File(Other.File), Unit(std::move(Other.Unit)) {}
    RefModuleUnit(const RefModuleUnit &) = delete;

    DWARFFile &File;
    std::unique_ptr<CompileUnit> Unit;
  };
  using ModuleUnitListTy = std::vector<RefModuleUnit>;

  /// Keeps track of data associated with one object during linking.
  struct LinkContext {
    DWARFFile &File;
    UnitListTy CompileUnits;
    ModuleUnitListTy ModuleUnits;
    bool Skip = false;

    LinkContext(DWARFFile &File) : File(File) {}

    /// Clear part of the context that's no longer needed when we're done with
    /// the debug object.
    void clear() {
      CompileUnits.clear();
      File.Addresses->clear();
    }
  };

  /// Called before emitting object data.
  void cleanupAuxiliarryData(LinkContext &Context);

  /// Look at the parents of the given DIE and decide whether they should be
  /// kept.
  void lookForParentDIEsToKeep(unsigned AncestorIdx, CompileUnit &CU,
                               unsigned Flags,
                               SmallVectorImpl<WorklistItem> &Worklist);

  /// Look at the children of the given DIE and decide whether they should be
  /// kept.
  void lookForChildDIEsToKeep(const DWARFDie &Die, CompileUnit &CU,
                              unsigned Flags,
                              SmallVectorImpl<WorklistItem> &Worklist);

  /// Look at DIEs referenced by the given DIE and decide whether they should
  /// be kept. All DIEs referenced through attributes should be kept.
  void lookForRefDIEsToKeep(const DWARFDie &Die, CompileUnit &CU,
                            unsigned Flags, const UnitListTy &Units,
                            const DWARFFile &File,
                            SmallVectorImpl<WorklistItem> &Worklist);

  /// Mark the context corresponding to the specified \p Die as having a
  /// canonical DIE, if applicable.
  void markODRCanonicalDie(const DWARFDie &Die, CompileUnit &CU);

  /// \defgroup FindRootDIEs Find DIEs corresponding to Address map entries.
  ///
  /// @{
  /// Recursively walk the \p DIE tree and look for DIEs to
  /// keep. Store that information in \p CU's DIEInfo.
  ///
  /// Incompleteness of a DIE is recorded in its DIEInfo rather than returned.
  void lookForDIEsToKeep(AddressesMap &RelocMgr, const UnitListTy &Units,
                         const DWARFDie &DIE, const DWARFFile &File,
                         CompileUnit &CU, unsigned Flags);

  /// Check whether the specified \p CUDie is a Clang module reference.
  /// If \p Quiet is false, display error messages.
  /// \return first == true if CUDie is a Clang module reference.
  ///         second == true if the module is already loaded.
  std::pair<bool, bool> isClangModuleRef(const DWARFDie &CUDie,
                                         std::string &PCMFile,
                                         LinkContext &Context, unsigned Indent,
                                         bool Quiet);

  /// If this compile unit is really a skeleton CU that points to a
  /// clang module, register it in ClangModules and return true.
  ///
  /// A skeleton CU is a CU without children, with a DW_AT_gnu_dwo_name
  /// attribute pointing to the module, and a DW_AT_gnu_dwo_id attribute
  /// carrying the module hash.
  bool registerModuleReference(const DWARFDie &CUDie, LinkContext &Context,
                               objFileLoader Loader,
                               CompileUnitHandler OnCUDieLoaded,
                               unsigned Indent = 0);

  /// Recursively add the debug info in this clang module .pcm
  /// file (and all the modules imported by it in a bottom-up fashion)
  /// to ModuleUnits.
  Error loadClangModule(objFileLoader Loader, const DWARFDie &CUDie,
                        const std::string &PCMFile, LinkContext &Context,
                        CompileUnitHandler OnCUDieLoaded, unsigned Indent = 0);

  /// Clone specified Clang module unit \p Unit.
  Error cloneModuleUnit(LinkContext &Context, RefModuleUnit &Unit,
                        DeclContextTree &ODRContexts,
                        OffsetsStringPool &DebugStrPool,
                        OffsetsStringPool &DebugLineStrPool,
                        unsigned Indent = 0);

  unsigned shouldKeepDIE(AddressesMap &RelocMgr, const DWARFDie &DIE,
                         const DWARFFile &File, CompileUnit &Unit,
                         CompileUnit::DIEInfo &MyInfo, unsigned Flags);

  /// Check whether the variable has a DWARF expression containing an
  /// operation referencing a live address (e.g. DW_OP_addr, DW_OP_addrx).
  /// \returns first is true if the expression has an operation referencing an
  /// address.
  ///          second is the relocation adjustment value if the live address is
  ///          referenced.
  std::pair<bool, std::optional<int64_t>>
  getVariableRelocAdjustment(AddressesMap &RelocMgr, const DWARFDie &DIE);
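
  // An illustrative call sketch (assumes in-scope `RelocMgr` and `Die` values
  // of the parameter types):
  //   auto [HasLiveAddr, Adjustment] =
  //       getVariableRelocAdjustment(RelocMgr, Die);
  //   if (HasLiveAddr && Adjustment)
  //     int64_t Delta = *Adjustment; // adjustment to apply to the address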

  /// Check if a variable describing DIE should be kept.
  /// \returns updated TraversalFlags.
  unsigned shouldKeepVariableDIE(AddressesMap &RelocMgr, const DWARFDie &DIE,
                                 CompileUnit::DIEInfo &MyInfo, unsigned Flags);

  unsigned shouldKeepSubprogramDIE(AddressesMap &RelocMgr, const DWARFDie &DIE,
                                   const DWARFFile &File, CompileUnit &Unit,
                                   CompileUnit::DIEInfo &MyInfo,
                                   unsigned Flags);

  /// Resolve the DIE attribute reference that has been extracted in \p
  /// RefValue. The resulting DIE might be in another CompileUnit which is
  /// stored into \p ReferencedCU. \returns null if resolving fails for any
  /// reason.
  DWARFDie resolveDIEReference(const DWARFFile &File, const UnitListTy &Units,
                               const DWARFFormValue &RefValue,
                               const DWARFDie &DIE, CompileUnit *&RefCU);

  /// @}

  /// \defgroup Methods used to link the debug information
  ///
  /// @{

  struct DWARFLinkerOptions;

  class DIECloner {
    DWARFLinker &Linker;
    DwarfEmitter *Emitter;
    DWARFFile &ObjFile;
    OffsetsStringPool &DebugStrPool;
    OffsetsStringPool &DebugLineStrPool;
    DebugAddrPool AddrPool;

    /// Allocator used for all the DIEValue objects.
    BumpPtrAllocator &DIEAlloc;

    std::vector<std::unique_ptr<CompileUnit>> &CompileUnits;

    /// Keeps mapping from offset of the macro table to corresponding
    /// compile unit.
    Offset2UnitMap UnitMacroMap;

    bool Update;

  public:
    DIECloner(DWARFLinker &Linker, DwarfEmitter *Emitter, DWARFFile &ObjFile,
              BumpPtrAllocator &DIEAlloc,
              std::vector<std::unique_ptr<CompileUnit>> &CompileUnits,
              bool Update, OffsetsStringPool &DebugStrPool,
              OffsetsStringPool &DebugLineStrPool)
        : Linker(Linker), Emitter(Emitter), ObjFile(ObjFile),
          DebugStrPool(DebugStrPool), DebugLineStrPool(DebugLineStrPool),
          DIEAlloc(DIEAlloc), CompileUnits(CompileUnits), Update(Update) {}

    /// Recursively clone \p InputDIE into a tree of DIE objects
    /// where useless (as decided by lookForDIEsToKeep()) bits have been
    /// stripped out and addresses have been rewritten according to the
    /// address map.
    ///
    /// \param OutOffset is the offset of the cloned DIE in the output
    /// compile unit.
    /// \param PCOffset (while cloning a function scope) is the offset
    /// applied to the entry point of the function to get the linked address.
    /// \param Die the output DIE to use; pass nullptr to create one.
    /// \returns the root of the cloned tree or null if nothing was selected.
    DIE *cloneDIE(const DWARFDie &InputDIE, const DWARFFile &File,
                  CompileUnit &U, int64_t PCOffset, uint32_t OutOffset,
                  unsigned Flags, bool IsLittleEndian, DIE *Die = nullptr);

    /// Construct the output DIE tree by cloning the DIEs we
    /// chose to keep above. If there are no valid relocs, then there's
    /// nothing to clone/emit.
    uint64_t cloneAllCompileUnits(DWARFContext &DwarfContext,
                                  const DWARFFile &File, bool IsLittleEndian);

    /// Emit the .debug_addr section for the \p Unit.
    void emitDebugAddrSection(CompileUnit &Unit,
                              const uint16_t DwarfVersion) const;

    using ExpressionHandlerRef = function_ref<void(
        SmallVectorImpl<uint8_t> &, SmallVectorImpl<uint8_t> &,
        int64_t AddrRelocAdjustment)>;

    /// Compute and emit the debug locations (.debug_loc, .debug_loclists)
    /// for \p Unit, and patch the attributes referencing them.
    void generateUnitLocations(CompileUnit &Unit, const DWARFFile &File,
                               ExpressionHandlerRef ExprHandler);

  private:
    using AttributeSpec = DWARFAbbreviationDeclaration::AttributeSpec;

    /// Information gathered and exchanged between the various
    /// clone*Attributes helpers about the attributes of a particular DIE.
    struct AttributesInfo {
      /// Names.
      DwarfStringPoolEntryRef Name, MangledName, NameWithoutTemplate;

      /// Offsets in the string pool.
      uint32_t NameOffset = 0;
      uint32_t MangledNameOffset = 0;

      /// Offset to apply to PC addresses inside a function.
      int64_t PCOffset = 0;

      /// Does the DIE have a low_pc attribute?
      bool HasLowPc = false;

      /// Does the DIE have a ranges attribute?
      bool HasRanges = false;

      /// Is this DIE only a declaration?
      bool IsDeclaration = false;

      AttributesInfo() = default;
    };

    /// Helper for cloneDIE.
    unsigned cloneAttribute(DIE &Die, const DWARFDie &InputDIE,
                            const DWARFFile &File, CompileUnit &U,
                            const DWARFFormValue &Val,
                            const AttributeSpec AttrSpec, unsigned AttrSize,
                            AttributesInfo &AttrInfo, bool IsLittleEndian);

    /// Clone a string attribute described by \p AttrSpec and add
    /// it to \p Die.
    /// \returns the size of the new attribute.
    unsigned cloneStringAttribute(DIE &Die, AttributeSpec AttrSpec,
                                  const DWARFFormValue &Val, const DWARFUnit &U,
                                  AttributesInfo &Info);

    /// Clone an attribute referencing another DIE and add
    /// it to \p Die.
    /// \returns the size of the new attribute.
    unsigned cloneDieReferenceAttribute(DIE &Die, const DWARFDie &InputDIE,
                                        AttributeSpec AttrSpec,
                                        unsigned AttrSize,
                                        const DWARFFormValue &Val,
                                        const DWARFFile &File,
                                        CompileUnit &Unit);

    /// Clone a DWARF expression that may be referencing another DIE.
    void cloneExpression(DataExtractor &Data, DWARFExpression Expression,
                         const DWARFFile &File, CompileUnit &Unit,
                         SmallVectorImpl<uint8_t> &OutputBuffer,
                         int64_t AddrRelocAdjustment, bool IsLittleEndian);

    /// Clone a block attribute (e.g. a location expression or a constant
    /// block) and add it to \p Die.
    /// \returns the size of the new attribute.
    unsigned cloneBlockAttribute(DIE &Die, const DWARFDie &InputDIE,
                                 const DWARFFile &File, CompileUnit &Unit,
                                 AttributeSpec AttrSpec,
                                 const DWARFFormValue &Val,
                                 bool IsLittleEndian);

    /// Clone an address attribute and add it to \p Die.
    /// \returns the size of the new attribute.
    unsigned cloneAddressAttribute(DIE &Die, const DWARFDie &InputDIE,
                                   AttributeSpec AttrSpec, unsigned AttrSize,
                                   const DWARFFormValue &Val,
                                   const CompileUnit &Unit,
                                   AttributesInfo &Info);

    /// Clone a scalar attribute and add it to \p Die.
    /// \returns the size of the new attribute.
    unsigned cloneScalarAttribute(DIE &Die, const DWARFDie &InputDIE,
                                  const DWARFFile &File, CompileUnit &U,
                                  AttributeSpec AttrSpec,
                                  const DWARFFormValue &Val, unsigned AttrSize,
                                  AttributesInfo &Info);

    /// Get the potential name and mangled name for the entity
    /// described by \p Die and store them in \p Info if they are not
    /// already there.
    /// \returns whether a name was found.
    bool getDIENames(const DWARFDie &Die, AttributesInfo &Info,
                     OffsetsStringPool &StringPool, bool StripTemplate = false);

    uint32_t hashFullyQualifiedName(DWARFDie DIE, CompileUnit &U,
                                    const DWARFFile &File,
                                    int RecurseDepth = 0);

    /// Helper for cloneDIE.
    void addObjCAccelerator(CompileUnit &Unit, const DIE *Die,
                            DwarfStringPoolEntryRef Name,
                            OffsetsStringPool &StringPool, bool SkipPubSection);

    void rememberUnitForMacroOffset(CompileUnit &Unit);

    /// Clone and emit the line table for the specified \p Unit.
    /// Translate directories and file names if necessary.
    /// Relocate address ranges.
    void generateLineTableForUnit(CompileUnit &Unit);
  };

  /// Assign an abbreviation number to \p Abbrev.
  void assignAbbrev(DIEAbbrev &Abbrev);

  /// Compute and emit the debug ranges (.debug_aranges, .debug_ranges,
  /// .debug_rnglists) for \p Unit, and patch the attributes referencing them.
  void generateUnitRanges(CompileUnit &Unit, const DWARFFile &File) const;

  /// Emit the accelerator entries for \p Unit.
  void emitAcceleratorEntriesForUnit(CompileUnit &Unit);

  /// Patch the frame info for an object file and emit it.
  void patchFrameInfoForObject(LinkContext &Context);

  /// FoldingSet that uniques the abbreviations.
  FoldingSet<DIEAbbrev> AbbreviationsSet;

  /// Storage for the unique Abbreviations.
  /// This is passed to AsmPrinter::emitDwarfAbbrevs(), thus it cannot be
  /// changed to a vector of unique_ptrs.
  std::vector<DIEAbbrev *> Abbreviations;

  /// DIELoc objects that need to be destructed (but not freed!).
  std::vector<DIELoc *> DIELocs;

  /// DIEBlock objects that need to be destructed (but not freed!).
  std::vector<DIEBlock *> DIEBlocks;

  /// Allocator used for all the DIEValue objects.
  BumpPtrAllocator DIEAlloc;
  /// @}

  std::unique_ptr<DwarfStreamer> TheDwarfEmitter;
  std::vector<LinkContext> ObjectContexts;

  /// The CIEs that have been emitted in the output section. The actual CIE
  /// data serves as the key to this StringMap; this takes care of comparing
  /// the semantics of CIEs defined in different object files.
  StringMap<uint32_t> EmittedCIEs;

  /// Offset of the last CIE that has been emitted in the output
  /// .debug_frame section.
  uint32_t LastCIEOffset = 0;

  /// DWARF v5 (.debug_names) and Apple accelerator tables.
  AccelTable<DWARF5AccelTableStaticData> DebugNames;
  AccelTable<AppleAccelTableStaticOffsetData> AppleNames;
  AccelTable<AppleAccelTableStaticOffsetData> AppleNamespaces;
  AccelTable<AppleAccelTableStaticOffsetData> AppleObjc;
  AccelTable<AppleAccelTableStaticTypeData> AppleTypes;

  /// Mapping the PCM filename to the DwoId.
  StringMap<uint64_t> ClangModules;

  DwarfLinkerClient DwarfLinkerClientID;

  std::function<StringRef(StringRef)> StringsTranslator = nullptr;

  /// A unique ID that identifies each compile unit.
  unsigned UniqueUnitID = 0;

  /// Error handler.
  messageHandler ErrorHandler = nullptr;

  /// Warning handler.
  messageHandler WarningHandler = nullptr;

  /// Linking options.
  struct DWARFLinkerOptions {
    /// DWARF version for the output.
    uint16_t TargetDWARFVersion = 0;

    /// Generate processing log to the standard output.
    bool Verbose = false;

    /// Print statistics.
    bool Statistics = false;

    /// Verify the input DWARF.
    bool VerifyInputDWARF = false;

    /// Do not unique types according to ODR
    bool NoODR = false;

    /// Update the debug info in place rather than performing a full link
    /// (dsymutil's update mode).
    bool Update = false;

    /// Whether we want a static variable to force us to keep its enclosing
    /// function.
    bool KeepFunctionForStatic = false;

    /// Number of threads.
    unsigned Threads = 1;

    /// The accelerator table kinds
    SmallVector<AccelTableKind, 1> AccelTables;

    /// Prepend path for the clang modules.
    std::string PrependPath;

    /// Input verification handler.
    inputVerificationHandler InputVerificationHandler = nullptr;

    /// A list of all .swiftinterface files referenced by the debug
    /// info, mapping Module name to path on disk. The entries need to
    /// be uniqued and sorted, and only a few entries are expected
    /// per compile unit, which is why this is a std::map.
    /// This is a dsymutil-specific flag.
    swiftInterfacesMap *ParseableSwiftInterfaces = nullptr;

    /// A list of remappings to apply to file paths.
    objectPrefixMap *ObjectPrefixMap = nullptr;
  } Options;
};

} // end namespace llvm

#endif // LLVM_DWARFLINKER_DWARFLINKER_H
name : DWARFLinker/DWARFLinkerCompileUnit.h
//===- DWARFLinkerCompileUnit.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKER_DWARFLINKERCOMPILEUNIT_H
#define LLVM_DWARFLINKER_DWARFLINKERCOMPILEUNIT_H

#include "llvm/ADT/AddressRanges.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include <optional>

namespace llvm {

class DeclContext;

/// Mapped value in the address map is the offset to apply to the
/// linked address.
using RangesTy = AddressRangesMap;

// This structure keeps a patch for an attribute and, optionally, the
// relocation value which should be applied. Currently, only location
// attributes need to have a relocation: either to the function ranges if
// the location attribute is of type 'loclist', or to the operand of
// DW_OP_addr/DW_OP_addrx if the location attribute is of type 'exprloc'.
// ASSUMPTION: Location attributes of 'loclist' type containing 'exprloc'
//             with address expression operands are not supported yet.
struct PatchLocation {
  DIE::value_iterator I;
  int64_t RelocAdjustment = 0;

  PatchLocation() = default;
  PatchLocation(DIE::value_iterator I) : I(I) {}
  PatchLocation(DIE::value_iterator I, int64_t Reloc)
      : I(I), RelocAdjustment(Reloc) {}

  void set(uint64_t New) const {
    assert(I);
    const auto &Old = *I;
    assert(Old.getType() == DIEValue::isInteger);
    *I = DIEValue(Old.getAttribute(), Old.getForm(), DIEInteger(New));
  }

  uint64_t get() const {
    assert(I);
    return I->getDIEInteger().getValue();
  }
};
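
// A minimal usage sketch (illustrative; `It` is an assumed DIE::value_iterator
// pointing at an integer-form attribute value):
//   PatchLocation Patch(It, /*Reloc=*/0x1000);
//   uint64_t Old = Patch.get();             // read the current DIEInteger
//   Patch.set(Old + Patch.RelocAdjustment); // overwrite it in place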

using RngListAttributesTy = SmallVector<PatchLocation>;
using LocListAttributesTy = SmallVector<PatchLocation>;

/// Stores all information relating to a compile unit, from its original
/// instance in the object file to its brand-new cloned and generated DIE tree.
class CompileUnit {
public:
  /// Information gathered about a DIE in the object file.
  struct DIEInfo {
    /// Address offset to apply to the described entity.
    int64_t AddrAdjust;

    /// ODR Declaration context.
    DeclContext *Ctxt;

    /// Cloned version of that DIE.
    DIE *Clone;

    /// The index of this DIE's parent.
    uint32_t ParentIdx;

    /// Is the DIE part of the linked output?
    bool Keep : 1;

    /// Was this DIE's entity found in the map?
    bool InDebugMap : 1;

    /// Is this a pure forward declaration we can strip?
    bool Prune : 1;

    /// Does the DIE transitively refer to an incomplete decl?
    bool Incomplete : 1;

    /// Is DIE in the clang module scope?
    bool InModuleScope : 1;

    /// Is ODR marking done?
    bool ODRMarkingDone : 1;

    /// Is this a reference to a DIE that hasn't been cloned yet?
    bool UnclonedReference : 1;

    /// Is this a variable with a location attribute referencing an address?
    bool HasLocationExpressionAddr : 1;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    LLVM_DUMP_METHOD void dump();
#endif // if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  };

  CompileUnit(DWARFUnit &OrigUnit, unsigned ID, bool CanUseODR,
              StringRef ClangModuleName)
      : OrigUnit(OrigUnit), ID(ID), ClangModuleName(ClangModuleName) {
    Info.resize(OrigUnit.getNumDIEs());

    auto CUDie = OrigUnit.getUnitDIE(false);
    if (!CUDie) {
      HasODR = false;
      return;
    }
    if (auto Lang = dwarf::toUnsigned(CUDie.find(dwarf::DW_AT_language)))
      HasODR = CanUseODR && (*Lang == dwarf::DW_LANG_C_plus_plus ||
                             *Lang == dwarf::DW_LANG_C_plus_plus_03 ||
                             *Lang == dwarf::DW_LANG_C_plus_plus_11 ||
                             *Lang == dwarf::DW_LANG_C_plus_plus_14 ||
                             *Lang == dwarf::DW_LANG_ObjC_plus_plus);
    else
      HasODR = false;
  }

  DWARFUnit &getOrigUnit() const { return OrigUnit; }

  unsigned getUniqueID() const { return ID; }

  void createOutputDIE() { NewUnit.emplace(OrigUnit.getUnitDIE().getTag()); }

  DIE *getOutputUnitDIE() const {
    if (NewUnit)
      return &const_cast<BasicDIEUnit &>(*NewUnit).getUnitDie();
    return nullptr;
  }

  bool hasODR() const { return HasODR; }
  bool isClangModule() const { return !ClangModuleName.empty(); }
  uint16_t getLanguage();
  /// Return the DW_AT_LLVM_sysroot of the compile unit or an empty StringRef.
  StringRef getSysRoot();

  const std::string &getClangModuleName() const { return ClangModuleName; }

  DIEInfo &getInfo(unsigned Idx) { return Info[Idx]; }
  const DIEInfo &getInfo(unsigned Idx) const { return Info[Idx]; }

  DIEInfo &getInfo(const DWARFDie &Die) {
    unsigned Idx = getOrigUnit().getDIEIndex(Die);
    return Info[Idx];
  }

  uint64_t getStartOffset() const { return StartOffset; }
  uint64_t getNextUnitOffset() const { return NextUnitOffset; }
  void setStartOffset(uint64_t DebugInfoSize) { StartOffset = DebugInfoSize; }

  std::optional<uint64_t> getLowPc() const { return LowPc; }
  uint64_t getHighPc() const { return HighPc; }
  bool hasLabelAt(uint64_t Addr) const { return Labels.count(Addr); }

  const RangesTy &getFunctionRanges() const { return Ranges; }

  const RngListAttributesTy &getRangesAttributes() { return RangeAttributes; }

  std::optional<PatchLocation> getUnitRangesAttribute() const {
    return UnitRangeAttribute;
  }

  const LocListAttributesTy &getLocationAttributes() const {
    return LocationAttributes;
  }

  /// Mark every DIE in this unit as kept. This function also
  /// marks variables as InDebugMap so that they appear in the
  /// reconstructed accelerator tables.
  void markEverythingAsKept();

  /// Compute the end offset for this unit. Must be called after the CU's DIEs
  /// have been cloned.  \returns the next unit offset (which is also the
  /// current debug_info section size).
  uint64_t computeNextUnitOffset(uint16_t DwarfVersion);

  /// Keep track of a forward reference to DIE \p Die in \p RefUnit by \p
  /// Attr. The attribute should be fixed up later to point to the absolute
  /// offset of \p Die in the debug_info section or to the canonical offset of
  /// \p Ctxt if it is non-null.
  void noteForwardReference(DIE *Die, const CompileUnit *RefUnit,
                            DeclContext *Ctxt, PatchLocation Attr);

  /// Apply all fixups recorded by noteForwardReference().
  void fixupForwardReferences();

  /// Add the low_pc of a label that is relocated by applying
  /// offset \p PCOffset.
  void addLabelLowPc(uint64_t LabelLowPc, int64_t PcOffset);

  /// Add a function range [\p LowPC, \p HighPC) that is relocated by applying
  /// offset \p PCOffset.
  void addFunctionRange(uint64_t LowPC, uint64_t HighPC, int64_t PCOffset);

  /// Keep track of a DW_AT_ranges attribute that we will need to patch up
  /// later.
  void noteRangeAttribute(const DIE &Die, PatchLocation Attr);

  /// Keep track of a location attribute pointing to a location list in the
  /// debug_loc section.
  void noteLocationAttribute(PatchLocation Attr);

  /// Add a namespace accelerator entry for \a Die with \a Name.
  void addNamespaceAccelerator(const DIE *Die, DwarfStringPoolEntryRef Name);

  /// Add a name accelerator entry for \a Die with \a Name.
  void addNameAccelerator(const DIE *Die, DwarfStringPoolEntryRef Name,
                          bool SkipPubnamesSection = false);

  /// Add various accelerator entries for \p Die with \p Name, which must be
  /// an Objective-C selector.
  void addObjCAccelerator(const DIE *Die, DwarfStringPoolEntryRef Name,
                          bool SkipPubnamesSection = false);

  /// Add a type accelerator entry for \p Die with \p Name.
  void addTypeAccelerator(const DIE *Die, DwarfStringPoolEntryRef Name,
                          bool ObjcClassImplementation,
                          uint32_t QualifiedNameHash);

  struct AccelInfo {
    /// Name of the entry.
    DwarfStringPoolEntryRef Name;

    /// DIE this entry describes.
    const DIE *Die;

    /// Hash of the fully qualified name.
    uint32_t QualifiedNameHash;

    /// Emit this entry only in the apple_* sections.
    bool SkipPubSection;

    /// Is this an ObjC class implementation?
    bool ObjcClassImplementation;

    AccelInfo(DwarfStringPoolEntryRef Name, const DIE *Die,
              bool SkipPubSection = false)
        : Name(Name), Die(Die), SkipPubSection(SkipPubSection) {}

    AccelInfo(DwarfStringPoolEntryRef Name, const DIE *Die,
              uint32_t QualifiedNameHash, bool ObjCClassIsImplementation)
        : Name(Name), Die(Die), QualifiedNameHash(QualifiedNameHash),
          SkipPubSection(false),
          ObjcClassImplementation(ObjCClassIsImplementation) {}
  };

  const std::vector<AccelInfo> &getPubnames() const { return Pubnames; }
  const std::vector<AccelInfo> &getPubtypes() const { return Pubtypes; }
  const std::vector<AccelInfo> &getNamespaces() const { return Namespaces; }
  const std::vector<AccelInfo> &getObjC() const { return ObjC; }

  MCSymbol *getLabelBegin() { return LabelBegin; }
  void setLabelBegin(MCSymbol *S) { LabelBegin = S; }

private:
  DWARFUnit &OrigUnit;
  unsigned ID;
  std::vector<DIEInfo> Info; ///< DIE info indexed by DIE index.
  std::optional<BasicDIEUnit> NewUnit;
  MCSymbol *LabelBegin = nullptr;

  uint64_t StartOffset;
  uint64_t NextUnitOffset;

  std::optional<uint64_t> LowPc;
  uint64_t HighPc = 0;

  /// A list of attributes to fixup with the absolute offset of
  /// a DIE in the debug_info section.
  ///
  /// The offsets for the attributes in this array couldn't be set while
  /// cloning because, for cross-CU forward references, the target DIE's
  /// offset isn't known when you emit the reference attribute.
  std::vector<
      std::tuple<DIE *, const CompileUnit *, DeclContext *, PatchLocation>>
      ForwardDIEReferences;

  /// The ranges in this map are the PC ranges for functions in this unit,
  /// associated with the PC offset to apply to the addresses to get
  /// the linked address.
  RangesTy Ranges;

  /// The DW_AT_low_pc of each DW_TAG_label.
  SmallDenseMap<uint64_t, uint64_t, 1> Labels;

  /// 'rnglist' (DW_AT_ranges, DW_AT_start_scope) attributes to patch after
  /// we have gathered all the unit's function addresses.
  /// @{
  RngListAttributesTy RangeAttributes;
  std::optional<PatchLocation> UnitRangeAttribute;
  /// @}

  /// Location attributes that need to be transferred from the
  /// original debug_loc section to the linked one. They are stored
  /// along with the PC offset that is to be applied to their
  /// function's address or to the address operands of their
  /// location expression.
  LocListAttributesTy LocationAttributes;

  /// Accelerator entries for the unit, both for the pub*
  /// sections and the apple* ones.
  /// @{
  std::vector<AccelInfo> Pubnames;
  std::vector<AccelInfo> Pubtypes;
  std::vector<AccelInfo> Namespaces;
  std::vector<AccelInfo> ObjC;
  /// @}

  /// Is this unit subject to the ODR rule?
  bool HasODR;

  /// The DW_AT_language of this unit.
  uint16_t Language = 0;

  /// The DW_AT_LLVM_sysroot of this unit.
  std::string SysRoot;

  /// If this is a Clang module, this holds the module's name.
  std::string ClangModuleName;
};

} // end namespace llvm

#endif // LLVM_DWARFLINKER_DWARFLINKERCOMPILEUNIT_H
name : TargetParser/Triple.h
//===-- llvm/TargetParser/Triple.h - Target triple helper class--*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_TRIPLE_H
#define LLVM_TARGETPARSER_TRIPLE_H

#include "llvm/ADT/Twine.h"
#include "llvm/Support/VersionTuple.h"

// Some system headers or GCC predefined macros conflict with identifiers in
// this file.  Undefine them here.
#undef NetBSD
#undef mips
#undef sparc

namespace llvm {

/// Triple - Helper class for working with autoconf configuration names. For
/// historical reasons, we also call these 'triples' (they used to contain
/// exactly three fields).
///
/// Configuration names are strings in the canonical form:
///   ARCHITECTURE-VENDOR-OPERATING_SYSTEM
/// or
///   ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
///
/// This class is used for clients which want to support arbitrary
/// configuration names, but also want to implement certain special
/// behavior for particular configurations. This class isolates the mapping
/// from the components of the configuration name to well known IDs.
///
/// At its core the Triple class is designed to be a wrapper for a triple
/// string; the constructor does not change or normalize the triple string.
/// Clients that need to handle the non-canonical triples that users often
/// specify should use the normalize method.
///
/// See autoconf/config.guess for a glimpse into what configuration names
/// look like in practice.
class Triple {
public:
  enum ArchType {
    UnknownArch,

    arm,            // ARM (little endian): arm, armv.*, xscale
    armeb,          // ARM (big endian): armeb
    aarch64,        // AArch64 (little endian): aarch64
    aarch64_be,     // AArch64 (big endian): aarch64_be
    aarch64_32,     // AArch64 (little endian) ILP32: aarch64_32
    arc,            // ARC: Synopsys ARC
    avr,            // AVR: Atmel AVR microcontroller
    bpfel,          // eBPF or extended BPF or 64-bit BPF (little endian)
    bpfeb,          // eBPF or extended BPF or 64-bit BPF (big endian)
    csky,           // CSKY: csky
    dxil,           // DXIL 32-bit DirectX bytecode
    hexagon,        // Hexagon: hexagon
    loongarch32,    // LoongArch (32-bit): loongarch32
    loongarch64,    // LoongArch (64-bit): loongarch64
    m68k,           // M68k: Motorola 680x0 family
    mips,           // MIPS: mips, mipsallegrex, mipsr6
    mipsel,         // MIPSEL: mipsel, mipsallegrexe, mipsr6el
    mips64,         // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6
    mips64el,       // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el
    msp430,         // MSP430: msp430
    ppc,            // PPC: powerpc
    ppcle,          // PPCLE: powerpc (little endian)
    ppc64,          // PPC64: powerpc64, ppu
    ppc64le,        // PPC64LE: powerpc64le
    r600,           // R600: AMD GPUs HD2XXX - HD6XXX
    amdgcn,         // AMDGCN: AMD GCN GPUs
    riscv32,        // RISC-V (32-bit): riscv32
    riscv64,        // RISC-V (64-bit): riscv64
    sparc,          // Sparc: sparc
    sparcv9,        // Sparcv9: Sparcv9
    sparcel,        // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
    systemz,        // SystemZ: s390x
    tce,            // TCE (http://tce.cs.tut.fi/): tce
    tcele,          // TCE little endian (http://tce.cs.tut.fi/): tcele
    thumb,          // Thumb (little endian): thumb, thumbv.*
    thumbeb,        // Thumb (big endian): thumbeb
    x86,            // X86: i[3-9]86
    x86_64,         // X86-64: amd64, x86_64
    xcore,          // XCore: xcore
    xtensa,         // Tensilica: Xtensa
    nvptx,          // NVPTX: 32-bit
    nvptx64,        // NVPTX: 64-bit
    le32,           // le32: generic little-endian 32-bit CPU (PNaCl)
    le64,           // le64: generic little-endian 64-bit CPU (PNaCl)
    amdil,          // AMDIL
    amdil64,        // AMDIL with 64-bit pointers
    hsail,          // AMD HSAIL
    hsail64,        // AMD HSAIL with 64-bit pointers
    spir,           // SPIR: standard portable IR for OpenCL 32-bit version
    spir64,         // SPIR: standard portable IR for OpenCL 64-bit version
    spirv32,        // SPIR-V with 32-bit pointers
    spirv64,        // SPIR-V with 64-bit pointers
    kalimba,        // Kalimba: generic kalimba
    shave,          // SHAVE: Movidius vector VLIW processors
    lanai,          // Lanai: Lanai 32-bit
    wasm32,         // WebAssembly with 32-bit pointers
    wasm64,         // WebAssembly with 64-bit pointers
    renderscript32, // 32-bit RenderScript
    renderscript64, // 64-bit RenderScript
    ve,             // NEC SX-Aurora Vector Engine
    LastArchType = ve
  };
  enum SubArchType {
    NoSubArch,

    ARMSubArch_v9_4a,
    ARMSubArch_v9_3a,
    ARMSubArch_v9_2a,
    ARMSubArch_v9_1a,
    ARMSubArch_v9,
    ARMSubArch_v8_9a,
    ARMSubArch_v8_8a,
    ARMSubArch_v8_7a,
    ARMSubArch_v8_6a,
    ARMSubArch_v8_5a,
    ARMSubArch_v8_4a,
    ARMSubArch_v8_3a,
    ARMSubArch_v8_2a,
    ARMSubArch_v8_1a,
    ARMSubArch_v8,
    ARMSubArch_v8r,
    ARMSubArch_v8m_baseline,
    ARMSubArch_v8m_mainline,
    ARMSubArch_v8_1m_mainline,
    ARMSubArch_v7,
    ARMSubArch_v7em,
    ARMSubArch_v7m,
    ARMSubArch_v7s,
    ARMSubArch_v7k,
    ARMSubArch_v7ve,
    ARMSubArch_v6,
    ARMSubArch_v6m,
    ARMSubArch_v6k,
    ARMSubArch_v6t2,
    ARMSubArch_v5,
    ARMSubArch_v5te,
    ARMSubArch_v4t,

    AArch64SubArch_arm64e,
    AArch64SubArch_arm64ec,

    KalimbaSubArch_v3,
    KalimbaSubArch_v4,
    KalimbaSubArch_v5,

    MipsSubArch_r6,

    PPCSubArch_spe,

    // SPIR-V sub-arch corresponds to its version.
    SPIRVSubArch_v10,
    SPIRVSubArch_v11,
    SPIRVSubArch_v12,
    SPIRVSubArch_v13,
    SPIRVSubArch_v14,
    SPIRVSubArch_v15,
  };
  enum VendorType {
    UnknownVendor,

    Apple,
    PC,
    SCEI,
    Freescale,
    IBM,
    ImaginationTechnologies,
    MipsTechnologies,
    NVIDIA,
    CSR,
    Myriad,
    AMD,
    Mesa,
    SUSE,
    OpenEmbedded,
    LastVendorType = OpenEmbedded
  };
  enum OSType {
    UnknownOS,

    Ananas,
    CloudABI,
    Darwin,
    DragonFly,
    FreeBSD,
    Fuchsia,
    IOS,
    KFreeBSD,
    Linux,
    Lv2,        // PS3
    MacOSX,
    NetBSD,
    OpenBSD,
    Solaris,
    UEFI,
    Win32,
    ZOS,
    Haiku,
    Minix,
    RTEMS,
    NaCl,       // Native Client
    AIX,
    CUDA,       // NVIDIA CUDA
    NVCL,       // NVIDIA OpenCL
    AMDHSA,     // AMD HSA Runtime
    PS4,
    PS5,
    ELFIAMCU,
    TvOS,       // Apple tvOS
    WatchOS,    // Apple watchOS
    DriverKit,  // Apple DriverKit
    Mesa3D,
    Contiki,
    AMDPAL,     // AMD PAL Runtime
    HermitCore, // HermitCore Unikernel/Multikernel
    Hurd,       // GNU/Hurd
    WASI,       // Experimental WebAssembly OS
    Emscripten,
    ShaderModel, // DirectX ShaderModel
    LiteOS,
    LastOSType = LiteOS
  };
  enum EnvironmentType {
    UnknownEnvironment,

    GNU,
    GNUABIN32,
    GNUABI64,
    GNUEABI,
    GNUEABIHF,
    GNUF32,
    GNUF64,
    GNUSF,
    GNUX32,
    GNUILP32,
    CODE16,
    EABI,
    EABIHF,
    Android,
    Musl,
    MuslEABI,
    MuslEABIHF,
    MuslX32,

    MSVC,
    Itanium,
    Cygnus,
    CoreCLR,
    Simulator, // Simulator variants of other systems, e.g., Apple's iOS
    MacABI, // Mac Catalyst variant of Apple's iOS deployment target.

    // Shader Stages
    // The order of these values matters, and must be kept in sync with the
    // language options enum in Clang. The ordering is enforced in
    // static_asserts in Triple.cpp and in Clang.
    Pixel,
    Vertex,
    Geometry,
    Hull,
    Domain,
    Compute,
    Library,
    RayGeneration,
    Intersection,
    AnyHit,
    ClosestHit,
    Miss,
    Callable,
    Mesh,
    Amplification,
    OpenHOS,
    LastEnvironmentType = OpenHOS
  };
  enum ObjectFormatType {
    UnknownObjectFormat,

    COFF,
    DXContainer,
    ELF,
    GOFF,
    MachO,
    SPIRV,
    Wasm,
    XCOFF,
  };

private:
  std::string Data;

  /// The parsed arch type.
  ArchType Arch{};

  /// The parsed subarchitecture type.
  SubArchType SubArch{};

  /// The parsed vendor type.
  VendorType Vendor{};

  /// The parsed OS type.
  OSType OS{};

  /// The parsed Environment type.
  EnvironmentType Environment{};

  /// The object format type.
  ObjectFormatType ObjectFormat{};

public:
  /// @name Constructors
  /// @{

  /// Default constructor is the same as an empty string and leaves all
  /// triple fields unknown.
  Triple() = default;

  explicit Triple(const Twine &Str);
  Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
  Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
         const Twine &EnvironmentStr);
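
  // A minimal construction sketch (illustrative):
  //   Triple T("x86_64-pc-linux-gnu");
  //   bool Is64BitLinux = T.isOSLinux() && T.isArch64Bit();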

  bool operator==(const Triple &Other) const {
    return Arch == Other.Arch && SubArch == Other.SubArch &&
           Vendor == Other.Vendor && OS == Other.OS &&
           Environment == Other.Environment &&
           ObjectFormat == Other.ObjectFormat;
  }

  bool operator!=(const Triple &Other) const {
    return !(*this == Other);
  }

  /// @}
  /// @name Normalization
  /// @{

  /// Turn an arbitrary machine specification into the canonical triple form (or
  /// something sensible that the Triple class understands if nothing better can
  /// reasonably be done).  In particular, it handles the common case in which
  /// otherwise valid components are in the wrong order.
  static std::string normalize(StringRef Str);

  /// Return the normalized form of this triple's string.
  std::string normalize() const { return normalize(Data); }
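
  // An illustrative sketch; normalize() fills in missing components, e.g.
  // "x86_64-linux-gnu" gains an "unknown" vendor (the exact canonical
  // spelling may vary between LLVM versions):
  //   Triple T(Triple::normalize("x86_64-linux-gnu"));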

  /// @}
  /// @name Typed Component Access
  /// @{

  /// Get the parsed architecture type of this triple.
  ArchType getArch() const { return Arch; }

  /// Get the parsed subarchitecture type for this triple.
  SubArchType getSubArch() const { return SubArch; }

  /// Get the parsed vendor type of this triple.
  VendorType getVendor() const { return Vendor; }

  /// Get the parsed operating system type of this triple.
  OSType getOS() const { return OS; }

  /// Does this triple have the optional environment (fourth) component?
  bool hasEnvironment() const {
    return getEnvironmentName() != "";
  }

  /// Get the parsed environment type of this triple.
  EnvironmentType getEnvironment() const { return Environment; }

  /// Parse the version number from the environment component of the
  /// triple, if present.
  ///
  /// For example, an environment component of "android21" would return (21).
  VersionTuple getEnvironmentVersion() const;

  /// Get the object format for this triple.
  ObjectFormatType getObjectFormat() const { return ObjectFormat; }

  /// Parse the version number from the OS name component of the triple, if
  /// present.
  ///
  /// For example, "fooos1.2.3" would return (1, 2, 3).
  VersionTuple getOSVersion() const;

  /// Return just the major version number; this is specialized because it is
  /// a common query.
  unsigned getOSMajorVersion() const { return getOSVersion().getMajor(); }

  /// Parse the version number as with getOSVersion and then translate generic
  /// "darwin" versions to the corresponding OS X versions.  This may also be
  /// called with IOS triples but the OS X version number is just set to a
  /// constant 10.4.0 in that case.  Returns true if successful.
  bool getMacOSXVersion(VersionTuple &Version) const;

  /// Parse the version number as with getOSVersion.  This should only be called
  /// with IOS or generic triples.
  VersionTuple getiOSVersion() const;

  /// Parse the version number as with getOSVersion.  This should only be called
  /// with WatchOS or generic triples.
  VersionTuple getWatchOSVersion() const;

  /// Parse the version number as with getOSVersion.
  VersionTuple getDriverKitVersion() const;

  /// @}
  /// @name Direct Component Access
  /// @{

  const std::string &str() const { return Data; }

  const std::string &getTriple() const { return Data; }

  /// Get the architecture (first) component of the triple.
  StringRef getArchName() const;

  /// Get the architecture name based on Kind and SubArch.
  StringRef getArchName(ArchType Kind, SubArchType SubArch = NoSubArch) const;

  /// Get the vendor (second) component of the triple.
  StringRef getVendorName() const;

  /// Get the operating system (third) component of the triple.
  StringRef getOSName() const;

  /// Get the optional environment (fourth) component of the triple, or "" if
  /// empty.
  StringRef getEnvironmentName() const;

  /// Get the operating system and optional environment components as a single
  /// string (separated by a '-' if the environment component is present).
  StringRef getOSAndEnvironmentName() const;

  /// @}
  /// @name Convenience Predicates
  /// @{

  /// Test whether the architecture is 64-bit
  ///
  /// Note that this tests for 64-bit pointer width, and nothing else. Note
  /// that we intentionally expose only three predicates: 64-bit, 32-bit, and
  /// 16-bit. The inner details of pointer width for particular architectures
  /// are not summed up in the triple, and so only a coarse-grained predicate
  /// system is provided.
  bool isArch64Bit() const;

  /// Test whether the architecture is 32-bit
  ///
  /// Note that this tests for 32-bit pointer width, and nothing else.
  bool isArch32Bit() const;

  /// Test whether the architecture is 16-bit
  ///
  /// Note that this tests for 16-bit pointer width, and nothing else.
  bool isArch16Bit() const;

  /// Helper function for doing comparisons against version numbers included in
  /// the target triple.
  bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
                     unsigned Micro = 0) const {
    if (Minor == 0) {
      return getOSVersion() < VersionTuple(Major);
    }
    if (Micro == 0) {
      return getOSVersion() < VersionTuple(Major, Minor);
    }
    return getOSVersion() < VersionTuple(Major, Minor, Micro);
  }
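
  // For example (illustrative): with the triple "arm64-apple-macosx11.0",
  // isOSVersionLT(12) is true, while isOSVersionLT(10, 15) is false.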

  bool isOSVersionLT(const Triple &Other) const {
    return getOSVersion() < Other.getOSVersion();
  }

  /// Comparison function for checking OS X version compatibility, which
  /// handles the skewed version numbering schemes used by the "darwin"
  /// triples.
  bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
                         unsigned Micro = 0) const;

  /// Is this a Mac OS X triple. For legacy reasons, we support both "darwin"
  /// and "osx" as OS X triples.
  bool isMacOSX() const {
    return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
  }

  /// Is this an iOS triple.
  /// Note: This identifies tvOS as a variant of iOS. If that ever
  /// changes, i.e., if the two operating systems diverge or their version
  /// numbers get out of sync, that will need to be changed.
  /// watchOS has completely different version numbers so it is not included.
  bool isiOS() const {
    return getOS() == Triple::IOS || isTvOS();
  }

  /// Is this an Apple tvOS triple.
  bool isTvOS() const {
    return getOS() == Triple::TvOS;
  }

  /// Is this an Apple watchOS triple.
  bool isWatchOS() const {
    return getOS() == Triple::WatchOS;
  }

  bool isWatchABI() const {
    return getSubArch() == Triple::ARMSubArch_v7k;
  }

  /// Is this an Apple DriverKit triple.
  bool isDriverKit() const { return getOS() == Triple::DriverKit; }

  bool isOSzOS() const { return getOS() == Triple::ZOS; }

  /// Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, or DriverKit).
  bool isOSDarwin() const {
    return isMacOSX() || isiOS() || isWatchOS() || isDriverKit();
  }

  bool isSimulatorEnvironment() const {
    return getEnvironment() == Triple::Simulator;
  }

  bool isMacCatalystEnvironment() const {
    return getEnvironment() == Triple::MacABI;
  }

  /// Returns true for targets that run on a macOS machine.
  bool isTargetMachineMac() const {
    return isMacOSX() || (isOSDarwin() && (isSimulatorEnvironment() ||
                                           isMacCatalystEnvironment()));
  }

  bool isOSNetBSD() const {
    return getOS() == Triple::NetBSD;
  }

  bool isOSOpenBSD() const {
    return getOS() == Triple::OpenBSD;
  }

  bool isOSFreeBSD() const {
    return getOS() == Triple::FreeBSD;
  }

  bool isOSFuchsia() const {
    return getOS() == Triple::Fuchsia;
  }

  bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }

  bool isOSSolaris() const {
    return getOS() == Triple::Solaris;
  }

  bool isOSIAMCU() const {
    return getOS() == Triple::ELFIAMCU;
  }

  bool isOSUnknown() const { return getOS() == Triple::UnknownOS; }

  bool isGNUEnvironment() const {
    EnvironmentType Env = getEnvironment();
    return Env == Triple::GNU || Env == Triple::GNUABIN32 ||
           Env == Triple::GNUABI64 || Env == Triple::GNUEABI ||
           Env == Triple::GNUEABIHF || Env == Triple::GNUF32 ||
           Env == Triple::GNUF64 || Env == Triple::GNUSF ||
           Env == Triple::GNUX32;
  }

  bool isOSContiki() const {
    return getOS() == Triple::Contiki;
  }

  /// Tests whether the OS is Haiku.
  bool isOSHaiku() const {
    return getOS() == Triple::Haiku;
  }

  /// Tests whether the OS is UEFI.
  bool isUEFI() const {
    return getOS() == Triple::UEFI;
  }

  /// Tests whether the OS is Windows.
  bool isOSWindows() const {
    return getOS() == Triple::Win32;
  }

  /// Checks if the environment is MSVC.
  bool isKnownWindowsMSVCEnvironment() const {
    return isOSWindows() && getEnvironment() == Triple::MSVC;
  }

  /// Checks if the environment could be MSVC.
  bool isWindowsMSVCEnvironment() const {
    return isKnownWindowsMSVCEnvironment() ||
           (isOSWindows() && getEnvironment() == Triple::UnknownEnvironment);
  }

  /// Checks if we're using the Windows Arm64EC ABI.
  bool isWindowsArm64EC() const {
    return getArch() == Triple::aarch64 &&
           getSubArch() == Triple::AArch64SubArch_arm64ec;
  }

  bool isWindowsCoreCLREnvironment() const {
    return isOSWindows() && getEnvironment() == Triple::CoreCLR;
  }

  bool isWindowsItaniumEnvironment() const {
    return isOSWindows() && getEnvironment() == Triple::Itanium;
  }

  bool isWindowsCygwinEnvironment() const {
    return isOSWindows() && getEnvironment() == Triple::Cygnus;
  }

  bool isWindowsGNUEnvironment() const {
    return isOSWindows() && getEnvironment() == Triple::GNU;
  }

  /// Tests for either Cygwin or MinGW OS
  bool isOSCygMing() const {
    return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
  }

  /// Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
  bool isOSMSVCRT() const {
    return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
           isWindowsItaniumEnvironment();
  }

  /// Tests whether the OS is NaCl (Native Client)
  bool isOSNaCl() const {
    return getOS() == Triple::NaCl;
  }

  /// Tests whether the OS is Linux.
  bool isOSLinux() const {
    return getOS() == Triple::Linux;
  }

  /// Tests whether the OS is kFreeBSD.
  bool isOSKFreeBSD() const {
    return getOS() == Triple::KFreeBSD;
  }

  /// Tests whether the OS is Hurd.
  bool isOSHurd() const {
    return getOS() == Triple::Hurd;
  }

  /// Tests whether the OS is WASI.
  bool isOSWASI() const {
    return getOS() == Triple::WASI;
  }

  /// Tests whether the OS is Emscripten.
  bool isOSEmscripten() const {
    return getOS() == Triple::Emscripten;
  }

  /// Tests whether the OS uses glibc.
  bool isOSGlibc() const {
    return (getOS() == Triple::Linux || getOS() == Triple::KFreeBSD ||
            getOS() == Triple::Hurd) &&
           !isAndroid();
  }

  /// Tests whether the OS is AIX.
  bool isOSAIX() const {
    return getOS() == Triple::AIX;
  }

  /// Tests whether the OS uses the ELF binary format.
  bool isOSBinFormatELF() const {
    return getObjectFormat() == Triple::ELF;
  }

  /// Tests whether the OS uses the COFF binary format.
  bool isOSBinFormatCOFF() const {
    return getObjectFormat() == Triple::COFF;
  }

  /// Tests whether the OS uses the GOFF binary format.
  bool isOSBinFormatGOFF() const { return getObjectFormat() == Triple::GOFF; }

  /// Tests whether the environment is MachO.
  bool isOSBinFormatMachO() const {
    return getObjectFormat() == Triple::MachO;
  }

  /// Tests whether the OS uses the Wasm binary format.
  bool isOSBinFormatWasm() const {
    return getObjectFormat() == Triple::Wasm;
  }

  /// Tests whether the OS uses the XCOFF binary format.
  bool isOSBinFormatXCOFF() const {
    return getObjectFormat() == Triple::XCOFF;
  }

  /// Tests whether the OS uses the DXContainer binary format.
  bool isOSBinFormatDXContainer() const {
    return getObjectFormat() == Triple::DXContainer;
  }

  /// Tests whether the target is the PS4 platform.
  bool isPS4() const {
    return getArch() == Triple::x86_64 &&
           getVendor() == Triple::SCEI &&
           getOS() == Triple::PS4;
  }

  /// Tests whether the target is the PS5 platform.
  bool isPS5() const {
    return getArch() == Triple::x86_64 &&
      getVendor() == Triple::SCEI &&
      getOS() == Triple::PS5;
  }

  /// Tests whether the target is the PS4 or PS5 platform.
  bool isPS() const { return isPS4() || isPS5(); }

  /// Tests whether the target is Android
  bool isAndroid() const { return getEnvironment() == Triple::Android; }

  bool isAndroidVersionLT(unsigned Major) const {
    assert(isAndroid() && "Not an Android triple!");

    VersionTuple Version = getEnvironmentVersion();

    // 64-bit targets did not exist before API level 21 (Lollipop).
    if (isArch64Bit() && Version.getMajor() < 21)
      return VersionTuple(21) < VersionTuple(Major);

    return Version < VersionTuple(Major);
  }
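
  // For example (illustrative): on "aarch64-linux-android19" the version is
  // clamped up to 21 (64-bit targets start at API level 21), so
  // isAndroidVersionLT(21) is false while isAndroidVersionLT(22) is true.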

  /// Tests whether the environment is musl-libc
  bool isMusl() const {
    return getEnvironment() == Triple::Musl ||
           getEnvironment() == Triple::MuslEABI ||
           getEnvironment() == Triple::MuslEABIHF ||
           getEnvironment() == Triple::MuslX32 ||
           getEnvironment() == Triple::OpenHOS || isOSLiteOS();
  }

  /// Tests whether the target is OHOS.
  /// LiteOS's default environment is also OHOS, but it is omitted from the
  /// triple.
  bool isOHOSFamily() const { return isOpenHOS() || isOSLiteOS(); }

  bool isOpenHOS() const { return getEnvironment() == Triple::OpenHOS; }

  bool isOSLiteOS() const { return getOS() == Triple::LiteOS; }

  /// Tests whether the target is DXIL.
  bool isDXIL() const {
    return getArch() == Triple::dxil;
  }

  /// Tests whether the target is SPIR (32- or 64-bit).
  bool isSPIR() const {
    return getArch() == Triple::spir || getArch() == Triple::spir64;
  }

  /// Tests whether the target is SPIR-V (32/64-bit).
  bool isSPIRV() const {
    return getArch() == Triple::spirv32 || getArch() == Triple::spirv64;
  }

  /// Tests whether the target is NVPTX (32- or 64-bit).
  bool isNVPTX() const {
    return getArch() == Triple::nvptx || getArch() == Triple::nvptx64;
  }

  /// Tests whether the target is AMDGCN
  bool isAMDGCN() const { return getArch() == Triple::amdgcn; }

  bool isAMDGPU() const {
    return getArch() == Triple::r600 || getArch() == Triple::amdgcn;
  }

  /// Tests whether the target is Thumb (little and big endian).
  bool isThumb() const {
    return getArch() == Triple::thumb || getArch() == Triple::thumbeb;
  }

  /// Tests whether the target is ARM (little and big endian).
  bool isARM() const {
    return getArch() == Triple::arm || getArch() == Triple::armeb;
  }

  /// Tests whether the target supports the EHABI exception
  /// handling standard.
  bool isTargetEHABICompatible() const {
    return (isARM() || isThumb()) &&
           (getEnvironment() == Triple::EABI ||
            getEnvironment() == Triple::GNUEABI ||
            getEnvironment() == Triple::MuslEABI ||
            getEnvironment() == Triple::EABIHF ||
            getEnvironment() == Triple::GNUEABIHF ||
            getEnvironment() == Triple::OpenHOS ||
            getEnvironment() == Triple::MuslEABIHF || isAndroid()) &&
           isOSBinFormatELF();
  }

  /// Tests whether the target is T32.
  bool isArmT32() const {
    switch (getSubArch()) {
    case Triple::ARMSubArch_v8m_baseline:
    case Triple::ARMSubArch_v7s:
    case Triple::ARMSubArch_v7k:
    case Triple::ARMSubArch_v7ve:
    case Triple::ARMSubArch_v6:
    case Triple::ARMSubArch_v6m:
    case Triple::ARMSubArch_v6k:
    case Triple::ARMSubArch_v6t2:
    case Triple::ARMSubArch_v5:
    case Triple::ARMSubArch_v5te:
    case Triple::ARMSubArch_v4t:
      return false;
    default:
      return true;
    }
  }

  /// Tests whether the target is an M-class.
  bool isArmMClass() const {
    switch (getSubArch()) {
    case Triple::ARMSubArch_v6m:
    case Triple::ARMSubArch_v7m:
    case Triple::ARMSubArch_v7em:
    case Triple::ARMSubArch_v8m_mainline:
    case Triple::ARMSubArch_v8m_baseline:
    case Triple::ARMSubArch_v8_1m_mainline:
      return true;
    default:
      return false;
    }
  }

  /// Tests whether the target is AArch64 (little and big endian).
  bool isAArch64() const {
    return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be ||
           getArch() == Triple::aarch64_32;
  }

  /// Tests whether the target is AArch64 and pointers are the size specified by
  /// \p PointerWidth.
  bool isAArch64(int PointerWidth) const {
    assert(PointerWidth == 64 || PointerWidth == 32);
    if (!isAArch64())
      return false;
    return getArch() == Triple::aarch64_32 ||
                   getEnvironment() == Triple::GNUILP32
               ? PointerWidth == 32
               : PointerWidth == 64;
  }

  /// Tests whether the target is 32-bit LoongArch.
  bool isLoongArch32() const { return getArch() == Triple::loongarch32; }

  /// Tests whether the target is 64-bit LoongArch.
  bool isLoongArch64() const { return getArch() == Triple::loongarch64; }

  /// Tests whether the target is LoongArch (32- and 64-bit).
  bool isLoongArch() const { return isLoongArch32() || isLoongArch64(); }

  /// Tests whether the target is MIPS 32-bit (little and big endian).
  bool isMIPS32() const {
    return getArch() == Triple::mips || getArch() == Triple::mipsel;
  }

  /// Tests whether the target is MIPS 64-bit (little and big endian).
  bool isMIPS64() const {
    return getArch() == Triple::mips64 || getArch() == Triple::mips64el;
  }

  /// Tests whether the target is MIPS (little and big endian, 32- or 64-bit).
  bool isMIPS() const {
    return isMIPS32() || isMIPS64();
  }

  /// Tests whether the target is PowerPC (32- or 64-bit LE or BE).
  bool isPPC() const {
    return getArch() == Triple::ppc || getArch() == Triple::ppc64 ||
           getArch() == Triple::ppcle || getArch() == Triple::ppc64le;
  }

  /// Tests whether the target is 32-bit PowerPC (little and big endian).
  bool isPPC32() const {
    return getArch() == Triple::ppc || getArch() == Triple::ppcle;
  }

  /// Tests whether the target is 64-bit PowerPC (little and big endian).
  bool isPPC64() const {
    return getArch() == Triple::ppc64 || getArch() == Triple::ppc64le;
  }

  /// Tests whether the target's 64-bit PowerPC big-endian ABI is ELFv2.
  bool isPPC64ELFv2ABI() const {
    return (getArch() == Triple::ppc64 &&
            ((getOS() == Triple::FreeBSD &&
              (getOSMajorVersion() >= 13 || getOSVersion().empty())) ||
             getOS() == Triple::OpenBSD || isMusl()));
  }

  /// Tests whether the 32-bit PowerPC target uses Secure PLT.
  bool isPPC32SecurePlt() const {
    return ((getArch() == Triple::ppc || getArch() == Triple::ppcle) &&
            ((getOS() == Triple::FreeBSD &&
              (getOSMajorVersion() >= 13 || getOSVersion().empty())) ||
             getOS() == Triple::NetBSD || getOS() == Triple::OpenBSD ||
             isMusl()));
  }

  /// Tests whether the target is 32-bit RISC-V.
  bool isRISCV32() const { return getArch() == Triple::riscv32; }

  /// Tests whether the target is 64-bit RISC-V.
  bool isRISCV64() const { return getArch() == Triple::riscv64; }

  /// Tests whether the target is RISC-V (32- and 64-bit).
  bool isRISCV() const { return isRISCV32() || isRISCV64(); }

  /// Tests whether the target is 32-bit SPARC (little and big endian).
  bool isSPARC32() const {
    return getArch() == Triple::sparc || getArch() == Triple::sparcel;
  }

  /// Tests whether the target is 64-bit SPARC (big endian).
  bool isSPARC64() const { return getArch() == Triple::sparcv9; }

  /// Tests whether the target is SPARC.
  bool isSPARC() const { return isSPARC32() || isSPARC64(); }

  /// Tests whether the target is SystemZ.
  bool isSystemZ() const {
    return getArch() == Triple::systemz;
  }

  /// Tests whether the target is x86 (32- or 64-bit).
  bool isX86() const {
    return getArch() == Triple::x86 || getArch() == Triple::x86_64;
  }

  /// Tests whether the target is VE.
  bool isVE() const {
    return getArch() == Triple::ve;
  }

  /// Tests whether the target is wasm (32- and 64-bit).
  bool isWasm() const {
    return getArch() == Triple::wasm32 || getArch() == Triple::wasm64;
  }

  /// Tests whether the target is CSKY.
  bool isCSKY() const {
    return getArch() == Triple::csky;
  }

  /// Tests whether the target is the Apple "arm64e" AArch64 subarch.
  bool isArm64e() const {
    return getArch() == Triple::aarch64 &&
           getSubArch() == Triple::AArch64SubArch_arm64e;
  }

  /// Tests whether the target is X32.
  bool isX32() const {
    EnvironmentType Env = getEnvironment();
    return Env == Triple::GNUX32 || Env == Triple::MuslX32;
  }

  /// Tests whether the target is eBPF.
  bool isBPF() const {
    return getArch() == Triple::bpfel || getArch() == Triple::bpfeb;
  }

  /// Tests whether the target supports COMDAT.
  bool supportsCOMDAT() const {
    return !(isOSBinFormatMachO() || isOSBinFormatXCOFF() ||
             isOSBinFormatDXContainer());
  }

  /// Tests whether the target uses emulated TLS as default.
  ///
  /// Note: Android API level 29 (Android 10) introduced ELF TLS.
  bool hasDefaultEmulatedTLS() const {
    return (isAndroid() && isAndroidVersionLT(29)) || isOSOpenBSD() ||
           isWindowsCygwinEnvironment() || isOHOSFamily();
  }

  /// Tests whether the target uses -data-sections as default.
  bool hasDefaultDataSections() const {
    return isOSBinFormatXCOFF() || isWasm();
  }

  /// Tests if the environment supports dllimport/export annotations.
  bool hasDLLImportExport() const { return isOSWindows() || isPS(); }
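
  // A small usage sketch for the predicates above (hypothetical triple
  // string, not from this header):
  //
  //   Triple T("x86_64-pc-windows-msvc");
  //   T.isX86();              // true
  //   T.supportsCOMDAT();     // true: COFF is not excluded above.
  //   T.hasDLLImportExport(); // true on Windows.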

  /// @}
  /// @name Mutators
  /// @{

  /// Set the architecture (first) component of the triple to a known type.
  void setArch(ArchType Kind, SubArchType SubArch = NoSubArch);

  /// Set the vendor (second) component of the triple to a known type.
  void setVendor(VendorType Kind);

  /// Set the operating system (third) component of the triple to a known type.
  void setOS(OSType Kind);

  /// Set the environment (fourth) component of the triple to a known type.
  void setEnvironment(EnvironmentType Kind);

  /// Set the object file format.
  void setObjectFormat(ObjectFormatType Kind);

  /// Set all components to the new triple \p Str.
  void setTriple(const Twine &Str);

  /// Set the architecture (first) component of the triple by name.
  void setArchName(StringRef Str);

  /// Set the vendor (second) component of the triple by name.
  void setVendorName(StringRef Str);

  /// Set the operating system (third) component of the triple by name.
  void setOSName(StringRef Str);

  /// Set the optional environment (fourth) component of the triple by name.
  void setEnvironmentName(StringRef Str);

  /// Set the operating system and optional environment components with a single
  /// string.
  void setOSAndEnvironmentName(StringRef Str);
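
  // Sketch: a triple can be built piecewise with the mutators above, or
  // re-parsed from one string with setTriple (hedged example, not from
  // this header):
  //
  //   Triple T;
  //   T.setArch(Triple::x86_64);
  //   T.setVendor(Triple::PC);
  //   T.setOS(Triple::Linux);
  //   // ...roughly equivalent to:
  //   T.setTriple("x86_64-pc-linux-gnu");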

  /// @}
  /// @name Helpers to build variants of a particular triple.
  /// @{

  /// Form a triple with a 32-bit variant of the current architecture.
  ///
  /// This can be used to move across "families" of architectures where useful.
  ///
  /// \returns A new triple with a 32-bit architecture or an unknown
  ///          architecture if no such variant can be found.
  llvm::Triple get32BitArchVariant() const;

  /// Form a triple with a 64-bit variant of the current architecture.
  ///
  /// This can be used to move across "families" of architectures where useful.
  ///
  /// \returns A new triple with a 64-bit architecture or an unknown
  ///          architecture if no such variant can be found.
  llvm::Triple get64BitArchVariant() const;

  /// Form a triple with a big endian variant of the current architecture.
  ///
  /// This can be used to move across "families" of architectures where useful.
  ///
  /// \returns A new triple with a big endian architecture or an unknown
  ///          architecture if no such variant can be found.
  llvm::Triple getBigEndianArchVariant() const;

  /// Form a triple with a little endian variant of the current architecture.
  ///
  /// This can be used to move across "families" of architectures where useful.
  ///
  /// \returns A new triple with a little endian architecture or an unknown
  ///          architecture if no such variant can be found.
  llvm::Triple getLittleEndianArchVariant() const;

  /// Tests whether the target triple is little endian.
  ///
  /// \returns true if the triple is little endian, false otherwise.
  bool isLittleEndian() const;
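
  // Sketch: hopping between endian variants (hypothetical triple string):
  //
  //   Triple T("mips-unknown-linux-gnu");         // big endian
  //   Triple LE = T.getLittleEndianArchVariant(); // mipsel-unknown-linux-gnu
  //   assert(LE.isLittleEndian());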

  /// Tests whether target triples are compatible.
  bool isCompatibleWith(const Triple &Other) const;

  /// Merge target triples.
  std::string merge(const Triple &Other) const;

  /// Some platforms have different minimum supported OS versions that
  /// vary by the architecture specified in the triple. This function
  /// returns the minimum supported OS version for this triple if one
  /// exists, or an invalid version tuple if this triple doesn't have one.
  VersionTuple getMinimumSupportedOSVersion() const;

  /// @}
  /// @name Static helpers for IDs.
  /// @{

  /// Get the canonical name for the \p Kind architecture.
  static StringRef getArchTypeName(ArchType Kind);

  /// Get the "prefix" canonical name for the \p Kind architecture. This is the
  /// prefix used by the architecture specific builtins, and is suitable for
  /// passing to \see Intrinsic::getIntrinsicForClangBuiltin().
  ///
  /// \return - The architecture prefix, or 0 if none is defined.
  static StringRef getArchTypePrefix(ArchType Kind);

  /// Get the canonical name for the \p Kind vendor.
  static StringRef getVendorTypeName(VendorType Kind);

  /// Get the canonical name for the \p Kind operating system.
  static StringRef getOSTypeName(OSType Kind);

  /// Get the canonical name for the \p Kind environment.
  static StringRef getEnvironmentTypeName(EnvironmentType Kind);

  /// Get the name for the \p Object format.
  static StringRef getObjectFormatTypeName(ObjectFormatType ObjectFormat);

  /// @}
  /// @name Static helpers for converting alternate architecture names.
  /// @{

  /// The canonical type for the given LLVM architecture name (e.g., "x86").
  static ArchType getArchTypeForLLVMName(StringRef Str);

  /// @}

  /// Returns a canonicalized OS version number for the specified OS.
  static VersionTuple getCanonicalVersionForOS(OSType OSKind,
                                               const VersionTuple &Version);
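
  // Hedged sketch: canonicalization is OS-specific; for example, Darwin is
  // assumed here to remap macOS 10.16 to 11.0:
  //
  //   VersionTuple V =
  //       Triple::getCanonicalVersionForOS(Triple::MacOSX, VersionTuple(10, 16));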
};

} // End llvm namespace


#endif

// File: TargetParser/LoongArchTargetParser.def
#ifndef LOONGARCH_FEATURE
#define LOONGARCH_FEATURE(NAME, KIND)
#endif

LOONGARCH_FEATURE("+64bit", FK_64BIT)
LOONGARCH_FEATURE("+f", FK_FP32)
LOONGARCH_FEATURE("+d", FK_FP64)
LOONGARCH_FEATURE("+lsx", FK_LSX)
LOONGARCH_FEATURE("+lasx", FK_LASX)
LOONGARCH_FEATURE("+lbt", FK_LBT)
LOONGARCH_FEATURE("+lvz", FK_LVZ)
LOONGARCH_FEATURE("+ual", FK_UAL)

#undef LOONGARCH_FEATURE

#ifndef LOONGARCH_ARCH
#define LOONGARCH_ARCH(NAME, KIND, FEATURES)
#endif

LOONGARCH_ARCH("loongarch64", AK_LOONGARCH64, FK_64BIT | FK_FP32 | FK_FP64 | FK_UAL)
LOONGARCH_ARCH("la464", AK_LA464, FK_64BIT | FK_FP32 | FK_FP64 | FK_LSX | FK_LASX | FK_UAL)

#undef LOONGARCH_ARCH
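
// A typical consumer of this X-macro file (a sketch, not part of the
// original): define the macro, include the file, and let each entry expand.
//
//   enum class ArchKind {
//   #define LOONGARCH_ARCH(NAME, KIND, FEATURES) KIND,
//   #include "llvm/TargetParser/LoongArchTargetParser.def"
//   };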

// File: TargetParser/RISCVTargetParserDef.inc
#ifndef PROC
#define PROC(ENUM, NAME, DEFAULT_MARCH)
#endif

PROC(GENERIC_RV32, {"generic-rv32"}, {"rv32i2p1"})
PROC(GENERIC_RV64, {"generic-rv64"}, {"rv64i2p1"})
PROC(ROCKET_RV32, {"rocket-rv32"}, {"rv32i2p1_zicsr2p0_zifencei2p0"})
PROC(ROCKET_RV64, {"rocket-rv64"}, {"rv64i2p1_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_E20, {"sifive-e20"}, {"rv32i2p1_m2p0_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_E21, {"sifive-e21"}, {"rv32i2p1_m2p0_a2p1_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_E24, {"sifive-e24"}, {"rv32i2p1_m2p0_a2p1_f2p2_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_E31, {"sifive-e31"}, {"rv32i2p1_m2p0_a2p1_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_E34, {"sifive-e34"}, {"rv32i2p1_m2p0_a2p1_f2p2_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_E76, {"sifive-e76"}, {"rv32i2p1_m2p0_a2p1_f2p2_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_S21, {"sifive-s21"}, {"rv64i2p1_m2p0_a2p1_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_S51, {"sifive-s51"}, {"rv64i2p1_m2p0_a2p1_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_S54, {"sifive-s54"}, {"rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_S76, {"sifive-s76"}, {"rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0_zihintpause2p0_xsfcie1p0"})
PROC(SIFIVE_U54, {"sifive-u54"}, {"rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_U74, {"sifive-u74"}, {"rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0"})
PROC(SIFIVE_X280, {"sifive-x280"}, {"rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_v1p0_zicsr2p0_zifencei2p0_zfh1p0_zfhmin1p0_zba1p0_zbb1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvfh1p0_zvl128b1p0_zvl256b1p0_zvl32b1p0_zvl512b1p0_zvl64b1p0"})
PROC(SYNTACORE_SCR1_BASE, {"syntacore-scr1-base"}, {"rv32i2p1_c2p0_zicsr2p0_zifencei2p0"})
PROC(SYNTACORE_SCR1_MAX, {"syntacore-scr1-max"}, {"rv32i2p1_m2p0_c2p0_zicsr2p0_zifencei2p0"})

#undef PROC

#ifndef TUNE_PROC
#define TUNE_PROC(ENUM, NAME)
#endif

TUNE_PROC(GENERIC, "generic")
TUNE_PROC(ROCKET, "rocket")
TUNE_PROC(SIFIVE_7, "sifive-7-series")

#undef TUNE_PROC
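
// Sketch of consumption (an assumed pattern mirroring the other .def files):
// a header can expand the PROC entries into an enum or a name table.
//
//   enum class CPUKind {
//   #define PROC(ENUM, NAME, DEFAULT_MARCH) ENUM,
//   #include "llvm/TargetParser/RISCVTargetParserDef.inc"
//   };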

// File: TargetParser/ARMTargetParserCommon.h
//===---------------- ARMTargetParserCommon ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Code that is common to ARMTargetParser and AArch64TargetParser.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_ARMTARGETPARSERCOMMON_H
#define LLVM_TARGETPARSER_ARMTARGETPARSERCOMMON_H

#include "llvm/ADT/StringRef.h"

namespace llvm {
namespace ARM {

enum class ISAKind { INVALID = 0, ARM, THUMB, AARCH64 };

enum class EndianKind { INVALID = 0, LITTLE, BIG };

/// Converts e.g. "armv8" -> "armv8-a".
StringRef getArchSynonym(StringRef Arch);

/// MArch is expected to be of the form (arm|thumb)?(eb)?(v.+)?(eb)?, but
/// (iwmmxt|xscale)(eb)? is also permitted. If the former, return
/// "v.+", if the latter, return unmodified string, minus 'eb'.
/// If invalid, return empty string.
StringRef getCanonicalArchName(StringRef Arch);

// ARM, Thumb, AArch64
ISAKind parseArchISA(StringRef Arch);

// Little/Big endian
EndianKind parseArchEndian(StringRef Arch);

struct ParsedBranchProtection {
  StringRef Scope;
  StringRef Key;
  bool BranchTargetEnforcement;
};

bool parseBranchProtection(StringRef Spec, ParsedBranchProtection &PBP,
                           StringRef &Err);
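
// Usage sketch (hypothetical spec string; "standard" is assumed to be an
// accepted value):
//
//   ParsedBranchProtection PBP;
//   StringRef Err;
//   if (parseBranchProtection("standard", PBP, Err)) {
//     // PBP.Scope, PBP.Key and PBP.BranchTargetEnforcement are now set.
//   }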

} // namespace ARM
} // namespace llvm
#endif

// File: TargetParser/CSKYTargetParser.h
//===-- CSKYTargetParser - Parser for CSKY target features ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise CSKY hardware features
// such as FPU/CPU/ARCH/extensions and specific support such as HWDIV.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_CSKYTARGETPARSER_H
#define LLVM_TARGETPARSER_CSKYTARGETPARSER_H

#include "llvm/TargetParser/Triple.h"
#include <vector>

namespace llvm {
class StringRef;

namespace CSKY {

// Arch extension modifiers for CPUs.
enum ArchExtKind : uint64_t {
  AEK_INVALID = 0,
  AEK_NONE = 1,
  AEK_FPUV2SF = 1 << 1,
  AEK_FPUV2DF = 1 << 2,
  AEK_FDIVDU = 1 << 3,
  AEK_FPUV3HI = 1 << 4,
  AEK_FPUV3HF = 1 << 5,
  AEK_FPUV3SF = 1 << 6,
  AEK_FPUV3DF = 1 << 7,
  AEK_FLOATE1 = 1 << 8,
  AEK_FLOAT1E2 = 1 << 9,
  AEK_FLOAT1E3 = 1 << 10,
  AEK_FLOAT3E4 = 1 << 11,
  AEK_FLOAT7E60 = 1 << 12,
  AEK_HWDIV = 1 << 13,
  AEK_STLD = 1 << 14,
  AEK_PUSHPOP = 1 << 15,
  AEK_EDSP = 1 << 16,
  AEK_DSP1E2 = 1 << 17,
  AEK_DSPE60 = 1 << 18,
  AEK_DSPV2 = 1 << 19,
  AEK_DSPSILAN = 1 << 20,
  AEK_ELRW = 1 << 21,
  AEK_TRUST = 1 << 22,
  AEK_JAVA = 1 << 23,
  AEK_CACHE = 1 << 24,
  AEK_NVIC = 1 << 25,
  AEK_DOLOOP = 1 << 26,
  AEK_HIGHREG = 1 << 27,
  AEK_SMART = 1 << 28,
  AEK_VDSP2E3 = 1 << 29,
  AEK_VDSP2E60F = 1 << 30,
  AEK_VDSPV2 = 1ULL << 31,
  AEK_HARDTP = 1ULL << 32,
  AEK_SOFTTP = 1ULL << 33,
  AEK_ISTACK = 1ULL << 34,
  AEK_CONSTPOOL = 1ULL << 35,
  AEK_STACKSIZE = 1ULL << 36,
  AEK_CCRT = 1ULL << 37,
  AEK_VDSPV1 = 1ULL << 38,
  AEK_E1 = 1ULL << 39,
  AEK_E2 = 1ULL << 40,
  AEK_2E3 = 1ULL << 41,
  AEK_MP = 1ULL << 42,
  AEK_3E3R1 = 1ULL << 43,
  AEK_3E3R2 = 1ULL << 44,
  AEK_3E3R3 = 1ULL << 45,
  AEK_3E7 = 1ULL << 46,
  AEK_MP1E2 = 1ULL << 47,
  AEK_7E10 = 1ULL << 48,
  AEK_10E60 = 1ULL << 49
};

// Cumulative arch extension sets: each entry combines an extension with the
// extensions it implies.
enum MultiArchExtKind : uint64_t {
  MAEK_E1 = CSKY::AEK_E1 | CSKY::AEK_ELRW,
  MAEK_E2 = CSKY::AEK_E2 | CSKY::MAEK_E1,
  MAEK_2E3 = CSKY::AEK_2E3 | CSKY::MAEK_E2,
  MAEK_MP = CSKY::AEK_MP | CSKY::MAEK_2E3,
  MAEK_3E3R1 = CSKY::AEK_3E3R1,
  MAEK_3E3R2 = CSKY::AEK_3E3R1 | CSKY::AEK_3E3R2 | CSKY::AEK_DOLOOP,
  MAEK_3E7 = CSKY::AEK_3E7 | CSKY::MAEK_2E3,
  MAEK_MP1E2 = CSKY::AEK_MP1E2 | CSKY::MAEK_3E7,
  MAEK_7E10 = CSKY::AEK_7E10 | CSKY::MAEK_3E7,
  MAEK_10E60 = CSKY::AEK_10E60 | CSKY::MAEK_7E10,
};

// FPU names.
enum CSKYFPUKind {
#define CSKY_FPU(NAME, KIND, VERSION) KIND,
#include "CSKYTargetParser.def"
  FK_LAST
};

// FPU Version
enum class FPUVersion {
  NONE,
  FPV2,
  FPV3,
};

// Arch names.
enum class ArchKind {
#define CSKY_ARCH(NAME, ID, ARCH_BASE_EXT) ID,
#include "CSKYTargetParser.def"
};

// List of Arch Extension names.
// FIXME: TableGen this.
struct ExtName {
  const char *NameCStr;
  size_t NameLength;
  uint64_t ID;
  const char *Feature;
  const char *NegFeature;

  StringRef getName() const { return StringRef(NameCStr, NameLength); }
};

const CSKY::ExtName CSKYARCHExtNames[] = {
#define CSKY_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)                      \
  {NAME, sizeof(NAME) - 1, ID, FEATURE, NEGFEATURE},
#include "CSKYTargetParser.def"
};

// List of CPU names and their arches.
template <typename T> struct CpuNames {
  const char *NameCStr;
  size_t NameLength;
  T ArchID;
  uint64_t defaultExt;

  StringRef getName() const { return StringRef(NameCStr, NameLength); }
};
const CpuNames<CSKY::ArchKind> CPUNames[] = {
#define CSKY_CPU_NAME(NAME, ARCH_ID, DEFAULT_EXT)                              \
  {NAME, sizeof(NAME) - 1, CSKY::ArchKind::ARCH_ID, DEFAULT_EXT},
#include "llvm/TargetParser/CSKYTargetParser.def"
};

// FIXME: TableGen this.
// The entries must appear in the order listed in CSKY::CSKYFPUKind for correct
// indexing.
struct FPUName {
  const char *NameCStr;
  size_t NameLength;
  CSKYFPUKind ID;
  FPUVersion FPUVer;

  StringRef getName() const { return StringRef(NameCStr, NameLength); }
};

static const FPUName FPUNames[] = {
#define CSKY_FPU(NAME, KIND, VERSION) {NAME, sizeof(NAME) - 1, KIND, VERSION},
#include "llvm/TargetParser/CSKYTargetParser.def"
};

// List of canonical arch names.
template <typename T> struct ArchNames {
  const char *NameCStr;
  size_t NameLength;
  T ID;
  uint64_t archBaseExt;
  StringRef getName() const { return StringRef(NameCStr, NameLength); }
};
const ArchNames<CSKY::ArchKind> ARCHNames[] = {
#define CSKY_ARCH(NAME, ID, ARCH_BASE_EXT)                                     \
  {NAME, sizeof(NAME) - 1, CSKY::ArchKind::ID, ARCH_BASE_EXT},
#include "llvm/TargetParser/CSKYTargetParser.def"
};

StringRef getArchName(ArchKind AK);
StringRef getDefaultCPU(StringRef Arch);
StringRef getArchExtName(uint64_t ArchExtKind);
StringRef getArchExtFeature(StringRef ArchExt);
uint64_t getDefaultExtensions(StringRef CPU);
bool getExtensionFeatures(uint64_t Extensions,
                          std::vector<StringRef> &Features);

// Information by ID
StringRef getFPUName(unsigned FPUKind);
FPUVersion getFPUVersion(unsigned FPUKind);

bool getFPUFeatures(CSKYFPUKind Kind, std::vector<StringRef> &Features);

// Parser
ArchKind parseArch(StringRef Arch);
ArchKind parseCPUArch(StringRef CPU);
uint64_t parseArchExt(StringRef ArchExt);
void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
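
// Usage sketch (hypothetical CPU name, not taken from this header):
//
//   ArchKind AK = parseCPUArch("ck810");
//   std::vector<StringRef> Features;
//   getExtensionFeatures(getDefaultExtensions("ck810"), Features);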

} // namespace CSKY

} // namespace llvm

#endif

// File: TargetParser/LoongArchTargetParser.h
//===-- LoongArchTargetParser - Parser for LoongArch features ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise LoongArch hardware features
// such as CPU/ARCH and extension names.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_LOONGARCHTARGETPARSER_H
#define LLVM_TARGETPARSER_LOONGARCHTARGETPARSER_H

#include "llvm/TargetParser/Triple.h"
#include <vector>

namespace llvm {
class StringRef;

namespace LoongArch {

enum FeatureKind : uint32_t {
  // 64-bit ISA is available.
  FK_64BIT = 1 << 1,

  // Single-precision floating-point instructions are available.
  FK_FP32 = 1 << 2,

  // Double-precision floating-point instructions are available.
  FK_FP64 = 1 << 3,

  // Loongson SIMD Extension is available.
  FK_LSX = 1 << 4,

  // Loongson Advanced SIMD Extension is available.
  FK_LASX = 1 << 5,

  // Loongson Binary Translation Extension is available.
  FK_LBT = 1 << 6,

  // Loongson Virtualization Extension is available.
  FK_LVZ = 1 << 7,

  // Allow memory accesses to be unaligned.
  FK_UAL = 1 << 8,
};

struct FeatureInfo {
  StringRef Name;
  FeatureKind Kind;
};

enum class ArchKind {
#define LOONGARCH_ARCH(NAME, KIND, FEATURES) KIND,
#include "LoongArchTargetParser.def"
};

struct ArchInfo {
  StringRef Name;
  ArchKind Kind;
  uint32_t Features;
};

bool isValidArchName(StringRef Arch);
bool getArchFeatures(StringRef Arch, std::vector<StringRef> &Features);
bool isValidCPUName(StringRef TuneCPU);
void fillValidCPUList(SmallVectorImpl<StringRef> &Values);
StringRef getDefaultArch(bool Is64Bit);
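
// Usage sketch ("la464" and its features come from
// LoongArchTargetParser.def above):
//
//   std::vector<StringRef> Features;
//   if (getArchFeatures("la464", Features)) {
//     // Features should hold "+64bit", "+f", "+d", "+lsx", "+lasx", "+ual".
//   }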

} // namespace LoongArch

} // namespace llvm

#endif // LLVM_TARGETPARSER_LOONGARCHTARGETPARSER_H

// File: TargetParser/ARMTargetParser.def
//===- ARMTargetParser.def - ARM target parsing defines ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides defines to build up the ARM target parser's logic.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

#ifndef ARM_FPU
#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION)
#endif
ARM_FPU("invalid", FK_INVALID, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("none", FK_NONE, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfp", FK_VFP, FPUVersion::VFPV2, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv2", FK_VFPV2, FPUVersion::VFPV2, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv3", FK_VFPV3, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv3-fp16", FK_VFPV3_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv3-d16", FK_VFPV3_D16, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("vfpv3-d16-fp16", FK_VFPV3_D16_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("vfpv3xd", FK_VFPV3XD, FPUVersion::VFPV3, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("vfpv3xd-fp16", FK_VFPV3XD_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("vfpv4", FK_VFPV4, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("vfpv4-d16", FK_VFPV4_D16, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("fpv4-sp-d16", FK_FPV4_SP_D16, FPUVersion::VFPV4, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("fpv5-d16", FK_FPV5_D16, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("fpv5-sp-d16", FK_FPV5_SP_D16, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("fp-armv8", FK_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::None, FPURestriction::None)
ARM_FPU("fp-armv8-fullfp16-d16", FK_FP_ARMV8_FULLFP16_D16, FPUVersion::VFPV5_FULLFP16, NeonSupportLevel::None, FPURestriction::D16)
ARM_FPU("fp-armv8-fullfp16-sp-d16", FK_FP_ARMV8_FULLFP16_SP_D16, FPUVersion::VFPV5_FULLFP16, NeonSupportLevel::None, FPURestriction::SP_D16)
ARM_FPU("neon", FK_NEON, FPUVersion::VFPV3, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("neon-fp16", FK_NEON_FP16, FPUVersion::VFPV3_FP16, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("neon-vfpv4", FK_NEON_VFPV4, FPUVersion::VFPV4, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("neon-fp-armv8", FK_NEON_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::Neon, FPURestriction::None)
ARM_FPU("crypto-neon-fp-armv8", FK_CRYPTO_NEON_FP_ARMV8, FPUVersion::VFPV5, NeonSupportLevel::Crypto,
        FPURestriction::None)
ARM_FPU("softvfp", FK_SOFTVFP, FPUVersion::NONE, NeonSupportLevel::None, FPURestriction::None)
#undef ARM_FPU

#ifndef ARM_ARCH
#define ARM_ARCH(NAME, ID, CPU_ATTR, ARCH_FEATURE, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
#endif
ARM_ARCH("invalid", INVALID, "", "+",
          ARMBuildAttrs::CPUArch::Pre_v4, FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv4", ARMV4, "4", "+v4", ARMBuildAttrs::CPUArch::v4,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv4t", ARMV4T, "4T", "+v4t", ARMBuildAttrs::CPUArch::v4T,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv5t", ARMV5T, "5T", "+v5", ARMBuildAttrs::CPUArch::v5T,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv5te", ARMV5TE, "5TE", "+v5e", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_DSP)
ARM_ARCH("armv5tej", ARMV5TEJ, "5TEJ", "+v5e", ARMBuildAttrs::CPUArch::v5TEJ,
          FK_NONE, ARM::AEK_DSP)
ARM_ARCH("armv6", ARMV6, "6", "+v6", ARMBuildAttrs::CPUArch::v6,
          FK_VFPV2, ARM::AEK_DSP)
ARM_ARCH("armv6k", ARMV6K, "6K", "+v6k", ARMBuildAttrs::CPUArch::v6K,
          FK_VFPV2, ARM::AEK_DSP)
ARM_ARCH("armv6t2", ARMV6T2, "6T2", "+v6t2", ARMBuildAttrs::CPUArch::v6T2,
          FK_NONE, ARM::AEK_DSP)
ARM_ARCH("armv6kz", ARMV6KZ, "6KZ", "+v6kz", ARMBuildAttrs::CPUArch::v6KZ,
          FK_VFPV2, (ARM::AEK_SEC | ARM::AEK_DSP))
ARM_ARCH("armv6-m", ARMV6M, "6-M", "+v6m", ARMBuildAttrs::CPUArch::v6_M,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv7-a", ARMV7A, "7-A", "+v7", ARMBuildAttrs::CPUArch::v7,
          FK_NEON, ARM::AEK_DSP)
ARM_ARCH("armv7ve", ARMV7VE, "7VE", "+v7ve", ARMBuildAttrs::CPUArch::v7,
          FK_NEON, (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT |
          ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv7-r", ARMV7R, "7-R", "+v7r", ARMBuildAttrs::CPUArch::v7,
          FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv7-m", ARMV7M, "7-M", "+v7m", ARMBuildAttrs::CPUArch::v7,
          FK_NONE, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv7e-m", ARMV7EM, "7E-M", "+v7em", ARMBuildAttrs::CPUArch::v7E_M,
          FK_NONE, (ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP))
ARM_ARCH("armv8-a", ARMV8A, "8-A", "+v8a", ARMBuildAttrs::CPUArch::v8_A,
         FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.1-a", ARMV8_1A, "8.1-A", "+v8.1a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8.2-a", ARMV8_2A, "8.2-A", "+v8.2a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
ARM_ARCH("armv8.3-a", ARMV8_3A, "8.3-A", "+v8.3a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS))
ARM_ARCH("armv8.4-a", ARMV8_4A, "8.4-A", "+v8.4a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD))
ARM_ARCH("armv8.5-a", ARMV8_5A, "8.5-A", "+v8.5a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD))
ARM_ARCH("armv8.6-a", ARMV8_6A, "8.6-A", "+v8.6a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC        | ARM::AEK_MP   | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP  | ARM::AEK_CRC  | ARM::AEK_RAS |
          ARM::AEK_DOTPROD    | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8.7-a", ARMV8_7A, "8.7-A", "+v8.7a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC        | ARM::AEK_MP   | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP  | ARM::AEK_CRC  | ARM::AEK_RAS |
          ARM::AEK_DOTPROD    | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8.8-a", ARMV8_8A, "8.8-A", "+v8.8a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_SHA2 | ARM::AEK_AES |
          ARM::AEK_I8MM))
ARM_ARCH("armv8.9-a", ARMV8_9A, "8.9-A", "+v8.9a",
         ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_SHA2 | ARM::AEK_AES |
          ARM::AEK_I8MM))
ARM_ARCH("armv9-a", ARMV9A, "9-A", "+v9a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD))
ARM_ARCH("armv9.1-a", ARMV9_1A, "9.1-A", "+v9.1a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv9.2-a", ARMV9_2A, "9.2-A", "+v9.2a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv9.3-a", ARMV9_3A, "9.3-A", "+v9.3a",
         ARMBuildAttrs::CPUArch::v9_A, FK_CRYPTO_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv9.4-a", ARMV9_4A, "9.4-A", "+v9.4a",
         ARMBuildAttrs::CPUArch::v9_A, FK_NEON_FP_ARMV8,
         (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
          ARM::AEK_HWDIVTHUMB | ARM::AEK_DSP | ARM::AEK_CRC | ARM::AEK_RAS |
          ARM::AEK_DOTPROD | ARM::AEK_BF16 | ARM::AEK_I8MM))
ARM_ARCH("armv8-r", ARMV8R, "8-R", "+v8r", ARMBuildAttrs::CPUArch::v8_R,
          FK_NEON_FP_ARMV8,
          (ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB |
           ARM::AEK_DSP | ARM::AEK_CRC))
ARM_ARCH("armv8-m.base", ARMV8MBaseline, "8-M.Baseline", "+v8m.base",
          ARMBuildAttrs::CPUArch::v8_M_Base, FK_NONE, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv8-m.main", ARMV8MMainline, "8-M.Mainline", "+v8m.main",
          ARMBuildAttrs::CPUArch::v8_M_Main, FK_FPV5_D16, ARM::AEK_HWDIVTHUMB)
ARM_ARCH("armv8.1-m.main", ARMV8_1MMainline, "8.1-M.Mainline", "+v8.1m.main",
          ARMBuildAttrs::CPUArch::v8_1_M_Main, FK_FP_ARMV8_FULLFP16_SP_D16, ARM::AEK_HWDIVTHUMB | ARM::AEK_RAS | ARM::AEK_LOB)
// Non-standard Arch names.
ARM_ARCH("iwmmxt", IWMMXT, "iwmmxt", "+", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("iwmmxt2", IWMMXT2, "iwmmxt2", "+", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("xscale", XSCALE, "xscale", "+v5e", ARMBuildAttrs::CPUArch::v5TE,
          FK_NONE, ARM::AEK_NONE)
ARM_ARCH("armv7s", ARMV7S, "7-S", "+v7s", ARMBuildAttrs::CPUArch::v7,
          FK_NEON_VFPV4, ARM::AEK_DSP)
ARM_ARCH("armv7k", ARMV7K, "7-K", "+v7k", ARMBuildAttrs::CPUArch::v7,
          FK_NONE, ARM::AEK_DSP)
#undef ARM_ARCH

#ifndef ARM_ARCH_EXT_NAME
#define ARM_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
ARM_ARCH_EXT_NAME("invalid",  ARM::AEK_INVALID,  {},  {})
ARM_ARCH_EXT_NAME("none",     ARM::AEK_NONE,     {},  {})
ARM_ARCH_EXT_NAME("crc",      ARM::AEK_CRC,      "+crc",   "-crc")
ARM_ARCH_EXT_NAME("crypto",   ARM::AEK_CRYPTO,   "+crypto","-crypto")
ARM_ARCH_EXT_NAME("sha2",     ARM::AEK_SHA2,     "+sha2",  "-sha2")
ARM_ARCH_EXT_NAME("aes",      ARM::AEK_AES,      "+aes",   "-aes")
ARM_ARCH_EXT_NAME("dotprod",  ARM::AEK_DOTPROD,  "+dotprod","-dotprod")
ARM_ARCH_EXT_NAME("dsp",      ARM::AEK_DSP,      "+dsp",   "-dsp")
ARM_ARCH_EXT_NAME("fp",       ARM::AEK_FP,       {},  {})
ARM_ARCH_EXT_NAME("fp.dp",    ARM::AEK_FP_DP,    {},  {})
ARM_ARCH_EXT_NAME("mve",     (ARM::AEK_DSP | ARM::AEK_SIMD), "+mve", "-mve")
ARM_ARCH_EXT_NAME("mve.fp",  (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP), "+mve.fp", "-mve.fp")
ARM_ARCH_EXT_NAME("idiv",     (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB), {}, {})
ARM_ARCH_EXT_NAME("mp",       ARM::AEK_MP,       {},  {})
ARM_ARCH_EXT_NAME("simd",     ARM::AEK_SIMD,     {},  {})
ARM_ARCH_EXT_NAME("sec",      ARM::AEK_SEC,      {},  {})
ARM_ARCH_EXT_NAME("virt",     ARM::AEK_VIRT,     {},  {})
ARM_ARCH_EXT_NAME("fp16",     ARM::AEK_FP16,     "+fullfp16",  "-fullfp16")
ARM_ARCH_EXT_NAME("ras",      ARM::AEK_RAS,      "+ras", "-ras")
ARM_ARCH_EXT_NAME("os",       ARM::AEK_OS,       {},  {})
ARM_ARCH_EXT_NAME("iwmmxt",   ARM::AEK_IWMMXT,   {},  {})
ARM_ARCH_EXT_NAME("iwmmxt2",  ARM::AEK_IWMMXT2,  {},  {})
ARM_ARCH_EXT_NAME("maverick", ARM::AEK_MAVERICK, {},  {})
ARM_ARCH_EXT_NAME("xscale",   ARM::AEK_XSCALE,   {},  {})
ARM_ARCH_EXT_NAME("fp16fml",  ARM::AEK_FP16FML,  "+fp16fml", "-fp16fml")
ARM_ARCH_EXT_NAME("bf16",     ARM::AEK_BF16,     "+bf16",    "-bf16")
ARM_ARCH_EXT_NAME("sb",       ARM::AEK_SB,       "+sb",      "-sb")
ARM_ARCH_EXT_NAME("i8mm",     ARM::AEK_I8MM,     "+i8mm",    "-i8mm")
ARM_ARCH_EXT_NAME("lob",      ARM::AEK_LOB,      "+lob",   "-lob")
ARM_ARCH_EXT_NAME("cdecp0",   ARM::AEK_CDECP0,   "+cdecp0",  "-cdecp0")
ARM_ARCH_EXT_NAME("cdecp1",   ARM::AEK_CDECP1,   "+cdecp1",  "-cdecp1")
ARM_ARCH_EXT_NAME("cdecp2",   ARM::AEK_CDECP2,   "+cdecp2",  "-cdecp2")
ARM_ARCH_EXT_NAME("cdecp3",   ARM::AEK_CDECP3,   "+cdecp3",  "-cdecp3")
ARM_ARCH_EXT_NAME("cdecp4",   ARM::AEK_CDECP4,   "+cdecp4",  "-cdecp4")
ARM_ARCH_EXT_NAME("cdecp5",   ARM::AEK_CDECP5,   "+cdecp5",  "-cdecp5")
ARM_ARCH_EXT_NAME("cdecp6",   ARM::AEK_CDECP6,   "+cdecp6",  "-cdecp6")
ARM_ARCH_EXT_NAME("cdecp7",   ARM::AEK_CDECP7,   "+cdecp7",  "-cdecp7")
ARM_ARCH_EXT_NAME("pacbti",   ARM::AEK_PACBTI,   "+pacbti",  "-pacbti")
#undef ARM_ARCH_EXT_NAME

#ifndef ARM_HW_DIV_NAME
#define ARM_HW_DIV_NAME(NAME, ID)
#endif
ARM_HW_DIV_NAME("invalid", ARM::AEK_INVALID)
ARM_HW_DIV_NAME("none", ARM::AEK_NONE)
ARM_HW_DIV_NAME("thumb", ARM::AEK_HWDIVTHUMB)
ARM_HW_DIV_NAME("arm", ARM::AEK_HWDIVARM)
ARM_HW_DIV_NAME("arm,thumb", (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
#undef ARM_HW_DIV_NAME

#ifndef ARM_CPU_NAME
#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
#endif
ARM_CPU_NAME("arm8", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm810", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm", ARMV4, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm110", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm1100", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("strongarm1110", ARMV4, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm7tdmi", ARMV4T, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm7tdmi-s", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm710t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm720t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm9", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm9tdmi", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm920", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm920t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm922t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm940t", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("ep9312", ARMV4T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm10tdmi", ARMV5T, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1020t", ARMV5T, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm9e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm946e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm966e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm968e-s", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm10e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1020e", ARMV5TE, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1022e", ARMV5TE, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm926ej-s", ARMV5TEJ, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136j-s", ARMV6, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1136jf-s", ARMV6, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("mpcore", ARMV6K, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("mpcorenovfp", ARMV6K, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1176jz-s", ARMV6KZ, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("arm1176jzf-s", ARMV6KZ, FK_VFPV2, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1156t2-s", ARMV6T2, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("arm1156t2f-s", ARMV6T2, FK_VFPV2, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m0", ARMV6M, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m0plus", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m1", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("sc000", ARMV6M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-a5", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP))
ARM_CPU_NAME("cortex-a7", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a8", ARMV7A, FK_NEON, false, ARM::AEK_SEC)
ARM_CPU_NAME("cortex-a9", ARMV7A, FK_NEON_FP16, false, (ARM::AEK_SEC | ARM::AEK_MP))
ARM_CPU_NAME("cortex-a12", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a15", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-a17", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_SEC | ARM::AEK_MP | ARM::AEK_VIRT | ARM::AEK_HWDIVARM |
              ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("krait", ARMV7A, FK_NEON_VFPV4, false,
             (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
ARM_CPU_NAME("cortex-r4", ARMV7R, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-r4f", ARMV7R, FK_VFPV3_D16, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-r5", ARMV7R, FK_VFPV3_D16, false,
             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r7", ARMV7R, FK_VFPV3_D16_FP16, false,
             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r8", ARMV7R, FK_VFPV3_D16_FP16, false,
             (ARM::AEK_MP | ARM::AEK_HWDIVARM))
ARM_CPU_NAME("cortex-r52", ARMV8R, FK_NEON_FP_ARMV8, true, ARM::AEK_NONE)
ARM_CPU_NAME("sc300", ARMV7M, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m3", ARMV7M, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m4", ARMV7EM, FK_FPV4_SP_D16, true, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m7", ARMV7EM, FK_FPV5_D16, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m23", ARMV8MBaseline, FK_NONE, false, ARM::AEK_NONE)
ARM_CPU_NAME("cortex-m33", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
ARM_CPU_NAME("cortex-m35p", ARMV8MMainline, FK_FPV5_SP_D16, false, ARM::AEK_DSP)
ARM_CPU_NAME("cortex-m55", ARMV8_1MMainline, FK_FP_ARMV8_FULLFP16_D16, false,
             (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP | ARM::AEK_FP16))
ARM_CPU_NAME("cortex-m85", ARMV8_1MMainline, FK_FP_ARMV8_FULLFP16_D16, false,
             (ARM::AEK_DSP | ARM::AEK_SIMD | ARM::AEK_FP | ARM::AEK_FP16 |
              ARM::AEK_RAS | ARM::AEK_PACBTI))
ARM_CPU_NAME("cortex-a32", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a35", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a53", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a55", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a57", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a72", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a73", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("cortex-a75", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a76", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a76ae", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a77", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
            (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a78", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-a78c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             ARM::AEK_FP16 | ARM::AEK_DOTPROD)
ARM_CPU_NAME("cortex-a710", ARMV9A, FK_NEON_FP_ARMV8, false,
             (ARM::AEK_DOTPROD | ARM::AEK_FP16FML | ARM::AEK_BF16 | ARM::AEK_SB |
              ARM::AEK_I8MM))
ARM_CPU_NAME("cortex-x1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cortex-x1c", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("neoverse-n1", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("neoverse-n2", ARMV8_5A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_BF16 | ARM::AEK_DOTPROD | ARM::AEK_I8MM | ARM::AEK_RAS |
              ARM::AEK_SB))
ARM_CPU_NAME("neoverse-v1", ARMV8_4A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_RAS | ARM::AEK_FP16 | ARM::AEK_BF16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("cyclone", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m3", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
ARM_CPU_NAME("exynos-m4", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("exynos-m5", ARMV8_2A, FK_CRYPTO_NEON_FP_ARMV8, false,
             (ARM::AEK_FP16 | ARM::AEK_DOTPROD))
ARM_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, ARM::AEK_CRC)
// Non-standard Arch names.
ARM_CPU_NAME("iwmmxt", IWMMXT, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("xscale", XSCALE, FK_NONE, true, ARM::AEK_NONE)
ARM_CPU_NAME("swift", ARMV7S, FK_NEON_VFPV4, true,
             (ARM::AEK_HWDIVARM | ARM::AEK_HWDIVTHUMB))
// Invalid CPU
ARM_CPU_NAME("invalid", INVALID, FK_INVALID, true, ARM::AEK_INVALID)
#undef ARM_CPU_NAME

// File: TargetParser/ARMTargetParser.h
//===-- ARMTargetParser - Parser for ARM target features --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise ARM hardware features
// such as FPU/CPU/ARCH/extensions and specific support such as HWDIV.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_ARMTARGETPARSER_H
#define LLVM_TARGETPARSER_ARMTARGETPARSER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/TargetParser/ARMTargetParserCommon.h"
#include <vector>

namespace llvm {

class Triple;

namespace ARM {

// Arch extension modifiers for CPUs.
// Note that this is not the same as the AArch64 list.
enum ArchExtKind : uint64_t {
  AEK_INVALID =     0,
  AEK_NONE =        1,
  AEK_CRC =         1 << 1,
  AEK_CRYPTO =      1 << 2,
  AEK_FP =          1 << 3,
  AEK_HWDIVTHUMB =  1 << 4,
  AEK_HWDIVARM =    1 << 5,
  AEK_MP =          1 << 6,
  AEK_SIMD =        1 << 7,
  AEK_SEC =         1 << 8,
  AEK_VIRT =        1 << 9,
  AEK_DSP =         1 << 10,
  AEK_FP16 =        1 << 11,
  AEK_RAS =         1 << 12,
  AEK_DOTPROD =     1 << 13,
  AEK_SHA2    =     1 << 14,
  AEK_AES     =     1 << 15,
  AEK_FP16FML =     1 << 16,
  AEK_SB      =     1 << 17,
  AEK_FP_DP   =     1 << 18,
  AEK_LOB     =     1 << 19,
  AEK_BF16    =     1 << 20,
  AEK_I8MM    =     1 << 21,
  AEK_CDECP0 =      1 << 22,
  AEK_CDECP1 =      1 << 23,
  AEK_CDECP2 =      1 << 24,
  AEK_CDECP3 =      1 << 25,
  AEK_CDECP4 =      1 << 26,
  AEK_CDECP5 =      1 << 27,
  AEK_CDECP6 =      1 << 28,
  AEK_CDECP7 =      1 << 29,
  AEK_PACBTI =      1 << 30,
  // Unsupported extensions.
  AEK_OS       =    1ULL << 59,
  AEK_IWMMXT   =    1ULL << 60,
  AEK_IWMMXT2  =    1ULL << 61,
  AEK_MAVERICK =    1ULL << 62,
  AEK_XSCALE   =    1ULL << 63,
};

// List of Arch Extension names.
struct ExtName {
  StringRef Name;
  uint64_t ID;
  StringRef Feature;
  StringRef NegFeature;
};

const ExtName ARCHExtNames[] = {
#define ARM_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)                       \
  {NAME, ID, FEATURE, NEGFEATURE},
#include "ARMTargetParser.def"
};

// List of HWDiv names (use getHWDivSynonym) and which architectural
// features they correspond to (use getHWDivFeatures).
const struct {
  StringRef Name;
  uint64_t ID;
} HWDivNames[] = {
#define ARM_HW_DIV_NAME(NAME, ID) {NAME, ID},
#include "ARMTargetParser.def"
};

// Arch names.
enum class ArchKind {
#define ARM_ARCH(NAME, ID, CPU_ATTR, ARCH_FEATURE, ARCH_ATTR, ARCH_FPU,        \
                 ARCH_BASE_EXT)                                                \
  ID,
#include "ARMTargetParser.def"
};

// List of CPU names and their arches.
// The same CPU can have multiple arches and can be default on multiple arches.
// When finding the Arch for a CPU, first-found prevails. Sort them accordingly.
// When this becomes table-generated, we'd probably need two tables.
struct CpuNames {
  StringRef Name;
  ArchKind ArchID;
  bool Default; // is $Name the default CPU for $ArchID ?
  uint64_t DefaultExtensions;
};

const CpuNames CPUNames[] = {
#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)           \
  {NAME, ARM::ArchKind::ID, IS_DEFAULT, DEFAULT_EXT},
#include "ARMTargetParser.def"
};

// FPU names.
enum FPUKind {
#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) KIND,
#include "ARMTargetParser.def"
  FK_LAST
};

// FPU Version
enum class FPUVersion {
  NONE,
  VFPV2,
  VFPV3,
  VFPV3_FP16,
  VFPV4,
  VFPV5,
  VFPV5_FULLFP16,
};

// An FPU name restricts the FPU in one of three ways:
enum class FPURestriction {
  None = 0, ///< No restriction
  D16,      ///< Only 16 D registers
  SP_D16    ///< Only single-precision instructions, with 16 D registers
};

// An FPU name implies one of three levels of Neon support:
enum class NeonSupportLevel {
  None = 0, ///< No Neon
  Neon,     ///< Neon
  Crypto    ///< Neon with Crypto
};

// v6/v7/v8 Profile
enum class ProfileKind { INVALID = 0, A, R, M };

// List of canonical FPU names (use getFPUSynonym) and which architectural
// features they correspond to (use getFPUFeatures).
// The entries must appear in the order listed in ARM::FPUKind for correct
// indexing.
struct FPUName {
  StringRef Name;
  FPUKind ID;
  FPUVersion FPUVer;
  NeonSupportLevel NeonSupport;
  FPURestriction Restriction;
};

static const FPUName FPUNames[] = {
#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION)                \
  {NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION},
#include "llvm/TargetParser/ARMTargetParser.def"
};

// List of canonical arch names (use getArchSynonym).
// This table also provides the build attribute fields for CPU arch
// and Arch ID, according to the Addenda to the ARM ABI, chapters
// 2.4 and 2.3.5.2 respectively.
// FIXME: SubArch values were simplified to fit into the expectations
// of the triples and are not conforming with their official names.
// Check to see if the expectation should be changed.
struct ArchNames {
  StringRef Name;
  StringRef CPUAttr; // CPU class in build attributes.
  StringRef ArchFeature;
  FPUKind DefaultFPU;
  uint64_t ArchBaseExtensions;
  ArchKind ID;
  ARMBuildAttrs::CPUArch ArchAttr; // Arch ID in build attributes.

  // Return ArchFeature without the leading "+".
  StringRef getSubArch() const { return ArchFeature.substr(1); }
};

static const ArchNames ARMArchNames[] = {
#define ARM_ARCH(NAME, ID, CPU_ATTR, ARCH_FEATURE, ARCH_ATTR, ARCH_FPU,        \
                 ARCH_BASE_EXT)                                                \
  {NAME,          CPU_ATTR,     ARCH_FEATURE, ARCH_FPU,                        \
   ARCH_BASE_EXT, ArchKind::ID, ARCH_ATTR},
#include "llvm/TargetParser/ARMTargetParser.def"
};

inline ArchKind &operator--(ArchKind &Kind) {
  assert((Kind >= ArchKind::ARMV8A && Kind <= ArchKind::ARMV9_3A) &&
         "We only expect operator-- to be called with ARMV8/V9");
  if (Kind == ArchKind::INVALID || Kind == ArchKind::ARMV8A ||
      Kind == ArchKind::ARMV8_1A || Kind == ArchKind::ARMV9A ||
      Kind == ArchKind::ARMV8R)
    Kind = ArchKind::INVALID;
  else {
    unsigned KindAsInteger = static_cast<unsigned>(Kind);
    Kind = static_cast<ArchKind>(--KindAsInteger);
  }
  return Kind;
}
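
// Hedged sketch of the ladder above:
//
//   ArchKind AK = ArchKind::ARMV8_2A;
//   --AK; // ARMV8_1A
//   --AK; // INVALID: ARMV8_1A is one of the stop points listed above.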

// Information by ID
StringRef getFPUName(FPUKind FPUKind);
FPUVersion getFPUVersion(FPUKind FPUKind);
NeonSupportLevel getFPUNeonSupportLevel(FPUKind FPUKind);
FPURestriction getFPURestriction(FPUKind FPUKind);

bool getFPUFeatures(FPUKind FPUKind, std::vector<StringRef> &Features);
bool getHWDivFeatures(uint64_t HWDivKind, std::vector<StringRef> &Features);
bool getExtensionFeatures(uint64_t Extensions,
                          std::vector<StringRef> &Features);

StringRef getArchName(ArchKind AK);
unsigned getArchAttr(ArchKind AK);
StringRef getCPUAttr(ArchKind AK);
StringRef getSubArch(ArchKind AK);
StringRef getArchExtName(uint64_t ArchExtKind);
StringRef getArchExtFeature(StringRef ArchExt);
bool appendArchExtFeatures(StringRef CPU, ARM::ArchKind AK, StringRef ArchExt,
                           std::vector<StringRef> &Features,
                           FPUKind &ArgFPUKind);
ArchKind convertV9toV8(ArchKind AK);

// Information by Name
FPUKind getDefaultFPU(StringRef CPU, ArchKind AK);
uint64_t getDefaultExtensions(StringRef CPU, ArchKind AK);
StringRef getDefaultCPU(StringRef Arch);
StringRef getCanonicalArchName(StringRef Arch);
StringRef getFPUSynonym(StringRef FPU);

// Parser
uint64_t parseHWDiv(StringRef HWDiv);
FPUKind parseFPU(StringRef FPU);
ArchKind parseArch(StringRef Arch);
uint64_t parseArchExt(StringRef ArchExt);
ArchKind parseCPUArch(StringRef CPU);
ProfileKind parseArchProfile(StringRef Arch);
unsigned parseArchVersion(StringRef Arch);

void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
StringRef computeDefaultTargetABI(const Triple &TT, StringRef CPU);

/// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
///
/// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
/// string then the triple's arch name is used.
StringRef getARMCPUForArch(const llvm::Triple &Triple, StringRef MArch = {});
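
// Example sketch (the "swift" default for armv7s comes from
// ARMTargetParser.def):
//
//   llvm::Triple T("armv7s-apple-ios");
//   StringRef CPU = ARM::getARMCPUForArch(T); // expected: "swift"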

} // namespace ARM
} // namespace llvm

#endif

// File: TargetParser/X86TargetParser.h
//===-- X86TargetParser - Parser for X86 features ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise X86 hardware features.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_X86TARGETPARSER_H
#define LLVM_TARGETPARSER_X86TARGETPARSER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringMap.h"

namespace llvm {
template <typename T> class SmallVectorImpl;
class StringRef;

namespace X86 {

// This should be kept in sync with libgcc/compiler-rt as it's included by
// clang as a proxy for what's in libgcc/compiler-rt.
enum ProcessorVendors : unsigned {
  VENDOR_DUMMY,
#define X86_VENDOR(ENUM, STRING) \
  ENUM,
#include "llvm/TargetParser/X86TargetParser.def"
  VENDOR_OTHER
};

// This should be kept in sync with libgcc/compiler-rt as it's included by
// clang as a proxy for what's in libgcc/compiler-rt.
enum ProcessorTypes : unsigned {
  CPU_TYPE_DUMMY,
#define X86_CPU_TYPE(ENUM, STRING) \
  ENUM,
#include "llvm/TargetParser/X86TargetParser.def"
  CPU_TYPE_MAX
};

// This should be kept in sync with libgcc/compiler-rt as it's included by
// clang as a proxy for what's in libgcc/compiler-rt.
enum ProcessorSubtypes : unsigned {
  CPU_SUBTYPE_DUMMY,
#define X86_CPU_SUBTYPE(ENUM, STRING) \
  ENUM,
#include "llvm/TargetParser/X86TargetParser.def"
  CPU_SUBTYPE_MAX
};

// This should be kept in sync with libgcc/compiler-rt as it should be used
// by clang as a proxy for what's in libgcc/compiler-rt.
enum ProcessorFeatures {
#define X86_FEATURE(ENUM, STRING) FEATURE_##ENUM,
#include "llvm/TargetParser/X86TargetParser.def"
  CPU_FEATURE_MAX
};

enum CPUKind {
  CK_None,
  CK_i386,
  CK_i486,
  CK_WinChipC6,
  CK_WinChip2,
  CK_C3,
  CK_i586,
  CK_Pentium,
  CK_PentiumMMX,
  CK_PentiumPro,
  CK_i686,
  CK_Pentium2,
  CK_Pentium3,
  CK_PentiumM,
  CK_C3_2,
  CK_Yonah,
  CK_Pentium4,
  CK_Prescott,
  CK_Nocona,
  CK_Core2,
  CK_Penryn,
  CK_Bonnell,
  CK_Silvermont,
  CK_Goldmont,
  CK_GoldmontPlus,
  CK_Tremont,
  CK_Nehalem,
  CK_Westmere,
  CK_SandyBridge,
  CK_IvyBridge,
  CK_Haswell,
  CK_Broadwell,
  CK_SkylakeClient,
  CK_SkylakeServer,
  CK_Cascadelake,
  CK_Cooperlake,
  CK_Cannonlake,
  CK_IcelakeClient,
  CK_Rocketlake,
  CK_IcelakeServer,
  CK_Tigerlake,
  CK_SapphireRapids,
  CK_Alderlake,
  CK_Raptorlake,
  CK_Meteorlake,
  CK_Sierraforest,
  CK_Grandridge,
  CK_Graniterapids,
  CK_GraniterapidsD,
  CK_Emeraldrapids,
  CK_KNL,
  CK_KNM,
  CK_Lakemont,
  CK_K6,
  CK_K6_2,
  CK_K6_3,
  CK_Athlon,
  CK_AthlonXP,
  CK_K8,
  CK_K8SSE3,
  CK_AMDFAM10,
  CK_BTVER1,
  CK_BTVER2,
  CK_BDVER1,
  CK_BDVER2,
  CK_BDVER3,
  CK_BDVER4,
  CK_ZNVER1,
  CK_ZNVER2,
  CK_ZNVER3,
  CK_ZNVER4,
  CK_x86_64,
  CK_x86_64_v2,
  CK_x86_64_v3,
  CK_x86_64_v4,
  CK_Geode,
};

/// Parse \p CPU string into a CPUKind. Will only accept 64-bit capable CPUs if
/// \p Only64Bit is true.
CPUKind parseArchX86(StringRef CPU, bool Only64Bit = false);
CPUKind parseTuneCPU(StringRef CPU, bool Only64Bit = false);
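
// Usage sketch ("skylake" corresponds to CK_SkylakeClient in CPUKind above):
//
//   CPUKind K = parseArchX86("skylake", /*Only64Bit=*/true);
//   if (K == CK_None) {
//     // Unknown CPU, or not 64-bit capable.
//   }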

/// Provide a list of valid CPU names. If \p Only64Bit is true, the list will
/// only contain 64-bit capable CPUs.
void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values,
                          bool Only64Bit = false);
/// Provide a list of valid -mtune names.
void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values,
                          bool Only64Bit = false);

/// Get the key feature for prioritizing target multiversioning.
ProcessorFeatures getKeyFeature(CPUKind Kind);

/// Fill in the features that \p CPU supports into \p Features.
/// "+" will be append in front of each feature if IfNeedPlus is true.
void getFeaturesForCPU(StringRef CPU, SmallVectorImpl<StringRef> &Features,
                       bool IfNeedPlus = false);

/// Set or clear entries in \p Features that are implied to be enabled/disabled
/// by the provided \p Feature.
void updateImpliedFeatures(StringRef Feature, bool Enabled,
                           StringMap<bool> &Features);
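
// Sketch (assuming the usual feature spelling "avx2"): enabling a feature
// also toggles whatever it implies.
//
//   StringMap<bool> Features;
//   updateImpliedFeatures("avx2", /*Enabled=*/true, Features);
//   // Entries such as Features["avx"] are expected to be set as well.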

char getCPUDispatchMangling(StringRef Name);
bool validateCPUSpecificCPUDispatch(StringRef Name);
uint64_t getCpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
unsigned getFeaturePriority(ProcessorFeatures Feat);

} // namespace X86
} // namespace llvm

#endif

// File: TargetParser/X86TargetParser.def
//===- X86TargetParser.def - X86 target parsing defines ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides defines to build up the X86 target parser's logic.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

#ifndef X86_VENDOR
#define X86_VENDOR(ENUM, STR)
#endif
X86_VENDOR(VENDOR_INTEL, "intel")
X86_VENDOR(VENDOR_AMD,   "amd")
#undef X86_VENDOR

// This macro is used for cpu types present in compiler-rt/libgcc.
#ifndef X86_CPU_TYPE
#define X86_CPU_TYPE(ENUM, STR)
#endif

#ifndef X86_CPU_TYPE_ALIAS
#define X86_CPU_TYPE_ALIAS(ENUM, STR)
#endif
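
// Illustrative example of the X-macro pattern this file uses: a client
// defines the macro of interest before inclusion to stamp out a table
// (sketch only; macros left undefined expand to nothing via the defaults
// above):
//
// \code
//   static const char *CPUTypeNames[] = {
//   #define X86_CPU_TYPE(ENUM, STR) STR,
//   #include "llvm/TargetParser/X86TargetParser.def"
//   };
// \endcode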

// This list must match what is implemented in libgcc and compiler-rt. Clang
// uses this to know how to implement __builtin_cpu_is.
X86_CPU_TYPE(INTEL_BONNELL,       "bonnell")
X86_CPU_TYPE(INTEL_CORE2,         "core2")
X86_CPU_TYPE(INTEL_COREI7,        "corei7")
X86_CPU_TYPE(AMDFAM10H,           "amdfam10h")
X86_CPU_TYPE(AMDFAM15H,           "amdfam15h")
X86_CPU_TYPE(INTEL_SILVERMONT,    "silvermont")
X86_CPU_TYPE(INTEL_KNL,           "knl")
X86_CPU_TYPE(AMD_BTVER1,          "btver1")
X86_CPU_TYPE(AMD_BTVER2,          "btver2")
X86_CPU_TYPE(AMDFAM17H,           "amdfam17h")
X86_CPU_TYPE(INTEL_KNM,           "knm")
X86_CPU_TYPE(INTEL_GOLDMONT,      "goldmont")
X86_CPU_TYPE(INTEL_GOLDMONT_PLUS, "goldmont-plus")
X86_CPU_TYPE(INTEL_TREMONT,       "tremont")
X86_CPU_TYPE(AMDFAM19H,           "amdfam19h")
X86_CPU_TYPE(ZHAOXIN_FAM7H,       "zhaoxin_fam7h")
X86_CPU_TYPE(INTEL_SIERRAFOREST,  "sierraforest")
X86_CPU_TYPE(INTEL_GRANDRIDGE,    "grandridge")

// Alternate names supported by __builtin_cpu_is and target multiversioning.
X86_CPU_TYPE_ALIAS(INTEL_BONNELL,    "atom")
X86_CPU_TYPE_ALIAS(AMDFAM10H,        "amdfam10")
X86_CPU_TYPE_ALIAS(AMDFAM15H,        "amdfam15")
X86_CPU_TYPE_ALIAS(INTEL_SILVERMONT, "slm")

#undef X86_CPU_TYPE_ALIAS
#undef X86_CPU_TYPE

// This macro is used for cpu subtypes present in compiler-rt/libgcc.
#ifndef X86_CPU_SUBTYPE
#define X86_CPU_SUBTYPE(ENUM, STR)
#endif

#ifndef X86_CPU_SUBTYPE_ALIAS
#define X86_CPU_SUBTYPE_ALIAS(ENUM, STR)
#endif

// This list must match what is implemented in libgcc and compiler-rt. Clang
// uses this to know how to implement __builtin_cpu_is.
X86_CPU_SUBTYPE(INTEL_COREI7_NEHALEM,        "nehalem")
X86_CPU_SUBTYPE(INTEL_COREI7_WESTMERE,       "westmere")
X86_CPU_SUBTYPE(INTEL_COREI7_SANDYBRIDGE,    "sandybridge")
X86_CPU_SUBTYPE(AMDFAM10H_BARCELONA,         "barcelona")
X86_CPU_SUBTYPE(AMDFAM10H_SHANGHAI,          "shanghai")
X86_CPU_SUBTYPE(AMDFAM10H_ISTANBUL,          "istanbul")
X86_CPU_SUBTYPE(AMDFAM15H_BDVER1,            "bdver1")
X86_CPU_SUBTYPE(AMDFAM15H_BDVER2,            "bdver2")
X86_CPU_SUBTYPE(AMDFAM15H_BDVER3,            "bdver3")
X86_CPU_SUBTYPE(AMDFAM15H_BDVER4,            "bdver4")
X86_CPU_SUBTYPE(AMDFAM17H_ZNVER1,            "znver1")
X86_CPU_SUBTYPE(INTEL_COREI7_IVYBRIDGE,      "ivybridge")
X86_CPU_SUBTYPE(INTEL_COREI7_HASWELL,        "haswell")
X86_CPU_SUBTYPE(INTEL_COREI7_BROADWELL,      "broadwell")
X86_CPU_SUBTYPE(INTEL_COREI7_SKYLAKE,        "skylake")
X86_CPU_SUBTYPE(INTEL_COREI7_SKYLAKE_AVX512, "skylake-avx512")
X86_CPU_SUBTYPE(INTEL_COREI7_CANNONLAKE,     "cannonlake")
X86_CPU_SUBTYPE(INTEL_COREI7_ICELAKE_CLIENT, "icelake-client")
X86_CPU_SUBTYPE(INTEL_COREI7_ICELAKE_SERVER, "icelake-server")
X86_CPU_SUBTYPE(AMDFAM17H_ZNVER2,            "znver2")
X86_CPU_SUBTYPE(INTEL_COREI7_CASCADELAKE,    "cascadelake")
X86_CPU_SUBTYPE(INTEL_COREI7_TIGERLAKE,      "tigerlake")
X86_CPU_SUBTYPE(INTEL_COREI7_COOPERLAKE,     "cooperlake")
X86_CPU_SUBTYPE(INTEL_COREI7_SAPPHIRERAPIDS, "sapphirerapids")
X86_CPU_SUBTYPE(INTEL_COREI7_ALDERLAKE,      "alderlake")
X86_CPU_SUBTYPE(AMDFAM19H_ZNVER3,            "znver3")
X86_CPU_SUBTYPE(INTEL_COREI7_ROCKETLAKE,     "rocketlake")
X86_CPU_SUBTYPE(ZHAOXIN_FAM7H_LUJIAZUI,      "zhaoxin_fam7h_lujiazui")
X86_CPU_SUBTYPE(AMDFAM19H_ZNVER4,            "znver4")
X86_CPU_SUBTYPE(INTEL_COREI7_GRANITERAPIDS,  "graniterapids")
X86_CPU_SUBTYPE(INTEL_COREI7_GRANITERAPIDS_D,"graniterapids-d")

// Alternate names supported by __builtin_cpu_is and target multiversioning.
X86_CPU_SUBTYPE_ALIAS(INTEL_COREI7_ALDERLAKE, "raptorlake")
X86_CPU_SUBTYPE_ALIAS(INTEL_COREI7_ALDERLAKE, "meteorlake")
X86_CPU_SUBTYPE_ALIAS(INTEL_COREI7_SAPPHIRERAPIDS, "emeraldrapids")

#undef X86_CPU_SUBTYPE_ALIAS
#undef X86_CPU_SUBTYPE

// This macro is used for cpu features present in compiler-rt/libgcc. The third
// parameter PRIORITY is as required by the attribute 'target' checking. Note
// that not all are supported/prioritized by GCC, so synchronization with GCC's
// implementation may require changing some existing values.
//
// We cannot just re-sort the list though because its order is dictated by the
// order of bits in CodeGenFunction::GetX86CpuSupportsMask.
#ifndef X86_FEATURE_COMPAT
#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) X86_FEATURE(ENUM, STR)
#endif

#ifndef X86_FEATURE
#define X86_FEATURE(ENUM, STR)
#endif

X86_FEATURE_COMPAT(CMOV,            "cmov",                  0)
X86_FEATURE_COMPAT(MMX,             "mmx",                   1)
X86_FEATURE_COMPAT(POPCNT,          "popcnt",                9)
X86_FEATURE_COMPAT(SSE,             "sse",                   2)
X86_FEATURE_COMPAT(SSE2,            "sse2",                  3)
X86_FEATURE_COMPAT(SSE3,            "sse3",                  4)
X86_FEATURE_COMPAT(SSSE3,           "ssse3",                 5)
X86_FEATURE_COMPAT(SSE4_1,          "sse4.1",                7)
X86_FEATURE_COMPAT(SSE4_2,          "sse4.2",                8)
X86_FEATURE_COMPAT(AVX,             "avx",                   12)
X86_FEATURE_COMPAT(AVX2,            "avx2",                  18)
X86_FEATURE_COMPAT(SSE4_A,          "sse4a",                 6)
X86_FEATURE_COMPAT(FMA4,            "fma4",                  14)
X86_FEATURE_COMPAT(XOP,             "xop",                   15)
X86_FEATURE_COMPAT(FMA,             "fma",                   16)
X86_FEATURE_COMPAT(AVX512F,         "avx512f",               19)
X86_FEATURE_COMPAT(BMI,             "bmi",                   13)
X86_FEATURE_COMPAT(BMI2,            "bmi2",                  17)
X86_FEATURE_COMPAT(AES,             "aes",                   10)
X86_FEATURE_COMPAT(PCLMUL,          "pclmul",                11)
X86_FEATURE_COMPAT(AVX512VL,        "avx512vl",              20)
X86_FEATURE_COMPAT(AVX512BW,        "avx512bw",              21)
X86_FEATURE_COMPAT(AVX512DQ,        "avx512dq",              22)
X86_FEATURE_COMPAT(AVX512CD,        "avx512cd",              23)
X86_FEATURE_COMPAT(AVX512ER,        "avx512er",              24)
X86_FEATURE_COMPAT(AVX512PF,        "avx512pf",              25)
X86_FEATURE_COMPAT(AVX512VBMI,      "avx512vbmi",            26)
X86_FEATURE_COMPAT(AVX512IFMA,      "avx512ifma",            27)
X86_FEATURE_COMPAT(AVX5124VNNIW,    "avx5124vnniw",          28)
X86_FEATURE_COMPAT(AVX5124FMAPS,    "avx5124fmaps",          29)
X86_FEATURE_COMPAT(AVX512VPOPCNTDQ, "avx512vpopcntdq",       30)
X86_FEATURE_COMPAT(AVX512VBMI2,     "avx512vbmi2",           31)
X86_FEATURE_COMPAT(GFNI,            "gfni",                  32)
X86_FEATURE_COMPAT(VPCLMULQDQ,      "vpclmulqdq",            33)
X86_FEATURE_COMPAT(AVX512VNNI,      "avx512vnni",            34)
X86_FEATURE_COMPAT(AVX512BITALG,    "avx512bitalg",          35)
X86_FEATURE_COMPAT(AVX512BF16,      "avx512bf16",            36)
X86_FEATURE_COMPAT(AVX512VP2INTERSECT, "avx512vp2intersect", 37)
// Features below here are not in libgcc/compiler-rt.
X86_FEATURE       (3DNOW,           "3dnow")
X86_FEATURE       (3DNOWA,          "3dnowa")
X86_FEATURE       (64BIT,           "64bit")
X86_FEATURE       (ADX,             "adx")
X86_FEATURE       (AMX_BF16,        "amx-bf16")
X86_FEATURE       (AMX_COMPLEX,     "amx-complex")
X86_FEATURE       (AMX_INT8,        "amx-int8")
X86_FEATURE       (AMX_TILE,        "amx-tile")
X86_FEATURE       (CLDEMOTE,        "cldemote")
X86_FEATURE       (CLFLUSHOPT,      "clflushopt")
X86_FEATURE       (CLWB,            "clwb")
X86_FEATURE       (CLZERO,          "clzero")
X86_FEATURE       (CMPXCHG16B,      "cx16")
X86_FEATURE       (CMPXCHG8B,       "cx8")
X86_FEATURE       (CRC32,           "crc32")
X86_FEATURE       (ENQCMD,          "enqcmd")
X86_FEATURE       (F16C,            "f16c")
X86_FEATURE       (FSGSBASE,        "fsgsbase")
X86_FEATURE       (FXSR,            "fxsr")
X86_FEATURE       (INVPCID,         "invpcid")
X86_FEATURE       (KL,              "kl")
X86_FEATURE       (WIDEKL,          "widekl")
X86_FEATURE       (LWP,             "lwp")
X86_FEATURE       (LZCNT,           "lzcnt")
X86_FEATURE       (MOVBE,           "movbe")
X86_FEATURE       (MOVDIR64B,       "movdir64b")
X86_FEATURE       (MOVDIRI,         "movdiri")
X86_FEATURE       (MWAITX,          "mwaitx")
X86_FEATURE       (PCONFIG,         "pconfig")
X86_FEATURE       (PKU,             "pku")
X86_FEATURE       (PREFETCHI,       "prefetchi")
X86_FEATURE       (PREFETCHWT1,     "prefetchwt1")
X86_FEATURE       (PRFCHW,          "prfchw")
X86_FEATURE       (PTWRITE,         "ptwrite")
X86_FEATURE       (RDPID,           "rdpid")
X86_FEATURE       (RDPRU,           "rdpru")
X86_FEATURE       (RDRND,           "rdrnd")
X86_FEATURE       (RDSEED,          "rdseed")
X86_FEATURE       (RTM,             "rtm")
X86_FEATURE       (SAHF,            "sahf")
X86_FEATURE       (SERIALIZE,       "serialize")
X86_FEATURE       (SGX,             "sgx")
X86_FEATURE       (SHA,             "sha")
X86_FEATURE       (SHSTK,           "shstk")
X86_FEATURE       (TBM,             "tbm")
X86_FEATURE       (TSXLDTRK,        "tsxldtrk")
X86_FEATURE       (UINTR,           "uintr")
X86_FEATURE       (VAES,            "vaes")
X86_FEATURE       (VZEROUPPER,      "vzeroupper")
X86_FEATURE       (WAITPKG,         "waitpkg")
X86_FEATURE       (WBNOINVD,        "wbnoinvd")
X86_FEATURE       (X87,             "x87")
X86_FEATURE       (XSAVE,           "xsave")
X86_FEATURE       (XSAVEC,          "xsavec")
X86_FEATURE       (XSAVEOPT,        "xsaveopt")
X86_FEATURE       (XSAVES,          "xsaves")
X86_FEATURE       (HRESET,          "hreset")
X86_FEATURE       (RAOINT,          "raoint")
X86_FEATURE       (AVX512FP16,      "avx512fp16")
X86_FEATURE       (AMX_FP16,        "amx-fp16")
X86_FEATURE       (CMPCCXADD,       "cmpccxadd")
X86_FEATURE       (AVXNECONVERT,    "avxneconvert")
X86_FEATURE       (AVXVNNI,         "avxvnni")
X86_FEATURE       (AVXIFMA,         "avxifma")
X86_FEATURE       (AVXVNNIINT8,     "avxvnniint8")
X86_FEATURE       (SHA512,          "sha512")
X86_FEATURE       (SM3,             "sm3")
X86_FEATURE       (SM4,             "sm4")
X86_FEATURE       (AVXVNNIINT16,    "avxvnniint16")
// These features aren't really CPU features, but the frontend can set them.
X86_FEATURE       (RETPOLINE_EXTERNAL_THUNK,    "retpoline-external-thunk")
X86_FEATURE       (RETPOLINE_INDIRECT_BRANCHES, "retpoline-indirect-branches")
X86_FEATURE       (RETPOLINE_INDIRECT_CALLS,    "retpoline-indirect-calls")
X86_FEATURE       (LVI_CFI,                     "lvi-cfi")
X86_FEATURE       (LVI_LOAD_HARDENING,          "lvi-load-hardening")
#undef X86_FEATURE_COMPAT
#undef X86_FEATURE
//===-- AArch64TargetParser - Parser for AArch64 features -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise AArch64 hardware features
// such as FPU/CPU/ARCH and extension names.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_AARCH64TARGETPARSER_H
#define LLVM_TARGETPARSER_AARCH64TARGETPARSER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/VersionTuple.h"
#include <array>
#include <vector>

namespace llvm {

class Triple;

namespace AArch64 {
// Function Multi Versioning CPU features. They must be kept in sync with
// compiler-rt enum CPUFeatures in lib/builtins/cpu_model.c with FEAT_MAX as
// sentinel.
enum CPUFeatures {
  FEAT_RNG,
  FEAT_FLAGM,
  FEAT_FLAGM2,
  FEAT_FP16FML,
  FEAT_DOTPROD,
  FEAT_SM4,
  FEAT_RDM,
  FEAT_LSE,
  FEAT_FP,
  FEAT_SIMD,
  FEAT_CRC,
  FEAT_SHA1,
  FEAT_SHA2,
  FEAT_SHA3,
  FEAT_AES,
  FEAT_PMULL,
  FEAT_FP16,
  FEAT_DIT,
  FEAT_DPB,
  FEAT_DPB2,
  FEAT_JSCVT,
  FEAT_FCMA,
  FEAT_RCPC,
  FEAT_RCPC2,
  FEAT_FRINTTS,
  FEAT_DGH,
  FEAT_I8MM,
  FEAT_BF16,
  FEAT_EBF16,
  FEAT_RPRES,
  FEAT_SVE,
  FEAT_SVE_BF16,
  FEAT_SVE_EBF16,
  FEAT_SVE_I8MM,
  FEAT_SVE_F32MM,
  FEAT_SVE_F64MM,
  FEAT_SVE2,
  FEAT_SVE_AES,
  FEAT_SVE_PMULL128,
  FEAT_SVE_BITPERM,
  FEAT_SVE_SHA3,
  FEAT_SVE_SM4,
  FEAT_SME,
  FEAT_MEMTAG,
  FEAT_MEMTAG2,
  FEAT_MEMTAG3,
  FEAT_SB,
  FEAT_PREDRES,
  FEAT_SSBS,
  FEAT_SSBS2,
  FEAT_BTI,
  FEAT_LS64,
  FEAT_LS64_V,
  FEAT_LS64_ACCDATA,
  FEAT_WFXT,
  FEAT_SME_F64,
  FEAT_SME_I64,
  FEAT_SME2,
  FEAT_MAX
};

static_assert(FEAT_MAX <= 64,
              "CPUFeatures enum must not have more than 64 entries");

// Arch extension modifiers for CPUs. These are labelled with their Arm ARM
// feature name (though the canonical reference for those is AArch64.td)
// clang-format off
enum ArchExtKind : uint64_t {
  AEK_NONE =        1,
  AEK_CRC =         1 << 1,  // FEAT_CRC32
  AEK_CRYPTO =      1 << 2,
  AEK_FP =          1 << 3,  // FEAT_FP
  AEK_SIMD =        1 << 4,  // FEAT_AdvSIMD
  AEK_FP16 =        1 << 5,  // FEAT_FP16
  AEK_PROFILE =     1 << 6,  // FEAT_SPE
  AEK_RAS =         1 << 7,  // FEAT_RAS, FEAT_RASv1p1
  AEK_LSE =         1 << 8,  // FEAT_LSE
  AEK_SVE =         1 << 9,  // FEAT_SVE
  AEK_DOTPROD =     1 << 10, // FEAT_DotProd
  AEK_RCPC =        1 << 11, // FEAT_LRCPC
  AEK_RDM =         1 << 12, // FEAT_RDM
  AEK_SM4 =         1 << 13, // FEAT_SM4, FEAT_SM3
  AEK_SHA3 =        1 << 14, // FEAT_SHA3, FEAT_SHA512
  AEK_SHA2 =        1 << 15, // FEAT_SHA1, FEAT_SHA256
  AEK_AES =         1 << 16, // FEAT_AES, FEAT_PMULL
  AEK_FP16FML =     1 << 17, // FEAT_FHM
  AEK_RAND =        1 << 18, // FEAT_RNG
  AEK_MTE =         1 << 19, // FEAT_MTE, FEAT_MTE2
  AEK_SSBS =        1 << 20, // FEAT_SSBS, FEAT_SSBS2
  AEK_SB =          1 << 21, // FEAT_SB
  AEK_PREDRES =     1 << 22, // FEAT_SPECRES
  AEK_SVE2 =        1 << 23, // FEAT_SVE2
  AEK_SVE2AES =     1 << 24, // FEAT_SVE_AES, FEAT_SVE_PMULL128
  AEK_SVE2SM4 =     1 << 25, // FEAT_SVE_SM4
  AEK_SVE2SHA3 =    1 << 26, // FEAT_SVE_SHA3
  AEK_SVE2BITPERM = 1 << 27, // FEAT_SVE_BitPerm
  AEK_TME =         1 << 28, // FEAT_TME
  AEK_BF16 =        1 << 29, // FEAT_BF16
  AEK_I8MM =        1 << 30, // FEAT_I8MM
  AEK_F32MM =       1ULL << 31, // FEAT_F32MM
  AEK_F64MM =       1ULL << 32, // FEAT_F64MM
  AEK_LS64 =        1ULL << 33, // FEAT_LS64, FEAT_LS64_V, FEAT_LS64_ACCDATA
  AEK_BRBE =        1ULL << 34, // FEAT_BRBE
  AEK_PAUTH =       1ULL << 35, // FEAT_PAuth
  AEK_FLAGM =       1ULL << 36, // FEAT_FlagM
  AEK_SME =         1ULL << 37, // FEAT_SME
  AEK_SMEF64F64 =   1ULL << 38, // FEAT_SME_F64F64
  AEK_SMEI16I64 =   1ULL << 39, // FEAT_SME_I16I64
  AEK_HBC =         1ULL << 40, // FEAT_HBC
  AEK_MOPS =        1ULL << 41, // FEAT_MOPS
  AEK_PERFMON =     1ULL << 42, // FEAT_PMUv3
  AEK_SME2 =        1ULL << 43, // FEAT_SME2
  AEK_SVE2p1 =      1ULL << 44, // FEAT_SVE2p1
  AEK_SME2p1 =      1ULL << 45, // FEAT_SME2p1
  AEK_B16B16 =      1ULL << 46, // FEAT_B16B16
  AEK_SMEF16F16 =   1ULL << 47, // FEAT_SMEF16F16
  AEK_CSSC =        1ULL << 48, // FEAT_CSSC
  AEK_RCPC3 =       1ULL << 49, // FEAT_LRCPC3
  AEK_THE =         1ULL << 50, // FEAT_THE
  AEK_D128 =        1ULL << 51, // FEAT_D128
  AEK_LSE128 =      1ULL << 52, // FEAT_LSE128
  AEK_SPECRES2 =    1ULL << 53, // FEAT_SPECRES2
  AEK_RASv2 =       1ULL << 54, // FEAT_RASv2
  AEK_ITE =         1ULL << 55, // FEAT_ITE
  AEK_GCS =         1ULL << 56, // FEAT_GCS
};
// clang-format on

// Represents an extension that can be enabled with -march=<arch>+<extension>.
// Typically these correspond to Arm Architecture extensions, unlike
// SubtargetFeature which may represent either an actual extension or some
// internal LLVM property.
struct ExtensionInfo {
  StringRef Name;              // Human readable name, e.g. "profile".
  ArchExtKind ID;              // The corresponding ArchExtKind, i.e. this
                               // extension's representation in the bitfield.
  StringRef Feature;           // -mattr enable string, e.g. "+spe"
  StringRef NegFeature;        // -mattr disable string, e.g. "-spe"
  CPUFeatures CPUFeature;      // Function Multi Versioning (FMV) bitfield value
                               // set in __aarch64_cpu_features
  StringRef DependentFeatures; // FMV enabled features string,
                               // e.g. "+dotprod,+fp-armv8,+neon"
  unsigned FmvPriority;        // FMV feature priority
  static constexpr unsigned MaxFMVPriority =
      1000; // Maximum priority for FMV feature
};

// NOTE: If adding a new extension here, consider adding it to ExtensionMap
// in AArch64AsmParser too, if supported as an extension name by binutils.
// clang-format off
inline constexpr ExtensionInfo Extensions[] = {
    {"aes", AArch64::AEK_AES, "+aes", "-aes", FEAT_AES, "+fp-armv8,+neon", 150},
    {"b16b16", AArch64::AEK_B16B16, "+b16b16", "-b16b16", FEAT_MAX, "", 0},
    {"bf16", AArch64::AEK_BF16, "+bf16", "-bf16", FEAT_BF16, "+bf16", 280},
    {"brbe", AArch64::AEK_BRBE, "+brbe", "-brbe", FEAT_MAX, "", 0},
    {"bti", AArch64::AEK_NONE, {}, {}, FEAT_BTI, "+bti", 510},
    {"crc", AArch64::AEK_CRC, "+crc", "-crc", FEAT_CRC, "+crc", 110},
    {"crypto", AArch64::AEK_CRYPTO, "+crypto", "-crypto", FEAT_MAX, "+aes,+sha2", 0},
    {"cssc", AArch64::AEK_CSSC, "+cssc", "-cssc", FEAT_MAX, "", 0},
    {"d128", AArch64::AEK_D128, "+d128", "-d128", FEAT_MAX, "", 0},
    {"dgh", AArch64::AEK_NONE, {}, {}, FEAT_DGH, "", 260},
    {"dit", AArch64::AEK_NONE, {}, {}, FEAT_DIT, "+dit", 180},
    {"dotprod", AArch64::AEK_DOTPROD, "+dotprod", "-dotprod", FEAT_DOTPROD, "+dotprod,+fp-armv8,+neon", 50},
    {"dpb", AArch64::AEK_NONE, {}, {}, FEAT_DPB, "+ccpp", 190},
    {"dpb2", AArch64::AEK_NONE, {}, {}, FEAT_DPB2, "+ccpp,+ccdp", 200},
    {"ebf16", AArch64::AEK_NONE, {}, {}, FEAT_EBF16, "+bf16", 290},
    {"f32mm", AArch64::AEK_F32MM, "+f32mm", "-f32mm", FEAT_SVE_F32MM, "+sve,+f32mm,+fullfp16,+fp-armv8,+neon", 350},
    {"f64mm", AArch64::AEK_F64MM, "+f64mm", "-f64mm", FEAT_SVE_F64MM, "+sve,+f64mm,+fullfp16,+fp-armv8,+neon", 360},
    {"fcma", AArch64::AEK_NONE, {}, {}, FEAT_FCMA, "+fp-armv8,+neon,+complxnum", 220},
    {"flagm", AArch64::AEK_FLAGM, "+flagm", "-flagm", FEAT_FLAGM, "+flagm", 20},
    {"flagm2", AArch64::AEK_NONE, {}, {}, FEAT_FLAGM2, "+flagm,+altnzcv", 30},
    {"fp", AArch64::AEK_FP, "+fp-armv8", "-fp-armv8", FEAT_FP, "+fp-armv8,+neon", 90},
    {"fp16", AArch64::AEK_FP16, "+fullfp16", "-fullfp16", FEAT_FP16, "+fullfp16,+fp-armv8,+neon", 170},
    {"fp16fml", AArch64::AEK_FP16FML, "+fp16fml", "-fp16fml", FEAT_FP16FML, "+fp16fml,+fullfp16,+fp-armv8,+neon", 40},
    {"frintts", AArch64::AEK_NONE, {}, {}, FEAT_FRINTTS, "+fptoint", 250},
    {"hbc", AArch64::AEK_HBC, "+hbc", "-hbc", FEAT_MAX, "", 0},
    {"i8mm", AArch64::AEK_I8MM, "+i8mm", "-i8mm", FEAT_I8MM, "+i8mm", 270},
    {"ite", AArch64::AEK_ITE, "+ite", "-ite", FEAT_MAX, "", 0},
    {"jscvt", AArch64::AEK_NONE, {}, {}, FEAT_JSCVT, "+fp-armv8,+neon,+jsconv", 210},
    {"ls64_accdata", AArch64::AEK_NONE, {}, {}, FEAT_LS64_ACCDATA, "+ls64", 540},
    {"ls64_v", AArch64::AEK_NONE, {}, {}, FEAT_LS64_V, "", 530},
    {"ls64", AArch64::AEK_LS64, "+ls64", "-ls64", FEAT_LS64, "", 520},
    {"lse", AArch64::AEK_LSE, "+lse", "-lse", FEAT_LSE, "+lse", 80},
    {"lse128", AArch64::AEK_LSE128, "+lse128", "-lse128", FEAT_MAX, "", 0},
    {"memtag", AArch64::AEK_MTE, "+mte", "-mte", FEAT_MEMTAG, "", 440},
    {"memtag2", AArch64::AEK_NONE, {}, {}, FEAT_MEMTAG2, "+mte", 450},
    {"memtag3", AArch64::AEK_NONE, {}, {}, FEAT_MEMTAG3, "+mte", 460},
    {"mops", AArch64::AEK_MOPS, "+mops", "-mops", FEAT_MAX, "", 0},
    {"pauth", AArch64::AEK_PAUTH, "+pauth", "-pauth", FEAT_MAX, "", 0},
    {"pmull", AArch64::AEK_NONE, {}, {}, FEAT_PMULL, "+aes,+fp-armv8,+neon", 160},
    {"pmuv3", AArch64::AEK_PERFMON, "+perfmon", "-perfmon", FEAT_MAX, "", 0},
    {"predres", AArch64::AEK_PREDRES, "+predres", "-predres", FEAT_PREDRES, "+predres", 480},
    {"predres2", AArch64::AEK_SPECRES2, "+specres2", "-specres2", FEAT_MAX, "", 0},
    {"profile", AArch64::AEK_PROFILE, "+spe", "-spe", FEAT_MAX, "", 0},
    {"ras", AArch64::AEK_RAS, "+ras", "-ras", FEAT_MAX, "", 0},
    {"rasv2", AArch64::AEK_RASv2, "+rasv2", "-rasv2", FEAT_MAX, "", 0},
    {"rcpc", AArch64::AEK_RCPC, "+rcpc", "-rcpc", FEAT_RCPC, "+rcpc", 230},
    {"rcpc2", AArch64::AEK_NONE, {}, {}, FEAT_RCPC2, "+rcpc", 240},
    {"rcpc3", AArch64::AEK_RCPC3, "+rcpc3", "-rcpc3", FEAT_MAX, "", 0},
    {"rdm", AArch64::AEK_RDM, "+rdm", "-rdm", FEAT_RDM, "+rdm,+fp-armv8,+neon", 70},
    {"rng", AArch64::AEK_RAND, "+rand", "-rand", FEAT_RNG, "+rand", 10},
    {"rpres", AArch64::AEK_NONE, {}, {}, FEAT_RPRES, "", 300},
    {"sb", AArch64::AEK_SB, "+sb", "-sb", FEAT_SB, "+sb", 470},
    {"sha1", AArch64::AEK_NONE, {}, {}, FEAT_SHA1, "+fp-armv8,+neon", 120},
    {"sha2", AArch64::AEK_SHA2, "+sha2", "-sha2", FEAT_SHA2, "+sha2,+fp-armv8,+neon", 130},
    {"sha3", AArch64::AEK_SHA3, "+sha3", "-sha3", FEAT_SHA3, "+sha3,+sha2,+fp-armv8,+neon", 140},
    {"simd", AArch64::AEK_SIMD, "+neon", "-neon", FEAT_SIMD, "+fp-armv8,+neon", 100},
    {"sm4", AArch64::AEK_SM4, "+sm4", "-sm4", FEAT_SM4, "+sm4,+fp-armv8,+neon", 60},
    {"sme-f16f16", AArch64::AEK_SMEF16F16, "+sme-f16f16", "-sme-f16f16", FEAT_MAX, "", 0},
    {"sme-f64f64", AArch64::AEK_SMEF64F64, "+sme-f64f64", "-sme-f64f64", FEAT_SME_F64, "+sme,+sme-f64f64,+bf16", 560},
    {"sme-i16i64", AArch64::AEK_SMEI16I64, "+sme-i16i64", "-sme-i16i64", FEAT_SME_I64, "+sme,+sme-i16i64,+bf16", 570},
    {"sme", AArch64::AEK_SME, "+sme", "-sme", FEAT_SME, "+sme,+bf16", 430},
    {"sme2", AArch64::AEK_SME2, "+sme2", "-sme2", FEAT_SME2, "+sme2,+sme,+bf16", 580},
    {"sme2p1", AArch64::AEK_SME2p1, "+sme2p1", "-sme2p1", FEAT_MAX, "", 0},
    {"ssbs", AArch64::AEK_SSBS, "+ssbs", "-ssbs", FEAT_SSBS, "", 490},
    {"ssbs2", AArch64::AEK_NONE, {}, {}, FEAT_SSBS2, "+ssbs", 500},
    {"sve-bf16", AArch64::AEK_NONE, {}, {}, FEAT_SVE_BF16, "+sve,+bf16,+fullfp16,+fp-armv8,+neon", 320},
    {"sve-ebf16", AArch64::AEK_NONE, {}, {}, FEAT_SVE_EBF16, "+sve,+bf16,+fullfp16,+fp-armv8,+neon", 330},
    {"sve-i8mm", AArch64::AEK_NONE, {}, {}, FEAT_SVE_I8MM, "+sve,+i8mm,+fullfp16,+fp-armv8,+neon", 340},
    {"sve", AArch64::AEK_SVE, "+sve", "-sve", FEAT_SVE, "+sve,+fullfp16,+fp-armv8,+neon", 310},
    {"sve2-aes", AArch64::AEK_SVE2AES, "+sve2-aes", "-sve2-aes", FEAT_SVE_AES, "+sve2,+sve,+sve2-aes,+fullfp16,+fp-armv8,+neon", 380},
    {"sve2-bitperm", AArch64::AEK_SVE2BITPERM, "+sve2-bitperm", "-sve2-bitperm", FEAT_SVE_BITPERM, "+sve2,+sve,+sve2-bitperm,+fullfp16,+fp-armv8,+neon", 400},
    {"sve2-pmull128", AArch64::AEK_NONE, {}, {}, FEAT_SVE_PMULL128, "+sve2,+sve,+sve2-aes,+fullfp16,+fp-armv8,+neon", 390},
    {"sve2-sha3", AArch64::AEK_SVE2SHA3, "+sve2-sha3", "-sve2-sha3", FEAT_SVE_SHA3, "+sve2,+sve,+sve2-sha3,+fullfp16,+fp-armv8,+neon", 410},
    {"sve2-sm4", AArch64::AEK_SVE2SM4, "+sve2-sm4", "-sve2-sm4", FEAT_SVE_SM4, "+sve2,+sve,+sve2-sm4,+fullfp16,+fp-armv8,+neon", 420},
    {"sve2", AArch64::AEK_SVE2, "+sve2", "-sve2", FEAT_SVE2, "+sve2,+sve,+fullfp16,+fp-armv8,+neon", 370},
    {"sve2p1", AArch64::AEK_SVE2p1, "+sve2p1", "-sve2p1", FEAT_MAX, "+sve2p1,+sve2,+sve,+fullfp16,+fp-armv8,+neon", 0},
    {"the", AArch64::AEK_THE, "+the", "-the", FEAT_MAX, "", 0},
    {"tme", AArch64::AEK_TME, "+tme", "-tme", FEAT_MAX, "", 0},
    {"wfxt", AArch64::AEK_NONE, {}, {}, FEAT_WFXT, "+wfxt", 550},
    {"gcs", AArch64::AEK_GCS, "+gcs", "-gcs", FEAT_MAX, "", 0},
    // Special cases
    {"none", AArch64::AEK_NONE, {}, {}, FEAT_MAX, "", ExtensionInfo::MaxFMVPriority},
};
// clang-format on
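
// Illustrative lookup over the Extensions table above (a sketch;
// parseArchExtension, declared below, is the supported entry point):
//
// \code
//   std::optional<ExtensionInfo> findExtension(StringRef Name) {
//     for (const ExtensionInfo &E : Extensions)
//       if (E.Name == Name)
//         return E;
//     return std::nullopt;
//   }
// \endcode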

enum ArchProfile { AProfile = 'A', RProfile = 'R', InvalidProfile = '?' };

// Information about a specific architecture, e.g. V8.1-A
struct ArchInfo {
  VersionTuple Version;  // Architecture version, major + minor.
  ArchProfile Profile;   // Architecture profile
  StringRef Name;        // Human readable name, e.g. "armv8.1-a"
  StringRef ArchFeature; // Command line feature flag, e.g. +v8a
  uint64_t DefaultExts;  // Bitfield of default extensions (ArchExtKind values)

  bool operator==(const ArchInfo &Other) const {
    return this->Name == Other.Name;
  }
  bool operator!=(const ArchInfo &Other) const {
    return this->Name != Other.Name;
  }

  // Defines the following partial order, indicating when an architecture is
  // a superset of another:
  //
  //     v9.4a > v9.3a > v9.2a > v9.1a > v9a;
  //       v       v       v       v       v
  //     v8.9a > v8.8a > v8.7a > v8.6a > v8.5a > v8.4a > ... > v8a;
  //
  // v8r has no relation to anything. This is used to determine which
  // features to enable for a given architecture. See
  // AArch64TargetInfo::setFeatureEnabled.
  bool implies(const ArchInfo &Other) const {
    if (this->Profile != Other.Profile)
      return false; // ARMV8R
    if (this->Version.getMajor() == Other.Version.getMajor()) {
      return this->Version > Other.Version;
    }
    if (this->Version.getMajor() == 9 && Other.Version.getMajor() == 8) {
      assert(this->Version.getMinor() && Other.Version.getMinor() &&
             "AArch64::ArchInfo should have a minor version.");
      return this->Version.getMinor().value_or(0) + 5 >=
             Other.Version.getMinor().value_or(0);
    }
    return false;
  }

  // Return ArchFeature without the leading "+".
  StringRef getSubArch() const { return ArchFeature.substr(1); }

  // Search for ArchInfo by SubArch name
  static std::optional<ArchInfo> findBySubArch(StringRef SubArch);
};
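
// Illustrative consequences of the partial order encoded by implies()
// (a sketch derived from the rules above, using the ArchInfo constants
// defined below):
//
// \code
//   ARMV8_6A.implies(ARMV8_5A); // true: same major version, higher minor.
//   ARMV9A.implies(ARMV8_5A);   // true: v9.0 covers v8.5 (0 + 5 >= 5).
//   ARMV8R.implies(ARMV8A);     // false: different profile.
// \endcode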

// clang-format off
inline constexpr ArchInfo ARMV8A    = { VersionTuple{8, 0}, AProfile, "armv8-a", "+v8a", (AArch64::AEK_FP | AArch64::AEK_SIMD), };
inline constexpr ArchInfo ARMV8_1A  = { VersionTuple{8, 1}, AProfile, "armv8.1-a", "+v8.1a", (ARMV8A.DefaultExts | AArch64::AEK_CRC | AArch64::AEK_LSE | AArch64::AEK_RDM)};
inline constexpr ArchInfo ARMV8_2A  = { VersionTuple{8, 2}, AProfile, "armv8.2-a", "+v8.2a", (ARMV8_1A.DefaultExts | AArch64::AEK_RAS)};
inline constexpr ArchInfo ARMV8_3A  = { VersionTuple{8, 3}, AProfile, "armv8.3-a", "+v8.3a", (ARMV8_2A.DefaultExts | AArch64::AEK_RCPC)};
inline constexpr ArchInfo ARMV8_4A  = { VersionTuple{8, 4}, AProfile, "armv8.4-a", "+v8.4a", (ARMV8_3A.DefaultExts | AArch64::AEK_DOTPROD)};
inline constexpr ArchInfo ARMV8_5A  = { VersionTuple{8, 5}, AProfile, "armv8.5-a", "+v8.5a", (ARMV8_4A.DefaultExts)};
inline constexpr ArchInfo ARMV8_6A  = { VersionTuple{8, 6}, AProfile, "armv8.6-a", "+v8.6a", (ARMV8_5A.DefaultExts | AArch64::AEK_BF16 | AArch64::AEK_I8MM)};
inline constexpr ArchInfo ARMV8_7A  = { VersionTuple{8, 7}, AProfile, "armv8.7-a", "+v8.7a", (ARMV8_6A.DefaultExts)};
inline constexpr ArchInfo ARMV8_8A  = { VersionTuple{8, 8}, AProfile, "armv8.8-a", "+v8.8a", (ARMV8_7A.DefaultExts | AArch64::AEK_MOPS | AArch64::AEK_HBC)};
inline constexpr ArchInfo ARMV8_9A  = { VersionTuple{8, 9}, AProfile, "armv8.9-a", "+v8.9a", (ARMV8_8A.DefaultExts | AArch64::AEK_SPECRES2 | AArch64::AEK_CSSC | AArch64::AEK_RASv2)};
inline constexpr ArchInfo ARMV9A    = { VersionTuple{9, 0}, AProfile, "armv9-a", "+v9a", (ARMV8_5A.DefaultExts | AArch64::AEK_FP16 | AArch64::AEK_SVE | AArch64::AEK_SVE2)};
inline constexpr ArchInfo ARMV9_1A  = { VersionTuple{9, 1}, AProfile, "armv9.1-a", "+v9.1a", (ARMV9A.DefaultExts | AArch64::AEK_BF16 | AArch64::AEK_I8MM)};
inline constexpr ArchInfo ARMV9_2A  = { VersionTuple{9, 2}, AProfile, "armv9.2-a", "+v9.2a", (ARMV9_1A.DefaultExts)};
inline constexpr ArchInfo ARMV9_3A  = { VersionTuple{9, 3}, AProfile, "armv9.3-a", "+v9.3a", (ARMV9_2A.DefaultExts | AArch64::AEK_MOPS | AArch64::AEK_HBC)};
inline constexpr ArchInfo ARMV9_4A  = { VersionTuple{9, 4}, AProfile, "armv9.4-a", "+v9.4a", (ARMV9_3A.DefaultExts | AArch64::AEK_SPECRES2 | AArch64::AEK_CSSC | AArch64::AEK_RASv2)};
// For v8-R, we do not enable crypto, and we align with GCC, which enables a more minimal set of optional architecture extensions.
inline constexpr ArchInfo ARMV8R    = { VersionTuple{8, 0}, RProfile, "armv8-r", "+v8r", ((ARMV8_5A.DefaultExts ^ AArch64::AEK_LSE) | AArch64::AEK_SSBS | AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SB), };
// clang-format on

// The set of all architectures
static constexpr std::array<const ArchInfo *, 16> ArchInfos = {
    &ARMV8A,   &ARMV8_1A, &ARMV8_2A, &ARMV8_3A, &ARMV8_4A, &ARMV8_5A,
    &ARMV8_6A, &ARMV8_7A, &ARMV8_8A, &ARMV8_9A, &ARMV9A, &ARMV9_1A,
    &ARMV9_2A, &ARMV9_3A, &ARMV9_4A, &ARMV8R,
};

// Details of a specific CPU.
struct CpuInfo {
  StringRef Name; // Name, as written for -mcpu.
  const ArchInfo &Arch;
  uint64_t DefaultExtensions; // Default extensions for this CPU. These will be
                              // ORd with the architecture defaults.

  uint64_t getImpliedExtensions() const {
    return DefaultExtensions | Arch.DefaultExts;
  }
};

inline constexpr CpuInfo CpuInfos[] = {
    {"cortex-a34", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"cortex-a35", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"cortex-a53", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"cortex-a55", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC)},
    {"cortex-a510", ARMV9A,
     (AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_SB |
      AArch64::AEK_PAUTH | AArch64::AEK_MTE | AArch64::AEK_SSBS |
      AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM |
      AArch64::AEK_FP16FML)},
    {"cortex-a57", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"cortex-a65", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16 | AArch64::AEK_RCPC | AArch64::AEK_SSBS)},
    {"cortex-a65ae", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16 | AArch64::AEK_RCPC | AArch64::AEK_SSBS)},
    {"cortex-a72", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"cortex-a73", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"cortex-a75", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC)},
    {"cortex-a76", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC | AArch64::AEK_SSBS)},
    {"cortex-a76ae", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC | AArch64::AEK_SSBS)},
    {"cortex-a77", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_RCPC | AArch64::AEK_DOTPROD | AArch64::AEK_SSBS)},
    {"cortex-a78", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC | AArch64::AEK_SSBS |
      AArch64::AEK_PROFILE)},
    {"cortex-a78c", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC | AArch64::AEK_SSBS |
      AArch64::AEK_PROFILE | AArch64::AEK_FLAGM | AArch64::AEK_PAUTH |
      AArch64::AEK_FP16FML)},
    {"cortex-a710", ARMV9A,
     (AArch64::AEK_MTE | AArch64::AEK_PAUTH | AArch64::AEK_FLAGM |
      AArch64::AEK_SB | AArch64::AEK_I8MM | AArch64::AEK_FP16FML |
      AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM |
      AArch64::AEK_BF16)},
    {"cortex-a715", ARMV9A,
     (AArch64::AEK_SB | AArch64::AEK_SSBS | AArch64::AEK_MTE |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_PAUTH |
      AArch64::AEK_I8MM | AArch64::AEK_PREDRES | AArch64::AEK_PERFMON |
      AArch64::AEK_PROFILE | AArch64::AEK_SVE | AArch64::AEK_SVE2BITPERM |
      AArch64::AEK_BF16 | AArch64::AEK_FLAGM)},
    {"cortex-r82", ARMV8R, (AArch64::AEK_LSE)},
    {"cortex-x1", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC | AArch64::AEK_SSBS |
      AArch64::AEK_PROFILE)},
    {"cortex-x1c", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_DOTPROD | AArch64::AEK_RCPC | AArch64::AEK_SSBS |
      AArch64::AEK_PAUTH | AArch64::AEK_PROFILE)},
    {"cortex-x2", ARMV9A,
     (AArch64::AEK_MTE | AArch64::AEK_BF16 | AArch64::AEK_I8MM |
      AArch64::AEK_PAUTH | AArch64::AEK_SSBS | AArch64::AEK_SB |
      AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM |
      AArch64::AEK_FP16FML)},
    {"cortex-x3", ARMV9A,
     (AArch64::AEK_SVE | AArch64::AEK_PERFMON | AArch64::AEK_PROFILE |
      AArch64::AEK_BF16 | AArch64::AEK_I8MM | AArch64::AEK_MTE |
      AArch64::AEK_SVE2BITPERM | AArch64::AEK_SB | AArch64::AEK_PAUTH |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_PREDRES |
      AArch64::AEK_FLAGM | AArch64::AEK_SSBS)},
    {"neoverse-e1", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16 | AArch64::AEK_RCPC | AArch64::AEK_SSBS)},
    {"neoverse-n1", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16 | AArch64::AEK_PROFILE | AArch64::AEK_RCPC |
      AArch64::AEK_SSBS)},
    {"neoverse-n2", ARMV8_5A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_SM4 | AArch64::AEK_BF16 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16 | AArch64::AEK_I8MM | AArch64::AEK_MTE |
      AArch64::AEK_SB | AArch64::AEK_SSBS | AArch64::AEK_SVE |
      AArch64::AEK_SVE2 | AArch64::AEK_SVE2BITPERM)},
    {"neoverse-512tvb", ARMV8_4A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_SM4 | AArch64::AEK_SVE | AArch64::AEK_SSBS |
      AArch64::AEK_FP16 | AArch64::AEK_BF16 | AArch64::AEK_DOTPROD |
      AArch64::AEK_PROFILE | AArch64::AEK_RAND | AArch64::AEK_FP16FML |
      AArch64::AEK_I8MM)},
    {"neoverse-v1", ARMV8_4A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_SM4 | AArch64::AEK_SVE | AArch64::AEK_SSBS |
      AArch64::AEK_FP16 | AArch64::AEK_BF16 | AArch64::AEK_DOTPROD |
      AArch64::AEK_PROFILE | AArch64::AEK_RAND | AArch64::AEK_FP16FML |
      AArch64::AEK_I8MM)},
    {"neoverse-v2", ARMV9A,
     (AArch64::AEK_SVE | AArch64::AEK_SVE2 | AArch64::AEK_SSBS |
      AArch64::AEK_FP16 | AArch64::AEK_BF16 | AArch64::AEK_RAND |
      AArch64::AEK_DOTPROD | AArch64::AEK_PROFILE | AArch64::AEK_SVE2BITPERM |
      AArch64::AEK_FP16FML | AArch64::AEK_I8MM | AArch64::AEK_MTE)},
    {"cyclone", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_NONE)},
    {"apple-a7", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_NONE)},
    {"apple-a8", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_NONE)},
    {"apple-a9", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_NONE)},
    {"apple-a10", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC |
      AArch64::AEK_RDM)},
    {"apple-a11", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16)},
    {"apple-a12", ARMV8_3A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16)},
    {"apple-a13", ARMV8_4A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3)},
    {"apple-a14", ARMV8_5A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3)},
    {"apple-a15", ARMV8_5A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3 |
      AArch64::AEK_BF16 | AArch64::AEK_I8MM)},
    {"apple-a16", ARMV8_5A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3 |
      AArch64::AEK_BF16 | AArch64::AEK_I8MM)},
    {"apple-m1", ARMV8_5A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3)},
    {"apple-m2", ARMV8_5A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_SHA3 |
      AArch64::AEK_BF16 | AArch64::AEK_I8MM)},
    {"apple-s4", ARMV8_3A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16)},
    {"apple-s5", ARMV8_3A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16)},
    {"exynos-m3", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"exynos-m4", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16)},
    {"exynos-m5", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16)},
    {"falkor", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC |
      AArch64::AEK_RDM)},
    {"saphira", ARMV8_3A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_PROFILE)},
    {"kryo", ARMV8A, (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"thunderx2t99", ARMV8_1A, (AArch64::AEK_AES | AArch64::AEK_SHA2)},
    {"thunderx3t110", ARMV8_3A, (AArch64::AEK_AES | AArch64::AEK_SHA2)},
    {"thunderx", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"thunderxt88", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"thunderxt81", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"thunderxt83", ARMV8A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_CRC)},
    {"tsv110", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_DOTPROD |
      AArch64::AEK_FP16 | AArch64::AEK_FP16FML | AArch64::AEK_PROFILE)},
    {"a64fx", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16 |
      AArch64::AEK_SVE)},
    {"carmel", ARMV8_2A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_FP16)},
    {"ampere1", ARMV8_6A,
     (AArch64::AEK_AES | AArch64::AEK_SHA2 | AArch64::AEK_SHA3 |
      AArch64::AEK_FP16 | AArch64::AEK_SB | AArch64::AEK_SSBS |
      AArch64::AEK_RAND)},
    {"ampere1a", ARMV8_6A,
     (AArch64::AEK_FP16 | AArch64::AEK_RAND | AArch64::AEK_SM4 |
      AArch64::AEK_SHA3 | AArch64::AEK_SHA2 | AArch64::AEK_AES |
      AArch64::AEK_MTE | AArch64::AEK_SB | AArch64::AEK_SSBS)},
};

// An alias for a CPU.
struct CpuAlias {
  StringRef Alias;
  StringRef Name;
};

inline constexpr CpuAlias CpuAliases[] = {{"grace", "neoverse-v2"}};

bool getExtensionFeatures(uint64_t Extensions,
                          std::vector<StringRef> &Features);

StringRef getArchExtFeature(StringRef ArchExt);
StringRef resolveCPUAlias(StringRef CPU);

// Information by Name
std::optional<ArchInfo> getArchForCpu(StringRef CPU);

// Parser
std::optional<ArchInfo> parseArch(StringRef Arch);
std::optional<ExtensionInfo> parseArchExtension(StringRef Extension);
// Given the name of a CPU or alias, return the corresponding CpuInfo.
std::optional<CpuInfo> parseCpu(StringRef Name);
// Used by target parser tests
void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values);
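
// Illustrative end-to-end sketch tying these helpers together (hypothetical
// caller code):
//
// \code
//   if (std::optional<CpuInfo> Info = parseCpu(resolveCPUAlias("grace"))) {
//     std::vector<StringRef> Features;
//     getExtensionFeatures(Info->getImpliedExtensions(), Features);
//     // Features now holds "-mattr"-style strings such as "+sve2".
//   }
// \endcode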

bool isX18ReservedByDefault(const Triple &TT);

// For the given feature names, return a bitmask corresponding to the entries
// of AArch64::CPUFeatures. The values in CPUFeatures are not bitmasks
// themselves; they are sequential (0, 1, 2, 3, ...).
uint64_t getCpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
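
// Illustrative sketch of that mapping (assumed caller code):
//
// \code
//   uint64_t Mask = getCpuSupportsMask({"sve", "bf16"});
//   // Equivalent to (1ULL << FEAT_SVE) | (1ULL << FEAT_BF16): each
//   // CPUFeatures value names a bit position, not a mask.
// \endcode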

} // namespace AArch64
} // namespace llvm

#endif
//===- llvm/TargetParser/Host.h - Host machine detection  -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods for querying the nature of the host machine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_HOST_H
#define LLVM_TARGETPARSER_HOST_H

#include <string>

namespace llvm {
class MallocAllocator;
class StringRef;
template <typename ValueTy, typename AllocatorTy> class StringMap;
class raw_ostream;

namespace sys {

  /// getDefaultTargetTriple() - Return the default target triple the compiler
  /// has been configured to produce code for.
  ///
  /// The target triple is a string in the format of:
  ///   CPU_TYPE-VENDOR-OPERATING_SYSTEM
  /// or
  ///   CPU_TYPE-VENDOR-KERNEL-OPERATING_SYSTEM
  std::string getDefaultTargetTriple();

  /// getProcessTriple() - Return an appropriate target triple for generating
  /// code to be loaded into the current process, e.g. when using the JIT.
  std::string getProcessTriple();

  /// getHostCPUName - Get the LLVM name for the host CPU. The particular format
  /// of the name is target dependent, and suitable for passing as -mcpu to the
  /// target which matches the host.
  ///
  /// \return - The host CPU name, or empty if the CPU could not be determined.
  StringRef getHostCPUName();

  /// getHostCPUFeatures - Get the LLVM names for the host CPU features.
  /// The particular format of the names is target dependent, and suitable for
  /// passing as -mattr to the target which matches the host.
  ///
  /// \param Features - A StringMap mapping feature names to either
  /// true (if enabled) or false (if disabled). This routine makes no guarantees
  /// about exactly which features may appear in this map, except that they are
  /// all valid LLVM feature names.
  ///
  /// \return - True on success.
  bool getHostCPUFeatures(StringMap<bool, MallocAllocator> &Features);
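
  // Illustrative sketch of typical use, e.g. when configuring codegen for
  // the host (hypothetical caller code):
  //
  // \code
  //   StringMap<bool> HostFeatures;
  //   std::string CPU = sys::getHostCPUName().str();
  //   if (sys::getHostCPUFeatures(HostFeatures))
  //     for (const auto &F : HostFeatures)
  //       ; // F.first() is the feature name; F.getValue() its state.
  // \endcode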

  /// This is a function compatible with cl::AddExtraVersionPrinter, which adds
  /// info about the current target triple and detected CPU.
  void printDefaultTargetAndDetectedCPU(raw_ostream &OS);

  namespace detail {
  /// Helper functions to extract HostCPUName from /proc/cpuinfo on Linux.
  StringRef getHostCPUNameForPowerPC(StringRef ProcCpuinfoContent);
  StringRef getHostCPUNameForARM(StringRef ProcCpuinfoContent);
  StringRef getHostCPUNameForS390x(StringRef ProcCpuinfoContent);
  StringRef getHostCPUNameForRISCV(StringRef ProcCpuinfoContent);
  StringRef getHostCPUNameForSPARC(StringRef ProcCpuinfoContent);
  StringRef getHostCPUNameForBPF();

  /// Helper functions to extract CPU details from CPUID on x86.
  namespace x86 {
  enum class VendorSignatures {
    UNKNOWN,
    GENUINE_INTEL,
    AUTHENTIC_AMD,
  };

  /// Returns the host CPU's vendor.
  /// MaxLeaf: if a non-nullptr pointer is specified, the EAX value will be
  /// assigned to its pointee.
  VendorSignatures getVendorSignature(unsigned *MaxLeaf = nullptr);
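
  // Illustrative sketch (hypothetical caller code) distinguishing the host
  // vendor:
  //
  // \code
  //   unsigned MaxLeaf = 0;
  //   bool IsIntel =
  //       getVendorSignature(&MaxLeaf) == VendorSignatures::GENUINE_INTEL;
  //   // MaxLeaf now holds the highest supported basic CPUID leaf (EAX).
  // \endcode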
  } // namespace x86
  } // namespace detail
  } // namespace sys
} // namespace llvm

#endif
//===- CSKYTargetParser.def - CSKY target parsing defines -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides defines to build up the CSKY target parser's logic.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

#ifndef CSKY_FPU
#define CSKY_FPU(NAME, KIND, VERSION)
#endif
CSKY_FPU("invalid", FK_INVALID, FPUVersion::NONE)
CSKY_FPU("auto", FK_AUTO, FPUVersion::FPV2)
CSKY_FPU("fpv2", FK_FPV2, FPUVersion::FPV2)
CSKY_FPU("fpv2_divd", FK_FPV2_DIVD, FPUVersion::FPV2)
CSKY_FPU("fpv2_sf", FK_FPV2_SF, FPUVersion::FPV2)
CSKY_FPU("fpv3", FK_FPV3, FPUVersion::FPV3)
CSKY_FPU("fpv3_hf", FK_FPV3_HF, FPUVersion::FPV3)
CSKY_FPU("fpv3_hsf", FK_FPV3_HSF, FPUVersion::FPV3)
CSKY_FPU("fpv3_sdf", FK_FPV3_SDF, FPUVersion::FPV3)

#undef CSKY_FPU
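
// Illustrative expansion of the block above (sketch only; "FPUKind" and
// "parseFPU" are hypothetical names in the including code, which declares
// the FK_* values):
//
// \code
//   FPUKind parseFPU(StringRef Name) {
//   #define CSKY_FPU(NAME, KIND, VERSION) if (Name == NAME) return KIND;
//   #include "llvm/TargetParser/CSKYTargetParser.def"
//     return FK_INVALID;
//   }
// \endcode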

#ifndef CSKY_ARCH
#define CSKY_ARCH(NAME, ID, ARCH_BASE_EXT)
#endif
CSKY_ARCH("invalid", INVALID, CSKY::AEK_INVALID)
CSKY_ARCH("ck801", CK801, CSKY::MAEK_E1 | CSKY::AEK_TRUST)
CSKY_ARCH("ck802", CK802, CSKY::MAEK_E2 | CSKY::AEK_TRUST | CSKY::AEK_NVIC)
CSKY_ARCH("ck803", CK803,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV)
CSKY_ARCH("ck803s", CK803S,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV)
CSKY_ARCH("ck804", CK804,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV | CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_ARCH("ck805", CK805,
          CSKY::MAEK_2E3 | CSKY::AEK_MP | CSKY::AEK_TRUST | CSKY::AEK_NVIC |
              CSKY::AEK_HWDIV | CSKY::AEK_HIGHREG | CSKY::MAEK_3E3R2 |
              CSKY::AEK_3E3R3 | CSKY::AEK_VDSPV2 | CSKY::AEK_VDSP2E3)
CSKY_ARCH("ck807", CK807,
          CSKY::MAEK_3E7 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 | CSKY::AEK_TRUST |
              CSKY::AEK_HWDIV | CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 |
              CSKY::AEK_DSPE60 | CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP |
              CSKY::AEK_NVIC | CSKY::AEK_CACHE)
CSKY_ARCH("ck810", CK810,
          CSKY::MAEK_7E10 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 | CSKY::AEK_TRUST |
              CSKY::AEK_HWDIV | CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 |
              CSKY::AEK_DSPE60 | CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP |
              CSKY::AEK_NVIC | CSKY::AEK_CACHE)
CSKY_ARCH("ck810v", CK810V,
          CSKY::MAEK_7E10 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 | CSKY::AEK_TRUST |
              CSKY::AEK_HWDIV | CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 |
              CSKY::AEK_DSPE60 | CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP |
              CSKY::AEK_NVIC | CSKY::AEK_CACHE | CSKY::AEK_VDSPV1)
CSKY_ARCH("ck860", CK860,
          CSKY::MAEK_10E60 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 |
              CSKY::AEK_TRUST | CSKY::AEK_HWDIV | CSKY::AEK_DSPE60 |
              CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP | CSKY::AEK_NVIC |
              CSKY::AEK_CACHE | CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_ARCH("ck860v", CK860V,
          CSKY::MAEK_10E60 | CSKY::MAEK_MP | CSKY::MAEK_MP1E2 |
              CSKY::AEK_TRUST | CSKY::AEK_HWDIV | CSKY::AEK_DSPE60 |
              CSKY::AEK_HIGHREG | CSKY::AEK_HARDTP | CSKY::AEK_NVIC |
              CSKY::AEK_CACHE | CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 |
              CSKY::AEK_VDSPV2 | CSKY::AEK_VDSP2E60F)
#undef CSKY_ARCH

#ifndef CSKY_ARCH_EXT_NAME
#define CSKY_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
#endif
CSKY_ARCH_EXT_NAME("invalid", CSKY::AEK_INVALID, nullptr, nullptr)
CSKY_ARCH_EXT_NAME("none", CSKY::AEK_NONE, nullptr, nullptr)
CSKY_ARCH_EXT_NAME("fpuv2_sf", CSKY::AEK_FPUV2SF, "+fpuv2_sf", "-fpuv2_sf")
CSKY_ARCH_EXT_NAME("fpuv2_df", CSKY::AEK_FPUV2DF, "+fpuv2_df", "-fpuv2_df")
CSKY_ARCH_EXT_NAME("fdivdu", CSKY::AEK_FDIVDU, "+fdivdu", "-fdivdu")
CSKY_ARCH_EXT_NAME("fpuv3_hi", CSKY::AEK_FPUV3HI, "+fpuv3_hi", "-fpuv3_hi")
CSKY_ARCH_EXT_NAME("fpuv3_hf", CSKY::AEK_FPUV3HF, "+fpuv3_hf", "-fpuv3_hf")
CSKY_ARCH_EXT_NAME("fpuv3_sf", CSKY::AEK_FPUV3SF, "+fpuv3_sf", "-fpuv3_sf")
CSKY_ARCH_EXT_NAME("fpuv3_df", CSKY::AEK_FPUV3DF, "+fpuv3_df", "-fpuv3_df")
CSKY_ARCH_EXT_NAME("floate1", CSKY::AEK_FLOATE1, "+floate1", "-floate1")
CSKY_ARCH_EXT_NAME("float1e2", CSKY::AEK_FLOAT1E2, "+float1e2", "-float1e2")
CSKY_ARCH_EXT_NAME("float1e3", CSKY::AEK_FLOAT1E3, "+float1e3", "-float1e3")
CSKY_ARCH_EXT_NAME("float3e4", CSKY::AEK_FLOAT3E4, "+float3e4", "-float3e4")
CSKY_ARCH_EXT_NAME("float7e60", CSKY::AEK_FLOAT7E60, "+float7e60", "-float7e60")
CSKY_ARCH_EXT_NAME("hwdiv", CSKY::AEK_HWDIV, "+hwdiv", "-hwdiv")
CSKY_ARCH_EXT_NAME("multiple_stld", CSKY::AEK_STLD, "+multiple_stld",
                   "-multiple_stld")
CSKY_ARCH_EXT_NAME("pushpop", CSKY::AEK_PUSHPOP, "+pushpop", "-pushpop")
CSKY_ARCH_EXT_NAME("edsp", CSKY::AEK_EDSP, "+edsp", "-edsp")
CSKY_ARCH_EXT_NAME("dsp1e2", CSKY::AEK_DSP1E2, "+dsp1e2", "-dsp1e2")
CSKY_ARCH_EXT_NAME("dspe60", CSKY::AEK_DSPE60, "+dspe60", "-dspe60")
CSKY_ARCH_EXT_NAME("dspv2", CSKY::AEK_DSPV2, "+dspv2", "-dspv2")
CSKY_ARCH_EXT_NAME("dsp_silan", CSKY::AEK_DSPSILAN, "+dsp_silan", "-dsp_silan")
CSKY_ARCH_EXT_NAME("elrw", CSKY::AEK_ELRW, "+elrw", "-elrw")
CSKY_ARCH_EXT_NAME("trust", CSKY::AEK_TRUST, "+trust", "-trust")
CSKY_ARCH_EXT_NAME("java", CSKY::AEK_JAVA, "+java", "-java")
CSKY_ARCH_EXT_NAME("cache", CSKY::AEK_CACHE, "+cache", "-cache")
CSKY_ARCH_EXT_NAME("nvic", CSKY::AEK_NVIC, "+nvic", "-nvic")
CSKY_ARCH_EXT_NAME("doloop", CSKY::AEK_DOLOOP, "+doloop", "-doloop")
CSKY_ARCH_EXT_NAME("high-registers", CSKY::AEK_HIGHREG, "+high-registers",
                   "-high-registers")
CSKY_ARCH_EXT_NAME("smart", CSKY::AEK_SMART, "+smart", "-smart")
CSKY_ARCH_EXT_NAME("vdsp2e3", CSKY::AEK_VDSP2E3, "+vdsp2e3", "-vdsp2e3")
CSKY_ARCH_EXT_NAME("vdsp2e60f", CSKY::AEK_VDSP2E60F, "+vdsp2e60f", "-vdsp2e60f")
CSKY_ARCH_EXT_NAME("vdspv2", CSKY::AEK_VDSPV2, "+vdspv2", "-vdspv2")
CSKY_ARCH_EXT_NAME("hard-tp", CSKY::AEK_HARDTP, "+hard-tp", "-hard-tp")
CSKY_ARCH_EXT_NAME("soft-tp", CSKY::AEK_SOFTTP, "+soft-tp", "-soft-tp")
CSKY_ARCH_EXT_NAME("istack", CSKY::AEK_ISTACK, "+istack", "-istack")
CSKY_ARCH_EXT_NAME("constpool", CSKY::AEK_CONSTPOOL, "+constpool", "-constpool")
CSKY_ARCH_EXT_NAME("stack-size", CSKY::AEK_STACKSIZE, "+stack-size",
                   "-stack-size")
CSKY_ARCH_EXT_NAME("ccrt", CSKY::AEK_CCRT, "+ccrt", "-ccrt")
CSKY_ARCH_EXT_NAME("vdspv1", CSKY::AEK_VDSPV1, "+vdspv1", "-vdspv1")

CSKY_ARCH_EXT_NAME("e1", CSKY::AEK_E1, "+e1", "-e1")
CSKY_ARCH_EXT_NAME("e2", CSKY::AEK_E2, "+e2", "-e2")
CSKY_ARCH_EXT_NAME("2e3", CSKY::AEK_2E3, "+2e3", "-2e3")
CSKY_ARCH_EXT_NAME("mp", CSKY::AEK_MP, "+mp", "-mp")
CSKY_ARCH_EXT_NAME("3e3r1", CSKY::AEK_3E3R1, "+3e3r1", "-3e3r1")
CSKY_ARCH_EXT_NAME("3e3r2", CSKY::AEK_3E3R2, "+3e3r2", "-3e3r2")
CSKY_ARCH_EXT_NAME("3e3r3", CSKY::AEK_3E3R3, "+3e3r3", "-3e3r3")
CSKY_ARCH_EXT_NAME("3e7", CSKY::AEK_3E7, "+3e7", "-3e7")
CSKY_ARCH_EXT_NAME("mp1e2", CSKY::AEK_MP1E2, "+mp1e2", "-mp1e2")
CSKY_ARCH_EXT_NAME("7e10", CSKY::AEK_7E10, "+7e10", "-7e10")
CSKY_ARCH_EXT_NAME("10e60", CSKY::AEK_10E60, "+10e60", "-10e60")

#undef CSKY_ARCH_EXT_NAME

#ifndef CSKY_CPU_NAME
#define CSKY_CPU_NAME(NAME, ARCH_ID, DEFAULT_EXT)
#endif

CSKY_CPU_NAME("ck801", CK801, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck801t", CK801, CSKY::AEK_NONE)
CSKY_CPU_NAME("e801", CK801, CSKY::AEK_NONE)

CSKY_CPU_NAME("ck802", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck802t", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck802j", CK802, CSKY::AEK_JAVA)
CSKY_CPU_NAME("e802", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("e802t", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("s802", CK802, CSKY::AEK_NONE)
CSKY_CPU_NAME("s802t", CK802, CSKY::AEK_NONE)

CSKY_CPU_NAME("ck803", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803h", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803t", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803ht", CK803, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803f", CK803,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fh", CK803,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803e", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803eh", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803et", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803eht", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803ef", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803efh", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803ft", CK803,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803eft", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803efht", CK803,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803r1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803r2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803r3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803hr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803hr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803hr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803tr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803tr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803tr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803htr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803htr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803htr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2)
CSKY_CPU_NAME("ck803fr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fhr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fhr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803fhr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803er1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803er2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803er3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803etr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803etr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803etr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehtr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehtr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ehtr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803ftr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803ftr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803ftr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803eftr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803eftr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803eftr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhtr1", CK803,
              CSKY::MAEK_3E3R1 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhtr2", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck803efhtr3", CK803,
              CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3 | CSKY::AEK_DSPV2 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("s803", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("s803t", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("e803", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("e803t", CK803, CSKY::MAEK_3E3R2 | CSKY::AEK_3E3R3)

CSKY_CPU_NAME("ck803s", CK803S, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803st", CK803S, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck803se", CK803S,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck803sf", CK803S,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803sef", CK803S,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck803seft", CK803S,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)

CSKY_CPU_NAME("ck804", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804h", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804t", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804ht", CK804, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck804f", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck804fh", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck804e", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804eh", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804et", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804eht", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804ef", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804efh", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804ft", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck804eft", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("ck804efht", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804d", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804dt", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804f", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("e804ft", CK804,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("e804df", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)
CSKY_CPU_NAME("e804dft", CK804,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_HIGHREG)

CSKY_CPU_NAME("ck805", CK805, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck805e", CK805,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("ck805f", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck805t", CK805, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck805ef", CK805,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck805et", CK805,
              CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("ck805ft", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)
CSKY_CPU_NAME("ck805eft", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_DSPV2 | CSKY::AEK_3E3R1 | CSKY::AEK_3E3R3)
CSKY_CPU_NAME("i805", CK805, CSKY::AEK_NONE)
CSKY_CPU_NAME("i805f", CK805,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E3)

CSKY_CPU_NAME("ck807", CK807, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck807e", CK807,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60)
CSKY_CPU_NAME("ck807f", CK807,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)
CSKY_CPU_NAME("ck807ef", CK807,
              CSKY::AEK_EDSP | CSKY::AEK_DSP1E2 | CSKY::AEK_DSPE60 |
                  CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)
CSKY_CPU_NAME("c807", CK807, CSKY::AEK_NONE)
CSKY_CPU_NAME("c807f", CK807,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)
CSKY_CPU_NAME("r807", CK807, CSKY::AEK_NONE)
CSKY_CPU_NAME("r807f", CK807,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2 | CSKY::AEK_FLOAT1E3 |
                  CSKY::AEK_FLOAT3E4)

CSKY_CPU_NAME("ck810e", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810et", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810ef", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810eft", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810f", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810t", CK810, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810ft", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810t", CK810,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)

CSKY_CPU_NAME("ck810v", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810ev", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810tv", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck810etv", CK810V, CSKY::AEK_NONE)
CSKY_CPU_NAME("c810v", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810fv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810efv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("ck810ftv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810tv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)
CSKY_CPU_NAME("c810eftv", CK810V,
              CSKY::AEK_FPUV2SF | CSKY::AEK_FPUV2DF | CSKY::AEK_FDIVDU |
                  CSKY::AEK_FLOATE1 | CSKY::AEK_FLOAT1E2)

CSKY_CPU_NAME("ck860", CK860, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck860f", CK860,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)
CSKY_CPU_NAME("c860", CK860,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)

CSKY_CPU_NAME("ck860v", CK860V, CSKY::AEK_NONE)
CSKY_CPU_NAME("ck860fv", CK860V,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)
CSKY_CPU_NAME("c860v", CK860V,
              CSKY::AEK_FPUV3HI | CSKY::AEK_FPUV3HF | CSKY::AEK_FPUV3SF |
                  CSKY::AEK_FPUV3DF | CSKY::AEK_FLOAT7E60)
// Invalid CPU
CSKY_CPU_NAME("invalid", INVALID, CSKY::AEK_INVALID)
#undef CSKY_CPU_NAME
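// Illustrative consumer (a sketch, not part of LLVM): .def tables like the
// one above follow the X-macro pattern.  A client defines CSKY_CPU_NAME,
// includes the file, and the macro expands once per CPU; the table #undefs
// the macro itself at the end.  The CpuInfo struct and the include path
// below are assumptions for illustration only.
//
//   struct CpuInfo {
//     const char *Name;
//     CSKY::ArchKind Kind;
//     uint64_t DefaultExtensions;
//   };
//   static const CpuInfo CSKYCPUNames[] = {
//   #define CSKY_CPU_NAME(NAME, ID, DEFAULT_EXT)                             \
//     {NAME, CSKY::ArchKind::ID, (DEFAULT_EXT)},
//   #include "llvm/TargetParser/CSKYTargetParser.def"
//   };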

//===-- RISCVTargetParser - Parser for target features ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise hardware features
// for RISC-V CPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_RISCVTARGETPARSER_H
#define LLVM_TARGETPARSER_RISCVTARGETPARSER_H

#include "llvm/ADT/StringRef.h"
#include <vector>

namespace llvm {

class Triple;

namespace RISCV {

// We use 64 bits as the known part in the scalable vector types.
static constexpr unsigned RVVBitsPerBlock = 64;

bool parseCPU(StringRef CPU, bool IsRV64);
bool parseTuneCPU(StringRef CPU, bool IsRV64);
StringRef getMArchFromMcpu(StringRef CPU);
void fillValidCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
void fillValidTuneCPUArchList(SmallVectorImpl<StringRef> &Values, bool IsRV64);
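
// Illustrative usage (a sketch; "generic-rv64" is an example CPU name and
// may not be recognised by every LLVM version):
//
//   bool Known = parseCPU("generic-rv64", /*IsRV64=*/true);
//   SmallVector<StringRef, 32> CPUs;
//   fillValidCPUArchList(CPUs, /*IsRV64=*/true);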

} // namespace RISCV
} // namespace llvm

#endif

//=== llvm/TargetParser/SubtargetFeature.h - CPU characteristics-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Defines and manages user or tool specified CPU characteristics.
/// The intent is to be able to package specific features that should or should
/// not be used on a specific target processor.  A tool, such as llc, could, as
/// an example, gather chip info from the command line, along with features
/// that should be used on that chip.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_SUBTARGETFEATURE_H
#define LLVM_TARGETPARSER_SUBTARGETFEATURE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MathExtras.h"
#include <array>
#include <initializer_list>
#include <string>
#include <vector>

namespace llvm {

class raw_ostream;
class Triple;

const unsigned MAX_SUBTARGET_WORDS = 4;
const unsigned MAX_SUBTARGET_FEATURES = MAX_SUBTARGET_WORDS * 64;

/// Container class for subtarget features.
/// This is a constexpr reimplementation of a subset of std::bitset. It would be
/// nice to use std::bitset directly, but it doesn't support constant
/// initialization.
class FeatureBitset {
  static_assert((MAX_SUBTARGET_FEATURES % 64) == 0,
                "Should be a multiple of 64!");
  std::array<uint64_t, MAX_SUBTARGET_WORDS> Bits{};

protected:
  constexpr FeatureBitset(const std::array<uint64_t, MAX_SUBTARGET_WORDS> &B)
      : Bits{B} {}

public:
  constexpr FeatureBitset() = default;
  constexpr FeatureBitset(std::initializer_list<unsigned> Init) {
    for (auto I : Init)
      set(I);
  }

  FeatureBitset &set() {
    std::fill(std::begin(Bits), std::end(Bits), -1ULL);
    return *this;
  }

  constexpr FeatureBitset &set(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint64_t NewBits = Bits[I / 64] | (uint64_t(1) << (I % 64));
    Bits[I / 64] = NewBits;
    return *this;
  }

  constexpr FeatureBitset &reset(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint64_t NewBits = Bits[I / 64] & ~(uint64_t(1) << (I % 64));
    Bits[I / 64] = NewBits;
    return *this;
  }

  constexpr FeatureBitset &flip(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint64_t NewBits = Bits[I / 64] ^ (uint64_t(1) << (I % 64));
    Bits[I / 64] = NewBits;
    return *this;
  }

  constexpr bool operator[](unsigned I) const {
    uint64_t Mask = uint64_t(1) << (I % 64);
    return (Bits[I / 64] & Mask) != 0;
  }

  constexpr bool test(unsigned I) const { return (*this)[I]; }

  constexpr size_t size() const { return MAX_SUBTARGET_FEATURES; }

  bool any() const {
    return llvm::any_of(Bits, [](uint64_t I) { return I != 0; });
  }
  bool none() const { return !any(); }
  size_t count() const {
    size_t Count = 0;
    for (auto B : Bits)
      Count += llvm::popcount(B);
    return Count;
  }

  constexpr FeatureBitset &operator^=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = Bits.size(); I != E; ++I) {
      Bits[I] ^= RHS.Bits[I];
    }
    return *this;
  }
  constexpr FeatureBitset operator^(const FeatureBitset &RHS) const {
    FeatureBitset Result = *this;
    Result ^= RHS;
    return Result;
  }

  constexpr FeatureBitset &operator&=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = Bits.size(); I != E; ++I) {
      Bits[I] &= RHS.Bits[I];
    }
    return *this;
  }
  constexpr FeatureBitset operator&(const FeatureBitset &RHS) const {
    FeatureBitset Result = *this;
    Result &= RHS;
    return Result;
  }

  constexpr FeatureBitset &operator|=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = Bits.size(); I != E; ++I) {
      Bits[I] |= RHS.Bits[I];
    }
    return *this;
  }
  constexpr FeatureBitset operator|(const FeatureBitset &RHS) const {
    FeatureBitset Result = *this;
    Result |= RHS;
    return Result;
  }

  constexpr FeatureBitset operator~() const {
    FeatureBitset Result = *this;
    for (auto &B : Result.Bits)
      B = ~B;
    return Result;
  }

  bool operator==(const FeatureBitset &RHS) const {
    return std::equal(std::begin(Bits), std::end(Bits), std::begin(RHS.Bits));
  }

  bool operator!=(const FeatureBitset &RHS) const { return !(*this == RHS); }

  bool operator<(const FeatureBitset &Other) const {
    for (unsigned I = 0, E = size(); I != E; ++I) {
      bool LHS = test(I), RHS = Other.test(I);
      if (LHS != RHS)
        return LHS < RHS;
    }
    return false;
  }
};
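
// Illustrative usage (a sketch; the bit indices are hypothetical -- real
// indices come from tablegen-generated feature enums):
//
//   constexpr FeatureBitset FB = {1, 5, 130};    // bits 1, 5 and 130 set
//   static_assert(FB[130], "constexpr-friendly test");
//   FeatureBitset Mask;
//   Mask.set(5);
//   bool Overlap = (FB & Mask).any();            // true
//   size_t N = FB.count();                       // 3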

/// Class used to store the subtarget bits in the tables created by tablegen.
class FeatureBitArray : public FeatureBitset {
public:
  constexpr FeatureBitArray(const std::array<uint64_t, MAX_SUBTARGET_WORDS> &B)
      : FeatureBitset(B) {}

  const FeatureBitset &getAsBitset() const { return *this; }
};

//===----------------------------------------------------------------------===//

/// Manages the enabling and disabling of subtarget specific features.
///
/// Features are encoded as a string of the form
///   "+attr1,+attr2,-attr3,...,+attrN"
/// A comma separates each feature from the next (all lowercase).
/// Each feature is prefixed with + or -, indicating whether that feature
/// should be enabled or disabled contrary to the CPU specification.
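///
/// Illustrative example (a sketch; the feature names are hypothetical):
/// \code
///   SubtargetFeatures Features("+attr1,+attr2");
///   Features.AddFeature("attr3");          // appends "+attr3"
///   Features.AddFeature("attr4", false);   // appends "-attr4"
///   std::string S = Features.getString();  // "+attr1,+attr2,+attr3,-attr4"
/// \endcode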
class SubtargetFeatures {
  std::vector<std::string> Features;    ///< Subtarget features as a vector

public:
  explicit SubtargetFeatures(StringRef Initial = "");

  /// Returns features as a string.
  std::string getString() const;

  /// Adds Features.
  void AddFeature(StringRef String, bool Enable = true);

  void addFeaturesVector(const ArrayRef<std::string> OtherFeatures);

  /// Returns the vector of individual subtarget features.
  const std::vector<std::string> &getFeatures() const { return Features; }

  /// Prints feature string.
  void print(raw_ostream &OS) const;

  /// Dumps feature info.
  void dump() const;

  /// Adds the default features for the specified target triple.
  void getDefaultSubtargetFeatures(const Triple& Triple);

  /// Determine if a feature has a flag; '+' or '-'
  static bool hasFlag(StringRef Feature) {
    assert(!Feature.empty() && "Empty string");
    // Get first character
    char Ch = Feature[0];
    // Check if first character is '+' or '-' flag
    return Ch == '+' || Ch == '-';
  }

  /// Return string stripped of flag.
  static StringRef StripFlag(StringRef Feature) {
    return hasFlag(Feature) ? Feature.substr(1) : Feature;
  }

  /// Return true if enable flag; '+'.
  static inline bool isEnabled(StringRef Feature) {
    assert(!Feature.empty() && "Empty string");
    // Get first character
    char Ch = Feature[0];
    // Check if first character is '+' for enabled
    return Ch == '+';
  }

  /// Splits a string of comma-separated items into a vector of strings.
  static void Split(std::vector<std::string> &V, StringRef S);
};

} // end namespace llvm

#endif // LLVM_TARGETPARSER_SUBTARGETFEATURE_H

//===-- TargetParser - Parser for target features ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a target parser to recognise hardware features such as
// FPU/CPU/ARCH names as well as specific support such as HDIV, etc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGETPARSER_TARGETPARSER_H
#define LLVM_TARGETPARSER_TARGETPARSER_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"

namespace llvm {

template <typename T> class SmallVectorImpl;
class Triple;

// Target specific information in their own namespaces.
// (ARM/AArch64/X86 are declared in ARM/AArch64/X86TargetParser.h)
// These should be generated from TableGen because the information is already
// there, and that is where new information about targets will be added.
// FIXME: To TableGen this we need to make some table-generated files available
// even if the back-end is not compiled with LLVM, plus we need to create a new
// TableGen back-end to emit these clean tables.
namespace AMDGPU {

/// GPU kinds supported by the AMDGPU target.
enum GPUKind : uint32_t {
  // Unspecified processor.
  GK_NONE = 0,

  // R600-based processors.
  GK_R600 = 1,
  GK_R630 = 2,
  GK_RS880 = 3,
  GK_RV670 = 4,
  GK_RV710 = 5,
  GK_RV730 = 6,
  GK_RV770 = 7,
  GK_CEDAR = 8,
  GK_CYPRESS = 9,
  GK_JUNIPER = 10,
  GK_REDWOOD = 11,
  GK_SUMO = 12,
  GK_BARTS = 13,
  GK_CAICOS = 14,
  GK_CAYMAN = 15,
  GK_TURKS = 16,

  GK_R600_FIRST = GK_R600,
  GK_R600_LAST = GK_TURKS,

  // AMDGCN-based processors.
  GK_GFX600 = 32,
  GK_GFX601 = 33,
  GK_GFX602 = 34,

  GK_GFX700 = 40,
  GK_GFX701 = 41,
  GK_GFX702 = 42,
  GK_GFX703 = 43,
  GK_GFX704 = 44,
  GK_GFX705 = 45,

  GK_GFX801 = 50,
  GK_GFX802 = 51,
  GK_GFX803 = 52,
  GK_GFX805 = 53,
  GK_GFX810 = 54,

  GK_GFX900 = 60,
  GK_GFX902 = 61,
  GK_GFX904 = 62,
  GK_GFX906 = 63,
  GK_GFX908 = 64,
  GK_GFX909 = 65,
  GK_GFX90A = 66,
  GK_GFX90C = 67,
  GK_GFX940 = 68,
  GK_GFX941 = 69,
  GK_GFX942 = 70,

  GK_GFX1010 = 71,
  GK_GFX1011 = 72,
  GK_GFX1012 = 73,
  GK_GFX1013 = 74,
  GK_GFX1030 = 75,
  GK_GFX1031 = 76,
  GK_GFX1032 = 77,
  GK_GFX1033 = 78,
  GK_GFX1034 = 79,
  GK_GFX1035 = 80,
  GK_GFX1036 = 81,

  GK_GFX1100 = 90,
  GK_GFX1101 = 91,
  GK_GFX1102 = 92,
  GK_GFX1103 = 93,
  GK_GFX1150 = 94,
  GK_GFX1151 = 95,

  GK_AMDGCN_FIRST = GK_GFX600,
  GK_AMDGCN_LAST = GK_GFX1151,
};

/// Instruction set architecture version.
struct IsaVersion {
  unsigned Major;
  unsigned Minor;
  unsigned Stepping;
};

// This isn't comprehensive for now, just things that are needed from the
// frontend driver.
enum ArchFeatureKind : uint32_t {
  FEATURE_NONE = 0,

  // These features only exist for r600, and are implied true for amdgcn.
  FEATURE_FMA = 1 << 1,
  FEATURE_LDEXP = 1 << 2,
  FEATURE_FP64 = 1 << 3,

  // Common features.
  FEATURE_FAST_FMA_F32 = 1 << 4,
  FEATURE_FAST_DENORMAL_F32 = 1 << 5,

  // Wavefront 32 is available.
  FEATURE_WAVE32 = 1 << 6,

  // Xnack is available.
  FEATURE_XNACK = 1 << 7,

  // Sram-ecc is available.
  FEATURE_SRAMECC = 1 << 8,

  // WGP mode is supported.
  FEATURE_WGP = 1 << 9,
};

StringRef getArchNameAMDGCN(GPUKind AK);
StringRef getArchNameR600(GPUKind AK);
StringRef getCanonicalArchName(const Triple &T, StringRef Arch);
GPUKind parseArchAMDGCN(StringRef CPU);
GPUKind parseArchR600(StringRef CPU);
unsigned getArchAttrAMDGCN(GPUKind AK);
unsigned getArchAttrR600(GPUKind AK);

void fillValidArchListAMDGCN(SmallVectorImpl<StringRef> &Values);
void fillValidArchListR600(SmallVectorImpl<StringRef> &Values);

IsaVersion getIsaVersion(StringRef GPU);

/// Fills Features map with default values for given target GPU
void fillAMDGPUFeatureMap(StringRef GPU, const Triple &T,
                          StringMap<bool> &Features);

/// Inserts wave size feature for given GPU into features map
bool insertWaveSizeFeature(StringRef GPU, const Triple &T,
                           StringMap<bool> &Features, std::string &ErrorMsg);
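
// Illustrative usage (a sketch; "gfx906" is a real AMDGCN processor name):
//
//   GPUKind Kind = parseArchAMDGCN("gfx906");
//   if (Kind != GK_NONE) {
//     unsigned Attrs = getArchAttrAMDGCN(Kind);
//     bool HasXnack = (Attrs & FEATURE_XNACK) != 0;
//     IsaVersion V = getIsaVersion("gfx906");  // expect {9, 0, 6}
//   }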

} // namespace AMDGPU
} // namespace llvm

#endif

//===--- Utility.h -------------------*- mode:c++;eval:(read-only-mode) -*-===//
//       Do not edit! See README.txt.
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provide some utility classes for use in the demangler.
// There are two copies of this file in the source tree.  The one in libcxxabi
// is the original and the one in llvm is the copy.  Use cp-to-llvm.sh to update
// the copy.  See README.txt for more details.
//
//===----------------------------------------------------------------------===//

#ifndef DEMANGLE_UTILITY_H
#define DEMANGLE_UTILITY_H

#include "DemangleConfig.h"

#include <array>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <exception>
#include <limits>
#include <string_view>

DEMANGLE_NAMESPACE_BEGIN

// Stream that AST nodes write their string representation into after the AST
// has been parsed.
class OutputBuffer {
  char *Buffer = nullptr;
  size_t CurrentPosition = 0;
  size_t BufferCapacity = 0;

  // Ensure there are at least N more positions in the buffer.
  void grow(size_t N) {
    size_t Need = N + CurrentPosition;
    if (Need > BufferCapacity) {
      // Reduce the number of reallocations, with a bit of hysteresis. The
      // number here is chosen so the first allocation will more-than-likely not
      // allocate more than 1K.
      Need += 1024 - 32;
      BufferCapacity *= 2;
      if (BufferCapacity < Need)
        BufferCapacity = Need;
      Buffer = static_cast<char *>(std::realloc(Buffer, BufferCapacity));
      if (Buffer == nullptr)
        std::terminate();
    }
  }

  OutputBuffer &writeUnsigned(uint64_t N, bool isNeg = false) {
    std::array<char, 21> Temp;
    char *TempPtr = Temp.data() + Temp.size();

    // Output at least one character.
    do {
      *--TempPtr = char('0' + N % 10);
      N /= 10;
    } while (N);

    // Add negative sign.
    if (isNeg)
      *--TempPtr = '-';

    return operator+=(
        std::string_view(TempPtr, Temp.data() + Temp.size() - TempPtr));
  }

public:
  OutputBuffer(char *StartBuf, size_t Size)
      : Buffer(StartBuf), BufferCapacity(Size) {}
  OutputBuffer(char *StartBuf, size_t *SizePtr)
      : OutputBuffer(StartBuf, StartBuf ? *SizePtr : 0) {}
  OutputBuffer() = default;
  // Non-copyable
  OutputBuffer(const OutputBuffer &) = delete;
  OutputBuffer &operator=(const OutputBuffer &) = delete;

  operator std::string_view() const {
    return std::string_view(Buffer, CurrentPosition);
  }

  /// If a ParameterPackExpansion (or similar type) is encountered, the offset
  /// into the pack that we're currently printing.
  unsigned CurrentPackIndex = std::numeric_limits<unsigned>::max();
  unsigned CurrentPackMax = std::numeric_limits<unsigned>::max();

  /// When zero, we're printing template args and '>' needs to be parenthesized.
  /// Use a counter so we can simply increment inside parentheses.
  unsigned GtIsGt = 1;

  bool isGtInsideTemplateArgs() const { return GtIsGt == 0; }

  void printOpen(char Open = '(') {
    GtIsGt++;
    *this += Open;
  }
  void printClose(char Close = ')') {
    GtIsGt--;
    *this += Close;
  }

  OutputBuffer &operator+=(std::string_view R) {
    if (size_t Size = R.size()) {
      grow(Size);
      std::memcpy(Buffer + CurrentPosition, &*R.begin(), Size);
      CurrentPosition += Size;
    }
    return *this;
  }

  OutputBuffer &operator+=(char C) {
    grow(1);
    Buffer[CurrentPosition++] = C;
    return *this;
  }

  OutputBuffer &prepend(std::string_view R) {
    size_t Size = R.size();

    grow(Size);
    std::memmove(Buffer + Size, Buffer, CurrentPosition);
    std::memcpy(Buffer, &*R.begin(), Size);
    CurrentPosition += Size;

    return *this;
  }

  OutputBuffer &operator<<(std::string_view R) { return (*this += R); }

  OutputBuffer &operator<<(char C) { return (*this += C); }

  OutputBuffer &operator<<(long long N) {
    return writeUnsigned(static_cast<unsigned long long>(std::abs(N)), N < 0);
  }

  OutputBuffer &operator<<(unsigned long long N) {
    return writeUnsigned(N, false);
  }

  OutputBuffer &operator<<(long N) {
    return this->operator<<(static_cast<long long>(N));
  }

  OutputBuffer &operator<<(unsigned long N) {
    return this->operator<<(static_cast<unsigned long long>(N));
  }

  OutputBuffer &operator<<(int N) {
    return this->operator<<(static_cast<long long>(N));
  }

  OutputBuffer &operator<<(unsigned int N) {
    return this->operator<<(static_cast<unsigned long long>(N));
  }

  void insert(size_t Pos, const char *S, size_t N) {
    assert(Pos <= CurrentPosition);
    if (N == 0)
      return;
    grow(N);
    std::memmove(Buffer + Pos + N, Buffer + Pos, CurrentPosition - Pos);
    std::memcpy(Buffer + Pos, S, N);
    CurrentPosition += N;
  }

  size_t getCurrentPosition() const { return CurrentPosition; }
  void setCurrentPosition(size_t NewPos) { CurrentPosition = NewPos; }

  char back() const {
    assert(CurrentPosition);
    return Buffer[CurrentPosition - 1];
  }

  bool empty() const { return CurrentPosition == 0; }

  char *getBuffer() { return Buffer; }
  char *getBufferEnd() { return Buffer + CurrentPosition - 1; }
  size_t getBufferCapacity() const { return BufferCapacity; }
};
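
// Illustrative usage (a sketch): a default-constructed buffer manages its
// own storage via realloc(); the caller takes ownership of the final buffer
// and must free() it.
//
//   OutputBuffer OB;
//   OB << "align(" << 16 << ')';
//   std::string_view Text = OB;        // "align(16)"
//   std::free(OB.getBuffer());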

template <class T> class ScopedOverride {
  T &Loc;
  T Original;

public:
  ScopedOverride(T &Loc_) : ScopedOverride(Loc_, Loc_) {}

  ScopedOverride(T &Loc_, T NewVal) : Loc(Loc_), Original(Loc_) {
    Loc_ = std::move(NewVal);
  }
  ~ScopedOverride() { Loc = std::move(Original); }

  ScopedOverride(const ScopedOverride &) = delete;
  ScopedOverride &operator=(const ScopedOverride &) = delete;
};
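
// Illustrative usage (a sketch): temporarily override a location for the
// lifetime of the guard; the original value is restored on scope exit.
//
//   unsigned Depth = 0;
//   {
//     ScopedOverride<unsigned> SaveDepth(Depth, Depth + 1);  // Depth == 1
//   }                                                        // Depth == 0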

DEMANGLE_NAMESPACE_END

#endif

//===--- StringViewExtras.h ----------*- mode:c++;eval:(read-only-mode) -*-===//
//       Do not edit! See README.txt.
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// There are two copies of this file in the source tree.  The one under
// libcxxabi is the original and the one under llvm is the copy.  Use
// cp-to-llvm.sh to update the copy.  See README.txt for more details.
//
//===----------------------------------------------------------------------===//

#ifndef DEMANGLE_STRINGVIEW_H
#define DEMANGLE_STRINGVIEW_H

#include "DemangleConfig.h"

#include <string_view>

DEMANGLE_NAMESPACE_BEGIN

inline bool starts_with(std::string_view self, char C) noexcept {
  return !self.empty() && *self.begin() == C;
}

inline bool starts_with(std::string_view haystack,
                        std::string_view needle) noexcept {
  if (needle.size() > haystack.size())
    return false;
  haystack.remove_suffix(haystack.size() - needle.size());
  return haystack == needle;
}
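
// Illustrative usage (a sketch):
//
//   assert(starts_with("_Z3foov", '_'));    // single-character form
//   assert(starts_with("_Z3foov", "_Z"));   // string_view form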

DEMANGLE_NAMESPACE_END

#endif

//===------------------------- MicrosoftDemangle.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEMANGLE_MICROSOFTDEMANGLE_H
#define LLVM_DEMANGLE_MICROSOFTDEMANGLE_H

#include "llvm/Demangle/MicrosoftDemangleNodes.h"

#include <cassert>
#include <string_view>
#include <utility>

namespace llvm {
namespace ms_demangle {
// This memory allocator is extremely fast, but it doesn't call dtors
// for allocated objects. That means you can't use STL containers
// (such as std::vector) with this allocator. But it pays off --
// the demangler is 3x faster with this allocator compared to one with
// STL containers.
constexpr size_t AllocUnit = 4096;

class ArenaAllocator {
  struct AllocatorNode {
    uint8_t *Buf = nullptr;
    size_t Used = 0;
    size_t Capacity = 0;
    AllocatorNode *Next = nullptr;
  };

  void addNode(size_t Capacity) {
    AllocatorNode *NewHead = new AllocatorNode;
    NewHead->Buf = new uint8_t[Capacity];
    NewHead->Next = Head;
    NewHead->Capacity = Capacity;
    Head = NewHead;
    NewHead->Used = 0;
  }

public:
  ArenaAllocator() { addNode(AllocUnit); }

  ~ArenaAllocator() {
    while (Head) {
      assert(Head->Buf);
      delete[] Head->Buf;
      AllocatorNode *Next = Head->Next;
      delete Head;
      Head = Next;
    }
  }

  char *allocUnalignedBuffer(size_t Size) {
    assert(Head && Head->Buf);

    uint8_t *P = Head->Buf + Head->Used;

    Head->Used += Size;
    if (Head->Used <= Head->Capacity)
      return reinterpret_cast<char *>(P);

    addNode(std::max(AllocUnit, Size));
    Head->Used = Size;
    return reinterpret_cast<char *>(Head->Buf);
  }

  template <typename T, typename... Args> T *allocArray(size_t Count) {
    size_t Size = Count * sizeof(T);
    assert(Head && Head->Buf);

    size_t P = (size_t)Head->Buf + Head->Used;
    uintptr_t AlignedP =
        (((size_t)P + alignof(T) - 1) & ~(size_t)(alignof(T) - 1));
    uint8_t *PP = (uint8_t *)AlignedP;
    size_t Adjustment = AlignedP - P;

    Head->Used += Size + Adjustment;
    if (Head->Used <= Head->Capacity)
      return new (PP) T[Count]();

    addNode(std::max(AllocUnit, Size));
    Head->Used = Size;
    return new (Head->Buf) T[Count]();
  }

  template <typename T, typename... Args> T *alloc(Args &&... ConstructorArgs) {
    constexpr size_t Size = sizeof(T);
    assert(Head && Head->Buf);

    size_t P = (size_t)Head->Buf + Head->Used;
    uintptr_t AlignedP =
        (((size_t)P + alignof(T) - 1) & ~(size_t)(alignof(T) - 1));
    uint8_t *PP = (uint8_t *)AlignedP;
    size_t Adjustment = AlignedP - P;

    Head->Used += Size + Adjustment;
    if (Head->Used <= Head->Capacity)
      return new (PP) T(std::forward<Args>(ConstructorArgs)...);

    static_assert(Size < AllocUnit);
    addNode(AllocUnit);
    Head->Used = Size;
    return new (Head->Buf) T(std::forward<Args>(ConstructorArgs)...);
  }

private:
  AllocatorNode *Head = nullptr;
};
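
// Illustrative usage (a sketch; NamedIdentifierNode comes from
// MicrosoftDemangleNodes.h): allocations are bump-pointer fast, destructors
// never run, and all memory is released when the arena is destroyed.
//
//   ArenaAllocator Arena;
//   auto *Id = Arena.alloc<NamedIdentifierNode>();
//   char *Scratch = Arena.allocUnalignedBuffer(128);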

struct BackrefContext {
  static constexpr size_t Max = 10;

  TypeNode *FunctionParams[Max];
  size_t FunctionParamCount = 0;

  // The first 10 BackReferences in a mangled name can be back-referenced by
  // special name @[0-9]. This is storage for the first 10 BackReferences.
  NamedIdentifierNode *Names[Max];
  size_t NamesCount = 0;
};

enum class QualifierMangleMode { Drop, Mangle, Result };

enum NameBackrefBehavior : uint8_t {
  NBB_None = 0,          // don't save any names as backrefs.
  NBB_Template = 1 << 0, // save template instantiations.
  NBB_Simple = 1 << 1,   // save simple names.
};

enum class FunctionIdentifierCodeGroup { Basic, Under, DoubleUnder };

// Demangler class takes the main role in demangling symbols.
// It has a set of functions to parse mangled symbols into Type instances.
// It also has a set of functions to convert Type instances to strings.
class Demangler {
public:
  Demangler() = default;
  virtual ~Demangler() = default;

  // Call parse() first, then check whether Error is true.  If it is false,
  // call output() on the result to write the formatted name to the given
  // stream.
  SymbolNode *parse(std::string_view &MangledName);

  TagTypeNode *parseTagUniqueName(std::string_view &MangledName);

  // True if an error occurred.
  bool Error = false;

  void dumpBackReferences();

private:
  SymbolNode *demangleEncodedSymbol(std::string_view &MangledName,
                                    QualifiedNameNode *QN);
  SymbolNode *demangleDeclarator(std::string_view &MangledName);
  SymbolNode *demangleMD5Name(std::string_view &MangledName);
  SymbolNode *demangleTypeinfoName(std::string_view &MangledName);

  VariableSymbolNode *demangleVariableEncoding(std::string_view &MangledName,
                                               StorageClass SC);
  FunctionSymbolNode *demangleFunctionEncoding(std::string_view &MangledName);

  Qualifiers demanglePointerExtQualifiers(std::string_view &MangledName);

  // Parser functions. This is a recursive-descent parser.
  TypeNode *demangleType(std::string_view &MangledName,
                         QualifierMangleMode QMM);
  PrimitiveTypeNode *demanglePrimitiveType(std::string_view &MangledName);
  CustomTypeNode *demangleCustomType(std::string_view &MangledName);
  TagTypeNode *demangleClassType(std::string_view &MangledName);
  PointerTypeNode *demanglePointerType(std::string_view &MangledName);
  PointerTypeNode *demangleMemberPointerType(std::string_view &MangledName);
  FunctionSignatureNode *demangleFunctionType(std::string_view &MangledName,
                                              bool HasThisQuals);

  ArrayTypeNode *demangleArrayType(std::string_view &MangledName);

  NodeArrayNode *demangleFunctionParameterList(std::string_view &MangledName,
                                               bool &IsVariadic);
  NodeArrayNode *demangleTemplateParameterList(std::string_view &MangledName);

  std::pair<uint64_t, bool> demangleNumber(std::string_view &MangledName);
  uint64_t demangleUnsigned(std::string_view &MangledName);
  int64_t demangleSigned(std::string_view &MangledName);

  void memorizeString(std::string_view s);
  void memorizeIdentifier(IdentifierNode *Identifier);

  /// Allocate a copy of \p Borrowed into memory that we own.
  std::string_view copyString(std::string_view Borrowed);

  QualifiedNameNode *
  demangleFullyQualifiedTypeName(std::string_view &MangledName);
  QualifiedNameNode *
  demangleFullyQualifiedSymbolName(std::string_view &MangledName);

  IdentifierNode *demangleUnqualifiedTypeName(std::string_view &MangledName,
                                              bool Memorize);
  IdentifierNode *demangleUnqualifiedSymbolName(std::string_view &MangledName,
                                                NameBackrefBehavior NBB);

  QualifiedNameNode *demangleNameScopeChain(std::string_view &MangledName,
                                            IdentifierNode *UnqualifiedName);
  IdentifierNode *demangleNameScopePiece(std::string_view &MangledName);

  NamedIdentifierNode *demangleBackRefName(std::string_view &MangledName);
  IdentifierNode *
  demangleTemplateInstantiationName(std::string_view &MangledName,
                                    NameBackrefBehavior NBB);
  IntrinsicFunctionKind
  translateIntrinsicFunctionCode(char CH, FunctionIdentifierCodeGroup Group);
  IdentifierNode *demangleFunctionIdentifierCode(std::string_view &MangledName);
  IdentifierNode *
  demangleFunctionIdentifierCode(std::string_view &MangledName,
                                 FunctionIdentifierCodeGroup Group);
  StructorIdentifierNode *
  demangleStructorIdentifier(std::string_view &MangledName, bool IsDestructor);
  ConversionOperatorIdentifierNode *
  demangleConversionOperatorIdentifier(std::string_view &MangledName);
  LiteralOperatorIdentifierNode *
  demangleLiteralOperatorIdentifier(std::string_view &MangledName);

  SymbolNode *demangleSpecialIntrinsic(std::string_view &MangledName);
  SpecialTableSymbolNode *
  demangleSpecialTableSymbolNode(std::string_view &MangledName,
                                 SpecialIntrinsicKind SIK);
  LocalStaticGuardVariableNode *
  demangleLocalStaticGuard(std::string_view &MangledName, bool IsThread);
  VariableSymbolNode *demangleUntypedVariable(ArenaAllocator &Arena,
                                              std::string_view &MangledName,
                                              std::string_view VariableName);
  VariableSymbolNode *
  demangleRttiBaseClassDescriptorNode(ArenaAllocator &Arena,
                                      std::string_view &MangledName);
  FunctionSymbolNode *demangleInitFiniStub(std::string_view &MangledName,
                                           bool IsDestructor);

  NamedIdentifierNode *demangleSimpleName(std::string_view &MangledName,
                                          bool Memorize);
  NamedIdentifierNode *
  demangleAnonymousNamespaceName(std::string_view &MangledName);
  NamedIdentifierNode *
  demangleLocallyScopedNamePiece(std::string_view &MangledName);
  EncodedStringLiteralNode *
  demangleStringLiteral(std::string_view &MangledName);
  FunctionSymbolNode *demangleVcallThunkNode(std::string_view &MangledName);

  std::string_view demangleSimpleString(std::string_view &MangledName,
                                        bool Memorize);

  FuncClass demangleFunctionClass(std::string_view &MangledName);
  CallingConv demangleCallingConvention(std::string_view &MangledName);
  StorageClass demangleVariableStorageClass(std::string_view &MangledName);
  bool demangleThrowSpecification(std::string_view &MangledName);
  wchar_t demangleWcharLiteral(std::string_view &MangledName);
  uint8_t demangleCharLiteral(std::string_view &MangledName);

  std::pair<Qualifiers, bool> demangleQualifiers(std::string_view &MangledName);

  // Memory allocator.
  ArenaAllocator Arena;

  // A single type uses one global back-ref table for all function params.
  // This means back-refs can even go "into" other types.  Examples:
  //
  //  // Second int* is a back-ref to first.
  //  void foo(int *, int*);
  //
  //  // Second int* is not a back-ref to first (first is not a function param).
  //  int* foo(int*);
  //
  //  // Second int* is a back-ref to first (ALL function types share the same
  //  // back-ref map).
  //  using F = void(*)(int*);
  //  F G(int *);
  BackrefContext Backrefs;
};
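
// Illustrative driver (a sketch; "?x@@3HA" conventionally demangles to
// "int x"):
//
//   Demangler D;
//   std::string_view Mangled = "?x@@3HA";
//   SymbolNode *S = D.parse(Mangled);
//   if (!D.Error)
//     std::string Text = S->toString();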

} // namespace ms_demangle
} // namespace llvm

#endif // LLVM_DEMANGLE_MICROSOFTDEMANGLE_H

//===- MicrosoftDemangleNodes.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AST nodes used in the MSVC demangler.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEMANGLE_MICROSOFTDEMANGLENODES_H
#define LLVM_DEMANGLE_MICROSOFTDEMANGLENODES_H

#include <array>
#include <cstdint>
#include <string>
#include <string_view>

namespace llvm {
namespace itanium_demangle {
class OutputBuffer;
}
}

using llvm::itanium_demangle::OutputBuffer;

namespace llvm {
namespace ms_demangle {

// Storage classes
enum Qualifiers : uint8_t {
  Q_None = 0,
  Q_Const = 1 << 0,
  Q_Volatile = 1 << 1,
  Q_Far = 1 << 2,
  Q_Huge = 1 << 3,
  Q_Unaligned = 1 << 4,
  Q_Restrict = 1 << 5,
  Q_Pointer64 = 1 << 6
};

enum class StorageClass : uint8_t {
  None,
  PrivateStatic,
  ProtectedStatic,
  PublicStatic,
  Global,
  FunctionLocalStatic,
};

enum class PointerAffinity { None, Pointer, Reference, RValueReference };
enum class FunctionRefQualifier { None, Reference, RValueReference };

// Calling conventions
enum class CallingConv : uint8_t {
  None,
  Cdecl,
  Pascal,
  Thiscall,
  Stdcall,
  Fastcall,
  Clrcall,
  Eabi,
  Vectorcall,
  Regcall,
  Swift,      // Clang-only
  SwiftAsync, // Clang-only
};

enum class ReferenceKind : uint8_t { None, LValueRef, RValueRef };

enum OutputFlags {
  OF_Default = 0,
  OF_NoCallingConvention = 1,
  OF_NoTagSpecifier = 2,
  OF_NoAccessSpecifier = 4,
  OF_NoMemberType = 8,
  OF_NoReturnType = 16,
  OF_NoVariableType = 32,
};

// Types
enum class PrimitiveKind {
  Void,
  Bool,
  Char,
  Schar,
  Uchar,
  Char8,
  Char16,
  Char32,
  Short,
  Ushort,
  Int,
  Uint,
  Long,
  Ulong,
  Int64,
  Uint64,
  Wchar,
  Float,
  Double,
  Ldouble,
  Nullptr,
};

enum class CharKind {
  Char,
  Char16,
  Char32,
  Wchar,
};

enum class IntrinsicFunctionKind : uint8_t {
  None,
  New,                        // ?2 # operator new
  Delete,                     // ?3 # operator delete
  Assign,                     // ?4 # operator=
  RightShift,                 // ?5 # operator>>
  LeftShift,                  // ?6 # operator<<
  LogicalNot,                 // ?7 # operator!
  Equals,                     // ?8 # operator==
  NotEquals,                  // ?9 # operator!=
  ArraySubscript,             // ?A # operator[]
  Pointer,                    // ?C # operator->
  Dereference,                // ?D # operator*
  Increment,                  // ?E # operator++
  Decrement,                  // ?F # operator--
  Minus,                      // ?G # operator-
  Plus,                       // ?H # operator+
  BitwiseAnd,                 // ?I # operator&
  MemberPointer,              // ?J # operator->*
  Divide,                     // ?K # operator/
  Modulus,                    // ?L # operator%
  LessThan,                   // ?M operator<
  LessThanEqual,              // ?N operator<=
  GreaterThan,                // ?O operator>
  GreaterThanEqual,           // ?P operator>=
  Comma,                      // ?Q operator,
  Parens,                     // ?R operator()
  BitwiseNot,                 // ?S operator~
  BitwiseXor,                 // ?T operator^
  BitwiseOr,                  // ?U operator|
  LogicalAnd,                 // ?V operator&&
  LogicalOr,                  // ?W operator||
  TimesEqual,                 // ?X operator*=
  PlusEqual,                  // ?Y operator+=
  MinusEqual,                 // ?Z operator-=
  DivEqual,                   // ?_0 operator/=
  ModEqual,                   // ?_1 operator%=
  RshEqual,                   // ?_2 operator>>=
  LshEqual,                   // ?_3 operator<<=
  BitwiseAndEqual,            // ?_4 operator&=
  BitwiseOrEqual,             // ?_5 operator|=
  BitwiseXorEqual,            // ?_6 operator^=
  VbaseDtor,                  // ?_D # vbase destructor
  VecDelDtor,                 // ?_E # vector deleting destructor
  DefaultCtorClosure,         // ?_F # default constructor closure
  ScalarDelDtor,              // ?_G # scalar deleting destructor
  VecCtorIter,                // ?_H # vector constructor iterator
  VecDtorIter,                // ?_I # vector destructor iterator
  VecVbaseCtorIter,           // ?_J # vector vbase constructor iterator
  VdispMap,                   // ?_K # virtual displacement map
  EHVecCtorIter,              // ?_L # eh vector constructor iterator
  EHVecDtorIter,              // ?_M # eh vector destructor iterator
  EHVecVbaseCtorIter,         // ?_N # eh vector vbase constructor iterator
  CopyCtorClosure,            // ?_O # copy constructor closure
  LocalVftableCtorClosure,    // ?_T # local vftable constructor closure
  ArrayNew,                   // ?_U operator new[]
  ArrayDelete,                // ?_V operator delete[]
  ManVectorCtorIter,          // ?__A managed vector ctor iterator
  ManVectorDtorIter,          // ?__B managed vector dtor iterator
  EHVectorCopyCtorIter,       // ?__C EH vector copy ctor iterator
  EHVectorVbaseCopyCtorIter,  // ?__D EH vector vbase copy ctor iterator
  VectorCopyCtorIter,         // ?__G vector copy constructor iterator
  VectorVbaseCopyCtorIter,    // ?__H vector vbase copy constructor iterator
  ManVectorVbaseCopyCtorIter, // ?__I managed vector vbase copy constructor
  CoAwait,                    // ?__L operator co_await
  Spaceship,                  // ?__M operator<=>
  MaxIntrinsic
};

enum class SpecialIntrinsicKind {
  None,
  Vftable,
  Vbtable,
  Typeof,
  VcallThunk,
  LocalStaticGuard,
  StringLiteralSymbol,
  UdtReturning,
  Unknown,
  DynamicInitializer,
  DynamicAtexitDestructor,
  RttiTypeDescriptor,
  RttiBaseClassDescriptor,
  RttiBaseClassArray,
  RttiClassHierarchyDescriptor,
  RttiCompleteObjLocator,
  LocalVftable,
  LocalStaticThreadGuard,
};

// Function classes
enum FuncClass : uint16_t {
  FC_None = 0,
  FC_Public = 1 << 0,
  FC_Protected = 1 << 1,
  FC_Private = 1 << 2,
  FC_Global = 1 << 3,
  FC_Static = 1 << 4,
  FC_Virtual = 1 << 5,
  FC_Far = 1 << 6,
  FC_ExternC = 1 << 7,
  FC_NoParameterList = 1 << 8,
  FC_VirtualThisAdjust = 1 << 9,
  FC_VirtualThisAdjustEx = 1 << 10,
  FC_StaticThisAdjust = 1 << 11,
};

enum class TagKind { Class, Struct, Union, Enum };

enum class NodeKind {
  Unknown,
  Md5Symbol,
  PrimitiveType,
  FunctionSignature,
  Identifier,
  NamedIdentifier,
  VcallThunkIdentifier,
  LocalStaticGuardIdentifier,
  IntrinsicFunctionIdentifier,
  ConversionOperatorIdentifier,
  DynamicStructorIdentifier,
  StructorIdentifier,
  LiteralOperatorIdentifier,
  ThunkSignature,
  PointerType,
  TagType,
  ArrayType,
  Custom,
  IntrinsicType,
  NodeArray,
  QualifiedName,
  TemplateParameterReference,
  EncodedStringLiteral,
  IntegerLiteral,
  RttiBaseClassDescriptor,
  LocalStaticGuardVariable,
  FunctionSymbol,
  VariableSymbol,
  SpecialTableSymbol
};

struct Node {
  explicit Node(NodeKind K) : Kind(K) {}
  virtual ~Node() = default;

  NodeKind kind() const { return Kind; }

  virtual void output(OutputBuffer &OB, OutputFlags Flags) const = 0;

  std::string toString(OutputFlags Flags = OF_Default) const;

private:
  NodeKind Kind;
};

struct TypeNode;
struct PrimitiveTypeNode;
struct FunctionSignatureNode;
struct IdentifierNode;
struct NamedIdentifierNode;
struct VcallThunkIdentifierNode;
struct IntrinsicFunctionIdentifierNode;
struct LiteralOperatorIdentifierNode;
struct ConversionOperatorIdentifierNode;
struct StructorIdentifierNode;
struct ThunkSignatureNode;
struct PointerTypeNode;
struct ArrayTypeNode;
struct TagTypeNode;
struct NodeArrayNode;
struct QualifiedNameNode;
struct TemplateParameterReferenceNode;
struct EncodedStringLiteralNode;
struct IntegerLiteralNode;
struct RttiBaseClassDescriptorNode;
struct LocalStaticGuardVariableNode;
struct SymbolNode;
struct FunctionSymbolNode;
struct VariableSymbolNode;
struct SpecialTableSymbolNode;

struct TypeNode : public Node {
  explicit TypeNode(NodeKind K) : Node(K) {}

  virtual void outputPre(OutputBuffer &OB, OutputFlags Flags) const = 0;
  virtual void outputPost(OutputBuffer &OB, OutputFlags Flags) const = 0;

  void output(OutputBuffer &OB, OutputFlags Flags) const override {
    outputPre(OB, Flags);
    outputPost(OB, Flags);
  }

  Qualifiers Quals = Q_None;
};

struct PrimitiveTypeNode : public TypeNode {
  explicit PrimitiveTypeNode(PrimitiveKind K)
      : TypeNode(NodeKind::PrimitiveType), PrimKind(K) {}

  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override {}

  PrimitiveKind PrimKind;
};

struct FunctionSignatureNode : public TypeNode {
  explicit FunctionSignatureNode(NodeKind K) : TypeNode(K) {}
  FunctionSignatureNode() : TypeNode(NodeKind::FunctionSignature) {}

  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override;

  // Valid if this FunctionSignatureNode is the Pointee of a PointerType or
  // MemberPointerType.
  PointerAffinity Affinity = PointerAffinity::None;

  // The function's calling convention.
  CallingConv CallConvention = CallingConv::None;

  // Function flags (global, public, etc.)
  FuncClass FunctionClass = FC_Global;

  FunctionRefQualifier RefQualifier = FunctionRefQualifier::None;

  // The return type of the function.
  TypeNode *ReturnType = nullptr;

  // True if this is a C-style ... varargs function.
  bool IsVariadic = false;

  // Function parameters
  NodeArrayNode *Params = nullptr;

  // True if the function type is noexcept.
  bool IsNoexcept = false;
};

struct IdentifierNode : public Node {
  explicit IdentifierNode(NodeKind K) : Node(K) {}

  NodeArrayNode *TemplateParams = nullptr;

protected:
  void outputTemplateParameters(OutputBuffer &OB, OutputFlags Flags) const;
};

struct VcallThunkIdentifierNode : public IdentifierNode {
  VcallThunkIdentifierNode() : IdentifierNode(NodeKind::VcallThunkIdentifier) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  uint64_t OffsetInVTable = 0;
};

struct DynamicStructorIdentifierNode : public IdentifierNode {
  DynamicStructorIdentifierNode()
      : IdentifierNode(NodeKind::DynamicStructorIdentifier) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  VariableSymbolNode *Variable = nullptr;
  QualifiedNameNode *Name = nullptr;
  bool IsDestructor = false;
};

struct NamedIdentifierNode : public IdentifierNode {
  NamedIdentifierNode() : IdentifierNode(NodeKind::NamedIdentifier) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  std::string_view Name;
};

struct IntrinsicFunctionIdentifierNode : public IdentifierNode {
  explicit IntrinsicFunctionIdentifierNode(IntrinsicFunctionKind Operator)
      : IdentifierNode(NodeKind::IntrinsicFunctionIdentifier),
        Operator(Operator) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  IntrinsicFunctionKind Operator;
};

struct LiteralOperatorIdentifierNode : public IdentifierNode {
  LiteralOperatorIdentifierNode()
      : IdentifierNode(NodeKind::LiteralOperatorIdentifier) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  std::string_view Name;
};

struct LocalStaticGuardIdentifierNode : public IdentifierNode {
  LocalStaticGuardIdentifierNode()
      : IdentifierNode(NodeKind::LocalStaticGuardIdentifier) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  bool IsThread = false;
  uint32_t ScopeIndex = 0;
};

struct ConversionOperatorIdentifierNode : public IdentifierNode {
  ConversionOperatorIdentifierNode()
      : IdentifierNode(NodeKind::ConversionOperatorIdentifier) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  // The type that this operator converts to.
  TypeNode *TargetType = nullptr;
};

struct StructorIdentifierNode : public IdentifierNode {
  StructorIdentifierNode() : IdentifierNode(NodeKind::StructorIdentifier) {}
  explicit StructorIdentifierNode(bool IsDestructor)
      : IdentifierNode(NodeKind::StructorIdentifier),
        IsDestructor(IsDestructor) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  // The name of the class that this is a structor of.
  IdentifierNode *Class = nullptr;
  bool IsDestructor = false;
};

struct ThunkSignatureNode : public FunctionSignatureNode {
  ThunkSignatureNode() : FunctionSignatureNode(NodeKind::ThunkSignature) {}

  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override;

  struct ThisAdjustor {
    uint32_t StaticOffset = 0;
    int32_t VBPtrOffset = 0;
    int32_t VBOffsetOffset = 0;
    int32_t VtordispOffset = 0;
  };

  ThisAdjustor ThisAdjust;
};

struct PointerTypeNode : public TypeNode {
  PointerTypeNode() : TypeNode(NodeKind::PointerType) {}
  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override;

  // Is this a pointer, reference, or rvalue-reference?
  PointerAffinity Affinity = PointerAffinity::None;

  // If this is a member pointer, this is the class that the member is in.
  QualifiedNameNode *ClassParent = nullptr;

  // Represents a type X in "a pointer to X", "a reference to X", or
  // "an rvalue-reference to X".
  TypeNode *Pointee = nullptr;
};

struct TagTypeNode : public TypeNode {
  explicit TagTypeNode(TagKind Tag) : TypeNode(NodeKind::TagType), Tag(Tag) {}

  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override;

  QualifiedNameNode *QualifiedName = nullptr;
  TagKind Tag;
};

struct ArrayTypeNode : public TypeNode {
  ArrayTypeNode() : TypeNode(NodeKind::ArrayType) {}

  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override;

  void outputDimensionsImpl(OutputBuffer &OB, OutputFlags Flags) const;
  void outputOneDimension(OutputBuffer &OB, OutputFlags Flags, Node *N) const;

  // A list of array dimensions.  e.g. [3,4,5] in `int Foo[3][4][5]`
  NodeArrayNode *Dimensions = nullptr;

  // The type of array element.
  TypeNode *ElementType = nullptr;
};

struct IntrinsicNode : public TypeNode {
  IntrinsicNode() : TypeNode(NodeKind::IntrinsicType) {}
  void output(OutputBuffer &OB, OutputFlags Flags) const override {}
};

struct CustomTypeNode : public TypeNode {
  CustomTypeNode() : TypeNode(NodeKind::Custom) {}

  void outputPre(OutputBuffer &OB, OutputFlags Flags) const override;
  void outputPost(OutputBuffer &OB, OutputFlags Flags) const override;

  IdentifierNode *Identifier = nullptr;
};

struct NodeArrayNode : public Node {
  NodeArrayNode() : Node(NodeKind::NodeArray) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  void output(OutputBuffer &OB, OutputFlags Flags,
              std::string_view Separator) const;

  Node **Nodes = nullptr;
  size_t Count = 0;
};

struct QualifiedNameNode : public Node {
  QualifiedNameNode() : Node(NodeKind::QualifiedName) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  NodeArrayNode *Components = nullptr;

  IdentifierNode *getUnqualifiedIdentifier() {
    Node *LastComponent = Components->Nodes[Components->Count - 1];
    return static_cast<IdentifierNode *>(LastComponent);
  }
};

struct TemplateParameterReferenceNode : public Node {
  TemplateParameterReferenceNode()
      : Node(NodeKind::TemplateParameterReference) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  SymbolNode *Symbol = nullptr;

  int ThunkOffsetCount = 0;
  std::array<int64_t, 3> ThunkOffsets;
  PointerAffinity Affinity = PointerAffinity::None;
  bool IsMemberPointer = false;
};

struct IntegerLiteralNode : public Node {
  IntegerLiteralNode() : Node(NodeKind::IntegerLiteral) {}
  IntegerLiteralNode(uint64_t Value, bool IsNegative)
      : Node(NodeKind::IntegerLiteral), Value(Value), IsNegative(IsNegative) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  uint64_t Value = 0;
  bool IsNegative = false;
};

struct RttiBaseClassDescriptorNode : public IdentifierNode {
  RttiBaseClassDescriptorNode()
      : IdentifierNode(NodeKind::RttiBaseClassDescriptor) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  uint32_t NVOffset = 0;
  int32_t VBPtrOffset = 0;
  uint32_t VBTableOffset = 0;
  uint32_t Flags = 0;
};

struct SymbolNode : public Node {
  explicit SymbolNode(NodeKind K) : Node(K) {}
  void output(OutputBuffer &OB, OutputFlags Flags) const override;
  QualifiedNameNode *Name = nullptr;
};

struct SpecialTableSymbolNode : public SymbolNode {
  explicit SpecialTableSymbolNode()
      : SymbolNode(NodeKind::SpecialTableSymbol) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;
  QualifiedNameNode *TargetName = nullptr;
  Qualifiers Quals = Qualifiers::Q_None;
};

struct LocalStaticGuardVariableNode : public SymbolNode {
  LocalStaticGuardVariableNode()
      : SymbolNode(NodeKind::LocalStaticGuardVariable) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  bool IsVisible = false;
};

struct EncodedStringLiteralNode : public SymbolNode {
  EncodedStringLiteralNode() : SymbolNode(NodeKind::EncodedStringLiteral) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  std::string_view DecodedString;
  bool IsTruncated = false;
  CharKind Char = CharKind::Char;
};

struct VariableSymbolNode : public SymbolNode {
  VariableSymbolNode() : SymbolNode(NodeKind::VariableSymbol) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  StorageClass SC = StorageClass::None;
  TypeNode *Type = nullptr;
};

struct FunctionSymbolNode : public SymbolNode {
  FunctionSymbolNode() : SymbolNode(NodeKind::FunctionSymbol) {}

  void output(OutputBuffer &OB, OutputFlags Flags) const override;

  FunctionSignatureNode *Signature = nullptr;
};

} // namespace ms_demangle
} // namespace llvm

#endif
Demangle/StringView.h
//===--- StringView.h ----------------*- mode:c++;eval:(read-only-mode) -*-===//
//       Do not edit! See README.txt.
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// FIXME: Use std::string_view instead when we support C++17.
// There are two copies of this file in the source tree.  The one under
// libcxxabi is the original and the one under llvm is the copy.  Use
// cp-to-llvm.sh to update the copy.  See README.txt for more details.
//
//===----------------------------------------------------------------------===//

#ifndef DEMANGLE_STRINGVIEW_H
#define DEMANGLE_STRINGVIEW_H

#include "DemangleConfig.h"
#include <cassert>
#include <cstring>

DEMANGLE_NAMESPACE_BEGIN

class StringView {
  const char *First;
  const char *Last;

public:
  static const size_t npos = ~size_t(0);

  template <size_t N>
  StringView(const char (&Str)[N]) : First(Str), Last(Str + N - 1) {}
  StringView(const char *First_, const char *Last_)
      : First(First_), Last(Last_) {}
  StringView(const char *First_, size_t Len)
      : First(First_), Last(First_ + Len) {}
  StringView(const char *Str) : First(Str), Last(Str + std::strlen(Str)) {}
  StringView() : First(nullptr), Last(nullptr) {}

  StringView substr(size_t Pos, size_t Len = npos) const {
    assert(Pos <= size());
    if (Len > size() - Pos)
      Len = size() - Pos;
    return StringView(begin() + Pos, Len);
  }

  size_t find(char C, size_t From = 0) const {
    // Avoid calling memchr with nullptr.
    if (From < size()) {
      // Just forward to memchr, which is faster than a hand-rolled loop.
      if (const void *P = ::memchr(First + From, C, size() - From))
        return size_t(static_cast<const char *>(P) - First);
    }
    return npos;
  }

  StringView dropFront(size_t N = 1) const {
    if (N >= size())
      N = size();
    return StringView(First + N, Last);
  }

  StringView dropBack(size_t N = 1) const {
    if (N >= size())
      N = size();
    return StringView(First, Last - N);
  }

  char front() const {
    assert(!empty());
    return *begin();
  }

  char back() const {
    assert(!empty());
    return *(end() - 1);
  }

  char popFront() {
    assert(!empty());
    return *First++;
  }

  bool consumeFront(char C) {
    if (!startsWith(C))
      return false;
    *this = dropFront(1);
    return true;
  }

  bool consumeFront(StringView S) {
    if (!startsWith(S))
      return false;
    *this = dropFront(S.size());
    return true;
  }

  bool startsWith(char C) const { return !empty() && *begin() == C; }

  bool startsWith(StringView Str) const {
    if (Str.size() > size())
      return false;
    return std::strncmp(Str.begin(), begin(), Str.size()) == 0;
  }

  const char &operator[](size_t Idx) const { return *(begin() + Idx); }

  const char *begin() const { return First; }
  const char *end() const { return Last; }
  size_t size() const { return static_cast<size_t>(Last - First); }
  bool empty() const { return First == Last; }
};

inline bool operator==(const StringView &LHS, const StringView &RHS) {
  return LHS.size() == RHS.size() &&
         std::strncmp(LHS.begin(), RHS.begin(), LHS.size()) == 0;
}
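
// A minimal usage sketch of StringView's parsing helpers, guarded out so it
// cannot affect compilation: demanglers consume the mangled name with
// consumeFront and slice it with substr/dropFront.
#if 0
inline void stringViewExample() {
  StringView S("_Z3foov");
  if (S.consumeFront("_Z")) {         // S is now "3foov"
    assert(S.front() == '3');
    StringView Name = S.substr(1, 3); // "foo"
    (void)Name;
    S = S.dropFront(4);               // only "v" remains
    assert(S.size() == 1 && S[0] == 'v');
  }
}
#endif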

DEMANGLE_NAMESPACE_END

#endif
Demangle/Demangle.h
//===--- Demangle.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEMANGLE_DEMANGLE_H
#define LLVM_DEMANGLE_DEMANGLE_H

#include <cstddef>
#include <string>
#include <string_view>

namespace llvm {
/// This is an LLVM-local version of __cxa_demangle. Other than the name and
/// being in the llvm namespace, it is identical.
///
/// The mangled_name is demangled into buf and returned. If the buffer is not
/// large enough, realloc is used to expand it.
///
/// The *status will be set to a value from the following enumeration:
enum : int {
  demangle_unknown_error = -4,
  demangle_invalid_args = -3,
  demangle_invalid_mangled_name = -2,
  demangle_memory_alloc_failure = -1,
  demangle_success = 0,
};

/// Returns a non-NULL pointer to a NUL-terminated C-style string
/// that should be explicitly freed, if successful. Otherwise, it may return
/// nullptr if mangled_name is not a valid mangling or is nullptr.
char *itaniumDemangle(std::string_view mangled_name);

enum MSDemangleFlags {
  MSDF_None = 0,
  MSDF_DumpBackrefs = 1 << 0,
  MSDF_NoAccessSpecifier = 1 << 1,
  MSDF_NoCallingConvention = 1 << 2,
  MSDF_NoReturnType = 1 << 3,
  MSDF_NoMemberType = 1 << 4,
  MSDF_NoVariableType = 1 << 5,
};

/// Demangles the Microsoft symbol pointed at by mangled_name and returns it.
/// Returns a pointer to the start of a null-terminated demangled string on
/// success, or nullptr on error.
/// If n_read is non-null and demangling was successful, it receives how many
/// bytes of the input string were consumed.
/// status receives one of the demangle_ enum entries above if it's not nullptr.
/// Flags controls various details of the demangled representation.
char *microsoftDemangle(std::string_view mangled_name, size_t *n_read,
                        int *status, MSDemangleFlags Flags = MSDF_None);
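
// A minimal usage sketch for microsoftDemangle; the mangled name below and
// its expected output are illustrative assumptions. The returned buffer is
// heap-allocated and must be freed by the caller. (Guarded out; assumes
// <cstdio> and <cstdlib>.)
#if 0
inline void microsoftDemangleExample() {
  int Status = 0;
  size_t NRead = 0;
  char *Demangled = microsoftDemangle("?func@@YAHD@Z", &NRead, &Status);
  if (Status == demangle_success && Demangled) {
    std::printf("%s\n", Demangled); // e.g. "int __cdecl func(char)"
    std::free(Demangled);
  }
}
#endif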

// Demangles a Rust v0 mangled symbol.
char *rustDemangle(std::string_view MangledName);

// Demangles a D mangled symbol.
char *dlangDemangle(std::string_view MangledName);

/// Attempt to demangle a string using different demangling schemes.
/// The function uses heuristics to determine which demangling scheme to use.
/// \param MangledName - reference to string to demangle.
/// \returns - the demangled string, or a copy of the input string if no
/// demangling occurred.
std::string demangle(std::string_view MangledName);

bool nonMicrosoftDemangle(std::string_view MangledName, std::string &Result);
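
// A minimal sketch of the heuristic entry point: demangle() tries the
// Itanium, Microsoft, Rust, and D schemes in turn and falls back to returning
// a copy of its input, so it is safe to call on arbitrary symbol names.
#if 0
inline std::string demangleOrPassThrough(std::string_view Sym) {
  // "_Z..." goes to the Itanium demangler, "?..." to the Microsoft one;
  // anything unrecognized comes back unchanged.
  return demangle(Sym);
}
#endif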

/// "Partial" demangler. This supports demangling a string into an AST
/// (typically an intermediate stage in itaniumDemangle) and querying certain
/// properties or partially printing the demangled name.
struct ItaniumPartialDemangler {
  ItaniumPartialDemangler();

  ItaniumPartialDemangler(ItaniumPartialDemangler &&Other);
  ItaniumPartialDemangler &operator=(ItaniumPartialDemangler &&Other);

  /// Demangle into an AST. Subsequent calls to the rest of the member functions
  /// implicitly operate on the AST this produces.
  /// \return true on error, false otherwise
  bool partialDemangle(const char *MangledName);

  /// Just print the entire mangled name into Buf. Buf and N behave like the
  /// second and third parameters to __cxa_demangle.
  char *finishDemangle(char *Buf, size_t *N) const;

  /// Get the base name of a function. This doesn't include trailing template
  /// arguments; i.e., for "a::b<int>" this function returns "b".
  char *getFunctionBaseName(char *Buf, size_t *N) const;

  /// Get the context name for a function. For "a::b::c", this function returns
  /// "a::b".
  char *getFunctionDeclContextName(char *Buf, size_t *N) const;

  /// Get the entire name of this function.
  char *getFunctionName(char *Buf, size_t *N) const;

  /// Get the parameters for this function.
  char *getFunctionParameters(char *Buf, size_t *N) const;
  char *getFunctionReturnType(char *Buf, size_t *N) const;

  /// Whether this function has any cv or reference qualifiers. These imply
  /// that the function is a non-static member function.
  bool hasFunctionQualifiers() const;

  /// If this symbol describes a constructor or destructor.
  bool isCtorOrDtor() const;

  /// If this symbol describes a function.
  bool isFunction() const;

  /// If this symbol describes a variable.
  bool isData() const;

  /// If this symbol is a <special-name>. These are generally implicitly
  /// generated by the implementation, such as vtables and typeinfo names.
  bool isSpecialName() const;

  ~ItaniumPartialDemangler();

private:
  void *RootNode;
  void *Context;
};
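
// A minimal usage sketch for ItaniumPartialDemangler, assuming
// "_ZN1a1bIiEEvv" is the mangling of "void a::b<int>()". Note the inverted
// convention: partialDemangle() returns true on *error*. Buf and N follow
// the __cxa_demangle resizing protocol. (Guarded out; assumes <cstdio> and
// <cstdlib>.)
#if 0
inline void partialDemangleExample() {
  ItaniumPartialDemangler D;
  if (!D.partialDemangle("_ZN1a1bIiEEvv")) {
    size_t N = 0;
    if (char *Base = D.getFunctionBaseName(nullptr, &N)) {
      std::printf("%s\n", Base); // "b" -- no template arguments
      std::free(Base);
    }
  }
}
#endif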
} // namespace llvm

#endif
Demangle/ItaniumDemangle.h
//===--- ItaniumDemangle.h -----------*- mode:c++;eval:(read-only-mode) -*-===//
//       Do not edit! See README.txt.
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic itanium demangler library.
// There are two copies of this file in the source tree.  The one under
// libcxxabi is the original and the one under llvm is the copy.  Use
// cp-to-llvm.sh to update the copy.  See README.txt for more details.
//
//===----------------------------------------------------------------------===//

#ifndef DEMANGLE_ITANIUMDEMANGLE_H
#define DEMANGLE_ITANIUMDEMANGLE_H

#include "DemangleConfig.h"
#include "StringViewExtras.h"
#include "Utility.h"
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <new>
#include <string_view>
#include <type_traits>
#include <utility>

DEMANGLE_NAMESPACE_BEGIN

template <class T, size_t N> class PODSmallVector {
  static_assert(std::is_pod<T>::value,
                "T is required to be a plain old data type");

  T *First = nullptr;
  T *Last = nullptr;
  T *Cap = nullptr;
  T Inline[N] = {0};

  bool isInline() const { return First == Inline; }

  void clearInline() {
    First = Inline;
    Last = Inline;
    Cap = Inline + N;
  }

  void reserve(size_t NewCap) {
    size_t S = size();
    if (isInline()) {
      auto *Tmp = static_cast<T *>(std::malloc(NewCap * sizeof(T)));
      if (Tmp == nullptr)
        std::terminate();
      std::copy(First, Last, Tmp);
      First = Tmp;
    } else {
      First = static_cast<T *>(std::realloc(First, NewCap * sizeof(T)));
      if (First == nullptr)
        std::terminate();
    }
    Last = First + S;
    Cap = First + NewCap;
  }

public:
  PODSmallVector() : First(Inline), Last(First), Cap(Inline + N) {}

  PODSmallVector(const PODSmallVector &) = delete;
  PODSmallVector &operator=(const PODSmallVector &) = delete;

  PODSmallVector(PODSmallVector &&Other) : PODSmallVector() {
    if (Other.isInline()) {
      std::copy(Other.begin(), Other.end(), First);
      Last = First + Other.size();
      Other.clear();
      return;
    }

    First = Other.First;
    Last = Other.Last;
    Cap = Other.Cap;
    Other.clearInline();
  }

  PODSmallVector &operator=(PODSmallVector &&Other) {
    if (Other.isInline()) {
      if (!isInline()) {
        std::free(First);
        clearInline();
      }
      std::copy(Other.begin(), Other.end(), First);
      Last = First + Other.size();
      Other.clear();
      return *this;
    }

    if (isInline()) {
      First = Other.First;
      Last = Other.Last;
      Cap = Other.Cap;
      Other.clearInline();
      return *this;
    }

    std::swap(First, Other.First);
    std::swap(Last, Other.Last);
    std::swap(Cap, Other.Cap);
    Other.clear();
    return *this;
  }

  // NOLINTNEXTLINE(readability-identifier-naming)
  void push_back(const T &Elem) {
    if (Last == Cap)
      reserve(size() * 2);
    *Last++ = Elem;
  }

  // NOLINTNEXTLINE(readability-identifier-naming)
  void pop_back() {
    assert(Last != First && "Popping empty vector!");
    --Last;
  }

  void dropBack(size_t Index) {
    assert(Index <= size() && "dropBack() can't expand!");
    Last = First + Index;
  }

  T *begin() { return First; }
  T *end() { return Last; }

  bool empty() const { return First == Last; }
  size_t size() const { return static_cast<size_t>(Last - First); }
  T &back() {
    assert(Last != First && "Calling back() on empty vector!");
    return *(Last - 1);
  }
  T &operator[](size_t Index) {
    assert(Index < size() && "Invalid access!");
    return *(begin() + Index);
  }
  void clear() { Last = First; }

  ~PODSmallVector() {
    if (!isInline())
      std::free(First);
  }
};
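
// A minimal sketch of PODSmallVector's growth behavior: the first N elements
// live in the inline buffer; pushing past N spills the contents to the heap
// via std::malloc, and the destructor frees only heap storage. (Guarded out.)
#if 0
inline void podSmallVectorExample() {
  PODSmallVector<int, 2> V; // inline capacity of 2
  V.push_back(1);
  V.push_back(2);           // still inline
  V.push_back(3);           // reserve(4): contents move to a malloc'ed buffer
  assert(V.size() == 3 && V[2] == 3);
  V.pop_back();
  assert(V.back() == 2);
}
#endif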

// Base class of all AST nodes. The AST is built by the parser, then is
// traversed by the printLeft/Right functions to produce a demangled string.
class Node {
public:
  enum Kind : unsigned char {
#define NODE(NodeKind) K##NodeKind,
#include "ItaniumNodes.def"
  };

  /// Three-way bool to track a cached value. Unknown is possible if this node
  /// has an unexpanded parameter pack below it that may affect this cache.
  enum class Cache : unsigned char { Yes, No, Unknown, };

  /// Operator precedence for expression nodes. Used to determine required
  /// parens in expression emission.
  enum class Prec {
    Primary,
    Postfix,
    Unary,
    Cast,
    PtrMem,
    Multiplicative,
    Additive,
    Shift,
    Spaceship,
    Relational,
    Equality,
    And,
    Xor,
    Ior,
    AndIf,
    OrIf,
    Conditional,
    Assign,
    Comma,
    Default,
  };

private:
  Kind K;

  Prec Precedence : 6;

  // FIXME: Make these protected.
public:
  /// Tracks if this node has a component on its right side, in which case we
  /// need to call printRight.
  Cache RHSComponentCache : 2;

  /// Track if this node is a (possibly qualified) array type. This can affect
  /// how we format the output string.
  Cache ArrayCache : 2;

  /// Track if this node is a (possibly qualified) function type. This can
  /// affect how we format the output string.
  Cache FunctionCache : 2;

public:
  Node(Kind K_, Prec Precedence_ = Prec::Primary,
       Cache RHSComponentCache_ = Cache::No, Cache ArrayCache_ = Cache::No,
       Cache FunctionCache_ = Cache::No)
      : K(K_), Precedence(Precedence_), RHSComponentCache(RHSComponentCache_),
        ArrayCache(ArrayCache_), FunctionCache(FunctionCache_) {}
  Node(Kind K_, Cache RHSComponentCache_, Cache ArrayCache_ = Cache::No,
       Cache FunctionCache_ = Cache::No)
      : Node(K_, Prec::Primary, RHSComponentCache_, ArrayCache_,
             FunctionCache_) {}

  /// Visit the most-derived object corresponding to this object.
  template<typename Fn> void visit(Fn F) const;

  // The following function is provided by all derived classes:
  //
  // Call F with arguments that, when passed to the constructor of this node,
  // would construct an equivalent node.
  //template<typename Fn> void match(Fn F) const;

  bool hasRHSComponent(OutputBuffer &OB) const {
    if (RHSComponentCache != Cache::Unknown)
      return RHSComponentCache == Cache::Yes;
    return hasRHSComponentSlow(OB);
  }

  bool hasArray(OutputBuffer &OB) const {
    if (ArrayCache != Cache::Unknown)
      return ArrayCache == Cache::Yes;
    return hasArraySlow(OB);
  }

  bool hasFunction(OutputBuffer &OB) const {
    if (FunctionCache != Cache::Unknown)
      return FunctionCache == Cache::Yes;
    return hasFunctionSlow(OB);
  }

  Kind getKind() const { return K; }

  Prec getPrecedence() const { return Precedence; }

  virtual bool hasRHSComponentSlow(OutputBuffer &) const { return false; }
  virtual bool hasArraySlow(OutputBuffer &) const { return false; }
  virtual bool hasFunctionSlow(OutputBuffer &) const { return false; }

  // Dig through "glue" nodes like ParameterPack and ForwardTemplateReference to
  // get at a node that actually represents some concrete syntax.
  virtual const Node *getSyntaxNode(OutputBuffer &) const { return this; }

  // Print this node as an expression operand, surrounding it in parentheses if
  // its precedence is [Strictly] weaker than P.
  void printAsOperand(OutputBuffer &OB, Prec P = Prec::Default,
                      bool StrictlyWorse = false) const {
    bool Paren =
        unsigned(getPrecedence()) >= unsigned(P) + unsigned(StrictlyWorse);
    if (Paren)
      OB.printOpen();
    print(OB);
    if (Paren)
      OB.printClose();
  }

  void print(OutputBuffer &OB) const {
    printLeft(OB);
    if (RHSComponentCache != Cache::No)
      printRight(OB);
  }

  // Print the "left" side of this Node into OutputBuffer.
  virtual void printLeft(OutputBuffer &) const = 0;

  // Print the "right". This distinction is necessary to represent C++ types
  // that appear on the RHS of their subtype, such as arrays or functions.
  // Since most types don't have such a component, provide a default
  // implementation.
  virtual void printRight(OutputBuffer &) const {}

  virtual std::string_view getBaseName() const { return {}; }

  // Silence compiler warnings, this dtor will never be called.
  virtual ~Node() = default;

#ifndef NDEBUG
  DEMANGLE_DUMP_METHOD void dump() const;
#endif
};

class NodeArray {
  Node **Elements;
  size_t NumElements;

public:
  NodeArray() : Elements(nullptr), NumElements(0) {}
  NodeArray(Node **Elements_, size_t NumElements_)
      : Elements(Elements_), NumElements(NumElements_) {}

  bool empty() const { return NumElements == 0; }
  size_t size() const { return NumElements; }

  Node **begin() const { return Elements; }
  Node **end() const { return Elements + NumElements; }

  Node *operator[](size_t Idx) const { return Elements[Idx]; }

  void printWithComma(OutputBuffer &OB) const {
    bool FirstElement = true;
    for (size_t Idx = 0; Idx != NumElements; ++Idx) {
      size_t BeforeComma = OB.getCurrentPosition();
      if (!FirstElement)
        OB += ", ";
      size_t AfterComma = OB.getCurrentPosition();
      Elements[Idx]->printAsOperand(OB, Node::Prec::Comma);

      // If Elements[Idx] printed as an empty parameter pack expansion, erase
      // the comma we just printed.
      if (AfterComma == OB.getCurrentPosition()) {
        OB.setCurrentPosition(BeforeComma);
        continue;
      }

      FirstElement = false;
    }
  }
};

struct NodeArrayNode : Node {
  NodeArray Array;
  NodeArrayNode(NodeArray Array_) : Node(KNodeArrayNode), Array(Array_) {}

  template<typename Fn> void match(Fn F) const { F(Array); }

  void printLeft(OutputBuffer &OB) const override { Array.printWithComma(OB); }
};

class DotSuffix final : public Node {
  const Node *Prefix;
  const std::string_view Suffix;

public:
  DotSuffix(const Node *Prefix_, std::string_view Suffix_)
      : Node(KDotSuffix), Prefix(Prefix_), Suffix(Suffix_) {}

  template<typename Fn> void match(Fn F) const { F(Prefix, Suffix); }

  void printLeft(OutputBuffer &OB) const override {
    Prefix->print(OB);
    OB += " (";
    OB += Suffix;
    OB += ")";
  }
};

class VendorExtQualType final : public Node {
  const Node *Ty;
  std::string_view Ext;
  const Node *TA;

public:
  VendorExtQualType(const Node *Ty_, std::string_view Ext_, const Node *TA_)
      : Node(KVendorExtQualType), Ty(Ty_), Ext(Ext_), TA(TA_) {}

  const Node *getTy() const { return Ty; }
  std::string_view getExt() const { return Ext; }
  const Node *getTA() const { return TA; }

  template <typename Fn> void match(Fn F) const { F(Ty, Ext, TA); }

  void printLeft(OutputBuffer &OB) const override {
    Ty->print(OB);
    OB += " ";
    OB += Ext;
    if (TA != nullptr)
      TA->print(OB);
  }
};

enum FunctionRefQual : unsigned char {
  FrefQualNone,
  FrefQualLValue,
  FrefQualRValue,
};

enum Qualifiers {
  QualNone = 0,
  QualConst = 0x1,
  QualVolatile = 0x2,
  QualRestrict = 0x4,
};

inline Qualifiers operator|=(Qualifiers &Q1, Qualifiers Q2) {
  return Q1 = static_cast<Qualifiers>(Q1 | Q2);
}

class QualType final : public Node {
protected:
  const Qualifiers Quals;
  const Node *Child;

  void printQuals(OutputBuffer &OB) const {
    if (Quals & QualConst)
      OB += " const";
    if (Quals & QualVolatile)
      OB += " volatile";
    if (Quals & QualRestrict)
      OB += " restrict";
  }

public:
  QualType(const Node *Child_, Qualifiers Quals_)
      : Node(KQualType, Child_->RHSComponentCache,
             Child_->ArrayCache, Child_->FunctionCache),
        Quals(Quals_), Child(Child_) {}

  Qualifiers getQuals() const { return Quals; }
  const Node *getChild() const { return Child; }

  template<typename Fn> void match(Fn F) const { F(Child, Quals); }

  bool hasRHSComponentSlow(OutputBuffer &OB) const override {
    return Child->hasRHSComponent(OB);
  }
  bool hasArraySlow(OutputBuffer &OB) const override {
    return Child->hasArray(OB);
  }
  bool hasFunctionSlow(OutputBuffer &OB) const override {
    return Child->hasFunction(OB);
  }

  void printLeft(OutputBuffer &OB) const override {
    Child->printLeft(OB);
    printQuals(OB);
  }

  void printRight(OutputBuffer &OB) const override { Child->printRight(OB); }
};

class ConversionOperatorType final : public Node {
  const Node *Ty;

public:
  ConversionOperatorType(const Node *Ty_)
      : Node(KConversionOperatorType), Ty(Ty_) {}

  template<typename Fn> void match(Fn F) const { F(Ty); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "operator ";
    Ty->print(OB);
  }
};

class PostfixQualifiedType final : public Node {
  const Node *Ty;
  const std::string_view Postfix;

public:
  PostfixQualifiedType(const Node *Ty_, std::string_view Postfix_)
      : Node(KPostfixQualifiedType), Ty(Ty_), Postfix(Postfix_) {}

  template<typename Fn> void match(Fn F) const { F(Ty, Postfix); }

  void printLeft(OutputBuffer &OB) const override {
    Ty->printLeft(OB);
    OB += Postfix;
  }
};

class NameType final : public Node {
  const std::string_view Name;

public:
  NameType(std::string_view Name_) : Node(KNameType), Name(Name_) {}

  template<typename Fn> void match(Fn F) const { F(Name); }

  std::string_view getName() const { return Name; }
  std::string_view getBaseName() const override { return Name; }

  void printLeft(OutputBuffer &OB) const override { OB += Name; }
};

class BitIntType final : public Node {
  const Node *Size;
  bool Signed;

public:
  BitIntType(const Node *Size_, bool Signed_)
      : Node(KBitIntType), Size(Size_), Signed(Signed_) {}

  template <typename Fn> void match(Fn F) const { F(Size, Signed); }

  void printLeft(OutputBuffer &OB) const override {
    if (!Signed)
      OB += "unsigned ";
    OB += "_BitInt";
    OB.printOpen();
    Size->printAsOperand(OB);
    OB.printClose();
  }
};

class ElaboratedTypeSpefType : public Node {
  std::string_view Kind;
  Node *Child;
public:
  ElaboratedTypeSpefType(std::string_view Kind_, Node *Child_)
      : Node(KElaboratedTypeSpefType), Kind(Kind_), Child(Child_) {}

  template<typename Fn> void match(Fn F) const { F(Kind, Child); }

  void printLeft(OutputBuffer &OB) const override {
    OB += Kind;
    OB += ' ';
    Child->print(OB);
  }
};

struct AbiTagAttr : Node {
  Node *Base;
  std::string_view Tag;

  AbiTagAttr(Node *Base_, std::string_view Tag_)
      : Node(KAbiTagAttr, Base_->RHSComponentCache, Base_->ArrayCache,
             Base_->FunctionCache),
        Base(Base_), Tag(Tag_) {}

  template<typename Fn> void match(Fn F) const { F(Base, Tag); }

  std::string_view getBaseName() const override { return Base->getBaseName(); }

  void printLeft(OutputBuffer &OB) const override {
    Base->printLeft(OB);
    OB += "[abi:";
    OB += Tag;
    OB += "]";
  }
};

class EnableIfAttr : public Node {
  NodeArray Conditions;
public:
  EnableIfAttr(NodeArray Conditions_)
      : Node(KEnableIfAttr), Conditions(Conditions_) {}

  template<typename Fn> void match(Fn F) const { F(Conditions); }

  void printLeft(OutputBuffer &OB) const override {
    OB += " [enable_if:";
    Conditions.printWithComma(OB);
    OB += ']';
  }
};

class ObjCProtoName : public Node {
  const Node *Ty;
  std::string_view Protocol;

  friend class PointerType;

public:
  ObjCProtoName(const Node *Ty_, std::string_view Protocol_)
      : Node(KObjCProtoName), Ty(Ty_), Protocol(Protocol_) {}

  template<typename Fn> void match(Fn F) const { F(Ty, Protocol); }

  bool isObjCObject() const {
    return Ty->getKind() == KNameType &&
           static_cast<const NameType *>(Ty)->getName() == "objc_object";
  }

  void printLeft(OutputBuffer &OB) const override {
    Ty->print(OB);
    OB += "<";
    OB += Protocol;
    OB += ">";
  }
};

class PointerType final : public Node {
  const Node *Pointee;

public:
  PointerType(const Node *Pointee_)
      : Node(KPointerType, Pointee_->RHSComponentCache),
        Pointee(Pointee_) {}

  const Node *getPointee() const { return Pointee; }

  template<typename Fn> void match(Fn F) const { F(Pointee); }

  bool hasRHSComponentSlow(OutputBuffer &OB) const override {
    return Pointee->hasRHSComponent(OB);
  }

  void printLeft(OutputBuffer &OB) const override {
    // We rewrite objc_object<SomeProtocol>* into id<SomeProtocol>.
    if (Pointee->getKind() != KObjCProtoName ||
        !static_cast<const ObjCProtoName *>(Pointee)->isObjCObject()) {
      Pointee->printLeft(OB);
      if (Pointee->hasArray(OB))
        OB += " ";
      if (Pointee->hasArray(OB) || Pointee->hasFunction(OB))
        OB += "(";
      OB += "*";
    } else {
      const auto *objcProto = static_cast<const ObjCProtoName *>(Pointee);
      OB += "id<";
      OB += objcProto->Protocol;
      OB += ">";
    }
  }

  void printRight(OutputBuffer &OB) const override {
    if (Pointee->getKind() != KObjCProtoName ||
        !static_cast<const ObjCProtoName *>(Pointee)->isObjCObject()) {
      if (Pointee->hasArray(OB) || Pointee->hasFunction(OB))
        OB += ")";
      Pointee->printRight(OB);
    }
  }
};

enum class ReferenceKind {
  LValue,
  RValue,
};

// Represents either a LValue or an RValue reference type.
class ReferenceType : public Node {
  const Node *Pointee;
  ReferenceKind RK;

  mutable bool Printing = false;

  // Dig through any refs to refs, collapsing the ReferenceTypes as we go. The
  // rule here is that rvalue ref to rvalue ref collapses to an rvalue ref, and
  // any other combination collapses to an lvalue ref.
  //
  // A combination of a ForwardTemplateReference and a back-ref Substitution
  // from an ill-formed string may have created a cycle; use cycle detection to
  // avoid looping forever.
  std::pair<ReferenceKind, const Node *> collapse(OutputBuffer &OB) const {
    auto SoFar = std::make_pair(RK, Pointee);
    // Track the chain of nodes for Floyd's 'tortoise and hare'
    // cycle-detection algorithm, since getSyntaxNode(OB) is impure.
    PODSmallVector<const Node *, 8> Prev;
    for (;;) {
      const Node *SN = SoFar.second->getSyntaxNode(OB);
      if (SN->getKind() != KReferenceType)
        break;
      auto *RT = static_cast<const ReferenceType *>(SN);
      SoFar.second = RT->Pointee;
      SoFar.first = std::min(SoFar.first, RT->RK);

      // The middle of Prev is the 'slow' pointer moving at half speed
      Prev.push_back(SoFar.second);
      if (Prev.size() > 1 && SoFar.second == Prev[(Prev.size() - 1) / 2]) {
        // Cycle detected
        SoFar.second = nullptr;
        break;
      }
    }
    return SoFar;
  }

public:
  ReferenceType(const Node *Pointee_, ReferenceKind RK_)
      : Node(KReferenceType, Pointee_->RHSComponentCache),
        Pointee(Pointee_), RK(RK_) {}

  template<typename Fn> void match(Fn F) const { F(Pointee, RK); }

  bool hasRHSComponentSlow(OutputBuffer &OB) const override {
    return Pointee->hasRHSComponent(OB);
  }

  void printLeft(OutputBuffer &OB) const override {
    if (Printing)
      return;
    ScopedOverride<bool> SavePrinting(Printing, true);
    std::pair<ReferenceKind, const Node *> Collapsed = collapse(OB);
    if (!Collapsed.second)
      return;
    Collapsed.second->printLeft(OB);
    if (Collapsed.second->hasArray(OB))
      OB += " ";
    if (Collapsed.second->hasArray(OB) || Collapsed.second->hasFunction(OB))
      OB += "(";

    OB += (Collapsed.first == ReferenceKind::LValue ? "&" : "&&");
  }
  void printRight(OutputBuffer &OB) const override {
    if (Printing)
      return;
    ScopedOverride<bool> SavePrinting(Printing, true);
    std::pair<ReferenceKind, const Node *> Collapsed = collapse(OB);
    if (!Collapsed.second)
      return;
    if (Collapsed.second->hasArray(OB) || Collapsed.second->hasFunction(OB))
      OB += ")";
    Collapsed.second->printRight(OB);
  }
};
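
// A worked example of the collapsing rule above, as a hedged sketch: for
// "template<class T> void g(T&&)" instantiated with T = int&, the parameter
// T&& is an rvalue reference wrapped around an lvalue reference, and the
// printer collapses the pair to a single "&". (Guarded out; assumes the
// top-level llvm::itaniumDemangle entry point and <cstdio>/<cstdlib>.)
#if 0
inline void referenceCollapseExample() {
  if (char *S = llvm::itaniumDemangle("_Z1gIRiEvOT_")) {
    std::printf("%s\n", S); // "void g<int&>(int&)"
    std::free(S);
  }
}
#endif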

class PointerToMemberType final : public Node {
  const Node *ClassType;
  const Node *MemberType;

public:
  PointerToMemberType(const Node *ClassType_, const Node *MemberType_)
      : Node(KPointerToMemberType, MemberType_->RHSComponentCache),
        ClassType(ClassType_), MemberType(MemberType_) {}

  template<typename Fn> void match(Fn F) const { F(ClassType, MemberType); }

  bool hasRHSComponentSlow(OutputBuffer &OB) const override {
    return MemberType->hasRHSComponent(OB);
  }

  void printLeft(OutputBuffer &OB) const override {
    MemberType->printLeft(OB);
    if (MemberType->hasArray(OB) || MemberType->hasFunction(OB))
      OB += "(";
    else
      OB += " ";
    ClassType->print(OB);
    OB += "::*";
  }

  void printRight(OutputBuffer &OB) const override {
    if (MemberType->hasArray(OB) || MemberType->hasFunction(OB))
      OB += ")";
    MemberType->printRight(OB);
  }
};

class ArrayType final : public Node {
  const Node *Base;
  Node *Dimension;

public:
  ArrayType(const Node *Base_, Node *Dimension_)
      : Node(KArrayType,
             /*RHSComponentCache=*/Cache::Yes,
             /*ArrayCache=*/Cache::Yes),
        Base(Base_), Dimension(Dimension_) {}

  template<typename Fn> void match(Fn F) const { F(Base, Dimension); }

  bool hasRHSComponentSlow(OutputBuffer &) const override { return true; }
  bool hasArraySlow(OutputBuffer &) const override { return true; }

  void printLeft(OutputBuffer &OB) const override { Base->printLeft(OB); }

  void printRight(OutputBuffer &OB) const override {
    if (OB.back() != ']')
      OB += " ";
    OB += "[";
    if (Dimension)
      Dimension->print(OB);
    OB += "]";
    Base->printRight(OB);
  }
};

class FunctionType final : public Node {
  const Node *Ret;
  NodeArray Params;
  Qualifiers CVQuals;
  FunctionRefQual RefQual;
  const Node *ExceptionSpec;

public:
  FunctionType(const Node *Ret_, NodeArray Params_, Qualifiers CVQuals_,
               FunctionRefQual RefQual_, const Node *ExceptionSpec_)
      : Node(KFunctionType,
             /*RHSComponentCache=*/Cache::Yes, /*ArrayCache=*/Cache::No,
             /*FunctionCache=*/Cache::Yes),
        Ret(Ret_), Params(Params_), CVQuals(CVQuals_), RefQual(RefQual_),
        ExceptionSpec(ExceptionSpec_) {}

  template<typename Fn> void match(Fn F) const {
    F(Ret, Params, CVQuals, RefQual, ExceptionSpec);
  }

  bool hasRHSComponentSlow(OutputBuffer &) const override { return true; }
  bool hasFunctionSlow(OutputBuffer &) const override { return true; }

  // Handle C++'s ... quirky decl grammar by using the left & right
  // distinction. Consider:
  //   int (*f(float))(char) {}
  // f is a function that takes a float and returns a pointer to a function
  // that takes a char and returns an int. If we're trying to print f, start
  // by printing out the return type's left, then print our parameters, then
  // finally print the right of the return type.
  void printLeft(OutputBuffer &OB) const override {
    Ret->printLeft(OB);
    OB += " ";
  }

  void printRight(OutputBuffer &OB) const override {
    OB.printOpen();
    Params.printWithComma(OB);
    OB.printClose();
    Ret->printRight(OB);

    if (CVQuals & QualConst)
      OB += " const";
    if (CVQuals & QualVolatile)
      OB += " volatile";
    if (CVQuals & QualRestrict)
      OB += " restrict";

    if (RefQual == FrefQualLValue)
      OB += " &";
    else if (RefQual == FrefQualRValue)
      OB += " &&";

    if (ExceptionSpec != nullptr) {
      OB += ' ';
      ExceptionSpec->print(OB);
    }
  }
};
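
// A worked illustration of the left/right split described above. For
//   int (*f(float))(char)
// printLeft() of the return type emits "int (*", the name and parameters
// produce "f(float)", and printRight() closes with ")(char)". The mangling
// below is a hedged example, assuming the function template
//   template<class T> int (*g(T))(char);
// (function templates, unlike plain functions, encode the return type).
#if 0
// llvm::itaniumDemangle("_Z1gIfEPFicET_") -> "int (*g<float>(float))(char)"
#endif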

class NoexceptSpec : public Node {
  const Node *E;
public:
  NoexceptSpec(const Node *E_) : Node(KNoexceptSpec), E(E_) {}

  template<typename Fn> void match(Fn F) const { F(E); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "noexcept";
    OB.printOpen();
    E->printAsOperand(OB);
    OB.printClose();
  }
};

class DynamicExceptionSpec : public Node {
  NodeArray Types;
public:
  DynamicExceptionSpec(NodeArray Types_)
      : Node(KDynamicExceptionSpec), Types(Types_) {}

  template<typename Fn> void match(Fn F) const { F(Types); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "throw";
    OB.printOpen();
    Types.printWithComma(OB);
    OB.printClose();
  }
};

class FunctionEncoding final : public Node {
  const Node *Ret;
  const Node *Name;
  NodeArray Params;
  const Node *Attrs;
  Qualifiers CVQuals;
  FunctionRefQual RefQual;

public:
  FunctionEncoding(const Node *Ret_, const Node *Name_, NodeArray Params_,
                   const Node *Attrs_, Qualifiers CVQuals_,
                   FunctionRefQual RefQual_)
      : Node(KFunctionEncoding,
             /*RHSComponentCache=*/Cache::Yes, /*ArrayCache=*/Cache::No,
             /*FunctionCache=*/Cache::Yes),
        Ret(Ret_), Name(Name_), Params(Params_), Attrs(Attrs_),
        CVQuals(CVQuals_), RefQual(RefQual_) {}

  template<typename Fn> void match(Fn F) const {
    F(Ret, Name, Params, Attrs, CVQuals, RefQual);
  }

  Qualifiers getCVQuals() const { return CVQuals; }
  FunctionRefQual getRefQual() const { return RefQual; }
  NodeArray getParams() const { return Params; }
  const Node *getReturnType() const { return Ret; }

  bool hasRHSComponentSlow(OutputBuffer &) const override { return true; }
  bool hasFunctionSlow(OutputBuffer &) const override { return true; }

  const Node *getName() const { return Name; }

  void printLeft(OutputBuffer &OB) const override {
    if (Ret) {
      Ret->printLeft(OB);
      if (!Ret->hasRHSComponent(OB))
        OB += " ";
    }
    Name->print(OB);
  }

  void printRight(OutputBuffer &OB) const override {
    OB.printOpen();
    Params.printWithComma(OB);
    OB.printClose();
    if (Ret)
      Ret->printRight(OB);

    if (CVQuals & QualConst)
      OB += " const";
    if (CVQuals & QualVolatile)
      OB += " volatile";
    if (CVQuals & QualRestrict)
      OB += " restrict";

    if (RefQual == FrefQualLValue)
      OB += " &";
    else if (RefQual == FrefQualRValue)
      OB += " &&";

    if (Attrs != nullptr)
      Attrs->print(OB);
  }
};

class LiteralOperator : public Node {
  const Node *OpName;

public:
  LiteralOperator(const Node *OpName_)
      : Node(KLiteralOperator), OpName(OpName_) {}

  template<typename Fn> void match(Fn F) const { F(OpName); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "operator\"\" ";
    OpName->print(OB);
  }
};

class SpecialName final : public Node {
  const std::string_view Special;
  const Node *Child;

public:
  SpecialName(std::string_view Special_, const Node *Child_)
      : Node(KSpecialName), Special(Special_), Child(Child_) {}

  template<typename Fn> void match(Fn F) const { F(Special, Child); }

  void printLeft(OutputBuffer &OB) const override {
    OB += Special;
    Child->print(OB);
  }
};

class CtorVtableSpecialName final : public Node {
  const Node *FirstType;
  const Node *SecondType;

public:
  CtorVtableSpecialName(const Node *FirstType_, const Node *SecondType_)
      : Node(KCtorVtableSpecialName),
        FirstType(FirstType_), SecondType(SecondType_) {}

  template<typename Fn> void match(Fn F) const { F(FirstType, SecondType); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "construction vtable for ";
    FirstType->print(OB);
    OB += "-in-";
    SecondType->print(OB);
  }
};

struct NestedName : Node {
  Node *Qual;
  Node *Name;

  NestedName(Node *Qual_, Node *Name_)
      : Node(KNestedName), Qual(Qual_), Name(Name_) {}

  template<typename Fn> void match(Fn F) const { F(Qual, Name); }

  std::string_view getBaseName() const override { return Name->getBaseName(); }

  void printLeft(OutputBuffer &OB) const override {
    Qual->print(OB);
    OB += "::";
    Name->print(OB);
  }
};

struct ModuleName : Node {
  ModuleName *Parent;
  Node *Name;
  bool IsPartition;

  ModuleName(ModuleName *Parent_, Node *Name_, bool IsPartition_ = false)
      : Node(KModuleName), Parent(Parent_), Name(Name_),
        IsPartition(IsPartition_) {}

  template <typename Fn> void match(Fn F) const {
    F(Parent, Name, IsPartition);
  }

  void printLeft(OutputBuffer &OB) const override {
    if (Parent)
      Parent->print(OB);
    if (Parent || IsPartition)
      OB += IsPartition ? ':' : '.';
    Name->print(OB);
  }
};

struct ModuleEntity : Node {
  ModuleName *Module;
  Node *Name;

  ModuleEntity(ModuleName *Module_, Node *Name_)
      : Node(KModuleEntity), Module(Module_), Name(Name_) {}

  template <typename Fn> void match(Fn F) const { F(Module, Name); }

  std::string_view getBaseName() const override { return Name->getBaseName(); }

  void printLeft(OutputBuffer &OB) const override {
    Name->print(OB);
    OB += '@';
    Module->print(OB);
  }
};

struct LocalName : Node {
  Node *Encoding;
  Node *Entity;

  LocalName(Node *Encoding_, Node *Entity_)
      : Node(KLocalName), Encoding(Encoding_), Entity(Entity_) {}

  template<typename Fn> void match(Fn F) const { F(Encoding, Entity); }

  void printLeft(OutputBuffer &OB) const override {
    Encoding->print(OB);
    OB += "::";
    Entity->print(OB);
  }
};

class QualifiedName final : public Node {
  // qualifier::name
  const Node *Qualifier;
  const Node *Name;

public:
  QualifiedName(const Node *Qualifier_, const Node *Name_)
      : Node(KQualifiedName), Qualifier(Qualifier_), Name(Name_) {}

  template<typename Fn> void match(Fn F) const { F(Qualifier, Name); }

  std::string_view getBaseName() const override { return Name->getBaseName(); }

  void printLeft(OutputBuffer &OB) const override {
    Qualifier->print(OB);
    OB += "::";
    Name->print(OB);
  }
};

class VectorType final : public Node {
  const Node *BaseType;
  const Node *Dimension;

public:
  VectorType(const Node *BaseType_, const Node *Dimension_)
      : Node(KVectorType), BaseType(BaseType_), Dimension(Dimension_) {}

  const Node *getBaseType() const { return BaseType; }
  const Node *getDimension() const { return Dimension; }

  template<typename Fn> void match(Fn F) const { F(BaseType, Dimension); }

  void printLeft(OutputBuffer &OB) const override {
    BaseType->print(OB);
    OB += " vector[";
    if (Dimension)
      Dimension->print(OB);
    OB += "]";
  }
};

class PixelVectorType final : public Node {
  const Node *Dimension;

public:
  PixelVectorType(const Node *Dimension_)
      : Node(KPixelVectorType), Dimension(Dimension_) {}

  template<typename Fn> void match(Fn F) const { F(Dimension); }

  void printLeft(OutputBuffer &OB) const override {
    // FIXME: This should demangle as "vector pixel".
    OB += "pixel vector[";
    Dimension->print(OB);
    OB += "]";
  }
};

class BinaryFPType final : public Node {
  const Node *Dimension;

public:
  BinaryFPType(const Node *Dimension_)
      : Node(KBinaryFPType), Dimension(Dimension_) {}

  template<typename Fn> void match(Fn F) const { F(Dimension); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "_Float";
    Dimension->print(OB);
  }
};

enum class TemplateParamKind { Type, NonType, Template };

/// An invented name for a template parameter for which we don't have a
/// corresponding template argument.
///
/// This node is created when parsing the <lambda-sig> for a lambda with
/// explicit template arguments, which might be referenced in the parameter
/// types appearing later in the <lambda-sig>.
class SyntheticTemplateParamName final : public Node {
  TemplateParamKind Kind;
  unsigned Index;

public:
  SyntheticTemplateParamName(TemplateParamKind Kind_, unsigned Index_)
      : Node(KSyntheticTemplateParamName), Kind(Kind_), Index(Index_) {}

  template<typename Fn> void match(Fn F) const { F(Kind, Index); }

  void printLeft(OutputBuffer &OB) const override {
    switch (Kind) {
    case TemplateParamKind::Type:
      OB += "$T";
      break;
    case TemplateParamKind::NonType:
      OB += "$N";
      break;
    case TemplateParamKind::Template:
      OB += "$TT";
      break;
    }
    if (Index > 0)
      OB << Index - 1;
  }
};

/// A template type parameter declaration, 'typename T'.
class TypeTemplateParamDecl final : public Node {
  Node *Name;

public:
  TypeTemplateParamDecl(Node *Name_)
      : Node(KTypeTemplateParamDecl, Cache::Yes), Name(Name_) {}

  template<typename Fn> void match(Fn F) const { F(Name); }

  void printLeft(OutputBuffer &OB) const override { OB += "typename "; }

  void printRight(OutputBuffer &OB) const override { Name->print(OB); }
};

/// A non-type template parameter declaration, 'int N'.
class NonTypeTemplateParamDecl final : public Node {
  Node *Name;
  Node *Type;

public:
  NonTypeTemplateParamDecl(Node *Name_, Node *Type_)
      : Node(KNonTypeTemplateParamDecl, Cache::Yes), Name(Name_), Type(Type_) {}

  template<typename Fn> void match(Fn F) const { F(Name, Type); }

  void printLeft(OutputBuffer &OB) const override {
    Type->printLeft(OB);
    if (!Type->hasRHSComponent(OB))
      OB += " ";
  }

  void printRight(OutputBuffer &OB) const override {
    Name->print(OB);
    Type->printRight(OB);
  }
};

/// A template template parameter declaration,
/// 'template<typename T> typename N'.
class TemplateTemplateParamDecl final : public Node {
  Node *Name;
  NodeArray Params;

public:
  TemplateTemplateParamDecl(Node *Name_, NodeArray Params_)
      : Node(KTemplateTemplateParamDecl, Cache::Yes), Name(Name_),
        Params(Params_) {}

  template<typename Fn> void match(Fn F) const { F(Name, Params); }

  void printLeft(OutputBuffer &OB) const override {
    ScopedOverride<unsigned> LT(OB.GtIsGt, 0);
    OB += "template<";
    Params.printWithComma(OB);
    OB += "> typename ";
  }

  void printRight(OutputBuffer &OB) const override { Name->print(OB); }
};

/// A template parameter pack declaration, 'typename ...T'.
class TemplateParamPackDecl final : public Node {
  Node *Param;

public:
  TemplateParamPackDecl(Node *Param_)
      : Node(KTemplateParamPackDecl, Cache::Yes), Param(Param_) {}

  template<typename Fn> void match(Fn F) const { F(Param); }

  void printLeft(OutputBuffer &OB) const override {
    Param->printLeft(OB);
    OB += "...";
  }

  void printRight(OutputBuffer &OB) const override { Param->printRight(OB); }
};

/// An unexpanded parameter pack (either in the expression or type context). If
/// this AST is correct, this node will have a ParameterPackExpansion node above
/// it.
///
/// This node is created when some <template-args> are found that apply to an
/// <encoding>, and is stored in the TemplateParams table. In order for this to
/// appear in the final AST, it has to be referenced via a <template-param>
/// (i.e., T_).
class ParameterPack final : public Node {
  NodeArray Data;

  // Set up OutputBuffer for a pack expansion, unless we're already expanding
  // one.
  void initializePackExpansion(OutputBuffer &OB) const {
    if (OB.CurrentPackMax == std::numeric_limits<unsigned>::max()) {
      OB.CurrentPackMax = static_cast<unsigned>(Data.size());
      OB.CurrentPackIndex = 0;
    }
  }

public:
  ParameterPack(NodeArray Data_) : Node(KParameterPack), Data(Data_) {
    ArrayCache = FunctionCache = RHSComponentCache = Cache::Unknown;
    if (std::all_of(Data.begin(), Data.end(), [](Node* P) {
          return P->ArrayCache == Cache::No;
        }))
      ArrayCache = Cache::No;
    if (std::all_of(Data.begin(), Data.end(), [](Node* P) {
          return P->FunctionCache == Cache::No;
        }))
      FunctionCache = Cache::No;
    if (std::all_of(Data.begin(), Data.end(), [](Node* P) {
          return P->RHSComponentCache == Cache::No;
        }))
      RHSComponentCache = Cache::No;
  }

  template<typename Fn> void match(Fn F) const { F(Data); }

  bool hasRHSComponentSlow(OutputBuffer &OB) const override {
    initializePackExpansion(OB);
    size_t Idx = OB.CurrentPackIndex;
    return Idx < Data.size() && Data[Idx]->hasRHSComponent(OB);
  }
  bool hasArraySlow(OutputBuffer &OB) const override {
    initializePackExpansion(OB);
    size_t Idx = OB.CurrentPackIndex;
    return Idx < Data.size() && Data[Idx]->hasArray(OB);
  }
  bool hasFunctionSlow(OutputBuffer &OB) const override {
    initializePackExpansion(OB);
    size_t Idx = OB.CurrentPackIndex;
    return Idx < Data.size() && Data[Idx]->hasFunction(OB);
  }
  const Node *getSyntaxNode(OutputBuffer &OB) const override {
    initializePackExpansion(OB);
    size_t Idx = OB.CurrentPackIndex;
    return Idx < Data.size() ? Data[Idx]->getSyntaxNode(OB) : this;
  }

  void printLeft(OutputBuffer &OB) const override {
    initializePackExpansion(OB);
    size_t Idx = OB.CurrentPackIndex;
    if (Idx < Data.size())
      Data[Idx]->printLeft(OB);
  }
  void printRight(OutputBuffer &OB) const override {
    initializePackExpansion(OB);
    size_t Idx = OB.CurrentPackIndex;
    if (Idx < Data.size())
      Data[Idx]->printRight(OB);
  }
};

/// A variadic template argument. This node represents an occurrence of
/// J<something>E in some <template-args>. It isn't itself unexpanded, unless
/// one of its Elements is. The parser inserts a ParameterPack into the
/// TemplateParams table if the <template-args> this pack belongs to apply to an
/// <encoding>.
class TemplateArgumentPack final : public Node {
  NodeArray Elements;
public:
  TemplateArgumentPack(NodeArray Elements_)
      : Node(KTemplateArgumentPack), Elements(Elements_) {}

  template<typename Fn> void match(Fn F) const { F(Elements); }

  NodeArray getElements() const { return Elements; }

  void printLeft(OutputBuffer &OB) const override {
    Elements.printWithComma(OB);
  }
};

/// A pack expansion. Below this node, there are some unexpanded ParameterPacks
/// which each have Child->ParameterPackSize elements.
class ParameterPackExpansion final : public Node {
  const Node *Child;

public:
  ParameterPackExpansion(const Node *Child_)
      : Node(KParameterPackExpansion), Child(Child_) {}

  template<typename Fn> void match(Fn F) const { F(Child); }

  const Node *getChild() const { return Child; }

  void printLeft(OutputBuffer &OB) const override {
    constexpr unsigned Max = std::numeric_limits<unsigned>::max();
    ScopedOverride<unsigned> SavePackIdx(OB.CurrentPackIndex, Max);
    ScopedOverride<unsigned> SavePackMax(OB.CurrentPackMax, Max);
    size_t StreamPos = OB.getCurrentPosition();

    // Print the first element in the pack. If Child contains a ParameterPack,
    // it will set up OB.CurrentPackMax and print the first element.
    Child->print(OB);

    // No ParameterPack was found in Child. This can occur if we've found a pack
    // expansion on a <function-param>.
    if (OB.CurrentPackMax == Max) {
      OB += "...";
      return;
    }

    // We found a ParameterPack, but it has no elements. Erase whatever we may
    // have printed.
    if (OB.CurrentPackMax == 0) {
      OB.setCurrentPosition(StreamPos);
      return;
    }

    // Else, iterate through the rest of the elements in the pack.
    for (unsigned I = 1, E = OB.CurrentPackMax; I < E; ++I) {
      OB += ", ";
      OB.CurrentPackIndex = I;
      Child->print(OB);
    }
  }
};

class TemplateArgs final : public Node {
  NodeArray Params;

public:
  TemplateArgs(NodeArray Params_) : Node(KTemplateArgs), Params(Params_) {}

  template<typename Fn> void match(Fn F) const { F(Params); }

  NodeArray getParams() { return Params; }

  void printLeft(OutputBuffer &OB) const override {
    ScopedOverride<unsigned> LT(OB.GtIsGt, 0);
    OB += "<";
    Params.printWithComma(OB);
    OB += ">";
  }
};

/// A forward-reference to a template argument that was not known at the point
/// where the template parameter name was parsed in a mangling.
///
/// This is created when demangling the name of a specialization of a
/// conversion function template:
///
/// \code
/// struct A {
///   template<typename T> operator T*();
/// };
/// \endcode
///
/// When demangling a specialization of the conversion function template, we
/// encounter the name of the template (including the \c T) before we reach
/// the template argument list, so we cannot substitute the parameter name
/// for the corresponding argument while parsing. Instead, we create a
/// \c ForwardTemplateReference node that is resolved after we parse the
/// template arguments.
struct ForwardTemplateReference : Node {
  size_t Index;
  Node *Ref = nullptr;

  // Whether we're currently printing this node. It is possible (though
  // invalid) for a forward template reference to refer to itself via a
  // substitution. This creates a cyclic AST, which would overflow the stack
  // when printing. To fix this, bail out if more than one print* function is
  // active.
  mutable bool Printing = false;

  ForwardTemplateReference(size_t Index_)
      : Node(KForwardTemplateReference, Cache::Unknown, Cache::Unknown,
             Cache::Unknown),
        Index(Index_) {}

  // We don't provide a matcher for these, because the value of the node is
  // not determined by its construction parameters, and it generally needs
  // special handling.
  template<typename Fn> void match(Fn F) const = delete;

  bool hasRHSComponentSlow(OutputBuffer &OB) const override {
    if (Printing)
      return false;
    ScopedOverride<bool> SavePrinting(Printing, true);
    return Ref->hasRHSComponent(OB);
  }
  bool hasArraySlow(OutputBuffer &OB) const override {
    if (Printing)
      return false;
    ScopedOverride<bool> SavePrinting(Printing, true);
    return Ref->hasArray(OB);
  }
  bool hasFunctionSlow(OutputBuffer &OB) const override {
    if (Printing)
      return false;
    ScopedOverride<bool> SavePrinting(Printing, true);
    return Ref->hasFunction(OB);
  }
  const Node *getSyntaxNode(OutputBuffer &OB) const override {
    if (Printing)
      return this;
    ScopedOverride<bool> SavePrinting(Printing, true);
    return Ref->getSyntaxNode(OB);
  }

  void printLeft(OutputBuffer &OB) const override {
    if (Printing)
      return;
    ScopedOverride<bool> SavePrinting(Printing, true);
    Ref->printLeft(OB);
  }
  void printRight(OutputBuffer &OB) const override {
    if (Printing)
      return;
    ScopedOverride<bool> SavePrinting(Printing, true);
    Ref->printRight(OB);
  }
};
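
// For example, "_ZN1AcvPT_IiEEv" (a specialization of the conversion function
// template shown above) demangles to "A::operator int*<int>()". While parsing
// "PT_", the <template-arg> for T_ is not yet known, so a
// ForwardTemplateReference is recorded and later resolved to "int" from the
// "IiE" argument list.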

struct NameWithTemplateArgs : Node {
  // name<template_args>
  Node *Name;
  Node *TemplateArgs;

  NameWithTemplateArgs(Node *Name_, Node *TemplateArgs_)
      : Node(KNameWithTemplateArgs), Name(Name_), TemplateArgs(TemplateArgs_) {}

  template<typename Fn> void match(Fn F) const { F(Name, TemplateArgs); }

  std::string_view getBaseName() const override { return Name->getBaseName(); }

  void printLeft(OutputBuffer &OB) const override {
    Name->print(OB);
    TemplateArgs->print(OB);
  }
};

class GlobalQualifiedName final : public Node {
  Node *Child;

public:
  GlobalQualifiedName(Node* Child_)
      : Node(KGlobalQualifiedName), Child(Child_) {}

  template<typename Fn> void match(Fn F) const { F(Child); }

  std::string_view getBaseName() const override { return Child->getBaseName(); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "::";
    Child->print(OB);
  }
};

enum class SpecialSubKind {
  allocator,
  basic_string,
  string,
  istream,
  ostream,
  iostream,
};
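
// These correspond to the abbreviated <substitution>s defined by the Itanium
// ABI:
//   Sa  std::allocator
//   Sb  std::basic_string
//   Ss  std::basic_string<char, std::char_traits<char>, std::allocator<char>>
//       (i.e. std::string)
//   Si  std::basic_istream<char, std::char_traits<char>>
//   So  std::basic_ostream<char, std::char_traits<char>>
//   Sd  std::basic_iostream<char, std::char_traits<char>>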

class SpecialSubstitution;
class ExpandedSpecialSubstitution : public Node {
protected:
  SpecialSubKind SSK;

  ExpandedSpecialSubstitution(SpecialSubKind SSK_, Kind K_)
      : Node(K_), SSK(SSK_) {}
public:
  ExpandedSpecialSubstitution(SpecialSubKind SSK_)
      : ExpandedSpecialSubstitution(SSK_, KExpandedSpecialSubstitution) {}
  inline ExpandedSpecialSubstitution(SpecialSubstitution const *);

  template<typename Fn> void match(Fn F) const { F(SSK); }

protected:
  bool isInstantiation() const {
    return unsigned(SSK) >= unsigned(SpecialSubKind::string);
  }

  std::string_view getBaseName() const override {
    switch (SSK) {
    case SpecialSubKind::allocator:
      return {"allocator"};
    case SpecialSubKind::basic_string:
      return {"basic_string"};
    case SpecialSubKind::string:
      return {"basic_string"};
    case SpecialSubKind::istream:
      return {"basic_istream"};
    case SpecialSubKind::ostream:
      return {"basic_ostream"};
    case SpecialSubKind::iostream:
      return {"basic_iostream"};
    }
    DEMANGLE_UNREACHABLE;
  }

private:
  void printLeft(OutputBuffer &OB) const override {
    OB << "std::" << getBaseName();
    if (isInstantiation()) {
      OB << "<char, std::char_traits<char>";
      if (SSK == SpecialSubKind::string)
        OB << ", std::allocator<char>";
      OB << ">";
    }
  }
};

class SpecialSubstitution final : public ExpandedSpecialSubstitution {
public:
  SpecialSubstitution(SpecialSubKind SSK_)
      : ExpandedSpecialSubstitution(SSK_, KSpecialSubstitution) {}

  template<typename Fn> void match(Fn F) const { F(SSK); }

  std::string_view getBaseName() const override {
    std::string_view SV = ExpandedSpecialSubstitution::getBaseName();
    if (isInstantiation()) {
      // The instantiations are typedefs that drop the "basic_" prefix.
      assert(llvm::itanium_demangle::starts_with(SV, "basic_"));
      SV.remove_prefix(sizeof("basic_") - 1);
    }
    return SV;
  }

  void printLeft(OutputBuffer &OB) const override {
    OB << "std::" << getBaseName();
  }
};

inline ExpandedSpecialSubstitution::ExpandedSpecialSubstitution(
    SpecialSubstitution const *SS)
    : ExpandedSpecialSubstitution(SS->SSK) {}
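
// For example, "_Z1fSs" demangles to "f(std::string)" using the abbreviated
// SpecialSubstitution form, while "_ZNSsC1Ev" (a constructor name, where the
// typedef cannot stand in for the class name) expands to roughly
// "std::basic_string<char, std::char_traits<char>,
// std::allocator<char>>::basic_string()".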

class CtorDtorName final : public Node {
  const Node *Basename;
  const bool IsDtor;
  const int Variant;

public:
  CtorDtorName(const Node *Basename_, bool IsDtor_, int Variant_)
      : Node(KCtorDtorName), Basename(Basename_), IsDtor(IsDtor_),
        Variant(Variant_) {}

  template<typename Fn> void match(Fn F) const { F(Basename, IsDtor, Variant); }

  void printLeft(OutputBuffer &OB) const override {
    if (IsDtor)
      OB += "~";
    OB += Basename->getBaseName();
  }
};

class DtorName : public Node {
  const Node *Base;

public:
  DtorName(const Node *Base_) : Node(KDtorName), Base(Base_) {}

  template<typename Fn> void match(Fn F) const { F(Base); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "~";
    Base->printLeft(OB);
  }
};

class UnnamedTypeName : public Node {
  const std::string_view Count;

public:
  UnnamedTypeName(std::string_view Count_)
      : Node(KUnnamedTypeName), Count(Count_) {}

  template<typename Fn> void match(Fn F) const { F(Count); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "'unnamed";
    OB += Count;
    OB += "\'";
  }
};

class ClosureTypeName : public Node {
  NodeArray TemplateParams;
  NodeArray Params;
  std::string_view Count;

public:
  ClosureTypeName(NodeArray TemplateParams_, NodeArray Params_,
                  std::string_view Count_)
      : Node(KClosureTypeName), TemplateParams(TemplateParams_),
        Params(Params_), Count(Count_) {}

  template<typename Fn> void match(Fn F) const {
    F(TemplateParams, Params, Count);
  }

  void printDeclarator(OutputBuffer &OB) const {
    if (!TemplateParams.empty()) {
      ScopedOverride<unsigned> LT(OB.GtIsGt, 0);
      OB += "<";
      TemplateParams.printWithComma(OB);
      OB += ">";
    }
    OB.printOpen();
    Params.printWithComma(OB);
    OB.printClose();
  }

  void printLeft(OutputBuffer &OB) const override {
    OB += "\'lambda";
    OB += Count;
    OB += "\'";
    printDeclarator(OB);
  }
};
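
// For illustration, Count is the closure's mangled discriminator: the first
// closure in a scope ("UlvE_") prints as 'lambda'(), the second ("UlvE0_") as
// 'lambda0'(), and any template parameters are printed in <> before the
// parameter list.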

class StructuredBindingName : public Node {
  NodeArray Bindings;
public:
  StructuredBindingName(NodeArray Bindings_)
      : Node(KStructuredBindingName), Bindings(Bindings_) {}

  template<typename Fn> void match(Fn F) const { F(Bindings); }

  void printLeft(OutputBuffer &OB) const override {
    OB.printOpen('[');
    Bindings.printWithComma(OB);
    OB.printClose(']');
  }
};

// -- Expression Nodes --

class BinaryExpr : public Node {
  const Node *LHS;
  const std::string_view InfixOperator;
  const Node *RHS;

public:
  BinaryExpr(const Node *LHS_, std::string_view InfixOperator_,
             const Node *RHS_, Prec Prec_)
      : Node(KBinaryExpr, Prec_), LHS(LHS_), InfixOperator(InfixOperator_),
        RHS(RHS_) {}

  template <typename Fn> void match(Fn F) const {
    F(LHS, InfixOperator, RHS, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    bool ParenAll = OB.isGtInsideTemplateArgs() &&
                    (InfixOperator == ">" || InfixOperator == ">>");
    if (ParenAll)
      OB.printOpen();
    // Assignment is right associative, with special LHS precedence.
    bool IsAssign = getPrecedence() == Prec::Assign;
    LHS->printAsOperand(OB, IsAssign ? Prec::OrIf : getPrecedence(), !IsAssign);
    // No space before comma operator
    if (!(InfixOperator == ","))
      OB += " ";
    OB += InfixOperator;
    OB += " ";
    RHS->printAsOperand(OB, getPrecedence(), IsAssign);
    if (ParenAll)
      OB.printClose();
  }
};

class ArraySubscriptExpr : public Node {
  const Node *Op1;
  const Node *Op2;

public:
  ArraySubscriptExpr(const Node *Op1_, const Node *Op2_, Prec Prec_)
      : Node(KArraySubscriptExpr, Prec_), Op1(Op1_), Op2(Op2_) {}

  template <typename Fn> void match(Fn F) const {
    F(Op1, Op2, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    Op1->printAsOperand(OB, getPrecedence());
    OB.printOpen('[');
    Op2->printAsOperand(OB);
    OB.printClose(']');
  }
};

class PostfixExpr : public Node {
  const Node *Child;
  const std::string_view Operator;

public:
  PostfixExpr(const Node *Child_, std::string_view Operator_, Prec Prec_)
      : Node(KPostfixExpr, Prec_), Child(Child_), Operator(Operator_) {}

  template <typename Fn> void match(Fn F) const {
    F(Child, Operator, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    Child->printAsOperand(OB, getPrecedence(), true);
    OB += Operator;
  }
};

class ConditionalExpr : public Node {
  const Node *Cond;
  const Node *Then;
  const Node *Else;

public:
  ConditionalExpr(const Node *Cond_, const Node *Then_, const Node *Else_,
                  Prec Prec_)
      : Node(KConditionalExpr, Prec_), Cond(Cond_), Then(Then_), Else(Else_) {}

  template <typename Fn> void match(Fn F) const {
    F(Cond, Then, Else, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    Cond->printAsOperand(OB, getPrecedence());
    OB += " ? ";
    Then->printAsOperand(OB);
    OB += " : ";
    Else->printAsOperand(OB, Prec::Assign, true);
  }
};

class MemberExpr : public Node {
  const Node *LHS;
  const std::string_view Kind;
  const Node *RHS;

public:
  MemberExpr(const Node *LHS_, std::string_view Kind_, const Node *RHS_,
             Prec Prec_)
      : Node(KMemberExpr, Prec_), LHS(LHS_), Kind(Kind_), RHS(RHS_) {}

  template <typename Fn> void match(Fn F) const {
    F(LHS, Kind, RHS, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    LHS->printAsOperand(OB, getPrecedence(), true);
    OB += Kind;
    RHS->printAsOperand(OB, getPrecedence(), false);
  }
};

class SubobjectExpr : public Node {
  const Node *Type;
  const Node *SubExpr;
  std::string_view Offset;
  NodeArray UnionSelectors;
  bool OnePastTheEnd;

public:
  SubobjectExpr(const Node *Type_, const Node *SubExpr_,
                std::string_view Offset_, NodeArray UnionSelectors_,
                bool OnePastTheEnd_)
      : Node(KSubobjectExpr), Type(Type_), SubExpr(SubExpr_), Offset(Offset_),
        UnionSelectors(UnionSelectors_), OnePastTheEnd(OnePastTheEnd_) {}

  template<typename Fn> void match(Fn F) const {
    F(Type, SubExpr, Offset, UnionSelectors, OnePastTheEnd);
  }

  void printLeft(OutputBuffer &OB) const override {
    SubExpr->print(OB);
    OB += ".<";
    Type->print(OB);
    OB += " at offset ";
    if (Offset.empty()) {
      OB += "0";
    } else if (Offset[0] == 'n') {
      OB += "-";
      OB += std::string_view(Offset.data() + 1, Offset.size() - 1);
    } else {
      OB += Offset;
    }
    OB += ">";
  }
};

class EnclosingExpr : public Node {
  const std::string_view Prefix;
  const Node *Infix;
  const std::string_view Postfix;

public:
  EnclosingExpr(std::string_view Prefix_, const Node *Infix_,
                Prec Prec_ = Prec::Primary)
      : Node(KEnclosingExpr, Prec_), Prefix(Prefix_), Infix(Infix_) {}

  template <typename Fn> void match(Fn F) const {
    F(Prefix, Infix, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    OB += Prefix;
    OB.printOpen();
    Infix->print(OB);
    OB.printClose();
    OB += Postfix;
  }
};

class CastExpr : public Node {
  // cast_kind<to>(from)
  const std::string_view CastKind;
  const Node *To;
  const Node *From;

public:
  CastExpr(std::string_view CastKind_, const Node *To_, const Node *From_,
           Prec Prec_)
      : Node(KCastExpr, Prec_), CastKind(CastKind_), To(To_), From(From_) {}

  template <typename Fn> void match(Fn F) const {
    F(CastKind, To, From, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    OB += CastKind;
    {
      ScopedOverride<unsigned> LT(OB.GtIsGt, 0);
      OB += "<";
      To->printLeft(OB);
      OB += ">";
    }
    OB.printOpen();
    From->printAsOperand(OB);
    OB.printClose();
  }
};

class SizeofParamPackExpr : public Node {
  const Node *Pack;

public:
  SizeofParamPackExpr(const Node *Pack_)
      : Node(KSizeofParamPackExpr), Pack(Pack_) {}

  template<typename Fn> void match(Fn F) const { F(Pack); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "sizeof...";
    OB.printOpen();
    ParameterPackExpansion PPE(Pack);
    PPE.printLeft(OB);
    OB.printClose();
  }
};

class CallExpr : public Node {
  const Node *Callee;
  NodeArray Args;

public:
  CallExpr(const Node *Callee_, NodeArray Args_, Prec Prec_)
      : Node(KCallExpr, Prec_), Callee(Callee_), Args(Args_) {}

  template <typename Fn> void match(Fn F) const {
    F(Callee, Args, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    Callee->print(OB);
    OB.printOpen();
    Args.printWithComma(OB);
    OB.printClose();
  }
};

class NewExpr : public Node {
  // new (expr_list) type(init_list)
  NodeArray ExprList;
  Node *Type;
  NodeArray InitList;
  bool IsGlobal; // ::operator new ?
  bool IsArray;  // new[] ?
public:
  NewExpr(NodeArray ExprList_, Node *Type_, NodeArray InitList_, bool IsGlobal_,
          bool IsArray_, Prec Prec_)
      : Node(KNewExpr, Prec_), ExprList(ExprList_), Type(Type_),
        InitList(InitList_), IsGlobal(IsGlobal_), IsArray(IsArray_) {}

  template<typename Fn> void match(Fn F) const {
    F(ExprList, Type, InitList, IsGlobal, IsArray, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    if (IsGlobal)
      OB += "::";
    OB += "new";
    if (IsArray)
      OB += "[]";
    if (!ExprList.empty()) {
      OB.printOpen();
      ExprList.printWithComma(OB);
      OB.printClose();
    }
    OB += " ";
    Type->print(OB);
    if (!InitList.empty()) {
      OB.printOpen();
      InitList.printWithComma(OB);
      OB.printClose();
    }
  }
};

class DeleteExpr : public Node {
  Node *Op;
  bool IsGlobal;
  bool IsArray;

public:
  DeleteExpr(Node *Op_, bool IsGlobal_, bool IsArray_, Prec Prec_)
      : Node(KDeleteExpr, Prec_), Op(Op_), IsGlobal(IsGlobal_),
        IsArray(IsArray_) {}

  template <typename Fn> void match(Fn F) const {
    F(Op, IsGlobal, IsArray, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    if (IsGlobal)
      OB += "::";
    OB += "delete";
    if (IsArray)
      OB += "[]";
    OB += ' ';
    Op->print(OB);
  }
};

class PrefixExpr : public Node {
  std::string_view Prefix;
  Node *Child;

public:
  PrefixExpr(std::string_view Prefix_, Node *Child_, Prec Prec_)
      : Node(KPrefixExpr, Prec_), Prefix(Prefix_), Child(Child_) {}

  template <typename Fn> void match(Fn F) const {
    F(Prefix, Child, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    OB += Prefix;
    Child->printAsOperand(OB, getPrecedence());
  }
};

class FunctionParam : public Node {
  std::string_view Number;

public:
  FunctionParam(std::string_view Number_)
      : Node(KFunctionParam), Number(Number_) {}

  template<typename Fn> void match(Fn F) const { F(Number); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "fp";
    OB += Number;
  }
};

class ConversionExpr : public Node {
  const Node *Type;
  NodeArray Expressions;

public:
  ConversionExpr(const Node *Type_, NodeArray Expressions_, Prec Prec_)
      : Node(KConversionExpr, Prec_), Type(Type_), Expressions(Expressions_) {}

  template <typename Fn> void match(Fn F) const {
    F(Type, Expressions, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    OB.printOpen();
    Type->print(OB);
    OB.printClose();
    OB.printOpen();
    Expressions.printWithComma(OB);
    OB.printClose();
  }
};

class PointerToMemberConversionExpr : public Node {
  const Node *Type;
  const Node *SubExpr;
  std::string_view Offset;

public:
  PointerToMemberConversionExpr(const Node *Type_, const Node *SubExpr_,
                                std::string_view Offset_, Prec Prec_)
      : Node(KPointerToMemberConversionExpr, Prec_), Type(Type_),
        SubExpr(SubExpr_), Offset(Offset_) {}

  template <typename Fn> void match(Fn F) const {
    F(Type, SubExpr, Offset, getPrecedence());
  }

  void printLeft(OutputBuffer &OB) const override {
    OB.printOpen();
    Type->print(OB);
    OB.printClose();
    OB.printOpen();
    SubExpr->print(OB);
    OB.printClose();
  }
};

class InitListExpr : public Node {
  const Node *Ty;
  NodeArray Inits;
public:
  InitListExpr(const Node *Ty_, NodeArray Inits_)
      : Node(KInitListExpr), Ty(Ty_), Inits(Inits_) {}

  template<typename Fn> void match(Fn F) const { F(Ty, Inits); }

  void printLeft(OutputBuffer &OB) const override {
    if (Ty)
      Ty->print(OB);
    OB += '{';
    Inits.printWithComma(OB);
    OB += '}';
  }
};

class BracedExpr : public Node {
  const Node *Elem;
  const Node *Init;
  bool IsArray;
public:
  BracedExpr(const Node *Elem_, const Node *Init_, bool IsArray_)
      : Node(KBracedExpr), Elem(Elem_), Init(Init_), IsArray(IsArray_) {}

  template<typename Fn> void match(Fn F) const { F(Elem, Init, IsArray); }

  void printLeft(OutputBuffer &OB) const override {
    if (IsArray) {
      OB += '[';
      Elem->print(OB);
      OB += ']';
    } else {
      OB += '.';
      Elem->print(OB);
    }
    if (Init->getKind() != KBracedExpr && Init->getKind() != KBracedRangeExpr)
      OB += " = ";
    Init->print(OB);
  }
};

class BracedRangeExpr : public Node {
  const Node *First;
  const Node *Last;
  const Node *Init;
public:
  BracedRangeExpr(const Node *First_, const Node *Last_, const Node *Init_)
      : Node(KBracedRangeExpr), First(First_), Last(Last_), Init(Init_) {}

  template<typename Fn> void match(Fn F) const { F(First, Last, Init); }

  void printLeft(OutputBuffer &OB) const override {
    OB += '[';
    First->print(OB);
    OB += " ... ";
    Last->print(OB);
    OB += ']';
    if (Init->getKind() != KBracedExpr && Init->getKind() != KBracedRangeExpr)
      OB += " = ";
    Init->print(OB);
  }
};

class FoldExpr : public Node {
  const Node *Pack, *Init;
  std::string_view OperatorName;
  bool IsLeftFold;

public:
  FoldExpr(bool IsLeftFold_, std::string_view OperatorName_, const Node *Pack_,
           const Node *Init_)
      : Node(KFoldExpr), Pack(Pack_), Init(Init_), OperatorName(OperatorName_),
        IsLeftFold(IsLeftFold_) {}

  template<typename Fn> void match(Fn F) const {
    F(IsLeftFold, OperatorName, Pack, Init);
  }

  void printLeft(OutputBuffer &OB) const override {
    auto PrintPack = [&] {
      OB.printOpen();
      ParameterPackExpansion(Pack).print(OB);
      OB.printClose();
    };

    OB.printOpen();
    // Either '[init op ]... op pack' or 'pack op ...[ op init]'
    // Refactored to '[(init|pack) op ]...[ op (pack|init)]'
    // Fold expr operands are cast-expressions
    if (!IsLeftFold || Init != nullptr) {
      // '(init|pack) op '
      if (IsLeftFold)
        Init->printAsOperand(OB, Prec::Cast, true);
      else
        PrintPack();
      OB << " " << OperatorName << " ";
    }
    OB << "...";
    if (IsLeftFold || Init != nullptr) {
      // ' op (init|pack)'
      OB << " " << OperatorName << " ";
      if (IsLeftFold)
        PrintPack();
      else
        Init->printAsOperand(OB, Prec::Cast, true);
    }
    OB.printClose();
  }
};
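
// For illustration, given a pack "args" and OperatorName "+", printLeft
// produces:
//   left unary fold   (IsLeftFold, Init == nullptr):   (... + (args...))
//   right unary fold  (!IsLeftFold, Init == nullptr):  ((args...) + ...)
//   left binary fold  (IsLeftFold, Init != nullptr):   (init + ... + (args...))
//   right binary fold (!IsLeftFold, Init != nullptr):  ((args...) + ... + init)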

class ThrowExpr : public Node {
  const Node *Op;

public:
  ThrowExpr(const Node *Op_) : Node(KThrowExpr), Op(Op_) {}

  template<typename Fn> void match(Fn F) const { F(Op); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "throw ";
    Op->print(OB);
  }
};

class BoolExpr : public Node {
  bool Value;

public:
  BoolExpr(bool Value_) : Node(KBoolExpr), Value(Value_) {}

  template<typename Fn> void match(Fn F) const { F(Value); }

  void printLeft(OutputBuffer &OB) const override {
    OB += Value ? std::string_view("true") : std::string_view("false");
  }
};

class StringLiteral : public Node {
  const Node *Type;

public:
  StringLiteral(const Node *Type_) : Node(KStringLiteral), Type(Type_) {}

  template<typename Fn> void match(Fn F) const { F(Type); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "\"<";
    Type->print(OB);
    OB += ">\"";
  }
};

class LambdaExpr : public Node {
  const Node *Type;

public:
  LambdaExpr(const Node *Type_) : Node(KLambdaExpr), Type(Type_) {}

  template<typename Fn> void match(Fn F) const { F(Type); }

  void printLeft(OutputBuffer &OB) const override {
    OB += "[]";
    if (Type->getKind() == KClosureTypeName)
      static_cast<const ClosureTypeName *>(Type)->printDeclarator(OB);
    OB += "{...}";
  }
};

class EnumLiteral : public Node {
  // ty(integer)
  const Node *Ty;
  std::string_view Integer;

public:
  EnumLiteral(const Node *Ty_, std::string_view Integer_)
      : Node(KEnumLiteral), Ty(Ty_), Integer(Integer_) {}

  template<typename Fn> void match(Fn F) const { F(Ty, Integer); }

  void printLeft(OutputBuffer &OB) const override {
    OB.printOpen();
    Ty->print(OB);
    OB.printClose();

    if (Integer[0] == 'n')
      OB << '-' << std::string_view(Integer.data() + 1, Integer.size() - 1);
    else
      OB << Integer;
  }
};

class IntegerLiteral : public Node {
  std::string_view Type;
  std::string_view Value;

public:
  IntegerLiteral(std::string_view Type_, std::string_view Value_)
      : Node(KIntegerLiteral), Type(Type_), Value(Value_) {}

  template<typename Fn> void match(Fn F) const { F(Type, Value); }

  void printLeft(OutputBuffer &OB) const override {
    if (Type.size() > 3) {
      OB.printOpen();
      OB += Type;
      OB.printClose();
    }

    if (Value[0] == 'n')
      OB << '-' << std::string_view(Value.data() + 1, Value.size() - 1);
    else
      OB += Value;

    if (Type.size() <= 3)
      OB += Type;
  }
};

template <class Float> struct FloatData;

namespace float_literal_impl {
constexpr Node::Kind getFloatLiteralKind(float *) {
  return Node::KFloatLiteral;
}
constexpr Node::Kind getFloatLiteralKind(double *) {
  return Node::KDoubleLiteral;
}
constexpr Node::Kind getFloatLiteralKind(long double *) {
  return Node::KLongDoubleLiteral;
}
}

template <class Float> class FloatLiteralImpl : public Node {
  const std::string_view Contents;

  static constexpr Kind KindForClass =
      float_literal_impl::getFloatLiteralKind((Float *)nullptr);

public:
  FloatLiteralImpl(std::string_view Contents_)
      : Node(KindForClass), Contents(Contents_) {}

  template<typename Fn> void match(Fn F) const { F(Contents); }

  void printLeft(OutputBuffer &OB) const override {
    const size_t N = FloatData<Float>::mangled_size;
    if (Contents.size() >= N) {
      union {
        Float value;
        char buf[sizeof(Float)];
      };
      const char *t = Contents.data();
      const char *last = t + N;
      char *e = buf;
      for (; t != last; ++t, ++e) {
        unsigned d1 = isdigit(*t) ? static_cast<unsigned>(*t - '0')
                                  : static_cast<unsigned>(*t - 'a' + 10);
        ++t;
        unsigned d0 = isdigit(*t) ? static_cast<unsigned>(*t - '0')
                                  : static_cast<unsigned>(*t - 'a' + 10);
        *e = static_cast<char>((d1 << 4) + d0);
      }
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
      std::reverse(buf, e);
#endif
      char num[FloatData<Float>::max_demangled_size] = {0};
      int n = snprintf(num, sizeof(num), FloatData<Float>::spec, value);
      OB += std::string_view(num, n);
    }
  }
};
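
// For illustration: FloatData<float>::mangled_size is 8, so the contents
// "3f800000" decode pairwise into the big-endian bytes {0x3f, 0x80, 0x00,
// 0x00}. On a little-endian host the buffer is reversed, the bytes are
// reinterpreted as the IEEE-754 value 1.0f, and the result is formatted with
// the printf-style FloatData<float>::spec format string.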

using FloatLiteral = FloatLiteralImpl<float>;
using DoubleLiteral = FloatLiteralImpl<double>;
using LongDoubleLiteral = FloatLiteralImpl<long double>;

/// Visit the node. Calls \c F(P), where \c P is the node cast to the
/// appropriate derived class.
template<typename Fn>
void Node::visit(Fn F) const {
  switch (K) {
#define NODE(X)                                                                \
  case K##X:                                                                   \
    return F(static_cast<const X *>(this));
#include "ItaniumNodes.def"
  }
  assert(0 && "unknown mangling node kind");
}

/// Determine the kind of a node from its type.
template<typename NodeT> struct NodeKind;
#define NODE(X)                                                                \
  template <> struct NodeKind<X> {                                             \
    static constexpr Node::Kind Kind = Node::K##X;                             \
    static constexpr const char *name() { return #X; }                         \
  };
#include "ItaniumNodes.def"
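
// For illustration, a visitor that prints the dynamic kind name of any node,
// combining Node::visit with the NodeKind trait. This is a sketch; the helper
// name is hypothetical, and it assumes <cstdio> and <type_traits> are
// available:
//
// \code
//   void printNodeKind(const Node *N) {
//     N->visit([](const auto *Derived) {
//       // visit() passes the node cast to its concrete class, so the static
//       // type can be recovered and used to look up the kind name.
//       using DerivedT =
//           std::remove_const_t<std::remove_pointer_t<decltype(Derived)>>;
//       std::printf("%s\n", NodeKind<DerivedT>::name());
//     });
//   }
// \endcode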

template <typename Derived, typename Alloc> struct AbstractManglingParser {
  const char *First;
  const char *Last;

  // Name stack. The parser uses this to hold temporary names as they are
  // parsed, collapsing multiple names into new nodes to construct the AST.
  // Once the parser is finished, Names.size() == 1.
  PODSmallVector<Node *, 32> Names;

  // Substitution table. Itanium mangling supports name substitutions as a
  // means of compression. A reference such as "S42_" names an entry in this
  // table: the <seq-id> is base-36 and the encoding is offset by one, so
  // "S_" is entry 0, "S0_" is entry 1, "SZ_" is entry 36, and "S42_" is
  // entry 147.
  PODSmallVector<Node *, 32> Subs;

  using TemplateParamList = PODSmallVector<Node *, 8>;

  class ScopedTemplateParamList {
    AbstractManglingParser *Parser;
    size_t OldNumTemplateParamLists;
    TemplateParamList Params;

  public:
    ScopedTemplateParamList(AbstractManglingParser *TheParser)
        : Parser(TheParser),
          OldNumTemplateParamLists(TheParser->TemplateParams.size()) {
      Parser->TemplateParams.push_back(&Params);
    }
    ~ScopedTemplateParamList() {
      assert(Parser->TemplateParams.size() >= OldNumTemplateParamLists);
      Parser->TemplateParams.dropBack(OldNumTemplateParamLists);
    }
  };

  // Template parameter table. Like the above, but referenced like "T42_"
  // (again base-36 and offset by one: "T_" is parameter 0, "T0_" parameter 1).
  // This has a smaller inline size than Subs and Names because it can be
  // stored on the stack.
  TemplateParamList OuterTemplateParams;

  // Lists of template parameters indexed by template parameter depth,
  // referenced like "TL2_4_". If nonempty, element 0 is always
  // OuterTemplateParams; inner elements are always template parameter lists of
  // lambda expressions. For a generic lambda with no explicit template
  // parameter list, the corresponding parameter list pointer will be null.
  PODSmallVector<TemplateParamList *, 4> TemplateParams;

  // Set of unresolved forward <template-param> references. These can occur in a
  // conversion operator's type, and are resolved in the enclosing <encoding>.
  PODSmallVector<ForwardTemplateReference *, 4> ForwardTemplateRefs;

  bool TryToParseTemplateArgs = true;
  bool PermitForwardTemplateReferences = false;
  size_t ParsingLambdaParamsAtLevel = (size_t)-1;

  unsigned NumSyntheticTemplateParameters[3] = {};

  Alloc ASTAllocator;

  AbstractManglingParser(const char *First_, const char *Last_)
      : First(First_), Last(Last_) {}

  Derived &getDerived() { return static_cast<Derived &>(*this); }

  void reset(const char *First_, const char *Last_) {
    First = First_;
    Last = Last_;
    Names.clear();
    Subs.clear();
    TemplateParams.clear();
    ParsingLambdaParamsAtLevel = (size_t)-1;
    TryToParseTemplateArgs = true;
    PermitForwardTemplateReferences = false;
    for (int I = 0; I != 3; ++I)
      NumSyntheticTemplateParameters[I] = 0;
    ASTAllocator.reset();
  }

  template <class T, class... Args> Node *make(Args &&... args) {
    return ASTAllocator.template makeNode<T>(std::forward<Args>(args)...);
  }

  template <class It> NodeArray makeNodeArray(It begin, It end) {
    size_t sz = static_cast<size_t>(end - begin);
    void *mem = ASTAllocator.allocateNodeArray(sz);
    Node **data = new (mem) Node *[sz];
    std::copy(begin, end, data);
    return NodeArray(data, sz);
  }

  NodeArray popTrailingNodeArray(size_t FromPosition) {
    assert(FromPosition <= Names.size());
    NodeArray res =
        makeNodeArray(Names.begin() + (long)FromPosition, Names.end());
    Names.dropBack(FromPosition);
    return res;
  }

  bool consumeIf(std::string_view S) {
    if (llvm::itanium_demangle::starts_with(
            std::string_view(First, Last - First), S)) {
      First += S.size();
      return true;
    }
    return false;
  }

  bool consumeIf(char C) {
    if (First != Last && *First == C) {
      ++First;
      return true;
    }
    return false;
  }

  char consume() { return First != Last ? *First++ : '\0'; }

  char look(unsigned Lookahead = 0) const {
    if (static_cast<size_t>(Last - First) <= Lookahead)
      return '\0';
    return First[Lookahead];
  }

  size_t numLeft() const { return static_cast<size_t>(Last - First); }

  std::string_view parseNumber(bool AllowNegative = false);
  Qualifiers parseCVQualifiers();
  bool parsePositiveInteger(size_t *Out);
  std::string_view parseBareSourceName();

  bool parseSeqId(size_t *Out);
  Node *parseSubstitution();
  Node *parseTemplateParam();
  Node *parseTemplateParamDecl();
  Node *parseTemplateArgs(bool TagTemplates = false);
  Node *parseTemplateArg();

  /// Parse the <expr> production.
  Node *parseExpr();
  Node *parsePrefixExpr(std::string_view Kind, Node::Prec Prec);
  Node *parseBinaryExpr(std::string_view Kind, Node::Prec Prec);
  Node *parseIntegerLiteral(std::string_view Lit);
  Node *parseExprPrimary();
  template <class Float> Node *parseFloatingLiteral();
  Node *parseFunctionParam();
  Node *parseConversionExpr();
  Node *parseBracedExpr();
  Node *parseFoldExpr();
  Node *parsePointerToMemberConversionExpr(Node::Prec Prec);
  Node *parseSubobjectExpr();

  /// Parse the <type> production.
  Node *parseType();
  Node *parseFunctionType();
  Node *parseVectorType();
  Node *parseDecltype();
  Node *parseArrayType();
  Node *parsePointerToMemberType();
  Node *parseClassEnumType();
  Node *parseQualifiedType();

  Node *parseEncoding();
  bool parseCallOffset();
  Node *parseSpecialName();

  /// Holds some extra information about a <name> that is being parsed. This
  /// information is only pertinent if the <name> refers to an <encoding>.
  struct NameState {
    bool CtorDtorConversion = false;
    bool EndsWithTemplateArgs = false;
    Qualifiers CVQualifiers = QualNone;
    FunctionRefQual ReferenceQualifier = FrefQualNone;
    size_t ForwardTemplateRefsBegin;

    NameState(AbstractManglingParser *Enclosing)
        : ForwardTemplateRefsBegin(Enclosing->ForwardTemplateRefs.size()) {}
  };

  bool resolveForwardTemplateRefs(NameState &State) {
    size_t I = State.ForwardTemplateRefsBegin;
    size_t E = ForwardTemplateRefs.size();
    for (; I < E; ++I) {
      size_t Idx = ForwardTemplateRefs[I]->Index;
      if (TemplateParams.empty() || !TemplateParams[0] ||
          Idx >= TemplateParams[0]->size())
        return true;
      ForwardTemplateRefs[I]->Ref = (*TemplateParams[0])[Idx];
    }
    ForwardTemplateRefs.dropBack(State.ForwardTemplateRefsBegin);
    return false;
  }

  /// Parse the <name> production.
  Node *parseName(NameState *State = nullptr);
  Node *parseLocalName(NameState *State);
  Node *parseOperatorName(NameState *State);
  bool parseModuleNameOpt(ModuleName *&Module);
  Node *parseUnqualifiedName(NameState *State, Node *Scope, ModuleName *Module);
  Node *parseUnnamedTypeName(NameState *State);
  Node *parseSourceName(NameState *State);
  Node *parseUnscopedName(NameState *State, bool *isSubstName);
  Node *parseNestedName(NameState *State);
  Node *parseCtorDtorName(Node *&SoFar, NameState *State);

  Node *parseAbiTags(Node *N);

  struct OperatorInfo {
    enum OIKind : unsigned char {
      Prefix,      // Prefix unary: @ expr
      Postfix,     // Postfix unary: expr @
      Binary,      // Binary: lhs @ rhs
      Array,       // Array index:  lhs [ rhs ]
      Member,      // Member access: lhs @ rhs
      New,         // New
      Del,         // Delete
      Call,        // Function call: expr (expr*)
      CCast,       // C cast: (type)expr
      Conditional, // Conditional: expr ? expr : expr
      NameOnly,    // Overload only, not allowed in expression.
      // Below do not have operator names
      NamedCast, // Named cast, @<type>(expr)
      OfIdOp,    // alignof, sizeof, typeid

      Unnameable = NamedCast,
    };
    char Enc[2];      // Encoding
    OIKind Kind;      // Kind of operator
    bool Flag : 1;    // Entry-specific flag
    Node::Prec Prec : 7; // Precedence
    const char *Name; // Spelling

  public:
    constexpr OperatorInfo(const char (&E)[3], OIKind K, bool F, Node::Prec P,
                           const char *N)
        : Enc{E[0], E[1]}, Kind{K}, Flag{F}, Prec{P}, Name{N} {}

  public:
    bool operator<(const OperatorInfo &Other) const {
      return *this < Other.Enc;
    }
    bool operator<(const char *Peek) const {
      return Enc[0] < Peek[0] || (Enc[0] == Peek[0] && Enc[1] < Peek[1]);
    }
    bool operator==(const char *Peek) const {
      return Enc[0] == Peek[0] && Enc[1] == Peek[1];
    }
    bool operator!=(const char *Peek) const { return !this->operator==(Peek); }

  public:
    std::string_view getSymbol() const {
      std::string_view Res = Name;
      if (Kind < Unnameable) {
        assert(llvm::itanium_demangle::starts_with(Res, "operator") &&
               "operator name does not start with 'operator'");
        Res.remove_prefix(sizeof("operator") - 1);
        if (llvm::itanium_demangle::starts_with(Res, ' '))
          Res.remove_prefix(1);
      }
      return Res;
    }
    std::string_view getName() const { return Name; }
    OIKind getKind() const { return Kind; }
    bool getFlag() const { return Flag; }
    Node::Prec getPrecedence() const { return Prec; }
  };
  static const OperatorInfo Ops[];
  static const size_t NumOps;
  const OperatorInfo *parseOperatorEncoding();

  /// Parse the <unresolved-name> production.
  Node *parseUnresolvedName(bool Global);
  Node *parseSimpleId();
  Node *parseBaseUnresolvedName();
  Node *parseUnresolvedType();
  Node *parseDestructorName();

  /// Top-level entry point into the parser.
  Node *parse();
};

const char* parse_discriminator(const char* first, const char* last);

// <name> ::= <nested-name> // N
//        ::= <local-name> # See Scope Encoding below  // Z
//        ::= <unscoped-template-name> <template-args>
//        ::= <unscoped-name>
//
// <unscoped-template-name> ::= <unscoped-name>
//                          ::= <substitution>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseName(NameState *State) {
  if (look() == 'N')
    return getDerived().parseNestedName(State);
  if (look() == 'Z')
    return getDerived().parseLocalName(State);

  Node *Result = nullptr;
  bool IsSubst = false;

  Result = getDerived().parseUnscopedName(State, &IsSubst);
  if (!Result)
    return nullptr;

  if (look() == 'I') {
    //        ::= <unscoped-template-name> <template-args>
    if (!IsSubst)
      // An unscoped-template-name is substitutable.
      Subs.push_back(Result);
    Node *TA = getDerived().parseTemplateArgs(State != nullptr);
    if (TA == nullptr)
      return nullptr;
    if (State)
      State->EndsWithTemplateArgs = true;
    Result = make<NameWithTemplateArgs>(Result, TA);
  } else if (IsSubst) {
    // The substitution case must be followed by <template-args>.
    return nullptr;
  }

  return Result;
}
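
// For example, in "_Z1fIiEvT_" ("void f<int>(int)"), the <name> is "1fIiE":
// parseUnscopedName yields "f", the name is pushed as a substitution, and the
// "IiE" <template-args> wrap it in a NameWithTemplateArgs printing "f<int>".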

// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
//              := Z <function encoding> E s [<discriminator>]
//              := Z <function encoding> Ed [ <parameter number> ] _ <entity name>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseLocalName(NameState *State) {
  if (!consumeIf('Z'))
    return nullptr;
  Node *Encoding = getDerived().parseEncoding();
  if (Encoding == nullptr || !consumeIf('E'))
    return nullptr;

  if (consumeIf('s')) {
    First = parse_discriminator(First, Last);
    auto *StringLitName = make<NameType>("string literal");
    if (!StringLitName)
      return nullptr;
    return make<LocalName>(Encoding, StringLitName);
  }

  if (consumeIf('d')) {
    parseNumber(true);
    if (!consumeIf('_'))
      return nullptr;
    Node *N = getDerived().parseName(State);
    if (N == nullptr)
      return nullptr;
    return make<LocalName>(Encoding, N);
  }

  Node *Entity = getDerived().parseName(State);
  if (Entity == nullptr)
    return nullptr;
  First = parse_discriminator(First, Last);
  return make<LocalName>(Encoding, Entity);
}
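
// For example, "_ZZ4mainE1x" demangles to "main::x": the <function encoding>
// between 'Z' and 'E' is "4main" and the <entity name> is "1x".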

// <unscoped-name> ::= <unqualified-name>
//                 ::= St <unqualified-name>   # ::std::
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseUnscopedName(NameState *State,
                                                          bool *IsSubst) {

  Node *Std = nullptr;
  if (consumeIf("St")) {
    Std = make<NameType>("std");
    if (Std == nullptr)
      return nullptr;
  }

  Node *Res = nullptr;
  ModuleName *Module = nullptr;
  if (look() == 'S') {
    Node *S = getDerived().parseSubstitution();
    if (!S)
      return nullptr;
    if (S->getKind() == Node::KModuleName)
      Module = static_cast<ModuleName *>(S);
    else if (IsSubst && Std == nullptr) {
      Res = S;
      *IsSubst = true;
    } else {
      return nullptr;
    }
  }

  if (Res == nullptr || Std != nullptr) {
    Res = getDerived().parseUnqualifiedName(State, Std, Module);
  }

  return Res;
}

// <unqualified-name> ::= [<module-name>] L? <operator-name> [<abi-tags>]
//                    ::= [<module-name>] <ctor-dtor-name> [<abi-tags>]
//                    ::= [<module-name>] L? <source-name> [<abi-tags>]
//                    ::= [<module-name>] L? <unnamed-type-name> [<abi-tags>]
//			# structured binding declaration
//                    ::= [<module-name>] L? DC <source-name>+ E
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseUnqualifiedName(
    NameState *State, Node *Scope, ModuleName *Module) {
  if (getDerived().parseModuleNameOpt(Module))
    return nullptr;

  consumeIf('L');

  Node *Result;
  if (look() >= '1' && look() <= '9') {
    Result = getDerived().parseSourceName(State);
  } else if (look() == 'U') {
    Result = getDerived().parseUnnamedTypeName(State);
  } else if (consumeIf("DC")) {
    // Structured binding
    size_t BindingsBegin = Names.size();
    do {
      Node *Binding = getDerived().parseSourceName(State);
      if (Binding == nullptr)
        return nullptr;
      Names.push_back(Binding);
    } while (!consumeIf('E'));
    Result = make<StructuredBindingName>(popTrailingNodeArray(BindingsBegin));
  } else if (look() == 'C' || look() == 'D') {
    // A <ctor-dtor-name>.
    if (Scope == nullptr || Module != nullptr)
      return nullptr;
    Result = getDerived().parseCtorDtorName(Scope, State);
  } else {
    Result = getDerived().parseOperatorName(State);
  }

  if (Result != nullptr && Module != nullptr)
    Result = make<ModuleEntity>(Module, Result);
  if (Result != nullptr)
    Result = getDerived().parseAbiTags(Result);
  if (Result != nullptr && Scope != nullptr)
    Result = make<NestedName>(Scope, Result);

  return Result;
}

// <module-name> ::= <module-subname>
// 	 	 ::= <module-name> <module-subname>
//		 ::= <substitution>  # passed in by caller
// <module-subname> ::= W <source-name>
//		    ::= W P <source-name>
template <typename Derived, typename Alloc>
bool AbstractManglingParser<Derived, Alloc>::parseModuleNameOpt(
    ModuleName *&Module) {
  while (consumeIf('W')) {
    bool IsPartition = consumeIf('P');
    Node *Sub = getDerived().parseSourceName(nullptr);
    if (!Sub)
      return true;
    Module =
        static_cast<ModuleName *>(make<ModuleName>(Module, Sub, IsPartition));
    Subs.push_back(Module);
  }

  return false;
}

// <unnamed-type-name> ::= Ut [<nonnegative number>] _
//                     ::= <closure-type-name>
//
// <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
//
// <lambda-sig> ::= <parameter type>+  # Parameter types or "v" if the lambda has no parameters
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseUnnamedTypeName(NameState *State) {
  // <template-params> refer to the innermost <template-args>. Clear out any
  // outer args that we may have inserted into TemplateParams.
  if (State != nullptr)
    TemplateParams.clear();

  if (consumeIf("Ut")) {
    std::string_view Count = parseNumber();
    if (!consumeIf('_'))
      return nullptr;
    return make<UnnamedTypeName>(Count);
  }
  if (consumeIf("Ul")) {
    ScopedOverride<size_t> SwapParams(ParsingLambdaParamsAtLevel,
                                      TemplateParams.size());
    ScopedTemplateParamList LambdaTemplateParams(this);

    size_t ParamsBegin = Names.size();
    while (look() == 'T' &&
           std::string_view("yptn").find(look(1)) != std::string_view::npos) {
      Node *T = parseTemplateParamDecl();
      if (!T)
        return nullptr;
      Names.push_back(T);
    }
    NodeArray TempParams = popTrailingNodeArray(ParamsBegin);

    // FIXME: If TempParams is empty and none of the function parameters
    // includes 'auto', we should remove LambdaTemplateParams from the
    // TemplateParams list. Unfortunately, we don't find out whether there are
    // any 'auto' parameters until too late in an example such as:
    //
    //   template<typename T> void f(
    //       decltype([](decltype([]<typename T>(T v) {}),
    //                   auto) {})) {}
    //   template<typename T> void f(
    //       decltype([](decltype([]<typename T>(T w) {}),
    //                   int) {})) {}
    //
    // Here, the type of v is at level 2 but the type of w is at level 1. We
    // don't find this out until we encounter the type of the next parameter.
    //
    // However, compilers can't actually cope with the former example in
    // practice, and it's likely to be made ill-formed in future, so we don't
    // need to support it here.
    //
    // If we encounter an 'auto' in the function parameter types, we will
    // recreate a template parameter scope for it, but any intervening lambdas
    // will be parsed in the 'wrong' template parameter depth.
    if (TempParams.empty())
      TemplateParams.pop_back();

    if (!consumeIf("vE")) {
      do {
        Node *P = getDerived().parseType();
        if (P == nullptr)
          return nullptr;
        Names.push_back(P);
      } while (!consumeIf('E'));
    }
    NodeArray Params = popTrailingNodeArray(ParamsBegin);

    std::string_view Count = parseNumber();
    if (!consumeIf('_'))
      return nullptr;
    return make<ClosureTypeName>(TempParams, Params, Count);
  }
  if (consumeIf("Ub")) {
    (void)parseNumber();
    if (!consumeIf('_'))
      return nullptr;
    return make<NameType>("'block-literal'");
  }
  return nullptr;
}
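
// For example, "_ZZ1fvENKUlvE_clEv" demangles to
// "f()::'lambda'()::operator()() const": the "UlvE_" <closure-type-name>
// becomes 'lambda'() with an empty discriminator and no parameters.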

// <source-name> ::= <positive length number> <identifier>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseSourceName(NameState *) {
  size_t Length = 0;
  if (parsePositiveInteger(&Length))
    return nullptr;
  if (numLeft() < Length || Length == 0)
    return nullptr;
  std::string_view Name(First, Length);
  First += Length;
  if (llvm::itanium_demangle::starts_with(Name, "_GLOBAL__N"))
    return make<NameType>("(anonymous namespace)");
  return make<NameType>(Name);
}
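
// For example, "3foo" parses to NameType("foo"), and a GCC anonymous-namespace
// component such as "12_GLOBAL__N_1" prints as "(anonymous namespace)".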

// Operator encodings
template <typename Derived, typename Alloc>
const typename AbstractManglingParser<
    Derived, Alloc>::OperatorInfo AbstractManglingParser<Derived,
                                                         Alloc>::Ops[] = {
    // Keep ordered by encoding
    {"aN", OperatorInfo::Binary, false, Node::Prec::Assign, "operator&="},
    {"aS", OperatorInfo::Binary, false, Node::Prec::Assign, "operator="},
    {"aa", OperatorInfo::Binary, false, Node::Prec::AndIf, "operator&&"},
    {"ad", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator&"},
    {"an", OperatorInfo::Binary, false, Node::Prec::And, "operator&"},
    {"at", OperatorInfo::OfIdOp, /*Type*/ true, Node::Prec::Unary, "alignof "},
    {"aw", OperatorInfo::NameOnly, false, Node::Prec::Primary,
     "operator co_await"},
    {"az", OperatorInfo::OfIdOp, /*Type*/ false, Node::Prec::Unary, "alignof "},
    {"cc", OperatorInfo::NamedCast, false, Node::Prec::Postfix, "const_cast"},
    {"cl", OperatorInfo::Call, false, Node::Prec::Postfix, "operator()"},
    {"cm", OperatorInfo::Binary, false, Node::Prec::Comma, "operator,"},
    {"co", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator~"},
    {"cv", OperatorInfo::CCast, false, Node::Prec::Cast, "operator"}, // C Cast
    {"dV", OperatorInfo::Binary, false, Node::Prec::Assign, "operator/="},
    {"da", OperatorInfo::Del, /*Ary*/ true, Node::Prec::Unary,
     "operator delete[]"},
    {"dc", OperatorInfo::NamedCast, false, Node::Prec::Postfix, "dynamic_cast"},
    {"de", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator*"},
    {"dl", OperatorInfo::Del, /*Ary*/ false, Node::Prec::Unary,
     "operator delete"},
    {"ds", OperatorInfo::Member, /*Named*/ false, Node::Prec::PtrMem,
     "operator.*"},
    {"dt", OperatorInfo::Member, /*Named*/ false, Node::Prec::Postfix,
     "operator."},
    {"dv", OperatorInfo::Binary, false, Node::Prec::Assign, "operator/"},
    {"eO", OperatorInfo::Binary, false, Node::Prec::Assign, "operator^="},
    {"eo", OperatorInfo::Binary, false, Node::Prec::Xor, "operator^"},
    {"eq", OperatorInfo::Binary, false, Node::Prec::Equality, "operator=="},
    {"ge", OperatorInfo::Binary, false, Node::Prec::Relational, "operator>="},
    {"gt", OperatorInfo::Binary, false, Node::Prec::Relational, "operator>"},
    {"ix", OperatorInfo::Array, false, Node::Prec::Postfix, "operator[]"},
    {"lS", OperatorInfo::Binary, false, Node::Prec::Assign, "operator<<="},
    {"le", OperatorInfo::Binary, false, Node::Prec::Relational, "operator<="},
    {"ls", OperatorInfo::Binary, false, Node::Prec::Shift, "operator<<"},
    {"lt", OperatorInfo::Binary, false, Node::Prec::Relational, "operator<"},
    {"mI", OperatorInfo::Binary, false, Node::Prec::Assign, "operator-="},
    {"mL", OperatorInfo::Binary, false, Node::Prec::Assign, "operator*="},
    {"mi", OperatorInfo::Binary, false, Node::Prec::Additive, "operator-"},
    {"ml", OperatorInfo::Binary, false, Node::Prec::Multiplicative,
     "operator*"},
    {"mm", OperatorInfo::Postfix, false, Node::Prec::Postfix, "operator--"},
    {"na", OperatorInfo::New, /*Ary*/ true, Node::Prec::Unary,
     "operator new[]"},
    {"ne", OperatorInfo::Binary, false, Node::Prec::Equality, "operator!="},
    {"ng", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator-"},
    {"nt", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator!"},
    {"nw", OperatorInfo::New, /*Ary*/ false, Node::Prec::Unary, "operator new"},
    {"oR", OperatorInfo::Binary, false, Node::Prec::Assign, "operator|="},
    {"oo", OperatorInfo::Binary, false, Node::Prec::OrIf, "operator||"},
    {"or", OperatorInfo::Binary, false, Node::Prec::Ior, "operator|"},
    {"pL", OperatorInfo::Binary, false, Node::Prec::Assign, "operator+="},
    {"pl", OperatorInfo::Binary, false, Node::Prec::Additive, "operator+"},
    {"pm", OperatorInfo::Member, /*Named*/ false, Node::Prec::PtrMem,
     "operator->*"},
    {"pp", OperatorInfo::Postfix, false, Node::Prec::Postfix, "operator++"},
    {"ps", OperatorInfo::Prefix, false, Node::Prec::Unary, "operator+"},
    {"pt", OperatorInfo::Member, /*Named*/ true, Node::Prec::Postfix,
     "operator->"},
    {"qu", OperatorInfo::Conditional, false, Node::Prec::Conditional,
     "operator?"},
    {"rM", OperatorInfo::Binary, false, Node::Prec::Assign, "operator%="},
    {"rS", OperatorInfo::Binary, false, Node::Prec::Assign, "operator>>="},
    {"rc", OperatorInfo::NamedCast, false, Node::Prec::Postfix,
     "reinterpret_cast"},
    {"rm", OperatorInfo::Binary, false, Node::Prec::Multiplicative,
     "operator%"},
    {"rs", OperatorInfo::Binary, false, Node::Prec::Shift, "operator>>"},
    {"sc", OperatorInfo::NamedCast, false, Node::Prec::Postfix, "static_cast"},
    {"ss", OperatorInfo::Binary, false, Node::Prec::Spaceship, "operator<=>"},
    {"st", OperatorInfo::OfIdOp, /*Type*/ true, Node::Prec::Unary, "sizeof "},
    {"sz", OperatorInfo::OfIdOp, /*Type*/ false, Node::Prec::Unary, "sizeof "},
    {"te", OperatorInfo::OfIdOp, /*Type*/ false, Node::Prec::Postfix,
     "typeid "},
    {"ti", OperatorInfo::OfIdOp, /*Type*/ true, Node::Prec::Postfix, "typeid "},
};
template <typename Derived, typename Alloc>
const size_t AbstractManglingParser<Derived, Alloc>::NumOps = sizeof(Ops) /
                                                              sizeof(Ops[0]);

// If the next 2 chars are an operator encoding, consume them and return their
// OperatorInfo.  Otherwise return nullptr.
template <typename Derived, typename Alloc>
const typename AbstractManglingParser<Derived, Alloc>::OperatorInfo *
AbstractManglingParser<Derived, Alloc>::parseOperatorEncoding() {
  if (numLeft() < 2)
    return nullptr;

  // We can't use std::lower_bound, as that can link in symbols from the C++
  // standard library, and the demangler must remain independent of it.
  size_t lower = 0u, upper = NumOps - 1; // Inclusive bounds.
  while (upper != lower) {
    size_t middle = (upper + lower) / 2;
    if (Ops[middle] < First)
      lower = middle + 1;
    else
      upper = middle;
  }
  if (Ops[lower] != First)
    return nullptr;

  First += 2;
  return &Ops[lower];
}
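
// For example, with the input positioned at "plXYZ", parseOperatorEncoding()
// consumes "pl" and returns the Ops entry for "operator+" (Binary,
// Prec::Additive); with input "zz" it returns nullptr and consumes nothing.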

//   <operator-name> ::= See parseOperatorEncoding()
//                   ::= li <source-name>  # operator ""
//                   ::= v <digit> <source-name>  # vendor extended operator
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseOperatorName(NameState *State) {
  if (const auto *Op = parseOperatorEncoding()) {
    if (Op->getKind() == OperatorInfo::CCast) {
      //              ::= cv <type>    # (cast)
      ScopedOverride<bool> SaveTemplate(TryToParseTemplateArgs, false);
      // If we're parsing an encoding, State != nullptr and the conversion
      // operators' <type> could have a <template-param> that refers to some
      // <template-arg>s further ahead in the mangled name.
      ScopedOverride<bool> SavePermit(PermitForwardTemplateReferences,
                                      PermitForwardTemplateReferences ||
                                          State != nullptr);
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      if (State) State->CtorDtorConversion = true;
      return make<ConversionOperatorType>(Ty);
    }

    if (Op->getKind() >= OperatorInfo::Unnameable)
      /* Not a nameable operator.  */
      return nullptr;
    if (Op->getKind() == OperatorInfo::Member && !Op->getFlag())
      /* Not a nameable MemberExpr */
      return nullptr;

    return make<NameType>(Op->getName());
  }

  if (consumeIf("li")) {
    //                   ::= li <source-name>  # operator ""
    Node *SN = getDerived().parseSourceName(State);
    if (SN == nullptr)
      return nullptr;
    return make<LiteralOperator>(SN);
  }

  if (consumeIf('v')) {
    // ::= v <digit> <source-name>        # vendor extended operator
    if (look() >= '0' && look() <= '9') {
      First++;
      Node *SN = getDerived().parseSourceName(State);
      if (SN == nullptr)
        return nullptr;
      return make<ConversionOperatorType>(SN);
    }
    return nullptr;
  }

  return nullptr;
}

// <ctor-dtor-name> ::= C1  # complete object constructor
//                  ::= C2  # base object constructor
//                  ::= C3  # complete object allocating constructor
//   extension      ::= C4  # gcc old-style "[unified]" constructor
//   extension      ::= C5  # the COMDAT used for ctors
//                  ::= D0  # deleting destructor
//                  ::= D1  # complete object destructor
//                  ::= D2  # base object destructor
//   extension      ::= D4  # gcc old-style "[unified]" destructor
//   extension      ::= D5  # the COMDAT used for dtors
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseCtorDtorName(Node *&SoFar,
                                                          NameState *State) {
  if (SoFar->getKind() == Node::KSpecialSubstitution) {
    // Expand the special substitution.
    SoFar = make<ExpandedSpecialSubstitution>(
        static_cast<SpecialSubstitution *>(SoFar));
    if (!SoFar)
      return nullptr;
  }

  if (consumeIf('C')) {
    bool IsInherited = consumeIf('I');
    if (look() != '1' && look() != '2' && look() != '3' && look() != '4' &&
        look() != '5')
      return nullptr;
    int Variant = look() - '0';
    ++First;
    if (State) State->CtorDtorConversion = true;
    if (IsInherited) {
      if (getDerived().parseName(State) == nullptr)
        return nullptr;
    }
    return make<CtorDtorName>(SoFar, /*IsDtor=*/false, Variant);
  }

  if (look() == 'D' && (look(1) == '0' || look(1) == '1' || look(1) == '2' ||
                        look(1) == '4' || look(1) == '5')) {
    int Variant = look(1) - '0';
    First += 2;
    if (State) State->CtorDtorConversion = true;
    return make<CtorDtorName>(SoFar, /*IsDtor=*/true, Variant);
  }

  return nullptr;
}
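
// For example, "_ZN1AC2Ev" (base object constructor) demangles to "A::A()",
// and "_ZN1AD0Ev" (deleting destructor) to "A::~A()"; the variant digit is
// recorded but does not change the printed name.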

// <nested-name> ::= N [<CV-Qualifiers>] [<ref-qualifier>] <prefix>
// 			<unqualified-name> E
//               ::= N [<CV-Qualifiers>] [<ref-qualifier>] <template-prefix>
//               	<template-args> E
//
// <prefix> ::= <prefix> <unqualified-name>
//          ::= <template-prefix> <template-args>
//          ::= <template-param>
//          ::= <decltype>
//          ::= # empty
//          ::= <substitution>
//          ::= <prefix> <data-member-prefix>
//
// <data-member-prefix> := <member source-name> [<template-args>] M
//
// <template-prefix> ::= <prefix> <template unqualified-name>
//                   ::= <template-param>
//                   ::= <substitution>
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseNestedName(NameState *State) {
  if (!consumeIf('N'))
    return nullptr;

  Qualifiers CVTmp = parseCVQualifiers();
  if (State) State->CVQualifiers = CVTmp;

  if (consumeIf('O')) {
    if (State) State->ReferenceQualifier = FrefQualRValue;
  } else if (consumeIf('R')) {
    if (State) State->ReferenceQualifier = FrefQualLValue;
  } else {
    if (State) State->ReferenceQualifier = FrefQualNone;
  }

  Node *SoFar = nullptr;
  while (!consumeIf('E')) {
    if (State)
      // Only set end-with-template on the case that does that.
      State->EndsWithTemplateArgs = false;

    if (look() == 'T') {
      //          ::= <template-param>
      if (SoFar != nullptr)
        return nullptr; // Cannot have a prefix.
      SoFar = getDerived().parseTemplateParam();
    } else if (look() == 'I') {
      //          ::= <template-prefix> <template-args>
      if (SoFar == nullptr)
        return nullptr; // Must have a prefix.
      Node *TA = getDerived().parseTemplateArgs(State != nullptr);
      if (TA == nullptr)
        return nullptr;
      if (SoFar->getKind() == Node::KNameWithTemplateArgs)
        // Semantically <template-args> <template-args> cannot be generated by a
        // C++ entity.  There will always be [something like] a name between
        // them.
        return nullptr;
      if (State)
        State->EndsWithTemplateArgs = true;
      SoFar = make<NameWithTemplateArgs>(SoFar, TA);
    } else if (look() == 'D' && (look(1) == 't' || look(1) == 'T')) {
      //          ::= <decltype>
      if (SoFar != nullptr)
        return nullptr; // Cannot have a prefix.
      SoFar = getDerived().parseDecltype();
    } else {
      ModuleName *Module = nullptr;

      if (look() == 'S') {
        //          ::= <substitution>
        Node *S = nullptr;
        if (look(1) == 't') {
          First += 2;
          S = make<NameType>("std");
        } else {
          S = getDerived().parseSubstitution();
        }
        if (!S)
          return nullptr;
        if (S->getKind() == Node::KModuleName) {
          Module = static_cast<ModuleName *>(S);
        } else if (SoFar != nullptr) {
          return nullptr; // Cannot have a prefix.
        } else {
          SoFar = S;
          continue; // Do not push a new substitution.
        }
      }

      //          ::= [<prefix>] <unqualified-name>
      SoFar = getDerived().parseUnqualifiedName(State, SoFar, Module);
    }

    if (SoFar == nullptr)
      return nullptr;
    Subs.push_back(SoFar);

    // No longer used.
    // <data-member-prefix> := <member source-name> [<template-args>] M
    consumeIf('M');
  }

  if (SoFar == nullptr || Subs.empty())
    return nullptr;

  Subs.pop_back();
  return SoFar;
}
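
// For example, "_ZNSt9exceptionD1Ev" demangles to
// "std::exception::~exception()": the "St" prefix yields "std", "9exception"
// nests under it, and "D1" forms the complete object destructor name.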

// <simple-id> ::= <source-name> [ <template-args> ]
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseSimpleId() {
  Node *SN = getDerived().parseSourceName(/*NameState=*/nullptr);
  if (SN == nullptr)
    return nullptr;
  if (look() == 'I') {
    Node *TA = getDerived().parseTemplateArgs();
    if (TA == nullptr)
      return nullptr;
    return make<NameWithTemplateArgs>(SN, TA);
  }
  return SN;
}

// <destructor-name> ::= <unresolved-type>  # e.g., ~T or ~decltype(f())
//                   ::= <simple-id>        # e.g., ~A<2*N>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseDestructorName() {
  Node *Result;
  if (std::isdigit(look()))
    Result = getDerived().parseSimpleId();
  else
    Result = getDerived().parseUnresolvedType();
  if (Result == nullptr)
    return nullptr;
  return make<DtorName>(Result);
}

// <unresolved-type> ::= <template-param>
//                   ::= <decltype>
//                   ::= <substitution>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseUnresolvedType() {
  if (look() == 'T') {
    Node *TP = getDerived().parseTemplateParam();
    if (TP == nullptr)
      return nullptr;
    Subs.push_back(TP);
    return TP;
  }
  if (look() == 'D') {
    Node *DT = getDerived().parseDecltype();
    if (DT == nullptr)
      return nullptr;
    Subs.push_back(DT);
    return DT;
  }
  return getDerived().parseSubstitution();
}

// <base-unresolved-name> ::= <simple-id>                                # unresolved name
//          extension     ::= <operator-name>                            # unresolved operator-function-id
//          extension     ::= <operator-name> <template-args>            # unresolved operator template-id
//                        ::= on <operator-name>                         # unresolved operator-function-id
//                        ::= on <operator-name> <template-args>         # unresolved operator template-id
//                        ::= dn <destructor-name>                       # destructor or pseudo-destructor;
//                                                                         # e.g. ~X or ~X<N-1>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseBaseUnresolvedName() {
  if (std::isdigit(look()))
    return getDerived().parseSimpleId();

  if (consumeIf("dn"))
    return getDerived().parseDestructorName();

  consumeIf("on");

  Node *Oper = getDerived().parseOperatorName(/*NameState=*/nullptr);
  if (Oper == nullptr)
    return nullptr;
  if (look() == 'I') {
    Node *TA = getDerived().parseTemplateArgs();
    if (TA == nullptr)
      return nullptr;
    return make<NameWithTemplateArgs>(Oper, TA);
  }
  return Oper;
}

// <unresolved-name>
//  extension        ::= srN <unresolved-type> [<template-args>] <unresolved-qualifier-level>* E <base-unresolved-name>
//                   ::= [gs] <base-unresolved-name>                     # x or (with "gs") ::x
//                   ::= [gs] sr <unresolved-qualifier-level>+ E <base-unresolved-name>
//                                                                       # A::x, N::y, A<T>::z; "gs" means leading "::"
// [gs] has been parsed by caller.
//                   ::= sr <unresolved-type> <base-unresolved-name>     # T::x / decltype(p)::x
//  extension        ::= sr <unresolved-type> <template-args> <base-unresolved-name>
//                                                                       # T::N::x / decltype(p)::N::x
//  (ignored)        ::= srN <unresolved-type>  <unresolved-qualifier-level>+ E <base-unresolved-name>
//
// <unresolved-qualifier-level> ::= <simple-id>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseUnresolvedName(bool Global) {
  Node *SoFar = nullptr;

  // srN <unresolved-type> [<template-args>] <unresolved-qualifier-level>* E <base-unresolved-name>
  // srN <unresolved-type>                   <unresolved-qualifier-level>+ E <base-unresolved-name>
  if (consumeIf("srN")) {
    SoFar = getDerived().parseUnresolvedType();
    if (SoFar == nullptr)
      return nullptr;

    if (look() == 'I') {
      Node *TA = getDerived().parseTemplateArgs();
      if (TA == nullptr)
        return nullptr;
      SoFar = make<NameWithTemplateArgs>(SoFar, TA);
      if (!SoFar)
        return nullptr;
    }

    while (!consumeIf('E')) {
      Node *Qual = getDerived().parseSimpleId();
      if (Qual == nullptr)
        return nullptr;
      SoFar = make<QualifiedName>(SoFar, Qual);
      if (!SoFar)
        return nullptr;
    }

    Node *Base = getDerived().parseBaseUnresolvedName();
    if (Base == nullptr)
      return nullptr;
    return make<QualifiedName>(SoFar, Base);
  }

  // [gs] <base-unresolved-name>                     # x or (with "gs") ::x
  if (!consumeIf("sr")) {
    SoFar = getDerived().parseBaseUnresolvedName();
    if (SoFar == nullptr)
      return nullptr;
    if (Global)
      SoFar = make<GlobalQualifiedName>(SoFar);
    return SoFar;
  }

  // [gs] sr <unresolved-qualifier-level>+ E   <base-unresolved-name>
  if (std::isdigit(look())) {
    do {
      Node *Qual = getDerived().parseSimpleId();
      if (Qual == nullptr)
        return nullptr;
      if (SoFar)
        SoFar = make<QualifiedName>(SoFar, Qual);
      else if (Global)
        SoFar = make<GlobalQualifiedName>(Qual);
      else
        SoFar = Qual;
      if (!SoFar)
        return nullptr;
    } while (!consumeIf('E'));
  }
  //      sr <unresolved-type>                 <base-unresolved-name>
  //      sr <unresolved-type> <template-args> <base-unresolved-name>
  else {
    SoFar = getDerived().parseUnresolvedType();
    if (SoFar == nullptr)
      return nullptr;

    if (look() == 'I') {
      Node *TA = getDerived().parseTemplateArgs();
      if (TA == nullptr)
        return nullptr;
      SoFar = make<NameWithTemplateArgs>(SoFar, TA);
      if (!SoFar)
        return nullptr;
    }
  }

  assert(SoFar != nullptr);

  Node *Base = getDerived().parseBaseUnresolvedName();
  if (Base == nullptr)
    return nullptr;
  return make<QualifiedName>(SoFar, Base);
}
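
// For illustration (editor's note): "sr1XE3foo" takes the qualifier-level
// branch above: "1X" parses as a <simple-id>, 'E' closes the qualifier list,
// and "3foo" is the <base-unresolved-name>, yielding "X::foo".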

// <abi-tags> ::= <abi-tag> [<abi-tags>]
// <abi-tag> ::= B <source-name>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseAbiTags(Node *N) {
  while (consumeIf('B')) {
    std::string_view SN = parseBareSourceName();
    if (SN.empty())
      return nullptr;
    N = make<AbiTagAttr>(N, SN);
    if (!N)
      return nullptr;
  }
  return N;
}
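
// For illustration (editor's note): abi tags attach to the name just parsed,
// so "1fB5cxx11" is the name "f" tagged "cxx11"; e.g. "_Z1fB5cxx11v"
// demangles to "f[abi:cxx11]()".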

// <number> ::= [n] <non-negative decimal integer>
template <typename Alloc, typename Derived>
std::string_view
AbstractManglingParser<Alloc, Derived>::parseNumber(bool AllowNegative) {
  const char *Tmp = First;
  if (AllowNegative)
    consumeIf('n');
  if (numLeft() == 0 || !std::isdigit(*First))
    return std::string_view();
  while (numLeft() != 0 && std::isdigit(*First))
    ++First;
  return std::string_view(Tmp, First - Tmp);
}
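
// For illustration (editor's note): the returned view includes any leading
// 'n', so with AllowNegative set "n42" yields "n42" and "42" yields "42"; if
// no digit follows, the result is an empty view, which callers treat as
// failure.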

// <positive length number> ::= [0-9]*
template <typename Alloc, typename Derived>
bool AbstractManglingParser<Alloc, Derived>::parsePositiveInteger(size_t *Out) {
  *Out = 0;
  if (look() < '0' || look() > '9')
    return true;
  while (look() >= '0' && look() <= '9') {
    *Out *= 10;
    *Out += static_cast<size_t>(consume() - '0');
  }
  return false;
}

template <typename Alloc, typename Derived>
std::string_view AbstractManglingParser<Alloc, Derived>::parseBareSourceName() {
  size_t Int = 0;
  if (parsePositiveInteger(&Int) || numLeft() < Int)
    return {};
  std::string_view R(First, Int);
  First += Int;
  return R;
}
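
// For illustration (editor's note): "3foo" parses the length 3 and returns
// the view "foo", advancing First past it; a length running past the end of
// the input fails with an empty view.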

// <function-type> ::= [<CV-qualifiers>] [<exception-spec>] [Dx] F [Y] <bare-function-type> [<ref-qualifier>] E
//
// <exception-spec> ::= Do                # non-throwing exception-specification (e.g., noexcept, throw())
//                  ::= DO <expression> E # computed (instantiation-dependent) noexcept
//                  ::= Dw <type>+ E      # dynamic exception specification with instantiation-dependent types
//
// <ref-qualifier> ::= R                   # & ref-qualifier
// <ref-qualifier> ::= O                   # && ref-qualifier
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseFunctionType() {
  Qualifiers CVQuals = parseCVQualifiers();

  Node *ExceptionSpec = nullptr;
  if (consumeIf("Do")) {
    ExceptionSpec = make<NameType>("noexcept");
    if (!ExceptionSpec)
      return nullptr;
  } else if (consumeIf("DO")) {
    Node *E = getDerived().parseExpr();
    if (E == nullptr || !consumeIf('E'))
      return nullptr;
    ExceptionSpec = make<NoexceptSpec>(E);
    if (!ExceptionSpec)
      return nullptr;
  } else if (consumeIf("Dw")) {
    size_t SpecsBegin = Names.size();
    while (!consumeIf('E')) {
      Node *T = getDerived().parseType();
      if (T == nullptr)
        return nullptr;
      Names.push_back(T);
    }
    ExceptionSpec =
      make<DynamicExceptionSpec>(popTrailingNodeArray(SpecsBegin));
    if (!ExceptionSpec)
      return nullptr;
  }

  consumeIf("Dx"); // transaction safe

  if (!consumeIf('F'))
    return nullptr;
  consumeIf('Y'); // extern "C"
  Node *ReturnType = getDerived().parseType();
  if (ReturnType == nullptr)
    return nullptr;

  FunctionRefQual ReferenceQualifier = FrefQualNone;
  size_t ParamsBegin = Names.size();
  while (true) {
    if (consumeIf('E'))
      break;
    if (consumeIf('v'))
      continue;
    if (consumeIf("RE")) {
      ReferenceQualifier = FrefQualLValue;
      break;
    }
    if (consumeIf("OE")) {
      ReferenceQualifier = FrefQualRValue;
      break;
    }
    Node *T = getDerived().parseType();
    if (T == nullptr)
      return nullptr;
    Names.push_back(T);
  }

  NodeArray Params = popTrailingNodeArray(ParamsBegin);
  return make<FunctionType>(ReturnType, Params, CVQuals,
                            ReferenceQualifier, ExceptionSpec);
}
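
// For illustration (editor's note): "KFivE" is a const-qualified function
// type returning int and taking no parameters ('v'); it occurs in contexts
// such as the pointer-to-member type "M1CKFivE", which demangles to
// "int (C::*)() const".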

// extension:
// <vector-type>           ::= Dv <positive dimension number> _ <extended element type>
//                         ::= Dv [<dimension expression>] _ <element type>
// <extended element type> ::= <element type>
//                         ::= p # AltiVec vector pixel
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseVectorType() {
  if (!consumeIf("Dv"))
    return nullptr;
  if (look() >= '1' && look() <= '9') {
    Node *DimensionNumber = make<NameType>(parseNumber());
    if (!DimensionNumber)
      return nullptr;
    if (!consumeIf('_'))
      return nullptr;
    if (consumeIf('p'))
      return make<PixelVectorType>(DimensionNumber);
    Node *ElemType = getDerived().parseType();
    if (ElemType == nullptr)
      return nullptr;
    return make<VectorType>(ElemType, DimensionNumber);
  }

  if (!consumeIf('_')) {
    Node *DimExpr = getDerived().parseExpr();
    if (!DimExpr)
      return nullptr;
    if (!consumeIf('_'))
      return nullptr;
    Node *ElemType = getDerived().parseType();
    if (!ElemType)
      return nullptr;
    return make<VectorType>(ElemType, DimExpr);
  }
  Node *ElemType = getDerived().parseType();
  if (!ElemType)
    return nullptr;
  return make<VectorType>(ElemType, /*Dimension=*/nullptr);
}
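
// For illustration (editor's note): "Dv4_f" takes the literal-dimension
// branch and denotes a vector of 4 floats; "Dv4_p" would instead denote an
// AltiVec pixel vector of the same dimension.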

// <decltype>  ::= Dt <expression> E  # decltype of an id-expression or class member access (C++0x)
//             ::= DT <expression> E  # decltype of an expression (C++0x)
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseDecltype() {
  if (!consumeIf('D'))
    return nullptr;
  if (!consumeIf('t') && !consumeIf('T'))
    return nullptr;
  Node *E = getDerived().parseExpr();
  if (E == nullptr)
    return nullptr;
  if (!consumeIf('E'))
    return nullptr;
  return make<EnclosingExpr>("decltype", E);
}
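
// For illustration (editor's note): "DTplLi1ELi2EE" wraps the expression
// "plLi1ELi2E" (1 + 2), producing the decltype of that expression.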

// <array-type> ::= A <positive dimension number> _ <element type>
//              ::= A [<dimension expression>] _ <element type>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseArrayType() {
  if (!consumeIf('A'))
    return nullptr;

  Node *Dimension = nullptr;

  if (std::isdigit(look())) {
    Dimension = make<NameType>(parseNumber());
    if (!Dimension)
      return nullptr;
    if (!consumeIf('_'))
      return nullptr;
  } else if (!consumeIf('_')) {
    Node *DimExpr = getDerived().parseExpr();
    if (DimExpr == nullptr)
      return nullptr;
    if (!consumeIf('_'))
      return nullptr;
    Dimension = DimExpr;
  }

  Node *Ty = getDerived().parseType();
  if (Ty == nullptr)
    return nullptr;
  return make<ArrayType>(Ty, Dimension);
}
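
// For illustration (editor's note): "A10_i" parses the dimension "10", the
// '_' separator, and the element type 'i', denoting int[10]; e.g.
// "_Z1fRA10_i" demangles to "f(int (&) [10])".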

// <pointer-to-member-type> ::= M <class type> <member type>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parsePointerToMemberType() {
  if (!consumeIf('M'))
    return nullptr;
  Node *ClassType = getDerived().parseType();
  if (ClassType == nullptr)
    return nullptr;
  Node *MemberType = getDerived().parseType();
  if (MemberType == nullptr)
    return nullptr;
  return make<PointerToMemberType>(ClassType, MemberType);
}
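
// For illustration (editor's note): "M1Ci" is a pointer to an int member of
// class C; e.g. "_Z1fM1Ci" demangles to "f(int C::*)".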

// <class-enum-type> ::= <name>     # non-dependent type name, dependent type name, or dependent typename-specifier
//                   ::= Ts <name>  # dependent elaborated type specifier using 'struct' or 'class'
//                   ::= Tu <name>  # dependent elaborated type specifier using 'union'
//                   ::= Te <name>  # dependent elaborated type specifier using 'enum'
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseClassEnumType() {
  std::string_view ElabSpef;
  if (consumeIf("Ts"))
    ElabSpef = "struct";
  else if (consumeIf("Tu"))
    ElabSpef = "union";
  else if (consumeIf("Te"))
    ElabSpef = "enum";

  Node *Name = getDerived().parseName();
  if (Name == nullptr)
    return nullptr;

  if (!ElabSpef.empty())
    return make<ElaboratedTypeSpefType>(ElabSpef, Name);

  return Name;
}
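
// For illustration (editor's note): "Ts3foo" parses the elaboration keyword
// as "struct" and the name "foo", yielding the dependent elaborated type
// "struct foo"; a bare "3foo" yields just the name.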

// <qualified-type>     ::= <qualifiers> <type>
// <qualifiers> ::= <extended-qualifier>* <CV-qualifiers>
// <extended-qualifier> ::= U <source-name> [<template-args>] # vendor extended type qualifier
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseQualifiedType() {
  if (consumeIf('U')) {
    std::string_view Qual = parseBareSourceName();
    if (Qual.empty())
      return nullptr;

    // extension            ::= U <objc-name> <objc-type>  # objc-type<identifier>
    if (llvm::itanium_demangle::starts_with(Qual, "objcproto")) {
      constexpr size_t Len = sizeof("objcproto") - 1;
      std::string_view ProtoSourceName(Qual.data() + Len, Qual.size() - Len);
      std::string_view Proto;
      {
        ScopedOverride<const char *> SaveFirst(First, ProtoSourceName.data()),
            SaveLast(Last, &*ProtoSourceName.rbegin() + 1);
        Proto = parseBareSourceName();
      }
      if (Proto.empty())
        return nullptr;
      Node *Child = getDerived().parseQualifiedType();
      if (Child == nullptr)
        return nullptr;
      return make<ObjCProtoName>(Child, Proto);
    }

    Node *TA = nullptr;
    if (look() == 'I') {
      TA = getDerived().parseTemplateArgs();
      if (TA == nullptr)
        return nullptr;
    }

    Node *Child = getDerived().parseQualifiedType();
    if (Child == nullptr)
      return nullptr;
    return make<VendorExtQualType>(Child, Qual, TA);
  }

  Qualifiers Quals = parseCVQualifiers();
  Node *Ty = getDerived().parseType();
  if (Ty == nullptr)
    return nullptr;
  if (Quals != QualNone)
    Ty = make<QualType>(Ty, Quals);
  return Ty;
}

// <type>      ::= <builtin-type>
//             ::= <qualified-type>
//             ::= <function-type>
//             ::= <class-enum-type>
//             ::= <array-type>
//             ::= <pointer-to-member-type>
//             ::= <template-param>
//             ::= <template-template-param> <template-args>
//             ::= <decltype>
//             ::= P <type>        # pointer
//             ::= R <type>        # l-value reference
//             ::= O <type>        # r-value reference (C++11)
//             ::= C <type>        # complex pair (C99)
//             ::= G <type>        # imaginary (C99)
//             ::= <substitution>  # See Compression below
// extension   ::= U <objc-name> <objc-type>  # objc-type<identifier>
// extension   ::= <vector-type> # <vector-type> starts with Dv
//
// <objc-name> ::= <k0 number> objcproto <k1 number> <identifier>  # k0 = 9 + <number of digits in k1> + k1
// <objc-type> ::= <source-name>  # PU<11+>objcproto 11objc_object<source-name> 11objc_object -> id<source-name>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseType() {
  Node *Result = nullptr;

  switch (look()) {
  //             ::= <qualified-type>
  case 'r':
  case 'V':
  case 'K': {
    unsigned AfterQuals = 0;
    if (look(AfterQuals) == 'r') ++AfterQuals;
    if (look(AfterQuals) == 'V') ++AfterQuals;
    if (look(AfterQuals) == 'K') ++AfterQuals;

    if (look(AfterQuals) == 'F' ||
        (look(AfterQuals) == 'D' &&
         (look(AfterQuals + 1) == 'o' || look(AfterQuals + 1) == 'O' ||
          look(AfterQuals + 1) == 'w' || look(AfterQuals + 1) == 'x'))) {
      Result = getDerived().parseFunctionType();
      break;
    }
    DEMANGLE_FALLTHROUGH;
  }
  case 'U': {
    Result = getDerived().parseQualifiedType();
    break;
  }
  // <builtin-type> ::= v    # void
  case 'v':
    ++First;
    return make<NameType>("void");
  //                ::= w    # wchar_t
  case 'w':
    ++First;
    return make<NameType>("wchar_t");
  //                ::= b    # bool
  case 'b':
    ++First;
    return make<NameType>("bool");
  //                ::= c    # char
  case 'c':
    ++First;
    return make<NameType>("char");
  //                ::= a    # signed char
  case 'a':
    ++First;
    return make<NameType>("signed char");
  //                ::= h    # unsigned char
  case 'h':
    ++First;
    return make<NameType>("unsigned char");
  //                ::= s    # short
  case 's':
    ++First;
    return make<NameType>("short");
  //                ::= t    # unsigned short
  case 't':
    ++First;
    return make<NameType>("unsigned short");
  //                ::= i    # int
  case 'i':
    ++First;
    return make<NameType>("int");
  //                ::= j    # unsigned int
  case 'j':
    ++First;
    return make<NameType>("unsigned int");
  //                ::= l    # long
  case 'l':
    ++First;
    return make<NameType>("long");
  //                ::= m    # unsigned long
  case 'm':
    ++First;
    return make<NameType>("unsigned long");
  //                ::= x    # long long, __int64
  case 'x':
    ++First;
    return make<NameType>("long long");
  //                ::= y    # unsigned long long, __int64
  case 'y':
    ++First;
    return make<NameType>("unsigned long long");
  //                ::= n    # __int128
  case 'n':
    ++First;
    return make<NameType>("__int128");
  //                ::= o    # unsigned __int128
  case 'o':
    ++First;
    return make<NameType>("unsigned __int128");
  //                ::= f    # float
  case 'f':
    ++First;
    return make<NameType>("float");
  //                ::= d    # double
  case 'd':
    ++First;
    return make<NameType>("double");
  //                ::= e    # long double, __float80
  case 'e':
    ++First;
    return make<NameType>("long double");
  //                ::= g    # __float128
  case 'g':
    ++First;
    return make<NameType>("__float128");
  //                ::= z    # ellipsis
  case 'z':
    ++First;
    return make<NameType>("...");

  // <builtin-type> ::= u <source-name>    # vendor extended type
  case 'u': {
    ++First;
    std::string_view Res = parseBareSourceName();
    if (Res.empty())
      return nullptr;
    // Typically, <builtin-type>s are not considered substitution candidates,
    // but the exception to that rule is vendor extended types (Itanium C++
    // ABI 5.9.1).
    Result = make<NameType>(Res);
    break;
  }
  case 'D':
    switch (look(1)) {
    //                ::= Dd   # IEEE 754r decimal floating point (64 bits)
    case 'd':
      First += 2;
      return make<NameType>("decimal64");
    //                ::= De   # IEEE 754r decimal floating point (128 bits)
    case 'e':
      First += 2;
      return make<NameType>("decimal128");
    //                ::= Df   # IEEE 754r decimal floating point (32 bits)
    case 'f':
      First += 2;
      return make<NameType>("decimal32");
    //                ::= Dh   # IEEE 754r half-precision floating point (16 bits)
    case 'h':
      First += 2;
      return make<NameType>("half");
    //                ::= DF <number> _ # ISO/IEC TS 18661 binary floating point (N bits)
    case 'F': {
      First += 2;
      Node *DimensionNumber = make<NameType>(parseNumber());
      if (!DimensionNumber)
        return nullptr;
      if (!consumeIf('_'))
        return nullptr;
      return make<BinaryFPType>(DimensionNumber);
    }
    //                ::= DB <number> _                             # C23 signed _BitInt(N)
    //                ::= DB <instantiation-dependent expression> _ # C23 signed _BitInt(N)
    //                ::= DU <number> _                             # C23 unsigned _BitInt(N)
    //                ::= DU <instantiation-dependent expression> _ # C23 unsigned _BitInt(N)
    case 'B':
    case 'U': {
      bool Signed = look(1) == 'B';
      First += 2;
      Node *Size = std::isdigit(look()) ? make<NameType>(parseNumber())
                                        : getDerived().parseExpr();
      if (!Size)
        return nullptr;
      if (!consumeIf('_'))
        return nullptr;
      return make<BitIntType>(Size, Signed);
    }
    //                ::= Di   # char32_t
    case 'i':
      First += 2;
      return make<NameType>("char32_t");
    //                ::= Ds   # char16_t
    case 's':
      First += 2;
      return make<NameType>("char16_t");
    //                ::= Du   # char8_t (C++2a, not yet in the Itanium spec)
    case 'u':
      First += 2;
      return make<NameType>("char8_t");
    //                ::= Da   # auto (in dependent new-expressions)
    case 'a':
      First += 2;
      return make<NameType>("auto");
    //                ::= Dc   # decltype(auto)
    case 'c':
      First += 2;
      return make<NameType>("decltype(auto)");
    //                ::= Dn   # std::nullptr_t (i.e., decltype(nullptr))
    case 'n':
      First += 2;
      return make<NameType>("std::nullptr_t");

    //             ::= <decltype>
    case 't':
    case 'T': {
      Result = getDerived().parseDecltype();
      break;
    }
    // extension   ::= <vector-type> # <vector-type> starts with Dv
    case 'v': {
      Result = getDerived().parseVectorType();
      break;
    }
    //           ::= Dp <type>       # pack expansion (C++0x)
    case 'p': {
      First += 2;
      Node *Child = getDerived().parseType();
      if (!Child)
        return nullptr;
      Result = make<ParameterPackExpansion>(Child);
      break;
    }
    // Exception specifier on a function type.
    case 'o':
    case 'O':
    case 'w':
    // Transaction safe function type.
    case 'x':
      Result = getDerived().parseFunctionType();
      break;
    }
    break;
  //             ::= <function-type>
  case 'F': {
    Result = getDerived().parseFunctionType();
    break;
  }
  //             ::= <array-type>
  case 'A': {
    Result = getDerived().parseArrayType();
    break;
  }
  //             ::= <pointer-to-member-type>
  case 'M': {
    Result = getDerived().parsePointerToMemberType();
    break;
  }
  //             ::= <template-param>
  case 'T': {
    // This could be an elaborated type specifier on a <class-enum-type>.
    if (look(1) == 's' || look(1) == 'u' || look(1) == 'e') {
      Result = getDerived().parseClassEnumType();
      break;
    }

    Result = getDerived().parseTemplateParam();
    if (Result == nullptr)
      return nullptr;

    // Result could be either of:
    //   <type>        ::= <template-param>
    //   <type>        ::= <template-template-param> <template-args>
    //
    //   <template-template-param> ::= <template-param>
    //                             ::= <substitution>
    //
    // If this is followed by some <template-args>, and we're permitted to
    // parse them, take the second production.

    if (TryToParseTemplateArgs && look() == 'I') {
      Node *TA = getDerived().parseTemplateArgs();
      if (TA == nullptr)
        return nullptr;
      Result = make<NameWithTemplateArgs>(Result, TA);
    }
    break;
  }
  //             ::= P <type>        # pointer
  case 'P': {
    ++First;
    Node *Ptr = getDerived().parseType();
    if (Ptr == nullptr)
      return nullptr;
    Result = make<PointerType>(Ptr);
    break;
  }
  //             ::= R <type>        # l-value reference
  case 'R': {
    ++First;
    Node *Ref = getDerived().parseType();
    if (Ref == nullptr)
      return nullptr;
    Result = make<ReferenceType>(Ref, ReferenceKind::LValue);
    break;
  }
  //             ::= O <type>        # r-value reference (C++11)
  case 'O': {
    ++First;
    Node *Ref = getDerived().parseType();
    if (Ref == nullptr)
      return nullptr;
    Result = make<ReferenceType>(Ref, ReferenceKind::RValue);
    break;
  }
  //             ::= C <type>        # complex pair (C99)
  case 'C': {
    ++First;
    Node *P = getDerived().parseType();
    if (P == nullptr)
      return nullptr;
    Result = make<PostfixQualifiedType>(P, " complex");
    break;
  }
  //             ::= G <type>        # imaginary (C99)
  case 'G': {
    ++First;
    Node *P = getDerived().parseType();
    if (P == nullptr)
      return nullptr;
    Result = make<PostfixQualifiedType>(P, " imaginary");
    break;
  }
  //             ::= <substitution>  # See Compression below
  case 'S': {
    if (look(1) != 't') {
      bool IsSubst = false;
      Result = getDerived().parseUnscopedName(nullptr, &IsSubst);
      if (!Result)
        return nullptr;

      // Sub could be either of:
      //   <type>        ::= <substitution>
      //   <type>        ::= <template-template-param> <template-args>
      //
      //   <template-template-param> ::= <template-param>
      //                             ::= <substitution>
      //
      // If this is followed by some <template-args>, and we're permitted to
      // parse them, take the second production.

      if (look() == 'I' && (!IsSubst || TryToParseTemplateArgs)) {
        if (!IsSubst)
          Subs.push_back(Result);
        Node *TA = getDerived().parseTemplateArgs();
        if (TA == nullptr)
          return nullptr;
        Result = make<NameWithTemplateArgs>(Result, TA);
      } else if (IsSubst) {
        // If all we parsed was a substitution, don't re-insert into the
        // substitution table.
        return Result;
      }
      break;
    }
    DEMANGLE_FALLTHROUGH;
  }
  //        ::= <class-enum-type>
  default: {
    Result = getDerived().parseClassEnumType();
    break;
  }
  }

  // If we parsed a type, insert it into the substitution table. Note that all
  // <builtin-type>s and <substitution>s have already bailed out, because they
  // don't get substitutions.
  if (Result != nullptr)
    Subs.push_back(Result);
  return Result;
}
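
// For illustration (editor's note): parseType drives most of the grammar. For
// "PKc" it dispatches on 'P' (pointer) and recurses on "Kc" (const-qualified
// char), recording both the qualified type and the pointer type as
// substitution candidates; e.g. "_Z1fPKc" demangles to "f(char const*)".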

template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parsePrefixExpr(std::string_view Kind,
                                                        Node::Prec Prec) {
  Node *E = getDerived().parseExpr();
  if (E == nullptr)
    return nullptr;
  return make<PrefixExpr>(Kind, E, Prec);
}

template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseBinaryExpr(std::string_view Kind,
                                                        Node::Prec Prec) {
  Node *LHS = getDerived().parseExpr();
  if (LHS == nullptr)
    return nullptr;
  Node *RHS = getDerived().parseExpr();
  if (RHS == nullptr)
    return nullptr;
  return make<BinaryExpr>(LHS, Kind, RHS, Prec);
}

template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseIntegerLiteral(
    std::string_view Lit) {
  std::string_view Tmp = parseNumber(true);
  if (!Tmp.empty() && consumeIf('E'))
    return make<IntegerLiteral>(Lit, Tmp);
  return nullptr;
}

// <CV-Qualifiers> ::= [r] [V] [K]
template <typename Alloc, typename Derived>
Qualifiers AbstractManglingParser<Alloc, Derived>::parseCVQualifiers() {
  Qualifiers CVR = QualNone;
  if (consumeIf('r'))
    CVR |= QualRestrict;
  if (consumeIf('V'))
    CVR |= QualVolatile;
  if (consumeIf('K'))
    CVR |= QualConst;
  return CVR;
}
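
// For illustration (editor's note): the qualifiers must appear in the fixed
// order r, V, K, so "rVK" yields restrict|volatile|const, while for "Kr" only
// the const qualifier is consumed and 'r' is left for the caller.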

// <function-param> ::= fp <top-level CV-Qualifiers> _                                     # L == 0, first parameter
//                  ::= fp <top-level CV-Qualifiers> <parameter-2 non-negative number> _   # L == 0, second and later parameters
//                  ::= fL <L-1 non-negative number> p <top-level CV-Qualifiers> _         # L > 0, first parameter
//                  ::= fL <L-1 non-negative number> p <top-level CV-Qualifiers> <parameter-2 non-negative number> _   # L > 0, second and later parameters
//                  ::= fpT      # 'this' expression (not part of standard?)
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseFunctionParam() {
  if (consumeIf("fpT"))
    return make<NameType>("this");
  if (consumeIf("fp")) {
    parseCVQualifiers();
    std::string_view Num = parseNumber();
    if (!consumeIf('_'))
      return nullptr;
    return make<FunctionParam>(Num);
  }
  if (consumeIf("fL")) {
    if (parseNumber().empty())
      return nullptr;
    if (!consumeIf('p'))
      return nullptr;
    parseCVQualifiers();
    std::string_view Num = parseNumber();
    if (!consumeIf('_'))
      return nullptr;
    return make<FunctionParam>(Num);
  }
  return nullptr;
}
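
// For illustration (editor's note): "fp_" names the first parameter of the
// enclosing function and "fp0_" the second (the number is the parameter index
// minus one); the "fL<number>p..." forms refer to parameters of functions at
// outer nesting levels.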

// cv <type> <expression>                               # conversion with one argument
// cv <type> _ <expression>* E                          # conversion with a different number of arguments
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseConversionExpr() {
  if (!consumeIf("cv"))
    return nullptr;
  Node *Ty;
  {
    ScopedOverride<bool> SaveTemp(TryToParseTemplateArgs, false);
    Ty = getDerived().parseType();
  }

  if (Ty == nullptr)
    return nullptr;

  if (consumeIf('_')) {
    size_t ExprsBegin = Names.size();
    while (!consumeIf('E')) {
      Node *E = getDerived().parseExpr();
      if (E == nullptr)
        return nullptr;
      Names.push_back(E);
    }
    NodeArray Exprs = popTrailingNodeArray(ExprsBegin);
    return make<ConversionExpr>(Ty, Exprs);
  }

  Node *E[1] = {getDerived().parseExpr()};
  if (E[0] == nullptr)
    return nullptr;
  return make<ConversionExpr>(Ty, makeNodeArray(E, E + 1));
}
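
// For illustration (editor's note): "cvT_Li1E" converts the literal 1 to the
// template parameter type T, printing along the lines of "(T)(1)"; the
// '_' ... 'E' form carries zero or more arguments, e.g. "cv1X_E" is a
// zero-argument conversion to X.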

// <expr-primary> ::= L <type> <value number> E                          # integer literal
//                ::= L <type> <value float> E                           # floating literal
//                ::= L <string type> E                                  # string literal
//                ::= L <nullptr type> E                                 # nullptr literal (i.e., "LDnE")
//                ::= L <lambda type> E                                  # lambda expression
// FIXME:         ::= L <type> <real-part float> _ <imag-part float> E   # complex floating point literal (C 2000)
//                ::= L <mangled-name> E                                 # external name
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseExprPrimary() {
  if (!consumeIf('L'))
    return nullptr;
  switch (look()) {
  case 'w':
    ++First;
    return getDerived().parseIntegerLiteral("wchar_t");
  case 'b':
    if (consumeIf("b0E"))
      return make<BoolExpr>(0);
    if (consumeIf("b1E"))
      return make<BoolExpr>(1);
    return nullptr;
  case 'c':
    ++First;
    return getDerived().parseIntegerLiteral("char");
  case 'a':
    ++First;
    return getDerived().parseIntegerLiteral("signed char");
  case 'h':
    ++First;
    return getDerived().parseIntegerLiteral("unsigned char");
  case 's':
    ++First;
    return getDerived().parseIntegerLiteral("short");
  case 't':
    ++First;
    return getDerived().parseIntegerLiteral("unsigned short");
  case 'i':
    ++First;
    return getDerived().parseIntegerLiteral("");
  case 'j':
    ++First;
    return getDerived().parseIntegerLiteral("u");
  case 'l':
    ++First;
    return getDerived().parseIntegerLiteral("l");
  case 'm':
    ++First;
    return getDerived().parseIntegerLiteral("ul");
  case 'x':
    ++First;
    return getDerived().parseIntegerLiteral("ll");
  case 'y':
    ++First;
    return getDerived().parseIntegerLiteral("ull");
  case 'n':
    ++First;
    return getDerived().parseIntegerLiteral("__int128");
  case 'o':
    ++First;
    return getDerived().parseIntegerLiteral("unsigned __int128");
  case 'f':
    ++First;
    return getDerived().template parseFloatingLiteral<float>();
  case 'd':
    ++First;
    return getDerived().template parseFloatingLiteral<double>();
  case 'e':
    ++First;
#if defined(__powerpc__) || defined(__s390__)
    // Handle cases where long doubles encoded with e have the same size
    // and representation as doubles.
    return getDerived().template parseFloatingLiteral<double>();
#else
    return getDerived().template parseFloatingLiteral<long double>();
#endif
  case '_':
    if (consumeIf("_Z")) {
      Node *R = getDerived().parseEncoding();
      if (R != nullptr && consumeIf('E'))
        return R;
    }
    return nullptr;
  case 'A': {
    Node *T = getDerived().parseType();
    if (T == nullptr)
      return nullptr;
    // FIXME: We need to include the string contents in the mangling.
    if (consumeIf('E'))
      return make<StringLiteral>(T);
    return nullptr;
  }
  case 'D':
    if (consumeIf("Dn") && (consumeIf('0'), consumeIf('E')))
      return make<NameType>("nullptr");
    return nullptr;
  case 'T':
    // Invalid mangled name per
    //   http://sourcerytools.com/pipermail/cxx-abi-dev/2011-August/002422.html
    return nullptr;
  case 'U': {
    // FIXME: Should we support LUb... for block literals?
    if (look(1) != 'l')
      return nullptr;
    Node *T = parseUnnamedTypeName(nullptr);
    if (!T || !consumeIf('E'))
      return nullptr;
    return make<LambdaExpr>(T);
  }
  default: {
    // Might be an enum literal: L <type> <value number> E.
    Node *T = getDerived().parseType();
    if (T == nullptr)
      return nullptr;
    std::string_view N = parseNumber(/*AllowNegative=*/true);
    if (N.empty())
      return nullptr;
    if (!consumeIf('E'))
      return nullptr;
    return make<EnumLiteral>(T, N);
  }
  }
}
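
// For illustration (editor's note): "Li5E" is the int literal 5, "Lb1E" the
// bool literal true, and "L_Z1fvE" an external-name literal referring to the
// encoding "_Z1fv" (i.e. "f()").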

// <braced-expression> ::= <expression>
//                     ::= di <field source-name> <braced-expression>    # .name = expr
//                     ::= dx <index expression> <braced-expression>     # [expr] = expr
//                     ::= dX <range begin expression> <range end expression> <braced-expression>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseBracedExpr() {
  if (look() == 'd') {
    switch (look(1)) {
    case 'i': {
      First += 2;
      Node *Field = getDerived().parseSourceName(/*NameState=*/nullptr);
      if (Field == nullptr)
        return nullptr;
      Node *Init = getDerived().parseBracedExpr();
      if (Init == nullptr)
        return nullptr;
      return make<BracedExpr>(Field, Init, /*isArray=*/false);
    }
    case 'x': {
      First += 2;
      Node *Index = getDerived().parseExpr();
      if (Index == nullptr)
        return nullptr;
      Node *Init = getDerived().parseBracedExpr();
      if (Init == nullptr)
        return nullptr;
      return make<BracedExpr>(Index, Init, /*isArray=*/true);
    }
    case 'X': {
      First += 2;
      Node *RangeBegin = getDerived().parseExpr();
      if (RangeBegin == nullptr)
        return nullptr;
      Node *RangeEnd = getDerived().parseExpr();
      if (RangeEnd == nullptr)
        return nullptr;
      Node *Init = getDerived().parseBracedExpr();
      if (Init == nullptr)
        return nullptr;
      return make<BracedRangeExpr>(RangeBegin, RangeEnd, Init);
    }
    }
  }
  return getDerived().parseExpr();
}
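
// For illustration (editor's note): within a braced initializer list,
// "di1xLi1E" is the designated initializer ".x = 1" and "dxLi0ELi1E" the
// array designator "[0] = 1".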

// (not yet in the spec)
// <fold-expr> ::= fL <binary-operator-name> <expression> <expression>
//             ::= fR <binary-operator-name> <expression> <expression>
//             ::= fl <binary-operator-name> <expression>
//             ::= fr <binary-operator-name> <expression>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseFoldExpr() {
  if (!consumeIf('f'))
    return nullptr;

  bool IsLeftFold = false, HasInitializer = false;
  switch (look()) {
  default:
    return nullptr;
  case 'L':
    IsLeftFold = true;
    HasInitializer = true;
    break;
  case 'R':
    HasInitializer = true;
    break;
  case 'l':
    IsLeftFold = true;
    break;
  case 'r':
    break;
  }
  ++First;

  const auto *Op = parseOperatorEncoding();
  if (!Op)
    return nullptr;
  if (!(Op->getKind() == OperatorInfo::Binary
        || (Op->getKind() == OperatorInfo::Member
            && Op->getName().back() == '*')))
    return nullptr;

  Node *Pack = getDerived().parseExpr();
  if (Pack == nullptr)
    return nullptr;

  Node *Init = nullptr;
  if (HasInitializer) {
    Init = getDerived().parseExpr();
    if (Init == nullptr)
      return nullptr;
  }

  if (IsLeftFold && Init)
    std::swap(Pack, Init);

  return make<FoldExpr>(IsLeftFold, Op->getSymbol(), Pack, Init);
}
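
// For illustration (editor's note): "flplT_" is a unary left fold over '+'
// with pack operand T, i.e. "(... + T)". For binary (fL/fR) left folds the
// two parsed operands are swapped so the initializer ends up on the left of
// the printed fold.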

// <expression> ::= mc <parameter type> <expr> [<offset number>] E
//
// Not yet in the spec: https://github.com/itanium-cxx-abi/cxx-abi/issues/47
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parsePointerToMemberConversionExpr(
    Node::Prec Prec) {
  Node *Ty = getDerived().parseType();
  if (!Ty)
    return nullptr;
  Node *Expr = getDerived().parseExpr();
  if (!Expr)
    return nullptr;
  std::string_view Offset = getDerived().parseNumber(true);
  if (!consumeIf('E'))
    return nullptr;
  return make<PointerToMemberConversionExpr>(Ty, Expr, Offset, Prec);
}

// <expression> ::= so <referent type> <expr> [<offset number>] <union-selector>* [p] E
// <union-selector> ::= _ [<number>]
//
// Not yet in the spec: https://github.com/itanium-cxx-abi/cxx-abi/issues/47
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseSubobjectExpr() {
  Node *Ty = getDerived().parseType();
  if (!Ty)
    return nullptr;
  Node *Expr = getDerived().parseExpr();
  if (!Expr)
    return nullptr;
  std::string_view Offset = getDerived().parseNumber(true);
  size_t SelectorsBegin = Names.size();
  while (consumeIf('_')) {
    Node *Selector = make<NameType>(parseNumber());
    if (!Selector)
      return nullptr;
    Names.push_back(Selector);
  }
  bool OnePastTheEnd = consumeIf('p');
  if (!consumeIf('E'))
    return nullptr;
  return make<SubobjectExpr>(
      Ty, Expr, Offset, popTrailingNodeArray(SelectorsBegin), OnePastTheEnd);
}

// <expression> ::= <unary operator-name> <expression>
//              ::= <binary operator-name> <expression> <expression>
//              ::= <ternary operator-name> <expression> <expression> <expression>
//              ::= cl <expression>+ E                                   # call
//              ::= cv <type> <expression>                               # conversion with one argument
//              ::= cv <type> _ <expression>* E                          # conversion with a different number of arguments
//              ::= [gs] nw <expression>* _ <type> E                     # new (expr-list) type
//              ::= [gs] nw <expression>* _ <type> <initializer>         # new (expr-list) type (init)
//              ::= [gs] na <expression>* _ <type> E                     # new[] (expr-list) type
//              ::= [gs] na <expression>* _ <type> <initializer>         # new[] (expr-list) type (init)
//              ::= [gs] dl <expression>                                 # delete expression
//              ::= [gs] da <expression>                                 # delete[] expression
//              ::= pp_ <expression>                                     # prefix ++
//              ::= mm_ <expression>                                     # prefix --
//              ::= ti <type>                                            # typeid (type)
//              ::= te <expression>                                      # typeid (expression)
//              ::= dc <type> <expression>                               # dynamic_cast<type> (expression)
//              ::= sc <type> <expression>                               # static_cast<type> (expression)
//              ::= cc <type> <expression>                               # const_cast<type> (expression)
//              ::= rc <type> <expression>                               # reinterpret_cast<type> (expression)
//              ::= st <type>                                            # sizeof (a type)
//              ::= sz <expression>                                      # sizeof (an expression)
//              ::= at <type>                                            # alignof (a type)
//              ::= az <expression>                                      # alignof (an expression)
//              ::= nx <expression>                                      # noexcept (expression)
//              ::= <template-param>
//              ::= <function-param>
//              ::= dt <expression> <unresolved-name>                    # expr.name
//              ::= pt <expression> <unresolved-name>                    # expr->name
//              ::= ds <expression> <expression>                         # expr.*expr
//              ::= sZ <template-param>                                  # size of a parameter pack
//              ::= sZ <function-param>                                  # size of a function parameter pack
//              ::= sP <template-arg>* E                                 # sizeof...(T), size of a captured template parameter pack from an alias template
//              ::= sp <expression>                                      # pack expansion
//              ::= tw <expression>                                      # throw expression
//              ::= tr                                                   # throw with no operand (rethrow)
//              ::= <unresolved-name>                                    # f(p), N::f(p), ::f(p),
//                                                                       # freestanding dependent name (e.g., T::x),
//                                                                       # objectless nonstatic member reference
//              ::= fL <binary-operator-name> <expression> <expression>
//              ::= fR <binary-operator-name> <expression> <expression>
//              ::= fl <binary-operator-name> <expression>
//              ::= fr <binary-operator-name> <expression>
//              ::= <expr-primary>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseExpr() {
  bool Global = consumeIf("gs");

  const auto *Op = parseOperatorEncoding();
  if (Op) {
    auto Sym = Op->getSymbol();
    switch (Op->getKind()) {
    case OperatorInfo::Binary:
      // Binary operator: lhs @ rhs
      return getDerived().parseBinaryExpr(Sym, Op->getPrecedence());
    case OperatorInfo::Prefix:
      // Prefix unary operator: @ expr
      return getDerived().parsePrefixExpr(Sym, Op->getPrecedence());
    case OperatorInfo::Postfix: {
      // Postfix unary operator: expr @
      if (consumeIf('_'))
        return getDerived().parsePrefixExpr(Sym, Op->getPrecedence());
      Node *Ex = getDerived().parseExpr();
      if (Ex == nullptr)
        return nullptr;
      return make<PostfixExpr>(Ex, Sym, Op->getPrecedence());
    }
    case OperatorInfo::Array: {
      // Array Index:  lhs [ rhs ]
      Node *Base = getDerived().parseExpr();
      if (Base == nullptr)
        return nullptr;
      Node *Index = getDerived().parseExpr();
      if (Index == nullptr)
        return nullptr;
      return make<ArraySubscriptExpr>(Base, Index, Op->getPrecedence());
    }
    case OperatorInfo::Member: {
      // Member access lhs @ rhs
      Node *LHS = getDerived().parseExpr();
      if (LHS == nullptr)
        return nullptr;
      Node *RHS = getDerived().parseExpr();
      if (RHS == nullptr)
        return nullptr;
      return make<MemberExpr>(LHS, Sym, RHS, Op->getPrecedence());
    }
    case OperatorInfo::New: {
      // New
      // # new (expr-list) type [(init)]
      // [gs] nw <expression>* _ <type> [pi <expression>*] E
      // # new[] (expr-list) type [(init)]
      // [gs] na <expression>* _ <type> [pi <expression>*] E
      size_t Exprs = Names.size();
      while (!consumeIf('_')) {
        Node *Ex = getDerived().parseExpr();
        if (Ex == nullptr)
          return nullptr;
        Names.push_back(Ex);
      }
      NodeArray ExprList = popTrailingNodeArray(Exprs);
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      bool HaveInits = consumeIf("pi");
      size_t InitsBegin = Names.size();
      while (!consumeIf('E')) {
        if (!HaveInits)
          return nullptr;
        Node *Init = getDerived().parseExpr();
        if (Init == nullptr)
          return nullptr;
        Names.push_back(Init);
      }
      NodeArray Inits = popTrailingNodeArray(InitsBegin);
      return make<NewExpr>(ExprList, Ty, Inits, Global,
                           /*IsArray=*/Op->getFlag(), Op->getPrecedence());
    }
    case OperatorInfo::Del: {
      // Delete
      Node *Ex = getDerived().parseExpr();
      if (Ex == nullptr)
        return nullptr;
      return make<DeleteExpr>(Ex, Global, /*IsArray=*/Op->getFlag(),
                              Op->getPrecedence());
    }
    case OperatorInfo::Call: {
      // Function Call
      Node *Callee = getDerived().parseExpr();
      if (Callee == nullptr)
        return nullptr;
      size_t ExprsBegin = Names.size();
      while (!consumeIf('E')) {
        Node *E = getDerived().parseExpr();
        if (E == nullptr)
          return nullptr;
        Names.push_back(E);
      }
      return make<CallExpr>(Callee, popTrailingNodeArray(ExprsBegin),
                            Op->getPrecedence());
    }
    case OperatorInfo::CCast: {
      // C Cast: (type)expr
      Node *Ty;
      {
        ScopedOverride<bool> SaveTemp(TryToParseTemplateArgs, false);
        Ty = getDerived().parseType();
      }
      if (Ty == nullptr)
        return nullptr;

      size_t ExprsBegin = Names.size();
      bool IsMany = consumeIf('_');
      while (!consumeIf('E')) {
        Node *E = getDerived().parseExpr();
        if (E == nullptr)
          return nullptr;
        Names.push_back(E);
        if (!IsMany)
          break;
      }
      NodeArray Exprs = popTrailingNodeArray(ExprsBegin);
      if (!IsMany && Exprs.size() != 1)
        return nullptr;
      return make<ConversionExpr>(Ty, Exprs, Op->getPrecedence());
    }
    case OperatorInfo::Conditional: {
      // Conditional operator: expr ? expr : expr
      Node *Cond = getDerived().parseExpr();
      if (Cond == nullptr)
        return nullptr;
      Node *LHS = getDerived().parseExpr();
      if (LHS == nullptr)
        return nullptr;
      Node *RHS = getDerived().parseExpr();
      if (RHS == nullptr)
        return nullptr;
      return make<ConditionalExpr>(Cond, LHS, RHS, Op->getPrecedence());
    }
    case OperatorInfo::NamedCast: {
      // Named cast operation, @<type>(expr)
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      Node *Ex = getDerived().parseExpr();
      if (Ex == nullptr)
        return nullptr;
      return make<CastExpr>(Sym, Ty, Ex, Op->getPrecedence());
    }
    case OperatorInfo::OfIdOp: {
      // [sizeof/alignof/typeid] ( <type>|<expr> )
      Node *Arg =
          Op->getFlag() ? getDerived().parseType() : getDerived().parseExpr();
      if (!Arg)
        return nullptr;
      return make<EnclosingExpr>(Sym, Arg, Op->getPrecedence());
    }
    case OperatorInfo::NameOnly: {
      // Not valid as an expression operand.
      return nullptr;
    }
    }
    DEMANGLE_UNREACHABLE;
  }

  if (numLeft() < 2)
    return nullptr;

  if (look() == 'L')
    return getDerived().parseExprPrimary();
  if (look() == 'T')
    return getDerived().parseTemplateParam();
  if (look() == 'f') {
    // Disambiguate a fold expression from a <function-param>.
    if (look(1) == 'p' || (look(1) == 'L' && std::isdigit(look(2))))
      return getDerived().parseFunctionParam();
    return getDerived().parseFoldExpr();
  }
  if (consumeIf("il")) {
    size_t InitsBegin = Names.size();
    while (!consumeIf('E')) {
      Node *E = getDerived().parseBracedExpr();
      if (E == nullptr)
        return nullptr;
      Names.push_back(E);
    }
    return make<InitListExpr>(nullptr, popTrailingNodeArray(InitsBegin));
  }
  if (consumeIf("mc"))
    return parsePointerToMemberConversionExpr(Node::Prec::Unary);
  if (consumeIf("nx")) {
    Node *Ex = getDerived().parseExpr();
    if (Ex == nullptr)
      return nullptr;
    return make<EnclosingExpr>("noexcept ", Ex, Node::Prec::Unary);
  }
  if (consumeIf("so"))
    return parseSubobjectExpr();
  if (consumeIf("sp")) {
    Node *Child = getDerived().parseExpr();
    if (Child == nullptr)
      return nullptr;
    return make<ParameterPackExpansion>(Child);
  }
  if (consumeIf("sZ")) {
    if (look() == 'T') {
      Node *R = getDerived().parseTemplateParam();
      if (R == nullptr)
        return nullptr;
      return make<SizeofParamPackExpr>(R);
    }
    Node *FP = getDerived().parseFunctionParam();
    if (FP == nullptr)
      return nullptr;
    return make<EnclosingExpr>("sizeof... ", FP);
  }
  if (consumeIf("sP")) {
    size_t ArgsBegin = Names.size();
    while (!consumeIf('E')) {
      Node *Arg = getDerived().parseTemplateArg();
      if (Arg == nullptr)
        return nullptr;
      Names.push_back(Arg);
    }
    auto *Pack = make<NodeArrayNode>(popTrailingNodeArray(ArgsBegin));
    if (!Pack)
      return nullptr;
    return make<EnclosingExpr>("sizeof... ", Pack);
  }
  if (consumeIf("tl")) {
    Node *Ty = getDerived().parseType();
    if (Ty == nullptr)
      return nullptr;
    size_t InitsBegin = Names.size();
    while (!consumeIf('E')) {
      Node *E = getDerived().parseBracedExpr();
      if (E == nullptr)
        return nullptr;
      Names.push_back(E);
    }
    return make<InitListExpr>(Ty, popTrailingNodeArray(InitsBegin));
  }
  if (consumeIf("tr"))
    return make<NameType>("throw");
  if (consumeIf("tw")) {
    Node *Ex = getDerived().parseExpr();
    if (Ex == nullptr)
      return nullptr;
    return make<ThrowExpr>(Ex);
  }
  if (consumeIf('u')) {
    Node *Name = getDerived().parseSourceName(/*NameState=*/nullptr);
    if (!Name)
      return nullptr;
    // Special case legacy __uuidof mangling. The 't' and 'z' appear where the
    // standard encoding expects a <template-arg>, and would otherwise be
    // interpreted as the <type> nodes 'short' or 'ellipsis'. However, neither
    // __uuidof(short) nor __uuidof(...) can actually appear, so there is no
    // actual conflict here.
    bool IsUUID = false;
    Node *UUID = nullptr;
    if (Name->getBaseName() == "__uuidof") {
      if (consumeIf('t')) {
        UUID = getDerived().parseType();
        IsUUID = true;
      } else if (consumeIf('z')) {
        UUID = getDerived().parseExpr();
        IsUUID = true;
      }
    }
    size_t ExprsBegin = Names.size();
    if (IsUUID) {
      if (UUID == nullptr)
        return nullptr;
      Names.push_back(UUID);
    } else {
      while (!consumeIf('E')) {
        Node *E = getDerived().parseTemplateArg();
        if (E == nullptr)
          return nullptr;
        Names.push_back(E);
      }
    }
    return make<CallExpr>(Name, popTrailingNodeArray(ExprsBegin),
                          Node::Prec::Postfix);
  }

  // Only unresolved names remain.
  return getDerived().parseUnresolvedName(Global);
}
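
// For illustration (editor's note): "plLi1ELi2E" resolves through the
// operator table as binary '+' and parses two literal operands, yielding
// "1 + 2"; input matching no earlier production falls through to
// parseUnresolvedName.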

// <call-offset> ::= h <nv-offset> _
//               ::= v <v-offset> _
//
// <nv-offset> ::= <offset number>
//               # non-virtual base override
//
// <v-offset>  ::= <offset number> _ <virtual offset number>
//               # virtual base override, with vcall offset
template <typename Alloc, typename Derived>
bool AbstractManglingParser<Alloc, Derived>::parseCallOffset() {
  // Just scan through the call offset; we never add this information to the
  // output.
  if (consumeIf('h'))
    return parseNumber(true).empty() || !consumeIf('_');
  if (consumeIf('v'))
    return parseNumber(true).empty() || !consumeIf('_') ||
           parseNumber(true).empty() || !consumeIf('_');
  return true;
}
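
// For illustration (editor's note): parseCallOffset returns true on FAILURE.
// In the thunk "_ZThn8_N1C1fEv" the call offset is "hn8_" (a 'this'
// adjustment of -8); it is scanned and discarded, and the symbol demangles to
// "non-virtual thunk to C::f()".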

// <special-name> ::= TV <type>    # virtual table
//                ::= TT <type>    # VTT structure (construction vtable index)
//                ::= TI <type>    # typeinfo structure
//                ::= TS <type>    # typeinfo name (null-terminated byte string)
//                ::= Tc <call-offset> <call-offset> <base encoding>
//                    # base is the nominal target function of thunk
//                    # first call-offset is 'this' adjustment
//                    # second call-offset is result adjustment
//                ::= T <call-offset> <base encoding>
//                    # base is the nominal target function of thunk
//                # Guard variable for one-time initialization
//                ::= GV <object name>
//                                     # No <type>
//                ::= TW <object name> # Thread-local wrapper
//                ::= TH <object name> # Thread-local initialization
//                ::= GR <object name> _             # First temporary
//                ::= GR <object name> <seq-id> _    # Subsequent temporaries
//                # construction vtable for second-in-first
//      extension ::= TC <first type> <number> _ <second type>
//      extension ::= GR <object name> # reference temporary for object
//      extension ::= GI <module name> # module global initializer
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseSpecialName() {
  switch (look()) {
  case 'T':
    switch (look(1)) {
    // TA <template-arg>    # template parameter object
    //
    // Not yet in the spec: https://github.com/itanium-cxx-abi/cxx-abi/issues/63
    case 'A': {
      First += 2;
      Node *Arg = getDerived().parseTemplateArg();
      if (Arg == nullptr)
        return nullptr;
      return make<SpecialName>("template parameter object for ", Arg);
    }
    // TV <type>    # virtual table
    case 'V': {
      First += 2;
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      return make<SpecialName>("vtable for ", Ty);
    }
    // TT <type>    # VTT structure (construction vtable index)
    case 'T': {
      First += 2;
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      return make<SpecialName>("VTT for ", Ty);
    }
    // TI <type>    # typeinfo structure
    case 'I': {
      First += 2;
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      return make<SpecialName>("typeinfo for ", Ty);
    }
    // TS <type>    # typeinfo name (null-terminated byte string)
    case 'S': {
      First += 2;
      Node *Ty = getDerived().parseType();
      if (Ty == nullptr)
        return nullptr;
      return make<SpecialName>("typeinfo name for ", Ty);
    }
    // Tc <call-offset> <call-offset> <base encoding>
    case 'c': {
      First += 2;
      if (parseCallOffset() || parseCallOffset())
        return nullptr;
      Node *Encoding = getDerived().parseEncoding();
      if (Encoding == nullptr)
        return nullptr;
      return make<SpecialName>("covariant return thunk to ", Encoding);
    }
    // extension ::= TC <first type> <number> _ <second type>
    //               # construction vtable for second-in-first
    case 'C': {
      First += 2;
      Node *FirstType = getDerived().parseType();
      if (FirstType == nullptr)
        return nullptr;
      if (parseNumber(true).empty() || !consumeIf('_'))
        return nullptr;
      Node *SecondType = getDerived().parseType();
      if (SecondType == nullptr)
        return nullptr;
      return make<CtorVtableSpecialName>(SecondType, FirstType);
    }
    // TW <object name> # Thread-local wrapper
    case 'W': {
      First += 2;
      Node *Name = getDerived().parseName();
      if (Name == nullptr)
        return nullptr;
      return make<SpecialName>("thread-local wrapper routine for ", Name);
    }
    // TH <object name> # Thread-local initialization
    case 'H': {
      First += 2;
      Node *Name = getDerived().parseName();
      if (Name == nullptr)
        return nullptr;
      return make<SpecialName>("thread-local initialization routine for ", Name);
    }
    // T <call-offset> <base encoding>
    default: {
      ++First;
      bool IsVirt = look() == 'v';
      if (parseCallOffset())
        return nullptr;
      Node *BaseEncoding = getDerived().parseEncoding();
      if (BaseEncoding == nullptr)
        return nullptr;
      if (IsVirt)
        return make<SpecialName>("virtual thunk to ", BaseEncoding);
      else
        return make<SpecialName>("non-virtual thunk to ", BaseEncoding);
    }
    }
  case 'G':
    switch (look(1)) {
    // GV <object name> # Guard variable for one-time initialization
    case 'V': {
      First += 2;
      Node *Name = getDerived().parseName();
      if (Name == nullptr)
        return nullptr;
      return make<SpecialName>("guard variable for ", Name);
    }
    // GR <object name> # reference temporary for object
    // GR <object name> _             # First temporary
    // GR <object name> <seq-id> _    # Subsequent temporaries
    case 'R': {
      First += 2;
      Node *Name = getDerived().parseName();
      if (Name == nullptr)
        return nullptr;
      size_t Count;
      bool ParsedSeqId = !parseSeqId(&Count);
      if (!consumeIf('_') && ParsedSeqId)
        return nullptr;
      return make<SpecialName>("reference temporary for ", Name);
    }
    // GI <module-name> v
    case 'I': {
      First += 2;
      ModuleName *Module = nullptr;
      if (getDerived().parseModuleNameOpt(Module))
        return nullptr;
      if (Module == nullptr)
        return nullptr;
      return make<SpecialName>("initializer for module ", Module);
    }
    }
  }
  return nullptr;
}
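// For illustration: with the grammar above, the special name "TV3Foo" parses
// as the virtual table for the (hypothetical) class Foo, so the full symbol
// "_ZTV3Foo" demangles to "vtable for Foo"; similarly "TI3Foo" yields
// "typeinfo for Foo".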

// <encoding> ::= <function name> <bare-function-type>
//            ::= <data name>
//            ::= <special-name>
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseEncoding() {
  // The template parameters of an encoding are unrelated to those of the
  // enclosing context.
  class SaveTemplateParams {
    AbstractManglingParser *Parser;
    decltype(TemplateParams) OldParams;
    decltype(OuterTemplateParams) OldOuterParams;

  public:
    SaveTemplateParams(AbstractManglingParser *TheParser) : Parser(TheParser) {
      OldParams = std::move(Parser->TemplateParams);
      OldOuterParams = std::move(Parser->OuterTemplateParams);
      Parser->TemplateParams.clear();
      Parser->OuterTemplateParams.clear();
    }
    ~SaveTemplateParams() {
      Parser->TemplateParams = std::move(OldParams);
      Parser->OuterTemplateParams = std::move(OldOuterParams);
    }
  } SaveTemplateParams(this);

  if (look() == 'G' || look() == 'T')
    return getDerived().parseSpecialName();

  auto IsEndOfEncoding = [&] {
    // The set of chars that can potentially follow an <encoding> (none of which
    // can start a <type>). Enumerating these allows us to avoid speculative
    // parsing.
    return numLeft() == 0 || look() == 'E' || look() == '.' || look() == '_';
  };

  NameState NameInfo(this);
  Node *Name = getDerived().parseName(&NameInfo);
  if (Name == nullptr)
    return nullptr;

  if (resolveForwardTemplateRefs(NameInfo))
    return nullptr;

  if (IsEndOfEncoding())
    return Name;

  Node *Attrs = nullptr;
  if (consumeIf("Ua9enable_ifI")) {
    size_t BeforeArgs = Names.size();
    while (!consumeIf('E')) {
      Node *Arg = getDerived().parseTemplateArg();
      if (Arg == nullptr)
        return nullptr;
      Names.push_back(Arg);
    }
    Attrs = make<EnableIfAttr>(popTrailingNodeArray(BeforeArgs));
    if (!Attrs)
      return nullptr;
  }

  Node *ReturnType = nullptr;
  if (!NameInfo.CtorDtorConversion && NameInfo.EndsWithTemplateArgs) {
    ReturnType = getDerived().parseType();
    if (ReturnType == nullptr)
      return nullptr;
  }

  if (consumeIf('v'))
    return make<FunctionEncoding>(ReturnType, Name, NodeArray(),
                                  Attrs, NameInfo.CVQualifiers,
                                  NameInfo.ReferenceQualifier);

  size_t ParamsBegin = Names.size();
  do {
    Node *Ty = getDerived().parseType();
    if (Ty == nullptr)
      return nullptr;
    Names.push_back(Ty);
  } while (!IsEndOfEncoding());

  return make<FunctionEncoding>(ReturnType, Name,
                                popTrailingNodeArray(ParamsBegin),
                                Attrs, NameInfo.CVQualifiers,
                                NameInfo.ReferenceQualifier);
}
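// A worked example: the encoding "3foov" is the <function name> "foo" followed
// by the bare function type "v" (no parameters), so "_Z3foov" demangles to
// "foo()", and "_Z3fooi" to "foo(int)". The consumeIf('v') special case above
// is why nullary functions carry an empty parameter array.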

template <class Float>
struct FloatData;

template <>
struct FloatData<float>
{
    static const size_t mangled_size = 8;
    static const size_t max_demangled_size = 24;
    static constexpr const char* spec = "%af";
};

template <>
struct FloatData<double>
{
    static const size_t mangled_size = 16;
    static const size_t max_demangled_size = 32;
    static constexpr const char* spec = "%a";
};

template <>
struct FloatData<long double>
{
#if defined(__mips__) && defined(__mips_n64) || defined(__aarch64__) || \
    defined(__wasm__) || defined(__riscv) || defined(__loongarch__)
    static const size_t mangled_size = 32;
#elif defined(__arm__) || defined(__mips__) || defined(__hexagon__)
    static const size_t mangled_size = 16;
#else
    static const size_t mangled_size = 20;  // May need to be adjusted to 16 or 24 on other platforms
#endif
    // `-0x1.ffffffffffffffffffffffffffffp+16383` + 'L' + '\0' == 42 bytes.
    // 28 'f's * 4 bits == 112 bits, which is the number of mantissa bits.
    // Negatives are one character longer than positives.
    // `0x1.` and `p` are constant, and exponents `+16383` and `-16382` are the
    // same length. 1 sign bit, 112 mantissa bits, and 15 exponent bits == 128.
    static const size_t max_demangled_size = 42;
    static constexpr const char *spec = "%LaL";
};
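// The mangled_size values above count hex digits, i.e. two per byte of the
// underlying representation: 32 digits for a 128-bit IEEE quad long double,
// 16 digits for a 64-bit long double, and 20 digits for the x87 80-bit
// extended format (10 bytes).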

template <typename Derived, typename Alloc>
template <class Float>
Node *AbstractManglingParser<Derived, Alloc>::parseFloatingLiteral() {
  const size_t N = FloatData<Float>::mangled_size;
  if (numLeft() <= N)
    return nullptr;
  std::string_view Data(First, N);
  for (char C : Data)
    if (!std::isxdigit(static_cast<unsigned char>(C)))
      return nullptr;
  First += N;
  if (!consumeIf('E'))
    return nullptr;
  return make<FloatLiteralImpl<Float>>(Data);
}
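// For example, a float literal in an expression is mangled as
// "Lf" <8 hex digits> "E". By the time this function runs, the caller
// (parseExprPrimary) has already consumed the "Lf" prefix, so the 8 hex digits
// of the bit pattern (e.g. 40490fdb, approximately 3.14159f) and the trailing
// 'E' are all that remain.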

// <seq-id> ::= <0-9A-Z>+
template <typename Derived, typename Alloc>
bool AbstractManglingParser<Derived, Alloc>::parseSeqId(size_t *Out) {
  if (!(look() >= '0' && look() <= '9') &&
      !(look() >= 'A' && look() <= 'Z'))
    return true;

  size_t Id = 0;
  while (true) {
    if (look() >= '0' && look() <= '9') {
      Id *= 36;
      Id += static_cast<size_t>(look() - '0');
    } else if (look() >= 'A' && look() <= 'Z') {
      Id *= 36;
      Id += static_cast<size_t>(look() - 'A') + 10;
    } else {
      *Out = Id;
      return false;
    }
    ++First;
  }
}
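// For illustration: <seq-id> is base 36 using digits 0-9 then A-Z, so "0"
// decodes to 0, "A" to 10, "Z" to 35, and "10" to 36. Note the function
// returns false on success, matching the convention used by its callers.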

// <substitution> ::= S <seq-id> _
//                ::= S_
// <substitution> ::= Sa # ::std::allocator
// <substitution> ::= Sb # ::std::basic_string
// <substitution> ::= Ss # ::std::basic_string < char,
//                                               ::std::char_traits<char>,
//                                               ::std::allocator<char> >
// <substitution> ::= Si # ::std::basic_istream<char,  std::char_traits<char> >
// <substitution> ::= So # ::std::basic_ostream<char,  std::char_traits<char> >
// <substitution> ::= Sd # ::std::basic_iostream<char, std::char_traits<char> >
// The St case is handled specially in parseNestedName.
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseSubstitution() {
  if (!consumeIf('S'))
    return nullptr;

  if (look() >= 'a' && look() <= 'z') {
    SpecialSubKind Kind;
    switch (look()) {
    case 'a':
      Kind = SpecialSubKind::allocator;
      break;
    case 'b':
      Kind = SpecialSubKind::basic_string;
      break;
    case 'd':
      Kind = SpecialSubKind::iostream;
      break;
    case 'i':
      Kind = SpecialSubKind::istream;
      break;
    case 'o':
      Kind = SpecialSubKind::ostream;
      break;
    case 's':
      Kind = SpecialSubKind::string;
      break;
    default:
      return nullptr;
    }
    ++First;
    auto *SpecialSub = make<SpecialSubstitution>(Kind);
    if (!SpecialSub)
      return nullptr;

    // Itanium C++ ABI 5.1.2: If a name that would use a built-in <substitution>
    // has ABI tags, the tags are appended to the substitution; the result is a
    // substitutable component.
    Node *WithTags = getDerived().parseAbiTags(SpecialSub);
    if (WithTags != SpecialSub) {
      Subs.push_back(WithTags);
      SpecialSub = WithTags;
    }
    return SpecialSub;
  }

  //                ::= S_
  if (consumeIf('_')) {
    if (Subs.empty())
      return nullptr;
    return Subs[0];
  }

  //                ::= S <seq-id> _
  size_t Index = 0;
  if (parseSeqId(&Index))
    return nullptr;
  ++Index;
  if (!consumeIf('_') || Index >= Subs.size())
    return nullptr;
  return Subs[Index];
}
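// For example, "Sa" demangles to std::allocator via the special substitutions
// above, "S_" names the first recorded substitution (Subs[0]), and "S0_"
// names the second: seq-id 0 is incremented to 1 before indexing Subs.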

// <template-param> ::= T_    # first template parameter
//                  ::= T <parameter-2 non-negative number> _
//                  ::= TL <level-1> __
//                  ::= TL <level-1> _ <parameter-2 non-negative number> _
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseTemplateParam() {
  if (!consumeIf('T'))
    return nullptr;

  size_t Level = 0;
  if (consumeIf('L')) {
    if (parsePositiveInteger(&Level))
      return nullptr;
    ++Level;
    if (!consumeIf('_'))
      return nullptr;
  }

  size_t Index = 0;
  if (!consumeIf('_')) {
    if (parsePositiveInteger(&Index))
      return nullptr;
    ++Index;
    if (!consumeIf('_'))
      return nullptr;
  }

  // If we're in a context where this <template-param> refers to a
  // <template-arg> further ahead in the mangled name (currently just conversion
  // operator types), then we should only look it up in the right context.
  // This can only happen at the outermost level.
  if (PermitForwardTemplateReferences && Level == 0) {
    Node *ForwardRef = make<ForwardTemplateReference>(Index);
    if (!ForwardRef)
      return nullptr;
    assert(ForwardRef->getKind() == Node::KForwardTemplateReference);
    ForwardTemplateRefs.push_back(
        static_cast<ForwardTemplateReference *>(ForwardRef));
    return ForwardRef;
  }

  if (Level >= TemplateParams.size() || !TemplateParams[Level] ||
      Index >= TemplateParams[Level]->size()) {
    // Itanium ABI 5.1.8: In a generic lambda, uses of auto in the parameter
    // list are mangled as the corresponding artificial template type parameter.
    if (ParsingLambdaParamsAtLevel == Level && Level <= TemplateParams.size()) {
      // This will be popped by the ScopedTemplateParamList in
      // parseUnnamedTypeName.
      if (Level == TemplateParams.size())
        TemplateParams.push_back(nullptr);
      return make<NameType>("auto");
    }

    return nullptr;
  }

  return (*TemplateParams[Level])[Index];
}
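// For illustration: "T_" decodes to Level 0, Index 0; "T0_" to Level 0,
// Index 1 (the encoded number is parameter - 1); and "TL0__" to Level 1,
// Index 0 (the encoded level is likewise level - 1).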

// <template-param-decl> ::= Ty                          # type parameter
//                       ::= Tn <type>                   # non-type parameter
//                       ::= Tt <template-param-decl>* E # template parameter
//                       ::= Tp <template-param-decl>    # parameter pack
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseTemplateParamDecl() {
  auto InventTemplateParamName = [&](TemplateParamKind Kind) {
    unsigned Index = NumSyntheticTemplateParameters[(int)Kind]++;
    Node *N = make<SyntheticTemplateParamName>(Kind, Index);
    if (N) TemplateParams.back()->push_back(N);
    return N;
  };

  if (consumeIf("Ty")) {
    Node *Name = InventTemplateParamName(TemplateParamKind::Type);
    if (!Name)
      return nullptr;
    return make<TypeTemplateParamDecl>(Name);
  }

  if (consumeIf("Tn")) {
    Node *Name = InventTemplateParamName(TemplateParamKind::NonType);
    if (!Name)
      return nullptr;
    Node *Type = parseType();
    if (!Type)
      return nullptr;
    return make<NonTypeTemplateParamDecl>(Name, Type);
  }

  if (consumeIf("Tt")) {
    Node *Name = InventTemplateParamName(TemplateParamKind::Template);
    if (!Name)
      return nullptr;
    size_t ParamsBegin = Names.size();
    ScopedTemplateParamList TemplateTemplateParamParams(this);
    while (!consumeIf("E")) {
      Node *P = parseTemplateParamDecl();
      if (!P)
        return nullptr;
      Names.push_back(P);
    }
    NodeArray Params = popTrailingNodeArray(ParamsBegin);
    return make<TemplateTemplateParamDecl>(Name, Params);
  }

  if (consumeIf("Tp")) {
    Node *P = parseTemplateParamDecl();
    if (!P)
      return nullptr;
    return make<TemplateParamPackDecl>(P);
  }

  return nullptr;
}
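// For example, "Ty" declares a type parameter and "TpTy" a pack of type
// parameters; in both cases an invented placeholder name is recorded in
// TemplateParams so that later <template-param> references can resolve to the
// declaration.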

// <template-arg> ::= <type>                    # type or template
//                ::= X <expression> E          # expression
//                ::= <expr-primary>            # simple expressions
//                ::= J <template-arg>* E       # argument pack
//                ::= LZ <encoding> E           # extension
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parseTemplateArg() {
  switch (look()) {
  case 'X': {
    ++First;
    Node *Arg = getDerived().parseExpr();
    if (Arg == nullptr || !consumeIf('E'))
      return nullptr;
    return Arg;
  }
  case 'J': {
    ++First;
    size_t ArgsBegin = Names.size();
    while (!consumeIf('E')) {
      Node *Arg = getDerived().parseTemplateArg();
      if (Arg == nullptr)
        return nullptr;
      Names.push_back(Arg);
    }
    NodeArray Args = popTrailingNodeArray(ArgsBegin);
    return make<TemplateArgumentPack>(Args);
  }
  case 'L': {
    //                ::= LZ <encoding> E           # extension
    if (look(1) == 'Z') {
      First += 2;
      Node *Arg = getDerived().parseEncoding();
      if (Arg == nullptr || !consumeIf('E'))
        return nullptr;
      return Arg;
    }
    //                ::= <expr-primary>            # simple expressions
    return getDerived().parseExprPrimary();
  }
  default:
    return getDerived().parseType();
  }
}

// <template-args> ::= I <template-arg>* E
//     extension: the ABI says <template-arg>+
template <typename Derived, typename Alloc>
Node *
AbstractManglingParser<Derived, Alloc>::parseTemplateArgs(bool TagTemplates) {
  if (!consumeIf('I'))
    return nullptr;

  // <template-params> refer to the innermost <template-args>. Clear out any
  // outer args that we may have inserted into TemplateParams.
  if (TagTemplates) {
    TemplateParams.clear();
    TemplateParams.push_back(&OuterTemplateParams);
    OuterTemplateParams.clear();
  }

  size_t ArgsBegin = Names.size();
  while (!consumeIf('E')) {
    if (TagTemplates) {
      auto OldParams = std::move(TemplateParams);
      Node *Arg = getDerived().parseTemplateArg();
      TemplateParams = std::move(OldParams);
      if (Arg == nullptr)
        return nullptr;
      Names.push_back(Arg);
      Node *TableEntry = Arg;
      if (Arg->getKind() == Node::KTemplateArgumentPack) {
        TableEntry = make<ParameterPack>(
            static_cast<TemplateArgumentPack*>(TableEntry)->getElements());
        if (!TableEntry)
          return nullptr;
      }
      TemplateParams.back()->push_back(TableEntry);
    } else {
      Node *Arg = getDerived().parseTemplateArg();
      if (Arg == nullptr)
        return nullptr;
      Names.push_back(Arg);
    }
  }
  return make<TemplateArgs>(popTrailingNodeArray(ArgsBegin));
}
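// A worked example: "IiE" parses as the argument list <int>, and "IJiiEE" as a
// single argument pack holding two ints. When TagTemplates is set, each
// argument is also registered in TemplateParams so later T_ references resolve
// against this innermost argument list.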

// <mangled-name> ::= _Z <encoding>
//                ::= <type>
// extension      ::= ___Z <encoding> _block_invoke
// extension      ::= ___Z <encoding> _block_invoke<decimal-digit>+
// extension      ::= ___Z <encoding> _block_invoke_<decimal-digit>+
template <typename Derived, typename Alloc>
Node *AbstractManglingParser<Derived, Alloc>::parse() {
  if (consumeIf("_Z") || consumeIf("__Z")) {
    Node *Encoding = getDerived().parseEncoding();
    if (Encoding == nullptr)
      return nullptr;
    if (look() == '.') {
      Encoding =
          make<DotSuffix>(Encoding, std::string_view(First, Last - First));
      First = Last;
    }
    if (numLeft() != 0)
      return nullptr;
    return Encoding;
  }

  if (consumeIf("___Z") || consumeIf("____Z")) {
    Node *Encoding = getDerived().parseEncoding();
    if (Encoding == nullptr || !consumeIf("_block_invoke"))
      return nullptr;
    bool RequireNumber = consumeIf('_');
    if (parseNumber().empty() && RequireNumber)
      return nullptr;
    if (look() == '.')
      First = Last;
    if (numLeft() != 0)
      return nullptr;
    return make<SpecialName>("invocation function for block in ", Encoding);
  }

  Node *Ty = getDerived().parseType();
  if (numLeft() != 0)
    return nullptr;
  return Ty;
}
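// For illustration: "_Z1fv" parses as the encoding "1fv" and demangles to
// "f()", while the block extension maps "___Z1fv_block_invoke" to
// "invocation function for block in f()".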

template <typename Alloc>
struct ManglingParser : AbstractManglingParser<ManglingParser<Alloc>, Alloc> {
  using AbstractManglingParser<ManglingParser<Alloc>,
                               Alloc>::AbstractManglingParser;
};

DEMANGLE_NAMESPACE_END

#endif // DEMANGLE_ITANIUMDEMANGLE_H
//===--- ItaniumNodes.def ------------*- mode:c++;eval:(read-only-mode) -*-===//
//       Do not edit! See README.txt.
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Define the demangler's node names

#ifndef NODE
#error Define NODE to handle nodes
#endif

NODE(NodeArrayNode)
NODE(DotSuffix)
NODE(VendorExtQualType)
NODE(QualType)
NODE(ConversionOperatorType)
NODE(PostfixQualifiedType)
NODE(ElaboratedTypeSpefType)
NODE(NameType)
NODE(AbiTagAttr)
NODE(EnableIfAttr)
NODE(ObjCProtoName)
NODE(PointerType)
NODE(ReferenceType)
NODE(PointerToMemberType)
NODE(ArrayType)
NODE(FunctionType)
NODE(NoexceptSpec)
NODE(DynamicExceptionSpec)
NODE(FunctionEncoding)
NODE(LiteralOperator)
NODE(SpecialName)
NODE(CtorVtableSpecialName)
NODE(QualifiedName)
NODE(NestedName)
NODE(LocalName)
NODE(ModuleName)
NODE(ModuleEntity)
NODE(VectorType)
NODE(PixelVectorType)
NODE(BinaryFPType)
NODE(BitIntType)
NODE(SyntheticTemplateParamName)
NODE(TypeTemplateParamDecl)
NODE(NonTypeTemplateParamDecl)
NODE(TemplateTemplateParamDecl)
NODE(TemplateParamPackDecl)
NODE(ParameterPack)
NODE(TemplateArgumentPack)
NODE(ParameterPackExpansion)
NODE(TemplateArgs)
NODE(ForwardTemplateReference)
NODE(NameWithTemplateArgs)
NODE(GlobalQualifiedName)
NODE(ExpandedSpecialSubstitution)
NODE(SpecialSubstitution)
NODE(CtorDtorName)
NODE(DtorName)
NODE(UnnamedTypeName)
NODE(ClosureTypeName)
NODE(StructuredBindingName)
NODE(BinaryExpr)
NODE(ArraySubscriptExpr)
NODE(PostfixExpr)
NODE(ConditionalExpr)
NODE(MemberExpr)
NODE(SubobjectExpr)
NODE(EnclosingExpr)
NODE(CastExpr)
NODE(SizeofParamPackExpr)
NODE(CallExpr)
NODE(NewExpr)
NODE(DeleteExpr)
NODE(PrefixExpr)
NODE(FunctionParam)
NODE(ConversionExpr)
NODE(PointerToMemberConversionExpr)
NODE(InitListExpr)
NODE(FoldExpr)
NODE(ThrowExpr)
NODE(BoolExpr)
NODE(StringLiteral)
NODE(LambdaExpr)
NODE(EnumLiteral)
NODE(IntegerLiteral)
NODE(FloatLiteral)
NODE(DoubleLiteral)
NODE(LongDoubleLiteral)
NODE(BracedExpr)
NODE(BracedRangeExpr)

#undef NODE
//===--- DemangleConfig.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a variety of feature test macros copied from
// include/llvm/Support/Compiler.h so that LLVMDemangle does not need to take
// a dependency on LLVMSupport.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEMANGLE_DEMANGLECONFIG_H
#define LLVM_DEMANGLE_DEMANGLECONFIG_H

#ifndef __has_feature
#define __has_feature(x) 0
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

#ifndef DEMANGLE_GNUC_PREREQ
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
#define DEMANGLE_GNUC_PREREQ(maj, min, patch)                           \
  ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >=          \
   ((maj) << 20) + ((min) << 10) + (patch))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
#define DEMANGLE_GNUC_PREREQ(maj, min, patch)                           \
  ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
#else
#define DEMANGLE_GNUC_PREREQ(maj, min, patch) 0
#endif
#endif
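// For illustration: the macro packs a version triple into one integer, so on
// GCC 7.3.0 the left side is (7 << 20) + (3 << 10) + 0 and
// DEMANGLE_GNUC_PREREQ(4, 5, 0) evaluates to true.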

#if __has_attribute(used) || DEMANGLE_GNUC_PREREQ(3, 1, 0)
#define DEMANGLE_ATTRIBUTE_USED __attribute__((__used__))
#else
#define DEMANGLE_ATTRIBUTE_USED
#endif

#if __has_builtin(__builtin_unreachable) || DEMANGLE_GNUC_PREREQ(4, 5, 0)
#define DEMANGLE_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define DEMANGLE_UNREACHABLE __assume(false)
#else
#define DEMANGLE_UNREACHABLE
#endif

#if __has_attribute(noinline) || DEMANGLE_GNUC_PREREQ(3, 4, 0)
#define DEMANGLE_ATTRIBUTE_NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define DEMANGLE_ATTRIBUTE_NOINLINE __declspec(noinline)
#else
#define DEMANGLE_ATTRIBUTE_NOINLINE
#endif

#if !defined(NDEBUG)
#define DEMANGLE_DUMP_METHOD DEMANGLE_ATTRIBUTE_NOINLINE DEMANGLE_ATTRIBUTE_USED
#else
#define DEMANGLE_DUMP_METHOD DEMANGLE_ATTRIBUTE_NOINLINE
#endif

#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define DEMANGLE_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define DEMANGLE_FALLTHROUGH [[gnu::fallthrough]]
#elif !__cplusplus
// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
// error when __has_cpp_attribute is given a scoped attribute in C mode.
#define DEMANGLE_FALLTHROUGH
#elif __has_cpp_attribute(clang::fallthrough)
#define DEMANGLE_FALLTHROUGH [[clang::fallthrough]]
#else
#define DEMANGLE_FALLTHROUGH
#endif

#define DEMANGLE_NAMESPACE_BEGIN namespace llvm { namespace itanium_demangle {
#define DEMANGLE_NAMESPACE_END } }

#endif
//==- llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the RefCountedBase, ThreadSafeRefCountedBase, and
/// IntrusiveRefCntPtr classes.
///
/// IntrusiveRefCntPtr is a smart pointer to an object which maintains a
/// reference count.  (ThreadSafe)RefCountedBase is a mixin class that adds a
/// refcount member variable and methods for updating the refcount.  An object
/// that inherits from (ThreadSafe)RefCountedBase deletes itself when its
/// refcount hits zero.
///
/// For example:
///
/// ```
///   class MyClass : public RefCountedBase<MyClass> {};
///
///   void foo() {
///     // Constructing an IntrusiveRefCntPtr increases the pointee's refcount
///     // by 1 (from 0 in this case).
///     IntrusiveRefCntPtr<MyClass> Ptr1(new MyClass());
///
///     // Copying an IntrusiveRefCntPtr increases the pointee's refcount by 1.
///     IntrusiveRefCntPtr<MyClass> Ptr2(Ptr1);
///
///     // Move-constructing an IntrusiveRefCntPtr has no effect on the
///     // object's refcount.  After a move, the moved-from pointer is null.
///     IntrusiveRefCntPtr<MyClass> Ptr3(std::move(Ptr1));
///     assert(Ptr1 == nullptr);
///
///     // Clearing an IntrusiveRefCntPtr decreases the pointee's refcount by 1.
///     Ptr2.reset();
///
///     // The object deletes itself when we return from the function, because
///     // Ptr3's destructor decrements its refcount to 0.
///   }
/// ```
///
/// You can use IntrusiveRefCntPtr with isa<T>(), dyn_cast<T>(), etc.:
///
/// ```
///   IntrusiveRefCntPtr<MyClass> Ptr(new MyClass());
///   OtherClass *Other = dyn_cast<OtherClass>(Ptr);  // Ptr.get() not required
/// ```
///
/// IntrusiveRefCntPtr works with any class that
///
///  - inherits from (ThreadSafe)RefCountedBase,
///  - has Retain() and Release() methods, or
///  - specializes IntrusiveRefCntPtrInfo.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
#define LLVM_ADT_INTRUSIVEREFCNTPTR_H

#include <atomic>
#include <cassert>
#include <cstddef>
#include <memory>

namespace llvm {

/// A CRTP mixin class that adds reference counting to a type.
///
/// The lifetime of an object which inherits from RefCountedBase is managed by
/// calls to Release() and Retain(), which increment and decrement the object's
/// refcount, respectively.  When a Release() call decrements the refcount to 0,
/// the object deletes itself.
template <class Derived> class RefCountedBase {
  mutable unsigned RefCount = 0;

protected:
  RefCountedBase() = default;
  RefCountedBase(const RefCountedBase &) {}
  RefCountedBase &operator=(const RefCountedBase &) = delete;

#ifndef NDEBUG
  ~RefCountedBase() {
    assert(RefCount == 0 &&
           "Destruction occurred when there are still references to this.");
  }
#else
  // Default the destructor in release builds; a trivial destructor may enable
  // better codegen.
  ~RefCountedBase() = default;
#endif

public:
  void Retain() const { ++RefCount; }

  void Release() const {
    assert(RefCount > 0 && "Reference count is already zero.");
    if (--RefCount == 0)
      delete static_cast<const Derived *>(this);
  }
};

/// A thread-safe version of \c RefCountedBase.
template <class Derived> class ThreadSafeRefCountedBase {
  mutable std::atomic<int> RefCount{0};

protected:
  ThreadSafeRefCountedBase() = default;
  ThreadSafeRefCountedBase(const ThreadSafeRefCountedBase &) {}
  ThreadSafeRefCountedBase &
  operator=(const ThreadSafeRefCountedBase &) = delete;

#ifndef NDEBUG
  ~ThreadSafeRefCountedBase() {
    assert(RefCount == 0 &&
           "Destruction occurred when there are still references to this.");
  }
#else
  // Default the destructor in release builds; a trivial destructor may enable
  // better codegen.
  ~ThreadSafeRefCountedBase() = default;
#endif

public:
  void Retain() const { RefCount.fetch_add(1, std::memory_order_relaxed); }

  void Release() const {
    int NewRefCount = RefCount.fetch_sub(1, std::memory_order_acq_rel) - 1;
    assert(NewRefCount >= 0 && "Reference count was already zero.");
    if (NewRefCount == 0)
      delete static_cast<const Derived *>(this);
  }
};

/// Class you can specialize to provide custom retain/release functionality for
/// a type.
///
/// Usually specializing this class is not necessary, as IntrusiveRefCntPtr
/// works with any type which defines Retain() and Release() functions -- you
/// can define those functions yourself if RefCountedBase doesn't work for you.
///
/// One case when you might want to specialize this type is if you have
///  - Foo.h defines type Foo and includes Bar.h, and
///  - Bar.h uses IntrusiveRefCntPtr<Foo> in inline functions.
///
/// Because Foo.h includes Bar.h, Bar.h can't include Foo.h in order to pull in
/// the declaration of Foo.  Without the declaration of Foo, normally Bar.h
/// wouldn't be able to use IntrusiveRefCntPtr<Foo>, which wants to call
/// T::Retain and T::Release.
///
/// To resolve this, Bar.h could include a third header, FooFwd.h, which
/// forward-declares Foo and specializes IntrusiveRefCntPtrInfo<Foo>.  Then
/// Bar.h could use IntrusiveRefCntPtr<Foo>, although it still couldn't call any
/// functions on Foo itself, because Foo would be an incomplete type.
template <typename T> struct IntrusiveRefCntPtrInfo {
  static void retain(T *obj) { obj->Retain(); }
  static void release(T *obj) { obj->Release(); }
};
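// A minimal sketch of the FooFwd.h approach described above (all names here
// are hypothetical, chosen only for illustration):
//
//   // FooFwd.h
//   class Foo;
//   void retainFoo(Foo *);  // defined in Foo.cpp; calls Foo::Retain()
//   void releaseFoo(Foo *); // defined in Foo.cpp; calls Foo::Release()
//   namespace llvm {
//   template <> struct IntrusiveRefCntPtrInfo<Foo> {
//     static void retain(Foo *obj) { retainFoo(obj); }
//     static void release(Foo *obj) { releaseFoo(obj); }
//   };
//   } // namespace llvm
//
// Because retainFoo/releaseFoo are only declared here, Foo may remain an
// incomplete type in every header that uses IntrusiveRefCntPtr<Foo>.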

/// A smart pointer to a reference-counted object that inherits from
/// RefCountedBase or ThreadSafeRefCountedBase.
///
/// This class increments its pointee's reference count when it is created, and
/// decrements its refcount when it's destroyed (or is changed to point to a
/// different object).
template <typename T> class IntrusiveRefCntPtr {
  T *Obj = nullptr;

public:
  using element_type = T;

  explicit IntrusiveRefCntPtr() = default;
  IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
  IntrusiveRefCntPtr(const IntrusiveRefCntPtr &S) : Obj(S.Obj) { retain(); }
  IntrusiveRefCntPtr(IntrusiveRefCntPtr &&S) : Obj(S.Obj) { S.Obj = nullptr; }

  template <class X,
            std::enable_if_t<std::is_convertible<X *, T *>::value, bool> = true>
  IntrusiveRefCntPtr(IntrusiveRefCntPtr<X> S) : Obj(S.get()) {
    S.Obj = nullptr;
  }

  template <class X,
            std::enable_if_t<std::is_convertible<X *, T *>::value, bool> = true>
  IntrusiveRefCntPtr(std::unique_ptr<X> S) : Obj(S.release()) {
    retain();
  }

  ~IntrusiveRefCntPtr() { release(); }

  IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
    swap(S);
    return *this;
  }

  T &operator*() const { return *Obj; }
  T *operator->() const { return Obj; }
  T *get() const { return Obj; }
  explicit operator bool() const { return Obj; }

  void swap(IntrusiveRefCntPtr &other) {
    T *tmp = other.Obj;
    other.Obj = Obj;
    Obj = tmp;
  }

  void reset() {
    release();
    Obj = nullptr;
  }

  void resetWithoutRelease() { Obj = nullptr; }

private:
  void retain() {
    if (Obj)
      IntrusiveRefCntPtrInfo<T>::retain(Obj);
  }

  void release() {
    if (Obj)
      IntrusiveRefCntPtrInfo<T>::release(Obj);
  }

  template <typename X> friend class IntrusiveRefCntPtr;
};

template <class T, class U>
inline bool operator==(const IntrusiveRefCntPtr<T> &A,
                       const IntrusiveRefCntPtr<U> &B) {
  return A.get() == B.get();
}

template <class T, class U>
inline bool operator!=(const IntrusiveRefCntPtr<T> &A,
                       const IntrusiveRefCntPtr<U> &B) {
  return A.get() != B.get();
}

template <class T, class U>
inline bool operator==(const IntrusiveRefCntPtr<T> &A, U *B) {
  return A.get() == B;
}

template <class T, class U>
inline bool operator!=(const IntrusiveRefCntPtr<T> &A, U *B) {
  return A.get() != B;
}

template <class T, class U>
inline bool operator==(T *A, const IntrusiveRefCntPtr<U> &B) {
  return A == B.get();
}

template <class T, class U>
inline bool operator!=(T *A, const IntrusiveRefCntPtr<U> &B) {
  return A != B.get();
}

template <class T>
bool operator==(std::nullptr_t, const IntrusiveRefCntPtr<T> &B) {
  return !B;
}

template <class T>
bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
  return B == A;
}

template <class T>
bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
  return !(A == B);
}

template <class T>
bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
  return !(A == B);
}

// Make IntrusiveRefCntPtr work with dyn_cast, isa, and the other idioms from
// Casting.h.
template <typename From> struct simplify_type;

template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
  using SimpleType = T *;

  static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
    return Val.get();
  }
};

template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
  using SimpleType = /*const*/ T *;

  static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
    return Val.get();
  }
};

/// Factory function for creating intrusive ref counted pointers.
template <typename T, typename... Args>
IntrusiveRefCntPtr<T> makeIntrusiveRefCnt(Args &&...A) {
  return IntrusiveRefCntPtr<T>(new T(std::forward<Args>(A)...));
}
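// For example, assuming MyClass derives from RefCountedBase<MyClass> as in
// the file comment above:
//
//   IntrusiveRefCntPtr<MyClass> P = makeIntrusiveRefCnt<MyClass>();
//   // P's constructor retained the object, so its refcount is now 1.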

} // end namespace llvm

#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ILIST_ITERATOR_H
#define LLVM_ADT_ILIST_ITERATOR_H

#include "llvm/ADT/ilist_node.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>

namespace llvm {

namespace ilist_detail {

/// Find const-correct node types.
template <class OptionsT, bool IsConst> struct IteratorTraits;
template <class OptionsT> struct IteratorTraits<OptionsT, false> {
  using value_type = typename OptionsT::value_type;
  using pointer = typename OptionsT::pointer;
  using reference = typename OptionsT::reference;
  using node_pointer = ilist_node_impl<OptionsT> *;
  using node_reference = ilist_node_impl<OptionsT> &;
};
template <class OptionsT> struct IteratorTraits<OptionsT, true> {
  using value_type = const typename OptionsT::value_type;
  using pointer = typename OptionsT::const_pointer;
  using reference = typename OptionsT::const_reference;
  using node_pointer = const ilist_node_impl<OptionsT> *;
  using node_reference = const ilist_node_impl<OptionsT> &;
};

template <bool IsReverse> struct IteratorHelper;
template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
  using Access = ilist_detail::NodeAccess;

  template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
  template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
};
template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
  using Access = ilist_detail::NodeAccess;

  template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
  template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
};

} // end namespace ilist_detail

/// Iterator for intrusive lists based on ilist_node.
template <class OptionsT, bool IsReverse, bool IsConst>
class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
  friend ilist_iterator<OptionsT, IsReverse, !IsConst>;
  friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
  friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;

  using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
  using Access = ilist_detail::SpecificNodeAccess<OptionsT>;

public:
  using value_type = typename Traits::value_type;
  using pointer = typename Traits::pointer;
  using reference = typename Traits::reference;
  using difference_type = ptrdiff_t;
  using iterator_category = std::bidirectional_iterator_tag;
  using const_pointer = typename OptionsT::const_pointer;
  using const_reference = typename OptionsT::const_reference;

private:
  using node_pointer = typename Traits::node_pointer;
  using node_reference = typename Traits::node_reference;

  node_pointer NodePtr = nullptr;

public:
  /// Create from an ilist_node.
  explicit ilist_iterator(node_reference N) : NodePtr(&N) {}

  explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
  explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
  ilist_iterator() = default;

  // This is templated so that we can allow constructing a const iterator from
  // a nonconst iterator...
  template <bool RHSIsConst>
  ilist_iterator(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS,
                 std::enable_if_t<IsConst || !RHSIsConst, void *> = nullptr)
      : NodePtr(RHS.NodePtr) {}

  // This is templated so that we can allow assigning to a const iterator from
  // a nonconst iterator...
  template <bool RHSIsConst>
  std::enable_if_t<IsConst || !RHSIsConst, ilist_iterator &>
  operator=(const ilist_iterator<OptionsT, IsReverse, RHSIsConst> &RHS) {
    NodePtr = RHS.NodePtr;
    return *this;
  }

  /// Explicit conversion between forward/reverse iterators.
  ///
  /// Translate between forward and reverse iterators without changing range
  /// boundaries.  The resulting iterator will dereference (and have a handle)
  /// to the previous node, which is somewhat unexpected; but converting the
  /// two endpoints in a range will give the same range in reverse.
  ///
  /// This matches std::reverse_iterator conversions.
  explicit ilist_iterator(
      const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
      : ilist_iterator(++RHS.getReverse()) {}

  /// Get a reverse iterator to the same node.
  ///
  /// Gives a reverse iterator that will dereference (and have a handle) to the
  /// same node.  Converting the endpoint iterators in a range will give a
  /// different range; for range operations, use the explicit conversions.
  ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
    if (NodePtr)
      return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);
    return ilist_iterator<OptionsT, !IsReverse, IsConst>();
  }
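  // For illustration: converting both endpoints with the explicit constructor
  // above maps [begin(), end()) to [rbegin(), rend()); converting end() lands
  // on the last element, just as std::reverse_iterator would. By contrast,
  // getReverse() keeps a handle to the same node, so it is the right choice
  // when walking backwards from a known position.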

  /// Const-cast.
  ilist_iterator<OptionsT, IsReverse, false> getNonConst() const {
    if (NodePtr)
      return ilist_iterator<OptionsT, IsReverse, false>(
          const_cast<typename ilist_iterator<OptionsT, IsReverse,
                                             false>::node_reference>(*NodePtr));
    return ilist_iterator<OptionsT, IsReverse, false>();
  }

  // Accessors...
  reference operator*() const {
    assert(!NodePtr->isKnownSentinel());
    return *Access::getValuePtr(NodePtr);
  }
  pointer operator->() const { return &operator*(); }

  // Comparison operators
  friend bool operator==(const ilist_iterator &LHS, const ilist_iterator &RHS) {
    return LHS.NodePtr == RHS.NodePtr;
  }
  friend bool operator!=(const ilist_iterator &LHS, const ilist_iterator &RHS) {
    return LHS.NodePtr != RHS.NodePtr;
  }

  // Increment and decrement operators...
  ilist_iterator &operator--() {
    NodePtr = IsReverse ? NodePtr->getNext() : NodePtr->getPrev();
    return *this;
  }
  ilist_iterator &operator++() {
    NodePtr = IsReverse ? NodePtr->getPrev() : NodePtr->getNext();
    return *this;
  }
  ilist_iterator operator--(int) {
    ilist_iterator tmp = *this;
    --*this;
    return tmp;
  }
  ilist_iterator operator++(int) {
    ilist_iterator tmp = *this;
    ++*this;
    return tmp;
  }

  /// Get the underlying ilist_node.
  node_pointer getNodePtr() const { return static_cast<node_pointer>(NodePtr); }

  /// Check for end.  Only valid if ilist_sentinel_tracking<true>.
  bool isEnd() const { return NodePtr ? NodePtr->isSentinel() : false; }
};

template <typename From> struct simplify_type;

/// Allow ilist_iterators to convert into pointers to a node automatically when
/// used by the dyn_cast, cast, isa mechanisms...
///
/// FIXME: remove this, since there is no implicit conversion to NodeTy.
template <class OptionsT, bool IsConst>
struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
  using iterator = ilist_iterator<OptionsT, false, IsConst>;
  using SimpleType = typename iterator::pointer;

  static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
};
template <class OptionsT, bool IsConst>
struct simplify_type<const ilist_iterator<OptionsT, false, IsConst>>
    : simplify_type<ilist_iterator<OptionsT, false, IsConst>> {};

} // end namespace llvm

#endif // LLVM_ADT_ILIST_ITERATOR_H
//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares a class to represent arbitrary precision floating point
/// values and provide a variety of arithmetic operations on them.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_APFLOAT_H
#define LLVM_ADT_APFLOAT_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/Support/ErrorHandling.h"
#include <memory>

#define APFLOAT_DISPATCH_ON_SEMANTICS(METHOD_CALL)                             \
  do {                                                                         \
    if (usesLayout<IEEEFloat>(getSemantics()))                                 \
      return U.IEEE.METHOD_CALL;                                               \
    if (usesLayout<DoubleAPFloat>(getSemantics()))                             \
      return U.Double.METHOD_CALL;                                             \
    llvm_unreachable("Unexpected semantics");                                  \
  } while (false)

namespace llvm {

struct fltSemantics;
class APSInt;
class StringRef;
class APFloat;
class raw_ostream;

template <typename T> class Expected;
template <typename T> class SmallVectorImpl;

/// Enum that represents what fraction of the LSB the truncated bits of an fp
/// number represent.
///
/// This essentially combines the roles of guard and sticky bits.
enum lostFraction { // Example of truncated bits:
  lfExactlyZero,    // 000000
  lfLessThanHalf,   // 0xxxxx  x's not all zero
  lfExactlyHalf,    // 100000
  lfMoreThanHalf    // 1xxxxx  x's not all zero
};

/// A self-contained host- and target-independent arbitrary-precision
/// floating-point software implementation.
///
/// APFloat uses bignum integer arithmetic as provided by static functions in
/// the APInt class.  The library will work with bignum integers whose parts are
/// any unsigned type at least 16 bits wide, but 64 bits is recommended.
///
/// Written for clarity rather than speed, in particular with a view to use in
/// the front-end of a cross compiler so that target arithmetic can be correctly
/// performed on the host.  Performance should nonetheless be reasonable,
/// particularly for its intended use.  It may be useful as a base
/// implementation for a run-time library during development of a faster
/// target-specific one.
///
/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
/// implemented operations.  Currently implemented operations are add, subtract,
/// multiply, divide, fused-multiply-add, conversion-to-float,
/// conversion-to-integer and conversion-from-integer.  New rounding modes
/// (e.g. away from zero) can be added with three or four lines of code.
///
/// Four formats are built-in: IEEE single precision, double precision,
/// quadruple precision, and x87 80-bit extended double (when operating with
/// full extended precision).  Adding a new format that obeys IEEE semantics
/// only requires adding two lines of code: a declaration and definition of the
/// format.
///
/// All operations return the status of that operation as an exception bit-mask,
/// so multiple operations can be done consecutively with their results or-ed
/// together.  The returned status can be useful for compiler diagnostics; e.g.,
/// inexact, underflow and overflow can be easily diagnosed on constant folding,
/// and compiler optimizers can determine what exceptions would be raised by
/// folding operations and optimize, or perhaps not optimize, accordingly.
///
/// At present, underflow tininess is detected after rounding; it should be
/// straightforward to add support for the before-rounding case too.
///
/// The library reads hexadecimal floating point numbers as per C99, and
/// correctly rounds if necessary according to the specified rounding mode.
/// Syntax is required to have been validated by the caller.  It also converts
/// floating point numbers to hexadecimal text as per the C99 %a and %A
/// conversions.  The output precision (or alternatively the natural minimal
/// precision) can be specified; if the requested precision is less than the
/// natural precision the output is correctly rounded for the specified rounding
/// mode.
///
/// It also reads decimal floating point numbers and correctly rounds according
/// to the specified rounding mode.
///
/// Conversion to decimal text is not currently implemented.
///
/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
/// signed exponent, and the significand as an array of integer parts.  After
/// normalization of a number of precision P the exponent is within the range of
/// the format, and if the number is not denormal the P-th bit of the
/// significand is set as an explicit integer bit.  For denormals the most
/// significant bit is shifted right so that the exponent is maintained at the
/// format's minimum, so that the smallest denormal has just the least
/// significant bit of the significand set.  The sign of zeroes and infinities
/// is significant; the exponent and significand of such numbers is not stored,
/// but has a known implicit (deterministic) value: 0 for the significands, 0
/// for zero exponent, all 1 bits for infinity exponent.  For NaNs the sign and
/// significand are deterministic, although not really meaningful, and preserved
/// in non-conversion operations.  The exponent is implicitly all 1 bits.
///
/// APFloat does not provide any exception handling beyond default exception
/// handling. We represent signaling NaNs per the IEEE-754R 2008 6.2.1 "should"
/// clause by encoding them with the first bit of the trailing significand set
/// to 0.
///
/// TODO
/// ====
///
/// Some features that may or may not be worth adding:
///
/// Binary to decimal conversion (hard).
///
/// Optional ability to detect underflow tininess before rounding.
///
/// New formats: x87 in single and double precision mode (IEEE apart from
/// extended exponent range) (hard).
///
/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
///

// This is the common type definitions shared by APFloat and its internal
// implementation classes. This struct should not define any non-static data
// members.
struct APFloatBase {
  typedef APInt::WordType integerPart;
  static constexpr unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;

  /// A signed type to represent a floating point number's unbiased exponent.
  typedef int32_t ExponentType;

  /// \name Floating Point Semantics.
  /// @{
  enum Semantics {
    S_IEEEhalf,
    S_BFloat,
    S_IEEEsingle,
    S_IEEEdouble,
    S_IEEEquad,
    S_PPCDoubleDouble,
    // 8-bit floating point number following IEEE-754 conventions with bit
    // layout S1E5M2 as described in https://arxiv.org/abs/2209.05433.
    S_Float8E5M2,
    // 8-bit floating point number mostly following IEEE-754 conventions
    // and bit layout S1E5M2 described in https://arxiv.org/abs/2206.02915,
    // with expanded range and with no infinity or signed zero.
    // NaN is represented as negative zero. (FN -> Finite, UZ -> unsigned zero).
    // This format's exponent bias is 16, instead of the 15 (2 ** (5 - 1) - 1)
    // that IEEE precedent would imply.
    S_Float8E5M2FNUZ,
    // 8-bit floating point number mostly following IEEE-754 conventions with
    // bit layout S1E4M3 as described in https://arxiv.org/abs/2209.05433.
    // Unlike IEEE-754 types, there are no infinity values, and NaN is
    // represented with the exponent and mantissa bits set to all 1s.
    S_Float8E4M3FN,
    // 8-bit floating point number mostly following IEEE-754 conventions
    // and bit layout S1E4M3 described in https://arxiv.org/abs/2206.02915,
    // with expanded range and with no infinity or signed zero.
    // NaN is represented as negative zero. (FN -> Finite, UZ -> unsigned zero).
    // This format's exponent bias is 8, instead of the 7 (2 ** (4 - 1) - 1)
    // that IEEE precedent would imply.
    S_Float8E4M3FNUZ,
    // 8-bit floating point number mostly following IEEE-754 conventions
    // and bit layout S1E4M3 with expanded range and with no infinity or signed
    // zero.
    // NaN is represented as negative zero. (FN -> Finite, UZ -> unsigned zero).
    // This format's exponent bias is 11, instead of the 7 (2 ** (4 - 1) - 1)
    // that IEEE precedent would imply.
    S_Float8E4M3B11FNUZ,
    // Floating point number that occupies 32 bits or less of storage, providing
    // improved range compared to half (16-bit) formats, at (potentially)
    // greater throughput than single precision (32-bit) formats.
    S_FloatTF32,

    S_x87DoubleExtended,
    S_MaxSemantics = S_x87DoubleExtended,
  };

  static const llvm::fltSemantics &EnumToSemantics(Semantics S);
  static Semantics SemanticsToEnum(const llvm::fltSemantics &Sem);

  static const fltSemantics &IEEEhalf() LLVM_READNONE;
  static const fltSemantics &BFloat() LLVM_READNONE;
  static const fltSemantics &IEEEsingle() LLVM_READNONE;
  static const fltSemantics &IEEEdouble() LLVM_READNONE;
  static const fltSemantics &IEEEquad() LLVM_READNONE;
  static const fltSemantics &PPCDoubleDouble() LLVM_READNONE;
  static const fltSemantics &Float8E5M2() LLVM_READNONE;
  static const fltSemantics &Float8E5M2FNUZ() LLVM_READNONE;
  static const fltSemantics &Float8E4M3FN() LLVM_READNONE;
  static const fltSemantics &Float8E4M3FNUZ() LLVM_READNONE;
  static const fltSemantics &Float8E4M3B11FNUZ() LLVM_READNONE;
  static const fltSemantics &FloatTF32() LLVM_READNONE;
  static const fltSemantics &x87DoubleExtended() LLVM_READNONE;

  /// A Pseudo fltsemantic used to construct APFloats that cannot conflict with
  /// anything real.
  static const fltSemantics &Bogus() LLVM_READNONE;

  /// @}

  /// IEEE-754R 5.11: Floating Point Comparison Relations.
  enum cmpResult {
    cmpLessThan,
    cmpEqual,
    cmpGreaterThan,
    cmpUnordered
  };

  /// IEEE-754R 4.3: Rounding-direction attributes.
  using roundingMode = llvm::RoundingMode;

  static constexpr roundingMode rmNearestTiesToEven =
                                                RoundingMode::NearestTiesToEven;
  static constexpr roundingMode rmTowardPositive = RoundingMode::TowardPositive;
  static constexpr roundingMode rmTowardNegative = RoundingMode::TowardNegative;
  static constexpr roundingMode rmTowardZero     = RoundingMode::TowardZero;
  static constexpr roundingMode rmNearestTiesToAway =
                                                RoundingMode::NearestTiesToAway;

  /// IEEE-754R 7: Default exception handling.
  ///
  /// opUnderflow or opOverflow are always returned or-ed with opInexact.
  ///
  /// APFloat models this behavior specified by IEEE-754:
  ///   "For operations producing results in floating-point format, the default
  ///    result of an operation that signals the invalid operation exception
  ///    shall be a quiet NaN."
  enum opStatus {
    opOK = 0x00,
    opInvalidOp = 0x01,
    opDivByZero = 0x02,
    opOverflow = 0x04,
    opUnderflow = 0x08,
    opInexact = 0x10
  };
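  // For illustration: statuses from consecutive operations can be or-ed
  // together and tested once, e.g. (RM names a rounding mode):
  //
  //   APFloat::opStatus S = X.add(Y, RM);
  //   S = (APFloat::opStatus)(S | X.multiply(Z, RM));
  //   if (S & APFloat::opInexact) {
  //     // some rounding occurred along the way
  //   }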

  /// Category of internally-represented number.
  enum fltCategory {
    fcInfinity,
    fcNaN,
    fcNormal,
    fcZero
  };

  /// Convenience enum used to construct an uninitialized APFloat.
  enum uninitializedTag {
    uninitialized
  };

  /// Enumeration of \c ilogb error results.
  enum IlogbErrorKinds {
    IEK_Zero = INT_MIN + 1,
    IEK_NaN = INT_MIN,
    IEK_Inf = INT_MAX
  };

  static unsigned int semanticsPrecision(const fltSemantics &);
  static ExponentType semanticsMinExponent(const fltSemantics &);
  static ExponentType semanticsMaxExponent(const fltSemantics &);
  static unsigned int semanticsSizeInBits(const fltSemantics &);
  static unsigned int semanticsIntSizeInBits(const fltSemantics&, bool);

  // Returns true if any number described by \p Src can be precisely represented
  // by a normal (not subnormal) value in \p Dst.
  static bool isRepresentableAsNormalIn(const fltSemantics &Src,
                                        const fltSemantics &Dst);

  /// Returns the size of the floating point number (in bits) in the given
  /// semantics.
  static unsigned getSizeInBits(const fltSemantics &Sem);
};

namespace detail {

class IEEEFloat final : public APFloatBase {
public:
  /// \name Constructors
  /// @{

  IEEEFloat(const fltSemantics &); // Default construct to +0.0
  IEEEFloat(const fltSemantics &, integerPart);
  IEEEFloat(const fltSemantics &, uninitializedTag);
  IEEEFloat(const fltSemantics &, const APInt &);
  explicit IEEEFloat(double d);
  explicit IEEEFloat(float f);
  IEEEFloat(const IEEEFloat &);
  IEEEFloat(IEEEFloat &&);
  ~IEEEFloat();

  /// @}

  /// Returns whether this instance allocated memory.
  bool needsCleanup() const { return partCount() > 1; }

  /// \name Convenience "constructors"
  /// @{

  /// @}

  /// \name Arithmetic
  /// @{

  opStatus add(const IEEEFloat &, roundingMode);
  opStatus subtract(const IEEEFloat &, roundingMode);
  opStatus multiply(const IEEEFloat &, roundingMode);
  opStatus divide(const IEEEFloat &, roundingMode);
  /// IEEE remainder.
  opStatus remainder(const IEEEFloat &);
  /// C fmod, or llvm frem.
  opStatus mod(const IEEEFloat &);
  opStatus fusedMultiplyAdd(const IEEEFloat &, const IEEEFloat &, roundingMode);
  opStatus roundToIntegral(roundingMode);
  /// IEEE-754R 5.3.1: nextUp/nextDown.
  opStatus next(bool nextDown);

  /// @}

  /// \name Sign operations.
  /// @{

  void changeSign();

  /// @}

  /// \name Conversions
  /// @{

  opStatus convert(const fltSemantics &, roundingMode, bool *);
  opStatus convertToInteger(MutableArrayRef<integerPart>, unsigned int, bool,
                            roundingMode, bool *) const;
  opStatus convertFromAPInt(const APInt &, bool, roundingMode);
  opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
                                          bool, roundingMode);
  opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
                                          bool, roundingMode);
  Expected<opStatus> convertFromString(StringRef, roundingMode);
  APInt bitcastToAPInt() const;
  double convertToDouble() const;
  float convertToFloat() const;

  /// @}

  /// The definition of equality is not straightforward for floating point, so
  /// we won't use operator==.  Use one of the following, or write whatever it
  /// is you really mean.
  bool operator==(const IEEEFloat &) const = delete;

  /// IEEE comparison with another floating point number (NaNs compare
  /// unordered, 0==-0).
  cmpResult compare(const IEEEFloat &) const;

  /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
  bool bitwiseIsEqual(const IEEEFloat &) const;

  /// Write out a hexadecimal representation of the floating point value to DST,
  /// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
  /// Return the number of characters written, excluding the terminating NUL.
  unsigned int convertToHexString(char *dst, unsigned int hexDigits,
                                  bool upperCase, roundingMode) const;

  /// \name IEEE-754R 5.7.2 General operations.
  /// @{

  /// IEEE-754R isSignMinus: Returns true if and only if the current value is
  /// negative.
  ///
  /// This applies to zeros and NaNs as well.
  bool isNegative() const { return sign; }

  /// IEEE-754R isNormal: Returns true if and only if the current value is normal.
  ///
  /// This implies that the current value of the float is not zero, subnormal,
  /// infinite, or NaN following the definition of normality from IEEE-754R.
  bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }

  /// Returns true if and only if the current value is zero, subnormal, or
  /// normal.
  ///
  /// This means that the value is not infinite or NaN.
  bool isFinite() const { return !isNaN() && !isInfinity(); }

  /// Returns true if and only if the float is plus or minus zero.
  bool isZero() const { return category == fcZero; }

  /// IEEE-754R isSubnormal(): Returns true if and only if the float is a
  /// denormal.
  bool isDenormal() const;

  /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
  bool isInfinity() const { return category == fcInfinity; }

  /// Returns true if and only if the float is a quiet or signaling NaN.
  bool isNaN() const { return category == fcNaN; }

  /// Returns true if and only if the float is a signaling NaN.
  bool isSignaling() const;

  /// @}

  /// \name Simple Queries
  /// @{

  fltCategory getCategory() const { return category; }
  const fltSemantics &getSemantics() const { return *semantics; }
  bool isNonZero() const { return category != fcZero; }
  bool isFiniteNonZero() const { return isFinite() && !isZero(); }
  bool isPosZero() const { return isZero() && !isNegative(); }
  bool isNegZero() const { return isZero() && isNegative(); }

  /// Returns true if and only if the number has the smallest possible non-zero
  /// magnitude in the current semantics.
  bool isSmallest() const;

  /// Returns true if this is the smallest (by magnitude) normalized finite
  /// number in the given semantics.
  bool isSmallestNormalized() const;

  /// Returns true if and only if the number has the largest possible finite
  /// magnitude in the current semantics.
  bool isLargest() const;

  /// Returns true if and only if the number is an exact integer.
  bool isInteger() const;

  /// @}

  IEEEFloat &operator=(const IEEEFloat &);
  IEEEFloat &operator=(IEEEFloat &&);

  /// Overload to compute a hash code for an APFloat value.
  ///
  /// Note that the use of hash codes for floating point values is in general
  /// fraught with peril. Equality is hard to define for these values. For
  /// example, should negative and positive zero hash to different codes? Are
  /// they equal or not? This hash value implementation specifically
  /// emphasizes producing different codes for different inputs in order to
  /// be used in canonicalization and memoization. As such, equality is
  /// bitwiseIsEqual, and 0 != -0.
  friend hash_code hash_value(const IEEEFloat &Arg);

  /// Converts this value into a decimal string.
  ///
  /// \param FormatPrecision The maximum number of digits of
  ///   precision to output.  If there are fewer digits available,
  ///   zero padding will not be used unless the value is
  ///   integral and small enough to be expressed in
  ///   FormatPrecision digits.  0 means to use the natural
  ///   precision of the number.
  /// \param FormatMaxPadding The maximum number of zeros to
  ///   consider inserting before falling back to scientific
  ///   notation.  0 means to always use scientific notation.
  ///
  /// \param TruncateZero Indicates whether to remove trailing zeros in the
  ///   fraction part. Setting this parameter to false also forces the output
  ///   to more closely match default printf behavior: the lowercase 'e' is
  ///   used as the exponent delimiter, and the exponent always contains at
  ///   least two digits.
  ///
  /// Number       Precision    MaxPadding      Result
  /// ------       ---------    ----------      ------
  /// 1.01E+4              5             2       10100
  /// 1.01E+4              4             2       1.01E+4
  /// 1.01E+4              5             1       1.01E+4
  /// 1.01E-2              5             2       0.0101
  /// 1.01E-2              4             2       0.0101
  /// 1.01E-2              4             1       1.01E-2
  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const;
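
  // Illustrative sketch of the table above, using the public APFloat wrapper
  // (which forwards to this method):
  // \code
  //   llvm::SmallString<16> Str;
  //   llvm::APFloat Val(10100.0); // 1.01E+4 in IEEEdouble semantics
  //   Val.toString(Str, /*FormatPrecision=*/5, /*FormatMaxPadding=*/2);
  //   // Str == "10100"
  //   Str.clear();
  //   Val.toString(Str, /*FormatPrecision=*/4, /*FormatMaxPadding=*/2);
  //   // Str == "1.01E+4"
  // \endcode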

  /// If this value has an exact multiplicative inverse, store it in inv and
  /// return true.
  bool getExactInverse(APFloat *inv) const;

  /// Returns the exponent of the internal representation of the APFloat.
  ///
  /// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)).
  /// For special APFloat values, this returns special error codes:
  ///
  ///   NaN -> \c IEK_NaN
  ///   0   -> \c IEK_Zero
  ///   Inf -> \c IEK_Inf
  ///
  friend int ilogb(const IEEEFloat &Arg);
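
  // Illustrative sketch: for finite non-zero values this behaves like
  // floor(log2(|x|)), e.g.
  // \code
  //   ilogb(llvm::APFloat(8.0));  // 3
  //   ilogb(llvm::APFloat(0.75)); // -1, since 0.75 == 1.5 * 2^-1
  // \endcode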

  /// Returns: X * 2^Exp for integral exponents.
  friend IEEEFloat scalbn(IEEEFloat X, int Exp, roundingMode);

  friend IEEEFloat frexp(const IEEEFloat &X, int &Exp, roundingMode);

  /// \name Special value setters.
  /// @{

  void makeLargest(bool Neg = false);
  void makeSmallest(bool Neg = false);
  void makeNaN(bool SNaN = false, bool Neg = false,
               const APInt *fill = nullptr);
  void makeInf(bool Neg = false);
  void makeZero(bool Neg = false);
  void makeQuiet();

  /// Returns the smallest (by magnitude) normalized finite number in the given
  /// semantics.
  ///
  /// \param Negative - True iff the number should be negative
  void makeSmallestNormalized(bool Negative = false);

  /// @}

  cmpResult compareAbsoluteValue(const IEEEFloat &) const;

private:
  /// \name Simple Queries
  /// @{

  integerPart *significandParts();
  const integerPart *significandParts() const;
  unsigned int partCount() const;

  /// @}

  /// \name Significand operations.
  /// @{

  integerPart addSignificand(const IEEEFloat &);
  integerPart subtractSignificand(const IEEEFloat &, integerPart);
  lostFraction addOrSubtractSignificand(const IEEEFloat &, bool subtract);
  lostFraction multiplySignificand(const IEEEFloat &, IEEEFloat);
  lostFraction multiplySignificand(const IEEEFloat&);
  lostFraction divideSignificand(const IEEEFloat &);
  void incrementSignificand();
  void initialize(const fltSemantics *);
  void shiftSignificandLeft(unsigned int);
  lostFraction shiftSignificandRight(unsigned int);
  unsigned int significandLSB() const;
  unsigned int significandMSB() const;
  void zeroSignificand();
  /// Return true if the significand excluding the integral bit is all ones.
  bool isSignificandAllOnes() const;
  bool isSignificandAllOnesExceptLSB() const;
  /// Return true if the significand excluding the integral bit is all zeros.
  bool isSignificandAllZeros() const;
  bool isSignificandAllZerosExceptMSB() const;

  /// @}

  /// \name Arithmetic on special values.
  /// @{

  opStatus addOrSubtractSpecials(const IEEEFloat &, bool subtract);
  opStatus divideSpecials(const IEEEFloat &);
  opStatus multiplySpecials(const IEEEFloat &);
  opStatus modSpecials(const IEEEFloat &);
  opStatus remainderSpecials(const IEEEFloat&);

  /// @}

  /// \name Miscellany
  /// @{

  bool convertFromStringSpecials(StringRef str);
  opStatus normalize(roundingMode, lostFraction);
  opStatus addOrSubtract(const IEEEFloat &, roundingMode, bool subtract);
  opStatus handleOverflow(roundingMode);
  bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
  opStatus convertToSignExtendedInteger(MutableArrayRef<integerPart>,
                                        unsigned int, bool, roundingMode,
                                        bool *) const;
  opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
                                    roundingMode);
  Expected<opStatus> convertFromHexadecimalString(StringRef, roundingMode);
  Expected<opStatus> convertFromDecimalString(StringRef, roundingMode);
  char *convertNormalToHexString(char *, unsigned int, bool,
                                 roundingMode) const;
  opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
                                        roundingMode);
  ExponentType exponentNaN() const;
  ExponentType exponentInf() const;
  ExponentType exponentZero() const;

  /// @}

  template <const fltSemantics &S> APInt convertIEEEFloatToAPInt() const;
  APInt convertHalfAPFloatToAPInt() const;
  APInt convertBFloatAPFloatToAPInt() const;
  APInt convertFloatAPFloatToAPInt() const;
  APInt convertDoubleAPFloatToAPInt() const;
  APInt convertQuadrupleAPFloatToAPInt() const;
  APInt convertF80LongDoubleAPFloatToAPInt() const;
  APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
  APInt convertFloat8E5M2APFloatToAPInt() const;
  APInt convertFloat8E5M2FNUZAPFloatToAPInt() const;
  APInt convertFloat8E4M3FNAPFloatToAPInt() const;
  APInt convertFloat8E4M3FNUZAPFloatToAPInt() const;
  APInt convertFloat8E4M3B11FNUZAPFloatToAPInt() const;
  APInt convertFloatTF32APFloatToAPInt() const;
  void initFromAPInt(const fltSemantics *Sem, const APInt &api);
  template <const fltSemantics &S> void initFromIEEEAPInt(const APInt &api);
  void initFromHalfAPInt(const APInt &api);
  void initFromBFloatAPInt(const APInt &api);
  void initFromFloatAPInt(const APInt &api);
  void initFromDoubleAPInt(const APInt &api);
  void initFromQuadrupleAPInt(const APInt &api);
  void initFromF80LongDoubleAPInt(const APInt &api);
  void initFromPPCDoubleDoubleAPInt(const APInt &api);
  void initFromFloat8E5M2APInt(const APInt &api);
  void initFromFloat8E5M2FNUZAPInt(const APInt &api);
  void initFromFloat8E4M3FNAPInt(const APInt &api);
  void initFromFloat8E4M3FNUZAPInt(const APInt &api);
  void initFromFloat8E4M3B11FNUZAPInt(const APInt &api);
  void initFromFloatTF32APInt(const APInt &api);

  void assign(const IEEEFloat &);
  void copySignificand(const IEEEFloat &);
  void freeSignificand();

  /// Note: this must be the first data member.
  /// The semantics that this value obeys.
  const fltSemantics *semantics;

  /// A binary fraction with an explicit integer bit.
  ///
  /// The significand must be at least one bit wider than the target precision.
  union Significand {
    integerPart part;
    integerPart *parts;
  } significand;

  /// The signed unbiased exponent of the value.
  ExponentType exponent;

  /// What kind of floating point number this is.
  ///
  /// Only 2 bits are required, but VisualStudio incorrectly sign extends it.
  /// Using the extra bit keeps it from failing under VisualStudio.
  fltCategory category : 3;

  /// Sign bit of the number.
  unsigned int sign : 1;
};

hash_code hash_value(const IEEEFloat &Arg);
int ilogb(const IEEEFloat &Arg);
IEEEFloat scalbn(IEEEFloat X, int Exp, IEEEFloat::roundingMode);
IEEEFloat frexp(const IEEEFloat &Val, int &Exp, IEEEFloat::roundingMode RM);

// This class implements a more precise float in terms of two APFloats.
// The interface and layout are designed for arbitrary underlying semantics,
// though currently only PPCDoubleDouble semantics are supported, whose
// corresponding underlying semantics are IEEEdouble.
class DoubleAPFloat final : public APFloatBase {
  // Note: this must be the first data member.
  const fltSemantics *Semantics;
  std::unique_ptr<APFloat[]> Floats;

  opStatus addImpl(const APFloat &a, const APFloat &aa, const APFloat &c,
                   const APFloat &cc, roundingMode RM);

  opStatus addWithSpecial(const DoubleAPFloat &LHS, const DoubleAPFloat &RHS,
                          DoubleAPFloat &Out, roundingMode RM);

public:
  DoubleAPFloat(const fltSemantics &S);
  DoubleAPFloat(const fltSemantics &S, uninitializedTag);
  DoubleAPFloat(const fltSemantics &S, integerPart);
  DoubleAPFloat(const fltSemantics &S, const APInt &I);
  DoubleAPFloat(const fltSemantics &S, APFloat &&First, APFloat &&Second);
  DoubleAPFloat(const DoubleAPFloat &RHS);
  DoubleAPFloat(DoubleAPFloat &&RHS);

  DoubleAPFloat &operator=(const DoubleAPFloat &RHS);
  inline DoubleAPFloat &operator=(DoubleAPFloat &&RHS);

  bool needsCleanup() const { return Floats != nullptr; }

  inline APFloat &getFirst();
  inline const APFloat &getFirst() const;
  inline APFloat &getSecond();
  inline const APFloat &getSecond() const;

  opStatus add(const DoubleAPFloat &RHS, roundingMode RM);
  opStatus subtract(const DoubleAPFloat &RHS, roundingMode RM);
  opStatus multiply(const DoubleAPFloat &RHS, roundingMode RM);
  opStatus divide(const DoubleAPFloat &RHS, roundingMode RM);
  opStatus remainder(const DoubleAPFloat &RHS);
  opStatus mod(const DoubleAPFloat &RHS);
  opStatus fusedMultiplyAdd(const DoubleAPFloat &Multiplicand,
                            const DoubleAPFloat &Addend, roundingMode RM);
  opStatus roundToIntegral(roundingMode RM);
  void changeSign();
  cmpResult compareAbsoluteValue(const DoubleAPFloat &RHS) const;

  fltCategory getCategory() const;
  bool isNegative() const;

  void makeInf(bool Neg);
  void makeZero(bool Neg);
  void makeLargest(bool Neg);
  void makeSmallest(bool Neg);
  void makeSmallestNormalized(bool Neg);
  void makeNaN(bool SNaN, bool Neg, const APInt *fill);

  cmpResult compare(const DoubleAPFloat &RHS) const;
  bool bitwiseIsEqual(const DoubleAPFloat &RHS) const;
  APInt bitcastToAPInt() const;
  Expected<opStatus> convertFromString(StringRef, roundingMode);
  opStatus next(bool nextDown);

  opStatus convertToInteger(MutableArrayRef<integerPart> Input,
                            unsigned int Width, bool IsSigned, roundingMode RM,
                            bool *IsExact) const;
  opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM);
  opStatus convertFromSignExtendedInteger(const integerPart *Input,
                                          unsigned int InputSize, bool IsSigned,
                                          roundingMode RM);
  opStatus convertFromZeroExtendedInteger(const integerPart *Input,
                                          unsigned int InputSize, bool IsSigned,
                                          roundingMode RM);
  unsigned int convertToHexString(char *DST, unsigned int HexDigits,
                                  bool UpperCase, roundingMode RM) const;

  bool isDenormal() const;
  bool isSmallest() const;
  bool isSmallestNormalized() const;
  bool isLargest() const;
  bool isInteger() const;

  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
                unsigned FormatMaxPadding, bool TruncateZero = true) const;

  bool getExactInverse(APFloat *inv) const;

  friend DoubleAPFloat scalbn(const DoubleAPFloat &X, int Exp, roundingMode);
  friend DoubleAPFloat frexp(const DoubleAPFloat &X, int &Exp, roundingMode);
  friend hash_code hash_value(const DoubleAPFloat &Arg);
};

hash_code hash_value(const DoubleAPFloat &Arg);

} // End detail namespace

// This is an interface class that currently forwards functionality from
// detail::IEEEFloat.
class APFloat : public APFloatBase {
  typedef detail::IEEEFloat IEEEFloat;
  typedef detail::DoubleAPFloat DoubleAPFloat;

  static_assert(std::is_standard_layout<IEEEFloat>::value);

  union Storage {
    const fltSemantics *semantics;
    IEEEFloat IEEE;
    DoubleAPFloat Double;

    explicit Storage(IEEEFloat F, const fltSemantics &S);
    explicit Storage(DoubleAPFloat F, const fltSemantics &S)
        : Double(std::move(F)) {
      assert(&S == &PPCDoubleDouble());
    }

    template <typename... ArgTypes>
    Storage(const fltSemantics &Semantics, ArgTypes &&... Args) {
      if (usesLayout<IEEEFloat>(Semantics)) {
        new (&IEEE) IEEEFloat(Semantics, std::forward<ArgTypes>(Args)...);
        return;
      }
      if (usesLayout<DoubleAPFloat>(Semantics)) {
        new (&Double) DoubleAPFloat(Semantics, std::forward<ArgTypes>(Args)...);
        return;
      }
      llvm_unreachable("Unexpected semantics");
    }

    ~Storage() {
      if (usesLayout<IEEEFloat>(*semantics)) {
        IEEE.~IEEEFloat();
        return;
      }
      if (usesLayout<DoubleAPFloat>(*semantics)) {
        Double.~DoubleAPFloat();
        return;
      }
      llvm_unreachable("Unexpected semantics");
    }

    Storage(const Storage &RHS) {
      if (usesLayout<IEEEFloat>(*RHS.semantics)) {
        new (this) IEEEFloat(RHS.IEEE);
        return;
      }
      if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
        new (this) DoubleAPFloat(RHS.Double);
        return;
      }
      llvm_unreachable("Unexpected semantics");
    }

    Storage(Storage &&RHS) {
      if (usesLayout<IEEEFloat>(*RHS.semantics)) {
        new (this) IEEEFloat(std::move(RHS.IEEE));
        return;
      }
      if (usesLayout<DoubleAPFloat>(*RHS.semantics)) {
        new (this) DoubleAPFloat(std::move(RHS.Double));
        return;
      }
      llvm_unreachable("Unexpected semantics");
    }

    Storage &operator=(const Storage &RHS) {
      if (usesLayout<IEEEFloat>(*semantics) &&
          usesLayout<IEEEFloat>(*RHS.semantics)) {
        IEEE = RHS.IEEE;
      } else if (usesLayout<DoubleAPFloat>(*semantics) &&
                 usesLayout<DoubleAPFloat>(*RHS.semantics)) {
        Double = RHS.Double;
      } else if (this != &RHS) {
        this->~Storage();
        new (this) Storage(RHS);
      }
      return *this;
    }

    Storage &operator=(Storage &&RHS) {
      if (usesLayout<IEEEFloat>(*semantics) &&
          usesLayout<IEEEFloat>(*RHS.semantics)) {
        IEEE = std::move(RHS.IEEE);
      } else if (usesLayout<DoubleAPFloat>(*semantics) &&
                 usesLayout<DoubleAPFloat>(*RHS.semantics)) {
        Double = std::move(RHS.Double);
      } else if (this != &RHS) {
        this->~Storage();
        new (this) Storage(std::move(RHS));
      }
      return *this;
    }
  } U;

  template <typename T> static bool usesLayout(const fltSemantics &Semantics) {
    static_assert(std::is_same<T, IEEEFloat>::value ||
                  std::is_same<T, DoubleAPFloat>::value);
    if (std::is_same<T, DoubleAPFloat>::value) {
      return &Semantics == &PPCDoubleDouble();
    }
    return &Semantics != &PPCDoubleDouble();
  }
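
  // Illustrative sketch of the dispatch rule above:
  // \code
  //   usesLayout<IEEEFloat>(IEEEdouble());          // true
  //   usesLayout<IEEEFloat>(PPCDoubleDouble());     // false
  //   usesLayout<DoubleAPFloat>(PPCDoubleDouble()); // true
  // \endcode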

  IEEEFloat &getIEEE() {
    if (usesLayout<IEEEFloat>(*U.semantics))
      return U.IEEE;
    if (usesLayout<DoubleAPFloat>(*U.semantics))
      return U.Double.getFirst().U.IEEE;
    llvm_unreachable("Unexpected semantics");
  }

  const IEEEFloat &getIEEE() const {
    if (usesLayout<IEEEFloat>(*U.semantics))
      return U.IEEE;
    if (usesLayout<DoubleAPFloat>(*U.semantics))
      return U.Double.getFirst().U.IEEE;
    llvm_unreachable("Unexpected semantics");
  }

  void makeZero(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeZero(Neg)); }

  void makeInf(bool Neg) { APFLOAT_DISPATCH_ON_SEMANTICS(makeInf(Neg)); }

  void makeNaN(bool SNaN, bool Neg, const APInt *fill) {
    APFLOAT_DISPATCH_ON_SEMANTICS(makeNaN(SNaN, Neg, fill));
  }

  void makeLargest(bool Neg) {
    APFLOAT_DISPATCH_ON_SEMANTICS(makeLargest(Neg));
  }

  void makeSmallest(bool Neg) {
    APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallest(Neg));
  }

  void makeSmallestNormalized(bool Neg) {
    APFLOAT_DISPATCH_ON_SEMANTICS(makeSmallestNormalized(Neg));
  }

  explicit APFloat(IEEEFloat F, const fltSemantics &S) : U(std::move(F), S) {}
  explicit APFloat(DoubleAPFloat F, const fltSemantics &S)
      : U(std::move(F), S) {}

  cmpResult compareAbsoluteValue(const APFloat &RHS) const {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only compare APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.compareAbsoluteValue(RHS.U.IEEE);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.compareAbsoluteValue(RHS.U.Double);
    llvm_unreachable("Unexpected semantics");
  }

public:
  APFloat(const fltSemantics &Semantics) : U(Semantics) {}
  APFloat(const fltSemantics &Semantics, StringRef S);
  APFloat(const fltSemantics &Semantics, integerPart I) : U(Semantics, I) {}
  template <typename T,
            typename = std::enable_if_t<std::is_floating_point<T>::value>>
  APFloat(const fltSemantics &Semantics, T V) = delete;
  // TODO: Remove this constructor. This isn't faster than the first one.
  APFloat(const fltSemantics &Semantics, uninitializedTag)
      : U(Semantics, uninitialized) {}
  APFloat(const fltSemantics &Semantics, const APInt &I) : U(Semantics, I) {}
  explicit APFloat(double d) : U(IEEEFloat(d), IEEEdouble()) {}
  explicit APFloat(float f) : U(IEEEFloat(f), IEEEsingle()) {}
  APFloat(const APFloat &RHS) = default;
  APFloat(APFloat &&RHS) = default;

  ~APFloat() = default;

  bool needsCleanup() const { APFLOAT_DISPATCH_ON_SEMANTICS(needsCleanup()); }

  /// Factory for Positive and Negative Zero.
  ///
  /// \param Negative True iff the number should be negative.
  static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
    APFloat Val(Sem, uninitialized);
    Val.makeZero(Negative);
    return Val;
  }

  /// Factory for Positive and Negative Infinity.
  ///
  /// \param Negative True iff the number should be negative.
  static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
    APFloat Val(Sem, uninitialized);
    Val.makeInf(Negative);
    return Val;
  }

  /// Factory for NaN values.
  ///
  /// \param Negative - True iff the NaN generated should be negative.
  /// \param payload - The unspecified fill bits for creating the NaN, 0 by
  /// default.  The value is truncated as necessary.
  static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
                        uint64_t payload = 0) {
    if (payload) {
      APInt intPayload(64, payload);
      return getQNaN(Sem, Negative, &intPayload);
    } else {
      return getQNaN(Sem, Negative, nullptr);
    }
  }
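
  // Illustrative sketch: the payload seeds the NaN's trailing significand
  // bits and is truncated as needed to fit the target semantics.
  // \code
  //   llvm::APFloat N = llvm::APFloat::getNaN(llvm::APFloat::IEEEsingle(),
  //                                           /*Negative=*/false,
  //                                           /*payload=*/1);
  //   // N.isNaN() && !N.isSignaling()
  // \endcode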

  /// Factory for QNaN values.
  static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
                         const APInt *payload = nullptr) {
    APFloat Val(Sem, uninitialized);
    Val.makeNaN(false, Negative, payload);
    return Val;
  }

  /// Factory for SNaN values.
  static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
                         const APInt *payload = nullptr) {
    APFloat Val(Sem, uninitialized);
    Val.makeNaN(true, Negative, payload);
    return Val;
  }

  /// Returns the largest finite number in the given semantics.
  ///
  /// \param Negative - True iff the number should be negative
  static APFloat getLargest(const fltSemantics &Sem, bool Negative = false) {
    APFloat Val(Sem, uninitialized);
    Val.makeLargest(Negative);
    return Val;
  }

  /// Returns the smallest (by magnitude) finite number in the given semantics.
  /// Might be denormalized, which implies a relative loss of precision.
  ///
  /// \param Negative - True iff the number should be negative
  static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false) {
    APFloat Val(Sem, uninitialized);
    Val.makeSmallest(Negative);
    return Val;
  }

  /// Returns the smallest (by magnitude) normalized finite number in the given
  /// semantics.
  ///
  /// \param Negative - True iff the number should be negative
  static APFloat getSmallestNormalized(const fltSemantics &Sem,
                                       bool Negative = false) {
    APFloat Val(Sem, uninitialized);
    Val.makeSmallestNormalized(Negative);
    return Val;
  }

  /// Returns a float which is bitcast from an all-ones integer value.
  ///
  /// \param Semantics - the float semantics to use
  static APFloat getAllOnesValue(const fltSemantics &Semantics);

  /// Used to insert APFloat objects, or objects that contain APFloat objects,
  /// into FoldingSets.
  void Profile(FoldingSetNodeID &NID) const;

  opStatus add(const APFloat &RHS, roundingMode RM) {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only call on two APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.add(RHS.U.IEEE, RM);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.add(RHS.U.Double, RM);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus subtract(const APFloat &RHS, roundingMode RM) {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only call on two APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.subtract(RHS.U.IEEE, RM);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.subtract(RHS.U.Double, RM);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus multiply(const APFloat &RHS, roundingMode RM) {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only call on two APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.multiply(RHS.U.IEEE, RM);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.multiply(RHS.U.Double, RM);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus divide(const APFloat &RHS, roundingMode RM) {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only call on two APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.divide(RHS.U.IEEE, RM);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.divide(RHS.U.Double, RM);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus remainder(const APFloat &RHS) {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only call on two APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.remainder(RHS.U.IEEE);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.remainder(RHS.U.Double);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus mod(const APFloat &RHS) {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only call on two APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.mod(RHS.U.IEEE);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.mod(RHS.U.Double);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus fusedMultiplyAdd(const APFloat &Multiplicand, const APFloat &Addend,
                            roundingMode RM) {
    assert(&getSemantics() == &Multiplicand.getSemantics() &&
           "Should only call on APFloats with the same semantics");
    assert(&getSemantics() == &Addend.getSemantics() &&
           "Should only call on APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.fusedMultiplyAdd(Multiplicand.U.IEEE, Addend.U.IEEE, RM);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.fusedMultiplyAdd(Multiplicand.U.Double, Addend.U.Double,
                                       RM);
    llvm_unreachable("Unexpected semantics");
  }
  opStatus roundToIntegral(roundingMode RM) {
    APFLOAT_DISPATCH_ON_SEMANTICS(roundToIntegral(RM));
  }

  // TODO: bool parameters are not readable and a source of bugs.
  // Do something.
  opStatus next(bool nextDown) {
    APFLOAT_DISPATCH_ON_SEMANTICS(next(nextDown));
  }

  /// Negate an APFloat.
  APFloat operator-() const {
    APFloat Result(*this);
    Result.changeSign();
    return Result;
  }

  /// Add two APFloats, rounding ties to the nearest even.
  /// No error checking.
  APFloat operator+(const APFloat &RHS) const {
    APFloat Result(*this);
    (void)Result.add(RHS, rmNearestTiesToEven);
    return Result;
  }

  /// Subtract two APFloats, rounding ties to the nearest even.
  /// No error checking.
  APFloat operator-(const APFloat &RHS) const {
    APFloat Result(*this);
    (void)Result.subtract(RHS, rmNearestTiesToEven);
    return Result;
  }

  /// Multiply two APFloats, rounding ties to the nearest even.
  /// No error checking.
  APFloat operator*(const APFloat &RHS) const {
    APFloat Result(*this);
    (void)Result.multiply(RHS, rmNearestTiesToEven);
    return Result;
  }

  /// Divide the first APFloat by the second, rounding ties to the nearest even.
  /// No error checking.
  APFloat operator/(const APFloat &RHS) const {
    APFloat Result(*this);
    (void)Result.divide(RHS, rmNearestTiesToEven);
    return Result;
  }
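
  // Illustrative sketch: the operators above always round ties to nearest
  // even and discard the opStatus; use the explicit methods when the status
  // or another rounding mode matters.
  // \code
  //   llvm::APFloat A(1.5), B(2.25);
  //   llvm::APFloat Sum = A + B;                 // 3.75, status discarded
  //   llvm::APFloat::opStatus St =
  //       A.add(B, llvm::APFloat::rmTowardZero); // explicit form
  // \endcode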

  void changeSign() { APFLOAT_DISPATCH_ON_SEMANTICS(changeSign()); }
  void clearSign() {
    if (isNegative())
      changeSign();
  }
  void copySign(const APFloat &RHS) {
    if (isNegative() != RHS.isNegative())
      changeSign();
  }

  /// A static helper to produce a copy of an APFloat value with its sign
  /// copied from some other APFloat.
  static APFloat copySign(APFloat Value, const APFloat &Sign) {
    Value.copySign(Sign);
    return Value;
  }

  /// Assuming this is an IEEE-754 NaN value, quiet its signaling bit.
  /// This preserves the sign and payload bits.
  APFloat makeQuiet() const {
    APFloat Result(*this);
    Result.getIEEE().makeQuiet();
    return Result;
  }

  opStatus convert(const fltSemantics &ToSemantics, roundingMode RM,
                   bool *losesInfo);
  opStatus convertToInteger(MutableArrayRef<integerPart> Input,
                            unsigned int Width, bool IsSigned, roundingMode RM,
                            bool *IsExact) const {
    APFLOAT_DISPATCH_ON_SEMANTICS(
        convertToInteger(Input, Width, IsSigned, RM, IsExact));
  }
  opStatus convertToInteger(APSInt &Result, roundingMode RM,
                            bool *IsExact) const;
  opStatus convertFromAPInt(const APInt &Input, bool IsSigned,
                            roundingMode RM) {
    APFLOAT_DISPATCH_ON_SEMANTICS(convertFromAPInt(Input, IsSigned, RM));
  }
  opStatus convertFromSignExtendedInteger(const integerPart *Input,
                                          unsigned int InputSize, bool IsSigned,
                                          roundingMode RM) {
    APFLOAT_DISPATCH_ON_SEMANTICS(
        convertFromSignExtendedInteger(Input, InputSize, IsSigned, RM));
  }
  opStatus convertFromZeroExtendedInteger(const integerPart *Input,
                                          unsigned int InputSize, bool IsSigned,
                                          roundingMode RM) {
    APFLOAT_DISPATCH_ON_SEMANTICS(
        convertFromZeroExtendedInteger(Input, InputSize, IsSigned, RM));
  }
  Expected<opStatus> convertFromString(StringRef, roundingMode);
  APInt bitcastToAPInt() const {
    APFLOAT_DISPATCH_ON_SEMANTICS(bitcastToAPInt());
  }

  /// Converts this APFloat to a host double value.
  ///
  /// \pre The APFloat must be built using semantics that can be represented by
  /// the host double type without loss of precision. This includes IEEEdouble
  /// and narrower semantics, such as IEEEsingle.
  double convertToDouble() const;

  /// Converts this APFloat to a host float value.
  ///
  /// \pre The APFloat must be built using semantics that can be represented by
  /// the host float type without loss of precision. This includes IEEEsingle
  /// and narrower semantics, such as IEEEhalf.
  float convertToFloat() const;

  bool operator==(const APFloat &RHS) const { return compare(RHS) == cmpEqual; }

  bool operator!=(const APFloat &RHS) const { return compare(RHS) != cmpEqual; }

  bool operator<(const APFloat &RHS) const {
    return compare(RHS) == cmpLessThan;
  }

  bool operator>(const APFloat &RHS) const {
    return compare(RHS) == cmpGreaterThan;
  }

  bool operator<=(const APFloat &RHS) const {
    cmpResult Res = compare(RHS);
    return Res == cmpLessThan || Res == cmpEqual;
  }

  bool operator>=(const APFloat &RHS) const {
    cmpResult Res = compare(RHS);
    return Res == cmpGreaterThan || Res == cmpEqual;
  }

  cmpResult compare(const APFloat &RHS) const {
    assert(&getSemantics() == &RHS.getSemantics() &&
           "Should only compare APFloats with the same semantics");
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.compare(RHS.U.IEEE);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.compare(RHS.U.Double);
    llvm_unreachable("Unexpected semantics");
  }

  bool bitwiseIsEqual(const APFloat &RHS) const {
    if (&getSemantics() != &RHS.getSemantics())
      return false;
    if (usesLayout<IEEEFloat>(getSemantics()))
      return U.IEEE.bitwiseIsEqual(RHS.U.IEEE);
    if (usesLayout<DoubleAPFloat>(getSemantics()))
      return U.Double.bitwiseIsEqual(RHS.U.Double);
    llvm_unreachable("Unexpected semantics");
  }

  /// We don't rely on operator== working on double values, as
  /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
  /// As such, this method can be used to do an exact bit-for-bit comparison of
  /// two floating point values.
  ///
  /// We leave the version with the double argument here because it's just so
  /// convenient to write "2.0" and the like.  Without this function we'd
  /// have to duplicate its logic everywhere it's called.
  bool isExactlyValue(double V) const {
    bool ignored;
    APFloat Tmp(V);
    Tmp.convert(getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
    return bitwiseIsEqual(Tmp);
  }
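
  // Illustrative sketch: the comparison is bit-for-bit after conversion, so
  // signed zeros are distinguished.
  // \code
  //   llvm::APFloat Z = llvm::APFloat::getZero(llvm::APFloat::IEEEsingle());
  //   Z.isExactlyValue(0.0);  // true
  //   Z.isExactlyValue(-0.0); // false: the sign bit differs
  // \endcode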

  unsigned int convertToHexString(char *DST, unsigned int HexDigits,
                                  bool UpperCase, roundingMode RM) const {
    APFLOAT_DISPATCH_ON_SEMANTICS(
        convertToHexString(DST, HexDigits, UpperCase, RM));
  }

  bool isZero() const { return getCategory() == fcZero; }
  bool isInfinity() const { return getCategory() == fcInfinity; }
  bool isNaN() const { return getCategory() == fcNaN; }

  bool isNegative() const { return getIEEE().isNegative(); }
  bool isDenormal() const { APFLOAT_DISPATCH_ON_SEMANTICS(isDenormal()); }
  bool isSignaling() const { return getIEEE().isSignaling(); }

  bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
  bool isFinite() const { return !isNaN() && !isInfinity(); }

  fltCategory getCategory() const { return getIEEE().getCategory(); }
  const fltSemantics &getSemantics() const { return *U.semantics; }
  bool isNonZero() const { return !isZero(); }
  bool isFiniteNonZero() const { return isFinite() && !isZero(); }
  bool isPosZero() const { return isZero() && !isNegative(); }
  bool isNegZero() const { return isZero() && isNegative(); }
  bool isPosInfinity() const { return isInfinity() && !isNegative(); }
  bool isNegInfinity() const { return isInfinity() && isNegative(); }
  bool isSmallest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isSmallest()); }
  bool isLargest() const { APFLOAT_DISPATCH_ON_SEMANTICS(isLargest()); }
  bool isInteger() const { APFLOAT_DISPATCH_ON_SEMANTICS(isInteger()); }
  bool isIEEE() const { return usesLayout<IEEEFloat>(getSemantics()); }

  bool isSmallestNormalized() const {
    APFLOAT_DISPATCH_ON_SEMANTICS(isSmallestNormalized());
  }

  /// Return the FPClassTest which will return true for the value.
  FPClassTest classify() const;

  APFloat &operator=(const APFloat &RHS) = default;
  APFloat &operator=(APFloat &&RHS) = default;

  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const {
    APFLOAT_DISPATCH_ON_SEMANTICS(
        toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero));
  }

  void print(raw_ostream &) const;
  void dump() const;

  bool getExactInverse(APFloat *inv) const {
    APFLOAT_DISPATCH_ON_SEMANTICS(getExactInverse(inv));
  }
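
  // Illustrative sketch: only values whose reciprocal is exactly
  // representable report an exact inverse.
  // \code
  //   llvm::APFloat Two(2.0), Inv(0.0);
  //   Two.getExactInverse(&Inv);                // true, Inv == 0.5
  //   llvm::APFloat(3.0).getExactInverse(&Inv); // false: 1/3 is inexact
  // \endcode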

  friend hash_code hash_value(const APFloat &Arg);
  friend int ilogb(const APFloat &Arg) { return ilogb(Arg.getIEEE()); }
  friend APFloat scalbn(APFloat X, int Exp, roundingMode RM);
  friend APFloat frexp(const APFloat &X, int &Exp, roundingMode RM);
  friend IEEEFloat;
  friend DoubleAPFloat;
};

/// See friend declarations above.
///
/// These additional declarations are required in order to compile LLVM with the
/// IBM xlC compiler.
hash_code hash_value(const APFloat &Arg);
inline APFloat scalbn(APFloat X, int Exp, APFloat::roundingMode RM) {
  if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
    return APFloat(scalbn(X.U.IEEE, Exp, RM), X.getSemantics());
  if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
    return APFloat(scalbn(X.U.Double, Exp, RM), X.getSemantics());
  llvm_unreachable("Unexpected semantics");
}

/// Equivalent of C standard library function.
///
/// While the C standard says Exp is an unspecified value for infinity and NaN,
/// this returns INT_MAX for infinities, and INT_MIN for NaNs.
inline APFloat frexp(const APFloat &X, int &Exp, APFloat::roundingMode RM) {
  if (APFloat::usesLayout<detail::IEEEFloat>(X.getSemantics()))
    return APFloat(frexp(X.U.IEEE, Exp, RM), X.getSemantics());
  if (APFloat::usesLayout<detail::DoubleAPFloat>(X.getSemantics()))
    return APFloat(frexp(X.U.Double, Exp, RM), X.getSemantics());
  llvm_unreachable("Unexpected semantics");
}
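
// A minimal usage sketch (illustrative): frexp splits a value into a
// fraction and a power-of-two exponent; scalbn undoes the split.
// \code
//   llvm::APFloat V(12.0);
//   int Exp;
//   llvm::APFloat Frac = frexp(V, Exp, llvm::APFloat::rmNearestTiesToEven);
//   // Frac == 0.75, Exp == 4, since 12.0 == 0.75 * 2^4
//   llvm::APFloat Back = scalbn(Frac, Exp, llvm::APFloat::rmNearestTiesToEven);
//   // Back.isExactlyValue(12.0)
// \endcode
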
/// Returns the absolute value of the argument.
inline APFloat abs(APFloat X) {
  X.clearSign();
  return X;
}

/// Returns the negated value of the argument.
inline APFloat neg(APFloat X) {
  X.changeSign();
  return X;
}

/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
LLVM_READONLY
inline APFloat minnum(const APFloat &A, const APFloat &B) {
  if (A.isNaN())
    return B;
  if (B.isNaN())
    return A;
  return B < A ? B : A;
}

/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
/// both are not NaN. If either argument is a NaN, returns the other argument.
LLVM_READONLY
inline APFloat maxnum(const APFloat &A, const APFloat &B) {
  if (A.isNaN())
    return B;
  if (B.isNaN())
    return A;
  return A < B ? B : A;
}

/// Implements IEEE 754-2018 minimum semantics. Returns the smaller of 2
/// arguments, propagating NaNs and treating -0 as less than +0.
LLVM_READONLY
inline APFloat minimum(const APFloat &A, const APFloat &B) {
  if (A.isNaN())
    return A;
  if (B.isNaN())
    return B;
  if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
    return A.isNegative() ? A : B;
  return B < A ? B : A;
}

/// Implements IEEE 754-2018 maximum semantics. Returns the larger of 2
/// arguments, propagating NaNs and treating -0 as less than +0.
LLVM_READONLY
inline APFloat maximum(const APFloat &A, const APFloat &B) {
  if (A.isNaN())
    return A;
  if (B.isNaN())
    return B;
  if (A.isZero() && B.isZero() && (A.isNegative() != B.isNegative()))
    return A.isNegative() ? B : A;
  return A < B ? B : A;
}
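
// A minimal sketch (illustrative) contrasting the NaN behavior documented
// above:
// \code
//   llvm::APFloat QNaN = llvm::APFloat::getQNaN(llvm::APFloat::IEEEdouble());
//   llvm::APFloat One(1.0);
//   llvm::APFloat A = llvm::minnum(QNaN, One);  // 1.0: the NaN is dropped
//   llvm::APFloat B = llvm::minimum(QNaN, One); // NaN: the NaN propagates
// \endcode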

// We want the following functions to be available in the header for inlining.
// We cannot define them inline in the class definition of `DoubleAPFloat`
// because doing so would instantiate `std::unique_ptr<APFloat[]>` before
// `APFloat` is defined, and that would be undefined behavior.
namespace detail {

DoubleAPFloat &DoubleAPFloat::operator=(DoubleAPFloat &&RHS) {
  if (this != &RHS) {
    this->~DoubleAPFloat();
    new (this) DoubleAPFloat(std::move(RHS));
  }
  return *this;
}

APFloat &DoubleAPFloat::getFirst() { return Floats[0]; }
const APFloat &DoubleAPFloat::getFirst() const { return Floats[0]; }
APFloat &DoubleAPFloat::getSecond() { return Floats[1]; }
const APFloat &DoubleAPFloat::getSecond() const { return Floats[1]; }

} // namespace detail

} // namespace llvm

#undef APFLOAT_DISPATCH_ON_SEMANTICS
#endif // LLVM_ADT_APFLOAT_H

//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SparseBitVector class.  See the doxygen comment for
/// SparseBitVector for more details on the algorithm used.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SPARSEBITVECTOR_H
#define LLVM_ADT_SPARSEBITVECTOR_H

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <climits>
#include <cstring>
#include <iterator>
#include <list>

namespace llvm {

/// SparseBitVector is an implementation of a bitvector that is sparse by only
/// storing the elements that have non-zero bits set.  In order to make this
/// fast for the most common cases, SparseBitVector is implemented as a linked
/// list of SparseBitVectorElements.  We maintain a pointer to the last
/// SparseBitVectorElement accessed (in the form of a list iterator), in order
/// to make multiple in-order test/set constant time after the first one is
/// executed.  Note that using vectors to store SparseBitVectorElements does
/// not work out very well because it causes insertion in the middle to take
/// enormous amounts of time with a large amount of bits.  Other structures that
/// have better worst cases for insertion in the middle (various balanced trees,
/// etc) do not perform as well in practice as a linked list with this iterator
/// kept up to date.  They are also significantly more memory intensive.
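
// A minimal usage sketch (illustrative): far-apart indices stay cheap to
// store, and in-order test/set sequences benefit from the cached iterator.
// \code
//   llvm::SparseBitVector<> Live;
//   Live.set(5);
//   Live.set(1000000);
//   bool B = Live.test(5); // true
//   Live.reset(5);
//   for (unsigned Bit : Live) { /* visits bit 1000000 */ }
// \endcode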

template <unsigned ElementSize = 128> struct SparseBitVectorElement {
public:
  using BitWord = unsigned long;
  using size_type = unsigned;
  enum {
    BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
    BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
    BITS_PER_ELEMENT = ElementSize
  };

private:
  // Index of Element in terms of where first bit starts.
  unsigned ElementIndex;
  BitWord Bits[BITWORDS_PER_ELEMENT];

  SparseBitVectorElement() {
    ElementIndex = ~0U;
    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
  }

public:
  explicit SparseBitVectorElement(unsigned Idx) {
    ElementIndex = Idx;
    memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
  }

  // Comparison.
  bool operator==(const SparseBitVectorElement &RHS) const {
    if (ElementIndex != RHS.ElementIndex)
      return false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i] != RHS.Bits[i])
        return false;
    return true;
  }

  bool operator!=(const SparseBitVectorElement &RHS) const {
    return !(*this == RHS);
  }

  // Return the bits that make up word Idx in our element.
  BitWord word(unsigned Idx) const {
    assert(Idx < BITWORDS_PER_ELEMENT);
    return Bits[Idx];
  }

  unsigned index() const {
    return ElementIndex;
  }

  bool empty() const {
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i])
        return false;
    return true;
  }

  void set(unsigned Idx) {
    Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
  }

  bool test_and_set(unsigned Idx) {
    bool old = test(Idx);
    if (!old) {
      set(Idx);
      return true;
    }
    return false;
  }

  void reset(unsigned Idx) {
    Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
  }

  bool test(unsigned Idx) const {
    return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE));
  }

  size_type count() const {
    unsigned NumBits = 0;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      NumBits += llvm::popcount(Bits[i]);
    return NumBits;
  }

  /// find_first - Returns the index of the first set bit.
  int find_first() const {
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i] != 0)
        return i * BITWORD_SIZE + llvm::countr_zero(Bits[i]);
    llvm_unreachable("Illegal empty element");
  }

  /// find_last - Returns the index of the last set bit.
  int find_last() const {
    for (unsigned I = 0; I < BITWORDS_PER_ELEMENT; ++I) {
      unsigned Idx = BITWORDS_PER_ELEMENT - I - 1;
      if (Bits[Idx] != 0)
        return Idx * BITWORD_SIZE + BITWORD_SIZE -
               llvm::countl_zero(Bits[Idx]) - 1;
    }
    llvm_unreachable("Illegal empty element");
  }

  /// find_next - Returns the index of the next set bit starting from the
  /// "Curr" bit. Returns -1 if the next set bit is not found.
  int find_next(unsigned Curr) const {
    if (Curr >= BITS_PER_ELEMENT)
      return -1;

    unsigned WordPos = Curr / BITWORD_SIZE;
    unsigned BitPos = Curr % BITWORD_SIZE;
    assert(WordPos < BITWORDS_PER_ELEMENT &&
           "Word Position outside of element");
    BitWord Copy = Bits[WordPos];

    // Mask off previous bits.
    Copy &= ~0UL << BitPos;

    if (Copy != 0)
      return WordPos * BITWORD_SIZE + llvm::countr_zero(Copy);

    // Check subsequent words.
    for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
      if (Bits[i] != 0)
        return i * BITWORD_SIZE + llvm::countr_zero(Bits[i]);
    return -1;
  }

  // Union this element with RHS and return true if this one changed.
  bool unionWith(const SparseBitVectorElement &RHS) {
    bool changed = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      BitWord old = changed ? 0 : Bits[i];

      Bits[i] |= RHS.Bits[i];
      if (!changed && old != Bits[i])
        changed = true;
    }
    return changed;
  }

  // Return true if we have any bits in common with RHS
  bool intersects(const SparseBitVectorElement &RHS) const {
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      if (RHS.Bits[i] & Bits[i])
        return true;
    }
    return false;
  }

  // Intersect this Element with RHS and return true if this one changed.
  // BecameZero is set to true if this element became all-zero bits.
  bool intersectWith(const SparseBitVectorElement &RHS,
                     bool &BecameZero) {
    bool changed = false;
    bool allzero = true;

    BecameZero = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      BitWord old = changed ? 0 : Bits[i];

      Bits[i] &= RHS.Bits[i];
      if (Bits[i] != 0)
        allzero = false;

      if (!changed && old != Bits[i])
        changed = true;
    }
    BecameZero = allzero;
    return changed;
  }

  // Intersect this Element with the complement of RHS and return true if this
  // one changed.  BecameZero is set to true if this element became all-zero
  // bits.
  bool intersectWithComplement(const SparseBitVectorElement &RHS,
                               bool &BecameZero) {
    bool changed = false;
    bool allzero = true;

    BecameZero = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      BitWord old = changed ? 0 : Bits[i];

      Bits[i] &= ~RHS.Bits[i];
      if (Bits[i] != 0)
        allzero = false;

      if (!changed && old != Bits[i])
        changed = true;
    }
    BecameZero = allzero;
    return changed;
  }

  // Three argument version of intersectWithComplement that intersects
  // RHS1 & ~RHS2 into this element
  void intersectWithComplement(const SparseBitVectorElement &RHS1,
                               const SparseBitVectorElement &RHS2,
                               bool &BecameZero) {
    bool allzero = true;

    BecameZero = false;
    for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
      Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
      if (Bits[i] != 0)
        allzero = false;
    }
    BecameZero = allzero;
  }
};

template <unsigned ElementSize = 128>
class SparseBitVector {
  using ElementList = std::list<SparseBitVectorElement<ElementSize>>;
  using ElementListIter = typename ElementList::iterator;
  using ElementListConstIter = typename ElementList::const_iterator;
  enum {
    BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
  };

  ElementList Elements;
  // Pointer to our current Element. This has no visible effect on the external
  // state of a SparseBitVector; it's just used to improve performance in the
  // common case of testing/modifying bits with similar indices.
  mutable ElementListIter CurrElementIter;

  // This is like std::lower_bound, except we do linear searching from the
  // current position.
  ElementListIter FindLowerBoundImpl(unsigned ElementIndex) const {

    // We cache a non-const iterator, so we have to resort to const_cast to get
    // begin/end in the case where 'this' is const. To avoid duplicating code
    // whose only difference is the presence of the const cast, 'this' is
    // always const in this particular function, and we sort out the difference
    // in FindLowerBound and FindLowerBoundConst.
    ElementListIter Begin =
        const_cast<SparseBitVector<ElementSize> *>(this)->Elements.begin();
    ElementListIter End =
        const_cast<SparseBitVector<ElementSize> *>(this)->Elements.end();

    if (Elements.empty()) {
      CurrElementIter = Begin;
      return CurrElementIter;
    }

    // Make sure our current iterator is valid.
    if (CurrElementIter == End)
      --CurrElementIter;

    // Search from our current iterator, either backwards or forwards,
    // depending on what element we are looking for.
    ElementListIter ElementIter = CurrElementIter;
    if (CurrElementIter->index() == ElementIndex) {
      return ElementIter;
    } else if (CurrElementIter->index() > ElementIndex) {
      while (ElementIter != Begin
             && ElementIter->index() > ElementIndex)
        --ElementIter;
    } else {
      while (ElementIter != End &&
             ElementIter->index() < ElementIndex)
        ++ElementIter;
    }
    CurrElementIter = ElementIter;
    return ElementIter;
  }
  ElementListConstIter FindLowerBoundConst(unsigned ElementIndex) const {
    return FindLowerBoundImpl(ElementIndex);
  }
  ElementListIter FindLowerBound(unsigned ElementIndex) {
    return FindLowerBoundImpl(ElementIndex);
  }

  // Iterator to walk the set bits in the bitmap.  This iterator is a lot
  // uglier than it otherwise would be, in order to be efficient.
  class SparseBitVectorIterator {
  private:
    bool AtEnd;

    const SparseBitVector<ElementSize> *BitVector = nullptr;

    // Current element inside of bitmap.
    ElementListConstIter Iter;

    // Current bit number inside of our bitmap.
    unsigned BitNumber;

    // Current word number inside of our element.
    unsigned WordNumber;

    // Current bits from the element.
    typename SparseBitVectorElement<ElementSize>::BitWord Bits;

    // Move our iterator to the first non-zero bit in the bitmap.
    void AdvanceToFirstNonZero() {
      if (AtEnd)
        return;
      if (BitVector->Elements.empty()) {
        AtEnd = true;
        return;
      }
      Iter = BitVector->Elements.begin();
      BitNumber = Iter->index() * ElementSize;
      unsigned BitPos = Iter->find_first();
      BitNumber += BitPos;
      WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
      Bits = Iter->word(WordNumber);
      Bits >>= BitPos % BITWORD_SIZE;
    }

    // Move our iterator to the next non-zero bit.
    void AdvanceToNextNonZero() {
      if (AtEnd)
        return;

      while (Bits && !(Bits & 1)) {
        Bits >>= 1;
        BitNumber += 1;
      }

      // See if we ran out of Bits in this word.
      if (!Bits) {
        int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize);
        // If we ran out of set bits in this element, move to next element.
        if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
          ++Iter;
          WordNumber = 0;

          // We may run out of elements in the bitmap.
          if (Iter == BitVector->Elements.end()) {
            AtEnd = true;
            return;
          }
          // Set up for next non-zero word in bitmap.
          BitNumber = Iter->index() * ElementSize;
          NextSetBitNumber = Iter->find_first();
          BitNumber += NextSetBitNumber;
          WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
          Bits = Iter->word(WordNumber);
          Bits >>= NextSetBitNumber % BITWORD_SIZE;
        } else {
          WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
          Bits = Iter->word(WordNumber);
          Bits >>= NextSetBitNumber % BITWORD_SIZE;
          BitNumber = Iter->index() * ElementSize;
          BitNumber += NextSetBitNumber;
        }
      }
    }

  public:
    SparseBitVectorIterator() = default;

    SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
                            bool end = false):BitVector(RHS) {
      Iter = BitVector->Elements.begin();
      BitNumber = 0;
      Bits = 0;
      WordNumber = ~0;
      AtEnd = end;
      AdvanceToFirstNonZero();
    }

    // Preincrement.
    inline SparseBitVectorIterator& operator++() {
      ++BitNumber;
      Bits >>= 1;
      AdvanceToNextNonZero();
      return *this;
    }

    // Postincrement.
    inline SparseBitVectorIterator operator++(int) {
      SparseBitVectorIterator tmp = *this;
      ++*this;
      return tmp;
    }

    // Return the current set bit number.
    unsigned operator*() const {
      return BitNumber;
    }

    bool operator==(const SparseBitVectorIterator &RHS) const {
      // If they are both at the end, ignore the rest of the fields.
      if (AtEnd && RHS.AtEnd)
        return true;
      // Otherwise they are the same if they have the same bit number and
      // bitmap.
      return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
    }

    bool operator!=(const SparseBitVectorIterator &RHS) const {
      return !(*this == RHS);
    }
  };

public:
  using iterator = SparseBitVectorIterator;

  SparseBitVector() : Elements(), CurrElementIter(Elements.begin()) {}

  SparseBitVector(const SparseBitVector &RHS)
      : Elements(RHS.Elements), CurrElementIter(Elements.begin()) {}
  SparseBitVector(SparseBitVector &&RHS)
      : Elements(std::move(RHS.Elements)), CurrElementIter(Elements.begin()) {}

  // Clear.
  void clear() {
    Elements.clear();
  }

  // Assignment
  SparseBitVector& operator=(const SparseBitVector& RHS) {
    if (this == &RHS)
      return *this;

    Elements = RHS.Elements;
    CurrElementIter = Elements.begin();
    return *this;
  }
  SparseBitVector &operator=(SparseBitVector &&RHS) {
    Elements = std::move(RHS.Elements);
    CurrElementIter = Elements.begin();
    return *this;
  }

  // Test, Reset, and Set a bit in the bitmap.
  bool test(unsigned Idx) const {
    if (Elements.empty())
      return false;

    unsigned ElementIndex = Idx / ElementSize;
    ElementListConstIter ElementIter = FindLowerBoundConst(ElementIndex);

    // If we can't find an element that is supposed to contain this bit, there
    // is nothing more to do.
    if (ElementIter == Elements.end() ||
        ElementIter->index() != ElementIndex)
      return false;
    return ElementIter->test(Idx % ElementSize);
  }

  void reset(unsigned Idx) {
    if (Elements.empty())
      return;

    unsigned ElementIndex = Idx / ElementSize;
    ElementListIter ElementIter = FindLowerBound(ElementIndex);

    // If we can't find an element that is supposed to contain this bit, there
    // is nothing more to do.
    if (ElementIter == Elements.end() ||
        ElementIter->index() != ElementIndex)
      return;
    ElementIter->reset(Idx % ElementSize);

    // When the element is zeroed out, delete it.
    if (ElementIter->empty()) {
      ++CurrElementIter;
      Elements.erase(ElementIter);
    }
  }

  void set(unsigned Idx) {
    unsigned ElementIndex = Idx / ElementSize;
    ElementListIter ElementIter;
    if (Elements.empty()) {
      ElementIter = Elements.emplace(Elements.end(), ElementIndex);
    } else {
      ElementIter = FindLowerBound(ElementIndex);

      if (ElementIter == Elements.end() ||
          ElementIter->index() != ElementIndex) {
        // We may have hit the beginning of our SparseBitVector, in which case
        // we may need to insert right after this element, which requires
        // moving the current iterator forward one, because insert inserts
        // before the given iterator.
        if (ElementIter != Elements.end() &&
            ElementIter->index() < ElementIndex)
          ++ElementIter;
        ElementIter = Elements.emplace(ElementIter, ElementIndex);
      }
    }
    CurrElementIter = ElementIter;

    ElementIter->set(Idx % ElementSize);
  }

  bool test_and_set(unsigned Idx) {
    bool old = test(Idx);
    if (!old) {
      set(Idx);
      return true;
    }
    return false;
  }

  bool operator!=(const SparseBitVector &RHS) const {
    return !(*this == RHS);
  }

  bool operator==(const SparseBitVector &RHS) const {
    ElementListConstIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
         ++Iter1, ++Iter2) {
      if (*Iter1 != *Iter2)
        return false;
    }
    return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
  }

  // Union our bitmap with the RHS and return true if we changed.
  bool operator|=(const SparseBitVector &RHS) {
    if (this == &RHS)
      return false;

    bool changed = false;
    ElementListIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // If RHS is empty, we are done
    if (RHS.Elements.empty())
      return false;

    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
        Elements.insert(Iter1, *Iter2);
        ++Iter2;
        changed = true;
      } else if (Iter1->index() == Iter2->index()) {
        changed |= Iter1->unionWith(*Iter2);
        ++Iter1;
        ++Iter2;
      } else {
        ++Iter1;
      }
    }
    CurrElementIter = Elements.begin();
    return changed;
  }

  // Intersect our bitmap with the RHS and return true if ours changed.
  bool operator&=(const SparseBitVector &RHS) {
    if (this == &RHS)
      return false;

    bool changed = false;
    ElementListIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // Check if both bitmaps are empty.
    if (Elements.empty() && RHS.Elements.empty())
      return false;

    // Loop through, intersecting as we go, erasing elements when necessary.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end()) {
        CurrElementIter = Elements.begin();
        return changed;
      }

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        bool BecameZero;
        changed |= Iter1->intersectWith(*Iter2, BecameZero);
        if (BecameZero) {
          ElementListIter IterTmp = Iter1;
          ++Iter1;
          Elements.erase(IterTmp);
        } else {
          ++Iter1;
        }
        ++Iter2;
      } else {
        ElementListIter IterTmp = Iter1;
        ++Iter1;
        Elements.erase(IterTmp);
        changed = true;
      }
    }
    if (Iter1 != Elements.end()) {
      Elements.erase(Iter1, Elements.end());
      changed = true;
    }
    CurrElementIter = Elements.begin();
    return changed;
  }

  // Intersect our bitmap with the complement of the RHS and return true
  // if ours changed.
  bool intersectWithComplement(const SparseBitVector &RHS) {
    if (this == &RHS) {
      if (!empty()) {
        clear();
        return true;
      }
      return false;
    }

    bool changed = false;
    ElementListIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // If either our bitmap or RHS is empty, we are done
    if (Elements.empty() || RHS.Elements.empty())
      return false;

    // Loop through, intersecting as we go, erasing elements when necessary.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end()) {
        CurrElementIter = Elements.begin();
        return changed;
      }

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        bool BecameZero;
        changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
        if (BecameZero) {
          ElementListIter IterTmp = Iter1;
          ++Iter1;
          Elements.erase(IterTmp);
        } else {
          ++Iter1;
        }
        ++Iter2;
      } else {
        ++Iter1;
      }
    }
    CurrElementIter = Elements.begin();
    return changed;
  }

  bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) {
    return intersectWithComplement(*RHS);
  }

  //  Three argument version of intersectWithComplement.
  //  Result of RHS1 & ~RHS2 is stored into this bitmap.
  void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
                               const SparseBitVector<ElementSize> &RHS2)
  {
    if (this == &RHS1) {
      intersectWithComplement(RHS2);
      return;
    } else if (this == &RHS2) {
      SparseBitVector RHS2Copy(RHS2);
      intersectWithComplement(RHS1, RHS2Copy);
      return;
    }

    Elements.clear();
    CurrElementIter = Elements.begin();
    ElementListConstIter Iter1 = RHS1.Elements.begin();
    ElementListConstIter Iter2 = RHS2.Elements.begin();

    // If RHS1 is empty, we are done
    // If RHS2 is empty, we still have to copy RHS1
    if (RHS1.Elements.empty())
      return;

    // Loop through, intersecting as we go, erasing elements when necessary.
    while (Iter2 != RHS2.Elements.end()) {
      if (Iter1 == RHS1.Elements.end())
        return;

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        bool BecameZero = false;
        Elements.emplace_back(Iter1->index());
        Elements.back().intersectWithComplement(*Iter1, *Iter2, BecameZero);
        if (BecameZero)
          Elements.pop_back();
        ++Iter1;
        ++Iter2;
      } else {
        Elements.push_back(*Iter1++);
      }
    }

    // copy the remaining elements
    std::copy(Iter1, RHS1.Elements.end(), std::back_inserter(Elements));
  }

  void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
                               const SparseBitVector<ElementSize> *RHS2) {
    intersectWithComplement(*RHS1, *RHS2);
  }

  bool intersects(const SparseBitVector<ElementSize> *RHS) const {
    return intersects(*RHS);
  }

  // Return true if we share any bits in common with RHS
  bool intersects(const SparseBitVector<ElementSize> &RHS) const {
    ElementListConstIter Iter1 = Elements.begin();
    ElementListConstIter Iter2 = RHS.Elements.begin();

    // Check if both bitmaps are empty.
    if (Elements.empty() && RHS.Elements.empty())
      return false;

    // Loop through, intersecting as we go, stopping as soon as we hit bits
    // in common.
    while (Iter2 != RHS.Elements.end()) {
      if (Iter1 == Elements.end())
        return false;

      if (Iter1->index() > Iter2->index()) {
        ++Iter2;
      } else if (Iter1->index() == Iter2->index()) {
        if (Iter1->intersects(*Iter2))
          return true;
        ++Iter1;
        ++Iter2;
      } else {
        ++Iter1;
      }
    }
    return false;
  }

  // Return true iff all bits set in RHS are also set in this SparseBitVector,
  // i.e. this SparseBitVector is a superset of RHS.
  bool contains(const SparseBitVector<ElementSize> &RHS) const {
    SparseBitVector<ElementSize> Result(*this);
    Result &= RHS;
    return (Result == RHS);
  }
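
  // For example (sketch): if A has bits {1, 2, 3} set and B has bits {2, 3}
  // set, then A.contains(B) is true while B.contains(A) is false.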

  // Return the first set bit in the bitmap.  Return -1 if no bits are set.
  int find_first() const {
    if (Elements.empty())
      return -1;
    const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
    return (First.index() * ElementSize) + First.find_first();
  }

  // Return the last set bit in the bitmap.  Return -1 if no bits are set.
  int find_last() const {
    if (Elements.empty())
      return -1;
    const SparseBitVectorElement<ElementSize> &Last = *(Elements.rbegin());
    return (Last.index() * ElementSize) + Last.find_last();
  }

  // Return true if the SparseBitVector is empty
  bool empty() const {
    return Elements.empty();
  }

  unsigned count() const {
    unsigned BitCount = 0;
    for (ElementListConstIter Iter = Elements.begin();
         Iter != Elements.end();
         ++Iter)
      BitCount += Iter->count();

    return BitCount;
  }

  iterator begin() const {
    return iterator(this);
  }

  iterator end() const {
    return iterator(this, true);
  }
};

// Convenience functions to allow Or and And without dereferencing in the user
// code.

template <unsigned ElementSize>
inline bool operator |=(SparseBitVector<ElementSize> &LHS,
                        const SparseBitVector<ElementSize> *RHS) {
  return LHS |= *RHS;
}

template <unsigned ElementSize>
inline bool operator |=(SparseBitVector<ElementSize> *LHS,
                        const SparseBitVector<ElementSize> &RHS) {
  return LHS->operator|=(RHS);
}

template <unsigned ElementSize>
inline bool operator &=(SparseBitVector<ElementSize> *LHS,
                        const SparseBitVector<ElementSize> &RHS) {
  return LHS->operator&=(RHS);
}

template <unsigned ElementSize>
inline bool operator &=(SparseBitVector<ElementSize> &LHS,
                        const SparseBitVector<ElementSize> *RHS) {
  return LHS &= *RHS;
}

// Convenience functions for infix union, intersection, difference operators.

template <unsigned ElementSize>
inline SparseBitVector<ElementSize>
operator|(const SparseBitVector<ElementSize> &LHS,
          const SparseBitVector<ElementSize> &RHS) {
  SparseBitVector<ElementSize> Result(LHS);
  Result |= RHS;
  return Result;
}

template <unsigned ElementSize>
inline SparseBitVector<ElementSize>
operator&(const SparseBitVector<ElementSize> &LHS,
          const SparseBitVector<ElementSize> &RHS) {
  SparseBitVector<ElementSize> Result(LHS);
  Result &= RHS;
  return Result;
}

template <unsigned ElementSize>
inline SparseBitVector<ElementSize>
operator-(const SparseBitVector<ElementSize> &LHS,
          const SparseBitVector<ElementSize> &RHS) {
  SparseBitVector<ElementSize> Result;
  Result.intersectWithComplement(LHS, RHS);
  return Result;
}
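
// For example (sketch): the infix operators build and return a new vector.
//
// \code
//   SparseBitVector<> A, B;
//   A.set(1); A.set(2);
//   B.set(2); B.set(3);
//   SparseBitVector<> U = A | B; // bits {1, 2, 3}
//   SparseBitVector<> I = A & B; // bits {2}
//   SparseBitVector<> D = A - B; // bits {1}
// \endcode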

// Dump a SparseBitVector to a stream
template <unsigned ElementSize>
void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
  out << "[";

  typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
    be = LHS.end();
  if (bi != be) {
    out << *bi;
    for (++bi; bi != be; ++bi) {
      out << " " << *bi;
    }
  }
  out << "]\n";
}
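
// For instance (sketch; requires a raw_ostream such as llvm::errs()), a
// vector with bits 5 and 200 set prints as "[5 200]" followed by a newline:
//
// \code
//   SparseBitVector<> V;
//   V.set(5);
//   V.set(200);
//   dump(V, llvm::errs()); // prints "[5 200]\n"
// \endcode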

} // end namespace llvm

#endif // LLVM_ADT_SPARSEBITVECTOR_H
//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SmallVector class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

namespace llvm {

template <typename T> class ArrayRef;

template <typename IteratorT> class iterator_range;

template <class Iterator>
using EnableIfConvertibleToInputIterator = std::enable_if_t<std::is_convertible<
    typename std::iterator_traits<Iterator>::iterator_category,
    std::input_iterator_tag>::value>;

/// This is all the stuff common to all SmallVectors.
///
/// The template parameter specifies the type which should be used to hold the
/// Size and Capacity of the SmallVector, so it can be adjusted.
/// Using 32 bit size is desirable to shrink the size of the SmallVector.
/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
/// buffering bitcode output - which can exceed 4GB.
template <class Size_T> class SmallVectorBase {
protected:
  void *BeginX;
  Size_T Size = 0, Capacity;

  /// The maximum value of the Size_T used.
  static constexpr size_t SizeTypeMax() {
    return std::numeric_limits<Size_T>::max();
  }

  SmallVectorBase() = delete;
  SmallVectorBase(void *FirstEl, size_t TotalCapacity)
      : BeginX(FirstEl), Capacity(TotalCapacity) {}

  /// This is a helper for \a grow() that's out of line to reduce code
  /// duplication.  This function will report a fatal error if it can't grow at
  /// least to \p MinSize.
  void *mallocForGrow(void *FirstEl, size_t MinSize, size_t TSize,
                      size_t &NewCapacity);

  /// This is an implementation of the grow() method which only works
  /// on POD-like data types and is out of line to reduce code duplication.
  /// This function will report a fatal error if it cannot increase capacity.
  void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);

  /// If the vector was first created with capacity 0, getFirstEl() points to
  /// the unallocated memory right after the header. If a subsequent allocation
  /// that grows the vector happens to return the same pointer as getFirstEl(),
  /// get a new allocation; otherwise isSmall() would falsely report that no
  /// allocation was done (true) and the memory would not be freed in the
  /// destructor. If a VSize (vector size) is given, also copy that many
  /// elements to the new allocation - used when realloc fails to increase
  /// space in place and happens to allocate precisely at BeginX.
  /// This is unlikely to be called often, but resolves a memory leak when the
  /// situation does occur.
  void *replaceAllocation(void *NewElts, size_t TSize, size_t NewCapacity,
                          size_t VSize = 0);

public:
  size_t size() const { return Size; }
  size_t capacity() const { return Capacity; }

  [[nodiscard]] bool empty() const { return !Size; }

protected:
  /// Set the array size to \p N, which the current array must have enough
  /// capacity for.
  ///
  /// This does not construct or destroy any elements in the vector.
  void set_size(size_t N) {
    assert(N <= capacity());
    Size = N;
  }
};

template <class T>
using SmallVectorSizeType =
    std::conditional_t<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
                       uint32_t>;
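
// For example (a sketch assuming a 64-bit host, where sizeof(void *) == 8):
//
// \code
//   static_assert(std::is_same_v<SmallVectorSizeType<char>, uint64_t>);
//   static_assert(std::is_same_v<SmallVectorSizeType<int>, uint32_t>);
// \endcode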

/// Figure out the offset of the first element.
template <class T, typename = void> struct SmallVectorAlignmentAndSize {
  alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
      SmallVectorBase<SmallVectorSizeType<T>>)];
  alignas(T) char FirstEl[sizeof(T)];
};

/// This is the part of SmallVectorTemplateBase which does not depend on whether
/// the type T is a POD. The extra dummy template argument is used by ArrayRef
/// to avoid unnecessarily requiring T to be complete.
template <typename T, typename = void>
class SmallVectorTemplateCommon
    : public SmallVectorBase<SmallVectorSizeType<T>> {
  using Base = SmallVectorBase<SmallVectorSizeType<T>>;

protected:
  /// Find the address of the first element.  For this pointer math to be valid
  /// with small-size of 0 for T with lots of alignment, it's important that
  /// SmallVectorStorage is properly-aligned even for small-size of 0.
  void *getFirstEl() const {
    return const_cast<void *>(reinterpret_cast<const void *>(
        reinterpret_cast<const char *>(this) +
        offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
  }
  // Space after 'FirstEl' is clobbered, do not add any instance vars after it.

  SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}

  void grow_pod(size_t MinSize, size_t TSize) {
    Base::grow_pod(getFirstEl(), MinSize, TSize);
  }

  /// Return true if this is a smallvector which has not had dynamic
  /// memory allocated for it.
  bool isSmall() const { return this->BeginX == getFirstEl(); }

  /// Put this vector in a state of being small.
  void resetToSmall() {
    this->BeginX = getFirstEl();
    this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
  }

  /// Return true if V is an internal reference to the given range.
  bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
    // Use std::less to avoid UB.
    std::less<> LessThan;
    return !LessThan(V, First) && LessThan(V, Last);
  }

  /// Return true if V is an internal reference to this vector.
  bool isReferenceToStorage(const void *V) const {
    return isReferenceToRange(V, this->begin(), this->end());
  }

  /// Return true if First and Last form a valid (possibly empty) range in this
  /// vector's storage.
  bool isRangeInStorage(const void *First, const void *Last) const {
    // Use std::less to avoid UB.
    std::less<> LessThan;
    return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
           !LessThan(this->end(), Last);
  }

  /// Return true unless Elt will be invalidated by resizing the vector to
  /// NewSize.
  bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
    // Past the end.
    if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
      return true;

    // Return false if Elt will be destroyed by shrinking.
    if (NewSize <= this->size())
      return Elt < this->begin() + NewSize;

    // Return false if we need to grow.
    return NewSize <= this->capacity();
  }

  /// Check whether Elt will be invalidated by resizing the vector to NewSize.
  void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
    assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
           "Attempting to reference an element of the vector in an operation "
           "that invalidates it");
  }

  /// Check whether Elt will be invalidated by increasing the size of the
  /// vector by N.
  void assertSafeToAdd(const void *Elt, size_t N = 1) {
    this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
  }

  /// Check whether any part of the range will be invalidated by clearing.
  void assertSafeToReferenceAfterClear(const T *From, const T *To) {
    if (From == To)
      return;
    this->assertSafeToReferenceAfterResize(From, 0);
    this->assertSafeToReferenceAfterResize(To - 1, 0);
  }
  template <
      class ItTy,
      std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
                       bool> = false>
  void assertSafeToReferenceAfterClear(ItTy, ItTy) {}

  /// Check whether any part of the range will be invalidated by growing.
  void assertSafeToAddRange(const T *From, const T *To) {
    if (From == To)
      return;
    this->assertSafeToAdd(From, To - From);
    this->assertSafeToAdd(To - 1, To - From);
  }
  template <
      class ItTy,
      std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
                       bool> = false>
  void assertSafeToAddRange(ItTy, ItTy) {}

  /// Reserve enough space to add one element, and return the updated element
  /// pointer in case it was a reference to the storage.
  template <class U>
  static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
                                                   size_t N) {
    size_t NewSize = This->size() + N;
    if (LLVM_LIKELY(NewSize <= This->capacity()))
      return &Elt;

    bool ReferencesStorage = false;
    int64_t Index = -1;
    if (!U::TakesParamByValue) {
      if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
        ReferencesStorage = true;
        Index = &Elt - This->begin();
      }
    }
    This->grow(NewSize);
    return ReferencesStorage ? This->begin() + Index : &Elt;
  }
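
  // Illustrative note (sketch): this indirection is what keeps patterns like
  // pushing an element of a vector onto that same vector safe, even when the
  // push triggers a reallocation.
  //
  // \code
  //   SmallVector<int> V = {1, 2, 3};
  //   V.push_back(V[0]); // Safe: V[0]'s address is re-derived after growth.
  // \endcode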

public:
  using size_type = size_t;
  using difference_type = ptrdiff_t;
  using value_type = T;
  using iterator = T *;
  using const_iterator = const T *;

  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
  using reverse_iterator = std::reverse_iterator<iterator>;

  using reference = T &;
  using const_reference = const T &;
  using pointer = T *;
  using const_pointer = const T *;

  using Base::capacity;
  using Base::empty;
  using Base::size;

  // forward iterator creation methods.
  iterator begin() { return (iterator)this->BeginX; }
  const_iterator begin() const { return (const_iterator)this->BeginX; }
  iterator end() { return begin() + size(); }
  const_iterator end() const { return begin() + size(); }

  // reverse iterator creation methods.
  reverse_iterator rbegin()            { return reverse_iterator(end()); }
  const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
  reverse_iterator rend()              { return reverse_iterator(begin()); }
  const_reverse_iterator rend() const { return const_reverse_iterator(begin());}

  size_type size_in_bytes() const { return size() * sizeof(T); }
  size_type max_size() const {
    return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
  }

  size_t capacity_in_bytes() const { return capacity() * sizeof(T); }

  /// Return a pointer to the vector's buffer, even if empty().
  pointer data() { return pointer(begin()); }
  /// Return a pointer to the vector's buffer, even if empty().
  const_pointer data() const { return const_pointer(begin()); }

  reference operator[](size_type idx) {
    assert(idx < size());
    return begin()[idx];
  }
  const_reference operator[](size_type idx) const {
    assert(idx < size());
    return begin()[idx];
  }

  reference front() {
    assert(!empty());
    return begin()[0];
  }
  const_reference front() const {
    assert(!empty());
    return begin()[0];
  }

  reference back() {
    assert(!empty());
    return end()[-1];
  }
  const_reference back() const {
    assert(!empty());
    return end()[-1];
  }
};

/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
/// method implementations that are designed to work with non-trivial T's.
///
/// We approximate is_trivially_copyable with trivial move/copy construction and
/// trivial destruction. While the standard doesn't specify that you're allowed
/// to copy these types with memcpy, there is no way for the type to observe this.
/// This catches the important case of std::pair<POD, POD>, which is not
/// trivially assignable.
template <typename T, bool = (std::is_trivially_copy_constructible<T>::value) &&
                             (std::is_trivially_move_constructible<T>::value) &&
                             std::is_trivially_destructible<T>::value>
class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
  friend class SmallVectorTemplateCommon<T>;

protected:
  static constexpr bool TakesParamByValue = false;
  using ValueParamT = const T &;

  SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}

  static void destroy_range(T *S, T *E) {
    while (S != E) {
      --E;
      E->~T();
    }
  }

  /// Move the range [I, E) into the uninitialized memory starting with "Dest",
  /// constructing elements as needed.
  template<typename It1, typename It2>
  static void uninitialized_move(It1 I, It1 E, It2 Dest) {
    std::uninitialized_move(I, E, Dest);
  }

  /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
  /// constructing elements as needed.
  template<typename It1, typename It2>
  static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
    std::uninitialized_copy(I, E, Dest);
  }

  /// Grow the allocated memory (without initializing new elements), doubling
  /// the size of the allocated memory. Guarantees space for at least one more
  /// element, or MinSize more elements if specified.
  void grow(size_t MinSize = 0);

  /// Create a new allocation big enough for \p MinSize and pass back its size
  /// in \p NewCapacity. This is the first section of \a grow().
  T *mallocForGrow(size_t MinSize, size_t &NewCapacity);

  /// Move existing elements over to the new allocation \p NewElts, the middle
  /// section of \a grow().
  void moveElementsForGrow(T *NewElts);

  /// Transfer ownership of the allocation, finishing up \a grow().
  void takeAllocationForGrow(T *NewElts, size_t NewCapacity);

  /// Reserve enough space to add one element, and return the updated element
  /// pointer in case it was a reference to the storage.
  const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
    return this->reserveForParamAndGetAddressImpl(this, Elt, N);
  }

  /// Reserve enough space to add one element, and return the updated element
  /// pointer in case it was a reference to the storage.
  T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
    return const_cast<T *>(
        this->reserveForParamAndGetAddressImpl(this, Elt, N));
  }

  static T &&forward_value_param(T &&V) { return std::move(V); }
  static const T &forward_value_param(const T &V) { return V; }

  void growAndAssign(size_t NumElts, const T &Elt) {
    // Grow manually in case Elt is an internal reference.
    size_t NewCapacity;
    T *NewElts = mallocForGrow(NumElts, NewCapacity);
    std::uninitialized_fill_n(NewElts, NumElts, Elt);
    this->destroy_range(this->begin(), this->end());
    takeAllocationForGrow(NewElts, NewCapacity);
    this->set_size(NumElts);
  }

  template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
    // Grow manually in case one of Args is an internal reference.
    size_t NewCapacity;
    T *NewElts = mallocForGrow(0, NewCapacity);
    ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
    moveElementsForGrow(NewElts);
    takeAllocationForGrow(NewElts, NewCapacity);
    this->set_size(this->size() + 1);
    return this->back();
  }

public:
  void push_back(const T &Elt) {
    const T *EltPtr = reserveForParamAndGetAddress(Elt);
    ::new ((void *)this->end()) T(*EltPtr);
    this->set_size(this->size() + 1);
  }

  void push_back(T &&Elt) {
    T *EltPtr = reserveForParamAndGetAddress(Elt);
    ::new ((void *)this->end()) T(::std::move(*EltPtr));
    this->set_size(this->size() + 1);
  }

  void pop_back() {
    this->set_size(this->size() - 1);
    this->end()->~T();
  }
};

// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool TriviallyCopyable>
void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
  size_t NewCapacity;
  T *NewElts = mallocForGrow(MinSize, NewCapacity);
  moveElementsForGrow(NewElts);
  takeAllocationForGrow(NewElts, NewCapacity);
}

template <typename T, bool TriviallyCopyable>
T *SmallVectorTemplateBase<T, TriviallyCopyable>::mallocForGrow(
    size_t MinSize, size_t &NewCapacity) {
  return static_cast<T *>(
      SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
          this->getFirstEl(), MinSize, sizeof(T), NewCapacity));
}

// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool TriviallyCopyable>
void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
    T *NewElts) {
  // Move the elements over.
  this->uninitialized_move(this->begin(), this->end(), NewElts);

  // Destroy the original elements.
  destroy_range(this->begin(), this->end());
}

// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool TriviallyCopyable>
void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
    T *NewElts, size_t NewCapacity) {
  // If this wasn't grown from the inline copy, deallocate the old space.
  if (!this->isSmall())
    free(this->begin());

  this->BeginX = NewElts;
  this->Capacity = NewCapacity;
}

/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
/// method implementations that are designed to work with trivially copyable
/// T's. This allows using memcpy in place of copy/move construction and
/// skipping destruction.
template <typename T>
class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
  friend class SmallVectorTemplateCommon<T>;

protected:
  /// True if it's cheap enough to take parameters by value. Doing so avoids
  /// overhead related to mitigations for reference invalidation.
  static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);

  /// Either const T& or T, depending on whether it's cheap enough to take
  /// parameters by value.
  using ValueParamT = std::conditional_t<TakesParamByValue, T, const T &>;

  SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}

  // No need to do a destroy loop for POD's.
  static void destroy_range(T *, T *) {}

  /// Move the range [I, E) onto the uninitialized memory
  /// starting with "Dest", constructing elements into it as needed.
  template<typename It1, typename It2>
  static void uninitialized_move(It1 I, It1 E, It2 Dest) {
    // Just do a copy.
    uninitialized_copy(I, E, Dest);
  }

  /// Copy the range [I, E) onto the uninitialized memory
  /// starting with "Dest", constructing elements into it as needed.
  template<typename It1, typename It2>
  static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
    // Arbitrary iterator types; just use the basic implementation.
    std::uninitialized_copy(I, E, Dest);
  }

  /// Copy the range [I, E) onto the uninitialized memory
  /// starting with "Dest", constructing elements into it as needed.
  template <typename T1, typename T2>
  static void uninitialized_copy(
      T1 *I, T1 *E, T2 *Dest,
      std::enable_if_t<std::is_same<std::remove_const_t<T1>, T2>::value> * =
          nullptr) {
    // Use memcpy for PODs iterated by pointers (which includes SmallVector
    // iterators): std::uninitialized_copy optimizes to memmove, but we can
    // use memcpy here. Note that I and E are iterators and thus might be
    // invalid for memcpy if they are equal.
    if (I != E)
      memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
  }

  /// Double the size of the allocated memory, guaranteeing space for at
  /// least one more element or MinSize if specified.
  void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }

  /// Reserve enough space to add one element, and return the updated element
  /// pointer in case it was a reference to the storage.
  const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
    return this->reserveForParamAndGetAddressImpl(this, Elt, N);
  }

  /// Reserve enough space to add one element, and return the updated element
  /// pointer in case it was a reference to the storage.
  T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
    return const_cast<T *>(
        this->reserveForParamAndGetAddressImpl(this, Elt, N));
  }

  /// Copy \p V or return a reference, depending on \a ValueParamT.
  static ValueParamT forward_value_param(ValueParamT V) { return V; }

  void growAndAssign(size_t NumElts, T Elt) {
    // Elt has been copied in case it's an internal reference, side-stepping
    // reference invalidation problems without losing the realloc optimization.
    this->set_size(0);
    this->grow(NumElts);
    std::uninitialized_fill_n(this->begin(), NumElts, Elt);
    this->set_size(NumElts);
  }

  template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
    // Use push_back with a copy in case Args has an internal reference,
    // side-stepping reference invalidation problems without losing the realloc
    // optimization.
    push_back(T(std::forward<ArgTypes>(Args)...));
    return this->back();
  }

public:
  void push_back(ValueParamT Elt) {
    const T *EltPtr = reserveForParamAndGetAddress(Elt);
    memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
    this->set_size(this->size() + 1);
  }

  void pop_back() { this->set_size(this->size() - 1); }
};

/// This class consists of common code factored out of the SmallVector class to
/// reduce code duplication based on the SmallVector 'N' template parameter.
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T> {
  using SuperClass = SmallVectorTemplateBase<T>;

public:
  using iterator = typename SuperClass::iterator;
  using const_iterator = typename SuperClass::const_iterator;
  using reference = typename SuperClass::reference;
  using size_type = typename SuperClass::size_type;

protected:
  using SmallVectorTemplateBase<T>::TakesParamByValue;
  using ValueParamT = typename SuperClass::ValueParamT;

  // Default ctor - Initialize to empty.
  explicit SmallVectorImpl(unsigned N)
      : SmallVectorTemplateBase<T>(N) {}

  void assignRemote(SmallVectorImpl &&RHS) {
    this->destroy_range(this->begin(), this->end());
    if (!this->isSmall())
      free(this->begin());
    this->BeginX = RHS.BeginX;
    this->Size = RHS.Size;
    this->Capacity = RHS.Capacity;
    RHS.resetToSmall();
  }

public:
  SmallVectorImpl(const SmallVectorImpl &) = delete;

  ~SmallVectorImpl() {
    // Subclass has already destructed this vector's elements.
    // If this wasn't grown from the inline copy, deallocate the old space.
    if (!this->isSmall())
      free(this->begin());
  }

  void clear() {
    this->destroy_range(this->begin(), this->end());
    this->Size = 0;
  }

private:
  // Make set_size() private to avoid misuse in subclasses.
  using SuperClass::set_size;

  template <bool ForOverwrite> void resizeImpl(size_type N) {
    if (N == this->size())
      return;

    if (N < this->size()) {
      this->truncate(N);
      return;
    }

    this->reserve(N);
    for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
      if (ForOverwrite)
        new (&*I) T;
      else
        new (&*I) T();
    this->set_size(N);
  }

public:
  void resize(size_type N) { resizeImpl<false>(N); }

  /// Like resize, but since \ref T is POD, the new values won't be
  /// initialized.
  void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }

  /// Like resize, but requires that \p N is not greater than \a size().
  void truncate(size_type N) {
    assert(this->size() >= N && "Cannot increase size with truncate");
    this->destroy_range(this->begin() + N, this->end());
    this->set_size(N);
  }

  void resize(size_type N, ValueParamT NV) {
    if (N == this->size())
      return;

    if (N < this->size()) {
      this->truncate(N);
      return;
    }

    // N > this->size(). Defer to append.
    this->append(N - this->size(), NV);
  }

  void reserve(size_type N) {
    if (this->capacity() < N)
      this->grow(N);
  }

  void pop_back_n(size_type NumItems) {
    assert(this->size() >= NumItems);
    truncate(this->size() - NumItems);
  }

  [[nodiscard]] T pop_back_val() {
    T Result = ::std::move(this->back());
    this->pop_back();
    return Result;
  }

  void swap(SmallVectorImpl &RHS);

  /// Add the specified range to the end of the SmallVector.
  template <typename ItTy, typename = EnableIfConvertibleToInputIterator<ItTy>>
  void append(ItTy in_start, ItTy in_end) {
    this->assertSafeToAddRange(in_start, in_end);
    size_type NumInputs = std::distance(in_start, in_end);
    this->reserve(this->size() + NumInputs);
    this->uninitialized_copy(in_start, in_end, this->end());
    this->set_size(this->size() + NumInputs);
  }

  /// Append \p NumInputs copies of \p Elt to the end.
  void append(size_type NumInputs, ValueParamT Elt) {
    const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
    std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
    this->set_size(this->size() + NumInputs);
  }

  void append(std::initializer_list<T> IL) {
    append(IL.begin(), IL.end());
  }

  void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }

  void assign(size_type NumElts, ValueParamT Elt) {
    // Note that Elt could be an internal reference.
    if (NumElts > this->capacity()) {
      this->growAndAssign(NumElts, Elt);
      return;
    }

    // Assign over existing elements.
    std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
    if (NumElts > this->size())
      std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
    else if (NumElts < this->size())
      this->destroy_range(this->begin() + NumElts, this->end());
    this->set_size(NumElts);
  }

  // FIXME: Consider assigning over existing elements, rather than clearing &
  // re-initializing them - for all assign(...) variants.

  template <typename ItTy, typename = EnableIfConvertibleToInputIterator<ItTy>>
  void assign(ItTy in_start, ItTy in_end) {
    this->assertSafeToReferenceAfterClear(in_start, in_end);
    clear();
    append(in_start, in_end);
  }

  void assign(std::initializer_list<T> IL) {
    clear();
    append(IL);
  }

  void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }

  iterator erase(const_iterator CI) {
    // Just cast away constness because this is a non-const member function.
    iterator I = const_cast<iterator>(CI);

    assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");

    iterator N = I;
    // Shift all elts down one.
    std::move(I+1, this->end(), I);
    // Drop the last elt.
    this->pop_back();
    return(N);
  }

  iterator erase(const_iterator CS, const_iterator CE) {
    // Just cast away constness because this is a non-const member function.
    iterator S = const_cast<iterator>(CS);
    iterator E = const_cast<iterator>(CE);

    assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");

    iterator N = S;
    // Shift all elts down.
    iterator I = std::move(E, this->end(), S);
    // Drop the last elts.
    this->destroy_range(I, this->end());
    this->set_size(I - this->begin());
    return(N);
  }

private:
  template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
    // Callers ensure that ArgType is derived from T.
    static_assert(
        std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
                     T>::value,
        "ArgType must be derived from T!");

    if (I == this->end()) {  // Important special case for empty vector.
      this->push_back(::std::forward<ArgType>(Elt));
      return this->end()-1;
    }

    assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");

    // Grow if necessary.
    size_t Index = I - this->begin();
    std::remove_reference_t<ArgType> *EltPtr =
        this->reserveForParamAndGetAddress(Elt);
    I = this->begin() + Index;

    ::new ((void*) this->end()) T(::std::move(this->back()));
    // Push everything else over.
    std::move_backward(I, this->end()-1, this->end());
    this->set_size(this->size() + 1);

    // If we just moved the element we're inserting, be sure to update
    // the reference (never happens if TakesParamByValue).
    static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
                  "ArgType must be 'T' when taking by value!");
    if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
      ++EltPtr;

    *I = ::std::forward<ArgType>(*EltPtr);
    return I;
  }

public:
  iterator insert(iterator I, T &&Elt) {
    return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
  }

  iterator insert(iterator I, const T &Elt) {
    return insert_one_impl(I, this->forward_value_param(Elt));
  }

  iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
    // Convert iterator to elt# to avoid invalidating iterator when we reserve()
    size_t InsertElt = I - this->begin();

    if (I == this->end()) {  // Important special case for empty vector.
      append(NumToInsert, Elt);
      return this->begin()+InsertElt;
    }

    assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");

    // Ensure there is enough space, and get the (maybe updated) address of
    // Elt.
    const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);

    // Uninvalidate the iterator.
    I = this->begin()+InsertElt;

    // If there are more elements between the insertion point and the end of the
    // range than there are being inserted, we can use a simple approach to
    // insertion.  Since we already reserved space, we know that this won't
    // reallocate the vector.
    if (size_t(this->end()-I) >= NumToInsert) {
      T *OldEnd = this->end();
      append(std::move_iterator<iterator>(this->end() - NumToInsert),
             std::move_iterator<iterator>(this->end()));

      // Copy the existing elements that get replaced.
      std::move_backward(I, OldEnd-NumToInsert, OldEnd);

      // If we just moved the element we're inserting, be sure to update
      // the reference (never happens if TakesParamByValue).
      if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
        EltPtr += NumToInsert;

      std::fill_n(I, NumToInsert, *EltPtr);
      return I;
    }

    // Otherwise, we're inserting more elements than exist already, and we're
    // not inserting at the end.

    // Move over the elements that we're about to overwrite.
    T *OldEnd = this->end();
    this->set_size(this->size() + NumToInsert);
    size_t NumOverwritten = OldEnd-I;
    this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);

    // If we just moved the element we're inserting, be sure to update
    // the reference (never happens if TakesParamByValue).
    if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
      EltPtr += NumToInsert;

    // Replace the overwritten part.
    std::fill_n(I, NumOverwritten, *EltPtr);

    // Insert the non-overwritten middle part.
    std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
    return I;
  }

  template <typename ItTy, typename = EnableIfConvertibleToInputIterator<ItTy>>
  iterator insert(iterator I, ItTy From, ItTy To) {
    // Convert iterator to elt# to avoid invalidating iterator when we reserve()
    size_t InsertElt = I - this->begin();

    if (I == this->end()) {  // Important special case for empty vector.
      append(From, To);
      return this->begin()+InsertElt;
    }

    assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");

    // Check that the reserve that follows doesn't invalidate the iterators.
    this->assertSafeToAddRange(From, To);

    size_t NumToInsert = std::distance(From, To);

    // Ensure there is enough space.
    reserve(this->size() + NumToInsert);

    // Uninvalidate the iterator.
    I = this->begin()+InsertElt;

    // If there are more elements between the insertion point and the end of the
    // range than there are being inserted, we can use a simple approach to
    // insertion.  Since we already reserved space, we know that this won't
    // reallocate the vector.
    if (size_t(this->end()-I) >= NumToInsert) {
      T *OldEnd = this->end();
      append(std::move_iterator<iterator>(this->end() - NumToInsert),
             std::move_iterator<iterator>(this->end()));

      // Copy the existing elements that get replaced.
      std::move_backward(I, OldEnd-NumToInsert, OldEnd);

      std::copy(From, To, I);
      return I;
    }

    // Otherwise, we're inserting more elements than exist already, and we're
    // not inserting at the end.

    // Move over the elements that we're about to overwrite.
    T *OldEnd = this->end();
    this->set_size(this->size() + NumToInsert);
    size_t NumOverwritten = OldEnd-I;
    this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);

    // Replace the overwritten part.
    for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
      *J = *From;
      ++J; ++From;
    }

    // Insert the non-overwritten middle part.
    this->uninitialized_copy(From, To, OldEnd);
    return I;
  }

  void insert(iterator I, std::initializer_list<T> IL) {
    insert(I, IL.begin(), IL.end());
  }

  template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
    if (LLVM_UNLIKELY(this->size() >= this->capacity()))
      return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);

    ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
    this->set_size(this->size() + 1);
    return this->back();
  }
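
  // For example (sketch): construct the element in place rather than building
  // a temporary and copying it in.
  //
  // \code
  //   SmallVector<std::pair<int, int>> Pairs;
  //   Pairs.emplace_back(1, 2); // constructs the pair directly in storage
  // \endcode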

  SmallVectorImpl &operator=(const SmallVectorImpl &RHS);

  SmallVectorImpl &operator=(SmallVectorImpl &&RHS);

  bool operator==(const SmallVectorImpl &RHS) const {
    if (this->size() != RHS.size()) return false;
    return std::equal(this->begin(), this->end(), RHS.begin());
  }
  bool operator!=(const SmallVectorImpl &RHS) const {
    return !(*this == RHS);
  }

  bool operator<(const SmallVectorImpl &RHS) const {
    return std::lexicographical_compare(this->begin(), this->end(),
                                        RHS.begin(), RHS.end());
  }
  bool operator>(const SmallVectorImpl &RHS) const { return RHS < *this; }
  bool operator<=(const SmallVectorImpl &RHS) const { return !(*this > RHS); }
  bool operator>=(const SmallVectorImpl &RHS) const { return !(*this < RHS); }
};

template <typename T>
void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
  if (this == &RHS) return;

  // We can only avoid copying elements if neither vector is small.
  if (!this->isSmall() && !RHS.isSmall()) {
    std::swap(this->BeginX, RHS.BeginX);
    std::swap(this->Size, RHS.Size);
    std::swap(this->Capacity, RHS.Capacity);
    return;
  }
  this->reserve(RHS.size());
  RHS.reserve(this->size());

  // Swap the shared elements.
  size_t NumShared = this->size();
  if (NumShared > RHS.size()) NumShared = RHS.size();
  for (size_type i = 0; i != NumShared; ++i)
    std::swap((*this)[i], RHS[i]);

  // Copy over the extra elts.
  if (this->size() > RHS.size()) {
    size_t EltDiff = this->size() - RHS.size();
    this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
    RHS.set_size(RHS.size() + EltDiff);
    this->destroy_range(this->begin()+NumShared, this->end());
    this->set_size(NumShared);
  } else if (RHS.size() > this->size()) {
    size_t EltDiff = RHS.size() - this->size();
    this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
    this->set_size(this->size() + EltDiff);
    this->destroy_range(RHS.begin()+NumShared, RHS.end());
    RHS.set_size(NumShared);
  }
}

template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::
  operator=(const SmallVectorImpl<T> &RHS) {
  // Avoid self-assignment.
  if (this == &RHS) return *this;

  // If we already have sufficient space, assign the common elements, then
  // destroy any excess.
  size_t RHSSize = RHS.size();
  size_t CurSize = this->size();
  if (CurSize >= RHSSize) {
    // Assign common elements.
    iterator NewEnd;
    if (RHSSize)
      NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
    else
      NewEnd = this->begin();

    // Destroy excess elements.
    this->destroy_range(NewEnd, this->end());

    // Trim.
    this->set_size(RHSSize);
    return *this;
  }

  // If we have to grow to have enough elements, destroy the current elements.
  // This allows us to avoid copying them during the grow.
  // FIXME: don't do this if they're efficiently moveable.
  if (this->capacity() < RHSSize) {
    // Destroy current elements.
    this->clear();
    CurSize = 0;
    this->grow(RHSSize);
  } else if (CurSize) {
    // Otherwise, use assignment for the already-constructed elements.
    std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
  }

  // Copy construct the new elements in place.
  this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
                           this->begin()+CurSize);

  // Set end.
  this->set_size(RHSSize);
  return *this;
}

template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
  // Avoid self-assignment.
  if (this == &RHS) return *this;

  // If the RHS isn't small, clear this vector and then steal its buffer.
  if (!RHS.isSmall()) {
    this->assignRemote(std::move(RHS));
    return *this;
  }

  // If we already have sufficient space, assign the common elements, then
  // destroy any excess.
  size_t RHSSize = RHS.size();
  size_t CurSize = this->size();
  if (CurSize >= RHSSize) {
    // Assign common elements.
    iterator NewEnd = this->begin();
    if (RHSSize)
      NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);

    // Destroy excess elements and trim the bounds.
    this->destroy_range(NewEnd, this->end());
    this->set_size(RHSSize);

    // Clear the RHS.
    RHS.clear();

    return *this;
  }

  // If we have to grow to have enough elements, destroy the current elements.
  // This allows us to avoid copying them during the grow.
  // FIXME: this may not actually make any sense if we can efficiently move
  // elements.
  if (this->capacity() < RHSSize) {
    // Destroy current elements.
    this->clear();
    CurSize = 0;
    this->grow(RHSSize);
  } else if (CurSize) {
    // Otherwise, use assignment for the already-constructed elements.
    std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
  }

  // Move-construct the new elements in place.
  this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
                           this->begin()+CurSize);

  // Set end.
  this->set_size(RHSSize);

  RHS.clear();
  return *this;
}

/// Storage for the SmallVector elements.  This is specialized for the N=0 case
/// to avoid allocating unnecessary storage.
template <typename T, unsigned N>
struct SmallVectorStorage {
  alignas(T) char InlineElts[N * sizeof(T)];
};

/// We need the storage to be properly aligned even for small-size of 0 so that
/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
/// well-defined.
template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};

/// Forward declaration of SmallVector so that
/// calculateSmallVectorDefaultInlinedElements can reference
/// `sizeof(SmallVector<T, 0>)`.
template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;

/// Helper class for calculating the default number of inline elements for
/// `SmallVector<T>`.
///
/// This should be migrated to a constexpr function when our minimum
/// compiler support is enough for multi-statement constexpr functions.
template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
  // Parameter controlling the default number of inlined elements
  // for `SmallVector<T>`.
  //
  // The default number of inlined elements ensures that
  // 1. There is at least one inlined element.
  // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
  // it contradicts 1.
  static constexpr size_t kPreferredSmallVectorSizeof = 64;

  // static_assert that sizeof(T) is not "too big".
  //
  // Because our policy guarantees at least one inlined element, it is possible
  // for an arbitrarily large inlined element to allocate an arbitrarily large
  // amount of inline storage. We generally consider it an antipattern for a
  // SmallVector to allocate an excessive amount of inline storage, so we want
  // to call attention to these cases and make sure that users are making an
  // intentional decision if they request a lot of inline storage.
  //
  // We want this assertion to trigger in pathological cases, but otherwise
  // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
  // larger than kPreferredSmallVectorSizeof (otherwise,
  // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
  // pattern seems useful in practice).
  //
  // One wrinkle is that this assertion is in theory non-portable, since
  // sizeof(T) is in general platform-dependent. However, we don't expect this
  // to be much of an issue, because most LLVM development happens on 64-bit
  // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
  // 32-bit hosts, dodging the issue. The reverse situation, where development
  // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
  // 64-bit host, is expected to be very rare.
  static_assert(
      sizeof(T) <= 256,
      "You are trying to use a default number of inlined elements for "
      "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
      "explicit number of inlined elements with `SmallVector<T, N>` to make "
      "sure you really want that much inline storage.");

  // Discount the size of the header itself when calculating the maximum inline
  // bytes.
  static constexpr size_t PreferredInlineBytes =
      kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
  static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
  static constexpr size_t value =
      NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
};
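
// For instance (a sketch assuming a typical 64-bit host, where
// sizeof(SmallVector<int, 0>) == 16): PreferredInlineBytes is 64 - 16 == 48,
// so SmallVector<int> defaults to 48 / sizeof(int) == 12 inline elements.
//
// \code
//   static_assert(CalculateSmallVectorDefaultInlinedElements<int>::value == 12);
// \endcode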

/// This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small.  It contains some number of elements
/// in-place, which allows it to avoid heap allocation when the actual number of
/// elements is below that threshold.  This allows normal "small" cases to be
/// fast without losing generality for large inputs.
///
/// \note
/// In the absence of a well-motivated choice for the number of inlined
/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
/// omitting the \p N). This will choose a default number of inlined elements
/// reasonable for allocation on the stack (for example, trying to keep \c
/// sizeof(SmallVector<T>) around 64 bytes).
///
/// \warning This does not attempt to be exception safe.
///
/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
template <typename T,
          unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
                                   SmallVectorStorage<T, N> {
public:
  SmallVector() : SmallVectorImpl<T>(N) {}

  ~SmallVector() {
    // Destroy the constructed elements in the vector.
    this->destroy_range(this->begin(), this->end());
  }

  explicit SmallVector(size_t Size)
    : SmallVectorImpl<T>(N) {
    this->resize(Size);
  }

  SmallVector(size_t Size, const T &Value)
    : SmallVectorImpl<T>(N) {
    this->assign(Size, Value);
  }

  template <typename ItTy, typename = EnableIfConvertibleToInputIterator<ItTy>>
  SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
    this->append(S, E);
  }

  template <typename RangeTy>
  explicit SmallVector(const iterator_range<RangeTy> &R)
      : SmallVectorImpl<T>(N) {
    this->append(R.begin(), R.end());
  }

  SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
    this->append(IL);
  }

  template <typename U,
            typename = std::enable_if_t<std::is_convertible<U, T>::value>>
  explicit SmallVector(ArrayRef<U> A) : SmallVectorImpl<T>(N) {
    this->append(A.begin(), A.end());
  }

  SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
    if (!RHS.empty())
      SmallVectorImpl<T>::operator=(RHS);
  }

  SmallVector &operator=(const SmallVector &RHS) {
    SmallVectorImpl<T>::operator=(RHS);
    return *this;
  }

  SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
    if (!RHS.empty())
      SmallVectorImpl<T>::operator=(::std::move(RHS));
  }

  SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
    if (!RHS.empty())
      SmallVectorImpl<T>::operator=(::std::move(RHS));
  }

  SmallVector &operator=(SmallVector &&RHS) {
    if (N) {
      SmallVectorImpl<T>::operator=(::std::move(RHS));
      return *this;
    }
    // SmallVectorImpl<T>::operator= does not leverage N == 0, so optimize
    // that case here.
    if (this == &RHS)
      return *this;
    if (RHS.empty()) {
      this->destroy_range(this->begin(), this->end());
      this->Size = 0;
    } else {
      this->assignRemote(std::move(RHS));
    }
    return *this;
  }

  SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
    SmallVectorImpl<T>::operator=(::std::move(RHS));
    return *this;
  }

  SmallVector &operator=(std::initializer_list<T> IL) {
    this->assign(IL);
    return *this;
  }
};
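
// A brief usage sketch: up to N elements live in the inline buffer; exceeding
// N transparently spills to the heap.
//
// \code
//   SmallVector<int, 4> V; // room for 4 elements with no heap allocation
//   for (int I = 0; I != 4; ++I)
//     V.push_back(I);      // still in inline storage
//   V.push_back(4);        // exceeds N == 4; grows onto the heap
// \endcode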

template <typename T, unsigned N>
inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
  return X.capacity_in_bytes();
}

template <typename RangeType>
using ValueTypeFromRangeType =
    std::remove_const_t<std::remove_reference_t<decltype(*std::begin(
        std::declval<RangeType &>()))>>;

/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector.  This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
  return {std::begin(Range), std::end(Range)};
}
template <typename R>
SmallVector<ValueTypeFromRangeType<R>> to_vector(R &&Range) {
  return {std::begin(Range), std::end(Range)};
}

template <typename Out, unsigned Size, typename R>
SmallVector<Out, Size> to_vector_of(R &&Range) {
  return {std::begin(Range), std::end(Range)};
}

template <typename Out, typename R> SmallVector<Out> to_vector_of(R &&Range) {
  return {std::begin(Range), std::end(Range)};
}
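
// For example (sketch; `Range` stands in for any iterable range):
//
// \code
//   auto Vec = to_vector(Range); // SmallVector of the range's value type
//   llvm::sort(Vec);             // requires llvm/ADT/STLExtras.h
// \endcode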

// Explicit instantiations
extern template class llvm::SmallVectorBase<uint32_t>;
#if SIZE_MAX > UINT32_MAX
extern template class llvm::SmallVectorBase<uint64_t>;
#endif

} // end namespace llvm

namespace std {

  /// Implement std::swap in terms of SmallVector swap.
  template<typename T>
  inline void
  swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
    LHS.swap(RHS);
  }

  /// Implement std::swap in terms of SmallVector swap.
  template<typename T, unsigned N>
  inline void
  swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
    LHS.swap(RHS);
  }

} // end namespace std

#endif // LLVM_ADT_SMALLVECTOR_H
//===-- llvm/ADT/CombinationGenerator.h ------------------------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Combination generator.
///
/// Example: given input {{0, 1}, {2}, {3, 4}} it will produce the following
/// combinations: {0, 2, 3}, {0, 2, 4}, {1, 2, 3}, {1, 2, 4}.
///
/// It is useful to think of input as vector-of-vectors, where the
/// outer vector is the variable space, and inner vector is choice space.
/// The number of choices for each variable can be different.
///
/// As for implementation, it is useful to think of this as a weird number,
/// where each digit (==variable) may have different base (==number of choices).
/// Thus modelling of 'produce next combination' is exactly analogous to the
/// incrementing of a number - increment the lowest digit (pick the next choice
/// for that variable), and if it wrapped to the beginning, increment the next
/// digit.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_COMBINATIONGENERATOR_H
#define LLVM_ADT_COMBINATIONGENERATOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstring>

namespace llvm {

template <typename choice_type, typename choices_storage_type,
          int variable_smallsize>
class CombinationGenerator {
  template <typename T> struct WrappingIterator {
    using value_type = T;

    const ArrayRef<value_type> Range;
    typename decltype(Range)::const_iterator Position;

    // Rewind the tape so that the position again points at the beginning.
    void rewind() { Position = Range.begin(); }

    // Advance position forward, possibly wrapping to the beginning.
    // Returns whether the wrap happened.
    bool advance() {
      ++Position;
      bool Wrapped = Position == Range.end();
      if (Wrapped)
        rewind();
      return Wrapped;
    }

    // Get the value at which we are currently pointing.
    const value_type &operator*() const { return *Position; }

    WrappingIterator(ArrayRef<value_type> Range_) : Range(Range_) {
      assert(!Range.empty() && "The range must not be empty.");
      rewind();
    }
  };

  const ArrayRef<choices_storage_type> VariablesChoices;

  void performGeneration(
      const function_ref<bool(ArrayRef<choice_type>)> Callback) const {
    SmallVector<WrappingIterator<choice_type>, variable_smallsize>
        VariablesState;

    // 'increment' of the whole VariablesState is defined identically to the
    // increment of a number: starting from the least significant element,
    // increment it, and if it wrapped, then propagate that carry by also
    // incrementing next (more significant) element.
    auto IncrementState =
        [](MutableArrayRef<WrappingIterator<choice_type>> VariablesState)
        -> bool {
      for (WrappingIterator<choice_type> &Variable :
           llvm::reverse(VariablesState)) {
        bool Wrapped = Variable.advance();
        if (!Wrapped)
          return false; // There you go, next combination is ready.
        // We have a carry - increment the more significant variable next.
      }
      return true; // MSB variable wrapped, no more unique combinations.
    };

    // Initialize the per-variable state to refer to the possible choices for
    // that variable.
    VariablesState.reserve(VariablesChoices.size());
    for (ArrayRef<choice_type> VC : VariablesChoices)
      VariablesState.emplace_back(VC);

    // Temporary buffer to store each combination before performing Callback.
    SmallVector<choice_type, variable_smallsize> CurrentCombination;
    CurrentCombination.resize(VariablesState.size());

    while (true) {
      // Gather the currently-selected variable choices into a vector.
      for (auto I : llvm::zip(VariablesState, CurrentCombination))
        std::get<1>(I) = *std::get<0>(I);
      // And pass the new combination into callback, as intended.
      if (/*Abort=*/Callback(CurrentCombination))
        return;
      // And tick the state to next combination, which will be unique.
      if (IncrementState(VariablesState))
        return; // All combinations produced.
    }
  }

public:
  CombinationGenerator(ArrayRef<choices_storage_type> VariablesChoices_)
      : VariablesChoices(VariablesChoices_) {
#ifndef NDEBUG
    assert(!VariablesChoices.empty() && "There should be some variables.");
    llvm::for_each(VariablesChoices, [](ArrayRef<choice_type> VariableChoices) {
      assert(!VariableChoices.empty() &&
             "There must always be some choice, at least a placeholder one.");
    });
#endif
  }

  // How many combinations can we produce, max?
  // This is at most how many times the callback will be called.
  size_t numCombinations() const {
    size_t NumVariants = 1;
    for (ArrayRef<choice_type> VariableChoices : VariablesChoices)
      NumVariants *= VariableChoices.size();
    assert(NumVariants >= 1 &&
           "We should always end up producing at least one combination");
    return NumVariants;
  }

  // Actually perform exhaustive combination generation.
  // Each result will be passed into the callback.
  void generate(const function_ref<bool(ArrayRef<choice_type>)> Callback) {
    performGeneration(Callback);
  }
};

} // namespace llvm

#endif
PKiwFZLԬ�b(b(ADT/ImmutableMap.hnu�[���//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the ImmutableMap class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_IMMUTABLEMAP_H
#define LLVM_ADT_IMMUTABLEMAP_H

#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/Support/Allocator.h"
#include <utility>

namespace llvm {

/// ImutKeyValueInfo - Traits class used by ImmutableMap.  While both the first
/// and second elements in a pair are used to generate profile information,
/// only the first element (the key) is used by isEqual and isLess.
template <typename T, typename S>
struct ImutKeyValueInfo {
  using value_type = const std::pair<T,S>;
  using value_type_ref = const value_type&;
  using key_type = const T;
  using key_type_ref = const T&;
  using data_type = const S;
  using data_type_ref = const S&;

  static inline key_type_ref KeyOfValue(value_type_ref V) {
    return V.first;
  }

  static inline data_type_ref DataOfValue(value_type_ref V) {
    return V.second;
  }

  static inline bool isEqual(key_type_ref L, key_type_ref R) {
    return ImutContainerInfo<T>::isEqual(L,R);
  }
  static inline bool isLess(key_type_ref L, key_type_ref R) {
    return ImutContainerInfo<T>::isLess(L,R);
  }

  static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
    return ImutContainerInfo<S>::isEqual(L,R);
  }

  static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
    ImutContainerInfo<T>::Profile(ID, V.first);
    ImutContainerInfo<S>::Profile(ID, V.second);
  }
};

template <typename KeyT, typename ValT,
          typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMap {
public:
  using value_type = typename ValInfo::value_type;
  using value_type_ref = typename ValInfo::value_type_ref;
  using key_type = typename ValInfo::key_type;
  using key_type_ref = typename ValInfo::key_type_ref;
  using data_type = typename ValInfo::data_type;
  using data_type_ref = typename ValInfo::data_type_ref;
  using TreeTy = ImutAVLTree<ValInfo>;

protected:
  IntrusiveRefCntPtr<TreeTy> Root;

public:
  /// Constructs a map from a pointer to a tree root.  In general one
  /// should use a Factory object to create maps instead of directly
  /// invoking the constructor, but there are cases where making this
  /// constructor public is useful.
  explicit ImmutableMap(const TreeTy *R) : Root(const_cast<TreeTy *>(R)) {}

  class Factory {
    typename TreeTy::Factory F;
    const bool Canonicalize;

  public:
    Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}

    Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
        : F(Alloc), Canonicalize(canonicalize) {}

    Factory(const Factory &) = delete;
    Factory &operator=(const Factory &) = delete;

    ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }

    [[nodiscard]] ImmutableMap add(ImmutableMap Old, key_type_ref K,
                                   data_type_ref D) {
      TreeTy *T = F.add(Old.Root.get(), std::pair<key_type, data_type>(K, D));
      return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
    }

    [[nodiscard]] ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
      TreeTy *T = F.remove(Old.Root.get(), K);
      return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
    }

    typename TreeTy::Factory *getTreeFactory() const {
      return const_cast<typename TreeTy::Factory *>(&F);
    }
  };

  bool contains(key_type_ref K) const {
    return Root ? Root->contains(K) : false;
  }

  bool operator==(const ImmutableMap &RHS) const {
    return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
  }

  bool operator!=(const ImmutableMap &RHS) const {
    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
                            : Root != RHS.Root;
  }

  TreeTy *getRoot() const {
    if (Root) { Root->retain(); }
    return Root.get();
  }

  TreeTy *getRootWithoutRetain() const { return Root.get(); }

  void manualRetain() {
    if (Root) Root->retain();
  }

  void manualRelease() {
    if (Root) Root->release();
  }

  bool isEmpty() const { return !Root; }

public:
  //===--------------------------------------------------===//
  // For testing.
  //===--------------------------------------------------===//

  void verify() const { if (Root) Root->verify(); }

  //===--------------------------------------------------===//
  // Iterators.
  //===--------------------------------------------------===//

  class iterator : public ImutAVLValueIterator<ImmutableMap> {
    friend class ImmutableMap;

    iterator() = default;
    explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}

  public:
    key_type_ref getKey() const { return (*this)->first; }
    data_type_ref getData() const { return (*this)->second; }
  };

  iterator begin() const { return iterator(Root.get()); }
  iterator end() const { return iterator(); }

  data_type* lookup(key_type_ref K) const {
    if (Root) {
      TreeTy* T = Root->find(K);
      if (T) return &T->getValue().second;
    }

    return nullptr;
  }

  /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
  ///  which key is the highest in the ordering of keys in the map.  This
  ///  method returns NULL if the map is empty.
  value_type* getMaxElement() const {
    return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
  }

  //===--------------------------------------------------===//
  // Utility methods.
  //===--------------------------------------------------===//

  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }

  static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
    ID.AddPointer(M.Root.get());
  }

  inline void Profile(FoldingSetNodeID& ID) const {
    return Profile(ID,*this);
  }
};
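
// A minimal usage sketch (illustrative only): every add/remove goes through a
// Factory and yields a new map value, leaving the original untouched.
//
//   ImmutableMap<int, int>::Factory F;
//   ImmutableMap<int, int> M = F.getEmptyMap();
//   M = F.add(M, /*K=*/1, /*D=*/10);
//   if (const int *D = M.lookup(1))
//     (void)*D; // *D == 10
//   M = F.remove(M, 1);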

// NOTE: This will possibly become the new implementation of ImmutableMap some day.
template <typename KeyT, typename ValT,
          typename ValInfo = ImutKeyValueInfo<KeyT, ValT>>
class ImmutableMapRef {
public:
  using value_type = typename ValInfo::value_type;
  using value_type_ref = typename ValInfo::value_type_ref;
  using key_type = typename ValInfo::key_type;
  using key_type_ref = typename ValInfo::key_type_ref;
  using data_type = typename ValInfo::data_type;
  using data_type_ref = typename ValInfo::data_type_ref;
  using TreeTy = ImutAVLTree<ValInfo>;
  using FactoryTy = typename TreeTy::Factory;

protected:
  IntrusiveRefCntPtr<TreeTy> Root;
  FactoryTy *Factory;

public:
  /// Constructs a map from a pointer to a tree root.  In general one
  /// should use a Factory object to create maps instead of directly
  /// invoking the constructor, but there are cases where making this
  /// constructor public is useful.
  ImmutableMapRef(const TreeTy *R, FactoryTy *F)
      : Root(const_cast<TreeTy *>(R)), Factory(F) {}

  ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
                  typename ImmutableMap<KeyT, ValT>::Factory &F)
      : Root(X.getRootWithoutRetain()), Factory(F.getTreeFactory()) {}

  static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
    return ImmutableMapRef(nullptr, F);
  }

  void manualRetain() {
    if (Root) Root->retain();
  }

  void manualRelease() {
    if (Root) Root->release();
  }

  ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
    TreeTy *NewT =
        Factory->add(Root.get(), std::pair<key_type, data_type>(K, D));
    return ImmutableMapRef(NewT, Factory);
  }

  ImmutableMapRef remove(key_type_ref K) const {
    TreeTy *NewT = Factory->remove(Root.get(), K);
    return ImmutableMapRef(NewT, Factory);
  }

  bool contains(key_type_ref K) const {
    return Root ? Root->contains(K) : false;
  }

  ImmutableMap<KeyT, ValT> asImmutableMap() const {
    return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root.get()));
  }

  bool operator==(const ImmutableMapRef &RHS) const {
    return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
  }

  bool operator!=(const ImmutableMapRef &RHS) const {
    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
                            : Root != RHS.Root;
  }

  bool isEmpty() const { return !Root; }

  //===--------------------------------------------------===//
  // For testing.
  //===--------------------------------------------------===//

  void verify() const {
    if (Root)
      Root->verify();
  }

  //===--------------------------------------------------===//
  // Iterators.
  //===--------------------------------------------------===//

  class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
    friend class ImmutableMapRef;

    iterator() = default;
    explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}

  public:
    key_type_ref getKey() const { return (*this)->first; }
    data_type_ref getData() const { return (*this)->second; }
  };

  iterator begin() const { return iterator(Root.get()); }
  iterator end() const { return iterator(); }

  data_type *lookup(key_type_ref K) const {
    if (Root) {
      TreeTy* T = Root->find(K);
      if (T) return &T->getValue().second;
    }

    return nullptr;
  }

  /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
  ///  which key is the highest in the ordering of keys in the map.  This
  ///  method returns NULL if the map is empty.
  value_type* getMaxElement() const {
    return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
  }

  //===--------------------------------------------------===//
  // Utility methods.
  //===--------------------------------------------------===//

  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }

  static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
    ID.AddPointer(M.Root.get());
  }

  inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
};
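
// ImmutableMapRef usage mirrors ImmutableMap, except the factory pointer is
// carried inside each map value; a minimal sketch (illustrative only):
//
//   ImmutableMapRef<int, int>::FactoryTy F;
//   auto M = ImmutableMapRef<int, int>::getEmptyMap(&F);
//   M = M.add(1, 10).add(2, 20);
//   M = M.remove(1);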

} // end namespace llvm

#endif // LLVM_ADT_IMMUTABLEMAP_H

//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ITERATOR_H
#define LLVM_ADT_ITERATOR_H

#include "llvm/ADT/iterator_range.h"
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// CRTP base class which implements the entire standard iterator facade
/// in terms of a minimal subset of the interface.
///
/// Use this when it is reasonable to implement most of the iterator
/// functionality in terms of a core subset. If you need special behavior or
/// there are performance implications for this, you may want to override the
/// relevant members instead.
///
/// Note, one abstraction that this does *not* provide is implementing
/// subtraction in terms of addition by negating the difference. Negation isn't
/// always information preserving, and I can see very reasonable iterator
/// designs where this doesn't work well. It doesn't really force much added
/// boilerplate anyways.
///
/// Another abstraction that this doesn't provide is implementing increment in
/// terms of addition of one. These aren't equivalent for all iterator
/// categories, and respecting that adds a lot of complexity for little gain.
///
/// Iterators are expected to have const rules analogous to pointers, with a
/// single, const-qualified operator*() that returns ReferenceT. This matches
/// the second and third pointers in the following example:
/// \code
///   int Value;
///   { int *I = &Value; }             // ReferenceT 'int&'
///   { int *const I = &Value; }       // ReferenceT 'int&'; const
///   { const int *I = &Value; }       // ReferenceT 'const int&'
///   { const int *const I = &Value; } // ReferenceT 'const int&'; const
/// \endcode
/// If an iterator facade returns a handle to its own state, then T (and
/// PointerT and ReferenceT) should usually be const-qualified. Otherwise, if
/// clients are expected to modify the handle itself, the field can be declared
/// mutable or use const_cast.
///
/// Classes wishing to use `iterator_facade_base` should implement the following
/// methods:
///
/// Forward Iterators:
///   (All of the following methods)
///   - DerivedT &operator=(const DerivedT &R);
///   - bool operator==(const DerivedT &R) const;
///   - T &operator*() const;
///   - DerivedT &operator++();
///
/// Bidirectional Iterators:
///   (All methods of forward iterators, plus the following)
///   - DerivedT &operator--();
///
/// Random-access Iterators:
///   (All methods of bidirectional iterators, excluding the following)
///   - DerivedT &operator++();
///   - DerivedT &operator--();
///   (plus the following)
///   - bool operator<(const DerivedT &RHS) const;
///   - DifferenceTypeT operator-(const DerivedT &R) const;
///   - DerivedT &operator+=(DifferenceTypeT N);
///   - DerivedT &operator-=(DifferenceTypeT N);
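///
/// A minimal sketch of a forward iterator built on the facade (the type below
/// is hypothetical, for illustration only):
/// \code
///   class IntPtrIterator
///       : public iterator_facade_base<IntPtrIterator,
///                                     std::forward_iterator_tag, int> {
///     int *Ptr = nullptr;
///
///   public:
///     IntPtrIterator(int *Ptr) : Ptr(Ptr) {}
///     bool operator==(const IntPtrIterator &R) const { return Ptr == R.Ptr; }
///     int &operator*() const { return *Ptr; }
///     IntPtrIterator &operator++() { ++Ptr; return *this; }
///   };
/// \endcode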
///
template <typename DerivedT, typename IteratorCategoryT, typename T,
          typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
          typename ReferenceT = T &>
class iterator_facade_base {
public:
  using iterator_category = IteratorCategoryT;
  using value_type = T;
  using difference_type = DifferenceTypeT;
  using pointer = PointerT;
  using reference = ReferenceT;

protected:
  enum {
    IsRandomAccess = std::is_base_of<std::random_access_iterator_tag,
                                     IteratorCategoryT>::value,
    IsBidirectional = std::is_base_of<std::bidirectional_iterator_tag,
                                      IteratorCategoryT>::value,
  };

  /// A proxy object for computing a reference via indirecting a copy of an
  /// iterator. This is used in APIs which need to produce a reference via
  /// indirection but for which the iterator object might be a temporary. The
  /// proxy preserves the iterator internally and exposes the indirected
  /// reference via a conversion operator.
  class ReferenceProxy {
    friend iterator_facade_base;

    DerivedT I;

    ReferenceProxy(DerivedT I) : I(std::move(I)) {}

  public:
    operator ReferenceT() const { return *I; }
  };

  /// A proxy object for computing a pointer via indirecting a copy of a
  /// reference. This is used in APIs which need to produce a pointer but for
  /// which the reference might be a temporary. The proxy preserves the
/// reference internally and exposes the pointer via an arrow operator.
  class PointerProxy {
    friend iterator_facade_base;

    ReferenceT R;

    template <typename RefT>
    PointerProxy(RefT &&R) : R(std::forward<RefT>(R)) {}

  public:
    PointerT operator->() const { return &R; }
  };

public:
  DerivedT operator+(DifferenceTypeT n) const {
    static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
                  "Must pass the derived type to this template!");
    static_assert(
        IsRandomAccess,
        "The '+' operator is only defined for random access iterators.");
    DerivedT tmp = *static_cast<const DerivedT *>(this);
    tmp += n;
    return tmp;
  }
  friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
    static_assert(
        IsRandomAccess,
        "The '+' operator is only defined for random access iterators.");
    return i + n;
  }
  DerivedT operator-(DifferenceTypeT n) const {
    static_assert(
        IsRandomAccess,
        "The '-' operator is only defined for random access iterators.");
    DerivedT tmp = *static_cast<const DerivedT *>(this);
    tmp -= n;
    return tmp;
  }

  DerivedT &operator++() {
    static_assert(std::is_base_of<iterator_facade_base, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return static_cast<DerivedT *>(this)->operator+=(1);
  }
  DerivedT operator++(int) {
    DerivedT tmp = *static_cast<DerivedT *>(this);
    ++*static_cast<DerivedT *>(this);
    return tmp;
  }
  DerivedT &operator--() {
    static_assert(
        IsBidirectional,
        "The decrement operator is only defined for bidirectional iterators.");
    return static_cast<DerivedT *>(this)->operator-=(1);
  }
  DerivedT operator--(int) {
    static_assert(
        IsBidirectional,
        "The decrement operator is only defined for bidirectional iterators.");
    DerivedT tmp = *static_cast<DerivedT *>(this);
    --*static_cast<DerivedT *>(this);
    return tmp;
  }

#ifndef __cpp_impl_three_way_comparison
  bool operator!=(const DerivedT &RHS) const {
    return !(static_cast<const DerivedT &>(*this) == RHS);
  }
#endif

  bool operator>(const DerivedT &RHS) const {
    static_assert(
        IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return !(static_cast<const DerivedT &>(*this) < RHS) &&
           !(static_cast<const DerivedT &>(*this) == RHS);
  }
  bool operator<=(const DerivedT &RHS) const {
    static_assert(
        IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return !(static_cast<const DerivedT &>(*this) > RHS);
  }
  bool operator>=(const DerivedT &RHS) const {
    static_assert(
        IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return !(static_cast<const DerivedT &>(*this) < RHS);
  }

  PointerProxy operator->() const {
    return static_cast<const DerivedT *>(this)->operator*();
  }
  ReferenceProxy operator[](DifferenceTypeT n) const {
    static_assert(IsRandomAccess,
                  "Subscripting is only defined for random access iterators.");
    return static_cast<const DerivedT *>(this)->operator+(n);
  }
};

/// CRTP base class for adapting an iterator to a different type.
///
/// This class can be used through CRTP to adapt one iterator into another.
/// Typically this is done through providing in the derived class a custom \c
/// operator* implementation. Other methods can be overridden as well.
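///
/// A hedged sketch (the type below is hypothetical, for illustration): adapt
/// an iterator over std::pair<int, std::string> so it iterates over the
/// pairs' 'first' members:
/// \code
///   using PairIt = std::pair<int, std::string> *;
///   struct first_iterator
///       : iterator_adaptor_base<first_iterator, PairIt,
///                               std::random_access_iterator_tag, int> {
///     first_iterator(PairIt I) : first_iterator::iterator_adaptor_base(I) {}
///     int &operator*() const { return this->I->first; }
///   };
/// \endcode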
template <
    typename DerivedT, typename WrappedIteratorT,
    typename IteratorCategoryT =
        typename std::iterator_traits<WrappedIteratorT>::iterator_category,
    typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
    typename DifferenceTypeT =
        typename std::iterator_traits<WrappedIteratorT>::difference_type,
    typename PointerT = std::conditional_t<
        std::is_same<T, typename std::iterator_traits<
                            WrappedIteratorT>::value_type>::value,
        typename std::iterator_traits<WrappedIteratorT>::pointer, T *>,
    typename ReferenceT = std::conditional_t<
        std::is_same<T, typename std::iterator_traits<
                            WrappedIteratorT>::value_type>::value,
        typename std::iterator_traits<WrappedIteratorT>::reference, T &>>
class iterator_adaptor_base
    : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
                                  DifferenceTypeT, PointerT, ReferenceT> {
  using BaseT = typename iterator_adaptor_base::iterator_facade_base;

protected:
  WrappedIteratorT I;

  iterator_adaptor_base() = default;

  explicit iterator_adaptor_base(WrappedIteratorT u) : I(std::move(u)) {
    static_assert(std::is_base_of<iterator_adaptor_base, DerivedT>::value,
                  "Must pass the derived type to this template!");
  }

  const WrappedIteratorT &wrapped() const { return I; }

public:
  using difference_type = DifferenceTypeT;

  DerivedT &operator+=(difference_type n) {
    static_assert(
        BaseT::IsRandomAccess,
        "The '+=' operator is only defined for random access iterators.");
    I += n;
    return *static_cast<DerivedT *>(this);
  }
  DerivedT &operator-=(difference_type n) {
    static_assert(
        BaseT::IsRandomAccess,
        "The '-=' operator is only defined for random access iterators.");
    I -= n;
    return *static_cast<DerivedT *>(this);
  }
  using BaseT::operator-;
  difference_type operator-(const DerivedT &RHS) const {
    static_assert(
        BaseT::IsRandomAccess,
        "The '-' operator is only defined for random access iterators.");
    return I - RHS.I;
  }

  // We have to explicitly provide ++ and -- rather than letting the facade
  // forward to += because WrappedIteratorT might not support +=.
  using BaseT::operator++;
  DerivedT &operator++() {
    ++I;
    return *static_cast<DerivedT *>(this);
  }
  using BaseT::operator--;
  DerivedT &operator--() {
    static_assert(
        BaseT::IsBidirectional,
        "The decrement operator is only defined for bidirectional iterators.");
    --I;
    return *static_cast<DerivedT *>(this);
  }

  friend bool operator==(const iterator_adaptor_base &LHS,
                         const iterator_adaptor_base &RHS) {
    return LHS.I == RHS.I;
  }
  friend bool operator<(const iterator_adaptor_base &LHS,
                        const iterator_adaptor_base &RHS) {
    static_assert(
        BaseT::IsRandomAccess,
        "Relational operators are only defined for random access iterators.");
    return LHS.I < RHS.I;
  }

  ReferenceT operator*() const { return *I; }
};

/// An iterator type that allows iterating over the pointees via some
/// other iterator.
///
/// The typical usage of this is to expose a type that iterates over Ts, but
/// which is implemented with some iterator over T*s:
///
/// \code
///   using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
/// \endcode
template <typename WrappedIteratorT,
          typename T = std::remove_reference_t<decltype(
              **std::declval<WrappedIteratorT>())>>
struct pointee_iterator
    : iterator_adaptor_base<
          pointee_iterator<WrappedIteratorT, T>, WrappedIteratorT,
          typename std::iterator_traits<WrappedIteratorT>::iterator_category,
          T> {
  pointee_iterator() = default;
  template <typename U>
  pointee_iterator(U &&u)
      : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}

  T &operator*() const { return **this->I; }
};

template <typename RangeT, typename WrappedIteratorT =
                               decltype(std::begin(std::declval<RangeT>()))>
iterator_range<pointee_iterator<WrappedIteratorT>>
make_pointee_range(RangeT &&Range) {
  using PointeeIteratorT = pointee_iterator<WrappedIteratorT>;
  return make_range(PointeeIteratorT(std::begin(std::forward<RangeT>(Range))),
                    PointeeIteratorT(std::end(std::forward<RangeT>(Range))));
}

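/// An iterator adaptor that exposes the address of each element of the
/// wrapped range, i.e. it iterates over T*s where the wrapped iterator yields
/// Ts. A brief usage sketch (illustrative only), using make_pointer_range
/// (defined below):
/// \code
///   SmallVector<int, 4> V = {1, 2, 3, 4};
///   for (int *P : make_pointer_range(V))
///     *P += 1;
/// \endcode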
template <typename WrappedIteratorT,
          typename T = decltype(&*std::declval<WrappedIteratorT>())>
class pointer_iterator
    : public iterator_adaptor_base<
          pointer_iterator<WrappedIteratorT, T>, WrappedIteratorT,
          typename std::iterator_traits<WrappedIteratorT>::iterator_category,
          T> {
  mutable T Ptr;

public:
  pointer_iterator() = default;

  explicit pointer_iterator(WrappedIteratorT u)
      : pointer_iterator::iterator_adaptor_base(std::move(u)) {}

  T &operator*() const { return Ptr = &*this->I; }
};

template <typename RangeT, typename WrappedIteratorT =
                               decltype(std::begin(std::declval<RangeT>()))>
iterator_range<pointer_iterator<WrappedIteratorT>>
make_pointer_range(RangeT &&Range) {
  using PointerIteratorT = pointer_iterator<WrappedIteratorT>;
  return make_range(PointerIteratorT(std::begin(std::forward<RangeT>(Range))),
                    PointerIteratorT(std::end(std::forward<RangeT>(Range))));
}

template <typename WrappedIteratorT,
          typename T1 = std::remove_reference_t<decltype(
              **std::declval<WrappedIteratorT>())>,
          typename T2 = std::add_pointer_t<T1>>
using raw_pointer_iterator =
    pointer_iterator<pointee_iterator<WrappedIteratorT, T1>, T2>;

} // end namespace llvm

#endif // LLVM_ADT_ITERATOR_H

//===-- llvm/ADT/bit.h - C++20 <bit> ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the C++20 <bit> header.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_BIT_H
#define LLVM_ADT_BIT_H

#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <limits>
#include <type_traits>

#if !__has_builtin(__builtin_bit_cast)
#include <cstring>
#endif

#if defined(_MSC_VER) && !defined(_DEBUG)
#include <cstdlib>  // for _byteswap_{ushort,ulong,uint64}
#endif

#ifdef _MSC_VER
// Declare these intrinsics manually rather than including intrin.h. It's very
// expensive, and bit.h is popular via MathExtras.h.
// #include <intrin.h>
extern "C" {
unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
}
#endif

namespace llvm {

// This implementation of bit_cast is different from the C++20 one in two ways:
//  - It isn't constexpr because that requires compiler support.
//  - It requires trivially-constructible To, to avoid UB in the implementation.
template <
    typename To, typename From,
    typename = std::enable_if_t<sizeof(To) == sizeof(From)>,
    typename = std::enable_if_t<std::is_trivially_constructible<To>::value>,
    typename = std::enable_if_t<std::is_trivially_copyable<To>::value>,
    typename = std::enable_if_t<std::is_trivially_copyable<From>::value>>
[[nodiscard]] inline To bit_cast(const From &from) noexcept {
#if __has_builtin(__builtin_bit_cast)
  return __builtin_bit_cast(To, from);
#else
  To to;
  std::memcpy(&to, &from, sizeof(To));
  return to;
#endif
}
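
// Illustrative use (IEEE-754 single-precision float assumed):
//   uint32_t Bits = llvm::bit_cast<uint32_t>(1.0f); // Bits == 0x3F800000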

/// Reverses the bytes in the given integer value V.
template <typename T, typename = std::enable_if_t<std::is_integral_v<T>>>
[[nodiscard]] constexpr T byteswap(T V) noexcept {
  if constexpr (sizeof(T) == 1) {
    return V;
  } else if constexpr (sizeof(T) == 2) {
    uint16_t UV = V;
#if defined(_MSC_VER) && !defined(_DEBUG)
    // The DLL version of the runtime lacks these functions (bug!?), but in a
    // release build they're replaced with BSWAP instructions anyway.
    return _byteswap_ushort(UV);
#else
    uint16_t Hi = UV << 8;
    uint16_t Lo = UV >> 8;
    return Hi | Lo;
#endif
  } else if constexpr (sizeof(T) == 4) {
    uint32_t UV = V;
#if __has_builtin(__builtin_bswap32)
    return __builtin_bswap32(UV);
#elif defined(_MSC_VER) && !defined(_DEBUG)
    return _byteswap_ulong(UV);
#else
    uint32_t Byte0 = UV & 0x000000FF;
    uint32_t Byte1 = UV & 0x0000FF00;
    uint32_t Byte2 = UV & 0x00FF0000;
    uint32_t Byte3 = UV & 0xFF000000;
    return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
#endif
  } else if constexpr (sizeof(T) == 8) {
    uint64_t UV = V;
#if __has_builtin(__builtin_bswap64)
    return __builtin_bswap64(UV);
#elif defined(_MSC_VER) && !defined(_DEBUG)
    return _byteswap_uint64(UV);
#else
    uint64_t Hi = llvm::byteswap<uint32_t>(UV);
    uint32_t Lo = llvm::byteswap<uint32_t>(UV >> 32);
    return (Hi << 32) | Lo;
#endif
  } else {
    static_assert(!sizeof(T *), "Don't know how to handle the given type.");
    return 0;
  }
}
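
// Hand-checked example:
//   llvm::byteswap<uint32_t>(0x12345678) == 0x78563412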

template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr inline bool has_single_bit(T Value) noexcept {
  return (Value != 0) && ((Value & (Value - 1)) == 0);
}

namespace detail {
template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
  static unsigned count(T Val) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;

    // Bisection method.
    unsigned ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

#if defined(__GNUC__) || defined(_MSC_VER)
template <typename T> struct TrailingZerosCounter<T, 4> {
  static unsigned count(T Val) {
    if (Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || defined(__GNUC__)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct TrailingZerosCounter<T, 8> {
  static unsigned count(T Val) {
    if (Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || defined(__GNUC__)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the least significant bit to the most
///   stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// Returns std::numeric_limits<T>::digits on an input of 0.
template <typename T> [[nodiscard]] int countr_zero(T Val) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val);
}

namespace detail {
template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
  static unsigned count(T Val) {
    if (!Val)
      return std::numeric_limits<T>::digits;

    // Bisection method.
    unsigned ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

#if defined(__GNUC__) || defined(_MSC_VER)
template <typename T> struct LeadingZerosCounter<T, 4> {
  static unsigned count(T Val) {
    if (Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || defined(__GNUC__)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse(&Index, Val);
    return Index ^ 31;
#endif
  }
};

#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct LeadingZerosCounter<T, 8> {
  static unsigned count(T Val) {
    if (Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || defined(__GNUC__)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// Count number of 0's from the most significant bit to the least
///   stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// Returns std::numeric_limits<T>::digits on an input of 0.
template <typename T> [[nodiscard]] int countl_zero(T Val) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val);
}

/// Count the number of ones from the most significant bit to the first
/// zero bit.
///
/// Ex. countl_one(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// Returns std::numeric_limits<T>::digits on an input of all ones.
template <typename T> [[nodiscard]] int countl_one(T Value) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return llvm::countl_zero<T>(~Value);
}

/// Count the number of ones from the least significant bit to the first
/// zero bit.
///
/// Ex. countr_one(0x00FF00FF) == 8.
/// Only unsigned integral types are allowed.
///
/// Returns std::numeric_limits<T>::digits on an input of all ones.
template <typename T> [[nodiscard]] int countr_one(T Value) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return llvm::countr_zero<T>(~Value);
}
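
// Hand-checked examples for the counting helpers above (32-bit unsigned):
//   countr_zero(0x00000008u) == 3   // 0b...1000
//   countl_zero(0x00000008u) == 28
//   countl_one(0xF0000000u)  == 4
//   countr_one(0x00000007u)  == 3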

/// Returns the number of bits needed to represent Value if Value is nonzero.
/// Returns 0 otherwise.
///
/// Ex. bit_width(5) == 3.
template <typename T> [[nodiscard]] int bit_width(T Value) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  return std::numeric_limits<T>::digits - llvm::countl_zero(Value);
}

/// Returns the largest integral power of two no greater than Value if Value is
/// nonzero.  Returns 0 otherwise.
///
/// Ex. bit_floor(5) == 4.
template <typename T> [[nodiscard]] T bit_floor(T Value) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  if (!Value)
    return 0;
  return T(1) << (llvm::bit_width(Value) - 1);
}

/// Returns the smallest integral power of two no smaller than Value if Value is
/// nonzero.  Returns 1 otherwise.
///
/// Ex. bit_ceil(5) == 8.
///
/// The return value is undefined if the input is larger than the largest power
/// of two representable in T.
template <typename T> [[nodiscard]] T bit_ceil(T Value) {
  static_assert(std::is_unsigned_v<T>,
                "Only unsigned integral types are allowed.");
  if (Value < 2)
    return 1;
  return T(1) << llvm::bit_width<T>(Value - 1u);
}

namespace detail {
template <typename T, std::size_t SizeOfT> struct PopulationCounter {
  static int count(T Value) {
    // Generic version, forward to 32 bits.
    static_assert(SizeOfT <= 4, "Not implemented!");
#if defined(__GNUC__)
    return (int)__builtin_popcount(Value);
#else
    uint32_t v = Value;
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return int(((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24);
#endif
  }
};

template <typename T> struct PopulationCounter<T, 8> {
  static int count(T Value) {
#if defined(__GNUC__)
    return (int)__builtin_popcountll(Value);
#else
    uint64_t v = Value;
    v = v - ((v >> 1) & 0x5555555555555555ULL);
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
    return int((uint64_t)(v * 0x0101010101010101ULL) >> 56);
#endif
  }
};
} // namespace detail

/// Count the number of set bits in a value.
/// Ex. popcount(0xF000F000) = 8
/// Returns 0 if the word is zero.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] inline int popcount(T Value) noexcept {
  return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}

// Forward-declare rotr so that rotl can use it.
template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr T rotr(T V, int R);

template <typename T, typename = std::enable_if_t<std::is_unsigned_v<T>>>
[[nodiscard]] constexpr T rotl(T V, int R) {
  unsigned N = std::numeric_limits<T>::digits;

  R = R % N;
  if (!R)
    return V;

  if (R < 0)
    return llvm::rotr(V, -R);

  return (V << R) | (V >> (N - R));
}

template <typename T, typename> [[nodiscard]] constexpr T rotr(T V, int R) {
  unsigned N = std::numeric_limits<T>::digits;

  R = R % N;
  if (!R)
    return V;

  if (R < 0)
    return llvm::rotl(V, -R);

  return (V >> R) | (V << (N - R));
}
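
// Hand-checked examples; negative rotate amounts delegate to the opposite
// direction:
//   rotl<uint8_t>(0b00010110, 2)  == 0b01011000
//   rotr<uint8_t>(0b00010110, 2)  == 0b10000101
//   rotl<uint8_t>(0b00010110, -2) == rotr<uint8_t>(0b00010110, 2)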

} // namespace llvm

#endif

//===- StringMapEntry.h - String Hash table map interface -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the StringMapEntry class - it is intended to be a low
/// dependency implementation detail of StringMap that is more suitable for
/// inclusion in public headers than StringMap.h itself is.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STRINGMAPENTRY_H
#define LLVM_ADT_STRINGMAPENTRY_H

#include "llvm/ADT/StringRef.h"
#include <optional>

namespace llvm {

/// StringMapEntryBase - Shared base class of StringMapEntry instances.
class StringMapEntryBase {
  size_t keyLength;

public:
  explicit StringMapEntryBase(size_t keyLength) : keyLength(keyLength) {}

  size_t getKeyLength() const { return keyLength; }

protected:
  /// Helper to tail-allocate \p Key. It'd be nice to generalize this so it
  /// could be reused elsewhere, maybe even taking an llvm::function_ref to
  /// type-erase the allocator and put it in a source file.
  template <typename AllocatorTy>
  static void *allocateWithKey(size_t EntrySize, size_t EntryAlign,
                               StringRef Key, AllocatorTy &Allocator);
};

// Define out-of-line to dissuade inlining.
template <typename AllocatorTy>
void *StringMapEntryBase::allocateWithKey(size_t EntrySize, size_t EntryAlign,
                                          StringRef Key,
                                          AllocatorTy &Allocator) {
  size_t KeyLength = Key.size();

  // Allocate a new item with space for the string at the end and a null
  // terminator.
  size_t AllocSize = EntrySize + KeyLength + 1;
  void *Allocation = Allocator.Allocate(AllocSize, EntryAlign);
  assert(Allocation && "Unhandled out-of-memory");

  // Copy the string information.
  char *Buffer = reinterpret_cast<char *>(Allocation) + EntrySize;
  if (KeyLength > 0)
    ::memcpy(Buffer, Key.data(), KeyLength);
  Buffer[KeyLength] = 0; // Null terminate for convenience of clients.
  return Allocation;
}
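
// The resulting allocation layout is the entry object followed immediately by
// the key bytes and a null terminator:
//
//   [ entry (EntrySize bytes) | key bytes | '\0' ]
//   ^ Allocation              ^ Buffer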

/// StringMapEntryStorage - Holds the value in a StringMapEntry.
///
/// Factored out into a separate base class to make it easier to specialize.
/// This is primarily intended to support StringSet, which doesn't need a value
/// stored at all.
template <typename ValueTy>
class StringMapEntryStorage : public StringMapEntryBase {
public:
  ValueTy second;

  explicit StringMapEntryStorage(size_t keyLength)
      : StringMapEntryBase(keyLength), second() {}
  template <typename... InitTy>
  StringMapEntryStorage(size_t keyLength, InitTy &&...initVals)
      : StringMapEntryBase(keyLength),
        second(std::forward<InitTy>(initVals)...) {}
  StringMapEntryStorage(StringMapEntryStorage &e) = delete;

  const ValueTy &getValue() const { return second; }
  ValueTy &getValue() { return second; }

  void setValue(const ValueTy &V) { second = V; }
};

template <>
class StringMapEntryStorage<std::nullopt_t> : public StringMapEntryBase {
public:
  explicit StringMapEntryStorage(size_t keyLength,
                                 std::nullopt_t = std::nullopt)
      : StringMapEntryBase(keyLength) {}
  StringMapEntryStorage(StringMapEntryStorage &entry) = delete;

  std::nullopt_t getValue() const { return std::nullopt; }
};

/// StringMapEntry - This is used to represent one value that is inserted into
/// a StringMap.  It contains the Value itself and the key: the string length
/// and data.
template <typename ValueTy>
class StringMapEntry final : public StringMapEntryStorage<ValueTy> {
public:
  using StringMapEntryStorage<ValueTy>::StringMapEntryStorage;

  using ValueType = ValueTy;

  StringRef getKey() const {
    return StringRef(getKeyData(), this->getKeyLength());
  }

  /// getKeyData - Return the start of the string data that is the key for this
  /// value.  The string data is always stored immediately after the
  /// StringMapEntry object.
  const char *getKeyData() const {
    return reinterpret_cast<const char *>(this + 1);
  }

  StringRef first() const {
    return StringRef(getKeyData(), this->getKeyLength());
  }

  /// Create a StringMapEntry for the specified key and construct the value
  /// using \p InitVals.
  template <typename AllocatorTy, typename... InitTy>
  static StringMapEntry *create(StringRef key, AllocatorTy &allocator,
                                InitTy &&...initVals) {
    return new (StringMapEntryBase::allocateWithKey(
        sizeof(StringMapEntry), alignof(StringMapEntry), key, allocator))
        StringMapEntry(key.size(), std::forward<InitTy>(initVals)...);
  }

  /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
  /// into a StringMapEntry, return the StringMapEntry itself.
  static StringMapEntry &GetStringMapEntryFromKeyData(const char *keyData) {
    char *ptr = const_cast<char *>(keyData) - sizeof(StringMapEntry<ValueTy>);
    return *reinterpret_cast<StringMapEntry *>(ptr);
  }

  /// Destroy - Destroy this StringMapEntry, releasing memory back to the
  /// specified allocator.
  template <typename AllocatorTy> void Destroy(AllocatorTy &allocator) {
    // Free memory referenced by the item.
    size_t AllocSize = sizeof(StringMapEntry) + this->getKeyLength() + 1;
    this->~StringMapEntry();
    allocator.Deallocate(static_cast<void *>(this), AllocSize,
                         alignof(StringMapEntry));
  }
};

// Allow structured bindings on StringMapEntry.
template <std::size_t Index, typename ValueTy>
decltype(auto) get(const StringMapEntry<ValueTy> &E) {
  static_assert(Index < 2);
  if constexpr (Index == 0)
    return E.first();
  else
    return E.second;
}
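
// Together with the std::tuple_size/std::tuple_element specializations below,
// this enables structured bindings over entries, e.g. (sketch):
//   const llvm::StringMapEntry<int> &E = ...;
//   const auto &[Key, Value] = E; // Key: StringRef, Value: int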

} // end namespace llvm

namespace std {
template <typename ValueTy>
struct tuple_size<llvm::StringMapEntry<ValueTy>>
    : std::integral_constant<std::size_t, 2> {};

template <std::size_t I, typename ValueTy>
struct tuple_element<I, llvm::StringMapEntry<ValueTy>>
    : std::conditional<I == 0, llvm::StringRef, ValueTy> {};
} // namespace std

#endif // LLVM_ADT_STRINGMAPENTRY_H

//===- STLForwardCompat.h - Library features from future STLs ------C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains library features backported from future STL versions.
///
/// These should be replaced with their STL counterparts as the C++ version LLVM
/// is compiled with is updated.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STLFORWARDCOMPAT_H
#define LLVM_ADT_STLFORWARDCOMPAT_H

#include <optional>
#include <type_traits>

namespace llvm {

//===----------------------------------------------------------------------===//
//     Features from C++20
//===----------------------------------------------------------------------===//

template <typename T>
struct remove_cvref // NOLINT(readability-identifier-naming)
{
  using type = std::remove_cv_t<std::remove_reference_t<T>>;
};

template <typename T>
using remove_cvref_t // NOLINT(readability-identifier-naming)
    = typename llvm::remove_cvref<T>::type;

//===----------------------------------------------------------------------===//
//     Features from C++23
//===----------------------------------------------------------------------===//

// TODO: Remove this in favor of std::optional<T>::transform once we switch to
// C++23.
template <typename T, typename Function>
auto transformOptional(const std::optional<T> &O, const Function &F)
    -> std::optional<decltype(F(*O))> {
  if (O)
    return F(*O);
  return std::nullopt;
}

// TODO: Remove this in favor of std::optional<T>::transform once we switch to
// C++23.
template <typename T, typename Function>
auto transformOptional(std::optional<T> &&O, const Function &F)
    -> std::optional<decltype(F(*std::move(O)))> {
  if (O)
    return F(*std::move(O));
  return std::nullopt;
}
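
// A small usage sketch:
//   std::optional<int> O = 3;
//   std::optional<int> Sq = transformOptional(O, [](int V) { return V * V; });
//   // Sq == 9; an empty optional maps to std::nullopt.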

} // namespace llvm

#endif // LLVM_ADT_STLFORWARDCOMPAT_H

//===- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Generic implementation of equivalence classes through the use of Tarjan's
/// efficient union-find algorithm.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
#define LLVM_ADT_EQUIVALENCECLASSES_H

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>

namespace llvm {

/// EquivalenceClasses - This represents a collection of equivalence classes and
/// supports three efficient operations: insert an element into a class of its
/// own, union two classes, and find the class for a given element.  In
/// addition to these modification methods, it is possible to iterate over all
/// of the equivalence classes and all of the elements in a class.
///
/// This implementation is efficient: it only stores one copy of the element
/// being indexed per entry in the set, and allows any arbitrary
/// type to be indexed (as long as it can be ordered with operator< or a
/// comparator is provided).
///
/// Here is a simple example using integers:
///
/// \code
///  EquivalenceClasses<int> EC;
///  EC.unionSets(1, 2);                // insert 1, 2 into the same set
///  EC.insert(4); EC.insert(5);        // insert 4, 5 into own sets
///  EC.unionSets(5, 1);                // merge the set for 1 with 5's set.
///
///  for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end();
///       I != E; ++I) {           // Iterate over all of the equivalence sets.
///    if (!I->isLeader()) continue;   // Ignore non-leader sets.
///    for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I);
///         MI != EC.member_end(); ++MI)   // Loop over members in this set.
///      cerr << *MI << " ";  // Print member.
///    cerr << "\n";   // Finish set.
///  }
/// \endcode
///
/// This example prints:
///   4
///   5 1 2
///
template <class ElemTy, class Compare = std::less<ElemTy>>
class EquivalenceClasses {
  /// ECValue - The EquivalenceClasses data structure is just a set of these.
  /// Each of these represents a relation for a value.  First it stores the
  /// value itself, which provides the ordering that the set queries.  Next, it
  /// provides a "next pointer", which is used to enumerate all of the elements
  /// in the unioned set.  Finally, it defines either an "end of list pointer"
  /// or a "leader pointer" depending on whether the value itself is a leader.
  /// A "leader pointer" points to the node that is the leader for this
  /// element, if the node is not a leader.  An "end of list pointer" points
  /// to the last node in the list of members of this set.  Whether or not a
  /// node is a leader is determined by a bit stolen from one of the pointers.
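  ///
  /// For example, a three-element class {a, b, c} with leader 'a' is stored
  /// as the list a -> b -> c: 'b' and 'c' hold leader pointers (ultimately
  /// reaching 'a' after path compression), while 'a' holds an end-of-list
  /// pointer to 'c' and has its leader bit set.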
  class ECValue {
    friend class EquivalenceClasses;

    mutable const ECValue *Leader, *Next;
    ElemTy Data;

    // ECValue ctor - Start out with EndOfList pointing to this node, Next is
    // Null, isLeader = true.
    ECValue(const ElemTy &Elt)
      : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {}

    const ECValue *getLeader() const {
      if (isLeader()) return this;
      if (Leader->isLeader()) return Leader;
      // Path compression.
      return Leader = Leader->getLeader();
    }

    const ECValue *getEndOfList() const {
      assert(isLeader() && "Cannot get the end of a list for a non-leader!");
      return Leader;
    }

    void setNext(const ECValue *NewNext) const {
      assert(getNext() == nullptr && "Already has a next pointer!");
      Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
    }

  public:
    ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
                                  Data(RHS.Data) {
      // Only support copying of singleton nodes.
      assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
    }

    bool isLeader() const { return (intptr_t)Next & 1; }
    const ElemTy &getData() const { return Data; }

    const ECValue *getNext() const {
      return (ECValue*)((intptr_t)Next & ~(intptr_t)1);
    }
  };

  /// A wrapper of the comparator, to be passed to the set.
  struct ECValueComparator {
    using is_transparent = void;

    ECValueComparator() : compare(Compare()) {}

    bool operator()(const ECValue &lhs, const ECValue &rhs) const {
      return compare(lhs.Data, rhs.Data);
    }

    template <typename T>
    bool operator()(const T &lhs, const ECValue &rhs) const {
      return compare(lhs, rhs.Data);
    }

    template <typename T>
    bool operator()(const ECValue &lhs, const T &rhs) const {
      return compare(lhs.Data, rhs);
    }

    const Compare compare;
  };

  /// TheMapping - This implicitly provides a mapping from ElemTy values to the
  /// ECValues, it just keeps the key as part of the value.
  std::set<ECValue, ECValueComparator> TheMapping;

public:
  EquivalenceClasses() = default;
  EquivalenceClasses(const EquivalenceClasses &RHS) {
    operator=(RHS);
  }

  const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) {
    TheMapping.clear();
    for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
      if (I->isLeader()) {
        member_iterator MI = RHS.member_begin(I);
        member_iterator LeaderIt = member_begin(insert(*MI));
        for (++MI; MI != member_end(); ++MI)
          unionSets(LeaderIt, member_begin(insert(*MI)));
      }
    return *this;
  }

  //===--------------------------------------------------------------------===//
  // Inspection methods
  //

  /// iterator* - Provides a way to iterate over all values in the set.
  using iterator =
      typename std::set<ECValue, ECValueComparator>::const_iterator;

  iterator begin() const { return TheMapping.begin(); }
  iterator end() const { return TheMapping.end(); }

  bool empty() const { return TheMapping.empty(); }

  /// member_* Iterate over the members of an equivalence class.
  class member_iterator;
  member_iterator member_begin(iterator I) const {
    // Only leaders provide anything to iterate over.
    return member_iterator(I->isLeader() ? &*I : nullptr);
  }
  member_iterator member_end() const {
    return member_iterator(nullptr);
  }

  /// findValue - Return an iterator to the specified value.  If it does not
  /// exist, end() is returned.
  iterator findValue(const ElemTy &V) const {
    return TheMapping.find(V);
  }

  /// getLeaderValue - Return the leader for the specified value that is in the
  /// set.  It is an error to call this method for a value that is not yet in
  /// the set.  For that, call getOrInsertLeaderValue(V).
  const ElemTy &getLeaderValue(const ElemTy &V) const {
    member_iterator MI = findLeader(V);
    assert(MI != member_end() && "Value is not in the set!");
    return *MI;
  }

  /// getOrInsertLeaderValue - Return the leader for the specified value that is
  /// in the set.  If the member is not in the set, it is inserted, then
  /// returned.
  const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
    member_iterator MI = findLeader(insert(V));
    assert(MI != member_end() && "Value is not in the set!");
    return *MI;
  }

  /// getNumClasses - Return the number of equivalence classes in this set.
  /// Note that this is a linear time operation.
  unsigned getNumClasses() const {
    unsigned NC = 0;
    for (iterator I = begin(), E = end(); I != E; ++I)
      if (I->isLeader()) ++NC;
    return NC;
  }

  //===--------------------------------------------------------------------===//
  // Mutation methods

  /// insert - Insert a new value into the union/find set, ignoring the request
  /// if the value already exists.
  iterator insert(const ElemTy &Data) {
    return TheMapping.insert(ECValue(Data)).first;
  }

  /// findLeader - Given a value in the set, return a member iterator for the
  /// equivalence class it is in.  This does the path-compression part that
  /// makes union-find "union findy".  This returns an end iterator if the value
  /// is not in the equivalence class.
  member_iterator findLeader(iterator I) const {
    if (I == TheMapping.end()) return member_end();
    return member_iterator(I->getLeader());
  }
  member_iterator findLeader(const ElemTy &V) const {
    return findLeader(TheMapping.find(V));
  }

  /// unionSets - Merge the two equivalence sets for the specified values,
  /// inserting them if they do not already exist in the equivalence set.
  member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
    iterator V1I = insert(V1), V2I = insert(V2);
    return unionSets(findLeader(V1I), findLeader(V2I));
  }
  member_iterator unionSets(member_iterator L1, member_iterator L2) {
    assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!");
    if (L1 == L2) return L1;   // Unifying the same two sets, noop.

    // Otherwise, this is a real union operation.  Set the end of the L1 list to
    // point to the L2 leader node.
    const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node;
    L1LV.getEndOfList()->setNext(&L2LV);

    // Update L1LV's end of list pointer.
    L1LV.Leader = L2LV.getEndOfList();

    // Clear L2's leader flag:
    L2LV.Next = L2LV.getNext();

    // L2's leader is now L1.
    L2LV.Leader = &L1LV;
    return L1;
  }

  // isEquivalent - Return true if V1 is equivalent to V2. This is the case if
  // V1 is equal to V2 or if both belong to the same equivalence class.
  bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const {
    // Fast path: any element is equivalent to itself.
    if (V1 == V2)
      return true;
    auto It = findLeader(V1);
    return It != member_end() && It == findLeader(V2);
  }

  class member_iterator {
    friend class EquivalenceClasses;

    const ECValue *Node = nullptr;

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = const ElemTy;
    using size_type = std::size_t;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    explicit member_iterator() = default;
    explicit member_iterator(const ECValue *N) : Node(N) {}

    reference operator*() const {
      assert(Node != nullptr && "Dereferencing end()!");
      return Node->getData();
    }
    pointer operator->() const { return &operator*(); }

    member_iterator &operator++() {
      assert(Node != nullptr && "++'d off the end of the list!");
      Node = Node->getNext();
      return *this;
    }

    member_iterator operator++(int) {    // postincrement operators.
      member_iterator tmp = *this;
      ++*this;
      return tmp;
    }

    bool operator==(const member_iterator &RHS) const {
      return Node == RHS.Node;
    }
    bool operator!=(const member_iterator &RHS) const {
      return Node != RHS.Node;
    }
  };
};

} // end namespace llvm

#endif // LLVM_ADT_EQUIVALENCECLASSES_H

//===- llvm/ADT/SmallVectorExtras.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines less commonly used SmallVector utilities.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SMALLVECTOREXTRAS_H
#define LLVM_ADT_SMALLVECTOREXTRAS_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {

/// Map a range to a SmallVector with element types deduced from the mapping.
template <unsigned Size, class ContainerTy, class FuncTy>
auto map_to_vector(ContainerTy &&C, FuncTy &&F) {
  return to_vector<Size>(
      map_range(std::forward<ContainerTy>(C), std::forward<FuncTy>(F)));
}
template <class ContainerTy, class FuncTy>
auto map_to_vector(ContainerTy &&C, FuncTy &&F) {
  return to_vector(
      map_range(std::forward<ContainerTy>(C), std::forward<FuncTy>(F)));
}
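
// A brief usage sketch (an illustrative example; the inputs are assumptions):
//
// \code
//   std::array<int, 3> Xs = {1, 2, 3};
//   // The element type is deduced from the mapping: SmallVector<int, 3>.
//   auto Squares = map_to_vector<3>(Xs, [](int X) { return X * X; });
// \endcode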

} // namespace llvm

#endif // LLVM_ADT_SMALLVECTOREXTRAS_H

//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines generic set operations that may be used on sets of
/// different types and with different element types.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SETOPERATIONS_H
#define LLVM_ADT_SETOPERATIONS_H

namespace llvm {

/// set_union(A, B) - Compute A := A u B, return whether A changed.
///
template <class S1Ty, class S2Ty>
bool set_union(S1Ty &S1, const S2Ty &S2) {
  bool Changed = false;

  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
       SI != SE; ++SI)
    if (S1.insert(*SI).second)
      Changed = true;

  return Changed;
}
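
// For example (an illustrative sketch using std::set):
//
// \code
//   std::set<int> A = {1, 2}, B = {2, 3};
//   bool Changed = set_union(A, B); // A == {1, 2, 3}, Changed == true
// \endcode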

/// set_intersect(A, B) - Compute A := A ^ B
/// Identical to set_intersection, except that it works on set<>'s and
/// is nicer to use.  Functionally, this iterates through S1, removing
/// elements that are not contained in S2.
///
template <class S1Ty, class S2Ty>
void set_intersect(S1Ty &S1, const S2Ty &S2) {
   for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) {
     const auto &E = *I;
     ++I;
     if (!S2.count(E)) S1.erase(E);   // Erase element if not in S2
   }
}

template <class S1Ty, class S2Ty>
S1Ty set_intersection_impl(const S1Ty &S1, const S2Ty &S2) {
   S1Ty Result;
   for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end(); SI != SE;
        ++SI)
     if (S2.count(*SI))
      Result.insert(*SI);
   return Result;
}

/// set_intersection(A, B) - Return A ^ B
template <class S1Ty, class S2Ty>
S1Ty set_intersection(const S1Ty &S1, const S2Ty &S2) {
   if (S1.size() < S2.size())
     return set_intersection_impl(S1, S2);
   else
     return set_intersection_impl(S2, S1);
}
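
// For example (sketch): with A == {1, 2, 3} and B == {2, 3, 4},
// set_intersection(A, B) yields {2, 3}; the smaller set is the one iterated.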

/// set_difference(A, B) - Return A - B
///
template <class S1Ty, class S2Ty>
S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) {
  S1Ty Result;
  for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end();
       SI != SE; ++SI)
    if (!S2.count(*SI))       // if the element is not in set2
      Result.insert(*SI);
  return Result;
}

/// set_subtract(A, B) - Compute A := A - B
///
template <class S1Ty, class S2Ty>
void set_subtract(S1Ty &S1, const S2Ty &S2) {
  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
       SI != SE; ++SI)
    S1.erase(*SI);
}

/// set_subtract(A, B, C, D) - Compute A := A - B, set C to the elements of B
/// removed from A (A ^ B), and D to the elements of B not found in A (B - A).
template <class S1Ty, class S2Ty>
void set_subtract(S1Ty &S1, const S2Ty &S2, S1Ty &Removed, S1Ty &Remaining) {
  for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end(); SI != SE;
       ++SI)
    if (S1.erase(*SI))
      Removed.insert(*SI);
    else
      Remaining.insert(*SI);
}
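
// For example (sketch): with A == {1, 2, 3} and B == {2, 4}, calling
// set_subtract(A, B, Removed, Remaining) leaves A == {1, 3} and sets
// Removed == {2} (the part of B erased from A) and Remaining == {4} (B - A).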

/// set_is_subset(A, B) - Return true iff A is a subset of B.
///
template <class S1Ty, class S2Ty>
bool set_is_subset(const S1Ty &S1, const S2Ty &S2) {
  if (S1.size() > S2.size())
    return false;
  for (const auto It : S1)
    if (!S2.count(It))
      return false;
  return true;
}

} // end namespace llvm

#endif // LLVM_ADT_SETOPERATIONS_H

//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the newly proposed standard C++ interfaces for hashing
// arbitrary data and building hash functions for user-defined types. This
// interface was originally proposed in N3333[1] and is currently under review
// for inclusion in a future TR and/or standard.
//
// The primary interfaces provided comprise one type and three functions:
//
//  -- 'hash_code' class is an opaque type representing the hash code for some
//     data. It is the intended product of hashing, and can be used to implement
//     hash tables, checksumming, and other common uses of hashes. It is not an
//     integer type (although it can be converted to one) because it is risky
//     to assume much about the internals of a hash_code. In particular, each
//     execution of the program has a high probability of producing a different
//     hash_code for a given input. Thus their values are not stable enough to
//     save or persist, and should only be used within a single execution for
//     the construction of hashing data structures.
//
//  -- 'hash_value' is a function designed to be overloaded for each
//     user-defined type which wishes to be used within a hashing context. It
//     should be overloaded within the user-defined type's namespace and found
//     via ADL. Overloads for primitive types are provided by this library.
//
//  -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
//      programmers in easily and intuitively combining a set of data into
//      a single hash_code for their object. They should only logically be used
//      within the implementation of a 'hash_value' routine or similar context.
//
// Note that 'hash_combine_range' contains very special logic for hashing
// a contiguous array of integers or pointers. This logic is *extremely* fast:
// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
// benchmarked at over 6.5 GiB/s for large keys, and <20 cycles/hash for keys
// under 32 bytes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_HASHING_H
#define LLVM_ADT_HASHING_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

namespace llvm {
template <typename T, typename Enable> struct DenseMapInfo;

/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
/// While it wraps and exposes a numeric value, this value should not be
/// trusted to be stable or predictable across processes or executions.
///
/// In order to obtain the hash_code for an object 'x':
/// \code
///   using llvm::hash_value;
///   llvm::hash_code code = hash_value(x);
/// \endcode
class hash_code {
  size_t value;

public:
  /// Default construct a hash_code.
  /// Note that this leaves the value uninitialized.
  hash_code() = default;

  /// Form a hash code directly from a numerical value.
  hash_code(size_t value) : value(value) {}

  /// Convert the hash code to its numerical value for use.
  /*explicit*/ operator size_t() const { return value; }

  friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
    return lhs.value != rhs.value;
  }

  /// Allow a hash_code to be directly run through hash_value.
  friend size_t hash_value(const hash_code &code) { return code.value; }
};

/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
/// contrast to hash_combine which may produce different hash_codes for
/// differing argument types even if they would implicit promote to a common
/// type without changing the value.
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value);

/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);

/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);

/// Compute a hash_code for a tuple.
template <typename... Ts>
hash_code hash_value(const std::tuple<Ts...> &arg);

/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);

/// Compute a hash_code for a standard string.
template <typename T> hash_code hash_value(const std::optional<T> &arg);

/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
/// attackable and to ensure that output which is intended to be stable does
/// not rely on the particulars of the hash codes produced.
///
/// That said, there are use cases where it is important to be able to
/// reproduce *exactly* a specific behavior. To that end, we provide a function
/// which will forcibly set the seed to a fixed value. This must be done at the
/// start of the program, before any hashes are computed. Also, it cannot be
/// undone. This makes it thread-hostile and essentially limits its use to the
/// very start of a simple program designed for reproducible behavior.
void set_fixed_execution_hash_seed(uint64_t fixed_value);
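
// For example, a tool that needs reproducible hashing could do (sketch):
//
// \code
//   int main(int argc, char **argv) {
//     llvm::set_fixed_execution_hash_seed(42); // before any hashing occurs
//     // ... run the reproducible workload ...
//   }
// \endcode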


// All of the implementation details of actually computing the various hash
// code values are held within this namespace. These routines are included in
// the header file mainly to allow inlining and constant propagation.
namespace hashing {
namespace detail {

inline uint64_t fetch64(const char *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

inline uint32_t fetch32(const char *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  if (sys::IsBigEndianHost)
    sys::swapByteOrder(result);
  return result;
}

/// Some primes between 2^63 and 2^64 for various uses.
static constexpr uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static constexpr uint64_t k1 = 0xb492b66fbe98f273ULL;
static constexpr uint64_t k2 = 0x9ae16a3b2f90404fULL;
static constexpr uint64_t k3 = 0xc949d7c7509e6557ULL;

/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
  // Avoid shifting by 64: doing so yields an undefined result.
  return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}

inline uint64_t shift_mix(uint64_t val) {
  return val ^ (val >> 47);
}

inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
  // Murmur-inspired hashing.
  const uint64_t kMul = 0x9ddfea08eb382d69ULL;
  uint64_t a = (low ^ high) * kMul;
  a ^= (a >> 47);
  uint64_t b = (high ^ a) * kMul;
  b ^= (b >> 47);
  b *= kMul;
  return b;
}

inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
  uint8_t a = s[0];
  uint8_t b = s[len >> 1];
  uint8_t c = s[len - 1];
  uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
  uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
  return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
}

inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch32(s);
  return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
}

inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s);
  uint64_t b = fetch64(s + len - 8);
  return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
}

inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t a = fetch64(s) * k1;
  uint64_t b = fetch64(s + 8);
  uint64_t c = fetch64(s + len - 8) * k2;
  uint64_t d = fetch64(s + len - 16) * k0;
  return hash_16_bytes(llvm::rotr<uint64_t>(a - b, 43) +
                           llvm::rotr<uint64_t>(c ^ seed, 30) + d,
                       a + llvm::rotr<uint64_t>(b ^ k3, 20) - c + len + seed);
}

inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
  uint64_t z = fetch64(s + 24);
  uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
  uint64_t b = llvm::rotr<uint64_t>(a + z, 52);
  uint64_t c = llvm::rotr<uint64_t>(a, 37);
  a += fetch64(s + 8);
  c += llvm::rotr<uint64_t>(a, 7);
  a += fetch64(s + 16);
  uint64_t vf = a + z;
  uint64_t vs = b + llvm::rotr<uint64_t>(a, 31) + c;
  a = fetch64(s + 16) + fetch64(s + len - 32);
  z = fetch64(s + len - 8);
  b = llvm::rotr<uint64_t>(a + z, 52);
  c = llvm::rotr<uint64_t>(a, 37);
  a += fetch64(s + len - 24);
  c += llvm::rotr<uint64_t>(a, 7);
  a += fetch64(s + len - 16);
  uint64_t wf = a + z;
  uint64_t ws = b + llvm::rotr<uint64_t>(a, 31) + c;
  uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
  return shift_mix((seed ^ (r * k0)) + vs) * k2;
}

inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
  if (length >= 4 && length <= 8)
    return hash_4to8_bytes(s, length, seed);
  if (length > 8 && length <= 16)
    return hash_9to16_bytes(s, length, seed);
  if (length > 16 && length <= 32)
    return hash_17to32_bytes(s, length, seed);
  if (length > 32)
    return hash_33to64_bytes(s, length, seed);
  if (length != 0)
    return hash_1to3_bytes(s, length, seed);

  return k2 ^ seed;
}

/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
  uint64_t h0 = 0, h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;

  /// Create a new hash_state structure and initialize it based on the
  /// seed and the first 64-byte chunk.
  /// This effectively performs the initial mix.
  static hash_state create(const char *s, uint64_t seed) {
    hash_state state = {0,
                        seed,
                        hash_16_bytes(seed, k1),
                        llvm::rotr<uint64_t>(seed ^ k1, 49),
                        seed * k1,
                        shift_mix(seed),
                        0};
    state.h6 = hash_16_bytes(state.h4, state.h5);
    state.mix(s);
    return state;
  }

  /// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
  /// and 'b', including whatever is already in 'a' and 'b'.
  static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
    a += fetch64(s);
    uint64_t c = fetch64(s + 24);
    b = llvm::rotr<uint64_t>(b + a + c, 21);
    uint64_t d = a;
    a += fetch64(s + 8) + fetch64(s + 16);
    b += llvm::rotr<uint64_t>(a, 44) + d;
    a += c;
  }

  /// Mix in a 64-byte buffer of data.
  /// We mix all 64 bytes even when the chunk length is smaller, but we
  /// record the actual length.
  void mix(const char *s) {
    h0 = llvm::rotr<uint64_t>(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
    h1 = llvm::rotr<uint64_t>(h1 + h4 + fetch64(s + 48), 42) * k1;
    h0 ^= h6;
    h1 += h3 + fetch64(s + 40);
    h2 = llvm::rotr<uint64_t>(h2 + h5, 33) * k1;
    h3 = h4 * k1;
    h4 = h0 + h5;
    mix_32_bytes(s, h3, h4);
    h5 = h2 + h6;
    h6 = h1 + fetch64(s + 16);
    mix_32_bytes(s + 32, h5, h6);
    std::swap(h2, h0);
  }

  /// Compute the final 64-bit hash code value based on the current
  /// state and the length of bytes hashed.
  uint64_t finalize(size_t length) {
    return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
                         hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
  }
};


/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
extern uint64_t fixed_seed_override;

inline uint64_t get_execution_seed() {
  // FIXME: This needs to be a per-execution seed. This is just a placeholder
  // implementation. Switching to a per-execution seed is likely to flush out
  // instability bugs and so will happen as its own commit.
  //
  // However, if there is a fixed seed override set the first time this is
  // called, return that instead of the per-execution seed.
  const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
  static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
  return seed;
}


/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
/// first be passed to hash_value, and the resulting hash_codes combined.
//
// FIXME: We want to replace is_integral_or_enum and is_pointer here with
// a predicate which asserts that comparing the underlying storage of two
// values of the type for equality is equivalent to comparing the two values
// for equality. For all the platforms we care about, this holds for integers
// and pointers, but there are platforms where it doesn't and we would like to
// support user-defined types which happen to satisfy this property.
template <typename T> struct is_hashable_data
  : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
                                   std::is_pointer<T>::value) &&
                                  64 % sizeof(T) == 0)> {};

// Special case std::pair to detect when both types are viable and when there
// is no alignment-derived padding in the pair. This is a bit of a lie because
// std::pair isn't truly POD, but it's close enough in all reasonable
// implementations for our use case of hashing the underlying data.
template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
  : std::integral_constant<bool, (is_hashable_data<T>::value &&
                                  is_hashable_data<U>::value &&
                                  (sizeof(T) + sizeof(U)) ==
                                   sizeof(std::pair<T, U>))> {};
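
// For instance (illustrative): is_hashable_data<int>::value and
// is_hashable_data<void *>::value are true, while
// is_hashable_data<std::string>::value is false, so std::string values must
// first be passed through hash_value.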

/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
std::enable_if_t<is_hashable_data<T>::value, T>
get_hashable_data(const T &value) {
  return value;
}
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
std::enable_if_t<!is_hashable_data<T>::value, size_t>
get_hashable_data(const T &value) {
  using ::llvm::hash_value;
  return hash_value(value);
}

/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
/// buffer, and if not immediately returns false. If there is space, it
/// copies the underlying bytes of value into the buffer, advances the
/// buffer_ptr past the copied bytes, and returns true.
template <typename T>
bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
                       size_t offset = 0) {
  size_t store_size = sizeof(value) - offset;
  if (buffer_ptr + store_size > buffer_end)
    return false;
  const char *value_data = reinterpret_cast<const char *>(&value);
  memcpy(buffer_ptr, value_data + offset, store_size);
  buffer_ptr += store_size;
  return true;
}

/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
  const uint64_t seed = get_execution_seed();
  char buffer[64], *buffer_ptr = buffer;
  char *const buffer_end = std::end(buffer);
  while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                            get_hashable_data(*first)))
    ++first;
  if (first == last)
    return hash_short(buffer, buffer_ptr - buffer, seed);
  assert(buffer_ptr == buffer_end);

  hash_state state = hash_state::create(buffer, seed);
  size_t length = 64;
  while (first != last) {
    // Fill up the buffer. We don't clear it, which re-mixes the last round
    // when only a partial 64-byte chunk is left.
    buffer_ptr = buffer;
    while (first != last && store_and_advance(buffer_ptr, buffer_end,
                                              get_hashable_data(*first)))
      ++first;

    // Rotate the buffer if we did a partial fill in order to simulate doing
    // a mix of the last 64-bytes. That is how the algorithm works when we
    // have a contiguous byte sequence, and we want to emulate that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;
  }

  return state.finalize(length);
}

/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
/// a hash_code for each object and then combining them, this (as an
/// optimization) directly combines the integers. Also, because the integers
/// are stored in contiguous memory, this routine avoids copying each value
/// and directly reads from the underlying memory.
template <typename ValueT>
std::enable_if_t<is_hashable_data<ValueT>::value, hash_code>
hash_combine_range_impl(ValueT *first, ValueT *last) {
  const uint64_t seed = get_execution_seed();
  const char *s_begin = reinterpret_cast<const char *>(first);
  const char *s_end = reinterpret_cast<const char *>(last);
  const size_t length = std::distance(s_begin, s_end);
  if (length <= 64)
    return hash_short(s_begin, length, seed);

  const char *s_aligned_end = s_begin + (length & ~63);
  hash_state state = hash_state::create(s_begin, seed);
  s_begin += 64;
  while (s_begin != s_aligned_end) {
    state.mix(s_begin);
    s_begin += 64;
  }
  if (length & 63)
    state.mix(s_end - 64);

  return state.finalize(length);
}

} // namespace detail
} // namespace hashing


/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
/// and is significantly faster given pointers and types which can be hashed as
/// a sequence of bytes.
template <typename InputIteratorT>
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
  return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
}
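
// For example (an illustrative sketch):
//
// \code
//   std::vector<int> Values = {1, 2, 3};
//   hash_code H = hash_combine_range(Values.begin(), Values.end());
// \endcode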


// Implementation details for hash_combine.
namespace hashing {
namespace detail {

/// Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
/// recursive combining of arguments used in hash_combine. It is particularly
/// useful at minimizing the code in the recursive calls to ease the pain
/// caused by a lack of variadic functions.
struct hash_combine_recursive_helper {
  char buffer[64] = {};
  hash_state state;
  const uint64_t seed;

public:
  /// Construct a recursive hash combining helper.
  ///
  /// This sets up the state for a recursive hash combine, including getting
  /// the seed and buffer setup.
  hash_combine_recursive_helper()
    : seed(get_execution_seed()) {}

  /// Combine one chunk of data into the current in-flight hash.
  ///
  /// This merges one chunk of data into the hash. First it tries to buffer
  /// the data. If the buffer is full, it hashes the buffer into its
  /// hash_state, empties it, and then merges the new chunk in. This also
  /// handles cases where the data straddles the end of the buffer.
  template <typename T>
  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
                     T data) {
    if (!store_and_advance(buffer_ptr, buffer_end, data)) {
      // Check for skew which prevents the buffer from being packed, and do
      // a partial store into the buffer to fill it. This is only a concern
      // with the variadic combine because that formation can have varying
      // argument types.
      size_t partial_store_size = buffer_end - buffer_ptr;
      memcpy(buffer_ptr, &data, partial_store_size);

      // If the store fails, our buffer is full and ready to hash. We have to
      // either initialize the hash state (on the first full buffer) or mix
      // this buffer into the existing hash state. Length tracks the *hashed*
      // length, not the buffered length.
      if (length == 0) {
        state = hash_state::create(buffer, seed);
        length = 64;
      } else {
        // Mix this chunk into the current state and bump length up by 64.
        state.mix(buffer);
        length += 64;
      }
      // Reset the buffer_ptr to the head of the buffer for the next chunk of
      // data.
      buffer_ptr = buffer;

      // Try again to store into the buffer -- this cannot fail as we only
      // store types smaller than the buffer.
      if (!store_and_advance(buffer_ptr, buffer_end, data,
                             partial_store_size))
        llvm_unreachable("buffer smaller than stored type");
    }
    return buffer_ptr;
  }

  /// Recursive, variadic combining method.
  ///
  /// This function recurses through each argument, combining that argument
  /// into a single hash.
  template <typename T, typename ...Ts>
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
                    const T &arg, const Ts &...args) {
    buffer_ptr =
        combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));

    // Recurse to the next argument.
    return combine(length, buffer_ptr, buffer_end, args...);
  }

  /// Base case for recursive, variadic combining.
  ///
  /// The base case when combining arguments recursively is reached when all
  /// arguments have been handled. It flushes the remaining buffer and
  /// constructs a hash_code.
  hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
    // Check whether the entire set of values fit in the buffer. If so, we'll
    // use the optimized short hashing routine and skip state entirely.
    if (length == 0)
      return hash_short(buffer, buffer_ptr - buffer, seed);

    // Mix the final buffer, rotating it if we did a partial fill in order to
    // simulate doing a mix of the last 64-bytes. That is how the algorithm
    // works when we have a contiguous byte sequence, and we want to emulate
    // that here.
    std::rotate(buffer, buffer_ptr, buffer_end);

    // Mix this chunk into the current state.
    state.mix(buffer);
    length += buffer_ptr - buffer;

    return state.finalize(length);
  }
};

} // namespace detail
} // namespace hashing

/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
/// attempts to call a \see hash_value overload (via ADL) for the type. For
/// integer and pointer types it directly combines their data into the
/// resulting hash_code.
///
/// The result is suitable for returning from a user's hash_value
/// *implementation* for their user-defined type. Consumers of a type should
/// *not* call this routine, they should instead call 'hash_value'.
template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
  // Recursively hash each argument using a helper class.
  ::llvm::hashing::detail::hash_combine_recursive_helper helper;
  return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
}
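
// For example, a user-defined type would typically hook into this as follows
// (an illustrative sketch):
//
// \code
//   struct Point {
//     int X, Y;
//     friend hash_code hash_value(const Point &P) {
//       return hash_combine(P.X, P.Y);
//     }
//   };
// \endcode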

// Implementation details for implementations of hash_value overloads provided
// here.
namespace hashing {
namespace detail {

/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,
/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
inline hash_code hash_integer_value(uint64_t value) {
  // Similar to hash_4to8_bytes but using a seed instead of length.
  const uint64_t seed = get_execution_seed();
  const char *s = reinterpret_cast<const char *>(&value);
  const uint64_t a = fetch32(s);
  return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
}

} // namespace detail
} // namespace hashing

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
std::enable_if_t<is_integral_or_enum<T>::value, hash_code> hash_value(T value) {
  return ::llvm::hashing::detail::hash_integer_value(
      static_cast<uint64_t>(value));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T> hash_code hash_value(const T *ptr) {
  return ::llvm::hashing::detail::hash_integer_value(
    reinterpret_cast<uintptr_t>(ptr));
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg) {
  return hash_combine(arg.first, arg.second);
}

template <typename... Ts> hash_code hash_value(const std::tuple<Ts...> &arg) {
  return std::apply([](const auto &...xs) { return hash_combine(xs...); }, arg);
}

// Declared and documented above, but defined here so that any of the hashing
// infrastructure is available.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg) {
  return hash_combine_range(arg.begin(), arg.end());
}

template <typename T> hash_code hash_value(const std::optional<T> &arg) {
  return arg ? hash_combine(true, *arg) : hash_value(false);
}

template <> struct DenseMapInfo<hash_code, void> {
  static inline hash_code getEmptyKey() { return hash_code(-1); }
  static inline hash_code getTombstoneKey() { return hash_code(-2); }
  static unsigned getHashValue(hash_code val) { return val; }
  static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
};

} // namespace llvm

#endif // LLVM_ADT_HASHING_H

//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the SmallBitVector class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SMALLBITVECTOR_H
#define LLVM_ADT_SMALLBITVECTOR_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {

/// This is a 'bitvector' (really, a variable-sized bit array), optimized for
/// the case when the array is small. It contains one pointer-sized field, which
/// is directly used as a plain collection of bits when possible, or as a
/// pointer to a larger heap-allocated array when necessary. This allows normal
/// "small" cases to be fast without losing generality for large inputs.
class SmallBitVector {
  // TODO: In "large" mode, a pointer to a BitVector is used, leading to an
  // unnecessary level of indirection. It would be more efficient to use a
  // pointer to memory containing size, allocation size, and the array of bits.
  uintptr_t X = 1;

  enum {
    // The number of bits in this class.
    NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,

    // One bit is used to discriminate between small and large mode. The
    // remaining bits are used for the small-mode representation.
    SmallNumRawBits = NumBaseBits - 1,

    // A few more bits are used to store the size of the bit set in small mode.
    // Theoretically this is a ceil-log2. These bits are encoded in the most
    // significant bits of the raw bits.
    SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
                        NumBaseBits == 64 ? 6 :
                        SmallNumRawBits),

    // The remaining bits are used to store the actual set in small mode.
    SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
  };
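
  // For example, on a 64-bit host this leaves SmallNumDataBits == 64 - 1 - 6
  // == 57, so bit vectors of up to 57 bits stay inline; on a 32-bit host the
  // small capacity is 32 - 1 - 5 == 26 bits.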

  static_assert(NumBaseBits == 64 || NumBaseBits == 32,
                "Unsupported word size");

public:
  using size_type = uintptr_t;

  // Encapsulation of a single bit.
  class reference {
    SmallBitVector &TheVector;
    unsigned BitPos;

  public:
    reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}

    reference(const reference&) = default;

    reference& operator=(reference t) {
      *this = bool(t);
      return *this;
    }

    reference& operator=(bool t) {
      if (t)
        TheVector.set(BitPos);
      else
        TheVector.reset(BitPos);
      return *this;
    }

    operator bool() const {
      return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
    }
  };

private:
  BitVector *getPointer() const {
    assert(!isSmall());
    return reinterpret_cast<BitVector *>(X);
  }

  void switchToSmall(uintptr_t NewSmallBits, size_type NewSize) {
    X = 1;
    setSmallSize(NewSize);
    setSmallBits(NewSmallBits);
  }

  void switchToLarge(BitVector *BV) {
    X = reinterpret_cast<uintptr_t>(BV);
    assert(!isSmall() && "Tried to use an unaligned pointer");
  }

  // Return all the bits used for the "small" representation; this includes
  // bits for the size as well as the element bits.
  uintptr_t getSmallRawBits() const {
    assert(isSmall());
    return X >> 1;
  }

  void setSmallRawBits(uintptr_t NewRawBits) {
    assert(isSmall());
    X = (NewRawBits << 1) | uintptr_t(1);
  }

  // Return the size.
  size_type getSmallSize() const {
    return getSmallRawBits() >> SmallNumDataBits;
  }

  void setSmallSize(size_type Size) {
    setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
  }

  // Return the element bits.
  uintptr_t getSmallBits() const {
    return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
  }

  void setSmallBits(uintptr_t NewBits) {
    setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
                    (getSmallSize() << SmallNumDataBits));
  }

public:
  /// Creates an empty bitvector.
  SmallBitVector() = default;

  /// Creates a bitvector of specified number of bits. All bits are initialized
  /// to the specified value.
  explicit SmallBitVector(unsigned s, bool t = false) {
    if (s <= SmallNumDataBits)
      switchToSmall(t ? ~uintptr_t(0) : 0, s);
    else
      switchToLarge(new BitVector(s, t));
  }

  /// SmallBitVector copy ctor.
  SmallBitVector(const SmallBitVector &RHS) {
    if (RHS.isSmall())
      X = RHS.X;
    else
      switchToLarge(new BitVector(*RHS.getPointer()));
  }

  SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
    RHS.X = 1;
  }

  ~SmallBitVector() {
    if (!isSmall())
      delete getPointer();
  }

  using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
  using set_iterator = const_set_bits_iterator;

  const_set_bits_iterator set_bits_begin() const {
    return const_set_bits_iterator(*this);
  }

  const_set_bits_iterator set_bits_end() const {
    return const_set_bits_iterator(*this, -1);
  }

  iterator_range<const_set_bits_iterator> set_bits() const {
    return make_range(set_bits_begin(), set_bits_end());
  }

  bool isSmall() const { return X & uintptr_t(1); }

  /// Tests whether there are no bits in this bitvector.
  bool empty() const {
    return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
  }

  /// Returns the number of bits in this bitvector.
  size_type size() const {
    return isSmall() ? getSmallSize() : getPointer()->size();
  }

  /// Returns the number of bits which are set.
  size_type count() const {
    if (isSmall()) {
      uintptr_t Bits = getSmallBits();
      return llvm::popcount(Bits);
    }
    return getPointer()->count();
  }

  /// Returns true if any bit is set.
  bool any() const {
    if (isSmall())
      return getSmallBits() != 0;
    return getPointer()->any();
  }

  /// Returns true if all bits are set.
  bool all() const {
    if (isSmall())
      return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
    return getPointer()->all();
  }

  /// Returns true if none of the bits are set.
  bool none() const {
    if (isSmall())
      return getSmallBits() == 0;
    return getPointer()->none();
  }

  /// Returns the index of the first set bit, -1 if none of the bits are set.
  int find_first() const {
    if (isSmall()) {
      uintptr_t Bits = getSmallBits();
      if (Bits == 0)
        return -1;
      return llvm::countr_zero(Bits);
    }
    return getPointer()->find_first();
  }

  int find_last() const {
    if (isSmall()) {
      uintptr_t Bits = getSmallBits();
      if (Bits == 0)
        return -1;
      return NumBaseBits - llvm::countl_zero(Bits) - 1;
    }
    return getPointer()->find_last();
  }

  /// Returns the index of the first unset bit, -1 if all of the bits are set.
  int find_first_unset() const {
    if (isSmall()) {
      if (count() == getSmallSize())
        return -1;

      uintptr_t Bits = getSmallBits();
      return llvm::countr_one(Bits);
    }
    return getPointer()->find_first_unset();
  }

  int find_last_unset() const {
    if (isSmall()) {
      if (count() == getSmallSize())
        return -1;

      uintptr_t Bits = getSmallBits();
      // Set unused bits.
      Bits |= ~uintptr_t(0) << getSmallSize();
      return NumBaseBits - llvm::countl_one(Bits) - 1;
    }
    return getPointer()->find_last_unset();
  }

  /// Returns the index of the next set bit following the "Prev" bit.
  /// Returns -1 if the next set bit is not found.
  int find_next(unsigned Prev) const {
    if (isSmall()) {
      uintptr_t Bits = getSmallBits();
      // Mask off previous bits.
      Bits &= ~uintptr_t(0) << (Prev + 1);
      if (Bits == 0 || Prev + 1 >= getSmallSize())
        return -1;
      return llvm::countr_zero(Bits);
    }
    return getPointer()->find_next(Prev);
  }

  /// Returns the index of the next unset bit following the "Prev" bit.
  /// Returns -1 if the next unset bit is not found.
  int find_next_unset(unsigned Prev) const {
    if (isSmall()) {
      uintptr_t Bits = getSmallBits();
      // Mask in previous bits.
      Bits |= (uintptr_t(1) << (Prev + 1)) - 1;
      // Mask in unused bits.
      Bits |= ~uintptr_t(0) << getSmallSize();

      if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize())
        return -1;
      return llvm::countr_one(Bits);
    }
    return getPointer()->find_next_unset(Prev);
  }

  /// find_prev - Returns the index of the first set bit that precedes the
  /// bit at \p PriorTo.  Returns -1 if all previous bits are unset.
  int find_prev(unsigned PriorTo) const {
    if (isSmall()) {
      if (PriorTo == 0)
        return -1;

      --PriorTo;
      uintptr_t Bits = getSmallBits();
      Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
      if (Bits == 0)
        return -1;

      return NumBaseBits - llvm::countl_zero(Bits) - 1;
    }
    return getPointer()->find_prev(PriorTo);
  }

  /// Clear all bits.
  void clear() {
    if (!isSmall())
      delete getPointer();
    switchToSmall(0, 0);
  }

  /// Grow or shrink the bitvector.
  void resize(unsigned N, bool t = false) {
    if (!isSmall()) {
      getPointer()->resize(N, t);
    } else if (SmallNumDataBits >= N) {
      uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
      setSmallSize(N);
      setSmallBits(NewBits | getSmallBits());
    } else {
      BitVector *BV = new BitVector(N, t);
      uintptr_t OldBits = getSmallBits();
      for (size_type I = 0, E = getSmallSize(); I != E; ++I)
        (*BV)[I] = (OldBits >> I) & 1;
      switchToLarge(BV);
    }
  }

  void reserve(unsigned N) {
    if (isSmall()) {
      if (N > SmallNumDataBits) {
        uintptr_t OldBits = getSmallRawBits();
        size_type SmallSize = getSmallSize();
        BitVector *BV = new BitVector(SmallSize);
        for (size_type I = 0; I < SmallSize; ++I)
          if ((OldBits >> I) & 1)
            BV->set(I);
        BV->reserve(N);
        switchToLarge(BV);
      }
    } else {
      getPointer()->reserve(N);
    }
  }

  // Set, reset, flip
  SmallBitVector &set() {
    if (isSmall())
      setSmallBits(~uintptr_t(0));
    else
      getPointer()->set();
    return *this;
  }

  SmallBitVector &set(unsigned Idx) {
    if (isSmall()) {
      assert(Idx <= static_cast<unsigned>(
                        std::numeric_limits<uintptr_t>::digits) &&
             "undefined behavior");
      setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
    } else
      getPointer()->set(Idx);
    return *this;
  }

  /// Efficiently set a range of bits in [I, E)
  SmallBitVector &set(unsigned I, unsigned E) {
    assert(I <= E && "Attempted to set backwards range!");
    assert(E <= size() && "Attempted to set out-of-bounds range!");
    if (I == E) return *this;
    if (isSmall()) {
      uintptr_t EMask = ((uintptr_t)1) << E;
      uintptr_t IMask = ((uintptr_t)1) << I;
      uintptr_t Mask = EMask - IMask;
      setSmallBits(getSmallBits() | Mask);
    } else
      getPointer()->set(I, E);
    return *this;
  }

  SmallBitVector &reset() {
    if (isSmall())
      setSmallBits(0);
    else
      getPointer()->reset();
    return *this;
  }

  SmallBitVector &reset(unsigned Idx) {
    if (isSmall())
      setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
    else
      getPointer()->reset(Idx);
    return *this;
  }

  /// Efficiently reset a range of bits in [I, E)
  SmallBitVector &reset(unsigned I, unsigned E) {
    assert(I <= E && "Attempted to reset backwards range!");
    assert(E <= size() && "Attempted to reset out-of-bounds range!");
    if (I == E) return *this;
    if (isSmall()) {
      uintptr_t EMask = ((uintptr_t)1) << E;
      uintptr_t IMask = ((uintptr_t)1) << I;
      uintptr_t Mask = EMask - IMask;
      setSmallBits(getSmallBits() & ~Mask);
    } else
      getPointer()->reset(I, E);
    return *this;
  }

  SmallBitVector &flip() {
    if (isSmall())
      setSmallBits(~getSmallBits());
    else
      getPointer()->flip();
    return *this;
  }

  SmallBitVector &flip(unsigned Idx) {
    if (isSmall())
      setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
    else
      getPointer()->flip(Idx);
    return *this;
  }

  // No argument flip.
  SmallBitVector operator~() const {
    return SmallBitVector(*this).flip();
  }

  // Indexing.
  reference operator[](unsigned Idx) {
    assert(Idx < size() && "Out-of-bounds Bit access.");
    return reference(*this, Idx);
  }

  bool operator[](unsigned Idx) const {
    assert(Idx < size() && "Out-of-bounds Bit access.");
    if (isSmall())
      return ((getSmallBits() >> Idx) & 1) != 0;
    return getPointer()->operator[](Idx);
  }

  /// Return the last element in the vector.
  bool back() const {
    assert(!empty() && "Getting last element of empty vector.");
    return (*this)[size() - 1];
  }

  bool test(unsigned Idx) const {
    return (*this)[Idx];
  }

  // Push single bit to end of vector.
  void push_back(bool Val) {
    resize(size() + 1, Val);
  }

  /// Pop one bit from the end of the vector.
  void pop_back() {
    assert(!empty() && "Empty vector has no element to pop.");
    resize(size() - 1);
  }

  /// Test if any common bits are set.
  bool anyCommon(const SmallBitVector &RHS) const {
    if (isSmall() && RHS.isSmall())
      return (getSmallBits() & RHS.getSmallBits()) != 0;
    if (!isSmall() && !RHS.isSmall())
      return getPointer()->anyCommon(*RHS.getPointer());

    for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
      if (test(i) && RHS.test(i))
        return true;
    return false;
  }

  // Comparison operators.
  bool operator==(const SmallBitVector &RHS) const {
    if (size() != RHS.size())
      return false;
    if (isSmall() && RHS.isSmall())
      return getSmallBits() == RHS.getSmallBits();
    else if (!isSmall() && !RHS.isSmall())
      return *getPointer() == *RHS.getPointer();
    else {
      for (size_type I = 0, E = size(); I != E; ++I) {
        if ((*this)[I] != RHS[I])
          return false;
      }
      return true;
    }
  }

  bool operator!=(const SmallBitVector &RHS) const {
    return !(*this == RHS);
  }

  // Intersection, union, disjoint union.
  // FIXME: BitVector::operator&= does not resize the LHS, but this does.
  SmallBitVector &operator&=(const SmallBitVector &RHS) {
    resize(std::max(size(), RHS.size()));
    if (isSmall() && RHS.isSmall())
      setSmallBits(getSmallBits() & RHS.getSmallBits());
    else if (!isSmall() && !RHS.isSmall())
      getPointer()->operator&=(*RHS.getPointer());
    else {
      size_type I, E;
      for (I = 0, E = std::min(size(), RHS.size()); I != E; ++I)
        (*this)[I] = test(I) && RHS.test(I);
      for (E = size(); I != E; ++I)
        reset(I);
    }
    return *this;
  }

  /// Reset bits that are set in RHS. Same as *this &= ~RHS.
  SmallBitVector &reset(const SmallBitVector &RHS) {
    if (isSmall() && RHS.isSmall())
      setSmallBits(getSmallBits() & ~RHS.getSmallBits());
    else if (!isSmall() && !RHS.isSmall())
      getPointer()->reset(*RHS.getPointer());
    else
      for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
        if (RHS.test(i))
          reset(i);

    return *this;
  }

  /// Check if (This - RHS) is zero. This is the same as reset(RHS) and any().
  bool test(const SmallBitVector &RHS) const {
    if (isSmall() && RHS.isSmall())
      return (getSmallBits() & ~RHS.getSmallBits()) != 0;
    if (!isSmall() && !RHS.isSmall())
      return getPointer()->test(*RHS.getPointer());

    unsigned i, e;
    for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
      if (test(i) && !RHS.test(i))
        return true;

    for (e = size(); i != e; ++i)
      if (test(i))
        return true;

    return false;
  }

  SmallBitVector &operator|=(const SmallBitVector &RHS) {
    resize(std::max(size(), RHS.size()));
    if (isSmall() && RHS.isSmall())
      setSmallBits(getSmallBits() | RHS.getSmallBits());
    else if (!isSmall() && !RHS.isSmall())
      getPointer()->operator|=(*RHS.getPointer());
    else {
      for (size_type I = 0, E = RHS.size(); I != E; ++I)
        (*this)[I] = test(I) || RHS.test(I);
    }
    return *this;
  }

  SmallBitVector &operator^=(const SmallBitVector &RHS) {
    resize(std::max(size(), RHS.size()));
    if (isSmall() && RHS.isSmall())
      setSmallBits(getSmallBits() ^ RHS.getSmallBits());
    else if (!isSmall() && !RHS.isSmall())
      getPointer()->operator^=(*RHS.getPointer());
    else {
      for (size_type I = 0, E = RHS.size(); I != E; ++I)
        (*this)[I] = test(I) != RHS.test(I);
    }
    return *this;
  }

  SmallBitVector &operator<<=(unsigned N) {
    if (isSmall())
      setSmallBits(getSmallBits() << N);
    else
      getPointer()->operator<<=(N);
    return *this;
  }

  SmallBitVector &operator>>=(unsigned N) {
    if (isSmall())
      setSmallBits(getSmallBits() >> N);
    else
      getPointer()->operator>>=(N);
    return *this;
  }

  // Assignment operator.
  const SmallBitVector &operator=(const SmallBitVector &RHS) {
    if (isSmall()) {
      if (RHS.isSmall())
        X = RHS.X;
      else
        switchToLarge(new BitVector(*RHS.getPointer()));
    } else {
      if (!RHS.isSmall())
        *getPointer() = *RHS.getPointer();
      else {
        delete getPointer();
        X = RHS.X;
      }
    }
    return *this;
  }

  const SmallBitVector &operator=(SmallBitVector &&RHS) {
    if (this != &RHS) {
      clear();
      swap(RHS);
    }
    return *this;
  }

  void swap(SmallBitVector &RHS) {
    std::swap(X, RHS.X);
  }

  /// Add '1' bits from Mask to this vector. Don't resize.
  /// This computes "*this |= Mask".
  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    if (isSmall())
      applyMask<true, false>(Mask, MaskWords);
    else
      getPointer()->setBitsInMask(Mask, MaskWords);
  }

  /// Clear any bits in this vector that are set in Mask. Don't resize.
  /// This computes "*this &= ~Mask".
  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    if (isSmall())
      applyMask<false, false>(Mask, MaskWords);
    else
      getPointer()->clearBitsInMask(Mask, MaskWords);
  }

  /// Add a bit to this vector for every '0' bit in Mask. Don't resize.
  /// This computes "*this |= ~Mask".
  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    if (isSmall())
      applyMask<true, true>(Mask, MaskWords);
    else
      getPointer()->setBitsNotInMask(Mask, MaskWords);
  }

  /// Clear a bit in this vector for every '0' bit in Mask. Don't resize.
  /// This computes "*this &= Mask".
  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    if (isSmall())
      applyMask<false, true>(Mask, MaskWords);
    else
      getPointer()->clearBitsNotInMask(Mask, MaskWords);
  }

  void invalid() {
    assert(empty());
    X = (uintptr_t)-1;
  }
  bool isInvalid() const { return X == (uintptr_t)-1; }

  ArrayRef<uintptr_t> getData(uintptr_t &Store) const {
    if (!isSmall())
      return getPointer()->getData();
    Store = getSmallBits();
    return Store;
  }

private:
  template <bool AddBits, bool InvertMask>
  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
    assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!");
    uintptr_t M = Mask[0];
    if (NumBaseBits == 64)
      M |= uint64_t(Mask[1]) << 32;
    if (InvertMask)
      M = ~M;
    if (AddBits)
      setSmallBits(getSmallBits() | M);
    else
      setSmallBits(getSmallBits() & ~M);
  }
};

inline SmallBitVector
operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
  SmallBitVector Result(LHS);
  Result &= RHS;
  return Result;
}

inline SmallBitVector
operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
  SmallBitVector Result(LHS);
  Result |= RHS;
  return Result;
}

inline SmallBitVector
operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
  SmallBitVector Result(LHS);
  Result ^= RHS;
  return Result;
}
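
// A minimal usage sketch (an illustrative example):
//
// \code
//   SmallBitVector BV(16);   // 16 bits, all false; fits in "small" mode
//   BV.set(3);
//   BV.set(8, 12);           // set the half-open range [8, 12)
//   for (unsigned Idx : BV.set_bits())
//     ;                      // visits 3, 8, 9, 10, 11
//   BV.resize(100);          // now too wide for small mode; heap-backed
// \endcode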

template <> struct DenseMapInfo<SmallBitVector> {
  static inline SmallBitVector getEmptyKey() { return SmallBitVector(); }
  static inline SmallBitVector getTombstoneKey() {
    SmallBitVector V;
    V.invalid();
    return V;
  }
  static unsigned getHashValue(const SmallBitVector &V) {
    uintptr_t Store;
    return DenseMapInfo<
        std::pair<SmallBitVector::size_type, ArrayRef<uintptr_t>>>::
        getHashValue(std::make_pair(V.size(), V.getData(Store)));
  }
  static bool isEqual(const SmallBitVector &LHS, const SmallBitVector &RHS) {
    if (LHS.isInvalid() || RHS.isInvalid())
      return LHS.isInvalid() == RHS.isInvalid();
    return LHS == RHS;
  }
};
} // end namespace llvm

namespace std {

/// Implement std::swap in terms of BitVector swap.
inline void
swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
  LHS.swap(RHS);
}

} // end namespace std

#endif // LLVM_ADT_SMALLBITVECTOR_H

//===-- IntervalTree.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interval tree.
//
// Further information:
// https://en.wikipedia.org/wiki/Interval_tree
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_INTERVALTREE_H
#define LLVM_ADT_INTERVALTREE_H

#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>

// IntervalTree is a light tree data structure to hold intervals. It allows
// finding all intervals that overlap with any given point. At this time,
// it does not support any deletion or rebalancing operations.
//
// The IntervalTree is designed to be set up once, and then queried without
// any further additions.
//
// Synopsis:
//   Closed intervals delimited by PointT objects are mapped to ValueT objects.
//
// Restrictions:
//   PointT must be a fundamental type.
//   ValueT must be a fundamental or pointer type.
//
// template <typename PointT, typename ValueT, typename DataT>
// class IntervalTree {
// public:
//
//   IntervalTree();
//   ~IntervalTree();
//
//   using IntervalReferences = SmallVector<IntervalData *>;
//
//   void create();
//   void insert(PointT Left, PointT Right, ValueT Value);
//
//   IntervalReferences getContaining(PointT Point);
//   static void sortIntervals(IntervalReferences &Intervals, Sorting Sort);
//
//   find_iterator find(PointT Point) const;
//   find_iterator find_end() const;
//
//   bool empty() const;
//   void clear();
//
//   void print(raw_ostream &OS, bool HexFormat = true);
// };
//
//===----------------------------------------------------------------------===//
//
// In the dataset given below
//
//   [a, b] <- (x)
//
// 'a' and 'b' describe a range and 'x' the value for that interval.
//
// The following data are purely for illustrative purposes:
//
// [30, 35] <- (3035),    [39, 50] <- (3950),    [55, 61] <- (5561),
// [31, 56] <- (3156),    [12, 21] <- (1221),    [25, 41] <- (2541),
// [49, 65] <- (4965),    [71, 79] <- (7179),    [11, 16] <- (1116),
// [20, 30] <- (2030),    [36, 54] <- (3654),    [60, 70] <- (6070),
// [74, 80] <- (7480),    [15, 40] <- (1540),    [43, 43] <- (4343),
// [50, 75] <- (5075),    [10, 85] <- (1085)
//
// The data represents a set of overlapping intervals:
//
//                    30--35  39------------50  55----61
//                      31------------------------56
//     12--------21 25------------41      49-------------65   71-----79
//   11----16  20-----30    36----------------54    60------70  74---- 80
//       15---------------------40  43--43  50--------------------75
// 10----------------------------------------------------------------------85
//
// The items are stored in a binary tree with each node storing:
//
// MP: A middle point.
// IL: All intervals whose left value is completely to the left of the middle
//     point. They are sorted in ascending order by their beginning point.
// IR: All intervals whose right value is completely to the right of the
//     middle point. They are sorted in descending order by their ending point.
// LS: Left subtree.
// RS: Right subtree.
//
// As IL and IR will contain the same intervals, in order to optimize space,
// instead of storing intervals on each node, we use two vectors that will
// contain the intervals described by IL and IR. Each node contains an
// index into those vectors (the global bucket) to indicate the beginning of
// the intervals assigned to the node.
//
// The following is the output from print():
//
// 0: MP:43 IR [10,85] [31,56] [36,54] [39,50] [43,43]
// 0: MP:43 IL [10,85] [31,56] [36,54] [39,50] [43,43]
// 1:   MP:25 IR [25,41] [15,40] [20,30]
// 1:   MP:25 IL [15,40] [20,30] [25,41]
// 2:     MP:15 IR [12,21] [11,16]
// 2:     MP:15 IL [11,16] [12,21]
// 2:     MP:36 IR []
// 2:     MP:36 IL []
// 3:       MP:31 IR [30,35]
// 3:       MP:31 IL [30,35]
// 1:   MP:61 IR [50,75] [60,70] [49,65] [55,61]
// 1:   MP:61 IL [49,65] [50,75] [55,61] [60,70]
// 2:     MP:74 IR [74,80] [71,79]
// 2:     MP:74 IL [71,79] [74,80]
//
// with:
//    0: Root Node.
//   MP: Middle point.
//   IL: Intervals to the left (in ascending order by beginning point).
//   IR: Intervals to the right (in descending order by ending point).
//
//                                    Root
//                                      |
//                                      V
//                       +------------MP:43------------+
//                       |            IL IR            |
//                       |       [10,85] [10,85]       |
//                    LS |       [31,56] [31,56]       | RS
//                       |       [36,54] [36,54]       |
//                       |       [39,50] [39,50]       |
//                       |       [43,43] [43,43]       |
//                       V                             V
//        +------------MP:25------------+            MP:61------------+
//        |            IL IR            |            IL IR            |
//        |       [15,40] [25,41]       |       [49,65] [50,75]       |
//     LS |       [20,30] [15,40]       | RS    [50,75] [60,70]       | RS
//        |       [25,41] [20,30]       |       [55,61] [49,65]       |
//        |                             |       [60,70] [55,61]       |
//        V                             V                             V
//      MP:15                 +-------MP:36                         MP:74
//      IL IR                 |       IL IR                         IL IR
// [11,16] [12,21]         LS |       [] []                    [71,79] [74,80]
// [12,21] [11,16]            |                                [74,80] [71,79]
//                            V
//                          MP:31
//                          IL IR
//                     [30,35] [30,35]
//
// The creation of an interval tree is done in 2 steps:
// 1) Insert the interval items by calling
//    void insert(PointT Left, PointT Right, ValueT Value);
//    Left, Right: the interval left and right limits.
//    Value: the data associated with that specific interval.
//
// 2) Create the interval tree by calling
//    void create();
//
// Once the tree is created, it is switched to query mode.
// Query the tree by using iterators or a container.
//
// a) Iterators over intervals overlapping the given point with very weak
//    ordering guarantees.
//    find_iterator find(PointType Point) const;
//    find_iterator find_end() const;
//    Point: a target point to be tested for inclusion in any interval.
//
// b) Container:
//    IntervalReferences getContaining(PointT Point);
//    Point: a target point to be tested for inclusion in any interval.
//    Returns vector with all the intervals containing the target point.
//
// The returned intervals are in their natural tree location. They can
// be sorted:
//
// static void sortIntervals(IntervalReferences &Intervals, Sorting Sort);
//
// Ability to print the constructed interval tree:
//   void print(raw_ostream &OS, bool HexFormat = true);
// When HexFormat is true, the intervals and associated data are displayed in
// hexadecimal format.
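//
// Example (illustrative sketch, not part of this header): building a small
// tree and collecting the intervals that contain a point. 'Alloc' is a
// hypothetical local allocator; the intervals and values are arbitrary.
//
//   BumpPtrAllocator Alloc;
//   IntervalTree<unsigned, unsigned> Tree(Alloc);
//   Tree.insert(10, 20, /*Value=*/1020);
//   Tree.insert(15, 25, /*Value=*/1525);
//   Tree.create();
//   // Both [10,20] and [15,25] contain the point 18.
//   auto Overlaps = Tree.getContaining(18);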

namespace llvm {

//===----------------------------------------------------------------------===//
//---                          IntervalData                               ----//
//===----------------------------------------------------------------------===//
/// An interval item composed of the \a Left and \a Right points and an
/// associated \a Value.
/// \a PointT corresponds to the interval endpoints type.
/// \a ValueT corresponds to the interval value type.
template <typename PointT, typename ValueT> class IntervalData {
protected:
  using PointType = PointT;
  using ValueType = ValueT;

private:
  PointType Left;
  PointType Right;
  ValueType Value;

public:
  IntervalData() = delete;
  IntervalData(PointType Left, PointType Right, ValueType Value)
      : Left(Left), Right(Right), Value(Value) {
    assert(Left <= Right && "'Left' must be less than or equal to 'Right'");
  }
  virtual ~IntervalData() = default;
  PointType left() const { return Left; }
  PointType right() const { return Right; }
  ValueType value() const { return Value; }

  /// Return true if \a Point is inside the left bound of closed interval \a
  /// [Left;Right]. This is Left <= Point for closed intervals.
  bool left(const PointType &Point) const { return left() <= Point; }

  /// Return true if \a Point is inside the right bound of closed interval \a
  /// [Left;Right]. This is Point <= Right for closed intervals.
  bool right(const PointType &Point) const { return Point <= right(); }

  /// Return true when \a Point is contained in interval \a [Left;Right].
  /// This is Left <= Point <= Right for closed intervals.
  bool contains(const PointType &Point) const {
    return left(Point) && right(Point);
  }
};

//===----------------------------------------------------------------------===//
//---                          IntervalTree                               ----//
//===----------------------------------------------------------------------===//
// Helper templates used by the IntervalTree to ensure that it is
// instantiated using only fundamental and/or pointer types.
template <typename T>
using PointTypeIsValid = std::bool_constant<std::is_fundamental<T>::value>;

template <typename T>
using ValueTypeIsValid = std::bool_constant<std::is_fundamental<T>::value ||
                                            std::is_pointer<T>::value>;

template <typename PointT, typename ValueT,
          typename DataT = IntervalData<PointT, ValueT>>
class IntervalTree {
  static_assert(PointTypeIsValid<PointT>::value,
                "PointT must be a fundamental type");
  static_assert(ValueTypeIsValid<ValueT>::value,
                "ValueT must be a fundamental or pointer type");

public:
  using PointType = PointT;
  using ValueType = ValueT;
  using DataType = DataT;
  using Allocator = BumpPtrAllocator;

  enum class Sorting { Ascending, Descending };
  using IntervalReferences = SmallVector<const DataType *, 4>;

private:
  using IntervalVector = SmallVector<DataType, 4>;
  using PointsVector = SmallVector<PointType, 4>;

  class IntervalNode {
    PointType MiddlePoint;             // MP - Middle point.
    IntervalNode *Left = nullptr;      // LS - Left subtree.
    IntervalNode *Right = nullptr;     // RS - Right subtree.
    unsigned BucketIntervalsStart = 0; // Starting index in global bucket.
    unsigned BucketIntervalsSize = 0;  // Size of bucket.

  public:
    PointType middle() const { return MiddlePoint; }
    unsigned start() const { return BucketIntervalsStart; }
    unsigned size() const { return BucketIntervalsSize; }

    IntervalNode(PointType Point, unsigned Start)
        : MiddlePoint(Point), BucketIntervalsStart(Start) {}

    friend IntervalTree;
  };

  Allocator &NodeAllocator;     // Allocator used for creating interval nodes.
  IntervalNode *Root = nullptr; // Interval tree root.
  IntervalVector Intervals; // Storage for each interval and all of the fields
                            // point back into it.
  PointsVector EndPoints; // Sorted left and right points of all the intervals.

  // These vectors provide storage that nodes carve buckets of overlapping
  // intervals out of. All intervals are recorded on each vector.
  // The bucket of intervals associated with a node is determined by the
  // node's fields 'BucketIntervalsStart' and 'BucketIntervalsSize'.
  // The buckets in the first vector are sorted in ascending order using
  // the left value and the buckets in the second vector are sorted in
  // descending order using the right value. Every interval in a bucket
  // contains the middle point for the node.
  IntervalReferences IntervalsLeft;  // Intervals to the left of middle point.
  IntervalReferences IntervalsRight; // Intervals to the right of middle point.

  // Working vector used during the tree creation to sort the intervals. It is
  // cleared once the tree is created.
  IntervalReferences References;

  /// Recursively delete the constructed tree.
  void deleteTree(IntervalNode *Node) {
    if (Node) {
      deleteTree(Node->Left);
      deleteTree(Node->Right);
      Node->~IntervalNode();
      NodeAllocator.Deallocate(Node);
    }
  }

  /// Print the interval list (left and right) for a given \a Node.
  static void printList(raw_ostream &OS, IntervalReferences &IntervalSet,
                        unsigned Start, unsigned Size, bool HexFormat = true) {
    assert(Start + Size <= IntervalSet.size() &&
           "Start + Size must be in bounds of the IntervalSet");
    const char *Format = HexFormat ? "[0x%08x,0x%08x] " : "[%2d,%2d] ";
    if (Size) {
      for (unsigned Position = Start; Position < Start + Size; ++Position)
        OS << format(Format, IntervalSet[Position]->left(),
                     IntervalSet[Position]->right());
    } else {
      OS << "[]";
    }
    OS << "\n";
  }

  /// Print an interval tree \a Node.
  void printNode(raw_ostream &OS, unsigned Level, IntervalNode *Node,
                 bool HexFormat = true) {
    const char *Format = HexFormat ? "MP:0x%08x " : "MP:%2d ";
    auto PrintNodeData = [&](StringRef Text, IntervalReferences &IntervalSet) {
      OS << format("%5d: ", Level);
      OS.indent(Level * 2);
      OS << format(Format, Node->middle()) << Text << " ";
      printList(OS, IntervalSet, Node->start(), Node->size(), HexFormat);
    };

    PrintNodeData("IR", IntervalsRight);
    PrintNodeData("IL", IntervalsLeft);
  }

  /// Recursively print all the interval nodes.
  void printTree(raw_ostream &OS, unsigned Level, IntervalNode *Node,
                 bool HexFormat = true) {
    if (Node) {
      printNode(OS, Level, Node, HexFormat);
      ++Level;
      printTree(OS, Level, Node->Left, HexFormat);
      printTree(OS, Level, Node->Right, HexFormat);
    }
  }

  /// Recursively construct the interval tree.
  /// IntervalsSize: Number of intervals that have been processed; it is used
  /// as the start of the intervals bucket for a node.
  /// PointsBeginIndex, PointsEndIndex: Determine the range into the EndPoints
  /// vector of end points to be processed.
  /// ReferencesBeginIndex, ReferencesSize: Determine the range into the
  /// intervals being processed.
  IntervalNode *createTree(unsigned &IntervalsSize, int PointsBeginIndex,
                           int PointsEndIndex, int ReferencesBeginIndex,
                           int ReferencesSize) {
    // We start by taking the entire range of all the intervals and dividing
    // it in half at x_middle (in practice, x_middle should be picked to keep
    // the tree relatively balanced).
    // This gives three sets of intervals, those completely to the left of
    // x_middle which we'll call S_left, those completely to the right of
    // x_middle which we'll call S_right, and those overlapping x_middle
    // which we'll call S_middle.
    // The intervals in S_left and S_right are recursively divided in the
    // same manner until there are no intervals remaining.

    if (PointsBeginIndex > PointsEndIndex ||
        ReferencesBeginIndex >= ReferencesSize)
      return nullptr;

    int MiddleIndex = (PointsBeginIndex + PointsEndIndex) / 2;
    PointType MiddlePoint = EndPoints[MiddleIndex];

    unsigned NewBucketStart = IntervalsSize;
    unsigned NewBucketSize = 0;
    int ReferencesRightIndex = ReferencesSize;

    IntervalNode *Root =
        new (NodeAllocator) IntervalNode(MiddlePoint, NewBucketStart);

    // A quicksort implementation where all the intervals that overlap
    // with the pivot are put into the "bucket", and "References" is the
    // partition space where we recursively sort the remaining intervals.
    for (int Index = ReferencesBeginIndex; Index < ReferencesRightIndex;) {

      // Current interval contains the middle point.
      if (References[Index]->contains(MiddlePoint)) {
        IntervalsLeft[IntervalsSize] = References[Index];
        IntervalsRight[IntervalsSize] = References[Index];
        ++IntervalsSize;
        Root->BucketIntervalsSize = ++NewBucketSize;

        if (Index < --ReferencesRightIndex)
          std::swap(References[Index], References[ReferencesRightIndex]);
        if (ReferencesRightIndex < --ReferencesSize)
          std::swap(References[ReferencesRightIndex],
                    References[ReferencesSize]);
        continue;
      }

      if (References[Index]->left() > MiddlePoint) {
        if (Index < --ReferencesRightIndex)
          std::swap(References[Index], References[ReferencesRightIndex]);
        continue;
      }
      ++Index;
    }

    // Sort intervals on the left and right of the middle point.
    if (NewBucketSize > 1) {
      // Sort the intervals in ascending order by their beginning point.
      std::stable_sort(IntervalsLeft.begin() + NewBucketStart,
                       IntervalsLeft.begin() + NewBucketStart + NewBucketSize,
                       [](const DataType *LHS, const DataType *RHS) {
                         return LHS->left() < RHS->left();
                       });
      // Sort the intervals in descending order by their ending point.
      std::stable_sort(IntervalsRight.begin() + NewBucketStart,
                       IntervalsRight.begin() + NewBucketStart + NewBucketSize,
                       [](const DataType *LHS, const DataType *RHS) {
                         return LHS->right() > RHS->right();
                       });
    }

    if (PointsBeginIndex <= MiddleIndex - 1) {
      Root->Left = createTree(IntervalsSize, PointsBeginIndex, MiddleIndex - 1,
                              ReferencesBeginIndex, ReferencesRightIndex);
    }

    if (MiddleIndex + 1 <= PointsEndIndex) {
      Root->Right = createTree(IntervalsSize, MiddleIndex + 1, PointsEndIndex,
                               ReferencesRightIndex, ReferencesSize);
    }

    return Root;
  }

public:
  class find_iterator {
  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = DataType;
    using difference_type = std::ptrdiff_t;
    using pointer = DataType *;
    using reference = DataType &;

  private:
    const IntervalReferences *AscendingBuckets = nullptr;
    const IntervalReferences *DescendingBuckets = nullptr;

    // Current node and index while traversing the intervals that contain
    // the reference point.
    IntervalNode *Node = nullptr;
    PointType Point = {};
    unsigned Index = 0;

    // For the current node, check if we have intervals that contain the
    // reference point. We return when the node does have intervals that
    // contain that point. Otherwise we keep descending on that branch.
    void initNode() {
      Index = 0;
      while (Node) {
        // Return if the reference point is the same as the middle point or
        // the current node doesn't have any intervals at all.
        if (Point == Node->middle()) {
          if (Node->size() == 0) {
            // No intervals that contain the reference point.
            Node = nullptr;
          }
          return;
        }

        if (Point < Node->middle()) {
          // The reference point can be at the left or right of the middle
          // point. Return if the current node has intervals that contain the
          // reference point; otherwise descend on the respective branch.
          if (Node->size() && (*AscendingBuckets)[Node->start()]->left(Point)) {
            return;
          }
          Node = Node->Left;
        } else {
          if (Node->size() &&
              (*DescendingBuckets)[Node->start()]->right(Point)) {
            return;
          }
          Node = Node->Right;
        }
      }
    }

    // Given the current node (which was initialized by initNode), move to
    // the next interval in the list of intervals that contain the reference
    // point. Otherwise move to the next node, as the intervals contained
    // in that node may also contain the reference point.
    void nextInterval() {
      // If there are available intervals that contain the reference point,
      // traverse them; otherwise move to the left or right node, depending
      // on the middle point value.
      if (++Index < Node->size()) {
        if (Node->middle() == Point)
          return;
        if (Point < Node->middle()) {
          // Reference point is on the left.
          if (!(*AscendingBuckets)[Node->start() + Index]->left(Point)) {
            // The intervals don't contain the reference point. Move to the
            // next node, preserving the descending order.
            Node = Node->Left;
            initNode();
          }
        } else {
          // Reference point is on the right.
          if (!(*DescendingBuckets)[Node->start() + Index]->right(Point)) {
            // The intervals don't contain the reference point. Move to the
            // next node, preserving the ascending order.
            Node = Node->Right;
            initNode();
          }
        }
      } else {
        // We have traversed all the intervals in the current node.
        if (Point == Node->middle()) {
          Node = nullptr;
          Index = 0;
          return;
        }
        // Select a branch based on the middle point.
        Node = Point < Node->middle() ? Node->Left : Node->Right;
        initNode();
      }
    }

    find_iterator() = default;
    explicit find_iterator(const IntervalReferences *Left,
                           const IntervalReferences *Right, IntervalNode *Node,
                           PointType Point)
        : AscendingBuckets(Left), DescendingBuckets(Right), Node(Node),
          Point(Point), Index(0) {
      initNode();
    }

    const DataType *current() const {
      return (Point <= Node->middle())
                 ? (*AscendingBuckets)[Node->start() + Index]
                 : (*DescendingBuckets)[Node->start() + Index];
    }

  public:
    find_iterator &operator++() {
      nextInterval();
      return *this;
    }

    find_iterator operator++(int) {
      find_iterator Iter(*this);
      nextInterval();
      return Iter;
    }

    /// Dereference operators.
    const DataType *operator->() const { return current(); }
    const DataType &operator*() const { return *(current()); }

    /// Comparison operators.
    friend bool operator==(const find_iterator &LHS, const find_iterator &RHS) {
      return (!LHS.Node && !RHS.Node && !LHS.Index && !RHS.Index) ||
             (LHS.Point == RHS.Point && LHS.Node == RHS.Node &&
              LHS.Index == RHS.Index);
    }
    friend bool operator!=(const find_iterator &LHS, const find_iterator &RHS) {
      return !(LHS == RHS);
    }

    friend IntervalTree;
  };

private:
  find_iterator End;

public:
  explicit IntervalTree(Allocator &NodeAllocator)
      : NodeAllocator(NodeAllocator) {}
  ~IntervalTree() { clear(); }

  /// Return true when no intervals are mapped.
  bool empty() const { return Root == nullptr; }

  /// Remove all entries.
  void clear() {
    deleteTree(Root);
    Root = nullptr;
    Intervals.clear();
    IntervalsLeft.clear();
    IntervalsRight.clear();
    EndPoints.clear();
  }

  /// Add a mapping of [Left;Right] to \a Value.
  void insert(PointType Left, PointType Right, ValueType Value) {
    assert(empty() && "Invalid insertion. Interval tree already constructed.");
    Intervals.emplace_back(Left, Right, Value);
  }

  /// Return all the intervals in their natural tree location, that
  /// contain the given point.
  IntervalReferences getContaining(PointType Point) const {
    assert(!empty() && "Interval tree is not constructed.");
    IntervalReferences IntervalSet;
    for (find_iterator Iter = find(Point), E = find_end(); Iter != E; ++Iter)
      IntervalSet.push_back(const_cast<DataType *>(&(*Iter)));
    return IntervalSet;
  }

  /// Sort the given intervals by their size (right - left), using the
  /// following sort options:
  /// Ascending: return the intervals with the smallest at the front.
  /// Descending: return the intervals with the biggest at the front.
  static void sortIntervals(IntervalReferences &IntervalSet, Sorting Sort) {
    std::stable_sort(IntervalSet.begin(), IntervalSet.end(),
                     [Sort](const DataType *LHS, const DataType *RHS) {
                       return Sort == Sorting::Ascending
                                  ? (LHS->right() - LHS->left()) <
                                        (RHS->right() - RHS->left())
                                  : (LHS->right() - LHS->left()) >
                                        (RHS->right() - RHS->left());
                     });
  }

  /// Print the interval tree.
  /// When \a HexFormat is true, the interval tree interval ranges and
  /// associated values are printed in hexadecimal format.
  void print(raw_ostream &OS, bool HexFormat = true) {
    printTree(OS, 0, Root, HexFormat);
  }

  /// Create the interval tree.
  void create() {
    assert(empty() && "Interval tree already constructed.");
    // Sorted vector of unique end point values of all the intervals.
    // Records references to the collected intervals.
    SmallVector<PointType, 4> Points;
    for (const DataType &Data : Intervals) {
      Points.push_back(Data.left());
      Points.push_back(Data.right());
      References.push_back(std::addressof(Data));
    }
    std::stable_sort(Points.begin(), Points.end());
    auto Last = std::unique(Points.begin(), Points.end());
    Points.erase(Last, Points.end());

    EndPoints.assign(Points.begin(), Points.end());

    IntervalsLeft.resize(Intervals.size());
    IntervalsRight.resize(Intervals.size());

    // Given a set of n intervals, construct a data structure so that
    // we can efficiently retrieve all intervals overlapping another
    // interval or point.
    unsigned IntervalsSize = 0;
    Root =
        createTree(IntervalsSize, /*PointsBeginIndex=*/0, EndPoints.size() - 1,
                   /*ReferencesBeginIndex=*/0, References.size());

    // Safe to clear this storage, as it is used only to sort the intervals.
    References.clear();
  }

  /// Iterator to start a find operation; it returns find_end() if the
  /// tree has not been built.
  /// There is no support for iterating over all the elements of the tree.
  find_iterator find(PointType Point) const {
    return empty()
               ? find_end()
               : find_iterator(&IntervalsLeft, &IntervalsRight, Root, Point);
  }

  /// Iterator to end find operation.
  find_iterator find_end() const { return End; }
};

} // namespace llvm

#endif // LLVM_ADT_INTERVALTREE_H
//===- llvm/ADT/SparseMultiSet.h - Sparse multiset --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SparseMultiSet class, which adds multiset behavior to
/// the SparseSet.
///
/// A sparse multiset holds a small number of objects identified by integer keys
/// from a moderately sized universe. The sparse multiset uses more memory than
/// other containers in order to provide faster operations. Any key can map to
/// multiple values. A SparseMultiSetNode class is provided, which serves as a
/// convenient base class for the contents of a SparseMultiSet.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SPARSEMULTISET_H
#define LLVM_ADT_SPARSEMULTISET_H

#include "llvm/ADT/identity.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <utility>

namespace llvm {

/// Fast multiset implementation for objects that can be identified by small
/// unsigned keys.
///
/// SparseMultiSet allocates memory proportional to the size of the key
/// universe, so it is not recommended for building composite data structures.
/// It is useful for algorithms that require a single set with fast operations.
///
/// Compared to DenseSet and DenseMap, SparseMultiSet provides a constant-time
/// clear() that is as fast as a vector's.  The find(), insert(), and erase()
/// operations are all constant time, and typically faster than a hash table.
/// The iteration order doesn't depend on numerical key values; it only depends
/// on the order of insert() and erase() operations.  Iteration order is the
/// insertion order. Iteration is only provided over elements of equivalent
/// keys, but iterators are bidirectional.
///
/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
/// offers constant-time clear() and size() operations as well as fast iteration
/// independent of the size of the universe.
///
/// SparseMultiSet contains a dense vector holding all the objects and a sparse
/// array holding indexes into the dense vector.  Most of the memory is used by
/// the sparse array which is the size of the key universe. The SparseT template
/// parameter provides a space/speed tradeoff for sets holding many elements.
///
/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
/// sparse array uses 4 x Universe bytes.
///
/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
/// lines, but the sparse array is 4x smaller.  N is the number of elements in
/// the set.
///
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
///
/// Multiset behavior is implemented with doubly linked lists for values
/// that are inlined in the dense vector. SparseMultiSet is a good choice when
/// one desires a growable number of entries per key, as it will retain the
/// SparseSet algorithmic properties despite being growable. Thus, it is often a
/// better choice than a SparseSet of growable containers or a vector of
/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
/// the iterators don't point to the element erased), allowing for more
/// intuitive and fast removal.
///
/// @tparam ValueT      The type of objects in the set.
/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @tparam SparseT     An unsigned integer type. See above.
///
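/// Example (illustrative sketch, not part of this header): a multiset of
/// unsigned keys. The universe must be sized before any insertion; the
/// numbers here are arbitrary.
///
///   SparseMultiSet<unsigned> Set;
///   Set.setUniverse(16);
///   Set.insert(3);
///   Set.insert(3); // Duplicate keys are allowed.
///   unsigned Count = 0;
///   for (auto I = Set.find(3); I != Set.end(); ++I)
///     ++Count; // Visits both entries for key 3.
///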
template<typename ValueT,
         typename KeyFunctorT = identity<unsigned>,
         typename SparseT = uint8_t>
class SparseMultiSet {
  static_assert(std::is_unsigned_v<SparseT>,
                "SparseT must be an unsigned integer type");

  /// The actual data that's stored, as a doubly-linked list implemented via
  /// indices into the DenseVector.  The doubly linked list is circular in
  /// Prev indices and INVALID-terminated in Next indices, which provides
  /// efficient access to list tails. These nodes can also be tombstones, in
  /// which case they are actually nodes in a singly-linked freelist of
  /// recyclable slots.
  struct SMSNode {
    static constexpr unsigned INVALID = ~0U;

    ValueT Data;
    unsigned Prev;
    unsigned Next;

    SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) {}

    /// List tails have invalid Nexts.
    bool isTail() const {
      return Next == INVALID;
    }

    /// Whether this node is a tombstone node, and thus is in our freelist.
    bool isTombstone() const {
      return Prev == INVALID;
    }

    /// Since the list is circular in Prev, all non-tombstone nodes have a valid
    /// Prev.
    bool isValid() const { return Prev != INVALID; }
  };

  using KeyT = typename KeyFunctorT::argument_type;
  using DenseT = SmallVector<SMSNode, 8>;
  DenseT Dense;
  SparseT *Sparse = nullptr;
  unsigned Universe = 0;
  KeyFunctorT KeyIndexOf;
  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;

  /// We have a built-in recycler for reusing tombstone slots. This recycler
  /// puts a singly-linked free list into tombstone slots, allowing us quick
  /// erasure, iterator preservation, and dense size.
  unsigned FreelistIdx = SMSNode::INVALID;
  unsigned NumFree = 0;

  unsigned sparseIndex(const ValueT &Val) const {
    assert(ValIndexOf(Val) < Universe &&
           "Invalid key in set. Did object mutate?");
    return ValIndexOf(Val);
  }
  unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }

  /// Whether the given entry is the head of the list. A list head's previous
  /// pointer points to the tail of the list, allowing for efficient access to
  /// the list tail. D must be a valid entry node.
  bool isHead(const SMSNode &D) const {
    assert(D.isValid() && "Invalid node for head");
    return Dense[D.Prev].isTail();
  }

  /// Whether the given entry is a singleton entry, i.e. the only entry with
  /// that key.
  bool isSingleton(const SMSNode &N) const {
    assert(N.isValid() && "Invalid node for singleton");
    // Is N its own predecessor?
    return &Dense[N.Prev] == &N;
  }

  /// Add in the given SMSNode. Uses a free entry in our freelist if
  /// available. Returns the index of the added node.
  unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
    if (NumFree == 0) {
      Dense.push_back(SMSNode(V, Prev, Next));
      return Dense.size() - 1;
    }

    // Peel off a free slot
    unsigned Idx = FreelistIdx;
    unsigned NextFree = Dense[Idx].Next;
    assert(Dense[Idx].isTombstone() && "Non-tombstone free?");

    Dense[Idx] = SMSNode(V, Prev, Next);
    FreelistIdx = NextFree;
    --NumFree;
    return Idx;
  }

  /// Make the current index a new tombstone. Pushes it onto the freelist.
  void makeTombstone(unsigned Idx) {
    Dense[Idx].Prev = SMSNode::INVALID;
    Dense[Idx].Next = FreelistIdx;
    FreelistIdx = Idx;
    ++NumFree;
  }

public:
  using value_type = ValueT;
  using reference = ValueT &;
  using const_reference = const ValueT &;
  using pointer = ValueT *;
  using const_pointer = const ValueT *;
  using size_type = unsigned;

  SparseMultiSet() = default;
  SparseMultiSet(const SparseMultiSet &) = delete;
  SparseMultiSet &operator=(const SparseMultiSet &) = delete;
  ~SparseMultiSet() { free(Sparse); }

  /// Set the universe size which determines the largest key the set can hold.
  /// The universe must be sized before any elements can be added.
  ///
  /// @param U Universe size. All object keys must be less than U.
  ///
  void setUniverse(unsigned U) {
    // It's not hard to resize the universe on a non-empty set, but it doesn't
    // seem like a likely use case, so we can add that code when we need it.
    assert(empty() && "Can only resize universe on an empty map");
    // Hysteresis prevents needless reallocations.
    if (U >= Universe/4 && U <= Universe)
      return;
    free(Sparse);
    // The Sparse array doesn't actually need to be initialized, so malloc
    // would be enough here, but that will cause tools like valgrind to
    // complain about branching on uninitialized data.
    Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
    Universe = U;
  }

  /// Our iterators are iterators over the collection of objects that share a
  /// key.
  template <typename SMSPtrTy> class iterator_base {
    friend class SparseMultiSet;

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = ValueT;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    SMSPtrTy SMS;
    unsigned Idx;
    unsigned SparseIdx;

    iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
      : SMS(P), Idx(I), SparseIdx(SI) {}

    /// Whether our iterator has fallen outside our dense vector.
    bool isEnd() const {
      if (Idx == SMSNode::INVALID)
        return true;

      assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
      return false;
    }

    /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid
    bool isKeyed() const { return SparseIdx < SMS->Universe; }

    unsigned Prev() const { return SMS->Dense[Idx].Prev; }
    unsigned Next() const { return SMS->Dense[Idx].Next; }

    void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
    void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }

  public:
    reference operator*() const {
      assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
             "Dereferencing iterator of invalid key or index");

      return SMS->Dense[Idx].Data;
    }
    pointer operator->() const { return &operator*(); }

    /// Comparison operators
    bool operator==(const iterator_base &RHS) const {
      // end compares equal
      if (SMS == RHS.SMS && Idx == RHS.Idx) {
        assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
               "Same dense entry, but different keys?");
        return true;
      }

      return false;
    }

    bool operator!=(const iterator_base &RHS) const {
      return !operator==(RHS);
    }

    /// Increment and decrement operators
    iterator_base &operator--() { // predecrement - Back up
      assert(isKeyed() && "Decrementing an invalid iterator");
      assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
             "Decrementing head of list");

      // If we're at the end, then issue a new find()
      if (isEnd())
        Idx = SMS->findIndex(SparseIdx).Prev();
      else
        Idx = Prev();

      return *this;
    }
    iterator_base &operator++() { // preincrement - Advance
      assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
      Idx = Next();
      return *this;
    }
    iterator_base operator--(int) { // postdecrement
      iterator_base I(*this);
      --*this;
      return I;
    }
    iterator_base operator++(int) { // postincrement
      iterator_base I(*this);
      ++*this;
      return I;
    }
  };

  using iterator = iterator_base<SparseMultiSet *>;
  using const_iterator = iterator_base<const SparseMultiSet *>;

  // Convenience types
  using RangePair = std::pair<iterator, iterator>;

  /// Returns an iterator past this container. Note that such an iterator cannot
  /// be decremented, but will compare equal to other end iterators.
  iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
  const_iterator end() const {
    return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
  }

  /// Returns true if the set is empty.
  ///
  /// This is not the same as BitVector::empty().
  ///
  bool empty() const { return size() == 0; }

  /// Returns the number of elements in the set.
  ///
  /// This is not the same as BitVector::size() which returns the size of the
  /// universe.
  ///
  size_type size() const {
    assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
    return Dense.size() - NumFree;
  }

  /// Clears the set.  This is a very fast constant time operation.
  ///
  void clear() {
    // Sparse does not need to be cleared, see find().
    Dense.clear();
    NumFree = 0;
    FreelistIdx = SMSNode::INVALID;
  }

  /// Find an element by its index.
  ///
  /// @param   Idx A valid index to find.
  /// @returns An iterator to the element identified by key, or end().
  ///
  iterator findIndex(unsigned Idx) {
    assert(Idx < Universe && "Key out of range");
    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
    for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
      const unsigned FoundIdx = sparseIndex(Dense[i]);
      // Check that we're pointing at the correct entry and that it is the head
      // of a valid list.
      if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
        return iterator(this, i, Idx);
      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
      if (!Stride)
        break;
    }
    return end();
  }

  /// Find an element by its key.
  ///
  /// @param   Key A valid key to find.
  /// @returns An iterator to the element identified by key, or end().
  ///
  iterator find(const KeyT &Key) {
    return findIndex(KeyIndexOf(Key));
  }

  const_iterator find(const KeyT &Key) const {
    iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
    return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
  }

  /// Returns the number of elements identified by Key. This will be linear in
  /// the number of elements of that key.
  size_type count(const KeyT &Key) const {
    unsigned Ret = 0;
    for (const_iterator It = find(Key); It != end(); ++It)
      ++Ret;

    return Ret;
  }

  /// Returns true if this set contains an element identified by Key.
  bool contains(const KeyT &Key) const {
    return find(Key) != end();
  }

  /// Return the head (getHead) or tail (getTail) of the subset's list for the
  /// given key; both return end() when the key has no entries.
  iterator getHead(const KeyT &Key) { return find(Key); }
  iterator getTail(const KeyT &Key) {
    iterator I = find(Key);
    if (I != end())
      I = iterator(this, I.Prev(), KeyIndexOf(Key));
    return I;
  }

  /// The bounds of the range of items sharing Key K. First member is the head
  /// of the list, and the second member is a decrementable end iterator for
  /// that key.
  RangePair equal_range(const KeyT &K) {
    iterator B = find(K);
    iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
    return std::make_pair(B, E);
  }

  /// Insert a new element at the tail of the subset list. Returns an iterator
  /// to the newly added entry.
  iterator insert(const ValueT &Val) {
    unsigned Idx = sparseIndex(Val);
    iterator I = findIndex(Idx);

    unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);

    if (I == end()) {
      // Make a singleton list
      Sparse[Idx] = NodeIdx;
      Dense[NodeIdx].Prev = NodeIdx;
      return iterator(this, NodeIdx, Idx);
    }

    // Stick it at the end.
    unsigned HeadIdx = I.Idx;
    unsigned TailIdx = I.Prev();
    Dense[TailIdx].Next = NodeIdx;
    Dense[HeadIdx].Prev = NodeIdx;
    Dense[NodeIdx].Prev = TailIdx;

    return iterator(this, NodeIdx, Idx);
  }

  /// Erases an existing element identified by a valid iterator.
  ///
  /// This invalidates iterators pointing at the same entry, but erase() returns
  /// an iterator pointing to the next element in the subset's list. This makes
  /// it possible to erase selected elements while iterating over the subset:
  ///
  ///   tie(I, E) = Set.equal_range(Key);
  ///   while (I != E)
  ///     if (test(*I))
  ///       I = Set.erase(I);
  ///     else
  ///       ++I;
  ///
  /// Note that if the last element in the subset list is erased, this will
  /// return an end iterator which can be decremented to get the new tail (if it
  /// exists):
  ///
  ///  tie(B, I) = Set.equal_range(Key);
  ///  for (bool isBegin = B == I; !isBegin; /* empty */) {
  ///    isBegin = (--I) == B;
  ///    if (test(*I))
  ///      break;
  ///    I = erase(I);
  ///  }
  iterator erase(iterator I) {
    assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
           "erasing invalid/end/tombstone iterator");

    // First, unlink the node from its list. Then swap the node out with the
    // dense vector's last entry
    iterator NextI = unlink(Dense[I.Idx]);

    // Put in a tombstone.
    makeTombstone(I.Idx);

    return NextI;
  }

  /// Erase all elements with the given key. This invalidates all
  /// iterators of that key.
  void eraseAll(const KeyT &K) {
    for (iterator I = find(K); I != end(); /* empty */)
      I = erase(I);
  }

private:
  /// Unlink the node from its list. Returns the next node in the list.
  iterator unlink(const SMSNode &N) {
    if (isSingleton(N)) {
      // Singleton is already unlinked
      assert(N.Next == SMSNode::INVALID && "Singleton has next?");
      return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
    }

    if (isHead(N)) {
      // If we're the head, then update the sparse array and our next.
      Sparse[sparseIndex(N)] = N.Next;
      Dense[N.Next].Prev = N.Prev;
      return iterator(this, N.Next, ValIndexOf(N.Data));
    }

    if (N.isTail()) {
      // If we're the tail, then update our head and our previous.
      findIndex(sparseIndex(N)).setPrev(N.Prev);
      Dense[N.Prev].Next = N.Next;

      // Give back an end iterator that can be decremented
      iterator I(this, N.Prev, ValIndexOf(N.Data));
      return ++I;
    }

    // Otherwise, just drop us
    Dense[N.Next].Prev = N.Prev;
    Dense[N.Prev].Next = N.Next;
    return iterator(this, N.Next, ValIndexOf(N.Data));
  }
};

} // end namespace llvm

#endif // LLVM_ADT_SPARSEMULTISET_H
//===- ScopedHashTable.h - A simple scoped hash table -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an efficient scoped hash table, which is useful for
// things like dominator-based optimizations.  This allows clients to do things
// like this:
//
//  ScopedHashTable<int, int> HT;
//  {
//    ScopedHashTableScope<int, int> Scope1(HT);
//    HT.insert(0, 0);
//    HT.insert(1, 1);
//    {
//      ScopedHashTableScope<int, int> Scope2(HT);
//      HT.insert(0, 42);
//    }
//  }
//
// Looking up the value for "0" in the Scope2 block will return 42.  Looking
// up the value for 0 before 42 is inserted or after Scope2 is popped will
// return 0.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
#define LLVM_ADT_SCOPEDHASHTABLE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/AllocatorBase.h"
#include <cassert>
#include <new>

namespace llvm {

template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
          typename AllocatorTy = MallocAllocator>
class ScopedHashTable;

template <typename K, typename V>
class ScopedHashTableVal {
  ScopedHashTableVal *NextInScope;
  ScopedHashTableVal *NextForKey;
  K Key;
  V Val;

  ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}

public:
  const K &getKey() const { return Key; }
  const V &getValue() const { return Val; }
  V &getValue() { return Val; }

  ScopedHashTableVal *getNextForKey() { return NextForKey; }
  const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
  ScopedHashTableVal *getNextInScope() { return NextInScope; }

  template <typename AllocatorTy>
  static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
                                    ScopedHashTableVal *nextForKey,
                                    const K &key, const V &val,
                                    AllocatorTy &Allocator) {
    ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
    // Set up the value.
    new (New) ScopedHashTableVal(key, val);
    New->NextInScope = nextInScope;
    New->NextForKey = nextForKey;
    return New;
  }

  template <typename AllocatorTy> void Destroy(AllocatorTy &Allocator) {
    // Free memory referenced by the item.
    this->~ScopedHashTableVal();
    Allocator.Deallocate(this);
  }
};

template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
          typename AllocatorTy = MallocAllocator>
class ScopedHashTableScope {
  /// HT - The hashtable that we are active for.
  ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;

  /// PrevScope - This is the scope that we are shadowing in HT.
  ScopedHashTableScope *PrevScope;

  /// LastValInScope - This is the last value that was inserted for this scope
  /// or null if none have been inserted yet.
  ScopedHashTableVal<K, V> *LastValInScope;

public:
  ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
  ScopedHashTableScope(ScopedHashTableScope &) = delete;
  ScopedHashTableScope &operator=(ScopedHashTableScope &) = delete;
  ~ScopedHashTableScope();

  ScopedHashTableScope *getParentScope() { return PrevScope; }
  const ScopedHashTableScope *getParentScope() const { return PrevScope; }

private:
  friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;

  ScopedHashTableVal<K, V> *getLastValInScope() {
    return LastValInScope;
  }

  void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
    LastValInScope = Val;
  }
};

template <typename K, typename V, typename KInfo = DenseMapInfo<K>>
class ScopedHashTableIterator {
  ScopedHashTableVal<K, V> *Node;

public:
  ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}

  V &operator*() const {
    assert(Node && "Dereference end()");
    return Node->getValue();
  }
  V *operator->() const {
    return &Node->getValue();
  }

  bool operator==(const ScopedHashTableIterator &RHS) const {
    return Node == RHS.Node;
  }
  bool operator!=(const ScopedHashTableIterator &RHS) const {
    return Node != RHS.Node;
  }

  inline ScopedHashTableIterator& operator++() {          // Preincrement
    assert(Node && "incrementing past end()");
    Node = Node->getNextForKey();
    return *this;
  }
  ScopedHashTableIterator operator++(int) {        // Postincrement
    ScopedHashTableIterator tmp = *this; ++*this; return tmp;
  }
};

template <typename K, typename V, typename KInfo, typename AllocatorTy>
class ScopedHashTable : detail::AllocatorHolder<AllocatorTy> {
  using AllocTy = detail::AllocatorHolder<AllocatorTy>;

public:
  /// ScopeTy - This is a helpful typedef that allows clients to get easy access
  /// to the name of the scope for this hash table.
  using ScopeTy = ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
  using size_type = unsigned;

private:
  friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;

  using ValTy = ScopedHashTableVal<K, V>;

  DenseMap<K, ValTy*, KInfo> TopLevelMap;
  ScopeTy *CurScope = nullptr;

public:
  ScopedHashTable() = default;
  ScopedHashTable(AllocatorTy A) : AllocTy(A) {}
  ScopedHashTable(const ScopedHashTable &) = delete;
  ScopedHashTable &operator=(const ScopedHashTable &) = delete;

  ~ScopedHashTable() {
    assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
  }

  /// Access to the allocator.
  using AllocTy::getAllocator;

  /// Return 1 if the specified key is in the table, 0 otherwise.
  size_type count(const K &Key) const {
    return TopLevelMap.count(Key);
  }

  V lookup(const K &Key) const {
    auto I = TopLevelMap.find(Key);
    if (I != TopLevelMap.end())
      return I->second->getValue();

    return V();
  }

  void insert(const K &Key, const V &Val) {
    insertIntoScope(CurScope, Key, Val);
  }

  using iterator = ScopedHashTableIterator<K, V, KInfo>;

  iterator end() { return iterator(nullptr); }

  iterator begin(const K &Key) {
    typename DenseMap<K, ValTy*, KInfo>::iterator I =
      TopLevelMap.find(Key);
    if (I == TopLevelMap.end()) return end();
    return iterator(I->second);
  }
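
  // Example (illustrative sketch, not part of this header): walking every
  // value currently bound to a key, from the innermost scope outward; 'use'
  // is a hypothetical callback.
  //
  //   for (auto I = HT.begin(Key), E = HT.end(); I != E; ++I)
  //     use(*I);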

  ScopeTy *getCurScope() { return CurScope; }
  const ScopeTy *getCurScope() const { return CurScope; }

  /// insertIntoScope - This inserts the specified key/value at the specified
  /// (possibly not the current) scope.  While it is ok to insert into a scope
  /// that isn't the current one, it isn't ok to insert *underneath* an existing
  /// value of the specified key.
  void insertIntoScope(ScopeTy *S, const K &Key, const V &Val) {
    assert(S && "No scope active!");
    ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
    KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
                             getAllocator());
    S->setLastValInScope(KeyEntry);
  }
};

/// ScopedHashTableScope ctor - Install this as the current scope for the hash
/// table.
template <typename K, typename V, typename KInfo, typename Allocator>
ScopedHashTableScope<K, V, KInfo, Allocator>::
  ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
  PrevScope = HT.CurScope;
  HT.CurScope = this;
  LastValInScope = nullptr;
}

template <typename K, typename V, typename KInfo, typename Allocator>
ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
  assert(HT.CurScope == this && "Scope imbalance!");
  HT.CurScope = PrevScope;

  // Pop and delete all values corresponding to this scope.
  while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
    // Pop this value out of the TopLevelMap.
    if (!ThisEntry->getNextForKey()) {
      assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
             "Scope imbalance!");
      HT.TopLevelMap.erase(ThisEntry->getKey());
    } else {
      ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
      assert(KeyEntry == ThisEntry && "Scope imbalance!");
      KeyEntry = ThisEntry->getNextForKey();
    }

    // Pop this value out of the scope.
    LastValInScope = ThisEntry->getNextInScope();

    // Delete this entry.
    ThisEntry->Destroy(HT.getAllocator());
  }
}

} // end namespace llvm

#endif // LLVM_ADT_SCOPEDHASHTABLE_H
//===- APFixedPoint.h - Fixed point constant handling -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Defines the fixed point number interface.
/// This is a class for abstracting various operations performed on fixed point
/// types.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_APFIXEDPOINT_H
#define LLVM_ADT_APFIXEDPOINT_H

#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class APFloat;
struct fltSemantics;

/// The fixed point semantics work similarly to fltSemantics. The width
/// specifies the whole bit width of the underlying scaled integer (with padding
/// if any). The scale represents the number of fractional bits in this type.
/// When HasUnsignedPadding is true and this type is unsigned, the first bit
/// in the value this represents is treated as padding.
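///
/// Example (illustrative sketch, not part of this header): a 16-bit signed
/// semantic with 7 fractional bits, i.e. values are the underlying integer
/// scaled by 2^-7.
///
///   FixedPointSemantics Sema(/*Width=*/16, /*Scale=*/7, /*IsSigned=*/true,
///                            /*IsSaturated=*/false,
///                            /*HasUnsignedPadding=*/false);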
class FixedPointSemantics {
public:
  static constexpr unsigned WidthBitWidth = 16;
  static constexpr unsigned LsbWeightBitWidth = 13;
  /// Used to differentiate the constructor taking Width and Lsb weight from
  /// the default constructor taking Width and Scale.
  struct Lsb {
    int LsbWeight;
  };
  FixedPointSemantics(unsigned Width, unsigned Scale, bool IsSigned,
                      bool IsSaturated, bool HasUnsignedPadding)
      : FixedPointSemantics(Width, Lsb{-static_cast<int>(Scale)}, IsSigned,
                            IsSaturated, HasUnsignedPadding) {}
  FixedPointSemantics(unsigned Width, Lsb Weight, bool IsSigned,
                      bool IsSaturated, bool HasUnsignedPadding)
      : Width(Width), LsbWeight(Weight.LsbWeight), IsSigned(IsSigned),
        IsSaturated(IsSaturated), HasUnsignedPadding(HasUnsignedPadding) {
    assert(isUInt<WidthBitWidth>(Width) && isInt<LsbWeightBitWidth>(Weight.LsbWeight));
    assert(!(IsSigned && HasUnsignedPadding) &&
           "Cannot have unsigned padding on a signed type.");
  }

  /// Check if the semantics follow the requirements of an older, more limited
  /// version of this class.
  bool isValidLegacySema() const {
    return LsbWeight <= 0 && static_cast<int>(Width) >= -LsbWeight;
  }
  unsigned getWidth() const { return Width; }
  unsigned getScale() const { assert(isValidLegacySema()); return -LsbWeight; }
  int getLsbWeight() const { return LsbWeight; }
  int getMsbWeight() const {
    return LsbWeight + Width - 1 /*Lsb and msb are both part of the width*/;
  }
  bool isSigned() const { return IsSigned; }
  bool isSaturated() const { return IsSaturated; }
  bool hasUnsignedPadding() const { return HasUnsignedPadding; }

  void setSaturated(bool Saturated) { IsSaturated = Saturated; }

  /// Return true if the first (most significant) bit is a sign or padding bit
  /// and therefore doesn't have a strictly positive weight.
  bool hasSignOrPaddingBit() const { return IsSigned || HasUnsignedPadding; }

  /// Return the number of integral bits represented by these semantics. These
  /// are separate from the fractional bits and do not include the sign or
  /// padding bit.
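  /// For example (illustrative): with Width = 16, Scale = 7, and
  /// IsSigned = true, the msb weight is 8, giving 8 integral bits alongside
  /// the sign bit and 7 fractional bits.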
  unsigned getIntegralBits() const {
    return std::max(getMsbWeight() + 1 - hasSignOrPaddingBit(), 0);
  }

  /// Return the FixedPointSemantics that allows for calculating the full
  /// precision semantic that can precisely represent the precision and ranges
  /// of both input values. This does not compute the resulting semantics for a
  /// given binary operation.
  FixedPointSemantics
  getCommonSemantics(const FixedPointSemantics &Other) const;

  /// Print semantics for debug purposes
  void print(llvm::raw_ostream& OS) const;

  /// Returns true if this fixed-point semantic with its value bits interpreted
  /// as an integer can fit in the given floating point semantic without
  /// overflowing to infinity.
  /// For example, a signed 8-bit fixed-point semantic has a maximum and
  /// minimum integer representation of 127 and -128, respectively. If both of
  /// these values can be represented (possibly inexactly) in the floating
  /// point semantic without overflowing, this returns true.
  bool fitsInFloatSemantics(const fltSemantics &FloatSema) const;

  /// Return the FixedPointSemantics for an integer type.
  static FixedPointSemantics GetIntegerSemantics(unsigned Width,
                                                 bool IsSigned) {
    return FixedPointSemantics(Width, /*Scale=*/0, IsSigned,
                               /*IsSaturated=*/false,
                               /*HasUnsignedPadding=*/false);
  }

  bool operator==(FixedPointSemantics Other) const {
    return Width == Other.Width && LsbWeight == Other.LsbWeight &&
           IsSigned == Other.IsSigned && IsSaturated == Other.IsSaturated &&
           HasUnsignedPadding == Other.HasUnsignedPadding;
  }
  bool operator!=(FixedPointSemantics Other) const { return !(*this == Other); }

private:
  unsigned Width          : WidthBitWidth;
  signed int LsbWeight    : LsbWeightBitWidth;
  unsigned IsSigned       : 1;
  unsigned IsSaturated    : 1;
  unsigned HasUnsignedPadding : 1;
};

static_assert(sizeof(FixedPointSemantics) == 4, "");

inline hash_code hash_value(const FixedPointSemantics &Val) {
  return hash_value(bit_cast<uint32_t>(Val));
}

template <> struct DenseMapInfo<FixedPointSemantics> {
  static inline FixedPointSemantics getEmptyKey() {
    return FixedPointSemantics(0, 0, false, false, false);
  }

  static inline FixedPointSemantics getTombstoneKey() {
    return FixedPointSemantics(0, 1, false, false, false);
  }

  static unsigned getHashValue(const FixedPointSemantics &Val) {
    return hash_value(Val);
  }

  static bool isEqual(const FixedPointSemantics &LHS,
                      const FixedPointSemantics &RHS) {
    return LHS == RHS;
  }
};

/// The APFixedPoint class works similarly to APInt/APSInt in that it is a
/// functional replacement for a scaled integer. It supports a wide range of
/// semantics including the one used by fixed point types proposed in ISO/IEC
/// JTC1 SC22 WG14 N1169. The class carries the value and semantics of
/// a fixed point, and provides different operations that would normally be
/// performed on fixed point types.
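///
/// A minimal usage sketch, assuming a signed 8-bit semantic with three
/// fractional bits (the values are illustrative):
/// \code
///   FixedPointSemantics Sema(/*Width=*/8, /*Scale=*/3, /*IsSigned=*/true,
///                            /*IsSaturated=*/false,
///                            /*HasUnsignedPadding=*/false);
///   APFixedPoint A(APInt(8, 20), Sema); // 20 / 2^3 == 2.5
///   APFixedPoint B(APInt(8, 12), Sema); // 12 / 2^3 == 1.5
///   bool Overflow = false;
///   APFixedPoint Sum = A.add(B, &Overflow); // 4.0, Overflow == false
/// \endcode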
class APFixedPoint {
public:
  APFixedPoint(const APInt &Val, const FixedPointSemantics &Sema)
      : Val(Val, !Sema.isSigned()), Sema(Sema) {
    assert(Val.getBitWidth() == Sema.getWidth() &&
           "The value should have a bit width that matches the Sema width");
  }

  APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
      : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned()), Sema) {}

  // Zero initialization.
  APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}

  APSInt getValue() const { return APSInt(Val, !Sema.isSigned()); }
  inline unsigned getWidth() const { return Sema.getWidth(); }
  inline unsigned getScale() const { return Sema.getScale(); }
  int getLsbWeight() const { return Sema.getLsbWeight(); }
  int getMsbWeight() const { return Sema.getMsbWeight(); }
  inline bool isSaturated() const { return Sema.isSaturated(); }
  inline bool isSigned() const { return Sema.isSigned(); }
  inline bool hasPadding() const { return Sema.hasUnsignedPadding(); }
  FixedPointSemantics getSemantics() const { return Sema; }

  bool getBoolValue() const { return Val.getBoolValue(); }

  // Convert this number to match the semantics provided. If the overflow
  // parameter is provided, set this value to true or false to indicate if this
  // operation results in an overflow.
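  //
  // A minimal sketch (Src and DstSema are hypothetical):
  //   bool Overflow = false;
  //   APFixedPoint Converted = Src.convert(DstSema, &Overflow);
  //   if (Overflow) { /* the value did not fit in DstSema */ }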
  APFixedPoint convert(const FixedPointSemantics &DstSema,
                       bool *Overflow = nullptr) const;

  // Perform binary operations on a fixed point type. The resulting fixed point
  // value will be in the common, full precision semantics that can represent
  // the precision and ranges of both input values. See convert() for an
  // explanation of the Overflow parameter.
  APFixedPoint add(const APFixedPoint &Other, bool *Overflow = nullptr) const;
  APFixedPoint sub(const APFixedPoint &Other, bool *Overflow = nullptr) const;
  APFixedPoint mul(const APFixedPoint &Other, bool *Overflow = nullptr) const;
  APFixedPoint div(const APFixedPoint &Other, bool *Overflow = nullptr) const;

  // Perform shift operations on a fixed point type. Unlike the other binary
  // operations, the resulting fixed point value will be in the original
  // semantic.
  APFixedPoint shl(unsigned Amt, bool *Overflow = nullptr) const;
  APFixedPoint shr(unsigned Amt, bool *Overflow = nullptr) const {
    // Right shift cannot overflow.
    if (Overflow)
      *Overflow = false;
    return APFixedPoint(Val >> Amt, Sema);
  }

  /// Perform a unary negation (-X) on this fixed point type, taking into
  /// account saturation if applicable.
  APFixedPoint negate(bool *Overflow = nullptr) const;

  /// Return the integral part of this fixed point number, rounded towards
  /// zero. (-2.5k -> -2)
  APSInt getIntPart() const {
    if (getMsbWeight() < 0)
      return APSInt(APInt::getZero(getWidth()), Val.isUnsigned());
    APSInt ExtVal =
        (getLsbWeight() > 0) ? Val.extend(getWidth() + getLsbWeight()) : Val;
    if (Val < 0 && Val != -Val) // Cover the case when we have the min val
      return -((-ExtVal).relativeShl(getLsbWeight()));
    return ExtVal.relativeShl(getLsbWeight());
  }

  /// Return the integral part of this fixed point number, rounded towards
  /// zero. The value is stored into an APSInt with the provided width and sign.
  /// If the overflow parameter is provided, and the integral value is not able
  /// to be fully stored in the provided width and sign, the overflow parameter
  /// is set to true.
  APSInt convertToInt(unsigned DstWidth, bool DstSign,
                      bool *Overflow = nullptr) const;

  /// Convert this fixed point number to a floating point value with the
  /// provided semantics.
  APFloat convertToFloat(const fltSemantics &FloatSema) const;

  void toString(SmallVectorImpl<char> &Str) const;
  std::string toString() const {
    SmallString<40> S;
    toString(S);
    return std::string(S.str());
  }

  void print(raw_ostream &) const;
  void dump() const;

  // If LHS > RHS, return 1. If LHS == RHS, return 0. If LHS < RHS, return -1.
  int compare(const APFixedPoint &Other) const;
  bool operator==(const APFixedPoint &Other) const {
    return compare(Other) == 0;
  }
  bool operator!=(const APFixedPoint &Other) const {
    return compare(Other) != 0;
  }
  bool operator>(const APFixedPoint &Other) const { return compare(Other) > 0; }
  bool operator<(const APFixedPoint &Other) const { return compare(Other) < 0; }
  bool operator>=(const APFixedPoint &Other) const {
    return compare(Other) >= 0;
  }
  bool operator<=(const APFixedPoint &Other) const {
    return compare(Other) <= 0;
  }

  static APFixedPoint getMax(const FixedPointSemantics &Sema);
  static APFixedPoint getMin(const FixedPointSemantics &Sema);

  /// Given a floating point semantic, return the next floating point semantic
  /// with a larger exponent and larger or equal mantissa.
  static const fltSemantics *promoteFloatSemantics(const fltSemantics *S);

  /// Create an APFixedPoint with a value equal to that of the provided integer,
  /// and in the same semantics as the provided target semantics. If the value
  /// is not able to fit in the specified fixed point semantics, and the
  /// overflow parameter is provided, it is set to true.
  static APFixedPoint getFromIntValue(const APSInt &Value,
                                      const FixedPointSemantics &DstFXSema,
                                      bool *Overflow = nullptr);

  /// Create an APFixedPoint with a value equal to that of the provided
  /// floating point value, in the provided target semantics. If the value is
  /// not able to fit in the specified fixed point semantics and the overflow
  /// parameter is specified, it is set to true.
  /// For NaN, the Overflow flag is always set. For +inf and -inf, if the
  /// semantic is saturating, the value saturates. Otherwise, the Overflow flag
  /// is set.
  static APFixedPoint getFromFloatValue(const APFloat &Value,
                                        const FixedPointSemantics &DstFXSema,
                                        bool *Overflow = nullptr);

private:
  APSInt Val;
  FixedPointSemantics Sema;
};

inline raw_ostream &operator<<(raw_ostream &OS, const APFixedPoint &FX) {
  OS << FX.toString();
  return OS;
}

inline hash_code hash_value(const APFixedPoint &Val) {
  return hash_combine(Val.getSemantics(), Val.getValue());
}

template <> struct DenseMapInfo<APFixedPoint> {
  static inline APFixedPoint getEmptyKey() {
    return APFixedPoint(DenseMapInfo<FixedPointSemantics>::getEmptyKey());
  }

  static inline APFixedPoint getTombstoneKey() {
    return APFixedPoint(DenseMapInfo<FixedPointSemantics>::getTombstoneKey());
  }

  static unsigned getHashValue(const APFixedPoint &Val) {
    return hash_value(Val);
  }

  static bool isEqual(const APFixedPoint &LHS, const APFixedPoint &RHS) {
    return LHS.getSemantics() == RHS.getSemantics() &&
           LHS.getValue() == RHS.getValue();
  }
};

} // namespace llvm

#endif
//===- Uniformity.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_UNIFORMITY_H
#define LLVM_ADT_UNIFORMITY_H

namespace llvm {

/// Enum describing how instructions behave with respect to uniformity and
/// divergence, to answer the question: if the same instruction is executed by
/// two threads in a convergent set of threads, will its result value(s) be
/// uniform, i.e. the same on both threads?
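///
/// A minimal sketch of how a client might branch on this (UA.getUniformity
/// is a hypothetical query, not part of this header):
/// \code
///   switch (UA.getUniformity(I)) {
///   case InstructionUniformity::AlwaysUniform: /* uniform result */
///     break;
///   case InstructionUniformity::NeverUniform:  /* divergent result */
///     break;
///   case InstructionUniformity::Default:       /* depends on operands */
///     break;
///   }
/// \endcode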
enum class InstructionUniformity {
  /// The result values are uniform if and only if all operands are uniform.
  Default,

  /// The result values are always uniform.
  AlwaysUniform,

  /// The result values can never be assumed to be uniform.
  NeverUniform
};

} // namespace llvm
#endif // LLVM_ADT_UNIFORMITY_H
//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Equivalence classes for small integers. This is a mapping of the integers
/// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
///
/// Initially each integer has its own equivalence class. Classes are joined by
/// passing a representative member of each class to join().
///
/// Once the classes are built, compress() will number them 0 .. M-1 and prevent
/// further changes.
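///
/// A minimal usage sketch:
/// \code
///   IntEqClasses EC(4); // classes: {0} {1} {2} {3}
///   EC.join(0, 2);      // classes: {0,2} {1} {3}
///   EC.join(2, 3);      // classes: {0,2,3} {1}
///   EC.compress();      // class 0 == {0,2,3}, class 1 == {1}
///   unsigned C = EC[3]; // C == 0
/// \endcode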
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_INTEQCLASSES_H
#define LLVM_ADT_INTEQCLASSES_H

#include "llvm/ADT/SmallVector.h"

namespace llvm {

class IntEqClasses {
  /// EC - When uncompressed, map each integer to a smaller member of its
  /// equivalence class. The class leader is the smallest member and maps to
  /// itself.
  ///
  /// When compressed, EC[i] is the equivalence class of i.
  SmallVector<unsigned, 8> EC;

  /// NumClasses - The number of equivalence classes when compressed, or 0 when
  /// uncompressed.
  unsigned NumClasses = 0;

public:
  /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
  IntEqClasses(unsigned N = 0) { grow(N); }

  /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
  /// equivalence classes.
  /// This requires an uncompressed map.
  void grow(unsigned N);

  /// clear - Clear all classes so that grow() will assign a unique class to
  /// every integer.
  void clear() {
    EC.clear();
    NumClasses = 0;
  }

  /// Join the equivalence classes of a and b. After joining classes,
  /// findLeader(a) == findLeader(b). This requires an uncompressed map.
  /// Returns the new leader.
  unsigned join(unsigned a, unsigned b);

  /// findLeader - Compute the leader of a's equivalence class. This is the
  /// smallest member of the class.
  /// This requires an uncompressed map.
  unsigned findLeader(unsigned a) const;

  /// compress - Compress equivalence classes by numbering them 0 .. M-1.
  /// This makes the equivalence class map immutable.
  void compress();

  /// getNumClasses - Return the number of equivalence classes after compress()
  /// was called.
  unsigned getNumClasses() const { return NumClasses; }

  /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
  /// This requires a compressed map.
  unsigned operator[](unsigned a) const {
    assert(NumClasses && "operator[] called before compress()");
    return EC[a];
  }

  /// uncompress - Change back to the uncompressed representation that allows
  /// editing.
  void uncompress();
};

} // End llvm namespace

#endif
//===- PriorityWorklist.h - Worklist with insertion priority ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///
/// This file provides a priority worklist. See the class comments for details.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_PRIORITYWORKLIST_H
#define LLVM_ADT_PRIORITYWORKLIST_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <vector>

namespace llvm {

/// A FILO worklist that prioritizes on re-insertion without duplication.
///
/// This is very similar to a \c SetVector with the primary difference that
/// while re-insertion does not create a duplicate, it does adjust the
/// visitation order to respect the last insertion point. This can be useful
/// when the visit order needs to be prioritized based on insertion point
/// without actually having duplicate visits.
///
/// Note that this doesn't prevent re-insertion of elements which have been
/// visited -- if you need to break cycles, a set will still be necessary.
///
/// The type \c T must be default constructible to a null value that will be
/// ignored. It is an error to insert such a value, and popping elements will
/// never produce such a value. It is expected to be used with common nullable
/// types like pointers or optionals.
///
/// Internally this uses a vector to store the worklist and a map to identify
/// existing elements in the worklist. Both of these may be customized, but the
/// map must support the basic DenseMap API for mapping from a T to an integer
/// index into the vector.
///
/// A partial specialization is provided to automatically select a SmallVector
/// and a SmallDenseMap if custom data structures are not provided.
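///
/// A minimal usage sketch (int works here because 0, the null value, is never
/// inserted):
/// \code
///   SmallPriorityWorklist<int, 4> WL;
///   WL.insert(1);
///   WL.insert(2);
///   WL.insert(1);              // Re-insertion moves 1 after 2.
///   int X = WL.pop_back_val(); // X == 1
///   int Y = WL.pop_back_val(); // Y == 2
/// \endcode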
template <typename T, typename VectorT = std::vector<T>,
          typename MapT = DenseMap<T, ptrdiff_t>>
class PriorityWorklist {
public:
  using value_type = T;
  using key_type = T;
  using reference = T&;
  using const_reference = const T&;
  using size_type = typename MapT::size_type;

  /// Construct an empty PriorityWorklist
  PriorityWorklist() = default;

  /// Determine if the PriorityWorklist is empty or not.
  bool empty() const {
    return V.empty();
  }

  /// Returns the number of elements in the worklist.
  size_type size() const {
    return M.size();
  }

  /// Count the number of elements of a given key in the PriorityWorklist.
  /// \returns 0 if the element is not in the PriorityWorklist, 1 if it is.
  size_type count(const key_type &key) const {
    return M.count(key);
  }

  /// Return the last element of the PriorityWorklist.
  const T &back() const {
    assert(!empty() && "Cannot call back() on empty PriorityWorklist!");
    return V.back();
  }

  /// Insert a new element into the PriorityWorklist.
  /// \returns true if the element was inserted into the PriorityWorklist.
  bool insert(const T &X) {
    assert(X != T() && "Cannot insert a null (default constructed) value!");
    auto InsertResult = M.insert({X, V.size()});
    if (InsertResult.second) {
      // Fresh value, just append it to the vector.
      V.push_back(X);
      return true;
    }

    auto &Index = InsertResult.first->second;
    assert(V[Index] == X && "Value not actually at index in map!");
    if (Index != (ptrdiff_t)(V.size() - 1)) {
      // If the element isn't at the back, null it out and append a fresh one.
      V[Index] = T();
      Index = (ptrdiff_t)V.size();
      V.push_back(X);
    }
    return false;
  }

  /// Insert a sequence of new elements into the PriorityWorklist.
  template <typename SequenceT>
  std::enable_if_t<!std::is_convertible<SequenceT, T>::value>
  insert(SequenceT &&Input) {
    if (std::begin(Input) == std::end(Input))
      // Nothing to do for an empty input sequence.
      return;

    // First pull the input sequence into the vector as a bulk append
    // operation.
    ptrdiff_t StartIndex = V.size();
    V.insert(V.end(), std::begin(Input), std::end(Input));
    // Now walk backwards fixing up the index map and deleting any duplicates.
    for (ptrdiff_t i = V.size() - 1; i >= StartIndex; --i) {
      auto InsertResult = M.insert({V[i], i});
      if (InsertResult.second)
        continue;

      // If the existing index is before this insert's start, nuke that one and
      // move it up.
      ptrdiff_t &Index = InsertResult.first->second;
      if (Index < StartIndex) {
        V[Index] = T();
        Index = i;
        continue;
      }

      // Otherwise the existing one comes first so just clear out the value in
      // this slot.
      V[i] = T();
    }
  }

  /// Remove the last element of the PriorityWorklist.
  void pop_back() {
    assert(!empty() && "Cannot remove an element when empty!");
    assert(back() != T() && "Cannot have a null element at the back!");
    M.erase(back());
    do {
      V.pop_back();
    } while (!V.empty() && V.back() == T());
  }

  [[nodiscard]] T pop_back_val() {
    T Ret = back();
    pop_back();
    return Ret;
  }

  /// Erase an item from the worklist.
  ///
  /// Note that this is constant time due to the nature of the worklist
  /// implementation.
  bool erase(const T& X) {
    auto I = M.find(X);
    if (I == M.end())
      return false;

    assert(V[I->second] == X && "Value not actually at index in map!");
    if (I->second == (ptrdiff_t)(V.size() - 1)) {
      do {
        V.pop_back();
      } while (!V.empty() && V.back() == T());
    } else {
      V[I->second] = T();
    }
    M.erase(I);
    return true;
  }

  /// Erase items from the set vector based on a predicate function.
  ///
  /// This is intended to be equivalent to the following code, if we could
  /// write it:
  ///
  /// \code
  ///   V.erase(remove_if(V, P), V.end());
  /// \endcode
  ///
  /// However, PriorityWorklist doesn't expose non-const iterators, making any
  /// algorithm like remove_if impossible to use.
  ///
  /// \returns true if any element is removed.
  template <typename UnaryPredicate>
  bool erase_if(UnaryPredicate P) {
    typename VectorT::iterator E =
        remove_if(V, TestAndEraseFromMap<UnaryPredicate>(P, M));
    if (E == V.end())
      return false;
    for (auto I = V.begin(); I != E; ++I)
      if (*I != T())
        M[*I] = I - V.begin();
    V.erase(E, V.end());
    return true;
  }

  /// Reverse the items in the PriorityWorklist.
  ///
  /// This does an in-place reversal. Other kinds of reverse aren't easy to
  /// support in the face of the worklist semantics.
  void reverse() {
    std::reverse(V.begin(), V.end());
    // Rebuild the index map; null placeholder values have no map entry.
    for (ptrdiff_t i = 0, e = (ptrdiff_t)V.size(); i != e; ++i)
      if (V[i] != T())
        M[V[i]] = i;
  }

  /// Completely clear the PriorityWorklist
  void clear() {
    M.clear();
    V.clear();
  }

private:
  /// A wrapper predicate designed for use with std::remove_if.
  ///
  /// This predicate wraps a predicate suitable for use with std::remove_if to
  /// call M.erase(x) on each element which is slated for removal. This just
  /// allows the predicate to be move only which we can't do with lambdas
  /// today.
  template <typename UnaryPredicateT>
  class TestAndEraseFromMap {
    UnaryPredicateT P;
    MapT &M;

  public:
    TestAndEraseFromMap(UnaryPredicateT P, MapT &M)
        : P(std::move(P)), M(M) {}

    bool operator()(const T &Arg) {
      if (Arg == T())
        // Skip null values in the PriorityWorklist.
        return false;

      if (P(Arg)) {
        M.erase(Arg);
        return true;
      }
      return false;
    }
  };

  /// The map from value to index in the vector.
  MapT M;

  /// The vector of elements in insertion order.
  VectorT V;
};

/// A version of \c PriorityWorklist that selects small size optimized data
/// structures for the vector and map.
template <typename T, unsigned N>
class SmallPriorityWorklist
    : public PriorityWorklist<T, SmallVector<T, N>,
                              SmallDenseMap<T, ptrdiff_t>> {
public:
  SmallPriorityWorklist() = default;
};

} // end namespace llvm

#endif // LLVM_ADT_PRIORITYWORKLIST_H
//===- AddressRanges.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ADDRESSRANGES_H
#define LLVM_ADT_ADDRESSRANGES_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <optional>
#include <stdint.h>

namespace llvm {

/// A class that represents an address range. The range is specified using
/// a start and an end address: [Start, End).
class AddressRange {
public:
  AddressRange() {}
  AddressRange(uint64_t S, uint64_t E) : Start(S), End(E) {
    assert(Start <= End);
  }
  uint64_t start() const { return Start; }
  uint64_t end() const { return End; }
  uint64_t size() const { return End - Start; }
  bool empty() const { return size() == 0; }
  bool contains(uint64_t Addr) const { return Start <= Addr && Addr < End; }
  bool contains(const AddressRange &R) const {
    return Start <= R.Start && R.End <= End;
  }
  bool intersects(const AddressRange &R) const {
    return Start < R.End && R.Start < End;
  }
  bool operator==(const AddressRange &R) const {
    return Start == R.Start && End == R.End;
  }
  bool operator!=(const AddressRange &R) const { return !(*this == R); }
  bool operator<(const AddressRange &R) const {
    return std::make_pair(Start, End) < std::make_pair(R.Start, R.End);
  }

private:
  uint64_t Start = 0;
  uint64_t End = 0;
};

/// The AddressRangesBase class presents the base functionality for the
/// normalized address ranges collection. This class keeps a sorted vector
/// of AddressRange-like objects and can perform searches efficiently.
/// The address ranges are always sorted and never contain any invalid,
/// empty or intersected address ranges.
template <typename T> class AddressRangesBase {
protected:
  using Collection = SmallVector<T>;
  Collection Ranges;

public:
  void clear() { Ranges.clear(); }
  bool empty() const { return Ranges.empty(); }
  bool contains(uint64_t Addr) const {
    return find(Addr, Addr + 1) != Ranges.end();
  }
  bool contains(AddressRange Range) const {
    return find(Range.start(), Range.end()) != Ranges.end();
  }
  void reserve(size_t Capacity) { Ranges.reserve(Capacity); }
  size_t size() const { return Ranges.size(); }

  std::optional<T> getRangeThatContains(uint64_t Addr) const {
    typename Collection::const_iterator It = find(Addr, Addr + 1);
    if (It == Ranges.end())
      return std::nullopt;

    return *It;
  }

  typename Collection::const_iterator begin() const { return Ranges.begin(); }
  typename Collection::const_iterator end() const { return Ranges.end(); }

  const T &operator[](size_t i) const {
    assert(i < Ranges.size());
    return Ranges[i];
  }

  bool operator==(const AddressRangesBase<T> &RHS) const {
    return Ranges == RHS.Ranges;
  }

protected:
  typename Collection::const_iterator find(uint64_t Start, uint64_t End) const {
    if (Start >= End)
      return Ranges.end();

    auto It =
        std::partition_point(Ranges.begin(), Ranges.end(), [=](const T &R) {
          return AddressRange(R).start() <= Start;
        });

    if (It == Ranges.begin())
      return Ranges.end();

    --It;
    if (End > AddressRange(*It).end())
      return Ranges.end();

    return It;
  }
};

/// The AddressRanges class helps normalize address range collections.
/// This class keeps a sorted vector of AddressRange objects and can perform
/// insertions and searches efficiently. Intersecting ([100,200), [150,300))
/// and adjacent ([100,200), [200,300)) address ranges are combined during
/// insertion.
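///
/// A minimal usage sketch:
/// \code
///   AddressRanges Ranges;
///   Ranges.insert({0x100, 0x200});
///   Ranges.insert({0x150, 0x300}); // Intersecting: merged to [0x100, 0x300).
///   Ranges.insert({0x300, 0x400}); // Adjacent: merged to [0x100, 0x400).
///   assert(Ranges.size() == 1 && Ranges.contains(0x3FF));
/// \endcode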
class AddressRanges : public AddressRangesBase<AddressRange> {
public:
  Collection::const_iterator insert(AddressRange Range) {
    if (Range.empty())
      return Ranges.end();

    auto It = llvm::upper_bound(Ranges, Range);
    auto It2 = It;
    while (It2 != Ranges.end() && It2->start() <= Range.end())
      ++It2;
    if (It != It2) {
      Range = {Range.start(), std::max(Range.end(), std::prev(It2)->end())};
      It = Ranges.erase(It, It2);
    }
    if (It != Ranges.begin() && Range.start() <= std::prev(It)->end()) {
      --It;
      *It = {It->start(), std::max(It->end(), Range.end())};
      return It;
    }

    return Ranges.insert(It, Range);
  }
};

class AddressRangeValuePair {
public:
  operator AddressRange() const { return Range; }

  AddressRange Range;
  int64_t Value = 0;
};

inline bool operator==(const AddressRangeValuePair &LHS,
                       const AddressRangeValuePair &RHS) {
  return LHS.Range == RHS.Range && LHS.Value == RHS.Value;
}

/// AddressRangesMap class maps values to address ranges.
/// It keeps normalized address ranges and corresponding values.
/// This class keeps a sorted vector of AddressRangeValuePair objects
/// and can perform insertions and searches efficiently.
/// Intersecting ([100,200), [150,300)) ranges are split into non-conflicting
/// parts ([100,200), [200,300)). Adjacent ([100,200), [200,300)) address
/// ranges are not combined during insertion.
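///
/// A minimal usage sketch:
/// \code
///   AddressRangesMap Map;
///   Map.insert({0x100, 0x200}, /*Value=*/1);
///   Map.insert({0x150, 0x300}, /*Value=*/2);
///   // Stored as [0x100, 0x200) -> 1 and [0x200, 0x300) -> 2.
/// \endcode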
class AddressRangesMap : public AddressRangesBase<AddressRangeValuePair> {
public:
  void insert(AddressRange Range, int64_t Value) {
    if (Range.empty())
      return;

    // Find the last range that starts at or before the incoming Range.
    auto It = std::partition_point(Ranges.begin(), Ranges.end(),
                                   [=](const AddressRangeValuePair &R) {
                                     return R.Range.start() <= Range.start();
                                   });

    if (It != Ranges.begin())
      It--;

    while (!Range.empty()) {
      // Inserted range does not overlap with any range.
      // Store it into the Ranges collection.
      if (It == Ranges.end() || Range.end() <= It->Range.start()) {
        Ranges.insert(It, {Range, Value});
        return;
      }

      // Inserted range partially overlaps with current range.
      // Store not overlapped part of inserted range.
      if (Range.start() < It->Range.start()) {
        It = Ranges.insert(It, {{Range.start(), It->Range.start()}, Value});
        It++;
        Range = {It->Range.start(), Range.end()};
        continue;
      }

      // Inserted range fully overlaps with current range.
      if (Range.end() <= It->Range.end())
        return;

      // Inserted range partially overlaps with current range.
      // Remove overlapped part from the inserted range.
      if (Range.start() < It->Range.end())
        Range = {It->Range.end(), Range.end()};

      It++;
    }
  }
};

} // namespace llvm

#endif // LLVM_ADT_ADDRESSRANGES_H
//===- llvm/ADT/TinyPtrVector.h - 'Normally tiny' vectors -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_TINYPTRVECTOR_H
#define LLVM_ADT_TINYPTRVECTOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>

namespace llvm {

/// TinyPtrVector - This class is specialized for cases where there are
/// normally 0 or 1 element in a vector, but is general enough to go beyond that
/// when required.
///
/// NOTE: This container doesn't allow you to store a null pointer into it.
///
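/// A minimal usage sketch:
/// \code
///   int A = 1, B = 2;
///   TinyPtrVector<int *> V;
///   V.push_back(&A); // Single element stored inline in the pointer union.
///   V.push_back(&B); // Second element spills to a heap-allocated vector.
///   unsigned N = V.size(); // N == 2
/// \endcode
///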
template <typename EltTy>
class TinyPtrVector {
public:
  using VecTy = SmallVector<EltTy, 4>;
  using value_type = typename VecTy::value_type;
  // EltTy must be the first pointer type so that is<EltTy> is true for the
  // default-constructed PtrUnion. This allows an empty TinyPtrVector to
  // naturally vend a begin/end iterator of type EltTy* without an additional
  // check for the empty state.
  using PtrUnion = PointerUnion<EltTy, VecTy *>;

private:
  PtrUnion Val;

public:
  TinyPtrVector() = default;

  ~TinyPtrVector() {
    if (VecTy *V = dyn_cast_if_present<VecTy *>(Val))
      delete V;
  }

  TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
    if (VecTy *V = dyn_cast_if_present<VecTy *>(Val))
      Val = new VecTy(*V);
  }

  TinyPtrVector &operator=(const TinyPtrVector &RHS) {
    if (this == &RHS)
      return *this;
    if (RHS.empty()) {
      this->clear();
      return *this;
    }

    // Try to squeeze into the single slot. If it won't fit, allocate a copied
    // vector.
    if (isa<EltTy>(Val)) {
      if (RHS.size() == 1)
        Val = RHS.front();
      else
        Val = new VecTy(*cast<VecTy *>(RHS.Val));
      return *this;
    }

    // If we have a full vector allocated, try to re-use it.
    if (isa<EltTy>(RHS.Val)) {
      cast<VecTy *>(Val)->clear();
      cast<VecTy *>(Val)->push_back(RHS.front());
    } else {
      *cast<VecTy *>(Val) = *cast<VecTy *>(RHS.Val);
    }
    return *this;
  }

  TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
    RHS.Val = (EltTy)nullptr;
  }

  TinyPtrVector &operator=(TinyPtrVector &&RHS) {
    if (this == &RHS)
      return *this;
    if (RHS.empty()) {
      this->clear();
      return *this;
    }

    // If this vector has been allocated on the heap, re-use it if cheap. If it
    // would require more copying, just delete it and we'll steal the other
    // side.
    if (VecTy *V = dyn_cast_if_present<VecTy *>(Val)) {
      if (isa<EltTy>(RHS.Val)) {
        V->clear();
        V->push_back(RHS.front());
        RHS.Val = EltTy();
        return *this;
      }
      delete V;
    }

    Val = RHS.Val;
    RHS.Val = EltTy();
    return *this;
  }

  TinyPtrVector(std::initializer_list<EltTy> IL)
      : Val(IL.size() == 0
                ? PtrUnion()
                : IL.size() == 1 ? PtrUnion(*IL.begin())
                                 : PtrUnion(new VecTy(IL.begin(), IL.end()))) {}

  /// Constructor from an ArrayRef.
  ///
  /// This also is a constructor for individual array elements due to the single
  /// element constructor for ArrayRef.
  explicit TinyPtrVector(ArrayRef<EltTy> Elts)
      : Val(Elts.empty()
                ? PtrUnion()
                : Elts.size() == 1
                      ? PtrUnion(Elts[0])
                      : PtrUnion(new VecTy(Elts.begin(), Elts.end()))) {}

  TinyPtrVector(size_t Count, EltTy Value)
      : Val(Count == 0 ? PtrUnion()
                       : Count == 1 ? PtrUnion(Value)
                                    : PtrUnion(new VecTy(Count, Value))) {}

  // implicit conversion operator to ArrayRef.
  operator ArrayRef<EltTy>() const {
    if (Val.isNull())
      return std::nullopt;
    if (isa<EltTy>(Val))
      return *Val.getAddrOfPtr1();
    return *cast<VecTy *>(Val);
  }

  // implicit conversion operator to MutableArrayRef.
  operator MutableArrayRef<EltTy>() {
    if (Val.isNull())
      return std::nullopt;
    if (isa<EltTy>(Val))
      return *Val.getAddrOfPtr1();
    return *cast<VecTy *>(Val);
  }

  // Implicit conversion to ArrayRef<U> if EltTy* implicitly converts to U*.
  template <
      typename U,
      std::enable_if_t<std::is_convertible<ArrayRef<EltTy>, ArrayRef<U>>::value,
                       bool> = false>
  operator ArrayRef<U>() const {
    return operator ArrayRef<EltTy>();
  }

  bool empty() const {
    // This vector can be empty if it contains no element, or if it
    // contains a pointer to an empty vector.
    if (Val.isNull()) return true;
    if (VecTy *Vec = dyn_cast_if_present<VecTy *>(Val))
      return Vec->empty();
    return false;
  }

  unsigned size() const {
    if (empty())
      return 0;
    if (isa<EltTy>(Val))
      return 1;
    return cast<VecTy *>(Val)->size();
  }

  using iterator = EltTy *;
  using const_iterator = const EltTy *;
  using reverse_iterator = std::reverse_iterator<iterator>;
  using const_reverse_iterator = std::reverse_iterator<const_iterator>;

  iterator begin() {
    if (isa<EltTy>(Val))
      return Val.getAddrOfPtr1();

    return cast<VecTy *>(Val)->begin();
  }

  iterator end() {
    if (isa<EltTy>(Val))
      return begin() + (Val.isNull() ? 0 : 1);

    return cast<VecTy *>(Val)->end();
  }

  const_iterator begin() const {
    return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
  }

  const_iterator end() const {
    return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
  }

  reverse_iterator rbegin() { return reverse_iterator(end()); }
  reverse_iterator rend() { return reverse_iterator(begin()); }

  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }

  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  EltTy operator[](unsigned i) const {
    assert(!Val.isNull() && "can't index into an empty vector");
    if (isa<EltTy>(Val)) {
      assert(i == 0 && "tinyvector index out of range");
      return cast<EltTy>(Val);
    }

    assert(i < cast<VecTy *>(Val)->size() && "tinyvector index out of range");
    return (*cast<VecTy *>(Val))[i];
  }

  EltTy front() const {
    assert(!empty() && "vector empty");
    if (isa<EltTy>(Val))
      return cast<EltTy>(Val);
    return cast<VecTy *>(Val)->front();
  }

  EltTy back() const {
    assert(!empty() && "vector empty");
    if (isa<EltTy>(Val))
      return cast<EltTy>(Val);
    return cast<VecTy *>(Val)->back();
  }

  void push_back(EltTy NewVal) {
    // If we have nothing, add something.
    if (Val.isNull()) {
      Val = NewVal;
      assert(!Val.isNull() && "Can't add a null value");
      return;
    }

    // If we have a single value, convert to a vector.
    if (isa<EltTy>(Val)) {
      EltTy V = cast<EltTy>(Val);
      Val = new VecTy();
      cast<VecTy *>(Val)->push_back(V);
    }

    // Add the new value, we know we have a vector.
    cast<VecTy *>(Val)->push_back(NewVal);
  }

  void pop_back() {
    // If we have a single value, convert to empty.
    if (isa<EltTy>(Val))
      Val = (EltTy)nullptr;
    else if (VecTy *Vec = cast<VecTy *>(Val))
      Vec->pop_back();
  }

  void clear() {
    // If we have a single value, convert to empty.
    if (isa<EltTy>(Val)) {
      Val = EltTy();
    } else if (VecTy *Vec = dyn_cast_if_present<VecTy *>(Val)) {
      // If we have a vector form, just clear it.
      Vec->clear();
    }
    // Otherwise, we're already empty.
  }

  iterator erase(iterator I) {
    assert(I >= begin() && "Iterator to erase is out of bounds.");
    assert(I < end() && "Erasing at past-the-end iterator.");

    // If we have a single value, convert to empty.
    if (isa<EltTy>(Val)) {
      if (I == begin())
        Val = EltTy();
    } else if (VecTy *Vec = dyn_cast_if_present<VecTy *>(Val)) {
      // multiple items in a vector; just do the erase, there is no
      // benefit to collapsing back to a pointer
      return Vec->erase(I);
    }
    return end();
  }

  iterator erase(iterator S, iterator E) {
    assert(S >= begin() && "Range to erase is out of bounds.");
    assert(S <= E && "Trying to erase invalid range.");
    assert(E <= end() && "Trying to erase past the end.");

    if (isa<EltTy>(Val)) {
      if (S == begin() && S != E)
        Val = EltTy();
    } else if (VecTy *Vec = dyn_cast_if_present<VecTy *>(Val)) {
      return Vec->erase(S, E);
    }
    return end();
  }

  iterator insert(iterator I, const EltTy &Elt) {
    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
    assert(I <= this->end() && "Inserting past the end of the vector.");
    if (I == end()) {
      push_back(Elt);
      return std::prev(end());
    }
    assert(!Val.isNull() && "Null value with non-end insert iterator.");
    if (isa<EltTy>(Val)) {
      EltTy V = cast<EltTy>(Val);
      assert(I == begin());
      Val = Elt;
      push_back(V);
      return begin();
    }

    return cast<VecTy *>(Val)->insert(I, Elt);
  }

  template<typename ItTy>
  iterator insert(iterator I, ItTy From, ItTy To) {
    assert(I >= this->begin() && "Insertion iterator is out of bounds.");
    assert(I <= this->end() && "Inserting past the end of the vector.");
    if (From == To)
      return I;

    // If we have a single value, convert to a vector.
    ptrdiff_t Offset = I - begin();
    if (Val.isNull()) {
      if (std::next(From) == To) {
        Val = *From;
        return begin();
      }

      Val = new VecTy();
    } else if (isa<EltTy>(Val)) {
      EltTy V = cast<EltTy>(Val);
      Val = new VecTy();
      cast<VecTy *>(Val)->push_back(V);
    }
    return cast<VecTy *>(Val)->insert(begin() + Offset, From, To);
  }
};

} // end namespace llvm

#endif // LLVM_ADT_TINYPTRVECTOR_H
//===- llvm/ADT/GraphTraits.h - Graph traits template -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the little GraphTraits<X> template class that should be
/// specialized by classes that want to be iterable by generic graph
/// iterators.
///
/// This file also defines the marker class Inverse that is used to iterate
/// over graphs in a graph-defined, inverse ordering.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GRAPHTRAITS_H
#define LLVM_ADT_GRAPHTRAITS_H

#include "llvm/ADT/iterator_range.h"

namespace llvm {

// GraphTraits - This class should be specialized by different graph types...
// which is why the default version is empty.
//
// This template evolved from supporting `BasicBlock` to also later supporting
// more complex types (e.g. CFG and DomTree).
//
// GraphTraits can be used to create a view over a graph interpreting it
// differently without requiring a copy of the original graph. This could
// be achieved by carrying more data in NodeRef. See LoopBodyTraits for one
// example.
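//
// A minimal specialization sketch for a hypothetical adjacency-list graph
// (MyGraph and its members are illustrative, not part of LLVM):
//
//   struct MyGraph {
//     struct Node { SmallVector<Node *, 4> Succs; };
//     Node *Entry;
//   };
//   template <> struct GraphTraits<MyGraph *> {
//     using NodeRef = MyGraph::Node *;
//     using ChildIteratorType = MyGraph::Node **;
//     static NodeRef getEntryNode(MyGraph *G) { return G->Entry; }
//     static ChildIteratorType child_begin(NodeRef N) {
//       return N->Succs.begin();
//     }
//     static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
//   };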
template<class GraphType>
struct GraphTraits {
  // Elements to provide:

  // typedef NodeRef           - Type of Node token in the graph, which should
  //                             be cheap to copy.
  // typedef ChildIteratorType - Type used to iterate over children in graph,
  //                             dereference to a NodeRef.

  // static NodeRef getEntryNode(const GraphType &)
  //    Return the entry node of the graph

  // static ChildIteratorType child_begin(NodeRef)
  // static ChildIteratorType child_end  (NodeRef)
  //    Return iterators that point to the beginning and ending of the child
  //    node list for the specified node.

  // typedef  ...iterator nodes_iterator; - dereference to a NodeRef
  // static nodes_iterator nodes_begin(GraphType *G)
  // static nodes_iterator nodes_end  (GraphType *G)
  //    nodes_iterator/begin/end - Allow iteration over all nodes in the graph

  // typedef EdgeRef           - Type of Edge token in the graph, which should
  //                             be cheap to copy.
  // typedef ChildEdgeIteratorType - Type used to iterate over children edges in
  //                             graph, dereference to a EdgeRef.

  // static ChildEdgeIteratorType child_edge_begin(NodeRef)
  // static ChildEdgeIteratorType child_edge_end(NodeRef)
  //     Return iterators that point to the beginning and ending of the
  //     edge list for the given callgraph node.
  //
  // static NodeRef edge_dest(EdgeRef)
  //     Return the destination node of an edge.

  // static unsigned       size       (GraphType *G)
  //    Return total number of nodes in the graph

  // If anyone tries to use this class without having an appropriate
  // specialization, make an error.  If you get this error, it's because you
  // need to include the appropriate specialization of GraphTraits<> for your
  // graph, or you need to define it for a new graph type. Either that or
  // your argument to XXX_begin(...) is unknown or needs to have the proper .h
  // file #include'd.
  using NodeRef = typename GraphType::UnknownGraphTypeError;
};

// Inverse - This class is used as a little marker class to tell the graph
// iterator to iterate over the graph in a graph defined "Inverse" ordering.
// Not all graphs define an inverse ordering, and if they do, it depends on
// the graph exactly what that is.  Here's an example of usage with the
// df_iterator:
//
// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
// for (; I != E; ++I) { ... }
//
// Which is equivalent to:
// df_iterator<Inverse<Method*>> I = idf_begin(M), E = idf_end(M);
// for (; I != E; ++I) { ... }
//
template <class GraphType>
struct Inverse {
  const GraphType &Graph;

  inline Inverse(const GraphType &G) : Graph(G) {}
};

// Provide a partial specialization of GraphTraits so that the inverse of an
// inverse falls back to the original graph.
template <class T> struct GraphTraits<Inverse<Inverse<T>>> : GraphTraits<T> {};

// Provide iterator ranges for the graph traits nodes and children
template <class GraphType>
iterator_range<typename GraphTraits<GraphType>::nodes_iterator>
nodes(const GraphType &G) {
  return make_range(GraphTraits<GraphType>::nodes_begin(G),
                    GraphTraits<GraphType>::nodes_end(G));
}
template <class GraphType>
iterator_range<typename GraphTraits<Inverse<GraphType>>::nodes_iterator>
inverse_nodes(const GraphType &G) {
  return make_range(GraphTraits<Inverse<GraphType>>::nodes_begin(G),
                    GraphTraits<Inverse<GraphType>>::nodes_end(G));
}

template <class GraphType>
iterator_range<typename GraphTraits<GraphType>::ChildIteratorType>
children(const typename GraphTraits<GraphType>::NodeRef &G) {
  return make_range(GraphTraits<GraphType>::child_begin(G),
                    GraphTraits<GraphType>::child_end(G));
}

template <class GraphType>
iterator_range<typename GraphTraits<Inverse<GraphType>>::ChildIteratorType>
inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
  return make_range(GraphTraits<Inverse<GraphType>>::child_begin(G),
                    GraphTraits<Inverse<GraphType>>::child_end(G));
}

template <class GraphType>
iterator_range<typename GraphTraits<GraphType>::ChildEdgeIteratorType>
children_edges(const typename GraphTraits<GraphType>::NodeRef &G) {
  return make_range(GraphTraits<GraphType>::child_edge_begin(G),
                    GraphTraits<GraphType>::child_edge_end(G));
}

} // end namespace llvm

#endif // LLVM_ADT_GRAPHTRAITS_H
//===- FunctionExtras.h - Function type erasure utilities -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a collection of function (or more generally, callable)
/// type erasure utilities supplementing those provided by the standard library
/// in `<functional>`.
///
/// It provides `unique_function`, which works like `std::function` but supports
/// move-only callable objects and const-qualification.
///
/// Future plans:
/// - Add a `function` that provides ref-qualified support, which doesn't work
///   with `std::function`.
/// - Provide support for specifying multiple signatures to type erase callable
///   objects with an overload set, such as those produced by generic lambdas.
/// - Expand to include a copyable utility that directly replaces std::function
///   but brings the above improvements.
///
/// Note that LLVM's utilities are greatly simplified by not supporting
/// allocators.
///
/// If the standard library ever begins to provide comparable facilities we can
/// consider switching to those.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_FUNCTIONEXTRAS_H
#define LLVM_ADT_FUNCTIONEXTRAS_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/type_traits.h"
#include <cstring>
#include <memory>
#include <type_traits>

namespace llvm {

/// unique_function is a type-erasing functor similar to std::function.
///
/// It can hold move-only function objects, like lambdas capturing unique_ptrs.
/// Accordingly, it is movable but not copyable.
///
/// It supports const-qualification:
/// - unique_function<int() const> has a const operator().
///   It can only hold functions which themselves have a const operator().
/// - unique_function<int()> has a non-const operator().
///   It can hold functions with a non-const operator(), like mutable lambdas.
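///
/// A minimal usage sketch with a move-only capture:
/// \code
///   auto P = std::make_unique<int>(42);
///   unique_function<int()> F = [P = std::move(P)] { return *P; };
///   int V = F(); // V == 42; F can be moved but not copied.
/// \endcode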
template <typename FunctionT> class unique_function;

namespace detail {

template <typename T>
using EnableIfTrivial =
    std::enable_if_t<std::is_trivially_move_constructible<T>::value &&
                     std::is_trivially_destructible<T>::value>;
template <typename CallableT, typename ThisT>
using EnableUnlessSameType =
    std::enable_if_t<!std::is_same<remove_cvref_t<CallableT>, ThisT>::value>;
template <typename CallableT, typename Ret, typename... Params>
using EnableIfCallable = std::enable_if_t<std::disjunction<
    std::is_void<Ret>,
    std::is_same<decltype(std::declval<CallableT>()(std::declval<Params>()...)),
                 Ret>,
    std::is_same<const decltype(std::declval<CallableT>()(
                     std::declval<Params>()...)),
                 Ret>,
    std::is_convertible<decltype(std::declval<CallableT>()(
                            std::declval<Params>()...)),
                        Ret>>::value>;

template <typename ReturnT, typename... ParamTs> class UniqueFunctionBase {
protected:
  static constexpr size_t InlineStorageSize = sizeof(void *) * 3;

  template <typename T, class = void>
  struct IsSizeLessThanThresholdT : std::false_type {};

  template <typename T>
  struct IsSizeLessThanThresholdT<
      T, std::enable_if_t<sizeof(T) <= 2 * sizeof(void *)>> : std::true_type {};

  // Provide a type function to map parameters that won't observe extra copies
  // or moves and which are small enough to likely pass in register to values
  // and all other types to l-value reference types. We use this to compute the
  // types used in our erased call utility to minimize copies and moves unless
  // doing so would force things unnecessarily into memory.
  //
  // The heuristic used is related to common ABI register passing conventions.
  // It doesn't have to be exact though, and in one way it is more strict
  // because we want to still be able to observe either moves *or* copies.
  template <typename T> struct AdjustedParamTBase {
    static_assert(!std::is_reference<T>::value,
                  "references should be handled by template specialization");
    using type =
        std::conditional_t<std::is_trivially_copy_constructible<T>::value &&
                               std::is_trivially_move_constructible<T>::value &&
                               IsSizeLessThanThresholdT<T>::value,
                           T, T &>;
  };

  // This specialization ensures that 'AdjustedParam<V<T>&>' or
  // 'AdjustedParam<V<T>&&>' does not trigger a compile-time error when 'T' is
  // an incomplete type and V a templated type.
  template <typename T> struct AdjustedParamTBase<T &> { using type = T &; };
  template <typename T> struct AdjustedParamTBase<T &&> { using type = T &; };

  template <typename T>
  using AdjustedParamT = typename AdjustedParamTBase<T>::type;

  // The type of the erased function pointer we use as a callback to dispatch to
  // the stored callable when it is trivial to move and destroy.
  using CallPtrT = ReturnT (*)(void *CallableAddr,
                               AdjustedParamT<ParamTs>... Params);
  using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
  using DestroyPtrT = void (*)(void *CallableAddr);

  /// A struct to hold a single trivial callback with sufficient alignment for
  /// our bitpacking.
  struct alignas(8) TrivialCallback {
    CallPtrT CallPtr;
  };

  /// A struct we use to aggregate three callbacks when we need full set of
  /// operations.
  struct alignas(8) NonTrivialCallbacks {
    CallPtrT CallPtr;
    MovePtrT MovePtr;
    DestroyPtrT DestroyPtr;
  };

  // Create a pointer union between either a pointer to a static trivial call
  // pointer in a struct or a pointer to a static struct of the call, move, and
  // destroy pointers.
  using CallbackPointerUnionT =
      PointerUnion<TrivialCallback *, NonTrivialCallbacks *>;

  // The main storage buffer. This will either have a pointer to out-of-line
  // storage or an inline buffer storing the callable.
  union StorageUnionT {
    // For out-of-line storage we keep a pointer to the underlying storage and
    // the size. This is enough to deallocate the memory.
    struct OutOfLineStorageT {
      void *StoragePtr;
      size_t Size;
      size_t Alignment;
    } OutOfLineStorage;
    static_assert(
        sizeof(OutOfLineStorageT) <= InlineStorageSize,
        "Should always use all of the out-of-line storage for inline storage!");

    // For in-line storage, we just provide an aligned character buffer. We
    // provide three pointers worth of storage here.
    // This is mutable as an inlined `const unique_function<void() const>` may
    // still modify its own mutable members.
    mutable std::aligned_storage_t<InlineStorageSize, alignof(void *)>
        InlineStorage;
  } StorageUnion;

  // A compressed pointer to either our dispatching callback or our table of
  // dispatching callbacks and the flag for whether the callable itself is
  // stored inline or not.
  PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;

  bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }

  bool isTrivialCallback() const {
    return isa<TrivialCallback *>(CallbackAndInlineFlag.getPointer());
  }

  CallPtrT getTrivialCallback() const {
    return cast<TrivialCallback *>(CallbackAndInlineFlag.getPointer())->CallPtr;
  }

  NonTrivialCallbacks *getNonTrivialCallbacks() const {
    return cast<NonTrivialCallbacks *>(CallbackAndInlineFlag.getPointer());
  }

  CallPtrT getCallPtr() const {
    return isTrivialCallback() ? getTrivialCallback()
                               : getNonTrivialCallbacks()->CallPtr;
  }

  // These three functions are only const in the narrow sense. They return
  // mutable pointers to function state.
  // This allows unique_function<T const>::operator() to be const, even if the
  // underlying functor may be internally mutable.
  //
  // const callers must ensure they're only used in const-correct ways.
  void *getCalleePtr() const {
    return isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
  }
  void *getInlineStorage() const { return &StorageUnion.InlineStorage; }
  void *getOutOfLineStorage() const {
    return StorageUnion.OutOfLineStorage.StoragePtr;
  }

  size_t getOutOfLineStorageSize() const {
    return StorageUnion.OutOfLineStorage.Size;
  }
  size_t getOutOfLineStorageAlignment() const {
    return StorageUnion.OutOfLineStorage.Alignment;
  }

  void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
    StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
  }

  template <typename CalledAsT>
  static ReturnT CallImpl(void *CallableAddr,
                          AdjustedParamT<ParamTs>... Params) {
    auto &Func = *reinterpret_cast<CalledAsT *>(CallableAddr);
    return Func(std::forward<ParamTs>(Params)...);
  }

  template <typename CallableT>
  static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
    new (LHSCallableAddr)
        CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
  }

  template <typename CallableT>
  static void DestroyImpl(void *CallableAddr) noexcept {
    reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
  }

  // The pointers to call/move/destroy functions are determined for each
  // callable type (and called-as type, which determines the overload chosen).
  // (definitions are out-of-line).

  // By default, we need an object that contains all the different
  // type erased behaviors needed. Create a static instance of the struct type
  // here and each instance will contain a pointer to it.
  // Wrap in a struct to avoid https://gcc.gnu.org/PR71954
  template <typename CallableT, typename CalledAs, typename Enable = void>
  struct CallbacksHolder {
    static NonTrivialCallbacks Callbacks;
  };
  // See if we can create a trivial callback. We need the callable to be
  // trivially moved and trivially destroyed so that we don't have to store
  // type erased callbacks for those operations.
  template <typename CallableT, typename CalledAs>
  struct CallbacksHolder<CallableT, CalledAs, EnableIfTrivial<CallableT>> {
    static TrivialCallback Callbacks;
  };

  // A simple tag type used to pass the called-as type to the constructor.
  template <typename T> struct CalledAs {};

  // Essentially the "main" unique_function constructor, but subclasses
  // provide the qualified type to be used for the call.
  // (We always store a T, even if the call will use a pointer to const T).
  template <typename CallableT, typename CalledAsT>
  UniqueFunctionBase(CallableT Callable, CalledAs<CalledAsT>) {
    bool IsInlineStorage = true;
    void *CallableAddr = getInlineStorage();
    if (sizeof(CallableT) > InlineStorageSize ||
        alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
      IsInlineStorage = false;
      // Allocate out-of-line storage. FIXME: Use an explicit alignment
      // parameter in C++17 mode.
      auto Size = sizeof(CallableT);
      auto Alignment = alignof(CallableT);
      CallableAddr = allocate_buffer(Size, Alignment);
      setOutOfLineStorage(CallableAddr, Size, Alignment);
    }

    // Now move into the storage.
    new (CallableAddr) CallableT(std::move(Callable));
    CallbackAndInlineFlag.setPointerAndInt(
        &CallbacksHolder<CallableT, CalledAsT>::Callbacks, IsInlineStorage);
  }

  ~UniqueFunctionBase() {
    if (!CallbackAndInlineFlag.getPointer())
      return;

    // Cache this value so we don't re-check it after type-erased operations.
    bool IsInlineStorage = isInlineStorage();

    if (!isTrivialCallback())
      getNonTrivialCallbacks()->DestroyPtr(
          IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());

    if (!IsInlineStorage)
      deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
                        getOutOfLineStorageAlignment());
  }

  UniqueFunctionBase(UniqueFunctionBase &&RHS) noexcept {
    // Copy the callback and inline flag.
    CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;

    // If the RHS is empty, just copying the above is sufficient.
    if (!RHS)
      return;

    if (!isInlineStorage()) {
      // The out-of-line case is easiest to move.
      StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
    } else if (isTrivialCallback()) {
      // Move is trivial, just memcpy the bytes across.
      memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
    } else {
      // Non-trivial move, so dispatch to a type-erased implementation.
      getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
                                        RHS.getInlineStorage());
    }

    // Clear the old callback and inline flag to get back to as-if-null.
    RHS.CallbackAndInlineFlag = {};

#ifndef NDEBUG
    // In debug builds, we also scribble across the rest of the storage.
    memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
#endif
  }

  UniqueFunctionBase &operator=(UniqueFunctionBase &&RHS) noexcept {
    if (this == &RHS)
      return *this;

    // Because we don't try to provide any exception safety guarantees we can
    // implement move assignment very simply by first destroying the current
    // object and then move-constructing over top of it.
    this->~UniqueFunctionBase();
    new (this) UniqueFunctionBase(std::move(RHS));
    return *this;
  }

  UniqueFunctionBase() = default;

public:
  explicit operator bool() const {
    return (bool)CallbackAndInlineFlag.getPointer();
  }
};

template <typename R, typename... P>
template <typename CallableT, typename CalledAsT, typename Enable>
typename UniqueFunctionBase<R, P...>::NonTrivialCallbacks UniqueFunctionBase<
    R, P...>::CallbacksHolder<CallableT, CalledAsT, Enable>::Callbacks = {
    &CallImpl<CalledAsT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};

template <typename R, typename... P>
template <typename CallableT, typename CalledAsT>
typename UniqueFunctionBase<R, P...>::TrivialCallback
    UniqueFunctionBase<R, P...>::CallbacksHolder<
        CallableT, CalledAsT, EnableIfTrivial<CallableT>>::Callbacks{
        &CallImpl<CalledAsT>};

} // namespace detail

template <typename R, typename... P>
class unique_function<R(P...)> : public detail::UniqueFunctionBase<R, P...> {
  using Base = detail::UniqueFunctionBase<R, P...>;

public:
  unique_function() = default;
  unique_function(std::nullptr_t) {}
  unique_function(unique_function &&) = default;
  unique_function(const unique_function &) = delete;
  unique_function &operator=(unique_function &&) = default;
  unique_function &operator=(const unique_function &) = delete;

  template <typename CallableT>
  unique_function(
      CallableT Callable,
      detail::EnableUnlessSameType<CallableT, unique_function> * = nullptr,
      detail::EnableIfCallable<CallableT, R, P...> * = nullptr)
      : Base(std::forward<CallableT>(Callable),
             typename Base::template CalledAs<CallableT>{}) {}

  R operator()(P... Params) {
    return this->getCallPtr()(this->getCalleePtr(), Params...);
  }
};

template <typename R, typename... P>
class unique_function<R(P...) const>
    : public detail::UniqueFunctionBase<R, P...> {
  using Base = detail::UniqueFunctionBase<R, P...>;

public:
  unique_function() = default;
  unique_function(std::nullptr_t) {}
  unique_function(unique_function &&) = default;
  unique_function(const unique_function &) = delete;
  unique_function &operator=(unique_function &&) = default;
  unique_function &operator=(const unique_function &) = delete;

  template <typename CallableT>
  unique_function(
      CallableT Callable,
      detail::EnableUnlessSameType<CallableT, unique_function> * = nullptr,
      detail::EnableIfCallable<const CallableT, R, P...> * = nullptr)
      : Base(std::forward<CallableT>(Callable),
             typename Base::template CalledAs<const CallableT>{}) {}

  R operator()(P... Params) const {
    return this->getCallPtr()(this->getCalleePtr(), Params...);
  }
};
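
// Example usage (an illustrative sketch, not part of the header; the lambdas
// and variable names are hypothetical):
//
//   unique_function<int(int)> AddOne = [](int X) { return X + 1; };
//   int Two = AddOne(1);
//
//   // Unlike std::function, move-only callables are supported:
//   auto P = std::make_unique<int>(42);
//   unique_function<int()> ReadP = [P = std::move(P)]() { return *P; };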

} // end namespace llvm

#endif // LLVM_ADT_FUNCTIONEXTRAS_H
//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the ImutAVLTree and ImmutableSet classes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_IMMUTABLESET_H
#define LLVM_ADT_IMMUTABLESET_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <new>
#include <vector>

namespace llvm {

//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Definition.
//===----------------------------------------------------------------------===//

template <typename ImutInfo> class ImutAVLFactory;
template <typename ImutInfo> class ImutIntervalAVLFactory;
template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
template <typename ImutInfo> class ImutAVLTreeGenericIterator;

template <typename ImutInfo >
class ImutAVLTree {
public:
  using key_type_ref = typename ImutInfo::key_type_ref;
  using value_type = typename ImutInfo::value_type;
  using value_type_ref = typename ImutInfo::value_type_ref;
  using Factory = ImutAVLFactory<ImutInfo>;
  using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;

  friend class ImutAVLFactory<ImutInfo>;
  friend class ImutIntervalAVLFactory<ImutInfo>;
  friend class ImutAVLTreeGenericIterator<ImutInfo>;

  //===----------------------------------------------------===//
  // Public Interface.
  //===----------------------------------------------------===//

  /// Return a pointer to the left subtree.  This value
  ///  is NULL if there is no left subtree.
  ImutAVLTree *getLeft() const { return left; }

  /// Return a pointer to the right subtree.  This value is
  ///  NULL if there is no right subtree.
  ImutAVLTree *getRight() const { return right; }

  /// getHeight - Returns the height of the tree.  A tree with no subtrees
  ///  has a height of 1.
  unsigned getHeight() const { return height; }

  /// getValue - Returns the data value associated with the tree node.
  const value_type& getValue() const { return value; }

  /// find - Finds the subtree associated with the specified key value.
  ///  This method returns NULL if no matching subtree is found.
  ImutAVLTree* find(key_type_ref K) {
    ImutAVLTree *T = this;
    while (T) {
      key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
      if (ImutInfo::isEqual(K,CurrentKey))
        return T;
      else if (ImutInfo::isLess(K,CurrentKey))
        T = T->getLeft();
      else
        T = T->getRight();
    }
    return nullptr;
  }

  /// getMaxElement - Find the subtree associated with the highest-ranked
  ///  key value.
  ImutAVLTree* getMaxElement() {
    ImutAVLTree *T = this;
    ImutAVLTree *Right = T->getRight();
    while (Right) { T = Right; Right = T->getRight(); }
    return T;
  }

  /// size - Returns the number of nodes in the tree, which includes
  ///  both leaves and non-leaf nodes.
  unsigned size() const {
    unsigned n = 1;
    if (const ImutAVLTree* L = getLeft())
      n += L->size();
    if (const ImutAVLTree* R = getRight())
      n += R->size();
    return n;
  }

  /// begin - Returns an iterator that iterates over the nodes of the tree
  ///  in an inorder traversal.  The returned iterator thus refers to the
  ///  tree node with the minimum data element.
  iterator begin() const { return iterator(this); }

  /// end - Returns an iterator for the tree that denotes the end of an
  ///  inorder traversal.
  iterator end() const { return iterator(); }

  bool isElementEqual(value_type_ref V) const {
    // Compare the keys.
    if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
                           ImutInfo::KeyOfValue(V)))
      return false;

    // Also compare the data values.
    if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
                               ImutInfo::DataOfValue(V)))
      return false;

    return true;
  }

  bool isElementEqual(const ImutAVLTree* RHS) const {
    return isElementEqual(RHS->getValue());
  }

  /// isEqual - Compares two trees for structural equality and returns true
  ///   if they are equal.  The worst-case performance of this operation is
  ///   linear in the sizes of the trees.
  bool isEqual(const ImutAVLTree& RHS) const {
    if (&RHS == this)
      return true;

    iterator LItr = begin(), LEnd = end();
    iterator RItr = RHS.begin(), REnd = RHS.end();

    while (LItr != LEnd && RItr != REnd) {
      if (&*LItr == &*RItr) {
        LItr.skipSubTree();
        RItr.skipSubTree();
        continue;
      }

      if (!LItr->isElementEqual(&*RItr))
        return false;

      ++LItr;
      ++RItr;
    }

    return LItr == LEnd && RItr == REnd;
  }

  /// isNotEqual - Compares two trees for structural inequality.  Performance
  ///  is the same as isEqual.
  bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }

  /// contains - Returns true if this tree contains a subtree (node) that
  ///  has a data element that matches the specified key.  Complexity
  ///  is logarithmic in the size of the tree.
  bool contains(key_type_ref K) { return (bool) find(K); }

  /// validateTree - A utility method that checks that the balancing and
  ///  ordering invariants of the tree are satisfied.  It is a recursive
  ///  method that returns the height of the tree, which is then consumed
  ///  by the enclosing validateTree call.  External callers should ignore the
  ///  return value.  An invalid tree will cause an assertion to fire in
  ///  a debug build.
  unsigned validateTree() const {
    unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
    unsigned HR = getRight() ? getRight()->validateTree() : 0;
    (void) HL;
    (void) HR;

    assert(getHeight() == ( HL > HR ? HL : HR ) + 1
            && "Height calculation wrong");

    assert((HL > HR ? HL-HR : HR-HL) <= 2
           && "Balancing invariant violated");

    assert((!getLeft() ||
            ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
                             ImutInfo::KeyOfValue(getValue()))) &&
           "Value in left child is not less that current value");

    assert((!getRight() ||
             ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
                              ImutInfo::KeyOfValue(getRight()->getValue()))) &&
           "Current value is not less that value of right child");

    return getHeight();
  }

  //===----------------------------------------------------===//
  // Internal values.
  //===----------------------------------------------------===//

private:
  Factory *factory;
  ImutAVLTree *left;
  ImutAVLTree *right;
  ImutAVLTree *prev = nullptr;
  ImutAVLTree *next = nullptr;

  unsigned height : 28;
  bool IsMutable : 1;
  bool IsDigestCached : 1;
  bool IsCanonicalized : 1;

  value_type value;
  uint32_t digest = 0;
  uint32_t refCount = 0;

  //===----------------------------------------------------===//
  // Internal methods (node manipulation; used by Factory).
  //===----------------------------------------------------===//

private:
  /// ImutAVLTree - Internal constructor that is only called by
  ///   ImutAVLFactory.
  ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
              unsigned height)
    : factory(f), left(l), right(r), height(height), IsMutable(true),
      IsDigestCached(false), IsCanonicalized(false), value(v)
  {
    if (left) left->retain();
    if (right) right->retain();
  }

  /// isMutable - Returns true if the left and right subtree references
  ///  (as well as height) can be changed.  If this method returns false,
  ///  the tree is truly immutable.  Trees returned from an ImutAVLFactory
  ///  object should always have this method return false.  Further, if this
  ///  method returns false for an instance of ImutAVLTree, all subtrees
  ///  will also have this method return false.  The converse is not true.
  bool isMutable() const { return IsMutable; }

  /// hasCachedDigest - Returns true if the digest for this tree is cached.
  ///  This can only be true if the tree is immutable.
  bool hasCachedDigest() const { return IsDigestCached; }

  //===----------------------------------------------------===//
  // Mutating operations.  A tree root can be manipulated as
  // long as its reference has not "escaped" from internal
  // methods of a factory object (see below).  When a tree
  // pointer is externally viewable by client code, the
  // internal "mutable bit" is cleared to mark the tree
  // immutable.  Note that a tree that still has its mutable
  // bit set may have children (subtrees) that are themselves
  // immutable.
  //===----------------------------------------------------===//

  /// markImmutable - Clears the mutable flag for a tree.  After this happens,
  ///   it is an error to call setLeft(), setRight(), and setHeight().
  void markImmutable() {
    assert(isMutable() && "Mutable flag already removed.");
    IsMutable = false;
  }

  /// markedCachedDigest - Marks the digest for this tree as cached.
  void markedCachedDigest() {
    assert(!hasCachedDigest() && "Digest is already cached.");
    IsDigestCached = true;
  }

  /// setHeight - Changes the height of the tree.  Used internally by
  ///  ImutAVLFactory.
  void setHeight(unsigned h) {
    assert(isMutable() && "Only a mutable tree can have its height changed.");
    height = h;
  }

  static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
                                value_type_ref V) {
    uint32_t digest = 0;

    if (L)
      digest += L->computeDigest();

    // Compute digest of stored data.
    FoldingSetNodeID ID;
    ImutInfo::Profile(ID,V);
    digest += ID.ComputeHash();

    if (R)
      digest += R->computeDigest();

    return digest;
  }

  uint32_t computeDigest() {
    // Return the cached digest if it has already been computed.
    if (hasCachedDigest())
    if (hasCachedDigest())
      return digest;

    uint32_t X = computeDigest(getLeft(), getRight(), getValue());
    digest = X;
    markedCachedDigest();
    return X;
  }

  //===----------------------------------------------------===//
  // Reference count operations.
  //===----------------------------------------------------===//

public:
  void retain() { ++refCount; }

  void release() {
    assert(refCount > 0);
    if (--refCount == 0)
      destroy();
  }

  void destroy() {
    if (left)
      left->release();
    if (right)
      right->release();
    if (IsCanonicalized) {
      if (next)
        next->prev = prev;

      if (prev)
        prev->next = next;
      else
        factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
    }

    // We need to clear the mutability bit in case we are
    // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
    IsMutable = false;
    factory->freeNodes.push_back(this);
  }
};
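
// Example (an illustrative sketch; 'Info', 'T', and 'process' are
// hypothetical):
//
//   // Walk the nodes of a tree in order of increasing key.
//   for (ImutAVLTree<Info>::iterator I = T->begin(), E = T->end(); I != E; ++I)
//     process(I->getValue());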

template <typename ImutInfo>
struct IntrusiveRefCntPtrInfo<ImutAVLTree<ImutInfo>> {
  static void retain(ImutAVLTree<ImutInfo> *Tree) { Tree->retain(); }
  static void release(ImutAVLTree<ImutInfo> *Tree) { Tree->release(); }
};

//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Factory class.
//===----------------------------------------------------------------------===//

template <typename ImutInfo >
class ImutAVLFactory {
  friend class ImutAVLTree<ImutInfo>;

  using TreeTy = ImutAVLTree<ImutInfo>;
  using value_type_ref = typename TreeTy::value_type_ref;
  using key_type_ref = typename TreeTy::key_type_ref;
  using CacheTy = DenseMap<unsigned, TreeTy*>;

  CacheTy Cache;
  uintptr_t Allocator;
  std::vector<TreeTy*> createdNodes;
  std::vector<TreeTy*> freeNodes;

  bool ownsAllocator() const {
    return (Allocator & 0x1) == 0;
  }

  BumpPtrAllocator& getAllocator() const {
    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
  }

  //===--------------------------------------------------===//
  // Public interface.
  //===--------------------------------------------------===//

public:
  ImutAVLFactory()
    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}

  ImutAVLFactory(BumpPtrAllocator& Alloc)
    : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}

  ~ImutAVLFactory() {
    if (ownsAllocator()) delete &getAllocator();
  }

  TreeTy* add(TreeTy* T, value_type_ref V) {
    T = add_internal(V,T);
    markImmutable(T);
    recoverNodes();
    return T;
  }

  TreeTy* remove(TreeTy* T, key_type_ref V) {
    T = remove_internal(V,T);
    markImmutable(T);
    recoverNodes();
    return T;
  }

  TreeTy* getEmptyTree() const { return nullptr; }

protected:
  //===--------------------------------------------------===//
  // A bunch of quick helper functions used for reasoning
  // about the properties of trees and their children.
  // These have succinct names so that the balancing code
  // is as terse (and readable) as possible.
  //===--------------------------------------------------===//

  bool            isEmpty(TreeTy* T) const { return !T; }
  unsigned        getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
  TreeTy*         getLeft(TreeTy* T) const { return T->getLeft(); }
  TreeTy*         getRight(TreeTy* T) const { return T->getRight(); }
  value_type_ref  getValue(TreeTy* T) const { return T->value; }

  // Make sure the index is not the Tombstone or Empty key of the DenseMap.
  static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }

  unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
    unsigned hl = getHeight(L);
    unsigned hr = getHeight(R);
    return (hl > hr ? hl : hr) + 1;
  }

  static bool compareTreeWithSection(TreeTy* T,
                                     typename TreeTy::iterator& TI,
                                     typename TreeTy::iterator& TE) {
    typename TreeTy::iterator I = T->begin(), E = T->end();
    for ( ; I!=E ; ++I, ++TI) {
      if (TI == TE || !I->isElementEqual(&*TI))
        return false;
    }
    return true;
  }

  //===--------------------------------------------------===//
  // "createNode" is used to generate new tree roots that link
  // to other trees.  The function may also simply move links
  // in an existing root if that root is still marked mutable.
  // This is necessary because otherwise our balancing code
  // would leak memory as it would create nodes that are
  // then discarded later before the finished tree is
  // returned to the caller.
  //===--------------------------------------------------===//

  TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
    BumpPtrAllocator& A = getAllocator();
    TreeTy* T;
    if (!freeNodes.empty()) {
      T = freeNodes.back();
      freeNodes.pop_back();
      assert(T != L);
      assert(T != R);
    } else {
      T = (TreeTy*) A.Allocate<TreeTy>();
    }
    new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
    createdNodes.push_back(T);
    return T;
  }

  TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
    return createNode(newLeft, getValue(oldTree), newRight);
  }

  void recoverNodes() {
    for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
      TreeTy *N = createdNodes[i];
      if (N->isMutable() && N->refCount == 0)
        N->destroy();
    }
    createdNodes.clear();
  }

  /// balanceTree - Used by add_internal and remove_internal to
  ///  balance a newly created tree.
  TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
    unsigned hl = getHeight(L);
    unsigned hr = getHeight(R);

    if (hl > hr + 2) {
      assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");

      TreeTy *LL = getLeft(L);
      TreeTy *LR = getRight(L);

      if (getHeight(LL) >= getHeight(LR))
        return createNode(LL, L, createNode(LR,V,R));

      assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");

      TreeTy *LRL = getLeft(LR);
      TreeTy *LRR = getRight(LR);

      return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
    }

    if (hr > hl + 2) {
      assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");

      TreeTy *RL = getLeft(R);
      TreeTy *RR = getRight(R);

      if (getHeight(RR) >= getHeight(RL))
        return createNode(createNode(L,V,RL), R, RR);

      assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");

      TreeTy *RLL = getLeft(RL);
      TreeTy *RLR = getRight(RL);

      return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
    }

    return createNode(L,V,R);
  }

  /// add_internal - Creates a new tree that includes the specified
  ///  data and the data from the original tree.  If the original tree
  ///  already contained the data item, the original tree is returned.
  TreeTy* add_internal(value_type_ref V, TreeTy* T) {
    if (isEmpty(T))
      return createNode(T, V, T);
    assert(!T->isMutable());

    key_type_ref K = ImutInfo::KeyOfValue(V);
    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));

    if (ImutInfo::isEqual(K,KCurrent))
      return createNode(getLeft(T), V, getRight(T));
    else if (ImutInfo::isLess(K,KCurrent))
      return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
    else
      return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
  }

  /// remove_internal - Creates a new tree that includes all the data
  ///  from the original tree except the specified data.  If the
  ///  specified data did not exist in the original tree, the original
  ///  tree is returned.
  TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
    if (isEmpty(T))
      return T;

    assert(!T->isMutable());

    key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));

    if (ImutInfo::isEqual(K,KCurrent)) {
      return combineTrees(getLeft(T), getRight(T));
    } else if (ImutInfo::isLess(K,KCurrent)) {
      return balanceTree(remove_internal(K, getLeft(T)),
                         getValue(T), getRight(T));
    } else {
      return balanceTree(getLeft(T), getValue(T),
                         remove_internal(K, getRight(T)));
    }
  }

  TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
    if (isEmpty(L))
      return R;
    if (isEmpty(R))
      return L;
    TreeTy* OldNode;
    TreeTy* newRight = removeMinBinding(R,OldNode);
    return balanceTree(L, getValue(OldNode), newRight);
  }

  TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
    assert(!isEmpty(T));
    if (isEmpty(getLeft(T))) {
      Noderemoved = T;
      return getRight(T);
    }
    return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
                       getValue(T), getRight(T));
  }

  /// markImmutable - Clears the mutable bits of a root and all of its
  ///  descendants.
  void markImmutable(TreeTy* T) {
    if (!T || !T->isMutable())
      return;
    T->markImmutable();
    markImmutable(getLeft(T));
    markImmutable(getRight(T));
  }

public:
  TreeTy *getCanonicalTree(TreeTy *TNew) {
    if (!TNew)
      return nullptr;

    if (TNew->IsCanonicalized)
      return TNew;

    // Search the hashtable for another tree with the same digest, and
    // if we find a collision, compare those trees by their contents.
    unsigned digest = TNew->computeDigest();
    TreeTy *&entry = Cache[maskCacheIndex(digest)];
    do {
      if (!entry)
        break;
      for (TreeTy *T = entry ; T != nullptr; T = T->next) {
        // Compare the Contents('T') with Contents('TNew')
        typename TreeTy::iterator TI = T->begin(), TE = T->end();
        if (!compareTreeWithSection(TNew, TI, TE))
          continue;
        if (TI != TE)
          continue; // T has more contents than TNew.
        // Trees did match!  Return 'T'.
        if (TNew->refCount == 0)
          TNew->destroy();
        return T;
      }
      entry->prev = TNew;
      TNew->next = entry;
    } while (false);

    entry = TNew;
    TNew->IsCanonicalized = true;
    return TNew;
  }
};

//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Iterators.
//===----------------------------------------------------------------------===//

template <typename ImutInfo> class ImutAVLTreeGenericIterator {
  SmallVector<uintptr_t,20> stack;

public:
  using iterator_category = std::bidirectional_iterator_tag;
  using value_type = ImutAVLTree<ImutInfo>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
                   Flags=0x3 };

  using TreeTy = ImutAVLTree<ImutInfo>;

  ImutAVLTreeGenericIterator() = default;
  ImutAVLTreeGenericIterator(const TreeTy *Root) {
    if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
  }

  TreeTy &operator*() const {
    assert(!stack.empty());
    return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags);
  }
  TreeTy *operator->() const { return &**this; }

  uintptr_t getVisitState() const {
    assert(!stack.empty());
    return stack.back() & Flags;
  }

  bool atEnd() const { return stack.empty(); }

  bool atBeginning() const {
    return stack.size() == 1 && getVisitState() == VisitedNone;
  }

  void skipToParent() {
    assert(!stack.empty());
    stack.pop_back();
    if (stack.empty())
      return;
    switch (getVisitState()) {
      case VisitedNone:
        stack.back() |= VisitedLeft;
        break;
      case VisitedLeft:
        stack.back() |= VisitedRight;
        break;
      default:
        llvm_unreachable("Unreachable.");
    }
  }

  bool operator==(const ImutAVLTreeGenericIterator &x) const {
    return stack == x.stack;
  }

  bool operator!=(const ImutAVLTreeGenericIterator &x) const {
    return !(*this == x);
  }

  ImutAVLTreeGenericIterator &operator++() {
    assert(!stack.empty());
    TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
    assert(Current);
    switch (getVisitState()) {
      case VisitedNone:
        if (TreeTy* L = Current->getLeft())
          stack.push_back(reinterpret_cast<uintptr_t>(L));
        else
          stack.back() |= VisitedLeft;
        break;
      case VisitedLeft:
        if (TreeTy* R = Current->getRight())
          stack.push_back(reinterpret_cast<uintptr_t>(R));
        else
          stack.back() |= VisitedRight;
        break;
      case VisitedRight:
        skipToParent();
        break;
      default:
        llvm_unreachable("Unreachable.");
    }
    return *this;
  }

  ImutAVLTreeGenericIterator &operator--() {
    assert(!stack.empty());
    TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
    assert(Current);
    switch (getVisitState()) {
      case VisitedNone:
        stack.pop_back();
        break;
      case VisitedLeft:
        stack.back() &= ~Flags; // Set state to "VisitedNone."
        if (TreeTy* L = Current->getLeft())
          stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
        break;
      case VisitedRight:
        stack.back() &= ~Flags;
        stack.back() |= VisitedLeft;
        if (TreeTy* R = Current->getRight())
          stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
        break;
      default:
        llvm_unreachable("Unreachable.");
    }
    return *this;
  }
};

template <typename ImutInfo> class ImutAVLTreeInOrderIterator {
  using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;

  InternalIteratorTy InternalItr;

public:
  using iterator_category = std::bidirectional_iterator_tag;
  using value_type = ImutAVLTree<ImutInfo>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  using TreeTy = ImutAVLTree<ImutInfo>;

  ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
    if (Root)
      ++*this; // Advance to first element.
  }

  ImutAVLTreeInOrderIterator() : InternalItr() {}

  bool operator==(const ImutAVLTreeInOrderIterator &x) const {
    return InternalItr == x.InternalItr;
  }

  bool operator!=(const ImutAVLTreeInOrderIterator &x) const {
    return !(*this == x);
  }

  TreeTy &operator*() const { return *InternalItr; }
  TreeTy *operator->() const { return &*InternalItr; }

  ImutAVLTreeInOrderIterator &operator++() {
    do ++InternalItr;
    while (!InternalItr.atEnd() &&
           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);

    return *this;
  }

  ImutAVLTreeInOrderIterator &operator--() {
    do --InternalItr;
    while (!InternalItr.atBeginning() &&
           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);

    return *this;
  }

  void skipSubTree() {
    InternalItr.skipToParent();

    while (!InternalItr.atEnd() &&
           InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
      ++InternalItr;
  }
};

/// Generic iterator that wraps a T::TreeTy::iterator and exposes
/// iterator::getValue() on dereference.
template <typename T>
struct ImutAVLValueIterator
    : iterator_adaptor_base<
          ImutAVLValueIterator<T>, typename T::TreeTy::iterator,
          typename std::iterator_traits<
              typename T::TreeTy::iterator>::iterator_category,
          const typename T::value_type> {
  ImutAVLValueIterator() = default;
  explicit ImutAVLValueIterator(typename T::TreeTy *Tree)
      : ImutAVLValueIterator::iterator_adaptor_base(Tree) {}

  typename ImutAVLValueIterator::reference operator*() const {
    return this->I->getValue();
  }
};

//===----------------------------------------------------------------------===//
// Trait classes for Profile information.
//===----------------------------------------------------------------------===//

/// Generic profile template.  The default behavior is to invoke the
/// profile method of an object.  Specializations for primitive integers
/// and generic handling of pointers are provided below.
template <typename T>
struct ImutProfileInfo {
  using value_type = const T;
  using value_type_ref = const T&;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    FoldingSetTrait<T>::Profile(X,ID);
  }
};

/// Profile traits for integers.
template <typename T>
struct ImutProfileInteger {
  using value_type = const T;
  using value_type_ref = const T&;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    ID.AddInteger(X);
  }
};

#define PROFILE_INTEGER_INFO(X)\
template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};

PROFILE_INTEGER_INFO(char)
PROFILE_INTEGER_INFO(unsigned char)
PROFILE_INTEGER_INFO(short)
PROFILE_INTEGER_INFO(unsigned short)
PROFILE_INTEGER_INFO(unsigned)
PROFILE_INTEGER_INFO(signed)
PROFILE_INTEGER_INFO(long)
PROFILE_INTEGER_INFO(unsigned long)
PROFILE_INTEGER_INFO(long long)
PROFILE_INTEGER_INFO(unsigned long long)

#undef PROFILE_INTEGER_INFO

/// Profile traits for booleans.
template <>
struct ImutProfileInfo<bool> {
  using value_type = const bool;
  using value_type_ref = const bool&;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    ID.AddBoolean(X);
  }
};

/// Generic profile trait for pointer types.  We treat pointers as
/// references to unique objects.
template <typename T>
struct ImutProfileInfo<T*> {
  using value_type = const T*;
  using value_type_ref = value_type;

  static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
    ID.AddPointer(X);
  }
};

//===----------------------------------------------------------------------===//
// Trait classes that contain element comparison operators and type
//  definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap.  These
//  inherit from the profile traits (ImutProfileInfo) to include operations
//  for element profiling.
//===----------------------------------------------------------------------===//

/// ImutContainerInfo - Generic definition of comparison operations for
///   elements of immutable containers that defaults to using
///   std::equal_to<> and std::less<> to perform comparison of elements.
template <typename T>
struct ImutContainerInfo : public ImutProfileInfo<T> {
  using value_type = typename ImutProfileInfo<T>::value_type;
  using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
  using key_type = value_type;
  using key_type_ref = value_type_ref;
  using data_type = bool;
  using data_type_ref = bool;

  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
  static data_type_ref DataOfValue(value_type_ref) { return true; }

  static bool isEqual(key_type_ref LHS, key_type_ref RHS) {
    return std::equal_to<key_type>()(LHS,RHS);
  }

  static bool isLess(key_type_ref LHS, key_type_ref RHS) {
    return std::less<key_type>()(LHS,RHS);
  }

  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
};

/// ImutContainerInfo - Specialization for pointer values to treat pointers
///  as references to unique objects.  Pointers are thus compared by
///  their addresses.
template <typename T>
struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
  using value_type = typename ImutProfileInfo<T*>::value_type;
  using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
  using key_type = value_type;
  using key_type_ref = value_type_ref;
  using data_type = bool;
  using data_type_ref = bool;

  static key_type_ref KeyOfValue(value_type_ref D) { return D; }
  static data_type_ref DataOfValue(value_type_ref) { return true; }

  static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }

  static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }

  static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
};

//===----------------------------------------------------------------------===//
// Immutable Set
//===----------------------------------------------------------------------===//

template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSet {
public:
  using value_type = typename ValInfo::value_type;
  using value_type_ref = typename ValInfo::value_type_ref;
  using TreeTy = ImutAVLTree<ValInfo>;

private:
  IntrusiveRefCntPtr<TreeTy> Root;

public:
  /// Constructs a set from a pointer to a tree root.  In general one
  /// should use a Factory object to create sets instead of directly
  /// invoking the constructor, but there are cases where making this
  /// constructor public is useful.
  explicit ImmutableSet(TreeTy *R) : Root(R) {}

  class Factory {
    typename TreeTy::Factory F;
    const bool Canonicalize;

  public:
    Factory(bool canonicalize = true)
      : Canonicalize(canonicalize) {}

    Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
      : F(Alloc), Canonicalize(canonicalize) {}

    Factory(const Factory& RHS) = delete;
    void operator=(const Factory& RHS) = delete;

    /// getEmptySet - Returns an immutable set that contains no elements.
    ImmutableSet getEmptySet() {
      return ImmutableSet(F.getEmptyTree());
    }

    /// add - Creates a new immutable set that contains all of the values
    ///  of the original set with the addition of the specified value.  If
    ///  the original set already included the value, then the original set is
    ///  returned and no memory is allocated.  The time and space complexity
    ///  of this operation is logarithmic in the size of the original set.
    ///  The memory allocated to represent the set is released when the
    ///  factory object that created the set is destroyed.
    [[nodiscard]] ImmutableSet add(ImmutableSet Old, value_type_ref V) {
      TreeTy *NewT = F.add(Old.Root.get(), V);
      return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
    }

    /// remove - Creates a new immutable set that contains all of the values
    ///  of the original set with the exception of the specified value.  If
    ///  the original set did not contain the value, the original set is
    ///  returned and no memory is allocated.  The time and space complexity
    ///  of this operation is logarithmic in the size of the original set.
    ///  The memory allocated to represent the set is released when the
    ///  factory object that created the set is destroyed.
    [[nodiscard]] ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
      TreeTy *NewT = F.remove(Old.Root.get(), V);
      return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
    }

    BumpPtrAllocator& getAllocator() { return F.getAllocator(); }

    typename TreeTy::Factory *getTreeFactory() const {
      return const_cast<typename TreeTy::Factory *>(&F);
    }
  };

  friend class Factory;

  /// Returns true if the set contains the specified value.
  bool contains(value_type_ref V) const {
    return Root ? Root->contains(V) : false;
  }

  bool operator==(const ImmutableSet &RHS) const {
    return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
  }

  bool operator!=(const ImmutableSet &RHS) const {
    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
                            : Root != RHS.Root;
  }

  TreeTy *getRoot() {
    if (Root) { Root->retain(); }
    return Root.get();
  }

  TreeTy *getRootWithoutRetain() const { return Root.get(); }

  /// isEmpty - Return true if the set contains no elements.
  bool isEmpty() const { return !Root; }

  /// isSingleton - Return true if the set contains exactly one element.
  ///   This method runs in constant time.
  bool isSingleton() const { return getHeight() == 1; }

  //===--------------------------------------------------===//
  // Iterators.
  //===--------------------------------------------------===//

  using iterator = ImutAVLValueIterator<ImmutableSet>;

  iterator begin() const { return iterator(Root.get()); }
  iterator end() const { return iterator(); }

  //===--------------------------------------------------===//
  // Utility methods.
  //===--------------------------------------------------===//

  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }

  static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
    ID.AddPointer(S.Root.get());
  }

  void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }

  //===--------------------------------------------------===//
  // For testing.
  //===--------------------------------------------------===//

  void validateTree() const { if (Root) Root->validateTree(); }
};
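
// Example usage (an illustrative sketch; variable names are hypothetical):
//
//   ImmutableSet<int>::Factory F;
//   ImmutableSet<int> S0 = F.getEmptySet();
//   ImmutableSet<int> S1 = F.add(S0, 42);  // S0 itself is left unchanged.
//   assert(S1.contains(42) && !S0.contains(42));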

// NOTE: This may some day replace the current ImmutableSet.
template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSetRef {
public:
  using value_type = typename ValInfo::value_type;
  using value_type_ref = typename ValInfo::value_type_ref;
  using TreeTy = ImutAVLTree<ValInfo>;
  using FactoryTy = typename TreeTy::Factory;

private:
  IntrusiveRefCntPtr<TreeTy> Root;
  FactoryTy *Factory;

public:
  /// Constructs a set from a pointer to a tree root.  In general one
  /// should use a Factory object to create sets instead of directly
  /// invoking the constructor, but there are cases where making this
  /// constructor public is useful.
  ImmutableSetRef(TreeTy *R, FactoryTy *F) : Root(R), Factory(F) {}

  static ImmutableSetRef getEmptySet(FactoryTy *F) {
    return ImmutableSetRef(nullptr, F);
  }

  ImmutableSetRef add(value_type_ref V) {
    return ImmutableSetRef(Factory->add(Root.get(), V), Factory);
  }

  ImmutableSetRef remove(value_type_ref V) {
    return ImmutableSetRef(Factory->remove(Root.get(), V), Factory);
  }

  /// Returns true if the set contains the specified value.
  bool contains(value_type_ref V) const {
    return Root ? Root->contains(V) : false;
  }

  ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
    return ImmutableSet<ValT>(
        canonicalize ? Factory->getCanonicalTree(Root.get()) : Root.get());
  }

  TreeTy *getRootWithoutRetain() const { return Root.get(); }

  bool operator==(const ImmutableSetRef &RHS) const {
    return Root && RHS.Root ? Root->isEqual(*RHS.Root.get()) : Root == RHS.Root;
  }

  bool operator!=(const ImmutableSetRef &RHS) const {
    return Root && RHS.Root ? Root->isNotEqual(*RHS.Root.get())
                            : Root != RHS.Root;
  }

  /// isEmpty - Return true if the set contains no elements.
  bool isEmpty() const { return !Root; }

  /// isSingleton - Return true if the set contains exactly one element.
  ///   This method runs in constant time.
  bool isSingleton() const { return getHeight() == 1; }

  //===--------------------------------------------------===//
  // Iterators.
  //===--------------------------------------------------===//

  using iterator = ImutAVLValueIterator<ImmutableSetRef>;

  iterator begin() const { return iterator(Root.get()); }
  iterator end() const { return iterator(); }

  //===--------------------------------------------------===//
  // Utility methods.
  //===--------------------------------------------------===//

  unsigned getHeight() const { return Root ? Root->getHeight() : 0; }

  static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
    ID.AddPointer(S.Root.get());
  }

  void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }

  //===--------------------------------------------------===//
  // For testing.
  //===--------------------------------------------------===//

  void validateTree() const { if (Root) Root->validateTree(); }
};
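
// Example usage (an illustrative sketch; variable names are hypothetical).
// Unlike ImmutableSet, the factory is carried in the set itself:
//
//   ImmutableSetRef<int>::FactoryTy F;
//   ImmutableSetRef<int> S = ImmutableSetRef<int>::getEmptySet(&F).add(1).add(2);
//   assert(S.contains(1) && !S.contains(3));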

} // end namespace llvm

#endif // LLVM_ADT_IMMUTABLESET_H
//===- llvm/ADT/Identity.h - Provide std::identity from C++20 ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an implementation of std::identity from C++20.
//
// No library is required when using these functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_IDENTITY_H
#define LLVM_ADT_IDENTITY_H


namespace llvm {

// Similar to `std::identity` from C++20.
template <class Ty> struct identity {
  using is_transparent = void;
  using argument_type = Ty;

  Ty &operator()(Ty &self) const {
    return self;
  }
  const Ty &operator()(const Ty &self) const {
    return self;
  }
};
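
// Example usage (an illustrative sketch):
//
//   int X = 7;
//   assert(llvm::identity<int>()(X) == 7);  // Returns its argument unchanged.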

} // end namespace llvm

#endif // LLVM_ADT_IDENTITY_H
//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SmallString class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SMALLSTRING_H
#define LLVM_ADT_SMALLSTRING_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cstddef>

namespace llvm {

/// SmallString - A SmallString is just a SmallVector with methods and accessors
/// that make it work better as a string (e.g. operator+ etc).
template<unsigned InternalLen>
class SmallString : public SmallVector<char, InternalLen> {
public:
  /// Default ctor - Initialize to empty.
  SmallString() = default;

  /// Initialize from a StringRef.
  SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}

  /// Initialize by concatenating a list of StringRefs.
  SmallString(std::initializer_list<StringRef> Refs)
      : SmallVector<char, InternalLen>() {
    this->append(Refs);
  }

  /// Initialize with a range.
  template<typename ItTy>
  SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}

  /// @}
  /// @name String Assignment
  /// @{

  using SmallVector<char, InternalLen>::assign;

  /// Assign from a StringRef.
  void assign(StringRef RHS) {
    SmallVectorImpl<char>::assign(RHS.begin(), RHS.end());
  }

  /// Assign from a list of StringRefs.
  void assign(std::initializer_list<StringRef> Refs) {
    this->clear();
    append(Refs);
  }

  /// @}
  /// @name String Concatenation
  /// @{

  using SmallVector<char, InternalLen>::append;

  /// Append from a StringRef.
  void append(StringRef RHS) {
    SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
  }

  /// Append from a list of StringRefs.
  void append(std::initializer_list<StringRef> Refs) {
    size_t CurrentSize = this->size();
    size_t SizeNeeded = CurrentSize;
    for (const StringRef &Ref : Refs)
      SizeNeeded += Ref.size();
    this->resize_for_overwrite(SizeNeeded);
    for (const StringRef &Ref : Refs) {
      std::copy(Ref.begin(), Ref.end(), this->begin() + CurrentSize);
      CurrentSize += Ref.size();
    }
    assert(CurrentSize == this->size());
  }

  /// @}
  /// @name String Comparison
  /// @{

  /// Check for string equality.  This is more efficient than compare() when
  /// the relative ordering of unequal strings isn't needed.
  bool equals(StringRef RHS) const {
    return str().equals(RHS);
  }

  /// Check for string equality, ignoring case.
  bool equals_insensitive(StringRef RHS) const {
    return str().equals_insensitive(RHS);
  }

  /// compare - Compare two strings; the result is negative, zero, or positive
  /// if this string is lexicographically less than, equal to, or greater than
  /// the \p RHS.
  int compare(StringRef RHS) const {
    return str().compare(RHS);
  }

  /// compare_insensitive - Compare two strings, ignoring case.
  int compare_insensitive(StringRef RHS) const {
    return str().compare_insensitive(RHS);
  }

  /// compare_numeric - Compare two strings, treating sequences of digits as
  /// numbers.
  int compare_numeric(StringRef RHS) const {
    return str().compare_numeric(RHS);
  }

  /// @}
  /// @name String Predicates
  /// @{

  /// startswith - Check if this string starts with the given \p Prefix.
  bool startswith(StringRef Prefix) const {
    return str().startswith(Prefix);
  }

  /// endswith - Check if this string ends with the given \p Suffix.
  bool endswith(StringRef Suffix) const {
    return str().endswith(Suffix);
  }

  /// @}
  /// @name String Searching
  /// @{

  /// find - Search for the first character \p C in the string.
  ///
  /// \return - The index of the first occurrence of \p C, or npos if not
  /// found.
  size_t find(char C, size_t From = 0) const {
    return str().find(C, From);
  }

  /// Search for the first string \p Str in the string.
  ///
  /// \returns The index of the first occurrence of \p Str, or npos if not
  /// found.
  size_t find(StringRef Str, size_t From = 0) const {
    return str().find(Str, From);
  }

  /// Search for the last character \p C in the string.
  ///
  /// \returns The index of the last occurrence of \p C, or npos if not
  /// found.
  size_t rfind(char C, size_t From = StringRef::npos) const {
    return str().rfind(C, From);
  }

  /// Search for the last string \p Str in the string.
  ///
  /// \returns The index of the last occurrence of \p Str, or npos if not
  /// found.
  size_t rfind(StringRef Str) const {
    return str().rfind(Str);
  }

  /// Find the first character in the string that is \p C, or npos if not
  /// found. Same as find.
  size_t find_first_of(char C, size_t From = 0) const {
    return str().find_first_of(C, From);
  }

  /// Find the first character in the string that is in \p Chars, or npos if
  /// not found.
  ///
  /// Complexity: O(size() + Chars.size())
  size_t find_first_of(StringRef Chars, size_t From = 0) const {
    return str().find_first_of(Chars, From);
  }

  /// Find the first character in the string that is not \p C or npos if not
  /// found.
  size_t find_first_not_of(char C, size_t From = 0) const {
    return str().find_first_not_of(C, From);
  }

  /// Find the first character in the string that is not in the string
  /// \p Chars, or npos if not found.
  ///
  /// Complexity: O(size() + Chars.size())
  size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
    return str().find_first_not_of(Chars, From);
  }

  /// Find the last character in the string that is \p C, or npos if not
  /// found.
  size_t find_last_of(char C, size_t From = StringRef::npos) const {
    return str().find_last_of(C, From);
  }

  /// Find the last character in the string that is in \p Chars, or npos if not
  /// found.
  ///
  /// Complexity: O(size() + Chars.size())
  size_t find_last_of(
      StringRef Chars, size_t From = StringRef::npos) const {
    return str().find_last_of(Chars, From);
  }

  /// @}
  /// @name Helpful Algorithms
  /// @{

  /// Return the number of occurrences of \p C in the string.
  size_t count(char C) const {
    return str().count(C);
  }

  /// Return the number of non-overlapped occurrences of \p Str in the
  /// string.
  size_t count(StringRef Str) const {
    return str().count(Str);
  }

  /// @}
  /// @name Substring Operations
  /// @{

  /// Return a reference to the substring from [Start, Start + N).
  ///
  /// \param Start The index of the starting character in the substring; if
  /// the index is npos or greater than the length of the string then the
  /// empty substring will be returned.
  ///
  /// \param N The number of characters to include in the substring. If \p N
  /// exceeds the number of characters remaining in the string, the string
  /// suffix (starting with \p Start) will be returned.
  StringRef substr(size_t Start, size_t N = StringRef::npos) const {
    return str().substr(Start, N);
  }

  /// Return a reference to the substring from [Start, End).
  ///
  /// \param Start The index of the starting character in the substring; if
  /// the index is npos or greater than the length of the string then the
  /// empty substring will be returned.
  ///
  /// \param End The index following the last character to include in the
  /// substring. If this is npos, or less than \p Start, or exceeds the
  /// number of characters remaining in the string, the string suffix
  /// (starting with \p Start) will be returned.
  StringRef slice(size_t Start, size_t End) const {
    return str().slice(Start, End);
  }

  // Extra methods.

  /// Explicit conversion to StringRef.
  StringRef str() const { return StringRef(this->data(), this->size()); }

  // TODO: Make this const, if it's safe...
  const char* c_str() {
    this->push_back(0);
    this->pop_back();
    return this->data();
  }

  /// Implicit conversion to StringRef.
  operator StringRef() const { return str(); }

  explicit operator std::string() const {
    return std::string(this->data(), this->size());
  }

  // Extra operators.
  SmallString &operator=(StringRef RHS) {
    this->assign(RHS);
    return *this;
  }

  SmallString &operator+=(StringRef RHS) {
    this->append(RHS.begin(), RHS.end());
    return *this;
  }
  SmallString &operator+=(char C) {
    this->push_back(C);
    return *this;
  }
};
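
// Example usage (an illustrative sketch; names are hypothetical):
//
//   SmallString<128> Path("include");
//   Path += "/llvm/ADT/SmallString.h";  // Appends in place; no heap
//                                       // allocation while it fits inline.
//   if (Path.endswith(".h"))
//     StringRef Base = Path.substr(Path.rfind('/') + 1);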

} // end namespace llvm

#endif // LLVM_ADT_SMALLSTRING_H
//===- TypeSwitch.h - Switch functionality for RTTI casting -*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///  This file implements the TypeSwitch template, which mimics a switch()
///  statement whose cases are type names.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_TYPESWITCH_H
#define LLVM_ADT_TYPESWITCH_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include <optional>

namespace llvm {
namespace detail {

template <typename DerivedT, typename T> class TypeSwitchBase {
public:
  TypeSwitchBase(const T &value) : value(value) {}
  TypeSwitchBase(TypeSwitchBase &&other) : value(other.value) {}
  ~TypeSwitchBase() = default;

  /// TypeSwitchBase is not copyable.
  TypeSwitchBase(const TypeSwitchBase &) = delete;
  void operator=(const TypeSwitchBase &) = delete;
  void operator=(TypeSwitchBase &&other) = delete;

  /// Invoke a case on the derived class with multiple case types.
  template <typename CaseT, typename CaseT2, typename... CaseTs,
            typename CallableT>
  // This is marked always_inline and nodebug so it doesn't show up in stack
  // traces at -O0 (or other optimization levels).  Large TypeSwitch's are
  // common, are equivalent to a switch, and don't add any value to stack
  // traces.
  LLVM_ATTRIBUTE_ALWAYS_INLINE LLVM_ATTRIBUTE_NODEBUG DerivedT &
  Case(CallableT &&caseFn) {
    DerivedT &derived = static_cast<DerivedT &>(*this);
    return derived.template Case<CaseT>(caseFn)
        .template Case<CaseT2, CaseTs...>(caseFn);
  }

  /// Invoke a case on the derived class, inferring the type of the Case from
  /// the first input of the given callable.
  /// Note: The inference rules for this overload are very simple: strip
  ///       pointers and references.
  template <typename CallableT> DerivedT &Case(CallableT &&caseFn) {
    using Traits = function_traits<std::decay_t<CallableT>>;
    using CaseT = std::remove_cv_t<std::remove_pointer_t<
        std::remove_reference_t<typename Traits::template arg_t<0>>>>;

    DerivedT &derived = static_cast<DerivedT &>(*this);
    return derived.template Case<CaseT>(std::forward<CallableT>(caseFn));
  }

protected:
  /// Trait to check whether `ValueT` provides a 'dyn_cast' method with type
  /// `CastT`.
  template <typename ValueT, typename CastT>
  using has_dyn_cast_t =
      decltype(std::declval<ValueT &>().template dyn_cast<CastT>());

  /// Attempt to dyn_cast the given `value` to `CastT`. This overload is
  /// selected if `value` already has a suitable dyn_cast method.
  template <typename CastT, typename ValueT>
  static decltype(auto) castValue(
      ValueT &&value,
      std::enable_if_t<is_detected<has_dyn_cast_t, ValueT, CastT>::value> * =
          nullptr) {
    return value.template dyn_cast<CastT>();
  }

  /// Attempt to dyn_cast the given `value` to `CastT`. This overload is
  /// selected if llvm::dyn_cast should be used.
  template <typename CastT, typename ValueT>
  static decltype(auto) castValue(
      ValueT &&value,
      std::enable_if_t<!is_detected<has_dyn_cast_t, ValueT, CastT>::value> * =
          nullptr) {
    return dyn_cast<CastT>(value);
  }

  /// The root value we are switching on.
  const T value;
};
} // end namespace detail

/// This class implements a switch-like dispatch statement for a value of 'T'
/// using dyn_cast functionality. Each `Case<T>` takes a callable to be invoked
/// if the root value isa<T>; the callable is invoked with the result of
/// dyn_cast<T>() as a parameter.
///
/// Example:
///  Operation *op = ...;
///  LogicalResult result = TypeSwitch<Operation *, LogicalResult>(op)
///    .Case<ConstantOp>([](ConstantOp op) { ... })
///    .Default([](Operation *op) { ... });
///
template <typename T, typename ResultT = void>
class TypeSwitch : public detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T> {
public:
  using BaseT = detail::TypeSwitchBase<TypeSwitch<T, ResultT>, T>;
  using BaseT::BaseT;
  using BaseT::Case;
  TypeSwitch(TypeSwitch &&other) = default;

  /// Add a case on the given type.
  template <typename CaseT, typename CallableT>
  TypeSwitch<T, ResultT> &Case(CallableT &&caseFn) {
    if (result)
      return *this;

    // Check to see if CaseT applies to 'value'.
    if (auto caseValue = BaseT::template castValue<CaseT>(this->value))
      result.emplace(caseFn(caseValue));
    return *this;
  }

  /// As a default, invoke the given callable with the root value.
  template <typename CallableT>
  [[nodiscard]] ResultT Default(CallableT &&defaultFn) {
    if (result)
      return std::move(*result);
    return defaultFn(this->value);
  }
  /// As a default, return the given value.
  [[nodiscard]] ResultT Default(ResultT defaultResult) {
    if (result)
      return std::move(*result);
    return defaultResult;
  }

  [[nodiscard]] operator ResultT() {
    assert(result && "Fell off the end of a type-switch");
    return std::move(*result);
  }

private:
  /// The pointer to the result of this switch statement, once known,
  /// null before that.
  std::optional<ResultT> result;
};

/// Specialization of TypeSwitch for void returning callables.
template <typename T>
class TypeSwitch<T, void>
    : public detail::TypeSwitchBase<TypeSwitch<T, void>, T> {
public:
  using BaseT = detail::TypeSwitchBase<TypeSwitch<T, void>, T>;
  using BaseT::BaseT;
  using BaseT::Case;
  TypeSwitch(TypeSwitch &&other) = default;

  /// Add a case on the given type.
  template <typename CaseT, typename CallableT>
  TypeSwitch<T, void> &Case(CallableT &&caseFn) {
    if (foundMatch)
      return *this;

    // Check to see if CaseT applies to 'value'.
    if (auto caseValue = BaseT::template castValue<CaseT>(this->value)) {
      caseFn(caseValue);
      foundMatch = true;
    }
    return *this;
  }

  /// As a default, invoke the given callable with the root value.
  template <typename CallableT> void Default(CallableT &&defaultFn) {
    if (!foundMatch)
      defaultFn(this->value);
  }

private:
  /// A flag indicating whether we have already found a match.
  bool foundMatch = false;
};
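// Illustrative usage sketch for the void-returning specialization (an
// addition for exposition, not part of the original header; `Shape`,
// `Circle`, and `Square` are hypothetical types usable with dyn_cast):
//
//   void describe(Shape *shape) {
//     TypeSwitch<Shape *>(shape)
//         .Case<Circle>([](Circle *c) { /* handle circles */ })
//         .Case<Square>([](Square *s) { /* handle squares */ })
//         .Default([](Shape *) { /* fallback */ });
//   }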
} // end namespace llvm

#endif // LLVM_ADT_TYPESWITCH_H
//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
/// These can be used to write iterators that are fail-fast when LLVM is built
/// with asserts enabled.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_EPOCHTRACKER_H
#define LLVM_ADT_EPOCHTRACKER_H

#include "llvm/Config/abi-breaking.h"

#include <cstdint>

namespace llvm {

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
#define LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE

/// A base class for data structure classes wishing to make iterators
/// ("handles") pointing into themselves fail-fast.  When building without
/// asserts, this class is empty and does nothing.
///
/// DebugEpochBase does not by itself track handles pointing into itself.  The
/// expectation is that routines touching the handles will poll on
/// isHandleInSync at appropriate points to assert that the handle they're using
/// is still valid.
///
class DebugEpochBase {
  uint64_t Epoch = 0;

public:
  DebugEpochBase() = default;

  /// Calling incrementEpoch invalidates all handles pointing into the
  /// calling instance.
  void incrementEpoch() { ++Epoch; }

  /// The destructor calls incrementEpoch to make use-after-free bugs
  /// more likely to crash deterministically.
  ~DebugEpochBase() { incrementEpoch(); }

  /// A base class for iterator classes ("handles") that wish to poll for
  /// iterator invalidating modifications in the underlying data structure.
  /// When LLVM is built without asserts, this class is empty and does nothing.
  ///
  /// HandleBase does not track the parent data structure by itself.  It expects
  /// the routines modifying the data structure to call incrementEpoch when they
  /// make an iterator-invalidating modification.
  ///
  class HandleBase {
    const uint64_t *EpochAddress = nullptr;
    uint64_t EpochAtCreation = UINT64_MAX;

  public:
    HandleBase() = default;

    explicit HandleBase(const DebugEpochBase *Parent)
        : EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}

    /// Returns true if the DebugEpochBase this Handle is linked to has
    /// not called incrementEpoch on itself since the creation of this
    /// HandleBase instance.
    bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }

    /// Returns a pointer to the epoch word stored in the data structure
    /// this handle points into.  Can be used to check if two iterators point
    /// into the same data structure.
    const void *getEpochAddress() const { return EpochAddress; }
  };
};
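// Illustrative usage sketch (an addition for exposition, not part of the
// original header; `Container` and its iterator are hypothetical): the
// container bumps the epoch on each invalidating mutation, and iterators
// assert isHandleInSync() before use.
//
//   class Container : public DebugEpochBase {
//   public:
//     void mutate() { incrementEpoch(); /* ...modify elements... */ }
//     class iterator : public DebugEpochBase::HandleBase {
//     public:
//       explicit iterator(const Container *C) : HandleBase(C) {}
//       void check() const { assert(isHandleInSync() && "stale iterator"); }
//     };
//   };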

#else
#ifdef _MSC_VER
#define LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE __declspec(empty_bases)
#else
#define LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE
#endif // _MSC_VER

class DebugEpochBase {
public:
  void incrementEpoch() {}

  class HandleBase {
  public:
    HandleBase() = default;
    explicit HandleBase(const DebugEpochBase *) {}
    bool isHandleInSync() const { return true; }
    const void *getEpochAddress() const { return nullptr; }
  };
};

#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS

} // namespace llvm

#endif
//===- llvm/ADT/ilist_base.h - Intrusive List Base --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ILIST_BASE_H
#define LLVM_ADT_ILIST_BASE_H

#include "llvm/ADT/ilist_node_base.h"
#include <cassert>

namespace llvm {

/// Implementations of list algorithms using ilist_node_base.
template <bool EnableSentinelTracking> class ilist_base {
public:
  using node_base_type = ilist_node_base<EnableSentinelTracking>;

  static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
    node_base_type &Prev = *Next.getPrev();
    N.setNext(&Next);
    N.setPrev(&Prev);
    Prev.setNext(&N);
    Next.setPrev(&N);
  }

  static void removeImpl(node_base_type &N) {
    node_base_type *Prev = N.getPrev();
    node_base_type *Next = N.getNext();
    Next->setPrev(Prev);
    Prev->setNext(Next);

    // Not strictly necessary, but helps catch a class of bugs.
    N.setPrev(nullptr);
    N.setNext(nullptr);
  }

  static void removeRangeImpl(node_base_type &First, node_base_type &Last) {
    node_base_type *Prev = First.getPrev();
    node_base_type *Final = Last.getPrev();
    Last.setPrev(Prev);
    Prev->setNext(&Last);

    // Not strictly necessary, but helps catch a class of bugs.
    First.setPrev(nullptr);
    Final->setNext(nullptr);
  }

  static void transferBeforeImpl(node_base_type &Next, node_base_type &First,
                                 node_base_type &Last) {
    if (&Next == &Last || &First == &Last)
      return;

    // Position cannot be contained in the range to be transferred.
    assert(&Next != &First &&
           // Check for the most common mistake.
           "Insertion point can't be one of the transferred nodes");

    node_base_type &Final = *Last.getPrev();

    // Detach from old list/position.
    First.getPrev()->setNext(&Last);
    Last.setPrev(First.getPrev());

    // Splice [First, Final] into its new list/position.
    node_base_type &Prev = *Next.getPrev();
    Final.setNext(&Next);
    First.setPrev(&Prev);
    Prev.setNext(&First);
    Next.setPrev(&Final);
  }

  template <class T> static void insertBefore(T &Next, T &N) {
    insertBeforeImpl(Next, N);
  }

  template <class T> static void remove(T &N) { removeImpl(N); }
  template <class T> static void removeRange(T &First, T &Last) {
    removeRangeImpl(First, Last);
  }

  template <class T> static void transferBefore(T &Next, T &First, T &Last) {
    transferBeforeImpl(Next, First, Last);
  }
};
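// Illustrative sketch (an addition for exposition, not part of the original
// header): these algorithms assume a circular doubly-linked list in which an
// empty list's sentinel points at itself. Inserting a node before the
// sentinel appends it:
//
//   using NodeT = ilist_node_base<false>;
//   NodeT Sentinel, N;
//   Sentinel.setPrev(&Sentinel);
//   Sentinel.setNext(&Sentinel);
//   ilist_base<false>::insertBefore(Sentinel, N);
//   // Now Sentinel.getNext() == &N and N.getNext() == &Sentinel.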

} // end namespace llvm

#endif // LLVM_ADT_ILIST_BASE_H
//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the PackedVector class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_PACKEDVECTOR_H
#define LLVM_ADT_PACKEDVECTOR_H

#include "llvm/ADT/BitVector.h"
#include <cassert>
#include <limits>

namespace llvm {

template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
class PackedVectorBase;

// This won't be necessary if we can specialize members without specializing
// the parent template.
template <typename T, unsigned BitNum, typename BitVectorTy>
class PackedVectorBase<T, BitNum, BitVectorTy, false> {
protected:
  static T getValue(const BitVectorTy &Bits, unsigned Idx) {
    T val = T();
    for (unsigned i = 0; i != BitNum; ++i)
      val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
    return val;
  }

  static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
    assert((val >> BitNum) == 0 && "value is too big");
    for (unsigned i = 0; i != BitNum; ++i)
      Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
  }
};

template <typename T, unsigned BitNum, typename BitVectorTy>
class PackedVectorBase<T, BitNum, BitVectorTy, true> {
protected:
  static T getValue(const BitVectorTy &Bits, unsigned Idx) {
    T val = T();
    for (unsigned i = 0; i != BitNum-1; ++i)
      val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
    if (Bits[(Idx << (BitNum-1)) + BitNum-1])
      val = ~val;
    return val;
  }

  static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
    if (val < 0) {
      val = ~val;
      Bits.set((Idx << (BitNum-1)) + BitNum-1);
    }
    assert((val >> (BitNum-1)) == 0 && "value is too big");
    for (unsigned i = 0; i != BitNum-1; ++i)
      Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
  }
};

/// Store a vector of values using a specific number of bits for each
/// value. Both signed and unsigned types can be used, e.g.
/// @code
///   PackedVector<signed, 2> vec;
/// @endcode
/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
/// an assertion.
template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
                                            std::numeric_limits<T>::is_signed> {
  BitVectorTy Bits;
  using base = PackedVectorBase<T, BitNum, BitVectorTy,
                                std::numeric_limits<T>::is_signed>;

public:
  class reference {
    PackedVector &Vec;
    const unsigned Idx;

  public:
    reference() = delete;
    reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}

    reference &operator=(T val) {
      Vec.setValue(Vec.Bits, Idx, val);
      return *this;
    }

    operator T() const {
      return Vec.getValue(Vec.Bits, Idx);
    }
  };

  PackedVector() = default;
  explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) {}

  bool empty() const { return Bits.empty(); }

  unsigned size() const { return Bits.size() >> (BitNum - 1); }

  void clear() { Bits.clear(); }

  void resize(unsigned N) { Bits.resize(N << (BitNum - 1)); }

  void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }

  PackedVector &reset() {
    Bits.reset();
    return *this;
  }

  void push_back(T val) {
    resize(size()+1);
    (*this)[size()-1] = val;
  }

  reference operator[](unsigned Idx) {
    return reference(*this, Idx);
  }

  T operator[](unsigned Idx) const {
    return base::getValue(Bits, Idx);
  }

  bool operator==(const PackedVector &RHS) const {
    return Bits == RHS.Bits;
  }

  bool operator!=(const PackedVector &RHS) const {
    return Bits != RHS.Bits;
  }

  PackedVector &operator|=(const PackedVector &RHS) {
    Bits |= RHS.Bits;
    return *this;
  }
};

// Leave BitNum=0 undefined.
template <typename T> class PackedVector<T, 0>;
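// Illustrative usage sketch (an addition for exposition, not part of the
// original header):
//
//   PackedVector<unsigned, 2> Vec; // each element occupies 2 bits: 0..3
//   Vec.push_back(3);
//   Vec.push_back(1);
//   unsigned First = Vec[0]; // reads back 3
//   Vec[1] = 2;              // the proxy reference writes into the bits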

} // end namespace llvm

#endif // LLVM_ADT_PACKEDVECTOR_H
//===- llvm/ADT/STLFunctionalExtras.h - Extras for <functional> -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some extensions to <functional>.
//
// No library is required when using these functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STLFUNCTIONALEXTRAS_H
#define LLVM_ADT_STLFUNCTIONALEXTRAS_H

#include "llvm/ADT/STLForwardCompat.h"

#include <cstdint>
#include <type_traits>
#include <utility>

namespace llvm {

//===----------------------------------------------------------------------===//
//     Extra additions to <functional>
//===----------------------------------------------------------------------===//

/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
///
/// This class does not own the callable, so it is not in general safe to store
/// a function_ref.
template<typename Fn> class function_ref;

template<typename Ret, typename ...Params>
class function_ref<Ret(Params...)> {
  Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
  intptr_t callable;

  template<typename Callable>
  static Ret callback_fn(intptr_t callable, Params ...params) {
    return (*reinterpret_cast<Callable*>(callable))(
        std::forward<Params>(params)...);
  }

public:
  function_ref() = default;
  function_ref(std::nullptr_t) {}

  template <typename Callable>
  function_ref(
      Callable &&callable,
      // This is not the copy-constructor.
      std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
                                     function_ref>::value> * = nullptr,
      // Functor must be callable and return a suitable type.
      std::enable_if_t<std::is_void<Ret>::value ||
                       std::is_convertible<decltype(std::declval<Callable>()(
                                               std::declval<Params>()...)),
                                           Ret>::value> * = nullptr)
      : callback(callback_fn<std::remove_reference_t<Callable>>),
        callable(reinterpret_cast<intptr_t>(&callable)) {}

  Ret operator()(Params ...params) const {
    return callback(callable, std::forward<Params>(params)...);
  }

  explicit operator bool() const { return callback; }
};
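// Illustrative usage sketch (an addition for exposition, not part of the
// original header): function_ref suits callback parameters that are only
// invoked before the callee returns, since it does not own the callable.
//
//   static int apply(function_ref<int(int)> Fn, int X) { return Fn(X); }
//   int Doubled = apply([](int V) { return 2 * V; }, 21); // Doubled == 42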

} // end namespace llvm

#endif // LLVM_ADT_STLFUNCTIONALEXTRAS_H
//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a class to represent arbitrary precision
/// integral constant values and operations on them.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_APINT_H
#define LLVM_ADT_APINT_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
#include <cstring>
#include <optional>
#include <utility>

namespace llvm {
class FoldingSetNodeID;
class StringRef;
class hash_code;
class raw_ostream;
struct Align;

template <typename T> class SmallVectorImpl;
template <typename T> class ArrayRef;
template <typename T, typename Enable> struct DenseMapInfo;

class APInt;

inline APInt operator-(APInt);

//===----------------------------------------------------------------------===//
//                              APInt Class
//===----------------------------------------------------------------------===//

/// Class for arbitrary precision integers.
///
/// APInt is a functional replacement for common-case unsigned integer types
/// like "unsigned", "unsigned long" or "uint64_t", but it also allows
/// non-byte-width integer sizes and large integer value types such as 3-bit,
/// 15-bit, or more than 64 bits of precision. APInt provides a variety of
/// arithmetic operators
/// and methods to manipulate integer values of any bit-width. It supports both
/// the typical integer arithmetic and comparison operations as well as bitwise
/// manipulation.
///
/// The class has several invariants worth noting:
///   * All bit, byte, and word positions are zero-based.
///   * Once the bit width is set, it doesn't change except by the Truncate,
///     SignExtend, or ZeroExtend operations.
///   * All binary operators must be on APInt instances of the same bit width.
///     Attempting to use these operators on instances with different bit
///     widths will yield an assertion.
///   * The value is stored canonically as an unsigned value. For operations
///     where it makes a difference, there are both signed and unsigned variants
///     of the operation. For example, sdiv and udiv. However, because the bit
///     widths must be the same, operations such as Mul and Add produce the same
///     results regardless of whether the values are interpreted as signed or
///     not.
///   * In general, the class tries to follow the style of computation that LLVM
///     uses in its IR. This simplifies its use for LLVM.
///   * APInt supports zero-bit-width values, but operations that require bits
///     are not defined on it (e.g. you cannot ask for the sign of a zero-bit
///     integer).  This means that operations like zero extension and logical
///     shifts are defined, but sign extension and ashr are not.  Zero bit values
///     compare and hash equal to themselves, and countLeadingZeros returns 0.
///
class [[nodiscard]] APInt {
public:
  typedef uint64_t WordType;

  /// This enum is used to hold the constants we needed for APInt.
  enum : unsigned {
    /// Byte size of a word.
    APINT_WORD_SIZE = sizeof(WordType),
    /// Bits in a word.
    APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
  };

  enum class Rounding {
    DOWN,
    TOWARD_ZERO,
    UP,
  };

  static constexpr WordType WORDTYPE_MAX = ~WordType(0);

  /// \name Constructors
  /// @{

  /// Create a new APInt of numBits width, initialized as val.
  ///
  /// If isSigned is true then val is treated as if it were a signed value
  /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
  /// will be done. Otherwise, no sign extension occurs (high order bits beyond
  /// the range of val are zero filled).
  ///
  /// \param numBits the bit width of the constructed APInt
  /// \param val the initial value of the APInt
  /// \param isSigned how to treat signedness of val
  APInt(unsigned numBits, uint64_t val, bool isSigned = false)
      : BitWidth(numBits) {
    if (isSingleWord()) {
      U.VAL = val;
      clearUnusedBits();
    } else {
      initSlowCase(val, isSigned);
    }
  }

  /// Construct an APInt of numBits width, initialized as bigVal[].
  ///
  /// Note that bigVal.size() can be smaller or larger than the corresponding
  /// bit width but any extraneous bits will be dropped.
  ///
  /// \param numBits the bit width of the constructed APInt
  /// \param bigVal a sequence of words to form the initial value of the APInt
  APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);

  /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
  /// deprecated because this constructor is prone to ambiguity with the
  /// APInt(unsigned, uint64_t, bool) constructor.
  ///
  /// If this overload is ever deleted, care should be taken to prevent calls
  /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
  /// constructor.
  APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);

  /// Construct an APInt from a string representation.
  ///
  /// This constructor interprets the string \p str in the given radix. The
  /// interpretation stops when the first character that is not suitable for the
  /// radix is encountered, or the end of the string. Acceptable radix values
  /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
  /// string to require more bits than numBits.
  ///
  /// \param numBits the bit width of the constructed APInt
  /// \param str the string to be interpreted
  /// \param radix the radix to use for the conversion
  APInt(unsigned numBits, StringRef str, uint8_t radix);
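  // Illustrative sketch (an addition for exposition, not part of the original
  // header): parsing a hexadecimal string.
  //
  //   APInt FromHex(16, "ff", 16); // FromHex == APInt(16, 255)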

  /// Default constructor that creates an APInt with a 1-bit zero value.
  explicit APInt() { U.VAL = 0; }

  /// Copy Constructor.
  APInt(const APInt &that) : BitWidth(that.BitWidth) {
    if (isSingleWord())
      U.VAL = that.U.VAL;
    else
      initSlowCase(that);
  }

  /// Move Constructor.
  APInt(APInt &&that) : BitWidth(that.BitWidth) {
    memcpy(&U, &that.U, sizeof(U));
    that.BitWidth = 0;
  }

  /// Destructor.
  ~APInt() {
    if (needsCleanup())
      delete[] U.pVal;
  }

  /// @}
  /// \name Value Generators
  /// @{

  /// Get the '0' value for the specified bit-width.
  static APInt getZero(unsigned numBits) { return APInt(numBits, 0); }

  /// Return an APInt zero bits wide.
  static APInt getZeroWidth() { return getZero(0); }

  /// Gets maximum unsigned value of APInt for specific bit width.
  static APInt getMaxValue(unsigned numBits) { return getAllOnes(numBits); }

  /// Gets maximum signed value of APInt for a specific bit width.
  static APInt getSignedMaxValue(unsigned numBits) {
    APInt API = getAllOnes(numBits);
    API.clearBit(numBits - 1);
    return API;
  }

  /// Gets minimum unsigned value of APInt for a specific bit width.
  static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }

  /// Gets minimum signed value of APInt for a specific bit width.
  static APInt getSignedMinValue(unsigned numBits) {
    APInt API(numBits, 0);
    API.setBit(numBits - 1);
    return API;
  }

  /// Get the SignMask for a specific bit width.
  ///
  /// This is just a wrapper function of getSignedMinValue(), and it helps code
  /// readability when we want to get a SignMask.
  static APInt getSignMask(unsigned BitWidth) {
    return getSignedMinValue(BitWidth);
  }

  /// Return an APInt of a specified width with all bits set.
  static APInt getAllOnes(unsigned numBits) {
    return APInt(numBits, WORDTYPE_MAX, true);
  }

  /// Return an APInt with exactly one bit set in the result.
  static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
    APInt Res(numBits, 0);
    Res.setBit(BitNo);
    return Res;
  }

  /// Get a value with a block of bits set.
  ///
  /// Constructs an APInt value that has a contiguous range of bits set. The
  /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
  /// bits will be zero. For example, with parameters(32, 0, 16) you would get
  /// 0x0000FFFF. Please call getBitsSetWithWrap if \p loBit may be greater than
  /// \p hiBit.
  ///
  /// \param numBits the intended bit width of the result
  /// \param loBit the index of the lowest bit set.
  /// \param hiBit the index of the highest bit set.
  ///
  /// \returns An APInt value with the requested bits set.
  static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
    APInt Res(numBits, 0);
    Res.setBits(loBit, hiBit);
    return Res;
  }

  /// Wrap version of getBitsSet.
  /// If \p hiBit is bigger than \p loBit, this is the same as getBitsSet.
  /// If \p hiBit is not bigger than \p loBit, the set bits "wrap". For example,
  /// with parameters (32, 28, 4), you would get 0xF000000F.
  /// If \p hiBit is equal to \p loBit, you would get a result with all bits
  /// set.
  static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit,
                                  unsigned hiBit) {
    APInt Res(numBits, 0);
    Res.setBitsWithWrap(loBit, hiBit);
    return Res;
  }

  /// Constructs an APInt value that has a contiguous range of bits set. The
  /// bits from loBit (inclusive) to numBits (exclusive) will be set. All other
  /// bits will be zero. For example, with parameters (32, 12) you would get
  /// 0xFFFFF000.
  ///
  /// \param numBits the intended bit width of the result
  /// \param loBit the index of the lowest bit to set.
  ///
  /// \returns An APInt value with the requested bits set.
  static APInt getBitsSetFrom(unsigned numBits, unsigned loBit) {
    APInt Res(numBits, 0);
    Res.setBitsFrom(loBit);
    return Res;
  }

  /// Constructs an APInt value that has the top hiBitsSet bits set.
  ///
  /// \param numBits the bitwidth of the result
  /// \param hiBitsSet the number of high-order bits set in the result.
  static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
    APInt Res(numBits, 0);
    Res.setHighBits(hiBitsSet);
    return Res;
  }

  /// Constructs an APInt value that has the bottom loBitsSet bits set.
  ///
  /// \param numBits the bitwidth of the result
  /// \param loBitsSet the number of low-order bits set in the result.
  static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
    APInt Res(numBits, 0);
    Res.setLowBits(loBitsSet);
    return Res;
  }

  /// Return a value containing V broadcasted over NewLen bits.
  static APInt getSplat(unsigned NewLen, const APInt &V);

  /// @}
  /// \name Value Tests
  /// @{

  /// Determine if this APInt just has one word to store value.
  ///
  /// \returns true if the number of bits <= 64, false otherwise.
  bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }

  /// Determine sign of this APInt.
  ///
  /// This tests the high bit of this APInt to determine if it is set.
  ///
  /// \returns true if this APInt is negative, false otherwise
  bool isNegative() const { return (*this)[BitWidth - 1]; }

  /// Determine if this APInt Value is non-negative (>= 0)
  ///
  /// This tests the high bit of the APInt to determine if it is unset.
  bool isNonNegative() const { return !isNegative(); }

  /// Determine if sign bit of this APInt is set.
  ///
  /// This tests the high bit of this APInt to determine if it is set.
  ///
  /// \returns true if this APInt has its sign bit set, false otherwise.
  bool isSignBitSet() const { return (*this)[BitWidth - 1]; }

  /// Determine if sign bit of this APInt is clear.
  ///
  /// This tests the high bit of this APInt to determine if it is clear.
  ///
  /// \returns true if this APInt has its sign bit clear, false otherwise.
  bool isSignBitClear() const { return !isSignBitSet(); }

  /// Determine if this APInt Value is positive.
  ///
  /// This tests if the value of this APInt is positive (> 0). Note
  /// that 0 is not a positive value.
  ///
  /// \returns true if this APInt is positive.
  bool isStrictlyPositive() const { return isNonNegative() && !isZero(); }

  /// Determine if this APInt Value is non-positive (<= 0).
  ///
  /// \returns true if this APInt is non-positive.
  bool isNonPositive() const { return !isStrictlyPositive(); }

  /// Determine if this APInt Value only has the specified bit set.
  ///
  /// \returns true if this APInt only has the specified bit set.
  bool isOneBitSet(unsigned BitNo) const {
    return (*this)[BitNo] && popcount() == 1;
  }

  /// Determine if all bits are set.  This is true for zero-width values.
  bool isAllOnes() const {
    if (BitWidth == 0)
      return true;
    if (isSingleWord())
      return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
    return countTrailingOnesSlowCase() == BitWidth;
  }

  /// Determine if this value is zero, i.e. all bits are clear.
  bool isZero() const {
    if (isSingleWord())
      return U.VAL == 0;
    return countLeadingZerosSlowCase() == BitWidth;
  }

  /// Determine if this is a value of 1.
  ///
  /// This checks to see if the value of this APInt is one.
  bool isOne() const {
    if (isSingleWord())
      return U.VAL == 1;
    return countLeadingZerosSlowCase() == BitWidth - 1;
  }

  /// Determine if this is the largest unsigned value.
  ///
  /// This checks to see if the value of this APInt is the maximum unsigned
  /// value for the APInt's bit width.
  bool isMaxValue() const { return isAllOnes(); }

  /// Determine if this is the largest signed value.
  ///
  /// This checks to see if the value of this APInt is the maximum signed
  /// value for the APInt's bit width.
  bool isMaxSignedValue() const {
    if (isSingleWord()) {
      assert(BitWidth && "zero width values not allowed");
      return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
    }
    return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
  }

  /// Determine if this is the smallest unsigned value.
  ///
  /// This checks to see if the value of this APInt is the minimum unsigned
  /// value for the APInt's bit width.
  bool isMinValue() const { return isZero(); }

  /// Determine if this is the smallest signed value.
  ///
  /// This checks to see if the value of this APInt is the minimum signed
  /// value for the APInt's bit width.
  bool isMinSignedValue() const {
    if (isSingleWord()) {
      assert(BitWidth && "zero width values not allowed");
      return U.VAL == (WordType(1) << (BitWidth - 1));
    }
    return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
  }

  /// Check if this APInt has an N-bit unsigned integer value.
  bool isIntN(unsigned N) const { return getActiveBits() <= N; }

  /// Check if this APInt has an N-bit signed integer value.
  bool isSignedIntN(unsigned N) const { return getSignificantBits() <= N; }

  /// Check if this APInt's value is a power of two greater than zero.
  ///
  /// \returns true if the argument APInt value is a power of two > 0.
  bool isPowerOf2() const {
    if (isSingleWord()) {
      assert(BitWidth && "zero width values not allowed");
      return isPowerOf2_64(U.VAL);
    }
    return countPopulationSlowCase() == 1;
  }

  /// Check if this APInt's negated value is a power of two greater than zero.
  bool isNegatedPowerOf2() const {
    assert(BitWidth && "zero width values not allowed");
    if (isNonNegative())
      return false;
    // NegatedPowerOf2 - shifted mask in the top bits.
    unsigned LO = countl_one();
    unsigned TZ = countr_zero();
    return (LO + TZ) == BitWidth;
  }

  /// Checks if this APInt (interpreted as an address) is aligned to the
  /// provided value.
  bool isAligned(Align A) const;

  /// Check if the APInt's value is returned by getSignMask.
  ///
  /// \returns true if this is the value returned by getSignMask.
  bool isSignMask() const { return isMinSignedValue(); }

  /// Convert APInt to a boolean value.
  ///
  /// This converts the APInt to a boolean value as a test against zero.
  bool getBoolValue() const { return !isZero(); }

  /// If this value is smaller than the specified limit, return it, otherwise
  /// return the limit value.  This causes the value to saturate to the limit.
  uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) const {
    return ugt(Limit) ? Limit : getZExtValue();
  }
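  // Illustrative sketch (an addition for exposition, not part of the original
  // header):
  //
  //   uint64_t L = APInt(32, 500).getLimitedValue(255); // L == 255 (saturated)
  //   uint64_t M = APInt(32, 100).getLimitedValue(255); // M == 100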

  /// Check if the APInt consists of a repeated bit pattern.
  ///
  /// e.g. 0x01010101 satisfies isSplat(8).
  /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
  /// width without remainder.
  bool isSplat(unsigned SplatSizeInBits) const;

  /// \returns true if this APInt value is a sequence of \p numBits ones
  /// starting at the least significant bit with the remainder zero.
  bool isMask(unsigned numBits) const {
    assert(numBits != 0 && "numBits must be non-zero");
    assert(numBits <= BitWidth && "numBits out of range");
    if (isSingleWord())
      return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits));
    unsigned Ones = countTrailingOnesSlowCase();
    return (numBits == Ones) &&
           ((Ones + countLeadingZerosSlowCase()) == BitWidth);
  }

  /// \returns true if this APInt is a non-empty sequence of ones starting at
  /// the least significant bit with the remainder zero.
  /// Ex. isMask(0x0000FFFFU) == true.
  bool isMask() const {
    if (isSingleWord())
      return isMask_64(U.VAL);
    unsigned Ones = countTrailingOnesSlowCase();
    return (Ones > 0) && ((Ones + countLeadingZerosSlowCase()) == BitWidth);
  }
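  // Illustrative sketch (an addition for exposition, not part of the original
  // header):
  //
  //   APInt V(32, 0x0000FFFF);
  //   bool A = V.isMask(16); // true: exactly the 16 low bits are set
  //   bool B = V.isMask();   // true: a non-empty run of low ones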

  /// Return true if this APInt value contains a non-empty sequence of ones with
  /// the remainder zero.
  bool isShiftedMask() const {
    if (isSingleWord())
      return isShiftedMask_64(U.VAL);
    unsigned Ones = countPopulationSlowCase();
    unsigned LeadZ = countLeadingZerosSlowCase();
    return (Ones + LeadZ + countr_zero()) == BitWidth;
  }

  /// Return true if this APInt value contains a non-empty sequence of ones with
  /// the remainder zero. If true, \p MaskIdx will specify the index of the
  /// lowest set bit and \p MaskLen is updated to specify the length of the
  /// mask, else neither are updated.
  bool isShiftedMask(unsigned &MaskIdx, unsigned &MaskLen) const {
    if (isSingleWord())
      return isShiftedMask_64(U.VAL, MaskIdx, MaskLen);
    unsigned Ones = countPopulationSlowCase();
    unsigned LeadZ = countLeadingZerosSlowCase();
    unsigned TrailZ = countTrailingZerosSlowCase();
    if ((Ones + LeadZ + TrailZ) != BitWidth)
      return false;
    MaskLen = Ones;
    MaskIdx = TrailZ;
    return true;
  }
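  // Illustrative sketch (an addition for exposition, not part of the original
  // header):
  //
  //   APInt M(32, 0x00FF0000);
  //   unsigned Idx, Len;
  //   bool Shifted = M.isShiftedMask(Idx, Len); // true; Idx == 16, Len == 8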

  /// Compute an APInt containing numBits highbits from this APInt.
  ///
  /// Get an APInt with the same BitWidth as this APInt, just zero mask the low
  /// bits and right shift to the least significant bit.
  ///
  /// \returns the high "numBits" bits of this APInt.
  APInt getHiBits(unsigned numBits) const;

  /// Compute an APInt containing numBits lowbits from this APInt.
  ///
  /// Get an APInt with the same BitWidth as this APInt, just zero mask the high
  /// bits.
  ///
  /// \returns the low "numBits" bits of this APInt.
  APInt getLoBits(unsigned numBits) const;

  /// Determine if two APInts have the same value, after zero-extending
  /// one of them (if needed!) to ensure that the bit-widths match.
  static bool isSameValue(const APInt &I1, const APInt &I2) {
    if (I1.getBitWidth() == I2.getBitWidth())
      return I1 == I2;

    if (I1.getBitWidth() > I2.getBitWidth())
      return I1 == I2.zext(I1.getBitWidth());

    return I1.zext(I2.getBitWidth()) == I2;
  }
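  // Illustrative sketch (an addition for exposition, not part of the original
  // header): mismatched widths are reconciled by zero-extension.
  //
  //   bool Same = APInt::isSameValue(APInt(8, 5), APInt(32, 5)); // true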

  /// Overload to compute a hash_code for an APInt value.
  friend hash_code hash_value(const APInt &Arg);

  /// This function returns a pointer to the internal storage of the APInt.
  /// This is useful for writing out the APInt in binary form without any
  /// conversions.
  const uint64_t *getRawData() const {
    if (isSingleWord())
      return &U.VAL;
    return &U.pVal[0];
  }

  /// @}
  /// \name Unary Operators
  /// @{

  /// Postfix increment operator.  Increment *this by 1.
  ///
  /// \returns a new APInt value representing the original value of *this.
  APInt operator++(int) {
    APInt API(*this);
    ++(*this);
    return API;
  }

  /// Prefix increment operator.
  ///
  /// \returns *this incremented by one
  APInt &operator++();

  /// Postfix decrement operator. Decrement *this by 1.
  ///
  /// \returns a new APInt value representing the original value of *this.
  APInt operator--(int) {
    APInt API(*this);
    --(*this);
    return API;
  }

  /// Prefix decrement operator.
  ///
  /// \returns *this decremented by one.
  APInt &operator--();

  /// Logical negation operation on this APInt. Returns true if this APInt is
  /// zero, like a normal integer.
  bool operator!() const { return isZero(); }

  /// @}
  /// \name Assignment Operators
  /// @{

  /// Copy assignment operator.
  ///
  /// \returns *this after assignment of RHS.
  APInt &operator=(const APInt &RHS) {
    // The common case (both source and dest being inline) doesn't require
    // allocation or deallocation.
    if (isSingleWord() && RHS.isSingleWord()) {
      U.VAL = RHS.U.VAL;
      BitWidth = RHS.BitWidth;
      return *this;
    }

    assignSlowCase(RHS);
    return *this;
  }

  /// Move assignment operator.
  APInt &operator=(APInt &&that) {
#ifdef EXPENSIVE_CHECKS
    // Some std::shuffle implementations still do self-assignment.
    if (this == &that)
      return *this;
#endif
    assert(this != &that && "Self-move not supported");
    if (!isSingleWord())
      delete[] U.pVal;

    // Use memcpy so that type based alias analysis sees both VAL and pVal
    // as modified.
    memcpy(&U, &that.U, sizeof(U));

    BitWidth = that.BitWidth;
    that.BitWidth = 0;
    return *this;
  }

  /// Assignment operator.
  ///
  /// The RHS value is assigned to *this. If the significant bits in RHS exceed
  /// the bit width, the excess bits are truncated. If the bit width is larger
  /// than 64, the value is zero filled in the unspecified high order bits.
  ///
  /// \returns *this after assignment of RHS value.
  APInt &operator=(uint64_t RHS) {
    if (isSingleWord()) {
      U.VAL = RHS;
      return clearUnusedBits();
    }
    U.pVal[0] = RHS;
    memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
    return *this;
  }

  /// Bitwise AND assignment operator.
  ///
  /// Performs a bitwise AND operation on this APInt and RHS. The result is
  /// assigned to *this.
  ///
  /// \returns *this after ANDing with RHS.
  APInt &operator&=(const APInt &RHS) {
    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
    if (isSingleWord())
      U.VAL &= RHS.U.VAL;
    else
      andAssignSlowCase(RHS);
    return *this;
  }

  /// Bitwise AND assignment operator.
  ///
  /// Performs a bitwise AND operation on this APInt and RHS. RHS is
  /// logically zero-extended or truncated to match the bit-width of
  /// the LHS.
  APInt &operator&=(uint64_t RHS) {
    if (isSingleWord()) {
      U.VAL &= RHS;
      return *this;
    }
    U.pVal[0] &= RHS;
    memset(U.pVal + 1, 0, (getNumWords() - 1) * APINT_WORD_SIZE);
    return *this;
  }

  /// Bitwise OR assignment operator.
  ///
  /// Performs a bitwise OR operation on this APInt and RHS. The result is
  /// assigned to *this.
  ///
  /// \returns *this after ORing with RHS.
  APInt &operator|=(const APInt &RHS) {
    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
    if (isSingleWord())
      U.VAL |= RHS.U.VAL;
    else
      orAssignSlowCase(RHS);
    return *this;
  }

  /// Bitwise OR assignment operator.
  ///
  /// Performs a bitwise OR operation on this APInt and RHS. RHS is
  /// logically zero-extended or truncated to match the bit-width of
  /// the LHS.
  APInt &operator|=(uint64_t RHS) {
    if (isSingleWord()) {
      U.VAL |= RHS;
      return clearUnusedBits();
    }
    U.pVal[0] |= RHS;
    return *this;
  }

  /// Bitwise XOR assignment operator.
  ///
  /// Performs a bitwise XOR operation on this APInt and RHS. The result is
  /// assigned to *this.
  ///
  /// \returns *this after XORing with RHS.
  APInt &operator^=(const APInt &RHS) {
    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
    if (isSingleWord())
      U.VAL ^= RHS.U.VAL;
    else
      xorAssignSlowCase(RHS);
    return *this;
  }

  /// Bitwise XOR assignment operator.
  ///
  /// Performs a bitwise XOR operation on this APInt and RHS. RHS is
  /// logically zero-extended or truncated to match the bit-width of
  /// the LHS.
  APInt &operator^=(uint64_t RHS) {
    if (isSingleWord()) {
      U.VAL ^= RHS;
      return clearUnusedBits();
    }
    U.pVal[0] ^= RHS;
    return *this;
  }

  /// Multiplication assignment operator.
  ///
  /// Multiplies this APInt by RHS and assigns the result to *this.
  ///
  /// \returns *this
  APInt &operator*=(const APInt &RHS);
  APInt &operator*=(uint64_t RHS);

  /// Addition assignment operator.
  ///
  /// Adds RHS to *this and assigns the result to *this.
  ///
  /// \returns *this
  APInt &operator+=(const APInt &RHS);
  APInt &operator+=(uint64_t RHS);

  /// Subtraction assignment operator.
  ///
  /// Subtracts RHS from *this and assigns the result to *this.
  ///
  /// \returns *this
  APInt &operator-=(const APInt &RHS);
  APInt &operator-=(uint64_t RHS);

  /// Left-shift assignment function.
  ///
  /// Shifts *this left by shiftAmt and assigns the result to *this.
  ///
  /// \returns *this after shifting left by ShiftAmt
  APInt &operator<<=(unsigned ShiftAmt) {
    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
    if (isSingleWord()) {
      if (ShiftAmt == BitWidth)
        U.VAL = 0;
      else
        U.VAL <<= ShiftAmt;
      return clearUnusedBits();
    }
    shlSlowCase(ShiftAmt);
    return *this;
  }

  /// Left-shift assignment function.
  ///
  /// Shifts *this left by shiftAmt and assigns the result to *this.
  ///
  /// \returns *this after shifting left by ShiftAmt
  APInt &operator<<=(const APInt &ShiftAmt);

  /// @}
  /// \name Binary Operators
  /// @{

  /// Multiplication operator.
  ///
  /// Multiplies this APInt by RHS and returns the result.
  APInt operator*(const APInt &RHS) const;

  /// Left logical shift operator.
  ///
  /// Shifts this APInt left by \p Bits and returns the result.
  APInt operator<<(unsigned Bits) const { return shl(Bits); }

  /// Left logical shift operator.
  ///
  /// Shifts this APInt left by \p Bits and returns the result.
  APInt operator<<(const APInt &Bits) const { return shl(Bits); }

  /// Arithmetic right-shift function.
  ///
  /// Arithmetic right-shift this APInt by shiftAmt.
  APInt ashr(unsigned ShiftAmt) const {
    APInt R(*this);
    R.ashrInPlace(ShiftAmt);
    return R;
  }

  /// Arithmetic right-shift this APInt by ShiftAmt in place.
  void ashrInPlace(unsigned ShiftAmt) {
    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
    if (isSingleWord()) {
      int64_t SExtVAL = SignExtend64(U.VAL, BitWidth);
      if (ShiftAmt == BitWidth)
        U.VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1); // Fill with sign bit.
      else
        U.VAL = SExtVAL >> ShiftAmt;
      clearUnusedBits();
      return;
    }
    ashrSlowCase(ShiftAmt);
  }

  /// Logical right-shift function.
  ///
  /// Logical right-shift this APInt by shiftAmt.
  APInt lshr(unsigned shiftAmt) const {
    APInt R(*this);
    R.lshrInPlace(shiftAmt);
    return R;
  }

  /// Logical right-shift this APInt by ShiftAmt in place.
  void lshrInPlace(unsigned ShiftAmt) {
    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
    if (isSingleWord()) {
      if (ShiftAmt == BitWidth)
        U.VAL = 0;
      else
        U.VAL >>= ShiftAmt;
      return;
    }
    lshrSlowCase(ShiftAmt);
  }

  /// Left-shift function.
  ///
  /// Left-shift this APInt by shiftAmt.
  APInt shl(unsigned shiftAmt) const {
    APInt R(*this);
    R <<= shiftAmt;
    return R;
  }

  /// relative logical shift right
  APInt relativeLShr(int RelativeShift) const {
    return RelativeShift > 0 ? lshr(RelativeShift) : shl(-RelativeShift);
  }

  /// relative logical shift left
  APInt relativeLShl(int RelativeShift) const {
    return relativeLShr(-RelativeShift);
  }

  /// relative arithmetic shift right
  APInt relativeAShr(int RelativeShift) const {
    return RelativeShift > 0 ? ashr(RelativeShift) : shl(-RelativeShift);
  }

  /// relative arithmetic shift left
  APInt relativeAShl(int RelativeShift) const {
    return relativeAShr(-RelativeShift);
  }
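  // Illustrative sketch (an addition for exposition, not part of the original
  // header): a negative relative shift amount shifts the other direction.
  //
  //   APInt V(8, 0x10);
  //   APInt A = V.relativeLShr(2);  // A == 0x04
  //   APInt B = V.relativeLShr(-2); // B == 0x40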

  /// Rotate left by rotateAmt.
  APInt rotl(unsigned rotateAmt) const;

  /// Rotate right by rotateAmt.
  APInt rotr(unsigned rotateAmt) const;

  /// Arithmetic right-shift function.
  ///
  /// Arithmetic right-shift this APInt by shiftAmt.
  APInt ashr(const APInt &ShiftAmt) const {
    APInt R(*this);
    R.ashrInPlace(ShiftAmt);
    return R;
  }

  /// Arithmetic right-shift this APInt by shiftAmt in place.
  void ashrInPlace(const APInt &shiftAmt);

  /// Logical right-shift function.
  ///
  /// Logical right-shift this APInt by shiftAmt.
  APInt lshr(const APInt &ShiftAmt) const {
    APInt R(*this);
    R.lshrInPlace(ShiftAmt);
    return R;
  }

  /// Logical right-shift this APInt by ShiftAmt in place.
  void lshrInPlace(const APInt &ShiftAmt);

  /// Left-shift function.
  ///
  /// Left-shift this APInt by shiftAmt.
  APInt shl(const APInt &ShiftAmt) const {
    APInt R(*this);
    R <<= ShiftAmt;
    return R;
  }

  /// Rotate left by rotateAmt.
  APInt rotl(const APInt &rotateAmt) const;

  /// Rotate right by rotateAmt.
  APInt rotr(const APInt &rotateAmt) const;

  /// Concatenate the bits from "NewLSB" onto the bottom of *this.  This is
  /// equivalent to:
  ///   (this->zext(NewWidth) << NewLSB.getBitWidth()) | NewLSB.zext(NewWidth)
  APInt concat(const APInt &NewLSB) const {
    // If the result will be small, then both the merged values are small.
    unsigned NewWidth = getBitWidth() + NewLSB.getBitWidth();
    if (NewWidth <= APINT_BITS_PER_WORD)
      return APInt(NewWidth, (U.VAL << NewLSB.getBitWidth()) | NewLSB.U.VAL);
    return concatSlowCase(NewLSB);
  }
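  // Illustrative sketch (an addition for exposition, not part of the original
  // header): concatenating a 4-bit 0xA above a 4-bit 0x5 gives 8-bit 0xA5.
  //
  //   APInt Hi(4, 0xA), Lo(4, 0x5);
  //   APInt Joined = Hi.concat(Lo); // Joined == APInt(8, 0xA5)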

  /// Unsigned division operation.
  ///
  /// Perform an unsigned divide operation on this APInt by RHS. Both this and
  /// RHS are treated as unsigned quantities for purposes of this division.
  ///
  /// \returns a new APInt value containing the division result, rounded towards
  /// zero.
  APInt udiv(const APInt &RHS) const;
  APInt udiv(uint64_t RHS) const;

  /// Signed division function for APInt.
  ///
  /// Signed divide this APInt by APInt RHS.
  ///
  /// The result is rounded towards zero.
  APInt sdiv(const APInt &RHS) const;
  APInt sdiv(int64_t RHS) const;

  /// Unsigned remainder operation.
  ///
  /// Perform an unsigned remainder operation on this APInt with RHS being the
  /// divisor. Both this and RHS are treated as unsigned quantities for purposes
  /// of this operation.
  ///
  /// \returns a new APInt value containing the remainder result
  APInt urem(const APInt &RHS) const;
  uint64_t urem(uint64_t RHS) const;

  /// Function for signed remainder operation.
  ///
  /// Signed remainder operation on APInt.
  ///
  /// Note that this is a true remainder operation and not a modulo operation
  /// because the sign follows the sign of the dividend, which is *this.
  APInt srem(const APInt &RHS) const;
  int64_t srem(int64_t RHS) const;

  /// Dual division/remainder interface.
  ///
  /// Sometimes it is convenient to divide two APInt values and obtain both the
  /// quotient and remainder. This function does both operations in the same
  /// computation making it a little more efficient. The pair of input arguments
  /// may overlap with the pair of output arguments. It is safe to call
  /// udivrem(X, Y, X, Y), for example.
  static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
                      APInt &Remainder);
  static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
                      uint64_t &Remainder);

  static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
                      APInt &Remainder);
  static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
                      int64_t &Remainder);
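  // Illustrative sketch (an addition for exposition, not part of the original
  // header): quotient and remainder computed in a single call.
  //
  //   APInt A(32, 17), B(32, 5), Q(32, 0), R(32, 0);
  //   APInt::udivrem(A, B, Q, R); // Q == 3, R == 2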

  // Operations that return overflow indicators.
  APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
  APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
  APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
  APInt usub_ov(const APInt &RHS, bool &Overflow) const;
  APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
  APInt smul_ov(const APInt &RHS, bool &Overflow) const;
  APInt umul_ov(const APInt &RHS, bool &Overflow) const;
  APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
  APInt sshl_ov(unsigned Amt, bool &Overflow) const;
  APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
  APInt ushl_ov(unsigned Amt, bool &Overflow) const;

  // Operations that saturate
  APInt sadd_sat(const APInt &RHS) const;
  APInt uadd_sat(const APInt &RHS) const;
  APInt ssub_sat(const APInt &RHS) const;
  APInt usub_sat(const APInt &RHS) const;
  APInt smul_sat(const APInt &RHS) const;
  APInt umul_sat(const APInt &RHS) const;
  APInt sshl_sat(const APInt &RHS) const;
  APInt sshl_sat(unsigned RHS) const;
  APInt ushl_sat(const APInt &RHS) const;
  APInt ushl_sat(unsigned RHS) const;

  /// Array-indexing support.
  ///
  /// \returns the bit value at bitPosition
  bool operator[](unsigned bitPosition) const {
    assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
    return (maskBit(bitPosition) & getWord(bitPosition)) != 0;
  }

  /// @}
  /// \name Comparison Operators
  /// @{

  /// Equality operator.
  ///
  /// Compares this APInt with RHS for the validity of the equality
  /// relationship.
  bool operator==(const APInt &RHS) const {
    assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
    if (isSingleWord())
      return U.VAL == RHS.U.VAL;
    return equalSlowCase(RHS);
  }

  /// Equality operator.
  ///
  /// Compares this APInt with a uint64_t for the validity of the equality
  /// relationship.
  ///
  /// \returns true if *this == Val
  bool operator==(uint64_t Val) const {
    return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() == Val;
  }

  /// Equality comparison.
  ///
  /// Compares this APInt with RHS for the validity of the equality
  /// relationship.
  ///
  /// \returns true if *this == Val
  bool eq(const APInt &RHS) const { return (*this) == RHS; }

  /// Inequality operator.
  ///
  /// Compares this APInt with RHS for the validity of the inequality
  /// relationship.
  ///
  /// \returns true if *this != Val
  bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }

  /// Inequality operator.
  ///
  /// Compares this APInt with a uint64_t for the validity of the inequality
  /// relationship.
  ///
  /// \returns true if *this != Val
  bool operator!=(uint64_t Val) const { return !((*this) == Val); }

  /// Inequality comparison
  ///
  /// Compares this APInt with RHS for the validity of the inequality
  /// relationship.
  ///
  /// \returns true if *this != Val
  bool ne(const APInt &RHS) const { return !((*this) == RHS); }

  /// Unsigned less than comparison
  ///
  /// Regards both *this and RHS as unsigned quantities and compares them for
  /// the validity of the less-than relationship.
  ///
  /// \returns true if *this < RHS when both are considered unsigned.
  bool ult(const APInt &RHS) const { return compare(RHS) < 0; }

  /// Unsigned less than comparison
  ///
  /// Regards both *this as an unsigned quantity and compares it with RHS for
  /// the validity of the less-than relationship.
  ///
  /// \returns true if *this < RHS when considered unsigned.
  bool ult(uint64_t RHS) const {
    // Only need to check active bits if not a single word.
    return (isSingleWord() || getActiveBits() <= 64) && getZExtValue() < RHS;
  }

  /// Signed less than comparison
  ///
  /// Regards both *this and RHS as signed quantities and compares them for
  /// validity of the less-than relationship.
  ///
  /// \returns true if *this < RHS when both are considered signed.
  bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }

  /// Signed less than comparison
  ///
  /// Regards both *this as a signed quantity and compares it with RHS for
  /// the validity of the less-than relationship.
  ///
  /// \returns true if *this < RHS when considered signed.
  bool slt(int64_t RHS) const {
    return (!isSingleWord() && getSignificantBits() > 64)
               ? isNegative()
               : getSExtValue() < RHS;
  }

  /// Unsigned less or equal comparison
  ///
  /// Regards both *this and RHS as unsigned quantities and compares them for
  /// validity of the less-or-equal relationship.
  ///
  /// \returns true if *this <= RHS when both are considered unsigned.
  bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }

  /// Unsigned less or equal comparison
  ///
  /// Regards both *this as an unsigned quantity and compares it with RHS for
  /// the validity of the less-or-equal relationship.
  ///
  /// \returns true if *this <= RHS when considered unsigned.
  bool ule(uint64_t RHS) const { return !ugt(RHS); }

  /// Signed less or equal comparison
  ///
  /// Regards both *this and RHS as signed quantities and compares them for
  /// validity of the less-or-equal relationship.
  ///
  /// \returns true if *this <= RHS when both are considered signed.
  bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }

  /// Signed less or equal comparison
  ///
  /// Regards both *this as a signed quantity and compares it with RHS for the
  /// validity of the less-or-equal relationship.
  ///
  /// \returns true if *this <= RHS when considered signed.
  bool sle(uint64_t RHS) const { return !sgt(RHS); }

  /// Unsigned greater than comparison
  ///
  /// Regards both *this and RHS as unsigned quantities and compares them for
  /// the validity of the greater-than relationship.
  ///
  /// \returns true if *this > RHS when both are considered unsigned.
  bool ugt(const APInt &RHS) const { return !ule(RHS); }

  /// Unsigned greater than comparison
  ///
  /// Regards both *this as an unsigned quantity and compares it with RHS for
  /// the validity of the greater-than relationship.
  ///
  /// \returns true if *this > RHS when considered unsigned.
  bool ugt(uint64_t RHS) const {
    // Only need to check active bits if not a single word.
    return (!isSingleWord() && getActiveBits() > 64) || getZExtValue() > RHS;
  }

  /// Signed greater than comparison
  ///
  /// Regards both *this and RHS as signed quantities and compares them for the
  /// validity of the greater-than relationship.
  ///
  /// \returns true if *this > RHS when both are considered signed.
  bool sgt(const APInt &RHS) const { return !sle(RHS); }

  /// Signed greater than comparison
  ///
  /// Regards both *this as a signed quantity and compares it with RHS for
  /// the validity of the greater-than relationship.
  ///
  /// \returns true if *this > RHS when considered signed.
  bool sgt(int64_t RHS) const {
    return (!isSingleWord() && getSignificantBits() > 64)
               ? !isNegative()
               : getSExtValue() > RHS;
  }

  /// Unsigned greater or equal comparison
  ///
  /// Regards both *this and RHS as unsigned quantities and compares them for
  /// validity of the greater-or-equal relationship.
  ///
  /// \returns true if *this >= RHS when both are considered unsigned.
  bool uge(const APInt &RHS) const { return !ult(RHS); }

  /// Unsigned greater or equal comparison
  ///
  /// Regards both *this as an unsigned quantity and compares it with RHS for
  /// the validity of the greater-or-equal relationship.
  ///
  /// \returns true if *this >= RHS when considered unsigned.
  bool uge(uint64_t RHS) const { return !ult(RHS); }

  /// Signed greater or equal comparison
  ///
  /// Regards both *this and RHS as signed quantities and compares them for
  /// validity of the greater-or-equal relationship.
  ///
  /// \returns true if *this >= RHS when both are considered signed.
  bool sge(const APInt &RHS) const { return !slt(RHS); }

  /// Signed greater or equal comparison
  ///
  /// Regards both *this as a signed quantity and compares it with RHS for
  /// the validity of the greater-or-equal relationship.
  ///
  /// \returns true if *this >= RHS when considered signed.
  bool sge(int64_t RHS) const { return !slt(RHS); }

  /// This operation tests if there are any pairs of corresponding bits
  /// between this APInt and RHS that are both set.
  bool intersects(const APInt &RHS) const {
    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
    if (isSingleWord())
      return (U.VAL & RHS.U.VAL) != 0;
    return intersectsSlowCase(RHS);
  }

  /// This operation checks that all bits set in this APInt are also set in RHS.
  bool isSubsetOf(const APInt &RHS) const {
    assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
    if (isSingleWord())
      return (U.VAL & ~RHS.U.VAL) == 0;
    return isSubsetOfSlowCase(RHS);
  }

  /// @}
  /// \name Resizing Operators
  /// @{

  /// Truncate to new width.
  ///
  /// Truncate the APInt to a specified width. It is an error to specify a width
  /// that is greater than the current width.
  APInt trunc(unsigned width) const;

  /// Truncate to new width with unsigned saturation.
  ///
  /// If the APInt, treated as unsigned integer, can be losslessly truncated to
  /// the new bitwidth, then return truncated APInt. Else, return max value.
  APInt truncUSat(unsigned width) const;

  /// Truncate to new width with signed saturation.
  ///
  /// If this APInt, treated as signed integer, can be losslessly truncated to
  /// the new bitwidth, then return truncated APInt. Else, return either
  /// signed min value if the APInt was negative, or signed max value.
  APInt truncSSat(unsigned width) const;

  /// Sign extend to a new width.
  ///
  /// This operation sign extends the APInt to a new width. If the high order
  /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
  /// It is an error to specify a width that is less than the
  /// current width.
  APInt sext(unsigned width) const;

  /// Zero extend to a new width.
  ///
  /// This operation zero extends the APInt to a new width. The high order bits
  /// are filled with 0 bits.  It is an error to specify a width that is less
  /// than the current width.
  APInt zext(unsigned width) const;

  /// Sign extend or truncate to width
  ///
  /// Make this APInt have the bit width given by \p width. The value is sign
  /// extended, truncated, or left alone to make it that width.
  APInt sextOrTrunc(unsigned width) const;

  /// Zero extend or truncate to width
  ///
  /// Make this APInt have the bit width given by \p width. The value is zero
  /// extended, truncated, or left alone to make it that width.
  APInt zextOrTrunc(unsigned width) const;

  /// @}
  /// \name Bit Manipulation Operators
  /// @{

  /// Set every bit to 1.
  void setAllBits() {
    if (isSingleWord())
      U.VAL = WORDTYPE_MAX;
    else
      // Set all the bits in all the words.
      memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
    // Clear the unused ones
    clearUnusedBits();
  }

  /// Set the bit at the position given by \p BitPosition to 1.
  void setBit(unsigned BitPosition) {
    assert(BitPosition < BitWidth && "BitPosition out of range");
    WordType Mask = maskBit(BitPosition);
    if (isSingleWord())
      U.VAL |= Mask;
    else
      U.pVal[whichWord(BitPosition)] |= Mask;
  }

  /// Set the sign bit to 1.
  void setSignBit() { setBit(BitWidth - 1); }

  /// Set a given bit to a given value.
  void setBitVal(unsigned BitPosition, bool BitValue) {
    if (BitValue)
      setBit(BitPosition);
    else
      clearBit(BitPosition);
  }

  /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
  /// This function handles the "wrap" case when \p loBit >= \p hiBit, and
  /// calls setBits when \p loBit < \p hiBit.
  /// For the \p loBit == \p hiBit wrap case, every bit is set to 1.
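  ///
  /// Illustrative example (values assumed, not from the source):
  /// \code
  ///   APInt X(8, 0);
  ///   X.setBitsWithWrap(6, 2); // wrap: sets bits 6, 7, 0, 1 -> 0b11000011
  ///   APInt Y(8, 0);
  ///   Y.setBitsWithWrap(2, 6); // no wrap: same as Y.setBits(2, 6) -> 0b00111100
  /// \endcode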
  void setBitsWithWrap(unsigned loBit, unsigned hiBit) {
    assert(hiBit <= BitWidth && "hiBit out of range");
    assert(loBit <= BitWidth && "loBit out of range");
    if (loBit < hiBit) {
      setBits(loBit, hiBit);
      return;
    }
    setLowBits(hiBit);
    setHighBits(BitWidth - loBit);
  }

  /// Set the bits from loBit (inclusive) to hiBit (exclusive) to 1.
  /// This function handles the case when \p loBit <= \p hiBit.
  void setBits(unsigned loBit, unsigned hiBit) {
    assert(hiBit <= BitWidth && "hiBit out of range");
    assert(loBit <= BitWidth && "loBit out of range");
    assert(loBit <= hiBit && "loBit greater than hiBit");
    if (loBit == hiBit)
      return;
    if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
      uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
      mask <<= loBit;
      if (isSingleWord())
        U.VAL |= mask;
      else
        U.pVal[0] |= mask;
    } else {
      setBitsSlowCase(loBit, hiBit);
    }
  }

  /// Set the top bits starting from loBit.
  void setBitsFrom(unsigned loBit) { return setBits(loBit, BitWidth); }

  /// Set the bottom loBits bits.
  void setLowBits(unsigned loBits) { return setBits(0, loBits); }

  /// Set the top hiBits bits.
  void setHighBits(unsigned hiBits) {
    return setBits(BitWidth - hiBits, BitWidth);
  }

  /// Set every bit to 0.
  void clearAllBits() {
    if (isSingleWord())
      U.VAL = 0;
    else
      memset(U.pVal, 0, getNumWords() * APINT_WORD_SIZE);
  }

  /// Set a given bit to 0.
  ///
  /// Set the bit at the position given by \p BitPosition to 0.
  void clearBit(unsigned BitPosition) {
    assert(BitPosition < BitWidth && "BitPosition out of range");
    WordType Mask = ~maskBit(BitPosition);
    if (isSingleWord())
      U.VAL &= Mask;
    else
      U.pVal[whichWord(BitPosition)] &= Mask;
  }

  /// Set bottom loBits bits to 0.
  void clearLowBits(unsigned loBits) {
    assert(loBits <= BitWidth && "More bits than bitwidth");
    APInt Keep = getHighBitsSet(BitWidth, BitWidth - loBits);
    *this &= Keep;
  }

  /// Set the sign bit to 0.
  void clearSignBit() { clearBit(BitWidth - 1); }

  /// Toggle every bit to its opposite value.
  void flipAllBits() {
    if (isSingleWord()) {
      U.VAL ^= WORDTYPE_MAX;
      clearUnusedBits();
    } else {
      flipAllBitsSlowCase();
    }
  }

  /// Toggles a given bit to its opposite value.
  ///
  /// Toggle the bit at the position given by \p bitPosition to its opposite
  /// value.
  void flipBit(unsigned bitPosition);

  /// Negate this APInt in place.
  void negate() {
    flipAllBits();
    ++(*this);
  }

  /// Insert the bits from a smaller APInt starting at bitPosition.
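  ///
  /// Illustrative sketch (values assumed):
  /// \code
  ///   APInt V(16, 0);
  ///   V.insertBits(APInt(4, 0xA), 4);  // bits [4,8) become 1010; V == 0xA0
  ///   APInt Nib = V.extractBits(4, 4); // Nib is the 4-bit value 0xA
  /// \endcode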
  void insertBits(const APInt &SubBits, unsigned bitPosition);
  void insertBits(uint64_t SubBits, unsigned bitPosition, unsigned numBits);

  /// Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
  APInt extractBits(unsigned numBits, unsigned bitPosition) const;
  uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const;

  /// @}
  /// \name Value Characterization Functions
  /// @{

  /// Return the number of bits in the APInt.
  unsigned getBitWidth() const { return BitWidth; }

  /// Get the number of words.
  ///
  /// Here one word's bitwidth equals that of uint64_t.
  ///
  /// \returns the number of words to hold the integer value of this APInt.
  unsigned getNumWords() const { return getNumWords(BitWidth); }

  /// Get the number of words.
  ///
  /// *NOTE* Here one word's bitwidth equals that of uint64_t.
  ///
  /// \returns the number of words to hold the integer value with a given bit
  /// width.
  static unsigned getNumWords(unsigned BitWidth) {
    return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
  }

  /// Compute the number of active bits in the value
  ///
  /// This function returns the number of active bits, which is defined as the
  /// bit width minus the number of leading zeros. This is used in several
  /// computations to see how "wide" the value is.
  unsigned getActiveBits() const { return BitWidth - countl_zero(); }

  /// Compute the number of active words in the value of this APInt.
  ///
  /// This is used in conjunction with getRawData() to extract the raw value of
  /// the APInt.
  unsigned getActiveWords() const {
    unsigned numActiveBits = getActiveBits();
    return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
  }

  /// Get the minimum bit size for this signed APInt
  ///
  /// Computes the minimum bit width for this APInt while considering it to be a
  /// signed (and probably negative) value. If the value is not negative, this
  /// function returns the same value as getActiveBits()+1. Otherwise, it
  /// returns the smallest bit width that will retain the negative value. For
  /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
  /// for -1, this function will always return 1.
  unsigned getSignificantBits() const {
    return BitWidth - getNumSignBits() + 1;
  }

  /// Get zero extended value
  ///
  /// This method attempts to return the value of this APInt as a zero extended
  /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
  /// uint64_t. Otherwise an assertion will result.
  uint64_t getZExtValue() const {
    if (isSingleWord())
      return U.VAL;
    assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
    return U.pVal[0];
  }

  /// Get zero extended value if possible
  ///
  /// This method attempts to return the value of this APInt as a zero extended
  /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
  /// uint64_t. Otherwise no value is returned.
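  ///
  /// A minimal sketch (APInt::getMaxValue assumed available):
  /// \code
  ///   APInt Big = APInt::getMaxValue(128);          // all-ones, 128 bits
  ///   assert(!Big.tryZExtValue());                  // does not fit uint64_t
  ///   assert(APInt(128, 42).tryZExtValue() == uint64_t(42));
  /// \endcode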
  std::optional<uint64_t> tryZExtValue() const {
    return (getActiveBits() <= 64) ? std::optional<uint64_t>(getZExtValue())
                                   : std::nullopt;
  }

  /// Get sign extended value
  ///
  /// This method attempts to return the value of this APInt as a sign extended
  /// int64_t. The bit width must be <= 64 or the value must fit within an
  /// int64_t. Otherwise an assertion will result.
  int64_t getSExtValue() const {
    if (isSingleWord())
      return SignExtend64(U.VAL, BitWidth);
    assert(getSignificantBits() <= 64 && "Too many bits for int64_t");
    return int64_t(U.pVal[0]);
  }

  /// Get sign extended value if possible
  ///
  /// This method attempts to return the value of this APInt as a sign extended
  /// int64_t. The bitwidth must be <= 64 or the value must fit within an
  /// int64_t. Otherwise no value is returned.
  std::optional<int64_t> trySExtValue() const {
    return (getSignificantBits() <= 64) ? std::optional<int64_t>(getSExtValue())
                                        : std::nullopt;
  }

  /// Get bits required for string value.
  ///
  /// This method determines how many bits are required to hold the APInt
  /// equivalent of the string given by \p str.
  static unsigned getBitsNeeded(StringRef str, uint8_t radix);

  /// Get the bits that are sufficient to represent the string value. This may
  /// overestimate the number of bits required, but it does not require
  /// parsing the value in the string.
  static unsigned getSufficientBitsNeeded(StringRef Str, uint8_t Radix);

  /// The APInt version of std::countl_zero.
  ///
  /// It counts the number of zeros from the most significant bit to the first
  /// one bit.
  ///
  /// \returns BitWidth if the value is zero, otherwise returns the number of
  ///   zeros from the most significant bit to the first one bit.
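  ///
  /// For example (values assumed): for APInt(16, 0x00F0), countl_zero() is 8,
  /// countr_zero() is 4, and popcount() is 4.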
  unsigned countl_zero() const {
    if (isSingleWord()) {
      unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
      return llvm::countl_zero(U.VAL) - unusedBits;
    }
    return countLeadingZerosSlowCase();
  }

  unsigned countLeadingZeros() const { return countl_zero(); }

  /// Count the number of leading one bits.
  ///
  /// This function is an APInt version of std::countl_one. It counts the number
  /// of ones from the most significant bit to the first zero bit.
  ///
  /// \returns 0 if the high order bit is not set, otherwise returns the number
  /// of ones from the most significant bit to the first zero bit.
  unsigned countl_one() const {
    if (isSingleWord()) {
      if (LLVM_UNLIKELY(BitWidth == 0))
        return 0;
      return llvm::countl_one(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
    }
    return countLeadingOnesSlowCase();
  }

  unsigned countLeadingOnes() const { return countl_one(); }

  /// Computes the number of leading bits of this APInt that are equal to its
  /// sign bit.
  unsigned getNumSignBits() const {
    return isNegative() ? countl_one() : countl_zero();
  }

  /// Count the number of trailing zero bits.
  ///
  /// This function is an APInt version of std::countr_zero. It counts the
  /// number of zeros from the least significant bit to the first set bit.
  ///
  /// \returns BitWidth if the value is zero, otherwise returns the number of
  /// zeros from the least significant bit to the first one bit.
  unsigned countr_zero() const {
    if (isSingleWord()) {
      unsigned TrailingZeros = llvm::countr_zero(U.VAL);
      return (TrailingZeros > BitWidth ? BitWidth : TrailingZeros);
    }
    return countTrailingZerosSlowCase();
  }

  unsigned countTrailingZeros() const { return countr_zero(); }

  /// Count the number of trailing one bits.
  ///
  /// This function is an APInt version of std::countr_one. It counts the number
  /// of ones from the least significant bit to the first zero bit.
  ///
  /// \returns BitWidth if the value is all ones, otherwise returns the number
  /// of ones from the least significant bit to the first zero bit.
  unsigned countr_one() const {
    if (isSingleWord())
      return llvm::countr_one(U.VAL);
    return countTrailingOnesSlowCase();
  }

  unsigned countTrailingOnes() const { return countr_one(); }

  /// Count the number of bits set.
  ///
  /// This function is an APInt version of std::popcount. It counts the number
  /// of 1 bits in the APInt value.
  ///
  /// \returns 0 if the value is zero, otherwise returns the number of set bits.
  unsigned popcount() const {
    if (isSingleWord())
      return llvm::popcount(U.VAL);
    return countPopulationSlowCase();
  }

  /// @}
  /// \name Conversion Functions
  /// @{
  void print(raw_ostream &OS, bool isSigned) const;

  /// Converts an APInt to a string and appends it to \p Str. \p Str is
  /// commonly a SmallString. If \p Radix > 10, \p UpperCase determines the
  /// case of the letter digits.
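  ///
  /// Illustrative sketch (SmallString assumed available to the caller):
  /// \code
  ///   SmallString<16> S;
  ///   APInt(8, 0xFF).toString(S, 16, /*Signed=*/false); // S == "FF"
  ///   APInt(8, 0xFF).toString(S, 10, /*Signed=*/true);  // appends "-1"
  /// \endcode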
  void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
                bool formatAsCLiteral = false, bool UpperCase = true) const;

  /// Considers the APInt to be unsigned and converts it into a string in the
  /// radix given. The radix can be 2, 8, 10, 16, or 36.
  void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
    toString(Str, Radix, false, false);
  }

  /// Considers the APInt to be signed and converts it into a string in the
  /// radix given. The radix can be 2, 8, 10, 16, or 36.
  void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
    toString(Str, Radix, true, false);
  }

  /// \returns a byte-swapped representation of this APInt Value.
  APInt byteSwap() const;

  /// \returns the value with the bit representation reversed of this APInt
  /// Value.
  APInt reverseBits() const;

  /// Converts this APInt to a double value.
  double roundToDouble(bool isSigned) const;

  /// Converts this unsigned APInt to a double value.
  double roundToDouble() const { return roundToDouble(false); }

  /// Converts this signed APInt to a double value.
  double signedRoundToDouble() const { return roundToDouble(true); }

  /// Converts APInt bits to a double
  ///
  /// The conversion does not do a translation from integer to double; it just
  /// re-interprets the bits as a double. Note that it is valid to do this on
  /// any bit width. Exactly 64 bits will be translated.
  double bitsToDouble() const { return llvm::bit_cast<double>(getWord(0)); }

  /// Converts APInt bits to a float
  ///
  /// The conversion does not do a translation from integer to float; it just
  /// re-interprets the bits as a float. Note that it is valid to do this on
  /// any bit width. Exactly 32 bits will be translated.
  float bitsToFloat() const {
    return llvm::bit_cast<float>(static_cast<uint32_t>(getWord(0)));
  }

  /// Converts a double to APInt bits.
  ///
  /// The conversion does not do a translation from double to integer; it just
  /// re-interprets the bits of the double.
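  ///
  /// Round-trip sketch (illustrative):
  /// \code
  ///   APInt Bits = APInt::doubleToBits(1.0); // 64-bit value 0x3FF0000000000000
  ///   double D = Bits.bitsToDouble();        // D == 1.0 again
  /// \endcode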
  static APInt doubleToBits(double V) {
    return APInt(sizeof(double) * CHAR_BIT, llvm::bit_cast<uint64_t>(V));
  }

  /// Converts a float to APInt bits.
  ///
  /// The conversion does not do a translation from float to integer; it just
  /// re-interprets the bits of the float.
  static APInt floatToBits(float V) {
    return APInt(sizeof(float) * CHAR_BIT, llvm::bit_cast<uint32_t>(V));
  }

  /// @}
  /// \name Mathematics Operations
  /// @{

  /// \returns the floor log base 2 of this APInt.
  unsigned logBase2() const { return getActiveBits() - 1; }

  /// \returns the ceil log base 2 of this APInt.
  unsigned ceilLogBase2() const {
    APInt temp(*this);
    --temp;
    return temp.getActiveBits();
  }

  /// \returns the nearest log base 2 of this APInt. Ties round up.
  ///
  /// NOTE: When we have a BitWidth of 1, we define:
  ///
  ///   log2(0) = UINT32_MAX
  ///   log2(1) = 0
  ///
  /// to get around any mathematical concerns resulting from
  /// referencing 2 in a space where 2 does not exist.
  unsigned nearestLogBase2() const;

  /// \returns the log base 2 of this APInt if it is an exact power of two, -1
  /// otherwise.
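  ///
  /// For example, for APInt(32, 48): logBase2() == 5, ceilLogBase2() == 6,
  /// and exactLogBase2() == -1, since 48 is not a power of two.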
  int32_t exactLogBase2() const {
    if (!isPowerOf2())
      return -1;
    return logBase2();
  }

  /// Compute the square root.
  APInt sqrt() const;

  /// Get the absolute value.  If *this is < 0 then return -(*this), otherwise
  /// *this.  Note that the "most negative" signed number (e.g. -128 for an
  /// 8-bit APInt) is unchanged due to how negation works.
  APInt abs() const {
    if (isNegative())
      return -(*this);
    return *this;
  }

  /// \returns the multiplicative inverse of this APInt for the given modulo.
  APInt multiplicativeInverse(const APInt &modulo) const;

  /// @}
  /// \name Building-block Operations for APInt and APFloat
  /// @{

  // These building block operations operate on a representation of arbitrary
  // precision, two's-complement, bignum integer values. They should be
  // sufficient to implement APInt and APFloat bignum requirements. Inputs are
  // generally a pointer to the base of an array of integer parts, representing
  // an unsigned bignum, and a count of how many parts there are.

  /// Sets the least significant part of a bignum to the input value, and zeroes
  /// out higher parts.
  static void tcSet(WordType *, WordType, unsigned);

  /// Assign one bignum to another.
  static void tcAssign(WordType *, const WordType *, unsigned);

  /// Returns true if a bignum is zero, false otherwise.
  static bool tcIsZero(const WordType *, unsigned);

  /// Extract the given bit of a bignum; returns 0 or 1.  Zero-based.
  static int tcExtractBit(const WordType *, unsigned bit);

  /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
  /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
  /// significant bit of DST.  All high bits above srcBITS in DST are
  /// zero-filled.
  static void tcExtract(WordType *, unsigned dstCount, const WordType *,
                        unsigned srcBits, unsigned srcLSB);

  /// Set the given bit of a bignum.  Zero-based.
  static void tcSetBit(WordType *, unsigned bit);

  /// Clear the given bit of a bignum.  Zero-based.
  static void tcClearBit(WordType *, unsigned bit);

  /// Returns the bit number of the least or most significant set bit of a
  /// number. If the input number has no bits set, -1U is returned.
  static unsigned tcLSB(const WordType *, unsigned n);
  static unsigned tcMSB(const WordType *parts, unsigned n);

  /// Negate a bignum in-place.
  static void tcNegate(WordType *, unsigned);

  /// DST += RHS + CARRY where CARRY is zero or one.  Returns the carry flag.
  static WordType tcAdd(WordType *, const WordType *, WordType carry, unsigned);
  /// DST += RHS.  Returns the carry flag.
  static WordType tcAddPart(WordType *, WordType, unsigned);

  /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
  static WordType tcSubtract(WordType *, const WordType *, WordType carry,
                             unsigned);
  /// DST -= RHS.  Returns the carry flag.
  static WordType tcSubtractPart(WordType *, WordType, unsigned);

  /// DST += SRC * MULTIPLIER + PART   if add is true
  /// DST  = SRC * MULTIPLIER + PART   if add is false
  ///
  /// Requires 0 <= DSTPARTS <= SRCPARTS + 1.  If DST overlaps SRC they must
  /// start at the same point, i.e. DST == SRC.
  ///
  /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned.
  /// Otherwise DST is filled with the least significant DSTPARTS parts of the
  /// result, and if all of the omitted higher parts were zero return zero,
  /// otherwise overflow occurred and return one.
  static int tcMultiplyPart(WordType *dst, const WordType *src,
                            WordType multiplier, WordType carry,
                            unsigned srcParts, unsigned dstParts, bool add);

  /// DST = LHS * RHS, where DST has the same width as the operands and is
  /// filled with the least significant parts of the result.  Returns one if
  /// overflow occurred, otherwise zero.  DST must be disjoint from both
  /// operands.
  static int tcMultiply(WordType *, const WordType *, const WordType *,
                        unsigned);

  /// DST = LHS * RHS, where DST has width the sum of the widths of the
  /// operands. No overflow occurs. DST must be disjoint from both operands.
  static void tcFullMultiply(WordType *, const WordType *, const WordType *,
                             unsigned, unsigned);

  /// If RHS is zero LHS and REMAINDER are left unchanged, return one.
  /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
  /// REMAINDER to the remainder, return zero.  i.e.
  ///
  ///  OLD_LHS = RHS * LHS + REMAINDER
  ///
  /// SCRATCH is a bignum of the same size as the operands and result for use by
  /// the routine; its contents need not be initialized and are destroyed.  LHS,
  /// REMAINDER and SCRATCH must be distinct.
  static int tcDivide(WordType *lhs, const WordType *rhs, WordType *remainder,
                      WordType *scratch, unsigned parts);

  /// Shift a bignum left Count bits. Shifted in bits are zero. There are no
  /// restrictions on Count.
  static void tcShiftLeft(WordType *, unsigned Words, unsigned Count);

  /// Shift a bignum right Count bits.  Shifted in bits are zero.  There are no
  /// restrictions on Count.
  static void tcShiftRight(WordType *, unsigned Words, unsigned Count);

  /// Comparison (unsigned) of two bignums.
  static int tcCompare(const WordType *, const WordType *, unsigned);

  /// Increment a bignum in-place.  Return the carry flag.
  static WordType tcIncrement(WordType *dst, unsigned parts) {
    return tcAddPart(dst, 1, parts);
  }

  /// Decrement a bignum in-place.  Return the borrow flag.
  static WordType tcDecrement(WordType *dst, unsigned parts) {
    return tcSubtractPart(dst, 1, parts);
  }
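
  // A minimal sketch of the tc* building blocks (2-part bignum, values
  // illustrative):
  //   APInt::WordType Parts[2];
  //   APInt::tcSet(Parts, ~0ULL, 2);         // Parts = {2^64-1, 0}
  //   APInt::WordType Carry = APInt::tcIncrement(Parts, 2);
  //   // now Parts = {0, 1} and Carry == 0: the +1 carried into part 1.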

  /// Used to insert APInt objects, or objects that contain APInt objects, into
  ///  FoldingSets.
  void Profile(FoldingSetNodeID &id) const;

  /// debug method
  void dump() const;

  /// Returns whether this instance allocated memory.
  bool needsCleanup() const { return !isSingleWord(); }

private:
  /// This union is used to store the integer value. When the
  /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
  union {
    uint64_t VAL;   ///< Used to store the <= 64 bits integer value.
    uint64_t *pVal; ///< Used to store the >64 bits integer value.
  } U;

  unsigned BitWidth = 1; ///< The number of bits in this APInt.

  friend struct DenseMapInfo<APInt, void>;
  friend class APSInt;

  /// This constructor is used only internally for speed of construction of
  /// temporaries. It is unsafe since it takes ownership of the pointer, so it
  /// is not public.
  APInt(uint64_t *val, unsigned bits) : BitWidth(bits) { U.pVal = val; }

  /// Determine which word a bit is in.
  ///
  /// \returns the word position for the specified bit position.
  static unsigned whichWord(unsigned bitPosition) {
    return bitPosition / APINT_BITS_PER_WORD;
  }

  /// Determine which bit in a word the specified bit position is in.
  static unsigned whichBit(unsigned bitPosition) {
    return bitPosition % APINT_BITS_PER_WORD;
  }

  /// Get a single bit mask.
  ///
  /// \returns a uint64_t with only the bit at "whichBit(bitPosition)" set.
  ///
  /// This method generates and returns a uint64_t (word) mask for a single
  /// bit at a specific bit position. This is used to mask the bit in the
  /// corresponding word.
  static uint64_t maskBit(unsigned bitPosition) {
    return 1ULL << whichBit(bitPosition);
  }

  /// Clear unused high order bits
  ///
  /// This method is used internally to clear the top "N" bits in the high order
  /// word that are not used by the APInt. This is needed after the most
  /// significant word is assigned a value to ensure that those bits are
  /// zero'd out.
  APInt &clearUnusedBits() {
    // Compute how many bits are used in the final word.
    unsigned WordBits = ((BitWidth - 1) % APINT_BITS_PER_WORD) + 1;

    // Mask out the high bits.
    uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits);
    if (LLVM_UNLIKELY(BitWidth == 0))
      mask = 0;

    if (isSingleWord())
      U.VAL &= mask;
    else
      U.pVal[getNumWords() - 1] &= mask;
    return *this;
  }

  /// Get the word corresponding to a bit position
  /// \returns the corresponding word for the specified bit position.
  uint64_t getWord(unsigned bitPosition) const {
    return isSingleWord() ? U.VAL : U.pVal[whichWord(bitPosition)];
  }

  /// Utility method to change the bit width of this APInt to a new bit width,
  /// allocating and/or deallocating as necessary. There is no guarantee on the
  /// value of any bits upon return. Caller should populate the bits after.
  void reallocate(unsigned NewBitWidth);

  /// Convert a char array into an APInt
  ///
  /// \param radix 2, 8, 10, 16, or 36
  /// Converts a string into a number.  The string must be non-empty
  /// and well-formed as a number of the given base. The bit-width
  /// must be sufficient to hold the result.
  ///
  /// This is used by the constructors that take string arguments.
  ///
  /// StringRef::getAsInteger is superficially similar but (1) does
  /// not assume that the string is well-formed and (2) grows the
  /// result to hold the input.
  void fromString(unsigned numBits, StringRef str, uint8_t radix);

  /// An internal division function for dividing APInts.
  ///
  /// This is used by the toString method to divide by the radix. It simply
  /// provides a more convenient form of divide for internal use since KnuthDiv
  /// has specific constraints on its inputs. If those constraints are not met
  /// then it provides a simpler form of divide.
  static void divide(const WordType *LHS, unsigned lhsWords,
                     const WordType *RHS, unsigned rhsWords, WordType *Quotient,
                     WordType *Remainder);

  /// out-of-line slow case for inline constructor
  void initSlowCase(uint64_t val, bool isSigned);

  /// shared code between two array constructors
  void initFromArray(ArrayRef<uint64_t> array);

  /// out-of-line slow case for inline copy constructor
  void initSlowCase(const APInt &that);

  /// out-of-line slow case for shl
  void shlSlowCase(unsigned ShiftAmt);

  /// out-of-line slow case for lshr.
  void lshrSlowCase(unsigned ShiftAmt);

  /// out-of-line slow case for ashr.
  void ashrSlowCase(unsigned ShiftAmt);

  /// out-of-line slow case for operator=
  void assignSlowCase(const APInt &RHS);

  /// out-of-line slow case for operator==
  bool equalSlowCase(const APInt &RHS) const LLVM_READONLY;

  /// out-of-line slow case for countLeadingZeros
  unsigned countLeadingZerosSlowCase() const LLVM_READONLY;

  /// out-of-line slow case for countLeadingOnes.
  unsigned countLeadingOnesSlowCase() const LLVM_READONLY;

  /// out-of-line slow case for countTrailingZeros.
  unsigned countTrailingZerosSlowCase() const LLVM_READONLY;

  /// out-of-line slow case for countTrailingOnes
  unsigned countTrailingOnesSlowCase() const LLVM_READONLY;

  /// out-of-line slow case for countPopulation
  unsigned countPopulationSlowCase() const LLVM_READONLY;

  /// out-of-line slow case for intersects.
  bool intersectsSlowCase(const APInt &RHS) const LLVM_READONLY;

  /// out-of-line slow case for isSubsetOf.
  bool isSubsetOfSlowCase(const APInt &RHS) const LLVM_READONLY;

  /// out-of-line slow case for setBits.
  void setBitsSlowCase(unsigned loBit, unsigned hiBit);

  /// out-of-line slow case for flipAllBits.
  void flipAllBitsSlowCase();

  /// out-of-line slow case for concat.
  APInt concatSlowCase(const APInt &NewLSB) const;

  /// out-of-line slow case for operator&=.
  void andAssignSlowCase(const APInt &RHS);

  /// out-of-line slow case for operator|=.
  void orAssignSlowCase(const APInt &RHS);

  /// out-of-line slow case for operator^=.
  void xorAssignSlowCase(const APInt &RHS);

  /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
  /// to, or greater than RHS.
  int compare(const APInt &RHS) const LLVM_READONLY;

  /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
  /// to, or greater than RHS.
  int compareSigned(const APInt &RHS) const LLVM_READONLY;

  /// @}
};

inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }

inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }

/// Unary bitwise complement operator.
///
/// \returns an APInt that is the bitwise complement of \p v.
inline APInt operator~(APInt v) {
  v.flipAllBits();
  return v;
}

inline APInt operator&(APInt a, const APInt &b) {
  a &= b;
  return a;
}

inline APInt operator&(const APInt &a, APInt &&b) {
  b &= a;
  return std::move(b);
}

inline APInt operator&(APInt a, uint64_t RHS) {
  a &= RHS;
  return a;
}

inline APInt operator&(uint64_t LHS, APInt b) {
  b &= LHS;
  return b;
}

inline APInt operator|(APInt a, const APInt &b) {
  a |= b;
  return a;
}

inline APInt operator|(const APInt &a, APInt &&b) {
  b |= a;
  return std::move(b);
}

inline APInt operator|(APInt a, uint64_t RHS) {
  a |= RHS;
  return a;
}

inline APInt operator|(uint64_t LHS, APInt b) {
  b |= LHS;
  return b;
}

inline APInt operator^(APInt a, const APInt &b) {
  a ^= b;
  return a;
}

inline APInt operator^(const APInt &a, APInt &&b) {
  b ^= a;
  return std::move(b);
}

inline APInt operator^(APInt a, uint64_t RHS) {
  a ^= RHS;
  return a;
}

inline APInt operator^(uint64_t LHS, APInt b) {
  b ^= LHS;
  return b;
}

inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
  I.print(OS, true);
  return OS;
}

inline APInt operator-(APInt v) {
  v.negate();
  return v;
}

inline APInt operator+(APInt a, const APInt &b) {
  a += b;
  return a;
}

inline APInt operator+(const APInt &a, APInt &&b) {
  b += a;
  return std::move(b);
}

inline APInt operator+(APInt a, uint64_t RHS) {
  a += RHS;
  return a;
}

inline APInt operator+(uint64_t LHS, APInt b) {
  b += LHS;
  return b;
}

inline APInt operator-(APInt a, const APInt &b) {
  a -= b;
  return a;
}

inline APInt operator-(const APInt &a, APInt &&b) {
  b.negate();
  b += a;
  return std::move(b);
}

inline APInt operator-(APInt a, uint64_t RHS) {
  a -= RHS;
  return a;
}

inline APInt operator-(uint64_t LHS, APInt b) {
  b.negate();
  b += LHS;
  return b;
}

inline APInt operator*(APInt a, uint64_t RHS) {
  a *= RHS;
  return a;
}

inline APInt operator*(uint64_t LHS, APInt b) {
  b *= LHS;
  return b;
}

namespace APIntOps {

/// Determine the smaller of two APInts considered to be signed.
inline const APInt &smin(const APInt &A, const APInt &B) {
  return A.slt(B) ? A : B;
}

/// Determine the larger of two APInts considered to be signed.
inline const APInt &smax(const APInt &A, const APInt &B) {
  return A.sgt(B) ? A : B;
}

/// Determine the smaller of two APInts considered to be unsigned.
inline const APInt &umin(const APInt &A, const APInt &B) {
  return A.ult(B) ? A : B;
}

/// Determine the larger of two APInts considered to be unsigned.
inline const APInt &umax(const APInt &A, const APInt &B) {
  return A.ugt(B) ? A : B;
}
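
// Example (illustrative): with A = APInt(8, 0x80) -- i.e. -128 when signed,
// 128 when unsigned -- and B = APInt(8, 1), smin(A, B) is A while umin(A, B)
// is B: the signed and unsigned orderings diverge on the same bit pattern.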

/// Compute GCD of two unsigned APInt values.
///
/// This function returns the greatest common divisor of the two APInt values
/// using Stein's algorithm.
///
/// \returns the greatest common divisor of A and B.
APInt GreatestCommonDivisor(APInt A, APInt B);
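
// For example, GreatestCommonDivisor(APInt(32, 24), APInt(32, 36)) yields
// APInt(32, 12).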

/// Converts the given APInt to a double value.
///
/// Treats the APInt as an unsigned value for conversion purposes.
inline double RoundAPIntToDouble(const APInt &APIVal) {
  return APIVal.roundToDouble();
}

/// Converts the given APInt to a double value.
///
/// Treats the APInt as a signed value for conversion purposes.
inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
  return APIVal.signedRoundToDouble();
}

/// Converts the given APInt to a float value.
///
/// Treats the APInt as an unsigned value for conversion purposes.
inline float RoundAPIntToFloat(const APInt &APIVal) {
  return float(RoundAPIntToDouble(APIVal));
}

/// Converts the given APInt to a float value.
///
/// Treats the APInt as a signed value for conversion purposes.
inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
  return float(APIVal.signedRoundToDouble());
}

/// Converts the given double value into an APInt.
///
/// This function converts a double value to an APInt value.
APInt RoundDoubleToAPInt(double Double, unsigned width);

/// Converts a float value into an APInt.
///
/// This function converts a float value to an APInt value by delegating to
/// RoundDoubleToAPInt.
inline APInt RoundFloatToAPInt(float Float, unsigned width) {
  return RoundDoubleToAPInt(double(Float), width);
}

/// Return A divided by B, treating the values as unsigned, rounded by the
/// given rounding mode.
APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM);

/// Return A divided by B, treating the values as signed, rounded by the
/// given rounding mode.
APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM);

/// Let q(n) = An^2 + Bn + C, and BW = bit width of the value range
/// (e.g. 32 for i32).
/// This function finds the smallest number n, such that
/// (a) n >= 0 and q(n) = 0, or
/// (b) n >= 1 and q(n-1) and q(n), when evaluated in the set of all
///     integers, belong to two different intervals [Rk, Rk+R),
///     where R = 2^BW, and k is an integer.
/// The idea here is to find when q(n) "overflows" 2^BW, while at the
/// same time "allowing" subtraction. In unsigned modulo arithmetic a
/// subtraction (treated as addition of negated numbers) would always
/// count as an overflow, but here we want to allow values to decrease
/// and increase as long as they are within the same interval.
/// Specifically, adding of two negative numbers should not cause an
/// overflow (as long as the magnitude does not exceed the bit width).
/// On the other hand, given a positive number, adding a negative
/// number to it can give a negative result, which would cause the
/// value to go from [-2^BW, 0) to [0, 2^BW). In that sense, zero is
/// treated as a special case of an overflow.
///
/// This function returns std::nullopt if, after finding k that minimizes the
/// positive solution to q(n) = kR, both solutions are contained between
/// two consecutive integers.
///
/// There are cases where q(n) > T, and q(n+1) < T (assuming evaluation
/// in arithmetic modulo 2^BW, and treating the values as signed) by the
/// virtue of *signed* overflow. This function will *not* find such an n,
/// however it may find a value of n satisfying the inequalities due to
/// an *unsigned* overflow (if the values are treated as unsigned).
/// To find a solution for a signed overflow, treat it as a problem of
/// finding an unsigned overflow with a range width of BW-1.
///
/// The returned value may have a different bit width from the input
/// coefficients.
std::optional<APInt> SolveQuadraticEquationWrap(APInt A, APInt B, APInt C,
                                                unsigned RangeWidth);

/// Compare two values, and if they are different, return the position of the
/// most significant bit that is different in the values.
std::optional<unsigned> GetMostSignificantDifferentBit(const APInt &A,
                                                       const APInt &B);

/// Splat/Merge neighboring bits to widen/narrow the bitmask represented
/// by \p A to \p NewBitWidth bits.
///
/// MatchAnyBits: (Default)
/// e.g. ScaleBitMask(0b0101, 8) -> 0b00110011
/// e.g. ScaleBitMask(0b00011011, 4) -> 0b0111
///
/// MatchAllBits:
/// e.g. ScaleBitMask(0b0101, 8) -> 0b00110011
/// e.g. ScaleBitMask(0b00011011, 4) -> 0b0001
/// Either A.getBitWidth() or NewBitWidth must be a whole multiple of the
/// other.
APInt ScaleBitMask(const APInt &A, unsigned NewBitWidth,
                   bool MatchAllBits = false);
} // namespace APIntOps

// See friend declaration above. This additional declaration is required in
// order to compile LLVM with the IBM xlC compiler.
hash_code hash_value(const APInt &Arg);

/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
/// with the integer held in IntVal.
void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);

/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
void LoadIntFromMemory(APInt &IntVal, const uint8_t *Src, unsigned LoadBytes);

/// Provide DenseMapInfo for APInt.
template <> struct DenseMapInfo<APInt, void> {
  static inline APInt getEmptyKey() {
    APInt V(nullptr, 0);
    V.U.VAL = ~0ULL;
    return V;
  }

  static inline APInt getTombstoneKey() {
    APInt V(nullptr, 0);
    V.U.VAL = ~1ULL;
    return V;
  }

  static unsigned getHashValue(const APInt &Key);

  static bool isEqual(const APInt &LHS, const APInt &RHS) {
    return LHS.getBitWidth() == RHS.getBitWidth() && LHS == RHS;
  }
};
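
// With this specialization, APInt can be used directly as a DenseMap key,
// e.g. (illustrative) DenseMap<APInt, unsigned> Counts. Note that isEqual()
// also compares bit widths, so equal numeric values of different widths are
// distinct keys.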

} // namespace llvm

#endif
//===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_TWINE_H
#define LLVM_ADT_TWINE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
#include <string>
#include <string_view>

namespace llvm {

  class formatv_object_base;
  class raw_ostream;

  /// Twine - A lightweight data structure for efficiently representing the
  /// concatenation of temporary values as strings.
  ///
  /// A Twine is a kind of rope; it represents a concatenated string using a
  /// binary tree, where the string is the preorder traversal of the nodes.
  /// Since the Twine can be efficiently rendered into a buffer when its result
  /// is used, it avoids the cost of generating temporary values for
  /// intermediate string results -- particularly in cases when the Twine
  /// result is never required. By explicitly tracking the type of leaf nodes,
  /// we can also avoid the creation of temporary strings for conversion
  /// operations (such as appending an integer to a string).
  ///
  /// A Twine is not intended for direct use and should not be stored; its
  /// implementation relies on the ability to store pointers to temporary stack
  /// objects which may be deallocated at the end of a statement. Twines should
  /// only be accepted as const references in arguments, when an API wishes
  /// to accept possibly-concatenated strings.
  ///
  /// Twines support a special 'null' value, which always concatenates to form
  /// itself, and renders as an empty string. This can be returned from APIs to
  /// effectively nullify any concatenations performed on the result.
  ///
  /// \b Implementation
  ///
  /// Given the nature of a Twine, it is not possible for the Twine's
  /// concatenation method to construct interior nodes; the result must be
  /// represented inside the returned value. For this reason a Twine object
  /// actually holds two values, the left- and right-hand sides of a
  /// concatenation. We also have nullary Twine objects, which are effectively
  /// sentinel values that represent empty strings.
  ///
  /// Thus, a Twine can effectively have zero, one, or two children. The \see
  /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
  /// testing the number of children.
  ///
  /// We maintain a number of invariants on Twine objects (FIXME: Why):
  ///  - Nullary twines are always represented with their Kind on the left-hand
  ///    side, and the Empty kind on the right-hand side.
  ///  - Unary twines are always represented with the value on the left-hand
  ///    side, and the Empty kind on the right-hand side.
  ///  - If a Twine has another Twine as a child, that child should always be
  ///    binary (otherwise it could have been folded into the parent).
  ///
  /// These invariants are checked by \see isValid().
  ///
  /// \b Efficiency Considerations
  ///
  /// The Twine is designed to yield efficient and small code for common
  /// situations. For this reason, the concat() method is inlined so that
  /// concatenations of leaf nodes can be optimized into stores directly into a
  /// single stack allocated object.
  ///
  /// In practice, not all compilers can be trusted to optimize concat() fully,
  /// so we provide two additional methods (and accompanying operator+
  /// overloads) to guarantee that particularly important cases (cstring plus
  /// StringRef) codegen as desired.
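  ///
  /// Typical usage sketch (illustrative; report() and N are hypothetical):
  /// \code
  ///   void report(const Twine &Msg);
  ///   report("count = " + Twine(N) + " items");     // no std::string temporaries
  ///   std::string S = ("prefix-" + Twine(N)).str(); // render when needed
  /// \endcode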
  class Twine {
    /// NodeKind - Represent the type of an argument.
    enum NodeKind : unsigned char {
      /// An empty string; the result of concatenating anything with it is also
      /// empty.
      NullKind,

      /// The empty string.
      EmptyKind,

      /// A pointer to a Twine instance.
      TwineKind,

      /// A pointer to a C string instance.
      CStringKind,

      /// A pointer to an std::string instance.
      StdStringKind,

      /// A Pointer and Length representation. Used for std::string_view,
      /// StringRef, and SmallString.  Can't use a StringRef here
      /// because it is not trivially constructible.
      PtrAndLengthKind,

      /// A pointer to a formatv_object_base instance.
      FormatvObjectKind,

      /// A char value, to render as a character.
      CharKind,

      /// An unsigned int value, to render as an unsigned decimal integer.
      DecUIKind,

      /// An int value, to render as a signed decimal integer.
      DecIKind,

      /// A pointer to an unsigned long value, to render as an unsigned decimal
      /// integer.
      DecULKind,

      /// A pointer to a long value, to render as a signed decimal integer.
      DecLKind,

      /// A pointer to an unsigned long long value, to render as an unsigned
      /// decimal integer.
      DecULLKind,

      /// A pointer to a long long value, to render as a signed decimal integer.
      DecLLKind,

      /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
      /// integer.
      UHexKind
    };

    union Child {
      const Twine *twine;
      const char *cString;
      const std::string *stdString;
      struct {
        const char *ptr;
        size_t length;
      } ptrAndLength;
      const formatv_object_base *formatvObject;
      char character;
      unsigned int decUI;
      int decI;
      const unsigned long *decUL;
      const long *decL;
      const unsigned long long *decULL;
      const long long *decLL;
      const uint64_t *uHex;
    };

    /// LHS - The prefix in the concatenation, which may be uninitialized for
    /// Null or Empty kinds.
    Child LHS;

    /// RHS - The suffix in the concatenation, which may be uninitialized for
    /// Null or Empty kinds.
    Child RHS;

    /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
    NodeKind LHSKind = EmptyKind;

    /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
    NodeKind RHSKind = EmptyKind;

    /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
    explicit Twine(NodeKind Kind) : LHSKind(Kind) {
      assert(isNullary() && "Invalid kind!");
    }

    /// Construct a binary twine.
    explicit Twine(const Twine &LHS, const Twine &RHS)
        : LHSKind(TwineKind), RHSKind(TwineKind) {
      this->LHS.twine = &LHS;
      this->RHS.twine = &RHS;
      assert(isValid() && "Invalid twine!");
    }

    /// Construct a twine from explicit values.
    explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
        : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
      assert(isValid() && "Invalid twine!");
    }

    /// Check for the null twine.
    bool isNull() const {
      return getLHSKind() == NullKind;
    }

    /// Check for the empty twine.
    bool isEmpty() const {
      return getLHSKind() == EmptyKind;
    }

    /// Check if this is a nullary twine (null or empty).
    bool isNullary() const {
      return isNull() || isEmpty();
    }

    /// Check if this is a unary twine.
    bool isUnary() const {
      return getRHSKind() == EmptyKind && !isNullary();
    }

    /// Check if this is a binary twine.
    bool isBinary() const {
      return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
    }

    /// Check if this is a valid twine (satisfying the invariants on
    /// order and number of arguments).
    bool isValid() const {
      // Nullary twines always have Empty on the RHS.
      if (isNullary() && getRHSKind() != EmptyKind)
        return false;

      // Null should never appear on the RHS.
      if (getRHSKind() == NullKind)
        return false;

      // The RHS cannot be non-empty if the LHS is empty.
      if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
        return false;

      // A twine child should always be binary.
      if (getLHSKind() == TwineKind &&
          !LHS.twine->isBinary())
        return false;
      if (getRHSKind() == TwineKind &&
          !RHS.twine->isBinary())
        return false;

      return true;
    }

    /// Get the NodeKind of the left-hand side.
    NodeKind getLHSKind() const { return LHSKind; }

    /// Get the NodeKind of the right-hand side.
    NodeKind getRHSKind() const { return RHSKind; }

    /// Print one child from a twine.
    void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;

    /// Print the representation of one child from a twine.
    void printOneChildRepr(raw_ostream &OS, Child Ptr,
                           NodeKind Kind) const;

  public:
    /// @name Constructors
    /// @{

    /// Construct from an empty string.
    /*implicit*/ Twine() {
      assert(isValid() && "Invalid twine!");
    }

    Twine(const Twine &) = default;

    /// Construct from a C string.
    ///
    /// We take care here to optimize "" into the empty twine -- this will be
    /// optimized out for string constants. This allows Twine arguments to have
    /// default "" values, without introducing unnecessary string constants.
    /*implicit*/ Twine(const char *Str) {
      if (Str[0] != '\0') {
        LHS.cString = Str;
        LHSKind = CStringKind;
      } else
        LHSKind = EmptyKind;

      assert(isValid() && "Invalid twine!");
    }
    /// Delete the implicit conversion from nullptr as Twine(const char *)
    /// cannot take nullptr.
    /*implicit*/ Twine(std::nullptr_t) = delete;

    /// Construct from an std::string.
    /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) {
      LHS.stdString = &Str;
      assert(isValid() && "Invalid twine!");
    }

    /// Construct from an std::string_view by converting it to a pointer and
    /// length.  This handles string_views on a pure API basis, and avoids
    /// storing one (or a pointer to one) inside a Twine, which prevents
    /// problems when mixing code compiled under various C++ standards.
    /*implicit*/ Twine(const std::string_view &Str)
        : LHSKind(PtrAndLengthKind) {
      LHS.ptrAndLength.ptr = Str.data();
      LHS.ptrAndLength.length = Str.length();
      assert(isValid() && "Invalid twine!");
    }

    /// Construct from a StringRef.
    /*implicit*/ Twine(const StringRef &Str) : LHSKind(PtrAndLengthKind) {
      LHS.ptrAndLength.ptr = Str.data();
      LHS.ptrAndLength.length = Str.size();
      assert(isValid() && "Invalid twine!");
    }

    /// Construct from a SmallString.
    /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
        : LHSKind(PtrAndLengthKind) {
      LHS.ptrAndLength.ptr = Str.data();
      LHS.ptrAndLength.length = Str.size();
      assert(isValid() && "Invalid twine!");
    }

    /// Construct from a formatv_object_base.
    /*implicit*/ Twine(const formatv_object_base &Fmt)
        : LHSKind(FormatvObjectKind) {
      LHS.formatvObject = &Fmt;
      assert(isValid() && "Invalid twine!");
    }

    /// Construct from a char.
    explicit Twine(char Val) : LHSKind(CharKind) {
      LHS.character = Val;
    }

    /// Construct from a signed char.
    explicit Twine(signed char Val) : LHSKind(CharKind) {
      LHS.character = static_cast<char>(Val);
    }

    /// Construct from an unsigned char.
    explicit Twine(unsigned char Val) : LHSKind(CharKind) {
      LHS.character = static_cast<char>(Val);
    }

    /// Construct a twine to print \p Val as an unsigned decimal integer.
    explicit Twine(unsigned Val) : LHSKind(DecUIKind) {
      LHS.decUI = Val;
    }

    /// Construct a twine to print \p Val as a signed decimal integer.
    explicit Twine(int Val) : LHSKind(DecIKind) {
      LHS.decI = Val;
    }

    /// Construct a twine to print \p Val as an unsigned decimal integer.
    explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) {
      LHS.decUL = &Val;
    }

    /// Construct a twine to print \p Val as a signed decimal integer.
    explicit Twine(const long &Val) : LHSKind(DecLKind) {
      LHS.decL = &Val;
    }

    /// Construct a twine to print \p Val as an unsigned decimal integer.
    explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) {
      LHS.decULL = &Val;
    }

    /// Construct a twine to print \p Val as a signed decimal integer.
    explicit Twine(const long long &Val) : LHSKind(DecLLKind) {
      LHS.decLL = &Val;
    }

    // FIXME: Unfortunately, to make sure this is as efficient as possible we
    // need extra binary constructors from particular types. We can't rely on
    // the compiler to be smart enough to fold operator+()/concat() down to the
    // right thing. Yet.

    /// Construct as the concatenation of a C string and a StringRef.
    /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
        : LHSKind(CStringKind), RHSKind(PtrAndLengthKind) {
      this->LHS.cString = LHS;
      this->RHS.ptrAndLength.ptr = RHS.data();
      this->RHS.ptrAndLength.length = RHS.size();
      assert(isValid() && "Invalid twine!");
    }

    /// Construct as the concatenation of a StringRef and a C string.
    /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
        : LHSKind(PtrAndLengthKind), RHSKind(CStringKind) {
      this->LHS.ptrAndLength.ptr = LHS.data();
      this->LHS.ptrAndLength.length = LHS.size();
      this->RHS.cString = RHS;
      assert(isValid() && "Invalid twine!");
    }

    /// Since the intended use of twines is as temporary objects, assignments
    /// when concatenating might cause undefined behavior or stack corruption.
    Twine &operator=(const Twine &) = delete;

    /// Create a 'null' string, which is an empty string that always
    /// concatenates to form another empty string.
    static Twine createNull() {
      return Twine(NullKind);
    }

    /// @}
    /// @name Numeric Conversions
    /// @{

    /// Construct a twine to print \p Val as an unsigned hexadecimal integer.
    static Twine utohexstr(const uint64_t &Val) {
      Child LHS, RHS;
      LHS.uHex = &Val;
      RHS.twine = nullptr;
      return Twine(LHS, UHexKind, RHS, EmptyKind);
    }

    /// @}
    /// @name Predicate Operations
    /// @{

    /// Check if this twine is trivially empty; a false return value does not
    /// necessarily mean the twine is empty.
    bool isTriviallyEmpty() const {
      return isNullary();
    }

    /// Return true if this twine can be dynamically accessed as a single
    /// StringRef value with getSingleStringRef().
    bool isSingleStringRef() const {
      if (getRHSKind() != EmptyKind) return false;

      switch (getLHSKind()) {
      case EmptyKind:
      case CStringKind:
      case StdStringKind:
      case PtrAndLengthKind:
        return true;
      default:
        return false;
      }
    }

    /// @}
    /// @name String Operations
    /// @{

    Twine concat(const Twine &Suffix) const;

    /// @}
    /// @name Output & Conversion.
    /// @{

    /// Return the twine contents as a std::string.
    std::string str() const;

    /// Append the concatenated string to the given SmallString or SmallVector.
    void toVector(SmallVectorImpl<char> &Out) const;

    /// This returns the twine as a single StringRef.  This method is only valid
    /// if isSingleStringRef() is true.
    StringRef getSingleStringRef() const {
      assert(isSingleStringRef() && "This cannot be had as a single stringref!");
      switch (getLHSKind()) {
      default: llvm_unreachable("Out of sync with isSingleStringRef");
      case EmptyKind:
        return StringRef();
      case CStringKind:
        return StringRef(LHS.cString);
      case StdStringKind:
        return StringRef(*LHS.stdString);
      case PtrAndLengthKind:
        return StringRef(LHS.ptrAndLength.ptr, LHS.ptrAndLength.length);
      }
    }

    /// This returns the twine as a single StringRef if it can be
    /// represented as such. Otherwise the twine is written into the given
    /// SmallVector and a StringRef to the SmallVector's data is returned.
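    ///
    /// A minimal sketch (the Storage name is illustrative):
    /// \code
    ///   SmallString<64> Storage;
    ///   StringRef S = ("a" + Twine(1)).toStringRef(Storage);
    /// \endcode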
    StringRef toStringRef(SmallVectorImpl<char> &Out) const {
      if (isSingleStringRef())
        return getSingleStringRef();
      toVector(Out);
      return StringRef(Out.data(), Out.size());
    }

    /// This returns the twine as a single null terminated StringRef if it
    /// can be represented as such. Otherwise the twine is written into the
    /// given SmallVector and a StringRef to the SmallVector's data is returned.
    ///
    /// The returned StringRef's size does not include the null terminator.
    StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;

    /// Write the concatenated string represented by this twine to the
    /// stream \p OS.
    void print(raw_ostream &OS) const;

    /// Dump the concatenated string represented by this twine to stderr.
    void dump() const;

    /// Write the representation of this twine to the stream \p OS.
    void printRepr(raw_ostream &OS) const;

    /// Dump the representation of this twine to stderr.
    void dumpRepr() const;

    /// @}
  };

  /// @name Twine Inline Implementations
  /// @{

  inline Twine Twine::concat(const Twine &Suffix) const {
    // Concatenation with null is null.
    if (isNull() || Suffix.isNull())
      return Twine(NullKind);

    // Concatenation with empty yields the other side.
    if (isEmpty())
      return Suffix;
    if (Suffix.isEmpty())
      return *this;

    // Otherwise we need to create a new node, taking care to fold in unary
    // twines.
    Child NewLHS, NewRHS;
    NewLHS.twine = this;
    NewRHS.twine = &Suffix;
    NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
    if (isUnary()) {
      NewLHS = LHS;
      NewLHSKind = getLHSKind();
    }
    if (Suffix.isUnary()) {
      NewRHS = Suffix.LHS;
      NewRHSKind = Suffix.getLHSKind();
    }

    return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
  }

  inline Twine operator+(const Twine &LHS, const Twine &RHS) {
    return LHS.concat(RHS);
  }

  /// Additional overload to guarantee simplified codegen; this is equivalent to
  /// concat().
  inline Twine operator+(const char *LHS, const StringRef &RHS) {
    return Twine(LHS, RHS);
  }

  /// Additional overload to guarantee simplified codegen; this is equivalent to
  /// concat().
  inline Twine operator+(const StringRef &LHS, const char *RHS) {
    return Twine(LHS, RHS);
  }

  inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
    RHS.print(OS);
    return OS;
  }

  /// @}

} // end namespace llvm

#endif // LLVM_ADT_TWINE_H
//===- llvm/ADT/CoalescingBitVector.h - A coalescing bitvector --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// A bitvector that uses an IntervalMap to coalesce adjacent elements
/// into intervals.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_COALESCINGBITVECTOR_H
#define LLVM_ADT_COALESCINGBITVECTOR_H

#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#include <initializer_list>

namespace llvm {

/// A bitvector that, under the hood, relies on an IntervalMap to coalesce
/// elements into intervals. Good for representing sets which predominantly
/// contain contiguous ranges. Bad for representing sets with lots of gaps
/// between elements.
///
/// Compared to SparseBitVector, CoalescingBitVector offers more predictable
/// performance for non-sequential find() operations.
///
/// \tparam IndexT - The type of the index into the bitvector.
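///
/// A short usage sketch (illustrative):
/// \code
///   CoalescingBitVector<unsigned>::Allocator Alloc;
///   CoalescingBitVector<unsigned> BV(Alloc);
///   BV.set({4, 5, 6, 7}); // coalesced into the single interval [4, 7]
///   assert(BV.test(5) && BV.count() == 4);
/// \endcode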
template <typename IndexT> class CoalescingBitVector {
  static_assert(std::is_unsigned<IndexT>::value,
                "Index must be an unsigned integer.");

  using ThisT = CoalescingBitVector<IndexT>;

  /// An interval map for closed integer ranges. The mapped values are unused.
  using MapT = IntervalMap<IndexT, char>;

  using UnderlyingIterator = typename MapT::const_iterator;

  using IntervalT = std::pair<IndexT, IndexT>;

public:
  using Allocator = typename MapT::Allocator;

  /// Construct by passing in a CoalescingBitVector<IndexT>::Allocator
  /// reference.
  CoalescingBitVector(Allocator &Alloc)
      : Alloc(&Alloc), Intervals(Alloc) {}

  /// \name Copy/move constructors and assignment operators.
  /// @{

  CoalescingBitVector(const ThisT &Other)
      : Alloc(Other.Alloc), Intervals(*Other.Alloc) {
    set(Other);
  }

  ThisT &operator=(const ThisT &Other) {
    clear();
    set(Other);
    return *this;
  }

  CoalescingBitVector(ThisT &&Other) = delete;
  ThisT &operator=(ThisT &&Other) = delete;

  /// @}

  /// Clear all the bits.
  void clear() { Intervals.clear(); }

  /// Check whether no bits are set.
  bool empty() const { return Intervals.empty(); }

  /// Count the number of set bits.
  unsigned count() const {
    unsigned Bits = 0;
    for (auto It = Intervals.begin(), End = Intervals.end(); It != End; ++It)
      Bits += 1 + It.stop() - It.start();
    return Bits;
  }

  /// Set the bit at \p Index.
  ///
  /// This method does /not/ support setting a bit that has already been set,
  /// for efficiency reasons. If possible, restructure your code to not set the
  /// same bit multiple times, or use \ref test_and_set.
  void set(IndexT Index) {
    assert(!test(Index) && "Setting already-set bits not supported/efficient, "
                           "IntervalMap will assert");
    insert(Index, Index);
  }

  /// Set the bits set in \p Other.
  ///
  /// This method does /not/ support setting already-set bits, see \ref set
  /// for the rationale. For a safe set union operation, use \ref operator|=.
  void set(const ThisT &Other) {
    for (auto It = Other.Intervals.begin(), End = Other.Intervals.end();
         It != End; ++It)
      insert(It.start(), It.stop());
  }

  /// Set the bits at \p Indices. Used for testing, primarily.
  void set(std::initializer_list<IndexT> Indices) {
    for (IndexT Index : Indices)
      set(Index);
  }

  /// Check whether the bit at \p Index is set.
  bool test(IndexT Index) const {
    const auto It = Intervals.find(Index);
    if (It == Intervals.end())
      return false;
    assert(It.stop() >= Index && "Interval must end after Index");
    return It.start() <= Index;
  }

  /// Set the bit at \p Index. Supports setting an already-set bit.
  void test_and_set(IndexT Index) {
    if (!test(Index))
      set(Index);
  }

  /// Reset the bit at \p Index. Supports resetting an already-unset bit.
  void reset(IndexT Index) {
    auto It = Intervals.find(Index);
    if (It == Intervals.end())
      return;

    // Split the interval containing Index into up to two parts: one from
    // [Start, Index-1] and another from [Index+1, Stop]. If Index is equal to
    // either Start or Stop, we create one new interval. If Index is equal to
    // both Start and Stop, we simply erase the existing interval.
    IndexT Start = It.start();
    if (Index < Start)
      // The index was not set.
      return;
    IndexT Stop = It.stop();
    assert(Index <= Stop && "Wrong interval for index");
    It.erase();
    if (Start < Index)
      insert(Start, Index - 1);
    if (Index < Stop)
      insert(Index + 1, Stop);
  }
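
  // Usage sketch (illustrative, assuming IndexT = unsigned):
  //
  //   CoalescingBitVector<unsigned>::Allocator Alloc;
  //   CoalescingBitVector<unsigned> BV(Alloc);
  //   BV.set({1, 2, 3, 4}); // stored as the single interval [1, 4]
  //   BV.test(3);           // true
  //   BV.reset(3);          // splits [1, 4] into [1, 2] and [4, 4]
  //   BV.test_and_set(3);   // safe even when the bit may already be set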

  /// Set union. If \p RHS is guaranteed to not overlap with this, \ref set may
  /// be a faster alternative.
  void operator|=(const ThisT &RHS) {
    // Get the overlaps between the two interval maps.
    SmallVector<IntervalT, 8> Overlaps;
    getOverlaps(RHS, Overlaps);

    // Insert the non-overlapping parts of all the intervals from RHS.
    for (auto It = RHS.Intervals.begin(), End = RHS.Intervals.end();
         It != End; ++It) {
      IndexT Start = It.start();
      IndexT Stop = It.stop();
      SmallVector<IntervalT, 8> NonOverlappingParts;
      getNonOverlappingParts(Start, Stop, Overlaps, NonOverlappingParts);
      for (IntervalT AdditivePortion : NonOverlappingParts)
        insert(AdditivePortion.first, AdditivePortion.second);
    }
  }

  /// Set intersection.
  void operator&=(const ThisT &RHS) {
    // Get the overlaps between the two interval maps (i.e. the intersection).
    SmallVector<IntervalT, 8> Overlaps;
    getOverlaps(RHS, Overlaps);
    // Rebuild the interval map, including only the overlaps.
    clear();
    for (IntervalT Overlap : Overlaps)
      insert(Overlap.first, Overlap.second);
  }

  /// Reset all bits present in \p Other.
  void intersectWithComplement(const ThisT &Other) {
    SmallVector<IntervalT, 8> Overlaps;
    if (!getOverlaps(Other, Overlaps)) {
      // If there is no overlap with Other, the intersection is empty.
      return;
    }

    // Delete the overlapping intervals. Split up intervals that only partially
    // intersect an overlap.
    for (IntervalT Overlap : Overlaps) {
      IndexT OlapStart, OlapStop;
      std::tie(OlapStart, OlapStop) = Overlap;

      auto It = Intervals.find(OlapStart);
      IndexT CurrStart = It.start();
      IndexT CurrStop = It.stop();
      assert(CurrStart <= OlapStart && OlapStop <= CurrStop &&
             "Expected some intersection!");

      // Split the overlap interval into up to two parts: one from [CurrStart,
      // OlapStart-1] and another from [OlapStop+1, CurrStop]. If OlapStart is
      // equal to CurrStart, the first split interval is unnecessary. Ditto for
      // when OlapStop is equal to CurrStop, we omit the second split interval.
      It.erase();
      if (CurrStart < OlapStart)
        insert(CurrStart, OlapStart - 1);
      if (OlapStop < CurrStop)
        insert(OlapStop + 1, CurrStop);
    }
  }
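
  // Set-algebra sketch (illustrative); each line below starts from
  // A = {[1, 5]} and B = {[4, 8]}:
  //
  //   A |= B;                       // union:        A becomes {[1, 8]}
  //   A &= B;                       // intersection: A becomes {[4, 5]}
  //   A.intersectWithComplement(B); // difference:   A becomes {[1, 3]}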

  bool operator==(const ThisT &RHS) const {
    // We cannot just use std::equal because it checks the dereferenced values
    // of an iterator pair for equality, not the iterators themselves. In our
    // case that results in comparison of the (unused) IntervalMap values.
    auto ItL = Intervals.begin();
    auto ItR = RHS.Intervals.begin();
    while (ItL != Intervals.end() && ItR != RHS.Intervals.end() &&
           ItL.start() == ItR.start() && ItL.stop() == ItR.stop()) {
      ++ItL;
      ++ItR;
    }
    return ItL == Intervals.end() && ItR == RHS.Intervals.end();
  }

  bool operator!=(const ThisT &RHS) const { return !operator==(RHS); }

  class const_iterator {
    friend class CoalescingBitVector;

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = IndexT;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

  private:
    // For performance reasons, make the offset at the end different than the
    // one used in \ref begin, to optimize the common `It == end()` pattern.
    static constexpr unsigned kIteratorAtTheEndOffset = ~0u;

    UnderlyingIterator MapIterator;
    unsigned OffsetIntoMapIterator = 0;

    // Querying the start/stop of an IntervalMap iterator can be very expensive.
    // Cache these values for performance reasons.
    IndexT CachedStart = IndexT();
    IndexT CachedStop = IndexT();

    void setToEnd() {
      OffsetIntoMapIterator = kIteratorAtTheEndOffset;
      CachedStart = IndexT();
      CachedStop = IndexT();
    }

    /// MapIterator has just changed, reset the cached state to point to the
    /// start of the new underlying iterator.
    void resetCache() {
      if (MapIterator.valid()) {
        OffsetIntoMapIterator = 0;
        CachedStart = MapIterator.start();
        CachedStop = MapIterator.stop();
      } else {
        setToEnd();
      }
    }

    /// Advance the iterator to \p Index, if it is contained within the current
    /// interval. The public-facing method which supports advancing past the
    /// current interval is \ref advanceToLowerBound.
    void advanceTo(IndexT Index) {
      assert(Index <= CachedStop && "Cannot advance to OOB index");
      if (Index < CachedStart)
        // We're already past this index.
        return;
      OffsetIntoMapIterator = Index - CachedStart;
    }

    const_iterator(UnderlyingIterator MapIt) : MapIterator(MapIt) {
      resetCache();
    }

  public:
    const_iterator() { setToEnd(); }

    bool operator==(const const_iterator &RHS) const {
      // Do /not/ compare MapIterator for equality, as this is very expensive.
      // The cached start/stop values make that check unnecessary.
      return std::tie(OffsetIntoMapIterator, CachedStart, CachedStop) ==
             std::tie(RHS.OffsetIntoMapIterator, RHS.CachedStart,
                      RHS.CachedStop);
    }

    bool operator!=(const const_iterator &RHS) const {
      return !operator==(RHS);
    }

    IndexT operator*() const { return CachedStart + OffsetIntoMapIterator; }

    const_iterator &operator++() { // Pre-increment (++It).
      if (CachedStart + OffsetIntoMapIterator < CachedStop) {
        // Keep going within the current interval.
        ++OffsetIntoMapIterator;
      } else {
        // We reached the end of the current interval: advance.
        ++MapIterator;
        resetCache();
      }
      return *this;
    }

    const_iterator operator++(int) { // Post-increment (It++).
      const_iterator tmp = *this;
      operator++();
      return tmp;
    }

    /// Advance the iterator to the first set bit AT, OR AFTER, \p Index. If
    /// no such set bit exists, advance to end(). This is like std::lower_bound.
    /// This is useful if \p Index is close to the current iterator position.
    /// However, unlike \ref find(), this has worst-case O(n) performance.
    void advanceToLowerBound(IndexT Index) {
      if (OffsetIntoMapIterator == kIteratorAtTheEndOffset)
        return;

      // Advance to the first interval containing (or past) Index, or to end().
      while (Index > CachedStop) {
        ++MapIterator;
        resetCache();
        if (OffsetIntoMapIterator == kIteratorAtTheEndOffset)
          return;
      }

      advanceTo(Index);
    }
  };

  const_iterator begin() const { return const_iterator(Intervals.begin()); }

  const_iterator end() const { return const_iterator(); }

  /// Return an iterator pointing to the first set bit AT, OR AFTER, \p Index.
  /// If no such set bit exists, return end(). This is like std::lower_bound.
  /// This has worst-case logarithmic performance (roughly O(log(gaps between
  /// contiguous ranges))).
  const_iterator find(IndexT Index) const {
    auto UnderlyingIt = Intervals.find(Index);
    if (UnderlyingIt == Intervals.end())
      return end();
    auto It = const_iterator(UnderlyingIt);
    It.advanceTo(Index);
    return It;
  }

  /// Return a range iterator which iterates over all of the set bits in the
  /// half-open range [Start, End).
  iterator_range<const_iterator> half_open_range(IndexT Start,
                                                 IndexT End) const {
    assert(Start < End && "Not a valid range");
    auto StartIt = find(Start);
    if (StartIt == end() || *StartIt >= End)
      return {end(), end()};
    auto EndIt = StartIt;
    EndIt.advanceToLowerBound(End);
    return {StartIt, EndIt};
  }
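
  // Iteration sketch (illustrative): with bits {1, 2, 3, 10} set,
  //
  //   for (unsigned Bit : BV.half_open_range(2, 11))
  //     ...                 // visits 2, 3, and 10
  //   auto It = BV.find(5); // points at 10, the first set bit >= 5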

  void print(raw_ostream &OS) const {
    OS << "{";
    for (auto It = Intervals.begin(), End = Intervals.end(); It != End;
         ++It) {
      OS << "[" << It.start();
      if (It.start() != It.stop())
        OS << ", " << It.stop();
      OS << "]";
    }
    OS << "}";
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  LLVM_DUMP_METHOD void dump() const {
    // LLDB swallows the first line of output after calling dump(). Add
    // newlines before/after the braces to work around this.
    dbgs() << "\n";
    print(dbgs());
    dbgs() << "\n";
  }
#endif

private:
  void insert(IndexT Start, IndexT End) { Intervals.insert(Start, End, 0); }

  /// Record the overlaps between \p this and \p Other in \p Overlaps. Return
  /// true if there is any overlap.
  bool getOverlaps(const ThisT &Other,
                   SmallVectorImpl<IntervalT> &Overlaps) const {
    for (IntervalMapOverlaps<MapT, MapT> I(Intervals, Other.Intervals);
         I.valid(); ++I)
      Overlaps.emplace_back(I.start(), I.stop());
    assert(llvm::is_sorted(Overlaps,
                           [](IntervalT LHS, IntervalT RHS) {
                             return LHS.second < RHS.first;
                           }) &&
           "Overlaps must be sorted");
    return !Overlaps.empty();
  }

  /// Given the set of overlaps between this and some other bitvector, and an
  /// interval [Start, Stop] from that bitvector, determine the portions of the
  /// interval which do not overlap with this.
  void getNonOverlappingParts(IndexT Start, IndexT Stop,
                              const SmallVectorImpl<IntervalT> &Overlaps,
                              SmallVectorImpl<IntervalT> &NonOverlappingParts) {
    IndexT NextUncoveredBit = Start;
    for (IntervalT Overlap : Overlaps) {
      IndexT OlapStart, OlapStop;
      std::tie(OlapStart, OlapStop) = Overlap;

      // [Start;Stop] and [OlapStart;OlapStop] overlap iff OlapStart <= Stop
      // and Start <= OlapStop.
      bool DoesOverlap = OlapStart <= Stop && Start <= OlapStop;
      if (!DoesOverlap)
        continue;

      // Cover the range [NextUncoveredBit, OlapStart). This puts the start of
      // the next uncovered range at OlapStop+1.
      if (NextUncoveredBit < OlapStart)
        NonOverlappingParts.emplace_back(NextUncoveredBit, OlapStart - 1);
      NextUncoveredBit = OlapStop + 1;
      if (NextUncoveredBit > Stop)
        break;
    }
    if (NextUncoveredBit <= Stop)
      NonOverlappingParts.emplace_back(NextUncoveredBit, Stop);
  }
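
  // Worked example (illustrative): for [Start, Stop] = [2, 10] and
  // Overlaps = {[4, 5], [8, 8]}, the non-overlapping parts are [2, 3],
  // [6, 7], and [9, 10].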

  Allocator *Alloc;
  MapT Intervals;
};

} // namespace llvm

#endif // LLVM_ADT_COALESCINGBITVECTOR_H
//===- llvm/ADT/FoldingSet.h - Uniquing Hash Set ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines a hash set that can be used to remove duplication of nodes
/// in a graph.  This code was originally created by Chris Lattner for use with
/// SelectionDAGCSEMap, but was isolated to provide use across the LLVM
/// codebase.
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_FOLDINGSET_H
#define LLVM_ADT_FOLDINGSET_H

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>

namespace llvm {

/// This folding set is used for two purposes:
///   1. Given information about a node we want to create, look up the unique
///      instance of the node in the set.  If the node already exists, return
///      it; otherwise, return the bucket it should be inserted into.
///   2. Given a node that has already been created, remove it from the set.
///
/// This class is implemented as a single-link chained hash table, where the
/// "buckets" are actually the nodes themselves (the next pointer is in the
/// node).  The last node points back to the bucket to simplify node removal.
///
/// Any node that is to be included in the folding set must be a subclass of
/// FoldingSetNode.  The node class must also define a Profile method used to
/// establish the unique bits of data for the node.  The Profile method is
/// passed a FoldingSetNodeID object which is used to gather the bits.  Just
/// call one of the Add* functions defined in the FoldingSetNodeID class.
/// NOTE: The folding set does not own the nodes; it is the user's
/// responsibility to dispose of them.
///
/// Eg.
///    class MyNode : public FoldingSetNode {
///    private:
///      std::string Name;
///      unsigned Value;
///    public:
///      MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
///       ...
///      void Profile(FoldingSetNodeID &ID) const {
///        ID.AddString(Name);
///        ID.AddInteger(Value);
///      }
///      ...
///    };
///
/// To define the folding set itself use the FoldingSet template:
///
/// Eg.
///    FoldingSet<MyNode> MyFoldingSet;
///
/// Four public methods are available to manipulate the folding set:
///
/// 1) If you have an existing node that you want to add to the set, but are
/// not sure whether it is already present, call:
///
///    MyNode *M = MyFoldingSet.GetOrInsertNode(N);
///
/// If the result is equal to the input then the node has been inserted.
/// Otherwise, the result is the node already existing in the folding set, and
/// the input can be discarded (use the result instead).
///
/// 2) If you are ready to construct a node but want to check if it already
/// exists, call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
/// check:
///
///   FoldingSetNodeID ID;
///   ID.AddString(Name);
///   ID.AddInteger(Value);
///   void *InsertPoint;
///
///    MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
///
/// If found then M will be non-NULL, else InsertPoint will point to where it
/// should be inserted using InsertNode.
///
/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert a
/// new node with InsertNode:
///
///    MyFoldingSet.InsertNode(M, InsertPoint);
///
/// 4) Finally, if you want to remove a node from the folding set, call:
///
///    bool WasRemoved = MyFoldingSet.RemoveNode(M);
///
/// The result indicates whether the node existed in the folding set.

class FoldingSetNodeID;
class StringRef;

//===----------------------------------------------------------------------===//
/// FoldingSetBase - Implements the folding set functionality.  The main
/// structure is an array of buckets.  Each bucket is indexed by the hash of
/// the nodes it contains.  The bucket itself points to the nodes contained
/// in the bucket via a singly linked list.  The last node in the list points
/// back to the bucket to facilitate node removal.
///
class FoldingSetBase {
protected:
  /// Buckets - Array of bucket chains.
  void **Buckets;

  /// NumBuckets - Length of the Buckets array.  Always a power of 2.
  unsigned NumBuckets;

  /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
  /// is greater than twice the number of buckets.
  unsigned NumNodes;

  explicit FoldingSetBase(unsigned Log2InitSize = 6);
  FoldingSetBase(FoldingSetBase &&Arg);
  FoldingSetBase &operator=(FoldingSetBase &&RHS);
  ~FoldingSetBase();

public:
  //===--------------------------------------------------------------------===//
  /// Node - This class is used to maintain the singly linked bucket list in
  /// a folding set.
  class Node {
  private:
    // NextInFoldingSetBucket - next link in the bucket list.
    void *NextInFoldingSetBucket = nullptr;

  public:
    Node() = default;

    // Accessors
    void *getNextInBucket() const { return NextInFoldingSetBucket; }
    void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
  };

  /// clear - Remove all nodes from the folding set.
  void clear();

  /// size - Returns the number of nodes in the folding set.
  unsigned size() const { return NumNodes; }

  /// empty - Returns true if there are no nodes in the folding set.
  bool empty() const { return NumNodes == 0; }

  /// capacity - Returns the number of nodes permitted in the folding set
  /// before a rebucket operation is performed.
  unsigned capacity() {
    // We allow a load factor of up to 2.0, so our capacity is NumBuckets * 2.
    return NumBuckets * 2;
  }

protected:
  /// Functions provided by the derived class to compute folding properties.
  /// This is effectively a vtable for FoldingSetBase, except that we don't
  /// actually store a pointer to it in the object.
  struct FoldingSetInfo {
    /// GetNodeProfile - Instantiations of the FoldingSet template implement
    /// this function to gather data bits for the given node.
    void (*GetNodeProfile)(const FoldingSetBase *Self, Node *N,
                           FoldingSetNodeID &ID);

    /// NodeEquals - Instantiations of the FoldingSet template implement
    /// this function to compare the given node with the given ID.
    bool (*NodeEquals)(const FoldingSetBase *Self, Node *N,
                       const FoldingSetNodeID &ID, unsigned IDHash,
                       FoldingSetNodeID &TempID);

    /// ComputeNodeHash - Instantiations of the FoldingSet template implement
    /// this function to compute a hash value for the given node.
    unsigned (*ComputeNodeHash)(const FoldingSetBase *Self, Node *N,
                                FoldingSetNodeID &TempID);
  };

private:
  /// GrowHashTable - Double the size of the hash table and rehash everything.
  void GrowHashTable(const FoldingSetInfo &Info);

  /// GrowBucketCount - resize the hash table and rehash everything.
  /// NewBucketCount must be a power of two, and must be greater than the old
  /// bucket count.
  void GrowBucketCount(unsigned NewBucketCount, const FoldingSetInfo &Info);

protected:
  // The below methods are protected to encourage subclasses to provide a more
  // type-safe API.

  /// reserve - Increase the number of buckets such that adding the
  /// EltCount-th node won't cause a rebucket operation. reserve is permitted
  /// to allocate more space than requested by EltCount.
  void reserve(unsigned EltCount, const FoldingSetInfo &Info);

  /// RemoveNode - Remove a node from the folding set, returning true if one
  /// was removed or false if the node was not in the folding set.
  bool RemoveNode(Node *N);

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N' and return
  /// it instead.
  Node *GetOrInsertNode(Node *N, const FoldingSetInfo &Info);

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
  /// return it.  If not, return the insertion token that will make insertion
  /// faster.
  Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos,
                            const FoldingSetInfo &Info);

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.  InsertPos must be obtained from
  /// FindNodeOrInsertPos.
  void InsertNode(Node *N, void *InsertPos, const FoldingSetInfo &Info);
};

//===----------------------------------------------------------------------===//

/// DefaultFoldingSetTrait - This class provides default implementations
/// for FoldingSetTrait implementations.
template<typename T> struct DefaultFoldingSetTrait {
  static void Profile(const T &X, FoldingSetNodeID &ID) {
    X.Profile(ID);
  }
  static void Profile(T &X, FoldingSetNodeID &ID) {
    X.Profile(ID);
  }

  // Equals - Test if the profile for X would match ID, using TempID
  // to compute a temporary ID if necessary. The default implementation
  // just calls Profile and does a regular comparison. Implementations
  // can override this to provide more efficient implementations.
  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
                            FoldingSetNodeID &TempID);

  // ComputeHash - Compute a hash value for X, using TempID to
  // compute a temporary ID if necessary. The default implementation
  // just calls Profile and does a regular hash computation.
  // Implementations can override this to provide more efficient
  // implementations.
  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
};

/// FoldingSetTrait - This trait class is used to define behavior of how
/// to "profile" (in the FoldingSet parlance) an object of a given type.
/// The default behavior is to invoke a 'Profile' method on an object, but
/// through template specialization the behavior can be tailored for specific
/// types.  Combined with the FoldingSetNodeWrapper class, one can add objects
/// to FoldingSets that were not originally designed to have that behavior.
template <typename T, typename Enable = void>
struct FoldingSetTrait : public DefaultFoldingSetTrait<T> {};

/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
/// for ContextualFoldingSets.
template<typename T, typename Ctx>
struct DefaultContextualFoldingSetTrait {
  static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
    X.Profile(ID, Context);
  }

  static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
                            FoldingSetNodeID &TempID, Ctx Context);
  static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
                                     Ctx Context);
};

/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
/// ContextualFoldingSets.
template<typename T, typename Ctx> struct ContextualFoldingSetTrait
  : public DefaultContextualFoldingSetTrait<T, Ctx> {};

//===--------------------------------------------------------------------===//
/// FoldingSetNodeIDRef - This class describes a reference to an interned
/// FoldingSetNodeID, which can be useful for storing node id data rather
/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
/// is often much larger than necessary, and the possibility of heap
/// allocation means it requires a non-trivial destructor call.
class FoldingSetNodeIDRef {
  const unsigned *Data = nullptr;
  size_t Size = 0;

public:
  FoldingSetNodeIDRef() = default;
  FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}

  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
  /// used to lookup the node in the FoldingSetBase.
  unsigned ComputeHash() const {
    return static_cast<unsigned>(hash_combine_range(Data, Data + Size));
  }

  bool operator==(FoldingSetNodeIDRef) const;

  bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }

  /// Used to compare the "ordering" of two nodes as defined by the
  /// profiled bits and their ordering defined by memcmp().
  bool operator<(FoldingSetNodeIDRef) const;

  const unsigned *getData() const { return Data; }
  size_t getSize() const { return Size; }
};

//===--------------------------------------------------------------------===//
/// FoldingSetNodeID - This class is used to gather all the unique data bits of
/// a node.  When all the bits are gathered this class is used to produce a
/// hash value for the node.
class FoldingSetNodeID {
  /// Bits - Vector of all the data bits that make the node unique.
  /// Use a SmallVector to avoid a heap allocation in the common case.
  SmallVector<unsigned, 32> Bits;

public:
  FoldingSetNodeID() = default;

  FoldingSetNodeID(FoldingSetNodeIDRef Ref)
    : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}

  /// Add* - Add various data types to Bit data.
  void AddPointer(const void *Ptr) {
    // Note: this adds pointers to the hash using sizes and endianness that
    // depend on the host. It doesn't matter, however, because hashing on
    // pointer values is inherently unstable. Nothing should depend on the
    // ordering of nodes in the folding set.
    static_assert(sizeof(uintptr_t) <= sizeof(unsigned long long),
                  "unexpected pointer size");
    AddInteger(reinterpret_cast<uintptr_t>(Ptr));
  }
  void AddInteger(signed I) { Bits.push_back(I); }
  void AddInteger(unsigned I) { Bits.push_back(I); }
  void AddInteger(long I) { AddInteger((unsigned long)I); }
  void AddInteger(unsigned long I) {
    if (sizeof(long) == sizeof(int))
      AddInteger(unsigned(I));
    else if (sizeof(long) == sizeof(long long)) {
      AddInteger((unsigned long long)I);
    } else {
      llvm_unreachable("unexpected sizeof(long)");
    }
  }
  void AddInteger(long long I) { AddInteger((unsigned long long)I); }
  void AddInteger(unsigned long long I) {
    AddInteger(unsigned(I));
    AddInteger(unsigned(I >> 32));
  }

  void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
  void AddString(StringRef String);
  void AddNodeID(const FoldingSetNodeID &ID);

  template <typename T>
  inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }

  /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
  /// object to be used to compute a new profile.
  inline void clear() { Bits.clear(); }

  /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
  /// to lookup the node in the FoldingSetBase.
  unsigned ComputeHash() const {
    return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash();
  }

  /// operator== - Used to compare two nodes to each other.
  bool operator==(const FoldingSetNodeID &RHS) const;
  bool operator==(const FoldingSetNodeIDRef RHS) const;

  bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
  bool operator!=(const FoldingSetNodeIDRef RHS) const {
    return !(*this == RHS);
  }

  /// Used to compare the "ordering" of two nodes as defined by the
  /// profiled bits and their ordering defined by memcmp().
  bool operator<(const FoldingSetNodeID &RHS) const;
  bool operator<(const FoldingSetNodeIDRef RHS) const;

  /// Intern - Copy this node's data to a memory region allocated from the
  /// given allocator and return a FoldingSetNodeIDRef describing the
  /// interned data.
  FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
};
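
// Usage sketch (illustrative): gather bits by hand, mirroring what a node's
// Profile() method would do, then compute the hash used for bucketing.
//
//   FoldingSetNodeID ID;
//   ID.AddInteger(42u);
//   ID.AddString("foo");
//   ID.AddBoolean(true);
//   unsigned Hash = ID.ComputeHash();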

// Convenience type to hide the implementation of the folding set.
using FoldingSetNode = FoldingSetBase::Node;
template<class T> class FoldingSetIterator;
template<class T> class FoldingSetBucketIterator;

// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
// require the definition of FoldingSetNodeID.
template<typename T>
inline bool
DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
                                  unsigned /*IDHash*/,
                                  FoldingSetNodeID &TempID) {
  FoldingSetTrait<T>::Profile(X, TempID);
  return TempID == ID;
}
template<typename T>
inline unsigned
DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
  FoldingSetTrait<T>::Profile(X, TempID);
  return TempID.ComputeHash();
}
template<typename T, typename Ctx>
inline bool
DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
                                                 const FoldingSetNodeID &ID,
                                                 unsigned /*IDHash*/,
                                                 FoldingSetNodeID &TempID,
                                                 Ctx Context) {
  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
  return TempID == ID;
}
template<typename T, typename Ctx>
inline unsigned
DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
                                                      FoldingSetNodeID &TempID,
                                                      Ctx Context) {
  ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
  return TempID.ComputeHash();
}

//===----------------------------------------------------------------------===//
/// FoldingSetImpl - An implementation detail that lets us share code between
/// FoldingSet and ContextualFoldingSet.
template <class Derived, class T> class FoldingSetImpl : public FoldingSetBase {
protected:
  explicit FoldingSetImpl(unsigned Log2InitSize)
      : FoldingSetBase(Log2InitSize) {}

  FoldingSetImpl(FoldingSetImpl &&Arg) = default;
  FoldingSetImpl &operator=(FoldingSetImpl &&RHS) = default;
  ~FoldingSetImpl() = default;

public:
  using iterator = FoldingSetIterator<T>;

  iterator begin() { return iterator(Buckets); }
  iterator end() { return iterator(Buckets+NumBuckets); }

  using const_iterator = FoldingSetIterator<const T>;

  const_iterator begin() const { return const_iterator(Buckets); }
  const_iterator end() const { return const_iterator(Buckets+NumBuckets); }

  using bucket_iterator = FoldingSetBucketIterator<T>;

  bucket_iterator bucket_begin(unsigned hash) {
    return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
  }

  bucket_iterator bucket_end(unsigned hash) {
    return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
  }

  /// reserve - Increase the number of buckets such that adding the
  /// EltCount-th node won't cause a rebucket operation. reserve is permitted
  /// to allocate more space than requested by EltCount.
  void reserve(unsigned EltCount) {
    return FoldingSetBase::reserve(EltCount, Derived::getFoldingSetInfo());
  }

  /// RemoveNode - Remove a node from the folding set, returning true if one
  /// was removed or false if the node was not in the folding set.
  bool RemoveNode(T *N) {
    return FoldingSetBase::RemoveNode(N);
  }

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N' and
  /// return it instead.
  T *GetOrInsertNode(T *N) {
    return static_cast<T *>(
        FoldingSetBase::GetOrInsertNode(N, Derived::getFoldingSetInfo()));
  }

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
  /// return it.  If not, return the insertion token that will make insertion
  /// faster.
  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
    return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(
        ID, InsertPos, Derived::getFoldingSetInfo()));
  }

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.  InsertPos must be obtained from
  /// FindNodeOrInsertPos.
  void InsertNode(T *N, void *InsertPos) {
    FoldingSetBase::InsertNode(N, InsertPos, Derived::getFoldingSetInfo());
  }

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.
  void InsertNode(T *N) {
    T *Inserted = GetOrInsertNode(N);
    (void)Inserted;
    assert(Inserted == N && "Node already inserted!");
  }
};

//===----------------------------------------------------------------------===//
/// FoldingSet - This template class is used to instantiate a specialized
/// implementation of the folding set for the node class T.  T must be a
/// subclass of FoldingSetNode and implement a Profile function.
///
/// Note that this set type is movable and move-assignable. However, its
/// moved-from state is not a valid state for anything other than
/// move-assigning and destroying. This is primarily to enable movable APIs
/// that incorporate these objects.
template <class T>
class FoldingSet : public FoldingSetImpl<FoldingSet<T>, T> {
  using Super = FoldingSetImpl<FoldingSet, T>;
  using Node = typename Super::Node;

  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
  /// way to convert nodes into a unique specifier.
  static void GetNodeProfile(const FoldingSetBase *, Node *N,
                             FoldingSetNodeID &ID) {
    T *TN = static_cast<T *>(N);
    FoldingSetTrait<T>::Profile(*TN, ID);
  }

  /// NodeEquals - Instantiations may optionally provide a way to compare a
  /// node with a specified ID.
  static bool NodeEquals(const FoldingSetBase *, Node *N,
                         const FoldingSetNodeID &ID, unsigned IDHash,
                         FoldingSetNodeID &TempID) {
    T *TN = static_cast<T *>(N);
    return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
  }

  /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
  /// hash value directly from a node.
  static unsigned ComputeNodeHash(const FoldingSetBase *, Node *N,
                                  FoldingSetNodeID &TempID) {
    T *TN = static_cast<T *>(N);
    return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
  }

  static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() {
    static constexpr FoldingSetBase::FoldingSetInfo Info = {
        GetNodeProfile, NodeEquals, ComputeNodeHash};
    return Info;
  }
  friend Super;

public:
  explicit FoldingSet(unsigned Log2InitSize = 6) : Super(Log2InitSize) {}
  FoldingSet(FoldingSet &&Arg) = default;
  FoldingSet &operator=(FoldingSet &&RHS) = default;
};

//===----------------------------------------------------------------------===//
/// ContextualFoldingSet - This template class is a further refinement
/// of FoldingSet which provides a context argument when calling
/// Profile on its nodes.  Currently, that argument is fixed at
/// initialization time.
///
/// T must be a subclass of FoldingSetNode and implement a Profile
/// function with signature
///   void Profile(FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
class ContextualFoldingSet
    : public FoldingSetImpl<ContextualFoldingSet<T, Ctx>, T> {
  // Unfortunately, this can't derive from FoldingSet<T> because the
  // construction of the vtable for FoldingSet<T> requires
  // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
  // requires a single-argument T::Profile().

  using Super = FoldingSetImpl<ContextualFoldingSet, T>;
  using Node = typename Super::Node;

  Ctx Context;

  static const Ctx &getContext(const FoldingSetBase *Base) {
    return static_cast<const ContextualFoldingSet*>(Base)->Context;
  }

  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
  /// way to convert nodes into a unique specifier.
  static void GetNodeProfile(const FoldingSetBase *Base, Node *N,
                             FoldingSetNodeID &ID) {
    T *TN = static_cast<T *>(N);
    ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, getContext(Base));
  }

  static bool NodeEquals(const FoldingSetBase *Base, Node *N,
                         const FoldingSetNodeID &ID, unsigned IDHash,
                         FoldingSetNodeID &TempID) {
    T *TN = static_cast<T *>(N);
    return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
                                                     getContext(Base));
  }

  static unsigned ComputeNodeHash(const FoldingSetBase *Base, Node *N,
                                  FoldingSetNodeID &TempID) {
    T *TN = static_cast<T *>(N);
    return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID,
                                                          getContext(Base));
  }

  static const FoldingSetBase::FoldingSetInfo &getFoldingSetInfo() {
    static constexpr FoldingSetBase::FoldingSetInfo Info = {
        GetNodeProfile, NodeEquals, ComputeNodeHash};
    return Info;
  }
  friend Super;

public:
  explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
      : Super(Log2InitSize), Context(Context) {}

  Ctx getContext() const { return Context; }
};
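
// Sketch (illustrative): the fixed context is forwarded to every Profile()
// call, so nodes can be profiled relative to external state. CtxNode and
// SomeContext are hypothetical.
//
//   struct CtxNode : FoldingSetNode {
//     void Profile(FoldingSetNodeID &ID, SomeContext &C) const;
//   };
//   ContextualFoldingSet<CtxNode, SomeContext &> Nodes(Ctx);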

//===----------------------------------------------------------------------===//
/// FoldingSetVector - This template class combines a FoldingSet and a vector
/// to provide the interface of FoldingSet but with deterministic iteration
/// order based on the insertion order. T must be a subclass of FoldingSetNode
/// and implement a Profile function.
template <class T, class VectorT = SmallVector<T*, 8>>
class FoldingSetVector {
  FoldingSet<T> Set;
  VectorT Vector;

public:
  explicit FoldingSetVector(unsigned Log2InitSize = 6) : Set(Log2InitSize) {}

  using iterator = pointee_iterator<typename VectorT::iterator>;

  iterator begin() { return Vector.begin(); }
  iterator end()   { return Vector.end(); }

  using const_iterator = pointee_iterator<typename VectorT::const_iterator>;

  const_iterator begin() const { return Vector.begin(); }
  const_iterator end()   const { return Vector.end(); }

  /// clear - Remove all nodes from the folding set.
  void clear() { Set.clear(); Vector.clear(); }

  /// FindNodeOrInsertPos - Look up the node specified by ID.  If it exists,
  /// return it.  If not, return the insertion token that will make insertion
  /// faster.
  T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
    return Set.FindNodeOrInsertPos(ID, InsertPos);
  }

  /// GetOrInsertNode - If there is an existing simple Node exactly
  /// equal to the specified node, return it.  Otherwise, insert 'N' and
  /// return it instead.
  T *GetOrInsertNode(T *N) {
    T *Result = Set.GetOrInsertNode(N);
    if (Result == N) Vector.push_back(N);
    return Result;
  }

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.  InsertPos must be obtained from
  /// FindNodeOrInsertPos.
  void InsertNode(T *N, void *InsertPos) {
    Set.InsertNode(N, InsertPos);
    Vector.push_back(N);
  }

  /// InsertNode - Insert the specified node into the folding set, knowing that
  /// it is not already in the folding set.
  void InsertNode(T *N) {
    Set.InsertNode(N);
    Vector.push_back(N);
  }

  /// size - Returns the number of nodes in the folding set.
  unsigned size() const { return Set.size(); }

  /// empty - Returns true if there are no nodes in the folding set.
  bool empty() const { return Set.empty(); }
};
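
// Sketch (illustrative): FoldingSetVector behaves like FoldingSet, but
// iteration visits nodes in insertion order, which helps when deterministic
// output is required. MyNode is the example class from the comment above.
//
//   FoldingSetVector<MyNode> Uniqued;
//   Uniqued.GetOrInsertNode(A);
//   Uniqued.GetOrInsertNode(B);
//   for (MyNode &N : Uniqued) // visits A, then B
//     ...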

//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
protected:
  FoldingSetNode *NodePtr;

  FoldingSetIteratorImpl(void **Bucket);

  void advance();

public:
  bool operator==(const FoldingSetIteratorImpl &RHS) const {
    return NodePtr == RHS.NodePtr;
  }
  bool operator!=(const FoldingSetIteratorImpl &RHS) const {
    return NodePtr != RHS.NodePtr;
  }
};

template <class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
public:
  explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}

  T &operator*() const {
    return *static_cast<T*>(NodePtr);
  }

  T *operator->() const {
    return static_cast<T*>(NodePtr);
  }

  inline FoldingSetIterator &operator++() {          // Preincrement
    advance();
    return *this;
  }
  FoldingSetIterator operator++(int) {        // Postincrement
    FoldingSetIterator tmp = *this; ++*this; return tmp;
  }
};

//===----------------------------------------------------------------------===//
/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
/// shared by all folding sets, which knows how to walk a particular bucket
/// of a folding set hash table.
class FoldingSetBucketIteratorImpl {
protected:
  void *Ptr;

  explicit FoldingSetBucketIteratorImpl(void **Bucket);

  FoldingSetBucketIteratorImpl(void **Bucket, bool) : Ptr(Bucket) {}

  void advance() {
    void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
    uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
    Ptr = reinterpret_cast<void*>(x);
  }

public:
  bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
    return Ptr != RHS.Ptr;
  }
};

template <class T>
class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
public:
  explicit FoldingSetBucketIterator(void **Bucket) :
    FoldingSetBucketIteratorImpl(Bucket) {}

  FoldingSetBucketIterator(void **Bucket, bool) :
    FoldingSetBucketIteratorImpl(Bucket, true) {}

  T &operator*() const { return *static_cast<T*>(Ptr); }
  T *operator->() const { return static_cast<T*>(Ptr); }

  inline FoldingSetBucketIterator &operator++() { // Preincrement
    advance();
    return *this;
  }
  FoldingSetBucketIterator operator++(int) {      // Postincrement
    FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
  }
};

//===----------------------------------------------------------------------===//
/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
/// types in an enclosing object so that they can be inserted into FoldingSets.
template <typename T>
class FoldingSetNodeWrapper : public FoldingSetNode {
  T data;

public:
  template <typename... Ts>
  explicit FoldingSetNodeWrapper(Ts &&... Args)
      : data(std::forward<Ts>(Args)...) {}

  void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }

  T &getValue() { return data; }
  const T &getValue() const { return data; }

  operator T&() { return data; }
  operator const T&() const { return data; }
};
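
// Sketch (illustrative): uniquing plain values that were not designed as
// FoldingSetNodes, relying on the pointer and std::pair trait
// specializations below.
//
//   using KeyT = std::pair<const void *, const void *>;
//   FoldingSet<FoldingSetNodeWrapper<KeyT>> Keys;
//   auto *N = new FoldingSetNodeWrapper<KeyT>(P1, P2); // P1, P2: pointers
//   auto *Canonical = Keys.GetOrInsertNode(N);
//   if (Canonical != N)
//     delete N; // an equal pair was already interned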

//===----------------------------------------------------------------------===//
/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
/// a FoldingSetNodeID value rather than requiring the node to recompute it
/// each time it is needed. This trades space for speed (which can be
/// significant if the ID is long), and it also permits nodes to drop
/// information that would otherwise only be required for recomputing an ID.
class FastFoldingSetNode : public FoldingSetNode {
  FoldingSetNodeID FastID;

protected:
  explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}

public:
  void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
};

//===----------------------------------------------------------------------===//
// Partial specializations of FoldingSetTrait.

template<typename T> struct FoldingSetTrait<T*> {
  static inline void Profile(T *X, FoldingSetNodeID &ID) {
    ID.AddPointer(X);
  }
};
template <typename T1, typename T2>
struct FoldingSetTrait<std::pair<T1, T2>> {
  static inline void Profile(const std::pair<T1, T2> &P,
                             FoldingSetNodeID &ID) {
    ID.Add(P.first);
    ID.Add(P.second);
  }
};

template <typename T>
struct FoldingSetTrait<T, std::enable_if_t<std::is_enum<T>::value>> {
  static void Profile(const T &X, FoldingSetNodeID &ID) {
    ID.AddInteger(static_cast<std::underlying_type_t<T>>(X));
  }
};

} // end namespace llvm

#endif // LLVM_ADT_FOLDINGSET_H
//===- GenericCycleInfo.h - Info for Cycles in any IR ------*- C++ -*------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Find all cycles in a control-flow graph, including irreducible loops.
///
/// See docs/CycleTerminology.rst for a formal definition of cycles.
///
/// Briefly:
/// - A cycle is a generalization of a loop which can represent
///   irreducible control flow.
/// - Cycles identified in a program are implementation defined,
///   depending on the DFS traversal chosen.
/// - Cycles are well-nested, and form a forest with a parent-child
///   relationship.
/// - In any choice of DFS, every natural loop L is represented by a
///   unique cycle C which is a superset of L.
/// - In the absence of irreducible control flow, the cycles are
///   exactly the natural loops in the program.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GENERICCYCLEINFO_H
#define LLVM_ADT_GENERICCYCLEINFO_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GenericSSAContext.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

template <typename ContextT> class GenericCycleInfo;
template <typename ContextT> class GenericCycleInfoCompute;

/// A possibly irreducible generalization of a \ref Loop.
template <typename ContextT> class GenericCycle {
public:
  using BlockT = typename ContextT::BlockT;
  using FunctionT = typename ContextT::FunctionT;
  template <typename> friend class GenericCycleInfo;
  template <typename> friend class GenericCycleInfoCompute;

private:
  /// The parent cycle. Is null for the root "cycle". Top-level cycles point
  /// at the root.
  GenericCycle *ParentCycle = nullptr;

  /// The entry block(s) of the cycle. The header is the only entry if
  /// this is a loop. Is empty for the root "cycle", to avoid
  /// unnecessary memory use.
  SmallVector<BlockT *, 1> Entries;

  /// Child cycles, if any.
  std::vector<std::unique_ptr<GenericCycle>> Children;

  /// Basic blocks that are contained in the cycle, including entry blocks,
  /// and including blocks that are part of a child cycle.
  using BlockSetVectorT = SetVector<BlockT *, SmallVector<BlockT *, 8>,
                                    DenseSet<const BlockT *>, 8>;
  BlockSetVectorT Blocks;

  /// Depth of the cycle in the tree. The root "cycle" is at depth 0.
  ///
  /// \note Depths are not necessarily contiguous. However, child loops always
  ///       have strictly greater depth than their parents, and sibling loops
  ///       always have the same depth.
  unsigned Depth = 0;

  void clear() {
    Entries.clear();
    Children.clear();
    Blocks.clear();
    Depth = 0;
    ParentCycle = nullptr;
  }

  void appendEntry(BlockT *Block) { Entries.push_back(Block); }
  void appendBlock(BlockT *Block) { Blocks.insert(Block); }

  GenericCycle(const GenericCycle &) = delete;
  GenericCycle &operator=(const GenericCycle &) = delete;
  GenericCycle(GenericCycle &&Rhs) = delete;
  GenericCycle &operator=(GenericCycle &&Rhs) = delete;

public:
  GenericCycle() = default;

  /// \brief Whether the cycle is a natural loop.
  bool isReducible() const { return Entries.size() == 1; }

  BlockT *getHeader() const { return Entries[0]; }

  const SmallVectorImpl<BlockT *> &getEntries() const { return Entries; }

  /// \brief Return whether \p Block is an entry block of the cycle.
  bool isEntry(const BlockT *Block) const {
    return is_contained(Entries, Block);
  }

  /// \brief Return whether \p Block is contained in the cycle.
  bool contains(const BlockT *Block) const { return Blocks.contains(Block); }

  /// \brief Returns true iff this cycle contains \p C.
  ///
  /// Note: Non-strict containment check, i.e. returns true if C is the
  /// same cycle.
  bool contains(const GenericCycle *C) const;

  const GenericCycle *getParentCycle() const { return ParentCycle; }
  GenericCycle *getParentCycle() { return ParentCycle; }
  unsigned getDepth() const { return Depth; }

  /// Return all of the successor blocks of this cycle.
  ///
  /// These are the blocks _outside of the current cycle_ which are
  /// branched to.
  void getExitBlocks(SmallVectorImpl<BlockT *> &TmpStorage) const;

  /// Return the preheader block for this cycle. The preheader is well-defined
  /// for a reducible cycle (see docs/LoopTerminology.rst): it is the unique
  /// block entering the cycle, and its only edge is to the entry block.
  /// Return null for irreducible cycles.
  BlockT *getCyclePreheader() const;

  /// If the cycle has exactly one entry with exactly one predecessor, return
  /// it, otherwise return nullptr.
  BlockT *getCyclePredecessor() const;

  /// Iteration over child cycles.
  //@{
  using const_child_iterator_base =
      typename std::vector<std::unique_ptr<GenericCycle>>::const_iterator;
  struct const_child_iterator
      : iterator_adaptor_base<const_child_iterator, const_child_iterator_base> {
    using Base =
        iterator_adaptor_base<const_child_iterator, const_child_iterator_base>;

    const_child_iterator() = default;
    explicit const_child_iterator(const_child_iterator_base I) : Base(I) {}

    const const_child_iterator_base &wrapped() { return Base::wrapped(); }
    GenericCycle *operator*() const { return Base::I->get(); }
  };

  const_child_iterator child_begin() const {
    return const_child_iterator{Children.begin()};
  }
  const_child_iterator child_end() const {
    return const_child_iterator{Children.end()};
  }
  size_t getNumChildren() const { return Children.size(); }
  iterator_range<const_child_iterator> children() const {
    return llvm::make_range(const_child_iterator{Children.begin()},
                            const_child_iterator{Children.end()});
  }
  //@}

  /// Iteration over blocks in the cycle (including entry blocks).
  //@{
  using const_block_iterator = typename BlockSetVectorT::const_iterator;

  const_block_iterator block_begin() const {
    return const_block_iterator{Blocks.begin()};
  }
  const_block_iterator block_end() const {
    return const_block_iterator{Blocks.end()};
  }
  size_t getNumBlocks() const { return Blocks.size(); }
  iterator_range<const_block_iterator> blocks() const {
    return llvm::make_range(block_begin(), block_end());
  }
  //@}

  /// Iteration over entry blocks.
  //@{
  using const_entry_iterator =
      typename SmallVectorImpl<BlockT *>::const_iterator;

  size_t getNumEntries() const { return Entries.size(); }
  iterator_range<const_entry_iterator> entries() const {
    return llvm::make_range(Entries.begin(), Entries.end());
  }
  //@}

  Printable printEntries(const ContextT &Ctx) const {
    return Printable([this, &Ctx](raw_ostream &Out) {
      bool First = true;
      for (auto *Entry : Entries) {
        if (!First)
          Out << ' ';
        First = false;
        Out << Ctx.print(Entry);
      }
    });
  }

  Printable print(const ContextT &Ctx) const {
    return Printable([this, &Ctx](raw_ostream &Out) {
      Out << "depth=" << Depth << ": entries(" << printEntries(Ctx) << ')';

      for (auto *Block : Blocks) {
        if (isEntry(Block))
          continue;

        Out << ' ' << Ctx.print(Block);
      }
    });
  }
};

/// \brief Cycle information for a function.
template <typename ContextT> class GenericCycleInfo {
public:
  using BlockT = typename ContextT::BlockT;
  using CycleT = GenericCycle<ContextT>;
  using FunctionT = typename ContextT::FunctionT;
  template <typename> friend class GenericCycle;
  template <typename> friend class GenericCycleInfoCompute;

private:
  ContextT Context;

  /// Map basic blocks to their inner-most containing cycle.
  DenseMap<BlockT *, CycleT *> BlockMap;

  /// Map basic blocks to their top level containing cycle.
  DenseMap<BlockT *, CycleT *> BlockMapTopLevel;

  /// Top-level cycles discovered by any DFS.
  ///
  /// Note: The implementation treats the nullptr as the parent of
  /// every top-level cycle. See \ref contains for an example.
  std::vector<std::unique_ptr<CycleT>> TopLevelCycles;

  /// Move \p Child to \p NewParent by manipulating Children vectors.
  ///
  /// Note: This is an incomplete operation that does not update the depth of
  /// the subtree.
  void moveTopLevelCycleToNewParent(CycleT *NewParent, CycleT *Child);

public:
  GenericCycleInfo() = default;
  GenericCycleInfo(GenericCycleInfo &&) = default;
  GenericCycleInfo &operator=(GenericCycleInfo &&) = default;

  void clear();
  void compute(FunctionT &F);

  FunctionT *getFunction() const { return Context.getFunction(); }
  const ContextT &getSSAContext() const { return Context; }

  CycleT *getCycle(const BlockT *Block) const;
  unsigned getCycleDepth(const BlockT *Block) const;
  CycleT *getTopLevelParentCycle(BlockT *Block);

  /// Methods for debug and self-test.
  //@{
#ifndef NDEBUG
  bool validateTree() const;
#endif
  void print(raw_ostream &Out) const;
  void dump() const { print(dbgs()); }
  Printable print(const CycleT *Cycle) { return Cycle->print(Context); }
  //@}

  /// Iteration over top-level cycles.
  //@{
  using const_toplevel_iterator_base =
      typename std::vector<std::unique_ptr<CycleT>>::const_iterator;
  struct const_toplevel_iterator
      : iterator_adaptor_base<const_toplevel_iterator,
                              const_toplevel_iterator_base> {
    using Base = iterator_adaptor_base<const_toplevel_iterator,
                                       const_toplevel_iterator_base>;

    const_toplevel_iterator() = default;
    explicit const_toplevel_iterator(const_toplevel_iterator_base I)
        : Base(I) {}

    const const_toplevel_iterator_base &wrapped() { return Base::wrapped(); }
    CycleT *operator*() const { return Base::I->get(); }
  };

  const_toplevel_iterator toplevel_begin() const {
    return const_toplevel_iterator{TopLevelCycles.begin()};
  }
  const_toplevel_iterator toplevel_end() const {
    return const_toplevel_iterator{TopLevelCycles.end()};
  }

  iterator_range<const_toplevel_iterator> toplevel_cycles() const {
    return llvm::make_range(const_toplevel_iterator{TopLevelCycles.begin()},
                            const_toplevel_iterator{TopLevelCycles.end()});
  }
  //@}
};
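
// Traversal sketch (illustrative, assuming an instantiation of
// GenericCycleInfo for a concrete IR via its SSA context type, here called
// SomeContextT):
//
//   GenericCycleInfo<SomeContextT> CI;
//   CI.compute(F);
//   for (auto *TopCycle : CI.toplevel_cycles())
//     for (auto *Block : TopCycle->blocks())
//       ... // visits the cycle's blocks, including child-cycle blocks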

/// \brief GraphTraits for iterating over a sub-tree of the CycleT tree.
template <typename CycleRefT, typename ChildIteratorT> struct CycleGraphTraits {
  using NodeRef = CycleRefT;

  using nodes_iterator = ChildIteratorT;
  using ChildIteratorType = nodes_iterator;

  static NodeRef getEntryNode(NodeRef Graph) { return Graph; }

  static ChildIteratorType child_begin(NodeRef Ref) {
    return Ref->child_begin();
  }
  static ChildIteratorType child_end(NodeRef Ref) { return Ref->child_end(); }

  // Not implemented:
  // static nodes_iterator nodes_begin(GraphType *G)
  // static nodes_iterator nodes_end  (GraphType *G)
  //    nodes_iterator/begin/end - Allow iteration over all nodes in the graph

  // typedef EdgeRef           - Type of Edge token in the graph, which should
  //                             be cheap to copy.
  // typedef ChildEdgeIteratorType - Type used to iterate over the child edges
  //                             of a node; dereferences to an EdgeRef.

  // static ChildEdgeIteratorType child_edge_begin(NodeRef)
  // static ChildEdgeIteratorType child_edge_end(NodeRef)
  //     Return iterators that point to the beginning and ending of the
  //     edge list for the given node.
  //
  // static NodeRef edge_dest(EdgeRef)
  //     Return the destination node of an edge.
  // static unsigned       size       (GraphType *G)
  //    Return total number of nodes in the graph
};

template <typename BlockT>
struct GraphTraits<const GenericCycle<BlockT> *>
    : CycleGraphTraits<const GenericCycle<BlockT> *,
                       typename GenericCycle<BlockT>::const_child_iterator> {};
template <typename BlockT>
struct GraphTraits<GenericCycle<BlockT> *>
    : CycleGraphTraits<GenericCycle<BlockT> *,
                       typename GenericCycle<BlockT>::const_child_iterator> {};

} // namespace llvm

#endif // LLVM_ADT_GENERICCYCLEINFO_H

//===- StringSet.h - An efficient set built on StringMap --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///  StringSet - A set-like wrapper for the StringMap.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STRINGSET_H
#define LLVM_ADT_STRINGSET_H

#include "llvm/ADT/StringMap.h"

namespace llvm {

/// StringSet - A wrapper for StringMap that provides set-like functionality.
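///
/// A short usage sketch (values are illustrative):
/// \code{.cpp}
///   StringSet<> Set;
///   Set.insert("apple");
///   bool Inserted = Set.insert("banana").second; // true: newly added
///   Inserted = Set.insert("apple").second;       // false: already present
///   bool Has = Set.contains("banana");           // true
/// \endcode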
template <class AllocatorTy = MallocAllocator>
class StringSet : public StringMap<std::nullopt_t, AllocatorTy> {
  using Base = StringMap<std::nullopt_t, AllocatorTy>;

public:
  StringSet() = default;
  StringSet(std::initializer_list<StringRef> initializer) {
    for (StringRef str : initializer)
      insert(str);
  }
  explicit StringSet(AllocatorTy a) : Base(a) {}

  std::pair<typename Base::iterator, bool> insert(StringRef key) {
    return Base::try_emplace(key);
  }

  template <typename InputIt>
  void insert(InputIt begin, InputIt end) {
    for (auto it = begin; it != end; ++it)
      insert(*it);
  }

  template <typename ValueTy>
  std::pair<typename Base::iterator, bool>
  insert(const StringMapEntry<ValueTy> &mapEntry) {
    return insert(mapEntry.getKey());
  }

  /// Check if the set contains the given \c key.
  bool contains(StringRef key) const { return Base::FindKey(key) != -1; }
};

} // end namespace llvm

#endif // LLVM_ADT_STRINGSET_H

//===- llvm/ADT/DirectedGraph.h - Directed Graph ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the interface and a base class implementation for a
/// directed graph.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DIRECTEDGRAPH_H
#define LLVM_ADT_DIRECTEDGRAPH_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// Represent an edge in the directed graph.
/// The edge contains the target node it connects to.
template <class NodeType, class EdgeType> class DGEdge {
public:
  DGEdge() = delete;
  /// Create an edge pointing to the given node \p N.
  explicit DGEdge(NodeType &N) : TargetNode(N) {}
  explicit DGEdge(const DGEdge<NodeType, EdgeType> &E)
      : TargetNode(E.TargetNode) {}
  DGEdge<NodeType, EdgeType> &operator=(const DGEdge<NodeType, EdgeType> &E) {
    TargetNode = E.TargetNode;
    return *this;
  }

  /// Static polymorphism: delegate implementation (via isEqualTo) to the
  /// derived class.
  bool operator==(const DGEdge &E) const {
    return getDerived().isEqualTo(E.getDerived());
  }
  bool operator!=(const DGEdge &E) const { return !operator==(E); }

  /// Retrieve the target node this edge connects to.
  const NodeType &getTargetNode() const { return TargetNode; }
  NodeType &getTargetNode() {
    return const_cast<NodeType &>(
        static_cast<const DGEdge<NodeType, EdgeType> &>(*this).getTargetNode());
  }

  /// Set the target node this edge connects to.
  void setTargetNode(const NodeType &N) { TargetNode = N; }

protected:
  // As the default implementation, use address comparison for equality.
  bool isEqualTo(const EdgeType &E) const { return this == &E; }

  // Cast the 'this' pointer to the derived type and return a reference.
  EdgeType &getDerived() { return *static_cast<EdgeType *>(this); }
  const EdgeType &getDerived() const {
    return *static_cast<const EdgeType *>(this);
  }

  // The target node this edge connects to.
  NodeType &TargetNode;
};

/// Represent a node in the directed graph.
/// The node has a (possibly empty) list of outgoing edges.
template <class NodeType, class EdgeType> class DGNode {
public:
  using EdgeListTy = SetVector<EdgeType *>;
  using iterator = typename EdgeListTy::iterator;
  using const_iterator = typename EdgeListTy::const_iterator;

  /// Create a node with a single outgoing edge \p E.
  explicit DGNode(EdgeType &E) : Edges() { Edges.insert(&E); }
  DGNode() = default;

  explicit DGNode(const DGNode<NodeType, EdgeType> &N) : Edges(N.Edges) {}
  DGNode(DGNode<NodeType, EdgeType> &&N) : Edges(std::move(N.Edges)) {}

  DGNode<NodeType, EdgeType> &operator=(const DGNode<NodeType, EdgeType> &N) {
    Edges = N.Edges;
    return *this;
  }
  DGNode<NodeType, EdgeType> &operator=(const DGNode<NodeType, EdgeType> &&N) {
    Edges = std::move(N.Edges);
    return *this;
  }

  /// Static polymorphism: delegate implementation (via isEqualTo) to the
  /// derived class.
  friend bool operator==(const NodeType &M, const NodeType &N) {
    return M.isEqualTo(N);
  }
  friend bool operator!=(const NodeType &M, const NodeType &N) {
    return !(M == N);
  }

  const_iterator begin() const { return Edges.begin(); }
  const_iterator end() const { return Edges.end(); }
  iterator begin() { return Edges.begin(); }
  iterator end() { return Edges.end(); }
  const EdgeType &front() const { return *Edges.front(); }
  EdgeType &front() { return *Edges.front(); }
  const EdgeType &back() const { return *Edges.back(); }
  EdgeType &back() { return *Edges.back(); }

  /// Collect in \p EL, all the edges from this node to \p N.
  /// Return true if at least one edge was found, and false otherwise.
  /// Note that this implementation allows more than one edge to connect
  /// a given pair of nodes.
  bool findEdgesTo(const NodeType &N, SmallVectorImpl<EdgeType *> &EL) const {
    assert(EL.empty() && "Expected the list of edges to be empty.");
    for (auto *E : Edges)
      if (E->getTargetNode() == N)
        EL.push_back(E);
    return !EL.empty();
  }

  /// Add the given edge \p E to this node, if it doesn't exist already. Returns
  /// true if the edge is added and false otherwise.
  bool addEdge(EdgeType &E) { return Edges.insert(&E); }

  /// Remove the given edge \p E from this node, if it exists.
  void removeEdge(EdgeType &E) { Edges.remove(&E); }

  /// Test whether there is an edge that goes from this node to \p N.
  bool hasEdgeTo(const NodeType &N) const {
    return (findEdgeTo(N) != Edges.end());
  }

  /// Retrieve the outgoing edges for the node.
  const EdgeListTy &getEdges() const { return Edges; }
  EdgeListTy &getEdges() {
    return const_cast<EdgeListTy &>(
        static_cast<const DGNode<NodeType, EdgeType> &>(*this).Edges);
  }

  /// Clear the outgoing edges.
  void clear() { Edges.clear(); }

protected:
  // As the default implementation, use address comparison for equality.
  bool isEqualTo(const NodeType &N) const { return this == &N; }

  // Cast the 'this' pointer to the derived type and return a reference.
  NodeType &getDerived() { return *static_cast<NodeType *>(this); }
  const NodeType &getDerived() const {
    return *static_cast<const NodeType *>(this);
  }

  /// Find an edge to \p N. If more than one edge exists, this will return
  /// the first one in the list of edges.
  const_iterator findEdgeTo(const NodeType &N) const {
    return llvm::find_if(
        Edges, [&N](const EdgeType *E) { return E->getTargetNode() == N; });
  }

  // The list of outgoing edges.
  EdgeListTy Edges;
};

/// Directed graph
///
/// The graph is represented by a table of nodes.
/// Each node contains a (possibly empty) list of outgoing edges.
/// Each edge contains the target node it connects to.
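///
/// A minimal sketch of concrete node/edge types built on the CRTP bases and
/// wired into a graph (all names are hypothetical):
/// \code{.cpp}
///   struct MyNode;
///   struct MyEdge : DGEdge<MyNode, MyEdge> {
///     explicit MyEdge(MyNode &N) : DGEdge(N) {}
///   };
///   struct MyNode : DGNode<MyNode, MyEdge> {};
///
///   MyNode A, B;
///   MyEdge E(B);                    // an edge whose target is B
///   DirectedGraph<MyNode, MyEdge> G;
///   G.addNode(A);
///   G.addNode(B);
///   G.connect(A, B, E);             // A -> B via E
///   bool Reaches = A.hasEdgeTo(B);  // true
/// \endcode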
template <class NodeType, class EdgeType> class DirectedGraph {
protected:
  using NodeListTy = SmallVector<NodeType *, 10>;
  using EdgeListTy = SmallVector<EdgeType *, 10>;
public:
  using iterator = typename NodeListTy::iterator;
  using const_iterator = typename NodeListTy::const_iterator;
  using DGraphType = DirectedGraph<NodeType, EdgeType>;

  DirectedGraph() = default;
  explicit DirectedGraph(NodeType &N) : Nodes() { addNode(N); }
  DirectedGraph(const DGraphType &G) : Nodes(G.Nodes) {}
  DirectedGraph(DGraphType &&RHS) : Nodes(std::move(RHS.Nodes)) {}
  DGraphType &operator=(const DGraphType &G) {
    Nodes = G.Nodes;
    return *this;
  }
  DGraphType &operator=(const DGraphType &&G) {
    Nodes = std::move(G.Nodes);
    return *this;
  }

  const_iterator begin() const { return Nodes.begin(); }
  const_iterator end() const { return Nodes.end(); }
  iterator begin() { return Nodes.begin(); }
  iterator end() { return Nodes.end(); }
  const NodeType &front() const { return *Nodes.front(); }
  NodeType &front() { return *Nodes.front(); }
  const NodeType &back() const { return *Nodes.back(); }
  NodeType &back() { return *Nodes.back(); }

  size_t size() const { return Nodes.size(); }

  /// Find the given node \p N in the table.
  const_iterator findNode(const NodeType &N) const {
    return llvm::find_if(Nodes,
                         [&N](const NodeType *Node) { return *Node == N; });
  }
  iterator findNode(const NodeType &N) {
    return const_cast<iterator>(
        static_cast<const DGraphType &>(*this).findNode(N));
  }

  /// Add the given node \p N to the graph if it is not already present.
  bool addNode(NodeType &N) {
    if (findNode(N) != Nodes.end())
      return false;
    Nodes.push_back(&N);
    return true;
  }

  /// Collect in \p EL all edges that are coming into node \p N. Return true
  /// if at least one edge was found, and false otherwise.
  bool findIncomingEdgesToNode(const NodeType &N, SmallVectorImpl<EdgeType*> &EL) const {
    assert(EL.empty() && "Expected the list of edges to be empty.");
    EdgeListTy TempList;
    for (auto *Node : Nodes) {
      if (*Node == N)
        continue;
      Node->findEdgesTo(N, TempList);
      llvm::append_range(EL, TempList);
      TempList.clear();
    }
    return !EL.empty();
  }

  /// Remove the given node \p N from the graph. If the node has incoming or
  /// outgoing edges, they are also removed. Return true if the node was found
  /// and then removed, and false if the node was not found in the graph to
  /// begin with.
  bool removeNode(NodeType &N) {
    iterator IT = findNode(N);
    if (IT == Nodes.end())
      return false;
    // Remove incoming edges.
    EdgeListTy EL;
    for (auto *Node : Nodes) {
      if (*Node == N)
        continue;
      Node->findEdgesTo(N, EL);
      for (auto *E : EL)
        Node->removeEdge(*E);
      EL.clear();
    }
    N.clear();
    Nodes.erase(IT);
    return true;
  }

  /// Assuming nodes \p Src and \p Dst are already in the graph, connect node
  /// \p Src to node \p Dst using the provided edge \p E. Return true if \p E
  /// was newly added (i.e. \p Src was not already connected to \p Dst via
  /// \p E), and false otherwise.
  bool connect(NodeType &Src, NodeType &Dst, EdgeType &E) {
    assert(findNode(Src) != Nodes.end() && "Src node should be present.");
    assert(findNode(Dst) != Nodes.end() && "Dst node should be present.");
    assert((E.getTargetNode() == Dst) &&
           "Target of the given edge does not match Dst.");
    return Src.addEdge(E);
  }

protected:
  // The list of nodes in the graph.
  NodeListTy Nodes;
};

} // namespace llvm

#endif // LLVM_ADT_DIRECTEDGRAPH_H

//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SmallSet class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SMALLSET_H
#define LLVM_ADT_SMALLSET_H

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <cstddef>
#include <functional>
#include <set>
#include <type_traits>
#include <utility>

namespace llvm {

/// SmallSetIterator - This class implements a const_iterator for SmallSet by
/// delegating to the underlying SmallVector or Set iterators.
template <typename T, unsigned N, typename C>
class SmallSetIterator
    : public iterator_facade_base<SmallSetIterator<T, N, C>,
                                  std::forward_iterator_tag, T> {
private:
  using SetIterTy = typename std::set<T, C>::const_iterator;
  using VecIterTy = typename SmallVector<T, N>::const_iterator;
  using SelfTy = SmallSetIterator<T, N, C>;

  /// Iterators to the parts of the SmallSet containing the data. They are set
  /// depending on isSmall.
  union {
    SetIterTy SetIter;
    VecIterTy VecIter;
  };

  bool isSmall;

public:
  SmallSetIterator(SetIterTy SetIter) : SetIter(SetIter), isSmall(false) {}

  SmallSetIterator(VecIterTy VecIter) : VecIter(VecIter), isSmall(true) {}

  // Spell out destructor, copy/move constructor and assignment operators for
  // MSVC STL, where set<T>::const_iterator is not trivially copy constructible.
  ~SmallSetIterator() {
    if (isSmall)
      VecIter.~VecIterTy();
    else
      SetIter.~SetIterTy();
  }

  SmallSetIterator(const SmallSetIterator &Other) : isSmall(Other.isSmall) {
    if (isSmall)
      VecIter = Other.VecIter;
    else
      // Use placement new, to make sure SetIter is properly constructed, even
      // if it is not trivially copy-able (e.g. in MSVC).
      new (&SetIter) SetIterTy(Other.SetIter);
  }

  SmallSetIterator(SmallSetIterator &&Other) : isSmall(Other.isSmall) {
    if (isSmall)
      VecIter = std::move(Other.VecIter);
    else
      // Use placement new, to make sure SetIter is properly constructed, even
      // if it is not trivially copy-able (e.g. in MSVC).
      new (&SetIter) SetIterTy(std::move(Other.SetIter));
  }

  SmallSetIterator& operator=(const SmallSetIterator& Other) {
    // Call destructor for SetIter, so it gets properly destroyed if it is
    // not trivially destructible in case we are setting VecIter.
    if (!isSmall)
      SetIter.~SetIterTy();

    isSmall = Other.isSmall;
    if (isSmall)
      VecIter = Other.VecIter;
    else
      new (&SetIter) SetIterTy(Other.SetIter);
    return *this;
  }

  SmallSetIterator& operator=(SmallSetIterator&& Other) {
    // Call destructor for SetIter, so it gets properly destroyed if it is
    // not trivially destructible in case we are setting VecIter.
    if (!isSmall)
      SetIter.~SetIterTy();

    isSmall = Other.isSmall;
    if (isSmall)
      VecIter = std::move(Other.VecIter);
    else
      new (&SetIter) SetIterTy(std::move(Other.SetIter));
    return *this;
  }

  bool operator==(const SmallSetIterator &RHS) const {
    if (isSmall != RHS.isSmall)
      return false;
    if (isSmall)
      return VecIter == RHS.VecIter;
    return SetIter == RHS.SetIter;
  }

  SmallSetIterator &operator++() { // Preincrement
    if (isSmall)
      VecIter++;
    else
      SetIter++;
    return *this;
  }

  const T &operator*() const { return isSmall ? *VecIter : *SetIter; }
};

/// SmallSet - This maintains a set of unique values, optimizing for the case
/// when the set is small (fewer than N elements).  In this case, the set can be
/// maintained with no mallocs.  If the set gets large, we expand to using an
/// std::set to maintain reasonable lookup times.
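///
/// A short sketch of the small-to-large transition (sizes are illustrative):
/// \code{.cpp}
///   SmallSet<int, 4> S;
///   for (int I = 0; I < 4; ++I)
///     S.insert(I);                    // small mode: inline vector, no malloc
///   S.insert(4);                      // fifth element: spills into std::set
///   bool WasNew = S.insert(4).second; // false: already present
/// \endcode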
template <typename T, unsigned N, typename C = std::less<T>>
class SmallSet {
  /// Use a SmallVector to hold the elements here (even though it will never
  /// reach its 'large' stage) to avoid calling the default ctors of elements
  /// we will never use.
  SmallVector<T, N> Vector;
  std::set<T, C> Set;

  using VIterator = typename SmallVector<T, N>::const_iterator;
  using SIterator = typename std::set<T, C>::const_iterator;
  using mutable_iterator = typename SmallVector<T, N>::iterator;

  // In small mode SmallSet uses linear search for the elements, so it is
  // not a good idea to choose this value too high. You may consider using a
  // DenseSet<> instead if you expect many elements in the set.
  static_assert(N <= 32, "N should be small");

public:
  using key_type = T;
  using size_type = size_t;
  using value_type = T;
  using const_iterator = SmallSetIterator<T, N, C>;

  SmallSet() = default;

  [[nodiscard]] bool empty() const { return Vector.empty() && Set.empty(); }

  size_type size() const {
    return isSmall() ? Vector.size() : Set.size();
  }

  /// count - Return 1 if the element is in the set, 0 otherwise.
  size_type count(const T &V) const {
    if (isSmall()) {
      // Since the collection is small, just do a linear search.
      return vfind(V) == Vector.end() ? 0 : 1;
    } else {
      return Set.count(V);
    }
  }

  /// insert - Insert an element into the set if it isn't already there.
  /// Returns a pair: the first element is an iterator to the inserted element
  /// or to the element already in the set; the second is true if the element
  /// was actually inserted (i.e. it was not in the set before).
  std::pair<const_iterator, bool> insert(const T &V) {
    if (!isSmall()) {
      auto [I, Inserted] = Set.insert(V);
      return std::make_pair(const_iterator(I), Inserted);
    }

    VIterator I = vfind(V);
    if (I != Vector.end())    // Don't reinsert if it already exists.
      return std::make_pair(const_iterator(I), false);
    if (Vector.size() < N) {
      Vector.push_back(V);
      return std::make_pair(const_iterator(std::prev(Vector.end())), true);
    }

    // Otherwise, grow from vector to set.
    while (!Vector.empty()) {
      Set.insert(Vector.back());
      Vector.pop_back();
    }
    return std::make_pair(const_iterator(Set.insert(V).first), true);
  }

  template <typename IterT>
  void insert(IterT I, IterT E) {
    for (; I != E; ++I)
      insert(*I);
  }

  bool erase(const T &V) {
    if (!isSmall())
      return Set.erase(V);
    for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
      if (*I == V) {
        Vector.erase(I);
        return true;
      }
    return false;
  }

  void clear() {
    Vector.clear();
    Set.clear();
  }

  const_iterator begin() const {
    if (isSmall())
      return {Vector.begin()};
    return {Set.begin()};
  }

  const_iterator end() const {
    if (isSmall())
      return {Vector.end()};
    return {Set.end()};
  }

  /// Check if the SmallSet contains the given element.
  bool contains(const T &V) const {
    if (isSmall())
      return vfind(V) != Vector.end();
    return Set.find(V) != Set.end();
  }

private:
  bool isSmall() const { return Set.empty(); }

  VIterator vfind(const T &V) const {
    for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
      if (*I == V)
        return I;
    return Vector.end();
  }
};

/// If this set is of pointer values, transparently switch over to using
/// SmallPtrSet for performance.
template <typename PointeeType, unsigned N>
class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};

/// Equality comparison for SmallSet.
///
/// Iterates over elements of LHS confirming that each element is also a member
/// of RHS, and that RHS contains no additional values.
/// Equivalent to N calls to RHS.count.
/// For small-set mode the complexity is O(N^2), since each count is a linear
/// scan.  For large-set mode each count is a lookup in the underlying
/// std::set, which is O(log N), giving O(N log N) overall.
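///
/// For example, sets with different inline sizes but equal contents compare
/// equal (a sketch):
/// \code{.cpp}
///   SmallSet<int, 2> A;
///   SmallSet<int, 8> B;
///   A.insert(1); A.insert(2);
///   B.insert(2); B.insert(1);
///   bool Same = (A == B); // true: order and inline capacity do not matter
/// \endcode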
template <typename T, unsigned LN, unsigned RN, typename C>
bool operator==(const SmallSet<T, LN, C> &LHS, const SmallSet<T, RN, C> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  // All elements in LHS must also be in RHS
  return all_of(LHS, [&RHS](const T &E) { return RHS.count(E); });
}

/// Inequality comparison for SmallSet.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename T, unsigned LN, unsigned RN, typename C>
bool operator!=(const SmallSet<T, LN, C> &LHS, const SmallSet<T, RN, C> &RHS) {
  return !(LHS == RHS);
}

} // end namespace llvm

#endif // LLVM_ADT_SMALLSET_H

//===- llvm/ADT/ilist_node.h - Intrusive Linked List Helper -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the ilist_node class template, which is a convenient
/// base class for creating classes that can be used with ilists.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ILIST_NODE_H
#define LLVM_ADT_ILIST_NODE_H

#include "llvm/ADT/ilist_node_base.h"
#include "llvm/ADT/ilist_node_options.h"

namespace llvm {

namespace ilist_detail {

struct NodeAccess;

} // end namespace ilist_detail

template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
template <class OptionsT> class ilist_sentinel;

/// Implementation for an ilist node.
///
/// Templated on an appropriate \a ilist_detail::node_options, usually computed
/// by \a ilist_detail::compute_node_options.
///
/// This is a wrapper around \a ilist_node_base whose main purpose is to
/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
/// wrong \a simple_ilist or \a iplist.
template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
  using value_type = typename OptionsT::value_type;
  using node_base_type = typename OptionsT::node_base_type;
  using list_base_type = typename OptionsT::list_base_type;

  friend typename OptionsT::list_base_type;
  friend struct ilist_detail::NodeAccess;
  friend class ilist_sentinel<OptionsT>;
  friend class ilist_iterator<OptionsT, false, false>;
  friend class ilist_iterator<OptionsT, false, true>;
  friend class ilist_iterator<OptionsT, true, false>;
  friend class ilist_iterator<OptionsT, true, true>;

protected:
  using self_iterator = ilist_iterator<OptionsT, false, false>;
  using const_self_iterator = ilist_iterator<OptionsT, false, true>;
  using reverse_self_iterator = ilist_iterator<OptionsT, true, false>;
  using const_reverse_self_iterator = ilist_iterator<OptionsT, true, true>;

  ilist_node_impl() = default;

private:
  ilist_node_impl *getPrev() {
    return static_cast<ilist_node_impl *>(node_base_type::getPrev());
  }

  ilist_node_impl *getNext() {
    return static_cast<ilist_node_impl *>(node_base_type::getNext());
  }

  const ilist_node_impl *getPrev() const {
    return static_cast<ilist_node_impl *>(node_base_type::getPrev());
  }

  const ilist_node_impl *getNext() const {
    return static_cast<ilist_node_impl *>(node_base_type::getNext());
  }

  void setPrev(ilist_node_impl *N) { node_base_type::setPrev(N); }
  void setNext(ilist_node_impl *N) { node_base_type::setNext(N); }

public:
  self_iterator getIterator() { return self_iterator(*this); }
  const_self_iterator getIterator() const { return const_self_iterator(*this); }

  reverse_self_iterator getReverseIterator() {
    return reverse_self_iterator(*this);
  }

  const_reverse_self_iterator getReverseIterator() const {
    return const_reverse_self_iterator(*this);
  }

  // Under-approximation, but always available for assertions.
  using node_base_type::isKnownSentinel;

  /// Check whether this is the sentinel node.
  ///
  /// This requires sentinel tracking to be explicitly enabled.  Use the
  /// ilist_sentinel_tracking<true> option to get this API.
  bool isSentinel() const {
    static_assert(OptionsT::is_sentinel_tracking_explicit,
                  "Use ilist_sentinel_tracking<true> to enable isSentinel()");
    return node_base_type::isSentinel();
  }
};

/// An intrusive list node.
///
/// A base class to enable membership in intrusive lists, including \a
/// simple_ilist, \a iplist, and \a ilist.  The first template parameter is the
/// \a value_type for the list.
///
/// An ilist node can be configured with compile-time options to change
/// behaviour and/or add API.
///
/// By default, an \a ilist_node knows whether it is the list sentinel (an
/// instance of \a ilist_sentinel) if and only if
/// LLVM_ENABLE_ABI_BREAKING_CHECKS.  The function \a isKnownSentinel() always
/// returns \c false when tracking is off.  Sentinel tracking steals a bit from
/// the "prev" link, which adds a mask operation when decrementing an iterator,
/// but enables bug-finding assertions in \a ilist_iterator.
///
/// To turn sentinel tracking on all the time, pass in the
/// ilist_sentinel_tracking<true> template parameter.  This also enables the \a
/// isSentinel() function.  The same option must be passed to the intrusive
/// list.  (ilist_sentinel_tracking<false> turns sentinel tracking off all the
/// time.)
///
/// A type can inherit from ilist_node multiple times by passing in different
/// \a ilist_tag options.  This allows a single instance to be inserted into
/// multiple lists simultaneously, where each list is declared with the tag of
/// the corresponding base (as in the example below).
///
/// \example
/// struct A {};
/// struct B {};
/// struct N : ilist_node<N, ilist_tag<A>>, ilist_node<N, ilist_tag<B>> {};
///
/// void foo() {
///   simple_ilist<N, ilist_tag<A>> ListA;
///   simple_ilist<N, ilist_tag<B>> ListB;
///   N N1;
///   ListA.push_back(N1);
///   ListB.push_back(N1);
/// }
/// \endexample
///
/// See \a is_valid_option for steps on adding a new option.
template <class T, class... Options>
class ilist_node
    : public ilist_node_impl<
          typename ilist_detail::compute_node_options<T, Options...>::type> {
  static_assert(ilist_detail::check_options<Options...>::value,
                "Unrecognized node option!");
};

namespace ilist_detail {

/// An access class for ilist_node private API.
///
/// This gives access to the private parts of ilist nodes.  Nodes for an ilist
/// should friend this class if they inherit privately from ilist_node.
///
/// Using this class outside of the ilist implementation is unsupported.
struct NodeAccess {
protected:
  template <class OptionsT>
  static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
    return N;
  }

  template <class OptionsT>
  static const ilist_node_impl<OptionsT> *
  getNodePtr(typename OptionsT::const_pointer N) {
    return N;
  }

  template <class OptionsT>
  static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
    return static_cast<typename OptionsT::pointer>(N);
  }

  template <class OptionsT>
  static typename OptionsT::const_pointer
  getValuePtr(const ilist_node_impl<OptionsT> *N) {
    return static_cast<typename OptionsT::const_pointer>(N);
  }

  template <class OptionsT>
  static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
    return N.getPrev();
  }

  template <class OptionsT>
  static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
    return N.getNext();
  }

  template <class OptionsT>
  static const ilist_node_impl<OptionsT> *
  getPrev(const ilist_node_impl<OptionsT> &N) {
    return N.getPrev();
  }

  template <class OptionsT>
  static const ilist_node_impl<OptionsT> *
  getNext(const ilist_node_impl<OptionsT> &N) {
    return N.getNext();
  }
};

template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
protected:
  using pointer = typename OptionsT::pointer;
  using const_pointer = typename OptionsT::const_pointer;
  using node_type = ilist_node_impl<OptionsT>;

  static node_type *getNodePtr(pointer N) {
    return NodeAccess::getNodePtr<OptionsT>(N);
  }

  static const node_type *getNodePtr(const_pointer N) {
    return NodeAccess::getNodePtr<OptionsT>(N);
  }

  static pointer getValuePtr(node_type *N) {
    return NodeAccess::getValuePtr<OptionsT>(N);
  }

  static const_pointer getValuePtr(const node_type *N) {
    return NodeAccess::getValuePtr<OptionsT>(N);
  }
};

} // end namespace ilist_detail

template <class OptionsT>
class ilist_sentinel : public ilist_node_impl<OptionsT> {
public:
  ilist_sentinel() {
    this->initializeSentinel();
    reset();
  }

  void reset() {
    this->setPrev(this);
    this->setNext(this);
  }

  bool empty() const { return this == this->getPrev(); }
};

/// An ilist node that can access its parent list.
///
/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
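///
/// A minimal sketch of the two required hooks, assuming the parent keeps its
/// nodes in a list type that provides \c getPrevNode / \c getNextNode (an
/// iplist here; all names are hypothetical):
/// \code{.cpp}
///   class Block;
///   class Instr : public ilist_node_with_parent<Instr, Block> {
///     Block *Parent = nullptr;
///   public:
///     Block *getParent() const { return Parent; }
///   };
///   class Block {
///     iplist<Instr> Instrs;
///   public:
///     static iplist<Instr> Block::*getSublistAccess(Instr *) {
///       return &Block::Instrs;
///     }
///   };
/// \endcode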
template <typename NodeTy, typename ParentTy, class... Options>
class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
protected:
  ilist_node_with_parent() = default;

private:
  /// Forward to NodeTy::getParent().
  ///
  /// Note: do not use the name "getParent()".  We want a compile error
  /// (instead of recursion) when the subclass fails to implement \a
  /// getParent().
  const ParentTy *getNodeParent() const {
    return static_cast<const NodeTy *>(this)->getParent();
  }

public:
  /// @name Adjacent Node Accessors
  /// @{
  /// Get the previous node, or \c nullptr for the list head.
  NodeTy *getPrevNode() {
    // Should be factored into a reusable function, but then we couldn't use
    // auto (and would need the type of the list).
    const auto &List =
        getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
    return List.getPrevNode(*static_cast<NodeTy *>(this));
  }

  /// Get the previous node, or \c nullptr for the list head.
  const NodeTy *getPrevNode() const {
    return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
  }

  /// Get the next node, or \c nullptr for the list tail.
  NodeTy *getNextNode() {
    // Should be factored into a reusable function, but then we couldn't use
    // auto (and would need the type of the list).
    const auto &List =
        getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
    return List.getNextNode(*static_cast<NodeTy *>(this));
  }

  /// Get the next node, or \c nullptr for the list tail.
  const NodeTy *getNextNode() const {
    return const_cast<ilist_node_with_parent *>(this)->getNextNode();
  }
  /// @}
};

} // end namespace llvm

#endif // LLVM_ADT_ILIST_NODE_H

//===-- llvm/ADT/Bitfield.h - Get and Set bits in an integer ---*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements methods to test, set and extract typed bits from packed
/// unsigned integers.
///
/// Why not C++ bitfields?
/// ----------------------
/// C++ bitfields do not offer control over the bit layout nor consistent
/// behavior when it comes to out of range values.
/// For instance, the layout is implementation defined and adjacent bits may be
/// packed together but are not required to be. This is problematic when
/// storage is sparse and data must be stored in a particular integer type.
///
/// The methods provided in this file ensure precise control over the
/// layout/storage as well as protection against out of range values.
///
/// Usage example
/// -------------
/// \code{.cpp}
///  uint8_t Storage = 0;
///
///  // Store and retrieve a single bit as bool.
///  using Bool = Bitfield::Element<bool, 0, 1>;
///  Bitfield::set<Bool>(Storage, true);
///  EXPECT_EQ(Storage, 0b00000001);
///  //                          ^
///  EXPECT_EQ(Bitfield::get<Bool>(Storage), true);
///
///  // Store and retrieve a 2 bit typed enum.
///  // Note: enum underlying type must be unsigned.
///  enum class SuitEnum : uint8_t { CLUBS, DIAMONDS, HEARTS, SPADES };
///  // Note: enum maximum value needs to be passed in as last parameter.
///  using Suit = Bitfield::Element<SuitEnum, 1, 2, SuitEnum::SPADES>;
///  Bitfield::set<Suit>(Storage, SuitEnum::HEARTS);
///  EXPECT_EQ(Storage, 0b00000101);
///  //                        ^^
///  EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::HEARTS);
///
///  // Store and retrieve a 5 bit value as unsigned.
///  using Value = Bitfield::Element<unsigned, 3, 5>;
///  Bitfield::set<Value>(Storage, 10);
///  EXPECT_EQ(Storage, 0b01010101);
///  //                   ^^^^^
///  EXPECT_EQ(Bitfield::get<Value>(Storage), 10U);
///
///  // Interpret the same 5 bit value as signed.
///  using SignedValue = Bitfield::Element<int, 3, 5>;
///  Bitfield::set<SignedValue>(Storage, -2);
///  EXPECT_EQ(Storage, 0b11110101);
///  //                   ^^^^^
///  EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -2);
///
///  // Ability to efficiently test if a field is non zero.
///  EXPECT_TRUE(Bitfield::test<Value>(Storage));
///
///  // Alter Storage changes value.
///  Storage = 0;
///  EXPECT_EQ(Bitfield::get<Bool>(Storage), false);
///  EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::CLUBS);
///  EXPECT_EQ(Bitfield::get<Value>(Storage), 0U);
///  EXPECT_EQ(Bitfield::get<SignedValue>(Storage), 0);
///
///  Storage = 255;
///  EXPECT_EQ(Bitfield::get<Bool>(Storage), true);
///  EXPECT_EQ(Bitfield::get<Suit>(Storage), SuitEnum::SPADES);
///  EXPECT_EQ(Bitfield::get<Value>(Storage), 31U);
///  EXPECT_EQ(Bitfield::get<SignedValue>(Storage), -1);
/// \endcode
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_BITFIELDS_H
#define LLVM_ADT_BITFIELDS_H

#include <cassert>
#include <climits> // CHAR_BIT
#include <cstddef> // size_t
#include <cstdint> // uintXX_t
#include <limits>  // numeric_limits
#include <type_traits>

namespace llvm {

namespace bitfields_details {

/// A struct defining useful bit patterns for n-bits integer types.
template <typename T, unsigned Bits> struct BitPatterns {
  /// Bit patterns are forged using the equivalent `Unsigned` type because of
  /// undefined operations over signed types (e.g. Bitwise shift operators).
  /// Moreover same size casting from unsigned to signed is well defined but not
  /// the other way around.
  using Unsigned = std::make_unsigned_t<T>;
  static_assert(sizeof(Unsigned) == sizeof(T), "Types must have same size");

  static constexpr unsigned TypeBits = sizeof(Unsigned) * CHAR_BIT;
  static_assert(TypeBits >= Bits, "n-bit must fit in T");

  /// e.g. with TypeBits == 8 and Bits == 6.
  static constexpr Unsigned AllZeros = Unsigned(0);                  // 00000000
  static constexpr Unsigned AllOnes = ~Unsigned(0);                  // 11111111
  static constexpr Unsigned Umin = AllZeros;                         // 00000000
  static constexpr Unsigned Umax = AllOnes >> (TypeBits - Bits);     // 00111111
  static constexpr Unsigned SignBitMask = Unsigned(1) << (Bits - 1); // 00100000
  static constexpr Unsigned Smax = Umax >> 1U;                       // 00011111
  static constexpr Unsigned Smin = ~Smax;                            // 11100000
  static constexpr Unsigned SignExtend = Unsigned(Smin << 1U);       // 11000000
};

/// `Compressor` is used to manipulate the bits of a (possibly signed) integer
/// type so it can be packed and unpacked into a `bits` sized integer.
/// `Compressor` is specialized on signedness, so no runtime cost is incurred.
/// The `pack` method also checks that the passed-in `UserValue` is valid.
template <typename T, unsigned Bits, bool = std::is_unsigned<T>::value>
struct Compressor {
  static_assert(std::is_unsigned<T>::value, "T must be unsigned");
  using BP = BitPatterns<T, Bits>;

  static T pack(T UserValue, T UserMaxValue) {
    assert(UserValue <= UserMaxValue && "value is too big");
    assert(UserValue <= BP::Umax && "value is too big");
    return UserValue;
  }

  static T unpack(T StorageValue) { return StorageValue; }
};

template <typename T, unsigned Bits> struct Compressor<T, Bits, false> {
  static_assert(std::is_signed<T>::value, "T must be signed");
  using BP = BitPatterns<T, Bits>;

  static T pack(T UserValue, T UserMaxValue) {
    assert(UserValue <= UserMaxValue && "value is too big");
    assert(UserValue <= T(BP::Smax) && "value is too big");
    assert(UserValue >= T(BP::Smin) && "value is too small");
    if (UserValue < 0)
      UserValue &= ~BP::SignExtend;
    return UserValue;
  }

  static T unpack(T StorageValue) {
    if (StorageValue >= T(BP::SignBitMask))
      StorageValue |= BP::SignExtend;
    return StorageValue;
  }
};

/// Impl is where the Bitfield description and the Storage are put together to
/// interact with values.
template <typename Bitfield, typename StorageType> struct Impl {
  static_assert(std::is_unsigned<StorageType>::value,
                "Storage must be unsigned");
  using IntegerType = typename Bitfield::IntegerType;
  using C = Compressor<IntegerType, Bitfield::Bits>;
  using BP = BitPatterns<StorageType, Bitfield::Bits>;

  static constexpr size_t StorageBits = sizeof(StorageType) * CHAR_BIT;
  static_assert(Bitfield::FirstBit <= StorageBits, "Data must fit in mask");
  static_assert(Bitfield::LastBit <= StorageBits, "Data must fit in mask");
  static constexpr StorageType Mask = BP::Umax << Bitfield::Shift;

  /// Checks `UserValue` is within bounds and packs it between `FirstBit` and
  /// `LastBit` of `Packed` leaving the rest unchanged.
  static void update(StorageType &Packed, IntegerType UserValue) {
    const StorageType StorageValue = C::pack(UserValue, Bitfield::UserMaxValue);
    Packed &= ~Mask;
    Packed |= StorageValue << Bitfield::Shift;
  }

  /// Interprets bits between `FirstBit` and `LastBit` of `Packed` as
  /// an `IntegerType`.
  static IntegerType extract(StorageType Packed) {
    const StorageType StorageValue = (Packed & Mask) >> Bitfield::Shift;
    return C::unpack(StorageValue);
  }

  /// Returns a non-zero value if the bits between `FirstBit` and `LastBit` of
  /// `Packed` are non-zero.
  static StorageType test(StorageType Packed) { return Packed & Mask; }
};

/// `Bitfield` deals with the following types:
/// - unsigned enums
/// - signed and unsigned integers
/// - `bool`
/// Internally, though, we only manipulate integers with well-defined and
/// consistent semantics; typed enums and `bool` are therefore replaced with
/// their unsigned counterparts. The correct type is restored in the public
/// API.
template <typename T, bool = std::is_enum<T>::value>
struct ResolveUnderlyingType {
  using type = std::underlying_type_t<T>;
};
template <typename T> struct ResolveUnderlyingType<T, false> {
  using type = T;
};
template <> struct ResolveUnderlyingType<bool, false> {
  /// In case sizeof(bool) != 1, replace `void` by an additional
  /// std::conditional.
  using type = std::conditional_t<sizeof(bool) == 1, uint8_t, void>;
};

} // namespace bitfields_details

/// Holds functions to get, set or test bitfields.
struct Bitfield {
  /// Describes an element of a Bitfield. This type is then used with the
  /// Bitfield static member functions.
  /// \tparam T         The type of the field once in unpacked form.
  /// \tparam Offset    The position of the first bit.
  /// \tparam Size      The size of the field.
  /// \tparam MaxValue  For enums the maximum enum allowed.
  template <typename T, unsigned Offset, unsigned Size,
            T MaxValue = std::is_enum<T>::value
                             ? T(0) // coupled with static_assert below
                             : std::numeric_limits<T>::max()>
  struct Element {
    using Type = T;
    using IntegerType =
        typename bitfields_details::ResolveUnderlyingType<T>::type;
    static constexpr unsigned Shift = Offset;
    static constexpr unsigned Bits = Size;
    static constexpr unsigned FirstBit = Offset;
    static constexpr unsigned LastBit = Shift + Bits - 1;
    static constexpr unsigned NextBit = Shift + Bits;

  private:
    template <typename, typename> friend struct bitfields_details::Impl;

    static_assert(Bits > 0, "Bits must be non zero");
    static constexpr size_t TypeBits = sizeof(IntegerType) * CHAR_BIT;
    static_assert(Bits <= TypeBits, "Bits may not be greater than T size");
    static_assert(!std::is_enum<T>::value || MaxValue != T(0),
                  "Enum Bitfields must provide a MaxValue");
    static_assert(!std::is_enum<T>::value ||
                      std::is_unsigned<IntegerType>::value,
                  "Enum must be unsigned");
    static_assert(std::is_integral<IntegerType>::value &&
                      std::numeric_limits<IntegerType>::is_integer,
                  "IntegerType must be an integer type");

    static constexpr IntegerType UserMaxValue =
        static_cast<IntegerType>(MaxValue);
  };

  /// Unpacks the field from the `Packed` value.
  template <typename Bitfield, typename StorageType>
  static typename Bitfield::Type get(StorageType Packed) {
    using I = bitfields_details::Impl<Bitfield, StorageType>;
    return static_cast<typename Bitfield::Type>(I::extract(Packed));
  }

  /// Return a non-zero value if the field is non-zero.
  /// It is more efficient than `get`.
  template <typename Bitfield, typename StorageType>
  static StorageType test(StorageType Packed) {
    using I = bitfields_details::Impl<Bitfield, StorageType>;
    return I::test(Packed);
  }

  /// Sets the typed value in the provided `Packed` value.
  /// The method asserts if the provided value is too big to fit.
  template <typename Bitfield, typename StorageType>
  static void set(StorageType &Packed, typename Bitfield::Type Value) {
    using I = bitfields_details::Impl<Bitfield, StorageType>;
    I::update(Packed, static_cast<typename Bitfield::IntegerType>(Value));
  }

  /// Returns whether the two bitfields share common bits.
  template <typename A, typename B> static constexpr bool isOverlapping() {
    return A::LastBit >= B::FirstBit && B::LastBit >= A::FirstBit;
  }

  template <typename A> static constexpr bool areContiguous() { return true; }
  template <typename A, typename B, typename... Others>
  static constexpr bool areContiguous() {
    return A::NextBit == B::FirstBit && areContiguous<B, Others...>();
  }
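
  /// For example, a packed layout can be checked statically (a sketch reusing
  /// the element positions from the file-level example):
  /// \code{.cpp}
  ///   using Bool  = Bitfield::Element<bool, 0, 1>;
  ///   using Suit  = Bitfield::Element<unsigned, 1, 2>;
  ///   using Value = Bitfield::Element<unsigned, 3, 5>;
  ///   static_assert(Bitfield::areContiguous<Bool, Suit, Value>(), "no gaps");
  ///   static_assert(!Bitfield::isOverlapping<Bool, Suit>(), "no overlap");
  /// \endcode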
};

} // namespace llvm

#endif // LLVM_ADT_BITFIELDS_H

//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SmallPtrSet class.  See the doxygen comment for
/// SmallPtrSetImplBase for more details on the algorithm used.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H

#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <utility>

namespace llvm {

/// SmallPtrSetImplBase - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything.  SmallPtrSet has two modes, one
/// for small and one for large sets.
///
/// Small sets use an array of pointers allocated in the SmallPtrSet object,
/// which is treated as a simple array of pointers.  When a pointer is added to
/// the set, the array is scanned to see if the element already exists; if not,
/// the element is 'pushed back' onto the array.  If we run out of space in the
/// array, we grow into the 'large set' case.  SmallPtrSet should be used when
/// the sets are often small.  In this case, no memory allocation is used, and
/// only light-weight and cache-efficient scanning is used.
///
/// Large sets use a classic exponentially-probed hash table.  Empty buckets are
/// represented with an illegal pointer value (-1) to allow null pointers to be
/// inserted.  Tombstones are represented with another illegal pointer value
/// (-2), to allow deletion.  The hash table is resized when the table is 3/4
/// full or more.  When this happens, the table is doubled in size.
///
class SmallPtrSetImplBase : public DebugEpochBase {
  friend class SmallPtrSetIteratorImpl;

protected:
  /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
  const void **SmallArray;
  /// CurArray - This is the current set of buckets.  If equal to SmallArray,
  /// then the set is in 'small mode'.
  const void **CurArray;
  /// CurArraySize - The allocated size of CurArray, always a power of two.
  unsigned CurArraySize;

  /// Number of elements in CurArray that contain a value or are a tombstone.
  /// If small, all these elements are at the beginning of CurArray and the rest
  /// is uninitialized.
  unsigned NumNonEmpty;
  /// Number of tombstones in CurArray.
  unsigned NumTombstones;

  // Helpers to copy and move construct a SmallPtrSet.
  SmallPtrSetImplBase(const void **SmallStorage,
                      const SmallPtrSetImplBase &that);
  SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
                      SmallPtrSetImplBase &&that);

  explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize)
      : SmallArray(SmallStorage), CurArray(SmallStorage),
        CurArraySize(SmallSize), NumNonEmpty(0), NumTombstones(0) {
    assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
           "Initial size must be a power of two!");
  }

  ~SmallPtrSetImplBase() {
    if (!isSmall())
      free(CurArray);
  }

public:
  using size_type = unsigned;

  SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete;

  [[nodiscard]] bool empty() const { return size() == 0; }
  size_type size() const { return NumNonEmpty - NumTombstones; }

  void clear() {
    incrementEpoch();
    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (!isSmall()) {
      if (size() * 4 < CurArraySize && CurArraySize > 32)
        return shrink_and_clear();
      // Fill the array with empty markers.
      memset(CurArray, -1, CurArraySize * sizeof(void *));
    }

    NumNonEmpty = 0;
    NumTombstones = 0;
  }

protected:
  static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }

  static void *getEmptyMarker() {
    // Note that -1 is chosen to make clear() efficiently implementable with
    // memset and because it's not a valid pointer value.
    return reinterpret_cast<void*>(-1);
  }

  const void **EndPointer() const {
    return isSmall() ? CurArray + NumNonEmpty : CurArray + CurArraySize;
  }

  /// insert_imp - This returns true if the pointer was new to the set, false if
  /// it was already in the set.  This is hidden from the client so that the
  /// derived class can check that the right type of pointer is passed in.
  std::pair<const void *const *, bool> insert_imp(const void *Ptr) {
    if (isSmall()) {
      // Check to see if it is already in the set.
      const void **LastTombstone = nullptr;
      for (const void **APtr = SmallArray, **E = SmallArray + NumNonEmpty;
           APtr != E; ++APtr) {
        const void *Value = *APtr;
        if (Value == Ptr)
          return std::make_pair(APtr, false);
        if (Value == getTombstoneMarker())
          LastTombstone = APtr;
      }

      // Did we find any tombstone marker?
      if (LastTombstone != nullptr) {
        *LastTombstone = Ptr;
        --NumTombstones;
        incrementEpoch();
        return std::make_pair(LastTombstone, true);
      }

      // Nope, there isn't.  If we stay small, just 'pushback' now.
      if (NumNonEmpty < CurArraySize) {
        SmallArray[NumNonEmpty++] = Ptr;
        incrementEpoch();
        return std::make_pair(SmallArray + (NumNonEmpty - 1), true);
      }
      // Otherwise, hit the big set case, which will call grow.
    }
    return insert_imp_big(Ptr);
  }

  /// erase_imp - If the set contains the specified pointer, remove it and
  /// return true, otherwise return false.  This is hidden from the client so
  /// that the derived class can check that the right type of pointer is passed
  /// in.
  bool erase_imp(const void * Ptr) {
    const void *const *P = find_imp(Ptr);
    if (P == EndPointer())
      return false;

    const void **Loc = const_cast<const void **>(P);
    assert(*Loc == Ptr && "broken find!");
    *Loc = getTombstoneMarker();
    NumTombstones++;
    return true;
  }

  /// Returns the raw pointer needed to construct an iterator.  If the element
  /// is not found, this will be EndPointer.  Otherwise, it will be a pointer to
  /// the slot which stores Ptr.
  const void *const * find_imp(const void * Ptr) const {
    if (isSmall()) {
      // Linear search for the item.
      for (const void *const *APtr = SmallArray,
                      *const *E = SmallArray + NumNonEmpty; APtr != E; ++APtr)
        if (*APtr == Ptr)
          return APtr;
      return EndPointer();
    }

    // Big set case.
    auto *Bucket = FindBucketFor(Ptr);
    if (*Bucket == Ptr)
      return Bucket;
    return EndPointer();
  }

private:
  bool isSmall() const { return CurArray == SmallArray; }

  std::pair<const void *const *, bool> insert_imp_big(const void *Ptr);

  const void * const *FindBucketFor(const void *Ptr) const;
  void shrink_and_clear();

  /// Grow - Allocate a larger backing store for the buckets and move it over.
  void Grow(unsigned NewSize);

protected:
  /// swap - Swaps the elements of two sets.
  /// Note: This method assumes that both sets have the same small size.
  void swap(SmallPtrSetImplBase &RHS);

  void CopyFrom(const SmallPtrSetImplBase &RHS);
  void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);

private:
  /// Code shared by MoveFrom() and move constructor.
  void MoveHelper(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
  /// Code shared by CopyFrom() and copy constructor.
  void CopyHelper(const SmallPtrSetImplBase &RHS);
};

/// SmallPtrSetIteratorImpl - This is the common base class shared between all
/// instances of SmallPtrSetIterator.
class SmallPtrSetIteratorImpl {
protected:
  const void *const *Bucket;
  const void *const *End;

public:
  explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
    : Bucket(BP), End(E) {
    if (shouldReverseIterate()) {
      RetreatIfNotValid();
      return;
    }
    AdvanceIfNotValid();
  }

  bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
    return Bucket == RHS.Bucket;
  }
  bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
    return Bucket != RHS.Bucket;
  }

protected:
  /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
  /// that is.   This is guaranteed to stop because the end() bucket is marked
  /// valid.
  void AdvanceIfNotValid() {
    assert(Bucket <= End);
    while (Bucket != End &&
           (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
            *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
      ++Bucket;
  }
  void RetreatIfNotValid() {
    assert(Bucket >= End);
    while (Bucket != End &&
           (Bucket[-1] == SmallPtrSetImplBase::getEmptyMarker() ||
            Bucket[-1] == SmallPtrSetImplBase::getTombstoneMarker())) {
      --Bucket;
    }
  }
};

/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
template <typename PtrTy>
class LLVM_DEBUGEPOCHBASE_HANDLEBASE_EMPTYBASE SmallPtrSetIterator
    : public SmallPtrSetIteratorImpl,
      DebugEpochBase::HandleBase {
  using PtrTraits = PointerLikeTypeTraits<PtrTy>;

public:
  using value_type = PtrTy;
  using reference = PtrTy;
  using pointer = PtrTy;
  using difference_type = std::ptrdiff_t;
  using iterator_category = std::forward_iterator_tag;

  explicit SmallPtrSetIterator(const void *const *BP, const void *const *E,
                               const DebugEpochBase &Epoch)
      : SmallPtrSetIteratorImpl(BP, E), DebugEpochBase::HandleBase(&Epoch) {}

  // Most methods are provided by the base class.

  const PtrTy operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    if (shouldReverseIterate()) {
      assert(Bucket > End);
      return PtrTraits::getFromVoidPointer(const_cast<void *>(Bucket[-1]));
    }
    assert(Bucket < End);
    return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
  }

  inline SmallPtrSetIterator& operator++() {          // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    if (shouldReverseIterate()) {
      --Bucket;
      RetreatIfNotValid();
      return *this;
    }
    ++Bucket;
    AdvanceIfNotValid();
    return *this;
  }

  SmallPtrSetIterator operator++(int) {        // Postincrement
    SmallPtrSetIterator tmp = *this;
    ++*this;
    return tmp;
  }
};

/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
/// power of two (which means N itself if N is already a power of two).
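///
/// For example, \c RoundUpToPowerOfTwo<5>::Val is 8, while
/// \c RoundUpToPowerOfTwo<8>::Val stays 8.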
template<unsigned N>
struct RoundUpToPowerOfTwo;

/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it.  This is a
/// helper template used to implement RoundUpToPowerOfTwo.
template<unsigned N, bool isPowerTwo>
struct RoundUpToPowerOfTwoH {
  enum { Val = N };
};
template<unsigned N>
struct RoundUpToPowerOfTwoH<N, false> {
  enum {
    // We could just use NextVal = N+1, but this converges faster.  N|(N-1) sets
    // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
    Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
  };
};

template<unsigned N>
struct RoundUpToPowerOfTwo {
  enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};

/// A templated base class for \c SmallPtrSet which provides the
/// typesafe interface that is common across all small sizes.
///
/// This is particularly useful for passing around between interface boundaries
/// to avoid encoding a particular small size in the interface boundary.
template <typename PtrType>
class SmallPtrSetImpl : public SmallPtrSetImplBase {
  using ConstPtrType = typename add_const_past_pointer<PtrType>::type;
  using PtrTraits = PointerLikeTypeTraits<PtrType>;
  using ConstPtrTraits = PointerLikeTypeTraits<ConstPtrType>;

protected:
  // Forward constructors to the base.
  using SmallPtrSetImplBase::SmallPtrSetImplBase;

public:
  using iterator = SmallPtrSetIterator<PtrType>;
  using const_iterator = SmallPtrSetIterator<PtrType>;
  using key_type = ConstPtrType;
  using value_type = PtrType;

  SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;

  /// Inserts Ptr if and only if there is no element in the container equal to
  /// Ptr. The bool component of the returned pair is true if and only if the
  /// insertion takes place, and the iterator component of the pair points to
  /// the element equal to Ptr.
  std::pair<iterator, bool> insert(PtrType Ptr) {
    auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
    return std::make_pair(makeIterator(p.first), p.second);
  }

  /// Insert the given pointer with an iterator hint that is ignored. This is
  /// identical to calling insert(Ptr), but allows SmallPtrSet to be used by
  /// std::insert_iterator and std::inserter().
  iterator insert(iterator, PtrType Ptr) {
    return insert(Ptr).first;
  }

  /// erase - If the set contains the specified pointer, remove it and return
  /// true, otherwise return false.
  bool erase(PtrType Ptr) {
    return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
  }
  /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
  size_type count(ConstPtrType Ptr) const {
    return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
  }
  iterator find(ConstPtrType Ptr) const {
    return makeIterator(find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)));
  }
  bool contains(ConstPtrType Ptr) const {
    return find_imp(ConstPtrTraits::getAsVoidPointer(Ptr)) != EndPointer();
  }

  template <typename IterT>
  void insert(IterT I, IterT E) {
    for (; I != E; ++I)
      insert(*I);
  }

  void insert(std::initializer_list<PtrType> IL) {
    insert(IL.begin(), IL.end());
  }

  iterator begin() const {
    if (shouldReverseIterate())
      return makeIterator(EndPointer() - 1);
    return makeIterator(CurArray);
  }
  iterator end() const { return makeIterator(EndPointer()); }

private:
  /// Create an iterator that dereferences to the same place as the given
  /// pointer.
  iterator makeIterator(const void *const *P) const {
    if (shouldReverseIterate())
      return iterator(P == EndPointer() ? CurArray : P + 1, CurArray, *this);
    return iterator(P, EndPointer(), *this);
  }
};
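
// Illustrative usage sketch (not part of the upstream header): passing
// SmallPtrSetImpl across an interface boundary hides the small size. `Value`
// and `process` are hypothetical names, and ArrayRef is assumed included.
//
//   void visitOnce(SmallPtrSetImpl<Value *> &Visited,
//                  ArrayRef<Value *> Worklist) {
//     for (Value *V : Worklist)
//       if (Visited.insert(V).second) // true only on first insertion
//         process(V);
//   }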

/// Equality comparison for SmallPtrSet.
///
/// Iterates over elements of LHS confirming that each value from LHS is also in
/// RHS, and that no additional values are in RHS.
template <typename PtrType>
bool operator==(const SmallPtrSetImpl<PtrType> &LHS,
                const SmallPtrSetImpl<PtrType> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  for (const auto *KV : LHS)
    if (!RHS.count(KV))
      return false;

  return true;
}

/// Inequality comparison for SmallPtrSet.
///
/// Equivalent to !(LHS == RHS).
template <typename PtrType>
bool operator!=(const SmallPtrSetImpl<PtrType> &LHS,
                const SmallPtrSetImpl<PtrType> &RHS) {
  return !(LHS == RHS);
}

/// SmallPtrSet - This class implements a set which is optimized for holding
/// SmallSize or fewer elements.  This internally rounds up SmallSize to the next
/// power of two if it is not already a power of two.  See the comments above
/// SmallPtrSetImplBase for details of the algorithm.
template<class PtrType, unsigned SmallSize>
class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
  // In small mode SmallPtrSet uses linear search for the elements, so it is
  // not a good idea to choose this value too high. You may consider using a
  // DenseSet<> instead if you expect many elements in the set.
  static_assert(SmallSize <= 32, "SmallSize should be small");

  using BaseT = SmallPtrSetImpl<PtrType>;

  // Make sure that SmallSize is a power of two, round up if not.
  enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
  /// SmallStorage - Fixed size storage used in 'small mode'.
  const void *SmallStorage[SmallSizePowTwo];

public:
  SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
  SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
  SmallPtrSet(SmallPtrSet &&that)
      : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}

  template<typename It>
  SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
    this->insert(I, E);
  }

  SmallPtrSet(std::initializer_list<PtrType> IL)
      : BaseT(SmallStorage, SmallSizePowTwo) {
    this->insert(IL.begin(), IL.end());
  }

  SmallPtrSet<PtrType, SmallSize> &
  operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
    if (&RHS != this)
      this->CopyFrom(RHS);
    return *this;
  }

  SmallPtrSet<PtrType, SmallSize> &
  operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
    if (&RHS != this)
      this->MoveFrom(SmallSizePowTwo, std::move(RHS));
    return *this;
  }

  SmallPtrSet<PtrType, SmallSize> &
  operator=(std::initializer_list<PtrType> IL) {
    this->clear();
    this->insert(IL.begin(), IL.end());
    return *this;
  }

  /// swap - Swaps the elements of two sets.
  void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
    SmallPtrSetImplBase::swap(RHS);
  }
};
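
// Illustrative usage sketch (not part of the upstream header); a SmallSize of
// 3 would be rounded up to 4 internally:
//
//   SmallPtrSet<int *, 4> A, B;
//   int X = 0, Y = 0;
//   A.insert(&X);
//   B.insert(&Y);
//   A.swap(B);                  // A now holds &Y, B holds &X
//   bool HasY = A.contains(&Y); // true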

} // end namespace llvm

namespace std {

  /// Implement std::swap in terms of SmallPtrSet swap.
  template<class T, unsigned N>
  inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
    LHS.swap(RHS);
  }

} // end namespace std

#endif // LLVM_ADT_SMALLPTRSET_H

//===- llvm/ADT/UniqueVector.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_UNIQUEVECTOR_H
#define LLVM_ADT_UNIQUEVECTOR_H

#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

namespace llvm {

//===----------------------------------------------------------------------===//
/// UniqueVector - This class produces a sequential ID number (base 1) for each
/// unique entry that is added.  T is the type of entries in the vector; it
/// must implement operator== and operator<.
/// Entries can be fetched using operator[] with the entry ID.
template<class T> class UniqueVector {
public:
  using VectorType = typename std::vector<T>;
  using iterator = typename VectorType::iterator;
  using const_iterator = typename VectorType::const_iterator;

private:
  // Map - Used to handle the correspondence of entry to ID.
  std::map<T, unsigned> Map;

  // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
  VectorType Vector;

public:
  /// insert - Append entry to the vector if it doesn't already exist.  Returns
  /// the entry's index + 1 to be used as a unique ID.
  unsigned insert(const T &Entry) {
    // Check if the entry is already in the map.
    unsigned &Val = Map[Entry];

    // See if entry exists, if so return prior ID.
    if (Val) return Val;

    // Compute ID for entry.
    Val = static_cast<unsigned>(Vector.size()) + 1;

    // Insert in vector.
    Vector.push_back(Entry);
    return Val;
  }

  /// idFor - return the ID for an existing entry.  Returns 0 if the entry is
  /// not found.
  unsigned idFor(const T &Entry) const {
    // Search for entry in the map.
    typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);

    // See if entry exists, if so return ID.
    if (MI != Map.end()) return MI->second;

    // No luck.
    return 0;
  }

  /// operator[] - Returns a reference to the entry with the specified ID.
  const T &operator[](unsigned ID) const {
    assert(ID-1 < size() && "ID is 0 or out of range!");
    return Vector[ID - 1];
  }

  /// Return an iterator to the start of the vector.
  iterator begin() { return Vector.begin(); }

  /// Return an iterator to the start of the vector.
  const_iterator begin() const { return Vector.begin(); }

  /// Return an iterator to the end of the vector.
  iterator end() { return Vector.end(); }

  /// Return an iterator to the end of the vector.
  const_iterator end() const { return Vector.end(); }

  /// size - Returns the number of entries in the vector.
  size_t size() const { return Vector.size(); }

  /// empty - Returns true if the vector is empty.
  bool empty() const { return Vector.empty(); }

  /// reset - Clears all the entries.
  void reset() {
    Map.clear();
    Vector.clear();
  }
};
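
// Illustrative usage sketch (not part of the upstream header): IDs are
// sequential and start at 1, and idFor returns 0 for entries never inserted.
//
//   UniqueVector<std::string> Names;
//   unsigned A = Names.insert("foo"); // A == 1
//   unsigned B = Names.insert("bar"); // B == 2
//   unsigned C = Names.insert("foo"); // C == 1, no duplicate stored
//   assert(Names.idFor("baz") == 0 && Names[B] == "bar");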

} // end namespace llvm

#endif // LLVM_ADT_UNIQUEVECTOR_H

//===- ConcurrentHashtable.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_CONCURRENTHASHTABLE_H
#define LLVM_ADT_CONCURRENTHASHTABLE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/xxhash.h"
#include <atomic>
#include <cstddef>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <type_traits>

namespace llvm {

/// ConcurrentHashTable - a resizeable concurrent hashtable.
/// The number of resizings is limited, up to x2^31. This hashtable is
/// useful for efficient access to aggregate data (like strings or
/// type descriptors) while keeping only a single copy of such
/// an aggregate. The hashtable allows only concurrent insertions:
/// KeyDataTy* = insert ( const KeyTy& );
///
/// Data structure:
///
/// An inserted value of type KeyTy is mapped to a 64-bit hash value ->
///
///          [------- 64-bit Hash value --------]
///          [  StartEntryIndex ][ Bucket Index ]
///                    |                |
///              points to the     points to
///              first probe       the bucket.
///              position inside
///              bucket entries
///
/// After initialization, all buckets have an initial size. During insertions,
/// buckets might be extended to contain more entries. Each bucket can be
/// independently resized and rehashed (no need to lock the whole table).
/// Different buckets may have different sizes. If a single bucket is full,
/// then that bucket is resized.
///
/// BucketsArray keeps all buckets. Each bucket keeps an array of Entries
/// (pointers to KeyDataTy) and another array of entries hashes:
///
/// BucketsArray[BucketIdx].Hashes[EntryIdx]:
/// BucketsArray[BucketIdx].Entries[EntryIdx]:
///
/// [Bucket 0].Hashes -> [uint32_t][uint32_t]
/// [Bucket 0].Entries -> [KeyDataTy*][KeyDataTy*]
///
/// [Bucket 1].Hashes -> [uint32_t][uint32_t][uint32_t][uint32_t]
/// [Bucket 1].Entries -> [KeyDataTy*][KeyDataTy*][KeyDataTy*][KeyDataTy*]
///                      .........................
/// [Bucket N].Hashes -> [uint32_t][uint32_t][uint32_t]
/// [Bucket N].Entries -> [KeyDataTy*][KeyDataTy*][KeyDataTy*]
///
/// ConcurrentHashTableByPtr uses an external thread-safe allocator to allocate
/// KeyDataTy items.

template <typename KeyTy, typename KeyDataTy, typename AllocatorTy>
class ConcurrentHashTableInfoByPtr {
public:
  /// \returns Hash value for the specified \p Key.
  static inline uint64_t getHashValue(const KeyTy &Key) {
    return xxh3_64bits(Key);
  }

  /// \returns true if both \p LHS and \p RHS are equal.
  static inline bool isEqual(const KeyTy &LHS, const KeyTy &RHS) {
    return LHS == RHS;
  }

  /// \returns key for the specified \p KeyData.
  static inline const KeyTy &getKey(const KeyDataTy &KeyData) {
    return KeyData.getKey();
  }

  /// \returns newly created object of KeyDataTy type.
  static inline KeyDataTy *create(const KeyTy &Key, AllocatorTy &Allocator) {
    return KeyDataTy::create(Key, Allocator);
  }
};
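
// Illustrative sketch (not part of the upstream header) of a KeyDataTy
// satisfying the default Info contract above. `StringEntry` is hypothetical,
// and AllocatorTy is assumed to expose an Allocate(Size, Align) method.
//
//   struct StringEntry {
//     std::string Key;
//     const std::string &getKey() const { return Key; }
//     template <typename AllocatorTy>
//     static StringEntry *create(const std::string &K, AllocatorTy &A) {
//       void *Mem = A.Allocate(sizeof(StringEntry), alignof(StringEntry));
//       return new (Mem) StringEntry{K};
//     }
//   };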

template <typename KeyTy, typename KeyDataTy, typename AllocatorTy,
          typename Info =
              ConcurrentHashTableInfoByPtr<KeyTy, KeyDataTy, AllocatorTy>>
class ConcurrentHashTableByPtr {
public:
  ConcurrentHashTableByPtr(
      AllocatorTy &Allocator, uint64_t EstimatedSize = 100000,
      size_t ThreadsNum = parallel::strategy.compute_thread_count(),
      size_t InitialNumberOfBuckets = 128)
      : MultiThreadAllocator(Allocator) {
    assert((ThreadsNum > 0) && "ThreadsNum must be greater than 0");
    assert((InitialNumberOfBuckets > 0) &&
           "InitialNumberOfBuckets must be greater than 0");

    // Calculate number of buckets.
    uint64_t EstimatedNumberOfBuckets = ThreadsNum;
    if (ThreadsNum > 1) {
      EstimatedNumberOfBuckets *= InitialNumberOfBuckets;
      EstimatedNumberOfBuckets *= std::max(
          1,
          countr_zero(PowerOf2Ceil(EstimatedSize / InitialNumberOfBuckets)) >>
              2);
    }
    EstimatedNumberOfBuckets = PowerOf2Ceil(EstimatedNumberOfBuckets);
    NumberOfBuckets =
        std::min(EstimatedNumberOfBuckets, (uint64_t)(1Ull << 31));

    // Allocate buckets.
    BucketsArray = std::make_unique<Bucket[]>(NumberOfBuckets);

    InitialBucketSize = EstimatedSize / NumberOfBuckets;
    InitialBucketSize = std::max((uint32_t)1, InitialBucketSize);
    InitialBucketSize = PowerOf2Ceil(InitialBucketSize);

    // Initialize each bucket.
    for (uint32_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
      HashesPtr Hashes = new ExtHashBitsTy[InitialBucketSize];
      memset(Hashes, 0, sizeof(ExtHashBitsTy) * InitialBucketSize);

      DataPtr Entries = new EntryDataTy[InitialBucketSize];
      memset(Entries, 0, sizeof(EntryDataTy) * InitialBucketSize);

      BucketsArray[Idx].Size = InitialBucketSize;
      BucketsArray[Idx].Hashes = Hashes;
      BucketsArray[Idx].Entries = Entries;
    }

    // Calculate masks.
    HashMask = NumberOfBuckets - 1;

    size_t LeadingZerosNumber = countl_zero(HashMask);
    HashBitsNum = 64 - LeadingZerosNumber;

    // We keep only the high 32 bits of the hash value, so the bucket size
    // cannot exceed 2^31. The bucket size is always a power of two.
    MaxBucketSize = 1Ull << (std::min((size_t)31, LeadingZerosNumber));

    // Calculate mask for extended hash bits.
    ExtHashMask = (NumberOfBuckets * MaxBucketSize) - 1;
  }

  virtual ~ConcurrentHashTableByPtr() {
    // Deallocate buckets.
    for (uint32_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
      delete[] BucketsArray[Idx].Hashes;
      delete[] BucketsArray[Idx].Entries;
    }
  }

  /// Insert new value \p NewValue or return already existing entry.
  ///
  /// \returns the entry, together with "true" if the entry was just inserted
  /// or "false" if the entry already existed.
  std::pair<KeyDataTy *, bool> insert(const KeyTy &NewValue) {
    // Calculate bucket index.
    uint64_t Hash = Info::getHashValue(NewValue);
    Bucket &CurBucket = BucketsArray[getBucketIdx(Hash)];
    uint32_t ExtHashBits = getExtHashBits(Hash);

#if LLVM_ENABLE_THREADS
    // Lock bucket.
    CurBucket.Guard.lock();
#endif

    HashesPtr BucketHashes = CurBucket.Hashes;
    DataPtr BucketEntries = CurBucket.Entries;
    uint32_t CurEntryIdx = getStartIdx(ExtHashBits, CurBucket.Size);

    while (true) {
      uint32_t CurEntryHashBits = BucketHashes[CurEntryIdx];

      if (CurEntryHashBits == 0 && BucketEntries[CurEntryIdx] == nullptr) {
        // Found empty slot. Insert data.
        KeyDataTy *NewData = Info::create(NewValue, MultiThreadAllocator);
        BucketEntries[CurEntryIdx] = NewData;
        BucketHashes[CurEntryIdx] = ExtHashBits;

        CurBucket.NumberOfEntries++;
        RehashBucket(CurBucket);

#if LLVM_ENABLE_THREADS
        CurBucket.Guard.unlock();
#endif

        return {NewData, true};
      }

      if (CurEntryHashBits == ExtHashBits) {
        // Hash matched. Check value for equality.
        KeyDataTy *EntryData = BucketEntries[CurEntryIdx];
        if (Info::isEqual(Info::getKey(*EntryData), NewValue)) {
          // Found an existing entry that matches the inserted data.
#if LLVM_ENABLE_THREADS
          CurBucket.Guard.unlock();
#endif

          return {EntryData, false};
        }
      }

      CurEntryIdx++;
      CurEntryIdx &= (CurBucket.Size - 1);
    }

    llvm_unreachable("Insertion error.");
    return {};
  }

  /// Print information about the current state of the hash-table structures.
  void printStatistic(raw_ostream &OS) {
    OS << "\n--- HashTable statistic:\n";
    OS << "\nNumber of buckets = " << NumberOfBuckets;
    OS << "\nInitial bucket size = " << InitialBucketSize;

    uint64_t NumberOfNonEmptyBuckets = 0;
    uint64_t NumberOfEntriesPlusEmpty = 0;
    uint64_t OverallNumberOfEntries = 0;
    uint64_t OverallSize = sizeof(*this) + NumberOfBuckets * sizeof(Bucket);

    DenseMap<uint32_t, uint32_t> BucketSizesMap;

    // For each bucket...
    for (uint32_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
      Bucket &CurBucket = BucketsArray[Idx];

      BucketSizesMap[CurBucket.Size]++;

      if (CurBucket.NumberOfEntries != 0)
        NumberOfNonEmptyBuckets++;
      NumberOfEntriesPlusEmpty += CurBucket.Size;
      OverallNumberOfEntries += CurBucket.NumberOfEntries;
      OverallSize +=
          (sizeof(ExtHashBitsTy) + sizeof(EntryDataTy)) * CurBucket.Size;
    }

    OS << "\nOverall number of entries = " << OverallNumberOfEntries;
    OS << "\nOverall number of non empty buckets = " << NumberOfNonEmptyBuckets;
    for (auto &BucketSize : BucketSizesMap)
      OS << "\n Number of buckets with size " << BucketSize.first << ": "
         << BucketSize.second;

    std::stringstream stream;
    stream << std::fixed << std::setprecision(2)
           << ((float)OverallNumberOfEntries / (float)NumberOfEntriesPlusEmpty);
    std::string str = stream.str();

    OS << "\nLoad factor = " << str;
    OS << "\nOverall allocated size = " << OverallSize;
  }

protected:
  using ExtHashBitsTy = uint32_t;
  using EntryDataTy = KeyDataTy *;

  using HashesPtr = ExtHashBitsTy *;
  using DataPtr = EntryDataTy *;

  // Bucket structure. Keeps bucket data.
  struct Bucket {
    Bucket() = default;

    // Size of bucket.
    uint32_t Size = 0;

    // Number of non-null entries.
    uint32_t NumberOfEntries = 0;

    // Hashes for [Size] entries.
    HashesPtr Hashes = nullptr;

    // [Size] entries.
    DataPtr Entries = nullptr;

#if LLVM_ENABLE_THREADS
    // Mutex for this bucket.
    std::mutex Guard;
#endif
  };

  // Reallocate and rehash the bucket if it is sufficiently full.
  void RehashBucket(Bucket &CurBucket) {
    assert((CurBucket.Size > 0) && "Uninitialised bucket");
    if (CurBucket.NumberOfEntries < CurBucket.Size * 0.9)
      return;

    if (CurBucket.Size >= MaxBucketSize)
      report_fatal_error("ConcurrentHashTable is full");

    uint32_t NewBucketSize = CurBucket.Size << 1;
    assert((NewBucketSize <= MaxBucketSize) && "New bucket size is too big");
    assert((CurBucket.Size < NewBucketSize) &&
           "New bucket size less than size of current bucket");

    // Store old entries & hashes arrays.
    HashesPtr SrcHashes = CurBucket.Hashes;
    DataPtr SrcEntries = CurBucket.Entries;

    // Allocate new entries&hashes arrays.
    HashesPtr DestHashes = new ExtHashBitsTy[NewBucketSize];
    memset(DestHashes, 0, sizeof(ExtHashBitsTy) * NewBucketSize);

    DataPtr DestEntries = new EntryDataTy[NewBucketSize];
    memset(DestEntries, 0, sizeof(EntryDataTy) * NewBucketSize);

    // For each entry in source arrays...
    for (uint32_t CurSrcEntryIdx = 0; CurSrcEntryIdx < CurBucket.Size;
         CurSrcEntryIdx++) {
      uint32_t CurSrcEntryHashBits = SrcHashes[CurSrcEntryIdx];

      // Check for null entry.
      if (CurSrcEntryHashBits == 0 && SrcEntries[CurSrcEntryIdx] == nullptr)
        continue;

      uint32_t StartDestIdx = getStartIdx(CurSrcEntryHashBits, NewBucketSize);

      // Insert non-null entry into the new arrays.
      while (true) {
        uint32_t CurDestEntryHashBits = DestHashes[StartDestIdx];

        if (CurDestEntryHashBits == 0 && DestEntries[StartDestIdx] == nullptr) {
          // Found empty slot. Insert data.
          DestHashes[StartDestIdx] = CurSrcEntryHashBits;
          DestEntries[StartDestIdx] = SrcEntries[CurSrcEntryIdx];
          break;
        }

        StartDestIdx++;
        StartDestIdx = StartDestIdx & (NewBucketSize - 1);
      }
    }

    // Update bucket fields.
    CurBucket.Hashes = DestHashes;
    CurBucket.Entries = DestEntries;
    CurBucket.Size = NewBucketSize;

    // Delete old bucket entries.
    if (SrcHashes != nullptr)
      delete[] SrcHashes;
    if (SrcEntries != nullptr)
      delete[] SrcEntries;
  }

  uint32_t getBucketIdx(hash_code Hash) { return Hash & HashMask; }

  uint32_t getExtHashBits(uint64_t Hash) {
    return (Hash & ExtHashMask) >> HashBitsNum;
  }

  uint32_t getStartIdx(uint32_t ExtHashBits, uint32_t BucketSize) {
    assert((BucketSize > 0) && "Empty bucket");

    return ExtHashBits & (BucketSize - 1);
  }

  // Number of bits in hash mask.
  uint64_t HashBitsNum = 0;

  // Hash mask.
  uint64_t HashMask = 0;

  // Hash mask for the extended hash bits.
  uint64_t ExtHashMask = 0;

  // The maximal bucket size.
  uint32_t MaxBucketSize = 0;

  // Initial size of bucket.
  uint32_t InitialBucketSize = 0;

  // The number of buckets.
  uint32_t NumberOfBuckets = 0;

  // Array of buckets.
  std::unique_ptr<Bucket[]> BucketsArray;

  // Used for allocating KeyDataTy values.
  AllocatorTy &MultiThreadAllocator;
};
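
// Illustrative usage sketch (not part of the upstream header). `StringEntry`
// is the hypothetical KeyDataTy sketched above, and `ThreadSafeAllocator`
// stands for any allocator meeting the "external thread-safe allocator"
// requirement.
//
//   ThreadSafeAllocator Alloc;
//   ConcurrentHashTableByPtr<std::string, StringEntry, ThreadSafeAllocator>
//       Table(Alloc);
//   auto [Entry, Inserted] = Table.insert("hello"); // Inserted == true
//   auto [Same, Fresh] = Table.insert("hello");     // Same == Entry,
//                                                   // Fresh == false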

} // end namespace llvm

#endif // LLVM_ADT_CONCURRENTHASHTABLE_H

//===- llvm/ADT/CachedHashString.h - Prehashed string/StringRef -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines CachedHashString and CachedHashStringRef.  These are
/// owning and not-owning string types that store their hash in addition to
/// their string data.
///
/// Unlike std::string, CachedHashString can be used in DenseSet/DenseMap
/// (because, unlike std::string, CachedHashString lets us have empty and
/// tombstone values).
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_CACHEDHASHSTRING_H
#define LLVM_ADT_CACHEDHASHSTRING_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/StringRef.h"

namespace llvm {

/// A container which contains a StringRef plus a precomputed hash.
class CachedHashStringRef {
  const char *P;
  uint32_t Size;
  uint32_t Hash;

public:
  // Explicit because hashing a string isn't free.
  explicit CachedHashStringRef(StringRef S)
      : CachedHashStringRef(S, DenseMapInfo<StringRef>::getHashValue(S)) {}

  CachedHashStringRef(StringRef S, uint32_t Hash)
      : P(S.data()), Size(S.size()), Hash(Hash) {
    assert(S.size() <= std::numeric_limits<uint32_t>::max());
  }

  StringRef val() const { return StringRef(P, Size); }
  const char *data() const { return P; }
  uint32_t size() const { return Size; }
  uint32_t hash() const { return Hash; }
};
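
// Illustrative usage sketch (not part of the upstream header): compute the
// hash once, then reuse it across repeated set probes; assumes
// llvm/ADT/DenseSet.h is included.
//
//   DenseSet<CachedHashStringRef> Seen;
//   StringRef S = "symbol";
//   CachedHashStringRef Key(S);              // hash computed once here
//   bool Inserted = Seen.insert(Key).second; // no re-hash on lookup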

template <> struct DenseMapInfo<CachedHashStringRef> {
  static CachedHashStringRef getEmptyKey() {
    return CachedHashStringRef(DenseMapInfo<StringRef>::getEmptyKey(), 0);
  }
  static CachedHashStringRef getTombstoneKey() {
    return CachedHashStringRef(DenseMapInfo<StringRef>::getTombstoneKey(), 1);
  }
  static unsigned getHashValue(const CachedHashStringRef &S) {
    assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
    assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
    return S.hash();
  }
  static bool isEqual(const CachedHashStringRef &LHS,
                      const CachedHashStringRef &RHS) {
    return LHS.hash() == RHS.hash() &&
           DenseMapInfo<StringRef>::isEqual(LHS.val(), RHS.val());
  }
};

/// A container which contains a string, which it owns, plus a precomputed hash.
///
/// We do not null-terminate the string.
class CachedHashString {
  friend struct DenseMapInfo<CachedHashString>;

  char *P;
  uint32_t Size;
  uint32_t Hash;

  static char *getEmptyKeyPtr() { return DenseMapInfo<char *>::getEmptyKey(); }
  static char *getTombstoneKeyPtr() {
    return DenseMapInfo<char *>::getTombstoneKey();
  }

  bool isEmptyOrTombstone() const {
    return P == getEmptyKeyPtr() || P == getTombstoneKeyPtr();
  }

  struct ConstructEmptyOrTombstoneTy {};

  CachedHashString(ConstructEmptyOrTombstoneTy, char *EmptyOrTombstonePtr)
      : P(EmptyOrTombstonePtr), Size(0), Hash(0) {
    assert(isEmptyOrTombstone());
  }

  // TODO: Use small-string optimization to avoid allocating.

public:
  explicit CachedHashString(const char *S) : CachedHashString(StringRef(S)) {}

  // Explicit because copying and hashing a string isn't free.
  explicit CachedHashString(StringRef S)
      : CachedHashString(S, DenseMapInfo<StringRef>::getHashValue(S)) {}

  CachedHashString(StringRef S, uint32_t Hash)
      : P(new char[S.size()]), Size(S.size()), Hash(Hash) {
    memcpy(P, S.data(), S.size());
  }

  // Ideally this class would not be copyable.  But SetVector requires copyable
  // keys, and we want this to be usable there.
  CachedHashString(const CachedHashString &Other)
      : Size(Other.Size), Hash(Other.Hash) {
    if (Other.isEmptyOrTombstone()) {
      P = Other.P;
    } else {
      P = new char[Size];
      memcpy(P, Other.P, Size);
    }
  }

  CachedHashString &operator=(CachedHashString Other) {
    swap(*this, Other);
    return *this;
  }

  CachedHashString(CachedHashString &&Other) noexcept
      : P(Other.P), Size(Other.Size), Hash(Other.Hash) {
    Other.P = getEmptyKeyPtr();
  }

  ~CachedHashString() {
    if (!isEmptyOrTombstone())
      delete[] P;
  }

  StringRef val() const { return StringRef(P, Size); }
  uint32_t size() const { return Size; }
  uint32_t hash() const { return Hash; }

  operator StringRef() const { return val(); }
  operator CachedHashStringRef() const {
    return CachedHashStringRef(val(), Hash);
  }

  friend void swap(CachedHashString &LHS, CachedHashString &RHS) {
    using std::swap;
    swap(LHS.P, RHS.P);
    swap(LHS.Size, RHS.Size);
    swap(LHS.Hash, RHS.Hash);
  }
};

template <> struct DenseMapInfo<CachedHashString> {
  static CachedHashString getEmptyKey() {
    return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
                            CachedHashString::getEmptyKeyPtr());
  }
  static CachedHashString getTombstoneKey() {
    return CachedHashString(CachedHashString::ConstructEmptyOrTombstoneTy(),
                            CachedHashString::getTombstoneKeyPtr());
  }
  static unsigned getHashValue(const CachedHashString &S) {
    assert(!isEqual(S, getEmptyKey()) && "Cannot hash the empty key!");
    assert(!isEqual(S, getTombstoneKey()) && "Cannot hash the tombstone key!");
    return S.hash();
  }
  static bool isEqual(const CachedHashString &LHS,
                      const CachedHashString &RHS) {
    if (LHS.hash() != RHS.hash())
      return false;
    if (LHS.P == CachedHashString::getEmptyKeyPtr())
      return RHS.P == CachedHashString::getEmptyKeyPtr();
    if (LHS.P == CachedHashString::getTombstoneKeyPtr())
      return RHS.P == CachedHashString::getTombstoneKeyPtr();

    // This is safe because if RHS.P is the empty or tombstone key, it will have
    // length 0, so we'll never dereference its pointer.
    return LHS.val() == RHS.val();
  }
};

} // namespace llvm

#endif

//===- DeltaAlgorithm.h - A Set Minimization Algorithm ---------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DELTAALGORITHM_H
#define LLVM_ADT_DELTAALGORITHM_H

#include <set>
#include <vector>

namespace llvm {

/// DeltaAlgorithm - Implements the delta debugging algorithm (A. Zeller '99)
/// for minimizing arbitrary sets using a predicate function.
///
/// The result of the algorithm is a subset of the input change set which is
/// guaranteed to satisfy the predicate, assuming that the input set did. For
/// well formed predicates, the result set is guaranteed to be such that
/// removing any single element would falsify the predicate.
///
/// For best results the predicate function *should* (but need not) satisfy
/// certain properties, in particular:
///  (1) The predicate should return false on an empty set and true on the full
///  set.
///  (2) If the predicate returns true for a set of changes, it should return
///  true for all supersets of that set.
///
/// It is not an error to provide a predicate that does not satisfy these
/// requirements, and the algorithm will generally produce reasonable
/// results. However, it may run substantially more tests than with a good
/// predicate.
class DeltaAlgorithm {
public:
  using change_ty = unsigned;
  // FIXME: Use a decent data structure.
  using changeset_ty = std::set<change_ty>;
  using changesetlist_ty = std::vector<changeset_ty>;

private:
  /// Cache of failed test results. Successful test results are never cached
  /// since we always reduce following a success.
  std::set<changeset_ty> FailedTestsCache;

  /// GetTestResult - Get the test result for the \p Changes from the
  /// cache, executing the test if necessary.
  ///
  /// \param Changes - The change set to test.
  /// \return - The test result.
  bool GetTestResult(const changeset_ty &Changes);

  /// Split - Partition a set of changes \p S into one or two subsets.
  void Split(const changeset_ty &S, changesetlist_ty &Res);

  /// Delta - Minimize a set of \p Changes which has been partitioned into
  /// smaller sets, by attempting to remove individual subsets.
  changeset_ty Delta(const changeset_ty &Changes,
                     const changesetlist_ty &Sets);

  /// Search - Search for a subset (or subsets) in \p Sets which can be
  /// removed from \p Changes while still satisfying the predicate.
  ///
  /// \param Res - On success, a subset of Changes which satisfies the
  /// predicate.
  /// \return - True on success.
  bool Search(const changeset_ty &Changes, const changesetlist_ty &Sets,
              changeset_ty &Res);

protected:
  /// UpdatedSearchState - Callback used when the search state changes.
  virtual void UpdatedSearchState(const changeset_ty &Changes,
                                  const changesetlist_ty &Sets) {}

  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;

  DeltaAlgorithm& operator=(const DeltaAlgorithm&) = default;

public:
  virtual ~DeltaAlgorithm();

  /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
  /// subsets of changes and returning the smallest set which still satisfies
  /// the test predicate.
  changeset_ty Run(const changeset_ty &Changes);
};
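
// Illustrative sketch (not part of the upstream header) of a minimal
// subclass: the predicate is "the set still contains change #3", so Run()
// minimizes to {3}. `KeepsThree` is a hypothetical name.
//
//   struct KeepsThree : DeltaAlgorithm {
//     bool ExecuteOneTest(const changeset_ty &S) override {
//       return S.count(3) != 0;
//     }
//   };
//   KeepsThree D;
//   changeset_ty Minimal = D.Run({1, 2, 3, 4}); // yields {3}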

} // end namespace llvm

#endif // LLVM_ADT_DELTAALGORITHM_H

//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file builds on the ADT/GraphTraits.h file to build a generic
/// depth-first graph iterator.  This file exposes the following
/// functions/types:
///
/// df_begin/df_end/df_iterator
///   * Normal depth-first iteration - visit a node and then all of its
///     children.
///
/// idf_begin/idf_end/idf_iterator
///   * Depth-first iteration on the 'inverse' graph.
///
/// df_ext_begin/df_ext_end/df_ext_iterator
///   * Normal depth-first iteration - visit a node and then all of its
///     children. This iterator stores the 'visited' set in an external set,
///     which allows it to be more efficient, and allows external clients to
///     use the set for other purposes.
///
/// idf_ext_begin/idf_ext_end/idf_ext_iterator
///   * Depth-first iteration on the 'inverse' graph.
///     This iterator stores the 'visited' set in an external set, which
///     allows it to be more efficient, and allows external clients to use
///     the set for other purposes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
#define LLVM_ADT_DEPTHFIRSTITERATOR_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
#include <optional>
#include <utility>
#include <vector>

namespace llvm {

// df_iterator_storage - A private class which is used to figure out where to
// store the visited set.
template<class SetType, bool External>   // Non-external set
class df_iterator_storage {
public:
  SetType Visited;
};

template<class SetType>
class df_iterator_storage<SetType, true> {
public:
  df_iterator_storage(SetType &VSet) : Visited(VSet) {}
  df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}

  SetType &Visited;
};

// The visited state for the iteration is a simple set augmented with
// one more method, completed, which is invoked when all children of a
// node have been processed. It is intended to distinguish back edges from
// cross edges in the spanning tree but is not used in the common case.
template <typename NodeRef, unsigned SmallSize=8>
struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
  using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
  using iterator = typename BaseSet::iterator;

  std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N); }
  template <typename IterT>
  void insert(IterT Begin, IterT End) { BaseSet::insert(Begin,End); }

  void completed(NodeRef) {}
};

// Generic Depth First Iterator
template <class GraphT,
          class SetType =
              df_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
          bool ExtStorage = false, class GT = GraphTraits<GraphT>>
class df_iterator : public df_iterator_storage<SetType, ExtStorage> {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = typename GT::NodeRef;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = const value_type &;

private:
  using NodeRef = typename GT::NodeRef;
  using ChildItTy = typename GT::ChildIteratorType;

  // First element is node reference, second is the 'next child' to visit.
  // The second element is initialized lazily to pick up graph changes during
  // the DFS.
  using StackElement = std::pair<NodeRef, std::optional<ChildItTy>>;

  // VisitStack - Used to maintain the ordering.  Top = current block
  std::vector<StackElement> VisitStack;

  inline df_iterator(NodeRef Node) {
    this->Visited.insert(Node);
    VisitStack.push_back(StackElement(Node, std::nullopt));
  }

  inline df_iterator() = default; // End is when stack is empty

  inline df_iterator(NodeRef Node, SetType &S)
      : df_iterator_storage<SetType, ExtStorage>(S) {
    if (this->Visited.insert(Node).second)
      VisitStack.push_back(StackElement(Node, std::nullopt));
  }

  inline df_iterator(SetType &S)
    : df_iterator_storage<SetType, ExtStorage>(S) {
    // End is when stack is empty
  }

  inline void toNext() {
    do {
      NodeRef Node = VisitStack.back().first;
      std::optional<ChildItTy> &Opt = VisitStack.back().second;

      if (!Opt)
        Opt.emplace(GT::child_begin(Node));

      // Notice that we directly mutate *Opt here, so that
      // VisitStack.back().second actually gets updated as the iterator
      // increases.
      while (*Opt != GT::child_end(Node)) {
        NodeRef Next = *(*Opt)++;
        // Has our next sibling been visited?
        if (this->Visited.insert(Next).second) {
          // No, do it now.
          VisitStack.push_back(StackElement(Next, std::nullopt));
          return;
        }
      }
      this->Visited.completed(Node);

      // Oops, ran out of successors... go up a level on the stack.
      VisitStack.pop_back();
    } while (!VisitStack.empty());
  }

public:
  // Provide static begin and end methods as our public "constructors"
  static df_iterator begin(const GraphT &G) {
    return df_iterator(GT::getEntryNode(G));
  }
  static df_iterator end(const GraphT &G) { return df_iterator(); }

  // Static begin and end methods as our public ctors for external iterators
  static df_iterator begin(const GraphT &G, SetType &S) {
    return df_iterator(GT::getEntryNode(G), S);
  }
  static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); }

  bool operator==(const df_iterator &x) const {
    return VisitStack == x.VisitStack;
  }
  bool operator!=(const df_iterator &x) const { return !(*this == x); }

  reference operator*() const { return VisitStack.back().first; }

  // This is a nonstandard operator-> that dereferences the pointer an extra
  // time... so that you can actually call methods ON the Node, because
  // the contained type is a pointer.  This allows, e.g., BBIt->getTerminator().
  //
  NodeRef operator->() const { return **this; }

  df_iterator &operator++() { // Preincrement
    toNext();
    return *this;
  }

  /// Skips all children of the current node and traverses to the next node.
  ///
  /// Note: This function takes care of incrementing the iterator. If you
  /// always increment and call this function, you risk walking off the end.
  df_iterator &skipChildren() {
    VisitStack.pop_back();
    if (!VisitStack.empty())
      toNext();
    return *this;
  }

  df_iterator operator++(int) { // Postincrement
    df_iterator tmp = *this;
    ++*this;
    return tmp;
  }

  // nodeVisited - return true if this iterator has already visited the
  // specified node.  This is public, and will probably be used to iterate over
  // nodes that a depth first iteration did not find: i.e., unreachable nodes.
  //
  bool nodeVisited(NodeRef Node) const {
    return this->Visited.contains(Node);
  }

  /// getPathLength - Return the length of the path from the entry node to the
  /// current node, counting both nodes.
  unsigned getPathLength() const { return VisitStack.size(); }

  /// getPath - Return the n'th node in the path from the entry node to the
  /// current node.
  NodeRef getPath(unsigned n) const { return VisitStack[n].first; }
};

// Provide global constructors that automatically figure out correct types...
//
template <class T>
df_iterator<T> df_begin(const T& G) {
  return df_iterator<T>::begin(G);
}

template <class T>
df_iterator<T> df_end(const T& G) {
  return df_iterator<T>::end(G);
}

// Provide an accessor method to use them in range-based patterns.
template <class T>
iterator_range<df_iterator<T>> depth_first(const T& G) {
  return make_range(df_begin(G), df_end(G));
}
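
// Illustrative usage sketch (not part of the upstream header): the usual
// range-based pattern over an LLVM CFG. `F` is an assumed IR Function and
// `visit` a hypothetical callback; any type with GraphTraits works the same.
//
//   for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
//     visit(BB);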

// Provide global definitions of external depth first iterators...
template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
struct df_ext_iterator : public df_iterator<T, SetTy, true> {
  df_ext_iterator(const df_iterator<T, SetTy, true> &V)
    : df_iterator<T, SetTy, true>(V) {}
};

template <class T, class SetTy>
df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
  return df_ext_iterator<T, SetTy>::begin(G, S);
}

template <class T, class SetTy>
df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
  return df_ext_iterator<T, SetTy>::end(G, S);
}

template <class T, class SetTy>
iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
                                                          SetTy &S) {
  return make_range(df_ext_begin(G, S), df_ext_end(G, S));
}
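
// Illustrative usage sketch (not part of the upstream header): reusing one
// visited set across two traversals, so the second walk skips everything the
// first one reached. `A` and `B` are hypothetical entry blocks.
//
//   df_iterator_default_set<BasicBlock *> Visited;
//   for (BasicBlock *BB : depth_first_ext(&A, Visited))
//     ; // first traversal marks everything reachable from A
//   for (BasicBlock *BB : depth_first_ext(&B, Visited))
//     ; // visits only blocks not already reached from A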

// Provide global definitions of inverse depth first iterators...
template <class T,
          class SetTy =
              df_iterator_default_set<typename GraphTraits<T>::NodeRef>,
          bool External = false>
struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
  idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
    : df_iterator<Inverse<T>, SetTy, External>(V) {}
};

template <class T>
idf_iterator<T> idf_begin(const T& G) {
  return idf_iterator<T>::begin(Inverse<T>(G));
}

template <class T>
idf_iterator<T> idf_end(const T& G){
  return idf_iterator<T>::end(Inverse<T>(G));
}

// Provide an accessor method to use them in range-based patterns.
template <class T>
iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
  return make_range(idf_begin(G), idf_end(G));
}

// Provide global definitions of external inverse depth first iterators...
template <class T, class SetTy = df_iterator_default_set<typename GraphTraits<T>::NodeRef>>
struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
  idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
    : idf_iterator<T, SetTy, true>(V) {}
  idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
    : idf_iterator<T, SetTy, true>(V) {}
};

template <class T, class SetTy>
idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
  return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
}

template <class T, class SetTy>
idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
  return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
}

template <class T, class SetTy>
iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
                                                                   SetTy &S) {
  return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
}

} // end namespace llvm

#endif // LLVM_ADT_DEPTHFIRSTITERATOR_H

//===- StringMap.h - String Hash table map interface ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the StringMap class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STRINGMAP_H
#define LLVM_ADT_STRINGMAP_H

#include "llvm/ADT/StringMapEntry.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/AllocatorBase.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <initializer_list>
#include <iterator>

namespace llvm {

template <typename ValueTy> class StringMapConstIterator;
template <typename ValueTy> class StringMapIterator;
template <typename ValueTy> class StringMapKeyIterator;

/// StringMapImpl - This is the base class of StringMap that is shared among
/// all of its instantiations.
class StringMapImpl {
protected:
  // Array of NumBuckets pointers to entries, null pointers are holes.
  // TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
  // followed by an array of the actual hash values as unsigned integers.
  StringMapEntryBase **TheTable = nullptr;
  unsigned NumBuckets = 0;
  unsigned NumItems = 0;
  unsigned NumTombstones = 0;
  unsigned ItemSize;

protected:
  explicit StringMapImpl(unsigned itemSize) : ItemSize(itemSize) {}
  StringMapImpl(StringMapImpl &&RHS)
      : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
        NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
        ItemSize(RHS.ItemSize) {
    RHS.TheTable = nullptr;
    RHS.NumBuckets = 0;
    RHS.NumItems = 0;
    RHS.NumTombstones = 0;
  }

  StringMapImpl(unsigned InitSize, unsigned ItemSize);
  unsigned RehashTable(unsigned BucketNo = 0);

  /// LookupBucketFor - Look up the bucket that the specified string should end
  /// up in.  If it already exists as a key in the map, the Item pointer for the
  /// specified bucket will be non-null.  Otherwise, it will be null.  In either
  /// case, the FullHashValue field of the bucket will be set to the hash value
  /// of the string.
  unsigned LookupBucketFor(StringRef Key);

  /// FindKey - Look up the bucket that contains the specified key. If it exists
  /// in the map, return the bucket number of the key.  Otherwise return -1.
  /// This does not modify the map.
  int FindKey(StringRef Key) const;

  /// RemoveKey - Remove the specified StringMapEntry from the table, but do not
  /// delete it.  This aborts if the value isn't in the table.
  void RemoveKey(StringMapEntryBase *V);

  /// RemoveKey - Remove the StringMapEntry for the specified key from the
  /// table, returning it.  If the key is not in the table, this returns null.
  StringMapEntryBase *RemoveKey(StringRef Key);

  /// Allocate the table with the specified number of buckets and otherwise
  /// setup the map as empty.
  void init(unsigned Size);

public:
  static constexpr uintptr_t TombstoneIntVal =
      static_cast<uintptr_t>(-1)
      << PointerLikeTypeTraits<StringMapEntryBase *>::NumLowBitsAvailable;

  static StringMapEntryBase *getTombstoneVal() {
    return reinterpret_cast<StringMapEntryBase *>(TombstoneIntVal);
  }

  unsigned getNumBuckets() const { return NumBuckets; }
  unsigned getNumItems() const { return NumItems; }

  bool empty() const { return NumItems == 0; }
  unsigned size() const { return NumItems; }

  void swap(StringMapImpl &Other) {
    std::swap(TheTable, Other.TheTable);
    std::swap(NumBuckets, Other.NumBuckets);
    std::swap(NumItems, Other.NumItems);
    std::swap(NumTombstones, Other.NumTombstones);
  }
};

/// StringMap - This is an unconventional map that is specialized for handling
/// keys that are "strings", which are basically ranges of bytes. This does some
/// funky memory allocation and hashing things to make it extremely efficient,
/// storing the string data *after* the value in the map.
template <typename ValueTy, typename AllocatorTy = MallocAllocator>
class LLVM_ALLOCATORHOLDER_EMPTYBASE StringMap
    : public StringMapImpl,
      private detail::AllocatorHolder<AllocatorTy> {
  using AllocTy = detail::AllocatorHolder<AllocatorTy>;

public:
  using MapEntryTy = StringMapEntry<ValueTy>;

  StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}

  explicit StringMap(unsigned InitialSize)
      : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}

  explicit StringMap(AllocatorTy A)
      : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), AllocTy(A) {}

  StringMap(unsigned InitialSize, AllocatorTy A)
      : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
        AllocTy(A) {}

  StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
      : StringMapImpl(List.size(), static_cast<unsigned>(sizeof(MapEntryTy))) {
    insert(List);
  }

  StringMap(StringMap &&RHS)
      : StringMapImpl(std::move(RHS)), AllocTy(std::move(RHS.getAllocator())) {}

  StringMap(const StringMap &RHS)
      : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))),
        AllocTy(RHS.getAllocator()) {
    if (RHS.empty())
      return;

    // Allocate TheTable of the same size as RHS's TheTable, and set the
    // sentinel appropriately (and NumBuckets).
    init(RHS.NumBuckets);
    unsigned *HashTable = (unsigned *)(TheTable + NumBuckets + 1),
             *RHSHashTable = (unsigned *)(RHS.TheTable + NumBuckets + 1);

    NumItems = RHS.NumItems;
    NumTombstones = RHS.NumTombstones;
    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
      StringMapEntryBase *Bucket = RHS.TheTable[I];
      if (!Bucket || Bucket == getTombstoneVal()) {
        TheTable[I] = Bucket;
        continue;
      }

      TheTable[I] = MapEntryTy::create(
          static_cast<MapEntryTy *>(Bucket)->getKey(), getAllocator(),
          static_cast<MapEntryTy *>(Bucket)->getValue());
      HashTable[I] = RHSHashTable[I];
    }

    // Note that here we've copied everything from the RHS into this object,
    // tombstones included. We could, instead, have re-probed for each key to
    // instantiate this new object without any tombstone buckets. The
    // assumption here is that items are rarely deleted from most StringMaps,
    // and so tombstones are rare, so the cost of re-probing for all inputs is
    // not worthwhile.
  }

  StringMap &operator=(StringMap RHS) {
    StringMapImpl::swap(RHS);
    std::swap(getAllocator(), RHS.getAllocator());
    return *this;
  }

  ~StringMap() {
    // Delete all the elements in the map, but don't reset the elements
    // to default values.  This is a copy of clear(), but avoids unnecessary
    // work not required in the destructor.
    if (!empty()) {
      for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
        StringMapEntryBase *Bucket = TheTable[I];
        if (Bucket && Bucket != getTombstoneVal()) {
          static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
        }
      }
    }
    free(TheTable);
  }

  using AllocTy::getAllocator;

  using key_type = const char *;
  using mapped_type = ValueTy;
  using value_type = StringMapEntry<ValueTy>;
  using size_type = size_t;

  using const_iterator = StringMapConstIterator<ValueTy>;
  using iterator = StringMapIterator<ValueTy>;

  iterator begin() { return iterator(TheTable, NumBuckets == 0); }
  iterator end() { return iterator(TheTable + NumBuckets, true); }
  const_iterator begin() const {
    return const_iterator(TheTable, NumBuckets == 0);
  }
  const_iterator end() const {
    return const_iterator(TheTable + NumBuckets, true);
  }

  iterator_range<StringMapKeyIterator<ValueTy>> keys() const {
    return make_range(StringMapKeyIterator<ValueTy>(begin()),
                      StringMapKeyIterator<ValueTy>(end()));
  }

  iterator find(StringRef Key) {
    int Bucket = FindKey(Key);
    if (Bucket == -1)
      return end();
    return iterator(TheTable + Bucket, true);
  }

  const_iterator find(StringRef Key) const {
    int Bucket = FindKey(Key);
    if (Bucket == -1)
      return end();
    return const_iterator(TheTable + Bucket, true);
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueTy lookup(StringRef Key) const {
    const_iterator Iter = find(Key);
    if (Iter != end())
      return Iter->second;
    return ValueTy();
  }

  /// at - Return the entry for the specified key, or abort if no such
  /// entry exists.
  const ValueTy &at(StringRef Val) const {
    auto Iter = this->find(std::move(Val));
    assert(Iter != this->end() && "StringMap::at failed due to a missing key");
    return Iter->second;
  }

  /// Lookup the ValueTy for the \p Key, or create a default constructed value
  /// if the key is not in the map.
  ValueTy &operator[](StringRef Key) { return try_emplace(Key).first->second; }

  /// contains - Return true if the element is in the map, false otherwise.
  bool contains(StringRef Key) const { return find(Key) != end(); }

  /// count - Return 1 if the element is in the map, 0 otherwise.
  size_type count(StringRef Key) const { return contains(Key) ? 1 : 0; }

  template <typename InputTy>
  size_type count(const StringMapEntry<InputTy> &MapEntry) const {
    return count(MapEntry.getKey());
  }

  /// equal - check whether both of the containers are equal.
  bool operator==(const StringMap &RHS) const {
    if (size() != RHS.size())
      return false;

    for (const auto &KeyValue : *this) {
      auto FindInRHS = RHS.find(KeyValue.getKey());

      if (FindInRHS == RHS.end())
        return false;

      if (!(KeyValue.getValue() == FindInRHS->getValue()))
        return false;
    }

    return true;
  }

  bool operator!=(const StringMap &RHS) const { return !(*this == RHS); }

  /// insert - Insert the specified key/value pair into the map.  If the key
  /// already exists in the map, return false and ignore the request, otherwise
  /// insert it and return true.
  bool insert(MapEntryTy *KeyValue) {
    unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
    StringMapEntryBase *&Bucket = TheTable[BucketNo];
    if (Bucket && Bucket != getTombstoneVal())
      return false; // Already exists in map.

    if (Bucket == getTombstoneVal())
      --NumTombstones;
    Bucket = KeyValue;
    ++NumItems;
    assert(NumItems + NumTombstones <= NumBuckets);

    RehashTable();
    return true;
  }

  /// insert - Inserts the specified key/value pair into the map if the key
  /// isn't already in the map. The bool component of the returned pair is true
  /// if and only if the insertion takes place, and the iterator component of
  /// the pair points to the element with key equivalent to the key of the pair.
  std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
    return try_emplace(KV.first, std::move(KV.second));
  }

  /// Inserts elements from range [first, last). If multiple elements in the
  /// range have keys that compare equivalent, it is unspecified which element
  /// is inserted.
  template <typename InputIt> void insert(InputIt First, InputIt Last) {
    for (InputIt It = First; It != Last; ++It)
      insert(*It);
  }

  /// Inserts elements from the initializer list \p List. If multiple elements
  /// in the list have keys that compare equivalent, it is unspecified which
  /// element is inserted.
  void insert(std::initializer_list<std::pair<StringRef, ValueTy>> List) {
    insert(List.begin(), List.end());
  }

  /// Inserts an element or assigns to the current element if the key already
  /// exists. The return type is the same as try_emplace.
  template <typename V>
  std::pair<iterator, bool> insert_or_assign(StringRef Key, V &&Val) {
    auto Ret = try_emplace(Key, std::forward<V>(Val));
    if (!Ret.second)
      Ret.first->second = std::forward<V>(Val);
    return Ret;
  }

  /// Emplace a new element for the specified key into the map if the key isn't
  /// already in the map. The bool component of the returned pair is true
  /// if and only if the insertion takes place, and the iterator component of
  /// the pair points to the element with key equivalent to the key of the pair.
  template <typename... ArgsTy>
  std::pair<iterator, bool> try_emplace(StringRef Key, ArgsTy &&...Args) {
    unsigned BucketNo = LookupBucketFor(Key);
    StringMapEntryBase *&Bucket = TheTable[BucketNo];
    if (Bucket && Bucket != getTombstoneVal())
      return std::make_pair(iterator(TheTable + BucketNo, false),
                            false); // Already exists in map.

    if (Bucket == getTombstoneVal())
      --NumTombstones;
    Bucket =
        MapEntryTy::create(Key, getAllocator(), std::forward<ArgsTy>(Args)...);
    ++NumItems;
    assert(NumItems + NumTombstones <= NumBuckets);

    BucketNo = RehashTable(BucketNo);
    return std::make_pair(iterator(TheTable + BucketNo, false), true);
  }

  // clear - Empties out the StringMap
  void clear() {
    if (empty())
      return;

    // Zap all values, resetting the keys back to non-present (not tombstone),
    // which is safe because we're removing all elements.
    for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
      StringMapEntryBase *&Bucket = TheTable[I];
      if (Bucket && Bucket != getTombstoneVal()) {
        static_cast<MapEntryTy *>(Bucket)->Destroy(getAllocator());
      }
      Bucket = nullptr;
    }

    NumItems = 0;
    NumTombstones = 0;
  }

  /// remove - Remove the specified key/value pair from the map, but do not
  /// erase it.  This aborts if the key is not in the map.
  void remove(MapEntryTy *KeyValue) { RemoveKey(KeyValue); }

  void erase(iterator I) {
    MapEntryTy &V = *I;
    remove(&V);
    V.Destroy(getAllocator());
  }

  bool erase(StringRef Key) {
    iterator I = find(Key);
    if (I == end())
      return false;
    erase(I);
    return true;
  }
};
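
// Illustrative usage sketch (not part of the upstream header): lookup()
// returns a copy (default-constructed if absent), while operator[] inserts a
// default-constructed value for missing keys.
//
//   StringMap<int> Counts;
//   Counts.try_emplace("apple", 1);
//   Counts["banana"] += 2;           // inserts 0, then adds 2
//   int N = Counts.lookup("cherry"); // 0; "cherry" is not inserted
//   bool Known = Counts.contains("apple");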

template <typename DerivedTy, typename ValueTy>
class StringMapIterBase
    : public iterator_facade_base<DerivedTy, std::forward_iterator_tag,
                                  ValueTy> {
protected:
  StringMapEntryBase **Ptr = nullptr;

public:
  StringMapIterBase() = default;

  explicit StringMapIterBase(StringMapEntryBase **Bucket,
                             bool NoAdvance = false)
      : Ptr(Bucket) {
    if (!NoAdvance)
      AdvancePastEmptyBuckets();
  }

  DerivedTy &operator=(const DerivedTy &Other) {
    Ptr = Other.Ptr;
    return static_cast<DerivedTy &>(*this);
  }

  friend bool operator==(const DerivedTy &LHS, const DerivedTy &RHS) {
    return LHS.Ptr == RHS.Ptr;
  }

  DerivedTy &operator++() { // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return static_cast<DerivedTy &>(*this);
  }

  DerivedTy operator++(int) { // Post-increment
    DerivedTy Tmp(Ptr);
    ++*this;
    return Tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
      ++Ptr;
  }
};

template <typename ValueTy>
class StringMapConstIterator
    : public StringMapIterBase<StringMapConstIterator<ValueTy>,
                               const StringMapEntry<ValueTy>> {
  using base = StringMapIterBase<StringMapConstIterator<ValueTy>,
                                 const StringMapEntry<ValueTy>>;

public:
  StringMapConstIterator() = default;
  explicit StringMapConstIterator(StringMapEntryBase **Bucket,
                                  bool NoAdvance = false)
      : base(Bucket, NoAdvance) {}

  const StringMapEntry<ValueTy> &operator*() const {
    return *static_cast<const StringMapEntry<ValueTy> *>(*this->Ptr);
  }
};

template <typename ValueTy>
class StringMapIterator : public StringMapIterBase<StringMapIterator<ValueTy>,
                                                   StringMapEntry<ValueTy>> {
  using base =
      StringMapIterBase<StringMapIterator<ValueTy>, StringMapEntry<ValueTy>>;

public:
  StringMapIterator() = default;
  explicit StringMapIterator(StringMapEntryBase **Bucket,
                             bool NoAdvance = false)
      : base(Bucket, NoAdvance) {}

  StringMapEntry<ValueTy> &operator*() const {
    return *static_cast<StringMapEntry<ValueTy> *>(*this->Ptr);
  }

  operator StringMapConstIterator<ValueTy>() const {
    return StringMapConstIterator<ValueTy>(this->Ptr, true);
  }
};

template <typename ValueTy>
class StringMapKeyIterator
    : public iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
                                   StringMapConstIterator<ValueTy>,
                                   std::forward_iterator_tag, StringRef> {
  using base = iterator_adaptor_base<StringMapKeyIterator<ValueTy>,
                                     StringMapConstIterator<ValueTy>,
                                     std::forward_iterator_tag, StringRef>;

public:
  StringMapKeyIterator() = default;
  explicit StringMapKeyIterator(StringMapConstIterator<ValueTy> Iter)
      : base(std::move(Iter)) {}

  StringRef operator*() const { return this->wrapped()->getKey(); }
};
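
// A short consumption sketch: StringMap itself wraps this iterator in a
// keys() range, yielding StringRef keys without dereferencing the values.
//
//   StringMap<int> M;
//   M.try_emplace("a", 1);
//   M.try_emplace("b", 2);
//   for (StringRef K : M.keys())
//     ; // K visits "a" and "b" in unspecified (hash table) order.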

} // end namespace llvm

#endif // LLVM_ADT_STRINGMAP_H

//===- GenericUniformityInfo.h ---------------------------*- C++ -*--------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GENERICUNIFORMITYINFO_H
#define LLVM_ADT_GENERICUNIFORMITYINFO_H

#include "llvm/ADT/GenericCycleInfo.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class TargetTransformInfo;

template <typename ContextT> class GenericUniformityAnalysisImpl;
template <typename ImplT> struct GenericUniformityAnalysisImplDeleter {
  // Ugly hack around the fact that recent (> 15.0) clang will run into an
  // is_invocable() check in some GNU libstdc++ unique_ptr implementations
  // and reject this deleter if you just make it callable with an ImplT *,
  // whether or not the type of ImplT is spelled out.
  using pointer = ImplT *;
  void operator()(ImplT *Impl);
};

template <typename ContextT> class GenericUniformityInfo {
public:
  using BlockT = typename ContextT::BlockT;
  using FunctionT = typename ContextT::FunctionT;
  using ValueRefT = typename ContextT::ValueRefT;
  using ConstValueRefT = typename ContextT::ConstValueRefT;
  using UseT = typename ContextT::UseT;
  using InstructionT = typename ContextT::InstructionT;
  using DominatorTreeT = typename ContextT::DominatorTreeT;
  using ThisT = GenericUniformityInfo<ContextT>;

  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;

  GenericUniformityInfo(FunctionT &F, const DominatorTreeT &DT,
                        const CycleInfoT &CI,
                        const TargetTransformInfo *TTI = nullptr);
  GenericUniformityInfo() = default;
  GenericUniformityInfo(GenericUniformityInfo &&) = default;
  GenericUniformityInfo &operator=(GenericUniformityInfo &&) = default;

  void compute() {
    DA->initialize();
    DA->compute();
  }

  /// Whether any divergence was detected.
  bool hasDivergence() const;

  /// The GPU kernel this analysis result is for.
  const FunctionT &getFunction() const { return *F; }

  /// Whether \p V is divergent at its definition.
  bool isDivergent(ConstValueRefT V) const;

  /// Whether \p V is uniform/non-divergent.
  bool isUniform(ConstValueRefT V) const { return !isDivergent(V); }

  // Similar queries for InstructionT. These accept a pointer argument so that
  // in LLVM IR, they overload the equivalent queries for Value*. For example,
  // if querying whether a BranchInst is divergent, it should not be treated as
  // a Value in LLVM IR.
  bool isUniform(const InstructionT *I) const { return !isDivergent(I); }
  bool isDivergent(const InstructionT *I) const;

  /// \brief Whether \p U is divergent. Uses of a uniform value can be
  /// divergent.
  bool isDivergentUse(const UseT &U) const;

  bool hasDivergentTerminator(const BlockT &B);

  void print(raw_ostream &Out) const;

private:
  using ImplT = GenericUniformityAnalysisImpl<ContextT>;

  FunctionT *F;
  std::unique_ptr<ImplT, GenericUniformityAnalysisImplDeleter<ImplT>> DA;

  GenericUniformityInfo(const GenericUniformityInfo &) = delete;
  GenericUniformityInfo &operator=(const GenericUniformityInfo &) = delete;
};
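
// A small query sketch, assuming UniformityInfo names an instantiation of
// GenericUniformityInfo for LLVM IR (the helper below is hypothetical):
//
//   static bool operandNeedsScalarization(const UniformityInfo &UI,
//                                         const Use &U) {
//     // Query the use, not just the defined value: per the note above, a
//     // use of a uniform value can still be divergent.
//     return UI.isDivergentUse(U);
//   }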

} // namespace llvm

#endif // LLVM_ADT_GENERICUNIFORMITYINFO_H

//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a coalescing interval map for small objects.
///
/// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
/// same value are represented in a compressed form.
///
/// Iterators provide ordered access to the compressed intervals rather than the
/// individual keys, and insert and erase operations use key intervals as well.
///
/// Like SmallVector, IntervalMap will store the first N intervals in the map
/// object itself without any allocations. When space is exhausted it switches
/// to a B+-tree representation with very small overhead for small key and
/// value objects.
///
/// A Traits class specifies how keys are compared. It also allows IntervalMap
/// to work with both closed and half-open intervals.
///
/// Keys and values are not stored next to each other in a std::pair, so we
/// don't provide such a value_type. Dereferencing iterators only returns the
/// mapped value. The interval bounds are accessible through the start() and
/// stop() iterator methods.
///
/// IntervalMap is optimized for small key and value objects; 4 or 8 bytes
/// each is the optimal size. For large objects use std::map instead.
//
//===----------------------------------------------------------------------===//
//
// Synopsis:
//
// template <typename KeyT, typename ValT, unsigned N, typename Traits>
// class IntervalMap {
// public:
//   typedef KeyT key_type;
//   typedef ValT mapped_type;
//   typedef RecyclingAllocator<...> Allocator;
//   class iterator;
//   class const_iterator;
//
//   explicit IntervalMap(Allocator&);
//   ~IntervalMap();
//
//   bool empty() const;
//   KeyT start() const;
//   KeyT stop() const;
//   ValT lookup(KeyT x, ValT NotFound = ValT()) const;
//
//   const_iterator begin() const;
//   const_iterator end() const;
//   iterator begin();
//   iterator end();
//   const_iterator find(KeyT x) const;
//   iterator find(KeyT x);
//
//   void insert(KeyT a, KeyT b, ValT y);
//   void clear();
// };
//
// template <typename KeyT, typename ValT, unsigned N, typename Traits>
// class IntervalMap::const_iterator {
// public:
//   using iterator_category = std::bidirectional_iterator_tag;
//   using value_type = ValT;
//   using difference_type = std::ptrdiff_t;
//   using pointer = value_type *;
//   using reference = value_type &;
//
//   bool operator==(const const_iterator &) const;
//   bool operator!=(const const_iterator &) const;
//   bool valid() const;
//
//   const KeyT &start() const;
//   const KeyT &stop() const;
//   const ValT &value() const;
//   const ValT &operator*() const;
//   const ValT *operator->() const;
//
//   const_iterator &operator++();
//   const_iterator operator++(int);
//   const_iterator &operator--();
//   const_iterator operator--(int);
//   void goToBegin();
//   void goToEnd();
//   void find(KeyT x);
//   void advanceTo(KeyT x);
// };
//
// template <typename KeyT, typename ValT, unsigned N, typename Traits>
// class IntervalMap::iterator : public const_iterator {
// public:
//   void insert(KeyT a, KeyT b, ValT y);
//   void erase();
// };
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_INTERVALMAP_H
#define LLVM_ADT_INTERVALMAP_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <new>
#include <utility>

namespace llvm {

//===----------------------------------------------------------------------===//
//---                              Key traits                              ---//
//===----------------------------------------------------------------------===//
//
// The IntervalMap works with closed or half-open intervals.
// Adjacent intervals that map to the same value are coalesced.
//
// The IntervalMapInfo traits class is used to determine if a key is contained
// in an interval, and if two intervals are adjacent so they can be coalesced.
// The provided implementation works for closed integer intervals; other keys
// probably need a specialized version.
//
// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
//
// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
// allowed. This is so that stopLess(a, b) can be used to determine if two
// intervals overlap.
//
//===----------------------------------------------------------------------===//

template <typename T>
struct IntervalMapInfo {
  /// startLess - Return true if x is not in [a;b].
  /// This is x < a both for closed intervals and for [a;b) half-open intervals.
  static inline bool startLess(const T &x, const T &a) {
    return x < a;
  }

  /// stopLess - Return true if x is not in [a;b].
  /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
  static inline bool stopLess(const T &b, const T &x) {
    return b < x;
  }

  /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
  /// This is a+1 == b for closed intervals, a == b for half-open intervals.
  static inline bool adjacent(const T &a, const T &b) {
    return a+1 == b;
  }

  /// nonEmpty - Return true if [a;b] is non-empty.
  /// This is a <= b for a closed interval, a < b for [a;b) half-open intervals.
  static inline bool nonEmpty(const T &a, const T &b) {
    return a <= b;
  }
};

template <typename T>
struct IntervalMapHalfOpenInfo {
  /// startLess - Return true if x is not in [a;b).
  static inline bool startLess(const T &x, const T &a) {
    return x < a;
  }

  /// stopLess - Return true if x is not in [a;b).
  static inline bool stopLess(const T &b, const T &x) {
    return b <= x;
  }

  /// adjacent - Return true when the intervals [x;a) and [b;y) can coalesce.
  static inline bool adjacent(const T &a, const T &b) {
    return a == b;
  }

  /// nonEmpty - Return true if [a;b) is non-empty.
  static inline bool nonEmpty(const T &a, const T &b) {
    return a < b;
  }
};
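
// Worked example of the difference between the two traits classes: with
// closed intervals, [1;3] and [4;7] coalesce because adjacent(3, 4) checks
// 3+1 == 4; with half-open intervals, [1;4) and [4;7) coalesce because
// adjacent(4, 4) checks 4 == 4.
//
//   IntervalMapInfo<int>::adjacent(3, 4);         // true:  3 + 1 == 4
//   IntervalMapHalfOpenInfo<int>::adjacent(4, 4); // true:  4 == 4
//   IntervalMapHalfOpenInfo<int>::adjacent(3, 4); // false: a gap remains at 3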

/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
/// It should be considered private to the implementation.
namespace IntervalMapImpl {

using IdxPair = std::pair<unsigned,unsigned>;

//===----------------------------------------------------------------------===//
//---                    IntervalMapImpl::NodeBase                         ---//
//===----------------------------------------------------------------------===//
//
// Both leaf and branch nodes store vectors of pairs.
// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
//
// Keys and values are stored in separate arrays to avoid padding caused by
// different object alignments. This also helps improve locality of reference
// when searching the keys.
//
// The nodes don't know how many elements they contain - that information is
// stored elsewhere. Omitting the size field prevents padding and allows a node
// to fill the allocated cache lines completely.
//
// These are typical key and value sizes, the node branching factor (N), and
// wasted space when nodes are sized to fit in three cache lines (192 bytes):
//
//   T1  T2   N Waste  Used by
//    4   4  24   0    Branch<4> (32-bit pointers)
//    8   4  16   0    Leaf<4,4>, Branch<4>
//    8   8  12   0    Leaf<4,8>, Branch<8>
//   16   4   9  12    Leaf<8,4>
//   16   8   8   0    Leaf<8,8>
//
//===----------------------------------------------------------------------===//

template <typename T1, typename T2, unsigned N>
class NodeBase {
public:
  enum { Capacity = N };

  T1 first[N];
  T2 second[N];

  /// copy - Copy elements from another node.
  /// @param Other Node elements are copied from.
  /// @param i     Beginning of the source range in other.
  /// @param j     Beginning of the destination range in this.
  /// @param Count Number of elements to copy.
  template <unsigned M>
  void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
            unsigned j, unsigned Count) {
    assert(i + Count <= M && "Invalid source range");
    assert(j + Count <= N && "Invalid dest range");
    for (unsigned e = i + Count; i != e; ++i, ++j) {
      first[j]  = Other.first[i];
      second[j] = Other.second[i];
    }
  }

  /// moveLeft - Move elements to the left.
  /// @param i     Beginning of the source range.
  /// @param j     Beginning of the destination range.
  /// @param Count Number of elements to copy.
  void moveLeft(unsigned i, unsigned j, unsigned Count) {
    assert(j <= i && "Use moveRight shift elements right");
    copy(*this, i, j, Count);
  }

  /// moveRight - Move elements to the right.
  /// @param i     Beginning of the source range.
  /// @param j     Beginning of the destination range.
  /// @param Count Number of elements to copy.
  void moveRight(unsigned i, unsigned j, unsigned Count) {
    assert(i <= j && "Use moveLeft shift elements left");
    assert(j + Count <= N && "Invalid range");
    while (Count--) {
      first[j + Count]  = first[i + Count];
      second[j + Count] = second[i + Count];
    }
  }

  /// erase - Erase elements [i;j).
  /// @param i    Beginning of the range to erase.
  /// @param j    End of the range. (Exclusive).
  /// @param Size Number of elements in node.
  void erase(unsigned i, unsigned j, unsigned Size) {
    moveLeft(j, i, Size - j);
  }

  /// erase - Erase element at i.
  /// @param i    Index of element to erase.
  /// @param Size Number of elements in node.
  void erase(unsigned i, unsigned Size) {
    erase(i, i+1, Size);
  }

  /// shift - Shift elements [i;size) 1 position to the right.
  /// @param i    Beginning of the range to move.
  /// @param Size Number of elements in node.
  void shift(unsigned i, unsigned Size) {
    moveRight(i, i + 1, Size - i);
  }

  /// transferToLeftSib - Transfer elements to a left sibling node.
  /// @param Size  Number of elements in this.
  /// @param Sib   Left sibling node.
  /// @param SSize Number of elements in sib.
  /// @param Count Number of elements to transfer.
  void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
                         unsigned Count) {
    Sib.copy(*this, 0, SSize, Count);
    erase(0, Count, Size);
  }

  /// transferToRightSib - Transfer elements to a right sibling node.
  /// @param Size  Number of elements in this.
  /// @param Sib   Right sibling node.
  /// @param SSize Number of elements in sib.
  /// @param Count Number of elements to transfer.
  void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
                          unsigned Count) {
    Sib.moveRight(0, Count, SSize);
    Sib.copy(*this, Size-Count, 0, Count);
  }

  /// adjustFromLeftSib - Adjust the number of elements in this node by moving
  /// elements to or from a left sibling node.
  /// @param Size  Number of elements in this.
  /// @param Sib   Left sibling node.
  /// @param SSize Number of elements in sib.
  /// @param Add   The number of elements to add to this node, possibly < 0.
  /// @return      Number of elements added to this node, possibly negative.
  int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
    if (Add > 0) {
      // We want to grow, copy from sib.
      unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
      Sib.transferToRightSib(SSize, *this, Size, Count);
      return Count;
    } else {
      // We want to shrink, copy to sib.
      unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
      transferToLeftSib(Size, Sib, SSize, Count);
      return -Count;
    }
  }
};

/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
/// @param Node  Array of pointers to sibling nodes.
/// @param Nodes Number of nodes.
/// @param CurSize Array of current node sizes, will be overwritten.
/// @param NewSize Array of desired node sizes.
template <typename NodeT>
void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
                        unsigned CurSize[], const unsigned NewSize[]) {
  // Move elements right.
  for (int n = Nodes - 1; n; --n) {
    if (CurSize[n] == NewSize[n])
      continue;
    for (int m = n - 1; m != -1; --m) {
      int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
                                         NewSize[n] - CurSize[n]);
      CurSize[m] -= d;
      CurSize[n] += d;
      // Keep going if the current node was exhausted.
      if (CurSize[n] >= NewSize[n])
          break;
    }
  }

  if (Nodes == 0)
    return;

  // Move elements left.
  for (unsigned n = 0; n != Nodes - 1; ++n) {
    if (CurSize[n] == NewSize[n])
      continue;
    for (unsigned m = n + 1; m != Nodes; ++m) {
      int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
                                        CurSize[n] -  NewSize[n]);
      CurSize[m] += d;
      CurSize[n] -= d;
      // Keep going if the current node was exhausted.
      if (CurSize[n] >= NewSize[n])
          break;
    }
  }

#ifndef NDEBUG
  for (unsigned n = 0; n != Nodes; n++)
    assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
#endif
}

/// IntervalMapImpl::distribute - Compute a new distribution of node elements
/// after an overflow or underflow. Reserve space for a new element at Position,
/// and compute the node that will hold Position after redistributing node
/// elements.
///
/// It is required that
///
///   Elements == sum(CurSize), and
///   Elements + Grow <= Nodes * Capacity.
///
/// NewSize[] will be filled in such that:
///
///   sum(NewSize) == Elements, and
///   NewSize[i] <= Capacity.
///
/// The returned index is the node where Position will go, so:
///
///   sum(NewSize[0..idx-1]) <= Position
///   sum(NewSize[0..idx])   >= Position
///
/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
/// before the one holding the Position'th element where there is room for an
/// insertion.
///
/// @param Nodes    The number of nodes.
/// @param Elements Total elements in all nodes.
/// @param Capacity The capacity of each node.
/// @param CurSize  Array[Nodes] of current node sizes, or NULL.
/// @param NewSize  Array[Nodes] to receive the new node sizes.
/// @param Position Insert position.
/// @param Grow     Reserve space for a new element at Position.
/// @return         (node, offset) for Position.
IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
                   const unsigned *CurSize, unsigned NewSize[],
                   unsigned Position, bool Grow);
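
// For intuition, a worked example with illustrative numbers: distributing
// Elements = 10 over Nodes = 3 with Capacity = 4 and Grow set satisfies
// 10 + 1 <= 3 * 4 and must yield NewSize entries summing to 10 with each
// <= 4, e.g. {4, 3, 3}. For Position = 5 that split returns (1, 1): node 0
// absorbs elements 0..3, so element 5 lands at offset 1 of node 1. The
// exact split chosen is an implementation detail of distribute().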

//===----------------------------------------------------------------------===//
//---                   IntervalMapImpl::NodeSizer                         ---//
//===----------------------------------------------------------------------===//
//
// Compute node sizes from key and value types.
//
// The branching factors are chosen to make nodes fit in three cache lines.
// This may not be possible if keys or values are very large. Such large objects
// are handled correctly, but a std::map would probably give better performance.
//
//===----------------------------------------------------------------------===//

enum {
  // Cache line size. Most architectures have 32 or 64 byte cache lines.
  // We use 64 bytes here because it provides good branching factors.
  Log2CacheLine = 6,
  CacheLineBytes = 1 << Log2CacheLine,
  DesiredNodeBytes = 3 * CacheLineBytes
};

template <typename KeyT, typename ValT>
struct NodeSizer {
  enum {
    // Compute the leaf node branching factor that makes a node fit in three
    // cache lines. The branching factor must be at least 3, or some B+-tree
    // balancing algorithms won't work.
    // LeafSize can't be larger than CacheLineBytes. This is required by the
    // PointerIntPair used by NodeRef.
    DesiredLeafSize = DesiredNodeBytes /
      static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
    MinLeafSize = 3,
    LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
  };

  using LeafBase = NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize>;

  enum {
    // Now that we have the leaf branching factor, compute the actual allocation
    // unit size by rounding up to a whole number of cache lines.
    AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),

    // Determine the branching factor for branch nodes.
    BranchSize = AllocBytes /
      static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
  };

  /// Allocator - The recycling allocator used for both branch and leaf nodes.
  /// This typedef is very likely to be identical for all IntervalMaps with
  /// reasonably sized entries, so the same allocator can be shared among
  /// different kinds of maps.
  using Allocator =
      RecyclingAllocator<BumpPtrAllocator, char, AllocBytes, CacheLineBytes>;
};
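
// Worked sizing example (a sketch for KeyT = ValT = uint32_t on a 64-bit
// host): DesiredNodeBytes = 3 * 64 = 192, so DesiredLeafSize =
// 192 / (2*4 + 4) = 16 intervals per leaf. The leaf arrays then occupy
// 16 * (8 + 4) = 192 bytes, already a whole number of cache lines, so
// AllocBytes = 192 and BranchSize = 192 / (4 + 8) = 16, matching the
// Leaf<4,4> and Branch<4> rows of the table in the NodeBase section above.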

//===----------------------------------------------------------------------===//
//---                     IntervalMapImpl::NodeRef                         ---//
//===----------------------------------------------------------------------===//
//
// B+-tree nodes can be leaves or branches, so we need a polymorphic node
// pointer that can point to both kinds.
//
// All nodes are cache line aligned and the low 6 bits of a node pointer are
// always 0. These bits are used to store the number of elements in the
// referenced node. Besides saving space, placing node sizes in the parents
// allow tree balancing algorithms to run without faulting cache lines for nodes
// that may not need to be modified.
//
// A NodeRef doesn't know whether it references a leaf node or a branch node.
// It is the responsibility of the caller to use the correct types.
//
// Nodes are never supposed to be empty, and it is invalid to store a node size
// of 0 in a NodeRef. The valid range of sizes is 1-64.
//
//===----------------------------------------------------------------------===//

class NodeRef {
  struct CacheAlignedPointerTraits {
    static inline void *getAsVoidPointer(void *P) { return P; }
    static inline void *getFromVoidPointer(void *P) { return P; }
    static constexpr int NumLowBitsAvailable = Log2CacheLine;
  };
  PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;

public:
  /// NodeRef - Create a null ref.
  NodeRef() = default;

  /// operator bool - Detect a null ref.
  explicit operator bool() const { return pip.getOpaqueValue(); }

  /// NodeRef - Create a reference to the node p with n elements.
  template <typename NodeT>
  NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
    assert(n <= NodeT::Capacity && "Size too big for node");
  }

  /// size - Return the number of elements in the referenced node.
  unsigned size() const { return pip.getInt() + 1; }

  /// setSize - Update the node size.
  void setSize(unsigned n) { pip.setInt(n - 1); }

  /// subtree - Access the i'th subtree reference in a branch node.
  /// This depends on branch nodes storing the NodeRef array as their first
  /// member.
  NodeRef &subtree(unsigned i) const {
    return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
  }

  /// get - Dereference as a NodeT reference.
  template <typename NodeT>
  NodeT &get() const {
    return *reinterpret_cast<NodeT*>(pip.getPointer());
  }

  bool operator==(const NodeRef &RHS) const {
    if (pip == RHS.pip)
      return true;
    assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
    return false;
  }

  bool operator!=(const NodeRef &RHS) const {
    return !operator==(RHS);
  }
};
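
// A small sketch of the size packing: nodes are cache-line aligned, so the
// low Log2CacheLine bits of a node pointer are always zero, and NodeRef
// stores size-1 there (hence the valid size range of 1-64):
//
//   // Given a cache-line aligned Leaf *L holding 12 elements:
//   NodeRef NR(L, 12);      // Packs 11 into the low pointer bits.
//   unsigned S = NR.size(); // 12, computed without touching *L.
//   NR.setSize(11);         // Balancing updates sizes in the parent only.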

//===----------------------------------------------------------------------===//
//---                      IntervalMapImpl::LeafNode                       ---//
//===----------------------------------------------------------------------===//
//
// Leaf nodes store up to N disjoint intervals with corresponding values.
//
// The intervals are kept sorted and fully coalesced so there are no adjacent
// intervals mapping to the same value.
//
// These constraints are always satisfied:
//
// - Traits::stopLess(start(i), stop(i))    - Non-empty, sane intervals.
//
// - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
//
// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
//                                          - Fully coalesced.
//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
public:
  const KeyT &start(unsigned i) const { return this->first[i].first; }
  const KeyT &stop(unsigned i) const { return this->first[i].second; }
  const ValT &value(unsigned i) const { return this->second[i]; }

  KeyT &start(unsigned i) { return this->first[i].first; }
  KeyT &stop(unsigned i) { return this->first[i].second; }
  ValT &value(unsigned i) { return this->second[i]; }

  /// findFrom - Find the first interval after i that may contain x.
  /// @param i    Starting index for the search.
  /// @param Size Number of elements in node.
  /// @param x    Key to search for.
  /// @return     First index with !stopLess(key[i].stop, x), or size.
  ///             This is the first interval that can possibly contain x.
  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
    assert(i <= Size && Size <= N && "Bad indices");
    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
           "Index is past the needed point");
    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
    return i;
  }

  /// safeFind - Find an interval that is known to exist. This is the same as
  /// findFrom except it is assumed that x is at least within range of the last
  /// interval.
  /// @param i Starting index for the search.
  /// @param x Key to search for.
  /// @return  First index with !stopLess(key[i].stop, x), never size.
  ///          This is the first interval that can possibly contain x.
  unsigned safeFind(unsigned i, KeyT x) const {
    assert(i < N && "Bad index");
    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
           "Index is past the needed point");
    while (Traits::stopLess(stop(i), x)) ++i;
    assert(i < N && "Unsafe intervals");
    return i;
  }

  /// safeLookup - Lookup mapped value for a safe key.
  /// It is assumed that x is within range of the last entry.
  /// @param x        Key to search for.
  /// @param NotFound Value to return if x is not in any interval.
  /// @return         The mapped value at x or NotFound.
  ValT safeLookup(KeyT x, ValT NotFound) const {
    unsigned i = safeFind(0, x);
    return Traits::startLess(x, start(i)) ? NotFound : value(i);
  }

  unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
};

/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
/// possible. This may cause the node to grow by 1, or it may cause the node
/// to shrink because of coalescing.
/// @param Pos  Starting index = insertFrom(0, size, a)
/// @param Size Number of elements in node.
/// @param a    Interval start.
/// @param b    Interval stop.
/// @param y    Value be mapped.
/// @return     (insert position, new size), or (i, Capacity+1) on overflow.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
unsigned LeafNode<KeyT, ValT, N, Traits>::
insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
  unsigned i = Pos;
  assert(i <= Size && Size <= N && "Invalid index");
  assert(!Traits::stopLess(b, a) && "Invalid interval");

  // Verify the findFrom invariant.
  assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
  assert((i == Size || !Traits::stopLess(stop(i), a)));
  assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");

  // Coalesce with previous interval.
  if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
    Pos = i - 1;
    // Also coalesce with next interval?
    if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
      stop(i - 1) = stop(i);
      this->erase(i, Size);
      return Size - 1;
    }
    stop(i - 1) = b;
    return Size;
  }

  // Detect overflow.
  if (i == N)
    return N + 1;

  // Add new interval at end.
  if (i == Size) {
    start(i) = a;
    stop(i) = b;
    value(i) = y;
    return Size + 1;
  }

  // Try to coalesce with following interval.
  if (value(i) == y && Traits::adjacent(b, start(i))) {
    start(i) = a;
    return Size;
  }

  // We must insert before i. Detect overflow.
  if (Size == N)
    return N + 1;

  // Insert before i.
  this->shift(i, Size);
  start(i) = a;
  stop(i) = b;
  value(i) = y;
  return Size + 1;
}
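
// Coalescing example with IntervalMapInfo<int> (closed intervals): if the
// leaf holds [10;19]->A and [30;39]->A, then insertFrom of [20;29]->A first
// coalesces with the previous interval (adjacent(19, 20)), then notices the
// following interval carries the same value and is adjacent as well, and
// merges all three into [10;39]->A, returning Size - 1 since a slot was
// freed.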

//===----------------------------------------------------------------------===//
//---                   IntervalMapImpl::BranchNode                        ---//
//===----------------------------------------------------------------------===//
//
// A branch node stores references to 1--N subtrees all of the same height.
//
// The key array in a branch node holds the rightmost stop key of each subtree.
// It is redundant to store the last stop key since it can be found in the
// parent node, but doing so makes tree balancing a lot simpler.
//
// It is unusual for a branch node to only have one subtree, but it can happen
// in the root node if it is smaller than the normal nodes.
//
// When all of the leaf nodes from all the subtrees are concatenated, they must
// satisfy the same constraints as a single leaf node. They must be sorted,
// sane, and fully coalesced.
//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class BranchNode : public NodeBase<NodeRef, KeyT, N> {
public:
  const KeyT &stop(unsigned i) const { return this->second[i]; }
  const NodeRef &subtree(unsigned i) const { return this->first[i]; }

  KeyT &stop(unsigned i) { return this->second[i]; }
  NodeRef &subtree(unsigned i) { return this->first[i]; }

  /// findFrom - Find the first subtree after i that may contain x.
  /// @param i    Starting index for the search.
  /// @param Size Number of elements in node.
  /// @param x    Key to search for.
  /// @return     First index with !stopLess(key[i], x), or size.
  ///             This is the first subtree that can possibly contain x.
  unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
    assert(i <= Size && Size <= N && "Bad indices");
    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
           "Index to findFrom is past the needed point");
    while (i != Size && Traits::stopLess(stop(i), x)) ++i;
    return i;
  }

  /// safeFind - Find a subtree that is known to exist. This is the same as
  /// findFrom except it is assumed that x is in range.
  /// @param i Starting index for the search.
  /// @param x Key to search for.
  /// @return  First index with !stopLess(key[i], x), never size.
  ///          This is the first subtree that can possibly contain x.
  unsigned safeFind(unsigned i, KeyT x) const {
    assert(i < N && "Bad index");
    assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
           "Index is past the needed point");
    while (Traits::stopLess(stop(i), x)) ++i;
    assert(i < N && "Unsafe intervals");
    return i;
  }

  /// safeLookup - Get the subtree containing x, assuming that x is in range.
  /// @param x Key to search for.
  /// @return  Subtree containing x.
  NodeRef safeLookup(KeyT x) const {
    return subtree(safeFind(0, x));
  }

  /// insert - Insert a new (subtree, stop) pair.
  /// @param i    Insert position, following entries will be shifted.
  /// @param Size Number of elements in node.
  /// @param Node Subtree to insert.
  /// @param Stop Last key in subtree.
  void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
    assert(Size < N && "branch node overflow");
    assert(i <= Size && "Bad insert position");
    this->shift(i, Size);
    subtree(i) = Node;
    stop(i) = Stop;
  }
};

//===----------------------------------------------------------------------===//
//---                         IntervalMapImpl::Path                        ---//
//===----------------------------------------------------------------------===//
//
// A Path is used by iterators to represent a position in a B+-tree, and the
// path to get there from the root.
//
// The Path class also contains the tree navigation code that doesn't have to
// be templatized.
//
//===----------------------------------------------------------------------===//

class Path {
  /// Entry - Each step in the path is a node pointer and an offset into that
  /// node.
  struct Entry {
    void *node;
    unsigned size;
    unsigned offset;

    Entry(void *Node, unsigned Size, unsigned Offset)
      : node(Node), size(Size), offset(Offset) {}

    Entry(NodeRef Node, unsigned Offset)
      : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}

    NodeRef &subtree(unsigned i) const {
      return reinterpret_cast<NodeRef*>(node)[i];
    }
  };

  /// path - The path entries, path[0] is the root node, path.back() is a leaf.
  SmallVector<Entry, 4> path;

public:
  // Node accessors.
  template <typename NodeT> NodeT &node(unsigned Level) const {
    return *reinterpret_cast<NodeT*>(path[Level].node);
  }
  unsigned size(unsigned Level) const { return path[Level].size; }
  unsigned offset(unsigned Level) const { return path[Level].offset; }
  unsigned &offset(unsigned Level) { return path[Level].offset; }

  // Leaf accessors.
  template <typename NodeT> NodeT &leaf() const {
    return *reinterpret_cast<NodeT*>(path.back().node);
  }
  unsigned leafSize() const { return path.back().size; }
  unsigned leafOffset() const { return path.back().offset; }
  unsigned &leafOffset() { return path.back().offset; }

  /// valid - Return true if path is at a valid node, not at end().
  bool valid() const {
    return !path.empty() && path.front().offset < path.front().size;
  }

  /// height - Return the height of the tree corresponding to this path.
  /// This matches map->height in a full path.
  unsigned height() const { return path.size() - 1; }

  /// subtree - Get the subtree referenced from Level. When the path is
  /// consistent, node(Level + 1) == subtree(Level).
  /// @param Level 0..height-1. The leaves have no subtrees.
  NodeRef &subtree(unsigned Level) const {
    return path[Level].subtree(path[Level].offset);
  }

  /// reset - Reset cached information about node(Level) from subtree(Level - 1).
  /// @param Level 1..height. The node to update after parent node changed.
  void reset(unsigned Level) {
    path[Level] = Entry(subtree(Level - 1), offset(Level));
  }

  /// push - Add entry to path.
  /// @param Node Node to add, should be subtree(path.size()-1).
  /// @param Offset Offset into Node.
  void push(NodeRef Node, unsigned Offset) {
    path.push_back(Entry(Node, Offset));
  }

  /// pop - Remove the last path entry.
  void pop() {
    path.pop_back();
  }

  /// setSize - Set the size of a node both in the path and in the tree.
  /// @param Level 0..height. Note that setting the root size won't change
  ///              map->rootSize.
  /// @param Size New node size.
  void setSize(unsigned Level, unsigned Size) {
    path[Level].size = Size;
    if (Level)
      subtree(Level - 1).setSize(Size);
  }

  /// setRoot - Clear the path and set a new root node.
  /// @param Node New root node.
  /// @param Size New root size.
  /// @param Offset Offset into root node.
  void setRoot(void *Node, unsigned Size, unsigned Offset) {
    path.clear();
    path.push_back(Entry(Node, Size, Offset));
  }

  /// replaceRoot - Replace the current root node with two new entries after the
  /// tree height has increased.
  /// @param Root The new root node.
  /// @param Size Number of entries in the new root.
  /// @param Offsets Offsets into the root and first branch nodes.
  void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);

  /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
  /// @param Level Get the sibling to node(Level).
  /// @return Left sibling, or NodeRef().
  NodeRef getLeftSibling(unsigned Level) const;

  /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
  /// unaltered.
  /// @param Level Move node(Level).
  void moveLeft(unsigned Level);

  /// fillLeft - Grow path to Height by taking leftmost branches.
  /// @param Height The target height.
  void fillLeft(unsigned Height) {
    while (height() < Height)
      push(subtree(height()), 0);
  }

  /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
  /// @param Level Get the sibling to node(Level).
  /// @return Right sibling, or NodeRef().
  NodeRef getRightSibling(unsigned Level) const;

  /// moveRight - Move path to the right sibling at Level. Leave nodes below
  /// Level unaltered.
  /// @param Level Move node(Level).
  void moveRight(unsigned Level);

  /// atBegin - Return true if path is at begin().
  bool atBegin() const {
    for (unsigned i = 0, e = path.size(); i != e; ++i)
      if (path[i].offset != 0)
        return false;
    return true;
  }

  /// atLastEntry - Return true if the path is at the last entry of the node at
  /// Level.
  /// @param Level Node to examine.
  bool atLastEntry(unsigned Level) const {
    return path[Level].offset == path[Level].size - 1;
  }

  /// legalizeForInsert - Prepare the path for an insertion at Level. When the
  /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
  /// ensures that node(Level) is real by moving back to the last node at Level,
  /// and setting offset(Level) to size(Level) if required.
  /// @param Level The level where an insertion is about to take place.
  void legalizeForInsert(unsigned Level) {
    if (valid())
      return;
    moveLeft(Level);
    ++path[Level].offset;
  }
};

} // end namespace IntervalMapImpl

//===----------------------------------------------------------------------===//
//---                          IntervalMap                                ----//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT,
          unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
          typename Traits = IntervalMapInfo<KeyT>>
class IntervalMap {
  using Sizer = IntervalMapImpl::NodeSizer<KeyT, ValT>;
  using Leaf = IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits>;
  using Branch =
      IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>;
  using RootLeaf = IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits>;
  using IdxPair = IntervalMapImpl::IdxPair;

  // The RootLeaf capacity is given as a template parameter. We must compute the
  // corresponding RootBranch capacity.
  enum {
    DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
      (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
    RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
  };

  using RootBranch =
      IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>;

  // When branched, we store a global start key as well as the branch node.
  struct RootBranchData {
    KeyT start;
    RootBranch node;
  };

public:
  using Allocator = typename Sizer::Allocator;
  using KeyType = KeyT;
  using ValueType = ValT;
  using KeyTraits = Traits;

private:
  // The root data is either a RootLeaf or a RootBranchData instance.
  union {
    RootLeaf leaf;
    RootBranchData branchData;
  };

  // Tree height.
  // 0: Leaves in root.
  // 1: Root points to leaf.
  // 2: root->branch->leaf ...
  unsigned height = 0;

  // Number of entries in the root node.
  unsigned rootSize = 0;

  // Allocator used for creating external nodes.
  Allocator *allocator = nullptr;

  const RootLeaf &rootLeaf() const {
    assert(!branched() && "Cannot acces leaf data in branched root");
    return leaf;
  }
  RootLeaf &rootLeaf() {
    assert(!branched() && "Cannot acces leaf data in branched root");
    return leaf;
  }

  const RootBranchData &rootBranchData() const {
    assert(branched() && "Cannot access branch data in non-branched root");
    return branchData;
  }
  RootBranchData &rootBranchData() {
    assert(branched() && "Cannot access branch data in non-branched root");
    return branchData;
  }

  const RootBranch &rootBranch() const { return rootBranchData().node; }
  RootBranch &rootBranch()             { return rootBranchData().node; }
  KeyT rootBranchStart() const { return rootBranchData().start; }
  KeyT &rootBranchStart()      { return rootBranchData().start; }

  template <typename NodeT> NodeT *newNode() {
    return new (allocator->template Allocate<NodeT>()) NodeT();
  }

  template <typename NodeT> void deleteNode(NodeT *P) {
    P->~NodeT();
    allocator->Deallocate(P);
  }

  IdxPair branchRoot(unsigned Position);
  IdxPair splitRoot(unsigned Position);

  void switchRootToBranch() {
    rootLeaf().~RootLeaf();
    height = 1;
    new (&rootBranchData()) RootBranchData();
  }

  void switchRootToLeaf() {
    rootBranchData().~RootBranchData();
    height = 0;
    new(&rootLeaf()) RootLeaf();
  }

  bool branched() const { return height > 0; }

  ValT treeSafeLookup(KeyT x, ValT NotFound) const;
  void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
                  unsigned Level));
  void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);

public:
  explicit IntervalMap(Allocator &a) : allocator(&a) {
    new (&rootLeaf()) RootLeaf();
  }

  ///@{
  /// NOTE: The moved-from or copied-from object's allocator needs to have a
  /// lifetime equal to or exceeding the moved-to or copied-to object to avoid
  /// undefined behaviour.
  IntervalMap(IntervalMap const &RHS) : IntervalMap(*RHS.allocator) {
    // Future-proofing assertion: this function assumes the IntervalMap
    // constructor doesn't add any nodes.
    assert(empty() && "Expected emptry tree");
    *this = RHS;
  }
  IntervalMap &operator=(IntervalMap const &RHS) {
    clear();
    allocator = RHS.allocator;
    for (auto It = RHS.begin(), End = RHS.end(); It != End; ++It)
      insert(It.start(), It.stop(), It.value());
    return *this;
  }

  IntervalMap(IntervalMap &&RHS) : IntervalMap(*RHS.allocator) {
    // Future-proofing assertion: this function assumes the IntervalMap
    // constructor doesn't add any nodes.
    assert(empty() && "Expected emptry tree");
    *this = std::move(RHS);
  }
  IntervalMap &operator=(IntervalMap &&RHS) {
    // Calling clear deallocates memory and switches to rootLeaf.
    clear();
    // Destroy the new rootLeaf.
    rootLeaf().~RootLeaf();

    height = RHS.height;
    rootSize = RHS.rootSize;
    allocator = RHS.allocator;

    // rootLeaf and rootBranch are both uninitialized. Move RHS data into
    // appropriate field.
    if (RHS.branched()) {
      rootBranch() = std::move(RHS.rootBranch());
      // Prevent RHS deallocating memory LHS now owns by replacing RHS
      // rootBranch with a new rootLeaf.
      RHS.rootBranch().~RootBranch();
      RHS.height = 0;
      new (&RHS.rootLeaf()) RootLeaf();
    } else {
      rootLeaf() = std::move(RHS.rootLeaf());
    }
    return *this;
  }
  ///@}

  ~IntervalMap() {
    clear();
    rootLeaf().~RootLeaf();
  }

  /// empty -  Return true when no intervals are mapped.
  bool empty() const {
    return rootSize == 0;
  }

  /// start - Return the smallest mapped key in a non-empty map.
  KeyT start() const {
    assert(!empty() && "Empty IntervalMap has no start");
    return !branched() ? rootLeaf().start(0) : rootBranchStart();
  }

  /// stop - Return the largest mapped key in a non-empty map.
  KeyT stop() const {
    assert(!empty() && "Empty IntervalMap has no stop");
    return !branched() ? rootLeaf().stop(rootSize - 1) :
                         rootBranch().stop(rootSize - 1);
  }

  /// lookup - Return the mapped value at x or NotFound.
  ValT lookup(KeyT x, ValT NotFound = ValT()) const {
    if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
      return NotFound;
    return branched() ? treeSafeLookup(x, NotFound) :
                        rootLeaf().safeLookup(x, NotFound);
  }

  /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
  /// It is assumed that no key in the interval is mapped to another value, but
  /// overlapping intervals already mapped to y will be coalesced.
  void insert(KeyT a, KeyT b, ValT y) {
    if (branched() || rootSize == RootLeaf::Capacity)
      return find(a).insert(a, b, y);

    // Easy insert into root leaf.
    unsigned p = rootLeaf().findFrom(0, rootSize, a);
    rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
  }

  /// clear - Remove all entries.
  void clear();

  class const_iterator;
  class iterator;
  friend class const_iterator;
  friend class iterator;

  const_iterator begin() const {
    const_iterator I(*this);
    I.goToBegin();
    return I;
  }

  iterator begin() {
    iterator I(*this);
    I.goToBegin();
    return I;
  }

  const_iterator end() const {
    const_iterator I(*this);
    I.goToEnd();
    return I;
  }

  iterator end() {
    iterator I(*this);
    I.goToEnd();
    return I;
  }

  /// find - Return an iterator pointing to the first interval ending at or
  /// after x, or end().
  const_iterator find(KeyT x) const {
    const_iterator I(*this);
    I.find(x);
    return I;
  }

  iterator find(KeyT x) {
    iterator I(*this);
    I.find(x);
    return I;
  }

  /// overlaps(a, b) - Return true if the intervals in this map overlap with the
  /// interval [a;b].
  bool overlaps(KeyT a, KeyT b) const {
    assert(Traits::nonEmpty(a, b));
    const_iterator I = find(a);
    if (!I.valid())
      return false;
    // [a;b] and [x;y] overlap iff x<=b and a<=y. The find() call guarantees the
    // second part (y = find(a).stop()), so it is sufficient to check the first
    // one.
    return !Traits::stopLess(b, I.start());
  }
};
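
// A minimal end-to-end sketch (default IntervalMapInfo traits; the typedef
// name and the keys/values are illustrative):
//
//   typedef IntervalMap<unsigned, unsigned> LiveMap;
//   LiveMap::Allocator Alloc;
//   LiveMap Map(Alloc);           // The allocator must outlive the map.
//   Map.insert(10, 19, 1);
//   Map.insert(20, 29, 1);        // Coalesces into a single [10;29] -> 1.
//   unsigned A = Map.lookup(25);  // 1
//   unsigned B = Map.lookup(42);  // 0, the default NotFound = ValT().
//   for (LiveMap::const_iterator I = Map.begin(), E = Map.end(); I != E; ++I)
//     ; // Bounds via I.start()/I.stop(); the mapped value via *I.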

/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
/// branched root.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
ValT IntervalMap<KeyT, ValT, N, Traits>::
treeSafeLookup(KeyT x, ValT NotFound) const {
  assert(branched() && "treeLookup assumes a branched root");

  IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
  for (unsigned h = height-1; h; --h)
    NR = NR.get<Branch>().safeLookup(x);
  return NR.get<Leaf>().safeLookup(x, NotFound);
}

// branchRoot - Switch from a leaf root to a branched root.
// Return the new (root offset, node offset) corresponding to Position.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
branchRoot(unsigned Position) {
  using namespace IntervalMapImpl;
  // How many external leaf nodes to hold RootLeaf+1?
  const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;

  // Compute element distribution among new nodes.
  unsigned size[Nodes];
  IdxPair NewOffset(0, Position);

  // It is very common for the root node to be smaller than external nodes.
  if (Nodes == 1)
    size[0] = rootSize;
  else
    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity,  nullptr, size,
                           Position, true);

  // Allocate new nodes.
  unsigned pos = 0;
  NodeRef node[Nodes];
  for (unsigned n = 0; n != Nodes; ++n) {
    Leaf *L = newNode<Leaf>();
    L->copy(rootLeaf(), pos, 0, size[n]);
    node[n] = NodeRef(L, size[n]);
    pos += size[n];
  }

  // Destroy the old leaf node, construct branch node instead.
  switchRootToBranch();
  for (unsigned n = 0; n != Nodes; ++n) {
    rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
    rootBranch().subtree(n) = node[n];
  }
  rootBranchStart() = node[0].template get<Leaf>().start(0);
  rootSize = Nodes;
  return NewOffset;
}

// splitRoot - Split the current BranchRoot into multiple Branch nodes.
// Return the new (root offset, node offset) corresponding to Position.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
splitRoot(unsigned Position) {
  using namespace IntervalMapImpl;
  // How many external leaf nodes to hold RootBranch+1?
  const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;

  // Compute element distribution among new nodes.
  unsigned Size[Nodes];
  IdxPair NewOffset(0, Position);

  // It is very common for the root node to be smaller than external nodes.
  if (Nodes == 1)
    Size[0] = rootSize;
  else
    NewOffset = distribute(Nodes, rootSize, Leaf::Capacity,  nullptr, Size,
                           Position, true);

  // Allocate new nodes.
  unsigned Pos = 0;
  NodeRef Node[Nodes];
  for (unsigned n = 0; n != Nodes; ++n) {
    Branch *B = newNode<Branch>();
    B->copy(rootBranch(), Pos, 0, Size[n]);
    Node[n] = NodeRef(B, Size[n]);
    Pos += Size[n];
  }

  for (unsigned n = 0; n != Nodes; ++n) {
    rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
    rootBranch().subtree(n) = Node[n];
  }
  rootSize = Nodes;
  ++height;
  return NewOffset;
}

/// visitNodes - Visit each external node.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
  if (!branched())
    return;
  SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;

  // Collect level 0 nodes from the root.
  for (unsigned i = 0; i != rootSize; ++i)
    Refs.push_back(rootBranch().subtree(i));

  // Visit all branch nodes.
  for (unsigned h = height - 1; h; --h) {
    for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
      for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
        NextRefs.push_back(Refs[i].subtree(j));
      (this->*f)(Refs[i], h);
    }
    Refs.clear();
    Refs.swap(NextRefs);
  }

  // Visit all leaf nodes.
  for (unsigned i = 0, e = Refs.size(); i != e; ++i)
    (this->*f)(Refs[i], 0);
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
  if (Level)
    deleteNode(&Node.get<Branch>());
  else
    deleteNode(&Node.get<Leaf>());
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
clear() {
  if (branched()) {
    visitNodes(&IntervalMap::deleteNode);
    switchRootToLeaf();
  }
  rootSize = 0;
}

//===----------------------------------------------------------------------===//
//---                   IntervalMap::const_iterator                       ----//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::const_iterator {
  friend class IntervalMap;

public:
  using iterator_category = std::bidirectional_iterator_tag;
  using value_type = ValT;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

protected:
  // The map referred to.
  IntervalMap *map = nullptr;

  // We store a full path from the root to the current position.
  // The path may be partially filled, but never between iterator calls.
  IntervalMapImpl::Path path;

  explicit const_iterator(const IntervalMap &map) :
    map(const_cast<IntervalMap*>(&map)) {}

  bool branched() const {
    assert(map && "Invalid iterator");
    return map->branched();
  }

  void setRoot(unsigned Offset) {
    if (branched())
      path.setRoot(&map->rootBranch(), map->rootSize, Offset);
    else
      path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
  }

  void pathFillFind(KeyT x);
  void treeFind(KeyT x);
  void treeAdvanceTo(KeyT x);

  /// unsafeStart - Writable access to start() for iterator.
  KeyT &unsafeStart() const {
    assert(valid() && "Cannot access invalid iterator");
    return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
                        path.leaf<RootLeaf>().start(path.leafOffset());
  }

  /// unsafeStop - Writable access to stop() for iterator.
  KeyT &unsafeStop() const {
    assert(valid() && "Cannot access invalid iterator");
    return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
                        path.leaf<RootLeaf>().stop(path.leafOffset());
  }

  /// unsafeValue - Writable access to value() for iterator.
  ValT &unsafeValue() const {
    assert(valid() && "Cannot access invalid iterator");
    return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
                        path.leaf<RootLeaf>().value(path.leafOffset());
  }

public:
  /// const_iterator - Create an iterator that isn't pointing anywhere.
  const_iterator() = default;

  /// setMap - Change the map iterated over. This call must be followed by a
  /// call to goToBegin(), goToEnd(), or find()
  void setMap(const IntervalMap &m) { map = const_cast<IntervalMap*>(&m); }

  /// valid - Return true if the current position is valid, false for end().
  bool valid() const { return path.valid(); }

  /// atBegin - Return true if the current position is the first map entry.
  bool atBegin() const { return path.atBegin(); }

  /// start - Return the beginning of the current interval.
  const KeyT &start() const { return unsafeStart(); }

  /// stop - Return the end of the current interval.
  const KeyT &stop() const { return unsafeStop(); }

  /// value - Return the mapped value at the current interval.
  const ValT &value() const { return unsafeValue(); }

  const ValT &operator*() const { return value(); }

  bool operator==(const const_iterator &RHS) const {
    assert(map == RHS.map && "Cannot compare iterators from different maps");
    if (!valid())
      return !RHS.valid();
    if (path.leafOffset() != RHS.path.leafOffset())
      return false;
    return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
  }

  bool operator!=(const const_iterator &RHS) const {
    return !operator==(RHS);
  }

  /// goToBegin - Move to the first interval in map.
  void goToBegin() {
    setRoot(0);
    if (branched())
      path.fillLeft(map->height);
  }

  /// goToEnd - Move beyond the last interval in map.
  void goToEnd() {
    setRoot(map->rootSize);
  }

  /// preincrement - Move to the next interval.
  const_iterator &operator++() {
    assert(valid() && "Cannot increment end()");
    if (++path.leafOffset() == path.leafSize() && branched())
      path.moveRight(map->height);
    return *this;
  }

  /// postincrement - Don't do that!
  const_iterator operator++(int) {
    const_iterator tmp = *this;
    operator++();
    return tmp;
  }

  /// predecrement - Move to the previous interval.
  const_iterator &operator--() {
    if (path.leafOffset() && (valid() || !branched()))
      --path.leafOffset();
    else
      path.moveLeft(map->height);
    return *this;
  }

  /// postdecrement - Don't do that!
  const_iterator operator--(int) {
    const_iterator tmp = *this;
    operator--();
    return tmp;
  }

  /// find - Move to the first interval with stop >= x, or end().
  /// This is a full search from the root, the current position is ignored.
  void find(KeyT x) {
    if (branched())
      treeFind(x);
    else
      setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
  }

  /// advanceTo - Move to the first interval with stop >= x, or end().
  /// The search is started from the current position, and no earlier positions
  /// can be found. This is much faster than find() for small moves.
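  ///
  /// A minimal sketch (assuming an IntervalMap<unsigned, char> named M and a
  /// const_iterator I positioned somewhere in M):
  /// \code
  ///   I.advanceTo(100); // Now at the first interval with stop >= 100, or
  ///                     // end() if no such interval follows.
  /// \endcode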
  void advanceTo(KeyT x) {
    if (!valid())
      return;
    if (branched())
      treeAdvanceTo(x);
    else
      path.leafOffset() =
        map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
  }
};

/// pathFillFind - Complete path by searching for x.
/// @param x Key to search for.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
const_iterator::pathFillFind(KeyT x) {
  IntervalMapImpl::NodeRef NR = path.subtree(path.height());
  for (unsigned i = map->height - path.height() - 1; i; --i) {
    unsigned p = NR.get<Branch>().safeFind(0, x);
    path.push(NR, p);
    NR = NR.subtree(p);
  }
  path.push(NR, NR.get<Leaf>().safeFind(0, x));
}

/// treeFind - Find in a branched tree.
/// @param x Key to search for.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
const_iterator::treeFind(KeyT x) {
  setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
  if (valid())
    pathFillFind(x);
}

/// treeAdvanceTo - Find position after the current one.
/// @param x Key to search for.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
const_iterator::treeAdvanceTo(KeyT x) {
  // Can we stay on the same leaf node?
  if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
    path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
    return;
  }

  // Drop the current leaf.
  path.pop();

  // Search towards the root for a usable subtree.
  if (path.height()) {
    for (unsigned l = path.height() - 1; l; --l) {
      if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
        // The branch node at l+1 is usable
        path.offset(l + 1) =
          path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
        return pathFillFind(x);
      }
      path.pop();
    }
    // Is the level-1 Branch usable?
    if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
      path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
      return pathFillFind(x);
    }
  }

  // We reached the root.
  setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
  if (valid())
    pathFillFind(x);
}

//===----------------------------------------------------------------------===//
//---                       IntervalMap::iterator                         ----//
//===----------------------------------------------------------------------===//

template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
  friend class IntervalMap;

  using IdxPair = IntervalMapImpl::IdxPair;

  explicit iterator(IntervalMap &map) : const_iterator(map) {}

  void setNodeStop(unsigned Level, KeyT Stop);
  bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
  template <typename NodeT> bool overflow(unsigned Level);
  void treeInsert(KeyT a, KeyT b, ValT y);
  void eraseNode(unsigned Level);
  void treeErase(bool UpdateRoot = true);
  bool canCoalesceLeft(KeyT Start, ValT x);
  bool canCoalesceRight(KeyT Stop, ValT x);

public:
  /// iterator - Create null iterator.
  iterator() = default;

  /// setStart - Move the start of the current interval.
  /// This may cause coalescing with the previous interval.
  /// @param a New start key, must not overlap the previous interval.
  void setStart(KeyT a);

  /// setStop - Move the end of the current interval.
  /// This may cause coalescing with the following interval.
  /// @param b New stop key, must not overlap the following interval.
  void setStop(KeyT b);

  /// setValue - Change the mapped value of the current interval.
  /// This may cause coalescing with the previous and following intervals.
  /// @param x New value.
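  ///
  /// For example (a hedged sketch, assuming the default closed-interval
  /// traits): with [10;19]->'a', [20;29]->'b', [30;39]->'a' and the iterator
  /// I at the middle interval, the call below merges all three:
  /// \code
  ///   I.setValue('a'); // Map now holds the single interval [10;39]->'a'.
  /// \endcode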
  void setValue(ValT x);

  /// setStartUnchecked - Move the start of the current interval without
  /// checking for coalescing or overlaps.
  /// This should only be used when it is known that coalescing is not required.
  /// @param a New start key.
  void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }

  /// setStopUnchecked - Move the end of the current interval without checking
  /// for coalescing or overlaps.
  /// This should only be used when it is known that coalescing is not required.
  /// @param b New stop key.
  void setStopUnchecked(KeyT b) {
    this->unsafeStop() = b;
    // Update keys in branch nodes as well.
    if (this->path.atLastEntry(this->path.height()))
      setNodeStop(this->path.height(), b);
  }

  /// setValueUnchecked - Change the mapped value of the current interval
  /// without checking for coalescing.
  /// @param x New value.
  void setValueUnchecked(ValT x) { this->unsafeValue() = x; }

  /// insert - Insert mapping [a;b] -> y before the current position.
  void insert(KeyT a, KeyT b, ValT y);

  /// erase - Erase the current interval.
  void erase();

  iterator &operator++() {
    const_iterator::operator++();
    return *this;
  }

  iterator operator++(int) {
    iterator tmp = *this;
    operator++();
    return tmp;
  }

  iterator &operator--() {
    const_iterator::operator--();
    return *this;
  }

  iterator operator--(int) {
    iterator tmp = *this;
    operator--();
    return tmp;
  }
};

/// canCoalesceLeft - Can the current interval coalesce to the left after
/// changing start or value?
/// @param Start New start of current interval.
/// @param Value New value for current interval.
/// @return True when updating the current interval would enable coalescing.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::canCoalesceLeft(KeyT Start, ValT Value) {
  using namespace IntervalMapImpl;
  Path &P = this->path;
  if (!this->branched()) {
    unsigned i = P.leafOffset();
    RootLeaf &Node = P.leaf<RootLeaf>();
    return i && Node.value(i-1) == Value &&
                Traits::adjacent(Node.stop(i-1), Start);
  }
  // Branched.
  if (unsigned i = P.leafOffset()) {
    Leaf &Node = P.leaf<Leaf>();
    return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
  } else if (NodeRef NR = P.getLeftSibling(P.height())) {
    unsigned i = NR.size() - 1;
    Leaf &Node = NR.get<Leaf>();
    return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
  }
  return false;
}

/// canCoalesceRight - Can the current interval coalesce to the right after
/// changing stop or value?
/// @param Stop New stop of current interval.
/// @param Value New value for current interval.
/// @return True when updating the current interval would enable coalescing.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::canCoalesceRight(KeyT Stop, ValT Value) {
  using namespace IntervalMapImpl;
  Path &P = this->path;
  unsigned i = P.leafOffset() + 1;
  if (!this->branched()) {
    if (i >= P.leafSize())
      return false;
    RootLeaf &Node = P.leaf<RootLeaf>();
    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
  }
  // Branched.
  if (i < P.leafSize()) {
    Leaf &Node = P.leaf<Leaf>();
    return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
  } else if (NodeRef NR = P.getRightSibling(P.height())) {
    Leaf &Node = NR.get<Leaf>();
    return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
  }
  return false;
}

/// setNodeStop - Update the stop key of the current node at level and above.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setNodeStop(unsigned Level, KeyT Stop) {
  // There are no references to the root node, so nothing to update.
  if (!Level)
    return;
  IntervalMapImpl::Path &P = this->path;
  // Update nodes pointing to the current node.
  while (--Level) {
    P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
    if (!P.atLastEntry(Level))
      return;
  }
  // Update root separately since it has a different layout.
  P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setStart(KeyT a) {
  assert(Traits::nonEmpty(a, this->stop()) && "Cannot move start beyond stop");
  KeyT &CurStart = this->unsafeStart();
  if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
    CurStart = a;
    return;
  }
  // Coalesce with the interval to the left.
  --*this;
  a = this->start();
  erase();
  setStartUnchecked(a);
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setStop(KeyT b) {
  assert(Traits::nonEmpty(this->start(), b) && "Cannot move stop beyond start");
  if (Traits::startLess(b, this->stop()) ||
      !canCoalesceRight(b, this->value())) {
    setStopUnchecked(b);
    return;
  }
  // Coalesce with interval to the right.
  KeyT a = this->start();
  erase();
  setStartUnchecked(a);
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::setValue(ValT x) {
  setValueUnchecked(x);
  if (canCoalesceRight(this->stop(), x)) {
    KeyT a = this->start();
    erase();
    setStartUnchecked(a);
  }
  if (canCoalesceLeft(this->start(), x)) {
    --*this;
    KeyT a = this->start();
    erase();
    setStartUnchecked(a);
  }
}

/// insertNode - insert a node before the current path at level.
/// Leave the current path pointing at the new node.
/// @param Level path index of the node to be inserted.
/// @param Node The node to be inserted.
/// @param Stop The stop key of the last entry in the new node.
/// @return True if the tree height was increased.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
  assert(Level && "Cannot insert next to the root");
  bool SplitRoot = false;
  IntervalMap &IM = *this->map;
  IntervalMapImpl::Path &P = this->path;

  if (Level == 1) {
    // Insert into the root branch node.
    if (IM.rootSize < RootBranch::Capacity) {
      IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
      P.setSize(0, ++IM.rootSize);
      P.reset(Level);
      return SplitRoot;
    }

    // We need to split the root while keeping our position.
    SplitRoot = true;
    IdxPair Offset = IM.splitRoot(P.offset(0));
    P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);

    // Fall through to insert at the new higher level.
    ++Level;
  }

  // When inserting before end(), make sure we have a valid path.
  P.legalizeForInsert(--Level);

  // Insert into the branch node at Level-1.
  if (P.size(Level) == Branch::Capacity) {
    // Branch node is full, handle the overflow.
    assert(!SplitRoot && "Cannot overflow after splitting the root");
    SplitRoot = overflow<Branch>(Level);
    Level += SplitRoot;
  }
  P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
  P.setSize(Level, P.size(Level) + 1);
  if (P.atLastEntry(Level))
    setNodeStop(Level, Stop);
  P.reset(Level + 1);
  return SplitRoot;
}

// insert
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::insert(KeyT a, KeyT b, ValT y) {
  if (this->branched())
    return treeInsert(a, b, y);
  IntervalMap &IM = *this->map;
  IntervalMapImpl::Path &P = this->path;

  // Try simple root leaf insert.
  unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);

  // Was the root node insert successful?
  if (Size <= RootLeaf::Capacity) {
    P.setSize(0, IM.rootSize = Size);
    return;
  }

  // Root leaf node is full, we must branch.
  IdxPair Offset = IM.branchRoot(P.leafOffset());
  P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);

  // Now it fits in the new leaf.
  treeInsert(a, b, y);
}

template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::treeInsert(KeyT a, KeyT b, ValT y) {
  using namespace IntervalMapImpl;
  Path &P = this->path;

  if (!P.valid())
    P.legalizeForInsert(this->map->height);

  // Check if this insertion will extend the node to the left.
  if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
    // Node is growing to the left, will it affect a left sibling node?
    if (NodeRef Sib = P.getLeftSibling(P.height())) {
      Leaf &SibLeaf = Sib.get<Leaf>();
      unsigned SibOfs = Sib.size() - 1;
      if (SibLeaf.value(SibOfs) == y &&
          Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
        // This insertion will coalesce with the last entry in SibLeaf. We can
        // handle it in two ways:
        //  1. Extend SibLeaf.stop to b and be done, or
        //  2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
        // We prefer 1., but need 2 when coalescing to the right as well.
        Leaf &CurLeaf = P.leaf<Leaf>();
        P.moveLeft(P.height());
        if (Traits::stopLess(b, CurLeaf.start(0)) &&
            (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
          // Easy, just extend SibLeaf and we're done.
          setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
          return;
        } else {
          // We have both left and right coalescing. Erase the old SibLeaf entry
          // and continue inserting the larger interval.
          a = SibLeaf.start(SibOfs);
          treeErase(/* UpdateRoot= */false);
        }
      }
    } else {
      // No left sibling means we are at begin(). Update cached bound.
      this->map->rootBranchStart() = a;
    }
  }

  // When we are inserting at the end of a leaf node, we must update stops.
  unsigned Size = P.leafSize();
  bool Grow = P.leafOffset() == Size;
  Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);

  // Leaf insertion unsuccessful? Overflow and try again.
  if (Size > Leaf::Capacity) {
    overflow<Leaf>(P.height());
    Grow = P.leafOffset() == P.leafSize();
    Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
    assert(Size <= Leaf::Capacity && "overflow() didn't make room");
  }

  // Inserted, update offset and leaf size.
  P.setSize(P.height(), Size);

  // Insert was the last node entry, update stops.
  if (Grow)
    setNodeStop(P.height(), b);
}

/// erase - erase the current interval and move to the next position.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::erase() {
  IntervalMap &IM = *this->map;
  IntervalMapImpl::Path &P = this->path;
  assert(P.valid() && "Cannot erase end()");
  if (this->branched())
    return treeErase();
  IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
  P.setSize(0, --IM.rootSize);
}

/// treeErase - erase() for a branched tree.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::treeErase(bool UpdateRoot) {
  IntervalMap &IM = *this->map;
  IntervalMapImpl::Path &P = this->path;
  Leaf &Node = P.leaf<Leaf>();

  // Nodes are not allowed to become empty.
  if (P.leafSize() == 1) {
    IM.deleteNode(&Node);
    eraseNode(IM.height);
    // Update rootBranchStart if we erased begin().
    if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
      IM.rootBranchStart() = P.leaf<Leaf>().start(0);
    return;
  }

  // Erase current entry.
  Node.erase(P.leafOffset(), P.leafSize());
  unsigned NewSize = P.leafSize() - 1;
  P.setSize(IM.height, NewSize);
  // When we erase the last entry, update stop and move to a legal position.
  if (P.leafOffset() == NewSize) {
    setNodeStop(IM.height, Node.stop(NewSize - 1));
    P.moveRight(IM.height);
  } else if (UpdateRoot && P.atBegin())
    IM.rootBranchStart() = P.leaf<Leaf>().start(0);
}

/// eraseNode - Erase the current node at Level from its parent and move path to
/// the first entry of the next sibling node.
/// The node must be deallocated by the caller.
/// @param Level 1..height, the root node cannot be erased.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
void IntervalMap<KeyT, ValT, N, Traits>::
iterator::eraseNode(unsigned Level) {
  assert(Level && "Cannot erase root node");
  IntervalMap &IM = *this->map;
  IntervalMapImpl::Path &P = this->path;

  if (--Level == 0) {
    IM.rootBranch().erase(P.offset(0), IM.rootSize);
    P.setSize(0, --IM.rootSize);
    // If this cleared the root, switch to height=0.
    if (IM.empty()) {
      IM.switchRootToLeaf();
      this->setRoot(0);
      return;
    }
  } else {
    // Remove node ref from branch node at Level.
    Branch &Parent = P.node<Branch>(Level);
    if (P.size(Level) == 1) {
      // Branch node became empty, remove it recursively.
      IM.deleteNode(&Parent);
      eraseNode(Level);
    } else {
      // Branch node won't become empty.
      Parent.erase(P.offset(Level), P.size(Level));
      unsigned NewSize = P.size(Level) - 1;
      P.setSize(Level, NewSize);
      // If we removed the last branch, update stop and move to a legal pos.
      if (P.offset(Level) == NewSize) {
        setNodeStop(Level, Parent.stop(NewSize - 1));
        P.moveRight(Level);
      }
    }
  }
  // Update path cache for the new right sibling position.
  if (P.valid()) {
    P.reset(Level + 1);
    P.offset(Level + 1) = 0;
  }
}

/// overflow - Distribute entries of the current node evenly among
/// its siblings and ensure that the current node is not full.
/// This may require allocating a new node.
/// @tparam NodeT The type of node at Level (Leaf or Branch).
/// @param Level path index of the overflowing node.
/// @return True when the tree height was changed.
template <typename KeyT, typename ValT, unsigned N, typename Traits>
template <typename NodeT>
bool IntervalMap<KeyT, ValT, N, Traits>::
iterator::overflow(unsigned Level) {
  using namespace IntervalMapImpl;
  Path &P = this->path;
  unsigned CurSize[4];
  NodeT *Node[4];
  unsigned Nodes = 0;
  unsigned Elements = 0;
  unsigned Offset = P.offset(Level);

  // Do we have a left sibling?
  NodeRef LeftSib = P.getLeftSibling(Level);
  if (LeftSib) {
    Offset += Elements = CurSize[Nodes] = LeftSib.size();
    Node[Nodes++] = &LeftSib.get<NodeT>();
  }

  // Current node.
  Elements += CurSize[Nodes] = P.size(Level);
  Node[Nodes++] = &P.node<NodeT>(Level);

  // Do we have a right sibling?
  NodeRef RightSib = P.getRightSibling(Level);
  if (RightSib) {
    Elements += CurSize[Nodes] = RightSib.size();
    Node[Nodes++] = &RightSib.get<NodeT>();
  }

  // Do we need to allocate a new node?
  unsigned NewNode = 0;
  if (Elements + 1 > Nodes * NodeT::Capacity) {
    // Insert NewNode at the penultimate position, or after a single node.
    NewNode = Nodes == 1 ? 1 : Nodes - 1;
    CurSize[Nodes] = CurSize[NewNode];
    Node[Nodes] = Node[NewNode];
    CurSize[NewNode] = 0;
    Node[NewNode] = this->map->template newNode<NodeT>();
    ++Nodes;
  }

  // Compute the new element distribution.
  unsigned NewSize[4];
  IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
                                 CurSize, NewSize, Offset, true);
  adjustSiblingSizes(Node, Nodes, CurSize, NewSize);

  // Move current location to the leftmost node.
  if (LeftSib)
    P.moveLeft(Level);

  // Elements have been rearranged, now update node sizes and stops.
  bool SplitRoot = false;
  unsigned Pos = 0;
  while (true) {
    KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
    if (NewNode && Pos == NewNode) {
      SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
      Level += SplitRoot;
    } else {
      P.setSize(Level, NewSize[Pos]);
      setNodeStop(Level, Stop);
    }
    if (Pos + 1 == Nodes)
      break;
    P.moveRight(Level);
    ++Pos;
  }

  // Where was I? Find NewOffset.
  while(Pos != NewOffset.first) {
    P.moveLeft(Level);
    --Pos;
  }
  P.offset(Level) = NewOffset.second;
  return SplitRoot;
}

//===----------------------------------------------------------------------===//
//---                       IntervalMapOverlaps                           ----//
//===----------------------------------------------------------------------===//

/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
/// IntervalMaps. The maps may be different, but the KeyT and Traits types
/// should be the same.
///
/// Typical uses:
///
/// 1. Test for overlap:
///    bool overlap = IntervalMapOverlaps(a, b).valid();
///
/// 2. Enumerate overlaps:
///    for (IntervalMapOverlaps I(a, b); I.valid(); ++I) { ... }
///
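/// 3. Concretely (a hedged sketch; UCMap and visitRange() are hypothetical
///    names for an IntervalMap typedef and a caller-supplied function):
/// \code
///   for (IntervalMapOverlaps<UCMap, UCMap> I(A, B); I.valid(); ++I)
///     visitRange(I.start(), I.stop()); // Bounds of the overlapping region.
/// \endcode
///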
template <typename MapA, typename MapB>
class IntervalMapOverlaps {
  using KeyType = typename MapA::KeyType;
  using Traits = typename MapA::KeyTraits;

  typename MapA::const_iterator posA;
  typename MapB::const_iterator posB;

  /// advance - Move posA and posB forward until reaching an overlap, or until
  /// either meets end.
  /// Don't move the iterators if they are already overlapping.
  void advance() {
    if (!valid())
      return;

    if (Traits::stopLess(posA.stop(), posB.start())) {
      // A ends before B begins. Catch up.
      posA.advanceTo(posB.start());
      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
        return;
    } else if (Traits::stopLess(posB.stop(), posA.start())) {
      // B ends before A begins. Catch up.
      posB.advanceTo(posA.start());
      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
        return;
    } else
      // Already overlapping.
      return;

    while (true) {
      // Make a.end > b.start.
      posA.advanceTo(posB.start());
      if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
        return;
      // Make b.end > a.start.
      posB.advanceTo(posA.start());
      if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
        return;
    }
  }

public:
  /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
  IntervalMapOverlaps(const MapA &a, const MapB &b)
    : posA(b.empty() ? a.end() : a.find(b.start())),
      posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }

  /// valid - Return true if iterator is at an overlap.
  bool valid() const {
    return posA.valid() && posB.valid();
  }

  /// a - access the left hand side in the overlap.
  const typename MapA::const_iterator &a() const { return posA; }

  /// b - access the right hand side in the overlap.
  const typename MapB::const_iterator &b() const { return posB; }

  /// start - Beginning of the overlapping interval.
  KeyType start() const {
    KeyType ak = a().start();
    KeyType bk = b().start();
    return Traits::startLess(ak, bk) ? bk : ak;
  }

  /// stop - End of the overlapping interval.
  KeyType stop() const {
    KeyType ak = a().stop();
    KeyType bk = b().stop();
    return Traits::startLess(ak, bk) ? ak : bk;
  }

  /// skipA - Move to the next overlap that doesn't involve a().
  void skipA() {
    ++posA;
    advance();
  }

  /// skipB - Move to the next overlap that doesn't involve b().
  void skipB() {
    ++posB;
    advance();
  }

  /// Preincrement - Move to the next overlap.
  IntervalMapOverlaps &operator++() {
    // Bump the iterator that ends first. The other one may have more overlaps.
    if (Traits::startLess(posB.stop(), posA.stop()))
      skipB();
    else
      skipA();
    return *this;
  }

  /// advanceTo - Move to the first overlapping interval with
  /// stopLess(x, stop()).
  void advanceTo(KeyType x) {
    if (!valid())
      return;
    // Make sure advanceTo sees monotonic keys.
    if (Traits::stopLess(posA.stop(), x))
      posA.advanceTo(x);
    if (Traits::stopLess(posB.stop(), x))
      posB.advanceTo(x);
    advance();
  }
};

} // end namespace llvm

#endif // LLVM_ADT_INTERVALMAP_H
//===- llvm/ADT/STLArrayExtras.h - additions to <array> ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some templates that are useful when working with the
// STL.
//
// No library is required when using these functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STLARRAYEXTRAS_H
#define LLVM_ADT_STLARRAYEXTRAS_H

#include <cstddef>

namespace llvm {

//===----------------------------------------------------------------------===//
//     Extra additions for arrays
//===----------------------------------------------------------------------===//

/// Find the length of an array.
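///
/// A minimal sketch:
/// \code
///   int Buf[16];
///   static_assert(llvm::array_lengthof(Buf) == 16, "N is deduced");
/// \endcode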
template <class T, std::size_t N>
constexpr inline size_t array_lengthof(T (&)[N]) {
  return N;
}

} // end namespace llvm

#endif // LLVM_ADT_STLARRAYEXTRAS_H
//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===/
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===/
///
/// \file
///  This file implements the StringSwitch template, which mimics a switch()
///  statement whose cases are string literals.
///
//===----------------------------------------------------------------------===/
#ifndef LLVM_ADT_STRINGSWITCH_H
#define LLVM_ADT_STRINGSWITCH_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstring>
#include <optional>

namespace llvm {

/// A switch()-like statement whose cases are string literals.
///
/// The StringSwitch class is a simple form of a switch() statement that
/// determines whether the given string matches one of the given string
/// literals. The template type parameter \p T is the type of the value that
/// will be returned from the string-switch expression. For example,
/// the following code switches on the name of a color in \c argv[i]:
///
/// \code
/// Color color = StringSwitch<Color>(argv[i])
///   .Case("red", Red)
///   .Case("orange", Orange)
///   .Case("yellow", Yellow)
///   .Case("green", Green)
///   .Case("blue", Blue)
///   .Case("indigo", Indigo)
///   .Cases("violet", "purple", Violet)
///   .Default(UnknownColor);
/// \endcode
template<typename T, typename R = T>
class StringSwitch {
  /// The string we are matching.
  const StringRef Str;

  /// The result of this switch statement once known; std::nullopt before
  /// that.
  std::optional<T> Result;

public:
  explicit StringSwitch(StringRef S)
  : Str(S), Result() { }

  // StringSwitch is not copyable.
  StringSwitch(const StringSwitch &) = delete;

  // StringSwitch is not assignable due to 'Str' being 'const'.
  void operator=(const StringSwitch &) = delete;
  void operator=(StringSwitch &&other) = delete;

  StringSwitch(StringSwitch &&other)
    : Str(other.Str), Result(std::move(other.Result)) { }

  ~StringSwitch() = default;

  // Case-sensitive case matchers
  StringSwitch &Case(StringLiteral S, T Value) {
    if (!Result && Str == S) {
      Result = std::move(Value);
    }
    return *this;
  }

  StringSwitch& EndsWith(StringLiteral S, T Value) {
    if (!Result && Str.endswith(S)) {
      Result = std::move(Value);
    }
    return *this;
  }

  StringSwitch& StartsWith(StringLiteral S, T Value) {
    if (!Result && Str.startswith(S)) {
      Result = std::move(Value);
    }
    return *this;
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, T Value) {
    return Case(S0, Value).Case(S1, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      T Value) {
    return Case(S0, Value).Cases(S1, S2, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, StringLiteral S4, T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, S4, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
                      T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
                      StringLiteral S6, T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
                      StringLiteral S6, StringLiteral S7, T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
                      T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, Value);
  }

  StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                      StringLiteral S3, StringLiteral S4, StringLiteral S5,
                      StringLiteral S6, StringLiteral S7, StringLiteral S8,
                      StringLiteral S9, T Value) {
    return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, S9, Value);
  }

  // Case-insensitive case matchers.
  StringSwitch &CaseLower(StringLiteral S, T Value) {
    if (!Result && Str.equals_insensitive(S))
      Result = std::move(Value);

    return *this;
  }

  StringSwitch &EndsWithLower(StringLiteral S, T Value) {
    if (!Result && Str.ends_with_insensitive(S))
      Result = std::move(Value);

    return *this;
  }

  StringSwitch &StartsWithLower(StringLiteral S, T Value) {
    if (!Result && Str.starts_with_insensitive(S))
      Result = std::move(Value);

    return *this;
  }

  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, T Value) {
    return CaseLower(S0, Value).CaseLower(S1, Value);
  }

  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                           T Value) {
    return CaseLower(S0, Value).CasesLower(S1, S2, Value);
  }

  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                           StringLiteral S3, T Value) {
    return CaseLower(S0, Value).CasesLower(S1, S2, S3, Value);
  }

  StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
                           StringLiteral S3, StringLiteral S4, T Value) {
    return CaseLower(S0, Value).CasesLower(S1, S2, S3, S4, Value);
  }

  [[nodiscard]] R Default(T Value) {
    if (Result)
      return std::move(*Result);
    return Value;
  }

  [[nodiscard]] operator R() {
    assert(Result && "Fell off the end of a string-switch");
    return std::move(*Result);
  }
};

} // end namespace llvm

#endif // LLVM_ADT_STRINGSWITCH_H
//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements an indexed map. The index map template takes two
/// types. The first is the mapped type and the second is a functor
/// that maps its argument to a size_t. On instantiation a "null" value
/// can be provided to be used as a "does not exist" indicator in the
/// map. A member function grow() is provided that given the value of
/// the maximally indexed key (the argument of the functor) makes sure
/// the map has enough space for it.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include <cassert>

namespace llvm {

template <typename T, typename ToIndexT = identity<unsigned>>
  class IndexedMap {
    using IndexT = typename ToIndexT::argument_type;
    // Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
    // can grow very large and SmallVector grows more efficiently as long as T
    // is trivially copyable.
    using StorageT = SmallVector<T, 0>;

    StorageT storage_;
    T nullVal_;
    ToIndexT toIndex_;

  public:
    IndexedMap() : nullVal_(T()) {}

    explicit IndexedMap(const T& val) : nullVal_(val) {}

    typename StorageT::reference operator[](IndexT n) {
      assert(toIndex_(n) < storage_.size() && "index out of bounds!");
      return storage_[toIndex_(n)];
    }

    typename StorageT::const_reference operator[](IndexT n) const {
      assert(toIndex_(n) < storage_.size() && "index out of bounds!");
      return storage_[toIndex_(n)];
    }

    void reserve(typename StorageT::size_type s) {
      storage_.reserve(s);
    }

    void resize(typename StorageT::size_type s) {
      storage_.resize(s, nullVal_);
    }

    void clear() {
      storage_.clear();
    }

    void grow(IndexT n) {
      unsigned NewSize = toIndex_(n) + 1;
      if (NewSize > storage_.size())
        resize(NewSize);
    }

    bool inBounds(IndexT n) const {
      return toIndex_(n) < storage_.size();
    }

    typename StorageT::size_type size() const {
      return storage_.size();
    }
  };

} // end namespace llvm

#endif // LLVM_ADT_INDEXEDMAP_H
//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a map that provides insertion order iteration. The
/// interface is purposefully minimal. The key is assumed to be cheap to copy
/// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in
/// a SmallVector.
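///
/// A minimal usage sketch (keys and values are illustrative):
/// \code
///   MapVector<int, const char *> MV;
///   MV.insert({3, "three"});
///   MV.insert({1, "one"});
///   for (const auto &KV : MV) // Visits entries in insertion order: 3, 1.
///     (void)KV;
/// \endcode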
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_MAPVECTOR_H
#define LLVM_ADT_MAPVECTOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// This class implements a map that also provides access to all stored values
/// in a deterministic order. The values are kept in a SmallVector<*, 0> and the
/// mapping is done with DenseMap from Keys to indexes in that vector.
template <typename KeyT, typename ValueT,
          typename MapType = DenseMap<KeyT, unsigned>,
          typename VectorType = SmallVector<std::pair<KeyT, ValueT>, 0>>
class MapVector {
  MapType Map;
  VectorType Vector;

  static_assert(
      std::is_integral_v<typename MapType::mapped_type>,
      "The mapped_type of the specified Map must be an integral type");

public:
  using key_type = KeyT;
  using value_type = typename VectorType::value_type;
  using size_type = typename VectorType::size_type;

  using iterator = typename VectorType::iterator;
  using const_iterator = typename VectorType::const_iterator;
  using reverse_iterator = typename VectorType::reverse_iterator;
  using const_reverse_iterator = typename VectorType::const_reverse_iterator;

  /// Clear the MapVector and return the underlying vector.
  VectorType takeVector() {
    Map.clear();
    return std::move(Vector);
  }

  size_type size() const { return Vector.size(); }

  /// Grow the MapVector so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    Map.reserve(NumEntries);
    Vector.reserve(NumEntries);
  }

  iterator begin() { return Vector.begin(); }
  const_iterator begin() const { return Vector.begin(); }
  iterator end() { return Vector.end(); }
  const_iterator end() const { return Vector.end(); }

  reverse_iterator rbegin() { return Vector.rbegin(); }
  const_reverse_iterator rbegin() const { return Vector.rbegin(); }
  reverse_iterator rend() { return Vector.rend(); }
  const_reverse_iterator rend() const { return Vector.rend(); }

  bool empty() const {
    return Vector.empty();
  }

  std::pair<KeyT, ValueT>       &front()       { return Vector.front(); }
  const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
  std::pair<KeyT, ValueT>       &back()        { return Vector.back(); }
  const std::pair<KeyT, ValueT> &back()  const { return Vector.back(); }

  void clear() {
    Map.clear();
    Vector.clear();
  }

  void swap(MapVector &RHS) {
    std::swap(Map, RHS.Map);
    std::swap(Vector, RHS.Vector);
  }

  ValueT &operator[](const KeyT &Key) {
    std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(Key, 0);
    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
    auto &I = Result.first->second;
    if (Result.second) {
      Vector.push_back(std::make_pair(Key, ValueT()));
      I = Vector.size() - 1;
    }
    return Vector[I].second;
  }

  // Returns a copy of the value.  Only allowed if ValueT is copyable.
  ValueT lookup(const KeyT &Key) const {
    static_assert(std::is_copy_constructible_v<ValueT>,
                  "Cannot call lookup() if ValueT is not copyable.");
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? ValueT() : Vector[Pos->second].second;
  }

  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(KV.first, 0);
    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
    auto &I = Result.first->second;
    if (Result.second) {
      Vector.push_back(std::make_pair(KV.first, KV.second));
      I = Vector.size() - 1;
      return std::make_pair(std::prev(end()), true);
    }
    return std::make_pair(begin() + I, false);
  }

  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    // Copy KV.first into the map, then move it into the vector.
    std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(KV.first, 0);
    std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
    auto &I = Result.first->second;
    if (Result.second) {
      Vector.push_back(std::move(KV));
      I = Vector.size() - 1;
      return std::make_pair(std::prev(end()), true);
    }
    return std::make_pair(begin() + I, false);
  }

  bool contains(const KeyT &Key) const { return Map.find(Key) != Map.end(); }

  size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; }

  iterator find(const KeyT &Key) {
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? Vector.end() :
                            (Vector.begin() + Pos->second);
  }

  const_iterator find(const KeyT &Key) const {
    typename MapType::const_iterator Pos = Map.find(Key);
    return Pos == Map.end()? Vector.end() :
                            (Vector.begin() + Pos->second);
  }

  /// Remove the last element from the vector.
  void pop_back() {
    typename MapType::iterator Pos = Map.find(Vector.back().first);
    Map.erase(Pos);
    Vector.pop_back();
  }

  /// Remove the element given by Iterator.
  ///
  /// Returns an iterator to the element following the one which was removed,
  /// which may be end().
  ///
  /// \note This is a deceptively expensive operation (linear time).  It's
  /// usually better to use \a remove_if() if possible.
  typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
    Map.erase(Iterator->first);
    auto Next = Vector.erase(Iterator);
    if (Next == Vector.end())
      return Next;

    // Update indices in the map.
    size_t Index = Next - Vector.begin();
    for (auto &I : Map) {
      assert(I.second != Index && "Index was already erased!");
      if (I.second > Index)
        --I.second;
    }
    return Next;
  }

  /// Remove all elements with the key value Key.
  ///
  /// Returns the number of elements removed.
  size_type erase(const KeyT &Key) {
    auto Iterator = find(Key);
    if (Iterator == end())
      return 0;
    erase(Iterator);
    return 1;
  }

  /// Remove the elements that match the predicate.
  ///
  /// Erase all elements that match \c Pred in a single pass.  Takes linear
  /// time.
  template <class Predicate> void remove_if(Predicate Pred);
};

template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
template <class Function>
void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
  auto O = Vector.begin();
  for (auto I = O, E = Vector.end(); I != E; ++I) {
    if (Pred(*I)) {
      // Erase from the map.
      Map.erase(I->first);
      continue;
    }

    if (I != O) {
      // Move the value and update the index in the map.
      *O = std::move(*I);
      Map[O->first] = O - Vector.begin();
    }
    ++O;
  }
  // Erase trailing entries in the vector.
  Vector.erase(O, Vector.end());
}

/// A MapVector that performs no allocations if smaller than a certain
/// size.
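///
/// A minimal sketch (N = 16 entries of inline storage; the exact heap-free
/// capacity also depends on the underlying SmallDenseMap's load factor):
/// \code
///   SmallMapVector<int, int, 16> MV; // Small sizes avoid heap allocation.
/// \endcode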
template <typename KeyT, typename ValueT, unsigned N>
struct SmallMapVector
    : MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
                SmallVector<std::pair<KeyT, ValueT>, N>> {
};

} // end namespace llvm

#endif // LLVM_ADT_MAPVECTOR_H
//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the ImmutableList class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_IMMUTABLELIST_H
#define LLVM_ADT_IMMUTABLELIST_H

#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <cstdint>
#include <new>

namespace llvm {

template <typename T> class ImmutableListFactory;

template <typename T>
class ImmutableListImpl : public FoldingSetNode {
  friend class ImmutableListFactory<T>;

  T Head;
  const ImmutableListImpl* Tail;

  template <typename ElemT>
  ImmutableListImpl(ElemT &&head, const ImmutableListImpl *tail = nullptr)
    : Head(std::forward<ElemT>(head)), Tail(tail) {}

public:
  ImmutableListImpl(const ImmutableListImpl &) = delete;
  ImmutableListImpl &operator=(const ImmutableListImpl &) = delete;

  const T& getHead() const { return Head; }
  const ImmutableListImpl* getTail() const { return Tail; }

  static inline void Profile(FoldingSetNodeID& ID, const T& H,
                             const ImmutableListImpl* L){
    ID.AddPointer(L);
    ID.Add(H);
  }

  void Profile(FoldingSetNodeID& ID) {
    Profile(ID, Head, Tail);
  }
};

/// ImmutableList - This class represents an immutable (functional) list.
///  It is implemented as a smart pointer (wraps ImmutableListImpl), so it
///  is intended to always be copied by value as if it were a pointer.
///  This interface matches ImmutableSet and ImmutableMap.  ImmutableList
///  objects should almost never be created directly, and instead should
///  be created by ImmutableListFactory objects that manage the lifetime
///  of a group of lists.  When the factory object is reclaimed, all lists
///  created by that factory are released as well.
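///
/// A minimal usage sketch (values are illustrative):
/// \code
///   ImmutableList<int>::Factory F;
///   ImmutableList<int> L = F.create(1); // [1]
///   L = F.add(2, L);                    // [2, 1]; equal lists are uniqued.
/// \endcode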
template <typename T>
class ImmutableList {
public:
  using value_type = T;
  using Factory = ImmutableListFactory<T>;

  static_assert(std::is_trivially_destructible<T>::value,
                "T must be trivially destructible!");

private:
  const ImmutableListImpl<T>* X;

public:
  // This constructor should normally only be called by ImmutableListFactory<T>.
  // There may be cases, however, when one needs to extract the internal pointer
  // and reconstruct a list object from that pointer.
  ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}

  const ImmutableListImpl<T>* getInternalPointer() const {
    return X;
  }

  class iterator {
    const ImmutableListImpl<T>* L = nullptr;

  public:
    iterator() = default;
    iterator(ImmutableList l) : L(l.getInternalPointer()) {}

    iterator& operator++() { L = L->getTail(); return *this; }
    bool operator==(const iterator& I) const { return L == I.L; }
    bool operator!=(const iterator& I) const { return L != I.L; }
    const value_type& operator*() const { return L->getHead(); }
    const std::remove_reference_t<value_type> *operator->() const {
      return &L->getHead();
    }

    ImmutableList getList() const { return L; }
  };

  /// begin - Returns an iterator referring to the head of the list, or
  ///  an iterator denoting the end of the list if the list is empty.
  iterator begin() const { return iterator(X); }

  /// end - Returns an iterator denoting the end of the list.  This iterator
  ///  does not refer to a valid list element.
  iterator end() const { return iterator(); }

  /// isEmpty - Returns true if the list is empty.
  bool isEmpty() const { return !X; }

  bool contains(const T& V) const {
    for (iterator I = begin(), E = end(); I != E; ++I) {
      if (*I == V)
        return true;
    }
    return false;
  }

  /// isEqual - Returns true if two lists are equal.  Because all lists created
  ///  from the same ImmutableListFactory are uniqued, this has O(1) complexity
///  because the contents of the list do not need to be compared.  Note
  ///  that you should only compare two lists created from the same
  ///  ImmutableListFactory.
  bool isEqual(const ImmutableList& L) const { return X == L.X; }

  bool operator==(const ImmutableList& L) const { return isEqual(L); }

  /// getHead - Returns the head of the list.
  const T& getHead() const {
    assert(!isEmpty() && "Cannot get the head of an empty list.");
    return X->getHead();
  }

  /// getTail - Returns the tail of the list, which is another (possibly empty)
  ///  ImmutableList.
  ImmutableList getTail() const {
    return X ? X->getTail() : nullptr;
  }

  void Profile(FoldingSetNodeID& ID) const {
    ID.AddPointer(X);
  }
};

template <typename T>
class ImmutableListFactory {
  using ListTy = ImmutableListImpl<T>;
  using CacheTy = FoldingSet<ListTy>;

  CacheTy Cache;
  uintptr_t Allocator;

  bool ownsAllocator() const {
    return (Allocator & 0x1) == 0;
  }

  BumpPtrAllocator& getAllocator() const {
    return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
  }

public:
  ImmutableListFactory()
    : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}

  ImmutableListFactory(BumpPtrAllocator& Alloc)
  : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}

  ~ImmutableListFactory() {
    if (ownsAllocator()) delete &getAllocator();
  }

  template <typename ElemT>
  [[nodiscard]] ImmutableList<T> concat(ElemT &&Head, ImmutableList<T> Tail) {
    // Profile the new list to see if it already exists in our cache.
    FoldingSetNodeID ID;
    void* InsertPos;

    const ListTy* TailImpl = Tail.getInternalPointer();
    ListTy::Profile(ID, Head, TailImpl);
    ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);

    if (!L) {
      // The list does not exist in our cache.  Create it.
      BumpPtrAllocator& A = getAllocator();
      L = (ListTy*) A.Allocate<ListTy>();
      new (L) ListTy(std::forward<ElemT>(Head), TailImpl);

      // Insert the new list into the cache.
      Cache.InsertNode(L, InsertPos);
    }

    return L;
  }

  template <typename ElemT>
  [[nodiscard]] ImmutableList<T> add(ElemT &&Data, ImmutableList<T> L) {
    return concat(std::forward<ElemT>(Data), L);
  }

  template <typename... CtorArgs>
  [[nodiscard]] ImmutableList<T> emplace(ImmutableList<T> Tail,
                                         CtorArgs &&...Args) {
    return concat(T(std::forward<CtorArgs>(Args)...), Tail);
  }

  ImmutableList<T> getEmptyList() const {
    return ImmutableList<T>(nullptr);
  }

  template <typename ElemT>
  ImmutableList<T> create(ElemT &&Data) {
    return concat(std::forward<ElemT>(Data), getEmptyList());
  }
};

//===----------------------------------------------------------------------===//
// Partially-specialized Traits.
//===----------------------------------------------------------------------===//

template <typename T> struct DenseMapInfo<ImmutableList<T>, void> {
  static inline ImmutableList<T> getEmptyKey() {
    return reinterpret_cast<ImmutableListImpl<T>*>(-1);
  }

  static inline ImmutableList<T> getTombstoneKey() {
    return reinterpret_cast<ImmutableListImpl<T>*>(-2);
  }

  static unsigned getHashValue(ImmutableList<T> X) {
    uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
    return (unsigned((uintptr_t)PtrVal) >> 4) ^
           (unsigned((uintptr_t)PtrVal) >> 9);
  }

  static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
    return X1 == X2;
  }
};

} // end namespace llvm

#endif // LLVM_ADT_IMMUTABLELIST_H
//===- DenseMapInfoVariant.h - Type traits for DenseMap<variant> *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines DenseMapInfo traits for DenseMap<std::variant<Ts...>>.
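///
/// A minimal usage sketch (both alternatives already have DenseMapInfo):
/// \code
///   DenseMap<std::variant<unsigned, char>, int> M;
///   M[std::variant<unsigned, char>(42u)] = 1; // Hashes index and value.
/// \endcode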
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAPINFOVARIANT_H
#define LLVM_ADT_DENSEMAPINFOVARIANT_H

#include "llvm/ADT/DenseMapInfo.h"
#include <utility>
#include <variant>

namespace llvm {

// Provide DenseMapInfo for variants all of whose alternatives have
// DenseMapInfo.
template <typename... Ts> struct DenseMapInfo<std::variant<Ts...>> {
  using Variant = std::variant<Ts...>;
  using FirstT = std::variant_alternative_t<0, Variant>;

  static inline Variant getEmptyKey() {
    return Variant(std::in_place_index<0>, DenseMapInfo<FirstT>::getEmptyKey());
  }

  static inline Variant getTombstoneKey() {
    return Variant(std::in_place_index<0>,
                   DenseMapInfo<FirstT>::getTombstoneKey());
  }

  static unsigned getHashValue(const Variant &Val) {
    return std::visit(
        [&Val](auto &&Alternative) {
          using T = std::decay_t<decltype(Alternative)>;
          // Include the index in the hash so that the same value stored in
          // different alternatives doesn't collide.
          return DenseMapInfo<std::pair<size_t, T>>::getHashValuePiecewise(
              Val.index(), Alternative);
        },
        Val);
  }

  static bool isEqual(const Variant &LHS, const Variant &RHS) {
    if (LHS.index() != RHS.index())
      return false;
    if (LHS.valueless_by_exception())
      return true;
    // We want to dispatch to DenseMapInfo<T>::isEqual(LHS.get(I), RHS.get(I))
    // We know the types are the same, but std::visit(V, LHS, RHS) doesn't.
    // We erase the type held in LHS to void*, and dispatch over RHS.
    const void *ErasedLHS =
        std::visit([](const auto &LHS) -> const void * { return &LHS; }, LHS);
    return std::visit(
        [&](const auto &RHS) -> bool {
          using T = std::remove_cv_t<std::remove_reference_t<decltype(RHS)>>;
          return DenseMapInfo<T>::isEqual(*static_cast<const T *>(ErasedLHS),
                                          RHS);
        },
        RHS);
  }
};

} // end namespace llvm

#endif // LLVM_ADT_DENSEMAPINFOVARIANT_H
//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///  This file provides None, an enumerator for use in implicit constructors
///  of various (usually templated) types to make such construction more
///  terse.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_NONE_H
#define LLVM_ADT_NONE_H

#include "llvm/Support/Compiler.h"
#include <optional>

namespace llvm {
/// A simple null object to allow implicit construction of std::optional<T>
/// and similar types without having to spell out the specialization's name.
LLVM_DEPRECATED("Use std::nullopt_t instead", "std::nullopt_t")
typedef std::nullopt_t NoneType;
LLVM_DEPRECATED("Use std::nullopt instead.", "std::nullopt")
inline constexpr std::nullopt_t None = std::nullopt;
}

#endif
//===- ADT/SCCIterator.h - Strongly Connected Comp. Iter. -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
/// algorithm.
///
/// The SCC iterator has the important property that if a node in SCC S1 has an
/// edge to a node in SCC S2, then it visits S1 *after* S2.
///
/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
/// This requires some simple wrappers and is not supported yet.)
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SCCITERATOR_H
#define LLVM_ADT_SCCITERATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <queue>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace llvm {

/// Enumerate the SCCs of a directed graph in reverse topological order
/// of the SCC DAG.
///
/// This is implemented using Tarjan's DFS algorithm using an internal stack to
/// build up a vector of nodes in a particular SCC. Note that it is a forward
/// iterator and thus you cannot backtrack or re-visit nodes.
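///
/// A minimal usage sketch (Graph is a hypothetical node type with a
/// GraphTraits<Graph *> specialization; G is a Graph *):
/// \code
///   for (auto I = scc_iterator<Graph *>::begin(G),
///             E = scc_iterator<Graph *>::end(G);
///        I != E; ++I) {
///     const std::vector<GraphTraits<Graph *>::NodeRef> &SCC = *I;
///     // Each SCC is visited before any SCC that has an edge into it.
///   }
/// \endcode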
template <class GraphT, class GT = GraphTraits<GraphT>>
class scc_iterator : public iterator_facade_base<
                         scc_iterator<GraphT, GT>, std::forward_iterator_tag,
                         const std::vector<typename GT::NodeRef>, ptrdiff_t> {
  using NodeRef = typename GT::NodeRef;
  using ChildItTy = typename GT::ChildIteratorType;
  using SccTy = std::vector<NodeRef>;
  using reference = typename scc_iterator::reference;

  /// Element of VisitStack during DFS.
  struct StackElement {
    NodeRef Node;         ///< The current node pointer.
    ChildItTy NextChild;  ///< The next child, modified in place during DFS.
    unsigned MinVisited;  ///< Minimum uplink value of all children of Node.

    StackElement(NodeRef Node, const ChildItTy &Child, unsigned Min)
        : Node(Node), NextChild(Child), MinVisited(Min) {}

    bool operator==(const StackElement &Other) const {
      return Node == Other.Node &&
             NextChild == Other.NextChild &&
             MinVisited == Other.MinVisited;
    }
  };

  /// The visit counters used to detect when a complete SCC is on the stack.
  /// visitNum is the global counter.
  ///
  /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
  unsigned visitNum;
  DenseMap<NodeRef, unsigned> nodeVisitNumbers;

  /// Stack holding nodes of the SCC.
  std::vector<NodeRef> SCCNodeStack;

  /// The current SCC, retrieved using operator*().
  SccTy CurrentSCC;

  /// DFS stack, used to maintain the ordering.  The top contains the current
  /// node, the next child to visit, and the minimum uplink value of all
  /// children of that node.
  std::vector<StackElement> VisitStack;

  /// A single "visit" within the non-recursive DFS traversal.
  void DFSVisitOne(NodeRef N);

  /// The stack-based DFS traversal; defined below.
  void DFSVisitChildren();

  /// Compute the next SCC using the DFS traversal.
  void GetNextSCC();

  scc_iterator(NodeRef entryN) : visitNum(0) {
    DFSVisitOne(entryN);
    GetNextSCC();
  }

  /// End is when the DFS stack is empty.
  scc_iterator() = default;

public:
  static scc_iterator begin(const GraphT &G) {
    return scc_iterator(GT::getEntryNode(G));
  }
  static scc_iterator end(const GraphT &) { return scc_iterator(); }

  /// Direct loop termination test which is more efficient than
  /// comparison with \c end().
  bool isAtEnd() const {
    assert(!CurrentSCC.empty() || VisitStack.empty());
    return CurrentSCC.empty();
  }

  bool operator==(const scc_iterator &x) const {
    return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
  }

  scc_iterator &operator++() {
    GetNextSCC();
    return *this;
  }

  reference operator*() const {
    assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
    return CurrentSCC;
  }

  /// Test if the current SCC has a cycle.
  ///
  /// If the SCC has more than one node, this is trivially true.  If not, it may
  /// still contain a cycle if the node has an edge back to itself.
  bool hasCycle() const;

  /// This informs the \c scc_iterator that the specified \c Old node
  /// has been deleted, and \c New is to be used in its place.
  void ReplaceNode(NodeRef Old, NodeRef New) {
    assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
    // Do the assignment in two steps, in case 'New' is not yet in the map, and
    // inserting it causes the map to grow.
    auto tempVal = nodeVisitNumbers[Old];
    nodeVisitNumbers[New] = tempVal;
    nodeVisitNumbers.erase(Old);
  }
};

template <class GraphT, class GT>
void scc_iterator<GraphT, GT>::DFSVisitOne(NodeRef N) {
  ++visitNum;
  nodeVisitNumbers[N] = visitNum;
  SCCNodeStack.push_back(N);
  VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
#if 0 // Enable if needed when debugging.
  dbgs() << "TarjanSCC: Node " << N <<
        " : visitNum = " << visitNum << "\n";
#endif
}

template <class GraphT, class GT>
void scc_iterator<GraphT, GT>::DFSVisitChildren() {
  assert(!VisitStack.empty());
  while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
    // TOS has at least one more child so continue DFS
    NodeRef childN = *VisitStack.back().NextChild++;
    typename DenseMap<NodeRef, unsigned>::iterator Visited =
        nodeVisitNumbers.find(childN);
    if (Visited == nodeVisitNumbers.end()) {
      // This node has never been seen.
      DFSVisitOne(childN);
      continue;
    }

    unsigned childNum = Visited->second;
    if (VisitStack.back().MinVisited > childNum)
      VisitStack.back().MinVisited = childNum;
  }
}

template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
  CurrentSCC.clear(); // Prepare to compute the next SCC
  while (!VisitStack.empty()) {
    DFSVisitChildren();

    // Pop the leaf on top of the VisitStack.
    NodeRef visitingN = VisitStack.back().Node;
    unsigned minVisitNum = VisitStack.back().MinVisited;
    assert(VisitStack.back().NextChild == GT::child_end(visitingN));
    VisitStack.pop_back();

    // Propagate MinVisitNum to parent so we can detect the SCC starting node.
    if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
      VisitStack.back().MinVisited = minVisitNum;

#if 0 // Enable if needed when debugging.
    dbgs() << "TarjanSCC: Popped node " << visitingN <<
          " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
          nodeVisitNumbers[visitingN] << "\n";
#endif

    if (minVisitNum != nodeVisitNumbers[visitingN])
      continue;

    // A full SCC is on the SCCNodeStack!  It includes all nodes below
    // visitingN on the stack.  Copy those nodes to CurrentSCC,
    // reset their minVisit values, and return (this suspends
    // the DFS traversal till the next ++).
    do {
      CurrentSCC.push_back(SCCNodeStack.back());
      SCCNodeStack.pop_back();
      nodeVisitNumbers[CurrentSCC.back()] = ~0U;
    } while (CurrentSCC.back() != visitingN);
    return;
  }
}

template <class GraphT, class GT>
bool scc_iterator<GraphT, GT>::hasCycle() const {
  assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
  if (CurrentSCC.size() > 1)
    return true;
  NodeRef N = CurrentSCC.front();
  for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
       ++CI)
    if (*CI == N)
      return true;
  return false;
}

/// Construct the begin iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_begin(const T &G) {
  return scc_iterator<T>::begin(G);
}

/// Construct the end iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_end(const T &G) {
  return scc_iterator<T>::end(G);
}
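
// Example (editor's sketch, not part of the original header): walking the
// SCCs of a function's CFG in reverse topological order.  Assumes the caller
// has included llvm/IR/CFG.h (for GraphTraits<Function *>) and
// llvm/Support/raw_ostream.h (for errs()).
#if 0 // Illustration only; not compiled as part of this header.
inline void printCFGSCCs(Function &F) {
  for (scc_iterator<Function *> I = scc_begin(&F); !I.isAtEnd(); ++I) {
    // *I is the current SCC, a vector of basic blocks.
    const std::vector<BasicBlock *> &SCC = *I;
    errs() << "SCC of " << SCC.size() << " block(s)"
           << (I.hasCycle() ? " (contains a cycle)\n" : "\n");
  }
}
#endif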

/// Sort the nodes of a directed SCC in decreasing order of the edge weights.
/// The instantiating GraphT type should have a weighted edge type declared in
/// its graph traits in order to use this iterator.
///
/// This is implemented with Kruskal's algorithm followed by Kahn's algorithm
/// to compute a topological order on the resulting tree. First a maximum
/// weight spanning tree (forest) is built from all edges within the SCC
/// collection by running Kruskal's algorithm over the edges sorted in
/// decreasing weight order. Then a topological walk is initiated from the
/// tree nodes that have no predecessor and extended to all nodes of the SCC.
/// This order ensures that high-weighted edges are visited first during the
/// traversal.
template <class GraphT, class GT = GraphTraits<GraphT>>
class scc_member_iterator {
  using NodeType = typename GT::NodeType;
  using EdgeType = typename GT::EdgeType;
  using NodesType = std::vector<NodeType *>;

  // Auxiliary node information used during the MST calculation.
  struct NodeInfo {
    NodeInfo *Group = this;
    uint32_t Rank = 0;
    bool Visited = false;
    DenseSet<const EdgeType *> IncomingMSTEdges;
  };

  // Find the root group of the node and compress the path from node to the
  // root.
  NodeInfo *find(NodeInfo *Node) {
    if (Node->Group != Node)
      Node->Group = find(Node->Group);
    return Node->Group;
  }

  // Union the source and target node into the same group and return true.
  // Returns false if they are already in the same group.
  bool unionGroups(const EdgeType *Edge) {
    NodeInfo *G1 = find(&NodeInfoMap[Edge->Source]);
    NodeInfo *G2 = find(&NodeInfoMap[Edge->Target]);

    // If the edge forms a cycle, do not add it to MST
    if (G1 == G2)
      return false;

    // Make the smaller-rank tree a direct child of the root of the
    // higher-rank tree.
    if (G1->Rank < G2->Rank)
      G1->Group = G2;
    else {
      G2->Group = G1;
      // If the ranks are the same, increment the rank of the new root by one.
      if (G1->Rank == G2->Rank)
        G1->Rank++;
    }
    return true;
  }

  std::unordered_map<NodeType *, NodeInfo> NodeInfoMap;
  NodesType Nodes;

public:
  scc_member_iterator(const NodesType &InputNodes);

  NodesType &operator*() { return Nodes; }
};

template <class GraphT, class GT>
scc_member_iterator<GraphT, GT>::scc_member_iterator(
    const NodesType &InputNodes) {
  if (InputNodes.size() <= 1) {
    Nodes = InputNodes;
    return;
  }

  // Initialize auxiliary node information.
  NodeInfoMap.clear();
  for (auto *Node : InputNodes) {
    // Subscripting default-constructs the `NodeInfo` object in place.  An
    // explicit insert would involve a copy construction, which would
    // invalidate the initial value of the `Group` field, which must point to
    // the object itself (`this`).
    (void)NodeInfoMap[Node].Group;
  }

  // Sort edges by weights.
  struct EdgeComparer {
    bool operator()(const EdgeType *L, const EdgeType *R) const {
      return L->Weight > R->Weight;
    }
  };

  std::multiset<const EdgeType *, EdgeComparer> SortedEdges;
  for (auto *Node : InputNodes) {
    for (auto &Edge : Node->Edges) {
      if (NodeInfoMap.count(Edge.Target))
        SortedEdges.insert(&Edge);
    }
  }

  // Traverse all the edges and compute the Maximum Weight Spanning Tree
  // using Kruskal's algorithm.
  std::unordered_set<const EdgeType *> MSTEdges;
  for (auto *Edge : SortedEdges) {
    if (unionGroups(Edge))
      MSTEdges.insert(Edge);
  }

  // Run Kahn's algorithm on MST to compute a topological traversal order.
  // The algorithm starts from nodes that have no incoming edge; these nodes
  // are the "roots" of the MST forest. It guarantees that nodes are visited
  // before their descendants, which in turn ensures that hot edges are
  // processed before cold edges, given how the MST was computed.
  std::queue<NodeType *> Queue;
  for (const auto *Edge : MSTEdges)
    NodeInfoMap[Edge->Target].IncomingMSTEdges.insert(Edge);

  // Walk through SortedEdges, rather than NodeInfoMap, to initialize the
  // queue, so that nodes are pushed in a deterministic order.
  for (auto *Edge : SortedEdges) {
    if (!NodeInfoMap[Edge->Source].Visited &&
        NodeInfoMap[Edge->Source].IncomingMSTEdges.empty()) {
      Queue.push(Edge->Source);
      NodeInfoMap[Edge->Source].Visited = true;
    }
  }

  while (!Queue.empty()) {
    auto *Node = Queue.front();
    Queue.pop();
    Nodes.push_back(Node);
    for (auto &Edge : Node->Edges) {
      NodeInfoMap[Edge.Target].IncomingMSTEdges.erase(&Edge);
      if (MSTEdges.count(&Edge) &&
          NodeInfoMap[Edge.Target].IncomingMSTEdges.empty()) {
        Queue.push(Edge.Target);
      }
    }
  }

  assert(InputNodes.size() == Nodes.size() && "missing nodes in MST");
  std::reverse(Nodes.begin(), Nodes.end());
}
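
// Example (editor's sketch; ExampleNode, ExampleEdge and ExampleTraits are
// hypothetical stand-ins, not LLVM types).  scc_member_iterator only needs a
// traits class exposing NodeType and EdgeType, edges with Source, Target and
// Weight members, and nodes with an iterable Edges member.
#if 0 // Illustration only; not compiled as part of this header.
struct ExampleNode;
struct ExampleEdge {
  ExampleNode *Source;
  ExampleNode *Target;
  unsigned Weight;
};
struct ExampleNode {
  std::vector<ExampleEdge> Edges;
};
struct ExampleTraits {
  using NodeType = ExampleNode;
  using EdgeType = ExampleEdge;
};
inline void orderByHotness(const std::vector<ExampleNode *> &SCC) {
  scc_member_iterator<ExampleNode *, ExampleTraits> MI(SCC);
  for (ExampleNode *N : *MI) {
    // Nodes arrive in an order that honors the heaviest MST edges first.
    (void)N;
  }
}
#endif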
} // end namespace llvm

#endif // LLVM_ADT_SCCITERATOR_H
//===- llvm/ADT/SparseSet.h - Sparse set ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the SparseSet class derived from the version described in
/// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
/// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec.  1993.
///
/// A sparse set holds a small number of objects identified by integer keys from
/// a moderately sized universe. The sparse set uses more memory than other
/// containers in order to provide faster operations.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SPARSESET_H
#define LLVM_ADT_SPARSESET_H

#include "llvm/ADT/identity.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/AllocatorBase.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <utility>

namespace llvm {

/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
/// be uniquely converted to a small integer less than the set's universe. This
/// class allows the set to hold values that differ from the set's key type as
/// long as an index can still be derived from the value. SparseSet never
/// directly compares ValueT, only their indices, so it can map keys to
/// arbitrary values. SparseSetValTraits computes the index from the value
/// object. To compute the index from a key, SparseSet uses a separate
/// KeyFunctorT template argument.
///
/// A simple type declaration, SparseSet<Type>, handles these cases:
/// - unsigned key, identity index, identity value
/// - unsigned key, identity index, fat value providing getSparseSetIndex()
///
/// The type declaration SparseSet<Type, UnaryFunction> handles:
/// - unsigned key, remapped index, identity value (virtual registers)
/// - pointer key, pointer-derived index, identity value (node+ID)
/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
///
/// Only other, unexpected cases require specializing SparseSetValTraits.
///
/// For best results, ValueT should not require a destructor.
///
template<typename ValueT>
struct SparseSetValTraits {
  static unsigned getValIndex(const ValueT &Val) {
    return Val.getSparseSetIndex();
  }
};
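
// Example (editor's sketch; LiveReg is a hypothetical value type): a "fat"
// value works with the generic traits above simply by exposing
// getSparseSetIndex().
#if 0 // Illustration only; not compiled as part of this header.
struct LiveReg {
  unsigned VirtRegIndex;  // The key.
  unsigned LastUse = 0;   // Extra payload carried alongside the key.
  unsigned getSparseSetIndex() const { return VirtRegIndex; }
};
// A SparseSet<LiveReg> then indexes entries by getSparseSetIndex().
#endif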

/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
/// generic implementation handles ValueT classes which either provide
/// getSparseSetIndex() or specialize SparseSetValTraits<>.
///
template<typename KeyT, typename ValueT, typename KeyFunctorT>
struct SparseSetValFunctor {
  unsigned operator()(const ValueT &Val) const {
    return SparseSetValTraits<ValueT>::getValIndex(Val);
  }
};

/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
/// identity key/value sets.
template<typename KeyT, typename KeyFunctorT>
struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
  unsigned operator()(const KeyT &Key) const {
    return KeyFunctorT()(Key);
  }
};

/// SparseSet - Fast set implementation for objects that can be identified by
/// small unsigned keys.
///
/// SparseSet allocates memory proportional to the size of the key universe, so
/// it is not recommended for building composite data structures.  It is useful
/// for algorithms that require a single set with fast operations.
///
/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
/// clear() and iteration as fast as a vector.  The find(), insert(), and
/// erase() operations are all constant time, and typically faster than a hash
/// table.  The iteration order doesn't depend on numerical key values; it only
/// depends on the order of insert() and erase() operations.  When no elements
/// have been erased, the iteration order is the insertion order.
///
/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
/// offers constant-time clear() and size() operations as well as fast
/// iteration independent of the size of the universe.
///
/// SparseSet contains a dense vector holding all the objects and a sparse
/// array holding indexes into the dense vector.  Most of the memory is used by
/// the sparse array which is the size of the key universe.  The SparseT
/// template parameter provides a space/speed tradeoff for sets holding many
/// elements.
///
/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
/// array uses 4 x Universe bytes.
///
/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
/// lines, but the sparse array is 4x smaller.  N is the number of elements in
/// the set.
///
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
///
/// @tparam ValueT      The type of objects in the set.
/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @tparam SparseT     An unsigned integer type. See above.
///
template<typename ValueT,
         typename KeyFunctorT = identity<unsigned>,
         typename SparseT = uint8_t>
class SparseSet {
  static_assert(std::is_unsigned_v<SparseT>,
                "SparseT must be an unsigned integer type");

  using KeyT = typename KeyFunctorT::argument_type;
  using DenseT = SmallVector<ValueT, 8>;
  using size_type = unsigned;
  DenseT Dense;
  SparseT *Sparse = nullptr;
  unsigned Universe = 0;
  KeyFunctorT KeyIndexOf;
  SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;

public:
  using value_type = ValueT;
  using reference = ValueT &;
  using const_reference = const ValueT &;
  using pointer = ValueT *;
  using const_pointer = const ValueT *;

  SparseSet() = default;
  SparseSet(const SparseSet &) = delete;
  SparseSet &operator=(const SparseSet &) = delete;
  ~SparseSet() { free(Sparse); }

  /// setUniverse - Set the universe size which determines the largest key the
  /// set can hold.  The universe must be sized before any elements can be
  /// added.
  ///
  /// @param U Universe size. All object keys must be less than U.
  ///
  void setUniverse(unsigned U) {
    // It's not hard to resize the universe on a non-empty set, but it doesn't
    // seem like a likely use case, so we can add that code when we need it.
    assert(empty() && "Can only resize universe on an empty map");
    // Hysteresis prevents needless reallocations.
    if (U >= Universe/4 && U <= Universe)
      return;
    free(Sparse);
    // The Sparse array doesn't actually need to be initialized, so malloc
    // would be enough here, but that will cause tools like valgrind to
    // complain about branching on uninitialized data.
    Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
    Universe = U;
  }

  // Import trivial vector stuff from DenseT.
  using iterator = typename DenseT::iterator;
  using const_iterator = typename DenseT::const_iterator;

  const_iterator begin() const { return Dense.begin(); }
  const_iterator end() const { return Dense.end(); }
  iterator begin() { return Dense.begin(); }
  iterator end() { return Dense.end(); }

  /// empty - Returns true if the set is empty.
  ///
  /// This is not the same as BitVector::empty().
  ///
  bool empty() const { return Dense.empty(); }

  /// size - Returns the number of elements in the set.
  ///
  /// This is not the same as BitVector::size() which returns the size of the
  /// universe.
  ///
  size_type size() const { return Dense.size(); }

  /// clear - Clears the set.  This is a very fast constant time operation.
  ///
  void clear() {
    // Sparse does not need to be cleared, see find().
    Dense.clear();
  }

  /// findIndex - Find an element by its index.
  ///
  /// @param   Idx A valid index to find.
  /// @returns An iterator to the element identified by key, or end().
  ///
  iterator findIndex(unsigned Idx) {
    assert(Idx < Universe && "Key out of range");
    assert(Sparse != nullptr && "Invalid sparse type");
    const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
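    // Editor's note: Sparse[Idx] stores only the low bits of the dense index,
    // so when SparseT is narrower than unsigned, every dense slot congruent to
    // Sparse[Idx] modulo 2^bits(SparseT) is a candidate; each candidate is
    // verified below by recomputing its value's index.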
    for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
      const unsigned FoundIdx = ValIndexOf(Dense[i]);
      assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
      if (Idx == FoundIdx)
        return begin() + i;
      // Stride is 0 when SparseT >= unsigned.  We don't need to loop.
      if (!Stride)
        break;
    }
    return end();
  }

  /// find - Find an element by its key.
  ///
  /// @param   Key A valid key to find.
  /// @returns An iterator to the element identified by key, or end().
  ///
  iterator find(const KeyT &Key) {
    return findIndex(KeyIndexOf(Key));
  }

  const_iterator find(const KeyT &Key) const {
    return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
  }

  /// Check if the set contains the given \c Key.
  ///
  /// @param Key A valid key to find.
  bool contains(const KeyT &Key) const { return find(Key) != end(); }

  /// count - Returns 1 if this set contains an element identified by Key,
  /// 0 otherwise.
  ///
  size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; }

  /// insert - Attempts to insert a new element.
  ///
  /// If Val is successfully inserted, return (I, true), where I is an iterator
  /// pointing to the newly inserted element.
  ///
  /// If the set already contains an element with the same key as Val, return
  /// (I, false), where I is an iterator pointing to the existing element.
  ///
  /// Insertion invalidates all iterators.
  ///
  std::pair<iterator, bool> insert(const ValueT &Val) {
    unsigned Idx = ValIndexOf(Val);
    iterator I = findIndex(Idx);
    if (I != end())
      return std::make_pair(I, false);
    Sparse[Idx] = size();
    Dense.push_back(Val);
    return std::make_pair(end() - 1, true);
  }

  /// array subscript - If an element already exists with this key, return it.
  /// Otherwise, automatically construct a new value from Key, insert it,
  /// and return the newly inserted element.
  ValueT &operator[](const KeyT &Key) {
    return *insert(ValueT(Key)).first;
  }

  ValueT pop_back_val() {
    // Sparse does not need to be cleared, see find().
    return Dense.pop_back_val();
  }

  /// erase - Erases an existing element identified by a valid iterator.
  ///
  /// This invalidates all iterators, but erase() returns an iterator pointing
  /// to the next element.  This makes it possible to erase selected elements
  /// while iterating over the set:
  ///
  ///   for (SparseSet::iterator I = Set.begin(); I != Set.end();)
  ///     if (test(*I))
  ///       I = Set.erase(I);
  ///     else
  ///       ++I;
  ///
  /// Note that end() changes when elements are erased, unlike std::list.
  ///
  iterator erase(iterator I) {
    assert(unsigned(I - begin()) < size() && "Invalid iterator");
    if (I != end() - 1) {
      *I = Dense.back();
      unsigned BackIdx = ValIndexOf(Dense.back());
      assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
      Sparse[BackIdx] = I - begin();
    }
    // This depends on SmallVector::pop_back() not invalidating iterators.
    // std::vector::pop_back() doesn't give that guarantee.
    Dense.pop_back();
    return I;
  }

  /// erase - Erases an element identified by Key, if it exists.
  ///
  /// @param   Key The key identifying the element to erase.
  /// @returns True when an element was erased, false if no element was found.
  ///
  bool erase(const KeyT &Key) {
    iterator I = find(Key);
    if (I == end())
      return false;
    erase(I);
    return true;
  }
};
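
// Example (editor's sketch, not part of the original header): SparseSet used
// as a scratch set over small unsigned keys.
#if 0 // Illustration only; not compiled as part of this header.
inline void exampleSparseSet() {
  SparseSet<unsigned> Set;
  Set.setUniverse(1024); // All keys must be < 1024; required before insert().
  Set.insert(5);
  Set.insert(10);
  if (Set.count(5))
    Set.erase(5u);
  Set.clear();           // Constant time, independent of the universe size.
}
#endif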

} // end namespace llvm

#endif // LLVM_ADT_SPARSESET_H
//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the DenseMap class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using std::pair<KeyT, ValueT>::pair;

  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};

} // end namespace detail

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
          bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
  using const_iterator =
      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (std::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
      (void)NumEntries;
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return true if the specified key is in the map, false otherwise.
  bool contains(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    return contains(Val) ? 1 : 0;
  }

  iterator find(const_arg_type_t<KeyT> Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeIterator(TheBucket,
                          shouldReverseIterate<KeyT>() ? getBuckets()
                                                       : getBucketsEnd(),
                          *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return makeConstIterator(TheBucket,
                               shouldReverseIterate<KeyT>() ? getBuckets()
                                                            : getBucketsEnd(),
                               *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  /// at - Return the entry for the specified key, or abort if no such
  /// entry exists.
  const ValueT &at(const_arg_type_t<KeyT> Val) const {
    auto Iter = this->find(std::move(Val));
    assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
    return Iter->second;
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  /// Returns the value associated with the key in the map if it exists. If it
  /// does not exist, emplaces a default value for the key and returns a
  /// reference to the newly created value.
  ValueT &getOrInsertDefault(KeyT &&Key) {
    return try_emplace(std::move(Key)).first->second;
  }

  /// Returns the value associated with the key in the map if it exists. If it
  /// does not exist, emplaces a default value for the key and returns a
  /// reference to the newly created value.
  ValueT &getOrInsertDefault(const KeyT &Key) {
    return try_emplace(Key).first->second;
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.  For example, if
    // NumEntries is 48, 48 * 4 / 3 == 64 buckets would put the load factor at
    // exactly 3/4, so we compute NextPowerOf2(65) and return 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (std::is_trivially_copyable<KeyT>::value &&
        std::is_trivially_copyable<ValueT>::value)
      memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  iterator makeIterator(BucketT *P, BucketT *E,
                        DebugEpochBase &Epoch,
                        bool NoAdvance=false) {
    if (shouldReverseIterate<KeyT>()) {
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }

  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance=false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }

  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }

  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }

  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
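    // Editor's note: offsets of 1, 2, 3, ... make BucketNo follow the
    // triangular numbers modulo NumBuckets; for power-of-two table sizes this
    // sequence visits every bucket, so the loop terminates as long as at
    // least one empty bucket exists.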
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map.  Return
      // the bucket the caller should insert into (preferring an earlier
      // tombstone, if one was seen).
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  for (auto &KV : LHS) {
    auto I = RHS.find(KV.first);
    if (I == RHS.end() || I->second != KV.second)
      return false;
  }

  return true;
}

/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldNumBuckets = NumBuckets;
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
};
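
// Example (editor's sketch, not part of the original header): common DenseMap
// operations.
#if 0 // Illustration only; not compiled as part of this header.
inline void exampleDenseMap() {
  DenseMap<unsigned, unsigned> M;
  M[1] = 10;                                  // Default-construct, then assign.
  auto [It, Inserted] = M.try_emplace(2, 20); // No overwrite if key exists.
  (void)It;
  (void)Inserted;
  unsigned V = M.lookup(3); // Value-initialized (0) result when key is absent.
  (void)V;
  if (M.contains(1))
    M.erase(1);
}
#endif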

template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing an
  /// out-of-line bucket array (LargeRep). This union is discriminated by the
  /// 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    if (NumInitBuckets > InlineBuckets)
      NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.begin(), Vals.end()) {}

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we switch to the large
      // rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }

  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};
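
// Example usage (an illustrative sketch, not part of the original header):
// with four inline buckets, the map below keeps its storage on the stack
// until the load factor forces a switch to the heap-allocated LargeRep.
//
//   SmallDenseMap<int, const char *, 4> M;
//   M[1] = "one";   // stored in the inline bucket array
//   M[2] = "two";   // still inline; a third insertion would typically
//                   // trigger grow() and the move to the large rep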

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  pointer Ptr = nullptr;
  pointer End = nullptr;

public:
  DenseMapIterator() = default;

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");

    if (NoAdvance) return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1];
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }

  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};
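
// Note (illustrative): when LLVM's ABI-breaking checks are enabled (an
// assumption about the build configuration), these iterators carry a
// DebugEpochBase handle, so the asserts above catch use of an iterator after
// the map has been mutated. A hypothetical misuse:
//
//   DenseMap<int, int> M;
//   auto It = M.insert({1, 2}).first;
//   M.reserve(128);  // may reallocate buckets and bump the epoch
//   // *It;          // would assert "invalid iterator access!"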

template <typename KeyT, typename ValueT, typename KeyInfoT>
inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif // LLVM_ADT_DENSEMAP_H

//===- GenericSSAContext.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the little GenericSSAContext<X> template class
/// that can be used to implement IR analyses as templates.
/// Specializing these templates allows the analyses to be used over
/// both LLVM IR and Machine IR.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GENERICSSACONTEXT_H
#define LLVM_ADT_GENERICSSACONTEXT_H

#include "llvm/Support/Printable.h"

namespace llvm {

template <typename _FunctionT> class GenericSSAContext {
public:
  // Specializations should provide the following types that are similar to how
  // LLVM IR is structured:

  // The smallest unit of the IR is a ValueT. The SSA context uses a ValueRefT,
  // which is a pointer to a ValueT, since Machine IR does not have the
  // equivalent of a ValueT.
  //
  // using ValueRefT = ...
  //
  // The ConstValueRefT is needed to work with "const Value *", where const
  // needs to bind to the pointee and not the pointer.
  //
  // using ConstValueRefT = ...
  //
  // The null value for ValueRefT.
  //
  // static constexpr ValueRefT ValueRefNull;

  // An InstructionT usually defines one or more ValueT objects.
  //
  // using InstructionT = ... must be a subclass of Value

  // A UseT represents a data-edge from the defining instruction to the using
  // instruction.
  //
  // using UseT = ...

  // A BlockT is a sequence of InstructionT, and forms a node of the CFG. It
  // has global methods predecessors() and successors() that return
  // the list of incoming CFG edges and outgoing CFG edges
  // respectively.
  //
  // using BlockT = ...

  // A FunctionT represents a CFG along with arguments and return values. It is
  // the smallest complete unit of code in a Module.
  //
  // The compiler produces an error here if this class is implicitly
  // specialized due to an instantiation. An explicit specialization
  // of this template needs to be added before the instantiation point
  // indicated by the compiler.
  using FunctionT = typename _FunctionT::invalidTemplateInstanceError;

  // A dominator tree provides the dominance relation between basic blocks in
  // a given function.
  //
  // using DominatorTreeT = ...

  // Initialize the SSA context with information about the FunctionT being
  // processed.
  //
  // void setFunction(FunctionT &function);
  // FunctionT* getFunction() const;

  // Every FunctionT has a unique BlockT marked as its entry.
  //
  // static BlockT* getEntryBlock(FunctionT &F);

  // Methods to examine basic blocks and values
  //
  // static void appendBlockDefs(SmallVectorImpl<ValueRefT> &defs,
  //                             BlockT &block);
  // static void appendBlockDefs(SmallVectorImpl<const ValueRefT> &defs,
  //                             const BlockT &block);

  // static void appendBlockTerms(SmallVectorImpl<InstructionT *> &terms,
  //                              BlockT &block);
  // static void appendBlockTerms(SmallVectorImpl<const InstructionT *> &terms,
  //                              const BlockT &block);
  //
  // static bool comesBefore(const InstructionT *lhs, const InstructionT *rhs);
  // static bool isConstantOrUndefValuePhi(const InstructionT &Instr);
  // const BlockT *getDefBlock(const ValueRefT value) const;

  // Methods to print various objects.
  //
  // Printable print(BlockT *block) const;
  // Printable print(InstructionT *inst) const;
  // Printable print(ValueRefT value) const;
};
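
// Illustrative sketch (an assumption, not LLVM code): a specialization for a
// hypothetical toy IR. ToyFunction, ToyBlock and ToyInst are invented names;
// the real specializations cover LLVM IR and Machine IR.
//
//   template <> class GenericSSAContext<ToyFunction> {
//   public:
//     using BlockT = ToyBlock;
//     using InstructionT = ToyInst;
//     using ValueRefT = ToyInst *;
//     using ConstValueRefT = const ToyInst *;
//
//     static BlockT *getEntryBlock(ToyFunction &F) { return F.entry(); }
//     // ...plus the remaining members documented in the primary template.
//   };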
} // namespace llvm

#endif // LLVM_ADT_GENERICSSACONTEXT_H

//===- llvm/ADT/EnumeratedArray.h - Enumerated Array-------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines an array type that can be indexed using scoped enum
/// values.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ENUMERATEDARRAY_H
#define LLVM_ADT_ENUMERATEDARRAY_H

#include <cassert>
#include <iterator>

namespace llvm {

template <typename ValueType, typename Enumeration,
          Enumeration LargestEnum = Enumeration::Last, typename IndexType = int,
          IndexType Size = 1 + static_cast<IndexType>(LargestEnum)>
class EnumeratedArray {
public:
  using iterator = ValueType *;
  using const_iterator = const ValueType *;

  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
  using reverse_iterator = std::reverse_iterator<iterator>;

  using value_type = ValueType;
  using reference = ValueType &;
  using const_reference = const ValueType &;
  using pointer = ValueType *;
  using const_pointer = const ValueType *;

  EnumeratedArray() = default;
  EnumeratedArray(ValueType V) {
    for (IndexType IX = 0; IX < Size; ++IX) {
      Underlying[IX] = V;
    }
  }
  EnumeratedArray(std::initializer_list<ValueType> Init) {
    assert(Init.size() == Size && "Incorrect initializer size");
    for (IndexType IX = 0; IX < Size; ++IX) {
      Underlying[IX] = *(Init.begin() + IX);
    }
  }

  const ValueType &operator[](Enumeration Index) const {
    auto IX = static_cast<IndexType>(Index);
    assert(IX >= 0 && IX < Size && "Index is out of bounds.");
    return Underlying[IX];
  }
  ValueType &operator[](Enumeration Index) {
    return const_cast<ValueType &>(
        static_cast<const EnumeratedArray<ValueType, Enumeration, LargestEnum,
                                          IndexType, Size> &>(*this)[Index]);
  }
  IndexType size() const { return Size; }
  bool empty() const { return size() == 0; }

  iterator begin() { return Underlying; }
  const_iterator begin() const { return Underlying; }

  iterator end() { return begin() + size(); }
  const_iterator end() const { return begin() + size(); }

  reverse_iterator rbegin() { return reverse_iterator(end()); }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  reverse_iterator rend() { return reverse_iterator(begin()); }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

private:
  ValueType Underlying[Size];
};
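
// Example usage (illustrative; the enum is invented). The enumeration must
// either expose a Last enumerator or pass LargestEnum explicitly:
//
//   enum class Channel { R, G, B, Last = B };
//   EnumeratedArray<unsigned, Channel> Histogram(0); // three zeroed slots
//   Histogram[Channel::G] += 1;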

} // namespace llvm

#endif // LLVM_ADT_ENUMERATEDARRAY_H

//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ARRAYREF_H
#define LLVM_ADT_ARRAYREF_H

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <type_traits>
#include <vector>

namespace llvm {
  template<typename T> class [[nodiscard]] MutableArrayRef;

  /// ArrayRef - Represent a constant reference to an array (0 or more elements
  /// consecutively in memory), i.e. a start pointer and a length.  It allows
  /// various APIs to take consecutive elements easily and conveniently.
  ///
  /// This class does not own the underlying data, it is expected to be used in
  /// situations where the data resides in some other buffer, whose lifetime
  /// extends past that of the ArrayRef. For this reason, it is not in general
  /// safe to store an ArrayRef.
  ///
  /// This is intended to be trivially copyable, so it should be passed by
  /// value.
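  ///
  /// Example (illustrative):
  /// \code
  ///   void printAll(ArrayRef<int> Vals);
  ///   std::vector<int> V = {1, 2, 3};
  ///   printAll(V);         // implicit conversion from std::vector
  ///   printAll({4, 5, 6}); // from an initializer list; remember that
  ///                        // ArrayRef performs no lifetime extension
  /// \endcode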
  template<typename T>
  class LLVM_GSL_POINTER [[nodiscard]] ArrayRef {
  public:
    using value_type = T;
    using pointer = value_type *;
    using const_pointer = const value_type *;
    using reference = value_type &;
    using const_reference = const value_type &;
    using iterator = const_pointer;
    using const_iterator = const_pointer;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;
    using size_type = size_t;
    using difference_type = ptrdiff_t;

  private:
    /// The start of the array, in an external buffer.
    const T *Data = nullptr;

    /// The number of elements.
    size_type Length = 0;

  public:
    /// @name Constructors
    /// @{

    /// Construct an empty ArrayRef.
    /*implicit*/ ArrayRef() = default;

    /// Construct an empty ArrayRef from std::nullopt.
    /*implicit*/ ArrayRef(std::nullopt_t) {}

    /// Construct an ArrayRef from a single element.
    /*implicit*/ ArrayRef(const T &OneElt)
      : Data(&OneElt), Length(1) {}

    /// Construct an ArrayRef from a pointer and length.
    constexpr /*implicit*/ ArrayRef(const T *data, size_t length)
        : Data(data), Length(length) {}

    /// Construct an ArrayRef from a range.
    constexpr ArrayRef(const T *begin, const T *end)
        : Data(begin), Length(end - begin) {
      assert(begin <= end);
    }

    /// Construct an ArrayRef from a SmallVector. This is templated in order to
    /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
    /// copy-construct an ArrayRef.
    template<typename U>
    /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
      : Data(Vec.data()), Length(Vec.size()) {
    }

    /// Construct an ArrayRef from a std::vector.
    template<typename A>
    /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
      : Data(Vec.data()), Length(Vec.size()) {}

    /// Construct an ArrayRef from a std::array
    template <size_t N>
    /*implicit*/ constexpr ArrayRef(const std::array<T, N> &Arr)
        : Data(Arr.data()), Length(N) {}

    /// Construct an ArrayRef from a C array.
    template <size_t N>
    /*implicit*/ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}

    /// Construct an ArrayRef from a std::initializer_list.
#if LLVM_GNUC_PREREQ(9, 0, 0)
// Disable gcc's warning in this constructor as it generates an enormous amount
// of messages. Anyone using ArrayRef should already be aware of the fact that
// it does not do lifetime extension.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winit-list-lifetime"
#endif
    constexpr /*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
        : Data(Vec.begin() == Vec.end() ? (T *)nullptr : Vec.begin()),
          Length(Vec.size()) {}
#if LLVM_GNUC_PREREQ(9, 0, 0)
#pragma GCC diagnostic pop
#endif

    /// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
    /// ensure that only ArrayRefs of pointers can be converted.
    template <typename U>
    ArrayRef(const ArrayRef<U *> &A,
             std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
                 * = nullptr)
        : Data(A.data()), Length(A.size()) {}

    /// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
    /// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
    /// whenever we copy-construct an ArrayRef.
    template <typename U, typename DummyT>
    /*implicit*/ ArrayRef(
        const SmallVectorTemplateCommon<U *, DummyT> &Vec,
        std::enable_if_t<std::is_convertible<U *const *, T const *>::value> * =
            nullptr)
        : Data(Vec.data()), Length(Vec.size()) {}

    /// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
    /// to ensure that only vectors of pointers can be converted.
    template <typename U, typename A>
    ArrayRef(const std::vector<U *, A> &Vec,
             std::enable_if_t<std::is_convertible<U *const *, T const *>::value>
                 * = nullptr)
        : Data(Vec.data()), Length(Vec.size()) {}

    /// @}
    /// @name Simple Operations
    /// @{

    iterator begin() const { return Data; }
    iterator end() const { return Data + Length; }

    reverse_iterator rbegin() const { return reverse_iterator(end()); }
    reverse_iterator rend() const { return reverse_iterator(begin()); }

    /// empty - Check if the array is empty.
    bool empty() const { return Length == 0; }

    const T *data() const { return Data; }

    /// size - Get the array size.
    size_t size() const { return Length; }

    /// front - Get the first element.
    const T &front() const {
      assert(!empty());
      return Data[0];
    }

    /// back - Get the last element.
    const T &back() const {
      assert(!empty());
      return Data[Length-1];
    }

    // copy - Allocate copy in Allocator and return ArrayRef<T> to it.
    template <typename Allocator> MutableArrayRef<T> copy(Allocator &A) {
      T *Buff = A.template Allocate<T>(Length);
      std::uninitialized_copy(begin(), end(), Buff);
      return MutableArrayRef<T>(Buff, Length);
    }

    /// equals - Check for element-wise equality.
    bool equals(ArrayRef RHS) const {
      if (Length != RHS.Length)
        return false;
      return std::equal(begin(), end(), RHS.begin());
    }

    /// slice(n, m) - Chop off the first N elements of the array, and keep M
    /// elements in the array.
    ArrayRef<T> slice(size_t N, size_t M) const {
      assert(N+M <= size() && "Invalid specifier");
      return ArrayRef<T>(data()+N, M);
    }

    /// slice(n) - Chop off the first N elements of the array.
    ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }

    /// Drop the first \p N elements of the array.
    ArrayRef<T> drop_front(size_t N = 1) const {
      assert(size() >= N && "Dropping more elements than exist");
      return slice(N, size() - N);
    }

    /// Drop the last \p N elements of the array.
    ArrayRef<T> drop_back(size_t N = 1) const {
      assert(size() >= N && "Dropping more elements than exist");
      return slice(0, size() - N);
    }

    /// Return a copy of *this with the first N elements satisfying the
    /// given predicate removed.
    template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
      return ArrayRef<T>(find_if_not(*this, Pred), end());
    }

    /// Return a copy of *this with the first N elements not satisfying
    /// the given predicate removed.
    template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
      return ArrayRef<T>(find_if(*this, Pred), end());
    }

    /// Return a copy of *this with only the first \p N elements.
    ArrayRef<T> take_front(size_t N = 1) const {
      if (N >= size())
        return *this;
      return drop_back(size() - N);
    }

    /// Return a copy of *this with only the last \p N elements.
    ArrayRef<T> take_back(size_t N = 1) const {
      if (N >= size())
        return *this;
      return drop_front(size() - N);
    }

    /// Return the first N elements of this Array that satisfy the given
    /// predicate.
    template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
      return ArrayRef<T>(begin(), find_if_not(*this, Pred));
    }

    /// Return the first N elements of this Array that don't satisfy the
    /// given predicate.
    template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
      return ArrayRef<T>(begin(), find_if(*this, Pred));
    }

    /// @}
    /// @name Operator Overloads
    /// @{
    const T &operator[](size_t Index) const {
      assert(Index < Length && "Invalid index!");
      return Data[Index];
    }

    /// Disallow accidental assignment from a temporary.
    ///
    /// The declaration here is extra complicated so that "arrayRef = {}"
    /// continues to select the move assignment operator.
    template <typename U>
    std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &
    operator=(U &&Temporary) = delete;

    /// Disallow accidental assignment from a temporary.
    ///
    /// The declaration here is extra complicated so that "arrayRef = {}"
    /// continues to select the move assignment operator.
    template <typename U>
    std::enable_if_t<std::is_same<U, T>::value, ArrayRef<T>> &
    operator=(std::initializer_list<U>) = delete;

    /// @}
    /// @name Expensive Operations
    /// @{
    std::vector<T> vec() const {
      return std::vector<T>(Data, Data+Length);
    }

    /// @}
    /// @name Conversion operators
    /// @{
    operator std::vector<T>() const {
      return std::vector<T>(Data, Data+Length);
    }

    /// @}
  };

  /// MutableArrayRef - Represent a mutable reference to an array (0 or more
  /// elements consecutively in memory), i.e. a start pointer and a length.  It
  /// allows various APIs to take and modify consecutive elements easily and
  /// conveniently.
  ///
  /// This class does not own the underlying data, it is expected to be used in
  /// situations where the data resides in some other buffer, whose lifetime
  /// extends past that of the MutableArrayRef. For this reason, it is not in
  /// general safe to store a MutableArrayRef.
  ///
  /// This is intended to be trivially copyable, so it should be passed by
  /// value.
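  ///
  /// Example (illustrative):
  /// \code
  ///   void zeroFill(MutableArrayRef<int> Vals) {
  ///     for (int &V : Vals)
  ///       V = 0; // elements are writable, unlike with ArrayRef
  ///   }
  /// \endcode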
  template<typename T>
  class [[nodiscard]] MutableArrayRef : public ArrayRef<T> {
  public:
    using value_type = T;
    using pointer = value_type *;
    using const_pointer = const value_type *;
    using reference = value_type &;
    using const_reference = const value_type &;
    using iterator = pointer;
    using const_iterator = const_pointer;
    using reverse_iterator = std::reverse_iterator<iterator>;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;
    using size_type = size_t;
    using difference_type = ptrdiff_t;

    /// Construct an empty MutableArrayRef.
    /*implicit*/ MutableArrayRef() = default;

    /// Construct an empty MutableArrayRef from std::nullopt.
    /*implicit*/ MutableArrayRef(std::nullopt_t) : ArrayRef<T>() {}

    /// Construct a MutableArrayRef from a single element.
    /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}

    /// Construct a MutableArrayRef from a pointer and length.
    /*implicit*/ MutableArrayRef(T *data, size_t length)
      : ArrayRef<T>(data, length) {}

    /// Construct a MutableArrayRef from a range.
    MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}

    /// Construct a MutableArrayRef from a SmallVector.
    /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
    : ArrayRef<T>(Vec) {}

    /// Construct a MutableArrayRef from a std::vector.
    /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
    : ArrayRef<T>(Vec) {}

    /// Construct a MutableArrayRef from a std::array
    template <size_t N>
    /*implicit*/ constexpr MutableArrayRef(std::array<T, N> &Arr)
        : ArrayRef<T>(Arr) {}

    /// Construct a MutableArrayRef from a C array.
    template <size_t N>
    /*implicit*/ constexpr MutableArrayRef(T (&Arr)[N]) : ArrayRef<T>(Arr) {}

    T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }

    iterator begin() const { return data(); }
    iterator end() const { return data() + this->size(); }

    reverse_iterator rbegin() const { return reverse_iterator(end()); }
    reverse_iterator rend() const { return reverse_iterator(begin()); }

    /// front - Get the first element.
    T &front() const {
      assert(!this->empty());
      return data()[0];
    }

    /// back - Get the last element.
    T &back() const {
      assert(!this->empty());
      return data()[this->size()-1];
    }

    /// slice(n, m) - Chop off the first N elements of the array, and keep M
    /// elements in the array.
    MutableArrayRef<T> slice(size_t N, size_t M) const {
      assert(N + M <= this->size() && "Invalid specifier");
      return MutableArrayRef<T>(this->data() + N, M);
    }

    /// slice(n) - Chop off the first N elements of the array.
    MutableArrayRef<T> slice(size_t N) const {
      return slice(N, this->size() - N);
    }

    /// Drop the first \p N elements of the array.
    MutableArrayRef<T> drop_front(size_t N = 1) const {
      assert(this->size() >= N && "Dropping more elements than exist");
      return slice(N, this->size() - N);
    }

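    /// Drop the last \p N elements of the array.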
    MutableArrayRef<T> drop_back(size_t N = 1) const {
      assert(this->size() >= N && "Dropping more elements than exist");
      return slice(0, this->size() - N);
    }

    /// Return a copy of *this with the first N elements satisfying the
    /// given predicate removed.
    template <class PredicateT>
    MutableArrayRef<T> drop_while(PredicateT Pred) const {
      return MutableArrayRef<T>(find_if_not(*this, Pred), end());
    }

    /// Return a copy of *this with the first N elements not satisfying
    /// the given predicate removed.
    template <class PredicateT>
    MutableArrayRef<T> drop_until(PredicateT Pred) const {
      return MutableArrayRef<T>(find_if(*this, Pred), end());
    }

    /// Return a copy of *this with only the first \p N elements.
    MutableArrayRef<T> take_front(size_t N = 1) const {
      if (N >= this->size())
        return *this;
      return drop_back(this->size() - N);
    }

    /// Return a copy of *this with only the last \p N elements.
    MutableArrayRef<T> take_back(size_t N = 1) const {
      if (N >= this->size())
        return *this;
      return drop_front(this->size() - N);
    }

    /// Return the first N elements of this Array that satisfy the given
    /// predicate.
    template <class PredicateT>
    MutableArrayRef<T> take_while(PredicateT Pred) const {
      return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
    }

    /// Return the first N elements of this Array that don't satisfy the
    /// given predicate.
    template <class PredicateT>
    MutableArrayRef<T> take_until(PredicateT Pred) const {
      return MutableArrayRef<T>(begin(), find_if(*this, Pred));
    }

    /// @}
    /// @name Operator Overloads
    /// @{
    T &operator[](size_t Index) const {
      assert(Index < this->size() && "Invalid index!");
      return data()[Index];
    }
  };

  /// This is a MutableArrayRef that owns its array.
  template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
  public:
    OwningArrayRef() = default;
    OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}

    OwningArrayRef(ArrayRef<T> Data)
        : MutableArrayRef<T>(new T[Data.size()], Data.size()) {
      std::copy(Data.begin(), Data.end(), this->begin());
    }

    OwningArrayRef(OwningArrayRef &&Other) { *this = std::move(Other); }

    OwningArrayRef &operator=(OwningArrayRef &&Other) {
      delete[] this->data();
      this->MutableArrayRef<T>::operator=(Other);
      Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
      return *this;
    }

    ~OwningArrayRef() { delete[] this->data(); }
  };
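
  // Example (illustrative): unlike MutableArrayRef, OwningArrayRef allocates
  // and owns its buffer, releasing it in the destructor.
  //
  //   OwningArrayRef<int> Buf(16); // allocates new int[16]
  //   Buf[0] = 42;                 // freed when Buf goes out of scope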

  /// @name ArrayRef Deduction guides
  /// @{
  /// Deduction guide to construct an ArrayRef from a single element.
  template <typename T> ArrayRef(const T &OneElt) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a pointer and length
  template <typename T> ArrayRef(const T *data, size_t length) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a range
  template <typename T> ArrayRef(const T *data, const T *end) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a SmallVector
  template <typename T> ArrayRef(const SmallVectorImpl<T> &Vec) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a SmallVector
  template <typename T, unsigned N>
  ArrayRef(const SmallVector<T, N> &Vec) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a std::vector
  template <typename T> ArrayRef(const std::vector<T> &Vec) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a std::array
  template <typename T, std::size_t N>
  ArrayRef(const std::array<T, N> &Vec) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from an ArrayRef (const)
  template <typename T> ArrayRef(const ArrayRef<T> &Vec) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from an ArrayRef
  template <typename T> ArrayRef(ArrayRef<T> &Vec) -> ArrayRef<T>;

  /// Deduction guide to construct an ArrayRef from a C array.
  template <typename T, size_t N> ArrayRef(const T (&Arr)[N]) -> ArrayRef<T>;

  /// @}
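
  // Example (illustrative): with the guides above, callers can rely on class
  // template argument deduction instead of the deprecated makeArrayRef
  // helpers declared below.
  //
  //   int Buf[4] = {0, 1, 2, 3};
  //   ArrayRef A(Buf); // deduces ArrayRef<int>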

  /// @name ArrayRef Convenience constructors
  /// @{
  /// Construct an ArrayRef from a single element.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const T &OneElt) {
    return OneElt;
  }

  /// Construct an ArrayRef from a pointer and length.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const T *data, size_t length) {
    return ArrayRef<T>(data, length);
  }

  /// Construct an ArrayRef from a range.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
    return ArrayRef<T>(begin, end);
  }

  /// Construct an ArrayRef from a SmallVector.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
    return Vec;
  }

  /// Construct an ArrayRef from a SmallVector.
  template <typename T, unsigned N>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
    return Vec;
  }

  /// Construct an ArrayRef from a std::vector.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
    return Vec;
  }

  /// Construct an ArrayRef from a std::array.
  template <typename T, std::size_t N>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const std::array<T, N> &Arr) {
    return Arr;
  }

  /// Construct an ArrayRef from an ArrayRef (no-op) (const)
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
    return Vec;
  }

  /// Construct an ArrayRef from an ArrayRef (no-op)
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
    return Vec;
  }

  /// Construct an ArrayRef from a C array.
  template <typename T, size_t N>
  LLVM_DEPRECATED("Use deduction guide instead", "ArrayRef")
  ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
    return ArrayRef<T>(Arr);
  }

  /// @name MutableArrayRef Deduction guides
  /// @{
  /// Deduction guide to construct a `MutableArrayRef` from a single element
  template <class T> MutableArrayRef(T &OneElt) -> MutableArrayRef<T>;

  /// Deduction guide to construct a `MutableArrayRef` from a pointer and
  /// length.
  template <class T>
  MutableArrayRef(T *data, size_t length) -> MutableArrayRef<T>;

  /// Deduction guide to construct a `MutableArrayRef` from a `SmallVector`.
  template <class T>
  MutableArrayRef(SmallVectorImpl<T> &Vec) -> MutableArrayRef<T>;

  template <class T, unsigned N>
  MutableArrayRef(SmallVector<T, N> &Vec) -> MutableArrayRef<T>;

  /// Deduction guide to construct a `MutableArrayRef` from a `std::vector`.
  template <class T> MutableArrayRef(std::vector<T> &Vec) -> MutableArrayRef<T>;

  /// Deduction guide to construct a `MutableArrayRef` from a `std::array`.
  template <class T, std::size_t N>
  MutableArrayRef(std::array<T, N> &Vec) -> MutableArrayRef<T>;

  /// Deduction guide to construct a `MutableArrayRef` from a C array.
  template <typename T, size_t N>
  MutableArrayRef(T (&Arr)[N]) -> MutableArrayRef<T>;

  /// @}

  /// Construct a MutableArrayRef from a single element.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(T &OneElt) {
    return OneElt;
  }

  /// Construct a MutableArrayRef from a pointer and length.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(T *data, size_t length) {
    return MutableArrayRef<T>(data, length);
  }

  /// Construct a MutableArrayRef from a SmallVector.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(SmallVectorImpl<T> &Vec) {
    return Vec;
  }

  /// Construct a MutableArrayRef from a SmallVector.
  template <typename T, unsigned N>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(SmallVector<T, N> &Vec) {
    return Vec;
  }

  /// Construct a MutableArrayRef from a std::vector.
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(std::vector<T> &Vec) {
    return Vec;
  }

  /// Construct a MutableArrayRef from a std::array.
  template <typename T, std::size_t N>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(std::array<T, N> &Arr) {
    return Arr;
  }

  /// Construct a MutableArrayRef from a MutableArrayRef (no-op) (const)
  template <typename T>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(const MutableArrayRef<T> &Vec) {
    return Vec;
  }

  /// Construct a MutableArrayRef from a C array.
  template <typename T, size_t N>
  LLVM_DEPRECATED("Use deduction guide instead", "MutableArrayRef")
  MutableArrayRef<T> makeMutableArrayRef(T (&Arr)[N]) {
    return MutableArrayRef<T>(Arr);
  }

  /// @}
  /// @name ArrayRef Comparison Operators
  /// @{

  template<typename T>
  inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
    return LHS.equals(RHS);
  }

  template <typename T>
  inline bool operator==(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
    return ArrayRef<T>(LHS).equals(RHS);
  }

  template <typename T>
  inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
    return !(LHS == RHS);
  }

  template <typename T>
  inline bool operator!=(SmallVectorImpl<T> &LHS, ArrayRef<T> RHS) {
    return !(LHS == RHS);
  }

  /// @}

  template <typename T> hash_code hash_value(ArrayRef<T> S) {
    return hash_combine_range(S.begin(), S.end());
  }

  // Provide DenseMapInfo for ArrayRefs.
  template <typename T> struct DenseMapInfo<ArrayRef<T>, void> {
    static inline ArrayRef<T> getEmptyKey() {
      return ArrayRef<T>(
          reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)), size_t(0));
    }

    static inline ArrayRef<T> getTombstoneKey() {
      return ArrayRef<T>(
          reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)), size_t(0));
    }

    static unsigned getHashValue(ArrayRef<T> Val) {
      assert(Val.data() != getEmptyKey().data() &&
             "Cannot hash the empty key!");
      assert(Val.data() != getTombstoneKey().data() &&
             "Cannot hash the tombstone key!");
      return (unsigned)(hash_value(Val));
    }

    static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
      if (RHS.data() == getEmptyKey().data())
        return LHS.data() == getEmptyKey().data();
      if (RHS.data() == getTombstoneKey().data())
        return LHS.data() == getTombstoneKey().data();
      return LHS == RHS;
    }
  };

} // end namespace llvm

#endif // LLVM_ADT_ARRAYREF_H

//===-- llvm/ADT/Triple.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This header is deprecated in favour of
/// `llvm/TargetParser/Triple.h`.
///
//===----------------------------------------------------------------------===//

#include "llvm/TargetParser/Triple.h"

//===- llvm/ADT/ADL.h - Argument dependent lookup utilities -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ADL_H
#define LLVM_ADT_ADL_H

#include <type_traits>
#include <iterator>
#include <utility>

namespace llvm {

// Only used by compiler if both template types are the same.  Useful when
// using SFINAE to test for the existence of member functions.
template <typename T, T> struct SameType;

namespace adl_detail {

using std::begin;

template <typename RangeT>
constexpr auto begin_impl(RangeT &&range)
    -> decltype(begin(std::forward<RangeT>(range))) {
  return begin(std::forward<RangeT>(range));
}

using std::end;

template <typename RangeT>
constexpr auto end_impl(RangeT &&range)
    -> decltype(end(std::forward<RangeT>(range))) {
  return end(std::forward<RangeT>(range));
}

using std::swap;

template <typename T>
constexpr void swap_impl(T &&lhs,
                         T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
                                                         std::declval<T>()))) {
  swap(std::forward<T>(lhs), std::forward<T>(rhs));
}

using std::size;

template <typename RangeT>
constexpr auto size_impl(RangeT &&range)
    -> decltype(size(std::forward<RangeT>(range))) {
  return size(std::forward<RangeT>(range));
}

} // end namespace adl_detail

/// Returns the begin iterator to \p range using `std::begin` and
/// function found through Argument-Dependent Lookup (ADL).
template <typename RangeT>
constexpr auto adl_begin(RangeT &&range)
    -> decltype(adl_detail::begin_impl(std::forward<RangeT>(range))) {
  return adl_detail::begin_impl(std::forward<RangeT>(range));
}

/// Returns the end iterator to \p range using `std::end` and
/// functions found through Argument-Dependent Lookup (ADL).
template <typename RangeT>
constexpr auto adl_end(RangeT &&range)
    -> decltype(adl_detail::end_impl(std::forward<RangeT>(range))) {
  return adl_detail::end_impl(std::forward<RangeT>(range));
}

/// Swaps \p lhs with \p rhs using `std::swap` and functions found through
/// Argument-Dependent Lookup (ADL).
template <typename T>
constexpr void adl_swap(T &&lhs, T &&rhs) noexcept(
    noexcept(adl_detail::swap_impl(std::declval<T>(), std::declval<T>()))) {
  adl_detail::swap_impl(std::forward<T>(lhs), std::forward<T>(rhs));
}

/// Returns the size of \p range using `std::size` and functions found through
/// Argument-Dependent Lookup (ADL).
template <typename RangeT>
constexpr auto adl_size(RangeT &&range)
    -> decltype(adl_detail::size_impl(std::forward<RangeT>(range))) {
  return adl_detail::size_impl(std::forward<RangeT>(range));
}
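
// Example (illustrative): adl_swap dispatches to a swap function found in the
// argument's namespace. The namespace `toy` and its swap are invented for
// illustration.
//
//   namespace toy {
//   struct Buffer { int *Data; };
//   void swap(Buffer &L, Buffer &R) { std::swap(L.Data, R.Data); }
//   } // namespace toy
//
//   toy::Buffer A{nullptr}, B{nullptr};
//   llvm::adl_swap(A, B); // resolves to toy::swap via ADL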

namespace detail {

template <typename RangeT>
using IterOfRange = decltype(adl_begin(std::declval<RangeT &>()));

template <typename RangeT>
using ValueOfRange =
    std::remove_reference_t<decltype(*adl_begin(std::declval<RangeT &>()))>;

} // namespace detail
} // namespace llvm

#endif // LLVM_ADT_ADL_H

//===-- llvm/ADT/edit_distance.h - Array edit distance function --- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines a Levenshtein distance function that works for any two
/// sequences, with each element of each sequence being analogous to a character
/// in a string.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_EDIT_DISTANCE_H
#define LLVM_ADT_EDIT_DISTANCE_H

#include "llvm/ADT/ArrayRef.h"
#include <algorithm>

namespace llvm {

/// Determine the edit distance between two sequences.
///
/// \param FromArray the first sequence to compare.
///
/// \param ToArray the second sequence to compare.
///
/// \param Map A Functor to apply to each item of the sequences before
/// comparison.
///
/// \param AllowReplacements whether to allow element replacements (change one
/// element into another) as a single operation, rather than as two operations
/// (an insertion and a removal).
///
/// \param MaxEditDistance If non-zero, the maximum edit distance that this
/// routine is allowed to compute. If the edit distance will exceed that
/// maximum, returns \c MaxEditDistance+1.
///
/// \returns the minimum number of element insertions, removals, or (if
/// \p AllowReplacements is \c true) replacements needed to transform one of
/// the given sequences into the other. If zero, the sequences are identical.
template <typename T, typename Functor>
unsigned ComputeMappedEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
                                   Functor Map, bool AllowReplacements = true,
                                   unsigned MaxEditDistance = 0) {
  // The algorithm implemented below is the "classic"
  // dynamic-programming algorithm for computing the Levenshtein
  // distance, which is described here:
  //
  //   http://en.wikipedia.org/wiki/Levenshtein_distance
  //
  // Although the algorithm is typically described using an m x n
  // array, only one row plus one element are used at a time, so this
  // implementation just keeps one vector for the row.  To update one entry,
  // only the entries to the left, top, and top-left are needed.  The left
  // entry is in Row[x-1], the top entry is what's in Row[x] from the last
  // iteration, and the top-left entry is stored in Previous.
  typename ArrayRef<T>::size_type m = FromArray.size();
  typename ArrayRef<T>::size_type n = ToArray.size();

  if (MaxEditDistance) {
    // If the difference in size between the 2 arrays is larger than the max
    // distance allowed, we can bail out as we will always need at least
    // MaxEditDistance insertions or removals.
    typename ArrayRef<T>::size_type AbsDiff = m > n ? m - n : n - m;
    if (AbsDiff > MaxEditDistance)
      return MaxEditDistance + 1;
  }

  SmallVector<unsigned, 64> Row(n + 1);
  for (unsigned i = 1; i < Row.size(); ++i)
    Row[i] = i;

  for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
    Row[0] = y;
    unsigned BestThisRow = Row[0];

    unsigned Previous = y - 1;
    const auto &CurItem = Map(FromArray[y - 1]);
    for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
      int OldRow = Row[x];
      if (AllowReplacements) {
        Row[x] = std::min(Previous + (CurItem == Map(ToArray[x - 1]) ? 0u : 1u),
                          std::min(Row[x - 1], Row[x]) + 1);
      }
      else {
        if (CurItem == Map(ToArray[x - 1]))
          Row[x] = Previous;
        else Row[x] = std::min(Row[x-1], Row[x]) + 1;
      }
      Previous = OldRow;
      BestThisRow = std::min(BestThisRow, Row[x]);
    }

    if (MaxEditDistance && BestThisRow > MaxEditDistance)
      return MaxEditDistance + 1;
  }

  unsigned Result = Row[n];
  return Result;
}

template <typename T>
unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
                             bool AllowReplacements = true,
                             unsigned MaxEditDistance = 0) {
  return ComputeMappedEditDistance(
      FromArray, ToArray, [](const T &X) -> const T & { return X; },
      AllowReplacements, MaxEditDistance);
}
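
// Example (illustrative): the classic "kitten" -> "sitting" pair has edit
// distance 3 (two replacements and one insertion).
//
//   char From[] = {'k', 'i', 't', 't', 'e', 'n'};
//   char To[] = {'s', 'i', 't', 't', 'i', 'n', 'g'};
//   unsigned D = ComputeEditDistance(ArrayRef<char>(From), ArrayRef<char>(To));
//   // D == 3: 'k'->'s', 'e'->'i', and append 'g'.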

} // End llvm namespace

#endif

//===- iterator_range.h - A range adaptor for iterators ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This provides a very simple, boring adaptor for a begin and end iterator
/// into a range type. This should be used to build range views that work well
/// with range based for loops and range based constructors.
///
/// Note that code here follows more standards-based coding conventions as it
/// is mirroring proposed interfaces for standardization.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ITERATOR_RANGE_H
#define LLVM_ADT_ITERATOR_RANGE_H

#include "llvm/ADT/ADL.h"
#include <type_traits>
#include <utility>

namespace llvm {

template <typename From, typename To, typename = void>
struct explicitly_convertible : std::false_type {};

template <typename From, typename To>
struct explicitly_convertible<
    From, To,
    std::void_t<decltype(static_cast<To>(
        std::declval<std::add_rvalue_reference_t<From>>()))>> : std::true_type {
};

/// A range adaptor for a pair of iterators.
///
/// This just wraps two iterators into a range-compatible interface. Nothing
/// fancy at all.
template <typename IteratorT>
class iterator_range {
  IteratorT begin_iterator, end_iterator;

public:
#if __GNUC__ == 7
  // Be careful not to break gcc-7 on the mlir target.
  // See https://github.com/llvm/llvm-project/issues/63843
  template <typename Container>
#else
  template <typename Container,
            std::enable_if_t<explicitly_convertible<
                detail::IterOfRange<Container>, IteratorT>::value> * = nullptr>
#endif
  iterator_range(Container &&c)
      : begin_iterator(adl_begin(std::forward<Container>(c))),
        end_iterator(adl_end(std::forward<Container>(c))) {
  }
  iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
      : begin_iterator(std::move(begin_iterator)),
        end_iterator(std::move(end_iterator)) {}

  IteratorT begin() const { return begin_iterator; }
  IteratorT end() const { return end_iterator; }
  bool empty() const { return begin_iterator == end_iterator; }
};

template <typename Container>
iterator_range(Container &&) -> iterator_range<detail::IterOfRange<Container>>;

/// Convenience function for iterating over sub-ranges.
///
/// This provides a bit of syntactic sugar to make using sub-ranges
/// in for loops a bit easier. Analogous to std::make_pair().
template <class T> iterator_range<T> make_range(T x, T y) {
  return iterator_range<T>(std::move(x), std::move(y));
}

template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
  return iterator_range<T>(std::move(p.first), std::move(p.second));
}
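
// Example (illustrative): iterate over a sub-range of a container. process()
// is a placeholder for the caller's own logic.
//
//   std::vector<int> V = {1, 2, 3, 4};
//   for (int X : make_range(V.begin() + 1, V.end() - 1))
//     process(X); // visits 2 and 3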

} // namespace llvm

#endif

//===- StringRef.h - Constant String Reference Wrapper ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STRINGREF_H
#define LLVM_ADT_STRINGREF_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <limits>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>

namespace llvm {

  class APInt;
  class hash_code;
  template <typename T> class SmallVectorImpl;
  class StringRef;

  /// Helper functions for StringRef::getAsInteger.
  bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
                            unsigned long long &Result);

  bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);

  bool consumeUnsignedInteger(StringRef &Str, unsigned Radix,
                              unsigned long long &Result);
  bool consumeSignedInteger(StringRef &Str, unsigned Radix, long long &Result);

  /// StringRef - Represent a constant reference to a string, i.e. a character
  /// array and a length, which need not be null terminated.
  ///
  /// This class does not own the string data, it is expected to be used in
  /// situations where the character data resides in some other buffer, whose
  /// lifetime extends past that of the StringRef. For this reason, it is not in
  /// general safe to store a StringRef.
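  ///
  /// Example (illustrative):
  /// \code
  ///   StringRef S("hello world");
  ///   StringRef Hello = S.take_front(5); // "hello"; no copy is made
  ///   bool Same = Hello.equals("hello"); // true
  /// \endcode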
  class LLVM_GSL_POINTER StringRef {
  public:
    static constexpr size_t npos = ~size_t(0);

    using iterator = const char *;
    using const_iterator = const char *;
    using size_type = size_t;

  private:
    /// The start of the string, in an external buffer.
    const char *Data = nullptr;

    /// The length of the string.
    size_t Length = 0;

    // Workaround memcmp issue with null pointers (undefined behavior)
    // by providing a specialized version
    static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
      if (Length == 0) { return 0; }
      return ::memcmp(Lhs,Rhs,Length);
    }

  public:
    /// @name Constructors
    /// @{

    /// Construct an empty string ref.
    /*implicit*/ StringRef() = default;

    /// Disable conversion from nullptr.  This prevents things like
    /// if (S == nullptr)
    StringRef(std::nullptr_t) = delete;

    /// Construct a string ref from a cstring.
    /*implicit*/ constexpr StringRef(const char *Str)
        : Data(Str), Length(Str ?
    // GCC 7 doesn't have constexpr char_traits. Fall back to __builtin_strlen.
#if defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE < 8
                                __builtin_strlen(Str)
#else
                                std::char_traits<char>::length(Str)
#endif
                                : 0) {
    }

    /// Construct a string ref from a pointer and length.
    /*implicit*/ constexpr StringRef(const char *data, size_t length)
        : Data(data), Length(length) {}

    /// Construct a string ref from an std::string.
    /*implicit*/ StringRef(const std::string &Str)
      : Data(Str.data()), Length(Str.length()) {}

    /// Construct a string ref from an std::string_view.
    /*implicit*/ constexpr StringRef(std::string_view Str)
        : Data(Str.data()), Length(Str.size()) {}

    /// @}
    /// @name Iterators
    /// @{

    iterator begin() const { return Data; }

    iterator end() const { return Data + Length; }

    const unsigned char *bytes_begin() const {
      return reinterpret_cast<const unsigned char *>(begin());
    }
    const unsigned char *bytes_end() const {
      return reinterpret_cast<const unsigned char *>(end());
    }
    iterator_range<const unsigned char *> bytes() const {
      return make_range(bytes_begin(), bytes_end());
    }

    /// @}
    /// @name String Operations
    /// @{

    /// data - Get a pointer to the start of the string (which may not be null
    /// terminated).
    [[nodiscard]] const char *data() const { return Data; }

    /// empty - Check if the string is empty.
    [[nodiscard]] constexpr bool empty() const { return Length == 0; }

    /// size - Get the string size.
    [[nodiscard]] constexpr size_t size() const { return Length; }

    /// front - Get the first character in the string.
    [[nodiscard]] char front() const {
      assert(!empty());
      return Data[0];
    }

    /// back - Get the last character in the string.
    [[nodiscard]] char back() const {
      assert(!empty());
      return Data[Length-1];
    }

    /// copy - Allocate copy in Allocator and return StringRef to it.
    template <typename Allocator>
    [[nodiscard]] StringRef copy(Allocator &A) const {
      // Don't request a length 0 copy from the allocator.
      if (empty())
        return StringRef();
      char *S = A.template Allocate<char>(Length);
      std::copy(begin(), end(), S);
      return StringRef(S, Length);
    }

    /// equals - Check for string equality; this is more efficient than
    /// compare() when the relative ordering of unequal strings isn't needed.
    [[nodiscard]] bool equals(StringRef RHS) const {
      return (Length == RHS.Length &&
              compareMemory(Data, RHS.Data, RHS.Length) == 0);
    }

    /// Check for string equality, ignoring case.
    [[nodiscard]] bool equals_insensitive(StringRef RHS) const {
      return Length == RHS.Length && compare_insensitive(RHS) == 0;
    }

    /// compare - Compare two strings; the result is negative, zero, or positive
    /// if this string is lexicographically less than, equal to, or greater than
    /// the \p RHS.
    [[nodiscard]] int compare(StringRef RHS) const {
      // Check the prefix for a mismatch.
      if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length)))
        return Res < 0 ? -1 : 1;

      // Otherwise the prefixes match, so we only need to check the lengths.
      if (Length == RHS.Length)
        return 0;
      return Length < RHS.Length ? -1 : 1;
    }

    /// Compare two strings, ignoring case.
    [[nodiscard]] int compare_insensitive(StringRef RHS) const;

    /// compare_numeric - Compare two strings, treating sequences of digits as
    /// numbers.
    [[nodiscard]] int compare_numeric(StringRef RHS) const;

    /// Determine the edit distance between this string and another
    /// string.
    ///
    /// \param Other the string to compare this string against.
    ///
    /// \param AllowReplacements whether to allow character
    /// replacements (change one character into another) as a single
    /// operation, rather than as two operations (an insertion and a
    /// removal).
    ///
    /// \param MaxEditDistance If non-zero, the maximum edit distance that
    /// this routine is allowed to compute. If the edit distance will exceed
    /// that maximum, returns \c MaxEditDistance+1.
    ///
    /// \returns the minimum number of character insertions, removals,
    /// or (if \p AllowReplacements is \c true) replacements needed to
    /// transform one of the given strings into the other. If zero,
    /// the strings are identical.
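    ///
    /// For example, StringRef("flaw").edit_distance("lawn") returns 2:
    /// remove 'f', then append 'n'.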
    [[nodiscard]] unsigned edit_distance(StringRef Other,
                                         bool AllowReplacements = true,
                                         unsigned MaxEditDistance = 0) const;

    /// Determine the edit distance between this string and another string,
    /// ignoring case.
    [[nodiscard]] unsigned
    edit_distance_insensitive(StringRef Other, bool AllowReplacements = true,
                              unsigned MaxEditDistance = 0) const;

    /// str - Get the contents as an std::string.
    [[nodiscard]] std::string str() const {
      if (!Data) return std::string();
      return std::string(Data, Length);
    }

    /// @}
    /// @name Operator Overloads
    /// @{

    [[nodiscard]] char operator[](size_t Index) const {
      assert(Index < Length && "Invalid index!");
      return Data[Index];
    }

    /// Disallow accidental assignment from a temporary std::string.
    ///
    /// The declaration here is extra complicated so that `stringRef = {}`
    /// and `stringRef = "abc"` continue to select the move assignment operator.
    template <typename T>
    std::enable_if_t<std::is_same<T, std::string>::value, StringRef> &
    operator=(T &&Str) = delete;

    /// @}
    /// @name Type Conversions
    /// @{

    operator std::string_view() const {
      return std::string_view(data(), size());
    }

    /// @}
    /// @name String Predicates
    /// @{

    /// Check if this string starts with the given \p Prefix.
    [[nodiscard]] bool starts_with(StringRef Prefix) const {
      return Length >= Prefix.Length &&
             compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
    }
    [[nodiscard]] bool startswith(StringRef Prefix) const {
      return starts_with(Prefix);
    }

    /// Check if this string starts with the given \p Prefix, ignoring case.
    [[nodiscard]] bool starts_with_insensitive(StringRef Prefix) const;
    [[nodiscard]] LLVM_DEPRECATED(
        "Use starts_with_insensitive instead",
        "starts_with_insensitive") bool startswith_insensitive(StringRef Prefix)
        const {
      return starts_with_insensitive(Prefix);
    }

    /// Check if this string ends with the given \p Suffix.
    [[nodiscard]] bool ends_with(StringRef Suffix) const {
      return Length >= Suffix.Length &&
             compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) ==
                 0;
    }
    [[nodiscard]] bool endswith(StringRef Suffix) const {
      return ends_with(Suffix);
    }

    /// Check if this string ends with the given \p Suffix, ignoring case.
    [[nodiscard]] bool ends_with_insensitive(StringRef Suffix) const;
    [[nodiscard]] LLVM_DEPRECATED(
        "Use ends_with_insensitive instead",
        "ends_with_insensitive") bool endswith_insensitive(StringRef Suffix)
        const {
      return ends_with_insensitive(Suffix);
    }

    /// @}
    /// @name String Searching
    /// @{

    /// Search for the first character \p C in the string.
    ///
    /// \returns The index of the first occurrence of \p C, or npos if not
    /// found.
    [[nodiscard]] size_t find(char C, size_t From = 0) const {
      return std::string_view(*this).find(C, From);
    }

    /// Search for the first character \p C in the string, ignoring case.
    ///
    /// \returns The index of the first occurrence of \p C, or npos if not
    /// found.
    [[nodiscard]] size_t find_insensitive(char C, size_t From = 0) const;

    /// Search for the first character satisfying the predicate \p F
    ///
    /// \returns The index of the first character satisfying \p F starting from
    /// \p From, or npos if not found.
    [[nodiscard]] size_t find_if(function_ref<bool(char)> F,
                                 size_t From = 0) const {
      StringRef S = drop_front(From);
      while (!S.empty()) {
        if (F(S.front()))
          return size() - S.size();
        S = S.drop_front();
      }
      return npos;
    }

    /// Search for the first character not satisfying the predicate \p F
    ///
    /// \returns The index of the first character not satisfying \p F starting
    /// from \p From, or npos if not found.
    [[nodiscard]] size_t find_if_not(function_ref<bool(char)> F,
                                     size_t From = 0) const {
      return find_if([F](char c) { return !F(c); }, From);
    }

    /// Search for the first string \p Str in the string.
    ///
    /// \returns The index of the first occurrence of \p Str, or npos if not
    /// found.
    [[nodiscard]] size_t find(StringRef Str, size_t From = 0) const;

    /// Search for the first string \p Str in the string, ignoring case.
    ///
    /// \returns The index of the first occurrence of \p Str, or npos if not
    /// found.
    [[nodiscard]] size_t find_insensitive(StringRef Str, size_t From = 0) const;

    /// Search for the last character \p C in the string.
    ///
    /// \returns The index of the last occurrence of \p C, or npos if not
    /// found.
    [[nodiscard]] size_t rfind(char C, size_t From = npos) const {
      size_t I = std::min(From, Length);
      while (I) {
        --I;
        if (Data[I] == C)
          return I;
      }
      return npos;
    }

    /// Search for the last character \p C in the string, ignoring case.
    ///
    /// \returns The index of the last occurrence of \p C, or npos if not
    /// found.
    [[nodiscard]] size_t rfind_insensitive(char C, size_t From = npos) const;

    /// Search for the last string \p Str in the string.
    ///
    /// \returns The index of the last occurrence of \p Str, or npos if not
    /// found.
    [[nodiscard]] size_t rfind(StringRef Str) const;

    /// Search for the last string \p Str in the string, ignoring case.
    ///
    /// \returns The index of the last occurrence of \p Str, or npos if not
    /// found.
    [[nodiscard]] size_t rfind_insensitive(StringRef Str) const;

    /// Find the first character in the string that is \p C, or npos if not
    /// found. Same as find.
    [[nodiscard]] size_t find_first_of(char C, size_t From = 0) const {
      return find(C, From);
    }

    /// Find the first character in the string that is in \p Chars, or npos if
    /// not found.
    ///
    /// Complexity: O(size() + Chars.size())
    [[nodiscard]] size_t find_first_of(StringRef Chars, size_t From = 0) const;

    /// Find the first character in the string that is not \p C, or npos if not
    /// found.
    [[nodiscard]] size_t find_first_not_of(char C, size_t From = 0) const;

    /// Find the first character in the string that is not in the string
    /// \p Chars, or npos if not found.
    ///
    /// Complexity: O(size() + Chars.size())
    [[nodiscard]] size_t find_first_not_of(StringRef Chars,
                                           size_t From = 0) const;

    /// Find the last character in the string that is \p C, or npos if not
    /// found.
    [[nodiscard]] size_t find_last_of(char C, size_t From = npos) const {
      return rfind(C, From);
    }

    /// Find the last character in the string that is in \p Chars, or npos if not
    /// found.
    ///
    /// Complexity: O(size() + Chars.size())
    [[nodiscard]] size_t find_last_of(StringRef Chars,
                                      size_t From = npos) const;

    /// Find the last character in the string that is not \p C, or npos if not
    /// found.
    [[nodiscard]] size_t find_last_not_of(char C, size_t From = npos) const;

    /// Find the last character in the string that is not in \p Chars, or
    /// npos if not found.
    ///
    /// Complexity: O(size() + Chars.size())
    [[nodiscard]] size_t find_last_not_of(StringRef Chars,
                                          size_t From = npos) const;

    /// Return true if the given string is a substring of *this, and false
    /// otherwise.
    [[nodiscard]] bool contains(StringRef Other) const {
      return find(Other) != npos;
    }

    /// Return true if the given character is contained in *this, and false
    /// otherwise.
    [[nodiscard]] bool contains(char C) const {
      return find_first_of(C) != npos;
    }

    /// Return true if the given string is a substring of *this, and false
    /// otherwise.
    [[nodiscard]] bool contains_insensitive(StringRef Other) const {
      return find_insensitive(Other) != npos;
    }

    /// Return true if the given character is contained in *this, and false
    /// otherwise.
    [[nodiscard]] bool contains_insensitive(char C) const {
      return find_insensitive(C) != npos;
    }

    /// @}
    /// @name Helpful Algorithms
    /// @{

    /// Return the number of occurrences of \p C in the string.
    [[nodiscard]] size_t count(char C) const {
      size_t Count = 0;
      for (size_t I = 0; I != Length; ++I)
        if (Data[I] == C)
          ++Count;
      return Count;
    }

    /// Return the number of non-overlapped occurrences of \p Str in
    /// the string.
    size_t count(StringRef Str) const;

    /// Parse the current string as an integer of the specified radix.  If
    /// \p Radix is specified as zero, this does radix autosensing using
    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
    ///
    /// If the string is invalid or if only a subset of the string is valid,
    /// this returns true to signify the error.  The string is considered
    /// erroneous if empty or if it overflows T.
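    ///
    /// Illustrative sketch:
    /// ```
    /// unsigned Val;
    /// if (StringRef("0x1f").getAsInteger(0, Val)) {
    ///   // Parse failed; Val is unmodified.
    /// }
    /// // On success with radix 0, "0x1f" autosenses hex and Val == 31.
    /// ```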
    template <typename T> bool getAsInteger(unsigned Radix, T &Result) const {
      if constexpr (std::numeric_limits<T>::is_signed) {
        long long LLVal;
        if (getAsSignedInteger(*this, Radix, LLVal) ||
            static_cast<T>(LLVal) != LLVal)
          return true;
        Result = LLVal;
      } else {
        unsigned long long ULLVal;
        // The additional cast to unsigned long long is required to avoid the
        // Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type
        // 'unsigned __int64' when instantiating getAsInteger with T = bool.
        if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
            static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
          return true;
        Result = ULLVal;
      }
      return false;
    }

    /// Parse the current string as an integer of the specified radix.  If
    /// \p Radix is specified as zero, this does radix autosensing using
    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
    ///
    /// If the string does not begin with a number of the specified radix,
    /// this returns true to signify the error. The string is considered
    /// erroneous if empty or if it overflows T.
    /// The portion of the string representing the discovered numeric value
    /// is removed from the beginning of the string.
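    ///
    /// Illustrative sketch:
    /// ```
    /// StringRef S("42abc");
    /// unsigned Val;
    /// bool Failed = S.consumeInteger(10, Val);
    /// // Failed == false, Val == 42, and S == "abc".
    /// ```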
    template <typename T> bool consumeInteger(unsigned Radix, T &Result) {
      if constexpr (std::numeric_limits<T>::is_signed) {
        long long LLVal;
        if (consumeSignedInteger(*this, Radix, LLVal) ||
            static_cast<long long>(static_cast<T>(LLVal)) != LLVal)
          return true;
        Result = LLVal;
      } else {
        unsigned long long ULLVal;
        if (consumeUnsignedInteger(*this, Radix, ULLVal) ||
            static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
          return true;
        Result = ULLVal;
      }
      return false;
    }

    /// Parse the current string as an integer of the specified \p Radix, or of
    /// an autosensed radix if the \p Radix given is 0.  The current value in
    /// \p Result is discarded, and the storage is changed to be wide enough to
    /// store the parsed integer.
    ///
    /// \returns true if the string does not solely consist of a valid
    /// non-empty number in the appropriate base.
    ///
    /// APInt::fromString is superficially similar but assumes the
    /// string is well-formed in the given radix.
    bool getAsInteger(unsigned Radix, APInt &Result) const;

    /// Parse the current string as an integer of the specified \p Radix.  If
    /// \p Radix is specified as zero, this does radix autosensing using
    /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
    ///
    /// If the string does not begin with a number of the specified radix,
    /// this returns true to signify the error. The string is considered
    /// erroneous if empty.
    /// The portion of the string representing the discovered numeric value
    /// is removed from the beginning of the string.
    bool consumeInteger(unsigned Radix, APInt &Result);

    /// Parse the current string as an IEEE double-precision floating
    /// point value.  The string must be a well-formed double.
    ///
    /// If \p AllowInexact is false, the function will fail if the string
    /// cannot be represented exactly.  Otherwise, the function only fails
    /// in case of an overflow or underflow, or an invalid floating point
    /// representation.
    bool getAsDouble(double &Result, bool AllowInexact = true) const;

    /// @}
    /// @name String Operations
    /// @{

    /// Convert the given ASCII string to lowercase.
    [[nodiscard]] std::string lower() const;

    /// Convert the given ASCII string to uppercase.
    [[nodiscard]] std::string upper() const;

    /// @}
    /// @name Substring Operations
    /// @{

    /// Return a reference to the substring from [Start, Start + N).
    ///
    /// \param Start The index of the starting character in the substring; if
    /// the index is npos or greater than the length of the string then the
    /// empty substring will be returned.
    ///
    /// \param N The number of characters to include in the substring. If N
    /// exceeds the number of characters remaining in the string, the string
    /// suffix (starting with \p Start) will be returned.
    [[nodiscard]] constexpr StringRef substr(size_t Start,
                                             size_t N = npos) const {
      Start = std::min(Start, Length);
      return StringRef(Data + Start, std::min(N, Length - Start));
    }

    /// Return a StringRef equal to 'this' but with only the first \p N
    /// elements remaining.  If \p N is greater than the length of the
    /// string, the entire string is returned.
    [[nodiscard]] StringRef take_front(size_t N = 1) const {
      if (N >= size())
        return *this;
      return drop_back(size() - N);
    }

    /// Return a StringRef equal to 'this' but with only the last \p N
    /// elements remaining.  If \p N is greater than the length of the
    /// string, the entire string is returned.
    [[nodiscard]] StringRef take_back(size_t N = 1) const {
      if (N >= size())
        return *this;
      return drop_front(size() - N);
    }

    /// Return the longest prefix of 'this' such that every character
    /// in the prefix satisfies the given predicate.
    [[nodiscard]] StringRef take_while(function_ref<bool(char)> F) const {
      return substr(0, find_if_not(F));
    }

    /// Return the longest prefix of 'this' such that no character in
    /// the prefix satisfies the given predicate.
    [[nodiscard]] StringRef take_until(function_ref<bool(char)> F) const {
      return substr(0, find_if(F));
    }

    /// Return a StringRef equal to 'this' but with the first \p N elements
    /// dropped.
    [[nodiscard]] StringRef drop_front(size_t N = 1) const {
      assert(size() >= N && "Dropping more elements than exist");
      return substr(N);
    }

    /// Return a StringRef equal to 'this' but with the last \p N elements
    /// dropped.
    [[nodiscard]] StringRef drop_back(size_t N = 1) const {
      assert(size() >= N && "Dropping more elements than exist");
      return substr(0, size()-N);
    }

    /// Return a StringRef equal to 'this', but with all characters satisfying
    /// the given predicate dropped from the beginning of the string.
    [[nodiscard]] StringRef drop_while(function_ref<bool(char)> F) const {
      return substr(find_if_not(F));
    }

    /// Return a StringRef equal to 'this', but with all characters not
    /// satisfying the given predicate dropped from the beginning of the string.
    [[nodiscard]] StringRef drop_until(function_ref<bool(char)> F) const {
      return substr(find_if(F));
    }

    /// Returns true if this StringRef has the given prefix and removes that
    /// prefix.
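    ///
    /// Illustrative sketch:
    /// ```
    /// StringRef Arg("--verbose");
    /// if (Arg.consume_front("--")) {
    ///   // Arg == "verbose" here.
    /// }
    /// ```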
    bool consume_front(StringRef Prefix) {
      if (!starts_with(Prefix))
        return false;

      *this = substr(Prefix.size());
      return true;
    }

    /// Returns true if this StringRef has the given prefix, ignoring case,
    /// and removes that prefix.
    bool consume_front_insensitive(StringRef Prefix) {
      if (!starts_with_insensitive(Prefix))
        return false;

      *this = substr(Prefix.size());
      return true;
    }

    /// Returns true if this StringRef has the given suffix and removes that
    /// suffix.
    bool consume_back(StringRef Suffix) {
      if (!ends_with(Suffix))
        return false;

      *this = substr(0, size() - Suffix.size());
      return true;
    }

    /// Returns true if this StringRef has the given suffix, ignoring case,
    /// and removes that suffix.
    bool consume_back_insensitive(StringRef Suffix) {
      if (!ends_with_insensitive(Suffix))
        return false;

      *this = substr(0, size() - Suffix.size());
      return true;
    }

    /// Return a reference to the substring from [Start, End).
    ///
    /// \param Start The index of the starting character in the substring; if
    /// the index is npos or greater than the length of the string then the
    /// empty substring will be returned.
    ///
    /// \param End The index following the last character to include in the
    /// substring. If this is npos or exceeds the number of characters
    /// remaining in the string, the string suffix (starting with \p Start)
    /// will be returned. If this is less than \p Start, an empty string will
    /// be returned.
    [[nodiscard]] StringRef slice(size_t Start, size_t End) const {
      Start = std::min(Start, Length);
      End = std::clamp(End, Start, Length);
      return StringRef(Data + Start, End - Start);
    }

    /// Split into two substrings around the first occurrence of a separator
    /// character.
    ///
    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
    /// such that (*this == LHS + Separator + RHS) is true and RHS is
    /// maximal. If \p Separator is not in the string, then the result is a
    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
    ///
    /// \param Separator The character to split on.
    /// \returns The split substrings.
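    ///
    /// Illustrative sketch:
    /// ```
    /// auto [Key, Value] = StringRef("name=llvm").split('=');
    /// // Key == "name", Value == "llvm".
    /// ```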
    [[nodiscard]] std::pair<StringRef, StringRef> split(char Separator) const {
      return split(StringRef(&Separator, 1));
    }

    /// Split into two substrings around the first occurrence of a separator
    /// string.
    ///
    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
    /// such that (*this == LHS + Separator + RHS) is true and RHS is
    /// maximal. If \p Separator is not in the string, then the result is a
    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
    ///
    /// \param Separator - The string to split on.
    /// \return - The split substrings.
    [[nodiscard]] std::pair<StringRef, StringRef>
    split(StringRef Separator) const {
      size_t Idx = find(Separator);
      if (Idx == npos)
        return std::make_pair(*this, StringRef());
      return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
    }

    /// Split into two substrings around the last occurrence of a separator
    /// string.
    ///
    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
    /// such that (*this == LHS + Separator + RHS) is true and RHS is
    /// minimal. If \p Separator is not in the string, then the result is a
    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
    ///
    /// \param Separator - The string to split on.
    /// \return - The split substrings.
    [[nodiscard]] std::pair<StringRef, StringRef>
    rsplit(StringRef Separator) const {
      size_t Idx = rfind(Separator);
      if (Idx == npos)
        return std::make_pair(*this, StringRef());
      return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
    }

    /// Split into substrings around the occurrences of a separator string.
    ///
    /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
    /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
    /// elements are added to A.
    /// If \p KeepEmpty is false, empty strings are not added to \p A. They
    /// still count when considering \p MaxSplit.
    /// A useful invariant is that
    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
    ///
    /// \param A - Where to put the substrings.
    /// \param Separator - The string to split on.
    /// \param MaxSplit - The maximum number of times the string is split.
    /// \param KeepEmpty - True if empty substrings should be added.
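    ///
    /// Illustrative sketch:
    /// ```
    /// SmallVector<StringRef, 4> Parts;
    /// StringRef("a,b,,c").split(Parts, ',', /*MaxSplit=*/-1,
    ///                           /*KeepEmpty=*/false);
    /// // Parts == {"a", "b", "c"}.
    /// ```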
    void split(SmallVectorImpl<StringRef> &A,
               StringRef Separator, int MaxSplit = -1,
               bool KeepEmpty = true) const;

    /// Split into substrings around the occurrences of a separator character.
    ///
    /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
    /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
    /// elements are added to A.
    /// If \p KeepEmpty is false, empty strings are not added to \p A. They
    /// still count when considering \p MaxSplit.
    /// A useful invariant is that
    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
    ///
    /// \param A - Where to put the substrings.
    /// \param Separator - The string to split on.
    /// \param MaxSplit - The maximum number of times the string is split.
    /// \param KeepEmpty - True if empty substrings should be added.
    void split(SmallVectorImpl<StringRef> &A, char Separator, int MaxSplit = -1,
               bool KeepEmpty = true) const;

    /// Split into two substrings around the last occurrence of a separator
    /// character.
    ///
    /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
    /// such that (*this == LHS + Separator + RHS) is true and RHS is
    /// minimal. If \p Separator is not in the string, then the result is a
    /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
    ///
    /// \param Separator - The character to split on.
    /// \return - The split substrings.
    [[nodiscard]] std::pair<StringRef, StringRef> rsplit(char Separator) const {
      return rsplit(StringRef(&Separator, 1));
    }

    /// Return string with consecutive \p Char characters starting from the
    /// left removed.
    [[nodiscard]] StringRef ltrim(char Char) const {
      return drop_front(std::min(Length, find_first_not_of(Char)));
    }

    /// Return string with consecutive characters in \p Chars starting from
    /// the left removed.
    [[nodiscard]] StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
      return drop_front(std::min(Length, find_first_not_of(Chars)));
    }

    /// Return string with consecutive \p Char characters starting from the
    /// right removed.
    [[nodiscard]] StringRef rtrim(char Char) const {
      return drop_back(Length - std::min(Length, find_last_not_of(Char) + 1));
    }

    /// Return string with consecutive characters in \p Chars starting from
    /// the right removed.
    [[nodiscard]] StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
      return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
    }

    /// Return string with consecutive \p Char characters starting from the
    /// left and right removed.
    [[nodiscard]] StringRef trim(char Char) const {
      return ltrim(Char).rtrim(Char);
    }

    /// Return string with consecutive characters in \p Chars starting from
    /// the left and right removed.
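    ///
    /// For example, StringRef("  hi\t\n").trim() yields "hi".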
    [[nodiscard]] StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
      return ltrim(Chars).rtrim(Chars);
    }

    /// Detect the line ending style of the string.
    ///
    /// If the string contains a line ending, return the line ending character
    /// sequence that is detected. Otherwise return '\n' for unix line endings.
    ///
    /// \return - The line ending character sequence.
    [[nodiscard]] StringRef detectEOL() const {
      size_t Pos = find('\r');
      if (Pos == npos) {
        // If there is no carriage return, assume unix
        return "\n";
      }
      if (Pos + 1 < Length && Data[Pos + 1] == '\n')
        return "\r\n"; // Windows
      if (Pos > 0 && Data[Pos - 1] == '\n')
        return "\n\r"; // You monster!
      return "\r";     // Classic Mac
    }
    /// @}
  };

  /// A wrapper around a string literal that serves as a proxy for constructing
  /// global tables of StringRefs with the length computed at compile time.
  /// In order to avoid the invocation of a global constructor, StringLiteral
  /// should *only* be used in a constexpr context, as such:
  ///
  /// constexpr StringLiteral S("test");
  ///
  class StringLiteral : public StringRef {
  private:
    constexpr StringLiteral(const char *Str, size_t N) : StringRef(Str, N) {
    }

  public:
    template <size_t N>
    constexpr StringLiteral(const char (&Str)[N])
#if defined(__clang__) && __has_attribute(enable_if)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wgcc-compat"
        __attribute((enable_if(__builtin_strlen(Str) == N - 1,
                               "invalid string literal")))
#pragma clang diagnostic pop
#endif
        : StringRef(Str, N - 1) {
    }

    // Explicit construction for strings like "foo\0bar".
    template <size_t N>
    static constexpr StringLiteral withInnerNUL(const char (&Str)[N]) {
      return StringLiteral(Str, N - 1);
    }
  };

  /// @name StringRef Comparison Operators
  /// @{

  inline bool operator==(StringRef LHS, StringRef RHS) {
    return LHS.equals(RHS);
  }

  inline bool operator!=(StringRef LHS, StringRef RHS) { return !(LHS == RHS); }

  inline bool operator<(StringRef LHS, StringRef RHS) {
    return LHS.compare(RHS) < 0;
  }

  inline bool operator<=(StringRef LHS, StringRef RHS) {
    return LHS.compare(RHS) <= 0;
  }

  inline bool operator>(StringRef LHS, StringRef RHS) {
    return LHS.compare(RHS) > 0;
  }

  inline bool operator>=(StringRef LHS, StringRef RHS) {
    return LHS.compare(RHS) >= 0;
  }

  inline std::string &operator+=(std::string &buffer, StringRef string) {
    return buffer.append(string.data(), string.size());
  }

  /// @}

  /// Compute a hash_code for a StringRef.
  [[nodiscard]] hash_code hash_value(StringRef S);

  // Provide DenseMapInfo for StringRefs.
  template <> struct DenseMapInfo<StringRef, void> {
    static inline StringRef getEmptyKey() {
      return StringRef(
          reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)), 0);
    }

    static inline StringRef getTombstoneKey() {
      return StringRef(
          reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)), 0);
    }

    static unsigned getHashValue(StringRef Val);

    static bool isEqual(StringRef LHS, StringRef RHS) {
      if (RHS.data() == getEmptyKey().data())
        return LHS.data() == getEmptyKey().data();
      if (RHS.data() == getTombstoneKey().data())
        return LHS.data() == getTombstoneKey().data();
      return LHS == RHS;
    }
  };

} // end namespace llvm

#endif // LLVM_ADT_STRINGREF_H

//===- Sequence.h - Utility for producing sequences of values ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Provides some synthesis utilities to produce sequences of values. The names
/// are intentionally kept very short as they tend to occur in common and
/// widely used contexts.
///
/// The `seq(A, B)` function produces a sequence of values from `A` up to (but
/// not including) `B`, i.e., [`A`, `B`), that can be safely iterated over.
/// `seq` supports both integral (e.g., `int`, `char`, `uint32_t`) and enum
/// types. `seq_inclusive(A, B)` produces a sequence of values from `A` to `B`,
/// including `B`.
///
/// Examples with integral types:
/// ```
/// for (int x : seq(0, 3))
///   outs() << x << " ";
/// ```
///
/// Prints: `0 1 2 `.
///
/// ```
/// for (int x : seq_inclusive(0, 3))
///   outs() << x << " ";
/// ```
///
/// Prints: `0 1 2 3 `.
///
/// Similar to `seq` and `seq_inclusive`, the `enum_seq` and
/// `enum_seq_inclusive` functions produce sequences of enum values that can be
/// iterated over.
/// To enable iteration with enum types, you need to either mark enums as safe
/// to iterate on by specializing `enum_iteration_traits`, or opt into
/// potentially unsafe iteration at every callsite by passing
/// `force_iteration_on_noniterable_enum`.
///
/// Examples with enum types:
/// ```
/// namespace X {
///   enum class MyEnum : unsigned {A = 0, B, C};
/// } // namespace X
///
/// template <> struct enum_iteration_traits<X::MyEnum> {
///   static constexpr bool is_iterable = true;
/// };
///
/// class MyClass {
/// public:
///   enum Safe { D = 3, E, F };
///   enum MaybeUnsafe { G = 1, H = 2, I = 4 };
/// };
///
/// template <> struct enum_iteration_traits<MyClass::Safe> {
///   static constexpr bool is_iterable = true;
/// };
/// ```
///
/// ```
///   for (auto v : enum_seq(MyClass::Safe::D, MyClass::Safe::F))
///     outs() << int(v) << " ";
/// ```
///
/// Prints: `3 4 `.
///
/// ```
///   for (auto v : enum_seq(MyClass::MaybeUnsafe::H, MyClass::MaybeUnsafe::I,
///                          force_iteration_on_noniterable_enum))
///     outs() << int(v) << " ";
/// ```
///
/// Prints: `2 3 `.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SEQUENCE_H
#define LLVM_ADT_SEQUENCE_H

#include <cassert>     // assert
#include <iterator>    // std::random_access_iterator_tag
#include <limits>      // std::numeric_limits
#include <type_traits> // std::is_integral, std::is_enum, std::underlying_type,
                       // std::enable_if

#include "llvm/Support/MathExtras.h" // AddOverflow / SubOverflow

namespace llvm {

// Enum traits that mark enums as safe or unsafe to iterate over.
// By default, enum types are *not* considered safe for iteration.
// To allow iteration for your enum type, provide a specialization with
// `is_iterable` set to `true` in the `llvm` namespace.
// Alternatively, you can pass the `force_iteration_on_noniterable_enum` tag
// to `enum_seq` or `enum_seq_inclusive`.
template <typename EnumT> struct enum_iteration_traits {
  static constexpr bool is_iterable = false;
};

struct force_iteration_on_noniterable_enum_t {
  explicit force_iteration_on_noniterable_enum_t() = default;
};

inline constexpr force_iteration_on_noniterable_enum_t
    force_iteration_on_noniterable_enum;

namespace detail {

// Returns whether a value of type U can be represented with type T.
template <typename T, typename U> bool canTypeFitValue(const U Value) {
  const intmax_t BotT = intmax_t(std::numeric_limits<T>::min());
  const intmax_t BotU = intmax_t(std::numeric_limits<U>::min());
  const uintmax_t TopT = uintmax_t(std::numeric_limits<T>::max());
  const uintmax_t TopU = uintmax_t(std::numeric_limits<U>::max());
  return !((BotT > BotU && Value < static_cast<U>(BotT)) ||
           (TopT < TopU && Value > static_cast<U>(TopT)));
}

// An integer type that asserts when:
// - constructed from a value that doesn't fit into intmax_t,
// - cast to a type that cannot hold the current value,
// - its internal representation overflows.
struct CheckedInt {
  // Integral constructor, asserts if Value cannot be represented as intmax_t.
  template <typename Integral,
            std::enable_if_t<std::is_integral<Integral>::value, bool> = 0>
  static CheckedInt from(Integral FromValue) {
    if (!canTypeFitValue<intmax_t>(FromValue))
      assertOutOfBounds();
    CheckedInt Result;
    Result.Value = static_cast<intmax_t>(FromValue);
    return Result;
  }

  // Enum constructor, asserts if Value cannot be represented as intmax_t.
  template <typename Enum,
            std::enable_if_t<std::is_enum<Enum>::value, bool> = 0>
  static CheckedInt from(Enum FromValue) {
    using type = std::underlying_type_t<Enum>;
    return from<type>(static_cast<type>(FromValue));
  }

  // Equality
  bool operator==(const CheckedInt &O) const { return Value == O.Value; }
  bool operator!=(const CheckedInt &O) const { return Value != O.Value; }

  CheckedInt operator+(intmax_t Offset) const {
    CheckedInt Result;
    if (AddOverflow(Value, Offset, Result.Value))
      assertOutOfBounds();
    return Result;
  }

  intmax_t operator-(CheckedInt Other) const {
    intmax_t Result;
    if (SubOverflow(Value, Other.Value, Result))
      assertOutOfBounds();
    return Result;
  }

  // Convert to integral, asserts if Value cannot be represented as Integral.
  template <typename Integral,
            std::enable_if_t<std::is_integral<Integral>::value, bool> = 0>
  Integral to() const {
    if (!canTypeFitValue<Integral>(Value))
      assertOutOfBounds();
    return static_cast<Integral>(Value);
  }

  // Convert to enum, asserts if Value cannot be represented as Enum's
  // underlying type.
  template <typename Enum,
            std::enable_if_t<std::is_enum<Enum>::value, bool> = 0>
  Enum to() const {
    using type = std::underlying_type_t<Enum>;
    return Enum(to<type>());
  }

private:
  static void assertOutOfBounds() { assert(false && "Out of bounds"); }

  intmax_t Value;
};

template <typename T, bool IsReverse> struct SafeIntIterator {
  using iterator_category = std::random_access_iterator_tag;
  using value_type = T;
  using difference_type = intmax_t;
  using pointer = T *;
  using reference = value_type; // The iterator does not reference memory.

  // Construct from T.
  explicit SafeIntIterator(T Value) : SI(CheckedInt::from<T>(Value)) {}
  // Construct from other direction.
  SafeIntIterator(const SafeIntIterator<T, !IsReverse> &O) : SI(O.SI) {}

  // Dereference
  reference operator*() const { return SI.to<T>(); }
  // Indexing
  reference operator[](intmax_t Offset) const { return *(*this + Offset); }

  // Can be compared for equivalence using the equality/inequality operators.
  bool operator==(const SafeIntIterator &O) const { return SI == O.SI; }
  bool operator!=(const SafeIntIterator &O) const { return SI != O.SI; }
  // Comparison
  bool operator<(const SafeIntIterator &O) const { return (*this - O) < 0; }
  bool operator>(const SafeIntIterator &O) const { return (*this - O) > 0; }
  bool operator<=(const SafeIntIterator &O) const { return (*this - O) <= 0; }
  bool operator>=(const SafeIntIterator &O) const { return (*this - O) >= 0; }

  // Pre Increment/Decrement
  void operator++() { offset(1); }
  void operator--() { offset(-1); }

  // Post Increment/Decrement
  SafeIntIterator operator++(int) {
    const auto Copy = *this;
    ++*this;
    return Copy;
  }
  SafeIntIterator operator--(int) {
    const auto Copy = *this;
    --*this;
    return Copy;
  }

  // Compound assignment operators
  void operator+=(intmax_t Offset) { offset(Offset); }
  void operator-=(intmax_t Offset) { offset(-Offset); }

  // Arithmetic
  SafeIntIterator operator+(intmax_t Offset) const { return add(Offset); }
  SafeIntIterator operator-(intmax_t Offset) const { return add(-Offset); }

  // Difference
  intmax_t operator-(const SafeIntIterator &O) const {
    return IsReverse ? O.SI - SI : SI - O.SI;
  }

private:
  SafeIntIterator(const CheckedInt &SI) : SI(SI) {}

  static intmax_t getOffset(intmax_t Offset) {
    return IsReverse ? -Offset : Offset;
  }

  CheckedInt add(intmax_t Offset) const { return SI + getOffset(Offset); }

  void offset(intmax_t Offset) { SI = SI + getOffset(Offset); }

  CheckedInt SI;

  // To allow construction from the other direction.
  template <typename, bool> friend struct SafeIntIterator;
};

} // namespace detail

template <typename T> struct iota_range {
  using value_type = T;
  using reference = T &;
  using const_reference = const T &;
  using iterator = detail::SafeIntIterator<value_type, false>;
  using const_iterator = iterator;
  using reverse_iterator = detail::SafeIntIterator<value_type, true>;
  using const_reverse_iterator = reverse_iterator;
  using difference_type = intmax_t;
  using size_type = std::size_t;

  explicit iota_range(T Begin, T End, bool Inclusive)
      : BeginValue(Begin), PastEndValue(End) {
    assert(Begin <= End && "Begin must be less than or equal to End.");
    if (Inclusive)
      ++PastEndValue;
  }

  size_t size() const { return PastEndValue - BeginValue; }
  bool empty() const { return BeginValue == PastEndValue; }

  auto begin() const { return const_iterator(BeginValue); }
  auto end() const { return const_iterator(PastEndValue); }

  auto rbegin() const { return const_reverse_iterator(PastEndValue - 1); }
  auto rend() const { return const_reverse_iterator(BeginValue - 1); }

private:
  static_assert(std::is_integral<T>::value || std::is_enum<T>::value,
                "T must be an integral or enum type");
  static_assert(std::is_same<T, std::remove_cv_t<T>>::value,
                "T must not be const nor volatile");

  iterator BeginValue;
  iterator PastEndValue;
};

/// Iterate over an integral type from Begin up to - but not including - End.
/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX] for
/// forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX] for reverse
/// iteration).
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value &&
                                                  !std::is_enum<T>::value>>
auto seq(T Begin, T End) {
  return iota_range<T>(Begin, End, false);
}

/// Iterate over an integral type from Begin to End inclusive.
/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX - 1]
/// for forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX - 1] for reverse
/// iteration).
template <typename T, typename = std::enable_if_t<std::is_integral<T>::value &&
                                                  !std::is_enum<T>::value>>
auto seq_inclusive(T Begin, T End) {
  return iota_range<T>(Begin, End, true);
}

/// Iterate over an enum type from Begin up to - but not including - End.
/// Note: `enum_seq` will generate each consecutive value, even if no
/// enumerator with that value exists.
/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX] for
/// forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX] for reverse
/// iteration).
template <typename EnumT,
          typename = std::enable_if_t<std::is_enum<EnumT>::value>>
auto enum_seq(EnumT Begin, EnumT End) {
  static_assert(enum_iteration_traits<EnumT>::is_iterable,
                "Enum type is not marked as iterable.");
  return iota_range<EnumT>(Begin, End, false);
}

/// Iterate over an enum type from Begin up to - but not including - End, even
/// when `EnumT` is not marked as safely iterable by `enum_iteration_traits`.
/// Note: `enum_seq` will generate each consecutive value, even if no
/// enumerator with that value exists.
/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX] for
/// forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX] for reverse
/// iteration).
template <typename EnumT,
          typename = std::enable_if_t<std::is_enum<EnumT>::value>>
auto enum_seq(EnumT Begin, EnumT End, force_iteration_on_noniterable_enum_t) {
  return iota_range<EnumT>(Begin, End, false);
}

/// Iterate over an enum type from Begin to End inclusive.
/// Note: `enum_seq_inclusive` will generate each consecutive value, even if no
/// enumerator with that value exists.
/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX - 1]
/// for forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX - 1] for reverse
/// iteration).
template <typename EnumT,
          typename = std::enable_if_t<std::is_enum<EnumT>::value>>
auto enum_seq_inclusive(EnumT Begin, EnumT End) {
  static_assert(enum_iteration_traits<EnumT>::is_iterable,
                "Enum type is not marked as iterable.");
  return iota_range<EnumT>(Begin, End, true);
}

/// Iterate over an enum type from Begin to End inclusive, even when `EnumT`
/// is not marked as safely iterable by `enum_iteration_traits`.
/// Note: `enum_seq_inclusive` will generate each consecutive value, even if no
/// enumerator with that value exists.
/// Note: Begin and End values have to be within [INTMAX_MIN, INTMAX_MAX - 1]
/// for forward iteration (resp. [INTMAX_MIN + 1, INTMAX_MAX - 1] for reverse
/// iteration).
template <typename EnumT,
          typename = std::enable_if_t<std::is_enum<EnumT>::value>>
auto enum_seq_inclusive(EnumT Begin, EnumT End,
                        force_iteration_on_noniterable_enum_t) {
  return iota_range<EnumT>(Begin, End, true);
}

} // end namespace llvm

#endif // LLVM_ADT_SEQUENCE_H

//===- llvm/Support/FloatingPointMode.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Utilities for dealing with flags related to floating point properties and
/// mode controls.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_FLOATINGPOINTMODE_H
#define LLVM_ADT_FLOATINGPOINTMODE_H

#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// Rounding mode.
///
/// Enumerates supported rounding modes, as well as some special values. The set
/// of the modes must agree with IEEE-754, 4.3.1 and 4.3.2. The constants
/// assigned to the IEEE rounding modes must agree with the values used by
/// FLT_ROUNDS (C11, 5.2.4.2.2p8).
///
/// This value is packed into a bitfield in some cases, including \c FPOptions,
/// so the rounding mode values and the special value \c Dynamic must fit into
/// the bit field (currently 3 bits). The value \c Invalid is used only in
/// values returned by intrinsics to indicate errors; it should never be stored
/// as a rounding mode value, so it does not need to fit the bit fields.
///
enum class RoundingMode : int8_t {
  // Rounding mode defined in IEEE-754.
  TowardZero        = 0,    ///< roundTowardZero.
  NearestTiesToEven = 1,    ///< roundTiesToEven.
  TowardPositive    = 2,    ///< roundTowardPositive.
  TowardNegative    = 3,    ///< roundTowardNegative.
  NearestTiesToAway = 4,    ///< roundTiesToAway.

  // Special values.
  Dynamic = 7,    ///< Denotes mode unknown at compile time.
  Invalid = -1    ///< Denotes invalid value.
};

/// Returns text representation of the given rounding mode.
inline StringRef spell(RoundingMode RM) {
  switch (RM) {
  case RoundingMode::TowardZero: return "towardzero";
  case RoundingMode::NearestTiesToEven: return "tonearest";
  case RoundingMode::TowardPositive: return "upward";
  case RoundingMode::TowardNegative: return "downward";
  case RoundingMode::NearestTiesToAway: return "tonearestaway";
  case RoundingMode::Dynamic: return "dynamic";
  default: return "invalid";
  }
}

inline raw_ostream &operator << (raw_ostream &OS, RoundingMode RM) {
  OS << spell(RM);
  return OS;
}

/// Represent subnormal handling kind for floating point instruction inputs and
/// outputs.
struct DenormalMode {
  /// Represent the handling kinds for denormal (aka subnormal) values in the
  /// floating-point environment.
  enum DenormalModeKind : int8_t {
    Invalid = -1,

    /// IEEE-754 denormal numbers preserved.
    IEEE,

    /// The sign of a flushed-to-zero number is preserved in the sign of 0.
    PreserveSign,

    /// Denormals are flushed to positive zero.
    PositiveZero,

    /// Denormals have unknown treatment.
    Dynamic
  };

  /// Denormal flushing mode for floating point instruction results in the
  /// default floating point environment.
  DenormalModeKind Output = DenormalModeKind::Invalid;

  /// Denormal treatment kind for floating point instruction inputs in the
  /// default floating-point environment. If this is not DenormalModeKind::IEEE,
  /// floating-point instructions implicitly treat denormal input values as 0.
  DenormalModeKind Input = DenormalModeKind::Invalid;

  constexpr DenormalMode() = default;
  constexpr DenormalMode(DenormalModeKind Out, DenormalModeKind In) :
    Output(Out), Input(In) {}


  static constexpr DenormalMode getInvalid() {
    return DenormalMode(DenormalModeKind::Invalid, DenormalModeKind::Invalid);
  }

  /// Return the assumed default mode for a function without denormal-fp-math.
  static constexpr DenormalMode getDefault() {
    return getIEEE();
  }

  static constexpr DenormalMode getIEEE() {
    return DenormalMode(DenormalModeKind::IEEE, DenormalModeKind::IEEE);
  }

  static constexpr DenormalMode getPreserveSign() {
    return DenormalMode(DenormalModeKind::PreserveSign,
                        DenormalModeKind::PreserveSign);
  }

  static constexpr DenormalMode getPositiveZero() {
    return DenormalMode(DenormalModeKind::PositiveZero,
                        DenormalModeKind::PositiveZero);
  }

  static constexpr DenormalMode getDynamic() {
    return DenormalMode(DenormalModeKind::Dynamic, DenormalModeKind::Dynamic);
  }

  bool operator==(DenormalMode Other) const {
    return Output == Other.Output && Input == Other.Input;
  }

  bool operator!=(DenormalMode Other) const {
    return !(*this == Other);
  }

  bool isSimple() const {
    return Input == Output;
  }

  bool isValid() const {
    return Output != DenormalModeKind::Invalid &&
           Input != DenormalModeKind::Invalid;
  }

  /// Return true if input denormals must be implicitly treated as 0.
  constexpr bool inputsAreZero() const {
    return Input == DenormalModeKind::PreserveSign ||
           Input == DenormalModeKind::PositiveZero;
  }

  /// Return true if output denormals should be flushed to 0.
  constexpr bool outputsAreZero() const {
    return Output == DenormalModeKind::PreserveSign ||
           Output == DenormalModeKind::PositiveZero;
  }

  /// Get the effective denormal mode when this caller calls into a function
  /// with denormal mode \p Callee. This promotes dynamic modes to the mode of
  /// the caller.
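  ///
  /// Illustrative sketch:
  /// ```
  /// DenormalMode Caller = DenormalMode::getIEEE();
  /// DenormalMode Effective =
  ///     Caller.mergeCalleeMode(DenormalMode::getDynamic());
  /// // Effective == DenormalMode::getIEEE(): dynamic promotes to the caller.
  /// ```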
  DenormalMode mergeCalleeMode(DenormalMode Callee) const {
    DenormalMode MergedMode = Callee;
    if (Callee.Input == DenormalMode::Dynamic)
      MergedMode.Input = Input;
    if (Callee.Output == DenormalMode::Dynamic)
      MergedMode.Output = Output;
    return MergedMode;
  }

  inline void print(raw_ostream &OS) const;

  inline std::string str() const {
    std::string storage;
    raw_string_ostream OS(storage);
    print(OS);
    return OS.str();
  }
};

inline raw_ostream& operator<<(raw_ostream &OS, DenormalMode Mode) {
  Mode.print(OS);
  return OS;
}

/// Parse the expected names from the denormal-fp-math attribute.
inline DenormalMode::DenormalModeKind
parseDenormalFPAttributeComponent(StringRef Str) {
  // Assume ieee on unspecified attribute.
  return StringSwitch<DenormalMode::DenormalModeKind>(Str)
      .Cases("", "ieee", DenormalMode::IEEE)
      .Case("preserve-sign", DenormalMode::PreserveSign)
      .Case("positive-zero", DenormalMode::PositiveZero)
      .Case("dynamic", DenormalMode::Dynamic)
      .Default(DenormalMode::Invalid);
}

/// Return the name used for the denormal handling mode, matching the expected
/// names from the denormal-fp-math attribute.
inline StringRef denormalModeKindName(DenormalMode::DenormalModeKind Mode) {
  switch (Mode) {
  case DenormalMode::IEEE:
    return "ieee";
  case DenormalMode::PreserveSign:
    return "preserve-sign";
  case DenormalMode::PositiveZero:
    return "positive-zero";
  case DenormalMode::Dynamic:
    return "dynamic";
  default:
    return "";
  }
}

/// Returns the denormal mode to use for inputs and outputs.
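///
/// Illustrative sketch:
/// ```
/// DenormalMode M = parseDenormalFPAttribute("preserve-sign,ieee");
/// // M.Output == DenormalMode::PreserveSign, M.Input == DenormalMode::IEEE.
/// // A single component, e.g. "ieee", applies to both output and input.
/// ```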
inline DenormalMode parseDenormalFPAttribute(StringRef Str) {
  StringRef OutputStr, InputStr;
  std::tie(OutputStr, InputStr) = Str.split(',');

  DenormalMode Mode;
  Mode.Output = parseDenormalFPAttributeComponent(OutputStr);

  // Maintain compatibility with old form of the attribute which only specified
  // one component.
  Mode.Input = InputStr.empty() ? Mode.Output
                                : parseDenormalFPAttributeComponent(InputStr);

  return Mode;
}

void DenormalMode::print(raw_ostream &OS) const {
  OS << denormalModeKindName(Output) << ',' << denormalModeKindName(Input);
}

/// Floating-point class tests, supported by the 'is_fpclass' intrinsic. An
/// actual test may be an OR combination of basic tests.
enum FPClassTest : unsigned {
  fcNone = 0,

  fcSNan = 0x0001,
  fcQNan = 0x0002,
  fcNegInf = 0x0004,
  fcNegNormal = 0x0008,
  fcNegSubnormal = 0x0010,
  fcNegZero = 0x0020,
  fcPosZero = 0x0040,
  fcPosSubnormal = 0x0080,
  fcPosNormal = 0x0100,
  fcPosInf = 0x0200,

  fcNan = fcSNan | fcQNan,
  fcInf = fcPosInf | fcNegInf,
  fcNormal = fcPosNormal | fcNegNormal,
  fcSubnormal = fcPosSubnormal | fcNegSubnormal,
  fcZero = fcPosZero | fcNegZero,
  fcPosFinite = fcPosNormal | fcPosSubnormal | fcPosZero,
  fcNegFinite = fcNegNormal | fcNegSubnormal | fcNegZero,
  fcFinite = fcPosFinite | fcNegFinite,
  fcPositive = fcPosFinite | fcPosInf,
  fcNegative = fcNegFinite | fcNegInf,

  fcAllFlags = fcNan | fcInf | fcFinite,
};

LLVM_DECLARE_ENUM_AS_BITMASK(FPClassTest, /* LargestValue */ fcPosInf);
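
// Illustrative sketch: with the bitmask declaration above, class tests compose
// with bitwise operators, e.g.:
//   FPClassTest AnyNanOrInf = fcNan | fcInf;
//   bool CoversSNan = (AnyNanOrInf & fcSNan) != fcNone;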

/// Return the test mask which returns true if the value's sign bit is flipped.
FPClassTest fneg(FPClassTest Mask);

/// Return the test mask which returns true if the value's sign bit is cleared.
FPClassTest fabs(FPClassTest Mask);

/// Write a human readable form of \p Mask to \p OS
raw_ostream &operator<<(raw_ostream &OS, FPClassTest Mask);

} // namespace llvm

#endif // LLVM_ADT_FLOATINGPOINTMODE_H

//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a set that has insertion order iteration
/// characteristics. This is useful for keeping a set of things that need to be
/// visited later but in a deterministic order (insertion order). The interface
/// is purposefully minimal.
///
/// This file defines SetVector and SmallSetVector; the latter performs no
/// allocations when it holds fewer than a certain number of elements.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SETVECTOR_H
#define LLVM_ADT_SETVECTOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <iterator>
#include <vector>

namespace llvm {

/// A vector that has set insertion semantics.
///
/// This adapter class provides a way to keep a set of things that also has the
/// property of a deterministic iteration order. The order of iteration is the
/// order of insertion.
///
/// The key and value types are derived from the Set and Vector types
/// respectively. This allows the vector-type operations and set-type operations
/// to have different types. In particular, this is useful when storing pointers
/// as "Foo *" values but looking them up as "const Foo *" keys.
///
/// No constraint is placed on the key and value types, although it is assumed
/// that value_type can be converted into key_type for insertion. Users must be
/// aware of any loss of information in this conversion. For example, setting
/// value_type to float and key_type to int can produce very surprising results,
/// but it is not explicitly disallowed.
///
/// The parameter N specifies the "small" size of the container: the number of
/// elements up to which a linear scan over the Vector is used when searching
/// for elements instead of checking Set, since that is faster for small sizes.
/// A value of 0 (the default) disables this small-size mode of operation.
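///
/// A brief usage sketch (illustrative):
///
/// \code
///   SetVector<int> SV;
///   SV.insert(3);   // returns true: newly inserted
///   SV.insert(1);
///   SV.insert(3);   // returns false: already present
///   for (int X : SV)
///     ...;          // visits 3, then 1 (insertion order)
/// \endcode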
template <typename T, typename Vector = std::vector<T>,
          typename Set = DenseSet<T>, unsigned N = 0>
class SetVector {
  // Much like in SmallPtrSet, this value should not be too high, to prevent
  // excessively long linear scans from occurring.
  static_assert(N <= 32, "Small size should be less than or equal to 32!");

public:
  using value_type = typename Vector::value_type;
  using key_type = typename Set::key_type;
  using reference = value_type &;
  using const_reference = const value_type &;
  using set_type = Set;
  using vector_type = Vector;
  using iterator = typename vector_type::const_iterator;
  using const_iterator = typename vector_type::const_iterator;
  using reverse_iterator = typename vector_type::const_reverse_iterator;
  using const_reverse_iterator = typename vector_type::const_reverse_iterator;
  using size_type = typename vector_type::size_type;

  /// Construct an empty SetVector
  SetVector() = default;

  /// Initialize a SetVector with a range of elements
  template<typename It>
  SetVector(It Start, It End) {
    insert(Start, End);
  }

  ArrayRef<value_type> getArrayRef() const { return vector_; }

  /// Clear the SetVector and return the underlying vector.
  Vector takeVector() {
    set_.clear();
    return std::move(vector_);
  }

  /// Determine if the SetVector is empty or not.
  bool empty() const {
    return vector_.empty();
  }

  /// Determine the number of elements in the SetVector.
  size_type size() const {
    return vector_.size();
  }

  /// Get an iterator to the beginning of the SetVector.
  iterator begin() {
    return vector_.begin();
  }

  /// Get a const_iterator to the beginning of the SetVector.
  const_iterator begin() const {
    return vector_.begin();
  }

  /// Get an iterator to the end of the SetVector.
  iterator end() {
    return vector_.end();
  }

  /// Get a const_iterator to the end of the SetVector.
  const_iterator end() const {
    return vector_.end();
  }

  /// Get a reverse_iterator to the end of the SetVector.
  reverse_iterator rbegin() {
    return vector_.rbegin();
  }

  /// Get a const_reverse_iterator to the end of the SetVector.
  const_reverse_iterator rbegin() const {
    return vector_.rbegin();
  }

  /// Get a reverse_iterator to the beginning of the SetVector.
  reverse_iterator rend() {
    return vector_.rend();
  }

  /// Get a const_reverse_iterator to the beginning of the SetVector.
  const_reverse_iterator rend() const {
    return vector_.rend();
  }

  /// Return the first element of the SetVector.
  const value_type &front() const {
    assert(!empty() && "Cannot call front() on empty SetVector!");
    return vector_.front();
  }

  /// Return the last element of the SetVector.
  const value_type &back() const {
    assert(!empty() && "Cannot call back() on empty SetVector!");
    return vector_.back();
  }

  /// Index into the SetVector.
  const_reference operator[](size_type n) const {
    assert(n < vector_.size() && "SetVector access out of range!");
    return vector_[n];
  }

  /// Insert a new element into the SetVector.
  /// \returns true if the element was inserted into the SetVector.
  bool insert(const value_type &X) {
    if constexpr (canBeSmall())
      if (isSmall()) {
        if (llvm::find(vector_, X) == vector_.end()) {
          vector_.push_back(X);
          if (vector_.size() > N)
            makeBig();
          return true;
        }
        return false;
      }

    bool result = set_.insert(X).second;
    if (result)
      vector_.push_back(X);
    return result;
  }

  /// Insert a range of elements into the SetVector.
  template<typename It>
  void insert(It Start, It End) {
    for (; Start != End; ++Start)
      insert(*Start);
  }

  /// Remove an item from the set vector.
  bool remove(const value_type& X) {
    if constexpr (canBeSmall())
      if (isSmall()) {
        typename vector_type::iterator I = find(vector_, X);
        if (I != vector_.end()) {
          vector_.erase(I);
          return true;
        }
        return false;
      }

    if (set_.erase(X)) {
      typename vector_type::iterator I = find(vector_, X);
      assert(I != vector_.end() && "Corrupted SetVector instances!");
      vector_.erase(I);
      return true;
    }
    return false;
  }

  /// Erase a single element from the set vector.
  /// \returns an iterator pointing to the next element that followed the
  /// element erased. This is the end of the SetVector if the last element is
  /// erased.
  iterator erase(const_iterator I) {
    if constexpr (canBeSmall())
      if (isSmall())
        return vector_.erase(I);

    const key_type &V = *I;
    assert(set_.count(V) && "Corrupted SetVector instances!");
    set_.erase(V);
    return vector_.erase(I);
  }

  /// Remove items from the set vector based on a predicate function.
  ///
  /// This is intended to be equivalent to the following code, if we could
  /// write it:
  ///
  /// \code
  ///   V.erase(remove_if(V, P), V.end());
  /// \endcode
  ///
  /// However, SetVector doesn't expose non-const iterators, making any
  /// algorithm like remove_if impossible to use.
  ///
  /// \returns true if any element is removed.
  template <typename UnaryPredicate>
  bool remove_if(UnaryPredicate P) {
    typename vector_type::iterator I = [this, P] {
      if constexpr (canBeSmall())
        if (isSmall())
          return llvm::remove_if(vector_, P);

      return llvm::remove_if(vector_,
                             TestAndEraseFromSet<UnaryPredicate>(P, set_));
    }();

    if (I == vector_.end())
      return false;
    vector_.erase(I, vector_.end());
    return true;
  }
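
  // Usage sketch (illustrative): drop all even values; the surviving
  // elements keep their relative insertion order.
  //
  //   SV.remove_if([](int X) { return X % 2 == 0; });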

  /// Check if the SetVector contains the given key.
  bool contains(const key_type &key) const {
    if constexpr (canBeSmall())
      if (isSmall())
        return is_contained(vector_, key);

    return set_.find(key) != set_.end();
  }

  /// Count the number of elements of a given key in the SetVector.
  /// \returns 0 if the element is not in the SetVector, 1 if it is.
  size_type count(const key_type &key) const {
    if constexpr (canBeSmall())
      if (isSmall())
        return is_contained(vector_, key);

    return set_.count(key);
  }

  /// Completely clear the SetVector
  void clear() {
    set_.clear();
    vector_.clear();
  }

  /// Remove the last element of the SetVector.
  void pop_back() {
    assert(!empty() && "Cannot remove an element from an empty SetVector!");
    set_.erase(back());
    vector_.pop_back();
  }

  [[nodiscard]] value_type pop_back_val() {
    value_type Ret = back();
    pop_back();
    return Ret;
  }

  bool operator==(const SetVector &that) const {
    return vector_ == that.vector_;
  }

  bool operator!=(const SetVector &that) const {
    return vector_ != that.vector_;
  }

  /// Compute This := This u S, return whether 'This' changed.
  /// TODO: We should be able to use set_union from SetOperations.h, but
  ///       SetVector interface is inconsistent with DenseSet.
  template <class STy>
  bool set_union(const STy &S) {
    bool Changed = false;

    for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
         ++SI)
      if (insert(*SI))
        Changed = true;

    return Changed;
  }

  /// Compute This := This - B
  /// TODO: We should be able to use set_subtract from SetOperations.h, but
  ///       SetVector interface is inconsistent with DenseSet.
  template <class STy>
  void set_subtract(const STy &S) {
    for (typename STy::const_iterator SI = S.begin(), SE = S.end(); SI != SE;
         ++SI)
      remove(*SI);
  }

  void swap(SetVector<T, Vector, Set, N> &RHS) {
    set_.swap(RHS.set_);
    vector_.swap(RHS.vector_);
  }

private:
  /// A wrapper predicate designed for use with std::remove_if.
  ///
  /// This predicate wraps a predicate suitable for use with std::remove_if to
  /// call set_.erase(x) on each element which is slated for removal.
  template <typename UnaryPredicate>
  class TestAndEraseFromSet {
    UnaryPredicate P;
    set_type &set_;

  public:
    TestAndEraseFromSet(UnaryPredicate P, set_type &set_)
        : P(std::move(P)), set_(set_) {}

    template <typename ArgumentT>
    bool operator()(const ArgumentT &Arg) {
      if (P(Arg)) {
        set_.erase(Arg);
        return true;
      }
      return false;
    }
  };

  [[nodiscard]] static constexpr bool canBeSmall() { return N != 0; }

  [[nodiscard]] bool isSmall() const { return set_.empty(); }

  void makeBig() {
    if constexpr (canBeSmall())
      for (const auto &entry : vector_)
        set_.insert(entry);
  }

  set_type set_;         ///< The set.
  vector_type vector_;   ///< The vector.
};

/// A SetVector that performs no allocations if smaller than
/// a certain size.
template <typename T, unsigned N>
class SmallSetVector : public SetVector<T, SmallVector<T, N>, DenseSet<T>, N> {
public:
  SmallSetVector() = default;

  /// Initialize a SmallSetVector with a range of elements
  template<typename It>
  SmallSetVector(It Start, It End) {
    this->insert(Start, End);
  }
};
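
// Usage sketch (illustrative): with N = 2, membership tests scan the inline
// vector linearly; once the size exceeds 2, makeBig() populates the DenseSet
// and lookups switch over to it.
//
//   SmallSetVector<int, 2> SSV;
//   SSV.insert(1);
//   SSV.insert(2);
//   (void)SSV.insert(1); // false: found by the linear scan
//   SSV.insert(3);       // now size > 2, so the DenseSet takes over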

} // end namespace llvm

namespace std {

/// Implement std::swap in terms of SetVector swap.
template <typename T, typename V, typename S, unsigned N>
inline void swap(llvm::SetVector<T, V, S, N> &LHS,
                 llvm::SetVector<T, V, S, N> &RHS) {
  LHS.swap(RHS);
}

/// Implement std::swap in terms of SmallSetVector swap.
template<typename T, unsigned N>
inline void
swap(llvm::SmallSetVector<T, N> &LHS, llvm::SmallSetVector<T, N> &RHS) {
  LHS.swap(RHS);
}

} // end namespace std

#endif // LLVM_ADT_SETVECTOR_H
//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the DenseSet and SmallDenseSet classes.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSESET_H
#define LLVM_ADT_DENSESET_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <utility>

namespace llvm {

namespace detail {

struct DenseSetEmpty {};

// Use the empty base class trick so we can create a DenseMap where the buckets
// contain only a single item.
template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
  KeyT key;

public:
  KeyT &getFirst() { return key; }
  const KeyT &getFirst() const { return key; }
  DenseSetEmpty &getSecond() { return *this; }
  const DenseSetEmpty &getSecond() const { return *this; }
};

/// Base class for DenseSet and SmallDenseSet.
///
/// MapTy should be either
///
///   DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
///            detail::DenseSetPair<ValueT>>
///
/// or the equivalent SmallDenseMap type.  ValueInfoT must implement the
/// DenseMapInfo "concept".
template <typename ValueT, typename MapTy, typename ValueInfoT>
class DenseSetImpl {
  static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
                "DenseMap buckets unexpectedly large!");
  MapTy TheMap;

  template <typename T>
  using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;

public:
  using key_type = ValueT;
  using value_type = ValueT;
  using size_type = unsigned;

  explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}

  template <typename InputIt>
  DenseSetImpl(const InputIt &I, const InputIt &E)
      : DenseSetImpl(PowerOf2Ceil(std::distance(I, E))) {
    insert(I, E);
  }

  DenseSetImpl(std::initializer_list<ValueT> Elems)
      : DenseSetImpl(PowerOf2Ceil(Elems.size())) {
    insert(Elems.begin(), Elems.end());
  }

  bool empty() const { return TheMap.empty(); }
  size_type size() const { return TheMap.size(); }
  size_t getMemorySize() const { return TheMap.getMemorySize(); }

  /// Grow the DenseSet so that it has at least \p Size buckets. Will not
  /// shrink the size of the set.
  void resize(size_t Size) { TheMap.resize(Size); }

  /// Grow the DenseSet so that it can contain at least \p Size items before
  /// resizing again.
  void reserve(size_t Size) { TheMap.reserve(Size); }

  void clear() {
    TheMap.clear();
  }

  /// Return 1 if the specified key is in the set, 0 otherwise.
  size_type count(const_arg_type_t<ValueT> V) const {
    return TheMap.count(V);
  }

  bool erase(const ValueT &V) {
    return TheMap.erase(V);
  }

  void swap(DenseSetImpl &RHS) { TheMap.swap(RHS.TheMap); }

  // Iterators.

  class ConstIterator;

  class Iterator {
    typename MapTy::iterator I;
    friend class DenseSetImpl;
    friend class ConstIterator;

  public:
    using difference_type = typename MapTy::iterator::difference_type;
    using value_type = ValueT;
    using pointer = value_type *;
    using reference = value_type &;
    using iterator_category = std::forward_iterator_tag;

    Iterator() = default;
    Iterator(const typename MapTy::iterator &i) : I(i) {}

    ValueT &operator*() { return I->getFirst(); }
    const ValueT &operator*() const { return I->getFirst(); }
    ValueT *operator->() { return &I->getFirst(); }
    const ValueT *operator->() const { return &I->getFirst(); }

    Iterator& operator++() { ++I; return *this; }
    Iterator operator++(int) { auto T = *this; ++I; return T; }
    friend bool operator==(const Iterator &X, const Iterator &Y) {
      return X.I == Y.I;
    }
    friend bool operator!=(const Iterator &X, const Iterator &Y) {
      return X.I != Y.I;
    }
  };

  class ConstIterator {
    typename MapTy::const_iterator I;
    friend class DenseSetImpl;
    friend class Iterator;

  public:
    using difference_type = typename MapTy::const_iterator::difference_type;
    using value_type = ValueT;
    using pointer = const value_type *;
    using reference = const value_type &;
    using iterator_category = std::forward_iterator_tag;

    ConstIterator() = default;
    ConstIterator(const Iterator &B) : I(B.I) {}
    ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}

    const ValueT &operator*() const { return I->getFirst(); }
    const ValueT *operator->() const { return &I->getFirst(); }

    ConstIterator& operator++() { ++I; return *this; }
    ConstIterator operator++(int) { auto T = *this; ++I; return T; }
    friend bool operator==(const ConstIterator &X, const ConstIterator &Y) {
      return X.I == Y.I;
    }
    friend bool operator!=(const ConstIterator &X, const ConstIterator &Y) {
      return X.I != Y.I;
    }
  };

  using iterator = Iterator;
  using const_iterator = ConstIterator;

  iterator begin() { return Iterator(TheMap.begin()); }
  iterator end() { return Iterator(TheMap.end()); }

  const_iterator begin() const { return ConstIterator(TheMap.begin()); }
  const_iterator end() const { return ConstIterator(TheMap.end()); }

  iterator find(const_arg_type_t<ValueT> V) { return Iterator(TheMap.find(V)); }
  const_iterator find(const_arg_type_t<ValueT> V) const {
    return ConstIterator(TheMap.find(V));
  }

  /// Check if the set contains the given element.
  bool contains(const_arg_type_t<ValueT> V) const {
    return TheMap.find(V) != TheMap.end();
  }

  /// Alternative version of find() which allows a different, and possibly less
  /// expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
  /// used.
  template <class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    return Iterator(TheMap.find_as(Val));
  }
  template <class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    return ConstIterator(TheMap.find_as(Val));
  }

  void erase(Iterator I) { return TheMap.erase(I.I); }
  void erase(ConstIterator CI) { return TheMap.erase(CI.I); }

  std::pair<iterator, bool> insert(const ValueT &V) {
    detail::DenseSetEmpty Empty;
    return TheMap.try_emplace(V, Empty);
  }

  std::pair<iterator, bool> insert(ValueT &&V) {
    detail::DenseSetEmpty Empty;
    return TheMap.try_emplace(std::move(V), Empty);
  }

  /// Alternative version of insert that uses a different (and possibly less
  /// expensive) key type.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(const ValueT &V,
                                      const LookupKeyT &LookupKey) {
    return TheMap.insert_as({V, detail::DenseSetEmpty()}, LookupKey);
  }
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(ValueT &&V, const LookupKeyT &LookupKey) {
    return TheMap.insert_as({std::move(V), detail::DenseSetEmpty()}, LookupKey);
  }

  // Range insertion of values.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }
};

/// Equality comparison for DenseSet.
///
/// Iterates over elements of LHS confirming that each element is also a member
/// of RHS, and that RHS contains no additional values.
/// Equivalent to N calls to RHS.count. Amortized complexity is linear, worst
/// case is O(N^2) (if every hash collides).
template <typename ValueT, typename MapTy, typename ValueInfoT>
bool operator==(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
                const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  for (auto &E : LHS)
    if (!RHS.count(E))
      return false;

  return true;
}

/// Inequality comparison for DenseSet.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename ValueT, typename MapTy, typename ValueInfoT>
bool operator!=(const DenseSetImpl<ValueT, MapTy, ValueInfoT> &LHS,
                const DenseSetImpl<ValueT, MapTy, ValueInfoT> &RHS) {
  return !(LHS == RHS);
}

} // end namespace detail

/// Implements a dense probed hash-table based set.
template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
class DenseSet : public detail::DenseSetImpl<
                     ValueT, DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
                                      detail::DenseSetPair<ValueT>>,
                     ValueInfoT> {
  using BaseT =
      detail::DenseSetImpl<ValueT,
                           DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
                                    detail::DenseSetPair<ValueT>>,
                           ValueInfoT>;

public:
  using BaseT::BaseT;
};
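
// Usage sketch (illustrative): DenseSet follows the usual set API shape, and
// insert() returns a std::pair<iterator, bool> like std::set.
//
//   DenseSet<unsigned> Seen;
//   bool Inserted = Seen.insert(42).second; // true the first time
//   if (Seen.contains(42))
//     Seen.erase(42);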

/// Implements a dense probed hash-table based set with some number of buckets
/// stored inline.
template <typename ValueT, unsigned InlineBuckets = 4,
          typename ValueInfoT = DenseMapInfo<ValueT>>
class SmallDenseSet
    : public detail::DenseSetImpl<
          ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
                                ValueInfoT, detail::DenseSetPair<ValueT>>,
          ValueInfoT> {
  using BaseT = detail::DenseSetImpl<
      ValueT, SmallDenseMap<ValueT, detail::DenseSetEmpty, InlineBuckets,
                            ValueInfoT, detail::DenseSetPair<ValueT>>,
      ValueInfoT>;

public:
  using BaseT::BaseT;
};

} // end namespace llvm

#endif // LLVM_ADT_DENSESET_H
//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file builds on the ADT/GraphTraits.h file to build a generic graph
/// post order iterator.  This should work over any graph type that has a
/// GraphTraits specialization.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_POSTORDERITERATOR_H
#define LLVM_ADT_POSTORDERITERATOR_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
#include <optional>
#include <set>
#include <utility>
#include <vector>

namespace llvm {

// The po_iterator_storage template provides access to the set of already
// visited nodes during the po_iterator's depth-first traversal.
//
// The default implementation simply contains a set of visited nodes, while
// the External=true version uses a reference to an external set.
//
// It is possible to prune the depth-first traversal in several ways:
//
// - When providing an external set that already contains some graph nodes,
//   those nodes won't be visited again. This is useful for restarting a
//   post-order traversal on a graph with nodes that aren't dominated by a
//   single node.
//
// - By providing a custom SetType class, unwanted graph nodes can be excluded
//   by having the insert() function return false. This could for example
//   confine a CFG traversal to blocks in a specific loop.
//
// - Finally, by specializing the po_iterator_storage template itself, graph
//   edges can be pruned by returning false in the insertEdge() function. This
//   could be used to remove loop back-edges from the CFG seen by po_iterator.
//
// A specialized po_iterator_storage class can observe both the pre-order and
// the post-order. The insertEdge() function is called in a pre-order, while
// the finishPostorder() function is called just before the po_iterator moves
// on to the next node.
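//
// As a sketch (illustrative; InLoopSet, Loop, and BasicBlock are assumptions,
// not part of this header), a custom SetType only needs an insert() whose
// result has a bool `second` member:
//
//   struct InLoopSet {
//     Loop &L;
//     SmallPtrSet<BasicBlock *, 8> Visited;
//     std::pair<BasicBlock *, bool> insert(BasicBlock *BB) {
//       if (!L.contains(BB))
//         return {BB, false};                   // prune: treat as visited
//       return {BB, Visited.insert(BB).second}; // normal visited tracking
//     }
//   };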

/// Default po_iterator_storage implementation with an internal set object.
template<class SetType, bool External>
class po_iterator_storage {
  SetType Visited;

public:
  // Return true if the edge destination should be visited.
  template <typename NodeRef>
  bool insertEdge(std::optional<NodeRef> From, NodeRef To) {
    return Visited.insert(To).second;
  }

  // Called after all children of BB have been visited.
  template <typename NodeRef> void finishPostorder(NodeRef BB) {}
};

/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
  SetType &Visited;

public:
  po_iterator_storage(SetType &VSet) : Visited(VSet) {}
  po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}

  // Return true if the edge destination should be visited; called with an
  // empty From (std::nullopt) for the root node.
  // Graph edges can be pruned by specializing this function.
  template <class NodeRef>
  bool insertEdge(std::optional<NodeRef> From, NodeRef To) {
    return Visited.insert(To).second;
  }

  // Called after all children of BB have been visited.
  template <class NodeRef> void finishPostorder(NodeRef BB) {}
};

template <class GraphT,
          class SetType = SmallPtrSet<typename GraphTraits<GraphT>::NodeRef, 8>,
          bool ExtStorage = false, class GT = GraphTraits<GraphT>>
class po_iterator : public po_iterator_storage<SetType, ExtStorage> {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = typename GT::NodeRef;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = const value_type &;

private:
  using NodeRef = typename GT::NodeRef;
  using ChildItTy = typename GT::ChildIteratorType;

  /// Used to maintain the ordering.
  /// First element is basic block pointer, second is iterator for the next
  /// child to visit, third is the end iterator.
  SmallVector<std::tuple<NodeRef, ChildItTy, ChildItTy>, 8> VisitStack;

  po_iterator(NodeRef BB) {
    this->insertEdge(std::optional<NodeRef>(), BB);
    VisitStack.emplace_back(BB, GT::child_begin(BB), GT::child_end(BB));
    traverseChild();
  }

  po_iterator() = default; // End is when stack is empty.

  po_iterator(NodeRef BB, SetType &S)
      : po_iterator_storage<SetType, ExtStorage>(S) {
    if (this->insertEdge(std::optional<NodeRef>(), BB)) {
      VisitStack.emplace_back(BB, GT::child_begin(BB), GT::child_end(BB));
      traverseChild();
    }
  }

  po_iterator(SetType &S)
      : po_iterator_storage<SetType, ExtStorage>(S) {
  } // End is when stack is empty.

  void traverseChild() {
    while (true) {
      auto &Entry = VisitStack.back();
      if (std::get<1>(Entry) == std::get<2>(Entry))
        break;
      NodeRef BB = *std::get<1>(Entry)++;
      if (this->insertEdge(std::optional<NodeRef>(std::get<0>(Entry)), BB)) {
        // If the block is not visited...
        VisitStack.emplace_back(BB, GT::child_begin(BB), GT::child_end(BB));
      }
    }
  }

public:
  // Provide static "constructors"...
  static po_iterator begin(const GraphT &G) {
    return po_iterator(GT::getEntryNode(G));
  }
  static po_iterator end(const GraphT &G) { return po_iterator(); }

  static po_iterator begin(const GraphT &G, SetType &S) {
    return po_iterator(GT::getEntryNode(G), S);
  }
  static po_iterator end(const GraphT &G, SetType &S) { return po_iterator(S); }

  bool operator==(const po_iterator &x) const {
    return VisitStack == x.VisitStack;
  }
  bool operator!=(const po_iterator &x) const { return !(*this == x); }

  reference operator*() const { return std::get<0>(VisitStack.back()); }

  // This is a nonstandard operator-> that dereferences the pointer an extra
  // time... so that you can actually call methods ON the BasicBlock, because
  // the contained type is a pointer.  This allows, for example,
  // BBIt->getTerminator().
  //
  NodeRef operator->() const { return **this; }

  po_iterator &operator++() { // Preincrement
    this->finishPostorder(std::get<0>(VisitStack.back()));
    VisitStack.pop_back();
    if (!VisitStack.empty())
      traverseChild();
    return *this;
  }

  po_iterator operator++(int) { // Postincrement
    po_iterator tmp = *this;
    ++*this;
    return tmp;
  }
};

// Provide global constructors that automatically figure out correct types...
//
template <class T>
po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
template <class T>
po_iterator<T> po_end  (const T &G) { return po_iterator<T>::end(G); }

template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
  return make_range(po_begin(G), po_end(G));
}
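
// Usage sketch (illustrative): given a GraphTraits specialization (LLVM
// provides one for Function *), this visits nodes children-first:
//
//   for (BasicBlock *BB : post_order(&F))
//     ...;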

// Provide global definitions of external postorder iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
struct po_ext_iterator : public po_iterator<T, SetType, true> {
  po_ext_iterator(const po_iterator<T, SetType, true> &V) :
  po_iterator<T, SetType, true>(V) {}
};

template<class T, class SetType>
po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
  return po_ext_iterator<T, SetType>::begin(G, S);
}

template<class T, class SetType>
po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
  return po_ext_iterator<T, SetType>::end(G, S);
}

template <class T, class SetType>
iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &S) {
  return make_range(po_ext_begin(G, S), po_ext_end(G, S));
}
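
// Usage sketch (illustrative): reusing one external visited set means a
// second traversal skips everything an earlier one already reached:
//
//   SmallPtrSet<BasicBlock *, 16> Visited;
//   for (BasicBlock *BB : post_order_ext(&F, Visited))
//     ...;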

// Provide global definitions of inverse post order iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>,
          bool External = false>
struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External> {
  ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
     po_iterator<Inverse<T>, SetType, External> (V) {}
};

template <class T>
ipo_iterator<T> ipo_begin(const T &G) {
  return ipo_iterator<T>::begin(G);
}

template <class T>
ipo_iterator<T> ipo_end(const T &G){
  return ipo_iterator<T>::end(G);
}

template <class T>
iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
  return make_range(ipo_begin(G), ipo_end(G));
}

// Provide global definitions of external inverse postorder iterators...
template <class T, class SetType = std::set<typename GraphTraits<T>::NodeRef>>
struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
  ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
    ipo_iterator<T, SetType, true>(V) {}
  ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
    ipo_iterator<T, SetType, true>(V) {}
};

template <class T, class SetType>
ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) {
  return ipo_ext_iterator<T, SetType>::begin(G, S);
}

template <class T, class SetType>
ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) {
  return ipo_ext_iterator<T, SetType>::end(G, S);
}

template <class T, class SetType>
iterator_range<ipo_ext_iterator<T, SetType>>
inverse_post_order_ext(const T &G, SetType &S) {
  return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S));
}

//===--------------------------------------------------------------------===//
// Reverse Post Order CFG iterator code
//===--------------------------------------------------------------------===//
//
// This is used to visit basic blocks in a method in reverse post order.  This
// class is awkward to use because I don't know a good incremental algorithm to
// compute RPO from a graph.  Because of this, the construction of the
// ReversePostOrderTraversal object is expensive (it must walk the entire graph
// with a postorder iterator to build the data structures).  The moral of this
// story is: Don't create more ReversePostOrderTraversal classes than necessary.
//
// Because it does the traversal in its constructor, it won't invalidate when
// BasicBlocks are removed, *but* it may contain erased blocks. Some places
// rely on this behavior (i.e. GVN).
//
// This class should be used like this:
// {
//   ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create
//   for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
//      ...
//   }
//   for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
//      ...
//   }
// }
//

template<class GraphT, class GT = GraphTraits<GraphT>>
class ReversePostOrderTraversal {
  using NodeRef = typename GT::NodeRef;

  using VecTy = SmallVector<NodeRef, 8>;
  VecTy Blocks; // Block list in normal PO order

  void Initialize(const GraphT &G) {
    std::copy(po_begin(G), po_end(G), std::back_inserter(Blocks));
  }

public:
  using rpo_iterator = typename VecTy::reverse_iterator;
  using const_rpo_iterator = typename VecTy::const_reverse_iterator;

  ReversePostOrderTraversal(const GraphT &G) { Initialize(G); }

  // Because we want a reverse post order, use reverse iterators from the vector
  rpo_iterator begin() { return Blocks.rbegin(); }
  const_rpo_iterator begin() const { return Blocks.rbegin(); }
  rpo_iterator end() { return Blocks.rend(); }
  const_rpo_iterator end() const { return Blocks.rend(); }
};

} // end namespace llvm

#endif // LLVM_ADT_POSTORDERITERATOR_H
//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines classes to implement an intrusive doubly linked list class
/// (i.e. each node of the list must contain a next and previous field for the
/// list.
///
/// The ilist class itself should be a plug-in replacement for std::list.  This list
/// replacement does not provide a constant time size() method, so be careful to
/// use empty() when you really want to know if it's empty.
///
/// The ilist class is implemented as a circular list.  The list itself contains
/// a sentinel node, whose Next points at begin() and whose Prev points at
/// rbegin().  The sentinel node itself serves as end() and rend().
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H

#include "llvm/ADT/simple_ilist.h"
#include <cassert>
#include <cstddef>
#include <iterator>

namespace llvm {

/// Use delete by default for iplist and ilist.
///
/// Specialize this to get different behaviour for ownership-related API.  (If
/// you really want ownership semantics, consider using std::list or building
/// something like \a BumpPtrList.)
///
/// \see ilist_noalloc_traits
template <typename NodeTy> struct ilist_alloc_traits {
  static void deleteNode(NodeTy *V) { delete V; }
};

/// Custom traits to do nothing on deletion.
///
/// Specialize ilist_alloc_traits to inherit from this to disable the
/// non-intrusive deletion in iplist (which implies ownership).
///
/// If you want purely intrusive semantics with no callbacks, consider using \a
/// simple_ilist instead.
///
/// \code
/// template <>
/// struct ilist_alloc_traits<MyType> : ilist_noalloc_traits<MyType> {};
/// \endcode
template <typename NodeTy> struct ilist_noalloc_traits {
  static void deleteNode(NodeTy *V) {}
};

/// Callbacks do nothing by default in iplist and ilist.
///
/// Specialize this to use callbacks for when nodes change their list
/// membership.
template <typename NodeTy> struct ilist_callback_traits {
  void addNodeToList(NodeTy *) {}
  void removeNodeFromList(NodeTy *) {}

  /// Callback before transferring nodes to this list. The nodes may already be
  /// in this same list.
  template <class Iterator>
  void transferNodesFromList(ilist_callback_traits &OldList, Iterator /*first*/,
                             Iterator /*last*/) {
    (void)OldList;
  }
};

/// A fragment for template traits for intrusive list that provides default
/// node related operations.
///
/// TODO: Remove this layer of indirection.  It's not necessary.
template <typename NodeTy>
struct ilist_node_traits : ilist_alloc_traits<NodeTy>,
                           ilist_callback_traits<NodeTy> {};

/// Template traits for intrusive list.
///
/// Customize callbacks and allocation semantics.
template <typename NodeTy>
struct ilist_traits : public ilist_node_traits<NodeTy> {};
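
// As a sketch (illustrative; MyNode is an assumption), callbacks are
// typically supplied by specializing ilist_traits while inheriting the
// remaining defaults from ilist_node_traits:
//
//   template <> struct ilist_traits<MyNode> : ilist_node_traits<MyNode> {
//     void addNodeToList(MyNode *N) { /* e.g. update a parent pointer */ }
//     void removeNodeFromList(MyNode *N) { /* undo the update */ }
//   };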

/// Const traits should never be instantiated.
template <typename Ty> struct ilist_traits<const Ty> {};

//===----------------------------------------------------------------------===//
//
/// A wrapper around an intrusive list with callbacks and non-intrusive
/// ownership.
///
/// This wraps a purely intrusive list (like simple_ilist) with a configurable
/// traits class.  The traits can implement callbacks and customize the
/// ownership semantics.
///
/// This is a subset of ilist functionality that can safely be used on nodes of
/// polymorphic types, i.e. a heterogeneous list with a common base class that
/// holds the next/prev pointers.  The only state of the list itself is an
/// ilist_sentinel, which holds pointers to the first and last nodes in the
/// list.
template <class IntrusiveListT, class TraitsT>
class iplist_impl : public TraitsT, IntrusiveListT {
  typedef IntrusiveListT base_list_type;

public:
  typedef typename base_list_type::pointer pointer;
  typedef typename base_list_type::const_pointer const_pointer;
  typedef typename base_list_type::reference reference;
  typedef typename base_list_type::const_reference const_reference;
  typedef typename base_list_type::value_type value_type;
  typedef typename base_list_type::size_type size_type;
  typedef typename base_list_type::difference_type difference_type;
  typedef typename base_list_type::iterator iterator;
  typedef typename base_list_type::const_iterator const_iterator;
  typedef typename base_list_type::reverse_iterator reverse_iterator;
  typedef
      typename base_list_type::const_reverse_iterator const_reverse_iterator;

private:
  static bool op_less(const_reference L, const_reference R) { return L < R; }
  static bool op_equal(const_reference L, const_reference R) { return L == R; }

public:
  iplist_impl() = default;

  iplist_impl(const iplist_impl &) = delete;
  iplist_impl &operator=(const iplist_impl &) = delete;

  iplist_impl(iplist_impl &&X)
      : TraitsT(std::move(static_cast<TraitsT &>(X))),
        IntrusiveListT(std::move(static_cast<IntrusiveListT &>(X))) {}
  iplist_impl &operator=(iplist_impl &&X) {
    *static_cast<TraitsT *>(this) = std::move(static_cast<TraitsT &>(X));
    *static_cast<IntrusiveListT *>(this) =
        std::move(static_cast<IntrusiveListT &>(X));
    return *this;
  }

  ~iplist_impl() { clear(); }

  // Miscellaneous inspection routines.
  size_type max_size() const { return size_type(-1); }

  using base_list_type::begin;
  using base_list_type::end;
  using base_list_type::rbegin;
  using base_list_type::rend;
  using base_list_type::empty;
  using base_list_type::front;
  using base_list_type::back;

  void swap(iplist_impl &RHS) {
    assert(0 && "Swap does not use list traits callback correctly yet!");
    base_list_type::swap(RHS);
  }

  iterator insert(iterator where, pointer New) {
    this->addNodeToList(New); // Notify traits that we added a node...
    return base_list_type::insert(where, *New);
  }

  iterator insert(iterator where, const_reference New) {
    return this->insert(where, new value_type(New));
  }

  iterator insertAfter(iterator where, pointer New) {
    if (empty())
      return insert(begin(), New);
    else
      return insert(++where, New);
  }

  /// Clone another list.
  template <class Cloner> void cloneFrom(const iplist_impl &L2, Cloner clone) {
    clear();
    for (const_reference V : L2)
      push_back(clone(V));
  }

  pointer remove(iterator &IT) {
    pointer Node = &*IT++;
    this->removeNodeFromList(Node); // Notify traits that we removed a node...
    base_list_type::remove(*Node);
    return Node;
  }

  pointer remove(const iterator &IT) {
    iterator MutIt = IT;
    return remove(MutIt);
  }

  pointer remove(pointer IT) { return remove(iterator(IT)); }
  pointer remove(reference IT) { return remove(iterator(IT)); }

  // erase - remove a node from the controlled sequence... and delete it.
  iterator erase(iterator where) {
    this->deleteNode(remove(where));
    return where;
  }

  iterator erase(pointer IT) { return erase(iterator(IT)); }
  iterator erase(reference IT) { return erase(iterator(IT)); }

  /// Remove all nodes from the list like clear(), but do not call
  /// removeNodeFromList() or deleteNode().
  ///
  /// This should only be used immediately before freeing nodes in bulk to
  /// avoid traversing the list and bringing all the nodes into cache.
  void clearAndLeakNodesUnsafely() { base_list_type::clear(); }

private:
  // transfer - The heart of the splice function.  Move linked list nodes from
  // [first, last) into position.
  //
  void transfer(iterator position, iplist_impl &L2, iterator first, iterator last) {
    if (position == last)
      return;

    // Notify traits we moved the nodes...
    this->transferNodesFromList(L2, first, last);

    base_list_type::splice(position, L2, first, last);
  }

public:
  //===----------------------------------------------------------------------===
  // Functionality derived from other functions defined above...
  //

  using base_list_type::size;

  iterator erase(iterator first, iterator last) {
    while (first != last)
      first = erase(first);
    return last;
  }

  void clear() { erase(begin(), end()); }

  // Front and back inserters...
  void push_front(pointer val) { insert(begin(), val); }
  void push_back(pointer val) { insert(end(), val); }
  void pop_front() {
    assert(!empty() && "pop_front() on empty list!");
    erase(begin());
  }
  void pop_back() {
    assert(!empty() && "pop_back() on empty list!");
    iterator t = end(); erase(--t);
  }

  // Special forms of insert...
  template<class InIt> void insert(iterator where, InIt first, InIt last) {
    for (; first != last; ++first) insert(where, *first);
  }

  // Splice members - defined in terms of transfer...
  void splice(iterator where, iplist_impl &L2) {
    if (!L2.empty())
      transfer(where, L2, L2.begin(), L2.end());
  }
  void splice(iterator where, iplist_impl &L2, iterator first) {
    iterator last = first; ++last;
    if (where == first || where == last) return; // No change
    transfer(where, L2, first, last);
  }
  void splice(iterator where, iplist_impl &L2, iterator first, iterator last) {
    if (first != last) transfer(where, L2, first, last);
  }
  void splice(iterator where, iplist_impl &L2, reference N) {
    splice(where, L2, iterator(N));
  }
  void splice(iterator where, iplist_impl &L2, pointer N) {
    splice(where, L2, iterator(N));
  }

  template <class Compare>
  void merge(iplist_impl &Right, Compare comp) {
    if (this == &Right)
      return;
    this->transferNodesFromList(Right, Right.begin(), Right.end());
    base_list_type::merge(Right, comp);
  }
  void merge(iplist_impl &Right) { return merge(Right, op_less); }

  using base_list_type::sort;

  /// Get the previous node, or \c nullptr for the list head.
  pointer getPrevNode(reference N) const {
    auto I = N.getIterator();
    if (I == begin())
      return nullptr;
    return &*std::prev(I);
  }
  /// Get the previous node, or \c nullptr for the list head.
  const_pointer getPrevNode(const_reference N) const {
    return getPrevNode(const_cast<reference >(N));
  }

  /// Get the next node, or \c nullptr for the list tail.
  pointer getNextNode(reference N) const {
    auto Next = std::next(N.getIterator());
    if (Next == end())
      return nullptr;
    return &*Next;
  }
  /// Get the next node, or \c nullptr for the list tail.
  const_pointer getNextNode(const_reference N) const {
    return getNextNode(const_cast<reference >(N));
  }
};

/// An intrusive list with ownership and callbacks specified/controlled by
/// ilist_traits, only with API safe for polymorphic types.
///
/// The \p Options parameters are the same as those for \a simple_ilist.  See
/// there for a description of what's available.
template <class T, class... Options>
class iplist
    : public iplist_impl<simple_ilist<T, Options...>, ilist_traits<T>> {
  using iplist_impl_type = typename iplist::iplist_impl;

public:
  iplist() = default;

  iplist(const iplist &X) = delete;
  iplist &operator=(const iplist &X) = delete;

  iplist(iplist &&X) : iplist_impl_type(std::move(X)) {}
  iplist &operator=(iplist &&X) {
    *static_cast<iplist_impl_type *>(this) = std::move(X);
    return *this;
  }
};

template <class T, class... Options> using ilist = iplist<T, Options...>;

} // end namespace llvm

namespace std {

  // Ensure that swap uses the fast list swap...
  template<class Ty>
  void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
    Left.swap(Right);
  }

} // end namespace std

#endif // LLVM_ADT_ILIST_H
//===- llvm/ADT/simple_ilist.h - Simple Intrusive List ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SIMPLE_ILIST_H
#define LLVM_ADT_SIMPLE_ILIST_H

#include "llvm/ADT/ilist_base.h"
#include "llvm/ADT/ilist_iterator.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/ilist_node_options.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <iterator>
#include <utility>

namespace llvm {

/// A simple intrusive list implementation.
///
/// This is a simple intrusive list for a \c T that inherits from \c
/// ilist_node<T>.  The list never takes ownership of anything inserted in it.
///
/// Unlike \a iplist<T> and \a ilist<T>, \a simple_ilist<T> never deletes
/// values, and has no callback traits.
///
/// The API for adding nodes include \a push_front(), \a push_back(), and \a
/// insert().  These all take values by reference (not by pointer), except for
/// the range version of \a insert().
///
/// There are three sets of API for discarding nodes from the list: \a
/// remove(), which takes a reference to the node to remove, \a erase(), which
/// takes an iterator or iterator range and returns the next one, and \a
/// clear(), which empties out the container.  All three are constant time
/// operations.  None of these deletes any nodes; in particular, if there is a
/// single node in the list, then these have identical semantics:
/// \li \c L.remove(L.front());
/// \li \c L.erase(L.begin());
/// \li \c L.clear();
///
/// As a convenience for callers, there are parallel APIs that take a \c
/// Disposer (such as \c std::default_delete<T>): \a removeAndDispose(), \a
/// eraseAndDispose(), and \a clearAndDispose().  These have different names
/// because the extra semantic is otherwise non-obvious.  They are equivalent
/// to calling \a std::for_each() on the range to be discarded.
///
/// The currently available \p Options customize the nodes in the list.  The
/// same options must be specified in the \a ilist_node instantiation for
/// compatibility (although the order is irrelevant).
/// \li Use \a ilist_tag to designate which ilist_node for a given \p T this
/// list should use.  This is useful if a type \p T is part of multiple,
/// independent lists simultaneously.
/// \li Use \a ilist_sentinel_tracking to always (or never) track whether a
/// node is a sentinel.  Specifying \c true enables the \a
/// ilist_node::isSentinel() API.  Unlike \a ilist_node::isKnownSentinel(),
/// which is only appropriate for assertions, \a ilist_node::isSentinel() is
/// appropriate for real logic.
///
/// Here are examples of \p Options usage:
/// \li \c simple_ilist<T> gives the defaults.
/// \li \c simple_ilist<T,ilist_sentinel_tracking<true>> enables the \a
/// ilist_node::isSentinel() API.
/// \li \c simple_ilist<T,ilist_tag<A>,ilist_sentinel_tracking<false>>
/// specifies a tag of A and that tracking should be off (even when
/// LLVM_ENABLE_ABI_BREAKING_CHECKS are enabled).
/// \li \c simple_ilist<T,ilist_sentinel_tracking<false>,ilist_tag<A>> is
/// equivalent to the last.
///
/// See \a is_valid_option for steps on adding a new option.
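///
/// A minimal sketch (illustrative) of a node type and list:
///
/// \code
///   struct MyNode : ilist_node<MyNode> { int Value; };
///
///   MyNode A, B;
///   simple_ilist<MyNode> List;
///   List.push_back(A); // the list references A; it never owns it
///   List.push_back(B);
///   List.remove(A);    // unlinks A; never deletes
/// \endcode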
template <typename T, class... Options>
class simple_ilist
    : ilist_detail::compute_node_options<T, Options...>::type::list_base_type,
      ilist_detail::SpecificNodeAccess<
          typename ilist_detail::compute_node_options<T, Options...>::type> {
  static_assert(ilist_detail::check_options<Options...>::value,
                "Unrecognized node option!");
  using OptionsT =
      typename ilist_detail::compute_node_options<T, Options...>::type;
  using list_base_type = typename OptionsT::list_base_type;
  ilist_sentinel<OptionsT> Sentinel;

public:
  using value_type = typename OptionsT::value_type;
  using pointer = typename OptionsT::pointer;
  using reference = typename OptionsT::reference;
  using const_pointer = typename OptionsT::const_pointer;
  using const_reference = typename OptionsT::const_reference;
  using iterator = ilist_iterator<OptionsT, false, false>;
  using const_iterator = ilist_iterator<OptionsT, false, true>;
  using reverse_iterator = ilist_iterator<OptionsT, true, false>;
  using const_reverse_iterator = ilist_iterator<OptionsT, true, true>;
  using size_type = size_t;
  using difference_type = ptrdiff_t;

  simple_ilist() = default;
  ~simple_ilist() = default;

  // No copy constructors.
  simple_ilist(const simple_ilist &) = delete;
  simple_ilist &operator=(const simple_ilist &) = delete;

  // Move constructors.
  simple_ilist(simple_ilist &&X) { splice(end(), X); }
  simple_ilist &operator=(simple_ilist &&X) {
    clear();
    splice(end(), X);
    return *this;
  }

  iterator begin() { return ++iterator(Sentinel); }
  const_iterator begin() const { return ++const_iterator(Sentinel); }
  iterator end() { return iterator(Sentinel); }
  const_iterator end() const { return const_iterator(Sentinel); }
  reverse_iterator rbegin() { return ++reverse_iterator(Sentinel); }
  const_reverse_iterator rbegin() const {
    return ++const_reverse_iterator(Sentinel);
  }
  reverse_iterator rend() { return reverse_iterator(Sentinel); }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(Sentinel);
  }

  /// Check if the list is empty in constant time.
  [[nodiscard]] bool empty() const { return Sentinel.empty(); }

  /// Calculate the size of the list in linear time.
  [[nodiscard]] size_type size() const { return std::distance(begin(), end()); }

  reference front() { return *begin(); }
  const_reference front() const { return *begin(); }
  reference back() { return *rbegin(); }
  const_reference back() const { return *rbegin(); }

  /// Insert a node at the front; never copies.
  void push_front(reference Node) { insert(begin(), Node); }

  /// Insert a node at the back; never copies.
  void push_back(reference Node) { insert(end(), Node); }

  /// Remove the node at the front; never deletes.
  void pop_front() { erase(begin()); }

  /// Remove the node at the back; never deletes.
  void pop_back() { erase(--end()); }

  /// Swap with another list in place using std::swap.
  void swap(simple_ilist &X) { std::swap(*this, X); }

  /// Insert a node by reference; never copies.
  iterator insert(iterator I, reference Node) {
    list_base_type::insertBefore(*I.getNodePtr(), *this->getNodePtr(&Node));
    return iterator(&Node);
  }

  /// Insert a range of nodes; never copies.
  template <class Iterator>
  void insert(iterator I, Iterator First, Iterator Last) {
    for (; First != Last; ++First)
      insert(I, *First);
  }

  /// Clone another list.
  template <class Cloner, class Disposer>
  void cloneFrom(const simple_ilist &L2, Cloner clone, Disposer dispose) {
    clearAndDispose(dispose);
    for (const_reference V : L2)
      push_back(*clone(V));
  }

  /// Remove a node by reference; never deletes.
  ///
  /// \see \a erase() for removing by iterator.
  /// \see \a removeAndDispose() if the node should be deleted.
  void remove(reference N) { list_base_type::remove(*this->getNodePtr(&N)); }

  /// Remove a node by reference and dispose of it.
  template <class Disposer>
  void removeAndDispose(reference N, Disposer dispose) {
    remove(N);
    dispose(&N);
  }

  /// Remove a node by iterator; never deletes.
  ///
  /// \see \a remove() for removing by reference.
  /// \see \a eraseAndDispose() if the node should be deleted.
  iterator erase(iterator I) {
    assert(I != end() && "Cannot remove end of list!");
    remove(*I++);
    return I;
  }

  /// Remove a range of nodes; never deletes.
  ///
  /// \see \a eraseAndDispose() if the nodes should be deleted.
  iterator erase(iterator First, iterator Last) {
    list_base_type::removeRange(*First.getNodePtr(), *Last.getNodePtr());
    return Last;
  }

  /// Remove a node by iterator and dispose of it.
  template <class Disposer>
  iterator eraseAndDispose(iterator I, Disposer dispose) {
    auto Next = std::next(I);
    erase(I);
    dispose(&*I);
    return Next;
  }

  /// Remove a range of nodes and dispose of them.
  template <class Disposer>
  iterator eraseAndDispose(iterator First, iterator Last, Disposer dispose) {
    while (First != Last)
      First = eraseAndDispose(First, dispose);
    return Last;
  }

  /// Clear the list; never deletes.
  ///
  /// \see \a clearAndDispose() if the nodes should be deleted.
  void clear() { Sentinel.reset(); }

  /// Clear the list and dispose of the nodes.
  template <class Disposer> void clearAndDispose(Disposer dispose) {
    eraseAndDispose(begin(), end(), dispose);
  }

  /// Splice in another list.
  void splice(iterator I, simple_ilist &L2) {
    splice(I, L2, L2.begin(), L2.end());
  }

  /// Splice in a node from another list.
  void splice(iterator I, simple_ilist &L2, iterator Node) {
    splice(I, L2, Node, std::next(Node));
  }

  /// Splice in a range of nodes from another list.
  void splice(iterator I, simple_ilist &, iterator First, iterator Last) {
    list_base_type::transferBefore(*I.getNodePtr(), *First.getNodePtr(),
                                   *Last.getNodePtr());
  }

  /// Merge in another list.
  ///
  /// \pre \c this and \p RHS are sorted.
  ///@{
  void merge(simple_ilist &RHS) { merge(RHS, std::less<T>()); }
  template <class Compare> void merge(simple_ilist &RHS, Compare comp);
  ///@}

  /// Sort the list.
  ///@{
  void sort() { sort(std::less<T>()); }
  template <class Compare> void sort(Compare comp);
  ///@}
};

template <class T, class... Options>
template <class Compare>
void simple_ilist<T, Options...>::merge(simple_ilist &RHS, Compare comp) {
  if (this == &RHS || RHS.empty())
    return;
  iterator LI = begin(), LE = end();
  iterator RI = RHS.begin(), RE = RHS.end();
  while (LI != LE) {
    if (comp(*RI, *LI)) {
      // Transfer a run of at least size 1 from RHS to LHS.
      iterator RunStart = RI++;
      RI = std::find_if(RI, RE, [&](reference RV) { return !comp(RV, *LI); });
      splice(LI, RHS, RunStart, RI);
      if (RI == RE)
        return;
    }
    ++LI;
  }
  // Transfer the remaining RHS nodes once LHS is finished.
  splice(LE, RHS, RI, RE);
}

template <class T, class... Options>
template <class Compare>
void simple_ilist<T, Options...>::sort(Compare comp) {
  // Vacuously sorted.
  if (empty() || std::next(begin()) == end())
    return;

  // Split the list in the middle.
  iterator Center = begin(), End = begin();
  while (End != end() && ++End != end()) {
    ++Center;
    ++End;
  }
  simple_ilist RHS;
  RHS.splice(RHS.end(), *this, Center, end());

  // Sort the sublists and merge back together.
  sort(comp);
  RHS.sort(comp);
  merge(RHS, comp);
}

} // end namespace llvm

#endif // LLVM_ADT_SIMPLE_ILIST_H
//===-- llvm/ADT/BitmaskEnum.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_BITMASKENUM_H
#define LLVM_ADT_BITMASKENUM_H

#include <cassert>
#include <type_traits>
#include <utility>

#include "llvm/Support/MathExtras.h"

/// LLVM_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you can
/// perform bitwise operations on it without putting static_cast everywhere.
///
/// \code
///   enum MyEnum {
///     E1 = 1, E2 = 2, E3 = 4, E4 = 8,
///     LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
///   };
///
///   void Foo() {
///     MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
///   }
/// \endcode
///
/// Normally when you do a bitwise operation on an enum value, you get back an
/// instance of the underlying type (e.g. int).  But using this macro, bitwise
/// ops on your enum will return you back instances of the enum.  This is
/// particularly useful for enums which represent a combination of flags.
///
/// The parameter to LLVM_MARK_AS_BITMASK_ENUM should be the largest individual
/// value in your enum.
///
/// All of the enum's values must be non-negative.
#define LLVM_MARK_AS_BITMASK_ENUM(LargestValue)                                \
  LLVM_BITMASK_LARGEST_ENUMERATOR = LargestValue

/// LLVM_DECLARE_ENUM_AS_BITMASK can be used to declare an enum type as a bit
/// set, so that bitwise operation on such enum does not require static_cast.
///
/// \code
///   enum MyEnum { E1 = 1, E2 = 2, E3 = 4, E4 = 8 };
///   LLVM_DECLARE_ENUM_AS_BITMASK(MyEnum, E4);
///
///   void Foo() {
///     MyEnum A = (E1 | E2) & E3 ^ ~E4; // No static_cast
///   }
/// \endcode
///
/// The second parameter to LLVM_DECLARE_ENUM_AS_BITMASK specifies the largest
/// bit value of the enum type.
///
/// LLVM_DECLARE_ENUM_AS_BITMASK should be used in llvm namespace.
///
/// This is a non-intrusive alternative to LLVM_MARK_AS_BITMASK_ENUM. It allows
/// declaring more than one non-scoped enumeration as a bitmask type in the same
/// scope. Otherwise it provides the same functionality as
/// LLVM_MARK_AS_BITMASK_ENUM.
#define LLVM_DECLARE_ENUM_AS_BITMASK(Enum, LargestValue)                       \
  template <> struct is_bitmask_enum<Enum> : std::true_type {};                \
  template <> struct largest_bitmask_enum_bit<Enum> {                          \
    static constexpr std::underlying_type_t<Enum> value = LargestValue;        \
  }

/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() pulls the operator overloads used
/// by LLVM_MARK_AS_BITMASK_ENUM into the current namespace.
///
/// Suppose you have an enum foo::bar::MyEnum.  Before using
/// LLVM_MARK_AS_BITMASK_ENUM on MyEnum, you must put
/// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE() somewhere inside namespace foo or
/// namespace foo::bar.  This allows the relevant operator overloads to be found
/// by ADL.
///
/// You don't need to use this macro in namespace llvm; it's done at the bottom
/// of this file.
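///
/// A minimal sketch (the namespaces and enum here are illustrative):
/// \code
///   namespace foo {
///   LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
///   namespace bar {
///   enum MyEnum { E1 = 1, E2 = 2, LLVM_MARK_AS_BITMASK_ENUM(E2) };
///   inline MyEnum both() { return E1 | E2; } // Found via the usings above.
///   } // namespace bar
///   } // namespace foo
/// \endcode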
#define LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE()                               \
  using ::llvm::BitmaskEnumDetail::operator~;                                  \
  using ::llvm::BitmaskEnumDetail::operator|;                                  \
  using ::llvm::BitmaskEnumDetail::operator&;                                  \
  using ::llvm::BitmaskEnumDetail::operator^;                                  \
  using ::llvm::BitmaskEnumDetail::operator|=;                                 \
  using ::llvm::BitmaskEnumDetail::operator&=;                                 \
  /* Force a semicolon at the end of this macro. */                            \
  using ::llvm::BitmaskEnumDetail::operator^=

namespace llvm {

/// Traits class to determine whether an enum has an
/// LLVM_BITMASK_LARGEST_ENUMERATOR enumerator.
template <typename E, typename Enable = void>
struct is_bitmask_enum : std::false_type {};

template <typename E>
struct is_bitmask_enum<
    E, std::enable_if_t<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >= 0>>
    : std::true_type {};

/// Trait class to determine a bitmask enumeration's largest bit.
template <typename E, typename Enable = void> struct largest_bitmask_enum_bit;

template <typename E>
struct largest_bitmask_enum_bit<
    E, std::enable_if_t<sizeof(E::LLVM_BITMASK_LARGEST_ENUMERATOR) >= 0>> {
  using UnderlyingTy = std::underlying_type_t<E>;
  static constexpr UnderlyingTy value =
      static_cast<UnderlyingTy>(E::LLVM_BITMASK_LARGEST_ENUMERATOR);
};

namespace BitmaskEnumDetail {

/// Get a bitmask with 1s in all places up to the high-order bit of E's largest
/// value.
template <typename E> constexpr std::underlying_type_t<E> Mask() {
  // On overflow, NextPowerOf2 returns zero with the type uint64_t, so
  // subtracting 1 gives us the mask with all bits set, like we want.
  return NextPowerOf2(largest_bitmask_enum_bit<E>::value) - 1;
}

/// Check that Val is in range for E, and return Val cast to E's underlying
/// type.
template <typename E> constexpr std::underlying_type_t<E> Underlying(E Val) {
  auto U = static_cast<std::underlying_type_t<E>>(Val);
  assert(U >= 0 && "Negative enum values are not allowed.");
  assert(U <= Mask<E>() && "Enum value too large (or largest val too small?)");
  return U;
}

constexpr unsigned bitWidth(uint64_t Value) {
  return Value ? 1 + bitWidth(Value >> 1) : 0;
}

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
constexpr E operator~(E Val) {
  return static_cast<E>(~Underlying(Val) & Mask<E>());
}

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
constexpr E operator|(E LHS, E RHS) {
  return static_cast<E>(Underlying(LHS) | Underlying(RHS));
}

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
constexpr E operator&(E LHS, E RHS) {
  return static_cast<E>(Underlying(LHS) & Underlying(RHS));
}

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
constexpr E operator^(E LHS, E RHS) {
  return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
}

// |=, &=, and ^= return a reference to LHS, to match the behavior of the
// operators on builtin types.

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E &operator|=(E &LHS, E RHS) {
  LHS = LHS | RHS;
  return LHS;
}

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E &operator&=(E &LHS, E RHS) {
  LHS = LHS & RHS;
  return LHS;
}

template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
E &operator^=(E &LHS, E RHS) {
  LHS = LHS ^ RHS;
  return LHS;
}

} // namespace BitmaskEnumDetail

// Enable bitmask enums in namespace ::llvm and all nested namespaces.
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
template <typename E, typename = std::enable_if_t<is_bitmask_enum<E>::value>>
constexpr unsigned BitWidth = BitmaskEnumDetail::bitWidth(uint64_t{
    static_cast<std::underlying_type_t<E>>(
        E::LLVM_BITMASK_LARGEST_ENUMERATOR)});

} // namespace llvm

#endif // LLVM_ADT_BITMASKENUM_H
//===- llvm/ADT/StringExtras.h - Useful string functions --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains some functions that are useful when dealing with strings.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H

#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <string>
#include <utility>

namespace llvm {

class raw_ostream;

/// hexdigit - Return the hexadecimal character for the
/// given number \p X (which should be less than 16).
inline char hexdigit(unsigned X, bool LowerCase = false) {
  assert(X < 16);
  static const char LUT[] = "0123456789ABCDEF";
  const uint8_t Offset = LowerCase ? 32 : 0;
  return LUT[X] | Offset;
}

/// Given an array of c-style strings terminated by a null pointer, construct
/// a vector of StringRefs representing the same strings without the
/// terminating null pointer.
inline std::vector<StringRef> toStringRefArray(const char *const *Strings) {
  std::vector<StringRef> Result;
  while (*Strings)
    Result.push_back(*Strings++);
  return Result;
}

/// Construct a string ref from a boolean.
inline StringRef toStringRef(bool B) { return StringRef(B ? "true" : "false"); }

/// Construct a string ref from an array ref of unsigned chars.
inline StringRef toStringRef(ArrayRef<uint8_t> Input) {
  return StringRef(reinterpret_cast<const char *>(Input.begin()), Input.size());
}

/// Construct an array ref of unsigned chars from a string ref.
inline ArrayRef<uint8_t> arrayRefFromStringRef(StringRef Input) {
  return {Input.bytes_begin(), Input.bytes_end()};
}

/// Interpret the given character \p C as a hexadecimal digit and return its
/// value.
///
/// If \p C is not a valid hex digit, -1U is returned.
inline unsigned hexDigitValue(char C) {
  /* clang-format off */
  static const int16_t LUT[256] = {
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
     0,  1,  2,  3,  4,  5,  6,  7,  8,  9, -1, -1, -1, -1, -1, -1,  // '0'..'9'
    -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 'A'..'F'
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 'a'..'f'
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  };
  /* clang-format on */
  return LUT[static_cast<unsigned char>(C)];
}

/// Checks if character \p C is one of the 10 decimal digits.
inline bool isDigit(char C) { return C >= '0' && C <= '9'; }

/// Checks if character \p C is a hexadecimal numeric character.
inline bool isHexDigit(char C) { return hexDigitValue(C) != ~0U; }

/// Checks if character \p C is a valid letter as classified by "C" locale.
inline bool isAlpha(char C) {
  return ('a' <= C && C <= 'z') || ('A' <= C && C <= 'Z');
}

/// Checks whether character \p C is either a decimal digit or an uppercase or
/// lowercase letter as classified by "C" locale.
inline bool isAlnum(char C) { return isAlpha(C) || isDigit(C); }

/// Checks whether character \p C is valid ASCII (high bit is zero).
inline bool isASCII(char C) { return static_cast<unsigned char>(C) <= 127; }

/// Checks whether all characters in S are ASCII.
inline bool isASCII(llvm::StringRef S) {
  for (char C : S)
    if (LLVM_UNLIKELY(!isASCII(C)))
      return false;
  return true;
}

/// Checks whether character \p C is printable.
///
/// Locale-independent version of the C standard library isprint whose results
/// may differ on different platforms.
inline bool isPrint(char C) {
  unsigned char UC = static_cast<unsigned char>(C);
  return (0x20 <= UC) && (UC <= 0x7E);
}

/// Checks whether character \p C is whitespace in the "C" locale.
///
/// Locale-independent version of the C standard library isspace.
inline bool isSpace(char C) {
  return C == ' ' || C == '\f' || C == '\n' || C == '\r' || C == '\t' ||
         C == '\v';
}

/// Returns the corresponding lowercase character if \p x is uppercase.
inline char toLower(char x) {
  if (x >= 'A' && x <= 'Z')
    return x - 'A' + 'a';
  return x;
}

/// Returns the corresponding uppercase character if \p x is lowercase.
inline char toUpper(char x) {
  if (x >= 'a' && x <= 'z')
    return x - 'a' + 'A';
  return x;
}

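/// Convert \p X to a string of hexadecimal digits. If \p Width is nonzero,
/// \p Width digits are produced; \p Width must not exceed 16, the capacity
/// of the internal buffer.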
inline std::string utohexstr(uint64_t X, bool LowerCase = false,
                             unsigned Width = 0) {
  char Buffer[17];
  char *BufPtr = std::end(Buffer);

  if (X == 0) *--BufPtr = '0';

  for (unsigned i = 0; Width ? (i < Width) : X; ++i) {
    unsigned char Mod = static_cast<unsigned char>(X) & 15;
    *--BufPtr = hexdigit(Mod, LowerCase);
    X >>= 4;
  }

  return std::string(BufPtr, std::end(Buffer));
}

/// Convert buffer \p Input to its hexadecimal representation and store the
/// result in \p Output. The output is double the size of \p Input.
inline void toHex(ArrayRef<uint8_t> Input, bool LowerCase,
                  SmallVectorImpl<char> &Output) {
  const size_t Length = Input.size();
  Output.resize_for_overwrite(Length * 2);

  for (size_t i = 0; i < Length; i++) {
    const uint8_t c = Input[i];
    Output[i * 2    ] = hexdigit(c >> 4, LowerCase);
    Output[i * 2 + 1] = hexdigit(c & 15, LowerCase);
  }
}

inline std::string toHex(ArrayRef<uint8_t> Input, bool LowerCase = false) {
  SmallString<16> Output;
  toHex(Input, LowerCase, Output);
  return std::string(Output);
}

inline std::string toHex(StringRef Input, bool LowerCase = false) {
  return toHex(arrayRefFromStringRef(Input), LowerCase);
}

/// Store into \p Hex the byte whose high and low nibbles are given by the two
/// hexadecimal digit characters \p MSB and \p LSB. If \p MSB or \p LSB is not
/// a valid hexadecimal digit, this function returns false. Otherwise, it
/// returns true.
inline bool tryGetHexFromNibbles(char MSB, char LSB, uint8_t &Hex) {
  unsigned U1 = hexDigitValue(MSB);
  unsigned U2 = hexDigitValue(LSB);
  if (U1 == ~0U || U2 == ~0U)
    return false;

  Hex = static_cast<uint8_t>((U1 << 4) | U2);
  return true;
}

/// Return the byte whose high and low nibbles are given by the two
/// hexadecimal digit characters \p MSB and \p LSB.
inline uint8_t hexFromNibbles(char MSB, char LSB) {
  uint8_t Hex = 0;
  bool GotHex = tryGetHexFromNibbles(MSB, LSB, Hex);
  (void)GotHex;
  assert(GotHex && "MSB and/or LSB do not correspond to hex digits");
  return Hex;
}

/// Convert hexadecimal string \p Input to its binary representation and store
/// the result in \p Output. Returns true if the binary representation could be
/// converted from the hexadecimal string. Returns false if \p Input contains
/// non-hexadecimal digits. The output string is half the size of \p Input.
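///
/// A usage sketch (illustrative):
/// \code
///   std::string Bytes;
///   if (tryGetFromHex("9C0A", Bytes))
///     ...; // Bytes holds the two bytes 0x9C and 0x0A.
/// \endcode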
inline bool tryGetFromHex(StringRef Input, std::string &Output) {
  if (Input.empty())
    return true;

  // If the input string is not properly aligned on 2 nibbles we pad out the
  // front with a 0 prefix; e.g. `ABC` -> `0ABC`.
  Output.resize((Input.size() + 1) / 2);
  char *OutputPtr = const_cast<char *>(Output.data());
  if (Input.size() % 2 == 1) {
    uint8_t Hex = 0;
    if (!tryGetHexFromNibbles('0', Input.front(), Hex))
      return false;
    *OutputPtr++ = Hex;
    Input = Input.drop_front();
  }

  // Convert the nibble pairs (e.g. `9C`) into bytes (0x9C).
  // With the padding above we know the input is aligned and the output expects
  // exactly half as many bytes as nibbles in the input.
  size_t InputSize = Input.size();
  assert(InputSize % 2 == 0);
  const char *InputPtr = Input.data();
  for (size_t OutputIndex = 0; OutputIndex < InputSize / 2; ++OutputIndex) {
    uint8_t Hex = 0;
    if (!tryGetHexFromNibbles(InputPtr[OutputIndex * 2 + 0], // MSB
                              InputPtr[OutputIndex * 2 + 1], // LSB
                              Hex))
      return false;
    OutputPtr[OutputIndex] = Hex;
  }
  return true;
}

/// Convert hexadecimal string \p Input to its binary representation.
/// The returned string is half the size of \p Input.
inline std::string fromHex(StringRef Input) {
  std::string Hex;
  bool GotHex = tryGetFromHex(Input, Hex);
  (void)GotHex;
  assert(GotHex && "Input contains non hex digits");
  return Hex;
}

/// Convert the string \p S to an integer of the specified type using
/// the radix \p Base.  If \p Base is 0, auto-detects the radix.
/// Returns true if the number was successfully converted, false otherwise.
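///
/// A usage sketch (illustrative):
/// \code
///   int N;
///   if (to_integer("0x1F", N))
///     ...; // N == 31; the radix is auto-detected from the "0x" prefix.
/// \endcode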
template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {
  return !S.getAsInteger(Base, Num);
}

namespace detail {
template <typename N>
inline bool to_float(const Twine &T, N &Num,
                     N (*StrTo)(const char *, char **)) {
  SmallString<32> Storage;
  StringRef S = T.toNullTerminatedStringRef(Storage);
  char *End;
  N Temp = StrTo(S.data(), &End);
  if (*End != '\0')
    return false;
  Num = Temp;
  return true;
}
} // end namespace detail

inline bool to_float(const Twine &T, float &Num) {
  return detail::to_float(T, Num, strtof);
}

inline bool to_float(const Twine &T, double &Num) {
  return detail::to_float(T, Num, strtod);
}

inline bool to_float(const Twine &T, long double &Num) {
  return detail::to_float(T, Num, strtold);
}

inline std::string utostr(uint64_t X, bool isNeg = false) {
  char Buffer[21];
  char *BufPtr = std::end(Buffer);

  if (X == 0) *--BufPtr = '0';  // Handle special case...

  while (X) {
    *--BufPtr = '0' + char(X % 10);
    X /= 10;
  }

  if (isNeg) *--BufPtr = '-';   // Add negative sign...
  return std::string(BufPtr, std::end(Buffer));
}

inline std::string itostr(int64_t X) {
  if (X < 0)
    return utostr(static_cast<uint64_t>(1) + ~static_cast<uint64_t>(X), true);
  else
    return utostr(static_cast<uint64_t>(X));
}

inline std::string toString(const APInt &I, unsigned Radix, bool Signed,
                            bool formatAsCLiteral = false) {
  SmallString<40> S;
  I.toString(S, Radix, Signed, formatAsCLiteral);
  return std::string(S.str());
}

inline std::string toString(const APSInt &I, unsigned Radix) {
  return toString(I, Radix, I.isSigned());
}

/// StrInStrNoCase - Portable version of strcasestr.  Locates the first
/// occurrence of string 's2' in string 's1', ignoring case.  Returns
/// the offset of s2 in s1 or npos if s2 cannot be found.
StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);

/// getToken - This function extracts one token from source, ignoring any
/// leading characters that appear in the Delimiters string, and ending the
/// token at any of the characters that appear in the Delimiters string.  If
/// there are no tokens in the source string, an empty string is returned.
/// The function returns a pair containing the extracted token and the
/// remaining tail string.
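///
/// For example (illustrative):
/// \code
///   auto [Token, Rest] = getToken("  foo bar");
///   // Token == "foo", Rest == " bar".
/// \endcode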
std::pair<StringRef, StringRef> getToken(StringRef Source,
                                         StringRef Delimiters = " \t\n\v\f\r");

/// SplitString - Split up the specified string according to the specified
/// delimiters, appending the result fragments to the output list.
void SplitString(StringRef Source,
                 SmallVectorImpl<StringRef> &OutFragments,
                 StringRef Delimiters = " \t\n\v\f\r");

/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
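/// For example (illustrative): 1 -> "st", 2 -> "nd", 13 -> "th", 21 -> "st",
/// 112 -> "th".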
inline StringRef getOrdinalSuffix(unsigned Val) {
  // It is critically important that we do this perfectly for
  // user-written sequences with over 100 elements.
  switch (Val % 100) {
  case 11:
  case 12:
  case 13:
    return "th";
  default:
    switch (Val % 10) {
      case 1: return "st";
      case 2: return "nd";
      case 3: return "rd";
      default: return "th";
    }
  }
}

/// Print each character of the specified string, escaping it if it is not
/// printable or if it is an escape char.
void printEscapedString(StringRef Name, raw_ostream &Out);

/// Print each character of the specified string, escaping HTML special
/// characters.
void printHTMLEscaped(StringRef String, raw_ostream &Out);

/// printLowerCase - Print each character as lowercase if it is uppercase.
void printLowerCase(StringRef String, raw_ostream &Out);

/// Converts a string from camel-case to snake-case by replacing all uppercase
/// letters with '_' followed by the letter in lowercase, except if the
/// uppercase letter is the first character of the string.
std::string convertToSnakeFromCamelCase(StringRef input);

/// Converts a string from snake-case to camel-case by replacing all occurrences
/// of '_' followed by a lowercase letter with the letter in uppercase.
/// Optionally allow capitalization of the first letter (if it is a lowercase
/// letter).
std::string convertToCamelFromSnakeCase(StringRef input,
                                        bool capitalizeFirst = false);

namespace detail {

template <typename IteratorT>
inline std::string join_impl(IteratorT Begin, IteratorT End,
                             StringRef Separator, std::input_iterator_tag) {
  std::string S;
  if (Begin == End)
    return S;

  S += (*Begin);
  while (++Begin != End) {
    S += Separator;
    S += (*Begin);
  }
  return S;
}

template <typename IteratorT>
inline std::string join_impl(IteratorT Begin, IteratorT End,
                             StringRef Separator, std::forward_iterator_tag) {
  std::string S;
  if (Begin == End)
    return S;

  size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
  for (IteratorT I = Begin; I != End; ++I)
    Len += (*I).size();
  S.reserve(Len);
  size_t PrevCapacity = S.capacity();
  (void)PrevCapacity;
  S += (*Begin);
  while (++Begin != End) {
    S += Separator;
    S += (*Begin);
  }
  assert(PrevCapacity == S.capacity() && "String grew during building");
  return S;
}

template <typename Sep>
inline void join_items_impl(std::string &Result, Sep Separator) {}

template <typename Sep, typename Arg>
inline void join_items_impl(std::string &Result, Sep Separator,
                            const Arg &Item) {
  Result += Item;
}

template <typename Sep, typename Arg1, typename... Args>
inline void join_items_impl(std::string &Result, Sep Separator, const Arg1 &A1,
                            Args &&... Items) {
  Result += A1;
  Result += Separator;
  join_items_impl(Result, Separator, std::forward<Args>(Items)...);
}

inline size_t join_one_item_size(char) { return 1; }
inline size_t join_one_item_size(const char *S) { return S ? ::strlen(S) : 0; }

template <typename T> inline size_t join_one_item_size(const T &Str) {
  return Str.size();
}

template <typename... Args> inline size_t join_items_size(Args &&...Items) {
  return (0 + ... + join_one_item_size(std::forward<Args>(Items)));
}

} // end namespace detail

/// Joins the strings in the range [Begin, End), adding Separator between
/// the elements.
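///
/// A usage sketch (illustrative):
/// \code
///   std::vector<std::string> V = {"a", "b", "c"};
///   std::string S = join(V.begin(), V.end(), ", "); // S == "a, b, c"
/// \endcode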
template <typename IteratorT>
inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
  using tag = typename std::iterator_traits<IteratorT>::iterator_category;
  return detail::join_impl(Begin, End, Separator, tag());
}

/// Joins the strings in the range [R.begin(), R.end()), adding Separator
/// between the elements.
template <typename Range>
inline std::string join(Range &&R, StringRef Separator) {
  return join(R.begin(), R.end(), Separator);
}

/// Joins the strings in the parameter pack \p Items, adding \p Separator
/// between the elements.  All arguments must be implicitly convertible to
/// std::string, or there should be an overload of std::string::operator+=()
/// that accepts the argument explicitly.
template <typename Sep, typename... Args>
inline std::string join_items(Sep Separator, Args &&... Items) {
  std::string Result;
  if (sizeof...(Items) == 0)
    return Result;

  size_t NS = detail::join_one_item_size(Separator);
  size_t NI = detail::join_items_size(std::forward<Args>(Items)...);
  Result.reserve(NI + (sizeof...(Items) - 1) * NS + 1);
  detail::join_items_impl(Result, Separator, std::forward<Args>(Items)...);
  return Result;
}

/// A helper class to return the specified delimiter string after the first
/// invocation of operator StringRef().  Used to generate a comma-separated
/// list from a loop like so:
///
/// \code
///   ListSeparator LS;
///   for (auto &I : C)
///     OS << LS << I.getName();
/// \endcode
class ListSeparator {
  bool First = true;
  StringRef Separator;

public:
  ListSeparator(StringRef Separator = ", ") : Separator(Separator) {}
  operator StringRef() {
    if (First) {
      First = false;
      return {};
    }
    return Separator;
  }
};

/// A forward iterator over the partitions of a string, split on a separator.
class SplittingIterator
    : public iterator_facade_base<SplittingIterator, std::forward_iterator_tag,
                                  StringRef> {
  char SeparatorStorage;
  StringRef Current;
  StringRef Next;
  StringRef Separator;

public:
  SplittingIterator(StringRef Str, StringRef Separator)
      : Next(Str), Separator(Separator) {
    ++*this;
  }

  SplittingIterator(StringRef Str, char Separator)
      : SeparatorStorage(Separator), Next(Str),
        Separator(&SeparatorStorage, 1) {
    ++*this;
  }

  SplittingIterator(const SplittingIterator &R)
      : SeparatorStorage(R.SeparatorStorage), Current(R.Current), Next(R.Next),
        Separator(R.Separator) {
    if (R.Separator.data() == &R.SeparatorStorage)
      Separator = StringRef(&SeparatorStorage, 1);
  }

  SplittingIterator &operator=(const SplittingIterator &R) {
    if (this == &R)
      return *this;

    SeparatorStorage = R.SeparatorStorage;
    Current = R.Current;
    Next = R.Next;
    Separator = R.Separator;
    if (R.Separator.data() == &R.SeparatorStorage)
      Separator = StringRef(&SeparatorStorage, 1);
    return *this;
  }

  bool operator==(const SplittingIterator &R) const {
    assert(Separator == R.Separator);
    return Current.data() == R.Current.data();
  }

  const StringRef &operator*() const { return Current; }

  StringRef &operator*() { return Current; }

  SplittingIterator &operator++() {
    std::tie(Current, Next) = Next.split(Separator);
    return *this;
  }
};

/// Split the specified string over a separator and return a range-compatible
/// iterable over its partitions.  Used to permit conveniently iterating
/// over separated strings like so:
///
/// \code
///   for (StringRef x : llvm::split("foo,bar,baz", ","))
///     ...;
/// \endcode
///
/// Note that the passed string must remain valid throughout the lifetime
/// of the iterators.
inline iterator_range<SplittingIterator> split(StringRef Str,
                                               StringRef Separator) {
  return {SplittingIterator(Str, Separator),
          SplittingIterator(StringRef(), Separator)};
}

inline iterator_range<SplittingIterator> split(StringRef Str, char Separator) {
  return {SplittingIterator(Str, Separator),
          SplittingIterator(StringRef(), Separator)};
}

} // end namespace llvm

#endif // LLVM_ADT_STRINGEXTRAS_H
//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the PriorityQueue class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_PRIORITYQUEUE_H
#define LLVM_ADT_PRIORITYQUEUE_H

#include "llvm/ADT/STLExtras.h" // for llvm::find, used by erase_one below.
#include <algorithm>
#include <queue>

namespace llvm {

/// PriorityQueue - This class behaves like std::priority_queue and
/// provides a few additional convenience functions.
///
template<class T,
         class Sequence = std::vector<T>,
         class Compare = std::less<typename Sequence::value_type> >
class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
public:
  explicit PriorityQueue(const Compare &compare = Compare(),
                         const Sequence &sequence = Sequence())
    : std::priority_queue<T, Sequence, Compare>(compare, sequence)
  {}

  template<class Iterator>
  PriorityQueue(Iterator begin, Iterator end,
                const Compare &compare = Compare(),
                const Sequence &sequence = Sequence())
    : std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence)
  {}

  /// erase_one - Erase one element from the queue, regardless of its
  /// position. This operation performs a linear search to find an element
  /// equal to t, but then uses logarithmic-time heap operations to perform
  /// the actual erase.
  ///
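  /// A usage sketch (illustrative):
  /// \code
  ///   PriorityQueue<int> Q;
  ///   Q.push(3); Q.push(1); Q.push(4);
  ///   Q.erase_one(1); // Q now holds 3 and 4.
  /// \endcode
  ///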
  void erase_one(const T &t) {
    // Linear-search to find the element.
    typename Sequence::size_type i = find(this->c, t) - this->c.begin();

    // Logarithmic-time heap bubble-up.
    while (i != 0) {
      typename Sequence::size_type parent = (i - 1) / 2;
      this->c[i] = this->c[parent];
      i = parent;
    }

    // The element we want to remove is now at the root, so we can use
    // priority_queue's plain pop to remove it.
    this->pop();
  }

  /// reheapify - If an element in the queue has changed in a way that
  /// affects its standing in the comparison function, the queue's
  /// internal state becomes invalid. Calling reheapify() resets the
  /// queue's state, making it valid again. This operation has time
  /// complexity proportional to the number of elements in the queue,
  /// so don't plan to use it a lot.
  ///
  void reheapify() {
    std::make_heap(this->c.begin(), this->c.end(), this->comp);
  }

  /// clear - Erase all elements from the queue.
  ///
  void clear() {
    this->c.clear();
  }
};

} // end namespace llvm

#endif // LLVM_ADT_PRIORITYQUEUE_H
//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains some templates that are useful if you are working with
/// the STL at all.
///
/// No library is required when using these functions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STLEXTRAS_H
#define LLVM_ADT_STLEXTRAS_H

#include "llvm/ADT/ADL.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/identity.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <tuple>
#include <type_traits>
#include <utility>

#ifdef EXPENSIVE_CHECKS
#include <random> // for std::mt19937
#endif

namespace llvm {

//===----------------------------------------------------------------------===//
//     Extra additions to <type_traits>
//===----------------------------------------------------------------------===//

template <typename T> struct make_const_ptr {
  using type = std::add_pointer_t<std::add_const_t<T>>;
};

template <typename T> struct make_const_ref {
  using type = std::add_lvalue_reference_t<std::add_const_t<T>>;
};

namespace detail {
template <class, template <class...> class Op, class... Args> struct detector {
  using value_t = std::false_type;
};
template <template <class...> class Op, class... Args>
struct detector<std::void_t<Op<Args...>>, Op, Args...> {
  using value_t = std::true_type;
};
} // end namespace detail

/// Detects if a given trait holds for some set of arguments 'Args'.
/// For example, the given trait could be used to detect if a given type
/// has a copy assignment operator:
///   template<class T>
///   using has_copy_assign_t = decltype(std::declval<T&>()
///                                                 = std::declval<const T&>());
///   bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
template <template <class...> class Op, class... Args>
using is_detected = typename detail::detector<void, Op, Args...>::value_t;

/// This class provides various trait information about a callable object.
///   * To access the number of arguments: Traits::num_args
///   * To access the type of an argument: Traits::arg_t<Index>
///   * To access the type of the result:  Traits::result_t
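///
/// A usage sketch (illustrative):
/// \code
///   using Traits = function_traits<int (*)(float, bool)>;
///   static_assert(Traits::num_args == 2, "");
///   static_assert(std::is_same_v<Traits::result_t, int>, "");
///   static_assert(std::is_same_v<Traits::arg_t<0>, float>, "");
/// \endcode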
template <typename T, bool isClass = std::is_class<T>::value>
struct function_traits : public function_traits<decltype(&T::operator())> {};

/// Overload for class function types.
template <typename ClassType, typename ReturnType, typename... Args>
struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
  /// The number of arguments to this function.
  enum { num_args = sizeof...(Args) };

  /// The result type of this function.
  using result_t = ReturnType;

  /// The type of an argument to this function.
  template <size_t Index>
  using arg_t = std::tuple_element_t<Index, std::tuple<Args...>>;
};
/// Overload for class function types.
template <typename ClassType, typename ReturnType, typename... Args>
struct function_traits<ReturnType (ClassType::*)(Args...), false>
    : public function_traits<ReturnType (ClassType::*)(Args...) const> {};
/// Overload for non-class function types.
template <typename ReturnType, typename... Args>
struct function_traits<ReturnType (*)(Args...), false> {
  /// The number of arguments to this function.
  enum { num_args = sizeof...(Args) };

  /// The result type of this function.
  using result_t = ReturnType;

  /// The type of an argument to this function.
  template <size_t i>
  using arg_t = std::tuple_element_t<i, std::tuple<Args...>>;
};
template <typename ReturnType, typename... Args>
struct function_traits<ReturnType (*const)(Args...), false>
    : public function_traits<ReturnType (*)(Args...)> {};
/// Overload for non-class function type references.
template <typename ReturnType, typename... Args>
struct function_traits<ReturnType (&)(Args...), false>
    : public function_traits<ReturnType (*)(Args...)> {};

/// traits class for checking whether type T is one of any of the given
/// types in the variadic list.
template <typename T, typename... Ts>
using is_one_of = std::disjunction<std::is_same<T, Ts>...>;

/// traits class for checking whether type T is a base class for all
///  the given types in the variadic list.
template <typename T, typename... Ts>
using are_base_of = std::conjunction<std::is_base_of<T, Ts>...>;

namespace detail {
template <typename T, typename... Us> struct TypesAreDistinct;
template <typename T, typename... Us>
struct TypesAreDistinct
    : std::integral_constant<bool, !is_one_of<T, Us...>::value &&
                                       TypesAreDistinct<Us...>::value> {};
template <typename T> struct TypesAreDistinct<T> : std::true_type {};
} // namespace detail

/// Determine if all types in Ts are distinct.
///
/// Useful to statically assert when Ts is intended to describe a non-multi set
/// of types.
///
/// Expensive (currently quadratic in sizeof(Ts...)), and so should only be
/// asserted once per instantiation of a type which requires it.
template <typename... Ts> struct TypesAreDistinct;
template <> struct TypesAreDistinct<> : std::true_type {};
template <typename... Ts>
struct TypesAreDistinct
    : std::integral_constant<bool, detail::TypesAreDistinct<Ts...>::value> {};

/// Find the first index where a type appears in a list of types.
///
/// FirstIndexOfType<T, Us...>::value is the first index of T in Us.
///
/// Typically only meaningful when it is otherwise statically known that the
/// type pack has no duplicate types. This should be guaranteed explicitly with
/// static_assert(TypesAreDistinct<Us...>::value).
///
/// It is a compile-time error to instantiate when T is not present in Us, i.e.
/// if is_one_of<T, Us...>::value is false.
template <typename T, typename... Us> struct FirstIndexOfType;
template <typename T, typename U, typename... Us>
struct FirstIndexOfType<T, U, Us...>
    : std::integral_constant<size_t, 1 + FirstIndexOfType<T, Us...>::value> {};
template <typename T, typename... Us>
struct FirstIndexOfType<T, T, Us...> : std::integral_constant<size_t, 0> {};

/// Find the type at a given index in a list of types.
///
/// TypeAtIndex<I, Ts...> is the type at index I in Ts.
template <size_t I, typename... Ts>
using TypeAtIndex = std::tuple_element_t<I, std::tuple<Ts...>>;

/// Helper which adds two underlying types of enumeration type.
/// Implicit conversion to a common type is accepted.
template <typename EnumTy1, typename EnumTy2,
          typename UT1 = std::enable_if_t<std::is_enum<EnumTy1>::value,
                                          std::underlying_type_t<EnumTy1>>,
          typename UT2 = std::enable_if_t<std::is_enum<EnumTy2>::value,
                                          std::underlying_type_t<EnumTy2>>>
constexpr auto addEnumValues(EnumTy1 LHS, EnumTy2 RHS) {
  return static_cast<UT1>(LHS) + static_cast<UT2>(RHS);
}

//===----------------------------------------------------------------------===//
//     Extra additions to <iterator>
//===----------------------------------------------------------------------===//

namespace callable_detail {

/// Templated storage wrapper for a callable.
///
/// This class is consistently default constructible, copy / move
/// constructible / assignable.
///
/// Supported callable types:
///  - Function pointer
///  - Function reference
///  - Lambda
///  - Function object
template <typename T,
          bool = std::is_function_v<std::remove_pointer_t<remove_cvref_t<T>>>>
class Callable {
  using value_type = std::remove_reference_t<T>;
  using reference = value_type &;
  using const_reference = value_type const &;

  std::optional<value_type> Obj;

  static_assert(!std::is_pointer_v<value_type>,
                "Pointers to non-functions are not callable.");

public:
  Callable() = default;
  Callable(T const &O) : Obj(std::in_place, O) {}

  Callable(Callable const &Other) = default;
  Callable(Callable &&Other) = default;

  Callable &operator=(Callable const &Other) {
    Obj = std::nullopt;
    if (Other.Obj)
      Obj.emplace(*Other.Obj);
    return *this;
  }

  Callable &operator=(Callable &&Other) {
    Obj = std::nullopt;
    if (Other.Obj)
      Obj.emplace(std::move(*Other.Obj));
    return *this;
  }

  template <typename... Pn,
            std::enable_if_t<std::is_invocable_v<T, Pn...>, int> = 0>
  decltype(auto) operator()(Pn &&...Params) {
    return (*Obj)(std::forward<Pn>(Params)...);
  }

  template <typename... Pn,
            std::enable_if_t<std::is_invocable_v<T const, Pn...>, int> = 0>
  decltype(auto) operator()(Pn &&...Params) const {
    return (*Obj)(std::forward<Pn>(Params)...);
  }

  bool valid() const { return Obj != std::nullopt; }
  void reset() { Obj = std::nullopt; }

  operator reference() { return *Obj; }
  operator const_reference() const { return *Obj; }
};

// Function specialization.  No need to waste extra space wrapping with a
// std::optional.
template <typename T> class Callable<T, true> {
  static constexpr bool IsPtr = std::is_pointer_v<remove_cvref_t<T>>;

  using StorageT = std::conditional_t<IsPtr, T, std::remove_reference_t<T> *>;
  using CastT = std::conditional_t<IsPtr, T, T &>;

private:
  StorageT Func = nullptr;

private:
  template <typename In> static constexpr auto convertIn(In &&I) {
    if constexpr (IsPtr) {
      // Pointer... just echo it back.
      return I;
    } else {
      // Must be a function reference.  Return its address.
      return &I;
    }
  }

public:
  Callable() = default;

  // Construct from a function pointer or reference.
  //
  // Disable this constructor for references to 'Callable' so we don't violate
  // the rule of 0.
  template < // clang-format off
    typename FnPtrOrRef,
    std::enable_if_t<
      !std::is_same_v<remove_cvref_t<FnPtrOrRef>, Callable>, int
    > = 0
  > // clang-format on
  Callable(FnPtrOrRef &&F) : Func(convertIn(F)) {}

  template <typename... Pn,
            std::enable_if_t<std::is_invocable_v<T, Pn...>, int> = 0>
  decltype(auto) operator()(Pn &&...Params) const {
    return Func(std::forward<Pn>(Params)...);
  }

  bool valid() const { return Func != nullptr; }
  void reset() { Func = nullptr; }

  operator T const &() const {
    if constexpr (IsPtr) {
      // T is a pointer... just echo it back.
      return Func;
    } else {
      static_assert(std::is_reference_v<T>,
                    "Expected a reference to a function.");
      // T is a function reference... dereference the stored pointer.
      return *Func;
    }
  }
};

} // namespace callable_detail

/// Returns true if the given container only contains a single element.
template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
  auto B = std::begin(C), E = std::end(C);
  return B != E && std::next(B) == E;
}

/// Return a range covering \p RangeOrContainer with the first N elements
/// excluded.
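///
/// For example (illustrative):
/// \code
///   int A[] = {1, 2, 3, 4};
///   for (int X : drop_begin(A, 2))
///     ...; // Visits 3 and 4.
/// \endcode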
template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
  return make_range(std::next(adl_begin(RangeOrContainer), N),
                    adl_end(RangeOrContainer));
}

/// Return a range covering \p RangeOrContainer with the last N elements
/// excluded.
template <typename T> auto drop_end(T &&RangeOrContainer, size_t N = 1) {
  return make_range(adl_begin(RangeOrContainer),
                    std::prev(adl_end(RangeOrContainer), N));
}

// mapped_iterator - This is a simple iterator adapter that causes a function to
// be applied whenever operator* is invoked on the iterator.

template <typename ItTy, typename FuncTy,
          typename ReferenceTy =
              decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
class mapped_iterator
    : public iterator_adaptor_base<
          mapped_iterator<ItTy, FuncTy>, ItTy,
          typename std::iterator_traits<ItTy>::iterator_category,
          std::remove_reference_t<ReferenceTy>,
          typename std::iterator_traits<ItTy>::difference_type,
          std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
public:
  mapped_iterator() = default;
  mapped_iterator(ItTy U, FuncTy F)
    : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}

  ItTy getCurrent() { return this->I; }

  const FuncTy &getFunction() const { return F; }

  ReferenceTy operator*() const { return F(*this->I); }

private:
  callable_detail::Callable<FuncTy> F{};
};

// map_iterator - Provide a convenient way to create mapped_iterators, just like
// make_pair is useful for creating pairs...
template <class ItTy, class FuncTy>
inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
  return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
}

template <class ContainerTy, class FuncTy>
auto map_range(ContainerTy &&C, FuncTy F) {
  return make_range(map_iterator(std::begin(C), F),
                    map_iterator(std::end(C), F));
}
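
// A usage sketch (illustrative):
//   std::vector<int> V = {1, 2, 3};
//   for (int X : map_range(V, [](int N) { return N * 2; }))
//     ...; // Visits 2, 4, 6.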

/// A base type of mapped iterator, that is useful for building derived
/// iterators that do not need/want to store the map function (as in
/// mapped_iterator). These iterators must simply provide a `mapElement` method
/// that defines how to map a value of the iterator to the provided reference
/// type.
template <typename DerivedT, typename ItTy, typename ReferenceTy>
class mapped_iterator_base
    : public iterator_adaptor_base<
          DerivedT, ItTy,
          typename std::iterator_traits<ItTy>::iterator_category,
          std::remove_reference_t<ReferenceTy>,
          typename std::iterator_traits<ItTy>::difference_type,
          std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
public:
  using BaseT = mapped_iterator_base;

  mapped_iterator_base(ItTy U)
      : mapped_iterator_base::iterator_adaptor_base(std::move(U)) {}

  ItTy getCurrent() { return this->I; }

  ReferenceTy operator*() const {
    return static_cast<const DerivedT &>(*this).mapElement(*this->I);
  }
};

/// Helper to determine if type T has a member called rbegin().
template <typename Ty> class has_rbegin_impl {
  using yes = char[1];
  using no = char[2];

  template <typename Inner>
  static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);

  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};

/// Metafunction to determine if T& or T has a member called rbegin().
template <typename Ty>
struct has_rbegin : has_rbegin_impl<std::remove_reference_t<Ty>> {};

// Returns an iterator_range over the given container which iterates in reverse.
template <typename ContainerTy> auto reverse(ContainerTy &&C) {
  if constexpr (has_rbegin<ContainerTy>::value)
    return make_range(C.rbegin(), C.rend());
  else
    return make_range(std::make_reverse_iterator(std::end(C)),
                      std::make_reverse_iterator(std::begin(C)));
}

/// An iterator adaptor that filters the elements of given inner iterators.
///
/// The predicate parameter should be a callable object that accepts the wrapped
/// iterator's reference type and returns a bool. When incrementing or
/// decrementing the iterator, it will call the predicate on each element and
/// skip any where it returns false.
///
/// \code
///   int A[] = { 1, 2, 3, 4 };
///   auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
///   // R contains { 1, 3 }.
/// \endcode
///
/// Note: filter_iterator_base implements support for forward iteration.
/// filter_iterator_impl exists to provide support for bidirectional iteration,
/// conditional on whether the wrapped iterator supports it.
template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
class filter_iterator_base
    : public iterator_adaptor_base<
          filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
          WrappedIteratorT,
          std::common_type_t<IterTag,
                             typename std::iterator_traits<
                                 WrappedIteratorT>::iterator_category>> {
  using BaseT = typename filter_iterator_base::iterator_adaptor_base;

protected:
  WrappedIteratorT End;
  PredicateT Pred;

  void findNextValid() {
    while (this->I != End && !Pred(*this->I))
      BaseT::operator++();
  }

  filter_iterator_base() = default;

  // Construct the iterator. The begin iterator needs to know where the end
  // is, so that it can properly stop when it gets there. The end iterator only
  // needs the predicate to support bidirectional iteration.
  filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
                       PredicateT Pred)
      : BaseT(Begin), End(End), Pred(Pred) {
    findNextValid();
  }

public:
  using BaseT::operator++;

  filter_iterator_base &operator++() {
    BaseT::operator++();
    findNextValid();
    return *this;
  }

  decltype(auto) operator*() const {
    assert(BaseT::wrapped() != End && "Cannot dereference end iterator!");
    return BaseT::operator*();
  }

  decltype(auto) operator->() const {
    assert(BaseT::wrapped() != End && "Cannot dereference end iterator!");
    return BaseT::operator->();
  }
};

/// Specialization of filter_iterator_base for forward iteration only.
template <typename WrappedIteratorT, typename PredicateT,
          typename IterTag = std::forward_iterator_tag>
class filter_iterator_impl
    : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
public:
  filter_iterator_impl() = default;

  filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
                       PredicateT Pred)
      : filter_iterator_impl::filter_iterator_base(Begin, End, Pred) {}
};

/// Specialization of filter_iterator_base for bidirectional iteration.
template <typename WrappedIteratorT, typename PredicateT>
class filter_iterator_impl<WrappedIteratorT, PredicateT,
                           std::bidirectional_iterator_tag>
    : public filter_iterator_base<WrappedIteratorT, PredicateT,
                                  std::bidirectional_iterator_tag> {
  using BaseT = typename filter_iterator_impl::filter_iterator_base;

  void findPrevValid() {
    while (!this->Pred(*this->I))
      BaseT::operator--();
  }

public:
  using BaseT::operator--;

  filter_iterator_impl() = default;

  filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
                       PredicateT Pred)
      : BaseT(Begin, End, Pred) {}

  filter_iterator_impl &operator--() {
    BaseT::operator--();
    findPrevValid();
    return *this;
  }
};

namespace detail {

template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
  using type = std::forward_iterator_tag;
};

template <> struct fwd_or_bidi_tag_impl<true> {
  using type = std::bidirectional_iterator_tag;
};

/// Helper which sets its type member to forward_iterator_tag if the category
/// of \p IterT does not derive from bidirectional_iterator_tag, and to
/// bidirectional_iterator_tag otherwise.
template <typename IterT> struct fwd_or_bidi_tag {
  using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
      std::bidirectional_iterator_tag,
      typename std::iterator_traits<IterT>::iterator_category>::value>::type;
};

} // namespace detail

/// Defines filter_iterator to a suitable specialization of
/// filter_iterator_impl, based on the underlying iterator's category.
template <typename WrappedIteratorT, typename PredicateT>
using filter_iterator = filter_iterator_impl<
    WrappedIteratorT, PredicateT,
    typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;

/// Convenience function that takes a range of elements and a predicate,
/// and returns a new filter_iterator range.
///
/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
/// lifetime of that temporary is not kept by the returned range object, and the
/// temporary is going to be dropped on the floor after the make_iterator_range
/// full expression that contains this function call.
template <typename RangeT, typename PredicateT>
iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
make_filter_range(RangeT &&Range, PredicateT Pred) {
  using FilterIteratorT =
      filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
  return make_range(
      FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
                      std::end(std::forward<RangeT>(Range)), Pred),
      FilterIteratorT(std::end(std::forward<RangeT>(Range)),
                      std::end(std::forward<RangeT>(Range)), Pred));
}

/// A pseudo-iterator adaptor that is designed to implement "early increment"
/// style loops.
///
/// This is *not a normal iterator* and should almost never be used directly. It
/// is intended primarily to be used with range based for loops and some range
/// algorithms.
///
/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
/// somewhere between them. The constraints of these iterators are:
///
/// - On construction or after being incremented, it is comparable and
///   dereferenceable. It is *not* incrementable.
/// - After being dereferenced, it is neither comparable nor dereferenceable;
///   it is only incrementable.
///
/// This means you can only dereference the iterator once, and you can only
/// increment it once between dereferences.
template <typename WrappedIteratorT>
class early_inc_iterator_impl
    : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
                                   WrappedIteratorT, std::input_iterator_tag> {
  using BaseT = typename early_inc_iterator_impl::iterator_adaptor_base;

  using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;

protected:
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  bool IsEarlyIncremented = false;
#endif

public:
  early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}

  using BaseT::operator*;
  decltype(*std::declval<WrappedIteratorT>()) operator*() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert(!IsEarlyIncremented && "Cannot dereference twice!");
    IsEarlyIncremented = true;
#endif
    return *(this->I)++;
  }

  using BaseT::operator++;
  early_inc_iterator_impl &operator++() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
    IsEarlyIncremented = false;
#endif
    return *this;
  }

  friend bool operator==(const early_inc_iterator_impl &LHS,
                         const early_inc_iterator_impl &RHS) {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
#endif
    return (const BaseT &)LHS == (const BaseT &)RHS;
  }
};

/// Make a range that does early increment to allow mutation of the underlying
/// range without disrupting iteration.
///
/// The underlying iterator will be incremented immediately after it is
/// dereferenced, allowing deletion of the current node or insertion of nodes to
/// not disrupt iteration provided they do not invalidate the *next* iterator --
/// the current iterator can be invalidated.
///
/// This requires a very exact pattern of use that is only really suitable to
/// range based for loops and other range algorithms that explicitly guarantee
/// to dereference each element exactly once, and to increment exactly once
/// between dereferences.
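///
/// A usage sketch (illustrative; the IR types and the dead-instruction
/// predicate are examples, not part of this header):
/// \code
///   for (Instruction &I : make_early_inc_range(BB))
///     if (isDead(I)) // Hypothetical predicate.
///       I.eraseFromParent();
/// \endcode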
template <typename RangeT>
iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
make_early_inc_range(RangeT &&Range) {
  using EarlyIncIteratorT =
      early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
  return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
                    EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
}

// Forward declarations required by zip_shortest/zip_equal/zip_first/zip_longest
template <typename R, typename UnaryPredicate>
bool all_of(R &&range, UnaryPredicate P);

template <typename R, typename UnaryPredicate>
bool any_of(R &&range, UnaryPredicate P);

template <typename T> bool all_equal(std::initializer_list<T> Values);

template <typename R> constexpr size_t range_size(R &&Range);

namespace detail {

using std::declval;

// We have to alias this since inlining the actual type at the usage site
// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
template<typename... Iters> struct ZipTupleType {
  using type = std::tuple<decltype(*declval<Iters>())...>;
};

template <typename ZipType, typename ReferenceTupleType, typename... Iters>
using zip_traits = iterator_facade_base<
    ZipType,
    std::common_type_t<
        std::bidirectional_iterator_tag,
        typename std::iterator_traits<Iters>::iterator_category...>,
    // ^ TODO: Implement random access methods.
    ReferenceTupleType,
    typename std::iterator_traits<
        std::tuple_element_t<0, std::tuple<Iters...>>>::difference_type,
    // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
    // inner iterators have the same difference_type. It would fail if, for
    // instance, the second field's difference_type were non-numeric while the
    // first is.
    ReferenceTupleType *, ReferenceTupleType>;

template <typename ZipType, typename ReferenceTupleType, typename... Iters>
struct zip_common : public zip_traits<ZipType, ReferenceTupleType, Iters...> {
  using Base = zip_traits<ZipType, ReferenceTupleType, Iters...>;
  using IndexSequence = std::index_sequence_for<Iters...>;
  using value_type = typename Base::value_type;

  std::tuple<Iters...> iterators;

protected:
  template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
    return value_type(*std::get<Ns>(iterators)...);
  }

  template <size_t... Ns> void tup_inc(std::index_sequence<Ns...>) {
    (++std::get<Ns>(iterators), ...);
  }

  template <size_t... Ns> void tup_dec(std::index_sequence<Ns...>) {
    (--std::get<Ns>(iterators), ...);
  }

  template <size_t... Ns>
  bool test_all_equals(const zip_common &other,
                       std::index_sequence<Ns...>) const {
    return ((std::get<Ns>(this->iterators) == std::get<Ns>(other.iterators)) &&
            ...);
  }

public:
  zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}

  value_type operator*() const { return deref(IndexSequence{}); }

  ZipType &operator++() {
    tup_inc(IndexSequence{});
    return static_cast<ZipType &>(*this);
  }

  ZipType &operator--() {
    static_assert(Base::IsBidirectional,
                  "All inner iterators must be at least bidirectional.");
    tup_dec(IndexSequence{});
    return static_cast<ZipType &>(*this);
  }

  /// Return true if all the iterators match `other`'s iterators.
  bool all_equals(zip_common &other) {
    return test_all_equals(other, IndexSequence{});
  }
};

template <typename... Iters>
struct zip_first : zip_common<zip_first<Iters...>,
                              typename ZipTupleType<Iters...>::type, Iters...> {
  using zip_common<zip_first, typename ZipTupleType<Iters...>::type,
                   Iters...>::zip_common;

  bool operator==(const zip_first &other) const {
    return std::get<0>(this->iterators) == std::get<0>(other.iterators);
  }
};

template <typename... Iters>
struct zip_shortest
    : zip_common<zip_shortest<Iters...>, typename ZipTupleType<Iters...>::type,
                 Iters...> {
  using zip_common<zip_shortest, typename ZipTupleType<Iters...>::type,
                   Iters...>::zip_common;

  bool operator==(const zip_shortest &other) const {
    return any_iterator_equals(other, std::index_sequence_for<Iters...>{});
  }

private:
  template <size_t... Ns>
  bool any_iterator_equals(const zip_shortest &other,
                           std::index_sequence<Ns...>) const {
    return ((std::get<Ns>(this->iterators) == std::get<Ns>(other.iterators)) ||
            ...);
  }
};

/// Helper to obtain the iterator types for the tuple storage within `zippy`.
template <template <typename...> class ItType, typename TupleStorageType,
          typename IndexSequence>
struct ZippyIteratorTuple;

/// Partial specialization for non-const tuple storage.
template <template <typename...> class ItType, typename... Args,
          std::size_t... Ns>
struct ZippyIteratorTuple<ItType, std::tuple<Args...>,
                          std::index_sequence<Ns...>> {
  using type = ItType<decltype(adl_begin(
      std::get<Ns>(declval<std::tuple<Args...> &>())))...>;
};

/// Partial specialization for const tuple storage.
template <template <typename...> class ItType, typename... Args,
          std::size_t... Ns>
struct ZippyIteratorTuple<ItType, const std::tuple<Args...>,
                          std::index_sequence<Ns...>> {
  using type = ItType<decltype(adl_begin(
      std::get<Ns>(declval<const std::tuple<Args...> &>())))...>;
};

template <template <typename...> class ItType, typename... Args> class zippy {
private:
  std::tuple<Args...> storage;
  using IndexSequence = std::index_sequence_for<Args...>;

public:
  using iterator = typename ZippyIteratorTuple<ItType, decltype(storage),
                                               IndexSequence>::type;
  using const_iterator =
      typename ZippyIteratorTuple<ItType, const decltype(storage),
                                  IndexSequence>::type;
  using iterator_category = typename iterator::iterator_category;
  using value_type = typename iterator::value_type;
  using difference_type = typename iterator::difference_type;
  using pointer = typename iterator::pointer;
  using reference = typename iterator::reference;
  using const_reference = typename const_iterator::reference;

  zippy(Args &&...args) : storage(std::forward<Args>(args)...) {}

  const_iterator begin() const { return begin_impl(IndexSequence{}); }
  iterator begin() { return begin_impl(IndexSequence{}); }
  const_iterator end() const { return end_impl(IndexSequence{}); }
  iterator end() { return end_impl(IndexSequence{}); }

private:
  template <size_t... Ns>
  const_iterator begin_impl(std::index_sequence<Ns...>) const {
    return const_iterator(adl_begin(std::get<Ns>(storage))...);
  }
  template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
    return iterator(adl_begin(std::get<Ns>(storage))...);
  }

  template <size_t... Ns>
  const_iterator end_impl(std::index_sequence<Ns...>) const {
    return const_iterator(adl_end(std::get<Ns>(storage))...);
  }
  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
    return iterator(adl_end(std::get<Ns>(storage))...);
  }
};

} // end namespace detail

/// zip iterator for two or more iterable types. Iteration continues until the
/// end of the *shortest* iteratee is reached.
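///
/// A minimal usage sketch (illustrative; `Names` and `Ages` are hypothetical
/// containers):
///
/// \code
/// std::vector<std::string> Names = {"a", "b", "c"};
/// std::vector<int> Ages = {30, 40};  // Shorter range bounds the iteration.
/// for (auto [Name, Age] : zip(Names, Ages))
///   outs() << Name << ": " << Age << "\n";  // Visits only two pairs.
/// \endcode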
template <typename T, typename U, typename... Args>
detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
                                                       Args &&...args) {
  return detail::zippy<detail::zip_shortest, T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}

/// zip iterator that assumes that all iteratees have the same length.
/// In builds with assertions on, this assumption is checked before the
/// iteration starts.
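///
/// A minimal usage sketch (illustrative; both ranges must have equal length):
///
/// \code
/// std::vector<int> A = {1, 2, 3};
/// std::vector<int> B = {4, 5, 6};
/// for (auto [X, Y] : zip_equal(A, B))
///   X += Y;  // Elements of the tuple are references, so A is mutated.
/// \endcode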
template <typename T, typename U, typename... Args>
detail::zippy<detail::zip_first, T, U, Args...> zip_equal(T &&t, U &&u,
                                                          Args &&...args) {
  assert(all_equal({range_size(t), range_size(u), range_size(args)...}) &&
         "Iteratees do not have equal length");
  return detail::zippy<detail::zip_first, T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}

/// zip iterator that, for the sake of efficiency, assumes that the first
/// iteratee is the shortest. Iteration continues until the end of the first
/// iteratee is reached. In builds with assertions on, we check that this
/// assumption holds.
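///
/// A minimal sketch (illustrative; the first range must be the shortest):
///
/// \code
/// std::vector<int> Short = {1, 2};
/// std::vector<int> Long = {10, 20, 30};
/// for (auto [S, L] : zip_first(Short, Long))
///   S += L;  // Visits exactly Short.size() pairs.
/// \endcode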
template <typename T, typename U, typename... Args>
detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
                                                          Args &&...args) {
  assert(range_size(t) <= std::min({range_size(u), range_size(args)...}) &&
         "First iteratee is not the shortest");

  return detail::zippy<detail::zip_first, T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}

namespace detail {
template <typename Iter>
Iter next_or_end(const Iter &I, const Iter &End) {
  if (I == End)
    return End;
  return std::next(I);
}

template <typename Iter>
auto deref_or_none(const Iter &I, const Iter &End) -> std::optional<
    std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
  if (I == End)
    return std::nullopt;
  return *I;
}

template <typename Iter> struct ZipLongestItemType {
  using type = std::optional<std::remove_const_t<
      std::remove_reference_t<decltype(*std::declval<Iter>())>>>;
};

template <typename... Iters> struct ZipLongestTupleType {
  using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
};

template <typename... Iters>
class zip_longest_iterator
    : public iterator_facade_base<
          zip_longest_iterator<Iters...>,
          std::common_type_t<
              std::forward_iterator_tag,
              typename std::iterator_traits<Iters>::iterator_category...>,
          typename ZipLongestTupleType<Iters...>::type,
          typename std::iterator_traits<
              std::tuple_element_t<0, std::tuple<Iters...>>>::difference_type,
          typename ZipLongestTupleType<Iters...>::type *,
          typename ZipLongestTupleType<Iters...>::type> {
public:
  using value_type = typename ZipLongestTupleType<Iters...>::type;

private:
  std::tuple<Iters...> iterators;
  std::tuple<Iters...> end_iterators;

  template <size_t... Ns>
  bool test(const zip_longest_iterator<Iters...> &other,
            std::index_sequence<Ns...>) const {
    return ((std::get<Ns>(this->iterators) != std::get<Ns>(other.iterators)) ||
            ...);
  }

  template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
    return value_type(
        deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
  }

  template <size_t... Ns>
  decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
    return std::tuple<Iters...>(
        next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
  }

public:
  zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
      : iterators(std::forward<Iters>(ts.first)...),
        end_iterators(std::forward<Iters>(ts.second)...) {}

  value_type operator*() const {
    return deref(std::index_sequence_for<Iters...>{});
  }

  zip_longest_iterator<Iters...> &operator++() {
    iterators = tup_inc(std::index_sequence_for<Iters...>{});
    return *this;
  }

  bool operator==(const zip_longest_iterator<Iters...> &other) const {
    return !test(other, std::index_sequence_for<Iters...>{});
  }
};

template <typename... Args> class zip_longest_range {
public:
  using iterator =
      zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
  using iterator_category = typename iterator::iterator_category;
  using value_type = typename iterator::value_type;
  using difference_type = typename iterator::difference_type;
  using pointer = typename iterator::pointer;
  using reference = typename iterator::reference;

private:
  std::tuple<Args...> ts;

  template <size_t... Ns>
  iterator begin_impl(std::index_sequence<Ns...>) const {
    return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
                                   adl_end(std::get<Ns>(ts)))...);
  }

  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
    return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
                                   adl_end(std::get<Ns>(ts)))...);
  }

public:
  zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}

  iterator begin() const {
    return begin_impl(std::index_sequence_for<Args...>{});
  }
  iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
};
} // namespace detail

/// Iterate over two or more ranges at the same time. Iteration continues until
/// all iterators reach the end. Each element of the resulting tuple is a
/// std::optional that contains a value only while the corresponding iterator
/// has not reached its end.
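///
/// A minimal sketch (illustrative; `process` is a hypothetical callback):
///
/// \code
/// std::vector<int> A = {1, 2, 3};
/// std::vector<int> B = {4};
/// for (auto [X, Y] : zip_longest(A, B)) {
///   // X and Y are std::optional<int>; Y is nullopt for the last two items.
///   if (X && Y)
///     process(*X, *Y);
/// }
/// \endcode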
template <typename T, typename U, typename... Args>
detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
                                                     Args &&... args) {
  return detail::zip_longest_range<T, U, Args...>(
      std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
}

/// Iterator wrapper that concatenates sequences together.
///
/// This can concatenate different iterators, even with different types, into
/// a single iterator provided the value types of all the concatenated
/// iterators expose `reference` and `pointer` types that can be converted to
/// `ValueT &` and `ValueT *` respectively. It doesn't support more
/// interesting/customized pointer or reference types.
///
/// Currently this only supports forward or higher iterator categories as
/// inputs and always exposes a forward iterator interface.
template <typename ValueT, typename... IterTs>
class concat_iterator
    : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
                                  std::forward_iterator_tag, ValueT> {
  using BaseT = typename concat_iterator::iterator_facade_base;

  /// We store both the current and end iterators for each concatenated
  /// sequence in a tuple of pairs.
  ///
  /// Note that something like iterator_range seems nice at first here, but the
  /// range properties are of little benefit and end up getting in the way
  /// because we need to do mutation on the current iterators.
  std::tuple<IterTs...> Begins;
  std::tuple<IterTs...> Ends;

  /// Attempts to increment a specific iterator.
  ///
  /// Returns true if it was able to increment the iterator. Returns false if
  /// the iterator is already at the end iterator.
  template <size_t Index> bool incrementHelper() {
    auto &Begin = std::get<Index>(Begins);
    auto &End = std::get<Index>(Ends);
    if (Begin == End)
      return false;

    ++Begin;
    return true;
  }

  /// Increments the first non-end iterator.
  ///
  /// It is an error to call this with all iterators at the end.
  template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
    // Build a sequence of functions to increment each iterator if possible.
    bool (concat_iterator::*IncrementHelperFns[])() = {
        &concat_iterator::incrementHelper<Ns>...};

    // Loop over them, and stop as soon as we succeed at incrementing one.
    for (auto &IncrementHelperFn : IncrementHelperFns)
      if ((this->*IncrementHelperFn)())
        return;

    llvm_unreachable("Attempted to increment an end concat iterator!");
  }

  /// Returns null if the specified iterator is at the end. Otherwise,
  /// dereferences the iterator and returns the address of the resulting
  /// reference.
  template <size_t Index> ValueT *getHelper() const {
    auto &Begin = std::get<Index>(Begins);
    auto &End = std::get<Index>(Ends);
    if (Begin == End)
      return nullptr;

    return &*Begin;
  }

  /// Finds the first non-end iterator, dereferences, and returns the resulting
  /// reference.
  ///
  /// It is an error to call this with all iterators at the end.
  template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
    // Build a sequence of functions to get from iterator if possible.
    ValueT *(concat_iterator::*GetHelperFns[])() const = {
        &concat_iterator::getHelper<Ns>...};

    // Loop over them, and return the first result we find.
    for (auto &GetHelperFn : GetHelperFns)
      if (ValueT *P = (this->*GetHelperFn)())
        return *P;

    llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
  }

public:
  /// Constructs an iterator from a sequence of ranges.
  ///
  /// We need the full range to know how to switch between each of the
  /// iterators.
  template <typename... RangeTs>
  explicit concat_iterator(RangeTs &&... Ranges)
      : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}

  using BaseT::operator++;

  concat_iterator &operator++() {
    increment(std::index_sequence_for<IterTs...>());
    return *this;
  }

  ValueT &operator*() const {
    return get(std::index_sequence_for<IterTs...>());
  }

  bool operator==(const concat_iterator &RHS) const {
    return Begins == RHS.Begins && Ends == RHS.Ends;
  }
};

namespace detail {

/// Helper to store a sequence of ranges being concatenated and access them.
///
/// This is designed to facilitate providing actual storage when temporaries
/// are passed into the constructor, such that we can use it as part of
/// range-based for loops.
template <typename ValueT, typename... RangeTs> class concat_range {
public:
  using iterator =
      concat_iterator<ValueT,
                      decltype(std::begin(std::declval<RangeTs &>()))...>;

private:
  std::tuple<RangeTs...> Ranges;

  template <size_t... Ns>
  iterator begin_impl(std::index_sequence<Ns...>) {
    return iterator(std::get<Ns>(Ranges)...);
  }
  template <size_t... Ns>
  iterator begin_impl(std::index_sequence<Ns...>) const {
    return iterator(std::get<Ns>(Ranges)...);
  }
  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
    return iterator(make_range(std::end(std::get<Ns>(Ranges)),
                               std::end(std::get<Ns>(Ranges)))...);
  }
  template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
    return iterator(make_range(std::end(std::get<Ns>(Ranges)),
                               std::end(std::get<Ns>(Ranges)))...);
  }

public:
  concat_range(RangeTs &&... Ranges)
      : Ranges(std::forward<RangeTs>(Ranges)...) {}

  iterator begin() {
    return begin_impl(std::index_sequence_for<RangeTs...>{});
  }
  iterator begin() const {
    return begin_impl(std::index_sequence_for<RangeTs...>{});
  }
  iterator end() {
    return end_impl(std::index_sequence_for<RangeTs...>{});
  }
  iterator end() const {
    return end_impl(std::index_sequence_for<RangeTs...>{});
  }
};

} // end namespace detail

/// Concatenated range across two or more ranges.
///
/// The desired value type must be explicitly specified.
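///
/// A minimal usage sketch (illustrative; note the explicit element type):
///
/// \code
/// std::vector<int> A = {1, 2};
/// SmallVector<int, 4> B = {3, 4};
/// for (int &X : concat<int>(A, B))
///   X *= 2;  // Visits 1, 2, 3, 4 across both containers.
/// \endcode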
template <typename ValueT, typename... RangeTs>
detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
  static_assert(sizeof...(RangeTs) > 1,
                "Need more than one range to concatenate!");
  return detail::concat_range<ValueT, RangeTs...>(
      std::forward<RangeTs>(Ranges)...);
}

/// A utility class used to implement an iterator that contains some base object
/// and an index. The iterator moves the index but keeps the base constant.
template <typename DerivedT, typename BaseT, typename T,
          typename PointerT = T *, typename ReferenceT = T &>
class indexed_accessor_iterator
    : public llvm::iterator_facade_base<DerivedT,
                                        std::random_access_iterator_tag, T,
                                        std::ptrdiff_t, PointerT, ReferenceT> {
public:
  ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
    assert(base == rhs.base && "incompatible iterators");
    return index - rhs.index;
  }
  bool operator==(const indexed_accessor_iterator &rhs) const {
    return base == rhs.base && index == rhs.index;
  }
  bool operator<(const indexed_accessor_iterator &rhs) const {
    assert(base == rhs.base && "incompatible iterators");
    return index < rhs.index;
  }

  DerivedT &operator+=(ptrdiff_t offset) {
    this->index += offset;
    return static_cast<DerivedT &>(*this);
  }
  DerivedT &operator-=(ptrdiff_t offset) {
    this->index -= offset;
    return static_cast<DerivedT &>(*this);
  }

  /// Returns the current index of the iterator.
  ptrdiff_t getIndex() const { return index; }

  /// Returns the current base of the iterator.
  const BaseT &getBase() const { return base; }

protected:
  indexed_accessor_iterator(BaseT base, ptrdiff_t index)
      : base(base), index(index) {}
  BaseT base;
  ptrdiff_t index;
};

namespace detail {
/// This class represents the base of a range of indexed_accessor_iterators. It
/// provides support for many different range functionalities, e.g.
/// drop_front/slice/etc.. Derived range classes must implement the following
/// static methods:
///   * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
///     - Dereference an iterator pointing to the base object at the given
///       index.
///   * BaseT offset_base(const BaseT &base, ptrdiff_t index)
///     - Return a new base that is offset from the provided base by 'index'
///       elements.
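///
/// A minimal sketch of a conforming derived range (illustrative;
/// `IntArrayRange` is hypothetical and uses a raw pointer as its base):
///
/// \code
/// class IntArrayRange
///     : public indexed_accessor_range_base<IntArrayRange, const int *, int,
///                                          const int *, const int &> {
/// public:
///   using RangeBaseT::RangeBaseT;  // Inherit the (base, count) constructor.
///   static const int *offset_base(const int *base, ptrdiff_t index) {
///     return base + index;
///   }
///   static const int &dereference_iterator(const int *base, ptrdiff_t index) {
///     return base[index];
///   }
/// };
/// \endcode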
template <typename DerivedT, typename BaseT, typename T,
          typename PointerT = T *, typename ReferenceT = T &>
class indexed_accessor_range_base {
public:
  using RangeBaseT = indexed_accessor_range_base;

  /// An iterator element of this range.
  class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
                                                    PointerT, ReferenceT> {
  public:
    // Index into this iterator, invoking a static method on the derived type.
    ReferenceT operator*() const {
      return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
    }

  private:
    iterator(BaseT owner, ptrdiff_t curIndex)
        : iterator::indexed_accessor_iterator(owner, curIndex) {}

    /// Allow access to the constructor.
    friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
                                       ReferenceT>;
  };

  indexed_accessor_range_base(iterator begin, iterator end)
      : base(offset_base(begin.getBase(), begin.getIndex())),
        count(end.getIndex() - begin.getIndex()) {}
  indexed_accessor_range_base(const iterator_range<iterator> &range)
      : indexed_accessor_range_base(range.begin(), range.end()) {}
  indexed_accessor_range_base(BaseT base, ptrdiff_t count)
      : base(base), count(count) {}

  iterator begin() const { return iterator(base, 0); }
  iterator end() const { return iterator(base, count); }
  ReferenceT operator[](size_t Index) const {
    assert(Index < size() && "invalid index for value range");
    return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
  }
  ReferenceT front() const {
    assert(!empty() && "expected non-empty range");
    return (*this)[0];
  }
  ReferenceT back() const {
    assert(!empty() && "expected non-empty range");
    return (*this)[size() - 1];
  }

  /// Compare this range with another.
  template <typename OtherT>
  friend bool operator==(const indexed_accessor_range_base &lhs,
                         const OtherT &rhs) {
    return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
  }
  template <typename OtherT>
  friend bool operator!=(const indexed_accessor_range_base &lhs,
                         const OtherT &rhs) {
    return !(lhs == rhs);
  }

  /// Return the size of this range.
  size_t size() const { return count; }

  /// Return if the range is empty.
  bool empty() const { return size() == 0; }

  /// Drop the first n elements and keep the next m elements.
  DerivedT slice(size_t n, size_t m) const {
    assert(n + m <= size() && "invalid size specifiers");
    return DerivedT(offset_base(base, n), m);
  }

  /// Drop the first n elements.
  DerivedT drop_front(size_t n = 1) const {
    assert(size() >= n && "Dropping more elements than exist");
    return slice(n, size() - n);
  }
  /// Drop the last n elements.
  DerivedT drop_back(size_t n = 1) const {
    assert(size() >= n && "Dropping more elements than exist");
    return DerivedT(base, size() - n);
  }

  /// Take the first n elements.
  DerivedT take_front(size_t n = 1) const {
    return n < size() ? drop_back(size() - n)
                      : static_cast<const DerivedT &>(*this);
  }

  /// Take the last n elements.
  DerivedT take_back(size_t n = 1) const {
    return n < size() ? drop_front(size() - n)
                      : static_cast<const DerivedT &>(*this);
  }

  /// Allow conversion to any type accepting an iterator_range.
  template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
                                 RangeT, iterator_range<iterator>>::value>>
  operator RangeT() const {
    return RangeT(iterator_range<iterator>(*this));
  }

  /// Returns the base of this range.
  const BaseT &getBase() const { return base; }

private:
  /// Offset the given base by the given amount.
  static BaseT offset_base(const BaseT &base, size_t n) {
    return n == 0 ? base : DerivedT::offset_base(base, n);
  }

protected:
  indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
  indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
  indexed_accessor_range_base &
  operator=(const indexed_accessor_range_base &) = default;

  /// The base that owns the provided range of values.
  BaseT base;
  /// The size from the owning range.
  ptrdiff_t count;
};
} // end namespace detail

/// This class provides an implementation of a range of
/// indexed_accessor_iterators where the base is not indexable. Ranges with
/// bases that are offsetable should derive from indexed_accessor_range_base
/// instead. Derived range classes are expected to implement the following
/// static method:
///   * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
///     - Dereference an iterator pointing to a parent base at the given index.
template <typename DerivedT, typename BaseT, typename T,
          typename PointerT = T *, typename ReferenceT = T &>
class indexed_accessor_range
    : public detail::indexed_accessor_range_base<
          DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
public:
  indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
      : detail::indexed_accessor_range_base<
            DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
            std::make_pair(base, startIndex), count) {}
  using detail::indexed_accessor_range_base<
      DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
      ReferenceT>::indexed_accessor_range_base;

  /// Returns the current base of the range.
  const BaseT &getBase() const { return this->base.first; }

  /// Returns the current start index of the range.
  ptrdiff_t getStartIndex() const { return this->base.second; }

  /// See `detail::indexed_accessor_range_base` for details.
  static std::pair<BaseT, ptrdiff_t>
  offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
    // We encode the internal base as a pair of the derived base and a start
    // index into the derived base.
    return std::make_pair(base.first, base.second + index);
  }
  /// See `detail::indexed_accessor_range_base` for details.
  static ReferenceT
  dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
                       ptrdiff_t index) {
    return DerivedT::dereference(base.first, base.second + index);
  }
};

namespace detail {
/// Compute the return type for accessing the first or second member of an
/// element: a reference when the element type is itself a reference, and
/// otherwise a copy, so that members of temporaries are returned by value.
///
/// When passing a range whose iterators return values instead of references,
/// the reference must be dropped from `decltype((elt.first))`, which will
/// always be a reference, to avoid returning a reference to a temporary.
template <typename EltTy, typename FirstTy> class first_or_second_type {
public:
  using type = std::conditional_t<std::is_reference<EltTy>::value, FirstTy,
                                  std::remove_reference_t<FirstTy>>;
};
} // end namespace detail

/// Given a container of pairs, return a range over the first elements.
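///
/// A minimal sketch (illustrative; `use` is a hypothetical consumer):
///
/// \code
/// std::map<int, StringRef> M = {{1, "one"}, {2, "two"}};
/// for (int Key : make_first_range(M))
///   use(Key);  // Visits 1, then 2.
/// \endcode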
template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
  using EltTy = decltype((*std::begin(c)));
  return llvm::map_range(std::forward<ContainerTy>(c),
                         [](EltTy elt) -> typename detail::first_or_second_type<
                                           EltTy, decltype((elt.first))>::type {
                           return elt.first;
                         });
}

/// Given a container of pairs, return a range over the second elements.
template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
  using EltTy = decltype((*std::begin(c)));
  return llvm::map_range(
      std::forward<ContainerTy>(c),
      [](EltTy elt) ->
      typename detail::first_or_second_type<EltTy,
                                            decltype((elt.second))>::type {
        return elt.second;
      });
}

//===----------------------------------------------------------------------===//
//     Extra additions to <utility>
//===----------------------------------------------------------------------===//

/// Function object to check whether the first component of a container
/// supported by std::get (like std::pair and std::tuple) compares less than the
/// first component of another container.
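///
/// For example (illustrative), this sorts pairs by their first member only:
///
/// \code
/// std::vector<std::pair<int, StringRef>> V = {{2, "b"}, {1, "a"}};
/// llvm::sort(V, less_first());  // Now ordered {1, "a"}, {2, "b"}.
/// \endcode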
struct less_first {
  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
    return std::less<>()(std::get<0>(lhs), std::get<0>(rhs));
  }
};

/// Function object to check whether the second component of a container
/// supported by std::get (like std::pair and std::tuple) compares less than the
/// second component of another container.
struct less_second {
  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
    return std::less<>()(std::get<1>(lhs), std::get<1>(rhs));
  }
};

/// \brief Function object to apply a binary function to the first component of
/// a std::pair.
template<typename FuncTy>
struct on_first {
  FuncTy func;

  template <typename T>
  decltype(auto) operator()(const T &lhs, const T &rhs) const {
    return func(lhs.first, rhs.first);
  }
};

/// Utility type to build an inheritance chain that makes it easy to rank
/// overload candidates.
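///
/// A minimal sketch (illustrative; the `getName*` functions are hypothetical):
/// overload resolution prefers the most derived `rank`, so the `rank<1>`
/// candidate wins whenever it is viable.
///
/// \code
/// template <typename T>
/// auto getNameImpl(const T &X, rank<1>) -> decltype(X.getName()) {
///   return X.getName();
/// }
/// template <typename T> StringRef getNameImpl(const T &, rank<0>) {
///   return "<unknown>";
/// }
/// template <typename T> auto getName(const T &X) {
///   return getNameImpl(X, rank<1>());
/// }
/// \endcode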
template <int N> struct rank : rank<N - 1> {};
template <> struct rank<0> {};

/// traits class for checking whether type T is one of the given types in the
/// variadic list.
template <typename T, typename... Ts>
using is_one_of = std::disjunction<std::is_same<T, Ts>...>;

/// traits class for checking whether type T is a base class of all the given
/// types in the variadic list.
template <typename T, typename... Ts>
using are_base_of = std::conjunction<std::is_base_of<T, Ts>...>;

namespace detail {
template <typename... Ts> struct Visitor;

template <typename HeadT, typename... TailTs>
struct Visitor<HeadT, TailTs...> : remove_cvref_t<HeadT>, Visitor<TailTs...> {
  explicit constexpr Visitor(HeadT &&Head, TailTs &&...Tail)
      : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)),
        Visitor<TailTs...>(std::forward<TailTs>(Tail)...) {}
  using remove_cvref_t<HeadT>::operator();
  using Visitor<TailTs...>::operator();
};

template <typename HeadT> struct Visitor<HeadT> : remove_cvref_t<HeadT> {
  explicit constexpr Visitor(HeadT &&Head)
      : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)) {}
  using remove_cvref_t<HeadT>::operator();
};
} // namespace detail

/// Returns an opaquely-typed Callable object whose operator() overload set is
/// the sum of the operator() overload sets of each CallableT in CallableTs.
///
/// The type of the returned object derives from each CallableT in CallableTs.
/// The returned object is constructed by invoking the appropriate copy or move
/// constructor of each CallableT, as selected by overload resolution on the
/// corresponding argument to makeVisitor.
///
/// Example:
///
/// \code
/// auto visitor = makeVisitor([](auto) { return "unhandled type"; },
///                            [](int i) { return "int"; },
///                            [](std::string s) { return "str"; });
/// auto a = visitor(42);    // `a` is now "int".
/// auto b = visitor("foo"); // `b` is now "str".
/// auto c = visitor(3.14f); // `c` is now "unhandled type".
/// \endcode
///
/// Example of making a visitor with a lambda which captures a move-only type:
///
/// \code
/// std::unique_ptr<FooHandler> FH = /* ... */;
/// auto visitor = makeVisitor(
///     [FH{std::move(FH)}](Foo F) { return FH->handle(F); },
///     [](int i) { return i; },
///     [](std::string s) { return atoi(s); });
/// \endcode
template <typename... CallableTs>
constexpr decltype(auto) makeVisitor(CallableTs &&...Callables) {
  return detail::Visitor<CallableTs...>(std::forward<CallableTs>(Callables)...);
}

//===----------------------------------------------------------------------===//
//     Extra additions to <algorithm>
//===----------------------------------------------------------------------===//

// We have a copy here so that LLVM behaves the same when using different
// standard libraries.
template <class Iterator, class RNG>
void shuffle(Iterator first, Iterator last, RNG &&g) {
  // It would be better to use a std::uniform_int_distribution,
  // but that would be stdlib dependent.
  typedef
      typename std::iterator_traits<Iterator>::difference_type difference_type;
  for (auto size = last - first; size > 1; ++first, (void)--size) {
    difference_type offset = g() % size;
    // Avoid self-assignment due to incorrect assertions in libstdc++
    // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
    if (offset != difference_type(0))
      std::iter_swap(first, first + offset);
  }
}

/// Adapt std::less<T> for array_pod_sort.
template<typename T>
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
  if (std::less<T>()(*reinterpret_cast<const T*>(P1),
                     *reinterpret_cast<const T*>(P2)))
    return -1;
  if (std::less<T>()(*reinterpret_cast<const T*>(P2),
                     *reinterpret_cast<const T*>(P1)))
    return 1;
  return 0;
}

/// get_array_pod_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template<typename T>
inline int (*get_array_pod_sort_comparator(const T &))
             (const void*, const void*) {
  return array_pod_sort_comparator<T>;
}

#ifdef EXPENSIVE_CHECKS
namespace detail {

inline unsigned presortShuffleEntropy() {
  static unsigned Result(std::random_device{}());
  return Result;
}

template <class IteratorTy>
inline void presortShuffle(IteratorTy Start, IteratorTy End) {
  std::mt19937 Generator(presortShuffleEntropy());
  llvm::shuffle(Start, End, Generator);
}

} // end namespace detail
#endif

/// array_pod_sort - This sorts an array with the specified start and end
/// extent.  This is just like std::sort, except that it calls qsort instead of
/// using an inlined template.  qsort is slightly slower than std::sort, but
/// most sorts are not performance critical in LLVM and std::sort has to be
/// template instantiated for each type, leading to significant measured code
/// bloat.  This function should generally be used instead of std::sort where
/// possible.
///
/// This function assumes that you have simple POD-like types that can be
/// compared with std::less and can be moved with memcpy.  If this isn't true,
/// you should use std::sort.
///
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
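///
/// A minimal usage sketch (illustrative):
///
/// \code
/// SmallVector<unsigned, 8> V = {3, 1, 2};
/// array_pod_sort(V.begin(), V.end());  // V is now {1, 2, 3}.
/// \endcode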
template<class IteratorTy>
inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
  // Don't inefficiently call qsort with one element or trigger undefined
  // behavior with an empty sequence.
  auto NElts = End - Start;
  if (NElts <= 1) return;
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
}

template <class IteratorTy>
inline void array_pod_sort(
    IteratorTy Start, IteratorTy End,
    int (*Compare)(
        const typename std::iterator_traits<IteratorTy>::value_type *,
        const typename std::iterator_traits<IteratorTy>::value_type *)) {
  // Don't inefficiently call qsort with one element or trigger undefined
  // behavior with an empty sequence.
  auto NElts = End - Start;
  if (NElts <= 1) return;
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  qsort(&*Start, NElts, sizeof(*Start),
        reinterpret_cast<int (*)(const void *, const void *)>(Compare));
}

namespace detail {
// We can use qsort if the iterator type is a pointer and the underlying value
// is trivially copyable.
template <typename T>
using sort_trivially_copyable = std::conjunction<
    std::is_pointer<T>,
    std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
} // namespace detail

// Provide wrappers to std::sort which shuffle the elements before sorting
// to help uncover non-deterministic behavior (PR35135).
template <typename IteratorTy>
inline void sort(IteratorTy Start, IteratorTy End) {
  if constexpr (detail::sort_trivially_copyable<IteratorTy>::value) {
    // Forward trivially copyable types to array_pod_sort. This avoids a large
    // amount of code bloat for a minor performance hit.
    array_pod_sort(Start, End);
  } else {
#ifdef EXPENSIVE_CHECKS
    detail::presortShuffle<IteratorTy>(Start, End);
#endif
    std::sort(Start, End);
  }
}

template <typename Container> inline void sort(Container &&C) {
  llvm::sort(adl_begin(C), adl_end(C));
}

template <typename IteratorTy, typename Compare>
inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
#ifdef EXPENSIVE_CHECKS
  detail::presortShuffle<IteratorTy>(Start, End);
#endif
  std::sort(Start, End, Comp);
}

template <typename Container, typename Compare>
inline void sort(Container &&C, Compare Comp) {
  llvm::sort(adl_begin(C), adl_end(C), Comp);
}

/// Get the size of a range. This is a wrapper function around std::distance
/// which is only enabled when the operation is O(1).
template <typename R>
auto size(R &&Range,
          std::enable_if_t<
              std::is_base_of<std::random_access_iterator_tag,
                              typename std::iterator_traits<decltype(
                                  Range.begin())>::iterator_category>::value,
              void> * = nullptr) {
  return std::distance(Range.begin(), Range.end());
}

namespace detail {
template <typename Range>
using check_has_free_function_size =
    decltype(adl_size(std::declval<Range &>()));

template <typename Range>
static constexpr bool HasFreeFunctionSize =
    is_detected<check_has_free_function_size, Range>::value;
} // namespace detail

/// Returns the size of the \p Range, i.e., the number of elements. This
/// implementation takes inspiration from `std::ranges::size` from C++20 and
/// delegates the size check to `adl_size` or `std::distance`, in this order of
/// preference. Unlike `llvm::size`, this function does *not* guarantee O(1)
/// running time, and is intended to be used in generic code that does not know
/// the exact range type.
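///
/// A minimal sketch (illustrative): the vector's size is found in O(1), while
/// the filtered range falls back to walking the elements.
///
/// \code
/// std::vector<int> V = {1, 2, 3, 4};
/// size_t N = range_size(V);  // 4, via adl_size.
/// size_t M = range_size(make_filter_range(V, [](int X) { return X > 2; }));
/// // M == 2, computed with std::distance.
/// \endcode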
template <typename R> constexpr size_t range_size(R &&Range) {
  if constexpr (detail::HasFreeFunctionSize<R>)
    return adl_size(Range);
  else
    return static_cast<size_t>(std::distance(adl_begin(Range), adl_end(Range)));
}

/// Provide wrappers to std::for_each which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryFunction>
UnaryFunction for_each(R &&Range, UnaryFunction F) {
  return std::for_each(adl_begin(Range), adl_end(Range), F);
}

/// Provide wrappers to std::all_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool all_of(R &&Range, UnaryPredicate P) {
  return std::all_of(adl_begin(Range), adl_end(Range), P);
}

/// Provide wrappers to std::any_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool any_of(R &&Range, UnaryPredicate P) {
  return std::any_of(adl_begin(Range), adl_end(Range), P);
}

/// Provide wrappers to std::none_of which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool none_of(R &&Range, UnaryPredicate P) {
  return std::none_of(adl_begin(Range), adl_end(Range), P);
}

/// Provide wrappers to std::find which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename T> auto find(R &&Range, const T &Val) {
  return std::find(adl_begin(Range), adl_end(Range), Val);
}

/// Provide wrappers to std::find_if which take ranges instead of having to pass
/// begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto find_if(R &&Range, UnaryPredicate P) {
  return std::find_if(adl_begin(Range), adl_end(Range), P);
}

template <typename R, typename UnaryPredicate>
auto find_if_not(R &&Range, UnaryPredicate P) {
  return std::find_if_not(adl_begin(Range), adl_end(Range), P);
}

/// Provide wrappers to std::remove_if which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto remove_if(R &&Range, UnaryPredicate P) {
  return std::remove_if(adl_begin(Range), adl_end(Range), P);
}

/// Provide wrappers to std::copy_if which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename OutputIt, typename UnaryPredicate>
OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
  return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
}

/// Return the single value in \p Range that satisfies
/// \p P(<member of \p Range> *, AllowRepeats) -> T *, returning nullptr
/// when no value, or more than one distinct value, satisfies \p P.
/// When \p AllowRepeats is true, multiple values that compare equal
/// are allowed.
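///
/// A minimal sketch (illustrative; `Insts` is a hypothetical range of
/// `Instruction *`):
///
/// \code
/// // The unique StoreInst in Insts, or nullptr if there are zero or several
/// // distinct ones.
/// StoreInst *OnlyStore = find_singleton<StoreInst>(
///     Insts, [](Instruction *I, bool) { return dyn_cast<StoreInst>(I); });
/// \endcode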
template <typename T, typename R, typename Predicate>
T *find_singleton(R &&Range, Predicate P, bool AllowRepeats = false) {
  T *RC = nullptr;
  for (auto *A : Range) {
    if (T *PRC = P(A, AllowRepeats)) {
      if (RC) {
        if (!AllowRepeats || PRC != RC)
          return nullptr;
      } else
        RC = PRC;
    }
  }
  return RC;
}

/// Return a pair consisting of the single value in \p Range that satisfies
/// \p P(<member of \p Range> *, AllowRepeats) -> std::pair<T *, bool>, with
/// nullptr when no value, or more than one distinct value, was found, and a
/// bool indicating whether multiple values caused the nullptr.
/// When \p AllowRepeats is true, multiple values that compare equal are
/// allowed. The predicate \p P returns a pair<T *, bool> where T is the
/// singleton and the bool indicates whether multiples have already been
/// found. It is expected that first will be nullptr when second is true.
/// This allows using find_singleton_nested within the predicate \p P.
template <typename T, typename R, typename Predicate>
std::pair<T *, bool> find_singleton_nested(R &&Range, Predicate P,
                                           bool AllowRepeats = false) {
  T *RC = nullptr;
  for (auto *A : Range) {
    std::pair<T *, bool> PRC = P(A, AllowRepeats);
    if (PRC.second) {
      assert(PRC.first == nullptr &&
             "Inconsistent return values in find_singleton_nested.");
      return PRC;
    }
    if (PRC.first) {
      if (RC) {
        if (!AllowRepeats || PRC.first != RC)
          return {nullptr, true};
      } else
        RC = PRC.first;
    }
  }
  return {RC, false};
}

template <typename R, typename OutputIt>
OutputIt copy(R &&Range, OutputIt Out) {
  return std::copy(adl_begin(Range), adl_end(Range), Out);
}

/// Provide wrappers to std::replace_copy_if which take ranges instead of having
/// to pass begin/end explicitly.
template <typename R, typename OutputIt, typename UnaryPredicate, typename T>
OutputIt replace_copy_if(R &&Range, OutputIt Out, UnaryPredicate P,
                         const T &NewValue) {
  return std::replace_copy_if(adl_begin(Range), adl_end(Range), Out, P,
                              NewValue);
}

/// Provide wrappers to std::replace_copy which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename OutputIt, typename T>
OutputIt replace_copy(R &&Range, OutputIt Out, const T &OldValue,
                      const T &NewValue) {
  return std::replace_copy(adl_begin(Range), adl_end(Range), Out, OldValue,
                           NewValue);
}

/// Provide wrappers to std::move which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename OutputIt>
OutputIt move(R &&Range, OutputIt Out) {
  return std::move(adl_begin(Range), adl_end(Range), Out);
}

namespace detail {
template <typename Range, typename Element>
using check_has_member_contains_t =
    decltype(std::declval<Range &>().contains(std::declval<const Element &>()));

template <typename Range, typename Element>
static constexpr bool HasMemberContains =
    is_detected<check_has_member_contains_t, Range, Element>::value;

template <typename Range, typename Element>
using check_has_member_find_t =
    decltype(std::declval<Range &>().find(std::declval<const Element &>()) !=
             std::declval<Range &>().end());

template <typename Range, typename Element>
static constexpr bool HasMemberFind =
    is_detected<check_has_member_find_t, Range, Element>::value;

} // namespace detail

/// Returns true if \p Element is found in \p Range. Delegates the check to
/// either `.contains(Element)`, `.find(Element)`, or `std::find`, in this
/// order of preference. This is intended as the canonical way to check if an
/// element exists in a range in generic code or range type that does not
/// expose a `.contains(Element)` member.
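///
/// A minimal sketch (illustrative): the set dispatches to its own `contains`,
/// while the vector falls back to a linear std::find.
///
/// \code
/// DenseSet<int> S = {1, 2, 3};
/// std::vector<int> V = {1, 2, 3};
/// bool A = is_contained(S, 2);  // Uses S.contains(2).
/// bool B = is_contained(V, 2);  // Uses std::find, O(n).
/// \endcode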
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
  if constexpr (detail::HasMemberContains<R, E>)
    return Range.contains(Element);
  else if constexpr (detail::HasMemberFind<R, E>)
    return Range.find(Element) != Range.end();
  else
    return std::find(adl_begin(Range), adl_end(Range), Element) !=
           adl_end(Range);
}

/// Returns true iff \p Element exists in \p Set. This overload takes \p Set as
/// an initializer list and is `constexpr`-friendly.
template <typename T, typename E>
constexpr bool is_contained(std::initializer_list<T> Set, const E &Element) {
  // TODO: Use std::find when we switch to C++20.
  for (const T &V : Set)
    if (V == Element)
      return true;
  return false;
}

/// Wrapper function around std::is_sorted to check if elements in a range \p R
/// are sorted with respect to a comparator \p C.
template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
  return std::is_sorted(adl_begin(Range), adl_end(Range), C);
}

/// Wrapper function around std::is_sorted to check if elements in a range \p R
/// are sorted in non-descending order.
template <typename R> bool is_sorted(R &&Range) {
  return std::is_sorted(adl_begin(Range), adl_end(Range));
}

/// Wrapper function around std::count to count the number of times an element
/// \p Element occurs in the given range \p Range.
template <typename R, typename E> auto count(R &&Range, const E &Element) {
  return std::count(adl_begin(Range), adl_end(Range), Element);
}

/// Wrapper function around std::count_if to count the number of times an
/// element satisfying a given predicate occurs in a range.
template <typename R, typename UnaryPredicate>
auto count_if(R &&Range, UnaryPredicate P) {
  return std::count_if(adl_begin(Range), adl_end(Range), P);
}

/// Wrapper function around std::transform to apply a function to a range and
/// store the result elsewhere.
template <typename R, typename OutputIt, typename UnaryFunction>
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
  return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
}

/// Provide wrappers to std::partition which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto partition(R &&Range, UnaryPredicate P) {
  return std::partition(adl_begin(Range), adl_end(Range), P);
}

/// Provide wrappers to std::lower_bound which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
  return std::lower_bound(adl_begin(Range), adl_end(Range),
                          std::forward<T>(Value));
}

template <typename R, typename T, typename Compare>
auto lower_bound(R &&Range, T &&Value, Compare C) {
  return std::lower_bound(adl_begin(Range), adl_end(Range),
                          std::forward<T>(Value), C);
}

/// Provide wrappers to std::upper_bound which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
  return std::upper_bound(adl_begin(Range), adl_end(Range),
                          std::forward<T>(Value));
}

template <typename R, typename T, typename Compare>
auto upper_bound(R &&Range, T &&Value, Compare C) {
  return std::upper_bound(adl_begin(Range), adl_end(Range),
                          std::forward<T>(Value), C);
}

template <typename R>
void stable_sort(R &&Range) {
  std::stable_sort(adl_begin(Range), adl_end(Range));
}

template <typename R, typename Compare>
void stable_sort(R &&Range, Compare C) {
  std::stable_sort(adl_begin(Range), adl_end(Range), C);
}

/// Binary search for the first iterator in a range where a predicate is false.
/// Requires that \p P is always true below some limit, and always false above
/// it.
template <typename R, typename Predicate,
          typename Val = decltype(*adl_begin(std::declval<R>()))>
auto partition_point(R &&Range, Predicate P) {
  return std::partition_point(adl_begin(Range), adl_end(Range), P);
}

template<typename Range, typename Predicate>
auto unique(Range &&R, Predicate P) {
  return std::unique(adl_begin(R), adl_end(R), P);
}

/// Wrapper function around std::equal to detect if pair-wise elements between
/// two ranges are the same.
template <typename L, typename R> bool equal(L &&LRange, R &&RRange) {
  return std::equal(adl_begin(LRange), adl_end(LRange), adl_begin(RRange),
                    adl_end(RRange));
}

/// Returns true if all elements in \p Range are equal, or if the range is
/// empty.
template <typename R> bool all_equal(R &&Range) {
  auto Begin = adl_begin(Range);
  auto End = adl_end(Range);
  return Begin == End || std::equal(Begin + 1, End, Begin);
}

/// Returns true if all Values in the initializer list are equal, or if the
/// list is empty.
template <typename T> bool all_equal(std::initializer_list<T> Values) {
  return all_equal<std::initializer_list<T>>(std::move(Values));
}

/// Provide a container algorithm similar to C++ Library Fundamentals v2's
/// `erase_if` which is equivalent to:
///
///   C.erase(remove_if(C, pred), C.end());
///
/// This version works for any container with an erase method call accepting
/// two iterators.
template <typename Container, typename UnaryPredicate>
void erase_if(Container &C, UnaryPredicate P) {
  C.erase(remove_if(C, P), C.end());
}

/// Wrapper function to remove a value from a container:
///
/// C.erase(remove(C.begin(), C.end(), V), C.end());
template <typename Container, typename ValueType>
void erase_value(Container &C, ValueType V) {
  C.erase(std::remove(C.begin(), C.end(), V), C.end());
}

/// Wrapper function to append a range to a container.
///
/// C.insert(C.end(), R.begin(), R.end());
template <typename Container, typename Range>
inline void append_range(Container &C, Range &&R) {
  C.insert(C.end(), adl_begin(R), adl_end(R));
}

/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
/// the range [ValIt, ValEnd) (which is not from the same container).
template<typename Container, typename RandomAccessIterator>
void replace(Container &Cont, typename Container::iterator ContIt,
             typename Container::iterator ContEnd, RandomAccessIterator ValIt,
             RandomAccessIterator ValEnd) {
  while (true) {
    if (ValIt == ValEnd) {
      Cont.erase(ContIt, ContEnd);
      return;
    } else if (ContIt == ContEnd) {
      Cont.insert(ContIt, ValIt, ValEnd);
      return;
    }
    *ContIt++ = *ValIt++;
  }
}

/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
/// the range R.
template<typename Container, typename Range = std::initializer_list<
                                 typename Container::value_type>>
void replace(Container &Cont, typename Container::iterator ContIt,
             typename Container::iterator ContEnd, Range R) {
  replace(Cont, ContIt, ContEnd, R.begin(), R.end());
}

/// An STL-style algorithm similar to std::for_each that applies a second
/// functor between every pair of elements.
///
/// This provides the control flow logic to, for example, print a
/// comma-separated list:
/// \code
///   interleave(names.begin(), names.end(),
///              [&](StringRef name) { os << name; },
///              [&] { os << ", "; });
/// \endcode
template <typename ForwardIterator, typename UnaryFunctor,
          typename NullaryFunctor,
          typename = std::enable_if_t<
              !std::is_constructible<StringRef, UnaryFunctor>::value &&
              !std::is_constructible<StringRef, NullaryFunctor>::value>>
inline void interleave(ForwardIterator begin, ForwardIterator end,
                       UnaryFunctor each_fn, NullaryFunctor between_fn) {
  if (begin == end)
    return;
  each_fn(*begin);
  ++begin;
  for (; begin != end; ++begin) {
    between_fn();
    each_fn(*begin);
  }
}

template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
          typename = std::enable_if_t<
              !std::is_constructible<StringRef, UnaryFunctor>::value &&
              !std::is_constructible<StringRef, NullaryFunctor>::value>>
inline void interleave(const Container &c, UnaryFunctor each_fn,
                       NullaryFunctor between_fn) {
  interleave(c.begin(), c.end(), each_fn, between_fn);
}

/// Overload of interleave for the common case of string separator.
template <typename Container, typename UnaryFunctor, typename StreamT,
          typename T = detail::ValueOfRange<Container>>
inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
                       const StringRef &separator) {
  interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
}
template <typename Container, typename StreamT,
          typename T = detail::ValueOfRange<Container>>
inline void interleave(const Container &c, StreamT &os,
                       const StringRef &separator) {
  interleave(
      c, os, [&](const T &a) { os << a; }, separator);
}

template <typename Container, typename UnaryFunctor, typename StreamT,
          typename T = detail::ValueOfRange<Container>>
inline void interleaveComma(const Container &c, StreamT &os,
                            UnaryFunctor each_fn) {
  interleave(c, os, each_fn, ", ");
}
template <typename Container, typename StreamT,
          typename T = detail::ValueOfRange<Container>>
inline void interleaveComma(const Container &c, StreamT &os) {
  interleaveComma(c, os, [&](const T &a) { os << a; });
}

//===----------------------------------------------------------------------===//
//     Extra additions to <memory>
//===----------------------------------------------------------------------===//

struct FreeDeleter {
  void operator()(void* v) {
    ::free(v);
  }
};

template<typename First, typename Second>
struct pair_hash {
  size_t operator()(const std::pair<First, Second> &P) const {
    return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
  }
};

/// Binary functor that adapts to any other binary functor after dereferencing
/// operands.
template <typename T> struct deref {
  T func;

  // Could be further improved to cope with non-derivable functors and
  // non-binary functors (should be a variadic template member function
  // operator()).
  template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
    assert(lhs);
    assert(rhs);
    return func(*lhs, *rhs);
  }
};

namespace detail {

/// Tuple-like type for `zip_enumerator` dereference.
template <typename... Refs> struct enumerator_result;

template <typename... Iters>
using EnumeratorTupleType = enumerator_result<decltype(*declval<Iters>())...>;

/// Zippy iterator that uses the second iterator for comparisons. For the
/// increment to be safe, the second range has to be the shortest.
/// Returns `enumerator_result` on dereference to provide `.index()` and
/// `.value()` member functions.
/// Note: The dereference operator returns `enumerator_result` as a value
/// instead of a reference, and so does not strictly conform to the C++17
/// definition of a forward iterator. However, it satisfies all the
/// forward_iterator requirements that `zip_common` and `zippy` depend on,
/// and fully conforms to the C++20 definition of a forward iterator.
/// This is similar to `std::vector<bool>::iterator` that returns bit reference
/// wrappers on dereference.
template <typename... Iters>
struct zip_enumerator : zip_common<zip_enumerator<Iters...>,
                                   EnumeratorTupleType<Iters...>, Iters...> {
  static_assert(sizeof...(Iters) >= 2, "Expected at least two iteratees");
  using zip_common<zip_enumerator<Iters...>, EnumeratorTupleType<Iters...>,
                   Iters...>::zip_common;

  bool operator==(const zip_enumerator &Other) const {
    return std::get<1>(this->iterators) == std::get<1>(Other.iterators);
  }
};

template <typename... Refs> struct enumerator_result<std::size_t, Refs...> {
  static constexpr std::size_t NumRefs = sizeof...(Refs);
  static_assert(NumRefs != 0);
  // `NumValues` includes the index.
  static constexpr std::size_t NumValues = NumRefs + 1;

  // Tuple type whose element types are references for each `Ref`.
  using range_reference_tuple = std::tuple<Refs...>;
  // Tuple type whose elements are references to all values, including both
  // the index and `Refs` reference types.
  using value_reference_tuple = std::tuple<std::size_t, Refs...>;

  enumerator_result(std::size_t Index, Refs &&...Rs)
      : Idx(Index), Storage(std::forward<Refs>(Rs)...) {}

  /// Returns the 0-based index of the current position within the original
  /// input range(s).
  std::size_t index() const { return Idx; }

  /// Returns the value(s) for the current iterator. This does not include the
  /// index.
  decltype(auto) value() const {
    if constexpr (NumRefs == 1)
      return std::get<0>(Storage);
    else
      return Storage;
  }

  /// Returns the value at index `I`. This case covers the index.
  template <std::size_t I, typename = std::enable_if_t<I == 0>>
  friend std::size_t get(const enumerator_result &Result) {
    return Result.Idx;
  }

  /// Returns the value at index `I`. This case covers references to the
  /// iteratees.
  template <std::size_t I, typename = std::enable_if_t<I != 0>>
  friend decltype(auto) get(const enumerator_result &Result) {
    // Note: This is a separate function from the other `get`, instead of an
    // `if constexpr` case, to work around an MSVC 19.31.31XXX compiler
    // (Visual Studio 2022 17.1) return type deduction bug.
    return std::get<I - 1>(Result.Storage);
  }

  template <typename... Ts>
  friend bool operator==(const enumerator_result &Result,
                         const std::tuple<std::size_t, Ts...> &Other) {
    static_assert(NumRefs == sizeof...(Ts), "Size mismatch");
    if (Result.Idx != std::get<0>(Other))
      return false;
    return Result.is_value_equal(Other, std::make_index_sequence<NumRefs>{});
  }

private:
  template <typename Tuple, std::size_t... Idx>
  bool is_value_equal(const Tuple &Other, std::index_sequence<Idx...>) const {
    return ((std::get<Idx>(Storage) == std::get<Idx + 1>(Other)) && ...);
  }

  std::size_t Idx;
  // Make this tuple mutable to avoid casts that obfuscate const-correctness
  // issues. Const-correctness of references is taken care of by `zippy`, which
  // defines non-const and const iterator types that will propagate down to
  // `enumerator_result`'s `Refs`.
  // Note that unlike the results of the `zip*` functions, `enumerate`'s
  // results are supposed to be modifiable even when defined as `const`.
  mutable range_reference_tuple Storage;
};

/// Infinite stream of increasing 0-based `size_t` indices.
struct index_stream {
  struct iterator : iterator_facade_base<iterator, std::forward_iterator_tag,
                                         const iterator> {
    iterator &operator++() {
      assert(Index != std::numeric_limits<std::size_t>::max() &&
             "Attempting to increment end iterator");
      ++Index;
      return *this;
    }

    // Note: This dereference operator returns a value instead of a reference
    // and does not strictly conform to the C++17's definition of forward
    // iterator. However, it satisfies all the forward_iterator requirements
    // that the `zip_common` depends on and fully conforms to the C++20
    // definition of forward iterator.
    std::size_t operator*() const { return Index; }

    friend bool operator==(const iterator &Lhs, const iterator &Rhs) {
      return Lhs.Index == Rhs.Index;
    }

    std::size_t Index = 0;
  };

  iterator begin() const { return {}; }
  iterator end() const {
    // We approximate 'infinity' with the max size_t value, which should be good
    // enough to index over any container.
    iterator It;
    It.Index = std::numeric_limits<std::size_t>::max();
    return It;
  }
};

} // end namespace detail

/// Given two or more input ranges, returns a new range whose values are
/// tuples (A, B, C, ...), such that A is the 0-based index of the item in the
/// sequence, and B, C, ..., are the values from the original input ranges. All
/// input ranges are required to have equal lengths. Note that the returned
/// iterator allows for the values (B, C, ...) to be modified. Example:
///
/// ```c++
/// std::vector<char> Letters = {'A', 'B', 'C', 'D'};
/// std::vector<int> Vals = {10, 11, 12, 13};
///
/// for (auto [Index, Letter, Value] : enumerate(Letters, Vals)) {
///   printf("Item %zu - %c: %d\n", Index, Letter, Value);
///   Value -= 10;
/// }
/// ```
///
/// Output:
///   Item 0 - A: 10
///   Item 1 - B: 11
///   Item 2 - C: 12
///   Item 3 - D: 13
///
/// or using an iterator:
/// ```c++
/// for (auto it : enumerate(Vals)) {
///   it.value() += 10;
///   printf("Item %zu: %d\n", it.index(), it.value());
/// }
/// ```
///
/// Output:
///   Item 0: 20
///   Item 1: 21
///   Item 2: 22
///   Item 3: 23
///
template <typename FirstRange, typename... RestRanges>
auto enumerate(FirstRange &&First, RestRanges &&...Rest) {
  if constexpr (sizeof...(Rest) != 0) {
#ifndef NDEBUG
    // Note: Create an array instead of an initializer list to work around an
    // Apple clang 14 compiler bug.
    size_t sizes[] = {range_size(First), range_size(Rest)...};
    assert(all_equal(sizes) && "Ranges have different length");
#endif
  }
  using enumerator = detail::zippy<detail::zip_enumerator, detail::index_stream,
                                   FirstRange, RestRanges...>;
  return enumerator(detail::index_stream{}, std::forward<FirstRange>(First),
                    std::forward<RestRanges>(Rest)...);
}

namespace detail {

template <typename Predicate, typename... Args>
bool all_of_zip_predicate_first(Predicate &&P, Args &&...args) {
  auto z = zip(args...);
  auto it = z.begin();
  auto end = z.end();
  while (it != end) {
    if (!std::apply([&](auto &&...args) { return P(args...); }, *it))
      return false;
    ++it;
  }
  return it.all_equals(end);
}

// Just an adaptor to switch the order of argument and have the predicate before
// the zipped inputs.
template <typename... ArgsThenPredicate, size_t... InputIndexes>
bool all_of_zip_predicate_last(
    std::tuple<ArgsThenPredicate...> argsThenPredicate,
    std::index_sequence<InputIndexes...>) {
  auto constexpr OutputIndex =
      std::tuple_size<decltype(argsThenPredicate)>::value - 1;
  return all_of_zip_predicate_first(
      std::get<OutputIndex>(argsThenPredicate),
      std::get<InputIndexes>(argsThenPredicate)...);
}

} // end namespace detail

/// Compare two zipped ranges using the provided predicate (as last argument).
/// Return true if all elements satisfy the predicate and false otherwise.
/// Return false if the zipped iterators aren't all at the end (size mismatch).
template <typename... ArgsAndPredicate>
bool all_of_zip(ArgsAndPredicate &&...argsAndPredicate) {
  return detail::all_of_zip_predicate_last(
      std::forward_as_tuple(argsAndPredicate...),
      std::make_index_sequence<sizeof...(argsAndPredicate) - 1>{});
}
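
// A minimal usage sketch (names are illustrative): compare two ranges
// element-wise, passing the predicate last.
//
//   std::vector<int> A = {1, 2, 3};
//   std::vector<int> B = {1, 2, 3};
//   bool Same = all_of_zip(A, B, [](int X, int Y) { return X == Y; });
//   // Same == true; a length mismatch between A and B would yield false.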

/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
/// time. Not meant for use with random-access iterators.
/// Can optionally take a predicate to lazily filter some items.
template <typename IterTy,
          typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItems(
    IterTy &&Begin, IterTy &&End, unsigned N,
    Pred &&ShouldBeCounted =
        [](const decltype(*std::declval<IterTy>()) &) { return true; },
    std::enable_if_t<
        !std::is_base_of<std::random_access_iterator_tag,
                         typename std::iterator_traits<std::remove_reference_t<
                             decltype(Begin)>>::iterator_category>::value,
        void> * = nullptr) {
  for (; N; ++Begin) {
    if (Begin == End)
      return false; // Too few.
    N -= ShouldBeCounted(*Begin);
  }
  for (; Begin != End; ++Begin)
    if (ShouldBeCounted(*Begin))
      return false; // Too many.
  return true;
}

/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
/// time. Not meant for use with random-access iterators.
/// Can optionally take a predicate to lazily filter some items.
template <typename IterTy,
          typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItemsOrMore(
    IterTy &&Begin, IterTy &&End, unsigned N,
    Pred &&ShouldBeCounted =
        [](const decltype(*std::declval<IterTy>()) &) { return true; },
    std::enable_if_t<
        !std::is_base_of<std::random_access_iterator_tag,
                         typename std::iterator_traits<std::remove_reference_t<
                             decltype(Begin)>>::iterator_category>::value,
        void> * = nullptr) {
  for (; N; ++Begin) {
    if (Begin == End)
      return false; // Too few.
    N -= ShouldBeCounted(*Begin);
  }
  return true;
}

/// Returns true if the sequence [Begin, End) has N or fewer items. Can
/// optionally take a predicate to lazily filter some items.
template <typename IterTy,
          typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
bool hasNItemsOrLess(
    IterTy &&Begin, IterTy &&End, unsigned N,
    Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
      return true;
    }) {
  assert(N != std::numeric_limits<unsigned>::max());
  return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
}

/// Returns true if the given container has exactly N items.
template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
  return hasNItems(std::begin(C), std::end(C), N);
}

/// Returns true if the given container has N or more items.
template <typename ContainerTy>
bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
  return hasNItemsOrMore(std::begin(C), std::end(C), N);
}

/// Returns true if the given container has N or fewer items.
template <typename ContainerTy>
bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
  return hasNItemsOrLess(std::begin(C), std::end(C), N);
}
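
// A usage sketch for the hasNItems family (names are illustrative), useful on
// containers whose size() is unavailable or not O(1):
//
//   std::forward_list<int> L = {1, 2, 3};
//   bool ExactlyThree = hasNItems(L, 3);      // true
//   bool AtLeastTwo = hasNItemsOrMore(L, 2);  // true
//   bool AtMostFour = hasNItemsOrLess(L, 4);  // true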

/// Returns a raw pointer that represents the same address as the argument.
///
/// This implementation can be removed once we move to C++20 where it's defined
/// as std::to_address().
///
/// The std::pointer_traits<>::to_address(p) variations of these overloads have
/// not been implemented.
template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
template <class T> constexpr T *to_address(T *P) { return P; }
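
// A minimal sketch covering both overloads:
//
//   auto UP = std::make_unique<int>(42);
//   int *Raw = to_address(UP);    // Calls UP.operator->().
//   int *Same = to_address(Raw);  // Identity for plain pointers.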

} // end namespace llvm

namespace std {
template <typename... Refs>
struct tuple_size<llvm::detail::enumerator_result<Refs...>>
    : std::integral_constant<std::size_t, sizeof...(Refs)> {};

template <std::size_t I, typename... Refs>
struct tuple_element<I, llvm::detail::enumerator_result<Refs...>>
    : std::tuple_element<I, std::tuple<Refs...>> {};

template <std::size_t I, typename... Refs>
struct tuple_element<I, const llvm::detail::enumerator_result<Refs...>>
    : std::tuple_element<I, std::tuple<Refs...>> {};

} // namespace std

#endif // LLVM_ADT_STLEXTRAS_H

//===- Any.h - Generic type erased holder of any type -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///  This file provides Any, a non-template class modeled in the spirit of
///  std::any.  The idea is to provide a type-safe replacement for C's void*.
///  It can hold a value of any copy-constructible, copy-assignable type.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ANY_H
#define LLVM_ADT_ANY_H

#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/Support/Compiler.h"

#include <cassert>
#include <memory>
#include <type_traits>

namespace llvm {

class LLVM_EXTERNAL_VISIBILITY Any {

  // The `Typeid<T>::Id` static data member below is a globally unique
  // identifier for the type `T`. It is explicitly marked with default
  // visibility so that when `-fvisibility=hidden` is used, the loader still
  // merges duplicate definitions across DSO boundaries.
  // We also cannot mark it as `const`, otherwise msvc merges all definitions
  // when lto is enabled, making any comparison return true.
  template <typename T> struct TypeId { static char Id; };

  struct StorageBase {
    virtual ~StorageBase() = default;
    virtual std::unique_ptr<StorageBase> clone() const = 0;
    virtual const void *id() const = 0;
  };

  template <typename T> struct StorageImpl : public StorageBase {
    explicit StorageImpl(const T &Value) : Value(Value) {}

    explicit StorageImpl(T &&Value) : Value(std::move(Value)) {}

    std::unique_ptr<StorageBase> clone() const override {
      return std::make_unique<StorageImpl<T>>(Value);
    }

    const void *id() const override { return &TypeId<T>::Id; }

    T Value;

  private:
    StorageImpl &operator=(const StorageImpl &Other) = delete;
    StorageImpl(const StorageImpl &Other) = delete;
  };

public:
  Any() = default;

  Any(const Any &Other)
      : Storage(Other.Storage ? Other.Storage->clone() : nullptr) {}

  // When T is Any or T is not copy-constructible we need to explicitly disable
  // the forwarding constructor so that the copy constructor gets selected
  // instead.
  template <typename T,
            std::enable_if_t<
                std::conjunction<
                    std::negation<std::is_same<std::decay_t<T>, Any>>,
                    // We also disable this overload when an `Any` object can be
                    // converted to the parameter type because in that case,
                    // this constructor may combine with that conversion during
                    // overload resolution for determining copy
                    // constructibility, and then when we try to determine copy
                    // constructibility below we may infinitely recurse. This is
                    // being evaluated by the standards committee as a potential
                    // DR in `std::any` as well, but we're going ahead and
                    // adopting it to work-around usage of `Any` with types that
                    // need to be implicitly convertible from an `Any`.
                    std::negation<std::is_convertible<Any, std::decay_t<T>>>,
                    std::is_copy_constructible<std::decay_t<T>>>::value,
                int> = 0>
  Any(T &&Value) {
    Storage =
        std::make_unique<StorageImpl<std::decay_t<T>>>(std::forward<T>(Value));
  }

  Any(Any &&Other) : Storage(std::move(Other.Storage)) {}

  Any &swap(Any &Other) {
    std::swap(Storage, Other.Storage);
    return *this;
  }

  Any &operator=(Any Other) {
    Storage = std::move(Other.Storage);
    return *this;
  }

  bool has_value() const { return !!Storage; }

  void reset() { Storage.reset(); }

private:
  // Only used for the internal llvm::Any implementation
  template <typename T> bool isa() const {
    if (!Storage)
      return false;
    return Storage->id() == &Any::TypeId<remove_cvref_t<T>>::Id;
  }

  template <class T> friend T any_cast(const Any &Value);
  template <class T> friend T any_cast(Any &Value);
  template <class T> friend T any_cast(Any &&Value);
  template <class T> friend const T *any_cast(const Any *Value);
  template <class T> friend T *any_cast(Any *Value);
  template <typename T> friend bool any_isa(const Any &Value);

  std::unique_ptr<StorageBase> Storage;
};

// Define the type id and initialize with a non-zero value.
// Initializing with a zero value means the variable can end up in either the
// .data or the .bss section. This can lead to multiple definition linker errors
// when some object files are compiled with a compiler that puts the variable
// into .data but they are linked to object files from a different compiler that
// put the variable into .bss. To prevent this issue from happening, initialize
// the variable with a non-zero value, which forces it to land in .data (because
// .bss is zero-initialized).
// See also https://github.com/llvm/llvm-project/issues/62270
template <typename T> char Any::TypeId<T>::Id = 1;

template <typename T>
LLVM_DEPRECATED("Use any_cast(Any*) != nullptr instead", "any_cast")
bool any_isa(const Any &Value) {
  return Value.isa<T>();
}

template <class T> T any_cast(const Any &Value) {
  assert(Value.isa<T>() && "Bad any cast!");
  return static_cast<T>(*any_cast<remove_cvref_t<T>>(&Value));
}

template <class T> T any_cast(Any &Value) {
  assert(Value.isa<T>() && "Bad any cast!");
  return static_cast<T>(*any_cast<remove_cvref_t<T>>(&Value));
}

template <class T> T any_cast(Any &&Value) {
  assert(Value.isa<T>() && "Bad any cast!");
  return static_cast<T>(std::move(*any_cast<remove_cvref_t<T>>(&Value)));
}

template <class T> const T *any_cast(const Any *Value) {
  using U = remove_cvref_t<T>;
  if (!Value || !Value->isa<U>())
    return nullptr;
  return &static_cast<Any::StorageImpl<U> &>(*Value->Storage).Value;
}

template <class T> T *any_cast(Any *Value) {
  using U = std::decay_t<T>;
  if (!Value || !Value->isa<U>())
    return nullptr;
  return &static_cast<Any::StorageImpl<U> &>(*Value->Storage).Value;
}
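
// A minimal usage sketch:
//
//   Any A = 7;                       // Stores a copy of the int.
//   if (int *I = any_cast<int>(&A))  // Pointer form: null on type mismatch.
//     *I += 1;
//   int V = any_cast<int>(A);        // Reference form: asserts on mismatch.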

} // end namespace llvm

#endif // LLVM_ADT_ANY_H

//===- llvm/ADT/ScopeExit.h - Execute code at scope exit --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the make_scope_exit function, which executes user-defined
/// cleanup logic at scope exit.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_SCOPEEXIT_H
#define LLVM_ADT_SCOPEEXIT_H

#include "llvm/Support/Compiler.h"

#include <type_traits>
#include <utility>

namespace llvm {
namespace detail {

template <typename Callable> class scope_exit {
  Callable ExitFunction;
  bool Engaged = true; // False once moved-from or release()d.

public:
  template <typename Fp>
  explicit scope_exit(Fp &&F) : ExitFunction(std::forward<Fp>(F)) {}

  scope_exit(scope_exit &&Rhs)
      : ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
    Rhs.release();
  }
  scope_exit(const scope_exit &) = delete;
  scope_exit &operator=(scope_exit &&) = delete;
  scope_exit &operator=(const scope_exit &) = delete;

  void release() { Engaged = false; }

  ~scope_exit() {
    if (Engaged)
      ExitFunction();
  }
};

} // end namespace detail

// Keeps the callable object that is passed in, and executes it at the
// destruction of the returned object (usually at the scope exit where the
// returned object is kept).
//
// Interface is specified by p0052r2.
template <typename Callable>
[[nodiscard]] detail::scope_exit<std::decay_t<Callable>>
make_scope_exit(Callable &&F) {
  return detail::scope_exit<std::decay_t<Callable>>(std::forward<Callable>(F));
}
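
// A minimal usage sketch (the file handle is illustrative):
//
//   FILE *F = fopen("out.txt", "w");
//   auto Closer = make_scope_exit([&] { fclose(F); });
//   // ... early returns are safe: F is closed when Closer is destroyed.
//   // Call Closer.release() to cancel the cleanup instead.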

} // end namespace llvm

#endif

//===- llvm/ADT/BreadthFirstIterator.h - Breadth First iterator -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file builds on the ADT/GraphTraits.h file to build a generic breadth
/// first graph iterator.  This file exposes the following functions/types:
///
/// bf_begin/bf_end/bf_iterator
///   * Normal breadth-first iteration - visit a graph level-by-level.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_BREADTHFIRSTITERATOR_H
#define LLVM_ADT_BREADTHFIRSTITERATOR_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/iterator_range.h"
#include <iterator>
#include <optional>
#include <queue>
#include <utility>

namespace llvm {

// bf_iterator_storage - A private class which is used to figure out where to
// store the visited set. We only provide a non-external variant for now.
template <class SetType> class bf_iterator_storage {
public:
  SetType Visited;
};

// The visited state for the iteration is a simple set.
template <typename NodeRef, unsigned SmallSize = 8>
using bf_iterator_default_set = SmallPtrSet<NodeRef, SmallSize>;

// Generic Breadth first search iterator.
template <class GraphT,
          class SetType =
              bf_iterator_default_set<typename GraphTraits<GraphT>::NodeRef>,
          class GT = GraphTraits<GraphT>>
class bf_iterator : public bf_iterator_storage<SetType> {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = typename GT::NodeRef;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = const value_type &;

private:
  using NodeRef = typename GT::NodeRef;
  using ChildItTy = typename GT::ChildIteratorType;

  // First element is the node reference, second is the next child to visit.
  using QueueElement = std::pair<NodeRef, std::optional<ChildItTy>>;

  // Visit queue - used to maintain BFS ordering.
  // std::optional<> because we need markers for levels.
  std::queue<std::optional<QueueElement>> VisitQueue;

  // Current level.
  unsigned Level = 0;

  inline bf_iterator(NodeRef Node) {
    this->Visited.insert(Node);
    Level = 0;

    // Also, insert a dummy node as a marker.
    VisitQueue.push(QueueElement(Node, std::nullopt));
    VisitQueue.push(std::nullopt);
  }

  inline bf_iterator() = default;

  inline void toNext() {
    std::optional<QueueElement> Head = VisitQueue.front();
    QueueElement H = *Head;
    NodeRef Node = H.first;
    std::optional<ChildItTy> &ChildIt = H.second;

    if (!ChildIt)
      ChildIt.emplace(GT::child_begin(Node));
    while (*ChildIt != GT::child_end(Node)) {
      NodeRef Next = *(*ChildIt)++;

      // Already visited?
      if (this->Visited.insert(Next).second)
        VisitQueue.push(QueueElement(Next, std::nullopt));
    }
    VisitQueue.pop();

    // Go to the next element skipping markers if needed.
    if (!VisitQueue.empty()) {
      Head = VisitQueue.front();
      if (Head != std::nullopt)
        return;
      Level += 1;
      VisitQueue.pop();

      // Don't push another marker if this is the last
      // element.
      if (!VisitQueue.empty())
        VisitQueue.push(std::nullopt);
    }
  }

public:
  // Provide static begin and end methods as our public "constructors"
  static bf_iterator begin(const GraphT &G) {
    return bf_iterator(GT::getEntryNode(G));
  }

  static bf_iterator end(const GraphT &G) { return bf_iterator(); }

  bool operator==(const bf_iterator &RHS) const {
    return VisitQueue == RHS.VisitQueue;
  }

  bool operator!=(const bf_iterator &RHS) const { return !(*this == RHS); }

  reference operator*() const { return VisitQueue.front()->first; }

  // This is a nonstandard operator-> that dereferences the pointer an extra
  // time so that you can actually call methods on the node, because the
  // contained type is a pointer.
  NodeRef operator->() const { return **this; }

  bf_iterator &operator++() { // Pre-increment
    toNext();
    return *this;
  }

  bf_iterator operator++(int) { // Post-increment
    bf_iterator ItCopy = *this;
    ++*this;
    return ItCopy;
  }

  unsigned getLevel() const { return Level; }
};

// Provide global constructors that automatically figure out correct types.
template <class T> bf_iterator<T> bf_begin(const T &G) {
  return bf_iterator<T>::begin(G);
}

template <class T> bf_iterator<T> bf_end(const T &G) {
  return bf_iterator<T>::end(G);
}

// Provide an accessor method to use them in range-based patterns.
template <class T> iterator_range<bf_iterator<T>> breadth_first(const T &G) {
  return make_range(bf_begin(G), bf_end(G));
}
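
// A usage sketch, assuming GraphTraits is specialized for the node type (as it
// is for, e.g., BasicBlock *):
//
//   // F is an llvm::Function; BasicBlock's GraphTraits drive the traversal.
//   for (BasicBlock *BB : breadth_first(&F.getEntryBlock()))
//     visit(BB);  // Nodes arrive level-by-level; use explicit bf_begin/bf_end
//                 // iteration when getLevel() is needed.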

} // end namespace llvm

#endif // LLVM_ADT_BREADTHFIRSTITERATOR_H

//===- llvm/ADT/PointerEmbeddedInt.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_POINTEREMBEDDEDINT_H
#define LLVM_ADT_POINTEREMBEDDEDINT_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <climits>
#include <cstdint>
#include <type_traits>

namespace llvm {

/// Utility to embed an integer into a pointer-like type. This is specifically
/// intended to allow embedding integers where fewer bits are required than
/// exist in a pointer, and the integer can participate in abstractions along
/// side other pointer-like types. For example it can be placed into a \c
/// PointerSumType or \c PointerUnion.
///
/// Note that much like pointers, an integer value of zero has special utility
/// due to boolean conversions. For example, a non-null value can be tested for
/// in the above abstractions without testing the particular active member.
/// Also, the default constructed value zero initializes the integer.
template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
class PointerEmbeddedInt {
  uintptr_t Value = 0;

  // Note: This '<' is correct; using '<=' would result in some shifts
  // overflowing their storage types.
  static_assert(Bits < sizeof(uintptr_t) * CHAR_BIT,
                "Cannot embed more bits than we have in a pointer!");

  enum : uintptr_t {
    // We shift as many zeros into the value as we can while preserving the
    // number of bits desired for the integer.
    Shift = sizeof(uintptr_t) * CHAR_BIT - Bits,

    // We also want to be able to mask out the preserved bits for asserts.
    Mask = static_cast<uintptr_t>(-1) << Bits
  };

  struct RawValueTag {
    explicit RawValueTag() = default;
  };

  friend struct PointerLikeTypeTraits<PointerEmbeddedInt>;

  explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}

public:
  PointerEmbeddedInt() = default;

  PointerEmbeddedInt(IntT I) { *this = I; }

  PointerEmbeddedInt &operator=(IntT I) {
    assert((std::is_signed<IntT>::value ? isInt<Bits>(I) : isUInt<Bits>(I)) &&
           "Integer has bits outside those preserved!");
    Value = static_cast<uintptr_t>(I) << Shift;
    return *this;
  }

  // Note that this implicit conversion additionally allows all of the basic
  // comparison operators to work transparently, etc.
  operator IntT() const {
    if (std::is_signed<IntT>::value)
      return static_cast<IntT>(static_cast<intptr_t>(Value) >> Shift);
    return static_cast<IntT>(Value >> Shift);
  }
};
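
// A minimal usage sketch: the embedded integer converts transparently, while
// the type itself remains usable wherever pointer-like traits are expected
// (e.g. inside a PointerUnion or PointerSumType).
//
//   PointerEmbeddedInt<unsigned, 30> N = 5;
//   unsigned Plain = N;  // Implicit conversion back to the integer.
//   assert(N == 5u);     // Comparisons work through the conversion.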

// Provide pointer like traits to support use with pointer unions and sum
// types.
template <typename IntT, int Bits>
struct PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
  using T = PointerEmbeddedInt<IntT, Bits>;

  static inline void *getAsVoidPointer(const T &P) {
    return reinterpret_cast<void *>(P.Value);
  }

  static inline T getFromVoidPointer(void *P) {
    return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
  }

  static inline T getFromVoidPointer(const void *P) {
    return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
  }

  static constexpr int NumLowBitsAvailable = T::Shift;
};

// Teach DenseMap how to use PointerEmbeddedInt objects as keys if the Int type
// itself can be a key.
template <typename IntT, int Bits>
struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
  using T = PointerEmbeddedInt<IntT, Bits>;
  using IntInfo = DenseMapInfo<IntT>;

  static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
  static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }

  static unsigned getHashValue(const T &Arg) {
    return IntInfo::getHashValue(Arg);
  }

  static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
};

} // end namespace llvm

#endif // LLVM_ADT_POINTEREMBEDDEDINT_H

//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the 'Statistic' class, which is designed to be an easy way
/// to expose various metrics from passes.  These statistics are printed at the
/// end of a run (from llvm_shutdown), when the -stats command line option is
/// passed on the command line.
///
/// This is useful for reporting information like the number of instructions
/// simplified, optimized or removed by various transformations, like this:
///
/// static Statistic NumInstsKilled("gcse", "Number of instructions killed");
///
/// Later, in the code: ++NumInstsKilled;
///
/// NOTE: Statistics *must* be declared as global variables.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_STATISTIC_H
#define LLVM_ADT_STATISTIC_H

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Compiler.h"
#include <atomic>
#include <memory>
#include <vector>

// Determine whether statistics should be enabled. We must do it here rather
// than in CMake because multi-config generators cannot determine this at
// configure time.
#if !defined(NDEBUG) || LLVM_FORCE_ENABLE_STATS
#define LLVM_ENABLE_STATS 1
#else
#define LLVM_ENABLE_STATS 0
#endif

namespace llvm {

class raw_ostream;
class raw_fd_ostream;
class StringRef;

class TrackingStatistic {
public:
  const char *const DebugType;
  const char *const Name;
  const char *const Desc;

  std::atomic<uint64_t> Value;
  std::atomic<bool> Initialized;

  constexpr TrackingStatistic(const char *DebugType, const char *Name,
                              const char *Desc)
      : DebugType(DebugType), Name(Name), Desc(Desc), Value(0),
        Initialized(false) {}

  const char *getDebugType() const { return DebugType; }
  const char *getName() const { return Name; }
  const char *getDesc() const { return Desc; }

  uint64_t getValue() const { return Value.load(std::memory_order_relaxed); }

  // Allow use of this class as the value itself.
  operator uint64_t() const { return getValue(); }

  const TrackingStatistic &operator=(uint64_t Val) {
    Value.store(Val, std::memory_order_relaxed);
    return init();
  }

  const TrackingStatistic &operator++() {
    Value.fetch_add(1, std::memory_order_relaxed);
    return init();
  }

  uint64_t operator++(int) {
    init();
    return Value.fetch_add(1, std::memory_order_relaxed);
  }

  const TrackingStatistic &operator--() {
    Value.fetch_sub(1, std::memory_order_relaxed);
    return init();
  }

  uint64_t operator--(int) {
    init();
    return Value.fetch_sub(1, std::memory_order_relaxed);
  }

  const TrackingStatistic &operator+=(uint64_t V) {
    if (V == 0)
      return *this;
    Value.fetch_add(V, std::memory_order_relaxed);
    return init();
  }

  const TrackingStatistic &operator-=(uint64_t V) {
    if (V == 0)
      return *this;
    Value.fetch_sub(V, std::memory_order_relaxed);
    return init();
  }

  void updateMax(uint64_t V) {
    uint64_t PrevMax = Value.load(std::memory_order_relaxed);
    // Keep trying to update max until we succeed or another thread produces
    // a bigger max than us.
    while (V > PrevMax && !Value.compare_exchange_weak(
                              PrevMax, V, std::memory_order_relaxed)) {
    }
    init();
  }

protected:
  TrackingStatistic &init() {
    if (!Initialized.load(std::memory_order_acquire))
      RegisterStatistic();
    return *this;
  }

  void RegisterStatistic();
};

class NoopStatistic {
public:
  NoopStatistic(const char * /*DebugType*/, const char * /*Name*/,
                const char * /*Desc*/) {}

  uint64_t getValue() const { return 0; }

  // Allow use of this class as the value itself.
  operator uint64_t() const { return 0; }

  const NoopStatistic &operator=(uint64_t Val) { return *this; }

  const NoopStatistic &operator++() { return *this; }

  uint64_t operator++(int) { return 0; }

  const NoopStatistic &operator--() { return *this; }

  uint64_t operator--(int) { return 0; }

  const NoopStatistic &operator+=(const uint64_t &V) { return *this; }

  const NoopStatistic &operator-=(const uint64_t &V) { return *this; }

  void updateMax(uint64_t V) {}
};

#if LLVM_ENABLE_STATS
using Statistic = TrackingStatistic;
#else
using Statistic = NoopStatistic;
#endif

// STATISTIC - A macro to make definition of statistics really simple.  This
// automatically passes the DEBUG_TYPE of the file into the statistic.
#define STATISTIC(VARNAME, DESC)                                               \
  static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC}

// ALWAYS_ENABLED_STATISTIC - A macro to define a statistic like STATISTIC but
// it is enabled even if LLVM_ENABLE_STATS is off.
#define ALWAYS_ENABLED_STATISTIC(VARNAME, DESC)                                \
  static llvm::TrackingStatistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC}

/// Enable the collection and printing of statistics.
void EnableStatistics(bool DoPrintOnExit = true);

/// Check if statistics are enabled.
bool AreStatisticsEnabled();

/// Return a file stream to print our output on.
std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();

/// Print statistics to the file returned by CreateInfoOutputFile().
void PrintStatistics();

/// Print statistics to the given output stream.
void PrintStatistics(raw_ostream &OS);

/// Print statistics in JSON format. This does include all global timers (\see
/// Timer, TimerGroup). Note that the timers are cleared after printing and will
/// not be printed in human readable form or in a second call of
/// PrintStatisticsJSON().
void PrintStatisticsJSON(raw_ostream &OS);

/// Get the statistics. This can be used to look up the value of
/// statistics without needing to parse JSON.
///
/// This function does not prevent statistics being updated by other threads
/// during its execution. It will return the value at the point that it is
/// read. However, it will prevent new statistics from registering until it
/// completes.
std::vector<std::pair<StringRef, uint64_t>> GetStatistics();

/// Reset the statistics. This can be used to zero and de-register the
/// statistics in order to measure a compilation.
///
/// When this function begins to call destructors prior to returning, all
/// statistics will be zero and unregistered. However, that might not remain the
/// case by the time this function finishes returning. Whether updates from
/// other threads are lost or merely deferred until the function returns is
/// timing sensitive.
///
/// Callers who intend to use this to measure statistics for a single
/// compilation should ensure that no compilations are in progress at the point
/// this function is called and that only one compilation executes until calling
/// GetStatistics().
void ResetStatistics();
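
// A usage sketch for measuring a single compilation (runTheCompilation is a
// hypothetical workload):
//
//   ResetStatistics();
//   runTheCompilation();
//   for (const auto &Stat : GetStatistics())
//     llvm::outs() << Stat.first << " = " << Stat.second << "\n";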

} // end namespace llvm

#endif // LLVM_ADT_STATISTIC_H

//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the BitVector class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <utility>

namespace llvm {

/// ForwardIterator for the bits that are set.
/// Iterators get invalidated when resize / reserve is called.
template <typename BitVectorT> class const_set_bits_iterator_impl {
  const BitVectorT &Parent;
  int Current = 0;

  void advance() {
    assert(Current != -1 && "Trying to advance past end.");
    Current = Parent.find_next(Current);
  }

public:
  using iterator_category = std::forward_iterator_tag;
  using difference_type   = void;
  using value_type        = int;
  using pointer           = value_type*;
  using reference         = value_type&;

  const_set_bits_iterator_impl(const BitVectorT &Parent, int Current)
      : Parent(Parent), Current(Current) {}
  explicit const_set_bits_iterator_impl(const BitVectorT &Parent)
      : const_set_bits_iterator_impl(Parent, Parent.find_first()) {}
  const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default;

  const_set_bits_iterator_impl operator++(int) {
    auto Prev = *this;
    advance();
    return Prev;
  }

  const_set_bits_iterator_impl &operator++() {
    advance();
    return *this;
  }

  unsigned operator*() const { return Current; }

  bool operator==(const const_set_bits_iterator_impl &Other) const {
    assert(&Parent == &Other.Parent &&
           "Comparing iterators from different BitVectors");
    return Current == Other.Current;
  }

  bool operator!=(const const_set_bits_iterator_impl &Other) const {
    assert(&Parent == &Other.Parent &&
           "Comparing iterators from different BitVectors");
    return Current != Other.Current;
  }
};

class BitVector {
  typedef uintptr_t BitWord;

  enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };

  static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
                "Unsupported word size");

  using Storage = SmallVector<BitWord>;

  Storage Bits;  // Actual bits.
  unsigned Size = 0; // Size of bitvector in bits.

public:
  using size_type = unsigned;

  // Encapsulation of a single bit.
  class reference {

    BitWord *WordRef;
    unsigned BitPos;

  public:
    reference(BitVector &b, unsigned Idx) {
      WordRef = &b.Bits[Idx / BITWORD_SIZE];
      BitPos = Idx % BITWORD_SIZE;
    }

    reference() = delete;
    reference(const reference&) = default;

    reference &operator=(reference t) {
      *this = bool(t);
      return *this;
    }

    reference& operator=(bool t) {
      if (t)
        *WordRef |= BitWord(1) << BitPos;
      else
        *WordRef &= ~(BitWord(1) << BitPos);
      return *this;
    }

    operator bool() const {
      return ((*WordRef) & (BitWord(1) << BitPos)) != 0;
    }
  };

  typedef const_set_bits_iterator_impl<BitVector> const_set_bits_iterator;
  typedef const_set_bits_iterator set_iterator;

  const_set_bits_iterator set_bits_begin() const {
    return const_set_bits_iterator(*this);
  }
  const_set_bits_iterator set_bits_end() const {
    return const_set_bits_iterator(*this, -1);
  }
  iterator_range<const_set_bits_iterator> set_bits() const {
    return make_range(set_bits_begin(), set_bits_end());
  }

  /// BitVector default ctor - Creates an empty bitvector.
  BitVector() = default;

  /// BitVector ctor - Creates a bitvector of specified number of bits. All
  /// bits are initialized to the specified value.
  explicit BitVector(unsigned s, bool t = false)
      : Bits(NumBitWords(s), 0 - (BitWord)t), Size(s) {
    if (t)
      clear_unused_bits();
  }

  /// empty - Tests whether there are no bits in this bitvector.
  bool empty() const { return Size == 0; }

  /// size - Returns the number of bits in this bitvector.
  size_type size() const { return Size; }

  /// count - Returns the number of bits which are set.
  size_type count() const {
    unsigned NumBits = 0;
    for (auto Bit : Bits)
      NumBits += llvm::popcount(Bit);
    return NumBits;
  }

  /// any - Returns true if any bit is set.
  bool any() const {
    return any_of(Bits, [](BitWord Bit) { return Bit != 0; });
  }

  /// all - Returns true if all bits are set.
  bool all() const {
    for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
      if (Bits[i] != ~BitWord(0))
        return false;

    // If bits remain check that they are ones. The unused bits are always zero.
    if (unsigned Remainder = Size % BITWORD_SIZE)
      return Bits[Size / BITWORD_SIZE] == (BitWord(1) << Remainder) - 1;

    return true;
  }

  /// none - Returns true if none of the bits are set.
  bool none() const {
    return !any();
  }

  /// find_first_in - Returns the index of the first set / unset bit,
  /// depending on \p Set, in the range [Begin, End).
  /// Returns -1 if all bits in the range are unset / set.
  int find_first_in(unsigned Begin, unsigned End, bool Set = true) const {
    assert(Begin <= End && End <= Size);
    if (Begin == End)
      return -1;

    unsigned FirstWord = Begin / BITWORD_SIZE;
    unsigned LastWord = (End - 1) / BITWORD_SIZE;

    // Check subsequent words.
    // The code below is based on search for the first _set_ bit. If
    // we're searching for the first _unset_, we just take the
    // complement of each word before we use it and apply
    // the same method.
    for (unsigned i = FirstWord; i <= LastWord; ++i) {
      BitWord Copy = Bits[i];
      if (!Set)
        Copy = ~Copy;

      if (i == FirstWord) {
        unsigned FirstBit = Begin % BITWORD_SIZE;
        Copy &= maskTrailingZeros<BitWord>(FirstBit);
      }

      if (i == LastWord) {
        unsigned LastBit = (End - 1) % BITWORD_SIZE;
        Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
      }
      if (Copy != 0)
        return i * BITWORD_SIZE + llvm::countr_zero(Copy);
    }
    return -1;
  }

  /// find_last_in - Returns the index of the last set bit in the range
  /// [Begin, End).  Returns -1 if all bits in the range are unset.
  int find_last_in(unsigned Begin, unsigned End) const {
    assert(Begin <= End && End <= Size);
    if (Begin == End)
      return -1;

    unsigned LastWord = (End - 1) / BITWORD_SIZE;
    unsigned FirstWord = Begin / BITWORD_SIZE;

    for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
      unsigned CurrentWord = i - 1;

      BitWord Copy = Bits[CurrentWord];
      if (CurrentWord == LastWord) {
        unsigned LastBit = (End - 1) % BITWORD_SIZE;
        Copy &= maskTrailingOnes<BitWord>(LastBit + 1);
      }

      if (CurrentWord == FirstWord) {
        unsigned FirstBit = Begin % BITWORD_SIZE;
        Copy &= maskTrailingZeros<BitWord>(FirstBit);
      }

      if (Copy != 0)
        return (CurrentWord + 1) * BITWORD_SIZE - llvm::countl_zero(Copy) - 1;
    }

    return -1;
  }

  /// find_first_unset_in - Returns the index of the first unset bit in the
  /// range [Begin, End).  Returns -1 if all bits in the range are set.
  int find_first_unset_in(unsigned Begin, unsigned End) const {
    return find_first_in(Begin, End, /* Set = */ false);
  }

  /// find_last_unset_in - Returns the index of the last unset bit in the
  /// range [Begin, End).  Returns -1 if all bits in the range are set.
  int find_last_unset_in(unsigned Begin, unsigned End) const {
    assert(Begin <= End && End <= Size);
    if (Begin == End)
      return -1;

    unsigned LastWord = (End - 1) / BITWORD_SIZE;
    unsigned FirstWord = Begin / BITWORD_SIZE;

    for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) {
      unsigned CurrentWord = i - 1;

      BitWord Copy = Bits[CurrentWord];
      if (CurrentWord == LastWord) {
        unsigned LastBit = (End - 1) % BITWORD_SIZE;
        Copy |= maskTrailingZeros<BitWord>(LastBit + 1);
      }

      if (CurrentWord == FirstWord) {
        unsigned FirstBit = Begin % BITWORD_SIZE;
        Copy |= maskTrailingOnes<BitWord>(FirstBit);
      }

      if (Copy != ~BitWord(0)) {
        unsigned Result =
            (CurrentWord + 1) * BITWORD_SIZE - llvm::countl_one(Copy) - 1;
        return Result < Size ? Result : -1;
      }
    }
    return -1;
  }

  /// find_first - Returns the index of the first set bit, -1 if none
  /// of the bits are set.
  int find_first() const { return find_first_in(0, Size); }

  /// find_last - Returns the index of the last set bit, -1 if none of the bits
  /// are set.
  int find_last() const { return find_last_in(0, Size); }

  /// find_next - Returns the index of the next set bit following the
  /// "Prev" bit. Returns -1 if the next set bit is not found.
  int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); }

  /// find_prev - Returns the index of the first set bit that precedes the
  /// bit at \p PriorTo.  Returns -1 if all previous bits are unset.
  int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); }

  /// find_first_unset - Returns the index of the first unset bit, -1 if all
  /// of the bits are set.
  int find_first_unset() const { return find_first_unset_in(0, Size); }

  /// find_next_unset - Returns the index of the next unset bit following the
  /// "Prev" bit.  Returns -1 if all remaining bits are set.
  int find_next_unset(unsigned Prev) const {
    return find_first_unset_in(Prev + 1, Size);
  }

  /// find_last_unset - Returns the index of the last unset bit, -1 if all of
  /// the bits are set.
  int find_last_unset() const { return find_last_unset_in(0, Size); }

  /// find_prev_unset - Returns the index of the first unset bit that precedes
  /// the bit at \p PriorTo.  Returns -1 if all previous bits are set.
  int find_prev_unset(unsigned PriorTo) {
    return find_last_unset_in(0, PriorTo);
  }

  /// clear - Removes all bits from the bitvector.
  void clear() {
    Size = 0;
    Bits.clear();
  }

  /// resize - Grow or shrink the bitvector.
  void resize(unsigned N, bool t = false) {
    set_unused_bits(t);
    Size = N;
    Bits.resize(NumBitWords(N), 0 - BitWord(t));
    clear_unused_bits();
  }

  void reserve(unsigned N) { Bits.reserve(NumBitWords(N)); }

  // Set, reset, flip
  BitVector &set() {
    init_words(true);
    clear_unused_bits();
    return *this;
  }

  BitVector &set(unsigned Idx) {
    assert(Idx < Size && "Out-of-bounds Bit access.");
    Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
    return *this;
  }

  /// set - Efficiently set a range of bits in [I, E)
  BitVector &set(unsigned I, unsigned E) {
    assert(I <= E && "Attempted to set backwards range!");
    assert(E <= size() && "Attempted to set out-of-bounds range!");

    if (I == E) return *this;

    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
      BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
      BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
      BitWord Mask = EMask - IMask;
      Bits[I / BITWORD_SIZE] |= Mask;
      return *this;
    }

    BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
    Bits[I / BITWORD_SIZE] |= PrefixMask;
    I = alignTo(I, BITWORD_SIZE);

    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
      Bits[I / BITWORD_SIZE] = ~BitWord(0);

    BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
    if (I < E)
      Bits[I / BITWORD_SIZE] |= PostfixMask;

    return *this;
  }

  BitVector &reset() {
    init_words(false);
    return *this;
  }

  BitVector &reset(unsigned Idx) {
    Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
    return *this;
  }

  /// reset - Efficiently reset a range of bits in [I, E)
  BitVector &reset(unsigned I, unsigned E) {
    assert(I <= E && "Attempted to reset backwards range!");
    assert(E <= size() && "Attempted to reset out-of-bounds range!");

    if (I == E) return *this;

    if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
      BitWord EMask = BitWord(1) << (E % BITWORD_SIZE);
      BitWord IMask = BitWord(1) << (I % BITWORD_SIZE);
      BitWord Mask = EMask - IMask;
      Bits[I / BITWORD_SIZE] &= ~Mask;
      return *this;
    }

    BitWord PrefixMask = ~BitWord(0) << (I % BITWORD_SIZE);
    Bits[I / BITWORD_SIZE] &= ~PrefixMask;
    I = alignTo(I, BITWORD_SIZE);

    for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
      Bits[I / BITWORD_SIZE] = BitWord(0);

    BitWord PostfixMask = (BitWord(1) << (E % BITWORD_SIZE)) - 1;
    if (I < E)
      Bits[I / BITWORD_SIZE] &= ~PostfixMask;

    return *this;
  }

  BitVector &flip() {
    for (auto &Bit : Bits)
      Bit = ~Bit;
    clear_unused_bits();
    return *this;
  }

  BitVector &flip(unsigned Idx) {
    Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
    return *this;
  }

  // Indexing.
  reference operator[](unsigned Idx) {
    assert (Idx < Size && "Out-of-bounds Bit access.");
    return reference(*this, Idx);
  }

  bool operator[](unsigned Idx) const {
    assert (Idx < Size && "Out-of-bounds Bit access.");
    BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
    return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
  }

  /// Return the last element in the vector.
  bool back() const {
    assert(!empty() && "Getting last element of empty vector.");
    return (*this)[size() - 1];
  }

  bool test(unsigned Idx) const {
    return (*this)[Idx];
  }

  // Push single bit to end of vector.
  void push_back(bool Val) {
    unsigned OldSize = Size;
    unsigned NewSize = Size + 1;

    // Resize, which will insert zeros.
    // If we already fit then the unused bits will be already zero.
    if (NewSize > getBitCapacity())
      resize(NewSize, false);
    else
      Size = NewSize;

    // If true, set single bit.
    if (Val)
      set(OldSize);
  }

  /// Pop one bit from the end of the vector.
  void pop_back() {
    assert(!empty() && "Empty vector has no element to pop.");
    resize(size() - 1);
  }

  /// Test if any common bits are set.
  bool anyCommon(const BitVector &RHS) const {
    unsigned ThisWords = Bits.size();
    unsigned RHSWords = RHS.Bits.size();
    for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
      if (Bits[i] & RHS.Bits[i])
        return true;
    return false;
  }

  // Comparison operators.
  bool operator==(const BitVector &RHS) const {
    if (size() != RHS.size())
      return false;
    unsigned NumWords = Bits.size();
    return std::equal(Bits.begin(), Bits.begin() + NumWords, RHS.Bits.begin());
  }

  bool operator!=(const BitVector &RHS) const { return !(*this == RHS); }

  /// Intersection, union, disjoint union.
  BitVector &operator&=(const BitVector &RHS) {
    unsigned ThisWords = Bits.size();
    unsigned RHSWords = RHS.Bits.size();
    unsigned i;
    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
      Bits[i] &= RHS.Bits[i];

    // Any bits that are just in this bitvector become zero, because they aren't
    // in the RHS bit vector.  Any words only in RHS are ignored because they
    // are already zero in the LHS.
    for (; i != ThisWords; ++i)
      Bits[i] = 0;

    return *this;
  }

  /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
  BitVector &reset(const BitVector &RHS) {
    unsigned ThisWords = Bits.size();
    unsigned RHSWords = RHS.Bits.size();
    for (unsigned i = 0; i != std::min(ThisWords, RHSWords); ++i)
      Bits[i] &= ~RHS.Bits[i];
    return *this;
  }

  /// test - Check if (This - RHS) is zero.
  /// This is the same as reset(RHS) and any().
  bool test(const BitVector &RHS) const {
    unsigned ThisWords = Bits.size();
    unsigned RHSWords = RHS.Bits.size();
    unsigned i;
    for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
      if ((Bits[i] & ~RHS.Bits[i]) != 0)
        return true;

    for (; i != ThisWords ; ++i)
      if (Bits[i] != 0)
        return true;

    return false;
  }

  template <class F, class... ArgTys>
  static BitVector &apply(F &&f, BitVector &Out, BitVector const &Arg,
                          ArgTys const &...Args) {
    assert(llvm::all_of(
               std::initializer_list<unsigned>{Args.size()...},
               [&Arg](auto const &BV) { return Arg.size() == BV; }) &&
           "consistent sizes");
    Out.resize(Arg.size());
    for (size_type I = 0, E = Arg.Bits.size(); I != E; ++I)
      Out.Bits[I] = f(Arg.Bits[I], Args.Bits[I]...);
    Out.clear_unused_bits();
    return Out;
  }
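
  // A usage sketch for apply: compute Out = A & ~B word-wise; all operands
  // must have the same size.
  //
  //   BitVector::apply([](auto X, auto Y) { return X & ~Y; }, Out, A, B);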

  BitVector &operator|=(const BitVector &RHS) {
    if (size() < RHS.size())
      resize(RHS.size());
    for (size_type I = 0, E = RHS.Bits.size(); I != E; ++I)
      Bits[I] |= RHS.Bits[I];
    return *this;
  }

  BitVector &operator^=(const BitVector &RHS) {
    if (size() < RHS.size())
      resize(RHS.size());
    for (size_type I = 0, E = RHS.Bits.size(); I != E; ++I)
      Bits[I] ^= RHS.Bits[I];
    return *this;
  }

  BitVector &operator>>=(unsigned N) {
    assert(N <= Size);
    if (LLVM_UNLIKELY(empty() || N == 0))
      return *this;

    unsigned NumWords = Bits.size();
    assert(NumWords >= 1);

    wordShr(N / BITWORD_SIZE);

    unsigned BitDistance = N % BITWORD_SIZE;
    if (BitDistance == 0)
      return *this;

    // When the shift size is not a multiple of the word size, then we have
    // a tricky situation where each word in succession needs to extract some
    // of the bits from the next word and or them into this word while
    // shifting this word to make room for the new bits.  This has to be done
    // for every word in the array.

    // Since we're shifting each word right, some bits will fall off the end
    // of each word to the right, and empty space will be created on the left.
    // The final word in the array will lose bits permanently, so starting at
    // the beginning, work forwards shifting each word to the right, and
    // OR'ing in the bits from the end of the next word to the beginning of
    // the current word.

    // Example:
    //   Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting right
    //   by 4 bits.
    // Step 1: Word[0] >>= 4           ; 0x0ABBCCDD
    // Step 2: Word[0] |= 0x10000000   ; 0x1ABBCCDD
    // Step 3: Word[1] >>= 4           ; 0x0EEFF001
    // Step 4: Word[1] |= 0x50000000   ; 0x5EEFF001
    // Step 5: Word[2] >>= 4           ; 0x02334455
    // Result: { 0x1ABBCCDD, 0x5EEFF001, 0x02334455 }
    const BitWord Mask = maskTrailingOnes<BitWord>(BitDistance);
    const unsigned LSH = BITWORD_SIZE - BitDistance;

    for (unsigned I = 0; I < NumWords - 1; ++I) {
      Bits[I] >>= BitDistance;
      Bits[I] |= (Bits[I + 1] & Mask) << LSH;
    }

    Bits[NumWords - 1] >>= BitDistance;

    return *this;
  }

  BitVector &operator<<=(unsigned N) {
    assert(N <= Size);
    if (LLVM_UNLIKELY(empty() || N == 0))
      return *this;

    unsigned NumWords = Bits.size();
    assert(NumWords >= 1);

    wordShl(N / BITWORD_SIZE);

    unsigned BitDistance = N % BITWORD_SIZE;
    if (BitDistance == 0)
      return *this;

    // When the shift size is not a multiple of the word size, then we have
    // a tricky situation where each word in succession needs to extract some
    // of the bits from the previous word and or them into this word while
    // shifting this word to make room for the new bits.  This has to be done
    // for every word in the array.  This is similar to the algorithm outlined
    // in operator>>=, but backwards.

    // Since we're shifting each word left, some bits will fall off the end
    // of each word to the left, and empty space will be created on the right.
    // The first word in the array will lose bits permanently, so starting at
    // the end, work backwards shifting each word to the left, and OR'ing
    // in the bits from the end of the next word to the beginning of the
    // current word.

    // Example:
    //   Starting with {0xAABBCCDD, 0xEEFF0011, 0x22334455} and shifting left
    //   by 4 bits.
    // Step 1: Word[2] <<= 4           ; 0x23344550
    // Step 2: Word[2] |= 0x0000000E   ; 0x2334455E
    // Step 3: Word[1] <<= 4           ; 0xEFF00110
    // Step 4: Word[1] |= 0x0000000A   ; 0xEFF0011A
    // Step 5: Word[0] <<= 4           ; 0xABBCCDD0
    // Result: { 0xABBCCDD0, 0xEFF0011A, 0x2334455E }
    const BitWord Mask = maskLeadingOnes<BitWord>(BitDistance);
    const unsigned RSH = BITWORD_SIZE - BitDistance;

    for (int I = NumWords - 1; I > 0; --I) {
      Bits[I] <<= BitDistance;
      Bits[I] |= (Bits[I - 1] & Mask) >> RSH;
    }
    Bits[0] <<= BitDistance;
    clear_unused_bits();

    return *this;
  }

  void swap(BitVector &RHS) {
    std::swap(Bits, RHS.Bits);
    std::swap(Size, RHS.Size);
  }

  void invalid() {
    assert(!Size && Bits.empty());
    Size = (unsigned)-1;
  }
  bool isInvalid() const { return Size == (unsigned)-1; }

  ArrayRef<BitWord> getData() const { return {&Bits[0], Bits.size()}; }

  //===--------------------------------------------------------------------===//
  // Portable bit mask operations.
  //===--------------------------------------------------------------------===//
  //
  // These methods all operate on arrays of uint32_t, each holding 32 bits. The
  // fixed word size makes it easier to work with literal bit vector constants
  // in portable code.
  //
  // The LSB in each word is the lowest numbered bit.  The size of a portable
  // bit mask is always a whole multiple of 32 bits.  If no bit mask size is
  // given, the bit mask is assumed to cover the entire BitVector.

  /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
  /// This computes "*this |= Mask".
  void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    applyMask<true, false>(Mask, MaskWords);
  }

  /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
  /// Don't resize. This computes "*this &= ~Mask".
  void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    applyMask<false, false>(Mask, MaskWords);
  }

  /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
  /// Don't resize.  This computes "*this |= ~Mask".
  void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    applyMask<true, true>(Mask, MaskWords);
  }

  /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
  /// Don't resize.  This computes "*this &= Mask".
  void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
    applyMask<false, true>(Mask, MaskWords);
  }

private:
  /// Perform a logical left shift of \p Count words by moving everything
  /// \p Count words to the right in memory.
  ///
  /// While confusing, words are stored from least significant at Bits[0] to
  /// most significant at Bits[NumWords-1].  A logical shift left, however,
  /// moves the current least significant bit to a higher logical index, and
  /// fills the previous least significant bits with 0.  Thus, we actually
  /// need to move the bytes of the memory to the right, not to the left.
  /// Example:
  ///   Words = [0xBBBBAAAA, 0xDDDDFFFF, 0x00000000, 0xDDDD0000]
  /// represents a BitVector where 0xBBBBAAAA contain the least significant
  /// bits.  So if we want to shift the BitVector left by 2 words, we need
  /// to turn this into 0x00000000 0x00000000 0xBBBBAAAA 0xDDDDFFFF by using a
  /// memmove which moves right, not left.
  void wordShl(uint32_t Count) {
    if (Count == 0)
      return;

    uint32_t NumWords = Bits.size();

    // Since we always move Word-sized chunks of data with src and dest both
    // aligned to a word-boundary, we don't need to worry about endianness
    // here.
    std::copy(Bits.begin(), Bits.begin() + NumWords - Count,
              Bits.begin() + Count);
    std::fill(Bits.begin(), Bits.begin() + Count, 0);
    clear_unused_bits();
  }

  /// Perform a logical right shift of \p Count words by moving those
  /// words to the left in memory.  See wordShl for more information.
  ///
  void wordShr(uint32_t Count) {
    if (Count == 0)
      return;

    uint32_t NumWords = Bits.size();

    std::copy(Bits.begin() + Count, Bits.begin() + NumWords, Bits.begin());
    std::fill(Bits.begin() + NumWords - Count, Bits.begin() + NumWords, 0);
  }

  int next_unset_in_word(int WordIndex, BitWord Word) const {
    unsigned Result = WordIndex * BITWORD_SIZE + llvm::countr_one(Word);
    return Result < size() ? Result : -1;
  }

  unsigned NumBitWords(unsigned S) const {
    return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
  }

  // Set the unused bits in the high words.
  void set_unused_bits(bool t = true) {
    // Set any stray high bits of the last used word.
    if (unsigned ExtraBits = Size % BITWORD_SIZE) {
      BitWord ExtraBitMask = ~BitWord(0) << ExtraBits;
      if (t)
        Bits.back() |= ExtraBitMask;
      else
        Bits.back() &= ~ExtraBitMask;
    }
  }

  // Clear the unused bits in the high words.
  void clear_unused_bits() {
    set_unused_bits(false);
  }

  void init_words(bool t) {
    std::fill(Bits.begin(), Bits.end(), 0 - (BitWord)t);
  }

  template<bool AddBits, bool InvertMask>
  void applyMask(const uint32_t *Mask, unsigned MaskWords) {
    static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
    MaskWords = std::min(MaskWords, (size() + 31) / 32);
    const unsigned Scale = BITWORD_SIZE / 32;
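    // With a 64-bit BitWord, Scale == 2: each BitWord consumes two
    // consecutive 32-bit mask words, the first filling the low half.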
    unsigned i;
    for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
      BitWord BW = Bits[i];
      // This inner loop should unroll completely when BITWORD_SIZE > 32.
      for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
        uint32_t M = *Mask++;
        if (InvertMask) M = ~M;
        if (AddBits) BW |=   BitWord(M) << b;
        else         BW &= ~(BitWord(M) << b);
      }
      Bits[i] = BW;
    }
    for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
      uint32_t M = *Mask++;
      if (InvertMask) M = ~M;
      if (AddBits) Bits[i] |=   BitWord(M) << b;
      else         Bits[i] &= ~(BitWord(M) << b);
    }
    if (AddBits)
      clear_unused_bits();
  }

public:
  /// Return the size (in bytes) of the bit vector.
  size_type getMemorySize() const { return Bits.size() * sizeof(BitWord); }
  size_type getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
};

inline BitVector::size_type capacity_in_bytes(const BitVector &X) {
  return X.getMemorySize();
}

template <> struct DenseMapInfo<BitVector> {
  static inline BitVector getEmptyKey() { return {}; }
  static inline BitVector getTombstoneKey() {
    BitVector V;
    V.invalid();
    return V;
  }
  static unsigned getHashValue(const BitVector &V) {
    return DenseMapInfo<std::pair<BitVector::size_type, ArrayRef<uintptr_t>>>::
        getHashValue(std::make_pair(V.size(), V.getData()));
  }
  static bool isEqual(const BitVector &LHS, const BitVector &RHS) {
    if (LHS.isInvalid() || RHS.isInvalid())
      return LHS.isInvalid() == RHS.isInvalid();
    return LHS == RHS;
  }
};
} // end namespace llvm

namespace std {
  /// Implement std::swap in terms of BitVector swap.
inline void swap(llvm::BitVector &LHS, llvm::BitVector &RHS) { LHS.swap(RHS); }
} // end namespace std

#endif // LLVM_ADT_BITVECTOR_H

//===- llvm/ADT/PointerSumType.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_POINTERSUMTYPE_H
#define LLVM_ADT_POINTERSUMTYPE_H

#include "llvm/ADT/bit.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstdint>
#include <type_traits>

namespace llvm {

/// A compile time pair of an integer tag and the pointer-like type which it
/// indexes within a sum type. Also allows the user to specify a particular
/// traits class for pointer types with custom behavior such as over-aligned
/// allocation.
template <uintptr_t N, typename PointerArgT,
          typename TraitsArgT = PointerLikeTypeTraits<PointerArgT>>
struct PointerSumTypeMember {
  enum { Tag = N };
  using PointerT = PointerArgT;
  using TraitsT = TraitsArgT;
};

namespace detail {

template <typename TagT, typename... MemberTs> struct PointerSumTypeHelper;

} // end namespace detail

/// A sum type over pointer-like types.
///
/// This is a normal tagged union across pointer-like types that uses the low
/// bits of the pointers to store the tag.
///
/// Each member of the sum type is specified by passing a \c
/// PointerSumTypeMember specialization in the variadic member argument list.
/// This allows the user to control the particular tag value associated with
/// a particular type, use the same type for multiple different tags, and
/// customize the pointer-like traits used for a particular member. Note that
/// these *must* be specializations of \c PointerSumTypeMember, no other type
/// will suffice, even if it provides a compatible interface.
///
/// This type implements all of the comparison operators and even hash table
/// support by comparing the underlying storage of the pointer values. It
/// doesn't support delegating to particular members for comparisons.
///
/// It also default constructs to a zero tag with a null pointer, whatever that
/// would be. This means that the zero value for the tag type is significant
/// and is best reserved for the state you most want default construction to
/// produce.
///
/// Having a supported zero-valued tag also enables getting the address of a
/// pointer stored with that tag provided it is stored in its natural bit
/// representation. This works because in the case of a zero-valued tag, the
/// pointer's value is directly stored into this object and we can expose the
/// address of that internal storage. This is especially useful when building an
/// `ArrayRef` of a single pointer stored in a sum type.
///
/// There is no support for constructing or accessing with a dynamic tag as
/// that would fundamentally violate the type safety provided by the sum type.
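///
/// A minimal usage sketch (the tag and variable names here are hypothetical,
/// not part of this header):
/// \code
///   enum Kinds { K_Int = 0, K_Float = 1 };
///   using IntOrFloat =
///       PointerSumType<Kinds, PointerSumTypeMember<K_Int, int *>,
///                      PointerSumTypeMember<K_Float, float *>>;
///   int I = 42;
///   IntOrFloat S = IntOrFloat::create<K_Int>(&I);
///   if (S.is<K_Int>()) {
///     int *P = S.get<K_Int>(); // == &I
///   }
/// \endcode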
template <typename TagT, typename... MemberTs> class PointerSumType {
  using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;

  // We keep both the raw value and the min tag value's pointer in a union. When
  // the minimum tag value is zero, this allows code below to cleanly expose the
  // address of the zero-tag pointer instead of just the zero-tag pointer
  // itself. This is especially useful when building `ArrayRef`s out of a single
  // pointer. However, we have to carefully access the union due to the active
  // member potentially changing. When we *store* a new value, we directly
  // access the union to allow us to store using the obvious types. However,
  // when we *read* a value, we copy the underlying storage out to avoid relying
  // on one member or the other being active.
  union StorageT {
    // Ensure we get a null default constructed value. We don't use a member
    // initializer because some compilers seem not to implement those
    // correctly for a union.
    StorageT() : Value(0) {}

    uintptr_t Value;

    typename HelperT::template Lookup<HelperT::MinTag>::PointerT MinTagPointer;
  };

  StorageT Storage;

public:
  constexpr PointerSumType() = default;

  /// A typed setter to a given tagged member of the sum type.
  template <TagT N>
  void set(typename HelperT::template Lookup<N>::PointerT Pointer) {
    void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
    assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
           "Pointer is insufficiently aligned to store the discriminant!");
    Storage.Value = reinterpret_cast<uintptr_t>(V) | N;
  }

  /// A typed constructor for a specific tagged member of the sum type.
  template <TagT N>
  static PointerSumType
  create(typename HelperT::template Lookup<N>::PointerT Pointer) {
    PointerSumType Result;
    Result.set<N>(Pointer);
    return Result;
  }

  /// Clear the value to null with the min tag type.
  void clear() { set<HelperT::MinTag>(nullptr); }

  TagT getTag() const {
    return static_cast<TagT>(getOpaqueValue() & HelperT::TagMask);
  }

  template <TagT N> bool is() const { return N == getTag(); }

  template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
    void *P = is<N>() ? getVoidPtr() : nullptr;
    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
  }

  template <TagT N>
  typename HelperT::template Lookup<N>::PointerT cast() const {
    assert(is<N>() && "This instance has a different active member.");
    return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(
        getVoidPtr());
  }

  /// If the tag is zero and the pointer's value isn't changed when being
  /// stored, get the address of the stored value type-punned to the zero-tag's
  /// pointer type.
  typename HelperT::template Lookup<HelperT::MinTag>::PointerT const *
  getAddrOfZeroTagPointer() const {
    return const_cast<PointerSumType *>(this)->getAddrOfZeroTagPointer();
  }

  /// If the tag is zero and the pointer's value isn't changed when being
  /// stored, get the address of the stored value type-punned to the zero-tag's
  /// pointer type.
  typename HelperT::template Lookup<HelperT::MinTag>::PointerT *
  getAddrOfZeroTagPointer() {
    static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!");
    assert(is<HelperT::MinTag>() && "The active tag is not zero!");
    // Store the initial value of the pointer when read out of our storage.
    auto InitialPtr = get<HelperT::MinTag>();
    // Now update the active member of the union to be the actual pointer-typed
    // member so that accessing it indirectly through the returned address is
    // valid.
    Storage.MinTagPointer = InitialPtr;
    // Finally, validate that this was a no-op as expected by reading it back
    // out using the same underlying-storage read as above.
    assert(InitialPtr == get<HelperT::MinTag>() &&
           "Switching to typed storage changed the pointer returned!");
    // Now we can correctly return an address to typed storage.
    return &Storage.MinTagPointer;
  }

  explicit operator bool() const {
    return getOpaqueValue() & HelperT::PointerMask;
  }
  bool operator==(const PointerSumType &R) const {
    return getOpaqueValue() == R.getOpaqueValue();
  }
  bool operator!=(const PointerSumType &R) const {
    return getOpaqueValue() != R.getOpaqueValue();
  }
  bool operator<(const PointerSumType &R) const {
    return getOpaqueValue() < R.getOpaqueValue();
  }
  bool operator>(const PointerSumType &R) const {
    return getOpaqueValue() > R.getOpaqueValue();
  }
  bool operator<=(const PointerSumType &R) const {
    return getOpaqueValue() <= R.getOpaqueValue();
  }
  bool operator>=(const PointerSumType &R) const {
    return getOpaqueValue() >= R.getOpaqueValue();
  }

  uintptr_t getOpaqueValue() const {
    // Read the underlying storage of the union, regardless of the active
    // member.
    return bit_cast<uintptr_t>(Storage);
  }

protected:
  void *getVoidPtr() const {
    return reinterpret_cast<void *>(getOpaqueValue() & HelperT::PointerMask);
  }
};

namespace detail {

/// A helper template for implementing \c PointerSumType. It provides fast
/// compile-time lookup of the member from a particular tag value, along with
/// useful constants and compile-time checking infrastructure.
template <typename TagT, typename... MemberTs>
struct PointerSumTypeHelper : MemberTs... {
  // First we use a trick to allow quickly looking up information about
  // a particular member of the sum type. This works because we arranged to
  // have this type derive from all of the member type templates. We can select
  // the matching member for a tag using type deduction during overload
  // resolution.
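  // For example (illustrative): given a member declared as
  // PointerSumTypeMember<1, float *>, Lookup<1>::PointerT resolves
  // LookupOverload<1> against that base class and yields float *.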
  template <TagT N, typename PointerT, typename TraitsT>
  static PointerSumTypeMember<N, PointerT, TraitsT>
  LookupOverload(PointerSumTypeMember<N, PointerT, TraitsT> *);
  template <TagT N> static void LookupOverload(...);
  template <TagT N> struct Lookup {
    // Compute a particular member type by resolving the lookup helper overload.
    using MemberT = decltype(
        LookupOverload<N>(static_cast<PointerSumTypeHelper *>(nullptr)));

    /// The Nth member's pointer type.
    using PointerT = typename MemberT::PointerT;

    /// The Nth member's traits type.
    using TraitsT = typename MemberT::TraitsT;
  };

  // Next we need to compute the number of bits available for the discriminant
  // by taking the min of the bits available for each member. Much of this
  // would be considerably simpler with good constexpr support.
  template <uintptr_t V, uintptr_t... Vs>
  struct Min : std::integral_constant<
                   uintptr_t, (V < Min<Vs...>::value ? V : Min<Vs...>::value)> {
  };
  template <uintptr_t V>
  struct Min<V> : std::integral_constant<uintptr_t, V> {};
  enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };

  // Also compute the smallest discriminant and various masks for convenience.
  constexpr static TagT MinTag =
      static_cast<TagT>(Min<MemberTs::Tag...>::value);
  enum : uint64_t {
    PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
    TagMask = ~PointerMask
  };

  // Finally we need a recursive template to do static checks of each
  // member.
  template <typename MemberT, typename... InnerMemberTs>
  struct Checker : Checker<InnerMemberTs...> {
    static_assert(MemberT::Tag < (1 << NumTagBits),
                  "This discriminant value requires too many bits!");
  };
  template <typename MemberT> struct Checker<MemberT> : std::true_type {
    static_assert(MemberT::Tag < (1 << NumTagBits),
                  "This discriminant value requires too many bits!");
  };
  static_assert(Checker<MemberTs...>::value,
                "Each member must pass the checker.");
};

} // end namespace detail

// Teach DenseMap how to use PointerSumTypes as keys.
template <typename TagT, typename... MemberTs>
struct DenseMapInfo<PointerSumType<TagT, MemberTs...>> {
  using SumType = PointerSumType<TagT, MemberTs...>;
  using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
  enum { SomeTag = HelperT::MinTag };
  using SomePointerT =
      typename HelperT::template Lookup<HelperT::MinTag>::PointerT;
  using SomePointerInfo = DenseMapInfo<SomePointerT>;

  static inline SumType getEmptyKey() {
    return SumType::template create<SomeTag>(SomePointerInfo::getEmptyKey());
  }

  static inline SumType getTombstoneKey() {
    return SumType::template create<SomeTag>(
        SomePointerInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const SumType &Arg) {
    uintptr_t OpaqueValue = Arg.getOpaqueValue();
    return DenseMapInfo<uintptr_t>::getHashValue(OpaqueValue);
  }

  static bool isEqual(const SumType &LHS, const SumType &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_ADT_POINTERSUMTYPE_H

//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the PointerUnion class, which is a discriminated union of
/// pointer types.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_POINTERUNION_H
#define LLVM_ADT_POINTERUNION_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace llvm {

namespace pointer_union_detail {
  /// Determine the number of bits required to store integers with values < n.
  /// This is ceil(log2(n)).
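  /// For example: bitsRequired(1) == 0, bitsRequired(2) == 1,
  /// bitsRequired(3) == 2, and bitsRequired(5) == 3.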
  constexpr int bitsRequired(unsigned n) {
    return n > 1 ? 1 + bitsRequired((n + 1) / 2) : 0;
  }

  template <typename... Ts> constexpr int lowBitsAvailable() {
    return std::min<int>({PointerLikeTypeTraits<Ts>::NumLowBitsAvailable...});
  }

  /// Find the first type in a list of types.
  template <typename T, typename...> struct GetFirstType {
    using type = T;
  };

  /// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
  /// for the template arguments.
  template <typename ...PTs> class PointerUnionUIntTraits {
  public:
    static inline void *getAsVoidPointer(void *P) { return P; }
    static inline void *getFromVoidPointer(void *P) { return P; }
    static constexpr int NumLowBitsAvailable = lowBitsAvailable<PTs...>();
  };

  template <typename Derived, typename ValTy, int I, typename ...Types>
  class PointerUnionMembers;

  template <typename Derived, typename ValTy, int I>
  class PointerUnionMembers<Derived, ValTy, I> {
  protected:
    ValTy Val;
    PointerUnionMembers() = default;
    PointerUnionMembers(ValTy Val) : Val(Val) {}

    friend struct PointerLikeTypeTraits<Derived>;
  };

  template <typename Derived, typename ValTy, int I, typename Type,
            typename ...Types>
  class PointerUnionMembers<Derived, ValTy, I, Type, Types...>
      : public PointerUnionMembers<Derived, ValTy, I + 1, Types...> {
    using Base = PointerUnionMembers<Derived, ValTy, I + 1, Types...>;
  public:
    using Base::Base;
    PointerUnionMembers() = default;
    PointerUnionMembers(Type V)
        : Base(ValTy(const_cast<void *>(
                         PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
                     I)) {}

    using Base::operator=;
    Derived &operator=(Type V) {
      this->Val = ValTy(
          const_cast<void *>(PointerLikeTypeTraits<Type>::getAsVoidPointer(V)),
          I);
      return static_cast<Derived &>(*this);
    }
  };
}

// This is a forward declaration of CastInfoPointerUnionImpl
// Refer to its definition below for further details
template <typename... PTs> struct CastInfoPointerUnionImpl;
/// A discriminated union of two or more pointer types, with the discriminator
/// in the low bit of the pointer.
///
/// This implementation is extremely efficient in space due to leveraging the
/// low bits of the pointer, while exposing a natural and type-safe API.
///
/// Common use patterns would be something like this:
///    PointerUnion<int*, float*> P;
///    P = (int*)0;
///    printf("%d %d", P.is<int*>(), P.is<float*>());  // prints "1 0"
///    X = P.get<int*>();     // ok.
///    Y = P.get<float*>();   // runtime assertion failure.
///    Z = P.get<double*>();  // compile time failure.
///    P = (float*)0;
///    Y = P.get<float*>();   // ok.
///    X = P.get<int*>();     // runtime assertion failure.
///    PointerUnion<int*, int*> Q; // compile time failure.
template <typename... PTs>
class PointerUnion
    : public pointer_union_detail::PointerUnionMembers<
          PointerUnion<PTs...>,
          PointerIntPair<
              void *, pointer_union_detail::bitsRequired(sizeof...(PTs)), int,
              pointer_union_detail::PointerUnionUIntTraits<PTs...>>,
          0, PTs...> {
  static_assert(TypesAreDistinct<PTs...>::value,
                "PointerUnion alternative types cannot be repeated");
  // The first type is special because we want to directly cast a pointer to a
  // default-initialized union to a pointer to the first type. But we don't
  // want PointerUnion to be a 'template <typename First, typename ...Rest>'
  // because it's much more convenient to have a name for the whole pack. So
  // split off the first type here.
  using First = TypeAtIndex<0, PTs...>;
  using Base = typename PointerUnion::PointerUnionMembers;

  /// This is needed to give the CastInfo implementation below access
  /// to protected members.
  /// Refer to its definition for further details.
  friend struct CastInfoPointerUnionImpl<PTs...>;

public:
  PointerUnion() = default;

  PointerUnion(std::nullptr_t) : PointerUnion() {}
  using Base::Base;

  /// Test if the pointer held in the union is null, regardless of
  /// which type it is.
  bool isNull() const { return !this->Val.getPointer(); }

  explicit operator bool() const { return !isNull(); }

  // FIXME: Replace the uses of is(), get() and dyn_cast() with
  //        isa<T>, cast<T> and the llvm::dyn_cast<T>
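  //
  // For example (illustrative), prefer
  //   if (isa<int *>(U)) {
  //     int *P = cast<int *>(U);
  //     ...
  //   }
  // over U.is<int *>() and U.get<int *>().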

  /// Test if the Union currently holds the type matching T.
  template <typename T> inline bool is() const { return isa<T>(*this); }

  /// Returns the value of the specified pointer type.
  ///
  /// If the specified pointer type is incorrect, assert.
  template <typename T> inline T get() const {
    assert(isa<T>(*this) && "Invalid accessor called");
    return cast<T>(*this);
  }

  /// Returns the current pointer if it is of the specified pointer type,
  /// otherwise returns null.
  template <typename T> inline T dyn_cast() const {
    return llvm::dyn_cast_if_present<T>(*this);
  }

  /// If the union is set to the first pointer type get an address pointing to
  /// it.
  First const *getAddrOfPtr1() const {
    return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
  }

  /// If the union is set to the first pointer type get an address pointing to
  /// it.
  First *getAddrOfPtr1() {
    assert(isa<First>(*this) && "Val is not the first pointer");
    assert(
        PointerLikeTypeTraits<First>::getAsVoidPointer(cast<First>(*this)) ==
            this->Val.getPointer() &&
        "Can't get the address because PointerLikeTypeTraits changes the ptr");
    return const_cast<First *>(
        reinterpret_cast<const First *>(this->Val.getAddrOfPointer()));
  }

  /// Assignment from nullptr which just clears the union.
  const PointerUnion &operator=(std::nullptr_t) {
    this->Val.initWithPointer(nullptr);
    return *this;
  }

  /// Assignment from elements of the union.
  using Base::operator=;

  void *getOpaqueValue() const { return this->Val.getOpaqueValue(); }
  static inline PointerUnion getFromOpaqueValue(void *VP) {
    PointerUnion V;
    V.Val = decltype(V.Val)::getFromOpaqueValue(VP);
    return V;
  }
};

template <typename ...PTs>
bool operator==(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
  return lhs.getOpaqueValue() == rhs.getOpaqueValue();
}

template <typename ...PTs>
bool operator!=(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
  return lhs.getOpaqueValue() != rhs.getOpaqueValue();
}

template <typename ...PTs>
bool operator<(PointerUnion<PTs...> lhs, PointerUnion<PTs...> rhs) {
  return lhs.getOpaqueValue() < rhs.getOpaqueValue();
}

/// We can't (at least, at this moment with C++14) declare CastInfo
/// as a friend of PointerUnion like this:
/// ```
///   template<typename To>
///   friend struct CastInfo<To, PointerUnion<PTs...>>;
/// ```
/// The compiler complains 'Partial specialization cannot be declared as a
/// friend'.
/// So we define this struct to be a bridge between CastInfo and
/// PointerUnion.
template <typename... PTs> struct CastInfoPointerUnionImpl {
  using From = PointerUnion<PTs...>;

  template <typename To> static inline bool isPossible(From &F) {
    return F.Val.getInt() == FirstIndexOfType<To, PTs...>::value;
  }

  template <typename To> static To doCast(From &F) {
    assert(isPossible<To>(F) && "cast to an incompatible type!");
    return PointerLikeTypeTraits<To>::getFromVoidPointer(F.Val.getPointer());
  }
};

// Specialization of CastInfo for PointerUnion
template <typename To, typename... PTs>
struct CastInfo<To, PointerUnion<PTs...>>
    : public DefaultDoCastIfPossible<To, PointerUnion<PTs...>,
                                     CastInfo<To, PointerUnion<PTs...>>> {
  using From = PointerUnion<PTs...>;
  using Impl = CastInfoPointerUnionImpl<PTs...>;

  static inline bool isPossible(From &f) {
    return Impl::template isPossible<To>(f);
  }

  static To doCast(From &f) { return Impl::template doCast<To>(f); }

  static inline To castFailed() { return To(); }
};

template <typename To, typename... PTs>
struct CastInfo<To, const PointerUnion<PTs...>>
    : public ConstStrippingForwardingCast<To, const PointerUnion<PTs...>,
                                          CastInfo<To, PointerUnion<PTs...>>> {
};

// Teach SmallPtrSet that PointerUnion is "basically a pointer": the number of
// low bits available is the minimum over the member pointer types, minus the
// bits needed for the discriminator.
template <typename ...PTs>
struct PointerLikeTypeTraits<PointerUnion<PTs...>> {
  static inline void *getAsVoidPointer(const PointerUnion<PTs...> &P) {
    return P.getOpaqueValue();
  }

  static inline PointerUnion<PTs...> getFromVoidPointer(void *P) {
    return PointerUnion<PTs...>::getFromOpaqueValue(P);
  }

  // The number of bits available is the min over the pointer types, minus the
  // bits needed for the discriminator.
  static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<decltype(
      PointerUnion<PTs...>::Val)>::NumLowBitsAvailable;
};

// Teach DenseMap how to use PointerUnions as keys.
template <typename ...PTs> struct DenseMapInfo<PointerUnion<PTs...>> {
  using Union = PointerUnion<PTs...>;
  using FirstInfo =
      DenseMapInfo<typename pointer_union_detail::GetFirstType<PTs...>::type>;

  static inline Union getEmptyKey() { return Union(FirstInfo::getEmptyKey()); }

  static inline Union getTombstoneKey() {
    return Union(FirstInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const Union &UnionVal) {
    intptr_t key = (intptr_t)UnionVal.getOpaqueValue();
    return DenseMapInfo<intptr_t>::getHashValue(key);
  }

  static bool isEqual(const Union &LHS, const Union &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

#endif // LLVM_ADT_POINTERUNION_H

//===- GenericUniformityImpl.h -----------------------*- C++ -*------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This template implementation resides in a separate file so that it
// does not get injected into every .cpp file that includes the
// generic header.
//
// DO NOT INCLUDE THIS FILE WHEN MERELY USING UNIFORMITYINFO.
//
// This file should only be included by files that implement a
// specialization of the relevant templates. Currently these are:
// - UniformityAnalysis.cpp
//
// Note: The DEBUG_TYPE macro should be defined before using this
// file so that any use of LLVM_DEBUG is associated with the
// including file rather than this file.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Implementation of uniformity analysis.
///
/// The algorithm is a fixed point iteration that starts with the assumption
/// that all control flow and all values are uniform. Starting from sources of
/// divergence (whose discovery must be implemented by a CFG- or even
/// target-specific derived class), divergence of values is propagated from
/// definition to uses in a straightforward way. The main complexity lies in
/// the propagation of the impact of divergent control flow on the divergence of
/// values (sync dependencies).
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GENERICUNIFORMITYIMPL_H
#define LLVM_ADT_GENERICUNIFORMITYIMPL_H

#include "llvm/ADT/GenericUniformityInfo.h"

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"

#include <set>

#define DEBUG_TYPE "uniformity"

using namespace llvm;

namespace llvm {

template <typename Range> auto unique(Range &&R) {
  return std::unique(adl_begin(R), adl_end(R));
}

/// Construct a specially modified post-order traversal of cycles.
///
/// The ModifiedPO is constructed using a virtually modified CFG as follows:
///
/// 1. The successors of pre-entry nodes (predecessors of a cycle
///    entry that are outside the cycle) are replaced by the
///    successors of the successors of the header.
/// 2. Successors of the cycle header are replaced by the exit blocks
///    of the cycle.
///
/// Effectively, we produce a depth-first numbering with the following
/// properties:
///
/// 1. Nodes after a cycle are numbered earlier than the cycle header.
/// 2. The header is numbered earlier than the nodes in the cycle.
/// 3. The numbering of the nodes within the cycle forms an interval
///    starting with the header.
///
/// In effect, the virtual modification arranges the nodes in a
/// cycle as a DAG with the header as the sole leaf, and successors of
/// the header as the roots. A reverse traversal of this numbering has
/// the following invariant on the unmodified original CFG:
///
///    Each node is visited after all its predecessors, except if that
///    predecessor is the cycle header.
///
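/// For example (an illustrative sketch): in the reducible loop
///
///   Entry -> H,  H -> B,  H -> X,  B -> H
///
/// one valid modified post-order numbering is X:0, H:1, B:2, Entry:3.
/// The reverse traversal then visits Entry, B, H, X, and every node is
/// visited after all of its predecessors except the header H.
///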
template <typename ContextT> class ModifiedPostOrder {
public:
  using BlockT = typename ContextT::BlockT;
  using FunctionT = typename ContextT::FunctionT;
  using DominatorTreeT = typename ContextT::DominatorTreeT;

  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;
  using const_iterator = typename std::vector<BlockT *>::const_iterator;

  ModifiedPostOrder(const ContextT &C) : Context(C) {}

  bool empty() const { return m_order.empty(); }
  size_t size() const { return m_order.size(); }

  void clear() { m_order.clear(); }
  void compute(const CycleInfoT &CI);

  unsigned count(BlockT *BB) const { return POIndex.count(BB); }
  const BlockT *operator[](size_t idx) const { return m_order[idx]; }

  void appendBlock(const BlockT &BB, bool isReducibleCycleHeader = false) {
    POIndex[&BB] = m_order.size();
    m_order.push_back(&BB);
    LLVM_DEBUG(dbgs() << "ModifiedPO(" << POIndex[&BB]
                      << "): " << Context.print(&BB) << "\n");
    if (isReducibleCycleHeader)
      ReducibleCycleHeaders.insert(&BB);
  }

  unsigned getIndex(const BlockT *BB) const {
    assert(POIndex.count(BB));
    return POIndex.lookup(BB);
  }

  bool isReducibleCycleHeader(const BlockT *BB) const {
    return ReducibleCycleHeaders.contains(BB);
  }

private:
  SmallVector<const BlockT *> m_order;
  DenseMap<const BlockT *, unsigned> POIndex;
  SmallPtrSet<const BlockT *, 32> ReducibleCycleHeaders;
  const ContextT &Context;

  void computeCyclePO(const CycleInfoT &CI, const CycleT *Cycle,
                      SmallPtrSetImpl<BlockT *> &Finalized);

  void computeStackPO(SmallVectorImpl<BlockT *> &Stack, const CycleInfoT &CI,
                      const CycleT *Cycle,
                      SmallPtrSetImpl<BlockT *> &Finalized);
};

template <typename> class DivergencePropagator;

/// \class GenericSyncDependenceAnalysis
///
/// \brief Locate join blocks for disjoint paths starting at a divergent branch.
///
/// An analysis per divergent branch that returns the set of basic
/// blocks whose phi nodes become divergent due to divergent control.
/// These are the blocks that are reachable by two disjoint paths from
/// the branch, or cycle exits reachable along a path that is disjoint
/// from a path to the cycle latch.

// --- The blank line above intentionally ends the doxygen comment ---
//
// Originally implemented in SyncDependenceAnalysis.cpp for DivergenceAnalysis.
//
// The SyncDependenceAnalysis is used in the UniformityAnalysis to model
// control-induced divergence in phi nodes.
//
// -- Reference --
// The algorithm is an extension of Section 5 of
//
//   An abstract interpretation for SPMD divergence
//       on reducible control flow graphs.
//   Julian Rosemann, Simon Moll and Sebastian Hack
//   POPL '21
//
//
// -- Sync dependence --
// Sync dependence characterizes the control flow aspect of the
// propagation of branch divergence. For example,
//
//   %cond = icmp slt i32 %tid, 10
//   br i1 %cond, label %then, label %else
// then:
//   br label %merge
// else:
//   br label %merge
// merge:
//   %a = phi i32 [ 0, %then ], [ 1, %else ]
//
// Suppose %tid holds the thread ID. Although %a is not data dependent on %tid
// because %tid is not on its use-def chains, %a is sync dependent on %tid
// because the branch "br i1 %cond" depends on %tid and affects which value %a
// is assigned to.
//
//
// -- Reduction to SSA construction --
// There are two disjoint paths from A to X if a certain variant of SSA
// construction places a phi node in X under the following set-up scheme.
//
// This variant of SSA construction ignores incoming undef values.
// That is, paths from the entry without a definition do not result in
// phi nodes.
//
//       entry
//     /      \
//    A        \
//  /   \       Y
// B     C     /
//  \   /  \  /
//    D     E
//     \   /
//       F
//
// Assume that A contains a divergent branch. We are interested
// in the set of all blocks where each block is reachable from A
// via two disjoint paths. This would be the set {D, F} in this
// case.
// To generally reduce this query to SSA construction we introduce
// a virtual variable x and assign to x different values in each
// successor block of A.
//
//           entry
//         /      \
//        A        \
//      /   \       Y
// x = 0   x = 1   /
//      \  /   \  /
//        D     E
//         \   /
//           F
//
// Our flavor of SSA construction for x will construct the following
//
//            entry
//          /      \
//         A        \
//       /   \       Y
// x0 = 0   x1 = 1  /
//       \   /   \ /
//     x2 = phi   E
//         \     /
//         x3 = phi
//
// The blocks D and F contain phi nodes and are thus each reachable
// by two disjoint paths from A.
//
// -- Remarks --
// * In case of cycle exits we need to check for temporal divergence.
//   To this end, we check whether the definition of x differs between the
//   cycle exit and the cycle header (_after_ SSA construction).
//
// * In the presence of irreducible control flow, the fixed point is
//   reached only after multiple iterations. This is because labels
//   reaching the header of a cycle must be repropagated through the
//   cycle. This is true even in a reducible cycle, since the labels
//   may have been produced by a nested irreducible cycle.
//
// * Note that SyncDependenceAnalysis is not concerned with the points
//   of convergence in an irreducible cycle. Its only purpose is to
//   identify join blocks. The "diverged entry" criterion is
//   separately applied on join blocks to determine if an entire
//   irreducible cycle is assumed to be divergent.
//
// * Relevant related work:
//     A simple algorithm for global data flow analysis problems.
//     Matthew S. Hecht and Jeffrey D. Ullman.
//     SIAM Journal on Computing, 4(4):519–532, December 1975.
//
template <typename ContextT> class GenericSyncDependenceAnalysis {
public:
  using BlockT = typename ContextT::BlockT;
  using DominatorTreeT = typename ContextT::DominatorTreeT;
  using FunctionT = typename ContextT::FunctionT;
  using ValueRefT = typename ContextT::ValueRefT;
  using InstructionT = typename ContextT::InstructionT;

  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;

  using ConstBlockSet = SmallPtrSet<const BlockT *, 4>;
  using ModifiedPO = ModifiedPostOrder<ContextT>;

  // * if BlockLabels[B] == C then C is the dominating definition at
  //   block B
  // * if BlockLabels[B] == nullptr then we haven't seen B yet
  // * if BlockLabels[B] == B then:
  //   - B is a join point of disjoint paths from X, or,
  //   - B is an immediate successor of X (initial value), or,
  //   - B is X
  using BlockLabelMap = DenseMap<const BlockT *, const BlockT *>;

  /// Information discovered by the sync dependence analysis for each
  /// divergent branch.
  struct DivergenceDescriptor {
    // Join points of diverged paths.
    ConstBlockSet JoinDivBlocks;
    // Divergent cycle exits
    ConstBlockSet CycleDivBlocks;
    // Labels assigned to blocks on diverged paths.
    BlockLabelMap BlockLabels;
  };

  using DivergencePropagatorT = DivergencePropagator<ContextT>;

  GenericSyncDependenceAnalysis(const ContextT &Context,
                                const DominatorTreeT &DT, const CycleInfoT &CI);

  /// \brief Computes divergent join points and cycle exits caused by branch
  /// divergence in \p Term.
  ///
  /// This returns a pair of sets:
  /// * The set of blocks which are reachable by disjoint paths from
  ///   \p Term.
  /// * The set also contains cycle exits if there are two disjoint paths:
  ///   one from \p Term to the cycle exit and another from \p Term to
  ///   the cycle header.
  const DivergenceDescriptor &getJoinBlocks(const BlockT *DivTermBlock);

private:
  static DivergenceDescriptor EmptyDivergenceDesc;

  ModifiedPO CyclePO;

  const DominatorTreeT &DT;
  const CycleInfoT &CI;

  DenseMap<const BlockT *, std::unique_ptr<DivergenceDescriptor>>
      CachedControlDivDescs;
};

/// \brief Analysis that identifies uniform values in a data-parallel
/// execution.
///
/// This analysis propagates divergence in a data-parallel context
/// from sources of divergence to all users. It can be instantiated
/// for an IR that provides a suitable SSAContext.
template <typename ContextT> class GenericUniformityAnalysisImpl {
public:
  using BlockT = typename ContextT::BlockT;
  using FunctionT = typename ContextT::FunctionT;
  using ValueRefT = typename ContextT::ValueRefT;
  using ConstValueRefT = typename ContextT::ConstValueRefT;
  using UseT = typename ContextT::UseT;
  using InstructionT = typename ContextT::InstructionT;
  using DominatorTreeT = typename ContextT::DominatorTreeT;

  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;

  using SyncDependenceAnalysisT = GenericSyncDependenceAnalysis<ContextT>;
  using DivergenceDescriptorT =
      typename SyncDependenceAnalysisT::DivergenceDescriptor;
  using BlockLabelMapT = typename SyncDependenceAnalysisT::BlockLabelMap;

  GenericUniformityAnalysisImpl(const FunctionT &F, const DominatorTreeT &DT,
                                const CycleInfoT &CI,
                                const TargetTransformInfo *TTI)
      : Context(CI.getSSAContext()), F(F), CI(CI), TTI(TTI), DT(DT),
        SDA(Context, DT, CI) {}

  void initialize();

  const FunctionT &getFunction() const { return F; }

  /// \brief Mark \p Instr as an instruction that is always uniform.
  void addUniformOverride(const InstructionT &Instr);

  /// \brief Examine \p I for divergent outputs and add to the worklist.
  void markDivergent(const InstructionT &I);

  /// \brief Mark \p DivVal as a divergent value.
  /// \returns Whether the tracked divergence state of \p DivVal changed.
  bool markDivergent(ConstValueRefT DivVal);

  /// \brief Mark outputs of \p Instr as divergent.
  /// \returns Whether the tracked divergence state of any output has changed.
  bool markDefsDivergent(const InstructionT &Instr);

  /// \brief Propagate divergence to all instructions in the region.
  /// Divergence is seeded by calls to \p markDivergent.
  void compute();

  /// \brief Whether any value was marked or analyzed to be divergent.
  bool hasDivergence() const { return !DivergentValues.empty(); }

  /// \brief Whether \p Instr always produces a uniform value regardless of
  /// its operands.
  bool isAlwaysUniform(const InstructionT &Instr) const;

  bool hasDivergentDefs(const InstructionT &I) const;

  bool isDivergent(const InstructionT &I) const {
    if (I.isTerminator()) {
      return DivergentTermBlocks.contains(I.getParent());
    }
    return hasDivergentDefs(I);
  }

  /// \brief Whether \p Val is divergent at its definition.
  bool isDivergent(ConstValueRefT V) const { return DivergentValues.count(V); }

  bool isDivergentUse(const UseT &U) const;

  bool hasDivergentTerminator(const BlockT &B) const {
    return DivergentTermBlocks.contains(&B);
  }

  void print(raw_ostream &out) const;

protected:
  /// \brief Value/block pair representing a single phi input.
  struct PhiInput {
    ConstValueRefT value;
    BlockT *predBlock;

    PhiInput(ConstValueRefT value, BlockT *predBlock)
        : value(value), predBlock(predBlock) {}
  };

  const ContextT &Context;
  const FunctionT &F;
  const CycleInfoT &CI;
  const TargetTransformInfo *TTI = nullptr;

  // Detected/marked divergent values.
  std::set<ConstValueRefT> DivergentValues;
  SmallPtrSet<const BlockT *, 32> DivergentTermBlocks;

  // Internal worklist for divergence propagation.
  std::vector<const InstructionT *> Worklist;

  /// \brief Mark \p Term as divergent and push all Instructions that become
  /// divergent as a result on the worklist.
  void analyzeControlDivergence(const InstructionT &Term);

private:
  const DominatorTreeT &DT;

  // Recognized cycles with divergent exits.
  SmallPtrSet<const CycleT *, 16> DivergentExitCycles;

  // Cycles assumed to be divergent.
  //
  // We don't use a set here because every insertion anyway needs an explicit
  // traversal of all existing members (to check cycle containment), so a set
  // would buy nothing.
  SmallVector<const CycleT *> AssumedDivergent;

  // The SDA links divergent branches to divergent control-flow joins.
  SyncDependenceAnalysisT SDA;

  // Set of known-uniform values.
  SmallPtrSet<const InstructionT *, 32> UniformOverrides;

  /// \brief Mark all nodes in \p JoinBlock as divergent and push them on
  /// the worklist.
  void taintAndPushAllDefs(const BlockT &JoinBlock);

  /// \brief Mark all phi nodes in \p JoinBlock as divergent and push them on
  /// the worklist.
  void taintAndPushPhiNodes(const BlockT &JoinBlock);

  /// \brief Identify all Instructions that become divergent because \p DivExit
  /// is a divergent cycle exit of \p DivCycle. Mark those instructions as
  /// divergent and push them on the worklist.
  void propagateCycleExitDivergence(const BlockT &DivExit,
                                    const CycleT &DivCycle);

  /// Mark as divergent all external uses of values defined in \p DefCycle.
  void analyzeCycleExitDivergence(const CycleT &DefCycle);

  /// \brief Mark as divergent all uses of \p I that are outside \p DefCycle.
  void propagateTemporalDivergence(const InstructionT &I,
                                   const CycleT &DefCycle);

  /// \brief Push all users of \p Val (in the region) to the worklist.
  void pushUsers(const InstructionT &I);
  void pushUsers(ConstValueRefT V);

  bool usesValueFromCycle(const InstructionT &I, const CycleT &DefCycle) const;

  /// \brief Whether \p Def is divergent when read in \p ObservingBlock.
  bool isTemporalDivergent(const BlockT &ObservingBlock,
                           const InstructionT &Def) const;
};

template <typename ImplT>
void GenericUniformityAnalysisImplDeleter<ImplT>::operator()(ImplT *Impl) {
  delete Impl;
}

/// Compute divergence starting with a divergent branch.
template <typename ContextT> class DivergencePropagator {
public:
  using BlockT = typename ContextT::BlockT;
  using DominatorTreeT = typename ContextT::DominatorTreeT;
  using FunctionT = typename ContextT::FunctionT;
  using ValueRefT = typename ContextT::ValueRefT;

  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;

  using ModifiedPO = ModifiedPostOrder<ContextT>;
  using SyncDependenceAnalysisT = GenericSyncDependenceAnalysis<ContextT>;
  using DivergenceDescriptorT =
      typename SyncDependenceAnalysisT::DivergenceDescriptor;
  using BlockLabelMapT = typename SyncDependenceAnalysisT::BlockLabelMap;

  const ModifiedPO &CyclePOT;
  const DominatorTreeT &DT;
  const CycleInfoT &CI;
  const BlockT &DivTermBlock;
  const ContextT &Context;

  // Track blocks that receive a new label. Every time we relabel a
  // cycle header, we make another pass over the modified post-order
  // in order to propagate the header label. The bit vector also
  // allows us to skip labels that have not changed.
  SparseBitVector<> FreshLabels;

  // Divergent join and cycle exit descriptor.
  std::unique_ptr<DivergenceDescriptorT> DivDesc;
  BlockLabelMapT &BlockLabels;

  DivergencePropagator(const ModifiedPO &CyclePOT, const DominatorTreeT &DT,
                       const CycleInfoT &CI, const BlockT &DivTermBlock)
      : CyclePOT(CyclePOT), DT(DT), CI(CI), DivTermBlock(DivTermBlock),
        Context(CI.getSSAContext()), DivDesc(new DivergenceDescriptorT),
        BlockLabels(DivDesc->BlockLabels) {}

  void printDefs(raw_ostream &Out) {
    Out << "Propagator::BlockLabels {\n";
    for (int BlockIdx = (int)CyclePOT.size() - 1; BlockIdx >= 0; --BlockIdx) {
      const auto *Block = CyclePOT[BlockIdx];
      const auto *Label = BlockLabels[Block];
      Out << Context.print(Block) << "(" << BlockIdx << ") : ";
      if (!Label) {
        Out << "<null>\n";
      } else {
        Out << Context.print(Label) << "\n";
      }
    }
    Out << "}\n";
  }

  // Push a definition (\p PushedLabel) to \p SuccBlock and return whether this
  // causes a divergent join.
  bool computeJoin(const BlockT &SuccBlock, const BlockT &PushedLabel) {
    const auto *OldLabel = BlockLabels[&SuccBlock];

    LLVM_DEBUG(dbgs() << "labeling " << Context.print(&SuccBlock) << ":\n"
                      << "\tpushed label: " << Context.print(&PushedLabel)
                      << "\n"
                      << "\told label: " << Context.print(OldLabel) << "\n");

    // Early exit if there is no change in the label.
    if (OldLabel == &PushedLabel)
      return false;

    if (OldLabel != &SuccBlock) {
      auto SuccIdx = CyclePOT.getIndex(&SuccBlock);
      // Assigning a new label, mark this in FreshLabels.
      LLVM_DEBUG(dbgs() << "\tfresh label: " << SuccIdx << "\n");
      FreshLabels.set(SuccIdx);
    }

    // This is not a join if the succ was previously unlabeled.
    if (!OldLabel) {
      LLVM_DEBUG(dbgs() << "\tnew label: " << Context.print(&PushedLabel)
                        << "\n");
      BlockLabels[&SuccBlock] = &PushedLabel;
      return false;
    }

    // This is a new join. Label the join block as itself, and not as
    // the pushed label.
    LLVM_DEBUG(dbgs() << "\tnew label: " << Context.print(&SuccBlock) << "\n");
    BlockLabels[&SuccBlock] = &SuccBlock;

    return true;
  }

  // Visit a virtual cycle exit edge from the cycle header; a join here
  // indicates temporal divergence.
  bool visitCycleExitEdge(const BlockT &ExitBlock, const BlockT &Label) {
    if (!computeJoin(ExitBlock, Label))
      return false;

    // Identified a divergent cycle exit
    DivDesc->CycleDivBlocks.insert(&ExitBlock);
    LLVM_DEBUG(dbgs() << "\tDivergent cycle exit: " << Context.print(&ExitBlock)
                      << "\n");
    return true;
  }

  // Process \p SuccBlock with reaching definition \p Label.
  bool visitEdge(const BlockT &SuccBlock, const BlockT &Label) {
    if (!computeJoin(SuccBlock, Label))
      return false;

    // Divergent, disjoint paths join.
    DivDesc->JoinDivBlocks.insert(&SuccBlock);
    LLVM_DEBUG(dbgs() << "\tDivergent join: " << Context.print(&SuccBlock)
                      << "\n");
    return true;
  }

  std::unique_ptr<DivergenceDescriptorT> computeJoinPoints() {
    assert(DivDesc);

    LLVM_DEBUG(dbgs() << "SDA:computeJoinPoints: "
                      << Context.print(&DivTermBlock) << "\n");

    // Early stopping criterion
    int FloorIdx = CyclePOT.size() - 1;
    const BlockT *FloorLabel = nullptr;
    int DivTermIdx = CyclePOT.getIndex(&DivTermBlock);

    // Bootstrap with branch targets
    auto const *DivTermCycle = CI.getCycle(&DivTermBlock);
    for (const auto *SuccBlock : successors(&DivTermBlock)) {
      if (DivTermCycle && !DivTermCycle->contains(SuccBlock)) {
        // If DivTerm exits the cycle immediately, computeJoin() might
        // not reach SuccBlock with a different label. We need to
        // check for this exit now.
        DivDesc->CycleDivBlocks.insert(SuccBlock);
        LLVM_DEBUG(dbgs() << "\tImmediate divergent cycle exit: "
                          << Context.print(SuccBlock) << "\n");
      }
      auto SuccIdx = CyclePOT.getIndex(SuccBlock);
      visitEdge(*SuccBlock, *SuccBlock);
      FloorIdx = std::min<int>(FloorIdx, SuccIdx);
    }

    while (true) {
      auto BlockIdx = FreshLabels.find_last();
      if (BlockIdx == -1 || BlockIdx < FloorIdx)
        break;

      LLVM_DEBUG(dbgs() << "Current labels:\n"; printDefs(dbgs()));

      FreshLabels.reset(BlockIdx);
      if (BlockIdx == DivTermIdx) {
        LLVM_DEBUG(dbgs() << "Skipping DivTermBlock\n");
        continue;
      }

      const auto *Block = CyclePOT[BlockIdx];
      LLVM_DEBUG(dbgs() << "visiting " << Context.print(Block) << " at index "
                        << BlockIdx << "\n");

      const auto *Label = BlockLabels[Block];
      assert(Label);

      bool CausedJoin = false;
      int LoweredFloorIdx = FloorIdx;

      // If the current block is the header of a reducible cycle that
      // contains the divergent branch, then the label should be
      // propagated to the cycle exits. Such a header is the "last
      // possible join" of any disjoint paths within this cycle. This
      // prevents detection of spurious joins at the entries of any
      // irreducible child cycles.
      //
      // This conclusion about the header is true for any choice of DFS:
      //
      //   If some DFS has a reducible cycle C with header H, then for
      //   any other DFS, H is the header of a cycle C' that is a
      //   superset of C. For a divergent branch inside the subgraph
      //   C, any join node inside C is either H, or some node
      //   encountered without passing through H.
      //
      auto getReducibleParent = [&](const BlockT *Block) -> const CycleT * {
        if (!CyclePOT.isReducibleCycleHeader(Block))
          return nullptr;
        const auto *BlockCycle = CI.getCycle(Block);
        if (BlockCycle->contains(&DivTermBlock))
          return BlockCycle;
        return nullptr;
      };

      if (const auto *BlockCycle = getReducibleParent(Block)) {
        SmallVector<BlockT *, 4> BlockCycleExits;
        BlockCycle->getExitBlocks(BlockCycleExits);
        for (auto *BlockCycleExit : BlockCycleExits) {
          CausedJoin |= visitCycleExitEdge(*BlockCycleExit, *Label);
          LoweredFloorIdx =
              std::min<int>(LoweredFloorIdx, CyclePOT.getIndex(BlockCycleExit));
        }
      } else {
        for (const auto *SuccBlock : successors(Block)) {
          CausedJoin |= visitEdge(*SuccBlock, *Label);
          LoweredFloorIdx =
              std::min<int>(LoweredFloorIdx, CyclePOT.getIndex(SuccBlock));
        }
      }

      // Floor update
      if (CausedJoin) {
        // 1. Different labels pushed to successors
        FloorIdx = LoweredFloorIdx;
      } else if (FloorLabel != Label) {
        // 2. No join caused BUT we pushed a label that is different than the
        // last pushed label
        FloorIdx = LoweredFloorIdx;
        FloorLabel = Label;
      }
    }

    LLVM_DEBUG(dbgs() << "Final labeling:\n"; printDefs(dbgs()));

    // Check every cycle containing DivTermBlock for exit divergence.
    // A cycle has exit divergence if the label of an exit block does
    // not match the label of its header.
    for (const auto *Cycle = CI.getCycle(&DivTermBlock); Cycle;
         Cycle = Cycle->getParentCycle()) {
      if (Cycle->isReducible()) {
        // The exit divergence of a reducible cycle is recorded while
        // propagating labels.
        continue;
      }
      SmallVector<BlockT *> Exits;
      Cycle->getExitBlocks(Exits);
      auto *Header = Cycle->getHeader();
      auto *HeaderLabel = BlockLabels[Header];
      for (const auto *Exit : Exits) {
        if (BlockLabels[Exit] != HeaderLabel) {
          // Identified a divergent cycle exit
          DivDesc->CycleDivBlocks.insert(Exit);
          LLVM_DEBUG(dbgs() << "\tDivergent cycle exit: " << Context.print(Exit)
                            << "\n");
        }
      }
    }

    return std::move(DivDesc);
  }
};

template <typename ContextT>
typename llvm::GenericSyncDependenceAnalysis<ContextT>::DivergenceDescriptor
    llvm::GenericSyncDependenceAnalysis<ContextT>::EmptyDivergenceDesc;

template <typename ContextT>
llvm::GenericSyncDependenceAnalysis<ContextT>::GenericSyncDependenceAnalysis(
    const ContextT &Context, const DominatorTreeT &DT, const CycleInfoT &CI)
    : CyclePO(Context), DT(DT), CI(CI) {
  CyclePO.compute(CI);
}

template <typename ContextT>
auto llvm::GenericSyncDependenceAnalysis<ContextT>::getJoinBlocks(
    const BlockT *DivTermBlock) -> const DivergenceDescriptor & {
  // trivial case
  if (succ_size(DivTermBlock) <= 1) {
    return EmptyDivergenceDesc;
  }

  // already available in cache?
  auto ItCached = CachedControlDivDescs.find(DivTermBlock);
  if (ItCached != CachedControlDivDescs.end())
    return *ItCached->second;

  // compute all join points
  DivergencePropagatorT Propagator(CyclePO, DT, CI, *DivTermBlock);
  auto DivDesc = Propagator.computeJoinPoints();

  auto printBlockSet = [&](ConstBlockSet &Blocks) {
    return Printable([&](raw_ostream &Out) {
      Out << "[";
      ListSeparator LS;
      for (const auto *BB : Blocks) {
        Out << LS << CI.getSSAContext().print(BB);
      }
      Out << "]\n";
    });
  };

  LLVM_DEBUG(
      dbgs() << "\nResult (" << CI.getSSAContext().print(DivTermBlock)
             << "):\n  JoinDivBlocks: " << printBlockSet(DivDesc->JoinDivBlocks)
             << "  CycleDivBlocks: " << printBlockSet(DivDesc->CycleDivBlocks)
             << "\n");
  (void)printBlockSet;

  auto ItInserted =
      CachedControlDivDescs.try_emplace(DivTermBlock, std::move(DivDesc));
  assert(ItInserted.second);
  return *ItInserted.first->second;
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::markDivergent(
    const InstructionT &I) {
  if (isAlwaysUniform(I))
    return;
  bool Marked = false;
  if (I.isTerminator()) {
    Marked = DivergentTermBlocks.insert(I.getParent()).second;
    if (Marked) {
      LLVM_DEBUG(dbgs() << "marked divergent term block: "
                        << Context.print(I.getParent()) << "\n");
    }
  } else {
    Marked = markDefsDivergent(I);
  }

  if (Marked)
    Worklist.push_back(&I);
}

template <typename ContextT>
bool GenericUniformityAnalysisImpl<ContextT>::markDivergent(
    ConstValueRefT Val) {
  if (DivergentValues.insert(Val).second) {
    LLVM_DEBUG(dbgs() << "marked divergent: " << Context.print(Val) << "\n");
    return true;
  }
  return false;
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::addUniformOverride(
    const InstructionT &Instr) {
  UniformOverrides.insert(&Instr);
}

// Mark as divergent all external uses of values defined in \p DefCycle.
//
// A value V defined by a block B inside \p DefCycle may be used outside the
// cycle only if the use is a PHI in some exit block, or B dominates some exit
// block. Thus, we check uses as follows:
//
// - Check all PHIs in all exit blocks for inputs defined inside \p DefCycle.
// - For every block B inside \p DefCycle that dominates at least one exit
//   block, check all uses outside \p DefCycle.
//
// FIXME: This function does not distinguish between divergent and uniform
// exits. For each divergent exit, only the values that are live at that exit
// need to be propagated as divergent at their use outside the cycle.
template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::analyzeCycleExitDivergence(
    const CycleT &DefCycle) {
  SmallVector<BlockT *> Exits;
  DefCycle.getExitBlocks(Exits);
  for (auto *Exit : Exits) {
    for (auto &Phi : Exit->phis()) {
      if (usesValueFromCycle(Phi, DefCycle)) {
        markDivergent(Phi);
      }
    }
  }

  for (auto *BB : DefCycle.blocks()) {
    if (!llvm::any_of(Exits,
                     [&](BlockT *Exit) { return DT.dominates(BB, Exit); }))
      continue;
    for (auto &II : *BB) {
      propagateTemporalDivergence(II, DefCycle);
    }
  }
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::propagateCycleExitDivergence(
    const BlockT &DivExit, const CycleT &InnerDivCycle) {
  LLVM_DEBUG(dbgs() << "\tpropCycleExitDiv " << Context.print(&DivExit)
                    << "\n");
  auto *DivCycle = &InnerDivCycle;
  auto *OuterDivCycle = DivCycle;
  auto *ExitLevelCycle = CI.getCycle(&DivExit);
  const unsigned CycleExitDepth =
      ExitLevelCycle ? ExitLevelCycle->getDepth() : 0;

  // Find outer-most cycle that does not contain \p DivExit
  while (DivCycle && DivCycle->getDepth() > CycleExitDepth) {
    LLVM_DEBUG(dbgs() << "  Found exiting cycle: "
                      << Context.print(DivCycle->getHeader()) << "\n");
    OuterDivCycle = DivCycle;
    DivCycle = DivCycle->getParentCycle();
  }
  LLVM_DEBUG(dbgs() << "\tOuter-most exiting cycle: "
                    << Context.print(OuterDivCycle->getHeader()) << "\n");

  if (!DivergentExitCycles.insert(OuterDivCycle).second)
    return;

  // Exit divergence does not matter if the cycle itself is assumed to
  // be divergent.
  for (const auto *C : AssumedDivergent) {
    if (C->contains(OuterDivCycle))
      return;
  }

  analyzeCycleExitDivergence(*OuterDivCycle);
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::taintAndPushAllDefs(
    const BlockT &BB) {
  LLVM_DEBUG(dbgs() << "taintAndPushAllDefs " << Context.print(&BB) << "\n");
  for (const auto &I : instrs(BB)) {
    // Terminators do not produce values; they are divergent only if
    // the condition is divergent. That is handled when the divergent
    // condition is placed in the worklist.
    if (I.isTerminator())
      break;

    markDivergent(I);
  }
}

/// Mark divergent phi nodes in a join block
template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::taintAndPushPhiNodes(
    const BlockT &JoinBlock) {
  LLVM_DEBUG(dbgs() << "taintAndPushPhiNodes in " << Context.print(&JoinBlock)
                    << "\n");
  for (const auto &Phi : JoinBlock.phis()) {
    // FIXME: The non-undef value is not constant per se; it just happens to be
    // uniform and may not dominate this PHI. So assuming that the same value
    // reaches along all incoming edges may itself be undefined behaviour. This
    // particular interpretation of the undef value was added to
    // DivergenceAnalysis in the following review:
    //
    // https://reviews.llvm.org/D19013
    if (ContextT::isConstantOrUndefValuePhi(Phi))
      continue;
    markDivergent(Phi);
  }
}

/// Add \p Candidate to \p Cycles if it is not already contained in \p Cycles.
///
/// \return true iff \p Candidate was added to \p Cycles.
template <typename CycleT>
static bool insertIfNotContained(SmallVector<CycleT *> &Cycles,
                                 CycleT *Candidate) {
  if (llvm::any_of(Cycles,
                   [Candidate](CycleT *C) { return C->contains(Candidate); }))
    return false;
  Cycles.push_back(Candidate);
  return true;
}

/// Return the outermost cycle made divergent by a branch outside it.
///
/// If two paths that diverged outside an irreducible cycle join
/// inside that cycle, then that whole cycle is assumed to be
/// divergent. This does not apply if the cycle is reducible.
template <typename CycleT, typename BlockT>
static const CycleT *getExtDivCycle(const CycleT *Cycle,
                                    const BlockT *DivTermBlock,
                                    const BlockT *JoinBlock) {
  assert(Cycle);
  assert(Cycle->contains(JoinBlock));

  if (Cycle->contains(DivTermBlock))
    return nullptr;

  if (Cycle->isReducible()) {
    assert(Cycle->getHeader() == JoinBlock);
    return nullptr;
  }

  const auto *Parent = Cycle->getParentCycle();
  while (Parent && !Parent->contains(DivTermBlock)) {
    // If the join is inside a child, then the parent must be
    // irreducible. The only join in a reducible cycle is its own
    // header.
    assert(!Parent->isReducible());
    Cycle = Parent;
    Parent = Cycle->getParentCycle();
  }

  LLVM_DEBUG(dbgs() << "cycle made divergent by external branch\n");
  return Cycle;
}

/// Return the outermost cycle made divergent by a branch inside it.
///
/// This checks the "diverged entry" criterion defined in
/// docs/ConvergenceAnalysis.html.
template <typename ContextT, typename CycleT, typename BlockT,
          typename DominatorTreeT>
static const CycleT *
getIntDivCycle(const CycleT *Cycle, const BlockT *DivTermBlock,
               const BlockT *JoinBlock, const DominatorTreeT &DT,
               ContextT &Context) {
  LLVM_DEBUG(dbgs() << "examine join " << Context.print(JoinBlock)
                    << "for internal branch " << Context.print(DivTermBlock)
                    << "\n");
  if (DT.properlyDominates(DivTermBlock, JoinBlock))
    return nullptr;

  // Find the smallest common cycle, if one exists.
  assert(Cycle && Cycle->contains(JoinBlock));
  while (Cycle && !Cycle->contains(DivTermBlock)) {
    Cycle = Cycle->getParentCycle();
  }
  if (!Cycle || Cycle->isReducible())
    return nullptr;

  if (DT.properlyDominates(Cycle->getHeader(), JoinBlock))
    return nullptr;

  LLVM_DEBUG(dbgs() << "  header " << Context.print(Cycle->getHeader())
                    << " does not dominate join\n");

  const auto *Parent = Cycle->getParentCycle();
  while (Parent && !DT.properlyDominates(Parent->getHeader(), JoinBlock)) {
    LLVM_DEBUG(dbgs() << "  header " << Context.print(Parent->getHeader())
                      << " does not dominate join\n");
    Cycle = Parent;
    Parent = Parent->getParentCycle();
  }

  LLVM_DEBUG(dbgs() << "  cycle made divergent by internal branch\n");
  return Cycle;
}

template <typename ContextT, typename CycleT, typename BlockT,
          typename DominatorTreeT>
static const CycleT *
getOutermostDivergentCycle(const CycleT *Cycle, const BlockT *DivTermBlock,
                           const BlockT *JoinBlock, const DominatorTreeT &DT,
                           ContextT &Context) {
  if (!Cycle)
    return nullptr;

  // First try to expand Cycle to the largest cycle that contains JoinBlock
  // but not DivTermBlock.
  const auto *Ext = getExtDivCycle(Cycle, DivTermBlock, JoinBlock);

  // Continue expanding to the largest cycle that contains both.
  const auto *Int = getIntDivCycle(Cycle, DivTermBlock, JoinBlock, DT, Context);

  if (Int)
    return Int;
  return Ext;
}

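/// Whether \p Def is observed as temporally divergent in \p ObservingBlock:
/// some cycle that contains the definition but not the observer has divergent
/// exits, so threads may leave that cycle in different iterations and thus
/// observe different values of \p Def.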
template <typename ContextT>
bool GenericUniformityAnalysisImpl<ContextT>::isTemporalDivergent(
    const BlockT &ObservingBlock, const InstructionT &Def) const {
  const BlockT *DefBlock = Def.getParent();
  for (const CycleT *Cycle = CI.getCycle(DefBlock);
       Cycle && !Cycle->contains(&ObservingBlock);
       Cycle = Cycle->getParentCycle()) {
    if (DivergentExitCycles.contains(Cycle)) {
      return true;
    }
  }
  return false;
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::analyzeControlDivergence(
    const InstructionT &Term) {
  const auto *DivTermBlock = Term.getParent();
  DivergentTermBlocks.insert(DivTermBlock);
  LLVM_DEBUG(dbgs() << "analyzeControlDiv " << Context.print(DivTermBlock)
                    << "\n");

  // Don't propagate divergence from unreachable blocks.
  if (!DT.isReachableFromEntry(DivTermBlock))
    return;

  const auto &DivDesc = SDA.getJoinBlocks(DivTermBlock);
  SmallVector<const CycleT *> DivCycles;

  // Iterate over all blocks now reachable by a disjoint path join
  for (const auto *JoinBlock : DivDesc.JoinDivBlocks) {
    const auto *Cycle = CI.getCycle(JoinBlock);
    LLVM_DEBUG(dbgs() << "visiting join block " << Context.print(JoinBlock)
                      << "\n");
    if (const auto *Outermost = getOutermostDivergentCycle(
            Cycle, DivTermBlock, JoinBlock, DT, Context)) {
      LLVM_DEBUG(dbgs() << "found divergent cycle\n");
      DivCycles.push_back(Outermost);
      continue;
    }
    taintAndPushPhiNodes(*JoinBlock);
  }

  // Sort by increasing depth so that outer cycles are visited before the
  // cycles nested inside them. This allows later (inner) cycles to be skipped
  // because they are already contained in earlier ones.
  llvm::sort(DivCycles, [](const CycleT *A, const CycleT *B) {
    return A->getDepth() < B->getDepth();
  });

  // Cycles that are assumed divergent due to the diverged entry
  // criterion potentially contain temporal divergence depending on
  // the DFS chosen. Conservatively, all values produced in such a
  // cycle are assumed divergent. "Cycle invariant" values may be
  // assumed uniform, but that requires further analysis.
  for (auto *C : DivCycles) {
    if (!insertIfNotContained(AssumedDivergent, C))
      continue;
    LLVM_DEBUG(dbgs() << "process divergent cycle\n");
    for (const BlockT *BB : C->blocks()) {
      taintAndPushAllDefs(*BB);
    }
  }

  const auto *BranchCycle = CI.getCycle(DivTermBlock);
  assert(DivDesc.CycleDivBlocks.empty() || BranchCycle);
  for (const auto *DivExitBlock : DivDesc.CycleDivBlocks) {
    propagateCycleExitDivergence(*DivExitBlock, *BranchCycle);
  }
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::compute() {
  // Initialize worklist.
  auto DivValuesCopy = DivergentValues;
  for (const auto DivVal : DivValuesCopy) {
    assert(isDivergent(DivVal) && "Worklist invariant violated!");
    pushUsers(DivVal);
  }

  // All values on the Worklist are divergent.
  // Their users may not have been updated yet.
  while (!Worklist.empty()) {
    const InstructionT *I = Worklist.back();
    Worklist.pop_back();

    LLVM_DEBUG(dbgs() << "worklist pop: " << Context.print(I) << "\n");

    if (I->isTerminator()) {
      analyzeControlDivergence(*I);
      continue;
    }

    // propagate value divergence to users
    assert(isDivergent(*I) && "Worklist invariant violated!");
    pushUsers(*I);
  }
}

template <typename ContextT>
bool GenericUniformityAnalysisImpl<ContextT>::isAlwaysUniform(
    const InstructionT &Instr) const {
  return UniformOverrides.contains(&Instr);
}

template <typename ContextT>
GenericUniformityInfo<ContextT>::GenericUniformityInfo(
    FunctionT &Func, const DominatorTreeT &DT, const CycleInfoT &CI,
    const TargetTransformInfo *TTI)
    : F(&Func) {
  DA.reset(new ImplT{Func, DT, CI, TTI});
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
  bool haveDivergentArgs = false;

  // Control flow instructions may be divergent even if their inputs are
  // uniform. Thus, although exceedingly rare, it is possible to have a program
  // with no divergent values but with divergent control structures.
  if (DivergentValues.empty() && DivergentTermBlocks.empty() &&
      DivergentExitCycles.empty()) {
    OS << "ALL VALUES UNIFORM\n";
    return;
  }

  for (const auto &entry : DivergentValues) {
    const BlockT *parent = Context.getDefBlock(entry);
    if (!parent) {
      if (!haveDivergentArgs) {
        OS << "DIVERGENT ARGUMENTS:\n";
        haveDivergentArgs = true;
      }
      OS << "  DIVERGENT: " << Context.print(entry) << '\n';
    }
  }

  if (!AssumedDivergent.empty()) {
    OS << "CYCLES ASSSUMED DIVERGENT:\n";
    for (const CycleT *cycle : AssumedDivergent) {
      OS << "  " << cycle->print(Context) << '\n';
    }
  }

  if (!DivergentExitCycles.empty()) {
    OS << "CYCLES WITH DIVERGENT EXIT:\n";
    for (const CycleT *cycle : DivergentExitCycles) {
      OS << "  " << cycle->print(Context) << '\n';
    }
  }

  for (auto &block : F) {
    OS << "\nBLOCK " << Context.print(&block) << '\n';

    OS << "DEFINITIONS\n";
    SmallVector<ConstValueRefT, 16> defs;
    Context.appendBlockDefs(defs, block);
    for (auto value : defs) {
      if (isDivergent(value))
        OS << "  DIVERGENT: ";
      else
        OS << "             ";
      OS << Context.print(value) << '\n';
    }

    OS << "TERMINATORS\n";
    SmallVector<const InstructionT *, 8> terms;
    Context.appendBlockTerms(terms, block);
    bool divergentTerminators = hasDivergentTerminator(block);
    for (auto *T : terms) {
      if (divergentTerminators)
        OS << "  DIVERGENT: ";
      else
        OS << "             ";
      OS << Context.print(T) << '\n';
    }

    OS << "END BLOCK\n";
  }
}

template <typename ContextT>
bool GenericUniformityInfo<ContextT>::hasDivergence() const {
  return DA->hasDivergence();
}

/// Whether \p V is divergent at its definition.
template <typename ContextT>
bool GenericUniformityInfo<ContextT>::isDivergent(ConstValueRefT V) const {
  return DA->isDivergent(V);
}

template <typename ContextT>
bool GenericUniformityInfo<ContextT>::isDivergent(const InstructionT *I) const {
  return DA->isDivergent(*I);
}

template <typename ContextT>
bool GenericUniformityInfo<ContextT>::isDivergentUse(const UseT &U) const {
  return DA->isDivergentUse(U);
}

template <typename ContextT>
bool GenericUniformityInfo<ContextT>::hasDivergentTerminator(const BlockT &B) {
  return DA->hasDivergentTerminator(B);
}

/// \brief Helper function for printing.
template <typename ContextT>
void GenericUniformityInfo<ContextT>::print(raw_ostream &out) const {
  DA->print(out);
}

template <typename ContextT>
void llvm::ModifiedPostOrder<ContextT>::computeStackPO(
    SmallVectorImpl<BlockT *> &Stack, const CycleInfoT &CI, const CycleT *Cycle,
    SmallPtrSetImpl<BlockT *> &Finalized) {
  LLVM_DEBUG(dbgs() << "inside computeStackPO\n");
  while (!Stack.empty()) {
    auto *NextBB = Stack.back();
    if (Finalized.count(NextBB)) {
      Stack.pop_back();
      continue;
    }
    LLVM_DEBUG(dbgs() << "  visiting " << CI.getSSAContext().print(NextBB)
                      << "\n");
    auto *NestedCycle = CI.getCycle(NextBB);
    if (Cycle != NestedCycle && (!Cycle || Cycle->contains(NestedCycle))) {
      LLVM_DEBUG(dbgs() << "  found a cycle\n");
      while (NestedCycle->getParentCycle() != Cycle)
        NestedCycle = NestedCycle->getParentCycle();

      SmallVector<BlockT *, 3> NestedExits;
      NestedCycle->getExitBlocks(NestedExits);
      bool PushedNodes = false;
      for (auto *NestedExitBB : NestedExits) {
        LLVM_DEBUG(dbgs() << "  examine exit: "
                          << CI.getSSAContext().print(NestedExitBB) << "\n");
        if (Cycle && !Cycle->contains(NestedExitBB))
          continue;
        if (Finalized.count(NestedExitBB))
          continue;
        PushedNodes = true;
        Stack.push_back(NestedExitBB);
        LLVM_DEBUG(dbgs() << "  pushed exit: "
                          << CI.getSSAContext().print(NestedExitBB) << "\n");
      }
      if (!PushedNodes) {
        // All loop exits finalized -> finish this node
        Stack.pop_back();
        computeCyclePO(CI, NestedCycle, Finalized);
      }
      continue;
    }

    LLVM_DEBUG(dbgs() << "  no nested cycle, going into DAG\n");
    // DAG-style
    bool PushedNodes = false;
    for (auto *SuccBB : successors(NextBB)) {
      LLVM_DEBUG(dbgs() << "  examine succ: "
                        << CI.getSSAContext().print(SuccBB) << "\n");
      if (Cycle && !Cycle->contains(SuccBB))
        continue;
      if (Finalized.count(SuccBB))
        continue;
      PushedNodes = true;
      Stack.push_back(SuccBB);
      LLVM_DEBUG(dbgs() << "  pushed succ: " << CI.getSSAContext().print(SuccBB)
                        << "\n");
    }
    if (!PushedNodes) {
      // Never push nodes twice
      LLVM_DEBUG(dbgs() << "  finishing node: "
                        << CI.getSSAContext().print(NextBB) << "\n");
      Stack.pop_back();
      Finalized.insert(NextBB);
      appendBlock(*NextBB);
    }
  }
  LLVM_DEBUG(dbgs() << "exited computeStackPO\n");
}

template <typename ContextT>
void ModifiedPostOrder<ContextT>::computeCyclePO(
    const CycleInfoT &CI, const CycleT *Cycle,
    SmallPtrSetImpl<BlockT *> &Finalized) {
  LLVM_DEBUG(dbgs() << "inside computeCyclePO\n");
  SmallVector<BlockT *> Stack;
  auto *CycleHeader = Cycle->getHeader();

  LLVM_DEBUG(dbgs() << "  noted header: "
                    << CI.getSSAContext().print(CycleHeader) << "\n");
  assert(!Finalized.count(CycleHeader));
  Finalized.insert(CycleHeader);

  // Visit the header last
  LLVM_DEBUG(dbgs() << "  finishing header: "
                    << CI.getSSAContext().print(CycleHeader) << "\n");
  appendBlock(*CycleHeader, Cycle->isReducible());

  // Initialize with immediate successors
  for (auto *BB : successors(CycleHeader)) {
    LLVM_DEBUG(dbgs() << "  examine succ: " << CI.getSSAContext().print(BB)
                      << "\n");
    if (!Cycle->contains(BB))
      continue;
    if (BB == CycleHeader)
      continue;
    if (!Finalized.count(BB)) {
      LLVM_DEBUG(dbgs() << "  pushed succ: " << CI.getSSAContext().print(BB)
                        << "\n");
      Stack.push_back(BB);
    }
  }

  // Compute PO inside region
  computeStackPO(Stack, CI, Cycle, Finalized);

  LLVM_DEBUG(dbgs() << "exited computeCyclePO\n");
}

/// \brief Generically compute the modified post order.
template <typename ContextT>
void llvm::ModifiedPostOrder<ContextT>::compute(const CycleInfoT &CI) {
  SmallPtrSet<BlockT *, 32> Finalized;
  SmallVector<BlockT *> Stack;
  auto *F = CI.getFunction();
  Stack.reserve(24); // FIXME made-up number
  Stack.push_back(GraphTraits<FunctionT *>::getEntryNode(F));
  computeStackPO(Stack, CI, nullptr, Finalized);
}

} // namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_ADT_GENERICUNIFORMITYIMPL_H
//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines the PointerIntPair class.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_POINTERINTPAIR_H
#define LLVM_ADT_POINTERINTPAIR_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

namespace llvm {

namespace detail {
template <typename Ptr> struct PunnedPointer {
  static_assert(sizeof(Ptr) == sizeof(intptr_t), "");

  // Asserts that allow us to let the compiler implement the destructor and
  // copy/move constructors
  static_assert(std::is_trivially_destructible<Ptr>::value, "");
  static_assert(std::is_trivially_copy_constructible<Ptr>::value, "");
  static_assert(std::is_trivially_move_constructible<Ptr>::value, "");

  explicit constexpr PunnedPointer(intptr_t i = 0) { *this = i; }

  constexpr intptr_t asInt() const {
    intptr_t R = 0;
    std::memcpy(&R, Data, sizeof(R));
    return R;
  }

  constexpr operator intptr_t() const { return asInt(); }

  constexpr PunnedPointer &operator=(intptr_t V) {
    std::memcpy(Data, &V, sizeof(Data));
    return *this;
  }

  Ptr *getPointerAddress() { return reinterpret_cast<Ptr *>(Data); }
  const Ptr *getPointerAddress() const {
    return reinterpret_cast<const Ptr *>(Data);
  }

private:
  alignas(Ptr) unsigned char Data[sizeof(Ptr)];
};
} // namespace detail

template <typename T, typename Enable> struct DenseMapInfo;
template <typename PointerT, unsigned IntBits, typename PtrTraits>
struct PointerIntPairInfo;

/// PointerIntPair - This class implements a pair of a pointer and small
/// integer.  It is designed to represent this in the space required by one
/// pointer by bitmangling the integer into the low part of the pointer.  This
/// can only be done for small integers: typically up to 3 bits, but it depends
/// on the number of bits available according to PointerLikeTypeTraits for the
/// type.
///
/// Note that PointerIntPair always puts the IntVal part in the highest bits
/// possible.  For example, PointerIntPair<void*, 1, bool> will put the bit for
/// the bool into bit #2, not bit #0, which allows the low two bits to be used
/// for something else.  For example, this allows:
///   PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
/// ... and the two bools will land in different bits.
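///
/// A minimal usage sketch (illustrative; the names are not part of the API):
/// \code
///   int Object = 0;
///   PointerIntPair<int *, 1, bool> PtrAndFlag(&Object, true);
///   int *P = PtrAndFlag.getPointer(); // == &Object
///   bool B = PtrAndFlag.getInt();     // == true
///   PtrAndFlag.setInt(false);         // pointer bits are left untouched
/// \endcode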
template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
          typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
          typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
class PointerIntPair {
  // Used by MSVC visualizer and generally helpful for debugging/visualizing.
  using InfoTy = Info;
  detail::PunnedPointer<PointerTy> Value;

public:
  constexpr PointerIntPair() = default;

  PointerIntPair(PointerTy PtrVal, IntType IntVal) {
    setPointerAndInt(PtrVal, IntVal);
  }

  explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }

  PointerTy getPointer() const { return Info::getPointer(Value); }

  IntType getInt() const { return (IntType)Info::getInt(Value); }

  void setPointer(PointerTy PtrVal) & {
    Value = Info::updatePointer(Value, PtrVal);
  }

  void setInt(IntType IntVal) & {
    Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
  }

  void initWithPointer(PointerTy PtrVal) & {
    Value = Info::updatePointer(0, PtrVal);
  }

  void setPointerAndInt(PointerTy PtrVal, IntType IntVal) & {
    Value = Info::updateInt(Info::updatePointer(0, PtrVal),
                            static_cast<intptr_t>(IntVal));
  }

  PointerTy const *getAddrOfPointer() const {
    return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
  }

  PointerTy *getAddrOfPointer() {
    assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
           "Can only return the address if IntBits is cleared and "
           "PtrTraits doesn't change the pointer");
    return Value.getPointerAddress();
  }

  void *getOpaqueValue() const {
    return reinterpret_cast<void *>(Value.asInt());
  }

  void setFromOpaqueValue(void *Val) & {
    Value = reinterpret_cast<intptr_t>(Val);
  }

  static PointerIntPair getFromOpaqueValue(void *V) {
    PointerIntPair P;
    P.setFromOpaqueValue(V);
    return P;
  }

  // Allow PointerIntPairs to be created from const void * if and only if the
  // pointer type could be created from a const void *.
  static PointerIntPair getFromOpaqueValue(const void *V) {
    (void)PtrTraits::getFromVoidPointer(V);
    return getFromOpaqueValue(const_cast<void *>(V));
  }

  bool operator==(const PointerIntPair &RHS) const {
    return Value == RHS.Value;
  }

  bool operator!=(const PointerIntPair &RHS) const {
    return Value != RHS.Value;
  }

  bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
  bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }

  bool operator<=(const PointerIntPair &RHS) const {
    return Value <= RHS.Value;
  }

  bool operator>=(const PointerIntPair &RHS) const {
    return Value >= RHS.Value;
  }
};

template <typename PointerT, unsigned IntBits, typename PtrTraits>
struct PointerIntPairInfo {
  static_assert(PtrTraits::NumLowBitsAvailable <
                    std::numeric_limits<uintptr_t>::digits,
                "cannot use a pointer type that has all bits free");
  static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
                "PointerIntPair with integer size too large for pointer");
  enum MaskAndShiftConstants : uintptr_t {
    /// PointerBitMask - The bits that come from the pointer.
    PointerBitMask =
        ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),

    /// IntShift - The number of low bits that we reserve for other uses, and
    /// keep zero.
    IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,

    /// IntMask - This is the unshifted mask for valid bits of the int type.
    IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),

    // ShiftedIntMask - This is the bits for the integer shifted in place.
    ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
  };
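  // Worked example (illustrative): with PtrTraits::NumLowBitsAvailable == 3
  // and IntBits == 1, PointerBitMask == ~uintptr_t(0b111), IntShift == 2,
  // IntMask == 0b1, and ShiftedIntMask == 0b100, so the integer lives in the
  // highest free bit (#2), leaving bits #0 and #1 still available.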

  static PointerT getPointer(intptr_t Value) {
    return PtrTraits::getFromVoidPointer(
        reinterpret_cast<void *>(Value & PointerBitMask));
  }

  static intptr_t getInt(intptr_t Value) {
    return (Value >> IntShift) & IntMask;
  }

  static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
    intptr_t PtrWord =
        reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
    assert((PtrWord & ~PointerBitMask) == 0 &&
           "Pointer is not sufficiently aligned");
    // Preserve all low bits, just update the pointer.
    return PtrWord | (OrigValue & ~PointerBitMask);
  }

  static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
    intptr_t IntWord = static_cast<intptr_t>(Int);
    assert((IntWord & ~IntMask) == 0 && "Integer too large for field");

    // Preserve all bits other than the ones we are updating.
    return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
  }
};

// Provide specialization of DenseMapInfo for PointerIntPair.
template <typename PointerTy, unsigned IntBits, typename IntType>
struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>, void> {
  using Ty = PointerIntPair<PointerTy, IntBits, IntType>;

  static Ty getEmptyKey() {
    uintptr_t Val = static_cast<uintptr_t>(-1);
    Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
  }

  static Ty getTombstoneKey() {
    uintptr_t Val = static_cast<uintptr_t>(-2);
    Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
    return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
  }

  static unsigned getHashValue(Ty V) {
    uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
    return unsigned(IV) ^ unsigned(IV >> 9);
  }

  static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
};

// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
template <typename PointerTy, unsigned IntBits, typename IntType,
          typename PtrTraits>
struct PointerLikeTypeTraits<
    PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
  static inline void *
  getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
    return P.getOpaqueValue();
  }

  static inline PointerIntPair<PointerTy, IntBits, IntType>
  getFromVoidPointer(void *P) {
    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
  }

  static inline PointerIntPair<PointerTy, IntBits, IntType>
  getFromVoidPointer(const void *P) {
    return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
  }

  static constexpr int NumLowBitsAvailable =
      PtrTraits::NumLowBitsAvailable - IntBits;
};

// Allow structured bindings on PointerIntPair.
template <std::size_t I, typename PointerTy, unsigned IntBits, typename IntType,
          typename PtrTraits, typename Info>
decltype(auto)
get(const PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info> &Pair) {
  static_assert(I < 2);
  if constexpr (I == 0)
    return Pair.getPointer();
  else
    return Pair.getInt();
}
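
// Usage sketch (illustrative):
//   PointerIntPair<int *, 1, bool> Pair(Ptr, true);
//   auto [P, B] = Pair; // P == Pair.getPointer(), B == Pair.getInt()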

} // end namespace llvm

namespace std {
template <typename PointerTy, unsigned IntBits, typename IntType,
          typename PtrTraits, typename Info>
struct tuple_size<
    llvm::PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info>>
    : std::integral_constant<std::size_t, 2> {};

template <std::size_t I, typename PointerTy, unsigned IntBits, typename IntType,
          typename PtrTraits, typename Info>
struct tuple_element<
    I, llvm::PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info>>
    : std::conditional<I == 0, PointerTy, IntType> {};
} // namespace std

#endif // LLVM_ADT_POINTERINTPAIR_H
//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the APSInt class, which is a simple class that
/// represents an arbitrary sized integer that knows its signedness.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_APSINT_H
#define LLVM_ADT_APSINT_H

#include "llvm/ADT/APInt.h"

namespace llvm {

/// An arbitrary precision integer that knows its signedness.
class [[nodiscard]] APSInt : public APInt {
  bool IsUnsigned = false;

public:
  /// Default constructor that creates an uninitialized APInt.
  explicit APSInt() = default;

  /// Create an APSInt with the specified width, default to unsigned.
  explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
      : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}

  explicit APSInt(APInt I, bool isUnsigned = true)
      : APInt(std::move(I)), IsUnsigned(isUnsigned) {}

  /// Construct an APSInt from a string representation.
  ///
  /// This constructor interprets the string \p Str using the radix of 10.
  /// The interpretation stops at the end of the string. The bit width of the
  /// constructed APSInt is determined automatically.
  ///
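  /// For example (illustrative), APSInt("-15") yields a value just wide
  /// enough to hold -15.
  ///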
  /// \param Str the string to be interpreted.
  explicit APSInt(StringRef Str);

  /// Determine sign of this APSInt.
  ///
  /// \returns true if this APSInt is negative, false otherwise
  bool isNegative() const { return isSigned() && APInt::isNegative(); }

  /// Determine if this APSInt Value is non-negative (>= 0)
  ///
  /// \returns true if this APSInt is non-negative, false otherwise
  bool isNonNegative() const { return !isNegative(); }

  /// Determine if this APSInt Value is positive.
  ///
  /// This tests if the value of this APSInt is positive (> 0). Note
  /// that 0 is not a positive value.
  ///
  /// \returns true if this APSInt is positive.
  bool isStrictlyPositive() const { return isNonNegative() && !isZero(); }

  APSInt &operator=(APInt RHS) {
    // Retain our current sign.
    APInt::operator=(std::move(RHS));
    return *this;
  }

  APSInt &operator=(uint64_t RHS) {
    // Retain our current sign.
    APInt::operator=(RHS);
    return *this;
  }

  // Query sign information.
  bool isSigned() const { return !IsUnsigned; }
  bool isUnsigned() const { return IsUnsigned; }
  void setIsUnsigned(bool Val) { IsUnsigned = Val; }
  void setIsSigned(bool Val) { IsUnsigned = !Val; }

  /// Append this APSInt to the specified SmallString.
  void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
    APInt::toString(Str, Radix, isSigned());
  }
  using APInt::toString;

  /// Whether this value is representable as an int64_t.
  bool isRepresentableByInt64() const {
    // An unsigned value that uses all 64 bits technically fits into an
    // int64_t, but it would read back as a negative number unless the caller
    // remembered to cast it back to unsigned. Rather than hand out such a
    // misleading value, refuse to treat it as representable in the first
    // place.
    return isSigned() ? isSignedIntN(64) : isIntN(63);
  }

  /// Get the correctly-extended \c int64_t value.
  int64_t getExtValue() const {
    assert(isRepresentableByInt64() && "Too many bits for int64_t");
    return isSigned() ? getSExtValue() : getZExtValue();
  }

  std::optional<int64_t> tryExtValue() const {
    return isRepresentableByInt64() ? std::optional<int64_t>(getExtValue())
                                    : std::nullopt;
  }
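
  // For example (illustrative): a 128-bit signed APSInt holding 7 is
  // representable (getExtValue() == 7), while a 64-bit unsigned value with
  // the top bit set is not (tryExtValue() == std::nullopt).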

  APSInt trunc(uint32_t width) const {
    return APSInt(APInt::trunc(width), IsUnsigned);
  }

  APSInt extend(uint32_t width) const {
    if (IsUnsigned)
      return APSInt(zext(width), IsUnsigned);
    else
      return APSInt(sext(width), IsUnsigned);
  }

  APSInt extOrTrunc(uint32_t width) const {
    if (IsUnsigned)
      return APSInt(zextOrTrunc(width), IsUnsigned);
    else
      return APSInt(sextOrTrunc(width), IsUnsigned);
  }

  const APSInt &operator%=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    if (IsUnsigned)
      *this = urem(RHS);
    else
      *this = srem(RHS);
    return *this;
  }
  const APSInt &operator/=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    if (IsUnsigned)
      *this = udiv(RHS);
    else
      *this = sdiv(RHS);
    return *this;
  }
  APSInt operator%(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
  }
  APSInt operator/(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
  }

  APSInt operator>>(unsigned Amt) const {
    return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
  }
  APSInt &operator>>=(unsigned Amt) {
    if (IsUnsigned)
      lshrInPlace(Amt);
    else
      ashrInPlace(Amt);
    return *this;
  }
  APSInt relativeShr(unsigned Amt) const {
    return IsUnsigned ? APSInt(relativeLShr(Amt), true)
                      : APSInt(relativeAShr(Amt), false);
  }

  inline bool operator<(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? ult(RHS) : slt(RHS);
  }
  inline bool operator>(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? ugt(RHS) : sgt(RHS);
  }
  inline bool operator<=(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? ule(RHS) : sle(RHS);
  }
  inline bool operator>=(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return IsUnsigned ? uge(RHS) : sge(RHS);
  }
  inline bool operator==(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return eq(RHS);
  }
  inline bool operator!=(const APSInt &RHS) const { return !((*this) == RHS); }

  bool operator==(int64_t RHS) const {
    return compareValues(*this, get(RHS)) == 0;
  }
  bool operator!=(int64_t RHS) const {
    return compareValues(*this, get(RHS)) != 0;
  }
  bool operator<=(int64_t RHS) const {
    return compareValues(*this, get(RHS)) <= 0;
  }
  bool operator>=(int64_t RHS) const {
    return compareValues(*this, get(RHS)) >= 0;
  }
  bool operator<(int64_t RHS) const {
    return compareValues(*this, get(RHS)) < 0;
  }
  bool operator>(int64_t RHS) const {
    return compareValues(*this, get(RHS)) > 0;
  }

  // The remaining operators just wrap the logic of APInt, but retain the
  // signedness information.

  APSInt operator<<(unsigned Bits) const {
    return APSInt(static_cast<const APInt &>(*this) << Bits, IsUnsigned);
  }
  APSInt &operator<<=(unsigned Amt) {
    static_cast<APInt &>(*this) <<= Amt;
    return *this;
  }
  APSInt relativeShl(unsigned Amt) const {
    return IsUnsigned ? APSInt(relativeLShl(Amt), true)
                      : APSInt(relativeAShl(Amt), false);
  }

  APSInt &operator++() {
    ++(static_cast<APInt &>(*this));
    return *this;
  }
  APSInt &operator--() {
    --(static_cast<APInt &>(*this));
    return *this;
  }
  APSInt operator++(int) {
    return APSInt(++static_cast<APInt &>(*this), IsUnsigned);
  }
  APSInt operator--(int) {
    return APSInt(--static_cast<APInt &>(*this), IsUnsigned);
  }
  APSInt operator-() const {
    return APSInt(-static_cast<const APInt &>(*this), IsUnsigned);
  }
  APSInt &operator+=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt &>(*this) += RHS;
    return *this;
  }
  APSInt &operator-=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt &>(*this) -= RHS;
    return *this;
  }
  APSInt &operator*=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt &>(*this) *= RHS;
    return *this;
  }
  APSInt &operator&=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt &>(*this) &= RHS;
    return *this;
  }
  APSInt &operator|=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt &>(*this) |= RHS;
    return *this;
  }
  APSInt &operator^=(const APSInt &RHS) {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    static_cast<APInt &>(*this) ^= RHS;
    return *this;
  }

  APSInt operator&(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt &>(*this) & RHS, IsUnsigned);
  }

  APSInt operator|(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt &>(*this) | RHS, IsUnsigned);
  }

  APSInt operator^(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt &>(*this) ^ RHS, IsUnsigned);
  }

  APSInt operator*(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt &>(*this) * RHS, IsUnsigned);
  }
  APSInt operator+(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt &>(*this) + RHS, IsUnsigned);
  }
  APSInt operator-(const APSInt &RHS) const {
    assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
    return APSInt(static_cast<const APInt &>(*this) - RHS, IsUnsigned);
  }
  APSInt operator~() const {
    return APSInt(~static_cast<const APInt &>(*this), IsUnsigned);
  }

  /// Return the APSInt representing the maximum integer value with the given
  /// bit width and signedness.
  static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
    return APSInt(Unsigned ? APInt::getMaxValue(numBits)
                           : APInt::getSignedMaxValue(numBits),
                  Unsigned);
  }

  /// Return the APSInt representing the minimum integer value with the given
  /// bit width and signedness.
  static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
    return APSInt(Unsigned ? APInt::getMinValue(numBits)
                           : APInt::getSignedMinValue(numBits),
                  Unsigned);
  }

  /// Determine if two APSInts have the same value, zero- or
  /// sign-extending as needed.
  static bool isSameValue(const APSInt &I1, const APSInt &I2) {
    return !compareValues(I1, I2);
  }

  /// Compare underlying values of two numbers.
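  ///
  /// For instance (illustrative): an 8-bit unsigned 255 compares equal to a
  /// 32-bit signed 255 (result 0), since the narrower operand is extended
  /// before comparing; a negative signed value always compares less than any
  /// unsigned value.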
  static int compareValues(const APSInt &I1, const APSInt &I2) {
    if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
      return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);

    // Check for a bit-width mismatch.
    if (I1.getBitWidth() > I2.getBitWidth())
      return compareValues(I1, I2.extend(I1.getBitWidth()));
    if (I2.getBitWidth() > I1.getBitWidth())
      return compareValues(I1.extend(I2.getBitWidth()), I2);

    // We have a signedness mismatch. Check for negative values and do an
    // unsigned compare if both are positive.
    if (I1.isSigned()) {
      assert(!I2.isSigned() && "Expected signed mismatch");
      if (I1.isNegative())
        return -1;
    } else {
      assert(I2.isSigned() && "Expected signed mismatch");
      if (I2.isNegative())
        return 1;
    }

    return I1.compare(I2);
  }

  static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
  static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }

  /// Used to insert APSInt objects, or objects that contain APSInt objects,
  /// into FoldingSets.
  void Profile(FoldingSetNodeID &ID) const;
};

inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }

inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
  I.print(OS, I.isSigned());
  return OS;
}

/// Provide DenseMapInfo for APSInt, using the DenseMapInfo for APInt.
template <> struct DenseMapInfo<APSInt, void> {
  static inline APSInt getEmptyKey() {
    return APSInt(DenseMapInfo<APInt, void>::getEmptyKey());
  }

  static inline APSInt getTombstoneKey() {
    return APSInt(DenseMapInfo<APInt, void>::getTombstoneKey());
  }

  static unsigned getHashValue(const APSInt &Key) {
    return DenseMapInfo<APInt, void>::getHashValue(Key);
  }

  static bool isEqual(const APSInt &LHS, const APSInt &RHS) {
    return LHS.getBitWidth() == RHS.getBitWidth() &&
           LHS.isUnsigned() == RHS.isUnsigned() && LHS == RHS;
  }
};

} // end namespace llvm

#endif
//===- llvm/ADT/ilist_node_options.h - ilist_node Options -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ILIST_NODE_OPTIONS_H
#define LLVM_ADT_ILIST_NODE_OPTIONS_H

#include "llvm/Config/abi-breaking.h"

#include <type_traits>

namespace llvm {

template <bool EnableSentinelTracking> class ilist_node_base;
template <bool EnableSentinelTracking> class ilist_base;

/// Option to choose whether to track sentinels.
///
/// This option affects the ABI for the nodes.  When not specified explicitly,
/// the ABI depends on LLVM_ENABLE_ABI_BREAKING_CHECKS.  Specify explicitly to
/// enable \a ilist_node::isSentinel().
template <bool EnableSentinelTracking> struct ilist_sentinel_tracking {};

/// Option to specify a tag for the node type.
///
/// This option allows a single value type to be inserted in multiple lists
/// simultaneously.  See \a ilist_node for usage examples.
template <class Tag> struct ilist_tag {};

namespace ilist_detail {

/// Helper trait for recording whether an option is specified explicitly.
template <bool IsExplicit> struct explicitness {
  static const bool is_explicit = IsExplicit;
};
typedef explicitness<true> is_explicit;
typedef explicitness<false> is_implicit;

/// Check whether an option is valid.
///
/// The steps for adding and enabling a new ilist option include:
/// \li define the option, ilist_foo<Bar>, above;
/// \li add new parameters for Bar to \a ilist_detail::node_options;
/// \li add an extraction meta-function, ilist_detail::extract_foo;
/// \li call extract_foo from \a ilist_detail::compute_node_options and pass it
/// into \a ilist_detail::node_options; and
/// \li specialize \c is_valid_option<ilist_foo<Bar>> to inherit from \c
/// std::true_type to get static assertions passing in \a simple_ilist and \a
/// ilist_node.
template <class Option> struct is_valid_option : std::false_type {};

/// Extract sentinel tracking option.
///
/// Look through \p Options for the \a ilist_sentinel_tracking option, with the
/// default depending on LLVM_ENABLE_ABI_BREAKING_CHECKS.
template <class... Options> struct extract_sentinel_tracking;
template <bool EnableSentinelTracking, class... Options>
struct extract_sentinel_tracking<
    ilist_sentinel_tracking<EnableSentinelTracking>, Options...>
    : std::integral_constant<bool, EnableSentinelTracking>, is_explicit {};
template <class Option1, class... Options>
struct extract_sentinel_tracking<Option1, Options...>
    : extract_sentinel_tracking<Options...> {};
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
template <> struct extract_sentinel_tracking<> : std::true_type, is_implicit {};
#else
template <>
struct extract_sentinel_tracking<> : std::false_type, is_implicit {};
#endif
template <bool EnableSentinelTracking>
struct is_valid_option<ilist_sentinel_tracking<EnableSentinelTracking>>
    : std::true_type {};

/// Extract custom tag option.
///
/// Look through \p Options for the \a ilist_tag option, pulling out the
/// custom tag type, using void as a default.
template <class... Options> struct extract_tag;
template <class Tag, class... Options>
struct extract_tag<ilist_tag<Tag>, Options...> {
  typedef Tag type;
};
template <class Option1, class... Options>
struct extract_tag<Option1, Options...> : extract_tag<Options...> {};
template <> struct extract_tag<> {
  typedef void type;
};
template <class Tag> struct is_valid_option<ilist_tag<Tag>> : std::true_type {};

/// Check whether options are valid.
///
/// The conjunction of \a is_valid_option on each individual option.
template <class... Options> struct check_options;
template <> struct check_options<> : std::true_type {};
template <class Option1, class... Options>
struct check_options<Option1, Options...>
    : std::integral_constant<bool, is_valid_option<Option1>::value &&
                                       check_options<Options...>::value> {};

/// Traits for options for \a ilist_node.
///
/// This is usually computed via \a compute_node_options.
template <class T, bool EnableSentinelTracking, bool IsSentinelTrackingExplicit,
          class TagT>
struct node_options {
  typedef T value_type;
  typedef T *pointer;
  typedef T &reference;
  typedef const T *const_pointer;
  typedef const T &const_reference;

  static const bool enable_sentinel_tracking = EnableSentinelTracking;
  static const bool is_sentinel_tracking_explicit = IsSentinelTrackingExplicit;
  typedef TagT tag;
  typedef ilist_node_base<enable_sentinel_tracking> node_base_type;
  typedef ilist_base<enable_sentinel_tracking> list_base_type;
};

template <class T, class... Options> struct compute_node_options {
  typedef node_options<T, extract_sentinel_tracking<Options...>::value,
                       extract_sentinel_tracking<Options...>::is_explicit,
                       typename extract_tag<Options...>::type>
      type;
};
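
// For example (illustrative, with MyT and TagA being hypothetical types):
//
//   compute_node_options<MyT, ilist_sentinel_tracking<true>,
//                        ilist_tag<TagA>>::type
//
// has enable_sentinel_tracking == true, is_sentinel_tracking_explicit ==
// true, and tag == TagA.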

} // end namespace ilist_detail
} // end namespace llvm

#endif // LLVM_ADT_ILIST_NODE_OPTIONS_H
//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///  This file provides Optional, a template class modeled in the spirit of
///  OCaml's 'opt' variant.  The idea is to strongly type whether or not
///  a value can be optional.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_OPTIONAL_H
#define LLVM_ADT_OPTIONAL_H

#include <optional>

namespace llvm {
// Legacy alias of llvm::Optional to std::optional.
// FIXME: Remove this after LLVM 16.
template <class T> using Optional = std::optional<T>;
} // namespace llvm

#endif // LLVM_ADT_OPTIONAL_H
//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines DenseMapInfo traits for DenseMap.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAPINFO_H
#define LLVM_ADT_DENSEMAPINFO_H

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <tuple>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

/// Simplistic combination of 32-bit hash values into 32-bit hash values.
static inline unsigned combineHashValue(unsigned a, unsigned b) {
  uint64_t key = (uint64_t)a << 32 | (uint64_t)b;
  key += ~(key << 32);
  key ^= (key >> 22);
  key += ~(key << 13);
  key ^= (key >> 8);
  key += (key << 3);
  key ^= (key >> 15);
  key += ~(key << 27);
  key ^= (key >> 31);
  return (unsigned)key;
}

} // end namespace detail

/// An information struct used to provide DenseMap with the various necessary
/// components for a given value type `T`. `Enable` is an optional additional
/// parameter that is used to support SFINAE (generally using std::enable_if_t)
/// in derived DenseMapInfo specializations; in non-SFINAE use cases this should
/// just be `void`.
template<typename T, typename Enable = void>
struct DenseMapInfo {
  //static inline T getEmptyKey();
  //static inline T getTombstoneKey();
  //static unsigned getHashValue(const T &Val);
  //static bool isEqual(const T &LHS, const T &RHS);
};
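
// A minimal sketch of a custom specialization (the `MyKey` type and its
// sentinel values are illustrative; real sentinels must be values that are
// never inserted into the map):
//
//   struct MyKey { int A, B; };
//   template <> struct DenseMapInfo<MyKey> {
//     static inline MyKey getEmptyKey() { return {INT_MIN, 0}; }
//     static inline MyKey getTombstoneKey() { return {INT_MIN, 1}; }
//     static unsigned getHashValue(const MyKey &K) {
//       return detail::combineHashValue(DenseMapInfo<int>::getHashValue(K.A),
//                                       DenseMapInfo<int>::getHashValue(K.B));
//     }
//     static bool isEqual(const MyKey &L, const MyKey &R) {
//       return L.A == R.A && L.B == R.B;
//     }
//   };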

// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
// declared key types. Assume that no pointer key type requires more than 4096
// bytes of alignment.
template<typename T>
struct DenseMapInfo<T*> {
  // The following should hold, but it would require T to be complete:
  // static_assert(alignof(T) <= (1 << Log2MaxAlign),
  //               "DenseMap does not support pointer keys requiring more than "
  //               "Log2MaxAlign bits of alignment");
  static constexpr uintptr_t Log2MaxAlign = 12;

  static inline T* getEmptyKey() {
    uintptr_t Val = static_cast<uintptr_t>(-1);
    Val <<= Log2MaxAlign;
    return reinterpret_cast<T*>(Val);
  }

  static inline T* getTombstoneKey() {
    uintptr_t Val = static_cast<uintptr_t>(-2);
    Val <<= Log2MaxAlign;
    return reinterpret_cast<T*>(Val);
  }

  static unsigned getHashValue(const T *PtrVal) {
    return (unsigned((uintptr_t)PtrVal) >> 4) ^
           (unsigned((uintptr_t)PtrVal) >> 9);
  }

  static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
};

// Provide DenseMapInfo for chars.
template<> struct DenseMapInfo<char> {
  static inline char getEmptyKey() { return ~0; }
  static inline char getTombstoneKey() { return ~0 - 1; }
  static unsigned getHashValue(const char& Val) { return Val * 37U; }

  static bool isEqual(const char &LHS, const char &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned chars.
template <> struct DenseMapInfo<unsigned char> {
  static inline unsigned char getEmptyKey() { return ~0; }
  static inline unsigned char getTombstoneKey() { return ~0 - 1; }
  static unsigned getHashValue(const unsigned char &Val) { return Val * 37U; }

  static bool isEqual(const unsigned char &LHS, const unsigned char &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned shorts.
template <> struct DenseMapInfo<unsigned short> {
  static inline unsigned short getEmptyKey() { return 0xFFFF; }
  static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
  static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }

  static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned ints.
template<> struct DenseMapInfo<unsigned> {
  static inline unsigned getEmptyKey() { return ~0U; }
  static inline unsigned getTombstoneKey() { return ~0U - 1; }
  static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }

  static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned longs.
template<> struct DenseMapInfo<unsigned long> {
  static inline unsigned long getEmptyKey() { return ~0UL; }
  static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }

  static unsigned getHashValue(const unsigned long& Val) {
    return (unsigned)(Val * 37UL);
  }

  static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned long longs.
template<> struct DenseMapInfo<unsigned long long> {
  static inline unsigned long long getEmptyKey() { return ~0ULL; }
  static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }

  static unsigned getHashValue(const unsigned long long& Val) {
    return (unsigned)(Val * 37ULL);
  }

  static bool isEqual(const unsigned long long& LHS,
                      const unsigned long long& RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for shorts.
template <> struct DenseMapInfo<short> {
  static inline short getEmptyKey() { return 0x7FFF; }
  static inline short getTombstoneKey() { return -0x7FFF - 1; }
  static unsigned getHashValue(const short &Val) { return Val * 37U; }
  static bool isEqual(const short &LHS, const short &RHS) { return LHS == RHS; }
};

// Provide DenseMapInfo for ints.
template<> struct DenseMapInfo<int> {
  static inline int getEmptyKey() { return 0x7fffffff; }
  static inline int getTombstoneKey() { return -0x7fffffff - 1; }
  static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }

  static bool isEqual(const int& LHS, const int& RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for longs.
template<> struct DenseMapInfo<long> {
  static inline long getEmptyKey() {
    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
  }

  static inline long getTombstoneKey() { return getEmptyKey() - 1L; }

  static unsigned getHashValue(const long& Val) {
    return (unsigned)(Val * 37UL);
  }

  static bool isEqual(const long& LHS, const long& RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for long longs.
template<> struct DenseMapInfo<long long> {
  static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
  static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }

  static unsigned getHashValue(const long long& Val) {
    return (unsigned)(Val * 37ULL);
  }

  static bool isEqual(const long long& LHS,
                      const long long& RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for all pairs whose members have info.
template<typename T, typename U>
struct DenseMapInfo<std::pair<T, U>> {
  using Pair = std::pair<T, U>;
  using FirstInfo = DenseMapInfo<T>;
  using SecondInfo = DenseMapInfo<U>;

  static inline Pair getEmptyKey() {
    return std::make_pair(FirstInfo::getEmptyKey(),
                          SecondInfo::getEmptyKey());
  }

  static inline Pair getTombstoneKey() {
    return std::make_pair(FirstInfo::getTombstoneKey(),
                          SecondInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const Pair& PairVal) {
    return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
                                    SecondInfo::getHashValue(PairVal.second));
  }

  // Expose an additional function intended to be used by other
  // specializations of DenseMapInfo without needing to know how
  // to combine hash values manually
  static unsigned getHashValuePiecewise(const T &First, const U &Second) {
    return detail::combineHashValue(FirstInfo::getHashValue(First),
                                    SecondInfo::getHashValue(Second));
  }

  static bool isEqual(const Pair &LHS, const Pair &RHS) {
    return FirstInfo::isEqual(LHS.first, RHS.first) &&
           SecondInfo::isEqual(LHS.second, RHS.second);
  }
};

// Provide DenseMapInfo for all tuples whose members have info.
template <typename... Ts> struct DenseMapInfo<std::tuple<Ts...>> {
  using Tuple = std::tuple<Ts...>;

  static inline Tuple getEmptyKey() {
    return Tuple(DenseMapInfo<Ts>::getEmptyKey()...);
  }

  static inline Tuple getTombstoneKey() {
    return Tuple(DenseMapInfo<Ts>::getTombstoneKey()...);
  }

  template <unsigned I>
  static unsigned getHashValueImpl(const Tuple &values, std::false_type) {
    using EltType = std::tuple_element_t<I, Tuple>;
    std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd;
    return detail::combineHashValue(
        DenseMapInfo<EltType>::getHashValue(std::get<I>(values)),
        getHashValueImpl<I + 1>(values, atEnd));
  }

  template <unsigned I>
  static unsigned getHashValueImpl(const Tuple &, std::true_type) {
    return 0;
  }

  static unsigned getHashValue(const std::tuple<Ts...> &values) {
    std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd;
    return getHashValueImpl<0>(values, atEnd);
  }

  template <unsigned I>
  static bool isEqualImpl(const Tuple &lhs, const Tuple &rhs, std::false_type) {
    using EltType = std::tuple_element_t<I, Tuple>;
    std::integral_constant<bool, I + 1 == sizeof...(Ts)> atEnd;
    return DenseMapInfo<EltType>::isEqual(std::get<I>(lhs), std::get<I>(rhs)) &&
           isEqualImpl<I + 1>(lhs, rhs, atEnd);
  }

  template <unsigned I>
  static bool isEqualImpl(const Tuple &, const Tuple &, std::true_type) {
    return true;
  }

  static bool isEqual(const Tuple &lhs, const Tuple &rhs) {
    std::integral_constant<bool, 0 == sizeof...(Ts)> atEnd;
    return isEqualImpl<0>(lhs, rhs, atEnd);
  }
};

} // end namespace llvm

#endif // LLVM_ADT_DENSEMAPINFO_H
//===- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
#define LLVM_ADT_DAGDELTAALGORITHM_H

#include <set>
#include <utility>
#include <vector>

namespace llvm {

/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
/// directed acyclic graphs using a predicate function.
///
/// The result of the algorithm is a subset of the input change set which is
/// guaranteed to satisfy the predicate, assuming that the input set did. For
/// well-formed predicates, the result set is guaranteed to be such that
/// removing any single element not required by the dependencies on the other
/// elements would falsify the predicate.
///
/// The DAG should be used to represent dependencies in the changes which are
/// likely to hold across the predicate function. That is, for a particular
/// changeset S and predicate P:
///
///   P(S) => P(S union pred(S))
///
/// The minimization algorithm uses this dependency information to attempt to
/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
/// is not required to satisfy this property, but the algorithm will run
/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
/// for more information on the properties which the predicate function itself
/// should satisfy.
class DAGDeltaAlgorithm {
  virtual void anchor();

public:
  using change_ty = unsigned;
  using edge_ty = std::pair<change_ty, change_ty>;

  // FIXME: Use a decent data structure.
  using changeset_ty = std::set<change_ty>;
  using changesetlist_ty = std::vector<changeset_ty>;

public:
  virtual ~DAGDeltaAlgorithm() = default;

  /// Run - Minimize the DAG formed by the \p Changes vertices and the
  /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
  /// changes and returning the smallest set which still satisfies the test
  /// predicate and the input \p Dependencies.
  ///
  /// \param Changes The list of changes.
  ///
  /// \param Dependencies The list of dependencies amongst changes. For each
  /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
/// minimization algorithm guarantees that for each tested change set S,
  /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
  /// dependencies.
  changeset_ty Run(const changeset_ty &Changes,
                   const std::vector<edge_ty> &Dependencies);

  /// UpdatedSearchState - Callback used when the search state changes.
  virtual void UpdatedSearchState(const changeset_ty &Changes,
                                  const changesetlist_ty &Sets,
                                  const changeset_ty &Required) {}

  /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
  virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
};
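
// Usage sketch (editor's illustration; `stillFailing` is a hypothetical
// predicate): clients subclass and implement the test, then call Run.
//
//   @code{.cpp}
//   struct MyMinimizer : DAGDeltaAlgorithm {
//     bool ExecuteOneTest(const changeset_ty &S) override {
//       return stillFailing(S); // true iff the bug reproduces with S applied
//     }
//   };
//
//   MyMinimizer M;
//   // Edges (0,1) and (2,3): any tested set keeping 0 also keeps 1, etc.
//   auto Minimal = M.Run({0, 1, 2, 3}, {{0, 1}, {2, 3}});
//   @endcode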

} // end namespace llvm

#endif // LLVM_ADT_DAGDELTAALGORITHM_H
//===- llvm/ADT/ilist_node_base.h - Intrusive List Node Base -----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ILIST_NODE_BASE_H
#define LLVM_ADT_ILIST_NODE_BASE_H

#include "llvm/ADT/PointerIntPair.h"

namespace llvm {

/// Base class for ilist nodes.
///
/// Optionally tracks whether this node is the sentinel.
template <bool EnableSentinelTracking> class ilist_node_base;

template <> class ilist_node_base<false> {
  ilist_node_base *Prev = nullptr;
  ilist_node_base *Next = nullptr;

public:
  void setPrev(ilist_node_base *Prev) { this->Prev = Prev; }
  void setNext(ilist_node_base *Next) { this->Next = Next; }
  ilist_node_base *getPrev() const { return Prev; }
  ilist_node_base *getNext() const { return Next; }

  bool isKnownSentinel() const { return false; }
  void initializeSentinel() {}
};

template <> class ilist_node_base<true> {
  PointerIntPair<ilist_node_base *, 1> PrevAndSentinel;
  ilist_node_base *Next = nullptr;

public:
  void setPrev(ilist_node_base *Prev) { PrevAndSentinel.setPointer(Prev); }
  void setNext(ilist_node_base *Next) { this->Next = Next; }
  ilist_node_base *getPrev() const { return PrevAndSentinel.getPointer(); }
  ilist_node_base *getNext() const { return Next; }

  bool isSentinel() const { return PrevAndSentinel.getInt(); }
  bool isKnownSentinel() const { return isSentinel(); }
  void initializeSentinel() { PrevAndSentinel.setInt(true); }
};
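
// Note (editor's): with sentinel tracking enabled, the flag lives in the low
// bit of the Prev pointer via PointerIntPair, so the tracked node is no
// larger than the untracked variant.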

} // end namespace llvm

#endif // LLVM_ADT_ILIST_NODE_BASE_H
//===--- fallible_iterator.h - Wrapper for fallible iterators ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_FALLIBLE_ITERATOR_H
#define LLVM_ADT_FALLIBLE_ITERATOR_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Error.h"

#include <type_traits>

namespace llvm {

/// A wrapper class for fallible iterators.
///
///   The fallible_iterator template wraps an underlying iterator-like class
/// whose increment and decrement operations are replaced with fallible versions
/// like:
///
///   @code{.cpp}
///   Error inc();
///   Error dec();
///   @endcode
///
///   It produces an interface that is (mostly) compatible with a traditional
/// C++ iterator, including ++ and -- operators that do not fail.
///
///   Instances of the wrapper are constructed with an instance of the
/// underlying iterator and (for non-end iterators) a reference to an Error
/// instance. If the underlying increment/decrement operations fail, the Error
/// is returned via this reference, and the resulting iterator value is set to
/// an
/// end-of-range sentinel value. This enables the following loop idiom:
///
///   @code{.cpp}
///   class Archive { // E.g. Potentially malformed on-disk archive
///   public:
///     fallible_iterator<ArchiveChildItr> children_begin(Error &Err);
///     fallible_iterator<ArchiveChildItr> children_end();
///     iterator_range<fallible_iterator<ArchiveChildItr>>
///     children(Error &Err) {
///       return make_range(children_begin(Err), children_end());
///     }
///     //...
///   };
///
///   void walk(Archive &A) {
///     Error Err = Error::success();
///     for (auto &C : A.children(Err)) {
///       // Loop body only entered when increment succeeds.
///     }
///     if (Err) {
///       // handle error.
///     }
///   }
///   @endcode
///
///   The wrapper marks the referenced Error as unchecked after each increment
/// and/or decrement operation, and clears the unchecked flag when a non-end
/// value is compared against end (since, by the increment invariant, not being
/// an end value proves that there was no error, and is equivalent to checking
/// that the Error is success). This allows early exits from the loop body
/// without requiring redundant error checks.
template <typename Underlying> class fallible_iterator {
private:
  template <typename T>
  using enable_if_struct_deref_supported = std::enable_if_t<
      !std::is_void<decltype(std::declval<T>().operator->())>::value,
      decltype(std::declval<T>().operator->())>;

public:
  /// Construct a fallible iterator that *cannot* be used as an end-of-range
  /// value.
  ///
  /// A value created by this method can be dereferenced, incremented,
  /// decremented and compared, provided the underlying type supports it.
  ///
  /// The error that is passed in will be initially marked as checked, so if the
  /// iterator is not used at all the Error need not be checked.
  static fallible_iterator itr(Underlying I, Error &Err) {
    (void)!!Err;
    return fallible_iterator(std::move(I), &Err);
  }

  /// Construct a fallible iterator that can be used as an end-of-range value.
  ///
  /// A value created by this method can be dereferenced (if the underlying
  /// value points at a valid value) and compared, but not incremented or
  /// decremented.
  static fallible_iterator end(Underlying I) {
    return fallible_iterator(std::move(I), nullptr);
  }

  /// Forward dereference to the underlying iterator.
  decltype(auto) operator*() { return *I; }

  /// Forward const dereference to the underlying iterator.
  decltype(auto) operator*() const { return *I; }

  /// Forward structure dereference to the underlying iterator (if the
  /// underlying iterator supports it).
  template <typename T = Underlying>
  enable_if_struct_deref_supported<T> operator->() {
    return I.operator->();
  }

  /// Forward const structure dereference to the underlying iterator (if the
  /// underlying iterator supports it).
  template <typename T = Underlying>
  enable_if_struct_deref_supported<const T> operator->() const {
    return I.operator->();
  }

  /// Increment the fallible iterator.
  ///
  /// If the underlying 'inc' operation fails, this will set the Error value
  /// and update this iterator value to point to end-of-range.
  ///
  /// The Error value is marked as needing checking, regardless of whether the
  /// 'inc' operation succeeds or fails.
  fallible_iterator &operator++() {
    assert(getErrPtr() && "Cannot increment end iterator");
    if (auto Err = I.inc())
      handleError(std::move(Err));
    else
      resetCheckedFlag();
    return *this;
  }

  /// Decrement the fallible iterator.
  ///
  /// If the underlying 'dec' operation fails, this will set the Error value
  /// and update this iterator value to point to end-of-range.
  ///
  /// The Error value is marked as needing checking, regardless of whether the
  /// 'dec' operation succeeds or fails.
  fallible_iterator &operator--() {
    assert(getErrPtr() && "Cannot decrement end iterator");
    if (auto Err = I.dec())
      handleError(std::move(Err));
    else
      resetCheckedFlag();
    return *this;
  }

  /// Compare fallible iterators for equality.
  ///
  /// Returns true if both LHS and RHS are end-of-range values, or if both are
  /// non-end-of-range values whose underlying iterator values compare equal.
  ///
  /// If this is a comparison between an end-of-range iterator and a
  /// non-end-of-range iterator, then the Error (referenced by the
  /// non-end-of-range value) is marked as checked: Since all
  /// increment/decrement operations result in an end-of-range value, comparing
  /// false against end-of-range is equivalent to checking that the Error value
  /// is success. This flag management enables early returns from loop bodies
  /// without redundant Error checks.
  friend bool operator==(const fallible_iterator &LHS,
                         const fallible_iterator &RHS) {
    // If both iterators are in the end state they compare
    // equal, regardless of whether either is valid.
    if (LHS.isEnd() && RHS.isEnd())
      return true;

    assert(LHS.isValid() && RHS.isValid() &&
           "Invalid iterators can only be compared against end");

    bool Equal = LHS.I == RHS.I;

    // If the iterators differ and this is a comparison against end then mark
    // the Error as checked.
    if (!Equal) {
      if (LHS.isEnd())
        (void)!!*RHS.getErrPtr();
      else
        (void)!!*LHS.getErrPtr();
    }

    return Equal;
  }

  /// Compare fallible iterators for inequality.
  ///
  /// See notes for operator==.
  friend bool operator!=(const fallible_iterator &LHS,
                         const fallible_iterator &RHS) {
    return !(LHS == RHS);
  }

private:
  fallible_iterator(Underlying I, Error *Err)
      : I(std::move(I)), ErrState(Err, false) {}

  Error *getErrPtr() const { return ErrState.getPointer(); }

  bool isEnd() const { return getErrPtr() == nullptr; }

  bool isValid() const { return !ErrState.getInt(); }

  void handleError(Error Err) {
    *getErrPtr() = std::move(Err);
    ErrState.setPointer(nullptr);
    ErrState.setInt(true);
  }

  void resetCheckedFlag() {
    *getErrPtr() = Error::success();
  }

  Underlying I;
  mutable PointerIntPair<Error *, 1> ErrState;
};

/// Convenience wrapper to make a fallible_iterator value from an instance
/// of an underlying iterator and an Error reference.
template <typename Underlying>
fallible_iterator<Underlying> make_fallible_itr(Underlying I, Error &Err) {
  return fallible_iterator<Underlying>::itr(std::move(I), Err);
}

/// Convenience wrapper to make a fallible_iterator end value from an instance
/// of an underlying iterator.
template <typename Underlying>
fallible_iterator<Underlying> make_fallible_end(Underlying E) {
  return fallible_iterator<Underlying>::end(std::move(E));
}

template <typename Underlying>
iterator_range<fallible_iterator<Underlying>>
make_fallible_range(Underlying I, Underlying E, Error &Err) {
  return make_range(make_fallible_itr(std::move(I), Err),
                    make_fallible_end(std::move(E)));
}
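
// Usage sketch (editor's illustration; `Obj`, `child_begin`, `child_end` and
// `processChild` are hypothetical stand-ins, echoing the Archive example in
// the class comment above):
//
//   @code{.cpp}
//   Error Err = Error::success();
//   for (auto &Child :
//        make_fallible_range(Obj.child_begin(), Obj.child_end(), Err))
//     processChild(Child);
//   if (Err)
//     return Err; // an increment failed and ended the loop early
//   @endcode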

} // end namespace llvm

#endif // LLVM_ADT_FALLIBLE_ITERATOR_H
//===- GenericCycleImpl.h -------------------------------------*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This template implementation resides in a separate file so that it
/// does not get injected into every .cpp file that includes the
/// generic header.
///
/// DO NOT INCLUDE THIS FILE WHEN MERELY USING CYCLEINFO.
///
/// This file should only be included by files that implement a
/// specialization of the relevant templates. Currently these are:
/// - llvm/lib/IR/CycleInfo.cpp
/// - llvm/lib/CodeGen/MachineCycleAnalysis.cpp
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_GENERICCYCLEIMPL_H
#define LLVM_ADT_GENERICCYCLEIMPL_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GenericCycleInfo.h"

#define DEBUG_TYPE "generic-cycle-impl"

namespace llvm {

template <typename ContextT>
bool GenericCycle<ContextT>::contains(const GenericCycle *C) const {
  if (!C)
    return false;

  if (Depth > C->Depth)
    return false;
  while (Depth < C->Depth)
    C = C->ParentCycle;
  return this == C;
}

template <typename ContextT>
void GenericCycle<ContextT>::getExitBlocks(
    SmallVectorImpl<BlockT *> &TmpStorage) const {
  TmpStorage.clear();

  size_t NumExitBlocks = 0;
  for (BlockT *Block : blocks()) {
    llvm::append_range(TmpStorage, successors(Block));

    for (size_t Idx = NumExitBlocks, End = TmpStorage.size(); Idx < End;
         ++Idx) {
      BlockT *Succ = TmpStorage[Idx];
      if (!contains(Succ)) {
        auto ExitEndIt = TmpStorage.begin() + NumExitBlocks;
        if (std::find(TmpStorage.begin(), ExitEndIt, Succ) == ExitEndIt)
          TmpStorage[NumExitBlocks++] = Succ;
      }
    }

    TmpStorage.resize(NumExitBlocks);
  }
}

template <typename ContextT>
auto GenericCycle<ContextT>::getCyclePreheader() const -> BlockT * {
  BlockT *Predecessor = getCyclePredecessor();
  if (!Predecessor)
    return nullptr;

  assert(isReducible() && "Cycle Predecessor must be in a reducible cycle!");

  if (succ_size(Predecessor) != 1)
    return nullptr;

  // Make sure we are allowed to hoist instructions into the predecessor.
  if (!Predecessor->isLegalToHoistInto())
    return nullptr;

  return Predecessor;
}

template <typename ContextT>
auto GenericCycle<ContextT>::getCyclePredecessor() const -> BlockT * {
  if (!isReducible())
    return nullptr;

  BlockT *Out = nullptr;

  // Loop over the predecessors of the header node...
  BlockT *Header = getHeader();
  for (const auto Pred : predecessors(Header)) {
    if (!contains(Pred)) {
      if (Out && Out != Pred)
        return nullptr;
      Out = Pred;
    }
  }

  return Out;
}

/// \brief Helper class for computing cycle information.
template <typename ContextT> class GenericCycleInfoCompute {
  using BlockT = typename ContextT::BlockT;
  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;

  CycleInfoT &Info;

  struct DFSInfo {
    unsigned Start = 0; // DFS start; positive if block is found
    unsigned End = 0;   // DFS end

    DFSInfo() = default;
    explicit DFSInfo(unsigned Start) : Start(Start) {}

    /// Whether this node is an ancestor of (or equal to) the node \p Other
    /// in the DFS tree. Ancestorship is exactly nesting of the DFS
    /// [Start, End] intervals: the ancestor's interval encloses the other's.
    bool isAncestorOf(const DFSInfo &Other) const {
      return Start <= Other.Start && Other.End <= End;
    }
  };

  DenseMap<BlockT *, DFSInfo> BlockDFSInfo;
  SmallVector<BlockT *, 8> BlockPreorder;

  GenericCycleInfoCompute(const GenericCycleInfoCompute &) = delete;
  GenericCycleInfoCompute &operator=(const GenericCycleInfoCompute &) = delete;

public:
  GenericCycleInfoCompute(CycleInfoT &Info) : Info(Info) {}

  void run(BlockT *EntryBlock);

  static void updateDepth(CycleT *SubTree);

private:
  void dfs(BlockT *EntryBlock);
};

template <typename ContextT>
auto GenericCycleInfo<ContextT>::getTopLevelParentCycle(BlockT *Block)
    -> CycleT * {
  auto Cycle = BlockMapTopLevel.find(Block);
  if (Cycle != BlockMapTopLevel.end())
    return Cycle->second;

  auto MapIt = BlockMap.find(Block);
  if (MapIt == BlockMap.end())
    return nullptr;

  auto *C = MapIt->second;
  while (C->ParentCycle)
    C = C->ParentCycle;
  BlockMapTopLevel.try_emplace(Block, C);
  return C;
}

template <typename ContextT>
void GenericCycleInfo<ContextT>::moveTopLevelCycleToNewParent(CycleT *NewParent,
                                                              CycleT *Child) {
  assert((!Child->ParentCycle && !NewParent->ParentCycle) &&
         "NewParent and Child must be both top level cycle!\n");
  auto &CurrentContainer =
      Child->ParentCycle ? Child->ParentCycle->Children : TopLevelCycles;
  auto Pos = llvm::find_if(CurrentContainer, [=](const auto &Ptr) -> bool {
    return Child == Ptr.get();
  });
  assert(Pos != CurrentContainer.end());
  NewParent->Children.push_back(std::move(*Pos));
  *Pos = std::move(CurrentContainer.back());
  CurrentContainer.pop_back();
  Child->ParentCycle = NewParent;

  NewParent->Blocks.insert(Child->block_begin(), Child->block_end());

  for (auto &It : BlockMapTopLevel)
    if (It.second == Child)
      It.second = NewParent;
}

/// \brief Main function of the cycle info computations.
template <typename ContextT>
void GenericCycleInfoCompute<ContextT>::run(BlockT *EntryBlock) {
  LLVM_DEBUG(errs() << "Entry block: " << Info.Context.print(EntryBlock)
                    << "\n");
  dfs(EntryBlock);

  SmallVector<BlockT *, 8> Worklist;

  for (BlockT *HeaderCandidate : llvm::reverse(BlockPreorder)) {
    const DFSInfo CandidateInfo = BlockDFSInfo.lookup(HeaderCandidate);

    for (BlockT *Pred : predecessors(HeaderCandidate)) {
      const DFSInfo PredDFSInfo = BlockDFSInfo.lookup(Pred);
      if (CandidateInfo.isAncestorOf(PredDFSInfo))
        Worklist.push_back(Pred);
    }
    if (Worklist.empty()) {
      continue;
    }

    // Found a cycle with the candidate as its header.
    LLVM_DEBUG(errs() << "Found cycle for header: "
                      << Info.Context.print(HeaderCandidate) << "\n");
    std::unique_ptr<CycleT> NewCycle = std::make_unique<CycleT>();
    NewCycle->appendEntry(HeaderCandidate);
    NewCycle->appendBlock(HeaderCandidate);
    Info.BlockMap.try_emplace(HeaderCandidate, NewCycle.get());

    // Helper function to process (non-back-edge) predecessors of a discovered
    // block and either add them to the worklist or recognize that the given
    // block is an additional cycle entry.
    auto ProcessPredecessors = [&](BlockT *Block) {
      LLVM_DEBUG(errs() << "  block " << Info.Context.print(Block) << ": ");

      bool IsEntry = false;
      for (BlockT *Pred : predecessors(Block)) {
        const DFSInfo PredDFSInfo = BlockDFSInfo.lookup(Pred);
        if (CandidateInfo.isAncestorOf(PredDFSInfo)) {
          Worklist.push_back(Pred);
        } else {
          IsEntry = true;
        }
      }
      if (IsEntry) {
        assert(!NewCycle->isEntry(Block));
        LLVM_DEBUG(errs() << "append as entry\n");
        NewCycle->appendEntry(Block);
      } else {
        LLVM_DEBUG(errs() << "append as child\n");
      }
    };

    do {
      BlockT *Block = Worklist.pop_back_val();
      if (Block == HeaderCandidate)
        continue;

      // If the block has already been discovered by some cycle
      // (possibly by ourself), then the outermost cycle containing it
      // should become our child.
      if (auto *BlockParent = Info.getTopLevelParentCycle(Block)) {
        LLVM_DEBUG(errs() << "  block " << Info.Context.print(Block) << ": ");

        if (BlockParent != NewCycle.get()) {
          LLVM_DEBUG(errs()
                     << "discovered child cycle "
                     << Info.Context.print(BlockParent->getHeader()) << "\n");
          // Make BlockParent the child of NewCycle.
          Info.moveTopLevelCycleToNewParent(NewCycle.get(), BlockParent);

          for (auto *ChildEntry : BlockParent->entries())
            ProcessPredecessors(ChildEntry);
        } else {
          LLVM_DEBUG(errs()
                     << "known child cycle "
                     << Info.Context.print(BlockParent->getHeader()) << "\n");
        }
      } else {
        Info.BlockMap.try_emplace(Block, NewCycle.get());
        assert(!is_contained(NewCycle->Blocks, Block));
        NewCycle->Blocks.insert(Block);
        ProcessPredecessors(Block);
        Info.BlockMapTopLevel.try_emplace(Block, NewCycle.get());
      }
    } while (!Worklist.empty());

    Info.TopLevelCycles.push_back(std::move(NewCycle));
  }

  // Fix top-level cycle links and compute cycle depths.
  for (auto *TLC : Info.toplevel_cycles()) {
    LLVM_DEBUG(errs() << "top-level cycle: "
                      << Info.Context.print(TLC->getHeader()) << "\n");

    TLC->ParentCycle = nullptr;
    updateDepth(TLC);
  }
}

/// \brief Recompute depth values of \p SubTree and all descendants.
template <typename ContextT>
void GenericCycleInfoCompute<ContextT>::updateDepth(CycleT *SubTree) {
  for (CycleT *Cycle : depth_first(SubTree))
    Cycle->Depth = Cycle->ParentCycle ? Cycle->ParentCycle->Depth + 1 : 1;
}

/// \brief Compute a DFS of basic blocks starting at the function entry.
///
/// Fills BlockDFSInfo with start/end counters and BlockPreorder.
template <typename ContextT>
void GenericCycleInfoCompute<ContextT>::dfs(BlockT *EntryBlock) {
  SmallVector<unsigned, 8> DFSTreeStack;
  SmallVector<BlockT *, 8> TraverseStack;
  unsigned Counter = 0;
  TraverseStack.emplace_back(EntryBlock);

  do {
    BlockT *Block = TraverseStack.back();
    LLVM_DEBUG(errs() << "DFS visiting block: " << Info.Context.print(Block)
                      << "\n");
    if (!BlockDFSInfo.count(Block)) {
      // We're visiting the block for the first time. Open its DFSInfo, add
      // successors to the traversal stack, and remember the traversal stack
      // depth at which the block was opened, so that we can correctly record
      // its end time.
      LLVM_DEBUG(errs() << "  first encountered at depth "
                        << TraverseStack.size() << "\n");

      DFSTreeStack.emplace_back(TraverseStack.size());
      llvm::append_range(TraverseStack, successors(Block));

      bool Added = BlockDFSInfo.try_emplace(Block, ++Counter).second;
      (void)Added;
      assert(Added);
      BlockPreorder.push_back(Block);
      LLVM_DEBUG(errs() << "  preorder number: " << Counter << "\n");
    } else {
      assert(!DFSTreeStack.empty());
      if (DFSTreeStack.back() == TraverseStack.size()) {
        LLVM_DEBUG(errs() << "  ended at " << Counter << "\n");
        BlockDFSInfo.find(Block)->second.End = Counter;
        DFSTreeStack.pop_back();
      } else {
        LLVM_DEBUG(errs() << "  already done\n");
      }
      TraverseStack.pop_back();
    }
  } while (!TraverseStack.empty());
  assert(DFSTreeStack.empty());

  LLVM_DEBUG(
    errs() << "Preorder:\n";
    for (int i = 0, e = BlockPreorder.size(); i != e; ++i) {
      errs() << "  " << Info.Context.print(BlockPreorder[i]) << ": " << i << "\n";
    }
  );
}
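
// Worked example (editor's note): for entry A with edges A->B, B->C and the
// back edge C->A, the loop above assigns Start numbers in preorder and stamps
// End once a block's DFS subtree is exhausted, giving
//   A: [Start=1, End=3], B: [2, 3], C: [3, 3]
// so A.isAncestorOf(C) holds (1 <= 3 && 3 <= 3), which is what run() uses to
// recognize the predecessor edge C->A as closing the cycle headed by A.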

/// \brief Reset the object to its initial state.
template <typename ContextT> void GenericCycleInfo<ContextT>::clear() {
  TopLevelCycles.clear();
  BlockMap.clear();
  BlockMapTopLevel.clear();
}

/// \brief Compute the cycle info for a function.
template <typename ContextT>
void GenericCycleInfo<ContextT>::compute(FunctionT &F) {
  GenericCycleInfoCompute<ContextT> Compute(*this);
  Context.setFunction(F);

  LLVM_DEBUG(errs() << "Computing cycles for function: " << F.getName()
                    << "\n");
  Compute.run(ContextT::getEntryBlock(F));

  assert(validateTree());
}

/// \brief Find the innermost cycle containing a given block.
///
/// \returns the innermost cycle containing \p Block or nullptr if
///          it is not contained in any cycle.
template <typename ContextT>
auto GenericCycleInfo<ContextT>::getCycle(const BlockT *Block) const
    -> CycleT * {
  auto MapIt = BlockMap.find(Block);
  if (MapIt != BlockMap.end())
    return MapIt->second;
  return nullptr;
}

/// \brief Get the depth of the innermost cycle containing a given block.
///
/// \returns the depth of the innermost cycle containing \p Block or 0 if it is
///          not contained in any cycle.
template <typename ContextT>
unsigned GenericCycleInfo<ContextT>::getCycleDepth(const BlockT *Block) const {
  CycleT *Cycle = getCycle(Block);
  if (!Cycle)
    return 0;
  return Cycle->getDepth();
}

#ifndef NDEBUG
/// \brief Validate the internal consistency of the cycle tree.
///
/// Note that this does \em not check that cycles are really cycles in the CFG,
/// or that the right set of cycles in the CFG were found.
template <typename ContextT>
bool GenericCycleInfo<ContextT>::validateTree() const {
  DenseSet<BlockT *> Blocks;
  DenseSet<BlockT *> Entries;

  auto reportError = [](const char *File, int Line, const char *Cond) {
    errs() << File << ':' << Line
           << ": GenericCycleInfo::validateTree: " << Cond << '\n';
  };
#define check(cond)                                                            \
  do {                                                                         \
    if (!(cond)) {                                                             \
      reportError(__FILE__, __LINE__, #cond);                                  \
      return false;                                                            \
    }                                                                          \
  } while (false)

  for (const auto *TLC : toplevel_cycles()) {
    for (const CycleT *Cycle : depth_first(TLC)) {
      if (Cycle->ParentCycle)
        check(is_contained(Cycle->ParentCycle->children(), Cycle));

      for (BlockT *Block : Cycle->Blocks) {
        auto MapIt = BlockMap.find(Block);
        check(MapIt != BlockMap.end());
        check(Cycle->contains(MapIt->second));
        check(Blocks.insert(Block).second); // duplicates in block list?
      }
      Blocks.clear();

      check(!Cycle->Entries.empty());
      for (BlockT *Entry : Cycle->Entries) {
        check(Entries.insert(Entry).second); // duplicate entry?
        check(is_contained(Cycle->Blocks, Entry));
      }
      Entries.clear();

      unsigned ChildDepth = 0;
      for (const CycleT *Child : Cycle->children()) {
        check(Child->Depth > Cycle->Depth);
        if (!ChildDepth) {
          ChildDepth = Child->Depth;
        } else {
          check(ChildDepth == Child->Depth);
        }
      }
    }
  }

  for (const auto &Entry : BlockMap) {
    BlockT *Block = Entry.first;
    for (const CycleT *Cycle = Entry.second; Cycle;
         Cycle = Cycle->ParentCycle) {
      check(is_contained(Cycle->Blocks, Block));
    }
  }

#undef check

  return true;
}
#endif

/// \brief Print the cycle info.
template <typename ContextT>
void GenericCycleInfo<ContextT>::print(raw_ostream &Out) const {
  for (const auto *TLC : toplevel_cycles()) {
    for (const CycleT *Cycle : depth_first(TLC)) {
      for (unsigned I = 0; I < Cycle->Depth; ++I)
        Out << "    ";

      Out << Cycle->print(Context) << '\n';
    }
  }
}

} // namespace llvm

#undef DEBUG_TYPE

#endif // LLVM_ADT_GENERICCYCLEIMPL_H
//===- llvm/ADT/AllocatorList.h - Custom allocator list ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_ALLOCATORLIST_H
#define LLVM_ADT_ALLOCATORLIST_H

#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>

namespace llvm {

/// A linked-list with a custom, local allocator.
///
/// Expose a std::list-like interface that owns and uses a custom LLVM-style
/// allocator (e.g., BumpPtrAllocator), leveraging \a simple_ilist for the
/// implementation details.
///
/// Because this list owns the allocator, calling \a splice() with a different
/// list isn't generally safe.  As such, \a splice has been left out of the
/// interface entirely.
template <class T, class AllocatorT> class AllocatorList : AllocatorT {
  struct Node : ilist_node<Node> {
    Node(Node &&) = delete;
    Node(const Node &) = delete;
    Node &operator=(Node &&) = delete;
    Node &operator=(const Node &) = delete;

    Node(T &&V) : V(std::move(V)) {}
    Node(const T &V) : V(V) {}
    template <class... Ts> Node(Ts &&... Vs) : V(std::forward<Ts>(Vs)...) {}
    T V;
  };

  using list_type = simple_ilist<Node>;

  list_type List;

  AllocatorT &getAlloc() { return *this; }
  const AllocatorT &getAlloc() const { return *this; }

  template <class... ArgTs> Node *create(ArgTs &&... Args) {
    return new (getAlloc()) Node(std::forward<ArgTs>(Args)...);
  }

  struct Cloner {
    AllocatorList &AL;

    Cloner(AllocatorList &AL) : AL(AL) {}

    Node *operator()(const Node &N) const { return AL.create(N.V); }
  };

  struct Disposer {
    AllocatorList &AL;

    Disposer(AllocatorList &AL) : AL(AL) {}

    void operator()(Node *N) const {
      N->~Node();
      AL.getAlloc().Deallocate(N);
    }
  };

public:
  using value_type = T;
  using pointer = T *;
  using reference = T &;
  using const_pointer = const T *;
  using const_reference = const T &;
  using size_type = typename list_type::size_type;
  using difference_type = typename list_type::difference_type;

private:
  template <class ValueT, class IteratorBase>
  class IteratorImpl
      : public iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
                                     IteratorBase,
                                     std::bidirectional_iterator_tag, ValueT> {
    template <class OtherValueT, class OtherIteratorBase>
    friend class IteratorImpl;
    friend AllocatorList;

    using base_type =
        iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>, IteratorBase,
                              std::bidirectional_iterator_tag, ValueT>;

  public:
    using value_type = ValueT;
    using pointer = ValueT *;
    using reference = ValueT &;

    IteratorImpl() = default;
    IteratorImpl(const IteratorImpl &) = default;
    IteratorImpl &operator=(const IteratorImpl &) = default;

    explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}

    template <class OtherValueT, class OtherIteratorBase>
    IteratorImpl(const IteratorImpl<OtherValueT, OtherIteratorBase> &X,
                 std::enable_if_t<std::is_convertible<
                     OtherIteratorBase, IteratorBase>::value> * = nullptr)
        : base_type(X.wrapped()) {}

    ~IteratorImpl() = default;

    reference operator*() const { return base_type::wrapped()->V; }
    pointer operator->() const { return &operator*(); }
  };

public:
  using iterator = IteratorImpl<T, typename list_type::iterator>;
  using reverse_iterator =
      IteratorImpl<T, typename list_type::reverse_iterator>;
  using const_iterator =
      IteratorImpl<const T, typename list_type::const_iterator>;
  using const_reverse_iterator =
      IteratorImpl<const T, typename list_type::const_reverse_iterator>;

  AllocatorList() = default;
  AllocatorList(AllocatorList &&X)
      : AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}

  AllocatorList(const AllocatorList &X) {
    List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
  }

  AllocatorList &operator=(AllocatorList &&X) {
    clear(); // Dispose of current nodes explicitly.
    List = std::move(X.List);
    getAlloc() = std::move(X.getAlloc());
    return *this;
  }

  AllocatorList &operator=(const AllocatorList &X) {
    List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
    return *this;
  }

  ~AllocatorList() { clear(); }

  void swap(AllocatorList &RHS) {
    List.swap(RHS.List);
    std::swap(getAlloc(), RHS.getAlloc());
  }

  bool empty() { return List.empty(); }
  size_t size() { return List.size(); }

  iterator begin() { return iterator(List.begin()); }
  iterator end() { return iterator(List.end()); }
  const_iterator begin() const { return const_iterator(List.begin()); }
  const_iterator end() const { return const_iterator(List.end()); }
  reverse_iterator rbegin() { return reverse_iterator(List.rbegin()); }
  reverse_iterator rend() { return reverse_iterator(List.rend()); }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(List.rbegin());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(List.rend());
  }

  T &back() { return List.back().V; }
  T &front() { return List.front().V; }
  const T &back() const { return List.back().V; }
  const T &front() const { return List.front().V; }

  template <class... Ts> iterator emplace(iterator I, Ts &&... Vs) {
    return iterator(List.insert(I.wrapped(), *create(std::forward<Ts>(Vs)...)));
  }

  iterator insert(iterator I, T &&V) {
    return iterator(List.insert(I.wrapped(), *create(std::move(V))));
  }
  iterator insert(iterator I, const T &V) {
    return iterator(List.insert(I.wrapped(), *create(V)));
  }

  template <class Iterator>
  void insert(iterator I, Iterator First, Iterator Last) {
    for (; First != Last; ++First)
      List.insert(I.wrapped(), *create(*First));
  }

  iterator erase(iterator I) {
    return iterator(List.eraseAndDispose(I.wrapped(), Disposer(*this)));
  }

  iterator erase(iterator First, iterator Last) {
    return iterator(
        List.eraseAndDispose(First.wrapped(), Last.wrapped(), Disposer(*this)));
  }

  void clear() { List.clearAndDispose(Disposer(*this)); }
  void pop_back() { List.eraseAndDispose(--List.end(), Disposer(*this)); }
  void pop_front() { List.eraseAndDispose(List.begin(), Disposer(*this)); }
  void push_back(T &&V) { insert(end(), std::move(V)); }
  void push_front(T &&V) { insert(begin(), std::move(V)); }
  void push_back(const T &V) { insert(end(), V); }
  void push_front(const T &V) { insert(begin(), V); }
  template <class... Ts> void emplace_back(Ts &&... Vs) {
    emplace(end(), std::forward<Ts>(Vs)...);
  }
  template <class... Ts> void emplace_front(Ts &&... Vs) {
    emplace(begin(), std::forward<Ts>(Vs)...);
  }

  /// Reset the underlying allocator.
  ///
  /// \pre \c empty()
  void resetAlloc() {
    assert(empty() && "Cannot reset allocator if not empty");
    getAlloc().Reset();
  }
};

template <class T> using BumpPtrList = AllocatorList<T, BumpPtrAllocator>;

} // end namespace llvm

#endif // LLVM_ADT_ALLOCATORLIST_H
//===- TargetPfmCounters.td - Target Pfm Counters -*- tablegen ----------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces for performance counters.
//
//===----------------------------------------------------------------------===//

// Definition of a hardware counter from libpfm identifiers.
class PfmCounter<string counter> {
  // The name of the counter that measures events.
  // The name can be "some_counter + some_other_counter", in which case the
  // measured value is the sum of events on these counters.
  string Counter = counter;
}

// Issue counters can be tied to a ProcResource
class PfmIssueCounter<string resource_name, string counter>
    : PfmCounter<counter> {
  // The name of the ProcResource on which uops are issued. This is used by
  // llvm-exegesis to compare measurements with values in the SchedModels.
  // If the CPU has a sched model, this should correspond to the name of a
  // ProcResource.
  string ResourceName = resource_name;
}

def NoPfmCounter : PfmCounter<""> {}

// Set of PfmCounters for measuring sched model characteristics.
class ProcPfmCounters {
  // Processors can define how to measure cycles by defining a CycleCounter.
  PfmCounter CycleCounter = NoPfmCounter;
  // Processors can define how to measure uops by defining a UopsCounter.
  PfmCounter UopsCounter = NoPfmCounter;
  // Processors can define how to measure issued uops by defining IssueCounters.
  list<PfmIssueCounter> IssueCounters = [];
}

// A binding of a set of counters to a CPU.
class PfmCountersBinding<string cpu_name, ProcPfmCounters counters> {
  string CpuName = cpu_name;
  ProcPfmCounters Counters = counters;
}

// Declares the default binding for unbound CPUs for the target.
class PfmCountersDefaultBinding<ProcPfmCounters counters>
    : PfmCountersBinding<"", counters> {}
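
// Usage sketch (editor's illustration; the CPU name and libpfm event names
// below are hypothetical): a target binds a counter set to each CPU, and the
// default binding covers CPUs without their own.
//
//   def MyCycleCounter : PfmCounter<"cpu_clk_unhalted">;
//   def MyUopsCounter  : PfmCounter<"uops_issued">;
//   def MyCounters : ProcPfmCounters {
//     let CycleCounter = MyCycleCounter;
//     let UopsCounter  = MyUopsCounter;
//     let IssueCounters = [PfmIssueCounter<"MyPort0", "uops_port0">];
//   }
//   def : PfmCountersBinding<"my-cpu", MyCounters>;
//   def : PfmCountersDefaultBinding<MyCounters>;
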
//===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines command line option flags that are shared across various
// targets.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETOPTIONS_H
#define LLVM_TARGET_TARGETOPTIONS_H

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/MC/MCTargetOptions.h"

#include <memory>

namespace llvm {
  struct fltSemantics;
  class MachineFunction;
  class MemoryBuffer;

  namespace FloatABI {
    enum ABIType {
      Default, // Target-specific (either soft or hard depending on triple, etc).
      Soft,    // Soft float.
      Hard     // Hard float.
    };
  }

  namespace FPOpFusion {
    enum FPOpFusionMode {
      Fast,     // Enable fusion of FP ops wherever it's profitable.
      Standard, // Only allow fusion of 'blessed' ops (currently just fmuladd).
      Strict    // Never fuse FP-ops.
    };
  }

  namespace JumpTable {
    enum JumpTableType {
      Single,          // Use a single table for all indirect jumptable calls.
      Arity,           // Use one table per number of function parameters.
      Simplified,      // Use one table per function type, with types projected
                       // into 4 types: pointer to non-function, struct,
                       // primitive, and function pointer.
      Full             // Use one table per unique function type
    };
  }

  namespace ThreadModel {
    enum Model {
      POSIX,  // POSIX Threads
      Single  // Single Threaded Environment
    };
  }

  enum class BasicBlockSection {
    All,    // Use Basic Block Sections for all basic blocks.  A section
            // for every basic block can significantly bloat object file sizes.
    List,   // Get list of functions & BBs from a file. Selectively enables
            // basic block sections for a subset of basic blocks which can be
            // used to control object size bloats from creating sections.
    Labels, // Do not use Basic Block Sections but label basic blocks.  This
            // is useful when associating profile counts from virtual addresses
            // to basic blocks.
    Preset, // Similar to list but the blocks are identified by passes which
            // seek to use Basic Block Sections, e.g. MachineFunctionSplitter.
            // This option cannot be set via the command line.
    None    // Do not use Basic Block Sections.
  };

  enum class EABI {
    Unknown,
    Default, // Default means not specified
    EABI4,   // Target-specific (either 4, 5 or gnu depending on triple).
    EABI5,
    GNU
  };

  /// Identify a debugger for "tuning" the debug info.
  ///
  /// The "debugger tuning" concept allows us to present a more intuitive
  /// interface that unpacks into different sets of defaults for the various
  /// individual feature-flag settings, that suit the preferences of the
  /// various debuggers.  However, it's worth remembering that debuggers are
  /// not the only consumers of debug info, and some variations in DWARF might
  /// better be treated as target/platform issues. Fundamentally,
  /// o if the feature is useful (or not) to a particular debugger, regardless
  ///   of the target, that's a tuning decision;
  /// o if the feature is useful (or not) on a particular platform, regardless
  ///   of the debugger, that's a target decision.
  /// It's not impossible to see both factors in some specific case.
  enum class DebuggerKind {
    Default, ///< No specific tuning requested.
    GDB,     ///< Tune debug info for gdb.
    LLDB,    ///< Tune debug info for lldb.
    SCE,     ///< Tune debug info for SCE targets (e.g. PS4).
    DBX      ///< Tune debug info for dbx.
  };

  /// Enable abort calls when global instruction selection fails to lower/select
  /// an instruction.
  enum class GlobalISelAbortMode {
    Disable,        // Disable the abort.
    Enable,         // Enable the abort.
    DisableWithDiag // Disable the abort but emit a diagnostic on failure.
  };

  /// Indicates when and how the Swift async frame pointer bit should be set.
  enum class SwiftAsyncFramePointerMode {
    /// Determine whether to set the bit statically or dynamically based
    /// on the deployment target.
    DeploymentBased,
    /// Always set the bit.
    Always,
    /// Never set the bit.
    Never,
  };

  class TargetOptions {
  public:
    TargetOptions()
        : UnsafeFPMath(false), NoInfsFPMath(false), NoNaNsFPMath(false),
          NoTrappingFPMath(true), NoSignedZerosFPMath(false),
          ApproxFuncFPMath(false), EnableAIXExtendedAltivecABI(false),
          HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false),
          GuaranteedTailCallOpt(false), StackSymbolOrdering(true),
          EnableFastISel(false), EnableGlobalISel(false), UseInitArray(false),
          DisableIntegratedAS(false), RelaxELFRelocations(true),
          FunctionSections(false), DataSections(false),
          IgnoreXCOFFVisibility(false), XCOFFTracebackTable(true),
          UniqueSectionNames(true), UniqueBasicBlockSectionNames(false),
          TrapUnreachable(false), NoTrapAfterNoreturn(false), TLSSize(0),
          EmulatedTLS(false), EnableIPRA(false), EmitStackSizeSection(false),
          EnableMachineOutliner(false), EnableMachineFunctionSplitter(false),
          SupportsDefaultOutlining(false), EmitAddrsig(false),
          EmitCallSiteInfo(false), SupportsDebugEntryValues(false),
          EnableDebugEntryValues(false), ValueTrackingVariableLocations(false),
          ForceDwarfFrameSection(false), XRayFunctionIndex(true),
          DebugStrictDwarf(false), Hotpatch(false),
          PPCGenScalarMASSEntries(false), JMCInstrument(false),
          EnableCFIFixup(false), MisExpect(false), XCOFFReadOnlyPointers(false),
          FPDenormalMode(DenormalMode::IEEE, DenormalMode::IEEE) {}

    /// DisableFramePointerElim - This returns true if frame pointer elimination
    /// optimization should be disabled for the given machine function.
    bool DisableFramePointerElim(const MachineFunction &MF) const;

    /// If greater than 0, override the default value of
    /// MCAsmInfo::BinutilsVersion.
    std::pair<int, int> BinutilsVersion{0, 0};

    /// UnsafeFPMath - This flag is enabled when the
    /// -enable-unsafe-fp-math flag is specified on the command line.  When
    /// this flag is off (the default), the code generator is not allowed to
    /// produce results that are "less precise" than IEEE allows.  This includes
    /// use of X86 instructions like FSIN and FCOS instead of libcalls.
    unsigned UnsafeFPMath : 1;

    /// NoInfsFPMath - This flag is enabled when the
    /// -enable-no-infs-fp-math flag is specified on the command line. When
    /// this flag is off (the default), the code generator is not allowed to
    /// assume the FP arithmetic arguments and results are never +-Infs.
    unsigned NoInfsFPMath : 1;

    /// NoNaNsFPMath - This flag is enabled when the
    /// -enable-no-nans-fp-math flag is specified on the command line. When
    /// this flag is off (the default), the code generator is not allowed to
    /// assume the FP arithmetic arguments and results are never NaNs.
    unsigned NoNaNsFPMath : 1;

    /// NoTrappingFPMath - This flag is enabled when the
    /// -enable-no-trapping-fp-math is specified on the command line. This
    /// specifies that there are no trap handlers to handle exceptions.
    unsigned NoTrappingFPMath : 1;

    /// NoSignedZerosFPMath - This flag is enabled when the
    /// -enable-no-signed-zeros-fp-math is specified on the command line. This
    /// specifies that optimizations are allowed to treat the sign of a zero
    /// argument or result as insignificant.
    unsigned NoSignedZerosFPMath : 1;

    /// ApproxFuncFPMath - This flag is enabled when the
    /// -enable-approx-func-fp-math is specified on the command line. This
    /// specifies that optimizations are allowed to substitute math functions
    /// with approximate calculations
    unsigned ApproxFuncFPMath : 1;

    /// EnableAIXExtendedAltivecABI - This flag returns true when -vec-extabi is
    /// specified. The code generator is then able to use both volatile and
    /// nonvolatile vector registers. When false, the code generator only uses
    /// volatile vector registers which is the default setting on AIX.
    unsigned EnableAIXExtendedAltivecABI : 1;

    /// HonorSignDependentRoundingFPMath - This returns true when the
    /// -enable-sign-dependent-rounding-fp-math is specified.  If this returns
    /// false (the default), the code generator is allowed to assume that the
    /// rounding behavior is the default (round-to-zero for all floating point
    /// to integer conversions, and round-to-nearest for all other arithmetic
    /// truncations).  If this is enabled (set to true), the code generator must
    /// assume that the rounding mode may dynamically change.
    unsigned HonorSignDependentRoundingFPMathOption : 1;
    bool HonorSignDependentRoundingFPMath() const;

    /// NoZerosInBSS - By default some codegens place zero-initialized data to
    /// .bss section. This flag disables such behaviour (necessary, e.g. for
    /// crt*.o compiling).
    unsigned NoZerosInBSS : 1;

    /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
    /// specified on the commandline. When the flag is on, participating targets
    /// will perform tail call optimization on all calls which use the fastcc
    /// calling convention and which satisfy certain target-independent
    /// criteria (being at the end of a function, having the same return type
    /// as their parent function, etc.), using an alternate ABI if necessary.
    unsigned GuaranteedTailCallOpt : 1;

    /// StackSymbolOrdering - When true, this will allow CodeGen to order
    /// the local stack symbols (for code size, code locality, or any other
    /// heuristics). When false, the local symbols are left in whatever order
    /// they were generated. Default is true.
    unsigned StackSymbolOrdering : 1;

    /// EnableFastISel - This flag enables fast-path instruction selection
    /// which trades away generated code quality in favor of reducing
    /// compile time.
    unsigned EnableFastISel : 1;

    /// EnableGlobalISel - This flag enables global instruction selection.
    unsigned EnableGlobalISel : 1;

    /// EnableGlobalISelAbort - Control abort behaviour when global instruction
    /// selection fails to lower/select an instruction.
    GlobalISelAbortMode GlobalISelAbort = GlobalISelAbortMode::Enable;

    /// Control when and how the Swift async frame pointer bit should
    /// be set.
    SwiftAsyncFramePointerMode SwiftAsyncFramePointer =
        SwiftAsyncFramePointerMode::Always;

    /// UseInitArray - Use .init_array instead of .ctors for static
    /// constructors.
    unsigned UseInitArray : 1;

    /// Disable the integrated assembler.
    unsigned DisableIntegratedAS : 1;

    /// Compress DWARF debug sections.
    DebugCompressionType CompressDebugSections = DebugCompressionType::None;

    unsigned RelaxELFRelocations : 1;

    /// Emit functions into separate sections.
    unsigned FunctionSections : 1;

    /// Emit data into separate sections.
    unsigned DataSections : 1;

    /// Do not emit visibility attribute for xcoff.
    unsigned IgnoreXCOFFVisibility : 1;

    /// Emit XCOFF traceback table.
    unsigned XCOFFTracebackTable : 1;

    unsigned UniqueSectionNames : 1;

    /// Use unique names for basic block sections.
    unsigned UniqueBasicBlockSectionNames : 1;

    /// Emit target-specific trap instruction for 'unreachable' IR instructions.
    unsigned TrapUnreachable : 1;

    /// Do not emit a trap instruction for 'unreachable' IR instructions behind
    /// noreturn calls, even if TrapUnreachable is true.
    unsigned NoTrapAfterNoreturn : 1;

    /// Bit size of immediate TLS offsets (0 == use the default).
    unsigned TLSSize : 8;

    /// EmulatedTLS - This flag enables the emulated TLS model, using the
    /// emutls functions in the runtime library.
    unsigned EmulatedTLS : 1;

    /// This flag enables InterProcedural Register Allocation (IPRA).
    unsigned EnableIPRA : 1;

    /// Emit section containing metadata on function stack sizes.
    unsigned EmitStackSizeSection : 1;

    /// Enables the MachineOutliner pass.
    unsigned EnableMachineOutliner : 1;

    /// Enables the MachineFunctionSplitter pass.
    unsigned EnableMachineFunctionSplitter : 1;

    /// Set if the target supports default outlining behaviour.
    unsigned SupportsDefaultOutlining : 1;

    /// Emit address-significance table.
    unsigned EmitAddrsig : 1;

    /// Emit basic blocks into separate sections.
    BasicBlockSection BBSections = BasicBlockSection::None;

    /// Memory Buffer that contains information on sampled basic blocks and used
    /// to selectively generate basic block sections.
    std::shared_ptr<MemoryBuffer> BBSectionsFuncListBuf;

    /// The flag enables call site info production. It is used only for debug
    /// info, and it is restricted only to optimized code. This can be used for
    /// something else, so that should be controlled in the frontend.
    unsigned EmitCallSiteInfo : 1;
    /// Set if the target supports the debug entry values by default.
    unsigned SupportsDebugEntryValues : 1;
    /// When set to true, the EnableDebugEntryValues option forces production
    /// of debug entry values even if the target does not officially support
    /// it. Useful for testing purposes only. This flag should never be checked
    /// directly, always use \ref ShouldEmitDebugEntryValues instead.
    unsigned EnableDebugEntryValues : 1;
    /// NOTE: There are targets that still do not support debug entry value
    /// production.
    bool ShouldEmitDebugEntryValues() const;

    // When set to true, use experimental new debug variable location tracking,
    // which seeks to follow the values of variables rather than their location,
    // post isel.
    unsigned ValueTrackingVariableLocations : 1;

    /// Emit DWARF debug frame section.
    unsigned ForceDwarfFrameSection : 1;

    /// Emit XRay Function Index section
    unsigned XRayFunctionIndex : 1;

    /// When set to true, don't use DWARF extensions in later DWARF versions.
    /// By default, it is set to false.
    unsigned DebugStrictDwarf : 1;

    /// Emit the hotpatch flag in CodeView debug.
    unsigned Hotpatch : 1;

    /// Enables scalar MASS conversions
    unsigned PPCGenScalarMASSEntries : 1;

    /// Enable JustMyCode instrumentation.
    unsigned JMCInstrument : 1;

    /// Enable the CFIFixup pass.
    unsigned EnableCFIFixup : 1;

    /// When set to true, enables MisExpect diagnostics.
    /// By default, it is set to false.
    unsigned MisExpect : 1;

    /// When set to true, const objects with relocatable address values are put
    /// into the RO data section.
    unsigned XCOFFReadOnlyPointers : 1;

    /// Name of the stack usage file (i.e., .su file) if the user passes
    /// -fstack-usage. If empty, it can be implied that -fstack-usage is not
    /// passed on the command line.
    std::string StackUsageOutput;

    /// If greater than 0, override TargetLoweringBase::PrefLoopAlignment.
    unsigned LoopAlignment = 0;

    /// FloatABIType - This setting is set when the -float-abi=xxx option is
    /// specified on the command line. It may be Default, Soft, or Hard.
    /// Default selects the target's default behavior. Soft selects the ABI for
    /// software floating point, but does not indicate that FP hardware may not
    /// be used. Such a combination is unfortunately popular (e.g.
    /// arm-apple-darwin). Hard presumes that the normal FP ABI is used.
    FloatABI::ABIType FloatABIType = FloatABI::Default;

    /// AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
    /// This controls the creation of fused FP ops that store intermediate
    /// results in higher precision than IEEE allows (E.g. FMAs).
    ///
    /// Fast mode - allows formation of fused FP ops whenever they're
    /// profitable.
    /// Standard mode - allow fusion only for 'blessed' FP ops. At present the
    /// only blessed op is the fmuladd intrinsic. In the future more blessed ops
    /// may be added.
    /// Strict mode - allow fusion only if/when it can be proven that the excess
    /// precision won't affect the result.
    ///
    /// Note: This option only controls formation of fused ops by the
    /// optimizers.  Fused operations that are explicitly specified (e.g. FMA
    /// via the llvm.fma.* intrinsic) will always be honored, regardless of
    /// the value of this option.
    FPOpFusion::FPOpFusionMode AllowFPOpFusion = FPOpFusion::Standard;

    /// ThreadModel - This flag specifies the type of threading model to assume
    /// for things like atomics.
    ThreadModel::Model ThreadModel = ThreadModel::POSIX;

    /// EABIVersion - This flag specifies the EABI version.
    EABI EABIVersion = EABI::Default;

    /// Which debugger to tune for.
    DebuggerKind DebuggerTuning = DebuggerKind::Default;

  private:
    /// Flushing mode to assume in default FP environment.
    DenormalMode FPDenormalMode;

    /// Flushing mode to assume in default FP environment, for float/vector of
    /// float.
    DenormalMode FP32DenormalMode;

  public:
    void setFPDenormalMode(DenormalMode Mode) {
      FPDenormalMode = Mode;
    }

    void setFP32DenormalMode(DenormalMode Mode) {
      FP32DenormalMode = Mode;
    }

    DenormalMode getRawFPDenormalMode() const {
      return FPDenormalMode;
    }

    DenormalMode getRawFP32DenormalMode() const {
      return FP32DenormalMode;
    }

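    /// Return the denormal mode to assume for the given floating-point
    /// semantics. (Editor's note, hedged: per the setters above, the
    /// single-precision FP32DenormalMode is intended to take precedence over
    /// the general FPDenormalMode for float; the definition is out of line.)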
    DenormalMode getDenormalMode(const fltSemantics &FPType) const;

    /// What exception model to use
    ExceptionHandling ExceptionModel = ExceptionHandling::None;

    /// Machine level options.
    MCTargetOptions MCOptions;

    /// Stores the filename/path of the final .o/.obj file, to be written in the
    /// debug information. This is used for emitting the CodeView S_OBJNAME
    /// record.
    std::string ObjectFilenameForDebug;
  };

} // End llvm namespace

#endif
//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces which should be
// implemented by each target which is using a TableGen based code generator.
//
//===----------------------------------------------------------------------===//

// Include all information about LLVM intrinsics.
include "llvm/IR/Intrinsics.td"

class Predicate; // Forward def

//===----------------------------------------------------------------------===//
// Register file description - These classes are used to fill in the target
// description classes.

class HwMode<string FS, list<Predicate> Ps> {
  // A string representing subtarget features that turn on this HW mode.
  // For example, "+feat1,-feat2" will indicate that the mode is active
  // when "feat1" is enabled and "feat2" is disabled at the same time.
  // Any other features are not checked.
  // When multiple modes are used, they should be mutually exclusive,
  // otherwise the results are unpredictable.
  string Features = FS;

  // A list of predicates that turn on this HW mode.
  list<Predicate> Predicates = Ps;
}

// A special mode recognized by tablegen. This mode is considered active
// when no other mode is active. For targets that do not use specific hw
// modes, this is the only mode.
def DefaultMode : HwMode<"", []>;
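
// For example, a hypothetical target with an optional 64-bit mode could
// define (a sketch; HasFeat64 stands for a target-defined Predicate, which
// is not declared in this file):
//
//   def Feat64Mode : HwMode<"+feat64", [HasFeat64]>;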

// A class used to associate objects with HW modes. It is only intended to
// be used as a base class, where the derived class should contain a member
// "Objects", which is a list of the same length as the list of modes.
// The n-th element on the Objects list will be associated with the n-th
// element on the Modes list.
class HwModeSelect<list<HwMode> Ms> {
  list<HwMode> Modes = Ms;
}

// A common class that implements a counterpart of ValueType, which is
// dependent on a HW mode. This class inherits from ValueType itself,
// which makes it possible to use objects of this class where ValueType
// objects could be used. This is specifically applicable to selection
// patterns.
class ValueTypeByHwMode<list<HwMode> Ms, list<ValueType> Ts>
    : HwModeSelect<Ms>, ValueType<0, 0> {
  // The length of this list must be the same as the length of Ms.
  list<ValueType> Objects = Ts;
}

// A class representing the register size, spill size and spill alignment
// in bits of a register.
class RegInfo<int RS, int SS, int SA> {
  int RegSize = RS;         // Register size in bits.
  int SpillSize = SS;       // Spill slot size in bits.
  int SpillAlignment = SA;  // Spill slot alignment in bits.
}

// The register size/alignment information, parameterized by a HW mode.
class RegInfoByHwMode<list<HwMode> Ms = [], list<RegInfo> Ts = []>
    : HwModeSelect<Ms> {
  // The length of this list must be the same as the length of Ms.
  list<RegInfo> Objects = Ts;
}
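
// A minimal sketch using the hypothetical Feat64Mode from above: both the
// register value type and the spill geometry switch together with the HW
// mode, as a 32/64-bit target might set things up.
//
//   def XLenVT : ValueTypeByHwMode<[DefaultMode, Feat64Mode], [i32, i64]>;
//   def XLenRI : RegInfoByHwMode<[DefaultMode, Feat64Mode],
//                                [RegInfo<32,32,32>, RegInfo<64,64,64>]>;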

// SubRegIndex - Use instances of SubRegIndex to identify subregisters.
class SubRegIndex<int size, int offset = 0> {
  string Namespace = "";

  // Size - Size (in bits) of the sub-registers represented by this index.
  int Size = size;

  // Offset - Offset of the first bit that is part of this sub-register index.
  // Set it to -1 if the same index is used to represent sub-registers that can
  // be at different offsets (for example when using an index to access an
  // element in a register tuple).
  int Offset = offset;

  // ComposedOf - A list of two SubRegIndex instances, [A, B].
  // This indicates that this SubRegIndex is the result of composing A and B.
  // See ComposedSubRegIndex.
  list<SubRegIndex> ComposedOf = [];

  // CoveringSubRegIndices - A list of two or more sub-register indexes that
  // cover this sub-register.
  //
  // This field should normally be left blank as TableGen can infer it.
  //
  // TableGen automatically detects sub-registers that straddle the registers
  // in the SubRegs field of a Register definition. For example:
  //
  //   Q0    = dsub_0 -> D0, dsub_1 -> D1
  //   Q1    = dsub_0 -> D2, dsub_1 -> D3
  //   D1_D2 = dsub_0 -> D1, dsub_1 -> D2
  //   QQ0   = qsub_0 -> Q0, qsub_1 -> Q1
  //
  // TableGen will infer that D1_D2 is a sub-register of QQ0. It will be given
  // the synthetic index dsub_1_dsub_2 unless some SubRegIndex is defined with
  // CoveringSubRegIndices = [dsub_1, dsub_2].
  list<SubRegIndex> CoveringSubRegIndices = [];
}

// ComposedSubRegIndex - A sub-register that is the result of composing A and B.
// Offset is set to the sum of A and B's Offsets. Size is set to B's Size.
class ComposedSubRegIndex<SubRegIndex A, SubRegIndex B>
  : SubRegIndex<B.Size, !cond(!eq(A.Offset, -1): -1,
                              !eq(B.Offset, -1): -1,
                              true:              !add(A.Offset, B.Offset))> {
  // See SubRegIndex.
  let ComposedOf = [A, B];
}
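
// An ARM-flavoured sketch (index names are illustrative): ssub_* address
// 32-bit lanes, dsub_* address 64-bit lanes, and composing dsub_1 with
// ssub_0 yields the index of the third 32-bit lane at offset 64.
//
//   def ssub_0 : SubRegIndex<32>;
//   def ssub_1 : SubRegIndex<32, 32>;
//   def dsub_1 : SubRegIndex<64, 64>;
//   def ssub_2 : ComposedSubRegIndex<dsub_1, ssub_0>;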

// RegAltNameIndex - The alternate name set to use for register operands of
// this register class when printing.
class RegAltNameIndex {
  string Namespace = "";

  // A set to be used if the name for a register is not defined in this set.
  // This allows creating name sets with only a few alternative names.
  RegAltNameIndex FallbackRegAltNameIndex = ?;
}
def NoRegAltName : RegAltNameIndex;

// Register - You should define one instance of this class for each register
// in the target machine.  String n will become the "name" of the register.
class Register<string n, list<string> altNames = []> {
  string Namespace = "";
  string AsmName = n;
  list<string> AltNames = altNames;

  // Aliases - A list of registers that this register overlaps with.  A read or
  // modification of this register can potentially read or modify the aliased
  // registers.
  list<Register> Aliases = [];

  // SubRegs - A list of registers that are parts of this register. Note these
  // are "immediate" sub-registers and the registers within the list do not
  // themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX],
  // not [AX, AH, AL].
  list<Register> SubRegs = [];

  // SubRegIndices - For each register in SubRegs, specify the SubRegIndex used
  // to address it. Sub-sub-register indices are automatically inherited from
  // SubRegs.
  list<SubRegIndex> SubRegIndices = [];

  // RegAltNameIndices - The alternate name indices which are valid for this
  // register.
  list<RegAltNameIndex> RegAltNameIndices = [];

  // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
  // These values can be determined by locating the <target>.h file in the
  // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES.  The
  // order of these names corresponds to the enumeration used by gcc.  A value
  // of -1 indicates that the gcc number is undefined and -2 that the register
  // number is invalid for this mode/flavour.
  list<int> DwarfNumbers = [];

  // CostPerUse - Additional cost of instructions using this register compared
  // to other registers in its class. The register allocator will try to
  // minimize the number of instructions using a register with a CostPerUse.
  // This is used by the ARC target, by the ARM Thumb and x86-64 targets, where
  // some registers require larger instruction encodings, by the RISC-V target,
  // where some registers preclude using some C instructions. By making it a
  // list, targets can have multiple cost models associated with each register
  // and can choose one specific cost model per Machine Function by overriding
  // TargetRegisterInfo::getRegisterCostTableIndex. In the end every register
  // has the same number of cost values, namely the maximum number of
  // CostPerUse values specified for any register; missing entries are filled
  // with zeros. The cost type is restricted to uint8_t in the generated
  // table, which considerably reduces the table size.
  list<int> CostPerUse = [0];

  // CoveredBySubRegs - When this bit is set, the value of this register is
  // completely determined by the value of its sub-registers.  For example, the
  // x86 register AX is covered by its sub-registers AL and AH, but EAX is not
  // covered by its sub-register AX.
  bit CoveredBySubRegs = false;

  // HWEncoding - The target specific hardware encoding for this register.
  bits<16> HWEncoding = 0;

  bit isArtificial = false;

  // isConstant - This register always holds a constant value (e.g. the zero
  // register in architectures such as MIPS)
  bit isConstant = false;
}

// RegisterWithSubRegs - This can be used to define instances of Register which
// need to specify sub-registers.
// List "subregs" specifies which registers are sub-registers to this one. This
// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc.
// This allows the code generator to be careful not to put two values with
// overlapping live ranges into registers which alias.
class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
  let SubRegs = subregs;
}
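
// An x86-flavoured sketch (register and index names are illustrative):
// AX consists of the immediate sub-registers AL and AH, addressed by the
// indices sub_8bit and sub_8bit_hi, and its value is fully covered by them.
//
//   def AL : Register<"al">;
//   def AH : Register<"ah">;
//   def AX : RegisterWithSubRegs<"ax", [AL, AH]> {
//     let SubRegIndices = [sub_8bit, sub_8bit_hi];
//     let CoveredBySubRegs = true;
//   }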

// DAGOperand - An empty base class that unifies RegisterClass's and other forms
// of Operand's that are legal as type qualifiers in DAG patterns.  This should
// only ever be used for defining multiclasses that are polymorphic over both
// RegisterClass's and other Operand's.
class DAGOperand {
  string OperandNamespace = "MCOI";
  string DecoderMethod = "";
}

// RegisterClass - Now that all of the registers are defined, and aliases
// between registers are defined, specify which registers belong to which
// register classes.  This also defines the default allocation order of
// registers by register allocators.
//
class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
                    dag regList, RegAltNameIndex idx = NoRegAltName>
  : DAGOperand {
  string Namespace = namespace;

  // The register size/alignment information, parameterized by a HW mode.
  RegInfoByHwMode RegInfos;

  // RegTypes - Specify the list of ValueTypes of the registers in this
  // register class.  Note that all registers in a register class must have
  // the same ValueTypes.  This is a list because some targets permit storing
  // different types in the same register, for example vector values with
  // 128-bit total size, but different count/size of items, like SSE on x86.
  //
  list<ValueType> RegTypes = regTypes;

  // Size - Specify the spill size in bits of the registers.  A default value of
  // zero lets tablegen pick an appropriate size.
  int Size = 0;

  // Alignment - Specify the alignment required of the registers when they are
  // stored or loaded to memory.
  //
  int Alignment = alignment;

  // CopyCost - This value is used to specify the cost of copying a value
  // between two registers in this register class. The default value is one
  // meaning it takes a single instruction to perform the copying. A negative
  // value means copying is extremely expensive or impossible.
  int CopyCost = 1;

  // MemberList - Specify which registers are in this class.  If the
  // allocation_order_* methods are not specified, this also defines the order of
  // allocation used by the register allocator.
  //
  dag MemberList = regList;

  // AltNameIndex - The alternate register name to use when printing operands
  // of this register class. Every register in the register class must have
  // a valid alternate name for the given index.
  RegAltNameIndex altNameIndex = idx;

  // isAllocatable - Specify that the register class can be used for virtual
  // registers and register allocation.  Some register classes are only used to
  // model instruction operand constraints, and should have isAllocatable = 0.
  bit isAllocatable = true;

  // AltOrders - List of alternative allocation orders. The default order is
  // MemberList itself, and that is good enough for most targets since the
  // register allocators automatically remove reserved registers and move
  // callee-saved registers to the end.
  list<dag> AltOrders = [];

  // AltOrderSelect - The body of a function that selects the allocation order
  // to use in a given machine function. The code will be inserted in a
  // function like this:
  //
  //   static inline unsigned f(const MachineFunction &MF) { ... }
  //
  // The function should return 0 to select the default order defined by
  // MemberList, 1 to select the first AltOrders entry and so on.
  code AltOrderSelect = [{}];

  // Specify allocation priority for register allocators using a greedy
  // heuristic. Classes with higher priority values are assigned first. This is
  // useful as it is sometimes beneficial to assign registers to highly
  // constrained classes first. The value has to be in the range [0,31].
  int AllocationPriority = 0;

  // Force register class to use greedy's global heuristic for all
  // registers in this class. This should more aggressively try to
  // avoid spilling in pathological cases.
  bit GlobalPriority = false;

  // Generate register pressure set for this register class and any class
  // synthesized from it. Set to 0 to inhibit unneeded pressure sets.
  bit GeneratePressureSet = true;

  // Weight override for register pressure calculation. This is the value
  // TargetRegisterClass::getRegClassWeight() will return. The weight is in
  // units of pressure for this register class. If unset tablegen will
  // calculate a weight based on the number of register units in this
  // register class's registers. The weight is per register.
  int Weight = ?;

  // The diagnostic type to present when referencing this operand in a match
  // failure error message. If this is empty, the default Match_InvalidOperand
  // diagnostic type will be used. If this is "<name>", a Match_<name> enum
  // value will be generated and used for this operand type. The target
  // assembly parser is responsible for converting this into a user-facing
  // diagnostic message.
  string DiagnosticType = "";

  // A diagnostic message to emit when an invalid value is provided for this
  // register class when it is being used as an assembly operand. If this is
  // non-empty, an anonymous diagnostic type enum value will be generated, and
  // the assembly matcher will provide a function to map from diagnostic types
  // to message strings.
  string DiagnosticString = "";

  // Target-specific flags. This becomes the TSFlags field in TargetRegisterClass.
  bits<8> TSFlags = 0;

  // If set then consider this register class to be the base class for registers in
  // its MemberList.  The base class for registers present in multiple base register
  // classes will be resolved in the order defined by this value, with lower values
  // taking precedence over higher ones.  Ties are resolved by enumeration order.
  int BaseClassOrder = ?;
}
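
// A minimal sketch for a hypothetical target: a class of 32-bit general
// purpose registers, 32-bit aligned in memory, whose default allocation
// order is the member list itself.
//
//   def GPR : RegisterClass<"MyTarget", [i32], 32,
//                           (add R0, R1, R2, R3, SP)>;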

// The memberList in a RegisterClass is a dag of set operations. TableGen
// evaluates these set operations and expands them into register lists. These
// are the most common operations; see test/TableGen/SetTheory.td for more
// examples of what is possible:
//
// (add R0, R1, R2) - Set Union. Each argument can be an individual register, a
// register class, or a sub-expression. This is also the way to simply list
// registers.
//
// (sub GPR, SP) - Set difference. Subtract the last arguments from the first.
//
// (and GPR, CSR) - Set intersection. All registers from the first set that are
// also in the second set.
//
// (sequence "R%u", 0, 15) -> [R0, R1, ..., R15]. Generate a sequence of
// numbered registers.  Takes an optional 4th operand which is a stride to use
// when generating the sequence.
//
// (shl GPR, 4) - Remove the first N elements.
//
// (trunc GPR, 4) - Truncate after the first N elements.
//
// (rotl GPR, 1) - Rotate N places to the left.
//
// (rotr GPR, 1) - Rotate N places to the right.
//
// (decimate GPR, 2) - Pick every N'th element, starting with the first.
//
// (interleave A, B, ...) - Interleave the elements from each argument list.
//
// All of these operators work on ordered sets, not lists. That means
// duplicates are removed from sub-expressions.

// Set operators. The rest is defined in TargetSelectionDAG.td.
def sequence;
def decimate;
def interleave;
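
// For example, a hypothetical class holding only the even-numbered registers
// can be built from the operators above:
//
//   def EvenRegs : RegisterClass<"MyTarget", [i32], 32,
//                                (decimate (sequence "R%u", 0, 15), 2)>;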

// RegisterTuples - Automatically generate super-registers by forming tuples of
// sub-registers. This is useful for modeling register sequence constraints
// with pseudo-registers that are larger than the architectural registers.
//
// The sub-register lists are zipped together:
//
//   def EvenOdd : RegisterTuples<[sube, subo], [(add R0, R2), (add R1, R3)]>;
//
// Generates the same registers as:
//
//   let SubRegIndices = [sube, subo] in {
//     def R0_R1 : RegisterWithSubRegs<"", [R0, R1]>;
//     def R2_R3 : RegisterWithSubRegs<"", [R2, R3]>;
//   }
//
// The generated pseudo-registers inherit super-classes and fields from their
// first sub-register. Most fields from the Register class are inferred, and
// the AsmName and Dwarf numbers are cleared.
//
// RegisterTuples instances can be used in other set operations to form
// register classes and so on. This is the only way of using the generated
// registers.
//
// RegNames may be specified to supply asm names for the generated tuples.
// If used, it must have the same length as the list of produced registers.
class RegisterTuples<list<SubRegIndex> Indices, list<dag> Regs,
                     list<string> RegNames = []> {
  // SubRegs - N lists of registers to be zipped up. Super-registers are
  // synthesized from the first element of each SubRegs list, the second
  // element and so on.
  list<dag> SubRegs = Regs;

  // SubRegIndices - N SubRegIndex instances. This provides the names of the
  // sub-registers in the synthesized super-registers.
  list<SubRegIndex> SubRegIndices = Indices;

  // List of asm names for the generated tuple registers.
  list<string> RegAsmNames = RegNames;
}

// RegisterCategory - This class is a list of RegisterClasses that belong to a
// general category --- e.g. "general purpose" or "fixed" registers. This is
// useful for identifying registers in a generic way instead of having
// information about a specific target's registers.
class RegisterCategory<list<RegisterClass> classes> {
  // Classes - A list of register classes that fall within the category.
  list<RegisterClass> Classes = classes;
}

//===----------------------------------------------------------------------===//
// DwarfRegNum - This class provides a mapping of the llvm register enumeration
// to the register numbering used by gcc and gdb.  These values are used by a
// debug information writer to describe where values may be located during
// execution.
class DwarfRegNum<list<int> Numbers> {
  // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
  // These values can be determined by locating the <target>.h file in the
  // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES.  The
  // order of these names corresponds to the enumeration used by gcc.  A value
  // of -1 indicates that the gcc number is undefined and -2 that the register
  // number is invalid for this mode/flavour.
  list<int> DwarfNumbers = Numbers;
}

// DwarfRegAlias - This class declares that a given register uses the same dwarf
// numbers as another one. This is useful for making it clear that the two
// registers do have the same number. It also lets us build a mapping
// from dwarf register number to llvm register.
class DwarfRegAlias<Register reg> {
      Register DwarfAlias = reg;
}
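
// A minimal sketch (hypothetical registers): R0 maps to DWARF register 0,
// and an alternate-width alias X0 reuses the same number.
//
//   def R0 : Register<"r0">, DwarfRegNum<[0]>;
//   def X0 : Register<"x0">, DwarfRegAlias<R0>;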

//===----------------------------------------------------------------------===//
// Pull in the common support for MCPredicate (portable scheduling predicates).
//
include "llvm/Target/TargetInstrPredicate.td"

//===----------------------------------------------------------------------===//
// Pull in the common support for scheduling
//
include "llvm/Target/TargetSchedule.td"

class InstructionEncoding {
  // Size of encoded instruction.
  int Size;

  // The "namespace" in which this instruction exists, on targets like ARM
  // which multiple ISA namespaces exist.
  string DecoderNamespace = "";

  // List of predicates which will be turned into isel matching code.
  list<Predicate> Predicates = [];

  string DecoderMethod = "";

  // Is the instruction decoder method able to completely determine if the
  // given instruction is valid or not. If the TableGen definition of the
  // instruction specifies bitpattern A??B where A and B are static bits, the
  // hasCompleteDecoder flag says whether the decoder method fully handles the
  // ?? space, i.e. if it is a final arbiter for the instruction validity.
  // If not then the decoder attempts to continue decoding when the decoder
  // method fails.
  //
  // This makes it possible to handle situations where the encoding is not
  // fully orthogonal. Example:
  // * InstA with bitpattern 0b0000????,
  // * InstB with bitpattern 0b000000?? but the associated decoder method
  //   DecodeInstB() returns Fail when ?? is 0b00 or 0b11.
  //
  // The decoder tries to decode a bitpattern that matches both InstA and
  // InstB bitpatterns first as InstB (because it is the most specific
  // encoding). In the default case (hasCompleteDecoder = 1), when
  // DecodeInstB() returns Fail the bitpattern gets rejected. By setting
  // hasCompleteDecoder = 0 in InstB, the decoder is informed that
  // DecodeInstB() is not able to determine if all possible values of ?? are
  // valid or not. If DecodeInstB() returns Fail the decoder will attempt to
  // decode the bitpattern as InstA too.
  bit hasCompleteDecoder = true;
}
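
// In the InstB scenario above, a target would mark the encoding roughly like
// this (a sketch; DecodeInstB is a hypothetical decoder function):
//
//   let DecoderMethod = "DecodeInstB", hasCompleteDecoder = false in
//   def InstB : ...;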

// Allows specifying an InstructionEncoding by HwMode. If an Instruction specifies
// an EncodingByHwMode, its Inst and Size members are ignored and Ts are used
// to encode and decode based on HwMode.
class EncodingByHwMode<list<HwMode> Ms = [], list<InstructionEncoding> Ts = []>
    : HwModeSelect<Ms> {
  // The length of this list must be the same as the length of Ms.
  list<InstructionEncoding> Objects = Ts;
}

//===----------------------------------------------------------------------===//
// Instruction set description - These classes correspond to the C++ classes in
// the Target/TargetInstrInfo.h file.
//
class Instruction : InstructionEncoding {
  string Namespace = "";

  dag OutOperandList;       // A dag containing the MI def operand list.
  dag InOperandList;        // A dag containing the MI use operand list.
  string AsmString = "";    // The .s format to print the instruction with.

  // Allows specifying a canonical InstructionEncoding by HwMode. If non-empty,
  // the Inst member of this Instruction is ignored.
  EncodingByHwMode EncodingInfos;

  // Pattern - Set to the DAG pattern for this instruction, if we know of one,
  // otherwise, uninitialized.
  list<dag> Pattern;

  // The following state will eventually be inferred automatically from the
  // instruction pattern.

  list<Register> Uses = []; // Default to using no non-operand registers
  list<Register> Defs = []; // Default to modifying no non-operand registers

  // Predicates - List of predicates which will be turned into isel matching
  // code.
  list<Predicate> Predicates = [];

  // Size - Size of encoded instruction, or zero if the size cannot be determined
  // from the opcode.
  int Size = 0;

  // Code size, for instruction selection.
  // FIXME: What does this actually mean?
  int CodeSize = 0;

  // Added complexity passed onto matching pattern.
  int AddedComplexity  = 0;

  // Indicates if this is a pre-isel opcode that should be
  // legalized/regbankselected/selected.
  bit isPreISelOpcode = false;

  // These bits capture information about the high-level semantics of the
  // instruction.
  bit isReturn     = false;     // Is this instruction a return instruction?
  bit isBranch     = false;     // Is this instruction a branch instruction?
  bit isEHScopeReturn = false;  // Does this instruction end an EH scope?
  bit isIndirectBranch = false; // Is this instruction an indirect branch?
  bit isCompare    = false;     // Is this instruction a comparison instruction?
  bit isMoveImm    = false;     // Is this instruction a move immediate instruction?
  bit isMoveReg    = false;     // Is this instruction a move register instruction?
  bit isBitcast    = false;     // Is this instruction a bitcast instruction?
  bit isSelect     = false;     // Is this instruction a select instruction?
  bit isBarrier    = false;     // Can control flow fall through this instruction?
  bit isCall       = false;     // Is this instruction a call instruction?
  bit isAdd        = false;     // Is this instruction an add instruction?
  bit isTrap       = false;     // Is this instruction a trap instruction?
  bit canFoldAsLoad = false;    // Can this be folded as a simple memory operand?
  bit mayLoad      = ?;         // Is it possible for this inst to read memory?
  bit mayStore     = ?;         // Is it possible for this inst to write memory?
  bit mayRaiseFPException = false; // Can this raise a floating-point exception?
  bit isConvertibleToThreeAddress = false;  // Can this 2-addr instruction promote?
  bit isCommutable = false;     // Is this 3 operand instruction commutable?
  bit isTerminator = false;     // Is this part of the terminator for a basic block?
  bit isReMaterializable = false; // Is this instruction re-materializable?
  bit isPredicable = false;     // 1 means this instruction is predicable
                                // even if it does not have any operand
                                // tablegen can identify as a predicate
  bit isUnpredicable = false;   // 1 means this instruction is not predicable
                                // even if it _does_ have a predicate operand
  bit hasDelaySlot = false;     // Does this instruction have a delay slot?
  bit usesCustomInserter = false; // Pseudo instr needing special help.
  bit hasPostISelHook = false;  // To be *adjusted* after isel by target hook.
  bit hasCtrlDep   = false;     // Does this instruction r/w ctrl-flow chains?
  bit isNotDuplicable = false;  // Is it unsafe to duplicate this instruction?
  bit isConvergent = false;     // Is this instruction convergent?
  bit isAuthenticated = false;  // Does this instruction authenticate a pointer?
  bit isAsCheapAsAMove = false; // As cheap (or cheaper) than a move instruction.
  bit hasExtraSrcRegAllocReq = false; // Sources have special regalloc requirement?
  bit hasExtraDefRegAllocReq = false; // Defs have special regalloc requirement?
  bit isRegSequence = false;    // Is this instruction a kind of reg sequence?
                                // If so, make sure to override
                                // TargetInstrInfo::getRegSequenceLikeInputs.
  bit isPseudo     = false;     // Is this instruction a pseudo-instruction?
                                // If so, won't have encoding information for
                                // the [MC]CodeEmitter stuff.
  bit isMeta = false;           // Is this instruction a meta-instruction?
                                // If so, won't produce any output in the form of
                                // executable instructions
  bit isExtractSubreg = false;  // Is this instruction a kind of extract subreg?
                                // If so, make sure to override
                                // TargetInstrInfo::getExtractSubregLikeInputs.
  bit isInsertSubreg = false;   // Is this instruction a kind of insert subreg?
                                // If so, make sure to override
                                // TargetInstrInfo::getInsertSubregLikeInputs.
  bit variadicOpsAreDefs = false; // Are variadic operands definitions?

  // Does the instruction have side effects that are not captured by any
  // operands of the instruction or other flags?
  bit hasSideEffects = ?;

  // Is this instruction a "real" instruction (with a distinct machine
  // encoding), or is it a pseudo instruction used for codegen modeling
  // purposes.
  // FIXME: For now this is distinct from isPseudo, above, as code-gen-only
  // instructions can (and often do) still have encoding information
  // associated with them. Once we've migrated all of them over to true
  // pseudo-instructions that are lowered to real instructions prior to
  // the printer/emitter, we can remove this attribute and just use isPseudo.
  //
  // The intended use is:
  // isPseudo: Does not have encoding information and should be expanded,
  //   at the latest, during lowering to MCInst.
  //
  // isCodeGenOnly: Does have encoding information and can go through to the
  //   CodeEmitter unchanged, but duplicates a canonical instruction
  //   definition's encoding and should be ignored when constructing the
  //   assembler match tables.
  bit isCodeGenOnly = false;

  // Is this instruction a pseudo instruction for use by the assembler parser.
  bit isAsmParserOnly = false;

  // This instruction is not expected to be queried for scheduling latencies
  // and therefore needs no scheduling information even for a complete
  // scheduling model.
  bit hasNoSchedulingInfo = false;

  InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling.

  // Scheduling information from TargetSchedule.td.
  list<SchedReadWrite> SchedRW;

  string Constraints = "";  // OperandConstraint, e.g. $src = $dst.

  /// DisableEncoding - List of operand names (e.g. "$op1,$op2") that should not
  /// be encoded into the output machineinstr.
  string DisableEncoding = "";

  string PostEncoderMethod = "";

  /// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
  bits<64> TSFlags = 0;

  ///@name Assembler Parser Support
  ///@{

  string AsmMatchConverter = "";

  /// TwoOperandAliasConstraint - Enable TableGen to auto-generate a
  /// two-operand matcher inst-alias for a three operand instruction.
  /// For example, the arm instruction "add r3, r3, r5" can be written
  /// as "add r3, r5". The constraint is of the same form as a tied-operand
  /// constraint. For example, "$Rn = $Rd".
  string TwoOperandAliasConstraint = "";

  /// Assembler variant name to use for this instruction. If specified, the
  /// instruction will be present only in the MatchTable for this variant. If
  /// not specified, assembler variants will be determined based on the
  /// AsmString.
  string AsmVariantName = "";

  ///@}

  /// UseNamedOperandTable - If set, the operand indices of this instruction
  /// can be queried via the getNamedOperandIdx() function which is generated
  /// by TableGen.
  bit UseNamedOperandTable = false;

  /// Should generate helper functions that help you to map a logical operand's
  /// index to the underlying MIOperand's index.
  /// In most architectures logical operand indices are equal to
  /// MIOperand indices, but for some CISC architectures, a logical operand
  /// might consist of multiple MIOperands (e.g. a logical operand that
  /// uses a complex address mode).
  bit UseLogicalOperandMappings = false;

  /// Should FastISel ignore this instruction. For certain ISAs, they have
  /// instructions which map to the same ISD Opcode, value type operands and
  /// instruction selection predicates. FastISel cannot handle such cases, but
  /// SelectionDAG can.
  bit FastISelShouldIgnore = false;

  /// HasPositionOrder: Tell tablegen to sort the instructions by record ID,
  /// so that an instruction that is defined earlier is sorted earlier in the
  /// assembly matching table.
  bit HasPositionOrder = false;
}
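
// A minimal sketch of a concrete instruction built on this class (the target
// name, the GPR class, and the pattern are illustrative; the set/add pattern
// operators come from TargetSelectionDAG.td):
//
//   def ADDrr : Instruction {
//     let Namespace = "MyTarget";
//     let OutOperandList = (outs GPR:$dst);
//     let InOperandList = (ins GPR:$src1, GPR:$src2);
//     let AsmString = "add $dst, $src1, $src2";
//     let Pattern = [(set GPR:$dst, (add GPR:$src1, GPR:$src2))];
//     let isCommutable = true;
//   }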

/// Defines a Pat match between compressed and uncompressed instruction.
/// The relationship and helper function generation are handled by
/// CompressInstEmitter backend.
class CompressPat<dag input, dag output, list<Predicate> predicates = []> {
  /// Uncompressed instruction description.
  dag Input = input;
  /// Compressed instruction description.
  dag Output = output;
  /// Predicates that must be true for this to match.
  list<Predicate> Predicates = predicates;
  /// Set for a duplicate match that differs only in a tied operand; such a
  /// pattern is used for compression only.
  bit isCompressOnly = false;
}
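
// A RISC-V-flavoured sketch: a 32-bit ADD whose destination is tied to the
// first source can be emitted as the 16-bit C.ADD (instruction and register
// class names follow the RISC-V backend and are only illustrative here):
//
//   def : CompressPat<(ADD GPRNoX0:$rs1, GPRNoX0:$rs1, GPRNoX0:$rs2),
//                     (C_ADD GPRNoX0:$rs1, GPRNoX0:$rs2)>;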

/// Defines an additional encoding that disassembles to the given instruction.
/// Like Instruction, the Inst and SoftFail fields are omitted to allow targets
/// to specify their size.
class AdditionalEncoding<Instruction I> : InstructionEncoding {
  Instruction AliasOf = I;
}

/// PseudoInstExpansion - Expansion information for a pseudo-instruction.
/// Which instruction it expands to and how the operands map from the
/// pseudo.
class PseudoInstExpansion<dag Result> {
  dag ResultInst = Result;     // The instruction to generate.
  bit isPseudo = true;
}
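
// A RISC-V-flavoured sketch (names are illustrative; MyPseudo is a
// hypothetical base class): a return pseudo that expands to a real indirect
// jump through the return-address register.
//
//   def PseudoRET : MyPseudo<(outs), (ins)>,
//                   PseudoInstExpansion<(JALR X0, X1, 0)>;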

/// Predicates - These are extra conditionals which are turned into instruction
/// selector matching code. Currently each predicate is just a string.
class Predicate<string cond> {
  string CondString = cond;

  /// AssemblerMatcherPredicate - If this feature can be used by the assembler
  /// matcher, this is true.  Targets should set this by inheriting their
  /// feature from the AssemblerPredicate class in addition to Predicate.
  bit AssemblerMatcherPredicate = false;

  /// AssemblerCondDag - Set of subtarget features being tested, used as an
  /// alternative condition string for the assembler matcher. Must be used
  /// with (all_of) to indicate that all features must be present, or (any_of)
  /// to indicate that at least one must be. The required lack of presence of
  /// a feature can be tested using a (not) node including the feature.
  /// e.g. "(all_of ModeThumb)" is translated to "(Bits & ModeThumb) != 0".
  ///      "(all_of (not ModeThumb))" is translated to
  ///      "(Bits & ModeThumb) == 0".
  ///      "(all_of ModeThumb, FeatureThumb2)" is translated to
  ///      "(Bits & ModeThumb) != 0 && (Bits & FeatureThumb2) != 0".
  ///      "(any_of ModeTumb, FeatureThumb2)" is translated to
  ///      "(Bits & ModeThumb) != 0 || (Bits & FeatureThumb2) != 0".
  /// all_of and any_of cannot be combined in a single dag, instead multiple
  /// predicates can be placed onto Instruction definitions.
  dag AssemblerCondDag;

  /// PredicateName - User-level name to use for the predicate. Mainly for use
  /// in diagnostics such as missing feature errors in the asm matcher.
  string PredicateName = "";

  /// Setting this to '1' indicates that the predicate must be recomputed on
  /// every function change. Most predicates can leave this at '0'.
  ///
  /// Ignored by SelectionDAG, it always recomputes the predicate on every use.
  bit RecomputePerFunction = false;
}

/// NoHonorSignDependentRounding - This predicate is true if support for
/// sign-dependent-rounding is not enabled.
def NoHonorSignDependentRounding
 : Predicate<"!TM.Options.HonorSignDependentRoundingFPMath()">;

class Requires<list<Predicate> preds> {
  list<Predicate> Predicates = preds;
}
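
// A typical pairing (a sketch; the C++ condition and the instruction are
// hypothetical): a subtarget predicate gates instruction selection when
// attached via Requires.
//
//   def In64BitMode : Predicate<"Subtarget->is64Bit()">;
//   def MYOP64 : Instruction, Requires<[In64BitMode]> {
//     ...
//   }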

/// ops definition - This is just a simple marker used to identify the operand
/// list for an instruction. outs and ins are identical both syntactically and
/// semantically; they are used to define def operands and use operands to
/// improve readability. This should be used like this:
///     (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar.
def ops;
def outs;
def ins;

/// variable_ops definition - Mark this instruction as taking a variable number
/// of operands.
def variable_ops;

/// variable-length instruction encoding utilities.
/// The `ascend` operator should be used like this:
///     (ascend 0b0010, 0b1101)
/// which represents a sequence of encoding fragments placed from LSB to MSB.
/// Thus, in this case the final encoding will be 0b1101_0010.
/// The arguments for `ascend` can either be `bits` or another DAG.
def ascend;
/// In addition, we can use `descend` to describe an encoding that places
/// its arguments (i.e. encoding fragments) from MSB to LSB. For instance:
///     (descend 0b0010, 0b1101)
/// This results in an encoding of 0b0010_1101.
def descend;
/// The `operand` operator should be used like this:
///     (operand "$src", 4)
/// which represents a 4-bit encoding for an instruction operand named `$src`.
def operand;
/// Similar to `operand`, we can reference only part of the operand's encoding:
///     (slice "$src", 6, 8)
///     (slice "$src", 8, 6)
/// Both DAGs represent bits 6 to 8 (3 bits in total) in the encoding of operand
/// `$src`.
def slice;
/// You can use `encoder` or `decoder` to specify a custom encoder or decoder
/// function for a specific `operand` or `slice` directive. For example:
///     (operand "$src", 4, (encoder "encodeMyImm"))
///     (slice "$src", 8, 6, (encoder "encodeMyReg"))
///     (operand "$src", 4, (encoder "encodeMyImm"), (decoder "decodeMyImm"))
/// The ordering of `encoder` and `decoder` in the same `operand` or `slice`
/// doesn't matter.
/// Note that currently we cannot assign different decoders in the same
/// (instruction) operand.
def encoder;
def decoder;
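
// Putting these operators together (a sketch): an 8-bit encoding whose low
// nibble is a fixed opcode and whose high nibble holds the register operand
// $rd, encoded by the hypothetical encodeMyReg function.
//
//   let Inst = (ascend
//     0b0110,                                     // opcode in bits 3..0
//     (operand "$rd", 4, (encoder "encodeMyReg")) // $rd in bits 7..4
//   );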

/// PointerLikeRegClass - Values that are designed to have pointer width are
/// derived from this.  TableGen treats the register class as having a symbolic
/// type that it doesn't know, and resolves the actual regclass to use by using
/// the TargetRegisterInfo::getPointerRegClass() hook at codegen time.
class PointerLikeRegClass<int Kind> {
  int RegClassKind = Kind;
}


/// ptr_rc definition - Mark this operand as being a pointer value whose
/// register class is resolved dynamically via a callback to TargetInstrInfo.
/// FIXME: We should probably change this to a class which contains a list of
/// flags. But currently we have but one flag.
def ptr_rc : PointerLikeRegClass<0>;

/// unknown definition - Mark this operand as being of unknown type, causing
/// it to be resolved by inference in the context it is used.
class unknown_class;
def unknown : unknown_class;

/// AsmOperandClass - Representation for the kinds of operands which the target
/// specific parser can create and the assembly matcher may need to distinguish.
///
/// Operand classes are used to define the order in which instructions are
/// matched, to ensure that the instruction which gets matched for any
/// particular list of operands is deterministic.
///
/// The target specific parser must be able to classify a parsed operand into a
/// unique class which does not partially overlap with any other classes. It can
/// match a subset of some other class, in which case the super class field
/// should be defined.
class AsmOperandClass {
  /// The name to use for this class, which should be usable as an enum value.
  string Name = ?;

  /// The super classes of this operand.
  list<AsmOperandClass> SuperClasses = [];

  /// The name of the method on the target specific operand to call to test
  /// whether the operand is an instance of this class. If not set, this will
  /// default to "isFoo", where Foo is the AsmOperandClass name. The method
  /// signature should be:
  ///   bool isFoo() const;
  string PredicateMethod = ?;

  /// The name of the method on the target specific operand to call to add the
  /// target specific operand to an MCInst. If not set, this will default to
  /// "addFooOperands", where Foo is the AsmOperandClass name. The method
  /// signature should be:
  ///   void addFooOperands(MCInst &Inst, unsigned N) const;
  string RenderMethod = ?;

  /// The name of the method on the target specific operand to call to custom
  /// handle the operand parsing. This is useful when the operands do not relate
  /// to immediates or registers and are very instruction specific (as flags to
  /// set in a processor register, coprocessor number, ...).
  string ParserMethod = ?;

  // The diagnostic type to present when referencing this operand in a
  // match failure error message. By default, use a generic "invalid operand"
  // diagnostic. The target AsmParser maps these codes to text.
  string DiagnosticType = "";

  /// A diagnostic message to emit when an invalid value is provided for this
  /// operand.
  string DiagnosticString = "";

  /// Set to 1 if this operand is optional and not always required. Typically,
  /// the AsmParser will emit an error when it finishes parsing an
  /// instruction if it hasn't matched all the operands yet.  However, this
  /// error will be suppressed if all of the remaining unmatched operands are
  /// marked as IsOptional.
  ///
  /// Optional arguments must be at the end of the operand list.
  bit IsOptional = false;

  /// The name of the method on the target specific asm parser that returns the
  /// default operand for this optional operand. This method is only used if
  /// IsOptional == 1. If not set, this will default to "defaultFooOperands",
  /// where Foo is the AsmOperandClass name. The method signature should be:
  ///   std::unique_ptr<MCParsedAsmOperand> defaultFooOperands() const;
  string DefaultMethod = ?;
}

def ImmAsmOperand : AsmOperandClass {
  let Name = "Imm";
}
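
// A minimal sketch of a custom operand class: a 5-bit unsigned immediate
// whose parsed-operand check is isUImm5() (all names are illustrative).
//
//   def UImm5AsmOperand : AsmOperandClass {
//     let Name = "UImm5";
//     let PredicateMethod = "isUImm5";
//     let RenderMethod = "addImmOperands";
//     let DiagnosticType = "InvalidUImm5";
//   }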

/// Operand Types - These provide the built-in operand types that may be used
/// by a target.  Targets can optionally provide their own operand types as
/// needed, though this should not be needed for RISC targets.
class Operand<ValueType ty> : DAGOperand {
  ValueType Type = ty;
  string PrintMethod = "printOperand";
  string EncoderMethod = "";
  bit hasCompleteDecoder = true;
  string OperandType = "OPERAND_UNKNOWN";
  dag MIOperandInfo = (ops);

  // MCOperandPredicate - Optionally, a code fragment operating on
  // const MCOperand &MCOp, and returning a bool, to indicate if
  // the value of MCOp is valid for the specific subclass of Operand
  code MCOperandPredicate;

  // ParserMatchClass - The "match class" that operands of this type fit
  // in. Match classes are used to define the order in which instructions are
  // matched, to ensure that which instruction gets matched is deterministic.
  //
  // The target specific parser must be able to classify a parsed operand into
  // a unique class, which does not partially overlap with any other classes. It
  // can match a subset of some other class, in which case the AsmOperandClass
  // should declare the other operand as one of its super classes.
  AsmOperandClass ParserMatchClass = ImmAsmOperand;
}
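
// A sketch of a target immediate operand tied to the hypothetical
// UImm5AsmOperand above:
//
//   def uimm5 : Operand<i32> {
//     let ParserMatchClass = UImm5AsmOperand;
//     let PrintMethod = "printUImm5";
//     let OperandType = "OPERAND_IMMEDIATE";
//   }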

class RegisterOperand<RegisterClass regclass, string pm = "printOperand">
  : DAGOperand {
  // RegClass - The register class of the operand.
  RegisterClass RegClass = regclass;
  // PrintMethod - The target method to call to print register operands of
  // this type. The method normally will just use an alt-name index to look
  // up the name to print. Default to the generic printOperand().
  string PrintMethod = pm;

  // EncoderMethod - The target method name to call to encode this register
  // operand.
  string EncoderMethod = "";

  // ParserMatchClass - The "match class" that operands of this type fit
  // in. Match classes are used to define the order in which instructions are
  // matched, to ensure that which instruction gets matched is deterministic.
  //
  // The target specific parser must be able to classify a parsed operand into
  // a unique class, which does not partially overlap with any other classes. It
  // can match a subset of some other class, in which case the AsmOperandClass
  // should declare the other operand as one of its super classes.
  AsmOperandClass ParserMatchClass;

  string OperandType = "OPERAND_REGISTER";

  // When referenced in the result of a CodeGen pattern, GlobalISel will
  // normally copy the matched operand to the result. When this is set, it will
  // emit a special copy that will replace zero-immediates with the specified
  // zero-register.
  Register GIZeroRegister = ?;
}

let OperandType = "OPERAND_IMMEDIATE" in {
def i1imm  : Operand<i1>;
def i8imm  : Operand<i8>;
def i16imm : Operand<i16>;
def i32imm : Operand<i32>;
def i64imm : Operand<i64>;

def f32imm : Operand<f32>;
def f64imm : Operand<f64>;
}

// Register operands for generic instructions don't have an MVT, but do have
// constraints linking the operands (e.g. all operands of a G_ADD must
// have the same LLT).
class TypedOperand<string Ty> : Operand<untyped> {
  let OperandType = Ty;
  bit IsPointer = false;
  bit IsImmediate = false;
}

def type0 : TypedOperand<"OPERAND_GENERIC_0">;
def type1 : TypedOperand<"OPERAND_GENERIC_1">;
def type2 : TypedOperand<"OPERAND_GENERIC_2">;
def type3 : TypedOperand<"OPERAND_GENERIC_3">;
def type4 : TypedOperand<"OPERAND_GENERIC_4">;
def type5 : TypedOperand<"OPERAND_GENERIC_5">;

let IsPointer = true in {
  def ptype0 : TypedOperand<"OPERAND_GENERIC_0">;
  def ptype1 : TypedOperand<"OPERAND_GENERIC_1">;
  def ptype2 : TypedOperand<"OPERAND_GENERIC_2">;
  def ptype3 : TypedOperand<"OPERAND_GENERIC_3">;
  def ptype4 : TypedOperand<"OPERAND_GENERIC_4">;
  def ptype5 : TypedOperand<"OPERAND_GENERIC_5">;
}

// untyped_imm is for operands where isImm() will be true. It currently has no
// special behaviour and is only used for clarity.
def untyped_imm_0 : TypedOperand<"OPERAND_GENERIC_IMM_0"> {
  let IsImmediate = true;
}

/// zero_reg definition - Special node to stand for the zero register.
///
def zero_reg;

/// undef_tied_input - Special node to indicate an input register tied
/// to an output which defaults to IMPLICIT_DEF.
def undef_tied_input;

/// All operands which the MC layer classifies as predicates should inherit from
/// this class in some manner. This is already handled for the most commonly
/// used PredicateOperand, but may be useful in other circumstances.
class PredicateOp;

/// OperandWithDefaultOps - This Operand class can be used as the parent class
/// for an Operand that needs to be initialized with a default value if
/// no value is supplied in a pattern.  This class can be used to simplify the
/// pattern definitions for instructions that have target specific flags
/// encoded as immediate operands.
class OperandWithDefaultOps<ValueType ty, dag defaultops>
  : Operand<ty> {
  dag DefaultOps = defaultops;
}

/// PredicateOperand - This can be used to define a predicate operand for an
/// instruction.  OpTypes specifies the MIOperandInfo for the operand, and
/// AlwaysVal specifies the value of this predicate when set to "always
/// execute".
class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal>
  : OperandWithDefaultOps<ty, AlwaysVal>, PredicateOp {
  let MIOperandInfo = OpTypes;
}

/// OptionalDefOperand - This is used to define an optional definition operand
/// for an instruction. DefaultOps is the register the operand represents if
/// none is supplied, e.g. zero_reg.
class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
  : OperandWithDefaultOps<ty, defaultops> {
  let MIOperandInfo = OpTypes;
}
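
// An ARM-flavoured sketch (operand and register class names are
// illustrative): the condition-code operand defaults to 14, the "always"
// encoding, and the optional status-register def defaults to zero_reg.
//
//   def pred : PredicateOperand<OtherVT, (ops i32imm, CCR:$reg),
//                               (ops (i32 14), (i32 zero_reg))>;
//   def cc_out : OptionalDefOperand<OtherVT, (ops CCR),
//                                   (ops (i32 zero_reg))>;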


// InstrInfo - This class should only be instantiated once to provide parameters
// which are global to the target machine.
//
class InstrInfo {
  // Targets can specify their instructions in either big- or little-endian format.
  // For instance, while both Sparc and PowerPC are big-endian platforms, the
  // Sparc manual specifies its instructions in the format [31..0] (big), while
  // PowerPC specifies them using the format [0..31] (little).
  bit isLittleEndianEncoding = false;

  // The instruction properties mayLoad, mayStore, and hasSideEffects are unset
  // by default, and TableGen will infer their value from the instruction
  // pattern when possible.
  //
  // Normally, TableGen will issue an error if it can't infer the value of a
  // property that hasn't been set explicitly. When guessInstructionProperties
  // is set, it will guess a safe value instead.
  //
  // This option is a temporary migration help. It will go away.
  bit guessInstructionProperties = true;
}
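
// Each target instantiates this exactly once, optionally overriding the
// defaults (a sketch):
//
//   def MyTargetInstrInfo : InstrInfo {
//     let guessInstructionProperties = false;
//   }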

// Standard Pseudo Instructions.
// This list must match TargetOpcodes.def.
// Only these instructions are allowed in the TargetOpcode namespace.
// Ensure mayLoad and mayStore have a default value, so as not to break
// targets that set guessInstructionProperties=0. Any local definition of
// mayLoad/mayStore takes precedence over these default values.
class StandardPseudoInstruction : Instruction {
  let mayLoad = false;
  let mayStore = false;
  let isCodeGenOnly = true;
  let isPseudo = true;
  let hasNoSchedulingInfo = true;
  let Namespace = "TargetOpcode";
}
def PHI : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins variable_ops);
  let AsmString = "PHINODE";
  let hasSideEffects = false;
}
def INLINEASM : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "";
  let hasSideEffects = false;  // Note side effect is encoded in an operand.
}
def INLINEASM_BR : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "";
  // Unlike INLINEASM, this is always treated as having side-effects.
  let hasSideEffects = true;
  // Despite potentially branching, this instruction is intentionally _not_
  // marked as a terminator or a branch.
}
def CFI_INSTRUCTION : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$id);
  let AsmString = "";
  let hasCtrlDep = true;
  let hasSideEffects = false;
  let isNotDuplicable = true;
  let isMeta = true;
}
def EH_LABEL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$id);
  let AsmString = "";
  let hasCtrlDep = true;
  let hasSideEffects = false;
  let isNotDuplicable = true;
  let isMeta = true;
}
def GC_LABEL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$id);
  let AsmString = "";
  let hasCtrlDep = true;
  let hasSideEffects = false;
  let isNotDuplicable = true;
  let isMeta = true;
}
def ANNOTATION_LABEL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$id);
  let AsmString = "";
  let hasCtrlDep = true;
  let hasSideEffects = false;
  let isNotDuplicable = true;
}
def KILL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "";
  let hasSideEffects = false;
  let isMeta = true;
}
def EXTRACT_SUBREG : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$supersrc, i32imm:$subidx);
  let AsmString = "";
  let hasSideEffects = false;
}
def INSERT_SUBREG : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
  let AsmString = "";
  let hasSideEffects = false;
  let Constraints = "$supersrc = $dst";
}
def IMPLICIT_DEF : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins);
  let AsmString = "";
  let hasSideEffects = false;
  let isReMaterializable = true;
  let isAsCheapAsAMove = true;
  let isMeta = true;
}
def SUBREG_TO_REG : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
  let AsmString = "";
  let hasSideEffects = false;
}
def COPY_TO_REGCLASS : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$src, i32imm:$regclass);
  let AsmString = "";
  let hasSideEffects = false;
  let isAsCheapAsAMove = true;
}
def DBG_VALUE : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "DBG_VALUE";
  let hasSideEffects = false;
  let isMeta = true;
}
def DBG_VALUE_LIST : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "DBG_VALUE_LIST";
  let hasSideEffects = false;
  let isMeta = true;
}
def DBG_INSTR_REF : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "DBG_INSTR_REF";
  let hasSideEffects = false;
  let isMeta = true;
}
def DBG_PHI : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "DBG_PHI";
  let hasSideEffects = false;
  let isMeta = true;
}
def DBG_LABEL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$label);
  let AsmString = "DBG_LABEL";
  let hasSideEffects = false;
  let isMeta = true;
}
def REG_SEQUENCE : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$supersrc, variable_ops);
  let AsmString = "";
  let hasSideEffects = false;
  let isAsCheapAsAMove = true;
}
def COPY : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$src);
  let AsmString = "";
  let hasSideEffects = false;
  let isAsCheapAsAMove = true;
  let hasNoSchedulingInfo = false;
}
def BUNDLE : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "BUNDLE";
  let hasSideEffects = false;
}
def LIFETIME_START : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$id);
  let AsmString = "LIFETIME_START";
  let hasSideEffects = false;
  let isMeta = true;
}
def LIFETIME_END : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$id);
  let AsmString = "LIFETIME_END";
  let hasSideEffects = false;
  let isMeta = true;
}
def PSEUDO_PROBE : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i64imm:$guid, i64imm:$index, i8imm:$type, i32imm:$attr);
  let AsmString = "PSEUDO_PROBE";
  let hasSideEffects = true;
  let isMeta = true;
}
def ARITH_FENCE : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins unknown:$src);
  let AsmString = "";
  let hasSideEffects = false;
  let Constraints = "$src = $dst";
  let isMeta = true;
}

def STACKMAP : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i64imm:$id, i32imm:$nbytes, variable_ops);
  let hasSideEffects = true;
  let isCall = true;
  let mayLoad = true;
  let usesCustomInserter = true;
}
def PATCHPOINT : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins i64imm:$id, i32imm:$nbytes, unknown:$callee,
                       i32imm:$nargs, i32imm:$cc, variable_ops);
  let hasSideEffects = true;
  let isCall = true;
  let mayLoad = true;
  let usesCustomInserter = true;
}
def STATEPOINT : StandardPseudoInstruction {
  let OutOperandList = (outs variable_ops);
  let InOperandList = (ins variable_ops);
  let usesCustomInserter = true;
  let mayLoad = true;
  let mayStore = true;
  let hasSideEffects = true;
  let isCall = true;
}
def LOAD_STACK_GUARD : StandardPseudoInstruction {
  let OutOperandList = (outs ptr_rc:$dst);
  let InOperandList = (ins);
  let mayLoad = true;
  bit isReMaterializable = true;
  let hasSideEffects = false;
  bit isPseudo = true;
}
def PREALLOCATED_SETUP : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$a);
  let usesCustomInserter = true;
  let hasSideEffects = true;
}
def PREALLOCATED_ARG : StandardPseudoInstruction {
  let OutOperandList = (outs ptr_rc:$loc);
  let InOperandList = (ins i32imm:$a, i32imm:$b);
  let usesCustomInserter = true;
  let hasSideEffects = true;
}
def LOCAL_ESCAPE : StandardPseudoInstruction {
  // This instruction is really just a label. It has to be part of the chain so
  // that it doesn't get dropped from the DAG, but it produces nothing and has
  // no side effects.
  let OutOperandList = (outs);
  let InOperandList = (ins ptr_rc:$symbol, i32imm:$id);
  let hasSideEffects = false;
  let hasCtrlDep = true;
}
def FAULTING_OP : StandardPseudoInstruction {
  let OutOperandList = (outs unknown:$dst);
  let InOperandList = (ins variable_ops);
  let usesCustomInserter = true;
  let hasSideEffects = true;
  let mayLoad = true;
  let mayStore = true;
  let isTerminator = true;
  let isBranch = true;
}
def PATCHABLE_OP : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let usesCustomInserter = true;
  let mayLoad = true;
  let mayStore = true;
  let hasSideEffects = true;
}
def PATCHABLE_FUNCTION_ENTER : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins);
  let AsmString = "# XRay Function Enter.";
  let usesCustomInserter = true;
  let hasSideEffects = true;
}
def PATCHABLE_RET : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "# XRay Function Patchable RET.";
  let usesCustomInserter = true;
  let hasSideEffects = true;
  let isTerminator = true;
  let isReturn = true;
}
def PATCHABLE_FUNCTION_EXIT : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins);
  let AsmString = "# XRay Function Exit.";
  let usesCustomInserter = true;
  let hasSideEffects = true;
  let isReturn = false; // Original return instruction will follow
}
def PATCHABLE_TAIL_CALL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "# XRay Tail Call Exit.";
  let usesCustomInserter = true;
  let hasSideEffects = true;
  let isReturn = true;
}
def PATCHABLE_EVENT_CALL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptr_rc:$event, unknown:$size);
  let AsmString = "# XRay Custom Event Log.";
  let usesCustomInserter = true;
  let isCall = true;
  let mayLoad = true;
  let mayStore = true;
  let hasSideEffects = true;
}
def PATCHABLE_TYPED_EVENT_CALL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$type, ptr_rc:$event, unknown:$size);
  let AsmString = "# XRay Typed Event Log.";
  let usesCustomInserter = true;
  let isCall = true;
  let mayLoad = true;
  let mayStore = true;
  let hasSideEffects = true;
}
def FENTRY_CALL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins);
  let AsmString = "# FEntry call";
  let usesCustomInserter = true;
  let isCall = true;
  let mayLoad = true;
  let mayStore = true;
  let hasSideEffects = true;
}
def ICALL_BRANCH_FUNNEL : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins variable_ops);
  let AsmString = "";
  let hasSideEffects = true;
}
def MEMBARRIER : StandardPseudoInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins);
  let AsmString = "";
  let hasSideEffects = true;
  let Size = 0;
  let isMeta = true;
}

// Generic opcodes used in GlobalISel.
include "llvm/Target/GenericOpcodes.td"

//===----------------------------------------------------------------------===//
// AsmParser - This class can be implemented by targets that wish to implement
// .s file parsing.
//
// Subtargets can have multiple different assembly parsers (e.g. AT&T vs Intel
// syntax on X86 for example).
//
class AsmParser {
  // AsmParserClassName - This specifies the suffix to use for the asmparser
  // class.  Generated AsmParser classes are always prefixed with the target
  // name.
  string AsmParserClassName  = "AsmParser";

  // AsmParserInstCleanup - If non-empty, this is the name of a custom member
  // function of the AsmParser class to call on every matched instruction.
  // This can be used to perform target specific instruction post-processing.
  string AsmParserInstCleanup  = "";

  // ShouldEmitMatchRegisterName - Set to false if the target needs a hand
  // written register name matcher
  bit ShouldEmitMatchRegisterName = true;

  // Set to true if the target needs a generated 'alternative register name'
  // matcher.
  //
  // This generates a function which can be used to look up registers from
  // their aliases. This function will fail when called on targets where
  // several registers share the same alias (i.e. not a 1:1 mapping).
  bit ShouldEmitMatchRegisterAltName = false;

  // Set to true if MatchRegisterName and MatchRegisterAltName functions
  // should be generated even if there are duplicate register names. The
  // target is responsible for coercing aliased registers as necessary
  // (e.g. in validateTargetOperandClass), and there are no guarantees about
  // which numeric register identifier will be returned in the case of
  // multiple matches.
  bit AllowDuplicateRegisterNames = false;

  // HasMnemonicFirst - Set to false if target instructions don't always
  // start with a mnemonic as the first token.
  bit HasMnemonicFirst = true;

  // ReportMultipleNearMisses -
  // When 0, the assembly matcher reports an error for one encoding or operand
  // that did not match the parsed instruction.
  // When 1, the assembly matcher returns a list of encodings that were close
  // to matching the parsed instruction, allowing more detailed error
  // messages.
  bit ReportMultipleNearMisses = false;

  // OperandParserMethod - If non-empty, this is the name of a custom
  // member function of the AsmParser class to call for every instruction
  // operand to be parsed.
  string OperandParserMethod = "";

  // CallCustomParserForAllOperands - Set to true if the custom parser
  // method shall be called for all operands as opposed to only those
  // that have their own specified custom parsers.
  bit CallCustomParserForAllOperands = false;
}
def DefaultAsmParser : AsmParser;

//===----------------------------------------------------------------------===//
// AsmParserVariant - Subtargets can have multiple different assembly parsers
// (e.g. AT&T vs Intel syntax on X86 for example). This class can be
// implemented by targets to describe such variants.
//
class AsmParserVariant {
  // Variant - AsmParsers can be of multiple different variants.  Variants are
  // used to support targets that need to parse multiple formats for the
  // assembly language.
  int Variant = 0;

  // Name - The AsmParser variant name (e.g., AT&T vs Intel).
  string Name = "";

  // CommentDelimiter - If given, the delimiter string used to recognize
  // comments which are hard coded in the .td assembler strings for individual
  // instructions.
  string CommentDelimiter = "";

  // RegisterPrefix - If given, the token prefix which indicates a register
  // token. This is used by the matcher to automatically recognize hard coded
  // register tokens as constrained registers, instead of tokens, for the
  // purposes of matching.
  string RegisterPrefix = "";

  // TokenizingCharacters - Characters that are standalone tokens
  string TokenizingCharacters = "[]*!";

  // SeparatorCharacters - Characters that are not tokens
  string SeparatorCharacters = " \t,";

  // BreakCharacters - Characters that start new identifiers
  string BreakCharacters = "";
}
def DefaultAsmParserVariant : AsmParserVariant;

// Operators for combining SubtargetFeatures in AssemblerPredicates
def any_of;
def all_of;

/// AssemblerPredicate - This is a Predicate that can be used when the assembler
/// matches instructions and aliases.
class AssemblerPredicate<dag cond, string name = ""> {
  bit AssemblerMatcherPredicate = true;
  dag AssemblerCondDag = cond;
  string PredicateName = name;
}
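
// For illustration only (FeatureSIMD and the subtarget hook are hypothetical
// names, not definitions from this file): a target can surface a subtarget
// feature to the assembler matcher like this:
//
//   def HasSIMD : Predicate<"Subtarget->hasSIMD()">,
//                 AssemblerPredicate<(all_of FeatureSIMD), "simd">;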

/// TokenAlias - This class allows targets to define assembler token
/// operand aliases. That is, a token literal operand which is equivalent
/// to another, canonical, token literal. For example, ARM allows:
///   vmov.u32 s4, #0  -> vmov.i32, #0
/// 'u32' is a more specific designator for the 32-bit integer type specifier
/// and is legal for any instruction which accepts 'i32' as a datatype suffix.
///   def : TokenAlias<".u32", ".i32">;
///
/// This works by marking the match class of 'From' as a subclass of the
/// match class of 'To'.
class TokenAlias<string From, string To> {
  string FromToken = From;
  string ToToken = To;
}

/// MnemonicAlias - This class allows targets to define assembler mnemonic
/// aliases.  This should be used when all forms of one mnemonic are accepted
/// with a different mnemonic.  For example, X86 allows:
///   sal %al, 1    -> shl %al, 1
///   sal %ax, %cl  -> shl %ax, %cl
///   sal %eax, %cl -> shl %eax, %cl
/// etc.  Though "sal" is accepted with many forms, all of them are directly
/// translated to a shl, so it can be handled with a single alias (in the case
/// of X86, there is actually one for each suffix as well):
///   def : MnemonicAlias<"sal", "shl">;
///
/// Mnemonic aliases are mapped before any other translation in the match phase,
/// and do allow Requires predicates, e.g.:
///
///  def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
///  def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
///
/// Mnemonic aliases can also be constrained to specific variants, e.g.:
///
///  def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
///
/// If no variant (e.g., "att" or "intel") is specified then the alias is
/// applied unconditionally.
class MnemonicAlias<string From, string To, string VariantName = ""> {
  string FromMnemonic = From;
  string ToMnemonic = To;
  string AsmVariantName = VariantName;

  // Predicates - Predicates that must be true for this remapping to happen.
  list<Predicate> Predicates = [];
}

/// InstAlias - This defines an alternate assembly syntax that is allowed to
/// match an instruction that has a different (more canonical) assembly
/// representation.
class InstAlias<string Asm, dag Result, int Emit = 1, string VariantName = ""> {
  string AsmString = Asm;      // The .s format to match the instruction with.
  dag ResultInst = Result;     // The MCInst to generate.

  // This determines the order in which the InstPrinter detects aliases when
  // printing. A larger value makes the alias more likely to be
  // emitted. The Instruction's own definition is notionally 0.5, so 0
  // disables printing and 1 enables it if there are no conflicting aliases.
  int EmitPriority = Emit;

  // Predicates - Predicates that must be true for this to match.
  list<Predicate> Predicates = [];

  // If the instruction specified in Result has defined an AsmMatchConverter
  // then setting this to 1 will cause the alias to use the AsmMatchConverter
  // function when converting the OperandVector into an MCInst instead of the
  // function that is generated by the dag Result.
  // Setting this to 0 will cause the alias to ignore the Result instruction's
  // defined AsmMatchConverter and instead use the function generated by the
  // dag Result.
  bit UseInstAsmMatchConverter = true;

  // Assembler variant name to use for this alias. If not specified then
  // assembler variants will be determined based on AsmString.
  string AsmVariantName = VariantName;
}
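
// For example (ADDri and GPR32 are illustrative stand-ins for a real
// instruction and register class), a "mov" alias could be written as:
//
//   def : InstAlias<"mov $dst, $src", (ADDri GPR32:$dst, GPR32:$src, 0)>;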

//===----------------------------------------------------------------------===//
// AsmWriter - This class can be implemented by targets that need to customize
// the format of the .s file writer.
//
// Subtargets can have multiple different asmwriters (e.g. AT&T vs Intel syntax
// on X86 for example).
//
class AsmWriter {
  // AsmWriterClassName - This specifies the suffix to use for the asmwriter
  // class.  Generated AsmWriter classes are always prefixed with the target
  // name.
  string AsmWriterClassName  = "InstPrinter";

  // PassSubtarget - Determines whether MCSubtargetInfo should be passed to
  // the various print methods.
  // FIXME: Remove after all ports are updated.
  int PassSubtarget = 0;

  // Variant - AsmWriters can be of multiple different variants.  Variants are
  // used to support targets that need to emit assembly code in ways that are
  // mostly the same for different targets, but have minor differences in
  // syntax.  If the asmstring contains {|} characters, this integer
  // will specify which alternative to use.  For example "{x|y|z}" with
  // Variant == 1 will expand to "y".
}
def DefaultAsmWriter : AsmWriter;


//===----------------------------------------------------------------------===//
// Target - This class contains the "global" target information
//
class Target {
  // InstructionSet - Instruction set description for this target.
  InstrInfo InstructionSet;

  // AssemblyParsers - The AsmParser instances available for this target.
  list<AsmParser> AssemblyParsers = [DefaultAsmParser];

  /// AssemblyParserVariants - The AsmParserVariant instances available for
  /// this target.
  list<AsmParserVariant> AssemblyParserVariants = [DefaultAsmParserVariant];

  // AssemblyWriters - The AsmWriter instances available for this target.
  list<AsmWriter> AssemblyWriters = [DefaultAsmWriter];

  // AllowRegisterRenaming - Controls whether this target allows
  // post-register-allocation renaming of registers.  This is done by
  // setting hasExtraDefRegAllocReq and hasExtraSrcRegAllocReq to 1
  // for all opcodes if this flag is set to 0.
  int AllowRegisterRenaming = 0;
}
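
// A minimal target definition might look like this (names are illustrative):
//
//   def MyTargetInstrInfo : InstrInfo;
//   def MyTarget : Target {
//     let InstructionSet = MyTargetInstrInfo;
//   }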

//===----------------------------------------------------------------------===//
// SubtargetFeature - A characteristic of the chip set.
//
class SubtargetFeature<string n, string f, string v, string d,
                       list<SubtargetFeature> i = []> {
  // Name - Feature name.  Used by command line (-mattr=) to determine the
  // appropriate target chip.
  //
  string Name = n;

  // FieldName - Field in XXXSubtarget to be set by feature.
  //
  string FieldName = f;

  // Value - Value the XXXSubtarget field to be set to by feature.
  //
  // A value of "true" or "false" implies the field is a bool. Otherwise,
  // it is assumed to be an integer. The integer value may be the name of an
  // enum constant. If multiple features use the same integer field, the
  // field will be set to the maximum value of all enabled features that
  // share the field.
  //
  string Value = v;

  // Desc - Feature description.  Used by command line (-mattr=) to display help
  // information.
  //
  string Desc = d;

  // Implies - Features that this feature implies are present. If one of those
  // features isn't set, then this one shouldn't be set either.
  //
  list<SubtargetFeature> Implies = i;
}
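
// For example (a sketch only; the HasFP field must actually exist in the
// target's Subtarget class), a floating-point feature could be declared as:
//
//   def FeatureFP : SubtargetFeature<"fp", "HasFP", "true",
//                                    "Enable floating point support">;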

/// Specifies a Subtarget feature that this instruction is deprecated on.
class Deprecated<SubtargetFeature dep> {
  SubtargetFeature DeprecatedFeatureMask = dep;
}

/// A custom predicate used to determine if an instruction is
/// deprecated or not.
class ComplexDeprecationPredicate<string dep> {
  string ComplexDeprecationPredicate = dep;
}

//===----------------------------------------------------------------------===//
// Processor chip sets - These values represent each of the chip sets supported
// by the scheduler.  Each Processor definition requires corresponding
// instruction itineraries.
//
class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f,
                list<SubtargetFeature> tunef = []> {
  // Name - Chip set name.  Used by command line (-mcpu=) to determine the
  // appropriate target chip.
  //
  string Name = n;

  // SchedModel - The machine model for scheduling and instruction cost.
  //
  SchedMachineModel SchedModel = NoSchedModel;

  // ProcItin - The scheduling information for the target processor.
  //
  ProcessorItineraries ProcItin = pi;

  // Features - list of architectural features for this processor.
  list<SubtargetFeature> Features = f;

  // TuneFeatures - list of features for tuning this CPU. If the target
  // supports -mtune, this should contain the list of features used to make
  // microarchitectural optimization decisions for a given processor, while
  // Features should contain the architectural features for the processor.
  list<SubtargetFeature> TuneFeatures = tunef;
}

// ProcessorModel allows subtargets to specify the more general
// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
// gradually move to this newer form.
//
// Although this class always passes NoItineraries to the Processor
// class, the SchedMachineModel may still define valid Itineraries.
class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f,
                     list<SubtargetFeature> tunef = []>
  : Processor<n, NoItineraries, f, tunef> {
  let SchedModel = m;
}
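
// For example (MyModel and FeatureFP are illustrative definitions):
//
//   def : ProcessorModel<"my-cpu", MyModel, [FeatureFP]>;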

//===----------------------------------------------------------------------===//
// InstrMapping - This class is used to create mapping tables to relate
// instructions with each other based on the values specified in RowFields,
// ColFields, KeyCol and ValueCols.
//
class InstrMapping {
  // FilterClass - Used to limit search space only to the instructions that
  // define the relationship modeled by this InstrMapping record.
  string FilterClass;

  // RowFields - List of fields/attributes that should be same for all the
  // instructions in a row of the relation table. Think of this as a set of
  // properties shared by all the instructions related by this relationship
  // model and is used to categorize instructions into subgroups. For instance,
  // if we want to define a relation that maps 'Add' instruction to its
  // predicated forms, we can define RowFields like this:
  //
  // let RowFields = ["BaseOp"]
  // All add instructions, predicated or not, will have to set their BaseOp
  // to the same value.
  //
  // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' }
  // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' }
  // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false'  }
  list<string> RowFields = [];

  // List of fields/attributes that are same for all the instructions
  // in a column of the relation table.
  // Ex: let ColFields = ["predSense"] -- It means that the columns are arranged
  // based on the 'predSense' values. All the instructions in a specific
  // column have the same value and it is fixed for the column according
  // to the values set in 'ValueCols'.
  list<string> ColFields = [];

  // Values for the fields/attributes listed in 'ColFields'.
  // Ex: let KeyCol = ["nopred"] -- It means that the key instruction (instruction
  // that models this relation) should be non-predicated.
  // In the example above, 'Add' is the key instruction.
  list<string> KeyCol = [];

  // List of values for the fields/attributes listed in 'ColFields', one for
  // each column in the relation table.
  //
  // Ex: let ValueCols = [["true"], ["false"]] -- It adds two columns to the
  // table. The first column requires all the instructions to have predSense
  // set to 'true' and the second column requires it to be 'false'.
  list<list<string> > ValueCols = [];
}
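
// Tying the pieces together for the 'Add' example above (illustrative;
// "PredRel" names a filter class the related instruction defs would share):
//
//   def getPredOpcode : InstrMapping {
//     let FilterClass = "PredRel";
//     let RowFields = ["BaseOp"];
//     let ColFields = ["predSense"];
//     let KeyCol = ["nopred"];
//     let ValueCols = [["true"], ["false"]];
//   }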

//===----------------------------------------------------------------------===//
// Pull in the common support for calling conventions.
//
include "llvm/Target/TargetCallingConv.td"

//===----------------------------------------------------------------------===//
// Pull in the common support for DAG isel generation.
//
include "llvm/Target/TargetSelectionDAG.td"

//===----------------------------------------------------------------------===//
// Pull in the common support for Global ISel register bank info generation.
//
include "llvm/Target/GlobalISel/RegisterBank.td"

//===----------------------------------------------------------------------===//
// Pull in the common support for GlobalISel matcher generation.
//
include "llvm/Target/GlobalISel/Target.td"

//===----------------------------------------------------------------------===//
// Pull in the common support for the Global ISel DAG-based selector generation.
//
include "llvm/Target/GlobalISel/SelectionDAGCompat.td"

//===----------------------------------------------------------------------===//
// Pull in the common support for Pfm Counters generation.
//
include "llvm/Target/TargetPfmCounters.td"
//===- TargetInstrPredicate.td - ---------------------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines class MCInstPredicate and its subclasses.
//
// MCInstPredicate definitions are used by target scheduling models to describe
// constraints on instructions.
//
// Here is an example of an MCInstPredicate definition in TableGen:
//
// def MCInstPredicateExample : CheckAll<[
//    CheckOpcode<[BLR]>,
//    CheckIsRegOperand<0>,
//    CheckNot<CheckRegOperand<0, LR>>]>;
//
// The syntax for MCInstPredicate is declarative, and predicate definitions can
// be composed together in order to generate more complex constraints.
//
// The `CheckAll` from the example defines a composition of three different
// predicates.  Definition `MCInstPredicateExample` identifies instructions
// whose opcode is BLR, and whose first operand is a register different from
// register `LR`.
//
// Every MCInstPredicate class has a well-known semantic in tablegen. For
// example, `CheckOpcode` is a special type of predicate used to describe a
// constraint on the value of an instruction opcode.
//
// MCInstPredicate definitions are typically used by scheduling models to
// construct MCSchedPredicate definitions (see the definition of class
// MCSchedPredicate in llvm/Target/TargetSchedule.td).
// In particular, an MCSchedPredicate can be used instead of a SchedPredicate
// when defining the set of SchedReadVariant and SchedWriteVariant of a
// processor scheduling model.
//
// The `MCInstPredicateExample` definition above is equivalent to (and could
// therefore replace) the following definition from a previous ExynosM3 model (see
// AArch64SchedExynosM3.td):
//
// def M3BranchLinkFastPred  : SchedPredicate<[{
//    MI->getOpcode() == AArch64::BLR &&
//    MI->getOperand(0).isReg() &&
//    MI->getOperand(0).getReg() != AArch64::LR}]>;
//
// The main advantage of using MCInstPredicate instead of SchedPredicate is
// portability: users don't need to specify predicates in C++. As a consequence
// of this, MCInstPredicate definitions are not bound to a particular
// representation (i.e. MachineInstr vs MCInst).
//
// Tablegen backends know how to expand MCInstPredicate definitions into actual
// C++ code that works on MachineInstr (and/or MCInst).
//
// Instances of class PredicateExpander (see utils/TableGen/PredicateExpander.h)
// know how to expand a predicate. For each MCInstPredicate class, there must be
// an "expand" method available in the PredicateExpander interface.
//
// For example, a `CheckOpcode` predicate is expanded using method
// `PredicateExpander::expandCheckOpcode()`.
//
// New MCInstPredicate classes must be added to this file. For each new class
// XYZ, an "expandXYZ" method must be added to the PredicateExpander.
//
//===----------------------------------------------------------------------===//

// Forward declarations.
class Instruction;
class SchedMachineModel;

// A generic machine instruction predicate.
class MCInstPredicate;

class MCTrue  : MCInstPredicate;   // A predicate that always evaluates to True.
class MCFalse : MCInstPredicate;   // A predicate that always evaluates to False.
def TruePred  : MCTrue;
def FalsePred : MCFalse;

// A predicate used to negate the outcome of another predicate.
// It makes it easy to express "set difference" operations. For example, it
// makes it easy to describe a check that tests if an opcode is not part of a
// set of opcodes.
class CheckNot<MCInstPredicate P> : MCInstPredicate {
  MCInstPredicate Pred = P;
}

// This class is used as a building block to define predicates on instruction
// operands. It is used to reference a specific machine operand.
class MCOperandPredicate<int Index> : MCInstPredicate {
  int OpIndex = Index;
}

// Return true if machine operand at position `Index` is a register operand.
class CheckIsRegOperand<int Index> : MCOperandPredicate<Index>;

// Return true if machine operand at position `Index` is an immediate operand.
class CheckIsImmOperand<int Index> : MCOperandPredicate<Index>;

// Check if machine operands at index `First` and index `Second` both reference
// the same register.
class CheckSameRegOperand<int First, int Second> : MCInstPredicate {
  int FirstIndex = First;
  int SecondIndex = Second;
}

// Base class for checks on register/immediate operands.
// It allows users to define checks like:
//    MyFunction(MI->getOperand(Index).getImm()) == Val;
//
// In the example above, `MyFunction` is a function that takes as input an
// immediate operand value, and returns another value. Field `FunctionMapper` is
// the name of the function to call on the operand value.
class CheckOperandBase<int Index, string Fn = ""> : MCOperandPredicate<Index> {
  string FunctionMapper = Fn;
}

// Check that the machine register operand at position `Index` references
// register R. This predicate assumes that we already checked that the machine
// operand at position `Index` is a register operand.
class CheckRegOperand<int Index, Register R> : CheckOperandBase<Index> {
  Register Reg = R;
}

// Check if register operand at index `Index` is the invalid register.
class CheckInvalidRegOperand<int Index> : CheckOperandBase<Index>;

// Return true if machine operand at position `Index` is a valid
// register operand.
class CheckValidRegOperand<int Index> :
  CheckNot<CheckInvalidRegOperand<Index>>;

// Check that the operand at position `Index` is immediate `Imm`.
// If field `FunctionMapper` is a non-empty string, then function
// `FunctionMapper` is applied to the operand value, and the return value is then
// compared against `Imm`.
class CheckImmOperand<int Index, int Imm> : CheckOperandBase<Index> {
  int ImmVal = Imm;
}

// Similar to CheckImmOperand, except that the immediate is not a literal number.
// This is useful when we want to compare the value of an operand against an
// enum value, and we know the actual integer value of that enum.
class CheckImmOperand_s<int Index, string Value> : CheckOperandBase<Index> {
  string ImmVal = Value;
}

// Expands to a call to `FunctionMapper` if field `FunctionMapper` is set.
// Otherwise, it expands to a CheckNot<CheckInvalidRegOperand<Index>>.
class CheckRegOperandSimple<int Index> : CheckOperandBase<Index>;

// Expands to a call to `FunctionMapper` if field `FunctionMapper` is set.
// Otherwise, it simply evaluates to TruePred.
class CheckImmOperandSimple<int Index> : CheckOperandBase<Index>;

// Check that the operand at position `Index` is immediate value zero.
class CheckZeroOperand<int Index> : CheckImmOperand<Index, 0>;

// Check that the instruction has exactly `Num` operands.
class CheckNumOperands<int Num> : MCInstPredicate {
  int NumOps = Num;
}

// Check that the instruction opcode is one of the opcodes in set `Opcodes`.
// This is a simple set membership query. The easiest way to check if an opcode
// is not a member of the set is by using a `CheckNot<CheckOpcode<[...]>>`
// sequence.
class CheckOpcode<list<Instruction> Opcodes> : MCInstPredicate {
  list<Instruction> ValidOpcodes = Opcodes;
}

// Check that the instruction opcode is a pseudo opcode member of the set
// `Opcodes`.  This check is always expanded to "false" if we are generating
// code for MCInst.
class CheckPseudo<list<Instruction> Opcodes> : CheckOpcode<Opcodes>;

// A non-portable predicate. Only to use as a last resort when a block of code
// cannot possibly be converted in a declarative way using other MCInstPredicate
// classes. This check is always expanded to "false" when generating code for
// MCInst.
class CheckNonPortable<string Code> : MCInstPredicate {
  string CodeBlock = Code;
}

// A sequence of predicates. It is used as the base class for CheckAll and
// CheckAny. It allows compositions of predicates to be described.
class CheckPredicateSequence<list<MCInstPredicate> Preds> : MCInstPredicate {
  list<MCInstPredicate> Predicates = Preds;
}

// Check that all of the predicates in `Preds` evaluate to true.
class CheckAll<list<MCInstPredicate> Sequence>
    : CheckPredicateSequence<Sequence>;

// Check that at least one of the predicates in `Preds` evaluates to true.
class CheckAny<list<MCInstPredicate> Sequence>
    : CheckPredicateSequence<Sequence>;


// Used to expand the body of a function predicate. See the definition of
// TIIPredicate below.
class MCStatement;

// Expands to a return statement. The return expression is a boolean expression
// described by a MCInstPredicate.
class MCReturnStatement<MCInstPredicate predicate> : MCStatement {
  MCInstPredicate Pred = predicate;
}

// Used to automatically construct cases of a switch statement where the switch
// variable is an instruction opcode. There is a 'case' for every opcode in the
// `opcodes` list, and each case is associated with MCStatement `caseStmt`.
class MCOpcodeSwitchCase<list<Instruction> opcodes, MCStatement caseStmt> {
  list<Instruction> Opcodes = opcodes;
  MCStatement CaseStmt = caseStmt;
}

// Expands to a switch statement. The switch variable is an instruction opcode.
// The auto-generated switch is populated by a number of cases based on the
// `cases` list in input. A default case is automatically generated, and it
// evaluates to `default`.
class MCOpcodeSwitchStatement<list<MCOpcodeSwitchCase> cases,
                              MCStatement default> : MCStatement {
  list<MCOpcodeSwitchCase> Cases = cases;
  MCStatement DefaultCase = default;
}

// Base class for function predicates.
class FunctionPredicateBase<string name, MCStatement body> {
  string FunctionName = name;
  MCStatement Body = body;
}

// Check that a call to method `Name` in class "XXXInstrInfo" (where XXX is
// the name of a target) returns true.
//
// TIIPredicate definitions are used to model calls to the target-specific
// InstrInfo. A TIIPredicate is treated specially by the InstrInfoEmitter
// tablegen backend, which will use it to automatically generate a definition in
// the target specific `InstrInfo` class.
//
// There cannot be multiple TIIPredicate definitions with the same name for the
// same target.
class TIIPredicate<string Name, MCStatement body>
    : FunctionPredicateBase<Name, body>, MCInstPredicate;
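
// For example (MOVrr is an illustrative opcode), a TIIPredicate can wrap an
// opcode switch:
//
//   def IsRegRegMovPred : TIIPredicate<"isRegRegMov",
//     MCOpcodeSwitchStatement<
//       [MCOpcodeSwitchCase<[MOVrr], MCReturnStatement<CheckIsRegOperand<1>>>],
//       MCReturnStatement<FalsePred>>>;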

// A function predicate that takes as input a machine instruction, and returns
// a boolean value.
//
// This predicate is expanded into a function call by the PredicateExpander.
// In particular, the PredicateExpander would either expand this predicate into
// a call to `MCInstFn`, or into a call to`MachineInstrFn` depending on whether
// it is lowering predicates for MCInst or MachineInstr.
//
// In this context, `MCInstFn` and `MachineInstrFn` are both function names.
class CheckFunctionPredicate<string MCInstFn, string MachineInstrFn> : MCInstPredicate {
  string MCInstFnName = MCInstFn;
  string MachineInstrFnName = MachineInstrFn;
}

// Similar to CheckFunctionPredicate. However, it assumes that MachineInstrFn
// is a method of TargetInstrInfo, and that MCInstFn takes an extra pointer to
// MCInstrInfo.
//
// It expands to:
//  - TIIPointer->MachineInstrFn(MI)
//  - MCInstFn(MI, MCII)
class CheckFunctionPredicateWithTII<string MCInstFn, string MachineInstrFn, string
TIIPointer = "TII"> : MCInstPredicate {
  string MCInstFnName = MCInstFn;
  string TIIPtrName = TIIPointer;
  string MachineInstrFnName = MachineInstrFn;
}

// Used to classify machine instructions based on a machine instruction
// predicate.
//
// Let IC be an InstructionEquivalenceClass definition, and MI a machine
// instruction.  We say that MI belongs to the equivalence class described by IC
// if and only if the following two conditions are met:
//  a) MI's opcode is in the `opcodes` set, and
//  b) `Predicate` evaluates to true when applied to MI.
//
// Instances of this class can be used by processor scheduling models to
// describe instructions that have a property in common.  For example,
// InstructionEquivalenceClass definitions can be used to identify the set of
// dependency breaking instructions for a processor model.
//
// An (optional) list of operand indices can be used to further describe
// properties that apply to instruction operands. For example, it can be used to
// identify register uses of dependency breaking instructions that are not in
// a RAW dependency.
class InstructionEquivalenceClass<list<Instruction> opcodes,
                                  MCInstPredicate pred,
                                  list<int> operands = []> {
  list<Instruction> Opcodes = opcodes;
  MCInstPredicate Predicate = pred;
  list<int> OperandIndices = operands;
}

// Used by processor models to describe dependency breaking instructions.
//
// This is mainly an alias for InstructionEquivalenceClass.  Input operand
// `BrokenDeps` identifies the set of "broken dependencies". There is one bit
// per each implicit and explicit input operand.  An empty set of broken
// dependencies means: "explicit input register operands are independent."
class DepBreakingClass<list<Instruction> opcodes, MCInstPredicate pred,
                       list<int> BrokenDeps = []>
    : InstructionEquivalenceClass<opcodes, pred, BrokenDeps>;

// A function descriptor used to describe the signature of predicate methods
// which will be expanded by the STIPredicateExpander into a tablegen'd
// XXXGenSubtargetInfo class member definition (here, XXX is a target name).
//
// It describes the signature of a TargetSubtarget hook, as well as a few extra
// properties. Examples of extra properties are:
//  - The default return value for the auto-generated function hook.
//  - A list of subtarget hooks (Delegates) that are called from this function.
//
class STIPredicateDecl<string name, MCInstPredicate default = FalsePred,
                       bit overrides = true, bit expandForMC = true,
                       bit updatesOpcodeMask = false,
                       list<STIPredicateDecl> delegates = []> {
  string Name = name;

  MCInstPredicate DefaultReturnValue = default;

  // True if this method is declared as virtual in class TargetSubtargetInfo.
  bit OverridesBaseClassMember = overrides;

  // True if we need an equivalent predicate function in the MC layer.
  bit ExpandForMC = expandForMC;

  // True if the autogenerated method has an extra in/out APInt param used as a
  // mask of operands.
  bit UpdatesOpcodeMask = updatesOpcodeMask;

  // A list of STIPredicates used by this definition to delegate part of the
  // computation. For example, STIPredicateFunction `isDependencyBreaking()`
  // delegates to `isZeroIdiom()` part of its computation.
  list<STIPredicateDecl> Delegates = delegates;
}

// A predicate function definition member of class `XXXGenSubtargetInfo`.
//
// If `Declaration.ExpandForMC` is true, then SubtargetEmitter
// will also expand another definition of this method that accepts an MCInst.
class STIPredicate<STIPredicateDecl declaration,
                   list<InstructionEquivalenceClass> classes> {
  STIPredicateDecl Declaration = declaration;
  list<InstructionEquivalenceClass> Classes = classes;
  SchedMachineModel SchedModel = ?;
}

// Convenience classes and definitions used by processor scheduling models to
// describe dependency breaking instructions and move elimination candidates.
let UpdatesOpcodeMask = true in {

def IsZeroIdiomDecl : STIPredicateDecl<"isZeroIdiom">;

let Delegates = [IsZeroIdiomDecl] in
def IsDepBreakingDecl : STIPredicateDecl<"isDependencyBreaking">;

} // UpdatesOpcodeMask

def IsOptimizableRegisterMoveDecl
    : STIPredicateDecl<"isOptimizableRegisterMove">;

class IsZeroIdiomFunction<list<DepBreakingClass> classes>
    : STIPredicate<IsZeroIdiomDecl, classes>;

class IsDepBreakingFunction<list<DepBreakingClass> classes>
    : STIPredicate<IsDepBreakingDecl, classes>;

class IsOptimizableRegisterMove<list<InstructionEquivalenceClass> classes>
    : STIPredicate<IsOptimizableRegisterMoveDecl, classes>;
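
// For example (modeled on typical X86 usage; XOR32rr stands in for a real
// zero-idiom opcode), a processor model could define:
//
//   def : IsZeroIdiomFunction<[
//     DepBreakingClass<[XOR32rr], CheckSameRegOperand<1, 2>>
//   ]>;
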
//===- CGPassBuilderOption.h - Options for pass builder ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the CGPassBuilderOption struct, which holds the
// target-independent options used when building the CodeGen pass pipeline.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_CGPASSBUILDEROPTION_H
#define LLVM_TARGET_CGPASSBUILDEROPTION_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>

namespace llvm {

enum class RunOutliner { TargetDefault, AlwaysOutline, NeverOutline };
enum class RegAllocType { Default, Basic, Fast, Greedy, PBQP };

// Not one-to-one, but mostly corresponding to command-line options in
// TargetPassConfig.cpp.
struct CGPassBuilderOption {
  std::optional<bool> OptimizeRegAlloc;
  std::optional<bool> EnableIPRA;
  bool DebugPM = false;
  bool DisableVerify = false;
  bool EnableImplicitNullChecks = false;
  bool EnableBlockPlacementStats = false;
  bool MISchedPostRA = false;
  bool EarlyLiveIntervals = false;

  bool DisableLSR = false;
  bool DisableCGP = false;
  bool PrintLSR = false;
  bool DisableMergeICmps = false;
  bool DisablePartialLibcallInlining = false;
  bool DisableConstantHoisting = false;
  bool DisableSelectOptimize = true;
  bool PrintISelInput = false;
  bool PrintGCInfo = false;
  bool RequiresCodeGenSCCOrder = false;

  RunOutliner EnableMachineOutliner = RunOutliner::TargetDefault;
  RegAllocType RegAlloc = RegAllocType::Default;
  std::optional<GlobalISelAbortMode> EnableGlobalISelAbort;

  std::optional<bool> VerifyMachineCode;
  std::optional<bool> EnableFastISelOption;
  std::optional<bool> EnableGlobalISelOption;
};

CGPassBuilderOption getCGPassBuilderOption();

} // namespace llvm

#endif // LLVM_TARGET_CGPASSBUILDEROPTION_H
//===- TargetSchedule.td - Target Independent Scheduling ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent scheduling interfaces which should
// be implemented by each target which is using TableGen based scheduling.
//
// The SchedMachineModel is defined by subtargets for three categories of data:
// 1. Basic properties for coarse grained instruction cost model.
// 2. Scheduler Read/Write resources for simple per-opcode cost model.
// 3. Instruction itineraries for detailed reservation tables.
//
// (1) Basic properties are defined by the SchedMachineModel
// class. Target hooks allow subtargets to associate opcodes with
// those properties.
//
// (2) A per-operand machine model can be implemented in any
// combination of the following ways:
//
// A. Associate per-operand SchedReadWrite types with Instructions by
// modifying the Instruction definition to inherit from Sched. For
// each subtarget, define WriteRes and ReadAdvance to associate
// processor resources and latency with each SchedReadWrite type.
//
// B. In each instruction definition, name an ItineraryClass. For each
// subtarget, define ItinRW entries to map ItineraryClass to
// per-operand SchedReadWrite types. Unlike method A, these types may
// be subtarget specific and can be directly associated with resources
// by defining SchedWriteRes and SchedReadAdvance.
//
// C. In the subtarget, map SchedReadWrite types to specific
// opcodes. This overrides any SchedReadWrite types or
// ItineraryClasses defined by the Instruction. As in method B, the
// subtarget can directly associate resources with SchedReadWrite
// types by defining SchedWriteRes and SchedReadAdvance.
//
// D. In either the target or subtarget, define SchedWriteVariant or
// SchedReadVariant to map one SchedReadWrite type onto another
// sequence of SchedReadWrite types. This allows dynamic selection of
// an instruction's machine model via custom C++ code. It also allows
// a machine-independent SchedReadWrite type to map to a sequence of
// machine-dependent types.
//
// (3) A per-pipeline-stage machine model can be implemented by providing
// Itineraries in addition to mapping instructions to ItineraryClasses.
//===----------------------------------------------------------------------===//

// Include legacy support for instruction itineraries.
include "llvm/Target/TargetItinerary.td"

class Predicate; // Forward def

// DAG operator that interprets the DAG args as Instruction defs.
def instrs;

// DAG operator that interprets each DAG arg as a regex pattern for
// matching Instruction opcode names.
// The regex must match the beginning of the opcode (as in Python re.match).
// To avoid matching prefixes, append '$' to the pattern.
def instregex;

// Define the SchedMachineModel and provide basic properties for
// coarse grained instruction cost model. Default values for the
// properties are defined in MCSchedModel. A value of "-1" in the
// target description's SchedMachineModel indicates that the property
// is not overridden by the target.
//
// Target hooks allow subtargets to associate LoadLatency and
// HighLatency with groups of opcodes.
//
// See MCSchedule.h for detailed comments.
class SchedMachineModel {
  int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle.
  int MicroOpBufferSize = -1; // Max micro-ops that can be buffered.
  int LoopMicroOpBufferSize = -1; // Max micro-ops that can be buffered for
                                  // optimized loop dispatch/execution.
  int LoadLatency = -1; // Cycles for loads to access the cache.
  int HighLatency = -1; // Approximation of cycles for "high latency" ops.
  int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.

  // Per-cycle resource tables.
  ProcessorItineraries Itineraries = NoItineraries;

  bit PostRAScheduler = false; // Enable Post RegAlloc Scheduler pass.

  // Subtargets that define a model for only a subset of instructions
  // that have a scheduling class (itinerary class or SchedRW list)
  // and may actually be generated for that subtarget must clear this
  // bit. Otherwise, the scheduler considers an unmodelled opcode to
  // be an error. This should only be set during initial bringup,
  // or there will be no way to catch simple errors in the model
  // resulting from changes to the instruction definitions.
  bit CompleteModel = true;

  // Indicates that we should do full overlap checking for multiple InstrRWs
  // defining the same instructions within the same SchedMachineModel.
  // FIXME: Remove when all in tree targets are clean with the full check
  // enabled.
  bit FullInstRWOverlapCheck = true;

  // A processor may implement only part of the published ISA, due either to new
  // ISA extensions (e.g. Pentium 4 doesn't have AVX) or to implementation
  // choices (ARM/MIPS/PowerPC/SPARC soft-float cores).
  //
  // For a processor which doesn't support some feature(s), the schedule model
  // can use:
  //
  // let UnsupportedFeatures = [HaveA, ..., HaveY];
  //
  // to skip the checks for scheduling information when building LLVM for
  // instructions which have any of the listed predicates in their Predicates
  // field.
  list<Predicate> UnsupportedFeatures = [];

  bit NoModel = false; // Special tag to indicate missing machine model.

  // Tells the MachineScheduler whether or not to track resource usage
  // using intervals via ResourceSegments (see
  // llvm/include/llvm/CodeGen/MachineScheduler.h).
  bit EnableIntervals = false;
}

def NoSchedModel : SchedMachineModel {
  let NoModel = true;
  let CompleteModel = false;
}
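
// For example, a subtarget might define (values are illustrative):
//
//   def MyModel : SchedMachineModel {
//     let IssueWidth = 2;
//     let LoadLatency = 4;
//     let MispredictPenalty = 10;
//     let CompleteModel = false;
//   }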

// Define a kind of processor resource that may be common across
// similar subtargets.
class ProcResourceKind;

// Define a number of interchangeable processor resources. NumUnits
// determines the throughput of instructions that require the resource.
//
// An optional Super resource may be given to model these resources as
// a subset of the more general super resources. Using one of these
// resources implies using one of the super resources.
//
// ProcResourceUnits normally model a few buffered resources within an
// out-of-order engine. Buffered resources may be held for multiple
// clock cycles, but the scheduler does not pin them to a particular
// clock cycle relative to instruction dispatch. Setting BufferSize=0
// changes this to an in-order issue/dispatch resource. In this case,
// the scheduler counts down from the cycle that the instruction
// issues in-order, forcing a stall whenever a subsequent instruction
// requires the same resource until the ResourceCycles
// specified in WriteRes expire. Setting BufferSize=1 changes this to
// an in-order latency resource. In this case, the scheduler models
// producer/consumer stalls between instructions that use the
// resource.
//
// Examples (all assume an out-of-order engine):
//
// Use BufferSize = -1 for "issue ports" fed by a unified reservation
// station. Here the size of the reservation station is modeled by
// MicroOpBufferSize, which should be the minimum size of either the
// register rename pool, unified reservation station, or reorder
// buffer.
//
// Use BufferSize = 0 for resources that force "dispatch/issue
// groups". (Different processors define dispath/issue
// differently. Here we refer to stage between decoding into micro-ops
// and moving them into a reservation station.) Normally NumMicroOps
// is sufficient to limit dispatch/issue groups. However, some
// processors can form groups of with only certain combinations of
// instruction types. e.g. POWER7.
//
// Use BufferSize = 1 for in-order execution units. This is used for
// an in-order pipeline within an out-of-order core where scheduling
// dependent operations back-to-back is guaranteed to cause a
// bubble. e.g. Cortex-a9 floating-point.
//
// Use BufferSize > 1 for out-of-order executions units with a
// separate reservation station. This simply models the size of the
// reservation station.
//
// To model both dispatch/issue groups and in-order execution units,
// create two types of units, one with BufferSize=0 and one with
// BufferSize=1.
//
// SchedModel ties these units to a processor for any stand-alone defs
// of this class.
class ProcResourceUnits<ProcResourceKind kind, int num> {
  ProcResourceKind Kind = kind;
  int NumUnits = num;
  ProcResourceKind Super = ?;
  int BufferSize = -1;
  SchedMachineModel SchedModel = ?;
}

// EponymousProcResourceKind helps implement ProcResourceUnits by
// allowing a ProcResourceUnits definition to reference itself. It
// should not be referenced anywhere else.
def EponymousProcResourceKind : ProcResourceKind;

// Subtargets typically define processor resource kind and number of
// units in one place.
class ProcResource<int num> : ProcResourceKind,
  ProcResourceUnits<EponymousProcResourceKind, num>;

class ProcResGroup<list<ProcResource> resources> : ProcResourceKind {
  list<ProcResource> Resources = resources;
  SchedMachineModel SchedModel = ?;
  int BufferSize = -1;
}
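
// For example (illustrative), two interchangeable integer pipes plus a group
// that allows an instruction to issue to either one:
//
//   def MyIntPipe0 : ProcResource<1>;
//   def MyIntPipe1 : ProcResource<1>;
//   def MyIntAny  : ProcResGroup<[MyIntPipe0, MyIntPipe1]>;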

// A target architecture may define SchedReadWrite types and associate
// them with instruction operands.
class SchedReadWrite;

// List the per-operand types that map to the machine model of an
// instruction. One SchedWrite type must be listed for each explicit
// def operand in order. Additional SchedWrite types may optionally be
// listed for implicit def operands.  SchedRead types may optionally
// be listed for use operands in order. The order of defs relative to
// uses is insignificant. This way, the same SchedReadWrite list may
// be used for multiple forms of an operation. For example, a
// two-address instruction could have two tied operands or single
// operand that both reads and writes a reg. In both cases we have a
// single SchedWrite and single SchedRead in any order.
class Sched<list<SchedReadWrite> schedrw> {
  list<SchedReadWrite> SchedRW = schedrw;
}

// Define a scheduler resource associated with a def operand.
class SchedWrite : SchedReadWrite;
def NoWrite : SchedWrite;

// Define a scheduler resource associated with a use operand.
class SchedRead  : SchedReadWrite;

// Define a SchedWrite that is modeled as a sequence of other
// SchedWrites with additive latency. This allows a single operand to
// be mapped to the resources composed from a set of previously defined
// SchedWrites.
//
// If the final write in this sequence is a SchedWriteVariant marked
// Variadic, then the list of prior writes are distributed across all
// operands after resolving the predicate for the final write.
//
// SchedModel silences warnings but is ignored.
class WriteSequence<list<SchedWrite> writes, int rep = 1> : SchedWrite {
  list<SchedWrite> Writes = writes;
  int Repeat = rep;
  SchedMachineModel SchedModel = ?;
}
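
// For example (WriteIALU is an illustrative SchedWrite), a double-pumped
// operation can be modeled as the same write repeated twice:
//
//   def WriteIALU2 : WriteSequence<[WriteIALU], 2>;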

// Define values common to WriteRes and SchedWriteRes.
//
// SchedModel ties these resources to a processor.
class ProcWriteResources<list<ProcResourceKind> resources> {
  list<ProcResourceKind> ProcResources = resources;
  list<int> ResourceCycles = [];
  list<int> StartAtCycles = [];
  int Latency = 1;
  int NumMicroOps = 1;
  bit BeginGroup = false;
  bit EndGroup = false;
  // Allow a processor to mark some scheduling classes as unsupported
  // for stronger verification.
  bit Unsupported = false;
  // Allow a processor to mark some scheduling classes as single-issue.
  // SingleIssue is an alias for Begin/End Group.
  bit SingleIssue = false;
  // An instruction is allowed to retire out-of-order if RetireOOO is
  // true for at least one of its writes. This field is only used by
  // MCA for in-order subtargets, and is ignored for other targets.
  bit RetireOOO = false;
  SchedMachineModel SchedModel = ?;
}

// Define the resources and latency of a SchedWrite. This will be used
// directly by targets that have no itinerary classes. In this case,
// SchedWrite is defined by the target, while WriteResources is
// defined by the subtarget, and maps the SchedWrite to processor
// resources.
//
// If a target already has itinerary classes, SchedWriteResources can
// be used instead to define subtarget specific SchedWrites and map
// them to processor resources in one place. Then ItinRW can map
// itinerary classes to the subtarget's SchedWrites.
//
// ProcResources indicates the set of resources consumed by the write.
// Optionally, ResourceCycles indicates the number of cycles the
// resource is consumed. Each ResourceCycles item is paired with the
// ProcResource item at the same position in its list. ResourceCycles
// can be `[]`: in that case, all resources are consumed for a single
// cycle, regardless of latency, which models a fully pipelined processing
// unit. A value of 0 for ResourceCycles means that the resource must
// be available but is not consumed, which is only relevant for
// unbuffered resources.
//
// By default, each SchedWrite takes one micro-op, which is counted
// against the processor's IssueWidth limit. If an instruction can
// write multiple registers with a single micro-op, the subtarget
// should define one of the writes to be zero micro-ops. If a
// subtarget requires multiple micro-ops to write a single result, it
// should either override the write's NumMicroOps to be greater than 1
// or require additional writes. Extra writes can be required either
// by defining a WriteSequence, or simply listing extra writes in the
// instruction's list of writers beyond the number of "def"
// operands. The scheduler assumes that all micro-ops must be
// dispatched in the same cycle. These micro-ops may be required to
// begin or end the current dispatch group.
class WriteRes<SchedWrite write, list<ProcResourceKind> resources>
  : ProcWriteResources<resources> {
  SchedWrite WriteType = write;
}
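
// For example (WriteIALU and MyIntAny are illustrative names), a subtarget
// could map a simple ALU write to one integer pipe with single-cycle latency:
//
//   def : WriteRes<WriteIALU, [MyIntAny]> { let Latency = 1; }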

// Directly name a set of WriteResources defining a new SchedWrite
// type at the same time. This class is unaware of its SchedModel so
// must be referenced by InstRW or ItinRW.
class SchedWriteRes<list<ProcResourceKind> resources> : SchedWrite,
  ProcWriteResources<resources>;

// Define values common to ReadAdvance and SchedReadAdvance.
//
// SchedModel ties these resources to a processor.
class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> {
  int Cycles = cycles;
  list<SchedWrite> ValidWrites = writes;
  // Allow a processor to mark some scheduling classes as unsupported
  // for stronger verification.
  bit Unsupported = false;
  SchedMachineModel SchedModel = ?;
}

// A processor may define a ReadAdvance associated with a SchedRead
// to reduce latency of a prior write by N cycles. A negative advance
// effectively increases latency, which may be used for cross-domain
// stalls.
//
// A ReadAdvance may be associated with a list of SchedWrites
// to implement pipeline bypass. The Writes list may be empty to
// indicate operands that are always read this number of Cycles later
// than a normal register read, allowing the read's parent instruction
// to issue earlier relative to the writer.
class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []>
  : ProcReadAdvance<cycles, writes> {
  SchedRead ReadType = read;
}
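
// For example (ReadMAC and WriteMAC are illustrative names), an accumulator
// forwarding path that lets consumers read a WriteMAC result 2 cycles early:
//
//   def : ReadAdvance<ReadMAC, 2, [WriteMAC]>;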

// Directly associate a new SchedRead type with a delay and optional
// pipeline bypass. For use with InstRW or ItinRW.
class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead,
  ProcReadAdvance<cycles, writes>;

// Define SchedRead defaults. Reads seldom need special treatment.
def ReadDefault : SchedRead;
def NoReadAdvance : SchedReadAdvance<0>;

// Define shared code that will be in the same scope as all
// SchedPredicates. Available variables are:
// (const MachineInstr *MI, const TargetSchedModel *SchedModel)
class PredicateProlog<code c> {
  code Code = c;
}

// Base class for scheduling predicates.
class SchedPredicateBase;

// A scheduling predicate whose logic is defined by a MCInstPredicate.
// This can directly be used by SchedWriteVariant definitions.
class MCSchedPredicate<MCInstPredicate P> : SchedPredicateBase {
  MCInstPredicate Pred = P;
  SchedMachineModel SchedModel = ?;
}

// Define a predicate to determine which SchedVariant applies to a
// particular MachineInstr. The code snippet is used as an
// if-statement's expression. Available variables are MI, SchedModel,
// and anything defined in a PredicateProlog.
//
// SchedModel silences warnings but is ignored.
class SchedPredicate<code pred> : SchedPredicateBase {
  SchedMachineModel SchedModel = ?;
  code Predicate = pred;
}

// Define a predicate to be typically used as the default case in a
// SchedVariant.  If the SchedVariant does not use any other predicate based on
// MCSchedPredicate, this is the default scheduling case used by llvm-mca.
def NoSchedPred : MCSchedPredicate<TruePred>;

// Associate a predicate with a list of SchedReadWrites. By default,
// the selected SchedReadWrites are still associated with a single
// operand and assumed to execute sequentially with additive
// latency. However, if the parent SchedWriteVariant or
// SchedReadVariant is marked "Variadic", then each Selected
// SchedReadWrite is mapped in place to the instruction's variadic
// operands. In this case, latency is not additive. If the current Variant
// is already part of a Sequence, then that entire chain leading up to
// the Variant is distributed over the variadic operands.
class SchedVar<SchedPredicateBase pred, list<SchedReadWrite> selected> {
  SchedPredicateBase Predicate = pred;
  list<SchedReadWrite> Selected = selected;
}

// SchedModel silences warnings but is ignored.
class SchedVariant<list<SchedVar> variants> {
  list<SchedVar> Variants = variants;
  bit Variadic = false;
  SchedMachineModel SchedModel = ?;
}

// A SchedWriteVariant is a single SchedWrite type that maps to a list
// of SchedWrite types under the conditions defined by its predicates.
//
// A Variadic write is expanded to cover multiple "def" operands. The
// SchedVariant's Expansion list is then interpreted as one write
// per-operand instead of the usual sequential writes feeding a single
// operand.
class SchedWriteVariant<list<SchedVar> variants> : SchedWrite,
  SchedVariant<variants> {
}
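
// For example (names are illustrative), a variant that picks a cheaper write
// when both source operands are the same register:
//
//   def MyWriteALUVar : SchedWriteVariant<[
//     SchedVar<MCSchedPredicate<CheckSameRegOperand<1, 2>>, [MyWriteZero]>,
//     SchedVar<NoSchedPred,                                 [MyWriteALU]>
//   ]>;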

// A SchedReadVariant is a single SchedRead type that maps to a list
// of SchedRead types under the conditions defined by its predicates.
//
// A Variadic write is expanded to cover multiple "readsReg" operands as
// explained above.
class SchedReadVariant<list<SchedVar> variants> : SchedRead,
  SchedVariant<variants> {
}

// Map a set of opcodes to a list of SchedReadWrite types. This allows
// the subtarget to easily override specific operations.
//
// SchedModel ties this opcode mapping to a processor.
class InstRW<list<SchedReadWrite> rw, dag instrlist> {
  list<SchedReadWrite> OperandReadWrites = rw;
  dag Instrs = instrlist;
  SchedMachineModel SchedModel = ?;
  // Allow a subtarget to mark some instructions as unsupported.
  bit Unsupported = false;
}
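
// For example (MyWriteALU is illustrative), a subtarget can override every
// opcode whose name starts with "ADD":
//
//   def : InstRW<[MyWriteALU], (instregex "^ADD")>;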

// Map a set of itinerary classes to SchedReadWrite resources. This is
// used to bootstrap a target (e.g. ARM) when itineraries already
// exist and changing InstrInfo is undesirable.
//
// SchedModel ties this ItineraryClass mapping to a processor.
class ItinRW<list<SchedReadWrite> rw, list<InstrItinClass> iic> {
  list<InstrItinClass> MatchedItinClasses = iic;
  list<SchedReadWrite> OperandReadWrites = rw;
  SchedMachineModel SchedModel = ?;
}

// Alias a target-defined SchedReadWrite to a processor specific
// SchedReadWrite. This allows a subtarget to easily map a
// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or
// SchedReadVariant.
//
// SchedModel will usually be provided by surrounding let statement
// and ties this SchedAlias mapping to a processor.
class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
  SchedReadWrite MatchRW = match;
  SchedReadWrite AliasRW = alias;
  SchedMachineModel SchedModel = ?;
}
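
// A minimal sketch (SomeModel, WriteIMul, and SomeProcWriteIMul are
// hypothetical names):
//
//   let SchedModel = SomeModel in {
//     def : SchedAlias<WriteIMul, SomeProcWriteIMul>;
//   }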

// Allow the definition of processor register files for register renaming
// purposes.
//
// Each processor register file declares:
//  - The set of registers that can be renamed.
//  - The number of physical registers which can be used for register renaming
//    purposes.
//  - The cost of a register rename.
//  - The set of registers that allow move elimination.
//  - The maximum number of moves that can be eliminated every cycle.
//  - Whether move elimination is limited to register moves whose input
//    is known to be zero.
//
// The cost of a rename is the number of physical registers allocated by the
// register alias table to map the new definition. By default, a register can be
// renamed at the cost of a single physical register.  Note that register costs
// are defined at register class granularity (see field `Costs`).
//
// The set of registers that are subject to register renaming is declared using
// a list of register classes (see field `RegClasses`). An empty list of
// register classes means: all the logical registers defined by the target can
// be fully renamed.
//
// A register R can be renamed if its register class appears in the `RegClasses`
// set. When R is written, a new alias is allocated at the cost of one or more
// physical registers; as a result, false dependencies on R are removed.
//
// A sub-register V of register R is implicitly part of the same register file.
// However, V is only renamed if its register class is part of `RegClasses`.
// Otherwise, the processor keeps it (as well as any other different part
// of R) together with R, and a write of V always causes a compulsory read of R.
//
// This is what happens for example on AMD processors (at least from Bulldozer
// onwards), where AL and AH are not treated as independent from AX, and AX is
// not treated as independent from EAX. A write to AL has an implicit false
// dependency on the last write to EAX (or a portion of EAX).  As a consequence,
// a write to AL cannot go in parallel with a write to AH.
//
// There is no false dependency if the partial register write belongs to a
// register class that is in `RegClasses`.
// There is also no penalty for writes that "clear the content of a super-register"
// (see MC/MCInstrAnalysis.h - method MCInstrAnalysis::clearsSuperRegisters()).
// On x86-64, 32-bit GPR writes implicitly zero the upper half of the underlying
// physical register, effectively removing any false dependencies with the
// previous register definition.
//
// TODO: This implementation assumes that there is no limit in the number of
// renames per cycle, which might not be true for all hardware or register
// classes. Also, there is no limit to how many times the same logical register
// can be renamed during the same cycle.
//
// TODO: We don't currently model merge penalties for the case where a write to
// a part of a register is followed by a read from a larger part of the same
// register. On some Intel chips, different parts of a GPR can be stored in
// different physical registers. However, there is a cost to pay when the
// partial write is combined with the previous super-register definition.  We
// should add support for these cases, and correctly model merge problems with
// partial register accesses.
//
// Field MaxMovesEliminatedPerCycle specifies how many moves can be eliminated
// every cycle. A default value of zero for that field means: there is no limit
// to the number of moves that can be eliminated by this register file.
//
// An instruction MI is a candidate for move elimination if a call to
// method TargetSubtargetInfo::isOptimizableRegisterMove(MI) returns true (see
// llvm/CodeGen/TargetSubtargetInfo.h, and llvm/MC/MCInstrAnalysis.h).
//
// Subtargets can instantiate tablegen class IsOptimizableRegisterMove (see
// llvm/Target/TargetInstrPredicate.td) to customize the set of move elimination
// candidates. By default, no instruction is a valid move elimination candidate.
//
// A register move MI is eliminated only if:
//  - MI is a move elimination candidate.
//  - The destination register is from a register class that allows move
//    elimination (see field `AllowMoveElimination` below).
//  - Constraints on the move kind, and the maximum number of moves that can be
//    eliminated per cycle are all met.

class RegisterFile<int numPhysRegs, list<RegisterClass> Classes = [],
                   list<int> Costs = [], list<bit> AllowMoveElim = [],
                   int MaxMoveElimPerCy = 0, bit AllowZeroMoveElimOnly = false> {
  list<RegisterClass> RegClasses = Classes;
  list<int> RegCosts = Costs;
  list<bit> AllowMoveElimination = AllowMoveElim;
  int NumPhysRegs = numPhysRegs;
  int MaxMovesEliminatedPerCycle = MaxMoveElimPerCy;
  bit AllowZeroMoveEliminationOnly = AllowZeroMoveElimOnly;
  SchedMachineModel SchedModel = ?;
}
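
// An illustrative instantiation (all names and numbers are hypothetical): a
// 160-entry physical register file renaming two register classes, where a
// rename costs one physical register, both classes allow move elimination,
// and at most four moves are eliminated per cycle:
//
//   def SomeIntegerPRF : RegisterFile<160, [GR32, GR64], [1, 1], [1, 1], 4>;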

// Describe the retire control unit.
// A retire control unit specifies the size of the reorder buffer, as well as
// the maximum number of opcodes that can be retired every cycle.
// A value less-than-or-equal-to zero for field 'ReorderBufferSize' means: "the
// size is unknown". The idea is that external tools can fall back to using
// field MicroOpBufferSize in SchedModel if the reorder buffer size is unknown.
// A zero or negative value for field 'MaxRetirePerCycle' means "no
// restrictions on the number of instructions retired per cycle".
// Models can optionally specify up to one instance of RetireControlUnit per
// scheduling model.
class RetireControlUnit<int bufferSize, int retirePerCycle> {
  int ReorderBufferSize = bufferSize;
  int MaxRetirePerCycle = retirePerCycle;
  SchedMachineModel SchedModel = ?;
}
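
// For example (values are illustrative only), a 192-entry reorder buffer
// that retires up to 8 opcodes per cycle:
//
//   def : RetireControlUnit<192, 8>;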

// Base class for Load/StoreQueue.  It is used to identify processor resources
// which describe load/store queues in the LS unit.
class MemoryQueue<ProcResourceKind PR> {
  ProcResourceKind QueueDescriptor = PR;
  SchedMachineModel SchedModel = ?;
}

class LoadQueue<ProcResourceKind LDQueue> : MemoryQueue<LDQueue>;
class StoreQueue<ProcResourceKind STQueue> : MemoryQueue<STQueue>;
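
// For example (a sketch with a hypothetical buffered ProcResource standing
// in for the load-queue capacity):
//
//   def SomeLDQueue : ProcResource<1> { let BufferSize = 44; }
//   def : LoadQueue<SomeLDQueue>;
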
PKiwFZ}��s�	�	Target/TargetIntrinsicInfo.hnu�[���//===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the target intrinsic instructions to the code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
#define LLVM_TARGET_TARGETINTRINSICINFO_H

#include "llvm/ADT/StringRef.h"
#include <string>

namespace llvm {

class Function;
class Module;
class Type;

//---------------------------------------------------------------------------
///
/// TargetIntrinsicInfo - Interface to description of machine instruction set
///
class TargetIntrinsicInfo {
  TargetIntrinsicInfo(const TargetIntrinsicInfo &) = delete;
  void operator=(const TargetIntrinsicInfo &) = delete;
public:
  TargetIntrinsicInfo();
  virtual ~TargetIntrinsicInfo();

  /// Return the name of a target intrinsic, e.g. "llvm.bfin.ssync".
  /// The Tys and numTys parameters are for intrinsics with overloaded types
  /// (e.g., those using iAny or fAny). For a declaration for an overloaded
  /// intrinsic, Tys should point to an array of numTys pointers to Type,
  /// and must provide exactly one type for each overloaded type in the
  /// intrinsic.
  virtual std::string getName(unsigned IID, Type **Tys = nullptr,
                              unsigned numTys = 0) const = 0;

  /// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
  /// names.
  virtual unsigned lookupName(const char *Name, unsigned Len) const = 0;

  unsigned lookupName(StringRef Name) const {
    return lookupName(Name.data(), Name.size());
  }

  /// Return the target intrinsic ID of a function, or 0.
  virtual unsigned getIntrinsicID(const Function *F) const;

  /// Returns true if the intrinsic can be overloaded.
  virtual bool isOverloaded(unsigned IID) const = 0;

  /// Create or insert an LLVM Function declaration for an intrinsic,
  /// and return it. The Tys and numTys are for intrinsics with overloaded
  /// types. See above for more information.
  virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = nullptr,
                                   unsigned numTys = 0) const = 0;
};

} // End llvm namespace

#endif
//===-- GenericOpcodes.td - Opcodes used with GlobalISel ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the generic opcodes used with GlobalISel.
// After instruction selection, these opcodes should not appear.
//
//===----------------------------------------------------------------------===//

//------------------------------------------------------------------------------
// Unary ops.
//------------------------------------------------------------------------------

class GenericInstruction : StandardPseudoInstruction {
  let isPreISelOpcode = true;
}

// Provide a variant of an instruction with the same operands, but
// different instruction flags. This is intended to provide a
// convenient way to define strict floating point variants of ordinary
// floating point instructions.
class ConstrainedInstruction<GenericInstruction baseInst> :
  GenericInstruction {
  let OutOperandList = baseInst.OutOperandList;
  let InOperandList =  baseInst.InOperandList;
  let isCommutable = baseInst.isCommutable;

  // TODO: Do we need a better way to mark reads from FP mode than
  // hasSideEffects?
  let hasSideEffects = true;
  let mayRaiseFPException = true;
}
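
// For example, G_STRICT_FADD near the end of this file is defined simply as:
//
//   def G_STRICT_FADD : ConstrainedInstruction<G_FADD>;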

// Extend the underlying scalar type of an operation, leaving the high bits
// unspecified.
def G_ANYEXT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

// Sign extend the underlying scalar type of an operation, copying the sign bit
// into the newly-created space.
def G_SEXT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

// Sign extend a value from an arbitrary bit position, copying the sign bit
// into all bits above it. This is equivalent to a shl + ashr pair with an
// appropriate shift amount. $sz is an immediate (MachineOperand::isImm()
// returns true) to allow targets to have some bitwidths legal and others
// lowered. This opcode is particularly useful if the target has sign-extension
// instructions that are cheaper than the constituent shifts as the optimizer is
// able to make decisions on whether it's better to hang on to the G_SEXT_INREG
// or to lower it and optimize the individual shifts.
def G_SEXT_INREG : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, untyped_imm_0:$sz);
  let hasSideEffects = false;
}
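
// For example (generic MIR, illustrative), sign-extending the low 8 bits of
// a 32-bit value:
//
//   %1:_(s32) = G_SEXT_INREG %0:_(s32), 8
//
// which is equivalent to a G_SHL by 24 followed by a G_ASHR by 24.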

// Zero extend the underlying scalar type of an operation, putting zero bits
// into the newly-created space.
def G_ZEXT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}


// Truncate the underlying scalar type of an operation. This is equivalent to
// G_EXTRACT for scalar types, but acts elementwise on vectors.
def G_TRUNC : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_IMPLICIT_DEF : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins);
  let hasSideEffects = false;
}

def G_PHI : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins variable_ops);
  let hasSideEffects = false;
}

def G_FRAME_INDEX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$src2);
  let hasSideEffects = false;
}

def G_GLOBAL_VALUE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$src);
  let hasSideEffects = false;
}

def G_CONSTANT_POOL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$src);
  let hasSideEffects = false;
}

def G_INTTOPTR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_PTRTOINT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_BITCAST : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

// Only supports scalar result types
def G_CONSTANT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$imm);
  let hasSideEffects = false;
}

// Only supports scalar result types
def G_FCONSTANT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$imm);
  let hasSideEffects = false;
}

def G_VASTART : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins type0:$list);
  let hasSideEffects = false;
  let mayStore = true;
}

def G_VAARG : GenericInstruction {
  let OutOperandList = (outs type0:$val);
  let InOperandList = (ins type1:$list, unknown:$align);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

def G_CTLZ : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_CTLZ_ZERO_UNDEF : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_CTTZ : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_CTTZ_ZERO_UNDEF : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_CTPOP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_BSWAP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

def G_BITREVERSE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

def G_ADDRSPACE_CAST : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_BLOCK_ADDR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$ba);
  let hasSideEffects = false;
}

def G_JUMP_TABLE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$jti);
  let hasSideEffects = false;
}

def G_DYN_STACKALLOC : GenericInstruction {
  let OutOperandList = (outs ptype0:$dst);
  let InOperandList = (ins type1:$size, i32imm:$align);
  let hasSideEffects = true;
}

def G_FREEZE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

def G_LROUND: GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_LLROUND: GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

//------------------------------------------------------------------------------
// Binary ops.
//------------------------------------------------------------------------------

// Generic addition.
def G_ADD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic subtraction.
def G_SUB : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic multiplication.
def G_MUL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic signed division.
def G_SDIV : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic unsigned division.
def G_UDIV : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic signed remainder.
def G_SREM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic unsigned remainder.
def G_UREM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic signed division and remainder.
def G_SDIVREM : GenericInstruction {
  let OutOperandList = (outs type0:$div, type0:$rem);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic unsigned division and remainder.
def G_UDIVREM : GenericInstruction {
  let OutOperandList = (outs type0:$div, type0:$rem);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic bitwise and.
def G_AND : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic bitwise or.
def G_OR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic bitwise xor.
def G_XOR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic left-shift.
def G_SHL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
}

// Generic logical right-shift.
def G_LSHR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
}

// Generic arithmetic right-shift.
def G_ASHR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
}

/// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
/// fshl(X,Y,Z): (X << (Z % bitwidth)) | (Y >> (bitwidth - (Z % bitwidth)))
def G_FSHL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3);
  let hasSideEffects = false;
}
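
// Worked 8-bit example of the formula above (keeping 8 bits at each step):
//   fshl(0x12, 0x34, 4) = (0x12 << 4) | (0x34 >> 4) = 0x20 | 0x03 = 0x23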

/// Funnel 'double' shifts take 3 operands, 2 inputs and the shift amount.
/// fshr(X,Y,Z): (X << (bitwidth - (Z % bitwidth))) | (Y >> (Z % bitwidth))
def G_FSHR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2, type1:$src3);
  let hasSideEffects = false;
}

/// Rotate bits right.
def G_ROTR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
}

/// Rotate bits left.
def G_ROTL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
}

// Generic integer comparison.
def G_ICMP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
  let hasSideEffects = false;
}

// Generic floating-point comparison.
def G_FCMP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$tst, type1:$src1, type1:$src2);
  let hasSideEffects = false;
}

// Generic select
def G_SELECT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$tst, type0:$src1, type0:$src2);
  let hasSideEffects = false;
}

// Generic pointer offset.
def G_PTR_ADD : GenericInstruction {
  let OutOperandList = (outs ptype0:$dst);
  let InOperandList = (ins ptype0:$src1, type1:$src2);
  let hasSideEffects = false;
}

// Generic pointer mask. type1 should be an integer with the same
// bitwidth as the pointer type.
def G_PTRMASK : GenericInstruction {
  let OutOperandList = (outs ptype0:$dst);
  let InOperandList = (ins ptype0:$src, type1:$bits);
  let hasSideEffects = false;
}
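
// A typical use (generic MIR sketch; this assumes llvm.ptrmask-style
// semantics, where the result keeps only the pointer bits set in the mask):
// aligning a pointer down to 16 bytes:
//
//   %mask:_(s64) = G_CONSTANT i64 -16
//   %aligned:_(p0) = G_PTRMASK %ptr:_(p0), %mask:_(s64)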

// Generic signed integer minimum.
def G_SMIN : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic signed integer maximum.
def G_SMAX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic unsigned integer minimum.
def G_UMIN : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic unsigned integer maximum.
def G_UMAX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic integer absolute value.
def G_ABS : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

//------------------------------------------------------------------------------
// Overflow ops
//------------------------------------------------------------------------------

// Generic unsigned addition producing a carry flag.
def G_UADDO : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic unsigned addition consuming and producing a carry flag.
def G_UADDE : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
  let hasSideEffects = false;
}

// Generic signed addition producing a carry flag.
def G_SADDO : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic signed addition consuming and producing a carry flag.
def G_SADDE : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
  let hasSideEffects = false;
}

// Generic unsigned subtraction producing a carry flag.
def G_USUBO : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
}
// Generic unsigned subtraction consuming and producing a carry flag.
def G_USUBE : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
  let hasSideEffects = false;
}

// Generic signed subtraction producing a carry flag.
def G_SSUBO : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
}

// Generic signed subtraction consuming and producing a carry flag.
def G_SSUBE : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2, type1:$carry_in);
  let hasSideEffects = false;
}

// Generic unsigned multiplication producing a carry flag.
def G_UMULO : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic signed multiplication producing a carry flag.
def G_SMULO : GenericInstruction {
  let OutOperandList = (outs type0:$dst, type1:$carry_out);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Multiply two numbers at twice the incoming bit width (unsigned) and return
// the high half of the result.
def G_UMULH : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Multiply two numbers at twice the incoming bit width (signed) and return
// the high half of the result.
def G_SMULH : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

//------------------------------------------------------------------------------
// Saturating ops
//------------------------------------------------------------------------------

// Generic saturating unsigned addition.
def G_UADDSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic saturating signed addition.
def G_SADDSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic saturating unsigned subtraction.
def G_USUBSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic saturating signed subtraction.
def G_SSUBSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic saturating unsigned left shift.
def G_USHLSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic saturating signed left shift.
def G_SSHLSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type1:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

/// RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point
/// multiplication on 2 integers with the same width and scale. SCALE
/// represents the scale of both operands as fixed point numbers. This
/// SCALE parameter must be a constant integer. A scale of zero is
/// effectively performing multiplication on 2 integers.
def G_SMULFIX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = true;
}

def G_UMULFIX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = true;
}
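
// A worked example of the scale semantics (assuming the result is the
// product shifted right by SCALE, matching the corresponding ISD nodes):
// with SCALE = 2, the value 3.0 is encoded as 12, and
//   smulfix(12, 12, 2) = (12 * 12) >> 2 = 36,
// which encodes 9.0 with two fractional bits.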

/// Same as the corresponding unsaturated fixed point instructions, but the
/// result is clamped between the min and max values representable by the
/// bits of the first 2 operands.
def G_SMULFIXSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = true;
}

def G_UMULFIXSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = true;
}

/// RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on
/// 2 integers with the same width and scale. SCALE represents the scale
/// of both operands as fixed point numbers. This SCALE parameter must be a
/// constant integer.
def G_SDIVFIX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = false;
}

def G_UDIVFIX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = false;
}

/// Same as the corresponding unsaturated fixed point instructions,
/// but the result is clamped between the min and max values
/// representable by the bits of the first 2 operands.
def G_SDIVFIXSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = false;
}

def G_UDIVFIXSAT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type0:$src1, untyped_imm_0:$scale);
  let hasSideEffects = false;
  let isCommutable = false;
}

//------------------------------------------------------------------------------
// Floating Point Unary Ops.
//------------------------------------------------------------------------------

def G_FNEG : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

def G_FPEXT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_FPTRUNC : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_FPTOSI : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_FPTOUI : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_SITOFP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_UITOFP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_FABS : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

def G_FCOPYSIGN : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type1:$src1);
  let hasSideEffects = false;
}

def G_FCANONICALIZE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

// Generic opcode equivalent to the llvm.is_fpclass intrinsic.
def G_IS_FPCLASS: GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src, unknown:$test);
  let hasSideEffects = false;
}

// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
// values.
//
// In the case where a single input is a NaN (either signaling or quiet),
// the non-NaN input is returned.
//
// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
def G_FMINNUM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

def G_FMAXNUM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimum or maximum on
// two values, following the IEEE-754 2008 definition. This differs from
// FMINNUM/FMAXNUM in the handling of signaling NaNs. If one input is a
// signaling NaN, returns a quiet NaN.
def G_FMINNUM_IEEE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

def G_FMAXNUM_IEEE : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0
// as less than 0.0. While FMINNUM_IEEE/FMAXNUM_IEEE follow IEEE 754-2008
// semantics, FMINIMUM/FMAXIMUM follow IEEE 754-2018 draft semantics.
def G_FMINIMUM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

def G_FMAXIMUM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

//------------------------------------------------------------------------------
// Floating Point Binary ops.
//------------------------------------------------------------------------------

// Generic FP addition.
def G_FADD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic FP subtraction.
def G_FSUB : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic FP multiplication.
def G_FMUL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
  let isCommutable = true;
}

// Generic fused multiply-add instruction.
// Behaves like the llvm.fma intrinsic, i.e. src1 * src2 + src3.
def G_FMA : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3);
  let hasSideEffects = false;
  let isCommutable = false;
}

/// Generic FP multiply and add. Perform a * b + c, while getting the
/// same result as the separately rounded operations, unlike G_FMA.
def G_FMAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2, type0:$src3);
  let hasSideEffects = false;
  let isCommutable = false;
}

// Generic FP division.
def G_FDIV : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
}

// Generic FP remainder.
def G_FREM : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
}

// Floating point exponentiation.
def G_FPOW : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1, type0:$src2);
  let hasSideEffects = false;
}

// Floating point exponentiation, with an integer power.
def G_FPOWI : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type1:$src1);
  let hasSideEffects = false;
}

// Floating point base-e exponential of a value.
def G_FEXP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point base-2 exponential of a value.
def G_FEXP2 : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point base-e logarithm of a value.
def G_FLOG : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point base-2 logarithm of a value.
def G_FLOG2 : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point base-10 logarithm of a value.
def G_FLOG10 : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point x * 2^n
def G_FLDEXP : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src0, type1:$src1);
  let hasSideEffects = false;
}

// Floating point frexp
def G_FFREXP : GenericInstruction {
  let OutOperandList = (outs type0:$dst0, type1:$dst1);
  let InOperandList = (ins type0:$src0);
  let hasSideEffects = false;
}

// Floating point ceiling of a value.
def G_FCEIL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point cosine of a value.
def G_FCOS : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point sine of a value.
def G_FSIN : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point square root of a value.
// This returns NaN for negative nonzero values.
// NOTE: Unlike libm sqrt(), this never sets errno. In all other respects it's
// libm-conformant.
def G_FSQRT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point floor of a value.
def G_FFLOOR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point round to next integer.
def G_FRINT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

// Floating point round to the nearest integer.
def G_FNEARBYINT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

//------------------------------------------------------------------------------
// Opcodes for LLVM Intrinsics
//------------------------------------------------------------------------------
def G_INTRINSIC_FPTRUNC_ROUND : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src1, i32imm:$round_mode);
  let hasSideEffects = false;
}

def G_INTRINSIC_TRUNC : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

def G_INTRINSIC_ROUND : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

def G_INTRINSIC_LRINT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

def G_INTRINSIC_ROUNDEVEN : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
}

def G_READCYCLECOUNTER : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins);
  let hasSideEffects = true;
}

//------------------------------------------------------------------------------
// Memory ops
//------------------------------------------------------------------------------

// Generic load. Expects a MachineMemOperand in addition to explicit
// operands. If the result size is larger than the memory size, the
// high bits are undefined. If the result is a vector type and larger
// than the memory size, the high elements are undefined (i.e. this is
// not a per-element vector anyextload).
def G_LOAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins ptype1:$addr);
  let hasSideEffects = false;
  let mayLoad = true;
}

// Generic sign-extended load. Expects a MachineMemOperand in addition to explicit operands.
def G_SEXTLOAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins ptype1:$addr);
  let hasSideEffects = false;
  let mayLoad = true;
}

// Generic zero-extended load. Expects a MachineMemOperand in addition to explicit operands.
def G_ZEXTLOAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins ptype1:$addr);
  let hasSideEffects = false;
  let mayLoad = true;
}

// Generic indexed load. Combines a GEP with a load. $newaddr is set to $base + $offset.
// If $am is 0 (post-indexed), then the value is loaded from $base; if $am is 1 (pre-indexed)
// then the value is loaded from $newaddr.
def G_INDEXED_LOAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst, ptype1:$newaddr);
  let InOperandList = (ins ptype1:$base, type2:$offset, unknown:$am);
  let hasSideEffects = false;
  let mayLoad = true;
}

// Same as G_INDEXED_LOAD except that the load performed is sign-extending, as with G_SEXTLOAD.
def G_INDEXED_SEXTLOAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst, ptype1:$newaddr);
  let InOperandList = (ins ptype1:$base, type2:$offset, unknown:$am);
  let hasSideEffects = false;
  let mayLoad = true;
}

// Same as G_INDEXED_LOAD except that the load performed is zero-extending, as with G_ZEXTLOAD.
def G_INDEXED_ZEXTLOAD : GenericInstruction {
  let OutOperandList = (outs type0:$dst, ptype1:$newaddr);
  let InOperandList = (ins ptype1:$base, type2:$offset, unknown:$am);
  let hasSideEffects = false;
  let mayLoad = true;
}

// Generic store. Expects a MachineMemOperand in addition to explicit operands.
def G_STORE : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins type0:$src, ptype1:$addr);
  let hasSideEffects = false;
  let mayStore = true;
}

// Combines a store with a GEP. See description of G_INDEXED_LOAD for indexing behaviour.
def G_INDEXED_STORE : GenericInstruction {
  let OutOperandList = (outs ptype0:$newaddr);
  let InOperandList = (ins type1:$src, ptype0:$base, ptype2:$offset,
                           unknown:$am);
  let hasSideEffects = false;
  let mayStore = true;
}

// Generic atomic cmpxchg with internal success check. Expects a
// MachineMemOperand in addition to explicit operands.
def G_ATOMIC_CMPXCHG_WITH_SUCCESS : GenericInstruction {
  let OutOperandList = (outs type0:$oldval, type1:$success);
  let InOperandList = (ins type2:$addr, type0:$cmpval, type0:$newval);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

// Generic atomic cmpxchg. Expects a MachineMemOperand in addition to explicit
// operands.
def G_ATOMIC_CMPXCHG : GenericInstruction {
  let OutOperandList = (outs type0:$oldval);
  let InOperandList = (ins ptype1:$addr, type0:$cmpval, type0:$newval);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

// Generic atomicrmw. Expects a MachineMemOperand in addition to explicit
// operands.
class G_ATOMICRMW_OP : GenericInstruction {
  let OutOperandList = (outs type0:$oldval);
  let InOperandList = (ins ptype1:$addr, type0:$val);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

def G_ATOMICRMW_XCHG : G_ATOMICRMW_OP;
def G_ATOMICRMW_ADD : G_ATOMICRMW_OP;
def G_ATOMICRMW_SUB : G_ATOMICRMW_OP;
def G_ATOMICRMW_AND : G_ATOMICRMW_OP;
def G_ATOMICRMW_NAND : G_ATOMICRMW_OP;
def G_ATOMICRMW_OR : G_ATOMICRMW_OP;
def G_ATOMICRMW_XOR : G_ATOMICRMW_OP;
def G_ATOMICRMW_MAX : G_ATOMICRMW_OP;
def G_ATOMICRMW_MIN : G_ATOMICRMW_OP;
def G_ATOMICRMW_UMAX : G_ATOMICRMW_OP;
def G_ATOMICRMW_UMIN : G_ATOMICRMW_OP;
def G_ATOMICRMW_FADD : G_ATOMICRMW_OP;
def G_ATOMICRMW_FSUB : G_ATOMICRMW_OP;
def G_ATOMICRMW_FMAX : G_ATOMICRMW_OP;
def G_ATOMICRMW_FMIN : G_ATOMICRMW_OP;
def G_ATOMICRMW_UINC_WRAP : G_ATOMICRMW_OP;
def G_ATOMICRMW_UDEC_WRAP : G_ATOMICRMW_OP;

def G_FENCE : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins i32imm:$ordering, i32imm:$scope);
  let hasSideEffects = true;
}

//------------------------------------------------------------------------------
// Variadic ops
//------------------------------------------------------------------------------

// Extract a register of the specified size, starting from the block given by
// index. This will almost certainly be mapped to sub-register COPYs after
// register banks have been selected.
def G_EXTRACT : GenericInstruction {
  let OutOperandList = (outs type0:$res);
  let InOperandList = (ins type1:$src, untyped_imm_0:$offset);
  let hasSideEffects = false;
}

// Extract multiple registers of the specified size, starting from blocks given by
// indexes. This will almost certainly be mapped to sub-register COPYs after
// register banks have been selected.
// The output operands are always ordered from lowest bits to highest:
//   %bits_0_7:(s8), %bits_8_15:(s8),
//       %bits_16_23:(s8), %bits_24_31:(s8) = G_UNMERGE_VALUES %0:(s32)
def G_UNMERGE_VALUES : GenericInstruction {
  let OutOperandList = (outs type0:$dst0, variable_ops);
  let InOperandList = (ins type1:$src);
  let hasSideEffects = false;
}

// Insert a smaller register into a larger one at the specified bit-index.
def G_INSERT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, type1:$op, untyped_imm_0:$offset);
  let hasSideEffects = false;
}

// Concatenate multiple registers of the same size into a wider register.
// The input operands are always ordered from lowest bits to highest:
//   %0:(s32) = G_MERGE_VALUES %bits_0_7:(s8), %bits_8_15:(s8),
//                             %bits_16_23:(s8), %bits_24_31:(s8)
def G_MERGE_VALUES : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src0, variable_ops);
  let hasSideEffects = false;
}

/// Create a vector from multiple scalar registers. No implicit
/// conversion is performed (i.e. the result element type must be the
/// same as all source operands)
def G_BUILD_VECTOR : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src0, variable_ops);
  let hasSideEffects = false;
}
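
// For example (generic MIR, illustrative), building a <4 x s32> vector from
// four s32 registers:
//
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %a:_(s32), %b:_(s32), %c:_(s32), %d:_(s32)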

/// Like G_BUILD_VECTOR, but truncates the larger operand types to fit the
/// destination vector elt type.
def G_BUILD_VECTOR_TRUNC : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src0, variable_ops);
  let hasSideEffects = false;
}

/// Create a vector by concatenating vectors together.
def G_CONCAT_VECTORS : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src0, variable_ops);
  let hasSideEffects = false;
}

// Intrinsic without side effects.
def G_INTRINSIC : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$intrin, variable_ops);
  let hasSideEffects = false;

  // Conservatively assume this is convergent. If there turns out to
  // be a need, there should be separate convergent intrinsic opcodes.
  let isConvergent = true;
}

// Intrinsic with side effects.
def G_INTRINSIC_W_SIDE_EFFECTS : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$intrin, variable_ops);
  let hasSideEffects = true;
  let mayLoad = true;
  let mayStore = true;

  // Conservatively assume this is convergent. If there turns out to
  // be a need, there should be separate convergent intrinsic opcodes.
  let isConvergent = true;
}

//------------------------------------------------------------------------------
// Branches.
//------------------------------------------------------------------------------

// Generic unconditional branch.
def G_BR : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$src1);
  let hasSideEffects = false;
  let isBranch = true;
  let isTerminator = true;
  let isBarrier = true;
}

// Generic conditional branch.
def G_BRCOND : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins type0:$tst, unknown:$truebb);
  let hasSideEffects = false;
  let isBranch = true;
  let isTerminator = true;
}

// Generic indirect branch.
def G_BRINDIRECT : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins type0:$src1);
  let hasSideEffects = false;
  let isBranch = true;
  let isTerminator = true;
  let isBarrier = true;
  let isIndirectBranch = true;
}

// Generic branch to jump table entry
def G_BRJT : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptype0:$tbl, unknown:$jti, type1:$idx);
  let hasSideEffects = false;
  let isBranch = true;
  let isTerminator = true;
  let isBarrier = true;
  let isIndirectBranch = true;
}

// A marker to signal that the following code is an invoke region, which may
// throw an exception and therefore not return.
def G_INVOKE_REGION_START : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins);
  let isTerminator = true; // This must be a terminator.
  let hasSideEffects = false;
}

def G_READ_REGISTER : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins unknown:$register);
  let hasSideEffects = true;

  // Assume convergent. It's probably not worth the effort of somehow
  // modeling convergent and nonconvergent register accesses.
  let isConvergent = true;
}

def G_WRITE_REGISTER : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins unknown:$register, type0:$value);
  let hasSideEffects = true;

  // Assume convergent. It's probably not worth the effort of somehow
  // modeling convergent and nonconvergent register accesses.
  let isConvergent = true;
}

//------------------------------------------------------------------------------
// Vector ops
//------------------------------------------------------------------------------

// Generic insertelement.
def G_INSERT_VECTOR_ELT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, type1:$elt, type2:$idx);
  let hasSideEffects = false;
}

// Generic extractelement.
def G_EXTRACT_VECTOR_ELT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$src, type2:$idx);
  let hasSideEffects = false;
}

// Generic shufflevector.
//
// The mask operand should be an IR Constant which exactly matches the
// corresponding mask for the IR shufflevector instruction.
def G_SHUFFLE_VECTOR: GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$v1, type1:$v2, unknown:$mask);
  let hasSideEffects = false;
}

//------------------------------------------------------------------------------
// Vector reductions
//------------------------------------------------------------------------------

class VectorReduction : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$v);
  let hasSideEffects = false;
}

def G_VECREDUCE_SEQ_FADD : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$acc, type2:$v);
  let hasSideEffects = false;
}

def G_VECREDUCE_SEQ_FMUL : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type1:$acc, type2:$v);
  let hasSideEffects = false;
}

def G_VECREDUCE_FADD : VectorReduction;
def G_VECREDUCE_FMUL : VectorReduction;

def G_VECREDUCE_FMAX : VectorReduction;
def G_VECREDUCE_FMIN : VectorReduction;

def G_VECREDUCE_ADD : VectorReduction;
def G_VECREDUCE_MUL : VectorReduction;
def G_VECREDUCE_AND : VectorReduction;
def G_VECREDUCE_OR : VectorReduction;
def G_VECREDUCE_XOR : VectorReduction;
def G_VECREDUCE_SMAX : VectorReduction;
def G_VECREDUCE_SMIN : VectorReduction;
def G_VECREDUCE_UMAX : VectorReduction;
def G_VECREDUCE_UMIN : VectorReduction;

//------------------------------------------------------------------------------
// Constrained floating point ops
//------------------------------------------------------------------------------

def G_STRICT_FADD : ConstrainedInstruction<G_FADD>;
def G_STRICT_FSUB : ConstrainedInstruction<G_FSUB>;
def G_STRICT_FMUL : ConstrainedInstruction<G_FMUL>;
def G_STRICT_FDIV : ConstrainedInstruction<G_FDIV>;
def G_STRICT_FREM : ConstrainedInstruction<G_FREM>;
def G_STRICT_FMA : ConstrainedInstruction<G_FMA>;
def G_STRICT_FSQRT : ConstrainedInstruction<G_FSQRT>;
def G_STRICT_FLDEXP : ConstrainedInstruction<G_FLDEXP>;

//------------------------------------------------------------------------------
// Memory intrinsics
//------------------------------------------------------------------------------

def G_MEMCPY : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

def G_MEMCPY_INLINE : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

def G_MEMMOVE : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptype0:$dst_addr, ptype1:$src_addr, type2:$size, untyped_imm_0:$tailcall);
  let hasSideEffects = false;
  let mayLoad = true;
  let mayStore = true;
}

def G_MEMSET : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptype0:$dst_addr, type1:$value, type2:$size, untyped_imm_0:$tailcall);
  let hasSideEffects = false;
  let mayStore = true;
}

def G_BZERO : GenericInstruction {
  let OutOperandList = (outs);
  let InOperandList = (ins ptype0:$dst_addr, type1:$size, untyped_imm_0:$tailcall);
  let hasSideEffects = false;
  let mayStore = true;
}

//------------------------------------------------------------------------------
// Bitfield extraction.
//------------------------------------------------------------------------------

// Generic signed bitfield extraction. The operands are in the range
// 0 <= lsb < lsb + width <= src bitwidth, where all values are unsigned.
def G_SBFX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, type1:$lsb, type1:$width);
  let hasSideEffects = false;
}

// Generic unsigned bitfield extraction. The operands are in the range
// 0 <= lsb < lsb + width <= src bitwidth, where all values are unsigned.
def G_UBFX : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, type1:$lsb, type1:$width);
  let hasSideEffects = false;
}
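
// For example (generic MIR, illustrative), extracting the 8-bit field that
// starts at bit 4, i.e. bits [4, 11], of a 32-bit value:
//
//   %lsb:_(s32) = G_CONSTANT i32 4
//   %width:_(s32) = G_CONSTANT i32 8
//   %dst:_(s32) = G_UBFX %src:_(s32), %lsb:_(s32), %width:_(s32)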

//------------------------------------------------------------------------------
// Optimization hints
//------------------------------------------------------------------------------

// Asserts that an operation has already been zero-extended from a specific
// type.
def G_ASSERT_ZEXT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, untyped_imm_0:$sz);
  let hasSideEffects = false;
}

// Asserts that an operation has already been sign-extended from a specific
// type.
def G_ASSERT_SEXT : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, untyped_imm_0:$sz);
  let hasSideEffects = false;
}

// Asserts that a value has at least the given alignment.
def G_ASSERT_ALIGN : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src, untyped_imm_0:$align);
  let hasSideEffects = false;
}

// Prevent constant folding of the source value with any users.
def G_CONSTANT_FOLD_BARRIER : GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  let hasSideEffects = false;
}

//===- RegisterBank.td - Register bank definitions ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

class RegisterBank<string name, list<RegisterClass> classes> {
  string Name = name;
  list<RegisterClass> RegisterClasses = classes;
}
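
// For example, a target would typically define one bank per register file,
// listing the register classes it covers. GPR32/GPR64 below are illustrative
// names, not classes defined here:
//   def GPRRegBank : RegisterBank<"GPR", [GPR32, GPR64]>;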

//===- Target.td - Define GlobalISel rules -----------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces used by GlobalISel.
//
//===----------------------------------------------------------------------===//

// Definitions that inherit from LLT define types that will be used in the
// GlobalISel matcher.
class LLT;

def s32 : LLT;
def s64 : LLT;
def v2s32 : LLT;
def v4s16 : LLT;

// Defines a matcher for complex operands. This is analogous to ComplexPattern
// from SelectionDAG.
//
// Definitions that inherit from this may also inherit from
// GIComplexPatternEquiv to enable the import of SelectionDAG patterns involving
// those ComplexPatterns.
class GIComplexOperandMatcher<LLT type, string matcherfn> {
  // The expected type of the root of the match.
  //
  // TODO: We should probably support any-type, any-scalar, and multiple types
  //       in the future.
  LLT Type = type;

  // The function that determines whether the operand matches. It should be of
  // the form:
  //   ComplexRendererFn select(MachineOperand &Root) const;
  // where Root is the root of the match.  The function should return nullptr
  // on match failure, or a ComplexRendererFn that renders the operand in case
  // of a successful match.
  string MatcherFn = matcherfn;
}
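
// A minimal sketch of a matcher declaration (the def name and C++ member
// function are hypothetical, not part of this file):
//   def gi_addr_reg_imm : GIComplexOperandMatcher<s64, "selectAddrRegImm">;
// selectAddrRegImm would be implemented on the target's InstructionSelector
// with the signature described above.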

// Defines a custom renderer. This is analogous to SDNodeXForm from
// SelectionDAG. Unlike SDNodeXForm, this matches a MachineInstr and
// renders directly to the result instruction without an intermediate node.
//
// Definitions that inherit from this may also inherit from GISDNodeXFormEquiv
// to enable the import of SelectionDAG patterns involving those SDNodeXForms.
class GICustomOperandRenderer<string rendererfn> {
  // The function renders the operand(s) of the matched instruction to
  // the specified instruction. It should be of the form:
  //   void render(MachineInstrBuilder &MIB, const MachineInstr &MI,
  //               int OpIdx = -1)
  //
  // If OpIdx is specified (i.e. not invalid/negative), this
  // references the source operand MI.getOperand(OpIdx). Otherwise,
  // this is the value defined by MI. This is to support the case
  // where there is no corresponding instruction to match.
  string RendererFn = rendererfn;
}
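
// A minimal sketch of a renderer declaration (names hypothetical):
//   def gi_neg_imm : GICustomOperandRenderer<"renderNegImm">;
// renderNegImm would be implemented on the target's InstructionSelector with
// the signature described above, e.g. adding the negated immediate of the
// matched instruction to the MachineInstrBuilder.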

//===- TargetGlobalISel.td - Common code for GlobalISel ----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces used to support
// SelectionDAG instruction selection patterns (specified in
// TargetSelectionDAG.td) when generating GlobalISel instruction selectors.
//
// This is intended as a compatibility layer, to enable reuse of target
// descriptions written for SelectionDAG without requiring explicit GlobalISel
// support.  It will eventually supersede SelectionDAG patterns.
//
//===----------------------------------------------------------------------===//

// Declare that a generic Instruction is 'equivalent' to an SDNode, that is,
// SelectionDAG patterns involving the SDNode can be transformed to match the
// Instruction instead.
class GINodeEquiv<Instruction i, SDNode node> {
  Instruction I = i;
  SDNode Node = node;

  // SelectionDAG has separate nodes for atomic and non-atomic memory operations
  // (ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE) but GlobalISel
  // stores this information in the MachineMemOperand.
  bit CheckMMOIsNonAtomic = false;
  bit CheckMMOIsAtomic = false;

  // SelectionDAG has one node for all loads and uses predicates to
  // differentiate them. GlobalISel on the other hand uses separate opcodes.
  // When this is true, the resulting opcode is G_LOAD/G_SEXTLOAD/G_ZEXTLOAD
  // depending on the predicates on the node.
  Instruction IfSignExtend = ?;
  Instruction IfZeroExtend = ?;

  // SelectionDAG has one setcc node for all compares. This differentiates
  // between G_ICMP and G_FCMP.
  Instruction IfFloatingPoint = ?;
}

// These are defined in the same order as the G_* instructions.
def : GINodeEquiv<G_ANYEXT, anyext>;
def : GINodeEquiv<G_SEXT, sext>;
def : GINodeEquiv<G_ZEXT, zext>;
def : GINodeEquiv<G_TRUNC, trunc>;
def : GINodeEquiv<G_BITCAST, bitconvert>;
// G_INTTOPTR - SelectionDAG has no equivalent.
// G_PTRTOINT - SelectionDAG has no equivalent.
def : GINodeEquiv<G_CONSTANT, imm>;
// timm must not be materialized and therefore has no GlobalISel equivalent
def : GINodeEquiv<G_FCONSTANT, fpimm>;
def : GINodeEquiv<G_IMPLICIT_DEF, undef>;
def : GINodeEquiv<G_FRAME_INDEX, frameindex>;
def : GINodeEquiv<G_BLOCK_ADDR, blockaddress>;
def : GINodeEquiv<G_PTR_ADD, ptradd>;
def : GINodeEquiv<G_ADD, add>;
def : GINodeEquiv<G_SUB, sub>;
def : GINodeEquiv<G_MUL, mul>;
def : GINodeEquiv<G_UMULH, mulhu>;
def : GINodeEquiv<G_SMULH, mulhs>;
def : GINodeEquiv<G_SDIV, sdiv>;
def : GINodeEquiv<G_UDIV, udiv>;
def : GINodeEquiv<G_SREM, srem>;
def : GINodeEquiv<G_UREM, urem>;
def : GINodeEquiv<G_AND, and>;
def : GINodeEquiv<G_OR, or>;
def : GINodeEquiv<G_XOR, xor>;
def : GINodeEquiv<G_SHL, shl>;
def : GINodeEquiv<G_LSHR, srl>;
def : GINodeEquiv<G_ASHR, sra>;
def : GINodeEquiv<G_SADDSAT, saddsat>;
def : GINodeEquiv<G_UADDSAT, uaddsat>;
def : GINodeEquiv<G_SSUBSAT, ssubsat>;
def : GINodeEquiv<G_USUBSAT, usubsat>;
def : GINodeEquiv<G_SSHLSAT, sshlsat>;
def : GINodeEquiv<G_USHLSAT, ushlsat>;
def : GINodeEquiv<G_SMULFIX, smulfix>;
def : GINodeEquiv<G_UMULFIX, umulfix>;
def : GINodeEquiv<G_SMULFIXSAT, smulfixsat>;
def : GINodeEquiv<G_UMULFIXSAT, umulfixsat>;
def : GINodeEquiv<G_SDIVFIX, sdivfix>;
def : GINodeEquiv<G_UDIVFIX, udivfix>;
def : GINodeEquiv<G_SDIVFIXSAT, sdivfixsat>;
def : GINodeEquiv<G_UDIVFIXSAT, udivfixsat>;
def : GINodeEquiv<G_SELECT, select>;
def : GINodeEquiv<G_FNEG, fneg>;
def : GINodeEquiv<G_FPEXT, fpextend>;
def : GINodeEquiv<G_FPTRUNC, fpround>;
def : GINodeEquiv<G_FPTOSI, fp_to_sint>;
def : GINodeEquiv<G_FPTOUI, fp_to_uint>;
def : GINodeEquiv<G_SITOFP, sint_to_fp>;
def : GINodeEquiv<G_UITOFP, uint_to_fp>;
def : GINodeEquiv<G_FADD, fadd>;
def : GINodeEquiv<G_FSUB, fsub>;
def : GINodeEquiv<G_FMA, fma>;
def : GINodeEquiv<G_FMAD, fmad>;
def : GINodeEquiv<G_FMUL, fmul>;
def : GINodeEquiv<G_FDIV, fdiv>;
def : GINodeEquiv<G_FREM, frem>;
def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_FEXP2, fexp2>;
def : GINodeEquiv<G_FLOG2, flog2>;
def : GINodeEquiv<G_FLDEXP, fldexp>;
def : GINodeEquiv<G_FCANONICALIZE, fcanonicalize>;
def : GINodeEquiv<G_IS_FPCLASS, is_fpclass>;
def : GINodeEquiv<G_INTRINSIC, intrinsic_wo_chain>;
// ISD::INTRINSIC_VOID can also be handled with G_INTRINSIC_W_SIDE_EFFECTS.
def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_void>;
def : GINodeEquiv<G_INTRINSIC_W_SIDE_EFFECTS, intrinsic_w_chain>;
def : GINodeEquiv<G_BR, br>;
def : GINodeEquiv<G_BSWAP, bswap>;
def : GINodeEquiv<G_BITREVERSE, bitreverse>;
def : GINodeEquiv<G_FSHL, fshl>;
def : GINodeEquiv<G_FSHR, fshr>;
def : GINodeEquiv<G_CTLZ, ctlz>;
def : GINodeEquiv<G_CTTZ, cttz>;
def : GINodeEquiv<G_CTLZ_ZERO_UNDEF, ctlz_zero_undef>;
def : GINodeEquiv<G_CTTZ_ZERO_UNDEF, cttz_zero_undef>;
def : GINodeEquiv<G_CTPOP, ctpop>;
def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, extractelt>;
def : GINodeEquiv<G_CONCAT_VECTORS, concat_vectors>;
def : GINodeEquiv<G_BUILD_VECTOR, build_vector>;
def : GINodeEquiv<G_FCEIL, fceil>;
def : GINodeEquiv<G_FCOS, fcos>;
def : GINodeEquiv<G_FSIN, fsin>;
def : GINodeEquiv<G_FABS, fabs>;
def : GINodeEquiv<G_FSQRT, fsqrt>;
def : GINodeEquiv<G_FFLOOR, ffloor>;
def : GINodeEquiv<G_FRINT, frint>;
def : GINodeEquiv<G_FNEARBYINT, fnearbyint>;
def : GINodeEquiv<G_INTRINSIC_TRUNC, ftrunc>;
def : GINodeEquiv<G_INTRINSIC_ROUND, fround>;
def : GINodeEquiv<G_INTRINSIC_LRINT, lrint>;
def : GINodeEquiv<G_FCOPYSIGN, fcopysign>;
def : GINodeEquiv<G_SMIN, smin>;
def : GINodeEquiv<G_SMAX, smax>;
def : GINodeEquiv<G_UMIN, umin>;
def : GINodeEquiv<G_UMAX, umax>;
def : GINodeEquiv<G_ABS, abs>;
def : GINodeEquiv<G_FMINNUM, fminnum>;
def : GINodeEquiv<G_FMAXNUM, fmaxnum>;
def : GINodeEquiv<G_FMINNUM_IEEE, fminnum_ieee>;
def : GINodeEquiv<G_FMAXNUM_IEEE, fmaxnum_ieee>;
def : GINodeEquiv<G_FMAXIMUM, fmaximum>;
def : GINodeEquiv<G_FMINIMUM, fminimum>;
def : GINodeEquiv<G_READCYCLECOUNTER, readcyclecounter>;
def : GINodeEquiv<G_ROTR, rotr>;
def : GINodeEquiv<G_ROTL, rotl>;
def : GINodeEquiv<G_LROUND, lround>;
def : GINodeEquiv<G_LLROUND, llround>;
def : GINodeEquiv<G_VECREDUCE_FADD, vecreduce_fadd>;

def : GINodeEquiv<G_STRICT_FADD, strict_fadd>;
def : GINodeEquiv<G_STRICT_FSUB, strict_fsub>;
def : GINodeEquiv<G_STRICT_FMUL, strict_fmul>;
def : GINodeEquiv<G_STRICT_FDIV, strict_fdiv>;
def : GINodeEquiv<G_STRICT_FREM, strict_frem>;
def : GINodeEquiv<G_STRICT_FMA, strict_fma>;
def : GINodeEquiv<G_STRICT_FSQRT, strict_fsqrt>;
def : GINodeEquiv<G_STRICT_FLDEXP, strict_fldexp>;

// Broadly speaking, G_LOAD is equivalent to ISD::LOAD but there are some
// complications that tablegen must take care of. For example, predicates such
// as isSignExtLoad mean this is not a perfect 1:1 mapping, since a
// sign-extending load is (G_SEXTLOAD x) in GlobalISel. Additionally, G_LOAD
// handles both atomic and non-atomic loads, whereas SelectionDAG has separate
// nodes for them. This GINodeEquiv maps the non-atomic loads to G_LOAD with a
// non-atomic MachineMemOperand.
def : GINodeEquiv<G_LOAD, ld> {
  let CheckMMOIsNonAtomic = true;
  let IfSignExtend = G_SEXTLOAD;
  let IfZeroExtend = G_ZEXTLOAD;
}

def : GINodeEquiv<G_ICMP, setcc> {
  let IfFloatingPoint = G_FCMP;
}

// Broadly speaking, G_STORE is equivalent to ISD::STORE but there are some
// complications that tablegen must take care of. For example, predicates such
// as isTruncStore mean this is not a perfect 1:1 mapping, since a truncating
// store is (G_STORE (G_TRUNC x)) in GlobalISel. Additionally, G_STORE handles
// both atomic and non-atomic stores, whereas SelectionDAG has separate nodes
// for them. This GINodeEquiv maps the non-atomic stores to G_STORE with a
// non-atomic MachineMemOperand.
def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = true; }

def : GINodeEquiv<G_LOAD, atomic_load> {
  let CheckMMOIsNonAtomic = false;
  let CheckMMOIsAtomic = true;
  let IfSignExtend = G_SEXTLOAD;
  let IfZeroExtend = G_ZEXTLOAD;
}

// Operands are swapped for atomic_store vs. regular store
def : GINodeEquiv<G_STORE, atomic_store> {
  let CheckMMOIsNonAtomic = false;
  let CheckMMOIsAtomic = true;
}

def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>;
def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub>;
def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and>;
def : GINodeEquiv<G_ATOMICRMW_NAND, atomic_load_nand>;
def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or>;
def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor>;
def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min>;
def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max>;
def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd>;
def : GINodeEquiv<G_ATOMICRMW_FSUB, atomic_load_fsub>;
def : GINodeEquiv<G_ATOMICRMW_FMAX, atomic_load_fmax>;
def : GINodeEquiv<G_ATOMICRMW_FMIN, atomic_load_fmin>;
def : GINodeEquiv<G_ATOMICRMW_UINC_WRAP, atomic_load_uinc_wrap>;
def : GINodeEquiv<G_ATOMICRMW_UDEC_WRAP, atomic_load_udec_wrap>;
def : GINodeEquiv<G_FENCE, atomic_fence>;

// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
// Should be used on defs that subclass GIComplexOperandMatcher<>.
class GIComplexPatternEquiv<ComplexPattern seldag> {
  ComplexPattern SelDAGEquivalent = seldag;
}
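
// For instance, a target with a SelectionDAG ComplexPattern `addr_mode` and a
// corresponding GlobalISel matcher (both hypothetical) would tie them together
// like so, enabling patterns that use addr_mode to be imported:
//   def gi_addr_mode : GIComplexOperandMatcher<s64, "selectAddrMode">,
//                      GIComplexPatternEquiv<addr_mode>;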

// Specifies the GlobalISel equivalents for SelectionDAG's SDNodeXForm.
// Should be used on defs that subclass GICustomOperandRenderer<>.
class GISDNodeXFormEquiv<SDNodeXForm seldag> {
  SDNodeXForm SelDAGEquivalent = seldag;
}
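
// Likewise, a target with an SDNodeXForm `NegImm` (hypothetical) would tie its
// GlobalISel renderer to it as:
//   def gi_neg_imm_xform : GICustomOperandRenderer<"renderNegImm">,
//                          GISDNodeXFormEquiv<NegImm>;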

//===- Combine.td - Combine rule definitions ---------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declare GlobalISel combine rules and provide mechanisms to opt-out.
//
//===----------------------------------------------------------------------===//

// Common base class for GICombineRule and GICombineGroup.
class GICombine {
  // See GICombineGroup. We only declare it here to make the tablegen pass
  // simpler.
  list<GICombine> Rules = ?;
}

// A group of combine rules that can be added to a GICombiner or another group.
class GICombineGroup<list<GICombine> rules> : GICombine {
  // The rules contained in this group. The rules in a group are flattened into
  // a single list and sorted into whatever order is most efficient. However,
  // they will never be re-ordered such that behaviour differs from the
  // specified order. It is therefore possible to use the order of rules in this
  // list to describe priorities.
  let Rules = rules;
}
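
// For example, a sketch grouping two rules defined later in this file:
//   def identity_fold_sketch : GICombineGroup<[select_same_val,
//                                              right_identity_zero]>;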

class GICombinerHelperArg<string type, string name> {
  string Type = type;
  string Name = name;
}

// Declares a combiner helper class
class GICombinerHelper<string classname, list<GICombine> rules>
    : GICombineGroup<rules> {
  // The class name to use in the generated output.
  string Classname = classname;
  // The name of a run-time compiler option that will be generated to disable
  // specific rules within this combiner.
  string DisableRuleOption = ?;
  // The state class to inherit from (if any). The generated helper will inherit
  // from this class and will forward arguments to its constructors.
  string StateClass = "";
  // Any additional arguments that should be appended to the tryCombine*()
  // methods.
  list<GICombinerHelperArg> AdditionalArguments =
      [GICombinerHelperArg<"CombinerHelper &", "Helper">];
}
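
// A sketch of how a target declares a combiner from this class; the target
// name, rule list, and option string are illustrative:
//   def MyTargetPreLegalizerCombinerHelper
//       : GICombinerHelper<"MyTargetGenPreLegalizerCombinerHelper",
//                          [copy_prop, mul_to_shl]> {
//     let DisableRuleOption = "mytarget-prelegalizercombiner-disable-rule";
//   }
// tablegen then emits a MyTargetGenPreLegalizerCombinerHelper class whose
// tryCombineAll() dispatches to the listed rules.
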
class GICombineRule<dag defs, dag match, dag apply> : GICombine {
  /// Defines the external interface of the match rule. This includes:
  /// * The names of the root nodes (requires at least one)
  /// See GIDefKind for details.
  dag Defs = defs;

  /// Defines the things which must be true for the pattern to match
  /// See GIMatchKind for details.
  dag Match = match;

  /// Defines the things which happen after the decision is made to apply a
  /// combine rule.
  /// See GIApplyKind for details.
  dag Apply = apply;

  /// Defines the predicates that are checked before the match function
  /// is called. Targets can use this to, for instance, check Subtarget
  /// features.
  list<Predicate> Predicates = [];
}
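
// Putting the pieces together, the smallest useful shape of a rule looks like
// this sketch (copy_prop below is the first real instance in this file):
//   def example_rule_sketch : GICombineRule<
//     (defs root:$d),
//     (match (COPY $d, $s):$mi, [{ /* C++ match predicate */ return true; }]),
//     (apply [{ /* C++ that rewrites *${mi} */ }])>;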

/// The operator at the root of a GICombineRule.Defs dag.
def defs;

/// All arguments of the defs operator must be subclasses of GIDefKind or
/// sub-dags whose operator is GIDefKindWithArgs.
class GIDefKind;
class GIDefKindWithArgs;
/// Declare a root node. There must be at least one of these in every combine
/// rule.
/// TODO: The plan is to elide `root` definitions and determine it from the DAG
///       itself with an override for situations where the usual determination
///       is incorrect.
def root : GIDefKind;

/// Declares data that is passed from the match stage to the apply stage.
class GIDefMatchData<string type> : GIDefKind {
  /// A C++ type name indicating the storage type.
  string Type = type;
}

def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;

/// The operator at the root of a GICombineRule.Match dag.
def match;
/// All arguments of the match operator must be either:
/// * A subclass of GIMatchKind
/// * A subclass of GIMatchKindWithArgs
/// * A subclass of Instruction
/// * A MIR code block (deprecated)
/// The GIMatchKind and GIMatchKindWithArgs cases are described in more detail
/// in their definitions below.
/// For the Instruction case, these are collected into a DAG where operand names
/// that occur multiple times introduce edges.
class GIMatchKind;
class GIMatchKindWithArgs;

/// In lieu of proper macro support, trivial one-off opcode checks can be
/// performed with this.
def wip_match_opcode : GIMatchKindWithArgs;

/// The operator at the root of a GICombineRule.Apply dag.
def apply;
/// All arguments of the apply operator must be subclasses of GIApplyKind, or
/// sub-dags whose operator is GIApplyKindWithArgs, or an MIR block
/// (deprecated).
class GIApplyKind;
class GIApplyKindWithArgs;

def register_matchinfo: GIDefMatchData<"Register">;
def int64_matchinfo: GIDefMatchData<"int64_t">;
def apint_matchinfo : GIDefMatchData<"APInt">;
def build_fn_matchinfo :
    GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
def unsigned_matchinfo: GIDefMatchData<"unsigned">;

def copy_prop : GICombineRule<
  (defs root:$d),
  (match (COPY $d, $s):$mi,
         [{ return Helper.matchCombineCopy(*${mi}); }]),
  (apply [{ Helper.applyCombineCopy(*${mi}); }])>;

// idempotent operations
// Fold (freeze (freeze x)) -> (freeze x).
// Fold (fabs (fabs x)) -> (fabs x).
// Fold (fcanonicalize (fcanonicalize x)) -> (fcanonicalize x).
def idempotent_prop : GICombineRule<
  (defs root:$mi),
  (match (wip_match_opcode G_FREEZE, G_FABS, G_FCANONICALIZE):$mi,
         [{ return MRI.getVRegDef(${mi}->getOperand(1).getReg())->getOpcode() ==
                   ${mi}->getOpcode(); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${mi}, 1); }])>;


def extending_loads : GICombineRule<
  (defs root:$root, extending_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD):$root,
         [{ return Helper.matchCombineExtendingLoads(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtendingLoads(*${root}, ${matchinfo}); }])>;

def load_and_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
        [{ return Helper.matchCombineLoadWithAndMask(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;
def combines_for_extload: GICombineGroup<[extending_loads, load_and_mask]>;

def sext_trunc_sextload : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_SEXT_INREG):$d,
         [{ return Helper.matchSextTruncSextLoad(*${d}); }]),
  (apply [{ Helper.applySextTruncSextLoad(*${d}); }])>;

def sext_inreg_of_load_matchdata : GIDefMatchData<"std::tuple<Register, unsigned>">;
def sext_inreg_of_load : GICombineRule<
  (defs root:$root, sext_inreg_of_load_matchdata:$matchinfo),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchSextInRegOfLoad(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applySextInRegOfLoad(*${root}, ${matchinfo}); }])>;

def sext_inreg_to_zext_inreg : GICombineRule<
  (defs root:$dst),
  (match
    (G_SEXT_INREG $dst, $src, $imm):$root,
      [{
        unsigned BitWidth = MRI.getType(${src}.getReg()).getScalarSizeInBits();
        return Helper.getKnownBits()->maskedValueIsZero(${src}.getReg(),
                 APInt::getOneBitSet(BitWidth, ${imm}.getImm() - 1)); }]),
    (apply [{
      Helper.getBuilder().setInstrAndDebugLoc(*${root});
      Helper.getBuilder().buildZExtInReg(${dst}, ${src}, ${imm}.getImm());
      ${root}->eraseFromParent();
  }])
>;

def combine_indexed_load_store : GICombineRule<
  (defs root:$root, indexed_load_store_matchdata:$matchinfo),
  (match (wip_match_opcode G_LOAD, G_SEXTLOAD, G_ZEXTLOAD, G_STORE):$root,
         [{ return Helper.matchCombineIndexedLoadStore(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineIndexedLoadStore(*${root}, ${matchinfo}); }])>;

def opt_brcond_by_inverting_cond_matchdata : GIDefMatchData<"MachineInstr *">;
def opt_brcond_by_inverting_cond : GICombineRule<
  (defs root:$root, opt_brcond_by_inverting_cond_matchdata:$matchinfo),
  (match (wip_match_opcode G_BR):$root,
         [{ return Helper.matchOptBrCondByInvertingCond(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyOptBrCondByInvertingCond(*${root}, ${matchinfo}); }])>;

def ptr_add_immed_matchdata : GIDefMatchData<"PtrAddChain">;
def ptr_add_immed_chain : GICombineRule<
  (defs root:$d, ptr_add_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$d,
         [{ return Helper.matchPtrAddImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyPtrAddImmedChain(*${d}, ${matchinfo}); }])>;

def shifts_too_big : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
         [{ return Helper.matchShiftsTooBig(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold shift (shift base, x), y -> shift base, (x+y), if the shifts match.
def shift_immed_matchdata : GIDefMatchData<"RegisterImmPair">;
def shift_immed_chain : GICombineRule<
  (defs root:$d, shift_immed_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_SSHLSAT, G_USHLSAT):$d,
         [{ return Helper.matchShiftImmedChain(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftImmedChain(*${d}, ${matchinfo}); }])>;

// Transform shift (logic (shift X, C0), Y), C1
//        -> logic (shift X, (C0+C1)), (shift Y, C1), if the shifts match.
def shift_of_shifted_logic_matchdata : GIDefMatchData<"ShiftOfShiftedLogic">;
def shift_of_shifted_logic_chain : GICombineRule<
  (defs root:$d, shift_of_shifted_logic_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR, G_USHLSAT, G_SSHLSAT):$d,
         [{ return Helper.matchShiftOfShiftedLogic(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyShiftOfShiftedLogic(*${d}, ${matchinfo}); }])>;

def mul_to_shl_matchdata : GIDefMatchData<"unsigned">;
def mul_to_shl : GICombineRule<
  (defs root:$d, mul_to_shl_matchdata:$matchinfo),
  (match (G_MUL $d, $op1, $op2):$mi,
         [{ return Helper.matchCombineMulToShl(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineMulToShl(*${mi}, ${matchinfo}); }])>;

// shl ([asz]ext x), y => zext (shl x, y), if shift does not overflow int
def reduce_shl_of_extend_matchdata : GIDefMatchData<"RegisterImmPair">;
def reduce_shl_of_extend : GICombineRule<
  (defs root:$dst, reduce_shl_of_extend_matchdata:$matchinfo),
  (match (G_SHL $dst, $src0, $src1):$mi,
         [{ return Helper.matchCombineShlOfExtend(*${mi}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineShlOfExtend(*${mi}, ${matchinfo}); }])>;

// Combine (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
// Combine (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
def commute_shift : GICombineRule<
  (defs root:$d, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SHL):$d,
         [{ return Helper.matchCommuteShift(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${d}, ${matchinfo}); }])>;

def narrow_binop_feeding_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchNarrowBinopFeedingAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// [us]itofp(undef) = 0, because the result value is bounded.
def undef_to_fp_zero : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UITOFP, G_SITOFP):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithFConstant(*${root}, 0.0); }])>;

def undef_to_int_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_MUL):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def undef_to_negative_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, -1); }])>;

def binop_left_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_UDIV, G_UREM):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

def binop_right_undef_to_undef: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHL, G_ASHR, G_LSHR):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 2); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

def unary_undef_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ABS):$root,
         [{ return Helper.matchOperandIsUndef(*${root}, 1); }]),
  (apply [{ Helper.replaceInstWithConstant(*${root}, 0); }])>;

// Instructions where if any source operand is undef, the instruction can be
// replaced with undef.
def propagate_undef_any_op: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_FPTOSI, G_FPTOUI, G_SUB, G_XOR, G_TRUNC):$root,
         [{ return Helper.matchAnyExplicitUseIsUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Instructions where if all source operands are undef, the instruction can be
// replaced with undef.
def propagate_undef_all_ops: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
          [{ return Helper.matchAllExplicitUsesAreUndef(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace a G_SHUFFLE_VECTOR with an undef mask with a G_IMPLICIT_DEF.
def propagate_undef_shuffle_mask: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return Helper.matchUndefShuffleVectorMask(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Replace an insert/extract element with an out-of-bounds index with undef.
def insert_extract_vec_elt_out_of_bounds : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT, G_EXTRACT_VECTOR_ELT):$root,
         [{ return Helper.matchInsertExtractVecEltOutOfBounds(*${root}); }]),
  (apply [{ Helper.replaceInstWithUndef(*${root}); }])>;

// Fold (cond ? x : x) -> x
def select_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectSameVal(*${root}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (undef ? x : y) -> y
def select_undef_cmp: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchUndefSelectCmp(*${root}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Fold (true ? x : y) -> x
// Fold (false ? x : y) -> y
def select_constant_cmp_matchdata : GIDefMatchData<"unsigned">;
def select_constant_cmp: GICombineRule<
  (defs root:$root, select_constant_cmp_matchdata:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchConstantSelectCmp(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${matchinfo}); }])
>;

def select_to_logical : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SELECT):$root,
    [{ return Helper.matchSelectToLogical(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
>;

// Fold (C op x) -> (x op C)
// TODO: handle more isCommutable opcodes
// TODO: handle compares (currently not marked as isCommutable)
def commute_constant_to_rhs : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ADD, G_MUL, G_AND, G_OR, G_XOR):$root, [{
    return getIConstantVRegVal(${root}->getOperand(1).getReg(), MRI).has_value();
  }]),
  (apply [{
    Observer.changingInstr(*${root});
    Register LHSReg = ${root}->getOperand(1).getReg();
    Register RHSReg = ${root}->getOperand(2).getReg();
    ${root}->getOperand(1).setReg(RHSReg);
    ${root}->getOperand(2).setReg(LHSReg);
    Observer.changedInstr(*${root});
  }])
>;

// Fold x op 0 -> x
def right_identity_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SUB, G_ADD, G_OR, G_XOR, G_SHL, G_ASHR, G_LSHR,
                           G_PTR_ADD, G_ROTL, G_ROTR):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 0); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold x op 1 -> x
def right_identity_one: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchConstantOp(${root}->getOperand(2), 1); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (x op x) -> x
def binop_same_val: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_AND, G_OR):$root,
    [{ return Helper.matchBinOpSameVal(*${root}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (0 op x) -> 0
def binop_left_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

def urem_pow2_to_mask : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UREM):$root,
    [{ return Helper.matchOperandIsKnownToBeAPowerOfTwo(*${root}, 2); }]),
  (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
>;

// Push a binary operator through a select on constants.
//
// binop (select cond, K0, K1), K2 ->
//   select cond, (binop K0, K2), (binop K1, K2)

// Every binary operator that has constant folding. We currently do
// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
// G_FMINNUM_IEEE.
def fold_binop_into_select : GICombineRule<
  (defs root:$root, unsigned_matchinfo:$select_op_no),
  (match (wip_match_opcode
    G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
    G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
    G_SMIN, G_SMAX, G_UMIN, G_UMAX,
    G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
    G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
    [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
  (apply [{ Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
>;

// Transform d = [su]div(x, y) and r = [su]rem(x, y) -> d, r = [su]divrem(x, y)
def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
def div_rem_to_divrem : GICombineRule<
  (defs root:$root, div_rem_to_divrem_matchdata:$matchinfo),
  (match (wip_match_opcode G_SDIV, G_UDIV, G_SREM, G_UREM):$root,
    [{ return Helper.matchCombineDivRem(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineDivRem(*${root}, ${matchinfo}); }])
>;

// Fold (x op 0) -> 0
def binop_right_to_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
    [{ return Helper.matchOperandIsZero(*${root}, 2); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 2); }])
>;

// Erase stores of undef values.
def erase_undef_store : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_STORE):$root,
    [{ return Helper.matchUndefStore(*${root}); }]),
  (apply [{ Helper.eraseInst(*${root}); }])
>;

def simplify_add_to_sub_matchinfo: GIDefMatchData<"std::tuple<Register, Register>">;
def simplify_add_to_sub: GICombineRule <
  (defs root:$root, simplify_add_to_sub_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchSimplifyAddToSub(*${root}, ${info}); }]),
  (apply [{ Helper.applySimplifyAddToSub(*${root}, ${info});}])
>;

// Fold fp_op(cst) to the constant result of the floating point operation.
class constant_fold_unary_fp_op_rule<Instruction opcode> : GICombineRule <
  (defs root:$dst),
  (match (opcode $dst, $src0):$root, (G_FCONSTANT $src0, $cst)),
  (apply [{ Helper.applyCombineConstantFoldFpUnary(*${root}, ${cst}.getFPImm()); }])
>;

def constant_fold_fneg : constant_fold_unary_fp_op_rule<G_FNEG>;
def constant_fold_fabs : constant_fold_unary_fp_op_rule<G_FABS>;
def constant_fold_fsqrt : constant_fold_unary_fp_op_rule<G_FSQRT>;
def constant_fold_flog2 : constant_fold_unary_fp_op_rule<G_FLOG2>;
def constant_fold_fptrunc : constant_fold_unary_fp_op_rule<G_FPTRUNC>;

def constant_fold_fp_ops : GICombineGroup<[
  constant_fold_fneg,
  constant_fold_fabs,
  constant_fold_fsqrt,
  constant_fold_flog2,
  constant_fold_fptrunc
]>;

// Fold int2ptr(ptr2int(x)) -> x
def p2i_to_i2p: GICombineRule<
  (defs root:$root, register_matchinfo:$info),
  (match (wip_match_opcode G_INTTOPTR):$root,
    [{ return Helper.matchCombineI2PToP2I(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineI2PToP2I(*${root}, ${info}); }])
>;

// Fold ptr2int(int2ptr(x)) -> x
def i2p_to_p2i: GICombineRule<
  (defs root:$dst, register_matchinfo:$info),
  (match (G_INTTOPTR $t, $ptr),
         (G_PTRTOINT $dst, $t):$mi,
    [{ ${info} = ${ptr}.getReg(); return true; }]),
  (apply [{ Helper.applyCombineP2IToI2P(*${mi}, ${info}); }])
>;

// Fold add (ptrtoint x), y -> ptrtoint (ptr_add x, y)
def add_p2i_to_ptradd_matchinfo : GIDefMatchData<"std::pair<Register, bool>">;
def add_p2i_to_ptradd : GICombineRule<
  (defs root:$root, add_p2i_to_ptradd_matchinfo:$info),
  (match (wip_match_opcode G_ADD):$root,
    [{ return Helper.matchCombineAddP2IToPtrAdd(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineAddP2IToPtrAdd(*${root}, ${info}); }])
>;

// Fold (ptr_add (int2ptr C1), C2) -> int2ptr(C1 + C2)
def const_ptradd_to_i2p_matchinfo : GIDefMatchData<"APInt">;
def const_ptradd_to_i2p: GICombineRule<
  (defs root:$root, const_ptradd_to_i2p_matchinfo:$info),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchCombineConstPtrAddToI2P(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineConstPtrAddToI2P(*${root}, ${info}); }])
>;

// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
def hoist_logic_op_with_same_opcode_hands: GICombineRule <
  (defs root:$root, instruction_steps_matchdata:$info),
  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
    [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildInstructionSteps(*${root}, ${info});}])
>;

// Fold (ashr (shl x, C), C) -> (sext_inreg x, bitwidth - C)
def shl_ashr_to_sext_inreg_matchinfo : GIDefMatchData<"std::tuple<Register, int64_t>">;
def shl_ashr_to_sext_inreg : GICombineRule<
  (defs root:$root, shl_ashr_to_sext_inreg_matchinfo:$info),
  (match (wip_match_opcode G_ASHR): $root,
    [{ return Helper.matchAshrShlToSextInreg(*${root}, ${info}); }]),
  (apply [{ Helper.applyAshShlToSextInreg(*${root}, ${info});}])
>;

// Fold and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0
def overlapping_and: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchOverlappingAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Fold (x & y) -> x or (x & y) -> y when (x & y) is known to equal x or equal y.
def redundant_and: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchRedundantAnd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
def redundant_or: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_OR):$root,
         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// If the input is already sign extended, just drop the extension.
// sext_inreg x, K ->
//   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
def redundant_sext_inreg: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_SEXT_INREG):$root,
         [{ return Helper.matchRedundantSExtInReg(*${root}); }]),
     (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
>;

// Fold (anyext (trunc x)) -> x if the source type is the same as
// the destination type.
def anyext_trunc_fold: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT):$root,
         [{ return Helper.matchCombineAnyExtTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold (zext (trunc x)) -> x if the source type is the same as the
// destination type and the truncated bits are known to be zero.
def zext_trunc_fold_matchinfo : GIDefMatchData<"Register">;
def zext_trunc_fold: GICombineRule <
  (defs root:$root, zext_trunc_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ZEXT):$root,
         [{ return Helper.matchCombineZextTrunc(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
>;

// Fold ([asz]ext ([asz]ext x)) -> ([asz]ext x).
def ext_ext_fold_matchinfo : GIDefMatchData<"std::tuple<Register, unsigned>">;
def ext_ext_fold: GICombineRule <
  (defs root:$root, ext_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ANYEXT, G_SEXT, G_ZEXT):$root,
         [{ return Helper.matchCombineExtOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineExtOfExt(*${root}, ${matchinfo}); }])
>;

def not_cmp_fold_matchinfo : GIDefMatchData<"SmallVector<Register, 4>">;
def not_cmp_fold : GICombineRule<
  (defs root:$d, not_cmp_fold_matchinfo:$info),
  (match (wip_match_opcode G_XOR): $d,
  [{ return Helper.matchNotCmp(*${d}, ${info}); }]),
  (apply [{ Helper.applyNotCmp(*${d}, ${info}); }])
>;

// Fold (fneg (fneg x)) -> x.
def fneg_fneg_fold: GICombineRule <
  (defs root:$dst, register_matchinfo:$matchinfo),
  (match (G_FNEG $t, $src),
         (G_FNEG $dst, $t):$mi,
         [{ ${matchinfo} = ${src}.getReg(); return true; }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${mi}, ${matchinfo}); }])
>;

// Fold (unmerge (merge x, y, z)) -> x, y, z.
def unmerge_merge_matchinfo : GIDefMatchData<"SmallVector<Register, 8>">;
def unmerge_merge : GICombineRule<
  (defs root:$d, unmerge_merge_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeMergeToPlainValues(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeMergeToPlainValues(*${d}, ${info}); }])
>;

// Fold merge(unmerge).
def merge_unmerge : GICombineRule<
  (defs root:$d, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
  [{ return Helper.matchCombineMergeUnmerge(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${d}, ${matchinfo}); }])
>;

// Fold (fabs (fneg x)) -> (fabs x).
def fabs_fneg_fold: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FABS):$root,
         [{ return Helper.matchCombineFAbsOfFNeg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Fold (unmerge cst) -> cst1, cst2, ...
def unmerge_cst_matchinfo : GIDefMatchData<"SmallVector<APInt, 8>">;
def unmerge_cst : GICombineRule<
  (defs root:$d, unmerge_cst_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeConstant(*${d}, ${info}); }]),
  (apply [{ Helper.applyCombineUnmergeConstant(*${d}, ${info}); }])
>;

// Fold (unmerge undef) -> undef, undef, ...
def unmerge_undef : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_UNMERGE_VALUES): $root,
         [{ return Helper.matchCombineUnmergeUndef(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

// Transform x,y<dead> = unmerge z -> x = trunc z.
def unmerge_dead_to_trunc : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeWithDeadLanesToTrunc(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeWithDeadLanesToTrunc(*${d}); }])
>;

// Transform x,y = unmerge(zext(z)) -> x = zext z; y = 0.
def unmerge_zext_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_UNMERGE_VALUES): $d,
  [{ return Helper.matchCombineUnmergeZExtToZExt(*${d}); }]),
  (apply [{ Helper.applyCombineUnmergeZExtToZExt(*${d}); }])
>;

// Fold trunc ([asz]ext x) -> x or ([asz]ext x) or (trunc x).
def trunc_ext_fold_matchinfo : GIDefMatchData<"std::pair<Register, unsigned>">;
def trunc_ext_fold: GICombineRule <
  (defs root:$root, trunc_ext_fold_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfExt(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfExt(*${root}, ${matchinfo}); }])
>;

// Under certain conditions, transform:
//  trunc (shl x, K)     -> shl (trunc x), K
//  trunc ([al]shr x, K) -> (trunc ([al]shr (trunc x), K))
def trunc_shift_matchinfo : GIDefMatchData<"std::pair<MachineInstr*, LLT>">;
def trunc_shift: GICombineRule <
  (defs root:$root, trunc_shift_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$root,
         [{ return Helper.matchCombineTruncOfShift(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyCombineTruncOfShift(*${root}, ${matchinfo}); }])
>;

// Transform (mul x, -1) -> (sub 0, x)
def mul_by_neg_one: GICombineRule <
  (defs root:$root),
  (match (wip_match_opcode G_MUL):$root,
         [{ return Helper.matchConstantOp(${root}->getOperand(2), -1); }]),
  (apply [{ Helper.applyCombineMulByNegativeOne(*${root}); }])
>;

// Fold (xor (and x, y), y) -> (and (not x), y)
def xor_of_and_with_same_reg_matchinfo :
    GIDefMatchData<"std::pair<Register, Register>">;
def xor_of_and_with_same_reg: GICombineRule <
  (defs root:$root, xor_of_and_with_same_reg_matchinfo:$matchinfo),
  (match (wip_match_opcode G_XOR):$root,
         [{ return Helper.matchXorOfAndWithSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyXorOfAndWithSameReg(*${root}, ${matchinfo}); }])
>;

// Transform (ptr_add 0, x) -> (int_to_ptr x)
def ptr_add_with_zero: GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_PTR_ADD):$root,
         [{ return Helper.matchPtrAddZero(*${root}); }]),
  (apply [{ Helper.applyPtrAddZero(*${root}); }])>;

def regs_small_vec : GIDefMatchData<"SmallVector<Register, 4>">;
def combine_insert_vec_elts_build_vector : GICombineRule<
  (defs root:$root, regs_small_vec:$info),
  (match (wip_match_opcode G_INSERT_VECTOR_ELT):$root,
    [{ return Helper.matchCombineInsertVecElts(*${root}, ${info}); }]),
  (apply [{ Helper.applyCombineInsertVecElts(*${root}, ${info}); }])>;

def load_or_combine : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchLoadOrCombine(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def extend_through_phis_matchdata: GIDefMatchData<"MachineInstr*">;
def extend_through_phis : GICombineRule<
  (defs root:$root, extend_through_phis_matchdata:$matchinfo),
  (match (wip_match_opcode G_PHI):$root,
    [{ return Helper.matchExtendThroughPhis(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtendThroughPhis(*${root}, ${matchinfo}); }])>;

// Currently only the one insert_vector_elt combine above.
def insert_vec_elt_combines : GICombineGroup<
                            [combine_insert_vec_elts_build_vector]>;

def extract_vec_elt_build_vec : GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
    [{ return Helper.matchExtractVecEltBuildVec(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractVecEltBuildVec(*${root}, ${matchinfo}); }])>;

// Fold away full elt extracts from a build_vector.
def extract_all_elts_from_build_vector_matchinfo :
  GIDefMatchData<"SmallVector<std::pair<Register, MachineInstr*>>">;
def extract_all_elts_from_build_vector : GICombineRule<
  (defs root:$root, extract_all_elts_from_build_vector_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
    [{ return Helper.matchExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyExtractAllEltsFromBuildVector(*${root}, ${matchinfo}); }])>;

def extract_vec_elt_combines : GICombineGroup<[
  extract_vec_elt_build_vec,
  extract_all_elts_from_build_vector]>;

def funnel_shift_from_or_shift : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_OR):$root,
    [{ return Helper.matchOrShiftToFunnelShift(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])
>;

def funnel_shift_to_rotate : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FSHL, G_FSHR):$root,
    [{ return Helper.matchFunnelShiftToRotate(*${root}); }]),
  (apply [{ Helper.applyFunnelShiftToRotate(*${root}); }])
>;

def rotate_out_of_range : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ROTR, G_ROTL):$root,
    [{ return Helper.matchRotateOutOfRange(*${root}); }]),
  (apply [{ Helper.applyRotateOutOfRange(*${root}); }])
>;

def icmp_to_true_false_known_bits : GICombineRule<
  (defs root:$d, int64_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ICMP):$d,
         [{ return Helper.matchICmpToTrueFalseKnownBits(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def icmp_to_lhs_known_bits : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchICmpToLHSKnownBits(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def redundant_binop_in_equality : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return Helper.matchRedundantBinOpInEquality(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def and_or_disjoint_mask : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
         [{ return Helper.matchAndOrDisjointMask(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${info}); }])>;

def bitfield_extract_from_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_AND):$root,
    [{ return Helper.matchBitfieldExtractFromAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def funnel_shift_combines : GICombineGroup<[funnel_shift_from_or_shift,
                                            funnel_shift_to_rotate]>;

def bitfield_extract_from_sext_inreg : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SEXT_INREG):$root,
    [{ return Helper.matchBitfieldExtractFromSExtInReg(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShr(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def bitfield_extract_from_shr_and : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
    [{ return Helper.matchBitfieldExtractFromShrAnd(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def form_bitfield_extract : GICombineGroup<[bitfield_extract_from_sext_inreg,
                                            bitfield_extract_from_and,
                                            bitfield_extract_from_shr,
                                            bitfield_extract_from_shr_and]>;

def udiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UDIV):$root,
   [{ return Helper.matchUDivByConst(*${root}); }]),
  (apply [{ Helper.applyUDivByConst(*${root}); }])>;

def sdiv_by_const : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_SDIV):$root,
   [{ return Helper.matchSDivByConst(*${root}); }]),
  (apply [{ Helper.applySDivByConst(*${root}); }])>;

def intdiv_combines : GICombineGroup<[udiv_by_const, sdiv_by_const]>;

def reassoc_ptradd : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_PTR_ADD):$root,
    [{ return Helper.matchReassocPtrAdd(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def reassoc_comm_binops : GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (G_ADD $root, $src1, $src2):$root,
    [{ return Helper.matchReassocCommBinOp(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def reassocs : GICombineGroup<[reassoc_ptradd, reassoc_comm_binops]>;

// Constant fold operations.
def constant_fold : GICombineRule<
  (defs root:$d, apint_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR):$d,
   [{ return Helper.matchConstantFold(*${d}, ${matchinfo}); }]),
  (apply [{ Helper.replaceInstWithConstant(*${d}, ${matchinfo}); }])>;

def mulo_by_2: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy2(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UMULO, G_SMULO):$root,
         [{ return Helper.matchMulOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def addo_by_0: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDO, G_SADDO):$root,
         [{ return Helper.matchAddOBy0(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

// Transform (uadde x, y, 0) -> (uaddo x, y)
//           (sadde x, y, 0) -> (saddo x, y)
//           (usube x, y, 0) -> (usubo x, y)
//           (ssube x, y, 0) -> (ssubo x, y)
def adde_to_addo: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_UADDE, G_SADDE, G_USUBE, G_SSUBE):$root,
         [{ return Helper.matchAddEToAddO(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

def mulh_to_lshr : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_UMULH):$root,
         [{ return Helper.matchUMulHToLShr(*${root}); }]),
  (apply [{ Helper.applyUMulHToLShr(*${root}); }])>;

def mulh_combines : GICombineGroup<[mulh_to_lshr]>;

def redundant_neg_operands: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMAD, G_FMA):$root,
    [{ return Helper.matchRedundantNegOperands(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])>;

// Transform (fsub +-0.0, X) -> (fneg X)
def fsub_to_fneg: GICombineRule<
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_FSUB):$root,
    [{ return Helper.matchFsubToFneg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyFsubToFneg(*${root}, ${matchinfo}); }])>;

// Transform (fadd x, (fmul y, z)) -> (fma y, z, x)
//           (fadd x, (fmul y, z)) -> (fmad y, z, x)
// Transform (fadd (fmul x, y), z) -> (fma x, y, z)
//           (fadd (fmul x, y), z) -> (fmad x, y, z)
def combine_fadd_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z)
//                                         -> (fmad (fpext x), (fpext y), z)
// Transform (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x)
//                                         -> (fmad (fpext y), (fpext z), x)
def combine_fadd_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fmul z, u)), v)  -> (fma x, y, (fma z, u, v))
//           (fadd (fmad x, y, (fmul z, u)), v) -> (fmad x, y, (fmad z, u, v))
// Transform (fadd v, (fma x, y, (fmul z, u)))  -> (fma x, y, (fma z, u, v))
//           (fadd v, (fmad x, y, (fmul z, u))) -> (fmad x, y, (fmad z, u, v))
def combine_fadd_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFMAFMulToFMadOrFMA(*${root},
                                                             ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fadd (fma x, y, (fpext (fmul u, v))), z) ->
//           (fma x, y, (fma (fpext u), (fpext v), z))
def combine_fadd_fpext_fma_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FADD):$root,
         [{ return Helper.matchCombineFAddFpExtFMulToFMadOrFMAAggressive(
                                                  *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fmul x, y), z) -> (fma x, y, -z)
//                                 -> (fmad x, y, -z)
def combine_fsub_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFMulToFMadOrFMA(*${root},
                                                          ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z))
//           (fsub x, (fneg (fmul y, z))) -> (fma y, z, x)
def combine_fsub_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFNegFMulToFMadOrFMA(*${root},
                                                              ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fpext (fmul x, y)), z) ->
//           (fma (fpext x), (fpext y), (fneg z))
def combine_fsub_fpext_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFMulToFMadOrFMA(*${root},
                                                               ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// Transform (fsub (fneg (fpext (fmul x, y))), z) ->
//           (fneg (fma (fpext x), (fpext y), z))
def combine_fsub_fpext_fneg_fmul_to_fmad_or_fma: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_FSUB):$root,
         [{ return Helper.matchCombineFSubFpExtFNegFMulToFMadOrFMA(
                                            *${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

def combine_minmax_nan: GICombineRule<
  (defs root:$root, unsigned_matchinfo:$info),
  (match (wip_match_opcode G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
         [{ return Helper.matchCombineFMinMaxNaN(*${root}, ${info}); }]),
  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, ${info}); }])>;

// Transform (add x, (sub y, x)) -> y
// Transform (add (sub y, x), x) -> y
def add_sub_reg: GICombineRule <
  (defs root:$root, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_ADD):$root,
         [{ return Helper.matchAddSubSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${root},
                                                      ${matchinfo}); }])>;

def buildvector_identity_fold : GICombineRule<
  (defs root:$build_vector, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_BUILD_VECTOR_TRUNC, G_BUILD_VECTOR):$build_vector,
         [{ return Helper.matchBuildVectorIdentityFold(*${build_vector}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${build_vector}, ${matchinfo}); }])>;

def trunc_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
      [{ return Helper.matchTruncBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

def trunc_lshr_buildvector_fold : GICombineRule<
  (defs root:$op, register_matchinfo:$matchinfo),
  (match (wip_match_opcode G_TRUNC):$op,
      [{ return Helper.matchTruncLshrBuildVectorFold(*${op}, ${matchinfo}); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${matchinfo}); }])>;

// Transform:
//   (x + y) - y -> x
//   (x + y) - x -> y
//   x - (y + x) -> 0 - y
//   x - (x + z) -> 0 - z
def sub_add_reg: GICombineRule <
  (defs root:$root, build_fn_matchinfo:$matchinfo),
  (match (wip_match_opcode G_SUB):$root,
         [{ return Helper.matchSubAddSameReg(*${root}, ${matchinfo}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])>;

def bitcast_bitcast_fold : GICombineRule<
  (defs root:$dst),
  (match (G_BITCAST $dst, $src1):$op, (G_BITCAST $src1, $src0),
      [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;


def fptrunc_fpext_fold : GICombineRule<
  (defs root:$dst),
  (match (G_FPTRUNC $dst, $src1):$op, (G_FPEXT $src1, $src0),
      [{ return MRI.getType(${src0}.getReg()) == MRI.getType(${dst}.getReg()); }]),
  (apply [{ Helper.replaceSingleDefInstWithReg(*${op}, ${src0}.getReg()); }])>;


def select_to_minmax: GICombineRule<
  (defs root:$root, build_fn_matchinfo:$info),
  (match (wip_match_opcode G_SELECT):$root,
         [{ return Helper.matchSimplifySelectToMinMax(*${root}, ${info}); }]),
  (apply [{ Helper.applyBuildFn(*${root}, ${info}); }])>;

// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
                                     undef_to_negative_one,
                                     binop_left_undef_to_zero,
                                     binop_right_undef_to_undef,
                                     unary_undef_to_zero,
                                     propagate_undef_any_op,
                                     propagate_undef_all_ops,
                                     propagate_undef_shuffle_mask,
                                     erase_undef_store,
                                     unmerge_undef,
                                     insert_extract_vec_elt_out_of_bounds]>;

def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
                                        binop_same_val, binop_left_to_zero,
                                        binop_right_to_zero, p2i_to_i2p,
                                        i2p_to_p2i, anyext_trunc_fold,
                                        fneg_fneg_fold, right_identity_one,
                                        add_sub_reg, buildvector_identity_fold,
                                        trunc_buildvector_fold,
                                        trunc_lshr_buildvector_fold,
                                        bitcast_bitcast_fold, fptrunc_fpext_fold]>;

def const_combines : GICombineGroup<[constant_fold_fp_ops, const_ptradd_to_i2p,
                                     overlapping_and, mulo_by_2, mulo_by_0,
                                     addo_by_0, adde_to_addo,
                                     combine_minmax_nan]>;

def known_bits_simplifications : GICombineGroup<[
  redundant_and, redundant_sext_inreg, redundant_or, urem_pow2_to_mask,
  zext_trunc_fold, icmp_to_true_false_known_bits, icmp_to_lhs_known_bits,
  sext_inreg_to_zext_inreg]>;

def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend,
                                               narrow_binop_feeding_and]>;

def phi_combines : GICombineGroup<[extend_through_phis]>;

def select_combines : GICombineGroup<[select_undef_cmp, select_constant_cmp,
                                      select_to_logical]>;

def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl, add_p2i_to_ptradd,
                                       mul_by_neg_one, idempotent_prop]>;

def fma_combines : GICombineGroup<[combine_fadd_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fmul_to_fmad_or_fma, combine_fadd_fma_fmul_to_fmad_or_fma,
  combine_fadd_fpext_fma_fmul_to_fmad_or_fma, combine_fsub_fmul_to_fmad_or_fma,
  combine_fsub_fneg_fmul_to_fmad_or_fma, combine_fsub_fpext_fmul_to_fmad_or_fma,
  combine_fsub_fpext_fneg_fmul_to_fmad_or_fma]>;

def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
    extract_vec_elt_combines, combines_for_extload,
    combine_indexed_load_store, undef_combines, identity_combines, phi_combines,
    simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
    reassocs, ptr_add_immed_chain,
    shl_ashr_to_sext_inreg, sext_inreg_of_load,
    width_reduction_combines, select_combines,
    known_bits_simplifications, ext_ext_fold,
    not_cmp_fold, opt_brcond_by_inverting_cond,
    unmerge_merge, unmerge_cst, unmerge_dead_to_trunc,
    unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
    const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
    shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
    div_rem_to_divrem, funnel_shift_combines, commute_shift,
    form_bitfield_extract, constant_fold, fabs_fneg_fold,
    intdiv_combines, mulh_combines, redundant_neg_operands,
    and_or_disjoint_mask, fma_combines, fold_binop_into_select,
    sub_add_reg, select_to_minmax, redundant_binop_in_equality,
    fsub_to_fneg, commute_constant_to_rhs]>;

// A combine group used for prelegalizer combiners at -O0. The combines in
// this group have been selected based on experiments to balance code size and
// compile-time performance.
def optnone_combines : GICombineGroup<[trivial_combines,
    ptr_add_immed_chain, combines_for_extload,
    not_cmp_fold, opt_brcond_by_inverting_cond]>;
//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces used by SelectionDAG
// instruction selection generators.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Selection DAG Type Constraint definitions.
//
// Note that the semantics of these constraints are hard coded into tblgen.  To
// modify or add constraints, you have to hack tblgen.
//

class SDTypeConstraint<int opnum> {
  int OperandNum = opnum;
}

// SDTCisVT - The specified operand has exactly this VT.
class SDTCisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
  ValueType VT = vt;
}

class SDTCisPtrTy<int OpNum> : SDTypeConstraint<OpNum>;

// SDTCisInt - The specified operand has integer type.
class SDTCisInt<int OpNum> : SDTypeConstraint<OpNum>;

// SDTCisFP - The specified operand has floating-point type.
class SDTCisFP<int OpNum> : SDTypeConstraint<OpNum>;

// SDTCisVec - The specified operand has a vector type.
class SDTCisVec<int OpNum> : SDTypeConstraint<OpNum>;

// SDTCisSameAs - The two specified operands have identical types.
class SDTCisSameAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
  int OtherOperandNum = OtherOp;
}

// SDTCisVTSmallerThanOp - The specified operand is a VT SDNode, and its type is
// smaller than the 'Other' operand.
class SDTCisVTSmallerThanOp<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
  int OtherOperandNum = OtherOp;
}

class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{
  int BigOperandNum = BigOp;
}

/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same
/// type as the element type of OtherOp, which is a vector type.
class SDTCisEltOfVec<int ThisOp, int OtherOp>
  : SDTypeConstraint<ThisOp> {
  int OtherOpNum = OtherOp;
}

/// SDTCisSubVecOfVec - This indicates that ThisOp is a vector type
/// with length less than that of OtherOp, which is a vector type.
class SDTCisSubVecOfVec<int ThisOp, int OtherOp>
  : SDTypeConstraint<ThisOp> {
  int OtherOpNum = OtherOp;
}

// SDTCVecEltisVT - The specified operand is vector type with element type
// of VT.
class SDTCVecEltisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
  ValueType VT = vt;
}

// SDTCisSameNumEltsAs - The two specified operands have identical number
// of elements.
class SDTCisSameNumEltsAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
  int OtherOperandNum = OtherOp;
}

// SDTCisSameSizeAs - The two specified operands have identical size.
class SDTCisSameSizeAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
  int OtherOperandNum = OtherOp;
}

//===----------------------------------------------------------------------===//
// Selection DAG Type Profile definitions.
//
// These use the constraints defined above to describe the type requirements of
// the various nodes.  These are not hard coded into tblgen, allowing targets to
// add their own if needed.
//

// SDTypeProfile - This profile describes the type requirements of a Selection
// DAG node.
class SDTypeProfile<int numresults, int numoperands,
                    list<SDTypeConstraint> constraints> {
  int NumResults = numresults;
  int NumOperands = numoperands;
  list<SDTypeConstraint> Constraints = constraints;
}

// Builtin profiles.
def SDTIntLeaf: SDTypeProfile<1, 0, [SDTCisInt<0>]>;         // for 'imm'.
def SDTFPLeaf : SDTypeProfile<1, 0, [SDTCisFP<0>]>;          // for 'fpimm'.
def SDTPtrLeaf: SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;       // for '&g'.
def SDTOther  : SDTypeProfile<1, 0, [SDTCisVT<0, OtherVT>]>; // for 'vt'.
def SDTUNDEF  : SDTypeProfile<1, 0, []>;                     // for 'undef'.
def SDTUnaryOp  : SDTypeProfile<1, 1, []>;                   // for bitconvert.

def SDTPtrAddOp : SDTypeProfile<1, 2, [     // ptradd
  SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisPtrTy<1>
]>;
def SDTIntBinOp : SDTypeProfile<1, 2, [     // add, and, or, xor, udiv, etc.
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>
]>;
def SDTIntShiftOp : SDTypeProfile<1, 2, [   // shl, sra, srl
  SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>
]>;
def SDTIntShiftDOp: SDTypeProfile<1, 3, [   // fshl, fshr
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>, SDTCisInt<3>
]>;
def SDTIntSatNoShOp : SDTypeProfile<1, 2, [   // ssat with no shift
  SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;
def SDTIntBinHiLoOp : SDTypeProfile<2, 2, [ // mulhi, mullo, sdivrem, udivrem
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,SDTCisInt<0>
]>;
def SDTIntScaledBinOp : SDTypeProfile<1, 3, [  // smulfix, sdivfix, etc
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>, SDTCisInt<3>
]>;

def SDTFPBinOp : SDTypeProfile<1, 2, [      // fadd, fmul, etc.
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>
]>;
def SDTFPSignOp : SDTypeProfile<1, 2, [     // fcopysign.
  SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisFP<2>
]>;
def SDTFPTernaryOp : SDTypeProfile<1, 3, [  // fmadd, fnmsub, etc.
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisFP<0>
]>;
def SDTIntUnaryOp : SDTypeProfile<1, 1, [ // bitreverse
  SDTCisSameAs<0, 1>, SDTCisInt<0>
]>;
def SDTIntBitCountUnaryOp : SDTypeProfile<1, 1, [   // ctlz, cttz
  SDTCisInt<0>, SDTCisInt<1>
]>;
def SDTIntExtendOp : SDTypeProfile<1, 1, [  // sext, zext, anyext
  SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDTIntTruncOp  : SDTypeProfile<1, 1, [  // trunc
  SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDTFPUnaryOp  : SDTypeProfile<1, 1, [   // fneg, fsqrt, etc
  SDTCisSameAs<0, 1>, SDTCisFP<0>
]>;
def SDTFPRoundOp  : SDTypeProfile<1, 1, [   // fpround
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDTFPExtendOp  : SDTypeProfile<1, 1, [  // fpextend
  SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDIsFPClassOp : SDTypeProfile<1, 2, [   // is_fpclass
  SDTCisInt<0>, SDTCisFP<1>, SDTCisInt<2>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDTIntToFPOp : SDTypeProfile<1, 1, [    // [su]int_to_fp
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDTFPToIntOp : SDTypeProfile<1, 1, [    // fp_to_[su]int
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>
]>;
def SDTFPToIntSatOp : SDTypeProfile<1, 2, [    // fp_to_[su]int_sat
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, SDTCisVT<2, OtherVT>
]>;
def SDTFPExpOp : SDTypeProfile<1, 2, [      // ldexp
  SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>
]>;
def SDTExtInreg : SDTypeProfile<1, 2, [     // sext_inreg
  SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisVT<2, OtherVT>,
  SDTCisVTSmallerThanOp<2, 1>
]>;
def SDTExtInvec : SDTypeProfile<1, 1, [     // sext_invec
  SDTCisInt<0>, SDTCisVec<0>, SDTCisInt<1>, SDTCisVec<1>,
  SDTCisOpSmallerThanOp<1, 0>
]>;
def SDTFreeze : SDTypeProfile<1, 1, [
  SDTCisSameAs<0, 1>
]>;

def SDTSetCC : SDTypeProfile<1, 3, [        // setcc
  SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
]>;

def SDTSelect : SDTypeProfile<1, 3, [       // select
  SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
]>;

def SDTVSelect : SDTypeProfile<1, 3, [       // vselect
  SDTCisVec<0>, SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameNumEltsAs<0, 1>
]>;

def SDTSelectCC : SDTypeProfile<1, 5, [     // select_cc
  SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisSameAs<0, 3>,
  SDTCisVT<5, OtherVT>
]>;

def SDTBr : SDTypeProfile<0, 1, [           // br
  SDTCisVT<0, OtherVT>
]>;

def SDTBrCC : SDTypeProfile<0, 4, [       // brcc
  SDTCisVT<0, OtherVT>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
]>;

def SDTBrcond : SDTypeProfile<0, 2, [       // brcond
  SDTCisInt<0>, SDTCisVT<1, OtherVT>
]>;

def SDTBrind : SDTypeProfile<0, 1, [        // brind
  SDTCisPtrTy<0>
]>;

def SDTCatchret : SDTypeProfile<0, 2, [     // catchret
  SDTCisVT<0, OtherVT>, SDTCisVT<1, OtherVT>
]>;

def SDTNone : SDTypeProfile<0, 0, []>;      // ret, trap

def SDTUBSANTrap : SDTypeProfile<0, 1, []>;      // ubsantrap

def SDTLoad : SDTypeProfile<1, 1, [         // load
  SDTCisPtrTy<1>
]>;

def SDTStore : SDTypeProfile<0, 2, [        // store
  SDTCisPtrTy<1>
]>;

def SDTIStore : SDTypeProfile<1, 3, [       // indexed store
  SDTCisSameAs<0, 2>, SDTCisPtrTy<0>, SDTCisPtrTy<3>
]>;

def SDTMaskedStore: SDTypeProfile<0, 4, [       // masked store
  SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisPtrTy<2>, SDTCisVec<3>, SDTCisSameNumEltsAs<0, 3>
]>;

def SDTMaskedLoad: SDTypeProfile<1, 4, [       // masked load
  SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisPtrTy<2>, SDTCisVec<3>, SDTCisSameAs<0, 4>,
  SDTCisSameNumEltsAs<0, 3>
]>;

def SDTMaskedGather : SDTypeProfile<1, 4, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVec<2>, SDTCisPtrTy<3>, SDTCisVec<4>,
  SDTCisSameNumEltsAs<0, 2>, SDTCisSameNumEltsAs<0, 4>
]>;

def SDTMaskedScatter : SDTypeProfile<0, 4, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>, SDTCisVec<3>,
  SDTCisSameNumEltsAs<0, 1>, SDTCisSameNumEltsAs<0, 3>
]>;

def SDTVecShuffle : SDTypeProfile<1, 2, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
]>;
def SDTVecSlice : SDTypeProfile<1, 3, [     // vector splice
  SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisInt<3>
]>;
def SDTVecExtract : SDTypeProfile<1, 2, [   // vector extract
  SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2>
]>;
def SDTVecInsert : SDTypeProfile<1, 3, [    // vector insert
  SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
]>;
def SDTVecReduce : SDTypeProfile<1, 1, [    // vector reduction
  SDTCisInt<0>, SDTCisVec<1>
]>;
def SDTFPVecReduce : SDTypeProfile<1, 1, [  // FP vector reduction
  SDTCisFP<0>, SDTCisVec<1>
]>;

def SDTVecReverse : SDTypeProfile<1, 1, [  // vector reverse
  SDTCisVec<0>, SDTCisSameAs<0,1>
]>;

def SDTSubVecExtract : SDTypeProfile<1, 2, [// subvector extract
  SDTCisSubVecOfVec<0,1>, SDTCisInt<2>
]>;
def SDTSubVecInsert : SDTypeProfile<1, 3, [ // subvector insert
  SDTCisSubVecOfVec<2, 1>, SDTCisSameAs<0,1>, SDTCisInt<3>
]>;

def SDTPrefetch : SDTypeProfile<0, 4, [     // prefetch
  SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>, SDTCisInt<1>
]>;

def SDTAtomicFence : SDTypeProfile<0, 2, [
  SDTCisSameAs<0,1>, SDTCisPtrTy<0>
]>;
def SDTAtomic3 : SDTypeProfile<1, 3, [
  SDTCisSameAs<0,2>,  SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
def SDTAtomic2 : SDTypeProfile<1, 2, [
  SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;

def SDTFPAtomic2 : SDTypeProfile<1, 2, [
  SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
]>;

def SDTAtomicStore : SDTypeProfile<0, 2, [
  SDTCisPtrTy<0>, SDTCisInt<1>
]>;
def SDTAtomicLoad : SDTypeProfile<1, 1, [
  SDTCisInt<0>, SDTCisPtrTy<1>
]>;

class SDCallSeqStart<list<SDTypeConstraint> constraints> :
        SDTypeProfile<0, 2, constraints>;
class SDCallSeqEnd<list<SDTypeConstraint> constraints> :
        SDTypeProfile<0, 2, constraints>;

//===----------------------------------------------------------------------===//
// Selection DAG Node definitions.
//
class SDNode<string opcode, SDTypeProfile typeprof,
             list<SDNodeProperty> props = [], string sdclass = "SDNode">
             : SDPatternOperator {
  string Opcode  = opcode;
  string SDClass = sdclass;
  let Properties = props;
  SDTypeProfile TypeProfile = typeprof;
}

// Special TableGen-recognized dag nodes
def set;
def implicit;
def node;
def srcvalue;

def imm        : SDNode<"ISD::Constant"  , SDTIntLeaf , [], "ConstantSDNode">;
def timm       : SDNode<"ISD::TargetConstant",SDTIntLeaf, [], "ConstantSDNode">;
def fpimm      : SDNode<"ISD::ConstantFP", SDTFPLeaf  , [], "ConstantFPSDNode">;
def vt         : SDNode<"ISD::VALUETYPE" , SDTOther   , [], "VTSDNode">;
def bb         : SDNode<"ISD::BasicBlock", SDTOther   , [], "BasicBlockSDNode">;
def cond       : SDNode<"ISD::CONDCODE"  , SDTOther   , [], "CondCodeSDNode">;
def undef      : SDNode<"ISD::UNDEF"     , SDTUNDEF   , []>;
def vscale     : SDNode<"ISD::VSCALE"    , SDTIntUnaryOp, []>;
def globaladdr : SDNode<"ISD::GlobalAddress",         SDTPtrLeaf, [],
                        "GlobalAddressSDNode">;
def tglobaladdr : SDNode<"ISD::TargetGlobalAddress",  SDTPtrLeaf, [],
                         "GlobalAddressSDNode">;
def globaltlsaddr : SDNode<"ISD::GlobalTLSAddress",         SDTPtrLeaf, [],
                          "GlobalAddressSDNode">;
def tglobaltlsaddr : SDNode<"ISD::TargetGlobalTLSAddress",  SDTPtrLeaf, [],
                           "GlobalAddressSDNode">;
def constpool   : SDNode<"ISD::ConstantPool",         SDTPtrLeaf, [],
                         "ConstantPoolSDNode">;
def tconstpool  : SDNode<"ISD::TargetConstantPool",   SDTPtrLeaf, [],
                         "ConstantPoolSDNode">;
def jumptable   : SDNode<"ISD::JumpTable",            SDTPtrLeaf, [],
                         "JumpTableSDNode">;
def tjumptable  : SDNode<"ISD::TargetJumpTable",      SDTPtrLeaf, [],
                         "JumpTableSDNode">;
def frameindex  : SDNode<"ISD::FrameIndex",           SDTPtrLeaf, [],
                         "FrameIndexSDNode">;
def tframeindex : SDNode<"ISD::TargetFrameIndex",     SDTPtrLeaf, [],
                         "FrameIndexSDNode">;
def externalsym : SDNode<"ISD::ExternalSymbol",       SDTPtrLeaf, [],
                         "ExternalSymbolSDNode">;
def texternalsym: SDNode<"ISD::TargetExternalSymbol", SDTPtrLeaf, [],
                         "ExternalSymbolSDNode">;
def mcsym: SDNode<"ISD::MCSymbol", SDTPtrLeaf, [], "MCSymbolSDNode">;
def blockaddress : SDNode<"ISD::BlockAddress",        SDTPtrLeaf, [],
                         "BlockAddressSDNode">;
def tblockaddress: SDNode<"ISD::TargetBlockAddress",  SDTPtrLeaf, [],
                         "BlockAddressSDNode">;

def add        : SDNode<"ISD::ADD"       , SDTIntBinOp   ,
                        [SDNPCommutative, SDNPAssociative]>;
def ptradd     : SDNode<"ISD::ADD"       , SDTPtrAddOp, []>;
def sub        : SDNode<"ISD::SUB"       , SDTIntBinOp>;
def mul        : SDNode<"ISD::MUL"       , SDTIntBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def mulhs      : SDNode<"ISD::MULHS"     , SDTIntBinOp, [SDNPCommutative]>;
def mulhu      : SDNode<"ISD::MULHU"     , SDTIntBinOp, [SDNPCommutative]>;
def avgfloors  : SDNode<"ISD::AVGFLOORS" , SDTIntBinOp, [SDNPCommutative]>;
def avgflooru  : SDNode<"ISD::AVGFLOORU" , SDTIntBinOp, [SDNPCommutative]>;
def avgceils   : SDNode<"ISD::AVGCEILS"  , SDTIntBinOp, [SDNPCommutative]>;
def avgceilu   : SDNode<"ISD::AVGCEILU"  , SDTIntBinOp, [SDNPCommutative]>;
def abds       : SDNode<"ISD::ABDS"      , SDTIntBinOp, [SDNPCommutative]>;
def abdu       : SDNode<"ISD::ABDU"      , SDTIntBinOp, [SDNPCommutative]>;
def smullohi   : SDNode<"ISD::SMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
def umullohi   : SDNode<"ISD::UMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
def sdiv       : SDNode<"ISD::SDIV"      , SDTIntBinOp>;
def udiv       : SDNode<"ISD::UDIV"      , SDTIntBinOp>;
def srem       : SDNode<"ISD::SREM"      , SDTIntBinOp>;
def urem       : SDNode<"ISD::UREM"      , SDTIntBinOp>;
def sdivrem    : SDNode<"ISD::SDIVREM"   , SDTIntBinHiLoOp>;
def udivrem    : SDNode<"ISD::UDIVREM"   , SDTIntBinHiLoOp>;
def srl        : SDNode<"ISD::SRL"       , SDTIntShiftOp>;
def sra        : SDNode<"ISD::SRA"       , SDTIntShiftOp>;
def shl        : SDNode<"ISD::SHL"       , SDTIntShiftOp>;
def rotl       : SDNode<"ISD::ROTL"      , SDTIntShiftOp>;
def rotr       : SDNode<"ISD::ROTR"      , SDTIntShiftOp>;
def fshl       : SDNode<"ISD::FSHL"      , SDTIntShiftDOp>;
def fshr       : SDNode<"ISD::FSHR"      , SDTIntShiftDOp>;
def and        : SDNode<"ISD::AND"       , SDTIntBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def or         : SDNode<"ISD::OR"        , SDTIntBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def xor        : SDNode<"ISD::XOR"       , SDTIntBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def addc       : SDNode<"ISD::ADDC"      , SDTIntBinOp,
                        [SDNPCommutative, SDNPOutGlue]>;
def adde       : SDNode<"ISD::ADDE"      , SDTIntBinOp,
                        [SDNPCommutative, SDNPOutGlue, SDNPInGlue]>;
def subc       : SDNode<"ISD::SUBC"      , SDTIntBinOp,
                        [SDNPOutGlue]>;
def sube       : SDNode<"ISD::SUBE"      , SDTIntBinOp,
                        [SDNPOutGlue, SDNPInGlue]>;
def smin       : SDNode<"ISD::SMIN"      , SDTIntBinOp,
                                  [SDNPCommutative, SDNPAssociative]>;
def smax       : SDNode<"ISD::SMAX"      , SDTIntBinOp,
                                  [SDNPCommutative, SDNPAssociative]>;
def umin       : SDNode<"ISD::UMIN"      , SDTIntBinOp,
                                  [SDNPCommutative, SDNPAssociative]>;
def umax       : SDNode<"ISD::UMAX"      , SDTIntBinOp,
                                  [SDNPCommutative, SDNPAssociative]>;

def saddsat    : SDNode<"ISD::SADDSAT"   , SDTIntBinOp, [SDNPCommutative]>;
def uaddsat    : SDNode<"ISD::UADDSAT"   , SDTIntBinOp, [SDNPCommutative]>;
def ssubsat    : SDNode<"ISD::SSUBSAT"   , SDTIntBinOp>;
def usubsat    : SDNode<"ISD::USUBSAT"   , SDTIntBinOp>;
def sshlsat    : SDNode<"ISD::SSHLSAT"   , SDTIntBinOp>;
def ushlsat    : SDNode<"ISD::USHLSAT"   , SDTIntBinOp>;

def smulfix    : SDNode<"ISD::SMULFIX"   , SDTIntScaledBinOp, [SDNPCommutative]>;
def smulfixsat : SDNode<"ISD::SMULFIXSAT", SDTIntScaledBinOp, [SDNPCommutative]>;
def umulfix    : SDNode<"ISD::UMULFIX"   , SDTIntScaledBinOp, [SDNPCommutative]>;
def umulfixsat : SDNode<"ISD::UMULFIXSAT", SDTIntScaledBinOp, [SDNPCommutative]>;
def sdivfix    : SDNode<"ISD::SDIVFIX"   , SDTIntScaledBinOp>;
def sdivfixsat : SDNode<"ISD::SDIVFIXSAT", SDTIntScaledBinOp>;
def udivfix    : SDNode<"ISD::UDIVFIX"   , SDTIntScaledBinOp>;
def udivfixsat : SDNode<"ISD::UDIVFIXSAT", SDTIntScaledBinOp>;

def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
def sext_invec : SDNode<"ISD::SIGN_EXTEND_VECTOR_INREG", SDTExtInvec>;
def zext_invec : SDNode<"ISD::ZERO_EXTEND_VECTOR_INREG", SDTExtInvec>;

def abs        : SDNode<"ISD::ABS"        , SDTIntUnaryOp>;
def bitreverse : SDNode<"ISD::BITREVERSE" , SDTIntUnaryOp>;
def bswap      : SDNode<"ISD::BSWAP"      , SDTIntUnaryOp>;
def ctlz       : SDNode<"ISD::CTLZ"       , SDTIntBitCountUnaryOp>;
def cttz       : SDNode<"ISD::CTTZ"       , SDTIntBitCountUnaryOp>;
def ctpop      : SDNode<"ISD::CTPOP"      , SDTIntBitCountUnaryOp>;
def ctlz_zero_undef : SDNode<"ISD::CTLZ_ZERO_UNDEF", SDTIntBitCountUnaryOp>;
def cttz_zero_undef : SDNode<"ISD::CTTZ_ZERO_UNDEF", SDTIntBitCountUnaryOp>;
def sext       : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
def zext       : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
def anyext     : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
def trunc      : SDNode<"ISD::TRUNCATE"   , SDTIntTruncOp>;
def bitconvert : SDNode<"ISD::BITCAST"    , SDTUnaryOp>;
def addrspacecast : SDNode<"ISD::ADDRSPACECAST", SDTUnaryOp>;
def freeze     : SDNode<"ISD::FREEZE"     , SDTFreeze>;
def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
def insertelt  : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;

def vecreduce_add  : SDNode<"ISD::VECREDUCE_ADD", SDTVecReduce>;
def vecreduce_smax  : SDNode<"ISD::VECREDUCE_SMAX", SDTVecReduce>;
def vecreduce_umax  : SDNode<"ISD::VECREDUCE_UMAX", SDTVecReduce>;
def vecreduce_smin  : SDNode<"ISD::VECREDUCE_SMIN", SDTVecReduce>;
def vecreduce_umin  : SDNode<"ISD::VECREDUCE_UMIN", SDTVecReduce>;
def vecreduce_fadd  : SDNode<"ISD::VECREDUCE_FADD", SDTFPVecReduce>;
def vecreduce_fmin  : SDNode<"ISD::VECREDUCE_FMIN", SDTFPVecReduce>;
def vecreduce_fmax  : SDNode<"ISD::VECREDUCE_FMAX", SDTFPVecReduce>;
def vecreduce_fminimum : SDNode<"ISD::VECREDUCE_FMINIMUM", SDTFPVecReduce>;
def vecreduce_fmaximum : SDNode<"ISD::VECREDUCE_FMAXIMUM", SDTFPVecReduce>;

def fadd       : SDNode<"ISD::FADD"       , SDTFPBinOp, [SDNPCommutative]>;
def fsub       : SDNode<"ISD::FSUB"       , SDTFPBinOp>;
def fmul       : SDNode<"ISD::FMUL"       , SDTFPBinOp, [SDNPCommutative]>;
def fdiv       : SDNode<"ISD::FDIV"       , SDTFPBinOp>;
def frem       : SDNode<"ISD::FREM"       , SDTFPBinOp>;
def fma        : SDNode<"ISD::FMA"        , SDTFPTernaryOp, [SDNPCommutative]>;
def fmad       : SDNode<"ISD::FMAD"       , SDTFPTernaryOp, [SDNPCommutative]>;
def fabs       : SDNode<"ISD::FABS"       , SDTFPUnaryOp>;
def fminnum    : SDNode<"ISD::FMINNUM"    , SDTFPBinOp,
                                  [SDNPCommutative, SDNPAssociative]>;
def fmaxnum    : SDNode<"ISD::FMAXNUM"    , SDTFPBinOp,
                                  [SDNPCommutative, SDNPAssociative]>;
def fminnum_ieee : SDNode<"ISD::FMINNUM_IEEE", SDTFPBinOp,
                          [SDNPCommutative]>;
def fmaxnum_ieee  : SDNode<"ISD::FMAXNUM_IEEE", SDTFPBinOp,
                           [SDNPCommutative]>;
def fminimum   : SDNode<"ISD::FMINIMUM"   , SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def fmaximum   : SDNode<"ISD::FMAXIMUM"   , SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def fgetsign   : SDNode<"ISD::FGETSIGN"   , SDTFPToIntOp>;
def fcanonicalize : SDNode<"ISD::FCANONICALIZE", SDTFPUnaryOp>;
def fneg       : SDNode<"ISD::FNEG"       , SDTFPUnaryOp>;
def fsqrt      : SDNode<"ISD::FSQRT"      , SDTFPUnaryOp>;
def fsin       : SDNode<"ISD::FSIN"       , SDTFPUnaryOp>;
def fcos       : SDNode<"ISD::FCOS"       , SDTFPUnaryOp>;
def fexp2      : SDNode<"ISD::FEXP2"      , SDTFPUnaryOp>;
def fpow       : SDNode<"ISD::FPOW"       , SDTFPBinOp>;
def flog2      : SDNode<"ISD::FLOG2"      , SDTFPUnaryOp>;
def fldexp     : SDNode<"ISD::FLDEXP"     , SDTFPExpOp>;
def frint      : SDNode<"ISD::FRINT"      , SDTFPUnaryOp>;
def ftrunc     : SDNode<"ISD::FTRUNC"     , SDTFPUnaryOp>;
def fceil      : SDNode<"ISD::FCEIL"      , SDTFPUnaryOp>;
def ffloor     : SDNode<"ISD::FFLOOR"     , SDTFPUnaryOp>;
def fnearbyint : SDNode<"ISD::FNEARBYINT" , SDTFPUnaryOp>;
def fround     : SDNode<"ISD::FROUND"     , SDTFPUnaryOp>;
def froundeven : SDNode<"ISD::FROUNDEVEN" , SDTFPUnaryOp>;

def lround     : SDNode<"ISD::LROUND"     , SDTFPToIntOp>;
def llround    : SDNode<"ISD::LLROUND"    , SDTFPToIntOp>;
def lrint      : SDNode<"ISD::LRINT"      , SDTFPToIntOp>;
def llrint     : SDNode<"ISD::LLRINT"     , SDTFPToIntOp>;

def fpround    : SDNode<"ISD::FP_ROUND"   , SDTFPRoundOp>;
def fpextend   : SDNode<"ISD::FP_EXTEND"  , SDTFPExtendOp>;
def fcopysign  : SDNode<"ISD::FCOPYSIGN"  , SDTFPSignOp>;

def is_fpclass : SDNode<"ISD::IS_FPCLASS" , SDIsFPClassOp>;

def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>;
def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>;
def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>;
def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>;
def fp_to_sint_sat : SDNode<"ISD::FP_TO_SINT_SAT" , SDTFPToIntSatOp>;
def fp_to_uint_sat : SDNode<"ISD::FP_TO_UINT_SAT" , SDTFPToIntSatOp>;
def f16_to_fp  : SDNode<"ISD::FP16_TO_FP" , SDTIntToFPOp>;
def fp_to_f16  : SDNode<"ISD::FP_TO_FP16" , SDTFPToIntOp>;

def strict_fadd       : SDNode<"ISD::STRICT_FADD",
                               SDTFPBinOp, [SDNPHasChain, SDNPCommutative]>;
def strict_fsub       : SDNode<"ISD::STRICT_FSUB",
                               SDTFPBinOp, [SDNPHasChain]>;
def strict_fmul       : SDNode<"ISD::STRICT_FMUL",
                               SDTFPBinOp, [SDNPHasChain, SDNPCommutative]>;
def strict_fdiv       : SDNode<"ISD::STRICT_FDIV",
                               SDTFPBinOp, [SDNPHasChain]>;
def strict_frem       : SDNode<"ISD::STRICT_FREM",
                               SDTFPBinOp, [SDNPHasChain]>;
def strict_fma        : SDNode<"ISD::STRICT_FMA",
                               SDTFPTernaryOp, [SDNPHasChain, SDNPCommutative]>;
def strict_fsqrt      : SDNode<"ISD::STRICT_FSQRT",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fsin       : SDNode<"ISD::STRICT_FSIN",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fcos       : SDNode<"ISD::STRICT_FCOS",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fexp2      : SDNode<"ISD::STRICT_FEXP2",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fpow       : SDNode<"ISD::STRICT_FPOW",
                               SDTFPBinOp, [SDNPHasChain]>;
def strict_fldexp       : SDNode<"ISD::STRICT_FLDEXP",
                               SDTFPExpOp, [SDNPHasChain]>;
def strict_flog2      : SDNode<"ISD::STRICT_FLOG2",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_frint      : SDNode<"ISD::STRICT_FRINT",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_lrint      : SDNode<"ISD::STRICT_LRINT",
                               SDTFPToIntOp, [SDNPHasChain]>;
def strict_llrint     : SDNode<"ISD::STRICT_LLRINT",
                               SDTFPToIntOp, [SDNPHasChain]>;
def strict_fnearbyint : SDNode<"ISD::STRICT_FNEARBYINT",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fceil      : SDNode<"ISD::STRICT_FCEIL",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_ffloor     : SDNode<"ISD::STRICT_FFLOOR",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_lround     : SDNode<"ISD::STRICT_LROUND",
                               SDTFPToIntOp, [SDNPHasChain]>;
def strict_llround    : SDNode<"ISD::STRICT_LLROUND",
                               SDTFPToIntOp, [SDNPHasChain]>;
def strict_fround     : SDNode<"ISD::STRICT_FROUND",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_froundeven : SDNode<"ISD::STRICT_FROUNDEVEN",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_ftrunc     : SDNode<"ISD::STRICT_FTRUNC",
                               SDTFPUnaryOp, [SDNPHasChain]>;
def strict_fminnum    : SDNode<"ISD::STRICT_FMINNUM",
                               SDTFPBinOp, [SDNPHasChain,
                                            SDNPCommutative, SDNPAssociative]>;
def strict_fmaxnum    : SDNode<"ISD::STRICT_FMAXNUM",
                               SDTFPBinOp, [SDNPHasChain,
                                            SDNPCommutative, SDNPAssociative]>;
def strict_fminimum   : SDNode<"ISD::STRICT_FMINIMUM",
                               SDTFPBinOp, [SDNPHasChain,
                                            SDNPCommutative, SDNPAssociative]>;
def strict_fmaximum   : SDNode<"ISD::STRICT_FMAXIMUM",
                               SDTFPBinOp, [SDNPHasChain,
                                            SDNPCommutative, SDNPAssociative]>;
def strict_fpround    : SDNode<"ISD::STRICT_FP_ROUND",
                               SDTFPRoundOp, [SDNPHasChain]>;
def strict_fpextend   : SDNode<"ISD::STRICT_FP_EXTEND",
                               SDTFPExtendOp, [SDNPHasChain]>;
def strict_fp_to_sint : SDNode<"ISD::STRICT_FP_TO_SINT",
                               SDTFPToIntOp, [SDNPHasChain]>;
def strict_fp_to_uint : SDNode<"ISD::STRICT_FP_TO_UINT",
                               SDTFPToIntOp, [SDNPHasChain]>;
def strict_sint_to_fp : SDNode<"ISD::STRICT_SINT_TO_FP",
                               SDTIntToFPOp, [SDNPHasChain]>;
def strict_uint_to_fp : SDNode<"ISD::STRICT_UINT_TO_FP",
                               SDTIntToFPOp, [SDNPHasChain]>;
def strict_fsetcc  : SDNode<"ISD::STRICT_FSETCC",  SDTSetCC, [SDNPHasChain]>;
def strict_fsetccs : SDNode<"ISD::STRICT_FSETCCS", SDTSetCC, [SDNPHasChain]>;

def setcc      : SDNode<"ISD::SETCC"      , SDTSetCC>;
def select     : SDNode<"ISD::SELECT"     , SDTSelect>;
def vselect    : SDNode<"ISD::VSELECT"    , SDTVSelect>;
def selectcc   : SDNode<"ISD::SELECT_CC"  , SDTSelectCC>;

def brcc       : SDNode<"ISD::BR_CC"      , SDTBrCC,   [SDNPHasChain]>;
def brcond     : SDNode<"ISD::BRCOND"     , SDTBrcond, [SDNPHasChain]>;
def brind      : SDNode<"ISD::BRIND"      , SDTBrind,  [SDNPHasChain]>;
def br         : SDNode<"ISD::BR"         , SDTBr,     [SDNPHasChain]>;
def catchret   : SDNode<"ISD::CATCHRET"   , SDTCatchret,
                        [SDNPHasChain, SDNPSideEffect]>;
def cleanupret : SDNode<"ISD::CLEANUPRET" , SDTNone,   [SDNPHasChain]>;

def trap       : SDNode<"ISD::TRAP"       , SDTNone,
                        [SDNPHasChain, SDNPSideEffect]>;
def debugtrap  : SDNode<"ISD::DEBUGTRAP"  , SDTNone,
                        [SDNPHasChain, SDNPSideEffect]>;
def ubsantrap  : SDNode<"ISD::UBSANTRAP"  , SDTUBSANTrap,
                        [SDNPHasChain, SDNPSideEffect]>;

def prefetch   : SDNode<"ISD::PREFETCH"   , SDTPrefetch,
                        [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
                         SDNPMemOperand]>;

def readcyclecounter : SDNode<"ISD::READCYCLECOUNTER", SDTIntLeaf,
                     [SDNPHasChain, SDNPSideEffect]>;

def membarrier : SDNode<"ISD::MEMBARRIER", SDTNone,
                        [SDNPHasChain, SDNPSideEffect]>;

def atomic_fence : SDNode<"ISD::ATOMIC_FENCE" , SDTAtomicFence,
                          [SDNPHasChain, SDNPSideEffect]>;

def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , SDTAtomic3,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_swap     : SDNode<"ISD::ATOMIC_SWAP", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_clr : SDNode<"ISD::ATOMIC_LOAD_CLR" , SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_or  : SDNode<"ISD::ATOMIC_LOAD_OR" , SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_fadd : SDNode<"ISD::ATOMIC_LOAD_FADD" , SDTFPAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_fsub : SDNode<"ISD::ATOMIC_LOAD_FSUB" , SDTFPAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_fmax : SDNode<"ISD::ATOMIC_LOAD_FMAX", SDTFPAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_fmin : SDNode<"ISD::ATOMIC_LOAD_FMIN", SDTFPAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_uinc_wrap : SDNode<"ISD::ATOMIC_LOAD_UINC_WRAP", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_udec_wrap : SDNode<"ISD::ATOMIC_LOAD_UDEC_WRAP", SDTAtomic2,
                    [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;

def atomic_load      : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
                    [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def atomic_store     : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
                    [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def masked_st    : SDNode<"ISD::MSTORE",  SDTMaskedStore,
                       [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def masked_ld    : SDNode<"ISD::MLOAD",  SDTMaskedLoad,
                       [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def masked_gather : SDNode<"ISD::MGATHER", SDTMaskedGather,
                           [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def masked_scatter : SDNode<"ISD::MSCATTER", SDTMaskedScatter,
                            [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
// and truncstore (see below).
def ld         : SDNode<"ISD::LOAD"       , SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def st         : SDNode<"ISD::STORE"      , SDTStore,
                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def ist        : SDNode<"ISD::STORE"      , SDTIStore,
                        [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
def vector_reverse : SDNode<"ISD::VECTOR_REVERSE", SDTVecReverse>;
def vector_splice : SDNode<"ISD::VECTOR_SPLICE", SDTVecSlice, []>;
def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, -1, []>, []>;
def splat_vector : SDNode<"ISD::SPLAT_VECTOR", SDTypeProfile<1, 1, []>, []>;
def step_vector : SDNode<"ISD::STEP_VECTOR", SDTypeProfile<1, 1,
                       [SDTCisVec<0>, SDTCisInt<1>]>, []>;
def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>,
                              []>;

// vector_extract/vector_insert are deprecated. extractelt/insertelt
// are preferred.
def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
    SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
    SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
def concat_vectors : SDNode<"ISD::CONCAT_VECTORS",
    SDTypeProfile<1, 2, [SDTCisSubVecOfVec<1, 0>, SDTCisSameAs<1, 2>]>,[]>;

// This operator does not do subvector type checking.  The ARM
// backend, at least, needs it.
def vector_extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
    SDTypeProfile<1, 2, [SDTCisInt<2>, SDTCisVec<1>, SDTCisVec<0>]>,
    []>;
def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
    SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVec<2>, SDTCisInt<3>]>,
    []>;

// This operator does subvector type checking.
def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;

// Nodes for intrinsics: you should use the intrinsic itself and let tblgen use
// these internally.  Don't reference these directly.
def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
                            SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                            [SDNPHasChain]>;
def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
                               SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
                               [SDNPHasChain]>;
def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
                                SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;

def SDT_assert : SDTypeProfile<1, 1,
  [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 0>]>;
def assertsext : SDNode<"ISD::AssertSext", SDT_assert>;
def assertzext : SDNode<"ISD::AssertZext", SDT_assert>;
def assertalign : SDNode<"ISD::AssertAlign", SDT_assert>;

//===----------------------------------------------------------------------===//
// Selection DAG Condition Codes

class CondCode<string fcmpName = "", string icmpName = ""> {
  string ICmpPredicate = icmpName;
  string FCmpPredicate = fcmpName;
}

// ISD::CondCode enums, and mapping to CmpInst::Predicate names
def SETOEQ : CondCode<"FCMP_OEQ">;
def SETOGT : CondCode<"FCMP_OGT">;
def SETOGE : CondCode<"FCMP_OGE">;
def SETOLT : CondCode<"FCMP_OLT">;
def SETOLE : CondCode<"FCMP_OLE">;
def SETONE : CondCode<"FCMP_ONE">;
def SETO   : CondCode<"FCMP_ORD">;
def SETUO  : CondCode<"FCMP_UNO">;
def SETUEQ : CondCode<"FCMP_UEQ">;
def SETUGT : CondCode<"FCMP_UGT", "ICMP_UGT">;
def SETUGE : CondCode<"FCMP_UGE", "ICMP_UGE">;
def SETULT : CondCode<"FCMP_ULT", "ICMP_ULT">;
def SETULE : CondCode<"FCMP_ULE", "ICMP_ULE">;
def SETUNE : CondCode<"FCMP_UNE">;
def SETEQ : CondCode<"", "ICMP_EQ">;
def SETGT : CondCode<"", "ICMP_SGT">;
def SETGE : CondCode<"", "ICMP_SGE">;
def SETLT : CondCode<"", "ICMP_SLT">;
def SETLE : CondCode<"", "ICMP_SLE">;
def SETNE : CondCode<"", "ICMP_NE">;

//===----------------------------------------------------------------------===//
// Selection DAG Node Transformation Functions.
//
// This mechanism allows targets to manipulate nodes in the output DAG once a
// match has been formed.  This is typically used to manipulate immediate
// values.
//
class SDNodeXForm<SDNode opc, code xformFunction> {
  SDNode Opcode = opc;
  code XFormFunction = xformFunction;
}

def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>;
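
// For example, a target might negate a matched immediate before emitting it.
// This is an illustrative sketch (NegImm32 is not defined in this file):
//
//   def NegImm32 : SDNodeXForm<imm, [{
//     return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
//   }]>;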

//===----------------------------------------------------------------------===//
// Selection DAG Pattern Fragments.
//
// Pattern fragments are reusable chunks of dags that match specific things.
// They can take arguments and have C++ predicates that control whether they
// match.  They are intended to make the patterns for common instructions more
// compact and readable.
//

/// PatFrags - Represents a set of pattern fragments.  Each single fragment
/// can match something on the DAG, from a single node to multiple nested other
/// fragments.   The whole set of fragments matches if any of the single
/// fragments match.  This allows e.g. matching an "add with overflow" and
/// a regular "add" with the same fragment set.
///
class PatFrags<dag ops, list<dag> frags, code pred = [{}],
               SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
  dag Operands = ops;
  list<dag> Fragments = frags;
  code PredicateCode = pred;
  code GISelPredicateCode = [{}];
  code ImmediateCode = [{}];
  SDNodeXForm OperandTransform = xform;

  // When this is set, the PredicateCode may refer to a constant Operands
  // vector which contains the captured nodes of the DAG, in the order listed
  // by the Operands field above.
  //
  // This is useful when Fragments involves associative / commutative
  // operators: a single piece of code can easily refer to all operands even
  // when re-associated / commuted variants of the fragment are matched.
  bit PredicateCodeUsesOperands = false;

  // Define a few pre-packaged predicates. This helps GlobalISel import
  // existing rules from SelectionDAG for many common cases.
  // They will be tested prior to the code in pred and must not be used in
  // ImmLeaf and its subclasses.

  // If set to true, a predicate is added that checks for the absence of use of
  // the first result.
  bit HasNoUse = ?;

  // Is the desired pre-packaged predicate for a load?
  bit IsLoad = ?;
  // Is the desired pre-packaged predicate for a store?
  bit IsStore = ?;
  // Is the desired pre-packaged predicate for an atomic?
  bit IsAtomic = ?;

  // cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
  // cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
  bit IsUnindexed = ?;

  // cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD
  bit IsNonExtLoad = ?;
  // cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
  bit IsAnyExtLoad = ?;
  // cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
  bit IsSignExtLoad = ?;
  // cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
  bit IsZeroExtLoad = ?;
  // !cast<StoreSDNode>(N)->isTruncatingStore();
  // cast<StoreSDNode>(N)->isTruncatingStore();
  bit IsTruncStore = ?;

  // cast<MemSDNode>(N)->getAddressSpace() ==
  // If this is empty, accept any address space.
  list<int> AddressSpaces = ?;

  // cast<MemSDNode>(N)->getAlign() >=
  // If this is empty, accept any alignment.
  int MinAlignment = ?;

  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Monotonic
  bit IsAtomicOrderingMonotonic = ?;
  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Acquire
  bit IsAtomicOrderingAcquire = ?;
  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::Release
  bit IsAtomicOrderingRelease = ?;
  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::AcquireRelease
  bit IsAtomicOrderingAcquireRelease = ?;
  // cast<AtomicSDNode>(N)->getOrdering() == AtomicOrdering::SequentiallyConsistent
  bit IsAtomicOrderingSequentiallyConsistent = ?;

  // isAcquireOrStronger(cast<AtomicSDNode>(N)->getOrdering())
  // !isAcquireOrStronger(cast<AtomicSDNode>(N)->getOrdering())
  bit IsAtomicOrderingAcquireOrStronger = ?;

  // isReleaseOrStronger(cast<AtomicSDNode>(N)->getOrdering())
  // !isReleaseOrStronger(cast<AtomicSDNode>(N)->getOrdering())
  bit IsAtomicOrderingReleaseOrStronger = ?;

  // cast<LoadSDNode>(N)->getMemoryVT() == MVT::<VT>;
  // cast<StoreSDNode>(N)->getMemoryVT() == MVT::<VT>;
  ValueType MemoryVT = ?;
  // cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
  // cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::<VT>;
  ValueType ScalarMemoryVT = ?;
}
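
// For example (an illustrative sketch, not a fragment defined in this file),
// a predicate can inspect the captured operands of a commutative fragment
// through the Operands vector, regardless of which commuted form matched:
//
//   def add_nonconst_rhs : PatFrag<(ops node:$a, node:$b),
//                                  (add node:$a, node:$b), [{
//     return !isa<ConstantSDNode>(Operands[1]);
//   }]> {
//     let PredicateCodeUsesOperands = true;
//   }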

// Patterns and PatFrags can also subclass GISelFlags to set flags that affect
// how GlobalISel behaves when matching them.
class GISelFlags {
  bit GIIgnoreCopies = ?;
}
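
// For example (an illustrative sketch; the instruction and register class
// names are hypothetical), a pattern can ask GlobalISel to look through COPY
// instructions while matching:
//
//   let GIIgnoreCopies = 1 in
//   def : Pat<(and GPR32:$a, GPR32:$b), (HYP_AND GPR32:$a, GPR32:$b)>;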

// PatFrag - A version of PatFrags matching only a single fragment.
class PatFrag<dag ops, dag frag, code pred = [{}],
              SDNodeXForm xform = NOOP_SDNodeXForm>
  : PatFrags<ops, [frag], pred, xform>;

// OutPatFrag is a pattern fragment that is used as part of an output pattern
// (not an input pattern). These do not have predicates or transforms, but are
// used to avoid repeated subexpressions in output patterns.
class OutPatFrag<dag ops, dag frag>
 : PatFrag<ops, frag, [{}], NOOP_SDNodeXForm>;

// PatLeafs are pattern fragments that have no operands.  This is just a helper
// to define immediates and other common things concisely.
class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
 : PatFrag<(ops), frag, pred, xform>;


// ImmLeaf is a pattern fragment with a constraint on the immediate.  The
// constraint is a function that is run on the immediate (always with the value
// sign extended out to an int64_t) as Imm.  For example:
//
//  def immSExt8 : ImmLeaf<i16, [{ return (char)Imm == Imm; }]>;
//
// this is a more convenient form for matching 'imm' nodes than PatLeaf, and is
// preferred over PatLeaf because it allows the code generator to reason more
// about the constraint.
//
// If FastIsel should ignore all instructions that have an operand of this type,
// the FastIselShouldIgnore flag can be set.  This is an optimization to reduce
// the code size of the generated fast instruction selector.
class ImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm,
              SDNode ImmNode = imm>
  : PatFrag<(ops), (vt ImmNode), [{}], xform> {
  let ImmediateCode = pred;
  bit FastIselShouldIgnore = false;

  // Is the data type of the immediate an APInt?
  bit IsAPInt = false;

  // Is the data type of the immediate an APFloat?
  bit IsAPFloat = false;
}

// Convenience wrapper for ImmLeaf to use timm/TargetConstant instead
// of imm/Constant.
class TImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm,
  SDNode ImmNode = timm> : ImmLeaf<vt, pred, xform, ImmNode>;
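
// For example (an illustrative sketch): match a target constant that fits in
// an unsigned 8-bit field:
//
//   def tuimm8 : TImmLeaf<i32, [{ return isUInt<8>(Imm); }]>;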

// An ImmLeaf except that Imm is an APInt. This is useful when you need to
// zero-extend the immediate instead of sign-extending it.
//
// Note that FastISel does not currently understand IntImmLeaf and will not
// generate code for rules that make use of it. As such, it does not make sense
// to replace ImmLeaf with IntImmLeaf. However, replacing PatLeaf with an
// IntImmLeaf will allow GlobalISel to import the rule.
class IntImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm>
    : ImmLeaf<vt, pred, xform> {
  let IsAPInt = true;
  let FastIselShouldIgnore = true;
}
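
// For example (an illustrative sketch): accept an immediate whose active bits
// fit in 8 bits; with APInt the value is inspected without sign extension:
//
//   def iuimm8 : IntImmLeaf<i32, [{ return Imm.getActiveBits() <= 8; }]>;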

// An ImmLeaf except that Imm is an APFloat.
//
// Note that FastISel does not currently understand FPImmLeaf and will not
// generate code for rules that make use of it.
class FPImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm>
  : ImmLeaf<vt, pred, xform, fpimm> {
  let IsAPFloat = true;
  let FastIselShouldIgnore = true;
}
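
// For example (an illustrative sketch): match the floating-point constant
// +0.0 exactly:
//
//   def fpimm_pos_zero : FPImmLeaf<f32, [{ return Imm.isPosZero(); }]>;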

// Leaf fragments.

def vtInt      : PatLeaf<(vt),  [{ return N->getVT().isInteger(); }]>;
def vtFP       : PatLeaf<(vt),  [{ return N->getVT().isFloatingPoint(); }]>;

// Use ISD::isConstantSplatVectorAllOnes or ISD::isConstantSplatVectorAllZeros
// to look for the corresponding build_vector or splat_vector. Will look through
// bitcasts and check for either opcode, except when used as a pattern root.
// When used as a pattern root, only fixed-length build_vector and scalable
// splat_vector are supported.
def immAllOnesV  : SDPatternOperator; // ISD::isConstantSplatVectorAllOnes
def immAllZerosV : SDPatternOperator; // ISD::isConstantSplatVectorAllZeros

// Other helper fragments.
def not  : PatFrag<(ops node:$in), (xor node:$in, -1)>;
def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;
def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;

def zanyext : PatFrags<(ops node:$op),
                       [(zext node:$op),
                        (anyext node:$op)]>;

// null_frag - The null pattern operator is used in multiclass instantiations
// which accept an SDPatternOperator for use in matching patterns for internal
// definitions. When expanding a pattern, if the null fragment is referenced
// in the expansion, the pattern is discarded and it is as if '[]' had been
// specified. This allows multiclasses to make their isel patterns optional.
def null_frag : SDPatternOperator;
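
// For example (an illustrative sketch; the multiclass, register class, and
// instruction names are hypothetical), a multiclass can default its operator
// to null_frag so that instantiations without a real fragment emit no pattern:
//
//   multiclass UnaryPats<SDPatternOperator op = null_frag> {
//     def : Pat<(op GPR32:$src), (HYP_OP GPR32:$src)>;
//   }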

// load fragments.
def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr)> {
  let IsLoad = true;
  let IsUnindexed = true;
}
def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
  let IsLoad = true;
  let IsNonExtLoad = true;
}

// extending load fragments.
def extload   : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
  let IsLoad = true;
  let IsAnyExtLoad = true;
}
def sextload  : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
  let IsLoad = true;
  let IsSignExtLoad = true;
}
def zextload  : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
  let IsLoad = true;
  let IsZeroExtLoad = true;
}

def extloadi1  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i1;
}
def extloadi8  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i8;
}
def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i16;
}
def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i32;
}
def extloadf16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = f16;
}
def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = f32;
}
def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = f64;
}

def sextloadi1  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i1;
}
def sextloadi8  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i8;
}
def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i16;
}
def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i32;
}

def zextloadi1  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i1;
}
def zextloadi8  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i8;
}
def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i16;
}
def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let MemoryVT = i32;
}

def extloadvi1  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i1;
}
def extloadvi8  : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i8;
}
def extloadvi16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i16;
}
def extloadvi32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i32;
}
def extloadvf16 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = f16;
}
def extloadvf32 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = f32;
}
def extloadvf64 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = f64;
}

def sextloadvi1  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i1;
}
def sextloadvi8  : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i8;
}
def sextloadvi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i16;
}
def sextloadvi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i32;
}

def zextloadvi1  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i1;
}
def zextloadvi8  : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i8;
}
def zextloadvi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i16;
}
def zextloadvi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = true;
  let ScalarMemoryVT = i32;
}

// store fragments.
def unindexedstore : PatFrag<(ops node:$val, node:$ptr),
                             (st node:$val, node:$ptr)> {
  let IsStore = true;
  let IsUnindexed = true;
}
def store : PatFrag<(ops node:$val, node:$ptr),
                    (unindexedstore node:$val, node:$ptr)> {
  let IsStore = true;
  let IsTruncStore = false;
}

// truncstore fragments.
def truncstore : PatFrag<(ops node:$val, node:$ptr),
                         (unindexedstore node:$val, node:$ptr)> {
  let IsStore = true;
  let IsTruncStore = true;
}
def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let MemoryVT = i8;
  let IsTruncStore = true;
}
def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let MemoryVT = i16;
  let IsTruncStore = true;
}
def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let MemoryVT = i32;
  let IsTruncStore = true;
}
def truncstoref16 : PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let MemoryVT = f16;
}
def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let MemoryVT = f32;
}
def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let MemoryVT = f64;
}

def truncstorevi8 : PatFrag<(ops node:$val, node:$ptr),
                            (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let ScalarMemoryVT = i8;
}

def truncstorevi16 : PatFrag<(ops node:$val, node:$ptr),
                             (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let ScalarMemoryVT = i16;
}

def truncstorevi32 : PatFrag<(ops node:$val, node:$ptr),
                             (truncstore node:$val, node:$ptr)> {
  let IsStore = true;
  let ScalarMemoryVT = i32;
}

// indexed store fragments.
def istore : PatFrag<(ops node:$val, node:$base, node:$offset),
                     (ist node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let IsTruncStore = false;
}

def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
                        (istore node:$val, node:$base, node:$offset), [{
  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
}]>;
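
// A target with pre-indexed stores matches pre_store with an instruction that
// also defines the updated base register, e.g. (sketch, hypothetical names):
//
//   def : Pat<(pre_store GPR:$val, GPR:$base, simm9:$off),
//             (STRpre GPR:$val, GPR:$base, simm9:$off)>;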

def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset),
                          (ist node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let IsTruncStore = true;
}
def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
                          (itruncstore node:$val, node:$base, node:$offset), [{
  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
}]>;
def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
                            (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i1;
}
def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                            (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i8;
}
def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i16;
}
def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i32;
}
def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = f32;
}
def pre_truncstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let ScalarMemoryVT = i8;
}
def pre_truncstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (pre_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let ScalarMemoryVT = i16;
}

def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                         (istore node:$val, node:$ptr, node:$offset), [{
  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
}]>;

def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
                           (itruncstore node:$val, node:$base, node:$offset), [{
  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
}]>;
def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i1;
}
def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                             (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i8;
}
def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i16;
}
def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = i32;
}
def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let MemoryVT = f32;
}
def post_truncstvi8 : PatFrag<(ops node:$val, node:$base, node:$offset),
                              (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let ScalarMemoryVT = i8;
}
def post_truncstvi16 : PatFrag<(ops node:$val, node:$base, node:$offset),
                               (post_truncst node:$val, node:$base, node:$offset)> {
  let IsStore = true;
  let ScalarMemoryVT = i16;
}

// A helper for matching undef or freeze undef
def undef_or_freeze_undef : PatFrags<(ops), [(undef), (freeze undef)]>;

// TODO: Split these into volatile and unordered flavors to enable
// optimizations that are legal for only one of them.  (See D66309)
def simple_load : PatFrag<(ops node:$ptr),
                          (load node:$ptr), [{
  return cast<LoadSDNode>(N)->isSimple();
}]>;
def simple_store : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->isSimple();
}]>;

// nontemporal store fragments.
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->isNonTemporal();
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (nontemporalstore node:$val, node:$ptr), [{
  StoreSDNode *St = cast<StoreSDNode>(N);
  return St->getAlign() >= St->getMemoryVT().getStoreSize();
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (nontemporalstore node:$val, node:$ptr), [{
  StoreSDNode *St = cast<StoreSDNode>(N);
  return St->getAlign() < St->getMemoryVT().getStoreSize();
}]>;

// nontemporal load fragments.
def nontemporalload : PatFrag<(ops node:$ptr),
                               (load node:$ptr), [{
  return cast<LoadSDNode>(N)->isNonTemporal();
}]>;

def alignednontemporalload : PatFrag<(ops node:$ptr),
                                      (nontemporalload node:$ptr), [{
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
}]>;

// setcc convenience fragments.
def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETOEQ)>;
def setogt : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETOGT)>;
def setoge : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETOGE)>;
def setolt : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETOLT)>;
def setole : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETOLE)>;
def setone : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETONE)>;
def seto   : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETO)>;
def setuo  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETUO)>;
def setueq : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETUEQ)>;
def setugt : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETUGT)>;
def setuge : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETUGE)>;
def setult : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETULT)>;
def setule : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETULE)>;
def setune : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETUNE)>;
def seteq  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETEQ)>;
def setgt  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETGT)>;
def setge  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETGE)>;
def setlt  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETLT)>;
def setle  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETLE)>;
def setne  : PatFrag<(ops node:$lhs, node:$rhs),
                     (setcc node:$lhs, node:$rhs, SETNE)>;
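
// These are pure shorthand; with them a target can write (hypothetical
// instruction name):
//
//   def : Pat<(setugt GPR:$a, GPR:$b), (SLTU GPR:$b, GPR:$a)>;
//
// instead of spelling out (setcc GPR:$a, GPR:$b, SETUGT) each time.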

// We don't have strict FP extended loads as single DAG nodes, but we can
// still provide convenience fragments to match those operations.
def strict_extloadf32 : PatFrag<(ops node:$ptr),
                                (strict_fpextend (f32 (load node:$ptr)))>;
def strict_extloadf64 : PatFrag<(ops node:$ptr),
                                (strict_fpextend (f64 (load node:$ptr)))>;

// Convenience fragments to match both strict and non-strict fp operations
def any_fadd       : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fadd node:$lhs, node:$rhs),
                               (fadd node:$lhs, node:$rhs)]>;
def any_fsub       : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fsub node:$lhs, node:$rhs),
                               (fsub node:$lhs, node:$rhs)]>;
def any_fmul       : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fmul node:$lhs, node:$rhs),
                               (fmul node:$lhs, node:$rhs)]>;
def any_fdiv       : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fdiv node:$lhs, node:$rhs),
                               (fdiv node:$lhs, node:$rhs)]>;
def any_frem       : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_frem node:$lhs, node:$rhs),
                               (frem node:$lhs, node:$rhs)]>;
def any_fma        : PatFrags<(ops node:$src1, node:$src2, node:$src3),
                              [(strict_fma node:$src1, node:$src2, node:$src3),
                               (fma node:$src1, node:$src2, node:$src3)]>;
def any_fsqrt      : PatFrags<(ops node:$src),
                              [(strict_fsqrt node:$src),
                               (fsqrt node:$src)]>;
def any_fsin       : PatFrags<(ops node:$src),
                              [(strict_fsin node:$src),
                               (fsin node:$src)]>;
def any_fcos       : PatFrags<(ops node:$src),
                              [(strict_fcos node:$src),
                               (fcos node:$src)]>;
def any_fexp2      : PatFrags<(ops node:$src),
                              [(strict_fexp2 node:$src),
                               (fexp2 node:$src)]>;
def any_fpow       : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fpow node:$lhs, node:$rhs),
                               (fpow node:$lhs, node:$rhs)]>;
def any_fldexp      : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fldexp node:$lhs, node:$rhs),
                               (fldexp node:$lhs, node:$rhs)]>;
def any_flog2      : PatFrags<(ops node:$src),
                              [(strict_flog2 node:$src),
                               (flog2 node:$src)]>;
def any_frint      : PatFrags<(ops node:$src),
                              [(strict_frint node:$src),
                               (frint node:$src)]>;
def any_lrint      : PatFrags<(ops node:$src),
                              [(strict_lrint node:$src),
                               (lrint node:$src)]>;
def any_llrint     : PatFrags<(ops node:$src),
                              [(strict_llrint node:$src),
                               (llrint node:$src)]>;
def any_fnearbyint : PatFrags<(ops node:$src),
                              [(strict_fnearbyint node:$src),
                               (fnearbyint node:$src)]>;
def any_fceil      : PatFrags<(ops node:$src),
                              [(strict_fceil node:$src),
                               (fceil node:$src)]>;
def any_ffloor     : PatFrags<(ops node:$src),
                              [(strict_ffloor node:$src),
                               (ffloor node:$src)]>;
def any_lround     : PatFrags<(ops node:$src),
                              [(strict_lround node:$src),
                               (lround node:$src)]>;
def any_llround    : PatFrags<(ops node:$src),
                              [(strict_llround node:$src),
                               (llround node:$src)]>;
def any_fround     : PatFrags<(ops node:$src),
                              [(strict_fround node:$src),
                               (fround node:$src)]>;
def any_froundeven : PatFrags<(ops node:$src),
                              [(strict_froundeven node:$src),
                               (froundeven node:$src)]>;
def any_ftrunc     : PatFrags<(ops node:$src),
                              [(strict_ftrunc node:$src),
                               (ftrunc node:$src)]>;
def any_fmaxnum    : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fmaxnum node:$lhs, node:$rhs),
                               (fmaxnum node:$lhs, node:$rhs)]>;
def any_fminnum    : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fminnum node:$lhs, node:$rhs),
                               (fminnum node:$lhs, node:$rhs)]>;
def any_fmaximum   : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fmaximum node:$lhs, node:$rhs),
                               (fmaximum node:$lhs, node:$rhs)]>;
def any_fminimum   : PatFrags<(ops node:$lhs, node:$rhs),
                              [(strict_fminimum node:$lhs, node:$rhs),
                               (fminimum node:$lhs, node:$rhs)]>;
def any_fpround    : PatFrags<(ops node:$src),
                              [(strict_fpround node:$src),
                               (fpround node:$src)]>;
def any_fpextend   : PatFrags<(ops node:$src),
                              [(strict_fpextend node:$src),
                               (fpextend node:$src)]>;
def any_extloadf32 : PatFrags<(ops node:$ptr),
                              [(strict_extloadf32 node:$ptr),
                               (extloadf32 node:$ptr)]>;
def any_extloadf64 : PatFrags<(ops node:$ptr),
                              [(strict_extloadf64 node:$ptr),
                               (extloadf64 node:$ptr)]>;
def any_fp_to_sint : PatFrags<(ops node:$src),
                              [(strict_fp_to_sint node:$src),
                               (fp_to_sint node:$src)]>;
def any_fp_to_uint : PatFrags<(ops node:$src),
                              [(strict_fp_to_uint node:$src),
                               (fp_to_uint node:$src)]>;
def any_sint_to_fp : PatFrags<(ops node:$src),
                              [(strict_sint_to_fp node:$src),
                               (sint_to_fp node:$src)]>;
def any_uint_to_fp : PatFrags<(ops node:$src),
                              [(strict_uint_to_fp node:$src),
                               (uint_to_fp node:$src)]>;
def any_fsetcc : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
                          [(strict_fsetcc node:$lhs, node:$rhs, node:$pred),
                           (setcc node:$lhs, node:$rhs, node:$pred)]>;
def any_fsetccs : PatFrags<(ops node:$lhs, node:$rhs, node:$pred),
                          [(strict_fsetccs node:$lhs, node:$rhs, node:$pred),
                           (setcc node:$lhs, node:$rhs, node:$pred)]>;
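
// A single pattern written against an any_* fragment covers both the
// constrained (strict) node and the default node, e.g. (hypothetical
// instruction name):
//
//   def : Pat<(f64 (any_fadd FPR:$a, FPR:$b)), (FADDd FPR:$a, FPR:$b)>;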

multiclass binary_atomic_op_ord {
  def NAME#_monotonic : PatFrag<(ops node:$ptr, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingMonotonic = true;
  }
  def NAME#_acquire : PatFrag<(ops node:$ptr, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingAcquire = true;
  }
  def NAME#_release : PatFrag<(ops node:$ptr, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingRelease = true;
  }
  def NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingAcquireRelease = true;
  }
  def NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingSequentiallyConsistent = true;
  }
}

multiclass ternary_atomic_op_ord {
  def NAME#_monotonic : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingMonotonic = true;
  }
  def NAME#_acquire : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingAcquire = true;
  }
  def NAME#_release : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingRelease = true;
  }
  def NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingAcquireRelease = true;
  }
  def NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
      (!cast<SDPatternOperator>(NAME) node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let IsAtomicOrderingSequentiallyConsistent = true;
  }
}

multiclass binary_atomic_op<SDNode atomic_op, bit IsInt = 1> {
  def _8 : PatFrag<(ops node:$ptr, node:$val),
                   (atomic_op  node:$ptr, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i8, ?);
  }
  def _16 : PatFrag<(ops node:$ptr, node:$val),
                    (atomic_op node:$ptr, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i16, f16);
  }
  def _32 : PatFrag<(ops node:$ptr, node:$val),
                    (atomic_op node:$ptr, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i32, f32);
  }
  def _64 : PatFrag<(ops node:$ptr, node:$val),
                    (atomic_op node:$ptr, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = !if(IsInt, i64, f64);
  }

  defm NAME#_8  : binary_atomic_op_ord;
  defm NAME#_16 : binary_atomic_op_ord;
  defm NAME#_32 : binary_atomic_op_ord;
  defm NAME#_64 : binary_atomic_op_ord;
}

multiclass ternary_atomic_op<SDNode atomic_op> {
  def _8 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
                   (atomic_op  node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = i8;
  }
  def _16 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
                    (atomic_op node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = i16;
  }
  def _32 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
                    (atomic_op node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = i32;
  }
  def _64 : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
                    (atomic_op node:$ptr, node:$cmp, node:$val)> {
    let IsAtomic = true;
    let MemoryVT = i64;
  }

  defm NAME#_8  : ternary_atomic_op_ord;
  defm NAME#_16 : ternary_atomic_op_ord;
  defm NAME#_32 : ternary_atomic_op_ord;
  defm NAME#_64 : ternary_atomic_op_ord;
}

defm atomic_load_add  : binary_atomic_op<atomic_load_add>;
defm atomic_swap      : binary_atomic_op<atomic_swap>;
defm atomic_load_sub  : binary_atomic_op<atomic_load_sub>;
defm atomic_load_and  : binary_atomic_op<atomic_load_and>;
defm atomic_load_clr  : binary_atomic_op<atomic_load_clr>;
defm atomic_load_or   : binary_atomic_op<atomic_load_or>;
defm atomic_load_xor  : binary_atomic_op<atomic_load_xor>;
defm atomic_load_nand : binary_atomic_op<atomic_load_nand>;
defm atomic_load_min  : binary_atomic_op<atomic_load_min>;
defm atomic_load_max  : binary_atomic_op<atomic_load_max>;
defm atomic_load_umin : binary_atomic_op<atomic_load_umin>;
defm atomic_load_umax : binary_atomic_op<atomic_load_umax>;
defm atomic_store     : binary_atomic_op<atomic_store>;
defm atomic_cmp_swap  : ternary_atomic_op<atomic_cmp_swap>;
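
// Each defm above produces size-suffixed fragments (atomic_load_add_8, _16,
// _32, _64) plus the ordering-qualified variants from binary_atomic_op_ord
// (e.g. atomic_load_add_32_monotonic, atomic_load_add_32_acquire, ...).
// Targets select them like any other fragment (sketch, hypothetical
// instruction name):
//
//   def : Pat<(atomic_load_add_32_seq_cst GPR:$ptr, GPR:$val),
//             (AMOADD32 GPR:$ptr, GPR:$val)>;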

/// Atomic load which zeroes the excess high bits.
def atomic_load_zext :
  PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
  let IsZeroExtLoad = true;
}

/// Atomic load which sign extends the excess high bits.
def atomic_load_sext :
  PatFrag<(ops node:$ptr), (atomic_load node:$ptr)> {
  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
  let IsSignExtLoad = true;
}

def atomic_load_8 :
  PatFrag<(ops node:$ptr),
          (atomic_load node:$ptr)> {
  let IsAtomic = true;
  let MemoryVT = i8;
}

def atomic_load_16 :
  PatFrag<(ops node:$ptr),
          (atomic_load node:$ptr)> {
  let IsAtomic = true;
  let MemoryVT = i16;
}

def atomic_load_32 :
  PatFrag<(ops node:$ptr),
          (atomic_load node:$ptr)> {
  let IsAtomic = true;
  let MemoryVT = i32;
}
def atomic_load_64 :
  PatFrag<(ops node:$ptr),
          (atomic_load node:$ptr)> {
  let IsAtomic = true;
  let MemoryVT = i64;
}

def atomic_load_zext_8 :
  PatFrag<(ops node:$ptr), (atomic_load_zext node:$ptr)> {
  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
  let MemoryVT = i8;
}

def atomic_load_zext_16 :
  PatFrag<(ops node:$ptr), (atomic_load_zext node:$ptr)> {
  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
  let MemoryVT = i16;
}

def atomic_load_sext_8 :
  PatFrag<(ops node:$ptr), (atomic_load_sext node:$ptr)> {
  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
  let MemoryVT = i8;
}

def atomic_load_sext_16 :
  PatFrag<(ops node:$ptr), (atomic_load_sext node:$ptr)> {
  let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
  let MemoryVT = i16;
}

// Atomic load which zeroes or anyextends the high bits.
def atomic_load_az_8 : PatFrags<(ops node:$op),
                                [(atomic_load_8 node:$op),
                                 (atomic_load_zext_8 node:$op)]>;

// Atomic load which zeroes or anyextends the high bits.
def atomic_load_az_16 : PatFrags<(ops node:$op),
                                 [(atomic_load_16 node:$op),
                                  (atomic_load_zext_16 node:$op)]>;

def nonext_masked_gather :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  return cast<MaskedGatherSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
}]>;

// Any extending masked gather fragments.
def ext_masked_gather_i8 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::EXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def ext_masked_gather_i16 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::EXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def ext_masked_gather_i32 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::EXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i32;
}]>;

// Sign extending masked gather fragments.
def sext_masked_gather_i8 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::SEXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def sext_masked_gather_i16 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::SEXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def sext_masked_gather_i32 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::SEXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i32;
}]>;

// Zero extending masked gather fragments.
def zext_masked_gather_i8 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::ZEXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_gather_i16 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::ZEXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_gather_i32 :
  PatFrag<(ops node:$def, node:$pred, node:$ptr, node:$idx),
          (masked_gather node:$def, node:$pred, node:$ptr, node:$idx), [{
  auto MGN = cast<MaskedGatherSDNode>(N);
  return MGN->getExtensionType() == ISD::ZEXTLOAD &&
         MGN->getMemoryVT().getScalarType() == MVT::i32;
}]>;

// Any/Zero extending masked gather fragments.
def azext_masked_gather_i8 :
  PatFrags<(ops node:$def, node:$pred, node:$ptr, node:$idx),
           [(ext_masked_gather_i8 node:$def, node:$pred, node:$ptr, node:$idx),
            (zext_masked_gather_i8 node:$def, node:$pred, node:$ptr, node:$idx)]>;
def azext_masked_gather_i16 :
  PatFrags<(ops node:$def, node:$pred, node:$ptr, node:$idx),
           [(ext_masked_gather_i16 node:$def, node:$pred, node:$ptr, node:$idx),
            (zext_masked_gather_i16 node:$def, node:$pred, node:$ptr, node:$idx)]>;
def azext_masked_gather_i32 :
  PatFrags<(ops node:$def, node:$pred, node:$ptr, node:$idx),
           [(ext_masked_gather_i32 node:$def, node:$pred, node:$ptr, node:$idx),
            (zext_masked_gather_i32 node:$def, node:$pred, node:$ptr, node:$idx)]>;

def nontrunc_masked_scatter :
  PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
          (masked_scatter node:$val, node:$pred, node:$ptr, node:$idx), [{
  return !cast<MaskedScatterSDNode>(N)->isTruncatingStore();
}]>;

// Truncating masked scatter fragments.
def trunc_masked_scatter_i8 :
  PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
          (masked_scatter node:$val, node:$pred, node:$ptr, node:$idx), [{
  auto MSN = cast<MaskedScatterSDNode>(N);
  return MSN->isTruncatingStore() &&
         MSN->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_scatter_i16 :
  PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
          (masked_scatter node:$val, node:$pred, node:$ptr, node:$idx), [{
  auto MSN = cast<MaskedScatterSDNode>(N);
  return MSN->isTruncatingStore() &&
         MSN->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_scatter_i32 :
  PatFrag<(ops node:$val, node:$pred, node:$ptr, node:$idx),
          (masked_scatter node:$val, node:$pred, node:$ptr, node:$idx), [{
  auto MSN = cast<MaskedScatterSDNode>(N);
  return MSN->isTruncatingStore() &&
         MSN->getMemoryVT().getScalarType() == MVT::i32;
}]>;
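
// Targets with native gather/scatter instructions select these fragments the
// same way, e.g. (sketch, hypothetical names):
//
//   def : Pat<(v4i32 (sext_masked_gather_i8 (v4i32 undef), VPR:$mask,
//                                           GPR:$base, VPR:$idx)),
//             (GATHER_SXTB GPR:$base, VPR:$idx, VPR:$mask)>;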

//===----------------------------------------------------------------------===//
// Selection DAG Pattern Support.
//
// Patterns are what are actually matched against by the target-flavored
// instruction selection DAG.  Instructions defined by the target implicitly
// define patterns in most cases, but patterns can also be explicitly added when
// an operation is defined by a sequence of instructions (e.g. loading a large
// immediate value on RISC targets that do not support immediates as large as
// their GPRs).
//

class Pattern<dag patternToMatch, list<dag> resultInstrs> {
  dag             PatternToMatch  = patternToMatch;
  list<dag>       ResultInstrs    = resultInstrs;
  list<Predicate> Predicates      = [];  // See class Instruction in Target.td.
  int             AddedComplexity = 0;   // See class Instruction in Target.td.
}

// Pat - A simple (but common) form of a pattern, which produces a simple result
// not needing a full list.
class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
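
// For example, a target without a register-immediate subtract could expand an
// integer negate through a two-instruction pattern (hypothetical instruction
// names):
//
//   def : Pat<(ineg GPR:$src), (SUBrr (MOVi 0), GPR:$src)>;
//
// which is shorthand for Pattern<(ineg GPR:$src), [(SUBrr (MOVi 0), GPR:$src)]>.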

//===----------------------------------------------------------------------===//
// Complex pattern definitions.
//

// Complex patterns, e.g. the X86 addressing mode, require pattern-matching
// code in C++. Ty is the type of the return value; NumOperands is the number
// of operands returned by the select function; SelectFunc is the name of the
// C++ function used to match the pattern; RootNodes is the list of possible
// root nodes of the sub-dags to match.
// e.g. X86 addressing mode - def addr : ComplexPattern<iPTR, 4, "SelectAddr", [add]>;
//
class ComplexPattern<ValueType ty, int numops, string fn,
                     list<SDNode> roots = [], list<SDNodeProperty> props = [],
                     int complexity = -1> {
  ValueType Ty = ty;
  int NumOperands = numops;
  string SelectFunc = fn;
  list<SDNode> RootNodes = roots;
  list<SDNodeProperty> Properties = props;
  int Complexity = complexity;
}
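
// Once declared, a ComplexPattern is used like any other pattern leaf; the
// generated matcher calls the SelectFunc (here "SelectAddr") and binds the
// operands it produces. Sketch, reusing the addr example above with an
// illustrative instruction name:
//
//   def : Pat<(i32 (load addr:$src)), (LD32 addr:$src)>;
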
//===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the TargetMachine and LLVMTargetMachine classes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETMACHINE_H
#define LLVM_TARGET_TARGETMACHINE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/PGOOptions.h"
#include "llvm/Target/CGPassBuilderOption.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <optional>
#include <string>
#include <utility>

namespace llvm {

class AAManager;
using ModulePassManager = PassManager<Module>;

class Function;
class GlobalValue;
class MachineFunctionPassManager;
class MachineFunctionAnalysisManager;
class MachineModuleInfoWrapperPass;
class Mangler;
class MCAsmInfo;
class MCContext;
class MCInstrInfo;
class MCRegisterInfo;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;
class raw_pwrite_stream;
class PassBuilder;
struct PerFunctionMIParsingState;
class SMDiagnostic;
class SMRange;
class Target;
class TargetIntrinsicInfo;
class TargetIRAnalysis;
class TargetTransformInfo;
class TargetLoweringObjectFile;
class TargetPassConfig;
class TargetSubtargetInfo;

// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
class PassManagerBase;
}
using legacy::PassManagerBase;

struct MachineFunctionInfo;
namespace yaml {
struct MachineFunctionInfo;
}

//===----------------------------------------------------------------------===//
///
/// Primary interface to the complete machine description for the target
/// machine.  All target-specific information should be accessible through this
/// interface.
///
class TargetMachine {
protected: // Can only create subclasses.
  TargetMachine(const Target &T, StringRef DataLayoutString,
                const Triple &TargetTriple, StringRef CPU, StringRef FS,
                const TargetOptions &Options);

  /// The Target that this machine was created for.
  const Target &TheTarget;

  /// DataLayout for the target: keep ABI type size and alignment.
  ///
  /// The DataLayout is created based on the string representation provided
  /// during construction. It is kept here only to avoid reparsing the string
  /// but should not really be used during compilation, because it has an
  /// internal cache that is context specific.
  const DataLayout DL;

  /// Triple string, CPU name, and target feature strings the TargetMachine
  /// instance is created with.
  Triple TargetTriple;
  std::string TargetCPU;
  std::string TargetFS;

  Reloc::Model RM = Reloc::Static;
  CodeModel::Model CMModel = CodeModel::Small;
  CodeGenOpt::Level OptLevel = CodeGenOpt::Default;

  /// Contains target specific asm information.
  std::unique_ptr<const MCAsmInfo> AsmInfo;
  std::unique_ptr<const MCRegisterInfo> MRI;
  std::unique_ptr<const MCInstrInfo> MII;
  std::unique_ptr<const MCSubtargetInfo> STI;

  unsigned RequireStructuredCFG : 1;
  unsigned O0WantsFastISel : 1;

  // PGO related tunables.
  std::optional<PGOOptions> PGOOption;

public:
  const TargetOptions DefaultOptions;
  mutable TargetOptions Options;

  TargetMachine(const TargetMachine &) = delete;
  void operator=(const TargetMachine &) = delete;
  virtual ~TargetMachine();

  const Target &getTarget() const { return TheTarget; }

  const Triple &getTargetTriple() const { return TargetTriple; }
  StringRef getTargetCPU() const { return TargetCPU; }
  StringRef getTargetFeatureString() const { return TargetFS; }
  void setTargetFeatureString(StringRef FS) { TargetFS = std::string(FS); }

  /// Virtual method implemented by subclasses that returns a reference to that
  /// target's TargetSubtargetInfo-derived member variable.
  virtual const TargetSubtargetInfo *getSubtargetImpl(const Function &) const {
    return nullptr;
  }
  virtual TargetLoweringObjectFile *getObjFileLowering() const {
    return nullptr;
  }

  /// Create the target's instance of MachineFunctionInfo
  virtual MachineFunctionInfo *
  createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F,
                            const TargetSubtargetInfo *STI) const {
    return nullptr;
  }

  /// Allocate and return a default initialized instance of the YAML
  /// representation for the MachineFunctionInfo.
  virtual yaml::MachineFunctionInfo *createDefaultFuncInfoYAML() const {
    return nullptr;
  }

  /// Allocate and initialize an instance of the YAML representation of the
  /// MachineFunctionInfo.
  virtual yaml::MachineFunctionInfo *
  convertFuncInfoToYAML(const MachineFunction &MF) const {
    return nullptr;
  }

  /// Parse out the target's MachineFunctionInfo from the YAML representation.
  virtual bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &,
                                        PerFunctionMIParsingState &PFS,
                                        SMDiagnostic &Error,
                                        SMRange &SourceRange) const {
    return false;
  }

  /// This method returns a pointer to the specified type of
  /// TargetSubtargetInfo.  In debug builds, it verifies that the object being
  /// returned is of the correct type.
  template <typename STC> const STC &getSubtarget(const Function &F) const {
    return *static_cast<const STC*>(getSubtargetImpl(F));
  }
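
  // Example usage from target code, where MySubtarget stands in for the
  // target's TargetSubtargetInfo subclass:
  //   const MySubtarget &ST = TM.getSubtarget<MySubtarget>(F);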

  /// Create a DataLayout.
  const DataLayout createDataLayout() const { return DL; }

  /// Test if a DataLayout is compatible with the CodeGen for this target.
  ///
  /// The LLVM Module owns a DataLayout that is used for the target independent
  /// optimizations and code generation. This hook provides a target specific
  /// check on the validity of this DataLayout.
  bool isCompatibleDataLayout(const DataLayout &Candidate) const {
    return DL == Candidate;
  }

  /// Get the pointer size for this target.
  ///
  /// This is the only time the DataLayout in the TargetMachine is used.
  unsigned getPointerSize(unsigned AS) const {
    return DL.getPointerSize(AS);
  }

  unsigned getPointerSizeInBits(unsigned AS) const {
    return DL.getPointerSizeInBits(AS);
  }

  unsigned getProgramPointerSize() const {
    return DL.getPointerSize(DL.getProgramAddressSpace());
  }

  unsigned getAllocaPointerSize() const {
    return DL.getPointerSize(DL.getAllocaAddrSpace());
  }

  /// Reset the target options based on the function's attributes.
  // FIXME: Remove TargetOptions that affect per-function code generation
  // from TargetMachine.
  void resetTargetOptions(const Function &F) const;

  /// Return target specific asm information.
  const MCAsmInfo *getMCAsmInfo() const { return AsmInfo.get(); }

  const MCRegisterInfo *getMCRegisterInfo() const { return MRI.get(); }
  const MCInstrInfo *getMCInstrInfo() const { return MII.get(); }
  const MCSubtargetInfo *getMCSubtargetInfo() const { return STI.get(); }

  /// If intrinsic information is available, return it.  If not, return null.
  virtual const TargetIntrinsicInfo *getIntrinsicInfo() const {
    return nullptr;
  }

  bool requiresStructuredCFG() const { return RequireStructuredCFG; }
  void setRequiresStructuredCFG(bool Value) { RequireStructuredCFG = Value; }

  /// Returns the code generation relocation model. The choices are static,
  /// PIC, dynamic-no-pic, and target default.
  Reloc::Model getRelocationModel() const;

  /// Returns the code model. The choices are small, kernel, medium, large, and
  /// target default.
  CodeModel::Model getCodeModel() const { return CMModel; }

  /// Set the code model.
  void setCodeModel(CodeModel::Model CM) { CMModel = CM; }

  bool isLargeData() const;

  bool isPositionIndependent() const;

  bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const;

  /// Returns true if this target uses emulated TLS.
  bool useEmulatedTLS() const;

  /// Returns the TLS model which should be used for the given global variable.
  TLSModel::Model getTLSModel(const GlobalValue *GV) const;

  /// Returns the optimization level: None, Less, Default, or Aggressive.
  CodeGenOpt::Level getOptLevel() const;

  /// Overrides the optimization level.
  void setOptLevel(CodeGenOpt::Level Level);

  void setFastISel(bool Enable) { Options.EnableFastISel = Enable; }
  bool getO0WantsFastISel() { return O0WantsFastISel; }
  void setO0WantsFastISel(bool Enable) { O0WantsFastISel = Enable; }
  void setGlobalISel(bool Enable) { Options.EnableGlobalISel = Enable; }
  void setGlobalISelAbort(GlobalISelAbortMode Mode) {
    Options.GlobalISelAbort = Mode;
  }
  void setMachineOutliner(bool Enable) {
    Options.EnableMachineOutliner = Enable;
  }
  void setSupportsDefaultOutlining(bool Enable) {
    Options.SupportsDefaultOutlining = Enable;
  }
  void setSupportsDebugEntryValues(bool Enable) {
    Options.SupportsDebugEntryValues = Enable;
  }

  void setCFIFixup(bool Enable) { Options.EnableCFIFixup = Enable; }

  bool getAIXExtendedAltivecABI() const {
    return Options.EnableAIXExtendedAltivecABI;
  }

  bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }

  /// Return true if unique basic block section names must be generated.
  bool getUniqueBasicBlockSectionNames() const {
    return Options.UniqueBasicBlockSectionNames;
  }

  /// Return true if data objects should be emitted into their own section,
  /// corresponds to -fdata-sections.
  bool getDataSections() const {
    return Options.DataSections;
  }

  /// Return true if functions should be emitted into their own section,
  /// corresponding to -ffunction-sections.
  bool getFunctionSections() const {
    return Options.FunctionSections;
  }

  /// Return true if visibility attribute should not be emitted in XCOFF,
  /// corresponding to -mignore-xcoff-visibility.
  bool getIgnoreXCOFFVisibility() const {
    return Options.IgnoreXCOFFVisibility;
  }

  /// Return true if XCOFF traceback table should be emitted,
  /// corresponding to -xcoff-traceback-table.
  bool getXCOFFTracebackTable() const { return Options.XCOFFTracebackTable; }

  /// If basic blocks should be emitted into their own section,
  /// corresponding to -fbasic-block-sections.
  llvm::BasicBlockSection getBBSectionsType() const {
    return Options.BBSections;
  }

  /// Get the list of functions and basic block ids that need unique sections.
  const MemoryBuffer *getBBSectionsFuncListBuf() const {
    return Options.BBSectionsFuncListBuf.get();
  }

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }

  void setPGOOption(std::optional<PGOOptions> PGOOpt) { PGOOption = PGOOpt; }
  const std::optional<PGOOptions> &getPGOOption() const { return PGOOption; }

  /// If the specified generic pointer could be assumed as a pointer to a
  /// specific address space, return that address space.
  ///
  /// When compiling for an offloading target, values may be prepared only on
  /// the host side, so the offloading target can assume certain properties
  /// about them.
  virtual unsigned getAssumedAddrSpace(const Value *V) const { return -1; }

  /// If the specified predicate checks whether a generic pointer falls within
  /// a specified address space, return that generic pointer and the address
  /// space being queried.
  ///
  /// Such predicates could be specified in @llvm.assume intrinsics for the
  /// optimizer to assume that the given generic pointer always falls within
  /// the address space based on that predicate.
  virtual std::pair<const Value *, unsigned>
  getPredicatedAddrSpace(const Value *V) const {
    return std::make_pair(nullptr, -1);
  }

  /// Get a \c TargetIRAnalysis appropriate for the target.
  ///
  /// This is used to construct the new pass manager's target IR analysis pass,
  /// set up appropriately for this target machine. Even the old pass manager
  /// uses this to answer queries about the IR.
  TargetIRAnalysis getTargetIRAnalysis() const;

  /// Return a TargetTransformInfo for a given function.
  ///
  /// The returned TargetTransformInfo is specialized to the subtarget
  /// corresponding to \p F.
  virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const;

  /// Allow the target to modify the pass pipeline.
  virtual void registerPassBuilderCallbacks(PassBuilder &) {}

  /// Allow the target to register alias analyses with the AAManager for use
  /// with the new pass manager. Only affects the "default" AAManager.
  virtual void registerDefaultAliasAnalyses(AAManager &) {}

  /// Add passes to the specified pass manager to get the specified file
  /// emitted.  Typically this will involve several steps of code generation.
  /// This method should return true if emission of this file type is not
  /// supported, or false on success.
  /// \p MMIWP is an optional parameter that, if set to non-nullptr,
  /// will be used to set the MachineModuleInfo for this PM.
  virtual bool
  addPassesToEmitFile(PassManagerBase &, raw_pwrite_stream &,
                      raw_pwrite_stream *, CodeGenFileType,
                      bool /*DisableVerify*/ = true,
                      MachineModuleInfoWrapperPass *MMIWP = nullptr) {
    return true;
  }

  /// Add passes to the specified pass manager to get machine code emitted with
  /// the MCJIT. This method returns true if machine code is not supported. It
  /// fills the MCContext Ctx pointer which can be used to build custom
  /// MCStreamer.
  ///
  virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&,
                                 raw_pwrite_stream &,
                                 bool /*DisableVerify*/ = true) {
    return true;
  }

  /// True if subtarget inserts the final scheduling pass on its own.
  ///
  /// Branch relaxation, which must happen after block placement, can
  /// on some targets (e.g. SystemZ) expose additional post-RA
  /// scheduling opportunities.
  virtual bool targetSchedulesPostRAScheduling() const { return false; }

  void getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV,
                         Mangler &Mang, bool MayAlwaysUsePrivate = false) const;
  MCSymbol *getSymbol(const GlobalValue *GV) const;

  /// The integer bit size to use for SjLj based exception handling.
  static constexpr unsigned DefaultSjLjDataSize = 32;
  virtual unsigned getSjLjDataSize() const { return DefaultSjLjDataSize; }

  static std::pair<int, int> parseBinutilsVersion(StringRef Version);

  /// getAddressSpaceForPseudoSourceKind - Given the kind of memory
  /// (e.g. stack) the target returns the corresponding address space.
  virtual unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
    return 0;
  }
};

/// This class describes a target machine that is implemented with the LLVM
/// target-independent code generator.
///
class LLVMTargetMachine : public TargetMachine {
protected: // Can only create subclasses.
  LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
                    const Triple &TT, StringRef CPU, StringRef FS,
                    const TargetOptions &Options, Reloc::Model RM,
                    CodeModel::Model CM, CodeGenOpt::Level OL);

  void initAsmInfo();

public:
  /// Get a TargetTransformInfo implementation for the target.
  ///
  /// The TTI returned uses the common code generator to answer queries about
  /// the IR.
  TargetTransformInfo getTargetTransformInfo(const Function &F) const override;

  /// Create a pass configuration object to be used by addPassToEmitX methods
  /// for generating a pipeline of CodeGen passes.
  virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);

  /// Add passes to the specified pass manager to get the specified file
  /// emitted.  Typically this will involve several steps of code generation.
  /// \p MMIWP is an optional parameter that, if set to non-nullptr,
  /// will be used to set the MachineModuleInfo for this PM.
  bool
  addPassesToEmitFile(PassManagerBase &PM, raw_pwrite_stream &Out,
                      raw_pwrite_stream *DwoOut, CodeGenFileType FileType,
                      bool DisableVerify = true,
                      MachineModuleInfoWrapperPass *MMIWP = nullptr) override;

  virtual Error buildCodeGenPipeline(ModulePassManager &,
                                     MachineFunctionPassManager &,
                                     MachineFunctionAnalysisManager &,
                                     raw_pwrite_stream &, raw_pwrite_stream *,
                                     CodeGenFileType, CGPassBuilderOption,
                                     PassInstrumentationCallbacks *) {
    return make_error<StringError>("buildCodeGenPipeline is not overridden",
                                   inconvertibleErrorCode());
  }

  virtual std::pair<StringRef, bool> getPassNameFromLegacyName(StringRef) {
    llvm_unreachable("getPassNameFromLegacyName is not overridden");
  }

  /// Add passes to the specified pass manager to get machine code emitted with
  /// the MCJIT. This method returns true if machine code is not supported. It
  /// fills the MCContext Ctx pointer which can be used to build custom
  /// MCStreamer.
  bool addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
                         raw_pwrite_stream &Out,
                         bool DisableVerify = true) override;

  /// Returns true if the target is expected to pass all machine verifier
  /// checks. This is a stopgap measure to fix targets one by one. We will
  /// remove this at some point and always enable the verifier when
  /// EXPENSIVE_CHECKS is enabled.
  virtual bool isMachineVerifierClean() const { return true; }

  /// Adds an AsmPrinter pass to the pipeline that prints assembly or
  /// machine code from the MI representation.
  bool addAsmPrinter(PassManagerBase &PM, raw_pwrite_stream &Out,
                     raw_pwrite_stream *DwoOut, CodeGenFileType FileType,
                     MCContext &Context);

  Expected<std::unique_ptr<MCStreamer>>
  createMCStreamer(raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
                   CodeGenFileType FileType, MCContext &Ctx);

  /// True if the target uses physical regs (as nearly all targets do). False
  /// for stack machines such as WebAssembly and other virtual-register
  /// machines. If true, all vregs must be allocated before PEI. If false, then
  /// callee-save register spilling and scavenging are not needed or used. If
  /// false, implicitly defined registers will still be assumed to be physical
  /// registers, except that variadic defs will be allocated vregs.
  virtual bool usesPhysRegsForValues() const { return true; }

  /// True if the target wants to use interprocedural register allocation by
  /// default. The -enable-ipra flag can be used to override this.
  virtual bool useIPRA() const {
    return false;
  }

  /// The default variant to use in unqualified `asm` instructions.
  /// If this returns 0, `asm "$(foo$|bar$)"` will evaluate to `asm "foo"`.
  virtual int unqualifiedInlineAsmVariant() const { return 0; }

  // MachineRegisterInfo callback function
  virtual void registerMachineRegisterInfoCallback(MachineFunction &MF) const {}
};

/// Helper method for getting the code model, returning Default if
/// CM does not have a value. The tiny and kernel models will produce
/// an error, so targets that support them or require more complex code model
/// selection logic should implement and call their own getEffectiveCodeModel.
inline CodeModel::Model
getEffectiveCodeModel(std::optional<CodeModel::Model> CM,
                      CodeModel::Model Default) {
  if (CM) {
    // By default, targets do not support the tiny and kernel models.
    if (*CM == CodeModel::Tiny)
      report_fatal_error("Target does not support the tiny CodeModel", false);
    if (*CM == CodeModel::Kernel)
      report_fatal_error("Target does not support the kernel CodeModel", false);
    return *CM;
  }
  return Default;
}
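
// Typical use in a target's TargetMachine constructor (sketch; RequestedCM is
// the std::optional<CodeModel::Model> passed down from createTargetMachine):
//   CodeModel::Model CM = getEffectiveCodeModel(RequestedCM, CodeModel::Small);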

} // end namespace llvm

#endif // LLVM_TARGET_TARGETMACHINE_H
//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces with which targets
// describe their calling conventions.
//
//===----------------------------------------------------------------------===//

class CCAction;
class CallingConv;

/// CCCustom - Calls a custom arg handling function.
class CCCustom<string fn> : CCAction {
  string FuncName = fn;
}

/// CCPredicateAction - Instances of this class check some predicate, then
/// delegate to another action if the predicate is true.
class CCPredicateAction<CCAction A> : CCAction {
  CCAction SubAction = A;
}

/// CCIfType - If the current argument is one of the specified types, apply
/// Action A.
class CCIfType<list<ValueType> vts, CCAction A> : CCPredicateAction<A> {
  list<ValueType> VTs = vts;
}

/// CCIf - If the predicate matches, apply A.
class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
  string Predicate = predicate;
}

/// CCIfByVal - If the current argument has ByVal parameter attribute, apply
/// Action A.
class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
}

/// CCIfPreallocated - If the current argument has Preallocated parameter attribute,
/// apply Action A.
class CCIfPreallocated<CCAction A> : CCIf<"ArgFlags.isPreallocated()", A> {
}

/// CCIfSwiftSelf - If the current argument has swiftself parameter attribute,
/// apply Action A.
class CCIfSwiftSelf<CCAction A> : CCIf<"ArgFlags.isSwiftSelf()", A> {
}

/// CCIfSwiftAsync - If the current argument has swiftasync parameter attribute,
/// apply Action A.
class CCIfSwiftAsync<CCAction A> : CCIf<"ArgFlags.isSwiftAsync()", A> {
}

/// CCIfSwiftError - If the current argument has swifterror parameter attribute,
/// apply Action A.
class CCIfSwiftError<CCAction A> : CCIf<"ArgFlags.isSwiftError()", A> {
}

/// CCIfCFGuardTarget - If the current argument has cfguardtarget parameter
/// attribute, apply Action A.
class CCIfCFGuardTarget<CCAction A> : CCIf<"ArgFlags.isCFGuardTarget()", A> {
}

/// CCIfConsecutiveRegs - If the current argument has InConsecutiveRegs
/// parameter attribute, apply Action A.
class CCIfConsecutiveRegs<CCAction A> : CCIf<"ArgFlags.isInConsecutiveRegs()", A> {
}

/// CCIfCC - Match if the current calling convention is 'CC'.
class CCIfCC<string CC, CCAction A>
  : CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}

/// CCIfInReg - If this argument is marked with the 'inreg' attribute, apply
/// the specified action.
class CCIfInReg<CCAction A> : CCIf<"ArgFlags.isInReg()", A> {}

/// CCIfNest - If this argument is marked with the 'nest' attribute, apply
/// the specified action.
class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {}

/// CCIfSplit - If this argument is marked with the 'split' attribute, apply
/// the specified action.
class CCIfSplit<CCAction A> : CCIf<"ArgFlags.isSplit()", A> {}

/// CCIfSRet - If this argument is marked with the 'sret' attribute, apply
/// the specified action.
class CCIfSRet<CCAction A> : CCIf<"ArgFlags.isSRet()", A> {}

/// CCIfVarArg - If the current function is vararg - apply the action
class CCIfVarArg<CCAction A> : CCIf<"State.isVarArg()", A> {}

/// CCIfNotVarArg - If the current function is not vararg - apply the action
class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}

/// CCIfPtrAddrSpace - If the top-level parent of the current argument has
/// pointer type in the specified address-space, apply Action A.
class CCIfPtrAddrSpace<int AS, CCAction A>
    : CCIf<"(ArgFlags.isPointer() && ArgFlags.getPointerAddrSpace() == " # AS # ")", A> {}

/// CCIfPtr - If the top-level parent of the current argument has pointer
/// type in some address-space, apply Action A.
class CCIfPtr<CCAction A> : CCIf<"ArgFlags.isPointer()", A> {}

/// CCAssignToReg - This action matches if there is a register in the specified
/// list that is still available.  If so, it assigns the value to the first
/// available register and succeeds.
class CCAssignToReg<list<Register> regList> : CCAction {
  list<Register> RegList = regList;
}

/// CCAssignToRegWithShadow - Same as CCAssignToReg, but with a list of
/// registers that become shadowed when a register from the first list is used.
class CCAssignToRegWithShadow<list<Register> regList,
                              list<Register> shadowList> : CCAction {
  list<Register> RegList = regList;
  list<Register> ShadowRegList = shadowList;
}

/// CCAssignToStack - This action always matches: it assigns the value to a
/// stack slot of the specified size and alignment on the stack.  If size is
/// zero then the ABI size is used; if align is zero then the ABI alignment
/// is used - these may depend on the target or subtarget.
class CCAssignToStack<int size, int align> : CCAction {
  int Size = size;
  int Align = align;
}

/// CCAssignToStackWithShadow - Same as CCAssignToStack, but with a list of
/// registers to be shadowed. Note that, unlike CCAssignToRegWithShadow, this
/// shadows ALL of the registers in shadowList.
class CCAssignToStackWithShadow<int size,
                                int align,
                                list<Register> shadowList> : CCAction {
  int Size = size;
  int Align = align;
  list<Register> ShadowRegList = shadowList;
}

/// CCAssignToRegAndStack - Same as CCAssignToReg, but also allocates a stack
/// slot when a register is assigned. Conceptually it behaves like
/// CCAssignToReg<regList> combined with CCAssignToStack<size, align>.
class CCAssignToRegAndStack<list<Register> regList, int size, int align>
    : CCAssignToReg<regList> {
  int Size = size;
  int Align = align;
}

/// CCPassByVal - This action always matches: it assigns the value to a stack
/// slot to implement ByVal aggregate parameter passing. Size and alignment
/// specify the minimum size and alignment for the stack slot.
class CCPassByVal<int size, int align> : CCAction {
  int Size = size;
  int Align = align;
}

/// CCPromoteToType - If applied, this promotes the specified current value to
/// the specified type.
class CCPromoteToType<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCPromoteToUpperBitsInType - If applied, this promotes the specified current
/// value to the specified type and shifts the value into the upper bits.
class CCPromoteToUpperBitsInType<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCBitConvertToType - If applied, this bitconverts the specified current
/// value to the specified type.
class CCBitConvertToType<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCTruncToType - If applied, this truncates the specified current value to
/// the specified type.
class CCTruncToType<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCPassIndirect - If applied, this stores the value to the stack and passes
/// the pointer as a normal argument.
class CCPassIndirect<ValueType destTy> : CCAction {
  ValueType DestTy = destTy;
}

/// CCDelegateTo - This action invokes the specified sub-calling-convention.
/// It succeeds if the sub-convention successfully assigns the value.
class CCDelegateTo<CallingConv cc> : CCAction {
  CallingConv CC = cc;
}

/// CallingConv - An instance of this is used to define each calling convention
/// that the target supports.
class CallingConv<list<CCAction> actions> {
  list<CCAction> Actions = actions;

  /// If true, this calling convention will be emitted as externally visible in
  /// the llvm namespace instead of as a static function.
  bit Entry = false;

  bit Custom = false;
}
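
// As an illustration (not a definition from this file; the register names
// R0-R3 and the CC_MyTarget_Common convention are hypothetical), a target
// might compose the actions above like so:
//
//   def CC_MyTarget : CallingConv<[
//     // Widen small integers before assigning them.
//     CCIfType<[i8, i16], CCPromoteToType<i32>>,
//     // The first four i32 arguments go in registers, the rest on the stack.
//     CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
//     CCIfType<[i32], CCAssignToStack<4, 4>>,
//     // Anything else is handed off to a shared convention.
//     CCDelegateTo<CC_MyTarget_Common>
//   ]>;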

/// CustomCallingConv - An instance of this is used to declare calling
/// conventions that are implemented using a custom function of the same name.
class CustomCallingConv : CallingConv<[]> {
  let Custom = true;
}

/// CalleeSavedRegs - A list of callee saved registers for a given calling
/// convention.  The order of registers is used by PrologEpilogInsertion when
/// allocating stack slots for saved registers.
///
/// For each CalleeSavedRegs def, TableGen will emit a FOO_SaveList array for
/// returning from getCalleeSavedRegs(), and a FOO_RegMask bit mask suitable for
/// returning from getCallPreservedMask().
class CalleeSavedRegs<dag saves> {
  dag SaveList = saves;

  // Registers that are also preserved across function calls, but should not be
  // included in the generated FOO_SaveList array. These registers will be
  // included in the FOO_RegMask bit mask. This can be used for registers that
  // are saved automatically, like the SPARC register windows.
  dag OtherPreserved;
}
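
// For example (hypothetical register names; `sequence` is a standard TableGen
// dag operator, expanding here to R4...R11):
//
//   def CSR_MyTarget : CalleeSavedRegs<(add LR, (sequence "R%u", 4, 11))>;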

//===-- llvm/Target/TargetLoweringObjectFile.h - Object Info ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements classes used to handle lowerings specific to common
// object file formats.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H

#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCRegister.h"
#include <cstdint>

namespace llvm {

struct Align;
class Constant;
class DataLayout;
class Function;
class GlobalObject;
class GlobalValue;
class MachineBasicBlock;
class MachineModuleInfo;
class Mangler;
class MCContext;
class MCExpr;
class MCSection;
class MCSymbol;
class MCSymbolRefExpr;
class MCStreamer;
class MCValue;
class Module;
class SectionKind;
class StringRef;
class TargetMachine;
class DSOLocalEquivalent;

class TargetLoweringObjectFile : public MCObjectFileInfo {
  /// Name-mangler for global names.
  Mangler *Mang = nullptr;

protected:
  bool SupportIndirectSymViaGOTPCRel = false;
  bool SupportGOTPCRelWithOffset = true;
  bool SupportDebugThreadLocalLocation = true;
  bool SupportDSOLocalEquivalentLowering = false;

  /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values
  /// for EH.
  unsigned PersonalityEncoding = 0;
  unsigned LSDAEncoding = 0;
  unsigned TTypeEncoding = 0;
  unsigned CallSiteEncoding = 0;

  /// This section contains the static constructor pointer list.
  MCSection *StaticCtorSection = nullptr;

  /// This section contains the static destructor pointer list.
  MCSection *StaticDtorSection = nullptr;

  const TargetMachine *TM = nullptr;

public:
  TargetLoweringObjectFile() = default;
  TargetLoweringObjectFile(const TargetLoweringObjectFile &) = delete;
  TargetLoweringObjectFile &
  operator=(const TargetLoweringObjectFile &) = delete;
  virtual ~TargetLoweringObjectFile();

  Mangler &getMangler() const { return *Mang; }

  /// This method must be called before any actual lowering is done.  This
  /// specifies the current context for codegen, and gives the lowering
  /// implementations a chance to set up their default sections.
  virtual void Initialize(MCContext &ctx, const TargetMachine &TM);

  virtual void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
                                    const MCSymbol *Sym) const;

  /// Emit the module-level metadata that the platform cares about.
  virtual void emitModuleMetadata(MCStreamer &Streamer, Module &M) const {}

  /// Emit Call Graph Profile metadata.
  void emitCGProfileMetadata(MCStreamer &Streamer, Module &M) const;

  /// Get the module-level metadata that the platform cares about.
  virtual void getModuleMetadata(Module &M) {}

  /// Given a constant with the SectionKind, return a section that it should be
  /// placed in.
  virtual MCSection *getSectionForConstant(const DataLayout &DL,
                                           SectionKind Kind, const Constant *C,
                                           Align &Alignment) const;

  virtual MCSection *
  getSectionForMachineBasicBlock(const Function &F,
                                 const MachineBasicBlock &MBB,
                                 const TargetMachine &TM) const;

  virtual MCSection *
  getUniqueSectionForFunction(const Function &F,
                              const TargetMachine &TM) const;

  /// Classify the specified global variable into a set of target independent
  /// categories embodied in SectionKind.
  static SectionKind getKindForGlobal(const GlobalObject *GO,
                                      const TargetMachine &TM);

  /// This method computes the appropriate section to emit the specified global
  /// variable or function definition. This should not be passed external (or
  /// available externally) globals.
  MCSection *SectionForGlobal(const GlobalObject *GO, SectionKind Kind,
                              const TargetMachine &TM) const;

  /// This method computes the appropriate section to emit the specified global
  /// variable or function definition. This should not be passed external (or
  /// available externally) globals.
  MCSection *SectionForGlobal(const GlobalObject *GO,
                              const TargetMachine &TM) const;

  virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
                                 const GlobalValue *GV,
                                 const TargetMachine &TM) const;

  virtual MCSection *getSectionForJumpTable(const Function &F,
                                            const TargetMachine &TM) const;
  virtual MCSection *getSectionForLSDA(const Function &, const MCSymbol &,
                                       const TargetMachine &) const {
    return LSDASection;
  }

  virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
                                                   const Function &F) const;

  /// Targets should implement this method to assign a section to globals with
  /// an explicit section specified. The implementation of this method can
  /// assume that GO->hasSection() is true.
  virtual MCSection *
  getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
                           const TargetMachine &TM) const = 0;

  /// Return an MCExpr to use for a reference to the specified global variable
  /// from exception handling information.
  virtual const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
                                                unsigned Encoding,
                                                const TargetMachine &TM,
                                                MachineModuleInfo *MMI,
                                                MCStreamer &Streamer) const;

  /// Return the MCSymbol for a private symbol with global value name as its
  /// base, with the specified suffix.
  MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
                                         StringRef Suffix,
                                         const TargetMachine &TM) const;

  // The symbol that gets passed to .cfi_personality.
  virtual MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
                                            const TargetMachine &TM,
                                            MachineModuleInfo *MMI) const;

  unsigned getPersonalityEncoding() const { return PersonalityEncoding; }
  unsigned getLSDAEncoding() const { return LSDAEncoding; }
  unsigned getTTypeEncoding() const { return TTypeEncoding; }
  unsigned getCallSiteEncoding() const;

  const MCExpr *getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
                                  MCStreamer &Streamer) const;

  virtual MCSection *getStaticCtorSection(unsigned Priority,
                                          const MCSymbol *KeySym) const {
    return StaticCtorSection;
  }

  virtual MCSection *getStaticDtorSection(unsigned Priority,
                                          const MCSymbol *KeySym) const {
    return StaticDtorSection;
  }

  /// Create a symbol reference to describe the given TLS variable when
  /// emitting the address in debug info.
  virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;

  virtual const MCExpr *lowerRelativeReference(const GlobalValue *LHS,
                                               const GlobalValue *RHS,
                                               const TargetMachine &TM) const {
    return nullptr;
  }

  /// Target supports a native lowering of a dso_local_equivalent constant
  /// without needing to replace it with equivalent IR.
  bool supportDSOLocalEquivalentLowering() const {
    return SupportDSOLocalEquivalentLowering;
  }

  virtual const MCExpr *lowerDSOLocalEquivalent(const DSOLocalEquivalent *Equiv,
                                                const TargetMachine &TM) const {
    return nullptr;
  }

  /// Target supports replacing a data "PC"-relative access to a symbol
  /// through another symbol, by accessing the latter via a GOT entry instead?
  bool supportIndirectSymViaGOTPCRel() const {
    return SupportIndirectSymViaGOTPCRel;
  }

  /// Target GOT "PC"-relative relocation supports encoding an additional
  /// binary expression with an offset?
  bool supportGOTPCRelWithOffset() const {
    return SupportGOTPCRelWithOffset;
  }

  /// Target supports TLS offset relocation in debug section?
  bool supportDebugThreadLocalLocation() const {
    return SupportDebugThreadLocalLocation;
  }

  /// Returns the register used as static base in RWPI variants.
  virtual MCRegister getStaticBase() const { return MCRegister::NoRegister; }

  /// Get the target specific RWPI relocation.
  virtual const MCExpr *getIndirectSymViaRWPI(const MCSymbol *Sym) const {
    return nullptr;
  }

  /// Get the target specific PC relative GOT entry relocation
  virtual const MCExpr *getIndirectSymViaGOTPCRel(const GlobalValue *GV,
                                                  const MCSymbol *Sym,
                                                  const MCValue &MV,
                                                  int64_t Offset,
                                                  MachineModuleInfo *MMI,
                                                  MCStreamer &Streamer) const {
    return nullptr;
  }

  /// If supported, return the section to use for the llvm.commandline
  /// metadata. Otherwise, return nullptr.
  virtual MCSection *getSectionForCommandLines() const {
    return nullptr;
  }

  /// On targets that use separate function descriptor symbols, return a section
  /// for the descriptor given its symbol. Use only with defined functions.
  virtual MCSection *
  getSectionForFunctionDescriptor(const Function *F,
                                  const TargetMachine &TM) const {
    return nullptr;
  }

  /// On targets that support TOC entries, return a section for the entry given
  /// the symbol it refers to.
  /// TODO: Implement this interface for existing ELF targets.
  virtual MCSection *getSectionForTOCEntry(const MCSymbol *S,
                                           const TargetMachine &TM) const {
    return nullptr;
  }

  /// On targets that associate external references with a section, return such
  /// a section for the given external global.
  virtual MCSection *
  getSectionForExternalReference(const GlobalObject *GO,
                                 const TargetMachine &TM) const {
    return nullptr;
  }

  /// Targets that have a special convention for their symbols could use
  /// this hook to return a specialized symbol.
  virtual MCSymbol *getTargetSymbol(const GlobalValue *GV,
                                    const TargetMachine &TM) const {
    return nullptr;
  }

  /// If supported, return the function entry point symbol.
  /// Otherwise, returns nullptr.
  /// Func must be a function or an alias which has a function as base object.
  virtual MCSymbol *getFunctionEntryPointSymbol(const GlobalValue *Func,
                                                const TargetMachine &TM) const {
    return nullptr;
  }

protected:
  virtual MCSection *SelectSectionForGlobal(const GlobalObject *GO,
                                            SectionKind Kind,
                                            const TargetMachine &TM) const = 0;
};
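
// A minimal sketch of a concrete subclass, assuming a target that is content
// with the default text/data sections inherited from MCObjectFileInfo.
// "MyTargetLoweringObjectFile" is a hypothetical name, not a class in LLVM:
//
//   class MyTargetLoweringObjectFile : public TargetLoweringObjectFile {
//     MCSection *
//     getExplicitSectionGlobal(const GlobalObject *GO, SectionKind Kind,
//                              const TargetMachine &TM) const override {
//       return DataSection; // a real target would honor GO->getSection()
//     }
//     MCSection *
//     SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
//                            const TargetMachine &TM) const override {
//       return Kind.isText() ? TextSection : DataSection;
//     }
//   };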

} // end namespace llvm

#endif // LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H

//===- llvm/Target/CodeGenCWrappers.h - CodeGen C Wrappers ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines C binding wrappers for enums in llvm/Support/CodeGen.h
// that need them.  The wrappers are separated out to avoid adding an indirect
// dependency on llvm/Config/Targets.def to CodeGen.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_CODEGENCWRAPPERS_H
#define LLVM_TARGET_CODEGENCWRAPPERS_H

#include "llvm-c/TargetMachine.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include <optional>

namespace llvm {

inline std::optional<CodeModel::Model> unwrap(LLVMCodeModel Model, bool &JIT) {
  JIT = false;
  switch (Model) {
  case LLVMCodeModelJITDefault:
    JIT = true;
    [[fallthrough]];
  case LLVMCodeModelDefault:
    return std::nullopt;
  case LLVMCodeModelTiny:
    return CodeModel::Tiny;
  case LLVMCodeModelSmall:
    return CodeModel::Small;
  case LLVMCodeModelKernel:
    return CodeModel::Kernel;
  case LLVMCodeModelMedium:
    return CodeModel::Medium;
  case LLVMCodeModelLarge:
    return CodeModel::Large;
  }
  return CodeModel::Small;
}

inline LLVMCodeModel wrap(CodeModel::Model Model) {
  switch (Model) {
  case CodeModel::Tiny:
    return LLVMCodeModelTiny;
  case CodeModel::Small:
    return LLVMCodeModelSmall;
  case CodeModel::Kernel:
    return LLVMCodeModelKernel;
  case CodeModel::Medium:
    return LLVMCodeModelMedium;
  case CodeModel::Large:
    return LLVMCodeModelLarge;
  }
  llvm_unreachable("Bad CodeModel!");
}
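
// Illustrative round trip (a usage sketch, not part of this header's API):
//
//   bool JIT;
//   if (std::optional<CodeModel::Model> CM = unwrap(LLVMCodeModelSmall, JIT))
//     assert(wrap(*CM) == LLVMCodeModelSmall && "wrap inverts unwrap");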
} // namespace llvm

#endif

//===- TargetItinerary.td - Target Itinerary Description --*- tablegen -*-====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent scheduling interfaces
// which should be implemented by each target that uses instruction
// itineraries for scheduling. Itineraries are detailed reservation
// tables for each instruction class. They are most appropriate for
// in-order machines with complicated scheduling or bundling constraints.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Processor functional unit - These values represent the functional units
// available across all chip sets for the target.  E.g., IntUnit, FPUnit, ...
// These may be independent values for each chip set or may be shared across
// all chip sets of the target.  Each functional unit is treated as a resource
// during scheduling and affects instruction order based on its availability
// during a time interval.
//
class FuncUnit;

//===----------------------------------------------------------------------===//
// Pipeline bypass / forwarding - These values specify the symbolic names of
// pipeline bypasses which can be used to forward the results of instructions
// to later uses.
class Bypass;
def NoBypass : Bypass;

class ReservationKind<bits<1> val> {
  int Value = val;
}

def Required : ReservationKind<0>;
def Reserved : ReservationKind<1>;

//===----------------------------------------------------------------------===//
// Instruction stage - These values represent a non-pipelined step in
// the execution of an instruction.  Cycles represents the number of
// discrete time slots needed to complete the stage.  Units represent
// the choice of functional units that can be used to complete the
// stage, e.g. IntUnit1, IntUnit2.  TimeInc indicates how many cycles
// should elapse from the start of this stage to the start of the next
// stage in the itinerary.
//
// A stage is specified in one of two ways:
//
//   InstrStage<1, [FU_x, FU_y]>     - TimeInc defaults to Cycles
//   InstrStage<1, [FU_x, FU_y], 0>  - TimeInc explicit
//

class InstrStage<int cycles, list<FuncUnit> units,
                 int timeinc = -1,
                 ReservationKind kind = Required> {
  int Cycles          = cycles;       // length of stage in machine cycles
  list<FuncUnit> Units = units;       // choice of functional units
  int TimeInc         = timeinc;      // cycles till start of next stage
  int Kind            = kind.Value;   // kind of FU reservation
}
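
// For instance, a two-stage operation on hypothetical units FU_ALU and FU_MUL
// (one cycle on the ALU, then two cycles on the multiplier) would be written
// as the stage list:
//
//   [InstrStage<1, [FU_ALU]>, InstrStage<2, [FU_MUL]>]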

//===----------------------------------------------------------------------===//
// Instruction itinerary - An itinerary represents a sequential series of steps
// required to complete an instruction.  Itineraries are represented as lists of
// instruction stages.
//

//===----------------------------------------------------------------------===//
// Instruction itinerary classes - These values represent 'named' instruction
// itineraries.  Using named itineraries simplifies managing groups of
// instructions across chip sets.  An instruction uses the same itinerary class
// across all chip sets.  Thus a new chip set can be added without modifying
// instruction information.
//
class InstrItinClass;
def NoItinerary : InstrItinClass;

//===----------------------------------------------------------------------===//
// Instruction itinerary data - These values provide a runtime map of an
// instruction itinerary class (name) to its itinerary data.
//
// NumMicroOps represents the number of micro-operations that each instruction
// in the class is decoded to. If the number is zero, the instruction decodes
// into a variable number of micro-ops that must be determined dynamically.
// This directly relates to the itinerary's global IssueWidth property, which
// constrains the number of micro-ops that can issue per cycle.
//
// OperandCycles are optional "cycle counts". They specify, for each operand
// index, the cycle after instruction issue at which the corresponding value
// is defined or read. Bypasses are optional "pipeline forwarding paths": if
// a def by an instruction is available on a specific bypass and the use can
// read from the same bypass, then the operand use latency is reduced by one.
//
//  InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
//                               InstrStage<1, [A9_AGU]>],
//                              [3, 1], [A9_LdBypass]>,
//  InstrItinData<IIC_iMVNr   , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
//                              [1, 1], [NoBypass, A9_LdBypass]>,
//
// In this example, an instruction of class IIC_iLoad_i reads its input on
// cycle 1 (after issue) and the result of the load is available on cycle 3.
// The result is available via forwarding path A9_LdBypass. If it's used by the
// first source operand of instructions of the IIC_iMVNr class, then the
// operand latency is reduced by 1.
class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
                    list<int> operandcycles = [],
                    list<Bypass> bypasses = [], int uops = 1> {
  InstrItinClass TheClass = Class;
  int NumMicroOps = uops;
  list<InstrStage> Stages = stages;
  list<int> OperandCycles = operandcycles;
  list<Bypass> Bypasses = bypasses;
}

//===----------------------------------------------------------------------===//
// Processor itineraries - These values represent the set of all itinerary
// classes for a given chip set.
//
// Set property values to -1 to use the default.
// See InstrItineraryProps for comments and defaults.
class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
                           list<InstrItinData> iid> {
  list<FuncUnit> FU = fu;
  list<Bypass> BP = bp;
  list<InstrItinData> IID = iid;
  // The packetizer automaton to use for this itinerary. By default all
  // itineraries for a target are bundled up into the same automaton. This only
  // works correctly when there are no conflicts in functional unit IDs between
  // itineraries. For example, given two itineraries A<[SLOT_A]>, B<[SLOT_B]>,
  // SLOT_A and SLOT_B will be assigned the same functional unit index, and
  // the generated packetizer will confuse instructions referencing these slots.
  //
  // To avoid this, setting PacketizerNamespace to non-"" will cause this
  // itinerary to be generated in a different automaton. The subtarget will need
  // to declare a method "create##Namespace##DFAPacketizer()".
  string PacketizerNamespace = "";
}
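
// A minimal sketch (functional unit, bypass, and itinerary class names here
// are hypothetical):
//
//   def MyChipItineraries : ProcessorItineraries<
//     [FU_ALU, FU_MUL],                                  // functional units
//     [ALU_Bypass],                                      // forwarding paths
//     [InstrItinData<IIC_iALU, [InstrStage<1, [FU_ALU]>]>,
//      InstrItinData<IIC_iMUL, [InstrStage<2, [FU_MUL]>]>]>;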

// NoItineraries - A marker that can be used by processors without schedule
// info. Subtargets using NoItineraries can bypass the scheduler's
// expensive HazardRecognizer because no reservation table is needed.
def NoItineraries : ProcessorItineraries<[], [], []>;

//===----------------------------------------------------------------------===//
// Combo Function Unit data - This is a map of combo function unit names to
// the list of functional units that are included in the combination.
//
class ComboFuncData<FuncUnit ComboFunc, list<FuncUnit> funclist> {
  FuncUnit TheComboFunc = ComboFunc;
  list<FuncUnit> FuncList = funclist;
}

//===----------------------------------------------------------------------===//
// Combo Function Units - This is a list of all combo function unit data.
class ComboFuncUnits<list<ComboFuncData> cfd> {
  list<ComboFuncData> CFD = cfd;
}
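
// For example, a hypothetical combo unit that consumes both slots of a
// dual-issue pair whenever it is reserved:
//
//   def MyCombos : ComboFuncUnits<[
//     ComboFuncData<FU_SLOT01, [FU_SLOT0, FU_SLOT1]>
//   ]>;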


//===-- DirectiveBase.td - Base directive definition file --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the base definition file for directives and clauses.
//
//===----------------------------------------------------------------------===//


// General information about the directive language.
class DirectiveLanguage {
  // Name of the directive language such as omp or acc.
  string name = ?;

  // The C++ namespace that code of this directive language should be placed
  // into. This namespace is nested in llvm namespace.
  //
  // By default, uses the name of the directive language as the only namespace.
  // To avoid placing in any namespace, use "". To specify nested namespaces,
  // use "::" as the delimiter, e.g., given "A::B", the generated declarations
  // will be placed in `namespace A { namespace B { <directives-clauses> } }`.
  string cppNamespace = name;

  // Optional prefix used for the generation of the enumerator in the Directive
  // enum.
  string directivePrefix = "";

  // Optional prefix used for the generation of the enumerator in the Clause
  // enum.
  string clausePrefix = "";

  // Make the enum values available in the namespace. This allows us to
  // write something like Enum_X if we have a `using namespace cppNamespace`.
  bit makeEnumAvailableInNamespace = false;

  // Generate include and macro to enable LLVM BitmaskEnum.
  bit enableBitmaskEnumInNamespace = false;

  // Header file included in the generated implementation code. Usually the
  // output file of the declaration code generation. Can be left blank.
  string includeHeader = "";

  // EnumSet class name used for clauses to generate the allowed clauses map.
  string clauseEnumSetClass = "";

  // Class holding the clauses in the flang parse-tree.
  string flangClauseBaseClass = "";
}

// Information about values accepted by enum-like clauses
class ClauseVal<string n, int v, bit uv> {
  // Name of the clause value.
  string name = n;

  // Integer value of the clause.
  int value = v;

  // Can user specify this value?
  bit isUserValue = uv;

  // Set clause value used by default when unknown.
  bit isDefault = false;
}
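
// For example, values for a hypothetical enum-like clause (the second value
// is reserved for internal use, so users may not spell it):
//
//   def MyVal_One     : ClauseVal<"one", 1, true>;
//   def MyVal_Unknown : ClauseVal<"unknown", 2, false> {
//     let isDefault = true;
//   }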

// Information about a specific clause.
class Clause<string c> {
  // Name of the clause.
  string name = c;

  // Define an alternative name returned by the get<LanguageName>ClauseName
  // function.
  string alternativeName = "";

  // Define aliases used in the parser.
  list<string> aliases = [];

  // Optional class holding value of the clause in clang AST.
  string clangClass = "";

  // Optional class holding value of the clause in flang AST.
  string flangClass = "";

  // If set to true, the value is optional. Values are not optional by default.
  bit isValueOptional = false;

  // Name of enum when there is a list of allowed clause values.
  string enumClauseValue = "";

  // List of allowed clause values
  list<ClauseVal> allowedClauseValues = [];

  // If set to true, the value class is part of a list. Single class by default.
  bit isValueList = false;

  // Define a default value such as "*".
  string defaultValue = "";

  // Is the clause implicit? If a clause is set as implicit, the default kind
  // will be returned by get<LanguageName>ClauseKind instead of its own kind.
  bit isImplicit = false;

  // Set the clause used by default when unknown. The function returning the
  // kind enumeration will use this clause as the default.
  bit isDefault = false;

  // Prefix before the actual value. Used in the parser generation.
  // `clause(prefix: value)`
  string prefix = "";

  // Set the prefix as optional.
  // `clause([prefix]: value)`
  bit isPrefixOptional = true;
}

// Hold information about clause validity by version.
class VersionedClause<Clause c, int min = 1, int max = 0x7FFFFFFF> {
  // Actual clause.
  Clause clause = c;

  // Minimum version number where this clause is valid.
  int minVersion = min;

  // Maximum version number where this clause is valid.
  int maxVersion = max;
}

// Information about a specific directive.
class Directive<string d> {
  // Name of the directive. Can be a composite directive whose parts are
  // separated by whitespace.
  string name = d;

  // Define an alternative name returned by the get<LanguageName>DirectiveName
  // function.
  string alternativeName = "";

  // Clauses cannot appear twice in the three allowed lists below. Also, since
  // required implies allowed, the same clause cannot appear in both the
  // allowedClauses and requiredClauses lists.

  // List of allowed clauses for the directive.
  list<VersionedClause> allowedClauses = [];

  // List of clauses that are allowed to appear only once.
  list<VersionedClause> allowedOnceClauses = [];

  // List of clauses that are allowed but mutually exclusive.
  list<VersionedClause> allowedExclusiveClauses = [];

  // List of clauses that are required.
  list<VersionedClause> requiredClauses = [];

  // Set directive used by default when unknown.
  bit isDefault = false;
}
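
// A minimal sketch tying the classes above together (the clause and directive
// names are illustrative, not part of any real directive language):
//
//   def MyC_NumThreads : Clause<"num_threads"> {
//     let flangClass = "ScalarIntExpr";
//   }
//   def MyD_Parallel : Directive<"parallel"> {
//     let allowedOnceClauses = [VersionedClause<MyC_NumThreads, 11>];
//   }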

//===- HLSLResource.h - HLSL Resource helper objects ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file This file contains helper objects for working with HLSL Resources.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_HLSL_HLSLRESOURCE_H
#define LLVM_FRONTEND_HLSL_HLSLRESOURCE_H

#include "llvm/IR/Metadata.h"

namespace llvm {
class GlobalVariable;

namespace hlsl {

enum class ResourceClass : uint8_t {
  SRV = 0,
  UAV,
  CBuffer,
  Sampler,
  Invalid,
  NumClasses = Invalid,
};

// The value ordering of this enumeration is part of the DXIL ABI. Elements
// can only be added to the end, and not removed.
enum class ResourceKind : uint32_t {
  Invalid = 0,
  Texture1D,
  Texture2D,
  Texture2DMS,
  Texture3D,
  TextureCube,
  Texture1DArray,
  Texture2DArray,
  Texture2DMSArray,
  TextureCubeArray,
  TypedBuffer,
  RawBuffer,
  StructuredBuffer,
  CBuffer,
  Sampler,
  TBuffer,
  RTAccelerationStructure,
  FeedbackTexture2D,
  FeedbackTexture2DArray,
  NumEntries,
};

class FrontendResource {
  MDNode *Entry;

public:
  FrontendResource(MDNode *E) : Entry(E) {
    assert(Entry->getNumOperands() == 5 && "Unexpected metadata shape");
  }

  FrontendResource(GlobalVariable *GV, StringRef TypeStr, ResourceKind RK,
                   uint32_t ResIndex, uint32_t Space);

  GlobalVariable *getGlobalVariable();
  StringRef getSourceType();
  uint32_t getResourceKind();
  uint32_t getResourceIndex();
  uint32_t getSpace();
  MDNode *getMetadata() { return Entry; }
};
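
// A usage sketch, assuming `Node` is a five-operand metadata node produced by
// the frontend in the expected shape:
//
//   FrontendResource Res(Node);
//   uint32_t RK = Res.getResourceKind(); // compare against ResourceKind values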
} // namespace hlsl
} // namespace llvm

#endif // LLVM_FRONTEND_HLSL_HLSLRESOURCE_H

// Frontend/OpenACC/ACC.inc
#ifdef GEN_FLANG_DIRECTIVE_CLAUSE_SETS
#undef GEN_FLANG_DIRECTIVE_CLAUSE_SETS

namespace llvm {
namespace acc {

  // Sets for atomic

  static AccClauseSet allowedClauses_ACCD_atomic {
  };

  static AccClauseSet allowedOnceClauses_ACCD_atomic {
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_atomic {
  };

  static AccClauseSet requiredClauses_ACCD_atomic {
  };

  // Sets for cache

  static AccClauseSet allowedClauses_ACCD_cache {
  };

  static AccClauseSet allowedOnceClauses_ACCD_cache {
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_cache {
  };

  static AccClauseSet requiredClauses_ACCD_cache {
  };

  // Sets for data

  static AccClauseSet allowedClauses_ACCD_data {
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_data {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_default,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_data {
  };

  static AccClauseSet requiredClauses_ACCD_data {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
  };

  // Sets for declare

  static AccClauseSet allowedClauses_ACCD_declare {
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_device_resident,
    llvm::acc::Clause::ACCC_link,
  };

  static AccClauseSet allowedOnceClauses_ACCD_declare {
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_declare {
  };

  static AccClauseSet requiredClauses_ACCD_declare {
  };

  // Sets for enter data

  static AccClauseSet allowedClauses_ACCD_enter_data {
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_enter_data {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_if,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_enter_data {
  };

  static AccClauseSet requiredClauses_ACCD_enter_data {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_copyin,
  };

  // Sets for exit data

  static AccClauseSet allowedClauses_ACCD_exit_data {
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_exit_data {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_finalize,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_exit_data {
  };

  static AccClauseSet requiredClauses_ACCD_exit_data {
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_delete,
    llvm::acc::Clause::ACCC_detach,
  };

  // Sets for host_data

  static AccClauseSet allowedClauses_ACCD_host_data {
  };

  static AccClauseSet allowedOnceClauses_ACCD_host_data {
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_if_present,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_host_data {
  };

  static AccClauseSet requiredClauses_ACCD_host_data {
    llvm::acc::Clause::ACCC_use_device,
  };

  // Sets for init

  static AccClauseSet allowedClauses_ACCD_init {
  };

  static AccClauseSet allowedOnceClauses_ACCD_init {
    llvm::acc::Clause::ACCC_device_num,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_if,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_init {
  };

  static AccClauseSet requiredClauses_ACCD_init {
  };

  // Sets for kernels

  static AccClauseSet allowedClauses_ACCD_kernels {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_kernels {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_num_gangs,
    llvm::acc::Clause::ACCC_num_workers,
    llvm::acc::Clause::ACCC_self,
    llvm::acc::Clause::ACCC_vector_length,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_kernels {
  };

  static AccClauseSet requiredClauses_ACCD_kernels {
  };

  // Sets for kernels loop

  static AccClauseSet allowedClauses_ACCD_kernels_loop {
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_private,
    llvm::acc::Clause::ACCC_reduction,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_kernels_loop {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_collapse,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_gang,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_num_gangs,
    llvm::acc::Clause::ACCC_num_workers,
    llvm::acc::Clause::ACCC_self,
    llvm::acc::Clause::ACCC_tile,
    llvm::acc::Clause::ACCC_vector,
    llvm::acc::Clause::ACCC_vector_length,
    llvm::acc::Clause::ACCC_worker,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_kernels_loop {
    llvm::acc::Clause::ACCC_auto,
    llvm::acc::Clause::ACCC_independent,
    llvm::acc::Clause::ACCC_seq,
  };

  static AccClauseSet requiredClauses_ACCD_kernels_loop {
  };

  // Sets for loop

  static AccClauseSet allowedClauses_ACCD_loop {
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_private,
    llvm::acc::Clause::ACCC_reduction,
  };

  static AccClauseSet allowedOnceClauses_ACCD_loop {
    llvm::acc::Clause::ACCC_collapse,
    llvm::acc::Clause::ACCC_gang,
    llvm::acc::Clause::ACCC_tile,
    llvm::acc::Clause::ACCC_vector,
    llvm::acc::Clause::ACCC_worker,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_loop {
    llvm::acc::Clause::ACCC_auto,
    llvm::acc::Clause::ACCC_independent,
    llvm::acc::Clause::ACCC_seq,
  };

  static AccClauseSet requiredClauses_ACCD_loop {
  };

  // Sets for parallel

  static AccClauseSet allowedClauses_ACCD_parallel {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_private,
    llvm::acc::Clause::ACCC_firstprivate,
    llvm::acc::Clause::ACCC_reduction,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_parallel {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_num_gangs,
    llvm::acc::Clause::ACCC_num_workers,
    llvm::acc::Clause::ACCC_self,
    llvm::acc::Clause::ACCC_vector_length,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_parallel {
  };

  static AccClauseSet requiredClauses_ACCD_parallel {
  };

  // Sets for parallel loop

  static AccClauseSet allowedClauses_ACCD_parallel_loop {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_firstprivate,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_private,
    llvm::acc::Clause::ACCC_reduction,
    llvm::acc::Clause::ACCC_tile,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_parallel_loop {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_collapse,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_gang,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_num_gangs,
    llvm::acc::Clause::ACCC_num_workers,
    llvm::acc::Clause::ACCC_self,
    llvm::acc::Clause::ACCC_vector,
    llvm::acc::Clause::ACCC_vector_length,
    llvm::acc::Clause::ACCC_worker,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_parallel_loop {
    llvm::acc::Clause::ACCC_auto,
    llvm::acc::Clause::ACCC_independent,
    llvm::acc::Clause::ACCC_seq,
  };

  static AccClauseSet requiredClauses_ACCD_parallel_loop {
  };

  // Sets for routine

  static AccClauseSet allowedClauses_ACCD_routine {
  };

  static AccClauseSet allowedOnceClauses_ACCD_routine {
    llvm::acc::Clause::ACCC_bind,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_nohost,
    llvm::acc::Clause::ACCC_gang,
    llvm::acc::Clause::ACCC_seq,
    llvm::acc::Clause::ACCC_vector,
    llvm::acc::Clause::ACCC_worker,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_routine {
  };

  static AccClauseSet requiredClauses_ACCD_routine {
  };

  // Sets for serial

  static AccClauseSet allowedClauses_ACCD_serial {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_private,
    llvm::acc::Clause::ACCC_firstprivate,
    llvm::acc::Clause::ACCC_reduction,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_serial {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_self,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_serial {
  };

  static AccClauseSet requiredClauses_ACCD_serial {
  };

  // Sets for serial loop

  static AccClauseSet allowedClauses_ACCD_serial_loop {
    llvm::acc::Clause::ACCC_attach,
    llvm::acc::Clause::ACCC_copy,
    llvm::acc::Clause::ACCC_copyin,
    llvm::acc::Clause::ACCC_copyout,
    llvm::acc::Clause::ACCC_create,
    llvm::acc::Clause::ACCC_deviceptr,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_firstprivate,
    llvm::acc::Clause::ACCC_no_create,
    llvm::acc::Clause::ACCC_present,
    llvm::acc::Clause::ACCC_private,
    llvm::acc::Clause::ACCC_reduction,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_serial_loop {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_collapse,
    llvm::acc::Clause::ACCC_default,
    llvm::acc::Clause::ACCC_gang,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_self,
    llvm::acc::Clause::ACCC_tile,
    llvm::acc::Clause::ACCC_vector,
    llvm::acc::Clause::ACCC_worker,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_serial_loop {
    llvm::acc::Clause::ACCC_auto,
    llvm::acc::Clause::ACCC_independent,
    llvm::acc::Clause::ACCC_seq,
  };

  static AccClauseSet requiredClauses_ACCD_serial_loop {
  };

  // Sets for set

  static AccClauseSet allowedClauses_ACCD_set {
  };

  static AccClauseSet allowedOnceClauses_ACCD_set {
    llvm::acc::Clause::ACCC_default_async,
    llvm::acc::Clause::ACCC_device_num,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_if,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_set {
  };

  static AccClauseSet requiredClauses_ACCD_set {
    llvm::acc::Clause::ACCC_default_async,
    llvm::acc::Clause::ACCC_device_num,
    llvm::acc::Clause::ACCC_device_type,
  };

  // Sets for shutdown

  static AccClauseSet allowedClauses_ACCD_shutdown {
  };

  static AccClauseSet allowedOnceClauses_ACCD_shutdown {
    llvm::acc::Clause::ACCC_device_num,
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_if,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_shutdown {
  };

  static AccClauseSet requiredClauses_ACCD_shutdown {
  };

  // Sets for unknown

  static AccClauseSet allowedClauses_ACCD_unknown {
  };

  static AccClauseSet allowedOnceClauses_ACCD_unknown {
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_unknown {
  };

  static AccClauseSet requiredClauses_ACCD_unknown {
  };

  // Sets for update

  static AccClauseSet allowedClauses_ACCD_update {
    llvm::acc::Clause::ACCC_device_type,
    llvm::acc::Clause::ACCC_wait,
  };

  static AccClauseSet allowedOnceClauses_ACCD_update {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_if,
    llvm::acc::Clause::ACCC_if_present,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_update {
  };

  static AccClauseSet requiredClauses_ACCD_update {
    llvm::acc::Clause::ACCC_device,
    llvm::acc::Clause::ACCC_host,
    llvm::acc::Clause::ACCC_self,
  };

  // Sets for wait

  static AccClauseSet allowedClauses_ACCD_wait {
  };

  static AccClauseSet allowedOnceClauses_ACCD_wait {
    llvm::acc::Clause::ACCC_async,
    llvm::acc::Clause::ACCC_if,
  };

  static AccClauseSet allowedExclusiveClauses_ACCD_wait {
  };

  static AccClauseSet requiredClauses_ACCD_wait {
  };
} // namespace acc
} // namespace llvm

#endif // GEN_FLANG_DIRECTIVE_CLAUSE_SETS

#ifdef GEN_FLANG_DIRECTIVE_CLAUSE_MAP
#undef GEN_FLANG_DIRECTIVE_CLAUSE_MAP

{
  {llvm::acc::Directive::ACCD_atomic,
    {
      llvm::acc::allowedClauses_ACCD_atomic,
      llvm::acc::allowedOnceClauses_ACCD_atomic,
      llvm::acc::allowedExclusiveClauses_ACCD_atomic,
      llvm::acc::requiredClauses_ACCD_atomic,
    }
  },
  {llvm::acc::Directive::ACCD_cache,
    {
      llvm::acc::allowedClauses_ACCD_cache,
      llvm::acc::allowedOnceClauses_ACCD_cache,
      llvm::acc::allowedExclusiveClauses_ACCD_cache,
      llvm::acc::requiredClauses_ACCD_cache,
    }
  },
  {llvm::acc::Directive::ACCD_data,
    {
      llvm::acc::allowedClauses_ACCD_data,
      llvm::acc::allowedOnceClauses_ACCD_data,
      llvm::acc::allowedExclusiveClauses_ACCD_data,
      llvm::acc::requiredClauses_ACCD_data,
    }
  },
  {llvm::acc::Directive::ACCD_declare,
    {
      llvm::acc::allowedClauses_ACCD_declare,
      llvm::acc::allowedOnceClauses_ACCD_declare,
      llvm::acc::allowedExclusiveClauses_ACCD_declare,
      llvm::acc::requiredClauses_ACCD_declare,
    }
  },
  {llvm::acc::Directive::ACCD_enter_data,
    {
      llvm::acc::allowedClauses_ACCD_enter_data,
      llvm::acc::allowedOnceClauses_ACCD_enter_data,
      llvm::acc::allowedExclusiveClauses_ACCD_enter_data,
      llvm::acc::requiredClauses_ACCD_enter_data,
    }
  },
  {llvm::acc::Directive::ACCD_exit_data,
    {
      llvm::acc::allowedClauses_ACCD_exit_data,
      llvm::acc::allowedOnceClauses_ACCD_exit_data,
      llvm::acc::allowedExclusiveClauses_ACCD_exit_data,
      llvm::acc::requiredClauses_ACCD_exit_data,
    }
  },
  {llvm::acc::Directive::ACCD_host_data,
    {
      llvm::acc::allowedClauses_ACCD_host_data,
      llvm::acc::allowedOnceClauses_ACCD_host_data,
      llvm::acc::allowedExclusiveClauses_ACCD_host_data,
      llvm::acc::requiredClauses_ACCD_host_data,
    }
  },
  {llvm::acc::Directive::ACCD_init,
    {
      llvm::acc::allowedClauses_ACCD_init,
      llvm::acc::allowedOnceClauses_ACCD_init,
      llvm::acc::allowedExclusiveClauses_ACCD_init,
      llvm::acc::requiredClauses_ACCD_init,
    }
  },
  {llvm::acc::Directive::ACCD_kernels,
    {
      llvm::acc::allowedClauses_ACCD_kernels,
      llvm::acc::allowedOnceClauses_ACCD_kernels,
      llvm::acc::allowedExclusiveClauses_ACCD_kernels,
      llvm::acc::requiredClauses_ACCD_kernels,
    }
  },
  {llvm::acc::Directive::ACCD_kernels_loop,
    {
      llvm::acc::allowedClauses_ACCD_kernels_loop,
      llvm::acc::allowedOnceClauses_ACCD_kernels_loop,
      llvm::acc::allowedExclusiveClauses_ACCD_kernels_loop,
      llvm::acc::requiredClauses_ACCD_kernels_loop,
    }
  },
  {llvm::acc::Directive::ACCD_loop,
    {
      llvm::acc::allowedClauses_ACCD_loop,
      llvm::acc::allowedOnceClauses_ACCD_loop,
      llvm::acc::allowedExclusiveClauses_ACCD_loop,
      llvm::acc::requiredClauses_ACCD_loop,
    }
  },
  {llvm::acc::Directive::ACCD_parallel,
    {
      llvm::acc::allowedClauses_ACCD_parallel,
      llvm::acc::allowedOnceClauses_ACCD_parallel,
      llvm::acc::allowedExclusiveClauses_ACCD_parallel,
      llvm::acc::requiredClauses_ACCD_parallel,
    }
  },
  {llvm::acc::Directive::ACCD_parallel_loop,
    {
      llvm::acc::allowedClauses_ACCD_parallel_loop,
      llvm::acc::allowedOnceClauses_ACCD_parallel_loop,
      llvm::acc::allowedExclusiveClauses_ACCD_parallel_loop,
      llvm::acc::requiredClauses_ACCD_parallel_loop,
    }
  },
  {llvm::acc::Directive::ACCD_routine,
    {
      llvm::acc::allowedClauses_ACCD_routine,
      llvm::acc::allowedOnceClauses_ACCD_routine,
      llvm::acc::allowedExclusiveClauses_ACCD_routine,
      llvm::acc::requiredClauses_ACCD_routine,
    }
  },
  {llvm::acc::Directive::ACCD_serial,
    {
      llvm::acc::allowedClauses_ACCD_serial,
      llvm::acc::allowedOnceClauses_ACCD_serial,
      llvm::acc::allowedExclusiveClauses_ACCD_serial,
      llvm::acc::requiredClauses_ACCD_serial,
    }
  },
  {llvm::acc::Directive::ACCD_serial_loop,
    {
      llvm::acc::allowedClauses_ACCD_serial_loop,
      llvm::acc::allowedOnceClauses_ACCD_serial_loop,
      llvm::acc::allowedExclusiveClauses_ACCD_serial_loop,
      llvm::acc::requiredClauses_ACCD_serial_loop,
    }
  },
  {llvm::acc::Directive::ACCD_set,
    {
      llvm::acc::allowedClauses_ACCD_set,
      llvm::acc::allowedOnceClauses_ACCD_set,
      llvm::acc::allowedExclusiveClauses_ACCD_set,
      llvm::acc::requiredClauses_ACCD_set,
    }
  },
  {llvm::acc::Directive::ACCD_shutdown,
    {
      llvm::acc::allowedClauses_ACCD_shutdown,
      llvm::acc::allowedOnceClauses_ACCD_shutdown,
      llvm::acc::allowedExclusiveClauses_ACCD_shutdown,
      llvm::acc::requiredClauses_ACCD_shutdown,
    }
  },
  {llvm::acc::Directive::ACCD_unknown,
    {
      llvm::acc::allowedClauses_ACCD_unknown,
      llvm::acc::allowedOnceClauses_ACCD_unknown,
      llvm::acc::allowedExclusiveClauses_ACCD_unknown,
      llvm::acc::requiredClauses_ACCD_unknown,
    }
  },
  {llvm::acc::Directive::ACCD_update,
    {
      llvm::acc::allowedClauses_ACCD_update,
      llvm::acc::allowedOnceClauses_ACCD_update,
      llvm::acc::allowedExclusiveClauses_ACCD_update,
      llvm::acc::requiredClauses_ACCD_update,
    }
  },
  {llvm::acc::Directive::ACCD_wait,
    {
      llvm::acc::allowedClauses_ACCD_wait,
      llvm::acc::allowedOnceClauses_ACCD_wait,
      llvm::acc::allowedExclusiveClauses_ACCD_wait,
      llvm::acc::requiredClauses_ACCD_wait,
    }
  },
}

#endif // GEN_FLANG_DIRECTIVE_CLAUSE_MAP
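
// Usage sketch: a consumer selects one generated section by defining the
// corresponding macro before including this file (include path illustrative):
//
//   #define GEN_FLANG_DIRECTIVE_CLAUSE_SETS
//   #include "llvm/Frontend/OpenACC/ACC.inc"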

#ifdef GEN_FLANG_CLAUSE_PARSER_CLASSES
#undef GEN_FLANG_CLAUSE_PARSER_CLASSES

WRAPPER_CLASS(Async, std::optional<ScalarIntExpr>);
WRAPPER_CLASS(Attach, AccObjectList);
EMPTY_CLASS(Auto);
WRAPPER_CLASS(Bind, AccBindClause);
EMPTY_CLASS(Capture);
WRAPPER_CLASS(Collapse, AccCollapseArg);
WRAPPER_CLASS(Copy, AccObjectList);
WRAPPER_CLASS(Copyin, AccObjectListWithModifier);
WRAPPER_CLASS(Copyout, AccObjectListWithModifier);
WRAPPER_CLASS(Create, AccObjectListWithModifier);
WRAPPER_CLASS(Default, AccDefaultClause);
WRAPPER_CLASS(DefaultAsync, ScalarIntExpr);
WRAPPER_CLASS(Delete, AccObjectList);
WRAPPER_CLASS(Detach, AccObjectList);
WRAPPER_CLASS(Device, AccObjectList);
WRAPPER_CLASS(DeviceNum, ScalarIntExpr);
WRAPPER_CLASS(Deviceptr, AccObjectList);
WRAPPER_CLASS(DeviceResident, AccObjectList);
WRAPPER_CLASS(DeviceType, AccDeviceTypeExprList);
EMPTY_CLASS(Finalize);
WRAPPER_CLASS(Firstprivate, AccObjectList);
WRAPPER_CLASS(Gang, std::optional<AccGangArgList>);
WRAPPER_CLASS(Host, AccObjectList);
WRAPPER_CLASS(If, ScalarLogicalExpr);
EMPTY_CLASS(IfPresent);
EMPTY_CLASS(Independent);
WRAPPER_CLASS(Link, AccObjectList);
WRAPPER_CLASS(NoCreate, AccObjectList);
EMPTY_CLASS(Nohost);
WRAPPER_CLASS(NumGangs, std::list<ScalarIntExpr>);
WRAPPER_CLASS(NumWorkers, ScalarIntExpr);
WRAPPER_CLASS(Present, AccObjectList);
WRAPPER_CLASS(Private, AccObjectList);
EMPTY_CLASS(Read);
WRAPPER_CLASS(Reduction, AccObjectListWithReduction);
WRAPPER_CLASS(Self, std::optional<AccSelfClause>);
EMPTY_CLASS(Seq);
WRAPPER_CLASS(Tile, AccTileExprList);
EMPTY_CLASS(Unknown);
WRAPPER_CLASS(UseDevice, AccObjectList);
WRAPPER_CLASS(Vector, std::optional<ScalarIntExpr>);
WRAPPER_CLASS(VectorLength, ScalarIntExpr);
WRAPPER_CLASS(Wait, std::optional<AccWaitArgument>);
WRAPPER_CLASS(Worker, std::optional<ScalarIntExpr>);
EMPTY_CLASS(Write);

#endif // GEN_FLANG_CLAUSE_PARSER_CLASSES

#ifdef GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
#undef GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST

Async
, Attach
, Auto
, Bind
, Capture
, Collapse
, Copy
, Copyin
, Copyout
, Create
, Default
, DefaultAsync
, Delete
, Detach
, Device
, DeviceNum
, Deviceptr
, DeviceResident
, DeviceType
, Finalize
, Firstprivate
, Gang
, Host
, If
, IfPresent
, Independent
, Link
, NoCreate
, Nohost
, NumGangs
, NumWorkers
, Present
, Private
, Read
, Reduction
, Self
, Seq
, Tile
, Unknown
, UseDevice
, Vector
, VectorLength
, Wait
, Worker
, Write

#endif // GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST

#ifdef GEN_FLANG_DUMP_PARSE_TREE_CLAUSES
#undef GEN_FLANG_DUMP_PARSE_TREE_CLAUSES

NODE(AccClause, Async)
NODE(AccClause, Attach)
NODE(AccClause, Auto)
NODE(AccClause, Bind)
NODE(AccClause, Capture)
NODE(AccClause, Collapse)
NODE(AccClause, Copy)
NODE(AccClause, Copyin)
NODE(AccClause, Copyout)
NODE(AccClause, Create)
NODE(AccClause, Default)
NODE(AccClause, DefaultAsync)
NODE(AccClause, Delete)
NODE(AccClause, Detach)
NODE(AccClause, Device)
NODE(AccClause, DeviceNum)
NODE(AccClause, Deviceptr)
NODE(AccClause, DeviceResident)
NODE(AccClause, DeviceType)
NODE(AccClause, Finalize)
NODE(AccClause, Firstprivate)
NODE(AccClause, Gang)
NODE(AccClause, Host)
NODE(AccClause, If)
NODE(AccClause, IfPresent)
NODE(AccClause, Independent)
NODE(AccClause, Link)
NODE(AccClause, NoCreate)
NODE(AccClause, Nohost)
NODE(AccClause, NumGangs)
NODE(AccClause, NumWorkers)
NODE(AccClause, Present)
NODE(AccClause, Private)
NODE(AccClause, Read)
NODE(AccClause, Reduction)
NODE(AccClause, Self)
NODE(AccClause, Seq)
NODE(AccClause, Tile)
NODE(AccClause, Unknown)
NODE(AccClause, UseDevice)
NODE(AccClause, Vector)
NODE(AccClause, VectorLength)
NODE(AccClause, Wait)
NODE(AccClause, Worker)
NODE(AccClause, Write)

#endif // GEN_FLANG_DUMP_PARSE_TREE_CLAUSES

#ifdef GEN_FLANG_CLAUSE_UNPARSE
#undef GEN_FLANG_CLAUSE_UNPARSE

void Unparse(const AccClause::Async &x) {
  Word("ASYNC");
  Walk("(", x.v, ")");
}
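
// Note: the three-argument Walk overload used for ASYNC above prints its
// "(" / ")" delimiters only when the optional payload is present; clauses
// with a mandatory payload use explicit Put("(") ... Put(")") instead.
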
void Unparse(const AccClause::Attach &x) {
  Word("ATTACH");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::Auto &) { Word("AUTO"); }
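
// Valueless clauses (AUTO, FINALIZE, SEQ, ...) define only a Before hook that
// emits the bare keyword; value-bearing clauses define Unparse, which replaces
// the default traversal and prints the keyword plus its parenthesized payload.
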
void Unparse(const AccClause::Bind &x) {
  Word("BIND");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::Capture &) { Word("CAPTURE"); }
void Unparse(const AccClause::Collapse &x) {
  Word("COLLAPSE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Copy &x) {
  Word("COPY");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Copyin &x) {
  Word("COPYIN");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Copyout &x) {
  Word("COPYOUT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Create &x) {
  Word("CREATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Default &x) {
  Word("DEFAULT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::DefaultAsync &x) {
  Word("DEFAULT_ASYNC");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Delete &x) {
  Word("DELETE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Detach &x) {
  Word("DETACH");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Device &x) {
  Word("DEVICE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::DeviceNum &x) {
  Word("DEVICE_NUM");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Deviceptr &x) {
  Word("DEVICEPTR");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::DeviceResident &x) {
  Word("DEVICE_RESIDENT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::DeviceType &x) {
  Word("DEVICE_TYPE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::Finalize &) { Word("FINALIZE"); }
void Unparse(const AccClause::Firstprivate &x) {
  Word("FIRSTPRIVATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Gang &x) {
  Word("GANG");
  Walk("(", x.v, ")");
}
void Unparse(const AccClause::Host &x) {
  Word("HOST");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::If &x) {
  Word("IF");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::IfPresent &) { Word("IF_PRESENT"); }
void Before(const AccClause::Independent &) { Word("INDEPENDENT"); }
void Unparse(const AccClause::Link &x) {
  Word("LINK");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::NoCreate &x) {
  Word("NO_CREATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::Nohost &) { Word("NOHOST"); }
void Unparse(const AccClause::NumGangs &x) {
  Word("NUM_GANGS");
  Put("(");
  Walk(x.v, ",");
  Put(")");
}
void Unparse(const AccClause::NumWorkers &x) {
  Word("NUM_WORKERS");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Present &x) {
  Word("PRESENT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Private &x) {
  Word("PRIVATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::Read &) { Word("READ"); }
void Unparse(const AccClause::Reduction &x) {
  Word("REDUCTION");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Self &x) {
  Word("SELF");
  Walk("(", x.v, ")");
}
void Before(const AccClause::Seq &) { Word("SEQ"); }
void Unparse(const AccClause::Tile &x) {
  Word("TILE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const AccClause::Unknown &) { Word("UNKNOWN"); }
void Unparse(const AccClause::UseDevice &x) {
  Word("USE_DEVICE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Vector &x) {
  Word("VECTOR");
  Walk("(", x.v, ")");
}
void Unparse(const AccClause::VectorLength &x) {
  Word("VECTOR_LENGTH");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const AccClause::Wait &x) {
  Word("WAIT");
  Walk("(", x.v, ")");
}
void Unparse(const AccClause::Worker &x) {
  Word("WORKER");
  Walk("(", x.v, ")");
}
void Before(const AccClause::Write &) { Word("WRITE"); }

#endif // GEN_FLANG_CLAUSE_UNPARSE

#ifdef GEN_FLANG_CLAUSE_CHECK_ENTER
#undef GEN_FLANG_CLAUSE_CHECK_ENTER

void Enter(const parser::AccClause::Async &);
void Enter(const parser::AccClause::Attach &);
void Enter(const parser::AccClause::Auto &);
void Enter(const parser::AccClause::Bind &);
void Enter(const parser::AccClause::Capture &);
void Enter(const parser::AccClause::Collapse &);
void Enter(const parser::AccClause::Copy &);
void Enter(const parser::AccClause::Copyin &);
void Enter(const parser::AccClause::Copyout &);
void Enter(const parser::AccClause::Create &);
void Enter(const parser::AccClause::Default &);
void Enter(const parser::AccClause::DefaultAsync &);
void Enter(const parser::AccClause::Delete &);
void Enter(const parser::AccClause::Detach &);
void Enter(const parser::AccClause::Device &);
void Enter(const parser::AccClause::DeviceNum &);
void Enter(const parser::AccClause::Deviceptr &);
void Enter(const parser::AccClause::DeviceResident &);
void Enter(const parser::AccClause::DeviceType &);
void Enter(const parser::AccClause::Finalize &);
void Enter(const parser::AccClause::Firstprivate &);
void Enter(const parser::AccClause::Gang &);
void Enter(const parser::AccClause::Host &);
void Enter(const parser::AccClause::If &);
void Enter(const parser::AccClause::IfPresent &);
void Enter(const parser::AccClause::Independent &);
void Enter(const parser::AccClause::Link &);
void Enter(const parser::AccClause::NoCreate &);
void Enter(const parser::AccClause::Nohost &);
void Enter(const parser::AccClause::NumGangs &);
void Enter(const parser::AccClause::NumWorkers &);
void Enter(const parser::AccClause::Present &);
void Enter(const parser::AccClause::Private &);
void Enter(const parser::AccClause::Read &);
void Enter(const parser::AccClause::Reduction &);
void Enter(const parser::AccClause::Self &);
void Enter(const parser::AccClause::Seq &);
void Enter(const parser::AccClause::Tile &);
void Enter(const parser::AccClause::Unknown &);
void Enter(const parser::AccClause::UseDevice &);
void Enter(const parser::AccClause::Vector &);
void Enter(const parser::AccClause::VectorLength &);
void Enter(const parser::AccClause::Wait &);
void Enter(const parser::AccClause::Worker &);
void Enter(const parser::AccClause::Write &);

#endif // GEN_FLANG_CLAUSE_CHECK_ENTER
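
// These declarations are intended to be spliced into a semantic-checker class
// (flang's AccStructureChecker), giving it one Enter overload per clause to
// visit while validating a directive's clause list.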

#ifdef GEN_FLANG_CLAUSE_PARSER_KIND_MAP
#undef GEN_FLANG_CLAUSE_PARSER_KIND_MAP

if constexpr (std::is_same_v<A, parser::AccClause::Async>)
  return llvm::acc::Clause::ACCC_async;
if constexpr (std::is_same_v<A, parser::AccClause::Attach>)
  return llvm::acc::Clause::ACCC_attach;
if constexpr (std::is_same_v<A, parser::AccClause::Auto>)
  return llvm::acc::Clause::ACCC_auto;
if constexpr (std::is_same_v<A, parser::AccClause::Bind>)
  return llvm::acc::Clause::ACCC_bind;
if constexpr (std::is_same_v<A, parser::AccClause::Capture>)
  return llvm::acc::Clause::ACCC_capture;
if constexpr (std::is_same_v<A, parser::AccClause::Collapse>)
  return llvm::acc::Clause::ACCC_collapse;
if constexpr (std::is_same_v<A, parser::AccClause::Copy>)
  return llvm::acc::Clause::ACCC_copy;
if constexpr (std::is_same_v<A, parser::AccClause::Copyin>)
  return llvm::acc::Clause::ACCC_copyin;
if constexpr (std::is_same_v<A, parser::AccClause::Copyout>)
  return llvm::acc::Clause::ACCC_copyout;
if constexpr (std::is_same_v<A, parser::AccClause::Create>)
  return llvm::acc::Clause::ACCC_create;
if constexpr (std::is_same_v<A, parser::AccClause::Default>)
  return llvm::acc::Clause::ACCC_default;
if constexpr (std::is_same_v<A, parser::AccClause::DefaultAsync>)
  return llvm::acc::Clause::ACCC_default_async;
if constexpr (std::is_same_v<A, parser::AccClause::Delete>)
  return llvm::acc::Clause::ACCC_delete;
if constexpr (std::is_same_v<A, parser::AccClause::Detach>)
  return llvm::acc::Clause::ACCC_detach;
if constexpr (std::is_same_v<A, parser::AccClause::Device>)
  return llvm::acc::Clause::ACCC_device;
if constexpr (std::is_same_v<A, parser::AccClause::DeviceNum>)
  return llvm::acc::Clause::ACCC_device_num;
if constexpr (std::is_same_v<A, parser::AccClause::Deviceptr>)
  return llvm::acc::Clause::ACCC_deviceptr;
if constexpr (std::is_same_v<A, parser::AccClause::DeviceResident>)
  return llvm::acc::Clause::ACCC_device_resident;
if constexpr (std::is_same_v<A, parser::AccClause::DeviceType>)
  return llvm::acc::Clause::ACCC_device_type;
if constexpr (std::is_same_v<A, parser::AccClause::Finalize>)
  return llvm::acc::Clause::ACCC_finalize;
if constexpr (std::is_same_v<A, parser::AccClause::Firstprivate>)
  return llvm::acc::Clause::ACCC_firstprivate;
if constexpr (std::is_same_v<A, parser::AccClause::Gang>)
  return llvm::acc::Clause::ACCC_gang;
if constexpr (std::is_same_v<A, parser::AccClause::Host>)
  return llvm::acc::Clause::ACCC_host;
if constexpr (std::is_same_v<A, parser::AccClause::If>)
  return llvm::acc::Clause::ACCC_if;
if constexpr (std::is_same_v<A, parser::AccClause::IfPresent>)
  return llvm::acc::Clause::ACCC_if_present;
if constexpr (std::is_same_v<A, parser::AccClause::Independent>)
  return llvm::acc::Clause::ACCC_independent;
if constexpr (std::is_same_v<A, parser::AccClause::Link>)
  return llvm::acc::Clause::ACCC_link;
if constexpr (std::is_same_v<A, parser::AccClause::NoCreate>)
  return llvm::acc::Clause::ACCC_no_create;
if constexpr (std::is_same_v<A, parser::AccClause::Nohost>)
  return llvm::acc::Clause::ACCC_nohost;
if constexpr (std::is_same_v<A, parser::AccClause::NumGangs>)
  return llvm::acc::Clause::ACCC_num_gangs;
if constexpr (std::is_same_v<A, parser::AccClause::NumWorkers>)
  return llvm::acc::Clause::ACCC_num_workers;
if constexpr (std::is_same_v<A, parser::AccClause::Present>)
  return llvm::acc::Clause::ACCC_present;
if constexpr (std::is_same_v<A, parser::AccClause::Private>)
  return llvm::acc::Clause::ACCC_private;
if constexpr (std::is_same_v<A, parser::AccClause::Read>)
  return llvm::acc::Clause::ACCC_read;
if constexpr (std::is_same_v<A, parser::AccClause::Reduction>)
  return llvm::acc::Clause::ACCC_reduction;
if constexpr (std::is_same_v<A, parser::AccClause::Self>)
  return llvm::acc::Clause::ACCC_self;
if constexpr (std::is_same_v<A, parser::AccClause::Seq>)
  return llvm::acc::Clause::ACCC_seq;
if constexpr (std::is_same_v<A, parser::AccClause::Tile>)
  return llvm::acc::Clause::ACCC_tile;
if constexpr (std::is_same_v<A, parser::AccClause::Unknown>)
  return llvm::acc::Clause::ACCC_unknown;
if constexpr (std::is_same_v<A, parser::AccClause::UseDevice>)
  return llvm::acc::Clause::ACCC_use_device;
if constexpr (std::is_same_v<A, parser::AccClause::Vector>)
  return llvm::acc::Clause::ACCC_vector;
if constexpr (std::is_same_v<A, parser::AccClause::VectorLength>)
  return llvm::acc::Clause::ACCC_vector_length;
if constexpr (std::is_same_v<A, parser::AccClause::Wait>)
  return llvm::acc::Clause::ACCC_wait;
if constexpr (std::is_same_v<A, parser::AccClause::Worker>)
  return llvm::acc::Clause::ACCC_worker;
if constexpr (std::is_same_v<A, parser::AccClause::Write>)
  return llvm::acc::Clause::ACCC_write;
llvm_unreachable("Invalid OpenACC Parser clause");

#endif // GEN_FLANG_CLAUSE_PARSER_KIND_MAP
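
// The if-constexpr chain above is intended to be pasted into the body of a
// function template; a minimal sketch of such a consumer (the wrapper name is
// assumed here, not generated):
//
//   template <typename A> static llvm::acc::Clause getClauseKind() {
//   #define GEN_FLANG_CLAUSE_PARSER_KIND_MAP
//   #include "llvm/Frontend/OpenACC/ACC.inc"
//   }
//
// Because each branch returns and the conditions are `if constexpr`, only the
// branch matching A contributes code to a given instantiation.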

#ifdef GEN_FLANG_CLAUSES_PARSER
#undef GEN_FLANG_CLAUSES_PARSER

TYPE_PARSER(
  "write" >> construct<AccClause>(construct<AccClause::Write>()) ||
  "worker" >> construct<AccClause>(construct<AccClause::Worker>(maybe(parenthesized("num:" >> scalarIntExpr || scalarIntExpr)))) ||
  "wait" >> construct<AccClause>(construct<AccClause::Wait>(maybe(parenthesized(Parser<AccWaitArgument>{})))) ||
  "vector_length" >> construct<AccClause>(construct<AccClause::VectorLength>(parenthesized(scalarIntExpr))) ||
  "vector" >> construct<AccClause>(construct<AccClause::Vector>(maybe(parenthesized("length:" >> scalarIntExpr || scalarIntExpr)))) ||
  "use_device" >> construct<AccClause>(construct<AccClause::UseDevice>(parenthesized(Parser<AccObjectList>{}))) ||
  "unknown" >> construct<AccClause>(construct<AccClause::Unknown>()) ||
  "tile" >> construct<AccClause>(construct<AccClause::Tile>(parenthesized(Parser<AccTileExprList>{}))) ||
  "seq" >> construct<AccClause>(construct<AccClause::Seq>()) ||
  "self" >> construct<AccClause>(construct<AccClause::Self>(maybe(parenthesized(Parser<AccSelfClause>{})))) ||
  "reduction" >> construct<AccClause>(construct<AccClause::Reduction>(parenthesized(Parser<AccObjectListWithReduction>{}))) ||
  "read" >> construct<AccClause>(construct<AccClause::Read>()) ||
  "private" >> construct<AccClause>(construct<AccClause::Private>(parenthesized(Parser<AccObjectList>{}))) ||
  "present" >> construct<AccClause>(construct<AccClause::Present>(parenthesized(Parser<AccObjectList>{}))) ||
  "num_workers" >> construct<AccClause>(construct<AccClause::NumWorkers>(parenthesized(scalarIntExpr))) ||
  "num_gangs" >> construct<AccClause>(construct<AccClause::NumGangs>(parenthesized(nonemptyList(scalarIntExpr)))) ||
  "nohost" >> construct<AccClause>(construct<AccClause::Nohost>()) ||
  "no_create" >> construct<AccClause>(construct<AccClause::NoCreate>(parenthesized(Parser<AccObjectList>{}))) ||
  "link" >> construct<AccClause>(construct<AccClause::Link>(parenthesized(Parser<AccObjectList>{}))) ||
  "independent" >> construct<AccClause>(construct<AccClause::Independent>()) ||
  "if_present" >> construct<AccClause>(construct<AccClause::IfPresent>()) ||
  "if" >> construct<AccClause>(construct<AccClause::If>(parenthesized(scalarLogicalExpr))) ||
  "host" >> construct<AccClause>(construct<AccClause::Host>(parenthesized(Parser<AccObjectList>{}))) ||
  "gang" >> construct<AccClause>(construct<AccClause::Gang>(maybe(parenthesized(Parser<AccGangArgList>{})))) ||
  "firstprivate" >> construct<AccClause>(construct<AccClause::Firstprivate>(parenthesized(Parser<AccObjectList>{}))) ||
  "finalize" >> construct<AccClause>(construct<AccClause::Finalize>()) ||
  "deviceptr" >> construct<AccClause>(construct<AccClause::Deviceptr>(parenthesized(Parser<AccObjectList>{}))) ||
  ("device_type"_tok || "dtype"_tok) >> construct<AccClause>(construct<AccClause::DeviceType>(parenthesized(Parser<AccDeviceTypeExprList>{}))) ||
  "device_resident" >> construct<AccClause>(construct<AccClause::DeviceResident>(parenthesized(Parser<AccObjectList>{}))) ||
  "device_num" >> construct<AccClause>(construct<AccClause::DeviceNum>(parenthesized(scalarIntExpr))) ||
  "device" >> construct<AccClause>(construct<AccClause::Device>(parenthesized(Parser<AccObjectList>{}))) ||
  "detach" >> construct<AccClause>(construct<AccClause::Detach>(parenthesized(Parser<AccObjectList>{}))) ||
  "delete" >> construct<AccClause>(construct<AccClause::Delete>(parenthesized(Parser<AccObjectList>{}))) ||
  "default_async" >> construct<AccClause>(construct<AccClause::DefaultAsync>(parenthesized(scalarIntExpr))) ||
  "default" >> construct<AccClause>(construct<AccClause::Default>(parenthesized(Parser<AccDefaultClause>{}))) ||
  ("create"_tok || "present_or_create"_tok || "pcreate"_tok) >> construct<AccClause>(construct<AccClause::Create>(parenthesized(Parser<AccObjectListWithModifier>{}))) ||
  ("copyout"_tok || "present_or_copyout"_tok || "pcopyout"_tok) >> construct<AccClause>(construct<AccClause::Copyout>(parenthesized(Parser<AccObjectListWithModifier>{}))) ||
  ("copyin"_tok || "present_or_copyin"_tok || "pcopyin"_tok) >> construct<AccClause>(construct<AccClause::Copyin>(parenthesized(Parser<AccObjectListWithModifier>{}))) ||
  ("copy"_tok || "present_or_copy"_tok || "pcopy"_tok) >> construct<AccClause>(construct<AccClause::Copy>(parenthesized(Parser<AccObjectList>{}))) ||
  "collapse" >> construct<AccClause>(construct<AccClause::Collapse>(parenthesized(Parser<AccCollapseArg>{}))) ||
  "capture" >> construct<AccClause>(construct<AccClause::Capture>()) ||
  "bind" >> construct<AccClause>(construct<AccClause::Bind>(parenthesized(Parser<AccBindClause>{}))) ||
  "auto" >> construct<AccClause>(construct<AccClause::Auto>()) ||
  "attach" >> construct<AccClause>(construct<AccClause::Attach>(parenthesized(Parser<AccObjectList>{}))) ||
  "async" >> construct<AccClause>(construct<AccClause::Async>(maybe(parenthesized(scalarIntExpr))))
)
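
// The || combinator is an ordered choice, so the alternatives above are
// deliberately listed longest-keyword-first: "vector_length" is tried before
// "vector", "device_type"/"device_resident"/"device_num" before "device",
// "default_async" before "default", and "copyout"/"copyin" before "copy";
// otherwise the shorter keyword would match as a prefix of the longer one.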

#endif // GEN_FLANG_CLAUSES_PARSER

#ifdef GEN_CLANG_CLAUSE_CLASS
#undef GEN_CLANG_CLAUSE_CLASS

#ifndef CLAUSE
#define CLAUSE(Enum, Str, Implicit)
#endif
#ifndef CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class)
#endif
#ifndef CLAUSE_NO_CLASS
#define CLAUSE_NO_CLASS(Enum, Str)
#endif

#define __CLAUSE(Name, Class)                      \
  CLAUSE(ACCC_##Name, #Name, /* Implicit */ false) \
  CLAUSE_CLASS(ACCC_##Name, #Name, Class)
#define __CLAUSE_NO_CLASS(Name)                    \
  CLAUSE(ACCC_##Name, #Name, /* Implicit */ false) \
  CLAUSE_NO_CLASS(ACCC_##Name, #Name)
#define __IMPLICIT_CLAUSE_CLASS(Name, Str, Class)  \
  CLAUSE(ACCC_##Name, Str, /* Implicit */ true)    \
  CLAUSE_CLASS(ACCC_##Name, Str, Class)
#define __IMPLICIT_CLAUSE_NO_CLASS(Name, Str)      \
  CLAUSE(ACCC_##Name, Str, /* Implicit */ true)    \
  CLAUSE_NO_CLASS(ACCC_##Name, Str)
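
// Typical X-macro usage (a sketch, not part of the generated file): a
// consumer defines one of the hooks before including this section, e.g. to
// build a clause-spelling table; the unused hooks default to no-ops above.
//
//   #define CLAUSE_NO_CLASS(Enum, Str) {llvm::acc::Enum, Str},
//   static constexpr std::pair<llvm::acc::Clause, const char *> Spellings[] = {
//   #define GEN_CLANG_CLAUSE_CLASS
//   #include "llvm/Frontend/OpenACC/ACC.inc"
//   };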

__CLAUSE_NO_CLASS(async)
__CLAUSE_NO_CLASS(attach)
__CLAUSE_NO_CLASS(auto)
__CLAUSE_NO_CLASS(bind)
__CLAUSE_NO_CLASS(capture)
__CLAUSE_NO_CLASS(collapse)
__CLAUSE_NO_CLASS(copy)
__CLAUSE_NO_CLASS(copyin)
__CLAUSE_NO_CLASS(copyout)
__CLAUSE_NO_CLASS(create)
__CLAUSE_NO_CLASS(default)
__CLAUSE_NO_CLASS(default_async)
__CLAUSE_NO_CLASS(delete)
__CLAUSE_NO_CLASS(detach)
__CLAUSE_NO_CLASS(device)
__CLAUSE_NO_CLASS(device_num)
__CLAUSE_NO_CLASS(deviceptr)
__CLAUSE_NO_CLASS(device_resident)
__CLAUSE_NO_CLASS(device_type)
__CLAUSE_NO_CLASS(finalize)
__CLAUSE_NO_CLASS(firstprivate)
__CLAUSE_NO_CLASS(gang)
__CLAUSE_NO_CLASS(host)
__CLAUSE_NO_CLASS(if)
__CLAUSE_NO_CLASS(if_present)
__CLAUSE_NO_CLASS(independent)
__CLAUSE_NO_CLASS(link)
__CLAUSE_NO_CLASS(no_create)
__CLAUSE_NO_CLASS(nohost)
__CLAUSE_NO_CLASS(num_gangs)
__CLAUSE_NO_CLASS(num_workers)
__CLAUSE_NO_CLASS(present)
__CLAUSE_NO_CLASS(private)
__CLAUSE_NO_CLASS(read)
__CLAUSE_NO_CLASS(reduction)
__CLAUSE_NO_CLASS(self)
__CLAUSE_NO_CLASS(seq)
__CLAUSE_NO_CLASS(tile)
__CLAUSE_NO_CLASS(unknown)
__CLAUSE_NO_CLASS(use_device)
__CLAUSE_NO_CLASS(vector)
__CLAUSE_NO_CLASS(vector_length)
__CLAUSE_NO_CLASS(wait)
__CLAUSE_NO_CLASS(worker)
__CLAUSE_NO_CLASS(write)

#undef __IMPLICIT_CLAUSE_NO_CLASS
#undef __IMPLICIT_CLAUSE_CLASS
#undef __CLAUSE
#undef CLAUSE_NO_CLASS
#undef CLAUSE_CLASS
#undef CLAUSE

#endif // GEN_CLANG_CLAUSE_CLASS

#ifdef GEN_DIRECTIVES_IMPL
#undef GEN_DIRECTIVES_IMPL

Directive llvm::acc::getOpenACCDirectiveKind(llvm::StringRef Str) {
  return llvm::StringSwitch<Directive>(Str)
    .Case("atomic",ACCD_atomic)
    .Case("cache",ACCD_cache)
    .Case("data",ACCD_data)
    .Case("declare",ACCD_declare)
    .Case("enter data",ACCD_enter_data)
    .Case("exit data",ACCD_exit_data)
    .Case("host_data",ACCD_host_data)
    .Case("init",ACCD_init)
    .Case("kernels",ACCD_kernels)
    .Case("kernels loop",ACCD_kernels_loop)
    .Case("loop",ACCD_loop)
    .Case("parallel",ACCD_parallel)
    .Case("parallel loop",ACCD_parallel_loop)
    .Case("routine",ACCD_routine)
    .Case("serial",ACCD_serial)
    .Case("serial loop",ACCD_serial_loop)
    .Case("set",ACCD_set)
    .Case("shutdown",ACCD_shutdown)
    .Case("unknown",ACCD_unknown)
    .Case("update",ACCD_update)
    .Case("wait",ACCD_wait)
    .Default(ACCD_unknown);
}
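
// Canonical spellings of combined constructs contain spaces ("enter data",
// "kernels loop"), so an underscore spelling falls through to the default:
//   getOpenACCDirectiveKind("enter data")  -> ACCD_enter_data
//   getOpenACCDirectiveKind("enter_data")  -> ACCD_unknown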

llvm::StringRef llvm::acc::getOpenACCDirectiveName(Directive Kind) {
  switch (Kind) {
    case ACCD_atomic:
      return "atomic";
    case ACCD_cache:
      return "cache";
    case ACCD_data:
      return "data";
    case ACCD_declare:
      return "declare";
    case ACCD_enter_data:
      return "enter data";
    case ACCD_exit_data:
      return "exit data";
    case ACCD_host_data:
      return "host_data";
    case ACCD_init:
      return "init";
    case ACCD_kernels:
      return "kernels";
    case ACCD_kernels_loop:
      return "kernels loop";
    case ACCD_loop:
      return "loop";
    case ACCD_parallel:
      return "parallel";
    case ACCD_parallel_loop:
      return "parallel loop";
    case ACCD_routine:
      return "routine";
    case ACCD_serial:
      return "serial";
    case ACCD_serial_loop:
      return "serial loop";
    case ACCD_set:
      return "set";
    case ACCD_shutdown:
      return "shutdown";
    case ACCD_unknown:
      return "unknown";
    case ACCD_update:
      return "update";
    case ACCD_wait:
      return "wait";
  }
  llvm_unreachable("Invalid OpenACC Directive kind");
}

Clause llvm::acc::getOpenACCClauseKind(llvm::StringRef Str) {
  return llvm::StringSwitch<Clause>(Str)
    .Case("async",ACCC_async)
    .Case("attach",ACCC_attach)
    .Case("auto",ACCC_auto)
    .Case("bind",ACCC_bind)
    .Case("capture",ACCC_capture)
    .Case("collapse",ACCC_collapse)
    .Case("copy",ACCC_copy)
    .Case("copyin",ACCC_copyin)
    .Case("copyout",ACCC_copyout)
    .Case("create",ACCC_create)
    .Case("default",ACCC_default)
    .Case("default_async",ACCC_default_async)
    .Case("delete",ACCC_delete)
    .Case("detach",ACCC_detach)
    .Case("device",ACCC_device)
    .Case("device_num",ACCC_device_num)
    .Case("deviceptr",ACCC_deviceptr)
    .Case("device_resident",ACCC_device_resident)
    .Case("device_type",ACCC_device_type)
    .Case("finalize",ACCC_finalize)
    .Case("firstprivate",ACCC_firstprivate)
    .Case("gang",ACCC_gang)
    .Case("host",ACCC_host)
    .Case("if",ACCC_if)
    .Case("if_present",ACCC_if_present)
    .Case("independent",ACCC_independent)
    .Case("link",ACCC_link)
    .Case("no_create",ACCC_no_create)
    .Case("nohost",ACCC_nohost)
    .Case("num_gangs",ACCC_num_gangs)
    .Case("num_workers",ACCC_num_workers)
    .Case("present",ACCC_present)
    .Case("private",ACCC_private)
    .Case("read",ACCC_read)
    .Case("reduction",ACCC_reduction)
    .Case("self",ACCC_self)
    .Case("seq",ACCC_seq)
    .Case("tile",ACCC_tile)
    .Case("unknown",ACCC_unknown)
    .Case("use_device",ACCC_use_device)
    .Case("vector",ACCC_vector)
    .Case("vector_length",ACCC_vector_length)
    .Case("wait",ACCC_wait)
    .Case("worker",ACCC_worker)
    .Case("write",ACCC_write)
    .Default(ACCC_unknown);
}

llvm::StringRef llvm::acc::getOpenACCClauseName(Clause Kind) {
  switch (Kind) {
    case ACCC_async:
      return "async";
    case ACCC_attach:
      return "attach";
    case ACCC_auto:
      return "auto";
    case ACCC_bind:
      return "bind";
    case ACCC_capture:
      return "capture";
    case ACCC_collapse:
      return "collapse";
    case ACCC_copy:
      return "copy";
    case ACCC_copyin:
      return "copyin";
    case ACCC_copyout:
      return "copyout";
    case ACCC_create:
      return "create";
    case ACCC_default:
      return "default";
    case ACCC_default_async:
      return "default_async";
    case ACCC_delete:
      return "delete";
    case ACCC_detach:
      return "detach";
    case ACCC_device:
      return "device";
    case ACCC_device_num:
      return "device_num";
    case ACCC_deviceptr:
      return "deviceptr";
    case ACCC_device_resident:
      return "device_resident";
    case ACCC_device_type:
      return "device_type";
    case ACCC_finalize:
      return "finalize";
    case ACCC_firstprivate:
      return "firstprivate";
    case ACCC_gang:
      return "gang";
    case ACCC_host:
      return "host";
    case ACCC_if:
      return "if";
    case ACCC_if_present:
      return "if_present";
    case ACCC_independent:
      return "independent";
    case ACCC_link:
      return "link";
    case ACCC_no_create:
      return "no_create";
    case ACCC_nohost:
      return "nohost";
    case ACCC_num_gangs:
      return "num_gangs";
    case ACCC_num_workers:
      return "num_workers";
    case ACCC_present:
      return "present";
    case ACCC_private:
      return "private";
    case ACCC_read:
      return "read";
    case ACCC_reduction:
      return "reduction";
    case ACCC_self:
      return "self";
    case ACCC_seq:
      return "seq";
    case ACCC_tile:
      return "tile";
    case ACCC_unknown:
      return "unknown";
    case ACCC_use_device:
      return "use_device";
    case ACCC_vector:
      return "vector";
    case ACCC_vector_length:
      return "vector_length";
    case ACCC_wait:
      return "wait";
    case ACCC_worker:
      return "worker";
    case ACCC_write:
      return "write";
  }
  llvm_unreachable("Invalid OpenACC Clause kind");
}

DefaultValue llvm::acc::getDefaultValue(llvm::StringRef Str) {
  return llvm::StringSwitch<DefaultValue>(Str)
    .Case("present",ACC_Default_present)
    .Case("none",ACC_Default_none)
    .Default(ACC_Default_none);
}

llvm::StringRef llvm::acc::getOpenACCDefaultValueName(llvm::acc::DefaultValue x) {
  switch (x) {
    case ACC_Default_present:
      return "present";
    case ACC_Default_none:
      return "none";
  }
  llvm_unreachable("Invalid OpenACC DefaultValue kind");
}

bool llvm::acc::isAllowedClauseForDirective(Directive D, Clause C, unsigned Version) {
  assert(unsigned(D) <= llvm::acc::Directive_enumSize);
  assert(unsigned(C) <= llvm::acc::Clause_enumSize);
  switch (D) {
    case ACCD_atomic:
      return false;
      break;
    case ACCD_cache:
      return false;
      break;
    case ACCD_data:
      switch (C) {
        case ACCC_device_type:
          return 32 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 32 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 32 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_declare:
      switch (C) {
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_resident:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_link:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_enter_data:
      switch (C) {
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_exit_data:
      switch (C) {
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_finalize:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_delete:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_detach:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_host_data:
      switch (C) {
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_use_device:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_init:
      switch (C) {
        case ACCC_device_num:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_kernels:
      switch (C) {
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_gangs:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_workers:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector_length:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_kernels_loop:
      switch (C) {
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_private:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_gang:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_gangs:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_workers:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_tile:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector_length:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_worker:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_auto:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_independent:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_seq:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_loop:
      switch (C) {
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_private:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_gang:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_tile:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_worker:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_auto:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_independent:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_seq:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_parallel:
      switch (C) {
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_private:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_gangs:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_workers:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector_length:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_parallel_loop:
      switch (C) {
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_private:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_tile:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_gang:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_gangs:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_num_workers:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector_length:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_worker:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_auto:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_independent:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_seq:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_routine:
      switch (C) {
        case ACCC_bind:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_nohost:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_gang:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_seq:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_worker:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_serial:
      switch (C) {
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_private:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_serial_loop:
      switch (C) {
        case ACCC_attach:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copy:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_copyout:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_deviceptr:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_no_create:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_private:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_default:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_gang:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_tile:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_vector:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_worker:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_auto:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_independent:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_seq:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_set:
      switch (C) {
        case ACCC_default_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_num:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_shutdown:
      switch (C) {
        case ACCC_device_num:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_unknown:
      return false;
      break;
    case ACCD_update:
      switch (C) {
        case ACCC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_wait:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if_present:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_device:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_host:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_self:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case ACCD_wait:
      switch (C) {
        case ACCC_async:
          return 1 <= Version && 2147483647 >= Version;
        case ACCC_if:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
  }
  llvm_unreachable("Invalid OpenACC Directive kind");
}
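
// In the ranges above, versions encode OpenACC releases as major*10+minor
// (32 == OpenACC 3.2), and 2147483647 (INT_MAX) marks an open upper bound:
// "1 <= Version && 2147483647 >= Version" therefore means the clause is
// allowed in every supported version.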

#endif // GEN_DIRECTIVES_IMPL

Frontend/OpenACC/ACC.h.inc

#ifndef LLVM_OpenACC_INC
#define LLVM_OpenACC_INC

#include "llvm/ADT/BitmaskEnum.h"

namespace llvm {
class StringRef;
namespace acc {

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

enum class Directive {
  ACCD_atomic,
  ACCD_cache,
  ACCD_data,
  ACCD_declare,
  ACCD_enter_data,
  ACCD_exit_data,
  ACCD_host_data,
  ACCD_init,
  ACCD_kernels,
  ACCD_kernels_loop,
  ACCD_loop,
  ACCD_parallel,
  ACCD_parallel_loop,
  ACCD_routine,
  ACCD_serial,
  ACCD_serial_loop,
  ACCD_set,
  ACCD_shutdown,
  ACCD_unknown,
  ACCD_update,
  ACCD_wait,
};

static constexpr std::size_t Directive_enumSize = 21;

constexpr auto ACCD_atomic = llvm::acc::Directive::ACCD_atomic;
constexpr auto ACCD_cache = llvm::acc::Directive::ACCD_cache;
constexpr auto ACCD_data = llvm::acc::Directive::ACCD_data;
constexpr auto ACCD_declare = llvm::acc::Directive::ACCD_declare;
constexpr auto ACCD_enter_data = llvm::acc::Directive::ACCD_enter_data;
constexpr auto ACCD_exit_data = llvm::acc::Directive::ACCD_exit_data;
constexpr auto ACCD_host_data = llvm::acc::Directive::ACCD_host_data;
constexpr auto ACCD_init = llvm::acc::Directive::ACCD_init;
constexpr auto ACCD_kernels = llvm::acc::Directive::ACCD_kernels;
constexpr auto ACCD_kernels_loop = llvm::acc::Directive::ACCD_kernels_loop;
constexpr auto ACCD_loop = llvm::acc::Directive::ACCD_loop;
constexpr auto ACCD_parallel = llvm::acc::Directive::ACCD_parallel;
constexpr auto ACCD_parallel_loop = llvm::acc::Directive::ACCD_parallel_loop;
constexpr auto ACCD_routine = llvm::acc::Directive::ACCD_routine;
constexpr auto ACCD_serial = llvm::acc::Directive::ACCD_serial;
constexpr auto ACCD_serial_loop = llvm::acc::Directive::ACCD_serial_loop;
constexpr auto ACCD_set = llvm::acc::Directive::ACCD_set;
constexpr auto ACCD_shutdown = llvm::acc::Directive::ACCD_shutdown;
constexpr auto ACCD_unknown = llvm::acc::Directive::ACCD_unknown;
constexpr auto ACCD_update = llvm::acc::Directive::ACCD_update;
constexpr auto ACCD_wait = llvm::acc::Directive::ACCD_wait;

enum class Clause {
  ACCC_async,
  ACCC_attach,
  ACCC_auto,
  ACCC_bind,
  ACCC_capture,
  ACCC_collapse,
  ACCC_copy,
  ACCC_copyin,
  ACCC_copyout,
  ACCC_create,
  ACCC_default,
  ACCC_default_async,
  ACCC_delete,
  ACCC_detach,
  ACCC_device,
  ACCC_device_num,
  ACCC_deviceptr,
  ACCC_device_resident,
  ACCC_device_type,
  ACCC_finalize,
  ACCC_firstprivate,
  ACCC_gang,
  ACCC_host,
  ACCC_if,
  ACCC_if_present,
  ACCC_independent,
  ACCC_link,
  ACCC_no_create,
  ACCC_nohost,
  ACCC_num_gangs,
  ACCC_num_workers,
  ACCC_present,
  ACCC_private,
  ACCC_read,
  ACCC_reduction,
  ACCC_self,
  ACCC_seq,
  ACCC_tile,
  ACCC_unknown,
  ACCC_use_device,
  ACCC_vector,
  ACCC_vector_length,
  ACCC_wait,
  ACCC_worker,
  ACCC_write,
};

static constexpr std::size_t Clause_enumSize = 45;

constexpr auto ACCC_async = llvm::acc::Clause::ACCC_async;
constexpr auto ACCC_attach = llvm::acc::Clause::ACCC_attach;
constexpr auto ACCC_auto = llvm::acc::Clause::ACCC_auto;
constexpr auto ACCC_bind = llvm::acc::Clause::ACCC_bind;
constexpr auto ACCC_capture = llvm::acc::Clause::ACCC_capture;
constexpr auto ACCC_collapse = llvm::acc::Clause::ACCC_collapse;
constexpr auto ACCC_copy = llvm::acc::Clause::ACCC_copy;
constexpr auto ACCC_copyin = llvm::acc::Clause::ACCC_copyin;
constexpr auto ACCC_copyout = llvm::acc::Clause::ACCC_copyout;
constexpr auto ACCC_create = llvm::acc::Clause::ACCC_create;
constexpr auto ACCC_default = llvm::acc::Clause::ACCC_default;
constexpr auto ACCC_default_async = llvm::acc::Clause::ACCC_default_async;
constexpr auto ACCC_delete = llvm::acc::Clause::ACCC_delete;
constexpr auto ACCC_detach = llvm::acc::Clause::ACCC_detach;
constexpr auto ACCC_device = llvm::acc::Clause::ACCC_device;
constexpr auto ACCC_device_num = llvm::acc::Clause::ACCC_device_num;
constexpr auto ACCC_deviceptr = llvm::acc::Clause::ACCC_deviceptr;
constexpr auto ACCC_device_resident = llvm::acc::Clause::ACCC_device_resident;
constexpr auto ACCC_device_type = llvm::acc::Clause::ACCC_device_type;
constexpr auto ACCC_finalize = llvm::acc::Clause::ACCC_finalize;
constexpr auto ACCC_firstprivate = llvm::acc::Clause::ACCC_firstprivate;
constexpr auto ACCC_gang = llvm::acc::Clause::ACCC_gang;
constexpr auto ACCC_host = llvm::acc::Clause::ACCC_host;
constexpr auto ACCC_if = llvm::acc::Clause::ACCC_if;
constexpr auto ACCC_if_present = llvm::acc::Clause::ACCC_if_present;
constexpr auto ACCC_independent = llvm::acc::Clause::ACCC_independent;
constexpr auto ACCC_link = llvm::acc::Clause::ACCC_link;
constexpr auto ACCC_no_create = llvm::acc::Clause::ACCC_no_create;
constexpr auto ACCC_nohost = llvm::acc::Clause::ACCC_nohost;
constexpr auto ACCC_num_gangs = llvm::acc::Clause::ACCC_num_gangs;
constexpr auto ACCC_num_workers = llvm::acc::Clause::ACCC_num_workers;
constexpr auto ACCC_present = llvm::acc::Clause::ACCC_present;
constexpr auto ACCC_private = llvm::acc::Clause::ACCC_private;
constexpr auto ACCC_read = llvm::acc::Clause::ACCC_read;
constexpr auto ACCC_reduction = llvm::acc::Clause::ACCC_reduction;
constexpr auto ACCC_self = llvm::acc::Clause::ACCC_self;
constexpr auto ACCC_seq = llvm::acc::Clause::ACCC_seq;
constexpr auto ACCC_tile = llvm::acc::Clause::ACCC_tile;
constexpr auto ACCC_unknown = llvm::acc::Clause::ACCC_unknown;
constexpr auto ACCC_use_device = llvm::acc::Clause::ACCC_use_device;
constexpr auto ACCC_vector = llvm::acc::Clause::ACCC_vector;
constexpr auto ACCC_vector_length = llvm::acc::Clause::ACCC_vector_length;
constexpr auto ACCC_wait = llvm::acc::Clause::ACCC_wait;
constexpr auto ACCC_worker = llvm::acc::Clause::ACCC_worker;
constexpr auto ACCC_write = llvm::acc::Clause::ACCC_write;

enum class DefaultValue {
  ACC_Default_present=0,
  ACC_Default_none=1,
};

constexpr auto ACC_Default_present = llvm::acc::DefaultValue::ACC_Default_present;
constexpr auto ACC_Default_none = llvm::acc::DefaultValue::ACC_Default_none;

// Enumeration helper functions
Directive getOpenACCDirectiveKind(llvm::StringRef Str);

llvm::StringRef getOpenACCDirectiveName(Directive D);

Clause getOpenACCClauseKind(llvm::StringRef Str);

llvm::StringRef getOpenACCClauseName(Clause C);

/// Return true if \p C is a valid clause for \p D in version \p Version.
bool isAllowedClauseForDirective(Directive D, Clause C, unsigned Version);

DefaultValue getDefaultValue(StringRef);
llvm::StringRef getOpenACCDefaultValueName(DefaultValue);
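
// A minimal usage sketch for the helpers above:
//
//   using namespace llvm::acc;
//   Directive D = getOpenACCDirectiveKind("parallel loop"); // ACCD_parallel_loop
//   Clause C = getOpenACCClauseKind("num_gangs");           // ACCC_num_gangs
//   bool Ok = isAllowedClauseForDirective(D, C, /*Version=*/32); // true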

} // namespace acc
} // namespace llvm
#endif // LLVM_OpenACC_INC

Frontend/OpenACC/ACC.td

//===-- ACC.td - OpenACC directive definition file ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the definition file for OpenACC 3.3 directives and clauses.
//
//===----------------------------------------------------------------------===//

include "llvm/Frontend/Directive/DirectiveBase.td"

//===----------------------------------------------------------------------===//
// Definition of general OpenACC information
//===----------------------------------------------------------------------===//

def OpenACC : DirectiveLanguage {
  let name = "OpenACC";
  let cppNamespace = "acc"; // final namespace will be llvm::acc
  let directivePrefix = "ACCD_";
  let clausePrefix = "ACCC_";
  let makeEnumAvailableInNamespace = true;
  let enableBitmaskEnumInNamespace = true;
  let clauseEnumSetClass = "AccClauseSet";
  let flangClauseBaseClass = "AccClause";
}
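
// These let-bindings drive the generated code shown earlier: directivePrefix
// and clausePrefix become the ACCD_/ACCC_ enumerator spellings, cppNamespace
// places everything under llvm::acc, makeEnumAvailableInNamespace emits the
// constexpr aliases, enableBitmaskEnumInNamespace emits
// LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE(), and flangClauseBaseClass names
// the AccClause parse-tree wrapper used by flang.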

//===----------------------------------------------------------------------===//
// Definition of OpenACC clauses
//===----------------------------------------------------------------------===//

// 2.16.1
def ACCC_Async : Clause<"async"> {
  let flangClass = "ScalarIntExpr";
  let isValueOptional = true;
}

// 2.9.7
def ACCC_Auto : Clause<"auto"> {}

// 2.7.12
def ACCC_Attach : Clause<"attach"> {
  let flangClass = "AccObjectList";
}

// 2.15.1
def ACCC_Bind : Clause<"bind"> {
  let flangClass = "AccBindClause";
}

// 2.12
def ACCC_Capture : Clause<"capture"> {
}

// 2.9.1
def ACCC_Collapse : Clause<"collapse"> {
  let flangClass = "AccCollapseArg";
}

// 2.7.6
def ACCC_Copy : Clause<"copy"> {
  let flangClass = "AccObjectList";
  let aliases = ["present_or_copy", "pcopy"];
}

// 2.7.7
def ACCC_Copyin : Clause<"copyin"> {
  let flangClass = "AccObjectListWithModifier";
  let aliases = ["present_or_copyin", "pcopyin"];
}

// 2.7.8
def ACCC_Copyout : Clause<"copyout"> {
  let flangClass = "AccObjectListWithModifier";
  let aliases = ["present_or_copyout", "pcopyout"];
}

// 2.7.9
def ACCC_Create : Clause<"create"> {
  let flangClass = "AccObjectListWithModifier";
  let aliases = ["present_or_create", "pcreate"];
}

// 2.5.16
def ACC_Default_none : ClauseVal<"none", 1, 1> { let isDefault = 1; }
def ACC_Default_present : ClauseVal<"present", 0, 1> {}

def ACCC_Default : Clause<"default"> {
  let flangClass = "AccDefaultClause";
  let enumClauseValue = "DefaultValue";
  let allowedClauseValues = [
    ACC_Default_present,
    ACC_Default_none
  ];
}

// 2.14.3
def ACCC_DefaultAsync : Clause<"default_async"> {
  let flangClass = "ScalarIntExpr";
}

// 2.7.11
def ACCC_Delete : Clause<"delete"> {
  let flangClass = "AccObjectList";
}

// 2.7.13
def ACCC_Detach : Clause<"detach"> {
  let flangClass = "AccObjectList";
}

// 2.14.4
def ACCC_Device : Clause<"device"> {
  let flangClass = "AccObjectList";
}

// 2.14.1 - 2.14.2
def ACCC_DeviceNum : Clause<"device_num"> {
  let flangClass = "ScalarIntExpr";
}

// 2.7.4
def ACCC_DevicePtr : Clause<"deviceptr"> {
  let flangClass = "AccObjectList";
}

// 2.13.1
def ACCC_DeviceResident : Clause<"device_resident"> {
  let flangClass = "AccObjectList";
}

// 2.4
def ACCC_DeviceType : Clause<"device_type"> {
  let flangClass = "AccDeviceTypeExprList";
  let defaultValue = "*";
  let aliases = ["dtype"];
}

// 2.6.6
def ACCC_Finalize : Clause<"finalize"> {}

// 2.5.14
def ACCC_FirstPrivate : Clause<"firstprivate"> {
  let flangClass = "AccObjectList";
}

// 2.9.2
def ACCC_Gang : Clause<"gang"> {
  let flangClass = "AccGangArgList";
  let isValueOptional = true;
}

// 2.14.4
def ACCC_Host : Clause<"host"> {
  let flangClass = "AccObjectList";
}

// 2.5.6
def ACCC_If : Clause<"if"> {
  let flangClass = "ScalarLogicalExpr";
}

// 2.14.4
def ACCC_IfPresent : Clause<"if_present"> {}

// 2.9.6
def ACCC_Independent : Clause<"independent"> {}

// 2.13.3
def ACCC_Link : Clause<"link"> {
  let flangClass = "AccObjectList";
}

// 2.7.10
def ACCC_NoCreate : Clause<"no_create"> {
  let flangClass = "AccObjectList";
}

// 2.15.1
def ACCC_NoHost : Clause<"nohost"> {}

// 2.5.10
def ACCC_NumGangs : Clause<"num_gangs"> {
  let flangClass = "ScalarIntExpr";
  let isValueList = 1;
}

// 2.5.11
def ACCC_NumWorkers : Clause<"num_workers"> {
  let flangClass = "ScalarIntExpr";
}

// 2.7.5
def ACCC_Present : Clause<"present"> {
  let flangClass = "AccObjectList";
}

// 2.5.13
def ACCC_Private : Clause<"private"> {
  let flangClass = "AccObjectList";
}

// 2.9.8
def ACCC_Tile : Clause<"tile"> {
  let flangClass = "AccTileExprList";
}

// 2.8.1
def ACCC_UseDevice : Clause<"use_device"> {
  let flangClass = "AccObjectList";
}

// 2.12
def ACCC_Read : Clause<"read"> {}

// 2.5.15
def ACCC_Reduction : Clause<"reduction"> {
  let flangClass = "AccObjectListWithReduction";
}

// 2.5.7
def ACCC_Self : Clause<"self"> {
  let flangClass = "AccSelfClause";
  let isValueOptional = true;
}

// 2.9.5
def ACCC_Seq : Clause<"seq"> {}

// 2.9.4
def ACCC_Vector : Clause<"vector"> {
  let flangClass = "ScalarIntExpr";
  let isValueOptional = true;
  let prefix = "length";
}

// 2.5.12
def ACCC_VectorLength : Clause<"vector_length"> {
  let flangClass = "ScalarIntExpr";
}

// 2.16.2
def ACCC_Wait : Clause<"wait"> {
  let flangClass = "AccWaitArgument";
  let isValueOptional = true;
}

// 2.9.3
def ACCC_Worker : Clause<"worker"> {
  let flangClass = "ScalarIntExpr";
  let isValueOptional = true;
  let prefix = "num";
}
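
// The prefix and isValueOptional fields above are what yield the generated
// parser alternative maybe(parenthesized("num:" >> scalarIntExpr ||
// scalarIntExpr)) for worker; ACCC_Vector's prefix = "length" produces the
// analogous "length:" form.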

// 2.12
def ACCC_Write : Clause<"write"> {}

def ACCC_Unknown : Clause<"unknown"> {
  let isDefault = true;
}

//===----------------------------------------------------------------------===//
// Definition of OpenACC directives
//===----------------------------------------------------------------------===//

// 2.12
def ACC_Atomic : Directive<"atomic"> {}

// 2.6.5
def ACC_Data : Directive<"data"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async, 32>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_Default>
  ];
  let allowedClauses = [
    VersionedClause<ACCC_DeviceType, 32>,
    VersionedClause<ACCC_Wait, 32>
  ];
  let requiredClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>
  ];
}

// 2.13
def ACC_Declare : Directive<"declare"> {
  let allowedClauses = [
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_DeviceResident>,
    VersionedClause<ACCC_Link>
  ];
}

// 2.5.3
def ACC_Kernels : Directive<"kernels"> {
  let allowedClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_NumGangs>,
    VersionedClause<ACCC_NumWorkers>,
    VersionedClause<ACCC_Self>,
    VersionedClause<ACCC_VectorLength>
  ];
}

// 2.5.1
def ACC_Parallel : Directive<"parallel"> {
  let allowedClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_Private>,
    VersionedClause<ACCC_FirstPrivate>,
    VersionedClause<ACCC_Reduction>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_NumGangs>,
    VersionedClause<ACCC_NumWorkers>,
    VersionedClause<ACCC_Self>,
    VersionedClause<ACCC_VectorLength>
  ];
}

// 2.5.2
def ACC_Serial : Directive<"serial"> {
  // Spec lines 950-951: the allowed clauses are as for the parallel construct,
  // except that the num_gangs, num_workers, and vector_length clauses are not
  // permitted.
  let allowedClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_Private>,
    VersionedClause<ACCC_FirstPrivate>,
    VersionedClause<ACCC_Reduction>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_Self>
  ];
}

// 2.9
def ACC_Loop : Directive<"loop"> {
  let allowedClauses = [
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_Private>,
    VersionedClause<ACCC_Reduction>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Collapse>,
    VersionedClause<ACCC_Gang>,
    VersionedClause<ACCC_Tile>,
    VersionedClause<ACCC_Vector>,
    VersionedClause<ACCC_Worker>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<ACCC_Auto>,
    VersionedClause<ACCC_Independent>,
    VersionedClause<ACCC_Seq>
  ];
}

// 2.10
def ACC_Cache : Directive<"cache"> {}

// 2.14.1
def ACC_Init : Directive<"init"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_DeviceNum>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_If>
  ];
}

// 2.15.1
def ACC_Routine : Directive<"routine"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_Bind>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_NoHost>,
    VersionedClause<ACCC_Gang>,
    VersionedClause<ACCC_Seq>,
    VersionedClause<ACCC_Vector>,
    VersionedClause<ACCC_Worker>
  ];
}

// 2.14.3
def ACC_Set : Directive<"set"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_DefaultAsync>,
    VersionedClause<ACCC_DeviceNum>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_If>
  ];
  let requiredClauses = [
    // The three following clauses are also in allowedOnceClauses list due to
    // restriction 2255 - Two instances of the same clause may not appear on the
    // same directive.
    VersionedClause<ACCC_DefaultAsync>,
    VersionedClause<ACCC_DeviceNum>,
    VersionedClause<ACCC_DeviceType>
  ];
}

// 2.14.2
def ACC_Shutdown : Directive<"shutdown"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_DeviceNum>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_If>
  ];
}

// 2.14.4
def ACC_Update : Directive<"update"> {
  let allowedClauses = [
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_IfPresent>
  ];
  let requiredClauses = [
    VersionedClause<ACCC_Device>,
    VersionedClause<ACCC_Host>,
    VersionedClause<ACCC_Self>
  ];
}

// 2.16.3
def ACC_Wait : Directive<"wait"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_If>
  ];
}

// 2.14.6
def ACC_EnterData : Directive<"enter data"> {
  let allowedClauses = [
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_If>
  ];
  let requiredClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_Copyin>
  ];
}

// 2.14.7
def ACC_ExitData : Directive<"exit data"> {
  let allowedClauses = [
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_Finalize>
  ];
  let requiredClauses = [
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Delete>,
    VersionedClause<ACCC_Detach>
  ];
}

// 2.8
def ACC_HostData : Directive<"host_data"> {
  let allowedOnceClauses = [
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_IfPresent>
  ];
  let requiredClauses = [
    VersionedClause<ACCC_UseDevice>
  ];
}

// 2.11
def ACC_KernelsLoop : Directive<"kernels loop"> {
  let allowedClauses = [
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_Private>,
    VersionedClause<ACCC_Reduction>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_Collapse>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_Gang>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_NumGangs>,
    VersionedClause<ACCC_NumWorkers>,
    VersionedClause<ACCC_Self>,
    VersionedClause<ACCC_Tile>,
    VersionedClause<ACCC_Vector>,
    VersionedClause<ACCC_VectorLength>,
    VersionedClause<ACCC_Worker>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<ACCC_Auto>,
    VersionedClause<ACCC_Independent>,
    VersionedClause<ACCC_Seq>
  ];
}

// 2.11
def ACC_ParallelLoop : Directive<"parallel loop"> {
  let allowedClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_FirstPrivate>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_Private>,
    VersionedClause<ACCC_Reduction>,
    VersionedClause<ACCC_Tile>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_Collapse>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_Gang>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_NumGangs>,
    VersionedClause<ACCC_NumWorkers>,
    VersionedClause<ACCC_Self>,
    VersionedClause<ACCC_Vector>,
    VersionedClause<ACCC_VectorLength>,
    VersionedClause<ACCC_Worker>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<ACCC_Auto>,
    VersionedClause<ACCC_Independent>,
    VersionedClause<ACCC_Seq>
  ];
}

// 2.11
def ACC_SerialLoop : Directive<"serial loop"> {
  let allowedClauses = [
    VersionedClause<ACCC_Attach>,
    VersionedClause<ACCC_Copy>,
    VersionedClause<ACCC_Copyin>,
    VersionedClause<ACCC_Copyout>,
    VersionedClause<ACCC_Create>,
    VersionedClause<ACCC_DevicePtr>,
    VersionedClause<ACCC_DeviceType>,
    VersionedClause<ACCC_FirstPrivate>,
    VersionedClause<ACCC_NoCreate>,
    VersionedClause<ACCC_Present>,
    VersionedClause<ACCC_Private>,
    VersionedClause<ACCC_Reduction>,
    VersionedClause<ACCC_Wait>
  ];
  let allowedOnceClauses = [
    VersionedClause<ACCC_Async>,
    VersionedClause<ACCC_Collapse>,
    VersionedClause<ACCC_Default>,
    VersionedClause<ACCC_Gang>,
    VersionedClause<ACCC_If>,
    VersionedClause<ACCC_Self>,
    VersionedClause<ACCC_Tile>,
    VersionedClause<ACCC_Vector>,
    VersionedClause<ACCC_Worker>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<ACCC_Auto>,
    VersionedClause<ACCC_Independent>,
    VersionedClause<ACCC_Seq>
  ];
}

def ACC_Unknown : Directive<"unknown"> {
  let isDefault = true;
}
PKiwFZa:-Frontend/OpenMP/OMPGridValues.hnu�[���//====--- OMPGridValues.h - Language-specific address spaces --*- C++ -*-====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Provides definitions for Target specific Grid Values
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_OPENMP_OMPGRIDVALUES_H
#define LLVM_FRONTEND_OPENMP_OMPGRIDVALUES_H

namespace llvm {

namespace omp {

/// \brief Defines various target-specific GPU grid values that must be
///        consistent between host RTL (plugin), device RTL, and clang.
///        We can change grid values for a "fat" binary so that different
///        passes get the correct values when generating code for a
///        multi-target binary. Both amdgcn and nvptx values are stored in
///        this file. In the future, should there be differences between GPUs
///        of the same architecture, then simply make a different array and
///        use the new array name.
///
/// Example usage in clang:
///   const unsigned slot_size =
///   ctx.GetTargetInfo().getGridValue().GV_Warp_Size;
///
/// Example usage in libomptarget/deviceRTLs:
///   #include "llvm/Frontend/OpenMP/OMPGridValues.h"
///   #ifdef __AMDGPU__
///     #define GRIDVAL AMDGPUGridValues
///   #else
///     #define GRIDVAL NVPTXGridValues
///   #endif
///   ... Then use this reference for GV_Warp_Size in the deviceRTL source.
///   llvm::omp::GRIDVAL().GV_Warp_Size
///
/// Example usage in libomptarget hsa plugin:
///   #include "llvm/Frontend/OpenMP/OMPGridValues.h"
///   #define GRIDVAL AMDGPUGridValues
///   ... Then use this reference to access GV_Warp_Size in the hsa plugin.
///   llvm::omp::GRIDVAL().GV_Warp_Size
///
/// Example usage in libomptarget cuda plugin:
///    #include "llvm/Frontend/OpenMP/OMPGridValues.h"
///    #define GRIDVAL NVPTXGridValues
///   ... Then use this reference to access GV_Warp_Size in the cuda plugin.
///    llvm::omp::GRIDVAL().GV_Warp_Size
///

struct GV {
  /// The size reserved for data in a shared memory slot.
  unsigned GV_Slot_Size;
  /// The default value of maximum number of threads in a worker warp.
  unsigned GV_Warp_Size;

  constexpr unsigned warpSlotSize() const {
    return GV_Warp_Size * GV_Slot_Size;
  }

  /// The maximum number of teams.
  unsigned GV_Max_Teams;
  /// The default number of teams in the absence of any other information.
  unsigned GV_Default_Num_Teams;

  /// An alternative to the heavy data sharing infrastructure that uses global
  /// memory is one that uses device __shared__ memory. The amount of such
  /// space (in bytes) reserved by the OpenMP runtime is noted here.
  unsigned GV_SimpleBufferSize;
  /// The absolute maximum team size for a working group.
  unsigned GV_Max_WG_Size;
  /// The default maximum team size for a working group.
  unsigned GV_Default_WG_Size;

  constexpr unsigned maxWarpNumber() const {
    return GV_Max_WG_Size / GV_Warp_Size;
  }
};

/// For AMDGPU GPUs
static constexpr GV AMDGPUGridValues64 = {
    256,       // GV_Slot_Size
    64,        // GV_Warp_Size
    (1 << 16), // GV_Max_Teams
    440,       // GV_Default_Num_Teams
    896,       // GV_SimpleBufferSize
    1024,      // GV_Max_WG_Size
    256,       // GV_Default_WG_Size
};

static constexpr GV AMDGPUGridValues32 = {
    256,       // GV_Slot_Size
    32,        // GV_Warp_Size
    (1 << 16), // GV_Max_Teams
    440,       // GV_Default_Num_Teams
    896,       // GV_SimpleBufferSize
    1024,      // GV_Max_WG_Size
    256,       // GV_Default_WG_Size
};

template <unsigned wavesize> constexpr const GV &getAMDGPUGridValues() {
  static_assert(wavesize == 32 || wavesize == 64, "Unexpected wavesize");
  return wavesize == 32 ? AMDGPUGridValues32 : AMDGPUGridValues64;
}

/// For Nvidia GPUs
static constexpr GV NVPTXGridValues = {
    256,       // GV_Slot_Size
    32,        // GV_Warp_Size
    (1 << 16), // GV_Max_Teams
    3200,      // GV_Default_Num_Teams
    896,       // GV_SimpleBufferSize
    1024,      // GV_Max_WG_Size
    128,       // GV_Default_WG_Size
};
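
// Example sanity checks: a minimal sketch that merely restates the derived
// quantities from the tables above (a warp-sized slot region and the warp
// count of a maximal workgroup).
static_assert(NVPTXGridValues.warpSlotSize() == 32 * 256,
              "warpSlotSize() is GV_Warp_Size * GV_Slot_Size");
static_assert(getAMDGPUGridValues<64>().maxWarpNumber() == 1024 / 64,
              "maxWarpNumber() is GV_Max_WG_Size / GV_Warp_Size");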

} // namespace omp
} // namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPGRIDVALUES_H
//===- OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H

#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
#include <map>
#include <optional>

namespace llvm {
class CanonicalLoopInfo;
struct TargetRegionEntryInfo;
class OffloadEntriesInfoManager;
class OpenMPIRBuilder;

/// Move the instruction after an InsertPoint to the beginning of another
/// BasicBlock.
///
/// The instructions after \p IP are moved to the beginning of \p New which must
/// not have any PHINodes. If \p CreateBranch is true, a branch instruction to
/// \p New will be added such that there is no semantic change. Otherwise, the
/// \p IP insert block remains degenerate and it is up to the caller to insert a
/// terminator.
void spliceBB(IRBuilderBase::InsertPoint IP, BasicBlock *New,
              bool CreateBranch);

/// Splice a BasicBlock at an IRBuilder's current insertion point. Its new
/// insert location will stick to after the instruction before the insertion
/// point (instead of moving with the instruction the InsertPoint stores
/// internally).
void spliceBB(IRBuilder<> &Builder, BasicBlock *New, bool CreateBranch);

/// Split a BasicBlock at an InsertPoint, even if the block is degenerate
/// (missing the terminator).
///
/// llvm::SplitBasicBlock and BasicBlock::splitBasicBlock require a well-formed
/// BasicBlock. \p Name is used for the new successor block. If \p CreateBranch
/// is true, a branch to the new successor will be created such that
/// semantically there is no change; otherwise the block of the insertion point
/// remains degenerate and it is the caller's responsibility to insert a
/// terminator. Returns the new successor block.
BasicBlock *splitBB(IRBuilderBase::InsertPoint IP, bool CreateBranch,
                    llvm::Twine Name = {});

/// Split a BasicBlock at \p Builder's insertion point, even if the block is
/// degenerate (missing the terminator).  Its new insert location will stick to
/// after the instruction before the insertion point (instead of moving with the
/// instruction the InsertPoint stores internally).
BasicBlock *splitBB(IRBuilderBase &Builder, bool CreateBranch,
                    llvm::Twine Name = {});

/// Split a BasicBlock at \p Builder's insertion point, even if the block is
/// degenerate (missing the terminator).  Its new insert location will stick to
/// after the instruction before the insertion point (instead of moving with the
/// instruction the InsertPoint stores internally).
BasicBlock *splitBB(IRBuilder<> &Builder, bool CreateBranch, llvm::Twine Name);

/// Like splitBB, but reuses the current block's name for the new name.
BasicBlock *splitBBWithSuffix(IRBuilderBase &Builder, bool CreateBranch,
                              llvm::Twine Suffix = ".split");
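
// A minimal usage sketch for the splitting helpers above (illustrative;
// assumes an IRBuilder<> named Builder already positioned inside a block):
//
//   // Split at the current insertion point; the new successor reuses the
//   // current block's name plus ".split", and Builder keeps inserting in
//   // the original block.
//   BasicBlock *Cont = splitBBWithSuffix(Builder, /*CreateBranch=*/true);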

/// Captures attributes that affect generating LLVM-IR using the
/// OpenMPIRBuilder and related classes. Note that not all attributes are
/// required for all classes or functions. In some use cases the configuration
/// is not necessary at all, because the only functions that are called are
/// ones that are not dependent on the configuration.
class OpenMPIRBuilderConfig {
public:
  /// Flag for specifying if the compilation is done for embedded device code
  /// or host code.
  std::optional<bool> IsTargetDevice;

  /// Flag for specifying if the compilation is done for an accelerator.
  std::optional<bool> IsGPU;

  /// Flag for specifying whether a requires unified_shared_memory
  /// directive is present or not.
  std::optional<bool> HasRequiresUnifiedSharedMemory;

  /// Flag for specifying if offloading is mandatory.
  std::optional<bool> OpenMPOffloadMandatory;

  /// First separator used between the initial two parts of a name.
  std::optional<StringRef> FirstSeparator;
  /// Separator used between all of the remaining consecutive parts of a name.
  std::optional<StringRef> Separator;

  OpenMPIRBuilderConfig() {}
  OpenMPIRBuilderConfig(bool IsTargetDevice, bool IsGPU,
                        bool HasRequiresUnifiedSharedMemory,
                        bool OpenMPOffloadMandatory)
      : IsTargetDevice(IsTargetDevice), IsGPU(IsGPU),
        HasRequiresUnifiedSharedMemory(HasRequiresUnifiedSharedMemory),
        OpenMPOffloadMandatory(OpenMPOffloadMandatory) {}

  // Getter functions that assert if the required values are not present.
  bool isTargetDevice() const {
    assert(IsTargetDevice.has_value() && "IsTargetDevice is not set");
    return *IsTargetDevice;
  }

  bool isGPU() const {
    assert(IsGPU.has_value() && "IsGPU is not set");
    return *IsGPU;
  }

  bool hasRequiresUnifiedSharedMemory() const {
    assert(HasRequiresUnifiedSharedMemory.has_value() &&
           "HasRequiresUnifiedSharedMemory is not set");
    return *HasRequiresUnifiedSharedMemory;
  }

  bool openMPOffloadMandatory() const {
    assert(OpenMPOffloadMandatory.has_value() &&
           "OpenMPOffloadMandatory is not set");
    return *OpenMPOffloadMandatory;
  }
  // Returns FirstSeparator if set; otherwise returns the default first
  // separator, which depends on isGPU().
  StringRef firstSeparator() const {
    if (FirstSeparator.has_value())
      return *FirstSeparator;
    if (isGPU())
      return "_";
    return ".";
  }

  // Returns Separator if set; otherwise returns the default separator, which
  // depends on isGPU().
  StringRef separator() const {
    if (Separator.has_value())
      return *Separator;
    if (isGPU())
      return "$";
    return ".";
  }

  void setIsTargetDevice(bool Value) { IsTargetDevice = Value; }
  void setIsGPU(bool Value) { IsGPU = Value; }
  void setHasRequiresUnifiedSharedMemory(bool Value) {
    HasRequiresUnifiedSharedMemory = Value;
  }
  void setFirstSeparator(StringRef FS) { FirstSeparator = FS; }
  void setSeparator(StringRef S) { Separator = S; }
};
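
// A minimal configuration sketch (the values and the OMPBuilder instance are
// illustrative, not defaults mandated by the builder):
//
//   OpenMPIRBuilderConfig Config(/*IsTargetDevice=*/false, /*IsGPU=*/false,
//                                /*HasRequiresUnifiedSharedMemory=*/false,
//                                /*OpenMPOffloadMandatory=*/false);
//   Config.setSeparator("$");     // Overrides the isGPU()-based default.
//   OMPBuilder.setConfig(Config); // OMPBuilder is an OpenMPIRBuilder.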

/// Data structure to contain the information needed to uniquely identify
/// a target entry.
struct TargetRegionEntryInfo {
  std::string ParentName;
  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  unsigned Count;

  TargetRegionEntryInfo() : DeviceID(0), FileID(0), Line(0), Count(0) {}
  TargetRegionEntryInfo(StringRef ParentName, unsigned DeviceID,
                        unsigned FileID, unsigned Line, unsigned Count = 0)
      : ParentName(ParentName), DeviceID(DeviceID), FileID(FileID), Line(Line),
        Count(Count) {}

  static void getTargetRegionEntryFnName(SmallVectorImpl<char> &Name,
                                         StringRef ParentName,
                                         unsigned DeviceID, unsigned FileID,
                                         unsigned Line, unsigned Count);

  bool operator<(const TargetRegionEntryInfo &RHS) const {
    return std::make_tuple(ParentName, DeviceID, FileID, Line, Count) <
           std::make_tuple(RHS.ParentName, RHS.DeviceID, RHS.FileID, RHS.Line,
                           RHS.Count);
  }
};

/// Class that manages information about offload code regions and data
class OffloadEntriesInfoManager {
  OpenMPIRBuilder *OMPBuilder;
  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
  class OffloadEntryInfo {
  public:
    /// Kind of a given entry.
    enum OffloadingEntryInfoKinds : unsigned {
      /// Entry is a target region.
      OffloadingEntryInfoTargetRegion = 0,
      /// Entry is a declare target variable.
      OffloadingEntryInfoDeviceGlobalVar = 1,
      /// Invalid entry info.
      OffloadingEntryInfoInvalid = ~0u
    };

  protected:
    OffloadEntryInfo() = delete;
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
    explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                              uint32_t Flags)
        : Flags(Flags), Order(Order), Kind(Kind) {}
    ~OffloadEntryInfo() = default;

  public:
    bool isValid() const { return Order != ~0u; }
    unsigned getOrder() const { return Order; }
    OffloadingEntryInfoKinds getKind() const { return Kind; }
    uint32_t getFlags() const { return Flags; }
    void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
    Constant *getAddress() const { return cast_or_null<Constant>(Addr); }
    void setAddress(Constant *V) {
      assert(!Addr.pointsToAliveValue() && "Address has been set before!");
      Addr = V;
    }
    static bool classof(const OffloadEntryInfo *Info) { return true; }

  private:
    /// Address of the entity that has to be mapped for offloading.
    WeakTrackingVH Addr;

    /// Flags associated with the device global.
    uint32_t Flags = 0u;

    /// Order this entry was emitted.
    unsigned Order = ~0u;

    OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
  };

  /// Return true if there are no entries defined.
  bool empty() const;
  /// Return number of entries defined so far.
  unsigned size() const { return OffloadingEntriesNum; }

  OffloadEntriesInfoManager(OpenMPIRBuilder *builder) : OMPBuilder(builder) {}

  //
  // Target region entries related.
  //

  /// Kind of the target registry entry.
  enum OMPTargetRegionEntryKind : uint32_t {
    /// Mark the entry as target region.
    OMPTargetRegionEntryTargetRegion = 0x0,
    /// Mark the entry as a global constructor.
    OMPTargetRegionEntryCtor = 0x02,
    /// Mark the entry as a global destructor.
    OMPTargetRegionEntryDtor = 0x04,
  };

  /// Target region entries info.
  class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
    /// Address that can be used as the ID of the entry.
    Constant *ID = nullptr;

  public:
    OffloadEntryInfoTargetRegion()
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
    explicit OffloadEntryInfoTargetRegion(unsigned Order, Constant *Addr,
                                          Constant *ID,
                                          OMPTargetRegionEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
          ID(ID) {
      setAddress(Addr);
    }

    Constant *getID() const { return ID; }
    void setID(Constant *V) {
      assert(!ID && "ID has been set before!");
      ID = V;
    }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoTargetRegion;
    }
  };

  /// Initialize target region entry.
  /// This is ONLY needed for DEVICE compilation.
  void initializeTargetRegionEntryInfo(const TargetRegionEntryInfo &EntryInfo,
                                       unsigned Order);
  /// Register target region entry.
  void registerTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo,
                                     Constant *Addr, Constant *ID,
                                     OMPTargetRegionEntryKind Flags);
  /// Return true if a target region entry with the provided information
  /// exists.
  bool hasTargetRegionEntryInfo(TargetRegionEntryInfo EntryInfo,
                                bool IgnoreAddressId = false) const;

  /// Return the Name based on \a EntryInfo using the next available Count.
  void getTargetRegionEntryFnName(SmallVectorImpl<char> &Name,
                                  const TargetRegionEntryInfo &EntryInfo);

  /// Applies action \a Action on all registered entries.
  typedef function_ref<void(const TargetRegionEntryInfo &EntryInfo,
                            const OffloadEntryInfoTargetRegion &)>
      OffloadTargetRegionEntryInfoActTy;
  void
  actOnTargetRegionEntriesInfo(const OffloadTargetRegionEntryInfoActTy &Action);
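
  // A hypothetical registration sketch for the target region API above (Mgr
  // is an OffloadEntriesInfoManager; Addr and ID stand in for constants
  // produced during codegen):
  //
  //   TargetRegionEntryInfo Info("parent_fn", /*DeviceID=*/0, /*FileID=*/1,
  //                              /*Line=*/42);
  //   if (!Mgr.hasTargetRegionEntryInfo(Info))
  //     Mgr.registerTargetRegionEntryInfo(
  //         Info, Addr, ID, OMPTargetRegionEntryTargetRegion);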

  //
  // Device global variable entries related.
  //

  /// Kind of the global variable entry.
  enum OMPTargetGlobalVarEntryKind : uint32_t {
    /// Mark the entry as a declare target 'to'.
    OMPTargetGlobalVarEntryTo = 0x0,
    /// Mark the entry as a declare target 'link'.
    OMPTargetGlobalVarEntryLink = 0x1,
    /// Mark the entry as a declare target 'enter'.
    OMPTargetGlobalVarEntryEnter = 0x2,
    /// Mark the entry as having no declare target entry kind.
    OMPTargetGlobalVarEntryNone = 0x3,
  };

  /// Kind of device clause for declare target variables and functions.
  /// NOTE: Currently not used as part of a variable entry; it is used by
  /// Flang and Clang to interface with the variable-related registration
  /// functions.
  enum OMPTargetDeviceClauseKind : uint32_t {
    /// The target is marked for all devices
    OMPTargetDeviceClauseAny = 0x0,
    /// The target is marked for non-host devices
    OMPTargetDeviceClauseNoHost = 0x1,
    /// The target is marked for host devices
    OMPTargetDeviceClauseHost = 0x2,
    /// The target is marked as having no clause
    OMPTargetDeviceClauseNone = 0x3
  };

  /// Device global variable entries info.
  class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
    /// Size of the global variable.
    int64_t VarSize;
    GlobalValue::LinkageTypes Linkage;

  public:
    OffloadEntryInfoDeviceGlobalVar()
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                             OMPTargetGlobalVarEntryKind Flags)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
    explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, Constant *Addr,
                                             int64_t VarSize,
                                             OMPTargetGlobalVarEntryKind Flags,
                                             GlobalValue::LinkageTypes Linkage)
        : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
          VarSize(VarSize), Linkage(Linkage) {
      setAddress(Addr);
    }

    int64_t getVarSize() const { return VarSize; }
    void setVarSize(int64_t Size) { VarSize = Size; }
    GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
    void setLinkage(GlobalValue::LinkageTypes LT) { Linkage = LT; }
    static bool classof(const OffloadEntryInfo *Info) {
      return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
    }
  };

  /// Initialize device global variable entry.
  /// This is ONLY used for DEVICE compilation.
  void initializeDeviceGlobalVarEntryInfo(StringRef Name,
                                          OMPTargetGlobalVarEntryKind Flags,
                                          unsigned Order);

  /// Register device global variable entry.
  void registerDeviceGlobalVarEntryInfo(StringRef VarName, Constant *Addr,
                                        int64_t VarSize,
                                        OMPTargetGlobalVarEntryKind Flags,
                                        GlobalValue::LinkageTypes Linkage);
  /// Checks if the variable with the given name has been registered already.
  bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
    return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
  }
  /// Applies action \a Action on all registered entries.
  typedef function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)>
      OffloadDeviceGlobalVarEntryInfoActTy;
  void actOnDeviceGlobalVarEntriesInfo(
      const OffloadDeviceGlobalVarEntryInfoActTy &Action);
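
  // A corresponding sketch for device global variables (again, Mgr, Addr,
  // the size, and the linkage are placeholders):
  //
  //   if (!Mgr.hasDeviceGlobalVarEntryInfo("myvar"))
  //     Mgr.registerDeviceGlobalVarEntryInfo(
  //         "myvar", Addr, /*VarSize=*/8, OMPTargetGlobalVarEntryTo,
  //         GlobalValue::ExternalLinkage);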

private:
  /// Return the count of entries at a particular source location.
  unsigned
  getTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo) const;

  /// Update the count of entries at a particular source location.
  void
  incrementTargetRegionEntryInfoCount(const TargetRegionEntryInfo &EntryInfo);

  static TargetRegionEntryInfo
  getTargetRegionEntryCountKey(const TargetRegionEntryInfo &EntryInfo) {
    return TargetRegionEntryInfo(EntryInfo.ParentName, EntryInfo.DeviceID,
                                 EntryInfo.FileID, EntryInfo.Line, 0);
  }

  // Count of entries at a location.
  std::map<TargetRegionEntryInfo, unsigned> OffloadEntriesTargetRegionCount;

  // Storage for target region entries.
  typedef std::map<TargetRegionEntryInfo, OffloadEntryInfoTargetRegion>
      OffloadEntriesTargetRegionTy;
  OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  /// Storage for device global variable entries. The storage is to be indexed
  /// by mangled name.
  typedef StringMap<OffloadEntryInfoDeviceGlobalVar>
      OffloadEntriesDeviceGlobalVarTy;
  OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};

/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
  /// Create a new OpenMPIRBuilder operating on the given module \p M. This
  /// will not have an effect on \p M (see initialize).
  OpenMPIRBuilder(Module &M)
      : M(M), Builder(M.getContext()), OffloadInfoManager(this) {}
  ~OpenMPIRBuilder();

  /// Initialize the internal state; this will put structure types and
  /// potentially other helpers into the underlying module. Must be called
  /// before any other method and only once! This internal state includes types
  /// used in the OpenMPIRBuilder generated from OMPKinds.def, as well as
  /// loading offload metadata for the device from the OpenMP host IR file
  /// passed in as the HostFilePath argument.
  /// \param HostFilePath The path to the host IR file, used to load in
  /// offload metadata for the device, allowing host and device to
  /// maintain the same metadata mapping.
  void initialize(StringRef HostFilePath = {});

  void setConfig(OpenMPIRBuilderConfig C) { Config = C; }

  /// Finalize the underlying module, e.g., by outlining regions.
  /// \param Fn                    The function to be finalized. If not used,
  ///                              all functions are finalized.
  void finalize(Function *Fn = nullptr);

  /// Add attributes known for \p FnID to \p Fn.
  void addAttributes(omp::RuntimeFunction FnID, Function &Fn);

  /// Type used throughout for insertion points.
  using InsertPointTy = IRBuilder<>::InsertPoint;

  /// Create a name using the platform-specific separators.
  /// \param Parts parts of the final name that need separation
  /// The created name has a first separator between the first and second part
  /// and a second separator between all other parts.
  /// E.g. with FirstSeparator "$" and Separator "." and
  /// parts: "p1", "p2", "p3", "p4"
  /// the resulting name is "p1$p2.p3.p4".
  /// The separators are retrieved from the OpenMPIRBuilderConfig.
  std::string createPlatformSpecificName(ArrayRef<StringRef> Parts) const;

  /// Callback type for variable finalization (think destructors).
  ///
  /// \param CodeGenIP is the insertion point at which the finalization code
  ///                  should be placed.
  ///
  /// A finalize callback knows about all objects that need finalization, e.g.
  /// destruction, when the scope of the currently generated construct is left
  /// at the time, and location, the callback is invoked.
  using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;

  struct FinalizationInfo {
    /// The finalization callback provided by the last in-flight invocation of
    /// createXXXX for the directive of kind DK.
    FinalizeCallbackTy FiniCB;

    /// The directive kind of the innermost directive that has an associated
    /// region which might require finalization when it is left.
    omp::Directive DK;

    /// Flag to indicate if the directive is cancellable.
    bool IsCancellable;
  };

  /// Push a finalization callback on the finalization stack.
  ///
  /// NOTE: Temporary solution until Clang CG is gone.
  void pushFinalizationCB(const FinalizationInfo &FI) {
    FinalizationStack.push_back(FI);
  }

  /// Pop the last finalization callback from the finalization stack.
  ///
  /// NOTE: Temporary solution until Clang CG is gone.
  void popFinalizationCB() { FinalizationStack.pop_back(); }

  /// Callback type for body (=inner region) code generation
  ///
  /// The callback takes code locations as arguments, each describing a
  /// location where additional instructions can be inserted.
  ///
  /// The CodeGenIP may be in the middle of a basic block or point to the end of
  /// it. The basic block may have a terminator or be degenerate. The callback
  /// function may just insert instructions at that position, but also split
  /// the block (without the Before argument of BasicBlock::splitBasicBlock
  /// such that the identity of the split predecessor block is preserved) and
  /// insert additional control flow, including branches that do not lead back
  /// to what follows the CodeGenIP. Note that since the callback is allowed to
  /// split the block, callers must assume that InsertPoints to positions in
  /// the BasicBlock after CodeGenIP including CodeGenIP itself are
  /// invalidated. If such InsertPoints need to be preserved, the caller can
  /// split the block itself before calling the callback.
  ///
  /// AllocaIP and CodeGenIP must not point to the same position.
  ///
  /// \param AllocaIP is the insertion point at which new alloca instructions
  ///                 should be placed. The BasicBlock it is pointing to must
  ///                 not be split.
  /// \param CodeGenIP is the insertion point at which the body code should be
  ///                  placed.
  using BodyGenCallbackTy =
      function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;

  // This is created primarily for the sections construct, as
  // llvm::function_ref (BodyGenCallbackTy) is not storable (as described in
  // the comments of the function_ref class): function_ref contains a
  // non-owning reference to the callable.
  using StorableBodyGenCallbackTy =
      std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;

  /// Callback type for loop body code generation.
  ///
  /// \param CodeGenIP is the insertion point where the loop's body code must be
  ///                  placed. This will be a dedicated BasicBlock with a
  ///                  conditional branch from the loop condition check and
  ///                  terminated with an unconditional branch to the loop
  ///                  latch.
  /// \param IndVar    is the induction variable usable at the insertion point.
  using LoopBodyGenCallbackTy =
      function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;

  /// Callback type for variable privatization (think copy & default
  /// constructor).
  ///
  /// \param AllocaIP is the insertion point at which new alloca instructions
  ///                 should be placed.
  /// \param CodeGenIP is the insertion point at which the privatization code
  ///                  should be placed.
  /// \param Original The value being copied/created, should not be used in the
  ///                 generated IR.
  /// \param Inner The equivalent of \p Original that should be used in the
  ///              generated IR; this is equal to \p Original if the value is
  ///              a pointer and can thus be passed directly, otherwise it is
  ///              an equivalent but different value.
  /// \param ReplVal The replacement value, thus a copy or new created version
  ///                of \p Inner.
  ///
  /// \returns The new insertion point where code generation continues and
  ///          \p ReplVal the replacement value.
  using PrivatizeCallbackTy = function_ref<InsertPointTy(
      InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
      Value &Inner, Value *&ReplVal)>;

  /// Description of a LLVM-IR insertion point (IP) and a debug/source location
  /// (filename, line, column, ...).
  struct LocationDescription {
    LocationDescription(const IRBuilderBase &IRB)
        : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
    LocationDescription(const InsertPointTy &IP) : IP(IP) {}
    LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
        : IP(IP), DL(DL) {}
    InsertPointTy IP;
    DebugLoc DL;
  };

  /// Emitter methods for OpenMP directives.
  ///
  ///{

  /// Generator for '#omp barrier'
  ///
  /// \param Loc The location where the barrier directive was encountered.
  /// \param DK The kind of directive that caused the barrier.
  /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
  /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
  ///                        should be checked and acted upon.
  ///
  /// \returns The insertion point after the barrier.
  InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
                              bool ForceSimpleCall = false,
                              bool CheckCancelFlag = true);
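
  // For illustration, a barrier for a worksharing region could be emitted as
  // follows (a sketch; OMPBuilder and Builder are assumed to be set up):
  //
  //   OpenMPIRBuilder::LocationDescription Loc(Builder);
  //   Builder.restoreIP(
  //       OMPBuilder.createBarrier(Loc, omp::Directive::OMPD_for));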

  /// Generator for '#omp cancel'
  ///
  /// \param Loc The location where the directive was encountered.
  /// \param IfCondition The evaluated 'if' clause expression, if any.
  /// \param CanceledDirective The kind of directive that is canceled.
  ///
  /// \returns The insertion point after the cancellation.
  InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
                             omp::Directive CanceledDirective);

  /// Generator for '#omp parallel'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion points to be used for alloca instructions.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param PrivCB Callback to copy a given variable (think copy constructor).
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IfCondition The evaluated 'if' clause expression, if any.
  /// \param NumThreads The evaluated 'num_threads' clause expression, if any.
  /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
  /// \param IsCancellable Flag to indicate a cancellable parallel region.
  ///
  /// \returns The insertion position *after* the parallel.
  IRBuilder<>::InsertPoint
  createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
                 BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
                 FinalizeCallbackTy FiniCB, Value *IfCondition,
                 Value *NumThreads, omp::ProcBindKind ProcBind,
                 bool IsCancellable);
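
  // A minimal sketch of emitting a parallel region without privatization
  // (Loc, AllocaIP, BodyGenCB, Builder, and OMPBuilder are placeholders):
  //
  //   auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
  //                    Value &, Value &Inner, Value *&ReplVal) {
  //     ReplVal = &Inner; // Do not privatize; reuse the value as-is.
  //     return CodeGenIP;
  //   };
  //   auto FiniCB = [](InsertPointTy CodeGenIP) { /* nothing to finalize */ };
  //   Builder.restoreIP(OMPBuilder.createParallel(
  //       Loc, AllocaIP, BodyGenCB, PrivCB, FiniCB, /*IfCondition=*/nullptr,
  //       /*NumThreads=*/nullptr, omp::ProcBindKind::OMP_PROC_BIND_default,
  //       /*IsCancellable=*/false));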

  /// Generator for the control flow structure of an OpenMP canonical loop.
  ///
  /// This generator operates on the logical iteration space of the loop, i.e.
  /// the caller only has to provide a loop trip count of the loop as defined by
  /// base language semantics. The trip count is interpreted as an unsigned
  /// integer. The induction variable passed to \p BodyGenCB will be of the same
  /// type and run from 0 to \p TripCount - 1. It is up to the callback to
  /// convert the logical iteration variable to the loop counter variable in the
  /// loop body.
  ///
  /// \param Loc       The insert and source location description. The insert
  ///                  location can be between two instructions or the end of a
  ///                  degenerate block (e.g. a BB under construction).
  /// \param BodyGenCB Callback that will generate the loop body code.
  /// \param TripCount Number of iterations the loop body is executed.
  /// \param Name      Base name used to derive BB and instruction names.
  ///
  /// \returns An object representing the created control flow structure which
  ///          can be used for loop-associated directives.
  CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
                                         LoopBodyGenCallbackTy BodyGenCB,
                                         Value *TripCount,
                                         const Twine &Name = "loop");
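
  // For example, a loop over a runtime trip count N (a sketch; N is any
  // integer-typed Value*, with Loc and Builder as in the sketches above):
  //
  //   auto BodyGen = [&](InsertPointTy CodeGenIP, Value *IV) {
  //     Builder.restoreIP(CodeGenIP);
  //     // ... emit the body, using IV as the logical iteration number ...
  //   };
  //   CanonicalLoopInfo *CLI =
  //       OMPBuilder.createCanonicalLoop(Loc, BodyGen, N);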

  /// Generator for the control flow structure of an OpenMP canonical loop.
  ///
  /// Instead of a logical iteration space, this allows specifying user-defined
  /// loop counter values using increment, upper- and lower bounds. To
  /// disambiguate the terminology when counting downwards, instead of lower
  /// bounds we use \p Start for the loop counter value in the first body
  /// iteration.
  ///
  /// Consider the following limitations:
  ///
  ///  * A loop counter space over all integer values of its bit-width cannot
  ///    be represented (e.g. using uint8_t, a loop trip count of 256 cannot be
  ///    stored in an 8-bit integer):
  ///
  ///      DO I = 0, 255, 1
  ///
  ///  * Unsigned wrapping is only supported when wrapping only "once"; E.g.
  ///    effectively counting downwards:
  ///
  ///      for (uint8_t i = 100u; i > 0; i += 127u)
  ///
  ///
  /// TODO: May need to add additional parameters to represent:
  ///
  ///  * Allow representing downcounting with unsigned integers.
  ///
  ///  * Sign of the step and the comparison operator might disagree:
  ///
  ///      for (int i = 0; i < 42; i -= 1u)
  ///
  ///
  /// \param Loc       The insert and source location description.
  /// \param BodyGenCB Callback that will generate the loop body code.
  /// \param Start     Value of the loop counter for the first iteration.
  /// \param Stop      Loop counter values past this will stop the loop.
  /// \param Step      Loop counter increment after each iteration; negative
  ///                  means counting down.
  /// \param IsSigned  Whether Start, Stop and Step are signed integers.
  /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
  ///                      counter.
  /// \param ComputeIP Insertion point for instructions computing the trip
  ///                  count. Can be used to ensure the trip count is available
  ///                  at the outermost loop of a loop nest. If not set,
  ///                  defaults to the preheader of the generated loop.
  /// \param Name      Base name used to derive BB and instruction names.
  ///
  /// \returns An object representing the created control flow structure which
  ///          can be used for loop-associated directives.
  CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
                                         LoopBodyGenCallbackTy BodyGenCB,
                                         Value *Start, Value *Stop, Value *Step,
                                         bool IsSigned, bool InclusiveStop,
                                         InsertPointTy ComputeIP = {},
                                         const Twine &Name = "loop");
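
  // E.g. a sketch of "for (int i = 0; i < N; i += 2)" using this overload
  // (N is a placeholder i32 Value*; BodyGen as in the sketch above):
  //
  //   Value *Start = Builder.getInt32(0);
  //   Value *Step = Builder.getInt32(2);
  //   CanonicalLoopInfo *CLI = OMPBuilder.createCanonicalLoop(
  //       Loc, BodyGen, Start, /*Stop=*/N, Step, /*IsSigned=*/true,
  //       /*InclusiveStop=*/false);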

  /// Collapse a loop nest into a single loop.
  ///
  /// Merges loops of a loop nest into a single CanonicalLoopInfo representation
  /// that has the same number of innermost loop iterations as the origin loop
  /// nest. The induction variables of the input loops are derived from the
  /// collapsed loop's induction variable. This is intended to be used to
  /// implement OpenMP's collapse clause. Before applying a directive,
  /// collapseLoops normalizes a loop nest to contain only a single loop and the
  /// directive's implementation does not need to handle multiple loops itself.
  /// This does not remove the need to handle all loop nest handling by
  /// directives, such as the ordered(<n>) clause or the simd schedule-clause
  /// modifier of the worksharing-loop directive.
  ///
  /// Example:
  /// \code
  ///   for (int i = 0; i < 7; ++i) // Canonical loop "i"
  ///     for (int j = 0; j < 9; ++j) // Canonical loop "j"
  ///       body(i, j);
  /// \endcode
  ///
  /// After collapsing with Loops={i,j}, the loop is changed to
  /// \code
  ///   for (int ij = 0; ij < 63; ++ij) {
  ///     int i = ij / 9;
  ///     int j = ij % 9;
  ///     body(i, j);
  ///   }
  /// \endcode
  ///
  /// In the current implementation, the following limitations apply:
  ///
  ///  * All input loops have an induction variable of the same type.
  ///
  ///  * The collapsed loop will have the same trip count integer type as the
  ///    input loops. Therefore it is possible that the collapsed loop cannot
  ///    represent all iterations of the input loops. For instance, assuming a
  ///    32-bit integer type, and two input loops both iterating 2^16 times,
  ///    the theoretical trip count of the collapsed loop would be 2^32
  ///    iterations, which cannot be represented in a 32-bit integer. Behavior
  ///    is undefined in this case.
  ///
  ///  * The trip counts of every input loop must be available at \p ComputeIP.
  ///    Non-rectangular loops are not yet supported.
  ///
  ///  * At each nest level, code between a surrounding loop and its nested loop
  ///    is hoisted into the loop body, and such code will be executed more
  ///    often than before collapsing (or not at all if any inner loop iteration
  ///    has a trip count of 0). This is permitted by the OpenMP specification.
  ///
  /// \param DL        Debug location for instructions added for collapsing,
  ///                  such as instructions to compute/derive the input loop's
  ///                  induction variables.
  /// \param Loops     Loops in the loop nest to collapse. Loops are specified
  ///                  from outermost-to-innermost and every control flow of a
  ///                  loop's body must pass through its directly nested loop.
  /// \param ComputeIP Where additional instructions that compute the collapsed
  ///                  trip count are inserted. If not set, defaults to before
  ///                  the generated loop.
  ///
  /// \returns The CanonicalLoopInfo object representing the collapsed loop.
  CanonicalLoopInfo *collapseLoops(DebugLoc DL,
                                   ArrayRef<CanonicalLoopInfo *> Loops,
                                   InsertPointTy ComputeIP);
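
  // Sketch: collapse a 2-deep nest created via createCanonicalLoop (Outer and
  // Inner are placeholders for the CanonicalLoopInfo of each loop):
  //
  //   CanonicalLoopInfo *Collapsed =
  //       OMPBuilder.collapseLoops(DL, {Outer, Inner}, /*ComputeIP=*/{});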

  /// Get the default alignment value for given target
  ///
  /// \param TargetTriple   Target triple
  /// \param Features       StringMap which describes extra CPU features
  static unsigned getOpenMPDefaultSimdAlign(const Triple &TargetTriple,
                                            const StringMap<bool> &Features);

  /// Retrieve (or create if non-existent) the address of a declare
  /// target variable, used in conjunction with registerTargetGlobalVariable
  /// to create declare target global variables.
  ///
  /// \param CaptureClause - enumerator corresponding to the OpenMP capture
  /// clause used in conjunction with the variable being registered (link,
  /// to, enter).
  /// \param DeviceClause - enumerator corresponding to the OpenMP capture
  /// clause used in conjunction with the variable being registered (nohost,
  /// host, any)
  /// \param IsDeclaration - boolean stating if the variable being registered
  /// is a declaration-only and not a definition
  /// \param IsExternallyVisible - boolean stating if the variable is externally
  /// visible
  /// \param EntryInfo - Unique entry information for the value generated
  /// using getTargetEntryUniqueInfo, used to name generated pointer references
  /// to the declare target variable
  /// \param MangledName - the mangled name of the variable being registered
  /// \param GeneratedRefs - references generated by invocations of
  /// registerTargetGlobalVariable invoked from getAddrOfDeclareTargetVar;
  /// these are required by Clang for bookkeeping.
  /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
  /// \param TargetTriple - The OpenMP device target triple we are compiling
  /// for
  /// \param LlvmPtrTy - The type of the variable we are generating or
  /// retrieving an address for
  /// \param GlobalInitializer - a lambda function which creates a constant
  /// used for initializing a pointer reference to the variable in certain
  /// cases. If a nullptr is passed, it will default to utilising the original
  /// variable to initialize the pointer reference.
  /// \param VariableLinkage - a lambda function which returns the variable's
  /// linkage type; if unspecified and a nullptr is given, it will instead
  /// utilise the linkage stored on the existing global variable in the
  /// LLVMModule.
  Constant *getAddrOfDeclareTargetVar(
      OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause,
      OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause,
      bool IsDeclaration, bool IsExternallyVisible,
      TargetRegionEntryInfo EntryInfo, StringRef MangledName,
      std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
      std::vector<Triple> TargetTriple, Type *LlvmPtrTy,
      std::function<Constant *()> GlobalInitializer,
      std::function<GlobalValue::LinkageTypes()> VariableLinkage);

  /// Registers a target variable for device or host.
  ///
  /// \param CaptureClause - enumerator corresponding to the OpenMP capture
  /// clause used in conjunction with the variable being registered (link,
  /// to, enter).
  /// \param DeviceClause - enumerator corresponding to the OpenMP capture
  /// clause used in conjunction with the variable being registered (nohost,
  /// host, any)
  /// \param IsDeclaration - boolean stating if the variable being registered
  /// is a declaration-only and not a definition
  /// \param IsExternallyVisible - boolean stating if the variable is externally
  /// visible
  /// \param EntryInfo - Unique entry information for the value generated
  /// using getTargetEntryUniqueInfo, used to name generated pointer references
  /// to the declare target variable
  /// \param MangledName - the mangled name of the variable being registered
  /// \param GeneratedRefs - references generated by invocations of
  /// registerTargetGlobalVariable; these are required by Clang for
  /// bookkeeping.
  /// \param OpenMPSIMD - if OpenMP SIMD mode is currently enabled
  /// \param TargetTriple - The OpenMP device target triple we are compiling
  /// for
  /// \param GlobalInitializer - a lambda function which creates a constant
  /// used for initializing a pointer reference to the variable in certain
  /// cases. If a nullptr is passed, it will default to utilising the original
  /// variable to initialize the pointer reference.
  /// \param VariableLinkage - a lambda function which returns the variable's
  /// linkage type; if unspecified and a nullptr is given, it will instead
  /// utilise the linkage stored on the existing global variable in the
  /// LLVMModule.
  /// \param LlvmPtrTy - The type of the variable we are generating or
  /// retrieving an address for
  /// \param Addr - the original llvm value (addr) of the variable to be
  /// registered
  void registerTargetGlobalVariable(
      OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind CaptureClause,
      OffloadEntriesInfoManager::OMPTargetDeviceClauseKind DeviceClause,
      bool IsDeclaration, bool IsExternallyVisible,
      TargetRegionEntryInfo EntryInfo, StringRef MangledName,
      std::vector<GlobalVariable *> &GeneratedRefs, bool OpenMPSIMD,
      std::vector<Triple> TargetTriple,
      std::function<Constant *()> GlobalInitializer,
      std::function<GlobalValue::LinkageTypes()> VariableLinkage,
      Type *LlvmPtrTy, Constant *Addr);

private:
  /// Modifies the canonical loop to be a statically-scheduled workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls to an OpenMP
  /// runtime function in the preheader to obtain the loop bounds to be used in
  /// the current thread, updates the relevant instructions in the canonical
  /// loop and calls to an OpenMP runtime finalization function after the loop.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                         InsertPointTy AllocaIP,
                                         bool NeedsBarrier);

  /// Modifies the canonical loop to be a statically-scheduled workshare loop
  /// with a user-specified chunk size.
  ///
  /// \param DL           Debug location for instructions added for the
  ///                     workshare-loop construct itself.
  /// \param CLI          A descriptor of the canonical loop to workshare.
  /// \param AllocaIP     An insertion point for Alloca instructions usable in
  ///                     the preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after the
  ///                     loop.
  /// \param ChunkSize    The user-specified chunk size.
  ///
  /// \returns Point where to insert code after the workshare construct.
  InsertPointTy applyStaticChunkedWorkshareLoop(DebugLoc DL,
                                                CanonicalLoopInfo *CLI,
                                                InsertPointTy AllocaIP,
                                                bool NeedsBarrier,
                                                Value *ChunkSize);

  /// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
  ///
  /// This takes a \p LoopInfo representing a canonical loop, such as the one
  /// created by \p createCanonicalLoop and emits additional instructions to
  /// turn it into a workshare loop. In particular, it adds calls to an OpenMP
  /// runtime function in the preheader to initialize, and then in each
  /// iteration to update, the loop counter.
  ///
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param SchedType Type of scheduling to be passed to the init function.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param Chunk    The size of a loop chunk considered as a unit when
  ///                 scheduling. If \p Chunk is nullptr, it defaults to 1.
  ///
  /// \returns The point at which to insert code after the workshare construct.
  InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
                                          InsertPointTy AllocaIP,
                                          omp::OMPScheduleType SchedType,
                                          bool NeedsBarrier,
                                          Value *Chunk = nullptr);

  /// Create an alternative version of the loop to support the if clause.
  ///
  /// An OpenMP if clause can require generating a second loop, which is
  /// executed when the if clause condition is not met. createIfVersion adds a
  /// branch instruction into the copied loop that is taken when \p IfCond is
  /// not met.
  ///
  /// \param Loop       Original loop which should be versioned.
  /// \param IfCond     Value which corresponds to the if clause condition.
  /// \param VMap       Value-to-value map defining the relation between the
  ///                   original and copied loop values and loop blocks.
  /// \param NamePrefix Optional name prefix for the if.then and if.else
  ///                   blocks.
  void createIfVersion(CanonicalLoopInfo *Loop, Value *IfCond,
                       ValueToValueMapTy &VMap, const Twine &NamePrefix = "");

public:
  /// Modifies the canonical loop to be a workshare loop.
  ///
  /// This takes \p CLI, a descriptor of a canonical loop such as the one
  /// created by \p createCanonicalLoop, and emits additional instructions to
  /// turn it into a workshare loop. In particular, it calls an OpenMP runtime
  /// function in the preheader to obtain the loop bounds to be used in the
  /// current thread, updates the relevant instructions in the canonical
  /// loop, and calls an OpenMP runtime finalization function after the loop.
  ///
  /// The concrete transformation is done by applyStaticWorkshareLoop,
  /// applyStaticChunkedWorkshareLoop, or applyDynamicWorkshareLoop, depending
  /// on the value of \p SchedKind and \p ChunkSize.
  ///
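  /// A minimal call sketch (OMPBuilder, Builder, DL, CLI, and AllocaIP are
  /// assumed to have been set up elsewhere, e.g. via createCanonicalLoop):
  /// \code
  ///   InsertPointTy AfterIP = OMPBuilder.applyWorkshareLoop(
  ///       DL, CLI, AllocaIP, /*NeedsBarrier=*/true,
  ///       llvm::omp::OMP_SCHEDULE_Static);
  ///   Builder.restoreIP(AfterIP);
  /// \endcode
  ///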
  /// \param DL       Debug location for instructions added for the
  ///                 workshare-loop construct itself.
  /// \param CLI      A descriptor of the canonical loop to workshare.
  /// \param AllocaIP An insertion point for Alloca instructions usable in the
  ///                 preheader of the loop.
  /// \param NeedsBarrier Indicates whether a barrier must be inserted after
  ///                     the loop.
  /// \param SchedKind Scheduling algorithm to use.
  /// \param ChunkSize The chunk size for the inner loop.
  /// \param HasSimdModifier Whether the simd modifier is present in the
  ///                        schedule clause.
  /// \param HasMonotonicModifier Whether the monotonic modifier is present in
  ///                             the schedule clause.
  /// \param HasNonmonotonicModifier Whether the nonmonotonic modifier is
  ///                                present in the schedule clause.
  /// \param HasOrderedClause Whether the (parameterless) ordered clause is
  ///                         present.
  ///
  /// \returns The point at which to insert code after the workshare construct.
  InsertPointTy applyWorkshareLoop(
      DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP,
      bool NeedsBarrier,
      llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default,
      Value *ChunkSize = nullptr, bool HasSimdModifier = false,
      bool HasMonotonicModifier = false, bool HasNonmonotonicModifier = false,
      bool HasOrderedClause = false);

  /// Tile a loop nest.
  ///
  /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
  /// \p Loops must be perfectly nested, from outermost to innermost loop
  /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
  /// of every loop and every tile size must be usable in the outermost
  /// loop's preheader. This implies that the loop nest is rectangular.
  ///
  /// Example:
  /// \code
  ///   for (int i = 0; i < 15; ++i) // Canonical loop "i"
  ///     for (int j = 0; j < 14; ++j) // Canonical loop "j"
  ///         body(i, j);
  /// \endcode
  ///
  /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
  /// \code
  ///   for (int i1 = 0; i1 < 3; ++i1)
  ///     for (int j1 = 0; j1 < 2; ++j1)
  ///       for (int i2 = 0; i2 < 5; ++i2)
  ///         for (int j2 = 0; j2 < 7; ++j2)
  ///           body(i1*5+i2, j1*7+j2);
  /// \endcode
  ///
  /// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
  /// are referred to as the floor loops, and the loops i2 and j2 are the tile
  /// loops. Tiling also handles non-constant trip counts, non-constant tile
  /// sizes and trip counts that are not multiples of the tile size. In the
  /// latter case the tile loop of the last floor-loop iteration will have
  /// fewer iterations than specified as its tile size.
  ///
  /// \param DL        Debug location for instructions added by tiling, for
  ///                  instance the floor- and tile-trip-count computation.
  /// \param Loops     Loops to tile. The CanonicalLoopInfo objects are
  ///                  invalidated by this method, i.e. should not be used
  ///                  after tiling.
  /// \param TileSizes For each loop in \p Loops, the tile size for that
  ///                  dimension.
  ///
  /// \returns A list of generated loops. Contains twice as many loops as the
  ///          input loop nest; the first half are the floor loops and the
  ///          second half are the tile loops.
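  ///
  /// A minimal call sketch (OMPBuilder, Builder, DL, and CLI are assumed; the
  /// tile size value is illustrative):
  /// \code
  ///   Value *TileSize = Builder.getInt64(5);
  ///   std::vector<CanonicalLoopInfo *> NewLoops =
  ///       OMPBuilder.tileLoops(DL, {CLI}, {TileSize});
  ///   CanonicalLoopInfo *Floor = NewLoops[0], *Tile = NewLoops[1];
  /// \endcode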
  std::vector<CanonicalLoopInfo *>
  tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
            ArrayRef<Value *> TileSizes);

  /// Fully unroll a loop.
  ///
  /// Instead of unrolling the loop immediately (and duplicating its body
  /// instructions), unrolling is deferred to LLVM's LoopUnrollPass by adding
  /// loop metadata.
  ///
  /// \param DL   Debug location for instructions added by unrolling.
  /// \param Loop The loop to unroll. The loop will be invalidated.
  void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);

  /// Fully or partially unroll a loop. How the loop is unrolled is determined
  /// using LLVM's LoopUnrollPass.
  ///
  /// \param DL   Debug location for instructions added by unrolling.
  /// \param Loop The loop to unroll. The loop will be invalidated.
  void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);

  /// Partially unroll a loop.
  ///
  /// The CanonicalLoopInfo of the unrolled loop, for use with a chained
  /// loop-associated directive, can be requested using \p UnrolledCLI. Not
  /// needing the CanonicalLoopInfo allows more efficient code generation by
  /// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
  /// A loop-associated directive applied to the unrolled loop needs to know
  /// the new trip count, which means that if a heuristically determined unroll
  /// factor is used (\p Factor == 0), that factor must be computed
  /// immediately. We use the same logic as the LoopUnrollPass to derive the
  /// unroll factor, which assumes that some canonicalization has taken place
  /// (e.g. Mem2Reg, LICM, GVN, inlining, etc.). That is, the heuristic will
  /// perform better when the unrolled loop's CanonicalLoopInfo is not needed.
  ///
  /// \param DL          Debug location for instructions added by unrolling.
  /// \param Loop        The loop to unroll. The loop will be invalidated.
  /// \param Factor      The factor to unroll the loop by. A factor of 0
  ///                    indicates that a heuristic should be used to determine
  ///                    the unroll-factor.
  /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
  ///                    partially unrolled loop. Otherwise, uses loop metadata
  ///                    to defer unrolling to the LoopUnrollPass.
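  ///
  /// A minimal call sketch (OMPBuilder, DL, and Loop are assumed):
  /// \code
  ///   CanonicalLoopInfo *Unrolled = nullptr;
  ///   OMPBuilder.unrollLoopPartial(DL, Loop, /*Factor=*/4, &Unrolled);
  ///   // Unrolled now describes the partially unrolled loop and can be
  ///   // used by a chained loop-associated directive.
  /// \endcode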
  void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
                         CanonicalLoopInfo **UnrolledCLI);

  /// Add metadata to simd-ize a loop. If \p IfCond is not nullptr, the loop
  /// is cloned, and metadata which prevents vectorization is added to the
  /// cloned loop. The cloned loop is executed when \p IfCond evaluates to
  /// false.
  ///
  /// \param Loop        The loop to simd-ize.
  /// \param AlignedVars The map which contains pairs of each pointer
  ///                    and its corresponding alignment.
  /// \param IfCond      The value which corresponds to the if clause
  ///                    condition.
  /// \param Order       The enum to map order clause.
  /// \param Simdlen     The Simdlen length to apply to the simd loop.
  /// \param Safelen     The Safelen length to apply to the simd loop.
  void applySimd(CanonicalLoopInfo *Loop,
                 MapVector<Value *, Value *> AlignedVars, Value *IfCond,
                 omp::OrderKind Order, ConstantInt *Simdlen,
                 ConstantInt *Safelen);

  /// Generator for '#omp flush'
  ///
  /// \param Loc The location where the flush directive was encountered.
  void createFlush(const LocationDescription &Loc);

  /// Generator for '#omp taskwait'
  ///
  /// \param Loc The location where the taskwait directive was encountered.
  void createTaskwait(const LocationDescription &Loc);

  /// Generator for '#omp taskyield'
  ///
  /// \param Loc The location where the taskyield directive was encountered.
  void createTaskyield(const LocationDescription &Loc);

  /// A struct to pack the relevant information for an OpenMP depend clause.
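  /// For illustration, a depend(in: x) entry might be packed like this
  /// (a sketch; Int32Ty and X are assumed to exist in the caller):
  /// \code
  ///   DependData Dep(omp::RTLDependenceKindTy::DepIn, Int32Ty, X);
  /// \endcode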
  struct DependData {
    omp::RTLDependenceKindTy DepKind = omp::RTLDependenceKindTy::DepUnknown;
    Type *DepValueType;
    Value *DepVal;
    explicit DependData() = default;
    DependData(omp::RTLDependenceKindTy DepKind, Type *DepValueType,
               Value *DepVal)
        : DepKind(DepKind), DepValueType(DepValueType), DepVal(DepVal) {}
  };

  /// Generator for `#omp task`
  ///
  /// \param Loc The location where the task construct was encountered.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param Tied True if the task is tied, false if the task is untied.
  /// \param Final i1 value which is `true` if the task is final, `false` if the
  ///              task is not final.
  /// \param IfCondition i1 value. If it evaluates to `false`, an undeferred
  ///                    task is generated, and the encountering thread must
  ///                    suspend the current task region, for which execution
  ///                    cannot be resumed until execution of the structured
  ///                    block that is associated with the generated task is
  ///                    completed.
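  ///
  /// A minimal call sketch (OMPBuilder, Builder, Loc, AllocaIP, and BodyGenCB
  /// are assumed):
  /// \code
  ///   Builder.restoreIP(OMPBuilder.createTask(Loc, AllocaIP, BodyGenCB,
  ///                                           /*Tied=*/true));
  /// \endcode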
  InsertPointTy createTask(const LocationDescription &Loc,
                           InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB,
                           bool Tied = true, Value *Final = nullptr,
                           Value *IfCondition = nullptr,
                           SmallVector<DependData> Dependencies = {});

  /// Generator for the taskgroup construct
  ///
  /// \param Loc The location where the taskgroup construct was encountered.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param BodyGenCB Callback that will generate the region code.
  InsertPointTy createTaskgroup(const LocationDescription &Loc,
                                InsertPointTy AllocaIP,
                                BodyGenCallbackTy BodyGenCB);

  using FileIdentifierInfoCallbackTy =
      std::function<std::tuple<std::string, uint64_t>()>;

  /// Creates unique info for a target entry from a provided filename and
  /// line number.
  ///
  /// \param CallBack A callback function which should return the filename
  /// the entry resides in, as well as the line number for the target entry.
  /// \param ParentName The name of the parent the target entry resides in, if
  /// any.
  static TargetRegionEntryInfo
  getTargetEntryUniqueInfo(FileIdentifierInfoCallbackTy CallBack,
                           StringRef ParentName = "");

  /// Functions used to generate reductions. Such functions take two Values
  /// representing LHS and RHS of the reduction, respectively, and a reference
  /// to the value that is updated to refer to the reduction result.
  using ReductionGenTy =
      function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;

  /// Functions used to generate atomic reductions. Such functions take two
  /// Values representing pointers to LHS and RHS of the reduction, as well as
  /// the element type of these pointers. They are expected to atomically
  /// update the LHS to the reduced value.
  using AtomicReductionGenTy =
      function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
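  // For illustration, a (non-atomic) sum reduction generator could look like
  // this sketch, where Builder is assumed to be the OpenMPIRBuilder's
  // IRBuilder:
  //   auto SumGen = [&](InsertPointTy IP, Value *LHS, Value *RHS,
  //                     Value *&Res) -> InsertPointTy {
  //     Builder.restoreIP(IP);
  //     Res = Builder.CreateAdd(LHS, RHS, "red.add");
  //     return Builder.saveIP();
  //   };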

  /// Information about an OpenMP reduction.
  struct ReductionInfo {
    ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
                  ReductionGenTy ReductionGen,
                  AtomicReductionGenTy AtomicReductionGen)
        : ElementType(ElementType), Variable(Variable),
          PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
          AtomicReductionGen(AtomicReductionGen) {}

    /// Reduction element type, must match pointee type of variable.
    Type *ElementType;

    /// Reduction variable of pointer type.
    Value *Variable;

    /// Thread-private partial reduction variable.
    Value *PrivateVariable;

    /// Callback for generating the reduction body. The IR produced by this will
    /// be used to combine two values in a thread-safe context, e.g., under
    /// lock or within the same thread, and therefore need not be atomic.
    ReductionGenTy ReductionGen;

    /// Callback for generating the atomic reduction body, may be null. The IR
    /// produced by this will be used to atomically combine two values during
    /// reduction. If null, the implementation will use the non-atomic version
    /// along with the appropriate synchronization mechanisms.
    AtomicReductionGenTy AtomicReductionGen;
  };

  // TODO: provide atomic and non-atomic reduction generators for reduction
  // operators defined by the OpenMP specification.

  /// Generator for '#omp reduction'.
  ///
  /// Emits the IR instructing the runtime to perform the specified kinds of
  /// reductions. Expects reduction variables to have been privatized and
  /// initialized to reduction-neutral values separately. Emits the calls to
  /// runtime functions as well as the reduction function and the basic blocks
  /// performing the reduction atomically and non-atomically.
  ///
  /// The code emitted for the following:
  ///
  /// \code
  ///   type var_1;
  ///   type var_2;
  ///   #pragma omp <directive> reduction(reduction-op:var_1,var_2)
  ///   /* body */;
  /// \endcode
  ///
  /// corresponds to the following sketch.
  ///
  /// \code
  /// void _outlined_par() {
  ///   // N is the number of different reductions.
  ///   void *red_array[] = {privatized_var_1, privatized_var_2, ...};
  ///   switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
  ///                        _omp_reduction_func,
  ///                        _gomp_critical_user.reduction.var)) {
  ///   case 1: {
  ///     var_1 = var_1 <reduction-op> privatized_var_1;
  ///     var_2 = var_2 <reduction-op> privatized_var_2;
  ///     // ...
  ///     __kmpc_end_reduce(...);
  ///     break;
  ///   }
  ///   case 2: {
  ///     _Atomic<ReductionOp>(var_1, privatized_var_1);
  ///     _Atomic<ReductionOp>(var_2, privatized_var_2);
  ///     // ...
  ///     break;
  ///   }
  ///   default: break;
  ///   }
  /// }
  ///
  /// void _omp_reduction_func(void **lhs, void **rhs) {
  ///   *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
  ///   *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
  ///   // ...
  /// }
  /// \endcode
  ///
  /// \param Loc                The location where the reduction was
  ///                           encountered. Must be within the associated
  ///                           directive and after the last local access to
  ///                           the reduction variables.
  /// \param AllocaIP           An insertion point suitable for allocas usable
  ///                           in reductions.
  /// \param ReductionInfos     A list of info on each reduction variable.
  /// \param IsNoWait           A flag set if the reduction is marked as nowait.
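  ///
  /// A minimal call sketch for a single sum reduction (Int32Ty, Var, PrivVar,
  /// SumGen, Builder, and OMPBuilder are assumed; a null atomic generator
  /// selects the non-atomic fallback):
  /// \code
  ///   OpenMPIRBuilder::ReductionInfo RI(Int32Ty, Var, PrivVar, SumGen,
  ///                                     /*AtomicReductionGen=*/nullptr);
  ///   Builder.restoreIP(OMPBuilder.createReductions(Loc, AllocaIP, {RI}));
  /// \endcode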
  InsertPointTy createReductions(const LocationDescription &Loc,
                                 InsertPointTy AllocaIP,
                                 ArrayRef<ReductionInfo> ReductionInfos,
                                 bool IsNoWait = false);

  ///}

  /// Return the insertion point used by the underlying IRBuilder.
  InsertPointTy getInsertionPoint() { return Builder.saveIP(); }

  /// Update the internal location to \p Loc.
  bool updateToLocation(const LocationDescription &Loc) {
    Builder.restoreIP(Loc.IP);
    Builder.SetCurrentDebugLocation(Loc.DL);
    return Loc.IP.getBlock() != nullptr;
  }

  /// Return the function declaration for the runtime function with \p FnID.
  FunctionCallee getOrCreateRuntimeFunction(Module &M,
                                            omp::RuntimeFunction FnID);

  Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);

  /// Return the (LLVM-IR) string describing the source location \p LocStr.
  Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);

  /// Return the (LLVM-IR) string describing the default source location.
  Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);

  /// Return the (LLVM-IR) string describing the source location identified by
  /// the arguments.
  Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
                                 unsigned Line, unsigned Column,
                                 uint32_t &SrcLocStrSize);

  /// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
  /// fallback if \p DL does not specify the function name.
  Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
                                 Function *F = nullptr);

  /// Return the (LLVM-IR) string describing the source location \p Loc.
  Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
                                 uint32_t &SrcLocStrSize);

  /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
  /// TODO: Create an enum class for the Reserve2Flags
  Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
                             omp::IdentFlag Flags = omp::IdentFlag(0),
                             unsigned Reserve2Flags = 0);
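
  // Typical usage when emitting a runtime call (a sketch; OMPBuilder and a
  // LocationDescription Loc are assumed):
  //   uint32_t SrcLocStrSize;
  //   Constant *SrcLocStr =
  //       OMPBuilder.getOrCreateSrcLocStr(Loc, SrcLocStrSize);
  //   Constant *Ident = OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize);
  //   Value *ThreadID = OMPBuilder.getOrCreateThreadID(Ident);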

  /// Create a hidden global flag \p Name in the module with initial value \p
  /// Value.
  GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);

  /// Create an offloading section struct used to register this global at
  /// runtime.
  ///
  /// \code
  /// struct __tgt_offload_entry {
  ///   void    *addr;      // Pointer to the offload entry info
  ///                       // (function or global).
  ///   char    *name;      // Name of the function or global.
  ///   size_t  size;       // Size of the entry info (0 if it is a function).
  ///   int32_t flags;
  ///   int32_t reserved;
  /// };
  /// \endcode
  ///
  /// \param Addr The pointer to the global being registered.
  /// \param Name The symbol name associated with the global.
  /// \param Size The size in bytes of the global (0 for functions).
  /// \param Flags Flags associated with the entry.
  /// \param SectionName The section this entry will be placed at.
  void emitOffloadingEntry(Constant *Addr, StringRef Name, uint64_t Size,
                           int32_t Flags,
                           StringRef SectionName = "omp_offloading_entries");

  /// Generate control flow and cleanup for cancellation.
  ///
  /// \param CancelFlag Flag indicating if the cancellation is performed.
  /// \param CanceledDirective The kind of directive that is canceled.
  /// \param ExitCB Extra code to be generated in the exit block.
  void emitCancelationCheckImpl(Value *CancelFlag,
                                omp::Directive CanceledDirective,
                                FinalizeCallbackTy ExitCB = {});

  /// Generate a target region entry call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param Return Return value of the created function returned by reference.
  /// \param DeviceID Identifier for the device via the 'device' clause.
  /// \param NumTeams Number of teams for the region via the 'num_teams' clause
  ///                 or 0 if unspecified and -1 if there is no 'teams' clause.
  /// \param NumThreads Number of threads via the 'thread_limit' clause.
  /// \param HostPtr Pointer to the host-side pointer of the target kernel.
  /// \param KernelArgs Array of arguments to the kernel.
  InsertPointTy emitTargetKernel(const LocationDescription &Loc,
                                 InsertPointTy AllocaIP, Value *&Return,
                                 Value *Ident, Value *DeviceID, Value *NumTeams,
                                 Value *NumThreads, Value *HostPtr,
                                 ArrayRef<Value *> KernelArgs);

  /// Generate a barrier runtime call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  /// \param DK The directive which caused the barrier
  /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
  /// \param CheckCancelFlag Flag to indicate a cancel barrier return value
  ///                        should be checked and acted upon.
  ///
  /// \returns The insertion point after the barrier.
  InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
                                omp::Directive DK, bool ForceSimpleCall,
                                bool CheckCancelFlag);

  /// Generate a flush runtime call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  void emitFlush(const LocationDescription &Loc);

  /// The finalization stack, made up of finalize callbacks currently
  /// in-flight, wrapped into FinalizationInfo objects that also reference the
  /// finalization target block and the kind of cancellable directive.
  SmallVector<FinalizationInfo, 8> FinalizationStack;

  /// Return true if the last entry in the finalization stack is of kind \p DK
  /// and cancellable.
  bool isLastFinalizationInfoCancellable(omp::Directive DK) {
    return !FinalizationStack.empty() &&
           FinalizationStack.back().IsCancellable &&
           FinalizationStack.back().DK == DK;
  }

  /// Generate a taskwait runtime call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  void emitTaskwaitImpl(const LocationDescription &Loc);

  /// Generate a taskyield runtime call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  void emitTaskyieldImpl(const LocationDescription &Loc);

  /// Return the current thread ID.
  ///
  /// \param Ident The ident (ident_t*) describing the query origin.
  Value *getOrCreateThreadID(Value *Ident);

  /// The OpenMPIRBuilder Configuration
  OpenMPIRBuilderConfig Config;

  /// The underlying LLVM-IR module
  Module &M;

  /// The LLVM-IR Builder used to create IR.
  IRBuilder<> Builder;

  /// Map to remember source location strings
  StringMap<Constant *> SrcLocStrMap;

  /// Map to remember existing ident_t*.
  DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;

  /// Info manager to keep track of target regions.
  OffloadEntriesInfoManager OffloadInfoManager;

  /// Helper that contains information about regions we need to outline
  /// during finalization.
  struct OutlineInfo {
    using PostOutlineCBTy = std::function<void(Function &)>;
    PostOutlineCBTy PostOutlineCB;
    BasicBlock *EntryBB, *ExitBB, *OuterAllocaBB;
    SmallVector<Value *, 2> ExcludeArgsFromAggregate;

    /// Collect all blocks in between EntryBB and ExitBB in both the given
    /// vector and set.
    void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
                       SmallVectorImpl<BasicBlock *> &BlockVector);

    /// Return the function that contains the region to be outlined.
    Function *getFunction() const { return EntryBB->getParent(); }
  };

  /// Collection of regions that need to be outlined during finalization.
  SmallVector<OutlineInfo, 16> OutlineInfos;

  /// Collection of owned canonical loop objects that eventually need to be
  /// freed.
  std::forward_list<CanonicalLoopInfo> LoopInfos;

  /// Add a new region that will be outlined later.
  void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
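
  // A typical outlining request (a sketch; EntryBB, ExitBB, and OuterAllocaBB
  // are assumed to delimit the region to outline):
  //   OutlineInfo OI;
  //   OI.EntryBB = EntryBB;
  //   OI.ExitBB = ExitBB;
  //   OI.OuterAllocaBB = OuterAllocaBB;
  //   OI.PostOutlineCB = [](Function &OutlinedFn) {
  //     OutlinedFn.addFnAttr(Attribute::NoInline);
  //   };
  //   addOutlineInfo(std::move(OI));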

  /// An ordered map of auto-generated variables to their unique names.
  /// It stores variables with the following names: 1) ".gomp_critical_user_" +
  /// <critical_section_name> + ".var" for "omp critical" directives; 2)
  /// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
  /// variables.
  StringMap<GlobalVariable *, BumpPtrAllocator> InternalVars;

  /// Computes the size of type in bytes.
  Value *getSizeInBytes(Value *BasePtr);

  // Emit a branch from the current block to the Target block only if
  // the current block does not have a terminator.
  void emitBranch(BasicBlock *Target);

  // If BB has no use then delete it and return. Else place BB after the current
  // block, if possible, or else at the end of the function. Also add a branch
  // from current block to BB if current block does not have a terminator.
  void emitBlock(BasicBlock *BB, Function *CurFn, bool IsFinished = false);

  /// Emits code for an OpenMP 'if' clause using the specified
  /// \a BodyGenCallbackTy callbacks. Here is the logic:
  /// \code
  /// if (Cond) {
  ///   ThenGen();
  /// } else {
  ///   ElseGen();
  /// }
  /// \endcode
  void emitIfClause(Value *Cond, BodyGenCallbackTy ThenGen,
                    BodyGenCallbackTy ElseGen, InsertPointTy AllocaIP = {});

  /// Create the global variable holding the offload mappings information.
  GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
                                        std::string VarName);

  /// Create the global variable holding the offload names information.
  GlobalVariable *
  createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
                        std::string VarName);

  struct MapperAllocas {
    AllocaInst *ArgsBase = nullptr;
    AllocaInst *Args = nullptr;
    AllocaInst *ArgSizes = nullptr;
  };

  /// Create the alloca instructions used in calls to the mapper functions.
  void createMapperAllocas(const LocationDescription &Loc,
                           InsertPointTy AllocaIP, unsigned NumOperands,
                           struct MapperAllocas &MapperAllocas);

  /// Create the call for the target mapper function.
  /// \param Loc The source location description.
  /// \param MapperFunc Function to be called.
  /// \param SrcLocInfo Source location information global.
  /// \param MaptypesArg The map types array argument.
  /// \param MapnamesArg The map names array argument.
  /// \param MapperAllocas The AllocaInst used for the call.
  /// \param DeviceID Device ID for the call.
  /// \param NumOperands Number of operands in the call.
  void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
                      Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
                      struct MapperAllocas &MapperAllocas, int64_t DeviceID,
                      unsigned NumOperands);

  /// Container for the arguments used to pass data to the runtime library.
  struct TargetDataRTArgs {
    /// The array of base pointers passed to the runtime library.
    Value *BasePointersArray = nullptr;
    /// The array of section pointers passed to the runtime library.
    Value *PointersArray = nullptr;
    /// The array of sizes passed to the runtime library.
    Value *SizesArray = nullptr;
    /// The array of map types passed to the runtime library for the beginning
    /// of the region or for the entire region if there are no separate map
    /// types for the region end.
    Value *MapTypesArray = nullptr;
    /// The array of map types passed to the runtime library for the end of the
    /// region, or nullptr if there are no separate map types for the region
    /// end.
    Value *MapTypesArrayEnd = nullptr;
    /// The array of user-defined mappers passed to the runtime library.
    Value *MappersArray = nullptr;
    /// The array of original declaration names of mapped pointers sent to the
    /// runtime library for debugging.
    Value *MapNamesArray = nullptr;

    explicit TargetDataRTArgs() {}
    explicit TargetDataRTArgs(Value *BasePointersArray, Value *PointersArray,
                              Value *SizesArray, Value *MapTypesArray,
                              Value *MapTypesArrayEnd, Value *MappersArray,
                              Value *MapNamesArray)
        : BasePointersArray(BasePointersArray), PointersArray(PointersArray),
          SizesArray(SizesArray), MapTypesArray(MapTypesArray),
          MapTypesArrayEnd(MapTypesArrayEnd), MappersArray(MappersArray),
          MapNamesArray(MapNamesArray) {}
  };

  /// Data structure that contains the needed information to construct the
  /// kernel args vector.
  struct TargetKernelArgs {
    /// Number of arguments passed to the runtime library.
    unsigned NumTargetItems;
    /// Arguments passed to the runtime library
    TargetDataRTArgs RTArgs;
    /// The number of iterations
    Value *NumIterations;
    /// The number of teams.
    Value *NumTeams;
    /// The number of threads.
    Value *NumThreads;
    /// The size of the dynamic shared memory.
    Value *DynCGGroupMem;
    /// True if the kernel has 'no wait' clause.
    bool HasNoWait;

    /// Constructor for TargetKernelArgs
    TargetKernelArgs(unsigned NumTargetItems, TargetDataRTArgs RTArgs,
                     Value *NumIterations, Value *NumTeams, Value *NumThreads,
                     Value *DynCGGroupMem, bool HasNoWait)
        : NumTargetItems(NumTargetItems), RTArgs(RTArgs),
          NumIterations(NumIterations), NumTeams(NumTeams),
          NumThreads(NumThreads), DynCGGroupMem(DynCGGroupMem),
          HasNoWait(HasNoWait) {}
  };

  /// Create the kernel args vector used by emitTargetKernel. This function
  /// creates various constant values that are used in the resulting args
  /// vector.
  static void getKernelArgsVector(TargetKernelArgs &KernelArgs,
                                  IRBuilderBase &Builder,
                                  SmallVector<Value *> &ArgsVector);
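
  // Sketch of assembling the args vector (RTArgs, Info, TripCount, NumTeams,
  // and NumThreads are assumed placeholders):
  //   TargetKernelArgs KArgs(/*NumTargetItems=*/Info.NumberOfPtrs, RTArgs,
  //                          TripCount, NumTeams, NumThreads,
  //                          /*DynCGGroupMem=*/Builder.getInt32(0),
  //                          /*HasNoWait=*/false);
  //   SmallVector<Value *> ArgsVector;
  //   getKernelArgsVector(KArgs, Builder, ArgsVector);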

  /// Struct that keeps the information that should be kept throughout
  /// a 'target data' region.
  class TargetDataInfo {
    /// Set to true if device pointer information has to be obtained.
    bool RequiresDevicePointerInfo = false;
    /// Set to true if Clang emits separate runtime calls for the beginning and
    /// end of the region.  These calls might have separate map type arrays.
    bool SeparateBeginEndCalls = false;

  public:
    TargetDataRTArgs RTArgs;

    SmallMapVector<const Value *, std::pair<Value *, Value *>, 4>
        DevicePtrInfoMap;

    /// Indicate whether any user-defined mapper exists.
    bool HasMapper = false;
    /// The total number of pointers passed to the runtime library.
    unsigned NumberOfPtrs = 0u;

    explicit TargetDataInfo() {}
    explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                            bool SeparateBeginEndCalls)
        : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
          SeparateBeginEndCalls(SeparateBeginEndCalls) {}
    /// Clear information about the data arrays.
    void clearArrayInfo() {
      RTArgs = TargetDataRTArgs();
      HasMapper = false;
      NumberOfPtrs = 0u;
    }
    /// Return true if the current target data information has valid arrays.
    bool isValid() {
      return RTArgs.BasePointersArray && RTArgs.PointersArray &&
             RTArgs.SizesArray && RTArgs.MapTypesArray &&
             (!HasMapper || RTArgs.MappersArray) && NumberOfPtrs;
    }
    bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
    bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
  };

  enum class DeviceInfoTy { None, Pointer, Address };
  using MapValuesArrayTy = SmallVector<Value *, 4>;
  using MapDeviceInfoArrayTy = SmallVector<DeviceInfoTy, 4>;
  using MapFlagsArrayTy = SmallVector<omp::OpenMPOffloadMappingFlags, 4>;
  using MapNamesArrayTy = SmallVector<Constant *, 4>;
  using MapDimArrayTy = SmallVector<uint64_t, 4>;
  using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;

  /// This structure contains combined information generated for mappable
  /// clauses, including base pointers, pointers, sizes, map types, user-defined
  /// mappers, and non-contiguous information.
  struct MapInfosTy {
    struct StructNonContiguousInfo {
      bool IsNonContiguous = false;
      MapDimArrayTy Dims;
      MapNonContiguousArrayTy Offsets;
      MapNonContiguousArrayTy Counts;
      MapNonContiguousArrayTy Strides;
    };
    MapValuesArrayTy BasePointers;
    MapValuesArrayTy Pointers;
    MapDeviceInfoArrayTy DevicePointers;
    MapValuesArrayTy Sizes;
    MapFlagsArrayTy Types;
    MapNamesArrayTy Names;
    StructNonContiguousInfo NonContigInfo;

    /// Append arrays in \a CurInfo.
    void append(MapInfosTy &CurInfo) {
      BasePointers.append(CurInfo.BasePointers.begin(),
                          CurInfo.BasePointers.end());
      Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
      DevicePointers.append(CurInfo.DevicePointers.begin(),
                            CurInfo.DevicePointers.end());
      Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
      Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
      Names.append(CurInfo.Names.begin(), CurInfo.Names.end());
      NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
                                CurInfo.NonContigInfo.Dims.end());
      NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
                                   CurInfo.NonContigInfo.Offsets.end());
      NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
                                  CurInfo.NonContigInfo.Counts.end());
      NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
                                   CurInfo.NonContigInfo.Strides.end());
    }
  };

  /// Callback function type for functions emitting the host fallback code that
  /// is executed when the kernel launch fails. It takes an insertion point as
  /// parameter where the code should be emitted. It returns an insertion point
  /// that points right after the emitted code.
  using EmitFallbackCallbackTy = function_ref<InsertPointTy(InsertPointTy)>;

  /// Generate a target region entry call and host fallback call.
  ///
  /// \param Loc The location at which the request originated and is fulfilled.
  /// \param OutlinedFn The outlined kernel function.
  /// \param OutlinedFnID The outlined function ID.
  /// \param EmitTargetCallFallbackCB Callback function to generate host
  ///        fallback code.
  /// \param Args Data structure holding information about the kernel arguments.
  /// \param DeviceID Identifier for the device via the 'device' clause.
  /// \param RTLoc Source location identifier
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  InsertPointTy emitKernelLaunch(
      const LocationDescription &Loc, Function *OutlinedFn, Value *OutlinedFnID,
      EmitFallbackCallbackTy EmitTargetCallFallbackCB, TargetKernelArgs &Args,
      Value *DeviceID, Value *RTLoc, InsertPointTy AllocaIP);
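
  // The fallback callback typically re-emits the host version of the region
  // (a sketch; OutlinedFn and FallbackArgs are assumed):
  //   auto FallbackCB = [&](InsertPointTy IP) -> InsertPointTy {
  //     Builder.restoreIP(IP);
  //     Builder.CreateCall(OutlinedFn, FallbackArgs);
  //     return Builder.saveIP();
  //   };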

  /// Emit the arguments to be passed to the runtime library based on the
  /// arrays of base pointers, pointers, sizes, map types, and mappers.  If
  /// ForEndCall, emit map types to be passed for the end of the region instead
  /// of the beginning.
  void emitOffloadingArraysArgument(IRBuilderBase &Builder,
                                    OpenMPIRBuilder::TargetDataRTArgs &RTArgs,
                                    OpenMPIRBuilder::TargetDataInfo &Info,
                                    bool EmitDebug = false,
                                    bool ForEndCall = false);

  /// Emit an array of struct descriptors to be assigned to the offload args.
  void emitNonContiguousDescriptor(InsertPointTy AllocaIP,
                                   InsertPointTy CodeGenIP,
                                   MapInfosTy &CombinedInfo,
                                   TargetDataInfo &Info);

  /// Emit the arrays used to pass the captures and map information to the
  /// offloading runtime library. If there is no map or capture information,
  /// return nullptr by reference.
  void emitOffloadingArrays(
      InsertPointTy AllocaIP, InsertPointTy CodeGenIP, MapInfosTy &CombinedInfo,
      TargetDataInfo &Info, bool IsNonContiguous = false,
      function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
      function_ref<Value *(unsigned int)> CustomMapperCB = nullptr);

  /// Creates offloading entry for the provided entry ID \a ID, address \a
  /// Addr, size \a Size, and flags \a Flags.
  void createOffloadEntry(Constant *ID, Constant *Addr, uint64_t Size,
                          int32_t Flags, GlobalValue::LinkageTypes);

  /// The kind of errors that can occur when emitting the offload entries and
  /// metadata.
  enum EmitMetadataErrorKind {
    EMIT_MD_TARGET_REGION_ERROR,
    EMIT_MD_DECLARE_TARGET_ERROR,
    EMIT_MD_GLOBAL_VAR_LINK_ERROR
  };

  /// Callback function type
  using EmitMetadataErrorReportFunctionTy =
      std::function<void(EmitMetadataErrorKind, TargetRegionEntryInfo)>;

  // Emit the offloading entries and metadata so that the device codegen side
  // can easily figure out what to emit. The produced metadata looks like
  // this:
  //
  // !omp_offload.info = !{!1, ...}
  //
  // We only generate metadata for functions that contain target regions.
  void createOffloadEntriesAndInfoMetadata(
      EmitMetadataErrorReportFunctionTy &ErrorReportFunction);

public:
  /// Generator for __kmpc_copyprivate
  ///
  /// \param Loc The source location description.
  /// \param BufSize Number of elements in the buffer.
  /// \param CpyBuf List of pointers to data to be copied.
  /// \param CpyFn function to call for copying data.
  /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
  ///
  /// \return The insertion position *after* the CopyPrivate call.
  InsertPointTy createCopyPrivate(const LocationDescription &Loc,
                                  llvm::Value *BufSize, llvm::Value *CpyBuf,
                                  llvm::Value *CpyFn, llvm::Value *DidIt);

  /// Generator for '#omp single'
  ///
  /// \param Loc The source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IsNowait If false, a barrier is emitted.
  /// \param DidIt Local variable used as a flag to indicate 'single' thread
  ///
  /// \returns The insertion position *after* the single call.
  InsertPointTy createSingle(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB, bool IsNowait,
                             llvm::Value *DidIt);

  /// Generator for '#omp master'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  ///
  /// \returns The insertion position *after* the master.
  InsertPointTy createMaster(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB);
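
  // Region generators share a callback pattern (a sketch; OMPBuilder,
  // Builder, and Loc are assumed):
  //   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP) {
  //     Builder.restoreIP(CodeGenIP);
  //     // ... emit the region body ...
  //   };
  //   auto FiniCB = [&](InsertPointTy CodeGenIP) {
  //     // ... emit cleanup, e.g. for variable copies ...
  //   };
  //   Builder.restoreIP(OMPBuilder.createMaster(Loc, BodyGenCB, FiniCB));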

  /// Generator for '#omp masked'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param Filter The thread-id value from the filter clause; only the
  ///        thread with this id executes the masked region.
  ///
  /// \returns The insertion position *after* the masked.
  InsertPointTy createMasked(const LocationDescription &Loc,
                             BodyGenCallbackTy BodyGenCB,
                             FinalizeCallbackTy FiniCB, Value *Filter);

  /// Generator for '#omp critical'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region body code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param CriticalName name of the lock used by the critical directive
  /// \param HintInst Hint Instruction for hint clause associated with critical
  ///
  /// \returns The insertion position *after* the critical.
  InsertPointTy createCritical(const LocationDescription &Loc,
                               BodyGenCallbackTy BodyGenCB,
                               FinalizeCallbackTy FiniCB,
                               StringRef CriticalName, Value *HintInst);

  /// Generator for '#omp ordered depend (source | sink)'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param NumLoops The number of loops in the depend clause.
  /// \param StoreValues The values to be stored in the vector address.
  /// \param Name The name of the alloca instruction.
  /// \param IsDependSource If true, depend source; otherwise, depend sink.
  ///
  /// \return The insertion position *after* the ordered.
  InsertPointTy createOrderedDepend(const LocationDescription &Loc,
                                    InsertPointTy AllocaIP, unsigned NumLoops,
                                    ArrayRef<llvm::Value *> StoreValues,
                                    const Twine &Name, bool IsDependSource);

  /// Generator for '#omp ordered [threads | simd]'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IsThreads True if the threads clause is present (or no clause is
  /// given); false if the simd clause is present.
  ///
  /// \returns The insertion position *after* the ordered.
  InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
                                         BodyGenCallbackTy BodyGenCB,
                                         FinalizeCallbackTy FiniCB,
                                         bool IsThreads);

  /// Generator for '#omp sections'
  ///
  /// \param Loc The insert and source location description.
  /// \param AllocaIP The insertion points to be used for alloca instructions.
  /// \param SectionCBs Callbacks that will generate body of each section.
  /// \param PrivCB Callback to copy a given variable (think copy constructor).
  /// \param FiniCB Callback to finalize variable copies.
  /// \param IsCancellable Flag to indicate a cancellable parallel region.
  /// \param IsNowait If true, the barrier ensuring all sections have executed
  /// before moving forward is not generated.
  /// \returns The insertion position *after* the sections.
  InsertPointTy createSections(const LocationDescription &Loc,
                               InsertPointTy AllocaIP,
                               ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
                               PrivatizeCallbackTy PrivCB,
                               FinalizeCallbackTy FiniCB, bool IsCancellable,
                               bool IsNowait);

  /// Generator for '#omp section'
  ///
  /// \param Loc The insert and source location description.
  /// \param BodyGenCB Callback that will generate the region body code.
  /// \param FiniCB Callback to finalize variable copies.
  /// \returns The insertion position *after* the section.
  InsertPointTy createSection(const LocationDescription &Loc,
                              BodyGenCallbackTy BodyGenCB,
                              FinalizeCallbackTy FiniCB);

  /// Generate conditional branch and relevant BasicBlocks through which private
  /// threads copy the 'copyin' variables from the master copy to threadprivate
  /// copies.
  ///
  /// \param IP insertion block for the copyin conditional
  /// \param MasterAddr a pointer to the master variable
  /// \param PrivateAddr a pointer to the threadprivate variable
  /// \param IntPtrTy Pointer size type
  /// \param BranchtoEnd Create a branch between the copyin.not.master blocks
  ///                    and the copy.in.end block
  ///
  /// \returns The insertion point where the copying operation is to be emitted.
  InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
                                         Value *PrivateAddr,
                                         llvm::IntegerType *IntPtrTy,
                                         bool BranchtoEnd = true);

  /// Create a runtime call for kmpc_Alloc
  ///
  /// \param Loc The insert and source location description.
  /// \param Size Size of allocated memory space
  /// \param Allocator Allocator information instruction
  /// \param Name Name of call Instruction for OMP_alloc
  ///
  /// \returns CallInst to the OMP_Alloc call
  CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
                           Value *Allocator, std::string Name = "");

  /// Create a runtime call for kmpc_free
  ///
  /// \param Loc The insert and source location description.
  /// \param Addr Address of memory space to be freed
  /// \param Allocator Allocator information instruction
  /// \param Name Name of call Instruction for OMP_Free
  ///
  /// \returns CallInst to the OMP_Free call
  CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
                          Value *Allocator, std::string Name = "");
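
  // Paired allocation and free (a sketch; Allocator is an assumed Value*):
  //   CallInst *Ptr = OMPBuilder.createOMPAlloc(Loc, Builder.getInt64(64),
  //                                             Allocator, "buf");
  //   // ... use Ptr ...
  //   OMPBuilder.createOMPFree(Loc, Ptr, Allocator);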

  /// Create a runtime call for kmpc_threadprivate_cached
  ///
  /// \param Loc The insert and source location description.
  /// \param Pointer pointer to data to be cached
  /// \param Size size of data to be cached
  /// \param Name Name of call Instruction for callinst
  ///
  /// \returns CallInst to the thread private cache call.
  CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
                                      llvm::Value *Pointer,
                                      llvm::ConstantInt *Size,
                                      const llvm::Twine &Name = Twine(""));

  /// Create a runtime call for __tgt_interop_init
  ///
  /// \param Loc The insert and source location description.
  /// \param InteropVar variable to be allocated
  /// \param InteropType type of interop operation
  /// \param Device device to which offloading will occur
  /// \param NumDependences  number of dependence variables
  /// \param DependenceAddress pointer to dependence variables
  /// \param HaveNowaitClause does nowait clause exist
  ///
  /// \returns CallInst to the __tgt_interop_init call
  CallInst *createOMPInteropInit(const LocationDescription &Loc,
                                 Value *InteropVar,
                                 omp::OMPInteropType InteropType, Value *Device,
                                 Value *NumDependences,
                                 Value *DependenceAddress,
                                 bool HaveNowaitClause);

  /// Create a runtime call for __tgt_interop_destroy
  ///
  /// \param Loc The insert and source location description.
  /// \param InteropVar variable to be allocated
  /// \param Device device to which offloading will occur
  /// \param NumDependences  number of dependence variables
  /// \param DependenceAddress pointer to dependence variables
  /// \param HaveNowaitClause does nowait clause exist
  ///
  /// \returns CallInst to the __tgt_interop_destroy call
  CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
                                    Value *InteropVar, Value *Device,
                                    Value *NumDependences,
                                    Value *DependenceAddress,
                                    bool HaveNowaitClause);

  /// Create a runtime call for __tgt_interop_use
  ///
  /// \param Loc The insert and source location description.
  /// \param InteropVar variable to be allocated
  /// \param Device device to which offloading will occur
  /// \param NumDependences  number of dependence variables
  /// \param DependenceAddress pointer to dependence variables
  /// \param HaveNowaitClause does nowait clause exist
  ///
  /// \returns CallInst to the __tgt_interop_use call
  CallInst *createOMPInteropUse(const LocationDescription &Loc,
                                Value *InteropVar, Value *Device,
                                Value *NumDependences, Value *DependenceAddress,
                                bool HaveNowaitClause);

  /// The `omp target` interface
  ///
  /// For more information about the usage of this interface,
  /// \see openmp/libomptarget/deviceRTLs/common/include/target.h
  ///
  ///{

  /// Create a runtime call for kmpc_target_init
  ///
  /// \param Loc The insert and source location description.
  /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
  InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD);

  /// Create a runtime call for kmpc_target_deinit
  ///
  /// \param Loc The insert and source location description.
  /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
  void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD);
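
  // Typical device-kernel bracket (a sketch; OMPBuilder, Builder, and Loc are
  // assumed):
  //   Builder.restoreIP(OMPBuilder.createTargetInit(Loc, /*IsSPMD=*/true));
  //   // ... emit the kernel body ...
  //   OMPBuilder.createTargetDeinit(Loc, /*IsSPMD=*/true);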

  ///}

private:
  // Sets the function attributes expected for the outlined function
  void setOutlinedTargetRegionFunctionAttributes(Function *OutlinedFn,
                                                 int32_t NumTeams,
                                                 int32_t NumThreads);

  // Creates the function ID/Address for the given outlined function.
  // In the case of an embedded device function, the address of the function is
  // used; in the case of a non-offload function, a constant is created.
  Constant *createOutlinedFunctionID(Function *OutlinedFn,
                                     StringRef EntryFnIDName);

  // Creates the region entry address for the outlined function
  Constant *createTargetRegionEntryAddr(Function *OutlinedFunction,
                                        StringRef EntryFnName);

public:
  /// Functions used to generate a function with the given name.
  using FunctionGenCallback = std::function<Function *(StringRef FunctionName)>;

  /// Create a unique name for the entry function using the source location
  /// information of the current target region. The name will be something like:
  ///
  /// __omp_offloading_DD_FFFF_PP_lBB[_CC]
  ///
  /// where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  /// mangled name of the function that encloses the target region and BB is the
  /// line number of the target region. CC is a count added when more than one
  /// region is located at the same location.
  ///
  /// If this target outline function is not an offload entry, we don't need to
  /// register it. This may happen if it is guarded by an if clause that is
  /// false at compile time, or no target archs have been specified.
  ///
  /// The created target region ID is used by the runtime library to identify
  /// the current target region, so it only has to be unique and not
  /// necessarily point to anything. It could be the pointer to the outlined
  /// function that implements the target region, but we aren't using that so
  /// that the compiler doesn't need to keep it, and could therefore inline
  /// the host function if proven worthwhile during optimization. On the other
  /// hand, if emitting code for the device, the ID has to be the function
  /// address so that it can be retrieved from the offloading entry and
  /// launched by the runtime library. We also mark the outlined function to
  /// have external linkage in case we are emitting code for the device,
  /// because these functions will be entry points to the device.
  ///
  /// \param EntryInfo The entry information about the function.
  /// \param GenerateFunctionCallback The callback function to generate the
  ///        code.
  /// \param NumTeams Default number of teams.
  /// \param NumThreads Default number of threads.
  /// \param IsOffloadEntry True if the function is an offload entry that
  ///        needs to be registered.
  /// \param OutlinedFn Receives a pointer to the outlined function.
  /// \param OutlinedFnID Receives the ID created for the outlined function.
  void emitTargetRegionFunction(TargetRegionEntryInfo &EntryInfo,
                                FunctionGenCallback &GenerateFunctionCallback,
                                int32_t NumTeams, int32_t NumThreads,
                                bool IsOffloadEntry, Function *&OutlinedFn,
                                Constant *&OutlinedFnID);
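
  // A FunctionGenCallback sketch that creates an empty stub (hypothetical;
  // real frontends emit the region body into the created function):
  //   FunctionGenCallback GenCB = [&](StringRef Name) -> Function * {
  //     FunctionType *FTy = FunctionType::get(Builder.getVoidTy(), false);
  //     return Function::Create(FTy, GlobalValue::WeakODRLinkage, Name, M);
  //   };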

  /// Registers the given function and sets up the attributes of the function.
  /// Returns the FunctionID.
  ///
  /// \param EntryInfo The entry information about the function.
  /// \param OutlinedFunction Pointer to the outlined function.
  /// \param EntryFnName Name of the outlined function.
  /// \param EntryFnIDName Name of the ID to be created.
  /// \param NumTeams Default number of teams.
  /// \param NumThreads Default number of threads.
  Constant *registerTargetRegionFunction(TargetRegionEntryInfo &EntryInfo,
                                         Function *OutlinedFunction,
                                         StringRef EntryFnName,
                                         StringRef EntryFnIDName,
                                         int32_t NumTeams, int32_t NumThreads);
  /// Type of BodyGen to use for region codegen
  ///
  /// Priv: If device pointer privatization is required, emit the body of the
  /// region here. It will have to be duplicated: with and without
  /// privatization.
  /// DupNoPriv: If we need device pointer privatization, we need
  /// to emit the body of the region with no privatization in the 'else' branch
  /// of the conditional.
  /// NoPriv: If we don't require privatization of device
  /// pointers, we emit the body in between the runtime calls. This avoids
  /// duplicating the body code.
  enum BodyGenTy { Priv, DupNoPriv, NoPriv };

  /// Generator for '#omp target data'
  ///
  /// \param Loc The location where the target data construct was encountered.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param CodeGenIP The insertion point at which the target directive code
  /// should be placed.
  /// \param DeviceID Stores the DeviceID from the device clause.
  /// \param IfCond Value which corresponds to the if clause condition.
  /// \param Info Stores all information related to the Target Data directive.
  /// \param GenMapInfoCB Callback that populates and returns the MapInfos.
  /// \param MapperFunc The runtime mapper function to call; selects between
  /// the begin and end mapper calls.
  /// \param BodyGenCB Optional Callback to generate the region code.
  /// \param DeviceAddrCB Optional callback to generate code related to
  /// use_device_ptr and use_device_addr.
  /// \param CustomMapperCB Optional callback to generate code related to
  /// custom mappers.
  OpenMPIRBuilder::InsertPointTy createTargetData(
      const LocationDescription &Loc, InsertPointTy AllocaIP,
      InsertPointTy CodeGenIP, Value *DeviceID, Value *IfCond,
      TargetDataInfo &Info,
      function_ref<MapInfosTy &(InsertPointTy CodeGenIP)> GenMapInfoCB,
      omp::RuntimeFunction *MapperFunc = nullptr,
      function_ref<InsertPointTy(InsertPointTy CodeGenIP,
                                 BodyGenTy BodyGenType)>
          BodyGenCB = nullptr,
      function_ref<void(unsigned int, Value *)> DeviceAddrCB = nullptr,
      function_ref<Value *(unsigned int)> CustomMapperCB = nullptr,
      Value *SrcLocInfo = nullptr);
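
  // Illustrative sketch (not from the LLVM sources): emitting '#omp target
  // data' through this API. `OMPBuilder`, `Builder`, `Loc`, `AllocaIP`,
  // `DeviceID`, `Info`, `GenMapInfoCB` and `BodyCB` are assumed to have been
  // set up by the caller as documented above.
  //
  //   Builder.restoreIP(OMPBuilder.createTargetData(
  //       Loc, AllocaIP, Builder.saveIP(), DeviceID, /*IfCond=*/nullptr,
  //       Info, GenMapInfoCB, /*MapperFunc=*/nullptr, BodyCB));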

  using TargetBodyGenCallbackTy = function_ref<InsertPointTy(
      InsertPointTy AllocaIP, InsertPointTy CodeGenIP)>;

  /// Generator for '#omp target'
  ///
  /// \param Loc The location where the target construct was encountered.
  /// \param CodeGenIP The insertion point where the call to the outlined
  /// function should be emitted.
  /// \param EntryInfo The entry information about the function.
  /// \param NumTeams Number of teams specified in the num_teams clause.
  /// \param NumThreads Number of threads specified in the thread_limit clause.
  /// \param Inputs The input values to the region that will be passed as
  /// arguments to the outlined function.
  /// \param BodyGenCB Callback that will generate the region code.
  InsertPointTy createTarget(const LocationDescription &Loc,
                             OpenMPIRBuilder::InsertPointTy CodeGenIP,
                             TargetRegionEntryInfo &EntryInfo, int32_t NumTeams,
                             int32_t NumThreads,
                             SmallVectorImpl<Value *> &Inputs,
                             TargetBodyGenCallbackTy BodyGenCB);
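
  // Illustrative sketch (not from the LLVM sources): outlining a '#omp
  // target' region. The entry-info fields (`DevID`, `FileID`, `LineNo`), the
  // kernel `Inputs` and `BodyGenCB` are assumed to be prepared by the
  // frontend.
  //
  //   TargetRegionEntryInfo EntryInfo("foo", DevID, FileID, LineNo);
  //   SmallVector<Value *> Inputs = {ArgA, ArgB};
  //   Builder.restoreIP(OMPBuilder.createTarget(
  //       Loc, Builder.saveIP(), EntryInfo, /*NumTeams=*/-1,
  //       /*NumThreads=*/0, Inputs, BodyGenCB));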

  /// Declarations for LLVM-IR types (simple, array, function and structure) are
  /// generated below. Their names are defined and used in OpenMPKinds.def. Here
  /// we provide the declarations; the initializeTypes function will provide the
  /// values.
  ///
  ///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                             \
  ArrayType *VarName##Ty = nullptr;                                            \
  PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                  \
  FunctionType *VarName = nullptr;                                             \
  PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...)                                 \
  StructType *VarName = nullptr;                                               \
  PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

  ///}

private:
  /// Create all simple and struct types exposed by the runtime and remember
  /// the llvm::PointerTypes of them for easy access later.
  void initializeTypes(Module &M);

  /// Common interface for generating entry calls for OMP directives.
  /// If the directive has a region/body, it will set the insertion
  /// point to the body.
  ///
  /// \param OMPD Directive to generate entry blocks for
  /// \param EntryCall Call to the entry OMP Runtime Function
  /// \param ExitBB Block where the region ends.
  /// \param Conditional Indicates whether the entry call result will be used
  ///        to evaluate a conditional of whether a thread will execute
  ///        body code or not.
  ///
  /// \return The insertion position in the exit block
  InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
                                         BasicBlock *ExitBB,
                                         bool Conditional = false);

  /// Common interface to finalize the region
  ///
  /// \param OMPD Directive to generate exiting code for
  /// \param FinIP Insertion point for emitting Finalization code and exit call
  /// \param ExitCall Call to the ending OMP Runtime Function
  /// \param HasFinalize Indicates whether the directive will require
  ///        finalization and has a finalization callback in the stack that
  ///        should be called.
  ///
  /// \return The insertion position in the exit block
  InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
                                        InsertPointTy FinIP,
                                        Instruction *ExitCall,
                                        bool HasFinalize = true);

  /// Common Interface to generate OMP inlined regions
  ///
  /// \param OMPD Directive to generate inlined region for
  /// \param EntryCall Call to the entry OMP Runtime Function
  /// \param ExitCall Call to the ending OMP Runtime Function
  /// \param BodyGenCB Body code generation callback.
  /// \param FiniCB Finalization Callback. Will be called when finalizing region
  /// \param Conditional Indicates whether the entry call result will be used
  ///        to evaluate a conditional of whether a thread will execute
  ///        body code or not.
  /// \param HasFinalize Indicates whether the directive will require
  ///        finalization and has a finalization callback in the stack that
  ///        should be called.
  /// \param IsCancellable If HasFinalize is set to true, indicates whether
  ///        the directive should be cancellable.
  /// \return The insertion point after the region
  InsertPointTy
  EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
                       Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
                       FinalizeCallbackTy FiniCB, bool Conditional = false,
                       bool HasFinalize = true, bool IsCancellable = false);

  /// Concatenate the given name \p Parts into a single name using the given
  /// separators.
  /// \param Parts Different parts of the final name that need separation
  /// \param FirstSeparator First separator used between the initial two
  ///        parts of the name.
  /// \param Separator Separator used between all of the remaining
  ///        consecutive parts of the name
  static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
                                           StringRef FirstSeparator,
                                           StringRef Separator);
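
  // For example, getNameWithSeparators({"omp", "outlined", "1"}, ".", "_")
  // would presumably yield "omp.outlined_1": FirstSeparator between the first
  // two parts, Separator between the remaining ones.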

  /// Returns the corresponding lock object for the specified critical region
  /// name. If the lock object does not exist, it is created; otherwise a
  /// reference to the existing copy is returned.
  /// \param CriticalName Name of the critical region.
  ///
  Value *getOMPCriticalRegionLock(StringRef CriticalName);

  /// Callback type for atomic expression update.
  /// For example:
  /// \code{.cpp}
  /// unsigned x = 0;
  /// #pragma omp atomic update
  /// x = Expr(x_old);  //Expr() is any legal operation
  /// \endcode
  ///
  /// \param XOld the value of the atomic memory address to use for update
  /// \param IRB reference to the IRBuilder to use
  ///
  /// \returns Value to update X to.
  using AtomicUpdateCallbackTy =
      const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
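
  // Illustrative sketch (not from the LLVM sources): an update callback
  // lowering '#pragma omp atomic update' on 'x = x + 1'. The lambda can be
  // passed wherever an AtomicUpdateCallbackTy is expected.
  //
  //   auto UpdateOp = [](Value *XOld, IRBuilder<> &IRB) -> Value * {
  //     return IRB.CreateAdd(XOld, IRB.getInt32(1), "x.new");
  //   };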

private:
  enum AtomicKind { Read, Write, Update, Capture, Compare };

  /// Determine whether to emit flush or not
  ///
  /// \param Loc    The insert and source location description.
  /// \param AO     The required atomic ordering
  /// \param AK     The OpenMP atomic operation kind used.
  ///
  /// \returns Whether a flush was emitted or not
  bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
                                    AtomicOrdering AO, AtomicKind AK);

  /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
  /// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X).
  /// Only scalar data types.
  ///
  /// \param AllocaIP   The insertion point to be used for alloca
  ///                   instructions.
  /// \param X          The target atomic pointer to be updated.
  /// \param XElemTy    The element type of the atomic pointer.
  /// \param Expr       The value to update X with.
  /// \param AO         Atomic ordering of the generated atomic
  ///                   instructions.
  /// \param RMWOp      The binary operation used for the update. If the
  ///                   operation is not supported by atomicrmw, or belongs to
  ///                   {FADD, FSUB, BAD_BINOP}, then a cmpxchg-based atomic
  ///                   will be generated.
  /// \param UpdateOp   Code generator for complex expressions that cannot be
  ///                   expressed through an atomicrmw instruction.
  /// \param VolatileX  True if \p X is volatile.
  /// \param IsXBinopExpr True if \p X appears as the left operand in the RHS
  ///                     of the update expression, false otherwise
  ///                     (e.g. true for X = X BinOp Expr).
  ///
  /// \returns A pair of the old value of X before the update, and the value
  ///          used for the update.
  std::pair<Value *, Value *>
  emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
                   AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
                   AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
                   bool IsXBinopExpr);

  /// Emit the binary op described by \p RMWOp, using \p Src1 and \p Src2.
  ///
  /// \return The instruction.
  Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
                                AtomicRMWInst::BinOp RMWOp);

public:
  /// A struct to pack relevant information while generating atomic Ops
  struct AtomicOpValue {
    Value *Var = nullptr;
    Type *ElemTy = nullptr;
    bool IsSigned = false;
    bool IsVolatile = false;
  };

  /// Emit atomic read for: V = X --- Only scalar data types.
  ///
  /// \param Loc    The insert and source location description.
  /// \param X      The target pointer to be atomically read.
  /// \param V      Memory address where to store the atomically read
  ///               value.
  /// \param AO     Atomic ordering of the generated atomic
  ///               instructions.
  ///
  /// \return Insertion point after generated atomic read IR.
  InsertPointTy createAtomicRead(const LocationDescription &Loc,
                                 AtomicOpValue &X, AtomicOpValue &V,
                                 AtomicOrdering AO);
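
  // Illustrative sketch (not from the LLVM sources): lowering 'v = x;' under
  // '#pragma omp atomic read'. `XAddr` and `VAddr` are assumed to be i32
  // pointers created by the frontend.
  //
  //   AtomicOpValue X{XAddr, Builder.getInt32Ty(), /*IsSigned=*/true,
  //                   /*IsVolatile=*/false};
  //   AtomicOpValue V{VAddr, Builder.getInt32Ty(), /*IsSigned=*/true,
  //                   /*IsVolatile=*/false};
  //   Builder.restoreIP(
  //       OMPBuilder.createAtomicRead(Loc, X, V, AtomicOrdering::Monotonic));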

  /// Emit atomic write for: X = Expr --- Only scalar data types.
  ///
  /// \param Loc    The insert and source location description.
  /// \param X      The target pointer to be atomically written to.
  /// \param Expr   The value to store.
  /// \param AO     Atomic ordering of the generated atomic
  ///               instructions.
  ///
  /// \return Insertion point after generated atomic Write IR.
  InsertPointTy createAtomicWrite(const LocationDescription &Loc,
                                  AtomicOpValue &X, Value *Expr,
                                  AtomicOrdering AO);

  /// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X.
  /// For complex operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X).
  /// Only scalar data types.
  ///
  /// \param Loc      The insert and source location description.
  /// \param AllocaIP The insertion point to be used for alloca instructions.
  /// \param X        The target atomic pointer to be updated.
  /// \param Expr     The value to update X with.
  /// \param AO       Atomic ordering of the generated atomic instructions.
  /// \param RMWOp    The binary operation used for the update. If the
  ///                 operation is not supported by atomicrmw, or belongs to
  ///                 {FADD, FSUB, BAD_BINOP}, then a cmpxchg-based atomic
  ///                 will be generated.
  /// \param UpdateOp Code generator for complex expressions that cannot be
  ///                 expressed through an atomicrmw instruction.
  /// \param IsXBinopExpr True if \p X appears as the left operand in the RHS
  ///                     of the update expression, false otherwise
  ///                     (e.g. true for X = X BinOp Expr).
  ///
  /// \return Insertion point after generated atomic update IR.
  InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
                                   InsertPointTy AllocaIP, AtomicOpValue &X,
                                   Value *Expr, AtomicOrdering AO,
                                   AtomicRMWInst::BinOp RMWOp,
                                   AtomicUpdateCallbackTy &UpdateOp,
                                   bool IsXBinopExpr);
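
  // Illustrative sketch (not from the LLVM sources): 'x += expr' via an
  // atomicrmw add; `UpdateOp` (as sketched above) is still required for the
  // cmpxchg fallback.
  //
  //   Builder.restoreIP(OMPBuilder.createAtomicUpdate(
  //       Loc, AllocaIP, X, Expr, AtomicOrdering::Monotonic,
  //       AtomicRMWInst::Add, UpdateOp, /*IsXBinopExpr=*/true));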

  /// Emit atomic capture for constructs: --- Only scalar data types
  /// V = X; X = X BinOp Expr ,
  /// X = X BinOp Expr; V = X,
  /// V = X; X = Expr BinOp X,
  /// X = Expr BinOp X; V = X,
  /// V = X; X = UpdateOp(X),
  /// X = UpdateOp(X); V = X,
  ///
  /// \param Loc        The insert and source location description.
  /// \param AllocaIP   The insertion point to be used for alloca instructions.
  /// \param X          The target atomic pointer to be updated
  /// \param V          Memory address where to store captured value
  /// \param Expr       The value to update X with.
  /// \param AO         Atomic ordering of the generated atomic instructions
  /// \param RMWOp      The binary operation used for the update. If the
  ///                   operation is not supported by atomicrmw, or belongs to
  ///                   {FADD, FSUB, BAD_BINOP}, then a cmpxchg-based atomic
  ///                   will be generated.
  /// \param UpdateOp   Code generator for complex expressions that cannot be
  ///                   expressed through an atomicrmw instruction.
  /// \param UpdateExpr True if X is an in-place update of the form
  ///                   X = X BinOp Expr or X = Expr BinOp X.
  /// \param IsXBinopExpr True if X appears as the left operand in the RHS of
  ///                     the update expression, false otherwise
  ///                     (e.g. true for X = X BinOp Expr).
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  ///                        'v', not an updated one.
  ///
  /// \return Insertion point after generated atomic capture IR.
  InsertPointTy
  createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
                      AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
                      AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
                      AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
                      bool IsPostfixUpdate, bool IsXBinopExpr);

  /// Emit atomic compare for constructs: --- Only scalar data types
  /// cond-expr-stmt:
  /// x = x ordop expr ? expr : x;
  /// x = expr ordop x ? expr : x;
  /// x = x == e ? d : x;
  /// x = e == x ? d : x; (this one is not in the spec)
  /// cond-update-stmt:
  /// if (x ordop expr) { x = expr; }
  /// if (expr ordop x) { x = expr; }
  /// if (x == e) { x = d; }
  /// if (e == x) { x = d; } (this one is not in the spec)
  /// conditional-update-capture-atomic:
  /// v = x; cond-update-stmt; (IsPostfixUpdate=true, IsFailOnly=false)
  /// cond-update-stmt; v = x; (IsPostfixUpdate=false, IsFailOnly=false)
  /// if (x == e) { x = d; } else { v = x; } (IsPostfixUpdate=false,
  ///                                         IsFailOnly=true)
  /// r = x == e; if (r) { x = d; } (IsPostfixUpdate=false, IsFailOnly=false)
  /// r = x == e; if (r) { x = d; } else { v = x; } (IsPostfixUpdate=false,
  ///                                                IsFailOnly=true)
  ///
  /// \param Loc          The insert and source location description.
  /// \param X            The target atomic pointer to be updated.
  /// \param V            Memory address where to store captured value (for
  ///                     compare capture only).
  /// \param R            Memory address where to store comparison result
  ///                     (for compare capture with '==' only).
  /// \param E            The expected value ('e') for forms that use an
  ///                     equality comparison or an expression ('expr') for
  ///                     forms that use 'ordop' (logically an atomic maximum or
  ///                     minimum).
  /// \param D            The desired value for forms that use an equality
  ///                     comparison. For forms that use 'ordop', it should be
  ///                     \p nullptr.
  /// \param AO           Atomic ordering of the generated atomic instructions.
  /// \param Op           Atomic compare operation. It can only be ==, <, or >.
  /// \param IsXBinopExpr True if the conditional statement is in the form where
  ///                     x is on LHS. It only matters for < or >.
  /// \param IsPostfixUpdate  True if original value of 'x' must be stored in
  ///                         'v', not an updated one (for compare capture
  ///                         only).
  /// \param IsFailOnly   True if the original value of 'x' is stored to 'v'
  ///                     only when the comparison fails. This is only valid for
  ///                     the case the comparison is '=='.
  ///
  /// \return Insertion point after generated atomic capture IR.
  InsertPointTy
  createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X,
                      AtomicOpValue &V, AtomicOpValue &R, Value *E, Value *D,
                      AtomicOrdering AO, omp::OMPAtomicCompareOp Op,
                      bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly);
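
  // Illustrative sketch (not from the LLVM sources): lowering
  // 'if (x < e) { x = e; }' (an atomic maximum) without capture; `V` and `R`
  // are unused here and passed as empty AtomicOpValues.
  //
  //   AtomicOpValue V, R;
  //   Builder.restoreIP(OMPBuilder.createAtomicCompare(
  //       Loc, X, V, R, /*E=*/Expr, /*D=*/nullptr, AtomicOrdering::Monotonic,
  //       omp::OMPAtomicCompareOp::MAX, /*IsXBinopExpr=*/true,
  //       /*IsPostfixUpdate=*/false, /*IsFailOnly=*/false));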

  /// Create the control flow structure of a canonical OpenMP loop.
  ///
  /// The emitted loop will be disconnected, i.e. no edge to the loop's
  /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
  /// IRBuilder location is not preserved.
  ///
  /// \param DL        DebugLoc used for the instructions in the skeleton.
  /// \param TripCount Value to be used for the trip count.
  /// \param F         Function in which to insert the BasicBlocks.
  /// \param PreInsertBefore  Where to insert BBs that execute before the body,
  ///                         typically the body itself.
  /// \param PostInsertBefore Where to insert BBs that execute after the body.
  /// \param Name      Base name used to derive BB
  ///                  and instruction names.
  ///
  /// \returns The CanonicalLoopInfo that represents the emitted loop.
  CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
                                        Function *F,
                                        BasicBlock *PreInsertBefore,
                                        BasicBlock *PostInsertBefore,
                                        const Twine &Name = {});
  /// OMP Offload Info Metadata name string
  const std::string ompOffloadInfoName = "omp_offload.info";

  /// Loads all the offload entries information from the host IR
  /// metadata. This function is only meant to be used with device code
  /// generation.
  ///
  /// \param M         Module to load the metadata info from. The module passed
  /// may be loaded from a bitcode file, i.e., be different from the
  /// OpenMPIRBuilder::M module.
  void loadOffloadInfoMetadata(Module &M);

  /// Gets (if a variable with the given name already exists) or creates an
  /// internal global variable with the specified Name. The created variable
  /// has CommonLinkage by default and is initialized with a null value.
  /// \param Ty Type of the global variable. If it already exists, the type
  /// must be the same.
  /// \param Name Name of the variable.
  GlobalVariable *getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
                                              unsigned AddressSpace = 0);
};

/// Class to represent the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
///  * The number of loop iterations must have been computed before entering the
///    loop.
///
///  * Has an (unsigned) logical induction variable that starts at zero and
///    increments by one.
///
///  * The loop's CFG itself has no side-effects. The OpenMP specification
///    itself allows side-effects, but the order in which they happen, including
///    how often or whether at all, is unspecified. We expect that the frontend
///    will emit those side-effect instructions somewhere (e.g. before the loop)
///    such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///        |
///  /-> Header
///  |     |
///  |    Cond---\
///  |     |     |
///  |    Body   |
///  |    | |    |
///  |   <...>   |
///  |    | |    |
///   \--Latch   |
///              |
///             Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// inclusive) and end at AfterIP (at After's first instruction, exclusive).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// exclusive) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between the body CFG
/// and Latch to guarantee that there is only a single edge to the latch. It
/// would make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (see redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to the Latch having no PHINode and the Header's only PHINode
/// being for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) is CanonicalLoopInfo's responsibility, and its build-up is
/// checked by assertOK(). It is expected not to be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo, usually using
/// createLoopSkeleton, and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfos and return it as representing the
/// modified loop. Which of the two is done is an implementation detail of the
/// transformation-implementing method, and callers should always assume that
/// the CanonicalLoopInfo passed in is invalidated and a new object is returned.
/// Returned CanonicalLoopInfos have the same structure and guarantees as the
/// ones created by createCanonicalLoop, such that transforming methods do not
/// have to special-case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After, or body block can still be used
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
  friend class OpenMPIRBuilder;

private:
  BasicBlock *Header = nullptr;
  BasicBlock *Cond = nullptr;
  BasicBlock *Latch = nullptr;
  BasicBlock *Exit = nullptr;

  /// Add the control blocks of this loop to \p BBs.
  ///
  /// This does not include any block from the body, including the one returned
  /// by getBody().
  ///
  /// FIXME: This currently includes the Preheader and After blocks even though
  /// their content is (mostly) not under CanonicalLoopInfo's control.
  /// Re-evaluate whether this makes sense.
  void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);

  /// Sets the number of loop iterations to the given value. This value must be
  /// valid in the condition block (i.e., defined in the preheader) and is
  /// interpreted as an unsigned integer.
  void setTripCount(Value *TripCount);

  /// Replace all uses of the canonical induction variable in the loop body with
  /// a new one.
  ///
  /// The intended use case is to update the induction variable for an updated
  /// iteration space such that it can stay normalized in the 0...tripcount-1
  /// range.
  ///
  /// The \p Updater is called with the (presumably updated) current normalized
  /// induction variable and is expected to return the value that uses of the
  /// pre-updated induction values should use instead, typically dependent on
  /// the new induction variable. This is a lambda (instead of e.g. just passing
  /// the new value) to be able to distinguish the uses of the pre-updated
  /// induction variable from uses of the induction variable that compute the
  /// updated induction variable value.
  void mapIndVar(llvm::function_ref<Value *(Instruction *)> Updater);

public:
  /// Returns whether this object currently represents the IR of a loop. If
  /// returning false, it may have been consumed by a loop transformation or not
  /// been initialized. Do not use it in this case.
  bool isValid() const { return Header; }

  /// The preheader ensures that there is only a single edge entering the loop.
  /// Code that must be executed before any loop iteration can be emitted here,
  /// such as computing the loop trip count and begin lifetime markers. Code in
  /// the preheader is not considered part of the canonical loop.
  BasicBlock *getPreheader() const;

  /// The header is the entry for each iteration. In the canonical control flow,
  /// it only contains the PHINode for the induction variable.
  BasicBlock *getHeader() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header;
  }

  /// The condition block computes whether there is another loop iteration. If
  /// yes, branches to the body; otherwise to the exit block.
  BasicBlock *getCond() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Cond;
  }

  /// The body block is the single entry for a loop iteration and not controlled
  /// by CanonicalLoopInfo. It can contain arbitrary control flow but must
  /// eventually branch to the \p Latch block.
  BasicBlock *getBody() const {
    assert(isValid() && "Requires a valid canonical loop");
    return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
  }

  /// Reaching the latch indicates the end of the loop body code. In the
  /// canonical control flow, it only contains the increment of the induction
  /// variable.
  BasicBlock *getLatch() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Latch;
  }

  /// Reaching the exit indicates no more iterations are being executed.
  BasicBlock *getExit() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit;
  }

  /// The after block is intended for clean-up code such as lifetime end
  /// markers. It is separate from the exit block to ensure, analogous to the
  /// preheader, that it has just a single entry edge and is free from PHI
  /// nodes should there be multiple loop exits (such as from break
  /// statements/cancellations).
  BasicBlock *getAfter() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Exit->getSingleSuccessor();
  }

  /// Returns the llvm::Value containing the number of loop iterations. It must
  /// be valid in the preheader and always interpreted as an unsigned integer of
  /// any bit-width.
  Value *getTripCount() const {
    assert(isValid() && "Requires a valid canonical loop");
    Instruction *CmpI = &Cond->front();
    assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
    return CmpI->getOperand(1);
  }

  /// Returns the instruction representing the current logical induction
  /// variable. Always unsigned, always starting at 0 with an increment of one.
  Instruction *getIndVar() const {
    assert(isValid() && "Requires a valid canonical loop");
    Instruction *IndVarPHI = &Header->front();
    assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
    return IndVarPHI;
  }

  /// Return the type of the induction variable (and the trip count).
  Type *getIndVarType() const {
    assert(isValid() && "Requires a valid canonical loop");
    return getIndVar()->getType();
  }

  /// Return the insertion point for user code before the loop.
  OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Preheader = getPreheader();
    return {Preheader, std::prev(Preheader->end())};
  };

  /// Return the insertion point for user code in the body.
  OpenMPIRBuilder::InsertPointTy getBodyIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *Body = getBody();
    return {Body, Body->begin()};
  };

  /// Return the insertion point for user code after the loop.
  OpenMPIRBuilder::InsertPointTy getAfterIP() const {
    assert(isValid() && "Requires a valid canonical loop");
    BasicBlock *After = getAfter();
    return {After, After->begin()};
  };

  Function *getFunction() const {
    assert(isValid() && "Requires a valid canonical loop");
    return Header->getParent();
  }

  /// Consistency self-check.
  void assertOK() const;

  /// Invalidate this loop. That is, the underlying IR does not fulfill the
  /// requirements of an OpenMP canonical loop anymore.
  void invalidate();
};
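
// Illustrative sketch (not from the LLVM sources): emitting user code into
// the body of a canonical loop. `OMPBuilder` and `Builder` are assumed, and
// `CLI` would come from e.g. OpenMPIRBuilder::createCanonicalLoop.
//
//   CanonicalLoopInfo *CLI = /* ...createCanonicalLoop... */;
//   Builder.restoreIP(CLI->getBodyIP());
//   Value *IV = CLI->getIndVar(); // unsigned, runs from 0 to TripCount-1
//   /* ... emit the loop body using IV ... */
//   CLI->assertOK();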

} // end namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
//===- OMPConstants.h - OpenMP related constants and helpers ------ C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines constants and helpers used when dealing with OpenMP.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_OPENMP_OMPCONSTANTS_H
#define LLVM_FRONTEND_OPENMP_OMPCONSTANTS_H

#include "llvm/ADT/BitmaskEnum.h"

#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMP.h.inc"

namespace llvm {
namespace omp {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

/// IDs for all Internal Control Variables (ICVs).
enum class InternalControlVar {
#define ICV_DATA_ENV(Enum, ...) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

#define ICV_DATA_ENV(Enum, ...)                                                \
  constexpr auto Enum = omp::InternalControlVar::Enum;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

enum class ICVInitValue {
#define ICV_INIT_VALUE(Enum, Name) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

#define ICV_INIT_VALUE(Enum, Name)                                             \
  constexpr auto Enum = omp::ICVInitValue::Enum;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

/// IDs for all omp runtime library (RTL) functions.
enum class RuntimeFunction {
#define OMP_RTL(Enum, ...) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

#define OMP_RTL(Enum, ...) constexpr auto Enum = omp::RuntimeFunction::Enum;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

/// IDs for the different default kinds.
enum class DefaultKind {
#define OMP_DEFAULT_KIND(Enum, Str) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

#define OMP_DEFAULT_KIND(Enum, ...)                                            \
  constexpr auto Enum = omp::DefaultKind::Enum;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

/// IDs for all omp runtime library ident_t flag encodings (see
/// their definition in openmp/runtime/src/kmp.h).
enum class IdentFlag {
#define OMP_IDENT_FLAG(Enum, Str, Value) Enum = Value,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
  LLVM_MARK_AS_BITMASK_ENUM(0x7FFFFFFF)
};

#define OMP_IDENT_FLAG(Enum, ...) constexpr auto Enum = omp::IdentFlag::Enum;
#include "llvm/Frontend/OpenMP/OMPKinds.def"

// Version of the kernel argument format used by the omp runtime.
#define OMP_KERNEL_ARG_VERSION 2

/// \note This needs to be kept in sync with kmp.h enum sched_type.
/// TODO: Update kmp.h to include this file, and remove the enums in kmp.h
enum class OMPScheduleType {
  // For typed comparisons, not a valid schedule
  None = 0,

  // Schedule algorithms
  BaseStaticChunked = 1,
  BaseStatic = 2,
  BaseDynamicChunked = 3,
  BaseGuidedChunked = 4,
  BaseRuntime = 5,
  BaseAuto = 6,
  BaseTrapezoidal = 7,
  BaseGreedy = 8,
  BaseBalanced = 9,
  BaseGuidedIterativeChunked = 10,
  BaseGuidedAnalyticalChunked = 11,
  BaseSteal = 12,

  // with chunk adjustment (e.g., simd)
  BaseStaticBalancedChunked = 13,
  BaseGuidedSimd = 14,
  BaseRuntimeSimd = 15,

  // static schedule algorithms for distribute
  BaseDistributeChunked = 27,
  BaseDistribute = 28,

  // Modifier flags to be combined with schedule algorithms
  ModifierUnordered = (1 << 5),
  ModifierOrdered = (1 << 6),
  ModifierNomerge = (1 << 7),
  ModifierMonotonic = (1 << 29),
  ModifierNonmonotonic = (1 << 30),

  // Masks combining multiple flags
  OrderingMask = ModifierUnordered | ModifierOrdered | ModifierNomerge,
  MonotonicityMask = ModifierMonotonic | ModifierNonmonotonic,
  ModifierMask = OrderingMask | MonotonicityMask,

  // valid schedule type values, without monotonicity flags
  UnorderedStaticChunked = BaseStaticChunked | ModifierUnordered,        //  33
  UnorderedStatic = BaseStatic | ModifierUnordered,                      //  34
  UnorderedDynamicChunked = BaseDynamicChunked | ModifierUnordered,      //  35
  UnorderedGuidedChunked = BaseGuidedChunked | ModifierUnordered,        //  36
  UnorderedRuntime = BaseRuntime | ModifierUnordered,                    //  37
  UnorderedAuto = BaseAuto | ModifierUnordered,                          //  38
  UnorderedTrapezoidal = BaseTrapezoidal | ModifierUnordered,            //  39
  UnorderedGreedy = BaseGreedy | ModifierUnordered,                      //  40
  UnorderedBalanced = BaseBalanced | ModifierUnordered,                  //  41
  UnorderedGuidedIterativeChunked =
      BaseGuidedIterativeChunked | ModifierUnordered,                    //  42
  UnorderedGuidedAnalyticalChunked =
      BaseGuidedAnalyticalChunked | ModifierUnordered,                   //  43
  UnorderedSteal = BaseSteal | ModifierUnordered,                        //  44

  UnorderedStaticBalancedChunked =
      BaseStaticBalancedChunked | ModifierUnordered,                     //  45
  UnorderedGuidedSimd = BaseGuidedSimd | ModifierUnordered,              //  46
  UnorderedRuntimeSimd = BaseRuntimeSimd | ModifierUnordered,            //  47

  OrderedStaticChunked = BaseStaticChunked | ModifierOrdered,            //  65
  OrderedStatic = BaseStatic | ModifierOrdered,                          //  66
  OrderedDynamicChunked = BaseDynamicChunked | ModifierOrdered,          //  67
  OrderedGuidedChunked = BaseGuidedChunked | ModifierOrdered,            //  68
  OrderedRuntime = BaseRuntime | ModifierOrdered,                        //  69
  OrderedAuto = BaseAuto | ModifierOrdered,                              //  70
  OrderdTrapezoidal = BaseTrapezoidal | ModifierOrdered,                 //  71

  OrderedDistributeChunked = BaseDistributeChunked | ModifierOrdered,    //  91
  OrderedDistribute = BaseDistribute | ModifierOrdered,                  //  92

  NomergeUnorderedStaticChunked =
      BaseStaticChunked | ModifierUnordered | ModifierNomerge,           // 161
  NomergeUnorderedStatic =
      BaseStatic | ModifierUnordered | ModifierNomerge,                  // 162
  NomergeUnorderedDynamicChunked =
      BaseDynamicChunked | ModifierUnordered | ModifierNomerge,          // 163
  NomergeUnorderedGuidedChunked =
      BaseGuidedChunked | ModifierUnordered | ModifierNomerge,           // 164
  NomergeUnorderedRuntime =
      BaseRuntime | ModifierUnordered | ModifierNomerge,                 // 165
  NomergeUnorderedAuto = BaseAuto | ModifierUnordered | ModifierNomerge, // 166
  NomergeUnorderedTrapezoidal =
      BaseTrapezoidal | ModifierUnordered | ModifierNomerge,             // 167
  NomergeUnorderedGreedy =
      BaseGreedy | ModifierUnordered | ModifierNomerge,                  // 168
  NomergeUnorderedBalanced =
      BaseBalanced | ModifierUnordered | ModifierNomerge,                // 169
  NomergeUnorderedGuidedIterativeChunked =
      BaseGuidedIterativeChunked | ModifierUnordered | ModifierNomerge,  // 170
  NomergeUnorderedGuidedAnalyticalChunked =
      BaseGuidedAnalyticalChunked | ModifierUnordered | ModifierNomerge, // 171
  NomergeUnorderedSteal =
      BaseSteal | ModifierUnordered | ModifierNomerge,                   // 172

  NomergeOrderedStaticChunked =
      BaseStaticChunked | ModifierOrdered | ModifierNomerge,             // 193
  NomergeOrderedStatic = BaseStatic | ModifierOrdered | ModifierNomerge, // 194
  NomergeOrderedDynamicChunked =
      BaseDynamicChunked | ModifierOrdered | ModifierNomerge,            // 195
  NomergeOrderedGuidedChunked =
      BaseGuidedChunked | ModifierOrdered | ModifierNomerge,             // 196
  NomergeOrderedRuntime =
      BaseRuntime | ModifierOrdered | ModifierNomerge,                   // 197
  NomergeOrderedAuto = BaseAuto | ModifierOrdered | ModifierNomerge,     // 198
  NomergeOrderedTrapezoidal =
      BaseTrapezoidal | ModifierOrdered | ModifierNomerge,               // 199

  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue */ ModifierMask)
};
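
// Illustrative sketch (not from the LLVM sources): the bitmask-enum
// operators allow composing a schedule from a base algorithm and modifier
// flags, e.g. for 'schedule(nonmonotonic: dynamic, chunk)':
//
//   OMPScheduleType Sched = OMPScheduleType::BaseDynamicChunked |
//                           OMPScheduleType::ModifierNonmonotonic;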

/// Values for bit flags used to specify the mapping type for
/// offloading.
enum class OpenMPOffloadMappingFlags : uint64_t {
  /// No flags
  OMP_MAP_NONE = 0x0,
  /// Allocate memory on the device and move data from host to device.
  OMP_MAP_TO = 0x01,
  /// Allocate memory on the device and move data from device to host.
  OMP_MAP_FROM = 0x02,
  /// Always perform the requested mapping action on the element, even
  /// if it was already mapped before.
  OMP_MAP_ALWAYS = 0x04,
  /// Delete the element from the device environment, ignoring the
  /// current reference count associated with the element.
  OMP_MAP_DELETE = 0x08,
  /// The element being mapped is a pointer-pointee pair; both the
  /// pointer and the pointee should be mapped.
  OMP_MAP_PTR_AND_OBJ = 0x10,
  /// This flag signals that the base address of an entry should be
  /// passed to the target kernel as an argument.
  OMP_MAP_TARGET_PARAM = 0x20,
  /// Signal that the runtime library has to return the device pointer
  /// in the current position for the data being mapped. Used when we have the
  /// use_device_ptr or use_device_addr clause.
  OMP_MAP_RETURN_PARAM = 0x40,
  /// This flag signals that the reference being passed is a pointer to
  /// private data.
  OMP_MAP_PRIVATE = 0x80,
  /// Pass the element to the device by value.
  OMP_MAP_LITERAL = 0x100,
  /// Implicit map
  OMP_MAP_IMPLICIT = 0x200,
  /// Close is a hint to the runtime to allocate memory close to
  /// the target device.
  OMP_MAP_CLOSE = 0x400,
  /// 0x800 is reserved for compatibility with XLC.
  /// Produce a runtime error if the data is not already allocated.
  OMP_MAP_PRESENT = 0x1000,
  // Increment and decrement a separate reference counter so that the data
  // cannot be unmapped within the associated region.  Thus, this flag is
  // intended to be used on 'target' and 'target data' directives because they
  // are inherently structured.  It is not intended to be used on 'target
  // enter data' and 'target exit data' directives because they are inherently
  // dynamic.
  // This is an OpenMP extension for the sake of OpenACC support.
  OMP_MAP_OMPX_HOLD = 0x2000,
  /// Signal that the runtime library should use args as an array of
  /// descriptor_dim pointers and use args_size as dims. Used when we have
  /// non-contiguous list items in the target update directive.
  OMP_MAP_NON_CONTIG = 0x100000000000,
  /// The 16 MSBs of the flags indicate whether the entry is a member of some
  /// struct/class.
  OMP_MAP_MEMBER_OF = 0xffff000000000000,
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF)
};
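
// Illustrative sketch (not from the LLVM sources): the flags combine with
// the bitmask-enum operators, e.g. a 'map(tofrom: ...)' entry that is also
// passed to the kernel as an argument:
//
//   OpenMPOffloadMappingFlags MapType =
//       OpenMPOffloadMappingFlags::OMP_MAP_TO |
//       OpenMPOffloadMappingFlags::OMP_MAP_FROM |
//       OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;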

enum OpenMPOffloadingReservedDeviceIDs {
  /// Device ID if the device was not defined; the runtime should get it
  /// from environment variables as described in the spec.
  OMP_DEVICEID_UNDEF = -1
};

enum class AddressSpace : unsigned {
  Generic = 0,
  Global = 1,
  Shared = 3,
  Constant = 4,
  Local = 5,
};

/// \note This needs to be kept in sync with interop.h enum kmp_interop_type_t.
enum class OMPInteropType { Unknown, Target, TargetSync };

/// Atomic compare operations. Currently OpenMP only supports ==, >, and <.
enum class OMPAtomicCompareOp : unsigned { EQ, MIN, MAX };

/// Field ids in the kmp_depend_info record.
enum class RTLDependInfoFields { BaseAddr, Len, Flags };

/// Dependence kind for RTL.
enum class RTLDependenceKindTy {
  DepUnknown = 0x0,
  DepIn = 0x01,
  DepInOut = 0x3,
  DepMutexInOutSet = 0x4,
  DepInOutSet = 0x8,
  DepOmpAllMem = 0x80,
};

} // end namespace omp

} // end namespace llvm

#include "OMPDeviceConstants.h"

#endif // LLVM_FRONTEND_OPENMP_OMPCONSTANTS_H
//===- OMPDeviceConstants.h - OpenMP device related constants ----- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines constants that will be used by both host and device
/// compilation.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_OPENMP_OMPDEVICECONSTANTS_H
#define LLVM_FRONTEND_OPENMP_OMPDEVICECONSTANTS_H

namespace llvm {
namespace omp {

enum OMPTgtExecModeFlags : unsigned char {
  OMP_TGT_EXEC_MODE_GENERIC = 1 << 0,
  OMP_TGT_EXEC_MODE_SPMD = 1 << 1,
  OMP_TGT_EXEC_MODE_GENERIC_SPMD =
      OMP_TGT_EXEC_MODE_GENERIC | OMP_TGT_EXEC_MODE_SPMD
};
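
// Illustrative sketch (not from the LLVM sources): since this is a plain
// (non-class) enum, a kernel's execution mode can be tested with ordinary
// bitwise operators:
//
//   inline bool mayRunInSPMDMode(OMPTgtExecModeFlags Mode) {
//     return Mode & OMP_TGT_EXEC_MODE_SPMD;
//   }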

} // end namespace omp
} // end namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPDEVICECONSTANTS_H
//===- Frontend/OpenMP/OMP.h.inc ------------------------------------------===//

#ifndef LLVM_OpenMP_INC
#define LLVM_OpenMP_INC

#include "llvm/ADT/BitmaskEnum.h"

namespace llvm {
class StringRef;
namespace omp {

LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();

enum class Directive {
  OMPD_allocate,
  OMPD_assumes,
  OMPD_atomic,
  OMPD_barrier,
  OMPD_begin_assumes,
  OMPD_begin_declare_target,
  OMPD_begin_declare_variant,
  OMPD_cancel,
  OMPD_cancellation_point,
  OMPD_critical,
  OMPD_declare_mapper,
  OMPD_declare_reduction,
  OMPD_declare_simd,
  OMPD_declare_target,
  OMPD_declare_variant,
  OMPD_depobj,
  OMPD_distribute,
  OMPD_distribute_parallel_do,
  OMPD_distribute_parallel_do_simd,
  OMPD_distribute_parallel_for,
  OMPD_distribute_parallel_for_simd,
  OMPD_distribute_simd,
  OMPD_do,
  OMPD_do_simd,
  OMPD_end_assumes,
  OMPD_end_declare_target,
  OMPD_end_declare_variant,
  OMPD_end_do,
  OMPD_end_do_simd,
  OMPD_end_sections,
  OMPD_end_single,
  OMPD_end_workshare,
  OMPD_error,
  OMPD_flush,
  OMPD_for,
  OMPD_for_simd,
  OMPD_masked_taskloop,
  OMPD_masked_taskloop_simd,
  OMPD_master,
  OMPD_master_taskloop,
  OMPD_master_taskloop_simd,
  OMPD_metadirective,
  OMPD_nothing,
  OMPD_ordered,
  OMPD_parallel,
  OMPD_parallel_do,
  OMPD_parallel_do_simd,
  OMPD_parallel_for,
  OMPD_parallel_for_simd,
  OMPD_parallel_masked,
  OMPD_parallel_masked_taskloop,
  OMPD_parallel_masked_taskloop_simd,
  OMPD_parallel_master,
  OMPD_parallel_master_taskloop,
  OMPD_parallel_master_taskloop_simd,
  OMPD_parallel_sections,
  OMPD_parallel_workshare,
  OMPD_requires,
  OMPD_scan,
  OMPD_section,
  OMPD_sections,
  OMPD_simd,
  OMPD_single,
  OMPD_target,
  OMPD_target_data,
  OMPD_target_enter_data,
  OMPD_target_exit_data,
  OMPD_target_parallel,
  OMPD_target_parallel_do,
  OMPD_target_parallel_do_simd,
  OMPD_target_parallel_for,
  OMPD_target_parallel_for_simd,
  OMPD_target_simd,
  OMPD_target_teams,
  OMPD_target_teams_distribute,
  OMPD_target_teams_distribute_parallel_do,
  OMPD_target_teams_distribute_parallel_do_simd,
  OMPD_target_teams_distribute_parallel_for,
  OMPD_target_teams_distribute_parallel_for_simd,
  OMPD_target_teams_distribute_simd,
  OMPD_target_update,
  OMPD_task,
  OMPD_taskgroup,
  OMPD_taskloop,
  OMPD_taskloop_simd,
  OMPD_taskwait,
  OMPD_taskyield,
  OMPD_teams,
  OMPD_teams_distribute,
  OMPD_teams_distribute_parallel_do,
  OMPD_teams_distribute_parallel_do_simd,
  OMPD_teams_distribute_parallel_for,
  OMPD_teams_distribute_parallel_for_simd,
  OMPD_teams_distribute_simd,
  OMPD_threadprivate,
  OMPD_tile,
  OMPD_unknown,
  OMPD_unroll,
  OMPD_workshare,
  OMPD_dispatch,
  OMPD_interop,
  OMPD_loop,
  OMPD_masked,
  OMPD_parallel_loop,
  OMPD_target_parallel_loop,
  OMPD_target_teams_loop,
  OMPD_teams_loop,
};

static constexpr std::size_t Directive_enumSize = 107;

constexpr auto OMPD_allocate = llvm::omp::Directive::OMPD_allocate;
constexpr auto OMPD_assumes = llvm::omp::Directive::OMPD_assumes;
constexpr auto OMPD_atomic = llvm::omp::Directive::OMPD_atomic;
constexpr auto OMPD_barrier = llvm::omp::Directive::OMPD_barrier;
constexpr auto OMPD_begin_assumes = llvm::omp::Directive::OMPD_begin_assumes;
constexpr auto OMPD_begin_declare_target = llvm::omp::Directive::OMPD_begin_declare_target;
constexpr auto OMPD_begin_declare_variant = llvm::omp::Directive::OMPD_begin_declare_variant;
constexpr auto OMPD_cancel = llvm::omp::Directive::OMPD_cancel;
constexpr auto OMPD_cancellation_point = llvm::omp::Directive::OMPD_cancellation_point;
constexpr auto OMPD_critical = llvm::omp::Directive::OMPD_critical;
constexpr auto OMPD_declare_mapper = llvm::omp::Directive::OMPD_declare_mapper;
constexpr auto OMPD_declare_reduction = llvm::omp::Directive::OMPD_declare_reduction;
constexpr auto OMPD_declare_simd = llvm::omp::Directive::OMPD_declare_simd;
constexpr auto OMPD_declare_target = llvm::omp::Directive::OMPD_declare_target;
constexpr auto OMPD_declare_variant = llvm::omp::Directive::OMPD_declare_variant;
constexpr auto OMPD_depobj = llvm::omp::Directive::OMPD_depobj;
constexpr auto OMPD_distribute = llvm::omp::Directive::OMPD_distribute;
constexpr auto OMPD_distribute_parallel_do = llvm::omp::Directive::OMPD_distribute_parallel_do;
constexpr auto OMPD_distribute_parallel_do_simd = llvm::omp::Directive::OMPD_distribute_parallel_do_simd;
constexpr auto OMPD_distribute_parallel_for = llvm::omp::Directive::OMPD_distribute_parallel_for;
constexpr auto OMPD_distribute_parallel_for_simd = llvm::omp::Directive::OMPD_distribute_parallel_for_simd;
constexpr auto OMPD_distribute_simd = llvm::omp::Directive::OMPD_distribute_simd;
constexpr auto OMPD_do = llvm::omp::Directive::OMPD_do;
constexpr auto OMPD_do_simd = llvm::omp::Directive::OMPD_do_simd;
constexpr auto OMPD_end_assumes = llvm::omp::Directive::OMPD_end_assumes;
constexpr auto OMPD_end_declare_target = llvm::omp::Directive::OMPD_end_declare_target;
constexpr auto OMPD_end_declare_variant = llvm::omp::Directive::OMPD_end_declare_variant;
constexpr auto OMPD_end_do = llvm::omp::Directive::OMPD_end_do;
constexpr auto OMPD_end_do_simd = llvm::omp::Directive::OMPD_end_do_simd;
constexpr auto OMPD_end_sections = llvm::omp::Directive::OMPD_end_sections;
constexpr auto OMPD_end_single = llvm::omp::Directive::OMPD_end_single;
constexpr auto OMPD_end_workshare = llvm::omp::Directive::OMPD_end_workshare;
constexpr auto OMPD_error = llvm::omp::Directive::OMPD_error;
constexpr auto OMPD_flush = llvm::omp::Directive::OMPD_flush;
constexpr auto OMPD_for = llvm::omp::Directive::OMPD_for;
constexpr auto OMPD_for_simd = llvm::omp::Directive::OMPD_for_simd;
constexpr auto OMPD_masked_taskloop = llvm::omp::Directive::OMPD_masked_taskloop;
constexpr auto OMPD_masked_taskloop_simd = llvm::omp::Directive::OMPD_masked_taskloop_simd;
constexpr auto OMPD_master = llvm::omp::Directive::OMPD_master;
constexpr auto OMPD_master_taskloop = llvm::omp::Directive::OMPD_master_taskloop;
constexpr auto OMPD_master_taskloop_simd = llvm::omp::Directive::OMPD_master_taskloop_simd;
constexpr auto OMPD_metadirective = llvm::omp::Directive::OMPD_metadirective;
constexpr auto OMPD_nothing = llvm::omp::Directive::OMPD_nothing;
constexpr auto OMPD_ordered = llvm::omp::Directive::OMPD_ordered;
constexpr auto OMPD_parallel = llvm::omp::Directive::OMPD_parallel;
constexpr auto OMPD_parallel_do = llvm::omp::Directive::OMPD_parallel_do;
constexpr auto OMPD_parallel_do_simd = llvm::omp::Directive::OMPD_parallel_do_simd;
constexpr auto OMPD_parallel_for = llvm::omp::Directive::OMPD_parallel_for;
constexpr auto OMPD_parallel_for_simd = llvm::omp::Directive::OMPD_parallel_for_simd;
constexpr auto OMPD_parallel_masked = llvm::omp::Directive::OMPD_parallel_masked;
constexpr auto OMPD_parallel_masked_taskloop = llvm::omp::Directive::OMPD_parallel_masked_taskloop;
constexpr auto OMPD_parallel_masked_taskloop_simd = llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd;
constexpr auto OMPD_parallel_master = llvm::omp::Directive::OMPD_parallel_master;
constexpr auto OMPD_parallel_master_taskloop = llvm::omp::Directive::OMPD_parallel_master_taskloop;
constexpr auto OMPD_parallel_master_taskloop_simd = llvm::omp::Directive::OMPD_parallel_master_taskloop_simd;
constexpr auto OMPD_parallel_sections = llvm::omp::Directive::OMPD_parallel_sections;
constexpr auto OMPD_parallel_workshare = llvm::omp::Directive::OMPD_parallel_workshare;
constexpr auto OMPD_requires = llvm::omp::Directive::OMPD_requires;
constexpr auto OMPD_scan = llvm::omp::Directive::OMPD_scan;
constexpr auto OMPD_section = llvm::omp::Directive::OMPD_section;
constexpr auto OMPD_sections = llvm::omp::Directive::OMPD_sections;
constexpr auto OMPD_simd = llvm::omp::Directive::OMPD_simd;
constexpr auto OMPD_single = llvm::omp::Directive::OMPD_single;
constexpr auto OMPD_target = llvm::omp::Directive::OMPD_target;
constexpr auto OMPD_target_data = llvm::omp::Directive::OMPD_target_data;
constexpr auto OMPD_target_enter_data = llvm::omp::Directive::OMPD_target_enter_data;
constexpr auto OMPD_target_exit_data = llvm::omp::Directive::OMPD_target_exit_data;
constexpr auto OMPD_target_parallel = llvm::omp::Directive::OMPD_target_parallel;
constexpr auto OMPD_target_parallel_do = llvm::omp::Directive::OMPD_target_parallel_do;
constexpr auto OMPD_target_parallel_do_simd = llvm::omp::Directive::OMPD_target_parallel_do_simd;
constexpr auto OMPD_target_parallel_for = llvm::omp::Directive::OMPD_target_parallel_for;
constexpr auto OMPD_target_parallel_for_simd = llvm::omp::Directive::OMPD_target_parallel_for_simd;
constexpr auto OMPD_target_simd = llvm::omp::Directive::OMPD_target_simd;
constexpr auto OMPD_target_teams = llvm::omp::Directive::OMPD_target_teams;
constexpr auto OMPD_target_teams_distribute = llvm::omp::Directive::OMPD_target_teams_distribute;
constexpr auto OMPD_target_teams_distribute_parallel_do = llvm::omp::Directive::OMPD_target_teams_distribute_parallel_do;
constexpr auto OMPD_target_teams_distribute_parallel_do_simd = llvm::omp::Directive::OMPD_target_teams_distribute_parallel_do_simd;
constexpr auto OMPD_target_teams_distribute_parallel_for = llvm::omp::Directive::OMPD_target_teams_distribute_parallel_for;
constexpr auto OMPD_target_teams_distribute_parallel_for_simd = llvm::omp::Directive::OMPD_target_teams_distribute_parallel_for_simd;
constexpr auto OMPD_target_teams_distribute_simd = llvm::omp::Directive::OMPD_target_teams_distribute_simd;
constexpr auto OMPD_target_update = llvm::omp::Directive::OMPD_target_update;
constexpr auto OMPD_task = llvm::omp::Directive::OMPD_task;
constexpr auto OMPD_taskgroup = llvm::omp::Directive::OMPD_taskgroup;
constexpr auto OMPD_taskloop = llvm::omp::Directive::OMPD_taskloop;
constexpr auto OMPD_taskloop_simd = llvm::omp::Directive::OMPD_taskloop_simd;
constexpr auto OMPD_taskwait = llvm::omp::Directive::OMPD_taskwait;
constexpr auto OMPD_taskyield = llvm::omp::Directive::OMPD_taskyield;
constexpr auto OMPD_teams = llvm::omp::Directive::OMPD_teams;
constexpr auto OMPD_teams_distribute = llvm::omp::Directive::OMPD_teams_distribute;
constexpr auto OMPD_teams_distribute_parallel_do = llvm::omp::Directive::OMPD_teams_distribute_parallel_do;
constexpr auto OMPD_teams_distribute_parallel_do_simd = llvm::omp::Directive::OMPD_teams_distribute_parallel_do_simd;
constexpr auto OMPD_teams_distribute_parallel_for = llvm::omp::Directive::OMPD_teams_distribute_parallel_for;
constexpr auto OMPD_teams_distribute_parallel_for_simd = llvm::omp::Directive::OMPD_teams_distribute_parallel_for_simd;
constexpr auto OMPD_teams_distribute_simd = llvm::omp::Directive::OMPD_teams_distribute_simd;
constexpr auto OMPD_threadprivate = llvm::omp::Directive::OMPD_threadprivate;
constexpr auto OMPD_tile = llvm::omp::Directive::OMPD_tile;
constexpr auto OMPD_unknown = llvm::omp::Directive::OMPD_unknown;
constexpr auto OMPD_unroll = llvm::omp::Directive::OMPD_unroll;
constexpr auto OMPD_workshare = llvm::omp::Directive::OMPD_workshare;
constexpr auto OMPD_dispatch = llvm::omp::Directive::OMPD_dispatch;
constexpr auto OMPD_interop = llvm::omp::Directive::OMPD_interop;
constexpr auto OMPD_loop = llvm::omp::Directive::OMPD_loop;
constexpr auto OMPD_masked = llvm::omp::Directive::OMPD_masked;
constexpr auto OMPD_parallel_loop = llvm::omp::Directive::OMPD_parallel_loop;
constexpr auto OMPD_target_parallel_loop = llvm::omp::Directive::OMPD_target_parallel_loop;
constexpr auto OMPD_target_teams_loop = llvm::omp::Directive::OMPD_target_teams_loop;
constexpr auto OMPD_teams_loop = llvm::omp::Directive::OMPD_teams_loop;

enum class Clause {
  OMPC_acq_rel,
  OMPC_acquire,
  OMPC_adjust_args,
  OMPC_affinity,
  OMPC_align,
  OMPC_aligned,
  OMPC_allocate,
  OMPC_allocator,
  OMPC_append_args,
  OMPC_at,
  OMPC_atomic_default_mem_order,
  OMPC_bind,
  OMPC_cancellation_construct_type,
  OMPC_capture,
  OMPC_collapse,
  OMPC_compare,
  OMPC_copyprivate,
  OMPC_copyin,
  OMPC_default,
  OMPC_defaultmap,
  OMPC_depend,
  OMPC_depobj,
  OMPC_destroy,
  OMPC_detach,
  OMPC_device,
  OMPC_device_type,
  OMPC_dist_schedule,
  OMPC_doacross,
  OMPC_dynamic_allocators,
  OMPC_exclusive,
  OMPC_filter,
  OMPC_final,
  OMPC_firstprivate,
  OMPC_flush,
  OMPC_from,
  OMPC_full,
  OMPC_grainsize,
  OMPC_has_device_addr,
  OMPC_hint,
  OMPC_if,
  OMPC_in_reduction,
  OMPC_inbranch,
  OMPC_inclusive,
  OMPC_indirect,
  OMPC_init,
  OMPC_is_device_ptr,
  OMPC_lastprivate,
  OMPC_linear,
  OMPC_link,
  OMPC_map,
  OMPC_match,
  OMPC_memory_order,
  OMPC_mergeable,
  OMPC_message,
  OMPC_nogroup,
  OMPC_nowait,
  OMPC_nocontext,
  OMPC_nontemporal,
  OMPC_notinbranch,
  OMPC_novariants,
  OMPC_num_tasks,
  OMPC_num_teams,
  OMPC_num_threads,
  OMPC_ompx_dyn_cgroup_mem,
  OMPC_order,
  OMPC_ordered,
  OMPC_partial,
  OMPC_priority,
  OMPC_private,
  OMPC_proc_bind,
  OMPC_read,
  OMPC_reduction,
  OMPC_relaxed,
  OMPC_release,
  OMPC_reverse_offload,
  OMPC_safelen,
  OMPC_schedule,
  OMPC_seq_cst,
  OMPC_severity,
  OMPC_shared,
  OMPC_simd,
  OMPC_simdlen,
  OMPC_sizes,
  OMPC_task_reduction,
  OMPC_thread_limit,
  OMPC_threadprivate,
  OMPC_threads,
  OMPC_to,
  OMPC_unified_address,
  OMPC_unified_shared_memory,
  OMPC_uniform,
  OMPC_unknown,
  OMPC_untied,
  OMPC_update,
  OMPC_use,
  OMPC_use_device_addr,
  OMPC_use_device_ptr,
  OMPC_uses_allocators,
  OMPC_when,
  OMPC_write,
};

static constexpr std::size_t Clause_enumSize = 100;

constexpr auto OMPC_acq_rel = llvm::omp::Clause::OMPC_acq_rel;
constexpr auto OMPC_acquire = llvm::omp::Clause::OMPC_acquire;
constexpr auto OMPC_adjust_args = llvm::omp::Clause::OMPC_adjust_args;
constexpr auto OMPC_affinity = llvm::omp::Clause::OMPC_affinity;
constexpr auto OMPC_align = llvm::omp::Clause::OMPC_align;
constexpr auto OMPC_aligned = llvm::omp::Clause::OMPC_aligned;
constexpr auto OMPC_allocate = llvm::omp::Clause::OMPC_allocate;
constexpr auto OMPC_allocator = llvm::omp::Clause::OMPC_allocator;
constexpr auto OMPC_append_args = llvm::omp::Clause::OMPC_append_args;
constexpr auto OMPC_at = llvm::omp::Clause::OMPC_at;
constexpr auto OMPC_atomic_default_mem_order = llvm::omp::Clause::OMPC_atomic_default_mem_order;
constexpr auto OMPC_bind = llvm::omp::Clause::OMPC_bind;
constexpr auto OMPC_cancellation_construct_type = llvm::omp::Clause::OMPC_cancellation_construct_type;
constexpr auto OMPC_capture = llvm::omp::Clause::OMPC_capture;
constexpr auto OMPC_collapse = llvm::omp::Clause::OMPC_collapse;
constexpr auto OMPC_compare = llvm::omp::Clause::OMPC_compare;
constexpr auto OMPC_copyprivate = llvm::omp::Clause::OMPC_copyprivate;
constexpr auto OMPC_copyin = llvm::omp::Clause::OMPC_copyin;
constexpr auto OMPC_default = llvm::omp::Clause::OMPC_default;
constexpr auto OMPC_defaultmap = llvm::omp::Clause::OMPC_defaultmap;
constexpr auto OMPC_depend = llvm::omp::Clause::OMPC_depend;
constexpr auto OMPC_depobj = llvm::omp::Clause::OMPC_depobj;
constexpr auto OMPC_destroy = llvm::omp::Clause::OMPC_destroy;
constexpr auto OMPC_detach = llvm::omp::Clause::OMPC_detach;
constexpr auto OMPC_device = llvm::omp::Clause::OMPC_device;
constexpr auto OMPC_device_type = llvm::omp::Clause::OMPC_device_type;
constexpr auto OMPC_dist_schedule = llvm::omp::Clause::OMPC_dist_schedule;
constexpr auto OMPC_doacross = llvm::omp::Clause::OMPC_doacross;
constexpr auto OMPC_dynamic_allocators = llvm::omp::Clause::OMPC_dynamic_allocators;
constexpr auto OMPC_exclusive = llvm::omp::Clause::OMPC_exclusive;
constexpr auto OMPC_filter = llvm::omp::Clause::OMPC_filter;
constexpr auto OMPC_final = llvm::omp::Clause::OMPC_final;
constexpr auto OMPC_firstprivate = llvm::omp::Clause::OMPC_firstprivate;
constexpr auto OMPC_flush = llvm::omp::Clause::OMPC_flush;
constexpr auto OMPC_from = llvm::omp::Clause::OMPC_from;
constexpr auto OMPC_full = llvm::omp::Clause::OMPC_full;
constexpr auto OMPC_grainsize = llvm::omp::Clause::OMPC_grainsize;
constexpr auto OMPC_has_device_addr = llvm::omp::Clause::OMPC_has_device_addr;
constexpr auto OMPC_hint = llvm::omp::Clause::OMPC_hint;
constexpr auto OMPC_if = llvm::omp::Clause::OMPC_if;
constexpr auto OMPC_in_reduction = llvm::omp::Clause::OMPC_in_reduction;
constexpr auto OMPC_inbranch = llvm::omp::Clause::OMPC_inbranch;
constexpr auto OMPC_inclusive = llvm::omp::Clause::OMPC_inclusive;
constexpr auto OMPC_indirect = llvm::omp::Clause::OMPC_indirect;
constexpr auto OMPC_init = llvm::omp::Clause::OMPC_init;
constexpr auto OMPC_is_device_ptr = llvm::omp::Clause::OMPC_is_device_ptr;
constexpr auto OMPC_lastprivate = llvm::omp::Clause::OMPC_lastprivate;
constexpr auto OMPC_linear = llvm::omp::Clause::OMPC_linear;
constexpr auto OMPC_link = llvm::omp::Clause::OMPC_link;
constexpr auto OMPC_map = llvm::omp::Clause::OMPC_map;
constexpr auto OMPC_match = llvm::omp::Clause::OMPC_match;
constexpr auto OMPC_memory_order = llvm::omp::Clause::OMPC_memory_order;
constexpr auto OMPC_mergeable = llvm::omp::Clause::OMPC_mergeable;
constexpr auto OMPC_message = llvm::omp::Clause::OMPC_message;
constexpr auto OMPC_nogroup = llvm::omp::Clause::OMPC_nogroup;
constexpr auto OMPC_nowait = llvm::omp::Clause::OMPC_nowait;
constexpr auto OMPC_nocontext = llvm::omp::Clause::OMPC_nocontext;
constexpr auto OMPC_nontemporal = llvm::omp::Clause::OMPC_nontemporal;
constexpr auto OMPC_notinbranch = llvm::omp::Clause::OMPC_notinbranch;
constexpr auto OMPC_novariants = llvm::omp::Clause::OMPC_novariants;
constexpr auto OMPC_num_tasks = llvm::omp::Clause::OMPC_num_tasks;
constexpr auto OMPC_num_teams = llvm::omp::Clause::OMPC_num_teams;
constexpr auto OMPC_num_threads = llvm::omp::Clause::OMPC_num_threads;
constexpr auto OMPC_ompx_dyn_cgroup_mem = llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem;
constexpr auto OMPC_order = llvm::omp::Clause::OMPC_order;
constexpr auto OMPC_ordered = llvm::omp::Clause::OMPC_ordered;
constexpr auto OMPC_partial = llvm::omp::Clause::OMPC_partial;
constexpr auto OMPC_priority = llvm::omp::Clause::OMPC_priority;
constexpr auto OMPC_private = llvm::omp::Clause::OMPC_private;
constexpr auto OMPC_proc_bind = llvm::omp::Clause::OMPC_proc_bind;
constexpr auto OMPC_read = llvm::omp::Clause::OMPC_read;
constexpr auto OMPC_reduction = llvm::omp::Clause::OMPC_reduction;
constexpr auto OMPC_relaxed = llvm::omp::Clause::OMPC_relaxed;
constexpr auto OMPC_release = llvm::omp::Clause::OMPC_release;
constexpr auto OMPC_reverse_offload = llvm::omp::Clause::OMPC_reverse_offload;
constexpr auto OMPC_safelen = llvm::omp::Clause::OMPC_safelen;
constexpr auto OMPC_schedule = llvm::omp::Clause::OMPC_schedule;
constexpr auto OMPC_seq_cst = llvm::omp::Clause::OMPC_seq_cst;
constexpr auto OMPC_severity = llvm::omp::Clause::OMPC_severity;
constexpr auto OMPC_shared = llvm::omp::Clause::OMPC_shared;
constexpr auto OMPC_simd = llvm::omp::Clause::OMPC_simd;
constexpr auto OMPC_simdlen = llvm::omp::Clause::OMPC_simdlen;
constexpr auto OMPC_sizes = llvm::omp::Clause::OMPC_sizes;
constexpr auto OMPC_task_reduction = llvm::omp::Clause::OMPC_task_reduction;
constexpr auto OMPC_thread_limit = llvm::omp::Clause::OMPC_thread_limit;
constexpr auto OMPC_threadprivate = llvm::omp::Clause::OMPC_threadprivate;
constexpr auto OMPC_threads = llvm::omp::Clause::OMPC_threads;
constexpr auto OMPC_to = llvm::omp::Clause::OMPC_to;
constexpr auto OMPC_unified_address = llvm::omp::Clause::OMPC_unified_address;
constexpr auto OMPC_unified_shared_memory = llvm::omp::Clause::OMPC_unified_shared_memory;
constexpr auto OMPC_uniform = llvm::omp::Clause::OMPC_uniform;
constexpr auto OMPC_unknown = llvm::omp::Clause::OMPC_unknown;
constexpr auto OMPC_untied = llvm::omp::Clause::OMPC_untied;
constexpr auto OMPC_update = llvm::omp::Clause::OMPC_update;
constexpr auto OMPC_use = llvm::omp::Clause::OMPC_use;
constexpr auto OMPC_use_device_addr = llvm::omp::Clause::OMPC_use_device_addr;
constexpr auto OMPC_use_device_ptr = llvm::omp::Clause::OMPC_use_device_ptr;
constexpr auto OMPC_uses_allocators = llvm::omp::Clause::OMPC_uses_allocators;
constexpr auto OMPC_when = llvm::omp::Clause::OMPC_when;
constexpr auto OMPC_write = llvm::omp::Clause::OMPC_write;

enum class CancellationConstructType {
  OMP_CANCELLATION_CONSTRUCT_Parallel=1,
  OMP_CANCELLATION_CONSTRUCT_Loop=2,
  OMP_CANCELLATION_CONSTRUCT_Sections=3,
  OMP_CANCELLATION_CONSTRUCT_Taskgroup=4,
  OMP_CANCELLATION_CONSTRUCT_None=5,
};

constexpr auto OMP_CANCELLATION_CONSTRUCT_Parallel = llvm::omp::CancellationConstructType::OMP_CANCELLATION_CONSTRUCT_Parallel;
constexpr auto OMP_CANCELLATION_CONSTRUCT_Loop = llvm::omp::CancellationConstructType::OMP_CANCELLATION_CONSTRUCT_Loop;
constexpr auto OMP_CANCELLATION_CONSTRUCT_Sections = llvm::omp::CancellationConstructType::OMP_CANCELLATION_CONSTRUCT_Sections;
constexpr auto OMP_CANCELLATION_CONSTRUCT_Taskgroup = llvm::omp::CancellationConstructType::OMP_CANCELLATION_CONSTRUCT_Taskgroup;
constexpr auto OMP_CANCELLATION_CONSTRUCT_None = llvm::omp::CancellationConstructType::OMP_CANCELLATION_CONSTRUCT_None;

enum class GrainsizeType {
  OMP_GRAINSIZE_Strict=1,
  OMP_GRAINSIZE_Unknown=2,
};

constexpr auto OMP_GRAINSIZE_Strict = llvm::omp::GrainsizeType::OMP_GRAINSIZE_Strict;
constexpr auto OMP_GRAINSIZE_Unknown = llvm::omp::GrainsizeType::OMP_GRAINSIZE_Unknown;

enum class MemoryOrderKind {
  OMP_MEMORY_ORDER_SeqCst=1,
  OMP_MEMORY_ORDER_AcqRel=2,
  OMP_MEMORY_ORDER_Acquire=3,
  OMP_MEMORY_ORDER_Release=4,
  OMP_MEMORY_ORDER_Relaxed=5,
  OMP_MEMORY_ORDER_Default=6,
};

constexpr auto OMP_MEMORY_ORDER_SeqCst = llvm::omp::MemoryOrderKind::OMP_MEMORY_ORDER_SeqCst;
constexpr auto OMP_MEMORY_ORDER_AcqRel = llvm::omp::MemoryOrderKind::OMP_MEMORY_ORDER_AcqRel;
constexpr auto OMP_MEMORY_ORDER_Acquire = llvm::omp::MemoryOrderKind::OMP_MEMORY_ORDER_Acquire;
constexpr auto OMP_MEMORY_ORDER_Release = llvm::omp::MemoryOrderKind::OMP_MEMORY_ORDER_Release;
constexpr auto OMP_MEMORY_ORDER_Relaxed = llvm::omp::MemoryOrderKind::OMP_MEMORY_ORDER_Relaxed;
constexpr auto OMP_MEMORY_ORDER_Default = llvm::omp::MemoryOrderKind::OMP_MEMORY_ORDER_Default;

enum class NumTasksType {
  OMP_NUMTASKS_Strict=1,
  OMP_NUMTASKS_Unknown=2,
};

constexpr auto OMP_NUMTASKS_Strict = llvm::omp::NumTasksType::OMP_NUMTASKS_Strict;
constexpr auto OMP_NUMTASKS_Unknown = llvm::omp::NumTasksType::OMP_NUMTASKS_Unknown;

enum class OrderKind {
  OMP_ORDER_unknown=2,
  OMP_ORDER_concurrent=1,
};

constexpr auto OMP_ORDER_unknown = llvm::omp::OrderKind::OMP_ORDER_unknown;
constexpr auto OMP_ORDER_concurrent = llvm::omp::OrderKind::OMP_ORDER_concurrent;

enum class ProcBindKind {
  OMP_PROC_BIND_primary=5,
  OMP_PROC_BIND_master=2,
  OMP_PROC_BIND_close=3,
  OMP_PROC_BIND_spread=4,
  OMP_PROC_BIND_default=6,
  OMP_PROC_BIND_unknown=7,
};

constexpr auto OMP_PROC_BIND_primary = llvm::omp::ProcBindKind::OMP_PROC_BIND_primary;
constexpr auto OMP_PROC_BIND_master = llvm::omp::ProcBindKind::OMP_PROC_BIND_master;
constexpr auto OMP_PROC_BIND_close = llvm::omp::ProcBindKind::OMP_PROC_BIND_close;
constexpr auto OMP_PROC_BIND_spread = llvm::omp::ProcBindKind::OMP_PROC_BIND_spread;
constexpr auto OMP_PROC_BIND_default = llvm::omp::ProcBindKind::OMP_PROC_BIND_default;
constexpr auto OMP_PROC_BIND_unknown = llvm::omp::ProcBindKind::OMP_PROC_BIND_unknown;

enum class ScheduleKind {
  OMP_SCHEDULE_Static=2,
  OMP_SCHEDULE_Dynamic=3,
  OMP_SCHEDULE_Guided=4,
  OMP_SCHEDULE_Auto=5,
  OMP_SCHEDULE_Runtime=6,
  OMP_SCHEDULE_Default=7,
};

constexpr auto OMP_SCHEDULE_Static = llvm::omp::ScheduleKind::OMP_SCHEDULE_Static;
constexpr auto OMP_SCHEDULE_Dynamic = llvm::omp::ScheduleKind::OMP_SCHEDULE_Dynamic;
constexpr auto OMP_SCHEDULE_Guided = llvm::omp::ScheduleKind::OMP_SCHEDULE_Guided;
constexpr auto OMP_SCHEDULE_Auto = llvm::omp::ScheduleKind::OMP_SCHEDULE_Auto;
constexpr auto OMP_SCHEDULE_Runtime = llvm::omp::ScheduleKind::OMP_SCHEDULE_Runtime;
constexpr auto OMP_SCHEDULE_Default = llvm::omp::ScheduleKind::OMP_SCHEDULE_Default;

// Enumeration helper functions
Directive getOpenMPDirectiveKind(llvm::StringRef Str);

llvm::StringRef getOpenMPDirectiveName(Directive D);

Clause getOpenMPClauseKind(llvm::StringRef Str);

llvm::StringRef getOpenMPClauseName(Clause C);

/// Return true if \p C is a valid clause for \p D in version \p Version.
bool isAllowedClauseForDirective(Directive D, Clause C, unsigned Version);

CancellationConstructType getCancellationConstructType(StringRef);
llvm::StringRef getOpenMPCancellationConstructTypeName(CancellationConstructType);
GrainsizeType getGrainsizeType(StringRef);
llvm::StringRef getOpenMPGrainsizeTypeName(GrainsizeType);
MemoryOrderKind getMemoryOrderKind(StringRef);
llvm::StringRef getOpenMPMemoryOrderKindName(MemoryOrderKind);
NumTasksType getNumTasksType(StringRef);
llvm::StringRef getOpenMPNumTasksTypeName(NumTasksType);
OrderKind getOrderKind(StringRef);
llvm::StringRef getOpenMPOrderKindName(OrderKind);
ProcBindKind getProcBindKind(StringRef);
llvm::StringRef getOpenMPProcBindKindName(ProcBindKind);
ScheduleKind getScheduleKind(StringRef);
llvm::StringRef getOpenMPScheduleKindName(ScheduleKind);
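
/// Illustrative usage of the helpers above (a minimal sketch, assuming the
/// two-digit version encoding used throughout these files, where 50 denotes
/// OpenMP 5.0):
/// \code
///   Directive D = getOpenMPDirectiveKind("parallel");
///   Clause C = getOpenMPClauseKind("num_threads");
///   if (isAllowedClauseForDirective(D, C, /*Version=*/50))
///     ; // "num_threads" may appear on "parallel" in OpenMP 5.0.
/// \endcode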

} // namespace omp
} // namespace llvm
#endif // LLVM_OpenMP_INC
//===- OpenMP/OMPContext.h ---- OpenMP context helper functions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides helper functions and classes to deal with OpenMP
/// contexts as used by `[begin/end] declare variant` and `metadirective`.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_OPENMP_OMPCONTEXT_H
#define LLVM_FRONTEND_OPENMP_OMPCONTEXT_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"

namespace llvm {
class Triple;
namespace omp {

/// OpenMP Context related IDs and helpers
///
///{

/// IDs for all OpenMP context selector trait sets (construct/device/...).
enum class TraitSet {
#define OMP_TRAIT_SET(Enum, ...) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

/// IDs for all OpenMP context selector trait (device={kind/isa...}/...).
enum class TraitSelector {
#define OMP_TRAIT_SELECTOR(Enum, ...) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

/// IDs for all OpenMP context trait properties (host/gpu/bsc/llvm/...)
enum class TraitProperty {
#define OMP_TRAIT_PROPERTY(Enum, ...) Enum,
#define OMP_LAST_TRAIT_PROPERTY(Enum) Last = Enum
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

/// Parse \p Str and return the trait set it matches or TraitSet::invalid.
TraitSet getOpenMPContextTraitSetKind(StringRef Str);

/// Return the trait set for which \p Selector is a selector.
TraitSet getOpenMPContextTraitSetForSelector(TraitSelector Selector);

/// Return the trait set for which \p Property is a property.
TraitSet getOpenMPContextTraitSetForProperty(TraitProperty Property);

/// Return a textual representation of the trait set \p Kind.
StringRef getOpenMPContextTraitSetName(TraitSet Kind);

/// Parse \p Str and return the trait selector it matches or
/// TraitSelector::invalid.
TraitSelector getOpenMPContextTraitSelectorKind(StringRef Str);

/// Return the trait selector for which \p Property is a property.
TraitSelector getOpenMPContextTraitSelectorForProperty(TraitProperty Property);

/// Return a textual representation of the trait selector \p Kind.
StringRef getOpenMPContextTraitSelectorName(TraitSelector Kind);

/// Parse \p Str and return the trait property it matches in the set \p Set and
/// selector \p Selector or TraitProperty::invalid.
TraitProperty getOpenMPContextTraitPropertyKind(TraitSet Set,
                                                TraitSelector Selector,
                                                StringRef Str);

/// Return the trait property for a singleton selector \p Selector.
TraitProperty getOpenMPContextTraitPropertyForSelector(TraitSelector Selector);

/// Return a textual representation of the trait property \p Kind, which might
/// be the raw string we parsed (\p RawString) if we do not translate the
/// property into a (distinct) enum.
StringRef getOpenMPContextTraitPropertyName(TraitProperty Kind,
                                            StringRef RawString);

/// Return a textual representation of the trait property \p Kind with selector
/// and set name included.
StringRef getOpenMPContextTraitPropertyFullName(TraitProperty Kind);

/// Return a string listing all trait sets.
std::string listOpenMPContextTraitSets();

/// Return a string listing all trait selectors for \p Set.
std::string listOpenMPContextTraitSelectors(TraitSet Set);

/// Return a string listing all trait properties for \p Set and \p Selector.
std::string listOpenMPContextTraitProperties(TraitSet Set,
                                             TraitSelector Selector);
///}
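
/// Illustrative round trip through the trait-set helpers above (a sketch;
/// "device" is one of the trait sets used in context selectors such as
/// device={kind(host)}):
/// \code
///   TraitSet Set = getOpenMPContextTraitSetKind("device");
///   StringRef Name = getOpenMPContextTraitSetName(Set); // "device"
/// \endcode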

/// Return true if \p Selector can be nested in \p Set. Also sets
/// \p AllowsTraitScore and \p RequiresProperty to true/false if the user can
/// specify a score for properties in \p Selector and if the \p Selector
/// requires at least one property.
bool isValidTraitSelectorForTraitSet(TraitSelector Selector, TraitSet Set,
                                     bool &AllowsTraitScore,
                                     bool &RequiresProperty);

/// Return true if \p Property can be nested in \p Selector and \p Set.
bool isValidTraitPropertyForTraitSetAndSelector(TraitProperty Property,
                                                TraitSelector Selector,
                                                TraitSet Set);

/// Variant match information describes the required traits and how they are
/// scored (via the ScoreMap). In addition, the required construct nesting is
/// described as well.
struct VariantMatchInfo {
  /// Add the trait \p Property to the required trait set. \p RawString is the
  /// string we parsed and derived \p Property from. If \p Score is not null,
  /// it is recorded as well. If \p Property is in the `construct` set it is
  /// recorded in-order in the ConstructTraits as well.
  void addTrait(TraitProperty Property, StringRef RawString,
                APInt *Score = nullptr) {
    addTrait(getOpenMPContextTraitSetForProperty(Property), Property, RawString,
             Score);
  }
  /// Add the trait \p Property which is in set \p Set to the required trait
  /// set. \p RawString is the string we parsed and derived \p Property from.
  /// If \p Score is not null, it is recorded as well. If \p Set is the
  /// `construct` set it is recorded in-order in the ConstructTraits as well.
  void addTrait(TraitSet Set, TraitProperty Property, StringRef RawString,
                APInt *Score = nullptr) {
    if (Score)
      ScoreMap[Property] = *Score;

    // Special handling for `device={isa(...)}` as we do not match the enum but
    // the raw string.
    if (Property == TraitProperty::device_isa___ANY)
      ISATraits.push_back(RawString);

    RequiredTraits.set(unsigned(Property));
    if (Set == TraitSet::construct)
      ConstructTraits.push_back(Property);
  }

  BitVector RequiredTraits = BitVector(unsigned(TraitProperty::Last) + 1);
  SmallVector<StringRef, 8> ISATraits;
  SmallVector<TraitProperty, 8> ConstructTraits;
  SmallDenseMap<TraitProperty, APInt> ScoreMap;
};
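
// Illustrative use of VariantMatchInfo (a sketch; the ISA string and score
// are arbitrary, and device_isa___ANY is the property used for raw-string
// ISA matching in addTrait above):
//
//   VariantMatchInfo VMI;
//   APInt Score(/*numBits=*/64, /*val=*/100);
//   VMI.addTrait(TraitProperty::device_isa___ANY, "my_isa", &Score);
//
// Construct-set traits are additionally recorded, in order, in
// VMI.ConstructTraits.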

/// The context for a source location is made up of active property traits,
/// e.g., device={kind(host)}, and construct traits which describe the nesting
/// in OpenMP constructs at the location.
struct OMPContext {
  OMPContext(bool IsDeviceCompilation, Triple TargetTriple);
  virtual ~OMPContext() = default;

  void addTrait(TraitProperty Property) {
    addTrait(getOpenMPContextTraitSetForProperty(Property), Property);
  }
  void addTrait(TraitSet Set, TraitProperty Property) {
    ActiveTraits.set(unsigned(Property));
    if (Set == TraitSet::construct)
      ConstructTraits.push_back(Property);
  }

  /// Hook for users to check if an ISA trait matches. The trait is described
  /// by the string that was parsed; whether it matches depends on the target
  /// and context.
  virtual bool matchesISATrait(StringRef) const { return false; }

  BitVector ActiveTraits = BitVector(unsigned(TraitProperty::Last) + 1);
  SmallVector<TraitProperty, 8> ConstructTraits;
};

/// Return true if \p VMI is applicable in \p Ctx, that is, all traits required
/// by \p VMI are available in the OpenMP context \p Ctx. If \p DeviceSetOnly is
/// true, only the device selector set, if present, is checked. Note that we
/// still honor extension traits provided by the user.
bool isVariantApplicableInContext(const VariantMatchInfo &VMI,
                                  const OMPContext &Ctx,
                                  bool DeviceSetOnly = false);

/// Return the index (into \p VMIs) of the variant with the highest score
/// from the ones applicable in \p Ctx. See llvm::isVariantApplicableInContext.
int getBestVariantMatchForContext(const SmallVectorImpl<VariantMatchInfo> &VMIs,
                                  const OMPContext &Ctx);
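
// Illustrative variant selection (a sketch; assumes VMIs was populated via
// VariantMatchInfo::addTrait and TT is the current target triple):
//
//   SmallVector<VariantMatchInfo, 4> VMIs;
//   OMPContext Ctx(/*IsDeviceCompilation=*/false, TT);
//   int Best = getBestVariantMatchForContext(VMIs, Ctx);
//   if (Best >= 0)
//     ; // VMIs[Best] is the highest-scoring applicable variant.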

} // namespace omp

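/// Allow omp::TraitProperty to be used as a DenseMap key, e.g., for the
/// ScoreMap member of VariantMatchInfo above. The empty and tombstone keys
/// are values outside the range of real trait properties.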
template <> struct DenseMapInfo<omp::TraitProperty> {
  static inline omp::TraitProperty getEmptyKey() {
    return omp::TraitProperty(-1);
  }
  static inline omp::TraitProperty getTombstoneKey() {
    return omp::TraitProperty(-2);
  }
  static unsigned getHashValue(omp::TraitProperty val) {
    return std::hash<unsigned>{}(unsigned(val));
  }
  static bool isEqual(omp::TraitProperty LHS, omp::TraitProperty RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPCONTEXT_H
//===- OpenMP/OMPAssume.h --- OpenMP assumption helper functions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides helper functions and classes to deal with OpenMP
/// assumptions, e.g., as used by `[begin/end] assumes` and `assume`.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_OPENMP_OMPASSUME_H
#define LLVM_FRONTEND_OPENMP_OMPASSUME_H

#include "llvm/ADT/StringRef.h"

namespace llvm {

namespace omp {

/// Helper to describe assume clauses.
struct AssumptionClauseMappingInfo {
  /// The identifier describing the (beginning of the) clause.
  llvm::StringLiteral Identifier;
  /// Flag to determine if the identifier is a full name or the start of a name.
  bool StartsWith;
  /// Flag to determine if a directive list follows.
  bool HasDirectiveList;
  /// Flag to determine if an expression follows.
  bool HasExpression;
};

/// All known assume clauses.
static constexpr AssumptionClauseMappingInfo AssumptionClauseMappings[] = {
#define OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList,            \
                          HasExpression)                                       \
  {Identifier, StartsWith, HasDirectiveList, HasExpression},
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};

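/// Return a quoted, comma-separated list of all known assume clause
/// identifiers, e.g., "'foo', 'bar'" for clauses named `foo` and `bar`
/// (illustrative names; the real identifiers come from OMPKinds.def above).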
inline std::string getAllAssumeClauseOptions() {
  std::string S;
  for (const AssumptionClauseMappingInfo &ACMI : AssumptionClauseMappings)
    S += (S.empty() ? "'" : "', '") + ACMI.Identifier.str();
  return S + "'";
}

} // namespace omp

} // namespace llvm

#endif // LLVM_FRONTEND_OPENMP_OMPASSUME_H
//===-- OMP.td - OpenMP directive definition file ----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the definition file for OpenMP directives and clauses.
//
//===----------------------------------------------------------------------===//

include "llvm/Frontend/Directive/DirectiveBase.td"

//===----------------------------------------------------------------------===//
// Definition of general OpenMP information
//===----------------------------------------------------------------------===//

def OpenMP : DirectiveLanguage {
  let name = "OpenMP";
  let cppNamespace = "omp"; // final namespace will be llvm::omp
  let directivePrefix = "OMPD_";
  let clausePrefix = "OMPC_";
  let makeEnumAvailableInNamespace = true;
  let enableBitmaskEnumInNamespace = true;
  let clauseEnumSetClass = "OmpClauseSet";
  let flangClauseBaseClass = "OmpClause";
}
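
// Illustrative note: the prefixes and flags above drive the generated C++
// code. For example, with directivePrefix "OMPD_" and
// makeEnumAvailableInNamespace set, the directive "tile" is emitted as
// llvm::omp::Directive::OMPD_tile together with a namespace-level alias:
//
//   constexpr auto OMPD_tile = llvm::omp::Directive::OMPD_tile;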

//===----------------------------------------------------------------------===//
// Definition of OpenMP clauses
//===----------------------------------------------------------------------===//

def OMPC_Allocator : Clause<"allocator"> {
  let clangClass = "OMPAllocatorClause";
  let flangClass = "ScalarIntExpr";
}
def OMPC_If : Clause<"if"> {
  let clangClass = "OMPIfClause";
  let flangClass = "OmpIfClause";
}
def OMPC_Final : Clause<"final"> {
  let clangClass = "OMPFinalClause";
  let flangClass = "ScalarLogicalExpr";
}
def OMPC_NumThreads : Clause<"num_threads"> {
  let clangClass = "OMPNumThreadsClause";
  let flangClass = "ScalarIntExpr";
}
def OMPC_SafeLen : Clause<"safelen"> {
  let clangClass = "OMPSafelenClause";
  let flangClass = "ScalarIntConstantExpr";
}
def OMPC_SimdLen : Clause<"simdlen"> {
  let clangClass = "OMPSimdlenClause";
  let flangClass = "ScalarIntConstantExpr";
}
def OMPC_Collapse : Clause<"collapse"> {
  let clangClass = "OMPCollapseClause";
  let flangClass = "ScalarIntConstantExpr";
}
def OMPC_Default : Clause<"default"> {
  let clangClass = "OMPDefaultClause";
  let flangClass = "OmpDefaultClause";
}
def OMPC_Private : Clause<"private"> {
  let clangClass = "OMPPrivateClause";
  let flangClass = "OmpObjectList";
}
def OMPC_Sizes: Clause<"sizes"> { 
  let clangClass = "OMPSizesClause"; 
  let flangClass = "ScalarIntExpr";
  let isValueList = true;
  }
def OMPC_Full: Clause<"full"> { 
  let clangClass = "OMPFullClause"; 
}
def OMPC_Partial: Clause<"partial"> { 
  let clangClass = "OMPPartialClause";
  let flangClass = "ScalarIntConstantExpr"; 
  let isValueOptional = true;
 }
def OMPC_FirstPrivate : Clause<"firstprivate"> {
  let clangClass = "OMPFirstprivateClause";
  let flangClass = "OmpObjectList";
}
def OMPC_LastPrivate : Clause<"lastprivate"> {
  let clangClass = "OMPLastprivateClause";
  let flangClass = "OmpObjectList";
}
def OMPC_Shared : Clause<"shared"> {
  let clangClass = "OMPSharedClause";
  let flangClass = "OmpObjectList";
}
def OMPC_Reduction : Clause<"reduction"> {
  let clangClass = "OMPReductionClause";
  let flangClass = "OmpReductionClause";
}
def OMPC_Linear : Clause<"linear"> {
  let clangClass = "OMPLinearClause";
  let flangClass = "OmpLinearClause";
}
def OMPC_Aligned : Clause<"aligned"> {
  let clangClass = "OMPAlignedClause";
  let flangClass = "OmpAlignedClause";
}
def OMPC_Copyin : Clause<"copyin"> {
  let clangClass = "OMPCopyinClause";
  let flangClass = "OmpObjectList";
}
def OMPC_CopyPrivate : Clause<"copyprivate"> {
  let clangClass = "OMPCopyprivateClause";
  let flangClass = "OmpObjectList";
}
def OMP_PROC_BIND_master : ClauseVal<"master",2,1> {}
def OMP_PROC_BIND_close : ClauseVal<"close",3,1> {}
def OMP_PROC_BIND_spread : ClauseVal<"spread",4,1> {}
def OMP_PROC_BIND_primary : ClauseVal<"primary",5,1> {}
def OMP_PROC_BIND_default : ClauseVal<"default",6,0> {}
def OMP_PROC_BIND_unknown : ClauseVal<"unknown",7,0> { let isDefault = true; }
def OMPC_ProcBind : Clause<"proc_bind"> {
  let clangClass = "OMPProcBindClause";
  let flangClass = "OmpProcBindClause";
  let enumClauseValue = "ProcBindKind";
  let allowedClauseValues = [
    OMP_PROC_BIND_primary,
    OMP_PROC_BIND_master,
    OMP_PROC_BIND_close,
    OMP_PROC_BIND_spread,
    OMP_PROC_BIND_default,
    OMP_PROC_BIND_unknown
  ];
}
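
// Illustrative mapping: each ClauseVal above becomes an enumerator of the
// generated ProcBindKind enum with the given value, e.g.,
// OMP_PROC_BIND_primary = 5, and the entry marked `isDefault` is the value
// the generated string-to-enum helper falls back to for unrecognized input.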

def OMP_SCHEDULE_Static : ClauseVal<"static", 2, 1> {}
def OMP_SCHEDULE_Dynamic : ClauseVal<"dynamic", 3, 1> {}
def OMP_SCHEDULE_Guided : ClauseVal<"guided", 4, 1> {}
def OMP_SCHEDULE_Auto : ClauseVal<"auto", 5, 1> {}
def OMP_SCHEDULE_Runtime : ClauseVal<"runtime", 6, 1> {}
def OMP_SCHEDULE_Default : ClauseVal<"default", 7, 0> { let isDefault = 1; }

def OMPC_Schedule : Clause<"schedule"> {
  let clangClass = "OMPScheduleClause";
  let flangClass = "OmpScheduleClause";
  let enumClauseValue = "ScheduleKind";
  let allowedClauseValues = [
    OMP_SCHEDULE_Static,
    OMP_SCHEDULE_Dynamic,
    OMP_SCHEDULE_Guided,
    OMP_SCHEDULE_Auto,
    OMP_SCHEDULE_Runtime,
    OMP_SCHEDULE_Default
  ];
}

def OMP_MEMORY_ORDER_SeqCst : ClauseVal<"seq_cst", 1, 1> {}
def OMP_MEMORY_ORDER_AcqRel : ClauseVal<"acq_rel", 2, 1> {}
def OMP_MEMORY_ORDER_Acquire : ClauseVal<"acquire", 3, 1> {}
def OMP_MEMORY_ORDER_Release : ClauseVal<"release", 4, 1> {}
def OMP_MEMORY_ORDER_Relaxed : ClauseVal<"relaxed", 5, 1> {}
def OMP_MEMORY_ORDER_Default : ClauseVal<"default", 6, 0> {
  let isDefault = 1;
}
def OMPC_MemoryOrder : Clause<"memory_order"> {
  let enumClauseValue = "MemoryOrderKind";
  let allowedClauseValues = [
    OMP_MEMORY_ORDER_SeqCst,
    OMP_MEMORY_ORDER_AcqRel,
    OMP_MEMORY_ORDER_Acquire,
    OMP_MEMORY_ORDER_Release,
    OMP_MEMORY_ORDER_Relaxed,
    OMP_MEMORY_ORDER_Default
  ];
}

def OMP_CANCELLATION_CONSTRUCT_Parallel : ClauseVal<"parallel", 1, 1> {}
def OMP_CANCELLATION_CONSTRUCT_Loop : ClauseVal<"loop", 2, 1> {}
def OMP_CANCELLATION_CONSTRUCT_Sections : ClauseVal<"sections", 3, 1> {}
def OMP_CANCELLATION_CONSTRUCT_Taskgroup : ClauseVal<"taskgroup", 4, 1> {}
def OMP_CANCELLATION_CONSTRUCT_None : ClauseVal<"none", 5, 0> {
  let isDefault = 1;
}

def OMPC_CancellationConstructType : Clause<"cancellation_construct_type"> {
  let enumClauseValue = "CancellationConstructType";
  let allowedClauseValues = [
    OMP_CANCELLATION_CONSTRUCT_Parallel,
    OMP_CANCELLATION_CONSTRUCT_Loop,
    OMP_CANCELLATION_CONSTRUCT_Sections,
    OMP_CANCELLATION_CONSTRUCT_Taskgroup,
    OMP_CANCELLATION_CONSTRUCT_None
  ];
}

def OMPC_Ordered : Clause<"ordered"> {
  let clangClass = "OMPOrderedClause";
  let flangClass = "ScalarIntConstantExpr";
  let isValueOptional = true;
}
def OMPC_NoWait : Clause<"nowait"> {
  let clangClass = "OMPNowaitClause";
}
def OMPC_Untied : Clause<"untied"> { let clangClass = "OMPUntiedClause"; }
def OMPC_Mergeable : Clause<"mergeable"> {
  let clangClass = "OMPMergeableClause";
}
def OMPC_Read : Clause<"read"> { let clangClass = "OMPReadClause"; }
def OMPC_Write : Clause<"write"> { let clangClass = "OMPWriteClause"; }
def OMPC_Update : Clause<"update"> { let clangClass = "OMPUpdateClause"; }
def OMPC_Capture : Clause<"capture"> { let clangClass = "OMPCaptureClause"; }
def OMPC_Compare : Clause<"compare"> { let clangClass = "OMPCompareClause"; }
def OMPC_SeqCst : Clause<"seq_cst"> { let clangClass = "OMPSeqCstClause"; }
def OMPC_AcqRel : Clause<"acq_rel"> { let clangClass = "OMPAcqRelClause"; }
def OMPC_Acquire : Clause<"acquire"> { let clangClass = "OMPAcquireClause"; }
def OMPC_Release : Clause<"release"> { let clangClass = "OMPReleaseClause"; }
def OMPC_Relaxed : Clause<"relaxed"> { let clangClass = "OMPRelaxedClause"; }
def OMPC_Depend : Clause<"depend"> {
  let clangClass = "OMPDependClause";
  let flangClass = "OmpDependClause";
}
def OMPC_Device : Clause<"device"> {
  let clangClass = "OMPDeviceClause";
  let flangClass = "OmpDeviceClause";
}
def OMPC_DeviceType : Clause<"device_type"> {
  let flangClass = "OmpDeviceTypeClause";
}
def OMPC_Threads : Clause<"threads"> { let clangClass = "OMPThreadsClause"; }
def OMPC_Simd : Clause<"simd"> { let clangClass = "OMPSIMDClause"; }
def OMPC_Map : Clause<"map"> {
  let clangClass = "OMPMapClause";
  let flangClass = "OmpMapClause";
}
def OMPC_NumTeams : Clause<"num_teams"> {
  let clangClass = "OMPNumTeamsClause";
  let flangClass = "ScalarIntExpr";
}
def OMPC_ThreadLimit : Clause<"thread_limit"> {
  let clangClass = "OMPThreadLimitClause";
  let flangClass = "ScalarIntExpr";
}
def OMPC_Priority : Clause<"priority"> {
  let clangClass = "OMPPriorityClause";
  let flangClass = "ScalarIntExpr";
}

def OMP_GRAINSIZE_Strict : ClauseVal<"strict", 1, 1> {}
def OMP_GRAINSIZE_Unknown : ClauseVal<"unkonwn", 2, 0> { let isDefault = 1; }

def OMPC_GrainSize : Clause<"grainsize"> {
  let clangClass = "OMPGrainsizeClause";
  let flangClass = "ScalarIntExpr";
  let enumClauseValue = "GrainsizeType";
  let allowedClauseValues = [
    OMP_GRAINSIZE_Strict,
    OMP_GRAINSIZE_Unknown
  ];
}
def OMPC_NoGroup : Clause<"nogroup"> {
  let clangClass = "OMPNogroupClause";
}

def OMP_NUMTASKS_Strict : ClauseVal<"strict", 1, 1> {}
def OMP_NUMTASKS_Unknown : ClauseVal<"unkonwn", 2, 0> { let isDefault = 1; }

def OMPC_NumTasks : Clause<"num_tasks"> {
  let clangClass = "OMPNumTasksClause";
  let flangClass = "ScalarIntExpr";
  let enumClauseValue = "NumTasksType";
  let allowedClauseValues = [
    OMP_NUMTASKS_Strict,
    OMP_NUMTASKS_Unknown
  ];
}
def OMPC_Hint : Clause<"hint"> {
  let clangClass = "OMPHintClause";
  let flangClass = "ConstantExpr";
}
def OMPC_DistSchedule : Clause<"dist_schedule"> {
  let clangClass = "OMPDistScheduleClause";
  let flangClass = "ScalarIntExpr";
  let isValueOptional = true;
}
def OMPC_DefaultMap : Clause<"defaultmap"> {
  let clangClass = "OMPDefaultmapClause";
  let flangClass = "OmpDefaultmapClause";
}
def OMPC_To : Clause<"to"> {
  let clangClass = "OMPToClause";
  let flangClass = "OmpObjectList";
}
def OMPC_From : Clause<"from"> {
  let clangClass = "OMPFromClause";
  let flangClass = "OmpObjectList";
}
def OMPC_UseDevicePtr : Clause<"use_device_ptr"> {
  let clangClass = "OMPUseDevicePtrClause";
  let flangClass = "OmpObjectList";
}
def OMPC_IsDevicePtr : Clause<"is_device_ptr"> {
  let clangClass = "OMPIsDevicePtrClause";
  let flangClass = "Name";
  let isValueList = true;
}
def OMPC_HasDeviceAddr : Clause<"has_device_addr"> {
  let clangClass = "OMPHasDeviceAddrClause";
  let flangClass = "Name";
  let isValueList = true;
}
def OMPC_TaskReduction : Clause<"task_reduction"> {
  let clangClass = "OMPTaskReductionClause";
  let flangClass = "OmpReductionClause";
}
def OMPC_InReduction : Clause<"in_reduction"> {
  let clangClass = "OMPInReductionClause";
  let flangClass = "OmpInReductionClause";
}
def OMPC_UnifiedAddress : Clause<"unified_address"> {
  let clangClass = "OMPUnifiedAddressClause";
}
def OMPC_UnifiedSharedMemory : Clause<"unified_shared_memory"> {
  let clangClass = "OMPUnifiedSharedMemoryClause";
}
def OMPC_ReverseOffload : Clause<"reverse_offload"> {
  let clangClass = "OMPReverseOffloadClause";
}
def OMPC_DynamicAllocators : Clause<"dynamic_allocators"> {
  let clangClass = "OMPDynamicAllocatorsClause";
}
def OMPC_AtomicDefaultMemOrder : Clause<"atomic_default_mem_order"> {
  let clangClass = "OMPAtomicDefaultMemOrderClause";
  let flangClass = "OmpAtomicDefaultMemOrderClause";
}
def OMPC_At : Clause<"at"> {
  let clangClass = "OMPAtClause";
}
def OMPC_Severity : Clause<"severity"> {
  let clangClass = "OMPSeverityClause";
}
def OMPC_Message : Clause<"message"> {
  let clangClass = "OMPMessageClause";
}
def OMPC_Allocate : Clause<"allocate"> {
  let clangClass = "OMPAllocateClause";
  let flangClass = "OmpAllocateClause";
}
def OMPC_NonTemporal : Clause<"nontemporal"> {
  let clangClass = "OMPNontemporalClause";
  let flangClass = "Name";
  let isValueList = true;
}

def OMP_ORDER_concurrent : ClauseVal<"concurrent",1,1> {}
def OMP_ORDER_unknown : ClauseVal<"unknown",2,0> { let isDefault = 1; }
def OMPC_Order : Clause<"order"> {
  let clangClass = "OMPOrderClause";
  let flangClass = "OmpOrderClause";
  let enumClauseValue = "OrderKind";
  let allowedClauseValues = [
    OMP_ORDER_unknown,
    OMP_ORDER_concurrent
  ];
}
def OMPC_Init : Clause<"init"> {
  let clangClass = "OMPInitClause";
}
def OMPC_Use : Clause<"use"> {
  let clangClass = "OMPUseClause";
}
def OMPC_Destroy : Clause<"destroy"> {
  let clangClass = "OMPDestroyClause";
}
def OMPC_Novariants : Clause<"novariants"> {
  let clangClass = "OMPNovariantsClause";
  let flangClass = "ScalarLogicalExpr";
}
def OMPC_Nocontext : Clause<"nocontext"> {
  let clangClass = "OMPNocontextClause";
  let flangClass = "ScalarLogicalExpr";
}
def OMPC_Detach : Clause<"detach"> {
  let clangClass = "OMPDetachClause";
}
def OMPC_Inclusive : Clause<"inclusive"> {
  let clangClass = "OMPInclusiveClause";
}
def OMPC_Exclusive : Clause<"exclusive"> {
  let clangClass = "OMPExclusiveClause";
}
def OMPC_UsesAllocators : Clause<"uses_allocators"> {
  let clangClass = "OMPUsesAllocatorsClause";
}
def OMPC_Affinity : Clause<"affinity"> {
  let clangClass = "OMPAffinityClause";
}
def OMPC_UseDeviceAddr : Clause<"use_device_addr"> {
  let clangClass = "OMPUseDeviceAddrClause";
  let flangClass = "OmpObjectList";
}
def OMPC_Uniform : Clause<"uniform"> {
  let flangClass = "Name";
  let isValueList = true;
}
def OMPC_Match : Clause<"match"> {}
def OMPC_AdjustArgs : Clause<"adjust_args"> { }
def OMPC_AppendArgs : Clause<"append_args"> { }
def OMPC_Depobj : Clause<"depobj"> {
  let clangClass = "OMPDepobjClause";
  let isImplicit = true;
}
def OMPC_Flush : Clause<"flush"> {
  let clangClass = "OMPFlushClause";
  let isImplicit = true;
}
def OMPC_ThreadPrivate : Clause<"threadprivate"> {
  let alternativeName = "threadprivate or thread local";
  let isImplicit = true;
}
def OMPC_Unknown : Clause<"unknown"> {
  let isImplicit = true;
  let isDefault = true;
}
def OMPC_Link : Clause<"link"> {
  let flangClass = "OmpObjectList";
}
def OMPC_Indirect : Clause<"indirect"> {}
def OMPC_Inbranch : Clause<"inbranch"> {}
def OMPC_Notinbranch : Clause<"notinbranch"> {}
def OMPC_Filter : Clause<"filter"> {
  let clangClass = "OMPFilterClause";
  let flangClass = "ScalarIntExpr";
}
def OMPC_Align : Clause<"align"> {
  let clangClass = "OMPAlignClause";
}
def OMPC_When: Clause<"when"> {}

def OMPC_Bind : Clause<"bind"> {
  let clangClass = "OMPBindClause";
}

def OMPC_OMPX_DynCGroupMem : Clause<"ompx_dyn_cgroup_mem"> {
  let clangClass = "OMPXDynCGroupMemClause";
  let flangClass = "ScalarIntExpr";
}

def OMPC_Doacross : Clause<"doacross"> {
  let clangClass = "OMPDoacrossClause";
}

//===----------------------------------------------------------------------===//
// Definition of OpenMP directives
//===----------------------------------------------------------------------===//

def OMP_ThreadPrivate : Directive<"threadprivate"> {}
def OMP_Parallel : Directive<"parallel"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Allocate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
  ];
}
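// Illustrative note: a bare VersionedClause<C> allows clause C in all
// supported OpenMP versions, while VersionedClause<C, 50> (as used for
// OMPC_Detach below) allows it only from version 50, i.e. OpenMP 5.0; this
// is the same version encoding consumed by isAllowedClauseForDirective.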
def OMP_Task : Directive<"task"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Detach, 50>,
    VersionedClause<OMPC_Affinity, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Priority>
  ];
}
def OMP_Simd : Directive<"simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_If, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_Tile : Directive<"tile"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_Sizes, 51>,
  ];
}
def OMP_Unroll : Directive<"unroll"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_Full, 51>,
    VersionedClause<OMPC_Partial, 51>,
  ];
}
def OMP_For : Directive<"for"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_Do : Directive<"do"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Reduction>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_Sections : Directive<"sections"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Allocate>
  ];
}
def OMP_Section : Directive<"section"> {}
def OMP_Single : Directive<"single"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_CopyPrivate>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Allocate>
  ];
}
def OMP_Master : Directive<"master"> {}
def OMP_Critical : Directive<"critical"> {
  let allowedClauses = [
    VersionedClause<OMPC_Hint>
  ];
}
def OMP_TaskYield : Directive<"taskyield"> {}
def OMP_Barrier : Directive<"barrier"> {}
def OMP_Error : Directive<"error"> {
  let allowedClauses = [
    VersionedClause<OMPC_At, 51>,
    VersionedClause<OMPC_Severity, 51>,
    VersionedClause<OMPC_Message, 51>
  ];
}
def OMP_TaskWait : Directive<"taskwait"> {
  let allowedClauses = [
    VersionedClause<OMPC_Depend, 50>,
    VersionedClause<OMPC_NoWait, 51>
  ];
}
def OMP_TaskGroup : Directive<"taskgroup"> {
  let allowedClauses = [
    VersionedClause<OMPC_TaskReduction, 50>,
    VersionedClause<OMPC_Allocate, 50>
  ];
}
def OMP_Flush : Directive<"flush"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_AcqRel, 50>,
    VersionedClause<OMPC_Acquire, 50>,
    VersionedClause<OMPC_Release, 50>,
    // TODO: This should be `none` instead. Comment carried over from
    // OMPKinds.def.
    VersionedClause<OMPC_Flush>
  ];
}
def OMP_Ordered : Directive<"ordered"> {
  let allowedClauses = [
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_Doacross, 52>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Threads>,
    VersionedClause<OMPC_Simd>
  ];
}
def OMP_Atomic : Directive<"atomic"> {
  let allowedClauses = [
    VersionedClause<OMPC_Read>,
    VersionedClause<OMPC_Write>,
    VersionedClause<OMPC_Update>,
    VersionedClause<OMPC_Capture>,
    VersionedClause<OMPC_Compare, 51>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_SeqCst>,
    VersionedClause<OMPC_AcqRel, 50>,
    VersionedClause<OMPC_Acquire, 50>,
    VersionedClause<OMPC_Release, 50>,
    VersionedClause<OMPC_Relaxed, 50>,
    VersionedClause<OMPC_Hint, 50>
  ];
}
def OMP_Target : Directive<"target"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_InReduction, 50>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_ThreadLimit, 51>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_Teams : Directive<"teams"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>
  ];
}
def OMP_Cancel : Directive<"cancel"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>
  ];
}
def OMP_Requires : Directive<"requires"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_UnifiedAddress>,
    VersionedClause<OMPC_UnifiedSharedMemory>,
    // OpenMP 5.2 Spec: If an implementation does not support a requirement
    // (reverse offload in this case) then it should terminate compilation
    // with an error.
    // The supported version for reverse_offload is set to the distant future
    // version 9.9 so that its partial support can be tested in the meantime.
    //
    // TODO: Correct this supported version number whenever the complete
    // implementation of reverse_offload is available.
    VersionedClause<OMPC_ReverseOffload, 99>,
    VersionedClause<OMPC_DynamicAllocators>,
    VersionedClause<OMPC_AtomicDefaultMemOrder>
  ];
}
def OMP_Nothing : Directive<"nothing"> {}
def OMP_TargetData : Directive<"target data"> {
  let allowedClauses = [
    VersionedClause<OMPC_UseDevicePtr>,
    VersionedClause<OMPC_UseDeviceAddr, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_If>
  ];
  let requiredClauses = [
    VersionedClause<OMPC_Map>
  ];
}
def OMP_TargetEnterData : Directive<"target enter data"> {
  let allowedClauses = [
    VersionedClause<OMPC_Depend>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_NoWait>
  ];
  let requiredClauses = [
    VersionedClause<OMPC_Map>
  ];
}
def OMP_TargetExitData : Directive<"target exit data"> {
  let allowedClauses = [
    VersionedClause<OMPC_Depend>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NoWait>
  ];
  let requiredClauses = [
    VersionedClause<OMPC_Map>
  ];
}
def OMP_TargetParallel : Directive<"target parallel"> {
  let allowedClauses = [
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_TargetParallelFor : Directive<"target parallel for"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_TargetParallelDo : Directive<"target parallel do"> {
  let allowedClauses = [
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Allocator>,
    VersionedClause<OMPC_UsesAllocators>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Copyin>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TargetUpdate : Directive<"target update"> {
  let allowedClauses = [
    VersionedClause<OMPC_To>,
    VersionedClause<OMPC_From>,
    VersionedClause<OMPC_Depend>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NoWait>
  ];
}
def OMP_ParallelFor : Directive<"parallel for"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_ParallelDo : Directive<"parallel do"> {
  let allowedClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_ParallelForSimd : Directive<"parallel for simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_ParallelDoSimd : Directive<"parallel do simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_ParallelMaster : Directive<"parallel master"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Allocate>
  ];
}
def OMP_ParallelMasked : Directive<"parallel masked"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Filter>
  ];
}
def OMP_ParallelSections : Directive<"parallel sections"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Allocate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_NumThreads>
  ];
}
def OMP_ForSimd : Directive<"for simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_If, 50>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_DoSimd : Directive<"do simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Reduction>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_CancellationPoint : Directive<"cancellation point"> {}
def OMP_DeclareReduction : Directive<"declare reduction"> {}
def OMP_DeclareMapper : Directive<"declare mapper"> {
  let allowedClauses = [
    VersionedClause<OMPC_Map>
  ];
}
def OMP_DeclareSimd : Directive<"declare simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Uniform>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_SimdLen>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<OMPC_Inbranch>,
    VersionedClause<OMPC_Notinbranch>
  ];
}
def OMP_TaskLoop : Directive<"taskloop"> {
  let allowedClauses = [
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_Allocate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Priority>,
  ];
  let allowedExclusiveClauses = [
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NumTasks>
  ];
}
def OMP_TaskLoopSimd : Directive<"taskloop simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Untied>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_Order, 50>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NumTasks>
  ];
}
def OMP_Distribute : Directive<"distribute"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Allocate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>
  ];
}
def OMP_BeginDeclareTarget : Directive<"begin declare target"> {
  let allowedClauses = [
    VersionedClause<OMPC_To>,
    VersionedClause<OMPC_Link>,
    VersionedClause<OMPC_DeviceType>,
    VersionedClause<OMPC_Indirect>
  ];
}
def OMP_DeclareTarget : Directive<"declare target"> {
  let allowedClauses = [
    VersionedClause<OMPC_To>,
    VersionedClause<OMPC_Link>,
    VersionedClause<OMPC_Indirect>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_DeviceType, 50>
  ];
}
def OMP_EndDeclareTarget : Directive<"end declare target"> {}
def OMP_DistributeParallelFor : Directive<"distribute parallel for"> {
  let allowedClauses = [
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_DistributeParallelDo : Directive<"distribute parallel do"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Linear>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_DistributeParallelForSimd : Directive<"distribute parallel for simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_DistributeParallelDoSimd : Directive<"distribute parallel do simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_DistributeSimd : Directive<"distribute simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If, 50>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Order, 50>
  ];
}

def OMP_TargetParallelForSimd : Directive<"target parallel for simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_TargetParallelDoSimd : Directive<"target parallel do simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_UsesAllocators>
  ];
}
def OMP_TargetSimd : Directive<"target simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TeamsDistribute : Directive<"teams distribute"> {
  let allowedClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_Allocate>
  ];
}
def OMP_TeamsDistributeSimd : Directive<"teams distribute simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If, 50>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Order, 50>
  ];
}

def OMP_TeamsDistributeParallelForSimd :
    Directive<"teams distribute parallel for simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TeamsDistributeParallelDoSimd :
    Directive<"teams distribute parallel do simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_NonTemporal>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TeamsDistributeParallelFor :
    Directive<"teams distribute parallel for"> {
  let allowedClauses = [
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TeamsDistributeParallelDo :
    Directive<"teams distribute parallel do"> {
  let allowedClauses = [
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Linear>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TargetTeams : Directive<"target teams"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_UsesAllocators, 50>,
    VersionedClause<OMPC_Shared>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_TargetTeamsDistribute : Directive<"target teams distribute"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_UsesAllocators, 50>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_LastPrivate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}

def OMP_TargetTeamsDistributeParallelFor :
    Directive<"target teams distribute parallel for"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_TargetTeamsDistributeParallelDo :
    Directive<"target teams distribute parallel do"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_UsesAllocators>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Ordered>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TargetTeamsDistributeParallelForSimd :
    Directive<"target teams distribute parallel for simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_TargetTeamsDistributeParallelDoSimd :
    Directive<"target teams distribute parallel do simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_UsesAllocators>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Ordered>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_NonTemporal>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Schedule>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_TargetTeamsDistributeSimd :
    Directive<"target teams distribute simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_DistSchedule>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_Allocate : Directive<"allocate"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_Allocator>,
    VersionedClause<OMPC_Align, 51>
  ];
}
def OMP_DeclareVariant : Directive<"declare variant"> {
  let allowedClauses = [
    VersionedClause<OMPC_Match>
  ];
  let allowedExclusiveClauses = [
    VersionedClause<OMPC_AdjustArgs, 51>,
    VersionedClause<OMPC_AppendArgs, 51>
  ];
}
def OMP_MasterTaskloop : Directive<"master taskloop"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_Allocate>
  ];
}
def OMP_MaskedTaskloop : Directive<"masked taskloop"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Filter>
  ];
}
def OMP_ParallelMasterTaskloop :
    Directive<"parallel master taskloop"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Copyin>
  ];
}
def OMP_ParallelMaskedTaskloop :
    Directive<"parallel masked taskloop"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Filter>
  ];
}
def OMP_MasterTaskloopSimd : Directive<"master taskloop simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_MaskedTaskloopSimd : Directive<"masked taskloop simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_InReduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_Filter>
  ];
}
def OMP_ParallelMasterTaskloopSimd :
    Directive<"parallel master taskloop simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_ParallelMaskedTaskloopSimd :
    Directive<"parallel masked taskloop simd"> {
  let allowedClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Final>,
    VersionedClause<OMPC_Untied>,
    VersionedClause<OMPC_Mergeable>,
    VersionedClause<OMPC_Priority>,
    VersionedClause<OMPC_GrainSize>,
    VersionedClause<OMPC_NoGroup>,
    VersionedClause<OMPC_NumTasks>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Linear>,
    VersionedClause<OMPC_Aligned>,
    VersionedClause<OMPC_SafeLen>,
    VersionedClause<OMPC_SimdLen>,
    VersionedClause<OMPC_NonTemporal, 50>,
    VersionedClause<OMPC_Order, 50>,
    VersionedClause<OMPC_Filter>
  ];
}
def OMP_Depobj : Directive<"depobj"> {
  let allowedClauses = [
    VersionedClause<OMPC_Depend, 50>,
    VersionedClause<OMPC_Destroy, 50>,
    VersionedClause<OMPC_Update, 50>,
    // TODO This should be `none` instead. Comment carried over from
    // OMPKinds.def.
    VersionedClause<OMPC_Depobj, 50>
  ];
}
def OMP_Scan : Directive<"scan"> {
  let allowedClauses = [
    VersionedClause<OMPC_Inclusive, 50>,
    VersionedClause<OMPC_Exclusive, 50>
  ];
}
def OMP_Assumes : Directive<"assumes"> {}
def OMP_BeginAssumes : Directive<"begin assumes"> {}
def OMP_EndAssumes : Directive<"end assumes"> {}
def OMP_BeginDeclareVariant : Directive<"begin declare variant"> {}
def OMP_EndDeclareVariant : Directive<"end declare variant"> {}
def OMP_ParallelWorkshare : Directive<"parallel workshare"> {
  let allowedClauses = [
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_ProcBind>
  ];
}
def OMP_Workshare : Directive<"workshare"> {}
def OMP_EndDo : Directive<"end do"> {}
def OMP_EndDoSimd : Directive<"end do simd"> {}
def OMP_EndSections : Directive<"end sections"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_NoWait>
  ];
}
def OMP_EndSingle : Directive<"end single"> {
  let allowedClauses = [
    VersionedClause<OMPC_CopyPrivate>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_NoWait>
  ];
}
def OMP_EndWorkshare : Directive<"end workshare"> {
  let allowedClauses = [
    VersionedClause<OMPC_NoWait>
  ];
}
def OMP_interop : Directive<"interop"> {
  let allowedClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_Destroy>,
    VersionedClause<OMPC_Init>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Use>,
  ];
}
def OMP_dispatch : Directive<"dispatch"> {
  let allowedClauses = [
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_Novariants>,
    VersionedClause<OMPC_Nocontext>
  ];
}
def OMP_masked : Directive<"masked"> {
  let allowedOnceClauses = [
    VersionedClause<OMPC_Filter>
  ];
}
def OMP_loop : Directive<"loop"> {
  let allowedClauses = [
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Bind, 50>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Order, 50>
  ];
}
def OMP_teams_loop : Directive<"teams loop"> {
  let allowedClauses = [
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Bind, 50>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_Order>,
    VersionedClause<OMPC_ThreadLimit>,
  ];
}
def OMP_target_teams_loop : Directive<"target teams loop"> {
  let allowedClauses = [
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_UsesAllocators, 50>
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Bind, 50>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_NumTeams>,
    VersionedClause<OMPC_Order>,
    VersionedClause<OMPC_ThreadLimit>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_parallel_loop : Directive<"parallel loop"> {
  let allowedClauses = [
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Bind, 50>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Order>,
    VersionedClause<OMPC_ProcBind>,
  ];
}
def OMP_target_parallel_loop : Directive<"target parallel loop"> {
  let allowedClauses = [
    VersionedClause<OMPC_Allocate>,
    VersionedClause<OMPC_Copyin>,
    VersionedClause<OMPC_Depend>,
    VersionedClause<OMPC_Device>,
    VersionedClause<OMPC_FirstPrivate>,
    VersionedClause<OMPC_IsDevicePtr>,
    VersionedClause<OMPC_HasDeviceAddr, 51>,
    VersionedClause<OMPC_LastPrivate>,
    VersionedClause<OMPC_Map>,
    VersionedClause<OMPC_Private>,
    VersionedClause<OMPC_Reduction>,
    VersionedClause<OMPC_Shared>,
    VersionedClause<OMPC_UsesAllocators, 50>,
  ];
  let allowedOnceClauses = [
    VersionedClause<OMPC_Bind, 50>,
    VersionedClause<OMPC_Collapse>,
    VersionedClause<OMPC_Default>,
    VersionedClause<OMPC_DefaultMap>,
    VersionedClause<OMPC_If>,
    VersionedClause<OMPC_NoWait>,
    VersionedClause<OMPC_NumThreads>,
    VersionedClause<OMPC_Order>,
    VersionedClause<OMPC_ProcBind>,
    VersionedClause<OMPC_OMPX_DynCGroupMem>,
  ];
}
def OMP_Metadirective : Directive<"metadirective"> {
  let allowedClauses = [VersionedClause<OMPC_When>];
  let allowedOnceClauses = [VersionedClause<OMPC_Default>];
}
def OMP_Unknown : Directive<"unknown"> {
  let isDefault = true;
}
//===--- OMPKinds.def - OpenMP directives, clauses, rt-calls -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the list of supported OpenMP runtime
/// calls, and other things that need to be listed in enums.
///
/// This file is under transition to OMP.td with TableGen code generation.
///
//===----------------------------------------------------------------------===//

/// OpenMP Directives, combined directives and Clauses
/// - Moved to OMP.td

/// Types used in runtime structs or runtime functions
///
///{

#ifndef OMP_TYPE
#define OMP_TYPE(VarName, InitValue)
#endif

#define __OMP_TYPE(VarName) OMP_TYPE(VarName, Type::get##VarName##Ty(Ctx))

__OMP_TYPE(Void)
__OMP_TYPE(Int1)
__OMP_TYPE(Int8)
__OMP_TYPE(Int16)
__OMP_TYPE(Int32)
__OMP_TYPE(Int64)
__OMP_TYPE(Int8Ptr)
__OMP_TYPE(Int16Ptr)
__OMP_TYPE(Int32Ptr)
__OMP_TYPE(Int64Ptr)
__OMP_TYPE(Double)

OMP_TYPE(SizeTy, M.getDataLayout().getIntPtrType(Ctx))
OMP_TYPE(Int63, Type::getIntNTy(Ctx, 63))

#define __OMP_PTR_TYPE(NAME, BASE) OMP_TYPE(NAME, BASE->getPointerTo())

__OMP_PTR_TYPE(VoidPtr, Int8)
__OMP_PTR_TYPE(VoidPtrPtr, VoidPtr)
__OMP_PTR_TYPE(VoidPtrPtrPtr, VoidPtrPtr)

__OMP_PTR_TYPE(Int8PtrPtr, Int8Ptr)
__OMP_PTR_TYPE(Int8PtrPtrPtr, Int8PtrPtr)

#undef __OMP_PTR_TYPE

#undef __OMP_TYPE
#undef OMP_TYPE

///}
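
// A minimal consumption sketch of the X-macro pattern above (illustrative
// only; `collectOMPTypes` is a hypothetical helper, not an LLVM API).  A
// client defines OMP_TYPE before including this file, each entry expands
// exactly once, and the file #undefs the macro again afterwards.  Note that
// the InitValue expressions expect both `Ctx` and `M` to be in scope (see
// SizeTy above).
#if 0
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Module.h"

static void collectOMPTypes(llvm::Module &M,
                            llvm::SmallVectorImpl<llvm::Type *> &Types) {
  using namespace llvm;
  LLVMContext &Ctx = M.getContext();
  // Each OMP_TYPE(VarName, InitValue) entry stamps out one push_back call.
#define OMP_TYPE(VarName, InitValue) Types.push_back(InitValue);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
#endif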

/// array types
///
///{

#ifndef OMP_ARRAY_TYPE
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)
#endif

#define __OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)                           \
  OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize)

__OMP_ARRAY_TYPE(KmpCriticalName, Int32, 8)
__OMP_ARRAY_TYPE(Int32Arr3, Int32, 3)

#undef __OMP_ARRAY_TYPE
#undef OMP_ARRAY_TYPE

///}

/// Struct and function types
///
///{

#ifndef OMP_STRUCT_TYPE
#define OMP_STRUCT_TYPE(VarName, StructName, Packed, ...)
#endif

#define __OMP_STRUCT_TYPE(VarName, Name, Packed, ...)                                  \
  OMP_STRUCT_TYPE(VarName, "struct." #Name, Packed, __VA_ARGS__)

__OMP_STRUCT_TYPE(Ident, ident_t, false, Int32, Int32, Int32, Int32, Int8Ptr)
__OMP_STRUCT_TYPE(OffloadEntry, __tgt_offload_entry, false, Int8Ptr, Int8Ptr, SizeTy,
                  Int32, Int32)
__OMP_STRUCT_TYPE(KernelArgs, __tgt_kernel_arguments, false, Int32, Int32, VoidPtrPtr,
                  VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr,
                  Int64, Int64, Int32Arr3Ty, Int32Arr3Ty, Int32)
__OMP_STRUCT_TYPE(AsyncInfo, __tgt_async_info, false, Int8Ptr)
__OMP_STRUCT_TYPE(DependInfo, kmp_dep_info, false, SizeTy, SizeTy, Int8)

#undef __OMP_STRUCT_TYPE
#undef OMP_STRUCT_TYPE

#ifndef OMP_FUNCTION_TYPE
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)
#endif

#define __OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...)                \
  OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, __VA_ARGS__)

__OMP_FUNCTION_TYPE(ParallelTask, true, Void, Int32Ptr, Int32Ptr)
__OMP_FUNCTION_TYPE(ReduceFunction, false, Void, VoidPtr, VoidPtr)
__OMP_FUNCTION_TYPE(CopyFunction, false, Void, VoidPtr, VoidPtr)
__OMP_FUNCTION_TYPE(KmpcCtor, false, VoidPtr, VoidPtr)
__OMP_FUNCTION_TYPE(KmpcDtor, false, Void, VoidPtr)
__OMP_FUNCTION_TYPE(KmpcCopyCtor, false, VoidPtr, VoidPtr, VoidPtr)
__OMP_FUNCTION_TYPE(TaskRoutineEntry, false, Int32, Int32,
                    /* kmp_task_t */ VoidPtr)
__OMP_FUNCTION_TYPE(ShuffleReduce, false, Void, VoidPtr, Int16, Int16, Int16)
__OMP_FUNCTION_TYPE(InterWarpCopy, false, Void, VoidPtr, Int32)
__OMP_FUNCTION_TYPE(GlobalList, false, Void, VoidPtr, Int32, VoidPtr)

#undef __OMP_FUNCTION_TYPE
#undef OMP_FUNCTION_TYPE

///}
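
// A sketch of turning one OMP_FUNCTION_TYPE entry above into a real
// llvm::FunctionType (illustrative only; `buildParallelTaskTy` is a
// hypothetical helper, and upstream derives these through the macro rather
// than by hand).  The variadic tail of the entry becomes the parameter
// list, and IsVarArg maps onto FunctionType's isVarArg flag.
#if 0
#include "llvm/IR/DerivedTypes.h"

static llvm::FunctionType *buildParallelTaskTy(llvm::LLVMContext &Ctx) {
  // ParallelTask above: variadic, returns Void, leading (Int32Ptr, Int32Ptr).
  llvm::Type *Int32Ptr = llvm::Type::getInt32PtrTy(Ctx);
  return llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
                                 {Int32Ptr, Int32Ptr}, /*isVarArg=*/true);
}
#endif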

/// Internal Control Variables information
///
///{

#ifndef ICV_INIT_VALUE
#define ICV_INIT_VALUE(Enum, Name)
#endif

#define __ICV_INIT_VALUE(Name) ICV_INIT_VALUE(ICV_##Name, #Name)

__ICV_INIT_VALUE(ZERO)
__ICV_INIT_VALUE(FALSE)
__ICV_INIT_VALUE(IMPLEMENTATION_DEFINED)
__ICV_INIT_VALUE(LAST)

#undef __ICV_INIT_VALUE
#undef ICV_INIT_VALUE

#ifndef ICV_DATA_ENV
#define ICV_DATA_ENV(Enum, Name, EnvVarName, Init)
#endif

#define __ICV_DATA_ENV(Name, EnvVarName, Init)                                 \
  ICV_DATA_ENV(ICV_##Name, #Name, #EnvVarName, Init)

__ICV_DATA_ENV(nthreads, OMP_NUM_THREADS, ICV_IMPLEMENTATION_DEFINED)
__ICV_DATA_ENV(active_levels, NONE, ICV_ZERO)
__ICV_DATA_ENV(cancel, OMP_CANCELLATION, ICV_FALSE)
__ICV_DATA_ENV(proc_bind, OMP_PROC_BIND, ICV_IMPLEMENTATION_DEFINED)
__ICV_DATA_ENV(__last, last, ICV_LAST)

#undef __ICV_DATA_ENV
#undef ICV_DATA_ENV

#ifndef ICV_RT_SET
#define ICV_RT_SET(Name, RTL)
#endif

#define __ICV_RT_SET(Name, RTL) ICV_RT_SET(ICV_##Name, OMPRTL_##RTL)

__ICV_RT_SET(nthreads, omp_set_num_threads)

#undef __ICV_RT_SET
#undef ICV_RT_SET

#ifndef ICV_RT_GET
#define ICV_RT_GET(Name, RTL)
#endif

#define __ICV_RT_GET(Name, RTL) ICV_RT_GET(ICV_##Name, OMPRTL_##RTL)

__ICV_RT_GET(nthreads, omp_get_max_threads)
__ICV_RT_GET(active_levels, omp_get_active_level)
__ICV_RT_GET(cancel, omp_get_cancellation)
__ICV_RT_GET(proc_bind, omp_get_proc_bind)

#undef __ICV_RT_GET
#undef ICV_RT_GET

///}
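
// A sketch of consuming the ICV tables (illustrative; `printICVEnvVars` is
// a hypothetical helper).  Defining only ICV_DATA_ENV leaves every other
// macro family in this file as the no-op default, so the include expands
// to exactly one line per __ICV_DATA_ENV entry above.
#if 0
#include <cstdio>

static void printICVEnvVars() {
  // Name and EnvVarName arrive already stringized by __ICV_DATA_ENV,
  // e.g. ("nthreads", "OMP_NUM_THREADS").
#define ICV_DATA_ENV(Enum, Name, EnvVarName, Init)                            \
  std::printf("ICV %s is seeded from $%s\n", Name, EnvVarName);
#include "llvm/Frontend/OpenMP/OMPKinds.def"
}
#endif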

/// Runtime library function (and their attributes)
///
///{

#ifndef OMP_RTL
#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...)
#endif

#define __OMP_RTL(Name, IsVarArg, ReturnType, ...)                             \
  OMP_RTL(OMPRTL_##Name, #Name, IsVarArg, ReturnType, __VA_ARGS__)

__OMP_RTL(__kmpc_barrier, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_cancel, false, Int32, IdentPtr, Int32, Int32)
__OMP_RTL(__kmpc_cancel_barrier, false, Int32, IdentPtr, Int32)
__OMP_RTL(__kmpc_error, false, Void, IdentPtr, Int32, Int8Ptr)
__OMP_RTL(__kmpc_flush, false, Void, IdentPtr)
__OMP_RTL(__kmpc_global_thread_num, false, Int32, IdentPtr)
__OMP_RTL(__kmpc_get_hardware_thread_id_in_block, false, Int32, )
__OMP_RTL(__kmpc_fork_call, true, Void, IdentPtr, Int32, ParallelTaskPtr)
__OMP_RTL(__kmpc_fork_call_if, false, Void, IdentPtr, Int32, ParallelTaskPtr,
          Int32, VoidPtr)
__OMP_RTL(__kmpc_omp_taskwait, false, Int32, IdentPtr, Int32)
__OMP_RTL(__kmpc_omp_taskyield, false, Int32, IdentPtr, Int32, /* Int */ Int32)
__OMP_RTL(__kmpc_push_num_threads, false, Void, IdentPtr, Int32,
          /* Int */ Int32)
__OMP_RTL(__kmpc_push_proc_bind, false, Void, IdentPtr, Int32, /* Int */ Int32)
__OMP_RTL(__kmpc_omp_reg_task_with_affinity, false, Int32, IdentPtr, Int32,
          /* kmp_task_t */ VoidPtr, Int32,
          /* kmp_task_affinity_info_t */ VoidPtr)

__OMP_RTL(__kmpc_get_hardware_num_blocks, false, Int32, )
__OMP_RTL(__kmpc_get_hardware_num_threads_in_block, false, Int32, )
__OMP_RTL(__kmpc_get_warp_size, false, Int32, )

__OMP_RTL(omp_get_thread_num, false, Int32, )
__OMP_RTL(omp_get_num_threads, false, Int32, )
__OMP_RTL(omp_get_max_threads, false, Int32, )
__OMP_RTL(omp_in_parallel, false, Int32, )
__OMP_RTL(omp_get_dynamic, false, Int32, )
__OMP_RTL(omp_get_cancellation, false, Int32, )
__OMP_RTL(omp_get_nested, false, Int32, )
__OMP_RTL(omp_get_schedule, false, Void, Int32Ptr, Int32Ptr)
__OMP_RTL(omp_get_thread_limit, false, Int32, )
__OMP_RTL(omp_get_supported_active_levels, false, Int32, )
__OMP_RTL(omp_get_max_active_levels, false, Int32, )
__OMP_RTL(omp_get_level, false, Int32, )
__OMP_RTL(omp_get_ancestor_thread_num, false, Int32, Int32)
__OMP_RTL(omp_get_team_size, false, Int32, Int32)
__OMP_RTL(omp_get_active_level, false, Int32, )
__OMP_RTL(omp_in_final, false, Int32, )
__OMP_RTL(omp_get_proc_bind, false, Int32, )
__OMP_RTL(omp_get_num_places, false, Int32, )
__OMP_RTL(omp_get_num_procs, false, Int32, )
__OMP_RTL(omp_get_place_proc_ids, false, Void, Int32, Int32Ptr)
__OMP_RTL(omp_get_place_num, false, Int32, )
__OMP_RTL(omp_get_partition_num_places, false, Int32, )
__OMP_RTL(omp_get_partition_place_nums, false, Void, Int32Ptr)
__OMP_RTL(omp_get_wtime, false, Double,)

__OMP_RTL(omp_set_num_threads, false, Void, Int32)
__OMP_RTL(omp_set_dynamic, false, Void, Int32)
__OMP_RTL(omp_set_nested, false, Void, Int32)
__OMP_RTL(omp_set_schedule, false, Void, Int32, Int32)
__OMP_RTL(omp_set_max_active_levels, false, Void, Int32)

__OMP_RTL(__kmpc_master, false, Int32, IdentPtr, Int32)
__OMP_RTL(__kmpc_end_master, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_masked, false, Int32, IdentPtr, Int32, Int32)
__OMP_RTL(__kmpc_end_masked, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_critical, false, Void, IdentPtr, Int32, KmpCriticalNamePtrTy)
__OMP_RTL(__kmpc_critical_with_hint, false, Void, IdentPtr, Int32,
          KmpCriticalNamePtrTy, Int32)
__OMP_RTL(__kmpc_end_critical, false, Void, IdentPtr, Int32,
          KmpCriticalNamePtrTy)

__OMP_RTL(__kmpc_begin, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_end, false, Void, IdentPtr)

__OMP_RTL(__kmpc_reduce, false, Int32, IdentPtr, Int32, Int32, SizeTy, VoidPtr,
          ReduceFunctionPtr, KmpCriticalNamePtrTy)
__OMP_RTL(__kmpc_reduce_nowait, false, Int32, IdentPtr, Int32, Int32, SizeTy,
          VoidPtr, ReduceFunctionPtr, KmpCriticalNamePtrTy)
__OMP_RTL(__kmpc_end_reduce, false, Void, IdentPtr, Int32, KmpCriticalNamePtrTy)
__OMP_RTL(__kmpc_end_reduce_nowait, false, Void, IdentPtr, Int32,
          KmpCriticalNamePtrTy)

__OMP_RTL(__kmpc_ordered, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_end_ordered, false, Void, IdentPtr, Int32)

__OMP_RTL(__kmpc_for_static_init_4, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_for_static_init_4u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_for_static_init_8, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_for_static_init_8u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_for_static_fini, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_distribute_static_init_4, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_distribute_static_init_4u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_distribute_static_init_8, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_distribute_static_init_8u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_distribute_static_fini, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_dist_dispatch_init_4, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32, Int32, Int32, Int32)
__OMP_RTL(__kmpc_dist_dispatch_init_4u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32, Int32, Int32, Int32)
__OMP_RTL(__kmpc_dist_dispatch_init_8, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64, Int64, Int64, Int64)
__OMP_RTL(__kmpc_dist_dispatch_init_8u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64, Int64, Int64, Int64)
__OMP_RTL(__kmpc_dispatch_init_4, false, Void, IdentPtr, Int32, Int32, Int32,
          Int32, Int32, Int32)
__OMP_RTL(__kmpc_dispatch_init_4u, false, Void, IdentPtr, Int32, Int32, Int32,
          Int32, Int32, Int32)
__OMP_RTL(__kmpc_dispatch_init_8, false, Void, IdentPtr, Int32, Int32, Int64,
          Int64, Int64, Int64)
__OMP_RTL(__kmpc_dispatch_init_8u, false, Void, IdentPtr, Int32, Int32, Int64,
          Int64, Int64, Int64)
__OMP_RTL(__kmpc_dispatch_next_4, false, Int32, IdentPtr, Int32, Int32Ptr,
          Int32Ptr, Int32Ptr, Int32Ptr)
__OMP_RTL(__kmpc_dispatch_next_4u, false, Int32, IdentPtr, Int32, Int32Ptr,
          Int32Ptr, Int32Ptr, Int32Ptr)
__OMP_RTL(__kmpc_dispatch_next_8, false, Int32, IdentPtr, Int32, Int32Ptr,
          Int64Ptr, Int64Ptr, Int64Ptr)
__OMP_RTL(__kmpc_dispatch_next_8u, false, Int32, IdentPtr, Int32, Int32Ptr,
          Int64Ptr, Int64Ptr, Int64Ptr)
__OMP_RTL(__kmpc_dispatch_fini_4, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_dispatch_fini_4u, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_dispatch_fini_8, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_dispatch_fini_8u, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_team_static_init_4, false, Void, IdentPtr, Int32, Int32Ptr,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_team_static_init_4u, false, Void, IdentPtr, Int32, Int32Ptr,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_team_static_init_8, false, Void, IdentPtr, Int32, Int32Ptr,
          Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_team_static_init_8u, false, Void, IdentPtr, Int32, Int32Ptr,
          Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_dist_for_static_init_4, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_dist_for_static_init_4u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32Ptr, Int32, Int32)
__OMP_RTL(__kmpc_dist_for_static_init_8, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)
__OMP_RTL(__kmpc_dist_for_static_init_8u, false, Void, IdentPtr, Int32, Int32,
          Int32Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64Ptr, Int64, Int64)

__OMP_RTL(__kmpc_single, false, Int32, IdentPtr, Int32)
__OMP_RTL(__kmpc_end_single, false, Void, IdentPtr, Int32)

__OMP_RTL(__kmpc_omp_task_alloc, false, /* kmp_task_t */ VoidPtr, IdentPtr,
          Int32, Int32, SizeTy, SizeTy, TaskRoutineEntryPtr)
__OMP_RTL(__kmpc_omp_task, false, Int32, IdentPtr, Int32,
          /* kmp_task_t */ VoidPtr)
__OMP_RTL(__kmpc_end_taskgroup, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_taskgroup, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_omp_task_begin_if0, false, Void, IdentPtr, Int32,
          /* kmp_task_t */ VoidPtr)
__OMP_RTL(__kmpc_omp_task_complete_if0, false, Void, IdentPtr, Int32,
          /* kmp_task_t */ VoidPtr)
__OMP_RTL(__kmpc_omp_task_with_deps, false, Int32, IdentPtr, Int32,
          /* kmp_task_t */ VoidPtr, Int32,
          /* kmp_depend_info_t */ VoidPtr, Int32,
          /* kmp_depend_info_t */ VoidPtr)
__OMP_RTL(__kmpc_taskloop, false, Void, IdentPtr, /* Int */ Int32, VoidPtr,
          /* Int */ Int32, Int64Ptr, Int64Ptr, Int64, /* Int */ Int32,
          /* Int */ Int32, Int64, VoidPtr)
__OMP_RTL(__kmpc_omp_target_task_alloc, false, /* kmp_task_t */ VoidPtr,
          IdentPtr, Int32, Int32, SizeTy, SizeTy, TaskRoutineEntryPtr, Int64)
__OMP_RTL(__kmpc_taskred_modifier_init, false, /* kmp_taskgroup */ VoidPtr,
          IdentPtr, /* Int */ Int32, /* Int */ Int32, /* Int */ Int32, VoidPtr)
__OMP_RTL(__kmpc_taskred_init, false, /* kmp_taskgroup */ VoidPtr,
          /* Int */ Int32, /* Int */ Int32, VoidPtr)
__OMP_RTL(__kmpc_task_reduction_modifier_fini, false, Void, IdentPtr,
          /* Int */ Int32, /* Int */ Int32)
__OMP_RTL(__kmpc_task_reduction_get_th_data, false, VoidPtr, Int32, VoidPtr,
          VoidPtr)
__OMP_RTL(__kmpc_task_reduction_init, false, VoidPtr, Int32, Int32, VoidPtr)
__OMP_RTL(__kmpc_task_reduction_modifier_init, false, VoidPtr, VoidPtr, Int32,
          Int32, Int32, VoidPtr)
__OMP_RTL(__kmpc_proxy_task_completed_ooo, false, Void, VoidPtr)

__OMP_RTL(__kmpc_omp_wait_deps, false, Void, IdentPtr, Int32, Int32,
          /* kmp_depend_info_t */ VoidPtr, Int32, VoidPtr)
__OMP_RTL(__kmpc_omp_taskwait_deps_51, false, Void, IdentPtr, Int32, Int32,
          /* kmp_depend_info_t */ VoidPtr, Int32, VoidPtr, Int32)
__OMP_RTL(__kmpc_cancellationpoint, false, Int32, IdentPtr, Int32, Int32)

__OMP_RTL(__kmpc_fork_teams, true, Void, IdentPtr, Int32, ParallelTaskPtr)
__OMP_RTL(__kmpc_push_num_teams, false, Void, IdentPtr, Int32, Int32, Int32)

__OMP_RTL(__kmpc_copyprivate, false, Void, IdentPtr, Int32, SizeTy, VoidPtr,
          CopyFunctionPtr, Int32)
__OMP_RTL(__kmpc_threadprivate_cached, false, VoidPtr, IdentPtr, Int32, VoidPtr,
          SizeTy, VoidPtrPtrPtr)
__OMP_RTL(__kmpc_threadprivate_register, false, Void, IdentPtr, VoidPtr,
          KmpcCtorPtr, KmpcCopyCtorPtr, KmpcDtorPtr)

__OMP_RTL(__kmpc_doacross_init, false, Void, IdentPtr, Int32, Int32,
          /* kmp_dim */ VoidPtr)
__OMP_RTL(__kmpc_doacross_post, false, Void, IdentPtr, Int32, Int64Ptr)
__OMP_RTL(__kmpc_doacross_wait, false, Void, IdentPtr, Int32, Int64Ptr)
__OMP_RTL(__kmpc_doacross_fini, false, Void, IdentPtr, Int32)

__OMP_RTL(__kmpc_alloc, false, VoidPtr, /* Int */ Int32, SizeTy, VoidPtr)
__OMP_RTL(__kmpc_aligned_alloc, false, VoidPtr, /* Int */ Int32, SizeTy, SizeTy,
          VoidPtr)
__OMP_RTL(__kmpc_free, false, Void, /* Int */ Int32, VoidPtr, VoidPtr)

__OMP_RTL(__tgt_interop_init, false, Void, IdentPtr, Int32, VoidPtrPtr, Int32,
          Int32, Int32, VoidPtr, Int32)
__OMP_RTL(__tgt_interop_destroy, false, Void, IdentPtr, Int32, VoidPtrPtr,
          Int32, Int32, VoidPtr, Int32)
__OMP_RTL(__tgt_interop_use, false, Void, IdentPtr, Int32, VoidPtrPtr, Int32,
          Int32, VoidPtr, Int32)

__OMP_RTL(__kmpc_init_allocator, false, /* omp_allocator_handle_t */ VoidPtr,
          /* Int */ Int32, /* omp_memspace_handle_t */ VoidPtr,
          /* Int */ Int32, /* omp_alloctrait_t */ VoidPtr)
__OMP_RTL(__kmpc_destroy_allocator, false, Void, /* Int */ Int32,
          /* omp_allocator_handle_t */ VoidPtr)

__OMP_RTL(__kmpc_push_target_tripcount_mapper, false, Void, IdentPtr, Int64, Int64)
__OMP_RTL(__tgt_target_mapper, false, Int32, IdentPtr, Int64, VoidPtr, Int32, VoidPtrPtr,
          VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_target_nowait_mapper, false, Int32, IdentPtr, Int64, VoidPtr,
          Int32, VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr,
          VoidPtrPtr, Int32, VoidPtr, Int32, VoidPtr)
__OMP_RTL(__tgt_target_teams_mapper, false, Int32, IdentPtr, Int64, VoidPtr, Int32,
          VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr, Int32, Int32)
__OMP_RTL(__tgt_target_teams_nowait_mapper, false, Int32, IdentPtr, Int64,
          VoidPtr, Int32, VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr,
          VoidPtrPtr, VoidPtrPtr, Int32, Int32, Int32, VoidPtr, Int32, VoidPtr)
__OMP_RTL(__tgt_target_kernel, false, Int32, IdentPtr, Int64, Int32, Int32,
          VoidPtr, KernelArgsPtr)
__OMP_RTL(__tgt_target_kernel_nowait, false, Int32, IdentPtr, Int64, Int32,
          Int32, VoidPtr, KernelArgsPtr, Int32, VoidPtr, Int32, VoidPtr)
__OMP_RTL(__tgt_register_requires, false, Void, Int64)
__OMP_RTL(__tgt_target_data_begin_mapper, false, Void, IdentPtr, Int64, Int32, VoidPtrPtr,
          VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_target_data_begin_nowait_mapper, false, Void, IdentPtr, Int64, Int32,
          VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_target_data_begin_mapper_issue, false, Void, IdentPtr, Int64, Int32,
          VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr, AsyncInfoPtr)
__OMP_RTL(__tgt_target_data_begin_mapper_wait, false, Void, Int64, AsyncInfoPtr)
__OMP_RTL(__tgt_target_data_end_mapper, false, Void, IdentPtr, Int64, Int32, VoidPtrPtr,
          VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_target_data_end_nowait_mapper, false, Void, IdentPtr, Int64, Int32,
          VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_target_data_update_mapper, false, Void, IdentPtr, Int64, Int32,
          VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_target_data_update_nowait_mapper, false, Void, IdentPtr, Int64, Int32,
          VoidPtrPtr, VoidPtrPtr, Int64Ptr, Int64Ptr, VoidPtrPtr, VoidPtrPtr)
__OMP_RTL(__tgt_mapper_num_components, false, Int64, VoidPtr)
__OMP_RTL(__tgt_push_mapper_component, false, Void, VoidPtr, VoidPtr, VoidPtr,
          Int64, Int64, VoidPtr)
__OMP_RTL(__kmpc_task_allow_completion_event, false, VoidPtr, IdentPtr,
          /* Int */ Int32, /* kmp_task_t */ VoidPtr)

/// OpenMP Device runtime functions
__OMP_RTL(__kmpc_target_init, false, Int32, IdentPtr, Int8, Int1)
__OMP_RTL(__kmpc_target_deinit, false, Void, IdentPtr, Int8)
__OMP_RTL(__kmpc_kernel_prepare_parallel, false, Void, VoidPtr)
__OMP_RTL(__kmpc_parallel_51, false, Void, IdentPtr, Int32, Int32, Int32, Int32,
          VoidPtr, VoidPtr, VoidPtrPtr, SizeTy)
__OMP_RTL(__kmpc_kernel_parallel, false, Int1, VoidPtrPtr)
__OMP_RTL(__kmpc_kernel_end_parallel, false, Void, )
__OMP_RTL(__kmpc_serialized_parallel, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_end_serialized_parallel, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_shuffle_int32, false, Int32, Int32, Int16, Int16)
__OMP_RTL(__kmpc_nvptx_parallel_reduce_nowait_v2, false, Int32, IdentPtr, Int32,
          Int32, SizeTy, VoidPtr, ShuffleReducePtr, InterWarpCopyPtr)
__OMP_RTL(__kmpc_nvptx_end_reduce_nowait, false, Void, Int32)
__OMP_RTL(__kmpc_nvptx_teams_reduce_nowait_v2, false, Int32, IdentPtr, Int32,
          VoidPtr, Int32, VoidPtr, ShuffleReducePtr, InterWarpCopyPtr,
          GlobalListPtr, GlobalListPtr, GlobalListPtr, GlobalListPtr)

__OMP_RTL(__kmpc_shuffle_int64, false, Int64, Int64, Int16, Int16)

__OMP_RTL(__kmpc_alloc_shared, false, VoidPtr, SizeTy)
__OMP_RTL(__kmpc_free_shared, false, Void, VoidPtr, SizeTy)
__OMP_RTL(__kmpc_begin_sharing_variables, false, Void, VoidPtrPtrPtr, SizeTy)
__OMP_RTL(__kmpc_end_sharing_variables, false, Void, )
__OMP_RTL(__kmpc_get_shared_variables, false, Void, VoidPtrPtrPtr)
__OMP_RTL(__kmpc_parallel_level, false, Int16, IdentPtr, Int32)
__OMP_RTL(__kmpc_is_spmd_exec_mode, false, Int8, )
__OMP_RTL(__kmpc_barrier_simple_spmd, false, Void, IdentPtr, Int32)
__OMP_RTL(__kmpc_barrier_simple_generic, false, Void, IdentPtr, Int32)

__OMP_RTL(__kmpc_warp_active_thread_mask, false, Int64,)
__OMP_RTL(__kmpc_syncwarp, false, Void, Int64)

__OMP_RTL(__last, false, Void, )

#undef __OMP_RTL
#undef OMP_RTL
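
// A sketch of the enum this table is stamped into (it mirrors the
// RuntimeFunction pattern in llvm/Frontend/OpenMP/OMPConstants.h, though
// the exact shape here is kept illustrative).  The trailing `__last`
// entry above acts as the end-of-enum sentinel, OMPRTL___last.
#if 0
enum class RuntimeFunction {
#define OMP_RTL(Enum, Str, IsVarArg, ReturnType, ...) Enum,
#include "llvm/Frontend/OpenMP/OMPKinds.def"
};
#endif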

#define ParamAttrs(...) ArrayRef<AttributeSet>({__VA_ARGS__})
#define EnumAttr(Kind) Attribute::get(Ctx, Attribute::AttrKind::Kind)
#define EnumAttrInt(Kind, N) Attribute::get(Ctx, Attribute::AttrKind::Kind, N)
#define AllocSizeAttr(N, M) Attribute::getWithAllocSizeArgs(Ctx, N, M)
#define MemoryAttr(ME) Attribute::getWithMemoryEffects(Ctx, ME)
#define AttributeSet(...)                                                      \
  AttributeSet::get(Ctx, ArrayRef<Attribute>({__VA_ARGS__}))

#ifndef OMP_ATTRS_SET
#define OMP_ATTRS_SET(VarName, AttrSet)
#endif

#define __OMP_ATTRS_SET(VarName, AttrSet) OMP_ATTRS_SET(VarName, AttrSet)
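
// A sketch of what an attribute set like GetterAttrs below means when
// attached to a declared runtime function (illustrative only;
// `applyGetterAttrs` is a hypothetical helper, and the real sets are
// applied through the OMP_RTL_ATTRS table rather than by hand).
#if 0
#include "llvm/IR/Function.h"

static void applyGetterAttrs(llvm::Function &F, bool OptimisticAttributes) {
  // Pessimistically we only know the callee does not unwind; optimistically
  // a getter also neither synchronizes, frees, nor fails to return.
  F.addFnAttr(llvm::Attribute::NoUnwind);
  if (OptimisticAttributes) {
    F.addFnAttr(llvm::Attribute::NoSync);
    F.addFnAttr(llvm::Attribute::NoFree);
    F.addFnAttr(llvm::Attribute::WillReturn);
  }
}
#endif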

__OMP_ATTRS_SET(
    GetterAttrs,
    OptimisticAttributes
        ? AttributeSet(
              EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
              EnumAttr(WillReturn),
              MemoryAttr(MemoryEffects::inaccessibleMemOnly(ModRefInfo::Ref)))
        : AttributeSet(EnumAttr(NoUnwind)))
__OMP_ATTRS_SET(
    GetterArgReadAttrs,
    OptimisticAttributes
        ? AttributeSet(
              EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
              EnumAttr(WillReturn),
              MemoryAttr(MemoryEffects::inaccessibleOrArgMemOnly(ModRefInfo::Ref)))
        : AttributeSet(EnumAttr(NoUnwind)))
__OMP_ATTRS_SET(
    GetterArgWriteAttrs,
    OptimisticAttributes
        ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
                       EnumAttr(WillReturn),
                       MemoryAttr(MemoryEffects::argMemOnly() | MemoryEffects::inaccessibleMemOnly(ModRefInfo::Ref)))
        : AttributeSet(EnumAttr(NoUnwind)))
__OMP_ATTRS_SET(
    SetterAttrs,
    OptimisticAttributes
        ? AttributeSet(
              EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
              EnumAttr(WillReturn),
              MemoryAttr(MemoryEffects::inaccessibleMemOnly(ModRefInfo::Mod)))
        : AttributeSet(EnumAttr(NoUnwind)))

__OMP_ATTRS_SET(DefaultAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync),
                                   EnumAttr(WillReturn), EnumAttr(NoFree))
                    : AttributeSet(EnumAttr(NoUnwind)))

__OMP_ATTRS_SET(BarrierAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(Convergent))
                    : AttributeSet(EnumAttr(NoUnwind), EnumAttr(Convergent)))

__OMP_ATTRS_SET(
    InaccessibleArgOnlyAttrs,
    OptimisticAttributes
        ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
                       EnumAttr(WillReturn),
                       MemoryAttr(MemoryEffects::inaccessibleOrArgMemOnly()))
        : AttributeSet(EnumAttr(NoUnwind)))

__OMP_ATTRS_SET(AlwaysInlineAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(AlwaysInline))
                    : AttributeSet(EnumAttr(AlwaysInline)))

#if 0
__OMP_ATTRS_SET(
    InaccessibleOnlyAttrs,
    OptimisticAttributes
        ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync), EnumAttr(NoFree),
                       EnumAttr(WillReturn),
                       MemoryAttr(MemoryEffects::inaccessibleMemOnly()))
        : AttributeSet(EnumAttr(NoUnwind)))
#endif

__OMP_ATTRS_SET(AllocAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync),
                                   EnumAttr(WillReturn))
                    : AttributeSet(EnumAttr(NoUnwind)))

__OMP_ATTRS_SET(ForkAttrs, OptimisticAttributes
                               ? AttributeSet(EnumAttr(NoUnwind))
                               : AttributeSet(EnumAttr(NoUnwind)))

__OMP_ATTRS_SET(ReadOnlyPtrAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(ReadOnly), EnumAttr(NoFree),
                                   EnumAttr(NoCapture))
                    : AttributeSet())

__OMP_ATTRS_SET(DeviceAllocAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync))
                    : AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync)))

#if 0
__OMP_ATTRS_SET(WriteOnlyPtrAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(WriteOnly), EnumAttr(NoFree),
                                   EnumAttr(NoCapture))
                    : AttributeSet())
#endif

__OMP_ATTRS_SET(ArgPtrAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoCapture), EnumAttr(NoFree))
                    : AttributeSet())

__OMP_ATTRS_SET(ReturnPtrAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoAlias))
                    : AttributeSet())

__OMP_ATTRS_SET(ZExt, AttributeSet(EnumAttr(ZExt)))
__OMP_ATTRS_SET(SExt, AttributeSet(EnumAttr(SExt)))
__OMP_ATTRS_SET(SizeTyExt,
                M.getDataLayout().getIntPtrType(Ctx)->getBitWidth() < 64
                    ? AttributeSet(EnumAttr(ZExt))
                    : AttributeSet())

#if 0
__OMP_ATTRS_SET(ReturnAlignedPtrAttrs,
                OptimisticAttributes
                    ? AttributeSet(EnumAttr(NoAlias), EnumAttrInt(Alignment, 8),
                                   EnumAttrInt(DereferenceableOrNull, 8))
                    : AttributeSet())
#endif

#undef __OMP_ATTRS_SET
#undef OMP_ATTRS_SET
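
// Sketch of one way the sets above are consumed: the includer defines
// OMP_ATTRS_SET to materialize each set as a local AttributeSet variable
// (assuming `Ctx`, `M`, and `OptimisticAttributes` are in scope, as the
// initializers above require):
//
//   #define OMP_ATTRS_SET(VarName, AttrSet) AttributeSet VarName = AttrSet;
//   #include "llvm/Frontend/OpenMP/OMPKinds.def"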

#ifndef OMP_RTL_ATTRS
#define OMP_RTL_ATTRS(Enum, FnAttrSet, RetAttrSet, ArgAttrSets)
#endif

#define __OMP_RTL_ATTRS(Name, FnAttrSet, RetAttrSet, ArgAttrSets)              \
  OMP_RTL_ATTRS(OMPRTL_##Name, FnAttrSet, RetAttrSet, ArgAttrSets)

__OMP_RTL_ATTRS(__kmpc_barrier, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_barrier_simple_spmd, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_barrier_simple_generic, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_warp_active_thread_mask, BarrierAttrs, AttributeSet(),
                ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_syncwarp, BarrierAttrs, AttributeSet(), ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_cancel, InaccessibleArgOnlyAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_cancel_barrier, BarrierAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_error, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SExt))
__OMP_RTL_ATTRS(__kmpc_flush, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_global_thread_num, GetterArgReadAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_get_hardware_thread_id_in_block, GetterAttrs, ZExt,
                ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_fork_call, ForkAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_fork_call_if, AttributeSet(), AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_taskwait, BarrierAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_taskyield, InaccessibleArgOnlyAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_push_num_threads, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_push_proc_bind, InaccessibleArgOnlyAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_reg_task_with_affinity, DefaultAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs,
                           SExt, ReadOnlyPtrAttrs))

__OMP_RTL_ATTRS(__kmpc_get_hardware_num_blocks, GetterAttrs, ZExt, ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_get_hardware_num_threads_in_block, GetterAttrs, ZExt, ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_get_warp_size, GetterAttrs, ZExt, ParamAttrs())

__OMP_RTL_ATTRS(omp_get_thread_num, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_num_threads, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_max_threads, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_in_parallel, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_dynamic, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_cancellation, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_nested, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(
    omp_get_schedule, GetterArgWriteAttrs, AttributeSet(),
    ParamAttrs(AttributeSet(EnumAttr(NoCapture), EnumAttr(WriteOnly)),
               AttributeSet(EnumAttr(NoCapture), EnumAttr(WriteOnly))))
__OMP_RTL_ATTRS(omp_get_thread_limit, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_supported_active_levels, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_max_active_levels, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_level, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_ancestor_thread_num, GetterAttrs, SExt, ParamAttrs(SExt))
__OMP_RTL_ATTRS(omp_get_team_size, GetterAttrs, SExt, ParamAttrs(SExt))
__OMP_RTL_ATTRS(omp_get_active_level, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_in_final, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_proc_bind, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_num_places, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_num_procs, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_place_proc_ids, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(SExt, AttributeSet(EnumAttr(NoCapture),
                                              EnumAttr(WriteOnly))))
__OMP_RTL_ATTRS(omp_get_place_num, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_partition_num_places, GetterAttrs, SExt, ParamAttrs())
__OMP_RTL_ATTRS(omp_get_partition_place_nums, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs())
__OMP_RTL_ATTRS(omp_get_wtime, GetterAttrs, AttributeSet(), ParamAttrs())

__OMP_RTL_ATTRS(omp_set_num_threads, SetterAttrs, AttributeSet(),
                ParamAttrs(SExt))
__OMP_RTL_ATTRS(omp_set_dynamic, SetterAttrs, AttributeSet(), ParamAttrs(SExt))
__OMP_RTL_ATTRS(omp_set_nested, SetterAttrs, AttributeSet(), ParamAttrs(SExt))
__OMP_RTL_ATTRS(omp_set_schedule, SetterAttrs, AttributeSet(),
                ParamAttrs(SExt, SExt))
__OMP_RTL_ATTRS(omp_set_max_active_levels, SetterAttrs, AttributeSet(),
                ParamAttrs(SExt))

__OMP_RTL_ATTRS(__kmpc_master, InaccessibleArgOnlyAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_end_master, InaccessibleArgOnlyAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_masked, InaccessibleArgOnlyAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_end_masked, InaccessibleArgOnlyAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_critical, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_critical_with_hint, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), ZExt))
__OMP_RTL_ATTRS(__kmpc_end_critical, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))

__OMP_RTL_ATTRS(__kmpc_begin, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_end, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs))

__OMP_RTL_ATTRS(__kmpc_reduce, BarrierAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt,
                           ReadOnlyPtrAttrs, AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_reduce_nowait, BarrierAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt,
                           ReadOnlyPtrAttrs, AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_end_reduce, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_end_reduce_nowait, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))

__OMP_RTL_ATTRS(__kmpc_ordered, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_end_ordered, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))

__OMP_RTL_ATTRS(__kmpc_for_static_init_4, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_for_static_init_4u, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_for_static_init_8, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_for_static_init_8u, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_for_static_fini, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_distribute_static_init_4, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_distribute_static_init_4u, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_distribute_static_init_8, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_distribute_static_init_8u, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_distribute_static_fini, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_4, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs, SExt,
                           SExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_4u, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs, ZExt,
                           ZExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_8, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dist_dispatch_init_8u, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dispatch_init_4, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SExt, SExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_init_4u, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ZExt, ZExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_init_8, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_init_8u, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_next_4, GetterArgWriteAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dispatch_next_4u, GetterArgWriteAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dispatch_next_8, GetterArgWriteAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dispatch_next_8u, GetterArgWriteAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dispatch_fini_4, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_fini_4u, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_fini_8, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_dispatch_fini_8u, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_team_static_init_4, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_team_static_init_4u, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_team_static_init_8, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_team_static_init_8u, GetterArgWriteAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_4, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_4u, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs,
                           ArgPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_8, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_dist_for_static_init_8u, GetterArgWriteAttrs,
                AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ArgPtrAttrs,
                           ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs, ArgPtrAttrs))

__OMP_RTL_ATTRS(__kmpc_single, BarrierAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_end_single, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))

__OMP_RTL_ATTRS(__kmpc_omp_task_alloc, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt, SizeTyExt,
                           ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_omp_task, DefaultAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_end_taskgroup, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_taskgroup, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_task_begin_if0, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_task_complete_if0, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_task_with_deps, DefaultAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), SExt,
                           ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_taskloop, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), SExt,
                           ArgPtrAttrs, ArgPtrAttrs, AttributeSet(), SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_target_task_alloc, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SizeTyExt, SizeTyExt,
                           ReadOnlyPtrAttrs, AttributeSet()))
__OMP_RTL_ATTRS(__kmpc_taskred_modifier_init, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_taskred_init, DefaultAttrs, AttributeSet(),
                ParamAttrs(SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_task_reduction_modifier_fini, BarrierAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_task_reduction_get_th_data, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(SExt))
__OMP_RTL_ATTRS(__kmpc_task_reduction_init, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_task_reduction_modifier_init, DefaultAttrs,
                ReturnPtrAttrs, ParamAttrs(AttributeSet(), SExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_proxy_task_completed_ooo, DefaultAttrs, AttributeSet(),
                ParamAttrs())

__OMP_RTL_ATTRS(__kmpc_omp_wait_deps, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_omp_taskwait_deps_51, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_cancellationpoint, DefaultAttrs, SExt,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))

__OMP_RTL_ATTRS(__kmpc_fork_teams, ForkAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_push_num_teams, InaccessibleArgOnlyAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt, SExt))

__OMP_RTL_ATTRS(__kmpc_copyprivate, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SizeTyExt,
                           ReadOnlyPtrAttrs, AttributeSet(), SExt))
__OMP_RTL_ATTRS(__kmpc_threadprivate_cached, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(ReadOnlyPtrAttrs, SExt, AttributeSet(), SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_threadprivate_register, DefaultAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, AttributeSet(), ReadOnlyPtrAttrs,
                           ReadOnlyPtrAttrs, ReadOnlyPtrAttrs))

__OMP_RTL_ATTRS(__kmpc_doacross_init, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_doacross_post, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_doacross_wait, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt, ReadOnlyPtrAttrs))
__OMP_RTL_ATTRS(__kmpc_doacross_fini, BarrierAttrs, AttributeSet(),
                ParamAttrs(ReadOnlyPtrAttrs, SExt))

__OMP_RTL_ATTRS(__kmpc_alloc_shared,
                AttributeSet(EnumAttr(NoUnwind), EnumAttr(NoSync),
                             AllocSizeAttr(0, std::nullopt)),
                ReturnPtrAttrs, ParamAttrs(SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_free_shared, DeviceAllocAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(EnumAttr(NoCapture),
                                        EnumAttr(AllocatedPointer)),
                           SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_begin_sharing_variables, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SizeTyExt))

__OMP_RTL_ATTRS(__kmpc_alloc, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(SExt, SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_aligned_alloc, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(SExt, SizeTyExt, SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_free, AllocAttrs, AttributeSet(),
                ParamAttrs(SExt))

__OMP_RTL_ATTRS(__tgt_interop_init, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SExt, AttributeSet(), SExt,
                           SExt, AttributeSet(), AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_interop_destroy, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SExt, AttributeSet(), SExt, SExt,
                           AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_interop_use, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SExt, AttributeSet(), SExt, SExt,
                           AttributeSet(), SExt))

__OMP_RTL_ATTRS(__kmpc_init_allocator, DefaultAttrs, ReturnPtrAttrs,
                ParamAttrs(SExt, AttributeSet(), SExt))
__OMP_RTL_ATTRS(__kmpc_destroy_allocator, AllocAttrs, AttributeSet(),
                ParamAttrs(SExt))

__OMP_RTL_ATTRS(__kmpc_push_target_tripcount_mapper, SetterAttrs,
                AttributeSet(), ParamAttrs())
__OMP_RTL_ATTRS(__tgt_target_mapper, ForkAttrs, SExt,
                ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(),
                           SExt))
__OMP_RTL_ATTRS(__tgt_target_nowait_mapper, ForkAttrs, SExt,
                ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt,
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           SExt, AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_target_teams_mapper, ForkAttrs, SExt,
                ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt,
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           AttributeSet(), AttributeSet(), AttributeSet(), SExt,
                           SExt))
__OMP_RTL_ATTRS(__tgt_target_teams_nowait_mapper, ForkAttrs, SExt,
                ParamAttrs(AttributeSet(), AttributeSet(), AttributeSet(), SExt,
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           SExt, SExt, SExt, AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_target_kernel, ForkAttrs, SExt,
                ParamAttrs(AttributeSet(), AttributeSet(), SExt, SExt))
__OMP_RTL_ATTRS(__tgt_target_kernel_nowait, ForkAttrs, SExt,
                ParamAttrs(AttributeSet(), AttributeSet(), SExt, SExt,
                           AttributeSet(), AttributeSet(), SExt, AttributeSet(),
                           SExt))
__OMP_RTL_ATTRS(__tgt_register_requires, ForkAttrs, AttributeSet(),
                ParamAttrs())
__OMP_RTL_ATTRS(__tgt_target_data_begin_mapper, ForkAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_target_data_begin_nowait_mapper, ForkAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt, AttributeSet(),
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__tgt_target_data_begin_mapper_issue, AttributeSet(),
                AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_target_data_end_mapper, ForkAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_target_data_end_nowait_mapper, ForkAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt, AttributeSet(),
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__tgt_target_data_update_mapper, ForkAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt))
__OMP_RTL_ATTRS(__tgt_target_data_update_nowait_mapper, ForkAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), AttributeSet(), SExt, AttributeSet(),
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           AttributeSet(), AttributeSet()))
__OMP_RTL_ATTRS(__tgt_mapper_num_components, ForkAttrs, AttributeSet(),
                ParamAttrs())
__OMP_RTL_ATTRS(__tgt_push_mapper_component, ForkAttrs, AttributeSet(),
                ParamAttrs())
__OMP_RTL_ATTRS(__kmpc_task_allow_completion_event, DefaultAttrs,
                ReturnPtrAttrs, ParamAttrs(ReadOnlyPtrAttrs, SExt))

__OMP_RTL_ATTRS(__kmpc_target_init, AttributeSet(), SExt,
                ParamAttrs(AttributeSet(), SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_target_deinit, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SExt))
__OMP_RTL_ATTRS(__kmpc_parallel_51, AlwaysInlineAttrs, AttributeSet(),
                ParamAttrs(AttributeSet(), SExt, SExt, SExt, SExt,
                           AttributeSet(), AttributeSet(), AttributeSet(),
                           SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_serialized_parallel, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_end_serialized_parallel, InaccessibleArgOnlyAttrs,
                AttributeSet(), ParamAttrs(ReadOnlyPtrAttrs, SExt))
__OMP_RTL_ATTRS(__kmpc_shuffle_int32, AttributeSet(), SExt,
                ParamAttrs(SExt, SExt, SExt))
__OMP_RTL_ATTRS(__kmpc_nvptx_parallel_reduce_nowait_v2, AttributeSet(), SExt,
                ParamAttrs(AttributeSet(), SExt, SExt, SizeTyExt))
__OMP_RTL_ATTRS(__kmpc_nvptx_end_reduce_nowait, AttributeSet(), AttributeSet(),
                ParamAttrs(SExt))
__OMP_RTL_ATTRS(__kmpc_nvptx_teams_reduce_nowait_v2, AttributeSet(), SExt,
                ParamAttrs(AttributeSet(), SExt, AttributeSet(), ZExt))

__OMP_RTL_ATTRS(__kmpc_shuffle_int64, AttributeSet(), AttributeSet(),
                ParamAttrs(AttributeSet(), SExt, SExt))

__OMP_RTL_ATTRS(__kmpc_is_spmd_exec_mode, AttributeSet(), SExt, ParamAttrs())

#undef __OMP_RTL_ATTRS
#undef OMP_RTL_ATTRS
#undef AttributeSet
#undef EnumAttr
#undef EnumAttrInt
#undef ParamAttrs
#undef AllocSizeAttr
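
// Sketch of how a row of the table above is applied. The
// AttributeList::get(Ctx, FnAttrSet, RetAttrSet, ArgAttrSets) overload is
// real LLVM API; the surrounding dispatch is illustrative only. For the
// runtime function matching a given OMPRTL_* enum, a consumer can do:
//
//   Fn.setAttributes(
//       AttributeList::get(Ctx, FnAttrSet, RetAttrSet, ArgAttrSets));
//
// with the three sets taken from the matching __OMP_RTL_ATTRS row.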

///}

/// KMP ident_t bit flags
///
/// In accordance with the values in `openmp/runtime/src/kmp.h`.
///
///{

#ifndef OMP_IDENT_FLAG
#define OMP_IDENT_FLAG(Enum, Str, Value)
#endif

#define __OMP_IDENT_FLAG(Name, Value)                                          \
  OMP_IDENT_FLAG(OMP_IDENT_FLAG_##Name, #Name, Value)

__OMP_IDENT_FLAG(KMPC, 0x02)
__OMP_IDENT_FLAG(ATOMIC_REDUCE, 0x10)
__OMP_IDENT_FLAG(BARRIER_EXPL, 0x20)
__OMP_IDENT_FLAG(BARRIER_IMPL, 0x0040)
__OMP_IDENT_FLAG(BARRIER_IMPL_MASK, 0x01C0)
__OMP_IDENT_FLAG(BARRIER_IMPL_FOR, 0x0040)
__OMP_IDENT_FLAG(BARRIER_IMPL_SECTIONS, 0x00C0)
__OMP_IDENT_FLAG(BARRIER_IMPL_SINGLE, 0x0140)
__OMP_IDENT_FLAG(BARRIER_IMPL_WORKSHARE, 0x01C0)

#undef __OMP_IDENT_FLAG
#undef OMP_IDENT_FLAG
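
// Sketch of the matching consumer (the IdentFlag enum in
// llvm/Frontend/OpenMP/OMPConstants.h follows this shape) and of how the
// implicit-barrier field is decoded via the mask above:
//
//   enum class IdentFlag {
//   #define OMP_IDENT_FLAG(Enum, Str, Value) Enum = Value,
//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
//   };
//
//   // A flags word encodes an implicit single barrier iff
//   //   (Flags & BARRIER_IMPL_MASK) == BARRIER_IMPL_SINGLE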

///}

/// KMP cancel kind
///
///{

#ifndef OMP_CANCEL_KIND
#define OMP_CANCEL_KIND(Enum, Str, DirectiveEnum, Value)
#endif

#define __OMP_CANCEL_KIND(Name, Value)                                         \
  OMP_CANCEL_KIND(OMP_CANCEL_KIND_##Name, #Name, OMPD_##Name, Value)

__OMP_CANCEL_KIND(parallel, 1)
__OMP_CANCEL_KIND(for, 2)
__OMP_CANCEL_KIND(sections, 3)
__OMP_CANCEL_KIND(taskgroup, 4)

#undef __OMP_CANCEL_KIND
#undef OMP_CANCEL_KIND

///}

/// Default kinds
///
///{

#ifndef OMP_DEFAULT_KIND
#define OMP_DEFAULT_KIND(Enum, Str)
#endif

#define __OMP_DEFAULT_KIND(Name) OMP_DEFAULT_KIND(OMP_DEFAULT_##Name, #Name)

__OMP_DEFAULT_KIND(none)
__OMP_DEFAULT_KIND(shared)
__OMP_DEFAULT_KIND(private)
__OMP_DEFAULT_KIND(firstprivate)
__OMP_DEFAULT_KIND(unknown)

#undef __OMP_DEFAULT_KIND
#undef OMP_DEFAULT_KIND

///}

/// Proc bind kinds
///
///{

#ifndef OMP_PROC_BIND_KIND
#define OMP_PROC_BIND_KIND(Enum, Str, Value)
#endif

#define __OMP_PROC_BIND_KIND(Name, Value)                                      \
  OMP_PROC_BIND_KIND(OMP_PROC_BIND_##Name, #Name, Value)

__OMP_PROC_BIND_KIND(master, 2)
__OMP_PROC_BIND_KIND(close, 3)
__OMP_PROC_BIND_KIND(spread, 4)
__OMP_PROC_BIND_KIND(primary, 5)
__OMP_PROC_BIND_KIND(default, 6)
__OMP_PROC_BIND_KIND(unknown, 7)

#undef __OMP_PROC_BIND_KIND
#undef OMP_PROC_BIND_KIND

///}

/// OpenMP context related definitions:
///  - trait set selector
///  - trait selector
///  - trait property
///
///{

#ifndef OMP_TRAIT_SET
#define OMP_TRAIT_SET(Enum, Str)
#endif
#ifndef OMP_TRAIT_SELECTOR
#define OMP_TRAIT_SELECTOR(Enum, TraitSetEnum, Str, RequiresProperty)
#endif
#ifndef OMP_TRAIT_PROPERTY
#define OMP_TRAIT_PROPERTY(Enum, TraitSetEnum, TraitSelectorEnum, Str)
#endif
#ifndef OMP_LAST_TRAIT_PROPERTY
#define OMP_LAST_TRAIT_PROPERTY(Enum)
#endif

#define __OMP_TRAIT_SET(Name) OMP_TRAIT_SET(Name, #Name)
#define __OMP_TRAIT_SELECTOR(TraitSet, Name, RequiresProperty)                 \
  OMP_TRAIT_SELECTOR(TraitSet##_##Name, TraitSet, #Name, RequiresProperty)
#define __OMP_TRAIT_SELECTOR_AND_PROPERTY(TraitSet, Name)                      \
  OMP_TRAIT_SELECTOR(TraitSet##_##Name, TraitSet, #Name, false)                \
  OMP_TRAIT_PROPERTY(TraitSet##_##Name##_##Name, TraitSet, TraitSet##_##Name,  \
                     #Name)
#define __OMP_TRAIT_PROPERTY(TraitSet, TraitSelector, Name)                    \
  OMP_TRAIT_PROPERTY(TraitSet##_##TraitSelector##_##Name, TraitSet,            \
                     TraitSet##_##TraitSelector, #Name)
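
// For example, __OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, target) expands,
// by straightforward token pasting, to:
//
//   OMP_TRAIT_SELECTOR(construct_target, construct, "target", false)
//   OMP_TRAIT_PROPERTY(construct_target_target, construct, construct_target,
//                      "target")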

// "invalid" must go first.
OMP_TRAIT_SET(invalid, "invalid")
OMP_TRAIT_SELECTOR(invalid, invalid, "invalid", false)
OMP_TRAIT_PROPERTY(invalid, invalid, invalid, "invalid")

__OMP_TRAIT_SET(construct)
__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, target)
__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, teams)
__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, parallel)
__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, for)
__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, simd)

__OMP_TRAIT_SET(device)

__OMP_TRAIT_SELECTOR(device, kind, true)

__OMP_TRAIT_PROPERTY(device, kind, host)
__OMP_TRAIT_PROPERTY(device, kind, nohost)
__OMP_TRAIT_PROPERTY(device, kind, cpu)
__OMP_TRAIT_PROPERTY(device, kind, gpu)
__OMP_TRAIT_PROPERTY(device, kind, fpga)
__OMP_TRAIT_PROPERTY(device, kind, any)

__OMP_TRAIT_SELECTOR(device, arch, true)

__OMP_TRAIT_PROPERTY(device, arch, arm)
__OMP_TRAIT_PROPERTY(device, arch, armeb)
__OMP_TRAIT_PROPERTY(device, arch, aarch64)
__OMP_TRAIT_PROPERTY(device, arch, aarch64_be)
__OMP_TRAIT_PROPERTY(device, arch, aarch64_32)
__OMP_TRAIT_PROPERTY(device, arch, ppc)
__OMP_TRAIT_PROPERTY(device, arch, ppcle)
__OMP_TRAIT_PROPERTY(device, arch, ppc64)
__OMP_TRAIT_PROPERTY(device, arch, ppc64le)
__OMP_TRAIT_PROPERTY(device, arch, x86)
__OMP_TRAIT_PROPERTY(device, arch, x86_64)
__OMP_TRAIT_PROPERTY(device, arch, amdgcn)
__OMP_TRAIT_PROPERTY(device, arch, nvptx)
__OMP_TRAIT_PROPERTY(device, arch, nvptx64)

__OMP_TRAIT_SET(implementation)

__OMP_TRAIT_SELECTOR(implementation, vendor, true)

__OMP_TRAIT_PROPERTY(implementation, vendor, amd)
__OMP_TRAIT_PROPERTY(implementation, vendor, arm)
__OMP_TRAIT_PROPERTY(implementation, vendor, bsc)
__OMP_TRAIT_PROPERTY(implementation, vendor, cray)
__OMP_TRAIT_PROPERTY(implementation, vendor, fujitsu)
__OMP_TRAIT_PROPERTY(implementation, vendor, gnu)
__OMP_TRAIT_PROPERTY(implementation, vendor, ibm)
__OMP_TRAIT_PROPERTY(implementation, vendor, intel)
__OMP_TRAIT_PROPERTY(implementation, vendor, llvm)
__OMP_TRAIT_PROPERTY(implementation, vendor, nec)
__OMP_TRAIT_PROPERTY(implementation, vendor, nvidia)
__OMP_TRAIT_PROPERTY(implementation, vendor, pgi)
__OMP_TRAIT_PROPERTY(implementation, vendor, ti)
__OMP_TRAIT_PROPERTY(implementation, vendor, unknown)

__OMP_TRAIT_SELECTOR(implementation, extension, true)
__OMP_TRAIT_PROPERTY(implementation, extension, match_all)
__OMP_TRAIT_PROPERTY(implementation, extension, match_any)
__OMP_TRAIT_PROPERTY(implementation, extension, match_none)
__OMP_TRAIT_PROPERTY(implementation, extension, disable_implicit_base)
__OMP_TRAIT_PROPERTY(implementation, extension, allow_templates)
__OMP_TRAIT_PROPERTY(implementation, extension, bind_to_declaration)

__OMP_TRAIT_SET(user)

__OMP_TRAIT_SELECTOR(user, condition, true)

__OMP_TRAIT_PROPERTY(user, condition, true)
__OMP_TRAIT_PROPERTY(user, condition, false)
__OMP_TRAIT_PROPERTY(user, condition, unknown)

__OMP_TRAIT_SELECTOR_AND_PROPERTY(construct, dispatch)

// Note that we put isa last so that the other conditions are checked first.
// This allows us to issue warnings about isa only if everything else matches.
__OMP_TRAIT_SELECTOR(device, isa, true)

// We use "__ANY" as a placeholder in the isa property to denote the
// conceptual "any", not the literal `any` used in kind. The string we
// we use is not important except that it will show up in diagnostics.
OMP_TRAIT_PROPERTY(device_isa___ANY, device, device_isa,
                   "<any, entirely target dependent>")


#undef OMP_TRAIT_SET
#undef __OMP_TRAIT_SET
///}

/// Traits for the requires directive
///
/// These will (potentially) become trait selectors for the OpenMP context if
/// the OMP_REQUIRES_TRAIT macro is not defined.
///
///{

#ifdef OMP_REQUIRES_TRAIT
#define __OMP_REQUIRES_TRAIT(Name)                                             \
  OMP_REQUIRES_TRAIT(OMP_REQUIRES_TRAIT_##Name, #Name)
#else
#define __OMP_REQUIRES_TRAIT(Name)                                             \
  __OMP_TRAIT_SELECTOR_AND_PROPERTY(implementation, Name)
#endif
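
// A hypothetical consumer of the first mode (names are illustrative, not a
// confirmed LLVM API):
//
//   enum class RequiresTrait {
//   #define OMP_REQUIRES_TRAIT(Enum, Str) Enum,
//   #include "llvm/Frontend/OpenMP/OMPKinds.def"
//   };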

__OMP_REQUIRES_TRAIT(unified_address)
__OMP_REQUIRES_TRAIT(unified_shared_memory)
__OMP_REQUIRES_TRAIT(reverse_offload)
__OMP_REQUIRES_TRAIT(dynamic_allocators)
__OMP_REQUIRES_TRAIT(atomic_default_mem_order)

OMP_LAST_TRAIT_PROPERTY(
    implementation_atomic_default_mem_order_atomic_default_mem_order)

#undef __OMP_TRAIT_SELECTOR_AND_PROPERTY
#undef OMP_TRAIT_SELECTOR
#undef __OMP_TRAIT_SELECTOR
#undef OMP_TRAIT_PROPERTY
#undef OMP_LAST_TRAIT_PROPERTY
#undef __OMP_TRAIT_PROPERTY
#undef __OMP_REQUIRES_TRAIT
#undef OMP_REQUIRES_TRAIT
///}


/// Assumption clauses
///
///{

#ifdef OMP_ASSUME_CLAUSE
#define __OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, HasExpression) \
OMP_ASSUME_CLAUSE(Identifier, StartsWith, HasDirectiveList, HasExpression)
#else
#define __OMP_ASSUME_CLAUSE(...)
#endif

__OMP_ASSUME_CLAUSE(llvm::StringLiteral("ext_"), true, false, false)
__OMP_ASSUME_CLAUSE(llvm::StringLiteral("absent"), false, true, false)
__OMP_ASSUME_CLAUSE(llvm::StringLiteral("contains"), false, true, false)
__OMP_ASSUME_CLAUSE(llvm::StringLiteral("holds"), false, false, true)
__OMP_ASSUME_CLAUSE(llvm::StringLiteral("no_openmp"), false, false, false)
__OMP_ASSUME_CLAUSE(llvm::StringLiteral("no_openmp_routines"), false, false, false)
__OMP_ASSUME_CLAUSE(llvm::StringLiteral("no_parallelism"), false, false, false)

#undef __OMP_ASSUME_CLAUSE
#undef OMP_ASSUME_CLAUSE
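
// The four columns above are, in order: the clause spelling (a prefix when
// StartsWith is true, as for "ext_"), whether it matches as a prefix,
// whether it takes a directive-name list, and whether it takes an
// expression. A sketch of a consumer-side row type (field names are
// assumptions):
//
//   struct AssumptionClauseMappingInfo {
//     StringRef Identifier;
//     bool StartsWith, HasDirectiveList, HasExpression;
//   };
//   // One initializer per __OMP_ASSUME_CLAUSE line above, e.g.
//   //   {llvm::StringLiteral("holds"), false, false, true}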
///}
// File: Frontend/OpenMP/OMP.inc (generated)
#ifdef GEN_FLANG_DIRECTIVE_CLAUSE_SETS
#undef GEN_FLANG_DIRECTIVE_CLAUSE_SETS

namespace llvm {
namespace omp {

  // Sets for allocate

  static OmpClauseSet allowedClauses_OMPD_allocate {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_allocate {
    llvm::omp::Clause::OMPC_allocator,
    llvm::omp::Clause::OMPC_align,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_allocate {
  };

  static OmpClauseSet requiredClauses_OMPD_allocate {
  };
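
  // These per-directive sets back clause validation in the Flang frontend.
  // A sketch of the intended query; isAllowedClauseForDirective is the
  // generated helper in llvm/Frontend/OpenMP, but treat the exact signature
  // as an assumption (Version is the OpenMP spec version, e.g. 51):
  //
  //   bool Ok = llvm::omp::isAllowedClauseForDirective(
  //       llvm::omp::Directive::OMPD_allocate,
  //       llvm::omp::Clause::OMPC_align, /*Version=*/51);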

  // Sets for assumes

  static OmpClauseSet allowedClauses_OMPD_assumes {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_assumes {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_assumes {
  };

  static OmpClauseSet requiredClauses_OMPD_assumes {
  };

  // Sets for atomic

  static OmpClauseSet allowedClauses_OMPD_atomic {
    llvm::omp::Clause::OMPC_read,
    llvm::omp::Clause::OMPC_write,
    llvm::omp::Clause::OMPC_update,
    llvm::omp::Clause::OMPC_capture,
    llvm::omp::Clause::OMPC_compare,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_atomic {
    llvm::omp::Clause::OMPC_seq_cst,
    llvm::omp::Clause::OMPC_acq_rel,
    llvm::omp::Clause::OMPC_acquire,
    llvm::omp::Clause::OMPC_release,
    llvm::omp::Clause::OMPC_relaxed,
    llvm::omp::Clause::OMPC_hint,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_atomic {
  };

  static OmpClauseSet requiredClauses_OMPD_atomic {
  };

  // Sets for barrier

  static OmpClauseSet allowedClauses_OMPD_barrier {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_barrier {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_barrier {
  };

  static OmpClauseSet requiredClauses_OMPD_barrier {
  };

  // Sets for begin assumes

  static OmpClauseSet allowedClauses_OMPD_begin_assumes {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_begin_assumes {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_begin_assumes {
  };

  static OmpClauseSet requiredClauses_OMPD_begin_assumes {
  };

  // Sets for begin declare target

  static OmpClauseSet allowedClauses_OMPD_begin_declare_target {
    llvm::omp::Clause::OMPC_to,
    llvm::omp::Clause::OMPC_link,
    llvm::omp::Clause::OMPC_device_type,
    llvm::omp::Clause::OMPC_indirect,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_begin_declare_target {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_begin_declare_target {
  };

  static OmpClauseSet requiredClauses_OMPD_begin_declare_target {
  };

  // Sets for begin declare variant

  static OmpClauseSet allowedClauses_OMPD_begin_declare_variant {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_begin_declare_variant {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_begin_declare_variant {
  };

  static OmpClauseSet requiredClauses_OMPD_begin_declare_variant {
  };

  // Sets for cancel

  static OmpClauseSet allowedClauses_OMPD_cancel {
    llvm::omp::Clause::OMPC_if,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_cancel {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_cancel {
  };

  static OmpClauseSet requiredClauses_OMPD_cancel {
  };

  // Sets for cancellation point

  static OmpClauseSet allowedClauses_OMPD_cancellation_point {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_cancellation_point {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_cancellation_point {
  };

  static OmpClauseSet requiredClauses_OMPD_cancellation_point {
  };

  // Sets for critical

  static OmpClauseSet allowedClauses_OMPD_critical {
    llvm::omp::Clause::OMPC_hint,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_critical {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_critical {
  };

  static OmpClauseSet requiredClauses_OMPD_critical {
  };

  // Sets for declare mapper

  static OmpClauseSet allowedClauses_OMPD_declare_mapper {
    llvm::omp::Clause::OMPC_map,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_declare_mapper {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_declare_mapper {
  };

  static OmpClauseSet requiredClauses_OMPD_declare_mapper {
  };

  // Sets for declare reduction

  static OmpClauseSet allowedClauses_OMPD_declare_reduction {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_declare_reduction {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_declare_reduction {
  };

  static OmpClauseSet requiredClauses_OMPD_declare_reduction {
  };

  // Sets for declare simd

  static OmpClauseSet allowedClauses_OMPD_declare_simd {
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_uniform,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_declare_simd {
    llvm::omp::Clause::OMPC_simdlen,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_declare_simd {
    llvm::omp::Clause::OMPC_inbranch,
    llvm::omp::Clause::OMPC_notinbranch,
  };

  static OmpClauseSet requiredClauses_OMPD_declare_simd {
  };

  // Sets for declare target

  static OmpClauseSet allowedClauses_OMPD_declare_target {
    llvm::omp::Clause::OMPC_to,
    llvm::omp::Clause::OMPC_link,
    llvm::omp::Clause::OMPC_indirect,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_declare_target {
    llvm::omp::Clause::OMPC_device_type,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_declare_target {
  };

  static OmpClauseSet requiredClauses_OMPD_declare_target {
  };

  // Sets for declare variant

  static OmpClauseSet allowedClauses_OMPD_declare_variant {
    llvm::omp::Clause::OMPC_match,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_declare_variant {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_declare_variant {
    llvm::omp::Clause::OMPC_adjust_args,
    llvm::omp::Clause::OMPC_append_args,
  };

  static OmpClauseSet requiredClauses_OMPD_declare_variant {
  };

  // Sets for depobj

  static OmpClauseSet allowedClauses_OMPD_depobj {
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_destroy,
    llvm::omp::Clause::OMPC_update,
    llvm::omp::Clause::OMPC_depobj,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_depobj {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_depobj {
  };

  static OmpClauseSet requiredClauses_OMPD_depobj {
  };

  // Sets for distribute

  static OmpClauseSet allowedClauses_OMPD_distribute {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_distribute {
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_distribute {
  };

  static OmpClauseSet requiredClauses_OMPD_distribute {
  };

  // Sets for distribute parallel do

  static OmpClauseSet allowedClauses_OMPD_distribute_parallel_do {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_linear,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_do {
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_do {
  };

  static OmpClauseSet requiredClauses_OMPD_distribute_parallel_do {
  };

  // Sets for distribute parallel do simd

  static OmpClauseSet allowedClauses_OMPD_distribute_parallel_do_simd {
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_do_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_distribute_parallel_do_simd {
  };

  // Sets for distribute parallel for

  static OmpClauseSet allowedClauses_OMPD_distribute_parallel_for {
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_for {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_for {
  };

  static OmpClauseSet requiredClauses_OMPD_distribute_parallel_for {
  };

  // Sets for distribute parallel for simd

  static OmpClauseSet allowedClauses_OMPD_distribute_parallel_for_simd {
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_distribute_parallel_for_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_parallel_for_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_distribute_parallel_for_simd {
  };

  // Sets for distribute simd

  static OmpClauseSet allowedClauses_OMPD_distribute_simd {
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_distribute_simd {
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_distribute_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_distribute_simd {
  };

  // Sets for do

  static OmpClauseSet allowedClauses_OMPD_do {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_reduction,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_do {
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_do {
  };

  static OmpClauseSet requiredClauses_OMPD_do {
  };

  // Sets for do simd

  static OmpClauseSet allowedClauses_OMPD_do_simd {
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_reduction,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_do_simd {
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_do_simd {
  };

  // Sets for end assumes

  static OmpClauseSet allowedClauses_OMPD_end_assumes {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_assumes {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_assumes {
  };

  static OmpClauseSet requiredClauses_OMPD_end_assumes {
  };

  // Sets for end declare target

  static OmpClauseSet allowedClauses_OMPD_end_declare_target {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_declare_target {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_declare_target {
  };

  static OmpClauseSet requiredClauses_OMPD_end_declare_target {
  };

  // Sets for end declare variant

  static OmpClauseSet allowedClauses_OMPD_end_declare_variant {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_declare_variant {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_declare_variant {
  };

  static OmpClauseSet requiredClauses_OMPD_end_declare_variant {
  };

  // Sets for end do

  static OmpClauseSet allowedClauses_OMPD_end_do {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_do {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_do {
  };

  static OmpClauseSet requiredClauses_OMPD_end_do {
  };

  // Sets for end do simd

  static OmpClauseSet allowedClauses_OMPD_end_do_simd {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_do_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_end_do_simd {
  };

  // Sets for end sections

  static OmpClauseSet allowedClauses_OMPD_end_sections {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_sections {
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_sections {
  };

  static OmpClauseSet requiredClauses_OMPD_end_sections {
  };

  // Sets for end single

  static OmpClauseSet allowedClauses_OMPD_end_single {
    llvm::omp::Clause::OMPC_copyprivate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_single {
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_single {
  };

  static OmpClauseSet requiredClauses_OMPD_end_single {
  };

  // Sets for end workshare

  static OmpClauseSet allowedClauses_OMPD_end_workshare {
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_end_workshare {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_end_workshare {
  };

  static OmpClauseSet requiredClauses_OMPD_end_workshare {
  };

  // Sets for error

  static OmpClauseSet allowedClauses_OMPD_error {
    llvm::omp::Clause::OMPC_at,
    llvm::omp::Clause::OMPC_severity,
    llvm::omp::Clause::OMPC_message,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_error {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_error {
  };

  static OmpClauseSet requiredClauses_OMPD_error {
  };

  // Sets for flush

  static OmpClauseSet allowedClauses_OMPD_flush {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_flush {
    llvm::omp::Clause::OMPC_acq_rel,
    llvm::omp::Clause::OMPC_acquire,
    llvm::omp::Clause::OMPC_release,
    llvm::omp::Clause::OMPC_flush,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_flush {
  };

  static OmpClauseSet requiredClauses_OMPD_flush {
  };

  // Sets for for

  static OmpClauseSet allowedClauses_OMPD_for {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_for {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_for {
  };

  static OmpClauseSet requiredClauses_OMPD_for {
  };

  // Sets for for simd

  static OmpClauseSet allowedClauses_OMPD_for_simd {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_for_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_for_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_for_simd {
  };

  // Sets for masked taskloop

  static OmpClauseSet allowedClauses_OMPD_masked_taskloop {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_filter,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_masked_taskloop {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_masked_taskloop {
  };

  static OmpClauseSet requiredClauses_OMPD_masked_taskloop {
  };

  // Sets for masked taskloop simd

  static OmpClauseSet allowedClauses_OMPD_masked_taskloop_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_filter,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_masked_taskloop_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_masked_taskloop_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_masked_taskloop_simd {
  };

  // Sets for master

  static OmpClauseSet allowedClauses_OMPD_master {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_master {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_master {
  };

  static OmpClauseSet requiredClauses_OMPD_master {
  };

  // Sets for master taskloop

  static OmpClauseSet allowedClauses_OMPD_master_taskloop {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_master_taskloop {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_master_taskloop {
  };

  static OmpClauseSet requiredClauses_OMPD_master_taskloop {
  };

  // Sets for master taskloop simd

  static OmpClauseSet allowedClauses_OMPD_master_taskloop_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_master_taskloop_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_master_taskloop_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_master_taskloop_simd {
  };

  // Sets for metadirective

  static OmpClauseSet allowedClauses_OMPD_metadirective {
    llvm::omp::Clause::OMPC_when,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_metadirective {
    llvm::omp::Clause::OMPC_default,
  };
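  // Here DEFAULT denotes the metadirective default clause (the variant used
  // when no WHEN selector matches), not the data-sharing DEFAULT clause;
  // the OMPC_default identifier is reused for both.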

  static OmpClauseSet allowedExclusiveClauses_OMPD_metadirective {
  };

  static OmpClauseSet requiredClauses_OMPD_metadirective {
  };

  // Sets for nothing

  static OmpClauseSet allowedClauses_OMPD_nothing {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_nothing {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_nothing {
  };

  static OmpClauseSet requiredClauses_OMPD_nothing {
  };

  // Sets for ordered

  static OmpClauseSet allowedClauses_OMPD_ordered {
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_doacross,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_ordered {
    llvm::omp::Clause::OMPC_threads,
    llvm::omp::Clause::OMPC_simd,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_ordered {
  };

  static OmpClauseSet requiredClauses_OMPD_ordered {
  };

  // Sets for parallel

  static OmpClauseSet allowedClauses_OMPD_parallel {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel {
  };
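  // How to read these tables (cf. the clause-set fields in
  // llvm/include/llvm/Frontend/Directive/DirectiveBase.td): clauses in an
  // "allowed" set may appear on the directive, possibly repeated; clauses in
  // an "allowedOnce" set may appear at most once (e.g. a single NUM_THREADS
  // on PARALLEL); clauses in an "allowedExclusive" set may appear at most
  // once and are mutually exclusive with one another; clauses in a
  // "required" set must be present on the construct.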

  // Sets for parallel do

  static OmpClauseSet allowedClauses_OMPD_parallel_do {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_do {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_do {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_do {
  };

  // Sets for parallel do simd

  static OmpClauseSet allowedClauses_OMPD_parallel_do_simd {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_do_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_do_simd {
  };

  // Sets for parallel for

  static OmpClauseSet allowedClauses_OMPD_parallel_for {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_for {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_for {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_for {
  };

  // Sets for parallel for simd

  static OmpClauseSet allowedClauses_OMPD_parallel_for_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_for_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_for_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_for_simd {
  };

  // Sets for parallel masked

  static OmpClauseSet allowedClauses_OMPD_parallel_masked {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_filter,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_masked {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_masked {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_masked {
  };

  // Sets for parallel masked taskloop

  static OmpClauseSet allowedClauses_OMPD_parallel_masked_taskloop {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_filter,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_masked_taskloop {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_masked_taskloop {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_masked_taskloop {
  };

  // Sets for parallel masked taskloop simd

  static OmpClauseSet allowedClauses_OMPD_parallel_masked_taskloop_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_filter,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_masked_taskloop_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_masked_taskloop_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_masked_taskloop_simd {
  };

  // Sets for parallel master

  static OmpClauseSet allowedClauses_OMPD_parallel_master {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_master {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_master {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_master {
  };

  // Sets for parallel master taskloop

  static OmpClauseSet allowedClauses_OMPD_parallel_master_taskloop {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_copyin,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_master_taskloop {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_master_taskloop {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_master_taskloop {
  };

  // Sets for parallel master taskloop simd

  static OmpClauseSet allowedClauses_OMPD_parallel_master_taskloop_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_num_tasks,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_master_taskloop_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_master_taskloop_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_master_taskloop_simd {
  };

  // Sets for parallel sections

  static OmpClauseSet allowedClauses_OMPD_parallel_sections {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_sections {
    llvm::omp::Clause::OMPC_num_threads,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_sections {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_sections {
  };

  // Sets for parallel workshare

  static OmpClauseSet allowedClauses_OMPD_parallel_workshare {
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_workshare {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_workshare {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_workshare {
  };

  // Sets for requires

  static OmpClauseSet allowedClauses_OMPD_requires {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_requires {
    llvm::omp::Clause::OMPC_unified_address,
    llvm::omp::Clause::OMPC_unified_shared_memory,
    llvm::omp::Clause::OMPC_reverse_offload,
    llvm::omp::Clause::OMPC_dynamic_allocators,
    llvm::omp::Clause::OMPC_atomic_default_mem_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_requires {
  };

  static OmpClauseSet requiredClauses_OMPD_requires {
  };

  // Sets for scan

  static OmpClauseSet allowedClauses_OMPD_scan {
    llvm::omp::Clause::OMPC_inclusive,
    llvm::omp::Clause::OMPC_exclusive,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_scan {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_scan {
  };

  static OmpClauseSet requiredClauses_OMPD_scan {
  };

  // Sets for section

  static OmpClauseSet allowedClauses_OMPD_section {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_section {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_section {
  };

  static OmpClauseSet requiredClauses_OMPD_section {
  };

  // Sets for sections

  static OmpClauseSet allowedClauses_OMPD_sections {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_sections {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_sections {
  };

  static OmpClauseSet requiredClauses_OMPD_sections {
  };

  // Sets for simd

  static OmpClauseSet allowedClauses_OMPD_simd {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_simd {
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_simd {
  };

  // Sets for single

  static OmpClauseSet allowedClauses_OMPD_single {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_copyprivate,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_single {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_single {
  };

  static OmpClauseSet requiredClauses_OMPD_single {
  };

  // Sets for target

  static OmpClauseSet allowedClauses_OMPD_target {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target {
  };

  static OmpClauseSet requiredClauses_OMPD_target {
  };

  // Sets for target data

  static OmpClauseSet allowedClauses_OMPD_target_data {
    llvm::omp::Clause::OMPC_use_device_ptr,
    llvm::omp::Clause::OMPC_use_device_addr,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_data {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_if,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_data {
  };

  static OmpClauseSet requiredClauses_OMPD_target_data {
    llvm::omp::Clause::OMPC_map,
  };
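  // TARGET DATA is one of the few constructs here with a non-empty required
  // set: a MAP clause must be present (TARGET ENTER DATA and TARGET EXIT
  // DATA below likewise require MAP).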

  // Sets for target enter data

  static OmpClauseSet allowedClauses_OMPD_target_enter_data {
    llvm::omp::Clause::OMPC_depend,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_enter_data {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_enter_data {
  };

  static OmpClauseSet requiredClauses_OMPD_target_enter_data {
    llvm::omp::Clause::OMPC_map,
  };

  // Sets for target exit data

  static OmpClauseSet allowedClauses_OMPD_target_exit_data {
    llvm::omp::Clause::OMPC_depend,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_exit_data {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_exit_data {
  };

  static OmpClauseSet requiredClauses_OMPD_target_exit_data {
    llvm::omp::Clause::OMPC_map,
  };

  // Sets for target parallel

  static OmpClauseSet allowedClauses_OMPD_target_parallel {
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_parallel {
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel {
  };

  static OmpClauseSet requiredClauses_OMPD_target_parallel {
  };

  // Sets for target parallel do

  static OmpClauseSet allowedClauses_OMPD_target_parallel_do {
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_allocator,
    llvm::omp::Clause::OMPC_uses_allocators,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_copyin,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_do {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_do {
  };

  static OmpClauseSet requiredClauses_OMPD_target_parallel_do {
  };

  // Sets for target parallel do simd

  static OmpClauseSet allowedClauses_OMPD_target_parallel_do_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_do_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_target_parallel_do_simd {
  };

  // Sets for target parallel for

  static OmpClauseSet allowedClauses_OMPD_target_parallel_for {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_for {
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_for {
  };

  static OmpClauseSet requiredClauses_OMPD_target_parallel_for {
  };

  // Sets for target parallel for simd

  static OmpClauseSet allowedClauses_OMPD_target_parallel_for_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_for_simd {
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_for_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_target_parallel_for_simd {
  };

  // Sets for target simd

  static OmpClauseSet allowedClauses_OMPD_target_simd {
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_simd {
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_target_simd {
  };

  // Sets for target teams

  static OmpClauseSet allowedClauses_OMPD_target_teams {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_uses_allocators,
    llvm::omp::Clause::OMPC_shared,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams {
  };

  // Sets for target teams distribute

  static OmpClauseSet allowedClauses_OMPD_target_teams_distribute {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_uses_allocators,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_lastprivate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_distribute {
  };

  // Sets for target teams distribute parallel do

  static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_do {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_uses_allocators,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_ordered,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_do {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_do {
  };

  // Sets for target teams distribute parallel do simd

  static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_do_simd {
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_uses_allocators,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_nontemporal,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_do_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_do_simd {
  };

  // Sets for target teams distribute parallel for

  static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_for {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_for {
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_for {
  };

  // Sets for target teams distribute parallel for simd

  static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_parallel_for_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_parallel_for_simd {
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_parallel_for_simd {
  };

  // Sets for target teams distribute simd

  static OmpClauseSet allowedClauses_OMPD_target_teams_distribute_simd {
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_distribute_simd {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_distribute_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_distribute_simd {
  };

  // Sets for target update

  static OmpClauseSet allowedClauses_OMPD_target_update {
    llvm::omp::Clause::OMPC_to,
    llvm::omp::Clause::OMPC_from,
    llvm::omp::Clause::OMPC_depend,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_update {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_update {
  };

  static OmpClauseSet requiredClauses_OMPD_target_update {
  };

  // Sets for task

  static OmpClauseSet allowedClauses_OMPD_task {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_detach,
    llvm::omp::Clause::OMPC_affinity,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_task {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_priority,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_task {
  };

  static OmpClauseSet requiredClauses_OMPD_task {
  };

  // Sets for taskgroup

  static OmpClauseSet allowedClauses_OMPD_taskgroup {
    llvm::omp::Clause::OMPC_task_reduction,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_taskgroup {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_taskgroup {
  };

  static OmpClauseSet requiredClauses_OMPD_taskgroup {
  };

  // Sets for taskloop

  static OmpClauseSet allowedClauses_OMPD_taskloop {
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_untied,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_taskloop {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_priority,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_taskloop {
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_num_tasks,
  };
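  // GRAINSIZE and NUM_TASKS are mutually exclusive on TASKLOOP: each may
  // appear at most once, and never both on the same construct. The same
  // pair is exclusive on TASKLOOP SIMD below.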

  static OmpClauseSet requiredClauses_OMPD_taskloop {
  };

  // Sets for taskloop simd

  static OmpClauseSet allowedClauses_OMPD_taskloop_simd {
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_in_reduction,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_mergeable,
    llvm::omp::Clause::OMPC_nogroup,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_untied,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_taskloop_simd {
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_final,
    llvm::omp::Clause::OMPC_priority,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_taskloop_simd {
    llvm::omp::Clause::OMPC_grainsize,
    llvm::omp::Clause::OMPC_num_tasks,
  };

  static OmpClauseSet requiredClauses_OMPD_taskloop_simd {
  };

  // Sets for taskwait

  static OmpClauseSet allowedClauses_OMPD_taskwait {
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_nowait,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_taskwait {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_taskwait {
  };

  static OmpClauseSet requiredClauses_OMPD_taskwait {
  };

  // Sets for taskyield

  static OmpClauseSet allowedClauses_OMPD_taskyield {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_taskyield {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_taskyield {
  };

  static OmpClauseSet requiredClauses_OMPD_taskyield {
  };

  // Sets for teams

  static OmpClauseSet allowedClauses_OMPD_teams {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams {
  };

  static OmpClauseSet requiredClauses_OMPD_teams {
  };

  // Sets for teams distribute

  static OmpClauseSet allowedClauses_OMPD_teams_distribute {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_allocate,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_distribute {
  };

  // Sets for teams distribute parallel do

  static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_do {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_linear,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_do {
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_ordered,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_do {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_do {
  };

  // Sets for teams distribute parallel do simd

  static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_do_simd {
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_nontemporal,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_do_simd {
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_do_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_do_simd {
  };

  // Sets for teams distribute parallel for

  static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_for {
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_for {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_for {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_for {
  };

  // Sets for teams distribute parallel for simd

  static OmpClauseSet allowedClauses_OMPD_teams_distribute_parallel_for_simd {
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_schedule,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_parallel_for_simd {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_parallel_for_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_distribute_parallel_for_simd {
  };

  // Sets for teams distribute simd

  static OmpClauseSet allowedClauses_OMPD_teams_distribute_simd {
    llvm::omp::Clause::OMPC_aligned,
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_linear,
    llvm::omp::Clause::OMPC_nontemporal,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_distribute_simd {
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_dist_schedule,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_safelen,
    llvm::omp::Clause::OMPC_simdlen,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_distribute_simd {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_distribute_simd {
  };

  // Sets for threadprivate

  static OmpClauseSet allowedClauses_OMPD_threadprivate {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_threadprivate {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_threadprivate {
  };

  static OmpClauseSet requiredClauses_OMPD_threadprivate {
  };

  // Sets for tile

  static OmpClauseSet allowedClauses_OMPD_tile {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_tile {
    llvm::omp::Clause::OMPC_sizes,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_tile {
  };

  static OmpClauseSet requiredClauses_OMPD_tile {
  };

  // Sets for unknown

  static OmpClauseSet allowedClauses_OMPD_unknown {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_unknown {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_unknown {
  };

  static OmpClauseSet requiredClauses_OMPD_unknown {
  };

  // Sets for unroll

  static OmpClauseSet allowedClauses_OMPD_unroll {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_unroll {
    llvm::omp::Clause::OMPC_full,
    llvm::omp::Clause::OMPC_partial,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_unroll {
  };

  static OmpClauseSet requiredClauses_OMPD_unroll {
  };

  // Sets for workshare

  static OmpClauseSet allowedClauses_OMPD_workshare {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_workshare {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_workshare {
  };

  static OmpClauseSet requiredClauses_OMPD_workshare {
  };

  // Sets for dispatch

  static OmpClauseSet allowedClauses_OMPD_dispatch {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_novariants,
    llvm::omp::Clause::OMPC_nocontext,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_dispatch {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_dispatch {
  };

  static OmpClauseSet requiredClauses_OMPD_dispatch {
  };

  // Sets for interop

  static OmpClauseSet allowedClauses_OMPD_interop {
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_destroy,
    llvm::omp::Clause::OMPC_init,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_use,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_interop {
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_interop {
  };

  static OmpClauseSet requiredClauses_OMPD_interop {
  };

  // Sets for loop

  static OmpClauseSet allowedClauses_OMPD_loop {
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_loop {
    llvm::omp::Clause::OMPC_bind,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_order,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_loop {
  };

  static OmpClauseSet requiredClauses_OMPD_loop {
  };

  // Sets for masked

  static OmpClauseSet allowedClauses_OMPD_masked {
  };

  static OmpClauseSet allowedOnceClauses_OMPD_masked {
    llvm::omp::Clause::OMPC_filter,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_masked {
  };

  static OmpClauseSet requiredClauses_OMPD_masked {
  };

  // Sets for parallel loop

  static OmpClauseSet allowedClauses_OMPD_parallel_loop {
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_parallel_loop {
    llvm::omp::Clause::OMPC_bind,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_proc_bind,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_parallel_loop {
  };

  static OmpClauseSet requiredClauses_OMPD_parallel_loop {
  };

  // Sets for target parallel loop

  static OmpClauseSet allowedClauses_OMPD_target_parallel_loop {
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_copyin,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_parallel_loop {
    llvm::omp::Clause::OMPC_bind,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_num_threads,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_proc_bind,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_parallel_loop {
  };

  static OmpClauseSet requiredClauses_OMPD_target_parallel_loop {
  };

  // Sets for target teams loop

  static OmpClauseSet allowedClauses_OMPD_target_teams_loop {
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_depend,
    llvm::omp::Clause::OMPC_defaultmap,
    llvm::omp::Clause::OMPC_device,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_is_device_ptr,
    llvm::omp::Clause::OMPC_has_device_addr,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_map,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
    llvm::omp::Clause::OMPC_uses_allocators,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_target_teams_loop {
    llvm::omp::Clause::OMPC_bind,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_if,
    llvm::omp::Clause::OMPC_nowait,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_thread_limit,
    llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_target_teams_loop {
  };

  static OmpClauseSet requiredClauses_OMPD_target_teams_loop {
  };

  // Sets for teams loop

  static OmpClauseSet allowedClauses_OMPD_teams_loop {
    llvm::omp::Clause::OMPC_allocate,
    llvm::omp::Clause::OMPC_firstprivate,
    llvm::omp::Clause::OMPC_lastprivate,
    llvm::omp::Clause::OMPC_private,
    llvm::omp::Clause::OMPC_reduction,
    llvm::omp::Clause::OMPC_shared,
  };

  static OmpClauseSet allowedOnceClauses_OMPD_teams_loop {
    llvm::omp::Clause::OMPC_bind,
    llvm::omp::Clause::OMPC_collapse,
    llvm::omp::Clause::OMPC_default,
    llvm::omp::Clause::OMPC_num_teams,
    llvm::omp::Clause::OMPC_order,
    llvm::omp::Clause::OMPC_thread_limit,
  };

  static OmpClauseSet allowedExclusiveClauses_OMPD_teams_loop {
  };

  static OmpClauseSet requiredClauses_OMPD_teams_loop {
  };
} // namespace omp
} // namespace llvm

#endif // GEN_FLANG_DIRECTIVE_CLAUSE_SETS

#ifdef GEN_FLANG_DIRECTIVE_CLAUSE_MAP
#undef GEN_FLANG_DIRECTIVE_CLAUSE_MAP
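
// This section expands to the body of a braced initializer that pairs each
// llvm::omp::Directive with the four clause sets generated under
// GEN_FLANG_DIRECTIVE_CLAUSE_SETS, in the order {allowed, allowedOnce,
// allowedExclusive, required}.  The surrounding declaration is supplied by
// the including code; a minimal sketch, with illustrative names only:
//
//   std::unordered_map<llvm::omp::Directive, DirectiveClauses> Table
//   #define GEN_FLANG_DIRECTIVE_CLAUSE_MAP
//   #include "llvm/Frontend/OpenMP/OMP.inc"
//   ;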

{
  {llvm::omp::Directive::OMPD_allocate,
    {
      llvm::omp::allowedClauses_OMPD_allocate,
      llvm::omp::allowedOnceClauses_OMPD_allocate,
      llvm::omp::allowedExclusiveClauses_OMPD_allocate,
      llvm::omp::requiredClauses_OMPD_allocate,
    }
  },
  {llvm::omp::Directive::OMPD_assumes,
    {
      llvm::omp::allowedClauses_OMPD_assumes,
      llvm::omp::allowedOnceClauses_OMPD_assumes,
      llvm::omp::allowedExclusiveClauses_OMPD_assumes,
      llvm::omp::requiredClauses_OMPD_assumes,
    }
  },
  {llvm::omp::Directive::OMPD_atomic,
    {
      llvm::omp::allowedClauses_OMPD_atomic,
      llvm::omp::allowedOnceClauses_OMPD_atomic,
      llvm::omp::allowedExclusiveClauses_OMPD_atomic,
      llvm::omp::requiredClauses_OMPD_atomic,
    }
  },
  {llvm::omp::Directive::OMPD_barrier,
    {
      llvm::omp::allowedClauses_OMPD_barrier,
      llvm::omp::allowedOnceClauses_OMPD_barrier,
      llvm::omp::allowedExclusiveClauses_OMPD_barrier,
      llvm::omp::requiredClauses_OMPD_barrier,
    }
  },
  {llvm::omp::Directive::OMPD_begin_assumes,
    {
      llvm::omp::allowedClauses_OMPD_begin_assumes,
      llvm::omp::allowedOnceClauses_OMPD_begin_assumes,
      llvm::omp::allowedExclusiveClauses_OMPD_begin_assumes,
      llvm::omp::requiredClauses_OMPD_begin_assumes,
    }
  },
  {llvm::omp::Directive::OMPD_begin_declare_target,
    {
      llvm::omp::allowedClauses_OMPD_begin_declare_target,
      llvm::omp::allowedOnceClauses_OMPD_begin_declare_target,
      llvm::omp::allowedExclusiveClauses_OMPD_begin_declare_target,
      llvm::omp::requiredClauses_OMPD_begin_declare_target,
    }
  },
  {llvm::omp::Directive::OMPD_begin_declare_variant,
    {
      llvm::omp::allowedClauses_OMPD_begin_declare_variant,
      llvm::omp::allowedOnceClauses_OMPD_begin_declare_variant,
      llvm::omp::allowedExclusiveClauses_OMPD_begin_declare_variant,
      llvm::omp::requiredClauses_OMPD_begin_declare_variant,
    }
  },
  {llvm::omp::Directive::OMPD_cancel,
    {
      llvm::omp::allowedClauses_OMPD_cancel,
      llvm::omp::allowedOnceClauses_OMPD_cancel,
      llvm::omp::allowedExclusiveClauses_OMPD_cancel,
      llvm::omp::requiredClauses_OMPD_cancel,
    }
  },
  {llvm::omp::Directive::OMPD_cancellation_point,
    {
      llvm::omp::allowedClauses_OMPD_cancellation_point,
      llvm::omp::allowedOnceClauses_OMPD_cancellation_point,
      llvm::omp::allowedExclusiveClauses_OMPD_cancellation_point,
      llvm::omp::requiredClauses_OMPD_cancellation_point,
    }
  },
  {llvm::omp::Directive::OMPD_critical,
    {
      llvm::omp::allowedClauses_OMPD_critical,
      llvm::omp::allowedOnceClauses_OMPD_critical,
      llvm::omp::allowedExclusiveClauses_OMPD_critical,
      llvm::omp::requiredClauses_OMPD_critical,
    }
  },
  {llvm::omp::Directive::OMPD_declare_mapper,
    {
      llvm::omp::allowedClauses_OMPD_declare_mapper,
      llvm::omp::allowedOnceClauses_OMPD_declare_mapper,
      llvm::omp::allowedExclusiveClauses_OMPD_declare_mapper,
      llvm::omp::requiredClauses_OMPD_declare_mapper,
    }
  },
  {llvm::omp::Directive::OMPD_declare_reduction,
    {
      llvm::omp::allowedClauses_OMPD_declare_reduction,
      llvm::omp::allowedOnceClauses_OMPD_declare_reduction,
      llvm::omp::allowedExclusiveClauses_OMPD_declare_reduction,
      llvm::omp::requiredClauses_OMPD_declare_reduction,
    }
  },
  {llvm::omp::Directive::OMPD_declare_simd,
    {
      llvm::omp::allowedClauses_OMPD_declare_simd,
      llvm::omp::allowedOnceClauses_OMPD_declare_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_declare_simd,
      llvm::omp::requiredClauses_OMPD_declare_simd,
    }
  },
  {llvm::omp::Directive::OMPD_declare_target,
    {
      llvm::omp::allowedClauses_OMPD_declare_target,
      llvm::omp::allowedOnceClauses_OMPD_declare_target,
      llvm::omp::allowedExclusiveClauses_OMPD_declare_target,
      llvm::omp::requiredClauses_OMPD_declare_target,
    }
  },
  {llvm::omp::Directive::OMPD_declare_variant,
    {
      llvm::omp::allowedClauses_OMPD_declare_variant,
      llvm::omp::allowedOnceClauses_OMPD_declare_variant,
      llvm::omp::allowedExclusiveClauses_OMPD_declare_variant,
      llvm::omp::requiredClauses_OMPD_declare_variant,
    }
  },
  {llvm::omp::Directive::OMPD_depobj,
    {
      llvm::omp::allowedClauses_OMPD_depobj,
      llvm::omp::allowedOnceClauses_OMPD_depobj,
      llvm::omp::allowedExclusiveClauses_OMPD_depobj,
      llvm::omp::requiredClauses_OMPD_depobj,
    }
  },
  {llvm::omp::Directive::OMPD_distribute,
    {
      llvm::omp::allowedClauses_OMPD_distribute,
      llvm::omp::allowedOnceClauses_OMPD_distribute,
      llvm::omp::allowedExclusiveClauses_OMPD_distribute,
      llvm::omp::requiredClauses_OMPD_distribute,
    }
  },
  {llvm::omp::Directive::OMPD_distribute_parallel_do,
    {
      llvm::omp::allowedClauses_OMPD_distribute_parallel_do,
      llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_do,
      llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_do,
      llvm::omp::requiredClauses_OMPD_distribute_parallel_do,
    }
  },
  {llvm::omp::Directive::OMPD_distribute_parallel_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_distribute_parallel_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_do_simd,
      llvm::omp::requiredClauses_OMPD_distribute_parallel_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_distribute_parallel_for,
    {
      llvm::omp::allowedClauses_OMPD_distribute_parallel_for,
      llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_for,
      llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_for,
      llvm::omp::requiredClauses_OMPD_distribute_parallel_for,
    }
  },
  {llvm::omp::Directive::OMPD_distribute_parallel_for_simd,
    {
      llvm::omp::allowedClauses_OMPD_distribute_parallel_for_simd,
      llvm::omp::allowedOnceClauses_OMPD_distribute_parallel_for_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_distribute_parallel_for_simd,
      llvm::omp::requiredClauses_OMPD_distribute_parallel_for_simd,
    }
  },
  {llvm::omp::Directive::OMPD_distribute_simd,
    {
      llvm::omp::allowedClauses_OMPD_distribute_simd,
      llvm::omp::allowedOnceClauses_OMPD_distribute_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_distribute_simd,
      llvm::omp::requiredClauses_OMPD_distribute_simd,
    }
  },
  {llvm::omp::Directive::OMPD_do,
    {
      llvm::omp::allowedClauses_OMPD_do,
      llvm::omp::allowedOnceClauses_OMPD_do,
      llvm::omp::allowedExclusiveClauses_OMPD_do,
      llvm::omp::requiredClauses_OMPD_do,
    }
  },
  {llvm::omp::Directive::OMPD_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_do_simd,
      llvm::omp::requiredClauses_OMPD_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_end_assumes,
    {
      llvm::omp::allowedClauses_OMPD_end_assumes,
      llvm::omp::allowedOnceClauses_OMPD_end_assumes,
      llvm::omp::allowedExclusiveClauses_OMPD_end_assumes,
      llvm::omp::requiredClauses_OMPD_end_assumes,
    }
  },
  {llvm::omp::Directive::OMPD_end_declare_target,
    {
      llvm::omp::allowedClauses_OMPD_end_declare_target,
      llvm::omp::allowedOnceClauses_OMPD_end_declare_target,
      llvm::omp::allowedExclusiveClauses_OMPD_end_declare_target,
      llvm::omp::requiredClauses_OMPD_end_declare_target,
    }
  },
  {llvm::omp::Directive::OMPD_end_declare_variant,
    {
      llvm::omp::allowedClauses_OMPD_end_declare_variant,
      llvm::omp::allowedOnceClauses_OMPD_end_declare_variant,
      llvm::omp::allowedExclusiveClauses_OMPD_end_declare_variant,
      llvm::omp::requiredClauses_OMPD_end_declare_variant,
    }
  },
  {llvm::omp::Directive::OMPD_end_do,
    {
      llvm::omp::allowedClauses_OMPD_end_do,
      llvm::omp::allowedOnceClauses_OMPD_end_do,
      llvm::omp::allowedExclusiveClauses_OMPD_end_do,
      llvm::omp::requiredClauses_OMPD_end_do,
    }
  },
  {llvm::omp::Directive::OMPD_end_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_end_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_end_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_end_do_simd,
      llvm::omp::requiredClauses_OMPD_end_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_end_sections,
    {
      llvm::omp::allowedClauses_OMPD_end_sections,
      llvm::omp::allowedOnceClauses_OMPD_end_sections,
      llvm::omp::allowedExclusiveClauses_OMPD_end_sections,
      llvm::omp::requiredClauses_OMPD_end_sections,
    }
  },
  {llvm::omp::Directive::OMPD_end_single,
    {
      llvm::omp::allowedClauses_OMPD_end_single,
      llvm::omp::allowedOnceClauses_OMPD_end_single,
      llvm::omp::allowedExclusiveClauses_OMPD_end_single,
      llvm::omp::requiredClauses_OMPD_end_single,
    }
  },
  {llvm::omp::Directive::OMPD_end_workshare,
    {
      llvm::omp::allowedClauses_OMPD_end_workshare,
      llvm::omp::allowedOnceClauses_OMPD_end_workshare,
      llvm::omp::allowedExclusiveClauses_OMPD_end_workshare,
      llvm::omp::requiredClauses_OMPD_end_workshare,
    }
  },
  {llvm::omp::Directive::OMPD_error,
    {
      llvm::omp::allowedClauses_OMPD_error,
      llvm::omp::allowedOnceClauses_OMPD_error,
      llvm::omp::allowedExclusiveClauses_OMPD_error,
      llvm::omp::requiredClauses_OMPD_error,
    }
  },
  {llvm::omp::Directive::OMPD_flush,
    {
      llvm::omp::allowedClauses_OMPD_flush,
      llvm::omp::allowedOnceClauses_OMPD_flush,
      llvm::omp::allowedExclusiveClauses_OMPD_flush,
      llvm::omp::requiredClauses_OMPD_flush,
    }
  },
  {llvm::omp::Directive::OMPD_for,
    {
      llvm::omp::allowedClauses_OMPD_for,
      llvm::omp::allowedOnceClauses_OMPD_for,
      llvm::omp::allowedExclusiveClauses_OMPD_for,
      llvm::omp::requiredClauses_OMPD_for,
    }
  },
  {llvm::omp::Directive::OMPD_for_simd,
    {
      llvm::omp::allowedClauses_OMPD_for_simd,
      llvm::omp::allowedOnceClauses_OMPD_for_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_for_simd,
      llvm::omp::requiredClauses_OMPD_for_simd,
    }
  },
  {llvm::omp::Directive::OMPD_masked_taskloop,
    {
      llvm::omp::allowedClauses_OMPD_masked_taskloop,
      llvm::omp::allowedOnceClauses_OMPD_masked_taskloop,
      llvm::omp::allowedExclusiveClauses_OMPD_masked_taskloop,
      llvm::omp::requiredClauses_OMPD_masked_taskloop,
    }
  },
  {llvm::omp::Directive::OMPD_masked_taskloop_simd,
    {
      llvm::omp::allowedClauses_OMPD_masked_taskloop_simd,
      llvm::omp::allowedOnceClauses_OMPD_masked_taskloop_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_masked_taskloop_simd,
      llvm::omp::requiredClauses_OMPD_masked_taskloop_simd,
    }
  },
  {llvm::omp::Directive::OMPD_master,
    {
      llvm::omp::allowedClauses_OMPD_master,
      llvm::omp::allowedOnceClauses_OMPD_master,
      llvm::omp::allowedExclusiveClauses_OMPD_master,
      llvm::omp::requiredClauses_OMPD_master,
    }
  },
  {llvm::omp::Directive::OMPD_master_taskloop,
    {
      llvm::omp::allowedClauses_OMPD_master_taskloop,
      llvm::omp::allowedOnceClauses_OMPD_master_taskloop,
      llvm::omp::allowedExclusiveClauses_OMPD_master_taskloop,
      llvm::omp::requiredClauses_OMPD_master_taskloop,
    }
  },
  {llvm::omp::Directive::OMPD_master_taskloop_simd,
    {
      llvm::omp::allowedClauses_OMPD_master_taskloop_simd,
      llvm::omp::allowedOnceClauses_OMPD_master_taskloop_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_master_taskloop_simd,
      llvm::omp::requiredClauses_OMPD_master_taskloop_simd,
    }
  },
  {llvm::omp::Directive::OMPD_metadirective,
    {
      llvm::omp::allowedClauses_OMPD_metadirective,
      llvm::omp::allowedOnceClauses_OMPD_metadirective,
      llvm::omp::allowedExclusiveClauses_OMPD_metadirective,
      llvm::omp::requiredClauses_OMPD_metadirective,
    }
  },
  {llvm::omp::Directive::OMPD_nothing,
    {
      llvm::omp::allowedClauses_OMPD_nothing,
      llvm::omp::allowedOnceClauses_OMPD_nothing,
      llvm::omp::allowedExclusiveClauses_OMPD_nothing,
      llvm::omp::requiredClauses_OMPD_nothing,
    }
  },
  {llvm::omp::Directive::OMPD_ordered,
    {
      llvm::omp::allowedClauses_OMPD_ordered,
      llvm::omp::allowedOnceClauses_OMPD_ordered,
      llvm::omp::allowedExclusiveClauses_OMPD_ordered,
      llvm::omp::requiredClauses_OMPD_ordered,
    }
  },
  {llvm::omp::Directive::OMPD_parallel,
    {
      llvm::omp::allowedClauses_OMPD_parallel,
      llvm::omp::allowedOnceClauses_OMPD_parallel,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel,
      llvm::omp::requiredClauses_OMPD_parallel,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_do,
    {
      llvm::omp::allowedClauses_OMPD_parallel_do,
      llvm::omp::allowedOnceClauses_OMPD_parallel_do,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_do,
      llvm::omp::requiredClauses_OMPD_parallel_do,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_parallel_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_parallel_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_do_simd,
      llvm::omp::requiredClauses_OMPD_parallel_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_for,
    {
      llvm::omp::allowedClauses_OMPD_parallel_for,
      llvm::omp::allowedOnceClauses_OMPD_parallel_for,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_for,
      llvm::omp::requiredClauses_OMPD_parallel_for,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_for_simd,
    {
      llvm::omp::allowedClauses_OMPD_parallel_for_simd,
      llvm::omp::allowedOnceClauses_OMPD_parallel_for_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_for_simd,
      llvm::omp::requiredClauses_OMPD_parallel_for_simd,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_masked,
    {
      llvm::omp::allowedClauses_OMPD_parallel_masked,
      llvm::omp::allowedOnceClauses_OMPD_parallel_masked,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_masked,
      llvm::omp::requiredClauses_OMPD_parallel_masked,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_masked_taskloop,
    {
      llvm::omp::allowedClauses_OMPD_parallel_masked_taskloop,
      llvm::omp::allowedOnceClauses_OMPD_parallel_masked_taskloop,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_masked_taskloop,
      llvm::omp::requiredClauses_OMPD_parallel_masked_taskloop,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd,
    {
      llvm::omp::allowedClauses_OMPD_parallel_masked_taskloop_simd,
      llvm::omp::allowedOnceClauses_OMPD_parallel_masked_taskloop_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_masked_taskloop_simd,
      llvm::omp::requiredClauses_OMPD_parallel_masked_taskloop_simd,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_master,
    {
      llvm::omp::allowedClauses_OMPD_parallel_master,
      llvm::omp::allowedOnceClauses_OMPD_parallel_master,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_master,
      llvm::omp::requiredClauses_OMPD_parallel_master,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_master_taskloop,
    {
      llvm::omp::allowedClauses_OMPD_parallel_master_taskloop,
      llvm::omp::allowedOnceClauses_OMPD_parallel_master_taskloop,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_master_taskloop,
      llvm::omp::requiredClauses_OMPD_parallel_master_taskloop,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_master_taskloop_simd,
    {
      llvm::omp::allowedClauses_OMPD_parallel_master_taskloop_simd,
      llvm::omp::allowedOnceClauses_OMPD_parallel_master_taskloop_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_master_taskloop_simd,
      llvm::omp::requiredClauses_OMPD_parallel_master_taskloop_simd,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_sections,
    {
      llvm::omp::allowedClauses_OMPD_parallel_sections,
      llvm::omp::allowedOnceClauses_OMPD_parallel_sections,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_sections,
      llvm::omp::requiredClauses_OMPD_parallel_sections,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_workshare,
    {
      llvm::omp::allowedClauses_OMPD_parallel_workshare,
      llvm::omp::allowedOnceClauses_OMPD_parallel_workshare,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_workshare,
      llvm::omp::requiredClauses_OMPD_parallel_workshare,
    }
  },
  {llvm::omp::Directive::OMPD_requires,
    {
      llvm::omp::allowedClauses_OMPD_requires,
      llvm::omp::allowedOnceClauses_OMPD_requires,
      llvm::omp::allowedExclusiveClauses_OMPD_requires,
      llvm::omp::requiredClauses_OMPD_requires,
    }
  },
  {llvm::omp::Directive::OMPD_scan,
    {
      llvm::omp::allowedClauses_OMPD_scan,
      llvm::omp::allowedOnceClauses_OMPD_scan,
      llvm::omp::allowedExclusiveClauses_OMPD_scan,
      llvm::omp::requiredClauses_OMPD_scan,
    }
  },
  {llvm::omp::Directive::OMPD_section,
    {
      llvm::omp::allowedClauses_OMPD_section,
      llvm::omp::allowedOnceClauses_OMPD_section,
      llvm::omp::allowedExclusiveClauses_OMPD_section,
      llvm::omp::requiredClauses_OMPD_section,
    }
  },
  {llvm::omp::Directive::OMPD_sections,
    {
      llvm::omp::allowedClauses_OMPD_sections,
      llvm::omp::allowedOnceClauses_OMPD_sections,
      llvm::omp::allowedExclusiveClauses_OMPD_sections,
      llvm::omp::requiredClauses_OMPD_sections,
    }
  },
  {llvm::omp::Directive::OMPD_simd,
    {
      llvm::omp::allowedClauses_OMPD_simd,
      llvm::omp::allowedOnceClauses_OMPD_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_simd,
      llvm::omp::requiredClauses_OMPD_simd,
    }
  },
  {llvm::omp::Directive::OMPD_single,
    {
      llvm::omp::allowedClauses_OMPD_single,
      llvm::omp::allowedOnceClauses_OMPD_single,
      llvm::omp::allowedExclusiveClauses_OMPD_single,
      llvm::omp::requiredClauses_OMPD_single,
    }
  },
  {llvm::omp::Directive::OMPD_target,
    {
      llvm::omp::allowedClauses_OMPD_target,
      llvm::omp::allowedOnceClauses_OMPD_target,
      llvm::omp::allowedExclusiveClauses_OMPD_target,
      llvm::omp::requiredClauses_OMPD_target,
    }
  },
  {llvm::omp::Directive::OMPD_target_data,
    {
      llvm::omp::allowedClauses_OMPD_target_data,
      llvm::omp::allowedOnceClauses_OMPD_target_data,
      llvm::omp::allowedExclusiveClauses_OMPD_target_data,
      llvm::omp::requiredClauses_OMPD_target_data,
    }
  },
  {llvm::omp::Directive::OMPD_target_enter_data,
    {
      llvm::omp::allowedClauses_OMPD_target_enter_data,
      llvm::omp::allowedOnceClauses_OMPD_target_enter_data,
      llvm::omp::allowedExclusiveClauses_OMPD_target_enter_data,
      llvm::omp::requiredClauses_OMPD_target_enter_data,
    }
  },
  {llvm::omp::Directive::OMPD_target_exit_data,
    {
      llvm::omp::allowedClauses_OMPD_target_exit_data,
      llvm::omp::allowedOnceClauses_OMPD_target_exit_data,
      llvm::omp::allowedExclusiveClauses_OMPD_target_exit_data,
      llvm::omp::requiredClauses_OMPD_target_exit_data,
    }
  },
  {llvm::omp::Directive::OMPD_target_parallel,
    {
      llvm::omp::allowedClauses_OMPD_target_parallel,
      llvm::omp::allowedOnceClauses_OMPD_target_parallel,
      llvm::omp::allowedExclusiveClauses_OMPD_target_parallel,
      llvm::omp::requiredClauses_OMPD_target_parallel,
    }
  },
  {llvm::omp::Directive::OMPD_target_parallel_do,
    {
      llvm::omp::allowedClauses_OMPD_target_parallel_do,
      llvm::omp::allowedOnceClauses_OMPD_target_parallel_do,
      llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_do,
      llvm::omp::requiredClauses_OMPD_target_parallel_do,
    }
  },
  {llvm::omp::Directive::OMPD_target_parallel_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_target_parallel_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_target_parallel_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_do_simd,
      llvm::omp::requiredClauses_OMPD_target_parallel_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_target_parallel_for,
    {
      llvm::omp::allowedClauses_OMPD_target_parallel_for,
      llvm::omp::allowedOnceClauses_OMPD_target_parallel_for,
      llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_for,
      llvm::omp::requiredClauses_OMPD_target_parallel_for,
    }
  },
  {llvm::omp::Directive::OMPD_target_parallel_for_simd,
    {
      llvm::omp::allowedClauses_OMPD_target_parallel_for_simd,
      llvm::omp::allowedOnceClauses_OMPD_target_parallel_for_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_for_simd,
      llvm::omp::requiredClauses_OMPD_target_parallel_for_simd,
    }
  },
  {llvm::omp::Directive::OMPD_target_simd,
    {
      llvm::omp::allowedClauses_OMPD_target_simd,
      llvm::omp::allowedOnceClauses_OMPD_target_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_target_simd,
      llvm::omp::requiredClauses_OMPD_target_simd,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams,
    {
      llvm::omp::allowedClauses_OMPD_target_teams,
      llvm::omp::allowedOnceClauses_OMPD_target_teams,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams,
      llvm::omp::requiredClauses_OMPD_target_teams,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_distribute,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_distribute,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute,
      llvm::omp::requiredClauses_OMPD_target_teams_distribute,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_do,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_do,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_do,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do,
      llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_do,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_do_simd,
      llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_for,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_for,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_for,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for,
      llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_for,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_distribute_parallel_for_simd,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_distribute_parallel_for_simd,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_parallel_for_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_parallel_for_simd,
      llvm::omp::requiredClauses_OMPD_target_teams_distribute_parallel_for_simd,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_distribute_simd,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_distribute_simd,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_distribute_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_distribute_simd,
      llvm::omp::requiredClauses_OMPD_target_teams_distribute_simd,
    }
  },
  {llvm::omp::Directive::OMPD_target_update,
    {
      llvm::omp::allowedClauses_OMPD_target_update,
      llvm::omp::allowedOnceClauses_OMPD_target_update,
      llvm::omp::allowedExclusiveClauses_OMPD_target_update,
      llvm::omp::requiredClauses_OMPD_target_update,
    }
  },
  {llvm::omp::Directive::OMPD_task,
    {
      llvm::omp::allowedClauses_OMPD_task,
      llvm::omp::allowedOnceClauses_OMPD_task,
      llvm::omp::allowedExclusiveClauses_OMPD_task,
      llvm::omp::requiredClauses_OMPD_task,
    }
  },
  {llvm::omp::Directive::OMPD_taskgroup,
    {
      llvm::omp::allowedClauses_OMPD_taskgroup,
      llvm::omp::allowedOnceClauses_OMPD_taskgroup,
      llvm::omp::allowedExclusiveClauses_OMPD_taskgroup,
      llvm::omp::requiredClauses_OMPD_taskgroup,
    }
  },
  {llvm::omp::Directive::OMPD_taskloop,
    {
      llvm::omp::allowedClauses_OMPD_taskloop,
      llvm::omp::allowedOnceClauses_OMPD_taskloop,
      llvm::omp::allowedExclusiveClauses_OMPD_taskloop,
      llvm::omp::requiredClauses_OMPD_taskloop,
    }
  },
  {llvm::omp::Directive::OMPD_taskloop_simd,
    {
      llvm::omp::allowedClauses_OMPD_taskloop_simd,
      llvm::omp::allowedOnceClauses_OMPD_taskloop_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_taskloop_simd,
      llvm::omp::requiredClauses_OMPD_taskloop_simd,
    }
  },
  {llvm::omp::Directive::OMPD_taskwait,
    {
      llvm::omp::allowedClauses_OMPD_taskwait,
      llvm::omp::allowedOnceClauses_OMPD_taskwait,
      llvm::omp::allowedExclusiveClauses_OMPD_taskwait,
      llvm::omp::requiredClauses_OMPD_taskwait,
    }
  },
  {llvm::omp::Directive::OMPD_taskyield,
    {
      llvm::omp::allowedClauses_OMPD_taskyield,
      llvm::omp::allowedOnceClauses_OMPD_taskyield,
      llvm::omp::allowedExclusiveClauses_OMPD_taskyield,
      llvm::omp::requiredClauses_OMPD_taskyield,
    }
  },
  {llvm::omp::Directive::OMPD_teams,
    {
      llvm::omp::allowedClauses_OMPD_teams,
      llvm::omp::allowedOnceClauses_OMPD_teams,
      llvm::omp::allowedExclusiveClauses_OMPD_teams,
      llvm::omp::requiredClauses_OMPD_teams,
    }
  },
  {llvm::omp::Directive::OMPD_teams_distribute,
    {
      llvm::omp::allowedClauses_OMPD_teams_distribute,
      llvm::omp::allowedOnceClauses_OMPD_teams_distribute,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute,
      llvm::omp::requiredClauses_OMPD_teams_distribute,
    }
  },
  {llvm::omp::Directive::OMPD_teams_distribute_parallel_do,
    {
      llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_do,
      llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_do,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_do,
      llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_do,
    }
  },
  {llvm::omp::Directive::OMPD_teams_distribute_parallel_do_simd,
    {
      llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_do_simd,
      llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_do_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_do_simd,
      llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_do_simd,
    }
  },
  {llvm::omp::Directive::OMPD_teams_distribute_parallel_for,
    {
      llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_for,
      llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_for,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_for,
      llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_for,
    }
  },
  {llvm::omp::Directive::OMPD_teams_distribute_parallel_for_simd,
    {
      llvm::omp::allowedClauses_OMPD_teams_distribute_parallel_for_simd,
      llvm::omp::allowedOnceClauses_OMPD_teams_distribute_parallel_for_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_parallel_for_simd,
      llvm::omp::requiredClauses_OMPD_teams_distribute_parallel_for_simd,
    }
  },
  {llvm::omp::Directive::OMPD_teams_distribute_simd,
    {
      llvm::omp::allowedClauses_OMPD_teams_distribute_simd,
      llvm::omp::allowedOnceClauses_OMPD_teams_distribute_simd,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_distribute_simd,
      llvm::omp::requiredClauses_OMPD_teams_distribute_simd,
    }
  },
  {llvm::omp::Directive::OMPD_threadprivate,
    {
      llvm::omp::allowedClauses_OMPD_threadprivate,
      llvm::omp::allowedOnceClauses_OMPD_threadprivate,
      llvm::omp::allowedExclusiveClauses_OMPD_threadprivate,
      llvm::omp::requiredClauses_OMPD_threadprivate,
    }
  },
  {llvm::omp::Directive::OMPD_tile,
    {
      llvm::omp::allowedClauses_OMPD_tile,
      llvm::omp::allowedOnceClauses_OMPD_tile,
      llvm::omp::allowedExclusiveClauses_OMPD_tile,
      llvm::omp::requiredClauses_OMPD_tile,
    }
  },
  {llvm::omp::Directive::OMPD_unknown,
    {
      llvm::omp::allowedClauses_OMPD_unknown,
      llvm::omp::allowedOnceClauses_OMPD_unknown,
      llvm::omp::allowedExclusiveClauses_OMPD_unknown,
      llvm::omp::requiredClauses_OMPD_unknown,
    }
  },
  {llvm::omp::Directive::OMPD_unroll,
    {
      llvm::omp::allowedClauses_OMPD_unroll,
      llvm::omp::allowedOnceClauses_OMPD_unroll,
      llvm::omp::allowedExclusiveClauses_OMPD_unroll,
      llvm::omp::requiredClauses_OMPD_unroll,
    }
  },
  {llvm::omp::Directive::OMPD_workshare,
    {
      llvm::omp::allowedClauses_OMPD_workshare,
      llvm::omp::allowedOnceClauses_OMPD_workshare,
      llvm::omp::allowedExclusiveClauses_OMPD_workshare,
      llvm::omp::requiredClauses_OMPD_workshare,
    }
  },
  {llvm::omp::Directive::OMPD_dispatch,
    {
      llvm::omp::allowedClauses_OMPD_dispatch,
      llvm::omp::allowedOnceClauses_OMPD_dispatch,
      llvm::omp::allowedExclusiveClauses_OMPD_dispatch,
      llvm::omp::requiredClauses_OMPD_dispatch,
    }
  },
  {llvm::omp::Directive::OMPD_interop,
    {
      llvm::omp::allowedClauses_OMPD_interop,
      llvm::omp::allowedOnceClauses_OMPD_interop,
      llvm::omp::allowedExclusiveClauses_OMPD_interop,
      llvm::omp::requiredClauses_OMPD_interop,
    }
  },
  {llvm::omp::Directive::OMPD_loop,
    {
      llvm::omp::allowedClauses_OMPD_loop,
      llvm::omp::allowedOnceClauses_OMPD_loop,
      llvm::omp::allowedExclusiveClauses_OMPD_loop,
      llvm::omp::requiredClauses_OMPD_loop,
    }
  },
  {llvm::omp::Directive::OMPD_masked,
    {
      llvm::omp::allowedClauses_OMPD_masked,
      llvm::omp::allowedOnceClauses_OMPD_masked,
      llvm::omp::allowedExclusiveClauses_OMPD_masked,
      llvm::omp::requiredClauses_OMPD_masked,
    }
  },
  {llvm::omp::Directive::OMPD_parallel_loop,
    {
      llvm::omp::allowedClauses_OMPD_parallel_loop,
      llvm::omp::allowedOnceClauses_OMPD_parallel_loop,
      llvm::omp::allowedExclusiveClauses_OMPD_parallel_loop,
      llvm::omp::requiredClauses_OMPD_parallel_loop,
    }
  },
  {llvm::omp::Directive::OMPD_target_parallel_loop,
    {
      llvm::omp::allowedClauses_OMPD_target_parallel_loop,
      llvm::omp::allowedOnceClauses_OMPD_target_parallel_loop,
      llvm::omp::allowedExclusiveClauses_OMPD_target_parallel_loop,
      llvm::omp::requiredClauses_OMPD_target_parallel_loop,
    }
  },
  {llvm::omp::Directive::OMPD_target_teams_loop,
    {
      llvm::omp::allowedClauses_OMPD_target_teams_loop,
      llvm::omp::allowedOnceClauses_OMPD_target_teams_loop,
      llvm::omp::allowedExclusiveClauses_OMPD_target_teams_loop,
      llvm::omp::requiredClauses_OMPD_target_teams_loop,
    }
  },
  {llvm::omp::Directive::OMPD_teams_loop,
    {
      llvm::omp::allowedClauses_OMPD_teams_loop,
      llvm::omp::allowedOnceClauses_OMPD_teams_loop,
      llvm::omp::allowedExclusiveClauses_OMPD_teams_loop,
      llvm::omp::requiredClauses_OMPD_teams_loop,
    }
  },
}

#endif // GEN_FLANG_DIRECTIVE_CLAUSE_MAP

#ifdef GEN_FLANG_CLAUSE_PARSER_CLASSES
#undef GEN_FLANG_CLAUSE_PARSER_CLASSES
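
// One parse-tree class per OpenMP clause.  EMPTY_CLASS(X) is expected to
// declare a clause node with no payload, and WRAPPER_CLASS(X, T) a node
// whose single member `v` holds a value of type T; both macros must be
// defined by the including parser header before this section is expanded.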

EMPTY_CLASS(AcqRel);
EMPTY_CLASS(Acquire);
EMPTY_CLASS(AdjustArgs);
EMPTY_CLASS(Affinity);
EMPTY_CLASS(Align);
WRAPPER_CLASS(Aligned, OmpAlignedClause);
WRAPPER_CLASS(Allocate, OmpAllocateClause);
WRAPPER_CLASS(Allocator, ScalarIntExpr);
EMPTY_CLASS(AppendArgs);
EMPTY_CLASS(At);
WRAPPER_CLASS(AtomicDefaultMemOrder, OmpAtomicDefaultMemOrderClause);
EMPTY_CLASS(Bind);
EMPTY_CLASS(CancellationConstructType);
EMPTY_CLASS(Capture);
WRAPPER_CLASS(Collapse, ScalarIntConstantExpr);
EMPTY_CLASS(Compare);
WRAPPER_CLASS(Copyprivate, OmpObjectList);
WRAPPER_CLASS(Copyin, OmpObjectList);
WRAPPER_CLASS(Default, OmpDefaultClause);
WRAPPER_CLASS(Defaultmap, OmpDefaultmapClause);
WRAPPER_CLASS(Depend, OmpDependClause);
EMPTY_CLASS(Depobj);
EMPTY_CLASS(Destroy);
EMPTY_CLASS(Detach);
WRAPPER_CLASS(Device, OmpDeviceClause);
WRAPPER_CLASS(DeviceType, OmpDeviceTypeClause);
WRAPPER_CLASS(DistSchedule, std::optional<ScalarIntExpr>);
EMPTY_CLASS(Doacross);
EMPTY_CLASS(DynamicAllocators);
EMPTY_CLASS(Exclusive);
WRAPPER_CLASS(Filter, ScalarIntExpr);
WRAPPER_CLASS(Final, ScalarLogicalExpr);
WRAPPER_CLASS(Firstprivate, OmpObjectList);
EMPTY_CLASS(Flush);
WRAPPER_CLASS(From, OmpObjectList);
EMPTY_CLASS(Full);
WRAPPER_CLASS(Grainsize, ScalarIntExpr);
WRAPPER_CLASS(HasDeviceAddr, std::list<Name>);
WRAPPER_CLASS(Hint, ConstantExpr);
WRAPPER_CLASS(If, OmpIfClause);
WRAPPER_CLASS(InReduction, OmpInReductionClause);
EMPTY_CLASS(Inbranch);
EMPTY_CLASS(Inclusive);
EMPTY_CLASS(Indirect);
EMPTY_CLASS(Init);
WRAPPER_CLASS(IsDevicePtr, std::list<Name>);
WRAPPER_CLASS(Lastprivate, OmpObjectList);
WRAPPER_CLASS(Linear, OmpLinearClause);
WRAPPER_CLASS(Link, OmpObjectList);
WRAPPER_CLASS(Map, OmpMapClause);
EMPTY_CLASS(Match);
EMPTY_CLASS(MemoryOrder);
EMPTY_CLASS(Mergeable);
EMPTY_CLASS(Message);
EMPTY_CLASS(Nogroup);
EMPTY_CLASS(Nowait);
WRAPPER_CLASS(Nocontext, ScalarLogicalExpr);
WRAPPER_CLASS(Nontemporal, std::list<Name>);
EMPTY_CLASS(Notinbranch);
WRAPPER_CLASS(Novariants, ScalarLogicalExpr);
WRAPPER_CLASS(NumTasks, ScalarIntExpr);
WRAPPER_CLASS(NumTeams, ScalarIntExpr);
WRAPPER_CLASS(NumThreads, ScalarIntExpr);
WRAPPER_CLASS(OmpxDynCgroupMem, ScalarIntExpr);
WRAPPER_CLASS(Order, OmpOrderClause);
WRAPPER_CLASS(Ordered, std::optional<ScalarIntConstantExpr>);
WRAPPER_CLASS(Partial, std::optional<ScalarIntConstantExpr>);
WRAPPER_CLASS(Priority, ScalarIntExpr);
WRAPPER_CLASS(Private, OmpObjectList);
WRAPPER_CLASS(ProcBind, OmpProcBindClause);
EMPTY_CLASS(Read);
WRAPPER_CLASS(Reduction, OmpReductionClause);
EMPTY_CLASS(Relaxed);
EMPTY_CLASS(Release);
EMPTY_CLASS(ReverseOffload);
WRAPPER_CLASS(Safelen, ScalarIntConstantExpr);
WRAPPER_CLASS(Schedule, OmpScheduleClause);
EMPTY_CLASS(SeqCst);
EMPTY_CLASS(Severity);
WRAPPER_CLASS(Shared, OmpObjectList);
EMPTY_CLASS(Simd);
WRAPPER_CLASS(Simdlen, ScalarIntConstantExpr);
WRAPPER_CLASS(Sizes, std::list<ScalarIntExpr>);
WRAPPER_CLASS(TaskReduction, OmpReductionClause);
WRAPPER_CLASS(ThreadLimit, ScalarIntExpr);
EMPTY_CLASS(Threadprivate);
EMPTY_CLASS(Threads);
WRAPPER_CLASS(To, OmpObjectList);
EMPTY_CLASS(UnifiedAddress);
EMPTY_CLASS(UnifiedSharedMemory);
WRAPPER_CLASS(Uniform, std::list<Name>);
EMPTY_CLASS(Unknown);
EMPTY_CLASS(Untied);
EMPTY_CLASS(Update);
EMPTY_CLASS(Use);
WRAPPER_CLASS(UseDeviceAddr, OmpObjectList);
WRAPPER_CLASS(UseDevicePtr, OmpObjectList);
EMPTY_CLASS(UsesAllocators);
EMPTY_CLASS(When);
EMPTY_CLASS(Write);

#endif // GEN_FLANG_CLAUSE_PARSER_CLASSES

#ifdef GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
#undef GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
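
// The same clause class names as above, emitted as one comma-separated
// list.  The intended consumer splices it into the template-argument list
// of a variant member of OmpClause; a minimal sketch under that
// assumption:
//
//   std::variant<
//   #define GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST
//   #include "llvm/Frontend/OpenMP/OMP.inc"
//   > u;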

AcqRel
, Acquire
, AdjustArgs
, Affinity
, Align
, Aligned
, Allocate
, Allocator
, AppendArgs
, At
, AtomicDefaultMemOrder
, Bind
, CancellationConstructType
, Capture
, Collapse
, Compare
, Copyprivate
, Copyin
, Default
, Defaultmap
, Depend
, Depobj
, Destroy
, Detach
, Device
, DeviceType
, DistSchedule
, Doacross
, DynamicAllocators
, Exclusive
, Filter
, Final
, Firstprivate
, Flush
, From
, Full
, Grainsize
, HasDeviceAddr
, Hint
, If
, InReduction
, Inbranch
, Inclusive
, Indirect
, Init
, IsDevicePtr
, Lastprivate
, Linear
, Link
, Map
, Match
, MemoryOrder
, Mergeable
, Message
, Nogroup
, Nowait
, Nocontext
, Nontemporal
, Notinbranch
, Novariants
, NumTasks
, NumTeams
, NumThreads
, OmpxDynCgroupMem
, Order
, Ordered
, Partial
, Priority
, Private
, ProcBind
, Read
, Reduction
, Relaxed
, Release
, ReverseOffload
, Safelen
, Schedule
, SeqCst
, Severity
, Shared
, Simd
, Simdlen
, Sizes
, TaskReduction
, ThreadLimit
, Threadprivate
, Threads
, To
, UnifiedAddress
, UnifiedSharedMemory
, Uniform
, Unknown
, Untied
, Update
, Use
, UseDeviceAddr
, UseDevicePtr
, UsesAllocators
, When
, Write

#endif // GEN_FLANG_CLAUSE_PARSER_CLASSES_LIST

#ifdef GEN_FLANG_DUMP_PARSE_TREE_CLAUSES
#undef GEN_FLANG_DUMP_PARSE_TREE_CLAUSES
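
// NODE(OmpClause, X) registrations for the parse-tree dumper: each entry
// associates a clause node with its printable name.  The NODE macro is
// supplied by the including header.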

NODE(OmpClause, AcqRel)
NODE(OmpClause, Acquire)
NODE(OmpClause, AdjustArgs)
NODE(OmpClause, Affinity)
NODE(OmpClause, Align)
NODE(OmpClause, Aligned)
NODE(OmpClause, Allocate)
NODE(OmpClause, Allocator)
NODE(OmpClause, AppendArgs)
NODE(OmpClause, At)
NODE(OmpClause, AtomicDefaultMemOrder)
NODE(OmpClause, Bind)
NODE(OmpClause, CancellationConstructType)
NODE(OmpClause, Capture)
NODE(OmpClause, Collapse)
NODE(OmpClause, Compare)
NODE(OmpClause, Copyprivate)
NODE(OmpClause, Copyin)
NODE(OmpClause, Default)
NODE(OmpClause, Defaultmap)
NODE(OmpClause, Depend)
NODE(OmpClause, Depobj)
NODE(OmpClause, Destroy)
NODE(OmpClause, Detach)
NODE(OmpClause, Device)
NODE(OmpClause, DeviceType)
NODE(OmpClause, DistSchedule)
NODE(OmpClause, Doacross)
NODE(OmpClause, DynamicAllocators)
NODE(OmpClause, Exclusive)
NODE(OmpClause, Filter)
NODE(OmpClause, Final)
NODE(OmpClause, Firstprivate)
NODE(OmpClause, Flush)
NODE(OmpClause, From)
NODE(OmpClause, Full)
NODE(OmpClause, Grainsize)
NODE(OmpClause, HasDeviceAddr)
NODE(OmpClause, Hint)
NODE(OmpClause, If)
NODE(OmpClause, InReduction)
NODE(OmpClause, Inbranch)
NODE(OmpClause, Inclusive)
NODE(OmpClause, Indirect)
NODE(OmpClause, Init)
NODE(OmpClause, IsDevicePtr)
NODE(OmpClause, Lastprivate)
NODE(OmpClause, Linear)
NODE(OmpClause, Link)
NODE(OmpClause, Map)
NODE(OmpClause, Match)
NODE(OmpClause, MemoryOrder)
NODE(OmpClause, Mergeable)
NODE(OmpClause, Message)
NODE(OmpClause, Nogroup)
NODE(OmpClause, Nowait)
NODE(OmpClause, Nocontext)
NODE(OmpClause, Nontemporal)
NODE(OmpClause, Notinbranch)
NODE(OmpClause, Novariants)
NODE(OmpClause, NumTasks)
NODE(OmpClause, NumTeams)
NODE(OmpClause, NumThreads)
NODE(OmpClause, OmpxDynCgroupMem)
NODE(OmpClause, Order)
NODE(OmpClause, Ordered)
NODE(OmpClause, Partial)
NODE(OmpClause, Priority)
NODE(OmpClause, Private)
NODE(OmpClause, ProcBind)
NODE(OmpClause, Read)
NODE(OmpClause, Reduction)
NODE(OmpClause, Relaxed)
NODE(OmpClause, Release)
NODE(OmpClause, ReverseOffload)
NODE(OmpClause, Safelen)
NODE(OmpClause, Schedule)
NODE(OmpClause, SeqCst)
NODE(OmpClause, Severity)
NODE(OmpClause, Shared)
NODE(OmpClause, Simd)
NODE(OmpClause, Simdlen)
NODE(OmpClause, Sizes)
NODE(OmpClause, TaskReduction)
NODE(OmpClause, ThreadLimit)
NODE(OmpClause, Threadprivate)
NODE(OmpClause, Threads)
NODE(OmpClause, To)
NODE(OmpClause, UnifiedAddress)
NODE(OmpClause, UnifiedSharedMemory)
NODE(OmpClause, Uniform)
NODE(OmpClause, Unknown)
NODE(OmpClause, Untied)
NODE(OmpClause, Update)
NODE(OmpClause, Use)
NODE(OmpClause, UseDeviceAddr)
NODE(OmpClause, UseDevicePtr)
NODE(OmpClause, UsesAllocators)
NODE(OmpClause, When)
NODE(OmpClause, Write)

#endif // GEN_FLANG_DUMP_PARSE_TREE_CLAUSES

#ifdef GEN_FLANG_CLAUSE_UNPARSE
#undef GEN_FLANG_CLAUSE_UNPARSE
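
// Unparsing support.  Clauses without a payload get a Before() overload
// that prints just the clause keyword; wrapper clauses get an Unparse()
// overload that prints the keyword followed by its parenthesized payload.
// Where the payload is optional (e.g. ORDERED, PARTIAL, DIST_SCHEDULE),
// Walk("(", x.v, ")") emits the parentheses only when a value is present.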

void Before(const OmpClause::AcqRel &) { Word("ACQ_REL"); }
void Before(const OmpClause::Acquire &) { Word("ACQUIRE"); }
void Before(const OmpClause::AdjustArgs &) { Word("ADJUST_ARGS"); }
void Before(const OmpClause::Affinity &) { Word("AFFINITY"); }
void Before(const OmpClause::Align &) { Word("ALIGN"); }
void Unparse(const OmpClause::Aligned &x) {
  Word("ALIGNED");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Allocate &x) {
  Word("ALLOCATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Allocator &x) {
  Word("ALLOCATOR");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::AppendArgs &) { Word("APPEND_ARGS"); }
void Before(const OmpClause::At &) { Word("AT"); }
void Unparse(const OmpClause::AtomicDefaultMemOrder &x) {
  Word("ATOMIC_DEFAULT_MEM_ORDER");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Bind &) { Word("BIND"); }
void Before(const OmpClause::CancellationConstructType &) { Word("CANCELLATION_CONSTRUCT_TYPE"); }
void Before(const OmpClause::Capture &) { Word("CAPTURE"); }
void Unparse(const OmpClause::Collapse &x) {
  Word("COLLAPSE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Compare &) { Word("COMPARE"); }
void Unparse(const OmpClause::Copyprivate &x) {
  Word("COPYPRIVATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Copyin &x) {
  Word("COPYIN");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Default &x) {
  Word("DEFAULT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Defaultmap &x) {
  Word("DEFAULTMAP");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Depend &x) {
  Word("DEPEND");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Depobj &) { Word("DEPOBJ"); }
void Before(const OmpClause::Destroy &) { Word("DESTROY"); }
void Before(const OmpClause::Detach &) { Word("DETACH"); }
void Unparse(const OmpClause::Device &x) {
  Word("DEVICE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::DeviceType &x) {
  Word("DEVICE_TYPE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::DistSchedule &x) {
  Word("DIST_SCHEDULE");
  Walk("(", x.v, ")");
}
void Before(const OmpClause::Doacross &) { Word("DOACROSS"); }
void Before(const OmpClause::DynamicAllocators &) { Word("DYNAMIC_ALLOCATORS"); }
void Before(const OmpClause::Exclusive &) { Word("EXCLUSIVE"); }
void Unparse(const OmpClause::Filter &x) {
  Word("FILTER");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Final &x) {
  Word("FINAL");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Firstprivate &x) {
  Word("FIRSTPRIVATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Flush &) { Word("FLUSH"); }
void Unparse(const OmpClause::From &x) {
  Word("FROM");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Full &) { Word("FULL"); }
void Unparse(const OmpClause::Grainsize &x) {
  Word("GRAINSIZE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::HasDeviceAddr &x) {
  Word("HAS_DEVICE_ADDR");
  Put("(");
  Walk(x.v, ",");
  Put(")");
}
void Unparse(const OmpClause::Hint &x) {
  Word("HINT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::If &x) {
  Word("IF");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::InReduction &x) {
  Word("IN_REDUCTION");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Inbranch &) { Word("INBRANCH"); }
void Before(const OmpClause::Inclusive &) { Word("INCLUSIVE"); }
void Before(const OmpClause::Indirect &) { Word("INDIRECT"); }
void Before(const OmpClause::Init &) { Word("INIT"); }
void Unparse(const OmpClause::IsDevicePtr &x) {
  Word("IS_DEVICE_PTR");
  Put("(");
  Walk(x.v, ",");
  Put(")");
}
void Unparse(const OmpClause::Lastprivate &x) {
  Word("LASTPRIVATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Linear &x) {
  Word("LINEAR");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Link &x) {
  Word("LINK");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Map &x) {
  Word("MAP");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Match &) { Word("MATCH"); }
void Before(const OmpClause::MemoryOrder &) { Word("MEMORY_ORDER"); }
void Before(const OmpClause::Mergeable &) { Word("MERGEABLE"); }
void Before(const OmpClause::Message &) { Word("MESSAGE"); }
void Before(const OmpClause::Nogroup &) { Word("NOGROUP"); }
void Before(const OmpClause::Nowait &) { Word("NOWAIT"); }
void Unparse(const OmpClause::Nocontext &x) {
  Word("NOCONTEXT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Nontemporal &x) {
  Word("NONTEMPORAL");
  Put("(");
  Walk(x.v, ",");
  Put(")");
}
void Before(const OmpClause::Notinbranch &) { Word("NOTINBRANCH"); }
void Unparse(const OmpClause::Novariants &x) {
  Word("NOVARIANTS");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::NumTasks &x) {
  Word("NUM_TASKS");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::NumTeams &x) {
  Word("NUM_TEAMS");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::NumThreads &x) {
  Word("NUM_THREADS");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::OmpxDynCgroupMem &x) {
  Word("OMPX_DYN_CGROUP_MEM");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Order &x) {
  Word("ORDER");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Ordered &x) {
  Word("ORDERED");
  Walk("(", x.v, ")");
}
void Unparse(const OmpClause::Partial &x) {
  Word("PARTIAL");
  Walk("(", x.v, ")");
}
void Unparse(const OmpClause::Priority &x) {
  Word("PRIORITY");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Private &x) {
  Word("PRIVATE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::ProcBind &x) {
  Word("PROC_BIND");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Read &) { Word("READ"); }
void Unparse(const OmpClause::Reduction &x) {
  Word("REDUCTION");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Relaxed &) { Word("RELAXED"); }
void Before(const OmpClause::Release &) { Word("RELEASE"); }
void Before(const OmpClause::ReverseOffload &) { Word("REVERSE_OFFLOAD"); }
void Unparse(const OmpClause::Safelen &x) {
  Word("SAFELEN");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Schedule &x) {
  Word("SCHEDULE");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::SeqCst &) { Word("SEQ_CST"); }
void Before(const OmpClause::Severity &) { Word("SEVERITY"); }
void Unparse(const OmpClause::Shared &x) {
  Word("SHARED");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Simd &) { Word("SIMD"); }
void Unparse(const OmpClause::Simdlen &x) {
  Word("SIMDLEN");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::Sizes &x) {
  Word("SIZES");
  Put("(");
  Walk(x.v, ",");
  Put(")");
}
void Unparse(const OmpClause::TaskReduction &x) {
  Word("TASK_REDUCTION");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::ThreadLimit &x) {
  Word("THREAD_LIMIT");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::Threadprivate &) { Word("THREADPRIVATE"); }
void Before(const OmpClause::Threads &) { Word("THREADS"); }
void Unparse(const OmpClause::To &x) {
  Word("TO");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::UnifiedAddress &) { Word("UNIFIED_ADDRESS"); }
void Before(const OmpClause::UnifiedSharedMemory &) { Word("UNIFIED_SHARED_MEMORY"); }
void Unparse(const OmpClause::Uniform &x) {
  Word("UNIFORM");
  Put("(");
  Walk(x.v, ",");
  Put(")");
}
void Before(const OmpClause::Unknown &) { Word("UNKNOWN"); }
void Before(const OmpClause::Untied &) { Word("UNTIED"); }
void Before(const OmpClause::Update &) { Word("UPDATE"); }
void Before(const OmpClause::Use &) { Word("USE"); }
void Unparse(const OmpClause::UseDeviceAddr &x) {
  Word("USE_DEVICE_ADDR");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Unparse(const OmpClause::UseDevicePtr &x) {
  Word("USE_DEVICE_PTR");
  Put("(");
  Walk(x.v);
  Put(")");
}
void Before(const OmpClause::UsesAllocators &) { Word("USES_ALLOCATORS"); }
void Before(const OmpClause::When &) { Word("WHEN"); }
void Before(const OmpClause::Write &) { Word("WRITE"); }

#endif // GEN_FLANG_CLAUSE_UNPARSE

#ifdef GEN_FLANG_CLAUSE_CHECK_ENTER
#undef GEN_FLANG_CLAUSE_CHECK_ENTER
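
// Per-clause Enter() declarations, intended to be expanded inside a
// semantic-checker class so that every clause kind has a visitation hook
// it can override or implement.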

void Enter(const parser::OmpClause::AcqRel &);
void Enter(const parser::OmpClause::Acquire &);
void Enter(const parser::OmpClause::AdjustArgs &);
void Enter(const parser::OmpClause::Affinity &);
void Enter(const parser::OmpClause::Align &);
void Enter(const parser::OmpClause::Aligned &);
void Enter(const parser::OmpClause::Allocate &);
void Enter(const parser::OmpClause::Allocator &);
void Enter(const parser::OmpClause::AppendArgs &);
void Enter(const parser::OmpClause::At &);
void Enter(const parser::OmpClause::AtomicDefaultMemOrder &);
void Enter(const parser::OmpClause::Bind &);
void Enter(const parser::OmpClause::CancellationConstructType &);
void Enter(const parser::OmpClause::Capture &);
void Enter(const parser::OmpClause::Collapse &);
void Enter(const parser::OmpClause::Compare &);
void Enter(const parser::OmpClause::Copyprivate &);
void Enter(const parser::OmpClause::Copyin &);
void Enter(const parser::OmpClause::Default &);
void Enter(const parser::OmpClause::Defaultmap &);
void Enter(const parser::OmpClause::Depend &);
void Enter(const parser::OmpClause::Depobj &);
void Enter(const parser::OmpClause::Destroy &);
void Enter(const parser::OmpClause::Detach &);
void Enter(const parser::OmpClause::Device &);
void Enter(const parser::OmpClause::DeviceType &);
void Enter(const parser::OmpClause::DistSchedule &);
void Enter(const parser::OmpClause::Doacross &);
void Enter(const parser::OmpClause::DynamicAllocators &);
void Enter(const parser::OmpClause::Exclusive &);
void Enter(const parser::OmpClause::Filter &);
void Enter(const parser::OmpClause::Final &);
void Enter(const parser::OmpClause::Firstprivate &);
void Enter(const parser::OmpClause::Flush &);
void Enter(const parser::OmpClause::From &);
void Enter(const parser::OmpClause::Full &);
void Enter(const parser::OmpClause::Grainsize &);
void Enter(const parser::OmpClause::HasDeviceAddr &);
void Enter(const parser::OmpClause::Hint &);
void Enter(const parser::OmpClause::If &);
void Enter(const parser::OmpClause::InReduction &);
void Enter(const parser::OmpClause::Inbranch &);
void Enter(const parser::OmpClause::Inclusive &);
void Enter(const parser::OmpClause::Indirect &);
void Enter(const parser::OmpClause::Init &);
void Enter(const parser::OmpClause::IsDevicePtr &);
void Enter(const parser::OmpClause::Lastprivate &);
void Enter(const parser::OmpClause::Linear &);
void Enter(const parser::OmpClause::Link &);
void Enter(const parser::OmpClause::Map &);
void Enter(const parser::OmpClause::Match &);
void Enter(const parser::OmpClause::MemoryOrder &);
void Enter(const parser::OmpClause::Mergeable &);
void Enter(const parser::OmpClause::Message &);
void Enter(const parser::OmpClause::Nogroup &);
void Enter(const parser::OmpClause::Nowait &);
void Enter(const parser::OmpClause::Nocontext &);
void Enter(const parser::OmpClause::Nontemporal &);
void Enter(const parser::OmpClause::Notinbranch &);
void Enter(const parser::OmpClause::Novariants &);
void Enter(const parser::OmpClause::NumTasks &);
void Enter(const parser::OmpClause::NumTeams &);
void Enter(const parser::OmpClause::NumThreads &);
void Enter(const parser::OmpClause::OmpxDynCgroupMem &);
void Enter(const parser::OmpClause::Order &);
void Enter(const parser::OmpClause::Ordered &);
void Enter(const parser::OmpClause::Partial &);
void Enter(const parser::OmpClause::Priority &);
void Enter(const parser::OmpClause::Private &);
void Enter(const parser::OmpClause::ProcBind &);
void Enter(const parser::OmpClause::Read &);
void Enter(const parser::OmpClause::Reduction &);
void Enter(const parser::OmpClause::Relaxed &);
void Enter(const parser::OmpClause::Release &);
void Enter(const parser::OmpClause::ReverseOffload &);
void Enter(const parser::OmpClause::Safelen &);
void Enter(const parser::OmpClause::Schedule &);
void Enter(const parser::OmpClause::SeqCst &);
void Enter(const parser::OmpClause::Severity &);
void Enter(const parser::OmpClause::Shared &);
void Enter(const parser::OmpClause::Simd &);
void Enter(const parser::OmpClause::Simdlen &);
void Enter(const parser::OmpClause::Sizes &);
void Enter(const parser::OmpClause::TaskReduction &);
void Enter(const parser::OmpClause::ThreadLimit &);
void Enter(const parser::OmpClause::Threadprivate &);
void Enter(const parser::OmpClause::Threads &);
void Enter(const parser::OmpClause::To &);
void Enter(const parser::OmpClause::UnifiedAddress &);
void Enter(const parser::OmpClause::UnifiedSharedMemory &);
void Enter(const parser::OmpClause::Uniform &);
void Enter(const parser::OmpClause::Unknown &);
void Enter(const parser::OmpClause::Untied &);
void Enter(const parser::OmpClause::Update &);
void Enter(const parser::OmpClause::Use &);
void Enter(const parser::OmpClause::UseDeviceAddr &);
void Enter(const parser::OmpClause::UseDevicePtr &);
void Enter(const parser::OmpClause::UsesAllocators &);
void Enter(const parser::OmpClause::When &);
void Enter(const parser::OmpClause::Write &);

#endif // GEN_FLANG_CLAUSE_CHECK_ENTER

#ifdef GEN_FLANG_CLAUSE_PARSER_KIND_MAP
#undef GEN_FLANG_CLAUSE_PARSER_KIND_MAP
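
// Maps each parser clause class back to its llvm::omp::Clause enumerator
// through a chain of `if constexpr` tests on the template parameter A.
// This is meant to form the body of a function template; a minimal sketch,
// assuming a consumer-defined wrapper:
//
//   template <typename A> constexpr llvm::omp::Clause getClauseKind() {
//   #define GEN_FLANG_CLAUSE_PARSER_KIND_MAP
//   #include "llvm/Frontend/OpenMP/OMP.inc"
//   }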

if constexpr (std::is_same_v<A, parser::OmpClause::AcqRel>)
  return llvm::omp::Clause::OMPC_acq_rel;
if constexpr (std::is_same_v<A, parser::OmpClause::Acquire>)
  return llvm::omp::Clause::OMPC_acquire;
if constexpr (std::is_same_v<A, parser::OmpClause::AdjustArgs>)
  return llvm::omp::Clause::OMPC_adjust_args;
if constexpr (std::is_same_v<A, parser::OmpClause::Affinity>)
  return llvm::omp::Clause::OMPC_affinity;
if constexpr (std::is_same_v<A, parser::OmpClause::Align>)
  return llvm::omp::Clause::OMPC_align;
if constexpr (std::is_same_v<A, parser::OmpClause::Aligned>)
  return llvm::omp::Clause::OMPC_aligned;
if constexpr (std::is_same_v<A, parser::OmpClause::Allocate>)
  return llvm::omp::Clause::OMPC_allocate;
if constexpr (std::is_same_v<A, parser::OmpClause::Allocator>)
  return llvm::omp::Clause::OMPC_allocator;
if constexpr (std::is_same_v<A, parser::OmpClause::AppendArgs>)
  return llvm::omp::Clause::OMPC_append_args;
if constexpr (std::is_same_v<A, parser::OmpClause::At>)
  return llvm::omp::Clause::OMPC_at;
if constexpr (std::is_same_v<A, parser::OmpClause::AtomicDefaultMemOrder>)
  return llvm::omp::Clause::OMPC_atomic_default_mem_order;
if constexpr (std::is_same_v<A, parser::OmpClause::Bind>)
  return llvm::omp::Clause::OMPC_bind;
if constexpr (std::is_same_v<A, parser::OmpClause::CancellationConstructType>)
  return llvm::omp::Clause::OMPC_cancellation_construct_type;
if constexpr (std::is_same_v<A, parser::OmpClause::Capture>)
  return llvm::omp::Clause::OMPC_capture;
if constexpr (std::is_same_v<A, parser::OmpClause::Collapse>)
  return llvm::omp::Clause::OMPC_collapse;
if constexpr (std::is_same_v<A, parser::OmpClause::Compare>)
  return llvm::omp::Clause::OMPC_compare;
if constexpr (std::is_same_v<A, parser::OmpClause::Copyprivate>)
  return llvm::omp::Clause::OMPC_copyprivate;
if constexpr (std::is_same_v<A, parser::OmpClause::Copyin>)
  return llvm::omp::Clause::OMPC_copyin;
if constexpr (std::is_same_v<A, parser::OmpClause::Default>)
  return llvm::omp::Clause::OMPC_default;
if constexpr (std::is_same_v<A, parser::OmpClause::Defaultmap>)
  return llvm::omp::Clause::OMPC_defaultmap;
if constexpr (std::is_same_v<A, parser::OmpClause::Depend>)
  return llvm::omp::Clause::OMPC_depend;
if constexpr (std::is_same_v<A, parser::OmpClause::Depobj>)
  return llvm::omp::Clause::OMPC_depobj;
if constexpr (std::is_same_v<A, parser::OmpClause::Destroy>)
  return llvm::omp::Clause::OMPC_destroy;
if constexpr (std::is_same_v<A, parser::OmpClause::Detach>)
  return llvm::omp::Clause::OMPC_detach;
if constexpr (std::is_same_v<A, parser::OmpClause::Device>)
  return llvm::omp::Clause::OMPC_device;
if constexpr (std::is_same_v<A, parser::OmpClause::DeviceType>)
  return llvm::omp::Clause::OMPC_device_type;
if constexpr (std::is_same_v<A, parser::OmpClause::DistSchedule>)
  return llvm::omp::Clause::OMPC_dist_schedule;
if constexpr (std::is_same_v<A, parser::OmpClause::Doacross>)
  return llvm::omp::Clause::OMPC_doacross;
if constexpr (std::is_same_v<A, parser::OmpClause::DynamicAllocators>)
  return llvm::omp::Clause::OMPC_dynamic_allocators;
if constexpr (std::is_same_v<A, parser::OmpClause::Exclusive>)
  return llvm::omp::Clause::OMPC_exclusive;
if constexpr (std::is_same_v<A, parser::OmpClause::Filter>)
  return llvm::omp::Clause::OMPC_filter;
if constexpr (std::is_same_v<A, parser::OmpClause::Final>)
  return llvm::omp::Clause::OMPC_final;
if constexpr (std::is_same_v<A, parser::OmpClause::Firstprivate>)
  return llvm::omp::Clause::OMPC_firstprivate;
if constexpr (std::is_same_v<A, parser::OmpClause::Flush>)
  return llvm::omp::Clause::OMPC_flush;
if constexpr (std::is_same_v<A, parser::OmpClause::From>)
  return llvm::omp::Clause::OMPC_from;
if constexpr (std::is_same_v<A, parser::OmpClause::Full>)
  return llvm::omp::Clause::OMPC_full;
if constexpr (std::is_same_v<A, parser::OmpClause::Grainsize>)
  return llvm::omp::Clause::OMPC_grainsize;
if constexpr (std::is_same_v<A, parser::OmpClause::HasDeviceAddr>)
  return llvm::omp::Clause::OMPC_has_device_addr;
if constexpr (std::is_same_v<A, parser::OmpClause::Hint>)
  return llvm::omp::Clause::OMPC_hint;
if constexpr (std::is_same_v<A, parser::OmpClause::If>)
  return llvm::omp::Clause::OMPC_if;
if constexpr (std::is_same_v<A, parser::OmpClause::InReduction>)
  return llvm::omp::Clause::OMPC_in_reduction;
if constexpr (std::is_same_v<A, parser::OmpClause::Inbranch>)
  return llvm::omp::Clause::OMPC_inbranch;
if constexpr (std::is_same_v<A, parser::OmpClause::Inclusive>)
  return llvm::omp::Clause::OMPC_inclusive;
if constexpr (std::is_same_v<A, parser::OmpClause::Indirect>)
  return llvm::omp::Clause::OMPC_indirect;
if constexpr (std::is_same_v<A, parser::OmpClause::Init>)
  return llvm::omp::Clause::OMPC_init;
if constexpr (std::is_same_v<A, parser::OmpClause::IsDevicePtr>)
  return llvm::omp::Clause::OMPC_is_device_ptr;
if constexpr (std::is_same_v<A, parser::OmpClause::Lastprivate>)
  return llvm::omp::Clause::OMPC_lastprivate;
if constexpr (std::is_same_v<A, parser::OmpClause::Linear>)
  return llvm::omp::Clause::OMPC_linear;
if constexpr (std::is_same_v<A, parser::OmpClause::Link>)
  return llvm::omp::Clause::OMPC_link;
if constexpr (std::is_same_v<A, parser::OmpClause::Map>)
  return llvm::omp::Clause::OMPC_map;
if constexpr (std::is_same_v<A, parser::OmpClause::Match>)
  return llvm::omp::Clause::OMPC_match;
if constexpr (std::is_same_v<A, parser::OmpClause::MemoryOrder>)
  return llvm::omp::Clause::OMPC_memory_order;
if constexpr (std::is_same_v<A, parser::OmpClause::Mergeable>)
  return llvm::omp::Clause::OMPC_mergeable;
if constexpr (std::is_same_v<A, parser::OmpClause::Message>)
  return llvm::omp::Clause::OMPC_message;
if constexpr (std::is_same_v<A, parser::OmpClause::Nogroup>)
  return llvm::omp::Clause::OMPC_nogroup;
if constexpr (std::is_same_v<A, parser::OmpClause::Nowait>)
  return llvm::omp::Clause::OMPC_nowait;
if constexpr (std::is_same_v<A, parser::OmpClause::Nocontext>)
  return llvm::omp::Clause::OMPC_nocontext;
if constexpr (std::is_same_v<A, parser::OmpClause::Nontemporal>)
  return llvm::omp::Clause::OMPC_nontemporal;
if constexpr (std::is_same_v<A, parser::OmpClause::Notinbranch>)
  return llvm::omp::Clause::OMPC_notinbranch;
if constexpr (std::is_same_v<A, parser::OmpClause::Novariants>)
  return llvm::omp::Clause::OMPC_novariants;
if constexpr (std::is_same_v<A, parser::OmpClause::NumTasks>)
  return llvm::omp::Clause::OMPC_num_tasks;
if constexpr (std::is_same_v<A, parser::OmpClause::NumTeams>)
  return llvm::omp::Clause::OMPC_num_teams;
if constexpr (std::is_same_v<A, parser::OmpClause::NumThreads>)
  return llvm::omp::Clause::OMPC_num_threads;
if constexpr (std::is_same_v<A, parser::OmpClause::OmpxDynCgroupMem>)
  return llvm::omp::Clause::OMPC_ompx_dyn_cgroup_mem;
if constexpr (std::is_same_v<A, parser::OmpClause::Order>)
  return llvm::omp::Clause::OMPC_order;
if constexpr (std::is_same_v<A, parser::OmpClause::Ordered>)
  return llvm::omp::Clause::OMPC_ordered;
if constexpr (std::is_same_v<A, parser::OmpClause::Partial>)
  return llvm::omp::Clause::OMPC_partial;
if constexpr (std::is_same_v<A, parser::OmpClause::Priority>)
  return llvm::omp::Clause::OMPC_priority;
if constexpr (std::is_same_v<A, parser::OmpClause::Private>)
  return llvm::omp::Clause::OMPC_private;
if constexpr (std::is_same_v<A, parser::OmpClause::ProcBind>)
  return llvm::omp::Clause::OMPC_proc_bind;
if constexpr (std::is_same_v<A, parser::OmpClause::Read>)
  return llvm::omp::Clause::OMPC_read;
if constexpr (std::is_same_v<A, parser::OmpClause::Reduction>)
  return llvm::omp::Clause::OMPC_reduction;
if constexpr (std::is_same_v<A, parser::OmpClause::Relaxed>)
  return llvm::omp::Clause::OMPC_relaxed;
if constexpr (std::is_same_v<A, parser::OmpClause::Release>)
  return llvm::omp::Clause::OMPC_release;
if constexpr (std::is_same_v<A, parser::OmpClause::ReverseOffload>)
  return llvm::omp::Clause::OMPC_reverse_offload;
if constexpr (std::is_same_v<A, parser::OmpClause::Safelen>)
  return llvm::omp::Clause::OMPC_safelen;
if constexpr (std::is_same_v<A, parser::OmpClause::Schedule>)
  return llvm::omp::Clause::OMPC_schedule;
if constexpr (std::is_same_v<A, parser::OmpClause::SeqCst>)
  return llvm::omp::Clause::OMPC_seq_cst;
if constexpr (std::is_same_v<A, parser::OmpClause::Severity>)
  return llvm::omp::Clause::OMPC_severity;
if constexpr (std::is_same_v<A, parser::OmpClause::Shared>)
  return llvm::omp::Clause::OMPC_shared;
if constexpr (std::is_same_v<A, parser::OmpClause::Simd>)
  return llvm::omp::Clause::OMPC_simd;
if constexpr (std::is_same_v<A, parser::OmpClause::Simdlen>)
  return llvm::omp::Clause::OMPC_simdlen;
if constexpr (std::is_same_v<A, parser::OmpClause::Sizes>)
  return llvm::omp::Clause::OMPC_sizes;
if constexpr (std::is_same_v<A, parser::OmpClause::TaskReduction>)
  return llvm::omp::Clause::OMPC_task_reduction;
if constexpr (std::is_same_v<A, parser::OmpClause::ThreadLimit>)
  return llvm::omp::Clause::OMPC_thread_limit;
if constexpr (std::is_same_v<A, parser::OmpClause::Threadprivate>)
  return llvm::omp::Clause::OMPC_threadprivate;
if constexpr (std::is_same_v<A, parser::OmpClause::Threads>)
  return llvm::omp::Clause::OMPC_threads;
if constexpr (std::is_same_v<A, parser::OmpClause::To>)
  return llvm::omp::Clause::OMPC_to;
if constexpr (std::is_same_v<A, parser::OmpClause::UnifiedAddress>)
  return llvm::omp::Clause::OMPC_unified_address;
if constexpr (std::is_same_v<A, parser::OmpClause::UnifiedSharedMemory>)
  return llvm::omp::Clause::OMPC_unified_shared_memory;
if constexpr (std::is_same_v<A, parser::OmpClause::Uniform>)
  return llvm::omp::Clause::OMPC_uniform;
if constexpr (std::is_same_v<A, parser::OmpClause::Unknown>)
  return llvm::omp::Clause::OMPC_unknown;
if constexpr (std::is_same_v<A, parser::OmpClause::Untied>)
  return llvm::omp::Clause::OMPC_untied;
if constexpr (std::is_same_v<A, parser::OmpClause::Update>)
  return llvm::omp::Clause::OMPC_update;
if constexpr (std::is_same_v<A, parser::OmpClause::Use>)
  return llvm::omp::Clause::OMPC_use;
if constexpr (std::is_same_v<A, parser::OmpClause::UseDeviceAddr>)
  return llvm::omp::Clause::OMPC_use_device_addr;
if constexpr (std::is_same_v<A, parser::OmpClause::UseDevicePtr>)
  return llvm::omp::Clause::OMPC_use_device_ptr;
if constexpr (std::is_same_v<A, parser::OmpClause::UsesAllocators>)
  return llvm::omp::Clause::OMPC_uses_allocators;
if constexpr (std::is_same_v<A, parser::OmpClause::When>)
  return llvm::omp::Clause::OMPC_when;
if constexpr (std::is_same_v<A, parser::OmpClause::Write>)
  return llvm::omp::Clause::OMPC_write;
llvm_unreachable("Invalid OpenMP Parser clause");

#endif // GEN_FLANG_CLAUSE_PARSER_KIND_MAP

#ifdef GEN_FLANG_CLAUSES_PARSER
#undef GEN_FLANG_CLAUSES_PARSER
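
// The alternatives below are deliberately tried from longest keyword to
// shortest (roughly reverse-alphabetical), so a short clause name such as
// "use" cannot match before "use_device_ptr" and "use_device_addr" have been
// attempted.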

TYPE_PARSER(
  "write" >> construct<OmpClause>(construct<OmpClause::Write>()) ||
  "when" >> construct<OmpClause>(construct<OmpClause::When>()) ||
  "uses_allocators" >> construct<OmpClause>(construct<OmpClause::UsesAllocators>()) ||
  "use_device_ptr" >> construct<OmpClause>(construct<OmpClause::UseDevicePtr>(parenthesized(Parser<OmpObjectList>{}))) ||
  "use_device_addr" >> construct<OmpClause>(construct<OmpClause::UseDeviceAddr>(parenthesized(Parser<OmpObjectList>{}))) ||
  "use" >> construct<OmpClause>(construct<OmpClause::Use>()) ||
  "update" >> construct<OmpClause>(construct<OmpClause::Update>()) ||
  "untied" >> construct<OmpClause>(construct<OmpClause::Untied>()) ||
  "unknown" >> construct<OmpClause>(construct<OmpClause::Unknown>()) ||
  "uniform" >> construct<OmpClause>(construct<OmpClause::Uniform>(parenthesized(nonemptyList(name)))) ||
  "unified_shared_memory" >> construct<OmpClause>(construct<OmpClause::UnifiedSharedMemory>()) ||
  "unified_address" >> construct<OmpClause>(construct<OmpClause::UnifiedAddress>()) ||
  "to" >> construct<OmpClause>(construct<OmpClause::To>(parenthesized(Parser<OmpObjectList>{}))) ||
  "threads" >> construct<OmpClause>(construct<OmpClause::Threads>()) ||
  "threadprivate" >> construct<OmpClause>(construct<OmpClause::Threadprivate>()) ||
  "thread_limit" >> construct<OmpClause>(construct<OmpClause::ThreadLimit>(parenthesized(scalarIntExpr))) ||
  "task_reduction" >> construct<OmpClause>(construct<OmpClause::TaskReduction>(parenthesized(Parser<OmpReductionClause>{}))) ||
  "sizes" >> construct<OmpClause>(construct<OmpClause::Sizes>(parenthesized(nonemptyList(scalarIntExpr)))) ||
  "simdlen" >> construct<OmpClause>(construct<OmpClause::Simdlen>(parenthesized(scalarIntConstantExpr))) ||
  "simd" >> construct<OmpClause>(construct<OmpClause::Simd>()) ||
  "shared" >> construct<OmpClause>(construct<OmpClause::Shared>(parenthesized(Parser<OmpObjectList>{}))) ||
  "severity" >> construct<OmpClause>(construct<OmpClause::Severity>()) ||
  "seq_cst" >> construct<OmpClause>(construct<OmpClause::SeqCst>()) ||
  "schedule" >> construct<OmpClause>(construct<OmpClause::Schedule>(parenthesized(Parser<OmpScheduleClause>{}))) ||
  "safelen" >> construct<OmpClause>(construct<OmpClause::Safelen>(parenthesized(scalarIntConstantExpr))) ||
  "reverse_offload" >> construct<OmpClause>(construct<OmpClause::ReverseOffload>()) ||
  "release" >> construct<OmpClause>(construct<OmpClause::Release>()) ||
  "relaxed" >> construct<OmpClause>(construct<OmpClause::Relaxed>()) ||
  "reduction" >> construct<OmpClause>(construct<OmpClause::Reduction>(parenthesized(Parser<OmpReductionClause>{}))) ||
  "read" >> construct<OmpClause>(construct<OmpClause::Read>()) ||
  "proc_bind" >> construct<OmpClause>(construct<OmpClause::ProcBind>(parenthesized(Parser<OmpProcBindClause>{}))) ||
  "private" >> construct<OmpClause>(construct<OmpClause::Private>(parenthesized(Parser<OmpObjectList>{}))) ||
  "priority" >> construct<OmpClause>(construct<OmpClause::Priority>(parenthesized(scalarIntExpr))) ||
  "partial" >> construct<OmpClause>(construct<OmpClause::Partial>(maybe(parenthesized(scalarIntConstantExpr)))) ||
  "ordered" >> construct<OmpClause>(construct<OmpClause::Ordered>(maybe(parenthesized(scalarIntConstantExpr)))) ||
  "order" >> construct<OmpClause>(construct<OmpClause::Order>(parenthesized(Parser<OmpOrderClause>{}))) ||
  "ompx_dyn_cgroup_mem" >> construct<OmpClause>(construct<OmpClause::OmpxDynCgroupMem>(parenthesized(scalarIntExpr))) ||
  "num_threads" >> construct<OmpClause>(construct<OmpClause::NumThreads>(parenthesized(scalarIntExpr))) ||
  "num_teams" >> construct<OmpClause>(construct<OmpClause::NumTeams>(parenthesized(scalarIntExpr))) ||
  "num_tasks" >> construct<OmpClause>(construct<OmpClause::NumTasks>(parenthesized(scalarIntExpr))) ||
  "nowait" >> construct<OmpClause>(construct<OmpClause::Nowait>()) ||
  "novariants" >> construct<OmpClause>(construct<OmpClause::Novariants>(parenthesized(scalarLogicalExpr))) ||
  "notinbranch" >> construct<OmpClause>(construct<OmpClause::Notinbranch>()) ||
  "nontemporal" >> construct<OmpClause>(construct<OmpClause::Nontemporal>(parenthesized(nonemptyList(name)))) ||
  "nogroup" >> construct<OmpClause>(construct<OmpClause::Nogroup>()) ||
  "nocontext" >> construct<OmpClause>(construct<OmpClause::Nocontext>(parenthesized(scalarLogicalExpr))) ||
  "message" >> construct<OmpClause>(construct<OmpClause::Message>()) ||
  "mergeable" >> construct<OmpClause>(construct<OmpClause::Mergeable>()) ||
  "memory_order" >> construct<OmpClause>(construct<OmpClause::MemoryOrder>()) ||
  "match" >> construct<OmpClause>(construct<OmpClause::Match>()) ||
  "map" >> construct<OmpClause>(construct<OmpClause::Map>(parenthesized(Parser<OmpMapClause>{}))) ||
  "link" >> construct<OmpClause>(construct<OmpClause::Link>(parenthesized(Parser<OmpObjectList>{}))) ||
  "linear" >> construct<OmpClause>(construct<OmpClause::Linear>(parenthesized(Parser<OmpLinearClause>{}))) ||
  "lastprivate" >> construct<OmpClause>(construct<OmpClause::Lastprivate>(parenthesized(Parser<OmpObjectList>{}))) ||
  "is_device_ptr" >> construct<OmpClause>(construct<OmpClause::IsDevicePtr>(parenthesized(nonemptyList(name)))) ||
  "init" >> construct<OmpClause>(construct<OmpClause::Init>()) ||
  "indirect" >> construct<OmpClause>(construct<OmpClause::Indirect>()) ||
  "inclusive" >> construct<OmpClause>(construct<OmpClause::Inclusive>()) ||
  "inbranch" >> construct<OmpClause>(construct<OmpClause::Inbranch>()) ||
  "in_reduction" >> construct<OmpClause>(construct<OmpClause::InReduction>(parenthesized(Parser<OmpInReductionClause>{}))) ||
  "if" >> construct<OmpClause>(construct<OmpClause::If>(parenthesized(Parser<OmpIfClause>{}))) ||
  "hint" >> construct<OmpClause>(construct<OmpClause::Hint>(parenthesized(Parser<ConstantExpr>{}))) ||
  "has_device_addr" >> construct<OmpClause>(construct<OmpClause::HasDeviceAddr>(parenthesized(nonemptyList(name)))) ||
  "grainsize" >> construct<OmpClause>(construct<OmpClause::Grainsize>(parenthesized(scalarIntExpr))) ||
  "full" >> construct<OmpClause>(construct<OmpClause::Full>()) ||
  "from" >> construct<OmpClause>(construct<OmpClause::From>(parenthesized(Parser<OmpObjectList>{}))) ||
  "flush" >> construct<OmpClause>(construct<OmpClause::Flush>()) ||
  "firstprivate" >> construct<OmpClause>(construct<OmpClause::Firstprivate>(parenthesized(Parser<OmpObjectList>{}))) ||
  "final" >> construct<OmpClause>(construct<OmpClause::Final>(parenthesized(scalarLogicalExpr))) ||
  "filter" >> construct<OmpClause>(construct<OmpClause::Filter>(parenthesized(scalarIntExpr))) ||
  "exclusive" >> construct<OmpClause>(construct<OmpClause::Exclusive>()) ||
  "dynamic_allocators" >> construct<OmpClause>(construct<OmpClause::DynamicAllocators>()) ||
  "doacross" >> construct<OmpClause>(construct<OmpClause::Doacross>()) ||
  "dist_schedule" >> construct<OmpClause>(construct<OmpClause::DistSchedule>(maybe(parenthesized(scalarIntExpr)))) ||
  "device_type" >> construct<OmpClause>(construct<OmpClause::DeviceType>(parenthesized(Parser<OmpDeviceTypeClause>{}))) ||
  "device" >> construct<OmpClause>(construct<OmpClause::Device>(parenthesized(Parser<OmpDeviceClause>{}))) ||
  "detach" >> construct<OmpClause>(construct<OmpClause::Detach>()) ||
  "destroy" >> construct<OmpClause>(construct<OmpClause::Destroy>()) ||
  "depobj" >> construct<OmpClause>(construct<OmpClause::Depobj>()) ||
  "depend" >> construct<OmpClause>(construct<OmpClause::Depend>(parenthesized(Parser<OmpDependClause>{}))) ||
  "defaultmap" >> construct<OmpClause>(construct<OmpClause::Defaultmap>(parenthesized(Parser<OmpDefaultmapClause>{}))) ||
  "default" >> construct<OmpClause>(construct<OmpClause::Default>(parenthesized(Parser<OmpDefaultClause>{}))) ||
  "copyprivate" >> construct<OmpClause>(construct<OmpClause::Copyprivate>(parenthesized(Parser<OmpObjectList>{}))) ||
  "copyin" >> construct<OmpClause>(construct<OmpClause::Copyin>(parenthesized(Parser<OmpObjectList>{}))) ||
  "compare" >> construct<OmpClause>(construct<OmpClause::Compare>()) ||
  "collapse" >> construct<OmpClause>(construct<OmpClause::Collapse>(parenthesized(scalarIntConstantExpr))) ||
  "capture" >> construct<OmpClause>(construct<OmpClause::Capture>()) ||
  "cancellation_construct_type" >> construct<OmpClause>(construct<OmpClause::CancellationConstructType>()) ||
  "bind" >> construct<OmpClause>(construct<OmpClause::Bind>()) ||
  "atomic_default_mem_order" >> construct<OmpClause>(construct<OmpClause::AtomicDefaultMemOrder>(parenthesized(Parser<OmpAtomicDefaultMemOrderClause>{}))) ||
  "at" >> construct<OmpClause>(construct<OmpClause::At>()) ||
  "append_args" >> construct<OmpClause>(construct<OmpClause::AppendArgs>()) ||
  "allocator" >> construct<OmpClause>(construct<OmpClause::Allocator>(parenthesized(scalarIntExpr))) ||
  "allocate" >> construct<OmpClause>(construct<OmpClause::Allocate>(parenthesized(Parser<OmpAllocateClause>{}))) ||
  "aligned" >> construct<OmpClause>(construct<OmpClause::Aligned>(parenthesized(Parser<OmpAlignedClause>{}))) ||
  "align" >> construct<OmpClause>(construct<OmpClause::Align>()) ||
  "affinity" >> construct<OmpClause>(construct<OmpClause::Affinity>()) ||
  "adjust_args" >> construct<OmpClause>(construct<OmpClause::AdjustArgs>()) ||
  "acquire" >> construct<OmpClause>(construct<OmpClause::Acquire>()) ||
  "acq_rel" >> construct<OmpClause>(construct<OmpClause::AcqRel>())
)

#endif // GEN_FLANG_CLAUSES_PARSER

#ifdef GEN_CLANG_CLAUSE_CLASS
#undef GEN_CLANG_CLAUSE_CLASS
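
// Classic X-macro protocol: a consumer defines whichever of CLAUSE,
// CLAUSE_CLASS, and CLAUSE_NO_CLASS it needs before including this block;
// any hook left undefined defaults to the empty expansions just below.
// A sketch of a consumer (macro body hypothetical):
//
//   #define CLAUSE_CLASS(Enum, Str, Class) \
//     case llvm::omp::Enum: return #Class;
//   #define GEN_CLANG_CLAUSE_CLASS
//   #include "llvm/Frontend/OpenMP/OMP.inc"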

#ifndef CLAUSE
#define CLAUSE(Enum, Str, Implicit)
#endif
#ifndef CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class)
#endif
#ifndef CLAUSE_NO_CLASS
#define CLAUSE_NO_CLASS(Enum, Str)
#endif

#define __CLAUSE(Name, Class)                      \
  CLAUSE(OMPC_##Name, #Name, /* Implicit */ false) \
  CLAUSE_CLASS(OMPC_##Name, #Name, Class)
#define __CLAUSE_NO_CLASS(Name)                    \
  CLAUSE(OMPC_##Name, #Name, /* Implicit */ false) \
  CLAUSE_NO_CLASS(OMPC_##Name, #Name)
#define __IMPLICIT_CLAUSE_CLASS(Name, Str, Class)  \
  CLAUSE(OMPC_##Name, Str, /* Implicit */ true)    \
  CLAUSE_CLASS(OMPC_##Name, Str, Class)
#define __IMPLICIT_CLAUSE_NO_CLASS(Name, Str)      \
  CLAUSE(OMPC_##Name, Str, /* Implicit */ true)    \
  CLAUSE_NO_CLASS(OMPC_##Name, Str)

__CLAUSE(acq_rel, OMPAcqRelClause)
__CLAUSE(acquire, OMPAcquireClause)
__CLAUSE_NO_CLASS(adjust_args)
__CLAUSE(affinity, OMPAffinityClause)
__CLAUSE(align, OMPAlignClause)
__CLAUSE(aligned, OMPAlignedClause)
__CLAUSE(allocate, OMPAllocateClause)
__CLAUSE(allocator, OMPAllocatorClause)
__CLAUSE_NO_CLASS(append_args)
__CLAUSE(at, OMPAtClause)
__CLAUSE(atomic_default_mem_order, OMPAtomicDefaultMemOrderClause)
__CLAUSE(bind, OMPBindClause)
__CLAUSE_NO_CLASS(cancellation_construct_type)
__CLAUSE(capture, OMPCaptureClause)
__CLAUSE(collapse, OMPCollapseClause)
__CLAUSE(compare, OMPCompareClause)
__CLAUSE(copyprivate, OMPCopyprivateClause)
__CLAUSE(copyin, OMPCopyinClause)
__CLAUSE(default, OMPDefaultClause)
__CLAUSE(defaultmap, OMPDefaultmapClause)
__CLAUSE(depend, OMPDependClause)
__IMPLICIT_CLAUSE_CLASS(depobj, "depobj", OMPDepobjClause)
__CLAUSE(destroy, OMPDestroyClause)
__CLAUSE(detach, OMPDetachClause)
__CLAUSE(device, OMPDeviceClause)
__CLAUSE_NO_CLASS(device_type)
__CLAUSE(dist_schedule, OMPDistScheduleClause)
__CLAUSE(doacross, OMPDoacrossClause)
__CLAUSE(dynamic_allocators, OMPDynamicAllocatorsClause)
__CLAUSE(exclusive, OMPExclusiveClause)
__CLAUSE(filter, OMPFilterClause)
__CLAUSE(final, OMPFinalClause)
__CLAUSE(firstprivate, OMPFirstprivateClause)
__IMPLICIT_CLAUSE_CLASS(flush, "flush", OMPFlushClause)
__CLAUSE(from, OMPFromClause)
__CLAUSE(full, OMPFullClause)
__CLAUSE(grainsize, OMPGrainsizeClause)
__CLAUSE(has_device_addr, OMPHasDeviceAddrClause)
__CLAUSE(hint, OMPHintClause)
__CLAUSE(if, OMPIfClause)
__CLAUSE(in_reduction, OMPInReductionClause)
__CLAUSE_NO_CLASS(inbranch)
__CLAUSE(inclusive, OMPInclusiveClause)
__CLAUSE_NO_CLASS(indirect)
__CLAUSE(init, OMPInitClause)
__CLAUSE(is_device_ptr, OMPIsDevicePtrClause)
__CLAUSE(lastprivate, OMPLastprivateClause)
__CLAUSE(linear, OMPLinearClause)
__CLAUSE_NO_CLASS(link)
__CLAUSE(map, OMPMapClause)
__CLAUSE_NO_CLASS(match)
__CLAUSE_NO_CLASS(memory_order)
__CLAUSE(mergeable, OMPMergeableClause)
__CLAUSE(message, OMPMessageClause)
__CLAUSE(nogroup, OMPNogroupClause)
__CLAUSE(nowait, OMPNowaitClause)
__CLAUSE(nocontext, OMPNocontextClause)
__CLAUSE(nontemporal, OMPNontemporalClause)
__CLAUSE_NO_CLASS(notinbranch)
__CLAUSE(novariants, OMPNovariantsClause)
__CLAUSE(num_tasks, OMPNumTasksClause)
__CLAUSE(num_teams, OMPNumTeamsClause)
__CLAUSE(num_threads, OMPNumThreadsClause)
__CLAUSE(ompx_dyn_cgroup_mem, OMPXDynCGroupMemClause)
__CLAUSE(order, OMPOrderClause)
__CLAUSE(ordered, OMPOrderedClause)
__CLAUSE(partial, OMPPartialClause)
__CLAUSE(priority, OMPPriorityClause)
__CLAUSE(private, OMPPrivateClause)
__CLAUSE(proc_bind, OMPProcBindClause)
__CLAUSE(read, OMPReadClause)
__CLAUSE(reduction, OMPReductionClause)
__CLAUSE(relaxed, OMPRelaxedClause)
__CLAUSE(release, OMPReleaseClause)
__CLAUSE(reverse_offload, OMPReverseOffloadClause)
__CLAUSE(safelen, OMPSafelenClause)
__CLAUSE(schedule, OMPScheduleClause)
__CLAUSE(seq_cst, OMPSeqCstClause)
__CLAUSE(severity, OMPSeverityClause)
__CLAUSE(shared, OMPSharedClause)
__CLAUSE(simd, OMPSIMDClause)
__CLAUSE(simdlen, OMPSimdlenClause)
__CLAUSE(sizes, OMPSizesClause)
__CLAUSE(task_reduction, OMPTaskReductionClause)
__CLAUSE(thread_limit, OMPThreadLimitClause)
__IMPLICIT_CLAUSE_NO_CLASS(threadprivate, "threadprivate")
__CLAUSE(threads, OMPThreadsClause)
__CLAUSE(to, OMPToClause)
__CLAUSE(unified_address, OMPUnifiedAddressClause)
__CLAUSE(unified_shared_memory, OMPUnifiedSharedMemoryClause)
__CLAUSE_NO_CLASS(uniform)
__IMPLICIT_CLAUSE_NO_CLASS(unknown, "unknown")
__CLAUSE(untied, OMPUntiedClause)
__CLAUSE(update, OMPUpdateClause)
__CLAUSE(use, OMPUseClause)
__CLAUSE(use_device_addr, OMPUseDeviceAddrClause)
__CLAUSE(use_device_ptr, OMPUseDevicePtrClause)
__CLAUSE(uses_allocators, OMPUsesAllocatorsClause)
__CLAUSE_NO_CLASS(when)
__CLAUSE(write, OMPWriteClause)

#undef __IMPLICIT_CLAUSE_NO_CLASS
#undef __IMPLICIT_CLAUSE_CLASS
#undef __CLAUSE
#undef CLAUSE_NO_CLASS
#undef CLAUSE_CLASS
#undef CLAUSE

#endif // GEN_CLANG_CLAUSE_CLASS

#ifdef GEN_DIRECTIVES_IMPL
#undef GEN_DIRECTIVES_IMPL

Directive llvm::omp::getOpenMPDirectiveKind(llvm::StringRef Str) {
  return llvm::StringSwitch<Directive>(Str)
    .Case("allocate",OMPD_allocate)
    .Case("assumes",OMPD_assumes)
    .Case("atomic",OMPD_atomic)
    .Case("barrier",OMPD_barrier)
    .Case("begin assumes",OMPD_begin_assumes)
    .Case("begin declare target",OMPD_begin_declare_target)
    .Case("begin declare variant",OMPD_begin_declare_variant)
    .Case("cancel",OMPD_cancel)
    .Case("cancellation point",OMPD_cancellation_point)
    .Case("critical",OMPD_critical)
    .Case("declare mapper",OMPD_declare_mapper)
    .Case("declare reduction",OMPD_declare_reduction)
    .Case("declare simd",OMPD_declare_simd)
    .Case("declare target",OMPD_declare_target)
    .Case("declare variant",OMPD_declare_variant)
    .Case("depobj",OMPD_depobj)
    .Case("distribute",OMPD_distribute)
    .Case("distribute parallel do",OMPD_distribute_parallel_do)
    .Case("distribute parallel do simd",OMPD_distribute_parallel_do_simd)
    .Case("distribute parallel for",OMPD_distribute_parallel_for)
    .Case("distribute parallel for simd",OMPD_distribute_parallel_for_simd)
    .Case("distribute simd",OMPD_distribute_simd)
    .Case("do",OMPD_do)
    .Case("do simd",OMPD_do_simd)
    .Case("end assumes",OMPD_end_assumes)
    .Case("end declare target",OMPD_end_declare_target)
    .Case("end declare variant",OMPD_end_declare_variant)
    .Case("end do",OMPD_end_do)
    .Case("end do simd",OMPD_end_do_simd)
    .Case("end sections",OMPD_end_sections)
    .Case("end single",OMPD_end_single)
    .Case("end workshare",OMPD_end_workshare)
    .Case("error",OMPD_error)
    .Case("flush",OMPD_flush)
    .Case("for",OMPD_for)
    .Case("for simd",OMPD_for_simd)
    .Case("masked taskloop",OMPD_masked_taskloop)
    .Case("masked taskloop simd",OMPD_masked_taskloop_simd)
    .Case("master",OMPD_master)
    .Case("master taskloop",OMPD_master_taskloop)
    .Case("master taskloop simd",OMPD_master_taskloop_simd)
    .Case("metadirective",OMPD_metadirective)
    .Case("nothing",OMPD_nothing)
    .Case("ordered",OMPD_ordered)
    .Case("parallel",OMPD_parallel)
    .Case("parallel do",OMPD_parallel_do)
    .Case("parallel do simd",OMPD_parallel_do_simd)
    .Case("parallel for",OMPD_parallel_for)
    .Case("parallel for simd",OMPD_parallel_for_simd)
    .Case("parallel masked",OMPD_parallel_masked)
    .Case("parallel masked taskloop",OMPD_parallel_masked_taskloop)
    .Case("parallel masked taskloop simd",OMPD_parallel_masked_taskloop_simd)
    .Case("parallel master",OMPD_parallel_master)
    .Case("parallel master taskloop",OMPD_parallel_master_taskloop)
    .Case("parallel master taskloop simd",OMPD_parallel_master_taskloop_simd)
    .Case("parallel sections",OMPD_parallel_sections)
    .Case("parallel workshare",OMPD_parallel_workshare)
    .Case("requires",OMPD_requires)
    .Case("scan",OMPD_scan)
    .Case("section",OMPD_section)
    .Case("sections",OMPD_sections)
    .Case("simd",OMPD_simd)
    .Case("single",OMPD_single)
    .Case("target",OMPD_target)
    .Case("target data",OMPD_target_data)
    .Case("target enter data",OMPD_target_enter_data)
    .Case("target exit data",OMPD_target_exit_data)
    .Case("target parallel",OMPD_target_parallel)
    .Case("target parallel do",OMPD_target_parallel_do)
    .Case("target parallel do simd",OMPD_target_parallel_do_simd)
    .Case("target parallel for",OMPD_target_parallel_for)
    .Case("target parallel for simd",OMPD_target_parallel_for_simd)
    .Case("target simd",OMPD_target_simd)
    .Case("target teams",OMPD_target_teams)
    .Case("target teams distribute",OMPD_target_teams_distribute)
    .Case("target teams distribute parallel do",OMPD_target_teams_distribute_parallel_do)
    .Case("target teams distribute parallel do simd",OMPD_target_teams_distribute_parallel_do_simd)
    .Case("target teams distribute parallel for",OMPD_target_teams_distribute_parallel_for)
    .Case("target teams distribute parallel for simd",OMPD_target_teams_distribute_parallel_for_simd)
    .Case("target teams distribute simd",OMPD_target_teams_distribute_simd)
    .Case("target update",OMPD_target_update)
    .Case("task",OMPD_task)
    .Case("taskgroup",OMPD_taskgroup)
    .Case("taskloop",OMPD_taskloop)
    .Case("taskloop simd",OMPD_taskloop_simd)
    .Case("taskwait",OMPD_taskwait)
    .Case("taskyield",OMPD_taskyield)
    .Case("teams",OMPD_teams)
    .Case("teams distribute",OMPD_teams_distribute)
    .Case("teams distribute parallel do",OMPD_teams_distribute_parallel_do)
    .Case("teams distribute parallel do simd",OMPD_teams_distribute_parallel_do_simd)
    .Case("teams distribute parallel for",OMPD_teams_distribute_parallel_for)
    .Case("teams distribute parallel for simd",OMPD_teams_distribute_parallel_for_simd)
    .Case("teams distribute simd",OMPD_teams_distribute_simd)
    .Case("threadprivate",OMPD_threadprivate)
    .Case("tile",OMPD_tile)
    .Case("unknown",OMPD_unknown)
    .Case("unroll",OMPD_unroll)
    .Case("workshare",OMPD_workshare)
    .Case("dispatch",OMPD_dispatch)
    .Case("interop",OMPD_interop)
    .Case("loop",OMPD_loop)
    .Case("masked",OMPD_masked)
    .Case("parallel loop",OMPD_parallel_loop)
    .Case("target parallel loop",OMPD_target_parallel_loop)
    .Case("target teams loop",OMPD_target_teams_loop)
    .Case("teams loop",OMPD_teams_loop)
    .Default(OMPD_unknown);
}
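
// getOpenMPDirectiveKind and getOpenMPDirectiveName are inverse maps over
// the directive spellings (compound directives use space-separated names,
// e.g. "target teams distribute"); unrecognized strings fold to OMPD_unknown
// rather than asserting, so round trips hold for every valid kind:
//
//   assert(llvm::omp::getOpenMPDirectiveKind(
//              llvm::omp::getOpenMPDirectiveName(OMPD_task)) == OMPD_task);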

llvm::StringRef llvm::omp::getOpenMPDirectiveName(Directive Kind) {
  switch (Kind) {
    case OMPD_allocate:
      return "allocate";
    case OMPD_assumes:
      return "assumes";
    case OMPD_atomic:
      return "atomic";
    case OMPD_barrier:
      return "barrier";
    case OMPD_begin_assumes:
      return "begin assumes";
    case OMPD_begin_declare_target:
      return "begin declare target";
    case OMPD_begin_declare_variant:
      return "begin declare variant";
    case OMPD_cancel:
      return "cancel";
    case OMPD_cancellation_point:
      return "cancellation point";
    case OMPD_critical:
      return "critical";
    case OMPD_declare_mapper:
      return "declare mapper";
    case OMPD_declare_reduction:
      return "declare reduction";
    case OMPD_declare_simd:
      return "declare simd";
    case OMPD_declare_target:
      return "declare target";
    case OMPD_declare_variant:
      return "declare variant";
    case OMPD_depobj:
      return "depobj";
    case OMPD_distribute:
      return "distribute";
    case OMPD_distribute_parallel_do:
      return "distribute parallel do";
    case OMPD_distribute_parallel_do_simd:
      return "distribute parallel do simd";
    case OMPD_distribute_parallel_for:
      return "distribute parallel for";
    case OMPD_distribute_parallel_for_simd:
      return "distribute parallel for simd";
    case OMPD_distribute_simd:
      return "distribute simd";
    case OMPD_do:
      return "do";
    case OMPD_do_simd:
      return "do simd";
    case OMPD_end_assumes:
      return "end assumes";
    case OMPD_end_declare_target:
      return "end declare target";
    case OMPD_end_declare_variant:
      return "end declare variant";
    case OMPD_end_do:
      return "end do";
    case OMPD_end_do_simd:
      return "end do simd";
    case OMPD_end_sections:
      return "end sections";
    case OMPD_end_single:
      return "end single";
    case OMPD_end_workshare:
      return "end workshare";
    case OMPD_error:
      return "error";
    case OMPD_flush:
      return "flush";
    case OMPD_for:
      return "for";
    case OMPD_for_simd:
      return "for simd";
    case OMPD_masked_taskloop:
      return "masked taskloop";
    case OMPD_masked_taskloop_simd:
      return "masked taskloop simd";
    case OMPD_master:
      return "master";
    case OMPD_master_taskloop:
      return "master taskloop";
    case OMPD_master_taskloop_simd:
      return "master taskloop simd";
    case OMPD_metadirective:
      return "metadirective";
    case OMPD_nothing:
      return "nothing";
    case OMPD_ordered:
      return "ordered";
    case OMPD_parallel:
      return "parallel";
    case OMPD_parallel_do:
      return "parallel do";
    case OMPD_parallel_do_simd:
      return "parallel do simd";
    case OMPD_parallel_for:
      return "parallel for";
    case OMPD_parallel_for_simd:
      return "parallel for simd";
    case OMPD_parallel_masked:
      return "parallel masked";
    case OMPD_parallel_masked_taskloop:
      return "parallel masked taskloop";
    case OMPD_parallel_masked_taskloop_simd:
      return "parallel masked taskloop simd";
    case OMPD_parallel_master:
      return "parallel master";
    case OMPD_parallel_master_taskloop:
      return "parallel master taskloop";
    case OMPD_parallel_master_taskloop_simd:
      return "parallel master taskloop simd";
    case OMPD_parallel_sections:
      return "parallel sections";
    case OMPD_parallel_workshare:
      return "parallel workshare";
    case OMPD_requires:
      return "requires";
    case OMPD_scan:
      return "scan";
    case OMPD_section:
      return "section";
    case OMPD_sections:
      return "sections";
    case OMPD_simd:
      return "simd";
    case OMPD_single:
      return "single";
    case OMPD_target:
      return "target";
    case OMPD_target_data:
      return "target data";
    case OMPD_target_enter_data:
      return "target enter data";
    case OMPD_target_exit_data:
      return "target exit data";
    case OMPD_target_parallel:
      return "target parallel";
    case OMPD_target_parallel_do:
      return "target parallel do";
    case OMPD_target_parallel_do_simd:
      return "target parallel do simd";
    case OMPD_target_parallel_for:
      return "target parallel for";
    case OMPD_target_parallel_for_simd:
      return "target parallel for simd";
    case OMPD_target_simd:
      return "target simd";
    case OMPD_target_teams:
      return "target teams";
    case OMPD_target_teams_distribute:
      return "target teams distribute";
    case OMPD_target_teams_distribute_parallel_do:
      return "target teams distribute parallel do";
    case OMPD_target_teams_distribute_parallel_do_simd:
      return "target teams distribute parallel do simd";
    case OMPD_target_teams_distribute_parallel_for:
      return "target teams distribute parallel for";
    case OMPD_target_teams_distribute_parallel_for_simd:
      return "target teams distribute parallel for simd";
    case OMPD_target_teams_distribute_simd:
      return "target teams distribute simd";
    case OMPD_target_update:
      return "target update";
    case OMPD_task:
      return "task";
    case OMPD_taskgroup:
      return "taskgroup";
    case OMPD_taskloop:
      return "taskloop";
    case OMPD_taskloop_simd:
      return "taskloop simd";
    case OMPD_taskwait:
      return "taskwait";
    case OMPD_taskyield:
      return "taskyield";
    case OMPD_teams:
      return "teams";
    case OMPD_teams_distribute:
      return "teams distribute";
    case OMPD_teams_distribute_parallel_do:
      return "teams distribute parallel do";
    case OMPD_teams_distribute_parallel_do_simd:
      return "teams distribute parallel do simd";
    case OMPD_teams_distribute_parallel_for:
      return "teams distribute parallel for";
    case OMPD_teams_distribute_parallel_for_simd:
      return "teams distribute parallel for simd";
    case OMPD_teams_distribute_simd:
      return "teams distribute simd";
    case OMPD_threadprivate:
      return "threadprivate";
    case OMPD_tile:
      return "tile";
    case OMPD_unknown:
      return "unknown";
    case OMPD_unroll:
      return "unroll";
    case OMPD_workshare:
      return "workshare";
    case OMPD_dispatch:
      return "dispatch";
    case OMPD_interop:
      return "interop";
    case OMPD_loop:
      return "loop";
    case OMPD_masked:
      return "masked";
    case OMPD_parallel_loop:
      return "parallel loop";
    case OMPD_target_parallel_loop:
      return "target parallel loop";
    case OMPD_target_teams_loop:
      return "target teams loop";
    case OMPD_teams_loop:
      return "teams loop";
  }
  llvm_unreachable("Invalid OpenMP Directive kind");
}

Clause llvm::omp::getOpenMPClauseKind(llvm::StringRef Str) {
  return llvm::StringSwitch<Clause>(Str)
    .Case("acq_rel",OMPC_acq_rel)
    .Case("acquire",OMPC_acquire)
    .Case("adjust_args",OMPC_adjust_args)
    .Case("affinity",OMPC_affinity)
    .Case("align",OMPC_align)
    .Case("aligned",OMPC_aligned)
    .Case("allocate",OMPC_allocate)
    .Case("allocator",OMPC_allocator)
    .Case("append_args",OMPC_append_args)
    .Case("at",OMPC_at)
    .Case("atomic_default_mem_order",OMPC_atomic_default_mem_order)
    .Case("bind",OMPC_bind)
    .Case("cancellation_construct_type",OMPC_cancellation_construct_type)
    .Case("capture",OMPC_capture)
    .Case("collapse",OMPC_collapse)
    .Case("compare",OMPC_compare)
    .Case("copyprivate",OMPC_copyprivate)
    .Case("copyin",OMPC_copyin)
    .Case("default",OMPC_default)
    .Case("defaultmap",OMPC_defaultmap)
    .Case("depend",OMPC_depend)
    .Case("depobj",OMPC_unknown)
    .Case("destroy",OMPC_destroy)
    .Case("detach",OMPC_detach)
    .Case("device",OMPC_device)
    .Case("device_type",OMPC_device_type)
    .Case("dist_schedule",OMPC_dist_schedule)
    .Case("doacross",OMPC_doacross)
    .Case("dynamic_allocators",OMPC_dynamic_allocators)
    .Case("exclusive",OMPC_exclusive)
    .Case("filter",OMPC_filter)
    .Case("final",OMPC_final)
    .Case("firstprivate",OMPC_firstprivate)
    .Case("flush",OMPC_unknown)
    .Case("from",OMPC_from)
    .Case("full",OMPC_full)
    .Case("grainsize",OMPC_grainsize)
    .Case("has_device_addr",OMPC_has_device_addr)
    .Case("hint",OMPC_hint)
    .Case("if",OMPC_if)
    .Case("in_reduction",OMPC_in_reduction)
    .Case("inbranch",OMPC_inbranch)
    .Case("inclusive",OMPC_inclusive)
    .Case("indirect",OMPC_indirect)
    .Case("init",OMPC_init)
    .Case("is_device_ptr",OMPC_is_device_ptr)
    .Case("lastprivate",OMPC_lastprivate)
    .Case("linear",OMPC_linear)
    .Case("link",OMPC_link)
    .Case("map",OMPC_map)
    .Case("match",OMPC_match)
    .Case("memory_order",OMPC_memory_order)
    .Case("mergeable",OMPC_mergeable)
    .Case("message",OMPC_message)
    .Case("nogroup",OMPC_nogroup)
    .Case("nowait",OMPC_nowait)
    .Case("nocontext",OMPC_nocontext)
    .Case("nontemporal",OMPC_nontemporal)
    .Case("notinbranch",OMPC_notinbranch)
    .Case("novariants",OMPC_novariants)
    .Case("num_tasks",OMPC_num_tasks)
    .Case("num_teams",OMPC_num_teams)
    .Case("num_threads",OMPC_num_threads)
    .Case("ompx_dyn_cgroup_mem",OMPC_ompx_dyn_cgroup_mem)
    .Case("order",OMPC_order)
    .Case("ordered",OMPC_ordered)
    .Case("partial",OMPC_partial)
    .Case("priority",OMPC_priority)
    .Case("private",OMPC_private)
    .Case("proc_bind",OMPC_proc_bind)
    .Case("read",OMPC_read)
    .Case("reduction",OMPC_reduction)
    .Case("relaxed",OMPC_relaxed)
    .Case("release",OMPC_release)
    .Case("reverse_offload",OMPC_reverse_offload)
    .Case("safelen",OMPC_safelen)
    .Case("schedule",OMPC_schedule)
    .Case("seq_cst",OMPC_seq_cst)
    .Case("severity",OMPC_severity)
    .Case("shared",OMPC_shared)
    .Case("simd",OMPC_simd)
    .Case("simdlen",OMPC_simdlen)
    .Case("sizes",OMPC_sizes)
    .Case("task_reduction",OMPC_task_reduction)
    .Case("thread_limit",OMPC_thread_limit)
    .Case("threadprivate",OMPC_unknown)
    .Case("threads",OMPC_threads)
    .Case("to",OMPC_to)
    .Case("unified_address",OMPC_unified_address)
    .Case("unified_shared_memory",OMPC_unified_shared_memory)
    .Case("uniform",OMPC_uniform)
    .Case("unknown",OMPC_unknown)
    .Case("untied",OMPC_untied)
    .Case("update",OMPC_update)
    .Case("use",OMPC_use)
    .Case("use_device_addr",OMPC_use_device_addr)
    .Case("use_device_ptr",OMPC_use_device_ptr)
    .Case("uses_allocators",OMPC_uses_allocators)
    .Case("when",OMPC_when)
    .Case("write",OMPC_write)
    .Default(OMPC_unknown);
}
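
// "depobj", "flush", and "threadprivate" deliberately map to OMPC_unknown
// here: the clause table above marks them implicit, meaning they are never
// spelled by the user as clauses and so must not be recognized when parsing
// clause names.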

llvm::StringRef llvm::omp::getOpenMPClauseName(Clause Kind) {
  switch (Kind) {
    case OMPC_acq_rel:
      return "acq_rel";
    case OMPC_acquire:
      return "acquire";
    case OMPC_adjust_args:
      return "adjust_args";
    case OMPC_affinity:
      return "affinity";
    case OMPC_align:
      return "align";
    case OMPC_aligned:
      return "aligned";
    case OMPC_allocate:
      return "allocate";
    case OMPC_allocator:
      return "allocator";
    case OMPC_append_args:
      return "append_args";
    case OMPC_at:
      return "at";
    case OMPC_atomic_default_mem_order:
      return "atomic_default_mem_order";
    case OMPC_bind:
      return "bind";
    case OMPC_cancellation_construct_type:
      return "cancellation_construct_type";
    case OMPC_capture:
      return "capture";
    case OMPC_collapse:
      return "collapse";
    case OMPC_compare:
      return "compare";
    case OMPC_copyprivate:
      return "copyprivate";
    case OMPC_copyin:
      return "copyin";
    case OMPC_default:
      return "default";
    case OMPC_defaultmap:
      return "defaultmap";
    case OMPC_depend:
      return "depend";
    case OMPC_depobj:
      return "depobj";
    case OMPC_destroy:
      return "destroy";
    case OMPC_detach:
      return "detach";
    case OMPC_device:
      return "device";
    case OMPC_device_type:
      return "device_type";
    case OMPC_dist_schedule:
      return "dist_schedule";
    case OMPC_doacross:
      return "doacross";
    case OMPC_dynamic_allocators:
      return "dynamic_allocators";
    case OMPC_exclusive:
      return "exclusive";
    case OMPC_filter:
      return "filter";
    case OMPC_final:
      return "final";
    case OMPC_firstprivate:
      return "firstprivate";
    case OMPC_flush:
      return "flush";
    case OMPC_from:
      return "from";
    case OMPC_full:
      return "full";
    case OMPC_grainsize:
      return "grainsize";
    case OMPC_has_device_addr:
      return "has_device_addr";
    case OMPC_hint:
      return "hint";
    case OMPC_if:
      return "if";
    case OMPC_in_reduction:
      return "in_reduction";
    case OMPC_inbranch:
      return "inbranch";
    case OMPC_inclusive:
      return "inclusive";
    case OMPC_indirect:
      return "indirect";
    case OMPC_init:
      return "init";
    case OMPC_is_device_ptr:
      return "is_device_ptr";
    case OMPC_lastprivate:
      return "lastprivate";
    case OMPC_linear:
      return "linear";
    case OMPC_link:
      return "link";
    case OMPC_map:
      return "map";
    case OMPC_match:
      return "match";
    case OMPC_memory_order:
      return "memory_order";
    case OMPC_mergeable:
      return "mergeable";
    case OMPC_message:
      return "message";
    case OMPC_nogroup:
      return "nogroup";
    case OMPC_nowait:
      return "nowait";
    case OMPC_nocontext:
      return "nocontext";
    case OMPC_nontemporal:
      return "nontemporal";
    case OMPC_notinbranch:
      return "notinbranch";
    case OMPC_novariants:
      return "novariants";
    case OMPC_num_tasks:
      return "num_tasks";
    case OMPC_num_teams:
      return "num_teams";
    case OMPC_num_threads:
      return "num_threads";
    case OMPC_ompx_dyn_cgroup_mem:
      return "ompx_dyn_cgroup_mem";
    case OMPC_order:
      return "order";
    case OMPC_ordered:
      return "ordered";
    case OMPC_partial:
      return "partial";
    case OMPC_priority:
      return "priority";
    case OMPC_private:
      return "private";
    case OMPC_proc_bind:
      return "proc_bind";
    case OMPC_read:
      return "read";
    case OMPC_reduction:
      return "reduction";
    case OMPC_relaxed:
      return "relaxed";
    case OMPC_release:
      return "release";
    case OMPC_reverse_offload:
      return "reverse_offload";
    case OMPC_safelen:
      return "safelen";
    case OMPC_schedule:
      return "schedule";
    case OMPC_seq_cst:
      return "seq_cst";
    case OMPC_severity:
      return "severity";
    case OMPC_shared:
      return "shared";
    case OMPC_simd:
      return "simd";
    case OMPC_simdlen:
      return "simdlen";
    case OMPC_sizes:
      return "sizes";
    case OMPC_task_reduction:
      return "task_reduction";
    case OMPC_thread_limit:
      return "thread_limit";
    case OMPC_threadprivate:
      return "threadprivate or thread local";
    case OMPC_threads:
      return "threads";
    case OMPC_to:
      return "to";
    case OMPC_unified_address:
      return "unified_address";
    case OMPC_unified_shared_memory:
      return "unified_shared_memory";
    case OMPC_uniform:
      return "uniform";
    case OMPC_unknown:
      return "unknown";
    case OMPC_untied:
      return "untied";
    case OMPC_update:
      return "update";
    case OMPC_use:
      return "use";
    case OMPC_use_device_addr:
      return "use_device_addr";
    case OMPC_use_device_ptr:
      return "use_device_ptr";
    case OMPC_uses_allocators:
      return "uses_allocators";
    case OMPC_when:
      return "when";
    case OMPC_write:
      return "write";
  }
  llvm_unreachable("Invalid OpenMP Clause kind");
}

CancellationConstructType llvm::omp::getCancellationConstructType(llvm::StringRef Str) {
  return llvm::StringSwitch<CancellationConstructType>(Str)
    .Case("parallel",OMP_CANCELLATION_CONSTRUCT_Parallel)
    .Case("loop",OMP_CANCELLATION_CONSTRUCT_Loop)
    .Case("sections",OMP_CANCELLATION_CONSTRUCT_Sections)
    .Case("taskgroup",OMP_CANCELLATION_CONSTRUCT_Taskgroup)
    .Case("none",OMP_CANCELLATION_CONSTRUCT_None)
    .Default(OMP_CANCELLATION_CONSTRUCT_None);
}

llvm::StringRef llvm::omp::getOpenMPCancellationConstructTypeName(llvm::omp::CancellationConstructType x) {
  switch (x) {
    case OMP_CANCELLATION_CONSTRUCT_Parallel:
      return "parallel";
    case OMP_CANCELLATION_CONSTRUCT_Loop:
      return "loop";
    case OMP_CANCELLATION_CONSTRUCT_Sections:
      return "sections";
    case OMP_CANCELLATION_CONSTRUCT_Taskgroup:
      return "taskgroup";
    case OMP_CANCELLATION_CONSTRUCT_None:
      return "none";
  }
  llvm_unreachable("Invalid OpenMP CancellationConstructType kind");
}

GrainsizeType llvm::omp::getGrainsizeType(llvm::StringRef Str) {
  return llvm::StringSwitch<GrainsizeType>(Str)
    .Case("strict",OMP_GRAINSIZE_Strict)
    .Case("unkonwn",OMP_GRAINSIZE_Unknown)
    .Default(OMP_GRAINSIZE_Unknown);
}

llvm::StringRef llvm::omp::getOpenMPGrainsizeTypeName(llvm::omp::GrainsizeType x) {
  switch (x) {
    case OMP_GRAINSIZE_Strict:
      return "strict";
    case OMP_GRAINSIZE_Unknown:
      return "unkonwn";
  }
  llvm_unreachable("Invalid OpenMP GrainsizeType kind");
}

MemoryOrderKind llvm::omp::getMemoryOrderKind(llvm::StringRef Str) {
  return llvm::StringSwitch<MemoryOrderKind>(Str)
    .Case("seq_cst",OMP_MEMORY_ORDER_SeqCst)
    .Case("acq_rel",OMP_MEMORY_ORDER_AcqRel)
    .Case("acquire",OMP_MEMORY_ORDER_Acquire)
    .Case("release",OMP_MEMORY_ORDER_Release)
    .Case("relaxed",OMP_MEMORY_ORDER_Relaxed)
    .Case("default",OMP_MEMORY_ORDER_Default)
    .Default(OMP_MEMORY_ORDER_Default);
}

llvm::StringRef llvm::omp::getOpenMPMemoryOrderKindName(llvm::omp::MemoryOrderKind x) {
  switch (x) {
    case OMP_MEMORY_ORDER_SeqCst:
      return "seq_cst";
    case OMP_MEMORY_ORDER_AcqRel:
      return "acq_rel";
    case OMP_MEMORY_ORDER_Acquire:
      return "acquire";
    case OMP_MEMORY_ORDER_Release:
      return "release";
    case OMP_MEMORY_ORDER_Relaxed:
      return "relaxed";
    case OMP_MEMORY_ORDER_Default:
      return "default";
  }
  llvm_unreachable("Invalid OpenMP MemoryOrderKind kind");
}

NumTasksType llvm::omp::getNumTasksType(llvm::StringRef Str) {
  return llvm::StringSwitch<NumTasksType>(Str)
    .Case("strict",OMP_NUMTASKS_Strict)
    .Case("unkonwn",OMP_NUMTASKS_Unknown)
    .Default(OMP_NUMTASKS_Unknown);
}

llvm::StringRef llvm::omp::getOpenMPNumTasksTypeName(llvm::omp::NumTasksType x) {
  switch (x) {
    case OMP_NUMTASKS_Strict:
      return "strict";
    case OMP_NUMTASKS_Unknown:
      return "unkonwn";
  }
  llvm_unreachable("Invalid OpenMP NumTasksType kind");
}

OrderKind llvm::omp::getOrderKind(llvm::StringRef Str) {
  return llvm::StringSwitch<OrderKind>(Str)
    .Case("unknown",OMP_ORDER_unknown)
    .Case("concurrent",OMP_ORDER_concurrent)
    .Default(OMP_ORDER_unknown);
}

llvm::StringRef llvm::omp::getOpenMPOrderKindName(llvm::omp::OrderKind x) {
  switch (x) {
    case OMP_ORDER_unknown:
      return "unknown";
    case OMP_ORDER_concurrent:
      return "concurrent";
  }
  llvm_unreachable("Invalid OpenMP OrderKind kind");
}

ProcBindKind llvm::omp::getProcBindKind(llvm::StringRef Str) {
  return llvm::StringSwitch<ProcBindKind>(Str)
    .Case("primary",OMP_PROC_BIND_primary)
    .Case("master",OMP_PROC_BIND_master)
    .Case("close",OMP_PROC_BIND_close)
    .Case("spread",OMP_PROC_BIND_spread)
    .Case("default",OMP_PROC_BIND_default)
    .Case("unknown",OMP_PROC_BIND_unknown)
    .Default(OMP_PROC_BIND_unknown);
}

llvm::StringRef llvm::omp::getOpenMPProcBindKindName(llvm::omp::ProcBindKind x) {
  switch (x) {
    case OMP_PROC_BIND_primary:
      return "primary";
    case OMP_PROC_BIND_master:
      return "master";
    case OMP_PROC_BIND_close:
      return "close";
    case OMP_PROC_BIND_spread:
      return "spread";
    case OMP_PROC_BIND_default:
      return "default";
    case OMP_PROC_BIND_unknown:
      return "unknown";
  }
  llvm_unreachable("Invalid OpenMP ProcBindKind kind");
}

ScheduleKind llvm::omp::getScheduleKind(llvm::StringRef Str) {
  return llvm::StringSwitch<ScheduleKind>(Str)
    .Case("static",OMP_SCHEDULE_Static)
    .Case("dynamic",OMP_SCHEDULE_Dynamic)
    .Case("guided",OMP_SCHEDULE_Guided)
    .Case("auto",OMP_SCHEDULE_Auto)
    .Case("runtime",OMP_SCHEDULE_Runtime)
    .Case("default",OMP_SCHEDULE_Default)
    .Default(OMP_SCHEDULE_Default);
}

llvm::StringRef llvm::omp::getOpenMPScheduleKindName(llvm::omp::ScheduleKind x) {
  switch (x) {
    case OMP_SCHEDULE_Static:
      return "static";
    case OMP_SCHEDULE_Dynamic:
      return "dynamic";
    case OMP_SCHEDULE_Guided:
      return "guided";
    case OMP_SCHEDULE_Auto:
      return "auto";
    case OMP_SCHEDULE_Runtime:
      return "runtime";
    case OMP_SCHEDULE_Default:
      return "default";
  }
  llvm_unreachable("Invalid OpenMP ScheduleKind kind");
}
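
// In isAllowedClauseForDirective below, Version is the OpenMP spec revision
// with the decimal point dropped (e.g. 45 for 4.5, 50 for 5.0, 51 for 5.1),
// and 2147483647 (INT32_MAX) stands for "no upper bound".  A sketch of a
// call site (directive/clause pair chosen arbitrarily):
//
//   // 'compare' on 'atomic' needs 5.1, so this returns false for 5.0:
//   bool Ok = llvm::omp::isAllowedClauseForDirective(
//       llvm::omp::OMPD_atomic, llvm::omp::OMPC_compare, /*Version=*/50);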

bool llvm::omp::isAllowedClauseForDirective(Directive D, Clause C, unsigned Version) {
  assert(unsigned(D) <= llvm::omp::Directive_enumSize);
  assert(unsigned(C) <= llvm::omp::Clause_enumSize);
  switch (D) {
    case OMPD_allocate:
      switch (C) {
        case OMPC_allocator:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_align:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_assumes:
      return false;
      break;
    case OMPD_atomic:
      switch (C) {
        case OMPC_read:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_write:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_update:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_capture:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_compare:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_seq_cst:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_acq_rel:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_acquire:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_release:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_relaxed:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_hint:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_barrier:
      return false;
      break;
    case OMPD_begin_assumes:
      return false;
      break;
    case OMPD_begin_declare_target:
      switch (C) {
        case OMPC_to:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_link:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device_type:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_indirect:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_begin_declare_variant:
      return false;
      break;
    case OMPD_cancel:
      switch (C) {
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_cancellation_point:
      return false;
      break;
    case OMPD_critical:
      switch (C) {
        case OMPC_hint:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_declare_mapper:
      switch (C) {
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_declare_reduction:
      return false;
      break;
    case OMPD_declare_simd:
      switch (C) {
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uniform:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_inbranch:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_notinbranch:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_declare_target:
      switch (C) {
        case OMPC_to:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_link:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_indirect:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device_type:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_declare_variant:
      switch (C) {
        case OMPC_match:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_adjust_args:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_append_args:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_depobj:
      switch (C) {
        case OMPC_depend:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_destroy:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_update:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_depobj:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_distribute:
      switch (C) {
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_distribute_parallel_do:
      switch (C) {
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ordered:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_distribute_parallel_do_simd:
      switch (C) {
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_collapse:
        case OMPC_dist_schedule:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_private:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_schedule:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_allocate:
        case OMPC_nontemporal:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_distribute_parallel_for:
      switch (C) {
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_collapse:
        case OMPC_dist_schedule:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_private:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_schedule:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_distribute_parallel_for_simd:
      switch (C) {
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_collapse:
        case OMPC_dist_schedule:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_private:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_schedule:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_distribute_simd:
      switch (C) {
        case OMPC_aligned:
        case OMPC_allocate:
        case OMPC_copyin:
        case OMPC_default:
        case OMPC_linear:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_private:
        case OMPC_reduction:
        case OMPC_collapse:
        case OMPC_dist_schedule:
        case OMPC_num_threads:
        case OMPC_ordered:
        case OMPC_proc_bind:
        case OMPC_schedule:
        case OMPC_safelen:
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_if:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_do:
      switch (C) {
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_linear:
        case OMPC_reduction:
        case OMPC_schedule:
        case OMPC_collapse:
        case OMPC_ordered:
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_do_simd:
      switch (C) {
        case OMPC_aligned:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_linear:
        case OMPC_reduction:
        case OMPC_schedule:
        case OMPC_collapse:
        case OMPC_ordered:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
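    // The Fortran "end ..." directive forms take no clauses, except for the
    // few that Fortran allows on the end line itself (nowait on end sections,
    // end single, and end workshare; copyprivate on end single), as the cases
    // below show.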
    case OMPD_end_assumes:
    case OMPD_end_declare_target:
    case OMPD_end_declare_variant:
    case OMPD_end_do:
    case OMPD_end_do_simd:
      return false;
      break;
    case OMPD_end_sections:
      switch (C) {
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_end_single:
      switch (C) {
        case OMPC_copyprivate:
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_end_workshare:
      switch (C) {
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_error:
      switch (C) {
        case OMPC_at:
        case OMPC_severity:
        case OMPC_message:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_flush:
      switch (C) {
        case OMPC_acq_rel:
        case OMPC_acquire:
        case OMPC_release:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_flush:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_for:
      switch (C) {
        case OMPC_private:
        case OMPC_lastprivate:
        case OMPC_firstprivate:
        case OMPC_reduction:
        case OMPC_collapse:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_nowait:
        case OMPC_linear:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_for_simd:
      switch (C) {
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_reduction:
        case OMPC_schedule:
        case OMPC_collapse:
        case OMPC_nowait:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_ordered:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_masked_taskloop:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_in_reduction:
        case OMPC_allocate:
        case OMPC_filter:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_masked_taskloop_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_in_reduction:
        case OMPC_allocate:
        case OMPC_filter:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
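    // The "master ..." forms below mirror the clause sets of their
    // "masked ..." counterparts above, minus the filter clause; OpenMP 5.1
    // deprecated master in favor of masked.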
    case OMPD_master:
      return false;
      break;
    case OMPD_master_taskloop:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_in_reduction:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_master_taskloop_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_in_reduction:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_metadirective:
      switch (C) {
        case OMPC_when:
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
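    // A minimal usage sketch (hypothetical caller; the enclosing predicate is
    // assumed to be llvm::omp::isAllowedClauseForDirective(Directive D,
    // Clause C, unsigned Version), inferred from the switch variables rather
    // than shown in this excerpt):
    //
    //   // "when" is accepted on metadirective at any version; "order" never is.
    //   assert(isAllowedClauseForDirective(OMPD_metadirective, OMPC_when, 50));
    //   assert(!isAllowedClauseForDirective(OMPD_metadirective, OMPC_order, 52));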
    case OMPD_nothing:
      return false;
      break;
    case OMPD_ordered:
      switch (C) {
        case OMPC_depend:
        case OMPC_threads:
        case OMPC_simd:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_doacross:
          return 52 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel:
      switch (C) {
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_allocate:
        case OMPC_default:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_do:
      switch (C) {
        case OMPC_default:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_lastprivate:
        case OMPC_linear:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_do_simd:
      switch (C) {
        case OMPC_default:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_lastprivate:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_allocate:
        case OMPC_nontemporal:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_collapse:
        case OMPC_safelen:
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_for:
      switch (C) {
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_lastprivate:
        case OMPC_collapse:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_linear:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_for_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_lastprivate:
        case OMPC_collapse:
        case OMPC_schedule:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_ordered:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_masked:
      switch (C) {
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_copyin:
        case OMPC_reduction:
        case OMPC_proc_bind:
        case OMPC_allocate:
        case OMPC_filter:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_masked_taskloop:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_allocate:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_copyin:
        case OMPC_filter:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_masked_taskloop_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_allocate:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_copyin:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_filter:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_master:
      switch (C) {
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_copyin:
        case OMPC_reduction:
        case OMPC_proc_bind:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_master_taskloop:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_allocate:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_master_taskloop_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_shared:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_default:
        case OMPC_collapse:
        case OMPC_final:
        case OMPC_untied:
        case OMPC_mergeable:
        case OMPC_priority:
        case OMPC_grainsize:
        case OMPC_nogroup:
        case OMPC_num_tasks:
        case OMPC_reduction:
        case OMPC_allocate:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_copyin:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_safelen:
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_sections:
      switch (C) {
        case OMPC_if:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_copyin:
        case OMPC_lastprivate:
        case OMPC_allocate:
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_workshare:
      switch (C) {
        case OMPC_allocate:
        case OMPC_copyin:
        case OMPC_default:
        case OMPC_firstprivate:
        case OMPC_private:
        case OMPC_reduction:
        case OMPC_shared:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
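    // For OMPD_requires, reverse_offload carries a lower bound of 99, which
    // no published spec version reaches; this looks like a deliberate
    // "not yet supported" marker rather than a real version requirement.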
    case OMPD_requires:
      switch (C) {
        case OMPC_unified_address:
        case OMPC_unified_shared_memory:
        case OMPC_dynamic_allocators:
        case OMPC_atomic_default_mem_order:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reverse_offload:
          return 99 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_scan:
      switch (C) {
        case OMPC_inclusive:
        case OMPC_exclusive:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_section:
      return false;
      break;
    case OMPD_sections:
      switch (C) {
        case OMPC_private:
        case OMPC_lastprivate:
        case OMPC_firstprivate:
        case OMPC_reduction:
        case OMPC_nowait:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_simd:
      switch (C) {
        case OMPC_private:
        case OMPC_lastprivate:
        case OMPC_linear:
        case OMPC_aligned:
        case OMPC_reduction:
        case OMPC_allocate:
        case OMPC_collapse:
        case OMPC_safelen:
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_if:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_single:
      switch (C) {
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_copyprivate:
        case OMPC_nowait:
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target:
      switch (C) {
        case OMPC_if:
        case OMPC_map:
        case OMPC_private:
        case OMPC_depend:
        case OMPC_firstprivate:
        case OMPC_is_device_ptr:
        case OMPC_reduction:
        case OMPC_allocate:
        case OMPC_device:
        case OMPC_defaultmap:
        case OMPC_nowait:
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_in_reduction:
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
        case OMPC_thread_limit:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
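    // The bounds above trace the spec history of the target clauses:
    // has_device_addr and thread_limit require OpenMP 5.1, in_reduction and
    // uses_allocators require 5.0, and ompx_dyn_cgroup_mem (the "ompx_"
    // prefix marks a vendor extension beyond the OpenMP spec) is accepted
    // everywhere.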
    case OMPD_target_data:
      switch (C) {
        case OMPC_use_device_ptr:
        case OMPC_device:
        case OMPC_if:
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_use_device_addr:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_enter_data:
      switch (C) {
        case OMPC_depend:
        case OMPC_if:
        case OMPC_device:
        case OMPC_nowait:
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_exit_data:
      switch (C) {
        case OMPC_depend:
        case OMPC_device:
        case OMPC_if:
        case OMPC_nowait:
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_parallel:
      switch (C) {
        case OMPC_map:
        case OMPC_nowait:
        case OMPC_depend:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_default:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_is_device_ptr:
        case OMPC_allocate:
        case OMPC_defaultmap:
        case OMPC_device:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_parallel_do:
      switch (C) {
        case OMPC_map:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_depend:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_linear:
        case OMPC_is_device_ptr:
        case OMPC_allocator:
        case OMPC_uses_allocators:
        case OMPC_default:
        case OMPC_copyin:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_device:
        case OMPC_defaultmap:
        case OMPC_schedule:
        case OMPC_collapse:
        case OMPC_ordered:
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_parallel_do_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_device:
        case OMPC_map:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_nowait:
        case OMPC_depend:
        case OMPC_defaultmap:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_collapse:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_linear:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_aligned:
        case OMPC_is_device_ptr:
        case OMPC_allocate:
        case OMPC_nontemporal:
        case OMPC_uses_allocators:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_parallel_for:
      switch (C) {
        case OMPC_if:
        case OMPC_device:
        case OMPC_map:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_nowait:
        case OMPC_depend:
        case OMPC_defaultmap:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_collapse:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_linear:
        case OMPC_is_device_ptr:
        case OMPC_allocate:
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_parallel_for_simd:
      switch (C) {
        case OMPC_if:
        case OMPC_device:
        case OMPC_map:
        case OMPC_private:
        case OMPC_firstprivate:
        case OMPC_lastprivate:
        case OMPC_nowait:
        case OMPC_depend:
        case OMPC_defaultmap:
        case OMPC_num_threads:
        case OMPC_default:
        case OMPC_proc_bind:
        case OMPC_shared:
        case OMPC_reduction:
        case OMPC_collapse:
        case OMPC_schedule:
        case OMPC_ordered:
        case OMPC_linear:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_aligned:
        case OMPC_is_device_ptr:
        case OMPC_allocate:
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_order:
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_simd:
      switch (C) {
        case OMPC_aligned:
        case OMPC_allocate:
        case OMPC_depend:
        case OMPC_firstprivate:
        case OMPC_is_device_ptr:
        case OMPC_lastprivate:
        case OMPC_linear:
        case OMPC_map:
        case OMPC_nowait:
        case OMPC_private:
        case OMPC_reduction:
        case OMPC_shared:
        case OMPC_collapse:
        case OMPC_safelen:
        case OMPC_simdlen:
        case OMPC_if:
        case OMPC_num_threads:
        case OMPC_proc_bind:
        case OMPC_device:
        case OMPC_defaultmap:
        case OMPC_schedule:
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
        case OMPC_uses_allocators:
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams:
      switch (C) {
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_distribute:
      switch (C) {
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_distribute_parallel_do:
      switch (C) {
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ordered:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_distribute_parallel_do_simd:
      switch (C) {
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ordered:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_distribute_parallel_for:
      switch (C) {
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_distribute_parallel_for_simd:
      switch (C) {
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_distribute_simd:
      switch (C) {
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_update:
      switch (C) {
        case OMPC_to:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_from:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_task:
      switch (C) {
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_untied:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_mergeable:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_in_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_detach:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_affinity:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_final:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_priority:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_taskgroup:
      switch (C) {
        case OMPC_task_reduction:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_taskloop:
      switch (C) {
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_untied:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_mergeable:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nogroup:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_in_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_final:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_priority:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_grainsize:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_tasks:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_taskloop_simd:
      switch (C) {
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_in_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_mergeable:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nogroup:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_untied:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_final:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_priority:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_grainsize:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_tasks:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_taskwait:
      switch (C) {
        case OMPC_depend:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_taskyield:
      return false;
      break;
    case OMPD_teams:
      switch (C) {
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_distribute:
      switch (C) {
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_distribute_parallel_do:
      switch (C) {
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ordered:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_distribute_parallel_do_simd:
      switch (C) {
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_distribute_parallel_for:
      switch (C) {
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_distribute_parallel_for_simd:
      switch (C) {
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_distribute_simd:
      switch (C) {
        case OMPC_aligned:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_linear:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nontemporal:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_dist_schedule:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_safelen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_simdlen:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_threadprivate:
      return false;
      break;
    case OMPD_tile:
      switch (C) {
        case OMPC_sizes:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_unknown:
      return false;
      break;
    case OMPD_unroll:
      switch (C) {
        case OMPC_full:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_partial:
          return 51 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_workshare:
      return false;
      break;
    case OMPD_dispatch:
      switch (C) {
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_novariants:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nocontext:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_interop:
      switch (C) {
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_destroy:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_init:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_use:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_loop:
      switch (C) {
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_bind:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 50 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_masked:
      switch (C) {
        case OMPC_filter:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_parallel_loop:
      switch (C) {
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_bind:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_parallel_loop:
      switch (C) {
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_copyin:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_bind:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_threads:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_proc_bind:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_target_teams_loop:
      switch (C) {
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_depend:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_defaultmap:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_device:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_is_device_ptr:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_has_device_addr:
          return 51 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_map:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_uses_allocators:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_bind:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_if:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_nowait:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_ompx_dyn_cgroup_mem:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
    case OMPD_teams_loop:
      switch (C) {
        case OMPC_allocate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_firstprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_lastprivate:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_private:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_reduction:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_shared:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_bind:
          return 50 <= Version && 2147483647 >= Version;
        case OMPC_collapse:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_default:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_num_teams:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_order:
          return 1 <= Version && 2147483647 >= Version;
        case OMPC_thread_limit:
          return 1 <= Version && 2147483647 >= Version;
        default:
          return false;
      }
      break;
  }
  llvm_unreachable("Invalid OpenMP Directive kind");
}

#endif // GEN_DIRECTIVES_IMPL
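
// Usage sketch (an assumption-laden illustration, not part of the generated
// file): the switch above is assumed to implement the tablegen-emitted
// predicate isAllowedClauseForDirective(Directive D, Clause C, unsigned
// Version) from llvm/Frontend/OpenMP/OMP.inc, where Version encodes the
// OpenMP spec version, e.g. 50 for OpenMP 5.0 and 51 for 5.1:
//
//   // 'detach' is only allowed on 'task' starting with OpenMP 5.0.
//   bool Ok = isAllowedClauseForDirective(OMPD_task, OMPC_detach,
//                                         /*Version=*/50);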

name : Frontend/Debug/Options.h
//===--- DebugInfoOptions.h - Debug Info Emission Types ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FRONTEND_DEBUG_OPTIONS_H
#define LLVM_FRONTEND_DEBUG_OPTIONS_H

namespace llvm {
namespace codegenoptions {

enum DebugInfoFormat {
  DIF_DWARF,
  DIF_CodeView,
};

enum DebugInfoKind {
  /// Don't generate debug info.
  NoDebugInfo,

  /// Emit location information but do not generate debug info in the output.
  /// This is useful in cases where the backend wants to track source
  /// locations for instructions without actually emitting debug info for them
  /// (e.g., when -Rpass is used).
  LocTrackingOnly,

  /// Emit only debug directives with the line-number data.
  DebugDirectivesOnly,

  /// Emit only debug info necessary for generating line number tables
  /// (-gline-tables-only).
  DebugLineTablesOnly,

  /// Limit generated debug info for classes to reduce size. This emits class
  /// type information only where the constructor is emitted, for classes that
  /// have constructors.
  /// FIXME: Consider combining this with LimitedDebugInfo.
  DebugInfoConstructor,

  /// Limit generated debug info to reduce size (-fno-standalone-debug). This
  /// emits forward decls for types that could be replaced with forward decls in
  /// the source code. For dynamic C++ classes type info is only emitted into
  /// the module that contains the class's vtable.
  LimitedDebugInfo,

  /// Generate complete debug info.
  FullDebugInfo,

  /// Generate debug info for types that may be unused in the source
  /// (-fno-eliminate-unused-debug-types).
  UnusedTypeInfo,
};
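
// A minimal sketch (hypothetical helper, not part of this header): the
// DebugInfoKind enumerators above are ordered from least to most debug info,
// so "at least this much" checks can be written as ordinary comparisons,
// assuming that ordering stays stable.
inline bool emitsFullTypeInfo(DebugInfoKind K) {
  // FullDebugInfo and UnusedTypeInfo both generate complete type information.
  return K >= FullDebugInfo;
}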

enum class DebugTemplateNamesKind { Full, Simple, Mangled };

} // end namespace codegenoptions
} // end namespace llvm

#endif
name : WindowsManifest/WindowsManifestMerger.h
//===-- WindowsManifestMerger.h ---------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file provides a utility for merging Microsoft .manifest files.  These
// files are XML documents that contain meta-information about applications,
// such as whether admin access is required, system compatibility, versions,
// etc.  Linking an executable may require merging several of these .manifest
// files using a tree merge that follows specific rules.  Unfortunately, these
// rules are not well documented anywhere.  However, a careful investigation
// of the behavior of the original Microsoft Manifest Tool (mt.exe) revealed
// the rules of this merge.  As the saying goes, code is the best
// documentation, so please look below if you are interested in the exact
// merging requirements.
//
// Ref:
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa374191(v=vs.85).aspx
//
//===---------------------------------------------------------------------===//

#ifndef LLVM_WINDOWSMANIFEST_WINDOWSMANIFESTMERGER_H
#define LLVM_WINDOWSMANIFEST_WINDOWSMANIFESTMERGER_H

#include "llvm/Support/Error.h"

namespace llvm {

class MemoryBuffer;
class MemoryBufferRef;

namespace windows_manifest {

bool isAvailable();

class WindowsManifestError : public ErrorInfo<WindowsManifestError, ECError> {
public:
  static char ID;
  WindowsManifestError(const Twine &Msg);
  void log(raw_ostream &OS) const override;

private:
  std::string Msg;
};

class WindowsManifestMerger {
public:
  WindowsManifestMerger();
  ~WindowsManifestMerger();
  Error merge(MemoryBufferRef Manifest);

  // Returns a buffer containing the merged XML manifest, or nullptr if the
  // merged manifest is empty.
  std::unique_ptr<MemoryBuffer> getMergedManifest();

private:
  class WindowsManifestMergerImpl;
  std::unique_ptr<WindowsManifestMergerImpl> Impl;
};
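
// A minimal usage sketch (hypothetical helper, not part of this header):
// merge two manifest buffers and return the combined document. Assumes both
// refs hold valid manifest XML; a real caller would surface the Error rather
// than swallow it.
inline std::unique_ptr<MemoryBuffer> mergeTwoManifests(MemoryBufferRef A,
                                                       MemoryBufferRef B) {
  WindowsManifestMerger Merger;
  if (Error E = Merger.merge(A)) {
    consumeError(std::move(E)); // Simplified error handling for the sketch.
    return nullptr;
  }
  if (Error E = Merger.merge(B)) {
    consumeError(std::move(E));
    return nullptr;
  }
  return Merger.getMergedManifest();
}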

} // namespace windows_manifest
} // namespace llvm
#endif
name : Transforms/Vectorize/LoadStoreVectorizer.h
//===- LoadStoreVectorizer.h - GPU Load & Store Vectorizer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H
#define LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Pass;
class Function;

class LoadStoreVectorizerPass : public PassInfoMixin<LoadStoreVectorizerPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Create a legacy pass manager instance of the LoadStoreVectorizer pass
Pass *createLoadStoreVectorizerPass();
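
// A minimal sketch (hypothetical helper, not part of this header): schedule
// the vectorizer in a new-pass-manager function pipeline.
inline void addLoadStoreVectorizer(FunctionPassManager &FPM) {
  FPM.addPass(LoadStoreVectorizerPass());
}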

}

#endif /* LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H */
name : Transforms/Vectorize/LoopVectorize.h
//===- LoopVectorize.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops
// and generates target-independent LLVM-IR.
// The vectorizer uses the TargetTransformInfo analysis to estimate the costs
// of instructions in order to estimate the profitability of vectorization.
//
// The loop vectorizer combines consecutive loop iterations into a single
// 'wide' iteration. After this transformation the index is incremented
// by the SIMD vector width, and not by one.
//
// This pass has four parts:
// 1. The main loop pass that drives the different parts.
// 2. LoopVectorizationLegality - A unit that checks for the legality
//    of the vectorization.
// 3. InnerLoopVectorizer - A unit that performs the actual
//    widening of instructions.
// 4. LoopVectorizationCostModel - A unit that checks for the profitability
//    of vectorization. It decides on the optimal vector width, which
//    can be one, if vectorization is not profitable.
//
// There is a development effort going on to migrate the loop vectorizer to the
// VPlan infrastructure and to introduce outer loop vectorization support (see
// docs/Proposal/VectorizationPlan.rst and
// http://lists.llvm.org/pipermail/llvm-dev/2017-December/119523.html). For this
// purpose, we temporarily introduced the VPlan-native vectorization path: an
// alternative vectorization path that is natively implemented on top of the
// VPlan infrastructure. See EnableVPlanNativePath for enabling.
//
//===----------------------------------------------------------------------===//
//
// The reduction-variable vectorization is based on the paper:
//  D. Nuzman and R. Henderson. Multi-platform Auto-vectorization.
//
// Variable uniformity checks are inspired by:
//  Karrenberg, R. and Hack, S. Whole Function Vectorization.
//
// The interleaved access vectorization is based on the paper:
//  Dorit Nuzman, Ira Rosen and Ayal Zaks.  Auto-Vectorization of Interleaved
//  Data for SIMD
//
// Other ideas/concepts are from:
//  A. Zaks and D. Nuzman. Autovectorization in GCC-two years later.
//
//  S. Maleki, Y. Gao, M. Garzaran, T. Wong and D. Padua.  An Evaluation of
//  Vectorizing Compilers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H
#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H

#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include <functional>

namespace llvm {

class AssumptionCache;
class BlockFrequencyInfo;
class DemandedBits;
class DominatorTree;
class Function;
class Loop;
class LoopAccessInfoManager;
class LoopInfo;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class ScalarEvolution;
class TargetLibraryInfo;
class TargetTransformInfo;

extern cl::opt<bool> EnableLoopInterleaving;
extern cl::opt<bool> EnableLoopVectorization;

/// A marker to determine if extra passes after loop vectorization should be
/// run.
struct ShouldRunExtraVectorPasses
    : public AnalysisInfoMixin<ShouldRunExtraVectorPasses> {
  static AnalysisKey Key;
  struct Result {
    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &) {
      // Check whether the analysis has been explicitly invalidated. Otherwise,
      // it remains preserved.
      auto PAC = PA.getChecker<ShouldRunExtraVectorPasses>();
      return !PAC.preservedWhenStateless();
    }
  };

  Result run(Function &F, FunctionAnalysisManager &FAM) { return Result(); }
};

/// A pass manager to run a set of extra function simplification passes after
/// vectorization, if requested. LoopVectorize caches the
/// ShouldRunExtraVectorPasses analysis to request extra simplifications, if
/// they could be beneficial.
struct ExtraVectorPassManager : public FunctionPassManager {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    auto PA = PreservedAnalyses::all();
    if (AM.getCachedResult<ShouldRunExtraVectorPasses>(F))
      PA.intersect(FunctionPassManager::run(F, AM));
    PA.abandon<ShouldRunExtraVectorPasses>();
    return PA;
  }
};
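
// A minimal sketch (hypothetical, not part of this header): function passes
// added to this manager run only for functions where LoopVectorize cached a
// ShouldRunExtraVectorPasses result; for every other function the run() above
// preserves all analyses and does nothing.
inline ExtraVectorPassManager makeExtraVectorPassManager() {
  ExtraVectorPassManager PM;
  // Populate with cleanup passes as desired, e.g. instcombine/SimplifyCFG
  // (omitted here to keep the sketch self-contained).
  return PM;
}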

struct LoopVectorizeOptions {
  /// If false, consider all loops for interleaving.
  /// If true, only loops that explicitly request interleaving are considered.
  bool InterleaveOnlyWhenForced;

  /// If false, consider all loops for vectorization.
  /// If true, only loops that explicitly request vectorization are considered.
  bool VectorizeOnlyWhenForced;

  /// The current defaults when creating the pass with no arguments are:
  /// EnableLoopInterleaving = true and EnableLoopVectorization = true. This
  /// means that interleaving default is consistent with the cl::opt flag, while
  /// vectorization is not.
  /// FIXME: The default for EnableLoopVectorization in the cl::opt should be
  /// set to true, and the corresponding change to account for this be made in
  /// opt.cpp. The initializations below will become:
  /// InterleaveOnlyWhenForced(!EnableLoopInterleaving)
  /// VectorizeOnlyWhenForced(!EnableLoopVectorization).
  LoopVectorizeOptions()
      : InterleaveOnlyWhenForced(false), VectorizeOnlyWhenForced(false) {}
  LoopVectorizeOptions(bool InterleaveOnlyWhenForced,
                       bool VectorizeOnlyWhenForced)
      : InterleaveOnlyWhenForced(InterleaveOnlyWhenForced),
        VectorizeOnlyWhenForced(VectorizeOnlyWhenForced) {}

  LoopVectorizeOptions &setInterleaveOnlyWhenForced(bool Value) {
    InterleaveOnlyWhenForced = Value;
    return *this;
  }

  LoopVectorizeOptions &setVectorizeOnlyWhenForced(bool Value) {
    VectorizeOnlyWhenForced = Value;
    return *this;
  }
};
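
// A minimal sketch (hypothetical helper, not part of this header): build an
// options object that restricts both transformations to loops that explicitly
// request them via metadata or pragmas.
inline LoopVectorizeOptions makeForcedOnlyOptions() {
  return LoopVectorizeOptions()
      .setInterleaveOnlyWhenForced(true)
      .setVectorizeOnlyWhenForced(true);
}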

/// Storage for information about the changes made by the pass.
struct LoopVectorizeResult {
  bool MadeAnyChange;
  bool MadeCFGChange;

  LoopVectorizeResult(bool MadeAnyChange, bool MadeCFGChange)
      : MadeAnyChange(MadeAnyChange), MadeCFGChange(MadeCFGChange) {}
};

/// The LoopVectorize Pass.
struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
private:
  /// If false, consider all loops for interleaving.
  /// If true, only loops that explicitly request interleaving are considered.
  bool InterleaveOnlyWhenForced;

  /// If false, consider all loops for vectorization.
  /// If true, only loops that explicitly request vectorization are considered.
  bool VectorizeOnlyWhenForced;

public:
  LoopVectorizePass(LoopVectorizeOptions Opts = {});

  ScalarEvolution *SE;
  LoopInfo *LI;
  TargetTransformInfo *TTI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  TargetLibraryInfo *TLI;
  DemandedBits *DB;
  AssumptionCache *AC;
  LoopAccessInfoManager *LAIs;
  OptimizationRemarkEmitter *ORE;
  ProfileSummaryInfo *PSI;

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  // Shim for old PM.
  LoopVectorizeResult runImpl(Function &F, ScalarEvolution &SE_, LoopInfo &LI_,
                              TargetTransformInfo &TTI_, DominatorTree &DT_,
                              BlockFrequencyInfo *BFI_, TargetLibraryInfo *TLI_,
                              DemandedBits &DB_, AssumptionCache &AC_,
                              LoopAccessInfoManager &LAIs_,
                              OptimizationRemarkEmitter &ORE_,
                              ProfileSummaryInfo *PSI_);

  bool processLoop(Loop *L);
};
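
// A minimal usage sketch (hypothetical, not part of this header): run the
// vectorizer over a single function with the new pass manager. Assumes the
// required analyses have already been registered in \p FAM, e.g. via
// PassBuilder::registerFunctionAnalyses.
inline PreservedAnalyses vectorizeOneFunction(Function &F,
                                              FunctionAnalysisManager &FAM) {
  FunctionPassManager FPM;
  FPM.addPass(LoopVectorizePass(LoopVectorizeOptions()));
  return FPM.run(F, FAM);
}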

/// Reports a vectorization failure: print \p DebugMsg for debugging
/// purposes along with the corresponding optimization remark, built from
/// \p OREMsg and tagged with \p ORETag.
/// If \p I is passed, it is an instruction that prevents vectorization.
/// Otherwise, the loop \p TheLoop is used for the location of the remark.
void reportVectorizationFailure(const StringRef DebugMsg,
    const StringRef OREMsg, const StringRef ORETag,
    OptimizationRemarkEmitter *ORE, Loop *TheLoop, Instruction *I = nullptr);

/// Reports an informative message: print \p Msg for debugging purposes as well
/// as an optimization remark. Uses either \p I as location of the remark, or
/// otherwise \p TheLoop.
void reportVectorizationInfo(const StringRef OREMsg, const StringRef ORETag,
                             OptimizationRemarkEmitter *ORE, Loop *TheLoop,
                             Instruction *I = nullptr);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZE_H

//===- llvm/Transforms/Vectorize/LoopVectorizationLegality.h ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines the LoopVectorizationLegality class. Original code
/// in Loop Vectorizer has been moved out to its own file for modularity
/// and reusability.
///
/// Currently, it works for innermost loop vectorization. Extending this to
/// outer loop vectorization is a TODO item.
///
/// Also provides:
/// 1) LoopVectorizeHints class which keeps a number of loop annotations
/// locally for easy look up. It has the ability to write them back as
/// loop metadata, upon request.
/// 2) LoopVectorizationRequirements class for lazy bail-out, for the purpose
/// of reporting useful failure-to-vectorize messages.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H
#define LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Support/TypeSize.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

namespace llvm {
class AAResults;
class AssumptionCache;
class BasicBlock;
class BlockFrequencyInfo;
class DemandedBits;
class DominatorTree;
class Function;
class Loop;
class LoopInfo;
class Metadata;
class OptimizationRemarkEmitter;
class PredicatedScalarEvolution;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetTransformInfo;
class Type;

/// Utility class for getting and setting loop vectorizer hints in the form
/// of loop metadata.
/// This class keeps a number of loop annotations locally (as member variables)
/// and can, upon request, write them back as metadata on the loop. It will
/// initially scan the loop for existing metadata, and will update the local
/// values based on information in the loop.
/// We cannot write all values to metadata, as the mere presence of some info,
/// for example 'force', means a decision has been made. So, we need to be
/// careful NOT to add them if the user hasn't specifically asked for them.
class LoopVectorizeHints {
  enum HintKind {
    HK_WIDTH,
    HK_INTERLEAVE,
    HK_FORCE,
    HK_ISVECTORIZED,
    HK_PREDICATE,
    HK_SCALABLE
  };

  /// Hint - associates name and validation with the hint value.
  struct Hint {
    const char *Name;
    unsigned Value; // This may have to change for non-numeric values.
    HintKind Kind;

    Hint(const char *Name, unsigned Value, HintKind Kind)
        : Name(Name), Value(Value), Kind(Kind) {}

    bool validate(unsigned Val);
  };

  /// Vectorization width.
  Hint Width;

  /// Vectorization interleave factor.
  Hint Interleave;

  /// Vectorization forced
  Hint Force;

  /// Already Vectorized
  Hint IsVectorized;

  /// Vector Predicate
  Hint Predicate;

  /// Says whether we should use fixed width or scalable vectorization.
  Hint Scalable;

  /// Return the loop metadata prefix.
  static StringRef Prefix() { return "llvm.loop."; }

  /// True if there is any unsafe math in the loop.
  bool PotentiallyUnsafe = false;

public:
  enum ForceKind {
    FK_Undefined = -1, ///< Not selected.
    FK_Disabled = 0,   ///< Forcing disabled.
    FK_Enabled = 1,    ///< Forcing enabled.
  };

  enum ScalableForceKind {
    /// Not selected.
    SK_Unspecified = -1,
    /// Disables vectorization with scalable vectors.
    SK_FixedWidthOnly = 0,
    /// Vectorize loops using scalable vectors or fixed-width vectors, but favor
    /// scalable vectors when the cost-model is inconclusive. This is the
    /// default when the scalable.enable hint is enabled through a pragma.
    SK_PreferScalable = 1
  };

  LoopVectorizeHints(const Loop *L, bool InterleaveOnlyWhenForced,
                     OptimizationRemarkEmitter &ORE,
                     const TargetTransformInfo *TTI = nullptr);

  /// Mark the loop L as already vectorized by setting the width to 1.
  void setAlreadyVectorized();

  bool allowVectorization(Function *F, Loop *L,
                          bool VectorizeOnlyWhenForced) const;

  /// Dumps all the hint information.
  void emitRemarkWithHints() const;

  ElementCount getWidth() const {
    return ElementCount::get(Width.Value, (ScalableForceKind)Scalable.Value ==
                                              SK_PreferScalable);
  }

  unsigned getInterleave() const {
    if (Interleave.Value)
      return Interleave.Value;
    // If interleaving is not explicitly set, assume that if we do not want
    // unrolling, we also don't want any interleaving.
    if (llvm::hasUnrollTransformation(TheLoop) & TM_Disable)
      return 1;
    return 0;
  }
  unsigned getIsVectorized() const { return IsVectorized.Value; }
  unsigned getPredicate() const { return Predicate.Value; }
  enum ForceKind getForce() const {
    if ((ForceKind)Force.Value == FK_Undefined &&
        hasDisableAllTransformsHint(TheLoop))
      return FK_Disabled;
    return (ForceKind)Force.Value;
  }

  /// \return true if scalable vectorization has been explicitly disabled.
  bool isScalableVectorizationDisabled() const {
    return (ScalableForceKind)Scalable.Value == SK_FixedWidthOnly;
  }

  /// If hints are provided that force vectorization, use the AlwaysPrint
  /// pass name to force the frontend to print the diagnostic.
  const char *vectorizeAnalysisPassName() const;

  /// When enabling loop hints are provided, we allow the vectorizer to change
  /// the order of operations that is given by the scalar loop. This is not
  /// enabled by default because it can be unsafe or inefficient. For example,
  /// reordering floating-point operations will change the way round-off
  /// error accumulates in the loop.
  bool allowReordering() const;

  bool isPotentiallyUnsafe() const {
    // Avoid FP vectorization if the target is unsure about proper support.
    // This may be related to the SIMD unit in the target not handling
    // IEEE 754 FP ops properly, or bad single-to-double promotions.
    // Otherwise, a sequence of vectorized loops, even without reduction,
    // could lead to different end results on the destination vectors.
    return getForce() != LoopVectorizeHints::FK_Enabled && PotentiallyUnsafe;
  }

  void setPotentiallyUnsafe() { PotentiallyUnsafe = true; }

private:
  /// Find hints specified in the loop metadata and update local values.
  void getHintsFromMetadata();

  /// Checks a string hint with one operand and sets the value if valid.
  void setHint(StringRef Name, Metadata *Arg);

  /// The loop these hints belong to.
  const Loop *TheLoop;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter &ORE;
};
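
// Illustrative query sketch (assumes a Loop *L and an
// OptimizationRemarkEmitter ORE are available): the hints object scans any
// existing llvm.loop metadata on construction and exposes it via getters.
//
//   LoopVectorizeHints Hints(L, /*InterleaveOnlyWhenForced=*/false, ORE);
//   if (Hints.getForce() == LoopVectorizeHints::FK_Disabled)
//     return;                          // vectorization explicitly disabled
//   ElementCount Width = Hints.getWidth();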

/// This holds vectorization requirements that must be verified late in
/// the process. The requirements are set by legalize and costmodel. Once
/// vectorization has been determined to be possible and profitable the
/// requirements can be verified by looking for metadata or compiler options.
/// For example, some loops require FP commutativity which is only allowed if
/// vectorization is explicitly specified or if the fast-math compiler option
/// has been provided.
/// Late evaluation of these requirements allows helpful diagnostics to be
/// composed that tell the user what needs to be done to vectorize the loop,
/// for example by specifying '#pragma clang loop vectorize' or -ffast-math.
/// Late evaluation should be used only when diagnostics can be generated that
/// can be followed by a non-expert user.
class LoopVectorizationRequirements {
public:
  /// Track the 1st floating-point instruction that can not be reassociated.
  void addExactFPMathInst(Instruction *I) {
    if (I && !ExactFPMathInst)
      ExactFPMathInst = I;
  }

  Instruction *getExactFPInst() { return ExactFPMathInst; }

private:
  Instruction *ExactFPMathInst = nullptr;
};

/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and
/// to what vectorization factor.
/// This class does not look at the profitability of vectorization, only the
/// legality. This class has two main kinds of checks:
/// * Memory checks - The code in canVectorizeMemory checks if vectorization
///   will change the order of memory accesses in a way that will change the
///   correctness of the program.
/// * Scalar checks - The code in canVectorizeInstrs and canVectorizeMemory
///   checks for a number of different conditions, such as the availability of
///   a single induction variable, that all types are supported and
///   vectorizable, etc. This code reflects the capabilities of
///   InnerLoopVectorizer.
/// This class is also used by InnerLoopVectorizer for identifying
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
  LoopVectorizationLegality(
      Loop *L, PredicatedScalarEvolution &PSE, DominatorTree *DT,
      TargetTransformInfo *TTI, TargetLibraryInfo *TLI, Function *F,
      LoopAccessInfoManager &LAIs, LoopInfo *LI, OptimizationRemarkEmitter *ORE,
      LoopVectorizationRequirements *R, LoopVectorizeHints *H, DemandedBits *DB,
      AssumptionCache *AC, BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI)
      : TheLoop(L), LI(LI), PSE(PSE), TTI(TTI), TLI(TLI), DT(DT), LAIs(LAIs),
        ORE(ORE), Requirements(R), Hints(H), DB(DB), AC(AC), BFI(BFI),
        PSI(PSI) {}

  /// ReductionList contains the reduction descriptors for all
  /// of the reductions that were found in the loop.
  using ReductionList = MapVector<PHINode *, RecurrenceDescriptor>;

  /// InductionList saves induction variables and maps them to the
  /// induction descriptor.
  using InductionList = MapVector<PHINode *, InductionDescriptor>;

  /// RecurrenceSet contains the phi nodes that are recurrences other than
  /// inductions and reductions.
  using RecurrenceSet = SmallPtrSet<const PHINode *, 8>;

  /// Returns true if it is legal to vectorize this loop.
  /// This does not mean that it is profitable to vectorize this
  /// loop, only that it is legal to do so.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorize(bool UseVPlanNativePath);

  /// Returns true if it is legal to vectorize the FP math operations in this
  /// loop. Vectorizing is legal if we allow reordering of FP operations, or if
  /// we can use in-order reductions.
  bool canVectorizeFPMath(bool EnableStrictReductions);

  /// Return true if we can vectorize this loop while folding its tail by
  /// masking, and mark all respective loads/stores for masking.
  /// This object's state is modified only if this function returns true.
  bool prepareToFoldTailByMasking();

  /// Returns the primary induction variable.
  PHINode *getPrimaryInduction() { return PrimaryInduction; }

  /// Returns the reduction variables found in the loop.
  const ReductionList &getReductionVars() const { return Reductions; }

  /// Returns the induction variables found in the loop.
  const InductionList &getInductionVars() const { return Inductions; }

  /// Return the fixed-order recurrences found in the loop.
  RecurrenceSet &getFixedOrderRecurrences() { return FixedOrderRecurrences; }

  /// Returns the widest induction type.
  Type *getWidestInductionType() { return WidestIndTy; }

  /// Returns true if the given store is a final invariant store of one of the
  /// reductions found in the loop.
  bool isInvariantStoreOfReduction(StoreInst *SI);

  /// Returns true if the given address is invariant and is used to store a
  /// recurrent expression.
  bool isInvariantAddressOfReduction(Value *V);

  /// Returns True if V is a Phi node of an induction variable in this loop.
  bool isInductionPhi(const Value *V) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is an integer or
  /// floating point induction.
  const InductionDescriptor *getIntOrFpInductionDescriptor(PHINode *Phi) const;

  /// Returns a pointer to the induction descriptor, if \p Phi is pointer
  /// induction.
  const InductionDescriptor *getPointerInductionDescriptor(PHINode *Phi) const;

  /// Returns True if V is a cast that is part of an induction def-use chain,
  /// and had been proven to be redundant under a runtime guard (in other
  /// words, the cast has the same SCEV expression as the induction phi).
  bool isCastedInductionVariable(const Value *V) const;

  /// Returns true if V can be considered as an induction variable in this
  /// loop. V can be the induction phi, or some redundant cast in the def-use
  /// chain of the induction phi.
  bool isInductionVariable(const Value *V) const;

  /// Returns True if PN is a reduction variable in this loop.
  bool isReductionVariable(PHINode *PN) const { return Reductions.count(PN); }

  /// Returns True if Phi is a fixed-order recurrence in this loop.
  bool isFixedOrderRecurrence(const PHINode *Phi) const;

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  bool blockNeedsPredication(BasicBlock *BB) const;

  /// Check if this pointer is consecutive when vectorizing. This happens
  /// when the last index of the GEP is the induction variable, or when the
  /// pointer itself is an induction variable.
  /// This check allows us to vectorize A[idx] into a wide load/store.
  /// Returns:
  /// 0 - Stride is unknown or non-consecutive.
  /// 1 - Address is consecutive.
  /// -1 - Address is consecutive, and decreasing.
  /// NOTE: This method must only be used before modifying the original scalar
  /// loop. Do not use after invoking 'createVectorizedLoopSkeleton' (PR34965).
  int isConsecutivePtr(Type *AccessTy, Value *Ptr) const;

  /// Returns true if \p V is invariant across all loop iterations.
  bool isInvariant(Value *V) const;

  /// Returns true if value V is uniform across \p VF lanes, when \p VF is
  /// provided, and otherwise if \p V is invariant across all loop iterations.
  bool isUniform(Value *V, ElementCount VF) const;

  /// A uniform memory op is a load or store which accesses the same memory
  /// location on all \p VF lanes, if \p VF is provided and otherwise if the
  /// memory location is invariant.
  bool isUniformMemOp(Instruction &I, ElementCount VF) const;

  /// Returns the information that we collected about runtime memory check.
  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return LAI->getRuntimePointerChecking();
  }

  const LoopAccessInfo *getLAI() const { return LAI; }

  bool isSafeForAnyVectorWidth() const {
    return LAI->getDepChecker().isSafeForAnyVectorWidth();
  }

  uint64_t getMaxSafeVectorWidthInBits() const {
    return LAI->getDepChecker().getMaxSafeVectorWidthInBits();
  }

  /// Returns true if the vector representation of the instruction \p I
  /// requires a mask.
  bool isMaskRequired(const Instruction *I) const {
    return MaskedOp.contains(I);
  }

  unsigned getNumStores() const { return LAI->getNumStores(); }
  unsigned getNumLoads() const { return LAI->getNumLoads(); }

  /// Returns all assume calls in predicated blocks. They need to be dropped
  /// when flattening the CFG.
  const SmallPtrSetImpl<Instruction *> &getConditionalAssumes() const {
    return ConditionalAssumes;
  }

  PredicatedScalarEvolution *getPredicatedScalarEvolution() const {
    return &PSE;
  }

  Loop *getLoop() const { return TheLoop; }

  LoopInfo *getLoopInfo() const { return LI; }

  AssumptionCache *getAssumptionCache() const { return AC; }

  ScalarEvolution *getScalarEvolution() const { return PSE.getSE(); }

  DominatorTree *getDominatorTree() const { return DT; }

private:
  /// Return true if the pre-header, exiting and latch blocks of \p Lp and all
  /// its nested loops are considered legal for vectorization. These legal
  /// checks are common for inner and outer loop vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorizeLoopNestCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Set up outer loop inductions by checking Phis in outer loop header for
  /// supported inductions (int inductions). Return false if any of these Phis
  /// is not a supported induction or if we fail to find an induction.
  bool setupOuterLoopInductions();

  /// Return true if the pre-header, exiting and latch blocks of \p Lp
  /// (non-recursive) are considered legal for vectorization.
  /// Temporarily taking UseVPlanNativePath parameter. If true, take
  /// the new code path being implemented for outer loop vectorization
  /// (should be functional for inner loop vectorization) based on VPlan.
  /// If false, good old LV code.
  bool canVectorizeLoopCFG(Loop *Lp, bool UseVPlanNativePath);

  /// Check if a single basic block loop is vectorizable.
  /// At this point we know that this is a loop with a constant trip count
  /// and we only need to check individual instructions.
  bool canVectorizeInstrs();

  /// When we vectorize loops we may change the order in which
  /// we read and write from memory. This method checks if it is
  /// legal to vectorize the code, considering only memory constraints.
  /// Returns true if the loop is vectorizable.
  bool canVectorizeMemory();

  /// Return true if we can vectorize this loop using the IF-conversion
  /// transformation.
  bool canVectorizeWithIfConvert();

  /// Return true if we can vectorize this outer loop. The method performs
  /// specific checks for outer loop vectorization.
  bool canVectorizeOuterLoop();

  /// Return true if all of the instructions in the block can be speculatively
  /// executed, and record the loads/stores that require masking.
  /// \p SafePtrs is a list of addresses that are known to be legal and we know
  /// that we can read from them without segfault.
  /// \p MaskedOp is a list of instructions that have to be transformed into
  /// calls to the appropriate masked intrinsic when the loop is vectorized.
  /// \p ConditionalAssumes is a list of assume instructions in predicated
  /// blocks that must be dropped if the CFG gets flattened.
  bool blockCanBePredicated(
      BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
      SmallPtrSetImpl<const Instruction *> &MaskedOp,
      SmallPtrSetImpl<Instruction *> &ConditionalAssumes) const;

  /// Updates the vectorization state by adding \p Phi to the inductions list.
  /// This can set \p Phi as the main induction of the loop if \p Phi is a
  /// better choice for the main induction than the existing one.
  void addInductionPhi(PHINode *Phi, const InductionDescriptor &ID,
                       SmallPtrSetImpl<Value *> &AllowedExit);

  /// The loop that we evaluate.
  Loop *TheLoop;

  /// Loop Info analysis.
  LoopInfo *LI;

  /// A wrapper around ScalarEvolution used to add runtime SCEV checks.
  /// Applies dynamic knowledge to simplify SCEV expressions in the context
  /// of existing SCEV assumptions. The analysis will also add a minimal set
  /// of new predicates if this is required to enable vectorization and
  /// unrolling.
  PredicatedScalarEvolution &PSE;

  /// Target Transform Info.
  TargetTransformInfo *TTI;

  /// Target Library Info.
  TargetLibraryInfo *TLI;

  /// Dominator Tree.
  DominatorTree *DT;

  // LoopAccess analysis.
  LoopAccessInfoManager &LAIs;

  const LoopAccessInfo *LAI = nullptr;

  /// Interface to emit optimization remarks.
  OptimizationRemarkEmitter *ORE;

  //  ---  vectorization state --- //

  /// Holds the primary induction variable. This is the counter of the
  /// loop.
  PHINode *PrimaryInduction = nullptr;

  /// Holds the reduction variables.
  ReductionList Reductions;

  /// Holds all of the induction variables that we found in the loop.
  /// Notice that inductions don't need to start at zero and that induction
  /// variables can be pointers.
  InductionList Inductions;

  /// Holds all the casts that participate in the update chain of the induction
  /// variables, and that have been proven to be redundant (possibly under a
  /// runtime guard). These casts can be ignored when creating the vectorized
  /// loop body.
  SmallPtrSet<Instruction *, 4> InductionCastsToIgnore;

  /// Holds the phi nodes that are fixed-order recurrences.
  RecurrenceSet FixedOrderRecurrences;

  /// Holds the widest induction type encountered.
  Type *WidestIndTy = nullptr;

  /// Allowed outside users. This holds the variables that can be accessed from
  /// outside the loop.
  SmallPtrSet<Value *, 4> AllowedExit;

  /// Vectorization requirements that will go through late-evaluation.
  LoopVectorizationRequirements *Requirements;

  /// Used to emit an analysis of any legality issues.
  LoopVectorizeHints *Hints;

  /// The demanded bits analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  DemandedBits *DB;

  /// The assumption cache analysis is used to compute the minimum type size in
  /// which a reduction can be computed.
  AssumptionCache *AC;

  /// While vectorizing these instructions we have to generate a
  /// call to the appropriate masked intrinsic
  SmallPtrSet<const Instruction *, 8> MaskedOp;

  /// Assume instructions in predicated blocks must be dropped if the CFG gets
  /// flattened.
  SmallPtrSet<Instruction *, 8> ConditionalAssumes;

  /// BFI and PSI are used to check for profile guided size optimizations.
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
};
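
// A minimal legality-check sketch (assumes the analyses named in the
// constructor were already obtained for the function F containing Loop *L):
//
//   LoopVectorizationLegality LVL(L, PSE, DT, TTI, TLI, F, LAIs, LI, ORE,
//                                 &Requirements, &Hints, DB, AC, BFI, PSI);
//   if (!LVL.canVectorize(/*UseVPlanNativePath=*/false))
//     return false;                 // not legal, regardless of profitability
//   const auto &Inductions = LVL.getInductionVars();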

} // namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_LOOPVECTORIZATIONLEGALITY_H

//===-------- VectorCombine.h - Optimize partial vector operations --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass optimizes scalar/vector interactions using target cost models. The
// transforms implemented here may not fit in traditional loop-based or SLP
// vectorization passes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_VECTORCOMBINE_H
#define LLVM_TRANSFORMS_VECTORIZE_VECTORCOMBINE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Optimize scalar/vector interactions in IR using target cost models.
class VectorCombinePass : public PassInfoMixin<VectorCombinePass> {
  /// If true, only perform beneficial early IR transforms. Do not introduce new
  /// vector operations.
  bool TryEarlyFoldsOnly;

public:
  VectorCombinePass(bool TryEarlyFoldsOnly = false)
      : TryEarlyFoldsOnly(TryEarlyFoldsOnly) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};
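
// Usage sketch (illustrative): the early-folds-only variant is meant for
// early pipeline positions, where introducing new vector operations would be
// premature.
//
//   FunctionPassManager FPM;
//   FPM.addPass(VectorCombinePass(/*TryEarlyFoldsOnly=*/true));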
} // end namespace llvm
#endif // LLVM_TRANSFORMS_VECTORIZE_VECTORCOMBINE_H

//===- SLPVectorizer.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable
// tree was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H
#define LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class AAResults;
class AssumptionCache;
class BasicBlock;
class DemandedBits;
class DominatorTree;
class Function;
class GetElementPtrInst;
class InsertElementInst;
class InsertValueInst;
class Instruction;
class LoopInfo;
class OptimizationRemarkEmitter;
class PHINode;
class ScalarEvolution;
class StoreInst;
class TargetLibraryInfo;
class TargetTransformInfo;
class Value;
class WeakTrackingVH;

/// A private "module" namespace for types and utilities used by this pass.
/// These are implementation details and should not be used by clients.
namespace slpvectorizer {

class BoUpSLP;

} // end namespace slpvectorizer

struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;
  using GEPList = SmallVector<GetElementPtrInst *, 8>;
  using GEPListMap = MapVector<Value *, GEPList>;
  using InstSetVector = SmallSetVector<Instruction *, 8>;

  ScalarEvolution *SE = nullptr;
  TargetTransformInfo *TTI = nullptr;
  TargetLibraryInfo *TLI = nullptr;
  AAResults *AA = nullptr;
  LoopInfo *LI = nullptr;
  DominatorTree *DT = nullptr;
  AssumptionCache *AC = nullptr;
  DemandedBits *DB = nullptr;
  const DataLayout *DL = nullptr;

public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM.
  bool runImpl(Function &F, ScalarEvolution *SE_, TargetTransformInfo *TTI_,
               TargetLibraryInfo *TLI_, AAResults *AA_, LoopInfo *LI_,
               DominatorTree *DT_, AssumptionCache *AC_, DemandedBits *DB_,
               OptimizationRemarkEmitter *ORE_);

private:
  /// Collect store and getelementptr instructions and organize them
  /// according to the underlying object of their pointer operands. We sort the
  /// instructions by their underlying objects to reduce the cost of
  /// consecutive access queries.
  ///
  /// TODO: We can further reduce this cost if we flush the chain creation
  ///       every time we run into a memory barrier.
  void collectSeedInstructions(BasicBlock *BB);

  /// Try to vectorize a list of operands.
  /// \param MaxVFOnly Vectorize only using maximal allowed register size.
  /// \returns true if a value was vectorized.
  bool tryToVectorizeList(ArrayRef<Value *> VL, slpvectorizer::BoUpSLP &R,
                          bool MaxVFOnly = false);

  /// Try to vectorize a chain that may start at the operands of \p I.
  bool tryToVectorize(Instruction *I, slpvectorizer::BoUpSLP &R);

  /// Try to vectorize chains that may start at the operands of
  /// instructions in \p Insts.
  bool tryToVectorize(ArrayRef<WeakTrackingVH> Insts,
                      slpvectorizer::BoUpSLP &R);

  /// Vectorize the store instructions collected in Stores.
  bool vectorizeStoreChains(slpvectorizer::BoUpSLP &R);

  /// Vectorize the index computations of the getelementptr instructions
  /// collected in GEPs.
  bool vectorizeGEPIndices(BasicBlock *BB, slpvectorizer::BoUpSLP &R);

  /// Try to find a horizontal reduction or otherwise, collect instructions
  /// for postponed vectorization attempts.
  /// \a P, if not null, designates the phi node the reduction is fed into
  /// (with reduction operators \a Root or one of its operands, in a basic
  /// block \a BB).
  /// \returns true if a horizontal reduction was matched and reduced.
  /// \returns false if \a Root is null or not an instruction,
  /// or a horizontal reduction was not matched or not possible.
  bool vectorizeHorReduction(PHINode *P, Instruction *Root, BasicBlock *BB,
                             slpvectorizer::BoUpSLP &R,
                             TargetTransformInfo *TTI,
                             SmallVectorImpl<WeakTrackingVH> &PostponedInsts);

  /// Make an attempt to vectorize reduction and then try to vectorize
  /// postponed binary operations.
  /// \returns true on any successful vectorization.
  bool vectorizeRootInstruction(PHINode *P, Instruction *Root, BasicBlock *BB,
                                slpvectorizer::BoUpSLP &R,
                                TargetTransformInfo *TTI);

  /// Try to vectorize trees that start at insertvalue instructions.
  bool vectorizeInsertValueInst(InsertValueInst *IVI, BasicBlock *BB,
                                slpvectorizer::BoUpSLP &R);

  /// Try to vectorize trees that start at insertelement instructions.
  bool vectorizeInsertElementInst(InsertElementInst *IEI, BasicBlock *BB,
                                  slpvectorizer::BoUpSLP &R);

  /// Tries to vectorize \p CmpInsts. \returns true on success.
  template <typename ItT>
  bool vectorizeCmpInsts(iterator_range<ItT> CmpInsts, BasicBlock *BB,
                         slpvectorizer::BoUpSLP &R);

  /// Tries to vectorize constructs starting from InsertValueInst or
  /// InsertElementInst instructions.
  bool vectorizeInserts(InstSetVector &Instructions, BasicBlock *BB,
                        slpvectorizer::BoUpSLP &R);

  /// Scan the basic block and look for patterns that are likely to start
  /// a vectorization chain.
  bool vectorizeChainsInBlock(BasicBlock *BB, slpvectorizer::BoUpSLP &R);

  bool vectorizeStoreChain(ArrayRef<Value *> Chain, slpvectorizer::BoUpSLP &R,
                           unsigned Idx, unsigned MinVF);

  bool vectorizeStores(ArrayRef<StoreInst *> Stores, slpvectorizer::BoUpSLP &R);

  /// The store instructions in a basic block organized by base pointer.
  StoreListMap Stores;

  /// The getelementptr instructions in a basic block organized by base pointer.
  GEPListMap GEPs;
};
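
// Usage sketch (illustrative, under the new pass manager); the pass walks
// each basic block, collecting seed stores and GEPs, and then tries to build
// profitable vectorizable trees from them:
//
//   FunctionPassManager FPM;
//   FPM.addPass(SLPVectorizerPass());
//   PreservedAnalyses PA = FPM.run(F, FAM); // F and FAM set up elsewhere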

} // end namespace llvm

#endif // LLVM_TRANSFORMS_VECTORIZE_SLPVECTORIZER_H

//===- FunctionComparator.h - Function Comparator ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the FunctionComparator and GlobalNumberState classes which
// are used by the MergeFunctions pass for comparing functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H
#define LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include <cstdint>
#include <tuple>

namespace llvm {

class APFloat;
class AttributeList;
class APInt;
class BasicBlock;
class Constant;
class Function;
class GlobalValue;
class InlineAsm;
class Instruction;
class MDNode;
class Type;
class Value;

/// GlobalNumberState assigns an integer to each global value in the program,
/// which is used by the comparison routine to order references to globals. This
/// state must be preserved throughout the pass, because Functions and other
/// globals need to maintain their relative order. Globals are assigned a number
/// when they are first visited. This order is deterministic, and so the
/// assigned numbers are as well. When two functions are merged, neither number
/// is updated. If the symbols are weak, this would be incorrect. If they are
/// strong, then one will be replaced at all references to the other, and so
/// direct callsites will now see one or the other symbol, and no update is
/// necessary. Note that if we were guaranteed unique names, we could just
/// compare those, but this would not work for stripped bitcodes or for those
/// few symbols without a name.
class GlobalNumberState {
  struct Config : ValueMapConfig<GlobalValue *> {
    enum { FollowRAUW = false };
  };

  // Each GlobalValue is mapped to an identifier. The Config ensures when RAUW
  // occurs, the mapping does not change. Tracking changes is unnecessary, and
  // also problematic for weak symbols (which may be overwritten).
  using ValueNumberMap = ValueMap<GlobalValue *, uint64_t, Config>;
  ValueNumberMap GlobalNumbers;

  // The next unused serial number to assign to a global.
  uint64_t NextNumber = 0;

public:
  GlobalNumberState() = default;

  uint64_t getNumber(GlobalValue* Global) {
    ValueNumberMap::iterator MapIter;
    bool Inserted;
    std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
    if (Inserted)
      NextNumber++;
    return MapIter->second;
  }

  void erase(GlobalValue *Global) {
    GlobalNumbers.erase(Global);
  }

  void clear() {
    GlobalNumbers.clear();
  }
};
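
// Illustrative numbering sketch (GA and GB are hypothetical GlobalValue
// pointers): numbers are handed out in first-visit order and never change
// afterwards, which keeps the comparison order deterministic.
//
//   GlobalNumberState GN;
//   uint64_t A = GN.getNumber(GA); // 0: first global visited
//   uint64_t B = GN.getNumber(GB); // 1: second global visited
//   uint64_t C = GN.getNumber(GA); // 0 again: a lookup, not a re-assignment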

/// FunctionComparator - Compares two functions to determine whether or not
/// they will generate machine code with the same behaviour. DataLayout is
/// used if available. The comparator always fails conservatively (erring on the
/// side of claiming that two functions are different).
class FunctionComparator {
public:
  FunctionComparator(const Function *F1, const Function *F2,
                     GlobalNumberState* GN)
      : FnL(F1), FnR(F2), GlobalNumbers(GN) {}

  /// Test whether the two functions have equivalent behaviour.
  int compare();

  /// Hash a function. Equivalent functions will have the same hash, and unequal
  /// functions will have different hashes with high probability.
  using FunctionHash = uint64_t;
  static FunctionHash functionHash(Function &);

protected:
  /// Start the comparison.
  void beginCompare() {
    sn_mapL.clear();
    sn_mapR.clear();
  }

  /// Compares the signature and other general attributes of the two functions.
  int compareSignature() const;

  /// Test whether two basic blocks have equivalent behaviour.
  int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR) const;

  /// Constants comparison.
  /// It's analogous to a lexicographical comparison between hypothetical
  /// numbers of the following format:
  /// <bitcastability-trait><raw-bit-contents>
  ///
  /// 1. Bitcastability.
  /// Check whether L's type could be losslessly bitcasted to R's type.
  /// At this stage, if a lossless bitcast is not possible, the method
  /// returns -1 or 1, thus also defining which type is greater in the
  /// context of bitcastability.
  /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
  ///          to the contents comparison.
  ///          If types differ, remember types comparison result and check
  ///          whether we still can bitcast types.
  /// Stage 1: Types that satisfy isFirstClassType conditions are always
  ///          greater than others.
  /// Stage 2: Vector is greater than non-vector.
  ///          If both types are vectors, then the vector with the greater
  ///          bitwidth is greater.
  ///          If both types are vectors with the same bitwidth, then types
  ///          are bitcastable, and we can skip other stages, and go to contents
  ///          comparison.
  /// Stage 3: Pointer types are greater than non-pointers. If both types are
  ///          pointers of the same address space - go to contents comparison.
  ///          Different address spaces: pointer with greater address space is
  ///          greater.
  /// Stage 4: Types are neither vectors, nor pointers. And they differ.
  ///          We don't know how to bitcast them. So, we better don't do it,
  ///          and return types comparison result (so it determines the
  ///          relationship among constants we don't know how to bitcast).
  ///
  /// Just for clarity, let's see how the set of constants could look
  /// on a single-dimension axis:
  ///
  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
  /// Where: NFCT - Not a FirstClassType
  ///        FCT - FirstClassType
  ///
  /// 2. Compare raw contents.
  /// It ignores types at this stage and only compares bits from L and R.
  /// Returns 0 if L and R have equivalent contents.
  /// -1 or 1 if values are different.
  /// Pretty trivial:
  /// 2.1. If contents are numbers, compare numbers.
  ///    Ints with greater bitwidth are greater. Ints with the same bitwidth
  ///    are compared by their contents.
  /// 2.2. "And so on". Just to avoid discrepancies with the comments,
  /// perhaps it would be better to read the implementation itself.
  /// 3. And again about overall picture. Let's look back at how the ordered set
  /// of constants will look like:
  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
  ///
  /// Now look, what could be inside [FCT, "others"], for example:
  /// [FCT, "others"] =
  /// [
  ///   [double 0.1], [double 1.23],
  ///   [i32 1], [i32 2],
  ///   { double 1.0 },       ; StructTyID, NumElements = 1
  ///   { i32 1 },            ; StructTyID, NumElements = 1
  ///   { double 1, i32 1 },  ; StructTyID, NumElements = 2
  ///   { i32 1, double 1 }   ; StructTyID, NumElements = 2
  /// ]
  ///
  /// Let's explain the order. Float numbers will be less than integers, just
  /// because of cmpType terms: FloatTyID < IntegerTyID.
  /// Floats (with the same fltSemantics) are sorted according to their value.
  /// Then come the integers, which, like the floats,
  /// can be easily sorted among each other.
  /// The structures. Structures are grouped at the tail, again because of
  /// their TypeID: StructTyID > IntegerTyID > FloatTyID.
  /// Structures with a greater number of elements are greater. Structures
  /// whose greater elements come first are greater.
  /// The same logic applies to vectors, arrays and other possible complex
  /// types.
  ///
  /// Bitcastable constants.
  /// Let's assume that some constant belongs to some group of
  /// "so-called-equal" values with different types, and at the same time
  /// belongs to another group of constants with equal types
  /// and "really" equal values.
  ///
  /// Now, prove that this is impossible:
  ///
  /// If constant A with type TyA is bitcastable to B with type TyB, then:
  /// 1. All constants with equal types to TyA, are bitcastable to B. Since
  ///    those should be vectors (if TyA is vector), pointers
  ///    (if TyA is pointer), or else (if TyA equal to TyB), those types should
  ///    be equal to TyB.
  /// 2. All constants with non-equal, but bitcastable, types to TyA are
  ///    bitcastable to B.
  ///    Once again, just because we allow it for vectors and pointers only.
  ///    This statement could be expanded as below:
  /// 2.1. All vectors with bitwidth equal to vector A have bitwidth equal to
  ///      vector B, and are thus bitcastable to B as well.
  /// 2.2. All pointers of the same address space, no matter what they point
  ///      to, are bitcastable. So if C is a pointer, it could be bitcasted
  ///      to A and to B.
  /// So any constant equal or bitcastable to A is equal or bitcastable to B.
  /// QED.
  ///
  /// In other words, for pointers and vectors, we ignore the top-level type
  /// and look at their particular properties (bit-width for vectors, and
  /// address space for pointers).
  /// If these properties are equal - compare their contents.
  int cmpConstants(const Constant *L, const Constant *R) const;

  /// Compares two global values by number. Uses the GlobalNumberState to
  /// identify the same globals across function calls.
  int cmpGlobalValues(GlobalValue *L, GlobalValue *R) const;

  /// Assign or look up previously assigned numbers for the two values, and
  /// return whether the numbers are equal. Numbers are assigned in the order
  /// visited.
  /// Comparison order:
  /// Stage 0: A value that is the function itself is always greater than
  ///          others. If both left and right values are references to their
  ///          functions, then they are equal.
  /// Stage 1: Constants are greater than non-constants.
  ///          If both left and right are constants, then the result of
  ///          cmpConstants is used as the cmpValues result.
  /// Stage 2: InlineAsm instances are greater than others. If both left and
  ///          right are InlineAsm instances, the InlineAsm* pointers are cast
  ///          to integers and compared as numbers.
  /// Stage 3: For all other cases we compare the order in which we meet these
  ///          values in their functions. If the right value was met first
  ///          during scanning, then the left value is greater.
  ///          In other words, we compare serial numbers; for more details
  ///          see the comments for sn_mapL and sn_mapR.
  int cmpValues(const Value *L, const Value *R) const;

  /// Compare two Instructions for equivalence, similar to
  /// Instruction::isSameOperationAs.
  ///
  /// Stages are listed in "most significant stage first" order:
  /// On each stage below, we do comparison between some left and right
  /// operation parts. If parts are non-equal, we assign parts comparison
  /// result to the operation comparison result and exit from method.
  /// Otherwise we proceed to the next stage.
  /// Stages:
  /// 1. Operations opcodes. Compared as numbers.
  /// 2. Number of operands.
  /// 3. Operation types. Compared with cmpType method.
  /// 4. Compare operation subclass optional data as stream of bytes:
  /// just convert it to integers and call cmpNumbers.
  /// 5. Compare operation operand types with cmpType, in
  /// most-significant-operand-first order.
  /// 6. Last stage. Check operations for some specific attributes.
  /// For example, for Load it would be:
  /// 6.1.Load: volatile (as boolean flag)
  /// 6.2.Load: alignment (as integer numbers)
  /// 6.3.Load: ordering (as underlying enum class value)
  /// 6.4.Load: synch-scope (as integer numbers)
  /// 6.5.Load: range metadata (as integer ranges)
  /// At this stage it's better to see the code, since it's not more than
  /// 10-15 lines per particular instruction, and could change over time.
  ///
  /// Sets \p needToCmpOperands to true if the operands of the instructions
  /// still must be compared afterwards. In this case it's already guaranteed
  /// that both instructions have the same number of operands.
  int cmpOperations(const Instruction *L, const Instruction *R,
                    bool &needToCmpOperands) const;

  /// cmpType - compares two types,
  /// defines a total ordering among the set of types.
  ///
  /// Return values:
  /// 0 if types are equal,
  /// -1 if Left is less than Right,
  /// +1 if Left is greater than Right.
  ///
  /// Description:
  /// Comparison is broken into stages. As in a lexicographical comparison,
  /// a stage coming first has higher priority.
  /// At each explanation stage, keep in mind the total ordering properties.
  ///
  /// 0. Before comparison we coerce pointer types of address space 0 to
  /// integer.
  /// We also don't bother with the same type at left and right, and
  /// just return 0 in this case.
  ///
  /// 1. If types are of different kind (different type IDs).
  ///    Return result of type IDs comparison, treating them as numbers.
  /// 2. If types are integers, check that they have the same width. If they
  /// are vectors, check that they have the same count and subtype.
  /// 3. Types have the same ID, so check whether they are one of:
  /// * Void
  /// * Float
  /// * Double
  /// * X86_FP80
  /// * FP128
  /// * PPC_FP128
  /// * Label
  /// * Metadata
  /// We can treat these types as equal whenever their IDs are same.
  /// 4. If Left and Right are pointers, return result of address space
  /// comparison (numbers comparison). We can treat pointer types of same
  /// address space as equal.
  /// 5. If types are complex.
  /// Then both Left and Right are to be expanded, and their element types are
  /// checked in the same way. If we get Res != 0 at some stage, return it.
  /// Otherwise return 0.
  /// 6. For all other cases put llvm_unreachable.
  int cmpTypes(Type *TyL, Type *TyR) const;

  int cmpNumbers(uint64_t L, uint64_t R) const;
  int cmpAligns(Align L, Align R) const;
  int cmpAPInts(const APInt &L, const APInt &R) const;
  int cmpAPFloats(const APFloat &L, const APFloat &R) const;
  int cmpMem(StringRef L, StringRef R) const;

  // The two functions undergoing comparison.
  const Function *FnL, *FnR;

private:
  int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
  int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
  int cmpAttrs(const AttributeList L, const AttributeList R) const;
  int cmpMDNode(const MDNode *L, const MDNode *R) const;
  int cmpMetadata(const Metadata *L, const Metadata *R) const;
  int cmpInstMetadata(Instruction const *L, Instruction const *R) const;
  int cmpOperandBundlesSchema(const CallBase &LCS, const CallBase &RCS) const;

  /// Compare two GEPs for equivalent pointer arithmetic.
  /// Parts to be compared for each comparison stage,
  /// most significant stage first:
  /// 1. Address space. As numbers.
  /// 2. Constant offset, (using GEPOperator::accumulateConstantOffset method).
  /// 3. Pointer operand type (using cmpType method).
  /// 4. Number of operands.
  /// 5. Compare operands, using cmpValues method.
  int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR) const;
  int cmpGEPs(const GetElementPtrInst *GEPL,
              const GetElementPtrInst *GEPR) const {
    return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
  }

  /// Assign serial numbers to values from the left function, and values from
  /// the right function.
  /// Explanation:
  /// When comparing functions, we need to compare the values we meet at the
  /// left and right sides.
  /// It's easy to sort things out for external values. It just should be
  /// the same value at left and right.
  /// But for local values (those introduced inside the function body)
  /// we have to ensure they were introduced at exactly the same place,
  /// and play the same role.
  /// Let's assign a serial number to each value when we meet it for the first
  /// time. Values that were met at the same place will have the same serial
  /// numbers.
  /// In this case it would be good to explain a few points about values
  /// assigned to BBs and other ways of implementation (see below).
  ///
  /// 1. Safety of BB reordering.
  /// It's safe to change the order of BasicBlocks in a function.
  /// Relationships with other functions and serial numbering will not be
  /// changed in this case.
  /// As follows from FunctionComparator::compare(), we do a CFG walk: we
  /// start from the entry, and then take each terminator. So it doesn't
  /// matter how BBs are in fact ordered in the function. And since cmpValues
  /// is called during this walk, the numbering depends only on how BBs are
  /// located inside the CFG.
  /// So the answer is - yes. We will get the same numbering.
  ///
  /// 2. Impossibility to use dominance properties of values.
  /// If we compare two instruction operands, where the first is a usage of a
  /// local variable AL from function FL, and the second is a usage of a local
  /// variable AR from FR, we could compare their origins and check whether
  /// they are defined at the same place.
  /// But we are still not able to compare operands of PHI nodes, since those
  /// could be operands from further BBs we didn't scan yet.
  /// So it's impossible to use dominance properties in general.
  mutable DenseMap<const Value*, int> sn_mapL, sn_mapR;

  // The global state we will use
  GlobalNumberState* GlobalNumbers;
};
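
// A minimal comparison sketch (assumes Function pointers F1 and F2): a cheap
// hash filter first, then the full structural comparison, which is the
// pattern the MergeFunctions pass is built around.
//
//   GlobalNumberState GN;
//   if (FunctionComparator::functionHash(*F1) ==
//       FunctionComparator::functionHash(*F2)) {
//     FunctionComparator FC(F1, F2, &GN);
//     bool Equivalent = FC.compare() == 0;
//   }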

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONCOMPARATOR_H

//===- llvm/Transforms/Utils/SizeOpts.h - size optimization -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains some shared code size optimization related code.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SIZEOPTS_H
#define LLVM_TRANSFORMS_UTILS_SIZEOPTS_H

#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {
extern cl::opt<bool> EnablePGSO;
extern cl::opt<bool> PGSOLargeWorkingSetSizeOnly;
extern cl::opt<bool> PGSOColdCodeOnly;
extern cl::opt<bool> PGSOColdCodeOnlyForInstrPGO;
extern cl::opt<bool> PGSOColdCodeOnlyForSamplePGO;
extern cl::opt<bool> PGSOColdCodeOnlyForPartialSamplePGO;
extern cl::opt<bool> ForcePGSO;
extern cl::opt<int> PgsoCutoffInstrProf;
extern cl::opt<int> PgsoCutoffSampleProf;

class BasicBlock;
class BlockFrequencyInfo;
class Function;

enum class PGSOQueryType {
  IRPass, // A query call from an IR-level transform pass.
  Test,   // A query call from a unit test.
  Other,  // Others.
};

static inline bool isPGSOColdCodeOnly(ProfileSummaryInfo *PSI) {
  return PGSOColdCodeOnly ||
         (PSI->hasInstrumentationProfile() && PGSOColdCodeOnlyForInstrPGO) ||
         (PSI->hasSampleProfile() &&
          ((!PSI->hasPartialSampleProfile() && PGSOColdCodeOnlyForSamplePGO) ||
           (PSI->hasPartialSampleProfile() &&
            PGSOColdCodeOnlyForPartialSamplePGO))) ||
         (PGSOLargeWorkingSetSizeOnly && !PSI->hasLargeWorkingSetSize());
}

template <typename FuncT, typename BFIT>
bool shouldFuncOptimizeForSizeImpl(const FuncT *F, ProfileSummaryInfo *PSI,
                                   BFIT *BFI, PGSOQueryType QueryType) {
  assert(F);
  if (!PSI || !BFI || !PSI->hasProfileSummary())
    return false;
  if (ForcePGSO)
    return true;
  if (!EnablePGSO)
    return false;
  if (isPGSOColdCodeOnly(PSI))
    return PSI->isFunctionColdInCallGraph(F, *BFI);
  if (PSI->hasSampleProfile())
    // The "isCold" check seems to work better for Sample PGO as it could have
    // many profile-unannotated functions.
    return PSI->isFunctionColdInCallGraphNthPercentile(PgsoCutoffSampleProf, F,
                                                       *BFI);
  return !PSI->isFunctionHotInCallGraphNthPercentile(PgsoCutoffInstrProf, F,
                                                     *BFI);
}

template <typename BlockTOrBlockFreq, typename BFIT>
bool shouldOptimizeForSizeImpl(BlockTOrBlockFreq BBOrBlockFreq,
                               ProfileSummaryInfo *PSI, BFIT *BFI,
                               PGSOQueryType QueryType) {
  if (!PSI || !BFI || !PSI->hasProfileSummary())
    return false;
  if (ForcePGSO)
    return true;
  if (!EnablePGSO)
    return false;
  if (isPGSOColdCodeOnly(PSI))
    return PSI->isColdBlock(BBOrBlockFreq, BFI);
  if (PSI->hasSampleProfile())
    // The "isCold" check seems to work better for Sample PGO as it could have
    // many profile-unannotated functions.
    return PSI->isColdBlockNthPercentile(PgsoCutoffSampleProf, BBOrBlockFreq,
                                         BFI);
  return !PSI->isHotBlockNthPercentile(PgsoCutoffInstrProf, BBOrBlockFreq, BFI);
}

/// Returns true if function \p F is suggested to be size-optimized based on the
/// profile.
bool shouldOptimizeForSize(const Function *F, ProfileSummaryInfo *PSI,
                           BlockFrequencyInfo *BFI,
                           PGSOQueryType QueryType = PGSOQueryType::Other);

/// Returns true if basic block \p BB is suggested to be size-optimized based on
/// the profile.
bool shouldOptimizeForSize(const BasicBlock *BB, ProfileSummaryInfo *PSI,
                           BlockFrequencyInfo *BFI,
                           PGSOQueryType QueryType = PGSOQueryType::Other);
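
// Typical call-site sketch (assumes PSI and BFI were obtained from the
// analysis managers): a transform consults this before choosing the smaller
// of two code sequences.
//
//   if (shouldOptimizeForSize(F, PSI, BFI, PGSOQueryType::IRPass)) {
//     // prefer the size-optimized lowering here
//   }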

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SIZEOPTS_H

//===- Mem2Reg.h - The -mem2reg pass, a wrapper around the Utils lib ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is a simple pass wrapper around the PromoteMemToReg function call
// exposed by the Utils library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MEM2REG_H
#define LLVM_TRANSFORMS_UTILS_MEM2REG_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

class PromotePass : public PassInfoMixin<PromotePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
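
// Usage sketch (illustrative): equivalent to running "opt -passes=mem2reg".
//
//   FunctionPassManager FPM;
//   FPM.addPass(PromotePass());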

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H

//==- CanonicalizeFreezeInLoop.h - Canonicalize freezes in a loop-*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file canonicalizes freeze instructions in a loop.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZEFREEZEINLOOPS_H
#define LLVM_TRANSFORMS_UTILS_CANONICALIZEFREEZEINLOOPS_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class Loop;
class LPMUpdater;

/// A pass that canonicalizes freeze instructions in a loop.
class CanonicalizeFreezeInLoopsPass
    : public PassInfoMixin<CanonicalizeFreezeInLoopsPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
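
// Sketch of scheduling this loop pass inside a function pipeline (the
// adaptor is declared in llvm/Transforms/Scalar/LoopPassManager.h):
//
//   FunctionPassManager FPM;
//   FPM.addPass(
//       createFunctionToLoopPassAdaptor(CanonicalizeFreezeInLoopsPass()));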

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZEFREEZEINLOOPS_H

//===- Local.h - Functions to perform local transformations -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
#define LLVM_TRANSFORMS_UTILS_LOCAL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"
#include <cstdint>

namespace llvm {

class DataLayout;
class Value;
class WeakTrackingVH;
class WeakVH;
template <typename T> class SmallVectorImpl;
class AAResults;
class AllocaInst;
class AssumptionCache;
class BasicBlock;
class BranchInst;
class CallBase;
class CallInst;
class DbgVariableIntrinsic;
class DIBuilder;
class DomTreeUpdater;
class Function;
class Instruction;
class InvokeInst;
class LoadInst;
class MDNode;
class MemorySSAUpdater;
class PHINode;
class StoreInst;
class TargetLibraryInfo;
class TargetTransformInfo;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// If a terminator instruction is predicated on a constant value, convert it
/// into an unconditional branch to the constant destination.
/// This is a nontrivial operation because the successors of this basic block
/// must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
                            const TargetLibraryInfo *TLI = nullptr,
                            DomTreeUpdater *DTU = nullptr);

//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// Return true if the result produced by the instruction is not used, and the
/// instruction will return. Certain side-effecting instructions are also
/// considered dead if there are no uses of the instruction.
bool isInstructionTriviallyDead(Instruction *I,
                                const TargetLibraryInfo *TLI = nullptr);

/// Return true if the result produced by the instruction would have no side
/// effects if it was not used. This is equivalent to checking whether
/// isInstructionTriviallyDead would be true if the use count was 0.
bool wouldInstructionBeTriviallyDead(Instruction *I,
                                     const TargetLibraryInfo *TLI = nullptr);

/// Return true if the result produced by the instruction has no side effects on
/// any paths other than where it is used. This is less conservative than
/// wouldInstructionBeTriviallyDead which is based on the assumption
/// that the use count will be 0. An example usage of this API is for
/// identifying instructions that can be sunk down to use(s).
bool wouldInstructionBeTriviallyDeadOnUnusedPaths(
    Instruction *I, const TargetLibraryInfo *TLI = nullptr);

/// If the specified value is a trivially dead instruction, delete it.
/// If that makes any of its operands trivially dead, delete them too,
/// recursively. Return true if any instructions were deleted.
bool RecursivelyDeleteTriviallyDeadInstructions(
    Value *V, const TargetLibraryInfo *TLI = nullptr,
    MemorySSAUpdater *MSSAU = nullptr,
    std::function<void(Value *)> AboutToDeleteCallback =
        std::function<void(Value *)>());
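
// A common pattern, sketched: after rewriting all uses of an instruction,
// delete it together with any operand chain the deletion makes dead (the
// call is a no-op if I is not trivially dead):
//
//   I->replaceAllUsesWith(NewV);
//   RecursivelyDeleteTriviallyDeadInstructions(I);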

/// Delete all of the instructions in `DeadInsts`, and all other instructions
/// that deleting these in turn causes to be trivially dead.
///
/// The initial instructions in the provided vector must all have empty use
/// lists and satisfy `isInstructionTriviallyDead`.
///
/// `DeadInsts` will be used as scratch storage for this routine and will be
/// empty afterward.
void RecursivelyDeleteTriviallyDeadInstructions(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts,
    const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr,
    std::function<void(Value *)> AboutToDeleteCallback =
        std::function<void(Value *)>());

/// Same functionality as RecursivelyDeleteTriviallyDeadInstructions, but allow
/// instructions that are not trivially dead. These will be ignored.
/// Returns true if any changes were made, i.e. any instructions trivially dead
/// were found and deleted.
bool RecursivelyDeleteTriviallyDeadInstructionsPermissive(
    SmallVectorImpl<WeakTrackingVH> &DeadInsts,
    const TargetLibraryInfo *TLI = nullptr, MemorySSAUpdater *MSSAU = nullptr,
    std::function<void(Value *)> AboutToDeleteCallback =
        std::function<void(Value *)>());

/// If the specified value is an effectively dead PHI node, due to being a
/// def-use chain of single-use nodes that either forms a cycle or is terminated
/// by a trivially dead instruction, delete it. If that makes any of its
/// operands trivially dead, delete them too, recursively. Return true if a
/// change was made.
bool RecursivelyDeleteDeadPHINode(PHINode *PN,
                                  const TargetLibraryInfo *TLI = nullptr,
                                  MemorySSAUpdater *MSSAU = nullptr);

/// Scan the specified basic block and try to simplify any instructions in it
/// and recursively delete dead instructions.
///
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB,
                                 const TargetLibraryInfo *TLI = nullptr);

/// Replace all the uses of an SSA value in @llvm.dbg intrinsics with
/// undef. This is useful for signaling that a variable, e.g., has been
/// found dead and hence is unavailable at a given program point.
/// Returns true if any dbg values have been changed.
bool replaceDbgUsesWithUndef(Instruction *I);

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//

/// BB is a block with one predecessor and its predecessor is known to have one
/// successor (BB!). Eliminate the edge between them, moving the instructions in
/// the predecessor into BB. This deletes the predecessor block.
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);

/// BB is known to contain an unconditional branch, and contains no instructions
/// other than PHI nodes, potential debug intrinsics and the branch. If
/// possible, eliminate BB by rewriting all the predecessors to branch to the
/// successor block and return true. If we can't transform, return false.
bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB,
                                             DomTreeUpdater *DTU = nullptr);

/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
/// to be clever about PHI nodes which differ only in the order of the incoming
/// values, but instcombine orders them so it usually won't matter.
///
/// This overload removes the duplicate PHI nodes directly.
bool EliminateDuplicatePHINodes(BasicBlock *BB);

/// Check for and eliminate duplicate PHI nodes in this block. This doesn't try
/// to be clever about PHI nodes which differ only in the order of the incoming
/// values, but instcombine orders them so it usually won't matter.
///
/// This overload collects the PHI nodes to be removed into the ToRemove set.
bool EliminateDuplicatePHINodes(BasicBlock *BB,
                                SmallPtrSetImpl<PHINode *> &ToRemove);

/// This function is used to do simplification of a CFG.  For example, it
/// folds branches to branches to eliminate the extra hop, it eliminates
/// unreachable basic blocks, and does other peephole optimization of the CFG.
/// It returns true if a modification was made, possibly deleting the basic
/// block that was pointed to. LoopHeaders is an optional input parameter
/// providing the set of loop headers that SimplifyCFG should not eliminate.
extern cl::opt<bool> RequireAndPreserveDomTree;
bool simplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
                 DomTreeUpdater *DTU = nullptr,
                 const SimplifyCFGOptions &Options = {},
                 ArrayRef<WeakVH> LoopHeaders = {});
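
// A usage sketch from a hypothetical function pass: run simplifyCFG over
// every block, using an early-incremented range since the current block may
// be deleted (TTI is assumed to come from the pass's analysis manager):
//
//   bool Changed = false;
//   for (BasicBlock &BB : llvm::make_early_inc_range(F))
//     Changed |= simplifyCFG(&BB, TTI);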

/// This function is used to flatten a CFG. For example, it uses parallel-and
/// and parallel-or mode to collapse if-conditions and merge if-regions with
/// identical statements.
bool FlattenCFG(BasicBlock *BB, AAResults *AA = nullptr);

/// If this basic block is ONLY a setcc and a branch, and if a predecessor
/// branches to us and one of our successors, fold the setcc into the
/// predecessor and use logical operations to pick the right destination.
bool FoldBranchToCommonDest(BranchInst *BI, llvm::DomTreeUpdater *DTU = nullptr,
                            MemorySSAUpdater *MSSAU = nullptr,
                            const TargetTransformInfo *TTI = nullptr,
                            unsigned BonusInstThreshold = 1);

/// This function takes a virtual register computed by an Instruction and
/// replaces it with a slot in the stack frame, allocated via alloca.
/// This allows the CFG to be changed around without fear of invalidating the
/// SSA information for the value. It returns the pointer to the alloca inserted
/// to create a stack slot for X.
AllocaInst *DemoteRegToStack(Instruction &X,
                             bool VolatileLoads = false,
                             Instruction *AllocaPoint = nullptr);
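
// A usage sketch: take a value out of SSA form before radically restructuring
// the CFG; afterwards, former uses of Inst load from the returned slot, and
// Inst's result is stored into it:
//
//   AllocaInst *Slot = DemoteRegToStack(*Inst, /*VolatileLoads=*/false);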

/// This function takes a virtual register computed by a phi node and replaces
/// it with a slot in the stack frame, allocated via alloca. The phi node is
/// deleted and it returns the pointer to the alloca inserted.
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);

/// Try to ensure that the alignment of \p V is at least \p PrefAlign bytes. If
/// the owning object can be modified and has an alignment less than \p
/// PrefAlign, it will be increased and \p PrefAlign returned. If the alignment
/// cannot be increased, the known alignment of the value is returned.
///
/// It is not always possible to modify the alignment of the underlying object,
/// so if alignment is important, a more reliable approach is to simply align
/// all global variables and allocation instructions to their preferred
/// alignment from the beginning.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign,
                                 const DataLayout &DL,
                                 const Instruction *CxtI = nullptr,
                                 AssumptionCache *AC = nullptr,
                                 const DominatorTree *DT = nullptr);

/// Try to infer an alignment for the specified pointer.
inline Align getKnownAlignment(Value *V, const DataLayout &DL,
                               const Instruction *CxtI = nullptr,
                               AssumptionCache *AC = nullptr,
                               const DominatorTree *DT = nullptr) {
  return getOrEnforceKnownAlignment(V, MaybeAlign(), DL, CxtI, AC, DT);
}
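
// A usage sketch: query whether a pointer is known to be at least 16-byte
// aligned at a particular use site (Ptr and the context instruction CtxLoad
// are assumed values from the surrounding code):
//
//   Align A = getKnownAlignment(Ptr, DL, CtxLoad);
//   bool Aligned16 = A.value() >= 16;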

/// Create a call that matches the invoke \p II in terms of arguments,
/// attributes, debug information, etc. The call is not placed in a block and it
/// will not have a name. The invoke instruction is not removed, nor are the
/// uses replaced by the new call.
CallInst *createCallMatchingInvoke(InvokeInst *II);

/// This function converts the specified invoke into a normal call.
CallInst *changeToCall(InvokeInst *II, DomTreeUpdater *DTU = nullptr);

//===----------------------------------------------------------------------===//
//  Dbg Intrinsic utilities
//

/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                     StoreInst *SI, DIBuilder &Builder);

/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                     LoadInst *LI, DIBuilder &Builder);

/// Inserts a llvm.dbg.value intrinsic after a phi that has an associated
/// llvm.dbg.declare intrinsic.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII,
                                     PHINode *LI, DIBuilder &Builder);

/// Lowers llvm.dbg.declare intrinsics into appropriate set of
/// llvm.dbg.value intrinsics.
bool LowerDbgDeclare(Function &F);

/// Propagate dbg.value intrinsics through the newly inserted PHIs.
void insertDebugValuesForPHIs(BasicBlock *BB,
                              SmallVectorImpl<PHINode *> &InsertedPHIs);

/// Replaces llvm.dbg.declare instruction when the address it
/// describes is replaced with a new value. If Deref is true, an
/// additional DW_OP_deref is prepended to the expression. If Offset
/// is non-zero, a constant displacement is added to the expression
/// (between the optional Deref operations). Offset can be negative.
bool replaceDbgDeclare(Value *Address, Value *NewAddress, DIBuilder &Builder,
                       uint8_t DIExprFlags, int Offset);

/// Replaces multiple llvm.dbg.value instructions when the alloca it describes
/// is replaced with a new value. If Offset is non-zero, a constant displacement
/// is added to the expression (after the mandatory Deref). Offset can be
/// negative. New llvm.dbg.value instructions are inserted at the locations of
/// the instructions they replace.
void replaceDbgValueForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                              DIBuilder &Builder, int Offset = 0);

/// Assuming the instruction \p I is going to be deleted, attempt to salvage
/// debug users of \p I by writing the effect of \p I in a DIExpression. If it
/// cannot be salvaged, changes its debug uses to undef.
void salvageDebugInfo(Instruction &I);


/// Implementation of salvageDebugInfo, applying only to instructions in
/// \p Insns, rather than all debug users of \p I found via findDbgUsers.
/// Marks the debug uses undef if salvaging cannot be completed.
void salvageDebugInfoForDbgValues(Instruction &I,
                                  ArrayRef<DbgVariableIntrinsic *> Insns);

/// Given an instruction \p I and DIExpression \p DIExpr operating on
/// it, append the effects of \p I to the DIExpression operand list
/// \p Ops, or return \p nullptr if it cannot be salvaged.
/// \p CurrentLocOps is the number of SSA values referenced by the
/// incoming \p Ops.  \return the first non-constant operand
/// implicitly referred to by Ops. If \p I references more than one
/// non-constant operand, any additional operands are added to
/// \p AdditionalValues.
///
/// \example
///
///   I = add %a, i32 1
///
///   Return = %a
///   Ops = llvm::dwarf::DW_OP_lit1 llvm::dwarf::DW_OP_add
///
///   I = add %a, %b
///
///   Return = %a
///   Ops = llvm::dwarf::DW_OP_LLVM_arg0 llvm::dwarf::DW_OP_add
///   AdditionalValues = %b
Value *salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps,
                            SmallVectorImpl<uint64_t> &Ops,
                            SmallVectorImpl<Value *> &AdditionalValues);

/// Point debug users of \p From to \p To or salvage them. Use this function
/// only when replacing all uses of \p From with \p To, with a guarantee that
/// \p From is going to be deleted.
///
/// Follow these rules to prevent use-before-def of \p To:
///   . If \p To is a linked Instruction, set \p DomPoint to \p To.
///   . If \p To is an unlinked Instruction, set \p DomPoint to the Instruction
///     \p To will be inserted after.
///   . If \p To is not an Instruction (e.g. a Constant), the choice of
///     \p DomPoint is arbitrary. Pick \p From for simplicity.
///
/// If a debug user cannot be preserved without reordering variable updates or
/// introducing a use-before-def, it is either salvaged (\ref salvageDebugInfo)
/// or deleted. Returns true if any debug users were updated.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint,
                           DominatorTree &DT);

/// Remove all instructions from a basic block other than its terminator
/// and any present EH pad instructions. Returns a pair where the first element
/// is the number of instructions (excluding debug info intrinsics) that have
/// been removed, and the second element is the number of debug info intrinsics
/// that have been removed.
std::pair<unsigned, unsigned>
removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB);

/// Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA = false,
                             DomTreeUpdater *DTU = nullptr,
                             MemorySSAUpdater *MSSAU = nullptr);

/// Convert the CallInst to InvokeInst with the specified unwind edge basic
/// block.  This also splits the basic block where CI is located, because
/// InvokeInst is a terminator instruction.  Returns the newly split basic
/// block.
BasicBlock *changeToInvokeAndSplitBasicBlock(CallInst *CI,
                                             BasicBlock *UnwindEdge,
                                             DomTreeUpdater *DTU = nullptr);

/// Replace 'BB's terminator with one that does not have an unwind successor
/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
/// successor. Returns the instruction that replaced the original terminator,
/// which might be a call in case the original terminator was an invoke.
///
/// \param BB  Block whose terminator will be replaced.  Its terminator must
///            have an unwind successor.
Instruction *removeUnwindEdge(BasicBlock *BB, DomTreeUpdater *DTU = nullptr);

/// Remove all blocks that cannot be reached from the function's entry.
///
/// Returns true if any basic block was removed.
bool removeUnreachableBlocks(Function &F, DomTreeUpdater *DTU = nullptr,
                             MemorySSAUpdater *MSSAU = nullptr);

/// Combine the metadata of two instructions so that K can replace J. Some
/// metadata kinds can only be kept if K does not move, meaning it dominated
/// J in the original IR.
///
/// Metadata not listed as known via KnownIDs is removed.
void combineMetadata(Instruction *K, const Instruction *J,
                     ArrayRef<unsigned> KnownIDs, bool DoesKMove);

/// Combine the metadata of two instructions so that K can replace J. This
/// specifically handles the case of CSE-like transformations. Some
/// metadata can only be kept if K dominates J. For this to be correct,
/// K cannot be hoisted.
///
/// Unknown metadata is removed.
void combineMetadataForCSE(Instruction *K, const Instruction *J,
                           bool DoesKMove);

/// Copy the metadata from the source instruction to the destination (the
/// replacement for the source instruction).
void copyMetadataForLoad(LoadInst &Dest, const LoadInst &Source);

/// Patch the replacement so that it is not more restrictive than the value
/// being replaced. It assumes that the replacement does not get moved from
/// its original position.
void patchReplacementInstruction(Instruction *I, Value *Repl);

/// Replace each use of 'From' with 'To', if that use does not belong to the
/// basic block where 'From' is defined. Returns the number of replacements made.
unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);

/// Replace each use of 'From' with 'To' if that use is dominated by
/// the given edge.  Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
                                  const BasicBlockEdge &Edge);
/// Replace each use of 'From' with 'To' if that use is dominated by
/// the end of the given BasicBlock. Returns the number of replacements made.
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
                                  const BasicBlock *BB);
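
// A usage sketch: after learning a fact about X on the taken edge of a
// conditional branch BI, rewrite only the uses dominated by that edge
// (BetterX is an assumed replacement value):
//
//   BasicBlockEdge Edge(BI->getParent(), BI->getSuccessor(0));
//   unsigned NumReplaced = replaceDominatedUsesWith(X, BetterX, DT, Edge);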

/// Return true if this call calls a gc leaf function.
///
/// A leaf function is a function that does not safepoint the thread during its
/// execution.  During a call or invoke to such a function, the caller's stack
/// does not have to be made parseable.
///
/// Most passes can and should ignore this information, and it is only used
/// during lowering by the GC infrastructure.
bool callsGCLeafFunction(const CallBase *Call, const TargetLibraryInfo &TLI);

/// Copy a nonnull metadata node to a new load instruction.
///
/// This handles mapping it to range metadata if the new load is an integer
/// load instead of a pointer load.
void copyNonnullMetadata(const LoadInst &OldLI, MDNode *N, LoadInst &NewLI);

/// Copy a range metadata node to a new load instruction.
///
/// This handles mapping it to nonnull metadata if the new load is a pointer
/// load instead of an integer load and the range doesn't cover null.
void copyRangeMetadata(const DataLayout &DL, const LoadInst &OldLI, MDNode *N,
                       LoadInst &NewLI);

/// Remove the debug intrinsic instructions for the given instruction.
void dropDebugUsers(Instruction &I);

/// Hoist all of the instructions in block \p BB to the dominant block
/// \p DomBlock, by moving them to the insertion point \p InsertPt.
///
/// The moved instructions receive the insertion point debug location values
/// (DILocations) and their debug intrinsic instructions are removed.
void hoistAllInstructionsInto(BasicBlock *DomBlock, Instruction *InsertPt,
                              BasicBlock *BB);

//===----------------------------------------------------------------------===//
//  Intrinsic pattern matching
//

/// Try to match a bswap or bitreverse idiom.
///
/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
/// instructions are returned in \c InsertedInsts. They will all have been added
/// to a basic block.
///
/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
/// to BW / 4 nodes to be searched, so is significantly faster.
///
/// This function returns true on a successful match or false otherwise.
bool recognizeBSwapOrBitReverseIdiom(
    Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
    SmallVectorImpl<Instruction *> &InsertedInsts);

//===----------------------------------------------------------------------===//
//  Sanitizer utilities
//

/// Given a CallInst, check if it calls a string function known to CodeGen,
/// and mark it with NoBuiltin if so.  To be used by sanitizers that intend
/// to intercept string functions and want to avoid converting them to target
/// specific instructions.
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
                                            const TargetLibraryInfo *TLI);

//===----------------------------------------------------------------------===//
//  Transform predicates
//

/// Given an instruction, is it legal to set operand OpIdx to a non-constant
/// value?
bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);

//===----------------------------------------------------------------------===//
//  Value helper functions
//

/// Invert the given true/false value, possibly reusing an existing copy.
Value *invertCondition(Value *Condition);


//===----------------------------------------------------------------------===//
//  Assorted
//

/// If we can infer one attribute from another on the declaration of a
/// function, explicitly materialize the maximal set in the IR.
bool inferAttributesFromOthers(Function &F);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOCAL_H
//===- SSAUpdater.h - Unstructured SSA Update Tool --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SSAUpdater class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <string>

namespace llvm {

class BasicBlock;
class Instruction;
class LoadInst;
class PHINode;
template <typename T> class SmallVectorImpl;
template <typename T> class SSAUpdaterTraits;
class Type;
class Use;
class Value;
class DbgValueInst;

/// Helper class for SSA formation on a set of values defined in
/// multiple blocks.
///
/// This is used when code duplication or another unstructured
/// transformation wants to rewrite a set of uses of one value with uses of a
/// set of values.
class SSAUpdater {
  friend class SSAUpdaterTraits<SSAUpdater>;

private:
  /// This keeps track of which value to use on a per-block basis. When we
  /// insert PHI nodes, we keep track of them here.
  void *AV = nullptr;

  /// ProtoType holds the type of the values being rewritten.
  Type *ProtoType = nullptr;

  /// PHI nodes are given a name based on ProtoName.
  std::string ProtoName;

  /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
  /// the vector.
  SmallVectorImpl<PHINode *> *InsertedPHIs;

public:
  /// If InsertedPHIs is specified, it will be filled
  /// in with all PHI Nodes created by rewriting.
  explicit SSAUpdater(SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
  SSAUpdater(const SSAUpdater &) = delete;
  SSAUpdater &operator=(const SSAUpdater &) = delete;
  ~SSAUpdater();

  /// Reset this object to get ready for a new set of SSA updates with
  /// type 'Ty'.
  ///
  /// PHI nodes get a name based on 'Name'.
  void Initialize(Type *Ty, StringRef Name);

  /// Indicate that a rewritten value is available in the specified block
  /// with the specified value.
  void AddAvailableValue(BasicBlock *BB, Value *V);

  /// Return true if the SSAUpdater already has a value for the specified
  /// block.
  bool HasValueForBlock(BasicBlock *BB) const;

  /// Return the value for the specified block if the SSAUpdater has one,
  /// otherwise return nullptr.
  Value *FindValueForBlock(BasicBlock *BB) const;

  /// Construct SSA form, materializing a value that is live at the end
  /// of the specified block.
  Value *GetValueAtEndOfBlock(BasicBlock *BB);

  /// Construct SSA form, materializing a value that is live in the
  /// middle of the specified block.
  ///
  /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
  /// in one important case: if there is a definition of the rewritten value
  /// after the 'use' in BB.  Consider code like this:
  ///
  /// \code
  ///      X1 = ...
  ///   SomeBB:
  ///      use(X)
  ///      X2 = ...
  ///      br Cond, SomeBB, OutBB
  /// \endcode
  ///
  /// In this case, there are two values (X1 and X2) added to the AvailableVals
  /// set by the client of the rewriter, and those values are both live out of
  /// their respective blocks.  However, the use of X happens in the *middle* of
  /// a block.  Because of this, we need to insert a new PHI node in SomeBB to
  /// merge the appropriate values, and this value isn't live out of the block.
  Value *GetValueInMiddleOfBlock(BasicBlock *BB);

  /// Rewrite a use of the symbolic value.
  ///
  /// This handles PHI nodes, which use their value in the corresponding
  /// predecessor. Note that this will not work if the use is supposed to be
  /// rewritten to a value defined in the same block as the use, but above it.
  /// Any 'AddAvailableValue's added for the use's block will be considered to
  /// be below it.
  void RewriteUse(Use &U);

  /// Rewrite debug value intrinsics to conform to a new SSA form.
  ///
  /// This will scout out all the debug value intrinsics associated with
  /// the instruction. Anything outside of its block will have its
  /// value set to the new SSA value if available, and undef if not.
  void UpdateDebugValues(Instruction *I);
  void UpdateDebugValues(Instruction *I,
                         SmallVectorImpl<DbgValueInst *> &DbgValues);

  /// Rewrite a use like \c RewriteUse but handling in-block definitions.
  ///
  /// This version of the method can rewrite uses in the same block as
  /// a definition, because it assumes that all uses of a value are below any
  /// inserted values.
  void RewriteUseAfterInsertions(Use &U);

private:
  Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
  void UpdateDebugValue(Instruction *I, DbgValueInst *DbgValue);
};
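
// A minimal rewrite loop, sketched: rewrite every use of OrigInst with the
// per-block values recorded in NewVals (a hypothetical map from blocks to
// replacement values), letting SSAUpdater insert PHIs where needed:
//
//   SSAUpdater SSA;
//   SSA.Initialize(OrigInst->getType(), OrigInst->getName());
//   for (auto &[BB, V] : NewVals)
//     SSA.AddAvailableValue(BB, V);
//   while (!OrigInst->use_empty())
//     SSA.RewriteUse(*OrigInst->use_begin());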

/// Helper class for promoting a collection of loads and stores into SSA
/// Form using the SSAUpdater.
///
/// This handles complexities that SSAUpdater doesn't, such as multiple loads
/// and stores in one block.
///
/// Clients of this class are expected to subclass this and implement the
/// virtual methods.
class LoadAndStorePromoter {
protected:
  SSAUpdater &SSA;

public:
  LoadAndStorePromoter(ArrayRef<const Instruction *> Insts,
                       SSAUpdater &S, StringRef Name = StringRef());
  virtual ~LoadAndStorePromoter() = default;

  /// This does the promotion.
  ///
  /// Insts is a list of loads and stores to promote, and Name (passed to the
  /// constructor) is the basename for the PHIs to insert. After this is
  /// complete, the loads and stores are removed from the code.
  void run(const SmallVectorImpl<Instruction *> &Insts);

  /// Return true if the specified instruction is in the Inst list.
  ///
  /// The Insts list is the one passed into the constructor. Clients should
  /// implement this with a more efficient version if possible.
  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction *> &Insts) const;

  /// This hook is invoked after all the stores are found and inserted as
  /// available values.
  virtual void doExtraRewritesBeforeFinalDeletion() {}

  /// Clients can choose to implement this to get notified right before
  /// a load is RAUW'd with another value.
  virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {}

  /// Called before each instruction is deleted.
  virtual void instructionDeleted(Instruction *I) const {}

  /// Called to update debug info associated with the instruction.
  virtual void updateDebugInfo(Instruction *I) const {}

  /// Return false if a sub-class wants to keep one of the loads/stores
  /// after the SSA construction.
  virtual bool shouldDelete(Instruction *I) const { return true; }
};
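
// A usage sketch with the default hooks: promote the loads and stores of a
// single value collected in Insts (how they are collected is up to the
// client):
//
//   SmallVector<Instruction *, 8> Insts;
//   // ... collect the relevant loads and stores into Insts ...
//   SSAUpdater SSA;
//   LoadAndStorePromoter Promoter({Insts.begin(), Insts.end()}, SSA,
//                                 "promoted");
//   Promoter.run(Insts);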

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
//===- MoveAutoInit.h - Move insts marked as auto-init Pass --*- C++ -*-======//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions marked as auto-init closer to their use if
// profitable, generally because it moves them under a guard, potentially
// skipping the overhead of the auto-init under some execution paths.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MOVEAUTOINIT_H
#define LLVM_TRANSFORMS_UTILS_MOVEAUTOINIT_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class MoveAutoInitPass : public PassInfoMixin<MoveAutoInitPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
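
// A usage sketch: the pass can be added to a new-pass-manager pipeline like
// any other function pass:
//
//   FunctionPassManager FPM;
//   FPM.addPass(MoveAutoInitPass());
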
} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_MOVEAUTOINIT_H
//===- llvm/Transforms/Utils/LoopPeel.h ----- Peeling utilities -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some loop peeling utilities. It does not define any
// actual pass or policy.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
#define LLVM_TRANSFORMS_UTILS_LOOPPEEL_H

#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

namespace llvm {

bool canPeel(const Loop *L);

/// VMap is the value-map that maps instructions from the original loop to
/// instructions in the last peeled-off iteration.
bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
              DominatorTree &DT, AssumptionCache *AC, bool PreserveLCSSA,
              ValueToValueMapTy &VMap);
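
// A usage sketch: peel the first two iterations of L when legal (LI, SE, DT,
// and AC are the usual analyses owned by the caller):
//
//   ValueToValueMapTy VMap;
//   if (canPeel(L))
//     peelLoop(L, /*PeelCount=*/2, &LI, &SE, DT, &AC,
//              /*PreserveLCSSA=*/true, VMap);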

TargetTransformInfo::PeelingPreferences
gatherPeelingPreferences(Loop *L, ScalarEvolution &SE,
                         const TargetTransformInfo &TTI,
                         std::optional<bool> UserAllowPeeling,
                         std::optional<bool> UserAllowProfileBasedPeeling,
                         bool UnrollingSpecificValues = false);

void computePeelCount(Loop *L, unsigned LoopSize,
                      TargetTransformInfo::PeelingPreferences &PP,
                      unsigned TripCount, DominatorTree &DT,
                      ScalarEvolution &SE, AssumptionCache *AC = nullptr,
                      unsigned Threshold = UINT_MAX);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOOPPEEL_H
//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains an implementation of 32bit and 64bit scalar integer
// division for targets that don't have native support. It's largely derived
// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
// but hand-tuned for targets that prefer less control flow.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
#define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H

namespace llvm {
  class BinaryOperator;
}

namespace llvm {

  /// Generate code to calculate the remainder of two integers, replacing Rem
  /// with the generated code. This currently generates code using the udiv
  /// expansion, but future work includes generating more specialized code,
  /// e.g. when more information about the operands is known. Implements both
  /// 32bit and 64bit scalar division.
  ///
  /// Replace Rem with generated code.
  bool expandRemainder(BinaryOperator *Rem);

  /// Generate code to divide two integers, replacing Div with the generated
  /// code. This currently generates code similarly to compiler-rt's
  /// implementations, but future work includes generating more specialized code
  /// when more information about the operands is known. Implements both
  /// 32bit and 64bit scalar division.
  ///
  /// Replace Div with generated code.
  bool expandDivision(BinaryOperator* Div);
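
  // A usage sketch: expand a udiv instruction for a target without a native
  // divider (I is an assumed instruction visited by the caller):
  //
  //   if (auto *BO = dyn_cast<BinaryOperator>(&I))
  //     if (BO->getOpcode() == Instruction::UDiv)
  //       expandDivision(BO);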

  /// Generate code to calculate the remainder of two integers, replacing Rem
  /// with the generated code. Uses expandRemainder with a 32bit Rem which
  /// makes it useful for targets with little or no support for less than
  /// 32 bit arithmetic.
  ///
  /// Replace Rem with generated code.
  bool expandRemainderUpTo32Bits(BinaryOperator *Rem);

  /// Generate code to calculate the remainder of two integers, replacing Rem
  /// with the generated code. Uses expandRemainder with a 64bit Rem.
  ///
  /// Replace Rem with generated code.
  bool expandRemainderUpTo64Bits(BinaryOperator *Rem);

  /// Generate code to divide two integers, replacing Div with the generated
  /// code. Uses expandDivision with a 32bit Div which makes it useful for
  /// targets with little or no support for less than 32 bit arithmetic.
  ///
  /// Replace Div with generated code.
  bool expandDivisionUpTo32Bits(BinaryOperator *Div);

  /// Generate code to divide two integers, replacing Div with the generated
  /// code. Uses expandDivision with a 64bit Div.
  ///
  /// Replace Div with generated code.
  bool expandDivisionUpTo64Bits(BinaryOperator *Div);

} // End llvm namespace

#endif
//===- MetaRenamer.h - Rename everything with metasyntactic names ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass renames everything with metasyntactic names. The intent is to use
// this pass after bugpoint reduction to conceal the nature of the original
// program.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_METARENAMER_H
#define LLVM_TRANSFORMS_UTILS_METARENAMER_H

#include "llvm/IR/PassManager.h"

namespace llvm {
struct MetaRenamerPass : PassInfoMixin<MetaRenamerPass> {
  PreservedAnalyses run(Module &, ModuleAnalysisManager &);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_METARENAMER_H
//===-- EscapeEnumerator.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines a helper class that enumerates all possible exits from a function,
// including exception handling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
#define LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H

#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"

namespace llvm {

class DomTreeUpdater;

/// EscapeEnumerator - This is a little algorithm to find all escape points
/// from a function so that "finally"-style code can be inserted. In addition
/// to finding the existing return and unwind instructions, it also (if
/// necessary) transforms any call instructions into invokes and sends them to
/// a landing pad.
class EscapeEnumerator {
  Function &F;
  const char *CleanupBBName;

  Function::iterator StateBB, StateE;
  IRBuilder<> Builder;
  bool Done = false;
  bool HandleExceptions;

  DomTreeUpdater *DTU;

public:
  EscapeEnumerator(Function &F, const char *N = "cleanup",
                   bool HandleExceptions = true, DomTreeUpdater *DTU = nullptr)
      : F(F), CleanupBBName(N), StateBB(F.begin()), StateE(F.end()),
        Builder(F.getContext()), HandleExceptions(HandleExceptions), DTU(DTU) {}

  IRBuilder<> *Next();
};
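
// A usage sketch: visit every escape point of F and insert a cleanup call
// before it (CleanupFn is an assumed FunctionCallee):
//
//   EscapeEnumerator EE(F, "cleanup");
//   while (IRBuilder<> *AtExit = EE.Next())
//     AtExit->CreateCall(CleanupFn);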

}

#endif // LLVM_TRANSFORMS_UTILS_ESCAPEENUMERATOR_H
//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to generate code from scalar expressions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SCALAREVOLUTIONEXPANDER_H
#define LLVM_TRANSFORMS_UTILS_SCALAREVOLUTIONEXPANDER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/InstructionCost.h"

namespace llvm {
extern cl::opt<unsigned> SCEVCheapExpansionBudget;

/// struct for holding enough information to help calculate the cost of the
/// given SCEV when expanded into IR.
struct SCEVOperand {
  explicit SCEVOperand(unsigned Opc, int Idx, const SCEV *S) :
    ParentOpcode(Opc), OperandIdx(Idx), S(S) { }
  /// LLVM instruction opcode that uses the operand.
  unsigned ParentOpcode;
  /// The use index of an expanded instruction.
  int OperandIdx;
  /// The SCEV operand to be costed.
  const SCEV* S;
};

/// This class uses information about analyzed scalars to rewrite expressions
/// in canonical form.
///
/// Clients should create an instance of this class when rewriting is needed,
/// and destroy it when finished to allow the release of the associated
/// memory.
class SCEVExpander : public SCEVVisitor<SCEVExpander, Value *> {
  ScalarEvolution &SE;
  const DataLayout &DL;

  // New instructions receive a name to identify them with the current pass.
  const char *IVName;

  /// Indicates whether LCSSA phis should be created for inserted values.
  bool PreserveLCSSA;

  // InsertedExpressions caches Values for reuse, so must track RAUW.
  DenseMap<std::pair<const SCEV *, Instruction *>, TrackingVH<Value>>
      InsertedExpressions;

  // InsertedValues only flags inserted instructions so needs no RAUW.
  DenseSet<AssertingVH<Value>> InsertedValues;
  DenseSet<AssertingVH<Value>> InsertedPostIncValues;

  /// Keep track of the existing IR values re-used during expansion.
  /// FIXME: Ideally re-used instructions would not be added to
  /// InsertedValues/InsertedPostIncValues.
  SmallPtrSet<Value *, 16> ReusedValues;

  // The induction variables generated.
  SmallVector<WeakVH, 2> InsertedIVs;

  /// A memoization of the "relevant" loop for a given SCEV.
  DenseMap<const SCEV *, const Loop *> RelevantLoops;

  /// Addrecs referring to any of the given loops are expanded in post-inc
  /// mode. For example, expanding {1,+,1}<L> in post-inc mode returns the add
  /// instruction that adds one to the phi for {0,+,1}<L>, as opposed to a new
  /// phi starting at 1. This is only supported in non-canonical mode.
  PostIncLoopSet PostIncLoops;

  /// When this is non-null, addrecs expanded in the loop it indicates should
  /// be inserted with increments at IVIncInsertPos.
  const Loop *IVIncInsertLoop;

  /// When expanding addrecs in the IVIncInsertLoop loop, insert the IV
  /// increment at this position.
  Instruction *IVIncInsertPos;

  /// Phis that complete an IV chain. Reuse
  DenseSet<AssertingVH<PHINode>> ChainedPhis;

  /// When true, SCEVExpander tries to expand expressions in "canonical" form.
  /// When false, expressions are expanded in a more literal form.
  ///
  /// In "canonical" form addrecs are expanded as arithmetic based on a
  /// canonical induction variable. Note that CanonicalMode doesn't guarantee
  /// that all expressions are expanded in "canonical" form. For some
  /// expressions literal mode can be preferred.
  bool CanonicalMode;

  /// When invoked from LSR, the expander is in "strength reduction" mode. The
  /// only difference is that phi's are only reused if they are already in
  /// "expanded" form.
  bool LSRMode;

  typedef IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> BuilderType;
  BuilderType Builder;

  // RAII object that stores the current insertion point and restores it when
  // the object is destroyed. This includes the debug location.  Duplicated
  // from InsertPointGuard to add SetInsertPoint(), which is used to update the
  // InsertPointGuards stack when insert points are moved during SCEV
  // expansion.
  class SCEVInsertPointGuard {
    IRBuilderBase &Builder;
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;
    SCEVExpander *SE;

    SCEVInsertPointGuard(const SCEVInsertPointGuard &) = delete;
    SCEVInsertPointGuard &operator=(const SCEVInsertPointGuard &) = delete;

  public:
    SCEVInsertPointGuard(IRBuilderBase &B, SCEVExpander *SE)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()), SE(SE) {
      SE->InsertPointGuards.push_back(this);
    }

    ~SCEVInsertPointGuard() {
      // These guards should always be created/destroyed in LIFO order since
      // they are used to guard lexically scoped blocks of code in
      // ScalarEvolutionExpander.
      assert(SE->InsertPointGuards.back() == this);
      SE->InsertPointGuards.pop_back();
      Builder.restoreIP(IRBuilderBase::InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }

    BasicBlock::iterator GetInsertPoint() const { return Point; }
    void SetInsertPoint(BasicBlock::iterator I) { Point = I; }
  };

  /// Stack of pointers to saved insert points, used to keep insert points
  /// consistent when instructions are moved.
  SmallVector<SCEVInsertPointGuard *, 8> InsertPointGuards;

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
  const char *DebugType;
#endif

  friend struct SCEVVisitor<SCEVExpander, Value *>;

public:
  /// Construct a SCEVExpander in "canonical" mode.
  explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
                        const char *name, bool PreserveLCSSA = true)
      : SE(se), DL(DL), IVName(name), PreserveLCSSA(PreserveLCSSA),
        IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr), CanonicalMode(true),
        LSRMode(false),
        Builder(se.getContext(), InstSimplifyFolder(DL),
                IRBuilderCallbackInserter(
                    [this](Instruction *I) { rememberInstruction(I); })) {
#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
    DebugType = "";
#endif
  }

  ~SCEVExpander() {
    // Make sure the insert point guard stack is consistent.
    assert(InsertPointGuards.empty());
  }

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
  void setDebugType(const char *s) { DebugType = s; }
#endif

  /// Erase the contents of the InsertedExpressions map so that users trying
  /// to expand the same expression into multiple BasicBlocks or different
  /// places within the same BasicBlock can do so.
  void clear() {
    InsertedExpressions.clear();
    InsertedValues.clear();
    InsertedPostIncValues.clear();
    ReusedValues.clear();
    ChainedPhis.clear();
    InsertedIVs.clear();
  }

  ScalarEvolution *getSE() { return &SE; }
  const SmallVectorImpl<WeakVH> &getInsertedIVs() const { return InsertedIVs; }

  /// Return a vector containing all instructions inserted during expansion.
  SmallVector<Instruction *, 32> getAllInsertedInstructions() const {
    SmallVector<Instruction *, 32> Result;
    for (const auto &VH : InsertedValues) {
      Value *V = VH;
      if (ReusedValues.contains(V))
        continue;
      if (auto *Inst = dyn_cast<Instruction>(V))
        Result.push_back(Inst);
    }
    for (const auto &VH : InsertedPostIncValues) {
      Value *V = VH;
      if (ReusedValues.contains(V))
        continue;
      if (auto *Inst = dyn_cast<Instruction>(V))
        Result.push_back(Inst);
    }

    return Result;
  }

  /// Return true for expressions that can't be evaluated at runtime
  /// within the given \p Budget.
  ///
  /// \p At is a parameter which specifies the point in code where the user is
  /// going to expand these expressions. Sometimes this knowledge can lead to
  /// a less pessimistic cost estimation.
  bool isHighCostExpansion(ArrayRef<const SCEV *> Exprs, Loop *L,
                           unsigned Budget, const TargetTransformInfo *TTI,
                           const Instruction *At) {
    assert(TTI && "This function requires TTI to be provided.");
    assert(At && "This function requires At instruction to be provided.");
    if (!TTI)      // In assert-less builds, avoid crashing
      return true; // by always claiming to be high-cost.
    SmallVector<SCEVOperand, 8> Worklist;
    SmallPtrSet<const SCEV *, 8> Processed;
    InstructionCost Cost = 0;
    unsigned ScaledBudget = Budget * TargetTransformInfo::TCC_Basic;
    for (auto *Expr : Exprs)
      Worklist.emplace_back(-1, -1, Expr);
    while (!Worklist.empty()) {
      const SCEVOperand WorkItem = Worklist.pop_back_val();
      if (isHighCostExpansionHelper(WorkItem, L, *At, Cost, ScaledBudget, *TTI,
                                    Processed, Worklist))
        return true;
    }
    assert(Cost <= ScaledBudget && "Should have returned from inner loop.");
    return false;
  }
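
  // A usage sketch: give up on a rewrite when materializing the expressions
  // would be too expensive (Expander, Exprs, L, TTI and InsertPt are assumed
  // from the caller; the default budget knob is SCEVCheapExpansionBudget):
  //
  //   if (Expander.isHighCostExpansion(Exprs, L, SCEVCheapExpansionBudget,
  //                                    &TTI, InsertPt))
  //     return false;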

  /// Return the induction variable increment's IV operand.
  Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
                               bool allowScale);

  /// Utility for hoisting \p IncV (with all subexpressions required for its
  /// computation) before \p InsertPos. If \p RecomputePoisonFlags is set, drops
  /// all poison-generating flags from instructions being hoisted and tries to
  /// re-infer them in the new location. It should be used when we are going to
  /// introduce a new use in the new position that didn't exist before, and may
  /// trigger new UB in case of poison.
  bool hoistIVInc(Instruction *IncV, Instruction *InsertPos,
                  bool RecomputePoisonFlags = false);

  /// Replace congruent phis with their most canonical representative. Return
  /// the number of phis eliminated.
  unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                               SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                               const TargetTransformInfo *TTI = nullptr);

  /// Return true if the given expression is safe to expand in the sense that
  /// all materialized values are safe to speculate anywhere their operands are
  /// defined, and the expander is capable of expanding the expression.
  bool isSafeToExpand(const SCEV *S) const;

  /// Return true if the given expression is safe to expand in the sense that
  /// all materialized values are defined and safe to speculate at the specified
  /// location and their operands are defined at this location.
  bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint) const;

  /// Insert code to directly compute the specified SCEV expression into the
  /// program.  The code is inserted into the specified block.
  Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I) {
    return expandCodeForImpl(SH, Ty, I);
  }

  /// Insert code to directly compute the specified SCEV expression into the
  /// program.  The code is inserted into the SCEVExpander's current
  /// insertion point. If a type is specified, the result will be expanded to
  /// have that type, with a cast if necessary.
  Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr) {
    return expandCodeForImpl(SH, Ty);
  }
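
  // A usage sketch: materialize a loop's backedge-taken count in the
  // preheader (SE, DL, L and Preheader are assumed from the caller):
  //
  //   SCEVExpander Expander(SE, DL, "btc");
  //   const SCEV *BTC = SE.getBackedgeTakenCount(L);
  //   Value *V = Expander.expandCodeFor(BTC, BTC->getType(),
  //                                     Preheader->getTerminator());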

  /// Generates a code sequence that evaluates this predicate.  The inserted
  /// instructions will be at position \p Loc.  The result will be of type i1
  /// and will have a value of 0 when the predicate is false and 1 otherwise.
  Value *expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc);

  /// A specialized variant of expandCodeForPredicate, handling the case when
  /// we are expanding code for a SCEVComparePredicate.
  Value *expandComparePredicate(const SCEVComparePredicate *Pred,
                                Instruction *Loc);

  /// Generates code that evaluates if the \p AR expression will overflow.
  Value *generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc,
                               bool Signed);

  /// A specialized variant of expandCodeForPredicate, handling the case when
  /// we are expanding code for a SCEVWrapPredicate.
  Value *expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc);

  /// A specialized variant of expandCodeForPredicate, handling the case when
  /// we are expanding code for a SCEVUnionPredicate.
  Value *expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc);

  /// Set the current IV increment loop and position.
  void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
    assert(!CanonicalMode &&
           "IV increment positions are not supported in CanonicalMode");
    IVIncInsertLoop = L;
    IVIncInsertPos = Pos;
  }

  /// Enable post-inc expansion for addrecs referring to the given
  /// loops. Post-inc expansion is only supported in non-canonical mode.
  void setPostInc(const PostIncLoopSet &L) {
    assert(!CanonicalMode &&
           "Post-inc expansion is not supported in CanonicalMode");
    PostIncLoops = L;
  }

  /// Disable all post-inc expansion.
  void clearPostInc() {
    PostIncLoops.clear();

    // When we change the post-inc loop set, cached expansions may no
    // longer be valid.
    InsertedPostIncValues.clear();
  }

  /// Disable the behavior of expanding expressions in canonical form rather
  /// than in a more literal form. Non-canonical mode is useful for late
  /// optimization passes.
  void disableCanonicalMode() { CanonicalMode = false; }

  void enableLSRMode() { LSRMode = true; }

  /// Set the current insertion point. This is useful if multiple calls to
  /// expandCodeFor() are going to be made with the same insert point and the
  /// insert point may be moved during one of the expansions (e.g. if the
  /// insert point is not a block terminator).
  void setInsertPoint(Instruction *IP) {
    assert(IP);
    Builder.SetInsertPoint(IP);
  }

  /// Clear the current insertion point. This is useful if the instruction
  /// that had been serving as the insertion point may have been deleted.
  void clearInsertPoint() { Builder.ClearInsertionPoint(); }

  /// Set location information used by debugging information.
  void SetCurrentDebugLocation(DebugLoc L) {
    Builder.SetCurrentDebugLocation(std::move(L));
  }

  /// Get location information used by debugging information.
  DebugLoc getCurrentDebugLocation() const {
    return Builder.getCurrentDebugLocation();
  }

  /// Return true if the specified instruction was inserted by the code
  /// rewriter.  If so, the client should not modify the instruction. Note that
  /// this also includes instructions re-used during expansion.
  bool isInsertedInstruction(Instruction *I) const {
    return InsertedValues.count(I) || InsertedPostIncValues.count(I);
  }

  void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }

  /// Try to find an existing expansion for S. The function is mainly used to
  /// check whether S can be expanded cheaply.  If this returns a non-null
  /// value, we know we can codegen the returned value into a suitable
  /// expansion identical with S, so that S can be expanded cheaply.
  ///
  /// L is a hint which tells in which loop to look for the suitable value.
  /// On success, return a value which is equivalent to the expanded S at the
  /// point At. Return nullptr if no value was found.
  ///
  /// Note that this function does not perform an exhaustive search. I.e., if
  /// it didn't find any value, it does not mean that there is no such value.
  ///
  Value *getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
                                     Loop *L);

  /// Returns a suitable insert point after \p I, that dominates \p
  /// MustDominate. Skips instructions inserted by the expander.
  BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                            Instruction *MustDominate) const;

private:
  LLVMContext &getContext() const { return SE.getContext(); }

  /// Insert code to directly compute the specified SCEV expression into the
  /// program. The code is inserted into the SCEVExpander's current
  /// insertion point. If a type is specified, the result will be expanded to
  /// have that type, with a cast if necessary.
  Value *expandCodeForImpl(const SCEV *SH, Type *Ty);

  /// Insert code to directly compute the specified SCEV expression into the
  /// program. The code is inserted into the specified block.
  Value *expandCodeForImpl(const SCEV *SH, Type *Ty, Instruction *I);

  /// Recursive helper function for isHighCostExpansion.
  bool isHighCostExpansionHelper(const SCEVOperand &WorkItem, Loop *L,
                                 const Instruction &At, InstructionCost &Cost,
                                 unsigned Budget,
                                 const TargetTransformInfo &TTI,
                                 SmallPtrSetImpl<const SCEV *> &Processed,
                                 SmallVectorImpl<SCEVOperand> &Worklist);

  /// Insert the specified binary operator, doing a small amount of work to
  /// avoid inserting an obviously redundant operation, and hoisting to an
  /// outer loop when the opportunity is there and it is safe.
  Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
                     SCEV::NoWrapFlags Flags, bool IsSafeToHoist);

  /// We want to cast \p V. What would be the best place for such a cast?
  BasicBlock::iterator GetOptimalInsertionPointForCastOf(Value *V) const;

  /// Arrange for there to be a cast of V to Ty at IP, reusing an existing
  /// cast if a suitable one exists, moving an existing cast if a suitable one
  /// exists but isn't in the right place, or creating a new one.
  Value *ReuseOrCreateCast(Value *V, Type *Ty, Instruction::CastOps Op,
                           BasicBlock::iterator IP);

  /// Insert a cast of V to the specified type, which must be possible with a
  /// noop cast, doing what we can to share the casts.
  Value *InsertNoopCastOfTo(Value *V, Type *Ty);

  /// Expand a SCEVAddExpr with a pointer type into a GEP instead of using
  /// ptrtoint+arithmetic+inttoptr.
  Value *expandAddToGEP(const SCEV *Op, Type *Ty, Value *V);

  /// Find a previous Value in ExprValueMap for expand.
  Value *FindValueInExprValueMap(const SCEV *S, const Instruction *InsertPt);

  Value *expand(const SCEV *S);

  /// Determine the most "relevant" loop for the given SCEV.
  const Loop *getRelevantLoop(const SCEV *);

  Value *expandMinMaxExpr(const SCEVNAryExpr *S, Intrinsic::ID IntrinID,
                          Twine Name, bool IsSequential = false);

  Value *visitConstant(const SCEVConstant *S) { return S->getValue(); }

  Value *visitVScale(const SCEVVScale *S);

  Value *visitPtrToIntExpr(const SCEVPtrToIntExpr *S);

  Value *visitTruncateExpr(const SCEVTruncateExpr *S);

  Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S);

  Value *visitSignExtendExpr(const SCEVSignExtendExpr *S);

  Value *visitAddExpr(const SCEVAddExpr *S);

  Value *visitMulExpr(const SCEVMulExpr *S);

  Value *visitUDivExpr(const SCEVUDivExpr *S);

  Value *visitAddRecExpr(const SCEVAddRecExpr *S);

  Value *visitSMaxExpr(const SCEVSMaxExpr *S);

  Value *visitUMaxExpr(const SCEVUMaxExpr *S);

  Value *visitSMinExpr(const SCEVSMinExpr *S);

  Value *visitUMinExpr(const SCEVUMinExpr *S);

  Value *visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S);

  Value *visitUnknown(const SCEVUnknown *S) { return S->getValue(); }

  void rememberInstruction(Value *I);

  bool isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);

  bool isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);

  Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
  PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                     const Loop *L, Type *ExpandTy, Type *IntTy,
                                     Type *&TruncTy, bool &InvertStep);
  Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L, Type *ExpandTy,
                     Type *IntTy, bool useSubtract);

  void fixupInsertPoints(Instruction *I);

  /// Create LCSSA PHIs for \p V, if it is required for uses at the Builder's
  /// current insertion point.
  Value *fixupLCSSAFormFor(Value *V);
};

/// Helper to remove instructions inserted during SCEV expansion, unless they
/// are marked as used.
class SCEVExpanderCleaner {
  SCEVExpander &Expander;

  /// Indicates whether the result of the expansion is used. If false, the
  /// instructions added during expansion are removed.
  bool ResultUsed;

public:
  SCEVExpanderCleaner(SCEVExpander &Expander)
      : Expander(Expander), ResultUsed(false) {}

  ~SCEVExpanderCleaner() { cleanup(); }

  /// Indicate that the result of the expansion is used.
  void markResultUsed() { ResultUsed = true; }

  void cleanup();
};
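
// Illustrative usage sketch (editorial addition): `Expander` is a live
// SCEVExpander; `S`, `Ty`, and `InsertPt` are supplied by the caller, and
// expandCodeFor is the expander's public expansion entry point:
//
//   {
//     SCEVExpanderCleaner Cleaner(Expander);
//     Value *V = Expander.expandCodeFor(S, Ty, InsertPt);
//     if (V && ExpansionTurnsOutProfitable) // hypothetical condition
//       Cleaner.markResultUsed();
//   } // unless marked used, the expansion's instructions are erased here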
} // namespace llvm

#endif
//===- llvm/Transforms/Utils/LowerMemIntrinsics.h ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lower memset, memcpy, memmove intrinsics to loops (e.g. for targets without
// library support).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H
#define LLVM_TRANSFORMS_UTILS_LOWERMEMINTRINSICS_H

#include <cstdint>
#include <optional>

namespace llvm {

class AtomicMemCpyInst;
class ConstantInt;
class Instruction;
class MemCpyInst;
class MemMoveInst;
class MemSetInst;
class ScalarEvolution;
class TargetTransformInfo;
class Value;
struct Align;

/// Emit a loop implementing the semantics of llvm.memcpy where the size is not
/// a compile-time constant. The loop will be inserted at \p InsertBefore.
void createMemCpyLoopUnknownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr, Value *CopyLen,
    Align SrcAlign, Align DestAlign, bool SrcIsVolatile, bool DstIsVolatile,
    bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<unsigned> AtomicSize = std::nullopt);

/// Emit a loop implementing the semantics of an llvm.memcpy whose size is a
/// compile-time constant. The loop is inserted at \p InsertBefore.
void createMemCpyLoopKnownSize(
    Instruction *InsertBefore, Value *SrcAddr, Value *DstAddr,
    ConstantInt *CopyLen, Align SrcAlign, Align DestAlign, bool SrcIsVolatile,
    bool DstIsVolatile, bool CanOverlap, const TargetTransformInfo &TTI,
    std::optional<uint32_t> AtomicCpySize = std::nullopt);

/// Expand \p MemCpy as a loop. \p MemCpy is not deleted.
void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI,
                        ScalarEvolution *SE = nullptr);

/// Expand \p MemMove as a loop. \p MemMove is not deleted. Returns true if the
/// memmove was lowered.
bool expandMemMoveAsLoop(MemMoveInst *MemMove, const TargetTransformInfo &TTI);

/// Expand \p MemSet as a loop. \p MemSet is not deleted.
void expandMemSetAsLoop(MemSetInst *MemSet);

/// Expand \p AtomicMemCpy as a loop. \p AtomicMemCpy is not deleted.
void expandAtomicMemCpyAsLoop(AtomicMemCpyInst *AtomicMemCpy,
                              const TargetTransformInfo &TTI,
                              ScalarEvolution *SE);
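
// Illustrative usage sketch (editorial addition): lowering a memcpy intrinsic
// found while scanning a function; `TTI` and `SE` are analyses supplied by
// the caller. The intrinsic must be erased separately, since the expand*
// helpers do not delete it:
//
//   if (auto *MemCpy = dyn_cast<MemCpyInst>(&Inst)) {
//     expandMemCpyAsLoop(MemCpy, TTI, &SE);
//     MemCpy->eraseFromParent();
//   }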

} // End llvm namespace

#endif
//===- CodeLayout.h - Code layout/placement algorithms  ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Declares methods and data structures for code layout algorithms.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
#define LLVM_TRANSFORMS_UTILS_CODELAYOUT_H

#include "llvm/ADT/DenseMap.h"

#include <vector>

namespace llvm {

using EdgeT = std::pair<uint64_t, uint64_t>;
using EdgeCountT = std::pair<EdgeT, uint64_t>;

/// Find a layout of nodes (basic blocks) of a given CFG optimizing jump
/// locality and thus processor I-cache utilization. This is achieved via
/// increasing the number of fall-through jumps and co-locating frequently
/// executed nodes together.
/// The nodes are assumed to be indexed by integers from [0, |V|) so that the
/// current order is the identity permutation.
/// \p NodeSizes: The sizes of the nodes (in bytes).
/// \p NodeCounts: The execution counts of the nodes in the profile.
/// \p EdgeCounts: The execution counts of every edge (jump) in the profile. The
///    map also defines the edges in CFG and should include 0-count edges.
/// \returns The best block order found.
std::vector<uint64_t>
applyExtTspLayout(const std::vector<uint64_t> &NodeSizes,
                  const std::vector<uint64_t> &NodeCounts,
                  const std::vector<EdgeCountT> &EdgeCounts);
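
// Illustrative usage sketch (editorial addition) for a three-block CFG with a
// hot edge 0->1 and a cold edge 0->2; all numbers are made up:
//
//   std::vector<uint64_t> NodeSizes = {16, 4, 4};     // bytes per block
//   std::vector<uint64_t> NodeCounts = {100, 90, 10}; // execution counts
//   std::vector<EdgeCountT> EdgeCounts = {{{0, 1}, 90}, {{0, 2}, 10}};
//   std::vector<uint64_t> Order =
//       applyExtTspLayout(NodeSizes, NodeCounts, EdgeCounts);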

/// Estimate the "quality" of a given node order in CFG. The higher the score,
/// the better the order is. The score is designed to reflect the locality of
/// the given order, which is anti-correlated with the number of I-cache misses
/// in a typical execution of the function.
double calcExtTspScore(const std::vector<uint64_t> &Order,
                       const std::vector<uint64_t> &NodeSizes,
                       const std::vector<uint64_t> &NodeCounts,
                       const std::vector<EdgeCountT> &EdgeCounts);

/// Estimate the "quality" of the current node order in CFG.
double calcExtTspScore(const std::vector<uint64_t> &NodeSizes,
                       const std::vector<uint64_t> &NodeCounts,
                       const std::vector<EdgeCountT> &EdgeCounts);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CODELAYOUT_H
//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes an interface to build some C language libcalls for
// optimization passes that need to call the various functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H

#include "llvm/Analysis/TargetLibraryInfo.h"

namespace llvm {
  class Value;
  class DataLayout;
  class IRBuilderBase;

  /// Analyze the name and prototype of the given function and set any
  /// applicable attributes. Note that this merely helps optimizations on an
  /// already existing function but does not consider mandatory attributes.
  ///
  /// If the library function is unavailable, this does not modify the function.
  ///
  /// Returns true if any attributes were set and false otherwise.
  bool inferNonMandatoryLibFuncAttrs(Module *M, StringRef Name,
                                     const TargetLibraryInfo &TLI);
  bool inferNonMandatoryLibFuncAttrs(Function &F, const TargetLibraryInfo &TLI);

  /// Calls getOrInsertFunction() and then makes sure to add mandatory
  /// argument attributes.
  FunctionCallee getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI,
                                    LibFunc TheLibFunc, FunctionType *T,
                                    AttributeList AttributeList);
  FunctionCallee getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI,
                                    LibFunc TheLibFunc, FunctionType *T);
  template <typename... ArgsTy>
  FunctionCallee getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI,
                               LibFunc TheLibFunc, AttributeList AttributeList,
                               Type *RetTy, ArgsTy... Args) {
    SmallVector<Type*, sizeof...(ArgsTy)> ArgTys{Args...};
    return getOrInsertLibFunc(M, TLI, TheLibFunc,
                              FunctionType::get(RetTy, ArgTys, false),
                              AttributeList);
  }
  /// Same as above, but without the attributes.
  template <typename... ArgsTy>
  FunctionCallee getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI,
                             LibFunc TheLibFunc, Type *RetTy, ArgsTy... Args) {
    return getOrInsertLibFunc(M, TLI, TheLibFunc, AttributeList{}, RetTy,
                              Args...);
  }
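
  // Illustrative usage sketch (editorial addition): declaring (or finding)
  // `size_t strlen(const char *)` via the variadic overload. `M`, `TLI`, and
  // `B` (an IRBuilderBase) are assumed to be supplied by the caller; using
  // i64 as the width of size_t is a made-up assumption for the example:
  //
  //   FunctionCallee StrLen = getOrInsertLibFunc(M, TLI, LibFunc_strlen,
  //                                              B.getInt64Ty(), B.getPtrTy());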
  // Deleted overload: catches passing a FunctionType where the variadic
  // overload's return type is expected, an incorrect ordering that would
  // otherwise compile but do the wrong thing.
  template <typename... ArgsTy>
  FunctionCallee
  getOrInsertLibFunc(Module *M, const TargetLibraryInfo &TLI,
                     LibFunc TheLibFunc, AttributeList AttributeList,
                     FunctionType *Invalid, ArgsTy... Args) = delete;

  /// Check whether the library function is available on the target and that
  /// it is declared in the current Module as a Function with the right type.
  bool isLibFuncEmittable(const Module *M, const TargetLibraryInfo *TLI,
                          LibFunc TheLibFunc);
  bool isLibFuncEmittable(const Module *M, const TargetLibraryInfo *TLI,
                          StringRef Name);

  /// Check whether the overloaded floating point function
  /// corresponding to \a Ty is available.
  bool hasFloatFn(const Module *M, const TargetLibraryInfo *TLI, Type *Ty,
                  LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn);

  /// Get the name of the overloaded floating point function
  /// corresponding to \a Ty. Return the LibFunc in \a TheLibFunc.
  StringRef getFloatFn(const Module *M, const TargetLibraryInfo *TLI, Type *Ty,
                       LibFunc DoubleFn, LibFunc FloatFn, LibFunc LongDoubleFn,
                       LibFunc &TheLibFunc);

  /// Return V if it is an i8*, otherwise cast it to i8*.
  Value *castToCStr(Value *V, IRBuilderBase &B);

  /// Emit a call to the strlen function to the builder, for the specified
  /// pointer. Ptr is required to be some pointer type, and the return value has
  /// 'size_t' type.
  Value *emitStrLen(Value *Ptr, IRBuilderBase &B, const DataLayout &DL,
                    const TargetLibraryInfo *TLI);
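
  // Illustrative usage sketch (editorial addition): `Ptr`, `B`, `DL`, and
  // `TLI` are assumed to come from the surrounding pass. A null return means
  // the call could not be emitted (e.g. strlen is unavailable on the target):
  //
  //   Value *Len = emitStrLen(Ptr, B, DL, TLI);
  //   if (!Len)
  //     return false; // leave the original code unchanged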

  /// Emit a call to the strdup function to the builder, for the specified
  /// pointer. Ptr is required to be some pointer type, and the return value has
  /// 'i8*' type.
  Value *emitStrDup(Value *Ptr, IRBuilderBase &B, const TargetLibraryInfo *TLI);

  /// Emit a call to the strchr function to the builder, for the specified
  /// pointer and character. Ptr is required to be some pointer type, and the
  /// return value has 'i8*' type.
  Value *emitStrChr(Value *Ptr, char C, IRBuilderBase &B,
                    const TargetLibraryInfo *TLI);

  /// Emit a call to the strncmp function to the builder.
  Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
                     const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the strcpy function to the builder, for the specified
  /// pointer arguments.
  Value *emitStrCpy(Value *Dst, Value *Src, IRBuilderBase &B,
                    const TargetLibraryInfo *TLI);

  /// Emit a call to the stpcpy function to the builder, for the specified
  /// pointer arguments.
  Value *emitStpCpy(Value *Dst, Value *Src, IRBuilderBase &B,
                    const TargetLibraryInfo *TLI);

  /// Emit a call to the strncpy function to the builder, for the specified
  /// pointer arguments and length.
  Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);

  /// Emit a call to the stpncpy function to the builder, for the specified
  /// pointer arguments and length.
  Value *emitStpNCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);

  /// Emit a call to the __memcpy_chk function to the builder. This expects
  /// that Len and ObjSize have type 'size_t' and that Dst/Src are pointers.
  Value *emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
                       IRBuilderBase &B, const DataLayout &DL,
                       const TargetLibraryInfo *TLI);

  /// Emit a call to the mempcpy function.
  Value *emitMemPCpy(Value *Dst, Value *Src, Value *Len, IRBuilderBase &B,
                     const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the memchr function. This assumes that Ptr is a pointer,
  /// Val is an 'int' value, and Len is a 'size_t' value.
  Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilderBase &B,
                    const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the memrchr function, analogously to emitMemChr.
  Value *emitMemRChr(Value *Ptr, Value *Val, Value *Len, IRBuilderBase &B,
                     const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the memcmp function.
  Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
                    const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the bcmp function.
  Value *emitBCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilderBase &B,
                  const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the memccpy function.
  Value *emitMemCCpy(Value *Ptr1, Value *Ptr2, Value *Val, Value *Len,
                     IRBuilderBase &B, const TargetLibraryInfo *TLI);

  /// Emit a call to the snprintf function.
  Value *emitSNPrintf(Value *Dest, Value *Size, Value *Fmt,
                      ArrayRef<Value *> Args, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);

  /// Emit a call to the sprintf function.
  Value *emitSPrintf(Value *Dest, Value *Fmt, ArrayRef<Value *> VariadicArgs,
                     IRBuilderBase &B, const TargetLibraryInfo *TLI);

  /// Emit a call to the strcat function.
  Value *emitStrCat(Value *Dest, Value *Src, IRBuilderBase &B,
                    const TargetLibraryInfo *TLI);

  /// Emit a call to the strlcpy function.
  Value *emitStrLCpy(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);

  /// Emit a call to the strlcat function.
  Value *emitStrLCat(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);

  /// Emit a call to the strncat function.
  Value *emitStrNCat(Value *Dest, Value *Src, Value *Size, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);

  /// Emit a call to the vsnprintf function.
  Value *emitVSNPrintf(Value *Dest, Value *Size, Value *Fmt, Value *VAList,
                       IRBuilderBase &B, const TargetLibraryInfo *TLI);

  /// Emit a call to the vsprintf function.
  Value *emitVSPrintf(Value *Dest, Value *Fmt, Value *VAList, IRBuilderBase &B,
                      const TargetLibraryInfo *TLI);

  /// Emit a call to the unary function named 'Name' (e.g. 'floor'). This
  /// function is known to take a single argument of type matching 'Op' and
  /// return one value with the same type. If 'Op' is a long double, an 'l'
  /// suffix is added to the name; if 'Op' is a float, an 'f' suffix is added.
  Value *emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI,
                              StringRef Name, IRBuilderBase &B,
                              const AttributeList &Attrs);

  /// Emit a call to the unary function DoubleFn, FloatFn or LongDoubleFn,
  /// depending on the type of Op.
  Value *emitUnaryFloatFnCall(Value *Op, const TargetLibraryInfo *TLI,
                              LibFunc DoubleFn, LibFunc FloatFn,
                              LibFunc LongDoubleFn, IRBuilderBase &B,
                              const AttributeList &Attrs);

  /// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
  /// function is known to take arguments of types matching 'Op1' and 'Op2'
  /// and return one value with the same type. If 'Op1'/'Op2' are long double,
  /// an 'l' suffix is added to the name; if they are float, an 'f' suffix is
  /// added.
  Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2,
                               const TargetLibraryInfo *TLI,
                               StringRef Name, IRBuilderBase &B,
                               const AttributeList &Attrs);

  /// Emit a call to the binary function DoubleFn, FloatFn or LongDoubleFn,
  /// depending on the type of Op1.
  Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2,
                               const TargetLibraryInfo *TLI, LibFunc DoubleFn,
                               LibFunc FloatFn, LibFunc LongDoubleFn,
                               IRBuilderBase &B, const AttributeList &Attrs);

  /// Emit a call to the putchar function. This assumes that Char is an 'int'.
  Value *emitPutChar(Value *Char, IRBuilderBase &B,
                     const TargetLibraryInfo *TLI);

  /// Emit a call to the puts function. This assumes that Str is some pointer.
  Value *emitPutS(Value *Str, IRBuilderBase &B, const TargetLibraryInfo *TLI);

  /// Emit a call to the fputc function. This assumes that Char is an 'int', and
  /// File is a pointer to FILE.
  Value *emitFPutC(Value *Char, Value *File, IRBuilderBase &B,
                   const TargetLibraryInfo *TLI);

  /// Emit a call to the fputs function. Str is required to be a pointer and
  /// File is a pointer to FILE.
  Value *emitFPutS(Value *Str, Value *File, IRBuilderBase &B,
                   const TargetLibraryInfo *TLI);

  /// Emit a call to the fwrite function. This assumes that Ptr is a pointer,
  /// Size is a 'size_t', and File is a pointer to FILE.
  Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilderBase &B,
                    const DataLayout &DL, const TargetLibraryInfo *TLI);

  /// Emit a call to the malloc function.
  Value *emitMalloc(Value *Num, IRBuilderBase &B, const DataLayout &DL,
                    const TargetLibraryInfo *TLI);

  /// Emit a call to the calloc function.
  Value *emitCalloc(Value *Num, Value *Size, IRBuilderBase &B,
                    const TargetLibraryInfo &TLI);

  /// Emit a call to the hot/cold operator new function.
  Value *emitHotColdNew(Value *Num, IRBuilderBase &B,
                        const TargetLibraryInfo *TLI, LibFunc NewFunc,
                        uint8_t HotCold);
  Value *emitHotColdNewNoThrow(Value *Num, Value *NoThrow, IRBuilderBase &B,
                               const TargetLibraryInfo *TLI, LibFunc NewFunc,
                               uint8_t HotCold);
  Value *emitHotColdNewAligned(Value *Num, Value *Align, IRBuilderBase &B,
                               const TargetLibraryInfo *TLI, LibFunc NewFunc,
                               uint8_t HotCold);
  Value *emitHotColdNewAlignedNoThrow(Value *Num, Value *Align, Value *NoThrow,
                                      IRBuilderBase &B,
                                      const TargetLibraryInfo *TLI,
                                      LibFunc NewFunc, uint8_t HotCold);
}

#endif
//===--- MisExpect.h - Check the use of llvm.expect with PGO data ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit diagnostic messages for potentially incorrect
// usage of the llvm.expect intrinsic. This utility extracts the threshold
// values from metadata associated with the instrumented Branch or Switch
// instruction. The threshold values are then used to determine if a diagnostic
// should be emitted.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MISEXPECT_H
#define LLVM_TRANSFORMS_UTILS_MISEXPECT_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"

namespace llvm {
namespace misexpect {

/// checkBackendInstrumentation - compares PGO counters to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range. It extracts the expected weights from the MD_prof weights attached
/// to the instruction, which are assumed to come from lowered llvm.expect
/// intrinsics. The RealWeights parameter and the extracted expected weights
/// are then passed to verifyMisExpect() for verification.
///
/// \param I The Instruction being checked
/// \param RealWeights A vector of profile weights for each target block
void checkBackendInstrumentation(Instruction &I,
                                 const llvm::ArrayRef<uint32_t> RealWeights);

/// checkFrontendInstrumentation - compares PGO counters to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range. It extracts the expected weights from the MD_prof weights attached
/// to the instruction, which are assumed to come from profiling data
/// attached by the frontend prior to llvm.expect intrinsic lowering. The
/// ExpectedWeights parameter and the extracted real weights are then passed
/// to verifyMisExpect() for verification.
///
/// \param I The Instruction being checked
/// \param ExpectedWeights A vector of the expected weights for each target
/// block; this determines the threshold values used when emitting diagnostics
void checkFrontendInstrumentation(Instruction &I,
                                  const ArrayRef<uint32_t> ExpectedWeights);

/// verifyMisExpect - compares RealWeights to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range.
///
/// \param I The Instruction being checked
/// \param RealWeights A vector of profile weights from the profile data
/// \param ExpectedWeights A vector of the weights attached by llvm.expect
void verifyMisExpect(Instruction &I, ArrayRef<uint32_t> RealWeights,
                     const ArrayRef<uint32_t> ExpectedWeights);

/// checkExpectAnnotations - compares PGO counters to the thresholds used
/// for llvm.expect and warns if the PGO counters are outside of the expected
/// range. It extracts the expected weights from the MD_prof weights attached
/// to the instruction, which are assumed to come from lowered llvm.expect
/// intrinsics. The ExistingWeights parameter and the extracted expected
/// weights are then passed to verifyMisExpect() for verification. It is a thin
/// wrapper around the checkFrontendInstrumentation and
/// checkBackendInstrumentation APIs.
///
/// \param I The Instruction being checked
/// \param ExistingWeights A vector of profile weights for each target block
/// \param IsFrontend A boolean describing if this is Frontend instrumentation
void checkExpectAnnotations(Instruction &I,
                            const ArrayRef<uint32_t> ExistingWeights,
                            bool IsFrontend);
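
// Illustrative usage sketch (editorial addition): verifying llvm.expect
// annotations against the weights attached to a branch instruction `I`;
// `Weights` is assumed to hold one weight per target block:
//
//   misexpect::checkExpectAnnotations(I, Weights, /*IsFrontend=*/true);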

} // namespace misexpect
} // namespace llvm

#endif
//===- LoopSimplify.h - Loop Canonicalization Pass --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs several transformations to transform natural loops into a
// simpler form, which makes subsequent analyses and transformations simpler and
// more effective.
//
// Loop pre-header insertion guarantees that there is a single, non-critical
// entry edge from outside of the loop to the loop header.  This simplifies a
// number of analyses and transformations, such as LICM.
//
// Loop exit-block insertion guarantees that all exit blocks from the loop
// (blocks which are outside of the loop that have predecessors inside of the
// loop) only have predecessors from inside of the loop (and are thus dominated
// by the loop header).  This simplifies transformations such as store-sinking
// that are built into LICM.
//
// This pass also guarantees that loops will have exactly one backedge.
//
// Indirectbr instructions introduce several complications. If the loop
// contains or is entered by an indirectbr instruction, it may not be possible
// to transform the loop and make these guarantees. Client code should check
// that these conditions are true before relying on them.
//
// Note that the simplifycfg pass will clean up blocks which are split out but
// end up being unnecessary, so usage of this pass should not pessimize
// generated code.
//
// This pass obviously modifies the CFG, but updates loop information and
// dominator information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
#define LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class AssumptionCache;
class DominatorTree;
class Loop;
class LoopInfo;
class MemorySSAUpdater;
class ScalarEvolution;

/// This pass is responsible for loop canonicalization.
class LoopSimplifyPass : public PassInfoMixin<LoopSimplifyPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Simplify each loop in a loop nest recursively.
///
/// This takes a potentially un-simplified loop L (and its children) and turns
/// it into a simplified loop nest with preheaders and single backedges. It will
/// update \c DominatorTree, \c LoopInfo, \c ScalarEvolution and \c MemorySSA
/// analyses if they're non-null, and LCSSA if \c PreserveLCSSA is true.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
                  AssumptionCache *AC, MemorySSAUpdater *MSSAU,
                  bool PreserveLCSSA);
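
// Illustrative usage sketch (editorial addition): canonicalizing one loop
// from a utility pass, with `DT`, `LI`, `SE`, and `AC` supplied by the
// caller and MemorySSA not updated:
//
//   bool Changed = simplifyLoop(L, &DT, &LI, &SE, &AC, /*MSSAU=*/nullptr,
//                               /*PreserveLCSSA=*/false);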

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOOPSIMPLIFY_H
//===- CallGraphUpdater.h - A (lazy) call graph update helper ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides interfaces used to manipulate a call graph, regardless
/// of whether it is an "old style" CallGraph or a "new style" LazyCallGraph.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
#define LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"

namespace llvm {

class CallGraph;
class CallGraphSCC;

/// Wrapper to unify "old style" CallGraph and "new style" LazyCallGraph. This
/// simplifies the interface and the call sites, e.g., new and old pass manager
/// passes can share the same code.
class CallGraphUpdater {
  /// Containers for functions that we replaced or want to delete when
  /// `finalize` is called. This can happen explicitly or as part of the
  /// destructor. Dead functions in comdat sections are tracked separately
  /// because a function with discardable linkage in a COMDAT should only
  /// be dropped if the entire COMDAT is dropped, see git ac07703842cf.
  ///{
  SmallPtrSet<Function *, 16> ReplacedFunctions;
  SmallVector<Function *, 16> DeadFunctions;
  SmallVector<Function *, 16> DeadFunctionsInComdats;
  ///}

  /// Old PM variables
  ///{
  CallGraph *CG = nullptr;
  CallGraphSCC *CGSCC = nullptr;
  ///}

  /// New PM variables
  ///{
  LazyCallGraph *LCG = nullptr;
  LazyCallGraph::SCC *SCC = nullptr;
  CGSCCAnalysisManager *AM = nullptr;
  CGSCCUpdateResult *UR = nullptr;
  FunctionAnalysisManager *FAM = nullptr;
  ///}

public:
  CallGraphUpdater() = default;
  ~CallGraphUpdater() { finalize(); }

  /// Initializers for use inside a CGSCC pass, with the old and the new pass
  /// manager (PM), respectively.
  ///{
  void initialize(CallGraph &CG, CallGraphSCC &SCC) {
    this->CG = &CG;
    this->CGSCC = &SCC;
  }
  void initialize(LazyCallGraph &LCG, LazyCallGraph::SCC &SCC,
                  CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
    this->LCG = &LCG;
    this->SCC = &SCC;
    this->AM = &AM;
    this->UR = &UR;
    FAM =
        &AM.getResult<FunctionAnalysisManagerCGSCCProxy>(SCC, LCG).getManager();
  }
  ///}

  /// Finalizer that will trigger actions like function removal from the CG.
  bool finalize();

  /// Remove \p Fn from the call graph.
  void removeFunction(Function &Fn);

  /// After a CGSCC pass changes a function in ways that affect the call
  /// graph, this method can be called to update it.
  void reanalyzeFunction(Function &Fn);

  /// If a new function was created by outlining, this method can be called
  /// to update the call graph for the new function. Note that the old one
  /// still needs to be re-analyzed or manually updated.
  void registerOutlinedFunction(Function &OriginalFn, Function &NewFn);

  /// Replace \p OldFn in the call graph (and SCC) with \p NewFn. The uses
  /// outside the call graph and the function \p OldFn are not modified.
  /// Note that \p OldFn is also removed from the call graph
  /// (\see removeFunction).
  void replaceFunctionWith(Function &OldFn, Function &NewFn);

  /// Remove the call site \p CS from the call graph.
  void removeCallSite(CallBase &CS);

  /// Replace \p OldCS with the new call site \p NewCS.
  /// \return True if the replacement was successful, otherwise False. In the
  /// latter case the parent function of \p OldCS needs to be re-analyzed.
  bool replaceCallSite(CallBase &OldCS, CallBase &NewCS);
};
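
// Illustrative usage sketch (editorial addition) inside a new-PM CGSCC pass,
// where `LCG`, `SCC`, `AM`, and `UR` come from the pass's run() parameters:
//
//   CallGraphUpdater CGU;
//   CGU.initialize(LCG, SCC, AM, UR);
//   CGU.removeFunction(DeadFn); // queue a known-dead function for removal
//   CGU.finalize();             // or rely on ~CallGraphUpdater()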

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CALLGRAPHUPDATER_H
//=== InstructionWorklist.h - Worklist for InstCombine & others -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_INSTRUCTIONWORKLIST_H
#define LLVM_TRANSFORMS_UTILS_INSTRUCTIONWORKLIST_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

/// InstructionWorklist - This is the worklist management logic for
/// InstCombine and other simplification passes.
class InstructionWorklist {
  SmallVector<Instruction *, 256> Worklist;
  DenseMap<Instruction *, unsigned> WorklistMap;
  /// These instructions will be added in reverse order after the current
  /// combine has finished. This means that these instructions will be visited
  /// in the order they have been added.
  SmallSetVector<Instruction *, 16> Deferred;

public:
  InstructionWorklist() = default;

  InstructionWorklist(InstructionWorklist &&) = default;
  InstructionWorklist &operator=(InstructionWorklist &&) = default;

  bool isEmpty() const { return Worklist.empty() && Deferred.empty(); }

  /// Add instruction to the worklist.
  /// Instructions will be visited in the order they are added.
  /// You likely want to use this method.
  void add(Instruction *I) {
    if (Deferred.insert(I))
      LLVM_DEBUG(dbgs() << "ADD DEFERRED: " << *I << '\n');
  }

  /// Add value to the worklist if it is an instruction.
  /// Instructions will be visited in the order they are added.
  void addValue(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      add(I);
  }

  /// Push the instruction onto the worklist stack.
  /// Instructions that have been added first will be visited last.
  void push(Instruction *I) {
    assert(I);
    assert(I->getParent() && "Instruction not inserted yet?");

    if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
      LLVM_DEBUG(dbgs() << "ADD: " << *I << '\n');
      Worklist.push_back(I);
    }
  }

  void pushValue(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V))
      push(I);
  }

  Instruction *popDeferred() {
    if (Deferred.empty())
      return nullptr;
    return Deferred.pop_back_val();
  }

  void reserve(size_t Size) {
    Worklist.reserve(Size + 16);
    WorklistMap.reserve(Size);
  }

  /// Remove I from the worklist if it exists.
  void remove(Instruction *I) {
    DenseMap<Instruction *, unsigned>::iterator It = WorklistMap.find(I);
    if (It != WorklistMap.end()) {
      // Don't bother moving everything down, just null out the slot.
      Worklist[It->second] = nullptr;
      WorklistMap.erase(It);
    }

    Deferred.remove(I);
  }

  Instruction *removeOne() {
    if (Worklist.empty())
      return nullptr;
    Instruction *I = Worklist.pop_back_val();
    WorklistMap.erase(I);
    return I;
  }

  /// When an instruction is simplified, add all users of the instruction
  /// to the work lists because they might get more simplified now.
  void pushUsersToWorkList(Instruction &I) {
    for (User *U : I.users())
      push(cast<Instruction>(U));
  }

  /// Should be called *after* decrementing the use-count on V.
  void handleUseCountDecrement(Value *V) {
    if (auto *I = dyn_cast<Instruction>(V)) {
      add(I);
      // Many folds have one-use limitations. If there's only one use left,
      // revisit that use.
      if (I->hasOneUse())
        add(cast<Instruction>(*I->user_begin()));
    }
  }

  /// Check that the worklist is empty and nuke the backing store for the map.
  void zap() {
    assert(WorklistMap.empty() && "Worklist empty, but map not?");
    assert(Deferred.empty() && "Deferred instructions left over");

    // Do an explicit clear, this shrinks the map if needed.
    WorklistMap.clear();
  }
};
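
// Illustrative usage sketch (editorial addition): a driver loop in the style
// of InstCombine. `tryToSimplify` is a hypothetical per-instruction folder,
// and instructions() is the iterator from llvm/IR/InstIterator.h:
//
//   InstructionWorklist WL;
//   for (Instruction &I : instructions(F))
//     WL.push(&I);
//   while (!WL.isEmpty()) {
//     Instruction *I = WL.removeOne();
//     if (!I)
//       continue; // slot was nulled out by remove()
//     if (tryToSimplify(I))
//       WL.pushUsersToWorkList(*I);
//   }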

} // end namespace llvm

#endif
//===- LoopRotationUtils.h - Utilities to perform loop rotation -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H
#define LLVM_TRANSFORMS_UTILS_LOOPROTATIONUTILS_H

namespace llvm {

class AssumptionCache;
class DominatorTree;
class Loop;
class LoopInfo;
class MemorySSAUpdater;
class ScalarEvolution;
struct SimplifyQuery;
class TargetTransformInfo;

/// Convert a loop into a loop with bottom test. It may
/// perform loop latch simplification as well if the flag RotationOnly
/// is false. The flag Threshold represents the size threshold of the loop
/// header. If the loop header's size exceeds the threshold, the loop rotation
/// will give up. The flag IsUtilMode controls the heuristic used in the
/// LoopRotation. If it is true, the profitability heuristic will be ignored.
bool LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                  AssumptionCache *AC, DominatorTree *DT, ScalarEvolution *SE,
                  MemorySSAUpdater *MSSAU, const SimplifyQuery &SQ,
                  bool RotationOnly, unsigned Threshold, bool IsUtilMode,
                  bool PrepareForLTO = false);
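
// Illustrative usage sketch (editorial addition): rotating a loop in utility
// mode (profitability heuristic ignored); the analyses and the SimplifyQuery
// `SQ` are supplied by the caller, and the threshold of 16 is made up:
//
//   bool Rotated = LoopRotation(L, &LI, &TTI, &AC, &DT, &SE,
//                               /*MSSAU=*/nullptr, SQ, /*RotationOnly=*/true,
//                               /*Threshold=*/16, /*IsUtilMode=*/true);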

} // namespace llvm

#endif
//===-- HelloWorld.h - Example Transformations ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_HELLOWORLD_H
#define LLVM_TRANSFORMS_UTILS_HELLOWORLD_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class HelloWorldPass : public PassInfoMixin<HelloWorldPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_HELLOWORLD_H
//===- PredicateInfo.h - Build PredicateInfo ----------------------*-C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
///  This file implements the PredicateInfo analysis, which creates an Extended
/// SSA form for operations used in branch comparisons and llvm.assume
/// comparisons.
///
/// Copies of these operations are inserted into the true/false edge (and after
/// assumes), and information attached to the copies.  All uses of the original
/// operation in blocks dominated by the true/false edge (and assume), are
/// replaced with uses of the copies.  This enables passes to easily and sparsely
/// propagate condition based info into the operations that may be affected.
///
/// Example:
/// %cmp = icmp eq i32 %x, 50
/// br i1 %cmp, label %true, label %false
/// true:
/// ret i32 %x
/// false:
/// ret i32 1
///
/// will become
///
/// %cmp = icmp eq i32, %x, 50
/// br i1 %cmp, label %true, label %false
/// true:
/// %x.0 = call \@llvm.ssa_copy.i32(i32 %x)
/// ret i32 %x.0
/// false:
/// ret i32 1
///
/// Using getPredicateInfoFor on x.0 will give you the comparison it is
/// dominated by (the icmp), and that you are located in the true edge of that
/// comparison, which tells you x.0 is 50.
///
/// In order to reduce the number of copies inserted, predicateinfo is only
/// inserted where it would actually be live.  This means if there are no uses of
/// an operation dominated by the branch edges, or by an assume, the associated
/// predicate info is never inserted.
///
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
#define LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"

namespace llvm {

class AssumptionCache;
class DominatorTree;
class Function;
class Value;
class IntrinsicInst;
class raw_ostream;

enum PredicateType { PT_Branch, PT_Assume, PT_Switch };

/// Constraint for a predicate of the form "cmp Pred Op, OtherOp", where Op
/// is the value the constraint applies to (the ssa.copy result).
struct PredicateConstraint {
  CmpInst::Predicate Predicate;
  Value *OtherOp;
};

// Base class for all predicate information we provide.
// All of our predicate information has at least a comparison.
class PredicateBase : public ilist_node<PredicateBase> {
public:
  PredicateType Type;
  // The original operand before we renamed it.
  // This can be use by passes, when destroying predicateinfo, to know
  // whether they can just drop the intrinsic, or have to merge metadata.
  Value *OriginalOp;
  // The renamed operand in the condition used for this predicate. For nested
  // predicates, this is different to OriginalOp which refers to the initial
  // operand.
  Value *RenamedOp;
  // The condition associated with this predicate.
  Value *Condition;

  PredicateBase(const PredicateBase &) = delete;
  PredicateBase &operator=(const PredicateBase &) = delete;
  PredicateBase() = delete;
  virtual ~PredicateBase() = default;
  static bool classof(const PredicateBase *PB) {
    return PB->Type == PT_Assume || PB->Type == PT_Branch ||
           PB->Type == PT_Switch;
  }

  /// Fetch condition in the form of PredicateConstraint, if possible.
  std::optional<PredicateConstraint> getConstraint() const;

protected:
  PredicateBase(PredicateType PT, Value *Op, Value *Condition)
      : Type(PT), OriginalOp(Op), Condition(Condition) {}
};

// Provides predicate information for assumes.  Since assumes are always true,
// we simply provide the assume instruction, so you can tell your relative
// position to it.
class PredicateAssume : public PredicateBase {
public:
  IntrinsicInst *AssumeInst;
  PredicateAssume(Value *Op, IntrinsicInst *AssumeInst, Value *Condition)
      : PredicateBase(PT_Assume, Op, Condition), AssumeInst(AssumeInst) {}
  PredicateAssume() = delete;
  static bool classof(const PredicateBase *PB) {
    return PB->Type == PT_Assume;
  }
};

// Mixin class for edge predicates.  The FROM block is the block where the
// predicate originates, and the TO block is the block where the predicate is
// valid.
class PredicateWithEdge : public PredicateBase {
public:
  BasicBlock *From;
  BasicBlock *To;
  PredicateWithEdge() = delete;
  static bool classof(const PredicateBase *PB) {
    return PB->Type == PT_Branch || PB->Type == PT_Switch;
  }

protected:
  PredicateWithEdge(PredicateType PType, Value *Op, BasicBlock *From,
                    BasicBlock *To, Value *Cond)
      : PredicateBase(PType, Op, Cond), From(From), To(To) {}
};

// Provides predicate information for branches.
class PredicateBranch : public PredicateWithEdge {
public:
  // If true, SplitBB is the true successor, otherwise it's the false successor.
  bool TrueEdge;
  PredicateBranch(Value *Op, BasicBlock *BranchBB, BasicBlock *SplitBB,
                  Value *Condition, bool TakenEdge)
      : PredicateWithEdge(PT_Branch, Op, BranchBB, SplitBB, Condition),
        TrueEdge(TakenEdge) {}
  PredicateBranch() = delete;
  static bool classof(const PredicateBase *PB) {
    return PB->Type == PT_Branch;
  }
};

class PredicateSwitch : public PredicateWithEdge {
public:
  Value *CaseValue;
  // This is the switch instruction.
  SwitchInst *Switch;
  PredicateSwitch(Value *Op, BasicBlock *SwitchBB, BasicBlock *TargetBB,
                  Value *CaseValue, SwitchInst *SI)
      : PredicateWithEdge(PT_Switch, Op, SwitchBB, TargetBB,
                          SI->getCondition()),
        CaseValue(CaseValue), Switch(SI) {}
  PredicateSwitch() = delete;
  static bool classof(const PredicateBase *PB) {
    return PB->Type == PT_Switch;
  }
};

/// Encapsulates PredicateInfo, including all data associated with memory
/// accesses.
class PredicateInfo {
public:
  PredicateInfo(Function &, DominatorTree &, AssumptionCache &);
  ~PredicateInfo();

  void verifyPredicateInfo() const;

  void dump() const;
  void print(raw_ostream &) const;

  const PredicateBase *getPredicateInfoFor(const Value *V) const {
    return PredicateMap.lookup(V);
  }

protected:
  // Used by PredicateInfo annotater, dumpers, and wrapper pass.
  friend class PredicateInfoAnnotatedWriter;
  friend class PredicateInfoPrinterLegacyPass;
  friend class PredicateInfoBuilder;

private:
  Function &F;

  // This owns all the predicate infos in the function, placed or not.
  iplist<PredicateBase> AllInfos;

  // This maps from copy operands to Predicate Info. Note that it does not own
  // the Predicate Info; the PredicateBase objects are owned by the AllInfos
  // list above.
  DenseMap<const Value *, const PredicateBase *> PredicateMap;
  // The set of ssa_copy declarations we created with our custom mangling.
  SmallSet<AssertingVH<Function>, 20> CreatedDeclarations;
};
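
// Illustrative usage sketch (editorial addition): querying the constraint on
// an ssa.copy result `Copy`, with `F`, `DT`, and `AC` being the function and
// its analyses:
//
//   PredicateInfo PI(F, DT, AC);
//   if (const PredicateBase *PB = PI.getPredicateInfoFor(Copy))
//     if (std::optional<PredicateConstraint> C = PB->getConstraint())
//       ; // Copy satisfies: icmp C->Predicate <original op>, C->OtherOp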

// This pass does eager building and then printing of PredicateInfo. It is
// used by the tests to be able to build, dump, and verify PredicateInfo.
class PredicateInfoPrinterLegacyPass : public FunctionPass {
public:
  PredicateInfoPrinterLegacyPass();

  static char ID;
  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

/// Printer pass for \c PredicateInfo.
class PredicateInfoPrinterPass
    : public PassInfoMixin<PredicateInfoPrinterPass> {
  raw_ostream &OS;

public:
  explicit PredicateInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c PredicateInfo.
struct PredicateInfoVerifierPass : PassInfoMixin<PredicateInfoVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_PREDICATEINFO_H
//===- SymbolRewriter.h - Symbol Rewriting Pass -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the prototypes and definitions related to the Symbol
// Rewriter pass.
//
// The Symbol Rewriter pass takes a set of rewrite descriptors which define
// transformations for symbol names.  These can be either a single name-to-name
// transformation or more broad regular-expression-based transformations.
//
// All the functions are re-written at the IR level.  The Symbol Rewriter itself
// is exposed as a module level pass.  All symbols at the module level are
// iterated.  For any matching symbol, the requested transformation is applied,
// updating references to it as well (a la RAUW).  The resulting binary will
// only contain the rewritten symbols.
//
// By performing this operation in the compiler, we are able to catch symbols
// that would otherwise not be possible to catch (e.g. inlined symbols).
//
// This makes it possible to cleanly transform symbols without resorting to
// overly-complex macro tricks and the pre-processor.  An example of where this
// is useful is the sanitizers where we would like to intercept a well-defined
// set of functions across the module.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
#define LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H

#include "llvm/IR/PassManager.h"
#include <list>
#include <memory>
#include <string>

namespace llvm {

class MemoryBuffer;
class Module;
class ModulePass;

namespace yaml {

class KeyValueNode;
class MappingNode;
class ScalarNode;
class Stream;

} // end namespace yaml

namespace SymbolRewriter {

/// The basic entity representing a rewrite operation.  It serves as the base
/// class for any rewrite descriptor.  It has a certain set of specializations
/// which describe a particular rewrite.
///
/// The RewriteMapParser can be used to parse a mapping file that provides the
/// mapping for rewriting the symbols.  The descriptors individually describe
/// whether to rewrite a function, global variable, or global alias.  Each of
/// these can be selected either by explicitly providing a name for the ones to
/// be rewritten or providing a (posix compatible) regular expression that will
/// select the symbols to rewrite.  This descriptor list is passed to the
/// SymbolRewriter pass.
class RewriteDescriptor {
public:
  enum class Type {
    Invalid,        /// invalid
    Function,       /// function - descriptor rewrites a function
    GlobalVariable, /// global variable - descriptor rewrites a global variable
    NamedAlias,     /// named alias - descriptor rewrites a global alias
  };

  RewriteDescriptor(const RewriteDescriptor &) = delete;
  RewriteDescriptor &operator=(const RewriteDescriptor &) = delete;
  virtual ~RewriteDescriptor() = default;

  Type getType() const { return Kind; }

  virtual bool performOnModule(Module &M) = 0;

protected:
  explicit RewriteDescriptor(Type T) : Kind(T) {}

private:
  const Type Kind;
};

using RewriteDescriptorList = std::list<std::unique_ptr<RewriteDescriptor>>;

class RewriteMapParser {
public:
  bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors);

private:
  bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL);
  bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry,
                  RewriteDescriptorList *DL);
  bool parseRewriteFunctionDescriptor(yaml::Stream &Stream,
                                      yaml::ScalarNode *Key,
                                      yaml::MappingNode *Value,
                                      RewriteDescriptorList *DL);
  bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream,
                                            yaml::ScalarNode *Key,
                                            yaml::MappingNode *Value,
                                            RewriteDescriptorList *DL);
  bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K,
                                         yaml::MappingNode *V,
                                         RewriteDescriptorList *DL);
};

} // end namespace SymbolRewriter

class RewriteSymbolPass : public PassInfoMixin<RewriteSymbolPass> {
public:
  RewriteSymbolPass() { loadAndParseMapFiles(); }

  RewriteSymbolPass(SymbolRewriter::RewriteDescriptorList &DL) {
    Descriptors.splice(Descriptors.begin(), DL);
  }

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  // Glue for old PM
  bool runImpl(Module &M);

private:
  void loadAndParseMapFiles();

  SymbolRewriter::RewriteDescriptorList Descriptors;
};
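
// Illustrative usage sketch (editorial addition): parsing a rewrite map and
// applying it to a module `M`; "rewrite.map" is a made-up file name:
//
//   SymbolRewriter::RewriteMapParser Parser;
//   SymbolRewriter::RewriteDescriptorList DL;
//   if (Parser.parse("rewrite.map", &DL)) {
//     RewriteSymbolPass P(DL);
//     P.runImpl(M);
//   }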

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SYMBOLREWRITER_H
//===- CallPromotionUtils.h - Utilities for call promotion ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares utilities useful for promoting indirect call sites to
// direct call sites.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
#define LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H

namespace llvm {
class CallBase;
class CastInst;
class Function;
class MDNode;
class Value;

/// Return true if the given indirect call site can be made to call \p Callee.
///
/// This function ensures that the number and type of the call site's arguments
/// and return value match those of the given function. If the types do not
/// match exactly, they must at least be bitcast compatible. If \p FailureReason
/// is non-null and the indirect call cannot be promoted, the failure reason
/// will be stored in it.
bool isLegalToPromote(const CallBase &CB, Function *Callee,
                      const char **FailureReason = nullptr);

/// Promote the given indirect call site to unconditionally call \p Callee.
///
/// This function promotes the given call site, returning the direct call or
/// invoke instruction. If the function type of the call site doesn't match that
/// of the callee, bitcast instructions are inserted where appropriate. If \p
/// RetBitCast is non-null, it will be used to store the return value bitcast,
/// if created.
CallBase &promoteCall(CallBase &CB, Function *Callee,
                      CastInst **RetBitCast = nullptr);
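
// Illustrative usage sketch (editorial addition): promoting an indirect call
// `CB` once a likely callee is known; `Callee` is the candidate function:
//
//   const char *Reason = nullptr;
//   if (isLegalToPromote(CB, Callee, &Reason))
//     promoteCall(CB, Callee);
//   else
//     ; // Reason now points at a human-readable failure description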

/// Promote the given indirect call site to conditionally call \p Callee.
///
/// This function creates an if-then-else structure at the location of the call
/// site. The original call site is moved into the "else" block. A clone of the
/// indirect call site is promoted, placed in the "then" block, and returned. If
/// \p BranchWeights is non-null, it will be used to set !prof metadata on the
/// new conditional branch.
CallBase &promoteCallWithIfThenElse(CallBase &CB, Function *Callee,
                                    MDNode *BranchWeights = nullptr);

/// Try to promote (devirtualize) a virtual call on an Alloca. Return true on
/// success.
///
/// Look for a pattern like:
///
///  %o = alloca %class.Impl
///  %1 = getelementptr %class.Impl, %class.Impl* %o, i64 0, i32 0, i32 0
///  store i32 (...)** bitcast (i8** getelementptr inbounds
///      ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV4Impl, i64 0, inrange i32 0, i64 2)
///      to i32 (...)**), i32 (...)*** %1
///  %2 = getelementptr inbounds %class.Impl, %class.Impl* %o, i64 0, i32 0
///  %3 = bitcast %class.Interface* %2 to void (%class.Interface*)***
///  %vtable.i = load void (%class.Interface*)**, void (%class.Interface*)*** %3
///  %4 = load void (%class.Interface*)*, void (%class.Interface*)** %vtable.i
///  call void %4(%class.Interface* nonnull %2)
///
/// @_ZTV4Impl = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] }
///     { [3 x i8*]
///     [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI4Impl to i8*),
///     i8* bitcast (void (%class.Impl*)* @_ZN4Impl3RunEv to i8*)] }
///
bool tryPromoteCall(CallBase &CB);

/// Predicate and clone the given call site.
///
/// This function creates an if-then-else structure at the location of the call
/// site. The "if" condition compares the call site's called value to the given
/// callee. The original call site is moved into the "else" block, and a clone
/// of the call site is placed in the "then" block. The cloned instruction is
/// returned.
CallBase &versionCallSite(CallBase &CB, Value *Callee, MDNode *BranchWeights);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CALLPROMOTIONUTILS_H
//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines functions that are used to process llvm.global_ctors.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H
#define LLVM_TRANSFORMS_UTILS_CTORUTILS_H

#include "llvm/ADT/STLFunctionalExtras.h"

namespace llvm {

class Function;
class Module;

/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
/// entries for which it returns true.  Return true if anything changed.
bool optimizeGlobalCtorsList(
    Module &M, function_ref<bool(uint32_t, Function *)> ShouldRemove);
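
// A minimal usage sketch (editorial): drop constructors whose body is a single
// 'ret' and therefore can have no effect. The predicate shown is illustrative,
// not the one GlobalOpt actually uses.
//
//   bool dropTrivialCtors(Module &M) {
//     return optimizeGlobalCtorsList(M, [](uint32_t /*Priority*/, Function *F) {
//       if (!F || F->isDeclaration())
//         return false;
//       const BasicBlock &Entry = F->getEntryBlock();
//       return Entry.size() == 1 && isa<ReturnInst>(Entry.front());
//     });
//   }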

} // namespace llvm

#endif
//===- llvm/Transforms/Utils/BypassSlowDivision.h ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains an optimization for div and rem on architectures that
// execute short instructions significantly faster than longer instructions.
// For example, on Intel Atom 32-bit divides are slow enough that during
// runtime it is profitable to check the value of the operands, and if they are
// positive and less than 256 use an unsigned 8-bit divide.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
#define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/IR/ValueHandle.h"
#include <cstdint>

namespace llvm {

class BasicBlock;
class Value;

struct DivRemMapKey {
  bool SignedOp;
  AssertingVH<Value> Dividend;
  AssertingVH<Value> Divisor;

  DivRemMapKey() = default;

  DivRemMapKey(bool InSignedOp, Value *InDividend, Value *InDivisor)
      : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {}
};

template <> struct DenseMapInfo<DivRemMapKey> {
  static bool isEqual(const DivRemMapKey &Val1, const DivRemMapKey &Val2) {
    return Val1.SignedOp == Val2.SignedOp && Val1.Dividend == Val2.Dividend &&
           Val1.Divisor == Val2.Divisor;
  }

  static DivRemMapKey getEmptyKey() {
    return DivRemMapKey(false, nullptr, nullptr);
  }

  static DivRemMapKey getTombstoneKey() {
    return DivRemMapKey(true, nullptr, nullptr);
  }

  static unsigned getHashValue(const DivRemMapKey &Val) {
    return (unsigned)(reinterpret_cast<uintptr_t>(
                          static_cast<Value *>(Val.Dividend)) ^
                      reinterpret_cast<uintptr_t>(
                          static_cast<Value *>(Val.Divisor))) ^
           (unsigned)Val.SignedOp;
  }
};

/// This optimization identifies DIV instructions in a BB that can be
/// profitably bypassed and carried out with a shorter, faster divide.
///
/// This optimization may add basic blocks immediately after BB; for obvious
/// reasons, you shouldn't pass those blocks to bypassSlowDivision.
bool bypassSlowDivision(
    BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidth);
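
// A minimal usage sketch (editorial): request an 8-bit bypass for 32-bit
// div/rem across a whole function. The blocks are snapshotted first because,
// per the note above, the newly inserted blocks must not be re-processed.
//
//   bool bypassDivisionInFunction(Function &F) {
//     DenseMap<unsigned, unsigned> BypassWidths;
//     BypassWidths[32] = 8; // use an 8-bit divide when the operands fit
//     SmallVector<BasicBlock *, 16> Blocks;
//     for (BasicBlock &BB : F)
//       Blocks.push_back(&BB);
//     bool Changed = false;
//     for (BasicBlock *BB : Blocks)
//       Changed |= bypassSlowDivision(BB, BypassWidths);
//     return Changed;
//   }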

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
//===- AddDiscriminators.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass adds DWARF discriminators to the IR. Path discriminators are used
// to decide what CFG path was taken inside sub-graphs whose instructions share
// the same line and column number information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
#define LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

class AddDiscriminatorsPass : public PassInfoMixin<AddDiscriminatorsPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};
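
// A minimal usage sketch (editorial): schedule the pass in the new pass
// manager. The FunctionAnalysisManager `FAM` is assumed to be set up already.
//
//   FunctionPassManager FPM;
//   FPM.addPass(AddDiscriminatorsPass());
//   PreservedAnalyses PA = FPM.run(F, FAM);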

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_ADDDISCRIMINATORS_H
//===- LibCallsShrinkWrap.h - Shrink Wrap Library Calls -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
#define LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class LibCallsShrinkWrapPass : public PassInfoMixin<LibCallsShrinkWrapPass> {
public:
  static StringRef name() { return "LibCallsShrinkWrapPass"; }

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LIBCALLSSHRINKWRAP_H
//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines various functions that are used to clone chunks of LLVM
// code for various purposes.  This varies from copying whole modules into new
// modules, to cloning functions with different arguments, to inlining
// functions, to copying basic blocks to support loop unrolling or superblock
// formation, etc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
#define LLVM_TRANSFORMS_UTILS_CLONING_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <functional>
#include <memory>
#include <vector>

namespace llvm {

class AAResults;
class AllocaInst;
class BasicBlock;
class BlockFrequencyInfo;
class DebugInfoFinder;
class DominatorTree;
class Function;
class Instruction;
class Loop;
class LoopInfo;
class Module;
class ProfileSummaryInfo;
class ReturnInst;
class DomTreeUpdater;

/// Return an exact copy of the specified module
std::unique_ptr<Module> CloneModule(const Module &M);
std::unique_ptr<Module> CloneModule(const Module &M, ValueToValueMapTy &VMap);

/// Return a copy of the specified module. The ShouldCloneDefinition function
/// controls whether a specific GlobalValue's definition is cloned. If the
/// function returns false, the module copy will contain an external reference
/// in place of the global definition.
std::unique_ptr<Module>
CloneModule(const Module &M, ValueToValueMapTy &VMap,
            function_ref<bool(const GlobalValue *)> ShouldCloneDefinition);
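
// A minimal usage sketch (editorial): clone a module while keeping only
// function definitions; global variables become external declarations in the
// copy. The policy is illustrative.
//
//   std::unique_ptr<Module> cloneFunctionsOnly(const Module &M) {
//     ValueToValueMapTy VMap;
//     return CloneModule(M, VMap, [](const GlobalValue *GV) {
//       return isa<Function>(GV);
//     });
//   }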

/// This struct can be used to capture information about code
/// being cloned, while it is being cloned.
struct ClonedCodeInfo {
  /// This is set to true if the cloned code contains a normal call instruction.
  bool ContainsCalls = false;

  /// This is set to true if there is memprof related metadata (memprof or
  /// callsite metadata) in the cloned code.
  bool ContainsMemProfMetadata = false;

  /// This is set to true if the cloned code contains a 'dynamic' alloca.
  /// Dynamic allocas are allocas that are either not in the entry block, or
  /// are in the entry block but do not have a constant size.
  bool ContainsDynamicAllocas = false;

  /// All cloned call sites that have operand bundles attached are appended to
  /// this vector.  This vector may contain nulls or undefs if some of the
  /// originally inserted callsites were DCE'ed after they were cloned.
  std::vector<WeakTrackingVH> OperandBundleCallSites;

  /// Like VMap, but maps only unsimplified instructions. Values in the map
  /// may be dangling; it is only intended to be used via isSimplified(), to
  /// check whether the main VMap mapping involves simplification or not.
  DenseMap<const Value *, const Value *> OrigVMap;

  ClonedCodeInfo() = default;

  bool isSimplified(const Value *From, const Value *To) const {
    return OrigVMap.lookup(From) != To;
  }
};

/// Return a copy of the specified basic block, but without
/// embedding the block into a particular function.  The block returned is an
/// exact copy of the specified basic block, without any remapping having been
/// performed.  Because of this, this is only suitable for applications where
/// the basic block will be inserted into the same function that it was cloned
/// from (loop unrolling would use this, for example).
///
/// Also, note that this function makes a direct copy of the basic block, and
/// can thus produce illegal LLVM code.  In particular, it will copy any PHI
/// nodes from the original block, even though there are no predecessors for the
/// newly cloned block (thus, phi nodes will have to be updated).  Also, this
/// block will branch to the old successors of the original block: these
/// successors will have to have any PHI nodes updated to account for the new
/// incoming edges.
///
/// The correlation between instructions in the source and result basic blocks
/// is recorded in the VMap map.
///
/// If you have a particular suffix you'd like to use to add to any cloned
/// names, specify it as the optional third parameter.
///
/// If you would like the basic block to be auto-inserted into the end of a
/// function, you can specify it as the optional fourth parameter.
///
/// If you would like to collect additional information about the cloned
/// function, you can specify a ClonedCodeInfo object with the optional fifth
/// parameter.
BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                            const Twine &NameSuffix = "", Function *F = nullptr,
                            ClonedCodeInfo *CodeInfo = nullptr,
                            DebugInfoFinder *DIFinder = nullptr);
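
// A minimal usage sketch (editorial): duplicate a block within its own
// function and remap the copy's operands, roughly as an unroller would. PHI
// nodes and successor edges still need fixing up afterwards, per the note
// above. remapInstructionsInBlocks is declared later in this header.
//
//   BasicBlock *cloneBlockInPlace(BasicBlock *BB) {
//     ValueToValueMapTy VMap;
//     BasicBlock *Copy = CloneBasicBlock(BB, VMap, ".dup", BB->getParent());
//     remapInstructionsInBlocks({Copy}, VMap);
//     return Copy;
//   }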

/// Return a copy of the specified function and add it to that
/// function's module.  Also, any references specified in the VMap are changed
/// to refer to their mapped value instead of the original one.  If any of the
/// arguments to the function are in the VMap, the arguments are deleted from
/// the resultant function.  The VMap is updated to include mappings from all of
/// the instructions and basic blocks in the function from their old to new
/// values.  The final argument captures information about the cloned code if
/// non-null.
///
/// \pre VMap contains no non-identity GlobalValue mappings.
///
Function *CloneFunction(Function *F, ValueToValueMapTy &VMap,
                        ClonedCodeInfo *CodeInfo = nullptr);
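
// A minimal usage sketch (editorial): make a renamed copy of F in its module.
//
//   Function *cloneWithSuffix(Function &F) {
//     ValueToValueMapTy VMap;
//     Function *Copy = CloneFunction(&F, VMap);
//     Copy->setName(F.getName() + ".clone");
//     return Copy;
//   }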

enum class CloneFunctionChangeType {
  LocalChangesOnly,
  GlobalChanges,
  DifferentModule,
  ClonedModule,
};

/// Clone OldFunc into NewFunc, transforming the old arguments into references
/// to VMap values.  Note that if NewFunc already has basic blocks, the ones
/// cloned into it will be added to the end of the function.  This function
/// fills in a list of return instructions, and can optionally remap types
/// and/or append the specified suffix to all values cloned.
///
/// If \p Changes is \a CloneFunctionChangeType::LocalChangesOnly, VMap is
/// required to contain no non-identity GlobalValue mappings. Otherwise,
/// referenced metadata will be cloned.
///
/// If \p Changes is less than \a CloneFunctionChangeType::DifferentModule,
/// indicating cloning into the same module (even if it's LocalChangesOnly),
/// and debug info metadata transitively references a \a DISubprogram, it will be
/// cloned, effectively upgrading \p Changes to GlobalChanges while suppressing
/// cloning of types and compile units.
///
/// If \p Changes is \a CloneFunctionChangeType::DifferentModule, the new
/// module's \c !llvm.dbg.cu will get updated with any newly created compile
/// units. (\a CloneFunctionChangeType::ClonedModule leaves that work for the
/// caller.)
///
/// FIXME: Consider simplifying this function by splitting out \a
/// CloneFunctionMetadataInto() and expecting / updating callers to call it
/// first when / how it's needed.
void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
                       ValueToValueMapTy &VMap, CloneFunctionChangeType Changes,
                       SmallVectorImpl<ReturnInst *> &Returns,
                       const char *NameSuffix = "",
                       ClonedCodeInfo *CodeInfo = nullptr,
                       ValueMapTypeRemapper *TypeMapper = nullptr,
                       ValueMaterializer *Materializer = nullptr);

void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
                               const Instruction *StartingInst,
                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
                               SmallVectorImpl<ReturnInst *> &Returns,
                               const char *NameSuffix = "",
                               ClonedCodeInfo *CodeInfo = nullptr);

/// This works exactly like CloneFunctionInto,
/// except that it does some simple constant prop and DCE on the fly.  The
/// effect of this is to copy significantly less code in cases where (for
/// example) a function call with constant arguments is inlined, and those
/// constant arguments cause a significant amount of code in the callee to be
/// dead.  Since this doesn't produce an exact copy of the input, it can't be
/// used for things like CloneFunction or CloneModule.
///
/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
/// mappings.
///
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
                               ValueToValueMapTy &VMap, bool ModuleLevelChanges,
                               SmallVectorImpl<ReturnInst*> &Returns,
                               const char *NameSuffix = "",
                               ClonedCodeInfo *CodeInfo = nullptr);

/// This class captures the data input to the InlineFunction call, and records
/// the auxiliary results produced by it.
class InlineFunctionInfo {
public:
  explicit InlineFunctionInfo(
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      BlockFrequencyInfo *CallerBFI = nullptr,
      BlockFrequencyInfo *CalleeBFI = nullptr, bool UpdateProfile = true)
      : GetAssumptionCache(GetAssumptionCache), PSI(PSI), CallerBFI(CallerBFI),
        CalleeBFI(CalleeBFI), UpdateProfile(UpdateProfile) {}

  /// If non-null, InlineFunction will update the callgraph to reflect the
  /// changes it makes.
  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
  ProfileSummaryInfo *PSI;
  BlockFrequencyInfo *CallerBFI, *CalleeBFI;

  /// InlineFunction fills this in with all static allocas that get copied into
  /// the caller.
  SmallVector<AllocaInst *, 4> StaticAllocas;

  /// InlineFunction fills this in with callsites that were inlined from the
  /// callee. This is only filled in if CG is non-null.
  SmallVector<WeakTrackingVH, 8> InlinedCalls;

  /// All of the new call sites inlined into the caller.
  ///
  /// 'InlineFunction' fills this in by scanning the inlined instructions, but
  /// only when CG is null. When CG is non-null, the value-handle vector
  /// `InlinedCalls` above is used instead.
  SmallVector<CallBase *, 8> InlinedCallSites;

  /// Update profile for callee as well as cloned version. We need to do this
  /// for regular inlining, but not for inlining from sample profile loader.
  bool UpdateProfile;

  void reset() {
    StaticAllocas.clear();
    InlinedCalls.clear();
    InlinedCallSites.clear();
  }
};

/// This function inlines the called function into the basic
/// block of the caller.  This returns false if it is not possible to inline
/// this call.  The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
///
/// Note that while this routine is allowed to cleanup and optimize the
/// *inlined* code to minimize the actual inserted code, it must not delete
/// code in the caller as users of this routine may have pointers to
/// instructions in the caller that need to remain stable.
///
/// If ForwardVarArgsTo is passed, inlining a function with varargs is allowed
/// and all varargs at the callsite will be passed to any calls to
/// ForwardVarArgsTo. The caller of InlineFunction has to make sure any varargs
/// are only used by ForwardVarArgsTo.
///
/// The callee's function attributes are merged into the callers' if
/// MergeAttributes is set to true.
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
                            bool MergeAttributes = false,
                            AAResults *CalleeAAR = nullptr,
                            bool InsertLifetime = true,
                            Function *ForwardVarArgsTo = nullptr);
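
// A minimal usage sketch (editorial): inline a single call site and report
// the outcome. `CB` must call a function with a definition for this to
// succeed.
//
//   bool inlineOneCall(CallBase &CB) {
//     InlineFunctionInfo IFI;
//     InlineResult IR = InlineFunction(CB, IFI, /*MergeAttributes=*/true);
//     if (!IR.isSuccess()) {
//       errs() << "not inlined: " << IR.getFailureReason() << "\n";
//       return false;
//     }
//     return true;
//   }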

/// Clones a loop \p OrigLoop.  Returns the loop and the blocks in \p
/// Blocks.
///
/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
/// \p LoopDomBB.  Insert the new blocks before block specified in \p Before.
/// Note: Only innermost loops are supported.
Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
                             Loop *OrigLoop, ValueToValueMapTy &VMap,
                             const Twine &NameSuffix, LoopInfo *LI,
                             DominatorTree *DT,
                             SmallVectorImpl<BasicBlock *> &Blocks);

/// Remaps instructions in \p Blocks using the mapping in \p VMap.
void remapInstructionsInBlocks(ArrayRef<BasicBlock *> Blocks,
                               ValueToValueMapTy &VMap);

/// Split edge between BB and PredBB and duplicate all non-Phi instructions
/// from BB between its beginning and the StopAt instruction into the split
/// block. Phi nodes are not duplicated, but their uses are handled correctly:
/// we replace them with the uses of corresponding Phi inputs. ValueMapping
/// is used to map the original instructions from BB to their newly-created
/// copies. Returns the split block.
BasicBlock *DuplicateInstructionsInSplitBetween(BasicBlock *BB,
                                                BasicBlock *PredBB,
                                                Instruction *StopAt,
                                                ValueToValueMapTy &ValueMapping,
                                                DomTreeUpdater &DTU);

/// Updates profile information by adjusting the entry count by adding
/// EntryDelta, then scaling callsite information by the new count divided by
/// the old count. VMap is used during inlining to also update the new clone.
void updateProfileCallee(
    Function *Callee, int64_t EntryDelta,
    const ValueMap<const Value *, WeakTrackingVH> *VMap = nullptr);

/// Find the 'llvm.experimental.noalias.scope.decl' intrinsics in the specified
/// basic blocks and extract their scope. These are candidates for duplication
/// when cloning.
void identifyNoAliasScopesToClone(
    ArrayRef<BasicBlock *> BBs, SmallVectorImpl<MDNode *> &NoAliasDeclScopes);

/// Find the 'llvm.experimental.noalias.scope.decl' intrinsics in the specified
/// instruction range and extract their scope. These are candidates for
/// duplication when cloning.
void identifyNoAliasScopesToClone(
    BasicBlock::iterator Start, BasicBlock::iterator End,
    SmallVectorImpl<MDNode *> &NoAliasDeclScopes);

/// Duplicate the specified list of noalias decl scopes.
/// The 'Ext' string is added as an extension to the name.
/// Afterwards, ClonedScopes contains the mapping from each original scope
/// MDNode to its cloned scope.
/// Be aware that the cloned scopes are still part of the original scope domain.
void cloneNoAliasScopes(
    ArrayRef<MDNode *> NoAliasDeclScopes,
    DenseMap<MDNode *, MDNode *> &ClonedScopes,
    StringRef Ext, LLVMContext &Context);

/// Adapt the metadata for the specified instruction according to the
/// provided mapping. This is normally used after cloning an instruction, when
/// some noalias scopes needed to be cloned.
void adaptNoAliasScopes(
    llvm::Instruction *I, const DenseMap<MDNode *, MDNode *> &ClonedScopes,
    LLVMContext &Context);

/// Clone the specified noalias decl scopes. Then adapt all instructions in the
/// NewBlocks basicblocks to the cloned versions.
/// 'Ext' will be added to the duplicate scope names.
void cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                ArrayRef<BasicBlock *> NewBlocks,
                                LLVMContext &Context, StringRef Ext);

/// Clone the specified noalias decl scopes. Then adapt all instructions in the
/// [IStart, IEnd] range (IEnd inclusive) to the cloned versions. 'Ext' will be
/// added to the duplicate scope names.
void cloneAndAdaptNoAliasScopes(ArrayRef<MDNode *> NoAliasDeclScopes,
                                Instruction *IStart, Instruction *IEnd,
                                LLVMContext &Context, StringRef Ext);
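
// A minimal usage sketch (editorial) tying the declarations above together:
// when duplicating blocks, clone the noalias scope declarations they contain
// so each copy gets distinct scopes. `Blocks`, `NewBlocks` and `Ctx` are
// assumed to come from the surrounding transform.
//
//   SmallVector<MDNode *, 8> NoAliasDeclScopes;
//   identifyNoAliasScopesToClone(Blocks, NoAliasDeclScopes);
//   // ... clone Blocks into NewBlocks, e.g. with CloneBasicBlock ...
//   cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, NewBlocks, Ctx, "cloned");
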
} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CLONING_H
//===- LowerInvoke.h - Eliminate Invoke instructions ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation is designed for use by code generators which do not yet
// support stack unwinding.  This pass converts 'invoke' instructions to 'call'
// instructions, so that any exception-handling 'landingpad' blocks become dead
// code (which can be removed by running the '-simplifycfg' pass afterwards).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
#define LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class LowerInvokePass : public PassInfoMixin<LowerInvokePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOWERINVOKE_H
//===- SSAUpdaterBulk.h - Unstructured SSA Update Tool ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SSAUpdaterBulk class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/PredIteratorCache.h"

namespace llvm {

class BasicBlock;
class PHINode;
template <typename T> class SmallVectorImpl;
class Type;
class Use;
class Value;
class DominatorTree;

/// Helper class for SSA formation on a set of values defined in multiple
/// blocks.
///
/// This is used when code duplication or another unstructured transformation
/// wants to rewrite a set of uses of one value with uses of a set of values.
/// The update is done only when RewriteAllUses is called; all other methods are
/// used for book-keeping. This helps to share some common computations between
/// updates of different uses (which is not the case when traditional SSAUpdater
/// is used).
class SSAUpdaterBulk {
  struct RewriteInfo {
    DenseMap<BasicBlock *, Value *> Defines;
    SmallVector<Use *, 4> Uses;
    StringRef Name;
    Type *Ty;
    RewriteInfo() = default;
    RewriteInfo(StringRef &N, Type *T) : Name(N), Ty(T) {}
  };
  SmallVector<RewriteInfo, 4> Rewrites;

  PredIteratorCache PredCache;

  Value *computeValueAt(BasicBlock *BB, RewriteInfo &R, DominatorTree *DT);

public:
  explicit SSAUpdaterBulk() = default;
  SSAUpdaterBulk(const SSAUpdaterBulk &) = delete;
  SSAUpdaterBulk &operator=(const SSAUpdaterBulk &) = delete;
  ~SSAUpdaterBulk() = default;

  /// Add a new variable to the SSA rewriter. This needs to be called before
  /// AddAvailableValue or AddUse calls. The return value is the variable ID,
  /// which needs to be passed to AddAvailableValue and AddUse.
  unsigned AddVariable(StringRef Name, Type *Ty);

  /// Indicate that a rewritten value is available in the specified block with
  /// the specified value.
  void AddAvailableValue(unsigned Var, BasicBlock *BB, Value *V);

  /// Record a use of the symbolic value. This use will be updated with a
  /// rewritten value when RewriteAllUses is called.
  void AddUse(unsigned Var, Use *U);

  /// Perform all the necessary updates, including new PHI-nodes insertion and
  /// the requested uses update.
  ///
  /// The function requires dominator tree DT, which is used for computing
  /// locations for new phi-nodes insertions. If a nonnull pointer to a vector
  /// InsertedPHIs is passed, all the new phi-nodes will be added to this
  /// vector.
  void RewriteAllUses(DominatorTree *DT,
                      SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);
};
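
// A minimal usage sketch (editorial): rewrite the uses of one variable that
// now has two reaching definitions, letting the updater insert any PHIs it
// needs. `BB1`, `BB2`, `V1`, `V2`, `UseToRewrite` and `DT` are assumed to
// come from the surrounding transform.
//
//   SSAUpdaterBulk Updater;
//   unsigned Var = Updater.AddVariable("x", V1->getType());
//   Updater.AddAvailableValue(Var, BB1, V1);
//   Updater.AddAvailableValue(Var, BB2, V2);
//   Updater.AddUse(Var, &UseToRewrite);
//   Updater.RewriteAllUses(&DT);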

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERBULK_H
//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes an interface to promote alloca instructions to SSA
// registers, by using the SSA construction algorithm.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
#define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H

namespace llvm {

template <typename T> class ArrayRef;
class AllocaInst;
class DominatorTree;
class AssumptionCache;

/// Return true if this alloca is legal for promotion.
///
/// This is true if there are only loads, stores, and lifetime markers
/// (transitively) using this alloca. This also enforces that there is only
/// ever one layer of bitcasts or GEPs between the alloca and the lifetime
/// markers.
bool isAllocaPromotable(const AllocaInst *AI);

/// Promote the specified list of alloca instructions into scalar
/// registers, inserting PHI nodes as appropriate.
///
/// This function makes use of DominanceFrontier information.  This function
/// does not modify the CFG of the function at all.  All allocas must be from
/// the same function.
///
void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                     AssumptionCache *AC = nullptr);
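
// A minimal usage sketch (editorial): the classic mem2reg driver loop,
// promoting every eligible alloca in the entry block at once.
//
//   void promoteAllocas(Function &F, DominatorTree &DT) {
//     SmallVector<AllocaInst *, 8> Promotable;
//     for (Instruction &I : F.getEntryBlock())
//       if (auto *AI = dyn_cast<AllocaInst>(&I))
//         if (isAllocaPromotable(AI))
//           Promotable.push_back(AI);
//     if (!Promotable.empty())
//       PromoteMemToReg(Promotable, DT);
//   }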

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
//===- UnifyLoopExits.h - Redirect exiting edges to one block -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
#define LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class UnifyLoopExitsPass : public PassInfoMixin<UnifyLoopExitsPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_UNIFYLOOPEXITS_H
//===- FunctionImportUtils.h - Importing support utilities -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the FunctionImportGlobalProcessing class which is used
// to perform the necessary global value handling for function importing.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
#define LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/ModuleSummaryIndex.h"

namespace llvm {
class Module;

/// Class to handle necessary GlobalValue changes required by ThinLTO
/// function importing, including linkage changes and any necessary renaming.
class FunctionImportGlobalProcessing {
  /// The Module which we are exporting or importing functions from.
  Module &M;

  /// Module summary index passed in for function importing/exporting handling.
  const ModuleSummaryIndex &ImportIndex;

  /// Globals to import from this module; all other globals will be imported
  /// as declarations instead of definitions.
  SetVector<GlobalValue *> *GlobalsToImport;

  /// Set to true if the given ModuleSummaryIndex contains any functions
  /// from this source module, in which case we must conservatively assume
  /// that any of its functions may be imported into another module
  /// as part of a different backend compilation process.
  bool HasExportedFunctions = false;

  /// Set to true (only applicable to ELF -fpic) if dso_local should be
  /// dropped for a declaration.
  ///
  /// On ELF, the assembler is conservative and assumes a global default
  /// visibility symbol can be interposable. No direct access relocation is
  /// allowed if the definition is not in the translation unit, even if the
  /// definition is available in the linkage unit. Thus we need to clear
  /// dso_local to disable direct access.
  ///
  /// This flag should not be set for -fno-pic or -fpie, which would
  /// unnecessarily disable direct access.
  bool ClearDSOLocalOnDeclarations;

  /// Set of llvm.*used values, in order to validate that we don't try
  /// to promote any non-renamable values.
  SmallPtrSet<GlobalValue *, 4> Used;

  /// Keep track of any COMDATs that require renaming (because COMDAT
  /// leader was promoted and renamed). Maps from original COMDAT to one
  /// with new name.
  DenseMap<const Comdat *, Comdat *> RenamedComdats;

  /// Check if we should promote the given local value to global scope.
  bool shouldPromoteLocalToGlobal(const GlobalValue *SGV, ValueInfo VI);

#ifndef NDEBUG
  /// Check if the given value is a local that can't be renamed (promoted).
  /// Only used in assertion checking, and disabled under NDEBUG since the Used
  /// set will not be populated.
  bool isNonRenamableLocal(const GlobalValue &GV) const;
#endif

  /// Helper methods to check if we are importing from or potentially
  /// exporting from the current source module.
  bool isPerformingImport() const { return GlobalsToImport != nullptr; }
  bool isModuleExporting() const { return HasExportedFunctions; }

  /// If we are importing from the source module, checks if we should
  /// import SGV as a definition, otherwise import as a declaration.
  bool doImportAsDefinition(const GlobalValue *SGV);

  /// Get the name for a local SGV that should be promoted and renamed to global
  /// scope in the linked destination module.
  std::string getPromotedName(const GlobalValue *SGV);

  /// Process globals so that they can be used in ThinLTO. This includes
  /// promoting local variables so that they can be referenced externally by
  /// ThinLTO-imported globals, and converting strong external globals to
  /// available_externally.
  void processGlobalsForThinLTO();
  void processGlobalForThinLTO(GlobalValue &GV);

  /// Get the new linkage for SGV that should be used in the linked destination
  /// module. Specifically, for ThinLTO importing or exporting it may need
  /// to be adjusted. When \p DoPromote is true then we must adjust the
  /// linkage for a required promotion of a local to global scope.
  GlobalValue::LinkageTypes getLinkage(const GlobalValue *SGV, bool DoPromote);

public:
  FunctionImportGlobalProcessing(Module &M, const ModuleSummaryIndex &Index,
                                 SetVector<GlobalValue *> *GlobalsToImport,
                                 bool ClearDSOLocalOnDeclarations)
      : M(M), ImportIndex(Index), GlobalsToImport(GlobalsToImport),
        ClearDSOLocalOnDeclarations(ClearDSOLocalOnDeclarations) {
    // If we have a ModuleSummaryIndex but no function to import,
    // then this is the primary module being compiled in a ThinLTO
    // backend compilation, and we need to see if it has functions that
    // may be exported to another backend compilation.
    if (!GlobalsToImport)
      HasExportedFunctions = ImportIndex.hasExportedFunctions(M);

#ifndef NDEBUG
    SmallVector<GlobalValue *, 4> Vec;
    // First collect those in the llvm.used set.
    collectUsedGlobalVariables(M, Vec, /*CompilerUsed=*/false);
    // Next collect those in the llvm.compiler.used set.
    collectUsedGlobalVariables(M, Vec, /*CompilerUsed=*/true);
    Used = {Vec.begin(), Vec.end()};
#endif
  }

  bool run();
};

/// Perform in-place global value handling on the given Module for
/// exported local functions renamed and promoted for ThinLTO.
bool renameModuleForThinLTO(
    Module &M, const ModuleSummaryIndex &Index,
    bool ClearDSOLocalOnDeclarations,
    SetVector<GlobalValue *> *GlobalsToImport = nullptr);
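
// A minimal usage sketch (editorial): promote and rename the exported locals
// of the primary module in a ThinLTO backend compile (no import list, the
// case described in the constructor comment above).
//
//   bool Changed = renameModuleForThinLTO(
//       M, Index, /*ClearDSOLocalOnDeclarations=*/false);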

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_FUNCTIONIMPORTUTILS_H
//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a utility class to perform loop versioning.  The versioned
// loop speculates that otherwise may-aliasing memory accesses don't overlap and
// emits checks to prove this.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
#define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

namespace llvm {

class Loop;
class SCEVPredicate;
class ScalarEvolution;
class LoopAccessInfo;
class LoopInfo;
struct RuntimeCheckingPtrGroup;
typedef std::pair<const RuntimeCheckingPtrGroup *,
                  const RuntimeCheckingPtrGroup *>
    RuntimePointerCheck;

template <typename T> class ArrayRef;

/// This class emits a version of the loop where run-time checks ensure
/// that may-alias pointers can't overlap.
///
/// It currently only supports single-exit loops and assumes that the loop
/// already has a preheader.
class LoopVersioning {
public:
  /// Expects LoopAccessInfo, Loop, LoopInfo, DominatorTree as input.
  /// It uses the runtime checks provided by the user in \p Checks, which may
  /// be LAI's own checks or a subset assembled by the caller.
  LoopVersioning(const LoopAccessInfo &LAI,
                 ArrayRef<RuntimePointerCheck> Checks, Loop *L, LoopInfo *LI,
                 DominatorTree *DT, ScalarEvolution *SE);

  /// Performs the CFG manipulation part of versioning the loop including
  /// the DominatorTree and LoopInfo updates.
  ///
  /// The loop that was used to construct the class will be the "versioned" loop
  /// i.e. the loop that will receive control if all the memchecks pass.
  ///
  /// This allows the loop transform pass to operate on the same loop regardless
  /// of whether versioning was necessary or not:
  ///
  ///    for each loop L:
  ///        analyze L
  ///        if versioning is necessary version L
  ///        transform L
  void versionLoop() { versionLoop(findDefsUsedOutsideOfLoop(VersionedLoop)); }

  /// Same as above, but if the client has already precomputed the set of
  /// values used outside the loop, this API allows passing it in.
  void versionLoop(const SmallVectorImpl<Instruction *> &DefsUsedOutside);

  /// Returns the versioned loop.  Control flows here if pointers in the
  /// loop don't alias (i.e. all memchecks passed).  (This is actually the
  /// same loop that this object was constructed with.)
  Loop *getVersionedLoop() { return VersionedLoop; }

  /// Returns the fall-back loop.  Control flows here if pointers in the
  /// loop may alias (i.e. one of the memchecks failed).
  Loop *getNonVersionedLoop() { return NonVersionedLoop; }

  /// Annotate memory instructions in the versioned loop with no-alias
  /// metadata based on the memchecks issued.
  ///
  /// This is just a wrapper that calls prepareNoAliasMetadata and
  /// annotateInstWithNoAlias on the instructions of the versioned loop.
  void annotateLoopWithNoAlias();

  /// Set up the aliasing scopes based on the memchecks.  This needs to
  /// be called before the first call to annotateInstWithNoAlias.
  void prepareNoAliasMetadata();

  /// Add the noalias annotations to \p VersionedInst.
  ///
  /// \p OrigInst is the instruction corresponding to \p VersionedInst in the
  /// original loop.  Initialize the aliasing scopes with
  /// prepareNoAliasMetadata once before this can be called.
  void annotateInstWithNoAlias(Instruction *VersionedInst,
                               const Instruction *OrigInst);

private:
  /// Adds the necessary PHI nodes for the versioned loops based on the
  /// loop-defined values used outside of the loop.
  ///
  /// This needs to be called after versionLoop if there are defs in the loop
  /// that are used outside the loop.
  void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside);

  /// Add the noalias annotations to \p I.  Initialize the aliasing
  /// scopes with prepareNoAliasMetadata once before this can be called.
  void annotateInstWithNoAlias(Instruction *I) {
    annotateInstWithNoAlias(I, I);
  }

  /// The original loop.  This becomes the "versioned" one.  I.e.,
  /// control flows here if pointers in the loop don't alias.
  Loop *VersionedLoop;
  /// The fall-back loop.  I.e. control flows here if pointers in the
  /// loop may alias (memchecks failed).
  Loop *NonVersionedLoop = nullptr;

  /// This maps the instructions from VersionedLoop to their counterpart
  /// in NonVersionedLoop.
  ValueToValueMapTy VMap;

  /// The set of alias checks that we are versioning for.
  SmallVector<RuntimePointerCheck, 4> AliasChecks;

  /// The set of SCEV checks that we are versioning for.
  const SCEVPredicate &Preds;

  /// Maps a pointer to the pointer checking group that the pointer
  /// belongs to.
  DenseMap<const Value *, const RuntimeCheckingPtrGroup *> PtrToGroup;

  /// The alias scope corresponding to a pointer checking group.
  DenseMap<const RuntimeCheckingPtrGroup *, MDNode *> GroupToScope;

  /// The list of alias scopes that a pointer checking group can't alias.
  DenseMap<const RuntimeCheckingPtrGroup *, MDNode *>
      GroupToNonAliasingScopeList;

  /// Analyses used.
  const LoopAccessInfo &LAI;
  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
};
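
// A minimal usage sketch (editorial): version a loop using LAI's own runtime
// checks, then annotate the fast copy with noalias metadata. This mirrors the
// flow sketched in versionLoop()'s documentation; `LAI`, `L`, `LI`, `DT` and
// `SE` are assumed to be the usual analysis results.
//
//   LoopVersioning LVer(LAI, LAI.getRuntimePointerChecking()->getChecks(),
//                       L, &LI, &DT, &SE);
//   LVer.versionLoop();
//   LVer.annotateLoopWithNoAlias();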

/// Expose LoopVersioning as a pass.  Currently this is only used for
/// unit-testing.  It adds all memchecks necessary to remove all may-aliasing
/// array accesses from the loop.
class LoopVersioningPass : public PassInfoMixin<LoopVersioningPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
//===-- LowerIFunc.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOWERIFUNC_H
#define LLVM_TRANSFORMS_UTILS_LOWERIFUNC_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Pass to replace calls to ifuncs with indirect calls. This could be used to
/// support ifunc on systems where the program loader does not natively support
/// it. Constant initializer uses of ifuncs are not handled.
class LowerIFuncPass : public PassInfoMixin<LowerIFuncPass> {
public:
  LowerIFuncPass() = default;

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOWERIFUNC_H
//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
#define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H

#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"

namespace llvm {

class Constant;
class Function;
class Value;

/// It is safe to destroy a constant iff it is used only by other constants.
/// Note that constants cannot be cyclic, so this test is easy to implement
/// recursively.
///
bool isSafeToDestroyConstant(const Constant *C);

/// As we analyze each global, keep track of some information about it.  If we
/// find out that the address of the global is taken, none of this info will be
/// accurate.
struct GlobalStatus {
  /// True if the global's address is used in a comparison.
  bool IsCompared = false;

  /// True if the global is ever loaded.  If the global is never loaded it
  /// can be deleted.
  bool IsLoaded = false;

  /// Number of stores to the global.
  unsigned NumStores = 0;

  /// Keep track of what stores to the global look like.
  enum StoredType {
    /// There is no store to this global.  It can thus be marked constant.
    NotStored,

    /// This global is stored to, but the only thing stored is the constant it
    /// was initialized with. This is only tracked for scalar globals.
    InitializerStored,

    /// This global is stored to, but only its initializer and one other value
    /// are ever stored to it.  If this global is StoredOnce, we track the
    /// value stored to it via StoredOnceStore below.  This is only tracked
    /// for scalar globals.
    StoredOnce,

    /// This global is stored to by multiple values or something else that we
    /// cannot track.
    Stored
  } StoredType = NotStored;

  /// If only one value (besides the initializer constant) is ever stored to
  /// this global, keep track of what value it is via the store instruction.
  const StoreInst *StoredOnceStore = nullptr;

  /// If only one value (besides the initializer constant) is ever stored to
  /// this global return the stored value.
  Value *getStoredOnceValue() const {
    return (StoredType == StoredOnce && StoredOnceStore)
               ? StoredOnceStore->getOperand(0)
               : nullptr;
  }

  /// These start out null/false.  When the first accessing function is noticed,
  /// it is recorded. When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  const Function *AccessingFunction = nullptr;
  bool HasMultipleAccessingFunctions = false;

  /// Set to the strongest atomic ordering requirement.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  GlobalStatus();

  /// Look at all uses of the global and fill in the GlobalStatus structure.  If
  /// the global has its address taken, return true to indicate we can't do
  /// anything with it.
  static bool analyzeGlobal(const Value *V, GlobalStatus &GS);
};
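
// A minimal usage sketch (editorial): a global that analyzeGlobal can fully
// account for, and that is never stored beyond its initializer, can be marked
// constant. `GV` is assumed to be a GlobalVariable the caller owns.
//
//   GlobalStatus GS;
//   if (!GlobalStatus::analyzeGlobal(&GV, GS) &&
//       GS.StoredType <= GlobalStatus::InitializerStored)
//     GV.setConstant(true);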

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
//===- Transforms/Utils/BasicBlockUtils.h - BasicBlock Utils ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform manipulations on basic blocks, and
// instructions contained within basic blocks.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
#define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H

// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include <cassert>

namespace llvm {
class BranchInst;
class LandingPadInst;
class Loop;
class PHINode;
template <typename PtrType> class SmallPtrSetImpl;
class BlockFrequencyInfo;
class BranchProbabilityInfo;
class DomTreeUpdater;
class Function;
class IRBuilderBase;
class LoopInfo;
class MDNode;
class MemoryDependenceResults;
class MemorySSAUpdater;
class PostDominatorTree;
class ReturnInst;
class TargetLibraryInfo;
class Value;

/// Replace the contents of every block in \p BBs with a single unreachable
/// instruction. If \p Updates is specified, collect all necessary DT updates
/// into this vector. If \p KeepOneInputPHIs is true, one-input Phis in
/// successors of blocks being deleted will be preserved.
void detachDeadBlocks(ArrayRef<BasicBlock *> BBs,
                      SmallVectorImpl<DominatorTree::UpdateType> *Updates,
                      bool KeepOneInputPHIs = false);

/// Delete the specified block, which must have no predecessors.
void DeleteDeadBlock(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
                     bool KeepOneInputPHIs = false);

/// Delete the specified blocks from \p BB. The set of deleted blocks must have
/// no predecessors that are not being deleted themselves. \p BBs must not
/// contain duplicate blocks. If there are loops among this set of blocks, all
/// relevant loop info updates should be done before this function is called.
/// If \p KeepOneInputPHIs is true, one-input Phis in successors of blocks
/// being deleted will be preserved.
void DeleteDeadBlocks(ArrayRef<BasicBlock *> BBs,
                      DomTreeUpdater *DTU = nullptr,
                      bool KeepOneInputPHIs = false);

/// Delete all basic blocks from \p F that are not reachable from its entry
/// node. If \p KeepOneInputPHIs is true, one-input Phis in successors of
/// blocks being deleted will be preserved.
bool EliminateUnreachableBlocks(Function &F, DomTreeUpdater *DTU = nullptr,
                                bool KeepOneInputPHIs = false);

/// We know that BB has one predecessor. If there are any single-entry PHI nodes
/// in it, fold them away. This handles the case when all entries to the PHI
/// nodes in a block are guaranteed equal, such as when the block has exactly
/// one predecessor.
bool FoldSingleEntryPHINodes(BasicBlock *BB,
                             MemoryDependenceResults *MemDep = nullptr);

/// Examine each PHI in the given block and delete it if it is dead. Also
/// recursively delete any operands that become dead as a result. This includes
/// tracing the def-use list from the PHI to see if it is ultimately unused or
/// if it reaches an unused cycle. Return true if any PHIs were deleted.
bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr,
                    MemorySSAUpdater *MSSAU = nullptr);

/// Attempts to merge a block into its predecessor, if possible. The return
/// value indicates success or failure.
/// By default, blocks are not merged if BB's predecessor has multiple
/// successors.
/// If PredecessorWithTwoSuccessors = true, the blocks can only be merged
/// if BB's Pred has a branch to BB and to AnotherBB, and BB has a single
/// successor Sing. In this case the branch will be updated with Sing instead of
/// BB, and BB will still be merged into its predecessor and removed.
/// If \p DT is not nullptr, update it directly; in that case, DTU must be
/// nullptr.
bool MergeBlockIntoPredecessor(BasicBlock *BB, DomTreeUpdater *DTU = nullptr,
                               LoopInfo *LI = nullptr,
                               MemorySSAUpdater *MSSAU = nullptr,
                               MemoryDependenceResults *MemDep = nullptr,
                               bool PredecessorWithTwoSuccessors = false,
                               DominatorTree *DT = nullptr);

/// Merge block(s) successors, if possible. Return true if at least two
/// of the blocks were merged together.
/// In order to merge, each block must be terminated by an unconditional
/// branch. If L is provided, then the blocks merged into their predecessors
/// must be in L. In addition, this utility calls on another utility:
/// MergeBlockIntoPredecessor. Blocks are successfully merged when the call to
/// MergeBlockIntoPredecessor returns true.
bool MergeBlockSuccessorsIntoGivenBlocks(
    SmallPtrSetImpl<BasicBlock *> &MergeBlocks, Loop *L = nullptr,
    DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr);

/// Try to remove redundant dbg.value instructions from the given basic block.
/// Returns true if at least one instruction was removed.
bool RemoveRedundantDbgInstrs(BasicBlock *BB);

/// Replace all uses of an instruction (specified by BI) with a value, then
/// remove and delete the original instruction.
void ReplaceInstWithValue(BasicBlock::iterator &BI, Value *V);

/// Replace the instruction specified by BI with the instruction specified by I.
/// Copies DebugLoc from BI to I, if I doesn't already have a DebugLoc. The
/// original instruction is deleted and BI is updated to point to the new
/// instruction.
void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI,
                         Instruction *I);

/// Replace the instruction specified by From with the instruction specified by
/// To. Copies DebugLoc from From to To, if To doesn't already have a DebugLoc.
void ReplaceInstWithInst(Instruction *From, Instruction *To);

/// Check if we can prove that all paths starting from this block converge
/// to a block that either has a @llvm.experimental.deoptimize call
/// prior to its terminating return instruction or is terminated by unreachable.
/// All blocks in the traversed sequence must have a unique successor, except
/// possibly the last one.
bool IsBlockFollowedByDeoptOrUnreachable(const BasicBlock *BB);

/// Option class for critical edge splitting.
///
/// This provides a builder interface for overriding the default options used
/// during critical edge splitting.
struct CriticalEdgeSplittingOptions {
  DominatorTree *DT;
  PostDominatorTree *PDT;
  LoopInfo *LI;
  MemorySSAUpdater *MSSAU;
  bool MergeIdenticalEdges = false;
  bool KeepOneInputPHIs = false;
  bool PreserveLCSSA = false;
  bool IgnoreUnreachableDests = false;
  /// SplitCriticalEdge is guaranteed to preserve loop-simplify form if LI is
  /// provided. If it cannot be preserved, no splitting will take place. If it
  /// is not set, preserve loop-simplify form if possible.
  bool PreserveLoopSimplify = true;

  CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
                               LoopInfo *LI = nullptr,
                               MemorySSAUpdater *MSSAU = nullptr,
                               PostDominatorTree *PDT = nullptr)
      : DT(DT), PDT(PDT), LI(LI), MSSAU(MSSAU) {}

  CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
    MergeIdenticalEdges = true;
    return *this;
  }

  CriticalEdgeSplittingOptions &setKeepOneInputPHIs() {
    KeepOneInputPHIs = true;
    return *this;
  }

  CriticalEdgeSplittingOptions &setPreserveLCSSA() {
    PreserveLCSSA = true;
    return *this;
  }

  CriticalEdgeSplittingOptions &setIgnoreUnreachableDests() {
    IgnoreUnreachableDests = true;
    return *this;
  }

  CriticalEdgeSplittingOptions &unsetPreserveLoopSimplify() {
    PreserveLoopSimplify = false;
    return *this;
  }
};
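
// A minimal usage sketch (editorial): split every critical edge in a function
// while keeping the dominator tree and loop info up to date, merging the
// duplicate edges a switch can have to one destination.
// SplitAllCriticalEdges is declared later in this header.
//
//   unsigned NumSplit = SplitAllCriticalEdges(
//       F, CriticalEdgeSplittingOptions(&DT, &LI).setMergeIdenticalEdges());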

/// When a loop exit edge is split, LCSSA form may require new PHIs in the new
/// exit block. This function inserts the new PHIs, as needed. Preds is a list
/// of preds inside the loop, SplitBB is the new loop exit block, and DestBB is
/// the old loop exit, now the successor of SplitBB.
void createPHIsForSplitLoopExit(ArrayRef<BasicBlock *> Preds,
                                BasicBlock *SplitBB, BasicBlock *DestBB);

/// If this edge is a critical edge, insert a new node to split the critical
/// edge. This will update the analyses passed in through the option struct.
/// This returns the new block if the edge was split, null otherwise.
///
/// If MergeIdenticalEdges in the options struct is true (not the default),
/// *all* edges from TI to the specified successor will be merged into the same
/// critical edge block. This is most commonly interesting with switch
/// instructions, which may have many edges to any one destination.  This
/// ensures that all edges to that dest go to one block instead of each going
/// to a different block, but isn't the standard definition of a "critical
/// edge".
///
/// It is invalid to call this function on a critical edge that starts at an
/// IndirectBrInst.  Splitting these edges will almost always create an invalid
/// program because the address of the new block won't be the one that is jumped
/// to.
BasicBlock *SplitCriticalEdge(Instruction *TI, unsigned SuccNum,
                              const CriticalEdgeSplittingOptions &Options =
                                  CriticalEdgeSplittingOptions(),
                              const Twine &BBName = "");

/// If it is known that an edge is critical, SplitKnownCriticalEdge can be
/// called directly, rather than calling SplitCriticalEdge first.
BasicBlock *SplitKnownCriticalEdge(Instruction *TI, unsigned SuccNum,
                                   const CriticalEdgeSplittingOptions &Options =
                                       CriticalEdgeSplittingOptions(),
                                   const Twine &BBName = "");

/// If the edge from Src to Dst is critical, split it and return the newly
/// created block; otherwise return null. This method requires that there be
/// an edge between the two blocks. It updates the analyses passed in through
/// the options struct.
inline BasicBlock *
SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
                  const CriticalEdgeSplittingOptions &Options =
                      CriticalEdgeSplittingOptions()) {
  Instruction *TI = Src->getTerminator();
  unsigned i = 0;
  while (true) {
    assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
    if (TI->getSuccessor(i) == Dst)
      return SplitCriticalEdge(TI, i, Options);
    ++i;
  }
}

/// Loop over all of the edges in the CFG, breaking critical edges as they are
/// found. Returns the number of broken edges.
unsigned SplitAllCriticalEdges(Function &F,
                               const CriticalEdgeSplittingOptions &Options =
                                   CriticalEdgeSplittingOptions());

/// Split the edge connecting the specified blocks, and return the newly created
/// basic block between \p From and \p To.
BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
                      DominatorTree *DT = nullptr, LoopInfo *LI = nullptr,
                      MemorySSAUpdater *MSSAU = nullptr,
                      const Twine &BBName = "");

/// Sets the unwind edge of an instruction to a particular successor.
void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ);

/// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
/// block.
void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                    BasicBlock *NewPred, PHINode *Until = nullptr);

/// Split the edge connecting the specified blocks in the case that \p Succ is
/// an exception-handling block.
BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                             LandingPadInst *OriginalPad = nullptr,
                             PHINode *LandingPadReplacement = nullptr,
                             const CriticalEdgeSplittingOptions &Options =
                                 CriticalEdgeSplittingOptions(),
                             const Twine &BBName = "");

/// Split the specified block at the specified instruction.
///
/// If \p Before is true, splitBlockBefore handles the block
/// splitting. Otherwise, execution proceeds as described below.
///
/// Everything before \p SplitPt stays in \p Old and everything starting with \p
/// SplitPt moves to a new block. The two blocks are joined by an unconditional
/// branch. The new block with name \p BBName is returned.
///
/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, DominatorTree *DT,
                       LoopInfo *LI = nullptr,
                       MemorySSAUpdater *MSSAU = nullptr,
                       const Twine &BBName = "", bool Before = false);

/// Split the specified block at the specified instruction.
///
/// If \p Before is true, splitBlockBefore handles the block
/// splitting. Otherwise, execution proceeds as described below.
///
/// Everything before \p SplitPt stays in \p Old and everything starting with \p
/// SplitPt moves to a new block. The two blocks are joined by an unconditional
/// branch. The new block with name \p BBName is returned.
BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
                       DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
                       MemorySSAUpdater *MSSAU = nullptr,
                       const Twine &BBName = "", bool Before = false);
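
// A minimal sketch (illustrative; `Old`, the split point `I`, and the
// DomTreeUpdater `DTU` are assumed to come from the caller):
//
//   BasicBlock *Tail = SplitBlock(Old, I, &DTU, /*LI=*/nullptr,
//                                 /*MSSAU=*/nullptr, "tail");
//   // Everything from I onward now lives in Tail; Old ends in an
//   // unconditional branch to it.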

/// Split the specified block at the specified instruction \p SplitPt.
/// All instructions before \p SplitPt are moved to a new block and all
/// instructions after \p SplitPt stay in the old block. The new block and the
/// old block are joined by inserting an unconditional branch to the end of the
/// new block. The new block with name \p BBName is returned.
BasicBlock *splitBlockBefore(BasicBlock *Old, Instruction *SplitPt,
                             DomTreeUpdater *DTU, LoopInfo *LI,
                             MemorySSAUpdater *MSSAU, const Twine &BBName = "");

/// This method introduces at least one new basic block into the function and
/// moves some of the predecessors of BB to be predecessors of the new block.
/// The new predecessors are indicated by the Preds array. The new block is
/// given a suffix of 'Suffix'. Returns the new basic block to which the
/// predecessors from Preds now point.
///
/// If BB is a landingpad block, then an additional basic block might be introduced.
/// It will have Suffix+".split_lp". See SplitLandingPadPredecessors for more
/// details on this case.
///
/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
/// no other analyses. In particular, it does not preserve LoopSimplify
/// (because it's complicated to handle the case where one of the edges being
/// split is an exit of a loop with other exits).
///
/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
                                   const char *Suffix, DominatorTree *DT,
                                   LoopInfo *LI = nullptr,
                                   MemorySSAUpdater *MSSAU = nullptr,
                                   bool PreserveLCSSA = false);

/// This method introduces at least one new basic block into the function and
/// moves some of the predecessors of BB to be predecessors of the new block.
/// The new predecessors are indicated by the Preds array. The new block is
/// given a suffix of 'Suffix'. Returns the new basic block to which the
/// predecessors from Preds now point.
///
/// If BB is a landingpad block, then an additional basic block might be introduced.
/// It will have Suffix+".split_lp". See SplitLandingPadPredecessors for more
/// details on this case.
///
/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
/// no other analyses. In particular, it does not preserve LoopSimplify
/// (because it's complicated to handle the case where one of the edges being
/// split is an exit of a loop with other exits).
BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
                                   const char *Suffix,
                                   DomTreeUpdater *DTU = nullptr,
                                   LoopInfo *LI = nullptr,
                                   MemorySSAUpdater *MSSAU = nullptr,
                                   bool PreserveLCSSA = false);
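
// A minimal sketch (illustrative; `BB`, the predecessor array `Preds`, and
// `DTU` are assumed to come from the caller):
//
//   BasicBlock *NewPred = SplitBlockPredecessors(BB, Preds, ".split", &DTU);
//   // All edges from Preds now enter NewPred, which branches to BB, and the
//   // PHI nodes in BB have been updated accordingly.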

/// This method transforms the landing pad, OrigBB, by introducing two new basic
/// blocks into the function. One of those new basic blocks gets the
/// predecessors listed in Preds. The other basic block gets the remaining
/// predecessors of OrigBB. The landingpad instruction in OrigBB is cloned into
/// both of the new basic blocks. The new blocks are given the suffixes 'Suffix'
/// and 'Suffix2', and are returned in the NewBBs vector.
///
/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
/// no other analyses. In particular, it does not preserve LoopSimplify
/// (because it's complicated to handle the case where one of the edges being
/// split is an exit of a loop with other exits).
///
/// FIXME: deprecated, switch to the DomTreeUpdater-based one.
void SplitLandingPadPredecessors(BasicBlock *OrigBB,
                                 ArrayRef<BasicBlock *> Preds,
                                 const char *Suffix, const char *Suffix2,
                                 SmallVectorImpl<BasicBlock *> &NewBBs,
                                 DominatorTree *DT, LoopInfo *LI = nullptr,
                                 MemorySSAUpdater *MSSAU = nullptr,
                                 bool PreserveLCSSA = false);

/// This method transforms the landing pad, OrigBB, by introducing two new basic
/// blocks into the function. One of those new basic blocks gets the
/// predecessors listed in Preds. The other basic block gets the remaining
/// predecessors of OrigBB. The landingpad instruction in OrigBB is cloned into
/// both of the new basic blocks. The new blocks are given the suffixes 'Suffix'
/// and 'Suffix2', and are returned in the NewBBs vector.
///
/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
/// no other analyses. In particular, it does not preserve LoopSimplify
/// (because it's complicated to handle the case where one of the edges being
/// split is an exit of a loop with other exits).
void SplitLandingPadPredecessors(
    BasicBlock *OrigBB, ArrayRef<BasicBlock *> Preds, const char *Suffix,
    const char *Suffix2, SmallVectorImpl<BasicBlock *> &NewBBs,
    DomTreeUpdater *DTU = nullptr, LoopInfo *LI = nullptr,
    MemorySSAUpdater *MSSAU = nullptr, bool PreserveLCSSA = false);

/// This method duplicates the specified return instruction into a predecessor
/// which ends in an unconditional branch. If the return instruction returns a
/// value defined by a PHI, propagate the right value into the return. It
/// returns the new return instruction in the predecessor.
ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
                                       BasicBlock *Pred,
                                       DomTreeUpdater *DTU = nullptr);

/// Split the containing block at the specified instruction - everything before
/// SplitBefore stays in the old basic block, and the rest of the instructions
/// in the BB are moved to a new block. The two blocks are connected by a
/// conditional branch (with the value of \p Cond being the condition).
/// Before:
///   Head
///   SplitBefore
///   Tail
/// After:
///   Head
///   if (Cond)
///     ThenBlock
///   SplitBefore
///   Tail
///
/// If \p ThenBlock is not specified, a new block will be created for it.
/// If \p Unreachable is true, the newly created block will end with
/// UnreachableInst, otherwise it branches to Tail.
/// Returns the NewBasicBlock's terminator.
///
/// Updates DTU and LI if given.
Instruction *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
                                       bool Unreachable,
                                       MDNode *BranchWeights = nullptr,
                                       DomTreeUpdater *DTU = nullptr,
                                       LoopInfo *LI = nullptr,
                                       BasicBlock *ThenBlock = nullptr);
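
// A minimal sketch (illustrative; `Cond` and the split point `InsertPt` are
// assumed; IRBuilder comes from llvm/IR/IRBuilder.h):
//
//   Instruction *ThenTerm =
//       SplitBlockAndInsertIfThen(Cond, InsertPt, /*Unreachable=*/false);
//   IRBuilder<> Builder(ThenTerm);
//   // ... emit the conditionally executed code before ThenTerm ...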

/// Similar to SplitBlockAndInsertIfThen, but the inserted block is on the false
/// path of the branch.
Instruction *SplitBlockAndInsertIfElse(Value *Cond, Instruction *SplitBefore,
                                       bool Unreachable,
                                       MDNode *BranchWeights = nullptr,
                                       DomTreeUpdater *DTU = nullptr,
                                       LoopInfo *LI = nullptr,
                                       BasicBlock *ElseBlock = nullptr);

/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
/// but also creates the ElseBlock.
/// Before:
///   Head
///   SplitBefore
///   Tail
/// After:
///   Head
///   if (Cond)
///     ThenBlock
///   else
///     ElseBlock
///   SplitBefore
///   Tail
///
/// Updates DT if given.
void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
                                   Instruction **ThenTerm,
                                   Instruction **ElseTerm,
                                   MDNode *BranchWeights = nullptr,
                                   DomTreeUpdater *DTU = nullptr,
                                   LoopInfo *LI = nullptr);

/// Split the containing block at the specified instruction - everything before
/// SplitBefore stays in the old basic block, and the rest of the instructions
/// in the BB are moved to a new block. The two blocks are connected by a
/// conditional branch (with the value of \p Cond being the condition).
/// Before:
///   Head
///   SplitBefore
///   Tail
/// After:
///   Head
///   if (Cond)
///     TrueBlock
///   else
///    FalseBlock
///   SplitBefore
///   Tail
///
/// If \p ThenBlock is null, the resulting CFG won't contain the TrueBlock. If
/// \p ThenBlock is non-null and points to a non-null BasicBlock pointer, that
/// block will be inserted as the TrueBlock. Otherwise a new block will be
/// created. Likewise for the \p ElseBlock parameter.
/// If \p UnreachableThen or \p UnreachableElse is true, the corresponding newly
/// created blocks will end with UnreachableInst, otherwise with branches to
/// Tail. The function will not modify existing basic blocks passed to it. The
/// caller must ensure that Tail is reachable from Head.
/// Returns the newly created blocks in \p ThenBlock and \p ElseBlock.
/// Updates DTU and LI if given.
void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
                                   BasicBlock **ThenBlock,
                                   BasicBlock **ElseBlock,
                                   bool UnreachableThen = false,
                                   bool UnreachableElse = false,
                                   MDNode *BranchWeights = nullptr,
                                   DomTreeUpdater *DTU = nullptr,
                                   LoopInfo *LI = nullptr);

/// Insert a for (int i = 0; i < End; i++) loop structure (with the exception
/// that \p End is assumed > 0, and thus not checked on entry) at \p
/// SplitBefore.  Returns the first insert point in the loop body, and the
/// PHINode for the induction variable (i.e. "i" above).
std::pair<Instruction*, Value*>
SplitBlockAndInsertSimpleForLoop(Value *End, Instruction *SplitBefore);
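
// A minimal sketch (illustrative; `End` is an assumed positive trip count and
// `SplitBefore` the split point):
//
//   auto [BodyIP, IV] = SplitBlockAndInsertSimpleForLoop(End, SplitBefore);
//   IRBuilder<> Builder(BodyIP);
//   // ... emit the per-iteration body here, using the induction variable IV ...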

/// Utility function for performing a given action on each lane of a vector
/// with \p EC elements.  To simplify porting legacy code, this defaults to
/// unrolling the implied loop for non-scalable element counts, but this is
/// not considered to be part of the contract of this routine, and is
/// expected to change in the future. The callback takes as arguments an
/// IRBuilder whose insert point is correctly set for instantiating the
/// given index, and a value which is (at runtime) the index to access.
/// This index *may* be a constant.
void SplitBlockAndInsertForEachLane(ElementCount EC, Type *IndexTy,
    Instruction *InsertBefore,
    std::function<void(IRBuilderBase&, Value*)> Func);

/// Utility function for performing a given action on each lane of a vector
/// with \p EVL effective length. EVL is assumed > 0. To simplify porting legacy
/// code, this defaults to unrolling the implied loop for non-scalable element
/// counts, but this is not considered to be part of the contract of this
/// routine, and is expected to change in the future. The callback takes as
/// arguments an IRBuilder whose insert point is correctly set for instantiating
/// the given index, and a value which is (at runtime) the index to access. This
/// index *may* be a constant.
void SplitBlockAndInsertForEachLane(
    Value *End, Instruction *InsertBefore,
    std::function<void(IRBuilderBase &, Value *)> Func);
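
// A minimal sketch of the ElementCount overload (illustrative; `EC`, the
// LLVMContext `Ctx`, and `InsertPt` are assumed; the `Value *End` overload
// directly above is used analogously):
//
//   SplitBlockAndInsertForEachLane(
//       EC, Type::getInt64Ty(Ctx), InsertPt,
//       [&](IRBuilderBase &B, Value *Idx) {
//         // ... emit the per-lane work here; Idx is the lane index ...
//       });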

/// Check whether BB is the merge point of an if-region.
/// If so, return the branch instruction that determines which entry into
/// BB will be taken.  Also, return by reference the block that will be
/// entered if the condition is true, and the block that will be
/// entered if the condition is false.
///
/// This does no checking to see if the true/false blocks have large or unsavory
/// instructions in them.
BranchInst *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
                           BasicBlock *&IfFalse);
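
// A short sketch (illustrative; `MergeBB` is a hypothetical merge block):
//
//   BasicBlock *IfTrue = nullptr, *IfFalse = nullptr;
//   if (BranchInst *BI = GetIfCondition(MergeBB, IfTrue, IfFalse)) {
//     // MergeBB merges an if-region; BI->getCondition() selects between
//     // the paths through IfTrue and IfFalse.
//   }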

// Split critical edges where the source of the edge is an indirectbr
// instruction. This isn't always possible, but we can handle some easy cases.
// This is useful because machine-level IR (MI) is unable to split such
// critical edges, which means it will not be able to sink instructions along
// those edges.
// This is especially painful for indirect branches with many successors, where
// we end up having to prepare all outgoing values in the origin block.
//
// Our normal algorithm for splitting critical edges requires us to update
// the outgoing edges of the edge origin block, but for an indirectbr this
// is hard, since it would require finding and updating the block addresses
// the indirect branch uses. But if a block only has a single indirectbr
// predecessor, with the others being regular branches, we can do it in a
// different way.
// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
// We can split D into D0 and D1, where D0 contains only the PHIs from D,
// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
// create the following structure:
// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
// If BPI and BFI are non-null, they will be updated accordingly.
// When `IgnoreBlocksWithoutPHI` is set to `true`, critical edges leading to a
// block without phi-instructions will not be split.
bool SplitIndirectBrCriticalEdges(Function &F, bool IgnoreBlocksWithoutPHI,
                                  BranchProbabilityInfo *BPI = nullptr,
                                  BlockFrequencyInfo *BFI = nullptr);

/// Given a set of incoming and outgoing blocks, create a "hub" such that every
/// edge from an incoming block InBB to an outgoing block OutBB is now split
/// into two edges, one from InBB to the hub and another from the hub to
/// OutBB. The hub consists of a series of guard blocks, one for each outgoing
/// block. Each guard block conditionally branches to the corresponding outgoing
/// block, or the next guard block in the chain. These guard blocks are returned
/// in the argument vector.
///
/// Since the control flow edges from InBB to OutBB have now been replaced, the
/// function also updates any PHINodes in OutBB. For each such PHINode, the
/// operands corresponding to incoming blocks are moved to a new PHINode in the
/// hub, and the hub is made an operand of the original PHINode.
///
/// Input CFG:
/// ----------
///
///                    Def
///                     |
///                     v
///           In1      In2
///            |        |
///            |        |
///            v        v
///  Foo ---> Out1     Out2
///                     |
///                     v
///                    Use
///
///
/// Create hub: Incoming = {In1, In2}, Outgoing = {Out1, Out2}
/// ----------------------------------------------------------
///
///             Def
///              |
///              v
///  In1        In2          Foo
///   |    Hub   |            |
///   |    + - - | - - +      |
///   |    '     v     '      V
///   +------> Guard1 -----> Out1
///        '     |     '
///        '     v     '
///        '   Guard2 -----> Out2
///        '           '      |
///        + - - - - - +      |
///                           v
///                          Use
///
/// Limitations:
/// -----------
/// 1. This assumes that all terminators in the CFG are direct branches (the
///    "br" instruction). The presence of any other control flow such as
///    indirectbr, switch or callbr will cause an assert.
///
/// 2. The updates to the PHINodes are not sufficient to restore SSA
///    form. Consider a definition Def, its use Use, incoming block In2 and
///    outgoing block Out2, such that:
///    a. In2 is reachable from Def or contains Def.
///    b. Use is reachable from Out2 or is contained in Out2.
///    c. Use is not a PHINode if it is contained in Out2.
///
///    Clearly, Def dominates Out2 since the program is valid SSA. But when the
///    hub is introduced, there is a new path through the hub along which Use is
///    reachable from entry without passing through Def, and SSA is no longer
///    valid. To fix this, we need to look at all the blocks post-dominated by
///    the hub on the one hand, and dominated by Out2 on the other. This is left
///    for the caller to accomplish, since each specific use of this function
///    may have additional information which simplifies this fixup. For example,
///    see restoreSSA() in the UnifyLoopExits pass.
BasicBlock *CreateControlFlowHub(
    DomTreeUpdater *DTU, SmallVectorImpl<BasicBlock *> &GuardBlocks,
    const SetVector<BasicBlock *> &Predecessors,
    const SetVector<BasicBlock *> &Successors, const StringRef Prefix,
    std::optional<unsigned> MaxControlFlowBooleans = std::nullopt);
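
// A minimal sketch (illustrative; `DTU` and the SetVectors `Incoming` and
// `Outgoing` are assumed to be prepared by the caller):
//
//   SmallVector<BasicBlock *, 8> GuardBlocks;
//   BasicBlock *FirstGuard = CreateControlFlowHub(&DTU, GuardBlocks,
//                                                 Incoming, Outgoing, "hub");
//   // Every Incoming -> Outgoing edge now passes through the guard chain
//   // starting at FirstGuard.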

// Utility function for inverting a branch condition and swapping its
// successors.
void InvertBranch(BranchInst *PBI, IRBuilderBase &Builder);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
//===- SCCPSolver.h - SCCP Utility ----------------------------- *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements the Sparse Conditional Constant Propagation (SCCP)
// utility.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SCCPSOLVER_H
#define LLVM_TRANSFORMS_UTILS_SCCPSOLVER_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include <vector>

namespace llvm {
class Argument;
class BasicBlock;
class CallInst;
class Constant;
class DataLayout;
class DominatorTree;
class Function;
class GlobalVariable;
class Instruction;
class LLVMContext;
class LoopInfo;
class PostDominatorTree;
class StructType;
class TargetLibraryInfo;
class Value;
class ValueLatticeElement;

/// Helper struct shared between Function Specialization and SCCP Solver.
struct ArgInfo {
  Argument *Formal; // The Formal argument being analysed.
  Constant *Actual; // A corresponding actual constant argument.

  ArgInfo(Argument *F, Constant *A) : Formal(F), Actual(A) {}

  bool operator==(const ArgInfo &Other) const {
    return Formal == Other.Formal && Actual == Other.Actual;
  }

  bool operator!=(const ArgInfo &Other) const { return !(*this == Other); }

  friend hash_code hash_value(const ArgInfo &A) {
    return hash_combine(hash_value(A.Formal), hash_value(A.Actual));
  }
};

class SCCPInstVisitor;

//===----------------------------------------------------------------------===//
//
/// SCCPSolver - This interface class is a general purpose solver for Sparse
/// Conditional Constant Propagation (SCCP).
///
class SCCPSolver {
  std::unique_ptr<SCCPInstVisitor> Visitor;

public:
  SCCPSolver(const DataLayout &DL,
             std::function<const TargetLibraryInfo &(Function &)> GetTLI,
             LLVMContext &Ctx);

  ~SCCPSolver();

  void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC);

  /// markBlockExecutable - This method can be used by clients to mark all of
  /// the blocks that are known to be intrinsically live in the processed unit.
  /// This returns true if the block was not considered live before.
  bool markBlockExecutable(BasicBlock *BB);

  const PredicateBase *getPredicateInfoFor(Instruction *I);

  /// trackValueOfGlobalVariable - Clients can use this method to
  /// inform the SCCPSolver that it should track loads and stores to the
  /// specified global variable if it can.  This is only legal to call if
  /// performing Interprocedural SCCP.
  void trackValueOfGlobalVariable(GlobalVariable *GV);

  /// addTrackedFunction - If the SCCP solver is supposed to track calls into
  /// and out of the specified function (which cannot have its address taken),
  /// this method must be called.
  void addTrackedFunction(Function *F);

  /// Add function to the list of functions whose return cannot be modified.
  void addToMustPreserveReturnsInFunctions(Function *F);

  /// Returns true if the return of the given function cannot be modified.
  bool mustPreserveReturn(Function *F);

  void addArgumentTrackedFunction(Function *F);

  /// Returns true if the given function is in the solver's set of
  /// argument-tracked functions.
  bool isArgumentTrackedFunction(Function *F);

  /// Solve - Solve for constants and executable blocks.
  void solve();

  /// resolvedUndefsIn - While solving the dataflow for a function, we assume
  /// that branches on undef values cannot reach any of their successors.
  /// However, this is not a safe assumption.  After we solve dataflow, this
  /// method should be used to handle this.  If this returns true, the solver
  /// should be rerun (see the sketch below).
  bool resolvedUndefsIn(Function &F);
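
  // The intended driver pattern, sketched (illustrative; `Solver` is an
  // SCCPSolver and `F` the function being analyzed):
  //
  //   Solver.solve();
  //   while (Solver.resolvedUndefsIn(F))
  //     Solver.solve();
  //
  // The solveWhileResolvedUndefs* helpers below wrap this fixed-point loop.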

  void solveWhileResolvedUndefsIn(Module &M);

  void solveWhileResolvedUndefsIn(SmallVectorImpl<Function *> &WorkList);

  void solveWhileResolvedUndefs();

  bool isBlockExecutable(BasicBlock *BB) const;

  // isEdgeFeasible - Return true if the control flow edge from the 'From' basic
  // block to the 'To' basic block is currently feasible.
  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;

  std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const;

  void removeLatticeValueFor(Value *V);

  /// Invalidate the Lattice Value of \p Call and its users after specializing
  /// the call. Then recompute it.
  void resetLatticeValueFor(CallBase *Call);

  const ValueLatticeElement &getLatticeValueFor(Value *V) const;

  /// getTrackedRetVals - Get the inferred return value map.
  const MapVector<Function *, ValueLatticeElement> &getTrackedRetVals();

  /// getTrackedGlobals - Get and return the set of inferred initializers for
  /// global variables.
  const DenseMap<GlobalVariable *, ValueLatticeElement> &getTrackedGlobals();

  /// getMRVFunctionsTracked - Get the set of functions which return multiple
  /// values tracked by the pass.
  const SmallPtrSet<Function *, 16> getMRVFunctionsTracked();

  /// markOverdefined - Mark the specified value overdefined.  This
  /// works with both scalars and structs.
  void markOverdefined(Value *V);

  // isStructLatticeConstant - Return true if all the lattice values
  // corresponding to elements of the structure are constants,
  // false otherwise.
  bool isStructLatticeConstant(Function *F, StructType *STy);

  /// Helper to return a Constant if \p LV is either a constant or a constant
  /// range with a single element.
  Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;

  /// Return either a Constant or nullptr for a given Value.
  Constant *getConstantOrNull(Value *V) const;

  /// Return a reference to the set of argument tracked functions.
  SmallPtrSetImpl<Function *> &getArgumentTrackedFunctions();

  /// Set the Lattice Value for the arguments of a specialization \p F.
  /// If an argument is Constant then its lattice value is marked with the
  /// corresponding actual argument in \p Args. Otherwise, its lattice value
  /// is inherited (copied) from the corresponding formal argument in \p Args.
  void setLatticeValueForSpecializationArguments(Function *F,
                                       const SmallVectorImpl<ArgInfo> &Args);

  /// Mark all of the blocks in function \p F non-executable. Clients can use
  /// this method to erase a function from the module (e.g., if it has been
  /// completely specialized and is no longer needed).
  void markFunctionUnreachable(Function *F);

  void visit(Instruction *I);
  void visitCall(CallInst &I);

  bool simplifyInstsInBlock(BasicBlock &BB,
                            SmallPtrSetImpl<Value *> &InsertedValues,
                            Statistic &InstRemovedStat,
                            Statistic &InstReplacedStat);

  bool removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU,
                              BasicBlock *&NewUnreachableBB) const;

  bool tryToReplaceWithConstant(Value *V);

  // Helper to check if \p LV is either a constant or a constant
  // range with a single element. This should cover exactly the same cases as
  // the old ValueLatticeElement::isConstant() and is intended to be used in the
  // transition to ValueLatticeElement.
  static bool isConstant(const ValueLatticeElement &LV);

  // Helper to check if \p LV is either overdefined or a constant range with
  // more than a single element. This should cover exactly the same cases as the
  // old ValueLatticeElement::isOverdefined() and is intended to be used in the
  // transition to ValueLatticeElement.
  static bool isOverdefined(const ValueLatticeElement &LV);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SCCPSOLVER_H
//===- SSAUpdaterImpl.h - SSA Updater Implementation ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a template that implements the core algorithm for the
// SSAUpdater and MachineSSAUpdater.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "ssaupdater"

namespace llvm {

template<typename T> class SSAUpdaterTraits;

template<typename UpdaterT>
class SSAUpdaterImpl {
private:
  UpdaterT *Updater;

  using Traits = SSAUpdaterTraits<UpdaterT>;
  using BlkT = typename Traits::BlkT;
  using ValT = typename Traits::ValT;
  using PhiT = typename Traits::PhiT;

  /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
  /// The predecessors of each block are cached here since pred_iterator is
  /// slow and we need to iterate over the blocks at least a few times.
  class BBInfo {
  public:
    // Back-pointer to the corresponding block.
    BlkT *BB;

    // Value to use in this block.
    ValT AvailableVal;

    // Block that defines the available value.
    BBInfo *DefBB;

    // Postorder number.
    int BlkNum = 0;

    // Immediate dominator.
    BBInfo *IDom = nullptr;

    // Number of predecessor blocks.
    unsigned NumPreds = 0;

    // Array[NumPreds] of predecessor blocks.
    BBInfo **Preds = nullptr;

    // Marker for existing PHIs that match.
    PhiT *PHITag = nullptr;

    BBInfo(BlkT *ThisBB, ValT V)
      : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr) {}
  };

  using AvailableValsTy = DenseMap<BlkT *, ValT>;

  AvailableValsTy *AvailableVals;

  SmallVectorImpl<PhiT *> *InsertedPHIs;

  using BlockListTy = SmallVectorImpl<BBInfo *>;
  using BBMapTy = DenseMap<BlkT *, BBInfo *>;

  BBMapTy BBMap;
  BumpPtrAllocator Allocator;

public:
  explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
                          SmallVectorImpl<PhiT *> *Ins) :
    Updater(U), AvailableVals(A), InsertedPHIs(Ins) {}

  /// GetValue - Check to see if AvailableVals has an entry for the specified
  /// BB and if so, return it.  If not, construct SSA form by first
  /// calculating the required placement of PHIs and then inserting new PHIs
  /// where needed.
  ValT GetValue(BlkT *BB) {
    SmallVector<BBInfo *, 100> BlockList;
    BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);

    // Special case: bail out if BB is unreachable.
    if (BlockList.size() == 0) {
      ValT V = Traits::GetUndefVal(BB, Updater);
      (*AvailableVals)[BB] = V;
      return V;
    }

    FindDominators(&BlockList, PseudoEntry);
    FindPHIPlacement(&BlockList);
    FindAvailableVals(&BlockList);

    return BBMap[BB]->DefBB->AvailableVal;
  }

  /// BuildBlockList - Starting from the specified basic block, traverse back
  /// through its predecessors until reaching blocks with known values.
  /// Create BBInfo structures for the blocks and append them to the block
  /// list.
  BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
    SmallVector<BBInfo *, 10> RootList;
    SmallVector<BBInfo *, 64> WorkList;

    BBInfo *Info = new (Allocator) BBInfo(BB, 0);
    BBMap[BB] = Info;
    WorkList.push_back(Info);

    // Search backward from BB, creating BBInfos along the way and stopping
    // when reaching blocks that define the value.  Record those defining
    // blocks on the RootList.
    SmallVector<BlkT *, 10> Preds;
    while (!WorkList.empty()) {
      Info = WorkList.pop_back_val();
      Preds.clear();
      Traits::FindPredecessorBlocks(Info->BB, &Preds);
      Info->NumPreds = Preds.size();
      if (Info->NumPreds == 0)
        Info->Preds = nullptr;
      else
        Info->Preds = static_cast<BBInfo **>(Allocator.Allocate(
            Info->NumPreds * sizeof(BBInfo *), alignof(BBInfo *)));

      for (unsigned p = 0; p != Info->NumPreds; ++p) {
        BlkT *Pred = Preds[p];
        // Check if BBMap already has a BBInfo for the predecessor block.
        typename BBMapTy::value_type &BBMapBucket =
          BBMap.FindAndConstruct(Pred);
        if (BBMapBucket.second) {
          Info->Preds[p] = BBMapBucket.second;
          continue;
        }

        // Create a new BBInfo for the predecessor.
        ValT PredVal = AvailableVals->lookup(Pred);
        BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
        BBMapBucket.second = PredInfo;
        Info->Preds[p] = PredInfo;

        if (PredInfo->AvailableVal) {
          RootList.push_back(PredInfo);
          continue;
        }
        WorkList.push_back(PredInfo);
      }
    }

    // Now that we know what blocks are backwards-reachable from the starting
    // block, do a forward depth-first traversal to assign postorder numbers
    // to those blocks.
    BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
    unsigned BlkNum = 1;

    // Initialize the worklist with the roots from the backward traversal.
    while (!RootList.empty()) {
      Info = RootList.pop_back_val();
      Info->IDom = PseudoEntry;
      Info->BlkNum = -1;
      WorkList.push_back(Info);
    }

    while (!WorkList.empty()) {
      Info = WorkList.back();

      if (Info->BlkNum == -2) {
        // All the successors have been handled; assign the postorder number.
        Info->BlkNum = BlkNum++;
        // If not a root, put it on the BlockList.
        if (!Info->AvailableVal)
          BlockList->push_back(Info);
        WorkList.pop_back();
        continue;
      }

      // Leave this entry on the worklist, but set its BlkNum to mark that its
      // successors have been put on the worklist.  When it returns to the top
      // of the list, after handling its successors, it will be assigned a
      // number.
      Info->BlkNum = -2;

      // Add unvisited successors to the work list.
      for (typename Traits::BlkSucc_iterator SI =
             Traits::BlkSucc_begin(Info->BB),
             E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
        BBInfo *SuccInfo = BBMap[*SI];
        if (!SuccInfo || SuccInfo->BlkNum)
          continue;
        SuccInfo->BlkNum = -1;
        WorkList.push_back(SuccInfo);
      }
    }
    PseudoEntry->BlkNum = BlkNum;
    return PseudoEntry;
  }

  /// IntersectDominators - This is the dataflow lattice "meet" operation for
  /// finding dominators.  Given two basic blocks, it walks up the dominator
  /// tree until it finds a common dominator of both.  It uses the postorder
  /// number of the blocks to determine how to do that.
  BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
    while (Blk1 != Blk2) {
      while (Blk1->BlkNum < Blk2->BlkNum) {
        Blk1 = Blk1->IDom;
        if (!Blk1)
          return Blk2;
      }
      while (Blk2->BlkNum < Blk1->BlkNum) {
        Blk2 = Blk2->IDom;
        if (!Blk2)
          return Blk1;
      }
    }
    return Blk1;
  }

  /// FindDominators - Calculate the dominator tree for the subset of the CFG
  /// corresponding to the basic blocks on the BlockList.  This uses the
  /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
  /// and Kennedy, published in Software--Practice and Experience, 2001,
  /// 4:1-10.  Because the CFG subset does not include any edges leading into
  /// blocks that define the value, the results are not the usual dominator
  /// tree.  The CFG subset has a single pseudo-entry node with edges to a set
  /// of root nodes for blocks that define the value.  The dominators for this
  /// subset CFG are not the standard dominators but they are adequate for
  /// placing PHIs within the subset CFG.
  void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
    bool Changed;
    do {
      Changed = false;
      // Iterate over the list in reverse order, i.e., forward on CFG edges.
      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
             E = BlockList->rend(); I != E; ++I) {
        BBInfo *Info = *I;
        BBInfo *NewIDom = nullptr;

        // Iterate through the block's predecessors.
        for (unsigned p = 0; p != Info->NumPreds; ++p) {
          BBInfo *Pred = Info->Preds[p];

          // Treat an unreachable predecessor as a definition with 'undef'.
          if (Pred->BlkNum == 0) {
            Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
            (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
            Pred->DefBB = Pred;
            Pred->BlkNum = PseudoEntry->BlkNum;
            PseudoEntry->BlkNum++;
          }

          if (!NewIDom)
            NewIDom = Pred;
          else
            NewIDom = IntersectDominators(NewIDom, Pred);
        }

        // Check if the IDom value has changed.
        if (NewIDom && NewIDom != Info->IDom) {
          Info->IDom = NewIDom;
          Changed = true;
        }
      }
    } while (Changed);
  }

  /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
  /// any blocks containing definitions of the value.  If one is found, then
  /// the successor of Pred is in the dominance frontier for the definition,
  /// and this function returns true.
  bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
    for (; Pred != IDom; Pred = Pred->IDom) {
      if (Pred->DefBB == Pred)
        return true;
    }
    return false;
  }

  /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
  /// of the known definitions.  Iteratively add PHIs in the dom frontiers
  /// until nothing changes.  Along the way, keep track of the nearest
  /// dominating definitions for non-PHI blocks.
  void FindPHIPlacement(BlockListTy *BlockList) {
    bool Changed;
    do {
      Changed = false;
      // Iterate over the list in reverse order, i.e., forward on CFG edges.
      for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
             E = BlockList->rend(); I != E; ++I) {
        BBInfo *Info = *I;

        // If this block already needs a PHI, there is nothing to do here.
        if (Info->DefBB == Info)
          continue;

        // Default to use the same def as the immediate dominator.
        BBInfo *NewDefBB = Info->IDom->DefBB;
        for (unsigned p = 0; p != Info->NumPreds; ++p) {
          if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
            // Need a PHI here.
            NewDefBB = Info;
            break;
          }
        }

        // Check if anything changed.
        if (NewDefBB != Info->DefBB) {
          Info->DefBB = NewDefBB;
          Changed = true;
        }
      }
    } while (Changed);
  }

  /// Check all predecessors and, if all of them have the same AvailableVal,
  /// use it as the value for the block represented by Info. Return true if a
  /// singular value is found.
  bool FindSingularVal(BBInfo *Info) {
    if (!Info->NumPreds)
      return false;
    ValT Singular = Info->Preds[0]->DefBB->AvailableVal;
    if (!Singular)
      return false;
    for (unsigned Idx = 1; Idx < Info->NumPreds; ++Idx) {
      ValT PredVal = Info->Preds[Idx]->DefBB->AvailableVal;
      if (!PredVal || Singular != PredVal)
        return false;
    }
    // Record Singular value.
    (*AvailableVals)[Info->BB] = Singular;
    assert(BBMap[Info->BB] == Info && "Info missed in BBMap?");
    Info->AvailableVal = Singular;
    Info->DefBB = Info->Preds[0]->DefBB;
    return true;
  }

  /// FindAvailableVal - If this block requires a PHI, first check if an
  /// existing PHI matches the PHI placement and reaching definitions computed
  /// earlier, and if not, create a new PHI.  Visit all the block's
  /// predecessors to calculate the available value for each one and fill in
  /// the incoming values for a new PHI.
  void FindAvailableVals(BlockListTy *BlockList) {
    // Go through the worklist in forward order (i.e., backward through the CFG)
    // and check if existing PHIs can be used.  If not, create empty PHIs where
    // they are needed.
    for (typename BlockListTy::iterator I = BlockList->begin(),
           E = BlockList->end(); I != E; ++I) {
      BBInfo *Info = *I;
      // Check if there needs to be a PHI in BB.
      if (Info->DefBB != Info)
        continue;

      // Look for singular value.
      if (FindSingularVal(Info))
        continue;

      // Look for an existing PHI.
      FindExistingPHI(Info->BB, BlockList);
      if (Info->AvailableVal)
        continue;

      ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
      Info->AvailableVal = PHI;
      (*AvailableVals)[Info->BB] = PHI;
    }

    // Now go back through the worklist in reverse order to fill in the
    // arguments for any new PHIs added in the forward traversal.
    for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
           E = BlockList->rend(); I != E; ++I) {
      BBInfo *Info = *I;

      if (Info->DefBB != Info) {
        // Record the available value to speed up subsequent uses of this
        // SSAUpdater for the same value.
        (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
        continue;
      }

      // Check if this block contains a newly added PHI.
      PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
      if (!PHI)
        continue;

      // Iterate through the block's predecessors.
      for (unsigned p = 0; p != Info->NumPreds; ++p) {
        BBInfo *PredInfo = Info->Preds[p];
        BlkT *Pred = PredInfo->BB;
        // Skip to the nearest preceding definition.
        if (PredInfo->DefBB != PredInfo)
          PredInfo = PredInfo->DefBB;
        Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
      }

      LLVM_DEBUG(dbgs() << "  Inserted PHI: " << *PHI << "\n");

      // If the client wants to know about all new instructions, tell it.
      if (InsertedPHIs) InsertedPHIs->push_back(PHI);
    }
  }

  /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
  /// them match what is needed.
  void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
    for (auto &SomePHI : BB->phis()) {
      if (CheckIfPHIMatches(&SomePHI)) {
        RecordMatchingPHIs(BlockList);
        break;
      }
      // Match failed: clear all the PHITag values.
      for (typename BlockListTy::iterator I = BlockList->begin(),
             E = BlockList->end(); I != E; ++I)
        (*I)->PHITag = nullptr;
    }
  }

  /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
  /// in the BBMap.
  bool CheckIfPHIMatches(PhiT *PHI) {
    SmallVector<PhiT *, 20> WorkList;
    WorkList.push_back(PHI);

    // Mark that the block containing this PHI has been visited.
    BBMap[PHI->getParent()]->PHITag = PHI;

    while (!WorkList.empty()) {
      PHI = WorkList.pop_back_val();

      // Iterate through the PHI's incoming values.
      for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
             E = Traits::PHI_end(PHI); I != E; ++I) {
        ValT IncomingVal = I.getIncomingValue();
        BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
        // Skip to the nearest preceding definition.
        if (PredInfo->DefBB != PredInfo)
          PredInfo = PredInfo->DefBB;

        // Check if it matches the expected value.
        if (PredInfo->AvailableVal) {
          if (IncomingVal == PredInfo->AvailableVal)
            continue;
          return false;
        }

        // Check if the value is a PHI in the correct block.
        PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
        if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
          return false;

        // If this block has already been visited, check if this PHI matches.
        if (PredInfo->PHITag) {
          if (IncomingPHIVal == PredInfo->PHITag)
            continue;
          return false;
        }
        PredInfo->PHITag = IncomingPHIVal;

        WorkList.push_back(IncomingPHIVal);
      }
    }
    return true;
  }

  /// RecordMatchingPHIs - For each PHI node that matches, record it in both
  /// the BBMap and the AvailableVals mapping.
  void RecordMatchingPHIs(BlockListTy *BlockList) {
    for (typename BlockListTy::iterator I = BlockList->begin(),
           E = BlockList->end(); I != E; ++I)
      if (PhiT *PHI = (*I)->PHITag) {
        BlkT *BB = PHI->getParent();
        ValT PHIVal = Traits::GetPHIValue(PHI);
        (*AvailableVals)[BB] = PHIVal;
        BBMap[BB]->AvailableVal = PHIVal;
      }
  }
};

} // end namespace llvm

#undef DEBUG_TYPE // "ssaupdater"

#endif // LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
//===- MemoryTaggingSupport.h - helpers for memory tagging implementations ===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares common infrastructure for HWAddressSanitizer and
// Aarch64StackTagging.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_MEMORYTAGGINGSUPPORT_H
#define LLVM_TRANSFORMS_UTILS_MEMORYTAGGINGSUPPORT_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Support/Alignment.h"

namespace llvm {
class DominatorTree;
class DbgVariableIntrinsic;
class IntrinsicInst;
class PostDominatorTree;
class AllocaInst;
class Instruction;
namespace memtag {
// For an alloca that is valid between the lifetime markers Start and Ends,
// call the Callback for all possible exits out of the lifetime in the
// containing function; such exits can return via the instructions in RetVec.
//
// Returns whether Ends covered all possible exits. If they did not,
// the caller should remove Ends to ensure that work done at the other
// exits does not happen outside of the lifetime.
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
                          const LoopInfo &LI, const Instruction *Start,
                          const SmallVectorImpl<IntrinsicInst *> &Ends,
                          const SmallVectorImpl<Instruction *> &RetVec,
                          llvm::function_ref<void(Instruction *)> Callback);

bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
                        const SmallVectorImpl<IntrinsicInst *> &LifetimeEnd,
                        const DominatorTree *DT, const LoopInfo *LI,
                        size_t MaxLifetimes);

Instruction *getUntagLocationIfFunctionExit(Instruction &Inst);

struct AllocaInfo {
  AllocaInst *AI;
  SmallVector<IntrinsicInst *, 2> LifetimeStart;
  SmallVector<IntrinsicInst *, 2> LifetimeEnd;
  SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
};

struct StackInfo {
  MapVector<AllocaInst *, AllocaInfo> AllocasToInstrument;
  SmallVector<Instruction *, 4> UnrecognizedLifetimes;
  SmallVector<Instruction *, 8> RetVec;
  bool CallsReturnTwice = false;
};

class StackInfoBuilder {
public:
  StackInfoBuilder(const StackSafetyGlobalInfo *SSI) : SSI(SSI) {}

  void visit(Instruction &Inst);
  bool isInterestingAlloca(const AllocaInst &AI);
  StackInfo &get() { return Info; }

private:
  StackInfo Info;
  const StackSafetyGlobalInfo *SSI;
};
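
// A usage sketch (illustrative; `F` is the function being analyzed, `SSI` an
// optional StackSafetyGlobalInfo, and instructions() comes from
// llvm/IR/InstIterator.h):
//
//   memtag::StackInfoBuilder SIB(SSI);
//   for (Instruction &I : instructions(F))
//     SIB.visit(I);
//   memtag::StackInfo &Info = SIB.get();
//   // Info.AllocasToInstrument now maps each interesting alloca to its
//   // lifetime markers and debug intrinsics.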

uint64_t getAllocaSizeInBytes(const AllocaInst &AI);
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Align);

} // namespace memtag
} // namespace llvm

#endif
//===- CountVisits.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_COUNT_VISITS_H
#define LLVM_TRANSFORMS_UTILS_COUNT_VISITS_H

#include "llvm/ADT/StringMap.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct CountVisitsPass : PassInfoMixin<CountVisitsPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);

private:
  StringMap<uint32_t> Counts;
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_COUNT_VISITS_H
//===- Evaluator.h - LLVM IR evaluator --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Function evaluator for LLVM IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_EVALUATOR_H
#define LLVM_TRANSFORMS_UTILS_EVALUATOR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <deque>
#include <memory>

namespace llvm {

class CallBase;
class DataLayout;
class Function;
class TargetLibraryInfo;

/// This class evaluates LLVM IR, producing the Constant representing each SSA
/// instruction.  Changes to global variables are stored in a mapping that can
/// be iterated over after the evaluation is complete.  Once an evaluation call
/// fails, the evaluation object should not be reused.
class Evaluator {
  struct MutableAggregate;

  /// The evaluator represents values either as a Constant*, or as a
  /// MutableAggregate, which allows changing individual aggregate elements
  /// without creating a new interned Constant.
  class MutableValue {
    PointerUnion<Constant *, MutableAggregate *> Val;
    void clear();
    bool makeMutable();

  public:
    MutableValue(Constant *C) { Val = C; }
    MutableValue(const MutableValue &) = delete;
    MutableValue(MutableValue &&Other) {
      Val = Other.Val;
      Other.Val = nullptr;
    }
    ~MutableValue() { clear(); }

    Type *getType() const {
      if (auto *C = dyn_cast_if_present<Constant *>(Val))
        return C->getType();
      return cast<MutableAggregate *>(Val)->Ty;
    }

    Constant *toConstant() const {
      if (auto *C = dyn_cast_if_present<Constant *>(Val))
        return C;
      return cast<MutableAggregate *>(Val)->toConstant();
    }

    Constant *read(Type *Ty, APInt Offset, const DataLayout &DL) const;
    bool write(Constant *V, APInt Offset, const DataLayout &DL);
  };

  struct MutableAggregate {
    Type *Ty;
    SmallVector<MutableValue> Elements;

    MutableAggregate(Type *Ty) : Ty(Ty) {}
    Constant *toConstant() const;
  };

public:
  Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
      : DL(DL), TLI(TLI) {
    ValueStack.emplace_back();
  }

  ~Evaluator() {
    for (auto &Tmp : AllocaTmps)
      // If there are still users of the alloca, the program is doing something
      // silly, e.g. storing the address of the alloca somewhere and using it
      // later.  Since this is undefined, we'll just make it be null.
      if (!Tmp->use_empty())
        Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
  }

  /// Evaluate a call to function F, returning true if successful, false if we
  /// can't evaluate it.  ActualArgs contains the actual constant arguments
  /// passed to the function.
  bool EvaluateFunction(Function *F, Constant *&RetVal,
                        const SmallVectorImpl<Constant*> &ActualArgs);
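
  // A minimal sketch of driving the evaluator (illustrative; `DL`, `TLI`,
  // `F`, and the constant argument list are assumed):
  //
  //   Evaluator Eval(DL, &TLI);
  //   Constant *RetVal = nullptr;
  //   SmallVector<Constant *, 4> Args; // actual constant arguments for F
  //   if (Eval.EvaluateFunction(F, RetVal, Args))
  //     for (const auto &KV : Eval.getMutatedInitializers())
  //       KV.first->setInitializer(KV.second); // commit the evaluated stores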

  DenseMap<GlobalVariable *, Constant *> getMutatedInitializers() const {
    DenseMap<GlobalVariable *, Constant *> Result;
    for (const auto &Pair : MutatedMemory)
      Result[Pair.first] = Pair.second.toConstant();
    return Result;
  }

  const SmallPtrSetImpl<GlobalVariable *> &getInvariants() const {
    return Invariants;
  }

private:
  bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB,
                     bool &StrippedPointerCastsForAliasAnalysis);

  Constant *getVal(Value *V) {
    if (Constant *CV = dyn_cast<Constant>(V)) return CV;
    Constant *R = ValueStack.back().lookup(V);
    assert(R && "Reference to an uncomputed value!");
    return R;
  }

  void setVal(Value *V, Constant *C) {
    ValueStack.back()[V] = C;
  }

  /// Casts the call result to the type of a bitcast call expression, if needed.
  Constant *castCallResultIfNeeded(Type *ReturnType, Constant *RV);

  /// Given a call site, return the callee and the list of its formal arguments.
  Function *getCalleeWithFormalArgs(CallBase &CB,
                                    SmallVectorImpl<Constant *> &Formals);

  /// Given a call site and its callee, return the list of the callee's formal
  /// argument values, converting them when necessary.
  bool getFormalParams(CallBase &CB, Function *F,
                       SmallVectorImpl<Constant *> &Formals);

  Constant *ComputeLoadResult(Constant *P, Type *Ty);
  Constant *ComputeLoadResult(GlobalVariable *GV, Type *Ty,
                              const APInt &Offset);

  /// As we compute SSA register values, we store their contents here. The back
  /// of the deque contains the current function and the stack contains the
  /// values in the calling frames.
  std::deque<DenseMap<Value*, Constant*>> ValueStack;

  /// This is used to detect recursion.  In pathological situations we could hit
  /// exponential behavior, but at least there is nothing unbounded.
  SmallVector<Function*, 4> CallStack;

  /// For each store we execute, we update this map.  Loads check this to get
  /// the most up-to-date value.  If evaluation is successful, this state is
  /// committed to the process.
  DenseMap<GlobalVariable *, MutableValue> MutatedMemory;

  /// To 'execute' an alloca, we create a temporary global variable to represent
  /// its body.  This vector is needed so we can delete the temporary globals
  /// when we are done.
  SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;

  /// These global variables have been marked invariant by the static
  /// constructor.
  SmallPtrSet<GlobalVariable*, 8> Invariants;

  /// These are constants we have checked and know to be simple enough to live
  /// in a static initializer of a global.
  SmallPtrSet<Constant*, 8> SimpleConstants;

  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_EVALUATOR_H
//===- Transform/Utils/CodeExtractor.h - Code extraction util ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A utility to support extracting code from one function into its own
// stand-alone function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
#define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include <limits>

namespace llvm {

template <typename PtrType> class SmallPtrSetImpl;
class AllocaInst;
class BasicBlock;
class BlockFrequency;
class BlockFrequencyInfo;
class BranchProbabilityInfo;
class AssumptionCache;
class CallInst;
class DominatorTree;
class Function;
class Instruction;
class Loop;
class Module;
class Type;
class Value;

/// A cache for the CodeExtractor analysis. The operation \ref
/// CodeExtractor::extractCodeRegion is guaranteed not to invalidate this
/// object. This object should conservatively be considered invalid if any
/// other mutating operations on the IR occur.
///
/// Constructing this object is O(n) in the size of the function.
class CodeExtractorAnalysisCache {
  /// The allocas in the function.
  SmallVector<AllocaInst *, 16> Allocas;

  /// Base memory addresses of load/store instructions, grouped by block.
  DenseMap<BasicBlock *, DenseSet<Value *>> BaseMemAddrs;

  /// Blocks which contain instructions which may have unknown side-effects
  /// on memory.
  DenseSet<BasicBlock *> SideEffectingBlocks;

  void findSideEffectInfoForBlock(BasicBlock &BB);

public:
  CodeExtractorAnalysisCache(Function &F);

  /// Get the allocas in the function at the time the analysis was created.
  /// Note that some of these allocas may no longer be present in the function,
  /// due to \ref CodeExtractor::extractCodeRegion.
  ArrayRef<AllocaInst *> getAllocas() const { return Allocas; }

  /// Check whether \p BB contains an instruction thought to load from, store
  /// to, or otherwise clobber the alloca \p Addr.
  bool doesBlockContainClobberOfAddr(BasicBlock &BB, AllocaInst *Addr) const;
};

  /// Utility class for extracting code into a new function.
  ///
  /// This utility provides a simple interface for extracting some sequence of
  /// code into its own function, replacing it with a call to that function. It
  /// also provides various methods to query the nature and result of such a
  /// transformation.
  ///
  /// The rough algorithm used is:
  /// 1) Find both the inputs and outputs for the extracted region.
  /// 2) Pass the inputs as arguments, remapping them within the extracted
  ///    function to arguments.
  /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
  ///    as arguments, and inserting stores to the arguments for any scalars.
  class CodeExtractor {
    using ValueSet = SetVector<Value *>;

    // Various bits of state computed on construction.
    DominatorTree *const DT;
    const bool AggregateArgs;
    BlockFrequencyInfo *BFI;
    BranchProbabilityInfo *BPI;
    AssumptionCache *AC;

    // A block outside of the extraction set where any intermediate
    // allocations will be placed. If this is null, allocations
    // will be placed in the entry block of the function.
    BasicBlock *AllocationBlock;

    // If true, varargs functions can be extracted.
    bool AllowVarArgs;

    // Bits of intermediate state computed at various phases of extraction.
    SetVector<BasicBlock *> Blocks;
    unsigned NumExitBlocks = std::numeric_limits<unsigned>::max();
    Type *RetTy;

    // Mapping from the original exit blocks to the new blocks inside
    // the function.
    SmallVector<BasicBlock *, 4> OldTargets;

    // Suffix to use when creating the extracted function (appended to the
    // original function name + "."). If empty, the entry block label is used
    // if it is non-empty, otherwise "extracted".
    std::string Suffix;

  public:
    /// Create a code extractor for a sequence of blocks.
    ///
    /// Given a sequence of basic blocks where the first block in the sequence
    /// dominates the rest, prepare a code extractor object for pulling this
    /// sequence out into its new function. When a DominatorTree is also given,
    /// extra checking and transformations are enabled. If AllowVarArgs is true,
    /// vararg functions can be extracted. This is safe if all vararg handling
    /// code is extracted, including vastart. If AllowAlloca is true, extraction
    /// of blocks containing alloca instructions is possible; however, the code
    /// extractor won't validate whether extraction is legal.
    /// Any new allocations will be placed in the AllocationBlock, unless
    /// it is null, in which case it will be placed in the entry block of
    /// the function from which the code is being extracted.
    CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
                  bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
                  BranchProbabilityInfo *BPI = nullptr,
                  AssumptionCache *AC = nullptr, bool AllowVarArgs = false,
                  bool AllowAlloca = false,
                  BasicBlock *AllocationBlock = nullptr,
                  std::string Suffix = "");

    /// Create a code extractor for a loop body.
    ///
    /// Behaves just like the generic code sequence constructor, but uses the
    /// block sequence of the loop.
    CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false,
                  BlockFrequencyInfo *BFI = nullptr,
                  BranchProbabilityInfo *BPI = nullptr,
                  AssumptionCache *AC = nullptr,
                  std::string Suffix = "");

    /// Perform the extraction, returning the new function.
    ///
    /// Returns null when called on a CodeExtractor instance where isEligible
    /// returns false.
    Function *extractCodeRegion(const CodeExtractorAnalysisCache &CEAC);

    /// Perform the extraction, returning the new function and providing an
    /// interface to see what was categorized as inputs and outputs.
    ///
    /// \param CEAC - Cache to speed up operations for the CodeExtractor when
    /// hoisting, and extracting lifetime values and assumes.
    /// \param Inputs [out] - filled with values marked as inputs to the
    /// newly outlined function.
    /// \param Outputs [out] - filled with values marked as outputs to the
    /// newly outlined function.
    /// \returns null when called on a CodeExtractor instance where isEligible
    /// returns false.
    Function *extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
                                ValueSet &Inputs, ValueSet &Outputs);

    /// Verify that the assumption cache isn't stale after a region is
    /// extracted. Returns true when the verifier finds errors. AssumptionCache
    /// is passed as a parameter to make this function stateless.
    static bool verifyAssumptionCache(const Function &OldFunc,
                                      const Function &NewFunc,
                                      AssumptionCache *AC);

    /// Test whether this code extractor is eligible.
    ///
    /// Based on the blocks used when constructing the code extractor,
    /// determine whether it is eligible for extraction.
    ///
    /// Checks that varargs handling (with vastart and vaend) is only done in
    /// the outlined blocks.
    bool isEligible() const;

    /// Compute the set of input values and output values for the code.
    ///
    /// These can be used either when performing the extraction or to evaluate
    /// the expected size of a call to the extracted function. Note that this
    /// work cannot be cached between the two, as once we decide to extract
    /// a code sequence, that sequence is modified, including changing these
    /// sets, before extraction occurs. These modifications won't have any
    /// significant impact on the cost, however.
    void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
                           const ValueSet &Allocas) const;

    /// Check if lifetime marker nodes can be hoisted/sunk into the outline
    /// region.
    ///
    /// Returns true if it is safe to do the code motion.
    bool
    isLegalToShrinkwrapLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
                                       Instruction *AllocaAddr) const;

    /// Find the set of allocas whose live ranges are contained within the
    /// outlined region.
    ///
    /// Allocas which have lifetime markers contained in the outlined region
    /// should be pushed to the outlined function. The address bitcasts that
    /// are used by the lifetime markers are also candidates for shrink-
    /// wrapping. The instructions that need to be sunk are collected in
    /// 'Allocas'.
    void findAllocas(const CodeExtractorAnalysisCache &CEAC,
                     ValueSet &SinkCands, ValueSet &HoistCands,
                     BasicBlock *&ExitBlock) const;

    /// Find or create a block within the outline region for placing hoisted
    /// code.
    ///
    /// CommonExitBlock is a block outside the outline region. It is the common
    /// successor of blocks inside the region. If there exists a single block
    /// inside the region that is the predecessor of CommonExitBlock, that block
    /// will be returned. Otherwise CommonExitBlock will be split and the
    /// original block will be added to the outline region.
    BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);

    /// Exclude a value from aggregate argument passing when extracting a code
    /// region, passing it instead as a scalar.
    void excludeArgFromAggregate(Value *Arg);

  private:
    struct LifetimeMarkerInfo {
      bool SinkLifeStart = false;
      bool HoistLifeEnd = false;
      Instruction *LifeStart = nullptr;
      Instruction *LifeEnd = nullptr;
    };

    ValueSet ExcludeArgsFromAggregate;

    LifetimeMarkerInfo
    getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
                       Instruction *Addr, BasicBlock *ExitBlock) const;

    void severSplitPHINodesOfEntry(BasicBlock *&Header);
    void severSplitPHINodesOfExits(const SmallPtrSetImpl<BasicBlock *> &Exits);
    void splitReturnBlocks();

    Function *constructFunction(const ValueSet &inputs,
                                const ValueSet &outputs,
                                BasicBlock *header,
                                BasicBlock *newRootNode, BasicBlock *newHeader,
                                Function *oldFunction, Module *M);

    void moveCodeToFunction(Function *newFunction);

    void calculateNewCallTerminatorWeights(
        BasicBlock *CodeReplacer,
        DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
        BranchProbabilityInfo *BPI);

    CallInst *emitCallAndSwitchStatement(Function *newFunction,
                                         BasicBlock *newHeader,
                                         ValueSet &inputs, ValueSet &outputs);
  };
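
// Example usage (an illustrative sketch, not part of this header's API;
// assumes `F` is the function being outlined, `BBs` is a single-entry
// sequence of its blocks, and `DT` is an up-to-date DominatorTree):
//
//   CodeExtractorAnalysisCache CEAC(F);
//   CodeExtractor CE(BBs, &DT);
//   if (CE.isEligible())
//     if (Function *Outlined = CE.extractCodeRegion(CEAC)) {
//       // The region now lives in `Outlined`; the original blocks have been
//       // replaced with a call to it.
//     }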

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
//===- SimplifyCFGOptions.h - Control structure for SimplifyCFG -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A set of parameters used to control the transforms in the SimplifyCFG pass.
// Options may change depending on the position in the optimization pipeline.
// For example, the canonical form that includes switches and branches may
// later be replaced by lookup tables and selects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H

namespace llvm {

class AssumptionCache;

struct SimplifyCFGOptions {
  int BonusInstThreshold = 1;
  bool ForwardSwitchCondToPhi = false;
  bool ConvertSwitchRangeToICmp = false;
  bool ConvertSwitchToLookupTable = false;
  bool NeedCanonicalLoop = true;
  bool HoistCommonInsts = false;
  bool SinkCommonInsts = false;
  bool SimplifyCondBranch = true;
  bool SpeculateBlocks = true;

  AssumptionCache *AC = nullptr;

  // Support 'builder' pattern to set members by name at construction time.
  SimplifyCFGOptions &bonusInstThreshold(int I) {
    BonusInstThreshold = I;
    return *this;
  }
  SimplifyCFGOptions &forwardSwitchCondToPhi(bool B) {
    ForwardSwitchCondToPhi = B;
    return *this;
  }
  SimplifyCFGOptions &convertSwitchRangeToICmp(bool B) {
    ConvertSwitchRangeToICmp = B;
    return *this;
  }
  SimplifyCFGOptions &convertSwitchToLookupTable(bool B) {
    ConvertSwitchToLookupTable = B;
    return *this;
  }
  SimplifyCFGOptions &needCanonicalLoops(bool B) {
    NeedCanonicalLoop = B;
    return *this;
  }
  SimplifyCFGOptions &hoistCommonInsts(bool B) {
    HoistCommonInsts = B;
    return *this;
  }
  SimplifyCFGOptions &sinkCommonInsts(bool B) {
    SinkCommonInsts = B;
    return *this;
  }
  SimplifyCFGOptions &setAssumptionCache(AssumptionCache *Cache) {
    AC = Cache;
    return *this;
  }
  SimplifyCFGOptions &setSimplifyCondBranch(bool B) {
    SimplifyCondBranch = B;
    return *this;
  }

  SimplifyCFGOptions &speculateBlocks(bool B) {
    SpeculateBlocks = B;
    return *this;
  }
};
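
// Example usage (an illustrative sketch; `FPM` is a hypothetical
// FunctionPassManager at a late pipeline position where lookup tables and
// sinking are profitable):
//
//   SimplifyCFGOptions Opts = SimplifyCFGOptions()
//                                 .bonusInstThreshold(2)
//                                 .convertSwitchToLookupTable(true)
//                                 .sinkCommonInsts(true);
//   FPM.addPass(SimplifyCFGPass(Opts));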

} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYCFGOPTIONS_H
//===- LCSSA.h - Loop-closed SSA transform Pass -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms loops by placing phi nodes at the end of the loops for
// all values that are live across the loop boundary.  For example, it turns
// the code on the left into the code on the right:
//
// for (...)                for (...)
//   if (c)                   if (c)
//     X1 = ...                 X1 = ...
//   else                     else
//     X2 = ...                 X2 = ...
//   X3 = phi(X1, X2)         X3 = phi(X1, X2)
// ... = X3 + 4             X4 = phi(X3)
//                          ... = X4 + 4
//
// This is still valid LLVM; the extra phi nodes are purely redundant, and will
// be trivially eliminated by InstCombine.  The major benefit of this
// transformation is that it makes many other loop optimizations, such as
// LoopUnswitching, simpler.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LCSSA_H
#define LLVM_TRANSFORMS_UTILS_LCSSA_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Converts loops into loop-closed SSA form.
class LCSSAPass : public PassInfoMixin<LCSSAPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
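
// Example usage (an illustrative sketch; `FPM` is a hypothetical
// FunctionPassManager): schedule LCSSA construction ahead of loop passes
// that require loop-closed SSA form:
//
//   FunctionPassManager FPM;
//   FPM.addPass(LCSSAPass());
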
} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LCSSA_H
//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some loop transformation utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H

#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

namespace llvm {

template <typename T> class DomTreeNodeBase;
using DomTreeNode = DomTreeNodeBase<BasicBlock>;
class AssumptionCache;
class StringRef;
class AnalysisUsage;
class TargetTransformInfo;
class AAResults;
class BasicBlock;
class ICFLoopSafetyInfo;
class IRBuilderBase;
class Loop;
class LoopInfo;
class MemoryAccess;
class MemorySSA;
class MemorySSAUpdater;
class OptimizationRemarkEmitter;
class PredIteratorCache;
class ScalarEvolution;
class SCEV;
class SCEVExpander;
class TargetLibraryInfo;
class LPPassManager;
class Instruction;
struct RuntimeCheckingPtrGroup;
typedef std::pair<const RuntimeCheckingPtrGroup *,
                  const RuntimeCheckingPtrGroup *>
    RuntimePointerCheck;

template <typename T, unsigned N> class SmallSetVector;
template <typename T, unsigned N> class SmallPriorityWorklist;

/// Insert a preheader for the given loop, so that the loop header has a single
/// non-backedge predecessor. Returns the new preheader block and updates the
/// provided analyses (\p DT, \p LI, \p MSSAU).
BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
                                   MemorySSAUpdater *MSSAU, bool PreserveLCSSA);

/// Ensure that all exit blocks of the loop are dedicated exits.
///
/// For any loop exit block with non-loop predecessors, we split the loop
/// predecessors to use a dedicated loop exit block. We update the dominator
/// tree and loop info if provided, and will preserve LCSSA if requested.
bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
                             MemorySSAUpdater *MSSAU, bool PreserveLCSSA);

/// Ensures LCSSA form for every instruction from the Worklist in the scope of
/// its innermost containing loop.
///
/// For each given instruction that has uses outside of the loop, an LCSSA PHI
/// node is inserted and the uses outside the loop are rewritten to use this
/// node.
///
/// LoopInfo and DominatorTree are required and, since the routine makes no
/// changes to the CFG, preserved.
///
/// Returns true if any modifications are made.
///
/// This function may introduce unused PHI nodes. If \p PHIsToRemove is not
/// nullptr, those are added to it (before removing, the caller has to check if
/// they still do not have any uses). Otherwise the PHIs are directly removed.
///
/// If \p InsertedPHIs is not nullptr, inserted phis will be added to this
/// vector.
bool formLCSSAForInstructions(
    SmallVectorImpl<Instruction *> &Worklist, const DominatorTree &DT,
    const LoopInfo &LI, ScalarEvolution *SE,
    SmallVectorImpl<PHINode *> *PHIsToRemove = nullptr,
    SmallVectorImpl<PHINode *> *InsertedPHIs = nullptr);

/// Put loop into LCSSA form.
///
/// Looks at all instructions in the loop which have uses outside of the
/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
/// the loop are rewritten to use this node. Sub-loops must be in LCSSA form
/// already.
///
/// LoopInfo and DominatorTree are required and preserved.
///
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
bool formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
               ScalarEvolution *SE);

/// Put a loop nest into LCSSA form.
///
/// This recursively forms LCSSA for a loop nest.
///
/// LoopInfo and DominatorTree are required and preserved.
///
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
                          ScalarEvolution *SE);

/// Flags controlling how much is checked when sinking or hoisting
/// instructions.  The number of memory accesses in the loop (and whether there
/// are too many) is determined in the constructors when using MemorySSA.
class SinkAndHoistLICMFlags {
public:
  // Explicitly set limits.
  SinkAndHoistLICMFlags(unsigned LicmMssaOptCap,
                        unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
                        Loop &L, MemorySSA &MSSA);
  // Use default limits.
  SinkAndHoistLICMFlags(bool IsSink, Loop &L, MemorySSA &MSSA);

  void setIsSink(bool B) { IsSink = B; }
  bool getIsSink() { return IsSink; }
  bool tooManyMemoryAccesses() { return NoOfMemAccTooLarge; }
  bool tooManyClobberingCalls() { return LicmMssaOptCounter >= LicmMssaOptCap; }
  void incrementClobberingCalls() { ++LicmMssaOptCounter; }

protected:
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool IsSink;
};
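
// Example usage (an illustrative sketch; assumes `L` and `MSSA` come from the
// surrounding LICM-style driver): construct hoisting flags with the default
// MemorySSA-derived limits and consult them before an expensive walk:
//
//   SinkAndHoistLICMFlags Flags(/*IsSink=*/false, L, MSSA);
//   if (!Flags.tooManyMemoryAccesses()) {
//     // ... attempt hoisting, calling Flags.incrementClobberingCalls() as
//     // MemorySSA clobber queries are issued.
//   }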

/// Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t. the DominatorTree. This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration. Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
/// loop, and loop safety information as arguments. Diagnostics are emitted via
/// \p ORE. It returns the changed status. \p CurLoop is the loop to do sinking
/// on. \p OutermostLoop is used only when this function is called by
/// \p sinkRegionForLoopNest.
bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
                TargetLibraryInfo *, TargetTransformInfo *, Loop *CurLoop,
                MemorySSAUpdater &, ICFLoopSafetyInfo *,
                SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *,
                Loop *OutermostLoop = nullptr);

/// Call sinkRegion on loops contained within the specified loop
/// in order from innermost to outermost.
bool sinkRegionForLoopNest(DomTreeNode *, AAResults *, LoopInfo *,
                           DominatorTree *, TargetLibraryInfo *,
                           TargetTransformInfo *, Loop *, MemorySSAUpdater &,
                           ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &,
                           OptimizationRemarkEmitter *);

/// Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
/// first order w.r.t. the DominatorTree.  This allows us to visit definitions
/// before uses, allowing us to hoist a loop body in one pass without iteration.
/// Takes DomTreeNode, AAResults, LoopInfo, DominatorTree, TargetLibraryInfo,
/// Loop, AliasSet information for all instructions of the loop, and loop
/// safety information as arguments.
/// Diagnostics are emitted via \p ORE. It returns the changed status.
/// \p AllowSpeculation is whether values should be hoisted even if they are not
/// guaranteed to execute in the loop, but are safe to speculatively execute.
bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
                 AssumptionCache *, TargetLibraryInfo *, Loop *,
                 MemorySSAUpdater &, ScalarEvolution *, ICFLoopSafetyInfo *,
                 SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *, bool,
                 bool AllowSpeculation);

/// Return true if the induction variable \p IV in a Loop whose latch is
/// \p LatchBlock would become dead if the exit test \p Cond were removed.
/// Conservatively returns false if analysis is insufficient.
bool isAlmostDeadIV(PHINode *IV, BasicBlock *LatchBlock, Value *Cond);

/// This function deletes dead loops. The caller of this function needs to
/// guarantee that the loop is in fact dead.
/// The function requires several prerequisites to be met:
///   - The loop needs to be in LCSSA form
///   - The loop needs to have a preheader
///   - A unique dedicated exit block must exist
///
/// This also updates the relevant analysis information in \p DT, \p SE, \p LI
/// and \p MSSA if pointers to those are provided.
/// It also updates the loop PM if an updater struct is provided.
void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
                    LoopInfo *LI, MemorySSA *MSSA = nullptr);

/// Remove the backedge of the specified loop.  Handles loop nests and general
/// loop structures subject to the precondition that the loop has no parent
/// loop and has a single latch block.  Preserves all listed analyses.
void breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
                       LoopInfo &LI, MemorySSA *MSSA);

/// Try to promote memory values to scalars by sinking stores out of
/// the loop and moving loads to before the loop.  We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant. It takes a set of must-alias values, a loop exit blocks
/// vector, a loop exit blocks insertion point vector, PredIteratorCache,
/// LoopInfo, DominatorTree, Loop, AliasSet information for all instructions
/// of the loop, and loop safety information as arguments.
/// Diagnostics are emitted via \p ORE. It returns the changed status.
/// \p AllowSpeculation is whether values should be hoisted even if they are not
/// guaranteed to execute in the loop, but are safe to speculatively execute.
bool promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &, SmallVectorImpl<BasicBlock *> &,
    SmallVectorImpl<Instruction *> &, SmallVectorImpl<MemoryAccess *> &,
    PredIteratorCache &, LoopInfo *, DominatorTree *, AssumptionCache *AC,
    const TargetLibraryInfo *, TargetTransformInfo *, Loop *,
    MemorySSAUpdater &, ICFLoopSafetyInfo *, OptimizationRemarkEmitter *,
    bool AllowSpeculation, bool HasReadsOutsideSet);

/// Does a BFS from a given node to all of its children inside a given loop.
/// The returned vector of nodes includes the starting point.
SmallVector<DomTreeNode *, 16> collectChildrenInLoop(DomTreeNode *N,
                                                     const Loop *CurLoop);

/// Returns the instructions that use values defined in the loop.
SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);

/// Find a combination of metadata ("llvm.loop.vectorize.width" and
/// "llvm.loop.vectorize.scalable.enable") for a loop and use it to construct
/// an ElementCount. If the metadata "llvm.loop.vectorize.width" cannot be
/// found then std::nullopt is returned.
std::optional<ElementCount>
getOptionalElementCountLoopAttribute(const Loop *TheLoop);

/// Create a new loop identifier for a loop created from a loop transformation.
///
/// @param OrigLoopID The loop ID of the loop before the transformation.
/// @param FollowupAttrs List of attribute names that contain attributes to be
///                      added to the new loop ID.
/// @param InheritOptionsAttrsPrefix Selects which attributes should be inherited
///                                  from the original loop. The following values
///                                  are considered:
///        nullptr   : Inherit all attributes from @p OrigLoopID.
///        ""        : Do not inherit any attribute from @p OrigLoopID; only use
///                    those specified by a followup attribute.
///        "<prefix>": Inherit all attributes except those which start with
///                    <prefix>; commonly used to remove metadata for the
///                    applied transformation.
/// @param AlwaysNew If true, do not try to reuse OrigLoopID and never return
///                  std::nullopt.
///
/// @return The loop ID for the after-transformation loop. The following values
///         can be returned:
///         std::nullopt : No followup attribute was found; it is up to the
///                        transformation to choose attributes that make sense.
///         @p OrigLoopID: The original identifier can be reused.
///         nullptr      : The new loop has no attributes.
///         MDNode*      : A new unique loop identifier.
std::optional<MDNode *>
makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef<StringRef> FollowupAttrs,
                   const char *InheritOptionsAttrsPrefix = "",
                   bool AlwaysNew = false);
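
// Example call (an illustrative sketch from a hypothetical unroll-style
// transformation; the attribute names follow the "llvm.loop.unroll." prefix
// convention described above):
//
//   std::optional<MDNode *> NewLoopID = makeFollowupLoopID(
//       OrigLoopID, {"llvm.loop.unroll.followup_all"}, "llvm.loop.unroll.");
//   if (NewLoopID)
//     NewLoop->setLoopID(*NewLoopID);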

/// Look for the loop attribute that disables all transformation heuristics.
bool hasDisableAllTransformsHint(const Loop *L);

/// Look for the loop attribute that disables the LICM transformation heuristics.
bool hasDisableLICMTransformsHint(const Loop *L);

/// The mode sets how eagerly a transformation should be applied.
enum TransformationMode {
  /// The pass can use heuristics to determine whether a transformation should
  /// be applied.
  TM_Unspecified,

  /// The transformation should be applied without considering a cost model.
  TM_Enable,

  /// The transformation should not be applied.
  TM_Disable,

  /// Force is a flag and should not be used alone.
  TM_Force = 0x04,

  /// The transformation was directed by the user, e.g. by a #pragma in
  /// the source code. If the transformation could not be applied, a
  /// warning should be emitted.
  TM_ForcedByUser = TM_Enable | TM_Force,

  /// The transformation must not be applied. For instance, `#pragma clang loop
  /// unroll(disable)` explicitly forbids any unrolling to take place. Unlike
  /// general loop metadata, it must not be dropped. Most passes should not
  /// behave differently under TM_Disable and TM_SuppressedByUser.
  TM_SuppressedByUser = TM_Disable | TM_Force
};

/// @{
/// Get the mode for LLVM's supported loop transformations.
TransformationMode hasUnrollTransformation(const Loop *L);
TransformationMode hasUnrollAndJamTransformation(const Loop *L);
TransformationMode hasVectorizeTransformation(const Loop *L);
TransformationMode hasDistributeTransformation(const Loop *L);
TransformationMode hasLICMVersioningTransformation(const Loop *L);
/// @}
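
// Example usage (an illustrative sketch): a pass consulting the mode before
// transforming a loop:
//
//   switch (hasUnrollTransformation(L)) {
//   case TM_SuppressedByUser:
//     return false;            // The user explicitly forbade unrolling.
//   case TM_ForcedByUser:
//     break;                   // Unroll and warn if it proves impossible.
//   default:
//     break;                   // Fall back to the cost model.
//   }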

/// Set the input string into loop metadata, keeping other values intact.
/// If the string is already in the loop metadata, update its value if it
/// differs.
void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
                             unsigned V = 0);

/// Returns a loop's estimated trip count based on branch weight metadata.
/// In addition, if \p EstimatedLoopInvocationWeight is not null, it is
/// initialized with the weight of the loop's latch leading to the exit.
/// Returns 0 when the count is estimated to be 0, or std::nullopt when a
/// meaningful estimate cannot be made.
std::optional<unsigned>
getLoopEstimatedTripCount(Loop *L,
                          unsigned *EstimatedLoopInvocationWeight = nullptr);
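
// Example usage (an illustrative sketch): gate a transformation on a small
// known trip count:
//
//   if (std::optional<unsigned> TC = getLoopEstimatedTripCount(L))
//     if (*TC <= 4) {
//       // ... the loop is estimated to run at most four iterations.
//     }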

/// Set a loop's branch weight metadata to reflect that the loop has \p
/// EstimatedTripCount iterations and \p EstimatedLoopInvocationWeight exits
/// through its latch. Returns true if the metadata is successfully updated,
/// false otherwise. Note that the loop must have a latch block which controls
/// the loop exit in order to succeed.
bool setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
                               unsigned EstimatedLoopInvocationWeight);

/// Check whether the backedge count of the inner loop \p L is known to be
/// invariant on all iterations of its outer loop. If the loop has no parent,
/// this is trivially true.
bool hasIterationCountInvariantInParent(Loop *L, ScalarEvolution &SE);

/// Helper to consistently add the set of standard passes to a loop pass's \c
/// AnalysisUsage.
///
/// All loop passes should call this as part of implementing their \c
/// getAnalysisUsage.
void getLoopAnalysisUsage(AnalysisUsage &AU);

/// Returns true if it is legal to hoist or sink this instruction disregarding
/// the possible introduction of faults.  Reasoning about potential faulting
/// instructions is the responsibility of the caller since it is challenging to
/// do efficiently from within this routine.
/// \p TargetExecutesOncePerLoop is true only when it is guaranteed that the
/// target executes at most once per execution of the loop body.  This is used
/// to assess the legality of duplicating atomic loads.  Generally, this is
/// true when moving out of loop and not true when moving into loops.
/// If \p ORE is set use it to emit optimization remarks.
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                        Loop *CurLoop, MemorySSAUpdater &MSSAU,
                        bool TargetExecutesOncePerLoop,
                        SinkAndHoistLICMFlags &LICMFlags,
                        OptimizationRemarkEmitter *ORE = nullptr);

/// Returns the min/max intrinsic used when expanding a min/max reduction.
Intrinsic::ID getMinMaxReductionIntrinsicOp(RecurKind RK);

/// Returns the comparison predicate used when expanding a min/max reduction.
CmpInst::Predicate getMinMaxReductionPredicate(RecurKind RK);

/// See RecurrenceDescriptor::isSelectCmpPattern for a description of the
/// pattern we are trying to match. In this pattern we are only ever selecting
/// between two values: 1) an initial PHI start value, and 2) a loop invariant
/// value. This function uses \p LoopExitInst to determine 2), which we then use
/// to select between \p Left and \p Right. Any lane value in \p Left that
/// matches 2) will be merged into \p Right.
Value *createSelectCmpOp(IRBuilderBase &Builder, Value *StartVal, RecurKind RK,
                         Value *Left, Value *Right);

/// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
/// The Builder's fast-math-flags must be set to propagate the expected values.
Value *createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
                      Value *Right);

/// Generates an ordered vector reduction using extracts to reduce the value.
Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
                           unsigned Op, RecurKind MinMaxKind = RecurKind::None);

/// Generates a vector reduction using shufflevectors to reduce the value.
/// Fast-math-flags are propagated using the IRBuilder's setting.
Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
                           RecurKind MinMaxKind = RecurKind::None);

/// Create a target reduction of the given vector. The reduction operation
/// is described by the \p Opcode parameter. min/max reductions require
/// additional information supplied in \p RdxKind.
/// The target is queried to determine if intrinsics or shuffle sequences are
/// required to implement the reduction.
/// Fast-math-flags are propagated using the IRBuilder's setting.
Value *createSimpleTargetReduction(IRBuilderBase &B,
                                   const TargetTransformInfo *TTI, Value *Src,
                                   RecurKind RdxKind);

/// Create a target reduction of the given vector \p Src for a reduction of the
/// kind RecurKind::SelectICmp or RecurKind::SelectFCmp. The reduction operation
/// is described by \p Desc.
Value *createSelectCmpTargetReduction(IRBuilderBase &B,
                                      const TargetTransformInfo *TTI,
                                      Value *Src,
                                      const RecurrenceDescriptor &Desc,
                                      PHINode *OrigPhi);

/// Create a generic target reduction using a recurrence descriptor \p Desc.
/// The target is queried to determine if intrinsics or shuffle sequences are
/// required to implement the reduction.
/// Fast-math-flags are propagated using the RecurrenceDescriptor.
Value *createTargetReduction(IRBuilderBase &B, const TargetTransformInfo *TTI,
                             const RecurrenceDescriptor &Desc, Value *Src,
                             PHINode *OrigPhi = nullptr);

/// Create an ordered reduction intrinsic using the given recurrence
/// descriptor \p Desc.
Value *createOrderedReduction(IRBuilderBase &B,
                              const RecurrenceDescriptor &Desc, Value *Src,
                              Value *Start);

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// If OpValue is non-null, we only consider operations similar to OpValue
/// when intersecting.
/// Flag set: NSW, NUW (if IncludeWrapFlags is true), exact, and all of
/// fast-math.
void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr,
                      bool IncludeWrapFlags = true);

/// Returns true if we can prove that \p S is defined and always negative in
/// loop \p L.
bool isKnownNegativeInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);

/// Returns true if we can prove that \p S is defined and always non-negative in
/// loop \p L.
bool isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
                              ScalarEvolution &SE);
/// Returns true if we can prove that \p S is defined and always positive in
/// loop \p L.
bool isKnownPositiveInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);

/// Returns true if we can prove that \p S is defined and always non-positive in
/// loop \p L.
bool isKnownNonPositiveInLoop(const SCEV *S, const Loop *L,
                              ScalarEvolution &SE);

/// Returns true if \p S is defined and is never equal to signed/unsigned max.
bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                       bool Signed);

/// Returns true if \p S is defined and is never equal to signed/unsigned min.
bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                       bool Signed);

enum ReplaceExitVal {
  NeverRepl,
  OnlyCheapRepl,
  NoHardUse,
  UnusedIndVarInLoop,
  AlwaysRepl
};

/// If the final value of any expressions that are recurrent in the loop can
/// be computed, substitute the exit values from the loop into any instructions
/// outside of the loop that use the final values of the current expressions.
/// Returns the number of loop exit values that have been replaced; the
/// corresponding phi nodes are added to DeadInsts.
int rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
                          ScalarEvolution *SE, const TargetTransformInfo *TTI,
                          SCEVExpander &Rewriter, DominatorTree *DT,
                          ReplaceExitVal ReplaceExitValue,
                          SmallVector<WeakTrackingVH, 16> &DeadInsts);

/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
/// \p OrigLoop and the following distribution of \p OrigLoop iterations among
/// \p UnrolledLoop and \p RemainderLoop. \p UnrolledLoop receives weights that
/// reflect TC/UF iterations, and \p RemainderLoop receives weights that reflect
/// the remaining TC%UF iterations.
///
/// Note that \p OrigLoop may be equal to either \p UnrolledLoop or \p
/// RemainderLoop, in which case weights for \p OrigLoop are updated
/// accordingly. Note also that behavior is undefined if \p UnrolledLoop and
/// \p RemainderLoop are equal. \p UF must be greater than zero.
/// If \p OrigLoop has no associated profile info, nothing happens.
///
/// This utility may be useful for optimizations such as the unroller and
/// vectorizer, since this is a typical transformation for them.
void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
                                  Loop *RemainderLoop, uint64_t UF);

/// Utility that implements appending of loops onto a worklist given a range.
/// We want to process loops in postorder, but the worklist is a LIFO data
/// structure, so we append to it in *reverse* postorder.
/// For trees, a preorder traversal is a viable reverse postorder, so we
/// actually append using a preorder walk algorithm.
template <typename RangeT>
void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist<Loop *, 4> &);
/// Utility that implements appending of loops onto a worklist given a range.
/// It has the same behavior as appendLoopsToWorklist, but assumes the range of
/// loops has already been reversed, so it processes loops in the given order.
template <typename RangeT>
void appendReversedLoopsToWorklist(RangeT &&,
                                   SmallPriorityWorklist<Loop *, 4> &);

/// Utility that implements appending of loops onto a worklist given LoopInfo.
/// Calls the templated utility taking a Range of loops, handing it the Loops
/// in LoopInfo, iterated in reverse. This is because the loops are stored in
/// RPO w.r.t. the control flow graph in LoopInfo. For the purpose of unrolling,
/// loop deletion, and LICM, we largely want to work forward across the CFG so
/// that we visit defs before uses and can propagate simplifications from one
/// loop nest into the next. Calls appendReversedLoopsToWorklist with the
/// already reversed loops in LI.
/// FIXME: Consider changing the order in LoopInfo.
void appendLoopsToWorklist(LoopInfo &, SmallPriorityWorklist<Loop *, 4> &);
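
// Example usage (an illustrative sketch; `processLoop` is a hypothetical
// client function): drain all loops of a function in postorder:
//
//   SmallPriorityWorklist<Loop *, 4> Worklist;
//   appendLoopsToWorklist(LI, Worklist);
//   while (!Worklist.empty())
//     processLoop(Worklist.pop_back_val());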

/// Recursively clone the specified loop and all of its children,
/// mapping the blocks with the specified map.
Loop *cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
                LoopInfo *LI, LPPassManager *LPM);

/// Add code that checks at runtime if the accessed arrays in \p PointerChecks
/// overlap. Returns the final comparator value, or nullptr if no check is
/// needed.
Value *
addRuntimeChecks(Instruction *Loc, Loop *TheLoop,
                 const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
                 SCEVExpander &Expander);

/// Add code that checks at runtime whether the accessed arrays in \p Checks
/// overlap, using pointer-difference based checks. \p GetVF is a callback
/// producing the runtime vectorization factor and \p IC is the interleave
/// count. Returns the final comparator value, or nullptr if no check is
/// needed.
Value *addDiffRuntimeChecks(
    Instruction *Loc, ArrayRef<PointerDiffInfo> Checks, SCEVExpander &Expander,
    function_ref<Value *(IRBuilderBase &, unsigned)> GetVF, unsigned IC);

/// Struct to hold information about a partially invariant condition.
struct IVConditionInfo {
  /// Instructions that need to be duplicated and checked for the unswitching
  /// condition.
  SmallVector<Instruction *> InstToDuplicate;

  /// Constant to indicate for which value the condition is invariant.
  Constant *KnownValue = nullptr;

  /// True if the partially invariant path is a no-op (i.e., it has no
  /// side-effects and no loop value is used outside the loop).
  bool PathIsNoop = true;

  /// If the partially invariant path reaches a single exit block, ExitForPath
  /// is set to that block. Otherwise it is nullptr.
  BasicBlock *ExitForPath = nullptr;
};

/// Check if the loop header has a conditional branch that is not
/// loop-invariant, because it involves load instructions. If all paths from
/// either the true or false successor to the header or loop exits do not
/// modify the memory feeding the condition, perform 'partial unswitching'. That
/// is, duplicate the instructions feeding the condition in the pre-header. Then
/// unswitch on the duplicated condition. The condition is now known in the
/// unswitched version for the 'invariant' path through the original loop.
///
/// If the branch condition of the header is partially invariant, return the
/// instructions to duplicate and a boolean Constant to update the condition
/// in the loops created for the true or false successors.
std::optional<IVConditionInfo> hasPartialIVCondition(const Loop &L,
                                                     unsigned MSSAThreshold,
                                                     const MemorySSA &MSSA,
                                                     AAResults &AA);
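
// Example usage (an illustrative sketch from a hypothetical unswitching
// client):
//
//   if (std::optional<IVConditionInfo> Info =
//           hasPartialIVCondition(L, MSSAThreshold, MSSA, AA))
//     if (Info->PathIsNoop) {
//       // Duplicate Info->InstToDuplicate into the preheader and unswitch
//       // on the duplicated condition, which equals Info->KnownValue on the
//       // invariant path.
//     }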

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
//===- InstructionNamer.h - Give anonymous instructions names -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
#define LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H

#include "llvm/IR/PassManager.h"

namespace llvm {
struct InstructionNamerPass : PassInfoMixin<InstructionNamerPass> {
  PreservedAnalyses run(Function &, FunctionAnalysisManager &);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_INSTRUCTIONNAMER_H
//===- FixIrreducible.h - Convert irreducible control-flow into loops -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
#define LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H

#include "llvm/IR/PassManager.h"

namespace llvm {
struct FixIrreduciblePass : PassInfoMixin<FixIrreduciblePass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_FIXIRREDUCIBLE_H
//===- VNCoercion.h - Value Numbering Coercion Utilities --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides routines used by LLVM's value numbering passes to
/// perform various forms of value extraction from memory when the types are
/// not identical.  For example, given
///
/// store i32 8, i32* %foo
/// %a = bitcast i32* %foo to i16*
/// %val = load i16, i16* %a
///
/// It is possible to extract the value of the load of %a from the store to
/// %foo. These routines know how to tell whether they can do that (the
/// analyze* routines), and can also insert the necessary IR to do it (the
/// get* routines).

#ifndef LLVM_TRANSFORMS_UTILS_VNCOERCION_H
#define LLVM_TRANSFORMS_UTILS_VNCOERCION_H

namespace llvm {
class Constant;
class StoreInst;
class LoadInst;
class MemIntrinsic;
class Instruction;
class IRBuilderBase;
class Value;
class Type;
class DataLayout;
namespace VNCoercion {
/// Return true if coerceAvailableValueToLoadType would succeed if it was
/// called.
bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
                                     const DataLayout &DL);

/// If we saw a store of a value to memory, and then a load from a must-aliased
/// pointer of a different type, try to coerce the stored value to the loaded
/// type.  LoadedTy is the type of the load we want to replace.  IRB is the
/// IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
Value *coerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
                                      IRBuilderBase &IRB, const DataLayout &DL);

/// This function determines whether a value for the pointer LoadPtr can be
/// extracted from the store at DepSI.
///
/// On success, it returns the offset into DepSI that extraction would start.
/// On failure, it returns -1.
int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                   StoreInst *DepSI, const DataLayout &DL);

/// This function determines whether a value for the pointer LoadPtr can be
/// extracted from the load at DepLI.
///
/// On success, it returns the offset into DepLI that extraction would start.
/// On failure, it returns -1.
int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
                                  const DataLayout &DL);

/// This function determines whether a value for the pointer LoadPtr can be
/// extracted from the memory intrinsic at DepMI.
///
/// On success, it returns the offset into DepMI that extraction would start.
/// On failure, it returns -1.
int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                     MemIntrinsic *DepMI, const DataLayout &DL);

/// If analyzeLoadFromClobberingStore/Load returned an offset, this function
/// can be used to actually perform the extraction of the bits from the store.
/// It inserts instructions to do so at InsertPt, and returns the extracted
/// value.
Value *getValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy,
                       Instruction *InsertPt, const DataLayout &DL);
/// This is the same as getValueForLoad, except it performs no insertion.
/// It only allows constant inputs.
Constant *getConstantValueForLoad(Constant *SrcVal, unsigned Offset,
                                  Type *LoadTy, const DataLayout &DL);

/// If analyzeLoadFromClobberingMemInst returned an offset, this function can be
/// used to actually perform the extraction of the bits from the memory
/// intrinsic.  It inserts instructions to do so at InsertPt, and returns the
/// extracted value.
Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                              Type *LoadTy, Instruction *InsertPt,
                              const DataLayout &DL);
/// This is the same as getMemInstValueForLoad, except it performs no
/// insertion. It returns nullptr if it cannot produce a constant.
Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                         Type *LoadTy, const DataLayout &DL);
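
// Typical usage pattern (an illustrative sketch; `DepSI`, `LoadTy`, `LoadPtr`,
// `InsertPt`, and `DL` come from a hypothetical GVN-style client): analyze
// first, then materialize the value only if extraction is possible:
//
//   int Offset = analyzeLoadFromClobberingStore(LoadTy, LoadPtr, DepSI, DL);
//   if (Offset != -1)
//     Value *V = getValueForLoad(DepSI->getValueOperand(), Offset, LoadTy,
//                                InsertPt, DL);
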
} // namespace VNCoercion
} // namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_VNCOERCION_H
//===- InjectTLIMappings.h - TLI to VFABI attribute injection -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Populates the VFABI attribute with the scalar-to-vector mappings
// from the TargetLibraryInfo.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
#define LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;
class InjectTLIMappings : public PassInfoMixin<InjectTLIMappings> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // End namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_INJECTTLIMAPPINGS_H
//===- StripGCRelocates.h - -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
#define LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

class StripGCRelocates : public PassInfoMixin<StripGCRelocates> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_STRIPGCRELOCATES_H
//===- Debugify.h - Check debug info preservation in optimizations --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file Interface to the `debugify` synthetic/original debug info testing
/// utility.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_DEBUGIFY_H
#define LLVM_TRANSFORMS_UTILS_DEBUGIFY_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"

using DebugFnMap =
    llvm::MapVector<const llvm::Function *, const llvm::DISubprogram *>;
using DebugInstMap = llvm::MapVector<const llvm::Instruction *, bool>;
using DebugVarMap = llvm::MapVector<const llvm::DILocalVariable *, unsigned>;
using WeakInstValueMap =
    llvm::MapVector<const llvm::Instruction *, llvm::WeakVH>;

/// Used to track the Debug Info Metadata information.
struct DebugInfoPerPass {
  // This maps a function name to its associated DISubprogram.
  DebugFnMap DIFunctions;
  // This maps an instruction to the info about whether it has !dbg attached.
  DebugInstMap DILocations;
  // This tracks value (instruction) deletion. If an instruction gets deleted,
  // WeakVH nulls itself.
  WeakInstValueMap InstToDelete;
  // Maps a variable to the number of its dbg users (dbg.values/dbg.declares
  // for this variable).
  DebugVarMap DIVariables;
};

namespace llvm {
class DIBuilder;

/// Add synthesized debug information to a module.
///
/// \param M The module to add debug information to.
/// \param Functions A range of functions to add debug information to.
/// \param Banner A prefix string to add to debug/error messages.
/// \param ApplyToMF A callback that will add debug information to the
///                  MachineFunction for a Function. If nullptr, then the
///                  MachineFunction (if any) will not be modified.
bool applyDebugifyMetadata(
    Module &M, iterator_range<Module::iterator> Functions, StringRef Banner,
    std::function<bool(DIBuilder &, Function &)> ApplyToMF);

/// Strip out all of the metadata and debug info inserted by debugify. If no
/// llvm.debugify module-level named metadata is present, this is a no-op.
/// Returns true if any change was made.
bool stripDebugifyMetadata(Module &M);

/// Collect original debug information before a pass.
///
/// \param M The module to collect debug information from.
/// \param Functions A range of functions to collect debug information from.
/// \param DebugInfoBeforePass DI metadata before a pass.
/// \param Banner A prefix string to add to debug/error messages.
/// \param NameOfWrappedPass A name of a pass to add to debug/error messages.
bool collectDebugInfoMetadata(Module &M,
                              iterator_range<Module::iterator> Functions,
                              DebugInfoPerPass &DebugInfoBeforePass,
                              StringRef Banner, StringRef NameOfWrappedPass);

/// Check original debug information after a pass.
///
/// \param M The module to collect debug information from.
/// \param Functions A range of functions to collect debug information from.
/// \param DebugInfoBeforePass DI metadata before a pass.
/// \param Banner A prefix string to add to debug/error messages.
/// \param NameOfWrappedPass A name of a pass to add to debug/error messages.
bool checkDebugInfoMetadata(Module &M,
                            iterator_range<Module::iterator> Functions,
                            DebugInfoPerPass &DebugInfoBeforePass,
                            StringRef Banner, StringRef NameOfWrappedPass,
                            StringRef OrigDIVerifyBugsReportFilePath);
} // namespace llvm

/// Used to check whether we track synthetic or original debug info.
enum class DebugifyMode { NoDebugify, SyntheticDebugInfo, OriginalDebugInfo };

llvm::ModulePass *createDebugifyModulePass(
    enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
    llvm::StringRef NameOfWrappedPass = "",
    DebugInfoPerPass *DebugInfoBeforePass = nullptr);
llvm::FunctionPass *createDebugifyFunctionPass(
    enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
    llvm::StringRef NameOfWrappedPass = "",
    DebugInfoPerPass *DebugInfoBeforePass = nullptr);

class NewPMDebugifyPass : public llvm::PassInfoMixin<NewPMDebugifyPass> {
  llvm::StringRef NameOfWrappedPass;
  DebugInfoPerPass *DebugInfoBeforePass = nullptr;
  enum DebugifyMode Mode = DebugifyMode::NoDebugify;
public:
  NewPMDebugifyPass(
      enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
      llvm::StringRef NameOfWrappedPass = "",
      DebugInfoPerPass *DebugInfoBeforePass = nullptr)
      : NameOfWrappedPass(NameOfWrappedPass),
        DebugInfoBeforePass(DebugInfoBeforePass), Mode(Mode) {}

  llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
};

/// Track how much `debugify` information (in the `synthetic` mode only)
/// has been lost.
struct DebugifyStatistics {
  /// Number of missing dbg.values.
  unsigned NumDbgValuesMissing = 0;

  /// Number of dbg.values expected.
  unsigned NumDbgValuesExpected = 0;

  /// Number of instructions with empty debug locations.
  unsigned NumDbgLocsMissing = 0;

  /// Number of instructions expected to have debug locations.
  unsigned NumDbgLocsExpected = 0;

  /// Get the ratio of missing/expected dbg.values.
  float getMissingValueRatio() const {
    return float(NumDbgValuesMissing) / float(NumDbgValuesExpected);
  }

  /// Get the ratio of missing/expected instructions with locations.
  float getEmptyLocationRatio() const {
    return float(NumDbgLocsMissing) / float(NumDbgLocsExpected);
  }
};

/// Map pass names to a per-pass DebugifyStatistics instance.
using DebugifyStatsMap = llvm::MapVector<llvm::StringRef, DebugifyStatistics>;

llvm::ModulePass *createCheckDebugifyModulePass(
    bool Strip = false, llvm::StringRef NameOfWrappedPass = "",
    DebugifyStatsMap *StatsMap = nullptr,
    enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
    DebugInfoPerPass *DebugInfoBeforePass = nullptr,
    llvm::StringRef OrigDIVerifyBugsReportFilePath = "");

llvm::FunctionPass *createCheckDebugifyFunctionPass(
    bool Strip = false, llvm::StringRef NameOfWrappedPass = "",
    DebugifyStatsMap *StatsMap = nullptr,
    enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
    DebugInfoPerPass *DebugInfoBeforePass = nullptr,
    llvm::StringRef OrigDIVerifyBugsReportFilePath = "");

class NewPMCheckDebugifyPass
    : public llvm::PassInfoMixin<NewPMCheckDebugifyPass> {
  llvm::StringRef NameOfWrappedPass;
  llvm::StringRef OrigDIVerifyBugsReportFilePath;
  DebugifyStatsMap *StatsMap;
  DebugInfoPerPass *DebugInfoBeforePass;
  enum DebugifyMode Mode;
  bool Strip;
public:
  NewPMCheckDebugifyPass(
      bool Strip = false, llvm::StringRef NameOfWrappedPass = "",
      DebugifyStatsMap *StatsMap = nullptr,
      enum DebugifyMode Mode = DebugifyMode::SyntheticDebugInfo,
      DebugInfoPerPass *DebugInfoBeforePass = nullptr,
      llvm::StringRef OrigDIVerifyBugsReportFilePath = "")
      : NameOfWrappedPass(NameOfWrappedPass),
        OrigDIVerifyBugsReportFilePath(OrigDIVerifyBugsReportFilePath),
        StatsMap(StatsMap), DebugInfoBeforePass(DebugInfoBeforePass), Mode(Mode),
        Strip(Strip) {}

  llvm::PreservedAnalyses run(llvm::Module &M, llvm::ModuleAnalysisManager &AM);
};

namespace llvm {
void exportDebugifyStats(StringRef Path, const DebugifyStatsMap &Map);

class DebugifyEachInstrumentation {
  llvm::StringRef OrigDIVerifyBugsReportFilePath = "";
  DebugInfoPerPass *DebugInfoBeforePass = nullptr;
  enum DebugifyMode Mode = DebugifyMode::NoDebugify;
  DebugifyStatsMap *DIStatsMap = nullptr;

public:
  void registerCallbacks(PassInstrumentationCallbacks &PIC,
                         ModuleAnalysisManager &MAM);
  // Used within DebugifyMode::SyntheticDebugInfo mode.
  void setDIStatsMap(DebugifyStatsMap &StatMap) { DIStatsMap = &StatMap; }
  const DebugifyStatsMap &getDebugifyStatsMap() const { return *DIStatsMap; }
  // Used within DebugifyMode::OriginalDebugInfo mode.
  void setDebugInfoBeforePass(DebugInfoPerPass &PerPassMap) {
    DebugInfoBeforePass = &PerPassMap;
  }
  DebugInfoPerPass &getDebugInfoPerPass() { return *DebugInfoBeforePass; }

  void setOrigDIVerifyBugsReportFilePath(StringRef BugsReportFilePath) {
    OrigDIVerifyBugsReportFilePath = BugsReportFilePath;
  }
  StringRef getOrigDIVerifyBugsReportFilePath() const {
    return OrigDIVerifyBugsReportFilePath;
  }

  void setDebugifyMode(enum DebugifyMode M) { Mode = M; }

  bool isSyntheticDebugInfo() const {
    return Mode == DebugifyMode::SyntheticDebugInfo;
  }
  bool isOriginalDebugInfoMode() const {
    return Mode == DebugifyMode::OriginalDebugInfo;
  }
};
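
// A minimal sketch of enabling per-pass instrumentation; `PIC` and `MAM` are
// assumed to come from an existing PassBuilder setup:
//
//   DebugifyEachInstrumentation DebugifyEach;
//   DebugifyStatsMap Stats;
//   DebugifyEach.setDIStatsMap(Stats);
//   DebugifyEach.setDebugifyMode(DebugifyMode::SyntheticDebugInfo);
//   DebugifyEach.registerCallbacks(PIC, MAM);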

/// DebugifyCustomPassManager wraps each pass with the debugify passes if
/// needed.
/// NOTE: We support legacy custom pass manager only.
/// TODO: Add New PM support for custom pass manager.
class DebugifyCustomPassManager : public legacy::PassManager {
  StringRef OrigDIVerifyBugsReportFilePath;
  DebugifyStatsMap *DIStatsMap = nullptr;
  DebugInfoPerPass *DebugInfoBeforePass = nullptr;
  enum DebugifyMode Mode = DebugifyMode::NoDebugify;

public:
  using super = legacy::PassManager;

  void add(Pass *P) override {
    // Wrap each pass with (-check)-debugify passes if requested, making
    // exceptions for passes which shouldn't see -debugify instrumentation.
    bool WrapWithDebugify = Mode != DebugifyMode::NoDebugify &&
                            !P->getAsImmutablePass() && !isIRPrintingPass(P) &&
                            !isBitcodeWriterPass(P);
    if (!WrapWithDebugify) {
      super::add(P);
      return;
    }

    // Either apply -debugify/-check-debugify before/after each pass and collect
    // debug info loss statistics, or collect and check original debug info in
    // the optimizations.
    PassKind Kind = P->getPassKind();
    StringRef Name = P->getPassName();

    // TODO: Implement Debugify for LoopPass.
    switch (Kind) {
    case PT_Function:
      super::add(createDebugifyFunctionPass(Mode, Name, DebugInfoBeforePass));
      super::add(P);
      super::add(createCheckDebugifyFunctionPass(
          isSyntheticDebugInfo(), Name, DIStatsMap, Mode, DebugInfoBeforePass,
          OrigDIVerifyBugsReportFilePath));
      break;
    case PT_Module:
      super::add(createDebugifyModulePass(Mode, Name, DebugInfoBeforePass));
      super::add(P);
      super::add(createCheckDebugifyModulePass(
          isSyntheticDebugInfo(), Name, DIStatsMap, Mode, DebugInfoBeforePass,
          OrigDIVerifyBugsReportFilePath));
      break;
    default:
      super::add(P);
      break;
    }
  }

  // Used within DebugifyMode::SyntheticDebugInfo mode.
  void setDIStatsMap(DebugifyStatsMap &StatMap) { DIStatsMap = &StatMap; }
  // Used within DebugifyMode::OriginalDebugInfo mode.
  void setDebugInfoBeforePass(DebugInfoPerPass &PerPassDI) {
    DebugInfoBeforePass = &PerPassDI;
  }
  void setOrigDIVerifyBugsReportFilePath(StringRef BugsReportFilePath) {
    OrigDIVerifyBugsReportFilePath = BugsReportFilePath;
  }
  StringRef getOrigDIVerifyBugsReportFilePath() const {
    return OrigDIVerifyBugsReportFilePath;
  }

  void setDebugifyMode(enum DebugifyMode M) { Mode = M; }

  bool isSyntheticDebugInfo() const {
    return Mode == DebugifyMode::SyntheticDebugInfo;
  }
  bool isOriginalDebugInfoMode() const {
    return Mode == DebugifyMode::OriginalDebugInfo;
  }

  const DebugifyStatsMap &getDebugifyStatsMap() const { return *DIStatsMap; }
  DebugInfoPerPass &getDebugInfoPerPass() { return *DebugInfoBeforePass; }
};
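
// A minimal legacy-PM sketch; `M` is the module under test and the added pass
// is hypothetical (any non-immutable, non-printing pass gets wrapped):
//
//   DebugifyStatsMap DIStats;
//   DebugifyCustomPassManager PM;
//   PM.setDebugifyMode(DebugifyMode::SyntheticDebugInfo);
//   PM.setDIStatsMap(DIStats);
//   PM.add(createSomeLegacyPass());  // hypothetical
//   PM.run(M);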
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_DEBUGIFY_H
//===- SampleProfileLoaderBaseUtil.h - Profile loader util func --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the utility functions for the sampled PGO loader base
/// implementation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SAMPLEPROFILELOADERBASEUTIL_H
#define LLVM_TRANSFORMS_UTILS_SAMPLEPROFILELOADERBASEUTIL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {
using namespace sampleprof;

class ProfileSummaryInfo;
class Module;

extern cl::opt<unsigned> SampleProfileMaxPropagateIterations;
extern cl::opt<unsigned> SampleProfileRecordCoverage;
extern cl::opt<unsigned> SampleProfileSampleCoverage;
extern cl::opt<bool> NoWarnSampleUnused;

namespace sampleprofutil {

class SampleCoverageTracker {
public:
  bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
                       uint32_t Discriminator, uint64_t Samples);
  unsigned computeCoverage(unsigned Used, unsigned Total) const;
  unsigned countUsedRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  unsigned countBodyRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
  uint64_t countBodySamples(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;

  void clear() {
    SampleCoverage.clear();
    TotalUsedSamples = 0;
  }
  void setProfAccForSymsInList(bool V) { ProfAccForSymsInList = V; }

private:
  using BodySampleCoverageMap = std::map<LineLocation, unsigned>;
  using FunctionSamplesCoverageMap =
      DenseMap<const FunctionSamples *, BodySampleCoverageMap>;

  /// Coverage map for sampling records.
  ///
  /// This map keeps a record of sampling records that have been matched to
  /// an IR instruction. This is used to detect some form of staleness in
  /// profiles (see flag -sample-profile-check-coverage).
  ///
  /// Each entry in the map corresponds to a FunctionSamples instance.  This is
  /// another map that counts how many times the sample record at the
  /// given location has been used.
  FunctionSamplesCoverageMap SampleCoverage;

  /// Number of samples used from the profile.
  ///
  /// When a sampling record is used for the first time, the samples from
  /// that record are added to this accumulator.  Coverage is later computed
  /// based on the total number of samples available in this function and
  /// its callsites.
  ///
  /// Note that this accumulator tracks samples used from a single function
  /// and all the inlined callsites. Strictly, we should have a map of counters
  /// keyed by FunctionSamples pointers, but these stats are cleared after
  /// every function, so we just need to keep a single counter.
  uint64_t TotalUsedSamples = 0;

  // For symbols in the profile symbol list, whether to regard their profiles
  // as accurate. This is passed from the SampleLoader instance.
  bool ProfAccForSymsInList = false;
};
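
// A minimal coverage-checking sketch; `FS`, `PSI`, and the location values are
// assumed to come from the surrounding sample loader:
//
//   SampleCoverageTracker Tracker;
//   Tracker.markSamplesUsed(FS, LineOffset, Discriminator, NumSamples);
//   unsigned Used = Tracker.countUsedRecords(FS, PSI);
//   unsigned Total = Tracker.countBodyRecords(FS, PSI);
//   unsigned Coverage = Tracker.computeCoverage(Used, Total);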

/// Return true if the given callsite is hot with respect to the hot cutoff
/// threshold.
bool callsiteIsHot(const FunctionSamples *CallsiteFS, ProfileSummaryInfo *PSI,
                   bool ProfAccForSymsInList);

/// Create a global variable to flag that FSDiscriminators are used.
void createFSDiscriminatorVariable(Module *M);

} // end of namespace sampleprofutil
} // end of namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SAMPLEPROFILELOADERBASEUTIL_H
//===- Transforms/Utils/SampleProfileInference.h ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for the profile inference algorithm, profi.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SAMPLEPROFILEINFERENCE_H
#define LLVM_TRANSFORMS_UTILS_SAMPLEPROFILEINFERENCE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {

struct FlowJump;

/// A wrapper of a binary basic block.
struct FlowBlock {
  uint64_t Index;
  uint64_t Weight{0};
  bool HasUnknownWeight{true};
  bool IsUnlikely{false};
  uint64_t Flow{0};
  std::vector<FlowJump *> SuccJumps;
  std::vector<FlowJump *> PredJumps;

  /// Check if it is the entry block in the function.
  bool isEntry() const { return PredJumps.empty(); }

  /// Check if it is an exit block in the function.
  bool isExit() const { return SuccJumps.empty(); }
};

/// A wrapper of a jump between two basic blocks.
struct FlowJump {
  uint64_t Source;
  uint64_t Target;
  uint64_t Weight{0};
  bool HasUnknownWeight{true};
  bool IsUnlikely{false};
  uint64_t Flow{0};
};

/// A wrapper of binary function with basic blocks and jumps.
struct FlowFunction {
  /// Basic blocks in the function.
  std::vector<FlowBlock> Blocks;
  /// Jumps between the basic blocks.
  std::vector<FlowJump> Jumps;
  /// The index of the entry block.
  uint64_t Entry{0};
};

/// Various thresholds and options controlling the behavior of the profile
/// inference algorithm. Default values are tuned for several large-scale
/// applications, and can be modified via corresponding command-line flags.
struct ProfiParams {
  /// Evenly distribute flow when there are multiple equally likely options.
  bool EvenFlowDistribution{false};

  /// Evenly re-distribute flow among unknown subgraphs.
  bool RebalanceUnknown{false};

  /// Join isolated components having positive flow.
  bool JoinIslands{false};

  /// The cost of increasing a block's count by one.
  unsigned CostBlockInc{0};

  /// The cost of decreasing a block's count by one.
  unsigned CostBlockDec{0};

  /// The cost of increasing a count of zero-weight block by one.
  unsigned CostBlockZeroInc{0};

  /// The cost of increasing the entry block's count by one.
  unsigned CostBlockEntryInc{0};

  /// The cost of decreasing the entry block's count by one.
  unsigned CostBlockEntryDec{0};

  /// The cost of increasing an unknown block's count by one.
  unsigned CostBlockUnknownInc{0};

  /// The cost of increasing a jump's count by one.
  unsigned CostJumpInc{0};

  /// The cost of increasing a fall-through jump's count by one.
  unsigned CostJumpFTInc{0};

  /// The cost of decreasing a jump's count by one.
  unsigned CostJumpDec{0};

  /// The cost of decreasing a fall-through jump's count by one.
  unsigned CostJumpFTDec{0};

  /// The cost of increasing an unknown jump's count by one.
  unsigned CostJumpUnknownInc{0};

  /// The cost of increasing an unknown fall-through jump's count by one.
  unsigned CostJumpUnknownFTInc{0};

  /// The cost of taking an unlikely block/jump.
  const int64_t CostUnlikely = ((int64_t)1) << 30;
};

void applyFlowInference(const ProfiParams &Params, FlowFunction &Func);
void applyFlowInference(FlowFunction &Func);
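
// A minimal sketch of running the inference on a hand-built two-block
// function; all values below are illustrative:
//
//   FlowFunction Func;
//   Func.Blocks.resize(2);
//   Func.Blocks[0].Index = 0;  // entry block, weight unknown
//   Func.Blocks[1].Index = 1;  // exit block with a sampled weight
//   Func.Blocks[1].Weight = 100;
//   Func.Blocks[1].HasUnknownWeight = false;
//   FlowJump Jump;
//   Jump.Source = 0;
//   Jump.Target = 1;
//   Func.Jumps.push_back(Jump);
//   Func.Blocks[0].SuccJumps.push_back(&Func.Jumps[0]);
//   Func.Blocks[1].PredJumps.push_back(&Func.Jumps[0]);
//   applyFlowInference(Func);  // fills in Blocks[...].Flow and Jumps[...].Flow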

/// Sample profile inference pass.
template <typename FT> class SampleProfileInference {
public:
  using NodeRef = typename GraphTraits<FT *>::NodeRef;
  using BasicBlockT = typename std::remove_pointer<NodeRef>::type;
  using FunctionT = FT;
  using Edge = std::pair<const BasicBlockT *, const BasicBlockT *>;
  using BlockWeightMap = DenseMap<const BasicBlockT *, uint64_t>;
  using EdgeWeightMap = DenseMap<Edge, uint64_t>;
  using BlockEdgeMap =
      DenseMap<const BasicBlockT *, SmallVector<const BasicBlockT *, 8>>;

  SampleProfileInference(FunctionT &F, BlockEdgeMap &Successors,
                         BlockWeightMap &SampleBlockWeights)
      : F(F), Successors(Successors), SampleBlockWeights(SampleBlockWeights) {}

  /// Apply the profile inference algorithm for a given function
  void apply(BlockWeightMap &BlockWeights, EdgeWeightMap &EdgeWeights);

private:
  /// Initialize flow function blocks, jumps and misc metadata.
  FlowFunction
  createFlowFunction(const std::vector<const BasicBlockT *> &BasicBlocks,
                     DenseMap<const BasicBlockT *, uint64_t> &BlockIndex);

  /// Try to infer branch probabilities, mimicking the implementation of
  /// BranchProbabilityInfo. Unlikely taken branches are marked so that the
  /// inference algorithm can avoid sending flow along corresponding edges.
  void findUnlikelyJumps(const std::vector<const BasicBlockT *> &BasicBlocks,
                         BlockEdgeMap &Successors, FlowFunction &Func);

  /// Determine whether the block is an exit in the CFG.
  bool isExit(const BasicBlockT *BB);

  /// Function.
  const FunctionT &F;

  /// Successors for each basic block in the CFG.
  BlockEdgeMap &Successors;

  /// Map basic blocks to their sampled weights.
  BlockWeightMap &SampleBlockWeights;
};
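
// A minimal usage sketch for IR functions; `F`, `Successors`, and
// `SampleBlockWeights` are assumed to be prepared by the profile loader:
//
//   using Inference = SampleProfileInference<Function>;
//   Inference::BlockWeightMap BlockWeights;
//   Inference::EdgeWeightMap EdgeWeights;
//   Inference Infer(F, Successors, SampleBlockWeights);
//   Infer.apply(BlockWeights, EdgeWeights);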

template <typename BT>
void SampleProfileInference<BT>::apply(BlockWeightMap &BlockWeights,
                                       EdgeWeightMap &EdgeWeights) {
  // Find all forward-reachable blocks to which the inference algorithm will be
  // applied.
  df_iterator_default_set<const BasicBlockT *> Reachable;
  for (auto *BB : depth_first_ext(&F, Reachable))
    (void)BB /* Mark all reachable blocks */;

  // Find all backward-reachable blocks to which the inference algorithm will
  // be applied.
  df_iterator_default_set<const BasicBlockT *> InverseReachable;
  for (const auto &BB : F) {
    // An exit block is a block without any successors.
    if (isExit(&BB)) {
      for (auto *RBB : inverse_depth_first_ext(&BB, InverseReachable))
        (void)RBB;
    }
  }

  // Keep a stable order for reachable blocks
  DenseMap<const BasicBlockT *, uint64_t> BlockIndex;
  std::vector<const BasicBlockT *> BasicBlocks;
  BlockIndex.reserve(Reachable.size());
  BasicBlocks.reserve(Reachable.size());
  for (const auto &BB : F) {
    if (Reachable.count(&BB) && InverseReachable.count(&BB)) {
      BlockIndex[&BB] = BasicBlocks.size();
      BasicBlocks.push_back(&BB);
    }
  }

  BlockWeights.clear();
  EdgeWeights.clear();
  bool HasSamples = false;
  for (const auto *BB : BasicBlocks) {
    auto It = SampleBlockWeights.find(BB);
    if (It != SampleBlockWeights.end() && It->second > 0) {
      HasSamples = true;
      BlockWeights[BB] = It->second;
    }
  }
  // Quit early for functions with a single block or ones w/o samples
  if (BasicBlocks.size() <= 1 || !HasSamples) {
    return;
  }

  // Create necessary objects
  FlowFunction Func = createFlowFunction(BasicBlocks, BlockIndex);

  // Create and apply the inference network model.
  applyFlowInference(Func);

  // Extract the resulting weights from the control flow
  // All weights are increased by one to avoid propagation errors introduced by
  // zero weights.
  for (const auto *BB : BasicBlocks) {
    BlockWeights[BB] = Func.Blocks[BlockIndex[BB]].Flow;
  }
  for (auto &Jump : Func.Jumps) {
    Edge E = std::make_pair(BasicBlocks[Jump.Source], BasicBlocks[Jump.Target]);
    EdgeWeights[E] = Jump.Flow;
  }

#ifndef NDEBUG
  // Unreachable blocks and edges should not have a weight.
  for (auto &I : BlockWeights) {
    assert(Reachable.contains(I.first));
    assert(InverseReachable.contains(I.first));
  }
  for (auto &I : EdgeWeights) {
    assert(Reachable.contains(I.first.first) &&
           Reachable.contains(I.first.second));
    assert(InverseReachable.contains(I.first.first) &&
           InverseReachable.contains(I.first.second));
  }
#endif
}

template <typename BT>
FlowFunction SampleProfileInference<BT>::createFlowFunction(
    const std::vector<const BasicBlockT *> &BasicBlocks,
    DenseMap<const BasicBlockT *, uint64_t> &BlockIndex) {
  FlowFunction Func;
  Func.Blocks.reserve(BasicBlocks.size());
  // Create FlowBlocks
  for (const auto *BB : BasicBlocks) {
    FlowBlock Block;
    if (SampleBlockWeights.find(BB) != SampleBlockWeights.end()) {
      Block.HasUnknownWeight = false;
      Block.Weight = SampleBlockWeights[BB];
    } else {
      Block.HasUnknownWeight = true;
      Block.Weight = 0;
    }
    Block.Index = Func.Blocks.size();
    Func.Blocks.push_back(Block);
  }
  // Create FlowJumps
  for (const auto *BB : BasicBlocks) {
    for (auto *Succ : Successors[BB]) {
      if (!BlockIndex.count(Succ))
        continue;
      FlowJump Jump;
      Jump.Source = BlockIndex[BB];
      Jump.Target = BlockIndex[Succ];
      Func.Jumps.push_back(Jump);
    }
  }
  for (auto &Jump : Func.Jumps) {
    uint64_t Src = Jump.Source;
    uint64_t Dst = Jump.Target;
    Func.Blocks[Src].SuccJumps.push_back(&Jump);
    Func.Blocks[Dst].PredJumps.push_back(&Jump);
  }

  // Try to infer probabilities of jumps based on the content of basic blocks
  findUnlikelyJumps(BasicBlocks, Successors, Func);

  // Find the entry block
  for (size_t I = 0; I < Func.Blocks.size(); I++) {
    if (Func.Blocks[I].isEntry()) {
      Func.Entry = I;
      break;
    }
  }
  assert(Func.Entry == 0 && "incorrect index of the entry block");

  // Pre-process data: make sure the entry weight is at least 1
  auto &EntryBlock = Func.Blocks[Func.Entry];
  if (EntryBlock.Weight == 0 && !EntryBlock.HasUnknownWeight) {
    EntryBlock.Weight = 1;
    EntryBlock.HasUnknownWeight = false;
  }

  return Func;
}

template <typename BT>
inline void SampleProfileInference<BT>::findUnlikelyJumps(
    const std::vector<const BasicBlockT *> &BasicBlocks,
    BlockEdgeMap &Successors, FlowFunction &Func) {}

template <typename BT>
inline bool SampleProfileInference<BT>::isExit(const BasicBlockT *BB) {
  return BB->succ_empty();
}

} // end namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_SAMPLEPROFILEINFERENCE_H
//===-- NameAnonGlobals.h - Anonymous Global Naming Pass --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements naming anonymous globals to make sure they can be
// referred to by ThinLTO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
#define LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Simple pass that provides a name to every anonymous global.
class NameAnonGlobalPass : public PassInfoMixin<NameAnonGlobalPass> {
public:
  NameAnonGlobalPass() = default;

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_NAMEANONGLOBALS_H
//===- BreakCriticalEdges.h - Critical Edge Elimination Pass --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// BreakCriticalEdges pass - Break all of the critical edges in the CFG by
// inserting a dummy basic block.  This pass may be "required" by passes that
// cannot deal with critical edges.  For this usage, the structure type is
// forward declared.  This pass obviously invalidates the CFG, but can update
// dominator trees.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
#define LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;
struct BreakCriticalEdgesPass : public PassInfoMixin<BreakCriticalEdgesPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_BREAKCRITICALEDGES_H
//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MapValue interface which is used by various parts of
// the Transforms/Utils library to implement cloning and linking facilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"

namespace llvm {

class Constant;
class Function;
class GlobalVariable;
class Instruction;
class MDNode;
class Metadata;
class Type;
class Value;

using ValueToValueMapTy = ValueMap<const Value *, WeakTrackingVH>;

/// This is a class that can be implemented by clients to remap types when
/// cloning constants and instructions.
class ValueMapTypeRemapper {
  virtual void anchor(); // Out of line method.

public:
  virtual ~ValueMapTypeRemapper() = default;

  /// The client should implement this method if they want to remap types while
  /// mapping values.
  virtual Type *remapType(Type *SrcTy) = 0;
};

/// This is a class that can be implemented by clients to materialize Values on
/// demand.
class ValueMaterializer {
  virtual void anchor(); // Out of line method.

protected:
  ValueMaterializer() = default;
  ValueMaterializer(const ValueMaterializer &) = default;
  ValueMaterializer &operator=(const ValueMaterializer &) = default;
  ~ValueMaterializer() = default;

public:
  /// This method can be implemented to generate a mapped Value on demand. For
  /// example, if linking lazily. Returns null if the value is not materialized.
  virtual Value *materialize(Value *V) = 0;
};

/// These are flags that the value mapping APIs allow.
enum RemapFlags {
  RF_None = 0,

  /// If this flag is set, the remapper knows that only local values within a
  /// function (such as an instruction or argument) are mapped, not global
  /// values like functions and global metadata.
  RF_NoModuleLevelChanges = 1,

  /// If this flag is set, the remapper ignores missing function-local entries
  /// (Argument, Instruction, BasicBlock) that are not in the value map.  If it
  /// is unset, it aborts if an operand is asked to be remapped which doesn't
  /// exist in the mapping.
  ///
  /// There are no such assertions in MapValue(), whose results are almost
  /// unchanged by this flag.  This flag mainly changes the assertion behaviour
  /// in RemapInstruction().
  ///
  /// Since an Instruction's metadata operands (even those that point to SSA
  /// values)
  /// aren't guaranteed to be dominated by their definitions, MapMetadata will
  /// return "!{}" instead of "null" for \a LocalAsMetadata instances whose SSA
  /// values are unmapped when this flag is set.  Otherwise, \a MapValue()
  /// completely ignores this flag.
  ///
  /// \a MapMetadata() always ignores this flag.
  RF_IgnoreMissingLocals = 2,

  /// Instruct the remapper to reuse and mutate distinct metadata (remapping
  /// them in place) instead of cloning remapped copies. This flag has no
  /// effect when RF_NoModuleLevelChanges is set, since that implies an identity
  /// mapping.
  RF_ReuseAndMutateDistinctMDs = 4,

  /// Any global values not in value map are mapped to null instead of mapping
  /// to self.  Illegal if RF_IgnoreMissingLocals is also set.
  RF_NullMapMissingGlobalValues = 8,
};

inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
  return RemapFlags(unsigned(LHS) | unsigned(RHS));
}
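
// For example, flags compose with the operator above:
//
//   RemapFlags Flags = RF_NoModuleLevelChanges | RF_IgnoreMissingLocals;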

/// Context for (re-)mapping values (and metadata).
///
/// A shared context used for mapping and remapping of Value and Metadata
/// instances using \a ValueToValueMapTy, \a RemapFlags, \a
/// ValueMapTypeRemapper, and \a ValueMaterializer.
///
/// There are a number of top-level entry points:
/// - \a mapValue() (and \a mapConstant());
/// - \a mapMetadata() (and \a mapMDNode());
/// - \a remapInstruction();
/// - \a remapFunction(); and
/// - \a remapGlobalObjectMetadata().
///
/// The \a ValueMaterializer can be used as a callback, but cannot invoke any
/// of these top-level functions recursively.  Instead, callbacks should use
/// one of the following to schedule work lazily in the \a ValueMapper
/// instance:
/// - \a scheduleMapGlobalInitializer()
/// - \a scheduleMapAppendingVariable()
/// - \a scheduleMapGlobalAlias()
/// - \a scheduleMapGlobalIFunc()
/// - \a scheduleRemapFunction()
///
/// Sometimes a callback needs a different mapping context.  Such a context can
/// be registered using \a registerAlternateMappingContext(), which takes an
/// alternate \a ValueToValueMapTy and \a ValueMaterializer and returns an ID to
/// pass into the schedule*() functions.
///
/// TODO: lib/Linker really doesn't need the \a ValueHandle in the \a
/// ValueToValueMapTy.  We should template \a ValueMapper (and its
/// implementation classes), and explicitly instantiate on two concrete
/// instances of \a ValueMap (one as \a ValueToValueMap, and one with raw \a
/// Value pointers).  It may be viable to do away with \a TrackingMDRef in the
/// \a Metadata side map for the lib/Linker case as well, in which case we'll
/// need a new template parameter on \a ValueMap.
///
/// TODO: Update callers of \a RemapInstruction() and \a MapValue() (etc.) to
/// use \a ValueMapper directly.
class ValueMapper {
  void *pImpl;

public:
  ValueMapper(ValueToValueMapTy &VM, RemapFlags Flags = RF_None,
              ValueMapTypeRemapper *TypeMapper = nullptr,
              ValueMaterializer *Materializer = nullptr);
  ValueMapper(ValueMapper &&) = delete;
  ValueMapper(const ValueMapper &) = delete;
  ValueMapper &operator=(ValueMapper &&) = delete;
  ValueMapper &operator=(const ValueMapper &) = delete;
  ~ValueMapper();

  /// Register an alternate mapping context.
  ///
  /// Returns a MappingContextID that can be used with the various schedule*()
  /// API to switch in a different value map on-the-fly.
  unsigned
  registerAlternateMappingContext(ValueToValueMapTy &VM,
                                  ValueMaterializer *Materializer = nullptr);

  /// Add to the current \a RemapFlags.
  ///
  /// \note Like the top-level mapping functions, \a addFlags() must be called
  /// at the top level, not during a callback in a \a ValueMaterializer.
  void addFlags(RemapFlags Flags);

  Metadata *mapMetadata(const Metadata &MD);
  MDNode *mapMDNode(const MDNode &N);

  Value *mapValue(const Value &V);
  Constant *mapConstant(const Constant &C);

  void remapInstruction(Instruction &I);
  void remapFunction(Function &F);
  void remapGlobalObjectMetadata(GlobalObject &GO);

  void scheduleMapGlobalInitializer(GlobalVariable &GV, Constant &Init,
                                    unsigned MappingContextID = 0);
  void scheduleMapAppendingVariable(GlobalVariable &GV, Constant *InitPrefix,
                                    bool IsOldCtorDtor,
                                    ArrayRef<Constant *> NewMembers,
                                    unsigned MappingContextID = 0);
  void scheduleMapGlobalAlias(GlobalAlias &GA, Constant &Aliasee,
                              unsigned MappingContextID = 0);
  void scheduleMapGlobalIFunc(GlobalIFunc &GI, Constant &Resolver,
                              unsigned MappingContextID = 0);
  void scheduleRemapFunction(Function &F, unsigned MappingContextID = 0);
};

/// Look up or compute a value in the value map.
///
/// Return a mapped value for a function-local value (Argument, Instruction,
/// BasicBlock), or compute and memoize a value for a Constant.
///
///  1. If \c V is in VM, return the result.
///  2. Else if \c V can be materialized with \c Materializer, do so, memoize
///     it in \c VM, and return it.
///  3. Else if \c V is a function-local value, return nullptr.
///  4. Else if \c V is a \a GlobalValue, return \c nullptr or \c V depending
///     on \a RF_NullMapMissingGlobalValues.
///  5. Else if \c V is a \a MetadataAsValue wrapping a LocalAsMetadata,
///     recurse on the local SSA value, and return nullptr or "metadata !{}" on
///     missing depending on RF_IgnoreMissingLocals.
///  6. Else if \c V is a \a MetadataAsValue, rewrap the return of \a
///     MapMetadata().
///  7. Else, compute the equivalent constant, and return it.
inline Value *MapValue(const Value *V, ValueToValueMapTy &VM,
                       RemapFlags Flags = RF_None,
                       ValueMapTypeRemapper *TypeMapper = nullptr,
                       ValueMaterializer *Materializer = nullptr) {
  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapValue(*V);
}

/// Lookup or compute a mapping for a piece of metadata.
///
/// Compute and memoize a mapping for \c MD.
///
///  1. If \c MD is mapped, return it.
///  2. Else if \a RF_NoModuleLevelChanges or \c MD is an \a MDString, return
///     \c MD.
///  3. Else if \c MD is a \a ConstantAsMetadata, call \a MapValue() and
///     re-wrap its return (returning nullptr on nullptr).
///  4. Else, \c MD is an \a MDNode.  These are remapped, along with their
///     transitive operands.  Distinct nodes are mutated in place or cloned
///     depending on \a RF_ReuseAndMutateDistinctMDs.  Uniqued nodes are
///     remapped like constants.
///
/// \note \a LocalAsMetadata is completely unsupported by \a MapMetadata.
/// Instead, use \a MapValue() with its wrapping \a MetadataAsValue instance.
inline Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
                             RemapFlags Flags = RF_None,
                             ValueMapTypeRemapper *TypeMapper = nullptr,
                             ValueMaterializer *Materializer = nullptr) {
  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMetadata(*MD);
}

/// Version of MapMetadata with type safety for MDNode.
inline MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
                           RemapFlags Flags = RF_None,
                           ValueMapTypeRemapper *TypeMapper = nullptr,
                           ValueMaterializer *Materializer = nullptr) {
  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapMDNode(*MD);
}

/// Convert the instruction operands from referencing the current values into
/// those specified by VM.
///
/// If \a RF_IgnoreMissingLocals is set and an operand can't be found via \a
/// MapValue(), use the old value.  Otherwise assert that this doesn't happen.
///
/// Note that \a MapValue() only returns \c nullptr for SSA values missing from
/// \c VM.
inline void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
                             RemapFlags Flags = RF_None,
                             ValueMapTypeRemapper *TypeMapper = nullptr,
                             ValueMaterializer *Materializer = nullptr) {
  ValueMapper(VM, Flags, TypeMapper, Materializer).remapInstruction(*I);
}
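
// A minimal clone-and-remap sketch; `ClonedBB` is assumed to hold cloned
// instructions whose old-to-new mapping was recorded in `VM`:
//
//   ValueToValueMapTy VM;
//   // ... clone instructions and record VM[Old] = New ...
//   for (Instruction &I : *ClonedBB)
//     RemapInstruction(&I, VM, RF_IgnoreMissingLocals);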

/// Remap the operands, metadata, arguments, and instructions of a function.
///
/// Calls \a MapValue() on prefix data, prologue data, and personality
/// function; calls \a MapMetadata() on each attached MDNode; remaps the
/// argument types using the provided \c TypeMapper; and calls \a
/// RemapInstruction() on every instruction.
inline void RemapFunction(Function &F, ValueToValueMapTy &VM,
                          RemapFlags Flags = RF_None,
                          ValueMapTypeRemapper *TypeMapper = nullptr,
                          ValueMaterializer *Materializer = nullptr) {
  ValueMapper(VM, Flags, TypeMapper, Materializer).remapFunction(F);
}

/// Version of MapValue with type safety for Constant.
inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
                          RemapFlags Flags = RF_None,
                          ValueMapTypeRemapper *TypeMapper = nullptr,
                          ValueMaterializer *Materializer = nullptr) {
  return ValueMapper(VM, Flags, TypeMapper, Materializer).mapConstant(*V);
}

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
//===-- GuardUtils.h - Utils for work with guards ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Utils that are used to perform transformations related to guards and their
// conditions.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
#define LLVM_TRANSFORMS_UTILS_GUARDUTILS_H

namespace llvm {

class BranchInst;
class CallInst;
class Function;
class Value;

/// Splits control flow at the point of \p Guard, replacing it with an explicit
/// branch on the condition of the guard's first argument. The taken branch
/// then goes to the block that contains \p Guard's successors, and the
/// non-taken branch goes to a newly-created deopt block that contains a sole
/// call of the deoptimize function \p DeoptIntrinsic.  If 'UseWC' is set,
/// preserve the widenable nature of the guard by lowering to the equivalent
/// form.  If not set, lower to a form without widenable semantics.
void makeGuardControlFlowExplicit(Function *DeoptIntrinsic, CallInst *Guard,
                                  bool UseWC);

/// Given a branch we know is widenable (defined per Analysis/GuardUtils.h),
/// widen it such that condition 'NewCond' is also known to hold on the taken
/// path.  Branch remains widenable after transform.
void widenWidenableBranch(BranchInst *WidenableBR, Value *NewCond);

/// Given a branch we know is widenable (defined per Analysis/GuardUtils.h),
/// *set* its condition such that (only) 'Cond' is known to hold on the taken
/// path and that the branch remains widenable after transform.
void setWidenableBranchCond(BranchInst *WidenableBR, Value *Cond);
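
// A minimal widening sketch; `WidenableBR` and `NewCond` are assumed to
// satisfy the preconditions documented above:
//
//   if (isWidenableBranch(WidenableBR))  // from Analysis/GuardUtils.h
//     widenWidenableBranch(WidenableBR, NewCond);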

} // llvm

#endif // LLVM_TRANSFORMS_UTILS_GUARDUTILS_H
//===- Transforms/Utils/CodeMoverUtils.h - CodeMover Utils ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions determines whether it is safe to move basic blocks
// and instructions contained within a function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
#define LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H

namespace llvm {

class BasicBlock;
class DependenceInfo;
class DominatorTree;
class Instruction;
class PostDominatorTree;

/// Return true if \p I0 and \p I1 are control flow equivalent.
/// Two instructions are control flow equivalent if their basic blocks are
/// control flow equivalent.
bool isControlFlowEquivalent(const Instruction &I0, const Instruction &I1,
                             const DominatorTree &DT,
                             const PostDominatorTree &PDT);

/// Return true if \p BB0 and \p BB1 are control flow equivalent.
/// Two basic blocks are control flow equivalent if when one executes, the other
/// is guaranteed to execute.
bool isControlFlowEquivalent(const BasicBlock &BB0, const BasicBlock &BB1,
                             const DominatorTree &DT,
                             const PostDominatorTree &PDT);

/// Return true if \p I can be safely moved before \p InsertPoint.
bool isSafeToMoveBefore(Instruction &I, Instruction &InsertPoint,
                        DominatorTree &DT,
                        const PostDominatorTree *PDT = nullptr,
                        DependenceInfo *DI = nullptr,
                        bool CheckForEntireBlock = false);

/// Return true if all instructions (except the terminator) in \p BB can be
/// safely moved before \p InsertPoint.
bool isSafeToMoveBefore(BasicBlock &BB, Instruction &InsertPoint,
                        DominatorTree &DT,
                        const PostDominatorTree *PDT = nullptr,
                        DependenceInfo *DI = nullptr);

/// Move instructions, in an order-preserving manner, from \p FromBB to the
/// beginning of \p ToBB when proven safe.
void moveInstructionsToTheBeginning(BasicBlock &FromBB, BasicBlock &ToBB,
                                    DominatorTree &DT,
                                    const PostDominatorTree &PDT,
                                    DependenceInfo &DI);

/// Move instructions, in an order-preserving manner, from \p FromBB to the end
/// of \p ToBB when proven safe.
void moveInstructionsToTheEnd(BasicBlock &FromBB, BasicBlock &ToBB,
                              DominatorTree &DT, const PostDominatorTree &PDT,
                              DependenceInfo &DI);

/// In case that two BBs \p ThisBlock and \p OtherBlock are control flow
/// equivalent but they do not strictly dominate and post-dominate each
/// other, we determine if \p ThisBlock is reached after \p OtherBlock
/// in the control flow.
bool nonStrictlyPostDominate(const BasicBlock *ThisBlock,
                             const BasicBlock *OtherBlock,
                             const DominatorTree *DT,
                             const PostDominatorTree *PDT);

// Check if I0 is reached before I1 in the control flow.
bool isReachedBefore(const Instruction *I0, const Instruction *I1,
                     const DominatorTree *DT, const PostDominatorTree *PDT);
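
// A minimal sketch of the check-then-move pattern these utilities enable;
// `I`, `InsertPoint`, `DT`, `PDT`, and `DI` are assumed to exist:
//
//   if (isSafeToMoveBefore(I, InsertPoint, DT, &PDT, &DI))
//     I.moveBefore(&InsertPoint);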

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CODEMOVERUTILS_H
//===- LowerAtomic.h - Lower atomic intrinsics ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass lowers atomic intrinsics to non-atomic form for use in a known
/// non-preemptible environment.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOWERATOMIC_H
#define LLVM_TRANSFORMS_UTILS_LOWERATOMIC_H

#include "llvm/IR/Instructions.h"

namespace llvm {

class IRBuilderBase;

/// Convert the given Cmpxchg into primitive load and compare.
bool lowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI);

/// Convert the given RMWI into primitive load and stores,
/// assuming that doing so is legal. Return true if the lowering
/// succeeds.
bool lowerAtomicRMWInst(AtomicRMWInst *RMWI);

/// Emit IR to implement the given atomicrmw operation on values in registers,
/// returning the new value.
Value *buildAtomicRMWValue(AtomicRMWInst::BinOp Op, IRBuilderBase &Builder,
                           Value *Loaded, Value *Val);
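
// A minimal lowering sketch for a known non-preemptible environment; `I` is
// assumed to be the instruction being visited:
//
//   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(&I))
//     lowerAtomicCmpXchgInst(CXI);
//   else if (auto *RMWI = dyn_cast<AtomicRMWInst>(&I))
//     lowerAtomicRMWInst(RMWI);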
}

#endif // LLVM_TRANSFORMS_UTILS_LOWERATOMIC_H
//===- EntryExitInstrumenter.h - Function Entry/Exit Instrumentation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// EntryExitInstrumenter pass - Instrument function entry/exit with calls to
// mcount(), @__cyg_profile_func_{enter,exit} and the like. There are two
// variants, intended to run pre- and post-inlining, respectively.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
#define LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct EntryExitInstrumenterPass
    : public PassInfoMixin<EntryExitInstrumenterPass> {
  EntryExitInstrumenterPass(bool PostInlining) : PostInlining(PostInlining) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  bool PostInlining;

  static bool isRequired() { return true; }
};
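
// A minimal pipeline sketch; the pre-inlining variant typically runs early and
// the post-inlining variant late, `FPM` is illustrative:
//
//   llvm::FunctionPassManager FPM;
//   FPM.addPass(EntryExitInstrumenterPass(/*PostInlining=*/false));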

} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_ENTRYEXITINSTRUMENTER_H
//===-- CanonicalizeAliases.h - Alias Canonicalization Pass -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file canonicalizes aliases.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_CANONICALIZEALIASES_H
#define LLVM_TRANSFORMS_UTILS_CANONICALIZEALIASES_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Simple pass that canonicalizes aliases.
class CanonicalizeAliasesPass : public PassInfoMixin<CanonicalizeAliasesPass> {
public:
  CanonicalizeAliasesPass() = default;

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_CANONICALIZEALIASES_H
//===- SanitizerStats.h - Sanitizer statistics gathering --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares functions and data structures for sanitizer statistics gathering.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H
#define LLVM_TRANSFORMS_UTILS_SANITIZERSTATS_H

#include "llvm/IR/IRBuilder.h"

namespace llvm {

// Number of bits in data that are used for the sanitizer kind. Needs to match
// __sanitizer::kKindBits in compiler-rt/lib/stats/stats.h
enum { kSanitizerStatKindBits = 3 };

enum SanitizerStatKind {
  SanStat_CFI_VCall,
  SanStat_CFI_NVCall,
  SanStat_CFI_DerivedCast,
  SanStat_CFI_UnrelatedCast,
  SanStat_CFI_ICall,
};

struct SanitizerStatReport {
  SanitizerStatReport(Module *M);

  /// Generates code into B that increments a location-specific counter tagged
  /// with the given sanitizer kind SK.
  void create(IRBuilder<> &B, SanitizerStatKind SK);

  /// Finalize module stats array and add global constructor to register it.
  void finish();

private:
  Module *M;
  GlobalVariable *ModuleStatsGV;
  ArrayType *StatTy;
  StructType *EmptyModuleStatsTy;

  std::vector<Constant *> Inits;
  ArrayType *makeModuleStatsArrayTy();
  StructType *makeModuleStatsTy();
};
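
// A minimal emission sketch; `M` is the current module and `B` an IRBuilder
// positioned at the instrumentation point (both assumed):
//
//   SanitizerStatReport StatReport(&M);
//   StatReport.create(B, SanStat_CFI_VCall);  // bump a per-location counter
//   // ... after all instrumentation is emitted ...
//   StatReport.finish();  // finalize the stats array and its constructor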

}

#endif
//===- MemoryOpRemark.h - Memory operation remark analysis -*- C++ ------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provide more information about instructions that copy, move, or initialize
// memory, including those with an "auto-init" !annotation metadata.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MEMORYOPREMARK_H
#define LLVM_TRANSFORMS_UTILS_MEMORYOPREMARK_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include <optional>

namespace llvm {

class CallInst;
class DataLayout;
class DiagnosticInfoIROptimization;
class Instruction;
class IntrinsicInst;
class Value;
class OptimizationRemarkEmitter;
class StoreInst;

// FIXME: Once we get to more remarks like this one, we need to re-evaluate how
// much of this logic should actually go into the remark emitter.
struct MemoryOpRemark {
  OptimizationRemarkEmitter &ORE;
  StringRef RemarkPass;
  const DataLayout &DL;
  const TargetLibraryInfo &TLI;

  MemoryOpRemark(OptimizationRemarkEmitter &ORE, StringRef RemarkPass,
                 const DataLayout &DL, const TargetLibraryInfo &TLI)
      : ORE(ORE), RemarkPass(RemarkPass), DL(DL), TLI(TLI) {}

  virtual ~MemoryOpRemark();

  /// \return true iff the instruction is understood by MemoryOpRemark.
  static bool canHandle(const Instruction *I, const TargetLibraryInfo &TLI);

  void visit(const Instruction *I);

protected:
  virtual std::string explainSource(StringRef Type) const;

  enum RemarkKind { RK_Store, RK_Unknown, RK_IntrinsicCall, RK_Call };
  virtual StringRef remarkName(RemarkKind RK) const;

  virtual DiagnosticKind diagnosticKind() const { return DK_OptimizationRemarkAnalysis; }

private:
  template<typename ...Ts>
  std::unique_ptr<DiagnosticInfoIROptimization> makeRemark(Ts... Args);

  /// Emit a remark using information from the store's destination, size, etc.
  void visitStore(const StoreInst &SI);
  /// Emit a generic auto-init remark.
  void visitUnknown(const Instruction &I);
  /// Emit a remark using information from known intrinsic calls.
  void visitIntrinsicCall(const IntrinsicInst &II);
  /// Emit a remark using information from known function calls.
  void visitCall(const CallInst &CI);

  /// Add callee information to a remark: whether it's known, the function name,
  /// etc.
  template <typename FTy>
  void visitCallee(FTy F, bool KnownLibCall, DiagnosticInfoIROptimization &R);
  /// Add operand information to a remark based on knowledge we have for known
  /// libcalls.
  void visitKnownLibCall(const CallInst &CI, LibFunc LF,
                         DiagnosticInfoIROptimization &R);
  /// Add the memory operation size to a remark.
  void visitSizeOperand(Value *V, DiagnosticInfoIROptimization &R);

  struct VariableInfo {
    std::optional<StringRef> Name;
    std::optional<uint64_t> Size;
    bool isEmpty() const { return !Name && !Size; }
  };
  /// Gather more information about \p V as a variable. This can be debug info,
  /// information from the alloca, etc. Since \p V can represent more than a
  /// single variable, they will all be added to the remark.
  void visitPtr(Value *V, bool IsSrc, DiagnosticInfoIROptimization &R);
  void visitVariable(const Value *V, SmallVectorImpl<VariableInfo> &Result);
};
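
// A minimal sketch of emitting remarks while walking a function; `ORE`, `DL`,
// `TLI`, and the remark pass name are assumed to come from the surrounding
// pass:
//
//   MemoryOpRemark R(ORE, "memory-op-remarks", DL, TLI);
//   for (const Instruction &I : instructions(F))
//     if (MemoryOpRemark::canHandle(&I, TLI))
//       R.visit(&I);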

/// Special case for -ftrivial-auto-var-init remarks.
struct AutoInitRemark : public MemoryOpRemark {
  AutoInitRemark(OptimizationRemarkEmitter &ORE, StringRef RemarkPass,
                 const DataLayout &DL, const TargetLibraryInfo &TLI)
      : MemoryOpRemark(ORE, RemarkPass, DL, TLI) {}

  /// \return true iff the instruction is understood by AutoInitRemark.
  static bool canHandle(const Instruction *I);

protected:
  std::string explainSource(StringRef Type) const override;
  StringRef remarkName(RemarkKind RK) const override;
  DiagnosticKind diagnosticKind() const override {
    return DK_OptimizationRemarkMissed;
  }
};

} // namespace llvm

#endif
//===- AssumeBundleBuilder.h - utils to build assume bundles ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains tools to preserve information. They should be used before
// performing a transformation that may move and delete instructions, as those
// transformations may destroy or weaken information that can be derived from
// the IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_ASSUMEBUNDLEBUILDER_H
#define LLVM_TRANSFORMS_UTILS_ASSUMEBUNDLEBUILDER_H

#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class AssumeInst;
class Function;
class Instruction;
class AssumptionCache;
class DominatorTree;

/// Build a call to llvm.assume to preserve information that can be derived
/// from the given instruction.
/// If no information can be derived from \p I, this call returns null.
/// The returned instruction is not inserted anywhere.
AssumeInst *buildAssumeFromInst(Instruction *I);

/// Calls buildAssumeFromInst and, if the resulting llvm.assume is valid,
/// inserts it before \p I. This is usually what needs to be done to salvage
/// the knowledge contained in the instruction \p I.
/// The AssumptionCache must be provided if it is available, or the cache may
/// silently become invalid.
/// The DominatorTree can optionally be provided to enable cross-block
/// reasoning.
/// Returns true if a change was made.
bool salvageKnowledge(Instruction *I, AssumptionCache *AC = nullptr,
                      DominatorTree *DT = nullptr);
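
// A minimal sketch of salvaging knowledge before deleting an instruction;
// `AC` and `DT` are assumed to be available from the surrounding pass:
//
//   salvageKnowledge(&I, &AC, &DT);  // may insert an llvm.assume before I
//   I.eraseFromParent();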

/// Build and return a new llvm.assume created from the provided knowledge.
/// If the knowledge is fully redundant, this will return nullptr.
AssumeInst *buildAssumeFromKnowledge(ArrayRef<RetainedKnowledge> Knowledge,
                                     Instruction *CtxI,
                                     AssumptionCache *AC = nullptr,
                                     DominatorTree *DT = nullptr);

/// This pass attempts to minimize the number of llvm.assume intrinsics without
/// losing any information.
struct AssumeSimplifyPass : public PassInfoMixin<AssumeSimplifyPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// This pass will try to build an llvm.assume for every instruction in the
/// function. Its main purpose is testing.
struct AssumeBuilderPass : public PassInfoMixin<AssumeBuilderPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Canonicalize the RetainedKnowledge \p RK. It is assumed that \p RK is part
/// of \p Assume. This will return an empty RetainedKnowledge if the knowledge
/// is useless.
RetainedKnowledge simplifyRetainedKnowledge(AssumeInst *Assume,
                                            RetainedKnowledge RK,
                                            AssumptionCache *AC,
                                            DominatorTree *DT);

} // namespace llvm

#endif
//===- LowerGlobalDtors.h - Lower @llvm.global_dtors ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers @llvm.global_dtors by creating wrapper functions that are
// registered in @llvm.global_ctors and which contain a call to `__cxa_atexit`
// to register their destructor functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_LOWERGLOBALDTORS_H
#define LLVM_TRANSFORMS_UTILS_LOWERGLOBALDTORS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class LowerGlobalDtorsPass : public PassInfoMixin<LowerGlobalDtorsPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOWERGLOBALDTORS_H
//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
#define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {

class AllocaInst;

// These magic constants should be the same as in asan_internal.h from the
// ASan runtime in compiler-rt.
static const int kAsanStackLeftRedzoneMagic = 0xf1;
static const int kAsanStackMidRedzoneMagic = 0xf2;
static const int kAsanStackRightRedzoneMagic = 0xf3;
static const int kAsanStackUseAfterReturnMagic = 0xf5;
static const int kAsanStackUseAfterScopeMagic = 0xf8;

// Input/output data struct for ComputeASanStackFrameLayout.
struct ASanStackVariableDescription {
  const char *Name;    // Name of the variable that will be displayed by asan
                       // if a stack-related bug is reported.
  uint64_t Size;       // Size of the variable in bytes.
  size_t LifetimeSize; // Size in bytes to use for lifetime analysis check.
                       // Will be rounded up to Granularity.
  uint64_t Alignment;  // Alignment of the variable (power of 2).
  AllocaInst *AI;      // The actual AllocaInst.
  size_t Offset;       // Offset from the beginning of the frame;
                       // set by ComputeASanStackFrameLayout.
  unsigned Line;       // Line number.
};

// Output data struct for ComputeASanStackFrameLayout.
struct ASanStackFrameLayout {
  uint64_t Granularity;     // Shadow granularity.
  uint64_t FrameAlignment;  // Alignment for the entire frame.
  uint64_t FrameSize;       // Size of the frame in bytes.
};

ASanStackFrameLayout ComputeASanStackFrameLayout(
    // The array of stack variables. The elements may get reordered and changed.
    SmallVectorImpl<ASanStackVariableDescription> &Vars,
    // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
    uint64_t Granularity,
    // The minimal size of the left-most redzone (header).
    // At least 4 pointer sizes, power of 2, and >= Granularity.
    // The resulting FrameSize should be multiple of MinHeaderSize.
    uint64_t MinHeaderSize);
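
// A minimal layout sketch; `AI1` and `AI2` are assumed to be the function's
// static allocas (fields follow ASanStackVariableDescription order):
//
//   SmallVector<ASanStackVariableDescription, 8> Vars = {
//       {"x", 4, 4, 4, AI1, 0, 0}, {"buf", 32, 32, 8, AI2, 0, 0}};
//   ASanStackFrameLayout Layout = ComputeASanStackFrameLayout(
//       Vars, /*Granularity=*/8, /*MinHeaderSize=*/32);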

// Compute frame description, see DescribeAddressIfStack in ASan runtime.
SmallString<64> ComputeASanStackFrameDescription(
    const SmallVectorImpl<ASanStackVariableDescription> &Vars);

// Returns shadow bytes with marked red zones. This shadow represents the state
// of the stack frame when all local variables are inside of their own scope.
SmallVector<uint8_t, 64>
GetShadowBytes(const SmallVectorImpl<ASanStackVariableDescription> &Vars,
               const ASanStackFrameLayout &Layout);

// Returns shadow bytes with marked red zones and after scope. This shadow
// represents the state of the stack frame when all local variables are outside
// of their own scope.
SmallVector<uint8_t, 64> GetShadowBytesAfterScope(
    // The array of stack variables. The elements may get reordered and changed.
    const SmallVectorImpl<ASanStackVariableDescription> &Vars,
    const ASanStackFrameLayout &Layout);

} // llvm namespace

#endif  // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
//===- SplitModule.h - Split a module into partitions -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the function llvm::SplitModule, which splits a module
// into multiple linkable partitions. It can be used to implement parallel code
// generation for link-time optimization.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
#define LLVM_TRANSFORMS_UTILS_SPLITMODULE_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include <memory>

namespace llvm {

class Module;

/// Splits the module M into N linkable partitions. The function ModuleCallback
/// is called N times passing each individual partition as the MPart argument.
///
/// FIXME: This function does not deal with the somewhat subtle symbol
/// visibility issues around module splitting, including (but not limited to):
///
/// - Internal symbols should not collide with symbols defined outside the
///   module.
/// - Internal symbols defined in module-level inline asm should be visible to
///   each partition.
void SplitModule(
    Module &M, unsigned N,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback,
    bool PreserveLocals = false);
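
// A minimal usage sketch (illustrative; assumes M is an existing llvm::Module
// and that "llvm/IR/Module.h" and "llvm/Support/raw_ostream.h" are included;
// here each partition is simply printed):
//
//   SplitModule(M, /*N=*/4,
//               [](std::unique_ptr<Module> MPart) {
//                 MPart->print(errs(), /*AAW=*/nullptr);
//               },
//               /*PreserveLocals=*/false);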

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
//===- AMDGPUEmitPrintf.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utility function to lower a printf call into a series of device
// library calls on the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
#define LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H

#include "llvm/IR/IRBuilder.h"

namespace llvm {

Value *emitAMDGPUPrintfCall(IRBuilder<> &Builder, ArrayRef<Value *> Args,
                            bool isBuffered);
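
// A minimal usage sketch (illustrative; Builder is assumed to be positioned
// at the printf call site, and Fmt/Arg are hypothetical Values forming the
// printf argument list, format string first):
//
//   SmallVector<Value *, 4> Args = {Fmt, Arg};
//   Value *Res = emitAMDGPUPrintfCall(Builder, Args, /*isBuffered=*/false);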

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_AMDGPUEMITPRINTF_H
//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes an interface to build some C language libcalls for
// optimization passes that need to call the various functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Analysis/TargetLibraryInfo.h"

namespace llvm {
class AssumptionCache;
class StringRef;
class Value;
class CallInst;
class DataLayout;
class Instruction;
class IRBuilderBase;
class Function;
class OptimizationRemarkEmitter;
class BlockFrequencyInfo;
class ProfileSummaryInfo;

/// This class implements simplifications for calls to fortified library
/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk),
/// replacing them, when possible, with their non-checking counterparts.
/// Other optimizations can also be done, but it's possible to disable them and
/// only simplify needless uses of the checking versions (when the object size
/// is unknown) by passing true for OnlyLowerUnknownSize.
class FortifiedLibCallSimplifier {
private:
  const TargetLibraryInfo *TLI;
  bool OnlyLowerUnknownSize;

public:
  FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
                             bool OnlyLowerUnknownSize = false);

  /// Take the given call instruction and return a more optimal value to
  /// replace the instruction with, or nullptr if a more optimal form can't be
  /// found.
  /// The call must not be an indirect call.
  Value *optimizeCall(CallInst *CI, IRBuilderBase &B);

private:
  Value *optimizeMemCpyChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemMoveChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemSetChk(CallInst *CI, IRBuilderBase &B);

  /// Str/Stp cpy are similar enough to be handled in the same functions.
  Value *optimizeStrpCpyChk(CallInst *CI, IRBuilderBase &B, LibFunc Func);
  Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilderBase &B, LibFunc Func);
  Value *optimizeStrLenChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemPCpyChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemCCpyChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSNPrintfChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSPrintfChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrCatChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrLCat(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrNCatChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrLCpyChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeVSNPrintfChk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeVSPrintfChk(CallInst *CI, IRBuilderBase &B);

  /// Checks whether the call \p CI to a fortified libcall is foldable
  /// to the non-fortified version.
  ///
  /// \param CI the call to the fortified libcall.
  ///
  /// \param ObjSizeOp the index of the object size parameter of this chk
  /// function. Unlike the other parameter indices, this one is mandatory.
  ///
  /// \param SizeOp optionally set to the parameter index of an explicit buffer
  /// size argument. For instance, set to '2' for __strncpy_chk.
  ///
  /// \param StrOp optionally set to the parameter index of the source string
  /// parameter to strcpy-like functions, where only the strlen of the source
  /// will be written into the destination.
  ///
  /// \param FlagsOp optionally set to the parameter index of a 'flags'
  /// parameter. These are used by an implementation to opt into stricter
  /// checking.
  bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
                               std::optional<unsigned> SizeOp = std::nullopt,
                               std::optional<unsigned> StrOp = std::nullopt,
                               std::optional<unsigned> FlagsOp = std::nullopt);
};
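
// A minimal usage sketch (illustrative; TLI is assumed to come from a
// TargetLibraryInfo analysis and CI to be a direct call to a fortified
// libcall; "llvm/IR/IRBuilder.h" provides IRBuilder):
//
//   FortifiedLibCallSimplifier Simplifier(&TLI);
//   IRBuilder<> B(CI);
//   if (Value *V = Simplifier.optimizeCall(CI, B)) {
//     CI->replaceAllUsesWith(V);
//     CI->eraseFromParent();
//   }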

/// LibCallSimplifier - This class implements a collection of optimizations
/// that replace well-formed calls to library functions with a more optimal
/// form.  For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
class LibCallSimplifier {
private:
  FortifiedLibCallSimplifier FortifiedSimplifier;
  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  AssumptionCache *AC;
  OptimizationRemarkEmitter &ORE;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
  bool UnsafeFPShrink = false;
  function_ref<void(Instruction *, Value *)> Replacer;
  function_ref<void(Instruction *)> Eraser;

  /// Internal wrapper for RAUW that is the default implementation.
  ///
  /// Other users may provide an alternate function with this signature instead
  /// of this one.
  static void replaceAllUsesWithDefault(Instruction *I, Value *With) {
    I->replaceAllUsesWith(With);
  }

  /// Internal wrapper for eraseFromParent that is the default implementation.
  static void eraseFromParentDefault(Instruction *I) { I->eraseFromParent(); }

  /// Replace an instruction's uses with a value using our replacer.
  void replaceAllUsesWith(Instruction *I, Value *With);

  /// Erase an instruction from its parent with our eraser.
  void eraseFromParent(Instruction *I);

  /// Replace an instruction with a value and erase it from its parent.
  void substituteInParent(Instruction *I, Value *With) {
    replaceAllUsesWith(I, With);
    eraseFromParent(I);
  }

public:
  LibCallSimplifier(
      const DataLayout &DL, const TargetLibraryInfo *TLI, AssumptionCache *AC,
      OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
      ProfileSummaryInfo *PSI,
      function_ref<void(Instruction *, Value *)> Replacer =
          &replaceAllUsesWithDefault,
      function_ref<void(Instruction *)> Eraser = &eraseFromParentDefault);

  /// optimizeCall - Take the given call instruction and return a more
  /// optimal value to replace the instruction with, or nullptr if a more
  /// optimal form can't be found.  Note that the returned value may
  /// be equal to the instruction being optimized.  In this case all
  /// other instructions that use the given instruction were modified
  /// and the given instruction is dead.
  /// The call must not be an indirect call.
  Value *optimizeCall(CallInst *CI, IRBuilderBase &B);

private:
  // String and Memory Library Call Optimizations
  Value *optimizeStrCat(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrNCat(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrChr(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrRChr(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrCmp(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrNCmp(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrNDup(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStpCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrLCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrNCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrLen(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrNLen(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrPBrk(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrTo(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrSpn(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrCSpn(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrStr(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemChr(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemRChr(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemCmp(CallInst *CI, IRBuilderBase &B);
  Value *optimizeBCmp(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemCmpBCmpCommon(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemCCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemPCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemCpy(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemMove(CallInst *CI, IRBuilderBase &B);
  Value *optimizeMemSet(CallInst *CI, IRBuilderBase &B);
  Value *optimizeRealloc(CallInst *CI, IRBuilderBase &B);
  Value *optimizeNew(CallInst *CI, IRBuilderBase &B, LibFunc &Func);
  Value *optimizeWcslen(CallInst *CI, IRBuilderBase &B);
  Value *optimizeBCopy(CallInst *CI, IRBuilderBase &B);

  // Helper to optimize stpncpy and strncpy.
  Value *optimizeStringNCpy(CallInst *CI, bool RetEnd, IRBuilderBase &B);
  // Wrapper for all String/Memory Library Call Optimizations
  Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilderBase &B);

  // Math Library Optimizations
  Value *optimizeCAbs(CallInst *CI, IRBuilderBase &B);
  Value *optimizePow(CallInst *CI, IRBuilderBase &B);
  Value *replacePowWithExp(CallInst *Pow, IRBuilderBase &B);
  Value *replacePowWithSqrt(CallInst *Pow, IRBuilderBase &B);
  Value *optimizeExp2(CallInst *CI, IRBuilderBase &B);
  Value *optimizeFMinFMax(CallInst *CI, IRBuilderBase &B);
  Value *optimizeLog(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSqrt(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSinCosPi(CallInst *CI, bool IsSin, IRBuilderBase &B);
  Value *optimizeTan(CallInst *CI, IRBuilderBase &B);
  // Wrapper for all floating point library call optimizations
  Value *optimizeFloatingPointLibCall(CallInst *CI, LibFunc Func,
                                      IRBuilderBase &B);

  // Integer Library Call Optimizations
  Value *optimizeFFS(CallInst *CI, IRBuilderBase &B);
  Value *optimizeFls(CallInst *CI, IRBuilderBase &B);
  Value *optimizeAbs(CallInst *CI, IRBuilderBase &B);
  Value *optimizeIsDigit(CallInst *CI, IRBuilderBase &B);
  Value *optimizeIsAscii(CallInst *CI, IRBuilderBase &B);
  Value *optimizeToAscii(CallInst *CI, IRBuilderBase &B);
  Value *optimizeAtoi(CallInst *CI, IRBuilderBase &B);
  Value *optimizeStrToInt(CallInst *CI, IRBuilderBase &B, bool AsSigned);

  // Formatting and IO Library Call Optimizations
  Value *optimizeErrorReporting(CallInst *CI, IRBuilderBase &B,
                                int StreamArg = -1);
  Value *optimizePrintF(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSPrintF(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSnPrintF(CallInst *CI, IRBuilderBase &B);
  Value *optimizeFPrintF(CallInst *CI, IRBuilderBase &B);
  Value *optimizeFWrite(CallInst *CI, IRBuilderBase &B);
  Value *optimizeFPuts(CallInst *CI, IRBuilderBase &B);
  Value *optimizePuts(CallInst *CI, IRBuilderBase &B);

  // Helper methods
  Value* emitSnPrintfMemCpy(CallInst *CI, Value *StrArg, StringRef Str,
                            uint64_t N, IRBuilderBase &B);
  Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
                          IRBuilderBase &B);
  void classifyArgUse(Value *Val, Function *F, bool IsFloat,
                      SmallVectorImpl<CallInst *> &SinCalls,
                      SmallVectorImpl<CallInst *> &CosCalls,
                      SmallVectorImpl<CallInst *> &SinCosCalls);
  Value *optimizePrintFString(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSPrintFString(CallInst *CI, IRBuilderBase &B);
  Value *optimizeSnPrintFString(CallInst *CI, IRBuilderBase &B);
  Value *optimizeFPrintFString(CallInst *CI, IRBuilderBase &B);

  /// hasFloatVersion - Checks if there is a float version of the specified
  /// function by checking for an existing function with name FuncName + f
  bool hasFloatVersion(const Module *M, StringRef FuncName);

  /// Shared code to optimize strlen+wcslen and strnlen+wcsnlen.
  Value *optimizeStringLength(CallInst *CI, IRBuilderBase &B, unsigned CharSize,
                              Value *Bound = nullptr);
};
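
// A minimal usage sketch (illustrative; DL, TLI, AC, ORE, BFI and PSI are
// assumed to be analysis results available in the enclosing pass, and the
// default replacer/eraser are used):
//
//   LibCallSimplifier Simplifier(DL, &TLI, &AC, ORE, BFI, PSI);
//   IRBuilder<> B(CI);
//   if (Value *With = Simplifier.optimizeCall(CI, B)) {
//     if (With != CI) {  // see optimizeCall's doc comment above
//       CI->replaceAllUsesWith(With);
//       CI->eraseFromParent();
//     }
//   }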
} // End llvm namespace

#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
//===- LowerSwitch.h - Eliminate Switch instructions ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The LowerSwitch transformation rewrites switch instructions with a sequence
// of branches, which allows targets to get away with not implementing the
// switch instruction until it is convenient.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
#define LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H

#include "llvm/IR/PassManager.h"

namespace llvm {
struct LowerSwitchPass : public PassInfoMixin<LowerSwitchPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
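
// A minimal usage sketch (illustrative; FAM is assumed to be a
// FunctionAnalysisManager with the standard analyses registered):
//
//   FunctionPassManager FPM;
//   FPM.addPass(LowerSwitchPass());
//   PreservedAnalyses PA = FPM.run(F, FAM);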
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOWERSWITCH_H
//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass is used to ensure that functions have at most one return and one
// unreachable instruction in them.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
#define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H

#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"

namespace llvm {

class UnifyFunctionExitNodesLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  UnifyFunctionExitNodesLegacyPass();

  // We can preserve non-critical-edgeness when we unify function exit nodes
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnFunction(Function &F) override;
};

Pass *createUnifyFunctionExitNodesPass();

class UnifyFunctionExitNodesPass
    : public PassInfoMixin<UnifyFunctionExitNodesPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
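
// A minimal usage sketch (illustrative) for both pass managers; the legacy
// variant assumes "llvm/IR/LegacyPassManager.h" is included:
//
//   // New pass manager:
//   FunctionPassManager FPM;
//   FPM.addPass(UnifyFunctionExitNodesPass());
//
//   // Legacy pass manager:
//   legacy::PassManager PM;
//   PM.add(createUnifyFunctionExitNodesPass());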

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an interface for induction variable simplification. It
// does not define any actual pass or policy, but provides a single function
// to simplify a loop's induction variables based on ScalarEvolution.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H

namespace llvm {

class Type;
class WeakTrackingVH;
template <typename T> class SmallVectorImpl;
class CastInst;
class DominatorTree;
class Loop;
class LoopInfo;
class PHINode;
class ScalarEvolution;
class SCEVExpander;
class TargetTransformInfo;

/// Interface for visiting interesting IV users that are recognized but not
/// simplified by this utility.
class IVVisitor {
protected:
  const DominatorTree *DT = nullptr;

  virtual void anchor();

public:
  IVVisitor() = default;
  virtual ~IVVisitor() = default;

  const DominatorTree *getDomTree() const { return DT; }
  virtual void visitCast(CastInst *Cast) = 0;
};

/// simplifyUsersOfIV - Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
                       LoopInfo *LI, const TargetTransformInfo *TTI,
                       SmallVectorImpl<WeakTrackingVH> &Dead,
                       SCEVExpander &Rewriter, IVVisitor *V = nullptr);

/// SimplifyLoopIVs - Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
                     LoopInfo *LI, const TargetTransformInfo *TTI,
                     SmallVectorImpl<WeakTrackingVH> &Dead);
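
// A minimal usage sketch (illustrative; SE, DT, LI and TTI are assumed to be
// analysis results available in the enclosing loop pass):
//
//   SmallVector<WeakTrackingVH, 16> DeadInsts;
//   bool Simplified = simplifyLoopIVs(L, SE, DT, LI, TTI, DeadInsts);
//   // Instructions collected in DeadInsts are now dead and can be removed,
//   // e.g. with RecursivelyDeleteTriviallyDeadInstructions.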

/// Collect information about induction variables that are used by sign/zero
/// extend operations. This information is recorded by CollectExtend and provides
/// the input to WidenIV.
struct WideIVInfo {
  PHINode *NarrowIV = nullptr;

  // Widest integer type created by a [sz]ext.
  Type *WidestNativeType = nullptr;

  // Was a sext user seen before a zext?
  bool IsSigned = false;
};

/// Widen Induction Variables - Extend the width of an IV to cover its
/// widest uses.
PHINode *createWideIV(const WideIVInfo &WI,
    LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
    DominatorTree *DT, SmallVectorImpl<WeakTrackingVH> &DeadInsts,
    unsigned &NumElimExt, unsigned &NumWidened,
    bool HasGuards, bool UsePostIncrementRanges);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
//===- MatrixUtils.h - Utilities to lower matrix intrinsics -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for generating tiled loops for matrix operations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H
#define LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H

#include "llvm/ADT/StringRef.h"

namespace llvm {
class DomTreeUpdater;
class BasicBlock;
class Value;
class Loop;
class LoopInfo;
class IRBuilderBase;

/// A helper struct to create IR loop nests for tiling, of the following
/// form:
///   for ColumnLoop.Index = 0..NumColumns
///     for RowLoop.Index = 0..NumRows
///       for KLoop.Index = 0..NumInner
struct TileInfo {
  /// Number of rows of the matrix.
  unsigned NumRows;

  /// Number of columns of the matrix.
  unsigned NumColumns;

  /// Number of columns of the first matrix of a multiply /
  /// number of rows of the second matrix of a multiply.
  unsigned NumInner;

  /// Number of rows/columns in a tile.
  unsigned TileSize = -1;

  /// Properties of a single loop used when generating the tiled loop nest.
  struct MatrixLoop {
    /// The index updated on every iteration.
    Value *Index = nullptr;
    /// The header and latch of the loop.
    BasicBlock *Header = nullptr;
    BasicBlock *Latch = nullptr;
  };

  /// The loop iterating on the rows.
  MatrixLoop RowLoop;
  /// The loop iterating on the columns.
  MatrixLoop ColumnLoop;
  /// The loop iterating on k (inner dimension).
  MatrixLoop KLoop;

  TileInfo(unsigned NumRows, unsigned NumColumns, unsigned NumInner,
           unsigned TileSize)
      : NumRows(NumRows), NumColumns(NumColumns), NumInner(NumInner),
        TileSize(TileSize) {}

  /// Creates an IR loop nest for tiling of the form below. Returns the block
  /// for the inner loop body and sets the {Column,Row,K}Loop Header/Latch
  /// fields.
  ///
  /// for ColumnLoop.Index = 0..NumColumns
  ///   for RowLoop.Index = 0..NumRows
  ///     for KLoop.Index = 0..NumInner
  BasicBlock *CreateTiledLoops(BasicBlock *Start, BasicBlock *End,
                               IRBuilderBase &B, DomTreeUpdater &DTU,
                               LoopInfo &LI);

private:
  /// Creates a new loop with header, body and latch blocks that iterates from
  /// [0, Bound). Updates \p Preheader to branch to the new header and uses \p
  /// Exit as the exit block. Adds the new loop blocks to \p L and applies
  /// dominator tree updates to \p DTU.
  static BasicBlock *CreateLoop(BasicBlock *Preheader, BasicBlock *Exit,
                                Value *Bound, Value *Step, StringRef Name,
                                IRBuilderBase &B, DomTreeUpdater &DTU, Loop *L,
                                LoopInfo &LI);
};
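
// A minimal usage sketch (illustrative; Start and End are the blocks between
// which the tiled nest is created, and B, DTU and LI come from the
// surrounding lowering code):
//
//   TileInfo TI(/*NumRows=*/64, /*NumColumns=*/64, /*NumInner=*/64,
//               /*TileSize=*/4);
//   BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, B, DTU, LI);
//   // Emit the per-tile computation into InnerBody, indexing with
//   // TI.RowLoop.Index, TI.ColumnLoop.Index and TI.KLoop.Index.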
} // namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_MATRIXUTILS_H
//===- SampleProfileLoaderBaseImpl.h - Profile loader base impl -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for the sampled PGO profile loader base
/// implementation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_SAMPLEPROFILELOADERBASEIMPL_H
#define LLVM_TRANSFORMS_UTILS_SAMPLEPROFILELOADERBASEIMPL_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PseudoProbe.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/SampleProfileInference.h"
#include "llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h"

namespace llvm {
using namespace sampleprof;
using namespace sampleprofutil;
using ProfileCount = Function::ProfileCount;

namespace vfs {
class FileSystem;
} // namespace vfs

#define DEBUG_TYPE "sample-profile-impl"

namespace afdo_detail {

template <typename BlockT> struct IRTraits;
template <> struct IRTraits<BasicBlock> {
  using InstructionT = Instruction;
  using BasicBlockT = BasicBlock;
  using FunctionT = Function;
  using BlockFrequencyInfoT = BlockFrequencyInfo;
  using LoopT = Loop;
  using LoopInfoPtrT = std::unique_ptr<LoopInfo>;
  using DominatorTreePtrT = std::unique_ptr<DominatorTree>;
  using PostDominatorTreeT = PostDominatorTree;
  using PostDominatorTreePtrT = std::unique_ptr<PostDominatorTree>;
  using OptRemarkEmitterT = OptimizationRemarkEmitter;
  using OptRemarkAnalysisT = OptimizationRemarkAnalysis;
  using PredRangeT = pred_range;
  using SuccRangeT = succ_range;
  static Function &getFunction(Function &F) { return F; }
  static const BasicBlock *getEntryBB(const Function *F) {
    return &F->getEntryBlock();
  }
  static pred_range getPredecessors(BasicBlock *BB) { return predecessors(BB); }
  static succ_range getSuccessors(BasicBlock *BB) { return successors(BB); }
};

} // end namespace afdo_detail

// This class supports sample count correlation for SampleProfileLoader by
// analyzing pseudo probes and the function descriptors injected by
// SampleProfileProber.
class PseudoProbeManager {
  DenseMap<uint64_t, PseudoProbeDescriptor> GUIDToProbeDescMap;

  const PseudoProbeDescriptor *getDesc(const Function &F) const {
    auto I = GUIDToProbeDescMap.find(
        Function::getGUID(FunctionSamples::getCanonicalFnName(F)));
    return I == GUIDToProbeDescMap.end() ? nullptr : &I->second;
  }

public:
  PseudoProbeManager(const Module &M) {
    if (NamedMDNode *FuncInfo =
            M.getNamedMetadata(PseudoProbeDescMetadataName)) {
      for (const auto *Operand : FuncInfo->operands()) {
        const auto *MD = cast<MDNode>(Operand);
        auto GUID = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0))
                        ->getZExtValue();
        auto Hash = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1))
                        ->getZExtValue();
        GUIDToProbeDescMap.try_emplace(GUID, PseudoProbeDescriptor(GUID, Hash));
      }
    }
  }

  bool moduleIsProbed(const Module &M) const {
    return M.getNamedMetadata(PseudoProbeDescMetadataName);
  }

  bool profileIsValid(const Function &F, const FunctionSamples &Samples) const {
    const auto *Desc = getDesc(F);
    if (!Desc) {
      LLVM_DEBUG(dbgs() << "Probe descriptor missing for Function "
                        << F.getName() << "\n");
      return false;
    }
    if (Desc->getFunctionHash() != Samples.getFunctionHash()) {
      LLVM_DEBUG(dbgs() << "Hash mismatch for Function " << F.getName()
                        << "\n");
      return false;
    }
    return true;
  }
};
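
// A minimal usage sketch (illustrative; M is the module being compiled and
// Samples is the FunctionSamples read from the profile for function F):
//
//   PseudoProbeManager ProbeMgr(M);
//   if (ProbeMgr.moduleIsProbed(M) && ProbeMgr.profileIsValid(F, Samples)) {
//     // Probe-based sample counts can safely be applied to F.
//   }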

extern cl::opt<bool> SampleProfileUseProfi;

template <typename FT> class SampleProfileLoaderBaseImpl {
public:
  SampleProfileLoaderBaseImpl(std::string Name, std::string RemapName,
                              IntrusiveRefCntPtr<vfs::FileSystem> FS)
      : Filename(Name), RemappingFilename(RemapName), FS(std::move(FS)) {}
  void dump() { Reader->dump(); }

  using NodeRef = typename GraphTraits<FT *>::NodeRef;
  using BT = typename std::remove_pointer<NodeRef>::type;
  using InstructionT = typename afdo_detail::IRTraits<BT>::InstructionT;
  using BasicBlockT = typename afdo_detail::IRTraits<BT>::BasicBlockT;
  using BlockFrequencyInfoT =
      typename afdo_detail::IRTraits<BT>::BlockFrequencyInfoT;
  using FunctionT = typename afdo_detail::IRTraits<BT>::FunctionT;
  using LoopT = typename afdo_detail::IRTraits<BT>::LoopT;
  using LoopInfoPtrT = typename afdo_detail::IRTraits<BT>::LoopInfoPtrT;
  using DominatorTreePtrT =
      typename afdo_detail::IRTraits<BT>::DominatorTreePtrT;
  using PostDominatorTreePtrT =
      typename afdo_detail::IRTraits<BT>::PostDominatorTreePtrT;
  using PostDominatorTreeT =
      typename afdo_detail::IRTraits<BT>::PostDominatorTreeT;
  using OptRemarkEmitterT =
      typename afdo_detail::IRTraits<BT>::OptRemarkEmitterT;
  using OptRemarkAnalysisT =
      typename afdo_detail::IRTraits<BT>::OptRemarkAnalysisT;
  using PredRangeT = typename afdo_detail::IRTraits<BT>::PredRangeT;
  using SuccRangeT = typename afdo_detail::IRTraits<BT>::SuccRangeT;

  using BlockWeightMap = DenseMap<const BasicBlockT *, uint64_t>;
  using EquivalenceClassMap =
      DenseMap<const BasicBlockT *, const BasicBlockT *>;
  using Edge = std::pair<const BasicBlockT *, const BasicBlockT *>;
  using EdgeWeightMap = DenseMap<Edge, uint64_t>;
  using BlockEdgeMap =
      DenseMap<const BasicBlockT *, SmallVector<const BasicBlockT *, 8>>;

protected:
  ~SampleProfileLoaderBaseImpl() = default;
  friend class SampleCoverageTracker;

  Function &getFunction(FunctionT &F) {
    return afdo_detail::IRTraits<BT>::getFunction(F);
  }
  const BasicBlockT *getEntryBB(const FunctionT *F) {
    return afdo_detail::IRTraits<BT>::getEntryBB(F);
  }
  PredRangeT getPredecessors(BasicBlockT *BB) {
    return afdo_detail::IRTraits<BT>::getPredecessors(BB);
  }
  SuccRangeT getSuccessors(BasicBlockT *BB) {
    return afdo_detail::IRTraits<BT>::getSuccessors(BB);
  }

  unsigned getFunctionLoc(FunctionT &Func);
  virtual ErrorOr<uint64_t> getInstWeight(const InstructionT &Inst);
  ErrorOr<uint64_t> getInstWeightImpl(const InstructionT &Inst);
  virtual ErrorOr<uint64_t> getProbeWeight(const InstructionT &Inst);
  ErrorOr<uint64_t> getBlockWeight(const BasicBlockT *BB);
  mutable DenseMap<const DILocation *, const FunctionSamples *>
      DILocation2SampleMap;
  virtual const FunctionSamples *
  findFunctionSamples(const InstructionT &I) const;
  void printEdgeWeight(raw_ostream &OS, Edge E);
  void printBlockWeight(raw_ostream &OS, const BasicBlockT *BB) const;
  void printBlockEquivalence(raw_ostream &OS, const BasicBlockT *BB);
  bool computeBlockWeights(FunctionT &F);
  void findEquivalenceClasses(FunctionT &F);
  void findEquivalencesFor(BasicBlockT *BB1,
                           ArrayRef<BasicBlockT *> Descendants,
                           PostDominatorTreeT *DomTree);
  void propagateWeights(FunctionT &F);
  void applyProfi(FunctionT &F, BlockEdgeMap &Successors,
                  BlockWeightMap &SampleBlockWeights,
                  BlockWeightMap &BlockWeights, EdgeWeightMap &EdgeWeights);
  uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
  void buildEdges(FunctionT &F);
  bool propagateThroughEdges(FunctionT &F, bool UpdateBlockCount);
  void clearFunctionData(bool ResetDT = true);
  void computeDominanceAndLoopInfo(FunctionT &F);
  bool
  computeAndPropagateWeights(FunctionT &F,
                             const DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  void initWeightPropagation(FunctionT &F,
                             const DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  void
  finalizeWeightPropagation(FunctionT &F,
                            const DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  void emitCoverageRemarks(FunctionT &F);

  /// Map basic blocks to their computed weights.
  ///
  /// The weight of a basic block is defined to be the maximum
  /// of all the instruction weights in that block.
  BlockWeightMap BlockWeights;

  /// Map edges to their computed weights.
  ///
  /// Edge weights are computed by propagating basic block weights in
  /// SampleProfile::propagateWeights.
  EdgeWeightMap EdgeWeights;

  /// Set of visited blocks during propagation.
  SmallPtrSet<const BasicBlockT *, 32> VisitedBlocks;

  /// Set of visited edges during propagation.
  SmallSet<Edge, 32> VisitedEdges;

  /// Equivalence classes for block weights.
  ///
  /// Two blocks BB1 and BB2 are in the same equivalence class if they
  /// dominate and post-dominate each other, and they are in the same loop
  /// nest. When this happens, the two blocks are guaranteed to execute
  /// the same number of times.
  EquivalenceClassMap EquivalenceClass;

  /// Dominance, post-dominance and loop information.
  DominatorTreePtrT DT;
  PostDominatorTreePtrT PDT;
  LoopInfoPtrT LI;

  /// Predecessors for each basic block in the CFG.
  BlockEdgeMap Predecessors;

  /// Successors for each basic block in the CFG.
  BlockEdgeMap Successors;

  /// Profile coverage tracker.
  SampleCoverageTracker CoverageTracker;

  /// Profile reader object.
  std::unique_ptr<SampleProfileReader> Reader;

  // A pseudo probe helper to correlate the imported sample counts.
  std::unique_ptr<PseudoProbeManager> ProbeManager;

  /// Samples collected for the body of this function.
  FunctionSamples *Samples = nullptr;

  /// Name of the profile file to load.
  std::string Filename;

  /// Name of the profile remapping file to load.
  std::string RemappingFilename;

  /// VirtualFileSystem to load profile files from.
  IntrusiveRefCntPtr<vfs::FileSystem> FS;

  /// Profile Summary Info computed from sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptRemarkEmitterT *ORE = nullptr;
};

/// Clear all the per-function data used to load samples and propagate weights.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::clearFunctionData(bool ResetDT) {
  BlockWeights.clear();
  EdgeWeights.clear();
  VisitedBlocks.clear();
  VisitedEdges.clear();
  EquivalenceClass.clear();
  if (ResetDT) {
    DT = nullptr;
    PDT = nullptr;
    LI = nullptr;
  }
  Predecessors.clear();
  Successors.clear();
  CoverageTracker.clear();
}

#ifndef NDEBUG
/// Print the weight of edge \p E on stream \p OS.
///
/// \param OS  Stream to emit the output to.
/// \param E  Edge to print.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::printEdgeWeight(raw_ostream &OS, Edge E) {
  OS << "weight[" << E.first->getName() << "->" << E.second->getName()
     << "]: " << EdgeWeights[E] << "\n";
}

/// Print the equivalence class of block \p BB on stream \p OS.
///
/// \param OS  Stream to emit the output to.
/// \param BB  Block to print.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::printBlockEquivalence(
    raw_ostream &OS, const BasicBlockT *BB) {
  const BasicBlockT *Equiv = EquivalenceClass[BB];
  OS << "equivalence[" << BB->getName()
     << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
}

/// Print the weight of block \p BB on stream \p OS.
///
/// \param OS  Stream to emit the output to.
/// \param BB  Block to print.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::printBlockWeight(
    raw_ostream &OS, const BasicBlockT *BB) const {
  const auto &I = BlockWeights.find(BB);
  uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
  OS << "weight[" << BB->getName() << "]: " << W << "\n";
}
#endif

/// Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function. We use HeaderLineno to compute the offset. We then
/// look up the samples collected for \p Inst using BodySamples.
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
template <typename BT>
ErrorOr<uint64_t>
SampleProfileLoaderBaseImpl<BT>::getInstWeight(const InstructionT &Inst) {
  if (FunctionSamples::ProfileIsProbeBased)
    return getProbeWeight(Inst);
  return getInstWeightImpl(Inst);
}

template <typename BT>
ErrorOr<uint64_t>
SampleProfileLoaderBaseImpl<BT>::getInstWeightImpl(const InstructionT &Inst) {
  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  const DILocation *DIL = DLoc;
  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator;
  if (EnableFSDiscriminator)
    Discriminator = DIL->getDiscriminator();
  else
    Discriminator = DIL->getBaseDiscriminator();

  ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
  if (R) {
    bool FirstMark =
        CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
    if (FirstMark) {
      ORE->emit([&]() {
        OptRemarkAnalysisT Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", *R);
        Remark << " samples from profile (offset: ";
        Remark << ore::NV("LineOffset", LineOffset);
        if (Discriminator) {
          Remark << ".";
          Remark << ore::NV("Discriminator", Discriminator);
        }
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << "    " << DLoc.getLine() << "." << Discriminator << ":"
                      << Inst << " (line offset: " << LineOffset << "."
                      << Discriminator << " - weight: " << R.get() << ")\n");
  }
  return R;
}

// An error_code return here represents either 1) a dangling probe or 2) a
// non-probe instruction whose weight should be ignored. If all instructions
// of the BB give error_code, the inference algorithm is told to infer the BB
// weight.
template <typename BT>
ErrorOr<uint64_t>
SampleProfileLoaderBaseImpl<BT>::getProbeWeight(const InstructionT &Inst) {
  assert(FunctionSamples::ProfileIsProbeBased &&
         "Profile is not pseudo probe based");
  std::optional<PseudoProbe> Probe = extractProbe(Inst);
  // Ignore non-probe instructions. If none of the instructions in the BB is a
  // probe, we choose to infer the BB's weight.
  if (!Probe)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  // If the instruction has no FunctionSamples, return a zero sample count to
  // indicate the BB is cold. This can happen when the instruction comes from
  // an inlinee and no profile data is found.
  // FIXME: This should not be affected by the source drift issue as 1) if the
  // newly added function is top-level inliner, it won't match the CFG checksum
  // in the function profile or 2) if it's the inlinee, the inlinee should have
  // a profile, otherwise it wouldn't be inlined. For non-probe based profile,
  // we can improve it by adding a switch for profile-sample-block-accurate for
  // block level counts in the future.
  if (!FS)
    return 0;

  auto R = FS->findSamplesAt(Probe->Id, Probe->Discriminator);
  if (R) {
    uint64_t Samples = R.get() * Probe->Factor;
    bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
    if (FirstMark) {
      ORE->emit([&]() {
        OptRemarkAnalysisT Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", Samples);
        Remark << " samples from profile (ProbeId=";
        Remark << ore::NV("ProbeId", Probe->Id);
        if (Probe->Discriminator) {
          Remark << ".";
          Remark << ore::NV("Discriminator", Probe->Discriminator);
        }
        Remark << ", Factor=";
        Remark << ore::NV("Factor", Probe->Factor);
        Remark << ", OriginalSamples=";
        Remark << ore::NV("OriginalSamples", R.get());
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG({dbgs() << "    " << Probe->Id;
      if (Probe->Discriminator)
        dbgs() << "." << Probe->Discriminator;
      dbgs() << ":" << Inst << " - weight: " << R.get()
             << " - factor: " << format("%0.2f", Probe->Factor) << ")\n";});
    return Samples;
  }
  return R;
}

/// Compute the weight of a basic block.
///
/// The weight of basic block \p BB is the maximum weight of all the
/// instructions in BB.
///
/// \param BB The basic block to query.
///
/// \returns the weight for \p BB.
template <typename BT>
ErrorOr<uint64_t>
SampleProfileLoaderBaseImpl<BT>::getBlockWeight(const BasicBlockT *BB) {
  uint64_t Max = 0;
  bool HasWeight = false;
  for (auto &I : *BB) {
    const ErrorOr<uint64_t> &R = getInstWeight(I);
    if (R) {
      Max = std::max(Max, R.get());
      HasWeight = true;
    }
  }
  return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
}

/// Compute and store the weights of every basic block.
///
/// This populates the BlockWeights map by computing
/// the weights of every basic block in the CFG.
///
/// \param F The function to query.
template <typename BT>
bool SampleProfileLoaderBaseImpl<BT>::computeBlockWeights(FunctionT &F) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Block weights\n");
  for (const auto &BB : F) {
    ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
    if (Weight) {
      BlockWeights[&BB] = Weight.get();
      VisitedBlocks.insert(&BB);
      Changed = true;
    }
    LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
  }

  return Changed;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// from which that instruction comes. We traverse the inline stack
/// of that instruction, and match it with the tree nodes in the profile.
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
template <typename BT>
const FunctionSamples *SampleProfileLoaderBaseImpl<BT>::findFunctionSamples(
    const InstructionT &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL)
    return Samples;

  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second) {
    it.first->second = Samples->findFunctionSamples(DIL, Reader->getRemapper());
  }
  return it.first->second;
}

/// Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
/// the following holds:
///
/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
///    is a descendant of \p BB1 in the dominator tree, then BB2 should
///    dominate BB1 in the post-dominator tree.
///
/// 2- Both BB2 and \p BB1 must be in the same loop.
///
/// For every block BB2 that meets those two requirements, we set BB2's
/// equivalence class to \p BB1.
///
/// \param BB1  Block to check.
/// \param Descendants  Descendants of \p BB1 in either the dom or pdom tree.
/// \param DomTree  Opposite dominator tree. If \p Descendants is filled
///                 with blocks from \p BB1's dominator tree, then
///                 this is the post-dominator tree, and vice versa.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::findEquivalencesFor(
    BasicBlockT *BB1, ArrayRef<BasicBlockT *> Descendants,
    PostDominatorTreeT *DomTree) {
  const BasicBlockT *EC = EquivalenceClass[BB1];
  uint64_t Weight = BlockWeights[EC];
  for (const auto *BB2 : Descendants) {
    bool IsDomParent = DomTree->dominates(BB2, BB1);
    bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
    if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
      EquivalenceClass[BB2] = EC;
      // If BB2 is visited, then the entire EC should be marked as visited.
      if (VisitedBlocks.count(BB2)) {
        VisitedBlocks.insert(EC);
      }

      // If BB2 is heavier than BB1, make BB2 have the same weight
      // as BB1.
      //
      // Note that we don't worry about the opposite situation here
      // (when BB2 is lighter than BB1). We will deal with this
      // during the propagation phase. Right now, we just want to
      // make sure that BB1 has the largest weight of all the
      // members of its equivalence set.
      Weight = std::max(Weight, BlockWeights[BB2]);
    }
  }
  const BasicBlockT *EntryBB = getEntryBB(EC->getParent());
  if (EC == EntryBB) {
    BlockWeights[EC] = Samples->getHeadSamples() + 1;
  } else {
    BlockWeights[EC] = Weight;
  }
}

/// Find equivalence classes.
///
/// Since samples may be missing from blocks, we can fill in the gaps by setting
/// the weights of all the blocks in the same equivalence class to the same
/// weight. To compute the concept of equivalence, we use dominance and loop
/// information. Two blocks B1 and B2 are in the same equivalence class if B1
/// dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// \param F The function to query.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::findEquivalenceClasses(FunctionT &F) {
  SmallVector<BasicBlockT *, 8> DominatedBBs;
  LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
  // Find equivalence sets based on dominance and post-dominance information.
  for (auto &BB : F) {
    BasicBlockT *BB1 = &BB;

    // Compute BB1's equivalence class once.
    if (EquivalenceClass.count(BB1)) {
      LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
      continue;
    }

    // By default, blocks are in their own equivalence class.
    EquivalenceClass[BB1] = BB1;

    // Traverse all the blocks dominated by BB1. We are looking for
    // every basic block BB2 such that:
    //
    // 1- BB1 dominates BB2.
    // 2- BB2 post-dominates BB1.
    // 3- BB1 and BB2 are in the same loop nest.
    //
    // If all those conditions hold, it means that BB2 is executed
    // as many times as BB1, so they are placed in the same equivalence
    // class by making BB2's equivalence class be BB1.
    DominatedBBs.clear();
    DT->getDescendants(BB1, DominatedBBs);
    findEquivalencesFor(BB1, DominatedBBs, &*PDT);

    LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
  }

  // Assign weights to equivalence classes.
  //
  // All the basic blocks in the same equivalence class will execute
  // the same number of times. Since we know that the head block in
  // each equivalence class has the largest weight, assign that weight
  // to all the blocks in that equivalence class.
  LLVM_DEBUG(
      dbgs() << "\nAssign the same weight to all blocks in the same class\n");
  for (auto &BI : F) {
    const BasicBlockT *BB = &BI;
    const BasicBlockT *EquivBB = EquivalenceClass[BB];
    if (BB != EquivBB)
      BlockWeights[BB] = BlockWeights[EquivBB];
    LLVM_DEBUG(printBlockWeight(dbgs(), BB));
  }
}

/// Visit the given edge to decide if it has a valid weight.
///
/// If \p E has not been visited before, we copy to \p UnknownEdge
/// and increment the count of unknown edges.
///
/// \param E  Edge to visit.
/// \param NumUnknownEdges  Current number of unknown edges.
/// \param UnknownEdge  Set if E has not been visited before.
///
/// \returns E's weight, if known. Otherwise, return 0.
template <typename BT>
uint64_t SampleProfileLoaderBaseImpl<BT>::visitEdge(Edge E,
                                                    unsigned *NumUnknownEdges,
                                                    Edge *UnknownEdge) {
  if (!VisitedEdges.count(E)) {
    (*NumUnknownEdges)++;
    *UnknownEdge = E;
    return 0;
  }

  return EdgeWeights[E];
}

/// Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// \param F  Function to process.
/// \param UpdateBlockCount  Whether we should update basic block counts that
///                          have already been annotated.
///
/// \returns  True if new weights were assigned to edges or blocks.
template <typename BT>
bool SampleProfileLoaderBaseImpl<BT>::propagateThroughEdges(
    FunctionT &F, bool UpdateBlockCount) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
  for (const auto &BI : F) {
    const BasicBlockT *BB = &BI;
    const BasicBlockT *EC = EquivalenceClass[BB];

    // Visit all the predecessor and successor edges to determine
    // which ones have a weight assigned already. Note that it doesn't
    // matter that we only keep track of a single unknown edge. The
    // only case we are interested in handling is when only a single
    // edge is unknown (see setEdgeOrBlockWeight).
    for (unsigned i = 0; i < 2; i++) {
      uint64_t TotalWeight = 0;
      unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
      Edge UnknownEdge, SelfReferentialEdge, SingleEdge;

      if (i == 0) {
        // First, visit all predecessor edges.
        NumTotalEdges = Predecessors[BB].size();
        for (auto *Pred : Predecessors[BB]) {
          Edge E = std::make_pair(Pred, BB);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
          if (E.first == E.second)
            SelfReferentialEdge = E;
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(Predecessors[BB][0], BB);
        }
      } else {
        // On the second round, visit all successor edges.
        NumTotalEdges = Successors[BB].size();
        for (auto *Succ : Successors[BB]) {
          Edge E = std::make_pair(BB, Succ);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(BB, Successors[BB][0]);
        }
      }

      // After visiting all the edges, there are three cases that we
      // can handle immediately:
      //
      // - All the edge weights are known (i.e., NumUnknownEdges == 0).
      //   In this case, we simply check that the sum of all the edges
      //   is the same as BB's weight. If not, we change BB's weight
      //   to match. Additionally, if BB had not been visited before,
      //   we mark it visited.
      //
      // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the sum of all the known edge weights from the
      //   total block weight. If the known edges weigh more than BB,
      //   then the weight of the last remaining edge is set to zero.
      //
      // - There exists a self-referential edge and the weight of BB is
      //   known. In this case, this edge can be based on BB's weight.
      //   We add up all the other known edges and set the weight on
      //   the self-referential edge as we did in the previous case.
      //
      // In any other case, we must continue iterating. Eventually,
      // all edges will get a weight, or iteration will stop when
      // it reaches SampleProfileMaxPropagateIterations.
      if (NumUnknownEdges <= 1) {
        uint64_t &BBWeight = BlockWeights[EC];
        if (NumUnknownEdges == 0) {
          if (!VisitedBlocks.count(EC)) {
            // If we already know the weight of all edges, the weight of the
            // basic block can be computed. It should be no larger than the sum
            // of all edge weights.
            if (TotalWeight > BBWeight) {
              BBWeight = TotalWeight;
              Changed = true;
              LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
                                << " known. Set weight for block: ";
                         printBlockWeight(dbgs(), BB););
            }
          } else if (NumTotalEdges == 1 &&
                     EdgeWeights[SingleEdge] < BlockWeights[EC]) {
            // If there is only one edge for the visited basic block, use the
            // block weight to adjust edge weight if edge weight is smaller.
            EdgeWeights[SingleEdge] = BlockWeights[EC];
            Changed = true;
          }
        } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
          // If there is a single unknown edge and the block has been
          // visited, then we can compute E's weight.
          if (BBWeight >= TotalWeight)
            EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
          else
            EdgeWeights[UnknownEdge] = 0;
          const BasicBlockT *OtherEC;
          if (i == 0)
            OtherEC = EquivalenceClass[UnknownEdge.first];
          else
            OtherEC = EquivalenceClass[UnknownEdge.second];
          // An edge's weight should never exceed the weights of the BBs it
          // connects.
          if (VisitedBlocks.count(OtherEC) &&
              EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
            EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
          VisitedEdges.insert(UnknownEdge);
          Changed = true;
          LLVM_DEBUG(dbgs() << "Set weight for edge: ";
                     printEdgeWeight(dbgs(), UnknownEdge));
        }
      } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block has weight 0, all its in/out edges should have weight 0.
        if (i == 0) {
          for (auto *Pred : Predecessors[BB]) {
            Edge E = std::make_pair(Pred, BB);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        } else {
          for (auto *Succ : Successors[BB]) {
            Edge E = std::make_pair(BB, Succ);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        }
      } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
        uint64_t &BBWeight = BlockWeights[BB];
        // We have a self-referential edge and the weight of BB is known.
        if (BBWeight >= TotalWeight)
          EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
        else
          EdgeWeights[SelfReferentialEdge] = 0;
        VisitedEdges.insert(SelfReferentialEdge);
        Changed = true;
        LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
                   printEdgeWeight(dbgs(), SelfReferentialEdge));
      }
      if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
        BlockWeights[EC] = TotalWeight;
        VisitedBlocks.insert(EC);
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Build in/out edge lists for each basic block in the CFG.
///
/// We are interested in unique edges. If a block B1 has multiple
/// edges to another block B2, we only add a single B1->B2 edge.
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::buildEdges(FunctionT &F) {
  for (auto &BI : F) {
    BasicBlockT *B1 = &BI;

    // Add predecessors for B1.
    SmallPtrSet<BasicBlockT *, 16> Visited;
    if (!Predecessors[B1].empty())
      llvm_unreachable("Found a stale predecessors list in a basic block.");
    for (auto *B2 : getPredecessors(B1))
      if (Visited.insert(B2).second)
        Predecessors[B1].push_back(B2);

    // Add successors for B1.
    Visited.clear();
    if (!Successors[B1].empty())
      llvm_unreachable("Found a stale successors list in a basic block.");
    for (auto *B2 : getSuccessors(B1))
      if (Visited.insert(B2).second)
        Successors[B1].push_back(B2);
  }
}

/// Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
///   of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
///   weight of the block is already known, the weight of the unknown
///   edge will be the weight of the block minus the sum of all the known
///   edges. If the sum of all the known edges is larger than BB's weight,
///   we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
///   known, the weight for that edge is set to the weight of the block
///   minus the weight of the other incoming edges to that block (if
///   known).
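///
/// For example, if a block has a known weight of 100 and two of its three
/// incoming edges have known weights 60 and 30, the remaining incoming
/// edge is assigned weight 100 - (60 + 30) = 10.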
template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::propagateWeights(FunctionT &F) {
  // Flow-based profile inference is only usable with the BasicBlock
  // instantiation of SampleProfileLoaderBaseImpl.
  if (SampleProfileUseProfi) {
    // Prepare block sample counts for inference.
    BlockWeightMap SampleBlockWeights;
    for (const auto &BI : F) {
      ErrorOr<uint64_t> Weight = getBlockWeight(&BI);
      if (Weight)
        SampleBlockWeights[&BI] = Weight.get();
    }
    // Fill in BlockWeights and EdgeWeights using an inference algorithm.
    applyProfi(F, Successors, SampleBlockWeights, BlockWeights, EdgeWeights);
  } else {
    bool Changed = true;
    unsigned I = 0;

    // If BB weight is larger than its corresponding loop's header BB weight,
    // use the BB weight to replace the loop header BB weight.
    for (auto &BI : F) {
      BasicBlockT *BB = &BI;
      LoopT *L = LI->getLoopFor(BB);
      if (!L) {
        continue;
      }
      BasicBlockT *Header = L->getHeader();
      if (Header && BlockWeights[BB] > BlockWeights[Header]) {
        BlockWeights[Header] = BlockWeights[BB];
      }
    }

    // Propagate until we converge or we go past the iteration limit.
    while (Changed && I++ < SampleProfileMaxPropagateIterations) {
      Changed = propagateThroughEdges(F, false);
    }

    // The first propagation pass propagates BB counts from annotated BBs to
    // unknown BBs. The second pass resets edge weights and uses all BB weights
    // to propagate edge weights.
    VisitedEdges.clear();
    Changed = true;
    while (Changed && I++ < SampleProfileMaxPropagateIterations) {
      Changed = propagateThroughEdges(F, false);
    }

    // The third propagation pass is allowed to adjust annotated BB weights
    // that are obviously wrong.
    Changed = true;
    while (Changed && I++ < SampleProfileMaxPropagateIterations) {
      Changed = propagateThroughEdges(F, true);
    }
  }
}
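
// Editor's note: a worked example of the "all edges known except one" rule
// documented above (illustration only, not called by the pass). The unknown
// edge gets the block weight minus the sum of the known edges, clamped to
// zero when the known edges already exceed the block weight.
inline uint64_t unknownEdgeWeightExample(uint64_t BlockWeight,
                                         uint64_t SumOfKnownEdges) {
  // E.g. a block of weight 100 with one known outgoing edge of 30 leaves 70
  // for the remaining edge; known edges summing to 120 would clamp it to 0.
  return BlockWeight > SumOfKnownEdges ? BlockWeight - SumOfKnownEdges : 0;
}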

template <typename FT>
void SampleProfileLoaderBaseImpl<FT>::applyProfi(
    FunctionT &F, BlockEdgeMap &Successors, BlockWeightMap &SampleBlockWeights,
    BlockWeightMap &BlockWeights, EdgeWeightMap &EdgeWeights) {
  auto Infer = SampleProfileInference<FT>(F, Successors, SampleBlockWeights);
  Infer.apply(BlockWeights, EdgeWeights);
}

/// Generate branch weight metadata for all branches in \p F.
///
/// Branch weights are computed out of instruction samples using a
/// propagation heuristic. Propagation proceeds in 3 phases:
///
/// 1- Assignment of block weights. All the basic blocks in the function
///    are initially assigned the same weight as their most frequently
///    executed instruction.
///
/// 2- Creation of equivalence classes. Since samples may be missing from
///    blocks, we can fill in the gaps by setting the weights of all the
///    blocks in the same equivalence class to the same weight. To compute
///    the concept of equivalence, we use dominance and loop information.
///    Two blocks B1 and B2 are in the same equivalence class if B1
///    dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// 3- Propagation of block weights into edges. This uses a simple
///    propagation heuristic. The following rules are applied to every
///    block BB in the CFG:
///
///    - If BB has a single predecessor/successor, then the weight
///      of that edge is the weight of the block.
///
///    - If all the edges are known except one, and the weight of the
///      block is already known, the weight of the unknown edge will
///      be the weight of the block minus the sum of all the known
///      edges. If the sum of all the known edges is larger than BB's weight,
///      we set the unknown edge weight to zero.
///
///    - If there is a self-referential edge, and the weight of the block is
///      known, the weight for that edge is set to the weight of the block
///      minus the weight of the other incoming edges to that block (if
///      known).
///
/// Since this propagation is not guaranteed to finalize for every CFG, we
/// only allow it to proceed for a limited number of iterations (controlled
/// by -sample-profile-max-propagate-iterations).
///
/// FIXME: Try to replace this propagation heuristic with a scheme
/// that is guaranteed to finalize. A work-list approach similar to
/// the standard value propagation algorithm used by SSA-CCP might
/// work here.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
template <typename BT>
bool SampleProfileLoaderBaseImpl<BT>::computeAndPropagateWeights(
    FunctionT &F, const DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  bool Changed = !InlinedGUIDs.empty();

  // Compute basic block weights.
  Changed |= computeBlockWeights(F);

  if (Changed) {
    // Initialize propagation.
    initWeightPropagation(F, InlinedGUIDs);

    // Propagate weights to all edges.
    propagateWeights(F);

    // Post-process propagated weights.
    finalizeWeightPropagation(F, InlinedGUIDs);
  }

  return Changed;
}

template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::initWeightPropagation(
    FunctionT &F, const DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  // Add an entry count to the function using the samples gathered at the
  // function entry.
  // Sets the GUIDs that are inlined in the profiled binary. This is used
  // for ThinLink to make correct liveness analysis, and also make the IR
  // match the profiled binary before annotation.
  getFunction(F).setEntryCount(
      ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
      &InlinedGUIDs);

  if (!SampleProfileUseProfi) {
    // Compute dominance and loop info needed for propagation.
    computeDominanceAndLoopInfo(F);

    // Find equivalence classes.
    findEquivalenceClasses(F);
  }

  // Before propagation starts, build, for each block, a list of
  // unique predecessors and successors. This is necessary to handle
  // identical edges in multiway branches. Since we visit all blocks and all
  // edges of the CFG, it is cleaner to build these lists once at the start
  // of the pass.
  buildEdges(F);
}

template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::finalizeWeightPropagation(
    FunctionT &F, const DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  // If we utilize a flow-based count inference, then we trust the computed
  // counts and set the entry count as computed by the algorithm. This is
  // primarily done to sync the counts produced by profi and BFI inference,
  // which uses the entry count for mass propagation.
  // If profi produces a zero value for the entry count, we fall back to
  // Samples->getHeadSamples() + 1 to avoid functions with zero count.
  if (SampleProfileUseProfi) {
    const BasicBlockT *EntryBB = getEntryBB(&F);
    ErrorOr<uint64_t> EntryWeight = getBlockWeight(EntryBB);
    if (BlockWeights[EntryBB] > 0) {
      getFunction(F).setEntryCount(
          ProfileCount(BlockWeights[EntryBB], Function::PCT_Real),
          &InlinedGUIDs);
    }
  }
}

template <typename BT>
void SampleProfileLoaderBaseImpl<BT>::emitCoverageRemarks(FunctionT &F) {
  // If coverage checking was requested, compute it now.
  const Function &Func = getFunction(F);
  if (SampleProfileRecordCoverage) {
    unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
    unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileRecordCoverage) {
      Func.getContext().diagnose(DiagnosticInfoSampleProfile(
          Func.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile records (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }

  if (SampleProfileSampleCoverage) {
    uint64_t Used = CoverageTracker.getTotalUsedSamples();
    uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileSampleCoverage) {
      Func.getContext().diagnose(DiagnosticInfoSampleProfile(
          Func.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }
}

/// Get the line number for the function header.
///
/// This looks up function \p F in the current compilation unit and
/// retrieves the line number where the function is defined. This is
/// line 0 for all the samples read from the profile file. Every line
/// number is relative to this line.
///
/// \param F  Function object to query.
///
/// \returns the line number where \p F is defined. If it returns 0,
///          it means that there is no debug information available for \p F.
template <typename BT>
unsigned SampleProfileLoaderBaseImpl<BT>::getFunctionLoc(FunctionT &F) {
  const Function &Func = getFunction(F);
  if (DISubprogram *S = Func.getSubprogram())
    return S->getLine();

  if (NoWarnSampleUnused)
    return 0;

  // If the start of \p F is missing, emit a diagnostic to inform the user
  // about the missed opportunity.
  Func.getContext().diagnose(DiagnosticInfoSampleProfile(
      "No debug information found in function " + Func.getName() +
          ": Function profile not used",
      DS_Warning));
  return 0;
}

#undef DEBUG_TYPE

} // namespace llvm
#endif // LLVM_TRANSFORMS_UTILS_SAMPLEPROFILELOADERBASEIMPL_H
//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions perform manipulations on Modules.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
#define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalIFunc.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <utility> // for std::pair

namespace llvm {
template <typename T> class SmallVectorImpl;

template <typename T> class ArrayRef;
class Module;
class Function;
class FunctionCallee;
class GlobalIFunc;
class GlobalValue;
class Constant;
class Value;
class Type;

/// Append F to the list of global ctors of module M with the given Priority.
/// This wraps the function in the appropriate structure and stores it
/// alongside other global constructors. For details see
/// https://llvm.org/docs/LangRef.html#the-llvm-global-ctors-global-variable
void appendToGlobalCtors(Module &M, Function *F, int Priority,
                         Constant *Data = nullptr);

/// Same as appendToGlobalCtors(), but for global dtors.
void appendToGlobalDtors(Module &M, Function *F, int Priority,
                         Constant *Data = nullptr);
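
// Editor's usage sketch (the constructor name and priority are illustrative,
// not prescribed by this API): create an internal void() function with an
// empty body and register it to run at startup.
//
//   LLVMContext &Ctx = M.getContext();
//   FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
//   Function *Ctor = Function::Create(FTy, GlobalValue::InternalLinkage,
//                                     "my_module_ctor", &M);
//   ReturnInst::Create(Ctx, BasicBlock::Create(Ctx, "entry", Ctor));
//   appendToGlobalCtors(M, Ctor, /*Priority=*/65535);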

/// Sets the KCFI type for the function. Used for compiler-generated functions
/// that are indirectly called in instrumented code.
void setKCFIType(Module &M, Function &F, StringRef MangledType);

FunctionCallee declareSanitizerInitFunction(Module &M, StringRef InitName,
                                            ArrayRef<Type *> InitArgTypes,
                                            bool Weak = false);

/// Creates sanitizer constructor function.
/// \return Returns pointer to constructor.
Function *createSanitizerCtor(Module &M, StringRef CtorName);

/// Creates sanitizer constructor function, and calls sanitizer's init
/// function from it.
/// \return Returns pair of pointers to constructor, and init functions
/// respectively.
std::pair<Function *, FunctionCallee> createSanitizerCtorAndInitFunctions(
    Module &M, StringRef CtorName, StringRef InitName,
    ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
    StringRef VersionCheckName = StringRef(), bool Weak = false);
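
// Editor's usage sketch (the "mysan" names are illustrative): build a module
// constructor that calls a runtime init function taking no arguments, then
// schedule it via llvm.global_ctors.
//
//   auto [Ctor, Init] = createSanitizerCtorAndInitFunctions(
//       M, "mysan.module_ctor", "__mysan_init",
//       /*InitArgTypes=*/{}, /*InitArgs=*/{});
//   appendToGlobalCtors(M, Ctor, /*Priority=*/0);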

/// Creates the sanitizer constructor function lazily. If a constructor and
/// init function already exist, this function returns them. Otherwise it calls \c
/// createSanitizerCtorAndInitFunctions. The FunctionsCreatedCallback is invoked
/// in that case, passing the new Ctor and Init function.
///
/// \return Returns pair of pointers to constructor, and init functions
/// respectively.
std::pair<Function *, FunctionCallee> getOrCreateSanitizerCtorAndInitFunctions(
    Module &M, StringRef CtorName, StringRef InitName,
    ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
    function_ref<void(Function *, FunctionCallee)> FunctionsCreatedCallback,
    StringRef VersionCheckName = StringRef(), bool Weak = false);

/// Rename all the anon globals in the module using a hash computed from
/// the list of public globals in the module.
bool nameUnamedGlobals(Module &M);

/// Adds global values to the llvm.used list.
void appendToUsed(Module &M, ArrayRef<GlobalValue *> Values);

/// Adds global values to the llvm.compiler.used list.
void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);

/// Removes global values from the llvm.used and llvm.compiler.used arrays. \p
/// ShouldRemove should return true for any initializer field that should not be
/// included in the replacement global.
void removeFromUsedLists(Module &M,
                         function_ref<bool(Constant *)> ShouldRemove);
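
// Editor's usage sketch (GV is an illustrative GlobalValue *): pin a symbol
// against link-time GC via llvm.used, then later drop it from both lists.
//
//   appendToUsed(M, {GV});
//   ...
//   removeFromUsedLists(M, [GV](Constant *C) { return C == GV; });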

/// Filter out potentially dead comdat functions where other entries keep the
/// entire comdat group alive.
///
/// This is designed for cases where functions appear to become dead but remain
/// alive due to other live entries in their comdat group.
///
/// The \p DeadComdatFunctions container should only have pointers to
/// `Function`s which are members of a comdat group and are believed to be
/// dead.
///
/// After this routine finishes, the only remaining `Function`s in \p
/// DeadComdatFunctions are those where every member of the comdat is listed
/// and thus removing them is safe (provided *all* are removed).
void filterDeadComdatFunctions(
    SmallVectorImpl<Function *> &DeadComdatFunctions);

/// Produce a unique identifier for this module by taking the MD5 sum of
/// the names of the module's strong external symbols that are not comdat
/// members.
///
/// This identifier is normally guaranteed to be unique, or the program would
/// fail to link due to multiply defined symbols.
///
/// If the module has no strong external symbols (such a module may still have a
/// semantic effect if it performs global initialization), we cannot produce a
/// unique identifier for this module, so we return the empty string.
std::string getUniqueModuleId(Module *M);

/// Embed the memory buffer \p Buf into the module \p M as a global using the
/// specified section name. Also provide a metadata entry to identify it in the
/// module using the same section name.
void embedBufferInModule(Module &M, MemoryBufferRef Buf, StringRef SectionName,
                         Align Alignment = Align(1));

/// Lower all calls to ifuncs by replacing uses with indirect calls loaded out
/// of a global table initialized in a global constructor. This introduces
/// one constructor function and adds it to llvm.global_ctors. The constructor
/// will call the resolver function once for each ifunc.
///
/// Leaves any unhandled constant initializer uses as-is.
///
/// If \p IFuncsToLower is empty, all ifuncs in the module will be lowered.
/// If \p IFuncsToLower is non-empty, only the selected ifuncs will be lowered.
///
/// The processed ifuncs without remaining users will be removed from the
/// module.
bool lowerGlobalIFuncUsersAsGlobalCtor(
    Module &M, ArrayRef<GlobalIFunc *> IFuncsToLower = {});

class CallInst;
namespace VFABI {
/// Overwrite the Vector Function ABI variants attribute with the names
/// provided in \p VariantMappings.
void setVectorVariantNames(CallInst *CI, ArrayRef<std::string> VariantMappings);
} // End VFABI namespace
} // End llvm namespace

#endif // LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
//===- StripNonLineTableDebugInfo.h - -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
#define LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

class StripNonLineTableDebugInfoPass
    : public PassInfoMixin<StripNonLineTableDebugInfoPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_STRIPNONLINETABLEDEBUGINFO_H
//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/InstructionCost.h"

namespace llvm {

class AssumptionCache;
class BasicBlock;
class BlockFrequencyInfo;
class DependenceInfo;
class DominatorTree;
class Loop;
class LoopInfo;
class MDNode;
class ProfileSummaryInfo;
class OptimizationRemarkEmitter;
class ScalarEvolution;
class StringRef;
class Value;

using NewLoopsMap = SmallDenseMap<const Loop *, Loop *, 4>;

/// @{
/// Metadata attribute names
const char *const LLVMLoopUnrollFollowupAll = "llvm.loop.unroll.followup_all";
const char *const LLVMLoopUnrollFollowupUnrolled =
    "llvm.loop.unroll.followup_unrolled";
const char *const LLVMLoopUnrollFollowupRemainder =
    "llvm.loop.unroll.followup_remainder";
/// @}

const Loop* addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
                                     BasicBlock *ClonedBB, LoopInfo *LI,
                                     NewLoopsMap &NewLoops);

/// Represents the result of a \c UnrollLoop invocation.
enum class LoopUnrollResult {
  /// The loop was not modified.
  Unmodified,

  /// The loop was partially unrolled -- we still have a loop, but with a
  /// smaller trip count.  We may also have emitted an epilogue loop if the
  /// loop had a non-constant trip count.
  /// had a non-constant trip count.
  PartiallyUnrolled,

  /// The loop was fully unrolled into straight-line code.  We no longer have
  /// any back-edges.
  FullyUnrolled
};

struct UnrollLoopOptions {
  unsigned Count;
  bool Force;
  bool Runtime;
  bool AllowExpensiveTripCount;
  bool UnrollRemainder;
  bool ForgetAllSCEV;
};

LoopUnrollResult UnrollLoop(Loop *L, UnrollLoopOptions ULO, LoopInfo *LI,
                            ScalarEvolution *SE, DominatorTree *DT,
                            AssumptionCache *AC,
                            const llvm::TargetTransformInfo *TTI,
                            OptimizationRemarkEmitter *ORE, bool PreserveLCSSA,
                            Loop **RemainderLoop = nullptr);
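
// Editor's usage sketch (values are illustrative; L and the analyses are
// assumed to be provided by the calling pass): request a 4x partial unroll
// with a runtime remainder loop.
//
//   UnrollLoopOptions ULO;
//   ULO.Count = 4;
//   ULO.Force = false;
//   ULO.Runtime = true;
//   ULO.AllowExpensiveTripCount = false;
//   ULO.UnrollRemainder = false;
//   ULO.ForgetAllSCEV = false;
//   LoopUnrollResult Res = UnrollLoop(L, ULO, LI, SE, DT, AC, TTI, ORE,
//                                     /*PreserveLCSSA=*/true);
//   if (Res == LoopUnrollResult::FullyUnrolled)
//     ...; // straight-line code, no back-edges remain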

bool UnrollRuntimeLoopRemainder(
    Loop *L, unsigned Count, bool AllowExpensiveTripCount,
    bool UseEpilogRemainder, bool UnrollRemainder, bool ForgetAllSCEV,
    LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    const TargetTransformInfo *TTI, bool PreserveLCSSA,
    Loop **ResultLoop = nullptr);

LoopUnrollResult UnrollAndJamLoop(Loop *L, unsigned Count, unsigned TripCount,
                                  unsigned TripMultiple, bool UnrollRemainder,
                                  LoopInfo *LI, ScalarEvolution *SE,
                                  DominatorTree *DT, AssumptionCache *AC,
                                  const TargetTransformInfo *TTI,
                                  OptimizationRemarkEmitter *ORE,
                                  Loop **EpilogueLoop = nullptr);

bool isSafeToUnrollAndJam(Loop *L, ScalarEvolution &SE, DominatorTree &DT,
                          DependenceInfo &DI, LoopInfo &LI);

bool computeUnrollCount(Loop *L, const TargetTransformInfo &TTI,
                        DominatorTree &DT, LoopInfo *LI, AssumptionCache *AC,
                        ScalarEvolution &SE,
                        const SmallPtrSetImpl<const Value *> &EphValues,
                        OptimizationRemarkEmitter *ORE, unsigned TripCount,
                        unsigned MaxTripCount, bool MaxOrZero,
                        unsigned TripMultiple, unsigned LoopSize,
                        TargetTransformInfo::UnrollingPreferences &UP,
                        TargetTransformInfo::PeelingPreferences &PP,
                        bool &UseUpperBound);

void simplifyLoopAfterUnroll(Loop *L, bool SimplifyIVs, LoopInfo *LI,
                             ScalarEvolution *SE, DominatorTree *DT,
                             AssumptionCache *AC,
                             const TargetTransformInfo *TTI);

MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);

TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
    Loop *L, ScalarEvolution &SE, const TargetTransformInfo &TTI,
    BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
    llvm::OptimizationRemarkEmitter &ORE, int OptLevel,
    std::optional<unsigned> UserThreshold, std::optional<unsigned> UserCount,
    std::optional<bool> UserAllowPartial, std::optional<bool> UserRuntime,
    std::optional<bool> UserUpperBound,
    std::optional<unsigned> UserFullUnrollMaxCount);

InstructionCost ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
    bool &NotDuplicatable, bool &Convergent, const TargetTransformInfo &TTI,
    const SmallPtrSetImpl<const Value *> &EphValues, unsigned BEInsns);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
//===-- RelLookupTableConverterPass.h - Rel Table Conv ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the relative lookup table converter, which converts
/// lookup tables into relative lookup tables to make them PIC-friendly.
///
/// Switch lookup table example:
/// @switch.table.foo = private unnamed_addr constant [3 x i8*]
/// [
/// i8* getelementptr inbounds ([5 x i8], [5 x i8]* @.str, i64 0, i64 0),
/// i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.1, i64 0, i64 0),
/// i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str.2, i64 0, i64 0)
/// ], align 8
///
/// switch.lookup:
///   %1 = sext i32 %cond to i64
///   %switch.gep = getelementptr inbounds [3 x i8*],
///                 [3 x i8*]* @switch.table.foo, i64 0, i64 %1
///   %switch.load = load i8*, i8** %switch.gep, align 8
///  ret i8* %switch.load
///
/// The switch lookup table becomes a relative lookup table that
/// consists of relative offsets.
///
/// @reltable.foo = private unnamed_addr constant [3 x i32]
/// [
/// i32 trunc (i64 sub (i64 ptrtoint ([5 x i8]* @.str to i64),
///                     i64 ptrtoint ([3 x i32]* @reltable.foo to i64)) to i32),
/// i32 trunc (i64 sub (i64 ptrtoint ([4 x i8]* @.str.1 to i64),
///                     i64 ptrtoint ([3 x i32]* @reltable.foo to i64)) to i32),
/// i32 trunc (i64 sub (i64 ptrtoint ([4 x i8]* @.str.2 to i64),
///                     i64 ptrtoint ([3 x i32]* @reltable.foo to i64)) to i32)
/// ], align 4
///
/// IR after converting to a relative lookup table:
/// switch.lookup:
///  %1 = sext i32 %cond to i64
///  %reltable.shift = shl i64 %1, 2
///  %reltable.intrinsic = call i8* @llvm.load.relative.i64(
///                        i8* bitcast ([3 x i32]* @reltable.foo to i8*),
///                        i64 %reltable.shift)
///  ret i8* %reltable.intrinsic
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_RELLOOKUPTABLECONVERTER_H
#define LLVM_TRANSFORMS_UTILS_RELLOOKUPTABLECONVERTER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

// Pass that converts lookup tables to relative lookup tables.
class RelLookupTableConverterPass
    : public PassInfoMixin<RelLookupTableConverterPass> {
public:
  RelLookupTableConverterPass() = default;

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_RELLOOKUPTABLECONVERTER_H
//===-- Coroutines.h - Coroutine Transformations ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Declare accessor functions for coroutine lowering passes.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_H
#define LLVM_TRANSFORMS_COROUTINES_H

namespace llvm {

class Pass;
class PassManagerBuilder;

/// Add all coroutine passes to appropriate extension points.
void addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder);

/// Lower coroutine intrinsics that are not needed by later passes.
Pass *createCoroEarlyLegacyPass();

/// Split up coroutines into multiple functions driving their state machines.
Pass *createCoroSplitLegacyPass(bool IsOptimizing = false);

/// Analyze coroutine use sites, devirtualize resume/destroy calls and elide
/// heap allocation for coroutine frames where possible.
Pass *createCoroElideLegacyPass();

/// Lower all remaining coroutine intrinsics.
Pass *createCoroCleanupLegacyPass();

}

#endif
//===-- ObjCARC.h - ObjCARC Scalar Transformations --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the ObjCARC Scalar Transformations library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_OBJCARC_H
#define LLVM_TRANSFORMS_OBJCARC_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Pass;

//===----------------------------------------------------------------------===//
//
// ObjCARCContract - Late ObjC ARC cleanups.
//
Pass *createObjCARCContractPass();

struct ObjCARCOptPass : public PassInfoMixin<ObjCARCOptPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

struct ObjCARCContractPass : public PassInfoMixin<ObjCARCContractPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

struct ObjCARCAPElimPass : public PassInfoMixin<ObjCARCAPElimPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

struct ObjCARCExpandPass : public PassInfoMixin<ObjCARCExpandPass> {
  PreservedAnalyses run(Function &M, FunctionAnalysisManager &AM);
};

struct PAEvalPass : public PassInfoMixin<PAEvalPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // End llvm namespace

#endif
//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the Vectorize transformations library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_VECTORIZE_H
#define LLVM_TRANSFORMS_VECTORIZE_H

namespace llvm {
class Pass;

//===----------------------------------------------------------------------===//
//
// LoadStoreVectorizer - Create vector loads and stores, but leave scalar
// operations.
//
Pass *createLoadStoreVectorizerPass();

} // End llvm namespace

#endif
//===-- AlwaysInliner.h - Pass to inline "always_inline" functions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Provides passes to inline "always_inline" functions.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
#define LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;
class Pass;

/// Inlines functions marked as "always_inline".
///
/// Note that this does not inline call sites marked as always_inline and does
/// not delete the functions even when all users are inlined. The normal
/// inliner should be used to handle call site inlining, this pass's goal is to
/// be the simplest possible pass to remove always_inline function definitions'
/// uses by inlining them. The \c GlobalDCE pass can be used to remove these
/// functions once all users are gone.
class AlwaysInlinerPass : public PassInfoMixin<AlwaysInlinerPass> {
  bool InsertLifetime;

public:
  AlwaysInlinerPass(bool InsertLifetime = true)
      : InsertLifetime(InsertLifetime) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
  static bool isRequired() { return true; }
};
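
// Editor's usage sketch: per the note above, pair this pass with GlobalDCE
// (from llvm/Transforms/IPO/GlobalDCE.h) so the now-unreferenced
// always_inline definitions are removed as well.
//
//   ModulePassManager MPM;
//   MPM.addPass(AlwaysInlinerPass());
//   MPM.addPass(GlobalDCEPass());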

/// Create a legacy pass manager instance of a pass to inline and remove
/// functions marked as "always_inline".
Pass *createAlwaysInlinerLegacyPass(bool InsertLifetime = true);

}

#endif // LLVM_TRANSFORMS_IPO_ALWAYSINLINER_H
//===- FunctionSpecialization.h - Function Specialization -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This specialises functions with constant parameters. Constant parameters
// like function pointers and constant globals are propagated to the callee by
// specializing the function. The main benefit of this pass at the moment is
// that indirect calls are transformed into direct calls, which provides inline
// opportunities that the inliner would not have been able to achieve. That's
// why function specialisation is run before the inliner in the optimisation
// pipeline; that is by design. Otherwise, we would only benefit from constant
// passing, which is a valid use-case too, but hasn't been explored much in
// terms of performance uplifts, cost-model and compile-time impact.
//
// Current limitations:
// - It does not yet handle integer ranges. We do support "literal constants",
//   but that's off by default under an option.
// - The cost-model could be refined further (it mainly focuses on inlining
//   benefits).
//
// Ideas:
// - With a function specialization attribute for arguments, we could have
//   a direct way to steer function specialization, avoiding the cost-model,
//   and thus control compile-times / code-size.
//
// Todos:
// - Specializing recursive functions relies on running the transformation a
//   number of times, which is controlled by option
//   `func-specialization-max-iters`. Thus, increasing this value, and with it
//   the number of iterations, will linearly increase the number of times
//   recursive functions get specialized; see also the discussion in
//   https://reviews.llvm.org/D106426 for details. Perhaps there is a
//   compile-time friendlier way to control/limit the number of specialisations
//   for recursive functions.
// - Don't transform the function if function specialization does not trigger;
//   the SCCPSolver may make IR changes.
//
// References:
// - 2021 LLVM Dev Mtg “Introducing function specialisation, and can we enable
//   it by default?”, https://www.youtube.com/watch?v=zJiCjeXgV5Q
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONSPECIALIZATION_H
#define LLVM_TRANSFORMS_IPO_FUNCTIONSPECIALIZATION_H

#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/SCCPSolver.h"
#include "llvm/Transforms/Utils/SizeOpts.h"

using namespace llvm;

namespace llvm {
// Map of potential specializations for each function. The FunctionSpecializer
// keeps the discovered specialisation opportunities for the module in a single
// vector, where the specialisations of each function form a contiguous range.
// This map's value is the beginning and the end of that range.
using SpecMap = DenseMap<Function *, std::pair<unsigned, unsigned>>;
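
// Editor's sketch (AllSpecs and visit() are illustrative names): walking the
// contiguous range of specializations recorded for a function F.
//
//   if (auto It = SM.find(F); It != SM.end())
//     for (unsigned I = It->second.first; I != It->second.second; ++I)
//       visit(AllSpecs[I]); // every Spec in [first, second) belongs to F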

// A shorter alias for InstructionCost, to improve indentation.
using Cost = InstructionCost;

// Map of known constants found during the specialization bonus estimation.
using ConstMap = DenseMap<Value *, Constant *>;

// Specialization signature, used to uniquely designate a specialization within
// a function.
struct SpecSig {
  // Hashing support, used to distinguish between ordinary, empty, or tombstone
  // keys.
  unsigned Key = 0;
  SmallVector<ArgInfo, 4> Args;

  bool operator==(const SpecSig &Other) const {
    if (Key != Other.Key || Args.size() != Other.Args.size())
      return false;
    for (size_t I = 0; I < Args.size(); ++I)
      if (Args[I] != Other.Args[I])
        return false;
    return true;
  }

  friend hash_code hash_value(const SpecSig &S) {
    return hash_combine(hash_value(S.Key),
                        hash_combine_range(S.Args.begin(), S.Args.end()));
  }
};

// Specialization instance.
struct Spec {
  // Original function.
  Function *F;

  // Cloned function, a specialized version of the original one.
  Function *Clone = nullptr;

  // Specialization signature.
  SpecSig Sig;

  // Profitability of the specialization.
  Cost Score;

  // List of call sites, matching this specialization.
  SmallVector<CallBase *> CallSites;

  Spec(Function *F, const SpecSig &S, Cost Score)
      : F(F), Sig(S), Score(Score) {}
  Spec(Function *F, const SpecSig &&S, Cost Score)
      : F(F), Sig(S), Score(Score) {}
};

class InstCostVisitor : public InstVisitor<InstCostVisitor, Constant *> {
  const DataLayout &DL;
  BlockFrequencyInfo &BFI;
  TargetTransformInfo &TTI;
  SCCPSolver &Solver;

  ConstMap KnownConstants;

  ConstMap::iterator LastVisited;

public:
  InstCostVisitor(const DataLayout &DL, BlockFrequencyInfo &BFI,
                  TargetTransformInfo &TTI, SCCPSolver &Solver)
      : DL(DL), BFI(BFI), TTI(TTI), Solver(Solver) {}

  Cost getUserBonus(Instruction *User, Value *Use, Constant *C);

private:
  friend class InstVisitor<InstCostVisitor, Constant *>;

  Cost estimateSwitchInst(SwitchInst &I);
  Cost estimateBranchInst(BranchInst &I);

  Constant *visitInstruction(Instruction &I) { return nullptr; }
  Constant *visitFreezeInst(FreezeInst &I);
  Constant *visitCallBase(CallBase &I);
  Constant *visitLoadInst(LoadInst &I);
  Constant *visitGetElementPtrInst(GetElementPtrInst &I);
  Constant *visitSelectInst(SelectInst &I);
  Constant *visitCastInst(CastInst &I);
  Constant *visitCmpInst(CmpInst &I);
  Constant *visitUnaryOperator(UnaryOperator &I);
  Constant *visitBinaryOperator(BinaryOperator &I);
};

class FunctionSpecializer {

  /// The IPSCCP Solver.
  SCCPSolver &Solver;

  Module &M;

  /// Analysis manager, needed to invalidate analyses.
  FunctionAnalysisManager *FAM;

  /// Analyses used to help determine if a function should be specialized.
  std::function<BlockFrequencyInfo &(Function &)> GetBFI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<AssumptionCache &(Function &)> GetAC;

  SmallPtrSet<Function *, 32> Specializations;
  SmallPtrSet<Function *, 32> FullySpecialized;
  DenseMap<Function *, CodeMetrics> FunctionMetrics;

public:
  FunctionSpecializer(
      SCCPSolver &Solver, Module &M, FunctionAnalysisManager *FAM,
      std::function<BlockFrequencyInfo &(Function &)> GetBFI,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI,
      std::function<TargetTransformInfo &(Function &)> GetTTI,
      std::function<AssumptionCache &(Function &)> GetAC)
      : Solver(Solver), M(M), FAM(FAM), GetBFI(GetBFI), GetTLI(GetTLI),
        GetTTI(GetTTI), GetAC(GetAC) {}

  ~FunctionSpecializer();

  bool run();

  InstCostVisitor getInstCostVisitorFor(Function *F) {
    auto &BFI = GetBFI(*F);
    auto &TTI = GetTTI(*F);
    return InstCostVisitor(M.getDataLayout(), BFI, TTI, Solver);
  }

  /// Compute a bonus for replacing argument \p A with constant \p C.
  Cost getSpecializationBonus(Argument *A, Constant *C,
                              InstCostVisitor &Visitor);

private:
  Constant *getPromotableAlloca(AllocaInst *Alloca, CallInst *Call);

  /// A constant stack value is an AllocaInst that has a single constant
  /// value stored to it. Return this constant if such an alloca stack value
  /// is a function argument.
  Constant *getConstantStackValue(CallInst *Call, Value *Val);

  /// See if there are any new constant values for the callers of \p F via
  /// stack variables and promote them to global variables.
  void promoteConstantStackValues(Function *F);

  /// Clean up fully specialized functions.
  void removeDeadFunctions();

  /// Remove any ssa_copy intrinsics that may have been introduced.
  void cleanUpSSA();

  /// @brief  Find potential specialization opportunities.
  /// @param F Function to specialize
  /// @param SpecCost Cost of specializing a function. Final score is benefit
  /// minus this cost.
  /// @param AllSpecs A vector to add potential specializations to.
  /// @param SM  A map for a function's specialisation range
  /// @return True, if any potential specializations were found
  bool findSpecializations(Function *F, Cost SpecCost,
                           SmallVectorImpl<Spec> &AllSpecs, SpecMap &SM);

  bool isCandidateFunction(Function *F);

  /// @brief Create a specialization of \p F and prime the SCCPSolver
  /// @param F Function to specialize
  /// @param S Which specialization to create
  /// @return The new, cloned function
  Function *createSpecialization(Function *F, const SpecSig &S);

  /// Determine if it is possible to specialise the function for constant values
  /// of the formal parameter \p A.
  bool isArgumentInteresting(Argument *A);

  /// Check if the value \p V  (an actual argument) is a constant or can only
  /// have a constant value. Return that constant.
  Constant *getCandidateConstant(Value *V);

  /// @brief Find and update calls to \p F, which match a specialization
  /// @param F Original function
  /// @param Begin Start of a range of possibly matching specialisations
  /// @param End End of a range (exclusive) of possibly matching specialisations
  void updateCallSites(Function *F, const Spec *Begin, const Spec *End);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_IPO_FUNCTIONSPECIALIZATION_H
//===- SampleProfile.h - SamplePGO pass ---------- --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for the sampled PGO loader pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
#define LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include <string>

namespace llvm {

class Module;

namespace vfs {
class FileSystem;
} // namespace vfs

/// The sample profiler data loader pass.
class SampleProfileLoaderPass : public PassInfoMixin<SampleProfileLoaderPass> {
public:
  SampleProfileLoaderPass(
      std::string File = "", std::string RemappingFile = "",
      ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None,
      IntrusiveRefCntPtr<vfs::FileSystem> FS = nullptr);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

private:
  std::string ProfileFileName;
  std::string ProfileRemappingFileName;
  const ThinOrFullLTOPhase LTOPhase;
  IntrusiveRefCntPtr<vfs::FileSystem> FS;
};
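
// Editor's usage sketch (the profile path is illustrative): schedule the
// loader in a module pipeline with a sample profile for this binary.
//
//   ModulePassManager MPM;
//   MPM.addPass(SampleProfileLoaderPass("/path/to/profile.afdo"));
//   MPM.run(M, MAM);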

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_SAMPLEPROFILE_H
//===- GlobalSplit.h - global variable splitter -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass uses inrange annotations on GEP indices to split globals where
// beneficial. Clang currently attaches these annotations to references to
// virtual table globals under the Itanium ABI for the benefit of the
// whole-program virtual call optimization and control flow integrity passes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
#define LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Pass to perform split of global variables.
class GlobalSplitPass : public PassInfoMixin<GlobalSplitPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_GLOBALSPLIT_H
//===-- ForceFunctionAttrs.h - Force function attrs for debugging ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Super simple passes to force specific function attrs from the commandline
/// into the IR for debugging purposes.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
#define LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;

/// Pass which forces specific function attributes into the IR, primarily as
/// a debugging tool.
struct ForceFunctionAttrsPass : PassInfoMixin<ForceFunctionAttrsPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

}

#endif // LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
//===- StripSymbols.h - Strip symbols and debug info from a module --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The StripSymbols transformation implements code stripping. Specifically, it
// can delete:
//
//   * names for virtual registers
//   * symbols for internal globals and functions
//   * debug information
//
// Note that this transformation makes code much less readable, so it should
// only be used in situations where the 'strip' utility would be used, such as
// reducing code size or making it harder to reverse engineer code.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_STRIPSYMBOLS_H
#define LLVM_TRANSFORMS_IPO_STRIPSYMBOLS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct StripSymbolsPass : PassInfoMixin<StripSymbolsPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

struct StripNonDebugSymbolsPass : PassInfoMixin<StripNonDebugSymbolsPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

struct StripDebugDeclarePass : PassInfoMixin<StripDebugDeclarePass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

struct StripDeadDebugInfoPass : PassInfoMixin<StripDeadDebugInfoPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_STRIPSYMBOLS_H
PKiwFZt/�  Transforms/IPO/GlobalOpt.hnu�[���//===- GlobalOpt.h - Optimize Global Variables ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_GLOBALOPT_H
#define LLVM_TRANSFORMS_IPO_GLOBALOPT_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Optimize globals that never have their address taken.
class GlobalOptPass : public PassInfoMixin<GlobalOptPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_GLOBALOPT_H
//===- Attributor.h --- Module-wide attribute deduction ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Attributor: An inter procedural (abstract) "attribute" deduction framework.
//
// The Attributor framework is an inter procedural abstract analysis (fixpoint
// iteration analysis). The goal is to allow easy deduction of new attributes as
// well as information exchange between abstract attributes in-flight.
//
// The Attributor class is the driver and the link between the various abstract
// attributes. The Attributor will iterate until a fixpoint state is reached by
// all abstract attributes in-flight, or until it enforces a pessimistic
// fixpoint because an iteration limit is reached.
//
// Abstract attributes, derived from the AbstractAttribute class, actually
// describe properties of the code. They can correspond to actual LLVM-IR
// attributes, or they can be more general, ultimately unrelated to LLVM-IR
// attributes. The latter is useful when an abstract attribute provides
// information to other abstract attributes in-flight but we might not want to
// manifest the information. The Attributor allows querying in-flight abstract
// attributes through the `Attributor::getAAFor` method (see the method
// description for an example). If the method is used by an abstract attribute
// P, and it results in an abstract attribute Q, the Attributor will
// automatically capture a potential dependence from Q to P. This dependence
// will cause P to be reevaluated whenever Q changes in the future.
//
// The Attributor will only reevaluate abstract attributes that might have
// changed since the last iteration. That means that the Attributor will not
// revisit all instructions/blocks/functions in the module but only query
// an update from a subset of the abstract attributes.
//
// The update method `AbstractAttribute::updateImpl` is implemented by the
// specific "abstract attribute" subclasses. The method is invoked whenever the
// currently assumed state (see the AbstractState class) might not be valid
// anymore. This can, for example, happen if the state was dependent on another
// abstract attribute that changed. In every invocation, the update method has
// to adjust the internal state of an abstract attribute to a point that is
// justifiable by the underlying IR and the current state of abstract attributes
// in-flight. Since the IR is given and assumed to be valid, the information
// derived from it can be assumed to hold. However, information derived from
// other abstract attributes is conditional on various things. If the justifying
// state changed, the `updateImpl` has to revisit the situation and potentially
// find another justification or limit the optimistic assumes made.
//
// Change is the key in this framework. Until a state of no-change, thus a
// fixpoint, is reached, the Attributor will query the abstract attributes
// in-flight to re-evaluate their state. If the (current) state is too
// optimistic, hence it cannot be justified anymore through other abstract
// attributes or the state of the IR, the state of the abstract attribute will
// have to change. Generally, we assume abstract attribute state to be a finite
// height lattice and the update function to be monotone. However, these
// conditions are not enforced because the iteration limit will guarantee
// termination. If an optimistic fixpoint is reached, or a pessimistic
// fixpoint is enforced after a timeout, the abstract attributes are tasked to
// manifest their result in the IR for passes to come.
//
// Attribute manifestation is not mandatory. If desired, there is support to
// generate a single or multiple LLVM-IR attributes already in the helper struct
// IRAttribute. In the simplest case, a subclass inherits from IRAttribute with
// a proper Attribute::AttrKind as template parameter. The Attributor
// manifestation framework will then create and place a new attribute if it is
// allowed to do so (based on the abstract state). Other use cases can be
// achieved by overloading AbstractAttribute or IRAttribute methods.
//
//
// The "mechanics" of adding a new "abstract attribute":
// - Define a class (transitively) inheriting from AbstractAttribute and one
//   (which could be the same) that (transitively) inherits from AbstractState.
//   For the latter, consider the already available BooleanState and
//   {Inc,Dec,Bit}IntegerState if they fit your needs, e.g., you require only a
//   number tracking or bit-encoding.
// - Implement all pure methods. Also use overloading if the attribute is not
//   conforming with the "default" behavior: A (set of) LLVM-IR attribute(s) for
//   an argument, call site argument, function return value, or function. See
//   the class and method descriptions for more information on the two
//   "Abstract" classes and their respective methods.
// - Register opportunities for the new abstract attribute in the
//   `Attributor::identifyDefaultAbstractAttributes` method if it should be
//   counted as a 'default' attribute.
// - Add sufficient tests.
// - Add a Statistics object for bookkeeping. If it is a simple (set of)
//   attribute(s) manifested through the Attributor manifestation framework, see
//   the bookkeeping function in Attributor.cpp.
// - If instructions with a certain opcode are interesting to the attribute, add
//   that opcode to the switch in `Attributor::identifyAbstractAttributes`. This
//   will make it possible to query all those instructions through the
//   `InformationCache::getOpcodeInstMapForFunction` interface and eliminate the
//   need to traverse the IR repeatedly.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
#define LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/AbstractCallSite.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"

#include <limits>
#include <map>
#include <optional>

namespace llvm {

class DataLayout;
class LLVMContext;
class Pass;
template <typename Fn> class function_ref;
struct AADepGraphNode;
struct AADepGraph;
struct Attributor;
struct AbstractAttribute;
struct InformationCache;
struct AAIsDead;
struct AttributorCallGraph;
struct IRPosition;

class Function;

/// Abstract Attribute helper functions.
namespace AA {
using InstExclusionSetTy = SmallPtrSet<Instruction *, 4>;

enum class GPUAddressSpace : unsigned {
  Generic = 0,
  Global = 1,
  Shared = 3,
  Constant = 4,
  Local = 5,
};

/// Return true iff \p M target a GPU (and we can use GPU AS reasoning).
bool isGPU(const Module &M);

/// Flags to distinguish intra-procedural queries from *potentially*
/// inter-procedural queries. Note that information can be valid for both and
/// therefore both bits might be set.
enum ValueScope : uint8_t {
  Intraprocedural = 1,
  Interprocedural = 2,
  AnyScope = Intraprocedural | Interprocedural,
};

struct ValueAndContext : public std::pair<Value *, const Instruction *> {
  using Base = std::pair<Value *, const Instruction *>;
  ValueAndContext(const Base &B) : Base(B) {}
  ValueAndContext(Value &V, const Instruction *CtxI) : Base(&V, CtxI) {}
  ValueAndContext(Value &V, const Instruction &CtxI) : Base(&V, &CtxI) {}

  Value *getValue() const { return this->first; }
  const Instruction *getCtxI() const { return this->second; }
};

/// Return true if \p I is a `nosync` instruction. Use generic reasoning and
/// potentially the corresponding AANoSync.
bool isNoSyncInst(Attributor &A, const Instruction &I,
                  const AbstractAttribute &QueryingAA);

/// Return true if \p V is dynamically unique, that is, there are no two
/// "instances" of \p V at runtime with different values.
/// Note: If \p ForAnalysisOnly is set we only check that the Attributor will
/// never use \p V to represent two "instances" not that \p V could not
/// technically represent them.
bool isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                         const Value &V, bool ForAnalysisOnly = true);

/// Return true if \p V is a valid value in \p Scope, that is a constant or an
/// instruction/argument of \p Scope.
bool isValidInScope(const Value &V, const Function *Scope);

/// Return true if the value of \p VAC is valid at the position of \p VAC,
/// that is a constant, an argument of the same function, or an instruction in
/// that function that dominates the position.
bool isValidAtPosition(const ValueAndContext &VAC, InformationCache &InfoCache);

/// Try to convert \p V to type \p Ty without introducing new instructions. If
/// this is not possible return `nullptr`. Note: this function basically knows
/// how to cast various constants.
Value *getWithType(Value &V, Type &Ty);

/// Return the combination of \p A and \p B such that the result is a possible
/// value of both. \p B is potentially cast to match the type \p Ty or the
/// type of \p A if \p Ty is null.
///
/// Examples:
///        X + none  => X
/// not_none + undef => not_none
///          V1 + V2 => nullptr
std::optional<Value *>
combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                     const std::optional<Value *> &B, Type *Ty);

/// Helper to represent an access offset and size, with logic to deal with
/// uncertainty and check for overlapping accesses.
struct RangeTy {
  int64_t Offset = Unassigned;
  int64_t Size = Unassigned;

  RangeTy(int64_t Offset, int64_t Size) : Offset(Offset), Size(Size) {}
  RangeTy() = default;
  static RangeTy getUnknown() { return RangeTy{Unknown, Unknown}; }

  /// Return true if offset or size are unknown.
  bool offsetOrSizeAreUnknown() const {
    return Offset == RangeTy::Unknown || Size == RangeTy::Unknown;
  }

  /// Return true if offset and size are unknown, thus this is the default
  /// unknown object.
  bool offsetAndSizeAreUnknown() const {
    return Offset == RangeTy::Unknown && Size == RangeTy::Unknown;
  }

  /// Return true if the offset and size are unassigned.
  bool isUnassigned() const {
    assert((Offset == RangeTy::Unassigned) == (Size == RangeTy::Unassigned) &&
           "Inconsistent state!");
    return Offset == RangeTy::Unassigned;
  }

  /// Return true if this offset and size pair might describe an address that
  /// overlaps with \p Range.
  bool mayOverlap(const RangeTy &Range) const {
    // Any unknown value and we are giving up -> overlap.
    if (offsetOrSizeAreUnknown() || Range.offsetOrSizeAreUnknown())
      return true;

    // Check if one offset point is in the other interval [offset,
    // offset+size].
    return Range.Offset + Range.Size > Offset && Range.Offset < Offset + Size;
  }

  RangeTy &operator&=(const RangeTy &R) {
    if (R.isUnassigned())
      return *this;
    if (isUnassigned())
      return *this = R;
    if (Offset == Unknown || R.Offset == Unknown)
      Offset = Unknown;
    if (Size == Unknown || R.Size == Unknown)
      Size = Unknown;
    if (offsetAndSizeAreUnknown())
      return *this;
    if (Offset == Unknown) {
      Size = std::max(Size, R.Size);
    } else if (Size == Unknown) {
      Offset = std::min(Offset, R.Offset);
    } else {
      Offset = std::min(Offset, R.Offset);
      Size = std::max(Offset + Size, R.Offset + R.Size) - Offset;
    }
    return *this;
  }

  /// Comparison for sorting ranges by offset.
  ///
  /// Returns true if the offset \p L is less than that of \p R.
  inline static bool OffsetLessThan(const RangeTy &L, const RangeTy &R) {
    return L.Offset < R.Offset;
  }

  /// Constants used to represent special offsets or sizes.
  /// - We cannot assume that Offsets and Size are non-negative.
  /// - The constants should not clash with DenseMapInfo, such as EmptyKey
  ///   (INT64_MAX) and TombstoneKey (INT64_MIN).
  /// We use values "in the middle" of the 64 bit range to represent these
  /// special cases.
  static constexpr int64_t Unassigned = std::numeric_limits<int32_t>::min();
  static constexpr int64_t Unknown = std::numeric_limits<int32_t>::max();
};

inline raw_ostream &operator<<(raw_ostream &OS, const RangeTy &R) {
  OS << "[" << R.Offset << ", " << R.Size << "]";
  return OS;
}

inline bool operator==(const RangeTy &A, const RangeTy &B) {
  return A.Offset == B.Offset && A.Size == B.Size;
}

inline bool operator!=(const RangeTy &A, const RangeTy &B) { return !(A == B); }
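
// A minimal usage sketch (hypothetical values) of how RangeTy merges and
// compares accesses:
//
//   AA::RangeTy A(/*Offset=*/0, /*Size=*/8);
//   AA::RangeTy B(/*Offset=*/4, /*Size=*/8);
//   assert(A.mayOverlap(B));      // [0, 8) intersects [4, 12).
//   A &= B;                       // A now spans both: Offset 0, Size 12.
//   assert(A == AA::RangeTy(0, 12));
//   // An unknown offset or size conservatively overlaps everything.
//   assert(A.mayOverlap(AA::RangeTy::getUnknown()));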

/// Return the initial value of \p Obj with type \p Ty if that is a constant.
Constant *getInitialValueForObj(Attributor &A, Value &Obj, Type &Ty,
                                const TargetLibraryInfo *TLI,
                                const DataLayout &DL,
                                RangeTy *RangePtr = nullptr);

/// Collect all potential values \p LI could read into \p PotentialValues. That
/// is, the only values read by \p LI are assumed to be known and all are in
/// \p PotentialValues. \p PotentialValueOrigins will contain all the
/// instructions that might have put a potential value into \p PotentialValues.
/// Dependences on \p QueryingAA are properly tracked, and \p
/// UsedAssumedInformation will inform the caller if assumed information was
/// used.
///
/// \returns True if the assumed potential copies are all in \p PotentialValues,
///          false if something went wrong and the copies could not be
///          determined.
bool getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact = false);
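
// A minimal usage sketch; `A`, `LI`, and `QueryingAA` are assumed to be in
// scope:
//
//   SmallSetVector<Value *, 4> PotentialValues;
//   SmallSetVector<Instruction *, 4> PotentialValueOrigins;
//   bool UsedAssumedInformation = false;
//   if (AA::getPotentiallyLoadedValues(A, LI, PotentialValues,
//                                      PotentialValueOrigins, QueryingAA,
//                                      UsedAssumedInformation)) {
//     // All values LI may read are now known to be in PotentialValues.
//   }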

/// Collect all potential values of the one stored by \p SI into
/// \p PotentialCopies. That is, the only copies that were made via the
/// store are assumed to be known and all are in \p PotentialCopies. Dependences
/// on \p QueryingAA are properly tracked, and \p UsedAssumedInformation will
/// inform the caller if assumed information was used.
///
/// \returns True if the assumed potential copies are all in \p PotentialCopies,
///          false if something went wrong and the copies could not be
///          determined.
bool getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact = false);

/// Return true if \p IRP is readonly. This will query respective AAs that
/// deduce the information and introduce dependences for \p QueryingAA.
bool isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                       const AbstractAttribute &QueryingAA, bool &IsKnown);

/// Return true if \p IRP is readnone. This will query respective AAs that
/// deduce the information and introduce dependences for \p QueryingAA.
bool isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                       const AbstractAttribute &QueryingAA, bool &IsKnown);

/// Return true if \p ToI is potentially reachable from \p FromI without running
/// into any instruction in \p ExclusionSet. The two instructions do not need to
/// be in the same function. \p GoBackwardsCB can be provided to convey domain
/// knowledge about the "lifespan" the user is interested in. By default, the
/// callers of \p FromI are checked as well to determine if \p ToI can be
/// reached. If the query is not interested in callers beyond a certain point,
/// e.g., a GPU kernel entry or the function containing an alloca, the
/// \p GoBackwardsCB should return false.
bool isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet = nullptr,
    std::function<bool(const Function &F)> GoBackwardsCB = nullptr);

/// Same as above but it is sufficient to reach any instruction in \p ToFn.
bool isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet = nullptr,
    std::function<bool(const Function &F)> GoBackwardsCB = nullptr);
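
// A minimal usage sketch; `A`, `FromI`, `ToI`, and `QueryingAA` are assumed
// to be in scope. The callback continues the backwards traversal into callers
// only for functions with local linkage:
//
//   bool Reachable = AA::isPotentiallyReachable(
//       A, FromI, ToI, QueryingAA, /*ExclusionSet=*/nullptr,
//       [](const Function &Fn) { return Fn.hasLocalLinkage(); });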

/// Return true if \p Obj is assumed to be a thread local object.
bool isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                const AbstractAttribute &QueryingAA);

/// Return true if \p I is potentially affected by a barrier.
bool isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                    const AbstractAttribute &QueryingAA);
bool isPotentiallyAffectedByBarrier(Attributor &A, ArrayRef<const Value *> Ptrs,
                                    const AbstractAttribute &QueryingAA,
                                    const Instruction *CtxI);
} // namespace AA

template <>
struct DenseMapInfo<AA::ValueAndContext>
    : public DenseMapInfo<AA::ValueAndContext::Base> {
  using Base = DenseMapInfo<AA::ValueAndContext::Base>;
  static inline AA::ValueAndContext getEmptyKey() {
    return Base::getEmptyKey();
  }
  static inline AA::ValueAndContext getTombstoneKey() {
    return Base::getTombstoneKey();
  }
  static unsigned getHashValue(const AA::ValueAndContext &VAC) {
    return Base::getHashValue(VAC);
  }

  static bool isEqual(const AA::ValueAndContext &LHS,
                      const AA::ValueAndContext &RHS) {
    return Base::isEqual(LHS, RHS);
  }
};

template <>
struct DenseMapInfo<AA::ValueScope> : public DenseMapInfo<unsigned char> {
  using Base = DenseMapInfo<unsigned char>;
  static inline AA::ValueScope getEmptyKey() {
    return AA::ValueScope(Base::getEmptyKey());
  }
  static inline AA::ValueScope getTombstoneKey() {
    return AA::ValueScope(Base::getTombstoneKey());
  }
  static unsigned getHashValue(const AA::ValueScope &S) {
    return Base::getHashValue(S);
  }

  static bool isEqual(const AA::ValueScope &LHS, const AA::ValueScope &RHS) {
    return Base::isEqual(LHS, RHS);
  }
};

template <>
struct DenseMapInfo<const AA::InstExclusionSetTy *>
    : public DenseMapInfo<void *> {
  using super = DenseMapInfo<void *>;
  static inline const AA::InstExclusionSetTy *getEmptyKey() {
    return static_cast<const AA::InstExclusionSetTy *>(super::getEmptyKey());
  }
  static inline const AA::InstExclusionSetTy *getTombstoneKey() {
    return static_cast<const AA::InstExclusionSetTy *>(
        super::getTombstoneKey());
  }
  static unsigned getHashValue(const AA::InstExclusionSetTy *BES) {
    unsigned H = 0;
    if (BES)
      for (const auto *II : *BES)
        H += DenseMapInfo<const Instruction *>::getHashValue(II);
    return H;
  }
  static bool isEqual(const AA::InstExclusionSetTy *LHS,
                      const AA::InstExclusionSetTy *RHS) {
    if (LHS == RHS)
      return true;
    if (LHS == getEmptyKey() || RHS == getEmptyKey() ||
        LHS == getTombstoneKey() || RHS == getTombstoneKey())
      return false;
    auto SizeLHS = LHS ? LHS->size() : 0;
    auto SizeRHS = RHS ? RHS->size() : 0;
    if (SizeLHS != SizeRHS)
      return false;
    if (SizeRHS == 0)
      return true;
    return llvm::set_is_subset(*LHS, *RHS);
  }
};

/// The value passed to the command line option that defines the maximal
/// initialization chain length.
extern unsigned MaxInitializationChainLength;

///{
enum class ChangeStatus {
  CHANGED,
  UNCHANGED,
};

ChangeStatus operator|(ChangeStatus l, ChangeStatus r);
ChangeStatus &operator|=(ChangeStatus &l, ChangeStatus r);
ChangeStatus operator&(ChangeStatus l, ChangeStatus r);
ChangeStatus &operator&=(ChangeStatus &l, ChangeStatus r);
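
// A minimal semantics sketch (the operators are defined out of line):
// `|` accumulates change, so CHANGED dominates, while `&` yields CHANGED
// only if both operands are CHANGED.
//
//   ChangeStatus CS = ChangeStatus::UNCHANGED;
//   CS |= ChangeStatus::CHANGED;   // CS is now CHANGED.
//   CS &= ChangeStatus::UNCHANGED; // CS is UNCHANGED again.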

enum class DepClassTy {
  REQUIRED, ///< The target cannot be valid if the source is not.
  OPTIONAL, ///< The target may be valid if the source is not.
  NONE,     ///< Do not track a dependence between source and target.
};
///}

/// The data structure for the nodes of a dependency graph
struct AADepGraphNode {
public:
  virtual ~AADepGraphNode() = default;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using DepSetTy = SmallSetVector<DepTy, 2>;

protected:
  /// Set of dependency graph nodes which should be updated if this one
  /// is updated. The bit encodes if it is optional.
  DepSetTy Deps;

  static AADepGraphNode *DepGetVal(const DepTy &DT) { return DT.getPointer(); }
  static AbstractAttribute *DepGetValAA(const DepTy &DT) {
    return cast<AbstractAttribute>(DT.getPointer());
  }

  operator AbstractAttribute *() { return cast<AbstractAttribute>(this); }

public:
  using iterator = mapped_iterator<DepSetTy::iterator, decltype(&DepGetVal)>;
  using aaiterator =
      mapped_iterator<DepSetTy::iterator, decltype(&DepGetValAA)>;

  aaiterator begin() { return aaiterator(Deps.begin(), &DepGetValAA); }
  aaiterator end() { return aaiterator(Deps.end(), &DepGetValAA); }
  iterator child_begin() { return iterator(Deps.begin(), &DepGetVal); }
  iterator child_end() { return iterator(Deps.end(), &DepGetVal); }

  void print(raw_ostream &OS) const { print(nullptr, OS); }
  virtual void print(Attributor *, raw_ostream &OS) const {
    OS << "AADepNode Impl\n";
  }
  DepSetTy &getDeps() { return Deps; }

  friend struct Attributor;
  friend struct AADepGraph;
};

/// The data structure for the dependency graph
///
/// Note that in this graph if there is an edge from A to B (A -> B),
/// then it means that B depends on A, and when the state of A is
/// updated, node B should also be updated
struct AADepGraph {
  AADepGraph() = default;
  ~AADepGraph() = default;

  using DepTy = AADepGraphNode::DepTy;
  static AADepGraphNode *DepGetVal(const DepTy &DT) { return DT.getPointer(); }
  using iterator =
      mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;

  /// There is no root node for the dependency graph. But the SCCIterator
/// requires a single entry point, so we maintain a fake ("synthetic") root
  /// node that depends on every node.
  AADepGraphNode SyntheticRoot;
  AADepGraphNode *GetEntryNode() { return &SyntheticRoot; }

  iterator begin() { return SyntheticRoot.child_begin(); }
  iterator end() { return SyntheticRoot.child_end(); }

  void viewGraph();

  /// Dump graph to file
  void dumpGraph();

  /// Print dependency graph
  void print();
};

/// Helper to describe and deal with positions in the LLVM-IR.
///
/// A position in the IR is described by an anchor value and an "offset" that
/// could be the argument number, for call sites and arguments, or an indicator
/// of the "position kind". The kinds, specified in the Kind enum below, include
/// the locations in the attribute list, e.g., function scope and return value,
/// as well as a distinction between call sites and functions. Finally, there
/// are floating values that do not have a corresponding attribute list
/// position.
struct IRPosition {
  // NOTE: In the future this definition can be changed to support recursive
  // functions.
  using CallBaseContext = CallBase;

  /// The positions we distinguish in the IR.
  enum Kind : char {
    IRP_INVALID,  ///< An invalid position.
    IRP_FLOAT,    ///< A position that is not associated with a spot suitable
                  ///< for attributes. This could be any value or instruction.
    IRP_RETURNED, ///< An attribute for the function return value.
    IRP_CALL_SITE_RETURNED, ///< An attribute for a call site return value.
    IRP_FUNCTION,           ///< An attribute for a function (scope).
    IRP_CALL_SITE,          ///< An attribute for a call site (function scope).
    IRP_ARGUMENT,           ///< An attribute for a function argument.
    IRP_CALL_SITE_ARGUMENT, ///< An attribute for a call site argument.
  };

  /// Default constructor available to create invalid positions implicitly. All
  /// other positions need to be created explicitly through the appropriate
  /// static member function.
  IRPosition() : Enc(nullptr, ENC_VALUE) { verify(); }

  /// Create a position describing the value of \p V.
  static const IRPosition value(const Value &V,
                                const CallBaseContext *CBContext = nullptr) {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return IRPosition::argument(*Arg, CBContext);
    if (auto *CB = dyn_cast<CallBase>(&V))
      return IRPosition::callsite_returned(*CB);
    return IRPosition(const_cast<Value &>(V), IRP_FLOAT, CBContext);
  }

  /// Create a position describing the instruction \p I. This is different from
  /// the value version because call sites are treated as instructions rather
  /// than their return value in this function.
  static const IRPosition inst(const Instruction &I,
                               const CallBaseContext *CBContext = nullptr) {
    return IRPosition(const_cast<Instruction &>(I), IRP_FLOAT, CBContext);
  }

  /// Create a position describing the function scope of \p F.
  /// \p CBContext is used for call base specific analysis.
  static const IRPosition function(const Function &F,
                                   const CallBaseContext *CBContext = nullptr) {
    return IRPosition(const_cast<Function &>(F), IRP_FUNCTION, CBContext);
  }

  /// Create a position describing the returned value of \p F.
  /// \p CBContext is used for call base specific analysis.
  static const IRPosition returned(const Function &F,
                                   const CallBaseContext *CBContext = nullptr) {
    return IRPosition(const_cast<Function &>(F), IRP_RETURNED, CBContext);
  }

  /// Create a position describing the argument \p Arg.
  /// \p CBContext is used for call base specific analysis.
  static const IRPosition argument(const Argument &Arg,
                                   const CallBaseContext *CBContext = nullptr) {
    return IRPosition(const_cast<Argument &>(Arg), IRP_ARGUMENT, CBContext);
  }

  /// Create a position describing the function scope of \p CB.
  static const IRPosition callsite_function(const CallBase &CB) {
    return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE);
  }

  /// Create a position describing the returned value of \p CB.
  static const IRPosition callsite_returned(const CallBase &CB) {
    return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE_RETURNED);
  }

  /// Create a position describing the argument of \p CB at position \p ArgNo.
  static const IRPosition callsite_argument(const CallBase &CB,
                                            unsigned ArgNo) {
    return IRPosition(const_cast<Use &>(CB.getArgOperandUse(ArgNo)),
                      IRP_CALL_SITE_ARGUMENT);
  }

  /// Create a position describing the argument of \p ACS at position \p ArgNo.
  static const IRPosition callsite_argument(AbstractCallSite ACS,
                                            unsigned ArgNo) {
    if (ACS.getNumArgOperands() <= ArgNo)
      return IRPosition();
    int CSArgNo = ACS.getCallArgOperandNo(ArgNo);
    if (CSArgNo >= 0)
      return IRPosition::callsite_argument(
          cast<CallBase>(*ACS.getInstruction()), CSArgNo);
    return IRPosition();
  }

  /// Create a position with function scope matching the "context" of \p IRP.
  /// If \p IRP is a call site (see isAnyCallSitePosition()) then the result
  /// will be a call site position, otherwise the function position of the
  /// associated function.
  static const IRPosition
  function_scope(const IRPosition &IRP,
                 const CallBaseContext *CBContext = nullptr) {
    if (IRP.isAnyCallSitePosition()) {
      return IRPosition::callsite_function(
          cast<CallBase>(IRP.getAnchorValue()));
    }
    assert(IRP.getAssociatedFunction());
    return IRPosition::function(*IRP.getAssociatedFunction(), CBContext);
  }
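
  // A minimal usage sketch; `F`, `Arg`, and `CB` are hypothetical values of
  // type Function, Argument, and CallBase, respectively:
  //
  //   IRPosition FnPos  = IRPosition::function(F);   // function scope
  //   IRPosition RetPos = IRPosition::returned(F);   // return value
  //   IRPosition ArgPos = IRPosition::argument(Arg); // argument
  //   IRPosition CSArgPos =
  //       IRPosition::callsite_argument(CB, /*ArgNo=*/0);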

  bool operator==(const IRPosition &RHS) const {
    return Enc == RHS.Enc && RHS.CBContext == CBContext;
  }
  bool operator!=(const IRPosition &RHS) const { return !(*this == RHS); }

  /// Return the value this abstract attribute is anchored with.
  ///
  /// The anchor value might not be the associated value if the latter is not
  /// sufficient to determine where arguments will be manifested. This is, so
  /// far, only the case for call site arguments as the value is not sufficient
  /// to pinpoint them. Instead, we can use the call site as an anchor.
  Value &getAnchorValue() const {
    switch (getEncodingBits()) {
    case ENC_VALUE:
    case ENC_RETURNED_VALUE:
    case ENC_FLOATING_FUNCTION:
      return *getAsValuePtr();
    case ENC_CALL_SITE_ARGUMENT_USE:
      return *(getAsUsePtr()->getUser());
    default:
      llvm_unreachable("Unkown encoding!");
    };
  }

  /// Return the associated function, if any.
  Function *getAssociatedFunction() const {
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue())) {
      // We reuse the logic that associates callback callees to arguments of a
      // call site here to identify the callback callee as the associated
      // function.
      if (Argument *Arg = getAssociatedArgument())
        return Arg->getParent();
      return dyn_cast_if_present<Function>(
          CB->getCalledOperand()->stripPointerCasts());
    }
    return getAnchorScope();
  }

  /// Return the associated argument, if any.
  Argument *getAssociatedArgument() const;

  /// Return true if the position refers to a function interface, that is the
  /// function scope, the function return, or an argument.
  bool isFnInterfaceKind() const {
    switch (getPositionKind()) {
    case IRPosition::IRP_FUNCTION:
    case IRPosition::IRP_RETURNED:
    case IRPosition::IRP_ARGUMENT:
      return true;
    default:
      return false;
    }
  }

  /// Return true if this is a function or call site position.
  bool isFunctionScope() const {
    switch (getPositionKind()) {
    case IRPosition::IRP_CALL_SITE:
    case IRPosition::IRP_FUNCTION:
      return true;
    default:
      return false;
    };
  }

  /// Return the Function surrounding the anchor value.
  Function *getAnchorScope() const {
    Value &V = getAnchorValue();
    if (isa<Function>(V))
      return &cast<Function>(V);
    if (isa<Argument>(V))
      return cast<Argument>(V).getParent();
    if (isa<Instruction>(V))
      return cast<Instruction>(V).getFunction();
    return nullptr;
  }

  /// Return the context instruction, if any.
  Instruction *getCtxI() const {
    Value &V = getAnchorValue();
    if (auto *I = dyn_cast<Instruction>(&V))
      return I;
    if (auto *Arg = dyn_cast<Argument>(&V))
      if (!Arg->getParent()->isDeclaration())
        return &Arg->getParent()->getEntryBlock().front();
    if (auto *F = dyn_cast<Function>(&V))
      if (!F->isDeclaration())
        return &(F->getEntryBlock().front());
    return nullptr;
  }

  /// Return the value this abstract attribute is associated with.
  Value &getAssociatedValue() const {
    if (getCallSiteArgNo() < 0 || isa<Argument>(&getAnchorValue()))
      return getAnchorValue();
    assert(isa<CallBase>(&getAnchorValue()) && "Expected a call base!");
    return *cast<CallBase>(&getAnchorValue())
                ->getArgOperand(getCallSiteArgNo());
  }

  /// Return the type this abstract attribute is associated with.
  Type *getAssociatedType() const {
    if (getPositionKind() == IRPosition::IRP_RETURNED)
      return getAssociatedFunction()->getReturnType();
    return getAssociatedValue().getType();
  }

  /// Return the callee argument number of the associated value if it is an
  /// argument or call site argument, otherwise a negative value. In contrast to
  /// `getCallSiteArgNo` this method will always return the "argument number"
  /// from the perspective of the callee. This may not be the same as the call
  /// site perspective if this is a callback call.
  int getCalleeArgNo() const {
    return getArgNo(/* CallbackCalleeArgIfApplicable */ true);
  }

  /// Return the call site argument number of the associated value if it is an
  /// argument or call site argument, otherwise a negative value. In contrast to
  /// `getCalleeArgNo` this method will always return the "operand number" from
  /// the perspective of the call site. This may not be the same as the callee
  /// perspective if this is a callback call.
  int getCallSiteArgNo() const {
    return getArgNo(/* CallbackCalleeArgIfApplicable */ false);
  }

  /// Return the index in the attribute list for this position.
  unsigned getAttrIdx() const {
    switch (getPositionKind()) {
    case IRPosition::IRP_INVALID:
    case IRPosition::IRP_FLOAT:
      break;
    case IRPosition::IRP_FUNCTION:
    case IRPosition::IRP_CALL_SITE:
      return AttributeList::FunctionIndex;
    case IRPosition::IRP_RETURNED:
    case IRPosition::IRP_CALL_SITE_RETURNED:
      return AttributeList::ReturnIndex;
    case IRPosition::IRP_ARGUMENT:
      return getCalleeArgNo() + AttributeList::FirstArgIndex;
    case IRPosition::IRP_CALL_SITE_ARGUMENT:
      return getCallSiteArgNo() + AttributeList::FirstArgIndex;
    }
    llvm_unreachable(
        "There is no attribute index for a floating or invalid position!");
  }

  /// Return the value attributes are attached to.
  Value *getAttrListAnchor() const {
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
      return CB;
    return getAssociatedFunction();
  }

  /// Return the attributes associated with this function or call site scope.
  AttributeList getAttrList() const {
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
      return CB->getAttributes();
    return getAssociatedFunction()->getAttributes();
  }

  /// Update the attributes associated with this function or call site scope.
  void setAttrList(const AttributeList &AttrList) const {
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
      return CB->setAttributes(AttrList);
    return getAssociatedFunction()->setAttributes(AttrList);
  }

  /// Return the number of arguments associated with this function or call site
  /// scope.
  unsigned getNumArgs() const {
    assert((getPositionKind() == IRP_CALL_SITE ||
            getPositionKind() == IRP_FUNCTION) &&
           "Only valid for function/call site positions!");
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
      return CB->arg_size();
    return getAssociatedFunction()->arg_size();
  }

  /// Return the argument \p ArgNo associated with this function or call site
  /// scope.
  Value *getArg(unsigned ArgNo) const {
    assert((getPositionKind() == IRP_CALL_SITE ||
            getPositionKind() == IRP_FUNCTION) &&
           "Only valid for function/call site positions!");
    if (auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
      return CB->getArgOperand(ArgNo);
    return getAssociatedFunction()->getArg(ArgNo);
  }

  /// Return the associated position kind.
  Kind getPositionKind() const {
    char EncodingBits = getEncodingBits();
    if (EncodingBits == ENC_CALL_SITE_ARGUMENT_USE)
      return IRP_CALL_SITE_ARGUMENT;
    if (EncodingBits == ENC_FLOATING_FUNCTION)
      return IRP_FLOAT;

    Value *V = getAsValuePtr();
    if (!V)
      return IRP_INVALID;
    if (isa<Argument>(V))
      return IRP_ARGUMENT;
    if (isa<Function>(V))
      return isReturnPosition(EncodingBits) ? IRP_RETURNED : IRP_FUNCTION;
    if (isa<CallBase>(V))
      return isReturnPosition(EncodingBits) ? IRP_CALL_SITE_RETURNED
                                            : IRP_CALL_SITE;
    return IRP_FLOAT;
  }

  bool isAnyCallSitePosition() const {
    switch (getPositionKind()) {
    case IRPosition::IRP_CALL_SITE:
    case IRPosition::IRP_CALL_SITE_RETURNED:
    case IRPosition::IRP_CALL_SITE_ARGUMENT:
      return true;
    default:
      return false;
    }
  }

  /// Return true if the position is an argument or call site argument.
  bool isArgumentPosition() const {
    switch (getPositionKind()) {
    case IRPosition::IRP_ARGUMENT:
    case IRPosition::IRP_CALL_SITE_ARGUMENT:
      return true;
    default:
      return false;
    }
  }

  /// Return the same position without the call base context.
  IRPosition stripCallBaseContext() const {
    IRPosition Result = *this;
    Result.CBContext = nullptr;
    return Result;
  }

  /// Get the call base context from the position.
  const CallBaseContext *getCallBaseContext() const { return CBContext; }

  /// Check if the position has any call base context.
  bool hasCallBaseContext() const { return CBContext != nullptr; }

  /// Special DenseMap key values.
  ///
  ///{
  static const IRPosition EmptyKey;
  static const IRPosition TombstoneKey;
  ///}

  /// Conversion into a void * to allow reuse of pointer hashing.
  operator void *() const { return Enc.getOpaqueValue(); }

private:
  /// Private constructor for special values only!
  explicit IRPosition(void *Ptr, const CallBaseContext *CBContext = nullptr)
      : CBContext(CBContext) {
    Enc.setFromOpaqueValue(Ptr);
  }

  /// IRPosition anchored at \p AnchorVal with kind/argument number \p PK.
  explicit IRPosition(Value &AnchorVal, Kind PK,
                      const CallBaseContext *CBContext = nullptr)
      : CBContext(CBContext) {
    switch (PK) {
    case IRPosition::IRP_INVALID:
      llvm_unreachable("Cannot create invalid IRP with an anchor value!");
      break;
    case IRPosition::IRP_FLOAT:
      // Special case for floating functions.
      if (isa<Function>(AnchorVal) || isa<CallBase>(AnchorVal))
        Enc = {&AnchorVal, ENC_FLOATING_FUNCTION};
      else
        Enc = {&AnchorVal, ENC_VALUE};
      break;
    case IRPosition::IRP_FUNCTION:
    case IRPosition::IRP_CALL_SITE:
      Enc = {&AnchorVal, ENC_VALUE};
      break;
    case IRPosition::IRP_RETURNED:
    case IRPosition::IRP_CALL_SITE_RETURNED:
      Enc = {&AnchorVal, ENC_RETURNED_VALUE};
      break;
    case IRPosition::IRP_ARGUMENT:
      Enc = {&AnchorVal, ENC_VALUE};
      break;
    case IRPosition::IRP_CALL_SITE_ARGUMENT:
      llvm_unreachable(
          "Cannot create call site argument IRP with an anchor value!");
      break;
    }
    verify();
  }

  /// Return the callee argument number of the associated value if it is an
  /// argument or call site argument. See also `getCalleeArgNo` and
  /// `getCallSiteArgNo`.
  int getArgNo(bool CallbackCalleeArgIfApplicable) const {
    if (CallbackCalleeArgIfApplicable)
      if (Argument *Arg = getAssociatedArgument())
        return Arg->getArgNo();
    switch (getPositionKind()) {
    case IRPosition::IRP_ARGUMENT:
      return cast<Argument>(getAsValuePtr())->getArgNo();
    case IRPosition::IRP_CALL_SITE_ARGUMENT: {
      Use &U = *getAsUsePtr();
      return cast<CallBase>(U.getUser())->getArgOperandNo(&U);
    }
    default:
      return -1;
    }
  }

  /// IRPosition for the use \p U. The position kind \p PK needs to be
  /// IRP_CALL_SITE_ARGUMENT, the anchor value is the user, and the associated
  /// value is the used value.
  explicit IRPosition(Use &U, Kind PK) {
    assert(PK == IRP_CALL_SITE_ARGUMENT &&
           "Use constructor is for call site arguments only!");
    Enc = {&U, ENC_CALL_SITE_ARGUMENT_USE};
    verify();
  }

  /// Verify internal invariants.
  void verify();

  /// Return the underlying pointer as Value *, valid for all positions but
  /// IRP_CALL_SITE_ARGUMENT.
  Value *getAsValuePtr() const {
    assert(getEncodingBits() != ENC_CALL_SITE_ARGUMENT_USE &&
           "Not a value pointer!");
    return reinterpret_cast<Value *>(Enc.getPointer());
  }

  /// Return the underlying pointer as Use *, valid only for
  /// IRP_CALL_SITE_ARGUMENT positions.
  Use *getAsUsePtr() const {
    assert(getEncodingBits() == ENC_CALL_SITE_ARGUMENT_USE &&
           "Not a value pointer!");
    return reinterpret_cast<Use *>(Enc.getPointer());
  }

  /// Return true if \p EncodingBits describe a returned or call site returned
  /// position.
  static bool isReturnPosition(char EncodingBits) {
    return EncodingBits == ENC_RETURNED_VALUE;
  }

  /// Return true if the encoding bits describe a returned or call site returned
  /// position.
  bool isReturnPosition() const { return isReturnPosition(getEncodingBits()); }

  /// The encoding of the IRPosition is a combination of a pointer and two
  /// encoding bits. The values of the encoding bits are defined in the enum
  /// below. The pointer is either a Value* (for the first three encoding bit
  /// combinations) or Use* (for ENC_CALL_SITE_ARGUMENT_USE).
  ///
  ///{
  enum {
    ENC_VALUE = 0b00,
    ENC_RETURNED_VALUE = 0b01,
    ENC_FLOATING_FUNCTION = 0b10,
    ENC_CALL_SITE_ARGUMENT_USE = 0b11,
  };

  // Reserve the maximal amount of bits so there is no need to mask out the
  // remaining ones. We will not encode anything else in the pointer anyway.
  static constexpr int NumEncodingBits =
      PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
  static_assert(NumEncodingBits >= 2, "At least two bits are required!");

  /// The pointer with the encoding bits.
  PointerIntPair<void *, NumEncodingBits, char> Enc;
  ///}

  /// Call base context. Used for callsite specific analysis.
  const CallBaseContext *CBContext = nullptr;

  /// Return the encoding bits.
  char getEncodingBits() const { return Enc.getInt(); }
};

/// Helper that allows IRPosition as a key in a DenseMap.
template <> struct DenseMapInfo<IRPosition> {
  static inline IRPosition getEmptyKey() { return IRPosition::EmptyKey; }
  static inline IRPosition getTombstoneKey() {
    return IRPosition::TombstoneKey;
  }
  static unsigned getHashValue(const IRPosition &IRP) {
    return (DenseMapInfo<void *>::getHashValue(IRP) << 4) ^
           (DenseMapInfo<Value *>::getHashValue(IRP.getCallBaseContext()));
  }

  static bool isEqual(const IRPosition &a, const IRPosition &b) {
    return a == b;
  }
};

/// A visitor class for IR positions.
///
/// Given a position P, the SubsumingPositionIterator allows visiting "subsuming
/// positions" wrt. attributes/information. Thus, if a piece of information
/// holds for a subsuming position, it also holds for the position P.
///
/// The subsuming positions always include the initial position and then,
/// depending on the position kind, additionally the following ones:
/// - for IRP_RETURNED:
///   - the function (IRP_FUNCTION)
/// - for IRP_ARGUMENT:
///   - the function (IRP_FUNCTION)
/// - for IRP_CALL_SITE:
///   - the callee (IRP_FUNCTION), if known
/// - for IRP_CALL_SITE_RETURNED:
///   - the callee (IRP_RETURNED), if known
///   - the call site (IRP_FUNCTION)
///   - the callee (IRP_FUNCTION), if known
/// - for IRP_CALL_SITE_ARGUMENT:
///   - the argument of the callee (IRP_ARGUMENT), if known
///   - the callee (IRP_FUNCTION), if known
///   - the position the call site argument is associated with if it is not
///     anchored to the call site, e.g., if it is an argument then the argument
///     (IRP_ARGUMENT)
class SubsumingPositionIterator {
  SmallVector<IRPosition, 4> IRPositions;
  using iterator = decltype(IRPositions)::iterator;

public:
  SubsumingPositionIterator(const IRPosition &IRP);
  iterator begin() { return IRPositions.begin(); }
  iterator end() { return IRPositions.end(); }
};
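
// A minimal usage sketch; `CSArgPos` is a hypothetical call site argument
// position:
//
//   for (const IRPosition &SubIRP : SubsumingPositionIterator(CSArgPos)) {
//     // Information that holds for SubIRP also holds for CSArgPos.
//   }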

/// Wrapper for FunctionAnalysisManager.
struct AnalysisGetter {
  // The client may be running the old pass manager, in which case, we need to
  // map the requested Analysis to its equivalent wrapper in the old pass
  // manager. The scheme implemented here does not require every Analysis to be
  // updated. Only those new analyses that the client cares about in the old
  // pass manager need to expose a LegacyWrapper type, and that wrapper should
  // support a getResult() method that matches the new Analysis.
  //
  // We need SFINAE to check for the LegacyWrapper, but function templates don't
  // allow partial specialization, which is needed in this case. So instead, we
  // use a constexpr bool to perform the SFINAE, and then use this information
  // inside the function template.
  template <typename, typename = void>
  static constexpr bool HasLegacyWrapper = false;

  template <typename Analysis>
  typename Analysis::Result *getAnalysis(const Function &F,
                                         bool RequestCachedOnly = false) {
    if (!LegacyPass && !FAM)
      return nullptr;
    if (FAM) {
      if (CachedOnly || RequestCachedOnly)
        return FAM->getCachedResult<Analysis>(const_cast<Function &>(F));
      return &FAM->getResult<Analysis>(const_cast<Function &>(F));
    }
    if constexpr (HasLegacyWrapper<Analysis>) {
      if (!CachedOnly && !RequestCachedOnly)
        return &LegacyPass
                    ->getAnalysis<typename Analysis::LegacyWrapper>(
                        const_cast<Function &>(F))
                    .getResult();
      if (auto *P =
              LegacyPass
                  ->getAnalysisIfAvailable<typename Analysis::LegacyWrapper>())
        return &P->getResult();
    }
    return nullptr;
  }

  AnalysisGetter(FunctionAnalysisManager &FAM, bool CachedOnly = false)
      : FAM(&FAM), CachedOnly(CachedOnly) {}
  AnalysisGetter(Pass *P, bool CachedOnly = false)
      : LegacyPass(P), CachedOnly(CachedOnly) {}
  AnalysisGetter() = default;

private:
  FunctionAnalysisManager *FAM = nullptr;
  Pass *LegacyPass = nullptr;

  /// If \p CachedOnly is true, no new analysis is computed; only existing
  /// results are used. The same behavior is also available per request via
  /// \p RequestCachedOnly.
  bool CachedOnly = false;
};

template <typename Analysis>
constexpr bool AnalysisGetter::HasLegacyWrapper<
    Analysis, std::void_t<typename Analysis::LegacyWrapper>> = true;
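
// A minimal usage sketch, assuming a FunctionAnalysisManager `FAM` and a
// Function `F` are available:
//
//   AnalysisGetter AG(FAM);
//   // Computes (or retrieves) the dominator tree via the analysis manager.
//   if (auto *DT = AG.getAnalysis<DominatorTreeAnalysis>(F))
//     (void)DT;
//   // Consults only the cache; returns nullptr if nothing is cached.
//   auto *TLI =
//       AG.getAnalysis<TargetLibraryAnalysis>(F, /*RequestCachedOnly=*/true);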

/// Data structure to hold cached (LLVM-IR) information.
///
/// All attributes are given an InformationCache object at creation time to
/// avoid inspection of the IR by all of them individually. This default
/// InformationCache will hold information required by 'default' attributes,
/// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..)
/// is called.
///
/// If custom abstract attributes, registered manually through
/// Attributor::registerAA(...), need more information, especially if it is not
/// reusable, it is advised to inherit from the InformationCache and cast the
/// instance down in the abstract attributes.
struct InformationCache {
  InformationCache(const Module &M, AnalysisGetter &AG,
                   BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC,
                   bool UseExplorer = true)
      : CGSCC(CGSCC), DL(M.getDataLayout()), Allocator(Allocator), AG(AG),
        TargetTriple(M.getTargetTriple()) {
    if (UseExplorer)
      Explorer = new (Allocator) MustBeExecutedContextExplorer(
          /* ExploreInterBlock */ true, /* ExploreCFGForward */ true,
          /* ExploreCFGBackward */ true,
          /* LIGetter */
          [&](const Function &F) { return AG.getAnalysis<LoopAnalysis>(F); },
          /* DTGetter */
          [&](const Function &F) {
            return AG.getAnalysis<DominatorTreeAnalysis>(F);
          },
          /* PDTGetter */
          [&](const Function &F) {
            return AG.getAnalysis<PostDominatorTreeAnalysis>(F);
          });
  }

  ~InformationCache() {
    // The FunctionInfo objects are allocated via a BumpPtrAllocator, so we
    // have to call the destructors manually.
    for (auto &It : FuncInfoMap)
      It.getSecond()->~FunctionInfo();
    // The same is true for the instruction exclusion sets.
    using AA::InstExclusionSetTy;
    for (auto *BES : BESets)
      BES->~InstExclusionSetTy();
    if (Explorer)
      Explorer->~MustBeExecutedContextExplorer();
  }

  /// Apply \p CB to all uses of \p F. If \p LookThroughConstantExprUses is
  /// true, constant expression users are not given to \p CB but their uses are
  /// traversed transitively.
  template <typename CBTy>
  static void foreachUse(Function &F, CBTy CB,
                         bool LookThroughConstantExprUses = true) {
    SmallVector<Use *, 8> Worklist(make_pointer_range(F.uses()));

    for (unsigned Idx = 0; Idx < Worklist.size(); ++Idx) {
      Use &U = *Worklist[Idx];

      // Allow use in constant bitcasts and simply look through them.
      if (LookThroughConstantExprUses && isa<ConstantExpr>(U.getUser())) {
        for (Use &CEU : cast<ConstantExpr>(U.getUser())->uses())
          Worklist.push_back(&CEU);
        continue;
      }

      CB(U);
    }
  }
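
  // A minimal usage sketch, assuming a Function `F`: count direct call uses
  // while looking through constant expressions (the default).
  //
  //   unsigned NumCallUses = 0;
  //   InformationCache::foreachUse(F, [&](Use &U) {
  //     if (auto *CB = dyn_cast<CallBase>(U.getUser()))
  //       if (CB->isCallee(&U))
  //         ++NumCallUses;
  //   });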

  /// The CG-SCC the pass is run on, or nullptr if it is a module pass.
  const SetVector<Function *> *const CGSCC = nullptr;

  /// A vector type to hold instructions.
  using InstructionVectorTy = SmallVector<Instruction *, 8>;

  /// A map type from opcodes to instructions with this opcode.
  using OpcodeInstMapTy = DenseMap<unsigned, InstructionVectorTy *>;

  /// Return the map that relates "interesting" opcodes with all instructions
  /// with that opcode in \p F.
  OpcodeInstMapTy &getOpcodeInstMapForFunction(const Function &F) {
    return getFunctionInfo(F).OpcodeInstMap;
  }

  /// Return the instructions in \p F that may read or write memory.
  InstructionVectorTy &getReadOrWriteInstsForFunction(const Function &F) {
    return getFunctionInfo(F).RWInsts;
  }

  /// Return MustBeExecutedContextExplorer
  MustBeExecutedContextExplorer *getMustBeExecutedContextExplorer() {
    return Explorer;
  }

  /// Return TargetLibraryInfo for function \p F.
  TargetLibraryInfo *getTargetLibraryInfoForFunction(const Function &F) {
    return AG.getAnalysis<TargetLibraryAnalysis>(F);
  }

  /// Return true if \p Arg is involved in a must-tail call, thus the argument
  /// of the caller or callee.
  bool isInvolvedInMustTailCall(const Argument &Arg) {
    FunctionInfo &FI = getFunctionInfo(*Arg.getParent());
    return FI.CalledViaMustTail || FI.ContainsMustTailCall;
  }

  bool isOnlyUsedByAssume(const Instruction &I) const {
    return AssumeOnlyValues.contains(&I);
  }

  /// Return the analysis result from a pass \p AP for function \p F.
  template <typename AP>
  typename AP::Result *getAnalysisResultForFunction(const Function &F,
                                                    bool CachedOnly = false) {
    return AG.getAnalysis<AP>(F, CachedOnly);
  }

  /// Return datalayout used in the module.
  const DataLayout &getDL() { return DL; }

  /// Return the map containing all the knowledge we have from `llvm.assume`s.
  const RetainedKnowledgeMap &getKnowledgeMap() const { return KnowledgeMap; }

  /// Given \p BES, return a uniqued version.
  const AA::InstExclusionSetTy *
  getOrCreateUniqueBlockExecutionSet(const AA::InstExclusionSetTy *BES) {
    auto It = BESets.find(BES);
    if (It != BESets.end())
      return *It;
    auto *UniqueBES = new (Allocator) AA::InstExclusionSetTy(*BES);
    bool Success = BESets.insert(UniqueBES).second;
    (void)Success;
    assert(Success && "Expected only new entries to be added");
    return UniqueBES;
  }

  /// Return true if the stack (llvm::Alloca) can be accessed by other threads.
  bool stackIsAccessibleByOtherThreads() { return !targetIsGPU(); }

  /// Return true if the target is a GPU.
  bool targetIsGPU() {
    return TargetTriple.isAMDGPU() || TargetTriple.isNVPTX();
  }

private:
  struct FunctionInfo {
    ~FunctionInfo();

    /// A nested map that remembers all instructions in a function with a
    /// certain instruction opcode (Instruction::getOpcode()).
    OpcodeInstMapTy OpcodeInstMap;

    /// A map from functions to their instructions that may read or write
    /// memory.
    InstructionVectorTy RWInsts;

    /// Function is called by a `musttail` call.
    bool CalledViaMustTail;

    /// Function contains a `musttail` call.
    bool ContainsMustTailCall;
  };

  /// A map type from functions to information about them.
  DenseMap<const Function *, FunctionInfo *> FuncInfoMap;

  /// Return information about the function \p F, potentially by creating it.
  FunctionInfo &getFunctionInfo(const Function &F) {
    FunctionInfo *&FI = FuncInfoMap[&F];
    if (!FI) {
      FI = new (Allocator) FunctionInfo();
      initializeInformationCache(F, *FI);
    }
    return *FI;
  }

  /// Initialize the function information cache \p FI for the function \p F.
  ///
  /// This method needs to be called for all functions that might be looked at
  /// through the information cache interface *prior* to looking at them.
  void initializeInformationCache(const Function &F, FunctionInfo &FI);

  /// The datalayout used in the module.
  const DataLayout &DL;

  /// The allocator used to allocate memory, e.g. for `FunctionInfo`s.
  BumpPtrAllocator &Allocator;

  /// MustBeExecutedContextExplorer
  MustBeExecutedContextExplorer *Explorer = nullptr;

  /// A map with knowledge retained in `llvm.assume` instructions.
  RetainedKnowledgeMap KnowledgeMap;

  /// A container for all instructions that are only used by `llvm.assume`.
  SetVector<const Instruction *> AssumeOnlyValues;

  /// Cache for block sets to allow reuse.
  DenseSet<const AA::InstExclusionSetTy *> BESets;

  /// Getters for analysis.
  AnalysisGetter &AG;

  /// Set of inlineable functions
  SmallPtrSet<const Function *, 8> InlineableFunctions;

  /// The triple describing the target machine.
  Triple TargetTriple;

  /// Give the Attributor access to the members so
  /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
  friend struct Attributor;
};

/// Configuration for the Attributor.
struct AttributorConfig {

  AttributorConfig(CallGraphUpdater &CGUpdater) : CGUpdater(CGUpdater) {}

  /// Is the user of the Attributor a module pass or not. This determines what
  /// IR we can look at and modify. If it is a module pass we might deduce facts
  /// outside the initial function set and modify functions outside that set,
  /// but only as part of the optimization of the functions in the initial
  /// function set. For CGSCC passes we can look at the IR of the module slice
  /// but never run any deduction, or perform any modification, outside the
  /// initial function set (which we assume is the SCC).
  bool IsModulePass = true;

  /// Flag to determine if we can delete functions or keep dead ones around.
  bool DeleteFns = true;

  /// Flag to determine if we rewrite function signatures.
  bool RewriteSignatures = true;

  /// Flag to determine if we want to initialize all default AAs for an internal
  /// function marked live. See also: InitializationCallback.
  bool DefaultInitializeLiveInternals = true;

  /// Flag to determine if we should skip all liveness checks early on.
  bool UseLiveness = true;

  /// Callback function to be invoked on internal functions marked live.
  std::function<void(Attributor &A, const Function &F)> InitializationCallback =
      nullptr;

  /// Helper to update an underlying call graph and to delete functions.
  CallGraphUpdater &CGUpdater;

  /// If not null, a set limiting the attribute opportunities.
  DenseSet<const char *> *Allowed = nullptr;

  /// Maximum number of iterations to run until fixpoint.
  std::optional<unsigned> MaxFixpointIterations;

  /// A callback function that returns an ORE object from a Function pointer.
  ///{
  using OptimizationRemarkGetter =
      function_ref<OptimizationRemarkEmitter &(Function *)>;
  OptimizationRemarkGetter OREGetter = nullptr;
  ///}

  /// The name of the pass running the attributor, used to emit remarks.
  const char *PassName = nullptr;

  using IPOAmendableCBTy = function_ref<bool(const Function &F)>;
  IPOAmendableCBTy IPOAmendableCB;
};
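
// A minimal configuration sketch; `CGUpdater` must outlive the Attributor and
// the values below are illustrative, not recommendations:
//
//   CallGraphUpdater CGUpdater;
//   AttributorConfig AC(CGUpdater);
//   AC.IsModulePass = true;        // Allow module-wide deduction.
//   AC.DeleteFns = false;          // Keep dead functions around.
//   AC.RewriteSignatures = false;  // Do not change function signatures.
//   AC.MaxFixpointIterations = 32; // Cap the number of fixpoint iterations.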

/// The fixpoint analysis framework that orchestrates the attribute deduction.
///
/// The Attributor provides a general abstract analysis framework (guided
/// fixpoint iteration) as well as helper functions for the deduction of
/// (LLVM-IR) attributes. However, also other code properties can be deduced,
/// propagated, and ultimately manifested through the Attributor framework. This
/// is particularly useful if these properties interact with attributes and a
/// co-scheduled deduction allows to improve the solution. Even if not, thus if
/// attributes/properties are completely isolated, they should use the
/// Attributor framework to reduce the number of fixpoint iteration frameworks
/// in the code base. Note that the Attributor design makes sure that isolated
/// attributes are not impacted, in any way, by others derived at the same time
/// if there is no cross-reasoning performed.
///
/// The public facing interface of the Attributor is kept simple and basically
/// allows abstract attributes to do one thing: query other abstract attributes
/// in-flight. There are two reasons to do this:
///    a) The optimistic state of one abstract attribute can justify an
///       optimistic state of another, allowing the framework to end up with an
///       optimistic (=best possible) fixpoint instead of one based solely on
///       information in the IR.
///    b) This avoids reimplementing various kinds of lookups, e.g., to check
///       for existing IR attributes, in favor of a single lookup interface
///       provided by an abstract attribute subclass.
///
/// NOTE: The mechanics of adding a new "concrete" abstract attribute are
///       described in the file comment.
struct Attributor {

  /// Constructor
  ///
  /// \param Functions The set of functions we are deriving attributes for.
  /// \param InfoCache Cache to hold various information accessible for
  ///                  the abstract attributes.
  /// \param Configuration The Attributor configuration which determines what
  ///                      generic features to use.
  Attributor(SetVector<Function *> &Functions, InformationCache &InfoCache,
             AttributorConfig Configuration)
      : Allocator(InfoCache.Allocator), Functions(Functions),
        InfoCache(InfoCache), Configuration(Configuration) {}

  ~Attributor();

  /// Run the analyses until a fixpoint is reached or enforced (timeout).
  ///
  /// The attributes registered with this Attributor can be used after as long
  /// as the Attributor is not destroyed (it owns the attributes now).
  ///
  /// \returns CHANGED if the IR was changed, otherwise UNCHANGED.
  ChangeStatus run();

  /// Lookup an abstract attribute of type \p AAType at position \p IRP. While
  /// no abstract attribute is found, equivalent positions are checked, see
  /// SubsumingPositionIterator. Thus, the returned abstract attribute
  /// might be anchored at a different position, e.g., the callee if \p IRP is a
  /// call base.
  ///
  /// This method is the only (supported) way an abstract attribute can retrieve
  /// information from another abstract attribute. As an example, take an
  /// abstract attribute that determines the memory access behavior for an
  /// argument (readnone, readonly, ...). It should use `getAAFor` to get the
  /// most optimistic information for other abstract attributes in-flight, e.g.
  /// the one reasoning about the "captured" state for the argument or the one
  /// reasoning on the memory access behavior of the function as a whole.
  ///
  /// If the DepClass enum is set to `DepClassTy::NONE` the dependence from
  /// \p QueryingAA to the returned abstract attribute is not automatically
  /// recorded. This should only be used if the caller will record the
  /// dependence explicitly if necessary, thus if the returned abstract
  /// attribute is used for reasoning. To record the dependences explicitly use
  /// the `Attributor::recordDependence` method.
  template <typename AAType>
  const AAType *getAAFor(const AbstractAttribute &QueryingAA,
                         const IRPosition &IRP, DepClassTy DepClass) {
    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, DepClass,
                                    /* ForceUpdate */ false);
  }
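
  // A minimal usage sketch from within an abstract attribute's update, where
  // `A` is the Attributor, `*this` the querying attribute, and `Fn` a
  // hypothetical function; AANoSync is one of the abstract attribute types of
  // this framework:
  //
  //   const auto *NoSyncAA = A.getAAFor<AANoSync>(
  //       *this, IRPosition::function(Fn), DepClassTy::REQUIRED);
  //   bool IsAssumedNoSync = NoSyncAA && NoSyncAA->isAssumedNoSync();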

  /// Similar to getAAFor but the returned abstract attribute will be updated (via
  /// `AbstractAttribute::update`) even if it is found in the cache. This is
  /// especially useful for AAIsDead as changes in liveness can make updates
  /// possible/useful that were not happening before as the abstract attribute
  /// was assumed dead.
  template <typename AAType>
  const AAType *getAndUpdateAAFor(const AbstractAttribute &QueryingAA,
                                  const IRPosition &IRP, DepClassTy DepClass) {
    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, DepClass,
                                    /* ForceUpdate */ true);
  }

  /// The version of getAAFor that allows omitting a querying abstract
  /// attribute. Using this after Attributor started running is restricted to
  /// only the Attributor itself. Initial seeding of AAs can be done via this
  /// function.
  /// NOTE: ForceUpdate is ignored in any stage other than the update stage.
  template <typename AAType>
  const AAType *getOrCreateAAFor(IRPosition IRP,
                                 const AbstractAttribute *QueryingAA,
                                 DepClassTy DepClass, bool ForceUpdate = false,
                                 bool UpdateAfterInit = true) {
    if (!shouldPropagateCallBaseContext(IRP))
      IRP = IRP.stripCallBaseContext();

    if (AAType *AAPtr = lookupAAFor<AAType>(IRP, QueryingAA, DepClass,
                                            /* AllowInvalidState */ true)) {
      if (ForceUpdate && Phase == AttributorPhase::UPDATE)
        updateAA(*AAPtr);
      return AAPtr;
    }

    bool ShouldUpdateAA;
    if (!shouldInitialize<AAType>(IRP, ShouldUpdateAA))
      return nullptr;

    // No matching attribute found, create one.
    // Use the static create method.
    auto &AA = AAType::createForPosition(IRP, *this);

    // Always register a new attribute to make sure we clean up the allocated
    // memory properly.
    registerAA(AA);

    // If we are currently seeding attributes, enforce seeding rules.
    if (Phase == AttributorPhase::SEEDING && !shouldSeedAttribute(AA)) {
      AA.getState().indicatePessimisticFixpoint();
      return &AA;
    }

    // Bootstrap the new attribute with an initial update to propagate
    // information, e.g., function -> call site.
    {
      TimeTraceScope TimeScope("initialize", [&]() {
        return AA.getName() +
               std::to_string(AA.getIRPosition().getPositionKind());
      });
      ++InitializationChainLength;
      AA.initialize(*this);
      --InitializationChainLength;
    }

    if (!ShouldUpdateAA) {
      AA.getState().indicatePessimisticFixpoint();
      return &AA;
    }

    // Allow seeded attributes to declare dependencies.
    // Remember the seeding state.
    if (UpdateAfterInit) {
      AttributorPhase OldPhase = Phase;
      Phase = AttributorPhase::UPDATE;

      updateAA(AA);

      Phase = OldPhase;
    }

    if (QueryingAA && AA.getState().isValidState())
      recordDependence(AA, const_cast<AbstractAttribute &>(*QueryingAA),
                       DepClass);
    return &AA;
  }

  template <typename AAType>
  const AAType *getOrCreateAAFor(const IRPosition &IRP) {
    return getOrCreateAAFor<AAType>(IRP, /* QueryingAA */ nullptr,
                                    DepClassTy::NONE);
  }

  /// Return the attribute of \p AAType for \p IRP if existing and valid. This
  /// also allows lookups by non-AA users.
  template <typename AAType>
  AAType *lookupAAFor(const IRPosition &IRP,
                      const AbstractAttribute *QueryingAA = nullptr,
                      DepClassTy DepClass = DepClassTy::OPTIONAL,
                      bool AllowInvalidState = false) {
    static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
                  "Cannot query an attribute with a type not derived from "
                  "'AbstractAttribute'!");
    // Lookup the abstract attribute of type AAType. If found, return it after
    // registering a dependence of QueryingAA on the one returned attribute.
    AbstractAttribute *AAPtr = AAMap.lookup({&AAType::ID, IRP});
    if (!AAPtr)
      return nullptr;

    AAType *AA = static_cast<AAType *>(AAPtr);

    // Do not register a dependence on an attribute with an invalid state.
    if (DepClass != DepClassTy::NONE && QueryingAA &&
        AA->getState().isValidState())
      recordDependence(*AA, const_cast<AbstractAttribute &>(*QueryingAA),
                       DepClass);

    // Return nullptr if this attribute has an invalid state.
    if (!AllowInvalidState && !AA->getState().isValidState())
      return nullptr;
    return AA;
  }

  /// Allows a query AA to request an update if a new query was received.
  void registerForUpdate(AbstractAttribute &AA);

  /// Explicitly record a dependence from \p FromAA to \p ToAA, that is if
  /// \p FromAA changes \p ToAA should be updated as well.
  ///
  /// This method should be used in conjunction with the `getAAFor` method and
  /// with the DepClass enum passed to the method set to `DepClassTy::NONE`. This can
  /// be beneficial to avoid false dependences but it requires the users of
  /// `getAAFor` to explicitly record true dependences through this method.
  /// The \p DepClass flag indicates if the dependence is strictly necessary.
  /// That means for required dependences, if \p FromAA changes to an invalid
  /// state, \p ToAA can be moved to a pessimistic fixpoint because it required
  /// information from \p FromAA but none are available anymore.
  void recordDependence(const AbstractAttribute &FromAA,
                        const AbstractAttribute &ToAA, DepClassTy DepClass);

  /// Introduce a new abstract attribute into the fixpoint analysis.
  ///
  /// Note that ownership of the attribute is given to the Attributor. It will
  /// invoke delete for the attribute on destruction of the Attributor.
  ///
  /// Attributes are identified by their IR position (AAType::getIRPosition())
  /// and the address of their static member (see AAType::ID).
  template <typename AAType> AAType &registerAA(AAType &AA) {
    static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
                  "Cannot register an attribute with a type not derived from "
                  "'AbstractAttribute'!");
    // Put the attribute in the lookup map structure and the container we use to
    // keep track of all attributes.
    const IRPosition &IRP = AA.getIRPosition();
    AbstractAttribute *&AAPtr = AAMap[{&AAType::ID, IRP}];

    assert(!AAPtr && "Attribute already in map!");
    AAPtr = &AA;

    // Register AA with the synthetic root only before the manifest stage.
    if (Phase == AttributorPhase::SEEDING || Phase == AttributorPhase::UPDATE)
      DG.SyntheticRoot.Deps.insert(
          AADepGraphNode::DepTy(&AA, unsigned(DepClassTy::REQUIRED)));

    return AA;
  }

  /// Return the internal information cache.
  InformationCache &getInfoCache() { return InfoCache; }

  /// Return true if this is a module pass, false otherwise.
  bool isModulePass() const { return Configuration.IsModulePass; }

  /// Return true if we derive attributes for \p Fn
  bool isRunOn(Function &Fn) const { return isRunOn(&Fn); }
  bool isRunOn(Function *Fn) const {
    return Functions.empty() || Functions.count(Fn);
  }

  template <typename AAType> bool shouldUpdateAA(const IRPosition &IRP) {
    // If this is queried in the manifest stage, we force the AA to indicate a
    // pessimistic fixpoint immediately.
    if (Phase == AttributorPhase::MANIFEST || Phase == AttributorPhase::CLEANUP)
      return false;

    Function *AssociatedFn = IRP.getAssociatedFunction();

    // Check if we require a callee but there is none.
    if (!AssociatedFn && AAType::requiresCalleeForCallBase() &&
        IRP.isAnyCallSitePosition())
      return false;

    // Check if we require callers but cannot see all of them.
    if (AAType::requiresCallersForArgOrFunction())
      if (IRP.getPositionKind() == IRPosition::IRP_FUNCTION ||
          IRP.getPositionKind() == IRPosition::IRP_ARGUMENT)
        if (!AssociatedFn->hasLocalLinkage())
          return false;

    if (!AAType::isValidIRPositionForUpdate(*this, IRP))
      return false;

    // We update only AAs associated with functions in the Functions set or
    // call sites of them.
    return (!AssociatedFn || isModulePass() || isRunOn(AssociatedFn) ||
            isRunOn(IRP.getAnchorScope()));
  }

  template <typename AAType>
  bool shouldInitialize(const IRPosition &IRP, bool &ShouldUpdateAA) {
    if (!AAType::isValidIRPositionForInit(*this, IRP))
      return false;

    if (Configuration.Allowed && !Configuration.Allowed->count(&AAType::ID))
      return false;

    // For now we skip anything in naked and optnone functions.
    const Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && (AnchorFn->hasFnAttribute(Attribute::Naked) ||
                     AnchorFn->hasFnAttribute(Attribute::OptimizeNone)))
      return false;

    // Avoid too many nested initializations to prevent a stack overflow.
    if (InitializationChainLength > MaxInitializationChainLength)
      return false;

    ShouldUpdateAA = shouldUpdateAA<AAType>(IRP);

    return !AAType::hasTrivialInitializer() || ShouldUpdateAA;
  }

  /// Determine opportunities to derive 'default' attributes in \p F and create
  /// abstract attribute objects for them.
  ///
  /// \param F The function that is checked for attribute opportunities.
  ///
  /// Note that abstract attribute instances are generally created even if the
  /// IR already contains the information they would deduce. The most important
  /// reason for this is the uniform interface of the abstract attribute
  /// instance, which can be queried without the need to look at the IR in
  /// various places.
  void identifyDefaultAbstractAttributes(Function &F);

  /// Determine whether the function \p F is IPO amendable.
  ///
  /// If a function is exactly defined, or if it has the alwaysinline attribute
  /// and is viable to be inlined, we say it is IPO amendable.
  bool isFunctionIPOAmendable(const Function &F) {
    return F.hasExactDefinition() || InfoCache.InlineableFunctions.count(&F) ||
           (Configuration.IPOAmendableCB && Configuration.IPOAmendableCB(F));
  }

  /// Mark the internal function \p F as live.
  ///
  /// This will trigger the identification and initialization of attributes for
  /// \p F.
  void markLiveInternalFunction(const Function &F) {
    assert(F.hasLocalLinkage() &&
           "Only local linkage is assumed dead initially.");

    if (Configuration.DefaultInitializeLiveInternals)
      identifyDefaultAbstractAttributes(const_cast<Function &>(F));
    if (Configuration.InitializationCallback)
      Configuration.InitializationCallback(*this, F);
  }

  /// Helper function to remove a call site.
  void removeCallSite(CallInst *CI) {
    if (!CI)
      return;

    Configuration.CGUpdater.removeCallSite(*CI);
  }

  /// Record that \p U is to be replaced with \p NV after information was
  /// manifested. This also triggers deletion of trivially dead instructions.
  bool changeUseAfterManifest(Use &U, Value &NV) {
    Value *&V = ToBeChangedUses[&U];
    if (V && (V->stripPointerCasts() == NV.stripPointerCasts() ||
              isa_and_nonnull<UndefValue>(V)))
      return false;
    assert((!V || V == &NV || isa<UndefValue>(NV)) &&
           "Use was registered twice for replacement with different values!");
    V = &NV;
    return true;
  }

  /// Helper function to replace all uses associated with \p IRP with \p NV.
  /// Return true if there is any change. The flag \p ChangeDroppable indicates
  /// if droppable uses should be changed too.
  bool changeAfterManifest(const IRPosition IRP, Value &NV,
                           bool ChangeDroppable = true) {
    if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_ARGUMENT) {
      auto *CB = cast<CallBase>(IRP.getCtxI());
      return changeUseAfterManifest(
          CB->getArgOperandUse(IRP.getCallSiteArgNo()), NV);
    }
    Value &V = IRP.getAssociatedValue();
    auto &Entry = ToBeChangedValues[&V];
    Value *CurNV = get<0>(Entry);
    if (CurNV && (CurNV->stripPointerCasts() == NV.stripPointerCasts() ||
                  isa<UndefValue>(CurNV)))
      return false;
    assert((!CurNV || CurNV == &NV || isa<UndefValue>(NV)) &&
           "Value replacement was registered twice with different values!");
    Entry = {&NV, ChangeDroppable};
    return true;
  }
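
  /// A minimal sketch (the deduced constant \c C is hypothetical) of how an
  /// AA might use this from its manifest step, with Attributor \c A:
  /// \code
  ///   Constant *C = ...; // Deduced earlier by the abstract attribute.
  ///   bool Changed = A.changeAfterManifest(getIRPosition(), *C);
  ///   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  /// \endcode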

  /// Record that \p I is to be replaced with `unreachable` after information
  /// was manifested.
  void changeToUnreachableAfterManifest(Instruction *I) {
    ToBeChangedToUnreachableInsts.insert(I);
  }

  /// Record that \p II has at least one dead successor block. This information
  /// is used, e.g., to replace \p II with a call, after information was
  /// manifested.
  void registerInvokeWithDeadSuccessor(InvokeInst &II) {
    InvokeWithDeadSuccessor.insert(&II);
  }

  /// Record that \p I is deleted after information was manifested. This also
  /// triggers deletion of trivially dead instructions.
  void deleteAfterManifest(Instruction &I) { ToBeDeletedInsts.insert(&I); }

  /// Record that \p BB is deleted after information was manifested. This also
  /// triggers deletion of trivially dead instructions.
  void deleteAfterManifest(BasicBlock &BB) { ToBeDeletedBlocks.insert(&BB); }

  /// Record that \p BB is added during the manifest of an AA. Added basic
  /// blocks are preserved in the IR.
  void registerManifestAddedBasicBlock(BasicBlock &BB) {
    ManifestAddedBlocks.insert(&BB);
  }

  /// Record that \p F is deleted after information was manifested.
  void deleteAfterManifest(Function &F) {
    if (Configuration.DeleteFns)
      ToBeDeletedFunctions.insert(&F);
  }

  /// Return the attributes of kind \p AK existing in the IR as operand bundles
  /// of an llvm.assume.
  bool getAttrsFromAssumes(const IRPosition &IRP, Attribute::AttrKind AK,
                           SmallVectorImpl<Attribute> &Attrs);

  /// Return true if any kind in \p AKs exists in the IR at a position that
  /// will affect this one. See also getAttrs(...).
  /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
  ///                                 e.g., the function position if this is an
  ///                                 argument position, should be ignored.
  bool hasAttr(const IRPosition &IRP, ArrayRef<Attribute::AttrKind> AKs,
               bool IgnoreSubsumingPositions = false,
               Attribute::AttrKind ImpliedAttributeKind = Attribute::None);

  /// Return the attributes of any kind in \p AKs existing in the IR at a
  /// position that will affect this one. While each position can only have a
  /// single attribute of any kind in \p AKs, there are "subsuming" positions
  /// that could have an attribute as well. This method returns all attributes
  /// found in \p Attrs.
  /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions,
  ///                                 e.g., the function position if this is an
  ///                                 argument position, should be ignored.
  void getAttrs(const IRPosition &IRP, ArrayRef<Attribute::AttrKind> AKs,
                SmallVectorImpl<Attribute> &Attrs,
                bool IgnoreSubsumingPositions = false);

  /// Remove all \p AttrKinds attached to \p IRP.
  ChangeStatus removeAttrs(const IRPosition &IRP,
                           const ArrayRef<Attribute::AttrKind> &AttrKinds);

  /// Attach \p DeducedAttrs to \p IRP, if \p ForceReplace is set we do this
  /// even if the same attribute kind was already present.
  ChangeStatus manifestAttrs(const IRPosition &IRP,
                             const ArrayRef<Attribute> &DeducedAttrs,
                             bool ForceReplace = false);

private:
  /// Helper to check \p Attrs for \p AK, if not found, check if \p
  /// AAType::isImpliedByIR is true, and if not, create AAType for \p IRP.
  template <Attribute::AttrKind AK, typename AAType>
  void checkAndQueryIRAttr(const IRPosition &IRP, AttributeSet Attrs);

  /// Helper to apply \p CB on all attributes of type \p AttrDescs of \p IRP.
  template <typename DescTy>
  ChangeStatus updateAttrMap(const IRPosition &IRP,
                             const ArrayRef<DescTy> &AttrDescs,
                             function_ref<bool(const DescTy &, AttributeSet,
                                               AttributeMask &, AttrBuilder &)>
                                 CB);

  /// Mapping from functions/call sites to their attributes.
  DenseMap<Value *, AttributeList> AttrsMap;

public:
  /// If \p IRP is assumed to be a constant, return it; if that is not yet
  /// clear, return std::nullopt; otherwise return `nullptr`.
  std::optional<Constant *> getAssumedConstant(const IRPosition &IRP,
                                               const AbstractAttribute &AA,
                                               bool &UsedAssumedInformation);
  std::optional<Constant *> getAssumedConstant(const Value &V,
                                               const AbstractAttribute &AA,
                                               bool &UsedAssumedInformation) {
    return getAssumedConstant(IRPosition::value(V), AA, UsedAssumedInformation);
  }

  /// If \p V is assumed simplified, return it; if that is not yet clear,
  /// return std::nullopt; otherwise return `nullptr`.
  std::optional<Value *> getAssumedSimplified(const IRPosition &IRP,
                                              const AbstractAttribute &AA,
                                              bool &UsedAssumedInformation,
                                              AA::ValueScope S) {
    return getAssumedSimplified(IRP, &AA, UsedAssumedInformation, S);
  }
  std::optional<Value *> getAssumedSimplified(const Value &V,
                                              const AbstractAttribute &AA,
                                              bool &UsedAssumedInformation,
                                              AA::ValueScope S) {
    return getAssumedSimplified(IRPosition::value(V), AA,
                                UsedAssumedInformation, S);
  }

  /// If \p V is assumed simplified, return it; if that is not yet clear,
  /// return std::nullopt; otherwise return `nullptr`. Same as the public
  /// version except that it can be used without recording dependences on any
  /// \p AA.
  std::optional<Value *> getAssumedSimplified(const IRPosition &V,
                                              const AbstractAttribute *AA,
                                              bool &UsedAssumedInformation,
                                              AA::ValueScope S);

  /// Try to simplify \p IRP in the scope \p S. If successful, true is
  /// returned and all potential values \p IRP can take are put into \p Values.
  /// If the result in \p Values contains select or PHI instructions, it means
  /// those could not be simplified to a single value. Recursive calls with
  /// these instructions will yield their respective potential values. If false
  /// is returned, no other information is valid.
  bool getAssumedSimplifiedValues(const IRPosition &IRP,
                                  const AbstractAttribute *AA,
                                  SmallVectorImpl<AA::ValueAndContext> &Values,
                                  AA::ValueScope S,
                                  bool &UsedAssumedInformation,
                                  bool RecurseForSelectAndPHI = true);
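
  /// A minimal sketch of collecting the potential values of a position, given
  /// an Attributor \c A:
  /// \code
  ///   SmallVector<AA::ValueAndContext> Values;
  ///   bool UsedAssumedInformation = false;
  ///   if (A.getAssumedSimplifiedValues(IRP, /* AA */ nullptr, Values,
  ///                                    AA::ValueScope::Interprocedural,
  ///                                    UsedAssumedInformation))
  ///     for (const AA::ValueAndContext &VAC : Values) {
  ///       // Each VAC.getValue() is one value IRP may take.
  ///     }
  /// \endcode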

  /// Register \p CB as a simplification callback.
  /// `Attributor::getAssumedSimplified` will use these callbacks before
  /// it asks `AAValueSimplify`. It is important to ensure this
  /// is called before `identifyDefaultAbstractAttributes`, assuming the
  /// latter is called at all.
  using SimplifictionCallbackTy = std::function<std::optional<Value *>(
      const IRPosition &, const AbstractAttribute *, bool &)>;
  void registerSimplificationCallback(const IRPosition &IRP,
                                      const SimplifictionCallbackTy &CB) {
    SimplificationCallbacks[IRP].emplace_back(CB);
  }
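
  /// A minimal sketch of registering a callback that pins the value at an IR
  /// position to a hypothetical replacement \c NewV (a `Value *`), given an
  /// Attributor \c A:
  /// \code
  ///   A.registerSimplificationCallback(
  ///       IRP, [NewV](const IRPosition &, const AbstractAttribute *,
  ///                   bool &) -> std::optional<Value *> { return NewV; });
  /// \endcode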

  /// Return true if there is a simplification callback for \p IRP.
  bool hasSimplificationCallback(const IRPosition &IRP) {
    return SimplificationCallbacks.count(IRP);
  }

  /// Register \p CB as a simplification callback.
  /// Similar to \p registerSimplificationCallback, the callback will be
  /// invoked first when we simplify the global variable \p GV.
  using GlobalVariableSimplifictionCallbackTy =
      std::function<std::optional<Constant *>(
          const GlobalVariable &, const AbstractAttribute *, bool &)>;
  void registerGlobalVariableSimplificationCallback(
      const GlobalVariable &GV,
      const GlobalVariableSimplifictionCallbackTy &CB) {
    GlobalVariableSimplificationCallbacks[&GV].emplace_back(CB);
  }

  /// Return true if there is a simplification callback for \p GV.
  bool hasGlobalVariableSimplificationCallback(const GlobalVariable &GV) {
    return GlobalVariableSimplificationCallbacks.count(&GV);
  }

  /// Return \p std::nullopt if there is no callback registered for \p GV or
  /// the callback is not yet sure if \p GV can be simplified. Return \p
  /// nullptr if \p GV can't be simplified.
  std::optional<Constant *>
  getAssumedInitializerFromCallBack(const GlobalVariable &GV,
                                    const AbstractAttribute *AA,
                                    bool &UsedAssumedInformation) {
    assert(GlobalVariableSimplificationCallbacks.contains(&GV));
    for (auto &CB : GlobalVariableSimplificationCallbacks.lookup(&GV)) {
      auto SimplifiedGV = CB(GV, AA, UsedAssumedInformation);
      // For now we assume the callback will not return std::nullopt.
      assert(SimplifiedGV.has_value() && "SimplifiedGV has no value");
      return *SimplifiedGV;
    }
    llvm_unreachable("there must be a callback registered");
  }

  using VirtualUseCallbackTy =
      std::function<bool(Attributor &, const AbstractAttribute *)>;
  void registerVirtualUseCallback(const Value &V,
                                  const VirtualUseCallbackTy &CB) {
    VirtualUseCallbacks[&V].emplace_back(CB);
  }

private:
  /// The vector with all simplification callbacks registered by outside AAs.
  DenseMap<IRPosition, SmallVector<SimplifictionCallbackTy, 1>>
      SimplificationCallbacks;

  /// The vector with all simplification callbacks for global variables
  /// registered by outside AAs.
  DenseMap<const GlobalVariable *,
           SmallVector<GlobalVariableSimplifictionCallbackTy, 1>>
      GlobalVariableSimplificationCallbacks;

  DenseMap<const Value *, SmallVector<VirtualUseCallbackTy, 1>>
      VirtualUseCallbacks;

public:
  /// Translate \p V from the callee context into the call site context.
  std::optional<Value *>
  translateArgumentToCallSiteContent(std::optional<Value *> V, CallBase &CB,
                                     const AbstractAttribute &AA,
                                     bool &UsedAssumedInformation);

  /// Return true if \p AA (or its context instruction) is assumed dead.
  ///
  /// If \p LivenessAA is not provided it is queried.
  bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA,
                     bool &UsedAssumedInformation,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Return true if \p I is assumed dead.
  ///
  /// If \p LivenessAA is not provided it is queried.
  bool isAssumedDead(const Instruction &I, const AbstractAttribute *QueryingAA,
                     const AAIsDead *LivenessAA, bool &UsedAssumedInformation,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL,
                     bool CheckForDeadStore = false);

  /// Return true if \p U is assumed dead.
  ///
  /// If \p FnLivenessAA is not provided it is queried.
  bool isAssumedDead(const Use &U, const AbstractAttribute *QueryingAA,
                     const AAIsDead *FnLivenessAA, bool &UsedAssumedInformation,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Return true if \p IRP is assumed dead.
  ///
  /// If \p FnLivenessAA is not provided it is queried.
  bool isAssumedDead(const IRPosition &IRP, const AbstractAttribute *QueryingAA,
                     const AAIsDead *FnLivenessAA, bool &UsedAssumedInformation,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Return true if \p BB is assumed dead.
  ///
  /// If \p LivenessAA is not provided it is queried.
  bool isAssumedDead(const BasicBlock &BB, const AbstractAttribute *QueryingAA,
                     const AAIsDead *FnLivenessAA,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Check \p Pred on all (transitive) uses of \p V.
  ///
  /// This method will evaluate \p Pred on all (transitive) uses of the
  /// associated value and return true if \p Pred holds every time.
  /// If uses are skipped in favor of equivalent ones, e.g., if we look through
  /// memory, the \p EquivalentUseCB will be used to give the caller an idea
  /// which original use was replaced by a new one (or new ones). The visit is
  /// cut short if \p EquivalentUseCB returns false, and the function will
  /// return false as well.
  bool checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
                       const AbstractAttribute &QueryingAA, const Value &V,
                       bool CheckBBLivenessOnly = false,
                       DepClassTy LivenessDepClass = DepClassTy::OPTIONAL,
                       bool IgnoreDroppableUses = true,
                       function_ref<bool(const Use &OldU, const Use &NewU)>
                           EquivalentUseCB = nullptr);

  /// Emit a remark generically.
  ///
  /// This template function can be used to generically emit a remark. The
  /// RemarkKind should be one of the following:
  ///   - OptimizationRemark to indicate a successful optimization attempt
  ///   - OptimizationRemarkMissed to report a failed optimization attempt
  ///   - OptimizationRemarkAnalysis to provide additional information about an
  ///     optimization attempt
  ///
  /// The remark is built using a callback function \p RemarkCB that takes a
  /// RemarkKind as input and returns a RemarkKind.
  template <typename RemarkKind, typename RemarkCallBack>
  void emitRemark(Instruction *I, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) const {
    if (!Configuration.OREGetter)
      return;

    Function *F = I->getFunction();
    auto &ORE = Configuration.OREGetter(F);

    if (RemarkName.startswith("OMP"))
      ORE.emit([&]() {
        return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, I))
               << " [" << RemarkName << "]";
      });
    else
      ORE.emit([&]() {
        return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, I));
      });
  }

  /// Emit a remark on a function.
  template <typename RemarkKind, typename RemarkCallBack>
  void emitRemark(Function *F, StringRef RemarkName,
                  RemarkCallBack &&RemarkCB) const {
    if (!Configuration.OREGetter)
      return;

    auto &ORE = Configuration.OREGetter(F);

    if (RemarkName.startswith("OMP"))
      ORE.emit([&]() {
        return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, F))
               << " [" << RemarkName << "]";
      });
    else
      ORE.emit([&]() {
        return RemarkCB(RemarkKind(Configuration.PassName, RemarkName, F));
      });
  }
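
  /// A minimal sketch of emitting a missed-optimization remark on an
  /// instruction, given an Attributor \c A (the remark name and message are
  /// chosen for illustration only):
  /// \code
  ///   A.emitRemark<OptimizationRemarkMissed>(
  ///       &I, "AttributorExample",
  ///       [](OptimizationRemarkMissed ORM) { return ORM << "not deduced"; });
  /// \endcode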

  /// Helper struct used in the communication between an abstract attribute (AA)
  /// that wants to change the signature of a function and the Attributor which
  /// applies the changes. The struct is partially initialized with the
  /// information from the AA (see the constructor). All other members are
  /// provided by the Attributor prior to invoking any callbacks.
  struct ArgumentReplacementInfo {
    /// Callee repair callback type
    ///
    /// The function repair callback is invoked once to rewire the replacement
    /// arguments in the body of the new function. The argument replacement
    /// info is passed, as built from the registerFunctionSignatureRewrite
    /// call, as well as the replacement function and an iterator to the first
    /// replacement argument.
    using CalleeRepairCBTy = std::function<void(
        const ArgumentReplacementInfo &, Function &, Function::arg_iterator)>;

    /// Abstract call site (ACS) repair callback type
    ///
    /// The abstract call site repair callback is invoked once on every abstract
    /// call site of the replaced function (\see ReplacedFn). The callback needs
    /// to provide the operands for the call to the new replacement function.
    /// The number and type of the operands appended to the provided vector
    /// (second argument) is defined by the number and types determined through
    /// the replacement type vector (\see ReplacementTypes). The first argument
    /// is the ArgumentReplacementInfo object registered with the Attributor
    /// through the registerFunctionSignatureRewrite call.
    using ACSRepairCBTy =
        std::function<void(const ArgumentReplacementInfo &, AbstractCallSite,
                           SmallVectorImpl<Value *> &)>;

    /// Simple getters, see the corresponding members for details.
    ///{

    Attributor &getAttributor() const { return A; }
    const Function &getReplacedFn() const { return ReplacedFn; }
    const Argument &getReplacedArg() const { return ReplacedArg; }
    unsigned getNumReplacementArgs() const { return ReplacementTypes.size(); }
    const SmallVectorImpl<Type *> &getReplacementTypes() const {
      return ReplacementTypes;
    }

    ///}

  private:
    /// Constructor that takes the argument to be replaced, the types of
    /// the replacement arguments, as well as callbacks to repair the call sites
    /// and new function after the replacement happened.
    ArgumentReplacementInfo(Attributor &A, Argument &Arg,
                            ArrayRef<Type *> ReplacementTypes,
                            CalleeRepairCBTy &&CalleeRepairCB,
                            ACSRepairCBTy &&ACSRepairCB)
        : A(A), ReplacedFn(*Arg.getParent()), ReplacedArg(Arg),
          ReplacementTypes(ReplacementTypes.begin(), ReplacementTypes.end()),
          CalleeRepairCB(std::move(CalleeRepairCB)),
          ACSRepairCB(std::move(ACSRepairCB)) {}

    /// Reference to the attributor to allow access from the callbacks.
    Attributor &A;

    /// The "old" function replaced by ReplacementFn.
    const Function &ReplacedFn;

    /// The "old" argument replaced by new ones defined via ReplacementTypes.
    const Argument &ReplacedArg;

    /// The types of the arguments replacing ReplacedArg.
    const SmallVector<Type *, 8> ReplacementTypes;

    /// Callee repair callback, see CalleeRepairCBTy.
    const CalleeRepairCBTy CalleeRepairCB;

    /// Abstract call site (ACS) repair callback, see ACSRepairCBTy.
    const ACSRepairCBTy ACSRepairCB;

    /// Allow access to the private members from the Attributor.
    friend struct Attributor;
  };

  /// Check if we can rewrite a function signature.
  ///
  /// The argument \p Arg is replaced with new ones defined by the number,
  /// order, and types in \p ReplacementTypes.
  ///
  /// \returns True, if the replacement can be registered, via
  /// registerFunctionSignatureRewrite, false otherwise.
  bool isValidFunctionSignatureRewrite(Argument &Arg,
                                       ArrayRef<Type *> ReplacementTypes);

  /// Register a rewrite for a function signature.
  ///
  /// The argument \p Arg is replaced with new ones defined by the number,
  /// order, and types in \p ReplacementTypes. The rewiring at the call sites is
  /// done through \p ACSRepairCB and at the callee site through
  /// \p CalleeRepairCB.
  ///
  /// \returns True, if the replacement was registered, false otherwise.
  bool registerFunctionSignatureRewrite(
      Argument &Arg, ArrayRef<Type *> ReplacementTypes,
      ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
      ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB);
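
  /// A hedged sketch of splitting one argument \c Arg into two i32 pieces,
  /// given an Attributor \c A; the repair callbacks are illustrative stubs,
  /// not a complete implementation:
  /// \code
  ///   Type *I32 = Type::getInt32Ty(Arg.getContext());
  ///   Type *ReplacementTypes[] = {I32, I32};
  ///   auto CalleeRepairCB = [](const ArgumentReplacementInfo &ARI,
  ///                            Function &NewFn, Function::arg_iterator AI) {
  ///     // Rewire the body of NewFn to use the two replacement arguments.
  ///   };
  ///   auto ACSRepairCB = [](const ArgumentReplacementInfo &ARI,
  ///                         AbstractCallSite ACS,
  ///                         SmallVectorImpl<Value *> &NewArgOps) {
  ///     // Append the two replacement operands for this call site.
  ///   };
  ///   if (A.isValidFunctionSignatureRewrite(Arg, ReplacementTypes))
  ///     A.registerFunctionSignatureRewrite(Arg, ReplacementTypes,
  ///                                        std::move(CalleeRepairCB),
  ///                                        std::move(ACSRepairCB));
  /// \endcode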

  /// Check \p Pred on all function call sites.
  ///
  /// This method will evaluate \p Pred on call sites and return
  /// true if \p Pred holds for every call site. However, this is only possible
  /// if all call sites are known, hence the function has internal linkage.
  /// If true is returned, \p UsedAssumedInformation is set if assumed
  /// information was used to skip or simplify potential call sites.
  bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                            const AbstractAttribute &QueryingAA,
                            bool RequireAllCallSites,
                            bool &UsedAssumedInformation);

  /// Check \p Pred on all call sites of \p Fn.
  ///
  /// This method will evaluate \p Pred on call sites and return
  /// true if \p Pred holds for every call site. However, this is only possible
  /// if all call sites are known, hence the function has internal linkage.
  /// If true is returned, \p UsedAssumedInformation is set if assumed
  /// information was used to skip or simplify potential call sites.
  bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                            const Function &Fn, bool RequireAllCallSites,
                            const AbstractAttribute *QueryingAA,
                            bool &UsedAssumedInformation,
                            bool CheckPotentiallyDead = false);
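
  /// A minimal sketch of verifying a property over all call sites of a
  /// function \c Fn, given an Attributor \c A (the predicate is illustrative):
  /// \code
  ///   bool UsedAssumedInformation = false;
  ///   auto Pred = [](AbstractCallSite ACS) {
  ///     return ACS.getNumArgOperands() == 2;
  ///   };
  ///   bool AllOk = A.checkForAllCallSites(Pred, Fn,
  ///                                       /* RequireAllCallSites */ true,
  ///                                       /* QueryingAA */ nullptr,
  ///                                       UsedAssumedInformation);
  /// \endcode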

  /// Check \p Pred on all values potentially returned by the function
  /// associated with \p QueryingAA.
  ///
  /// This is the context insensitive version of the method above.
  bool
  checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
                            const AbstractAttribute &QueryingAA,
                            AA::ValueScope S = AA::ValueScope::Intraprocedural,
                            bool RecurseForSelectAndPHI = true);

  /// Check \p Pred on all instructions in \p Fn with an opcode present in
  /// \p Opcodes.
  ///
  /// This method will evaluate \p Pred on all instructions with an opcode
  /// present in \p Opcodes and return true if \p Pred holds on all of them.
  bool checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
                               const Function *Fn,
                               const AbstractAttribute &QueryingAA,
                               const ArrayRef<unsigned> &Opcodes,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly = false,
                               bool CheckPotentiallyDead = false);

  /// Check \p Pred on all instructions with an opcode present in \p Opcodes.
  ///
  /// This method will evaluate \p Pred on all instructions with an opcode
  /// present in \p Opcodes and return true if \p Pred holds on all of them.
  bool checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
                               const AbstractAttribute &QueryingAA,
                               const ArrayRef<unsigned> &Opcodes,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly = false,
                               bool CheckPotentiallyDead = false);
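
  /// A minimal sketch of checking all loads in the associated function,
  /// assuming this is called from within an abstract attribute so that
  /// \c *this is the querying AA (the predicate is illustrative):
  /// \code
  ///   bool UsedAssumedInformation = false;
  ///   auto CheckLoad = [](Instruction &I) {
  ///     return !cast<LoadInst>(I).isVolatile();
  ///   };
  ///   bool AllOk = A.checkForAllInstructions(
  ///       CheckLoad, *this, {(unsigned)Instruction::Load},
  ///       UsedAssumedInformation);
  /// \endcode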

  /// Check \p Pred on all call-like instructions (= CallBase derived).
  ///
  /// See checkForAllInstructions(...) for more information.
  bool checkForAllCallLikeInstructions(function_ref<bool(Instruction &)> Pred,
                                       const AbstractAttribute &QueryingAA,
                                       bool &UsedAssumedInformation,
                                       bool CheckBBLivenessOnly = false,
                                       bool CheckPotentiallyDead = false) {
    return checkForAllInstructions(
        Pred, QueryingAA,
        {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
         (unsigned)Instruction::Call},
        UsedAssumedInformation, CheckBBLivenessOnly, CheckPotentiallyDead);
  }

  /// Check \p Pred on all Read/Write instructions.
  ///
  /// This method will evaluate \p Pred on all instructions that read or write
  /// to memory present in the information cache and return true if \p Pred
  /// holds on all of them.
  bool checkForAllReadWriteInstructions(function_ref<bool(Instruction &)> Pred,
                                        AbstractAttribute &QueryingAA,
                                        bool &UsedAssumedInformation);

  /// Create a shallow wrapper for \p F such that \p F has internal linkage
  /// afterwards. It also renames the original \p F to an anonymous name.
  ///
  /// A wrapper is a function with the same type (and attributes) as \p F
  /// that will only call \p F and return the result, if any.
  ///
  /// Assuming the declaration of \p F looks like:
  ///   rty F(aty0 arg0, ..., atyN argN);
  ///
  /// The wrapper will then look as follows:
  ///   rty wrapper(aty0 arg0, ..., atyN argN) {
  ///     return F(arg0, ..., argN);
  ///   }
  ///
  static void createShallowWrapper(Function &F);

  /// Returns true if the function \p F can be internalized, i.e., it has
  /// compatible linkage.
  static bool isInternalizable(Function &F);

  /// Make another copy of the function \p F such that the copied version has
  /// internal linkage afterwards and can be analysed. Then we replace all uses
  /// of the original function with the copied one.
  ///
  /// Only non-locally linked functions that have `linkonce_odr` or `weak_odr`
  /// linkage can be internalized because these linkages guarantee that other
  /// definitions with the same name have the same semantics as this one.
  ///
  /// This will only be run if the `attributor-allow-deep-wrappers` option is
  /// set, or if the function is called with \p Force set to true.
  ///
  /// If the function \p F failed to be internalized the return value will be a
  /// null pointer.
  static Function *internalizeFunction(Function &F, bool Force = false);

  /// Make copies of each function in the set \p FnSet such that the copied
  /// version has internal linkage afterwards and can be analysed. Then we
  /// replace all uses of the original function with the copied one. The map
  /// \p FnMap contains a mapping of functions to their internalized versions.
  ///
  /// Only non-locally linked functions that have `linkonce_odr` or `weak_odr`
  /// linkage can be internalized because these linkages guarantee that other
  /// definitions with the same name have the same semantics as this one.
  ///
  /// This version will internalize all the functions in the set \p FnSet at
  /// once and then replace the uses. This prevents internalized functions from
  /// being called by external functions when there is an internalized version
  /// in the module.
  static bool internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
                                   DenseMap<Function *, Function *> &FnMap);

  /// Return the data layout associated with the anchor scope.
  const DataLayout &getDataLayout() const { return InfoCache.DL; }

  /// The allocator used to allocate memory, e.g. for `AbstractAttribute`s.
  BumpPtrAllocator &Allocator;

private:
  /// This method will do fixpoint iteration until a fixpoint or the
  /// maximum iteration count is reached.
  ///
  /// If the maximum iteration count is reached, this method will
  /// indicate a pessimistic fixpoint on attributes that transitively depend
  /// on attributes that were scheduled for an update.
  void runTillFixpoint();

  /// Gets called after scheduling, manifests attributes to the LLVM IR.
  ChangeStatus manifestAttributes();

  /// Gets called after attributes have been manifested, cleans up the IR.
  /// Deletes dead functions, blocks, and instructions.
  /// Rewrites function signatures and updates the call graph.
  ChangeStatus cleanupIR();

  /// Identify internal functions that are effectively dead, thus not reachable
  /// from a live entry point. The functions are added to ToBeDeletedFunctions.
  void identifyDeadInternalFunctions();

  /// Run `::update` on \p AA and track the dependences queried while doing so.
  /// Also adjust the state if we know further updates are not necessary.
  ChangeStatus updateAA(AbstractAttribute &AA);

  /// Remember the dependences on the top of the dependence stack such that they
  /// may trigger further updates. (\see DependenceStack)
  void rememberDependences();

  /// Determine if CallBase context in \p IRP should be propagated.
  bool shouldPropagateCallBaseContext(const IRPosition &IRP);

  /// Apply all requested function signature rewrites
  /// (\see registerFunctionSignatureRewrite) and return Changed if the module
  /// was altered.
  ChangeStatus
  rewriteFunctionSignatures(SmallSetVector<Function *, 8> &ModifiedFns);

  /// Check if the Attribute \p AA should be seeded.
  /// See getOrCreateAAFor.
  bool shouldSeedAttribute(AbstractAttribute &AA);

  /// A map to look up abstract attributes based on the address of the static
  /// member (AAType::ID) paired with the IR position.
  ///{
  using AAMapKeyTy = std::pair<const char *, IRPosition>;
  DenseMap<AAMapKeyTy, AbstractAttribute *> AAMap;
  ///}

  /// Map to remember all requested signature changes (= argument replacements).
  DenseMap<Function *, SmallVector<std::unique_ptr<ArgumentReplacementInfo>, 8>>
      ArgumentReplacementMap;

  /// The set of functions we are deriving attributes for.
  SetVector<Function *> &Functions;

  /// The information cache that holds pre-processed (LLVM-IR) information.
  InformationCache &InfoCache;

  /// Abstract Attribute dependency graph
  AADepGraph DG;

  /// Set of functions for which we modified the content such that it might
  /// impact the call graph.
  SmallSetVector<Function *, 8> CGModifiedFunctions;

  /// Information about a dependence. If FromAA is changed, ToAA needs to be
  /// updated as well.
  struct DepInfo {
    const AbstractAttribute *FromAA;
    const AbstractAttribute *ToAA;
    DepClassTy DepClass;
  };

  /// The dependence stack is used to track dependences during an
  /// `AbstractAttribute::update` call. As `AbstractAttribute::update` can be
  /// recursive, we might have multiple vectors of dependences in here. The
  /// stack size should be adjusted according to the expected recursion depth,
  /// and the inner dependence vector size to the expected number of
  /// dependences per abstract attribute. Since the inner vectors are actually
  /// allocated on the stack, we can be generous with their size.
  using DependenceVector = SmallVector<DepInfo, 8>;
  SmallVector<DependenceVector *, 16> DependenceStack;

  /// A set to remember the functions we already assume to be live and visited.
  DenseSet<const Function *> VisitedFunctions;

  /// Uses we replace with a new value after manifest is done. We will then
  /// remove trivially dead instructions as well.
  SmallMapVector<Use *, Value *, 32> ToBeChangedUses;

  /// Values we replace with a new value after manifest is done. We will then
  /// remove trivially dead instructions as well.
  SmallMapVector<Value *, PointerIntPair<Value *, 1, bool>, 32>
      ToBeChangedValues;

  /// Instructions we replace with `unreachable` insts after manifest is done.
  SmallSetVector<WeakVH, 16> ToBeChangedToUnreachableInsts;

  /// Invoke instructions with at least a single dead successor block.
  SmallSetVector<WeakVH, 16> InvokeWithDeadSuccessor;

  /// A flag that indicates which stage of the process we are in. Initially,
  /// the phase is SEEDING. The phase is changed in `Attributor::run()`.
  enum class AttributorPhase {
    SEEDING,
    UPDATE,
    MANIFEST,
    CLEANUP,
  } Phase = AttributorPhase::SEEDING;

  /// The current initialization chain length. Tracked to avoid stack overflows.
  unsigned InitializationChainLength = 0;

  /// Functions, blocks, and instructions we delete after manifest is done.
  ///
  ///{
  SmallPtrSet<BasicBlock *, 8> ManifestAddedBlocks;
  SmallSetVector<Function *, 8> ToBeDeletedFunctions;
  SmallSetVector<BasicBlock *, 8> ToBeDeletedBlocks;
  SmallSetVector<WeakVH, 8> ToBeDeletedInsts;
  ///}

  /// Container with all the query AAs that requested an update via
  /// registerForUpdate.
  SmallSetVector<AbstractAttribute *, 16> QueryAAsAwaitingUpdate;

  /// User provided configuration for this Attributor instance.
  const AttributorConfig Configuration;

  friend AADepGraph;
  friend AttributorCallGraph;
};

/// An interface to query the internal state of an abstract attribute.
///
/// The abstract state is a minimal interface that allows the Attributor to
/// communicate with the abstract attributes about their internal state without
/// enforcing or exposing implementation details, e.g., the (existence of an)
/// underlying lattice.
///
/// It is sufficient to be able to query if a state is (1) valid or invalid, (2)
/// at a fixpoint, and to indicate to the state that (3) an optimistic fixpoint
/// was reached or (4) a pessimistic fixpoint was enforced.
///
/// All methods need to be implemented by the subclass. For the common use
/// case, a single boolean state or a bit-encoded state, the BooleanState and
/// {Inc,Dec,Bit}IntegerState classes are already provided. An abstract
/// attribute can inherit from them to get the abstract state interface and
/// additional methods to directly modify the state if needed. See the
/// class comments for help.
struct AbstractState {
  virtual ~AbstractState() = default;

  /// Return if this abstract state is in a valid state. If false, no
  /// information provided should be used.
  virtual bool isValidState() const = 0;

  /// Return if this abstract state is fixed, thus does not need to be updated
  /// if information changes as it cannot change itself.
  virtual bool isAtFixpoint() const = 0;

  /// Indicate that the abstract state should converge to the optimistic state.
  ///
  /// This will usually make the optimistically assumed state the known to be
  /// true state.
  ///
  /// \returns ChangeStatus::UNCHANGED as the assumed value should not change.
  virtual ChangeStatus indicateOptimisticFixpoint() = 0;

  /// Indicate that the abstract state should converge to the pessimistic state.
  ///
  /// This will usually revert the optimistically assumed state to the known to
  /// be true state.
  ///
  /// \returns ChangeStatus::CHANGED as the assumed value may change.
  virtual ChangeStatus indicatePessimisticFixpoint() = 0;
};

/// Simple state with an integer encoding.
///
/// The interface ensures that the assumed bits are always a subset of the known
/// bits. Users can only add known bits and, except through adding known bits,
/// they can only remove assumed bits. This should guarantee monotonicity and
/// thereby the existence of a fixpoint (if used correctly). The fixpoint is
/// reached when the assumed and known state/bits are equal. Users can
/// force/indicate a fixpoint. If an optimistic one is indicated, the known
/// state will catch up with the assumed one; for a pessimistic fixpoint it is
/// the other way around.
template <typename base_ty, base_ty BestState, base_ty WorstState>
struct IntegerStateBase : public AbstractState {
  using base_t = base_ty;

  IntegerStateBase() = default;
  IntegerStateBase(base_t Assumed) : Assumed(Assumed) {}

  /// Return the best possible representable state.
  static constexpr base_t getBestState() { return BestState; }
  static constexpr base_t getBestState(const IntegerStateBase &) {
    return getBestState();
  }

  /// Return the worst possible representable state.
  static constexpr base_t getWorstState() { return WorstState; }
  static constexpr base_t getWorstState(const IntegerStateBase &) {
    return getWorstState();
  }

  /// See AbstractState::isValidState()
  /// NOTE: For now we simply pretend that the worst possible state is invalid.
  bool isValidState() const override { return Assumed != getWorstState(); }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override { return Assumed == Known; }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    Known = Assumed;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    Assumed = Known;
    return ChangeStatus::CHANGED;
  }

  /// Return the known state encoding
  base_t getKnown() const { return Known; }

  /// Return the assumed state encoding.
  base_t getAssumed() const { return Assumed; }

  /// Equality for IntegerStateBase.
  bool
  operator==(const IntegerStateBase<base_t, BestState, WorstState> &R) const {
    return this->getAssumed() == R.getAssumed() &&
           this->getKnown() == R.getKnown();
  }

  /// Inequality for IntegerStateBase.
  bool
  operator!=(const IntegerStateBase<base_t, BestState, WorstState> &R) const {
    return !(*this == R);
  }

  /// "Clamp" this state with \p R. The result is subtype dependent but it is
  /// intended that only information assumed in both states will be assumed in
  /// this one afterwards.
  void operator^=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    handleNewAssumedValue(R.getAssumed());
  }

  /// "Clamp" this state with \p R. The result is subtype dependent but it is
  /// intended that information known in either state will be known in
  /// this one afterwards.
  void operator+=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    handleNewKnownValue(R.getKnown());
  }

  void operator|=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    joinOR(R.getAssumed(), R.getKnown());
  }

  void operator&=(const IntegerStateBase<base_t, BestState, WorstState> &R) {
    joinAND(R.getAssumed(), R.getKnown());
  }

protected:
  /// Handle a new assumed value \p Value. Subtype dependent.
  virtual void handleNewAssumedValue(base_t Value) = 0;

  /// Handle a new known value \p Value. Subtype dependent.
  virtual void handleNewKnownValue(base_t Value) = 0;

  /// Join \p AssumedValue and \p KnownValue into this state (OR-like join).
  /// Subtype dependent.
  virtual void joinOR(base_t AssumedValue, base_t KnownValue) = 0;

  /// Join \p AssumedValue and \p KnownValue into this state (AND-like join).
  /// Subtype dependent.
  virtual void joinAND(base_t AssumedValue, base_t KnownValue) = 0;

  /// The known state encoding in an integer of type base_t.
  base_t Known = getWorstState();

  /// The assumed state encoding in an integer of type base_t.
  base_t Assumed = getBestState();
};

/// Specialization of the integer state for a bit-wise encoding.
template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0),
          base_ty WorstState = 0>
struct BitIntegerState
    : public IntegerStateBase<base_ty, BestState, WorstState> {
  using super = IntegerStateBase<base_ty, BestState, WorstState>;
  using base_t = base_ty;
  BitIntegerState() = default;
  BitIntegerState(base_t Assumed) : super(Assumed) {}

  /// Return true if the bits set in \p BitsEncoding are "known bits".
  bool isKnown(base_t BitsEncoding = BestState) const {
    return (this->Known & BitsEncoding) == BitsEncoding;
  }

  /// Return true if the bits set in \p BitsEncoding are "assumed bits".
  bool isAssumed(base_t BitsEncoding = BestState) const {
    return (this->Assumed & BitsEncoding) == BitsEncoding;
  }

  /// Add the bits in \p Bits to the "known bits".
  BitIntegerState &addKnownBits(base_t Bits) {
    // Make sure we never miss any "known bits".
    this->Assumed |= Bits;
    this->Known |= Bits;
    return *this;
  }

  /// Remove the bits in \p BitsEncoding from the "assumed bits" if not known.
  BitIntegerState &removeAssumedBits(base_t BitsEncoding) {
    return intersectAssumedBits(~BitsEncoding);
  }

  /// Remove the bits in \p BitsEncoding from the "known bits".
  BitIntegerState &removeKnownBits(base_t BitsEncoding) {
    this->Known = (this->Known & ~BitsEncoding);
    return *this;
  }

  /// Keep only "assumed bits" also set in \p BitsEncoding but all known ones.
  BitIntegerState &intersectAssumedBits(base_t BitsEncoding) {
    // Make sure we never lose any "known bits".
    this->Assumed = (this->Assumed & BitsEncoding) | this->Known;
    return *this;
  }

private:
  void handleNewAssumedValue(base_t Value) override {
    intersectAssumedBits(Value);
  }
  void handleNewKnownValue(base_t Value) override { addKnownBits(Value); }
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    this->Known |= KnownValue;
    this->Assumed |= AssumedValue;
  }
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    this->Known &= KnownValue;
    this->Assumed &= AssumedValue;
  }
};
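
/// A minimal sketch of the bit-state semantics (bit values illustrative):
/// \code
///   BitIntegerState<uint8_t, /* BestState */ 7, /* WorstState */ 0> S;
///   S.addKnownBits(1);      // Bit 0 is now known (and stays assumed).
///   S.removeAssumedBits(2); // Bit 1 is no longer assumed.
///   assert(S.isKnown(1) && S.isAssumed(1) && !S.isAssumed(2));
/// \endcode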

/// Specialization of the integer state for an increasing value, hence ~0u is
/// the best state and 0 the worst.
template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0),
          base_ty WorstState = 0>
struct IncIntegerState
    : public IntegerStateBase<base_ty, BestState, WorstState> {
  using super = IntegerStateBase<base_ty, BestState, WorstState>;
  using base_t = base_ty;

  IncIntegerState() : super() {}
  IncIntegerState(base_t Assumed) : super(Assumed) {}

  /// Return the best possible representable state.
  static constexpr base_t getBestState() { return BestState; }
  static constexpr base_t
  getBestState(const IncIntegerState<base_ty, BestState, WorstState> &) {
    return getBestState();
  }

  /// Take minimum of assumed and \p Value.
  IncIntegerState &takeAssumedMinimum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::max(std::min(this->Assumed, Value), this->Known);
    return *this;
  }

  /// Take maximum of known and \p Value.
  IncIntegerState &takeKnownMaximum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::max(Value, this->Assumed);
    this->Known = std::max(Value, this->Known);
    return *this;
  }

private:
  void handleNewAssumedValue(base_t Value) override {
    takeAssumedMinimum(Value);
  }
  void handleNewKnownValue(base_t Value) override { takeKnownMaximum(Value); }
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    this->Known = std::max(this->Known, KnownValue);
    this->Assumed = std::max(this->Assumed, AssumedValue);
  }
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    this->Known = std::min(this->Known, KnownValue);
    this->Assumed = std::min(this->Assumed, AssumedValue);
  }
};
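
/// A minimal sketch (values illustrative): the assumed value only decreases
/// toward the known value, which only increases:
/// \code
///   IncIntegerState<uint32_t> S; // Assumed = ~0u, Known = 0.
///   S.takeKnownMaximum(4);       // Known = 4.
///   S.takeAssumedMinimum(10);    // Assumed = 10.
///   S.takeAssumedMinimum(2);     // Clamped: Assumed = 4, never below Known.
/// \endcode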

/// Specialization of the integer state for a decreasing value, hence 0 is the
/// best state and ~0u the worst.
template <typename base_ty = uint32_t>
struct DecIntegerState : public IntegerStateBase<base_ty, 0, ~base_ty(0)> {
  using base_t = base_ty;

  /// Take maximum of assumed and \p Value.
  DecIntegerState &takeAssumedMaximum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::min(std::max(this->Assumed, Value), this->Known);
    return *this;
  }

  /// Take minimum of known and \p Value.
  DecIntegerState &takeKnownMinimum(base_t Value) {
    // Make sure we never lose "known value".
    this->Assumed = std::min(Value, this->Assumed);
    this->Known = std::min(Value, this->Known);
    return *this;
  }

private:
  void handleNewAssumedValue(base_t Value) override {
    takeAssumedMaximum(Value);
  }
  void handleNewKnownValue(base_t Value) override { takeKnownMinimum(Value); }
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    this->Assumed = std::min(this->Assumed, KnownValue);
    this->Assumed = std::min(this->Assumed, AssumedValue);
  }
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    this->Assumed = std::max(this->Assumed, KnownValue);
    this->Assumed = std::max(this->Assumed, AssumedValue);
  }
};

/// Simple wrapper for a single bit (boolean) state.
struct BooleanState : public IntegerStateBase<bool, true, false> {
  using super = IntegerStateBase<bool, true, false>;
  using base_t = IntegerStateBase::base_t;

  BooleanState() = default;
  BooleanState(base_t Assumed) : super(Assumed) {}

  /// Set the assumed value to \p Value but never below the known one.
  void setAssumed(bool Value) { Assumed &= (Known | Value); }

  /// Set the known and assumed value to \p Value.
  void setKnown(bool Value) {
    Known |= Value;
    Assumed |= Value;
  }

  /// Return true if the state is assumed to hold.
  bool isAssumed() const { return getAssumed(); }

  /// Return true if the state is known to hold.
  bool isKnown() const { return getKnown(); }

private:
  void handleNewAssumedValue(base_t Value) override {
    if (!Value)
      Assumed = Known;
  }
  void handleNewKnownValue(base_t Value) override {
    if (Value)
      Known = (Assumed = Value);
  }
  void joinOR(base_t AssumedValue, base_t KnownValue) override {
    Known |= KnownValue;
    Assumed |= AssumedValue;
  }
  void joinAND(base_t AssumedValue, base_t KnownValue) override {
    Known &= KnownValue;
    Assumed &= AssumedValue;
  }
};

/// State for an integer range.
struct IntegerRangeState : public AbstractState {

  /// Bitwidth of the associated value.
  uint32_t BitWidth;

  /// State representing assumed range, initially set to empty.
  ConstantRange Assumed;

  /// State representing known range, initially set to [-inf, inf].
  ConstantRange Known;

  IntegerRangeState(uint32_t BitWidth)
      : BitWidth(BitWidth), Assumed(ConstantRange::getEmpty(BitWidth)),
        Known(ConstantRange::getFull(BitWidth)) {}

  IntegerRangeState(const ConstantRange &CR)
      : BitWidth(CR.getBitWidth()), Assumed(CR),
        Known(getWorstState(CR.getBitWidth())) {}

  /// Return the worst possible representable state.
  static ConstantRange getWorstState(uint32_t BitWidth) {
    return ConstantRange::getFull(BitWidth);
  }

  /// Return the best possible representable state.
  static ConstantRange getBestState(uint32_t BitWidth) {
    return ConstantRange::getEmpty(BitWidth);
  }
  static ConstantRange getBestState(const IntegerRangeState &IRS) {
    return getBestState(IRS.getBitWidth());
  }

  /// Return the associated value's bit width.
  uint32_t getBitWidth() const { return BitWidth; }

  /// See AbstractState::isValidState()
  bool isValidState() const override {
    return BitWidth > 0 && !Assumed.isFullSet();
  }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override { return Assumed == Known; }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    Known = Assumed;
    return ChangeStatus::CHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    Assumed = Known;
    return ChangeStatus::CHANGED;
  }

  /// Return the known state encoding
  ConstantRange getKnown() const { return Known; }

  /// Return the assumed state encoding.
  ConstantRange getAssumed() const { return Assumed; }

  /// Unite assumed range with the passed state.
  void unionAssumed(const ConstantRange &R) {
    // Don't lose a known range.
    Assumed = Assumed.unionWith(R).intersectWith(Known);
  }

  /// See IntegerRangeState::unionAssumed(..).
  void unionAssumed(const IntegerRangeState &R) {
    unionAssumed(R.getAssumed());
  }

  /// Intersect known range with the passed state.
  void intersectKnown(const ConstantRange &R) {
    Assumed = Assumed.intersectWith(R);
    Known = Known.intersectWith(R);
  }

  /// See IntegerRangeState::intersectKnown(..).
  void intersectKnown(const IntegerRangeState &R) {
    intersectKnown(R.getKnown());
  }

  /// Equality for IntegerRangeState.
  bool operator==(const IntegerRangeState &R) const {
    return getAssumed() == R.getAssumed() && getKnown() == R.getKnown();
  }

  /// "Clamp" this state with \p R. The result is subtype dependent but it is
  /// intended that only information assumed in both states will be assumed in
  /// this one afterwards.
  IntegerRangeState operator^=(const IntegerRangeState &R) {
    // NOTE: `^=` operator seems like `intersect` but in this case, we need to
    // take `union`.
    unionAssumed(R);
    return *this;
  }

  IntegerRangeState operator&=(const IntegerRangeState &R) {
    // NOTE: `&=` operator seems like `intersect` but in this case, we need to
    // take `union`.
    Known = Known.unionWith(R.getKnown());
    Assumed = Assumed.unionWith(R.getAssumed());
    return *this;
  }
};
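
/// A minimal sketch (ranges illustrative): the assumed range grows via union
/// but is always clamped to stay within the known range:
/// \code
///   IntegerRangeState S(/* BitWidth */ 32); // Assumed empty, Known full.
///   S.unionAssumed(ConstantRange(APInt(32, 0), APInt(32, 10)));
///   S.intersectKnown(ConstantRange(APInt(32, 0), APInt(32, 100)));
/// \endcode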

/// Simple state for a set.
///
/// This represents a state containing a set of values. The interface supports
/// modelling sets that contain all possible elements. The state's internal
/// value is modified using union or intersection operations.
template <typename BaseTy> struct SetState : public AbstractState {
  /// A wrapper around a set that has semantics for handling unions and
  /// intersections with a "universal" set that contains all elements.
  struct SetContents {
    /// Creates either a universal set (containing every possible element) or
    /// an empty set, depending on \p Universal.
    SetContents(bool Universal) : Universal(Universal) {}

    /// Creates a non-universal set with concrete values.
    SetContents(const DenseSet<BaseTy> &Assumptions)
        : Universal(false), Set(Assumptions) {}

    SetContents(bool Universal, const DenseSet<BaseTy> &Assumptions)
        : Universal(Universal), Set(Assumptions) {}

    const DenseSet<BaseTy> &getSet() const { return Set; }

    bool isUniversal() const { return Universal; }

    bool empty() const { return Set.empty() && !Universal; }

    /// Finds A := A ^ B where A or B could be the "Universal" set which
    /// contains every possible attribute. Returns true if changes were made.
    bool getIntersection(const SetContents &RHS) {
      bool IsUniversal = Universal;
      unsigned Size = Set.size();

      // A := A ^ U = A
      if (RHS.isUniversal())
        return false;

      // A := U ^ B = B
      if (Universal)
        Set = RHS.getSet();
      else
        set_intersect(Set, RHS.getSet());

      Universal &= RHS.isUniversal();
      return IsUniversal != Universal || Size != Set.size();
    }

    /// Finds A := A u B where A or B could be the "Universal" set which
    /// contains every possible attribute. Returns true if changes were made.
    bool getUnion(const SetContents &RHS) {
      bool IsUniversal = Universal;
      unsigned Size = Set.size();

      // A := A u U = U = U u B
      if (!RHS.isUniversal() && !Universal)
        set_union(Set, RHS.getSet());

      Universal |= RHS.isUniversal();
      return IsUniversal != Universal || Size != Set.size();
    }

  private:
    /// Indicates if this set is "universal", containing every possible element.
    bool Universal;

    /// The set of currently active assumptions.
    DenseSet<BaseTy> Set;
  };

  SetState() : Known(false), Assumed(true), IsAtFixedpoint(false) {}

  /// Initializes the known state with an initial set and initializes the
  /// assumed state as universal.
  SetState(const DenseSet<BaseTy> &Known)
      : Known(Known), Assumed(true), IsAtFixedpoint(false) {}

  /// See AbstractState::isValidState()
  bool isValidState() const override { return !Assumed.empty(); }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override { return IsAtFixedpoint; }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    IsAtFixedpoint = true;
    Known = Assumed;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    IsAtFixedpoint = true;
    Assumed = Known;
    return ChangeStatus::CHANGED;
  }

  /// Return the known state encoding.
  const SetContents &getKnown() const { return Known; }

  /// Return the assumed state encoding.
  const SetContents &getAssumed() const { return Assumed; }

  /// Returns true if the set state contains \p Elem.
  bool setContains(const BaseTy &Elem) const {
    return Assumed.getSet().contains(Elem) || Known.getSet().contains(Elem);
  }

  /// Performs the set intersection between this set and \p RHS. Returns true if
  /// changes were made.
  bool getIntersection(const SetContents &RHS) {
    bool IsUniversal = Assumed.isUniversal();
    unsigned SizeBefore = Assumed.getSet().size();

    // Get intersection and make sure that the known set is still a proper
    // subset of the assumed set. A := K u (A ^ R).
    Assumed.getIntersection(RHS);
    Assumed.getUnion(Known);

    return SizeBefore != Assumed.getSet().size() ||
           IsUniversal != Assumed.isUniversal();
  }

  /// Performs the set union between this set and \p RHS. Returns true if
  /// changes were made.
  bool getUnion(const SetContents &RHS) { return Assumed.getUnion(RHS); }

private:
  /// The set of values known for this state.
  SetContents Known;

  /// The set of assumed values for this state.
  SetContents Assumed;

  bool IsAtFixedpoint;
};
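
// An illustrative sketch (not part of this header's API) of the invariant
// A := K u (A ^ R) kept by SetState::getIntersection: the assumed set is
// narrowed by \p RHS but never below the known set. Values are hypothetical.
#if 0
void setStateIntersectionExample() {
  SetState<int> S(DenseSet<int>({1})); // known = {1}, assumed = universal
  S.getIntersection(SetState<int>::SetContents(DenseSet<int>({2, 3})));
  // assumed is now {1} u ({2, 3} ^ universal) = {1, 2, 3}; known stays {1}.
}
#endif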

/// Helper to tie an abstract state implementation to an abstract attribute.
template <typename StateTy, typename BaseType, class... Ts>
struct StateWrapper : public BaseType, public StateTy {
  /// Provide static access to the type of the state.
  using StateType = StateTy;

  StateWrapper(const IRPosition &IRP, Ts... Args)
      : BaseType(IRP), StateTy(Args...) {}

  /// See AbstractAttribute::getState(...).
  StateType &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const StateType &getState() const override { return *this; }
};
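
// A hypothetical sketch (not part of this header) of the common pattern:
// StateWrapper multiply-inherits the attribute base and the state so that
// getState() can simply return *this.
#if 0
struct AAExampleWrapped : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAExampleWrapped(const IRPosition &IRP) : Base(IRP) {}
  // getState() is inherited from StateWrapper; the remaining pure virtual
  // members of AbstractAttribute would still have to be implemented.
};
#endif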

/// Helper class that provides common functionality to manifest IR attributes.
template <Attribute::AttrKind AK, typename BaseType, typename AAType>
struct IRAttribute : public BaseType {
  IRAttribute(const IRPosition &IRP) : BaseType(IRP) {}

  /// Most boolean IRAttribute AAs don't do anything non-trivial
  /// in their initializers while non-boolean ones often do. Subclasses can
  /// change this.
  static bool hasTrivialInitializer() { return Attribute::isEnumAttrKind(AK); }

  /// Compile time access to the IR attribute kind.
  static constexpr Attribute::AttrKind IRAttributeKind = AK;

  /// Return true if the IR attribute(s) associated with this AA are implied for
  /// an undef value.
  static bool isImpliedByUndef() { return true; }

  /// Return true if the IR attribute(s) associated with this AA are implied for
  /// a poison value.
  static bool isImpliedByPoison() { return true; }

  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind = AK,
                            bool IgnoreSubsumingPositions = false) {
    if (AAType::isImpliedByUndef() && isa<UndefValue>(IRP.getAssociatedValue()))
      return true;
    if (AAType::isImpliedByPoison() &&
        isa<PoisonValue>(IRP.getAssociatedValue()))
      return true;
    return A.hasAttr(IRP, {ImpliedAttributeKind}, IgnoreSubsumingPositions,
                     ImpliedAttributeKind);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    if (isa<UndefValue>(this->getIRPosition().getAssociatedValue()))
      return ChangeStatus::UNCHANGED;
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(A, this->getAnchorValue().getContext(), DeducedAttrs);
    if (DeducedAttrs.empty())
      return ChangeStatus::UNCHANGED;
    return A.manifestAttrs(this->getIRPosition(), DeducedAttrs);
  }

  /// Return the kind that identifies the abstract attribute implementation.
  Attribute::AttrKind getAttrKind() const { return AK; }

  /// Return the deduced attributes in \p Attrs.
  virtual void getDeducedAttributes(Attributor &A, LLVMContext &Ctx,
                                    SmallVectorImpl<Attribute> &Attrs) const {
    Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
  }
};
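
// A hypothetical sketch (not part of this header) of how subclasses refine
// the IRAttribute defaults above, e.g. opting out of the undef-implies-
// attribute shortcut the way AANonNull does further below.
#if 0
struct AAExampleIRAttr
    : public IRAttribute<Attribute::NoFree,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AAExampleIRAttr> {
  AAExampleIRAttr(const IRPosition &IRP) : IRAttribute(IRP) {}
  // In this sketch, undef values shall not imply the attribute.
  static bool isImpliedByUndef() { return false; }
};
#endif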

/// Base struct for all "concrete attribute" deductions.
///
/// The abstract attribute is a minimal interface that allows the Attributor to
/// orchestrate the abstract/fixpoint analysis. The design allows to hide away
/// implementation choices made for the subclasses but also to structure their
/// implementation and simplify the use of other abstract attributes in-flight.
///
/// To allow easy creation of new attributes, most methods have default
/// implementations. The ones that do not are generally straightforward, except
/// `AbstractAttribute::updateImpl` which is the location of most reasoning
/// associated with the abstract attribute. The update is invoked by the
/// Attributor in case the situation used to justify the current optimistic
/// state might have changed. The Attributor determines this automatically
/// by monitoring the `Attributor::getAAFor` calls made by abstract attributes.
///
/// The `updateImpl` method should inspect the IR and other abstract attributes
/// in-flight to justify the best possible (=optimistic) state. The actual
/// implementation is, similar to the underlying abstract state encoding, not
/// exposed. In the most common case, the `updateImpl` will go through a list of
/// reasons why its optimistic state is valid given the current information. If
/// any combination of them holds and is sufficient to justify the current
/// optimistic state, the method shall return UNCHANGED. If not, the optimistic
/// state is adjusted to the situation and the method shall return CHANGED.
///
/// If the manifestation of the "concrete attribute" deduced by the subclass
/// differs from the "default" behavior, which is a (set of) LLVM-IR
/// attribute(s) for an argument, call site argument, function return value, or
/// function, the `AbstractAttribute::manifest` method should be overloaded.
///
/// NOTE: If the state obtained via getState() is INVALID, thus if
///       AbstractAttribute::getState().isValidState() returns false, no
///       information provided by the methods of this class should be used.
/// NOTE: The Attributor currently has certain limitations to what we can do.
///       As a general rule of thumb, "concrete" abstract attributes should *for
///       now* only perform "backward" information propagation. That means
///       optimistic information obtained through abstract attributes should
///       only be used at positions that precede the origin of the information
///       with regards to the program flow. More practically, information can
///       *now* be propagated from instructions to their enclosing function, but
///       *not* from call sites to the called function. The mechanisms to allow
///       both directions will be added in the future.
/// NOTE: The mechanics of adding a new "concrete" abstract attribute are
///       described in the file comment.
struct AbstractAttribute : public IRPosition, public AADepGraphNode {
  using StateType = AbstractState;

  AbstractAttribute(const IRPosition &IRP) : IRPosition(IRP) {}

  /// Virtual destructor.
  virtual ~AbstractAttribute() = default;

  /// This function is used to identify if a \p DGN is of type
  /// AbstractAttribute so that the dyn_cast and cast can use such information
  /// to cast an AADepGraphNode to an AbstractAttribute.
  ///
  /// We eagerly return true here because all AADepGraphNodes except for the
  /// synthesis node are of type AbstractAttribute.
  static bool classof(const AADepGraphNode *DGN) { return true; }

  /// Return false if this AA does anything non-trivial (hence not done by
  /// default) in its initializer.
  static bool hasTrivialInitializer() { return false; }

  /// Return true if this AA requires a "callee" (or an associated function) for
  /// a call site position. Default is optimistic to minimize AAs.
  static bool requiresCalleeForCallBase() { return true; }

  /// Return true if this AA requires all callers for an argument or function
  /// position.
  static bool requiresCallersForArgOrFunction() { return false; }

  /// Return false if an AA should not be created for \p IRP.
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    return true;
  }

  /// Return false if an AA should not be updated for \p IRP.
  static bool isValidIRPositionForUpdate(Attributor &A, const IRPosition &IRP) {
    Function *AssociatedFn = IRP.getAssociatedFunction();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    assert((!IsFnInterface || AssociatedFn) &&
           "Function interface without a function?");

    // TODO: Not all attributes require an exact definition. Find a way to
    //       enable deduction for some but not all attributes in case the
    //       definition might be changed at runtime, see also
    //       http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
    // TODO: We could always determine abstract attributes and if sufficient
    //       information was found we could duplicate the functions that do not
    //       have an exact definition.
    return !IsFnInterface || A.isFunctionIPOAmendable(*AssociatedFn);
  }

  /// Initialize the state with the information in the Attributor \p A.
  ///
  /// This function is called by the Attributor once all abstract attributes
  /// have been identified. It can and shall be used for tasks like:
  ///  - identify existing knowledge in the IR and use it for the "known state"
  ///  - perform any work that is not going to change over time, e.g., determine
  ///    a subset of the IR, or attributes in-flight, that have to be looked at
  ///    in the `updateImpl` method.
  virtual void initialize(Attributor &A) {}

  /// A query AA is always scheduled as long as we do updates because it does
  /// lazy computation that cannot be determined to be done from the outside.
  /// However, while query AAs will not be fixed if they do not have outstanding
  /// dependences, we will only schedule them like other AAs. If a query AA
  /// receives a new query, it needs to request an update via
  /// `Attributor::requestUpdateForAA`.
  virtual bool isQueryAA() const { return false; }

  /// Return the internal abstract state for inspection.
  virtual StateType &getState() = 0;
  virtual const StateType &getState() const = 0;

  /// Return an IR position, see struct IRPosition.
  const IRPosition &getIRPosition() const { return *this; }
  IRPosition &getIRPosition() { return *this; }

  /// Helper functions, for debug purposes only.
  ///{
  void print(raw_ostream &OS) const { print(nullptr, OS); }
  void print(Attributor *, raw_ostream &OS) const override;
  virtual void printWithDeps(raw_ostream &OS) const;
  void dump() const { this->print(dbgs()); }

  /// This function should return the "summarized" assumed state as a string.
  virtual const std::string getAsStr(Attributor *A) const = 0;

  /// This function should return the name of the AbstractAttribute
  virtual const std::string getName() const = 0;

  /// This function should return the address of the ID of the AbstractAttribute
  virtual const char *getIdAddr() const = 0;
  ///}

  /// Allow the Attributor access to the protected methods.
  friend struct Attributor;

protected:
  /// Hook for the Attributor to trigger an update of the internal state.
  ///
  /// If this attribute is already fixed, this method will return UNCHANGED,
  /// otherwise it delegates to `AbstractAttribute::updateImpl`.
  ///
  /// \return CHANGED if the internal state changed, otherwise UNCHANGED.
  ChangeStatus update(Attributor &A);

  /// Hook for the Attributor to trigger the manifestation of the information
  /// represented by the abstract attribute in the LLVM-IR.
  ///
  /// \return CHANGED if the IR was altered, otherwise UNCHANGED.
  virtual ChangeStatus manifest(Attributor &A) {
    return ChangeStatus::UNCHANGED;
  }

  /// Hook to enable custom statistic tracking, called after manifest that
  /// resulted in a change if statistics are enabled.
  ///
  /// We require subclasses to provide an implementation so we remember to
  /// add statistics for them.
  virtual void trackStatistics() const = 0;

  /// The actual update/transfer function which has to be implemented by the
  /// derived classes.
  ///
  /// If it is called, the environment has changed and we have to determine if
  /// the current information is still valid or adjust it otherwise.
  ///
  /// \return CHANGED if the internal state changed, otherwise UNCHANGED.
  virtual ChangeStatus updateImpl(Attributor &A) = 0;
};
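
// A minimal, hypothetical "concrete attribute" sketch (not part of this
// header) following the contract described above: `updateImpl` either
// justifies the optimistic state (UNCHANGED) or adjusts it (CHANGED).
#if 0
struct AAMinimalExample : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAMinimalExample(const IRPosition &IRP) : Base(IRP) {}

  const std::string getAsStr(Attributor *) const override {
    return getAssumed() ? "example" : "no-example";
  }
  const std::string getName() const override { return "AAMinimalExample"; }
  const char *getIdAddr() const override { return &ID; }
  void trackStatistics() const override {}

  ChangeStatus updateImpl(Attributor &A) override {
    // Inspect the IR and other AAs here; this sketch simply gives up.
    return indicatePessimisticFixpoint();
  }

  static const char ID;
};
#endif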

/// Forward declarations of output stream operators for debug purposes.
///
///{
raw_ostream &operator<<(raw_ostream &OS, const AbstractAttribute &AA);
raw_ostream &operator<<(raw_ostream &OS, ChangeStatus S);
raw_ostream &operator<<(raw_ostream &OS, IRPosition::Kind);
raw_ostream &operator<<(raw_ostream &OS, const IRPosition &);
raw_ostream &operator<<(raw_ostream &OS, const AbstractState &State);
template <typename base_ty, base_ty BestState, base_ty WorstState>
raw_ostream &
operator<<(raw_ostream &OS,
           const IntegerStateBase<base_ty, BestState, WorstState> &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}
raw_ostream &operator<<(raw_ostream &OS, const IntegerRangeState &State);
///}

struct AttributorPass : public PassInfoMixin<AttributorPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
struct AttributorCGSCCPass : public PassInfoMixin<AttributorCGSCCPass> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
};

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in: the
/// update needs to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
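
// An illustrative usage sketch (not part of this header) for
// clampStateAndIndicateChange, using the BooleanState semantics defined
// earlier in this file.
#if 0
void clampExample() {
  BooleanState S;                  // starts at the best state, assumed "true"
  BooleanState R;
  R.indicatePessimisticFixpoint(); // R's assumed collapses to its known state
  // S ^= R clamps S's assumed information with R's; the helper reports it:
  ChangeStatus CS = clampStateAndIndicateChange(S, R); // CHANGED here
  (void)CS;
}
#endif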

/// ----------------------------------------------------------------------------
///                       Abstract Attribute Classes
/// ----------------------------------------------------------------------------

struct AANoUnwind
    : public IRAttribute<Attribute::NoUnwind,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoUnwind> {
  AANoUnwind(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Returns true if nounwind is assumed.
  bool isAssumedNoUnwind() const { return getAssumed(); }

  /// Returns true if nounwind is known.
  bool isKnownNoUnwind() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoUnwind"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoUnwind
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

struct AANoSync
    : public IRAttribute<Attribute::NoSync,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoSync> {
  AANoSync(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Returns true if "nosync" is assumed.
  bool isAssumedNoSync() const { return getAssumed(); }

  /// Returns true if "nosync" is known.
  bool isKnownNoSync() const { return getKnown(); }

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction whose ordering is stronger than
  /// unordered or monotonic.
  static bool isNonRelaxedAtomic(const Instruction *I);

  /// Helper function specific to intrinsics that are potentially volatile.
  static bool isNoSyncIntrinsic(const Instruction *I);

  /// Helper function to determine if \p CB is an aligned (GPU) barrier. Aligned
  /// barriers have to be executed by all threads. The flag \p ExecutedAligned
  /// indicates if the call is executed by all threads in a (thread) block in an
  /// aligned way. If that is the case, non-aligned barriers are effectively
  /// aligned barriers.
  static bool isAlignedBarrier(const CallBase &CB, bool ExecutedAligned);

  /// Create an abstract attribute view for the position \p IRP.
  static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoSync"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoSync
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all mustprogress attributes.
struct AAMustProgress
    : public IRAttribute<Attribute::MustProgress,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AAMustProgress> {
  AAMustProgress(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false) {
    // Note: This is also run for non-IPO amendable functions.
    assert(ImpliedAttributeKind == Attribute::MustProgress);
    return A.hasAttr(IRP, {Attribute::MustProgress, Attribute::WillReturn},
                     IgnoreSubsumingPositions, Attribute::MustProgress);
  }

  /// Return true if we assume that the underlying value is mustprogress.
  bool isAssumedMustProgress() const { return getAssumed(); }

  /// Return true if we know that the underlying value is mustprogress.
  bool isKnownMustProgress() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAMustProgress &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMustProgress"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMustProgress
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all nonnull attributes.
struct AANonNull
    : public IRAttribute<Attribute::NonNull,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANonNull> {
  AANonNull(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::hasTrivialInitializer.
  static bool hasTrivialInitializer() { return false; }

  /// See IRAttribute::isImpliedByUndef.
  /// Undef is not necessarily nonnull as nonnull + noundef would cause poison.
  /// Poison implies nonnull though.
  static bool isImpliedByUndef() { return false; }

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::isImpliedByIR(...).
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// Return true if we assume that the underlying value is nonnull.
  bool isAssumedNonNull() const { return getAssumed(); }

  /// Return true if we know that the underlying value is nonnull.
  bool isKnownNonNull() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANonNull"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANonNull
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for norecurse.
struct AANoRecurse
    : public IRAttribute<Attribute::NoRecurse,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoRecurse> {
  AANoRecurse(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if "norecurse" is assumed.
  bool isAssumedNoRecurse() const { return getAssumed(); }

  /// Return true if "norecurse" is known.
  bool isKnownNoRecurse() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoRecurse"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoRecurse
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for willreturn.
struct AAWillReturn
    : public IRAttribute<Attribute::WillReturn,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AAWillReturn> {
  AAWillReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false) {
    // Note: This is also run for non-IPO amendable functions.
    assert(ImpliedAttributeKind == Attribute::WillReturn);
    if (IRAttribute::isImpliedByIR(A, IRP, ImpliedAttributeKind,
                                   IgnoreSubsumingPositions))
      return true;
    if (!isImpliedByMustprogressAndReadonly(A, IRP))
      return false;
    A.manifestAttrs(IRP, Attribute::get(IRP.getAnchorValue().getContext(),
                                        Attribute::WillReturn));
    return true;
  }

  /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
  static bool isImpliedByMustprogressAndReadonly(Attributor &A,
                                                 const IRPosition &IRP) {
    // Check for `mustprogress` in the scope and the associated function which
    // might be different if this is a call site.
    if (!A.hasAttr(IRP, {Attribute::MustProgress}))
      return false;

    SmallVector<Attribute, 2> Attrs;
    A.getAttrs(IRP, {Attribute::Memory}, Attrs,
               /* IgnoreSubsumingPositions */ false);

    MemoryEffects ME = MemoryEffects::unknown();
    for (const Attribute &Attr : Attrs)
      ME &= Attr.getMemoryEffects();
    return ME.onlyReadsMemory();
  }
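
  // An illustrative sketch (not part of this header) of the MemoryEffects
  // folding used above: all memory attributes are intersected and we ask
  // whether only reads remain. The readOnly() effect stands in for a
  // hypothetical `memory(read)` attribute on the position.
#if 0
  void memoryEffectsFoldingExample() {
    MemoryEffects ME = MemoryEffects::unknown();
    ME &= MemoryEffects::readOnly();       // one attribute in the loop above
    bool OnlyReads = ME.onlyReadsMemory(); // true -> willreturn is implied
    (void)OnlyReads;
  }
#endif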

  /// Return true if "willreturn" is assumed.
  bool isAssumedWillReturn() const { return getAssumed(); }

  /// Return true if "willreturn" is known.
  bool isKnownWillReturn() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAWillReturn"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAWillReturn
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for undefined behavior.
struct AAUndefinedBehavior
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAUndefinedBehavior(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Return true if "undefined behavior" is assumed.
  bool isAssumedToCauseUB() const { return getAssumed(); }

  /// Return true if "undefined behavior" is assumed for a specific instruction.
  virtual bool isAssumedToCauseUB(Instruction *I) const = 0;

  /// Return true if "undefined behavior" is known.
  bool isKnownToCauseUB() const { return getKnown(); }

  /// Return true if "undefined behavior" is known for a specific instruction.
  virtual bool isKnownToCauseUB(Instruction *I) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAUndefinedBehavior &createForPosition(const IRPosition &IRP,
                                                Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAUndefinedBehavior"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAUndefinedBehavior
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface to determine reachability from point A to point B.
struct AAIntraFnReachability
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAIntraFnReachability(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if the 'From' instruction is assumed to reach the 'To'
  /// instruction. Users should provide the two positions they are interested
  /// in, and the class determines (and caches) reachability.
  virtual bool isAssumedReachable(
      Attributor &A, const Instruction &From, const Instruction &To,
      const AA::InstExclusionSetTy *ExclusionSet = nullptr) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAIntraFnReachability &createForPosition(const IRPosition &IRP,
                                                  Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAIntraFnReachability"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAIntraFnReachability
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all noalias attributes.
struct AANoAlias
    : public IRAttribute<Attribute::NoAlias,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoAlias> {
  AANoAlias(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// See AbstractAttribute::requiresCalleeForCallBase
  static bool requiresCalleeForCallBase() { return false; }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Return true if we assume that the underlying value is noalias.
  bool isAssumedNoAlias() const { return getAssumed(); }

  /// Return true if we know that the underlying value is noalias.
  bool isKnownNoAlias() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoAlias"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoAlias
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An AbstractAttribute for nofree.
struct AANoFree
    : public IRAttribute<Attribute::NoFree,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoFree> {
  AANoFree(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false) {
    // Note: This is also run for non-IPO amendable functions.
    assert(ImpliedAttributeKind == Attribute::NoFree);
    return A.hasAttr(
        IRP, {Attribute::ReadNone, Attribute::ReadOnly, Attribute::NoFree},
        IgnoreSubsumingPositions, Attribute::NoFree);
  }

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Return true if "nofree" is assumed.
  bool isAssumedNoFree() const { return getAssumed(); }

  /// Return true if "nofree" is known.
  bool isKnownNoFree() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoFree"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoFree
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An AbstractAttribute for noreturn.
struct AANoReturn
    : public IRAttribute<Attribute::NoReturn,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoReturn> {
  AANoReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if the underlying object is assumed to never return.
  bool isAssumedNoReturn() const { return getAssumed(); }

  /// Return true if the underlying object is known to never return.
  bool isKnownNoReturn() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoReturn"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoReturn
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for the liveness abstract attribute.
struct AAIsDead
    : public StateWrapper<BitIntegerState<uint8_t, 3, 0>, AbstractAttribute> {
  using Base = StateWrapper<BitIntegerState<uint8_t, 3, 0>, AbstractAttribute>;
  AAIsDead(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// State encoding bits. A set bit in the state means the property holds.
  enum {
    HAS_NO_EFFECT = 1 << 0,
    IS_REMOVABLE = 1 << 1,

    IS_DEAD = HAS_NO_EFFECT | IS_REMOVABLE,
  };
  static_assert(IS_DEAD == getBestState(), "Unexpected BEST_STATE value");

protected:
  /// The query functions are protected such that other attributes need to go
  /// through the Attributor interfaces: `Attributor::isAssumedDead(...)`

  /// Returns true if the underlying value is assumed dead.
  virtual bool isAssumedDead() const = 0;

  /// Returns true if the underlying value is known dead.
  virtual bool isKnownDead() const = 0;

  /// Returns true if \p BB is known dead.
  virtual bool isKnownDead(const BasicBlock *BB) const = 0;

  /// Returns true if \p I is assumed dead.
  virtual bool isAssumedDead(const Instruction *I) const = 0;

  /// Returns true if \p I is known dead.
  virtual bool isKnownDead(const Instruction *I) const = 0;

  /// Return true if the underlying value is a store that is known to be
  /// removable. This is different from dead stores as the removable store
  /// can have an effect on live values, especially loads, but that effect
  /// is propagated which allows us to remove the store in turn.
  virtual bool isRemovableStore() const { return false; }

  /// This method is used to check if at least one instruction in a collection
  /// of instructions is live.
  template <typename T> bool isLiveInstSet(T begin, T end) const {
    for (const auto &I : llvm::make_range(begin, end)) {
      assert(I->getFunction() == getIRPosition().getAssociatedFunction() &&
             "Instruction must be in the same anchor scope function.");

      if (!isAssumedDead(I))
        return true;
    }

    return false;
  }

public:
  /// Create an abstract attribute view for the position \p IRP.
  static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Returns true if \p BB is assumed dead.
  virtual bool isAssumedDead(const BasicBlock *BB) const = 0;

  /// Return true if the edge from \p From BB to \p To BB is assumed dead.
  /// This is specifically useful in AAReachability.
  virtual bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const {
    return false;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAIsDead"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAIsDead
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

  friend struct Attributor;
};

/// State for the dereferenceable attribute.
struct DerefState : AbstractState {

  static DerefState getBestState() { return DerefState(); }
  static DerefState getBestState(const DerefState &) { return getBestState(); }

  /// Return the worst possible representable state.
  static DerefState getWorstState() {
    DerefState DS;
    DS.indicatePessimisticFixpoint();
    return DS;
  }
  static DerefState getWorstState(const DerefState &) {
    return getWorstState();
  }

  /// State representing the number of dereferenceable bytes.
  IncIntegerState<> DerefBytesState;

  /// Map from accessed memory offsets to access sizes.
  /// A key is an offset and the corresponding value is the access size.
  /// If there is a load/store instruction like
  ///   p[offset] = v;
  /// (offset, sizeof(v)) will be inserted into this map.
  /// std::map is used because we want to iterate keys in ascending order.
  std::map<int64_t, uint64_t> AccessedBytesMap;

  /// Helper function to calculate dereferenceable bytes from current known
  /// bytes and accessed bytes.
  ///
  /// ```
  /// int f(int *A){
  ///    *A = 0;
  ///    *(A+2) = 2;
  ///    *(A+1) = 1;
  ///    *(A+10) = 10;
  /// }
  /// ```
  /// In that case, AccessedBytesMap is `{0:4, 4:4, 8:4, 40:4}`.
  /// AccessedBytesMap is a std::map, so it is iterated in ascending order on
  /// the key (Offset). KnownBytes will thus be updated like this:
  ///
  /// | Access   | KnownBytes
  /// | (0, 4)   | 0 -> 4
  /// | (4, 4)   | 4 -> 8
  /// | (8, 4)   | 8 -> 12
  /// | (40, 4)  | 12 (break)
  void computeKnownDerefBytesFromAccessedMap() {
    int64_t KnownBytes = DerefBytesState.getKnown();
    for (auto &Access : AccessedBytesMap) {
      if (KnownBytes < Access.first)
        break;
      KnownBytes = std::max(KnownBytes, Access.first + (int64_t)Access.second);
    }

    DerefBytesState.takeKnownMaximum(KnownBytes);
  }

  /// State representing whether the value is globally dereferenceable.
  BooleanState GlobalState;

  /// See AbstractState::isValidState()
  bool isValidState() const override { return DerefBytesState.isValidState(); }

  /// See AbstractState::isAtFixpoint()
  bool isAtFixpoint() const override {
    return !isValidState() ||
           (DerefBytesState.isAtFixpoint() && GlobalState.isAtFixpoint());
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    DerefBytesState.indicateOptimisticFixpoint();
    GlobalState.indicateOptimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    DerefBytesState.indicatePessimisticFixpoint();
    GlobalState.indicatePessimisticFixpoint();
    return ChangeStatus::CHANGED;
  }

  /// Update known dereferenceable bytes.
  void takeKnownDerefBytesMaximum(uint64_t Bytes) {
    DerefBytesState.takeKnownMaximum(Bytes);

    // Known bytes might increase.
    computeKnownDerefBytesFromAccessedMap();
  }

  /// Update assumed dereferenceable bytes.
  void takeAssumedDerefBytesMinimum(uint64_t Bytes) {
    DerefBytesState.takeAssumedMinimum(Bytes);
  }

  /// Add accessed bytes to the map.
  void addAccessedBytes(int64_t Offset, uint64_t Size) {
    uint64_t &AccessedBytes = AccessedBytesMap[Offset];
    AccessedBytes = std::max(AccessedBytes, Size);

    // Known bytes might increase.
    computeKnownDerefBytesFromAccessedMap();
  }

  /// Equality for DerefState.
  bool operator==(const DerefState &R) const {
    return this->DerefBytesState == R.DerefBytesState &&
           this->GlobalState == R.GlobalState;
  }

  /// Inequality for DerefState.
  bool operator!=(const DerefState &R) const { return !(*this == R); }

  /// See IntegerStateBase::operator^=
  DerefState operator^=(const DerefState &R) {
    DerefBytesState ^= R.DerefBytesState;
    GlobalState ^= R.GlobalState;
    return *this;
  }

  /// See IntegerStateBase::operator+=
  DerefState operator+=(const DerefState &R) {
    DerefBytesState += R.DerefBytesState;
    GlobalState += R.GlobalState;
    return *this;
  }

  /// See IntegerStateBase::operator&=
  DerefState operator&=(const DerefState &R) {
    DerefBytesState &= R.DerefBytesState;
    GlobalState &= R.GlobalState;
    return *this;
  }

  /// See IntegerStateBase::operator|=
  DerefState operator|=(const DerefState &R) {
    DerefBytesState |= R.DerefBytesState;
    GlobalState |= R.GlobalState;
    return *this;
  }
};

/// An abstract interface for all dereferenceable attributes.
struct AADereferenceable
    : public IRAttribute<Attribute::Dereferenceable,
                         StateWrapper<DerefState, AbstractAttribute>,
                         AADereferenceable> {
  AADereferenceable(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Return true if we assume that the underlying value is
  /// dereferenceable(_or_null) globally.
  bool isAssumedGlobal() const { return GlobalState.getAssumed(); }

  /// Return true if we know that the underlying value is
  /// dereferenceable(_or_null) globally.
  bool isKnownGlobal() const { return GlobalState.getKnown(); }

  /// Return assumed dereferenceable bytes.
  uint32_t getAssumedDereferenceableBytes() const {
    return DerefBytesState.getAssumed();
  }

  /// Return known dereferenceable bytes.
  uint32_t getKnownDereferenceableBytes() const {
    return DerefBytesState.getKnown();
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AADereferenceable &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AADereferenceable"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AADereferenceable
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

using AAAlignmentStateType =
    IncIntegerState<uint64_t, Value::MaximumAlignment, 1>;
/// An abstract interface for all align attributes.
struct AAAlign
    : public IRAttribute<Attribute::Alignment,
                         StateWrapper<AAAlignmentStateType, AbstractAttribute>,
                         AAAlign> {
  AAAlign(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Return assumed alignment.
  Align getAssumedAlign() const { return Align(getAssumed()); }

  /// Return known alignment.
  Align getKnownAlign() const { return Align(getKnown()); }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAlign"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAAlign
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface to track if a value leaves its defining function
/// instance.
/// TODO: We should make it a ternary AA tracking uniqueness, and uniqueness
/// wrt. the Attributor analysis separately.
struct AAInstanceInfo : public StateWrapper<BooleanState, AbstractAttribute> {
  AAInstanceInfo(const IRPosition &IRP, Attributor &A)
      : StateWrapper<BooleanState, AbstractAttribute>(IRP) {}

  /// Return true if we know that the underlying value is unique in its scope
  /// wrt. the Attributor analysis. That means it might not be unique but we can
  /// still use pointer equality without risking representing two instances
  /// with one `llvm::Value`.
  bool isKnownUniqueForAnalysis() const { return isKnown(); }

  /// Return true if we assume that the underlying value is unique in its scope
  /// wrt. the Attributor analysis. That means it might not be unique but we can
  /// still use pointer equality without risking representing two instances
  /// with one `llvm::Value`.
  bool isAssumedUniqueForAnalysis() const { return isAssumed(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAInstanceInfo &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAInstanceInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAInstanceInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all nocapture attributes.
struct AANoCapture
    : public IRAttribute<
          Attribute::NoCapture,
          StateWrapper<BitIntegerState<uint16_t, 7, 0>, AbstractAttribute>,
          AANoCapture> {
  AANoCapture(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// Update \p State according to the capture capabilities of \p F for position
  /// \p IRP.
  static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
                                                   const Function &F,
                                                   BitIntegerState &State);

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// State encoding bits. A set bit in the state means the property holds.
  /// NO_CAPTURE is the best possible state, 0 the worst possible state.
  enum {
    NOT_CAPTURED_IN_MEM = 1 << 0,
    NOT_CAPTURED_IN_INT = 1 << 1,
    NOT_CAPTURED_IN_RET = 1 << 2,

    /// If we do not capture the value in memory or through integers we can only
    /// communicate it back as a derived pointer.
    NO_CAPTURE_MAYBE_RETURNED = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT,

    /// If we do not capture the value in memory, through integers, or as a
    /// derived pointer we know it is not captured.
    NO_CAPTURE =
        NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
  };
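
  // An illustrative sketch (not part of this header) of the bit encoding
  // above, using a state with hypothetical values and the query helpers
  // defined right below.
#if 0
  void noCaptureBitsExample() {
    BitIntegerState<uint16_t, 7, 0> S;        // best state: NO_CAPTURE
    S.removeAssumedBits(NOT_CAPTURED_IN_RET); // value may escape via a return
    // Now S.isAssumed(NO_CAPTURE) is false, while
    // S.isAssumed(NO_CAPTURE_MAYBE_RETURNED) still holds.
  }
#endif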

  /// Return true if we know that the underlying value is not captured in its
  /// respective scope.
  bool isKnownNoCapture() const { return isKnown(NO_CAPTURE); }

  /// Return true if we assume that the underlying value is not captured in its
  /// respective scope.
  bool isAssumedNoCapture() const { return isAssumed(NO_CAPTURE); }

  /// Return true if we know that the underlying value is not captured in its
  /// respective scope but we allow it to escape through a "return".
  bool isKnownNoCaptureMaybeReturned() const {
    return isKnown(NO_CAPTURE_MAYBE_RETURNED);
  }

  /// Return true if we assume that the underlying value is not captured in its
  /// respective scope but we allow it to escape through a "return".
  bool isAssumedNoCaptureMaybeReturned() const {
    return isAssumed(NO_CAPTURE_MAYBE_RETURNED);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoCapture &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoCapture"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoCapture
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

struct ValueSimplifyStateType : public AbstractState {

  ValueSimplifyStateType(Type *Ty) : Ty(Ty) {}

  static ValueSimplifyStateType getBestState(Type *Ty) {
    return ValueSimplifyStateType(Ty);
  }
  static ValueSimplifyStateType getBestState(const ValueSimplifyStateType &VS) {
    return getBestState(VS.Ty);
  }

  /// Return the worst possible representable state.
  static ValueSimplifyStateType getWorstState(Type *Ty) {
    ValueSimplifyStateType DS(Ty);
    DS.indicatePessimisticFixpoint();
    return DS;
  }
  static ValueSimplifyStateType
  getWorstState(const ValueSimplifyStateType &VS) {
    return getWorstState(VS.Ty);
  }

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return BS.isValidState(); }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return BS.isAtFixpoint(); }

  /// Return the assumed state encoding.
  ValueSimplifyStateType getAssumed() { return *this; }
  const ValueSimplifyStateType &getAssumed() const { return *this; }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    return BS.indicatePessimisticFixpoint();
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    return BS.indicateOptimisticFixpoint();
  }

  /// "Clamp" this state with \p VS.
  ValueSimplifyStateType operator^=(const ValueSimplifyStateType &VS) {
    BS ^= VS.BS;
    unionAssumed(VS.SimplifiedAssociatedValue);
    return *this;
  }

  bool operator==(const ValueSimplifyStateType &RHS) const {
    if (isValidState() != RHS.isValidState())
      return false;
    if (!isValidState() && !RHS.isValidState())
      return true;
    return SimplifiedAssociatedValue == RHS.SimplifiedAssociatedValue;
  }

protected:
  /// The type of the original value.
  Type *Ty;

  /// Merge \p Other into the currently assumed simplified value
  bool unionAssumed(std::optional<Value *> Other);

  /// Helper to track validity and fixpoint
  BooleanState BS;

  /// An assumed simplified value. Initially, it is set to std::nullopt, which
  /// means that the value is not yet clear under the current assumptions. In
  /// the pessimistic state, getAssumedSimplifiedValue doesn't return this
  /// value but the original associated value.
  std::optional<Value *> SimplifiedAssociatedValue;
};

/// An abstract interface for value simplify abstract attribute.
struct AAValueSimplify
    : public StateWrapper<ValueSimplifyStateType, AbstractAttribute, Type *> {
  using Base = StateWrapper<ValueSimplifyStateType, AbstractAttribute, Type *>;
  AAValueSimplify(const IRPosition &IRP, Attributor &A)
      : Base(IRP, IRP.getAssociatedType()) {}

  /// Create an abstract attribute view for the position \p IRP.
  static AAValueSimplify &createForPosition(const IRPosition &IRP,
                                            Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAValueSimplify"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAValueSimplify
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

private:
  /// Return an assumed simplified value if a single candidate is found. If
  /// there cannot be one, return the original value. If it is not clear yet,
  /// return std::nullopt.
  ///
  /// Use `Attributor::getAssumedSimplified` for value simplification.
  virtual std::optional<Value *>
  getAssumedSimplifiedValue(Attributor &A) const = 0;

  friend struct Attributor;
};

struct AAHeapToStack : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAHeapToStack(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if HeapToStack conversion is assumed to be possible.
  virtual bool isAssumedHeapToStack(const CallBase &CB) const = 0;

  /// Returns true if HeapToStack conversion is assumed and \p CB is a
  /// call site of a free operation that is to be removed.
  virtual bool isAssumedHeapToStackRemovedFree(CallBase &CB) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAHeapToStack &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAHeapToStack"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAHeapToStack
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for privatizability.
///
/// A pointer is privatizable if it can be replaced by a new, private one.
/// Privatizing a pointer reduces its use count and the interaction between
/// unrelated code parts.
///
/// In order for a pointer to be privatizable its value cannot be observed
/// (=nocapture), it is (for now) not written (=readonly & noalias), we know
/// what values are necessary to make the private copy look like the original
/// one, and the values we need can be loaded (=dereferenceable).
struct AAPrivatizablePtr
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAPrivatizablePtr(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Returns true if pointer privatization is assumed to be possible.
  bool isAssumedPrivatizablePtr() const { return getAssumed(); }

  /// Returns true if pointer privatization is known to be possible.
  bool isKnownPrivatizablePtr() const { return getKnown(); }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Return the type we can choose for a private copy of the underlying
  /// value. std::nullopt means it is not clear yet, nullptr means there is
  /// none.
  virtual std::optional<Type *> getPrivatizableType() const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAPrivatizablePtr &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPrivatizablePtr"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPrivatizablePtr
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
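
// A hypothetical source-level illustration (not part of this header) of
// privatizability: a nocapture, readonly, dereferenceable pointer argument
// can conceptually be replaced by a private copy of the pointee.
#if 0
// Before: the callee only reads through the pointer.
int calleeBefore(const int *P) { return *P + 1; }
// After privatization (conceptually): the needed value travels by copy.
int calleeAfter(int V) { return V + 1; }
#endif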

/// An abstract interface for memory access kind related attributes
/// (readnone/readonly/writeonly).
struct AAMemoryBehavior
    : public IRAttribute<
          Attribute::ReadNone,
          StateWrapper<BitIntegerState<uint8_t, 3>, AbstractAttribute>,
          AAMemoryBehavior> {
  AAMemoryBehavior(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::hasTrivialInitializer.
  static bool hasTrivialInitializer() { return false; }

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// State encoding bits. A set bit in the state means the property holds.
  /// BEST_STATE is the best possible state, 0 the worst possible state.
  enum {
    NO_READS = 1 << 0,
    NO_WRITES = 1 << 1,
    NO_ACCESSES = NO_READS | NO_WRITES,

    BEST_STATE = NO_ACCESSES,
  };
  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");

  /// Return true if we know that the underlying value is neither read nor
  /// written in its respective scope.
  bool isKnownReadNone() const { return isKnown(NO_ACCESSES); }

  /// Return true if we assume that the underlying value is neither read nor
  /// written in its respective scope.
  bool isAssumedReadNone() const { return isAssumed(NO_ACCESSES); }

  /// Return true if we know that the underlying value is not written in its
  /// respective scope.
  bool isKnownReadOnly() const { return isKnown(NO_WRITES); }

  /// Return true if we assume that the underlying value is not written in its
  /// respective scope.
  bool isAssumedReadOnly() const { return isAssumed(NO_WRITES); }

  /// Return true if we know that the underlying value is not read in its
  /// respective scope.
  bool isKnownWriteOnly() const { return isKnown(NO_READS); }

  /// Return true if we assume that the underlying value is not read in its
  /// respective scope.
  bool isAssumedWriteOnly() const { return isAssumed(NO_READS); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAMemoryBehavior &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMemoryBehavior"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMemoryBehavior
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
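
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, and a
// function `F`); note that NO_ACCESSES implies both NO_READS and NO_WRITES:
//
//   const auto *MBAA = A.getAAFor<AAMemoryBehavior>(
//       QueryingAA, IRPosition::function(F), DepClassTy::OPTIONAL);
//   if (MBAA && MBAA->isAssumedReadOnly())
//     ; // no write is assumed, loads across uses of F can be reused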

/// An abstract interface for all memory location attributes
/// (readnone/argmemonly/inaccessiblememonly/inaccessibleorargmemonly).
struct AAMemoryLocation
    : public IRAttribute<
          Attribute::ReadNone,
          StateWrapper<BitIntegerState<uint32_t, 511>, AbstractAttribute>,
          AAMemoryLocation> {
  using MemoryLocationsKind = StateType::base_t;

  AAMemoryLocation(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::hasTrivialInitializer.
  static bool hasTrivialInitializer() { return false; }

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.isFunctionScope() &&
        !IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return IRAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// Encoding of different locations that could be accessed by a memory
  /// access.
  enum {
    ALL_LOCATIONS = 0,
    NO_LOCAL_MEM = 1 << 0,
    NO_CONST_MEM = 1 << 1,
    NO_GLOBAL_INTERNAL_MEM = 1 << 2,
    NO_GLOBAL_EXTERNAL_MEM = 1 << 3,
    NO_GLOBAL_MEM = NO_GLOBAL_INTERNAL_MEM | NO_GLOBAL_EXTERNAL_MEM,
    NO_ARGUMENT_MEM = 1 << 4,
    NO_INACCESSIBLE_MEM = 1 << 5,
    NO_MALLOCED_MEM = 1 << 6,
    NO_UNKOWN_MEM = 1 << 7,
    NO_LOCATIONS = NO_LOCAL_MEM | NO_CONST_MEM | NO_GLOBAL_INTERNAL_MEM |
                   NO_GLOBAL_EXTERNAL_MEM | NO_ARGUMENT_MEM |
                   NO_INACCESSIBLE_MEM | NO_MALLOCED_MEM | NO_UNKOWN_MEM,

    // Helper bit to track if we gave up or not.
    VALID_STATE = NO_LOCATIONS + 1,

    BEST_STATE = NO_LOCATIONS | VALID_STATE,
  };
  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");

  /// Return true if we know that the associated function has no observable
  /// accesses.
  bool isKnownReadNone() const { return isKnown(NO_LOCATIONS); }

  /// Return true if we assume that the associated function has no observable
  /// accesses.
  bool isAssumedReadNone() const {
    return isAssumed(NO_LOCATIONS) || isAssumedStackOnly();
  }

  /// Return true if we know that the associated function has at most
  /// local/stack accesses.
  bool isKnowStackOnly() const {
    return isKnown(inverseLocation(NO_LOCAL_MEM, true, true));
  }

  /// Return true if we assume that the associated function has at most
  /// local/stack accesses.
  bool isAssumedStackOnly() const {
    return isAssumed(inverseLocation(NO_LOCAL_MEM, true, true));
  }

  /// Return true if we know that the underlying value will only access
  /// inaccessible memory (see Attribute::InaccessibleMemOnly).
  bool isKnownInaccessibleMemOnly() const {
    return isKnown(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
  }

  /// Return true if we assume that the underlying value will only access
  /// inaccessible memory (see Attribute::InaccessibleMemOnly).
  bool isAssumedInaccessibleMemOnly() const {
    return isAssumed(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
  }

  /// Return true if we know that the underlying value will only access
  /// argument pointees (see Attribute::ArgMemOnly).
  bool isKnownArgMemOnly() const {
    return isKnown(inverseLocation(NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if we assume that the underlying value will only access
  /// argument pointees (see Attribute::ArgMemOnly).
  bool isAssumedArgMemOnly() const {
    return isAssumed(inverseLocation(NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if we know that the underlying value will only access
  /// inaccessible memory or argument pointees (see
  /// Attribute::InaccessibleOrArgMemOnly).
  bool isKnownInaccessibleOrArgMemOnly() const {
    return isKnown(
        inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if we assume that the underlying value will only access
  /// inaccessible memory or argument pointees (see
  /// Attribute::InaccessibleOrArgMemOnly).
  bool isAssumedInaccessibleOrArgMemOnly() const {
    return isAssumed(
        inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
  }

  /// Return true if the underlying value may access memory through argument
  /// pointers of the associated function, if any.
  bool mayAccessArgMem() const { return !isAssumed(NO_ARGUMENT_MEM); }

  /// Return true if only the memory locations specified by \p MLK are assumed
  /// to be accessed by the associated function.
  bool isAssumedSpecifiedMemOnly(MemoryLocationsKind MLK) const {
    return isAssumed(MLK);
  }

  /// Return the locations that are assumed to be not accessed by the associated
  /// function, if any.
  MemoryLocationsKind getAssumedNotAccessedLocation() const {
    return getAssumed();
  }

  /// Return the inverse of location \p Loc, thus for NO_XXX the return
  /// describes ONLY_XXX. The flags \p AndLocalMem and \p AndConstMem determine
  /// if local (=stack) and constant memory are allowed as well. Most of the
  /// time we do want them to be included, e.g., argmemonly allows accesses via
  /// argument pointers or local or constant memory accesses.
  static MemoryLocationsKind
  inverseLocation(MemoryLocationsKind Loc, bool AndLocalMem, bool AndConstMem) {
    return NO_LOCATIONS & ~(Loc | (AndLocalMem ? NO_LOCAL_MEM : 0) |
                            (AndConstMem ? NO_CONST_MEM : 0));
  }
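
  // For example, inverseLocation(NO_ARGUMENT_MEM, true, true) keeps all NO_*
  // bits except those for argument, local, and constant memory, i.e., it
  // encodes "argmemonly" (with stack and constant accesses still allowed).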

  /// Return the locations encoded by \p MLK as a readable string.
  static std::string getMemoryLocationsAsStr(MemoryLocationsKind MLK);

  /// Simple enum to distinguish read/write/read-write accesses.
  enum AccessKind {
    NONE = 0,
    READ = 1 << 0,
    WRITE = 1 << 1,
    READ_WRITE = READ | WRITE,
  };

  /// Check \p Pred on all accesses to the memory kinds specified by \p MLK.
  ///
  /// This method will evaluate \p Pred on all accesses (access instruction +
  /// underlying accessed memory pointer) and it will return true if \p Pred
  /// holds every time.
  virtual bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind MLK) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAMemoryLocation &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractState::getAsStr(Attributor).
  const std::string getAsStr(Attributor *A) const override {
    return getMemoryLocationsAsStr(getAssumedNotAccessedLocation());
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMemoryLocation"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMemoryLocation
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
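
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, `F`); the
// callback of checkForAllAccessesToMemoryKind sees each access matching the
// queried kinds and returns true to continue the traversal:
//
//   const auto *MLAA = A.getAAFor<AAMemoryLocation>(
//       QueryingAA, IRPosition::function(F), DepClassTy::OPTIONAL);
//   if (MLAA)
//     MLAA->checkForAllAccessesToMemoryKind(
//         [](const Instruction *I, const Value *Ptr,
//            AAMemoryLocation::AccessKind AK,
//            AAMemoryLocation::MemoryLocationsKind) {
//           return !(AK & AAMemoryLocation::WRITE); // bail on any write
//         },
//         AAMemoryLocation::NO_ARGUMENT_MEM);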

/// An abstract interface for range value analysis.
struct AAValueConstantRange
    : public StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t> {
  using Base = StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t>;
  AAValueConstantRange(const IRPosition &IRP, Attributor &A)
      : Base(IRP, IRP.getAssociatedType()->getIntegerBitWidth()) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isIntegerTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// See AbstractAttribute::getState(...).
  IntegerRangeState &getState() override { return *this; }
  const IntegerRangeState &getState() const override { return *this; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAValueConstantRange &createForPosition(const IRPosition &IRP,
                                                 Attributor &A);

  /// Return an assumed range for the associated value at a program point
  /// \p CtxI. If \p CtxI is nullptr, simply return an assumed range.
  virtual ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const = 0;

  /// Return a known range for the associated value at a program point \p CtxI.
  /// If \p CtxI is nullptr, simply return a known range.
  virtual ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const = 0;

  /// Return an assumed constant for the associated value at a program point
  /// \p CtxI.
  std::optional<Constant *>
  getAssumedConstant(Attributor &A, const Instruction *CtxI = nullptr) const {
    ConstantRange RangeV = getAssumedConstantRange(A, CtxI);
    if (auto *C = RangeV.getSingleElement()) {
      Type *Ty = getAssociatedValue().getType();
      return cast_or_null<Constant>(
          AA::getWithType(*ConstantInt::get(Ty->getContext(), *C), *Ty));
    }
    if (RangeV.isEmptySet())
      return std::nullopt;
    return nullptr;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAValueConstantRange"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAValueConstantRange
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
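
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, an integer
// SSA value `V`, and a context instruction `CtxI`):
//
//   const auto *RangeAA = A.getAAFor<AAValueConstantRange>(
//       QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
//   if (RangeAA) {
//     ConstantRange CR = RangeAA->getAssumedConstantRange(A, CtxI);
//     if (const APInt *C = CR.getSingleElement())
//       ; // V is assumed to equal the constant *C at CtxI
//   }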

/// A class for a set state.
/// The assumed boolean state indicates whether the corresponding set is the
/// full set or not. If the assumed state is false, this is the worst state.
/// The worst state (invalid state) of a set of potential values is when the
/// set contains every possible value (i.e., we cannot in any way limit the
/// value that the target position can take). That never happens naturally;
/// we only force it. For the conditions under which we force it, see
/// AAPotentialConstantValues.
template <typename MemberTy> struct PotentialValuesState : AbstractState {
  using SetTy = SmallSetVector<MemberTy, 8>;

  PotentialValuesState() : IsValidState(true), UndefIsContained(false) {}

  PotentialValuesState(bool IsValid)
      : IsValidState(IsValid), UndefIsContained(false) {}

  /// See AbstractState::isValidState(...)
  bool isValidState() const override { return IsValidState.isValidState(); }

  /// See AbstractState::isAtFixpoint(...)
  bool isAtFixpoint() const override { return IsValidState.isAtFixpoint(); }

  /// See AbstractState::indicatePessimisticFixpoint(...)
  ChangeStatus indicatePessimisticFixpoint() override {
    return IsValidState.indicatePessimisticFixpoint();
  }

  /// See AbstractState::indicateOptimisticFixpoint(...)
  ChangeStatus indicateOptimisticFixpoint() override {
    return IsValidState.indicateOptimisticFixpoint();
  }

  /// Return the assumed state
  PotentialValuesState &getAssumed() { return *this; }
  const PotentialValuesState &getAssumed() const { return *this; }

  /// Return this set. We should check whether this set is valid or not by
  /// isValidState() before calling this function.
  const SetTy &getAssumedSet() const {
    assert(isValidState() && "This set shoud not be used when it is invalid!");
    return Set;
  }

  /// Returns whether this state contains an undef value or not.
  bool undefIsContained() const {
    assert(isValidState() && "This flag shoud not be used when it is invalid!");
    return UndefIsContained;
  }

  bool operator==(const PotentialValuesState &RHS) const {
    if (isValidState() != RHS.isValidState())
      return false;
    if (!isValidState() && !RHS.isValidState())
      return true;
    if (undefIsContained() != RHS.undefIsContained())
      return false;
    return Set == RHS.getAssumedSet();
  }

  /// Maximum number of potential values to be tracked.
  /// This is set by the -attributor-max-potential-values command line option.
  static unsigned MaxPotentialValues;

  /// Return empty set as the best state of potential values.
  static PotentialValuesState getBestState() {
    return PotentialValuesState(true);
  }

  static PotentialValuesState getBestState(const PotentialValuesState &PVS) {
    return getBestState();
  }

  /// Return full set as the worst state of potential values.
  static PotentialValuesState getWorstState() {
    return PotentialValuesState(false);
  }

  /// Union assumed set with the passed value.
  void unionAssumed(const MemberTy &C) { insert(C); }

  /// Union assumed set with assumed set of the passed state \p PVS.
  void unionAssumed(const PotentialValuesState &PVS) { unionWith(PVS); }

  /// Union assumed set with an undef value.
  void unionAssumedWithUndef() { unionWithUndef(); }

  /// "Clamp" this state with \p PVS.
  PotentialValuesState operator^=(const PotentialValuesState &PVS) {
    IsValidState ^= PVS.IsValidState;
    unionAssumed(PVS);
    return *this;
  }

  PotentialValuesState operator&=(const PotentialValuesState &PVS) {
    IsValidState &= PVS.IsValidState;
    unionAssumed(PVS);
    return *this;
  }

  bool contains(const MemberTy &V) const {
    return !isValidState() ? true : Set.contains(V);
  }

protected:
  SetTy &getAssumedSet() {
    assert(isValidState() && "This set shoud not be used when it is invalid!");
    return Set;
  }

private:
  /// Check the size of this set, and invalidate this state when the size
  /// reaches the \p MaxPotentialValues threshold.
  void checkAndInvalidate() {
    if (Set.size() >= MaxPotentialValues)
      indicatePessimisticFixpoint();
    else
      reduceUndefValue();
  }

  /// If this state contains both undef and not undef, we can reduce
  /// undef to the not undef value.
  void reduceUndefValue() { UndefIsContained = UndefIsContained & Set.empty(); }

  /// Insert an element into this set.
  void insert(const MemberTy &C) {
    if (!isValidState())
      return;
    Set.insert(C);
    checkAndInvalidate();
  }

  /// Take union with R.
  void unionWith(const PotentialValuesState &R) {
    /// If this is a full set, do nothing.
    if (!isValidState())
      return;
    /// If R is full set, change L to a full set.
    if (!R.isValidState()) {
      indicatePessimisticFixpoint();
      return;
    }
    for (const MemberTy &C : R.Set)
      Set.insert(C);
    UndefIsContained |= R.undefIsContained();
    checkAndInvalidate();
  }

  /// Take union with an undef value.
  void unionWithUndef() {
    UndefIsContained = true;
    reduceUndefValue();
  }

  /// Take intersection with R.
  void intersectWith(const PotentialValuesState &R) {
    /// If R is a full set, do nothing.
    if (!R.isValidState())
      return;
    /// If this is a full set, change this to R.
    if (!isValidState()) {
      *this = R;
      return;
    }
    SetTy IntersectSet;
    for (const MemberTy &C : Set) {
      if (R.Set.count(C))
        IntersectSet.insert(C);
    }
    Set = IntersectSet;
    UndefIsContained &= R.undefIsContained();
    reduceUndefValue();
  }

  /// A helper state which indicates whether this state is valid or not.
  BooleanState IsValidState;

  /// Container for potential values
  SetTy Set;

  /// Flag for undef value
  bool UndefIsContained;
};
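
// Illustrative semantics sketch (assuming APInt members, as in
// PotentialConstantIntValuesState below): unions are element-wise, and the
// state collapses to the invalid full set once MaxPotentialValues is hit or
// a full-set state is merged in:
//
//   auto S = PotentialValuesState<APInt>::getBestState(); // {}
//   S.unionAssumed(APInt(32, 1));                         // {1}
//   S.unionAssumed(APInt(32, 2));                         // {1, 2}
//   S ^= PotentialValuesState<APInt>::getWorstState();    // invalid (full set)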

using PotentialConstantIntValuesState = PotentialValuesState<APInt>;
using PotentialLLVMValuesState =
    PotentialValuesState<std::pair<AA::ValueAndContext, AA::ValueScope>>;

raw_ostream &operator<<(raw_ostream &OS,
                        const PotentialConstantIntValuesState &R);
raw_ostream &operator<<(raw_ostream &OS, const PotentialLLVMValuesState &R);

/// An abstract interface for potential values analysis.
///
/// This AA collects potential values for each IR position.
/// An assumed set of potential values is initialized with the empty set (the
/// best state) and it will grow monotonically as we find more potential values
/// for this position.
/// The set might be forced to the worst state, that is, to contain every
/// possible value for this position in 2 cases.
///   1. We surpassed the \p MaxPotentialValues threshold. This includes the
///      case that this position is affected (e.g. because of an operation) by a
///      Value that is in the worst state.
///   2. We tried to initialize on a Value that we cannot handle (e.g. an
///      operator we do not currently handle).
///
/// For values other than constant integers, see AAPotentialValues.
struct AAPotentialConstantValues
    : public StateWrapper<PotentialConstantIntValuesState, AbstractAttribute> {
  using Base = StateWrapper<PotentialConstantIntValuesState, AbstractAttribute>;
  AAPotentialConstantValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isIntegerTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// See AbstractAttribute::getState(...).
  PotentialConstantIntValuesState &getState() override { return *this; }
  const PotentialConstantIntValuesState &getState() const override {
    return *this;
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAPotentialConstantValues &createForPosition(const IRPosition &IRP,
                                                      Attributor &A);

  /// Return assumed constant for the associated value
  std::optional<Constant *>
  getAssumedConstant(Attributor &A, const Instruction *CtxI = nullptr) const {
    if (!isValidState())
      return nullptr;
    if (getAssumedSet().size() == 1) {
      Type *Ty = getAssociatedValue().getType();
      return cast_or_null<Constant>(AA::getWithType(
          *ConstantInt::get(Ty->getContext(), *(getAssumedSet().begin())),
          *Ty));
    }
    if (getAssumedSet().size() == 0) {
      if (undefIsContained())
        return UndefValue::get(getAssociatedValue().getType());
      return std::nullopt;
    }

    return nullptr;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override {
    return "AAPotentialConstantValues";
  }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPotentialConstantValues
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
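
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, an integer
// value `V`); getAssumedConstant() folds the set into the usual tri-state:
//
//   const auto *PCVAA = A.getAAFor<AAPotentialConstantValues>(
//       QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
//   if (PCVAA) {
//     std::optional<Constant *> C = PCVAA->getAssumedConstant(A);
//     if (!C)
//       ; // nothing known yet (empty set), re-query later
//     else if (*C)
//       ; // V simplifies to the single constant (or undef) *C
//     else
//       ; // nullptr: multiple potential values, no simplification
//   }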

struct AAPotentialValues
    : public StateWrapper<PotentialLLVMValuesState, AbstractAttribute> {
  using Base = StateWrapper<PotentialLLVMValuesState, AbstractAttribute>;
  AAPotentialValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// See AbstractAttribute::getState(...).
  PotentialLLVMValuesState &getState() override { return *this; }
  const PotentialLLVMValuesState &getState() const override { return *this; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAPotentialValues &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// Extract the single value in \p Values if any.
  static Value *getSingleValue(Attributor &A, const AbstractAttribute &AA,
                               const IRPosition &IRP,
                               SmallVectorImpl<AA::ValueAndContext> &Values);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPotentialValues"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPotentialValues
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

private:
  virtual bool getAssumedSimplifiedValues(
      Attributor &A, SmallVectorImpl<AA::ValueAndContext> &Values,
      AA::ValueScope, bool RecurseForSelectAndPHI = false) const = 0;

  friend struct Attributor;
};

/// An abstract interface for all noundef attributes.
struct AANoUndef
    : public IRAttribute<Attribute::NoUndef,
                         StateWrapper<BooleanState, AbstractAttribute>,
                         AANoUndef> {
  AANoUndef(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See IRAttribute::isImpliedByUndef
  static bool isImpliedByUndef() { return false; }

  /// See IRAttribute::isImpliedByPoison
  static bool isImpliedByPoison() { return false; }

  /// See IRAttribute::isImpliedByIR
  static bool isImpliedByIR(Attributor &A, const IRPosition &IRP,
                            Attribute::AttrKind ImpliedAttributeKind,
                            bool IgnoreSubsumingPositions = false);

  /// Return true if we assume that the underlying value is noundef.
  bool isAssumedNoUndef() const { return getAssumed(); }

  /// Return true if we know that underlying value is noundef.
  bool isKnownNoUndef() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoUndef &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoUndef"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoUndef
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

struct AANoFPClass
    : public IRAttribute<
          Attribute::NoFPClass,
          StateWrapper<BitIntegerState<uint32_t, fcAllFlags, fcNone>,
                       AbstractAttribute>,
          AANoFPClass> {
  using Base = StateWrapper<BitIntegerState<uint32_t, fcAllFlags, fcNone>,
                            AbstractAttribute>;

  AANoFPClass(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    Type *Ty = IRP.getAssociatedType();
    do {
      if (Ty->isFPOrFPVectorTy())
        return IRAttribute::isValidIRPositionForInit(A, IRP);
      if (!Ty->isArrayTy())
        break;
      Ty = Ty->getArrayElementType();
    } while (true);
    return false;
  }

  /// Return the floating-point classes the underlying value is assumed not to
  /// take (see Attribute::NoFPClass).
  FPClassTest getAssumedNoFPClass() const {
    return static_cast<FPClassTest>(getAssumed());
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoFPClass &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoFPClass"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoFPClass
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

struct AACallGraphNode;
struct AACallEdges;

/// An iterator for call edges that creates AACallEdges attributes lazily.
/// This iterator becomes invalid if the underlying edge list changes, so it
/// should not outlive an iteration of the Attributor.
class AACallEdgeIterator
    : public iterator_adaptor_base<AACallEdgeIterator,
                                   SetVector<Function *>::iterator> {
  AACallEdgeIterator(Attributor &A, SetVector<Function *>::iterator Begin)
      : iterator_adaptor_base(Begin), A(A) {}

public:
  AACallGraphNode *operator*() const;

private:
  Attributor &A;
  friend AACallEdges;
  friend AttributorCallGraph;
};

struct AACallGraphNode {
  AACallGraphNode(Attributor &A) : A(A) {}
  virtual ~AACallGraphNode() = default;

  virtual AACallEdgeIterator optimisticEdgesBegin() const = 0;
  virtual AACallEdgeIterator optimisticEdgesEnd() const = 0;

  /// Iterator range for exploring the call graph.
  iterator_range<AACallEdgeIterator> optimisticEdgesRange() const {
    return iterator_range<AACallEdgeIterator>(optimisticEdgesBegin(),
                                              optimisticEdgesEnd());
  }

protected:
  /// Reference to Attributor needed for GraphTraits implementation.
  Attributor &A;
};

/// An abstract state for querying live call edges.
/// This interface uses the Attributor's optimistic liveness
/// information to compute the edges that are alive.
struct AACallEdges : public StateWrapper<BooleanState, AbstractAttribute>,
                     AACallGraphNode {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AACallEdges(const IRPosition &IRP, Attributor &A)
      : Base(IRP), AACallGraphNode(A) {}

  /// The callee value is tracked beyond a simple stripPointerCasts, so we allow
  /// unknown callees.
  static bool requiresCalleeForCallBase() { return false; }

  /// Get the optimistic edges.
  virtual const SetVector<Function *> &getOptimisticEdges() const = 0;

  /// Return true if there is any call with an unknown callee.
  virtual bool hasUnknownCallee() const = 0;

  /// Return true if there is any call with an unknown callee, excluding
  /// inline asm.
  virtual bool hasNonAsmUnknownCallee() const = 0;

  /// Iterator for exploring the call graph.
  AACallEdgeIterator optimisticEdgesBegin() const override {
    return AACallEdgeIterator(A, getOptimisticEdges().begin());
  }

  /// Iterator for exploring the call graph.
  AACallEdgeIterator optimisticEdgesEnd() const override {
    return AACallEdgeIterator(A, getOptimisticEdges().end());
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AACallEdges &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AACallEdges"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AACallEdges.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
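
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, `F`); the
// optimistic edge set only covers identified callees, so check the
// unknown-callee flags before treating it as complete:
//
//   const auto *EdgesAA = A.getAAFor<AACallEdges>(
//       QueryingAA, IRPosition::function(F), DepClassTy::OPTIONAL);
//   if (EdgesAA && !EdgesAA->hasUnknownCallee())
//     for (Function *Callee : EdgesAA->getOptimisticEdges())
//       ; // every function F may call is visited here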

/// Synthetic root node for the Attributor's internal call graph.
struct AttributorCallGraph : public AACallGraphNode {
  AttributorCallGraph(Attributor &A) : AACallGraphNode(A) {}
  virtual ~AttributorCallGraph() = default;

  AACallEdgeIterator optimisticEdgesBegin() const override {
    return AACallEdgeIterator(A, A.Functions.begin());
  }

  AACallEdgeIterator optimisticEdgesEnd() const override {
    return AACallEdgeIterator(A, A.Functions.end());
  }

  /// Force populate the entire call graph.
  void populateAll() const {
    for (const AACallGraphNode *AA : optimisticEdgesRange()) {
      // Nothing else to do here.
      (void)AA;
    }
  }

  void print();
};

template <> struct GraphTraits<AACallGraphNode *> {
  using NodeRef = AACallGraphNode *;
  using ChildIteratorType = AACallEdgeIterator;

  static AACallEdgeIterator child_begin(AACallGraphNode *Node) {
    return Node->optimisticEdgesBegin();
  }

  static AACallEdgeIterator child_end(AACallGraphNode *Node) {
    return Node->optimisticEdgesEnd();
  }
};

template <>
struct GraphTraits<AttributorCallGraph *>
    : public GraphTraits<AACallGraphNode *> {
  using nodes_iterator = AACallEdgeIterator;

  static AACallGraphNode *getEntryNode(AttributorCallGraph *G) {
    return static_cast<AACallGraphNode *>(G);
  }

  static AACallEdgeIterator nodes_begin(const AttributorCallGraph *G) {
    return G->optimisticEdgesBegin();
  }

  static AACallEdgeIterator nodes_end(const AttributorCallGraph *G) {
    return G->optimisticEdgesEnd();
  }
};

template <>
struct DOTGraphTraits<AttributorCallGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool Simple = false) : DefaultDOTGraphTraits(Simple) {}

  std::string getNodeLabel(const AACallGraphNode *Node,
                           const AttributorCallGraph *Graph) {
    const AACallEdges *AACE = static_cast<const AACallEdges *>(Node);
    return AACE->getAssociatedFunction()->getName().str();
  }

  static bool isNodeHidden(const AACallGraphNode *Node,
                           const AttributorCallGraph *Graph) {
    // Hide the synth root.
    return static_cast<const AACallGraphNode *>(Graph) == Node;
  }
};
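
// Illustrative usage sketch: with the GraphTraits/DOTGraphTraits
// specializations above, the lazily built call graph can be forced and
// printed after an Attributor run (assuming `A` is that Attributor):
//
//   AttributorCallGraph ACG(A);
//   ACG.populateAll(); // force creation of all AACallEdges
//   ACG.print();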

struct AAExecutionDomain
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAExecutionDomain(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Summary about the execution domain of a block or instruction.
  struct ExecutionDomainTy {
    using BarriersSetTy = SmallPtrSet<CallBase *, 2>;
    using AssumesSetTy = SmallPtrSet<AssumeInst *, 4>;

    void addAssumeInst(Attributor &A, AssumeInst &AI) {
      EncounteredAssumes.insert(&AI);
    }

    void addAlignedBarrier(Attributor &A, CallBase &CB) {
      AlignedBarriers.insert(&CB);
    }

    void clearAssumeInstAndAlignedBarriers() {
      EncounteredAssumes.clear();
      AlignedBarriers.clear();
    }

    bool IsExecutedByInitialThreadOnly = true;
    bool IsReachedFromAlignedBarrierOnly = true;
    bool IsReachingAlignedBarrierOnly = true;
    bool EncounteredNonLocalSideEffect = false;
    BarriersSetTy AlignedBarriers;
    AssumesSetTy EncounteredAssumes;
  };

  /// Create an abstract attribute view for the position \p IRP.
  static AAExecutionDomain &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName().
  const std::string getName() const override { return "AAExecutionDomain"; }

  /// See AbstractAttribute::getIdAddr().
  const char *getIdAddr() const override { return &ID; }

  /// Check if an instruction is executed only by the initial thread.
  bool isExecutedByInitialThreadOnly(const Instruction &I) const {
    return isExecutedByInitialThreadOnly(*I.getParent());
  }

  /// Check if a basic block is executed only by the initial thread.
  virtual bool isExecutedByInitialThreadOnly(const BasicBlock &) const = 0;

  /// Check if the instruction \p I is executed in an aligned region, that is,
  /// the synchronizing effects before and after \p I are both aligned barriers.
  /// This effectively means all threads execute \p I together.
  virtual bool isExecutedInAlignedRegion(Attributor &A,
                                         const Instruction &I) const = 0;

  virtual ExecutionDomainTy getExecutionDomain(const BasicBlock &) const = 0;
  /// Return the execution domain with which the call \p CB is entered and the
  /// one with which it is left.
  virtual std::pair<ExecutionDomainTy, ExecutionDomainTy>
  getExecutionDomain(const CallBase &CB) const = 0;
  virtual ExecutionDomainTy getFunctionExecutionDomain() const = 0;

  /// Helper function to determine if \p FI is a no-op given the information
  /// about its execution from \p ExecDomainAA.
  virtual bool isNoOpFence(const FenceInst &FI) const = 0;

  /// This function should return true if the type of the \p AA is
  /// AAExecutionDomain.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
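
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, and an
// instruction `I` in a GPU kernel); used, e.g., to guard transformations
// that are only sound for single-threaded execution:
//
//   const auto *EDAA = A.getAAFor<AAExecutionDomain>(
//       QueryingAA, IRPosition::function(*I.getFunction()),
//       DepClassTy::OPTIONAL);
//   if (EDAA && EDAA->isExecutedByInitialThreadOnly(I))
//     ; // I is never executed concurrently by other threads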

/// An abstract Attribute for computing reachability between functions.
struct AAInterFnReachability
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AAInterFnReachability(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Return true if the function represented by this position can reach \p Fn.
  bool canReach(Attributor &A, const Function &Fn) const {
    Function *Scope = getAnchorScope();
    if (!Scope || Scope->isDeclaration())
      return true;
    return instructionCanReach(A, Scope->getEntryBlock().front(), Fn);
  }

  /// Return true if \p Inst can reach \p Fn.
  /// See also AA::isPotentiallyReachable.
  virtual bool instructionCanReach(
      Attributor &A, const Instruction &Inst, const Function &Fn,
      const AA::InstExclusionSetTy *ExclusionSet = nullptr,
      SmallPtrSet<const Function *, 16> *Visited = nullptr) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAInterFnReachability &createForPosition(const IRPosition &IRP,
                                                  Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAInterFnReachability"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAInterFnReachability.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
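
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, an
// instruction `I`, and a function `Target`):
//
//   const auto *ReachAA = A.getAAFor<AAInterFnReachability>(
//       QueryingAA, IRPosition::function(*I.getFunction()),
//       DepClassTy::OPTIONAL);
//   if (ReachAA && !ReachAA->instructionCanReach(A, I, Target))
//     ; // no call chain starting at I ever enters Target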

/// An abstract Attribute for determining the necessity of the convergent
/// attribute.
struct AANonConvergent : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;

  AANonConvergent(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Create an abstract attribute view for the position \p IRP.
  static AANonConvergent &createForPosition(const IRPosition &IRP,
                                            Attributor &A);

  /// Return true if "non-convergent" is assumed.
  bool isAssumedNotConvergent() const { return getAssumed(); }

  /// Return true if "non-convergent" is known.
  bool isKnownNotConvergent() const { return getKnown(); }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANonConvergent"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AANonConvergent.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for information about the accesses performed
/// through a pointer.
struct AAPointerInfo : public AbstractAttribute {
  AAPointerInfo(const IRPosition &IRP) : AbstractAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  enum AccessKind {
    // First two bits to distinguish may and must accesses.
    AK_MUST = 1 << 0,
    AK_MAY = 1 << 1,

    // Then two bits for read and write. These are not exclusive.
    AK_R = 1 << 2,
    AK_W = 1 << 3,
    AK_RW = AK_R | AK_W,

    // One special case for assumptions about memory content. These
    // are neither reads nor writes. They are however always modeled
    // as read to avoid using them for write removal.
    AK_ASSUMPTION = (1 << 4) | AK_MUST,

    // Helper for easy access.
    AK_MAY_READ = AK_MAY | AK_R,
    AK_MAY_WRITE = AK_MAY | AK_W,
    AK_MAY_READ_WRITE = AK_MAY | AK_R | AK_W,
    AK_MUST_READ = AK_MUST | AK_R,
    AK_MUST_WRITE = AK_MUST | AK_W,
    AK_MUST_READ_WRITE = AK_MUST | AK_R | AK_W,
  };

  /// A container for a list of ranges.
  struct RangeList {
    // The set of ranges rarely contains more than one element, and is unlikely
    // to contain more than say four elements. So we find the middle-ground with
    // a sorted vector. This avoids hard-coding a rarely used number like "four"
    // into every instance of a SmallSet.
    using RangeTy = AA::RangeTy;
    using VecTy = SmallVector<RangeTy>;
    using iterator = VecTy::iterator;
    using const_iterator = VecTy::const_iterator;
    VecTy Ranges;

    RangeList(const RangeTy &R) { Ranges.push_back(R); }
    RangeList(ArrayRef<int64_t> Offsets, int64_t Size) {
      Ranges.reserve(Offsets.size());
      for (unsigned i = 0, e = Offsets.size(); i != e; ++i) {
        assert(((i + 1 == e) || Offsets[i] < Offsets[i + 1]) &&
               "Expected strictly ascending offsets.");
        Ranges.emplace_back(Offsets[i], Size);
      }
    }
    RangeList() = default;

    iterator begin() { return Ranges.begin(); }
    iterator end() { return Ranges.end(); }
    const_iterator begin() const { return Ranges.begin(); }
    const_iterator end() const { return Ranges.end(); }

    // Helpers required for std::set_difference
    using value_type = RangeTy;
    void push_back(const RangeTy &R) {
      assert((Ranges.empty() || RangeTy::OffsetLessThan(Ranges.back(), R)) &&
             "Ensure the last element is the greatest.");
      Ranges.push_back(R);
    }

    /// Copy ranges from \p L that are not in \p R, into \p D.
    static void set_difference(const RangeList &L, const RangeList &R,
                               RangeList &D) {
      std::set_difference(L.begin(), L.end(), R.begin(), R.end(),
                          std::back_inserter(D), RangeTy::OffsetLessThan);
    }

    unsigned size() const { return Ranges.size(); }

    bool operator==(const RangeList &OI) const { return Ranges == OI.Ranges; }

    /// Merge the ranges in \p RHS into the current ranges.
    /// - Merging a list of unknown ranges makes the current list unknown.
    /// - Ranges with the same offset are merged according to RangeTy::operator&
    /// \return true if the current RangeList changed.
    bool merge(const RangeList &RHS) {
      if (isUnknown())
        return false;
      if (RHS.isUnknown()) {
        setUnknown();
        return true;
      }

      if (Ranges.empty()) {
        Ranges = RHS.Ranges;
        return true;
      }

      bool Changed = false;
      auto LPos = Ranges.begin();
      for (auto &R : RHS.Ranges) {
        auto Result = insert(LPos, R);
        if (isUnknown())
          return true;
        LPos = Result.first;
        Changed |= Result.second;
      }
      return Changed;
    }

    /// Insert \p R at the given iterator \p Pos, and merge if necessary.
    ///
    /// This assumes that all ranges before \p Pos are OffsetLessThan \p R, and
    /// then maintains the sorted order for the suffix list.
    ///
    /// \return The place of insertion and true iff anything changed.
    std::pair<iterator, bool> insert(iterator Pos, const RangeTy &R) {
      if (isUnknown())
        return std::make_pair(Ranges.begin(), false);
      if (R.offsetOrSizeAreUnknown()) {
        return std::make_pair(setUnknown(), true);
      }

      // Maintain this as a sorted vector of unique entries.
      auto LB = std::lower_bound(Pos, Ranges.end(), R, RangeTy::OffsetLessThan);
      if (LB == Ranges.end() || LB->Offset != R.Offset)
        return std::make_pair(Ranges.insert(LB, R), true);
      bool Changed = *LB != R;
      *LB &= R;
      if (LB->offsetOrSizeAreUnknown())
        return std::make_pair(setUnknown(), true);
      return std::make_pair(LB, Changed);
    }

    /// Insert the given range \p R, maintaining sorted order.
    ///
    /// \return The place of insertion and true iff anything changed.
    std::pair<iterator, bool> insert(const RangeTy &R) {
      return insert(Ranges.begin(), R);
    }

    /// Add the increment \p Inc to the offset of every range.
    void addToAllOffsets(int64_t Inc) {
      assert(!isUnassigned() &&
             "Cannot increment if the offset is not yet computed!");
      if (isUnknown())
        return;
      for (auto &R : Ranges) {
        R.Offset += Inc;
      }
    }

    /// Return true iff there is exactly one range and it is known.
    bool isUnique() const {
      return Ranges.size() == 1 && !Ranges.front().offsetOrSizeAreUnknown();
    }

    /// Return the unique range, assuming it exists.
    const RangeTy &getUnique() const {
      assert(isUnique() && "No unique range to return!");
      return Ranges.front();
    }

    /// Return true iff the list contains an unknown range.
    bool isUnknown() const {
      if (isUnassigned())
        return false;
      if (Ranges.front().offsetOrSizeAreUnknown()) {
        assert(Ranges.size() == 1 && "Unknown is a singleton range.");
        return true;
      }
      return false;
    }

    /// Discard all ranges and insert a single unknown range.
    iterator setUnknown() {
      Ranges.clear();
      Ranges.push_back(RangeTy::getUnknown());
      return Ranges.begin();
    }

    /// Return true if no ranges have been inserted.
    bool isUnassigned() const { return Ranges.size() == 0; }
  };

  /// An access description.
  struct Access {
    Access(Instruction *I, int64_t Offset, int64_t Size,
           std::optional<Value *> Content, AccessKind Kind, Type *Ty)
        : LocalI(I), RemoteI(I), Content(Content), Ranges(Offset, Size),
          Kind(Kind), Ty(Ty) {
      verify();
    }
    Access(Instruction *LocalI, Instruction *RemoteI, const RangeList &Ranges,
           std::optional<Value *> Content, AccessKind K, Type *Ty)
        : LocalI(LocalI), RemoteI(RemoteI), Content(Content), Ranges(Ranges),
          Kind(K), Ty(Ty) {
      if (Ranges.size() > 1) {
        Kind = AccessKind(Kind | AK_MAY);
        Kind = AccessKind(Kind & ~AK_MUST);
      }
      verify();
    }
    Access(Instruction *LocalI, Instruction *RemoteI, int64_t Offset,
           int64_t Size, std::optional<Value *> Content, AccessKind Kind,
           Type *Ty)
        : LocalI(LocalI), RemoteI(RemoteI), Content(Content),
          Ranges(Offset, Size), Kind(Kind), Ty(Ty) {
      verify();
    }
    Access(const Access &Other) = default;

    Access &operator=(const Access &Other) = default;
    bool operator==(const Access &R) const {
      return LocalI == R.LocalI && RemoteI == R.RemoteI && Ranges == R.Ranges &&
             Content == R.Content && Kind == R.Kind;
    }
    bool operator!=(const Access &R) const { return !(*this == R); }

    Access &operator&=(const Access &R) {
      assert(RemoteI == R.RemoteI && "Expected same instruction!");
      assert(LocalI == R.LocalI && "Expected same instruction!");

      // Note that every Access object corresponds to a unique Value, and only
      // accesses to the same Value are merged. Hence we assume that all ranges
      // are the same size. If ranges can have different sizes, then the contents
      // must be dropped.
      Ranges.merge(R.Ranges);
      Content =
          AA::combineOptionalValuesInAAValueLatice(Content, R.Content, Ty);

      // Combine the access kind, which results in a bitwise union.
      // If there is more than one range, then this must be a MAY.
      // If we combine a may and a must access we clear the must bit.
      Kind = AccessKind(Kind | R.Kind);
      if ((Kind & AK_MAY) || Ranges.size() > 1) {
        Kind = AccessKind(Kind | AK_MAY);
        Kind = AccessKind(Kind & ~AK_MUST);
      }
      verify();
      return *this;
    }

    void verify() {
      assert(isMustAccess() + isMayAccess() == 1 &&
             "Expect must or may access, not both.");
      assert(isAssumption() + isWrite() <= 1 &&
             "Expect assumption access or write access, never both.");
      assert((isMayAccess() || Ranges.size() == 1) &&
             "Cannot be a must access if there are multiple ranges.");
    }

    /// Return the access kind.
    AccessKind getKind() const { return Kind; }

    /// Return true if this is a read access.
    bool isRead() const { return Kind & AK_R; }

    /// Return true if this is a write access.
    bool isWrite() const { return Kind & AK_W; }

    /// Return true if this is a write access.
    bool isWriteOrAssumption() const { return isWrite() || isAssumption(); }

    /// Return true if this is an assumption access.
    bool isAssumption() const { return Kind == AK_ASSUMPTION; }

    bool isMustAccess() const {
      bool MustAccess = Kind & AK_MUST;
      assert((!MustAccess || Ranges.size() < 2) &&
             "Cannot be a must access if there are multiple ranges.");
      return MustAccess;
    }

    bool isMayAccess() const {
      bool MayAccess = Kind & AK_MAY;
      assert((MayAccess || Ranges.size() < 2) &&
             "Cannot be a must access if there are multiple ranges.");
      return MayAccess;
    }

    /// Return the instruction that causes the access with respect to the local
    /// scope of the associated attribute.
    Instruction *getLocalInst() const { return LocalI; }

    /// Return the actual instruction that causes the access.
    Instruction *getRemoteInst() const { return RemoteI; }

    /// Return true if the value written is not known yet.
    bool isWrittenValueYetUndetermined() const { return !Content; }

    /// Return true if the value written cannot be determined at all.
    bool isWrittenValueUnknown() const {
      return Content.has_value() && !*Content;
    }

    /// Set the value written to nullptr, i.e., unknown.
    void setWrittenValueUnknown() { Content = nullptr; }

    /// Return the type associated with the access, if known.
    Type *getType() const { return Ty; }

    /// Return the value written, if any.
    Value *getWrittenValue() const {
      assert(!isWrittenValueYetUndetermined() &&
             "Value needs to be determined before accessing it.");
      return *Content;
    }

    /// Return the written value: `std::nullopt` if it is not yet determined,
    /// `nullptr` if it cannot be determined at all.
    std::optional<Value *> getContent() const { return Content; }

    bool hasUniqueRange() const { return Ranges.isUnique(); }
    const AA::RangeTy &getUniqueRange() const { return Ranges.getUnique(); }

    /// Add a range accessed by this Access.
    ///
    /// If there are multiple ranges, then this is a "may access".
    void addRange(int64_t Offset, int64_t Size) {
      Ranges.insert({Offset, Size});
      if (!hasUniqueRange()) {
        Kind = AccessKind(Kind | AK_MAY);
        Kind = AccessKind(Kind & ~AK_MUST);
      }
    }

    const RangeList &getRanges() const { return Ranges; }

    using const_iterator = RangeList::const_iterator;
    const_iterator begin() const { return Ranges.begin(); }
    const_iterator end() const { return Ranges.end(); }

  private:
    /// The instruction responsible for the access with respect to the local
    /// scope of the associated attribute.
    Instruction *LocalI;

    /// The instruction responsible for the access.
    Instruction *RemoteI;

    /// The value written, if any. `std::nullopt` means "not known yet",
    /// `nullptr` cannot be determined.
    std::optional<Value *> Content;

    /// Set of potential ranges accessed from the base pointer.
    RangeList Ranges;

    /// The access kind, e.g., READ, as bitset (could be more than one).
    AccessKind Kind;

    /// The type of the content, thus the type read/written, can be null if not
    /// available.
    Type *Ty;
  };

  /// Create an abstract attribute view for the position \p IRP.
  static AAPointerInfo &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPointerInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// Call \p CB on all accesses that might interfere with \p Range and return
  /// true if all such accesses were known and the callback returned true for
  /// all of them, false otherwise. An access interferes with an offset-size
  /// pair if it might read or write that memory region.
  virtual bool forallInterferingAccesses(
      AA::RangeTy Range, function_ref<bool(const Access &, bool)> CB) const = 0;

  /// Call \p CB on all accesses that might interfere with \p I and
  /// return true if all such accesses were known and the callback returned true
  /// for all of them, false otherwise. In contrast to forallInterferingAccesses
  /// this function will perform reasoning to exclude write accesses that cannot
  /// affect the load even if they on the surface look as if they would. The
  /// flag \p HasBeenWrittenTo will be set to true if we know that \p I does not
  /// read the initial value of the underlying memory.
  virtual bool forallInterferingAccesses(
      Attributor &A, const AbstractAttribute &QueryingAA, Instruction &I,
      bool FindInterferingWrites, bool FindInterferingReads,
      function_ref<bool(const Access &, bool)> CB, bool &HasBeenWrittenTo,
      AA::RangeTy &Range) const = 0;

  /// This function should return true if the type of the \p AA is AAPointerInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};
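
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, a pointer
// value `Ptr`); enumerate all accesses that may touch the first four bytes
// of the pointed-to object:
//
//   const auto *PIAA = A.getAAFor<AAPointerInfo>(
//       QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
//   bool AllKnown =
//       PIAA && PIAA->forallInterferingAccesses(
//                   AA::RangeTy(/*Offset=*/0, /*Size=*/4),
//                   [](const AAPointerInfo::Access &Acc, bool IsExact) {
//                     return !Acc.isWrite(); // bail on any potential write
//                   });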

/// An abstract attribute for getting assumption information.
struct AAAssumptionInfo
    : public StateWrapper<SetState<StringRef>, AbstractAttribute,
                          DenseSet<StringRef>> {
  using Base =
      StateWrapper<SetState<StringRef>, AbstractAttribute, DenseSet<StringRef>>;

  AAAssumptionInfo(const IRPosition &IRP, Attributor &A,
                   const DenseSet<StringRef> &Known)
      : Base(IRP, Known) {}

  /// Returns true if the assumption set contains the assumption \p Assumption.
  virtual bool hasAssumption(const StringRef Assumption) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAAssumptionInfo &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAssumptionInfo"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAAssumptionInfo
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for getting all assumption underlying objects.
struct AAUnderlyingObjects : AbstractAttribute {
  AAUnderlyingObjects(const IRPosition &IRP) : AbstractAttribute(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAUnderlyingObjects &createForPosition(const IRPosition &IRP,
                                                Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAUnderlyingObjects"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAUnderlyingObjects.
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;

  /// Check \p Pred on all underlying objects in \p Scope collected so far.
  ///
  /// This method will evaluate \p Pred on all underlying objects in \p Scope
  /// collected so far and return true if \p Pred holds on all of them.
  virtual bool
  forallUnderlyingObjects(function_ref<bool(Value &)> Pred,
                          AA::ValueScope Scope = AA::Interprocedural) const = 0;
};
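
// Illustrative usage sketch (assumed context: `A`, `QueryingAA`, a pointer
// value `Ptr`); check that every underlying object collected so far is an
// alloca:
//
//   const auto *UOAA = A.getAAFor<AAUnderlyingObjects>(
//       QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
//   bool OnlyAllocas = UOAA && UOAA->forallUnderlyingObjects(
//       [](Value &Obj) { return isa<AllocaInst>(Obj); });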

/// An abstract interface for address space information.
struct AAAddressSpace : public StateWrapper<BooleanState, AbstractAttribute> {
  AAAddressSpace(const IRPosition &IRP, Attributor &A)
      : StateWrapper<BooleanState, AbstractAttribute>(IRP) {}

  /// See AbstractAttribute::isValidIRPositionForInit
  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
      return false;
    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
  }

  /// See AbstractAttribute::requiresCallersForArgOrFunction
  static bool requiresCallersForArgOrFunction() { return true; }

  /// Return the address space of the associated value. \p NoAddressSpace is
  /// returned if the associated value is dead. This function is not supposed
  /// to be called if the AA is invalid.
  virtual int32_t getAddressSpace() const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAAddressSpace &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAddressSpace"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAAddressSpace
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Sentinel address space value indicating the associated value is dead.
  static const int32_t NoAddressSpace = -1;

  /// Unique ID (due to the unique address)
  static const char ID;
};

raw_ostream &operator<<(raw_ostream &, const AAPointerInfo::Access &);

/// Run options, used by the pass manager.
enum AttributorRunOption {
  NONE = 0,
  MODULE = 1 << 0,
  CGSCC = 1 << 1,
  ALL = MODULE | CGSCC
};

namespace AA {
/// Helper to avoid creating an AA for IR Attributes that might already be set.
template <Attribute::AttrKind AK, typename AAType = AbstractAttribute>
bool hasAssumedIRAttr(Attributor &A, const AbstractAttribute *QueryingAA,
                      const IRPosition &IRP, DepClassTy DepClass, bool &IsKnown,
                      bool IgnoreSubsumingPositions = false,
                      const AAType **AAPtr = nullptr) {
  IsKnown = false;
  switch (AK) {
#define CASE(ATTRNAME, AANAME, ...)                                            \
  case Attribute::ATTRNAME: {                                                  \
    if (AANAME::isImpliedByIR(A, IRP, AK, IgnoreSubsumingPositions))           \
      return IsKnown = true;                                                   \
    if (!QueryingAA)                                                           \
      return false;                                                            \
    const auto *AA = A.getAAFor<AANAME>(*QueryingAA, IRP, DepClass);           \
    if (AAPtr)                                                                 \
      *AAPtr = reinterpret_cast<const AAType *>(AA);                           \
    if (!AA || !AA->isAssumed(__VA_ARGS__))                                    \
      return false;                                                            \
    IsKnown = AA->isKnown(__VA_ARGS__);                                        \
    return true;                                                               \
  }
    CASE(NoUnwind, AANoUnwind, );
    CASE(WillReturn, AAWillReturn, );
    CASE(NoFree, AANoFree, );
    CASE(NoCapture, AANoCapture, );
    CASE(NoRecurse, AANoRecurse, );
    CASE(NoReturn, AANoReturn, );
    CASE(NoSync, AANoSync, );
    CASE(NoAlias, AANoAlias, );
    CASE(NonNull, AANonNull, );
    CASE(MustProgress, AAMustProgress, );
    CASE(NoUndef, AANoUndef, );
    CASE(ReadNone, AAMemoryBehavior, AAMemoryBehavior::NO_ACCESSES);
    CASE(ReadOnly, AAMemoryBehavior, AAMemoryBehavior::NO_WRITES);
    CASE(WriteOnly, AAMemoryBehavior, AAMemoryBehavior::NO_READS);
#undef CASE
  default:
    llvm_unreachable("hasAssumedIRAttr not available for this attribute kind");
  }
}
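
// Illustrative sketch (not part of the upstream header): checking for
// `nounwind` without materializing an AA when the IR attribute is already
// present. `A`, `QueryingAA`, and `IRP` are assumed from context:
//
//   bool IsKnownNoUnwind = false;
//   bool AssumedNoUnwind = AA::hasAssumedIRAttr<Attribute::NoUnwind>(
//       A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnownNoUnwind);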
} // namespace AA

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H
//===-- ExtractGV.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_EXTRACTGV_H
#define LLVM_TRANSFORMS_IPO_EXTRACTGV_H

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class ExtractGVPass : public PassInfoMixin<ExtractGVPass> {
private:
  SetVector<GlobalValue *> Named;
  bool deleteStuff;
  bool keepConstInit;

public:
  ExtractGVPass(std::vector<GlobalValue *> &GVs, bool deleteS = true,
                bool keepConstInit = false);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_IPO_EXTRACTGV_H
//===-- StripDeadPrototypes.h - Remove unused function declarations -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass loops over all of the functions in the input module, looking for
// dead declarations, and removes them. Dead declarations are declarations of
// functions for which no implementation is available (i.e., declarations for
// unused library functions).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
#define LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Pass to remove unused function declarations.
struct StripDeadPrototypesPass : PassInfoMixin<StripDeadPrototypesPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
//===- Annotation2Metadata.h - Add !annotation metadata. --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// New pass manager pass to convert @llvm.global.annotations to !annotation
// metadata.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_ANNOTATION2METADATA_H
#define LLVM_TRANSFORMS_IPO_ANNOTATION2METADATA_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Pass to convert @llvm.global.annotations to !annotation metadata.
struct Annotation2MetadataPass : public PassInfoMixin<Annotation2MetadataPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_ANNOTATION2METADATA_H
//===- Transforms/IPO/SampleContextTracker.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for the context-sensitive profile tracker
/// used by CSSPGO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_SAMPLECONTEXTTRACKER_H
#define LLVM_TRANSFORMS_IPO_SAMPLECONTEXTTRACKER_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ProfileData/SampleProf.h"
#include <map>
#include <queue>
#include <vector>

namespace llvm {
class CallBase;
class DILocation;
class Function;
class Instruction;

// Internal trie tree representation used for tracking context tree and sample
// profiles. The path from root node to a given node represents the context of
// that node's profile.
class ContextTrieNode {
public:
  ContextTrieNode(ContextTrieNode *Parent = nullptr,
                  StringRef FName = StringRef(),
                  FunctionSamples *FSamples = nullptr,
                  LineLocation CallLoc = {0, 0})
      : ParentContext(Parent), FuncName(FName), FuncSamples(FSamples),
        CallSiteLoc(CallLoc) {}
  ContextTrieNode *getChildContext(const LineLocation &CallSite,
                                   StringRef ChildName);
  ContextTrieNode *getHottestChildContext(const LineLocation &CallSite);
  ContextTrieNode *getOrCreateChildContext(const LineLocation &CallSite,
                                           StringRef ChildName,
                                           bool AllowCreate = true);
  void removeChildContext(const LineLocation &CallSite, StringRef ChildName);
  std::map<uint64_t, ContextTrieNode> &getAllChildContext();
  StringRef getFuncName() const;
  FunctionSamples *getFunctionSamples() const;
  void setFunctionSamples(FunctionSamples *FSamples);
  std::optional<uint32_t> getFunctionSize() const;
  void addFunctionSize(uint32_t FSize);
  LineLocation getCallSiteLoc() const;
  ContextTrieNode *getParentContext() const;
  void setParentContext(ContextTrieNode *Parent);
  void setCallSiteLoc(const LineLocation &Loc);
  void dumpNode();
  void dumpTree();

private:
  // Map line+discriminator location to child context
  std::map<uint64_t, ContextTrieNode> AllChildContext;

  // Link to parent context node
  ContextTrieNode *ParentContext;

  // Function name for current context
  StringRef FuncName;

  // Function Samples for current context
  FunctionSamples *FuncSamples;

  // Function size for current context
  std::optional<uint32_t> FuncSize;

  // Callsite location in parent context
  LineLocation CallSiteLoc;
};
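
// Illustrative sketch (not part of the upstream header): building a tiny
// context trie by hand; the LineLocation values and function name are made up:
//
//   ContextTrieNode Root;
//   ContextTrieNode *Callee =
//       Root.getOrCreateChildContext(LineLocation(2, 0), "bar");
//   // The path Root -> Callee now represents "bar called at line 2".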

// Profile tracker that manages profiles and their associated contexts. It
// provides the interfaces used by the sample profile loader to query the
// context profile or base profile for a given function or location; it also
// manages the context tree manipulation needed to accommodate inline
// decisions, so that we have accurate post-inline profiles for functions.
// Internally, context profiles are organized in a trie, with each node
// representing the profile for a specific calling context; the context is
// identified by the path from the root to the node.
class SampleContextTracker {
public:
  using ContextSamplesTy = std::vector<FunctionSamples *>;

  SampleContextTracker() = default;
  SampleContextTracker(SampleProfileMap &Profiles,
                       const DenseMap<uint64_t, StringRef> *GUIDToFuncNameMap);
  // Populate the FuncToCtxtProfiles map after the trie is built.
  void populateFuncToCtxtMap();
  // Query context profile for a specific callee with a given name at a given
  // call site. The full context is identified by the location of the call
  // instruction.
  FunctionSamples *getCalleeContextSamplesFor(const CallBase &Inst,
                                              StringRef CalleeName);
  // Get samples for indirect call targets for call site at given location.
  std::vector<const FunctionSamples *>
  getIndirectCalleeContextSamplesFor(const DILocation *DIL);
  // Query context profile for a given location. The full context
  // is identified by input DILocation.
  FunctionSamples *getContextSamplesFor(const DILocation *DIL);
  // Query context profile for a given sample context of a function.
  FunctionSamples *getContextSamplesFor(const SampleContext &Context);
  // Get all context profiles for a given function.
  ContextSamplesTy &getAllContextSamplesFor(const Function &Func);
  ContextSamplesTy &getAllContextSamplesFor(StringRef Name);
  ContextTrieNode *getOrCreateContextPath(const SampleContext &Context,
                                          bool AllowCreate);
  // Query base profile for a given function. A base profile is a merged view
  // of all context profiles for contexts that are not inlined.
  FunctionSamples *getBaseSamplesFor(const Function &Func,
                                     bool MergeContext = true);
  // Query base profile for a given function by name.
  FunctionSamples *getBaseSamplesFor(StringRef Name, bool MergeContext = true);
  // Retrieve the context trie node for given profile context
  ContextTrieNode *getContextFor(const SampleContext &Context);
  // Get real function name for a given trie node.
  StringRef getFuncNameFor(ContextTrieNode *Node) const;
  // Mark a context profile as inlined when the function is inlined.
  // This makes sure that the inlined context profile will be excluded from
  // the function's base profile.
  void markContextSamplesInlined(const FunctionSamples *InlinedSamples);
  ContextTrieNode &getRootContext();
  void promoteMergeContextSamplesTree(const Instruction &Inst,
                                      StringRef CalleeName);

  // Create a merged context-less profile map.
  void createContextLessProfileMap(SampleProfileMap &ContextLessProfiles);
  ContextTrieNode *
  getContextNodeForProfile(const FunctionSamples *FSamples) const {
    auto I = ProfileToNodeMap.find(FSamples);
    if (I == ProfileToNodeMap.end())
      return nullptr;
    return I->second;
  }
  StringMap<ContextSamplesTy> &getFuncToCtxtProfiles() {
    return FuncToCtxtProfiles;
  }

  class Iterator : public llvm::iterator_facade_base<
                       Iterator, std::forward_iterator_tag, ContextTrieNode *,
                       std::ptrdiff_t, ContextTrieNode **, ContextTrieNode *> {
    std::queue<ContextTrieNode *> NodeQueue;

  public:
    explicit Iterator() = default;
    explicit Iterator(ContextTrieNode *Node) { NodeQueue.push(Node); }
    Iterator &operator++() {
      assert(!NodeQueue.empty() && "Iterator already at the end");
      ContextTrieNode *Node = NodeQueue.front();
      NodeQueue.pop();
      for (auto &It : Node->getAllChildContext())
        NodeQueue.push(&It.second);
      return *this;
    }

    bool operator==(const Iterator &Other) const {
      if (NodeQueue.empty() && Other.NodeQueue.empty())
        return true;
      if (NodeQueue.empty() || Other.NodeQueue.empty())
        return false;
      return NodeQueue.front() == Other.NodeQueue.front();
    }

    ContextTrieNode *operator*() const {
      assert(!NodeQueue.empty() && "Invalid access to end iterator");
      return NodeQueue.front();
    }
  };

  Iterator begin() { return Iterator(&RootContext); }
  Iterator end() { return Iterator(); }

#ifndef NDEBUG
  // Get a context string from root to current node.
  std::string getContextString(const FunctionSamples &FSamples) const;
  std::string getContextString(ContextTrieNode *Node) const;
#endif
  // Dump the internal context profile trie.
  void dump();

private:
  ContextTrieNode *getContextFor(const DILocation *DIL);
  ContextTrieNode *getCalleeContextFor(const DILocation *DIL,
                                       StringRef CalleeName);
  ContextTrieNode *getTopLevelContextNode(StringRef FName);
  ContextTrieNode &addTopLevelContextNode(StringRef FName);
  ContextTrieNode &promoteMergeContextSamplesTree(ContextTrieNode &NodeToPromo);
  void mergeContextNode(ContextTrieNode &FromNode, ContextTrieNode &ToNode);
  ContextTrieNode &
  promoteMergeContextSamplesTree(ContextTrieNode &FromNode,
                                 ContextTrieNode &ToNodeParent);
  ContextTrieNode &moveContextSamples(ContextTrieNode &ToNodeParent,
                                      const LineLocation &CallSite,
                                      ContextTrieNode &&NodeToMove);
  void setContextNode(const FunctionSamples *FSample, ContextTrieNode *Node) {
    ProfileToNodeMap[FSample] = Node;
  }
  // Map from function name to context profiles (excluding base profile)
  StringMap<ContextSamplesTy> FuncToCtxtProfiles;

  // Map from a FunctionSamples instance to its context trie node.
  std::unordered_map<const FunctionSamples *, ContextTrieNode *>
      ProfileToNodeMap;

  // Map from function GUID to real function name. Only used in MD5 mode.
  const DenseMap<uint64_t, StringRef> *GUIDToFuncNameMap;

  // Root node for context trie tree
  ContextTrieNode RootContext;
};
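
// Illustrative sketch (not part of the upstream header): walking every
// context profile in BFS order via the tracker's iterator. `Profiles` and
// `GUIDToFuncNameMap` are assumed to exist in the surrounding code:
//
//   SampleContextTracker Tracker(Profiles, &GUIDToFuncNameMap);
//   for (ContextTrieNode *Node : Tracker)
//     if (FunctionSamples *FS = Node->getFunctionSamples())
//       (void)FS; // e.g., inspect or merge the context profile.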

} // end namespace llvm
#endif // LLVM_TRANSFORMS_IPO_SAMPLECONTEXTTRACKER_H
//===- ModuleInliner.h - Module level Inliner pass --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_MODULEINLINER_H
#define LLVM_TRANSFORMS_IPO_MODULEINLINER_H

#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

/// The module inliner pass for the new pass manager.
///
/// This pass wires together the inlining utilities and the inline cost
/// analysis into a module pass. Unlike the SCC inliner, it considers every
/// call in every function in the whole module and tries to inline if
/// profitable. With this module-level inliner, it is possible to evaluate
/// more heuristics at the module level, such as PriorityInlineOrder. It can be
/// tuned with a number of parameters to control what cost model is used and
/// what tradeoffs are made when making the decision.
class ModuleInlinerPass : public PassInfoMixin<ModuleInlinerPass> {
public:
  ModuleInlinerPass(InlineParams Params = getInlineParams(),
                    InliningAdvisorMode Mode = InliningAdvisorMode::Default,
                    ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
      : Params(Params), Mode(Mode), LTOPhase(LTOPhase) {}
  ModuleInlinerPass(ModuleInlinerPass &&Arg) = default;

  PreservedAnalyses run(Module &, ModuleAnalysisManager &);

private:
  InlineAdvisor &getAdvisor(const ModuleAnalysisManager &MAM,
                            FunctionAnalysisManager &FAM, Module &M);
  std::unique_ptr<InlineAdvisor> OwnedAdvisor;
  const InlineParams Params;
  const InliningAdvisorMode Mode;
  const ThinOrFullLTOPhase LTOPhase;
};
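
// Illustrative sketch (not part of the upstream header): adding the module
// inliner to a new-pass-manager pipeline with -O2-style inline parameters:
//
//   ModulePassManager MPM;
//   MPM.addPass(ModuleInlinerPass(getInlineParams(/*OptLevel=*/2,
//                                                 /*SizeOptLevel=*/0)));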
} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_MODULEINLINER_H
//===- ArgumentPromotion.h - Promote by-reference arguments -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
#define LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

/// Argument promotion pass.
///
/// This pass walks the functions in each SCC and for each one tries to
/// transform it and all of its callers to replace indirect arguments with
/// direct (by-value) arguments.
class ArgumentPromotionPass : public PassInfoMixin<ArgumentPromotionPass> {
  unsigned MaxElements;

public:
  ArgumentPromotionPass(unsigned MaxElements = 2u) : MaxElements(MaxElements) {}

  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_ARGUMENTPROMOTION_H
//===- DeadArgumentElimination.h - Eliminate Dead Args ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass deletes dead arguments from internal functions.  Dead argument
// elimination removes arguments which are directly dead, as well as arguments
// only passed into function calls as dead arguments of other functions.  This
// pass also deletes dead return values in a similar way.
//
// This pass is often useful as a cleanup pass to run after aggressive
// interprocedural passes, which add possibly-dead arguments or return values.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
#define LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include <map>
#include <set>
#include <string>
#include <tuple>

namespace llvm {

class Module;
class Use;
class Value;

/// Eliminate dead arguments (and return values) from functions.
class DeadArgumentEliminationPass
    : public PassInfoMixin<DeadArgumentEliminationPass> {
public:
  /// Struct that represents (part of) either a return value or a function
  /// argument.  Used so that arguments and return values can be used
  /// interchangeably.
  struct RetOrArg {
    const Function *F;
    unsigned Idx;
    bool IsArg;

    RetOrArg(const Function *F, unsigned Idx, bool IsArg)
        : F(F), Idx(Idx), IsArg(IsArg) {}

    /// Make RetOrArg comparable, so we can put it into a map.
    bool operator<(const RetOrArg &O) const {
      return std::tie(F, Idx, IsArg) < std::tie(O.F, O.Idx, O.IsArg);
    }

    /// Make RetOrArg comparable, so we can easily iterate the multimap.
    bool operator==(const RetOrArg &O) const {
      return F == O.F && Idx == O.Idx && IsArg == O.IsArg;
    }

    std::string getDescription() const {
      return (Twine(IsArg ? "Argument #" : "Return value #") + Twine(Idx) +
              " of function " + F->getName())
          .str();
    }
  };

  /// During our initial pass over the program, we determine that things are
  /// either alive or maybe alive. We don't mark anything explicitly dead (even
  /// if we know they are), since anything not alive with no registered uses
  /// (in Uses) will never be marked alive and will thus become dead in the end.
  enum Liveness { Live, MaybeLive };

  DeadArgumentEliminationPass(bool ShouldHackArguments = false)
      : ShouldHackArguments(ShouldHackArguments) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);

  /// Convenience wrapper
  RetOrArg createRet(const Function *F, unsigned Idx) {
    return RetOrArg(F, Idx, false);
  }

  /// Convenience wrapper
  RetOrArg createArg(const Function *F, unsigned Idx) {
    return RetOrArg(F, Idx, true);
  }

  using UseMap = std::multimap<RetOrArg, RetOrArg>;

  /// This maps a return value or argument to any MaybeLive return values or
  /// arguments it uses. This allows the MaybeLive values to be marked live
  /// when any of its users is marked live.
  /// For example (indices are left out for clarity):
  ///  - Uses[ret F] = ret G
  ///    This means that F calls G, and F returns the value returned by G.
  ///  - Uses[arg F] = ret G
  ///    This means that some function calls G and passes its result as an
  ///    argument to F.
  ///  - Uses[ret F] = arg F
  ///    This means that F returns one of its own arguments.
  ///  - Uses[arg F] = arg G
  ///    This means that G calls F and passes one of its own (G's) arguments
  ///    directly to F.
  UseMap Uses;

  using LiveSet = std::set<RetOrArg>;
  using LiveFuncSet = std::set<const Function *>;

  /// This set contains all values that have been determined to be live.
  LiveSet LiveValues;

  /// This set contains all values that cannot be changed in any way.
  LiveFuncSet LiveFunctions;

  using UseVector = SmallVector<RetOrArg, 5>;

  /// This allows this pass to do double-duty as the dead arg hacking pass
  /// (used only by bugpoint).
  bool ShouldHackArguments = false;

private:
  Liveness markIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
  Liveness surveyUse(const Use *U, UseVector &MaybeLiveUses,
                     unsigned RetValNum = -1U);
  Liveness surveyUses(const Value *V, UseVector &MaybeLiveUses);

  void surveyFunction(const Function &F);
  bool isLive(const RetOrArg &RA);
  void markValue(const RetOrArg &RA, Liveness L,
                 const UseVector &MaybeLiveUses);
  void markLive(const RetOrArg &RA);
  void markLive(const Function &F);
  void propagateLiveness(const RetOrArg &RA);
  bool removeDeadStuffFromFunction(Function *F);
  bool deleteDeadVarargs(Function &F);
  bool removeDeadArgumentsFromCallers(Function &F);
  void propagateVirtMustcallLiveness(const Module &M);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_DEADARGUMENTELIMINATION_H
//===- LowerTypeTests.h - type metadata lowering pass -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines parts of the type test lowering pass implementation that
// may be usefully unit tested.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
#define LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include <cstdint>
#include <cstring>
#include <limits>
#include <set>
#include <vector>

namespace llvm {

class Module;
class ModuleSummaryIndex;
class raw_ostream;

namespace lowertypetests {

struct BitSetInfo {
  // The indices of the set bits in the bitset.
  std::set<uint64_t> Bits;

  // The byte offset into the combined global represented by the bitset.
  uint64_t ByteOffset;

  // The size of the bitset in bits.
  uint64_t BitSize;

  // Log2 alignment of the bit set relative to the combined global.
  // For example, a log2 alignment of 3 means that bits in the bitset
  // represent addresses 8 bytes apart.
  unsigned AlignLog2;

  bool isSingleOffset() const {
    return Bits.size() == 1;
  }

  bool isAllOnes() const {
    return Bits.size() == BitSize;
  }

  bool containsGlobalOffset(uint64_t Offset) const;

  void print(raw_ostream &OS) const;
};

struct BitSetBuilder {
  SmallVector<uint64_t, 16> Offsets;
  uint64_t Min = std::numeric_limits<uint64_t>::max();
  uint64_t Max = 0;

  BitSetBuilder() = default;

  void addOffset(uint64_t Offset) {
    if (Min > Offset)
      Min = Offset;
    if (Max < Offset)
      Max = Offset;

    Offsets.push_back(Offset);
  }

  BitSetInfo build();
};
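
// Illustrative sketch (not part of the upstream header): offsets 0, 8 and 16
// should yield a bitset whose set bits are 8 bytes apart (i.e., an AlignLog2
// of 3), per the BitSetInfo description above:
//
//   BitSetBuilder BSB;
//   BSB.addOffset(0);
//   BSB.addOffset(8);
//   BSB.addOffset(16);
//   BitSetInfo BSI = BSB.build();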

/// This class implements a layout algorithm for globals referenced by bit sets
/// that tries to keep members of small bit sets together. This can
/// significantly reduce bit set sizes in many cases.
///
/// It works by assembling fragments of layout from sets of referenced globals.
/// Each set of referenced globals causes the algorithm to create a new
/// fragment, which is assembled by appending each referenced global in the set
/// into the fragment. If a referenced global has already been referenced by a
/// fragment created earlier, we instead delete that fragment and append its
/// contents into the fragment we are assembling.
///
/// By starting with the smallest fragments, we minimize the size of the
/// fragments that are copied into larger fragments. This is most intuitively
/// thought about when considering the case where the globals are virtual tables
/// and the bit sets represent their derived classes: in a single inheritance
/// hierarchy, the optimum layout would involve a depth-first search of the
/// class hierarchy (and in fact the computed layout ends up looking a lot like
/// a DFS), but a naive DFS would not work well in the presence of multiple
/// inheritance. This aspect of the algorithm ends up fitting smaller
/// hierarchies inside larger ones where that would be beneficial.
///
/// For example, consider this class hierarchy:
///
/// A       B
///   \   / | \
///     C   D   E
///
/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
/// cover the only 4 objects in its hierarchy, but not for bsA as it needs to
/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
///
/// Add bsC, fragments {{C}}
/// Add bsD, fragments {{C}, {D}}
/// Add bsE, fragments {{C}, {D}, {E}}
/// Add bsA, fragments {{A, C}, {D}, {E}}
/// Add bsB, fragments {{B, A, C, D, E}}
///
/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
/// fewer) objects, at the cost of bsB needing to cover 1 more object.
///
/// The bit set lowering pass assigns an object index to each object that needs
/// to be laid out, and calls addFragment for each bit set passing the object
/// indices of its referenced globals. It then assembles a layout from the
/// computed layout in the Fragments field.
struct GlobalLayoutBuilder {
  /// The computed layout. Each element of this vector contains a fragment of
  /// layout (which may be empty) consisting of object indices.
  std::vector<std::vector<uint64_t>> Fragments;

  /// Mapping from object index to fragment index.
  std::vector<uint64_t> FragmentMap;

  GlobalLayoutBuilder(uint64_t NumObjects)
      : Fragments(1), FragmentMap(NumObjects) {}

  /// Add F to the layout while trying to keep its indices contiguous.
  /// If a previously seen fragment uses any of F's indices, that
  /// fragment will be laid out inside F.
  void addFragment(const std::set<uint64_t> &F);
};
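
// Illustrative sketch (not part of the upstream header), replaying the class
// hierarchy example from the comment above with object indices
// A=0, B=1, C=2, D=3, E=4:
//
//   GlobalLayoutBuilder GLB(/*NumObjects=*/5);
//   GLB.addFragment({2});          // bsC
//   GLB.addFragment({3});          // bsD
//   GLB.addFragment({4});          // bsE
//   GLB.addFragment({0, 2});       // bsA
//   GLB.addFragment({1, 2, 3, 4}); // bsB
//   // GLB.Fragments now holds the assembled layout fragments.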

/// This class is used to build a byte array containing overlapping bit sets. By
/// loading from indexed offsets into the byte array and applying a mask, a
/// program can test bits from the bit set with a relatively short instruction
/// sequence. For example, suppose we have 15 bit sets to lay out:
///
/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
///
/// These bits can be laid out in a 16-byte array like this:
///
///       Byte Offset
///     0123456789ABCDEF
/// Bit
///   7 HHHHHHHHHIIIIIII
///   6 GGGGGGGGGGJJJJJJ
///   5 FFFFFFFFFFFKKKKK
///   4 EEEEEEEEEEEELLLL
///   3 DDDDDDDDDDDDDMMM
///   2 CCCCCCCCCCCCCCNN
///   1 BBBBBBBBBBBBBBBO
///   0 AAAAAAAAAAAAAAAA
///
/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
///
/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
/// because for one thing it gives us better packing (the more bins there are,
/// the less evenly they will be filled), and for another, the instruction
/// sequences can be slightly shorter, both on x86 and ARM.
struct ByteArrayBuilder {
  /// The byte array built so far.
  std::vector<uint8_t> Bytes;

  enum { BitsPerByte = 8 };

  /// The number of bytes allocated so far for each of the bits.
  uint64_t BitAllocs[BitsPerByte];

  ByteArrayBuilder() {
    memset(BitAllocs, 0, sizeof(BitAllocs));
  }

  /// Allocate BitSize bits in the byte array where Bits contains the bits to
  /// set. AllocByteOffset is set to the offset within the byte array and
  /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
  /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
  /// efficiently; the pass allocates bit sets in decreasing size order.
  void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
                uint64_t &AllocByteOffset, uint8_t &AllocMask);
};
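
// Illustrative sketch (not part of the upstream header): allocating one bit
// set in the byte array and receiving its byte offset and mask:
//
//   ByteArrayBuilder BAB;
//   uint64_t ByteOffset;
//   uint8_t Mask;
//   BAB.allocate(/*Bits=*/{0, 3, 7}, /*BitSize=*/8, ByteOffset, Mask);
//   // Bit X of this set is tested as: (Bytes[ByteOffset + X] & Mask) != 0.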

bool isJumpTableCanonical(Function *F);

} // end namespace lowertypetests

class LowerTypeTestsPass : public PassInfoMixin<LowerTypeTestsPass> {
  bool UseCommandLine = false;

  ModuleSummaryIndex *ExportSummary = nullptr;
  const ModuleSummaryIndex *ImportSummary = nullptr;
  bool DropTypeTests = true;

public:
  LowerTypeTestsPass() : UseCommandLine(true) {}
  LowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
                     const ModuleSummaryIndex *ImportSummary,
                     bool DropTypeTests = false)
      : ExportSummary(ExportSummary), ImportSummary(ImportSummary),
        DropTypeTests(DropTypeTests) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_LOWERTYPETESTS_H
//===- Transforms/IPO/SampleProfileProbe.h ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for the pseudo probe implementation for
/// AutoFDO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_SAMPLEPROFILEPROBE_H
#define LLVM_TRANSFORMS_IPO_SAMPLEPROFILEPROBE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"
#include "llvm/ProfileData/SampleProf.h"
#include <unordered_map>

namespace llvm {
class BasicBlock;
class Function;
class Instruction;
class Loop;
class PassInstrumentationCallbacks;
class TargetMachine;

class Module;

using namespace sampleprof;
using BlockIdMap = std::unordered_map<BasicBlock *, uint32_t>;
using InstructionIdMap = std::unordered_map<Instruction *, uint32_t>;
// Map from tuples of Probe id and inline stack hash code to distribution
// factors.
using ProbeFactorMap = std::unordered_map<std::pair<uint64_t, uint64_t>, float,
                                          pair_hash<uint64_t, uint64_t>>;
using FuncProbeFactorMap = StringMap<ProbeFactorMap>;


// A pseudo probe verifier that can be run after each IR pass to detect
// violations of probe factor updates. In principle, the sum of the
// distribution factors for a probe should be identical before and after a
// pass. For a function pass, the factor sum for a probe would typically be
// 100%.
class PseudoProbeVerifier {
public:
  void registerCallbacks(PassInstrumentationCallbacks &PIC);

  // Implementation of pass instrumentation callbacks for new pass manager.
  void runAfterPass(StringRef PassID, Any IR);

private:
  // Allow a little bias due to the rounding to integral factors.
  constexpr static float DistributionFactorVariance = 0.02f;
  // Distribution factors from last pass.
  FuncProbeFactorMap FunctionProbeFactors;

  void collectProbeFactors(const BasicBlock *BB, ProbeFactorMap &ProbeFactors);
  void runAfterPass(const Module *M);
  void runAfterPass(const LazyCallGraph::SCC *C);
  void runAfterPass(const Function *F);
  void runAfterPass(const Loop *L);
  bool shouldVerifyFunction(const Function *F);
  void verifyProbeFactors(const Function *F,
                          const ProbeFactorMap &ProbeFactors);
};
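
// Illustrative sketch (not part of the upstream header): hooking the verifier
// into pass instrumentation so it runs after each pass:
//
//   PassInstrumentationCallbacks PIC;
//   PseudoProbeVerifier Verifier;
//   Verifier.registerCallbacks(PIC);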

/// Sample profile pseudo prober.
///
/// Insert pseudo probes for block sampling and value sampling.
class SampleProfileProber {
public:
  // Give an empty module id when the prober is not used for instrumentation.
  SampleProfileProber(Function &F, const std::string &CurModuleUniqueId);
  void instrumentOneFunc(Function &F, TargetMachine *TM);

private:
  Function *getFunction() const { return F; }
  uint64_t getFunctionHash() const { return FunctionHash; }
  uint32_t getBlockId(const BasicBlock *BB) const;
  uint32_t getCallsiteId(const Instruction *Call) const;
  void computeCFGHash();
  void computeProbeIdForBlocks();
  void computeProbeIdForCallsites();

  Function *F;

  /// The current module ID that is used to name a static object as a comdat
  /// group.
  std::string CurModuleUniqueId;

  /// A CFG hash code used to identify function code changes.
  uint64_t FunctionHash;

  /// Map basic blocks to their pseudo probe ids.
  BlockIdMap BlockProbeIds;

  /// Map indirect calls to their pseudo probe ids.
  InstructionIdMap CallProbeIds;

  /// The ID of the last probe; can be used to number a new probe.
  uint32_t LastProbeId;
};

class SampleProfileProbePass : public PassInfoMixin<SampleProfileProbePass> {
  TargetMachine *TM;

public:
  SampleProfileProbePass(TargetMachine *TM) : TM(TM) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

// Pseudo probe distribution factor updater.
// Sample profile annotation can happen in both LTO prelink and postlink. The
// postlink-time re-annotation can degrade profile quality because of prelink
// code duplication transformations, such as loop unrolling, jump threading,
// indirect call promotion, etc. As such, samples corresponding to a source
// location may be aggregated multiple times in postlink. With a concept of
// distribution factor for pseudo probes, samples can be distributed among
// duplicated probes reasonably, based on the assumption that optimizations
// duplicating code maintain the branch frequency information (BFI) well. This
// pass updates distribution factors for each pseudo probe at the end of the
// prelink pipeline, to reflect an estimated portion of the real execution
// count.
class PseudoProbeUpdatePass : public PassInfoMixin<PseudoProbeUpdatePass> {
  void runOnFunction(Function &F, FunctionAnalysisManager &FAM);

public:
  PseudoProbeUpdatePass() = default;
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm
#endif // LLVM_TRANSFORMS_IPO_SAMPLEPROFILEPROBE_H
//===- HotColdSplitting.h ---- Outline Cold Regions -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
//
// This pass outlines cold regions to a separate function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_HOTCOLDSPLITTING_H
#define LLVM_TRANSFORMS_IPO_HOTCOLDSPLITTING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;
class ProfileSummaryInfo;
class BlockFrequencyInfo;
class TargetTransformInfo;
class OptimizationRemarkEmitter;
class AssumptionCache;
class DominatorTree;
class CodeExtractorAnalysisCache;

/// A sequence of basic blocks.
///
/// A 0-sized SmallVector is slightly cheaper to move than a std::vector.
using BlockSequence = SmallVector<BasicBlock *, 0>;

class HotColdSplitting {
public:
  HotColdSplitting(ProfileSummaryInfo *ProfSI,
                   function_ref<BlockFrequencyInfo *(Function &)> GBFI,
                   function_ref<TargetTransformInfo &(Function &)> GTTI,
                   std::function<OptimizationRemarkEmitter &(Function &)> *GORE,
                   function_ref<AssumptionCache *(Function &)> LAC)
      : PSI(ProfSI), GetBFI(GBFI), GetTTI(GTTI), GetORE(GORE), LookupAC(LAC) {}
  bool run(Module &M);

private:
  bool isFunctionCold(const Function &F) const;
  bool shouldOutlineFrom(const Function &F) const;
  bool outlineColdRegions(Function &F, bool HasProfileSummary);
  Function *extractColdRegion(const BlockSequence &Region,
                              const CodeExtractorAnalysisCache &CEAC,
                              DominatorTree &DT, BlockFrequencyInfo *BFI,
                              TargetTransformInfo &TTI,
                              OptimizationRemarkEmitter &ORE,
                              AssumptionCache *AC, unsigned Count);
  ProfileSummaryInfo *PSI;
  function_ref<BlockFrequencyInfo *(Function &)> GetBFI;
  function_ref<TargetTransformInfo &(Function &)> GetTTI;
  std::function<OptimizationRemarkEmitter &(Function &)> *GetORE;
  function_ref<AssumptionCache *(Function &)> LookupAC;
};
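
// Illustrative sketch (not part of the upstream header): wiring the analysis
// callbacks by hand. `FAM`, `PSI`, and `M` are assumed to exist, and the
// OptimizationRemarkEmitter getter is elided:
//
//   auto GetBFI = [&](Function &F) -> BlockFrequencyInfo * {
//     return &FAM.getResult<BlockFrequencyAnalysis>(F);
//   };
//   auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
//     return FAM.getResult<TargetIRAnalysis>(F);
//   };
//   auto LookupAC = [&](Function &F) -> AssumptionCache * {
//     return &FAM.getResult<AssumptionAnalysis>(F);
//   };
//   std::function<OptimizationRemarkEmitter &(Function &)> GetORE = /*...*/;
//   HotColdSplitting HCS(PSI, GetBFI, GetTTI, &GetORE, LookupAC);
//   bool Changed = HCS.run(M);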

/// Pass to outline cold regions.
class HotColdSplittingPass : public PassInfoMixin<HotColdSplittingPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_HOTCOLDSPLITTING_H

//===-- InferFunctionAttrs.h - Infer implicit function attributes ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Interfaces for passes which infer implicit function attributes from the
/// name and signature of function declarations.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
#define LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;

/// A pass which infers function attributes from the names and signatures of
/// function declarations in a module.
struct InferFunctionAttrsPass : PassInfoMixin<InferFunctionAttrsPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
// llvm/Transforms/IPO/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
#define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H

#include "llvm-c/Transforms/PassManagerBuilder.h"
#include <functional>
#include <string>
#include <vector>

namespace llvm {
class ModuleSummaryIndex;
class Pass;
class TargetLibraryInfoImpl;

// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
class FunctionPassManager;
class PassManagerBase;
}

/// PassManagerBuilder - This class is used to set up a standard optimization
/// sequence for languages like C and C++, allowing some APIs to customize the
/// pass sequence in various ways. A simple example of using it would be:
///
///  PassManagerBuilder Builder;
///  Builder.OptLevel = 2;
///  Builder.populateFunctionPassManager(FPM);
///  Builder.populateModulePassManager(MPM);
///
/// In addition to setting up the basic passes, PassManagerBuilder allows
/// frontends to vend a plugin API, where plugins are allowed to add extensions
/// to the default pass manager.  They do this by specifying where in the pass
/// pipeline they want to be added, along with a callback function that adds
/// the pass(es).  For example, a plugin that wanted to add a loop optimization
/// could do something like this:
///
/// static void addMyLoopPass(const PMBuilder &Builder, PassManagerBase &PM) {
///   if (Builder.getOptLevel() > 2 && Builder.getOptSizeLevel() == 0)
///     PM.add(createMyAwesomePass());
/// }
///   ...
///   Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
///                        addMyLoopPass);
///   ...
class PassManagerBuilder {
public:
  /// Extensions are passed to the builder itself (so they can see how it is
  /// configured) as well as the pass manager to add stuff to.
  typedef std::function<void(const PassManagerBuilder &Builder,
                             legacy::PassManagerBase &PM)>
      ExtensionFn;
  typedef int GlobalExtensionID;

  /// The Optimization Level - Specify the basic optimization level.
  ///    0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3
  unsigned OptLevel;

  /// SizeLevel - How much we're optimizing for size.
  ///    0 = none, 1 = -Os, 2 = -Oz
  unsigned SizeLevel;

  /// LibraryInfo - Specifies information about the runtime library for the
  /// optimizer.  If this is non-null, it is added to both the function and
  /// per-module pass pipeline.
  TargetLibraryInfoImpl *LibraryInfo;

  /// Inliner - Specifies the inliner to use.  If this is non-null, it is
  /// added to the per-module passes.
  Pass *Inliner;

  /// The module summary index to use for exporting information from the
  /// regular LTO phase, for example for the CFI and devirtualization type
  /// tests.
  ModuleSummaryIndex *ExportSummary = nullptr;

  /// The module summary index to use for importing information to the
  /// thin LTO backends, for example for the CFI and devirtualization type
  /// tests.
  const ModuleSummaryIndex *ImportSummary = nullptr;

  bool DisableUnrollLoops;
  bool CallGraphProfile;
  bool SLPVectorize;
  bool LoopVectorize;
  bool LoopsInterleaved;
  bool DisableGVNLoadPRE;
  bool ForgetAllSCEVInLoopUnroll;
  bool VerifyInput;
  bool VerifyOutput;
  bool MergeFunctions;
  bool DivergentTarget;
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

public:
  PassManagerBuilder();
  ~PassManagerBuilder();

private:
  void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
  void addFunctionSimplificationPasses(legacy::PassManagerBase &MPM);
  void addVectorPasses(legacy::PassManagerBase &PM, bool IsFullLTO);

public:
  /// populateFunctionPassManager - This fills in the function pass manager,
  /// which is expected to be run on each function immediately as it is
  /// generated.  The idea is to reduce the size of the IR in memory.
  void populateFunctionPassManager(legacy::FunctionPassManager &FPM);

  /// populateModulePassManager - This sets up the primary pass manager.
  void populateModulePassManager(legacy::PassManagerBase &MPM);
};

inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) {
    return reinterpret_cast<PassManagerBuilder*>(P);
}

inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) {
  return reinterpret_cast<LLVMPassManagerBuilderRef>(P);
}

} // end namespace llvm
#endif
//===- llvm/Transforms/IPO/FunctionImport.h - ThinLTO importing -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
#define LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Error.h"
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <system_error>
#include <unordered_set>
#include <utility>

namespace llvm {

class Module;

/// The function importer automatically imports functions from other modules
/// based on the provided summary information.
class FunctionImporter {
public:
  /// Set of functions to import from a source module. Each entry is a set
  /// containing all the GUIDs of all functions to import for a source module.
  using FunctionsToImportTy = std::unordered_set<GlobalValue::GUID>;

  /// The different reasons selectCallee will choose not to import a
  /// candidate.
  enum ImportFailureReason {
    None,
    // We can encounter a global variable instead of a function in rare
    // situations with SamplePGO. See comments where this failure type is
    // set for more details.
    GlobalVar,
    // Found to be globally dead, so we don't bother importing.
    NotLive,
    // Instruction count over the current threshold.
    TooLarge,
    // Don't import something with interposable linkage as we can't inline it
    // anyway.
    InterposableLinkage,
    // Generally we won't end up failing due to this reason, as we expect
    // to find at least one summary for the GUID that is global or a local
    // in the referenced module for direct calls.
    LocalLinkageNotInModule,
    // This corresponds to the NotEligibleToImport being set on the summary,
    // which can happen in a few different cases (e.g. local that can't be
    // renamed or promoted because it is referenced on a llvm*.used variable).
    NotEligible,
    // This corresponds to NoInline being set on the function summary,
    // which will happen if it is known that the inliner will not be able
    // to inline the function (e.g. it is marked with a NoInline attribute).
    NoInline
  };

  /// Information optionally tracked for candidates the importer decided
  /// not to import. Used for optional stat printing.
  struct ImportFailureInfo {
    // The ValueInfo corresponding to the candidate. We save an index hash
    // table lookup for each GUID by stashing this here.
    ValueInfo VI;
    // The maximum call edge hotness for all failed imports of this candidate.
    CalleeInfo::HotnessType MaxHotness;
    // Most recent reason for failing to import (doesn't necessarily correspond
    // to the attempt with the maximum hotness).
    ImportFailureReason Reason;
    // The number of times we tried to import the candidate but failed.
    unsigned Attempts;
    ImportFailureInfo(ValueInfo VI, CalleeInfo::HotnessType MaxHotness,
                      ImportFailureReason Reason, unsigned Attempts)
        : VI(VI), MaxHotness(MaxHotness), Reason(Reason), Attempts(Attempts) {}
  };

  /// Map of callee GUID considered for import into a given module to a pair
  /// consisting of the largest threshold applied when deciding whether to
  /// import it and, if we decided to import, a pointer to the summary instance
  /// imported. If we decided not to import, the summary will be nullptr.
  using ImportThresholdsTy =
      DenseMap<GlobalValue::GUID,
               std::tuple<unsigned, const GlobalValueSummary *,
                          std::unique_ptr<ImportFailureInfo>>>;

  /// The map contains an entry for every module to import from, the key being
  /// the module identifier to pass to the ModuleLoader. The value is the set of
  /// functions to import.
  using ImportMapTy = StringMap<FunctionsToImportTy>;

  /// The set contains an entry for every global value the module exports.
  using ExportSetTy = DenseSet<ValueInfo>;

  /// A function of this type is used to load modules referenced by the index.
  using ModuleLoaderTy =
      std::function<Expected<std::unique_ptr<Module>>(StringRef Identifier)>;

  /// Create a Function Importer.
  FunctionImporter(const ModuleSummaryIndex &Index, ModuleLoaderTy ModuleLoader,
                   bool ClearDSOLocalOnDeclarations)
      : Index(Index), ModuleLoader(std::move(ModuleLoader)),
        ClearDSOLocalOnDeclarations(ClearDSOLocalOnDeclarations) {}

  /// Import functions in Module \p M based on the supplied import list.
  Expected<bool> importFunctions(Module &M, const ImportMapTy &ImportList);

private:
  /// The summaries index used to trigger importing.
  const ModuleSummaryIndex &Index;

  /// Factory function to load a Module for a given identifier
  ModuleLoaderTy ModuleLoader;

  /// See the comment of ClearDSOLocalOnDeclarations in
  /// Utils/FunctionImportUtils.h.
  bool ClearDSOLocalOnDeclarations;
};
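
// Illustrative sketch (not part of the upstream header): constructing an
// importer with a lambda module loader. `parseBitcodeFileForModule` is a
// hypothetical helper, not an LLVM API; `Index`, `M`, and `ImportList` are
// assumed from the surrounding ThinLTO backend code:
//
//   FunctionImporter::ModuleLoaderTy Loader =
//       [&](StringRef Identifier) -> Expected<std::unique_ptr<Module>> {
//     return parseBitcodeFileForModule(Identifier);
//   };
//   FunctionImporter Importer(Index, std::move(Loader),
//                             /*ClearDSOLocalOnDeclarations=*/false);
//   Expected<bool> Changed = Importer.importFunctions(M, ImportList);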

/// The function importing pass
class FunctionImportPass : public PassInfoMixin<FunctionImportPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// Compute all the imports and exports for every module in the Index.
///
/// \p ModuleToDefinedGVSummaries contains for each Module a map
/// (GUID -> Summary) for every global defined in the module.
///
/// \p isPrevailing is a callback that will be called with a global value's GUID
/// and summary and should return whether the module corresponding to the
/// summary contains the linker-prevailing copy of that value.
///
/// \p ImportLists will be populated with an entry for every Module we are
/// importing into. This entry is itself a map that can be passed to
/// FunctionImporter::importFunctions() above (see description there).
///
/// \p ExportLists contains for each Module the set of globals (GUID) that will
/// be imported by another module, or referenced by such a function. I.e. this
/// is the set of globals that need to be promoted/renamed appropriately.
void ComputeCrossModuleImport(
    const ModuleSummaryIndex &Index,
    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        isPrevailing,
    StringMap<FunctionImporter::ImportMapTy> &ImportLists,
    StringMap<FunctionImporter::ExportSetTy> &ExportLists);

/// Compute all the imports for the given module using the Index.
///
/// \p isPrevailing is a callback that will be called with a global value's GUID
/// and summary and should return whether the module corresponding to the
/// summary contains the linker-prevailing copy of that value.
///
/// \p ImportList will be populated with a map that can be passed to
/// FunctionImporter::importFunctions() above (see description there).
void ComputeCrossModuleImportForModule(
    StringRef ModulePath,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        isPrevailing,
    const ModuleSummaryIndex &Index, FunctionImporter::ImportMapTy &ImportList);

/// Mark all external summaries in \p Index for import into the given module.
/// Used for distributed builds using a distributed index.
///
/// \p ImportList will be populated with a map that can be passed to
/// FunctionImporter::importFunctions() above (see description there).
void ComputeCrossModuleImportForModuleFromIndex(
    StringRef ModulePath, const ModuleSummaryIndex &Index,
    FunctionImporter::ImportMapTy &ImportList);

/// PrevailingType enum used as a return type of callback passed
/// to computeDeadSymbolsAndUpdateIndirectCalls. Yes and No are used when the
/// status is explicitly set by symbol resolution; otherwise the status is
/// Unknown.
enum class PrevailingType { Yes, No, Unknown };

/// Update call edges for indirect calls to local functions added from
/// SamplePGO when needed. Normally this is done during
/// computeDeadSymbolsAndUpdateIndirectCalls, but can be called standalone
/// when that is not called (e.g. during testing).
void updateIndirectCalls(ModuleSummaryIndex &Index);

/// Compute all the symbols that are "dead": i.e., those that can't be reached
/// in the graph from any of the given symbols listed in
/// \p GUIDPreservedSymbols. Non-prevailing symbols are symbols without a
/// prevailing copy anywhere in the IR and are normally dead; the
/// \p isPrevailing predicate returns the status of a symbol.
/// Also update call edges for indirect calls to local functions added from
/// SamplePGO when needed.
void computeDeadSymbolsAndUpdateIndirectCalls(
    ModuleSummaryIndex &Index,
    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
    function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing);

/// Compute dead symbols and run constant propagation in combined index
/// after that.
void computeDeadSymbolsWithConstProp(
    ModuleSummaryIndex &Index,
    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
    function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing,
    bool ImportEnabled);

/// Converts value \p GV to a declaration, or replaces it with a declaration if
/// it is an alias. Returns true if converted, false if replaced.
bool convertToDeclaration(GlobalValue &GV);

/// Compute the set of summaries needed for a ThinLTO backend compilation of
/// \p ModulePath.
///
/// This includes summaries from that module (in case any global summary based
/// optimizations were recorded) and from any definitions in other modules that
/// should be imported.
///
/// \p ModuleToSummariesForIndex will be populated with the needed summaries
/// from each required module path. Use a std::map instead of StringMap to get
/// stable order for bitcode emission.
void gatherImportedSummariesForModule(
    StringRef ModulePath,
    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
    const FunctionImporter::ImportMapTy &ImportList,
    std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);

/// Emit into \p OutputFilename the files module \p ModulePath will import from.
std::error_code EmitImportsFiles(
    StringRef ModulePath, StringRef OutputFilename,
    const std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex);

/// Based on the information recorded in the summaries during global
/// summary-based analysis:
/// 1. Resolve prevailing symbol linkages and constrain visibility (CanAutoHide
///    and consider visibility from other definitions for ELF) in \p TheModule
/// 2. (optional) Apply propagated function attributes to \p TheModule if
///    PropagateAttrs is true
void thinLTOFinalizeInModule(Module &TheModule,
                             const GVSummaryMapTy &DefinedGlobals,
                             bool PropagateAttrs);

/// Internalize \p TheModule based on the information recorded in the summaries
/// during global summary-based analysis.
void thinLTOInternalizeModule(Module &TheModule,
                              const GVSummaryMapTy &DefinedGlobals);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_FUNCTIONIMPORT_H
//===- BlockExtractor.h - Extracts blocks into their own functions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass extracts the specified basic blocks from the module into their
// own functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_BLOCKEXTRACTOR_H
#define LLVM_TRANSFORMS_IPO_BLOCKEXTRACTOR_H

#include <vector>

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class BasicBlock;

struct BlockExtractorPass : PassInfoMixin<BlockExtractorPass> {
  BlockExtractorPass(std::vector<std::vector<BasicBlock *>> &&GroupsOfBlocks,
                     bool EraseFunctions);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

private:
  std::vector<std::vector<BasicBlock *>> GroupsOfBlocks;
  bool EraseFunctions;
};
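
// Illustrative sketch: constructing the pass for two hypothetical blocks
// `BB0` and `BB1` (placeholders, not defined in this header).
// \code
//   std::vector<std::vector<BasicBlock *>> Groups = {{BB0}, {BB1}};
//   ModulePassManager MPM;
//   MPM.addPass(BlockExtractorPass(std::move(Groups),
//                                  /*EraseFunctions=*/false));
// \endcode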
} // namespace llvm

#endif // LLVM_TRANSFORMS_IPO_BLOCKEXTRACTOR_H
//===- Inliner.h - Inliner pass and infrastructure --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_INLINER_H
#define LLVM_TRANSFORMS_IPO_INLINER_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

/// The inliner pass for the new pass manager.
///
/// This pass wires together the inlining utilities and the inline cost
/// analysis into a CGSCC pass. It considers every call in every function in
/// the SCC and tries to inline if profitable. It can be tuned with a number of
/// parameters to control what cost model is used and what tradeoffs are made
/// when making the decision.
///
/// It should be noted that the legacy inliners do considerably more than this
/// inliner pass does. They provide logic for manually merging allocas, and
/// doing considerable DCE including the DCE of dead functions. This pass makes
/// every attempt to be simpler. DCE of functions requires complex reasoning
/// about comdat groups, etc. Instead, it is expected that other more focused
/// passes be composed to achieve the same end result.
class InlinerPass : public PassInfoMixin<InlinerPass> {
public:
  InlinerPass(bool OnlyMandatory = false,
              ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
      : OnlyMandatory(OnlyMandatory), LTOPhase(LTOPhase) {}
  InlinerPass(InlinerPass &&Arg) = default;

  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  InlineAdvisor &getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                            FunctionAnalysisManager &FAM, Module &M);
  std::unique_ptr<InlineAdvisor> OwnedAdvisor;
  const bool OnlyMandatory;
  const ThinOrFullLTOPhase LTOPhase;
};

/// Module pass, wrapping the inliner pass. This works in conjunction with the
/// InlineAdvisorAnalysis to facilitate inlining decisions that take
/// module-wide state into account and that need to track state across inliner
/// runs for a given module. An InlineAdvisor is configured and kept alive for
/// the duration of ModuleInlinerWrapperPass::run.
class ModuleInlinerWrapperPass
    : public PassInfoMixin<ModuleInlinerWrapperPass> {
public:
  ModuleInlinerWrapperPass(
      InlineParams Params = getInlineParams(), bool MandatoryFirst = true,
      InlineContext IC = {},
      InliningAdvisorMode Mode = InliningAdvisorMode::Default,
      unsigned MaxDevirtIterations = 0);
  ModuleInlinerWrapperPass(ModuleInlinerWrapperPass &&Arg) = default;

  PreservedAnalyses run(Module &, ModuleAnalysisManager &);

  /// Allow adding more CGSCC passes, besides inlining. This should be called
  /// before run is called, as part of pass pipeline building.
  CGSCCPassManager &getPM() { return PM; }

  /// Add a module pass that runs before the CGSCC passes.
  template <class T> void addModulePass(T Pass) {
    MPM.addPass(std::move(Pass));
  }

  /// Add a module pass that runs after the CGSCC passes.
  template <class T> void addLateModulePass(T Pass) {
    AfterCGMPM.addPass(std::move(Pass));
  }

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  const InlineParams Params;
  const InlineContext IC;
  const InliningAdvisorMode Mode;
  const unsigned MaxDevirtIterations;
  // TODO: Clean this up so we only have one ModulePassManager.
  CGSCCPassManager PM;
  ModulePassManager MPM;
  ModulePassManager AfterCGMPM;
};
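
// Illustrative sketch: typical pipeline construction. The extra passes shown
// (PostOrderFunctionAttrsPass, GlobalDCEPass) are examples only; any CGSCC or
// module pass can be added the same way.
// \code
//   ModuleInlinerWrapperPass MIWP(getInlineParams(/*Threshold=*/275));
//   MIWP.getPM().addPass(PostOrderFunctionAttrsPass());
//   MIWP.addLateModulePass(GlobalDCEPass());
//   ModulePassManager MPM;
//   MPM.addPass(std::move(MIWP));
// \endcode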
} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_INLINER_H
//===- ConstantMerge.h - Merge duplicate global constants -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface to a pass that merges duplicate global
// constants together into a single constant that is shared.  This is useful
// because some passes (e.g., TraceValues) insert a lot of string constants into
// the program, regardless of whether or not an existing string is available.
//
// Algorithm: ConstantMerge is designed to build up a map of available constants
// and eliminate duplicates when it is initialized.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
#define LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// A pass that merges duplicate global constants into a single constant.
class ConstantMergePass : public PassInfoMixin<ConstantMergePass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_CONSTANTMERGE_H
//===- SCCP.h - Sparse Conditional Constant Propagation ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements interprocedural sparse conditional constant
// propagation and merging.
//
// Specifically, this:
//   * Assumes values are constant unless proven otherwise
//   * Assumes BasicBlocks are dead unless proven otherwise
//   * Proves values to be constant, and replaces them with constants
//   * Proves conditional branches to be unconditional
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_SCCP_H
#define LLVM_TRANSFORMS_IPO_SCCP_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// A set of parameters to control various transforms performed by the IPSCCP
/// pass. Each of the boolean parameters can be set to:
///   true - enable the transformation.
///   false - disable the transformation.
/// The intended use is to create a default object, modify parameters with
/// additional setters, and then pass it to IPSCCP.
struct IPSCCPOptions {
  bool AllowFuncSpec;

  IPSCCPOptions(bool AllowFuncSpec = true) : AllowFuncSpec(AllowFuncSpec) {}

  /// Enables or disables Specialization of Functions.
  IPSCCPOptions &setFuncSpec(bool FuncSpec) {
    AllowFuncSpec = FuncSpec;
    return *this;
  }
};
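
// Illustrative sketch of the intended use described above: start from the
// defaults, adjust with setters, then hand the options to the pass.
// \code
//   IPSCCPOptions Opts;
//   Opts.setFuncSpec(false); // disable function specialization
//   ModulePassManager MPM;
//   MPM.addPass(IPSCCPPass(Opts));
// \endcode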

/// Pass to perform interprocedural constant propagation.
class IPSCCPPass : public PassInfoMixin<IPSCCPPass> {
  IPSCCPOptions Options;

public:
  IPSCCPPass() = default;

  IPSCCPPass(IPSCCPOptions Options) : Options(Options) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  bool isFuncSpecEnabled() const { return Options.AllowFuncSpec; }
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_SCCP_H
//===- MergeFunctions.h - Merge Identical Functions -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for functions that are structurally identical and merges
// them, replacing all uses of the redundant functions with uses of a single
// canonical copy in order to reduce code size.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_MERGEFUNCTIONS_H
#define LLVM_TRANSFORMS_IPO_MERGEFUNCTIONS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Merge identical functions.
class MergeFunctionsPass : public PassInfoMixin<MergeFunctionsPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_MERGEFUNCTIONS_H
//===- PartialInlining.h - Inline parts of functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs partial inlining, typically by inlining an if statement
// that surrounds the body of the function.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
#define LLVM_TRANSFORMS_IPO_PARTIALINLINING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// Pass to perform partial inlining of functions.
class PartialInlinerPass : public PassInfoMixin<PartialInlinerPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_PARTIALINLINING_H
//===- FunctionAttrs.h - Compute function attributes ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Provides passes for computing function attributes based on interprocedural
/// analyses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
#define LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class GlobalValueSummary;
class ModuleSummaryIndex;
class Function;
class Module;

/// Returns the memory access properties of this copy of the function.
MemoryEffects computeFunctionBodyMemoryAccess(Function &F, AAResults &AAR);

/// Propagate function attributes for function summaries along the index's
/// callgraph during thinlink
bool thinLTOPropagateFunctionAttrs(
    ModuleSummaryIndex &Index,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        isPrevailing);

/// Computes function attributes in post-order over the call graph.
///
/// By operating in post-order, this pass computes precise attributes for
/// called functions prior to processing their callers. This "bottom-up"
/// approach allows powerful interprocedural inference of function attributes
/// like memory access patterns, etc. It can discover functions that do not
/// access memory, or only read memory, and give them the readnone/readonly
/// attribute. It also discovers function arguments that are not captured by
/// the function and marks them with the nocapture attribute.
struct PostOrderFunctionAttrsPass : PassInfoMixin<PostOrderFunctionAttrsPass> {
  PostOrderFunctionAttrsPass(bool SkipNonRecursive = false)
      : SkipNonRecursive(SkipNonRecursive) {}
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  bool SkipNonRecursive;
};
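
// Illustrative sketch: since this is a CGSCC pass, it is adapted into a
// module pipeline with the standard adaptor from CGSCCPassManager.h.
// \code
//   ModulePassManager MPM;
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
//       PostOrderFunctionAttrsPass()));
// \endcode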

/// A pass to do RPO deduction and propagation of function attributes.
///
/// This pass provides a general RPO or "top down" propagation of
/// function attributes. For a few (rare) cases, we can deduce significantly
/// more about function attributes by working in RPO, so this pass
/// provides the complement to the post-order pass above where the majority of
/// deduction is performed.
// FIXME: Currently there is no RPO CGSCC pass structure to slide into and so
// this is a boring module pass, but eventually it should be an RPO CGSCC pass
// when such infrastructure is available.
class ReversePostOrderFunctionAttrsPass
    : public PassInfoMixin<ReversePostOrderFunctionAttrsPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_FUNCTIONATTRS_H
//===- CalledValuePropagation.h - Propagate called values -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a transformation that attaches !callees metadata to
// indirect call sites. For a given call site, the metadata, if present,
// indicates the set of functions the call site could possibly target at
// run-time. This metadata is added to indirect call sites when the set of
// possible targets can be determined by analysis and is known to be small. The
// analysis driving the transformation is similar to constant propagation and
// makes use of the generic sparse propagation solver.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
#define LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class CalledValuePropagationPass
    : public PassInfoMixin<CalledValuePropagationPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_IPO_CALLEDVALUEPROPAGATION_H
//===-- ProfiledCallGraph.h - Profiled Call Graph ----------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_PROFILEDCALLGRAPH_H
#define LLVM_TRANSFORMS_IPO_PROFILEDCALLGRAPH_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Transforms/IPO/SampleContextTracker.h"
#include <queue>
#include <set>

namespace llvm {
namespace sampleprof {

struct ProfiledCallGraphNode;

struct ProfiledCallGraphEdge {
  ProfiledCallGraphEdge(ProfiledCallGraphNode *Source,
                        ProfiledCallGraphNode *Target, uint64_t Weight)
      : Source(Source), Target(Target), Weight(Weight) {}
  ProfiledCallGraphNode *Source;
  ProfiledCallGraphNode *Target;
  uint64_t Weight;

  // The call destination is the only important data here, so allow
  // transparently unwrapping into it.
  operator ProfiledCallGraphNode *() const { return Target; }
};

struct ProfiledCallGraphNode {

  // Sort edges by callee names only, since all edges to be compared are from
  // the same caller. Edge weights are not considered either, because for the
  // same callee only the edge with the largest weight is added to the edge set.
  struct ProfiledCallGraphEdgeComparer {
    bool operator()(const ProfiledCallGraphEdge &L,
                    const ProfiledCallGraphEdge &R) const {
      return L.Target->Name < R.Target->Name;
    }
  };

  using edge = ProfiledCallGraphEdge;
  using edges = std::set<edge, ProfiledCallGraphEdgeComparer>;
  using iterator = edges::iterator;
  using const_iterator = edges::const_iterator;

  ProfiledCallGraphNode(StringRef FName = StringRef()) : Name(FName) {}

  StringRef Name;
  edges Edges;
};

class ProfiledCallGraph {
public:
  using iterator = ProfiledCallGraphNode::iterator;

  // Constructor for non-CS profile.
  ProfiledCallGraph(SampleProfileMap &ProfileMap,
                    uint64_t IgnoreColdCallThreshold = 0) {
    assert(!FunctionSamples::ProfileIsCS &&
           "CS flat profile is not handled here");
    for (const auto &Samples : ProfileMap) {
      addProfiledCalls(Samples.second);
    }

    // Trim edges with weight up to `IgnoreColdCallThreshold`. This aims
    // for a more stable call graph with "deterministic" edges from run to run.
    trimColdEdges(IgnoreColdCallThreshold);
  }

  // Constructor for CS profile.
  ProfiledCallGraph(SampleContextTracker &ContextTracker,
                    uint64_t IgnoreColdCallThreshold = 0) {
    // BFS traverse the context profile trie to add call edges for calls shown
    // in context.
    std::queue<ContextTrieNode *> Queue;
    for (auto &Child : ContextTracker.getRootContext().getAllChildContext()) {
      ContextTrieNode *Callee = &Child.second;
      addProfiledFunction(ContextTracker.getFuncNameFor(Callee));
      Queue.push(Callee);
    }

    while (!Queue.empty()) {
      ContextTrieNode *Caller = Queue.front();
      Queue.pop();
      FunctionSamples *CallerSamples = Caller->getFunctionSamples();

      // Add calls for context.
      // Note that callsite target samples are completely ignored since they can
      // conflict with the context edges, which are formed by context
      // compression during profile generation, for cyclic SCCs. This may
      // further result in an SCC order incompatible with the purely
      // context-based one, which may in turn block context-based inlining.
      for (auto &Child : Caller->getAllChildContext()) {
        ContextTrieNode *Callee = &Child.second;
        addProfiledFunction(ContextTracker.getFuncNameFor(Callee));
        Queue.push(Callee);

        // Fetch edge weight from the profile.
        uint64_t Weight;
        FunctionSamples *CalleeSamples = Callee->getFunctionSamples();
        if (!CalleeSamples || !CallerSamples) {
          Weight = 0;
        } else {
          uint64_t CalleeEntryCount = CalleeSamples->getHeadSamplesEstimate();
          uint64_t CallsiteCount = 0;
          LineLocation Callsite = Callee->getCallSiteLoc();
          if (auto CallTargets = CallerSamples->findCallTargetMapAt(Callsite)) {
            SampleRecord::CallTargetMap &TargetCounts = CallTargets.get();
            auto It = TargetCounts.find(CalleeSamples->getName());
            if (It != TargetCounts.end())
              CallsiteCount = It->second;
          }
          Weight = std::max(CallsiteCount, CalleeEntryCount);
        }

        addProfiledCall(ContextTracker.getFuncNameFor(Caller),
                        ContextTracker.getFuncNameFor(Callee), Weight);
      }
    }

    // Trim edges with weight up to `IgnoreColdCallThreshold`. This aims
    // for a more stable call graph with "deterministic" edges from run to run.
    trimColdEdges(IgnoreColdCallThreshold);
  }

  iterator begin() { return Root.Edges.begin(); }
  iterator end() { return Root.Edges.end(); }
  ProfiledCallGraphNode *getEntryNode() { return &Root; }

  void addProfiledFunction(StringRef Name) {
    if (!ProfiledFunctions.count(Name)) {
      // Link to synthetic root to make sure every node is reachable
      // from root. This does not affect SCC order.
      ProfiledFunctions[Name] = ProfiledCallGraphNode(Name);
      Root.Edges.emplace(&Root, &ProfiledFunctions[Name], 0);
    }
  }

private:
  void addProfiledCall(StringRef CallerName, StringRef CalleeName,
                       uint64_t Weight = 0) {
    assert(ProfiledFunctions.count(CallerName));
    auto CalleeIt = ProfiledFunctions.find(CalleeName);
    if (CalleeIt == ProfiledFunctions.end())
      return;
    ProfiledCallGraphEdge Edge(&ProfiledFunctions[CallerName],
                               &CalleeIt->second, Weight);
    auto &Edges = ProfiledFunctions[CallerName].Edges;
    auto EdgeIt = Edges.find(Edge);
    if (EdgeIt == Edges.end()) {
      Edges.insert(Edge);
    } else {
      // Accumulate weight to the existing edge.
      Edge.Weight += EdgeIt->Weight;
      Edges.erase(EdgeIt);
      Edges.insert(Edge);
    }
  }

  void addProfiledCalls(const FunctionSamples &Samples) {
    addProfiledFunction(Samples.getFuncName());

    for (const auto &Sample : Samples.getBodySamples()) {
      for (const auto &[Target, Frequency] : Sample.second.getCallTargets()) {
        addProfiledFunction(Target);
        addProfiledCall(Samples.getFuncName(), Target, Frequency);
      }
    }

    for (const auto &CallsiteSamples : Samples.getCallsiteSamples()) {
      for (const auto &InlinedSamples : CallsiteSamples.second) {
        addProfiledFunction(InlinedSamples.first);
        addProfiledCall(Samples.getFuncName(), InlinedSamples.first,
                        InlinedSamples.second.getHeadSamplesEstimate());
        addProfiledCalls(InlinedSamples.second);
      }
    }
  }

  // Trim edges with weight up to `Threshold`. Do not trim anything if
  // `Threshold` is zero.
  void trimColdEdges(uint64_t Threshold = 0) {
    if (!Threshold)
      return;

    for (auto &Node : ProfiledFunctions) {
      auto &Edges = Node.second.Edges;
      auto I = Edges.begin();
      while (I != Edges.end()) {
        if (I->Weight <= Threshold)
          I = Edges.erase(I);
        else
          I++;
      }
    }
  }

  ProfiledCallGraphNode Root;
  StringMap<ProfiledCallGraphNode> ProfiledFunctions;
};
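
// Illustrative sketch: the GraphTraits specializations below make the graph
// usable with LLVM's generic graph algorithms, e.g. bottom-up SCC iteration
// (assumes llvm/ADT/SCCIterator.h is included and `ProfileMap` is populated).
// \code
//   ProfiledCallGraph PCG(ProfileMap);
//   for (auto I = scc_begin(&PCG); !I.isAtEnd(); ++I)
//     for (ProfiledCallGraphNode *Node : *I)
//       dbgs() << Node->Name << "\n";
// \endcode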

} // end namespace sampleprof

template <> struct GraphTraits<ProfiledCallGraphNode *> {
  using NodeType = ProfiledCallGraphNode;
  using NodeRef = ProfiledCallGraphNode *;
  using EdgeType = NodeType::edge;
  using ChildIteratorType = NodeType::const_iterator;

  static NodeRef getEntryNode(NodeRef PCGN) { return PCGN; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Edges.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Edges.end(); }
};

template <>
struct GraphTraits<ProfiledCallGraph *>
    : public GraphTraits<ProfiledCallGraphNode *> {
  static NodeRef getEntryNode(ProfiledCallGraph *PCG) {
    return PCG->getEntryNode();
  }

  static ChildIteratorType nodes_begin(ProfiledCallGraph *PCG) {
    return PCG->begin();
  }

  static ChildIteratorType nodes_end(ProfiledCallGraph *PCG) {
    return PCG->end();
  }
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_PROFILEDCALLGRAPH_H
//===- ThinLTOBitcodeWriter.h - Bitcode writing pass for ThinLTO ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass prepares a module containing type metadata for ThinLTO by splitting
// it into regular and thin LTO parts if possible, and writing both parts to
// a multi-module bitcode file. Modules that do not contain type metadata are
// written unmodified as a single module.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H
#define LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H

#include <llvm/IR/PassManager.h>

namespace llvm {
class Module;
class raw_ostream;

class ThinLTOBitcodeWriterPass
    : public PassInfoMixin<ThinLTOBitcodeWriterPass> {
  raw_ostream &OS;
  raw_ostream *ThinLinkOS;

public:
  // Writes bitcode to OS. Also writes the thin link file to ThinLinkOS if it
  // is not nullptr.
  ThinLTOBitcodeWriterPass(raw_ostream &OS, raw_ostream *ThinLinkOS)
      : OS(OS), ThinLinkOS(ThinLinkOS) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  static bool isRequired() { return true; }
};
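
// Illustrative sketch: writing split ThinLTO bitcode plus a thin link file.
// `OS` and `ThinLinkOS` are placeholders for caller-provided raw_ostreams.
// \code
//   ModulePassManager MPM;
//   MPM.addPass(ThinLTOBitcodeWriterPass(OS, &ThinLinkOS));
// \endcode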

} // namespace llvm

#endif // LLVM_TRANSFORMS_IPO_THINLTOBITCODEWRITER_H
//=- SyntheticCountsPropagation.h - Propagate function counts -----*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_SYNTHETICCOUNTSPROPAGATION_H
#define LLVM_TRANSFORMS_IPO_SYNTHETICCOUNTSPROPAGATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;

class SyntheticCountsPropagation
    : public PassInfoMixin<SyntheticCountsPropagation> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
};
} // namespace llvm
#endif // LLVM_TRANSFORMS_IPO_SYNTHETICCOUNTSPROPAGATION_H
//===- IPO/OpenMPOpt.h - Collection of OpenMP optimizations -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_OPENMPOPT_H
#define LLVM_TRANSFORMS_IPO_OPENMPOPT_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

namespace omp {

/// Summary of a kernel (=entry point for target offloading).
using Kernel = Function *;

/// Set of kernels in the module
using KernelSet = SetVector<Kernel>;

/// Helper to determine if \p M contains OpenMP.
bool containsOpenMP(Module &M);

/// Helper to determine if \p M is an OpenMP target offloading device module.
bool isOpenMPDevice(Module &M);

/// Return true iff \p Fn is a GPU kernel; \p Fn has the "kernel" attribute.
bool isKernel(Function &Fn);

/// Get OpenMP device kernels in \p M.
KernelSet getDeviceKernels(Module &M);
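
// Illustrative sketch: enumerating device kernels (assumes the caller has a
// Module `M` and that llvm/Support/Debug.h is available for dbgs()).
// \code
//   if (omp::isOpenMPDevice(M))
//     for (omp::Kernel K : omp::getDeviceKernels(M))
//       dbgs() << "kernel: " << K->getName() << "\n";
// \endcode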

} // namespace omp

/// OpenMP optimizations pass.
class OpenMPOptPass : public PassInfoMixin<OpenMPOptPass> {
public:
  OpenMPOptPass() = default;
  OpenMPOptPass(ThinOrFullLTOPhase LTOPhase) : LTOPhase(LTOPhase) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

private:
  const ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None;
};

class OpenMPOptCGSCCPass : public PassInfoMixin<OpenMPOptCGSCCPass> {
public:
  OpenMPOptCGSCCPass() = default;
  OpenMPOptCGSCCPass(ThinOrFullLTOPhase LTOPhase) : LTOPhase(LTOPhase) {}

  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);

private:
  const ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None;
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_OPENMPOPT_H
//===-- GlobalDCE.h - DCE unreachable internal functions ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transform is designed to eliminate unreachable internal globals from the
// program.  It uses an aggressive algorithm, searching out globals that are
// known to be alive.  After it finds all of the globals which are needed, it
// deletes whatever is left over.  This allows it to delete recursive chunks of
// the program which are unreachable.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_GLOBALDCE_H
#define LLVM_TRANSFORMS_IPO_GLOBALDCE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/PassManager.h"
#include <unordered_map>

namespace llvm {
class Comdat;
class Constant;
class Function;
class GlobalVariable;
class Metadata;
class Module;
class Value;

/// Pass to eliminate unreachable internal globals (functions and variables).
class GlobalDCEPass : public PassInfoMixin<GlobalDCEPass> {
public:
  GlobalDCEPass(bool InLTOPostLink = false) : InLTOPostLink(InLTOPostLink) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  bool InLTOPostLink = false;

  SmallPtrSet<GlobalValue*, 32> AliveGlobals;

  /// Global -> Global that uses this global.
  DenseMap<GlobalValue *, SmallPtrSet<GlobalValue *, 4>> GVDependencies;

  /// Constant -> Globals that use this global cache.
  std::unordered_map<Constant *, SmallPtrSet<GlobalValue *, 8>>
      ConstantDependenciesCache;

  /// Comdat -> Globals in that Comdat section.
  std::unordered_multimap<Comdat *, GlobalValue *> ComdatMembers;

  /// !type metadata -> set of (vtable, offset) pairs
  DenseMap<Metadata *, SmallSet<std::pair<GlobalVariable *, uint64_t>, 4>>
      TypeIdMap;

  // Global variables which are vtables, and which we have enough information
  // about to safely do dead virtual function elimination.
  SmallPtrSet<GlobalValue *, 32> VFESafeVTables;

  void UpdateGVDependencies(GlobalValue &GV);
  void MarkLive(GlobalValue &GV,
                SmallVectorImpl<GlobalValue *> *Updates = nullptr);

  // Dead virtual function elimination.
  void AddVirtualFunctionDependencies(Module &M);
  void ScanVTables(Module &M);
  void ScanTypeCheckedLoadIntrinsics(Module &M);
  void ScanVTableLoad(Function *Caller, Metadata *TypeId, uint64_t CallOffset);

  void ComputeDependencies(Value *V, SmallPtrSetImpl<GlobalValue *> &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_GLOBALDCE_H
//==- MemProfContextDisambiguation.h - Context Disambiguation ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements support for context disambiguation of allocation calls for profile
// guided heap optimization using memprof metadata. See implementation file for
// details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_MEMPROF_CONTEXT_DISAMBIGUATION_H
#define LLVM_TRANSFORMS_IPO_MEMPROF_CONTEXT_DISAMBIGUATION_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/PassManager.h"
#include <functional>

namespace llvm {
class GlobalValueSummary;
class Module;
class OptimizationRemarkEmitter;

class MemProfContextDisambiguation
    : public PassInfoMixin<MemProfContextDisambiguation> {
  /// Run the context disambiguator on \p M; returns true if any changes were
  /// made.
  bool processModule(
      Module &M,
      function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter);

  /// In the ThinLTO backend, apply the cloning decisions in ImportSummary to
  /// the IR.
  bool applyImport(Module &M);

  /// Import summary containing cloning decisions for the ThinLTO backend.
  const ModuleSummaryIndex *ImportSummary;

  // Owns the import summary specified by internal options for testing the
  // ThinLTO backend via opt (to simulate distributed ThinLTO).
  std::unique_ptr<ModuleSummaryIndex> ImportSummaryForTesting;

public:
  MemProfContextDisambiguation(const ModuleSummaryIndex *Summary = nullptr);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  void run(ModuleSummaryIndex &Index,
           function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
               isPrevailing);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_MEMPROF_CONTEXT_DISAMBIGUATION_H
//===-- EmbedBitcodePass.h - Embeds bitcode into global ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides a pass which clones the current module and runs the
/// provided pass pipeline on the clone. The optimized module is stored into a
/// global variable in the `.llvm.lto` section. Primarily, this pass is used
/// to support the FatLTO pipeline, but could be used to generate a bitcode
/// section for any arbitrary pass pipeline without changing the current module.
///
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_TRANSFORMS_IPO_EMBEDBITCODEPASS_H
#define LLVM_TRANSFORMS_IPO_EMBEDBITCODEPASS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;
class ModulePass;
class Pass;

struct EmbedBitcodeOptions {
  EmbedBitcodeOptions() : EmbedBitcodeOptions(false, false) {}
  EmbedBitcodeOptions(bool IsThinLTO, bool EmitLTOSummary)
      : IsThinLTO(IsThinLTO), EmitLTOSummary(EmitLTOSummary) {}
  bool IsThinLTO;
  bool EmitLTOSummary;
};

/// Pass embeds a copy of the module optimized with the provided pass pipeline
/// into a global variable.
class EmbedBitcodePass : public PassInfoMixin<EmbedBitcodePass> {
  bool IsThinLTO;
  bool EmitLTOSummary;
  ModulePassManager MPM;

public:
  EmbedBitcodePass(EmbedBitcodeOptions Opts)
      : EmbedBitcodePass(Opts.IsThinLTO, Opts.EmitLTOSummary,
                         ModulePassManager()) {}
  EmbedBitcodePass(bool IsThinLTO, bool EmitLTOSummary, ModulePassManager &&MPM)
      : IsThinLTO(IsThinLTO), EmitLTOSummary(EmitLTOSummary),
        MPM(std::move(MPM)) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);

  static bool isRequired() { return true; }
};
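
// Illustrative sketch: embedding a ThinLTO-optimized copy of the module,
// with a caller-chosen inner pipeline (GlobalDCEPass here is an example).
// \code
//   ModulePassManager Inner;
//   Inner.addPass(GlobalDCEPass());
//   ModulePassManager MPM;
//   MPM.addPass(EmbedBitcodePass(/*IsThinLTO=*/true, /*EmitLTOSummary=*/true,
//                                std::move(Inner)));
// \endcode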

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_EMBEDBITCODEPASS_H
//===-- CrossDSOCFI.h - Externalize this module's CFI checks ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass exports all llvm.bitset's found in the module in the form of a
// __cfi_check function, which can be used to verify cross-DSO call targets.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H
#define LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class CrossDSOCFIPass : public PassInfoMixin<CrossDSOCFIPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
} // end namespace llvm
#endif // LLVM_TRANSFORMS_IPO_CROSSDSOCFI_H

//===- ElimAvailExtern.h - Eliminate Available Externally ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transform is designed to eliminate available external global
// definitions from the program, turning them into declarations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
#define LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

/// A pass that transforms 'available externally' global definitions into
/// declarations.
class EliminateAvailableExternallyPass
    : public PassInfoMixin<EliminateAvailableExternallyPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_ELIMAVAILEXTERN_H
//===- WholeProgramDevirt.h - Whole-program devirt pass ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines parts of the whole-program devirtualization pass
// implementation that may be usefully unit tested.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
#define LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H

#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/PassManager.h"
#include <cassert>
#include <cstdint>
#include <map>
#include <set>
#include <utility>
#include <vector>

namespace llvm {
class Module;

template <typename T> class ArrayRef;
template <typename T> class MutableArrayRef;
class GlobalVariable;
class ModuleSummaryIndex;
struct ValueInfo;

namespace wholeprogramdevirt {

// A bit vector that keeps track of which bits are used. We use this to
// pack constant values compactly before and after each virtual table.
struct AccumBitVector {
  std::vector<uint8_t> Bytes;

  // Bits in BytesUsed[I] are 1 if matching bit in Bytes[I] is used, 0 if not.
  std::vector<uint8_t> BytesUsed;

  std::pair<uint8_t *, uint8_t *> getPtrToData(uint64_t Pos, uint8_t Size) {
    if (Bytes.size() < Pos + Size) {
      Bytes.resize(Pos + Size);
      BytesUsed.resize(Pos + Size);
    }
    return std::make_pair(Bytes.data() + Pos, BytesUsed.data() + Pos);
  }

  // Set little-endian value Val with size Size at bit position Pos,
  // and mark bytes as used.
  void setLE(uint64_t Pos, uint64_t Val, uint8_t Size) {
    assert(Pos % 8 == 0);
    auto DataUsed = getPtrToData(Pos / 8, Size);
    for (unsigned I = 0; I != Size; ++I) {
      DataUsed.first[I] = Val >> (I * 8);
      assert(!DataUsed.second[I]);
      DataUsed.second[I] = 0xff;
    }
  }

  // Set big-endian value Val with size Size at bit position Pos,
  // and mark bytes as used.
  void setBE(uint64_t Pos, uint64_t Val, uint8_t Size) {
    assert(Pos % 8 == 0);
    auto DataUsed = getPtrToData(Pos / 8, Size);
    for (unsigned I = 0; I != Size; ++I) {
      DataUsed.first[Size - I - 1] = Val >> (I * 8);
      assert(!DataUsed.second[Size - I - 1]);
      DataUsed.second[Size - I - 1] = 0xff;
    }
  }

  // Set bit at bit position Pos to b and mark bit as used.
  void setBit(uint64_t Pos, bool b) {
    auto DataUsed = getPtrToData(Pos / 8, 1);
    if (b)
      *DataUsed.first |= 1 << (Pos % 8);
    assert(!(*DataUsed.second & (1 << Pos % 8)));
    *DataUsed.second |= 1 << (Pos % 8);
  }
};
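
// Illustrative sketch of the byte/bit packing above: store a 16-bit
// little-endian value at bit position 32 (byte 4), then set the bit at
// position 48.
// \code
//   AccumBitVector BV;
//   BV.setLE(32, 0xBEEF, 2); // Bytes[4] = 0xEF, Bytes[5] = 0xBE
//   BV.setBit(48, true);     // sets bit 0 of Bytes[6]
// \endcode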

// The bits that will be stored before and after a particular vtable.
struct VTableBits {
  // The vtable global.
  GlobalVariable *GV;

  // Cache of the vtable's size in bytes.
  uint64_t ObjectSize = 0;

  // The bit vector that will be laid out before the vtable. Note that these
  // bytes are stored in reverse order until the globals are rebuilt. This means
  // that any values in the array must be stored using the opposite endianness
  // from the target.
  AccumBitVector Before;

  // The bit vector that will be laid out after the vtable.
  AccumBitVector After;
};

// Information about a member of a particular type identifier.
struct TypeMemberInfo {
  // The VTableBits for the vtable.
  VTableBits *Bits;

  // The offset in bytes from the start of the vtable (i.e. the address point).
  uint64_t Offset;

  bool operator<(const TypeMemberInfo &other) const {
    return Bits < other.Bits || (Bits == other.Bits && Offset < other.Offset);
  }
};

// A virtual call target, i.e. an entry in a particular vtable.
struct VirtualCallTarget {
  VirtualCallTarget(GlobalValue *Fn, const TypeMemberInfo *TM);

  // For testing only.
  VirtualCallTarget(const TypeMemberInfo *TM, bool IsBigEndian)
      : Fn(nullptr), TM(TM), IsBigEndian(IsBigEndian), WasDevirt(false) {}

  // The function (or an alias to a function) stored in the vtable.
  GlobalValue *Fn;

  // A pointer to the type identifier member through which the pointer to Fn is
  // accessed.
  const TypeMemberInfo *TM;

  // When doing virtual constant propagation, this stores the return value for
  // the function when passed the currently considered argument list.
  uint64_t RetVal;

  // Whether the target is big endian.
  bool IsBigEndian;

  // Whether at least one call site to the target was devirtualized.
  bool WasDevirt;

  // The minimum byte offset before the address point. This covers the bytes in
  // the vtable object before the address point (e.g. RTTI, access-to-top,
  // vtables for other base classes) and is equal to the offset from the start
  // of the vtable object to the address point.
  uint64_t minBeforeBytes() const { return TM->Offset; }

  // The minimum byte offset after the address point. This covers the bytes in
  // the vtable object after the address point (e.g. the vtable for the current
  // class and any later base classes) and is equal to the size of the vtable
  // object minus the offset from the start of the vtable object to the address
  // point.
  uint64_t minAfterBytes() const { return TM->Bits->ObjectSize - TM->Offset; }

  // The number of bytes allocated (for the vtable plus the byte array) before
  // the address point.
  uint64_t allocatedBeforeBytes() const {
    return minBeforeBytes() + TM->Bits->Before.Bytes.size();
  }

  // The number of bytes allocated (for the vtable plus the byte array) after
  // the address point.
  uint64_t allocatedAfterBytes() const {
    return minAfterBytes() + TM->Bits->After.Bytes.size();
  }

  // Set the bit at position Pos before the address point to RetVal.
  void setBeforeBit(uint64_t Pos) {
    assert(Pos >= 8 * minBeforeBytes());
    TM->Bits->Before.setBit(Pos - 8 * minBeforeBytes(), RetVal);
  }

  // Set the bit at position Pos after the address point to RetVal.
  void setAfterBit(uint64_t Pos) {
    assert(Pos >= 8 * minAfterBytes());
    TM->Bits->After.setBit(Pos - 8 * minAfterBytes(), RetVal);
  }

  // Set the bytes at position Pos before the address point to RetVal.
  // Because the bytes in Before are stored in reverse order, we use the
  // opposite endianness to the target.
  void setBeforeBytes(uint64_t Pos, uint8_t Size) {
    assert(Pos >= 8 * minBeforeBytes());
    if (IsBigEndian)
      TM->Bits->Before.setLE(Pos - 8 * minBeforeBytes(), RetVal, Size);
    else
      TM->Bits->Before.setBE(Pos - 8 * minBeforeBytes(), RetVal, Size);
  }

  // Set the bytes at position Pos after the address point to RetVal.
  void setAfterBytes(uint64_t Pos, uint8_t Size) {
    assert(Pos >= 8 * minAfterBytes());
    if (IsBigEndian)
      TM->Bits->After.setBE(Pos - 8 * minAfterBytes(), RetVal, Size);
    else
      TM->Bits->After.setLE(Pos - 8 * minAfterBytes(), RetVal, Size);
  }
};

// Find the minimum offset that we may store a value of size Size bits at. If
// IsAfter is set, look for an offset before the object, otherwise look for an
// offset after the object.
uint64_t findLowestOffset(ArrayRef<VirtualCallTarget> Targets, bool IsAfter,
                          uint64_t Size);

// Set the stored value in each of Targets to VirtualCallTarget::RetVal at the
// given allocation offset before the vtable address. Stores the computed
// byte/bit offset to OffsetByte/OffsetBit.
void setBeforeReturnValues(MutableArrayRef<VirtualCallTarget> Targets,
                           uint64_t AllocBefore, unsigned BitWidth,
                           int64_t &OffsetByte, uint64_t &OffsetBit);

// Set the stored value in each of Targets to VirtualCallTarget::RetVal at the
// given allocation offset after the vtable address. Stores the computed
// byte/bit offset to OffsetByte/OffsetBit.
void setAfterReturnValues(MutableArrayRef<VirtualCallTarget> Targets,
                          uint64_t AllocAfter, unsigned BitWidth,
                          int64_t &OffsetByte, uint64_t &OffsetBit);

} // end namespace wholeprogramdevirt

struct WholeProgramDevirtPass : public PassInfoMixin<WholeProgramDevirtPass> {
  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;
  bool UseCommandLine = false;
  WholeProgramDevirtPass()
      : ExportSummary(nullptr), ImportSummary(nullptr), UseCommandLine(true) {}
  WholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
                         const ModuleSummaryIndex *ImportSummary)
      : ExportSummary(ExportSummary), ImportSummary(ImportSummary) {
    assert(!(ExportSummary && ImportSummary));
  }
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};
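
// Illustrative sketch: running WPD during regular LTO with an export summary
// (`CombinedIndex` is a placeholder for the caller's ModuleSummaryIndex).
// \code
//   ModulePassManager MPM;
//   MPM.addPass(WholeProgramDevirtPass(&CombinedIndex, nullptr));
// \endcode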

struct VTableSlotSummary {
  StringRef TypeID;
  uint64_t ByteOffset;
};
bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO);
void updatePublicTypeTestCalls(Module &M,
                               bool WholeProgramVisibilityEnabledInLTO);
void updateVCallVisibilityInModule(
    Module &M, bool WholeProgramVisibilityEnabledInLTO,
    const DenseSet<GlobalValue::GUID> &DynamicExportSymbols);
void updateVCallVisibilityInIndex(
    ModuleSummaryIndex &Index, bool WholeProgramVisibilityEnabledInLTO,
    const DenseSet<GlobalValue::GUID> &DynamicExportSymbols);

/// Perform index-based whole program devirtualization on the \p Summary
/// index. Any devirtualized targets used by a type test in another module
/// are added to the \p ExportedGUIDs set. For any local devirtualized targets
/// only used within the defining module, the information necessary for
/// locating the corresponding WPD resolution is recorded for the ValueInfo
/// in case it is exported by cross module importing (in which case the
/// devirtualized target name will need adjustment).
void runWholeProgramDevirtOnIndex(
    ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap);

/// Call after cross-module importing to update the recorded single impl
/// devirt target names for any locals that were exported.
void updateIndexWPDForExports(
    ModuleSummaryIndex &Summary,
    function_ref<bool(StringRef, ValueInfo)> isExported,
    std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_WHOLEPROGRAMDEVIRT_H
//====- Internalize.h - Internalization API ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass loops over all of the functions and variables in the input module.
// If the function or variable does not need to be preserved according to the
// client supplied callback, it is marked as internal.
//
// This transformation would not be legal in a regular compilation, but it gets
// extra information from the linker about what is safe.
//
// For example: internalizing a function with external linkage is safe only if
// we are told that it is only used from within this module.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_INTERNALIZE_H
#define LLVM_TRANSFORMS_IPO_INTERNALIZE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/PassManager.h"
#include <functional>

namespace llvm {
class Module;

/// A pass that internalizes all functions and variables other than those that
/// must be preserved according to \c MustPreserveGV.
class InternalizePass : public PassInfoMixin<InternalizePass> {
  struct ComdatInfo {
    // The number of members. A comdat with one member which is not externally
    // visible can be freely dropped.
    size_t Size = 0;
    // Whether the comdat has an externally visible member.
    bool External = false;
  };

  bool IsWasm = false;

  /// Client supplied callback to control whether a symbol must be preserved.
  const std::function<bool(const GlobalValue &)> MustPreserveGV;
  /// Set of symbols private to the compiler that this pass should not touch.
  StringSet<> AlwaysPreserved;

  /// Return false if we're allowed to internalize this GV.
  bool shouldPreserveGV(const GlobalValue &GV);
  /// Internalize GV if it is possible to do so, i.e. it is not externally
  /// visible and is not a member of an externally visible comdat.
  bool maybeInternalize(GlobalValue &GV,
                        DenseMap<const Comdat *, ComdatInfo> &ComdatMap);
  /// If GV is part of a comdat and is externally visible, keep track of its
  /// comdat so that we don't internalize any of its members.
  void checkComdat(GlobalValue &GV,
                   DenseMap<const Comdat *, ComdatInfo> &ComdatMap);

public:
  InternalizePass();
  InternalizePass(std::function<bool(const GlobalValue &)> MustPreserveGV)
      : MustPreserveGV(std::move(MustPreserveGV)) {}

  /// Run the internalizer on \p TheModule; returns true if any changes were
  /// made.
  bool internalizeModule(Module &TheModule);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// Helper function to internalize functions and variables in a Module.
inline bool
internalizeModule(Module &TheModule,
                  std::function<bool(const GlobalValue &)> MustPreserveGV) {
  return InternalizePass(std::move(MustPreserveGV))
      .internalizeModule(TheModule);
}
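
// Illustrative sketch: internalize everything except "main". The callback
// returns true for symbols that must be preserved.
// \code
//   internalizeModule(M, [](const GlobalValue &GV) {
//     return GV.getName() == "main";
//   });
// \endcode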
} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_INTERNALIZE_H
//===- IROutliner.h - Extract similar IR regions into functions --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// The interface file for the IROutliner which is used by the IROutliner Pass.
//
// The outliner uses the IRSimilarityIdentifier to identify the similar regions
// of code.  It evaluates each set of IRSimilarityCandidates with an estimate of
// whether it will provide code size reduction.  Each region is extracted using
// the code extractor.  These extracted functions are consolidated into a single
// function and called from the extracted call site.
//
// For example:
// \code
//   %1 = add i32 %a, %b
//   %2 = add i32 %b, %a
//   %3 = add i32 %b, %a
//   %4 = add i32 %a, %b
// \endcode
// would become function
// \code
// define internal void outlined_ir_function(i32 %0, i32 %1) {
//   %2 = add i32 %0, %1
//   %3 = add i32 %1, %0
//   ret void
// }
// \endcode
// with calls:
// \code
//   call void outlined_ir_function(i32 %a, i32 %b)
//   call void outlined_ir_function(i32 %b, i32 %a)
// \endcode
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_IROUTLINER_H
#define LLVM_TRANSFORMS_IPO_IROUTLINER_H

#include "llvm/Analysis/IRSimilarityIdentifier.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Transforms/Utils/CodeExtractor.h"

struct OutlinableGroup;

namespace llvm {
using namespace CallingConv;
using namespace IRSimilarity;

class Module;
class TargetTransformInfo;
class OptimizationRemarkEmitter;

/// The OutlinableRegion holds all the information for a specific region, or
/// sequence of instructions. This includes what values need to be hoisted to
/// arguments from the extracted function, inputs and outputs to the region, and
/// mapping from the extracted function arguments to overall function arguments.
struct OutlinableRegion {
  /// Describes the region of code.
  IRSimilarityCandidate *Candidate = nullptr;

  /// If this region is outlined, the front and back IRInstructionData could
  /// potentially become invalidated if the only new instruction is a call.
  /// This ensures that we replace the instruction in the IRInstructionData.
  IRInstructionData *NewFront = nullptr;
  IRInstructionData *NewBack = nullptr;

  /// The number of extracted inputs from the CodeExtractor.
  unsigned NumExtractedInputs = 0;

  /// The corresponding BasicBlock with the appropriate stores for this
  /// OutlinableRegion in the overall function.
  unsigned OutputBlockNum = -1;

  /// Mapping the extracted argument number to the argument number in the
  /// overall function.  Since there will be inputs, such as elevated constants
  /// that are not the same in each region in a SimilarityGroup, or values that
  /// cannot be sunk into the extracted section in every region, we must keep
  /// track of which extracted argument maps to which overall argument.
  DenseMap<unsigned, unsigned> ExtractedArgToAgg;
  DenseMap<unsigned, unsigned> AggArgToExtracted;

  /// Values in the outlined functions will often be replaced by arguments.
  /// When finding corresponding values from one region to another, the found
  /// value will be the value the argument previously replaced.  This structure
  /// maps any replaced values for the region to the corresponding aggregate
  /// argument in the overall function.
  DenseMap<Value *, Value *> RemappedArguments;

  /// Marks whether we need to change the order of the arguments when mapping
  /// the old extracted function call to the new aggregate outlined function
  /// call.
  bool ChangedArgOrder = false;

  /// Marks whether this region ends in a branch; special handling is required
  /// for the following basic blocks in this case.
  bool EndsInBranch = false;

  /// The PHIBlocks with their corresponding return block based on the return
  /// value as the key.
  DenseMap<Value *, BasicBlock *> PHIBlocks;

  /// Mapping of the argument number in the deduplicated function
  /// to a given constant, which is used when creating the arguments to the call
  /// to the newly created deduplicated function.  This is handled separately
  /// since the CodeExtractor does not recognize constants.
  DenseMap<unsigned, Constant *> AggArgToConstant;

  /// The global value numbers that are used as outputs for this section. Once
  /// extracted, each output will be stored to an output register.  This
  /// documents the global value numbers that are used in this pattern.
  SmallVector<unsigned, 4> GVNStores;

  /// Used to create an outlined function.
  CodeExtractor *CE = nullptr;

  /// The call site of the extracted region.
  CallInst *Call = nullptr;

  /// The function for the extracted region.
  Function *ExtractedFunction = nullptr;

  /// Flag for whether we have split out the IRSimilarityCandidate. That is,
  /// whether the region containing the IRSimilarityCandidate has been made its
  /// own BasicBlock.
  bool CandidateSplit = false;

  /// Flag for whether we should not consider this region for extraction.
  bool IgnoreRegion = false;

  /// The BasicBlock that is before the start of the region BasicBlock,
  /// only defined when the region has been split.
  BasicBlock *PrevBB = nullptr;

  /// The BasicBlock that contains the starting instruction of the region.
  BasicBlock *StartBB = nullptr;

  /// The BasicBlock that contains the ending instruction of the region.
  BasicBlock *EndBB = nullptr;

  /// The BasicBlock that is after the start of the region BasicBlock,
  /// only defined when the region has been split.
  BasicBlock *FollowBB = nullptr;

  /// The OutlinableGroup that contains this region and the regions
  /// structurally similar to it.
  OutlinableGroup *Parent = nullptr;

  OutlinableRegion(IRSimilarityCandidate &C, OutlinableGroup &Group)
      : Candidate(&C), Parent(&Group) {
    StartBB = C.getStartBB();
    EndBB = C.getEndBB();
  }

  /// For the contained region, split the parent BasicBlock at the starting and
  /// ending instructions of the contained IRSimilarityCandidate.
  void splitCandidate();

  /// For the contained region, reattach the BasicBlock at the starting and
  /// ending instructions of the contained IRSimilarityCandidate, or if the
  /// function has been extracted, the start and end of the BasicBlock
  /// containing the called function.
  void reattachCandidate();

  /// Find a corresponding value for \p V in similar OutlinableRegion \p Other.
  ///
  /// \param Other [in] - The OutlinableRegion to find the corresponding Value
  /// in.
  /// \param V [in] - The Value to look for in the other region.
  /// \return The corresponding Value to \p V if it exists, otherwise nullptr.
  Value *findCorrespondingValueIn(const OutlinableRegion &Other, Value *V);

  /// Find a corresponding BasicBlock for \p BB in similar OutlinableRegion \p Other.
  ///
  /// \param Other [in] - The OutlinableRegion to find the corresponding
  /// BasicBlock in.
  /// \param BB [in] - The BasicBlock to look for in the other region.
  /// \return The corresponding BasicBlock to \p BB if it exists, otherwise
  /// nullptr.
  BasicBlock *findCorrespondingBlockIn(const OutlinableRegion &Other,
                                       BasicBlock *BB);

  /// Get the size of the code removed from the region.
  ///
  /// \param [in] TTI - The TargetTransformInfo for the parent function.
  /// \returns the code size of the region
  InstructionCost getBenefit(TargetTransformInfo &TTI);
};
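
// Illustrative sketch (not part of the original header): translating a value
// between two structurally similar regions with findCorrespondingValueIn.
// `Group.Regions` and `V0` are assumed names for a populated OutlinableGroup
// and a Value used in the first region.
//
//   OutlinableRegion &R0 = *Group.Regions[0];
//   OutlinableRegion &R1 = *Group.Regions[1];
//   if (Value *V1 = R0.findCorrespondingValueIn(R1, V0))
//     ; // V1 plays the same structural role in R1 that V0 plays in R0.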

/// This class is a pass that identifies similarity in a Module, extracts
/// instances of the similarity, and then consolidates the similar regions in
/// an effort to reduce code size.  It uses the IRSimilarityIdentifier pass to
/// identify the similar regions of code, and then extracts the similar
/// sections into a single function.  See above for an example of how code is
/// extracted and consolidated into a single function.
class IROutliner {
public:
  IROutliner(function_ref<TargetTransformInfo &(Function &)> GTTI,
             function_ref<IRSimilarityIdentifier &(Module &)> GIRSI,
             function_ref<OptimizationRemarkEmitter &(Function &)> GORE)
      : getTTI(GTTI), getIRSI(GIRSI), getORE(GORE) {
    
    // Check that the DenseMap implementation has not changed.
    assert(DenseMapInfo<unsigned>::getEmptyKey() == (unsigned)-1 &&
           "DenseMapInfo<unsigned>'s empty key isn't -1!");
    assert(DenseMapInfo<unsigned>::getTombstoneKey() == (unsigned)-2 &&
           "DenseMapInfo<unsigned>'s tombstone key isn't -2!");
  }
  bool run(Module &M);

private:
  /// Find repeated similar code sequences in \p M and outline them into new
  /// Functions.
  ///
  /// \param [in] M - The module to outline from.
  /// \returns The number of Functions created.
  unsigned doOutline(Module &M);

  /// Check whether an OutlinableRegion is incompatible with code already
  /// outlined. OutlinableRegions are incompatible when they contain
  /// overlapping instructions, or when code that has not been recorded has
  /// been added to the instructions.
  ///
  /// \param [in] Region - The OutlinableRegion to check for conflicts with
  /// already outlined code.
  /// \returns whether the region can safely be outlined.
  bool isCompatibleWithAlreadyOutlinedCode(const OutlinableRegion &Region);

  /// Remove all the IRSimilarityCandidates from \p CandidateVec that have
  /// instructions contained in a previously outlined region and put the
  /// remaining regions in \p CurrentGroup.
  ///
  /// \param [in] CandidateVec - List of similarity candidates for regions with
  /// the same similarity structure.
  /// \param [in,out] CurrentGroup - Contains the potential sections to
  /// be outlined.
  void
  pruneIncompatibleRegions(std::vector<IRSimilarityCandidate> &CandidateVec,
                           OutlinableGroup &CurrentGroup);

  /// Create the function based on the overall types found in the current
  /// regions being outlined.
  ///
  /// \param M - The module to outline from.
  /// \param [in,out] CG - The OutlinableGroup for the regions to be outlined.
  /// \param [in] FunctionNameSuffix - The number of functions created so far,
  /// used as the suffix of the new function's name.
  /// \returns the newly created function.
  Function *createFunction(Module &M, OutlinableGroup &CG,
                           unsigned FunctionNameSuffix);

  /// Identify the needed extracted inputs in a section, and add them to the
  /// overall function if needed.
  ///
  /// \param [in] M - The module to outline from.
  /// \param [in,out] Region - The region to be extracted.
  /// \param [in] NotSame - The global value numbers of the Values in the region
  /// that do not have the same Constant in each structurally similar region.
  void findAddInputsOutputs(Module &M, OutlinableRegion &Region,
                            DenseSet<unsigned> &NotSame);

  /// Find the number of instructions that will be removed by extracting the
  /// OutlinableRegions in \p CurrentGroup.
  ///
  /// \param [in] CurrentGroup - The collection of OutlinableRegions to be
  /// analyzed.
  /// \returns the number of outlined instructions across all regions.
  InstructionCost findBenefitFromAllRegions(OutlinableGroup &CurrentGroup);

  /// Find the number of instructions that will be added by reloading arguments.
  ///
  /// \param [in] CurrentGroup - The collection of OutlinableRegions to be
  /// analyzed.
  /// \returns the number of added reload instructions across all regions.
  InstructionCost findCostOutputReloads(OutlinableGroup &CurrentGroup);

  /// Find the cost and the benefit of \p CurrentGroup and save it back to
  /// \p CurrentGroup.
  ///
  /// \param [in] M - The module being analyzed
  /// \param [in,out] CurrentGroup - The overall outlined section
  void findCostBenefit(Module &M, OutlinableGroup &CurrentGroup);

  /// Update the output mapping based on the load instruction, and the outputs
  /// of the extracted function.
  ///
  /// \param Region - The region extracted
  /// \param Outputs - The outputs from the extracted function.
  /// \param LI - The load instruction used to update the mapping.
  void updateOutputMapping(OutlinableRegion &Region,
                           ArrayRef<Value *> Outputs, LoadInst *LI);

  /// Extract \p Region into its own function.
  ///
  /// \param [in] Region - The region to be extracted into its own function.
  /// \returns True if it was successfully outlined.
  bool extractSection(OutlinableRegion &Region);

  /// For the similarities found, and the extracted sections, create a single
  /// outlined function with appropriate output blocks as necessary.
  ///
  /// \param [in] M - The module to outline from
  /// \param [in] CurrentGroup - The set of extracted sections to consolidate.
  /// \param [in,out] FuncsToRemove - List of functions to remove from the
  /// module after outlining is completed.
  /// \param [in,out] OutlinedFunctionNum - the number of new outlined
  /// functions.
  void deduplicateExtractedSections(Module &M, OutlinableGroup &CurrentGroup,
                                    std::vector<Function *> &FuncsToRemove,
                                    unsigned &OutlinedFunctionNum);

  /// If true, enables us to outline from functions that have linkonce_odr
  /// linkage.
  bool OutlineFromLinkODRs = false;

  /// If false, we do not worry if the cost is greater than the benefit.  This
  /// is for debugging and testing, so that we can test small cases to ensure
  /// that the outlining is being done correctly.
  bool CostModel = true;

  /// The set of outlined Instructions, identified by their location in the
  /// sequential ordering of instructions in a Module.
  DenseSet<unsigned> Outlined;

  /// TargetTransformInfo lambda for target specific information.
  function_ref<TargetTransformInfo &(Function &)> getTTI;

  /// A mapping from newly created reloaded output values to the original
  /// value.  If a value is replaced by an output from an outlined region,
  /// this maps that Value back to its original Value.
  DenseMap<Value *, Value *> OutputMappings;

  /// IRSimilarityIdentifier lambda to retrieve IRSimilarityIdentifier.
  function_ref<IRSimilarityIdentifier &(Module &)> getIRSI;

  /// The optimization remark emitter for the pass.
  function_ref<OptimizationRemarkEmitter &(Function &)> getORE;

  /// The memory allocator used to allocate the CodeExtractors.
  SpecificBumpPtrAllocator<CodeExtractor> ExtractorAllocator;

  /// The memory allocator used to allocate the OutlinableRegions.
  SpecificBumpPtrAllocator<OutlinableRegion> RegionAllocator;

  /// The memory allocator used to allocate new IRInstructionData.
  SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;

  /// Custom InstVisitor to classify whether different kinds of instructions
  /// can be analyzed for similarity.  This is needed as there may be
  /// instructions we can identify as having similarity, but that are more
  /// complicated to outline.
  struct InstructionAllowed : public InstVisitor<InstructionAllowed, bool> {
    InstructionAllowed() = default;

    bool visitBranchInst(BranchInst &BI) { return EnableBranches; }
    bool visitPHINode(PHINode &PN) { return EnableBranches; }
    // TODO: Handle allocas.
    bool visitAllocaInst(AllocaInst &AI) { return false; }
    // VAArg instructions are not allowed since this could cause difficulty when
    // differentiating between different sets of variable instructions in
    // the deduplicated outlined regions.
    bool visitVAArgInst(VAArgInst &VI) { return false; }
    // We exclude all exception handling cases since they are so context
    // dependent.
    bool visitLandingPadInst(LandingPadInst &LPI) { return false; }
    bool visitFuncletPadInst(FuncletPadInst &FPI) { return false; }
    // DebugInfo should be included in the regions, but should not be
    // analyzed for similarity as it has no bearing on the outcome of the
    // program.
    bool visitDbgInfoIntrinsic(DbgInfoIntrinsic &DII) { return true; }
    // TODO: Handle specific intrinsics individually from those that can be
    // handled.
    // Note: the visit prefix is required for InstVisitor dispatch to reach
    // this overload.
    bool visitIntrinsicInst(IntrinsicInst &II) { return EnableIntrinsics; }
    // We only handle CallInsts that are not indirect (unless indirect calls
    // are explicitly enabled), since we cannot guarantee that they have a
    // name in these cases.
    bool visitCallInst(CallInst &CI) {
      Function *F = CI.getCalledFunction();
      bool IsIndirectCall = CI.isIndirectCall();
      if (IsIndirectCall && !EnableIndirectCalls)
        return false;
      if (!F && !IsIndirectCall)
        return false;
      // Returning twice can cause issues with the state of the function call
      // that were not expected when the function was used, so we do not include
      // the call in outlined functions.
      if (CI.canReturnTwice())
        return false;
      // TODO: Update the outliner to capture whether the outlined function
      // needs these extra attributes.

      // Functions marked with the swifttailcc and tailcc calling conventions
      // require special handling when outlining musttail functions.  The
      // calling convention must be passed down to the outlined function as
      // well. Further, there is special handling for musttail calls as well,
      // requiring a return call directly after.  For now, the outliner does not
      // support this.
      bool IsTailCC = CI.getCallingConv() == CallingConv::SwiftTail ||
                      CI.getCallingConv() == CallingConv::Tail;
      if (IsTailCC && !EnableMustTailCalls)
        return false;
      if (CI.isMustTailCall() && !EnableMustTailCalls)
        return false;
      // The outliner can only handle musttail items if it is also accompanied
      // by the tailcc or swifttailcc calling convention.
      if (CI.isMustTailCall() && !IsTailCC)
        return false;
      return true;
    }
    // TODO: Handle FreezeInsts.  Since a frozen value could be frozen inside
    // the outlined region, and then returned as an output, this will have to be
    // handled differently.
    bool visitFreezeInst(FreezeInst &CI) { return false; }
    // TODO: We do not currently handle similarity that changes the control flow.
    bool visitInvokeInst(InvokeInst &II) { return false; }
    // TODO: We do not currently handle similarity that changes the control flow.
    bool visitCallBrInst(CallBrInst &CBI) { return false; }
    // TODO: Handle interblock similarity.
    bool visitTerminator(Instruction &I) { return false; }
    bool visitInstruction(Instruction &I) { return true; }

    // The flag variable that marks whether we should allow branch instructions
    // to be outlined.
    bool EnableBranches = false;

    // The flag variable that marks whether we should allow indirect calls
    // to be outlined.
    bool EnableIndirectCalls = true;

    // The flag variable that marks whether we should allow intrinsics
    // instructions to be outlined.
    bool EnableIntrinsics = false;

    // The flag variable that marks whether we should allow musttail calls.
    bool EnableMustTailCalls = false;
  };

  /// An InstVisitor used to exclude certain instructions from being outlined.
  InstructionAllowed InstructionClassifier;
};

/// Pass to outline similar regions.
class IROutlinerPass : public PassInfoMixin<IROutlinerPass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
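
// A minimal sketch of how run() can wire the three callbacks expected by
// IROutliner; illustrative only (the authoritative version lives in
// IROutliner.cpp), and it assumes the standard analysis proxies are
// registered with the pass builder:
//
//   PreservedAnalyses IROutlinerPass::run(Module &M, ModuleAnalysisManager &AM) {
//     auto &FAM =
//         AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
//     auto GTTI = [&FAM](Function &F) -> TargetTransformInfo & {
//       return FAM.getResult<TargetIRAnalysis>(F);
//     };
//     auto GIRSI = [&AM](Module &Mod) -> IRSimilarityIdentifier & {
//       return AM.getResult<IRSimilarityAnalysis>(Mod);
//     };
//     auto GORE = [&FAM](Function &F) -> OptimizationRemarkEmitter & {
//       return FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
//     };
//     if (IROutliner(GTTI, GIRSI, GORE).run(M))
//       return PreservedAnalyses::none();
//     return PreservedAnalyses::all();
//   }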

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_IROUTLINER_H

// File: Transforms/IPO/LoopExtractor.h
//===- LoopExtractor.h - Extract each loop into a new function ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A pass wrapper around the ExtractLoop() scalar transformation to extract each
// top-level loop into its own new function. If the loop is the ONLY loop in a
// given function, it is not touched. This is a pass most useful for debugging
// via bugpoint.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_LOOPEXTRACTOR_H
#define LLVM_TRANSFORMS_IPO_LOOPEXTRACTOR_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct LoopExtractorPass : public PassInfoMixin<LoopExtractorPass> {
  LoopExtractorPass(unsigned NumLoops = ~0) : NumLoops(NumLoops) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  unsigned NumLoops;
};
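
// Example (illustrative): extract at most one top-level loop per function,
// mirroring bugpoint's single-loop extraction. `MPM` is an assumed
// ModulePassManager.
//
//   MPM.addPass(LoopExtractorPass(/*NumLoops=*/1));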
} // namespace llvm

#endif // LLVM_TRANSFORMS_IPO_LOOPEXTRACTOR_H

// File: Transforms/Utils.h
//===- llvm/Transforms/Utils.h - Utility Transformations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the Utils transformations library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_H
#define LLVM_TRANSFORMS_UTILS_H

namespace llvm {

class ModulePass;
class FunctionPass;
class Pass;

//===----------------------------------------------------------------------===//
//
// LowerInvoke - This pass removes invoke instructions, converting them to call
// instructions.
//
FunctionPass *createLowerInvokePass();
extern char &LowerInvokePassID;

//===----------------------------------------------------------------------===//
//
// LowerSwitch - This pass converts SwitchInst instructions into a sequence of
// chained binary branch instructions.
//
FunctionPass *createLowerSwitchPass();
extern char &LowerSwitchID;

//===----------------------------------------------------------------------===//
//
// BreakCriticalEdges - Break all of the critical edges in the CFG by inserting
// a dummy basic block. This pass may be "required" by passes that cannot deal
// with critical edges. For this usage, a pass must call:
//
//   AU.addRequiredID(BreakCriticalEdgesID);
//
// This pass obviously invalidates the CFG, but can update forward dominator
// (set, immediate dominators, tree, and frontier) information.
//
FunctionPass *createBreakCriticalEdgesPass();
extern char &BreakCriticalEdgesID;

//===----------------------------------------------------------------------===//
//
// LCSSA - This pass inserts phi nodes at loop boundaries to simplify other loop
// optimizations.
//
Pass *createLCSSAPass();
extern char &LCSSAID;

//===----------------------------------------------------------------------===//
//
// PromoteMemoryToRegister - This pass is used to promote memory references to
// be register references. A simple example of the transformation performed by
// this pass is:
//
//        FROM CODE                           TO CODE
//   %X = alloca i32, i32 1                 ret i32 42
//   store i32 42, i32 *%X
//   %Y = load i32* %X
//   ret i32 %Y
//
FunctionPass *createPromoteMemoryToRegisterPass(bool IsForced = false);
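
// Example (illustrative, legacy pass manager): run mem2reg over a single
// function. `M` and `F` are an assumed Module and Function.
//
//   legacy::FunctionPassManager FPM(&M);
//   FPM.add(createPromoteMemoryToRegisterPass());
//   FPM.doInitialization();
//   FPM.run(F);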

//===----------------------------------------------------------------------===//
//
// LoopSimplify - Insert Pre-header blocks into the CFG for every function in
// the module.  This pass updates dominator information, loop information, and
// does not add critical edges to the CFG.
//
//   AU.addRequiredID(LoopSimplifyID);
//
Pass *createLoopSimplifyPass();
extern char &LoopSimplifyID;

//===----------------------------------------------------------------------===//
//
// UnifyLoopExits - For each loop, creates a new block N such that all exiting
// blocks branch to N, and then N distributes control flow to all the original
// exit blocks.
//
FunctionPass *createUnifyLoopExitsPass();

//===----------------------------------------------------------------------===//
//
// FixIrreducible - Convert each SCC with irreducible control-flow
// into a natural loop.
//
FunctionPass *createFixIrreduciblePass();

//===----------------------------------------------------------------------===//
//
// CanonicalizeFreezeInLoops - Canonicalize freeze instructions in loops so they
// don't block SCEV.
//
Pass *createCanonicalizeFreezeInLoopsPass();

//===----------------------------------------------------------------------===//
// LowerGlobalDtorsLegacy - Lower @llvm.global_dtors by creating wrapper
// functions that are registered in @llvm.global_ctors and which contain a call
// to `__cxa_atexit` to register their destructor functions.
ModulePass *createLowerGlobalDtorsLegacyPass();
} // namespace llvm

#endif

// File: Transforms/Coroutines/CoroElide.h
//===---- CoroElide.h - Coroutine frame allocation elision ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file declares a pass that replaces dynamic allocation of coroutine
// frames with alloca and replaces calls to llvm.coro.resume and
// llvm.coro.destroy with direct calls to coroutine sub-functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_COROELIDE_H
#define LLVM_TRANSFORMS_COROUTINES_COROELIDE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct CoroElidePass : PassInfoMixin<CoroElidePass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_COROUTINES_COROELIDE_H

// File: Transforms/Coroutines/CoroConditionalWrapper.h
//===---- CoroConditionalWrapper.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_COROCONDITIONALWRAPPER_H
#define LLVM_TRANSFORMS_COROUTINES_COROCONDITIONALWRAPPER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

// Only runs passes in the contained pass manager if the module contains any
// coroutine intrinsic declarations.
struct CoroConditionalWrapper : PassInfoMixin<CoroConditionalWrapper> {
  CoroConditionalWrapper(ModulePassManager &&);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  ModulePassManager PM;
};
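
// Example (illustrative): gate a coroutine sub-pipeline so it only runs for
// modules that declare coroutine intrinsics. `MPM` is an assumed outer
// ModulePassManager.
//
//   ModulePassManager CoroPM;
//   CoroPM.addPass(CoroEarlyPass());
//   MPM.addPass(CoroConditionalWrapper(std::move(CoroPM)));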
} // end namespace llvm

#endif // LLVM_TRANSFORMS_COROUTINES_COROCONDITIONALWRAPPER_H

// File: Transforms/Coroutines/CoroEarly.h
//===---- CoroEarly.h - Lower early coroutine intrinsics --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file provides the interface to the early coroutine intrinsic lowering
// pass. This pass lowers coroutine intrinsics that hide the details of the
// exact calling convention for coroutine resume and destroy functions and
// details of the structure of the coroutine frame.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_COROEARLY_H
#define LLVM_TRANSFORMS_COROUTINES_COROEARLY_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

struct CoroEarlyPass : PassInfoMixin<CoroEarlyPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_COROUTINES_COROEARLY_H

// File: Transforms/Coroutines/CoroCleanup.h
//===-- CoroCleanup.h - Lower all coroutine related intrinsics --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file declares a pass that lowers all remaining coroutine intrinsics.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_COROCLEANUP_H
#define LLVM_TRANSFORMS_COROUTINES_COROCLEANUP_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Module;

struct CoroCleanupPass : PassInfoMixin<CoroCleanupPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
  static bool isRequired() { return true; }
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_COROUTINES_COROCLEANUP_H

// File: Transforms/Coroutines/CoroSplit.h
//===- CoroSplit.h - Converts a coroutine into a state machine -*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file declares the pass that builds the coroutine frame and outlines
// the resume and destroy parts of the coroutine into separate functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H
#define LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

struct CoroSplitPass : PassInfoMixin<CoroSplitPass> {
  const std::function<bool(Instruction &)> MaterializableCallback;

  CoroSplitPass(bool OptimizeFrame = false);
  CoroSplitPass(std::function<bool(Instruction &)> MaterializableCallback,
                bool OptimizeFrame = false)
      : MaterializableCallback(MaterializableCallback),
        OptimizeFrame(OptimizeFrame) {}

  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
  static bool isRequired() { return true; }

  // Would be true if the Optimization level isn't O0.
  bool OptimizeFrame;
};
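
// Example (illustrative): CoroSplit is a CGSCC pass, so it is added through
// the CGSCC-to-module adaptor. `MPM` is an assumed ModulePassManager.
//
//   CGSCCPassManager CGPM;
//   CGPM.addPass(CoroSplitPass(/*OptimizeFrame=*/true));
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));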
} // end namespace llvm

#endif // LLVM_TRANSFORMS_COROUTINES_COROSPLIT_H

// File: Transforms/Scalar.h
//===-- Scalar.h - Scalar Transformations -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the Scalar transformations library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_H
#define LLVM_TRANSFORMS_SCALAR_H

#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"
#include <functional>

namespace llvm {

class Function;
class FunctionPass;
class Pass;

//===----------------------------------------------------------------------===//
//
// RedundantDbgInstElimination - This pass removes redundant dbg intrinsics
// without modifying the CFG of the function.  It is a FunctionPass.
//
Pass *createRedundantDbgInstEliminationPass();

//===----------------------------------------------------------------------===//
//
// DeadCodeElimination - This pass is more powerful than DeadInstElimination,
// because it is worklist driven, allowing it to revisit instructions when the
// instructions that use them become dead, thereby eliminating chains of dead
// computations.
//
FunctionPass *createDeadCodeEliminationPass();


//===----------------------------------------------------------------------===//
//
// GuardWidening - An optimization over the @llvm.experimental.guard intrinsic
// that (optimistically) combines multiple guards into one to have fewer checks
// at runtime.
//
FunctionPass *createGuardWideningPass();


//===----------------------------------------------------------------------===//
//
// LoopGuardWidening - Analogous to the GuardWidening pass, but restricted to a
// single loop at a time for use within a LoopPassManager.  Desired effect is
// to widen guards into preheader or a single guard within loop if that's not
// possible.
//
Pass *createLoopGuardWideningPass();


//===----------------------------------------------------------------------===//
//
// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
//
FunctionPass *createSROAPass(bool PreserveCFG = true);

//===----------------------------------------------------------------------===//
//
// LICM - This pass is a loop invariant code motion and memory promotion pass.
//
Pass *createLICMPass();

//===----------------------------------------------------------------------===//
//
// LoopSink - This pass sinks invariants from the preheader into the loop body
// where the execution frequency is lower than in the loop preheader.
//
Pass *createLoopSinkPass();

//===----------------------------------------------------------------------===//
//
// LoopPredication - This pass does loop predication on guards.
//
Pass *createLoopPredicationPass();

//===----------------------------------------------------------------------===//
//
// LoopStrengthReduce - This pass strength-reduces GEP instructions that use
// a loop's canonical induction variable as one of their indices.
//
Pass *createLoopStrengthReducePass();

//===----------------------------------------------------------------------===//
//
// LoopInstSimplify - This pass simplifies instructions in a loop's body.
//
Pass *createLoopInstSimplifyPass();

//===----------------------------------------------------------------------===//
//
// LoopUnroll - This pass is a simple loop unrolling pass.
//
Pass *createLoopUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false,
                           bool ForgetAllSCEV = false, int Threshold = -1,
                           int Count = -1, int AllowPartial = -1,
                           int Runtime = -1, int UpperBound = -1,
                           int AllowPeeling = -1);

//===----------------------------------------------------------------------===//
//
// LoopRotate - This pass is a simple loop rotating pass.
//
Pass *createLoopRotatePass(int MaxHeaderSize = -1, bool PrepareForLTO = false);

//===----------------------------------------------------------------------===//
//
// DemoteRegisterToMemoryPass - This pass is used to demote registers to memory
// references. It basically undoes the PromoteMemoryToRegister pass to make
// CFG hacking easier.
//
FunctionPass *createDemoteRegisterToMemoryPass();
extern char &DemoteRegisterToMemoryID;

//===----------------------------------------------------------------------===//
//
// Reassociate - This pass reassociates commutative expressions in an order that
// is designed to promote better constant propagation, GCSE, LICM, PRE...
//
// For example:  4 + (x + 5)  ->  x + (4 + 5)
//
FunctionPass *createReassociatePass();

//===----------------------------------------------------------------------===//
//
// CFGSimplification - Merge basic blocks, eliminate unreachable blocks,
// simplify terminator instructions, convert switches to lookup tables, etc.
//
FunctionPass *createCFGSimplificationPass(
    SimplifyCFGOptions Options = SimplifyCFGOptions(),
    std::function<bool(const Function &)> Ftor = nullptr);
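
// Example (illustrative): request non-canonical optimizations, such as
// converting switches to lookup tables, via the chained option setters.
//
//   FunctionPass *P = createCFGSimplificationPass(
//       SimplifyCFGOptions().convertSwitchToLookupTable(true));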

//===----------------------------------------------------------------------===//
//
// FlattenCFG - flatten CFG, reduce number of conditional branches by using
// parallel-and and parallel-or mode, etc...
//
FunctionPass *createFlattenCFGPass();

//===----------------------------------------------------------------------===//
//
// CFG Structurization - Remove irreducible control flow
//
///
/// When \p SkipUniformRegions is true the structizer will not structurize
/// regions that only contain uniform branches.
Pass *createStructurizeCFGPass(bool SkipUniformRegions = false);

//===----------------------------------------------------------------------===//
//
// TailCallElimination - This pass eliminates call instructions to the current
// function which occur immediately before return instructions.
//
FunctionPass *createTailCallEliminationPass();

//===----------------------------------------------------------------------===//
//
// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
// tree.
//
FunctionPass *createEarlyCSEPass(bool UseMemorySSA = false);

//===----------------------------------------------------------------------===//
//
// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
// are hoisted into the header, while stores sink into the footer.
//
FunctionPass *createMergedLoadStoreMotionPass(bool SplitFooterBB = false);

//===----------------------------------------------------------------------===//
//
// ConstantHoisting - This pass prepares a function for expensive constants.
//
FunctionPass *createConstantHoistingPass();

//===----------------------------------------------------------------------===//
//
// Sink - Code Sinking
//
FunctionPass *createSinkingPass();

//===----------------------------------------------------------------------===//
//
// LowerAtomic - Lower atomic intrinsics to non-atomic form
//
Pass *createLowerAtomicPass();

//===----------------------------------------------------------------------===//
//
// LowerGuardIntrinsic - Lower guard intrinsics to normal control flow.
//
Pass *createLowerGuardIntrinsicPass();

//===----------------------------------------------------------------------===//
//
// LowerWidenableCondition - Lower widenable condition to i1 true.
//
Pass *createLowerWidenableConditionPass();

//===----------------------------------------------------------------------===//
//
// MergeICmps - Merge integer comparison chains into a memcmp
//
Pass *createMergeICmpsLegacyPass();

//===----------------------------------------------------------------------===//
//
// InferAddressSpaces - Modify users of addrspacecast instructions with values
// in the source address space if using the destination address space is slower
// on the target. If AddressSpace is left to its default value, it will be
// obtained from the TargetTransformInfo.
//
FunctionPass *createInferAddressSpacesPass(unsigned AddressSpace = ~0u);
extern char &InferAddressSpacesID;

//===----------------------------------------------------------------------===//
//
// LowerExpectIntrinsics - Removes llvm.expect intrinsics and creates
// "block_weights" metadata.
FunctionPass *createLowerExpectIntrinsicPass();

//===----------------------------------------------------------------------===//
//
// TLSVariableHoist - This pass reduces duplicated TLS address calls.
//
FunctionPass *createTLSVariableHoistPass();

//===----------------------------------------------------------------------===//
//
// LowerConstantIntrinsics - Expand any remaining llvm.objectsize and
// llvm.is.constant intrinsic calls, even for the unknown cases.
//
FunctionPass *createLowerConstantIntrinsicsPass();

//===----------------------------------------------------------------------===//
//
// PartiallyInlineLibCalls - Tries to inline the fast path of library
// calls such as sqrt.
//
FunctionPass *createPartiallyInlineLibCallsPass();

//===----------------------------------------------------------------------===//
//
// SeparateConstOffsetFromGEP - Split GEPs for better CSE
//
FunctionPass *createSeparateConstOffsetFromGEPPass(bool LowerGEP = false);

//===----------------------------------------------------------------------===//
//
// SpeculativeExecution - Aggressively hoist instructions to enable
// speculative execution on targets where branches are expensive.
//
FunctionPass *createSpeculativeExecutionPass();

// Same as createSpeculativeExecutionPass, but does nothing unless
// TargetTransformInfo::hasBranchDivergence() is true.
FunctionPass *createSpeculativeExecutionIfHasBranchDivergencePass();

//===----------------------------------------------------------------------===//
//
// StraightLineStrengthReduce - This pass strength-reduces certain
// instruction patterns in straight-line code.
//
FunctionPass *createStraightLineStrengthReducePass();

//===----------------------------------------------------------------------===//
//
// NaryReassociate - Simplify n-ary operations by reassociation.
//
FunctionPass *createNaryReassociatePass();

//===----------------------------------------------------------------------===//
//
// LoopDataPrefetch - Perform data prefetching in loops.
//
FunctionPass *createLoopDataPrefetchPass();

//===----------------------------------------------------------------------===//
//
// LoopSimplifyCFG - This pass performs basic CFG simplification on loops,
// primarily to help other loop passes.
//
Pass *createLoopSimplifyCFGPass();

//===----------------------------------------------------------------------===//
//
// This pass does instruction simplification on each
// instruction in a function.
//
FunctionPass *createInstSimplifyLegacyPass();


//===----------------------------------------------------------------------===//
//
// createScalarizeMaskedMemIntrinPass - Replace masked load, store, gather
// and scatter intrinsics with scalar code when target doesn't support them.
//
FunctionPass *createScalarizeMaskedMemIntrinLegacyPass();
} // End llvm namespace

#endif

// File: Transforms/Instrumentation.h
//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines constructor functions for instrumentation passes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_H

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include <cassert>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>

namespace llvm {

class Triple;
class OptimizationRemarkEmitter;
class Comdat;
class CallBase;

/// Instrumentation passes often insert conditional checks into entry blocks.
/// Call this function before splitting the entry block to move instructions
/// that must remain in the entry block up before the split point. Static
/// allocas and llvm.localescape calls, for example, must remain in the entry
/// block.
BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
                                              BasicBlock::iterator IP);

// Create a constant for Str so that we can pass it to the run-time lib.
GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str,
                                             bool AllowMerging,
                                             const char *NamePrefix = "");

// Returns F.getComdat() if it exists.
// Otherwise creates a new comdat, sets F's comdat, and returns it.
// Returns nullptr on failure.
Comdat *getOrCreateFunctionComdat(Function &F, Triple &T);

// Insert GCOV profiling instrumentation
struct GCOVOptions {
  static GCOVOptions getDefault();

  // Specify whether to emit .gcno files.
  bool EmitNotes;

  // Specify whether to modify the program to emit .gcda files when run.
  bool EmitData;

  // A four-byte version string. The meaning of a version string is described in
  // gcc's gcov-io.h
  char Version[4];

  // Add the 'noredzone' attribute to added runtime library calls.
  bool NoRedZone;

  // Use atomic profile counter increments.
  bool Atomic = false;

  // Regexes separated by a semi-colon to filter the files to instrument.
  std::string Filter;

  // Regexes separated by a semi-colon to filter the files to not instrument.
  std::string Exclude;
};

// The pgo-specific indirect call promotion function declared below is used by
// the pgo-driven indirect call promotion and sample profile passes. It's a
// wrapper around llvm::promoteCall, et al. that additionally computes !prof
// metadata. We place it in a pgo namespace so it's not confused with the
// generic utilities.
namespace pgo {

// Helper function that transforms CB (either an indirect-call instruction or
// an invoke instruction) into a conditional call to F. This is like:
//     if (Inst.CalledValue == F)
//        F(...);
//     else
//        Inst(...);
//     end
// TotalCount is the profile count of executions of the instruction.
// Count is the profile count of executions in which F is the target function.
// These two values are used to update the branch weight.
// If \p AttachProfToDirectCall is true, a prof metadata is attached to the
// new direct call to contain \p Count.
// Returns the promoted direct call instruction.
CallBase &promoteIndirectCall(CallBase &CB, Function *F, uint64_t Count,
                              uint64_t TotalCount, bool AttachProfToDirectCall,
                              OptimizationRemarkEmitter *ORE);
} // namespace pgo

/// Options for the frontend instrumentation based profiling pass.
struct InstrProfOptions {
  // Add the 'noredzone' attribute to added runtime library calls.
  bool NoRedZone = false;

  // Do counter register promotion
  bool DoCounterPromotion = false;

  // Use atomic profile counter increments.
  bool Atomic = false;

  // Use BFI to guide register promotion
  bool UseBFIInPromotion = false;

  // Name of the profile file to use as output
  std::string InstrProfileOutput;

  InstrProfOptions() = default;
};

// Options for sanitizer coverage instrumentation.
struct SanitizerCoverageOptions {
  enum Type {
    SCK_None = 0,
    SCK_Function,
    SCK_BB,
    SCK_Edge
  } CoverageType = SCK_None;
  bool IndirectCalls = false;
  bool TraceBB = false;
  bool TraceCmp = false;
  bool TraceDiv = false;
  bool TraceGep = false;
  bool Use8bitCounters = false;
  bool TracePC = false;
  bool TracePCGuard = false;
  bool Inline8bitCounters = false;
  bool InlineBoolFlag = false;
  bool PCTable = false;
  bool NoPrune = false;
  bool StackDepth = false;
  bool TraceLoads = false;
  bool TraceStores = false;
  bool CollectControlFlow = false;

  SanitizerCoverageOptions() = default;
};

/// Calculate what to divide by to scale counts.
///
/// Given the maximum count, calculate a divisor that will scale all the
/// weights to strictly less than std::numeric_limits<uint32_t>::max().
static inline uint64_t calculateCountScale(uint64_t MaxCount) {
  return MaxCount < std::numeric_limits<uint32_t>::max()
             ? 1
             : MaxCount / std::numeric_limits<uint32_t>::max() + 1;
}

/// Scale an individual branch count.
///
/// Scale a 64-bit weight down to 32-bits using \c Scale.
///
static inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) {
  uint64_t Scaled = Count / Scale;
  assert(Scaled <= std::numeric_limits<uint32_t>::max() && "overflow 32-bits");
  return Scaled;
}
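
// Illustrative use of the two helpers above: for MaxCount = 1ULL << 40 the
// scale comes out to (1ULL << 40) / UINT32_MAX + 1 == 257, after which every
// scaled weight fits in 32 bits. `Counts` and `Weights` are assumed
// containers of raw counts and resulting branch weights.
//
//   uint64_t Scale = calculateCountScale(MaxCount);
//   for (uint64_t Count : Counts)
//     Weights.push_back(scaleBranchCount(Count, Scale));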

// Use to ensure the inserted instrumentation has a DebugLocation; if none is
// attached to the source instruction, try to use a DILocation with offset 0
// scoped to surrounding function (if it has a DebugLocation).
//
// Some non-call instructions may be missing debug info, but when inserting
// instrumentation calls, some builds (e.g. LTO) want calls to have debug info
// if the enclosing function does.
struct InstrumentationIRBuilder : IRBuilder<> {
  static void ensureDebugInfo(IRBuilder<> &IRB, const Function &F) {
    if (IRB.getCurrentDebugLocation())
      return;
    if (DISubprogram *SP = F.getSubprogram())
      IRB.SetCurrentDebugLocation(DILocation::get(SP->getContext(), 0, 0, SP));
  }

  explicit InstrumentationIRBuilder(Instruction *IP) : IRBuilder<>(IP) {
    ensureDebugInfo(*this, *IP->getFunction());
  }
};
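
// Example (illustrative): insert a call before instruction I while ensuring
// it carries a debug location whenever the enclosing function has one.
// `InstrumentFn` is a hypothetical FunctionCallee.
//
//   InstrumentationIRBuilder IRB(&I);
//   IRB.CreateCall(InstrumentFn, {IRB.getInt32(0)});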
} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_H

// File: Transforms/IPO.h
//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file defines prototypes for accessor functions that expose passes
// in the IPO transformations library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_IPO_H
#define LLVM_TRANSFORMS_IPO_H

#include "llvm/ADT/SmallVector.h"
#include <functional>
#include <vector>

namespace llvm {

class ModulePass;
class Pass;
class raw_ostream;

//===----------------------------------------------------------------------===//
/// createDeadArgEliminationPass - This pass removes arguments from functions
/// which are not used by the body of the function.
///
ModulePass *createDeadArgEliminationPass();

/// DeadArgHacking pass - Same as DAE, but delete arguments of external
/// functions as well.  This is definitely not safe, and should only be used by
/// bugpoint.
ModulePass *createDeadArgHackingPass();

//===----------------------------------------------------------------------===//
//
/// createLoopExtractorPass - This pass extracts all natural loops from the
/// program into a function if it can.
///
Pass *createLoopExtractorPass();

/// createSingleLoopExtractorPass - This pass extracts one natural loop from the
/// program into a function if it can.  This is used by bugpoint.
///
Pass *createSingleLoopExtractorPass();

//===----------------------------------------------------------------------===//
/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
/// manager.
ModulePass *createBarrierNoopPass();

/// What to do with the summary when running passes that operate on it.
enum class PassSummaryAction {
  None,   ///< Do nothing.
  Import, ///< Import information from summary.
  Export, ///< Export information to summary.
};

} // End llvm namespace

#endif

// File: Transforms/AggressiveInstCombine/AggressiveInstCombine.h
//===- AggressiveInstCombine.h - AggressiveInstCombine pass -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// AggressiveInstCombiner - Combine expression patterns to form expressions
/// with fewer, simple instructions. This pass does not modify the CFG.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_AGGRESSIVEINSTCOMBINE_AGGRESSIVEINSTCOMBINE_H
#define LLVM_TRANSFORMS_AGGRESSIVEINSTCOMBINE_AGGRESSIVEINSTCOMBINE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class AggressiveInstCombinePass
    : public PassInfoMixin<AggressiveInstCombinePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}

#endif

// File: Transforms/Scalar/LoopDeletion.h
//===- LoopDeletion.h - Loop Deletion ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Loop Deletion Pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
#define LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Loop;
class LPMUpdater;

class LoopDeletionPass : public PassInfoMixin<LoopDeletionPass> {
public:
  LoopDeletionPass() = default;

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H

// File: Transforms/Scalar/LowerExpectIntrinsic.h
//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// The header file for the LowerExpectIntrinsic pass as used by the new pass
/// manager.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
#define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct LowerExpectIntrinsicPass : PassInfoMixin<LowerExpectIntrinsicPass> {
  /// Run the pass over the function.
  ///
  /// This will lower all of the expect intrinsic calls in this function into
  /// branch weight metadata. That metadata will subsequently feed the analysis
  /// of the probabilities and frequencies of the CFG. After running this pass,
  /// no more expect intrinsics remain, allowing the rest of the optimizer to
  /// ignore them.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};

}

#endif

// File: Transforms/Scalar/LoopFuse.h
//===- LoopFuse.h - Loop Fusion Pass ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the Loop Fusion pass.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPFUSE_H
#define LLVM_TRANSFORMS_SCALAR_LOOPFUSE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

class LoopFusePass : public PassInfoMixin<LoopFusePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPFUSE_H

// File: Transforms/Scalar/Scalarizer.h
//===- Scalarizer.h --- Scalarize vector operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass converts vector operations into scalar operations (or, optionally,
/// operations on smaller vector widths), in order to expose optimization
/// opportunities on the individual scalar operations.
/// It is mainly intended for targets that do not have vector units, but it
/// may also be useful for revectorizing code to different vector widths.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_SCALARIZER_H
#define LLVM_TRANSFORMS_SCALAR_SCALARIZER_H

#include "llvm/IR/PassManager.h"
#include <optional>

namespace llvm {

class Function;
class FunctionPass;

struct ScalarizerPassOptions {
  // These options correspond 1:1 to cl::opt options defined in
  // Scalarizer.cpp. When the cl::opt are specified, they take precedence.
  // When the cl::opt are not specified, the optional values here allow
  // overriding the cl::opt's default values.
  std::optional<bool> ScalarizeVariableInsertExtract;
  std::optional<bool> ScalarizeLoadStore;
  std::optional<unsigned> ScalarizeMinBits;
};

class ScalarizerPass : public PassInfoMixin<ScalarizerPass> {
  ScalarizerPassOptions Options;

public:
  ScalarizerPass() = default;
  ScalarizerPass(const ScalarizerPassOptions &Options) : Options(Options) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  void setScalarizeVariableInsertExtract(bool Value) {
    Options.ScalarizeVariableInsertExtract = Value;
  }
  void setScalarizeLoadStore(bool Value) { Options.ScalarizeLoadStore = Value; }
  void setScalarizeMinBits(unsigned Value) { Options.ScalarizeMinBits = Value; }
};
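
// Example (illustrative): override the cl::opt default and scalarize vector
// loads and stores as well. `FPM` is an assumed FunctionPassManager.
//
//   ScalarizerPassOptions Opts;
//   Opts.ScalarizeLoadStore = true;
//   FPM.addPass(ScalarizerPass(Opts));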

/// Create a legacy pass manager instance of the Scalarizer pass
FunctionPass *createScalarizerPass();

}

#endif /* LLVM_TRANSFORMS_SCALAR_SCALARIZER_H */

// File: Transforms/Scalar/StructurizeCFG.h
//===- StructurizeCFG.h ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_STRUCTURIZECFG_H
#define LLVM_TRANSFORMS_SCALAR_STRUCTURIZECFG_H

#include "llvm/IR/PassManager.h"

namespace llvm {
struct StructurizeCFGPass : PassInfoMixin<StructurizeCFGPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_STRUCTURIZECFG_H

// File: Transforms/Scalar/LoopPredication.h
//===- LoopPredication.h - Guard based loop predication pass ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to convert loop variant range checks to loop invariant by
// widening checks across loop iterations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H
#define LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class LPMUpdater;
class Loop;
/// Performs Loop Predication Pass.
class LoopPredicationPass : public PassInfoMixin<LoopPredicationPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPPREDICATION_H

// File: Transforms/Scalar/Reg2Mem.h
//===- Reg2Mem.h - Convert registers to allocas -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the RegToMem Pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_REG2MEM_H
#define LLVM_TRANSFORMS_SCALAR_REG2MEM_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class RegToMemPass : public PassInfoMixin<RegToMemPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_REG2MEM_H

// File: Transforms/Scalar/SimplifyCFG.h
//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for the pass responsible for both
/// simplifying and canonicalizing the CFG.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
#define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/SimplifyCFGOptions.h"

namespace llvm {

/// A pass to simplify and canonicalize the CFG of a function.
///
/// This pass iteratively simplifies the entire CFG of a function. It may change
/// or remove control flow to put the CFG into a canonical form expected by
/// other passes of the mid-level optimizer. Depending on the specified options,
/// it may further optimize control-flow to create non-canonical forms.
class SimplifyCFGPass : public PassInfoMixin<SimplifyCFGPass> {
  SimplifyCFGOptions Options;

public:
  /// The default constructor sets the pass options to create canonical IR,
  /// rather than optimal IR. That is, by default we bypass transformations that
  /// are likely to improve performance but make analysis for other passes more
  /// difficult.
  SimplifyCFGPass();

  /// Construct a pass with optional optimizations.
  SimplifyCFGPass(const SimplifyCFGOptions &PassOptions);

  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
//===- SeparateConstOffsetFromGEP.h ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_SEPARATECONSTOFFSETFROMGEP_H
#define LLVM_TRANSFORMS_SCALAR_SEPARATECONSTOFFSETFROMGEP_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class SeparateConstOffsetFromGEPPass
    : public PassInfoMixin<SeparateConstOffsetFromGEPPass> {
  bool LowerGEP;

public:
  SeparateConstOffsetFromGEPPass(bool LowerGEP = false) : LowerGEP(LowerGEP) {}
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SEPARATECONSTOFFSETFROMGEP_H
//===- JumpThreading.h - thread control through conditional BBs -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// See the comments on JumpThreadingPass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
#define LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/IR/ValueHandle.h"
#include <optional>
#include <utility>

namespace llvm {

class AAResults;
class BasicBlock;
class BinaryOperator;
class BranchInst;
class CmpInst;
class Constant;
class Function;
class Instruction;
class IntrinsicInst;
class LazyValueInfo;
class LoadInst;
class PHINode;
class SelectInst;
class SwitchInst;
class TargetLibraryInfo;
class TargetTransformInfo;
class Value;

/// A private "module" namespace for types and utilities used by
/// JumpThreading.
/// These are implementation details and should not be used by clients.
namespace jumpthreading {

// These are at global scope so static functions can use them too.
using PredValueInfo = SmallVectorImpl<std::pair<Constant *, BasicBlock *>>;
using PredValueInfoTy = SmallVector<std::pair<Constant *, BasicBlock *>, 8>;

// This is used to keep track of what kind of constant we're currently hoping
// to find.
enum ConstantPreference { WantInteger, WantBlockAddress };

} // end namespace jumpthreading

/// This pass performs 'jump threading', which looks at blocks that have
/// multiple predecessors and multiple successors.  If one or more of the
/// predecessors of the block can be proven to always jump to one of the
/// successors, we forward the edge from the predecessor to the successor by
/// duplicating the contents of this block.
///
/// An example of when this can occur is code like this:
///
///   if () { ...
///     X = 4;
///   }
///   if (X < 3) {
///
/// In this case, the unconditional branch at the end of the first if can be
/// revectored to the false side of the second if.
class JumpThreadingPass : public PassInfoMixin<JumpThreadingPass> {
  Function *F = nullptr;
  FunctionAnalysisManager *FAM = nullptr;
  TargetLibraryInfo *TLI = nullptr;
  TargetTransformInfo *TTI = nullptr;
  LazyValueInfo *LVI = nullptr;
  AAResults *AA = nullptr;
  std::unique_ptr<DomTreeUpdater> DTU;
  std::optional<BlockFrequencyInfo *> BFI;
  std::optional<BranchProbabilityInfo *> BPI;
  bool ChangedSinceLastAnalysisUpdate = false;
  bool HasGuards = false;
#ifndef LLVM_ENABLE_ABI_BREAKING_CHECKS
  SmallPtrSet<const BasicBlock *, 16> LoopHeaders;
#else
  SmallSet<AssertingVH<const BasicBlock>, 16> LoopHeaders;
#endif

  unsigned BBDupThreshold;
  unsigned DefaultBBDupThreshold;

public:
  JumpThreadingPass(int T = -1);

  // Glue for old PM.
  bool runImpl(Function &F, FunctionAnalysisManager *FAM,
               TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
               LazyValueInfo *LVI, AAResults *AA,
               std::unique_ptr<DomTreeUpdater> DTU,
               std::optional<BlockFrequencyInfo *> BFI,
               std::optional<BranchProbabilityInfo *> BPI);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  DomTreeUpdater *getDomTreeUpdater() const { return DTU.get(); }
  void findLoopHeaders(Function &F);
  bool processBlock(BasicBlock *BB);
  bool maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB);
  void updateSSA(BasicBlock *BB, BasicBlock *NewBB,
                 DenseMap<Instruction *, Value *> &ValueMapping);
  DenseMap<Instruction *, Value *> cloneInstructions(BasicBlock::iterator BI,
                                                     BasicBlock::iterator BE,
                                                     BasicBlock *NewBB,
                                                     BasicBlock *PredBB);
  bool tryThreadEdge(BasicBlock *BB,
                     const SmallVectorImpl<BasicBlock *> &PredBBs,
                     BasicBlock *SuccBB);
  void threadEdge(BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
                  BasicBlock *SuccBB);
  bool duplicateCondBranchOnPHIIntoPred(
      BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs);

  bool computeValueKnownInPredecessorsImpl(
      Value *V, BasicBlock *BB, jumpthreading::PredValueInfo &Result,
      jumpthreading::ConstantPreference Preference,
      DenseSet<Value *> &RecursionSet, Instruction *CxtI = nullptr);
  bool
  computeValueKnownInPredecessors(Value *V, BasicBlock *BB,
                                  jumpthreading::PredValueInfo &Result,
                                  jumpthreading::ConstantPreference Preference,
                                  Instruction *CxtI = nullptr) {
    DenseSet<Value *> RecursionSet;
    return computeValueKnownInPredecessorsImpl(V, BB, Result, Preference,
                                               RecursionSet, CxtI);
  }

  Constant *evaluateOnPredecessorEdge(BasicBlock *BB, BasicBlock *PredPredBB,
                                      Value *cond);
  bool maybethreadThroughTwoBasicBlocks(BasicBlock *BB, Value *Cond);
  void threadThroughTwoBasicBlocks(BasicBlock *PredPredBB, BasicBlock *PredBB,
                                   BasicBlock *BB, BasicBlock *SuccBB);
  bool processThreadableEdges(Value *Cond, BasicBlock *BB,
                              jumpthreading::ConstantPreference Preference,
                              Instruction *CxtI = nullptr);

  bool processBranchOnPHI(PHINode *PN);
  bool processBranchOnXOR(BinaryOperator *BO);
  bool processImpliedCondition(BasicBlock *BB);

  bool simplifyPartiallyRedundantLoad(LoadInst *LI);
  void unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, SelectInst *SI,
                         PHINode *SIUse, unsigned Idx);

  bool tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB);
  bool tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB);
  bool tryToUnfoldSelectInCurrBB(BasicBlock *BB);

  bool processGuards(BasicBlock *BB);
  bool threadGuard(BasicBlock *BB, IntrinsicInst *Guard, BranchInst *BI);

private:
  BasicBlock *splitBlockPreds(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
                              const char *Suffix);
  void updateBlockFreqAndEdgeWeight(BasicBlock *PredBB, BasicBlock *BB,
                                    BasicBlock *NewBB, BasicBlock *SuccBB,
                                    BlockFrequencyInfo *BFI,
                                    BranchProbabilityInfo *BPI,
                                    bool HasProfile);
  /// Check if the block has profile metadata for its outgoing edges.
  bool doesBlockHaveProfileData(BasicBlock *BB);

  /// Returns analysis preserved by the pass.
  PreservedAnalyses getPreservedAnalysis() const;

  /// Helper function to run "external" analysis in the middle of
  /// JumpThreading. It takes care of updating/invalidating other existing
  /// analyses before/after running the "external" one.
  template <typename AnalysisT>
  typename AnalysisT::Result *runExternalAnalysis();

  /// Returns an existing instance of BPI if any, otherwise nullptr. By
  /// "existing" we mean either a cached result provided by the
  /// FunctionAnalysisManager or one created by a preceding call to
  /// 'getOrCreateBPI'.
  BranchProbabilityInfo *getBPI();

  /// Returns an existing instance of BFI if any, otherwise nullptr. By
  /// "existing" we mean either a cached result provided by the
  /// FunctionAnalysisManager or one created by a preceding call to
  /// 'getOrCreateBFI'.
  BlockFrequencyInfo *getBFI();

  /// Returns an existing instance of BPI if any, otherwise:
  ///   if 'HasProfile' is true creates new instance through
  ///   FunctionAnalysisManager, otherwise nullptr.
  BranchProbabilityInfo *getOrCreateBPI(bool Force = false);

  /// Returns an existing instance of BFI if any, otherwise:
  ///   if 'HasProfile' is true creates new instance through
  ///   FunctionAnalysisManager, otherwise nullptr.
  BlockFrequencyInfo *getOrCreateBFI(bool Force = false);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_JUMPTHREADING_H
//===- LoopSink.h - Loop Sink Pass ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Loop Sink pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
#define LLVM_TRANSFORMS_SCALAR_LOOPSINK_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// A pass that does profile-guided sinking of instructions into loops.
///
/// This is a function pass as it shouldn't be composed into any kind of
/// unified loop pass pipeline. Its goal is to sink loop-invariant code that
/// is only required within the loop body into the loop, when doing so
/// reduces the expected global dynamic frequency with which it executes.
/// A classic example is an extremely cold branch within a loop body.
///
/// We do this as a separate pass so that during normal optimization all
/// invariant operations can be held outside the loop body to simplify
/// fundamental analyses and transforms of the loop.
class LoopSinkPass : public PassInfoMixin<LoopSinkPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_LOOPSINK_H
//===- InductiveRangeCheckElimination.h - IRCE ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Inductive Range Check Elimination
// loop pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
#define LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class IRCEPass : public PassInfoMixin<IRCEPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_INDUCTIVERANGECHECKELIMINATION_H
//===- InferAddressSpaces.h -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_INFERADDRESSSPACES_H
#define LLVM_TRANSFORMS_SCALAR_INFERADDRESSSPACES_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct InferAddressSpacesPass : PassInfoMixin<InferAddressSpacesPass> {
  InferAddressSpacesPass();
  InferAddressSpacesPass(unsigned AddressSpace);
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

private:
  unsigned FlatAddrSpace = 0;
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_INFERADDRESSSPACES_H
//===-- Float2Int.h - Demote floating point ops to work on integers -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the Float2Int pass, which aims to demote floating
// point operations to work on integers, where that is losslessly possible.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
#define LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H

#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class DominatorTree;
class Function;
class Instruction;
class LLVMContext;
class Type;
class Value;

class Float2IntPass : public PassInfoMixin<Float2IntPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM.
  bool runImpl(Function &F, const DominatorTree &DT);

private:
  void findRoots(Function &F, const DominatorTree &DT);
  void seen(Instruction *I, ConstantRange R);
  ConstantRange badRange();
  ConstantRange unknownRange();
  ConstantRange validateRange(ConstantRange R);
  std::optional<ConstantRange> calcRange(Instruction *I);
  void walkBackwards();
  void walkForwards();
  bool validateAndTransform();
  Value *convert(Instruction *I, Type *ToTy);
  void cleanup();

  MapVector<Instruction *, ConstantRange> SeenInsts;
  SmallSetVector<Instruction *, 8> Roots;
  EquivalenceClasses<Instruction *> ECs;
  MapVector<Instruction *, Value *> ConvertedInsts;
  LLVMContext *Ctx;
};
}
#endif // LLVM_TRANSFORMS_SCALAR_FLOAT2INT_H
//===- LoopUnrollAndJamPass.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLANDJAMPASS_H
#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLANDJAMPASS_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LPMUpdater;
class LoopNest;

/// The loop unroll-and-jam transformation pass.
class LoopUnrollAndJamPass : public PassInfoMixin<LoopUnrollAndJamPass> {
  const int OptLevel;

public:
  explicit LoopUnrollAndJamPass(int OptLevel = 2) : OptLevel(OptLevel) {}
  PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPUNROLLANDJAMPASS_H
//===- PlaceSafepoints.h - Place GC Safepoints ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit.  That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call.  A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call.  Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable.  The (gc & runtime specific) logic of a poll is
// assumed to be provided in a function of the name "gc.safepoint_poll".
//
// We aim to insert polls such that running code can quickly be brought to a
// well defined state for inspection by the collector.  In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops.  We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites.  This is not
// because the poll itself is expensive in the generated code; it's not.  Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable.  The callee might execute a
// poll (or otherwise be inspected by the GC).  If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard.  They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them.  Patches welcome.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_PLACESAFEPOINTS_H
#define LLVM_TRANSFORMS_SCALAR_PLACESAFEPOINTS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class TargetLibraryInfo;

class PlaceSafepointsPass : public PassInfoMixin<PlaceSafepointsPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  bool runImpl(Function &F, const TargetLibraryInfo &TLI);

  void cleanup() {}

private:
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_PLACESAFEPOINTS_H
//===- AnnotationRemarks.h - Emit remarks for !annotation MD ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines AnnotationRemarksPass for the new pass manager.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_ANNOTATIONREMARKS_H
#define LLVM_TRANSFORMS_SCALAR_ANNOTATIONREMARKS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct AnnotationRemarksPass : public PassInfoMixin<AnnotationRemarksPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_ANNOTATIONREMARKS_H
//===- MergedLoadStoreMotion.h - merge and hoist/sink load/stores ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//! \file
//! This pass performs merges of loads and stores on both sides of a
//  diamond (hammock). It hoists the loads and sinks the stores.
//
// The algorithm iteratively hoists two loads to the same address out of a
// diamond (hammock) and merges them into a single load in the header.
// Similarly, it sinks and merges two stores to the tail block (footer). The
// algorithm iterates over the instructions of one side of the diamond and
// attempts to find a matching load/store on the other side. It hoists/sinks
// when it thinks it is safe to do so. This optimization helps with, e.g.,
// hiding load latencies, triggering if-conversion, and reducing static code
// size.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
#define LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;
struct MergedLoadStoreMotionOptions {
  bool SplitFooterBB;
  MergedLoadStoreMotionOptions(bool SplitFooterBB = false)
      : SplitFooterBB(SplitFooterBB) {}

  MergedLoadStoreMotionOptions &splitFooterBB(bool SFBB) {
    SplitFooterBB = SFBB;
    return *this;
  }
};

class MergedLoadStoreMotionPass
    : public PassInfoMixin<MergedLoadStoreMotionPass> {
  MergedLoadStoreMotionOptions Options;

public:
  MergedLoadStoreMotionPass()
      : MergedLoadStoreMotionPass(MergedLoadStoreMotionOptions()) {}
  MergedLoadStoreMotionPass(const MergedLoadStoreMotionOptions &PassOptions)
      : Options(PassOptions) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_MERGEDLOADSTOREMOTION_H
//===--- PartiallyInlineLibCalls.h - Partially inline libcalls --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to partially inline the fast path of well-known library
// functions, such as using square-root instructions for cases where sqrt()
// does not need to set errno.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
#define LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;
class PartiallyInlineLibCallsPass
    : public PassInfoMixin<PartiallyInlineLibCallsPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_PARTIALLYINLINELIBCALLS_H
//===- StraightLineStrengthReduce.h ----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_STRAIGHTLINESTRENGTHREDUCE_H
#define LLVM_TRANSFORMS_SCALAR_STRAIGHTLINESTRENGTHREDUCE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class StraightLineStrengthReducePass
    : public PassInfoMixin<StraightLineStrengthReducePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_STRAIGHTLINESTRENGTHREDUCE_H
//===-- Sink.h - Code Sinking -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks, when possible, so that
// they aren't executed on paths where their results aren't needed.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_SINK_H
#define LLVM_TRANSFORMS_SCALAR_SINK_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// Move instructions into successor blocks when possible.
class SinkingPass : public PassInfoMixin<SinkingPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_SINK_H
//===- GuardWidening.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Guard widening is an optimization over the @llvm.experimental.guard intrinsic
// that (optimistically) combines multiple guards into one to have fewer checks
// at runtime.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_GUARDWIDENING_H
#define LLVM_TRANSFORMS_SCALAR_GUARDWIDENING_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class LPMUpdater;
class Loop;
class Function;

struct GuardWideningPass : public PassInfoMixin<GuardWideningPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_GUARDWIDENING_H
//===- ScalarizeMaskedMemIntrin.h - Scalarize unsupported masked mem ----===//
//                                    intrinsics
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass replaces masked memory intrinsics - when unsupported by the target
// - with a chain of basic blocks that deal with the elements one-by-one if the
// appropriate mask bit is set.
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_TRANSFORMS_SCALAR_SCALARIZEMASKEDMEMINTRIN_H
#define LLVM_TRANSFORMS_SCALAR_SCALARIZEMASKEDMEMINTRIN_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct ScalarizeMaskedMemIntrinPass
    : public PassInfoMixin<ScalarizeMaskedMemIntrinPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SCALARIZEMASKEDMEMINTRIN_H
//===- LoopFlatten.h - Loop Flatten -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Loop Flatten Pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPFLATTEN_H
#define LLVM_TRANSFORMS_SCALAR_LOOPFLATTEN_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LPMUpdater;
class LoopNest;

class LoopFlattenPass : public PassInfoMixin<LoopFlattenPass> {
public:
  LoopFlattenPass() = default;

  PreservedAnalyses run(LoopNest &LN, LoopAnalysisManager &LAM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPFLATTEN_H
//===- ADCE.h - Aggressive dead code elimination ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Aggressive Dead Code Elimination
// pass. This pass optimistically assumes that all instructions are dead until
// proven otherwise, allowing it to eliminate dead computations that other DCE
// passes do not catch, particularly involving loop computations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_ADCE_H
#define LLVM_TRANSFORMS_SCALAR_ADCE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// A DCE pass that assumes instructions are dead until proven otherwise.
///
/// This pass eliminates dead code by optimistically assuming that all
/// instructions are dead until proven otherwise. This allows it to eliminate
/// dead computations that other DCE passes do not catch, particularly involving
/// loop computations.
struct ADCEPass : PassInfoMixin<ADCEPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_ADCE_H
//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's Scalar Replacement of
/// Aggregates pass. This pass provides both aggregate splitting and the
/// primary SSA formation used in the compiler.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_SROA_H
#define LLVM_TRANSFORMS_SCALAR_SROA_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include <variant>
#include <vector>

namespace llvm {

class AllocaInst;
class LoadInst;
class StoreInst;
class AssumptionCache;
class DominatorTree;
class DomTreeUpdater;
class Function;
class LLVMContext;
class PHINode;
class SelectInst;
class Use;

/// A private "module" namespace for types and utilities used by SROA. These
/// are implementation details and should not be used by clients.
namespace LLVM_LIBRARY_VISIBILITY sroa {

class AllocaSliceRewriter;
class AllocaSlices;
class Partition;
class SROALegacyPass;

class SelectHandSpeculativity {
  unsigned char Storage = 0; // None are speculatable by default.
  using TrueVal = Bitfield::Element<bool, 0, 1>;  // Low 0'th bit.
  using FalseVal = Bitfield::Element<bool, 1, 1>; // Low 1'th bit.
public:
  SelectHandSpeculativity() = default;
  SelectHandSpeculativity &setAsSpeculatable(bool isTrueVal);
  bool isSpeculatable(bool isTrueVal) const;
  bool areAllSpeculatable() const;
  bool areAnySpeculatable() const;
  bool areNoneSpeculatable() const;
  // For interop as int half of PointerIntPair.
  explicit operator intptr_t() const { return static_cast<intptr_t>(Storage); }
  explicit SelectHandSpeculativity(intptr_t Storage_) : Storage(Storage_) {}
};
static_assert(sizeof(SelectHandSpeculativity) == sizeof(unsigned char));

using PossiblySpeculatableLoad =
    PointerIntPair<LoadInst *, 2, sroa::SelectHandSpeculativity>;
using UnspeculatableStore = StoreInst *;
using RewriteableMemOp =
    std::variant<PossiblySpeculatableLoad, UnspeculatableStore>;
using RewriteableMemOps = SmallVector<RewriteableMemOp, 2>;

} // end namespace sroa

enum class SROAOptions : bool { ModifyCFG, PreserveCFG };

/// An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is, they
/// don't escape) and tries to turn them into scalar SSA values. There are
/// a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type. It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion. This can be replacing a memset with a scalar store of an
///    integer value, or it can involve speculating operations on a PHI or
///    select to be a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them to
///    this form. By doing so, it will enable promotion of vector aggregates to
///    SSA vector values.
class SROAPass : public PassInfoMixin<SROAPass> {
  LLVMContext *C = nullptr;
  DomTreeUpdater *DTU = nullptr;
  AssumptionCache *AC = nullptr;
  const bool PreserveCFG;

  /// Worklist of alloca instructions to simplify.
  ///
  /// Each alloca in the function is added to this. Each new alloca formed gets
  /// added to it as well to recursively simplify unless that alloca can be
  /// directly promoted. Finally, each time we rewrite a use of an alloca other
  /// than the one being actively rewritten, we add it back onto the list if not
  /// already present to ensure it is re-visited.
  SmallSetVector<AllocaInst *, 16> Worklist;

  /// A collection of instructions to delete.
  /// We try to batch deletions to simplify code and make things a bit more
  /// efficient. We also make sure there are no dangling pointers.
  SmallVector<WeakVH, 8> DeadInsts;

  /// Post-promotion worklist.
  ///
  /// Sometimes we discover an alloca which has a high probability of becoming
  /// viable for SROA after a round of promotion takes place. In those cases,
  /// the alloca is enqueued here for re-processing.
  ///
  /// Note that we have to be very careful to clear allocas out of this list in
  /// the event they are deleted.
  SmallSetVector<AllocaInst *, 16> PostPromotionWorklist;

  /// A collection of alloca instructions we can directly promote.
  std::vector<AllocaInst *> PromotableAllocas;

  /// A worklist of PHIs to speculate prior to promoting allocas.
  ///
  /// All of these PHIs have been checked for the safety of speculation and by
  /// being speculated will allow promoting allocas currently in the promotable
  /// queue.
  SmallSetVector<PHINode *, 8> SpeculatablePHIs;

  /// A worklist of select instructions to rewrite prior to promoting
  /// allocas.
  SmallMapVector<SelectInst *, sroa::RewriteableMemOps, 8> SelectsToRewrite;

  /// Select instructions that use an alloca and are subsequently loaded can be
  /// rewritten to load both input pointers and then select between the result,
  /// allowing the load of the alloca to be promoted.
  /// From this:
  ///   %P2 = select i1 %cond, ptr %Alloca, ptr %Other
  ///   %V = load <type>, ptr %P2
  /// to:
  ///   %V1 = load <type>, ptr %Alloca      -> will be mem2reg'd
  ///   %V2 = load <type>, ptr %Other
  ///   %V = select i1 %cond, <type> %V1, <type> %V2
  ///
  /// We can do this to a select if its only uses are loads
  /// and if either the select's operands can be loaded unconditionally,
  /// or we are allowed to perform CFG modifications.
  /// If an intervening bitcast with a single use of the load is found,
  /// the promotion is still allowed.
  static std::optional<sroa::RewriteableMemOps>
  isSafeSelectToSpeculate(SelectInst &SI, bool PreserveCFG);

public:
  /// If \p PreserveCFG is set, then the pass is not allowed to modify CFG
  /// in any way, even if it would update CFG analyses.
  SROAPass(SROAOptions PreserveCFG);

  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  friend class sroa::AllocaSliceRewriter;
  friend class sroa::SROALegacyPass;

  /// Helper used by both the public run method and by the legacy pass.
  PreservedAnalyses runImpl(Function &F, DomTreeUpdater &RunDTU,
                            AssumptionCache &RunAC);
  PreservedAnalyses runImpl(Function &F, DominatorTree &RunDT,
                            AssumptionCache &RunAC);

  bool presplitLoadsAndStores(AllocaInst &AI, sroa::AllocaSlices &AS);
  AllocaInst *rewritePartition(AllocaInst &AI, sroa::AllocaSlices &AS,
                               sroa::Partition &P);
  bool splitAlloca(AllocaInst &AI, sroa::AllocaSlices &AS);
  std::pair<bool /*Changed*/, bool /*CFGChanged*/> runOnAlloca(AllocaInst &AI);
  void clobberUse(Use &U);
  bool deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
  bool promoteAllocas(Function &F);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SROA_H
//===--- LowerGuardIntrinsic.h - Lower the guard intrinsic ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers the llvm.experimental.guard intrinsic to a conditional call
// to @llvm.experimental.deoptimize.  Once this happens, the guard can no longer
// be widened.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
#define LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct LowerGuardIntrinsicPass : PassInfoMixin<LowerGuardIntrinsicPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

}

#endif // LLVM_TRANSFORMS_SCALAR_LOWERGUARDINTRINSIC_H
//===- IVUsersPrinter.h - Induction Variable Users Printing -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
#define LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LPMUpdater;
class Loop;
class raw_ostream;

/// Printer pass for the \c IVUsers for a loop.
class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
  raw_ostream &OS;

public:
  explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
//===- WarnMissedTransforms.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Emit warnings if forced code transformations have not been performed.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_WARNMISSEDTRANSFORMS_H
#define LLVM_TRANSFORMS_SCALAR_WARNMISSEDTRANSFORMS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
// New pass manager boilerplate.
class WarnMissedTransformationsPass
    : public PassInfoMixin<WarnMissedTransformationsPass> {
public:
  explicit WarnMissedTransformationsPass() = default;

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_WARNMISSEDTRANSFORMS_H
//===- LoopLoadElimination.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This header defines the LoopLoadEliminationPass object. This pass forwards
/// loaded values around loop backedges to allow their use in subsequent
/// iterations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
#define LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// Pass to forward loads in a loop around the backedge to subsequent
/// iterations.
struct LoopLoadEliminationPass : public PassInfoMixin<LoopLoadEliminationPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPLOADELIMINATION_H
//===- CallSiteSplitting.h - Callsite Splitting -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING_H
#define LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct CallSiteSplittingPass : PassInfoMixin<CallSiteSplittingPass> {
  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_CALLSITESPLITTING_H
//===- LoopAccessAnalysisPrinter.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
#define LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;
class raw_ostream;

/// Printer pass for the \c LoopAccessInfo results.
class LoopAccessInfoPrinterPass
    : public PassInfoMixin<LoopAccessInfoPrinterPass> {
  raw_ostream &OS;

public:
  explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
//===- LoopInterchange.h - Loop interchange pass --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPINTERCHANGE_H
#define LLVM_TRANSFORMS_SCALAR_LOOPINTERCHANGE_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class LPMUpdater;
class LoopNest;

struct LoopInterchangePass : public PassInfoMixin<LoopInterchangePass> {
  PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPINTERCHANGE_H
//===- LoopRotation.h - Loop Rotation -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Loop Rotation pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
#define LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LPMUpdater;
class Loop;

/// A simple loop rotation transformation.
class LoopRotatePass : public PassInfoMixin<LoopRotatePass> {
public:
  LoopRotatePass(bool EnableHeaderDuplication = true,
                 bool PrepareForLTO = false);
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  const bool EnableHeaderDuplication;
  const bool PrepareForLTO;
};
}

#endif // LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
//===---- TailRecursionElimination.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop.  This pass also implements the following extensions to the basic
// algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative and commutative expression to use an
//     accumulator variable, thus compiling the typical naive factorial or
//     'fib' implementation into efficient code.
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function.  It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd.  It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
//
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion.  Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction.  It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring.  For example, there could be GEP's and
//     stores to memory that will not be read or written by the call.  This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
#define LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct TailCallElimPass : PassInfoMixin<TailCallElimPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_TAILRECURSIONELIMINATION_H
//===- ConstraintElimination.h - Constraint elimination pass ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_CONSTRAINTELIMINATION_H
#define LLVM_TRANSFORMS_SCALAR_CONSTRAINTELIMINATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class ConstraintEliminationPass
    : public PassInfoMixin<ConstraintEliminationPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_CONSTRAINTELIMINATION_H
//===- GVN.h - Eliminate redundant values and loads -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's Global Value Numbering pass
/// which eliminates fully redundant instructions. It also performs somewhat
/// ad-hoc PRE and dead load elimination.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_GVN_H
#define LLVM_TRANSFORMS_SCALAR_GVN_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

namespace llvm {

class AAResults;
class AssumeInst;
class AssumptionCache;
class BasicBlock;
class BranchInst;
class CallInst;
class ExtractValueInst;
class Function;
class FunctionPass;
class GetElementPtrInst;
class ImplicitControlFlowTracking;
class LoadInst;
class LoopInfo;
class MemDepResult;
class MemoryDependenceResults;
class MemorySSA;
class MemorySSAUpdater;
class NonLocalDepResult;
class OptimizationRemarkEmitter;
class PHINode;
class TargetLibraryInfo;
class Value;
/// A private "module" namespace for types and utilities used by GVN. These
/// are implementation details and should not be used by clients.
namespace LLVM_LIBRARY_VISIBILITY gvn {

struct AvailableValue;
struct AvailableValueInBlock;
class GVNLegacyPass;

} // end namespace gvn

/// A set of parameters to control various transforms performed by GVN pass.
/// Each of the optional boolean parameters can be set to:
///      true - enabling the transformation.
///      false - disabling the transformation.
///      None - relying on a global default.
/// Intended use is to create a default object, modify parameters with
/// additional setters and then pass it to GVN.
struct GVNOptions {
  std::optional<bool> AllowPRE;
  std::optional<bool> AllowLoadPRE;
  std::optional<bool> AllowLoadInLoopPRE;
  std::optional<bool> AllowLoadPRESplitBackedge;
  std::optional<bool> AllowMemDep;

  GVNOptions() = default;

  /// Enables or disables PRE in GVN.
  GVNOptions &setPRE(bool PRE) {
    AllowPRE = PRE;
    return *this;
  }

  /// Enables or disables PRE of loads in GVN.
  GVNOptions &setLoadPRE(bool LoadPRE) {
    AllowLoadPRE = LoadPRE;
    return *this;
  }

  GVNOptions &setLoadInLoopPRE(bool LoadInLoopPRE) {
    AllowLoadInLoopPRE = LoadInLoopPRE;
    return *this;
  }

  /// Enables or disables PRE of loads in GVN.
  GVNOptions &setLoadPRESplitBackedge(bool LoadPRESplitBackedge) {
    AllowLoadPRESplitBackedge = LoadPRESplitBackedge;
    return *this;
  }

  /// Enables or disables use of MemDepAnalysis.
  GVNOptions &setMemDep(bool MemDep) {
    AllowMemDep = MemDep;
    return *this;
  }
};

/// The core GVN pass object.
///
/// FIXME: We should have a good summary of the GVN algorithm implemented by
/// this particular pass here.
class GVNPass : public PassInfoMixin<GVNPass> {
  GVNOptions Options;

public:
  struct Expression;

  GVNPass(GVNOptions Options = {}) : Options(Options) {}

  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  /// This removes the specified instruction from
  /// our various maps and marks it for deletion.
  void markInstructionForDeletion(Instruction *I) {
    VN.erase(I);
    InstrsToErase.push_back(I);
  }

  DominatorTree &getDominatorTree() const { return *DT; }
  AAResults *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
  MemoryDependenceResults &getMemDep() const { return *MD; }

  bool isPREEnabled() const;
  bool isLoadPREEnabled() const;
  bool isLoadInLoopPREEnabled() const;
  bool isLoadPRESplitBackedgeEnabled() const;
  bool isMemDepEnabled() const;

  /// This class holds the mapping between values and value numbers.  It is used
  /// as an efficient mechanism to determine the expression-wise equivalence of
  /// two values.
  class ValueTable {
    DenseMap<Value *, uint32_t> valueNumbering;
    DenseMap<Expression, uint32_t> expressionNumbering;

    // Expressions is the vector of Expression. ExprIdx is the mapping from
    // value number to the index of the Expression in Expressions. We use
    // these instead of a DenseMap because filling such a mapping is faster
    // than filling a DenseMap, and compile time is a little better.
    uint32_t nextExprNumber = 0;

    std::vector<Expression> Expressions;
    std::vector<uint32_t> ExprIdx;

    // Value number to PHINode mapping. Used for phi-translate in scalarpre.
    DenseMap<uint32_t, PHINode *> NumberingPhi;

    // Cache for phi-translate in scalar PRE.
    using PhiTranslateMap =
        DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>;
    PhiTranslateMap PhiTranslateTable;

    AAResults *AA = nullptr;
    MemoryDependenceResults *MD = nullptr;
    DominatorTree *DT = nullptr;

    uint32_t nextValueNumber = 1;

    Expression createExpr(Instruction *I);
    Expression createCmpExpr(unsigned Opcode, CmpInst::Predicate Predicate,
                             Value *LHS, Value *RHS);
    Expression createExtractvalueExpr(ExtractValueInst *EI);
    Expression createGEPExpr(GetElementPtrInst *GEP);
    uint32_t lookupOrAddCall(CallInst *C);
    uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
                              uint32_t Num, GVNPass &Gvn);
    bool areCallValsEqual(uint32_t Num, uint32_t NewNum, const BasicBlock *Pred,
                          const BasicBlock *PhiBlock, GVNPass &Gvn);
    std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
    bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVNPass &Gvn);

  public:
    ValueTable();
    ValueTable(const ValueTable &Arg);
    ValueTable(ValueTable &&Arg);
    ~ValueTable();
    ValueTable &operator=(const ValueTable &Arg);

    uint32_t lookupOrAdd(Value *V);
    uint32_t lookup(Value *V, bool Verify = true) const;
    uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
                            Value *LHS, Value *RHS);
    uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
                          uint32_t Num, GVNPass &Gvn);
    void eraseTranslateCacheEntry(uint32_t Num, const BasicBlock &CurrBlock);
    bool exists(Value *V) const;
    void add(Value *V, uint32_t num);
    void clear();
    void erase(Value *v);
    void setAliasAnalysis(AAResults *A) { AA = A; }
    AAResults *getAliasAnalysis() const { return AA; }
    void setMemDep(MemoryDependenceResults *M) { MD = M; }
    void setDomTree(DominatorTree *D) { DT = D; }
    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
    void verifyRemoved(const Value *) const;
  };
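
  // Illustrative sketch only (ValueTable is an implementation detail, not a
  // client-facing API): two values are expression-wise equivalent exactly
  // when they map to the same value number, e.g.
  //
  //   uint32_t NA = VN.lookupOrAdd(A);
  //   uint32_t NB = VN.lookupOrAdd(B);
  //   bool Equivalent = (NA == NB);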

private:
  friend class gvn::GVNLegacyPass;
  friend struct DenseMapInfo<Expression>;

  MemoryDependenceResults *MD = nullptr;
  DominatorTree *DT = nullptr;
  const TargetLibraryInfo *TLI = nullptr;
  AssumptionCache *AC = nullptr;
  SetVector<BasicBlock *> DeadBlocks;
  OptimizationRemarkEmitter *ORE = nullptr;
  ImplicitControlFlowTracking *ICF = nullptr;
  LoopInfo *LI = nullptr;
  MemorySSAUpdater *MSSAU = nullptr;

  ValueTable VN;

  /// A mapping from value numbers to lists of Value*'s that
  /// have that value number.  Use findLeader to query it.
  struct LeaderTableEntry {
    Value *Val;
    const BasicBlock *BB;
    LeaderTableEntry *Next;
  };
  DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
  BumpPtrAllocator TableAllocator;

  // Block-local map of equivalent values to their leader; it does not
  // propagate to any successors. Entries added mid-block are applied to the
  // remaining instructions in the block.
  SmallMapVector<Value *, Value *, 4> ReplaceOperandsWithMap;
  SmallVector<Instruction *, 8> InstrsToErase;

  // Maps each block to its reverse postorder traversal number. Used to find
  // back edges easily.
  DenseMap<AssertingVH<BasicBlock>, uint32_t> BlockRPONumber;

  // This is initially true and is set again whenever new blocks are added to
  // the function being analyzed. It controls whether BlockRPONumber must be
  // updated before its contents are accessed.
  bool InvalidBlockRPONumbers = true;

  using LoadDepVect = SmallVector<NonLocalDepResult, 64>;
  using AvailValInBlkVect = SmallVector<gvn::AvailableValueInBlock, 64>;
  using UnavailBlkVect = SmallVector<BasicBlock *, 64>;

  bool runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
               const TargetLibraryInfo &RunTLI, AAResults &RunAA,
               MemoryDependenceResults *RunMD, LoopInfo *LI,
               OptimizationRemarkEmitter *ORE, MemorySSA *MSSA = nullptr);

  /// Push a new Value onto the LeaderTable list for its value number.
  void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
    LeaderTableEntry &Curr = LeaderTable[N];
    if (!Curr.Val) {
      Curr.Val = V;
      Curr.BB = BB;
      return;
    }

    LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
    Node->Val = V;
    Node->BB = BB;
    Node->Next = Curr.Next;
    Curr.Next = Node;
  }

  /// Scan the list of values corresponding to a given
  /// value number, and remove the given instruction if encountered.
  void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
    LeaderTableEntry *Prev = nullptr;
    LeaderTableEntry *Curr = &LeaderTable[N];

    while (Curr && (Curr->Val != I || Curr->BB != BB)) {
      Prev = Curr;
      Curr = Curr->Next;
    }

    if (!Curr)
      return;

    if (Prev) {
      Prev->Next = Curr->Next;
    } else {
      if (!Curr->Next) {
        Curr->Val = nullptr;
        Curr->BB = nullptr;
      } else {
        LeaderTableEntry *Next = Curr->Next;
        Curr->Val = Next->Val;
        Curr->BB = Next->BB;
        Curr->Next = Next->Next;
      }
    }
  }

  // List of critical edges to be split between iterations.
  SmallVector<std::pair<Instruction *, unsigned>, 4> toSplit;

  // Helper functions for redundant load elimination.
  bool processLoad(LoadInst *L);
  bool processNonLocalLoad(LoadInst *L);
  bool processAssumeIntrinsic(AssumeInst *II);

  /// Given a local dependency (Def or Clobber) determine if a value is
  /// available for the load.
  std::optional<gvn::AvailableValue>
  AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo, Value *Address);

  /// Given a list of non-local dependencies, determine if a value is
  /// available for the load in each specified block.  If it is, add it to
  /// ValuesPerBlock.  If not, add it to UnavailableBlocks.
  void AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
                               AvailValInBlkVect &ValuesPerBlock,
                               UnavailBlkVect &UnavailableBlocks);

  /// Given a critical edge from Pred to LoadBB, find a load instruction
  /// which is identical to Load from another successor of Pred.
  LoadInst *findLoadToHoistIntoPred(BasicBlock *Pred, BasicBlock *LoadBB,
                                    LoadInst *Load);

  bool PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                      UnavailBlkVect &UnavailableBlocks);

  /// Try to replace a load which executes on each loop iteration with a Phi
  /// translation of the load in the preheader and load(s) in conditionally
  /// executed paths.
  bool performLoopLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
                          UnavailBlkVect &UnavailableBlocks);

  /// Eliminates partially redundant \p Load, replacing it with \p
  /// AvailableLoads (connected by Phis if needed).
  void eliminatePartiallyRedundantLoad(
      LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
      MapVector<BasicBlock *, Value *> &AvailableLoads,
      MapVector<BasicBlock *, LoadInst *> *CriticalEdgePredAndLoad);

  // Other helper routines
  bool processInstruction(Instruction *I);
  bool processBlock(BasicBlock *BB);
  void dump(DenseMap<uint32_t, Value *> &d) const;
  bool iterateOnFunction(Function &F);
  bool performPRE(Function &F);
  bool performScalarPRE(Instruction *I);
  bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                 BasicBlock *Curr, unsigned int ValNo);
  Value *findLeader(const BasicBlock *BB, uint32_t num);
  void cleanupGlobalSets();
  void removeInstruction(Instruction *I);
  void verifyRemoved(const Instruction *I) const;
  bool splitCriticalEdges();
  BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
  bool replaceOperandsForInBlockEquality(Instruction *I) const;
  bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                         bool DominatesByEdge);
  bool processFoldableCondBr(BranchInst *BI);
  void addDeadBlock(BasicBlock *BB);
  void assignValNumForDeadCode();
  void assignBlockRPONumber(Function &F);
};

/// Create a legacy GVN pass. This also allows parameterizing whether or not
/// MemDep is enabled.
FunctionPass *createGVNPass(bool NoMemDepAnalysis = false);

/// A simple and fast domtree-based GVN pass to hoist common expressions
/// from sibling branches.
struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Uses an "inverted" value numbering to decide the similarity of
/// expressions and sinks similar expressions into successors.
struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_GVN_H
//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing a pipeline of passes over loops
/// in LLVM IR.
///
/// The primary loop pass pipeline is managed in a very particular way to
/// provide a set of core guarantees:
/// 1) Loops are, where possible, in simplified form.
/// 2) Loops are *always* in LCSSA form.
/// 3) A collection of Loop-specific analysis results are available:
///    - LoopInfo
///    - DominatorTree
///    - ScalarEvolution
///    - AAManager
/// 4) All loop passes preserve #1 (where possible), #2, and #3.
/// 5) Loop passes run over each loop in the loop nest from the innermost to
///    the outermost. Specifically, all inner loops are processed before
///    passes run over outer loops. When running the pipeline across an inner
///    loop creates new inner loops, those are added and processed in this
///    order as well.
///
/// This process is designed to facilitate transformations which simplify,
/// reduce, and remove loops. For passes which are more oriented towards
/// optimizing loops, especially optimizing loop *nests* instead of single
/// loops in isolation, this framework is less interesting.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
#define LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H

#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Utils/LCSSA.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <memory>

namespace llvm {

// Forward declarations of an update tracking API used in the pass manager.
class LPMUpdater;
class PassInstrumentation;

namespace {

template <typename PassT>
using HasRunOnLoopT = decltype(std::declval<PassT>().run(
    std::declval<Loop &>(), std::declval<LoopAnalysisManager &>(),
    std::declval<LoopStandardAnalysisResults &>(),
    std::declval<LPMUpdater &>()));

} // namespace

// Explicit specialization and instantiation declarations for the pass manager.
// See the comments on the definition of the specialization for details on how
// it differs from the primary template.
template <>
class PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
                  LPMUpdater &>
    : public PassInfoMixin<
          PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
                      LPMUpdater &>> {
public:
  explicit PassManager() = default;

  // FIXME: These are equivalent to the default move constructor/move
  // assignment. However, using = default triggers linker errors due to the
  // explicit instantiations below. Find a way to use the default and remove the
  // duplicated code here.
  PassManager(PassManager &&Arg)
      : IsLoopNestPass(std::move(Arg.IsLoopNestPass)),
        LoopPasses(std::move(Arg.LoopPasses)),
        LoopNestPasses(std::move(Arg.LoopNestPasses)) {}

  PassManager &operator=(PassManager &&RHS) {
    IsLoopNestPass = std::move(RHS.IsLoopNestPass);
    LoopPasses = std::move(RHS.LoopPasses);
    LoopNestPasses = std::move(RHS.LoopNestPasses);
    return *this;
  }

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
  /// Add either a loop pass or a loop-nest pass to the pass manager. Append \p
  /// Pass to the list of loop passes if it has a dedicated \fn run() method for
  /// loops and to the list of loop-nest passes if the \fn run() method is for
  /// loop-nests instead. Also append whether \p Pass is loop-nest pass or not
  /// to the end of \var IsLoopNestPass so we can easily identify the types of
  /// passes in the pass manager later.
  template <typename PassT>
  LLVM_ATTRIBUTE_MINSIZE
      std::enable_if_t<is_detected<HasRunOnLoopT, PassT>::value>
      addPass(PassT &&Pass) {
    using LoopPassModelT =
        detail::PassModel<Loop, PassT, PreservedAnalyses, LoopAnalysisManager,
                          LoopStandardAnalysisResults &, LPMUpdater &>;
    IsLoopNestPass.push_back(false);
    // Do not use make_unique or emplace_back; they cause too many template
    // instantiations, leading to terrible compile times.
    LoopPasses.push_back(std::unique_ptr<LoopPassConceptT>(
        new LoopPassModelT(std::forward<PassT>(Pass))));
  }

  template <typename PassT>
  LLVM_ATTRIBUTE_MINSIZE
      std::enable_if_t<!is_detected<HasRunOnLoopT, PassT>::value>
      addPass(PassT &&Pass) {
    using LoopNestPassModelT =
        detail::PassModel<LoopNest, PassT, PreservedAnalyses,
                          LoopAnalysisManager, LoopStandardAnalysisResults &,
                          LPMUpdater &>;
    IsLoopNestPass.push_back(true);
    // Do not use make_unique or emplace_back; they cause too many template
    // instantiations, leading to terrible compile times.
    LoopNestPasses.push_back(std::unique_ptr<LoopNestPassConceptT>(
        new LoopNestPassModelT(std::forward<PassT>(Pass))));
  }

  // Specializations of `addPass` for `RepeatedPass`. These are necessary since
  // `RepeatedPass` has a templated `run` method that will result in incorrect
  // detection of `HasRunOnLoopT`.
  template <typename PassT>
  LLVM_ATTRIBUTE_MINSIZE
      std::enable_if_t<is_detected<HasRunOnLoopT, PassT>::value>
      addPass(RepeatedPass<PassT> &&Pass) {
    using RepeatedLoopPassModelT =
        detail::PassModel<Loop, RepeatedPass<PassT>, PreservedAnalyses,
                          LoopAnalysisManager, LoopStandardAnalysisResults &,
                          LPMUpdater &>;
    IsLoopNestPass.push_back(false);
    // Do not use make_unique or emplace_back; they cause too many template
    // instantiations, leading to terrible compile times.
    LoopPasses.push_back(std::unique_ptr<LoopPassConceptT>(
        new RepeatedLoopPassModelT(std::move(Pass))));
  }

  template <typename PassT>
  LLVM_ATTRIBUTE_MINSIZE
      std::enable_if_t<!is_detected<HasRunOnLoopT, PassT>::value>
      addPass(RepeatedPass<PassT> &&Pass) {
    using RepeatedLoopNestPassModelT =
        detail::PassModel<LoopNest, RepeatedPass<PassT>, PreservedAnalyses,
                          LoopAnalysisManager, LoopStandardAnalysisResults &,
                          LPMUpdater &>;
    IsLoopNestPass.push_back(true);
    // Do not use make_unique or emplace_back; they cause too many template
    // instantiations, leading to terrible compile times.
    LoopNestPasses.push_back(std::unique_ptr<LoopNestPassConceptT>(
        new RepeatedLoopNestPassModelT(std::move(Pass))));
  }

  bool isEmpty() const { return LoopPasses.empty() && LoopNestPasses.empty(); }

  static bool isRequired() { return true; }

  size_t getNumLoopPasses() const { return LoopPasses.size(); }
  size_t getNumLoopNestPasses() const { return LoopNestPasses.size(); }

protected:
  using LoopPassConceptT =
      detail::PassConcept<Loop, LoopAnalysisManager,
                          LoopStandardAnalysisResults &, LPMUpdater &>;
  using LoopNestPassConceptT =
      detail::PassConcept<LoopNest, LoopAnalysisManager,
                          LoopStandardAnalysisResults &, LPMUpdater &>;

  // BitVector that identifies whether the passes are loop passes or loop-nest
  // passes (true for loop-nest passes).
  BitVector IsLoopNestPass;
  std::vector<std::unique_ptr<LoopPassConceptT>> LoopPasses;
  std::vector<std::unique_ptr<LoopNestPassConceptT>> LoopNestPasses;

  /// Run either a loop pass or a loop-nest pass. Returns `std::nullopt` if
  /// PassInstrumentation's BeforePass returns false. Otherwise, returns the
  /// preserved analyses of the pass.
  template <typename IRUnitT, typename PassT>
  std::optional<PreservedAnalyses>
  runSinglePass(IRUnitT &IR, PassT &Pass, LoopAnalysisManager &AM,
                LoopStandardAnalysisResults &AR, LPMUpdater &U,
                PassInstrumentation &PI);

  PreservedAnalyses runWithLoopNestPasses(Loop &L, LoopAnalysisManager &AM,
                                          LoopStandardAnalysisResults &AR,
                                          LPMUpdater &U);
  PreservedAnalyses runWithoutLoopNestPasses(Loop &L, LoopAnalysisManager &AM,
                                             LoopStandardAnalysisResults &AR,
                                             LPMUpdater &U);

private:
  static const Loop &getLoopFromIR(Loop &L) { return L; }
  static const Loop &getLoopFromIR(LoopNest &LN) {
    return LN.getOutermostLoop();
  }
};

/// The Loop pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of Loop passes over each Loop that the manager is run over. This
/// typedef serves as a convenient way to refer to this construct.
typedef PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
                    LPMUpdater &>
    LoopPassManager;

/// A partial specialization of the require analysis template pass to forward
/// the extra parameters from a transformation's run method to the
/// AnalysisManager's getResult.
template <typename AnalysisT>
struct RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
                           LoopStandardAnalysisResults &, LPMUpdater &>
    : PassInfoMixin<
          RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
                              LoopStandardAnalysisResults &, LPMUpdater &>> {
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &) {
    (void)AM.template getResult<AnalysisT>(L, AR);
    return PreservedAnalyses::all();
  }
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    auto ClassName = AnalysisT::name();
    auto PassName = MapClassName2PassName(ClassName);
    OS << "require<" << PassName << '>';
  }
};

/// An alias template to easily name a require analysis loop pass.
template <typename AnalysisT>
using RequireAnalysisLoopPass =
    RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
                        LoopStandardAnalysisResults &, LPMUpdater &>;

class FunctionToLoopPassAdaptor;

/// This class provides an interface for updating the loop pass manager based
/// on mutations to the loop nest.
///
/// A reference to an instance of this class is passed as an argument to each
/// Loop pass, and Loop passes should use it to update LPM infrastructure if
/// they modify the loop nest structure.
///
/// \c LPMUpdater comes with two modes: the loop mode and the loop-nest mode. In
/// loop mode, all the loops in the function will be pushed into the worklist
/// and when new loops are added to the pipeline, their subloops are also
/// inserted recursively. On the other hand, in loop-nest mode, only top-level
/// loops are contained in the worklist and the addition of new (top-level)
/// loops will not trigger the addition of their subloops.
class LPMUpdater {
public:
  /// This can be queried by loop passes which run other loop passes (like pass
  /// managers) to know whether the loop needs to be skipped due to updates to
  /// the loop nest.
  ///
  /// If this returns true, the loop object may have been deleted, so passes
  /// should take care not to touch the object.
  bool skipCurrentLoop() const { return SkipCurrentLoop; }

  /// Loop passes should use this method to indicate they have deleted a loop
  /// from the nest.
  ///
  /// Note that this loop must either be the current loop or a subloop of the
  /// current loop. This routine must be called prior to removing the loop from
  /// the loop nest.
  ///
  /// If this is called for the current loop, in addition to clearing any
  /// state, this routine will mark that the current loop should be skipped by
  /// the rest of the pass management infrastructure.
  void markLoopAsDeleted(Loop &L, llvm::StringRef Name) {
    LAM.clear(L, Name);
    assert((&L == CurrentL || CurrentL->contains(&L)) &&
           "Cannot delete a loop outside of the "
           "subloop tree currently being processed.");
    if (&L == CurrentL)
      SkipCurrentLoop = true;
  }

  void setParentLoop(Loop *L) {
#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
    ParentL = L;
#endif
  }

  /// Loop passes should use this method to indicate they have added new child
  /// loops of the current loop.
  ///
  /// \p NewChildLoops must contain only the immediate children. Any nested
  /// loops within them will be visited in postorder as usual for the loop pass
  /// manager.
  void addChildLoops(ArrayRef<Loop *> NewChildLoops) {
    assert(!LoopNestMode &&
           "Child loops should not be pushed in loop-nest mode.");
    // Insert ourselves back into the worklist first, as this loop should be
    // revisited after all the children have been processed.
    Worklist.insert(CurrentL);

#ifndef NDEBUG
    for (Loop *NewL : NewChildLoops)
      assert(NewL->getParentLoop() == CurrentL && "All of the new loops must "
                                                  "be immediate children of "
                                                  "the current loop!");
#endif

    appendLoopsToWorklist(NewChildLoops, Worklist);

    // Also skip further processing of the current loop--it will be revisited
    // after all of its newly added children are accounted for.
    SkipCurrentLoop = true;
  }

  /// Loop passes should use this method to indicate they have added new
  /// sibling loops to the current loop.
  ///
  /// \p NewSibLoops must only contain the immediate sibling loops. Any nested
  /// loops within them will be visited in postorder as usual for the loop pass
  /// manager.
  void addSiblingLoops(ArrayRef<Loop *> NewSibLoops) {
#if defined(LLVM_ENABLE_ABI_BREAKING_CHECKS) && !defined(NDEBUG)
    for (Loop *NewL : NewSibLoops)
      assert(NewL->getParentLoop() == ParentL &&
             "All of the new loops must be siblings of the current loop!");
#endif

    if (LoopNestMode)
      Worklist.insert(NewSibLoops);
    else
      appendLoopsToWorklist(NewSibLoops, Worklist);

    // No need to skip the current loop or revisit it, as sibling loops
    // shouldn't impact anything.
  }

  /// Restart the current loop.
  ///
  /// Loop passes should call this method to indicate the current loop has been
  /// sufficiently changed that it should be re-visited from the beginning of
  /// the loop pass pipeline rather than continuing.
  void revisitCurrentLoop() {
    // Tell the currently in-flight pipeline to stop running.
    SkipCurrentLoop = true;

    // And insert ourselves back into the worklist.
    Worklist.insert(CurrentL);
  }

  bool isLoopNestChanged() const {
    return LoopNestChanged;
  }

  /// Loop-nest passes should use this method to indicate whether the
  /// loop nest has been modified.
  void markLoopNestChanged(bool Changed) {
    LoopNestChanged = Changed;
  }

private:
  friend class llvm::FunctionToLoopPassAdaptor;

  /// The \c FunctionToLoopPassAdaptor's worklist of loops to process.
  SmallPriorityWorklist<Loop *, 4> &Worklist;

  /// The analysis manager for use in the current loop nest.
  LoopAnalysisManager &LAM;

  Loop *CurrentL;
  bool SkipCurrentLoop;
  const bool LoopNestMode;
  bool LoopNestChanged;

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
  // In debug builds we also track the parent loop to implement asserts even in
  // the face of loop deletion.
  Loop *ParentL;
#endif

  LPMUpdater(SmallPriorityWorklist<Loop *, 4> &Worklist,
             LoopAnalysisManager &LAM, bool LoopNestMode = false,
             bool LoopNestChanged = false)
      : Worklist(Worklist), LAM(LAM), LoopNestMode(LoopNestMode),
        LoopNestChanged(LoopNestChanged) {}
};
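
// An illustrative sketch of how a loop pass might report a deletion through
// the updater ("DeleteDeadLoopPass" and its helpers are hypothetical, not part
// of this header; getLoopPassPreservedAnalyses comes from
// "llvm/Analysis/LoopAnalysisManager.h"):
//
//   PreservedAnalyses DeleteDeadLoopPass::run(Loop &L, LoopAnalysisManager &AM,
//                                             LoopStandardAnalysisResults &AR,
//                                             LPMUpdater &U) {
//     if (isProvablyDead(L)) {                      // hypothetical analysis
//       U.markLoopAsDeleted(L, "delete-dead-loop"); // call *before* erasing L
//       eraseLoopFromNest(L, AR);                   // hypothetical transform
//       return getLoopPassPreservedAnalyses();
//     }
//     return PreservedAnalyses::all();
//   }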

template <typename IRUnitT, typename PassT>
std::optional<PreservedAnalyses> LoopPassManager::runSinglePass(
    IRUnitT &IR, PassT &Pass, LoopAnalysisManager &AM,
    LoopStandardAnalysisResults &AR, LPMUpdater &U, PassInstrumentation &PI) {
  // Get the loop (for a Loop pass) or the outermost loop (for a LoopNest
  // pass); it is passed to the BeforePass and AfterPass callbacks.
  const Loop &L = getLoopFromIR(IR);
  // Check the PassInstrumentation's BeforePass callbacks before running the
  // pass; skip its execution completely if asked to (the callback returns
  // false).
  if (!PI.runBeforePass<Loop>(*Pass, L))
    return std::nullopt;

  PreservedAnalyses PA = Pass->run(IR, AM, AR, U);

  // Do not pass a deleted loop into the instrumentation.
  if (U.skipCurrentLoop())
    PI.runAfterPassInvalidated<IRUnitT>(*Pass, PA);
  else
    PI.runAfterPass<Loop>(*Pass, L, PA);
  return PA;
}

/// Adaptor that maps from a function to its loops.
///
/// Designed to allow composition of a LoopPass(Manager) and a
/// FunctionPassManager. Note that if this pass is constructed with a \c
/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy
/// analysis prior to running the loop passes over the function to enable a \c
/// LoopAnalysisManager to be used within this run safely.
///
/// The adaptor comes with two modes: the loop mode and the loop-nest mode, and
/// the worklist updater living inside will be in the same mode as the adaptor
/// (refer to the documentation of \c LPMUpdater for a more detailed
/// explanation). Specifically, in loop mode, all loops in the function will be
/// pushed into the worklist and processed by \p Pass, while only top-level
/// loops are processed in loop-nest mode. Please refer to the various
/// specializations of \fn createFunctionToLoopPassAdaptor to see when loop
/// mode and loop-nest mode are used.
class FunctionToLoopPassAdaptor
    : public PassInfoMixin<FunctionToLoopPassAdaptor> {
public:
  using PassConceptT =
      detail::PassConcept<Loop, LoopAnalysisManager,
                          LoopStandardAnalysisResults &, LPMUpdater &>;

  explicit FunctionToLoopPassAdaptor(std::unique_ptr<PassConceptT> Pass,
                                     bool UseMemorySSA = false,
                                     bool UseBlockFrequencyInfo = false,
                                     bool UseBranchProbabilityInfo = false,
                                     bool LoopNestMode = false)
      : Pass(std::move(Pass)), UseMemorySSA(UseMemorySSA),
        UseBlockFrequencyInfo(UseBlockFrequencyInfo),
        UseBranchProbabilityInfo(UseBranchProbabilityInfo),
        LoopNestMode(LoopNestMode) {
    LoopCanonicalizationFPM.addPass(LoopSimplifyPass());
    LoopCanonicalizationFPM.addPass(LCSSAPass());
  }

  /// Runs the loop passes across every loop in the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  static bool isRequired() { return true; }

  bool isLoopNestMode() const { return LoopNestMode; }

private:
  std::unique_ptr<PassConceptT> Pass;

  FunctionPassManager LoopCanonicalizationFPM;

  bool UseMemorySSA = false;
  bool UseBlockFrequencyInfo = false;
  bool UseBranchProbabilityInfo = false;
  const bool LoopNestMode;
};

/// A function to deduce a loop pass type and wrap it in the templated
/// adaptor.
///
/// If \p Pass is a loop pass, the returned adaptor will be in loop mode.
template <typename LoopPassT>
inline std::enable_if_t<is_detected<HasRunOnLoopT, LoopPassT>::value,
                        FunctionToLoopPassAdaptor>
createFunctionToLoopPassAdaptor(LoopPassT &&Pass, bool UseMemorySSA = false,
                                bool UseBlockFrequencyInfo = false,
                                bool UseBranchProbabilityInfo = false) {
  using PassModelT =
      detail::PassModel<Loop, LoopPassT, PreservedAnalyses, LoopAnalysisManager,
                        LoopStandardAnalysisResults &, LPMUpdater &>;
  // Do not use make_unique; it causes too many template instantiations,
  // leading to terrible compile times.
  return FunctionToLoopPassAdaptor(
      std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
          new PassModelT(std::forward<LoopPassT>(Pass))),
      UseMemorySSA, UseBlockFrequencyInfo, UseBranchProbabilityInfo, false);
}

/// If \p Pass is a loop-nest pass, \p Pass will first be wrapped into a
/// \c LoopPassManager and the returned adaptor will be in loop-nest mode.
template <typename LoopNestPassT>
inline std::enable_if_t<!is_detected<HasRunOnLoopT, LoopNestPassT>::value,
                        FunctionToLoopPassAdaptor>
createFunctionToLoopPassAdaptor(LoopNestPassT &&Pass, bool UseMemorySSA = false,
                                bool UseBlockFrequencyInfo = false,
                                bool UseBranchProbabilityInfo = false) {
  LoopPassManager LPM;
  LPM.addPass(std::forward<LoopNestPassT>(Pass));
  using PassModelT =
      detail::PassModel<Loop, LoopPassManager, PreservedAnalyses,
                        LoopAnalysisManager, LoopStandardAnalysisResults &,
                        LPMUpdater &>;
  // Do not use make_unique; it causes too many template instantiations,
  // leading to terrible compile times.
  return FunctionToLoopPassAdaptor(
      std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
          new PassModelT(std::move(LPM))),
      UseMemorySSA, UseBlockFrequencyInfo, UseBranchProbabilityInfo, true);
}

/// If \p Pass is an instance of \c LoopPassManager, the returned adaptor will
/// be in loop-nest mode if the pass manager contains only loop-nest passes.
template <>
inline FunctionToLoopPassAdaptor
createFunctionToLoopPassAdaptor<LoopPassManager>(
    LoopPassManager &&LPM, bool UseMemorySSA, bool UseBlockFrequencyInfo,
    bool UseBranchProbabilityInfo) {
  // Check whether LPM contains any loop pass; if it does not, return an
  // adaptor in loop-nest mode.
  using PassModelT =
      detail::PassModel<Loop, LoopPassManager, PreservedAnalyses,
                        LoopAnalysisManager, LoopStandardAnalysisResults &,
                        LPMUpdater &>;
  bool LoopNestMode = (LPM.getNumLoopPasses() == 0);
  // Do not use make_unique; it causes too many template instantiations,
  // leading to terrible compile times.
  return FunctionToLoopPassAdaptor(
      std::unique_ptr<FunctionToLoopPassAdaptor::PassConceptT>(
          new PassModelT(std::move(LPM))),
      UseMemorySSA, UseBlockFrequencyInfo, UseBranchProbabilityInfo,
      LoopNestMode);
}
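
// A usage sketch (illustrative; LoopInstSimplifyPass is declared in
// "llvm/Transforms/Scalar/LoopInstSimplify.h"): nest a loop pipeline inside a
// function pipeline. Because the LoopPassManager below contains a loop pass,
// the adaptor runs in loop mode.
//
//   LoopPassManager LPM;
//   LPM.addPass(LoopInstSimplifyPass());
//   FunctionPassManager FPM;
//   FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));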

/// Pass for printing a loop's contents as textual IR.
class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
  raw_ostream &OS;
  std::string Banner;

public:
  PrintLoopPass();
  PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
                        LoopStandardAnalysisResults &, LPMUpdater &);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
//===- MemCpyOptimizer.h - memcpy optimization ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
#define LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class AAResults;
class BatchAAResults;
class AssumptionCache;
class CallBase;
class CallInst;
class DominatorTree;
class Function;
class Instruction;
class LoadInst;
class MemCpyInst;
class MemMoveInst;
class MemorySSA;
class MemorySSAUpdater;
class MemSetInst;
class StoreInst;
class TargetLibraryInfo;
class Value;

class MemCpyOptPass : public PassInfoMixin<MemCpyOptPass> {
  TargetLibraryInfo *TLI = nullptr;
  AAResults *AA = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  MemorySSA *MSSA = nullptr;
  MemorySSAUpdater *MSSAU = nullptr;

public:
  MemCpyOptPass() = default;

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for the old PM.
  bool runImpl(Function &F, TargetLibraryInfo *TLI, AAResults *AA,
               AssumptionCache *AC, DominatorTree *DT, MemorySSA *MSSA);

private:
  // Helper functions
  bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
  bool processStoreOfLoad(StoreInst *SI, LoadInst *LI, const DataLayout &DL,
                          BasicBlock::iterator &BBI);
  bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
  bool processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI);
  bool processMemMove(MemMoveInst *M);
  bool performCallSlotOptzn(Instruction *cpyLoad, Instruction *cpyStore,
                            Value *cpyDst, Value *cpySrc, TypeSize cpyLen,
                            Align cpyAlign, BatchAAResults &BAA,
                            std::function<CallInst *()> GetC);
  bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                     BatchAAResults &BAA);
  bool processMemSetMemCpyDependence(MemCpyInst *MemCpy, MemSetInst *MemSet,
                                     BatchAAResults &BAA);
  bool performMemCpyToMemSetOptzn(MemCpyInst *MemCpy, MemSetInst *MemSet,
                                  BatchAAResults &BAA);
  bool processByValArgument(CallBase &CB, unsigned ArgNo);
  bool processImmutArgument(CallBase &CB, unsigned ArgNo);
  Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                    Value *ByteVal);
  bool moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI);

  void eraseInstruction(Instruction *I);
  bool iterateOnFunction(Function &F);
};
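
// Conceptually, the stores-to-memset transform rewrites a run of adjacent
// stores of one value, e.g.
//
//   p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;
//
// into a single call equivalent to memset(p, 0, 4). A usage sketch
// (illustrative, assuming the standard new-pass-manager setup):
//
//   FunctionPassManager FPM;
//   FPM.addPass(MemCpyOptPass());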

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_MEMCPYOPTIMIZER_H
//===- SpeculativeExecution.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass hoists instructions to enable speculative execution on
// targets where branches are expensive. This is aimed at GPUs. It
// currently works on simple if-then and if-then-else
// patterns.
//
// Removing branches is not the only motivation for this
// pass. E.g. consider this code and assume that there is no
// addressing mode for multiplying by sizeof(*a):
//
//   if (b > 0)
//     c = a[i + 1]
//   if (d > 0)
//     e = a[i + 2]
//
// turns into
//
//   p = &a[i + 1];
//   if (b > 0)
//     c = *p;
//   q = &a[i + 2];
//   if (d > 0)
//     e = *q;
//
// which could later be optimized to
//
//   r = &a[i];
//   if (b > 0)
//     c = r[1];
//   if (d > 0)
//     e = r[2];
//
// Later passes sink back much of the speculated code that did not enable
// further optimization.
//
// This pass is more aggressive than the function SpeculativelyExecuteBB in
// SimplifyCFG. SimplifyCFG will not speculate if no selects are introduced and
// it will speculate at most one instruction. It also will not speculate if
// there is a value defined in the if-block that is only used in the then-block.
// These restrictions make sense since the speculation in SimplifyCFG seems
// aimed at introducing cheap selects, while this pass is intended to do more
// aggressive speculation while counting on later passes to either capitalize on
// that or clean it up.
//
// If the pass was created by calling
// createSpeculativeExecutionIfHasBranchDivergencePass or the
// -spec-exec-only-if-divergent-target option is present, this pass only has an
// effect on targets where TargetTransformInfo::hasBranchDivergence() is true;
// on other targets, it is a nop.
//
// This lets you include this pass unconditionally in the IR pass pipeline, but
// only enable it for relevant targets.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
#define LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class TargetTransformInfo;
class SpeculativeExecutionPass
    : public PassInfoMixin<SpeculativeExecutionPass> {
public:
  SpeculativeExecutionPass(bool OnlyIfDivergentTarget = false);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM
  bool runImpl(Function &F, TargetTransformInfo *TTI);

private:
  bool runOnBasicBlock(BasicBlock &B);
  bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);

  // If true, this pass is a nop unless the target architecture has branch
  // divergence.
  const bool OnlyIfDivergentTarget = false;

  TargetTransformInfo *TTI = nullptr;
};
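
// A usage sketch (illustrative): restrict the pass to targets with branch
// divergence, matching the -spec-exec-only-if-divergent-target behavior
// described above.
//
//   FunctionPassManager FPM;
//   FPM.addPass(SpeculativeExecutionPass(/*OnlyIfDivergentTarget=*/true));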
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SPECULATIVEEXECUTION_H
//===- SimpleLoopUnswitch.h - Hoist loop-invariant control flow -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
#define LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class LPMUpdater;
class Loop;
class Pass;
class StringRef;
class raw_ostream;

/// This pass transforms loops that contain branches or switches on loop-
/// invariant conditions to have multiple loops. For example, it turns the left
/// into the right code:
///
///  for (...)                  if (lic)
///    A                          for (...)
///    if (lic)                     A; B; C
///      B                      else
///    C                          for (...)
///                                 A; C
///
/// This can increase the size of the code exponentially (doubling it every
/// time a loop is unswitched), so we only unswitch if the resultant code will
/// be smaller than a threshold.
///
/// This pass expects LICM to be run before it to hoist invariant conditions out
/// of the loop, to make the unswitching opportunity obvious.
///
/// There is a taxonomy of unswitching that we use to classify different forms
/// of this transformation:
///
/// - Trivial unswitching: this is when the condition can be unswitched without
///   cloning any code from inside the loop. A non-trivial unswitch requires
///   code duplication.
///
/// - Full unswitching: this is when the branch or switch is completely moved
///   from inside the loop to outside the loop. Partial unswitching removes the
///   branch from the clone of the loop but must leave a (somewhat simplified)
///   branch in the original loop. While theoretically partial unswitching can
///   be done for switches, the requirements are extreme - we need the loop
///   invariant input to the switch to be sufficient to collapse to a single
///   successor in each clone.
///
/// This pass always does trivial, full unswitching for both branches and
/// switches. For branches, it also always does trivial, partial unswitching.
///
/// If enabled (via the constructor's `NonTrivial` parameter), this pass will
/// additionally do non-trivial, full unswitching for branches and switches, and
/// will do non-trivial, partial unswitching for branches.
///
/// Because partial unswitching of switches is extremely unlikely to be possible
/// in practice and significantly complicates the implementation, this pass does
/// not currently implement that in any mode.
class SimpleLoopUnswitchPass : public PassInfoMixin<SimpleLoopUnswitchPass> {
  bool NonTrivial;
  bool Trivial;

public:
  SimpleLoopUnswitchPass(bool NonTrivial = false, bool Trivial = true)
      : NonTrivial(NonTrivial), Trivial(Trivial) {}

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};
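
// A usage sketch (illustrative; createFunctionToLoopPassAdaptor is declared in
// "llvm/Transforms/Scalar/LoopPassManager.h"): enable non-trivial unswitching
// inside a loop pipeline.
//
//   LoopPassManager LPM;
//   LPM.addPass(SimpleLoopUnswitchPass(/*NonTrivial=*/true));
//   FunctionPassManager FPM;
//   FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));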

/// Create the legacy pass object for the simple loop unswitcher.
///
/// See the documentation for `SimpleLoopUnswitchPass` for details.
Pass *createSimpleLoopUnswitchLegacyPass(bool NonTrivial = false);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
//===- IndVarSimplify.h - Induction Variable Simplification -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Induction Variable
// Simplification pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
#define LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Loop;
class LPMUpdater;

class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
  /// Perform IV widening during the pass.
  bool WidenIndVars;

public:
  IndVarSimplifyPass(bool WidenIndVars = true) : WidenIndVars(WidenIndVars) {}
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
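
// A usage sketch (illustrative; the adaptor is declared in
// "llvm/Transforms/Scalar/LoopPassManager.h"): run indvars with IV widening
// disabled.
//
//   LoopPassManager LPM;
//   LPM.addPass(IndVarSimplifyPass(/*WidenIndVars=*/false));
//   FunctionPassManager FPM;
//   FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));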

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
//===- NewGVN.h - Global Value Numbering Pass -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for LLVM's Global Value Numbering pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_NEWGVN_H
#define LLVM_TRANSFORMS_SCALAR_NEWGVN_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

class NewGVNPass : public PassInfoMixin<NewGVNPass> {
public:
  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_NEWGVN_H

//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for a simple, fast CSE pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
struct EarlyCSEPass : PassInfoMixin<EarlyCSEPass> {
  EarlyCSEPass(bool UseMemorySSA = false) : UseMemorySSA(UseMemorySSA) {}

  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  bool UseMemorySSA;
};
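
// A usage sketch (illustrative): construct with UseMemorySSA = true for the
// stronger MemorySSA-backed variant.
//
//   FunctionPassManager FPM;
//   FPM.addPass(EarlyCSEPass(/*UseMemorySSA=*/true));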

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
//===- LoopInstSimplify.h - Loop Inst Simplify Pass -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs lightweight instruction simplification on loop bodies.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPINSTSIMPLIFY_H
#define LLVM_TRANSFORMS_SCALAR_LOOPINSTSIMPLIFY_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Loop;
class LPMUpdater;

/// Performs lightweight instruction simplification on loop bodies.
class LoopInstSimplifyPass : public PassInfoMixin<LoopInstSimplifyPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPINSTSIMPLIFY_H
//===- DCE.h - Dead code elimination ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the Dead Code Elimination pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_DCE_H
#define LLVM_TRANSFORMS_SCALAR_DCE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// Basic Dead Code Elimination pass.
class DCEPass : public PassInfoMixin<DCEPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

class RedundantDbgInstEliminationPass
    : public PassInfoMixin<RedundantDbgInstEliminationPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_DCE_H
//===- DivRemPairs.h - Hoist/decompose integer division and remainder -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass hoists and/or decomposes integer division and remainder
// instructions to enable CFG improvements and better codegen.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H
#define LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Hoist/decompose integer division and remainder instructions to enable CFG
/// improvements and better codegen.
struct DivRemPairsPass : public PassInfoMixin<DivRemPairsPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};

} // end namespace llvm
#endif // LLVM_TRANSFORMS_SCALAR_DIVREMPAIRS_H

//===- LoopSimplifyCFG.h - Loop CFG Simplification Pass ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop SimplifyCFG Pass. This pass is responsible for
// basic loop CFG cleanup, primarily to assist other loop passes. If you
// encounter a noncanonical CFG construct that causes another loop pass to
// perform suboptimally, this is the place to fix it up.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
#define LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class LPMUpdater;
class Loop;

/// Performs basic CFG simplifications to assist other loop passes.
class LoopSimplifyCFGPass : public PassInfoMixin<LoopSimplifyCFGPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
//===- LICM.h - Loop Invariant Code Motion Pass -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LICM_H
#define LLVM_TRANSFORMS_SCALAR_LICM_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"

namespace llvm {

class LPMUpdater;
class Loop;
class LoopNest;

extern cl::opt<unsigned> SetLicmMssaOptCap;
extern cl::opt<unsigned> SetLicmMssaNoAccForPromotionCap;

struct LICMOptions {
  unsigned MssaOptCap;
  unsigned MssaNoAccForPromotionCap;
  bool AllowSpeculation;

  LICMOptions()
      : MssaOptCap(SetLicmMssaOptCap),
        MssaNoAccForPromotionCap(SetLicmMssaNoAccForPromotionCap),
        AllowSpeculation(true) {}

  LICMOptions(unsigned MssaOptCap, unsigned MssaNoAccForPromotionCap,
              bool AllowSpeculation)
      : MssaOptCap(MssaOptCap),
        MssaNoAccForPromotionCap(MssaNoAccForPromotionCap),
        AllowSpeculation(AllowSpeculation) {}
};

/// Performs the Loop Invariant Code Motion pass.
class LICMPass : public PassInfoMixin<LICMPass> {
  LICMOptions Opts;

public:
  LICMPass(unsigned MssaOptCap, unsigned MssaNoAccForPromotionCap,
           bool AllowSpeculation)
      : LICMPass(LICMOptions(MssaOptCap, MssaNoAccForPromotionCap,
                             AllowSpeculation)) {}
  LICMPass(LICMOptions Opts) : Opts(Opts) {}

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};
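
// A usage sketch (illustrative; the adaptor is declared in
// "llvm/Transforms/Scalar/LoopPassManager.h"). LICM relies on MemorySSA in
// the new pass manager, so the adaptor is created with UseMemorySSA enabled:
//
//   LoopPassManager LPM;
//   LPM.addPass(LICMPass(LICMOptions()));
//   FunctionPassManager FPM;
//   FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM),
//                                               /*UseMemorySSA=*/true));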

/// Performs the LoopNest Invariant Code Motion pass.
class LNICMPass : public PassInfoMixin<LNICMPass> {
  LICMOptions Opts;

public:
  LNICMPass(unsigned MssaOptCap, unsigned MssaNoAccForPromotionCap,
            bool AllowSpeculation)
      : LNICMPass(LICMOptions(MssaOptCap, MssaNoAccForPromotionCap,
                              AllowSpeculation)) {}
  LNICMPass(LICMOptions Opts) : Opts(Opts) {}

  PreservedAnalyses run(LoopNest &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LICM_H
//===- Reassociate.h - Reassociate binary expressions -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates commutative expressions in an order that is designed
// to promote better constant propagation, GCSE, LICM, PRE, etc.
//
// For example: 4 + (x + 5) -> x + (4 + 5)
//
// In the implementation of this algorithm, constants are assigned rank = 0,
// function arguments are rank = 1, and other values are assigned ranks
// corresponding to the reverse post order traversal of current function
// (starting at 2), which effectively gives values in deep loops higher rank
// than values not in loops.
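//
// For example, in (x + 4) + 5 the argument x has rank 1 while the constants
// 4 and 5 have rank 0, so reassociation groups the constants together as
// x + (4 + 5), which constant folding then reduces to x + 9.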
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
#define LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"
#include <deque>

namespace llvm {

class APInt;
class BasicBlock;
class BinaryOperator;
class Function;
class Instruction;
class IRBuilderBase;
class Value;

/// A private "module" namespace for types and utilities used by Reassociate.
/// These are implementation details and should not be used by clients.
namespace reassociate {

struct ValueEntry {
  unsigned Rank;
  Value *Op;

  ValueEntry(unsigned R, Value *O) : Rank(R), Op(O) {}
};

inline bool operator<(const ValueEntry &LHS, const ValueEntry &RHS) {
  return LHS.Rank > RHS.Rank; // Sort so that highest rank goes to start.
}
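// Illustrative sketch (not from the original header): because operator< above
// inverts the comparison, sorting a container of ValueEntry values places the
// highest-ranked (loop-deepest) operands first; A, Arg, and C4 below are
// hypothetical Value pointers:
//
//   SmallVector<ValueEntry, 8> Ops = {{2, A}, {0, C4}, {1, Arg}};
//   llvm::stable_sort(Ops);   // resulting order: rank 2, rank 1, rank 0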

/// Utility class representing a base and exponent pair which form one
/// factor of some product.
struct Factor {
  Value *Base;
  unsigned Power;

  Factor(Value *Base, unsigned Power) : Base(Base), Power(Power) {}
};

class XorOpnd;

} // end namespace reassociate

/// Reassociate commutative expressions.
class ReassociatePass : public PassInfoMixin<ReassociatePass> {
public:
  using OrderedSet =
      SetVector<AssertingVH<Instruction>, std::deque<AssertingVH<Instruction>>>;

protected:
  DenseMap<BasicBlock *, unsigned> RankMap;
  DenseMap<AssertingVH<Value>, unsigned> ValueRankMap;
  OrderedSet RedoInsts;

  // Arbitrary, but prevents quadratic behavior.
  static const unsigned GlobalReassociateLimit = 10;
  static const unsigned NumBinaryOps =
      Instruction::BinaryOpsEnd - Instruction::BinaryOpsBegin;

  struct PairMapValue {
    WeakVH Value1;
    WeakVH Value2;
    unsigned Score;
    bool isValid() const { return Value1 && Value2; }
  };
  DenseMap<std::pair<Value *, Value *>, PairMapValue> PairMap[NumBinaryOps];

  bool MadeChange;

public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);

private:
  void BuildRankMap(Function &F, ReversePostOrderTraversal<Function *> &RPOT);
  unsigned getRank(Value *V);
  void canonicalizeOperands(Instruction *I);
  void ReassociateExpression(BinaryOperator *I);
  void RewriteExprTree(BinaryOperator *I,
                       SmallVectorImpl<reassociate::ValueEntry> &Ops);
  Value *OptimizeExpression(BinaryOperator *I,
                            SmallVectorImpl<reassociate::ValueEntry> &Ops);
  Value *OptimizeAdd(Instruction *I,
                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
  Value *OptimizeXor(Instruction *I,
                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
  bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
                      APInt &ConstOpnd, Value *&Res);
  bool CombineXorOpnd(Instruction *I, reassociate::XorOpnd *Opnd1,
                      reassociate::XorOpnd *Opnd2, APInt &ConstOpnd,
                      Value *&Res);
  Value *buildMinimalMultiplyDAG(IRBuilderBase &Builder,
                                 SmallVectorImpl<reassociate::Factor> &Factors);
  Value *OptimizeMul(BinaryOperator *I,
                     SmallVectorImpl<reassociate::ValueEntry> &Ops);
  Value *RemoveFactorFromExpression(Value *V, Value *Factor);
  void EraseInst(Instruction *I);
  void RecursivelyEraseDeadInsts(Instruction *I, OrderedSet &Insts);
  void OptimizeInst(Instruction *I);
  Instruction *canonicalizeNegFPConstantsForOp(Instruction *I, Instruction *Op,
                                               Value *OtherOp);
  Instruction *canonicalizeNegFPConstants(Instruction *I);
  void BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_REASSOCIATE_H
// File: Transforms/Scalar/InstSimplifyPass.h
//===- InstSimplifyPass.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Defines passes for running instruction simplification across chunks of IR.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_INSTSIMPLIFYPASS_H
#define LLVM_TRANSFORMS_SCALAR_INSTSIMPLIFYPASS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// Run instruction simplification across each instruction in the function.
///
/// Instruction simplification has useful constraints in some contexts:
/// - It will never introduce *new* instructions.
/// - There is no need to iterate to a fixed point.
///
/// Many passes use instruction simplification as a library facility, but it may
/// also be useful (in tests and other contexts) to have access to this very
/// restricted transform at a pass granularity. However, for a much more
/// powerful and comprehensive peephole optimization engine, see the
/// `instcombine` pass instead.
class InstSimplifyPass : public PassInfoMixin<InstSimplifyPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
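// Illustrative usage sketch (not part of the original header): the pass takes
// no parameters, so running it only needs a FunctionPassManager:
//
//   FunctionPassManager FPM;
//   FPM.addPass(InstSimplifyPass());
//   PreservedAnalyses PA = FPM.run(F, FAM); // F: Function&, FAM: analysis mgr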

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_INSTSIMPLIFYPASS_H
// File: Transforms/Scalar/ConstantHoisting.h
//==- ConstantHoisting.h - Prepare code for expensive constants --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass identifies expensive constants to hoist and coalesces them to
// better prepare the code for SelectionDAG-based code generation. This works
// around the limitations of the basic-block-at-a-time approach.
//
// First it scans all instructions for integer constants and calculates their
// cost. If the constant can be folded into the instruction (the cost is
// TCC_Free) or the cost is just a simple operation (TCC_Basic), then we don't
// consider it expensive and leave it alone. This is the default behavior and
// the default implementation of getIntImmCostInst will always return TCC_Free.
//
// If the cost is more than TCC_Basic, then the integer constant can't be folded
// into the instruction and it might be beneficial to hoist the constant.
// Similar constants are coalesced to reduce register pressure and
// materialization code.
//
// When a constant is hoisted, it is also hidden behind a bitcast to force it to
// be live-out of the basic block. Otherwise the constant would be just
// duplicated and each basic block would have its own copy in the SelectionDAG.
// The SelectionDAG recognizes such constants as opaque and doesn't perform
// certain transformations on them, which would create a new expensive constant.
//
// This optimization is only applied to integer constants in instructions and
// simple (that is, not nested) constant cast expressions. For example:
// %0 = load i64* inttoptr (i64 big_constant to i64*)
//
//===----------------------------------------------------------------------===//
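// Illustrative sketch (hypothetical IR, not from this header): an expensive
// immediate used in several places is materialized once and hidden behind a
// bitcast so SelectionDAG treats it as opaque instead of duplicating it:
//
//   %const = bitcast i64 244837814094590 to i64   ; hoisted base constant
//   %addr1 = add i64 %const, 8                    ; rebased use at offset 8
//   %addr2 = add i64 %const, 16                   ; rebased use at offset 16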

#ifndef LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
#define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include <algorithm>
#include <vector>

namespace llvm {

class BasicBlock;
class BlockFrequencyInfo;
class Constant;
class ConstantInt;
class ConstantExpr;
class DominatorTree;
class Function;
class GlobalVariable;
class Instruction;
class ProfileSummaryInfo;
class TargetTransformInfo;

/// A private "module" namespace for types and utilities used by
/// ConstantHoisting. These are implementation details and should not be used by
/// clients.
namespace consthoist {

/// Keeps track of the user of a constant and the operand index where the
/// constant is used.
struct ConstantUser {
  Instruction *Inst;
  unsigned OpndIdx;

  ConstantUser(Instruction *Inst, unsigned Idx) : Inst(Inst), OpndIdx(Idx) {}
};

using ConstantUseListType = SmallVector<ConstantUser, 8>;

/// Keeps track of a constant candidate and its uses.
struct ConstantCandidate {
  ConstantUseListType Uses;
  // If the candidate is a ConstantExpr (currently only constant GEP expressions
  // whose base pointers are GlobalVariables are supported), ConstInt records
  // its offset from the base GV, ConstExpr tracks the candidate GEP expr.
  ConstantInt *ConstInt;
  ConstantExpr *ConstExpr;
  unsigned CumulativeCost = 0;

  ConstantCandidate(ConstantInt *ConstInt, ConstantExpr *ConstExpr=nullptr) :
      ConstInt(ConstInt), ConstExpr(ConstExpr) {}

  /// Add the user to the use list and update the cost.
  void addUser(Instruction *Inst, unsigned Idx, unsigned Cost) {
    CumulativeCost += Cost;
    Uses.push_back(ConstantUser(Inst, Idx));
  }
};

/// This represents a constant that has been rebased with respect to a
/// base constant. The difference to the base constant is recorded in Offset.
struct RebasedConstantInfo {
  ConstantUseListType Uses;
  Constant *Offset;
  Type *Ty;

  RebasedConstantInfo(ConstantUseListType &&Uses, Constant *Offset,
      Type *Ty=nullptr) : Uses(std::move(Uses)), Offset(Offset), Ty(Ty) {}
};

using RebasedConstantListType = SmallVector<RebasedConstantInfo, 4>;

/// A base constant and all its rebased constants.
struct ConstantInfo {
  // If the candidate is a ConstantExpr (currently only constant GEP expressions
  // whose base pointers are GlobalVariables are supported), ConstInt records
  // its offset from the base GV, ConstExpr tracks the candidate GEP expr.
  ConstantInt *BaseInt;
  ConstantExpr *BaseExpr;
  RebasedConstantListType RebasedConstants;
};

} // end namespace consthoist

class ConstantHoistingPass : public PassInfoMixin<ConstantHoistingPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM.
  bool runImpl(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
               BlockFrequencyInfo *BFI, BasicBlock &Entry,
               ProfileSummaryInfo *PSI);

  void cleanup() {
    ClonedCastMap.clear();
    ConstIntCandVec.clear();
    for (auto MapEntry : ConstGEPCandMap)
      MapEntry.second.clear();
    ConstGEPCandMap.clear();
    ConstIntInfoVec.clear();
    for (auto MapEntry : ConstGEPInfoMap)
      MapEntry.second.clear();
    ConstGEPInfoMap.clear();
  }

private:
  using ConstPtrUnionType = PointerUnion<ConstantInt *, ConstantExpr *>;
  using ConstCandMapType = DenseMap<ConstPtrUnionType, unsigned>;

  const TargetTransformInfo *TTI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  LLVMContext *Ctx;
  const DataLayout *DL;
  BasicBlock *Entry;
  ProfileSummaryInfo *PSI;

  /// Keeps track of constant candidates found in the function.
  using ConstCandVecType = std::vector<consthoist::ConstantCandidate>;
  using GVCandVecMapType = MapVector<GlobalVariable *, ConstCandVecType>;
  ConstCandVecType ConstIntCandVec;
  GVCandVecMapType ConstGEPCandMap;

  /// These are the final constants we decided to hoist.
  using ConstInfoVecType = SmallVector<consthoist::ConstantInfo, 8>;
  using GVInfoVecMapType = MapVector<GlobalVariable *, ConstInfoVecType>;
  ConstInfoVecType ConstIntInfoVec;
  GVInfoVecMapType ConstGEPInfoMap;

  /// Keep track of cast instructions we already cloned.
  MapVector<Instruction *, Instruction *> ClonedCastMap;

  void collectMatInsertPts(
      const consthoist::RebasedConstantListType &RebasedConstants,
      SmallVectorImpl<Instruction *> &MatInsertPts) const;
  Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
  SetVector<Instruction *>
  findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo,
                             const ArrayRef<Instruction *> MatInsertPts) const;
  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
                                 Instruction *Inst, unsigned Idx,
                                 ConstantInt *ConstInt);
  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
                                 Instruction *Inst, unsigned Idx,
                                 ConstantExpr *ConstExpr);
  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
                                 Instruction *Inst, unsigned Idx);
  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
                                 Instruction *Inst);
  void collectConstantCandidates(Function &Fn);
  void findAndMakeBaseConstant(ConstCandVecType::iterator S,
                               ConstCandVecType::iterator E,
      SmallVectorImpl<consthoist::ConstantInfo> &ConstInfoVec);
  unsigned maximizeConstantsInRange(ConstCandVecType::iterator S,
                                    ConstCandVecType::iterator E,
                                    ConstCandVecType::iterator &MaxCostItr);
  // If BaseGV is nullptr, find base among Constant Integer candidates;
  // otherwise find base among constant GEPs sharing BaseGV as base pointer.
  void findBaseConstants(GlobalVariable *BaseGV);

  /// A ConstantUser grouped with the Type and Constant adjustment. The user
  /// will be adjusted by Offset.
  struct UserAdjustment {
    Constant *Offset;
    Type *Ty;
    Instruction *MatInsertPt;
    const consthoist::ConstantUser User;
    UserAdjustment(Constant *O, Type *T, Instruction *I,
                   consthoist::ConstantUser U)
        : Offset(O), Ty(T), MatInsertPt(I), User(U) {}
  };
  void emitBaseConstants(Instruction *Base, UserAdjustment *Adj);
  // If BaseGV is nullptr, emit Constant Integer base; otherwise emit
  // constant GEP base.
  bool emitBaseConstants(GlobalVariable *BaseGV);
  void deleteDeadCastInst() const;
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
// File: Transforms/Scalar/LoopDistribute.h
//===- LoopDistribute.h - Loop Distribution Pass ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Distribution Pass.  Its main focus is to
// distribute loops that cannot be vectorized due to dependence cycles.  It
// tries to isolate the offending dependences into a new loop allowing
// vectorization of the remaining parts.
//
//===----------------------------------------------------------------------===//
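// Illustrative sketch (hypothetical C-like code, not from this header): the
// pass splits a loop so a dependence cycle no longer blocks vectorization:
//
//   for (i = 0; i < n; i++) {        //   for (i = 0; i < n; i++)
//     A[i + 1] = A[i] + B[i];        //     A[i + 1] = A[i] + B[i]; // cycle
//     D[i] = C[i] * 2;               //   for (i = 0; i < n; i++)
//   }                                //     D[i] = C[i] * 2;   // vectorizable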

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
#define LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

class LoopDistributePass : public PassInfoMixin<LoopDistributePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPDISTRIBUTE_H
// File: Transforms/Scalar/DeadStoreElimination.h
//===- DeadStoreElimination.h - Fast Dead Store Elimination -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
#define LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// This class implements a trivial dead store elimination. We consider
/// only the redundant stores that are local to a single Basic Block.
class DSEPass : public PassInfoMixin<DSEPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_DEADSTOREELIMINATION_H
// File: Transforms/Scalar/LoopVersioningLICM.h
//===- LoopVersioningLICM.h - LICM Loop Versioning ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPVERSIONINGLICM_H
#define LLVM_TRANSFORMS_SCALAR_LOOPVERSIONINGLICM_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LPMUpdater;
class Loop;

class LoopVersioningLICMPass : public PassInfoMixin<LoopVersioningLICMPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &LAR, LPMUpdater &U);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPVERSIONINGLICM_H
// File: Transforms/Scalar/LoopStrengthReduce.h
//===- LoopStrengthReduce.h - Loop Strength Reduce Pass ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs strength reduction on array references inside loops that
// use the loop induction variable as one of their components; it rewrites
// expressions to take advantage of scaled-index addressing modes available on
// the target, and it performs a variety of other optimizations related to
// loop induction variables.
//
//===----------------------------------------------------------------------===//
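// Illustrative sketch (hypothetical C-like code, not from this header): a
// classic strength reduction of an array reference on the induction variable:
//
//   for (i = 0; i < n; i++)          //   for (p = a; p != a + n; p++)
//     sum += a[i];                   //     sum += *p;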

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
#define LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Loop;
class LPMUpdater;

/// Performs the Loop Strength Reduce pass.
class LoopStrengthReducePass : public PassInfoMixin<LoopStrengthReducePass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
// File: Transforms/Scalar/LoopDataPrefetch.h
//===-------- LoopDataPrefetch.h - Loop Data Prefetching Pass ---*- C++ -*-===//
//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's Loop Data Prefetching Pass.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
#define LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

/// An optimization pass inserting data prefetches in loops.
class LoopDataPrefetchPass : public PassInfoMixin<LoopDataPrefetchPass> {
public:
  LoopDataPrefetchPass() = default;

  /// Run the pass over the function.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPDATAPREFETCH_H
// File: Transforms/Scalar/GVNExpression.h
//===- GVNExpression.h - GVN Expression classes -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The header file for the GVN pass that contains expression handling
/// classes
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
#define LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>

namespace llvm {

class BasicBlock;
class Type;

namespace GVNExpression {

enum ExpressionType {
  ET_Base,
  ET_Constant,
  ET_Variable,
  ET_Dead,
  ET_Unknown,
  ET_BasicStart,
  ET_Basic,
  ET_AggregateValue,
  ET_Phi,
  ET_MemoryStart,
  ET_Call,
  ET_Load,
  ET_Store,
  ET_MemoryEnd,
  ET_BasicEnd
};

class Expression {
private:
  ExpressionType EType;
  unsigned Opcode;
  mutable hash_code HashVal = 0;

public:
  Expression(ExpressionType ET = ET_Base, unsigned O = ~2U)
      : EType(ET), Opcode(O) {}
  Expression(const Expression &) = delete;
  Expression &operator=(const Expression &) = delete;
  virtual ~Expression();

  static unsigned getEmptyKey() { return ~0U; }
  static unsigned getTombstoneKey() { return ~1U; }

  bool operator!=(const Expression &Other) const { return !(*this == Other); }
  bool operator==(const Expression &Other) const {
    if (getOpcode() != Other.getOpcode())
      return false;
    if (getOpcode() == getEmptyKey() || getOpcode() == getTombstoneKey())
      return true;
    // Compare the expression type for anything but load and store.
    // For load and store we set the opcode to zero to make them equal.
    if (getExpressionType() != ET_Load && getExpressionType() != ET_Store &&
        getExpressionType() != Other.getExpressionType())
      return false;

    return equals(Other);
  }

  hash_code getComputedHash() const {
    // It's theoretically possible for a thing to hash to zero.  In that case,
    // we will just compute the hash a few extra times, which is no worse than
    // before, when the hash was always recomputed.
    if (static_cast<unsigned>(HashVal) == 0)
      HashVal = getHashValue();
    return HashVal;
  }

  virtual bool equals(const Expression &Other) const { return true; }

  // Return true if the two expressions are exactly the same, including the
  // normally ignored fields.
  virtual bool exactlyEquals(const Expression &Other) const {
    return getExpressionType() == Other.getExpressionType() && equals(Other);
  }

  unsigned getOpcode() const { return Opcode; }
  void setOpcode(unsigned opcode) { Opcode = opcode; }
  ExpressionType getExpressionType() const { return EType; }

  // We deliberately leave the expression type out of the hash value.
  virtual hash_code getHashValue() const { return getOpcode(); }

  // Debugging support
  virtual void printInternal(raw_ostream &OS, bool PrintEType) const {
    if (PrintEType)
      OS << "etype = " << getExpressionType() << ",";
    OS << "opcode = " << getOpcode() << ", ";
  }

  void print(raw_ostream &OS) const {
    OS << "{ ";
    printInternal(OS, true);
    OS << "}";
  }

  LLVM_DUMP_METHOD void dump() const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const Expression &E) {
  E.print(OS);
  return OS;
}

class BasicExpression : public Expression {
private:
  using RecyclerType = ArrayRecycler<Value *>;
  using RecyclerCapacity = RecyclerType::Capacity;

  Value **Operands = nullptr;
  unsigned MaxOperands;
  unsigned NumOperands = 0;
  Type *ValueType = nullptr;

public:
  BasicExpression(unsigned NumOperands)
      : BasicExpression(NumOperands, ET_Basic) {}
  BasicExpression(unsigned NumOperands, ExpressionType ET)
      : Expression(ET), MaxOperands(NumOperands) {}
  BasicExpression() = delete;
  BasicExpression(const BasicExpression &) = delete;
  BasicExpression &operator=(const BasicExpression &) = delete;
  ~BasicExpression() override;

  static bool classof(const Expression *EB) {
    ExpressionType ET = EB->getExpressionType();
    return ET > ET_BasicStart && ET < ET_BasicEnd;
  }

  /// Swap two operands. Used during GVN to put commutative operands in
  /// order.
  void swapOperands(unsigned First, unsigned Second) {
    std::swap(Operands[First], Operands[Second]);
  }

  Value *getOperand(unsigned N) const {
    assert(Operands && "Operands not allocated");
    assert(N < NumOperands && "Operand out of range");
    return Operands[N];
  }

  void setOperand(unsigned N, Value *V) {
    assert(Operands && "Operands not allocated before setting");
    assert(N < NumOperands && "Operand out of range");
    Operands[N] = V;
  }

  unsigned getNumOperands() const { return NumOperands; }

  using op_iterator = Value **;
  using const_op_iterator = Value *const *;

  op_iterator op_begin() { return Operands; }
  op_iterator op_end() { return Operands + NumOperands; }
  const_op_iterator op_begin() const { return Operands; }
  const_op_iterator op_end() const { return Operands + NumOperands; }
  iterator_range<op_iterator> operands() {
    return iterator_range<op_iterator>(op_begin(), op_end());
  }
  iterator_range<const_op_iterator> operands() const {
    return iterator_range<const_op_iterator>(op_begin(), op_end());
  }

  void op_push_back(Value *Arg) {
    assert(NumOperands < MaxOperands && "Tried to add too many operands");
    assert(Operands && "Operands not allocated before pushing");
    Operands[NumOperands++] = Arg;
  }
  bool op_empty() const { return getNumOperands() == 0; }

  void allocateOperands(RecyclerType &Recycler, BumpPtrAllocator &Allocator) {
    assert(!Operands && "Operands already allocated");
    Operands = Recycler.allocate(RecyclerCapacity::get(MaxOperands), Allocator);
  }
  void deallocateOperands(RecyclerType &Recycler) {
    Recycler.deallocate(RecyclerCapacity::get(MaxOperands), Operands);
  }

  void setType(Type *T) { ValueType = T; }
  Type *getType() const { return ValueType; }

  bool equals(const Expression &Other) const override {
    if (getOpcode() != Other.getOpcode())
      return false;

    const auto &OE = cast<BasicExpression>(Other);
    return getType() == OE.getType() && NumOperands == OE.NumOperands &&
           std::equal(op_begin(), op_end(), OE.op_begin());
  }

  hash_code getHashValue() const override {
    return hash_combine(this->Expression::getHashValue(), ValueType,
                        hash_combine_range(op_begin(), op_end()));
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeBasic, ";

    this->Expression::printInternal(OS, false);
    OS << "operands = {";
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
      OS << "[" << i << "] = ";
      Operands[i]->printAsOperand(OS);
      OS << "  ";
    }
    OS << "} ";
  }
};

class op_inserter {
private:
  using Container = BasicExpression;

  Container *BE;

public:
  using iterator_category = std::output_iterator_tag;
  using value_type = void;
  using difference_type = void;
  using pointer = void;
  using reference = void;

  explicit op_inserter(BasicExpression &E) : BE(&E) {}
  explicit op_inserter(BasicExpression *E) : BE(E) {}

  op_inserter &operator=(Value *val) {
    BE->op_push_back(val);
    return *this;
  }
  op_inserter &operator*() { return *this; }
  op_inserter &operator++() { return *this; }
  op_inserter &operator++(int) { return *this; }
};
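// Illustrative usage sketch (not part of the original header): op_inserter
// adapts a BasicExpression so standard algorithms can append operands; the
// Recycler and Allocator names below are hypothetical locals:
//
//   BE->allocateOperands(Recycler, Allocator);      // room for MaxOperands
//   std::copy(I->op_begin(), I->op_end(), op_inserter(BE)); // I: Instruction*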

class MemoryExpression : public BasicExpression {
private:
  const MemoryAccess *MemoryLeader;

public:
  MemoryExpression(unsigned NumOperands, enum ExpressionType EType,
                   const MemoryAccess *MemoryLeader)
      : BasicExpression(NumOperands, EType), MemoryLeader(MemoryLeader) {}
  MemoryExpression() = delete;
  MemoryExpression(const MemoryExpression &) = delete;
  MemoryExpression &operator=(const MemoryExpression &) = delete;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() > ET_MemoryStart &&
           EB->getExpressionType() < ET_MemoryEnd;
  }

  hash_code getHashValue() const override {
    return hash_combine(this->BasicExpression::getHashValue(), MemoryLeader);
  }

  bool equals(const Expression &Other) const override {
    if (!this->BasicExpression::equals(Other))
      return false;
    const MemoryExpression &OtherMCE = cast<MemoryExpression>(Other);

    return MemoryLeader == OtherMCE.MemoryLeader;
  }

  const MemoryAccess *getMemoryLeader() const { return MemoryLeader; }
  void setMemoryLeader(const MemoryAccess *ML) { MemoryLeader = ML; }
};

class CallExpression final : public MemoryExpression {
private:
  CallInst *Call;

public:
  CallExpression(unsigned NumOperands, CallInst *C,
                 const MemoryAccess *MemoryLeader)
      : MemoryExpression(NumOperands, ET_Call, MemoryLeader), Call(C) {}
  CallExpression() = delete;
  CallExpression(const CallExpression &) = delete;
  CallExpression &operator=(const CallExpression &) = delete;
  ~CallExpression() override;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Call;
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeCall, ";
    this->BasicExpression::printInternal(OS, false);
    OS << " represents call at ";
    Call->printAsOperand(OS);
  }
};

class LoadExpression final : public MemoryExpression {
private:
  LoadInst *Load;

public:
  LoadExpression(unsigned NumOperands, LoadInst *L,
                 const MemoryAccess *MemoryLeader)
      : LoadExpression(ET_Load, NumOperands, L, MemoryLeader) {}

  LoadExpression(enum ExpressionType EType, unsigned NumOperands, LoadInst *L,
                 const MemoryAccess *MemoryLeader)
      : MemoryExpression(NumOperands, EType, MemoryLeader), Load(L) {}

  LoadExpression() = delete;
  LoadExpression(const LoadExpression &) = delete;
  LoadExpression &operator=(const LoadExpression &) = delete;
  ~LoadExpression() override;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Load;
  }

  LoadInst *getLoadInst() const { return Load; }
  void setLoadInst(LoadInst *L) { Load = L; }

  bool equals(const Expression &Other) const override;
  bool exactlyEquals(const Expression &Other) const override {
    return Expression::exactlyEquals(Other) &&
           cast<LoadExpression>(Other).getLoadInst() == getLoadInst();
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeLoad, ";
    this->BasicExpression::printInternal(OS, false);
    OS << " represents Load at ";
    Load->printAsOperand(OS);
    OS << " with MemoryLeader " << *getMemoryLeader();
  }
};

class StoreExpression final : public MemoryExpression {
private:
  StoreInst *Store;
  Value *StoredValue;

public:
  StoreExpression(unsigned NumOperands, StoreInst *S, Value *StoredValue,
                  const MemoryAccess *MemoryLeader)
      : MemoryExpression(NumOperands, ET_Store, MemoryLeader), Store(S),
        StoredValue(StoredValue) {}
  StoreExpression() = delete;
  StoreExpression(const StoreExpression &) = delete;
  StoreExpression &operator=(const StoreExpression &) = delete;
  ~StoreExpression() override;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Store;
  }

  StoreInst *getStoreInst() const { return Store; }
  Value *getStoredValue() const { return StoredValue; }

  bool equals(const Expression &Other) const override;

  bool exactlyEquals(const Expression &Other) const override {
    return Expression::exactlyEquals(Other) &&
           cast<StoreExpression>(Other).getStoreInst() == getStoreInst();
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeStore, ";
    this->BasicExpression::printInternal(OS, false);
    OS << " represents Store  " << *Store;
    OS << " with StoredValue ";
    StoredValue->printAsOperand(OS);
    OS << " and MemoryLeader " << *getMemoryLeader();
  }
};

class AggregateValueExpression final : public BasicExpression {
private:
  unsigned MaxIntOperands;
  unsigned NumIntOperands = 0;
  unsigned *IntOperands = nullptr;

public:
  AggregateValueExpression(unsigned NumOperands, unsigned NumIntOperands)
      : BasicExpression(NumOperands, ET_AggregateValue),
        MaxIntOperands(NumIntOperands) {}
  AggregateValueExpression() = delete;
  AggregateValueExpression(const AggregateValueExpression &) = delete;
  AggregateValueExpression &
  operator=(const AggregateValueExpression &) = delete;
  ~AggregateValueExpression() override;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_AggregateValue;
  }

  using int_arg_iterator = unsigned *;
  using const_int_arg_iterator = const unsigned *;

  int_arg_iterator int_op_begin() { return IntOperands; }
  int_arg_iterator int_op_end() { return IntOperands + NumIntOperands; }
  const_int_arg_iterator int_op_begin() const { return IntOperands; }
  const_int_arg_iterator int_op_end() const {
    return IntOperands + NumIntOperands;
  }
  unsigned int_op_size() const { return NumIntOperands; }
  bool int_op_empty() const { return NumIntOperands == 0; }
  void int_op_push_back(unsigned IntOperand) {
    assert(NumIntOperands < MaxIntOperands &&
           "Tried to add too many int operands");
    assert(IntOperands && "Operands not allocated before pushing");
    IntOperands[NumIntOperands++] = IntOperand;
  }

  virtual void allocateIntOperands(BumpPtrAllocator &Allocator) {
    assert(!IntOperands && "Operands already allocated");
    IntOperands = Allocator.Allocate<unsigned>(MaxIntOperands);
  }

  bool equals(const Expression &Other) const override {
    if (!this->BasicExpression::equals(Other))
      return false;
    const AggregateValueExpression &OE = cast<AggregateValueExpression>(Other);
    return NumIntOperands == OE.NumIntOperands &&
           std::equal(int_op_begin(), int_op_end(), OE.int_op_begin());
  }

  hash_code getHashValue() const override {
    return hash_combine(this->BasicExpression::getHashValue(),
                        hash_combine_range(int_op_begin(), int_op_end()));
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeAggregateValue, ";
    this->BasicExpression::printInternal(OS, false);
    OS << ", intoperands = {";
    for (unsigned i = 0, e = int_op_size(); i != e; ++i) {
      OS << "[" << i << "] = " << IntOperands[i] << "  ";
    }
    OS << "}";
  }
};

class int_op_inserter {
private:
  using Container = AggregateValueExpression;

  Container *AVE;

public:
  using iterator_category = std::output_iterator_tag;
  using value_type = void;
  using difference_type = void;
  using pointer = void;
  using reference = void;

  explicit int_op_inserter(AggregateValueExpression &E) : AVE(&E) {}
  explicit int_op_inserter(AggregateValueExpression *E) : AVE(E) {}

  int_op_inserter &operator=(unsigned int val) {
    AVE->int_op_push_back(val);
    return *this;
  }
  int_op_inserter &operator*() { return *this; }
  int_op_inserter &operator++() { return *this; }
  int_op_inserter &operator++(int) { return *this; }
};

class PHIExpression final : public BasicExpression {
private:
  BasicBlock *BB;

public:
  PHIExpression(unsigned NumOperands, BasicBlock *B)
      : BasicExpression(NumOperands, ET_Phi), BB(B) {}
  PHIExpression() = delete;
  PHIExpression(const PHIExpression &) = delete;
  PHIExpression &operator=(const PHIExpression &) = delete;
  ~PHIExpression() override;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Phi;
  }

  bool equals(const Expression &Other) const override {
    if (!this->BasicExpression::equals(Other))
      return false;
    const PHIExpression &OE = cast<PHIExpression>(Other);
    return BB == OE.BB;
  }

  hash_code getHashValue() const override {
    return hash_combine(this->BasicExpression::getHashValue(), BB);
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypePhi, ";
    this->BasicExpression::printInternal(OS, false);
    OS << "bb = " << BB;
  }
};

class DeadExpression final : public Expression {
public:
  DeadExpression() : Expression(ET_Dead) {}
  DeadExpression(const DeadExpression &) = delete;
  DeadExpression &operator=(const DeadExpression &) = delete;

  static bool classof(const Expression *E) {
    return E->getExpressionType() == ET_Dead;
  }
};

class VariableExpression final : public Expression {
private:
  Value *VariableValue;

public:
  VariableExpression(Value *V) : Expression(ET_Variable), VariableValue(V) {}
  VariableExpression() = delete;
  VariableExpression(const VariableExpression &) = delete;
  VariableExpression &operator=(const VariableExpression &) = delete;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Variable;
  }

  Value *getVariableValue() const { return VariableValue; }
  void setVariableValue(Value *V) { VariableValue = V; }

  bool equals(const Expression &Other) const override {
    const VariableExpression &OC = cast<VariableExpression>(Other);
    return VariableValue == OC.VariableValue;
  }

  hash_code getHashValue() const override {
    return hash_combine(this->Expression::getHashValue(),
                        VariableValue->getType(), VariableValue);
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeVariable, ";
    this->Expression::printInternal(OS, false);
    OS << " variable = " << *VariableValue;
  }
};

class ConstantExpression final : public Expression {
private:
  Constant *ConstantValue = nullptr;

public:
  ConstantExpression() : Expression(ET_Constant) {}
  ConstantExpression(Constant *constantValue)
      : Expression(ET_Constant), ConstantValue(constantValue) {}
  ConstantExpression(const ConstantExpression &) = delete;
  ConstantExpression &operator=(const ConstantExpression &) = delete;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Constant;
  }

  Constant *getConstantValue() const { return ConstantValue; }
  void setConstantValue(Constant *V) { ConstantValue = V; }

  bool equals(const Expression &Other) const override {
    const ConstantExpression &OC = cast<ConstantExpression>(Other);
    return ConstantValue == OC.ConstantValue;
  }

  hash_code getHashValue() const override {
    return hash_combine(this->Expression::getHashValue(),
                        ConstantValue->getType(), ConstantValue);
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeConstant, ";
    this->Expression::printInternal(OS, false);
    OS << " constant = " << *ConstantValue;
  }
};

class UnknownExpression final : public Expression {
private:
  Instruction *Inst;

public:
  UnknownExpression(Instruction *I) : Expression(ET_Unknown), Inst(I) {}
  UnknownExpression() = delete;
  UnknownExpression(const UnknownExpression &) = delete;
  UnknownExpression &operator=(const UnknownExpression &) = delete;

  static bool classof(const Expression *EB) {
    return EB->getExpressionType() == ET_Unknown;
  }

  Instruction *getInstruction() const { return Inst; }
  void setInstruction(Instruction *I) { Inst = I; }

  bool equals(const Expression &Other) const override {
    const auto &OU = cast<UnknownExpression>(Other);
    return Inst == OU.Inst;
  }

  hash_code getHashValue() const override {
    return hash_combine(this->Expression::getHashValue(), Inst);
  }

  // Debugging support
  void printInternal(raw_ostream &OS, bool PrintEType) const override {
    if (PrintEType)
      OS << "ExpressionTypeUnknown, ";
    this->Expression::printInternal(OS, false);
    OS << " inst = " << *Inst;
  }
};

} // end namespace GVNExpression

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_GVNEXPRESSION_H
// File: Transforms/Scalar/LoopReroll.h
//===- LoopReroll.h - Loop rerolling pass ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPREROLL_H
#define LLVM_TRANSFORMS_SCALAR_LOOPREROLL_H

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"

namespace llvm {

class LoopRerollPass : public PassInfoMixin<LoopRerollPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPREROLL_H
// File: Transforms/Scalar/AlignmentFromAssumptions.h
//===---- AlignmentFromAssumptions.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores, and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
//
//===----------------------------------------------------------------------===//
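// Illustrative sketch (hypothetical IR, not from this header): an "align"
// operand bundle on llvm.assume lets the pass raise the alignment of a load:
//
//   call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 32) ]
//   %v = load <8 x float>, ptr %p, align 4   ; can be rewritten to align 32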

#ifndef LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
#define LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class AssumptionCache;
class DominatorTree;
class ScalarEvolution;
class SCEV;

struct AlignmentFromAssumptionsPass
    : public PassInfoMixin<AlignmentFromAssumptionsPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM.
  bool runImpl(Function &F, AssumptionCache &AC, ScalarEvolution *SE_,
               DominatorTree *DT_);

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;

  bool extractAlignmentInfo(CallInst *I, unsigned Idx, Value *&AAPtr,
                            const SCEV *&AlignSCEV, const SCEV *&OffSCEV);
  bool processAssumption(CallInst *I, unsigned Idx);
};
}

#endif // LLVM_TRANSFORMS_SCALAR_ALIGNMENTFROMASSUMPTIONS_H
// File: Transforms/Scalar/TLSVariableHoist.h
//==- TLSVariableHoist.h ------ Remove Redundant TLS Loads -------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass identifies and eliminates redundant TLS loads if the related
// option is set. For example:
// static __thread int x;
// int g();
// int f(int c) {
//   int *px = &x;
//   while (c--)
//     *px += g();
//   return *px;
// }
//
// will generate redundant TLS loads when compiled with
// clang++ -fPIC -ftls-model=global-dynamic -O2 -S
//
// .LBB0_2:                                # %while.body
//                                         # =>This Inner Loop Header: Depth=1
//         callq   _Z1gv@PLT
//         movl    %eax, %ebp
//         leaq    _ZL1x@TLSLD(%rip), %rdi
//         callq   __tls_get_addr@PLT
//         addl    _ZL1x@DTPOFF(%rax), %ebp
//         movl    %ebp, _ZL1x@DTPOFF(%rax)
//         addl    $-1, %ebx
//         jne     .LBB0_2
//         jmp     .LBB0_3
// .LBB0_4:                                # %entry.while.end_crit_edge
//         leaq    _ZL1x@TLSLD(%rip), %rdi
//         callq   __tls_get_addr@PLT
//         movl    _ZL1x@DTPOFF(%rax), %ebp
//
// These redundant TLS loads hurt performance, especially in loops, so we try
// to eliminate or hoist them when requested, producing:
//
// # %bb.0:                                # %entry
//         ...
//         movl    %edi, %ebx
//         leaq    _ZL1x@TLSLD(%rip), %rdi
//         callq   __tls_get_addr@PLT
//         leaq    _ZL1x@DTPOFF(%rax), %r14
//         testl   %ebx, %ebx
//         je      .LBB0_1
// .LBB0_2:                                # %while.body
//                                         # =>This Inner Loop Header: Depth=1
//         callq   _Z1gv@PLT
//         addl    (%r14), %eax
//         movl    %eax, (%r14)
//         addl    $-1, %ebx
//         jne     .LBB0_2
//         jmp     .LBB0_3
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_TLSVARIABLEHOIST_H
#define LLVM_TRANSFORMS_SCALAR_TLSVARIABLEHOIST_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class BasicBlock;
class DominatorTree;
class Function;
class GlobalVariable;
class Instruction;

/// A private "module" namespace for types and utilities used by
/// TLSVariableHoist. These are implementation details and should
/// not be used by clients.
namespace tlshoist {

/// Keeps track of the user of a TLS variable and the operand index
/// where the variable is used.
struct TLSUser {
  Instruction *Inst;
  unsigned OpndIdx;

  TLSUser(Instruction *Inst, unsigned Idx) : Inst(Inst), OpndIdx(Idx) {}
};

/// Keeps track of a TLS variable candidate and its users.
struct TLSCandidate {
  SmallVector<TLSUser, 8> Users;

  /// Add the user to the use list.
  void addUser(Instruction *Inst, unsigned Idx) {
    Users.push_back(TLSUser(Inst, Idx));
  }
};

} // end namespace tlshoist

class TLSVariableHoistPass : public PassInfoMixin<TLSVariableHoistPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM.
  bool runImpl(Function &F, DominatorTree &DT, LoopInfo &LI);

private:
  DominatorTree *DT;
  LoopInfo *LI;

  /// Keeps track of TLS variable candidates found in the function.
  using TLSCandMapType = MapVector<GlobalVariable *, tlshoist::TLSCandidate>;
  TLSCandMapType TLSCandMap;

  void collectTLSCandidates(Function &Fn);
  void collectTLSCandidate(Instruction *Inst);
  Instruction *getNearestLoopDomInst(BasicBlock *BB, Loop *L);
  Instruction *getDomInst(Instruction *I1, Instruction *I2);
  BasicBlock::iterator findInsertPos(Function &Fn, GlobalVariable *GV,
                                     BasicBlock *&PosBB);
  Instruction *genBitCastInst(Function &Fn, GlobalVariable *GV);
  bool tryReplaceTLSCandidates(Function &Fn);
  bool tryReplaceTLSCandidate(Function &Fn, GlobalVariable *GV);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_TLSVARIABLEHOIST_H
// File: Transforms/Scalar/SCCP.h
//===- SCCP.h - Sparse Conditional Constant Propagation ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements sparse conditional constant propagation and merging:
//
// Specifically, this:
//   * Assumes values are constant unless proven otherwise
//   * Assumes BasicBlocks are dead unless proven otherwise
//   * Proves values to be constant, and replaces them with constants
//   * Proves conditional branches to be unconditional
//
//===----------------------------------------------------------------------===//
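// Illustrative sketch (hypothetical IR, not from this header): starting from
// the optimistic assumptions above, SCCP proves %c constant and the false
// edge dead, so the conditional branch becomes unconditional:
//
//   %c = icmp eq i32 0, 0                 ; proven to be constant true
//   br i1 %c, label %live, label %dead    ; folded to: br label %live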

#ifndef LLVM_TRANSFORMS_SCALAR_SCCP_H
#define LLVM_TRANSFORMS_SCALAR_SCCP_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;

/// This pass performs function-level constant propagation and merging.
class SCCPPass : public PassInfoMixin<SCCPPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_SCCP_H
// File: Transforms/Scalar/LowerMatrixIntrinsics.h
//===- LowerMatrixIntrinsics.h - Lower matrix intrinsics. -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers matrix intrinsics down to vector operations.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOWERMATRIXINTRINSICS_H
#define LLVM_TRANSFORMS_SCALAR_LOWERMATRIXINTRINSICS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class LowerMatrixIntrinsicsPass
    : public PassInfoMixin<LowerMatrixIntrinsicsPass> {
  bool Minimal;

public:
  LowerMatrixIntrinsicsPass(bool Minimal = false) : Minimal(Minimal) {}
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
  static bool isRequired() { return true; }
};
} // namespace llvm

#endif
// File: Transforms/Scalar/LoopBoundSplit.h
//===------- LoopBoundSplit.h - Split Loop Bound ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPBOUNDSPLIT_H
#define LLVM_TRANSFORMS_SCALAR_LOOPBOUNDSPLIT_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class LPMUpdater;
class Loop;

/// This pass transforms loops that contain a conditional branch on the
/// induction variable. For example, it transforms the code on the left into
/// the code on the right:
///
///                              newbound = min(n, c)
///  while (iv < n) {            while(iv < newbound) {
///    A                           A
///    if (iv < c)                 B
///      B                         C
///    C                         }
///                              if (iv != n) {
///                                while (iv < n) {
///                                  A
///                                  C
///                                }
///                              }
class LoopBoundSplitPass : public PassInfoMixin<LoopBoundSplitPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPBOUNDSPLIT_H
// File: Transforms/Scalar/RewriteStatepointsForGC.h
//===- RewriteStatepointsForGC.h - ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides interface to "Rewrite Statepoints for GC" pass.
//
// This pass rewrites call/invoke instructions to make potential relocations
// performed by the garbage collector explicit in the IR.
//
//===----------------------------------------------------------------------===//
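// Illustrative sketch (hypothetical IR, not from this header): a call that may
// trigger a collection is wrapped in a statepoint, and live GC pointers are
// reloaded through gc.relocate so relocation is explicit:
//
//   %tok = call token @llvm.experimental.gc.statepoint(...)   ; wraps @foo
//   %obj.reloc = call ptr @llvm.experimental.gc.relocate(token %tok, ...)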

#ifndef LLVM_TRANSFORMS_SCALAR_REWRITESTATEPOINTSFORGC_H
#define LLVM_TRANSFORMS_SCALAR_REWRITESTATEPOINTSFORGC_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class DominatorTree;
class Function;
class Module;
class TargetTransformInfo;
class TargetLibraryInfo;

struct RewriteStatepointsForGC : public PassInfoMixin<RewriteStatepointsForGC> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

  bool runOnFunction(Function &F, DominatorTree &, TargetTransformInfo &,
                     const TargetLibraryInfo &);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_REWRITESTATEPOINTSFORGC_H
// File: Transforms/Scalar/DFAJumpThreading.h
//===- DFAJumpThreading.h - Threads a switch statement inside a loop ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides the interface for the DFAJumpThreading pass.
//
//===----------------------------------------------------------------------===//
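// Illustrative sketch (hypothetical C-like code, not from this header): when a
// switch inside a loop implements a state machine whose next state is known at
// compile time, the pass threads the predictable transitions directly:
//
//   for (;;) {
//     switch (state) {
//     case A: ...; state = B; break;   // A always goes to B, so the A->B
//     case B: ...; state = C; break;   // dispatch can be jump-threaded
//     }
//   }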

#ifndef LLVM_TRANSFORMS_SCALAR_DFAJUMPTHREADING_H
#define LLVM_TRANSFORMS_SCALAR_DFAJUMPTHREADING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct DFAJumpThreadingPass : PassInfoMixin<DFAJumpThreadingPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_DFAJUMPTHREADING_H
// File: Transforms/Scalar/MergeICmps.h
//===- MergeICmps.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_MERGEICMPS_H
#define LLVM_TRANSFORMS_SCALAR_MERGEICMPS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct MergeICmpsPass
    : PassInfoMixin<MergeICmpsPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_MERGEICMPS_H
// File: Transforms/Scalar/LowerAtomic.h
//===- LowerAtomic.h - Lower atomic intrinsics ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass lowers atomic intrinsics to non-atomic form for use in a known
/// non-preemptible environment.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
#define LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// A pass that lowers atomic intrinsics into their non-atomic form.
class LowerAtomicPass : public PassInfoMixin<LowerAtomicPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
  static bool isRequired() { return true; }
};

class AtomicRMWInst;
/// Convert the given RMWI into primitive load and stores,
/// assuming that doing so is legal. Return true if the lowering
/// succeeds.
bool lowerAtomicRMWInst(AtomicRMWInst *RMWI);
}

#endif // LLVM_TRANSFORMS_SCALAR_LOWERATOMIC_H
// File: Transforms/Scalar/LowerConstantIntrinsics.h
//===- LowerConstantIntrinsics.h - Lower constant int. pass -*- C++ -*-========//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// The header file for the LowerConstantIntrinsics pass as used by the new pass
/// manager.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H
#define LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct LowerConstantIntrinsicsPass :
    PassInfoMixin<LowerConstantIntrinsicsPass> {
public:
  explicit LowerConstantIntrinsicsPass() = default;

  /// Run the pass over the function.
  ///
  /// This will lower all remaining 'objectsize' and 'is.constant'
  /// intrinsic calls in this function, even when the argument has no known
  /// size or is not a constant respectively. The resulting constant is
  /// propagated and conditional branches are resolved where possible.
  /// This complements the Instruction Simplification and
  /// Instruction Combination passes of the optimized pass chain.
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
};
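// An illustrative IR-level sketch (not from this header) of what the lowering
// described above does, assuming the usual intrinsic signatures:
//
//   %c  = call i1 @llvm.is.constant.i32(i32 %x)
//   %sz = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 false, i1 false)
//
// When %x is not provably constant, %c folds to false; when the size of %p is
// unknown, %sz folds to -1 (or 0 if the 'min' argument is true). The folded
// constants are then propagated and conditional branches resolved.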

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOWERCONSTANTINTRINSICS_H
//===- LoopUnrollPass.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include <optional>

namespace llvm {

extern cl::opt<bool> ForgetSCEVInLoopUnroll;

class Function;
class Loop;
class LPMUpdater;

/// Loop unroll pass that only does full loop unrolling and peeling.
class LoopFullUnrollPass : public PassInfoMixin<LoopFullUnrollPass> {
  const int OptLevel;

  /// If false, use a cost model to determine whether unrolling of a loop is
  /// profitable. If true, only loops that explicitly request unrolling via
  /// metadata are considered. All other loops are skipped.
  const bool OnlyWhenForced;

  /// If true, forget all loops when unrolling. If false, forget top-most loop
  /// of the currently processed loops, which removes one entry at a time from
  /// the internal SCEV records. For large loops, the former is faster.
  const bool ForgetSCEV;

public:
  explicit LoopFullUnrollPass(int OptLevel = 2, bool OnlyWhenForced = false,
                              bool ForgetSCEV = false)
      : OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced),
        ForgetSCEV(ForgetSCEV) {}

  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

/// A set of parameters used to control various transforms performed by the
/// LoopUnroll pass. Each of the boolean parameters can be set to:
///      true - enabling the transformation.
///      false - disabling the transformation.
///      None - relying on a global default.
///
/// There is also an OptLevel parameter, which is used for additional loop unroll
/// tuning.
///
/// Intended use is to create a default object, modify parameters with
/// additional setters and then pass it to LoopUnrollPass.
///
struct LoopUnrollOptions {
  std::optional<bool> AllowPartial;
  std::optional<bool> AllowPeeling;
  std::optional<bool> AllowRuntime;
  std::optional<bool> AllowUpperBound;
  std::optional<bool> AllowProfileBasedPeeling;
  std::optional<unsigned> FullUnrollMaxCount;
  int OptLevel;

  /// If false, use a cost model to determine whether unrolling of a loop is
  /// profitable. If true, only loops that explicitly request unrolling via
  /// metadata are considered. All other loops are skipped.
  bool OnlyWhenForced;

  /// If true, forget all loops when unrolling. If false, forget top-most loop
  /// of the currently processed loops, which removes one entry at a time from
  /// the internal SCEV records. For large loops, the former is faster.
  const bool ForgetSCEV;

  LoopUnrollOptions(int OptLevel = 2, bool OnlyWhenForced = false,
                    bool ForgetSCEV = false)
      : OptLevel(OptLevel), OnlyWhenForced(OnlyWhenForced),
        ForgetSCEV(ForgetSCEV) {}

  /// Enables or disables partial unrolling. When disabled only full unrolling
  /// is allowed.
  LoopUnrollOptions &setPartial(bool Partial) {
    AllowPartial = Partial;
    return *this;
  }

  /// Enables or disables unrolling of loops with runtime trip count.
  LoopUnrollOptions &setRuntime(bool Runtime) {
    AllowRuntime = Runtime;
    return *this;
  }

  /// Enables or disables loop peeling.
  LoopUnrollOptions &setPeeling(bool Peeling) {
    AllowPeeling = Peeling;
    return *this;
  }

  /// Enables or disables the use of trip count upper bound
  /// in loop unrolling.
  LoopUnrollOptions &setUpperBound(bool UpperBound) {
    AllowUpperBound = UpperBound;
    return *this;
  }

  // Sets "optimization level" tuning parameter for loop unrolling.
  LoopUnrollOptions &setOptLevel(int O) {
    OptLevel = O;
    return *this;
  }

  // Enables or disables profile-based loop peeling.
  LoopUnrollOptions &setProfileBasedPeeling(int O) {
    AllowProfileBasedPeeling = O;
    return *this;
  }

  // Sets the max full unroll count.
  LoopUnrollOptions &setFullUnrollMaxCount(unsigned O) {
    FullUnrollMaxCount = O;
    return *this;
  }
};
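// A minimal usage sketch of the builder-style configuration described above
// (all names as declared in this header; LoopUnrollPass is declared below):
//
//   LoopUnrollOptions Opts(/*OptLevel=*/3);
//   Opts.setPartial(true).setRuntime(true).setPeeling(false);
//   LoopUnrollPass Pass(Opts);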

/// Loop unroll pass that will support both full and partial unrolling.
/// It is a function pass to have access to function and module analyses.
/// It will also put loops into canonical form (simplified and LCSSA).
class LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
  LoopUnrollOptions UnrollOpts;

public:
  /// This uses the target information (or flags) to control the thresholds for
  /// different unrolling strategies but supports all of them.
  explicit LoopUnrollPass(LoopUnrollOptions UnrollOpts = {})
      : UnrollOpts(UnrollOpts) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
//===-- MakeGuardsExplicit.h - Turn guard intrinsics into guard branches --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers the @llvm.experimental.guard intrinsic to the new form of
// guard represented as a widenable explicit branch to the deopt block. The
// difference between this pass and LowerGuardIntrinsic is that after this
// pass, a guard represented as the intrinsic:
//
//   call void(i1, ...) @llvm.experimental.guard(i1 %old_cond) [ "deopt"() ]
//
// is transformed into a guard represented as a widenable explicit branch:
//
//   %widenable_cond = call i1 @llvm.experimental.widenable.condition()
//   br i1 (%old_cond & %widenable_cond), label %guarded, label %deopt
//
// Here:
//   - The semantics of @llvm.experimental.widenable.condition allow replacing
//     %widenable_cond with the construction (%widenable_cond & %any_other_cond)
//     without loss of correctness;
//   - %guarded is the lower part of old guard intrinsic's parent block split by
//     the intrinsic call;
//   - %deopt is a block containing a sole call to @llvm.experimental.deoptimize
//     intrinsic.
//
// Therefore, this branch preserves the property of widenability.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_MAKEGUARDSEXPLICIT_H
#define LLVM_TRANSFORMS_SCALAR_MAKEGUARDSEXPLICIT_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct MakeGuardsExplicitPass : public PassInfoMixin<MakeGuardsExplicitPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_MAKEGUARDSEXPLICIT_H
//===---- BDCE.h - Bit-tracking dead code elimination -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Bit-Tracking Dead Code Elimination pass. Some
// instructions (shifts, some ands, ors, etc.) kill some of their input bits.
// We track these dead bits and remove instructions that compute only these
// dead bits.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_BDCE_H
#define LLVM_TRANSFORMS_SCALAR_BDCE_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

// The Bit-Tracking Dead Code Elimination pass.
struct BDCEPass : PassInfoMixin<BDCEPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_BDCE_H
//===- FlattenCFG.h -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The FlattenCFG pass flattens a function's CFG using the FlattenCFG utility
// function, iteratively flattening until no further changes are made.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_FLATTENCFG_H
#define LLVM_TRANSFORMS_SCALAR_FLATTENCFG_H

#include "llvm/IR/PassManager.h"

namespace llvm {
struct FlattenCFGPass : PassInfoMixin<FlattenCFGPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_FLATTENCFG_H
//===--- LowerWidenableCondition.h - Lower the guard intrinsic ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers the llvm.widenable.condition intrinsic to its default
// value, which is i1 true.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_LOWERWIDENABLECONDITION_H
#define LLVM_TRANSFORMS_SCALAR_LOWERWIDENABLECONDITION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct LowerWidenableConditionPass : PassInfoMixin<LowerWidenableConditionPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOWERWIDENABLECONDITION_H
//===- CorrelatedValuePropagation.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
#define LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;

struct CorrelatedValuePropagationPass
    : PassInfoMixin<CorrelatedValuePropagationPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_CORRELATEDVALUEPROPAGATION_H
//===- LowerAtomicPass.h - Lower atomic intrinsics --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass lowers atomic intrinsics to non-atomic form for use in a known
/// non-preemptible environment.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_SCALAR_LOWERATOMICPASS_H
#define LLVM_TRANSFORMS_SCALAR_LOWERATOMICPASS_H

#include "llvm/IR/PassManager.h"

namespace llvm {

/// A pass that lowers atomic intrinsics into non-atomic intrinsics.
class LowerAtomicPass : public PassInfoMixin<LowerAtomicPass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &);
  static bool isRequired() { return true; }
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOWERATOMICPASS_H
//===- NaryReassociate.h - Reassociate n-ary expressions --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression in a
// form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
// (a + b) is computed before.
//
// NaryReassociate works as follows. For every instruction in the form of (a +
// b) + c, it checks whether a + c or b + c is already computed by a dominating
// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
// c) + a and removes the redundancy accordingly. To efficiently look up whether
// an expression is computed before, we store each instruction seen and its SCEV
// into an SCEV-to-instruction map.
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the function
// in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to run for multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it is used only once. The above algorithm can
// eliminate an instruction and decrease the usage count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for split consideration. For
// example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac+b because ab is used
// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
// result, ab2 becomes dead and ab will be used only once in the second
// iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds and muls for now. This should be extended
// and generalized.
//
//===----------------------------------------------------------------------===//
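//
// A hedged sketch of the core lookup described above (shapes assumed from the
// declarations below; A, B, C and I are placeholder values, and this is not
// the actual implementation):
//
//   // For I = (A + B) + C, ask whether A + C was computed by a dominator.
//   const SCEV *Candidate = SE->getAddExpr(SE->getSCEV(A), SE->getSCEV(C));
//   if (Instruction *Dom = findClosestMatchingDominator(Candidate, I)) {
//     // Rewrite I as Dom + B, then record I's SCEV in SeenExprs.
//   }
//
//===----------------------------------------------------------------------===//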

#ifndef LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
#define LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueHandle.h"

namespace llvm {

class AssumptionCache;
class BinaryOperator;
class DataLayout;
class DominatorTree;
class Function;
class GetElementPtrInst;
class Instruction;
class ScalarEvolution;
class SCEV;
class TargetLibraryInfo;
class TargetTransformInfo;
class Type;
class Value;

class NaryReassociatePass : public PassInfoMixin<NaryReassociatePass> {
public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);

  // Glue for old PM.
  bool runImpl(Function &F, AssumptionCache *AC_, DominatorTree *DT_,
               ScalarEvolution *SE_, TargetLibraryInfo *TLI_,
               TargetTransformInfo *TTI_);

private:
  // Runs only one iteration of the dominator-based algorithm. See the header
  // comments for why we need multiple iterations.
  bool doOneIteration(Function &F);

  // Reassociates I for better CSE.
  Instruction *tryReassociate(Instruction *I, const SCEV *&OrigSCEV);

  // Reassociate GEP for better CSE.
  Instruction *tryReassociateGEP(GetElementPtrInst *GEP);

  // Try splitting GEP at the I-th index and see whether either part can be
  // CSE'ed. This is a helper function for tryReassociateGEP.
  //
  // \p IndexedType The element type indexed by GEP's I-th index. This is
  //                equivalent to
  //                  GEP->getIndexedType(GEP->getPointerOperand(), 0-th index,
  //                                      ..., i-th index).
  GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType);

  // Given GEP's I-th index = LHS + RHS, see whether &Base[..][LHS][..] or
  // &Base[..][RHS][..] can be CSE'ed and rewrite GEP accordingly.
  GetElementPtrInst *tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType);

  // Reassociate binary operators for better CSE.
  Instruction *tryReassociateBinaryOp(BinaryOperator *I);

  // A helper function for tryReassociateBinaryOp. LHS and RHS are explicitly
  // passed.
  Instruction *tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                      BinaryOperator *I);
  // Rewrites I to (LHS op RHS) if LHS is computed already.
  Instruction *tryReassociatedBinaryOp(const SCEV *LHS, Value *RHS,
                                       BinaryOperator *I);

  // Tries to match Op1 and Op2 by using V.
  bool matchTernaryOp(BinaryOperator *I, Value *V, Value *&Op1, Value *&Op2);

  // Gets SCEV for (LHS op RHS).
  const SCEV *getBinarySCEV(BinaryOperator *I, const SCEV *LHS,
                            const SCEV *RHS);

  // Returns the closest dominator of \c Dominatee that computes
  // \c CandidateExpr. Returns null if not found.
  Instruction *findClosestMatchingDominator(const SCEV *CandidateExpr,
                                            Instruction *Dominatee);

  // Try to match \p I as a signed/unsigned Min/Max and reassociate it. \p
  // OrigSCEV is set if \p I matches Min/Max, regardless of whether
  // reassociation is done or not. If reassociation was successful, the newly
  // generated instruction is returned; otherwise nullptr.
  template <typename PredT>
  Instruction *matchAndReassociateMinOrMax(Instruction *I,
                                           const SCEV *&OrigSCEV);

  // Reassociate Min/Max.
  template <typename MaxMinT>
  Value *tryReassociateMinOrMax(Instruction *I, MaxMinT MaxMinMatch, Value *LHS,
                                Value *RHS);

  // GetElementPtrInst implicitly sign-extends an index if the index is shorter
  // than the pointer size. This function returns whether Index is shorter than
  // GEP's pointer size, i.e., whether Index needs to be sign-extended in order
  // to be an index of GEP.
  bool requiresSignExtension(Value *Index, GetElementPtrInst *GEP);

  AssumptionCache *AC;
  const DataLayout *DL;
  DominatorTree *DT;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  TargetTransformInfo *TTI;

  // A lookup table quickly telling which instructions compute the given SCEV.
  // Note that there can be multiple instructions at different locations
  // computing the same SCEV, so we map a SCEV to an instruction list.  For
  // example,
  //
  //   if (p1)
  //     foo(a + b);
  //   if (p2)
  //     bar(a + b);
  DenseMap<const SCEV *, SmallVector<WeakTrackingVH, 2>> SeenExprs;
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_NARYREASSOCIATE_H
//===- LoopIdiomRecognize.h - Loop Idiom Recognize Pass ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form.  In cases where it kicks in, it can be a significant
// performance win.
//
//===----------------------------------------------------------------------===//
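//
// A classic example of such an idiom (illustrative only): the loop
//
//   for (i = 0; i != n; ++i) a[i] = 0;
//
// can be replaced with a single call to
//
//   memset(a, 0, n * sizeof(*a));
//
// The DisableLIRP knobs below allow disabling the memset/memcpy forms.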

#ifndef LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
#define LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

class Loop;
class LPMUpdater;

/// Options to disable Loop Idiom Recognize, which can be shared with other
/// passes.
struct DisableLIRP {
  /// When true, the entire pass is disabled.
  static bool All;

  /// When true, Memset is disabled.
  static bool Memset;

  /// When true, Memcpy is disabled.
  static bool Memcpy;
};

/// Performs Loop Idiom Recognize Pass.
class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
//===-- CFGMST.h - Minimum Spanning Tree for CFG ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a Union-find algorithm to compute Minimum Spanning Tree
// for a given CFG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_CFGMST_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_CFGMST_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <utility>
#include <vector>

#define DEBUG_TYPE "cfgmst"

using namespace llvm;

namespace llvm {

/// An union-find based Minimum Spanning Tree for CFG
///
/// Implements a Union-find algorithm to compute Minimum Spanning Tree
/// for a given CFG.
template <class Edge, class BBInfo> class CFGMST {
public:
  Function &F;

  // Store all the edges in the CFG. It may contain stale edges
  // when Removed is set.
  std::vector<std::unique_ptr<Edge>> AllEdges;

  // This map records the auxiliary information for each BB.
  DenseMap<const BasicBlock *, std::unique_ptr<BBInfo>> BBInfos;

  // Whether the function has an exit block with no successors.
  // (For a function with an infinite loop, this block may be absent.)
  bool ExitBlockFound = false;

  // Find the root group of G and compress the path from G to the root.
  BBInfo *findAndCompressGroup(BBInfo *G) {
    if (G->Group != G)
      G->Group = findAndCompressGroup(static_cast<BBInfo *>(G->Group));
    return static_cast<BBInfo *>(G->Group);
  }

  // Union BB1 and BB2 into the same group and return true.
  // Return false if BB1 and BB2 are already in the same group.
  bool unionGroups(const BasicBlock *BB1, const BasicBlock *BB2) {
    BBInfo *BB1G = findAndCompressGroup(&getBBInfo(BB1));
    BBInfo *BB2G = findAndCompressGroup(&getBBInfo(BB2));

    if (BB1G == BB2G)
      return false;

    // Make the smaller-rank tree a direct child of the higher-rank tree's root.
    if (BB1G->Rank < BB2G->Rank)
      BB1G->Group = BB2G;
    else {
      BB2G->Group = BB1G;
      // If the ranks are the same, increment the rank of the new root by one.
      if (BB1G->Rank == BB2G->Rank)
        BB1G->Rank++;
    }
    return true;
  }

  // Given BB, return the auxiliary information.
  BBInfo &getBBInfo(const BasicBlock *BB) const {
    auto It = BBInfos.find(BB);
    assert(It->second.get() != nullptr);
    return *It->second.get();
  }

  // Given BB, return the auxiliary information if it's available.
  BBInfo *findBBInfo(const BasicBlock *BB) const {
    auto It = BBInfos.find(BB);
    if (It == BBInfos.end())
      return nullptr;
    return It->second.get();
  }

  // Traverse the CFG using a stack. Find all the edges and assign weights.
  // Edges with large weights will be put into the MST first, so they are less
  // likely to be instrumented.
  void buildEdges() {
    LLVM_DEBUG(dbgs() << "Build Edge on " << F.getName() << "\n");

    BasicBlock *Entry = &(F.getEntryBlock());
    uint64_t EntryWeight = (BFI != nullptr ? BFI->getEntryFreq() : 2);
    // If we want to instrument the entry count, lower the weight to 0.
    if (InstrumentFuncEntry)
      EntryWeight = 0;
    Edge *EntryIncoming = nullptr, *EntryOutgoing = nullptr,
         *ExitOutgoing = nullptr, *ExitIncoming = nullptr;
    uint64_t MaxEntryOutWeight = 0, MaxExitOutWeight = 0, MaxExitInWeight = 0;

    // Add a fake edge to the entry.
    EntryIncoming = &addEdge(nullptr, Entry, EntryWeight);
    LLVM_DEBUG(dbgs() << "  Edge: from fake node to " << Entry->getName()
                      << " w = " << EntryWeight << "\n");

    // Special handling for single BB functions.
    if (succ_empty(Entry)) {
      addEdge(Entry, nullptr, EntryWeight);
      return;
    }

    static const uint32_t CriticalEdgeMultiplier = 1000;

    for (BasicBlock &BB : F) {
      Instruction *TI = BB.getTerminator();
      uint64_t BBWeight =
          (BFI != nullptr ? BFI->getBlockFreq(&BB).getFrequency() : 2);
      uint64_t Weight = 2;
      if (int successors = TI->getNumSuccessors()) {
        for (int i = 0; i != successors; ++i) {
          BasicBlock *TargetBB = TI->getSuccessor(i);
          bool Critical = isCriticalEdge(TI, i);
          uint64_t scaleFactor = BBWeight;
          if (Critical) {
            if (scaleFactor < UINT64_MAX / CriticalEdgeMultiplier)
              scaleFactor *= CriticalEdgeMultiplier;
            else
              scaleFactor = UINT64_MAX;
          }
          if (BPI != nullptr)
            Weight = BPI->getEdgeProbability(&BB, TargetBB).scale(scaleFactor);
          if (Weight == 0)
            Weight++;
          auto *E = &addEdge(&BB, TargetBB, Weight);
          E->IsCritical = Critical;
          LLVM_DEBUG(dbgs() << "  Edge: from " << BB.getName() << " to "
                            << TargetBB->getName() << "  w=" << Weight << "\n");

          // Keep track of entry/exit edges:
          if (&BB == Entry) {
            if (Weight > MaxEntryOutWeight) {
              MaxEntryOutWeight = Weight;
              EntryOutgoing = E;
            }
          }

          auto *TargetTI = TargetBB->getTerminator();
          if (TargetTI && !TargetTI->getNumSuccessors()) {
            if (Weight > MaxExitInWeight) {
              MaxExitInWeight = Weight;
              ExitIncoming = E;
            }
          }
        }
      } else {
        ExitBlockFound = true;
        Edge *ExitO = &addEdge(&BB, nullptr, BBWeight);
        if (BBWeight > MaxExitOutWeight) {
          MaxExitOutWeight = BBWeight;
          ExitOutgoing = ExitO;
        }
        LLVM_DEBUG(dbgs() << "  Edge: from " << BB.getName() << " to fake exit"
                          << " w = " << BBWeight << "\n");
      }
    }

    // Entry/exit edge adjustment heuristic:
    // prefer instrumenting entry edge over exit edge
    // if possible. Those exit edges may never have a chance to be
    // executed (for instance the program is an event handling loop)
    // before the profile is asynchronously dumped.
    //
    // If EntryIncoming and ExitOutgoing have similar weights, make sure
    // ExitOutgoing is selected as the min-edge. Similarly, if EntryOutgoing
    // and ExitIncoming have similar weights, make sure ExitIncoming becomes
    // the min-edge.
    uint64_t EntryInWeight = EntryWeight;

    if (EntryInWeight >= MaxExitOutWeight &&
        EntryInWeight * 2 < MaxExitOutWeight * 3) {
      EntryIncoming->Weight = MaxExitOutWeight;
      ExitOutgoing->Weight = EntryInWeight + 1;
    }

    if (MaxEntryOutWeight >= MaxExitInWeight &&
        MaxEntryOutWeight * 2 < MaxExitInWeight * 3) {
      EntryOutgoing->Weight = MaxExitInWeight;
      ExitIncoming->Weight = MaxEntryOutWeight + 1;
    }
  }

  // Sort CFG edges based on their weight.
  void sortEdgesByWeight() {
    llvm::stable_sort(AllEdges, [](const std::unique_ptr<Edge> &Edge1,
                                   const std::unique_ptr<Edge> &Edge2) {
      return Edge1->Weight > Edge2->Weight;
    });
  }

  // Traverse all the edges and compute the Minimum Weight Spanning Tree
  // using the union-find algorithm.
  void computeMinimumSpanningTree() {
    // First, put all critical edges whose destination is a landing pad into
    // the MST. This works around insufficient support for splitting critical
    // edges when the destination BB is a landing pad.
    for (auto &Ei : AllEdges) {
      if (Ei->Removed)
        continue;
      if (Ei->IsCritical) {
        if (Ei->DestBB && Ei->DestBB->isLandingPad()) {
          if (unionGroups(Ei->SrcBB, Ei->DestBB))
            Ei->InMST = true;
        }
      }
    }

    for (auto &Ei : AllEdges) {
      if (Ei->Removed)
        continue;
      // If we detect infinite loops, force
      // instrumenting the entry edge:
      if (!ExitBlockFound && Ei->SrcBB == nullptr)
        continue;
      if (unionGroups(Ei->SrcBB, Ei->DestBB))
        Ei->InMST = true;
    }
  }

  // Dump debug information about the instrumentation.
  void dumpEdges(raw_ostream &OS, const Twine &Message) const {
    if (!Message.str().empty())
      OS << Message << "\n";
    OS << "  Number of Basic Blocks: " << BBInfos.size() << "\n";
    for (auto &BI : BBInfos) {
      const BasicBlock *BB = BI.first;
      OS << "  BB: " << (BB == nullptr ? "FakeNode" : BB->getName()) << "  "
         << BI.second->infoString() << "\n";
    }

    OS << "  Number of Edges: " << AllEdges.size()
       << " (*: Instrument, C: CriticalEdge, -: Removed)\n";
    uint32_t Count = 0;
    for (auto &EI : AllEdges)
      OS << "  Edge " << Count++ << ": " << getBBInfo(EI->SrcBB).Index << "-->"
         << getBBInfo(EI->DestBB).Index << EI->infoString() << "\n";
  }

  // Add an edge to AllEdges with weight W.
  Edge &addEdge(BasicBlock *Src, BasicBlock *Dest, uint64_t W) {
    uint32_t Index = BBInfos.size();
    auto Iter = BBInfos.end();
    bool Inserted;
    std::tie(Iter, Inserted) = BBInfos.insert(std::make_pair(Src, nullptr));
    if (Inserted) {
      // Newly inserted, update the real info.
      Iter->second = std::make_unique<BBInfo>(Index);
      Index++;
    }
    std::tie(Iter, Inserted) = BBInfos.insert(std::make_pair(Dest, nullptr));
    if (Inserted)
      // Newly inserted, update the real info.
      Iter->second = std::make_unique<BBInfo>(Index);
    AllEdges.emplace_back(new Edge(Src, Dest, W));
    return *AllEdges.back();
  }

  BranchProbabilityInfo *BPI;
  BlockFrequencyInfo *BFI;

  // Whether the function entry is always instrumented.
  bool InstrumentFuncEntry;

public:
  CFGMST(Function &Func, bool InstrumentFuncEntry_,
         BranchProbabilityInfo *BPI_ = nullptr,
         BlockFrequencyInfo *BFI_ = nullptr)
      : F(Func), BPI(BPI_), BFI(BFI_),
        InstrumentFuncEntry(InstrumentFuncEntry_) {
    buildEdges();
    sortEdgesByWeight();
    computeMinimumSpanningTree();
    if (AllEdges.size() > 1 && InstrumentFuncEntry)
      std::iter_swap(AllEdges.begin(), AllEdges.end() - 1);
  }
};
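// A hypothetical usage sketch. MyEdge and MyBBInfo are assumptions: judging
// from the uses above, Edge must provide SrcBB, DestBB, Weight, InMST,
// IsCritical, Removed, and infoString(); BBInfo must provide Group, Rank,
// Index, and infoString().
//
//   CFGMST<MyEdge, MyBBInfo> MST(F, /*InstrumentFuncEntry=*/true, BPI, BFI);
//   for (auto &E : MST.AllEdges)
//     if (!E->InMST && !E->Removed)
//       ; // instrument this non-MST edge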

} // end namespace llvm

#undef DEBUG_TYPE // "cfgmst"

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_CFGMST_H
//===-- BlockCoverageInference.h - Minimal Execution Coverage ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file finds the minimum set of blocks on a CFG that must be instrumented
/// to infer execution coverage for the whole graph.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_BLOCKCOVERAGEINFERENCE_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_BLOCKCOVERAGEINFERENCE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class Function;
class BasicBlock;
class DotFuncBCIInfo;

class BlockCoverageInference {
  friend class DotFuncBCIInfo;

public:
  using BlockSet = SmallSetVector<const BasicBlock *, 4>;

  BlockCoverageInference(const Function &F, bool ForceInstrumentEntry);

  /// \return true if \p BB should be instrumented for coverage.
  bool shouldInstrumentBlock(const BasicBlock &BB) const;

  /// \return the set of blocks \p Deps such that \p BB is covered iff any
  /// blocks in \p Deps are covered.
  BlockSet getDependencies(const BasicBlock &BB) const;

  /// \return a hash that depends on the set of instrumented blocks.
  uint64_t getInstrumentedBlocksHash() const;

  /// Dump the inference graph.
  void dump(raw_ostream &OS) const;

  /// View the inferred block coverage as a dot file.
  /// Filled gray blocks are instrumented, red outlined blocks are found to be
  /// covered, red edges show that a block's coverage can be inferred from its
  /// successors, and blue edges show that a block's coverage can be inferred
  /// from its predecessors.
  void viewBlockCoverageGraph(
      const DenseMap<const BasicBlock *, bool> *Coverage = nullptr) const;

private:
  const Function &F;
  bool ForceInstrumentEntry;

  /// Maps blocks to a minimal list of predecessors that can be used to infer
  /// this block's coverage.
  DenseMap<const BasicBlock *, BlockSet> PredecessorDependencies;

  /// Maps blocks to a minimal list of successors that can be used to infer
  /// this block's coverage.
  DenseMap<const BasicBlock *, BlockSet> SuccessorDependencies;

  /// Compute \p PredecessorDependencies and \p SuccessorDependencies.
  void findDependencies();

  /// Find the set of basic blocks that are reachable from \p Start without the
  /// basic block \p Avoid.
  void getReachableAvoiding(const BasicBlock &Start, const BasicBlock &Avoid,
                            bool IsForward, BlockSet &Reachable) const;

  static std::string getBlockNames(ArrayRef<const BasicBlock *> BBs);
  static std::string getBlockNames(BlockSet BBs) {
    return getBlockNames(ArrayRef<const BasicBlock *>(BBs.begin(), BBs.end()));
  }
};
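// A minimal usage sketch, assuming a Function &F is in scope:
//
//   BlockCoverageInference BCI(F, /*ForceInstrumentEntry=*/false);
//   for (const BasicBlock &BB : F)
//     if (BCI.shouldInstrumentBlock(BB))
//       ; // insert a coverage probe; coverage of the remaining blocks is
//         // inferred via getDependencies(BB)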

} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_BLOCKCOVERAGEINFERENCE_H
//===- ControlHeightReduction.h - Control Height Reduction ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges conditional blocks of code and reduces the number of
// conditional branches in the hot paths based on profiles.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_CONTROLHEIGHTREDUCTION_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_CONTROLHEIGHTREDUCTION_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class ControlHeightReductionPass :
      public PassInfoMixin<ControlHeightReductionPass> {
public:
  ControlHeightReductionPass();
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_CONTROLHEIGHTREDUCTION_H
//===- PoisonChecking.h - ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_POISONCHECKING_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_POISONCHECKING_H

#include "llvm/IR/PassManager.h"

namespace llvm {

struct PoisonCheckingPass : public PassInfoMixin<PoisonCheckingPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_POISONCHECKING_H
//===- Transforms/Instrumentation/InstrProfiling.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's PGO Instrumentation lowering
/// pass.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_INSTRPROFILING_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_INSTRPROFILING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PassManager.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Transforms/Instrumentation.h"
#include <cstdint>
#include <cstring>
#include <vector>

namespace llvm {

class TargetLibraryInfo;
using LoadStorePair = std::pair<Instruction *, Instruction *>;

/// Instrumentation-based profiling lowering pass. This pass lowers
/// the profile-instrumented code generated by the frontend or by the IR-based
/// instrumentation pass.
class InstrProfiling : public PassInfoMixin<InstrProfiling> {
public:
  InstrProfiling() : IsCS(false) {}
  InstrProfiling(const InstrProfOptions &Options, bool IsCS = false)
      : Options(Options), IsCS(IsCS) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  bool run(Module &M,
           std::function<const TargetLibraryInfo &(Function &F)> GetTLI);

private:
  InstrProfOptions Options;
  Module *M;
  Triple TT;
  std::function<const TargetLibraryInfo &(Function &F)> GetTLI;
  struct PerFunctionProfileData {
    uint32_t NumValueSites[IPVK_Last + 1];
    GlobalVariable *RegionCounters = nullptr;
    GlobalVariable *DataVar = nullptr;

    PerFunctionProfileData() {
      memset(NumValueSites, 0, sizeof(uint32_t) * (IPVK_Last + 1));
    }
  };
  DenseMap<GlobalVariable *, PerFunctionProfileData> ProfileDataMap;
  /// If runtime relocation is enabled, this maps functions to the load
  /// instruction that produces the profile relocation bias.
  DenseMap<const Function *, LoadInst *> FunctionToProfileBiasMap;
  std::vector<GlobalValue *> CompilerUsedVars;
  std::vector<GlobalValue *> UsedVars;
  std::vector<GlobalVariable *> ReferencedNames;
  GlobalVariable *NamesVar;
  size_t NamesSize;

  // Whether this lowering is for context-sensitive instrumentation.
  bool IsCS;

  // Vector of counter load/store pairs to be register promoted.
  std::vector<LoadStorePair> PromotionCandidates;

  int64_t TotalCountersPromoted = 0;

  /// Lower instrumentation intrinsics in the function. Returns true if any
  /// lowering occurred.
  bool lowerIntrinsics(Function *F);

  /// Register-promote counter loads and stores in loops.
  void promoteCounterLoadStores(Function *F);

  /// Returns true if relocating counters at runtime is enabled.
  bool isRuntimeCounterRelocationEnabled() const;

  /// Returns true if profile counter update register promotion is enabled.
  bool isCounterPromotionEnabled() const;

  /// Count the number of instrumented value sites for the function.
  void computeNumValueSiteCounts(InstrProfValueProfileInst *Ins);

  /// Replace instrprof.value.profile with a call to runtime library.
  void lowerValueProfileInst(InstrProfValueProfileInst *Ins);

  /// Replace instrprof.cover with a store instruction to the coverage byte.
  void lowerCover(InstrProfCoverInst *Inc);

  /// Replace instrprof.timestamp with a call to
  /// INSTR_PROF_PROFILE_SET_TIMESTAMP.
  void lowerTimestamp(InstrProfTimestampInst *TimestampInstruction);

  /// Replace instrprof.increment with an increment of the appropriate value.
  void lowerIncrement(InstrProfIncrementInst *Inc);

  /// Force emitting of name vars for unused functions.
  void lowerCoverageData(GlobalVariable *CoverageNamesVar);

  /// Compute the address of the counter value that this profiling instruction
  /// acts on.
  Value *getCounterAddress(InstrProfInstBase *I);

  /// Get the region counters for an increment, creating them if necessary.
  ///
  /// If the counter array doesn't yet exist, the profile data variables
  /// referring to them will also be created.
  GlobalVariable *getOrCreateRegionCounters(InstrProfInstBase *Inc);

  /// Create the region counters.
  GlobalVariable *createRegionCounters(InstrProfInstBase *Inc, StringRef Name,
                                       GlobalValue::LinkageTypes Linkage);

  /// Emit the section with compressed function names.
  void emitNameData();

  /// Emit value nodes section for value profiling.
  void emitVNodes();

  /// Emit runtime registration functions for each profile data variable.
  void emitRegistration();

  /// Emit the necessary plumbing to pull in the runtime initialization.
  /// Returns true if a change was made.
  bool emitRuntimeHook();

  /// Add uses of our data variables and runtime hook.
  void emitUses();

  /// Create a static initializer for our data, on platforms that need it,
  /// and for any profile output file that was specified.
  void emitInitialization();
};
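// A minimal sketch of running this lowering through the new pass manager
// (ModuleAnalysisManager setup omitted; default InstrProfOptions assumed):
//
//   ModulePassManager MPM;
//   MPM.addPass(InstrProfiling(InstrProfOptions(), /*IsCS=*/false));
//   MPM.run(M, MAM);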

} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_INSTRPROFILING_H
PKjwFZtCn99)Transforms/Instrumentation/GCOVProfiler.hnu�[���//===- Transforms/Instrumentation/GCOVProfiler.h ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for the GCOV-style profiler pass.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_GCOVPROFILER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_GCOVPROFILER_H

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation.h"

namespace llvm {
/// The gcov-style instrumentation pass
class GCOVProfilerPass : public PassInfoMixin<GCOVProfilerPass> {
public:
  GCOVProfilerPass(const GCOVOptions &Options = GCOVOptions::getDefault())
      : GCOVOpts(Options) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

private:
  GCOVOptions GCOVOpts;
};

} // namespace llvm
#endif // LLVM_TRANSFORMS_INSTRUMENTATION_GCOVPROFILER_H
//===- DataFlowSanitizer.h - dynamic data flow analysis -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_DATAFLOWSANITIZER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_DATAFLOWSANITIZER_H

#include "llvm/IR/PassManager.h"
#include <string>
#include <vector>

namespace llvm {
class Module;

class DataFlowSanitizerPass : public PassInfoMixin<DataFlowSanitizerPass> {
private:
  std::vector<std::string> ABIListFiles;

public:
  DataFlowSanitizerPass(
      const std::vector<std::string> &ABIListFiles = std::vector<std::string>())
      : ABIListFiles(ABIListFiles) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_DATAFLOWSANITIZER_H
//===------- Definition of the SanitizerBinaryMetadata class ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the SanitizerBinaryMetadata pass.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERBINARYMETADATA_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERBINARYMETADATA_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation.h"

namespace llvm {

struct SanitizerBinaryMetadataOptions {
  bool Covered = false;
  bool Atomics = false;
  bool UAR = false;
  SanitizerBinaryMetadataOptions() = default;
};

inline constexpr int kSanitizerBinaryMetadataAtomicsBit = 0;
inline constexpr int kSanitizerBinaryMetadataUARBit = 1;
inline constexpr int kSanitizerBinaryMetadataUARHasSizeBit = 2;

inline constexpr uint64_t kSanitizerBinaryMetadataAtomics =
    1 << kSanitizerBinaryMetadataAtomicsBit;
inline constexpr uint64_t kSanitizerBinaryMetadataUAR =
    1 << kSanitizerBinaryMetadataUARBit;
inline constexpr uint64_t kSanitizerBinaryMetadataUARHasSize =
    1 << kSanitizerBinaryMetadataUARHasSizeBit;
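
// For example, a feature mask for a function that both uses atomics and has
// UAR metadata carrying a size would be composed from the constants above as:
//
//   uint64_t Features = kSanitizerBinaryMetadataAtomics |
//                       kSanitizerBinaryMetadataUAR |
//                       kSanitizerBinaryMetadataUARHasSize;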

inline constexpr char kSanitizerBinaryMetadataCoveredSection[] =
    "sanmd_covered";
inline constexpr char kSanitizerBinaryMetadataAtomicsSection[] =
    "sanmd_atomics";

/// Public interface to the SanitizerBinaryMetadata module pass for emitting
/// metadata for binary analysis sanitizers.
///
/// The pass should be inserted after optimizations.
class SanitizerBinaryMetadataPass
    : public PassInfoMixin<SanitizerBinaryMetadataPass> {
public:
  explicit SanitizerBinaryMetadataPass(
      SanitizerBinaryMetadataOptions Opts = {},
      ArrayRef<std::string> IgnorelistFiles = {});
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }

private:
  const SanitizerBinaryMetadataOptions Options;
  const ArrayRef<std::string> IgnorelistFiles;
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERBINARYMETADATA_H
//===--------- Definition of the HWAddressSanitizer class -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the Hardware AddressSanitizer class, which is a port of
// the legacy HWAddressSanitizer pass to the new PassManager infrastructure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_HWADDRESSSANITIZER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_HWADDRESSSANITIZER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;
class StringRef;
class raw_ostream;

struct HWAddressSanitizerOptions {
  HWAddressSanitizerOptions()
      : HWAddressSanitizerOptions(false, false, false) {}
  HWAddressSanitizerOptions(bool CompileKernel, bool Recover,
                            bool DisableOptimization)
      : CompileKernel(CompileKernel), Recover(Recover),
        DisableOptimization(DisableOptimization) {}
  bool CompileKernel;
  bool Recover;
  bool DisableOptimization;
};

/// This is a public interface to the hardware address sanitizer pass for
/// instrumenting code to check for various memory errors at runtime, similar to
/// AddressSanitizer but based on partial hardware assistance.
class HWAddressSanitizerPass : public PassInfoMixin<HWAddressSanitizerPass> {
public:
  explicit HWAddressSanitizerPass(HWAddressSanitizerOptions Options)
      : Options(Options) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);
  static bool isRequired() { return true; }
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

private:
  HWAddressSanitizerOptions Options;
};

namespace HWASanAccessInfo {

// Bit field positions for the accessinfo parameter to
// llvm.hwasan.check.memaccess. Shared between the pass and the backend. Bits
// 0-15 are also used by the runtime.
enum {
  AccessSizeShift = 0, // 4 bits
  IsWriteShift = 4,
  RecoverShift = 5,
  MatchAllShift = 16, // 8 bits
  HasMatchAllShift = 24,
  CompileKernelShift = 25,
};

enum { RuntimeMask = 0xffff };
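// A sketch of packing an accessinfo value from the fields above, under the
// assumption that the 4-bit size field holds log2 of the access size (as the
// runtime interprets it):
//
//   uint64_t AccessInfo = (3 << AccessSizeShift) | // 8-byte access
//                         (1 << IsWriteShift) |    // a write
//                         (1 << RecoverShift);     // recoverable check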

} // namespace HWASanAccessInfo

} // namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_HWADDRESSSANITIZER_H
//===- Transforms/Instrumentation/PGOInstrumentation.h ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the interface for IR-based instrumentation passes
/// (profile-gen and profile-use).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_PGOINSTRUMENTATION_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_PGOINSTRUMENTATION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/IR/PassManager.h"
#include <cstdint>
#include <string>

namespace llvm {

class Function;
class Instruction;
class Module;

namespace vfs {
class FileSystem;
} // namespace vfs

/// The instrumentation (profile-instr-gen) pass for IR based PGO.
// We use this pass to create COMDAT profile variables for context-sensitive
// PGO (CSPGO). The reason to have a separate pass for this is that CSPGO
// can be run after LTO/ThinLTO linking, but the lld linker needs to see
// all the COMDAT variables before linking. So we have this pass
// always run before linking for CSPGO.
class PGOInstrumentationGenCreateVar
    : public PassInfoMixin<PGOInstrumentationGenCreateVar> {
public:
  PGOInstrumentationGenCreateVar(std::string CSInstrName = "")
      : CSInstrName(CSInstrName) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);

private:
  std::string CSInstrName;
};

/// The instrumentation (profile-instr-gen) pass for IR based PGO.
class PGOInstrumentationGen : public PassInfoMixin<PGOInstrumentationGen> {
public:
  PGOInstrumentationGen(bool IsCS = false) : IsCS(IsCS) {}
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);

private:
  // Whether this is context-sensitive instrumentation.
  bool IsCS;
};

/// The profile annotation (profile-instr-use) pass for IR based PGO.
class PGOInstrumentationUse : public PassInfoMixin<PGOInstrumentationUse> {
public:
  PGOInstrumentationUse(std::string Filename = "",
                        std::string RemappingFilename = "", bool IsCS = false,
                        IntrusiveRefCntPtr<vfs::FileSystem> FS = nullptr);

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);

private:
  std::string ProfileFileName;
  std::string ProfileRemappingFileName;
  // Whether this is context-sensitive instrumentation.
  bool IsCS;
  IntrusiveRefCntPtr<vfs::FileSystem> FS;
};

/// The indirect function call promotion pass.
class PGOIndirectCallPromotion : public PassInfoMixin<PGOIndirectCallPromotion> {
public:
  PGOIndirectCallPromotion(bool IsInLTO = false, bool SamplePGO = false)
      : InLTO(IsInLTO), SamplePGO(SamplePGO) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &MAM);

private:
  bool InLTO;
  bool SamplePGO;
};

/// The profile size based optimization pass for memory intrinsics.
class PGOMemOPSizeOpt : public PassInfoMixin<PGOMemOPSizeOpt> {
public:
  PGOMemOPSizeOpt() = default;

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &MAM);
};

void setProfMetadata(Module *M, Instruction *TI, ArrayRef<uint64_t> EdgeCounts,
                     uint64_t MaxCount);

void setIrrLoopHeaderMetadata(Module *M, Instruction *TI, uint64_t Count);

} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_PGOINSTRUMENTATION_H
//===--------- Definition of the AddressSanitizer options -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file defines data types used to set Address Sanitizer options.
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZEROPTIONS_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZEROPTIONS_H

namespace llvm {

/// Types of ASan module destructors supported
enum class AsanDtorKind {
  None,    ///< Do not emit any destructors for ASan
  Global,  ///< Append to llvm.global_dtors
  Invalid, ///< Not a valid destructor kind.
};

/// Types of ASan module constructors supported
enum class AsanCtorKind {
  None,
  Global
};

/// Mode of ASan detect stack use after return
enum class AsanDetectStackUseAfterReturnMode {
  Never,   ///< Never detect stack use after return.
  Runtime, ///< Detect stack use after return if not disabled at runtime
           ///< (ASAN_OPTIONS=detect_stack_use_after_return=0).
  Always,  ///< Always detect stack use after return.
  Invalid, ///< Not a valid detect mode.
};

} // namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZEROPTIONS_H
//===- Transforms/Instrumentation/MemorySanitizer.h - MSan Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the memory sanitizer pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;
class StringRef;
class raw_ostream;

struct MemorySanitizerOptions {
  MemorySanitizerOptions() : MemorySanitizerOptions(0, false, false, false) {}
  MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel)
      : MemorySanitizerOptions(TrackOrigins, Recover, Kernel, false) {}
  MemorySanitizerOptions(int TrackOrigins, bool Recover, bool Kernel,
                         bool EagerChecks);
  bool Kernel;
  int TrackOrigins;
  bool Recover;
  bool EagerChecks;
};
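// A minimal construction sketch using the constructor declared above and the
// pass declared below:
//
//   MemorySanitizerPass MSan(
//       MemorySanitizerOptions(/*TrackOrigins=*/2, /*Recover=*/false,
//                              /*Kernel=*/false));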

/// A module pass for msan instrumentation.
///
/// Instruments functions to detect uninitialized reads. This function pass
/// inserts calls to runtime library functions. If the functions aren't declared
/// yet, the pass inserts the declarations. Otherwise the existing globals are
/// used.
struct MemorySanitizerPass : public PassInfoMixin<MemorySanitizerPass> {
  MemorySanitizerPass(MemorySanitizerOptions Options) : Options(Options) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
  static bool isRequired() { return true; }

private:
  MemorySanitizerOptions Options;
};
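
// Illustrative sketch (not part of this header): scheduling the pass with the
// new pass manager. The module `M` and analysis manager `MAM` are assumed to
// have been set up elsewhere, e.g. via PassBuilder.
//
//   MemorySanitizerOptions Opts(/*TrackOrigins=*/1, /*Recover=*/false,
//                               /*Kernel=*/false, /*EagerChecks=*/true);
//   ModulePassManager MPM;
//   MPM.addPass(MemorySanitizerPass(Opts));
//   MPM.run(M, MAM);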
} // namespace llvm

#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_MEMORYSANITIZER_H */

// File: Transforms/Instrumentation/KCFI.h
//===-- KCFI.h - Generic KCFI operand bundle lowering -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass emits generic KCFI indirect call checks for targets that don't
// support lowering KCFI operand bundles in the back-end.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_KCFI_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_KCFI_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class KCFIPass : public PassInfoMixin<KCFIPass> {
public:
  static bool isRequired() { return true; }
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // namespace llvm
#endif // LLVM_TRANSFORMS_INSTRUMENTATION_KCFI_H

// File: Transforms/Instrumentation/MemProfiler.h
//===--------- Definition of the MemProfiler class --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MemProfiler class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_MEMPROFILER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_MEMPROFILER_H

#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;
class FunctionPass;
class Module;
class ModulePass;

namespace vfs {
class FileSystem;
} // namespace vfs

/// Public interface to the memory profiler pass for instrumenting code to
/// profile memory accesses.
///
/// The profiler itself is a function pass that works by inserting various
/// calls to the MemProfiler runtime library functions. The runtime library
/// essentially replaces malloc() and free() with custom implementations that
/// record data about the allocations.
class MemProfilerPass : public PassInfoMixin<MemProfilerPass> {
public:
  explicit MemProfilerPass();
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

/// Public interface to the memory profiler module pass for instrumenting code
/// to profile memory allocations and accesses.
class ModuleMemProfilerPass : public PassInfoMixin<ModuleMemProfilerPass> {
public:
  explicit ModuleMemProfilerPass();
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }
};

class MemProfUsePass : public PassInfoMixin<MemProfUsePass> {
public:
  explicit MemProfUsePass(std::string MemoryProfileFile,
                          IntrusiveRefCntPtr<vfs::FileSystem> FS = nullptr);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);

private:
  std::string MemoryProfileFileName;
  IntrusiveRefCntPtr<vfs::FileSystem> FS;
};

} // namespace llvm

#endif

// File: Transforms/Instrumentation/ThreadSanitizer.h
//===- Transforms/Instrumentation/ThreadSanitizer.h - TSan Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the thread sanitizer pass.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;
class Module;

/// A function pass for tsan instrumentation.
///
/// Instruments functions to detect data races. This function pass inserts
/// calls to runtime library functions. If the functions aren't declared yet,
/// the pass inserts the declarations. Otherwise the existing globals are used.
struct ThreadSanitizerPass : public PassInfoMixin<ThreadSanitizerPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &FAM);
  static bool isRequired() { return true; }
};

/// A module pass for tsan instrumentation.
///
/// Create ctor and init functions.
struct ModuleThreadSanitizerPass
  : public PassInfoMixin<ModuleThreadSanitizerPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }
};

} // namespace llvm
#endif /* LLVM_TRANSFORMS_INSTRUMENTATION_THREADSANITIZER_H */

// File: Transforms/Instrumentation/AddressSanitizerCommon.h
//===--------- Definition of the AddressSanitizer class ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares common infrastructure for AddressSanitizer and
// HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H

#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"

namespace llvm {

class InterestingMemoryOperand {
public:
  Use *PtrUse;
  bool IsWrite;
  Type *OpType;
  TypeSize TypeStoreSize = TypeSize::Fixed(0);
  MaybeAlign Alignment;
  // The mask Value, if we're looking at a masked load/store.
  Value *MaybeMask;
  // The EVL Value, if we're looking at a vp intrinsic.
  Value *MaybeEVL;
  // The Stride Value, if we're looking at a strided load/store.
  Value *MaybeStride;

  InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
                           class Type *OpType, MaybeAlign Alignment,
                           Value *MaybeMask = nullptr,
                           Value *MaybeEVL = nullptr,
                           Value *MaybeStride = nullptr)
      : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
        MaybeMask(MaybeMask), MaybeEVL(MaybeEVL), MaybeStride(MaybeStride) {
    const DataLayout &DL = I->getModule()->getDataLayout();
    TypeStoreSize = DL.getTypeStoreSizeInBits(OpType);
    PtrUse = &I->getOperandUse(OperandNo);
  }

  Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }

  Value *getPtr() { return PtrUse->get(); }
};
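
// Illustrative sketch: describing the pointer operand of a store. `SI` is an
// assumed StoreInst*; operand 1 of a store is the pointer operand, and the
// stored value's type determines the access size.
//
//   StoreInst *SI = /* ... */;
//   InterestingMemoryOperand Op(SI, /*OperandNo=*/1, /*IsWrite=*/true,
//                               SI->getValueOperand()->getType(),
//                               SI->getAlign());
//   Value *Addr = Op.getPtr(); // The address being written to.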

// Get AddressSanitizer parameters.
void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
                               bool IsKasan, uint64_t *ShadowBase,
                               int *MappingScale, bool *OrShadowOffset);

} // namespace llvm

#endif

// File: Transforms/Instrumentation/InstrOrderFile.h
//===- InstrOrderFile.h ---- Late IR instrumentation for order file ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_INSTRORDERFILE_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_INSTRORDERFILE_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;

/// The instrumentation pass for recording function order.
class InstrOrderFilePass : public PassInfoMixin<InstrOrderFilePass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_INSTRORDERFILE_H

// File: Transforms/Instrumentation/CGProfile.h
//===- Transforms/Instrumentation/CGProfile.h -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the interface for LLVM's Call Graph Profile pass.
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_CGPROFILE_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_CGPROFILE_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;
class CGProfilePass : public PassInfoMixin<CGProfilePass> {
public:
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_CGPROFILE_H

// File: Transforms/Instrumentation/AddressSanitizer.h
//===--------- Definition of the AddressSanitizer class ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the AddressSanitizer class which is a port of the legacy
// AddressSanitizer pass to use the new PassManager infrastructure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZER_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZER_H

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"

namespace llvm {
class Module;
class raw_ostream;

struct AddressSanitizerOptions {
  bool CompileKernel = false;
  bool Recover = false;
  bool UseAfterScope = false;
  AsanDetectStackUseAfterReturnMode UseAfterReturn =
      AsanDetectStackUseAfterReturnMode::Runtime;
};

/// Public interface to the address sanitizer module pass for instrumenting code
/// to check for various memory errors.
///
/// This adds 'asan.module_ctor' to 'llvm.global_ctors'. This pass may also
/// run independently of the function address sanitizer.
class AddressSanitizerPass : public PassInfoMixin<AddressSanitizerPass> {
public:
  AddressSanitizerPass(const AddressSanitizerOptions &Options,
                       bool UseGlobalGC = true, bool UseOdrIndicator = true,
                       AsanDtorKind DestructorKind = AsanDtorKind::Global,
                       AsanCtorKind ConstructorKind = AsanCtorKind::Global);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);
  static bool isRequired() { return true; }

private:
  AddressSanitizerOptions Options;
  bool UseGlobalGC;
  bool UseOdrIndicator;
  AsanDtorKind DestructorKind;
  AsanCtorKind ConstructorKind;
};
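
// Illustrative sketch: constructing the module pass with recovery enabled.
// `MPM` is an assumed ModulePassManager set up elsewhere.
//
//   AddressSanitizerOptions Opts;
//   Opts.Recover = true;
//   MPM.addPass(AddressSanitizerPass(Opts));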

struct ASanAccessInfo {
  const int32_t Packed;
  const uint8_t AccessSizeIndex;
  const bool IsWrite;
  const bool CompileKernel;

  explicit ASanAccessInfo(int32_t Packed);
  ASanAccessInfo(bool IsWrite, bool CompileKernel, uint8_t AccessSizeIndex);
};

} // namespace llvm

#endif

// File: Transforms/Instrumentation/SanitizerCoverage.h
//===--------- Definition of the SanitizerCoverage class --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// SanitizerCoverage is a simple code coverage implementation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERCOVERAGE_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_SANITIZERCOVERAGE_H

#include "llvm/IR/PassManager.h"
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Transforms/Instrumentation.h"

namespace llvm {
class Module;

/// This is the ModuleSanitizerCoverage pass used in the new pass manager. The
/// pass instruments functions for coverage, adds initialization calls to the
/// module for trace PC guards and 8bit counters if they are requested, and
/// appends globals to llvm.compiler.used.
class SanitizerCoveragePass : public PassInfoMixin<SanitizerCoveragePass> {
public:
  explicit SanitizerCoveragePass(
      SanitizerCoverageOptions Options = SanitizerCoverageOptions(),
      const std::vector<std::string> &AllowlistFiles =
          std::vector<std::string>(),
      const std::vector<std::string> &BlocklistFiles =
          std::vector<std::string>())
      : Options(Options) {
    if (!AllowlistFiles.empty())
      Allowlist = SpecialCaseList::createOrDie(AllowlistFiles,
                                               *vfs::getRealFileSystem());
    if (!BlocklistFiles.empty())
      Blocklist = SpecialCaseList::createOrDie(BlocklistFiles,
                                               *vfs::getRealFileSystem());
  }
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  static bool isRequired() { return true; }

private:
  SanitizerCoverageOptions Options;

  std::unique_ptr<SpecialCaseList> Allowlist;
  std::unique_ptr<SpecialCaseList> Blocklist;
};
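
// Illustrative sketch: enabling trace-pc-guard coverage restricted by an
// allowlist. The option field (from SanitizerCoverageOptions in
// llvm/Transforms/Instrumentation.h) and the file name are assumptions for
// the example.
//
//   SanitizerCoverageOptions Opts;
//   Opts.TracePCGuard = true;
//   SanitizerCoveragePass CovPass(Opts, /*AllowlistFiles=*/{"allow.txt"});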

} // namespace llvm

#endif

// File: Transforms/Instrumentation/BoundsChecking.h
//===- BoundsChecking.h - Bounds checking instrumentation -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H
#define LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Function;

/// A pass to instrument code and perform run-time bounds checking on loads,
/// stores, and memory intrinsics.
struct BoundsCheckingPass : PassInfoMixin<BoundsCheckingPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_INSTRUMENTATION_BOUNDSCHECKING_H

// File: Transforms/CFGuard.h
//===-- CFGuard.h - CFGuard Transformations ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
// Windows Control Flow Guard passes (/guard:cf).
//===---------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_CFGUARD_H
#define LLVM_TRANSFORMS_CFGUARD_H

namespace llvm {

class FunctionPass;

/// Insert Control Flow Guard checks on indirect function calls.
FunctionPass *createCFGuardCheckPass();

/// Insert Control Flow Guard dispatches on indirect function calls.
FunctionPass *createCFGuardDispatchPass();

} // namespace llvm

#endif

// File: Transforms/InstCombine/InstCombine.h
//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides the primary interface to the instcombine pass. This pass
/// is suitable for use in the new pass manager. For a pass that works with the
/// legacy pass manager, use \c createInstructionCombiningPass().
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H

#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

namespace llvm {

static constexpr unsigned InstCombineDefaultMaxIterations = 1000;

struct InstCombineOptions {
  bool UseLoopInfo = false;
  unsigned MaxIterations = InstCombineDefaultMaxIterations;

  InstCombineOptions() = default;

  InstCombineOptions &setUseLoopInfo(bool Value) {
    UseLoopInfo = Value;
    return *this;
  }

  InstCombineOptions &setMaxIterations(unsigned Value) {
    MaxIterations = Value;
    return *this;
  }
};
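
// Illustrative sketch: the setters return *this, so options can be chained
// when constructing the pass declared below.
//
//   InstCombinePass P(InstCombineOptions()
//                         .setUseLoopInfo(true)
//                         .setMaxIterations(200));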

class InstCombinePass : public PassInfoMixin<InstCombinePass> {
private:
  InstructionWorklist Worklist;
  InstCombineOptions Options;

public:
  explicit InstCombinePass(InstCombineOptions Opts = {});
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// The legacy pass manager's instcombine pass.
///
/// This is a basic whole-function wrapper around the instcombine utility. It
/// will try to combine all instructions in the function.
class InstructionCombiningPass : public FunctionPass {
  InstructionWorklist Worklist;

public:
  static char ID; // Pass identification, replacement for typeid

  explicit InstructionCombiningPass();

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnFunction(Function &F) override;
};

//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions. This pass does not modify the CFG, and has a tendency to make
// instructions dead, so a subsequent DCE pass is useful.
//
// This pass combines things like:
//    %Y = add i32 1, %X
//    %Z = add i32 1, %Y
// into:
//    %Z = add i32 2, %X
//
FunctionPass *createInstructionCombiningPass();
} // namespace llvm

#undef DEBUG_TYPE

#endif

// File: Transforms/InstCombine/InstCombiner.h
//===- InstCombiner.h - InstCombine implementation --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides the interface for the instcombine pass implementation.
/// The interface is used for generic transformations in this folder and
/// target specific combinations in the targets.
/// The visitor implementation is in \c InstCombinerImpl in
/// \c InstCombineInternal.h.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H
#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINER_H

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

namespace llvm {

class AAResults;
class AssumptionCache;
class OptimizationRemarkEmitter;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetTransformInfo;

/// The core instruction combiner logic.
///
/// This class provides the logic to recursively visit instructions and
/// combine them.
class LLVM_LIBRARY_VISIBILITY InstCombiner {
  /// Only used to call target specific intrinsic combining.
  /// It must **NOT** be used for any other purpose, as InstCombine is a
  /// target-independent canonicalization transform.
  TargetTransformInfo &TTI;

public:
  /// Maximum size of array considered when transforming.
  uint64_t MaxArraySizeForCombine = 0;

  /// An IRBuilder that automatically inserts new instructions into the
  /// worklist.
  using BuilderTy = IRBuilder<TargetFolder, IRBuilderCallbackInserter>;
  BuilderTy &Builder;

protected:
  /// A worklist of the instructions that need to be simplified.
  InstructionWorklist &Worklist;

  // Mode in which we are running the combiner.
  const bool MinimizeSize;

  AAResults *AA;

  // Required analyses.
  AssumptionCache &AC;
  TargetLibraryInfo &TLI;
  DominatorTree &DT;
  const DataLayout &DL;
  const SimplifyQuery SQ;
  OptimizationRemarkEmitter &ORE;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;

  // Optional analyses. When non-null, these can both be used to do better
  // combining and will be updated to reflect any changes.
  LoopInfo *LI;

  bool MadeIRChange = false;

public:
  InstCombiner(InstructionWorklist &Worklist, BuilderTy &Builder,
               bool MinimizeSize, AAResults *AA, AssumptionCache &AC,
               TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
               DominatorTree &DT, OptimizationRemarkEmitter &ORE,
               BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
               const DataLayout &DL, LoopInfo *LI)
      : TTI(TTI), Builder(Builder), Worklist(Worklist),
        MinimizeSize(MinimizeSize), AA(AA), AC(AC), TLI(TLI), DT(DT), DL(DL),
        SQ(DL, &TLI, &DT, &AC), ORE(ORE), BFI(BFI), PSI(PSI), LI(LI) {}

  virtual ~InstCombiner() = default;

  /// Return the source operand of a potentially bitcasted value while
  /// optionally checking if it has one use. If there is no bitcast or the one
  /// use check is not met, return the input value itself.
  static Value *peekThroughBitcast(Value *V, bool OneUseOnly = false) {
    if (auto *BitCast = dyn_cast<BitCastInst>(V))
      if (!OneUseOnly || BitCast->hasOneUse())
        return BitCast->getOperand(0);

    // V is not a bitcast or V has more than one use and OneUseOnly is true.
    return V;
  }

  /// Assign a complexity or rank value to LLVM Values. This is used to reduce
  /// the amount of pattern matching needed for compares and commutative
  /// instructions. For example, if we have:
  ///   icmp ugt X, Constant
  /// or
  ///   xor (add X, Constant), cast Z
  ///
  /// We do not have to consider the commuted variants of these patterns because
  /// canonicalization based on complexity guarantees the above ordering.
  ///
  /// This routine maps IR values to various complexity ranks:
  ///   0 -> undef
  ///   1 -> Constants
  ///   2 -> Other non-instructions
  ///   3 -> Arguments
  ///   4 -> Cast and (f)neg/not instructions
  ///   5 -> Other instructions
  static unsigned getComplexity(Value *V) {
    if (isa<Instruction>(V)) {
      if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) ||
          match(V, m_Not(PatternMatch::m_Value())) ||
          match(V, m_FNeg(PatternMatch::m_Value())))
        return 4;
      return 5;
    }
    if (isa<Argument>(V))
      return 3;
    return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
  }
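
  // Illustrative example: in `add %x, 7` where %x is an ordinary instruction,
  // getComplexity(%x) == 5 and getComplexity(7) == 1, so canonicalization
  // keeps the constant on the RHS and matchers only need to handle that form.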

  /// Predicate canonicalization reduces the number of patterns that need to be
  /// matched by other transforms. For example, we may swap the operands of a
  /// conditional branch or select to create a compare with a canonical
  /// (inverted) predicate which is then more likely to be matched with other
  /// values.
  static bool isCanonicalPredicate(CmpInst::Predicate Pred) {
    switch (Pred) {
    case CmpInst::ICMP_NE:
    case CmpInst::ICMP_ULE:
    case CmpInst::ICMP_SLE:
    case CmpInst::ICMP_UGE:
    case CmpInst::ICMP_SGE:
    // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
    case CmpInst::FCMP_ONE:
    case CmpInst::FCMP_OLE:
    case CmpInst::FCMP_OGE:
      return false;
    default:
      return true;
    }
  }

  /// Given an exploded icmp instruction, return true if the comparison only
  /// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
  /// the result of the comparison is true when the input value is signed.
  static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                             bool &TrueIfSigned) {
    switch (Pred) {
    case ICmpInst::ICMP_SLT: // True if LHS s< 0
      TrueIfSigned = true;
      return RHS.isZero();
    case ICmpInst::ICMP_SLE: // True if LHS s<= -1
      TrueIfSigned = true;
      return RHS.isAllOnes();
    case ICmpInst::ICMP_SGT: // True if LHS s> -1
      TrueIfSigned = false;
      return RHS.isAllOnes();
    case ICmpInst::ICMP_SGE: // True if LHS s>= 0
      TrueIfSigned = false;
      return RHS.isZero();
    case ICmpInst::ICMP_UGT:
      // True if LHS u> RHS and RHS == sign-bit-mask - 1
      TrueIfSigned = true;
      return RHS.isMaxSignedValue();
    case ICmpInst::ICMP_UGE:
      // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
      TrueIfSigned = true;
      return RHS.isMinSignedValue();
    case ICmpInst::ICMP_ULT:
      // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
      TrueIfSigned = false;
      return RHS.isMinSignedValue();
    case ICmpInst::ICMP_ULE:
      // True if LHS u<= RHS and RHS == sign-bit-mask - 1
      TrueIfSigned = false;
      return RHS.isMaxSignedValue();
    default:
      return false;
    }
  }
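
  // Illustrative examples for an i8 comparison:
  //   icmp slt i8 %x, 0     -> true, TrueIfSigned = true
  //   icmp ugt i8 %x, 127   -> true, TrueIfSigned = true  (127 == 0x7f)
  //   icmp ult i8 %x, -128  -> true, TrueIfSigned = false (-128 == 0x80, the
  //                            sign-bit mask)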

  /// Add one to a Constant
  static Constant *AddOne(Constant *C) {
    return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
  }

  /// Subtract one from a Constant
  static Constant *SubOne(Constant *C) {
    return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
  }

  static std::optional<std::pair<CmpInst::Predicate, Constant *>>
  getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
                                           Constant *C);

  static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI) {
    // a ? b : false and a ? true : b are the canonical form of logical and/or.
    // This includes !a ? b : false and !a ? true : b. Absorbing the not into
    // the select by swapping operands would break recognition of this pattern
    // in other analyses, so don't do that.
    return match(&SI, PatternMatch::m_LogicalAnd(PatternMatch::m_Value(),
                                                 PatternMatch::m_Value())) ||
           match(&SI, PatternMatch::m_LogicalOr(PatternMatch::m_Value(),
                                                PatternMatch::m_Value()));
  }

  /// Return true if the specified value is free to invert (apply ~ to).
  /// This happens in cases where the ~ can be eliminated.  If WillInvertAllUses
  /// is true, work under the assumption that the caller intends to remove all
  /// uses of V and only keep uses of ~V.
  ///
  /// See also: canFreelyInvertAllUsersOf()
  static bool isFreeToInvert(Value *V, bool WillInvertAllUses) {
    // ~(~(X)) -> X.
    if (match(V, m_Not(PatternMatch::m_Value())))
      return true;

    // Constants can be considered to be not'ed values.
    if (match(V, PatternMatch::m_AnyIntegralConstant()))
      return true;

    // Compares can be inverted if all of their uses are being modified to use
    // the ~V.
    if (isa<CmpInst>(V))
      return WillInvertAllUses;

    // If `V` is of the form `A + Constant` then `-1 - V` can be folded into
    // `(-1 - Constant) - A` if we are willing to invert all of the uses.
    if (match(V, m_Add(PatternMatch::m_Value(), PatternMatch::m_ImmConstant())))
      return WillInvertAllUses;

    // If `V` is of the form `Constant - A` then `-1 - V` can be folded into
    // `A + (-1 - Constant)` if we are willing to invert all of the uses.
    if (match(V, m_Sub(PatternMatch::m_ImmConstant(), PatternMatch::m_Value())))
      return WillInvertAllUses;

    // Selects with invertible operands are freely invertible
    if (match(V,
              m_Select(PatternMatch::m_Value(), m_Not(PatternMatch::m_Value()),
                       m_Not(PatternMatch::m_Value()))))
      return WillInvertAllUses;

    // Min/max may be in the form of intrinsics, so handle those identically
    // to select patterns.
    if (match(V, m_MaxOrMin(m_Not(PatternMatch::m_Value()),
                            m_Not(PatternMatch::m_Value()))))
      return WillInvertAllUses;

    return false;
  }

  /// Given i1 V, can every user of V be freely adapted if V is changed to !V ?
  /// InstCombine's freelyInvertAllUsersOf() must be kept in sync with this fn.
  /// NOTE: for Instructions only!
  ///
  /// See also: isFreeToInvert()
  static bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser) {
    // Look at every user of V.
    for (Use &U : V->uses()) {
      if (U.getUser() == IgnoredUser)
        continue; // Don't consider this user.

      auto *I = cast<Instruction>(U.getUser());
      switch (I->getOpcode()) {
      case Instruction::Select:
        if (U.getOperandNo() != 0) // Only if the value is used as select cond.
          return false;
        if (shouldAvoidAbsorbingNotIntoSelect(*cast<SelectInst>(I)))
          return false;
        break;
      case Instruction::Br:
        assert(U.getOperandNo() == 0 && "Must be branching on that value.");
        break; // Free to invert by swapping true/false values/destinations.
      case Instruction::Xor: // Can invert 'xor' if it's a 'not', by ignoring
                             // it.
        if (!match(I, m_Not(PatternMatch::m_Value())))
          return false; // Not a 'not'.
        break;
      default:
        return false; // Don't know, likely not freely invertible.
      }
      // So far all users were free to invert...
    }
    return true; // Can freely invert all users!
  }

  /// Some binary operators require special handling to avoid poison and
  /// undefined behavior. If a constant vector has undef elements, replace those
  /// undefs with identity constants if possible because those are always safe
  /// to execute. If no identity constant exists, replace undef with some other
  /// safe constant.
  static Constant *
  getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In,
                                bool IsRHSConstant) {
    auto *InVTy = cast<FixedVectorType>(In->getType());

    Type *EltTy = InVTy->getElementType();
    auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant);
    if (!SafeC) {
      // TODO: Should this be available as a constant utility function? It is
      // similar to getBinOpAbsorber().
      if (IsRHSConstant) {
        switch (Opcode) {
        case Instruction::SRem: // X % 1 = 0
        case Instruction::URem: // X %u 1 = 0
          SafeC = ConstantInt::get(EltTy, 1);
          break;
        case Instruction::FRem: // X % 1.0 (doesn't simplify, but it is safe)
          SafeC = ConstantFP::get(EltTy, 1.0);
          break;
        default:
          llvm_unreachable(
              "Only rem opcodes have no identity constant for RHS");
        }
      } else {
        switch (Opcode) {
        case Instruction::Shl:  // 0 << X = 0
        case Instruction::LShr: // 0 >>u X = 0
        case Instruction::AShr: // 0 >> X = 0
        case Instruction::SDiv: // 0 / X = 0
        case Instruction::UDiv: // 0 /u X = 0
        case Instruction::SRem: // 0 % X = 0
        case Instruction::URem: // 0 %u X = 0
        case Instruction::Sub:  // 0 - X (doesn't simplify, but it is safe)
        case Instruction::FSub: // 0.0 - X (doesn't simplify, but it is safe)
        case Instruction::FDiv: // 0.0 / X (doesn't simplify, but it is safe)
        case Instruction::FRem: // 0.0 % X = 0
          SafeC = Constant::getNullValue(EltTy);
          break;
        default:
          llvm_unreachable("Expected to find identity constant for opcode");
        }
      }
    }
    assert(SafeC && "Must have safe constant for binop");
    unsigned NumElts = InVTy->getNumElements();
    SmallVector<Constant *, 16> Out(NumElts);
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *C = In->getAggregateElement(i);
      Out[i] = isa<UndefValue>(C) ? SafeC : C;
    }
    return ConstantVector::get(Out);
  }
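
  // Illustrative example: for a `udiv` with RHS constant <i32 4, i32 undef>,
  // the RHS identity is 1 (X /u 1 == X), so the undef lane is replaced and
  // the returned constant is <i32 4, i32 1>, which is always safe to divide
  // by.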

  void addToWorklist(Instruction *I) { Worklist.push(I); }

  AssumptionCache &getAssumptionCache() const { return AC; }
  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
  DominatorTree &getDominatorTree() const { return DT; }
  const DataLayout &getDataLayout() const { return DL; }
  const SimplifyQuery &getSimplifyQuery() const { return SQ; }
  OptimizationRemarkEmitter &getOptimizationRemarkEmitter() const {
    return ORE;
  }
  BlockFrequencyInfo *getBlockFrequencyInfo() const { return BFI; }
  ProfileSummaryInfo *getProfileSummaryInfo() const { return PSI; }
  LoopInfo *getLoopInfo() const { return LI; }

  // Call target specific combiners
  std::optional<Instruction *> targetInstCombineIntrinsic(IntrinsicInst &II);
  std::optional<Value *>
  targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask,
                                         KnownBits &Known,
                                         bool &KnownBitsComputed);
  std::optional<Value *> targetSimplifyDemandedVectorEltsIntrinsic(
      IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
      APInt &UndefElts2, APInt &UndefElts3,
      std::function<void(Instruction *, unsigned, APInt, APInt &)>
          SimplifyAndSetOp);

  /// Inserts an instruction \p New before instruction \p Old
  ///
  /// Also adds the new instruction to the worklist and returns \p New so that
  /// it is suitable for use as the return from the visitation patterns.
  Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
    assert(New && !New->getParent() &&
           "New instruction already inserted into a basic block!");
    BasicBlock *BB = Old.getParent();
    New->insertInto(BB, Old.getIterator()); // Insert inst
    Worklist.add(New);
    return New;
  }

  /// Same as InsertNewInstBefore, but also sets the debug loc.
  Instruction *InsertNewInstWith(Instruction *New, Instruction &Old) {
    New->setDebugLoc(Old.getDebugLoc());
    return InsertNewInstBefore(New, Old);
  }

  /// A combiner-aware RAUW-like routine.
  ///
  /// This method is to be used when an instruction is found to be dead,
  /// replaceable with another preexisting expression. Here we add all uses of
  /// I to the worklist, replace all uses of I with the new value, then return
  /// I, so that the inst combiner will know that I was modified.
  Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
    // If there are no uses to replace, then we return nullptr to indicate that
    // no changes were made to the program.
    if (I.use_empty()) return nullptr;

    Worklist.pushUsersToWorkList(I); // Add all modified instrs to worklist.

    // If we are replacing the instruction with itself, this must be in a
    // segment of unreachable code, so just clobber the instruction.
    if (&I == V)
      V = PoisonValue::get(I.getType());

    LLVM_DEBUG(dbgs() << "IC: Replacing " << I << "\n"
                      << "    with " << *V << '\n');

    // If V is a new unnamed instruction, take the name from the old one.
    if (V->use_empty() && isa<Instruction>(V) && !V->hasName() && I.hasName())
      V->takeName(&I);

    I.replaceAllUsesWith(V);
    return &I;
  }

  /// Replace operand of instruction and add old operand to the worklist.
  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
    Value *OldOp = I.getOperand(OpNum);
    I.setOperand(OpNum, V);
    Worklist.handleUseCountDecrement(OldOp);
    return &I;
  }

  /// Replace use and add the previously used value to the worklist.
  void replaceUse(Use &U, Value *NewValue) {
    Value *OldOp = U;
    U = NewValue;
    Worklist.handleUseCountDecrement(OldOp);
  }

  /// Combiner aware instruction erasure.
  ///
  /// When dealing with an instruction that has side effects or produces a void
  /// value, we can't rely on DCE to delete the instruction. Instead, visit
  /// methods should return the value returned by this function.
  virtual Instruction *eraseInstFromFunction(Instruction &I) = 0;

  void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                        const Instruction *CxtI) const {
    llvm::computeKnownBits(V, Known, DL, Depth, &AC, CxtI, &DT);
  }

  KnownBits computeKnownBits(const Value *V, unsigned Depth,
                             const Instruction *CxtI) const {
    return llvm::computeKnownBits(V, DL, Depth, &AC, CxtI, &DT);
  }

  bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero = false,
                              unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) {
    return llvm::isKnownToBeAPowerOfTwo(V, DL, OrZero, Depth, &AC, CxtI, &DT);
  }

  bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth = 0,
                         const Instruction *CxtI = nullptr) const {
    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeNumSignBits(const Value *Op, unsigned Depth = 0,
                              const Instruction *CxtI = nullptr) const {
    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth = 0,
                                     const Instruction *CxtI = nullptr) const {
    return llvm::ComputeMaxSignificantBits(Op, DL, Depth, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                               const Value *RHS,
                                               const Instruction *CxtI) const {
    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                             const Instruction *CxtI) const {
    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
  }

  virtual bool SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                    const APInt &DemandedMask, KnownBits &Known,
                                    unsigned Depth = 0) = 0;
  virtual Value *
  SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &UndefElts,
                             unsigned Depth = 0,
                             bool AllowMultipleUsers = false) = 0;

  bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const;
};

} // namespace llvm

#undef DEBUG_TYPE

#endif

// File: PassSupport.h
//===- llvm/PassSupport.h - Pass Support code -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines stuff that is used to define and "use" Passes.  This file
// is automatically #included by Pass.h, so:
//
//           NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
//
// Instead, #include Pass.h.
//
// This file defines Pass registration code and classes used for it.
//
//===----------------------------------------------------------------------===//

#if !defined(LLVM_PASS_H) || defined(LLVM_PASSSUPPORT_H)
#error "Do not include <PassSupport.h>; include <Pass.h> instead"
#endif

#ifndef LLVM_PASSSUPPORT_H
#define LLVM_PASSSUPPORT_H

#include "llvm/ADT/StringRef.h"
#include "llvm/PassInfo.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Threading.h"
#include <functional>

namespace llvm {

class Pass;

#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)                    \
  static void *initialize##passName##PassOnce(PassRegistry &Registry) {        \
    PassInfo *PI = new PassInfo(                                               \
        name, arg, &passName::ID,                                              \
        PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);     \
    Registry.registerPass(*PI, true);                                          \
    return PI;                                                                 \
  }                                                                            \
  static llvm::once_flag Initialize##passName##PassFlag;                       \
  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
    llvm::call_once(Initialize##passName##PassFlag,                            \
                    initialize##passName##PassOnce, std::ref(Registry));       \
  }
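
// Illustrative sketch: typical use of INITIALIZE_PASS in a pass
// implementation file. `MyPass` and the option/description strings are
// assumptions for the example.
//
//   INITIALIZE_PASS(MyPass, "my-pass", "My Pass Description",
//                   /*cfg=*/false, /*analysis=*/false)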

#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)              \
  static void *initialize##passName##PassOnce(PassRegistry &Registry) {

#define INITIALIZE_PASS_DEPENDENCY(depName) initialize##depName##Pass(Registry);
#define INITIALIZE_AG_DEPENDENCY(depName)                                      \
  initialize##depName##AnalysisGroup(Registry);

#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)                \
  PassInfo *PI = new PassInfo(                                                 \
      name, arg, &passName::ID,                                                \
      PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);       \
  Registry.registerPass(*PI, true);                                            \
  return PI;                                                                   \
  }                                                                            \
  static llvm::once_flag Initialize##passName##PassFlag;                       \
  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
    llvm::call_once(Initialize##passName##PassFlag,                            \
                    initialize##passName##PassOnce, std::ref(Registry));       \
  }

#define INITIALIZE_PASS_WITH_OPTIONS(PassName, Arg, Name, Cfg, Analysis)       \
  INITIALIZE_PASS_BEGIN(PassName, Arg, Name, Cfg, Analysis)                    \
  PassName::registerOptions();                                                 \
  INITIALIZE_PASS_END(PassName, Arg, Name, Cfg, Analysis)

#define INITIALIZE_PASS_WITH_OPTIONS_BEGIN(PassName, Arg, Name, Cfg, Analysis) \
  INITIALIZE_PASS_BEGIN(PassName, Arg, Name, Cfg, Analysis)                    \
  PassName::registerOptions();

template <
    class PassName,
    std::enable_if_t<std::is_default_constructible<PassName>{}, bool> = true>
Pass *callDefaultCtor() {
  return new PassName();
}

template <
    class PassName,
    std::enable_if_t<!std::is_default_constructible<PassName>{}, bool> = true>
Pass *callDefaultCtor() {
  // Some codegen passes should only be testable via
  // `llc -{start|stop}-{before|after}=<passname>`, not via `opt -<passname>`.
  report_fatal_error("target-specific codegen-only pass");
}

//===---------------------------------------------------------------------------
/// RegisterPass<t> template - This template class is used to notify the system
/// that a Pass is available for use, and registers it into the internal
/// database maintained by the PassManager.  Unless this template is used, opt,
/// for example, will not be able to see the pass, and attempts to create the
/// pass will fail. This template is used in the following manner (at global
/// scope, in your .cpp file):
///
/// static RegisterPass<YourPassClassName> tmp("passopt", "My Pass Name");
///
/// This statement will cause your pass to be created by calling the default
/// constructor exposed by the pass.
template <typename passName> struct RegisterPass : public PassInfo {
  // Register Pass using default constructor...
  RegisterPass(StringRef PassArg, StringRef Name, bool CFGOnly = false,
               bool is_analysis = false)
      : PassInfo(Name, PassArg, &passName::ID,
                 PassInfo::NormalCtor_t(callDefaultCtor<passName>), CFGOnly,
                 is_analysis) {
    PassRegistry::getPassRegistry()->registerPass(*this);
  }
};

/// RegisterAnalysisGroup - Register a Pass as a member of an analysis _group_.
/// Analysis groups are used to define an interface (which need not derive from
/// Pass) that is required by passes to do their job.  Analysis Groups differ
/// from normal analyses because any available implementation of the group will
/// be used if it is available.
///
/// If no analysis implementing the interface is available, a default
/// implementation is created and added.  A pass registers itself as the default
/// implementation by specifying 'true' as the second template argument of this
/// class.
///
/// In addition to registering itself as an analysis group member, a pass must
/// register itself normally as well.  Passes may be members of multiple groups
/// and may still be "required" specifically by name.
///
/// The actual interface may also be registered as well (by not specifying the
/// second template argument).  The interface should be registered to associate
/// a nice name with the interface.
class RegisterAGBase : public PassInfo {
public:
  RegisterAGBase(StringRef Name, const void *InterfaceID,
                 const void *PassID = nullptr, bool isDefault = false);
};

template <typename Interface, bool Default = false>
struct RegisterAnalysisGroup : public RegisterAGBase {
  explicit RegisterAnalysisGroup(PassInfo &RPB)
      : RegisterAGBase(RPB.getPassName(), &Interface::ID, RPB.getTypeInfo(),
                       Default) {}

  explicit RegisterAnalysisGroup(const char *Name)
      : RegisterAGBase(Name, &Interface::ID) {}
};

#define INITIALIZE_ANALYSIS_GROUP(agName, name, defaultPass)                   \
  static void *initialize##agName##AnalysisGroupOnce(PassRegistry &Registry) { \
    initialize##defaultPass##Pass(Registry);                                   \
    PassInfo *AI = new PassInfo(name, &agName::ID);                            \
    Registry.registerAnalysisGroup(&agName::ID, 0, *AI, false, true);          \
    return AI;                                                                 \
  }                                                                            \
  static llvm::once_flag Initialize##agName##AnalysisGroupFlag;                \
  void llvm::initialize##agName##AnalysisGroup(PassRegistry &Registry) {       \
    llvm::call_once(Initialize##agName##AnalysisGroupFlag,                     \
                    initialize##agName##AnalysisGroupOnce,                     \
                    std::ref(Registry));                                       \
  }

#define INITIALIZE_AG_PASS(passName, agName, arg, name, cfg, analysis, def)    \
  static void *initialize##passName##PassOnce(PassRegistry &Registry) {        \
    if (!def)                                                                  \
      initialize##agName##AnalysisGroup(Registry);                             \
    PassInfo *PI = new PassInfo(                                               \
        name, arg, &passName::ID,                                              \
        PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);     \
    Registry.registerPass(*PI, true);                                          \
                                                                               \
    PassInfo *AI = new PassInfo(name, &agName::ID);                            \
    Registry.registerAnalysisGroup(&agName::ID, &passName::ID, *AI, def,       \
                                   true);                                      \
    return AI;                                                                 \
  }                                                                            \
  static llvm::once_flag Initialize##passName##PassFlag;                       \
  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
    llvm::call_once(Initialize##passName##PassFlag,                            \
                    initialize##passName##PassOnce, std::ref(Registry));       \
  }

#define INITIALIZE_AG_PASS_BEGIN(passName, agName, arg, n, cfg, analysis, def) \
  static void *initialize##passName##PassOnce(PassRegistry &Registry) {        \
    if (!def)                                                                  \
      initialize##agName##AnalysisGroup(Registry);

#define INITIALIZE_AG_PASS_END(passName, agName, arg, n, cfg, analysis, def)   \
  PassInfo *PI = new PassInfo(                                                 \
      n, arg, &passName::ID,                                                   \
      PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis);       \
  Registry.registerPass(*PI, true);                                            \
                                                                               \
  PassInfo *AI = new PassInfo(n, &agName::ID);                                 \
  Registry.registerAnalysisGroup(&agName::ID, &passName::ID, *AI, def, true);  \
  return AI;                                                                   \
  }                                                                            \
  static llvm::once_flag Initialize##passName##PassFlag;                       \
  void llvm::initialize##passName##Pass(PassRegistry &Registry) {              \
    llvm::call_once(Initialize##passName##PassFlag,                            \
                    initialize##passName##PassOnce, std::ref(Registry));       \
  }

//===---------------------------------------------------------------------------
/// PassRegistrationListener class - This class is meant to be derived from by
/// clients that are interested in which passes get registered and unregistered
/// at runtime (which can be because of the RegisterPass constructors being run
/// as the program starts up, or may be because a shared object just got
/// loaded).
struct PassRegistrationListener {
  PassRegistrationListener() = default;
  virtual ~PassRegistrationListener() = default;

  /// Callback functions - These functions are invoked whenever a pass is loaded
  /// or removed from the current executable.
  virtual void passRegistered(const PassInfo *) {}

  /// enumeratePasses - Iterate over the registered passes, calling the
  /// passEnumerate callback on each PassInfo object.
  void enumeratePasses();

  /// passEnumerate - Callback function invoked when someone calls
  /// enumeratePasses on this PassRegistrationListener object.
  virtual void passEnumerate(const PassInfo *) {}
};

} // end namespace llvm

#endif // LLVM_PASSSUPPORT_H

// File: InterfaceStub/ELFObjHandler.h
//===- ELFObjHandler.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file supports reading and writing of ELF dynamic shared objects.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_INTERFACESTUB_ELFOBJHANDLER_H
#define LLVM_INTERFACESTUB_ELFOBJHANDLER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <memory>

namespace llvm {

namespace ifs {
struct IFSStub;

/// Attempt to read a binary ELF file from a MemoryBuffer.
Expected<std::unique_ptr<IFSStub>> readELFFile(MemoryBufferRef Buf);

/// Attempt to write a binary ELF stub.
/// This function determines appropriate ELFType using the passed ELFTarget and
/// then writes a binary ELF stub to a specified file path.
///
/// @param FilePath File path for writing the ELF binary.
/// @param Stub Source ELFStub to generate a binary ELF stub from.
/// @param WriteIfChanged If true, skip rewriting the file (preserving its
///        timestamp) when the output would be unchanged.
Error writeBinaryStub(StringRef FilePath, const IFSStub &Stub,
                      bool WriteIfChanged = false);
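
// Illustrative sketch: reading a shared object and writing it back out as a
// binary stub. Paths, the MemoryBuffer include (llvm/Support/MemoryBuffer.h),
// and the error handling are assumptions for the example.
//
//   auto BufOrErr = MemoryBuffer::getFile("libfoo.so");
//   if (!BufOrErr)
//     /* handle std::error_code */;
//   Expected<std::unique_ptr<IFSStub>> StubOrErr =
//       readELFFile((*BufOrErr)->getMemBufferRef());
//   if (!StubOrErr)
//     /* handle llvm::Error */;
//   if (Error E = writeBinaryStub("libfoo.stub.so", **StubOrErr))
//     /* handle llvm::Error */;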

} // end namespace ifs
} // end namespace llvm

#endif // LLVM_INTERFACESTUB_ELFOBJHANDLER_H

// File: InterfaceStub/IFSStub.h
//===- IFSStub.h ------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines an internal representation of an InterFace Stub.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_INTERFACESTUB_IFSSTUB_H
#define LLVM_INTERFACESTUB_IFSSTUB_H

#include "llvm/Support/VersionTuple.h"
#include <optional>
#include <vector>

namespace llvm {
namespace ifs {

typedef uint16_t IFSArch;

enum class IFSSymbolType {
  NoType,
  Object,
  Func,
  TLS,

  // Type information is 4 bits, so 16 is safely out of range.
  Unknown = 16,
};

enum class IFSEndiannessType {
  Little,
  Big,

  // Endianness info is 1 byte, so 256 is safely out of range.
  Unknown = 256,
};

enum class IFSBitWidthType {
  IFS32,
  IFS64,

  // Bit width info is 1 byte, so 256 is safely out of range.
  Unknown = 256,
};

struct IFSSymbol {
  IFSSymbol() = default;
  explicit IFSSymbol(std::string SymbolName) : Name(std::move(SymbolName)) {}
  std::string Name;
  std::optional<uint64_t> Size;
  IFSSymbolType Type = IFSSymbolType::NoType;
  bool Undefined = false;
  bool Weak = false;
  std::optional<std::string> Warning;
  bool operator<(const IFSSymbol &RHS) const { return Name < RHS.Name; }
};

struct IFSTarget {
  std::optional<std::string> Triple;
  std::optional<std::string> ObjectFormat;
  std::optional<IFSArch> Arch;
  std::optional<std::string> ArchString;
  std::optional<IFSEndiannessType> Endianness;
  std::optional<IFSBitWidthType> BitWidth;

  bool empty();
};

inline bool operator==(const IFSTarget &Lhs, const IFSTarget &Rhs) {
  if (Lhs.Arch != Rhs.Arch || Lhs.BitWidth != Rhs.BitWidth ||
      Lhs.Endianness != Rhs.Endianness ||
      Lhs.ObjectFormat != Rhs.ObjectFormat || Lhs.Triple != Rhs.Triple)
    return false;
  return true;
}

inline bool operator!=(const IFSTarget &Lhs, const IFSTarget &Rhs) {
  return !(Lhs == Rhs);
}

// A cumulative representation of InterFace stubs.
// Both textual and binary stubs are read into and written out from this
// object.
struct IFSStub {
  // TODO: Add support for symbol versioning.
  VersionTuple IfsVersion;
  std::optional<std::string> SoName;
  IFSTarget Target;
  std::vector<std::string> NeededLibs;
  std::vector<IFSSymbol> Symbols;

  IFSStub() = default;
  IFSStub(const IFSStub &Stub);
  IFSStub(IFSStub &&Stub);
  virtual ~IFSStub() = default;
};

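// Example (a hedged sketch of populating the structure by hand; all values
// are illustrative):
//
//   IFSStub Stub;
//   Stub.IfsVersion = VersionTuple(3, 0);
//   Stub.SoName = "libfoo.so.1";
//   Stub.Target.Endianness = IFSEndiannessType::Little;
//   Stub.Target.BitWidth = IFSBitWidthType::IFS64;
//   IFSSymbol Sym("foo");
//   Sym.Type = IFSSymbolType::Func;
//   Stub.Symbols.push_back(std::move(Sym));
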
// Create an alias class for IFSStub.
// LLVM's YAML library does not allow mapping a class with two sets of traits,
// which prevents us from using the 'Target:' field with different definitions.
// This class makes it possible to map a second set of traits so that the same
// data structure can be used for two different YAML schemas.
struct IFSStubTriple : IFSStub {
  IFSStubTriple() = default;
  IFSStubTriple(const IFSStub &Stub);
  IFSStubTriple(const IFSStubTriple &Stub);
  IFSStubTriple(IFSStubTriple &&Stub);
};

/// This function converts the bit width type from the IFS enum to the ELF
/// format value. Currently, ELFCLASS32 and ELFCLASS64 are supported.
///
/// @param BitWidth IFS bit width type.
uint8_t convertIFSBitWidthToELF(IFSBitWidthType BitWidth);

/// This function converts the endianness type from the IFS enum to the ELF
/// format value. Currently, ELFDATA2LSB and ELFDATA2MSB are supported.
///
/// @param Endianness IFS endianness type.
uint8_t convertIFSEndiannessToELF(IFSEndiannessType Endianness);

/// This function converts the symbol type from the IFS enum to the ELF format
/// value. Currently, STT_NOTYPE, STT_OBJECT, STT_FUNC, and STT_TLS are
/// supported.
///
/// @param SymbolType IFS symbol type.
uint8_t convertIFSSymbolTypeToELF(IFSSymbolType SymbolType);

/// This function extracts the ELF bit width from e_ident[EI_CLASS] of an ELF
/// file. Currently, ELFCLASS32 and ELFCLASS64 are supported.
/// Other values are mapped to IFSBitWidthType::Unknown.
///
/// @param BitWidth e_ident[EI_CLASS] value to extract bit width from.
IFSBitWidthType convertELFBitWidthToIFS(uint8_t BitWidth);

/// This function extracts the ELF endianness from e_ident[EI_DATA] of an ELF
/// file. Currently, ELFDATA2LSB and ELFDATA2MSB are supported.
/// Other values are mapped to IFSEndiannessType::Unknown.
///
/// @param Endianness e_ident[EI_DATA] value to extract endianness type from.
IFSEndiannessType convertELFEndiannessToIFS(uint8_t Endianness);

/// This function extracts the symbol type from a symbol's st_info member and
/// maps it to an IFSSymbolType enum.
/// Currently, STT_NOTYPE, STT_OBJECT, STT_FUNC, and STT_TLS are supported.
/// Other symbol types are mapped to IFSSymbolType::Unknown.
///
/// @param SymbolType Binary symbol st_info to extract symbol type from.
IFSSymbolType convertELFSymbolTypeToIFS(uint8_t SymbolType);
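
// Example round trip between the IFS and ELF encodings (a sketch; it relies
// only on the declarations above, and the expected values follow from the
// documented ELFCLASS/ELFDATA mappings):
//
//   uint8_t Cls = convertIFSBitWidthToELF(IFSBitWidthType::IFS64);
//   assert(convertELFBitWidthToIFS(Cls) == IFSBitWidthType::IFS64);
//   uint8_t Data = convertIFSEndiannessToELF(IFSEndiannessType::Little);
//   assert(convertELFEndiannessToIFS(Data) == IFSEndiannessType::Little);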
} // namespace ifs
} // end namespace llvm

#endif // LLVM_INTERFACESTUB_IFSSTUB_H
//===- IFSHandler.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares an interface for reading and writing .ifs (text-based
/// InterFace Stub) files.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_INTERFACESTUB_IFSHANDLER_H
#define LLVM_INTERFACESTUB_IFSHANDLER_H

#include "IFSStub.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace llvm {

class raw_ostream;
class Error;
class StringRef;

namespace ifs {

struct IFSStub;

/// The current version of the IFS (.ifs) file format.
const VersionTuple IFSVersionCurrent(3, 0);

/// Attempts to read an IFS interface file from a StringRef buffer.
Expected<std::unique_ptr<IFSStub>> readIFSFromBuffer(StringRef Buf);

/// Attempts to write an IFS interface file to a raw_ostream.
Error writeIFSToOutputStream(raw_ostream &OS, const IFSStub &Stub);

/// Override the target platform information in the text stub.
Error overrideIFSTarget(IFSStub &Stub, std::optional<IFSArch> OverrideArch,
                        std::optional<IFSEndiannessType> OverrideEndianness,
                        std::optional<IFSBitWidthType> OverrideBitWidth,
                        std::optional<std::string> OverrideTriple);

/// Validate the target platform information in the text stub.
Error validateIFSTarget(IFSStub &Stub, bool ParseTriple);

/// Strips target platform information from the text stub.
void stripIFSTarget(IFSStub &Stub, bool StripTriple, bool StripArch,
                    bool StripEndianness, bool StripBitWidth);

/// Filters the stub's symbol list, optionally removing undefined symbols and
/// removing any symbols matched by the Exclude list.
Error filterIFSSyms(IFSStub &Stub, bool StripUndefined,
                    const std::vector<std::string> &Exclude = {});

/// Parses an LLVM triple string into an IFSTarget struct.
IFSTarget parseTriple(StringRef TripleStr);
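
// Example (a minimal sketch; `Text` is assumed to hold the contents of a .ifs
// file, the triple string is illustrative, and raw_string_ostream comes from
// llvm/Support/raw_ostream.h):
//
//   Expected<std::unique_ptr<IFSStub>> StubOrErr = readIFSFromBuffer(Text);
//   if (!StubOrErr)
//     return; // handle StubOrErr.takeError()
//   IFSTarget Tgt = parseTriple("x86_64-unknown-linux-gnu");
//   std::string Out;
//   raw_string_ostream OS(Out);
//   if (Error E = writeIFSToOutputStream(OS, **StubOrErr))
//     consumeError(std::move(E));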

} // end namespace ifs
} // end namespace llvm

#endif // LLVM_INTERFACESTUB_IFSHANDLER_H
//==- CodeViewYAMLTypes.h - CodeView YAMLIO Type implementation --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines classes for handling the YAML representation of CodeView
// Debug Info.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLTYPES_H
#define LLVM_OBJECTYAML_CODEVIEWYAMLTYPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {

namespace codeview {
class AppendingTypeTableBuilder;
}

namespace CodeViewYAML {

namespace detail {

struct LeafRecordBase;
struct MemberRecordBase;

} // end namespace detail

struct MemberRecord {
  std::shared_ptr<detail::MemberRecordBase> Member;
};

struct LeafRecord {
  std::shared_ptr<detail::LeafRecordBase> Leaf;

  codeview::CVType
  toCodeViewRecord(codeview::AppendingTypeTableBuilder &Serializer) const;
  static Expected<LeafRecord> fromCodeViewRecord(codeview::CVType Type);
};

std::vector<LeafRecord> fromDebugT(ArrayRef<uint8_t> DebugTorP,
                                   StringRef SectionName);
ArrayRef<uint8_t> toDebugT(ArrayRef<LeafRecord>, BumpPtrAllocator &Alloc,
                           StringRef SectionName);
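
// Example (a sketch; `DebugTContents` is assumed to be the raw payload of a
// .debug$T section obtained elsewhere):
//
//   std::vector<LeafRecord> Leaves = fromDebugT(DebugTContents, ".debug$T");
//   BumpPtrAllocator Alloc;
//   ArrayRef<uint8_t> Rebuilt = toDebugT(Leaves, Alloc, ".debug$T");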

} // end namespace CodeViewYAML

} // end namespace llvm

LLVM_YAML_DECLARE_SCALAR_TRAITS(codeview::GUID, QuotingType::Single)

LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::LeafRecord)
LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::MemberRecord)

LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::LeafRecord)
LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::MemberRecord)

#endif // LLVM_OBJECTYAML_CODEVIEWYAMLTYPES_H
//===- ELFYAML.h - ELF YAMLIO implementation --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation
/// of ELF.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_ELFYAML_H
#define LLVM_OBJECTYAML_ELFYAML_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/ObjectYAML/DWARFYAML.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>

namespace llvm {
namespace ELFYAML {

// Helpers for appending/stripping the unique suffix that is used to
// disambiguate multiple sections or symbols with the same name in YAML.
StringRef dropUniqueSuffix(StringRef S);
std::string appendUniqueSuffix(StringRef Name, const Twine& Msg);

// These types are invariant across 32/64-bit ELF, so for simplicity just
// directly give them their exact sizes. We don't need to worry about
// endianness because these are just the types in the YAMLIO structures,
// and are appropriately converted to the necessary endianness when
// reading/generating binary object files.
// The naming of these types is intended to be ELF_PREFIX, where PREFIX is
// the common prefix of the respective constants. E.g. ELF_EM corresponds
// to the `e_machine` constants, like `EM_X86_64`.
// In the future, these would probably be better suited by C++11 enum classes
// with an appropriate fixed underlying type.
LLVM_YAML_STRONG_TYPEDEF(uint16_t, ELF_ET)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_PT)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_EM)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFCLASS)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFDATA)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFOSABI)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_EF)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_DYNTAG)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_PF)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_SHT)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_REL)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_RSS)
// Just use 64, since it can hold 32-bit values too.
LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_SHF)
LLVM_YAML_STRONG_TYPEDEF(uint16_t, ELF_SHN)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STB)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STT)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_NT)

LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_AFL_REG)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_ABI_FP)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_EXT)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_ASE)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_FLAGS1)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_ISA)

LLVM_YAML_STRONG_TYPEDEF(StringRef, YAMLFlowString)
LLVM_YAML_STRONG_TYPEDEF(int64_t, YAMLIntUInt)

template <class ELFT>
unsigned getDefaultShEntSize(unsigned EMachine, ELF_SHT SecType,
                             StringRef SecName) {
  if (EMachine == ELF::EM_MIPS && SecType == ELF::SHT_MIPS_ABIFLAGS)
    return sizeof(object::Elf_Mips_ABIFlags<ELFT>);

  switch (SecType) {
  case ELF::SHT_SYMTAB:
  case ELF::SHT_DYNSYM:
    return sizeof(typename ELFT::Sym);
  case ELF::SHT_GROUP:
    return sizeof(typename ELFT::Word);
  case ELF::SHT_REL:
    return sizeof(typename ELFT::Rel);
  case ELF::SHT_RELA:
    return sizeof(typename ELFT::Rela);
  case ELF::SHT_RELR:
    return sizeof(typename ELFT::Relr);
  case ELF::SHT_DYNAMIC:
    return sizeof(typename ELFT::Dyn);
  case ELF::SHT_HASH:
    return sizeof(typename ELFT::Word);
  case ELF::SHT_SYMTAB_SHNDX:
    return sizeof(typename ELFT::Word);
  case ELF::SHT_GNU_versym:
    return sizeof(typename ELFT::Half);
  case ELF::SHT_LLVM_CALL_GRAPH_PROFILE:
    return sizeof(object::Elf_CGProfile_Impl<ELFT>);
  default:
    if (SecName == ".debug_str")
      return 1;
    return 0;
  }
}
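
// For example, the default entry size of a symbol table section in a
// little-endian 64-bit object is the size of that layout's symbol record
// (a sketch; object::ELF64LE is defined in llvm/Object/ELFTypes.h):
//
//   unsigned EntSize = getDefaultShEntSize<object::ELF64LE>(
//       ELF::EM_X86_64, ELF_SHT(ELF::SHT_SYMTAB), ".symtab");
//   // EntSize == sizeof(object::ELF64LE::Sym)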

// For now, hardcode 64 bits everywhere that 32 or 64 would be needed
// since 64-bit can hold 32-bit values too.
struct FileHeader {
  ELF_ELFCLASS Class;
  ELF_ELFDATA Data;
  ELF_ELFOSABI OSABI;
  llvm::yaml::Hex8 ABIVersion;
  ELF_ET Type;
  std::optional<ELF_EM> Machine;
  ELF_EF Flags;
  llvm::yaml::Hex64 Entry;
  std::optional<StringRef> SectionHeaderStringTable;

  std::optional<llvm::yaml::Hex64> EPhOff;
  std::optional<llvm::yaml::Hex16> EPhEntSize;
  std::optional<llvm::yaml::Hex16> EPhNum;
  std::optional<llvm::yaml::Hex16> EShEntSize;
  std::optional<llvm::yaml::Hex64> EShOff;
  std::optional<llvm::yaml::Hex16> EShNum;
  std::optional<llvm::yaml::Hex16> EShStrNdx;
};

struct SectionHeader {
  StringRef Name;
};

struct Symbol {
  StringRef Name;
  ELF_STT Type;
  std::optional<StringRef> Section;
  std::optional<ELF_SHN> Index;
  ELF_STB Binding;
  std::optional<llvm::yaml::Hex64> Value;
  std::optional<llvm::yaml::Hex64> Size;
  std::optional<uint8_t> Other;

  std::optional<uint32_t> StName;
};

struct SectionOrType {
  StringRef sectionNameOrType;
};

struct DynamicEntry {
  ELF_DYNTAG Tag;
  llvm::yaml::Hex64 Val;
};

struct BBAddrMapEntry {
  struct BBEntry {
    uint32_t ID;
    llvm::yaml::Hex64 AddressOffset;
    llvm::yaml::Hex64 Size;
    llvm::yaml::Hex64 Metadata;
  };
  uint8_t Version;
  llvm::yaml::Hex8 Feature;
  llvm::yaml::Hex64 Address;
  std::optional<uint64_t> NumBlocks;
  std::optional<std::vector<BBEntry>> BBEntries;
};

struct StackSizeEntry {
  llvm::yaml::Hex64 Address;
  llvm::yaml::Hex64 Size;
};

struct NoteEntry {
  StringRef Name;
  yaml::BinaryRef Desc;
  ELF_NT Type;
};

struct Chunk {
  enum class ChunkKind {
    Dynamic,
    Group,
    RawContent,
    Relocation,
    Relr,
    NoBits,
    Note,
    Hash,
    GnuHash,
    Verdef,
    Verneed,
    StackSizes,
    SymtabShndxSection,
    Symver,
    ARMIndexTable,
    MipsABIFlags,
    Addrsig,
    LinkerOptions,
    DependentLibraries,
    CallGraphProfile,
    BBAddrMap,

    // Special chunks.
    SpecialChunksStart,
    Fill = SpecialChunksStart,
    SectionHeaderTable,
  };

  ChunkKind Kind;
  StringRef Name;
  std::optional<llvm::yaml::Hex64> Offset;

  // Chunks are usually loaded from YAML rather than created implicitly.
  // This flag signals which of the two is the case for this chunk.
  bool IsImplicit;

  Chunk(ChunkKind K, bool Implicit) : Kind(K), IsImplicit(Implicit) {}
  virtual ~Chunk();
};

struct Section : public Chunk {
  ELF_SHT Type;
  std::optional<ELF_SHF> Flags;
  std::optional<llvm::yaml::Hex64> Address;
  std::optional<StringRef> Link;
  llvm::yaml::Hex64 AddressAlign;
  std::optional<llvm::yaml::Hex64> EntSize;

  std::optional<yaml::BinaryRef> Content;
  std::optional<llvm::yaml::Hex64> Size;

  // Holds the original section index.
  unsigned OriginalSecNdx;

  Section(ChunkKind Kind, bool IsImplicit = false) : Chunk(Kind, IsImplicit) {}

  static bool classof(const Chunk *S) {
    return S->Kind < ChunkKind::SpecialChunksStart;
  }

  // Some derived sections might have their own special entries. This method
  // returns a vector of <entry name, is used> pairs. It is used for section
  // validation.
  virtual std::vector<std::pair<StringRef, bool>> getEntries() const {
    return {};
  };

  // The following members are used to override section fields, which is
  // useful for creating invalid objects.

  // This can be used to override the sh_addralign field.
  std::optional<llvm::yaml::Hex64> ShAddrAlign;

  // This can be used to override the offset stored in the sh_name field.
  // It does not affect the name stored in the string table.
  std::optional<llvm::yaml::Hex64> ShName;

  // This can be used to override the sh_offset field. It does not place the
  // section data at the offset specified.
  std::optional<llvm::yaml::Hex64> ShOffset;

  // This can be used to override the sh_size field. It does not affect the
  // content written.
  std::optional<llvm::yaml::Hex64> ShSize;

  // This can be used to override the sh_flags field.
  std::optional<llvm::yaml::Hex64> ShFlags;

  // This can be used to override the sh_type field. It is useful when we
  // want to use specific YAML keys for a section of a particular type to
  // describe the content, but still want to have a different final type
  // for the section.
  std::optional<ELF_SHT> ShType;
};

// Fill is a block of data which is placed outside of sections. It is not
// present in the section header table, but it might affect the output file
// size and the program headers produced.
struct Fill : Chunk {
  std::optional<yaml::BinaryRef> Pattern;
  llvm::yaml::Hex64 Size;

  Fill() : Chunk(ChunkKind::Fill, /*Implicit=*/false) {}

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Fill; }
};

struct SectionHeaderTable : Chunk {
  SectionHeaderTable(bool IsImplicit)
      : Chunk(ChunkKind::SectionHeaderTable, IsImplicit) {}

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::SectionHeaderTable;
  }

  std::optional<std::vector<SectionHeader>> Sections;
  std::optional<std::vector<SectionHeader>> Excluded;
  std::optional<bool> NoHeaders;

  size_t getNumHeaders(size_t SectionsNum) const {
    if (IsImplicit || isDefault())
      return SectionsNum;
    if (NoHeaders)
      return (*NoHeaders) ? 0 : SectionsNum;
    return (Sections ? Sections->size() : 0) + /*Null section*/ 1;
  }

  bool isDefault() const { return !Sections && !Excluded && !NoHeaders; }

  static constexpr StringRef TypeStr = "SectionHeaderTable";
};

struct BBAddrMapSection : Section {
  std::optional<std::vector<BBAddrMapEntry>> Entries;

  BBAddrMapSection() : Section(ChunkKind::BBAddrMap) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::BBAddrMap;
  }
};

struct StackSizesSection : Section {
  std::optional<std::vector<StackSizeEntry>> Entries;

  StackSizesSection() : Section(ChunkKind::StackSizes) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::StackSizes;
  }

  static bool nameMatches(StringRef Name) {
    return Name == ".stack_sizes";
  }
};

struct DynamicSection : Section {
  std::optional<std::vector<DynamicEntry>> Entries;

  DynamicSection() : Section(ChunkKind::Dynamic) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Dynamic; }
};

struct RawContentSection : Section {
  std::optional<llvm::yaml::Hex64> Info;

  RawContentSection() : Section(ChunkKind::RawContent) {}

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::RawContent;
  }

  // Used when the section content is read as an array of bytes.
  std::optional<std::vector<uint8_t>> ContentBuf;
};

struct NoBitsSection : Section {
  NoBitsSection() : Section(ChunkKind::NoBits) {}

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::NoBits; }
};

struct NoteSection : Section {
  std::optional<std::vector<ELFYAML::NoteEntry>> Notes;

  NoteSection() : Section(ChunkKind::Note) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Notes", Notes.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Note; }
};

struct HashSection : Section {
  std::optional<std::vector<uint32_t>> Bucket;
  std::optional<std::vector<uint32_t>> Chain;

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Bucket", Bucket.has_value()}, {"Chain", Chain.has_value()}};
  };

  // The following members are used to override section fields.
  // This is useful for creating invalid objects.
  std::optional<llvm::yaml::Hex64> NBucket;
  std::optional<llvm::yaml::Hex64> NChain;

  HashSection() : Section(ChunkKind::Hash) {}

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Hash; }
};

struct GnuHashHeader {
  // The number of hash buckets.
  // Not used when dumping the object, but can be used to override
  // the real number of buckets when emitting an object from a YAML document.
  std::optional<llvm::yaml::Hex32> NBuckets;

  // Index of the first symbol in the dynamic symbol table
  // included in the hash table.
  llvm::yaml::Hex32 SymNdx;

  // The number of words in the Bloom filter.
  // Not used when dumping the object, but can be used to override the real
  // number of words in the Bloom filter when emitting an object from a YAML
  // document.
  std::optional<llvm::yaml::Hex32> MaskWords;

  // A shift constant used by the Bloom filter.
  llvm::yaml::Hex32 Shift2;
};

struct GnuHashSection : Section {
  std::optional<GnuHashHeader> Header;
  std::optional<std::vector<llvm::yaml::Hex64>> BloomFilter;
  std::optional<std::vector<llvm::yaml::Hex32>> HashBuckets;
  std::optional<std::vector<llvm::yaml::Hex32>> HashValues;

  GnuHashSection() : Section(ChunkKind::GnuHash) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Header", Header.has_value()},
            {"BloomFilter", BloomFilter.has_value()},
            {"HashBuckets", HashBuckets.has_value()},
            {"HashValues", HashValues.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::GnuHash; }
};

struct VernauxEntry {
  uint32_t Hash;
  uint16_t Flags;
  uint16_t Other;
  StringRef Name;
};

struct VerneedEntry {
  uint16_t Version;
  StringRef File;
  std::vector<VernauxEntry> AuxV;
};

struct VerneedSection : Section {
  std::optional<std::vector<VerneedEntry>> VerneedV;
  std::optional<llvm::yaml::Hex64> Info;

  VerneedSection() : Section(ChunkKind::Verneed) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Dependencies", VerneedV.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::Verneed;
  }
};

struct AddrsigSection : Section {
  std::optional<std::vector<YAMLFlowString>> Symbols;

  AddrsigSection() : Section(ChunkKind::Addrsig) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Symbols", Symbols.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Addrsig; }
};

struct LinkerOption {
  StringRef Key;
  StringRef Value;
};

struct LinkerOptionsSection : Section {
  std::optional<std::vector<LinkerOption>> Options;

  LinkerOptionsSection() : Section(ChunkKind::LinkerOptions) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Options", Options.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::LinkerOptions;
  }
};

struct DependentLibrariesSection : Section {
  std::optional<std::vector<YAMLFlowString>> Libs;

  DependentLibrariesSection() : Section(ChunkKind::DependentLibraries) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Libraries", Libs.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::DependentLibraries;
  }
};

// Represents the call graph profile section entry.
struct CallGraphEntryWeight {
  // The weight of the edge.
  uint64_t Weight;
};

struct CallGraphProfileSection : Section {
  std::optional<std::vector<CallGraphEntryWeight>> Entries;

  CallGraphProfileSection() : Section(ChunkKind::CallGraphProfile) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::CallGraphProfile;
  }
};

struct SymverSection : Section {
  std::optional<std::vector<uint16_t>> Entries;

  SymverSection() : Section(ChunkKind::Symver) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Symver; }
};

struct VerdefEntry {
  std::optional<uint16_t> Version;
  std::optional<uint16_t> Flags;
  std::optional<uint16_t> VersionNdx;
  std::optional<uint32_t> Hash;
  std::vector<StringRef> VerNames;
};

struct VerdefSection : Section {
  std::optional<std::vector<VerdefEntry>> Entries;
  std::optional<llvm::yaml::Hex64> Info;

  VerdefSection() : Section(ChunkKind::Verdef) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Verdef; }
};

struct GroupSection : Section {
  // Members of a group contain a flag and a list of section indices
  // that are part of the group.
  std::optional<std::vector<SectionOrType>> Members;
  std::optional<StringRef> Signature; /* Info */

  GroupSection() : Section(ChunkKind::Group) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Members", Members.has_value()}};
  };

  static bool classof(const Chunk *S) { return S->Kind == ChunkKind::Group; }
};

struct Relocation {
  llvm::yaml::Hex64 Offset;
  YAMLIntUInt Addend;
  ELF_REL Type;
  std::optional<StringRef> Symbol;
};

struct RelocationSection : Section {
  std::optional<std::vector<Relocation>> Relocations;
  StringRef RelocatableSec; /* Info */

  RelocationSection() : Section(ChunkKind::Relocation) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Relocations", Relocations.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::Relocation;
  }
};

struct RelrSection : Section {
  std::optional<std::vector<llvm::yaml::Hex64>> Entries;

  RelrSection() : Section(ChunkKind::Relr) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::Relr;
  }
};

struct SymtabShndxSection : Section {
  std::optional<std::vector<uint32_t>> Entries;

  SymtabShndxSection() : Section(ChunkKind::SymtabShndxSection) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::SymtabShndxSection;
  }
};

struct ARMIndexTableEntry {
  llvm::yaml::Hex32 Offset;
  llvm::yaml::Hex32 Value;
};

struct ARMIndexTableSection : Section {
  std::optional<std::vector<ARMIndexTableEntry>> Entries;

  ARMIndexTableSection() : Section(ChunkKind::ARMIndexTable) {}

  std::vector<std::pair<StringRef, bool>> getEntries() const override {
    return {{"Entries", Entries.has_value()}};
  };

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::ARMIndexTable;
  }
};

// Represents .MIPS.abiflags section
struct MipsABIFlags : Section {
  llvm::yaml::Hex16 Version;
  MIPS_ISA ISALevel;
  llvm::yaml::Hex8 ISARevision;
  MIPS_AFL_REG GPRSize;
  MIPS_AFL_REG CPR1Size;
  MIPS_AFL_REG CPR2Size;
  MIPS_ABI_FP FpABI;
  MIPS_AFL_EXT ISAExtension;
  MIPS_AFL_ASE ASEs;
  MIPS_AFL_FLAGS1 Flags1;
  llvm::yaml::Hex32 Flags2;

  MipsABIFlags() : Section(ChunkKind::MipsABIFlags) {}

  static bool classof(const Chunk *S) {
    return S->Kind == ChunkKind::MipsABIFlags;
  }
};

struct ProgramHeader {
  ELF_PT Type;
  ELF_PF Flags;
  llvm::yaml::Hex64 VAddr;
  llvm::yaml::Hex64 PAddr;
  std::optional<llvm::yaml::Hex64> Align;
  std::optional<llvm::yaml::Hex64> FileSize;
  std::optional<llvm::yaml::Hex64> MemSize;
  std::optional<llvm::yaml::Hex64> Offset;
  std::optional<StringRef> FirstSec;
  std::optional<StringRef> LastSec;

  // This vector contains all chunks from [FirstSec, LastSec].
  std::vector<Chunk *> Chunks;
};

struct Object {
  FileHeader Header;
  std::vector<ProgramHeader> ProgramHeaders;

  // An object might contain output section descriptions as well as
  // custom data that does not belong to any section.
  std::vector<std::unique_ptr<Chunk>> Chunks;

  // Although in reality the symbols reside in a section, it is a lot
  // cleaner and nicer if we read them from the YAML as a separate
  // top-level key, which automatically ensures that invariants like there
  // being a single SHT_SYMTAB section are upheld.
  std::optional<std::vector<Symbol>> Symbols;
  std::optional<std::vector<Symbol>> DynamicSymbols;
  std::optional<DWARFYAML::Data> DWARF;

  std::vector<Section *> getSections() {
    std::vector<Section *> Ret;
    for (const std::unique_ptr<Chunk> &Sec : Chunks)
      if (auto S = dyn_cast<ELFYAML::Section>(Sec.get()))
        Ret.push_back(S);
    return Ret;
  }

  const SectionHeaderTable &getSectionHeaderTable() const {
    for (const std::unique_ptr<Chunk> &C : Chunks)
      if (auto *S = dyn_cast<ELFYAML::SectionHeaderTable>(C.get()))
        return *S;
    llvm_unreachable("the section header table chunk must always be present");
  }

  ELF_ELFOSABI getOSAbi() const;
  unsigned getMachine() const;
};

bool shouldAllocateFileSpace(ArrayRef<ProgramHeader> Phdrs,
                             const NoBitsSection &S);

} // end namespace ELFYAML
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::StackSizeEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::BBAddrMapEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::BBAddrMapEntry::BBEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::DynamicEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::LinkerOption)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::CallGraphEntryWeight)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::NoteEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::ProgramHeader)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::SectionHeader)
LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::ELFYAML::Chunk>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Symbol)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::VerdefEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::VernauxEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::VerneedEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::SectionOrType)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::ARMIndexTableEntry)

namespace llvm {
namespace yaml {

template <> struct ScalarTraits<ELFYAML::YAMLIntUInt> {
  static void output(const ELFYAML::YAMLIntUInt &Val, void *Ctx,
                     raw_ostream &Out);
  static StringRef input(StringRef Scalar, void *Ctx,
                         ELFYAML::YAMLIntUInt &Val);
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ET> {
  static void enumeration(IO &IO, ELFYAML::ELF_ET &Value);
};

template <> struct ScalarEnumerationTraits<ELFYAML::ELF_PT> {
  static void enumeration(IO &IO, ELFYAML::ELF_PT &Value);
};

template <> struct ScalarEnumerationTraits<ELFYAML::ELF_NT> {
  static void enumeration(IO &IO, ELFYAML::ELF_NT &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_EM> {
  static void enumeration(IO &IO, ELFYAML::ELF_EM &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ELFCLASS> {
  static void enumeration(IO &IO, ELFYAML::ELF_ELFCLASS &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ELFDATA> {
  static void enumeration(IO &IO, ELFYAML::ELF_ELFDATA &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI> {
  static void enumeration(IO &IO, ELFYAML::ELF_ELFOSABI &Value);
};

template <>
struct ScalarBitSetTraits<ELFYAML::ELF_EF> {
  static void bitset(IO &IO, ELFYAML::ELF_EF &Value);
};

template <> struct ScalarBitSetTraits<ELFYAML::ELF_PF> {
  static void bitset(IO &IO, ELFYAML::ELF_PF &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_SHT> {
  static void enumeration(IO &IO, ELFYAML::ELF_SHT &Value);
};

template <>
struct ScalarBitSetTraits<ELFYAML::ELF_SHF> {
  static void bitset(IO &IO, ELFYAML::ELF_SHF &Value);
};

template <> struct ScalarEnumerationTraits<ELFYAML::ELF_SHN> {
  static void enumeration(IO &IO, ELFYAML::ELF_SHN &Value);
};

template <> struct ScalarEnumerationTraits<ELFYAML::ELF_STB> {
  static void enumeration(IO &IO, ELFYAML::ELF_STB &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_STT> {
  static void enumeration(IO &IO, ELFYAML::ELF_STT &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_REL> {
  static void enumeration(IO &IO, ELFYAML::ELF_REL &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_DYNTAG> {
  static void enumeration(IO &IO, ELFYAML::ELF_DYNTAG &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::ELF_RSS> {
  static void enumeration(IO &IO, ELFYAML::ELF_RSS &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_REG> {
  static void enumeration(IO &IO, ELFYAML::MIPS_AFL_REG &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_ABI_FP> {
  static void enumeration(IO &IO, ELFYAML::MIPS_ABI_FP &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_EXT> {
  static void enumeration(IO &IO, ELFYAML::MIPS_AFL_EXT &Value);
};

template <>
struct ScalarEnumerationTraits<ELFYAML::MIPS_ISA> {
  static void enumeration(IO &IO, ELFYAML::MIPS_ISA &Value);
};

template <>
struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_ASE> {
  static void bitset(IO &IO, ELFYAML::MIPS_AFL_ASE &Value);
};

template <>
struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_FLAGS1> {
  static void bitset(IO &IO, ELFYAML::MIPS_AFL_FLAGS1 &Value);
};

template <>
struct MappingTraits<ELFYAML::FileHeader> {
  static void mapping(IO &IO, ELFYAML::FileHeader &FileHdr);
};

template <> struct MappingTraits<ELFYAML::SectionHeader> {
  static void mapping(IO &IO, ELFYAML::SectionHeader &SHdr);
};

template <> struct MappingTraits<ELFYAML::ProgramHeader> {
  static void mapping(IO &IO, ELFYAML::ProgramHeader &FileHdr);
  static std::string validate(IO &IO, ELFYAML::ProgramHeader &FileHdr);
};

template <>
struct MappingTraits<ELFYAML::Symbol> {
  static void mapping(IO &IO, ELFYAML::Symbol &Symbol);
  static std::string validate(IO &IO, ELFYAML::Symbol &Symbol);
};

template <> struct MappingTraits<ELFYAML::StackSizeEntry> {
  static void mapping(IO &IO, ELFYAML::StackSizeEntry &Rel);
};

template <> struct MappingTraits<ELFYAML::BBAddrMapEntry> {
  static void mapping(IO &IO, ELFYAML::BBAddrMapEntry &Rel);
};

template <> struct MappingTraits<ELFYAML::BBAddrMapEntry::BBEntry> {
  static void mapping(IO &IO, ELFYAML::BBAddrMapEntry::BBEntry &Rel);
};

template <> struct MappingTraits<ELFYAML::GnuHashHeader> {
  static void mapping(IO &IO, ELFYAML::GnuHashHeader &Rel);
};

template <> struct MappingTraits<ELFYAML::DynamicEntry> {
  static void mapping(IO &IO, ELFYAML::DynamicEntry &Rel);
};

template <> struct MappingTraits<ELFYAML::NoteEntry> {
  static void mapping(IO &IO, ELFYAML::NoteEntry &N);
};

template <> struct MappingTraits<ELFYAML::VerdefEntry> {
  static void mapping(IO &IO, ELFYAML::VerdefEntry &E);
};

template <> struct MappingTraits<ELFYAML::VerneedEntry> {
  static void mapping(IO &IO, ELFYAML::VerneedEntry &E);
};

template <> struct MappingTraits<ELFYAML::VernauxEntry> {
  static void mapping(IO &IO, ELFYAML::VernauxEntry &E);
};

template <> struct MappingTraits<ELFYAML::LinkerOption> {
  static void mapping(IO &IO, ELFYAML::LinkerOption &Sym);
};

template <> struct MappingTraits<ELFYAML::CallGraphEntryWeight> {
  static void mapping(IO &IO, ELFYAML::CallGraphEntryWeight &E);
};

template <> struct MappingTraits<ELFYAML::Relocation> {
  static void mapping(IO &IO, ELFYAML::Relocation &Rel);
};

template <> struct MappingTraits<ELFYAML::ARMIndexTableEntry> {
  static void mapping(IO &IO, ELFYAML::ARMIndexTableEntry &E);
};

template <> struct MappingTraits<std::unique_ptr<ELFYAML::Chunk>> {
  static void mapping(IO &IO, std::unique_ptr<ELFYAML::Chunk> &C);
  static std::string validate(IO &io, std::unique_ptr<ELFYAML::Chunk> &C);
};

template <>
struct MappingTraits<ELFYAML::Object> {
  static void mapping(IO &IO, ELFYAML::Object &Object);
};

template <> struct MappingTraits<ELFYAML::SectionOrType> {
  static void mapping(IO &IO, ELFYAML::SectionOrType &sectionOrType);
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_ELFYAML_H
//===- COFFYAML.h - COFF YAMLIO implementation ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares classes for handling the YAML representation of COFF.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_COFFYAML_H
#define LLVM_OBJECTYAML_COFFYAML_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
#include "llvm/ObjectYAML/CodeViewYAMLDebugSections.h"
#include "llvm/ObjectYAML/CodeViewYAMLTypeHashing.h"
#include "llvm/ObjectYAML/CodeViewYAMLTypes.h"
#include "llvm/ObjectYAML/YAML.h"
#include <cstdint>
#include <optional>
#include <vector>

namespace llvm {

namespace COFF {

inline Characteristics operator|(Characteristics a, Characteristics b) {
  uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
  return static_cast<Characteristics>(Ret);
}

inline SectionCharacteristics operator|(SectionCharacteristics a,
                                        SectionCharacteristics b) {
  uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
  return static_cast<SectionCharacteristics>(Ret);
}

inline DLLCharacteristics operator|(DLLCharacteristics a,
                                    DLLCharacteristics b) {
  uint16_t Ret = static_cast<uint16_t>(a) | static_cast<uint16_t>(b);
  return static_cast<DLLCharacteristics>(Ret);
}
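
// Example (a sketch combining flag enumerators from llvm/BinaryFormat/COFF.h
// with the helpers above):
//
//   COFF::SectionCharacteristics C = COFF::IMAGE_SCN_CNT_CODE |
//                                    COFF::IMAGE_SCN_MEM_EXECUTE |
//                                    COFF::IMAGE_SCN_MEM_READ;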

} // end namespace COFF

// The structure of the YAML files is not an exact 1:1 match to COFF. In order
// to use yaml::IO, we use these structures, which are closer to the source.
namespace COFFYAML {

LLVM_YAML_STRONG_TYPEDEF(uint8_t, COMDATType)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, WeakExternalCharacteristics)
LLVM_YAML_STRONG_TYPEDEF(uint8_t, AuxSymbolType)

struct Relocation {
  uint32_t VirtualAddress;
  uint16_t Type;

  // Normally a Relocation refers to the symbol via its name. It can also use
  // a direct symbol table index instead (with no name specified), which
  // allows disambiguating between multiple symbols with the same name or
  // crafting intentionally broken files for testing.
  StringRef SymbolName;
  std::optional<uint32_t> SymbolTableIndex;
};

struct SectionDataEntry {
  std::optional<uint32_t> UInt32;
  yaml::BinaryRef Binary;
  std::optional<object::coff_load_configuration32> LoadConfig32;
  std::optional<object::coff_load_configuration64> LoadConfig64;

  size_t size() const;
  void writeAsBinary(raw_ostream &OS) const;
};

struct Section {
  COFF::section Header;
  unsigned Alignment = 0;
  yaml::BinaryRef SectionData;
  std::vector<CodeViewYAML::YAMLDebugSubsection> DebugS;
  std::vector<CodeViewYAML::LeafRecord> DebugT;
  std::vector<CodeViewYAML::LeafRecord> DebugP;
  std::optional<CodeViewYAML::DebugHSection> DebugH;
  std::vector<SectionDataEntry> StructuredData;
  std::vector<Relocation> Relocations;
  StringRef Name;

  Section();
};

struct Symbol {
  COFF::symbol Header;
  COFF::SymbolBaseType SimpleType = COFF::IMAGE_SYM_TYPE_NULL;
  COFF::SymbolComplexType ComplexType = COFF::IMAGE_SYM_DTYPE_NULL;
  std::optional<COFF::AuxiliaryFunctionDefinition> FunctionDefinition;
  std::optional<COFF::AuxiliarybfAndefSymbol> bfAndefSymbol;
  std::optional<COFF::AuxiliaryWeakExternal> WeakExternal;
  StringRef File;
  std::optional<COFF::AuxiliarySectionDefinition> SectionDefinition;
  std::optional<COFF::AuxiliaryCLRToken> CLRToken;
  StringRef Name;

  Symbol();
};

struct PEHeader {
  COFF::PE32Header Header;
  std::optional<COFF::DataDirectory>
      DataDirectories[COFF::NUM_DATA_DIRECTORIES];
};

struct Object {
  std::optional<PEHeader> OptionalHeader;
  COFF::header Header;
  std::vector<Section> Sections;
  std::vector<Symbol> Symbols;

  Object();
};

} // end namespace COFFYAML

} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Section)
LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Symbol)
LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::SectionDataEntry)

namespace llvm {
namespace yaml {

template <>
struct ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics> {
  static void enumeration(IO &IO, COFFYAML::WeakExternalCharacteristics &Value);
};

template <>
struct ScalarEnumerationTraits<COFFYAML::AuxSymbolType> {
  static void enumeration(IO &IO, COFFYAML::AuxSymbolType &Value);
};

template <>
struct ScalarEnumerationTraits<COFFYAML::COMDATType> {
  static void enumeration(IO &IO, COFFYAML::COMDATType &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::MachineTypes> {
  static void enumeration(IO &IO, COFF::MachineTypes &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::SymbolBaseType> {
  static void enumeration(IO &IO, COFF::SymbolBaseType &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::SymbolStorageClass> {
  static void enumeration(IO &IO, COFF::SymbolStorageClass &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::SymbolComplexType> {
  static void enumeration(IO &IO, COFF::SymbolComplexType &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::RelocationTypeI386> {
  static void enumeration(IO &IO, COFF::RelocationTypeI386 &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::RelocationTypeAMD64> {
  static void enumeration(IO &IO, COFF::RelocationTypeAMD64 &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::RelocationTypesARM> {
  static void enumeration(IO &IO, COFF::RelocationTypesARM &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::RelocationTypesARM64> {
  static void enumeration(IO &IO, COFF::RelocationTypesARM64 &Value);
};

template <>
struct ScalarEnumerationTraits<COFF::WindowsSubsystem> {
  static void enumeration(IO &IO, COFF::WindowsSubsystem &Value);
};

template <>
struct ScalarBitSetTraits<COFF::Characteristics> {
  static void bitset(IO &IO, COFF::Characteristics &Value);
};

template <>
struct ScalarBitSetTraits<COFF::SectionCharacteristics> {
  static void bitset(IO &IO, COFF::SectionCharacteristics &Value);
};

template <>
struct ScalarBitSetTraits<COFF::DLLCharacteristics> {
  static void bitset(IO &IO, COFF::DLLCharacteristics &Value);
};

template <>
struct MappingTraits<COFFYAML::Relocation> {
  static void mapping(IO &IO, COFFYAML::Relocation &Rel);
};

template <>
struct MappingTraits<COFFYAML::PEHeader> {
  static void mapping(IO &IO, COFFYAML::PEHeader &PH);
};

template <>
struct MappingTraits<COFF::DataDirectory> {
  static void mapping(IO &IO, COFF::DataDirectory &DD);
};

template <>
struct MappingTraits<COFF::header> {
  static void mapping(IO &IO, COFF::header &H);
};

template <> struct MappingTraits<COFF::AuxiliaryFunctionDefinition> {
  static void mapping(IO &IO, COFF::AuxiliaryFunctionDefinition &AFD);
};

template <> struct MappingTraits<COFF::AuxiliarybfAndefSymbol> {
  static void mapping(IO &IO, COFF::AuxiliarybfAndefSymbol &AAS);
};

template <> struct MappingTraits<COFF::AuxiliaryWeakExternal> {
  static void mapping(IO &IO, COFF::AuxiliaryWeakExternal &AWE);
};

template <> struct MappingTraits<COFF::AuxiliarySectionDefinition> {
  static void mapping(IO &IO, COFF::AuxiliarySectionDefinition &ASD);
};

template <> struct MappingTraits<COFF::AuxiliaryCLRToken> {
  static void mapping(IO &IO, COFF::AuxiliaryCLRToken &ACT);
};

template <> struct MappingTraits<object::coff_load_configuration32> {
  static void mapping(IO &IO, object::coff_load_configuration32 &ACT);
};

template <> struct MappingTraits<object::coff_load_configuration64> {
  static void mapping(IO &IO, object::coff_load_configuration64 &ACT);
};

template <> struct MappingTraits<object::coff_load_config_code_integrity> {
  static void mapping(IO &IO, object::coff_load_config_code_integrity &ACT);
};

template <>
struct MappingTraits<COFFYAML::Symbol> {
  static void mapping(IO &IO, COFFYAML::Symbol &S);
};

template <> struct MappingTraits<COFFYAML::SectionDataEntry> {
  static void mapping(IO &IO, COFFYAML::SectionDataEntry &Sec);
};

template <>
struct MappingTraits<COFFYAML::Section> {
  static void mapping(IO &IO, COFFYAML::Section &Sec);
};

template <>
struct MappingTraits<COFFYAML::Object> {
  static void mapping(IO &IO, COFFYAML::Object &Obj);
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_COFFYAML_H
//===- ArchiveYAML.h - Archive YAMLIO implementation ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation of archives.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_ARCHIVEYAML_H
#define LLVM_OBJECTYAML_ARCHIVEYAML_H

#include "llvm/Support/YAMLTraits.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/ADT/MapVector.h"
#include <optional>

namespace llvm {
namespace ArchYAML {

struct Archive {
  struct Child {
    struct Field {
      Field() = default;
      Field(StringRef Default, unsigned Length)
          : DefaultValue(Default), MaxLength(Length) {}
      StringRef Value;
      StringRef DefaultValue;
      unsigned MaxLength;
    };

    Child() {
      Fields["Name"] = {"", 16};
      Fields["LastModified"] = {"0", 12};
      Fields["UID"] = {"0", 6};
      Fields["GID"] = {"0", 6};
      Fields["AccessMode"] = {"0", 8};
      Fields["Size"] = {"0", 10};
      Fields["Terminator"] = {"`\n", 2};
    }

    MapVector<StringRef, Field> Fields;

    std::optional<yaml::BinaryRef> Content;
    std::optional<llvm::yaml::Hex8> PaddingByte;
  };

  StringRef Magic;
  std::optional<std::vector<Child>> Members;
  std::optional<yaml::BinaryRef> Content;
};

} // end namespace ArchYAML
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ArchYAML::Archive::Child)

namespace llvm {
namespace yaml {

template <> struct MappingTraits<ArchYAML::Archive> {
  static void mapping(IO &IO, ArchYAML::Archive &A);
  static std::string validate(IO &, ArchYAML::Archive &A);
};

template <> struct MappingTraits<ArchYAML::Archive::Child> {
  static void mapping(IO &IO, ArchYAML::Archive::Child &C);
  static std::string validate(IO &, ArchYAML::Archive::Child &C);
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_ARCHIVEYAML_H
//===- CodeViewYAMLSymbols.h - CodeView YAMLIO Symbol implementation ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines classes for handling the YAML representation of CodeView
// Debug Info.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLSYMBOLS_H
#define LLVM_OBJECTYAML_CODEVIEWYAMLSYMBOLS_H

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/YAMLTraits.h"
#include <memory>

namespace llvm {
namespace CodeViewYAML {

namespace detail {

struct SymbolRecordBase;

} // end namespace detail

struct SymbolRecord {
  std::shared_ptr<detail::SymbolRecordBase> Symbol;

  codeview::CVSymbol
  toCodeViewSymbol(BumpPtrAllocator &Allocator,
                   codeview::CodeViewContainer Container) const;

  static Expected<SymbolRecord> fromCodeViewSymbol(codeview::CVSymbol Symbol);
};

} // end namespace CodeViewYAML
} // end namespace llvm

LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::SymbolRecord)
LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::SymbolRecord)

#endif // LLVM_OBJECTYAML_CODEVIEWYAMLSYMBOLS_H
//===- MinidumpYAML.h - Minidump YAMLIO implementation ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_MINIDUMPYAML_H
#define LLVM_OBJECTYAML_MINIDUMPYAML_H

#include "llvm/BinaryFormat/Minidump.h"
#include "llvm/Object/Minidump.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/YAMLTraits.h"

namespace llvm {
namespace MinidumpYAML {

/// The base class for all minidump streams. The "Type" of the stream
/// corresponds to the Stream Type field in the minidump file. The "Kind" field
/// specifies how we are going to treat it. For highly specialized streams
/// (e.g. SystemInfo), there is a 1:1 mapping between Types and Kinds, but in
/// general one stream Kind can be used to represent multiple stream Types
/// (e.g. any unrecognized stream Type will be handled via RawContentStream).
/// The mapping from Types to Kinds is fixed and given by the static getKind
/// function.
struct Stream {
  enum class StreamKind {
    Exception,
    MemoryInfoList,
    MemoryList,
    ModuleList,
    RawContent,
    SystemInfo,
    TextContent,
    ThreadList,
  };

  Stream(StreamKind Kind, minidump::StreamType Type) : Kind(Kind), Type(Type) {}
  virtual ~Stream(); // anchor

  const StreamKind Kind;
  const minidump::StreamType Type;

  /// Get the stream Kind used for representing streams of a given Type.
  static StreamKind getKind(minidump::StreamType Type);

  /// Create an empty stream of the given Type.
  static std::unique_ptr<Stream> create(minidump::StreamType Type);

  /// Create a stream from the given stream directory entry.
  static Expected<std::unique_ptr<Stream>>
  create(const minidump::Directory &StreamDesc,
         const object::MinidumpFile &File);
};
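
// Example (a hedged sketch; it assumes `File` is an already-parsed
// object::MinidumpFile whose streams() returns the stream directory):
//
//   for (const minidump::Directory &Dir : File.streams()) {
//     Expected<std::unique_ptr<Stream>> StreamOrErr = Stream::create(Dir, File);
//     if (!StreamOrErr)
//       return; // handle StreamOrErr.takeError()
//   }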

namespace detail {
/// A stream representing a list of abstract entries in a minidump stream. Its
/// instantiations can be used to represent the ModuleList stream and other
/// streams with a similar structure.
template <typename EntryT> struct ListStream : public Stream {
  using entry_type = EntryT;

  std::vector<entry_type> Entries;

  explicit ListStream(std::vector<entry_type> Entries = {})
      : Stream(EntryT::Kind, EntryT::Type), Entries(std::move(Entries)) {}

  static bool classof(const Stream *S) { return S->Kind == EntryT::Kind; }
};

/// A structure containing all data belonging to a single minidump module.
struct ParsedModule {
  static constexpr Stream::StreamKind Kind = Stream::StreamKind::ModuleList;
  static constexpr minidump::StreamType Type = minidump::StreamType::ModuleList;

  minidump::Module Entry;
  std::string Name;
  yaml::BinaryRef CvRecord;
  yaml::BinaryRef MiscRecord;
};

/// A structure containing all data belonging to a single minidump thread.
struct ParsedThread {
  static constexpr Stream::StreamKind Kind = Stream::StreamKind::ThreadList;
  static constexpr minidump::StreamType Type = minidump::StreamType::ThreadList;

  minidump::Thread Entry;
  yaml::BinaryRef Stack;
  yaml::BinaryRef Context;
};

/// A structure containing all data describing a single memory region.
struct ParsedMemoryDescriptor {
  static constexpr Stream::StreamKind Kind = Stream::StreamKind::MemoryList;
  static constexpr minidump::StreamType Type = minidump::StreamType::MemoryList;

  minidump::MemoryDescriptor Entry;
  yaml::BinaryRef Content;
};
} // namespace detail

using ModuleListStream = detail::ListStream<detail::ParsedModule>;
using ThreadListStream = detail::ListStream<detail::ParsedThread>;
using MemoryListStream = detail::ListStream<detail::ParsedMemoryDescriptor>;

/// ExceptionStream minidump stream.
struct ExceptionStream : public Stream {
  minidump::ExceptionStream MDExceptionStream;
  yaml::BinaryRef ThreadContext;

  ExceptionStream()
      : Stream(StreamKind::Exception, minidump::StreamType::Exception),
        MDExceptionStream({}) {}

  explicit ExceptionStream(const minidump::ExceptionStream &MDExceptionStream,
                           ArrayRef<uint8_t> ThreadContext)
      : Stream(StreamKind::Exception, minidump::StreamType::Exception),
        MDExceptionStream(MDExceptionStream), ThreadContext(ThreadContext) {}

  static bool classof(const Stream *S) {
    return S->Kind == StreamKind::Exception;
  }
};

/// A structure containing the list of MemoryInfo entries comprising a
/// MemoryInfoList stream.
struct MemoryInfoListStream : public Stream {
  std::vector<minidump::MemoryInfo> Infos;

  MemoryInfoListStream()
      : Stream(StreamKind::MemoryInfoList,
               minidump::StreamType::MemoryInfoList) {}

  explicit MemoryInfoListStream(
      iterator_range<object::MinidumpFile::MemoryInfoIterator> Range)
      : Stream(StreamKind::MemoryInfoList,
               minidump::StreamType::MemoryInfoList),
        Infos(Range.begin(), Range.end()) {}

  static bool classof(const Stream *S) {
    return S->Kind == StreamKind::MemoryInfoList;
  }
};

/// A minidump stream represented as a sequence of hex bytes. This is used as a
/// fallback when no other stream kind is suitable.
struct RawContentStream : public Stream {
  yaml::BinaryRef Content;
  yaml::Hex32 Size;

  RawContentStream(minidump::StreamType Type, ArrayRef<uint8_t> Content = {})
      : Stream(StreamKind::RawContent, Type), Content(Content),
        Size(Content.size()) {}

  static bool classof(const Stream *S) {
    return S->Kind == StreamKind::RawContent;
  }
};

/// SystemInfo minidump stream.
struct SystemInfoStream : public Stream {
  minidump::SystemInfo Info;
  std::string CSDVersion;

  SystemInfoStream()
      : Stream(StreamKind::SystemInfo, minidump::StreamType::SystemInfo) {
    memset(&Info, 0, sizeof(Info));
  }

  explicit SystemInfoStream(const minidump::SystemInfo &Info,
                            std::string CSDVersion)
      : Stream(StreamKind::SystemInfo, minidump::StreamType::SystemInfo),
        Info(Info), CSDVersion(std::move(CSDVersion)) {}

  static bool classof(const Stream *S) {
    return S->Kind == StreamKind::SystemInfo;
  }
};

/// A StringRef, which is printed using YAML block notation.
LLVM_YAML_STRONG_TYPEDEF(StringRef, BlockStringRef)

/// A minidump stream containing textual data (typically, the contents of a
/// /proc/<pid> file on Linux).
struct TextContentStream : public Stream {
  BlockStringRef Text;

  TextContentStream(minidump::StreamType Type, StringRef Text = {})
      : Stream(StreamKind::TextContent, Type), Text(Text) {}

  static bool classof(const Stream *S) {
    return S->Kind == StreamKind::TextContent;
  }
};

/// The top level structure representing a minidump object, consisting of a
/// minidump header, and zero or more streams. To construct an Object from a
/// minidump file, use the static create function. To serialize to/from yaml,
/// use the appropriate streaming operator on a yaml stream.
struct Object {
  Object() = default;
  Object(const Object &) = delete;
  Object &operator=(const Object &) = delete;
  Object(Object &&) = default;
  Object &operator=(Object &&) = default;

  Object(const minidump::Header &Header,
         std::vector<std::unique_ptr<Stream>> Streams)
      : Header(Header), Streams(std::move(Streams)) {}

  /// The minidump header.
  minidump::Header Header;

  /// The list of streams in this minidump object.
  std::vector<std::unique_ptr<Stream>> Streams;

  static Expected<Object> create(const object::MinidumpFile &File);
};
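
// Example (a sketch; `File` is an already-parsed object::MinidumpFile, and
// yaml::Output can target any raw_ostream, here llvm::outs()):
//
//   Expected<Object> ObjOrErr = Object::create(File);
//   if (ObjOrErr) {
//     yaml::Output YOut(outs());
//     YOut << *ObjOrErr;
//   }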

} // namespace MinidumpYAML

namespace yaml {
template <> struct BlockScalarTraits<MinidumpYAML::BlockStringRef> {
  static void output(const MinidumpYAML::BlockStringRef &Text, void *,
                     raw_ostream &OS) {
    OS << Text;
  }

  static StringRef input(StringRef Scalar, void *,
                         MinidumpYAML::BlockStringRef &Text) {
    Text = Scalar;
    return "";
  }
};

template <> struct MappingTraits<std::unique_ptr<MinidumpYAML::Stream>> {
  static void mapping(IO &IO, std::unique_ptr<MinidumpYAML::Stream> &S);
  static std::string validate(IO &IO, std::unique_ptr<MinidumpYAML::Stream> &S);
};

template <> struct MappingContextTraits<minidump::MemoryDescriptor, BinaryRef> {
  static void mapping(IO &IO, minidump::MemoryDescriptor &Memory,
                      BinaryRef &Content);
};

} // namespace yaml

} // namespace llvm

LLVM_YAML_DECLARE_BITSET_TRAITS(llvm::minidump::MemoryProtection)
LLVM_YAML_DECLARE_BITSET_TRAITS(llvm::minidump::MemoryState)
LLVM_YAML_DECLARE_BITSET_TRAITS(llvm::minidump::MemoryType)

LLVM_YAML_DECLARE_ENUM_TRAITS(llvm::minidump::ProcessorArchitecture)
LLVM_YAML_DECLARE_ENUM_TRAITS(llvm::minidump::OSPlatform)
LLVM_YAML_DECLARE_ENUM_TRAITS(llvm::minidump::StreamType)

LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::minidump::CPUInfo::ArmInfo)
LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::minidump::CPUInfo::OtherInfo)
LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::minidump::CPUInfo::X86Info)
LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::minidump::Exception)
LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::minidump::MemoryInfo)
LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::minidump::VSFixedFileInfo)

LLVM_YAML_DECLARE_MAPPING_TRAITS(
    llvm::MinidumpYAML::MemoryListStream::entry_type)
LLVM_YAML_DECLARE_MAPPING_TRAITS(
    llvm::MinidumpYAML::ModuleListStream::entry_type)
LLVM_YAML_DECLARE_MAPPING_TRAITS(
    llvm::MinidumpYAML::ThreadListStream::entry_type)

LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::MinidumpYAML::Stream>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MinidumpYAML::MemoryListStream::entry_type)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MinidumpYAML::ModuleListStream::entry_type)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MinidumpYAML::ThreadListStream::entry_type)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::minidump::MemoryInfo)

LLVM_YAML_DECLARE_MAPPING_TRAITS(llvm::MinidumpYAML::Object)

#endif // LLVM_OBJECTYAML_MINIDUMPYAML_H
//===- OffloadYAML.h - Offload Binary YAMLIO implementation -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation of
/// offloading binaries.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_OFFLOADYAML_H
#define LLVM_OBJECTYAML_OFFLOADYAML_H

#include "llvm/ADT/MapVector.h"
#include "llvm/Object/OffloadBinary.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/YAMLTraits.h"
#include <optional>

namespace llvm {
namespace OffloadYAML {

struct Binary {
  struct StringEntry {
    StringRef Key;
    StringRef Value;
  };

  struct Member {
    std::optional<object::ImageKind> ImageKind;
    std::optional<object::OffloadKind> OffloadKind;
    std::optional<uint32_t> Flags;
    std::optional<std::vector<StringEntry>> StringEntries;
    std::optional<yaml::BinaryRef> Content;
  };

  std::optional<uint32_t> Version;
  std::optional<uint64_t> Size;
  std::optional<uint64_t> EntryOffset;
  std::optional<uint64_t> EntrySize;
  std::vector<Member> Members;
};
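// A construction sketch (illustrative only; the enumerators are assumed from
// llvm/Object/OffloadBinary.h):
//
//   OffloadYAML::Binary B;
//   OffloadYAML::Binary::Member M;
//   M.ImageKind = object::IMG_Object;
//   M.OffloadKind = object::OFK_OpenMP;
//   B.Members.push_back(M);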

} // end namespace OffloadYAML
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::OffloadYAML::Binary::Member)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::OffloadYAML::Binary::StringEntry)

namespace llvm {
namespace yaml {

template <> struct ScalarEnumerationTraits<object::ImageKind> {
  static void enumeration(IO &IO, object::ImageKind &Value);
};

template <> struct ScalarEnumerationTraits<object::OffloadKind> {
  static void enumeration(IO &IO, object::OffloadKind &Value);
};

template <> struct MappingTraits<OffloadYAML::Binary> {
  static void mapping(IO &IO, OffloadYAML::Binary &O);
};

template <> struct MappingTraits<OffloadYAML::Binary::StringEntry> {
  static void mapping(IO &IO, OffloadYAML::Binary::StringEntry &M);
};

template <> struct MappingTraits<OffloadYAML::Binary::Member> {
  static void mapping(IO &IO, OffloadYAML::Binary::Member &M);
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_OFFLOADYAML_H
//===----- XCOFFYAML.h - XCOFF YAMLIO implementation ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares classes for handling the YAML representation of XCOFF.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECTYAML_XCOFFYAML_H
#define LLVM_OBJECTYAML_XCOFFYAML_H

#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/ObjectYAML/YAML.h"
#include <optional>
#include <vector>

namespace llvm {
namespace XCOFFYAML {

struct FileHeader {
  llvm::yaml::Hex16 Magic;
  uint16_t NumberOfSections;
  int32_t TimeStamp;
  llvm::yaml::Hex64 SymbolTableOffset;
  int32_t NumberOfSymTableEntries;
  uint16_t AuxHeaderSize;
  llvm::yaml::Hex16 Flags;
};

struct AuxiliaryHeader {
  std::optional<llvm::yaml::Hex16> Magic;
  std::optional<llvm::yaml::Hex16> Version;
  std::optional<llvm::yaml::Hex64> TextStartAddr;
  std::optional<llvm::yaml::Hex64> DataStartAddr;
  std::optional<llvm::yaml::Hex64> TOCAnchorAddr;
  std::optional<uint16_t> SecNumOfEntryPoint;
  std::optional<uint16_t> SecNumOfText;
  std::optional<uint16_t> SecNumOfData;
  std::optional<uint16_t> SecNumOfTOC;
  std::optional<uint16_t> SecNumOfLoader;
  std::optional<uint16_t> SecNumOfBSS;
  std::optional<llvm::yaml::Hex16> MaxAlignOfText;
  std::optional<llvm::yaml::Hex16> MaxAlignOfData;
  std::optional<llvm::yaml::Hex16> ModuleType;
  std::optional<llvm::yaml::Hex8> CpuFlag;
  std::optional<llvm::yaml::Hex8> CpuType;
  std::optional<llvm::yaml::Hex8> TextPageSize;
  std::optional<llvm::yaml::Hex8> DataPageSize;
  std::optional<llvm::yaml::Hex8> StackPageSize;
  std::optional<llvm::yaml::Hex8> FlagAndTDataAlignment;
  std::optional<llvm::yaml::Hex64> TextSize;
  std::optional<llvm::yaml::Hex64> InitDataSize;
  std::optional<llvm::yaml::Hex64> BssDataSize;
  std::optional<llvm::yaml::Hex64> EntryPointAddr;
  std::optional<llvm::yaml::Hex64> MaxStackSize;
  std::optional<llvm::yaml::Hex64> MaxDataSize;
  std::optional<uint16_t> SecNumOfTData;
  std::optional<uint16_t> SecNumOfTBSS;
  std::optional<llvm::yaml::Hex16> Flag;
};

struct Relocation {
  llvm::yaml::Hex64 VirtualAddress;
  llvm::yaml::Hex64 SymbolIndex;
  llvm::yaml::Hex8 Info;
  llvm::yaml::Hex8 Type;
};

struct Section {
  StringRef SectionName;
  llvm::yaml::Hex64 Address;
  llvm::yaml::Hex64 Size;
  llvm::yaml::Hex64 FileOffsetToData;
  llvm::yaml::Hex64 FileOffsetToRelocations;
  llvm::yaml::Hex64 FileOffsetToLineNumbers; // Line number pointer. Not supported yet.
  llvm::yaml::Hex16 NumberOfRelocations;
  llvm::yaml::Hex16 NumberOfLineNumbers; // Line number counts. Not supported yet.
  uint32_t Flags;
  yaml::BinaryRef SectionData;
  std::vector<Relocation> Relocations;
};

enum AuxSymbolType : uint8_t {
  AUX_EXCEPT = 255,
  AUX_FCN = 254,
  AUX_SYM = 253,
  AUX_FILE = 252,
  AUX_CSECT = 251,
  AUX_SECT = 250,
  AUX_STAT = 249
};

struct AuxSymbolEnt {
  AuxSymbolType Type;

  explicit AuxSymbolEnt(AuxSymbolType T) : Type(T) {}
  virtual ~AuxSymbolEnt();
};
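// The Type tag together with the classof() overloads in the subclasses below
// enables LLVM-style RTTI. A hedged sketch (assumes llvm/Support/Casting.h):
//
//   void inspect(const XCOFFYAML::AuxSymbolEnt *Ent) {
//     if (const auto *FAE = dyn_cast<XCOFFYAML::FileAuxEnt>(Ent)) {
//       // Ent is a file auxiliary entry; use FAE->FileNameOrString, etc.
//     }
//   }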

struct FileAuxEnt : AuxSymbolEnt {
  std::optional<StringRef> FileNameOrString;
  std::optional<XCOFF::CFileStringType> FileStringType;

  FileAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_FILE) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_FILE;
  }
};

struct CsectAuxEnt : AuxSymbolEnt {
  // Only for XCOFF32.
  std::optional<uint32_t> SectionOrLength;
  std::optional<uint32_t> StabInfoIndex;
  std::optional<uint16_t> StabSectNum;
  // Only for XCOFF64.
  std::optional<uint32_t> SectionOrLengthLo;
  std::optional<uint32_t> SectionOrLengthHi;
  // Common fields for both XCOFF32 and XCOFF64.
  std::optional<uint32_t> ParameterHashIndex;
  std::optional<uint16_t> TypeChkSectNum;
  std::optional<uint8_t> SymbolAlignmentAndType;
  std::optional<XCOFF::StorageMappingClass> StorageMappingClass;

  CsectAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_CSECT) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_CSECT;
  }
};

struct FunctionAuxEnt : AuxSymbolEnt {
  std::optional<uint32_t> OffsetToExceptionTbl; // Only for XCOFF32.
  std::optional<uint64_t> PtrToLineNum;
  std::optional<uint32_t> SizeOfFunction;
  std::optional<int32_t> SymIdxOfNextBeyond;

  FunctionAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_FCN) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_FCN;
  }
};

struct ExceptionAuxEnt : AuxSymbolEnt {
  std::optional<uint64_t> OffsetToExceptionTbl;
  std::optional<uint32_t> SizeOfFunction;
  std::optional<int32_t> SymIdxOfNextBeyond;

  ExceptionAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_EXCEPT) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_EXCEPT;
  }
}; // Only for XCOFF64.

struct BlockAuxEnt : AuxSymbolEnt {
  // Only for XCOFF32.
  std::optional<uint16_t> LineNumHi;
  std::optional<uint16_t> LineNumLo;
  // Only for XCOFF64.
  std::optional<uint32_t> LineNum;

  BlockAuxEnt() : AuxSymbolEnt(AuxSymbolType::AUX_SYM) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_SYM;
  }
};

struct SectAuxEntForDWARF : AuxSymbolEnt {
  std::optional<uint32_t> LengthOfSectionPortion;
  std::optional<uint32_t> NumberOfRelocEnt;

  SectAuxEntForDWARF() : AuxSymbolEnt(AuxSymbolType::AUX_SECT) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_SECT;
  }
};

struct SectAuxEntForStat : AuxSymbolEnt {
  std::optional<uint32_t> SectionLength;
  std::optional<uint16_t> NumberOfRelocEnt;
  std::optional<uint16_t> NumberOfLineNum;

  SectAuxEntForStat() : AuxSymbolEnt(AuxSymbolType::AUX_STAT) {}
  static bool classof(const AuxSymbolEnt *S) {
    return S->Type == AuxSymbolType::AUX_STAT;
  }
}; // Only for XCOFF32.

struct Symbol {
  StringRef SymbolName;
  llvm::yaml::Hex64 Value; // Symbol value; storage class-dependent.
  std::optional<StringRef> SectionName;
  std::optional<uint16_t> SectionIndex;
  llvm::yaml::Hex16 Type;
  XCOFF::StorageClass StorageClass;
  std::optional<uint8_t> NumberOfAuxEntries;
  std::vector<std::unique_ptr<AuxSymbolEnt>> AuxEntries;
};

struct StringTable {
  std::optional<uint32_t> ContentSize; // The total size of the string table.
  std::optional<uint32_t> Length; // The value of the length field that
                                  // occupies the first 4 bytes of the table.
  std::optional<std::vector<StringRef>> Strings;
  std::optional<yaml::BinaryRef> RawContent;
};

struct Object {
  FileHeader Header;
  std::optional<AuxiliaryHeader> AuxHeader;
  std::vector<Section> Sections;
  std::vector<Symbol> Symbols;
  StringTable StrTbl;
  Object();
};
} // namespace XCOFFYAML
} // namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(XCOFFYAML::Symbol)
LLVM_YAML_IS_SEQUENCE_VECTOR(XCOFFYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(XCOFFYAML::Section)
LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::XCOFFYAML::AuxSymbolEnt>)

namespace llvm {
namespace yaml {

template <> struct ScalarBitSetTraits<XCOFF::SectionTypeFlags> {
  static void bitset(IO &IO, XCOFF::SectionTypeFlags &Value);
};

template <> struct ScalarEnumerationTraits<XCOFF::StorageClass> {
  static void enumeration(IO &IO, XCOFF::StorageClass &Value);
};

template <> struct ScalarEnumerationTraits<XCOFF::StorageMappingClass> {
  static void enumeration(IO &IO, XCOFF::StorageMappingClass &Value);
};

template <> struct ScalarEnumerationTraits<XCOFF::CFileStringType> {
  static void enumeration(IO &IO, XCOFF::CFileStringType &Type);
};

template <> struct ScalarEnumerationTraits<XCOFFYAML::AuxSymbolType> {
  static void enumeration(IO &IO, XCOFFYAML::AuxSymbolType &Type);
};

template <> struct MappingTraits<XCOFFYAML::FileHeader> {
  static void mapping(IO &IO, XCOFFYAML::FileHeader &H);
};

template <> struct MappingTraits<XCOFFYAML::AuxiliaryHeader> {
  static void mapping(IO &IO, XCOFFYAML::AuxiliaryHeader &AuxHdr);
};

template <> struct MappingTraits<std::unique_ptr<XCOFFYAML::AuxSymbolEnt>> {
  static void mapping(IO &IO, std::unique_ptr<XCOFFYAML::AuxSymbolEnt> &AuxSym);
};

template <> struct MappingTraits<XCOFFYAML::Symbol> {
  static void mapping(IO &IO, XCOFFYAML::Symbol &S);
};

template <> struct MappingTraits<XCOFFYAML::Relocation> {
  static void mapping(IO &IO, XCOFFYAML::Relocation &R);
};

template <> struct MappingTraits<XCOFFYAML::Section> {
  static void mapping(IO &IO, XCOFFYAML::Section &Sec);
};

template <> struct MappingTraits<XCOFFYAML::StringTable> {
  static void mapping(IO &IO, XCOFFYAML::StringTable &Str);
};

template <> struct MappingTraits<XCOFFYAML::Object> {
  static void mapping(IO &IO, XCOFFYAML::Object &Obj);
};

} // namespace yaml
} // namespace llvm

#endif // LLVM_OBJECTYAML_XCOFFYAML_H
//===- MachOYAML.h - Mach-O YAMLIO implementation ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation
/// of Mach-O.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_MACHOYAML_H
#define LLVM_OBJECTYAML_MACHOYAML_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/ObjectYAML/DWARFYAML.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

namespace llvm {
namespace MachOYAML {

struct Relocation {
  // Offset in the section to what is being relocated.
  llvm::yaml::Hex32 address;
  // Symbol index if r_extern == 1 else section index.
  uint32_t symbolnum;
  bool is_pcrel;
  // Real length = 2 ^ length.
  uint8_t length;
  bool is_extern;
  uint8_t type;
  bool is_scattered;
  int32_t value;
};

struct Section {
  char sectname[16];
  char segname[16];
  llvm::yaml::Hex64 addr;
  uint64_t size;
  llvm::yaml::Hex32 offset;
  uint32_t align;
  llvm::yaml::Hex32 reloff;
  uint32_t nreloc;
  llvm::yaml::Hex32 flags;
  llvm::yaml::Hex32 reserved1;
  llvm::yaml::Hex32 reserved2;
  llvm::yaml::Hex32 reserved3;
  std::optional<llvm::yaml::BinaryRef> content;
  std::vector<Relocation> relocations;
};

struct FileHeader {
  llvm::yaml::Hex32 magic;
  llvm::yaml::Hex32 cputype;
  llvm::yaml::Hex32 cpusubtype;
  llvm::yaml::Hex32 filetype;
  uint32_t ncmds;
  uint32_t sizeofcmds;
  llvm::yaml::Hex32 flags;
  llvm::yaml::Hex32 reserved;
};

struct LoadCommand {
  virtual ~LoadCommand();

  llvm::MachO::macho_load_command Data;
  std::vector<Section> Sections;
  std::vector<MachO::build_tool_version> Tools;
  std::vector<llvm::yaml::Hex8> PayloadBytes;
  std::string Content;
  uint64_t ZeroPadBytes;
};

struct NListEntry {
  uint32_t n_strx;
  llvm::yaml::Hex8 n_type;
  uint8_t n_sect;
  uint16_t n_desc;
  uint64_t n_value;
};

struct RebaseOpcode {
  MachO::RebaseOpcode Opcode;
  uint8_t Imm;
  std::vector<yaml::Hex64> ExtraData;
};

struct BindOpcode {
  MachO::BindOpcode Opcode;
  uint8_t Imm;
  std::vector<yaml::Hex64> ULEBExtraData;
  std::vector<int64_t> SLEBExtraData;
  StringRef Symbol;
};

struct ExportEntry {
  uint64_t TerminalSize = 0;
  uint64_t NodeOffset = 0;
  std::string Name;
  llvm::yaml::Hex64 Flags = 0;
  llvm::yaml::Hex64 Address = 0;
  llvm::yaml::Hex64 Other = 0;
  std::string ImportName;
  std::vector<MachOYAML::ExportEntry> Children;
};

struct DataInCodeEntry {
  llvm::yaml::Hex32 Offset;
  uint16_t Length;
  llvm::yaml::Hex16 Kind;
};

struct LinkEditData {
  std::vector<MachOYAML::RebaseOpcode> RebaseOpcodes;
  std::vector<MachOYAML::BindOpcode> BindOpcodes;
  std::vector<MachOYAML::BindOpcode> WeakBindOpcodes;
  std::vector<MachOYAML::BindOpcode> LazyBindOpcodes;
  MachOYAML::ExportEntry ExportTrie;
  std::vector<NListEntry> NameList;
  std::vector<StringRef> StringTable;
  std::vector<yaml::Hex32> IndirectSymbols;
  std::vector<yaml::Hex64> FunctionStarts;
  std::vector<DataInCodeEntry> DataInCode;
  std::vector<yaml::Hex8> ChainedFixups;

  bool isEmpty() const;
};

struct Object {
  bool IsLittleEndian;
  FileHeader Header;
  std::vector<LoadCommand> LoadCommands;
  std::vector<Section> Sections;
  LinkEditData LinkEdit;
  std::optional<llvm::yaml::BinaryRef> RawLinkEditSegment;
  DWARFYAML::Data DWARF;
};

struct FatHeader {
  llvm::yaml::Hex32 magic;
  uint32_t nfat_arch;
};

struct FatArch {
  llvm::yaml::Hex32 cputype;
  llvm::yaml::Hex32 cpusubtype;
  llvm::yaml::Hex64 offset;
  uint64_t size;
  uint32_t align;
  llvm::yaml::Hex32 reserved;
};

struct UniversalBinary {
  FatHeader Header;
  std::vector<FatArch> FatArchs;
  std::vector<Object> Slices;
};

} // end namespace MachOYAML
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::LoadCommand)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Section)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::RebaseOpcode)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::BindOpcode)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::ExportEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::NListEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Object)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::FatArch)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::DataInCodeEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachO::build_tool_version)

namespace llvm {

class raw_ostream;

namespace yaml {

template <> struct MappingTraits<MachOYAML::FileHeader> {
  static void mapping(IO &IO, MachOYAML::FileHeader &FileHeader);
};

template <> struct MappingTraits<MachOYAML::Object> {
  static void mapping(IO &IO, MachOYAML::Object &Object);
};

template <> struct MappingTraits<MachOYAML::FatHeader> {
  static void mapping(IO &IO, MachOYAML::FatHeader &FatHeader);
};

template <> struct MappingTraits<MachOYAML::FatArch> {
  static void mapping(IO &IO, MachOYAML::FatArch &FatArch);
};

template <> struct MappingTraits<MachOYAML::UniversalBinary> {
  static void mapping(IO &IO, MachOYAML::UniversalBinary &UniversalBinary);
};

template <> struct MappingTraits<MachOYAML::LoadCommand> {
  static void mapping(IO &IO, MachOYAML::LoadCommand &LoadCommand);
};

template <> struct MappingTraits<MachOYAML::LinkEditData> {
  static void mapping(IO &IO, MachOYAML::LinkEditData &LinkEditData);
};

template <> struct MappingTraits<MachOYAML::RebaseOpcode> {
  static void mapping(IO &IO, MachOYAML::RebaseOpcode &RebaseOpcode);
};

template <> struct MappingTraits<MachOYAML::BindOpcode> {
  static void mapping(IO &IO, MachOYAML::BindOpcode &BindOpcode);
};

template <> struct MappingTraits<MachOYAML::ExportEntry> {
  static void mapping(IO &IO, MachOYAML::ExportEntry &ExportEntry);
};

template <> struct MappingTraits<MachOYAML::Relocation> {
  static void mapping(IO &IO, MachOYAML::Relocation &R);
};

template <> struct MappingTraits<MachOYAML::Section> {
  static void mapping(IO &IO, MachOYAML::Section &Section);
  static std::string validate(IO &io, MachOYAML::Section &Section);
};

template <> struct MappingTraits<MachOYAML::NListEntry> {
  static void mapping(IO &IO, MachOYAML::NListEntry &NListEntry);
};

template <> struct MappingTraits<MachO::build_tool_version> {
  static void mapping(IO &IO, MachO::build_tool_version &tool);
};

template <> struct MappingTraits<MachOYAML::DataInCodeEntry> {
  static void mapping(IO &IO, MachOYAML::DataInCodeEntry &DataInCodeEntry);
};

#define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct)                         \
  io.enumCase(value, #LCName, MachO::LCName);

template <> struct ScalarEnumerationTraits<MachO::LoadCommandType> {
  static void enumeration(IO &io, MachO::LoadCommandType &value) {
#include "llvm/BinaryFormat/MachO.def"
    io.enumFallback<Hex32>(value);
  }
};

#define ENUM_CASE(Enum) io.enumCase(value, #Enum, MachO::Enum);

template <> struct ScalarEnumerationTraits<MachO::RebaseOpcode> {
  static void enumeration(IO &io, MachO::RebaseOpcode &value) {
    ENUM_CASE(REBASE_OPCODE_DONE)
    ENUM_CASE(REBASE_OPCODE_SET_TYPE_IMM)
    ENUM_CASE(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB)
    ENUM_CASE(REBASE_OPCODE_ADD_ADDR_ULEB)
    ENUM_CASE(REBASE_OPCODE_ADD_ADDR_IMM_SCALED)
    ENUM_CASE(REBASE_OPCODE_DO_REBASE_IMM_TIMES)
    ENUM_CASE(REBASE_OPCODE_DO_REBASE_ULEB_TIMES)
    ENUM_CASE(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB)
    ENUM_CASE(REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB)
    io.enumFallback<Hex8>(value);
  }
};

template <> struct ScalarEnumerationTraits<MachO::BindOpcode> {
  static void enumeration(IO &io, MachO::BindOpcode &value) {
    ENUM_CASE(BIND_OPCODE_DONE)
    ENUM_CASE(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM)
    ENUM_CASE(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB)
    ENUM_CASE(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM)
    ENUM_CASE(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM)
    ENUM_CASE(BIND_OPCODE_SET_TYPE_IMM)
    ENUM_CASE(BIND_OPCODE_SET_ADDEND_SLEB)
    ENUM_CASE(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB)
    ENUM_CASE(BIND_OPCODE_ADD_ADDR_ULEB)
    ENUM_CASE(BIND_OPCODE_DO_BIND)
    ENUM_CASE(BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB)
    ENUM_CASE(BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED)
    ENUM_CASE(BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB)
    io.enumFallback<Hex8>(value);
  }
};

// This trait is used for the fixed 16-byte character arrays that Mach-O
// structures use to hold strings (e.g. section and segment names).
using char_16 = char[16];

template <> struct ScalarTraits<char_16> {
  static void output(const char_16 &Val, void *, raw_ostream &Out);
  static StringRef input(StringRef Scalar, void *, char_16 &Val);
  static QuotingType mustQuote(StringRef S);
};

// This trait is used for UUIDs. It reads and writes them matching otool's
// formatting style.
using uuid_t = raw_ostream::uuid_t;

template <> struct ScalarTraits<uuid_t> {
  static void output(const uuid_t &Val, void *, raw_ostream &Out);
  static StringRef input(StringRef Scalar, void *, uuid_t &Val);
  static QuotingType mustQuote(StringRef S);
};

// Load Command struct mapping traits

#define LOAD_COMMAND_STRUCT(LCStruct)                                          \
  template <> struct MappingTraits<MachO::LCStruct> {                          \
    static void mapping(IO &IO, MachO::LCStruct &LoadCommand);                 \
  };

#include "llvm/BinaryFormat/MachO.def"

// Extra structures used by load commands
template <> struct MappingTraits<MachO::dylib> {
  static void mapping(IO &IO, MachO::dylib &LoadCommand);
};

template <> struct MappingTraits<MachO::fvmlib> {
  static void mapping(IO &IO, MachO::fvmlib &LoadCommand);
};

template <> struct MappingTraits<MachO::section> {
  static void mapping(IO &IO, MachO::section &LoadCommand);
};

template <> struct MappingTraits<MachO::section_64> {
  static void mapping(IO &IO, MachO::section_64 &LoadCommand);
};

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_OBJECTYAML_MACHOYAML_H
//===- DXContainerYAML.h - DXContainer YAMLIO implementation ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation
/// of DXContainer.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_DXCONTAINERYAML_H
#define LLVM_OBJECTYAML_DXCONTAINERYAML_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/DXContainer.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

namespace llvm {
namespace DXContainerYAML {

struct VersionTuple {
  uint16_t Major;
  uint16_t Minor;
};

// The fields declared optional here are required in the binary format; they
// are populated when reading from a binary, but may be omitted in the YAML
// text because the emitter can compute them.
struct FileHeader {
  std::vector<llvm::yaml::Hex8> Hash;
  VersionTuple Version;
  std::optional<uint32_t> FileSize;
  uint32_t PartCount;
  std::optional<std::vector<uint32_t>> PartOffsets;
};

struct DXILProgram {
  uint8_t MajorVersion;
  uint8_t MinorVersion;
  uint16_t ShaderKind;
  std::optional<uint32_t> Size;
  uint16_t DXILMajorVersion;
  uint16_t DXILMinorVersion;
  std::optional<uint32_t> DXILOffset;
  std::optional<uint32_t> DXILSize;
  std::optional<std::vector<llvm::yaml::Hex8>> DXIL;
};

#define SHADER_FLAG(Num, Val, Str) bool Val = false;
struct ShaderFlags {
  ShaderFlags() = default;
  ShaderFlags(uint64_t FlagData);
  uint64_t getEncodedFlags();
#include "llvm/BinaryFormat/DXContainerConstants.def"
};
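// Illustration of the X-macro expansion above (the .def entry shown is
// hypothetical): an entry such as
//
//   SHADER_FLAG(0, Doubles, "Doubles")
//
// expands inside ShaderFlags to `bool Doubles = false;`, so every flag in the
// .def file becomes an individually mappable boolean member.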

struct ShaderHash {
  ShaderHash() = default;
  ShaderHash(const dxbc::ShaderHash &Data);

  bool IncludesSource;
  std::vector<llvm::yaml::Hex8> Digest;
};

using ResourceBindInfo = dxbc::PSV::v2::ResourceBindInfo;

struct PSVInfo {
  // The version field isn't actually encoded in the file, but it is inferred
  // from the sizes of the data regions. We include it in the YAML because it
  // simplifies the format.
  uint32_t Version;

  dxbc::PSV::v2::RuntimeInfo Info;
  uint32_t ResourceStride;
  std::vector<ResourceBindInfo> Resources;

  void mapInfoForVersion(yaml::IO &IO);

  PSVInfo();
  PSVInfo(const dxbc::PSV::v0::RuntimeInfo *P, uint16_t Stage);
  PSVInfo(const dxbc::PSV::v1::RuntimeInfo *P);
  PSVInfo(const dxbc::PSV::v2::RuntimeInfo *P);
};

struct Part {
  Part() = default;
  Part(std::string N, uint32_t S) : Name(N), Size(S) {}
  std::string Name;
  uint32_t Size;
  std::optional<DXILProgram> Program;
  std::optional<ShaderFlags> Flags;
  std::optional<ShaderHash> Hash;
  std::optional<PSVInfo> Info;
};

struct Object {
  FileHeader Header;
  std::vector<Part> Parts;
};

} // namespace DXContainerYAML
} // namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DXContainerYAML::Part)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DXContainerYAML::ResourceBindInfo)
namespace llvm {

class raw_ostream;

namespace yaml {

template <> struct MappingTraits<DXContainerYAML::VersionTuple> {
  static void mapping(IO &IO, DXContainerYAML::VersionTuple &Version);
};

template <> struct MappingTraits<DXContainerYAML::FileHeader> {
  static void mapping(IO &IO, DXContainerYAML::FileHeader &Header);
};

template <> struct MappingTraits<DXContainerYAML::DXILProgram> {
  static void mapping(IO &IO, DXContainerYAML::DXILProgram &Program);
};

template <> struct MappingTraits<DXContainerYAML::ShaderFlags> {
  static void mapping(IO &IO, DXContainerYAML::ShaderFlags &Flags);
};

template <> struct MappingTraits<DXContainerYAML::ShaderHash> {
  static void mapping(IO &IO, DXContainerYAML::ShaderHash &Hash);
};

template <> struct MappingTraits<DXContainerYAML::PSVInfo> {
  static void mapping(IO &IO, DXContainerYAML::PSVInfo &PSV);
};

template <> struct MappingTraits<DXContainerYAML::Part> {
  static void mapping(IO &IO, DXContainerYAML::Part &Version);
};

template <> struct MappingTraits<DXContainerYAML::Object> {
  static void mapping(IO &IO, DXContainerYAML::Object &Obj);
};

template <> struct MappingTraits<DXContainerYAML::ResourceBindInfo> {
  static void mapping(IO &IO, DXContainerYAML::ResourceBindInfo &Res);
};

} // namespace yaml

} // namespace llvm

#endif // LLVM_OBJECTYAML_DXCONTAINERYAML_H
//===- ObjectYAML.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_OBJECTYAML_H
#define LLVM_OBJECTYAML_OBJECTYAML_H

#include "llvm/ObjectYAML/ArchiveYAML.h"
#include "llvm/ObjectYAML/COFFYAML.h"
#include "llvm/ObjectYAML/DXContainerYAML.h"
#include "llvm/ObjectYAML/ELFYAML.h"
#include "llvm/ObjectYAML/MachOYAML.h"
#include "llvm/ObjectYAML/MinidumpYAML.h"
#include "llvm/ObjectYAML/OffloadYAML.h"
#include "llvm/ObjectYAML/WasmYAML.h"
#include "llvm/ObjectYAML/XCOFFYAML.h"
#include "llvm/Support/YAMLTraits.h"
#include <memory>

namespace llvm {
namespace yaml {

class IO;

struct YamlObjectFile {
  std::unique_ptr<ArchYAML::Archive> Arch;
  std::unique_ptr<ELFYAML::Object> Elf;
  std::unique_ptr<COFFYAML::Object> Coff;
  std::unique_ptr<MachOYAML::Object> MachO;
  std::unique_ptr<MachOYAML::UniversalBinary> FatMachO;
  std::unique_ptr<MinidumpYAML::Object> Minidump;
  std::unique_ptr<OffloadYAML::Binary> Offload;
  std::unique_ptr<WasmYAML::Object> Wasm;
  std::unique_ptr<XCOFFYAML::Object> Xcoff;
  std::unique_ptr<DXContainerYAML::Object> DXContainer;
};
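// The MappingTraits specialization below dispatches on the YAML document tag
// and fills in exactly one of the members above; e.g. a document starting
// with `--- !ELF` populates Elf. (The tag spellings here are illustrative.)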

template <> struct MappingTraits<YamlObjectFile> {
  static void mapping(IO &IO, YamlObjectFile &ObjectFile);
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_OBJECTYAML_H
//==- CodeViewYAMLTypeHashing.h - CodeView YAMLIO Type hashing ----*- C++-*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines classes for handling the YAML representation of CodeView
// Debug Info.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLTYPEHASHING_H
#define LLVM_OBJECTYAML_CODEVIEWYAMLTYPEHASHING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/TypeHashing.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {

namespace CodeViewYAML {

struct GlobalHash {
  GlobalHash() = default;
  explicit GlobalHash(StringRef S) : Hash(S) {
    assert(S.size() == 8 && "Invalid hash size!");
  }
  explicit GlobalHash(ArrayRef<uint8_t> S) : Hash(S) {
    assert(S.size() == 8 && "Invalid hash size!");
  }
  yaml::BinaryRef Hash;
};

struct DebugHSection {
  uint32_t Magic;
  uint16_t Version;
  uint16_t HashAlgorithm;
  std::vector<GlobalHash> Hashes;
};

DebugHSection fromDebugH(ArrayRef<uint8_t> DebugH);
ArrayRef<uint8_t> toDebugH(const DebugHSection &DebugH,
                           BumpPtrAllocator &Alloc);

} // end namespace CodeViewYAML

} // end namespace llvm

LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::DebugHSection)
LLVM_YAML_DECLARE_SCALAR_TRAITS(CodeViewYAML::GlobalHash, QuotingType::None)
LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::GlobalHash)

#endif // LLVM_OBJECTYAML_CODEVIEWYAMLTYPEHASHING_H
//===- DWARFYAML.h - DWARF YAMLIO implementation ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation
/// of DWARF Debug Info.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_DWARFYAML_H
#define LLVM_OBJECTYAML_DWARFYAML_H

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <optional>
#include <unordered_map>
#include <vector>

namespace llvm {
namespace DWARFYAML {

struct AttributeAbbrev {
  llvm::dwarf::Attribute Attribute;
  llvm::dwarf::Form Form;
  llvm::yaml::Hex64 Value; // Some DWARF5 attribute forms (e.g.
                           // DW_FORM_implicit_const) carry their value in the
                           // abbreviation itself.
};

struct Abbrev {
  std::optional<yaml::Hex64> Code;
  llvm::dwarf::Tag Tag;
  llvm::dwarf::Constants Children;
  std::vector<AttributeAbbrev> Attributes;
};

struct AbbrevTable {
  std::optional<uint64_t> ID;
  std::vector<Abbrev> Table;
};

struct ARangeDescriptor {
  llvm::yaml::Hex64 Address;
  yaml::Hex64 Length;
};

struct ARange {
  dwarf::DwarfFormat Format;
  std::optional<yaml::Hex64> Length;
  uint16_t Version;
  yaml::Hex64 CuOffset;
  std::optional<yaml::Hex8> AddrSize;
  yaml::Hex8 SegSize;
  std::vector<ARangeDescriptor> Descriptors;
};

/// Class that describes a range list entry, or a base address selection entry
/// within a range list in the .debug_ranges section.
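/// As a hedged illustration (the YAML keys are assumed from the members
/// below), a base address selection entry uses the maximum offset value:
/// \code
/// Entries:
///   - LowOffset:  0xFFFFFFFFFFFFFFFF # base address selection entry
///     HighOffset: 0x1000             # the new base address
///   - LowOffset:  0x10
///     HighOffset: 0x20
/// \endcode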
struct RangeEntry {
  llvm::yaml::Hex64 LowOffset;
  llvm::yaml::Hex64 HighOffset;
};

/// Class that describes a single range list inside the .debug_ranges section.
struct Ranges {
  std::optional<llvm::yaml::Hex64> Offset;
  std::optional<llvm::yaml::Hex8> AddrSize;
  std::vector<RangeEntry> Entries;
};

struct PubEntry {
  llvm::yaml::Hex32 DieOffset;
  llvm::yaml::Hex8 Descriptor;
  StringRef Name;
};

struct PubSection {
  dwarf::DwarfFormat Format;
  yaml::Hex64 Length;
  uint16_t Version;
  uint32_t UnitOffset;
  uint32_t UnitSize;
  std::vector<PubEntry> Entries;
};

struct FormValue {
  llvm::yaml::Hex64 Value;
  StringRef CStr;
  std::vector<llvm::yaml::Hex8> BlockData;
};

struct Entry {
  llvm::yaml::Hex32 AbbrCode;
  std::vector<FormValue> Values;
};

/// Class that contains helpful context information when mapping YAML into DWARF
/// data structures.
struct DWARFContext {
  bool IsGNUPubSec = false;
};

struct Unit {
  dwarf::DwarfFormat Format;
  std::optional<yaml::Hex64> Length;
  uint16_t Version;
  std::optional<uint8_t> AddrSize;
  llvm::dwarf::UnitType Type; // Added in DWARF 5
  std::optional<uint64_t> AbbrevTableID;
  std::optional<yaml::Hex64> AbbrOffset;
  std::vector<Entry> Entries;
};

struct File {
  StringRef Name;
  uint64_t DirIdx;
  uint64_t ModTime;
  uint64_t Length;
};

struct LineTableOpcode {
  dwarf::LineNumberOps Opcode;
  std::optional<uint64_t> ExtLen;
  dwarf::LineNumberExtendedOps SubOpcode;
  uint64_t Data;
  int64_t SData;
  File FileEntry;
  std::vector<llvm::yaml::Hex8> UnknownOpcodeData;
  std::vector<llvm::yaml::Hex64> StandardOpcodeData;
};

struct LineTable {
  dwarf::DwarfFormat Format;
  std::optional<uint64_t> Length;
  uint16_t Version;
  std::optional<uint64_t> PrologueLength;
  uint8_t MinInstLength;
  uint8_t MaxOpsPerInst;
  uint8_t DefaultIsStmt;
  uint8_t LineBase;
  uint8_t LineRange;
  std::optional<uint8_t> OpcodeBase;
  std::optional<std::vector<uint8_t>> StandardOpcodeLengths;
  std::vector<StringRef> IncludeDirs;
  std::vector<File> Files;
  std::vector<LineTableOpcode> Opcodes;
};

struct SegAddrPair {
  yaml::Hex64 Segment;
  yaml::Hex64 Address;
};

struct AddrTableEntry {
  dwarf::DwarfFormat Format;
  std::optional<yaml::Hex64> Length;
  yaml::Hex16 Version;
  std::optional<yaml::Hex8> AddrSize;
  yaml::Hex8 SegSelectorSize;
  std::vector<SegAddrPair> SegAddrPairs;
};

struct StringOffsetsTable {
  dwarf::DwarfFormat Format;
  std::optional<yaml::Hex64> Length;
  yaml::Hex16 Version;
  yaml::Hex16 Padding;
  std::vector<yaml::Hex64> Offsets;
};

struct DWARFOperation {
  dwarf::LocationAtom Operator;
  std::vector<yaml::Hex64> Values;
};

struct RnglistEntry {
  dwarf::RnglistEntries Operator;
  std::vector<yaml::Hex64> Values;
};

struct LoclistEntry {
  dwarf::LoclistEntries Operator;
  std::vector<yaml::Hex64> Values;
  std::optional<yaml::Hex64> DescriptionsLength;
  std::vector<DWARFOperation> Descriptions;
};

template <typename EntryType> struct ListEntries {
  std::optional<std::vector<EntryType>> Entries;
  std::optional<yaml::BinaryRef> Content;
};

template <typename EntryType> struct ListTable {
  dwarf::DwarfFormat Format;
  std::optional<yaml::Hex64> Length;
  yaml::Hex16 Version;
  std::optional<yaml::Hex8> AddrSize;
  yaml::Hex8 SegSelectorSize;
  std::optional<uint32_t> OffsetEntryCount;
  std::optional<std::vector<yaml::Hex64>> Offsets;
  std::vector<ListEntries<EntryType>> Lists;
};

struct Data {
  bool IsLittleEndian;
  bool Is64BitAddrSize;
  std::vector<AbbrevTable> DebugAbbrev;
  std::optional<std::vector<StringRef>> DebugStrings;
  std::optional<std::vector<StringOffsetsTable>> DebugStrOffsets;
  std::optional<std::vector<ARange>> DebugAranges;
  std::optional<std::vector<Ranges>> DebugRanges;
  std::optional<std::vector<AddrTableEntry>> DebugAddr;
  std::optional<PubSection> PubNames;
  std::optional<PubSection> PubTypes;

  std::optional<PubSection> GNUPubNames;
  std::optional<PubSection> GNUPubTypes;

  std::vector<Unit> CompileUnits;

  std::vector<LineTable> DebugLines;
  std::optional<std::vector<ListTable<RnglistEntry>>> DebugRnglists;
  std::optional<std::vector<ListTable<LoclistEntry>>> DebugLoclists;

  bool isEmpty() const;

  SetVector<StringRef> getNonEmptySectionNames() const;

  struct AbbrevTableInfo {
    uint64_t Index;
    uint64_t Offset;
  };
  Expected<AbbrevTableInfo> getAbbrevTableInfoByID(uint64_t ID) const;
  StringRef getAbbrevTableContentByIndex(uint64_t Index) const;

private:
  mutable std::unordered_map<uint64_t, AbbrevTableInfo> AbbrevTableInfoMap;
  mutable std::unordered_map<uint64_t, std::string> AbbrevTableContents;
};

} // end namespace DWARFYAML
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::AttributeAbbrev)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Abbrev)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::AbbrevTable)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::ARangeDescriptor)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::ARange)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::RangeEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Ranges)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::PubEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Unit)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::FormValue)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Entry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::File)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LineTable)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LineTableOpcode)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::SegAddrPair)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::AddrTableEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::StringOffsetsTable)
LLVM_YAML_IS_SEQUENCE_VECTOR(
    llvm::DWARFYAML::ListTable<DWARFYAML::RnglistEntry>)
LLVM_YAML_IS_SEQUENCE_VECTOR(
    llvm::DWARFYAML::ListEntries<DWARFYAML::RnglistEntry>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::RnglistEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(
    llvm::DWARFYAML::ListTable<DWARFYAML::LoclistEntry>)
LLVM_YAML_IS_SEQUENCE_VECTOR(
    llvm::DWARFYAML::ListEntries<DWARFYAML::LoclistEntry>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LoclistEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::DWARFOperation)

namespace llvm {
namespace yaml {

template <> struct MappingTraits<DWARFYAML::Data> {
  static void mapping(IO &IO, DWARFYAML::Data &DWARF);
};

template <> struct MappingTraits<DWARFYAML::AbbrevTable> {
  static void mapping(IO &IO, DWARFYAML::AbbrevTable &AbbrevTable);
};

template <> struct MappingTraits<DWARFYAML::Abbrev> {
  static void mapping(IO &IO, DWARFYAML::Abbrev &Abbrev);
};

template <> struct MappingTraits<DWARFYAML::AttributeAbbrev> {
  static void mapping(IO &IO, DWARFYAML::AttributeAbbrev &AttAbbrev);
};

template <> struct MappingTraits<DWARFYAML::ARangeDescriptor> {
  static void mapping(IO &IO, DWARFYAML::ARangeDescriptor &Descriptor);
};

template <> struct MappingTraits<DWARFYAML::ARange> {
  static void mapping(IO &IO, DWARFYAML::ARange &ARange);
};

template <> struct MappingTraits<DWARFYAML::RangeEntry> {
  static void mapping(IO &IO, DWARFYAML::RangeEntry &Entry);
};

template <> struct MappingTraits<DWARFYAML::Ranges> {
  static void mapping(IO &IO, DWARFYAML::Ranges &Ranges);
};

template <> struct MappingTraits<DWARFYAML::PubEntry> {
  static void mapping(IO &IO, DWARFYAML::PubEntry &Entry);
};

template <> struct MappingTraits<DWARFYAML::PubSection> {
  static void mapping(IO &IO, DWARFYAML::PubSection &Section);
};

template <> struct MappingTraits<DWARFYAML::Unit> {
  static void mapping(IO &IO, DWARFYAML::Unit &Unit);
};

template <> struct MappingTraits<DWARFYAML::Entry> {
  static void mapping(IO &IO, DWARFYAML::Entry &Entry);
};

template <> struct MappingTraits<DWARFYAML::FormValue> {
  static void mapping(IO &IO, DWARFYAML::FormValue &FormValue);
};

template <> struct MappingTraits<DWARFYAML::File> {
  static void mapping(IO &IO, DWARFYAML::File &File);
};

template <> struct MappingTraits<DWARFYAML::LineTableOpcode> {
  static void mapping(IO &IO, DWARFYAML::LineTableOpcode &LineTableOpcode);
};

template <> struct MappingTraits<DWARFYAML::LineTable> {
  static void mapping(IO &IO, DWARFYAML::LineTable &LineTable);
};

template <> struct MappingTraits<DWARFYAML::SegAddrPair> {
  static void mapping(IO &IO, DWARFYAML::SegAddrPair &SegAddrPair);
};

template <> struct MappingTraits<DWARFYAML::DWARFOperation> {
  static void mapping(IO &IO, DWARFYAML::DWARFOperation &DWARFOperation);
};

template <typename EntryType>
struct MappingTraits<DWARFYAML::ListTable<EntryType>> {
  static void mapping(IO &IO, DWARFYAML::ListTable<EntryType> &ListTable);
};

template <typename EntryType>
struct MappingTraits<DWARFYAML::ListEntries<EntryType>> {
  static void mapping(IO &IO, DWARFYAML::ListEntries<EntryType> &ListEntries);
  static std::string validate(IO &IO,
                              DWARFYAML::ListEntries<EntryType> &ListEntries);
};

template <> struct MappingTraits<DWARFYAML::RnglistEntry> {
  static void mapping(IO &IO, DWARFYAML::RnglistEntry &RnglistEntry);
};

template <> struct MappingTraits<DWARFYAML::LoclistEntry> {
  static void mapping(IO &IO, DWARFYAML::LoclistEntry &LoclistEntry);
};

template <> struct MappingTraits<DWARFYAML::AddrTableEntry> {
  static void mapping(IO &IO, DWARFYAML::AddrTableEntry &AddrTable);
};

template <> struct MappingTraits<DWARFYAML::StringOffsetsTable> {
  static void mapping(IO &IO, DWARFYAML::StringOffsetsTable &StrOffsetsTable);
};

template <> struct ScalarEnumerationTraits<dwarf::DwarfFormat> {
  static void enumeration(IO &IO, dwarf::DwarfFormat &Format) {
    IO.enumCase(Format, "DWARF32", dwarf::DWARF32);
    IO.enumCase(Format, "DWARF64", dwarf::DWARF64);
  }
};

#define HANDLE_DW_TAG(unused, name, unused2, unused3, unused4)                 \
  io.enumCase(value, "DW_TAG_" #name, dwarf::DW_TAG_##name);

template <> struct ScalarEnumerationTraits<dwarf::Tag> {
  static void enumeration(IO &io, dwarf::Tag &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<Hex16>(value);
  }
};

#define HANDLE_DW_LNS(unused, name)                                            \
  io.enumCase(value, "DW_LNS_" #name, dwarf::DW_LNS_##name);

template <> struct ScalarEnumerationTraits<dwarf::LineNumberOps> {
  static void enumeration(IO &io, dwarf::LineNumberOps &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<Hex8>(value);
  }
};

#define HANDLE_DW_LNE(unused, name)                                            \
  io.enumCase(value, "DW_LNE_" #name, dwarf::DW_LNE_##name);

template <> struct ScalarEnumerationTraits<dwarf::LineNumberExtendedOps> {
  static void enumeration(IO &io, dwarf::LineNumberExtendedOps &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<Hex16>(value);
  }
};

#define HANDLE_DW_AT(unused, name, unused2, unused3)                           \
  io.enumCase(value, "DW_AT_" #name, dwarf::DW_AT_##name);

template <> struct ScalarEnumerationTraits<dwarf::Attribute> {
  static void enumeration(IO &io, dwarf::Attribute &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<Hex16>(value);
  }
};

#define HANDLE_DW_FORM(unused, name, unused2, unused3)                         \
  io.enumCase(value, "DW_FORM_" #name, dwarf::DW_FORM_##name);

template <> struct ScalarEnumerationTraits<dwarf::Form> {
  static void enumeration(IO &io, dwarf::Form &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<Hex16>(value);
  }
};

#define HANDLE_DW_UT(unused, name)                                             \
  io.enumCase(value, "DW_UT_" #name, dwarf::DW_UT_##name);

template <> struct ScalarEnumerationTraits<dwarf::UnitType> {
  static void enumeration(IO &io, dwarf::UnitType &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<Hex8>(value);
  }
};

template <> struct ScalarEnumerationTraits<dwarf::Constants> {
  static void enumeration(IO &io, dwarf::Constants &value) {
    io.enumCase(value, "DW_CHILDREN_no", dwarf::DW_CHILDREN_no);
    io.enumCase(value, "DW_CHILDREN_yes", dwarf::DW_CHILDREN_yes);
    io.enumFallback<Hex16>(value);
  }
};

#define HANDLE_DW_RLE(unused, name)                                            \
  io.enumCase(value, "DW_RLE_" #name, dwarf::DW_RLE_##name);

template <> struct ScalarEnumerationTraits<dwarf::RnglistEntries> {
  static void enumeration(IO &io, dwarf::RnglistEntries &value) {
#include "llvm/BinaryFormat/Dwarf.def"
  }
};

#define HANDLE_DW_LLE(unused, name)                                            \
  io.enumCase(value, "DW_LLE_" #name, dwarf::DW_LLE_##name);

template <> struct ScalarEnumerationTraits<dwarf::LoclistEntries> {
  static void enumeration(IO &io, dwarf::LoclistEntries &value) {
#include "llvm/BinaryFormat/Dwarf.def"
  }
};

#define HANDLE_DW_OP(id, name, version, vendor)                                \
  io.enumCase(value, "DW_OP_" #name, dwarf::DW_OP_##name);

template <> struct ScalarEnumerationTraits<dwarf::LocationAtom> {
  static void enumeration(IO &io, dwarf::LocationAtom &value) {
#include "llvm/BinaryFormat/Dwarf.def"
    io.enumFallback<yaml::Hex8>(value);
  }
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_DWARFYAML_H
//=- CodeViewYAMLDebugSections.h - CodeView YAMLIO debug sections -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines classes for handling the YAML representation of CodeView
// Debug Info.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_CODEVIEWYAMLDEBUGSECTIONS_H
#define LLVM_OBJECTYAML_CODEVIEWYAMLDEBUGSECTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {

namespace codeview {

class StringsAndChecksums;
class StringsAndChecksumsRef;

} // end namespace codeview

namespace CodeViewYAML {

namespace detail {

struct YAMLSubsectionBase;

} // end namespace detail

struct YAMLFrameData {
  uint32_t RvaStart;
  uint32_t CodeSize;
  uint32_t LocalSize;
  uint32_t ParamsSize;
  uint32_t MaxStackSize;
  StringRef FrameFunc;
  uint32_t PrologSize;
  uint32_t SavedRegsSize;
  uint32_t Flags;
};

struct YAMLCrossModuleImport {
  StringRef ModuleName;
  std::vector<uint32_t> ImportIds;
};

struct SourceLineEntry {
  uint32_t Offset;
  uint32_t LineStart;
  uint32_t EndDelta;
  bool IsStatement;
};

struct SourceColumnEntry {
  uint16_t StartColumn;
  uint16_t EndColumn;
};

struct SourceLineBlock {
  StringRef FileName;
  std::vector<SourceLineEntry> Lines;
  std::vector<SourceColumnEntry> Columns;
};

struct HexFormattedString {
  std::vector<uint8_t> Bytes;
};

struct SourceFileChecksumEntry {
  StringRef FileName;
  codeview::FileChecksumKind Kind;
  HexFormattedString ChecksumBytes;
};

struct SourceLineInfo {
  uint32_t RelocOffset;
  uint32_t RelocSegment;
  codeview::LineFlags Flags;
  uint32_t CodeSize;
  std::vector<SourceLineBlock> Blocks;
};

struct InlineeSite {
  uint32_t Inlinee;
  StringRef FileName;
  uint32_t SourceLineNum;
  std::vector<StringRef> ExtraFiles;
};

struct InlineeInfo {
  bool HasExtraFiles;
  std::vector<InlineeSite> Sites;
};

struct YAMLDebugSubsection {
  static Expected<YAMLDebugSubsection>
  fromCodeViewSubection(const codeview::StringsAndChecksumsRef &SC,
                        const codeview::DebugSubsectionRecord &SS);

  std::shared_ptr<detail::YAMLSubsectionBase> Subsection;
};

Expected<std::vector<std::shared_ptr<codeview::DebugSubsection>>>
toCodeViewSubsectionList(BumpPtrAllocator &Allocator,
                         ArrayRef<YAMLDebugSubsection> Subsections,
                         const codeview::StringsAndChecksums &SC);

std::vector<YAMLDebugSubsection>
fromDebugS(ArrayRef<uint8_t> Data, const codeview::StringsAndChecksumsRef &SC);

void initializeStringsAndChecksums(ArrayRef<YAMLDebugSubsection> Sections,
                                   codeview::StringsAndChecksums &SC);

} // end namespace CodeViewYAML

} // end namespace llvm

LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::YAMLDebugSubsection)

LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::YAMLDebugSubsection)

#endif // LLVM_OBJECTYAML_CODEVIEWYAMLDEBUGSECTIONS_H
//===--- DWARFEmitter.h - ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Common declarations for yaml2obj
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_DWARFEMITTER_H
#define LLVM_OBJECTYAML_DWARFEMITTER_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TargetParser/Host.h"
#include <memory>

namespace llvm {

class raw_ostream;

namespace DWARFYAML {

struct Data;

Error emitDebugAbbrev(raw_ostream &OS, const Data &DI);
Error emitDebugStr(raw_ostream &OS, const Data &DI);

Error emitDebugAranges(raw_ostream &OS, const Data &DI);
Error emitDebugRanges(raw_ostream &OS, const Data &DI);
Error emitDebugPubnames(raw_ostream &OS, const Data &DI);
Error emitDebugPubtypes(raw_ostream &OS, const Data &DI);
Error emitDebugGNUPubnames(raw_ostream &OS, const Data &DI);
Error emitDebugGNUPubtypes(raw_ostream &OS, const Data &DI);
Error emitDebugInfo(raw_ostream &OS, const Data &DI);
Error emitDebugLine(raw_ostream &OS, const Data &DI);
Error emitDebugAddr(raw_ostream &OS, const Data &DI);
Error emitDebugStrOffsets(raw_ostream &OS, const Data &DI);
Error emitDebugRnglists(raw_ostream &OS, const Data &DI);
Error emitDebugLoclists(raw_ostream &OS, const Data &DI);

std::function<Error(raw_ostream &, const Data &)>
getDWARFEmitterByName(StringRef SecName);
Expected<StringMap<std::unique_ptr<MemoryBuffer>>>
emitDebugSections(StringRef YAMLString,
                  bool IsLittleEndian = sys::IsLittleEndianHost,
                  bool Is64BitAddrSize = true);
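// A usage sketch (illustrative; keying the result by names such as
// "debug_info" is an assumption about the returned StringMap):
//
//   auto SectionsOrErr = DWARFYAML::emitDebugSections(YamlString);
//   if (!SectionsOrErr)
//     return SectionsOrErr.takeError();
//   if (auto It = SectionsOrErr->find("debug_info"); It != SectionsOrErr->end())
//     StringRef Bytes = It->getValue()->getBuffer();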
} // end namespace DWARFYAML
} // end namespace llvm

#endif // LLVM_OBJECTYAML_DWARFEMITTER_H
//===--- yaml2obj.h - -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Common declarations for yaml2obj
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECTYAML_YAML2OBJ_H
#define LLVM_OBJECTYAML_YAML2OBJ_H

#include "llvm/ADT/STLExtras.h"
#include <memory>

namespace llvm {
class raw_ostream;
template <typename T> class SmallVectorImpl;
class StringRef;
class Twine;

namespace object {
class ObjectFile;
}

namespace COFFYAML {
struct Object;
}

namespace ELFYAML {
struct Object;
}

namespace MinidumpYAML {
struct Object;
}

namespace OffloadYAML {
struct Binary;
}

namespace WasmYAML {
struct Object;
}

namespace XCOFFYAML {
struct Object;
}

namespace ArchYAML {
struct Archive;
}

namespace DXContainerYAML {
struct Object;
} // namespace DXContainerYAML

namespace yaml {
class Input;
struct YamlObjectFile;

using ErrorHandler = llvm::function_ref<void(const Twine &Msg)>;

bool yaml2archive(ArchYAML::Archive &Doc, raw_ostream &Out, ErrorHandler EH);
bool yaml2coff(COFFYAML::Object &Doc, raw_ostream &Out, ErrorHandler EH);
bool yaml2elf(ELFYAML::Object &Doc, raw_ostream &Out, ErrorHandler EH,
              uint64_t MaxSize);
bool yaml2macho(YamlObjectFile &Doc, raw_ostream &Out, ErrorHandler EH);
bool yaml2minidump(MinidumpYAML::Object &Doc, raw_ostream &Out,
                   ErrorHandler EH);
bool yaml2offload(OffloadYAML::Binary &Doc, raw_ostream &Out, ErrorHandler EH);
bool yaml2wasm(WasmYAML::Object &Doc, raw_ostream &Out, ErrorHandler EH);
bool yaml2xcoff(XCOFFYAML::Object &Doc, raw_ostream &Out, ErrorHandler EH);
bool yaml2dxcontainer(DXContainerYAML::Object &Doc, raw_ostream &Out,
                      ErrorHandler EH);

bool convertYAML(Input &YIn, raw_ostream &Out, ErrorHandler ErrHandler,
                 unsigned DocNum = 1, uint64_t MaxSize = UINT64_MAX);

/// Convenience function for tests.
std::unique_ptr<object::ObjectFile>
yaml2ObjectFile(SmallVectorImpl<char> &Storage, StringRef Yaml,
                ErrorHandler ErrHandler);
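// For example (a sketch; the YAML below is a minimal relocatable ELF document
// in yaml2obj syntax, and the error handler simply prints the message):
//
//   SmallString<0> Storage;
//   std::unique_ptr<object::ObjectFile> Obj = yaml2ObjectFile(
//       Storage,
//       "--- !ELF\n"
//       "FileHeader:\n"
//       "  Class:   ELFCLASS64\n"
//       "  Data:    ELFDATA2LSB\n"
//       "  Type:    ET_REL\n"
//       "  Machine: EM_X86_64\n",
//       [](const Twine &Msg) { errs() << Msg << "\n"; });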

} // namespace yaml
} // namespace llvm

#endif
//===- YAML.h ---------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_YAML_H
#define LLVM_OBJECTYAML_YAML_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>

namespace llvm {

class raw_ostream;

namespace yaml {

/// Specialized YAMLIO scalar type for representing a binary blob.
///
/// A typical use case would be to represent the content of a section in a
/// binary file.
/// This class has custom YAMLIO traits for convenient reading and writing.
/// It renders as a string of hex digits in a YAML file.
/// For example, it might render as `DEADBEEFCAFEBABE` (YAML does not
/// require the quotation marks, so for simplicity when outputting they are
/// omitted).
/// When reading, any string whose content consists of an even number of hex
/// digits will be accepted.
/// For example, all of the following are acceptable:
/// `DEADBEEF`, `"DeADbEeF"`, `"\x44EADBEEF"` (Note: '\x44' == 'D')
///
/// A significant advantage of using this class is that it never allocates
/// temporary strings or buffers for any of its functionality.
///
/// Example:
///
/// The YAML mapping:
/// \code
/// Foo: DEADBEEFCAFEBABE
/// \endcode
///
/// Could be modeled in YAMLIO by the struct:
/// \code
/// struct FooHolder {
///   BinaryRef Foo;
/// };
/// namespace llvm {
/// namespace yaml {
/// template <>
/// struct MappingTraits<FooHolder> {
///   static void mapping(IO &IO, FooHolder &FH) {
///     IO.mapRequired("Foo", FH.Foo);
///   }
/// };
/// } // end namespace yaml
/// } // end namespace llvm
/// \endcode
class BinaryRef {
  friend bool operator==(const BinaryRef &LHS, const BinaryRef &RHS);

  /// Either raw binary data, or a string of hex bytes (must always
  /// be an even number of characters).
  ArrayRef<uint8_t> Data;

  /// Discriminator between the two states of the `Data` member.
  bool DataIsHexString = true;

public:
  BinaryRef() = default;
  BinaryRef(ArrayRef<uint8_t> Data) : Data(Data), DataIsHexString(false) {}
  BinaryRef(StringRef Data) : Data(arrayRefFromStringRef(Data)) {}

  /// The number of bytes that are represented by this BinaryRef.
  /// This is the number of bytes that writeAsBinary() will write.
  ArrayRef<uint8_t>::size_type binary_size() const {
    if (DataIsHexString)
      return Data.size() / 2;
    return Data.size();
  }

  /// Write the contents (regardless of whether it is binary or a
  /// hex string) as binary to the given raw_ostream.
  /// N can be used to specify the maximum number of bytes.
  void writeAsBinary(raw_ostream &OS, uint64_t N = UINT64_MAX) const;

  /// Write the contents (regardless of whether it is binary or a
  /// hex string) as hex to the given raw_ostream.
  ///
  /// For example, a possible output could be `DEADBEEFCAFEBABE`.
  void writeAsHex(raw_ostream &OS) const;
};

inline bool operator==(const BinaryRef &LHS, const BinaryRef &RHS) {
  // Special case for default constructed BinaryRef.
  if (LHS.Data.empty() && RHS.Data.empty())
    return true;

  return LHS.DataIsHexString == RHS.DataIsHexString && LHS.Data == RHS.Data;
}
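
// Usage sketch (illustrative): the two constructors above select the hex
// string vs. raw byte interpretation; binary_size() and writeAsBinary()
// then agree on the decoded length.
// \code
// BinaryRef Hex(StringRef("DEADBEEF"));      // hex string: 4 decoded bytes
// uint8_t Raw[] = {0xDE, 0xAD, 0xBE, 0xEF};
// BinaryRef Bytes(ArrayRef<uint8_t>(Raw));   // raw data: 4 bytes
// assert(Hex.binary_size() == Bytes.binary_size());
// Hex.writeAsBinary(outs());                 // emits the 4 decoded bytes
// Bytes.writeAsHex(outs());                  // emits the hex digits
// \endcode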

template <> struct ScalarTraits<BinaryRef> {
  static void output(const BinaryRef &, void *, raw_ostream &);
  static StringRef input(StringRef, void *, BinaryRef &);
  static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
};

} // end namespace yaml

} // end namespace llvm

#endif // LLVM_OBJECTYAML_YAML_H
ObjectYAML/WasmYAML.h
//===- WasmYAML.h - Wasm YAMLIO implementation ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares classes for handling the YAML representation
/// of wasm binaries.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OBJECTYAML_WASMYAML_H
#define LLVM_OBJECTYAML_WASMYAML_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/ObjectYAML/YAML.h"
#include "llvm/Support/Casting.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {
namespace WasmYAML {

LLVM_YAML_STRONG_TYPEDEF(uint32_t, SectionType)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ValueType)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, TableType)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, SignatureForm)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ExportKind)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, Opcode)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, RelocType)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, SymbolFlags)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, SymbolKind)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, SegmentFlags)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, LimitFlags)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, ComdatKind)
LLVM_YAML_STRONG_TYPEDEF(uint32_t, FeaturePolicyPrefix)

struct FileHeader {
  yaml::Hex32 Version;
};

struct Limits {
  LimitFlags Flags;
  yaml::Hex32 Minimum;
  yaml::Hex32 Maximum;
};

struct Table {
  TableType ElemType;
  Limits TableLimits;
  uint32_t Index;
};

struct Export {
  StringRef Name;
  ExportKind Kind;
  uint32_t Index;
};

struct InitExpr {
  InitExpr() {}
  bool Extended;
  union {
    wasm::WasmInitExprMVP Inst;
    yaml::BinaryRef Body;
  };
};

struct ElemSegment {
  uint32_t Flags;
  uint32_t TableNumber;
  ValueType ElemKind;
  InitExpr Offset;
  std::vector<uint32_t> Functions;
};

struct Global {
  uint32_t Index;
  ValueType Type;
  bool Mutable;
  InitExpr Init;
};

struct Import {
  Import() {}
  StringRef Module;
  StringRef Field;
  ExportKind Kind;
  union {
    uint32_t SigIndex;
    Table TableImport;
    Limits Memory;
    uint32_t TagIndex;
    Global GlobalImport;
  };
};

struct LocalDecl {
  ValueType Type;
  uint32_t Count;
};

struct Function {
  uint32_t Index;
  std::vector<LocalDecl> Locals;
  yaml::BinaryRef Body;
};

struct Relocation {
  RelocType Type;
  uint32_t Index;
  // TODO(wvo): this would strictly be better as Hex64, but that will change
  // all existing obj2yaml output.
  yaml::Hex32 Offset;
  int64_t Addend;
};

struct DataSegment {
  uint32_t SectionOffset;
  uint32_t InitFlags;
  uint32_t MemoryIndex;
  InitExpr Offset;
  yaml::BinaryRef Content;
};

struct NameEntry {
  uint32_t Index;
  StringRef Name;
};

struct ProducerEntry {
  std::string Name;
  std::string Version;
};

struct FeatureEntry {
  FeaturePolicyPrefix Prefix;
  std::string Name;
};

struct SegmentInfo {
  uint32_t Index;
  StringRef Name;
  uint32_t Alignment;
  SegmentFlags Flags;
};

struct Signature {
  uint32_t Index;
  SignatureForm Form = wasm::WASM_TYPE_FUNC;
  std::vector<ValueType> ParamTypes;
  std::vector<ValueType> ReturnTypes;
};

struct SymbolInfo {
  uint32_t Index;
  StringRef Name;
  SymbolKind Kind;
  SymbolFlags Flags;
  union {
    uint32_t ElementIndex;
    wasm::WasmDataReference DataRef;
  };
};

struct InitFunction {
  uint32_t Priority;
  uint32_t Symbol;
};

struct ComdatEntry {
  ComdatKind Kind;
  uint32_t Index;
};

struct Comdat {
  StringRef Name;
  std::vector<ComdatEntry> Entries;
};

struct Section {
  explicit Section(SectionType SecType) : Type(SecType) {}
  virtual ~Section();

  SectionType Type;
  std::vector<Relocation> Relocations;
  std::optional<uint8_t> HeaderSecSizeEncodingLen;
};

struct CustomSection : Section {
  explicit CustomSection(StringRef Name)
      : Section(wasm::WASM_SEC_CUSTOM), Name(Name) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_CUSTOM;
  }

  StringRef Name;
  yaml::BinaryRef Payload;
};
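
// Dispatch sketch (illustrative): the classof() hooks on these structs enable
// LLVM-style RTTI, so section kinds are recovered with isa<>/dyn_cast<>
// rather than a virtual visitor.
// \code
// void describe(const WasmYAML::Section &S, raw_ostream &OS) {
//   if (const auto *C = dyn_cast<WasmYAML::CustomSection>(&S))
//     OS << "custom section '" << C->Name << "'\n";
//   else
//     OS << "section type " << uint32_t(S.Type) << "\n";
// }
// \endcode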

struct DylinkImportInfo {
  StringRef Module;
  StringRef Field;
  SymbolFlags Flags;
};

struct DylinkExportInfo {
  StringRef Name;
  SymbolFlags Flags;
};

struct DylinkSection : CustomSection {
  DylinkSection() : CustomSection("dylink.0") {}

  static bool classof(const Section *S) {
    auto C = dyn_cast<CustomSection>(S);
    return C && C->Name == "dylink.0";
  }

  uint32_t MemorySize;
  uint32_t MemoryAlignment;
  uint32_t TableSize;
  uint32_t TableAlignment;
  std::vector<StringRef> Needed;
  std::vector<DylinkImportInfo> ImportInfo;
  std::vector<DylinkExportInfo> ExportInfo;
};

struct NameSection : CustomSection {
  NameSection() : CustomSection("name") {}

  static bool classof(const Section *S) {
    auto C = dyn_cast<CustomSection>(S);
    return C && C->Name == "name";
  }

  std::vector<NameEntry> FunctionNames;
  std::vector<NameEntry> GlobalNames;
  std::vector<NameEntry> DataSegmentNames;
};

struct LinkingSection : CustomSection {
  LinkingSection() : CustomSection("linking") {}

  static bool classof(const Section *S) {
    auto C = dyn_cast<CustomSection>(S);
    return C && C->Name == "linking";
  }

  uint32_t Version;
  std::vector<SymbolInfo> SymbolTable;
  std::vector<SegmentInfo> SegmentInfos;
  std::vector<InitFunction> InitFunctions;
  std::vector<Comdat> Comdats;
};

struct ProducersSection : CustomSection {
  ProducersSection() : CustomSection("producers") {}

  static bool classof(const Section *S) {
    auto C = dyn_cast<CustomSection>(S);
    return C && C->Name == "producers";
  }

  std::vector<ProducerEntry> Languages;
  std::vector<ProducerEntry> Tools;
  std::vector<ProducerEntry> SDKs;
};

struct TargetFeaturesSection : CustomSection {
  TargetFeaturesSection() : CustomSection("target_features") {}

  static bool classof(const Section *S) {
    auto C = dyn_cast<CustomSection>(S);
    return C && C->Name == "target_features";
  }

  std::vector<FeatureEntry> Features;
};

struct TypeSection : Section {
  TypeSection() : Section(wasm::WASM_SEC_TYPE) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_TYPE;
  }

  std::vector<Signature> Signatures;
};

struct ImportSection : Section {
  ImportSection() : Section(wasm::WASM_SEC_IMPORT) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_IMPORT;
  }

  std::vector<Import> Imports;
};

struct FunctionSection : Section {
  FunctionSection() : Section(wasm::WASM_SEC_FUNCTION) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_FUNCTION;
  }

  std::vector<uint32_t> FunctionTypes;
};

struct TableSection : Section {
  TableSection() : Section(wasm::WASM_SEC_TABLE) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_TABLE;
  }

  std::vector<Table> Tables;
};

struct MemorySection : Section {
  MemorySection() : Section(wasm::WASM_SEC_MEMORY) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_MEMORY;
  }

  std::vector<Limits> Memories;
};

struct TagSection : Section {
  TagSection() : Section(wasm::WASM_SEC_TAG) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_TAG;
  }

  std::vector<uint32_t> TagTypes;
};

struct GlobalSection : Section {
  GlobalSection() : Section(wasm::WASM_SEC_GLOBAL) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_GLOBAL;
  }

  std::vector<Global> Globals;
};

struct ExportSection : Section {
  ExportSection() : Section(wasm::WASM_SEC_EXPORT) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_EXPORT;
  }

  std::vector<Export> Exports;
};

struct StartSection : Section {
  StartSection() : Section(wasm::WASM_SEC_START) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_START;
  }

  uint32_t StartFunction;
};

struct ElemSection : Section {
  ElemSection() : Section(wasm::WASM_SEC_ELEM) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_ELEM;
  }

  std::vector<ElemSegment> Segments;
};

struct CodeSection : Section {
  CodeSection() : Section(wasm::WASM_SEC_CODE) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_CODE;
  }

  std::vector<Function> Functions;
};

struct DataSection : Section {
  DataSection() : Section(wasm::WASM_SEC_DATA) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_DATA;
  }

  std::vector<DataSegment> Segments;
};

struct DataCountSection : Section {
  DataCountSection() : Section(wasm::WASM_SEC_DATACOUNT) {}

  static bool classof(const Section *S) {
    return S->Type == wasm::WASM_SEC_DATACOUNT;
  }

  uint32_t Count;
};

struct Object {
  FileHeader Header;
  std::vector<std::unique_ptr<Section>> Sections;
};
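
// Construction sketch (illustrative): assemble a minimal module in memory;
// yaml2wasm (declared in ObjectYAML/yaml2obj.h) can then serialize it.
// Assumes wasm::WasmVersion from BinaryFormat/Wasm.h.
// \code
// WasmYAML::Object Obj;
// Obj.Header.Version = wasm::WasmVersion;
// auto Types = std::make_unique<WasmYAML::TypeSection>();
// WasmYAML::Signature Sig;          // defaults to WASM_TYPE_FUNC, no params
// Sig.Index = 0;
// Types->Signatures.push_back(Sig);
// Obj.Sections.push_back(std::move(Types));
// \endcode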

} // end namespace WasmYAML
} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::WasmYAML::Section>)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Signature)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ValueType)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Table)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Import)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Export)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ElemSegment)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Limits)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::DataSegment)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Global)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Function)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::LocalDecl)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::NameEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ProducerEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::FeatureEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::SegmentInfo)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::SymbolInfo)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::InitFunction)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::ComdatEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Comdat)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::DylinkImportInfo)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::DylinkExportInfo)

namespace llvm {
namespace yaml {

template <> struct MappingTraits<WasmYAML::FileHeader> {
  static void mapping(IO &IO, WasmYAML::FileHeader &FileHdr);
};

template <> struct MappingTraits<std::unique_ptr<WasmYAML::Section>> {
  static void mapping(IO &IO, std::unique_ptr<WasmYAML::Section> &Section);
};

template <> struct MappingTraits<WasmYAML::Object> {
  static void mapping(IO &IO, WasmYAML::Object &Object);
};

template <> struct MappingTraits<WasmYAML::Import> {
  static void mapping(IO &IO, WasmYAML::Import &Import);
};

template <> struct MappingTraits<WasmYAML::Export> {
  static void mapping(IO &IO, WasmYAML::Export &Export);
};

template <> struct MappingTraits<WasmYAML::Global> {
  static void mapping(IO &IO, WasmYAML::Global &Global);
};

template <> struct ScalarBitSetTraits<WasmYAML::LimitFlags> {
  static void bitset(IO &IO, WasmYAML::LimitFlags &Value);
};

template <> struct ScalarBitSetTraits<WasmYAML::SymbolFlags> {
  static void bitset(IO &IO, WasmYAML::SymbolFlags &Value);
};

template <> struct ScalarEnumerationTraits<WasmYAML::SymbolKind> {
  static void enumeration(IO &IO, WasmYAML::SymbolKind &Kind);
};

template <> struct ScalarBitSetTraits<WasmYAML::SegmentFlags> {
  static void bitset(IO &IO, WasmYAML::SegmentFlags &Value);
};

template <> struct ScalarEnumerationTraits<WasmYAML::SectionType> {
  static void enumeration(IO &IO, WasmYAML::SectionType &Type);
};

template <> struct MappingTraits<WasmYAML::Signature> {
  static void mapping(IO &IO, WasmYAML::Signature &Signature);
};

template <> struct MappingTraits<WasmYAML::Table> {
  static void mapping(IO &IO, WasmYAML::Table &Table);
};

template <> struct MappingTraits<WasmYAML::Limits> {
  static void mapping(IO &IO, WasmYAML::Limits &Limits);
};

template <> struct MappingTraits<WasmYAML::Function> {
  static void mapping(IO &IO, WasmYAML::Function &Function);
};

template <> struct MappingTraits<WasmYAML::Relocation> {
  static void mapping(IO &IO, WasmYAML::Relocation &Relocation);
};

template <> struct MappingTraits<WasmYAML::NameEntry> {
  static void mapping(IO &IO, WasmYAML::NameEntry &NameEntry);
};

template <> struct MappingTraits<WasmYAML::ProducerEntry> {
  static void mapping(IO &IO, WasmYAML::ProducerEntry &ProducerEntry);
};

template <> struct ScalarEnumerationTraits<WasmYAML::FeaturePolicyPrefix> {
  static void enumeration(IO &IO, WasmYAML::FeaturePolicyPrefix &Prefix);
};

template <> struct MappingTraits<WasmYAML::FeatureEntry> {
  static void mapping(IO &IO, WasmYAML::FeatureEntry &FeatureEntry);
};

template <> struct MappingTraits<WasmYAML::SegmentInfo> {
  static void mapping(IO &IO, WasmYAML::SegmentInfo &SegmentInfo);
};

template <> struct MappingTraits<WasmYAML::LocalDecl> {
  static void mapping(IO &IO, WasmYAML::LocalDecl &LocalDecl);
};

template <> struct MappingTraits<WasmYAML::InitExpr> {
  static void mapping(IO &IO, WasmYAML::InitExpr &Expr);
};

template <> struct MappingTraits<WasmYAML::DataSegment> {
  static void mapping(IO &IO, WasmYAML::DataSegment &Segment);
};

template <> struct MappingTraits<WasmYAML::ElemSegment> {
  static void mapping(IO &IO, WasmYAML::ElemSegment &Segment);
};

template <> struct MappingTraits<WasmYAML::SymbolInfo> {
  static void mapping(IO &IO, WasmYAML::SymbolInfo &Info);
};

template <> struct MappingTraits<WasmYAML::InitFunction> {
  static void mapping(IO &IO, WasmYAML::InitFunction &Init);
};

template <> struct ScalarEnumerationTraits<WasmYAML::ComdatKind> {
  static void enumeration(IO &IO, WasmYAML::ComdatKind &Kind);
};

template <> struct MappingTraits<WasmYAML::ComdatEntry> {
  static void mapping(IO &IO, WasmYAML::ComdatEntry &ComdatEntry);
};

template <> struct MappingTraits<WasmYAML::Comdat> {
  static void mapping(IO &IO, WasmYAML::Comdat &Comdat);
};

template <> struct ScalarEnumerationTraits<WasmYAML::ValueType> {
  static void enumeration(IO &IO, WasmYAML::ValueType &Type);
};

template <> struct ScalarEnumerationTraits<WasmYAML::ExportKind> {
  static void enumeration(IO &IO, WasmYAML::ExportKind &Kind);
};

template <> struct ScalarEnumerationTraits<WasmYAML::TableType> {
  static void enumeration(IO &IO, WasmYAML::TableType &Type);
};

template <> struct ScalarEnumerationTraits<WasmYAML::Opcode> {
  static void enumeration(IO &IO, WasmYAML::Opcode &Opcode);
};

template <> struct ScalarEnumerationTraits<WasmYAML::RelocType> {
  static void enumeration(IO &IO, WasmYAML::RelocType &Kind);
};

template <> struct MappingTraits<WasmYAML::DylinkImportInfo> {
  static void mapping(IO &IO, WasmYAML::DylinkImportInfo &Info);
};

template <> struct MappingTraits<WasmYAML::DylinkExportInfo> {
  static void mapping(IO &IO, WasmYAML::DylinkExportInfo &Info);
};

} // end namespace yaml
} // end namespace llvm

#endif // LLVM_OBJECTYAML_WASMYAML_H
TableGen/Record.h
//===- llvm/TableGen/Record.h - Classes for Table Records -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the main TableGen data structures, including the TableGen
// types, values, and high-level data structures.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_RECORD_H
#define LLVM_TABLEGEN_RECORD_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>

namespace llvm {
namespace detail {
struct RecordKeeperImpl;
} // namespace detail

class ListRecTy;
class Record;
class RecordKeeper;
class RecordVal;
class Resolver;
class StringInit;
class TypedInit;

//===----------------------------------------------------------------------===//
//  Type Classes
//===----------------------------------------------------------------------===//

class RecTy {
public:
  /// Subclass discriminator (for dyn_cast<> et al.)
  enum RecTyKind {
    BitRecTyKind,
    BitsRecTyKind,
    IntRecTyKind,
    StringRecTyKind,
    ListRecTyKind,
    DagRecTyKind,
    RecordRecTyKind
  };

private:
  RecTyKind Kind;
  /// The RecordKeeper that uniqued this Type.
  RecordKeeper &RK;
  /// ListRecTy of the list that has elements of this type.
  ListRecTy *ListTy = nullptr;

public:
  RecTy(RecTyKind K, RecordKeeper &RK) : Kind(K), RK(RK) {}
  virtual ~RecTy() = default;

  RecTyKind getRecTyKind() const { return Kind; }

  /// Return the RecordKeeper that uniqued this Type.
  RecordKeeper &getRecordKeeper() const { return RK; }

  virtual std::string getAsString() const = 0;
  void print(raw_ostream &OS) const { OS << getAsString(); }
  void dump() const;

  /// Return true if all values of 'this' type can be converted to the specified
  /// type.
  virtual bool typeIsConvertibleTo(const RecTy *RHS) const;

  /// Return true if 'this' type is equal to or a subtype of RHS. For example,
  /// a bit set is not an int, but they are convertible.
  virtual bool typeIsA(const RecTy *RHS) const;

  /// Returns the type representing list<thistype>.
  ListRecTy *getListTy();
};

inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
  Ty.print(OS);
  return OS;
}

/// 'bit' - Represent a single bit
class BitRecTy : public RecTy {
  friend detail::RecordKeeperImpl;

  BitRecTy(RecordKeeper &RK) : RecTy(BitRecTyKind, RK) {}

public:
  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == BitRecTyKind;
  }

  static BitRecTy *get(RecordKeeper &RK);

  std::string getAsString() const override { return "bit"; }

  bool typeIsConvertibleTo(const RecTy *RHS) const override;
};

/// 'bits<n>' - Represent a fixed number of bits
class BitsRecTy : public RecTy {
  unsigned Size;

  explicit BitsRecTy(RecordKeeper &RK, unsigned Sz)
      : RecTy(BitsRecTyKind, RK), Size(Sz) {}

public:
  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == BitsRecTyKind;
  }

  static BitsRecTy *get(RecordKeeper &RK, unsigned Sz);

  unsigned getNumBits() const { return Size; }

  std::string getAsString() const override;

  bool typeIsConvertibleTo(const RecTy *RHS) const override;
};

/// 'int' - Represent an integer value of no particular size
class IntRecTy : public RecTy {
  friend detail::RecordKeeperImpl;

  IntRecTy(RecordKeeper &RK) : RecTy(IntRecTyKind, RK) {}

public:
  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == IntRecTyKind;
  }

  static IntRecTy *get(RecordKeeper &RK);

  std::string getAsString() const override { return "int"; }

  bool typeIsConvertibleTo(const RecTy *RHS) const override;
};
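
// Sketch (illustrative): types are uniqued per RecordKeeper and obtained via
// the static get() factories; typeIsConvertibleTo() then answers questions
// such as "does bit widen to int?".
// \code
// RecordKeeper RK;
// RecTy *Bit = BitRecTy::get(RK);
// RecTy *Int = IntRecTy::get(RK);
// bool Widens = Bit->typeIsConvertibleTo(Int); // bit -> int is allowed
// \endcode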

/// 'string' - Represent a string value
class StringRecTy : public RecTy {
  friend detail::RecordKeeperImpl;

  StringRecTy(RecordKeeper &RK) : RecTy(StringRecTyKind, RK) {}

public:
  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == StringRecTyKind;
  }

  static StringRecTy *get(RecordKeeper &RK);

  std::string getAsString() const override;

  bool typeIsConvertibleTo(const RecTy *RHS) const override;
};

/// 'list<Ty>' - Represent a list of element values, all of which must be of
/// the specified type. The type is stored in ElementTy.
class ListRecTy : public RecTy {
  friend ListRecTy *RecTy::getListTy();

  RecTy *ElementTy;

  explicit ListRecTy(RecTy *T)
      : RecTy(ListRecTyKind, T->getRecordKeeper()), ElementTy(T) {}

public:
  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == ListRecTyKind;
  }

  static ListRecTy *get(RecTy *T) { return T->getListTy(); }
  RecTy *getElementType() const { return ElementTy; }

  std::string getAsString() const override;

  bool typeIsConvertibleTo(const RecTy *RHS) const override;

  bool typeIsA(const RecTy *RHS) const override;
};
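
// Sketch (illustrative, reusing RK from the sketch above): list types are
// created lazily and cached on the element type, so repeated requests return
// the identical object.
// \code
// ListRecTy *ListOfInt = ListRecTy::get(IntRecTy::get(RK));
// assert(ListOfInt == IntRecTy::get(RK)->getListTy());
// assert(ListOfInt->getElementType() == IntRecTy::get(RK));
// \endcode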

/// 'dag' - Represent a dag fragment
class DagRecTy : public RecTy {
  friend detail::RecordKeeperImpl;

  DagRecTy(RecordKeeper &RK) : RecTy(DagRecTyKind, RK) {}

public:
  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == DagRecTyKind;
  }

  static DagRecTy *get(RecordKeeper &RK);

  std::string getAsString() const override;
};

/// '[classname]' - Type of record values that have zero or more superclasses.
///
/// The list of superclasses is non-redundant, i.e. only contains classes that
/// are not the superclass of some other listed class.
class RecordRecTy final : public RecTy, public FoldingSetNode,
                          public TrailingObjects<RecordRecTy, Record *> {
  friend class Record;
  friend detail::RecordKeeperImpl;

  unsigned NumClasses;

  explicit RecordRecTy(RecordKeeper &RK, unsigned Num)
      : RecTy(RecordRecTyKind, RK), NumClasses(Num) {}

public:
  RecordRecTy(const RecordRecTy &) = delete;
  RecordRecTy &operator=(const RecordRecTy &) = delete;

  // Do not use sized deallocation due to trailing objects.
  void operator delete(void *p) { ::operator delete(p); }

  static bool classof(const RecTy *RT) {
    return RT->getRecTyKind() == RecordRecTyKind;
  }

  /// Get the record type with the given non-redundant list of superclasses.
  static RecordRecTy *get(RecordKeeper &RK, ArrayRef<Record *> Classes);
  static RecordRecTy *get(Record *Class);

  void Profile(FoldingSetNodeID &ID) const;

  ArrayRef<Record *> getClasses() const {
    return ArrayRef(getTrailingObjects<Record *>(), NumClasses);
  }

  using const_record_iterator = Record * const *;

  const_record_iterator classes_begin() const { return getClasses().begin(); }
  const_record_iterator classes_end() const { return getClasses().end(); }

  std::string getAsString() const override;

  bool isSubClassOf(Record *Class) const;
  bool typeIsConvertibleTo(const RecTy *RHS) const override;

  bool typeIsA(const RecTy *RHS) const override;
};

/// Find a common type that T1 and T2 convert to.
/// Return nullptr if no such type exists.
RecTy *resolveTypes(RecTy *T1, RecTy *T2);

//===----------------------------------------------------------------------===//
//  Initializer Classes
//===----------------------------------------------------------------------===//

class Init {
protected:
  /// Discriminator enum (for isa<>, dyn_cast<>, et al.)
  ///
  /// This enum is laid out by a preorder traversal of the inheritance
  /// hierarchy, and does not contain an entry for abstract classes, as per
  /// the recommendation in docs/HowToSetUpLLVMStyleRTTI.rst.
  ///
  /// We also explicitly include "first" and "last" values for each
  /// interior node of the inheritance tree, to make it easier to read the
  /// corresponding classof().
  ///
  /// We could pack these a bit tighter by not having the IK_FirstXXXInit
  /// and IK_LastXXXInit be their own values, but that would degrade
  /// readability for really no benefit.
  enum InitKind : uint8_t {
    IK_First, // unused; silence a spurious warning
    IK_FirstTypedInit,
    IK_BitInit,
    IK_BitsInit,
    IK_DagInit,
    IK_DefInit,
    IK_FieldInit,
    IK_IntInit,
    IK_ListInit,
    IK_FirstOpInit,
    IK_BinOpInit,
    IK_TernOpInit,
    IK_UnOpInit,
    IK_LastOpInit,
    IK_CondOpInit,
    IK_FoldOpInit,
    IK_IsAOpInit,
    IK_ExistsOpInit,
    IK_AnonymousNameInit,
    IK_StringInit,
    IK_VarInit,
    IK_VarBitInit,
    IK_VarDefInit,
    IK_LastTypedInit,
    IK_UnsetInit,
    IK_ArgumentInit,
  };

private:
  const InitKind Kind;

protected:
  uint8_t Opc; // Used by UnOpInit, BinOpInit, and TernOpInit

private:
  virtual void anchor();

public:
  /// Get the kind (type) of the value.
  InitKind getKind() const { return Kind; }

  /// Get the record keeper that initialized this Init.
  RecordKeeper &getRecordKeeper() const;

protected:
  explicit Init(InitKind K, uint8_t Opc = 0) : Kind(K), Opc(Opc) {}

public:
  Init(const Init &) = delete;
  Init &operator=(const Init &) = delete;
  virtual ~Init() = default;

  /// Is this a complete value with no unset (uninitialized) subvalues?
  virtual bool isComplete() const { return true; }

  /// Is this a concrete and fully resolved value without any references or
  /// stuck operations? Unset values are concrete.
  virtual bool isConcrete() const { return false; }

  /// Print this value.
  void print(raw_ostream &OS) const { OS << getAsString(); }

  /// Convert this value to a literal form.
  virtual std::string getAsString() const = 0;

  /// Convert this value to a literal form,
  /// without adding quotes around a string.
  virtual std::string getAsUnquotedString() const { return getAsString(); }

  /// Debugging method that may be called through a debugger; just
  /// invokes print on stderr.
  void dump() const;

  /// If this value is convertible to type \p Ty, return a value whose
  /// type is \p Ty, generating a !cast operation if required.
  /// Otherwise, return null.
  virtual Init *getCastTo(RecTy *Ty) const = 0;

  /// Convert to a value whose type is \p Ty, or return null if this
  /// is not possible. This can happen if the value's type is convertible
  /// to \p Ty, but there are unresolved references.
  virtual Init *convertInitializerTo(RecTy *Ty) const = 0;

  /// This function is used to implement the bit range
  /// selection operator. Given a value, it selects the specified bits,
  /// returning them as a new \p Init of type \p bits. If it is not legal
  /// to use the bit selection operator on this value, null is returned.
  virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
    return nullptr;
  }

  /// This function is used to implement the FieldInit class.
  /// Implementors of this method should return the type of the named
  /// field if they are of type record.
  virtual RecTy *getFieldType(StringInit *FieldName) const {
    return nullptr;
  }

  /// This function is used by classes that refer to other
  /// variables which may not be defined at the time the expression is formed.
  /// If a value is set for the variable later, this method will be called on
  /// users of the value to allow the value to propagate out.
  virtual Init *resolveReferences(Resolver &R) const {
    return const_cast<Init *>(this);
  }

  /// Get the \p Init value of the specified bit.
  virtual Init *getBit(unsigned Bit) const = 0;
};

inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
  I.print(OS); return OS;
}

/// This is the common superclass of types that have a specific,
/// explicit type, stored in ValueTy.
class TypedInit : public Init {
  RecTy *ValueTy;

protected:
  explicit TypedInit(InitKind K, RecTy *T, uint8_t Opc = 0)
      : Init(K, Opc), ValueTy(T) {}

public:
  TypedInit(const TypedInit &) = delete;
  TypedInit &operator=(const TypedInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() >= IK_FirstTypedInit &&
           I->getKind() <= IK_LastTypedInit;
  }

  /// Get the type of the Init as a RecTy.
  RecTy *getType() const { return ValueTy; }

  /// Get the record keeper that initialized this Init.
  RecordKeeper &getRecordKeeper() const { return ValueTy->getRecordKeeper(); }

  Init *getCastTo(RecTy *Ty) const override;
  Init *convertInitializerTo(RecTy *Ty) const override;

  Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;

  /// This method is used to implement the FieldInit class.
  /// Implementors of this method should return the type of the named field if
  /// they are of type record.
  RecTy *getFieldType(StringInit *FieldName) const override;
};

/// '?' - Represents an uninitialized value.
class UnsetInit : public Init {
  friend detail::RecordKeeperImpl;

  /// The record keeper that initialized this Init.
  RecordKeeper &RK;

  UnsetInit(RecordKeeper &RK) : Init(IK_UnsetInit), RK(RK) {}

public:
  UnsetInit(const UnsetInit &) = delete;
  UnsetInit &operator=(const UnsetInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_UnsetInit;
  }

  /// Get the singleton unset Init.
  static UnsetInit *get(RecordKeeper &RK);

  /// Get the record keeper that initialized this Init.
  RecordKeeper &getRecordKeeper() const { return RK; }

  Init *getCastTo(RecTy *Ty) const override;
  Init *convertInitializerTo(RecTy *Ty) const override;

  Init *getBit(unsigned Bit) const override {
    return const_cast<UnsetInit*>(this);
  }

  /// Is this a complete value with no unset (uninitialized) subvalues?
  bool isComplete() const override { return false; }

  bool isConcrete() const override { return true; }

  /// Get the string representation of the Init.
  std::string getAsString() const override { return "?"; }
};
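
// Sketch (illustrative): UnsetInit is a per-RecordKeeper singleton; it is
// concrete (nothing left to resolve) yet not complete (the value is unset).
// \code
// Init *U = UnsetInit::get(RK);
// assert(U->isConcrete() && !U->isComplete());
// assert(U->getAsString() == "?");
// \endcode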

// Represent an argument to a class or multiclass instantiation: either
// positional (identified by an index) or named (identified by an Init name).
using ArgAuxType = std::variant<unsigned, Init *>;
class ArgumentInit : public Init, public FoldingSetNode {
public:
  enum Kind {
    Positional,
    Named,
  };

private:
  Init *Value;
  ArgAuxType Aux;

protected:
  explicit ArgumentInit(Init *Value, ArgAuxType Aux)
      : Init(IK_ArgumentInit), Value(Value), Aux(Aux) {}

public:
  ArgumentInit(const ArgumentInit &) = delete;
  ArgumentInit &operator=(const ArgumentInit &) = delete;

  static bool classof(const Init *I) { return I->getKind() == IK_ArgumentInit; }

  RecordKeeper &getRecordKeeper() const { return Value->getRecordKeeper(); }

  static ArgumentInit *get(Init *Value, ArgAuxType Aux);

  bool isPositional() const { return Aux.index() == Positional; }
  bool isNamed() const { return Aux.index() == Named; }

  Init *getValue() const { return Value; }
  unsigned getIndex() const {
    assert(isPositional() && "Should be positional!");
    return std::get<Positional>(Aux);
  }
  Init *getName() const {
    assert(isNamed() && "Should be named!");
    return std::get<Named>(Aux);
  }
  ArgumentInit *cloneWithValue(Init *Value) const { return get(Value, Aux); }

  void Profile(FoldingSetNodeID &ID) const;

  Init *resolveReferences(Resolver &R) const override;
  std::string getAsString() const override {
    if (isPositional())
      return utostr(getIndex()) + ": " + Value->getAsString();
    if (isNamed())
      return getName()->getAsString() + ": " + Value->getAsString();
    llvm_unreachable("Unsupported argument type!");
    return "";
  }

  bool isComplete() const override { return false; }
  bool isConcrete() const override { return false; }
  Init *getBit(unsigned Bit) const override { return Value->getBit(Bit); }
  Init *getCastTo(RecTy *Ty) const override { return Value->getCastTo(Ty); }
  Init *convertInitializerTo(RecTy *Ty) const override {
    return Value->convertInitializerTo(Ty);
  }
};
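
// Sketch (illustrative; Val and Name are hypothetical Inits): the variant
// index doubles as the Kind discriminator, so passing an unsigned produces a
// positional argument and passing an Init* produces a named one.
// \code
// ArgumentInit *Pos = ArgumentInit::get(Val, 2u);   // positional, index 2
// ArgumentInit *Nam = ArgumentInit::get(Val, Name); // named after 'Name'
// assert(Pos->isPositional() && Pos->getIndex() == 2);
// assert(Nam->isNamed() && Nam->getName() == Name);
// \endcode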

/// 'true'/'false' - Represent a concrete initializer for a bit.
class BitInit final : public TypedInit {
  friend detail::RecordKeeperImpl;

  bool Value;

  explicit BitInit(bool V, RecTy *T) : TypedInit(IK_BitInit, T), Value(V) {}

public:
  BitInit(const BitInit &) = delete;
  BitInit &operator=(BitInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_BitInit;
  }

  static BitInit *get(RecordKeeper &RK, bool V);

  bool getValue() const { return Value; }

  Init *convertInitializerTo(RecTy *Ty) const override;

  Init *getBit(unsigned Bit) const override {
    assert(Bit < 1 && "Bit index out of range!");
    return const_cast<BitInit*>(this);
  }

  bool isConcrete() const override { return true; }
  std::string getAsString() const override { return Value ? "1" : "0"; }
};

/// '{ a, b, c }' - Represents an initializer for a BitsRecTy value.
/// It contains a vector of bits, whose size is determined by the type.
class BitsInit final : public TypedInit, public FoldingSetNode,
                       public TrailingObjects<BitsInit, Init *> {
  unsigned NumBits;

  BitsInit(RecordKeeper &RK, unsigned N)
      : TypedInit(IK_BitsInit, BitsRecTy::get(RK, N)), NumBits(N) {}

public:
  BitsInit(const BitsInit &) = delete;
  BitsInit &operator=(const BitsInit &) = delete;

  // Do not use sized deallocation due to trailing objects.
  void operator delete(void *p) { ::operator delete(p); }

  static bool classof(const Init *I) {
    return I->getKind() == IK_BitsInit;
  }

  static BitsInit *get(RecordKeeper &RK, ArrayRef<Init *> Range);

  void Profile(FoldingSetNodeID &ID) const;

  unsigned getNumBits() const { return NumBits; }

  Init *convertInitializerTo(RecTy *Ty) const override;
  Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;

  bool isComplete() const override {
    for (unsigned i = 0; i != getNumBits(); ++i)
      if (!getBit(i)->isComplete()) return false;
    return true;
  }

  /// Return true if no bit is complete (i.e. every bit is still unset).
  bool allInComplete() const {
    for (unsigned i = 0; i != getNumBits(); ++i)
      if (getBit(i)->isComplete()) return false;
    return true;
  }

  bool isConcrete() const override;
  std::string getAsString() const override;

  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned Bit) const override {
    assert(Bit < NumBits && "Bit index out of range!");
    return getTrailingObjects<Init *>()[Bit];
  }
};

/// '7' - Represent an initialization by a literal integer value.
class IntInit : public TypedInit {
  int64_t Value;

  explicit IntInit(RecordKeeper &RK, int64_t V)
      : TypedInit(IK_IntInit, IntRecTy::get(RK)), Value(V) {}

public:
  IntInit(const IntInit &) = delete;
  IntInit &operator=(const IntInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_IntInit;
  }

  static IntInit *get(RecordKeeper &RK, int64_t V);

  int64_t getValue() const { return Value; }

  Init *convertInitializerTo(RecTy *Ty) const override;
  Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const override;

  bool isConcrete() const override { return true; }
  std::string getAsString() const override;

  Init *getBit(unsigned Bit) const override {
    return BitInit::get(getRecordKeeper(), (Value & (1ULL << Bit)) != 0);
  }
};
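
// Sketch (illustrative): getBit() materializes individual bits on demand, so
// an integer init can be used wherever bits are selected.
// \code
// IntInit *Seven = IntInit::get(RK, 7);                  // 0b111
// assert(cast<BitInit>(Seven->getBit(0))->getValue());   // bit 0 is 1
// assert(!cast<BitInit>(Seven->getBit(3))->getValue());  // bit 3 is 0
// \endcode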

/// "anonymous_n" - Represent an anonymous record name
class AnonymousNameInit : public TypedInit {
  unsigned Value;

  explicit AnonymousNameInit(RecordKeeper &RK, unsigned V)
      : TypedInit(IK_AnonymousNameInit, StringRecTy::get(RK)), Value(V) {}

public:
  AnonymousNameInit(const AnonymousNameInit &) = delete;
  AnonymousNameInit &operator=(const AnonymousNameInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_AnonymousNameInit;
  }

  static AnonymousNameInit *get(RecordKeeper &RK, unsigned);

  unsigned getValue() const { return Value; }

  StringInit *getNameInit() const;

  std::string getAsString() const override;

  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned Bit) const override {
    llvm_unreachable("Illegal bit reference off string");
  }
};

/// "foo" - Represent an initialization by a string value.
class StringInit : public TypedInit {
public:
  enum StringFormat {
    SF_String, // Format as "text"
    SF_Code,   // Format as [{text}]
  };

private:
  StringRef Value;
  StringFormat Format;

  explicit StringInit(RecordKeeper &RK, StringRef V, StringFormat Fmt)
      : TypedInit(IK_StringInit, StringRecTy::get(RK)), Value(V), Format(Fmt) {}

public:
  StringInit(const StringInit &) = delete;
  StringInit &operator=(const StringInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_StringInit;
  }

  static StringInit *get(RecordKeeper &RK, StringRef,
                         StringFormat Fmt = SF_String);

  static StringFormat determineFormat(StringFormat Fmt1, StringFormat Fmt2) {
    return (Fmt1 == SF_Code || Fmt2 == SF_Code) ? SF_Code : SF_String;
  }

  StringRef getValue() const { return Value; }
  StringFormat getFormat() const { return Format; }
  bool hasCodeFormat() const { return Format == SF_Code; }

  Init *convertInitializerTo(RecTy *Ty) const override;

  bool isConcrete() const override { return true; }

  std::string getAsString() const override {
    if (Format == SF_String)
      return "\"" + Value.str() + "\"";
    else
      return "[{" + Value.str() + "}]";
  }

  std::string getAsUnquotedString() const override {
    return std::string(Value);
  }

  Init *getBit(unsigned Bit) const override {
    llvm_unreachable("Illegal bit reference off string");
  }
};
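
// Sketch (illustrative): the format only affects printing; SF_Code renders
// with [{...}] delimiters and "wins" when two formats are combined.
// \code
// StringInit *S = StringInit::get(RK, "abc");            // prints "abc"
// StringInit *C = StringInit::get(RK, "ret 0", StringInit::SF_Code);
// assert(C->getAsString() == "[{ret 0}]");
// assert(StringInit::determineFormat(S->getFormat(), C->getFormat())
//        == StringInit::SF_Code);
// \endcode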

/// [AL, AH, CL] - Represent a list of defs
///
class ListInit final : public TypedInit, public FoldingSetNode,
                       public TrailingObjects<ListInit, Init *> {
  unsigned NumValues;

public:
  using const_iterator = Init *const *;

private:
  explicit ListInit(unsigned N, RecTy *EltTy)
      : TypedInit(IK_ListInit, ListRecTy::get(EltTy)), NumValues(N) {}

public:
  ListInit(const ListInit &) = delete;
  ListInit &operator=(const ListInit &) = delete;

  // Do not use sized deallocation due to trailing objects.
  void operator delete(void *p) { ::operator delete(p); }

  static bool classof(const Init *I) {
    return I->getKind() == IK_ListInit;
  }
  static ListInit *get(ArrayRef<Init *> Range, RecTy *EltTy);

  void Profile(FoldingSetNodeID &ID) const;

  Init *getElement(unsigned i) const {
    assert(i < NumValues && "List element index out of range!");
    return getTrailingObjects<Init *>()[i];
  }
  RecTy *getElementType() const {
    return cast<ListRecTy>(getType())->getElementType();
  }

  Record *getElementAsRecord(unsigned i) const;

  Init *convertInitializerTo(RecTy *Ty) const override;

  /// This method is used by classes that refer to other
/// variables which may not be defined at the time the expression is formed.
  /// If a value is set for the variable later, this method will be called on
  /// users of the value to allow the value to propagate out.
  ///
  Init *resolveReferences(Resolver &R) const override;

  bool isComplete() const override;
  bool isConcrete() const override;
  std::string getAsString() const override;

  ArrayRef<Init*> getValues() const {
    return ArrayRef(getTrailingObjects<Init *>(), NumValues);
  }

  const_iterator begin() const { return getTrailingObjects<Init *>(); }
  const_iterator end  () const { return begin() + NumValues; }

  size_t         size () const { return NumValues;  }
  bool           empty() const { return NumValues == 0; }

  Init *getBit(unsigned Bit) const override {
    llvm_unreachable("Illegal bit reference off list");
  }
};

/// Base class for operators
///
class OpInit : public TypedInit {
protected:
  explicit OpInit(InitKind K, RecTy *Type, uint8_t Opc)
    : TypedInit(K, Type, Opc) {}

public:
  OpInit(const OpInit &) = delete;
  OpInit &operator=(OpInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() >= IK_FirstOpInit &&
           I->getKind() <= IK_LastOpInit;
  }

  // Clone - Clone this operator, replacing arguments with the new list
  virtual OpInit *clone(ArrayRef<Init *> Operands) const = 0;

  virtual unsigned getNumOperands() const = 0;
  virtual Init *getOperand(unsigned i) const = 0;

  Init *getBit(unsigned Bit) const override;
};

/// !op (X) - Transform an init.
///
class UnOpInit : public OpInit, public FoldingSetNode {
public:
  enum UnaryOp : uint8_t {
    TOLOWER,
    TOUPPER,
    CAST,
    NOT,
    HEAD,
    TAIL,
    SIZE,
    EMPTY,
    GETDAGOP,
    LOG2
  };

private:
  Init *LHS;

  UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
    : OpInit(IK_UnOpInit, Type, opc), LHS(lhs) {}

public:
  UnOpInit(const UnOpInit &) = delete;
  UnOpInit &operator=(const UnOpInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_UnOpInit;
  }

  static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type);

  void Profile(FoldingSetNodeID &ID) const;

  // Clone - Clone this operator, replacing arguments with the new list
  OpInit *clone(ArrayRef<Init *> Operands) const override {
    assert(Operands.size() == 1 &&
           "Wrong number of operands for unary operation");
    return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
  }

  unsigned getNumOperands() const override { return 1; }

  Init *getOperand(unsigned i) const override {
    assert(i == 0 && "Invalid operand id for unary operator");
    return getOperand();
  }

  UnaryOp getOpcode() const { return (UnaryOp)Opc; }
  Init *getOperand() const { return LHS; }

  // Fold - If possible, fold this to a simpler init.  Return this if not
  // possible to fold.
  Init *Fold(Record *CurRec, bool IsFinal = false) const;

  Init *resolveReferences(Resolver &R) const override;

  std::string getAsString() const override;
};

/// !op (X, Y) - Combine two inits.
class BinOpInit : public OpInit, public FoldingSetNode {
public:
  enum BinaryOp : uint8_t {
    ADD,
    SUB,
    MUL,
    DIV,
    AND,
    OR,
    XOR,
    SHL,
    SRA,
    SRL,
    LISTCONCAT,
    LISTSPLAT,
    LISTREMOVE,
    LISTELEM,
    LISTSLICE,
    RANGE,
    RANGEC,
    STRCONCAT,
    INTERLEAVE,
    CONCAT,
    EQ,
    NE,
    LE,
    LT,
    GE,
    GT,
    GETDAGARG,
    GETDAGNAME,
    SETDAGOP,
  };

private:
  Init *LHS, *RHS;

  BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
      OpInit(IK_BinOpInit, Type, opc), LHS(lhs), RHS(rhs) {}

public:
  BinOpInit(const BinOpInit &) = delete;
  BinOpInit &operator=(const BinOpInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_BinOpInit;
  }

  static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs,
                        RecTy *Type);
  static Init *getStrConcat(Init *lhs, Init *rhs);
  static Init *getListConcat(TypedInit *lhs, Init *rhs);

  void Profile(FoldingSetNodeID &ID) const;

  // Clone - Clone this operator, replacing arguments with the new list
  OpInit *clone(ArrayRef<Init *> Operands) const override {
    assert(Operands.size() == 2 &&
           "Wrong number of operands for binary operation");
    return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
  }

  unsigned getNumOperands() const override { return 2; }
  Init *getOperand(unsigned i) const override {
    switch (i) {
    default: llvm_unreachable("Invalid operand id for binary operator");
    case 0: return getLHS();
    case 1: return getRHS();
    }
  }

  BinaryOp getOpcode() const { return (BinaryOp)Opc; }
  Init *getLHS() const { return LHS; }
  Init *getRHS() const { return RHS; }

  std::optional<bool> CompareInit(unsigned Opc, Init *LHS, Init *RHS) const;

  // Fold - If possible, fold this to a simpler init.  Return this if not
  // possible to fold.
  Init *Fold(Record *CurRec) const;

  Init *resolveReferences(Resolver &R) const override;

  std::string getAsString() const override;
};
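
// Folding sketch (illustrative): operator inits fold to simpler inits when
// their operands are concrete; Fold() returns the original init otherwise.
// \code
// Init *Sum = BinOpInit::get(BinOpInit::ADD, IntInit::get(RK, 2),
//                            IntInit::get(RK, 3), IntRecTy::get(RK))
//                 ->Fold(/*CurRec=*/nullptr);
// assert(cast<IntInit>(Sum)->getValue() == 5);
// \endcode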

/// !op (X, Y, Z) - Combine three inits.
class TernOpInit : public OpInit, public FoldingSetNode {
public:
  enum TernaryOp : uint8_t {
    SUBST,
    FOREACH,
    FILTER,
    IF,
    DAG,
    SUBSTR,
    FIND,
    SETDAGARG,
    SETDAGNAME,
  };

private:
  Init *LHS, *MHS, *RHS;

  TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
             RecTy *Type) :
      OpInit(IK_TernOpInit, Type, opc), LHS(lhs), MHS(mhs), RHS(rhs) {}

public:
  TernOpInit(const TernOpInit &) = delete;
  TernOpInit &operator=(const TernOpInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_TernOpInit;
  }

  static TernOpInit *get(TernaryOp opc, Init *lhs,
                         Init *mhs, Init *rhs,
                         RecTy *Type);

  void Profile(FoldingSetNodeID &ID) const;

  // Clone - Clone this operator, replacing arguments with the new list
  OpInit *clone(ArrayRef<Init *> Operands) const override {
    assert(Operands.size() == 3 &&
           "Wrong number of operands for ternary operation");
    return TernOpInit::get(getOpcode(), Operands[0], Operands[1], Operands[2],
                           getType());
  }

  unsigned getNumOperands() const override { return 3; }
  Init *getOperand(unsigned i) const override {
    switch (i) {
    default: llvm_unreachable("Invalid operand id for ternary operator");
    case 0: return getLHS();
    case 1: return getMHS();
    case 2: return getRHS();
    }
  }

  TernaryOp getOpcode() const { return (TernaryOp)Opc; }
  Init *getLHS() const { return LHS; }
  Init *getMHS() const { return MHS; }
  Init *getRHS() const { return RHS; }

  // Fold - If possible, fold this to a simpler init.  Return this if not
  // possible to fold.
  Init *Fold(Record *CurRec) const;

  bool isComplete() const override {
    return LHS->isComplete() && MHS->isComplete() && RHS->isComplete();
  }

  Init *resolveReferences(Resolver &R) const override;

  std::string getAsString() const override;
};

/// !cond(condition_1: value_1, ..., condition_n: value_n)
/// Selects the first value for which condition is true.
/// Otherwise reports an error.
class CondOpInit final : public TypedInit, public FoldingSetNode,
                      public TrailingObjects<CondOpInit, Init *> {
  unsigned NumConds;
  RecTy *ValType;

  CondOpInit(unsigned NC, RecTy *Type)
    : TypedInit(IK_CondOpInit, Type),
      NumConds(NC), ValType(Type) {}

  size_t numTrailingObjects(OverloadToken<Init *>) const {
    return 2*NumConds;
  }

public:
  CondOpInit(const CondOpInit &) = delete;
  CondOpInit &operator=(const CondOpInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_CondOpInit;
  }

  static CondOpInit *get(ArrayRef<Init*> C, ArrayRef<Init*> V,
                        RecTy *Type);

  void Profile(FoldingSetNodeID &ID) const;

  RecTy *getValType() const { return ValType; }

  unsigned getNumConds() const { return NumConds; }

  Init *getCond(unsigned Num) const {
    assert(Num < NumConds && "Condition number out of range!");
    return getTrailingObjects<Init *>()[Num];
  }

  Init *getVal(unsigned Num) const {
    assert(Num < NumConds && "Val number out of range!");
    return getTrailingObjects<Init *>()[Num+NumConds];
  }

  ArrayRef<Init *> getConds() const {
    return ArrayRef(getTrailingObjects<Init *>(), NumConds);
  }

  ArrayRef<Init *> getVals() const {
    return ArrayRef(getTrailingObjects<Init *>() + NumConds, NumConds);
  }

  Init *Fold(Record *CurRec) const;

  Init *resolveReferences(Resolver &R) const override;

  bool isConcrete() const override;
  bool isComplete() const override;
  std::string getAsString() const override;

  using const_case_iterator = SmallVectorImpl<Init*>::const_iterator;
  using const_val_iterator = SmallVectorImpl<Init*>::const_iterator;

  inline const_case_iterator  arg_begin() const { return getConds().begin(); }
  inline const_case_iterator  arg_end  () const { return getConds().end(); }

  inline size_t              case_size () const { return NumConds; }
  inline bool                case_empty() const { return NumConds == 0; }

  inline const_val_iterator name_begin() const { return getVals().begin();}
  inline const_val_iterator name_end  () const { return getVals().end(); }

  inline size_t              val_size () const { return NumConds; }
  inline bool                val_empty() const { return NumConds == 0; }

  Init *getBit(unsigned Bit) const override;
};

/// !foldl (start, lst, a, b, expr) - Fold over a list.
class FoldOpInit : public TypedInit, public FoldingSetNode {
private:
  Init *Start;
  Init *List;
  Init *A;
  Init *B;
  Init *Expr;

  FoldOpInit(Init *Start, Init *List, Init *A, Init *B, Init *Expr, RecTy *Type)
      : TypedInit(IK_FoldOpInit, Type), Start(Start), List(List), A(A), B(B),
        Expr(Expr) {}

public:
  FoldOpInit(const FoldOpInit &) = delete;
  FoldOpInit &operator=(const FoldOpInit &) = delete;

  static bool classof(const Init *I) { return I->getKind() == IK_FoldOpInit; }

  static FoldOpInit *get(Init *Start, Init *List, Init *A, Init *B, Init *Expr,
                         RecTy *Type);

  void Profile(FoldingSetNodeID &ID) const;

  // Fold - If possible, fold this to a simpler init.  Return this if not
  // possible to fold.
  Init *Fold(Record *CurRec) const;

  bool isComplete() const override { return false; }

  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned Bit) const override;

  std::string getAsString() const override;
};

/// !isa<type>(expr) - Dynamically determine the type of an expression.
class IsAOpInit : public TypedInit, public FoldingSetNode {
private:
  RecTy *CheckType;
  Init *Expr;

  IsAOpInit(RecTy *CheckType, Init *Expr)
      : TypedInit(IK_IsAOpInit, IntRecTy::get(CheckType->getRecordKeeper())),
        CheckType(CheckType), Expr(Expr) {}

public:
  IsAOpInit(const IsAOpInit &) = delete;
  IsAOpInit &operator=(const IsAOpInit &) = delete;

  static bool classof(const Init *I) { return I->getKind() == IK_IsAOpInit; }

  static IsAOpInit *get(RecTy *CheckType, Init *Expr);

  void Profile(FoldingSetNodeID &ID) const;

  // Fold - If possible, fold this to a simpler init.  Return this if not
  // possible to fold.
  Init *Fold() const;

  bool isComplete() const override { return false; }

  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned Bit) const override;

  std::string getAsString() const override;
};

/// !exists<type>(expr) - Dynamically determine if a record of `type` named
/// `expr` exists.
class ExistsOpInit : public TypedInit, public FoldingSetNode {
private:
  RecTy *CheckType;
  Init *Expr;

  ExistsOpInit(RecTy *CheckType, Init *Expr)
      : TypedInit(IK_ExistsOpInit, IntRecTy::get(CheckType->getRecordKeeper())),
        CheckType(CheckType), Expr(Expr) {}

public:
  ExistsOpInit(const ExistsOpInit &) = delete;
  ExistsOpInit &operator=(const ExistsOpInit &) = delete;

  static bool classof(const Init *I) { return I->getKind() == IK_ExistsOpInit; }

  static ExistsOpInit *get(RecTy *CheckType, Init *Expr);

  void Profile(FoldingSetNodeID &ID) const;

  // Fold - If possible, fold this to a simpler init.  Return this if not
  // possible to fold.
  Init *Fold(Record *CurRec, bool IsFinal = false) const;

  bool isComplete() const override { return false; }

  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned Bit) const override;

  std::string getAsString() const override;
};

/// 'Opcode' - Represent a reference to an entire variable object.
class VarInit : public TypedInit {
  Init *VarName;

  explicit VarInit(Init *VN, RecTy *T)
      : TypedInit(IK_VarInit, T), VarName(VN) {}

public:
  VarInit(const VarInit &) = delete;
  VarInit &operator=(const VarInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_VarInit;
  }

  static VarInit *get(StringRef VN, RecTy *T);
  static VarInit *get(Init *VN, RecTy *T);

  StringRef getName() const;
  Init *getNameInit() const { return VarName; }

  std::string getNameInitAsString() const {
    return getNameInit()->getAsUnquotedString();
  }

  /// This method is used by classes that refer to other
/// variables which may not be defined at the time the expression is formed.
  /// If a value is set for the variable later, this method will be called on
  /// users of the value to allow the value to propagate out.
  ///
  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned Bit) const override;

  std::string getAsString() const override { return std::string(getName()); }
};

/// Opcode{0} - Represent access to one bit of a variable or field.
class VarBitInit final : public TypedInit {
  TypedInit *TI;
  unsigned Bit;

  VarBitInit(TypedInit *T, unsigned B)
      : TypedInit(IK_VarBitInit, BitRecTy::get(T->getRecordKeeper())), TI(T),
        Bit(B) {
    assert(T->getType() &&
           (isa<IntRecTy>(T->getType()) ||
            (isa<BitsRecTy>(T->getType()) &&
             cast<BitsRecTy>(T->getType())->getNumBits() > B)) &&
           "Illegal VarBitInit expression!");
  }

public:
  VarBitInit(const VarBitInit &) = delete;
  VarBitInit &operator=(const VarBitInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_VarBitInit;
  }

  static VarBitInit *get(TypedInit *T, unsigned B);

  Init *getBitVar() const { return TI; }
  unsigned getBitNum() const { return Bit; }

  std::string getAsString() const override;
  Init *resolveReferences(Resolver &R) const override;

  Init *getBit(unsigned B) const override {
    assert(B < 1 && "Bit index out of range!");
    return const_cast<VarBitInit*>(this);
  }
};

/// AL - Represent a reference to a 'def' in the description
class DefInit : public TypedInit {
  friend class Record;

  Record *Def;

  explicit DefInit(Record *D);

public:
  DefInit(const DefInit &) = delete;
  DefInit &operator=(const DefInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_DefInit;
  }

  static DefInit *get(Record*);

  Init *convertInitializerTo(RecTy *Ty) const override;

  Record *getDef() const { return Def; }

  //virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits);

  RecTy *getFieldType(StringInit *FieldName) const override;

  bool isConcrete() const override { return true; }
  std::string getAsString() const override;

  Init *getBit(unsigned Bit) const override {
    llvm_unreachable("Illegal bit reference off def");
  }
};

/// classname<targs...> - Represent an uninstantiated anonymous class
/// instantiation.
class VarDefInit final : public TypedInit,
                         public FoldingSetNode,
                         public TrailingObjects<VarDefInit, ArgumentInit *> {
  Record *Class;
  DefInit *Def = nullptr; // after instantiation
  unsigned NumArgs;

  explicit VarDefInit(Record *Class, unsigned N);

  DefInit *instantiate();

public:
  VarDefInit(const VarDefInit &) = delete;
  VarDefInit &operator=(const VarDefInit &) = delete;

  // Do not use sized deallocation due to trailing objects.
  void operator delete(void *p) { ::operator delete(p); }

  static bool classof(const Init *I) {
    return I->getKind() == IK_VarDefInit;
  }
  static VarDefInit *get(Record *Class, ArrayRef<ArgumentInit *> Args);

  void Profile(FoldingSetNodeID &ID) const;

  Init *resolveReferences(Resolver &R) const override;
  Init *Fold() const;

  std::string getAsString() const override;

  ArgumentInit *getArg(unsigned i) const {
    assert(i < NumArgs && "Argument index out of range!");
    return getTrailingObjects<ArgumentInit *>()[i];
  }

  using const_iterator = ArgumentInit *const *;

  const_iterator args_begin() const {
    return getTrailingObjects<ArgumentInit *>();
  }
  const_iterator args_end  () const { return args_begin() + NumArgs; }

  size_t         args_size () const { return NumArgs; }
  bool           args_empty() const { return NumArgs == 0; }

  ArrayRef<ArgumentInit *> args() const {
    return ArrayRef(args_begin(), NumArgs);
  }

  Init *getBit(unsigned Bit) const override {
    llvm_unreachable("Illegal bit reference off anonymous def");
  }
};

/// X.Y - Represent a reference to a subfield of a variable
class FieldInit : public TypedInit {
  Init *Rec;                // Record we are referring to
  StringInit *FieldName;    // Field we are accessing

  FieldInit(Init *R, StringInit *FN)
      : TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
#ifndef NDEBUG
    if (!getType()) {
      llvm::errs() << "In Record = " << Rec->getAsString()
                   << ", got FieldName = " << *FieldName
                   << " with non-record type!\n";
      llvm_unreachable("FieldInit with non-record type!");
    }
#endif
  }

public:
  FieldInit(const FieldInit &) = delete;
  FieldInit &operator=(const FieldInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_FieldInit;
  }

  static FieldInit *get(Init *R, StringInit *FN);

  Init *getRecord() const { return Rec; }
  StringInit *getFieldName() const { return FieldName; }

  Init *getBit(unsigned Bit) const override;

  Init *resolveReferences(Resolver &R) const override;
  Init *Fold(Record *CurRec) const;

  bool isConcrete() const override;
  std::string getAsString() const override {
    return Rec->getAsString() + "." + FieldName->getValue().str();
  }
};

/// (v a, b) - Represent a DAG tree value.  DAG inits are required
/// to have at least one value, followed by a (possibly empty) list of
/// arguments.  Each argument can have a name associated with it.
class DagInit final : public TypedInit, public FoldingSetNode,
                      public TrailingObjects<DagInit, Init *, StringInit *> {
  friend TrailingObjects;

  Init *Val;
  StringInit *ValName;
  unsigned NumArgs;
  unsigned NumArgNames;

  DagInit(Init *V, StringInit *VN, unsigned NumArgs, unsigned NumArgNames)
      : TypedInit(IK_DagInit, DagRecTy::get(V->getRecordKeeper())), Val(V),
        ValName(VN), NumArgs(NumArgs), NumArgNames(NumArgNames) {}

  size_t numTrailingObjects(OverloadToken<Init *>) const { return NumArgs; }

public:
  DagInit(const DagInit &) = delete;
  DagInit &operator=(const DagInit &) = delete;

  static bool classof(const Init *I) {
    return I->getKind() == IK_DagInit;
  }

  static DagInit *get(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
                      ArrayRef<StringInit*> NameRange);
  static DagInit *get(Init *V, StringInit *VN,
                      ArrayRef<std::pair<Init*, StringInit*>> Args);

  void Profile(FoldingSetNodeID &ID) const;

  Init *getOperator() const { return Val; }
  Record *getOperatorAsDef(ArrayRef<SMLoc> Loc) const;

  StringInit *getName() const { return ValName; }

  StringRef getNameStr() const {
    return ValName ? ValName->getValue() : StringRef();
  }

  unsigned getNumArgs() const { return NumArgs; }

  Init *getArg(unsigned Num) const {
    assert(Num < NumArgs && "Arg number out of range!");
    return getTrailingObjects<Init *>()[Num];
  }

  /// This method looks up the specified argument name and returns its argument
  /// number or std::nullopt if that argument name does not exist.
  std::optional<unsigned> getArgNo(StringRef Name) const;

  StringInit *getArgName(unsigned Num) const {
    assert(Num < NumArgNames && "Arg number out of range!");
    return getTrailingObjects<StringInit *>()[Num];
  }

  StringRef getArgNameStr(unsigned Num) const {
    StringInit *Init = getArgName(Num);
    return Init ? Init->getValue() : StringRef();
  }

  ArrayRef<Init *> getArgs() const {
    return ArrayRef(getTrailingObjects<Init *>(), NumArgs);
  }

  ArrayRef<StringInit *> getArgNames() const {
    return ArrayRef(getTrailingObjects<StringInit *>(), NumArgNames);
  }

  Init *resolveReferences(Resolver &R) const override;

  bool isConcrete() const override;
  std::string getAsString() const override;

  using const_arg_iterator = SmallVectorImpl<Init*>::const_iterator;
  using const_name_iterator = SmallVectorImpl<StringInit*>::const_iterator;

  inline const_arg_iterator  arg_begin() const { return getArgs().begin(); }
  inline const_arg_iterator  arg_end  () const { return getArgs().end(); }

  inline size_t              arg_size () const { return NumArgs; }
  inline bool                arg_empty() const { return NumArgs == 0; }

  inline const_name_iterator name_begin() const { return getArgNames().begin();}
  inline const_name_iterator name_end  () const { return getArgNames().end(); }

  inline size_t              name_size () const { return NumArgNames; }
  inline bool                name_empty() const { return NumArgNames == 0; }

  Init *getBit(unsigned Bit) const override {
    llvm_unreachable("Illegal bit reference off dag");
  }
};
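
// A minimal usage sketch (not part of the original header): backends commonly
// walk a dag-valued field. The field name "Pattern" is hypothetical.
//
//   DagInit *D = R->getValueAsDag("Pattern");      // R is some Record *
//   Init *Op = D->getOperator();
//   for (unsigned I = 0, E = D->getNumArgs(); I != E; ++I)
//     llvm::errs() << D->getArgNameStr(I) << " = "
//                  << D->getArg(I)->getAsString() << "\n";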

//===----------------------------------------------------------------------===//
//  High-Level Classes
//===----------------------------------------------------------------------===//

/// This class represents a field in a record, including its name, type,
/// value, and source location.
class RecordVal {
  friend class Record;

public:
  enum FieldKind {
    FK_Normal,        // A normal record field.
    FK_NonconcreteOK, // A field that can be nonconcrete ('field' keyword).
    FK_TemplateArg,   // A template argument.
  };

private:
  Init *Name;
  SMLoc Loc; // Source location of definition of name.
  PointerIntPair<RecTy *, 2, FieldKind> TyAndKind;
  Init *Value;
  bool IsUsed = false;

  /// Reference locations to this record value.
  SmallVector<SMRange> ReferenceLocs;

public:
  RecordVal(Init *N, RecTy *T, FieldKind K);
  RecordVal(Init *N, SMLoc Loc, RecTy *T, FieldKind K);

  /// Get the record keeper used to unique this value.
  RecordKeeper &getRecordKeeper() const { return Name->getRecordKeeper(); }

  /// Get the name of the field as a StringRef.
  StringRef getName() const;

  /// Get the name of the field as an Init.
  Init *getNameInit() const { return Name; }

  /// Get the name of the field as a std::string.
  std::string getNameInitAsString() const {
    return getNameInit()->getAsUnquotedString();
  }

  /// Get the source location of the point where the field was defined.
  const SMLoc &getLoc() const { return Loc; }

  /// Is this a field where nonconcrete values are okay?
  bool isNonconcreteOK() const {
    return TyAndKind.getInt() == FK_NonconcreteOK;
  }

  /// Is this a template argument?
  bool isTemplateArg() const {
    return TyAndKind.getInt() == FK_TemplateArg;
  }

  /// Get the type of the field value as a RecTy.
  RecTy *getType() const { return TyAndKind.getPointer(); }

  /// Get the type of the field for printing purposes.
  std::string getPrintType() const;

  /// Get the value of the field as an Init.
  Init *getValue() const { return Value; }

  /// Set the value of the field from an Init.
  bool setValue(Init *V);

  /// Set the value and source location of the field.
  bool setValue(Init *V, SMLoc NewLoc);

  /// Add a reference to this record value.
  void addReferenceLoc(SMRange Loc) { ReferenceLocs.push_back(Loc); }

  /// Return the references of this record value.
  ArrayRef<SMRange> getReferenceLocs() const { return ReferenceLocs; }

  /// Whether this value is used. Useful for reporting warnings, for example
  /// when a template argument is unused.
  void setUsed(bool Used) { IsUsed = Used; }
  bool isUsed() const { return IsUsed; }

  void dump() const;

  /// Print the value to an output stream, possibly with a semicolon.
  void print(raw_ostream &OS, bool PrintSem = true) const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const RecordVal &RV) {
  RV.print(OS << "  ");
  return OS;
}

class Record {
public:
  struct AssertionInfo {
    SMLoc Loc;
    Init *Condition;
    Init *Message;

    // User-defined constructor to support std::make_unique(). It can be
    // removed in C++20 when braced initialization is supported.
    AssertionInfo(SMLoc Loc, Init *Condition, Init *Message)
        : Loc(Loc), Condition(Condition), Message(Message) {}
  };

private:
  Init *Name;
  // Location where record was instantiated, followed by the location of
  // multiclass prototypes used, and finally by the locations of references to
  // this record.
  SmallVector<SMLoc, 4> Locs;
  SmallVector<SMLoc, 0> ForwardDeclarationLocs;
  SmallVector<SMRange, 0> ReferenceLocs;
  SmallVector<Init *, 0> TemplateArgs;
  SmallVector<RecordVal, 0> Values;
  SmallVector<AssertionInfo, 0> Assertions;

  // All superclasses in the inheritance forest in post-order (yes, it
  // must be a forest; diamond-shaped inheritance is not allowed).
  SmallVector<std::pair<Record *, SMRange>, 0> SuperClasses;

  // Tracks Record instances. Not owned by Record.
  RecordKeeper &TrackedRecords;

  // The DefInit corresponding to this record.
  DefInit *CorrespondingDefInit = nullptr;

  // Unique record ID.
  unsigned ID;

  bool IsAnonymous;
  bool IsClass;

  void checkName();

public:
  // Constructs a record.
  explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
                  bool Anonymous = false, bool Class = false)
      : Name(N), Locs(locs.begin(), locs.end()), TrackedRecords(records),
        ID(getNewUID(N->getRecordKeeper())), IsAnonymous(Anonymous),
        IsClass(Class) {
    checkName();
  }

  explicit Record(StringRef N, ArrayRef<SMLoc> locs, RecordKeeper &records,
                  bool Class = false)
      : Record(StringInit::get(records, N), locs, records, false, Class) {}

  // When copy-constructing a Record, we must still guarantee a globally unique
  // ID number. Don't copy CorrespondingDefInit either, since it's owned by the
  // original record. All other fields can be copied normally.
  Record(const Record &O)
      : Name(O.Name), Locs(O.Locs), TemplateArgs(O.TemplateArgs),
        Values(O.Values), Assertions(O.Assertions),
        SuperClasses(O.SuperClasses), TrackedRecords(O.TrackedRecords),
        ID(getNewUID(O.getRecords())), IsAnonymous(O.IsAnonymous),
        IsClass(O.IsClass) {}

  static unsigned getNewUID(RecordKeeper &RK);

  unsigned getID() const { return ID; }

  StringRef getName() const { return cast<StringInit>(Name)->getValue(); }

  Init *getNameInit() const {
    return Name;
  }

  std::string getNameInitAsString() const {
    return getNameInit()->getAsUnquotedString();
  }

  void setName(Init *Name);      // Also updates RecordKeeper.

  ArrayRef<SMLoc> getLoc() const { return Locs; }
  void appendLoc(SMLoc Loc) { Locs.push_back(Loc); }

  ArrayRef<SMLoc> getForwardDeclarationLocs() const {
    return ForwardDeclarationLocs;
  }

  /// Add a reference to this record value.
  void appendReferenceLoc(SMRange Loc) { ReferenceLocs.push_back(Loc); }

  /// Return the references of this record value.
  ArrayRef<SMRange> getReferenceLocs() const { return ReferenceLocs; }

  // Update a class location when encountering a (re-)definition.
  void updateClassLoc(SMLoc Loc);

  // Make the type that this record should have based on its superclasses.
  RecordRecTy *getType();

  /// Get the corresponding DefInit.
  DefInit *getDefInit();

  bool isClass() const { return IsClass; }

  ArrayRef<Init *> getTemplateArgs() const {
    return TemplateArgs;
  }

  ArrayRef<RecordVal> getValues() const { return Values; }

  ArrayRef<AssertionInfo> getAssertions() const { return Assertions; }

  ArrayRef<std::pair<Record *, SMRange>>  getSuperClasses() const {
    return SuperClasses;
  }

  /// Determine whether this record has the specified direct superclass.
  bool hasDirectSuperClass(const Record *SuperClass) const;

  /// Append the direct superclasses of this record to Classes.
  void getDirectSuperClasses(SmallVectorImpl<Record *> &Classes) const;

  bool isTemplateArg(Init *Name) const {
    return llvm::is_contained(TemplateArgs, Name);
  }

  const RecordVal *getValue(const Init *Name) const {
    for (const RecordVal &Val : Values)
      if (Val.Name == Name) return &Val;
    return nullptr;
  }

  const RecordVal *getValue(StringRef Name) const {
    return getValue(StringInit::get(getRecords(), Name));
  }

  RecordVal *getValue(const Init *Name) {
    return const_cast<RecordVal *>(static_cast<const Record *>(this)->getValue(Name));
  }

  RecordVal *getValue(StringRef Name) {
    return const_cast<RecordVal *>(static_cast<const Record *>(this)->getValue(Name));
  }

  void addTemplateArg(Init *Name) {
    assert(!isTemplateArg(Name) && "Template arg already defined!");
    TemplateArgs.push_back(Name);
  }

  void addValue(const RecordVal &RV) {
    assert(getValue(RV.getNameInit()) == nullptr && "Value already added!");
    Values.push_back(RV);
  }

  void removeValue(Init *Name) {
    for (unsigned i = 0, e = Values.size(); i != e; ++i)
      if (Values[i].getNameInit() == Name) {
        Values.erase(Values.begin()+i);
        return;
      }
    llvm_unreachable("Cannot remove an entry that does not exist!");
  }

  void removeValue(StringRef Name) {
    removeValue(StringInit::get(getRecords(), Name));
  }

  void addAssertion(SMLoc Loc, Init *Condition, Init *Message) {
    Assertions.push_back(AssertionInfo(Loc, Condition, Message));
  }

  void appendAssertions(const Record *Rec) {
    Assertions.append(Rec->Assertions);
  }

  void checkRecordAssertions();
  void checkUnusedTemplateArgs();

  bool isSubClassOf(const Record *R) const {
    for (const auto &SCPair : SuperClasses)
      if (SCPair.first == R)
        return true;
    return false;
  }

  bool isSubClassOf(StringRef Name) const {
    for (const auto &SCPair : SuperClasses) {
      if (const auto *SI = dyn_cast<StringInit>(SCPair.first->getNameInit())) {
        if (SI->getValue() == Name)
          return true;
      } else if (SCPair.first->getNameInitAsString() == Name) {
        return true;
      }
    }
    return false;
  }

  void addSuperClass(Record *R, SMRange Range) {
    assert(!CorrespondingDefInit &&
           "changing type of record after it has been referenced");
    assert(!isSubClassOf(R) && "Already subclassing record!");
    SuperClasses.push_back(std::make_pair(R, Range));
  }

  /// If there are any field references that refer to fields that have been
  /// filled in, we can propagate the values now.
  ///
  /// This is a final resolve: any error messages, e.g. due to undefined !cast
  /// references, are generated now.
  void resolveReferences(Init *NewName = nullptr);

  /// Apply the resolver to the name of the record as well as to the
  /// initializers of all fields of the record except SkipVal.
  ///
  /// The resolver should not resolve any of the fields itself, to avoid
  /// recursion / infinite loops.
  void resolveReferences(Resolver &R, const RecordVal *SkipVal = nullptr);

  RecordKeeper &getRecords() const {
    return TrackedRecords;
  }

  bool isAnonymous() const {
    return IsAnonymous;
  }

  void dump() const;

  //===--------------------------------------------------------------------===//
  // High-level methods useful to tablegen back-ends
  //

  /// Return the source location for the named field.
  SMLoc getFieldLoc(StringRef FieldName) const;

  /// Return the initializer for a value with the specified name, or throw an
  /// exception if the field does not exist.
  Init *getValueInit(StringRef FieldName) const;

  /// Return true if the named field is unset.
  bool isValueUnset(StringRef FieldName) const {
    return isa<UnsetInit>(getValueInit(FieldName));
  }

  /// This method looks up the specified field and returns its value as a
  /// string, throwing an exception if the field does not exist or if the value
  /// is not a string.
  StringRef getValueAsString(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// string, throwing an exception if the value is not a string and
  /// std::nullopt if the field does not exist.
  std::optional<StringRef> getValueAsOptionalString(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// BitsInit, throwing an exception if the field does not exist or if the
  /// value is not the right type.
  BitsInit *getValueAsBitsInit(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// ListInit, throwing an exception if the field does not exist or if the
  /// value is not the right type.
  ListInit *getValueAsListInit(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// vector of records, throwing an exception if the field does not exist or
  /// if the value is not the right type.
  std::vector<Record*> getValueAsListOfDefs(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// vector of integers, throwing an exception if the field does not exist or
  /// if the value is not the right type.
  std::vector<int64_t> getValueAsListOfInts(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// vector of strings, throwing an exception if the field does not exist or
  /// if the value is not the right type.
  std::vector<StringRef> getValueAsListOfStrings(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// Record, throwing an exception if the field does not exist or if the value
  /// is not the right type.
  Record *getValueAsDef(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a
  /// Record, returning null if the field exists but is "uninitialized" (i.e.
  /// set to `?`), and throwing an exception if the field does not exist or if
  /// its value is not the right type.
  Record *getValueAsOptionalDef(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a bit,
  /// throwing an exception if the field does not exist or if the value is not
  /// the right type.
  bool getValueAsBit(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a bit.
  /// If the field is unset, sets Unset to true and returns false.
  bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const;

  /// This method looks up the specified field and returns its value as an
  /// int64_t, throwing an exception if the field does not exist or if the
  /// value is not the right type.
  int64_t getValueAsInt(StringRef FieldName) const;

  /// This method looks up the specified field and returns its value as a Dag,
  /// throwing an exception if the field does not exist or if the value is not
  /// the right type.
  DagInit *getValueAsDag(StringRef FieldName) const;
};

raw_ostream &operator<<(raw_ostream &OS, const Record &R);
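
// A minimal usage sketch (not part of the original header) of the high-level
// accessors above; the field names "Mnemonic", "Size", and "Predicates" are
// hypothetical:
//
//   StringRef Mnemonic = R->getValueAsString("Mnemonic");
//   int64_t Size = R->getValueAsInt("Size");
//   if (!R->isValueUnset("Predicates"))
//     for (Record *P : R->getValueAsListOfDefs("Predicates"))
//       llvm::errs() << P->getName() << "\n";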

class RecordKeeper {
  using RecordMap = std::map<std::string, std::unique_ptr<Record>, std::less<>>;
  using GlobalMap = std::map<std::string, Init *, std::less<>>;

public:
  RecordKeeper();
  ~RecordKeeper();

  /// Return the internal implementation of the RecordKeeper.
  detail::RecordKeeperImpl &getImpl() { return *Impl; }

  /// Get the main TableGen input file's name.
  const std::string getInputFilename() const { return InputFilename; }

  /// Get the map of classes.
  const RecordMap &getClasses() const { return Classes; }

  /// Get the map of records (defs).
  const RecordMap &getDefs() const { return Defs; }

  /// Get the map of global variables.
  const GlobalMap &getGlobals() const { return ExtraGlobals; }

  /// Get the class with the specified name.
  Record *getClass(StringRef Name) const {
    auto I = Classes.find(Name);
    return I == Classes.end() ? nullptr : I->second.get();
  }

  /// Get the concrete record with the specified name.
  Record *getDef(StringRef Name) const {
    auto I = Defs.find(Name);
    return I == Defs.end() ? nullptr : I->second.get();
  }

  /// Get the \p Init value of the specified global variable.
  Init *getGlobal(StringRef Name) const {
    if (Record *R = getDef(Name))
      return R->getDefInit();
    auto It = ExtraGlobals.find(Name);
    return It == ExtraGlobals.end() ? nullptr : It->second;
  }

  void saveInputFilename(std::string Filename) {
    InputFilename = Filename;
  }

  void addClass(std::unique_ptr<Record> R) {
    bool Ins = Classes.insert(std::make_pair(std::string(R->getName()),
                                             std::move(R))).second;
    (void)Ins;
    assert(Ins && "Class already exists");
  }

  void addDef(std::unique_ptr<Record> R) {
    bool Ins = Defs.insert(std::make_pair(std::string(R->getName()),
                                          std::move(R))).second;
    (void)Ins;
    assert(Ins && "Record already exists");
  }

  void addExtraGlobal(StringRef Name, Init *I) {
    bool Ins = ExtraGlobals.insert(std::make_pair(std::string(Name), I)).second;
    (void)Ins;
    assert(!getDef(Name));
    assert(Ins && "Global already exists");
  }

  Init *getNewAnonymousName();

  /// Start phase timing; called if the --time-phases option is specified.
  void startPhaseTiming() {
    TimingGroup = new TimerGroup("TableGen", "TableGen Phase Timing");
  }

  /// Start timing a phase. Automatically stops any previous phase timer.
  void startTimer(StringRef Name);

  /// Stop timing a phase.
  void stopTimer();

  /// Start timing the overall backend. If the backend itself starts a timer,
  /// then this timer is cleared.
  void startBackendTimer(StringRef Name);

  /// Stop timing the overall backend.
  void stopBackendTimer();

  /// Stop phase timing and print the report.
  void stopPhaseTiming() {
    delete TimingGroup;
    TimingGroup = nullptr;
  }

  //===--------------------------------------------------------------------===//
  // High-level helper methods, useful for tablegen backends.

  /// Get all the concrete records that inherit from the one specified
  /// class. The class must be defined.
  std::vector<Record *> getAllDerivedDefinitions(StringRef ClassName) const;

  /// Get all the concrete records that inherit from all the specified
  /// classes. The classes must be defined.
  std::vector<Record *> getAllDerivedDefinitions(
      ArrayRef<StringRef> ClassNames) const;

  /// Get all the concrete records that inherit from specified class, if the
  /// class is defined. Returns an empty vector if the class is not defined.
  std::vector<Record *>
  getAllDerivedDefinitionsIfDefined(StringRef ClassName) const;

  void dump() const;

private:
  RecordKeeper(RecordKeeper &&) = delete;
  RecordKeeper(const RecordKeeper &) = delete;
  RecordKeeper &operator=(RecordKeeper &&) = delete;
  RecordKeeper &operator=(const RecordKeeper &) = delete;

  std::string InputFilename;
  RecordMap Classes, Defs;
  mutable StringMap<std::vector<Record *>> ClassRecordsMap;
  GlobalMap ExtraGlobals;

  // These members are for the phase timing feature. We need a timer group,
  // the last timer started, and a flag to say whether the last timer
  // is the special "backend overall timer."
  TimerGroup *TimingGroup = nullptr;
  Timer *LastTimer = nullptr;
  bool BackendTimer = false;

  /// The internal uniquer implementation of the RecordKeeper.
  std::unique_ptr<detail::RecordKeeperImpl> Impl;
};
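
// A minimal usage sketch (not part of the original header): a backend usually
// receives a RecordKeeper and enumerates the defs it cares about. The class
// name "MyInstruction" is hypothetical.
//
//   void runBackend(RecordKeeper &RK, raw_ostream &OS) {
//     for (Record *R : RK.getAllDerivedDefinitions("MyInstruction"))
//       OS << R->getName() << "\n";
//   }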

/// Sorting predicate to sort record pointers by name.
struct LessRecord {
  bool operator()(const Record *Rec1, const Record *Rec2) const {
    return StringRef(Rec1->getName()).compare_numeric(Rec2->getName()) < 0;
  }
};

/// Sorting predicate to sort record pointers by their
/// unique ID. If you just need a deterministic order, use this, since it
/// just compares two `unsigned`; the other sorting predicates require
/// string manipulation.
struct LessRecordByID {
  bool operator()(const Record *LHS, const Record *RHS) const {
    return LHS->getID() < RHS->getID();
  }
};

/// Sorting predicate to sort record pointers by their
/// name field.
struct LessRecordFieldName {
  bool operator()(const Record *Rec1, const Record *Rec2) const {
    return Rec1->getValueAsString("Name") < Rec2->getValueAsString("Name");
  }
};

struct LessRecordRegister {
  struct RecordParts {
    SmallVector<std::pair< bool, StringRef>, 4> Parts;

    RecordParts(StringRef Rec) {
      if (Rec.empty())
        return;

      size_t Len = 0;
      const char *Start = Rec.data();
      const char *Curr = Start;
      bool IsDigitPart = isDigit(Curr[0]);
      for (size_t I = 0, E = Rec.size(); I != E; ++I, ++Len) {
        bool IsDigit = isDigit(Curr[I]);
        if (IsDigit != IsDigitPart) {
          Parts.push_back(std::make_pair(IsDigitPart, StringRef(Start, Len)));
          Len = 0;
          Start = &Curr[I];
          IsDigitPart = isDigit(Curr[I]);
        }
      }
      // Push the last part.
      Parts.push_back(std::make_pair(IsDigitPart, StringRef(Start, Len)));
    }

    size_t size() const { return Parts.size(); }

    std::pair<bool, StringRef> getPart(size_t i) const {
      assert(i < Parts.size() && "Invalid idx!");
      return Parts[i];
    }
  };

  bool operator()(const Record *Rec1, const Record *Rec2) const {
    RecordParts LHSParts(StringRef(Rec1->getName()));
    RecordParts RHSParts(StringRef(Rec2->getName()));

    size_t LHSNumParts = LHSParts.size();
    size_t RHSNumParts = RHSParts.size();
    assert (LHSNumParts && RHSNumParts && "Expected at least one part!");

    if (LHSNumParts != RHSNumParts)
      return LHSNumParts < RHSNumParts;

    // We expect the registers to be of the form [_a-zA-Z]+([0-9]*[_a-zA-Z]*)*.
    for (size_t I = 0, E = LHSNumParts; I < E; I+=2) {
      std::pair<bool, StringRef> LHSPart = LHSParts.getPart(I);
      std::pair<bool, StringRef> RHSPart = RHSParts.getPart(I);
      // Expect even part to always be alpha.
      assert (LHSPart.first == false && RHSPart.first == false &&
              "Expected both parts to be alpha.");
      if (int Res = LHSPart.second.compare(RHSPart.second))
        return Res < 0;
    }
    for (size_t I = 1, E = LHSNumParts; I < E; I+=2) {
      std::pair<bool, StringRef> LHSPart = LHSParts.getPart(I);
      std::pair<bool, StringRef> RHSPart = RHSParts.getPart(I);
      // Expect odd part to always be numeric.
      assert (LHSPart.first == true && RHSPart.first == true &&
              "Expected both parts to be numeric.");
      if (LHSPart.second.size() != RHSPart.second.size())
        return LHSPart.second.size() < RHSPart.second.size();

      unsigned LHSVal, RHSVal;

      bool LHSFailed = LHSPart.second.getAsInteger(10, LHSVal); (void)LHSFailed;
      assert(!LHSFailed && "Unable to convert LHS to integer.");
      bool RHSFailed = RHSPart.second.getAsInteger(10, RHSVal); (void)RHSFailed;
      assert(!RHSFailed && "Unable to convert RHS to integer.");

      if (LHSVal != RHSVal)
        return LHSVal < RHSVal;
    }
    return LHSNumParts < RHSNumParts;
  }
};
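
// A usage sketch (not part of the original header): this predicate yields a
// numerically aware order for register-like names, e.g. R2 sorts before R10,
// where a plain string compare would put R10 first.
//
//   llvm::sort(Regs, LessRecordRegister());   // Regs: vector<Record *>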

raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);

//===----------------------------------------------------------------------===//
//  Resolvers
//===----------------------------------------------------------------------===//

/// Interface for looking up the initializer for a variable name, used by
/// Init::resolveReferences.
class Resolver {
  Record *CurRec;
  bool IsFinal = false;

public:
  explicit Resolver(Record *CurRec) : CurRec(CurRec) {}
  virtual ~Resolver() = default;

  Record *getCurrentRecord() const { return CurRec; }

  /// Return the initializer for the given variable name (should normally be a
  /// StringInit), or nullptr if the name could not be resolved.
  virtual Init *resolve(Init *VarName) = 0;

  // Whether bits in a BitsInit should stay unresolved if resolving them would
  // result in a ? (UnsetInit). This behavior is used to represent instruction
  // encodings by keeping references to unset variables within a record.
  virtual bool keepUnsetBits() const { return false; }

  // Whether this is the final resolve step before adding a record to the
  // RecordKeeper. Error reporting during resolve and related constant folding
  // should only happen when this is true.
  bool isFinal() const { return IsFinal; }

  void setFinal(bool Final) { IsFinal = Final; }
};

/// Resolve arbitrary mappings.
class MapResolver final : public Resolver {
  struct MappedValue {
    Init *V;
    bool Resolved;

    MappedValue() : V(nullptr), Resolved(false) {}
    MappedValue(Init *V, bool Resolved) : V(V), Resolved(Resolved) {}
  };

  DenseMap<Init *, MappedValue> Map;

public:
  explicit MapResolver(Record *CurRec = nullptr) : Resolver(CurRec) {}

  void set(Init *Key, Init *Value) { Map[Key] = {Value, false}; }

  bool isComplete(Init *VarName) const {
    auto It = Map.find(VarName);
    assert(It != Map.end() && "key must be present in map");
    return It->second.V->isComplete();
  }

  Init *resolve(Init *VarName) override;
};
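
// A minimal usage sketch (not part of the original header): substitute a
// concrete value for a variable, then fold an initializer. Rec, RK, and
// SomeInit are assumed to be in scope.
//
//   MapResolver MR(Rec);                                    // Rec: Record *
//   MR.set(StringInit::get(RK, "x"), IntInit::get(RK, 42));
//   Init *Folded = SomeInit->resolveReferences(MR);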

/// Resolve all variables from a record except for unset variables.
class RecordResolver final : public Resolver {
  DenseMap<Init *, Init *> Cache;
  SmallVector<Init *, 4> Stack;
  Init *Name = nullptr;

public:
  explicit RecordResolver(Record &R) : Resolver(&R) {}

  void setName(Init *NewName) { Name = NewName; }

  Init *resolve(Init *VarName) override;

  bool keepUnsetBits() const override { return true; }
};

/// Delegate resolving to a sub-resolver, but shadow some variable names.
class ShadowResolver final : public Resolver {
  Resolver &R;
  DenseSet<Init *> Shadowed;

public:
  explicit ShadowResolver(Resolver &R)
      : Resolver(R.getCurrentRecord()), R(R) {
    setFinal(R.isFinal());
  }

  void addShadow(Init *Key) { Shadowed.insert(Key); }

  Init *resolve(Init *VarName) override {
    if (Shadowed.count(VarName))
      return nullptr;
    return R.resolve(VarName);
  }
};

/// (Optionally) delegate resolving to a sub-resolver, and keep track whether
/// there were unresolved references.
class TrackUnresolvedResolver final : public Resolver {
  Resolver *R;
  bool FoundUnresolved = false;

public:
  explicit TrackUnresolvedResolver(Resolver *R = nullptr)
      : Resolver(R ? R->getCurrentRecord() : nullptr), R(R) {}

  bool foundUnresolved() const { return FoundUnresolved; }

  Init *resolve(Init *VarName) override;
};

/// Do not resolve anything, but keep track of whether a given variable was
/// referenced.
class HasReferenceResolver final : public Resolver {
  Init *VarNameToTrack;
  bool Found = false;

public:
  explicit HasReferenceResolver(Init *VarNameToTrack)
      : Resolver(nullptr), VarNameToTrack(VarNameToTrack) {}

  bool found() const { return Found; }

  Init *resolve(Init *VarName) override;
};

void EmitDetailedRecords(RecordKeeper &RK, raw_ostream &OS);
void EmitJSON(RecordKeeper &RK, raw_ostream &OS);

} // end namespace llvm

#endif // LLVM_TABLEGEN_RECORD_H

//===- Automaton.td ----------------------------------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the key top-level classes needed to produce a reasonably
// generic finite-state automaton.
//
//===----------------------------------------------------------------------===//

// Define a record inheriting from GenericAutomaton to generate a reasonably
// generic finite-state automaton over a set of actions and states.
//
// This automaton is defined by:
//   1) a state space (explicit, always bits<32>).
//   2) a set of input symbols (actions, explicit) and
//   3) a transition function from state + action -> state.
//
// A theoretical automaton is defined by <Q, S, d, q0, F>:
//   Q: A set of possible states.
//   S: (sigma) The input alphabet.
//   d: (delta) The transition function f(q in Q, s in S) -> q' in Q.
//   q0: The initial state; here it is always the all-zeros state.
//   F: The set of final (accepting) states.
//
// Because generating all possible states is tedious, we instead define the
// transition function only and crawl all reachable states starting from the
// initial state with all inputs under all transitions until termination.
//
// We define F = Q, that is, all valid states are accepting.
//
// To ensure the generation of the automaton terminates, the state transitions
// are defined as a lattice (meaning every transitioned-to state is more
// specific than the transitioned-from state, for some definition of specificity).
// Concretely, a transition may set one or more bits in the state that were
// previously zero to one. If any bit was not zero, the transition is invalid.
//
// Instead of defining all possible states (which would be cumbersome), the user
// provides a set of possible Transitions from state A, consuming an input
// symbol, to state B. The Transition object transforms state A to state B and
// acts as a predicate. This means the state space can be discovered by crawling
// all the possible transitions until none are valid.
//
// This automaton is considered to be nondeterministic, meaning that multiple
// transitions can occur from any (state, action) pair. The generated automaton
// is determinized, meaning that it executes in O(k) time where k is the input
// sequence length.
//
// In addition to a generated automaton that determines if a sequence of inputs
// is accepted or not, a table is emitted that allows determining a plausible
// sequence of states traversed to accept that input.
class GenericAutomaton {
  // Name of a class that inherits from Transition. All records inheriting from
  // this class will be considered when constructing the automaton.
  string TransitionClass;

  // Names of fields within TransitionClass that define the action symbol. This
  // defines the action as an N-tuple.
  //
  // Each symbol field can be of class, int, string or code type.
  //   If the type of a field is a class, the Record's name is used verbatim
  //     in C++ and the class name is used as the C++ type name.
  //   If the type of a field is a string, code or int, that is also used
  //     verbatim in C++.
  //
  // To override the C++ type name for field F, define a field called TypeOf_F.
  // This should be a string that will be used verbatim in C++.
  //
  // As an example, to define a 2-tuple with an enum and an int, one might:
  //   class MyTransition : Transition {
  //     MyEnum S1;
  //     int S2;
  //   }
  //   def MyAutomaton : GenericAutomaton {
  //     let TransitionClass = "MyTransition";
  //     let SymbolFields = ["S1", "S2"];
  //     string TypeOf_S1 = "MyEnumInCxxKind";
  //   }
  list<string> SymbolFields;
}

// All transitions inherit from Transition.
class Transition {
  // A transition S' = T(S) is valid if, for every set bit in NewState, the
  // corresponding bit in S is clear. That is:
  //   def T(S):
  //     S' = S | NewState
  //     return S' if S' != S else Failure
  //
  // The automaton generator uses this property to crawl the set of possible
  // transitions from a starting state of 0b0.
  bits<32> NewState;
}
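
// An illustrative sketch (not part of the original file): a transition that
// claims a single hypothetical resource bit. Starting from state 0b0, taking
// this transition twice is invalid, because the bit is already set.
//
//   def ClaimUnit0 : Transition {
//     let NewState = 0b1;
//   }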

//===- SearchableTable.td ----------------------------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the key top-level classes needed to produce a reasonably
// generic table that can be binary-searched. Three types of objects can be
// defined using the classes in this file:
//
// 1. (Generic) Enums. By instantiating the GenericEnum class once, an enum with
// the name of the def is generated. It is guarded by the preprocessor define
// GET_name_DECL, where name is the name of the def.
//
// 2. (Generic) Tables and search indices. By instantiating the GenericTable
// class once, a table with the name of the instantiating def is generated and
// guarded by the GET_name_IMPL preprocessor guard.
//
// Both a primary key and additional secondary keys / search indices can also
// be defined, which result in the generation of lookup functions. Their
// declarations and definitions are all guarded by GET_name_DECL and
// GET_name_IMPL, respectively, where name is the name of the underlying table.
//
// See AArch64SystemOperands.td and its generated header for example uses.
//
//===----------------------------------------------------------------------===//

// Define a record derived from this class to generate a generic enum.
//
// The name of the record is used as the type name of the C++ enum.
class GenericEnum {
  // Name of a TableGen class. The enum will have one entry for each record
  // that derives from that class.
  string FilterClass;

  // (Optional) Name of a field that is present in all collected records and
  // contains the name of enum entries.
  //
  // If NameField is not set, the record names will be used instead.
  string NameField;

  // (Optional) Name of a field that is present in all collected records and
  // contains the numerical value of enum entries.
  //
  // If ValueField is not set, enum values will be assigned automatically,
  // starting at 0, according to a lexicographical sort of the entry names.
  string ValueField;
}
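
// An illustrative sketch (not part of the original file); the class and field
// names are hypothetical:
//
//   def MyEnum : GenericEnum {
//     let FilterClass = "MyEnumEntry";
//     let NameField = "Name";
//     let ValueField = "Encoding";
//   }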

// Define a record derived from this class to generate a generic table. This
// table can have a searchable primary key, and it can also be referenced by
// external search indices.
//
// The name of the record is used as the name of the global primary array of
// entries of the table in C++.
class GenericTable {
  // Name of a class. The table will have one entry for each record that
  // derives from that class.
  string FilterClass;

  // Name of the C++ struct/class type that holds table entries. The
  // declaration of this type is not generated automatically.
  string CppTypeName = FilterClass;

  // List of the names of fields of collected records that contain the data for
  // table entries, in the order that is used for initialization in C++.
  //
  // TableGen needs to know the type of the fields so that it can format
  // the initializers correctly. It can infer the type of bit, bits, string,
  // Intrinsic, and Instruction values. 
  //
  // For each field of the table named xxx, TableGen will look for a field
  // named TypeOf_xxx and use that as a more detailed description of the
  // type of the field. This is required for fields whose type
  // cannot be deduced automatically, such as enum fields. For example:
  //
  //   def MyEnum : GenericEnum {
  //     let FilterClass = "MyEnum";
  //     ...
  //   }
  //
  //   class MyTableEntry {
  //     MyEnum V;
  //     ...
  //   }
  //
  //   def MyTable : GenericTable {
  //     let FilterClass = "MyTableEntry";
  //     let Fields = ["V", ...];
  //     string TypeOf_V = "MyEnum";
  //   }
  //
  // If a string field was initialized with a code literal, TableGen will
  // emit the code verbatim. However, if a string field was initialized
  // in some other way, but should be interpreted as code, then a TypeOf_xxx
  // field is necessary, with a value of "code":
  //
  //     string TypeOf_Predicate = "code";
  list<string> Fields;

  // (Optional) List of fields that make up the primary key.
  list<string> PrimaryKey;

  // (Optional) Name of the primary key search function.
  string PrimaryKeyName;

  // See SearchIndex.EarlyOut
  bit PrimaryKeyEarlyOut = false;
}
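
// An illustrative sketch (not part of the original file): a table keyed by a
// "Name" field that generates a lookup function. All names are hypothetical.
//
//   def MyTable : GenericTable {
//     let FilterClass = "MyTableEntry";
//     let Fields = ["Name", "V"];
//     let PrimaryKey = ["Name"];
//     let PrimaryKeyName = "lookupMyTableEntryByName";
//   }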

// Define a record derived from this class to generate an additional search
// index for a generic table that has been defined earlier.
//
// The name of the record will be used as the name of the C++ lookup function.
class SearchIndex {
  // Table that this search index refers to.
  GenericTable Table;

  // List of fields that make up the key.
  list<string> Key;

  // If true, the lookup function will check the first field of the key against
  // the minimum and maximum values in the index before entering the binary
  // search. This is convenient for tables that add extended data for a subset
  // of a larger enum-based space, e.g. extended data about a subset of
  // instructions.
  //
  // Can only be used when the first field is an integral (non-string) type.
  bit EarlyOut = false;
}
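
// An illustrative sketch (not part of the original file): a secondary index
// over the hypothetical MyTable above, keyed by its "V" field.
//
//   def lookupMyTableEntryByV : SearchIndex {
//     let Table = MyTable;
//     let Key = ["V"];
//   }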

// Legacy table type with integrated enum.
class SearchableTable {
  list<string> SearchableFields;
  string EnumNameField = "Name";
  string EnumValueField;
}

//===- StringMatcher.h - Generate a matcher for input strings ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the StringMatcher class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_STRINGMATCHER_H
#define LLVM_TABLEGEN_STRINGMATCHER_H

#include "llvm/ADT/StringRef.h"
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class raw_ostream;

/// Given a list of strings and code to execute when they match, output a
/// simple switch tree to classify the input string.
///
/// If a match is found, the code in Matches[i].second is executed; control must
/// not exit this code fragment.  If nothing matches, execution falls through.
class StringMatcher {
public:
  using StringPair = std::pair<std::string, std::string>;

private:
  StringRef StrVariableName;
  const std::vector<StringPair> &Matches;
  raw_ostream &OS;

public:
  StringMatcher(StringRef strVariableName,
                const std::vector<StringPair> &matches, raw_ostream &os)
    : StrVariableName(strVariableName), Matches(matches), OS(os) {}

  void Emit(unsigned Indent = 0, bool IgnoreDuplicates = false) const;

private:
  bool EmitStringMatcherForChar(const std::vector<const StringPair *> &Matches,
                                unsigned CharNo, unsigned IndentCount,
                                bool IgnoreDuplicates) const;
};
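
// A minimal usage sketch (not part of the original header): emit a switch
// over two hypothetical mnemonics into an existing raw_ostream OS.
//
//   std::vector<StringMatcher::StringPair> Matches = {
//     {"add", "return MyOp::Add;"},
//     {"sub", "return MyOp::Sub;"},
//   };
//   StringMatcher("Name", Matches, OS).Emit();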

} // end namespace llvm

#endif // LLVM_TABLEGEN_STRINGMATCHER_H

//===- llvm/TableGen/Main.h - tblgen entry point ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the common entry point for tblgen tools.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_MAIN_H
#define LLVM_TABLEGEN_MAIN_H

#include <functional>

namespace llvm {

class raw_ostream;
class RecordKeeper;

/// Perform the action using Records, and write output to OS.
/// Returns true on error, false otherwise.
using TableGenMainFn = bool (raw_ostream &OS, RecordKeeper &Records);

int TableGenMain(const char *argv0,
                 std::function<TableGenMainFn> MainFn = nullptr);
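
// A minimal usage sketch (not part of the original header): a tblgen tool's
// main() typically parses options and forwards to TableGenMain. EmitMyOutput
// is a hypothetical TableGenMainFn that returns true on error.
//
//   int main(int argc, char **argv) {
//     cl::ParseCommandLineOptions(argc, argv);
//     return TableGenMain(argv[0], &EmitMyOutput);
//   }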

} // end namespace llvm

#endif // LLVM_TABLEGEN_MAIN_H

//===- StringToOffsetTable.h - Emit a big concatenated string ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_STRINGTOOFFSETTABLE_H
#define LLVM_TABLEGEN_STRINGTOOFFSETTABLE_H

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>

namespace llvm {

/// StringToOffsetTable - This class uniques a bunch of nul-terminated strings
/// and keeps track of their offset in a massive contiguous string allocation.
/// It can then output this string blob and use indexes into the string to
/// reference each piece.
class StringToOffsetTable {
  StringMap<unsigned> StringOffset;
  std::string AggregateString;

public:
  bool Empty() const { return StringOffset.empty(); }

  unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) {
    auto IterBool =
        StringOffset.insert(std::make_pair(Str, AggregateString.size()));
    if (IterBool.second) {
      // Add the string to the aggregate if this is the first time found.
      AggregateString.append(Str.begin(), Str.end());
      if (appendZero)
        AggregateString += '\0';
    }

    return IterBool.first->second;
  }

  void EmitString(raw_ostream &O) {
    // Escape the string.
    SmallString<256> Str;
    raw_svector_ostream(Str).write_escaped(AggregateString);
    AggregateString = std::string(Str.str());

    O << "    \"";
    unsigned CharsPrinted = 0;
    for (unsigned i = 0, e = AggregateString.size(); i != e; ++i) {
      if (CharsPrinted > 70) {
        O << "\"\n    \"";
        CharsPrinted = 0;
      }
      O << AggregateString[i];
      ++CharsPrinted;

      // Print escape sequences all together.
      if (AggregateString[i] != '\\')
        continue;

      assert(i + 1 < AggregateString.size() && "Incomplete escape sequence!");
      if (isdigit(AggregateString[i + 1])) {
        assert(isdigit(AggregateString[i + 2]) &&
               isdigit(AggregateString[i + 3]) &&
               "Expected 3 digit octal escape!");
        O << AggregateString[++i];
        O << AggregateString[++i];
        O << AggregateString[++i];
        CharsPrinted += 3;
      } else {
        O << AggregateString[++i];
        ++CharsPrinted;
      }
    }
    O << "\"";
  }

  /// Emit the string using character literals. MSVC has a limitation that
  /// string literals cannot be longer than 64K.
  void EmitCharArray(raw_ostream &O) {
    assert(AggregateString.find(')') == std::string::npos &&
           "can't emit raw string with closing parens");
    int Count = 0;
    O << ' ';
    for (char C : AggregateString) {
      O << " \'";
      O.write_escaped(StringRef(&C, 1));
      O << "\',";
      Count++;
      if (Count > 14) {
        O << "\n ";
        Count = 0;
      }
    }
    O << '\n';
  }
};
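
// A minimal usage sketch (not part of the original header): unique a few
// strings, then emit the aggregate blob; equal strings share one offset.
//
//   StringToOffsetTable Table;
//   unsigned A = Table.GetOrAddStringOffset("add");
//   unsigned B = Table.GetOrAddStringOffset("add");  // same offset as A
//   Table.EmitString(OS);                            // OS: raw_ostream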

} // end namespace llvm

#endif

//===- llvm/TableGen/Parser.h - tblgen parser entry point -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares an entry point into the tablegen parser for use by tools.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_PARSER_H
#define LLVM_TABLEGEN_PARSER_H

#include "llvm/ADT/STLExtras.h"
#include <string>
#include <vector>

namespace llvm {
class RecordKeeper;
class SourceMgr;

/// Parse the TableGen file defined within the main buffer of the given
/// SourceMgr. On success, populates the provided RecordKeeper with the parsed
/// records and returns false. On failure, returns true.
///
/// NOTE: TableGen currently relies on global state within a given parser
///       invocation, so this function is not thread-safe.
bool TableGenParseFile(SourceMgr &InputSrcMgr, RecordKeeper &Records);
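
// A minimal usage sketch (not part of the original header), assuming Buffer
// holds the .td source as a std::unique_ptr<MemoryBuffer>:
//
//   SourceMgr SrcMgr;
//   SrcMgr.AddNewSourceBuffer(std::move(Buffer), SMLoc());
//   RecordKeeper Records;
//   if (TableGenParseFile(SrcMgr, Records))
//     return true; // parse error was already reported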

} // end namespace llvm

#endif // LLVM_TABLEGEN_PARSER_H

//===- llvm/TableGen/TableGenBackend.h - Backend utilities ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Useful utilities for TableGen backends.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_TABLEGENBACKEND_H
#define LLVM_TABLEGEN_TABLEGENBACKEND_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"

namespace llvm {

class RecordKeeper;
class raw_ostream;

namespace TableGen::Emitter {
using FnT = void (*)(RecordKeeper &Records, raw_ostream &OS);

struct OptCreatorT {
  static void *call();
};

extern ManagedStatic<cl::opt<FnT>, OptCreatorT> Action;

struct Opt {
  Opt(StringRef Name, FnT CB, StringRef Desc, bool ByDefault = false) {
    if (ByDefault)
      Action->setInitialValue(CB);
    Action->getParser().addLiteralOption(Name, CB, Desc);
  }
};

template <class EmitterC> class OptClass : Opt {
  static void run(RecordKeeper &RK, raw_ostream &OS) { EmitterC(RK).run(OS); }

public:
  OptClass(StringRef Name, StringRef Desc) : Opt(Name, run, Desc) {}
};
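
// A minimal usage sketch (not part of the original header): registering a
// hypothetical backend so `tblgen -gen-my-thing` invokes it. MyEmitter must
// be constructible from RecordKeeper& and expose run(raw_ostream&).
//
//   static TableGen::Emitter::OptClass<MyEmitter>
//       X("gen-my-thing", "Generate my thing");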

} // namespace TableGen::Emitter

/// emitSourceFileHeader - Output an LLVM style file header to the specified
/// raw_ostream.
void emitSourceFileHeader(StringRef Desc, raw_ostream &OS);

} // End llvm namespace

#endif

//===- llvm/TableGen/DirectiveEmitter.h -------------------------*- C++ -*-===//

#ifndef LLVM_TABLEGEN_DIRECTIVEEMITTER_H
#define LLVM_TABLEGEN_DIRECTIVEEMITTER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/TableGen/Record.h"
#include <algorithm>
#include <string>
#include <vector>

namespace llvm {

// Wrapper class that contains DirectiveLanguage's information defined in
// DirectiveBase.td and provides helper methods for accessing it.
class DirectiveLanguage {
public:
  explicit DirectiveLanguage(const llvm::RecordKeeper &Records)
      : Records(Records) {
    const auto &DirectiveLanguages = getDirectiveLanguages();
    Def = DirectiveLanguages[0];
  }

  StringRef getName() const { return Def->getValueAsString("name"); }

  StringRef getCppNamespace() const {
    return Def->getValueAsString("cppNamespace");
  }

  StringRef getDirectivePrefix() const {
    return Def->getValueAsString("directivePrefix");
  }

  StringRef getClausePrefix() const {
    return Def->getValueAsString("clausePrefix");
  }

  StringRef getClauseEnumSetClass() const {
    return Def->getValueAsString("clauseEnumSetClass");
  }

  StringRef getFlangClauseBaseClass() const {
    return Def->getValueAsString("flangClauseBaseClass");
  }

  bool hasMakeEnumAvailableInNamespace() const {
    return Def->getValueAsBit("makeEnumAvailableInNamespace");
  }

  bool hasEnableBitmaskEnumInNamespace() const {
    return Def->getValueAsBit("enableBitmaskEnumInNamespace");
  }

  std::vector<Record *> getDirectives() const {
    return Records.getAllDerivedDefinitions("Directive");
  }

  std::vector<Record *> getClauses() const {
    return Records.getAllDerivedDefinitions("Clause");
  }

  bool HasValidityErrors() const;

private:
  const llvm::Record *Def;
  const llvm::RecordKeeper &Records;

  std::vector<Record *> getDirectiveLanguages() const {
    return Records.getAllDerivedDefinitions("DirectiveLanguage");
  }
};

// Base record class used for Directive and Clause class defined in
// DirectiveBase.td.
class BaseRecord {
public:
  explicit BaseRecord(const llvm::Record *Def) : Def(Def) {}

  StringRef getName() const { return Def->getValueAsString("name"); }

  StringRef getAlternativeName() const {
    return Def->getValueAsString("alternativeName");
  }

  // Returns the name of the directive formatted for output. Whitespace is
  // replaced with underscores.
  std::string getFormattedName() {
    StringRef Name = Def->getValueAsString("name");
    std::string N = Name.str();
    std::replace(N.begin(), N.end(), ' ', '_');
    return N;
  }

  bool isDefault() const { return Def->getValueAsBit("isDefault"); }

  // Returns the record name.
  StringRef getRecordName() const { return Def->getName(); }

protected:
  const llvm::Record *Def;
};

// Wrapper class that contains a Directive's information defined in
// DirectiveBase.td and provides helper methods for accessing it.
class Directive : public BaseRecord {
public:
  explicit Directive(const llvm::Record *Def) : BaseRecord(Def) {}

  std::vector<Record *> getAllowedClauses() const {
    return Def->getValueAsListOfDefs("allowedClauses");
  }

  std::vector<Record *> getAllowedOnceClauses() const {
    return Def->getValueAsListOfDefs("allowedOnceClauses");
  }

  std::vector<Record *> getAllowedExclusiveClauses() const {
    return Def->getValueAsListOfDefs("allowedExclusiveClauses");
  }

  std::vector<Record *> getRequiredClauses() const {
    return Def->getValueAsListOfDefs("requiredClauses");
  }
};

// Wrapper class that contains Clause's information defined in DirectiveBase.td
// and provides helper methods for accessing it.
class Clause : public BaseRecord {
public:
  explicit Clause(const llvm::Record *Def) : BaseRecord(Def) {}

  // Optional field.
  StringRef getClangClass() const {
    return Def->getValueAsString("clangClass");
  }

  // Optional field.
  StringRef getFlangClass() const {
    return Def->getValueAsString("flangClass");
  }

  // Get the formatted name for the Flang parser class. The generic formatted
  // class name is constructed from the name, where the first letter of each
  // word is capitalized and the underscores are removed.
  // ex: async -> Async
  //     num_threads -> NumThreads
  std::string getFormattedParserClassName() {
    StringRef Name = Def->getValueAsString("name");
    std::string N = Name.str();
    bool Cap = true;
    std::transform(N.begin(), N.end(), N.begin(), [&Cap](unsigned char C) {
      if (Cap) {
        C = llvm::toUpper(C);
        Cap = false;
      } else if (C == '_') {
        Cap = true;
      }
      return C;
    });
    llvm::erase_value(N, '_');
    return N;
  }

  // Optional field.
  StringRef getEnumName() const {
    return Def->getValueAsString("enumClauseValue");
  }

  std::vector<Record *> getClauseVals() const {
    return Def->getValueAsListOfDefs("allowedClauseValues");
  }

  bool isValueOptional() const { return Def->getValueAsBit("isValueOptional"); }

  bool isValueList() const { return Def->getValueAsBit("isValueList"); }

  StringRef getDefaultValue() const {
    return Def->getValueAsString("defaultValue");
  }

  bool isImplicit() const { return Def->getValueAsBit("isImplicit"); }

  std::vector<StringRef> getAliases() const {
    return Def->getValueAsListOfStrings("aliases");
  }

  StringRef getPrefix() const { return Def->getValueAsString("prefix"); }

  bool isPrefixOptional() const {
    return Def->getValueAsBit("isPrefixOptional");
  }
};

// Wrapper class that contains VersionedClause's information defined in
// DirectiveBase.td and provides helper methods for accessing it.
class VersionedClause {
public:
  explicit VersionedClause(const llvm::Record *Def) : Def(Def) {}

  // Return the specific clause record wrapped in the Clause class.
  Clause getClause() const { return Clause{Def->getValueAsDef("clause")}; }

  int64_t getMinVersion() const { return Def->getValueAsInt("minVersion"); }

  int64_t getMaxVersion() const { return Def->getValueAsInt("maxVersion"); }

private:
  const llvm::Record *Def;
};

class ClauseVal : public BaseRecord {
public:
  explicit ClauseVal(const llvm::Record *Def) : BaseRecord(Def) {}

  int getValue() const { return Def->getValueAsInt("value"); }

  bool isUserVisible() const { return Def->getValueAsBit("isUserValue"); }
};

} // namespace llvm

#endif

//===- SetTheory.h - Generate ordered sets from DAG expressions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SetTheory class that computes ordered sets of
// Records from DAG expressions.  Operators for standard set operations are
// predefined, and it is possible to add special purpose set operators as well.
//
// The user may define named sets as Records of predefined classes. Set
// expanders can be added to a SetTheory instance to teach it how to find the
// elements of such a named set.
//
// These are the predefined operators. The argument lists can be individual
// elements (defs), other sets (defs of expandable classes), lists, or DAG
// expressions that are evaluated recursively.
//
// - (add S1, S2 ...) Union sets. This is also how sets are created from element
//   lists.
//
// - (sub S1, S2, ...) Set difference. Every element in S1 except for the
//   elements in S2, ...
//
// - (and S1, S2) Set intersection. Every element in S1 that is also in S2.
//
// - (shl S, N) Shift left. Remove the first N elements from S.
//
// - (trunc S, N) Truncate. The first N elements of S.
//
// - (rotl S, N) Rotate left. Same as (add (shl S, N), (trunc S, N)).
//
// - (rotr S, N) Rotate right.
//
// - (decimate S, N) Decimate S by picking every N'th element, starting with
//   the first one. For instance, (decimate S, 2) returns the even elements of
//   S.
//
// - (sequence "Format", From, To, [Stride]) Generate a sequence of defs with
//   printf. For instance, (sequence "R%u", 0, 3) -> [ R0, R1, R2, R3 ] and
//   (sequence "R%u", 20, 30, 5) -> [ R20, R25, R30 ].
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_SETTHEORY_H
#define LLVM_TABLEGEN_SETTHEORY_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SMLoc.h"
#include <map>
#include <memory>
#include <vector>

namespace llvm {

class DagInit;
class Init;
class Record;

class SetTheory {
public:
  using RecVec = std::vector<Record *>;
  using RecSet = SmallSetVector<Record *, 16>;

  /// Operator - A callback representing a DAG operator.
  class Operator {
    virtual void anchor();

  public:
    virtual ~Operator() = default;

    /// apply - Apply this operator to Expr's arguments and insert the result
    /// in Elts.
    virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts,
                       ArrayRef<SMLoc> Loc) = 0;
  };

  /// Expander - A callback function that can transform a Record representing a
  /// set into a fully expanded list of elements. Expanders provide a way for
  /// users to define named sets that can be used in DAG expressions.
  class Expander {
    virtual void anchor();

  public:
    virtual ~Expander() = default;

    virtual void expand(SetTheory&, Record*, RecSet &Elts) = 0;
  };

private:
  // Map set defs to their fully expanded contents. This serves as a memoization
  // cache and makes it possible to return const references on queries.
  using ExpandMap = std::map<Record *, RecVec>;
  ExpandMap Expansions;

  // Known DAG operators by name.
  StringMap<std::unique_ptr<Operator>> Operators;

  // Typed expanders by class name.
  StringMap<std::unique_ptr<Expander>> Expanders;

public:
  /// Create a SetTheory instance with only the standard operators.
  SetTheory();

  /// addExpander - Add an expander for Records with the named super class.
  void addExpander(StringRef ClassName, std::unique_ptr<Expander>);

  /// addFieldExpander - Add an expander for ClassName that simply evaluates
  /// FieldName in the Record to get the set elements.  That is all that is
  /// needed for a class like:
  ///
  ///   class Set<dag d> {
  ///     dag Elts = d;
  ///   }
  ///
  void addFieldExpander(StringRef ClassName, StringRef FieldName);

  /// addOperator - Add a DAG operator.
  void addOperator(StringRef Name, std::unique_ptr<Operator>);

  /// evaluate - Evaluate Expr and append the resulting set to Elts.
  void evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc);

  /// evaluate - Evaluate a sequence of Inits and append to Elts.
  template<typename Iter>
  void evaluate(Iter begin, Iter end, RecSet &Elts, ArrayRef<SMLoc> Loc) {
    while (begin != end)
      evaluate(*begin++, Elts, Loc);
  }

  /// expand - Expand a record into a set of elements if possible.  Return a
  /// pointer to the expanded elements, or NULL if Set cannot be expanded
  /// further.
  const RecVec *expand(Record *Set);
};
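
// A minimal driver sketch (illustrative; assumes the records were parsed from
// a .td file defining a 'Set' class as shown above, and that 'SetDef' is one
// such record):
//
//   SetTheory ST;
//   ST.addFieldExpander("Set", "Elts");
//   if (const SetTheory::RecVec *Elts = ST.expand(SetDef))
//     for (Record *R : *Elts)
//       outs() << R->getName() << "\n";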

} // end namespace llvm

#endif // LLVM_TABLEGEN_SETTHEORY_H

//===- llvm/TableGen/Error.h - tblgen error handling helpers ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains error handling helper routines to pretty-print diagnostic
// messages from tblgen.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TABLEGEN_ERROR_H
#define LLVM_TABLEGEN_ERROR_H

#include "llvm/Support/SourceMgr.h"
#include "llvm/TableGen/Record.h"

namespace llvm {

void PrintNote(const Twine &Msg);
void PrintNote(ArrayRef<SMLoc> NoteLoc, const Twine &Msg);

[[noreturn]] void PrintFatalNote(const Twine &Msg);
[[noreturn]] void PrintFatalNote(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
[[noreturn]] void PrintFatalNote(const Record *Rec, const Twine &Msg);
[[noreturn]] void PrintFatalNote(const RecordVal *RecVal, const Twine &Msg);

void PrintWarning(const Twine &Msg);
void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg);
void PrintWarning(const char *Loc, const Twine &Msg);

void PrintError(const Twine &Msg);
void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
void PrintError(const char *Loc, const Twine &Msg);
void PrintError(const Record *Rec, const Twine &Msg);
void PrintError(const RecordVal *RecVal, const Twine &Msg);

[[noreturn]] void PrintFatalError(const Twine &Msg);
[[noreturn]] void PrintFatalError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
[[noreturn]] void PrintFatalError(const Record *Rec, const Twine &Msg);
[[noreturn]] void PrintFatalError(const RecordVal *RecVal, const Twine &Msg);
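
// Typical use (an illustrative sketch; 'Def' stands for a hypothetical
// Record pointer):
//
//   if (!Def->getValue("minVersion"))
//     PrintFatalError(Def, "missing 'minVersion' field");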

void CheckAssert(SMLoc Loc, Init *Condition, Init *Message);

extern SourceMgr SrcMgr;
extern unsigned ErrorsPrinted;

} // end namespace llvm

#endif

//===- DWARFLinker.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKERPARALLEL_DWARFLINKER_H
#define LLVM_DWARFLINKERPARALLEL_DWARFLINKER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/DWARFLinkerParallel/DWARFFile.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/TargetParser/Triple.h"

/// ------------------------------------------------------------------
/// The core of the DWARF linking logic.
///
/// The generation of the DWARF information from the object files is
/// driven by the selection of 'root DIEs', which are DIEs that
/// describe variables or functions that resolve to the corresponding
/// code section (and thus have entries in the Addresses map). All the debug
/// information that will be generated (the DIEs, but also the line
/// tables, ranges, ...) is derived from that set of root DIEs.
///
/// The root DIEs are identified because they contain relocations that
/// point to a code section (the low_pc for a function, the location for
/// a variable). These relocations are gathered by AddressesMap as a very
/// first step when we start processing an object file.
///
/// The overall linking process looks like this:
///
/// parallel_for_each(ObjectFile) {
///   for_each (Compile Unit) {
///     1. Load Clang modules.
///   }
///
///   parallel_for_each(Compile Unit) {
///     1. Load input DWARF for Compile Unit.
///     2. Report warnings for Clang modules.
///     3. Analyze live DIEs and type names (if ODR deduplication is requested).
///     4. Clone DIEs (generate output DIEs and the resulting DWARF tables).
///        The result is in OutDebugInfoBytes, which is an ELF file
///        containing the DWARF tables corresponding to the current compile
///        unit.
///     5. Clean up input and output DIEs.
///   }
///
///   Deallocate loaded Object file.
/// }
///
/// if (ODR deduplication is requested)
///   Generate an artificial compilation unit ("Type Table": used to partially
///   generate DIEs at the clone stage).
///
/// for_each (ObjectFile) {
///   for_each (Compile Unit) {
///     1. Set offsets for the compile units' DWARF tables.
///     2. Sort offsets/attributes/patches to have a predictable result.
///     3. Patch size/offsets fields.
///     4. Generate index tables.
///     5. Move DWARF tables of compile units into the resulting file.
///   }
/// }
///
/// Every compile unit is processed separately, visited only once
/// (except when inter-CU references exist), and the used data is freed
/// after the compile unit is processed. The resulting file is glued together
/// from the generated debug tables, which correspond to separate compile
/// units.
///
/// Handling inter-CU references: inter-CU references are hard to process
/// using only one pass. For example, if CU1 references CU100 and CU100
/// references CU1, we cannot finish handling CU1 until we have finished
/// CU100. Thus we need to either load all CUs into memory, or load CUs
/// several times. This implementation loads inter-connected CUs into memory
/// during the first pass and processes them during the second pass.
///
/// ODR deduplication: an artificial compilation unit is constructed to keep
/// the type DIEs. All types are moved into that compilation unit. References
/// to types are patched so that they point to the corresponding types in the
/// artificial compilation unit. All partial type definitions are merged into
/// a single type definition.
///

namespace llvm {
namespace dwarflinker_parallel {

/// ExtraDwarfEmitter allows adding extra data to the DWARFLinker output.
/// The finish() method should be called after all extra data are emitted.
class ExtraDwarfEmitter {
public:
  virtual ~ExtraDwarfEmitter() = default;

  /// Dump the file to the disk.
  virtual void finish() = 0;

  /// Emit section named SecName with data SecData.
  virtual void emitSectionContents(StringRef SecData, StringRef SecName) = 0;

  /// Emit a temporary symbol named \p SymName inside section \p SecName.
  virtual MCSymbol *emitTempSym(StringRef SecName, StringRef SymName) = 0;

  /// Emit the swift_ast section stored in \p Buffer.
  virtual void emitSwiftAST(StringRef Buffer) = 0;

  /// Emit the swift reflection section stored in \p Buffer.
  virtual void emitSwiftReflectionSection(
      llvm::binaryformat::Swift5ReflectionSectionKind ReflSectionKind,
      StringRef Buffer, uint32_t Alignment, uint32_t Size) = 0;

  /// Returns underlying AsmPrinter.
  virtual AsmPrinter &getAsmPrinter() const = 0;
};

class DWARFLinker {
public:
  /// Type of output file.
  enum class OutputFileType {
    Object,
    Assembly,
  };

  /// The kind of accelerator tables we should emit.
  enum class AccelTableKind : uint8_t {
    Apple,     ///< .apple_names, .apple_namespaces, .apple_types, .apple_objc.
    Pub,       ///< .debug_pubnames, .debug_pubtypes
    DebugNames ///< .debug_names.
  };

  using MessageHandlerTy = std::function<void(
      const Twine &Warning, StringRef Context, const DWARFDie *DIE)>;
  using ObjFileLoaderTy = std::function<ErrorOr<DWARFFile &>(
      StringRef ContainerName, StringRef Path)>;
  using InputVerificationHandlerTy = std::function<void(const DWARFFile &File)>;
  using ObjectPrefixMapTy = std::map<std::string, std::string>;
  using CompileUnitHandlerTy = function_ref<void(const DWARFUnit &Unit)>;
  using TranslatorFuncTy = std::function<StringRef(StringRef)>;
  using SwiftInterfacesMapTy = std::map<std::string, std::string>;

  virtual ~DWARFLinker() = default;

  /// Creates a DWARF linker instance.
  static std::unique_ptr<DWARFLinker>
  createLinker(MessageHandlerTy ErrorHandler, MessageHandlerTy WarningHandler,
               TranslatorFuncTy StringsTranslator = nullptr);

  /// Creates the emitter for the output DWARF.
  virtual Error createEmitter(const Triple &TheTriple, OutputFileType FileType,
                              raw_pwrite_stream &OutFile) = 0;

  /// Returns the previously created DWARF emitter. May be nullptr.
  virtual ExtraDwarfEmitter *getEmitter() = 0;

  /// Add an object file to be linked. Pre-load the compile unit DIEs and call
  /// \p OnCUDieLoaded for each of them. If the specified \p File references a
  /// Clang module, that module is pre-loaded by \p Loader in the !Update case.
  ///
  /// \pre The NoODR and Update options should be set before calling
  /// addObjectFile.
  virtual void addObjectFile(
      DWARFFile &File, ObjFileLoaderTy Loader = nullptr,
      CompileUnitHandlerTy OnCUDieLoaded = [](const DWARFUnit &) {}) = 0;

  /// Link debug info for added files.
  virtual Error link() = 0;

  /// \defgroup Methods setting various linking options:
  ///
  /// @{

  /// Enable logging of the linking process to standard output.
  virtual void setVerbosity(bool Verbose) = 0;

  /// Print statistics to standard output.
  virtual void setStatistics(bool Statistics) = 0;

  /// Verify the input DWARF.
  virtual void setVerifyInputDWARF(bool Verify) = 0;

  /// Do not unique types according to ODR.
  virtual void setNoODR(bool NoODR) = 0;

  /// Update index tables only (do not modify the rest of the DWARF).
  virtual void setUpdateIndexTablesOnly(bool UpdateIndexTablesOnly) = 0;

  /// Allow generating valid, but non-deterministic output.
  virtual void
  setAllowNonDeterministicOutput(bool AllowNonDeterministicOutput) = 0;

  /// Set to keep the enclosing function for a static variable.
  virtual void setKeepFunctionForStatic(bool KeepFunctionForStatic) = 0;

  /// Use the specified number of threads for parallel file linking.
  virtual void setNumThreads(unsigned NumThreads) = 0;

  /// Add a kind of accelerator table to be generated.
  virtual void addAccelTableKind(AccelTableKind Kind) = 0;

  /// Set the prepend path for Clang modules.
  virtual void setPrependPath(const std::string &Ppath) = 0;

  /// Set the estimated number of object files, for preliminary data
  /// allocation.
  virtual void setEstimatedObjfilesAmount(unsigned ObjFilesNum) = 0;

  /// Set the verification handler used to report verification errors.
  virtual void
  setInputVerificationHandler(InputVerificationHandlerTy Handler) = 0;

  /// Set map for Swift interfaces.
  virtual void setSwiftInterfacesMap(SwiftInterfacesMapTy *Map) = 0;

  /// Set prefix map for objects.
  virtual void setObjectPrefixMap(ObjectPrefixMapTy *Map) = 0;

  /// Set target DWARF version.
  virtual Error setTargetDWARFVersion(uint16_t TargetDWARFVersion) = 0;
  /// @}
};
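
// A minimal linking driver sketch (illustrative only: error handling is
// abbreviated, and 'TheTriple', 'OutStream' and 'File' are assumed to be set
// up by the caller):
//
//   auto Linker = DWARFLinker::createLinker(
//       [](const Twine &Err, StringRef Ctx, const DWARFDie *) { /* report */ },
//       [](const Twine &Warn, StringRef Ctx, const DWARFDie *) { /* warn */ });
//   if (Error Err = Linker->createEmitter(
//           TheTriple, DWARFLinker::OutputFileType::Object, OutStream))
//     return Err;
//   Linker->setNumThreads(4);
//   Linker->addObjectFile(File);
//   return Linker->link();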

} // end namespace dwarflinker_parallel
} // end namespace llvm

#endif // LLVM_DWARFLINKERPARALLEL_DWARFLINKER_H

//===- StringPool.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKERPARALLEL_STRINGPOOL_H
#define LLVM_DWARFLINKERPARALLEL_STRINGPOOL_H

#include "llvm/ADT/ConcurrentHashtable.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/PerThreadBumpPtrAllocator.h"
#include <string>
#include <string_view>

namespace llvm {
namespace dwarflinker_parallel {

/// StringEntry keeps the string's data: the length, the external offset, and
/// the string body, which is placed right after the StringEntry.
using StringEntry = StringMapEntry<DwarfStringPoolEntry *>;

class StringPoolEntryInfo {
public:
  /// \returns Hash value for the specified \p Key.
  static inline uint64_t getHashValue(const StringRef &Key) {
    return xxh3_64bits(Key);
  }

  /// \returns true if both \p LHS and \p RHS are equal.
  static inline bool isEqual(const StringRef &LHS, const StringRef &RHS) {
    return LHS == RHS;
  }

  /// \returns key for the specified \p KeyData.
  static inline StringRef getKey(const StringEntry &KeyData) {
    return KeyData.getKey();
  }

  /// \returns newly created object of KeyDataTy type.
  static inline StringEntry *
  create(const StringRef &Key, parallel::PerThreadBumpPtrAllocator &Allocator) {
    return StringEntry::create(Key, Allocator);
  }
};

class StringPool
    : public ConcurrentHashTableByPtr<StringRef, StringEntry,
                                      parallel::PerThreadBumpPtrAllocator,
                                      StringPoolEntryInfo> {
public:
  StringPool()
      : ConcurrentHashTableByPtr<StringRef, StringEntry,
                                 parallel::PerThreadBumpPtrAllocator,
                                 StringPoolEntryInfo>(Allocator) {}

  StringPool(size_t InitialSize)
      : ConcurrentHashTableByPtr<StringRef, StringEntry,
                                 parallel::PerThreadBumpPtrAllocator,
                                 StringPoolEntryInfo>(Allocator, InitialSize) {}

  parallel::PerThreadBumpPtrAllocator &getAllocatorRef() { return Allocator; }

private:
  parallel::PerThreadBumpPtrAllocator Allocator;
};
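
// A usage sketch (illustrative; mirrors how StringTable::add drives the pool,
// where insert() yields a (StringEntry *, inserted) pair):
//
//   StringPool Pool;
//   StringEntry *Entry = Pool.insert("main").first;
//   assert(Entry->getKey() == "main");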

} // end of namespace dwarflinker_parallel
} // end namespace llvm

#endif // LLVM_DWARFLINKERPARALLEL_STRINGPOOL_H

//===- DWARFFile.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKERPARALLEL_DWARFFILE_H
#define LLVM_DWARFLINKERPARALLEL_DWARFFILE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DWARFLinkerParallel/AddressesMap.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/Support/Endian.h"
#include <functional>
#include <memory>

namespace llvm {
namespace dwarflinker_parallel {

/// This class represents the DWARF information for a source file,
/// together with its address map.
///
/// May be read asynchronously.
class DWARFFile {
public:
  using UnloadCallbackTy = std::function<void(StringRef FileName)>;

  DWARFFile(StringRef Name, std::unique_ptr<DWARFContext> Dwarf,
            std::unique_ptr<AddressesMap> Addresses,
            const std::vector<std::string> &Warnings,
            UnloadCallbackTy UnloadFunc = nullptr)
      : FileName(Name), Dwarf(std::move(Dwarf)),
        Addresses(std::move(Addresses)), Warnings(Warnings),
        UnloadFunc(UnloadFunc) {
    if (this->Dwarf)
      Endianness = this->Dwarf->isLittleEndian() ? support::endianness::little
                                                 : support::endianness::big;
  }

  /// Object file name.
  StringRef FileName;

  /// Source DWARF information.
  std::unique_ptr<DWARFContext> Dwarf;

  /// Helpful address information (list of valid address ranges, relocations).
  std::unique_ptr<AddressesMap> Addresses;

  /// Warnings for object file.
  const std::vector<std::string> &Warnings;

  /// Endianness of the source DWARF information.
  support::endianness Endianness = support::endianness::little;

  /// Callback to the module keeping the object file, used to unload it.
  UnloadCallbackTy UnloadFunc;

  /// Unloads the object file and the corresponding AddressesMap and DWARF
  /// context.
  void unload() {
    Addresses.reset();
    Dwarf.reset();

    if (UnloadFunc)
      UnloadFunc(FileName);
  }
};

} // end namespace dwarflinker_parallel
} // end namespace llvm

#endif // LLVM_DWARFLINKERPARALLEL_DWARFFILE_H

//===- StringTable.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKERPARALLEL_STRINGTABLE_H
#define LLVM_DWARFLINKERPARALLEL_STRINGTABLE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/DWARFLinkerParallel/StringPool.h"

namespace llvm {
namespace dwarflinker_parallel {

using StringsVector = SmallVector<StringEntry *>;

/// This class prepares strings for emission into the .debug_str table: it
/// translates each string if necessary, assigns an index and offset, and
/// keeps the strings in order.
class StringTable {
public:
  StringTable(StringPool &Strings,
              std::function<StringRef(StringRef)> StringsTranslator)
      : Strings(Strings), StringsTranslator(StringsTranslator) {}
  ~StringTable() {}

  /// Add a string to the vector of strings which should be emitted.
  /// Translate the input string if necessary, and assign an index and offset.
  /// \returns the updated string entry.
  StringEntry *add(StringEntry *String) {
    // Translate string if necessary.
    if (StringsTranslator)
      String = Strings.insert(StringsTranslator(String->first())).first;

    // Store String for emission and assign index and offset.
    if (String->getValue() == nullptr) {
      DwarfStringPoolEntry *NewEntry =
          Strings.getAllocatorRef().Allocate<DwarfStringPoolEntry>();

      NewEntry->Symbol = nullptr;
      NewEntry->Index = StringEntriesForEmission.size();

      if (StringEntriesForEmission.empty())
        NewEntry->Offset = 0;
      else {
        StringEntry *PrevString = StringEntriesForEmission.back();
        NewEntry->Offset =
            PrevString->getValue()->Offset + PrevString->getKeyLength() + 1;
      }

      String->getValue() = NewEntry;
      StringEntriesForEmission.push_back(String);
    }

    return String;
  }

  /// Erase the contents of StringEntriesForEmission.
  void clear() { StringEntriesForEmission.clear(); }

  /// Enumerate all strings in sequential order and call \p Handler for each
  /// string.
  void forEach(function_ref<void(DwarfStringPoolEntryRef)> Handler) const {
    for (const StringEntry *Entry : StringEntriesForEmission)
      Handler(*Entry);
  }

  std::function<StringRef(StringRef)> getTranslator() {
    return StringsTranslator;
  }

protected:
  /// List of strings for emission.
  StringsVector StringEntriesForEmission;

  /// String pool for the translated strings.
  StringPool &Strings;

  /// Translator for the strings.
  std::function<StringRef(StringRef)> StringsTranslator;
};
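
// An offset-assignment sketch (illustrative): add() above lays each new string
// out after the previous one plus its NUL terminator.
//
//   StringPool Pool;
//   StringTable Table(Pool, /*StringsTranslator=*/nullptr);
//   StringEntry *Foo = Table.add(Pool.insert("foo").first);
//   StringEntry *Bar = Table.add(Pool.insert("bar").first);
//   // Foo->getValue()->Offset == 0; Bar->getValue()->Offset == 4 ("foo" + NUL).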

} // end of namespace dwarflinker_parallel
} // end namespace llvm

#endif // LLVM_DWARFLINKERPARALLEL_STRINGTABLE_H

//===- AddressesMap.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DWARFLINKERPARALLEL_ADDRESSESMAP_H
#define LLVM_DWARFLINKERPARALLEL_ADDRESSESMAP_H

#include "llvm/ADT/AddressRanges.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFExpression.h"
#include <cstdint>

namespace llvm {
namespace dwarflinker_parallel {

/// Mapped value in the address map is the offset to apply to the
/// linked address.
using RangesTy = AddressRangesMap;

/// AddressesMap represents information about the valid addresses used
/// by debug information. Valid addresses are those which point to
/// live code sections, i.e. relocations for these addresses point
/// into sections which are (or will be) placed into the resulting binary.
class AddressesMap {
public:
  virtual ~AddressesMap() = default;

  /// Checks that there are valid relocations in the .debug_info
  /// section.
  virtual bool hasValidRelocs() = 0;

  /// Checks that the specified DWARF expression operand \p Op references live
  /// code section and returns the relocation adjustment value (to get the
  /// linked address this value might be added to the source expression operand
  /// address).
  /// \returns relocation adjustment value or std::nullopt if there is no
  /// corresponding live address.
  virtual std::optional<int64_t>
  getExprOpAddressRelocAdjustment(DWARFUnit &U,
                                  const DWARFExpression::Operation &Op,
                                  uint64_t StartOffset, uint64_t EndOffset) = 0;

  /// Checks that the specified subprogram \p DIE references the live code
  /// section and returns the relocation adjustment value (to get the linked
  /// address this value might be added to the source subprogram address).
  /// Allowed kinds of input DIE: DW_TAG_subprogram, DW_TAG_label.
  /// \returns relocation adjustment value or std::nullopt if there is no
  /// corresponding live address.
  virtual std::optional<int64_t>
  getSubprogramRelocAdjustment(const DWARFDie &DIE) = 0;

  /// Apply the valid relocations to the buffer \p Data, taking into
  /// account that Data is at \p BaseOffset in the .debug_info section.
  ///
  /// \returns true if any relocation has been applied.
  virtual bool applyValidRelocs(MutableArrayRef<char> Data, uint64_t BaseOffset,
                                bool IsLittleEndian) = 0;

  /// Erases all data.
  virtual void clear() = 0;
};
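
// A worked example of the "relocation adjustment" notion used above
// (illustrative numbers): if a subprogram's low_pc is 0x1000 in the input
// object and the linker places its section so that the function lands at
// 0x4000, getSubprogramRelocAdjustment() returns 0x3000, and the linked
// address is 0x1000 + 0x3000 == 0x4000.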

} // end of namespace dwarflinker_parallel
} // end namespace llvm

#endif // LLVM_DWARFLINKERPARALLEL_ADDRESSESMAP_H

//===-- Graph.h - XRay Graph Class ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A Graph Datatype for XRay.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_XRAY_GRAPH_H
#define LLVM_XRAY_GRAPH_H

#include <initializer_list>
#include <stdint.h>
#include <type_traits>
#include <utility>

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace xray {

/// A Graph object represents a Directed Graph and is used in XRay to compute
/// and store function call graphs and associated statistical information.
///
/// The graph takes three template parameters, these are:
///  - VertexAttribute, a structure which is stored for each vertex.
///    Must be DefaultConstructible, CopyConstructible, CopyAssignable and
///    Destructible.
///  - EdgeAttribute, a structure which is stored for each edge.
///    Must be DefaultConstructible, CopyConstructible, CopyAssignable and
///    Destructible.
///  - VI, a type over which DenseMapInfo is defined and which is the type
///    used to identify vertices, available as VertexIdentifier. If the
///    built-in DenseMapInfo is not defined for it, provide a specialization.
///
/// Graph is CopyConstructible, CopyAssignable, MoveConstructible and
/// MoveAssignable but is not EqualityComparable or LessThanComparable.
///
/// Usage Example Graph with weighted edges and vertices:
///   Graph<int, int, int> G;
///
///   G[1] = 0;
///   G[2] = 2;
///   G[{1,2}] = 1;
///   G[{2,1}] = -1;
///   for(const auto &v : G.vertices()){
///     // Do something with the vertices in the graph;
///   }
///   for(const auto &e : G.edges()){
///     // Do something with the edges in the graph;
///   }
///
/// Usage Example with StringRef keys.
///   Graph<int, double, StringRef> G;
///   char va[] = "Vertex A";
///   char vaa[] = "Vertex A";
///   char vb[] = "Vertex B"; // Vertices are referenced by StringRefs.
///   G[va] = 0;
///   G[vb] = 1;
///   G[{va, vb}] = 1.0;
///   outs() << G[vaa] << " " << G[{vaa, vb}]; // prints "0 1.0".
///
template <typename VertexAttribute, typename EdgeAttribute,
          typename VI = int32_t>
class Graph {
public:
  /// These objects are used to name edges and vertices in the graph.
  typedef VI VertexIdentifier;
  typedef std::pair<VI, VI> EdgeIdentifier;

  /// This type is the value_type of all iterators which range over vertices,
  /// Determined by the Vertices DenseMap
  using VertexValueType =
      detail::DenseMapPair<VertexIdentifier, VertexAttribute>;

  /// This type is the value_type of all iterators which range over edges,
  /// Determined by the Edges DenseMap.
  using EdgeValueType = detail::DenseMapPair<EdgeIdentifier, EdgeAttribute>;

  using size_type = std::size_t;

private:
  /// The type used for storing the EdgeAttribute for each edge in the graph
  using EdgeMapT = DenseMap<EdgeIdentifier, EdgeAttribute>;

  /// The type used for storing the VertexAttribute for each vertex in
  /// the graph.
  using VertexMapT = DenseMap<VertexIdentifier, VertexAttribute>;

  /// The type used for storing the set of neighbors of a vertex, indexed by
  /// VertexIdentifier. Only used to determine where the incident edges are;
  /// the edge attributes themselves are stored in the EdgeMapT.
  using NeighborSetT = DenseSet<VertexIdentifier>;

  /// The type mapping each vertex to its NeighborSetT (populated only when a
  /// vertex has an edge incident to it).
  using NeighborLookupT = DenseMap<VertexIdentifier, NeighborSetT>;

private:
  /// Stores the map from the start and end vertex of an edge to its
  /// EdgeAttribute
  EdgeMapT Edges;

  /// Stores the map from VertexIdentifier to VertexAttribute
  VertexMapT Vertices;

  /// Allows fast lookup for the incoming edge set of any given vertex.
  NeighborLookupT InNeighbors;

  /// Allows fast lookup for the outgoing edge set of any given vertex.
  NeighborLookupT OutNeighbors;

  /// An iterator adapter using a NeighborSetT::const_iterator as its base
  /// iterator, and storing the VertexIdentifier the iterator range comes from.
  /// The dereference operator is then performed using a pointer to the graph's
  /// edge set.
  template <bool IsConst, bool IsOut,
            typename BaseIt = typename NeighborSetT::const_iterator,
            typename T =
                std::conditional_t<IsConst, const EdgeValueType, EdgeValueType>>
  class NeighborEdgeIteratorT
      : public iterator_adaptor_base<
            NeighborEdgeIteratorT<IsConst, IsOut>, BaseIt,
            typename std::iterator_traits<BaseIt>::iterator_category, T> {
    using InternalEdgeMapT =
        std::conditional_t<IsConst, const EdgeMapT, EdgeMapT>;

    friend class NeighborEdgeIteratorT<false, IsOut, BaseIt, EdgeValueType>;
    friend class NeighborEdgeIteratorT<true, IsOut, BaseIt,
                                       const EdgeValueType>;

    InternalEdgeMapT *MP;
    VertexIdentifier SI;

  public:
    template <bool IsConstDest,
              typename = std::enable_if<IsConstDest && !IsConst>>
    operator NeighborEdgeIteratorT<IsConstDest, IsOut, BaseIt,
                                   const EdgeValueType>() const {
      return NeighborEdgeIteratorT<IsConstDest, IsOut, BaseIt,
                                   const EdgeValueType>(this->I, MP, SI);
    }

    NeighborEdgeIteratorT() = default;
    NeighborEdgeIteratorT(BaseIt _I, InternalEdgeMapT *_MP,
                          VertexIdentifier _SI)
        : iterator_adaptor_base<
              NeighborEdgeIteratorT<IsConst, IsOut>, BaseIt,
              typename std::iterator_traits<BaseIt>::iterator_category, T>(_I),
          MP(_MP), SI(_SI) {}

    T &operator*() const {
      if (!IsOut)
        return *(MP->find({*(this->I), SI}));
      else
        return *(MP->find({SI, *(this->I)}));
    }
  };

public:
  /// A const iterator type for iterating through the set of edges entering a
  /// vertex.
  ///
  /// Has a const EdgeValueType as its value_type
  using ConstInEdgeIterator = NeighborEdgeIteratorT<true, false>;

  /// An iterator type for iterating through the set of edges entering a
  /// vertex.
  ///
  /// Has an EdgeValueType as its value_type
  using InEdgeIterator = NeighborEdgeIteratorT<false, false>;

  /// A const iterator type for iterating through the set of edges leaving a
  /// vertex.
  ///
  /// Has a const EdgeValueType as its value_type
  using ConstOutEdgeIterator = NeighborEdgeIteratorT<true, true>;

  /// An iterator type for iterating through the set of edges leaving a vertex.
  ///
  /// Has an EdgeValueType as its value_type
  using OutEdgeIterator = NeighborEdgeIteratorT<false, true>;

  /// A class for ranging over the incoming or outgoing edges incident to a
  /// vertex.
  ///
  /// Like all views in this class it provides methods to get the beginning and
  /// past the range iterators for the range, as well as methods to determine
  /// the number of elements in the range and whether the range is empty.
  template <bool isConst, bool isOut> class InOutEdgeView {
  public:
    using iterator = NeighborEdgeIteratorT<isConst, isOut>;
    using const_iterator = NeighborEdgeIteratorT<true, isOut>;
    using GraphT = std::conditional_t<isConst, const Graph, Graph>;
    using InternalEdgeMapT =
        std::conditional_t<isConst, const EdgeMapT, EdgeMapT>;

  private:
    InternalEdgeMapT &M;
    const VertexIdentifier A;
    const NeighborLookupT &NL;

  public:
    iterator begin() {
      auto It = NL.find(A);
      if (It == NL.end())
        return iterator();
      return iterator(It->second.begin(), &M, A);
    }

    const_iterator cbegin() const {
      auto It = NL.find(A);
      if (It == NL.end())
        return const_iterator();
      return const_iterator(It->second.begin(), &M, A);
    }

    const_iterator begin() const { return cbegin(); }

    iterator end() {
      auto It = NL.find(A);
      if (It == NL.end())
        return iterator();
      return iterator(It->second.end(), &M, A);
    }
    const_iterator cend() const {
      auto It = NL.find(A);
      if (It == NL.end())
        return const_iterator();
      return const_iterator(It->second.end(), &M, A);
    }

    const_iterator end() const { return cend(); }

    size_type size() const {
      auto I = NL.find(A);
      if (I == NL.end())
        return 0;
      else
        return I->second.size();
    }

    bool empty() const { return NL.count(A) == 0; }

    InOutEdgeView(GraphT &G, VertexIdentifier A)
        : M(G.Edges), A(A), NL(isOut ? G.OutNeighbors : G.InNeighbors) {}
  };

  /// A const iterator type for iterating through the whole vertex set of the
  /// graph.
  ///
  /// Has a const VertexValueType as its value_type
  using ConstVertexIterator = typename VertexMapT::const_iterator;

  /// An iterator type for iterating through the whole vertex set of the graph.
  ///
  /// Has a VertexValueType as its value_type
  using VertexIterator = typename VertexMapT::iterator;

  /// A class for ranging over the vertices in the graph.
  ///
  /// Like all views in this class it provides methods to get the beginning and
  /// past the range iterators for the range, as well as methods to determine
  /// the number of elements in the range and whether the range is empty.
  template <bool isConst> class VertexView {
  public:
    using iterator =
        std::conditional_t<isConst, ConstVertexIterator, VertexIterator>;
    using const_iterator = ConstVertexIterator;
    using GraphT = std::conditional_t<isConst, const Graph, Graph>;

  private:
    GraphT &G;

  public:
    iterator begin() { return G.Vertices.begin(); }
    iterator end() { return G.Vertices.end(); }
    const_iterator cbegin() const { return G.Vertices.cbegin(); }
    const_iterator cend() const { return G.Vertices.cend(); }
    const_iterator begin() const { return G.Vertices.begin(); }
    const_iterator end() const { return G.Vertices.end(); }
    size_type size() const { return G.Vertices.size(); }
    bool empty() const { return G.Vertices.empty(); }
    VertexView(GraphT &_G) : G(_G) {}
  };

  /// A const iterator for iterating through the entire edge set of the graph.
  ///
  /// Has a const EdgeValueType as its value_type
  using ConstEdgeIterator = typename EdgeMapT::const_iterator;

  /// An iterator for iterating through the entire edge set of the graph.
  ///
  /// Has an EdgeValueType as its value_type
  using EdgeIterator = typename EdgeMapT::iterator;

  /// A class for ranging over all the edges in the graph.
  ///
  /// Like all views in this class it provides methods to get the beginning and
  /// past the range iterators for the range, as well as methods to determine
  /// the number of elements in the range and whether the range is empty.
  template <bool isConst> class EdgeView {
  public:
    using iterator =
        std::conditional_t<isConst, ConstEdgeIterator, EdgeIterator>;
    using const_iterator = ConstEdgeIterator;
    using GraphT = std::conditional_t<isConst, const Graph, Graph>;

  private:
    GraphT &G;

  public:
    iterator begin() { return G.Edges.begin(); }
    iterator end() { return G.Edges.end(); }
    const_iterator cbegin() const { return G.Edges.cbegin(); }
    const_iterator cend() const { return G.Edges.cend(); }
    const_iterator begin() const { return G.Edges.begin(); }
    const_iterator end() const { return G.Edges.end(); }
    size_type size() const { return G.Edges.size(); }
    bool empty() const { return G.Edges.empty(); }
    EdgeView(GraphT &_G) : G(_G) {}
  };

public:
  // TODO: implement constructor to enable Graph Initialisation.
  // Something like:
  //   Graph<int, int, int> G(
  //   {1, 2, 3, 4, 5},
  //   {{1, 2}, {2, 3}, {3, 4}});

  /// Empty the Graph
  void clear() {
    Edges.clear();
    Vertices.clear();
    InNeighbors.clear();
    OutNeighbors.clear();
  }

  /// Returns a view object allowing iteration over the vertices of the graph.
  /// Also allows access to the size of the vertex set.
  VertexView<false> vertices() { return VertexView<false>(*this); }

  VertexView<true> vertices() const { return VertexView<true>(*this); }

  /// Returns a view object allowing iteration over the edges of the graph.
  /// Also allows access to the size of the edge set.
  EdgeView<false> edges() { return EdgeView<false>(*this); }

  EdgeView<true> edges() const { return EdgeView<true>(*this); }

  /// Returns a view object allowing iteration over the edges which start at
  /// a vertex I.
  InOutEdgeView<false, true> outEdges(const VertexIdentifier I) {
    return InOutEdgeView<false, true>(*this, I);
  }

  InOutEdgeView<true, true> outEdges(const VertexIdentifier I) const {
    return InOutEdgeView<true, true>(*this, I);
  }

  /// Returns a view object allowing iteration over the edges which point to
  /// a vertex I.
  InOutEdgeView<false, false> inEdges(const VertexIdentifier I) {
    return InOutEdgeView<false, false>(*this, I);
  }

  InOutEdgeView<true, false> inEdges(const VertexIdentifier I) const {
    return InOutEdgeView<true, false>(*this, I);
  }
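
  /// A small usage sketch (illustrative): the edge operator[] below
  /// default-constructs missing endpoints, so no separate vertex insertion is
  /// needed.
  ///
  ///   Graph<int, int> G;
  ///   G[{1, 2}] = 10;               // also creates vertices 1 and 2
  ///   for (auto &E : G.outEdges(1))
  ///     outs() << E.second << "\n"; // visits the single edge (1, 2) -> 10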

  /// Looks up the vertex with identifier I; if it does not exist, it default
  /// constructs it.
  VertexAttribute &operator[](const VertexIdentifier &I) {
    return Vertices.FindAndConstruct(I).second;
  }

  /// Looks up the edge with identifier I; if it does not exist, it default
  /// constructs it, and if its endpoints do not exist, it also default
  /// constructs them.
  EdgeAttribute &operator[](const EdgeIdentifier &I) {
    auto &P = Edges.FindAndConstruct(I);
    Vertices.FindAndConstruct(I.first);
    Vertices.FindAndConstruct(I.second);
    InNeighbors[I.second].insert(I.first);
    OutNeighbors[I.first].insert(I.second);
    return P.second;
  }

  /// Looks up a vertex with Identifier I, or an error if it does not exist.
  Expected<VertexAttribute &> at(const VertexIdentifier &I) {
    auto It = Vertices.find(I);
    if (It == Vertices.end())
      return make_error<StringError>(
          "Vertex Identifier Does Not Exist",
          std::make_error_code(std::errc::invalid_argument));
    return It->second;
  }

  Expected<const VertexAttribute &> at(const VertexIdentifier &I) const {
    auto It = Vertices.find(I);
    if (It == Vertices.end())
      return make_error<StringError>(
          "Vertex Identifier Does Not Exist",
          std::make_error_code(std::errc::invalid_argument));
    return It->second;
  }

  /// Looks up an edge with Identifier I, or an error if it does not exist.
  Expected<EdgeAttribute &> at(const EdgeIdentifier &I) {
    auto It = Edges.find(I);
    if (It == Edges.end())
      return make_error<StringError>(
          "Edge Identifier Does Not Exist",
          std::make_error_code(std::errc::invalid_argument));
    return It->second;
  }

  Expected<const EdgeAttribute &> at(const EdgeIdentifier &I) const {
    auto It = Edges.find(I);
    if (It == Edges.end())
      return make_error<StringError>(
          "Edge Identifier Does Not Exist",
          std::make_error_code(std::errc::invalid_argument));
    return It->second;
  }

  /// Looks for a vertex with identifier I, returns 1 if one exists, and
  /// 0 otherwise
  size_type count(const VertexIdentifier &I) const {
    return Vertices.count(I);
  }

  /// Looks for an edge with Identifier I, returns 1 if one exists and 0
  /// otherwise
  size_type count(const EdgeIdentifier &I) const { return Edges.count(I); }

  /// Inserts a vertex into the graph with Identifier Val.first, and
  /// Attribute Val.second.
  std::pair<VertexIterator, bool>
  insert(const std::pair<VertexIdentifier, VertexAttribute> &Val) {
    return Vertices.insert(Val);
  }

  std::pair<VertexIterator, bool>
  insert(std::pair<VertexIdentifier, VertexAttribute> &&Val) {
    return Vertices.insert(std::move(Val));
  }

  /// Inserts an edge into the graph with Identifier Val.first, and
  /// Attribute Val.second. If the key is already in the map, it returns false
  /// and doesn't update the value.
  std::pair<EdgeIterator, bool>
  insert(const std::pair<EdgeIdentifier, EdgeAttribute> &Val) {
    const auto &p = Edges.insert(Val);
    if (p.second) {
      const auto &EI = Val.first;
      Vertices.FindAndConstruct(EI.first);
      Vertices.FindAndConstruct(EI.second);
      InNeighbors[EI.second].insert(EI.first);
      OutNeighbors[EI.first].insert(EI.second);
    }

    return p;
  }

  /// Inserts an edge into the graph with Identifier Val.first, and
  /// Attribute Val.second. If the key is already in the map, it returns false
  /// and doesn't update the value.
  std::pair<EdgeIterator, bool>
  insert(std::pair<EdgeIdentifier, EdgeAttribute> &&Val) {
    auto EI = Val.first;
    const auto &p = Edges.insert(std::move(Val));
    if (p.second) {
      Vertices.FindAndConstruct(EI.first);
      Vertices.FindAndConstruct(EI.second);
      InNeighbors[EI.second].insert(EI.first);
      OutNeighbors[EI.first].insert(EI.second);
    }

    return p;
  }
};
} // namespace xray
} // namespace llvm
#endif

//===- Profile.h - XRay Profile Abstraction -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the XRay Profile class representing the latency profile generated by
// XRay's profiling mode.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_PROFILE_H
#define LLVM_XRAY_PROFILE_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include <list>
#include <utility>
#include <vector>

namespace llvm {
namespace xray {

class Profile;

// We forward declare the Trace type for turning a Trace into a Profile.
class Trace;

/// This function will attempt to load an XRay Profiling Mode profile from the
/// provided |Filename|.
///
/// For any errors encountered in the loading of the profile data from
/// |Filename|, this function will return an Error condition appropriately.
Expected<Profile> loadProfile(StringRef Filename);

/// This algorithm will merge two Profile instances into a single Profile
/// instance, aggregating blocks by Thread ID.
Profile mergeProfilesByThread(const Profile &L, const Profile &R);

/// This algorithm will merge two Profile instances into a single Profile
/// instance, aggregating blocks by function call stack.
Profile mergeProfilesByStack(const Profile &L, const Profile &R);

/// This function takes a Trace and creates a Profile instance from it.
Expected<Profile> profileFromTrace(const Trace &T);
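
// An illustrative pipeline combining the helpers above (error handling is
// elided and the file names are hypothetical):
//
//   Expected<Profile> P1 = loadProfile("a.xray-profile");
//   Expected<Profile> P2 = loadProfile("b.xray-profile");
//   if (P1 && P2) {
//     Profile Merged = mergeProfilesByThread(*P1, *P2);
//     // ... consume Merged ...
//   }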

/// Profile instances are thread-compatible.
class Profile {
public:
  using ThreadID = uint64_t;
  using PathID = unsigned;
  using FuncID = int32_t;

  struct Data {
    uint64_t CallCount;
    uint64_t CumulativeLocalTime;
  };

  struct Block {
    ThreadID Thread;
    std::vector<std::pair<PathID, Data>> PathData;
  };

  /// Provides a sequence of function IDs from a previously interned PathID.
  ///
  /// Returns an error if |P| has not been interned into the Profile before.
  ///
  Expected<std::vector<FuncID>> expandPath(PathID P) const;

  /// The stack represented in |P| must be in stack order (leaf to root). This
  /// will always return the same PathID for |P| that has the same sequence.
  PathID internPath(ArrayRef<FuncID> P);
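
  // For instance (illustrative): internPath({Leaf, Mid, Main}) returns some
  // PathID P, and a later expandPath(P) yields {Leaf, Mid, Main} again.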

  /// Appends a fully-formed Block instance into the Profile.
  ///
  /// Returns an error condition in the following cases:
  ///
  ///    - The PathData component of the Block is empty
  ///
  Error addBlock(Block &&B);

  Profile() = default;
  ~Profile() = default;

  Profile(Profile &&O) noexcept
      : Blocks(std::move(O.Blocks)), NodeStorage(std::move(O.NodeStorage)),
        Roots(std::move(O.Roots)), PathIDMap(std::move(O.PathIDMap)),
        NextID(O.NextID) {}

  Profile &operator=(Profile &&O) noexcept {
    Blocks = std::move(O.Blocks);
    NodeStorage = std::move(O.NodeStorage);
    Roots = std::move(O.Roots);
    PathIDMap = std::move(O.PathIDMap);
    NextID = O.NextID;
    return *this;
  }

  Profile(const Profile &);
  Profile &operator=(const Profile &);

  friend void swap(Profile &L, Profile &R) {
    using std::swap;
    swap(L.Blocks, R.Blocks);
    swap(L.NodeStorage, R.NodeStorage);
    swap(L.Roots, R.Roots);
    swap(L.PathIDMap, R.PathIDMap);
    swap(L.NextID, R.NextID);
  }

private:
  using BlockList = std::list<Block>;

  struct TrieNode {
    FuncID Func = 0;
    std::vector<TrieNode *> Callees{};
    TrieNode *Caller = nullptr;
    PathID ID = 0;
  };

  // List of blocks associated with a Profile.
  BlockList Blocks;

  // List of TrieNode elements we've seen.
  std::list<TrieNode> NodeStorage;

  // List of call stack roots.
  SmallVector<TrieNode *, 4> Roots;

  // Reverse mapping between a PathID to a TrieNode*.
  DenseMap<PathID, TrieNode *> PathIDMap;

  // Used to identify paths.
  PathID NextID = 1;

public:
  using const_iterator = BlockList::const_iterator;
  const_iterator begin() const { return Blocks.begin(); }
  const_iterator end() const { return Blocks.end(); }
  bool empty() const { return Blocks.empty(); }
};

} // namespace xray
} // namespace llvm

#endif

//===- FDRRecordProducer.h - XRay FDR Mode Record Producer ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FDRRECORDPRODUCER_H
#define LLVM_XRAY_FDRRECORDPRODUCER_H

#include "llvm/Support/Error.h"
#include "llvm/XRay/FDRRecords.h"
#include "llvm/XRay/XRayRecord.h"
#include <memory>

namespace llvm {
namespace xray {

class RecordProducer {
public:
  /// All producer implementations must yield either an Error or a non-nullptr
  /// unique_ptr<Record>.
  virtual Expected<std::unique_ptr<Record>> produce() = 0;
  virtual ~RecordProducer() = default;
};

class FileBasedRecordProducer : public RecordProducer {
  const XRayFileHeader &Header;
  DataExtractor &E;
  uint64_t &OffsetPtr;
  uint32_t CurrentBufferBytes = 0;

  // Helper function which gets the next record by speculatively reading through
  // the log, finding a buffer extents record.
  Expected<std::unique_ptr<Record>> findNextBufferExtent();

public:
  FileBasedRecordProducer(const XRayFileHeader &FH, DataExtractor &DE,
                          uint64_t &OP)
      : Header(FH), E(DE), OffsetPtr(OP) {}

  /// Produce the next record from the file-backed data hidden behind the
  /// DataExtractor.
  Expected<std::unique_ptr<Record>> produce() override;
};
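
// An illustrative consumption loop (a sketch only: it assumes Record::apply
// from FDRRecords.h, and that the caller set up the extractor, offset and
// visitor; the termination condition is simplified):
//
//   FileBasedRecordProducer P(Header, Extractor, Offset);
//   while (Offset < Extractor.size()) {
//     Expected<std::unique_ptr<Record>> R = P.produce();
//     if (!R)
//       return R.takeError();
//     if (Error E = (*R)->apply(Visitor))
//       return E;
//   }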

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FDRRECORDPRODUCER_H

//===- Trace.h - XRay Trace Abstraction -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the XRay Trace class representing records in an XRay trace file.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_TRACE_H
#define LLVM_XRAY_TRACE_H

#include <cstdint>
#include <vector>

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Error.h"
#include "llvm/XRay/XRayRecord.h"

namespace llvm {
namespace xray {

/// A Trace object represents the records that have been loaded from XRay
/// log files generated by instrumented binaries. We encapsulate the logic of
/// reading the traces in factory functions that populate the Trace object
/// appropriately.
///
/// Trace objects provide an accessor to an XRayFileHeader which says more about
/// details of the file from which the XRay trace was loaded.
///
/// Usage:
///
///   if (auto TraceOrErr = loadTraceFile("xray-log.something.xray")) {
///     auto& T = *TraceOrErr;
///     // T.getFileHeader() will provide information from the trace header.
///     for (const XRayRecord &R : T) {
///       // ... do something with R here.
///     }
///   } else {
///     // Handle the error here.
///   }
///
class Trace {
  XRayFileHeader FileHeader;
  using RecordVector = std::vector<XRayRecord>;
  RecordVector Records;

  typedef std::vector<XRayRecord>::const_iterator citerator;

  friend Expected<Trace> loadTrace(const DataExtractor &, bool);

public:
  using size_type = RecordVector::size_type;
  using value_type = RecordVector::value_type;
  using const_iterator = RecordVector::const_iterator;

  /// Provides access to the loaded XRay trace file header.
  const XRayFileHeader &getFileHeader() const { return FileHeader; }

  const_iterator begin() const { return Records.begin(); }
  const_iterator end() const { return Records.end(); }
  bool empty() const { return Records.empty(); }
  size_type size() const { return Records.size(); }
};

/// This function will attempt to load XRay trace records from the provided
/// |Filename|.
Expected<Trace> loadTraceFile(StringRef Filename, bool Sort = false);

/// This function will attempt to load XRay trace records from the provided
/// DataExtractor.
Expected<Trace> loadTrace(const DataExtractor &Extractor, bool Sort = false);

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_TRACE_H

//===- InstrumentationMap.h - XRay Instrumentation Map ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines the interface for extracting the instrumentation map from an
// XRay-instrumented binary.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_XRAY_INSTRUMENTATIONMAP_H
#define LLVM_XRAY_INSTRUMENTATIONMAP_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/YAMLTraits.h"
#include <cstdint>
#include <optional>
#include <unordered_map>
#include <vector>

namespace llvm {

namespace xray {

// Forward declare to make a friend.
class InstrumentationMap;

/// Loads the instrumentation map from |Filename|. This auto-deduces the type of
/// the instrumentation map.
Expected<InstrumentationMap> loadInstrumentationMap(StringRef Filename);

/// Represents an XRay instrumentation sled entry from an object file.
struct SledEntry {
  /// The kinds of supported instrumentation map entries.
  enum class FunctionKinds { ENTRY, EXIT, TAIL, LOG_ARGS_ENTER, CUSTOM_EVENT };

  /// The address of the sled.
  uint64_t Address;

  /// The address of the function.
  uint64_t Function;

  /// The kind of sled.
  FunctionKinds Kind;

  /// Whether the sled was annotated to always be instrumented.
  bool AlwaysInstrument;

  unsigned char Version;
};

struct YAMLXRaySledEntry {
  int32_t FuncId;
  yaml::Hex64 Address;
  yaml::Hex64 Function;
  SledEntry::FunctionKinds Kind;
  bool AlwaysInstrument;
  std::string FunctionName;
  unsigned char Version;
};

/// The InstrumentationMap represents the computed function ids and indicated
/// function addresses from an object file (or a YAML file). This provides an
/// interface to just the mapping between the function id and the function
/// address.
///
/// We also provide raw access to the actual instrumentation map entries we find
/// associated with a particular object file.
///
class InstrumentationMap {
public:
  using FunctionAddressMap = std::unordered_map<int32_t, uint64_t>;
  using FunctionAddressReverseMap = std::unordered_map<uint64_t, int32_t>;
  using SledContainer = std::vector<SledEntry>;

private:
  SledContainer Sleds;
  FunctionAddressMap FunctionAddresses;
  FunctionAddressReverseMap FunctionIds;

  friend Expected<InstrumentationMap> loadInstrumentationMap(StringRef);

public:
  /// Provides a raw accessor to the unordered map of function addresses.
  const FunctionAddressMap &getFunctionAddresses() { return FunctionAddresses; }

  /// Returns an XRay computed function id, provided a function address.
  std::optional<int32_t> getFunctionId(uint64_t Addr) const;

  /// Returns the function address for a function id.
  std::optional<uint64_t> getFunctionAddr(int32_t FuncId) const;

  /// Provide read-only access to the entries of the instrumentation map.
  const SledContainer &sleds() const { return Sleds; }
};

} // end namespace xray

namespace yaml {

template <> struct ScalarEnumerationTraits<xray::SledEntry::FunctionKinds> {
  static void enumeration(IO &IO, xray::SledEntry::FunctionKinds &Kind) {
    IO.enumCase(Kind, "function-enter", xray::SledEntry::FunctionKinds::ENTRY);
    IO.enumCase(Kind, "function-exit", xray::SledEntry::FunctionKinds::EXIT);
    IO.enumCase(Kind, "tail-exit", xray::SledEntry::FunctionKinds::TAIL);
    IO.enumCase(Kind, "log-args-enter",
                xray::SledEntry::FunctionKinds::LOG_ARGS_ENTER);
    IO.enumCase(Kind, "custom-event",
                xray::SledEntry::FunctionKinds::CUSTOM_EVENT);
  }
};

template <> struct MappingTraits<xray::YAMLXRaySledEntry> {
  static void mapping(IO &IO, xray::YAMLXRaySledEntry &Entry) {
    IO.mapRequired("id", Entry.FuncId);
    IO.mapRequired("address", Entry.Address);
    IO.mapRequired("function", Entry.Function);
    IO.mapRequired("kind", Entry.Kind);
    IO.mapRequired("always-instrument", Entry.AlwaysInstrument);
    IO.mapOptional("function-name", Entry.FunctionName);
    IO.mapOptional("version", Entry.Version, 0);
  }

  static constexpr bool flow = true;
};
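
// An example sled entry in the flow-mapping form described above (illustrative
// values):
//
//   { id: 1, address: 0x401000, function: 0x401000, kind: function-enter,
//     always-instrument: true, function-name: main, version: 2 }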

} // end namespace yaml

} // end namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(xray::YAMLXRaySledEntry)

#endif // LLVM_XRAY_INSTRUMENTATIONMAP_H

//===- BlockIndexer.h - FDR Block Indexing Visitor ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An implementation of the RecordVisitor which generates a mapping between a
// thread and a range of records representing a block.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_BLOCKINDEXER_H
#define LLVM_XRAY_BLOCKINDEXER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/XRay/FDRRecords.h"
#include <cstdint>
#include <vector>

namespace llvm {
namespace xray {

// The BlockIndexer will gather all related records associated with a
// process+thread and group them by 'Block'.
class BlockIndexer : public RecordVisitor {
public:
  struct Block {
    uint64_t ProcessID;
    int32_t ThreadID;
    WallclockRecord *WallclockTime;
    std::vector<Record *> Records;
  };

  // This maps the process + thread combination to a sequence of blocks.
  using Index = DenseMap<std::pair<uint64_t, int32_t>, std::vector<Block>>;

private:
  Index &Indices;

  Block CurrentBlock{0, 0, nullptr, {}};

public:
  explicit BlockIndexer(Index &I) : Indices(I) {}

  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;

  /// The flush() function will clear out the current state of the visitor, to
  /// allow for explicitly flushing a block's records to the currently
  /// recognized thread and process combination.
  Error flush();
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_BLOCKINDEXER_H
//===- BlockPrinter.h - FDR Block Pretty Printer -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An implementation of the RecordVisitor which formats a block of records for
// easier human consumption.
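//
// Illustrative sketch (Block.Records is an assumed indexed block of records):
//
//   RecordPrinter RP(outs());
//   BlockPrinter BP(outs(), RP);
//   for (Record *R : Block.Records)
//     if (auto E = R->apply(BP))
//       return E;
//   BP.reset();  // prepare for the next block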
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_BLOCKPRINTER_H
#define LLVM_XRAY_BLOCKPRINTER_H

#include "llvm/Support/raw_ostream.h"
#include "llvm/XRay/FDRRecords.h"
#include "llvm/XRay/RecordPrinter.h"

namespace llvm {
namespace xray {

class BlockPrinter : public RecordVisitor {
  enum class State {
    Start,
    Preamble,
    Metadata,
    Function,
    Arg,
    CustomEvent,
    End,
  };

  raw_ostream &OS;
  RecordPrinter &RP;
  State CurrentState = State::Start;

public:
  explicit BlockPrinter(raw_ostream &O, RecordPrinter &P) : OS(O), RP(P) {}

  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;

  void reset() { CurrentState = State::Start; }
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_BLOCKPRINTER_H
//===- RecordPrinter.h - FDR Record Printer -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An implementation of the RecordVisitor which prints an individual record's
// data in an ad hoc format, suitable for human inspection.
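//
// Illustrative sketch (Delim, the optional second constructor argument, is an
// assumed separator between printed records):
//
//   RecordPrinter RP(outs(), "\n");
//   if (auto E = SomeRecord.apply(RP))  // SomeRecord is any FDR Record
//     return E;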
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_RECORDPRINTER_H
#define LLVM_XRAY_RECORDPRINTER_H

#include "llvm/Support/raw_ostream.h"
#include "llvm/XRay/FDRRecords.h"

namespace llvm {
namespace xray {

class RecordPrinter : public RecordVisitor {
  raw_ostream &OS;
  std::string Delim;

public:
  explicit RecordPrinter(raw_ostream &O, std::string D)
      : OS(O), Delim(std::move(D)) {}

  explicit RecordPrinter(raw_ostream &O) : RecordPrinter(O, "") {}

  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_RECORDPRINTER_H
//===- FDRRecordConsumer.h - XRay Flight Data Recorder Mode Records -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FDRRECORDCONSUMER_H
#define LLVM_XRAY_FDRRECORDCONSUMER_H

#include "llvm/Support/Error.h"
#include "llvm/XRay/FDRRecords.h"
#include <algorithm>
#include <memory>
#include <vector>

namespace llvm {
namespace xray {

class RecordConsumer {
public:
  virtual Error consume(std::unique_ptr<Record> R) = 0;
  virtual ~RecordConsumer() = default;
};

// This consumer will collect all the records into a vector of records, in
// arrival order.
class LogBuilderConsumer : public RecordConsumer {
  std::vector<std::unique_ptr<Record>> &Records;

public:
  explicit LogBuilderConsumer(std::vector<std::unique_ptr<Record>> &R)
      : Records(R) {}

  Error consume(std::unique_ptr<Record> R) override;
};

// A PipelineConsumer applies a set of visitors to every consumed Record, in
// the order in which the visitors were added to the pipeline.
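//
// Illustrative sketch (BlockIndexer and RecordPrinter, from other XRay
// headers, stand in for any RecordVisitors):
//
//   BlockIndexer::Index Index;
//   BlockIndexer Indexer(Index);
//   RecordPrinter Printer(outs());
//   PipelineConsumer Consumer({&Indexer, &Printer});
//   // Consumer.consume(std::move(R)) applies Indexer, then Printer, to *R.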
class PipelineConsumer : public RecordConsumer {
  std::vector<RecordVisitor *> Visitors;

public:
  PipelineConsumer(std::initializer_list<RecordVisitor *> V) : Visitors(V) {}

  Error consume(std::unique_ptr<Record> R) override;
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FDRRECORDCONSUMER_H
//===- XRayRecord.h - XRay Trace Record -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file replicates the record definition for XRay log entries. This should
// follow the evolution of the log record versions supported in the compiler-rt
// xray project.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_XRAYRECORD_H
#define LLVM_XRAY_XRAYRECORD_H

#include <cstdint>
#include <vector>
#include <string>

namespace llvm {
namespace xray {

/// XRay traces all have a header providing some top-matter information useful
/// to help tools determine how to interpret the information available in the
/// trace.
struct XRayFileHeader {
  /// Version of the XRay implementation that produced this file.
  uint16_t Version = 0;

  /// A numeric identifier for the type of file this is. Best used in
  /// combination with Version.
  uint16_t Type = 0;

  /// Whether the timestamp counter (TSC) of the CPU that produced this trace
  /// moves at a constant rate.
  bool ConstantTSC = false;

  /// Whether the timestamp counter (TSC) of the CPU that produced this trace
  /// does not stop (i.e. keeps counting across low-power states).
  bool NonstopTSC = false;

  /// The number of cycles per second for the CPU that produced the timestamp
  /// counter (TSC) values. Useful for estimating the amount of time that
  /// elapsed between two TSCs on some platforms.
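  /// For example, given a constant-rate TSC, the elapsed time between two
  /// readings can be estimated as double(TSC2 - TSC1) / CycleFrequency
  /// seconds.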
  uint64_t CycleFrequency = 0;

  // This is different depending on the type of xray record. The naive format
  // stores a Wallclock timespec. FDR logging stores the size of a thread
  // buffer.
  char FreeFormData[16] = {};
};

/// Determines the supported types of records that could be seen in XRay traces.
/// This may or may not correspond to actual record types in the raw trace (as
/// the loader implementation may synthesize this information in the process
/// of loading).
enum class RecordTypes {
  ENTER,
  EXIT,
  TAIL_EXIT,
  ENTER_ARG,
  CUSTOM_EVENT,
  TYPED_EVENT
};

/// An XRayRecord is the denormalized view of data associated with a trace. These
/// records may not correspond to actual entries in the raw traces, but they are
/// the logical representation of records in a higher-level event log.
struct XRayRecord {
  /// RecordType values are used as "sub-types" which have meaning in the
  /// context of the `Type` below. For function call and custom event records,
  /// the RecordType is always 0, while for typed events we store the type in
  /// the RecordType field.
  uint16_t RecordType;

  /// The CPU where the thread is running. We assume number of CPUs <= 65536.
  uint16_t CPU;

  /// Identifies the type of record.
  RecordTypes Type;

  /// The function ID for the record, if this is a function call record.
  int32_t FuncId;

  /// The full 8 bytes of the TSC captured when the log record was created.
  uint64_t TSC;

  /// The thread ID for the currently running thread.
  uint32_t TId;

  /// The process ID for the currently running process.
  uint32_t PId;

  /// The function call arguments.
  std::vector<uint64_t> CallArgs;

  /// For custom and typed events, we provide the raw data from the trace.
  std::string Data;
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_XRAYRECORD_H
//===- FileHeaderReader.h - XRay Trace File Header Reading Function -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares functions that can load an XRay log header from various
// sources.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FILEHEADERREADER_H
#define LLVM_XRAY_FILEHEADERREADER_H

#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Error.h"
#include "llvm/XRay/XRayRecord.h"
#include <cstdint>

namespace llvm {
namespace xray {

/// Convenience function for loading the file header given a data extractor at a
/// specified offset.
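///
/// Illustrative sketch (Data is an assumed buffer holding a raw trace):
///
///   DataExtractor DE(Data, /*IsLittleEndian=*/true, /*AddressSize=*/8);
///   uint64_t Offset = 0;
///   Expected<XRayFileHeader> H = readBinaryFormatHeader(DE, Offset);
///   if (!H)
///     return H.takeError();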
Expected<XRayFileHeader> readBinaryFormatHeader(DataExtractor &HeaderExtractor,
                                                uint64_t &OffsetPtr);

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FILEHEADERREADER_H
//===- FDRTraceWriter.h - XRay FDR Trace Writer -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A utility that can write out XRay FDR Mode formatted trace files.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FDRTRACEWRITER_H
#define LLVM_XRAY_FDRTRACEWRITER_H

#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/XRay/FDRRecords.h"
#include "llvm/XRay/XRayRecord.h"

namespace llvm {
namespace xray {

/// The FDRTraceWriter allows us to hand-craft an XRay Flight Data Recorder
/// (FDR) mode log file. This is used primarily for testing, generating
/// sequences of FDR records that can be read/processed. It can also be used to
/// generate various kinds of execution traces without using the XRay runtime.
/// Note that this writer does not do any validation, but uses the types of
/// records defined in the FDRRecords.h file.
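///
/// Illustrative sketch (OS is an assumed raw_ostream; the remaining header
/// fields keep their defaults):
///
///   XRayFileHeader H;
///   H.Version = 5;
///   FDRTraceWriter W(OS, H);
///   NewBufferRecord NB(/*TID=*/1);
///   if (auto E = NB.apply(W))
///     return E;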
class FDRTraceWriter : public RecordVisitor {
public:
  // Construct an FDRTraceWriter associated with an output stream.
  explicit FDRTraceWriter(raw_ostream &O, const XRayFileHeader &H);
  ~FDRTraceWriter();

  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;

private:
  support::endian::Writer OS;
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FDRTRACEWRITER_H
//===- YAMLXRayRecord.h - XRay Record YAML Support Definitions ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Types and traits specialisations for YAML I/O of XRay log entries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_YAMLXRAYRECORD_H
#define LLVM_XRAY_YAMLXRAYRECORD_H

#include <type_traits>

#include "llvm/Support/YAMLTraits.h"
#include "llvm/XRay/XRayRecord.h"

namespace llvm {
namespace xray {

struct YAMLXRayFileHeader {
  uint16_t Version;
  uint16_t Type;
  bool ConstantTSC;
  bool NonstopTSC;
  uint64_t CycleFrequency;
};

struct YAMLXRayRecord {
  uint16_t RecordType;
  uint16_t CPU;
  RecordTypes Type;
  int32_t FuncId;
  std::string Function;
  uint64_t TSC;
  uint32_t TId;
  uint32_t PId;
  std::vector<uint64_t> CallArgs;
  std::string Data;
};

struct YAMLXRayTrace {
  YAMLXRayFileHeader Header;
  std::vector<YAMLXRayRecord> Records;
};

} // namespace xray

namespace yaml {

// YAML Traits
// -----------
template <> struct ScalarEnumerationTraits<xray::RecordTypes> {
  static void enumeration(IO &IO, xray::RecordTypes &Type) {
    IO.enumCase(Type, "function-enter", xray::RecordTypes::ENTER);
    IO.enumCase(Type, "function-exit", xray::RecordTypes::EXIT);
    IO.enumCase(Type, "function-tail-exit", xray::RecordTypes::TAIL_EXIT);
    IO.enumCase(Type, "function-enter-arg", xray::RecordTypes::ENTER_ARG);
    IO.enumCase(Type, "custom-event", xray::RecordTypes::CUSTOM_EVENT);
    IO.enumCase(Type, "typed-event", xray::RecordTypes::TYPED_EVENT);
  }
};

template <> struct MappingTraits<xray::YAMLXRayFileHeader> {
  static void mapping(IO &IO, xray::YAMLXRayFileHeader &Header) {
    IO.mapRequired("version", Header.Version);
    IO.mapRequired("type", Header.Type);
    IO.mapRequired("constant-tsc", Header.ConstantTSC);
    IO.mapRequired("nonstop-tsc", Header.NonstopTSC);
    IO.mapRequired("cycle-frequency", Header.CycleFrequency);
  }
};

template <> struct MappingTraits<xray::YAMLXRayRecord> {
  static void mapping(IO &IO, xray::YAMLXRayRecord &Record) {
    IO.mapRequired("type", Record.RecordType);
    IO.mapOptional("func-id", Record.FuncId);
    IO.mapOptional("function", Record.Function);
    IO.mapOptional("args", Record.CallArgs);
    IO.mapRequired("cpu", Record.CPU);
    IO.mapOptional("thread", Record.TId, 0U);
    IO.mapOptional("process", Record.PId, 0U);
    IO.mapRequired("kind", Record.Type);
    IO.mapRequired("tsc", Record.TSC);
    IO.mapOptional("data", Record.Data);
  }

  static constexpr bool flow = true;
};
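
// With these traits a record round-trips as a YAML flow mapping, e.g.
// (illustrative values):
//   { type: 0, func-id: 1, function: 'main', cpu: 3, thread: 100,
//     process: 100, kind: function-enter, tsc: 10001 }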

template <> struct MappingTraits<xray::YAMLXRayTrace> {
  static void mapping(IO &IO, xray::YAMLXRayTrace &Trace) {
    // A trace file contains two parts, the header and the list of all the
    // trace records.
    IO.mapRequired("header", Trace.Header);
    IO.mapRequired("records", Trace.Records);
  }
};

} // namespace yaml
} // namespace llvm

LLVM_YAML_IS_SEQUENCE_VECTOR(xray::YAMLXRayRecord)

#endif // LLVM_XRAY_YAMLXRAYRECORD_H
//===- FDRRecords.h - XRay Flight Data Recorder Mode Records --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Define types and operations on these types that represent the different kinds
// of records we encounter in XRay flight data recorder mode traces.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FDRRECORDS_H
#define LLVM_XRAY_FDRRECORDS_H

#include <cstdint>
#include <string>

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/Error.h"
#include "llvm/XRay/XRayRecord.h"

namespace llvm {
namespace xray {

class RecordVisitor;
class RecordInitializer;

class Record {
public:
  enum class RecordKind {
    RK_Metadata,
    RK_Metadata_BufferExtents,
    RK_Metadata_WallClockTime,
    RK_Metadata_NewCPUId,
    RK_Metadata_TSCWrap,
    RK_Metadata_CustomEvent,
    RK_Metadata_CustomEventV5,
    RK_Metadata_CallArg,
    RK_Metadata_PIDEntry,
    RK_Metadata_NewBuffer,
    RK_Metadata_EndOfBuffer,
    RK_Metadata_TypedEvent,
    RK_Metadata_LastMetadata,
    RK_Function,
  };

  static StringRef kindToString(RecordKind K);

private:
  const RecordKind T;

public:
  Record(const Record &) = delete;
  Record(Record &&) = delete;
  Record &operator=(const Record &) = delete;
  Record &operator=(Record &&) = delete;
  explicit Record(RecordKind T) : T(T) {}

  RecordKind getRecordType() const { return T; }

  // Each Record should be able to apply an abstract visitor, and choose the
  // appropriate function in the visitor to invoke, given its own type.
  virtual Error apply(RecordVisitor &V) = 0;

  virtual ~Record() = default;
};

class MetadataRecord : public Record {
public:
  enum class MetadataType : unsigned {
    Unknown,
    BufferExtents,
    WallClockTime,
    NewCPUId,
    TSCWrap,
    CustomEvent,
    CallArg,
    PIDEntry,
    NewBuffer,
    EndOfBuffer,
    TypedEvent,
  };

protected:
  static constexpr int kMetadataBodySize = 15;
  friend class RecordInitializer;

private:
  const MetadataType MT;

public:
  explicit MetadataRecord(RecordKind T, MetadataType M) : Record(T), MT(M) {}

  static bool classof(const Record *R) {
    return R->getRecordType() >= RecordKind::RK_Metadata &&
           R->getRecordType() <= RecordKind::RK_Metadata_LastMetadata;
  }

  MetadataType metadataType() const { return MT; }

  virtual ~MetadataRecord() = default;
};

// What follows are the concrete Metadata record types, each encapsulating the
// information associated with one kind of metadata record in an FDR mode log.
class BufferExtents : public MetadataRecord {
  uint64_t Size = 0;
  friend class RecordInitializer;

public:
  BufferExtents()
      : MetadataRecord(RecordKind::RK_Metadata_BufferExtents,
                       MetadataType::BufferExtents) {}

  explicit BufferExtents(uint64_t S)
      : MetadataRecord(RecordKind::RK_Metadata_BufferExtents,
                       MetadataType::BufferExtents),
        Size(S) {}

  uint64_t size() const { return Size; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_BufferExtents;
  }
};

class WallclockRecord : public MetadataRecord {
  uint64_t Seconds = 0;
  uint32_t Nanos = 0;
  friend class RecordInitializer;

public:
  WallclockRecord()
      : MetadataRecord(RecordKind::RK_Metadata_WallClockTime,
                       MetadataType::WallClockTime) {}

  explicit WallclockRecord(uint64_t S, uint32_t N)
      : MetadataRecord(RecordKind::RK_Metadata_WallClockTime,
                       MetadataType::WallClockTime),
        Seconds(S), Nanos(N) {}

  uint64_t seconds() const { return Seconds; }
  uint32_t nanos() const { return Nanos; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_WallClockTime;
  }
};

class NewCPUIDRecord : public MetadataRecord {
  uint16_t CPUId = 0;
  uint64_t TSC = 0;
  friend class RecordInitializer;

public:
  NewCPUIDRecord()
      : MetadataRecord(RecordKind::RK_Metadata_NewCPUId,
                       MetadataType::NewCPUId) {}

  NewCPUIDRecord(uint16_t C, uint64_t T)
      : MetadataRecord(RecordKind::RK_Metadata_NewCPUId,
                       MetadataType::NewCPUId),
        CPUId(C), TSC(T) {}

  uint16_t cpuid() const { return CPUId; }

  uint64_t tsc() const { return TSC; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_NewCPUId;
  }
};

class TSCWrapRecord : public MetadataRecord {
  uint64_t BaseTSC = 0;
  friend class RecordInitializer;

public:
  TSCWrapRecord()
      : MetadataRecord(RecordKind::RK_Metadata_TSCWrap, MetadataType::TSCWrap) {
  }

  explicit TSCWrapRecord(uint64_t B)
      : MetadataRecord(RecordKind::RK_Metadata_TSCWrap, MetadataType::TSCWrap),
        BaseTSC(B) {}

  uint64_t tsc() const { return BaseTSC; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_TSCWrap;
  }
};

class CustomEventRecord : public MetadataRecord {
  int32_t Size = 0;
  uint64_t TSC = 0;
  uint16_t CPU = 0;
  std::string Data{};
  friend class RecordInitializer;

public:
  CustomEventRecord()
      : MetadataRecord(RecordKind::RK_Metadata_CustomEvent,
                       MetadataType::CustomEvent) {}

  explicit CustomEventRecord(uint64_t S, uint64_t T, uint16_t C, std::string D)
      : MetadataRecord(RecordKind::RK_Metadata_CustomEvent,
                       MetadataType::CustomEvent),
        Size(S), TSC(T), CPU(C), Data(std::move(D)) {}

  int32_t size() const { return Size; }
  uint64_t tsc() const { return TSC; }
  uint16_t cpu() const { return CPU; }
  StringRef data() const { return Data; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_CustomEvent;
  }
};

class CustomEventRecordV5 : public MetadataRecord {
  int32_t Size = 0;
  int32_t Delta = 0;
  std::string Data{};
  friend class RecordInitializer;

public:
  CustomEventRecordV5()
      : MetadataRecord(RecordKind::RK_Metadata_CustomEventV5,
                       MetadataType::CustomEvent) {}

  explicit CustomEventRecordV5(int32_t S, int32_t D, std::string P)
      : MetadataRecord(RecordKind::RK_Metadata_CustomEventV5,
                       MetadataType::CustomEvent),
        Size(S), Delta(D), Data(std::move(P)) {}

  int32_t size() const { return Size; }
  int32_t delta() const { return Delta; }
  StringRef data() const { return Data; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_CustomEventV5;
  }
};

class TypedEventRecord : public MetadataRecord {
  int32_t Size = 0;
  int32_t Delta = 0;
  uint16_t EventType = 0;
  std::string Data{};
  friend class RecordInitializer;

public:
  TypedEventRecord()
      : MetadataRecord(RecordKind::RK_Metadata_TypedEvent,
                       MetadataType::TypedEvent) {}

  explicit TypedEventRecord(int32_t S, int32_t D, uint16_t E, std::string P)
      : MetadataRecord(RecordKind::RK_Metadata_TypedEvent,
                       MetadataType::TypedEvent),
        Size(S), Delta(D), Data(std::move(P)) {}

  int32_t size() const { return Size; }
  int32_t delta() const { return Delta; }
  uint16_t eventType() const { return EventType; }
  StringRef data() const { return Data; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_TypedEvent;
  }
};

class CallArgRecord : public MetadataRecord {
  uint64_t Arg = 0;
  friend class RecordInitializer;

public:
  CallArgRecord()
      : MetadataRecord(RecordKind::RK_Metadata_CallArg, MetadataType::CallArg) {
  }

  explicit CallArgRecord(uint64_t A)
      : MetadataRecord(RecordKind::RK_Metadata_CallArg, MetadataType::CallArg),
        Arg(A) {}

  uint64_t arg() const { return Arg; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_CallArg;
  }
};

class PIDRecord : public MetadataRecord {
  int32_t PID = 0;
  friend class RecordInitializer;

public:
  PIDRecord()
      : MetadataRecord(RecordKind::RK_Metadata_PIDEntry,
                       MetadataType::PIDEntry) {}

  explicit PIDRecord(int32_t P)
      : MetadataRecord(RecordKind::RK_Metadata_PIDEntry,
                       MetadataType::PIDEntry),
        PID(P) {}

  int32_t pid() const { return PID; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_PIDEntry;
  }
};

class NewBufferRecord : public MetadataRecord {
  int32_t TID = 0;
  friend class RecordInitializer;

public:
  NewBufferRecord()
      : MetadataRecord(RecordKind::RK_Metadata_NewBuffer,
                       MetadataType::NewBuffer) {}

  explicit NewBufferRecord(int32_t T)
      : MetadataRecord(RecordKind::RK_Metadata_NewBuffer,
                       MetadataType::NewBuffer),
        TID(T) {}

  int32_t tid() const { return TID; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_NewBuffer;
  }
};

class EndBufferRecord : public MetadataRecord {
public:
  EndBufferRecord()
      : MetadataRecord(RecordKind::RK_Metadata_EndOfBuffer,
                       MetadataType::EndOfBuffer) {}

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Metadata_EndOfBuffer;
  }
};

class FunctionRecord : public Record {
  RecordTypes Kind;
  int32_t FuncId = 0;
  uint32_t Delta = 0;
  friend class RecordInitializer;

  static constexpr unsigned kFunctionRecordSize = 8;

public:
  FunctionRecord() : Record(RecordKind::RK_Function) {}

  explicit FunctionRecord(RecordTypes K, int32_t F, uint32_t D)
      : Record(RecordKind::RK_Function), Kind(K), FuncId(F), Delta(D) {}

  // A function record is a concrete record type which has a number of common
  // properties.
  RecordTypes recordType() const { return Kind; }
  int32_t functionId() const { return FuncId; }
  uint32_t delta() const { return Delta; }

  Error apply(RecordVisitor &V) override;

  static bool classof(const Record *R) {
    return R->getRecordType() == RecordKind::RK_Function;
  }
};

class RecordVisitor {
public:
  virtual ~RecordVisitor() = default;

  // Support all specific kinds of records:
  virtual Error visit(BufferExtents &) = 0;
  virtual Error visit(WallclockRecord &) = 0;
  virtual Error visit(NewCPUIDRecord &) = 0;
  virtual Error visit(TSCWrapRecord &) = 0;
  virtual Error visit(CustomEventRecord &) = 0;
  virtual Error visit(CallArgRecord &) = 0;
  virtual Error visit(PIDRecord &) = 0;
  virtual Error visit(NewBufferRecord &) = 0;
  virtual Error visit(EndBufferRecord &) = 0;
  virtual Error visit(FunctionRecord &) = 0;
  virtual Error visit(CustomEventRecordV5 &) = 0;
  virtual Error visit(TypedEventRecord &) = 0;
};

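// A RecordInitializer is the visitor that populates a record's fields from
// raw bytes, reading through the DataExtractor at *OffsetPtr and advancing
// the offset as it decodes. Illustrative sketch (Data is an assumed buffer):
//
//   DataExtractor DE(Data, /*IsLittleEndian=*/true, /*AddressSize=*/8);
//   uint64_t Offset = 0;
//   RecordInitializer RI(DE, Offset);
//   NewBufferRecord NB;
//   if (auto E = NB.apply(RI))  // decode a NewBuffer record at Offset
//     return E;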
class RecordInitializer : public RecordVisitor {
  DataExtractor &E;
  uint64_t &OffsetPtr;
  uint16_t Version;

public:
  static constexpr uint16_t DefaultVersion = 5u;

  explicit RecordInitializer(DataExtractor &DE, uint64_t &OP, uint16_t V)
      : E(DE), OffsetPtr(OP), Version(V) {}

  explicit RecordInitializer(DataExtractor &DE, uint64_t &OP)
      : RecordInitializer(DE, OP, DefaultVersion) {}

  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FDRRECORDS_H
//===- BlockVerifier.h - FDR Block Verifier -------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An implementation of the RecordVisitor which verifies a sequence of records
// associated with a block, following the FDR mode log format's specifications.
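//
// Illustrative sketch (Block.Records is an assumed indexed block of records):
//
//   BlockVerifier Verifier;
//   for (Record *R : Block.Records)
//     if (auto E = R->apply(Verifier))
//       return E;
//   if (auto E = Verifier.verify())  // check end-of-block invariants
//     return E;
//   Verifier.reset();                // reuse for the next block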
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_BLOCKVERIFIER_H
#define LLVM_XRAY_BLOCKVERIFIER_H

#include "llvm/XRay/FDRRecords.h"
#include <array>
#include <bitset>

namespace llvm {
namespace xray {

class BlockVerifier : public RecordVisitor {
public:
  // We force State elements to be size_t, to be used as indices for containers.
  enum class State : std::size_t {
    Unknown,
    BufferExtents,
    NewBuffer,
    WallClockTime,
    PIDEntry,
    NewCPUId,
    TSCWrap,
    CustomEvent,
    TypedEvent,
    Function,
    CallArg,
    EndOfBuffer,
    StateMax,
  };

private:
  // We keep track of the current record seen by the verifier.
  State CurrentRecord = State::Unknown;

  // Transitions from the current record state to To, returning an error on
  // invalid transitions.
  Error transition(State To);

public:
  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;

  Error verify();
  void reset();
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_BLOCKVERIFIER_H
//===- FDRTraceExpander.h - XRay FDR Mode Log Expander --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// We define an FDR record visitor which can re-constitute XRayRecord instances
// from a sequence of FDR mode records presented in arrival order.
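//
// Illustrative sketch (Records is an assumed sequence of parsed FDR records;
// the callback is named so it outlives the stored function_ref):
//
//   std::vector<XRayRecord> Out;
//   auto Push = [&](const XRayRecord &R) { Out.push_back(R); };
//   TraceExpander Expander(Push, /*LogVersion=*/5);
//   for (std::unique_ptr<Record> &R : Records)
//     if (auto E = R->apply(Expander))
//       return E;
//   if (auto E = Expander.flush())  // emit the last in-flight record
//     return E;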
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FDRTRACEEXPANDER_H
#define LLVM_XRAY_FDRTRACEEXPANDER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/XRay/FDRRecords.h"
#include "llvm/XRay/XRayRecord.h"

namespace llvm {
namespace xray {

class TraceExpander : public RecordVisitor {
  // Type-erased callback for handling individual XRayRecord instances.
  function_ref<void(const XRayRecord &)> C;
  int32_t PID = 0;
  int32_t TID = 0;
  uint64_t BaseTSC = 0;
  XRayRecord CurrentRecord{0, 0, RecordTypes::ENTER, 0, 0, 0, 0, {}, {}};
  uint16_t CPUId = 0;
  uint16_t LogVersion = 0;
  bool BuildingRecord = false;
  bool IgnoringRecords = false;

  void resetCurrentRecord();

public:
  explicit TraceExpander(function_ref<void(const XRayRecord &)> F, uint16_t L)
      : C(std::move(F)), LogVersion(L) {}

  Error visit(BufferExtents &) override;
  Error visit(WallclockRecord &) override;
  Error visit(NewCPUIDRecord &) override;
  Error visit(TSCWrapRecord &) override;
  Error visit(CustomEventRecord &) override;
  Error visit(CallArgRecord &) override;
  Error visit(PIDRecord &) override;
  Error visit(NewBufferRecord &) override;
  Error visit(EndBufferRecord &) override;
  Error visit(FunctionRecord &) override;
  Error visit(CustomEventRecordV5 &) override;
  Error visit(TypedEventRecord &) override;

  // Must be called after all the records have been processed, to handle the
  // most recent record generated.
  Error flush();
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FDRTRACEEXPANDER_H
//===- FDRLogBuilder.h - XRay FDR Log Building Utility --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_FDRLOGBUILDER_H
#define LLVM_XRAY_FDRLOGBUILDER_H

#include "llvm/XRay/FDRRecords.h"

namespace llvm {
namespace xray {

/// The LogBuilder class allows for creating ad-hoc collections of records
/// through the `add<...>(...)` function. An example use of this API is in
/// crafting arbitrary sequences of records:
///
///   auto Records = LogBuilder()
///       .add<BufferExtents>(256)
///       .add<NewBufferRecord>(1)
///       .consume();
///
class LogBuilder {
  std::vector<std::unique_ptr<Record>> Records;

public:
  template <class R, class... T> LogBuilder &add(T &&... A) {
    Records.emplace_back(new R(std::forward<T>(A)...));
    return *this;
  }

  std::vector<std::unique_ptr<Record>> consume() { return std::move(Records); }
};

} // namespace xray
} // namespace llvm

#endif // LLVM_XRAY_FDRLOGBUILDER_H
//===- Arg.h - Parsed Argument Classes --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Defines the llvm::Arg class for parsed arguments.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_OPTION_ARG_H
#define LLVM_OPTION_ARG_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Option.h"
#include <string>

namespace llvm {

class raw_ostream;

namespace opt {

class ArgList;

/// A concrete instance of a particular driver option.
///
/// The Arg class encodes just enough information to be able to
/// derive the argument values efficiently.
class Arg {
private:
  /// The option this argument is an instance of.
  const Option Opt;

  /// The argument this argument was derived from (during tool chain
  /// argument translation), if any.
  const Arg *BaseArg;

  /// How this instance of the option was spelled.
  StringRef Spelling;

  /// The index at which this argument appears in the containing
  /// ArgList.
  unsigned Index;

  /// Was this argument used to affect compilation?
  ///
  /// This is used to generate an "argument unused" warning (without
  /// clang::driver::options::TargetSpecific) or "unsupported option" error
  /// (with TargetSpecific).
  mutable unsigned Claimed : 1;

  /// Used by an unclaimed option with the TargetSpecific flag. If set, report
  /// an "argument unused" warning instead of an "unsupported option" error.
  unsigned IgnoredTargetSpecific : 1;

  /// Does this argument own its values?
  mutable unsigned OwnsValues : 1;

  /// The argument values, as C strings.
  SmallVector<const char *, 2> Values;

  /// If this arg was created through an alias, this is the original alias arg.
  /// For example, *this might be "-finput-charset=utf-8" and Alias might
  /// point to an arg representing "/source-charset:utf-8".
  std::unique_ptr<Arg> Alias;

public:
  Arg(const Option Opt, StringRef Spelling, unsigned Index,
      const Arg *BaseArg = nullptr);
  Arg(const Option Opt, StringRef Spelling, unsigned Index,
      const char *Value0, const Arg *BaseArg = nullptr);
  Arg(const Option Opt, StringRef Spelling, unsigned Index,
      const char *Value0, const char *Value1, const Arg *BaseArg = nullptr);
  Arg(const Arg &) = delete;
  Arg &operator=(const Arg &) = delete;
  ~Arg();

  const Option &getOption() const { return Opt; }

  /// Returns the used prefix and name of the option:
  /// For `--foo=bar`, returns `--foo=`.
  /// This is often the wrong function to call:
  /// * Use `getValue()` to get `bar`.
  /// * Use `getAsString()` to get a string suitable for printing an Arg in
  ///   a diagnostic.
  StringRef getSpelling() const { return Spelling; }

  unsigned getIndex() const { return Index; }

  /// Return the base argument which generated this arg.
  ///
  /// This is either the argument itself or the argument it was
  /// derived from during tool chain specific argument translation.
  const Arg &getBaseArg() const {
    return BaseArg ? *BaseArg : *this;
  }
  Arg &getBaseArg() { return BaseArg ? const_cast<Arg &>(*BaseArg) : *this; }
  void setBaseArg(const Arg *BaseArg) { this->BaseArg = BaseArg; }

  /// Args are converted to their unaliased form.  For args that originally
  /// came from an alias, this returns the alias the arg was produced from.
  const Arg* getAlias() const { return Alias.get(); }
  void setAlias(std::unique_ptr<Arg> Alias) { this->Alias = std::move(Alias); }

  bool getOwnsValues() const { return OwnsValues; }
  void setOwnsValues(bool Value) const { OwnsValues = Value; }

  bool isClaimed() const { return getBaseArg().Claimed; }
  void claim() const { getBaseArg().Claimed = true; }

  bool isIgnoredTargetSpecific() const {
    return getBaseArg().IgnoredTargetSpecific;
  }
  void ignoreTargetSpecific() {
    getBaseArg().IgnoredTargetSpecific = true;
  }

  unsigned getNumValues() const { return Values.size(); }

  const char *getValue(unsigned N = 0) const {
    return Values[N];
  }

  SmallVectorImpl<const char *> &getValues() { return Values; }
  const SmallVectorImpl<const char *> &getValues() const { return Values; }

  bool containsValue(StringRef Value) const {
    return llvm::is_contained(Values, Value);
  }

  /// Append the argument onto the given array as strings.
  void render(const ArgList &Args, ArgStringList &Output) const;

  /// Append the argument, render as an input, onto the given
  /// array as strings.
  ///
  /// The distinction is that some options only render their values
  /// when rendered as an input (e.g., Xlinker).
  void renderAsInput(const ArgList &Args, ArgStringList &Output) const;

  void print(raw_ostream &O) const;
  void dump() const;

  /// Return a formatted version of the argument and its values, for
  /// diagnostics. Since this is for diagnostics, if this Arg was produced
  /// through an alias, this returns the string representation of the alias
  /// that the user wrote.
  std::string getAsString(const ArgList &Args) const;
};

} // end namespace opt

} // end namespace llvm

#endif // LLVM_OPTION_ARG_H
//===- Option.h - Abstract Driver Options -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OPTION_OPTION_H
#define LLVM_OPTION_OPTION_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <string>

namespace llvm {

class raw_ostream;

namespace opt {

class Arg;
class ArgList;

/// ArgStringList - Type used for constructing argv lists for subprocesses.
using ArgStringList = SmallVector<const char *, 16>;

/// Base flags for all options. Custom flags may be added after.
enum DriverFlag {
  HelpHidden       = (1 << 0),
  RenderAsInput    = (1 << 1),
  RenderJoined     = (1 << 2),
  RenderSeparate   = (1 << 3)
};

/// Option - Abstract representation for a single form of driver
/// argument.
///
/// An Option class represents a form of option that the driver
/// takes, for example how many arguments the option has and how
/// they can be provided. Individual option instances store
/// additional information about what group the option is a member
/// of (if any), if the option is an alias, and a number of
/// flags. At runtime the driver parses the command line into
/// concrete Arg instances, each of which corresponds to a
/// particular Option instance.
class Option {
public:
  enum OptionClass {
    GroupClass = 0,
    InputClass,
    UnknownClass,
    FlagClass,
    JoinedClass,
    ValuesClass,
    SeparateClass,
    RemainingArgsClass,
    RemainingArgsJoinedClass,
    CommaJoinedClass,
    MultiArgClass,
    JoinedOrSeparateClass,
    JoinedAndSeparateClass
  };

  enum RenderStyleKind {
    RenderCommaJoinedStyle,
    RenderJoinedStyle,
    RenderSeparateStyle,
    RenderValuesStyle
  };

protected:
  const OptTable::Info *Info;
  const OptTable *Owner;

public:
  Option(const OptTable::Info *Info, const OptTable *Owner);

  bool isValid() const {
    return Info != nullptr;
  }

  unsigned getID() const {
    assert(Info && "Must have a valid info!");
    return Info->ID;
  }

  OptionClass getKind() const {
    assert(Info && "Must have a valid info!");
    return OptionClass(Info->Kind);
  }

  /// Get the name of this option without any prefix.
  StringRef getName() const {
    assert(Info && "Must have a valid info!");
    return Info->Name;
  }

  const Option getGroup() const {
    assert(Info && "Must have a valid info!");
    assert(Owner && "Must have a valid owner!");
    return Owner->getOption(Info->GroupID);
  }

  const Option getAlias() const {
    assert(Info && "Must have a valid info!");
    assert(Owner && "Must have a valid owner!");
    return Owner->getOption(Info->AliasID);
  }

  /// Get the alias arguments as a \0 separated list.
  /// E.g. ["foo", "bar"] would be returned as "foo\0bar\0".
  const char *getAliasArgs() const {
    assert(Info && "Must have a valid info!");
    assert((!Info->AliasArgs || Info->AliasArgs[0] != 0) &&
           "AliasArgs should be either 0 or non-empty.");

    return Info->AliasArgs;
  }

  /// Get the default prefix for this option.
  StringRef getPrefix() const {
    return Info->Prefixes.empty()
               ? StringRef()
               : static_cast<const StringRef &>(Info->Prefixes[0]);
  }

  /// Get the name of this option with the default prefix.
  std::string getPrefixedName() const {
    std::string Ret(getPrefix());
    Ret += getName();
    return Ret;
  }

  /// Get the help text for this option.
  StringRef getHelpText() const {
    assert(Info && "Must have a valid info!");
    return Info->HelpText;
  }

  /// Get the meta-variable list for this option.
  StringRef getMetaVar() const {
    assert(Info && "Must have a valid info!");
    return Info->MetaVar;
  }

  unsigned getNumArgs() const { return Info->Param; }

  bool hasNoOptAsInput() const { return Info->Flags & RenderAsInput;}

  RenderStyleKind getRenderStyle() const {
    if (Info->Flags & RenderJoined)
      return RenderJoinedStyle;
    if (Info->Flags & RenderSeparate)
      return RenderSeparateStyle;
    switch (getKind()) {
    case GroupClass:
    case InputClass:
    case UnknownClass:
      return RenderValuesStyle;
    case JoinedClass:
    case JoinedAndSeparateClass:
      return RenderJoinedStyle;
    case CommaJoinedClass:
      return RenderCommaJoinedStyle;
    case FlagClass:
    case ValuesClass:
    case SeparateClass:
    case MultiArgClass:
    case JoinedOrSeparateClass:
    case RemainingArgsClass:
    case RemainingArgsJoinedClass:
      return RenderSeparateStyle;
    }
    llvm_unreachable("Unexpected kind!");
  }

  /// Test if this option has the flag \a Val.
  bool hasFlag(unsigned Val) const {
    return Info->Flags & Val;
  }

  /// getUnaliasedOption - Return the final option this option
  /// aliases (itself, if the option has no alias).
  const Option getUnaliasedOption() const {
    const Option Alias = getAlias();
    if (Alias.isValid()) return Alias.getUnaliasedOption();
    return *this;
  }

  /// getRenderName - Return the name to use when rendering this
  /// option.
  StringRef getRenderName() const {
    return getUnaliasedOption().getName();
  }

  /// matches - Predicate for whether this option is part of the
  /// given option (which may be a group).
  ///
  /// Note that matches against options which are an alias should never be
  /// done -- aliases do not participate in matching and so such a query will
  /// always be false.
  bool matches(OptSpecifier ID) const;

  /// Potentially accept the current argument, returning a new Arg instance,
  /// or 0 if the option does not accept this argument (or the argument is
  /// missing values).
  ///
  /// If the option accepts the current argument, accept() sets
  /// Index to the position where argument parsing should resume
  /// (even if the argument is missing values).
  ///
  /// \p CurArg The argument to be matched. It may be shorter than the
  /// underlying storage to represent a Joined argument.
  /// \p GroupedShortOption If true, we are handling the fallback case of
  /// parsing a prefix of the current argument as a short option.
  std::unique_ptr<Arg> accept(const ArgList &Args, StringRef CurArg,
                              bool GroupedShortOption, unsigned &Index) const;

private:
  std::unique_ptr<Arg> acceptInternal(const ArgList &Args, StringRef CurArg,
                                      unsigned &Index) const;

public:
  void print(raw_ostream &O) const;
  void dump() const;
};

} // end namespace opt

} // end namespace llvm

#endif // LLVM_OPTION_OPTION_H
//===- ArgList.h - Argument List Management ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OPTION_ARGLIST_H
#define LLVM_OPTION_ARGLIST_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/Option.h"
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class raw_ostream;

namespace opt {

/// arg_iterator - Iterates through arguments stored inside an ArgList.
template<typename BaseIter, unsigned NumOptSpecifiers = 0>
class arg_iterator {
  /// The current argument and the end of the sequence we're iterating.
  BaseIter Current, End;

  /// Optional filters on the arguments which will be matched. To avoid a
  /// zero-sized array, we store one specifier even if we're asked for none.
  OptSpecifier Ids[NumOptSpecifiers ? NumOptSpecifiers : 1];

  void SkipToNextArg() {
    for (; Current != End; ++Current) {
      // Skip erased elements.
      if (!*Current)
        continue;

      // Done if there are no filters.
      if (!NumOptSpecifiers)
        return;

      // Otherwise require a match.
      const Option &O = (*Current)->getOption();
      for (auto Id : Ids) {
        if (!Id.isValid())
          break;
        if (O.matches(Id))
          return;
      }
    }
  }

  using Traits = std::iterator_traits<BaseIter>;

public:
  using value_type = typename Traits::value_type;
  using reference = typename Traits::reference;
  using pointer = typename Traits::pointer;
  using iterator_category = std::forward_iterator_tag;
  using difference_type = std::ptrdiff_t;

  arg_iterator(
      BaseIter Current, BaseIter End,
      const OptSpecifier (&Ids)[NumOptSpecifiers ? NumOptSpecifiers : 1] = {})
      : Current(Current), End(End) {
    for (unsigned I = 0; I != NumOptSpecifiers; ++I)
      this->Ids[I] = Ids[I];
    SkipToNextArg();
  }

  reference operator*() const { return *Current; }
  pointer operator->() const { return Current; }

  arg_iterator &operator++() {
    ++Current;
    SkipToNextArg();
    return *this;
  }

  arg_iterator operator++(int) {
    arg_iterator tmp(*this);
    ++(*this);
    return tmp;
  }

  friend bool operator==(arg_iterator LHS, arg_iterator RHS) {
    return LHS.Current == RHS.Current;
  }
  friend bool operator!=(arg_iterator LHS, arg_iterator RHS) {
    return !(LHS == RHS);
  }
};

/// ArgList - Ordered collection of driver arguments.
///
/// The ArgList class manages a list of Arg instances as well as
/// auxiliary data and convenience methods to allow Tools to quickly
/// check for the presence of Arg instances for a particular Option
/// and to iterate over groups of arguments.
class ArgList {
public:
  using arglist_type = SmallVector<Arg *, 16>;
  using iterator = arg_iterator<arglist_type::iterator>;
  using const_iterator = arg_iterator<arglist_type::const_iterator>;
  using reverse_iterator = arg_iterator<arglist_type::reverse_iterator>;
  using const_reverse_iterator =
      arg_iterator<arglist_type::const_reverse_iterator>;

  template<unsigned N> using filtered_iterator =
      arg_iterator<arglist_type::const_iterator, N>;
  template<unsigned N> using filtered_reverse_iterator =
      arg_iterator<arglist_type::const_reverse_iterator, N>;

private:
  /// The internal list of arguments.
  arglist_type Args;

  using OptRange = std::pair<unsigned, unsigned>;
  static OptRange emptyRange() { return {-1u, 0u}; }

  /// The first and last index of each different OptSpecifier ID.
  DenseMap<unsigned, OptRange> OptRanges;

  /// Get the range of indexes in which options with the specified IDs might
  /// reside, or (0, 0) if there are no such options.
  OptRange getRange(std::initializer_list<OptSpecifier> Ids) const;

protected:
  // Make the default special members protected so they won't be used to slice
  // derived objects, but can still be used by derived objects to implement
  // their own special members.
  ArgList() = default;

  // Explicit move operations to ensure the container is cleared post-move
  // otherwise it could lead to a double-delete in the case of moving of an
  // InputArgList which deletes the contents of the container. If we could fix
  // up the ownership here (delegate storage/ownership to the derived class so
  // it can be a container of unique_ptr) this would be simpler.
  ArgList(ArgList &&RHS)
      : Args(std::move(RHS.Args)), OptRanges(std::move(RHS.OptRanges)) {
    RHS.Args.clear();
    RHS.OptRanges.clear();
  }

  ArgList &operator=(ArgList &&RHS) {
    Args = std::move(RHS.Args);
    RHS.Args.clear();
    OptRanges = std::move(RHS.OptRanges);
    RHS.OptRanges.clear();
    return *this;
  }

  // Protect the dtor to ensure this type is never destroyed polymorphically.
  ~ArgList() = default;

  // Implicitly convert a value to an OptSpecifier. Used to work around a bug
  // in MSVC's implementation of narrowing conversion checking.
  static OptSpecifier toOptSpecifier(OptSpecifier S) { return S; }

public:
  /// @name Arg Access
  /// @{

  /// append - Append \p A to the arg list.
  void append(Arg *A);

  const arglist_type &getArgs() const { return Args; }

  unsigned size() const { return Args.size(); }

  /// @}
  /// @name Arg Iteration
  /// @{

  iterator begin() { return {Args.begin(), Args.end()}; }
  iterator end() { return {Args.end(), Args.end()}; }

  reverse_iterator rbegin() { return {Args.rbegin(), Args.rend()}; }
  reverse_iterator rend() { return {Args.rend(), Args.rend()}; }

  const_iterator begin() const { return {Args.begin(), Args.end()}; }
  const_iterator end() const { return {Args.end(), Args.end()}; }

  const_reverse_iterator rbegin() const { return {Args.rbegin(), Args.rend()}; }
  const_reverse_iterator rend() const { return {Args.rend(), Args.rend()}; }

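  /// Return a range over arguments matching any of the given specifiers, e.g.
  /// (OPT_I and OPT_isystem are illustrative option IDs):
  ///
  ///   for (Arg *A : Args.filtered(OPT_I, OPT_isystem))
  ///     handlePath(A->getValue());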
  template<typename ...OptSpecifiers>
  iterator_range<filtered_iterator<sizeof...(OptSpecifiers)>>
  filtered(OptSpecifiers ...Ids) const {
    OptRange Range = getRange({toOptSpecifier(Ids)...});
    auto B = Args.begin() + Range.first;
    auto E = Args.begin() + Range.second;
    using Iterator = filtered_iterator<sizeof...(OptSpecifiers)>;
    return make_range(Iterator(B, E, {toOptSpecifier(Ids)...}),
                      Iterator(E, E, {toOptSpecifier(Ids)...}));
  }

  template<typename ...OptSpecifiers>
  iterator_range<filtered_reverse_iterator<sizeof...(OptSpecifiers)>>
  filtered_reverse(OptSpecifiers ...Ids) const {
    OptRange Range = getRange({toOptSpecifier(Ids)...});
    auto B = Args.rend() - Range.second;
    auto E = Args.rend() - Range.first;
    using Iterator = filtered_reverse_iterator<sizeof...(OptSpecifiers)>;
    return make_range(Iterator(B, E, {toOptSpecifier(Ids)...}),
                      Iterator(E, E, {toOptSpecifier(Ids)...}));
  }

  /// @}
  /// @name Arg Removal
  /// @{

  /// eraseArg - Remove any option matching \p Id.
  void eraseArg(OptSpecifier Id);

  /// @}
  /// @name Arg Access
  /// @{

  /// hasArg - Does the arg list contain any option matching \p Ids.
  ///
  /// The hasArg form claims any matching argument; hasArgNoClaim does not.
  template<typename ...OptSpecifiers>
  bool hasArgNoClaim(OptSpecifiers ...Ids) const {
    return getLastArgNoClaim(Ids...) != nullptr;
  }
  template<typename ...OptSpecifiers>
  bool hasArg(OptSpecifiers ...Ids) const {
    return getLastArg(Ids...) != nullptr;
  }

  /// Return true if the arg list contains multiple arguments matching \p Id.
  bool hasMultipleArgs(OptSpecifier Id) const {
    auto Args = filtered(Id);
    return (Args.begin() != Args.end()) && (++Args.begin()) != Args.end();
  }

  /// Return the last argument matching \p Id, or null.
  template<typename ...OptSpecifiers>
  Arg *getLastArg(OptSpecifiers ...Ids) const {
    Arg *Res = nullptr;
    for (Arg *A : filtered(Ids...)) {
      Res = A;
      Res->claim();
    }
    return Res;
  }

  /// Return the last argument matching \p Id, or null. Do not "claim" the
  /// option (don't mark it as having been used).
  template<typename ...OptSpecifiers>
  Arg *getLastArgNoClaim(OptSpecifiers ...Ids) const {
    for (Arg *A : filtered_reverse(Ids...))
      return A;
    return nullptr;
  }

  /// getArgString - Return the input argument string at \p Index.
  virtual const char *getArgString(unsigned Index) const = 0;

  /// getNumInputArgStrings - Return the number of original argument strings,
  /// which are guaranteed to be the first strings in the argument string
  /// list.
  virtual unsigned getNumInputArgStrings() const = 0;

  /// @}
  /// @name Argument Lookup Utilities
  /// @{

  /// getLastArgValue - Return the value of the last argument, or a default.
  StringRef getLastArgValue(OptSpecifier Id, StringRef Default = "") const;

  /// getAllArgValues - Get the values of all instances of the given argument
  /// as strings.
  std::vector<std::string> getAllArgValues(OptSpecifier Id) const;

  /// @}
  /// @name Translation Utilities
  /// @{

  /// hasFlag - Given an option \p Pos and its negative form \p Neg, return
  /// true if the option is present, false if the negation is present, and
  /// \p Default if neither option is given. If both the option and its
  /// negation are present, the last one wins.
  bool hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const;
  bool hasFlagNoClaim(OptSpecifier Pos, OptSpecifier Neg, bool Default) const;

  /// hasFlag - Given an option \p Pos, an alias \p PosAlias and its negative
  /// form \p Neg, return true if the option or its alias is present, false if
  /// the negation is present, and \p Default if none of the options are
  /// given. If multiple options are present, the last one wins.
  bool hasFlag(OptSpecifier Pos, OptSpecifier PosAlias, OptSpecifier Neg,
               bool Default) const;
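  // Example (a sketch; OPT_ffoo and OPT_fno_foo are hypothetical flag IDs):
  //
  //   bool EnableFoo = Args.hasFlag(OPT_ffoo, OPT_fno_foo, /*Default=*/true);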

  /// Given an option Pos and its negative form Neg, render the option if Pos is
  /// present.
  void addOptInFlag(ArgStringList &Output, OptSpecifier Pos,
                    OptSpecifier Neg) const;
  /// Render the option if Neg is present.
  void addOptOutFlag(ArgStringList &Output, OptSpecifier Pos,
                     OptSpecifier Neg) const {
    addOptInFlag(Output, Neg, Pos);
  }

  /// Render only the last argument matching \p Ids, if present.
  template<typename ...OptSpecifiers>
  void AddLastArg(ArgStringList &Output, OptSpecifiers ...Ids) const {
    if (Arg *A = getLastArg(Ids...)) // Calls claim() on all Ids's Args.
      A->render(*this, Output);
  }

  /// AddAllArgsExcept - Render all arguments matching any of the given ids
  /// and not matching any of the excluded ids.
  void AddAllArgsExcept(ArgStringList &Output, ArrayRef<OptSpecifier> Ids,
                        ArrayRef<OptSpecifier> ExcludeIds) const;
  /// AddAllArgs - Render all arguments matching any of the given ids.
  void AddAllArgs(ArgStringList &Output, ArrayRef<OptSpecifier> Ids) const;

  /// AddAllArgs - Render all arguments matching the given ids.
  void AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
                  OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;

  /// AddAllArgValues - Render the argument values of all arguments
  /// matching the given ids.
  void AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
                       OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;

  /// AddAllArgsTranslated - Render all the arguments matching the
  /// given ids, but forced to separate args and using the provided
  /// name instead of the first option value.
  ///
  /// \param Joined - If true, render the argument as joined with
  /// the option specifier.
  void AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
                            const char *Translation,
                            bool Joined = false) const;

  /// ClaimAllArgs - Claim all arguments which match the given
  /// option id.
  void ClaimAllArgs(OptSpecifier Id0) const;

  template <typename... OptSpecifiers>
  void claimAllArgs(OptSpecifiers... Ids) const {
    for (Arg *A : filtered(Ids...))
      A->claim();
  }

  /// ClaimAllArgs - Claim all arguments.
  ///
  void ClaimAllArgs() const;
  /// @}
  /// @name Arg Synthesis
  /// @{

  /// Construct a constant string pointer whose
  /// lifetime will match that of the ArgList.
  virtual const char *MakeArgStringRef(StringRef Str) const = 0;
  const char *MakeArgString(const Twine &Str) const {
    SmallString<256> Buf;
    return MakeArgStringRef(Str.toStringRef(Buf));
  }
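  // Example (a sketch; OutputFile is a hypothetical StringRef): synthesize a
  // joined argument string whose storage is owned by the arg list.
  //
  //   const char *Out = Args.MakeArgString(Twine("-o") + OutputFile);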

  /// Create an arg string for (\p LHS + \p RHS), reusing the
  /// string at \p Index if possible.
  const char *GetOrMakeJoinedArgString(unsigned Index, StringRef LHS,
                                        StringRef RHS) const;

  void print(raw_ostream &O) const;
  void dump() const;

  /// @}
};

class InputArgList final : public ArgList {
private:
  /// List of argument strings used by the contained Args.
  ///
  /// This is mutable since we treat the ArgList as being the list
  /// of Args, and allow routines to add new strings (to have a
  /// convenient place to store the memory) via MakeIndex.
  mutable ArgStringList ArgStrings;

  /// Strings for synthesized arguments.
  ///
  /// This is mutable since we treat the ArgList as being the list
  /// of Args, and allow routines to add new strings (to have a
  /// convenient place to store the memory) via MakeIndex.
  mutable std::list<std::string> SynthesizedStrings;

  /// The number of original input argument strings.
  unsigned NumInputArgStrings;

  /// Release allocated arguments.
  void releaseMemory();

public:
  InputArgList() : NumInputArgStrings(0) {}

  InputArgList(const char* const *ArgBegin, const char* const *ArgEnd);

  InputArgList(InputArgList &&RHS)
      : ArgList(std::move(RHS)), ArgStrings(std::move(RHS.ArgStrings)),
        SynthesizedStrings(std::move(RHS.SynthesizedStrings)),
        NumInputArgStrings(RHS.NumInputArgStrings) {}

  InputArgList &operator=(InputArgList &&RHS) {
    releaseMemory();
    ArgList::operator=(std::move(RHS));
    ArgStrings = std::move(RHS.ArgStrings);
    SynthesizedStrings = std::move(RHS.SynthesizedStrings);
    NumInputArgStrings = RHS.NumInputArgStrings;
    return *this;
  }

  ~InputArgList() { releaseMemory(); }

  const char *getArgString(unsigned Index) const override {
    return ArgStrings[Index];
  }

  void replaceArgString(unsigned Index, const Twine &S) {
    ArgStrings[Index] = MakeArgString(S);
  }

  unsigned getNumInputArgStrings() const override {
    return NumInputArgStrings;
  }

  /// @name Arg Synthesis
  /// @{

public:
  /// MakeIndex - Get an index for the given string(s).
  unsigned MakeIndex(StringRef String0) const;
  unsigned MakeIndex(StringRef String0, StringRef String1) const;

  using ArgList::MakeArgString;
  const char *MakeArgStringRef(StringRef Str) const override;

  /// @}
};

/// DerivedArgList - An ordered collection of driver arguments,
/// whose storage may be in another argument list.
class DerivedArgList final : public ArgList {
  const InputArgList &BaseArgs;

  /// The list of arguments we synthesized.
  mutable SmallVector<std::unique_ptr<Arg>, 16> SynthesizedArgs;

public:
  /// Construct a new derived arg list from \p BaseArgs.
  DerivedArgList(const InputArgList &BaseArgs);

  const char *getArgString(unsigned Index) const override {
    return BaseArgs.getArgString(Index);
  }

  unsigned getNumInputArgStrings() const override {
    return BaseArgs.getNumInputArgStrings();
  }

  const InputArgList &getBaseArgs() const {
    return BaseArgs;
  }

  /// @name Arg Synthesis
  /// @{

  /// AddSynthesizedArg - Add an argument to the list of synthesized arguments
  /// (to be freed).
  void AddSynthesizedArg(Arg *A);

  using ArgList::MakeArgString;
  const char *MakeArgStringRef(StringRef Str) const override;

  /// AddFlagArg - Construct a new FlagArg for the given option \p Opt and
  /// append it to the argument list.
  void AddFlagArg(const Arg *BaseArg, const Option Opt) {
    append(MakeFlagArg(BaseArg, Opt));
  }

  /// AddPositionalArg - Construct a new Positional arg for the given option
  /// \p Opt, with the provided \p Value, and append it to the argument
  /// list.
  void AddPositionalArg(const Arg *BaseArg, const Option Opt,
                        StringRef Value) {
    append(MakePositionalArg(BaseArg, Opt, Value));
  }

  /// AddSeparateArg - Construct a new Separate arg for the given option
  /// \p Opt, with the provided \p Value, and append it to the argument
  /// list.
  void AddSeparateArg(const Arg *BaseArg, const Option Opt,
                      StringRef Value) {
    append(MakeSeparateArg(BaseArg, Opt, Value));
  }

  /// AddJoinedArg - Construct a new Joined arg for the given option
  /// \p Opt, with the provided \p Value, and append it to the argument list.
  void AddJoinedArg(const Arg *BaseArg, const Option Opt,
                    StringRef Value) {
    append(MakeJoinedArg(BaseArg, Opt, Value));
  }
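  // Example (a sketch; Opts is a hypothetical OptTable and OPT_mcpu_EQ a
  // hypothetical option ID): synthesize a joined -mcpu= argument on top of
  // the base arguments.
  //
  //   DerivedArgList DAL(BaseArgs);
  //   DAL.AddJoinedArg(/*BaseArg=*/nullptr, Opts.getOption(OPT_mcpu_EQ),
  //                    "cortex-a53");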

  /// MakeFlagArg - Construct a new FlagArg for the given option \p Opt.
  Arg *MakeFlagArg(const Arg *BaseArg, const Option Opt) const;

  /// MakePositionalArg - Construct a new Positional arg for the
  /// given option \p Opt, with the provided \p Value.
  Arg *MakePositionalArg(const Arg *BaseArg, const Option Opt,
                          StringRef Value) const;

  /// MakeSeparateArg - Construct a new Separate arg for the
  /// given option \p Opt, with the provided \p Value.
  Arg *MakeSeparateArg(const Arg *BaseArg, const Option Opt,
                        StringRef Value) const;

  /// MakeJoinedArg - Construct a new Joined arg for the
  /// given option \p Opt, with the provided \p Value.
  Arg *MakeJoinedArg(const Arg *BaseArg, const Option Opt,
                      StringRef Value) const;

  /// @}
};

} // end namespace opt

} // end namespace llvm

#endif // LLVM_OPTION_ARGLIST_H
//===- OptTable.h - Option Table --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OPTION_OPTTABLE_H
#define LLVM_OPTION_OPTTABLE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Support/StringSaver.h"
#include <cassert>
#include <string>
#include <vector>

namespace llvm {

class raw_ostream;
template <typename Fn> class function_ref;

namespace opt {

class Arg;
class ArgList;
class InputArgList;
class Option;

/// Provide access to the Option info table.
///
/// The OptTable class provides a layer of indirection which allows Option
/// instance to be created lazily. In the common case, only a few options will
/// be needed at runtime; the OptTable class maintains enough information to
/// parse command lines without instantiating Options, while letting other
/// parts of the driver still use Option instances where convenient.
class OptTable {
public:
  /// Entry for a single option instance in the option data table.
  struct Info {
    /// An array of prefix strings to apply to the name while matching.
    ArrayRef<StringLiteral> Prefixes;
    StringRef Name;
    const char *HelpText;
    const char *MetaVar;
    unsigned ID;
    unsigned char Kind;
    unsigned char Param;
    unsigned int Flags;
    unsigned short GroupID;
    unsigned short AliasID;
    const char *AliasArgs;
    const char *Values;
  };

private:
  /// The option information table.
  ArrayRef<Info> OptionInfos;
  bool IgnoreCase;
  bool GroupedShortOptions = false;
  bool DashDashParsing = false;
  const char *EnvVar = nullptr;

  unsigned InputOptionID = 0;
  unsigned UnknownOptionID = 0;

protected:
  /// The index of the first option which can be parsed (i.e., is not a
  /// special option like 'input' or 'unknown', and is not an option group).
  unsigned FirstSearchableIndex = 0;

  /// The union of the first characters of all option prefixes.
  SmallString<8> PrefixChars;

  /// The union of all option prefixes. If an argument does not begin with
  /// one of these, it is an input.
  virtual ArrayRef<StringLiteral> getPrefixesUnion() const = 0;

private:
  const Info &getInfo(OptSpecifier Opt) const {
    unsigned id = Opt.getID();
    assert(id > 0 && id - 1 < getNumOptions() && "Invalid Option ID.");
    return OptionInfos[id - 1];
  }

  std::unique_ptr<Arg> parseOneArgGrouped(InputArgList &Args,
                                          unsigned &Index) const;

protected:
  /// Initialize OptTable using Tablegen'ed OptionInfos. Child classes must
  /// manually call \c buildPrefixChars once they are fully constructed.
  OptTable(ArrayRef<Info> OptionInfos, bool IgnoreCase = false);

  /// Build (or rebuild) the PrefixChars member.
  void buildPrefixChars();

public:
  virtual ~OptTable();

  /// Return the total number of option classes.
  unsigned getNumOptions() const { return OptionInfos.size(); }

  /// Get the given Opt's Option instance, lazily creating it
  /// if necessary.
  ///
  /// \return The option, or null for the INVALID option id.
  const Option getOption(OptSpecifier Opt) const;

  /// Lookup the name of the given option.
  StringRef getOptionName(OptSpecifier id) const { return getInfo(id).Name; }

  /// Get the kind of the given option.
  unsigned getOptionKind(OptSpecifier id) const {
    return getInfo(id).Kind;
  }

  /// Get the group id for the given option.
  unsigned getOptionGroupID(OptSpecifier id) const {
    return getInfo(id).GroupID;
  }

  /// Get the help text to use to describe this option.
  const char *getOptionHelpText(OptSpecifier id) const {
    return getInfo(id).HelpText;
  }

  /// Get the meta-variable name to use when describing
  /// this option's values in the help text.
  const char *getOptionMetaVar(OptSpecifier id) const {
    return getInfo(id).MetaVar;
  }

  /// Specify the environment variable where initial options should be read.
  void setInitialOptionsFromEnvironment(const char *E) { EnvVar = E; }

  /// Support grouped short options, e.g. -ab represents -a -b.
  void setGroupedShortOptions(bool Value) { GroupedShortOptions = Value; }

  /// Set whether "--" stops option parsing and treats all subsequent arguments
  /// as positional. E.g. -- -a -b gives two positional inputs.
  void setDashDashParsing(bool Value) { DashDashParsing = Value; }

  /// Find possible values for the given flag. This is used for shell
  /// autocompletion.
  ///
  /// \param [in] Option - Key flag like "-stdlib=" when "-stdlib=l"
  /// was passed to clang.
  ///
  /// \param [in] Arg - Value which we want to autocomplete like "l"
  /// when "-stdlib=l" was passed to clang.
  ///
  /// \return The vector of possible values.
  std::vector<std::string> suggestValueCompletions(StringRef Option,
                                                   StringRef Arg) const;

  /// Find flags from OptTable which start with \p Cur.
  ///
  /// \param [in] Cur - String prefix that all returned flags need
  /// to start with.
  ///
  /// \return The vector of flags which start with Cur.
  std::vector<std::string> findByPrefix(StringRef Cur,
                                        unsigned int DisableFlags) const;

  /// Find the OptTable option that most closely matches the given string.
  ///
  /// \param [in] Option - A string, such as "-stdlibs=l", that represents user
  /// input of an option that may not exist in the OptTable. Note that the
  /// string includes prefix dashes "-" as well as values "=l".
  /// \param [out] NearestString - The nearest option string found in the
  /// OptTable.
  /// \param [in] FlagsToInclude - Only find options with any of these flags.
  /// Zero is the default, which includes all flags.
  /// \param [in] FlagsToExclude - Don't find options with this flag. Zero
  /// is the default, and means exclude nothing.
  /// \param [in] MinimumLength - Don't find options shorter than this length.
  /// For example, a minimum length of 3 prevents "-x" from being considered
  /// near to "-S".
  /// \param [in] MaximumDistance - Don't find options whose distance is greater
  /// than this value.
  ///
  /// \return The edit distance of the nearest string found.
  unsigned findNearest(StringRef Option, std::string &NearestString,
                       unsigned FlagsToInclude = 0, unsigned FlagsToExclude = 0,
                       unsigned MinimumLength = 4,
                       unsigned MaximumDistance = UINT_MAX) const;
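  // Example (a sketch; Tbl is a hypothetical OptTable instance): suggest a
  // correction for a possibly misspelled option.
  //
  //   std::string Nearest;
  //   if (Tbl.findNearest("-stdlibs=l", Nearest) <= 1)
  //     llvm::errs() << "unknown option; did you mean '" << Nearest << "'?\n";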

  bool findExact(StringRef Option, std::string &ExactString,
                 unsigned FlagsToInclude = 0,
                 unsigned FlagsToExclude = 0) const {
    return findNearest(Option, ExactString, FlagsToInclude, FlagsToExclude, 4,
                       0) == 0;
  }

  /// Parse a single argument; returning the new argument and
  /// updating Index.
  ///
  /// \param [in,out] Index - The current parsing position in the argument
  /// string list; on return this will be the index of the next argument
  /// string to parse.
  /// \param [in] FlagsToInclude - Only parse options with any of these flags.
  /// Zero is the default which includes all flags.
  /// \param [in] FlagsToExclude - Don't parse options with this flag.  Zero
  /// is the default and means exclude nothing.
  ///
  /// \return The parsed argument, or 0 if the argument is missing values
  /// (in which case Index still points at the conceptual next argument string
  /// to parse).
  std::unique_ptr<Arg> ParseOneArg(const ArgList &Args, unsigned &Index,
                                   unsigned FlagsToInclude = 0,
                                   unsigned FlagsToExclude = 0) const;

  /// Parse a list of arguments into an InputArgList.
  ///
  /// The resulting InputArgList will reference the strings in [\p ArgBegin,
  /// \p ArgEnd), and their lifetime should extend past that of the returned
  /// InputArgList.
  ///
  /// The only error that can occur in this routine is if an argument is
  /// missing values; in this case \p MissingArgCount will be non-zero.
  ///
  /// \param MissingArgIndex - On error, the index of the option which could
  /// not be parsed.
  /// \param MissingArgCount - On error, the number of missing options.
  /// \param FlagsToInclude - Only parse options with any of these flags.
  /// Zero is the default which includes all flags.
  /// \param FlagsToExclude - Don't parse options with this flag.  Zero
  /// is the default and means exclude nothing.
  /// \return An InputArgList; on error this will contain all the options
  /// which could be parsed.
  InputArgList ParseArgs(ArrayRef<const char *> Args, unsigned &MissingArgIndex,
                         unsigned &MissingArgCount, unsigned FlagsToInclude = 0,
                         unsigned FlagsToExclude = 0) const;
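  // Example (a sketch; Tbl wraps a TableGen'ed option table and Argv is an
  // ArrayRef<const char *>):
  //
  //   unsigned MissingArgIndex, MissingArgCount;
  //   InputArgList Args =
  //       Tbl.ParseArgs(Argv, MissingArgIndex, MissingArgCount);
  //   if (MissingArgCount)
  //     ... // Args.getArgString(MissingArgIndex) is missing values.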

  /// A convenience helper which handles optional initial options populated from
  /// an environment variable, expands response files recursively and parses
  /// options.
  ///
  /// \param ErrorFn - Called on a formatted error message for missing arguments
  /// or unknown options.
  /// \return An InputArgList; on error this will contain all the options which
  /// could be parsed.
  InputArgList parseArgs(int Argc, char *const *Argv, OptSpecifier Unknown,
                         StringSaver &Saver,
                         function_ref<void(StringRef)> ErrorFn) const;

  /// Render the help text for an option table.
  ///
  /// \param OS - The stream to write the help text to.
  /// \param Usage - USAGE: Usage
  /// \param Title - OVERVIEW: Title
  /// \param FlagsToInclude - If non-zero, only include options with any
  ///                         of these flags set.
  /// \param FlagsToExclude - Exclude options with any of these flags set.
  /// \param ShowAllAliases - If true, display all options including aliases
  ///                         that don't have help texts. By default, we display
  ///                         only options that are not hidden and have help
  ///                         texts.
  void printHelp(raw_ostream &OS, const char *Usage, const char *Title,
                 unsigned FlagsToInclude, unsigned FlagsToExclude,
                 bool ShowAllAliases) const;

  void printHelp(raw_ostream &OS, const char *Usage, const char *Title,
                 bool ShowHidden = false, bool ShowAllAliases = false) const;
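  // Example (a sketch; Tbl is a hypothetical OptTable instance):
  //
  //   Tbl.printHelp(llvm::outs(), "mytool [options] <inputs>", "My Tool");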
};

/// Specialization of OptTable
class GenericOptTable : public OptTable {
  SmallVector<StringLiteral> PrefixesUnionBuffer;

protected:
  GenericOptTable(ArrayRef<Info> OptionInfos, bool IgnoreCase = false);
  ArrayRef<StringLiteral> getPrefixesUnion() const final {
    return PrefixesUnionBuffer;
  }
};

class PrecomputedOptTable : public OptTable {
  ArrayRef<StringLiteral> PrefixesUnion;

protected:
  PrecomputedOptTable(ArrayRef<Info> OptionInfos,
                      ArrayRef<StringLiteral> PrefixesTable,
                      bool IgnoreCase = false)
      : OptTable(OptionInfos, IgnoreCase), PrefixesUnion(PrefixesTable) {
    buildPrefixChars();
  }
  ArrayRef<StringLiteral> getPrefixesUnion() const final {
    return PrefixesUnion;
  }
};

} // end namespace opt

} // end namespace llvm

#endif // LLVM_OPTION_OPTTABLE_H
//===- OptSpecifier.h - Option Specifiers -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OPTION_OPTSPECIFIER_H
#define LLVM_OPTION_OPTSPECIFIER_H

namespace llvm {
namespace opt {

class Option;

/// OptSpecifier - Wrapper class for abstracting references to option IDs.
class OptSpecifier {
  unsigned ID = 0;

public:
  OptSpecifier() = default;
  explicit OptSpecifier(bool) = delete;
  /*implicit*/ OptSpecifier(unsigned ID) : ID(ID) {}
  /*implicit*/ OptSpecifier(const Option *Opt);

  bool isValid() const { return ID != 0; }

  unsigned getID() const { return ID; }

  bool operator==(OptSpecifier Opt) const { return ID == Opt.getID(); }
  bool operator!=(OptSpecifier Opt) const { return !(*this == Opt); }
};

} // end namespace opt
} // end namespace llvm

#endif // LLVM_OPTION_OPTSPECIFIER_H
//===--- OptParser.td - Common Option Parsing Interfaces ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the common interfaces used by the option parsing TableGen
//  backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_OPTION_OPTPARSER_TD
#define LLVM_OPTION_OPTPARSER_TD

// Define the kinds of options.

class OptionKind<string name, int precedence = 0, bit sentinel = false> {
  string Name = name;
  // The kind precedence, kinds with lower precedence are matched first.
  int Precedence = precedence;
  // Indicate a sentinel option.
  bit Sentinel = sentinel;
}

// An option group.
def KIND_GROUP : OptionKind<"Group">;
// The input option kind.
def KIND_INPUT : OptionKind<"Input", 1, true>;
// The unknown option kind.
def KIND_UNKNOWN : OptionKind<"Unknown", 2, true>;
// A flag with no values.
def KIND_FLAG : OptionKind<"Flag">;
// An option which prefixes its (single) value.
def KIND_JOINED : OptionKind<"Joined", 1>;
// An option which is followed by its value.
def KIND_SEPARATE : OptionKind<"Separate">;
// An option followed by its values, which are separated by commas.
def KIND_COMMAJOINED : OptionKind<"CommaJoined">;
// An option which takes multiple (separate) arguments.
def KIND_MULTIARG : OptionKind<"MultiArg">;
// An option which is either joined to its (non-empty) value, or followed by its
// value.
def KIND_JOINED_OR_SEPARATE : OptionKind<"JoinedOrSeparate">;
// An option which is both joined to its (first) value, and followed by its
// (second) value.
def KIND_JOINED_AND_SEPARATE : OptionKind<"JoinedAndSeparate">;
// An option which consumes all remaining arguments if there are any.
def KIND_REMAINING_ARGS : OptionKind<"RemainingArgs">;
// An option which consumes an optional joined argument and any other remaining
// arguments.
def KIND_REMAINING_ARGS_JOINED : OptionKind<"RemainingArgsJoined">;

// Define the option flags.

class OptionFlag {}

// HelpHidden - The option should not be displayed in --help, even if it has
// help text. Clients *can* use this in conjunction with the OptTable::PrintHelp
// arguments to implement hidden help groups.
def HelpHidden : OptionFlag;

// RenderAsInput - The option should not render the name when rendered as an
// input (i.e., the option is rendered as values).
def RenderAsInput : OptionFlag;

// RenderJoined - The option should be rendered joined, even if separate (only
// sensible on single value separate options).
def RenderJoined : OptionFlag;

// RenderSeparate - The option should be rendered separately, even if joined
// (only sensible on joined options).
def RenderSeparate : OptionFlag;

// Define the option group class.

class OptionGroup<string name> {
  string EnumName = ?; // Uses the def name if undefined.
  string Name = name;
  string HelpText = ?;
  OptionGroup Group = ?;
  list<OptionFlag> Flags = [];
}

// Define the option class.

class Option<list<string> prefixes, string name, OptionKind kind> {
  string EnumName = ?; // Uses the def name if undefined.
  list<string> Prefixes = prefixes;
  string Name = name;
  OptionKind Kind = kind;
  // Used by MultiArg option kind.
  int NumArgs = 0;
  string HelpText = ?;
  string MetaVarName = ?;
  string Values = ?;
  code ValuesCode = ?;
  list<OptionFlag> Flags = [];
  OptionGroup Group = ?;
  Option Alias = ?;
  list<string> AliasArgs = [];
  code MacroPrefix = "";
  code KeyPath = ?;
  code DefaultValue = ?;
  code ImpliedValue = ?;
  code ImpliedCheck = "false";
  code ShouldParse = "true";
  bit ShouldAlwaysEmit = false;
  code NormalizerRetTy = ?;
  code NormalizedValuesScope = "";
  code Normalizer = "";
  code Denormalizer = "";
  code ValueMerger = "mergeForwardValue";
  code ValueExtractor = "extractForwardValue";
  list<code> NormalizedValues = ?;
}

// Helpers for defining options.

class Flag<list<string> prefixes, string name>
  : Option<prefixes, name, KIND_FLAG>;
class Joined<list<string> prefixes, string name>
  : Option<prefixes, name, KIND_JOINED>;
class Separate<list<string> prefixes, string name>
  : Option<prefixes, name, KIND_SEPARATE>;
class CommaJoined<list<string> prefixes, string name>
  : Option<prefixes, name, KIND_COMMAJOINED>;
class MultiArg<list<string> prefixes, string name, int numargs>
  : Option<prefixes, name, KIND_MULTIARG> {
  int NumArgs = numargs;
}
class JoinedOrSeparate<list<string> prefixes, string name>
  : Option<prefixes, name, KIND_JOINED_OR_SEPARATE>;
class JoinedAndSeparate<list<string> prefixes, string name>
  : Option<prefixes, name, KIND_JOINED_AND_SEPARATE>;

// Mix-ins for adding optional attributes.

class Alias<Option alias> { Option Alias = alias; }
class AliasArgs<list<string> aliasargs> { list<string> AliasArgs = aliasargs; }
class EnumName<string name> { string EnumName = name; }
class Flags<list<OptionFlag> flags> { list<OptionFlag> Flags = flags; }
class Group<OptionGroup group> { OptionGroup Group = group; }
class HelpText<string text> { string HelpText = text; }
class MetaVarName<string name> { string MetaVarName = name; }
class Values<string value> { string Values = value; }
class ValuesCode<code valuecode> { code ValuesCode = valuecode; }
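// Example (illustrative only; the option names are hypothetical): typical
// definitions in a driver's Options.td using the helpers and mix-ins above.
//
//   def help : Flag<["-", "--"], "help">, HelpText<"Display available options">;
//   def o : JoinedOrSeparate<["-"], "o">, MetaVarName<"<file>">,
//           HelpText<"Write output to <file>">;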

// Helpers for defining marshalling information (typically used in Clang's -cc1
// frontend).

// The key path to the mapped field and the macro prefix for the resulting
// definition database.
class KeyPathAndMacro<string key_path_prefix, string key_path_base,
                      string macro_prefix = ""> {
  code KeyPath = !strconcat(key_path_prefix, key_path_base);
  code MacroPrefix = macro_prefix;
}

// Mixin that implies the specified value for the current option when any of the
// given key paths evaluates to true.
class ImpliedByAnyOf<list<string> key_paths, code value = "true"> {
  code ImpliedCheck = !foldl("false", key_paths, accumulator, key_path,
                             !strconcat(accumulator, " || ", key_path));
  code ImpliedValue = value;
}

// Parent class for marshalled options (typically used in Clang's -cc1 frontend).
class MarshallingInfo<KeyPathAndMacro kpm, code defaultvalue> {
  code KeyPath = kpm.KeyPath;
  code MacroPrefix = kpm.MacroPrefix;
  code DefaultValue = defaultvalue;
}

// Marshalled option accepting a string argument.
class MarshallingInfoString<KeyPathAndMacro kpm, code defaultvalue="std::string()">
  : MarshallingInfo<kpm, defaultvalue> {
  code Normalizer = "normalizeString";
  code Denormalizer = "denormalizeString";
}

// Marshalled option accepting an integer argument.
class MarshallingInfoInt<KeyPathAndMacro kpm, code defaultvalue="0", code type="unsigned">
  : MarshallingInfo<kpm, defaultvalue> {
  code Normalizer = "normalizeStringIntegral<"#type#">";
  code Denormalizer = "denormalizeString<"#type#">";
}

// Marshalled option accepting vector of strings.
class MarshallingInfoStringVector<KeyPathAndMacro kpm>
  : MarshallingInfo<kpm, "std::vector<std::string>({})"> {
  code Normalizer = "normalizeStringVector";
  code Denormalizer = "denormalizeStringVector";
}

// Marshalled option - single positive flag.
class MarshallingInfoFlag<KeyPathAndMacro kpm, code defaultvalue = "false">
  : MarshallingInfo<kpm, defaultvalue> {
  code Normalizer = "normalizeSimpleFlag";
  code Denormalizer = "denormalizeSimpleFlag";
}

// Marshalled option - single negative flag.
class MarshallingInfoNegativeFlag<KeyPathAndMacro kpm, code defaultvalue = "true">
  : MarshallingInfo<kpm, defaultvalue> {
  code Normalizer = "normalizeSimpleNegativeFlag";
  code Denormalizer = "denormalizeSimpleFlag";
}

// Marshalled option - single flag contributing to a bitfield.
class MarshallingInfoBitfieldFlag<KeyPathAndMacro kpm, code value>
  : MarshallingInfoFlag<kpm, "0u"> {
  code Normalizer = "makeFlagToValueNormalizer("#value#")";
  code ValueMerger = "mergeMaskValue";
  code ValueExtractor = "(extractMaskValue<unsigned, decltype("#value#"), "#value#">)";
}

// Implementation detail of BoolOption.
class MarshallingInfoBooleanFlag<KeyPathAndMacro kpm, code defaultvalue, code value,
                                 code other_value, code other_name>
  : MarshallingInfoFlag<kpm, defaultvalue> {
  code Normalizer = "makeBooleanOptionNormalizer("#value#", "#other_value#", OPT_"#other_name#")";
  code Denormalizer = "makeBooleanOptionDenormalizer("#value#")";
}

// Marshalled option accepting any of the specified enum values.
// Typically used with `Values`, `NormalizedValues` and `NormalizedValuesScope`.
class MarshallingInfoEnum<KeyPathAndMacro kpm, code defaultvalue>
  : MarshallingInfo<kpm, defaultvalue> {
  code Normalizer = "normalizeSimpleEnum";
  code Denormalizer = "denormalizeSimpleEnum";
}

// Mixins for additional marshalling attributes.

class ShouldParseIf<code condition> { code ShouldParse = condition; }
class AlwaysEmit { bit ShouldAlwaysEmit = true; }
class Normalizer<code normalizer> { code Normalizer = normalizer; }
class Denormalizer<code denormalizer> { code Denormalizer = denormalizer; }
class NormalizedValuesScope<code scope> { code NormalizedValuesScope = scope; }
class NormalizedValues<list<code> definitions> { list<code> NormalizedValues = definitions; } 
class ValueMerger<code merger> { code ValueMerger = merger; }
class ValueExtractor<code extractor> { code ValueExtractor = extractor; }

// Predefined options.

// FIXME: Have generator validate that these appear in correct position (and
// aren't duplicated).
def INPUT : Option<[], "<input>", KIND_INPUT>;
def UNKNOWN : Option<[], "<unknown>", KIND_UNKNOWN>;

#endif // LLVM_OPTION_OPTPARSER_TD
//===-ThinLTOCodeGenerator.h - LLVM Link Time Optimizer -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ThinLTOCodeGenerator class, similar to the
// LTOCodeGenerator but for the ThinLTO scheme. It provides an interface for
// a linker plugin.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_LEGACY_THINLTOCODEGENERATOR_H
#define LLVM_LTO_LEGACY_THINLTOCODEGENERATOR_H

#include "llvm-c/lto.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Support/CachePruning.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"

#include <string>

namespace llvm {
class StringRef;
class TargetMachine;

/// Helper to gather options relevant to the target machine creation
struct TargetMachineBuilder {
  Triple TheTriple;
  std::string MCpu;
  std::string MAttr;
  TargetOptions Options;
  std::optional<Reloc::Model> RelocModel;
  CodeGenOpt::Level CGOptLevel = CodeGenOpt::Aggressive;

  std::unique_ptr<TargetMachine> create() const;
};

/// This class defines an interface similar to the LTOCodeGenerator, but
/// adapted for ThinLTO processing.
/// The ThinLTOCodeGenerator is not intended to be reused for multiple
/// compilations: the model is that the client adds modules to the generator,
/// asks it to perform the ThinLTO optimizations / codegen, and finally
/// destroys the code generator.
class ThinLTOCodeGenerator {
public:
  /// Add given module to the code generator.
  void addModule(StringRef Identifier, StringRef Data);

  /**
   * Adds to a list of all global symbols that must exist in the final generated
   * code. If a symbol is not listed there, it will be optimized away if it is
   * inlined into every usage.
   */
  void preserveSymbol(StringRef Name);

  /**
   * Adds to a list of all global symbols that are cross-referenced between
   * ThinLTO files. If the ThinLTO CodeGenerator can ensure that every
   * reference from a ThinLTO module to this symbol is optimized away, then
   * the symbol can be discarded.
   */
  void crossReferenceSymbol(StringRef Name);

  /**
   * Process all the modules that were added to the code generator in parallel.
   *
   * Clients can access the resulting object files using getProducedBinaries(),
   * unless setGeneratedObjectsDirectory() has been called, in which case
   * results are available through getProducedBinaryFiles().
   */
  void run();
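  // Example (a sketch; BufferA and BufferB are hypothetical StringRefs holding
  // bitcode):
  //
  //   ThinLTOCodeGenerator CG;
  //   CG.addModule("a.o", BufferA);
  //   CG.addModule("b.o", BufferB);
  //   CG.run();
  //   for (auto &Buf : CG.getProducedBinaries())
  //     ... // write Buf->getBuffer() to disk.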

  /**
   * Return the "in memory" binaries produced by the code generator. This is
   * filled after run() unless setGeneratedObjectsDirectory() has been
   * called, in which case results are available through
   * getProducedBinaryFiles().
   */
  std::vector<std::unique_ptr<MemoryBuffer>> &getProducedBinaries() {
    return ProducedBinaries;
  }

  /**
   * Return the "on-disk" binaries produced by the code generator. This is
   * filled after run() when setGeneratedObjectsDirectory() has been
   * called, in which case results are available through getProducedBinaries().
   */
  std::vector<std::string> &getProducedBinaryFiles() {
    return ProducedBinaryFiles;
  }

  /**
   * \defgroup Options setters
   * @{
   */

  /**
   * \defgroup Cache controlling options
   *
   * These entry points control the ThinLTO cache. The cache is intended to
   * support incremental builds, and thus needs to be persistent across builds.
   * The client enables the cache by supplying a path to an existing directory.
   * The code generator will use this to store object files that may be reused
   * during a subsequent build.
   * To avoid filling the disk space, a few knobs are provided:
   *  - The pruning interval limits the frequency at which the garbage
   *    collector will scan the cache directory to prune expired entries.
   *    Setting it to -1 disables pruning (default); setting it to 0 forces
   *    pruning to occur.
   *  - The pruning expiration time indicates to the garbage collector how old
   *    an entry needs to be before it is removed.
   *  - Finally, the garbage collector can be instructed to prune the cache
   *    until the occupied space goes below a threshold.
   * @{
   */

  struct CachingOptions {
    std::string Path;                    // Path to the cache, empty to disable.
    CachePruningPolicy Policy;
  };

  /// Provide a path to a directory where to store the cached files for
  /// incremental build.
  void setCacheDir(std::string Path) { CacheOptions.Path = std::move(Path); }

  /// Cache policy: interval (seconds) between two prunes of the cache. Set to a
  /// negative value to disable pruning. A value of 0 will force pruning to
  /// occur.
  void setCachePruningInterval(int Interval) {
    if(Interval < 0)
      CacheOptions.Policy.Interval.reset();
    else
      CacheOptions.Policy.Interval = std::chrono::seconds(Interval);
  }

  /// Cache policy: expiration (in seconds) for an entry.
  /// A value of 0 will be ignored.
  void setCacheEntryExpiration(unsigned Expiration) {
    if (Expiration)
      CacheOptions.Policy.Expiration = std::chrono::seconds(Expiration);
  }

  /**
   * Sets the maximum cache size that can be persistent across builds, as a
   * percentage of the available space on the disk. Set to 100 to indicate
   * no limit, or to 50 to indicate that the cache size will not exceed half
   * the available space. A value over 100 will be reduced to 100, and a
   * value of 0 will be ignored.
   *
   * The formula looks like:
   *  AvailableSpace = FreeSpace + ExistingCacheSize
   *  NewCacheSize = AvailableSpace * P/100
   *
   */
  void setMaxCacheSizeRelativeToAvailableSpace(unsigned Percentage) {
    if (Percentage)
      CacheOptions.Policy.MaxSizePercentageOfAvailableSpace = Percentage;
  }

  /// Cache policy: the maximum size for the cache directory in bytes. A value
  /// over the amount of available space on the disk will be reduced to the
  /// amount of available space. A value of 0 will be ignored.
  void setCacheMaxSizeBytes(uint64_t MaxSizeBytes) {
    if (MaxSizeBytes)
      CacheOptions.Policy.MaxSizeBytes = MaxSizeBytes;
  }

  /// Cache policy: the maximum number of files in the cache directory. A value
  /// of 0 will be ignored.
  void setCacheMaxSizeFiles(unsigned MaxSizeFiles) {
    if (MaxSizeFiles)
      CacheOptions.Policy.MaxSizeFiles = MaxSizeFiles;
  }
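  // Example (a sketch; CG is a ThinLTOCodeGenerator): enable a persistent
  // cache capped at 75% of the available disk space, with entries expiring
  // after a week.
  //
  //   CG.setCacheDir("/path/to/cache");
  //   CG.setCacheEntryExpiration(7 * 24 * 3600);
  //   CG.setMaxCacheSizeRelativeToAvailableSpace(75);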

  /**@}*/

  /// Set the path to a directory in which to save temporaries at the various
  /// stages of processing.
  void setSaveTempsDir(std::string Path) { SaveTempsDir = std::move(Path); }

  /// Set the path to a directory in which to save generated object files. This
  /// path can be used by a linker to request on-disk files instead of in-memory
  /// buffers. When set, results are available through getProducedBinaryFiles()
  /// instead of getProducedBinaries().
  void setGeneratedObjectsDirectory(std::string Path) {
    SavedObjectsDirectoryPath = std::move(Path);
  }

  /// CPU to use to initialize the TargetMachine
  void setCpu(std::string Cpu) { TMBuilder.MCpu = std::move(Cpu); }

  /// Subtarget attributes
  void setAttr(std::string MAttr) { TMBuilder.MAttr = std::move(MAttr); }

  /// TargetMachine options
  void setTargetOptions(TargetOptions Options) {
    TMBuilder.Options = std::move(Options);
  }

  /// Enable the Freestanding mode: indicate that the optimizer should not
  /// assume builtins are present on the target.
  void setFreestanding(bool Enabled) { Freestanding = Enabled; }

  /// CodeModel
  void setCodePICModel(std::optional<Reloc::Model> Model) {
    TMBuilder.RelocModel = Model;
  }

  /// CodeGen optimization level
  void setCodeGenOptLevel(CodeGenOpt::Level CGOptLevel) {
    TMBuilder.CGOptLevel = CGOptLevel;
  }

  /// IR optimization level: from 0 to 3.
  void setOptLevel(unsigned NewOptLevel) {
    OptLevel = (NewOptLevel > 3) ? 3 : NewOptLevel;
  }

  /// Enable or disable debug output for the new pass manager.
  void setDebugPassManager(unsigned Enabled) { DebugPassManager = Enabled; }

  /// Disable CodeGen: only run the stages up to codegen, then stop. The output
  /// will be bitcode.
  void disableCodeGen(bool Disable) { DisableCodeGen = Disable; }

  /// Perform CodeGen only: disable all other stages.
  void setCodeGenOnly(bool CGOnly) { CodeGenOnly = CGOnly; }

  /**@}*/

  /**
   * \defgroup Set of APIs to run individual stages in isolation.
   * @{
   */

  /**
   * Produce the combined summary index from all the bitcode files:
   * "thin-link".
   */
  std::unique_ptr<ModuleSummaryIndex> linkCombinedIndex();

  /**
   * Perform promotion and renaming of exported internal functions,
   * and additionally resolve weak and linkonce symbols.
   * Index is updated to reflect linkage changes from weak resolution.
   */
  void promote(Module &Module, ModuleSummaryIndex &Index,
               const lto::InputFile &File);

  /**
   * Compute and emit the imported files for \p Module.
   */
  void emitImports(Module &Module, StringRef OutputName,
                   ModuleSummaryIndex &Index,
                   const lto::InputFile &File);

  /**
   * Perform cross-module importing for the module identified by
   * ModuleIdentifier.
   */
  void crossModuleImport(Module &Module, ModuleSummaryIndex &Index,
                         const lto::InputFile &File);

  /**
   * Compute the list of summaries needed for importing into module.
   */
  void gatherImportedSummariesForModule(
      Module &Module, ModuleSummaryIndex &Index,
      std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex,
      const lto::InputFile &File);

  /**
   * Perform internalization. Index is updated to reflect linkage changes.
   */
  void internalize(Module &Module, ModuleSummaryIndex &Index,
                   const lto::InputFile &File);

  /**
   * Perform post-importing ThinLTO optimizations.
   */
  void optimize(Module &Module);

  /**
   * Write a temporary object file to SavedObjectsDirectoryPath, and write a
   * symlink into the cache directory if needed. Returns the path to the
   * generated file in SavedObjectsDirectoryPath.
   */
  std::string writeGeneratedObject(int count, StringRef CacheEntryPath,
                                   const MemoryBuffer &OutputBuffer);
  /**@}*/

private:
  /// Helper factory to build a TargetMachine
  TargetMachineBuilder TMBuilder;

  /// Vector holding the in-memory buffer containing the produced binaries, when
  /// SavedObjectsDirectoryPath isn't set.
  std::vector<std::unique_ptr<MemoryBuffer>> ProducedBinaries;

  /// Path to generated files in the supplied SavedObjectsDirectoryPath if any.
  std::vector<std::string> ProducedBinaryFiles;

  /// Vector holding the input buffers containing the bitcode modules to
  /// process.
  std::vector<std::unique_ptr<lto::InputFile>> Modules;

  /// Set of symbols that need to be preserved outside of the set of bitcode
  /// files.
  StringSet<> PreservedSymbols;

  /// Set of symbols that are cross-referenced between bitcode files.
  StringSet<> CrossReferencedSymbols;

  /// Control the caching behavior.
  CachingOptions CacheOptions;

  /// Path to a directory to save the temporary bitcode files.
  std::string SaveTempsDir;

  /// Path to a directory to save the generated object files.
  std::string SavedObjectsDirectoryPath;

  /// Flag to enable/disable CodeGen. When set to true, the process stops after
  /// optimizations and a bitcode is produced.
  bool DisableCodeGen = false;

  /// Flag to indicate that only the CodeGen will be performed, no cross-module
  /// importing or optimization.
  bool CodeGenOnly = false;

  /// Flag to indicate that the optimizer should not assume builtins are present
  /// on the target.
  bool Freestanding = false;

  /// IR Optimization Level [0-3].
  unsigned OptLevel = 3;

  /// Flag to indicate whether debug output should be enabled for the new pass
  /// manager.
  bool DebugPassManager = false;
};
}
#endif
//==------ UpdateCompilerUsed.h - LLVM Link Time Optimizer Utility --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares a helper function to update llvm.compiler_used metadata.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_LEGACY_UPDATECOMPILERUSED_H
#define LLVM_LTO_LEGACY_UPDATECOMPILERUSED_H

#include "llvm/ADT/StringSet.h"
#include "llvm/IR/GlobalValue.h"

namespace llvm {
class Module;
class TargetMachine;

/// Find all globals in \p TheModule that are referenced in
/// \p AsmUndefinedRefs, as well as the user-supplied functions definitions that
/// are also libcalls, and create or update the magic "llvm.compiler_used"
/// global in \p TheModule.
void updateCompilerUsed(Module &TheModule, const TargetMachine &TM,
                        const StringSet<> &AsmUndefinedRefs);
}

#endif // LLVM_LTO_LEGACY_UPDATECOMPILERUSED_H
//===-LTOModule.h - LLVM Link Time Optimizer ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the LTOModule class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_LEGACY_LTOMODULE_H
#define LLVM_LTO_LEGACY_LTOMODULE_H

#include "llvm-c/lto.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/Module.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Target/TargetMachine.h"
#include <string>
#include <vector>

// Forward references to llvm classes.
namespace llvm {
  class Function;
  class GlobalValue;
  class MemoryBuffer;
  class TargetOptions;
  class Value;

//===----------------------------------------------------------------------===//
/// C++ class which implements the opaque lto_module_t type.
///
struct LTOModule {
private:
  struct NameAndAttributes {
    StringRef name;
    uint32_t           attributes = 0;
    bool               isFunction = false;
    const GlobalValue *symbol = nullptr;
  };

  std::unique_ptr<LLVMContext> OwnedContext;

  std::string LinkerOpts;

  std::unique_ptr<Module> Mod;
  MemoryBufferRef MBRef;
  ModuleSymbolTable SymTab;
  std::unique_ptr<TargetMachine> _target;
  std::vector<NameAndAttributes> _symbols;

  // _defines and _undefines only needed to disambiguate tentative definitions
  StringSet<>                             _defines;
  StringMap<NameAndAttributes> _undefines;
  std::vector<StringRef> _asm_undefines;

  LTOModule(std::unique_ptr<Module> M, MemoryBufferRef MBRef,
            TargetMachine *TM);

public:
  ~LTOModule();

  /// Returns 'true' if the file or memory contents is LLVM bitcode.
  static bool isBitcodeFile(const void *mem, size_t length);
  static bool isBitcodeFile(StringRef path);

  /// Returns 'true' if the Module is produced for ThinLTO.
  bool isThinLTO();

  /// Returns 'true' if the memory buffer is LLVM bitcode for the specified
  /// triple.
  static bool isBitcodeForTarget(MemoryBuffer *memBuffer,
                                 StringRef triplePrefix);

  /// Returns a string representing the producer identification stored in the
  /// bitcode, or "" if the bitcode does not contain any.
  ///
  static std::string getProducerString(MemoryBuffer *Buffer);

  /// Create a MemoryBuffer from a memory range with an optional name.
  static std::unique_ptr<MemoryBuffer>
  makeBuffer(const void *mem, size_t length, StringRef name = "");

  /// Create an LTOModule. N.B. These methods take ownership of the buffer. The
  /// caller must have initialized the Targets, the TargetMCs, the AsmPrinters,
  /// and the AsmParsers by calling:
  ///
  /// InitializeAllTargets();
  /// InitializeAllTargetMCs();
  /// InitializeAllAsmPrinters();
  /// InitializeAllAsmParsers();
  static ErrorOr<std::unique_ptr<LTOModule>>
  createFromFile(LLVMContext &Context, StringRef path,
                 const TargetOptions &options);
  static ErrorOr<std::unique_ptr<LTOModule>>
  createFromOpenFile(LLVMContext &Context, int fd, StringRef path, size_t size,
                     const TargetOptions &options);
  static ErrorOr<std::unique_ptr<LTOModule>>
  createFromOpenFileSlice(LLVMContext &Context, int fd, StringRef path,
                          size_t map_size, off_t offset,
                          const TargetOptions &options);
  static ErrorOr<std::unique_ptr<LTOModule>>
  createFromBuffer(LLVMContext &Context, const void *mem, size_t length,
                   const TargetOptions &options, StringRef path = "");
  static ErrorOr<std::unique_ptr<LTOModule>>
  createInLocalContext(std::unique_ptr<LLVMContext> Context, const void *mem,
                       size_t length, const TargetOptions &options,
                       StringRef path);
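  // Example (a sketch): load a bitcode file after initializing the targets.
  //
  //   InitializeAllTargets();
  //   InitializeAllTargetMCs();
  //   InitializeAllAsmPrinters();
  //   InitializeAllAsmParsers();
  //   LLVMContext Ctx;
  //   TargetOptions Options;
  //   if (auto ModOrErr = LTOModule::createFromFile(Ctx, "a.o", Options))
  //     std::unique_ptr<LTOModule> M = std::move(*ModOrErr);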

  const Module &getModule() const { return *Mod; }
  Module &getModule() { return *Mod; }

  std::unique_ptr<Module> takeModule() { return std::move(Mod); }

  /// Return the Module's target triple.
  const std::string &getTargetTriple() {
    return getModule().getTargetTriple();
  }

  /// Set the Module's target triple.
  void setTargetTriple(StringRef Triple) {
    getModule().setTargetTriple(Triple);
  }

  /// Get the number of symbols
  uint32_t getSymbolCount() {
    return _symbols.size();
  }

  /// Get the attributes for a symbol at the specified index.
  lto_symbol_attributes getSymbolAttributes(uint32_t index) {
    if (index < _symbols.size())
      return lto_symbol_attributes(_symbols[index].attributes);
    return lto_symbol_attributes(0);
  }

  /// Get the name of the symbol at the specified index.
  StringRef getSymbolName(uint32_t index) {
    if (index < _symbols.size())
      return _symbols[index].name;
    return StringRef();
  }

  const GlobalValue *getSymbolGV(uint32_t index) {
    if (index < _symbols.size())
      return _symbols[index].symbol;
    return nullptr;
  }

  StringRef getLinkerOpts() { return LinkerOpts; }

  const std::vector<StringRef> &getAsmUndefinedRefs() { return _asm_undefines; }

  static lto::InputFile *createInputFile(const void *buffer, size_t buffer_size,
                                         const char *path, std::string &out_error);

  static size_t getDependentLibraryCount(lto::InputFile *input);

  static const char *getDependentLibrary(lto::InputFile *input, size_t index, size_t *size);

  Expected<uint32_t> getMachOCPUType() const;

  Expected<uint32_t> getMachOCPUSubType() const;

  /// Returns true if the module has either the @llvm.global_ctors or the
  /// @llvm.global_dtors symbol. Otherwise returns false.
  bool hasCtorDtor() const;

private:
  /// Parse metadata from the module
  // FIXME: it only parses "llvm.linker.options" metadata at the moment
  // FIXME: can't access metadata in lazily loaded modules
  void parseMetadata();

  /// Parse the symbols from the module and module-level ASM and add them to
  /// either the defined or undefined lists.
  void parseSymbols();

  /// Add a symbol which isn't defined just yet to a list to be resolved later.
  void addPotentialUndefinedSymbol(ModuleSymbolTable::Symbol Sym,
                                   bool isFunc);

  /// Add a defined symbol to the list.
  void addDefinedSymbol(StringRef Name, const GlobalValue *def,
                        bool isFunction);

  /// Add a data symbol as defined to the list.
  void addDefinedDataSymbol(ModuleSymbolTable::Symbol Sym);
  void addDefinedDataSymbol(StringRef Name, const GlobalValue *v);

  /// Add a function symbol as defined to the list.
  void addDefinedFunctionSymbol(ModuleSymbolTable::Symbol Sym);
  void addDefinedFunctionSymbol(StringRef Name, const Function *F);

  /// Add a global symbol from module-level ASM to the defined list.
  void addAsmGlobalSymbol(StringRef, lto_symbol_attributes scope);

  /// Add a global symbol from module-level ASM to the undefined list.
  void addAsmGlobalSymbolUndef(StringRef);

  /// Parse i386/ppc ObjC class data structure.
  void addObjCClass(const GlobalVariable *clgv);

  /// Parse i386/ppc ObjC category data structure.
  void addObjCCategory(const GlobalVariable *clgv);

  /// Parse i386/ppc ObjC class list data structure.
  void addObjCClassRef(const GlobalVariable *clgv);

  /// Get string that the data pointer points to.
  bool objcClassNameFromExpression(const Constant *c, std::string &name);

  /// Create an LTOModule (private version).
  static ErrorOr<std::unique_ptr<LTOModule>>
  makeLTOModule(MemoryBufferRef Buffer, const TargetOptions &options,
                LLVMContext &Context, bool ShouldBeLazy);
};
}
#endif
//===-LTOCodeGenerator.h - LLVM Link Time Optimizer -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the LTOCodeGenerator class.
//
//   LTO compilation consists of three phases: Pre-IPO, IPO and Post-IPO.
//
//   The Pre-IPO phase compiles source code into bitcode files. The resulting
// bitcode files, along with object files and libraries, will be fed to the
// linker and taken through the IPO and Post-IPO phases. By using the obj-file
// extension, the resulting bitcode file disguises itself as an object file,
// and therefore obviates the need to write a special set of make-rules only
// for LTO compilation.
//
//   The IPO phase performs inter-procedural analyses and optimizations, and
// the Post-IPO consists of two sub-phases: intra-procedural scalar optimizations
// (SOPT), and intra-procedural target-dependent code generator (CG).
//
//   As of this writing, we don't separate IPO and the Post-IPO SOPT. They
// are intermingled together, and are driven by a single pass manager (see
// PassManagerBuilder::populateLTOPassManager()).
//   FIXME: populateLTOPassManager no longer exists.
//
//   The "LTOCodeGenerator" is the driver for the IPO and Post-IPO stages.
// The "CodeGenerator" here is bit confusing. Don't confuse the "CodeGenerator"
// with the machine specific code generator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_LEGACY_LTOCODEGENERATOR_H
#define LLVM_LTO_LEGACY_LTOCODEGENERATOR_H

#include "llvm-c/lto.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/LTO/Config.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <string>
#include <vector>

namespace llvm {
template <typename T> class ArrayRef;
  class LLVMContext;
  class DiagnosticInfo;
  class Linker;
  class Mangler;
  class MemoryBuffer;
  class TargetLibraryInfo;
  class TargetMachine;
  class raw_ostream;
  class raw_pwrite_stream;

/// Enable global value internalization in LTO.
extern cl::opt<bool> EnableLTOInternalization;

//===----------------------------------------------------------------------===//
/// C++ class which implements the opaque lto_code_gen_t type.
///
struct LTOCodeGenerator {
  static const char *getVersionString();

  LTOCodeGenerator(LLVMContext &Context);
  ~LTOCodeGenerator();

  /// Merge given module.  Return true on success.
  ///
  /// Resets \a HasVerifiedInput.
  bool addModule(struct LTOModule *);

  /// Set the destination module.
  ///
  /// Resets \a HasVerifiedInput.
  void setModule(std::unique_ptr<LTOModule> M);

  void setAsmUndefinedRefs(struct LTOModule *);
  void setTargetOptions(const TargetOptions &Options);
  void setDebugInfo(lto_debug_model);
  void setCodePICModel(std::optional<Reloc::Model> Model) {
    Config.RelocModel = Model;
  }

  /// Set the file type to be emitted (assembly or object code).
  /// The default is CGFT_ObjectFile.
  void setFileType(CodeGenFileType FT) { Config.CGFileType = FT; }

  void setCpu(StringRef MCpu) { Config.CPU = std::string(MCpu); }
  void setAttrs(std::vector<std::string> MAttrs) { Config.MAttrs = MAttrs; }
  void setOptLevel(unsigned OptLevel);

  void setShouldInternalize(bool Value) { ShouldInternalize = Value; }
  void setShouldEmbedUselists(bool Value) { ShouldEmbedUselists = Value; }
  void setSaveIRBeforeOptPath(std::string Value) {
    SaveIRBeforeOptPath = Value;
  }

  /// Restore linkage of globals
  ///
  /// When set, the linkage of globals will be restored prior to code
  /// generation. That is, a global symbol that had external linkage prior to
  /// LTO will be emitted with external linkage again; and a local will remain
  /// local. Note that this option only affects the end result - globals may
  /// still be internalized in the process of LTO and may be modified and/or
  /// deleted where legal.
  ///
  /// The default behavior will internalize globals (unless on the preserve
  /// list) and, if parallel code generation is enabled, will externalize
  /// all locals.
  void setShouldRestoreGlobalsLinkage(bool Value) {
    ShouldRestoreGlobalsLinkage = Value;
  }

  void addMustPreserveSymbol(StringRef Sym) { MustPreserveSymbols.insert(Sym); }

  /// Pass options to the driver and optimization passes.
  ///
  /// These options are not necessarily for debugging purposes (the function
  /// name is misleading).  This function should be called before
  /// LTOCodeGenerator::compilexxx() and
  /// LTOCodeGenerator::writeMergedModules().
  void setCodeGenDebugOptions(ArrayRef<StringRef> Opts);

  /// Parse the options set in setCodeGenDebugOptions.
  ///
  /// Like \a setCodeGenDebugOptions(), this must be called before
  /// LTOCodeGenerator::compilexxx() and
  /// LTOCodeGenerator::writeMergedModules().
  void parseCodeGenDebugOptions();

  /// Write the merged module to the file specified by the given path.  Return
  /// true on success.
  ///
  /// Calls \a verifyMergedModuleOnce().
  bool writeMergedModules(StringRef Path);

  /// Compile the merged module into a *single* output file; the path to output
  /// file is returned to the caller via argument "name". Return true on
  /// success.
  ///
  /// \note It is up to the linker to remove the intermediate output file.  Do
  /// not try to remove the object file in LTOCodeGenerator's destructor, as we
  /// don't know which (LTOCodeGenerator or the output file) will last longer.
  bool compile_to_file(const char **Name);

  /// As with compile_to_file(), this function compiles the merged module into
  /// a single output file. Instead of returning the output file path to the
  /// caller (linker), it brings the output into a buffer, and returns the
  /// buffer to the caller. This function should delete the intermediate file
  /// once its content is brought into memory. Returns NULL if the compilation
  /// was not successful.
  std::unique_ptr<MemoryBuffer> compile();

  /// Optimizes the merged module.  Returns true on success.
  ///
  /// Calls \a verifyMergedModuleOnce().
  bool optimize();

  /// Compiles the merged optimized module into a single output file. It brings
  /// the output to a buffer, and returns the buffer to the caller. Return NULL
  /// if the compilation was not successful.
  std::unique_ptr<MemoryBuffer> compileOptimized();

  /// Compile the merged optimized module into \p ParallelismLevel output
  /// files, each representing a linkable partition of the module. If \p
  /// ParallelismLevel is greater than one, code generation is done in parallel
  /// with \p ParallelismLevel threads.  Output files will be written to the
  /// streams created using the \p AddStream callback. Returns true on success.
  ///
  /// Calls \a verifyMergedModuleOnce().
  bool compileOptimized(AddStreamFn AddStream, unsigned ParallelismLevel);

  /// Enable the Freestanding mode: indicate that the optimizer should not
  /// assume builtins are present on the target.
  void setFreestanding(bool Enabled) { Config.Freestanding = Enabled; }

  void setDisableVerify(bool Value) { Config.DisableVerify = Value; }

  void setDebugPassManager(bool Enabled) { Config.DebugPassManager = Enabled; }

  void setDiagnosticHandler(lto_diagnostic_handler_t, void *);

  LLVMContext &getContext() { return Context; }

  void resetMergedModule() { MergedModule.reset(); }
  void DiagnosticHandler(const DiagnosticInfo &DI);

private:
  /// Verify the merged module on first call.
  ///
  /// Sets \a HasVerifiedInput on first call and doesn't run again on the same
  /// input.
  void verifyMergedModuleOnce();

  bool compileOptimizedToFile(const char **Name);
  void restoreLinkageForExternals();
  void applyScopeRestrictions();
  void preserveDiscardableGVs(
      Module &TheModule,
      llvm::function_ref<bool(const GlobalValue &)> mustPreserveGV);

  bool determineTarget();
  std::unique_ptr<TargetMachine> createTargetMachine();

  bool useAIXSystemAssembler();
  bool runAIXSystemAssembler(SmallString<128> &AssemblyFile);

  void emitError(const std::string &ErrMsg);
  void emitWarning(const std::string &ErrMsg);

  void finishOptimizationRemarks();

  LLVMContext &Context;
  std::unique_ptr<Module> MergedModule;
  std::unique_ptr<Linker> TheLinker;
  std::unique_ptr<TargetMachine> TargetMach;
  bool EmitDwarfDebugInfo = false;
  bool ScopeRestrictionsDone = false;
  bool HasVerifiedInput = false;
  StringSet<> MustPreserveSymbols;
  StringSet<> AsmUndefinedRefs;
  StringMap<GlobalValue::LinkageTypes> ExternalSymbols;
  std::vector<std::string> CodegenOptions;
  std::string FeatureStr;
  std::string NativeObjectPath;
  const Target *MArch = nullptr;
  std::string TripleStr;
  lto_diagnostic_handler_t DiagHandler = nullptr;
  void *DiagContext = nullptr;
  bool ShouldInternalize = EnableLTOInternalization;
  bool ShouldEmbedUselists = false;
  bool ShouldRestoreGlobalsLinkage = false;
  std::unique_ptr<ToolOutputFile> DiagnosticOutputFile;
  std::unique_ptr<ToolOutputFile> StatsFile = nullptr;
  std::string SaveIRBeforeOptPath;

  lto::Config Config;
};
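
// A minimal sketch of driving this legacy interface from a linker (error
// handling abbreviated; `Modules` is a hypothetical list of already-created
// LTOModule pointers, and error() stands in for the linker's own
// diagnostics):
//
//   LLVMContext Ctx;
//   LTOCodeGenerator CG(Ctx);
//   CG.setOptLevel(2);
//   for (LTOModule *M : Modules)
//     if (!CG.addModule(M))
//       return error("failed to merge module");
//   const char *ObjPath = nullptr;
//   if (!CG.compile_to_file(&ObjPath))
//     return error("LTO code generation failed");
//   // ObjPath now names the native object file to add to the link.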

/// A convenience function that calls cl::ParseCommandLineOptions on the given
/// set of options.
void parseCommandLineOptions(std::vector<std::string> &Options);
} // namespace llvm
#endif
//===- LTO.h - LLVM Link Time Optimizer ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares functions and classes used to support LTO. It is intended
// to be used both by LTO classes as well as by clients (gold-plugin) that
// don't utilize the LTO code generator interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_LTO_H
#define LLVM_LTO_LTO_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/LTO/Config.h"
#include "llvm/Object/IRSymtab.h"
#include "llvm/Support/Caching.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/thread.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO/FunctionImport.h"

namespace llvm {

class Error;
class IRMover;
class LLVMContext;
class MemoryBufferRef;
class Module;
class raw_pwrite_stream;
class ToolOutputFile;

/// Resolve linkage for prevailing symbols in the \p Index. Linkage changes
/// are recorded in the index, and the ThinLTO backends must apply the changes
/// to the module via thinLTOFinalizeInModule.
///
/// This is done both for correctness (if a value is exported, ensure we
/// always emit a copy) and for compile-time optimization (allow duplicates to
/// be dropped).
void thinLTOResolvePrevailingInIndex(
    const lto::Config &C, ModuleSummaryIndex &Index,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        isPrevailing,
    function_ref<void(StringRef, GlobalValue::GUID, GlobalValue::LinkageTypes)>
        recordNewLinkage,
    const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols);

/// Update the linkages in the given \p Index to mark exported values
/// as external and non-exported values as internal. The ThinLTO backends
/// must apply the changes to the Module via thinLTOInternalizeModule.
void thinLTOInternalizeAndPromoteInIndex(
    ModuleSummaryIndex &Index,
    function_ref<bool(StringRef, ValueInfo)> isExported,
    function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
        isPrevailing);

/// Computes a unique hash for the Module considering the current list of
/// export/import and other global analysis results.
/// The hash is produced in \p Key.
void computeLTOCacheKey(
    SmallString<40> &Key, const lto::Config &Conf,
    const ModuleSummaryIndex &Index, StringRef ModuleID,
    const FunctionImporter::ImportMapTy &ImportList,
    const FunctionImporter::ExportSetTy &ExportList,
    const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
    const GVSummaryMapTy &DefinedGlobals,
    const std::set<GlobalValue::GUID> &CfiFunctionDefs = {},
    const std::set<GlobalValue::GUID> &CfiFunctionDecls = {});

namespace lto {

/// Given the original \p Path to an output file, replace any path
/// prefix matching \p OldPrefix with \p NewPrefix. Also, create the
/// resulting directory if it does not yet exist.
std::string getThinLTOOutputFile(StringRef Path, StringRef OldPrefix,
                                 StringRef NewPrefix);

/// Setup optimization remarks.
Expected<std::unique_ptr<ToolOutputFile>> setupLLVMOptimizationRemarks(
    LLVMContext &Context, StringRef RemarksFilename, StringRef RemarksPasses,
    StringRef RemarksFormat, bool RemarksWithHotness,
    std::optional<uint64_t> RemarksHotnessThreshold = 0, int Count = -1);

/// Sets up the output file for saving statistics.
Expected<std::unique_ptr<ToolOutputFile>>
setupStatsFile(StringRef StatsFilename);

/// Produces a container ordering for optimal multi-threaded processing. Returns
/// ordered indices to elements in the input array.
std::vector<int> generateModulesOrdering(ArrayRef<BitcodeModule *> R);

/// Updates MemProf attributes (and metadata) based on whether the index
/// has recorded that we are linking with allocation libraries containing
/// the necessary APIs for downstream transformations.
void updateMemProfAttributes(Module &Mod, const ModuleSummaryIndex &Index);

class LTO;
struct SymbolResolution;
class ThinBackendProc;

/// An input file. This is a symbol table wrapper that only exposes the
/// information that an LTO client should need in order to do symbol resolution.
class InputFile {
public:
  class Symbol;

private:
  // FIXME: Remove LTO class friendship once we have bitcode symbol tables.
  friend LTO;
  InputFile() = default;

  std::vector<BitcodeModule> Mods;
  SmallVector<char, 0> Strtab;
  std::vector<Symbol> Symbols;

  // [begin, end) for each module
  std::vector<std::pair<size_t, size_t>> ModuleSymIndices;

  StringRef TargetTriple, SourceFileName, COFFLinkerOpts;
  std::vector<StringRef> DependentLibraries;
  std::vector<std::pair<StringRef, Comdat::SelectionKind>> ComdatTable;

public:
  ~InputFile();

  /// Create an InputFile.
  static Expected<std::unique_ptr<InputFile>> create(MemoryBufferRef Object);

  /// The purpose of this class is to only expose the symbol information that an
  /// LTO client should need in order to do symbol resolution.
  class Symbol : irsymtab::Symbol {
    friend LTO;

  public:
    Symbol(const irsymtab::Symbol &S) : irsymtab::Symbol(S) {}

    using irsymtab::Symbol::isUndefined;
    using irsymtab::Symbol::isCommon;
    using irsymtab::Symbol::isWeak;
    using irsymtab::Symbol::isIndirect;
    using irsymtab::Symbol::getName;
    using irsymtab::Symbol::getIRName;
    using irsymtab::Symbol::getVisibility;
    using irsymtab::Symbol::canBeOmittedFromSymbolTable;
    using irsymtab::Symbol::isTLS;
    using irsymtab::Symbol::getComdatIndex;
    using irsymtab::Symbol::getCommonSize;
    using irsymtab::Symbol::getCommonAlignment;
    using irsymtab::Symbol::getCOFFWeakExternalFallback;
    using irsymtab::Symbol::getSectionName;
    using irsymtab::Symbol::isExecutable;
    using irsymtab::Symbol::isUsed;
  };

  /// A range over the symbols in this InputFile.
  ArrayRef<Symbol> symbols() const { return Symbols; }

  /// Returns linker options specified in the input file.
  StringRef getCOFFLinkerOpts() const { return COFFLinkerOpts; }

  /// Returns dependent library specifiers from the input file.
  ArrayRef<StringRef> getDependentLibraries() const { return DependentLibraries; }

  /// Returns the path to the InputFile.
  StringRef getName() const;

  /// Returns the input file's target triple.
  StringRef getTargetTriple() const { return TargetTriple; }

  /// Returns the source file path specified at compile time.
  StringRef getSourceFileName() const { return SourceFileName; }

  /// Returns a table with all the comdats used by this file.
  ArrayRef<std::pair<StringRef, Comdat::SelectionKind>> getComdatTable() const {
    return ComdatTable;
  }

  /// Returns the only BitcodeModule from InputFile.
  BitcodeModule &getSingleBitcodeModule();

private:
  ArrayRef<Symbol> module_symbols(unsigned I) const {
    const auto &Indices = ModuleSymIndices[I];
    return {Symbols.data() + Indices.first, Symbols.data() + Indices.second};
  }
};

/// A ThinBackend defines what happens after the thin-link phase during ThinLTO.
/// The details of this type definition aren't important; clients can only
/// create a ThinBackend using one of the create*ThinBackend() functions below.
using ThinBackend = std::function<std::unique_ptr<ThinBackendProc>(
    const Config &C, ModuleSummaryIndex &CombinedIndex,
    StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
    AddStreamFn AddStream, FileCache Cache)>;

/// This ThinBackend runs the individual backend jobs in-process.
/// The default ThreadPoolStrategy uses one job per hardware core (not per
/// hyper-thread). OnWrite is a callback that receives the module identifier
/// and notifies the LTO user that the index file for the module (and
/// optionally the imports file) was created. If ShouldEmitIndexFiles is true,
/// sharded ThinLTO index files are written to the same path as the input
/// module, with the suffix ".thinlto.bc". If ShouldEmitImportsFiles is also
/// true, a list of imported files is written to a similar path with
/// ".imports" appended instead.
using IndexWriteCallback = std::function<void(const std::string &)>;
ThinBackend createInProcessThinBackend(ThreadPoolStrategy Parallelism,
                                       IndexWriteCallback OnWrite = nullptr,
                                       bool ShouldEmitIndexFiles = false,
                                       bool ShouldEmitImportsFiles = false);
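
// For example, a linker that wants one in-process backend job per physical
// core might construct the backend like this (illustrative sketch):
//
//   ThinBackend Backend =
//       createInProcessThinBackend(llvm::heavyweight_hardware_concurrency());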

/// This ThinBackend writes individual module indexes to files, instead of
/// running the individual backend jobs. This backend is for distributed builds
/// where separate processes will invoke the real backends.
///
/// To find the path to write the index to, the backend checks if the path has a
/// prefix of OldPrefix; if so, it replaces that prefix with NewPrefix. It then
/// appends ".thinlto.bc" and writes the index to that path. If
/// ShouldEmitImportsFiles is true it also writes a list of imported files to a
/// similar path with ".imports" appended instead.
/// LinkedObjectsFile is an output stream to which the list of object files
/// for the final ThinLTO link is written; it can be nullptr. If
/// LinkedObjectsFile is not nullptr and NativeObjectPrefix is not empty, the
/// prefix of the objects is replaced with NativeObjectPrefix instead of
/// NewPrefix. OnWrite is a callback that receives the module identifier and
/// notifies the LTO user that the index file for the module (and optionally
/// the imports file) was created.
ThinBackend createWriteIndexesThinBackend(std::string OldPrefix,
                                          std::string NewPrefix,
                                          std::string NativeObjectPrefix,
                                          bool ShouldEmitImportsFiles,
                                          raw_fd_ostream *LinkedObjectsFile,
                                          IndexWriteCallback OnWrite);
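
// A distributed-build sketch: write each module's index and imports files
// next to the inputs and let separate processes run the real backends (the
// argument values here are illustrative, not recommendations):
//
//   ThinBackend Backend = createWriteIndexesThinBackend(
//       /*OldPrefix=*/"", /*NewPrefix=*/"", /*NativeObjectPrefix=*/"",
//       /*ShouldEmitImportsFiles=*/true, /*LinkedObjectsFile=*/nullptr,
//       /*OnWrite=*/nullptr);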

/// This class implements a resolution-based interface to LLVM's LTO
/// functionality. It supports regular LTO, parallel LTO code generation and
/// ThinLTO. You can use it from a linker in the following way:
/// - Set hooks and code generation options (see lto::Config struct defined in
///   Config.h), and use the lto::Config object to create an lto::LTO object.
/// - Create lto::InputFile objects using lto::InputFile::create(), then use
///   the symbols() function to enumerate its symbols and compute a resolution
///   for each symbol (see SymbolResolution below).
/// - After the linker has visited each input file (and each regular object
///   file) and computed a resolution for each symbol, take each lto::InputFile
///   and pass it and an array of symbol resolutions to the add() function.
/// - Call the getMaxTasks() function to get an upper bound on the number of
///   native object files that LTO may add to the link.
/// - Call the run() function. This function will use the supplied AddStream
///   and Cache functions to add up to getMaxTasks() native object files to
///   the link.
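///
/// A minimal sketch of that flow follows (illustrative only: the resolution
/// logic, diagnostics, and output naming are placeholders, not a real
/// linker's policy):
/// \code
///   lto::Config Conf;
///   Conf.DiagHandler = [](const DiagnosticInfo &DI) { /* report DI */ };
///   lto::LTO Link(std::move(Conf));
///
///   for (MemoryBufferRef MB : InputBuffers) {
///     auto F = cantFail(lto::InputFile::create(MB));
///     std::vector<lto::SymbolResolution> Res;
///     for (const lto::InputFile::Symbol &Sym : F->symbols()) {
///       lto::SymbolResolution R;
///       R.Prevailing = !Sym.isUndefined(); // real linkers decide this
///       R.VisibleToRegularObj = true;      // conservative placeholder
///       Res.push_back(R);
///     }
///     cantFail(Link.add(std::move(F), Res));
///   }
///
///   auto AddStream = [](unsigned Task, const Twine &ModuleName)
///       -> Expected<std::unique_ptr<CachedFileStream>> {
///     std::error_code EC;
///     auto OS = std::make_unique<raw_fd_ostream>(
///         "lto.out." + std::to_string(Task), EC);
///     if (EC)
///       return errorCodeToError(EC);
///     return std::make_unique<CachedFileStream>(std::move(OS));
///   };
///   cantFail(Link.run(AddStream));
/// \endcode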
class LTO {
  friend InputFile;

public:
  /// Unified LTO modes
  enum LTOKind {
    /// Any LTO mode without Unified LTO. The default mode.
    LTOK_Default,

    /// Regular LTO, with Unified LTO enabled.
    LTOK_UnifiedRegular,

    /// ThinLTO, with Unified LTO enabled.
    LTOK_UnifiedThin,
  };

  /// Create an LTO object. A default constructed LTO object has a reasonable
  /// production configuration, but you can customize it by passing arguments to
  /// this constructor.
  /// FIXME: We do currently require the DiagHandler field to be set in Conf.
  /// Until that is fixed, a Config argument is required.
  LTO(Config Conf, ThinBackend Backend = nullptr,
      unsigned ParallelCodeGenParallelismLevel = 1,
      LTOKind LTOMode = LTOK_Default);
  ~LTO();

  /// Add an input file to the LTO link, using the provided symbol resolutions.
  /// The symbol resolutions must appear in the enumeration order given by
  /// InputFile::symbols().
  Error add(std::unique_ptr<InputFile> Obj, ArrayRef<SymbolResolution> Res);

  /// Returns an upper bound on the number of tasks that the client may expect.
  /// This may only be called after all IR object files have been added. For a
  /// full description of tasks see LTOBackend.h.
  unsigned getMaxTasks() const;

  /// Runs the LTO pipeline. This function calls the supplied AddStream
  /// function to add native object files to the link.
  ///
  /// The Cache parameter is optional. If supplied, it will be used to cache
  /// native object files and add them to the link.
  ///
  /// The client will receive at most one callback (via either AddStream or
  /// Cache) for each task identifier.
  Error run(AddStreamFn AddStream, FileCache Cache = nullptr);

  /// Static method that returns a list of libcall symbols that can be
  /// generated by LTO but might not be visible from the bitcode symbol table.
  static ArrayRef<const char*> getRuntimeLibcallSymbols();

private:
  Config Conf;

  struct RegularLTOState {
    RegularLTOState(unsigned ParallelCodeGenParallelismLevel,
                    const Config &Conf);
    struct CommonResolution {
      uint64_t Size = 0;
      Align Alignment;
      /// Record if at least one instance of the common was marked as prevailing
      bool Prevailing = false;
    };
    std::map<std::string, CommonResolution> Commons;

    unsigned ParallelCodeGenParallelismLevel;
    LTOLLVMContext Ctx;
    std::unique_ptr<Module> CombinedModule;
    std::unique_ptr<IRMover> Mover;

    // This stores the information about a regular LTO module that we have added
    // to the link. It will either be linked immediately (for modules without
    // summaries) or after summary-based dead stripping (for modules with
    // summaries).
    struct AddedModule {
      std::unique_ptr<Module> M;
      std::vector<GlobalValue *> Keep;
    };
    std::vector<AddedModule> ModsWithSummaries;
    bool EmptyCombinedModule = true;
  } RegularLTO;

  using ModuleMapType = MapVector<StringRef, BitcodeModule>;

  struct ThinLTOState {
    ThinLTOState(ThinBackend Backend);

    ThinBackend Backend;
    ModuleSummaryIndex CombinedIndex;
    // The full set of bitcode modules in input order.
    ModuleMapType ModuleMap;
    // The bitcode modules to compile, if specified by the LTO Config.
    std::optional<ModuleMapType> ModulesToCompile;
    DenseMap<GlobalValue::GUID, StringRef> PrevailingModuleForGUID;
  } ThinLTO;

  // The global resolution for a particular (mangled) symbol name. This is in
  // particular necessary to track whether each symbol can be internalized.
  // Because any input file may introduce a new cross-partition reference, we
  // cannot make any final internalization decisions until all input files have
  // been added and the client has called run(). During run() we apply
  // internalization decisions either directly to the module (for regular LTO)
  // or to the combined index (for ThinLTO).
  struct GlobalResolution {
    /// The unmangled name of the global.
    std::string IRName;

    /// Keep track if the symbol is visible outside of a module with a summary
    /// (i.e. in either a regular object or a regular LTO module without a
    /// summary).
    bool VisibleOutsideSummary = false;

    /// The symbol was exported dynamically, and therefore could be referenced
    /// by a shared library not visible to the linker.
    bool ExportDynamic = false;

    bool UnnamedAddr = true;

    /// True if the module contains the prevailing definition.
    bool Prevailing = false;

    /// Returns true if the module contains the prevailing definition and the
    /// symbol is an IR symbol. For example, when a module-level inline asm
    /// block is used, a symbol can be prevailing in the module yet have no IR
    /// name.
    bool isPrevailingIRSymbol() const { return Prevailing && !IRName.empty(); }

    /// This field keeps track of the partition number of this global. The
    /// regular LTO object is partition 0, while each ThinLTO object has its own
    /// partition number from 1 onwards.
    ///
    /// Any global that is defined or used by more than one partition, or that
    /// is referenced externally, may not be internalized.
    ///
    /// Partitions generally have a one-to-one correspondence with tasks, except
    /// that we use partition 0 for all parallel LTO code generation partitions.
    /// Any partitioning of the combined LTO object is done internally by the
    /// LTO backend.
    unsigned Partition = Unknown;

    /// Special partition numbers.
    enum : unsigned {
      /// A partition number has not yet been assigned to this global.
      Unknown = -1u,

      /// This global is either used by more than one partition or has an
      /// external reference, and therefore cannot be internalized.
      External = -2u,

      /// The RegularLTO partition
      RegularLTO = 0,
    };
  };

  // Global mapping from mangled symbol names to resolutions.
  StringMap<GlobalResolution> GlobalResolutions;

  void addModuleToGlobalRes(ArrayRef<InputFile::Symbol> Syms,
                            ArrayRef<SymbolResolution> Res, unsigned Partition,
                            bool InSummary);

  // These functions take a range of symbol resolutions [ResI, ResE) and consume
  // the resolutions used by a single input module by incrementing ResI. After
  // these functions return, [ResI, ResE) will refer to the resolution range for
  // the remaining modules in the InputFile.
  Error addModule(InputFile &Input, unsigned ModI,
                  const SymbolResolution *&ResI, const SymbolResolution *ResE);

  Expected<RegularLTOState::AddedModule>
  addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
                const SymbolResolution *&ResI, const SymbolResolution *ResE);
  Error linkRegularLTO(RegularLTOState::AddedModule Mod,
                       bool LivenessFromIndex);

  Error addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
                   const SymbolResolution *&ResI, const SymbolResolution *ResE);

  Error runRegularLTO(AddStreamFn AddStream);
  Error runThinLTO(AddStreamFn AddStream, FileCache Cache,
                   const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols);

  Error checkPartiallySplit();

  mutable bool CalledGetMaxTasks = false;

  // LTO mode when using Unified LTO.
  LTOKind LTOMode;

  // Use std::optional to distinguish false from not-yet-initialized.
  std::optional<bool> EnableSplitLTOUnit;

  // Identify symbols exported dynamically, and that therefore could be
  // referenced by a shared library not visible to the linker.
  DenseSet<GlobalValue::GUID> DynamicExportSymbols;

  // Diagnostic optimization remarks file
  std::unique_ptr<ToolOutputFile> DiagnosticOutputFile;
};

/// The resolution for a symbol. The linker must provide a SymbolResolution for
/// each global symbol based on its internal resolution of that symbol.
struct SymbolResolution {
  SymbolResolution()
      : Prevailing(0), FinalDefinitionInLinkageUnit(0), VisibleToRegularObj(0),
        ExportDynamic(0), LinkerRedefined(0) {}

  /// The linker has chosen this definition of the symbol.
  unsigned Prevailing : 1;

  /// The definition of this symbol is unpreemptable at runtime and is known to
  /// be in this linkage unit.
  unsigned FinalDefinitionInLinkageUnit : 1;

  /// The definition of this symbol is visible outside of the LTO unit.
  unsigned VisibleToRegularObj : 1;

  /// The symbol was exported dynamically, and therefore could be referenced
  /// by a shared library not visible to the linker.
  unsigned ExportDynamic : 1;

  /// The linker redefined this symbol, i.e. it appeared in a -wrap or -defsym
  /// linker option.
  unsigned LinkerRedefined : 1;
};

} // namespace lto
} // namespace llvm

#endif
//=- llvm/LTO/SummaryBasedOptimizations.h - Link time optimizations -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_SUMMARYBASEDOPTIMIZATIONS_H
#define LLVM_LTO_SUMMARYBASEDOPTIMIZATIONS_H
namespace llvm {
class ModuleSummaryIndex;

/// Compute synthetic function entry counts.
void computeSyntheticCounts(ModuleSummaryIndex &Index);

} // namespace llvm
#endif
//===- Config.h - LLVM Link Time Optimizer Configuration --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the lto::Config data structure, which allows clients to
// configure LTO.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_CONFIG_H
#define LLVM_LTO_CONFIG_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"

#include <functional>
#include <optional>

namespace llvm {

class Error;
class Module;
class ModuleSummaryIndex;
class raw_pwrite_stream;

namespace lto {

/// LTO configuration. A linker can configure LTO by setting fields in this data
/// structure and passing it to the lto::LTO constructor.
struct Config {
  enum VisScheme {
    FromPrevailing,
    ELF,
  };
  // Note: when adding fields here, consider whether they need to be added to
  // computeLTOCacheKey in LTO.cpp.
  std::string CPU;
  TargetOptions Options;
  std::vector<std::string> MAttrs;
  std::vector<std::string> MllvmArgs;
  std::vector<std::string> PassPlugins;
  /// For adding passes that run right before codegen.
  std::function<void(legacy::PassManager &)> PreCodeGenPassesHook;
  std::optional<Reloc::Model> RelocModel = Reloc::PIC_;
  std::optional<CodeModel::Model> CodeModel;
  CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
  CodeGenFileType CGFileType = CGFT_ObjectFile;
  unsigned OptLevel = 2;
  bool VerifyEach = false;
  bool DisableVerify = false;

  /// Use the standard optimization pipeline.
  bool UseDefaultPipeline = false;

  /// Flag to indicate that the optimizer should not assume builtins are present
  /// on the target.
  bool Freestanding = false;

  /// Entirely disable the optimizer, including importing for ThinLTO.
  bool CodeGenOnly = false;

  /// Run PGO context sensitive IR instrumentation.
  bool RunCSIRInstr = false;

  /// Turn on/off the warning about a hash mismatch in the PGO profile data.
  bool PGOWarnMismatch = true;

  /// Whether we can assume whole program visibility during the LTO link.
  bool HasWholeProgramVisibility = false;

  /// Always emit a Regular LTO object even when it is empty because no Regular
  /// LTO modules were linked. This option is useful for build systems that
  /// want to know a priori all possible output files.
  bool AlwaysEmitRegularLTOObj = false;

  /// Allows non-imported definitions to get the potentially more constraining
  /// visibility from the prevailing definition. FromPrevailing is the default
  /// because it works for many binary formats. ELF can use the more optimized
  /// 'ELF' scheme.
  VisScheme VisibilityScheme = FromPrevailing;

  /// If this field is set, the set of passes run in the middle-end optimizer
  /// will be the one specified by the string. Only works with the new pass
  /// manager as the old one doesn't have this ability.
  std::string OptPipeline;

  /// If this field is set, it has the same effect as specifying an AA pipeline
  /// identified by the string. Only works with the new pass manager, in
  /// conjunction with OptPipeline.
  std::string AAPipeline;

  /// Setting this field will replace target triples in input files with this
  /// triple.
  std::string OverrideTriple;

  /// Setting this field will replace unspecified target triples in input files
  /// with this triple.
  std::string DefaultTriple;

  /// Context Sensitive PGO profile path.
  std::string CSIRProfile;

  /// Sample PGO profile path.
  std::string SampleProfile;

  /// Name remapping file for profile data.
  std::string ProfileRemapping;

  /// The directory to store .dwo files.
  std::string DwoDir;

  /// The name for the split debug info file used for the DW_AT_[GNU_]dwo_name
  /// attribute in the skeleton CU. This should generally only be used when
  /// running an individual backend directly via thinBackend(), as otherwise
  /// all objects would use the same .dwo file. Not used as output path.
  std::string SplitDwarfFile;

  /// The path to write a .dwo file to. This should generally only be used when
  /// running an individual backend directly via thinBackend(), as otherwise
  /// all .dwo files will be written to the same path. Not used in skeleton CU.
  std::string SplitDwarfOutput;

  /// Optimization remarks file path.
  std::string RemarksFilename;

  /// Optimization remarks pass filter.
  std::string RemarksPasses;

  /// Whether to emit optimization remarks with hotness information.
  bool RemarksWithHotness = false;

  /// The minimum hotness value a diagnostic needs in order to be included in
  /// optimization diagnostics.
  ///
  /// The threshold is an Optional value, which maps to one of three states:
  /// 1. 0            => threshold disabled. All remarks will be printed.
  /// 2. positive int => manual threshold set by the user. Remarks whose
  ///                    hotness exceeds the threshold will be printed.
  /// 3. None         => 'auto' threshold requested by the user. The actual
  ///                    value is not available at the command line, but will
  ///                    be synced with the hotness threshold from the profile
  ///                    summary during compilation.
  ///
  /// If the threshold option is not specified, it is disabled by default.
  std::optional<uint64_t> RemarksHotnessThreshold = 0;

  /// The format used for serializing remarks (default: YAML).
  std::string RemarksFormat;

  /// Whether to emit pass manager debugging information.
  bool DebugPassManager = false;

  /// Statistics output file path.
  std::string StatsFile;

  /// Specific thinLTO modules to compile.
  std::vector<std::string> ThinLTOModulesToCompile;

  /// Time trace enabled.
  bool TimeTraceEnabled = false;

  /// Time trace granularity.
  unsigned TimeTraceGranularity = 500;

  bool ShouldDiscardValueNames = true;
  DiagnosticHandlerFunction DiagHandler;

  /// Add FSAFDO discriminators.
  bool AddFSDiscriminator = false;

  /// If this field is set, LTO will write input file paths and symbol
  /// resolutions here in llvm-lto2 command line flag format. This can be
  /// used for testing and for running the LTO pipeline outside of the linker
  /// with llvm-lto2.
  std::unique_ptr<raw_ostream> ResolutionFile;

  /// Tunable parameters for passes in the default pipelines.
  PipelineTuningOptions PTO;

  /// The following callbacks deal with tasks, which normally represent the
  /// entire optimization and code generation pipeline for what will become a
  /// single native object file. Each task has a unique identifier between 0 and
  /// getMaxTasks()-1, which is supplied to the callback via the Task parameter.
  /// A task represents the entire pipeline for ThinLTO and regular
  /// (non-parallel) LTO, but a parallel code generation task will be split into
  /// N tasks before code generation, where N is the parallelism level.
  ///
  /// LTO may decide to stop processing a task at any time, for example if the
  /// module is empty or if a module hook (see below) returns false. For this
  /// reason, the client should not expect to receive exactly getMaxTasks()
  /// native object files.

  /// A module hook may be used by a linker to perform actions during the LTO
  /// pipeline. For example, a linker may use this function to implement
  /// -save-temps. If this function returns false, any further processing for
  /// that task is aborted.
  ///
  /// Module hooks must be thread safe with respect to the linker's internal
  /// data structures. A module hook will never be called concurrently from
  /// multiple threads with the same task ID, or the same module.
  ///
  /// Note that in out-of-process backend scenarios, none of the hooks will be
  /// called for ThinLTO tasks.
  using ModuleHookFn = std::function<bool(unsigned Task, const Module &)>;

  /// This module hook is called after linking (regular LTO) or loading
  /// (ThinLTO) the module, before modifying it.
  ModuleHookFn PreOptModuleHook;

  /// This hook is called after promoting any internal functions
  /// (ThinLTO-specific).
  ModuleHookFn PostPromoteModuleHook;

  /// This hook is called after internalizing the module.
  ModuleHookFn PostInternalizeModuleHook;

  /// This hook is called after importing from other modules (ThinLTO-specific).
  ModuleHookFn PostImportModuleHook;

  /// This module hook is called after optimization is complete.
  ModuleHookFn PostOptModuleHook;

  /// This module hook is called before code generation. It is similar to the
  /// PostOptModuleHook, but for parallel code generation it is called after
  /// splitting the module.
  ModuleHookFn PreCodeGenModuleHook;

  /// A combined index hook is called after all per-module indexes have been
  /// combined (ThinLTO-specific). It can be used to implement -save-temps for
  /// the combined index.
  ///
  /// If this function returns false, any further processing for ThinLTO tasks
  /// is aborted.
  ///
  /// It is called regardless of whether the backend is in-process, although it
  /// is not called from individual backend processes.
  using CombinedIndexHookFn = std::function<bool(
      const ModuleSummaryIndex &Index,
      const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols)>;
  CombinedIndexHookFn CombinedIndexHook;

  /// This is a convenience function that configures this Config object to write
  /// temporary files named after the given OutputFileName for each of the LTO
  /// phases to disk. A client can use this function to implement -save-temps.
  ///
  /// FIXME: Temporary files derived from ThinLTO backends are currently named
  /// after the input file name, rather than the output file name, when
  /// UseInputModulePath is set to true.
  ///
  /// Specifically, it (1) sets each of the above module hooks and the combined
  /// index hook to a function that calls the hook function (if any) that was
  /// present in the appropriate field when the addSaveTemps function was
  /// called, and writes the module to a bitcode file with a name prefixed by
  /// the given output file name, and (2) creates a resolution file whose name
  /// is prefixed by the given output file name and sets ResolutionFile to its
  /// file handle.
  ///
  /// SaveTempsArgs can be specified to select which temps to save.
  /// If SaveTempsArgs is not provided, all temps are saved.
  Error addSaveTemps(std::string OutputFileName,
                     bool UseInputModulePath = false,
                     const DenseSet<StringRef> &SaveTempsArgs = {});
};
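
// For example, a client implementing -save-temps for output "foo" might do
// (sketch; the exact temp-file names depend on the LTO phase and task number):
//
//   lto::Config Conf;
//   if (Error E = Conf.addSaveTemps("foo."))
//     return E; // or report the error and continue without -save-temps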

struct LTOLLVMDiagnosticHandler : public DiagnosticHandler {
  DiagnosticHandlerFunction *Fn;
  LTOLLVMDiagnosticHandler(DiagnosticHandlerFunction *DiagHandlerFn)
      : Fn(DiagHandlerFn) {}
  bool handleDiagnostics(const DiagnosticInfo &DI) override {
    (*Fn)(DI);
    return true;
  }
};
/// A derived class of LLVMContext that initializes itself according to a given
/// Config object. The purpose of this class is to tie ownership of the
/// diagnostic handler to the context, as opposed to the Config object (which
/// may be ephemeral).
// FIXME: This should not be required, as the diagnostic handler is not a
// callback.
struct LTOLLVMContext : LLVMContext {

  LTOLLVMContext(const Config &C) : DiagHandler(C.DiagHandler) {
    setDiscardValueNames(C.ShouldDiscardValueNames);
    enableDebugTypeODRUniquing();
    setDiagnosticHandler(
        std::make_unique<LTOLLVMDiagnosticHandler>(&DiagHandler), true);
  }
  DiagnosticHandlerFunction DiagHandler;
};
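
// Typical use is to build the context directly from the Config so diagnostics
// flow through its handler (sketch):
//
//   lto::Config Conf;
//   Conf.DiagHandler = [](const DiagnosticInfo &DI) { /* report DI */ };
//   LTOLLVMContext Ctx(Conf);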

} // namespace lto
} // namespace llvm

#endif
//===- LTOBackend.h - LLVM Link Time Optimizer Backend ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the "backend" phase of LTO, i.e. it performs
// optimization and code generation on a loaded module. It is generally used
// internally by the LTO class but can also be used independently, for example
// to implement a standalone ThinLTO backend.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LTO_LTOBACKEND_H
#define LLVM_LTO_LTOBACKEND_H

#include "llvm/ADT/MapVector.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/IPO/FunctionImport.h"

namespace llvm {

class BitcodeModule;
class Error;
class Module;
class Target;

namespace lto {

/// Runs middle-end LTO optimizations on \p Mod.
bool opt(const Config &Conf, TargetMachine *TM, unsigned Task, Module &Mod,
         bool IsThinLTO, ModuleSummaryIndex *ExportSummary,
         const ModuleSummaryIndex *ImportSummary,
         const std::vector<uint8_t> &CmdArgs);

/// Runs a regular LTO backend. The regular LTO backend can also act as the
/// regular LTO phase of ThinLTO, which may need to access the combined index.
Error backend(const Config &C, AddStreamFn AddStream,
              unsigned ParallelCodeGenParallelismLevel, Module &M,
              ModuleSummaryIndex &CombinedIndex);

/// Runs a ThinLTO backend.
/// If \p ModuleMap is not nullptr, all the module files to be imported have
/// already been mapped to memory and the corresponding BitcodeModule objects
/// are saved in the ModuleMap. If \p ModuleMap is nullptr, module files will
/// be mapped to memory on demand and, at any given time during importing, at
/// most one source module will be kept open.
Error thinBackend(const Config &C, unsigned Task, AddStreamFn AddStream,
                  Module &M, const ModuleSummaryIndex &CombinedIndex,
                  const FunctionImporter::ImportMapTy &ImportList,
                  const GVSummaryMapTy &DefinedGlobals,
                  MapVector<StringRef, BitcodeModule> *ModuleMap,
                  const std::vector<uint8_t> &CmdArgs = std::vector<uint8_t>());

Error finalizeOptimizationRemarks(
    std::unique_ptr<ToolOutputFile> DiagOutputFile);

/// Returns the one BitcodeModule in \p BMs that has ThinLTO summary
/// information, or nullptr if none is found.
BitcodeModule *findThinLTOModule(MutableArrayRef<BitcodeModule> BMs);

/// Variant of the above.
Expected<BitcodeModule> findThinLTOModule(MemoryBufferRef MBRef);

/// Distributed ThinLTO: collect the referenced modules based on
/// module summary and initialize ImportList. Returns false if the
/// operation failed.
bool initImportList(const Module &M, const ModuleSummaryIndex &CombinedIndex,
                    FunctionImporter::ImportMapTy &ImportList);
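
// Sketch for a distributed ThinLTO worker (assumes M and CombinedIndex were
// already loaded from the module and the combined-summary bitcode,
// respectively):
//
//   FunctionImporter::ImportMapTy ImportList;
//   if (!initImportList(M, CombinedIndex, ImportList))
//     report_fatal_error("failed to initialize ThinLTO import list");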
} // namespace lto
} // namespace llvm

#endif
//===-- Parser.h - Parser for LLVM IR text assembly files -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  These classes are implemented by the lib/AsmParser library.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ASMPARSER_PARSER_H
#define LLVM_ASMPARSER_PARSER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include <memory>
#include <optional>

namespace llvm {

class Constant;
class LLVMContext;
class MemoryBufferRef;
class Module;
class ModuleSummaryIndex;
struct SlotMapping;
class SMDiagnostic;
class Type;

typedef llvm::function_ref<std::optional<std::string>(StringRef, StringRef)>
    DataLayoutCallbackTy;

/// This function is a main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code. It returns a
/// Module (intermediate representation) with the corresponding features. Note
/// that this does not verify that the generated Module is valid, so you should
/// run the verifier after parsing the file to check that it is okay.
/// Parse LLVM Assembly from a file
/// \param Filename The name of the file to parse
/// \param Err Error result info.
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
///              parsing.
std::unique_ptr<Module> parseAssemblyFile(StringRef Filename, SMDiagnostic &Err,
                                          LLVMContext &Context,
                                          SlotMapping *Slots = nullptr);

/// The function is a secondary interface to the LLVM Assembly Parser. It parses
/// an ASCII string that (presumably) contains LLVM Assembly code. It returns a
/// Module (intermediate representation) with the corresponding features. Note
/// that this does not verify that the generated Module is valid, so you should
/// run the verifier after parsing the file to check that it is okay.
/// Parse LLVM Assembly from a string
/// \param AsmString The string containing assembly
/// \param Err Error result info.
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
///              parsing.
std::unique_ptr<Module> parseAssemblyString(StringRef AsmString,
                                            SMDiagnostic &Err,
                                            LLVMContext &Context,
                                            SlotMapping *Slots = nullptr);
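
// For example (illustrative; verifyModule() is declared in
// llvm/IR/Verifier.h and returns true when the module is broken):
//
//   LLVMContext Ctx;
//   SMDiagnostic Err;
//   std::unique_ptr<Module> M =
//       parseAssemblyString("define void @f() { ret void }", Err, Ctx);
//   if (!M)
//     Err.print("example", errs());
//   else if (verifyModule(*M, &errs()))
//     ; // parsed, but the module failed verification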

/// Holds the Module and ModuleSummaryIndex returned by the interfaces
/// that parse both.
struct ParsedModuleAndIndex {
  std::unique_ptr<Module> Mod;
  std::unique_ptr<ModuleSummaryIndex> Index;
};

/// This function is a main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code, including
/// a module summary. It returns a Module (intermediate representation) and
/// a ModuleSummaryIndex with the corresponding features. Note that this does
/// not verify that the generated Module or Index are valid, so you should
/// run the verifier after parsing the file to check that they are okay.
/// Parse LLVM Assembly from a file
/// \param Filename The name of the file to parse
/// \param Err Error result info.
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
///              parsing.
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
ParsedModuleAndIndex parseAssemblyFileWithIndex(
    StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
    SlotMapping *Slots = nullptr,
    DataLayoutCallbackTy DataLayoutCallback = [](StringRef, StringRef) {
      return std::nullopt;
    });

/// Only for use in llvm-as for testing; this does not produce a valid module.
ParsedModuleAndIndex parseAssemblyFileWithIndexNoUpgradeDebugInfo(
    StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
    SlotMapping *Slots, DataLayoutCallbackTy DataLayoutCallback);

/// This function is a main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code for a module
/// summary. It returns a ModuleSummaryIndex with the corresponding features.
/// Note that this does not verify that the generated Index is valid, so you
/// should run the verifier after parsing the file to check that it is okay.
/// Parse LLVM Assembly Index from a file
/// \param Filename The name of the file to parse
/// \param Err Error result info.
std::unique_ptr<ModuleSummaryIndex>
parseSummaryIndexAssemblyFile(StringRef Filename, SMDiagnostic &Err);

/// The function is a secondary interface to the LLVM Assembly Parser. It parses
/// an ASCII string that (presumably) contains LLVM Assembly code for a module
/// summary. It returns a ModuleSummaryIndex with the corresponding features.
/// Note that this does not verify that the generated Index is valid, so you
/// should run the verifier after parsing the file to check that it is okay.
/// Parse LLVM Assembly from a string
/// \param AsmString The string containing assembly
/// \param Err Error result info.
std::unique_ptr<ModuleSummaryIndex>
parseSummaryIndexAssemblyString(StringRef AsmString, SMDiagnostic &Err);

/// parseAssemblyFile and parseAssemblyString are wrappers around this function.
/// Parse LLVM Assembly from a MemoryBuffer.
/// \param F The MemoryBuffer containing assembly
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
///              parsing.
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
std::unique_ptr<Module> parseAssembly(
    MemoryBufferRef F, SMDiagnostic &Err, LLVMContext &Context,
    SlotMapping *Slots = nullptr,
    DataLayoutCallbackTy DataLayoutCallback = [](StringRef, StringRef) {
      return std::nullopt;
    });

/// Parse LLVM Assembly including the summary index from a MemoryBuffer.
///
/// \param F The MemoryBuffer containing assembly with summary
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
///              parsing.
///
/// parseAssemblyFileWithIndex is a wrapper around this function.
ParsedModuleAndIndex parseAssemblyWithIndex(MemoryBufferRef F,
                                            SMDiagnostic &Err,
                                            LLVMContext &Context,
                                            SlotMapping *Slots = nullptr);

/// Parse LLVM Assembly for summary index from a MemoryBuffer.
///
/// \param F The MemoryBuffer containing assembly with summary
/// \param Err Error result info.
///
/// parseSummaryIndexAssemblyFile is a wrapper around this function.
std::unique_ptr<ModuleSummaryIndex>
parseSummaryIndexAssembly(MemoryBufferRef F, SMDiagnostic &Err);

/// This function is the low-level interface to the LLVM Assembly Parser.
/// This is kept as an independent function instead of being inlined into
/// parseAssembly for the convenience of interactive users that want to add
/// recently parsed bits to an existing module.
///
/// \param F The MemoryBuffer containing assembly
/// \param M The module to add data to.
/// \param Index The index to add data to.
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
///              parsing.
/// \return true on error.
/// \param DataLayoutCallback Override datalayout in the llvm assembly.
bool parseAssemblyInto(
    MemoryBufferRef F, Module *M, ModuleSummaryIndex *Index, SMDiagnostic &Err,
    SlotMapping *Slots = nullptr,
    DataLayoutCallbackTy DataLayoutCallback = [](StringRef, StringRef) {
      return std::nullopt;
    });

/// Parse a type and a constant value in the given string.
///
/// The constant value can be any LLVM constant, including a constant
/// expression.
///
/// \param Slots The optional slot mapping that will restore the parsing state
/// of the module.
/// \return null on error.
Constant *parseConstantValue(StringRef Asm, SMDiagnostic &Err, const Module &M,
                             const SlotMapping *Slots = nullptr);

/// Parse a type in the given string.
///
/// \param Slots The optional slot mapping that will restore the parsing state
/// of the module.
/// \return null on error.
Type *parseType(StringRef Asm, SMDiagnostic &Err, const Module &M,
                const SlotMapping *Slots = nullptr);
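
// For instance, resolving a type in the context of an already-parsed module
// (sketch; M and Err as in the functions above):
//
//   if (Type *T = parseType("{ i32, ptr }", Err, M))
//     ; // use T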

/// Parse a string \p Asm that starts with a type.
/// \p Read [out] is set to the number of characters that were read in order
/// to parse the type in \p Asm.
///
/// \param Slots The optional slot mapping that will restore the parsing state
/// of the module.
/// \return null on error.
Type *parseTypeAtBeginning(StringRef Asm, unsigned &Read, SMDiagnostic &Err,
                           const Module &M, const SlotMapping *Slots = nullptr);

} // End llvm namespace

#endif
//===-- SlotMapping.h - Slot number mapping for unnamed values --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the SlotMapping struct.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ASMPARSER_SLOTMAPPING_H
#define LLVM_ASMPARSER_SLOTMAPPING_H

#include "llvm/ADT/StringMap.h"
#include "llvm/IR/TrackingMDRef.h"
#include <map>
#include <vector>

namespace llvm {

class GlobalValue;
class Type;

/// This struct contains the mappings from the slot numbers to unnamed metadata
/// nodes, global values and types. It also contains the mapping for the named
/// types.
/// It can be used to save the parsing state of an LLVM IR module so that the
/// textual references to the values in the module can be parsed outside of the
/// module's source.
struct SlotMapping {
  std::vector<GlobalValue *> GlobalValues;
  std::map<unsigned, TrackingMDNodeRef> MetadataNodes;
  StringMap<Type *> NamedTypes;
  std::map<unsigned, Type *> Types;
};

} // end namespace llvm

#endif
//===- LLToken.h - Token Codes for LLVM Assembly Files ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the enums for the .ll lexer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ASMPARSER_LLTOKEN_H
#define LLVM_ASMPARSER_LLTOKEN_H

namespace llvm {
namespace lltok {
enum Kind {
  // Markers
  Eof,
  Error,

  // Tokens with no info.
  dotdotdot, // ...
  equal,   // =
  comma,   // ,
  star,    // *
  lsquare, // [
  rsquare, // ]
  lbrace,  // {
  rbrace,  // }
  less,    // <
  greater, // >
  lparen,  // (
  rparen,  // )
  exclaim, // !
  bar,     // |
  colon,   // :

  kw_vscale,
  kw_x,
  kw_true,
  kw_false,
  kw_declare,
  kw_define,
  kw_global,
  kw_constant,

  kw_dso_local,
  kw_dso_preemptable,

  kw_private,
  kw_internal,
  kw_linkonce,
  kw_linkonce_odr,
  kw_weak, // Used as a linkage, and a modifier for "cmpxchg".
  kw_weak_odr,
  kw_appending,
  kw_dllimport,
  kw_dllexport,
  kw_common,
  kw_available_externally,
  kw_default,
  kw_hidden,
  kw_protected,
  kw_unnamed_addr,
  kw_local_unnamed_addr,
  kw_externally_initialized,
  kw_extern_weak,
  kw_external,
  kw_thread_local,
  kw_localdynamic,
  kw_initialexec,
  kw_localexec,
  kw_zeroinitializer,
  kw_undef,
  kw_poison,
  kw_null,
  kw_none,
  kw_to,
  kw_caller,
  kw_within,
  kw_from,
  kw_tail,
  kw_musttail,
  kw_notail,
  kw_target,
  kw_triple,
  kw_source_filename,
  kw_unwind,
  kw_datalayout,
  kw_volatile,
  kw_atomic,
  kw_unordered,
  kw_monotonic,
  kw_acquire,
  kw_release,
  kw_acq_rel,
  kw_seq_cst,
  kw_syncscope,
  kw_nnan,
  kw_ninf,
  kw_nsz,
  kw_arcp,
  kw_contract,
  kw_reassoc,
  kw_afn,
  kw_fast,
  kw_nuw,
  kw_nsw,
  kw_exact,
  kw_inbounds,
  kw_inrange,
  kw_addrspace,
  kw_section,
  kw_partition,
  kw_alias,
  kw_ifunc,
  kw_module,
  kw_asm,
  kw_sideeffect,
  kw_inteldialect,
  kw_gc,
  kw_prefix,
  kw_prologue,
  kw_c,

  kw_cc,
  kw_ccc,
  kw_fastcc,
  kw_coldcc,
  kw_intel_ocl_bicc,
  kw_cfguard_checkcc,
  kw_x86_stdcallcc,
  kw_x86_fastcallcc,
  kw_x86_thiscallcc,
  kw_x86_vectorcallcc,
  kw_x86_regcallcc,
  kw_arm_apcscc,
  kw_arm_aapcscc,
  kw_arm_aapcs_vfpcc,
  kw_aarch64_vector_pcs,
  kw_aarch64_sve_vector_pcs,
  kw_aarch64_sme_preservemost_from_x0,
  kw_aarch64_sme_preservemost_from_x2,
  kw_msp430_intrcc,
  kw_avr_intrcc,
  kw_avr_signalcc,
  kw_ptx_kernel,
  kw_ptx_device,
  kw_spir_kernel,
  kw_spir_func,
  kw_x86_64_sysvcc,
  kw_win64cc,
  kw_webkit_jscc,
  kw_anyregcc,
  kw_swiftcc,
  kw_swifttailcc,
  kw_preserve_mostcc,
  kw_preserve_allcc,
  kw_ghccc,
  kw_x86_intrcc,
  kw_hhvmcc,
  kw_hhvm_ccc,
  kw_cxx_fast_tlscc,
  kw_amdgpu_vs,
  kw_amdgpu_ls,
  kw_amdgpu_hs,
  kw_amdgpu_es,
  kw_amdgpu_gs,
  kw_amdgpu_ps,
  kw_amdgpu_cs,
  kw_amdgpu_cs_chain,
  kw_amdgpu_cs_chain_preserve,
  kw_amdgpu_kernel,
  kw_amdgpu_gfx,
  kw_tailcc,

  // Attributes:
  kw_attributes,
  kw_sync,
  kw_async,
#define GET_ATTR_NAMES
#define ATTRIBUTE_ENUM(ENUM_NAME, DISPLAY_NAME) \
  kw_##DISPLAY_NAME,
#include "llvm/IR/Attributes.inc"

  // Memory attribute:
  kw_read,
  kw_write,
  kw_readwrite,
  kw_argmem,
  kw_inaccessiblemem,

  // Legacy memory attributes:
  kw_argmemonly,
  kw_inaccessiblememonly,
  kw_inaccessiblemem_or_argmemonly,

  // nofpclass attribute:
  kw_all,
  kw_nan,
  kw_snan,
  kw_qnan,
  kw_inf,
  // kw_ninf, - already an fmf
  kw_pinf,
  kw_norm,
  kw_nnorm,
  kw_pnorm,
  // kw_sub,  - already an instruction
  kw_nsub,
  kw_psub,
  kw_zero,
  kw_nzero,
  kw_pzero,

  kw_type,
  kw_opaque,

  kw_comdat,

  // Comdat types
  kw_any,
  kw_exactmatch,
  kw_largest,
  kw_nodeduplicate,
  kw_samesize,

  kw_eq,
  kw_ne,
  kw_slt,
  kw_sgt,
  kw_sle,
  kw_sge,
  kw_ult,
  kw_ugt,
  kw_ule,
  kw_uge,
  kw_oeq,
  kw_one,
  kw_olt,
  kw_ogt,
  kw_ole,
  kw_oge,
  kw_ord,
  kw_uno,
  kw_ueq,
  kw_une,

  // atomicrmw operations that aren't also instruction keywords.
  kw_xchg,
  kw_nand,
  kw_max,
  kw_min,
  kw_umax,
  kw_umin,
  kw_fmax,
  kw_fmin,
  kw_uinc_wrap,
  kw_udec_wrap,

  // Instruction Opcodes (Opcode in UIntVal).
  kw_fneg,
  kw_add,
  kw_fadd,
  kw_sub,
  kw_fsub,
  kw_mul,
  kw_fmul,
  kw_udiv,
  kw_sdiv,
  kw_fdiv,
  kw_urem,
  kw_srem,
  kw_frem,
  kw_shl,
  kw_lshr,
  kw_ashr,
  kw_and,
  kw_or,
  kw_xor,
  kw_icmp,
  kw_fcmp,

  kw_phi,
  kw_call,
  kw_trunc,
  kw_zext,
  kw_sext,
  kw_fptrunc,
  kw_fpext,
  kw_uitofp,
  kw_sitofp,
  kw_fptoui,
  kw_fptosi,
  kw_inttoptr,
  kw_ptrtoint,
  kw_bitcast,
  kw_addrspacecast,
  kw_select,
  kw_va_arg,

  kw_landingpad,
  kw_personality,
  kw_cleanup,
  kw_catch,
  kw_filter,

  kw_ret,
  kw_br,
  kw_switch,
  kw_indirectbr,
  kw_invoke,
  kw_resume,
  kw_unreachable,
  kw_cleanupret,
  kw_catchswitch,
  kw_catchret,
  kw_catchpad,
  kw_cleanuppad,
  kw_callbr,

  kw_alloca,
  kw_load,
  kw_store,
  kw_fence,
  kw_cmpxchg,
  kw_atomicrmw,
  kw_getelementptr,

  kw_extractelement,
  kw_insertelement,
  kw_shufflevector,
  kw_extractvalue,
  kw_insertvalue,
  kw_blockaddress,
  kw_dso_local_equivalent,
  kw_no_cfi,

  kw_freeze,

  // Metadata types.
  kw_distinct,

  // Use-list order directives.
  kw_uselistorder,
  kw_uselistorder_bb,

  // Summary index keywords
  kw_path,
  kw_hash,
  kw_gv,
  kw_guid,
  kw_name,
  kw_summaries,
  kw_flags,
  kw_blockcount,
  kw_linkage,
  kw_visibility,
  kw_notEligibleToImport,
  kw_live,
  kw_dsoLocal,
  kw_canAutoHide,
  kw_function,
  kw_insts,
  kw_funcFlags,
  kw_readNone,
  kw_readOnly,
  kw_noRecurse,
  kw_returnDoesNotAlias,
  kw_noInline,
  kw_alwaysInline,
  kw_noUnwind,
  kw_mayThrow,
  kw_hasUnknownCall,
  kw_mustBeUnreachable,
  kw_calls,
  kw_callee,
  kw_params,
  kw_param,
  kw_hotness,
  kw_unknown,
  kw_critical,
  kw_relbf,
  kw_variable,
  kw_vTableFuncs,
  kw_virtFunc,
  kw_aliasee,
  kw_refs,
  kw_typeIdInfo,
  kw_typeTests,
  kw_typeTestAssumeVCalls,
  kw_typeCheckedLoadVCalls,
  kw_typeTestAssumeConstVCalls,
  kw_typeCheckedLoadConstVCalls,
  kw_vFuncId,
  kw_offset,
  kw_args,
  kw_typeid,
  kw_typeidCompatibleVTable,
  kw_summary,
  kw_typeTestRes,
  kw_kind,
  kw_unsat,
  kw_byteArray,
  kw_inline,
  kw_single,
  kw_allOnes,
  kw_sizeM1BitWidth,
  kw_alignLog2,
  kw_sizeM1,
  kw_bitMask,
  kw_inlineBits,
  kw_vcall_visibility,
  kw_wpdResolutions,
  kw_wpdRes,
  kw_indir,
  kw_singleImpl,
  kw_branchFunnel,
  kw_singleImplName,
  kw_resByArg,
  kw_byArg,
  kw_uniformRetVal,
  kw_uniqueRetVal,
  kw_virtualConstProp,
  kw_info,
  kw_byte,
  kw_bit,
  kw_varFlags,
  // The following are used by MemProf summary info.
  kw_callsites,
  kw_clones,
  kw_stackIds,
  kw_allocs,
  kw_versions,
  kw_memProf,
  kw_notcold,

  // GV's with __attribute__((no_sanitize("address"))), or things in
  // -fsanitize-ignorelist when built with ASan.
  kw_no_sanitize_address,
  // GV's with __attribute__((no_sanitize("hwaddress"))), or things in
  // -fsanitize-ignorelist when built with HWASan.
  kw_no_sanitize_hwaddress,
  // GV's where the clang++ frontend (when ASan is used) notes that this is
  // dynamically initialized, and thus needs ODR detection.
  kw_sanitize_address_dyninit,

  // Unsigned Valued tokens (UIntVal).
  LabelID,    // 42:
  GlobalID,   // @42
  LocalVarID, // %42
  AttrGrpID,  // #42
  SummaryID,  // ^42

  // String valued tokens (StrVal).
  LabelStr,         // foo:
  GlobalVar,        // @foo @"foo"
  ComdatVar,        // $foo
  LocalVar,         // %foo %"foo"
  MetadataVar,      // !foo
  StringConstant,   // "foo"
  DwarfTag,         // DW_TAG_foo
  DwarfAttEncoding, // DW_ATE_foo
  DwarfVirtuality,  // DW_VIRTUALITY_foo
  DwarfLang,        // DW_LANG_foo
  DwarfCC,          // DW_CC_foo
  EmissionKind,     // lineTablesOnly
  NameTableKind,    // GNU
  DwarfOp,          // DW_OP_foo
  DIFlag,           // DIFlagFoo
  DISPFlag,         // DISPFlagFoo
  DwarfMacinfo,     // DW_MACINFO_foo
  ChecksumKind,     // CSK_foo

  // Type valued tokens (TyVal).
  Type,

  APFloat, // APFloatVal
  APSInt   // APSInt
};
} // end namespace lltok
} // end namespace llvm

#endif
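
// Illustrative sketch (not part of the LLVM header above): the section
// comments "Unsigned Valued tokens (UIntVal)" and "String valued tokens
// (StrVal)" name the lexer slot that carries each token's payload. A consumer
// might dispatch like this; LLLexer is declared in AsmParser/LLLexer.h, the
// next file in this archive, and the spellings ("@42", "@foo") come from the
// enum comments above.
#include "llvm/AsmParser/LLLexer.h"
#include "llvm/Support/raw_ostream.h"

// Assumes Lex() has already been called at least once on this lexer.
static void describeToken(llvm::LLLexer &Lexer) {
  switch (Lexer.getKind()) {
  case llvm::lltok::GlobalID:  // "@42": payload is an unsigned slot number
    llvm::errs() << "global slot #" << Lexer.getUIntVal() << "\n";
    break;
  case llvm::lltok::GlobalVar: // "@foo": payload is the name string
    llvm::errs() << "global named " << Lexer.getStrVal() << "\n";
    break;
  default:
    break;
  }
}
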
//===- LLLexer.h - Lexer for LLVM Assembly Files ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class represents the Lexer for .ll files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ASMPARSER_LLLEXER_H
#define LLVM_ASMPARSER_LLLEXER_H

#include "LLToken.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/Support/SMLoc.h"
#include <string>

namespace llvm {
  class Type;
  class SMDiagnostic;
  class SourceMgr;
  class LLVMContext;

  class LLLexer {
    const char *CurPtr;
    StringRef CurBuf;
    SMDiagnostic &ErrorInfo;
    SourceMgr &SM;
    LLVMContext &Context;

    // Information about the current token.
    const char *TokStart;
    lltok::Kind CurKind;
    std::string StrVal;
    unsigned UIntVal = 0;
    Type *TyVal = nullptr;
    APFloat APFloatVal{0.0};
    APSInt APSIntVal{0};

    // When false (default), an identifier ending in ':' is a label token.
    // When true, the ':' is treated as a separate token.
    bool IgnoreColonInIdentifiers = false;

  public:
    explicit LLLexer(StringRef StartBuf, SourceMgr &SM, SMDiagnostic &,
                     LLVMContext &C);

    lltok::Kind Lex() {
      return CurKind = LexToken();
    }

    typedef SMLoc LocTy;
    LocTy getLoc() const { return SMLoc::getFromPointer(TokStart); }
    lltok::Kind getKind() const { return CurKind; }
    const std::string &getStrVal() const { return StrVal; }
    Type *getTyVal() const { return TyVal; }
    unsigned getUIntVal() const { return UIntVal; }
    const APSInt &getAPSIntVal() const { return APSIntVal; }
    const APFloat &getAPFloatVal() const { return APFloatVal; }

    void setIgnoreColonInIdentifiers(bool val) {
      IgnoreColonInIdentifiers = val;
    }

    bool Error(LocTy ErrorLoc, const Twine &Msg) const;
    bool Error(const Twine &Msg) const { return Error(getLoc(), Msg); }

    void Warning(LocTy WarningLoc, const Twine &Msg) const;
    void Warning(const Twine &Msg) const { return Warning(getLoc(), Msg); }

  private:
    lltok::Kind LexToken();

    int getNextChar();
    void SkipLineComment();
    lltok::Kind ReadString(lltok::Kind kind);
    bool ReadVarName();

    lltok::Kind LexIdentifier();
    lltok::Kind LexDigitOrNegative();
    lltok::Kind LexPositive();
    lltok::Kind LexAt();
    lltok::Kind LexDollar();
    lltok::Kind LexExclaim();
    lltok::Kind LexPercent();
    lltok::Kind LexUIntID(lltok::Kind Token);
    lltok::Kind LexVar(lltok::Kind Var, lltok::Kind VarID);
    lltok::Kind LexQuote();
    lltok::Kind Lex0x();
    lltok::Kind LexHash();
    lltok::Kind LexCaret();

    uint64_t atoull(const char *Buffer, const char *End);
    uint64_t HexIntToVal(const char *Buffer, const char *End);
    void HexToIntPair(const char *Buffer, const char *End, uint64_t Pair[2]);
    void FP80HexToIntPair(const char *Buffer, const char *End, uint64_t Pair[2]);
  };
} // end namespace llvm

#endif
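
// Illustrative sketch (not part of the LLVM header above): lexing a buffer of
// LLVM assembly to completion. Assumes lltok::Eof is the end-of-input kind
// declared at the top of LLToken.h (outside this excerpt); the constructor
// signature matches the declaration above.
#include "llvm/ADT/StringRef.h"
#include "llvm/AsmParser/LLLexer.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/SourceMgr.h"

static unsigned countTokens(llvm::StringRef Asm) {
  llvm::LLVMContext Ctx;
  llvm::SourceMgr SM;
  llvm::SMDiagnostic Err;
  llvm::LLLexer Lexer(Asm, SM, Err, Ctx);
  unsigned N = 0;
  // Lex() advances and caches the kind; getKind() re-reads the cached value.
  while (Lexer.Lex() != llvm::lltok::Eof)
    ++N;
  return N;
}
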
//===-- LLParser.h - Parser Class -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  This file defines the parser class for .ll files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ASMPARSER_LLPARSER_H
#define LLVM_ASMPARSER_LLPARSER_H

#include "LLLexer.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/ModRef.h"
#include <map>
#include <optional>

namespace llvm {
  class Module;
  class ConstantRange;
  class FunctionType;
  class GlobalObject;
  class SMDiagnostic;
  class SMLoc;
  class SourceMgr;
  class Type;
  struct MaybeAlign;
  class Function;
  class Value;
  class BasicBlock;
  class Instruction;
  class Constant;
  class GlobalValue;
  class Comdat;
  class MDString;
  class MDNode;
  struct SlotMapping;

  /// ValID - Represents a reference to a definition of some sort with no type.
  /// There are several cases where we have to parse the value but where the
  /// type can depend on later context.  This may either be a numeric reference
  /// or a symbolic (%var) reference.  This is just a discriminated union.
  struct ValID {
    enum {
      t_LocalID, t_GlobalID,           // ID in UIntVal.
      t_LocalName, t_GlobalName,       // Name in StrVal.
      t_APSInt, t_APFloat,             // Value in APSIntVal/APFloatVal.
      t_Null, t_Undef, t_Zero, t_None, t_Poison, // No value.
      t_EmptyArray,                    // No value:  []
      t_Constant,                      // Value in ConstantVal.
      t_InlineAsm,                     // Value in FTy/StrVal/StrVal2/UIntVal.
      t_ConstantStruct,                // Value in ConstantStructElts.
      t_PackedConstantStruct           // Value in ConstantStructElts.
    } Kind = t_LocalID;

    LLLexer::LocTy Loc;
    unsigned UIntVal;
    FunctionType *FTy = nullptr;
    std::string StrVal, StrVal2;
    APSInt APSIntVal;
    APFloat APFloatVal{0.0};
    Constant *ConstantVal;
    std::unique_ptr<Constant *[]> ConstantStructElts;
    bool NoCFI = false;

    ValID() = default;
    ValID(const ValID &RHS)
        : Kind(RHS.Kind), Loc(RHS.Loc), UIntVal(RHS.UIntVal), FTy(RHS.FTy),
          StrVal(RHS.StrVal), StrVal2(RHS.StrVal2), APSIntVal(RHS.APSIntVal),
          APFloatVal(RHS.APFloatVal), ConstantVal(RHS.ConstantVal),
          NoCFI(RHS.NoCFI) {
      assert(!RHS.ConstantStructElts);
    }

    bool operator<(const ValID &RHS) const {
      assert(Kind == RHS.Kind && "Comparing ValIDs of different kinds");
      if (Kind == t_LocalID || Kind == t_GlobalID)
        return UIntVal < RHS.UIntVal;
      assert((Kind == t_LocalName || Kind == t_GlobalName ||
              Kind == t_ConstantStruct || Kind == t_PackedConstantStruct) &&
             "Ordering not defined for this ValID kind yet");
      return StrVal < RHS.StrVal;
    }
  };
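
  // Illustrative values (sketch): parsing "%7" yields Kind == t_LocalID with
  // UIntVal == 7, while "@main" yields Kind == t_GlobalName with
  // StrVal == "main". Note operator< above is only defined between ValIDs of
  // the same Kind, which is why GlobalID- and GlobalName-keyed maps are kept
  // separately below.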

  class LLParser {
  public:
    typedef LLLexer::LocTy LocTy;
  private:
    LLVMContext &Context;
    // Lexer to determine whether to use opaque pointers or not.
    LLLexer OPLex;
    LLLexer Lex;
    // Module being parsed, null if we are only parsing summary index.
    Module *M;
    // Summary index being parsed, null if we are only parsing Module.
    ModuleSummaryIndex *Index;
    SlotMapping *Slots;

    SmallVector<Instruction*, 64> InstsWithTBAATag;

    /// DIAssignID metadata does not support temporary RAUW so we cannot use
    /// the normal metadata forward reference resolution method. Instead,
    /// non-temporary DIAssignID are attached to instructions (recorded here)
    /// then replaced later.
    DenseMap<MDNode *, SmallVector<Instruction *, 2>> TempDIAssignIDAttachments;

    // Type resolution handling data structures.  The location is set when we
    // have processed a use of the type but not a definition yet.
    StringMap<std::pair<Type*, LocTy> > NamedTypes;
    std::map<unsigned, std::pair<Type*, LocTy> > NumberedTypes;

    std::map<unsigned, TrackingMDNodeRef> NumberedMetadata;
    std::map<unsigned, std::pair<TempMDTuple, LocTy>> ForwardRefMDNodes;

    // Global Value reference information.
    std::map<std::string, std::pair<GlobalValue*, LocTy> > ForwardRefVals;
    std::map<unsigned, std::pair<GlobalValue*, LocTy> > ForwardRefValIDs;
    std::vector<GlobalValue*> NumberedVals;

    // Comdat forward reference information.
    std::map<std::string, LocTy> ForwardRefComdats;

    // References to blockaddress.  The key is the function ValID, the value is
    // a list of references to blocks in that function.
    std::map<ValID, std::map<ValID, GlobalValue *>> ForwardRefBlockAddresses;
    class PerFunctionState;
    /// Reference to per-function state to allow basic blocks to be
    /// forward-referenced by blockaddress instructions within the same
    /// function.
    PerFunctionState *BlockAddressPFS;

    // References to dso_local_equivalent. The key is the global's ValID, the
    // value is a placeholder value that will be replaced. Note there are two
    // maps for tracking ValIDs that are GlobalNames and ValIDs that are
    // GlobalIDs. These are needed because "operator<" doesn't discriminate
    // between the two.
    std::map<ValID, GlobalValue *> ForwardRefDSOLocalEquivalentNames;
    std::map<ValID, GlobalValue *> ForwardRefDSOLocalEquivalentIDs;

    // Attribute builder reference information.
    std::map<Value*, std::vector<unsigned> > ForwardRefAttrGroups;
    std::map<unsigned, AttrBuilder> NumberedAttrBuilders;

    // Summary global value reference information.
    std::map<unsigned, std::vector<std::pair<ValueInfo *, LocTy>>>
        ForwardRefValueInfos;
    std::map<unsigned, std::vector<std::pair<AliasSummary *, LocTy>>>
        ForwardRefAliasees;
    std::vector<ValueInfo> NumberedValueInfos;

    // Summary type id reference information.
    std::map<unsigned, std::vector<std::pair<GlobalValue::GUID *, LocTy>>>
        ForwardRefTypeIds;

    // Map of module ID to path.
    std::map<unsigned, StringRef> ModuleIdMap;

    /// Only the llvm-as tool may set this to false to bypass
    /// UpgradeDebugInfo so it can generate broken bitcode.
    bool UpgradeDebugInfo;

    std::string SourceFileName;

  public:
    LLParser(StringRef F, SourceMgr &SM, SMDiagnostic &Err, Module *M,
             ModuleSummaryIndex *Index, LLVMContext &Context,
             SlotMapping *Slots = nullptr)
        : Context(Context), OPLex(F, SM, Err, Context),
          Lex(F, SM, Err, Context), M(M), Index(Index), Slots(Slots),
          BlockAddressPFS(nullptr) {}
    bool Run(
        bool UpgradeDebugInfo,
        DataLayoutCallbackTy DataLayoutCallback = [](StringRef, StringRef) {
          return std::nullopt;
        });

    bool parseStandaloneConstantValue(Constant *&C, const SlotMapping *Slots);

    bool parseTypeAtBeginning(Type *&Ty, unsigned &Read,
                              const SlotMapping *Slots);

    LLVMContext &getContext() { return Context; }

  private:
    bool error(LocTy L, const Twine &Msg) const { return Lex.Error(L, Msg); }
    bool tokError(const Twine &Msg) const { return error(Lex.getLoc(), Msg); }

    /// Restore the internal name and slot mappings using the mappings that
    /// were created at an earlier parsing stage.
    void restoreParsingState(const SlotMapping *Slots);

    /// getGlobalVal - Get a value with the specified name or ID, creating a
    /// forward reference record if needed.  This can return null if the value
    /// exists but does not have the right type.
    GlobalValue *getGlobalVal(const std::string &N, Type *Ty, LocTy Loc);
    GlobalValue *getGlobalVal(unsigned ID, Type *Ty, LocTy Loc);

    /// Get a Comdat with the specified name, creating a forward reference
    /// record if needed.
    Comdat *getComdat(const std::string &Name, LocTy Loc);

    // Helper Routines.
    bool parseToken(lltok::Kind T, const char *ErrMsg);
    bool EatIfPresent(lltok::Kind T) {
      if (Lex.getKind() != T) return false;
      Lex.Lex();
      return true;
    }
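
    // Typical use (sketch): optional syntax is consumed in-line, e.g.
    //   if (EatIfPresent(lltok::comma))
    //     /* parse the optional operand that follows the comma */;
    // (lltok::comma is declared earlier in LLToken.h, outside this excerpt.)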

    FastMathFlags EatFastMathFlagsIfPresent() {
      FastMathFlags FMF;
      while (true)
        switch (Lex.getKind()) {
        case lltok::kw_fast: FMF.setFast();            Lex.Lex(); continue;
        case lltok::kw_nnan: FMF.setNoNaNs();          Lex.Lex(); continue;
        case lltok::kw_ninf: FMF.setNoInfs();          Lex.Lex(); continue;
        case lltok::kw_nsz:  FMF.setNoSignedZeros();   Lex.Lex(); continue;
        case lltok::kw_arcp: FMF.setAllowReciprocal(); Lex.Lex(); continue;
        case lltok::kw_contract:
          FMF.setAllowContract(true);
          Lex.Lex();
          continue;
        case lltok::kw_reassoc: FMF.setAllowReassoc(); Lex.Lex(); continue;
        case lltok::kw_afn:     FMF.setApproxFunc();   Lex.Lex(); continue;
        default: return FMF;
        }
      return FMF;
    }

    bool parseOptionalToken(lltok::Kind T, bool &Present,
                            LocTy *Loc = nullptr) {
      if (Lex.getKind() != T) {
        Present = false;
      } else {
        if (Loc)
          *Loc = Lex.getLoc();
        Lex.Lex();
        Present = true;
      }
      return false;
    }
    bool parseStringConstant(std::string &Result);
    bool parseUInt32(unsigned &Val);
    bool parseUInt32(unsigned &Val, LocTy &Loc) {
      Loc = Lex.getLoc();
      return parseUInt32(Val);
    }
    bool parseUInt64(uint64_t &Val);
    bool parseUInt64(uint64_t &Val, LocTy &Loc) {
      Loc = Lex.getLoc();
      return parseUInt64(Val);
    }
    bool parseFlag(unsigned &Val);

    bool parseStringAttribute(AttrBuilder &B);

    bool parseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
    bool parseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
    bool parseOptionalUnnamedAddr(GlobalVariable::UnnamedAddr &UnnamedAddr);
    bool parseOptionalAddrSpace(unsigned &AddrSpace, unsigned DefaultAS = 0);
    bool parseOptionalProgramAddrSpace(unsigned &AddrSpace) {
      return parseOptionalAddrSpace(
          AddrSpace, M->getDataLayout().getProgramAddressSpace());
    }
    bool parseEnumAttribute(Attribute::AttrKind Attr, AttrBuilder &B,
                            bool InAttrGroup);
    bool parseOptionalParamOrReturnAttrs(AttrBuilder &B, bool IsParam);
    bool parseOptionalParamAttrs(AttrBuilder &B) {
      return parseOptionalParamOrReturnAttrs(B, true);
    }
    bool parseOptionalReturnAttrs(AttrBuilder &B) {
      return parseOptionalParamOrReturnAttrs(B, false);
    }
    bool parseOptionalLinkage(unsigned &Res, bool &HasLinkage,
                              unsigned &Visibility, unsigned &DLLStorageClass,
                              bool &DSOLocal);
    void parseOptionalDSOLocal(bool &DSOLocal);
    void parseOptionalVisibility(unsigned &Res);
    void parseOptionalDLLStorageClass(unsigned &Res);
    bool parseOptionalCallingConv(unsigned &CC);
    bool parseOptionalAlignment(MaybeAlign &Alignment,
                                bool AllowParens = false);
    bool parseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes);
    bool parseOptionalUWTableKind(UWTableKind &Kind);
    bool parseAllocKind(AllocFnKind &Kind);
    std::optional<MemoryEffects> parseMemoryAttr();
    unsigned parseNoFPClassAttr();
    bool parseScopeAndOrdering(bool IsAtomic, SyncScope::ID &SSID,
                               AtomicOrdering &Ordering);
    bool parseScope(SyncScope::ID &SSID);
    bool parseOrdering(AtomicOrdering &Ordering);
    bool parseOptionalStackAlignment(unsigned &Alignment);
    bool parseOptionalCommaAlign(MaybeAlign &Alignment, bool &AteExtraComma);
    bool parseOptionalCommaAddrSpace(unsigned &AddrSpace, LocTy &Loc,
                                     bool &AteExtraComma);
    bool parseAllocSizeArguments(unsigned &BaseSizeArg,
                                 std::optional<unsigned> &HowManyArg);
    bool parseVScaleRangeArguments(unsigned &MinValue, unsigned &MaxValue);
    bool parseIndexList(SmallVectorImpl<unsigned> &Indices,
                        bool &AteExtraComma);
    bool parseIndexList(SmallVectorImpl<unsigned> &Indices) {
      bool AteExtraComma;
      if (parseIndexList(Indices, AteExtraComma))
        return true;
      if (AteExtraComma)
        return tokError("expected index");
      return false;
    }

    // Top-Level Entities
    bool parseTopLevelEntities();
    bool validateEndOfModule(bool UpgradeDebugInfo);
    bool validateEndOfIndex();
    bool parseTargetDefinitions(DataLayoutCallbackTy DataLayoutCallback);
    bool parseTargetDefinition(std::string &TentativeDLStr, LocTy &DLStrLoc);
    bool parseModuleAsm();
    bool parseSourceFileName();
    bool parseUnnamedType();
    bool parseNamedType();
    bool parseDeclare();
    bool parseDefine();

    bool parseGlobalType(bool &IsConstant);
    bool parseUnnamedGlobal();
    bool parseNamedGlobal();
    bool parseGlobal(const std::string &Name, LocTy NameLoc, unsigned Linkage,
                     bool HasLinkage, unsigned Visibility,
                     unsigned DLLStorageClass, bool DSOLocal,
                     GlobalVariable::ThreadLocalMode TLM,
                     GlobalVariable::UnnamedAddr UnnamedAddr);
    bool parseAliasOrIFunc(const std::string &Name, LocTy NameLoc, unsigned L,
                           unsigned Visibility, unsigned DLLStorageClass,
                           bool DSOLocal, GlobalVariable::ThreadLocalMode TLM,
                           GlobalVariable::UnnamedAddr UnnamedAddr);
    bool parseComdat();
    bool parseStandaloneMetadata();
    bool parseNamedMetadata();
    bool parseMDString(MDString *&Result);
    bool parseMDNodeID(MDNode *&Result);
    bool parseUnnamedAttrGrp();
    bool parseFnAttributeValuePairs(AttrBuilder &B,
                                    std::vector<unsigned> &FwdRefAttrGrps,
                                    bool inAttrGrp, LocTy &BuiltinLoc);
    bool parseRequiredTypeAttr(AttrBuilder &B, lltok::Kind AttrToken,
                               Attribute::AttrKind AttrKind);

    // Module Summary Index Parsing.
    bool skipModuleSummaryEntry();
    bool parseSummaryEntry();
    bool parseModuleEntry(unsigned ID);
    bool parseModuleReference(StringRef &ModulePath);
    bool parseGVReference(ValueInfo &VI, unsigned &GVId);
    bool parseSummaryIndexFlags();
    bool parseBlockCount();
    bool parseGVEntry(unsigned ID);
    bool parseFunctionSummary(std::string Name, GlobalValue::GUID, unsigned ID);
    bool parseVariableSummary(std::string Name, GlobalValue::GUID, unsigned ID);
    bool parseAliasSummary(std::string Name, GlobalValue::GUID, unsigned ID);
    bool parseGVFlags(GlobalValueSummary::GVFlags &GVFlags);
    bool parseGVarFlags(GlobalVarSummary::GVarFlags &GVarFlags);
    bool parseOptionalFFlags(FunctionSummary::FFlags &FFlags);
    bool parseOptionalCalls(std::vector<FunctionSummary::EdgeTy> &Calls);
    bool parseHotness(CalleeInfo::HotnessType &Hotness);
    bool parseOptionalTypeIdInfo(FunctionSummary::TypeIdInfo &TypeIdInfo);
    bool parseTypeTests(std::vector<GlobalValue::GUID> &TypeTests);
    bool parseVFuncIdList(lltok::Kind Kind,
                          std::vector<FunctionSummary::VFuncId> &VFuncIdList);
    bool parseConstVCallList(
        lltok::Kind Kind,
        std::vector<FunctionSummary::ConstVCall> &ConstVCallList);
    using IdToIndexMapType =
        std::map<unsigned, std::vector<std::pair<unsigned, LocTy>>>;
    bool parseConstVCall(FunctionSummary::ConstVCall &ConstVCall,
                         IdToIndexMapType &IdToIndexMap, unsigned Index);
    bool parseVFuncId(FunctionSummary::VFuncId &VFuncId,
                      IdToIndexMapType &IdToIndexMap, unsigned Index);
    bool parseOptionalVTableFuncs(VTableFuncList &VTableFuncs);
    bool parseOptionalParamAccesses(
        std::vector<FunctionSummary::ParamAccess> &Params);
    bool parseParamNo(uint64_t &ParamNo);
    using IdLocListType = std::vector<std::pair<unsigned, LocTy>>;
    bool parseParamAccess(FunctionSummary::ParamAccess &Param,
                          IdLocListType &IdLocList);
    bool parseParamAccessCall(FunctionSummary::ParamAccess::Call &Call,
                              IdLocListType &IdLocList);
    bool parseParamAccessOffset(ConstantRange &Range);
    bool parseOptionalRefs(std::vector<ValueInfo> &Refs);
    bool parseTypeIdEntry(unsigned ID);
    bool parseTypeIdSummary(TypeIdSummary &TIS);
    bool parseTypeIdCompatibleVtableEntry(unsigned ID);
    bool parseTypeTestResolution(TypeTestResolution &TTRes);
    bool parseOptionalWpdResolutions(
        std::map<uint64_t, WholeProgramDevirtResolution> &WPDResMap);
    bool parseWpdRes(WholeProgramDevirtResolution &WPDRes);
    bool parseOptionalResByArg(
        std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg>
            &ResByArg);
    bool parseArgs(std::vector<uint64_t> &Args);
    void addGlobalValueToIndex(std::string Name, GlobalValue::GUID,
                               GlobalValue::LinkageTypes Linkage, unsigned ID,
                               std::unique_ptr<GlobalValueSummary> Summary);
    bool parseOptionalAllocs(std::vector<AllocInfo> &Allocs);
    bool parseMemProfs(std::vector<MIBInfo> &MIBs);
    bool parseAllocType(uint8_t &AllocType);
    bool parseOptionalCallsites(std::vector<CallsiteInfo> &Callsites);

    // Type Parsing.
    bool parseType(Type *&Result, const Twine &Msg, bool AllowVoid = false);
    bool parseType(Type *&Result, bool AllowVoid = false) {
      return parseType(Result, "expected type", AllowVoid);
    }
    bool parseType(Type *&Result, const Twine &Msg, LocTy &Loc,
                   bool AllowVoid = false) {
      Loc = Lex.getLoc();
      return parseType(Result, Msg, AllowVoid);
    }
    bool parseType(Type *&Result, LocTy &Loc, bool AllowVoid = false) {
      Loc = Lex.getLoc();
      return parseType(Result, AllowVoid);
    }
    bool parseAnonStructType(Type *&Result, bool Packed);
    bool parseStructBody(SmallVectorImpl<Type *> &Body);
    bool parseStructDefinition(SMLoc TypeLoc, StringRef Name,
                               std::pair<Type *, LocTy> &Entry,
                               Type *&ResultTy);

    bool parseArrayVectorType(Type *&Result, bool IsVector);
    bool parseFunctionType(Type *&Result);
    bool parseTargetExtType(Type *&Result);

    // Function Semantic Analysis.
    class PerFunctionState {
      LLParser &P;
      Function &F;
      std::map<std::string, std::pair<Value*, LocTy> > ForwardRefVals;
      std::map<unsigned, std::pair<Value*, LocTy> > ForwardRefValIDs;
      std::vector<Value*> NumberedVals;

      /// FunctionNumber - If this is an unnamed function, this is its slot
      /// number; otherwise it is -1.
      int FunctionNumber;
    public:
      PerFunctionState(LLParser &p, Function &f, int functionNumber);
      ~PerFunctionState();

      Function &getFunction() const { return F; }

      bool finishFunction();

      /// GetVal - Get a value with the specified name or ID, creating a
      /// forward reference record if needed.  This can return null if the value
      /// exists but does not have the right type.
      Value *getVal(const std::string &Name, Type *Ty, LocTy Loc);
      Value *getVal(unsigned ID, Type *Ty, LocTy Loc);

      /// setInstName - After an instruction is parsed and inserted into its
      /// basic block, this installs its name.
      bool setInstName(int NameID, const std::string &NameStr, LocTy NameLoc,
                       Instruction *Inst);

      /// GetBB - Get a basic block with the specified name or ID, creating a
      /// forward reference record if needed.  This can return null if the value
      /// is not a BasicBlock.
      BasicBlock *getBB(const std::string &Name, LocTy Loc);
      BasicBlock *getBB(unsigned ID, LocTy Loc);

      /// DefineBB - Define the specified basic block, which is either named or
      /// unnamed.  If there is an error, this returns null; otherwise it returns
      /// the block being defined.
      BasicBlock *defineBB(const std::string &Name, int NameID, LocTy Loc);

      bool resolveForwardRefBlockAddresses();
    };

    bool convertValIDToValue(Type *Ty, ValID &ID, Value *&V,
                             PerFunctionState *PFS);

    Value *checkValidVariableType(LocTy Loc, const Twine &Name, Type *Ty,
                                  Value *Val);

    bool parseConstantValue(Type *Ty, Constant *&C);
    bool parseValue(Type *Ty, Value *&V, PerFunctionState *PFS);
    bool parseValue(Type *Ty, Value *&V, PerFunctionState &PFS) {
      return parseValue(Ty, V, &PFS);
    }

    bool parseValue(Type *Ty, Value *&V, LocTy &Loc, PerFunctionState &PFS) {
      Loc = Lex.getLoc();
      return parseValue(Ty, V, &PFS);
    }

    bool parseTypeAndValue(Value *&V, PerFunctionState *PFS);
    bool parseTypeAndValue(Value *&V, PerFunctionState &PFS) {
      return parseTypeAndValue(V, &PFS);
    }
    bool parseTypeAndValue(Value *&V, LocTy &Loc, PerFunctionState &PFS) {
      Loc = Lex.getLoc();
      return parseTypeAndValue(V, PFS);
    }
    bool parseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
                                PerFunctionState &PFS);
    bool parseTypeAndBasicBlock(BasicBlock *&BB, PerFunctionState &PFS) {
      LocTy Loc;
      return parseTypeAndBasicBlock(BB, Loc, PFS);
    }

    struct ParamInfo {
      LocTy Loc;
      Value *V;
      AttributeSet Attrs;
      ParamInfo(LocTy loc, Value *v, AttributeSet attrs)
          : Loc(loc), V(v), Attrs(attrs) {}
    };
    bool parseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
                            PerFunctionState &PFS, bool IsMustTailCall = false,
                            bool InVarArgsFunc = false);

    bool
    parseOptionalOperandBundles(SmallVectorImpl<OperandBundleDef> &BundleList,
                                PerFunctionState &PFS);

    bool parseExceptionArgs(SmallVectorImpl<Value *> &Args,
                            PerFunctionState &PFS);

    bool resolveFunctionType(Type *RetType,
                             const SmallVector<ParamInfo, 16> &ArgList,
                             FunctionType *&FuncTy);

    // Constant Parsing.
    bool parseValID(ValID &ID, PerFunctionState *PFS,
                    Type *ExpectedTy = nullptr);
    bool parseGlobalValue(Type *Ty, Constant *&C);
    bool parseGlobalTypeAndValue(Constant *&V);
    bool parseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
                                std::optional<unsigned> *InRangeOp = nullptr);
    bool parseOptionalComdat(StringRef GlobalName, Comdat *&C);
    bool parseSanitizer(GlobalVariable *GV);
    bool parseMetadataAsValue(Value *&V, PerFunctionState &PFS);
    bool parseValueAsMetadata(Metadata *&MD, const Twine &TypeMsg,
                              PerFunctionState *PFS);
    bool parseMetadata(Metadata *&MD, PerFunctionState *PFS);
    bool parseMDTuple(MDNode *&MD, bool IsDistinct = false);
    bool parseMDNode(MDNode *&N);
    bool parseMDNodeTail(MDNode *&N);
    bool parseMDNodeVector(SmallVectorImpl<Metadata *> &Elts);
    bool parseMetadataAttachment(unsigned &Kind, MDNode *&MD);
    bool parseInstructionMetadata(Instruction &Inst);
    bool parseGlobalObjectMetadataAttachment(GlobalObject &GO);
    bool parseOptionalFunctionMetadata(Function &F);

    template <class FieldTy>
    bool parseMDField(LocTy Loc, StringRef Name, FieldTy &Result);
    template <class FieldTy> bool parseMDField(StringRef Name, FieldTy &Result);
    template <class ParserTy> bool parseMDFieldsImplBody(ParserTy ParseField);
    template <class ParserTy>
    bool parseMDFieldsImpl(ParserTy ParseField, LocTy &ClosingLoc);
    bool parseSpecializedMDNode(MDNode *&N, bool IsDistinct = false);

#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
  bool parse##CLASS(MDNode *&Result, bool IsDistinct);
#include "llvm/IR/Metadata.def"
    bool parseDIArgList(MDNode *&Result, bool IsDistinct,
                        PerFunctionState *PFS);

    // Function Parsing.
    struct ArgInfo {
      LocTy Loc;
      Type *Ty;
      AttributeSet Attrs;
      std::string Name;
      ArgInfo(LocTy L, Type *ty, AttributeSet Attr, const std::string &N)
          : Loc(L), Ty(ty), Attrs(Attr), Name(N) {}
    };
    bool parseArgumentList(SmallVectorImpl<ArgInfo> &ArgList, bool &IsVarArg);
    bool parseFunctionHeader(Function *&Fn, bool IsDefine);
    bool parseFunctionBody(Function &Fn);
    bool parseBasicBlock(PerFunctionState &PFS);

    enum TailCallType { TCT_None, TCT_Tail, TCT_MustTail };

    // Instruction Parsing.  Each instruction parsing routine can return with a
    // normal result, an error result, or return having eaten an extra comma.
    enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 };
    int parseInstruction(Instruction *&Inst, BasicBlock *BB,
                         PerFunctionState &PFS);
    bool parseCmpPredicate(unsigned &P, unsigned Opc);

    bool parseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS);
    bool parseBr(Instruction *&Inst, PerFunctionState &PFS);
    bool parseSwitch(Instruction *&Inst, PerFunctionState &PFS);
    bool parseIndirectBr(Instruction *&Inst, PerFunctionState &PFS);
    bool parseInvoke(Instruction *&Inst, PerFunctionState &PFS);
    bool parseResume(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCleanupRet(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCatchRet(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCatchPad(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCleanupPad(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCallBr(Instruction *&Inst, PerFunctionState &PFS);

    bool parseUnaryOp(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc,
                      bool IsFP);
    bool parseArithmetic(Instruction *&Inst, PerFunctionState &PFS,
                         unsigned Opc, bool IsFP);
    bool parseLogical(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc);
    bool parseCompare(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc);
    bool parseCast(Instruction *&Inst, PerFunctionState &PFS, unsigned Opc);
    bool parseSelect(Instruction *&Inst, PerFunctionState &PFS);
    bool parseVAArg(Instruction *&Inst, PerFunctionState &PFS);
    bool parseExtractElement(Instruction *&Inst, PerFunctionState &PFS);
    bool parseInsertElement(Instruction *&Inst, PerFunctionState &PFS);
    bool parseShuffleVector(Instruction *&Inst, PerFunctionState &PFS);
    int parsePHI(Instruction *&Inst, PerFunctionState &PFS);
    bool parseLandingPad(Instruction *&Inst, PerFunctionState &PFS);
    bool parseCall(Instruction *&Inst, PerFunctionState &PFS,
                   CallInst::TailCallKind TCK);
    int parseAlloc(Instruction *&Inst, PerFunctionState &PFS);
    int parseLoad(Instruction *&Inst, PerFunctionState &PFS);
    int parseStore(Instruction *&Inst, PerFunctionState &PFS);
    int parseCmpXchg(Instruction *&Inst, PerFunctionState &PFS);
    int parseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS);
    int parseFence(Instruction *&Inst, PerFunctionState &PFS);
    int parseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS);
    int parseExtractValue(Instruction *&Inst, PerFunctionState &PFS);
    int parseInsertValue(Instruction *&Inst, PerFunctionState &PFS);
    bool parseFreeze(Instruction *&I, PerFunctionState &PFS);

    // Use-list order directives.
    bool parseUseListOrder(PerFunctionState *PFS = nullptr);
    bool parseUseListOrderBB();
    bool parseUseListOrderIndexes(SmallVectorImpl<unsigned> &Indexes);
    bool sortUseListOrder(Value *V, ArrayRef<unsigned> Indexes, SMLoc Loc);
  };
} // End llvm namespace

#endif
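
// Illustrative sketch (not part of the LLVM header above): the supported way
// into this parser is through the public entry points in
// llvm/AsmParser/Parser.h (e.g. parseAssemblyString), which construct and run
// an LLParser internally; instantiating LLParser directly is an
// implementation detail.
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

static std::unique_ptr<llvm::Module> parseExample(llvm::LLVMContext &Ctx) {
  llvm::SMDiagnostic Err;
  std::unique_ptr<llvm::Module> M =
      llvm::parseAssemblyString("define void @f() { ret void }", Err, Ctx);
  if (!M)
    Err.print("example", llvm::errs()); // location-annotated diagnostic
  return M;
}
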
//===-- BuiltinGCs.h - Garbage collector linkage hacks --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains hack functions to force linking in the builtin GC
// components.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_BUILTINGCS_H
#define LLVM_IR_BUILTINGCS_H

namespace llvm {

/// FIXME: Collector instances are not useful on their own. These no longer
///        serve any purpose except to link in the plugins.

/// Ensure the definition of the builtin GCs gets linked in
void linkAllBuiltinGCs();

/// Creates an ocaml-compatible metadata printer.
void linkOcamlGCPrinter();

/// Creates an erlang-compatible metadata printer.
void linkErlangGCPrinter();

} // namespace llvm

#endif // LLVM_IR_BUILTINGCS_H
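
// Illustrative sketch (not part of the LLVM header above): a tool that needs
// the builtin GC strategies calls the hook once at startup; the call forces
// the otherwise-unreferenced strategy objects to be linked in.
#include "llvm/IR/BuiltinGCs.h"

int main() {
  llvm::linkAllBuiltinGCs();
  // ... regular tool setup and work ...
  return 0;
}
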
/*===-- FixedMetadataKinds.def - Fixed metadata kind IDs -------*- C++ -*-=== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_FIXED_MD_KIND
#error "LLVM_FIXED_MD_KIND(EnumID, Name, Value) is not defined."
#endif

LLVM_FIXED_MD_KIND(MD_dbg, "dbg", 0)
LLVM_FIXED_MD_KIND(MD_tbaa, "tbaa", 1)
LLVM_FIXED_MD_KIND(MD_prof, "prof", 2)
LLVM_FIXED_MD_KIND(MD_fpmath, "fpmath", 3)
LLVM_FIXED_MD_KIND(MD_range, "range", 4)
LLVM_FIXED_MD_KIND(MD_tbaa_struct, "tbaa.struct", 5)
LLVM_FIXED_MD_KIND(MD_invariant_load, "invariant.load", 6)
LLVM_FIXED_MD_KIND(MD_alias_scope, "alias.scope", 7)
LLVM_FIXED_MD_KIND(MD_noalias, "noalias", 8)
LLVM_FIXED_MD_KIND(MD_nontemporal, "nontemporal", 9)
LLVM_FIXED_MD_KIND(MD_mem_parallel_loop_access,
                    "llvm.mem.parallel_loop_access", 10)
LLVM_FIXED_MD_KIND(MD_nonnull, "nonnull", 11)
LLVM_FIXED_MD_KIND(MD_dereferenceable, "dereferenceable", 12)
LLVM_FIXED_MD_KIND(MD_dereferenceable_or_null, "dereferenceable_or_null", 13)
LLVM_FIXED_MD_KIND(MD_make_implicit, "make.implicit", 14)
LLVM_FIXED_MD_KIND(MD_unpredictable, "unpredictable", 15)
LLVM_FIXED_MD_KIND(MD_invariant_group, "invariant.group", 16)
LLVM_FIXED_MD_KIND(MD_align, "align", 17)
LLVM_FIXED_MD_KIND(MD_loop, "llvm.loop", 18)
LLVM_FIXED_MD_KIND(MD_type, "type", 19)
LLVM_FIXED_MD_KIND(MD_section_prefix, "section_prefix", 20)
LLVM_FIXED_MD_KIND(MD_absolute_symbol, "absolute_symbol", 21)
LLVM_FIXED_MD_KIND(MD_associated, "associated", 22)
LLVM_FIXED_MD_KIND(MD_callees, "callees", 23)
LLVM_FIXED_MD_KIND(MD_irr_loop, "irr_loop", 24)
LLVM_FIXED_MD_KIND(MD_access_group, "llvm.access.group", 25)
LLVM_FIXED_MD_KIND(MD_callback, "callback", 26)
LLVM_FIXED_MD_KIND(MD_preserve_access_index, "llvm.preserve.access.index", 27)
LLVM_FIXED_MD_KIND(MD_vcall_visibility, "vcall_visibility", 28)
LLVM_FIXED_MD_KIND(MD_noundef, "noundef", 29)
LLVM_FIXED_MD_KIND(MD_annotation, "annotation", 30)
LLVM_FIXED_MD_KIND(MD_nosanitize, "nosanitize", 31)
LLVM_FIXED_MD_KIND(MD_func_sanitize, "func_sanitize", 32)
LLVM_FIXED_MD_KIND(MD_exclude, "exclude", 33)
LLVM_FIXED_MD_KIND(MD_memprof, "memprof", 34)
LLVM_FIXED_MD_KIND(MD_callsite, "callsite", 35)
LLVM_FIXED_MD_KIND(MD_kcfi_type, "kcfi_type", 36)
LLVM_FIXED_MD_KIND(MD_pcsections, "pcsections", 37)
LLVM_FIXED_MD_KIND(MD_DIAssignID, "DIAssignID", 38)
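
/* Illustrative sketch of the X-macro pattern this file expects: the include
   site defines LLVM_FIXED_MD_KIND before inclusion (LLVMContext consumes the
   file in the same shape to assign its fixed metadata kind IDs; the enum name
   here is made up). */
enum FixedMDKindExample : unsigned {
#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
#include "llvm/IR/FixedMetadataKinds.def"
#undef LLVM_FIXED_MD_KIND
};
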
//===- llvm/Module.h - C++ class to represent a VM module -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// Module.h This file contains the declarations for the Module class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MODULE_H
#define LLVM_IR_MODULE_H

#include "llvm-c/Types.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalIFunc.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/CodeGen.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <vector>

namespace llvm {

class Error;
class FunctionType;
class GVMaterializer;
class LLVMContext;
class MemoryBuffer;
class ModuleSummaryIndex;
class RandomNumberGenerator;
class StructType;
class VersionTuple;

/// A Module instance is used to store all the information related to an
/// LLVM module. Modules are the top level container of all other LLVM
/// Intermediate Representation (IR) objects. Each module directly contains a
/// list of global variables, a list of functions, a list of libraries (or
/// other modules) this module depends on, a symbol table, and various data
/// about the target's characteristics.
///
/// A module maintains a GlobalList object that is used to hold all
/// constant references to global variables in the module.  When a global
/// variable is destroyed, it should have no entries in the GlobalList.
/// The main container class for the LLVM Intermediate Representation.
class LLVM_EXTERNAL_VISIBILITY Module {
  /// @name Types And Enumerations
  /// @{
public:
  /// The type for the list of global variables.
  using GlobalListType = SymbolTableList<GlobalVariable>;
  /// The type for the list of functions.
  using FunctionListType = SymbolTableList<Function>;
  /// The type for the list of aliases.
  using AliasListType = SymbolTableList<GlobalAlias>;
  /// The type for the list of ifuncs.
  using IFuncListType = SymbolTableList<GlobalIFunc>;
  /// The type for the list of named metadata.
  using NamedMDListType = ilist<NamedMDNode>;
  /// The type of the comdat "symbol" table.
  using ComdatSymTabType = StringMap<Comdat>;
  /// The type for mapping names to named metadata.
  using NamedMDSymTabType = StringMap<NamedMDNode *>;

  /// The Global Variable iterator.
  using global_iterator = GlobalListType::iterator;
  /// The Global Variable constant iterator.
  using const_global_iterator = GlobalListType::const_iterator;

  /// The Function iterators.
  using iterator = FunctionListType::iterator;
  /// The Function constant iterator
  using const_iterator = FunctionListType::const_iterator;

  /// The Function reverse iterator.
  using reverse_iterator = FunctionListType::reverse_iterator;
  /// The Function constant reverse iterator.
  using const_reverse_iterator = FunctionListType::const_reverse_iterator;

  /// The Global Alias iterators.
  using alias_iterator = AliasListType::iterator;
  /// The Global Alias constant iterator
  using const_alias_iterator = AliasListType::const_iterator;

  /// The Global IFunc iterators.
  using ifunc_iterator = IFuncListType::iterator;
  /// The Global IFunc constant iterator
  using const_ifunc_iterator = IFuncListType::const_iterator;

  /// The named metadata iterators.
  using named_metadata_iterator = NamedMDListType::iterator;
  /// The named metadata constant iterators.
  using const_named_metadata_iterator = NamedMDListType::const_iterator;

  /// This enumeration defines the supported behaviors of module flags.
  enum ModFlagBehavior {
    /// Emits an error if two values disagree, otherwise the resulting value is
    /// that of the operands.
    Error = 1,

    /// Emits a warning if two values disagree. The result value will be the
    /// operand for the flag from the first module being linked.
    Warning = 2,

    /// Adds a requirement that another module flag be present and have a
    /// specified value after linking is performed. The value must be a metadata
    /// pair, where the first element of the pair is the ID of the module flag
    /// to be restricted, and the second element of the pair is the value the
    /// module flag should be restricted to. This behavior can be used to
    /// restrict the allowable results (via triggering of an error) of linking
    /// IDs with the **Override** behavior.
    Require = 3,

    /// Uses the specified value, regardless of the behavior or value of the
    /// other module. If both modules specify **Override**, but the values
    /// differ, an error will be emitted.
    Override = 4,

    /// Appends the two values, which are required to be metadata nodes.
    Append = 5,

    /// Appends the two values, which are required to be metadata
    /// nodes. However, duplicate entries in the second list are dropped
    /// during the append operation.
    AppendUnique = 6,

    /// Takes the max of the two values, which are required to be integers.
    Max = 7,

    /// Takes the min of the two values, which are required to be integers.
    Min = 8,

    // Markers:
    ModFlagBehaviorFirstVal = Error,
    ModFlagBehaviorLastVal = Min
  };

  /// Checks if Metadata represents a valid ModFlagBehavior, and stores the
  /// converted result in MFB.
  static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB);

  /// Check if the given module flag metadata represents a valid module flag,
  /// and store the flag behavior, the key string and the value metadata.
  static bool isValidModuleFlag(const MDNode &ModFlag, ModFlagBehavior &MFB,
                                MDString *&Key, Metadata *&Val);

  struct ModuleFlagEntry {
    ModFlagBehavior Behavior;
    MDString *Key;
    Metadata *Val;

    ModuleFlagEntry(ModFlagBehavior B, MDString *K, Metadata *V)
        : Behavior(B), Key(K), Val(V) {}
  };
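
  // Illustrative encoding (sketch): the textual IR
  //   !llvm.module.flags = !{!0}
  //   !0 = !{i32 1, !"wchar_size", i32 4}
  // corresponds to one ModuleFlagEntry with Behavior == Error (1),
  // Key == "wchar_size", and Val == i32 4.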

/// @}
/// @name Member Variables
/// @{
private:
  LLVMContext &Context;           ///< The LLVMContext from which types and
                                  ///< constants are allocated.
  GlobalListType GlobalList;      ///< The Global Variables in the module
  FunctionListType FunctionList;  ///< The Functions in the module
  AliasListType AliasList;        ///< The Aliases in the module
  IFuncListType IFuncList;        ///< The IFuncs in the module
  NamedMDListType NamedMDList;    ///< The named metadata in the module
  std::string GlobalScopeAsm;     ///< Inline Asm at global scope.
  std::unique_ptr<ValueSymbolTable> ValSymTab; ///< Symbol table for values
  ComdatSymTabType ComdatSymTab;  ///< Symbol table for COMDATs
  std::unique_ptr<MemoryBuffer>
  OwnedMemoryBuffer;              ///< Memory buffer directly owned by this
                                  ///< module, for legacy clients only.
  std::unique_ptr<GVMaterializer>
  Materializer;                   ///< Used to materialize GlobalValues
  std::string ModuleID;           ///< Human readable identifier for the module
  std::string SourceFileName;     ///< Original source file name for module,
                                  ///< recorded in bitcode.
  std::string TargetTriple;       ///< The target triple the module was
                                  ///< compiled for.
                                  ///< Format: (arch)(sub)-(vendor)-(sys)-(abi)
  NamedMDSymTabType NamedMDSymTab;  ///< NamedMDNode names.
  DataLayout DL;                  ///< DataLayout associated with the module
  StringMap<unsigned>
      CurrentIntrinsicIds; ///< Keep track of the current unique id count for
                           ///< the specified intrinsic basename.
  DenseMap<std::pair<Intrinsic::ID, const FunctionType *>, unsigned>
      UniquedIntrinsicNames; ///< Keep track of uniqued names of intrinsics
                             ///< based on unnamed types. The combination of
                             ///< ID and FunctionType maps to the extension that
                             ///< is used to make the intrinsic name unique.

  friend class Constant;

/// @}
/// @name Constructors
/// @{
public:
  /// The Module constructor. Note that there is no default constructor. You
  /// must provide a name for the module upon construction.
  explicit Module(StringRef ModuleID, LLVMContext& C);
  /// The module destructor. This will dropAllReferences.
  ~Module();

/// @}
/// @name Module Level Accessors
/// @{

  /// Get the module identifier which is, essentially, the name of the module.
  /// @returns the module identifier as a string
  const std::string &getModuleIdentifier() const { return ModuleID; }

  /// Returns the number of non-debug IR instructions in the module.
  /// This is equivalent to the sum of the IR instruction counts of each
  /// function contained in the module.
  unsigned getInstructionCount() const;

  /// Get the module's original source file name. When compiling from
  /// bitcode, this is taken from a bitcode record where it was recorded.
  /// For other compiles it is the same as the ModuleID, which would
  /// contain the source file name.
  const std::string &getSourceFileName() const { return SourceFileName; }

  /// Get a short "name" for the module.
  ///
  /// This is useful for debugging or logging. It is essentially a convenience
  /// wrapper around getModuleIdentifier().
  StringRef getName() const { return ModuleID; }

  /// Get the data layout string for the module's target platform. This is
  /// equivalent to getDataLayout().getStringRepresentation().
  const std::string &getDataLayoutStr() const {
    return DL.getStringRepresentation();
  }

  /// Get the data layout for the module's target platform.
  const DataLayout &getDataLayout() const;

  /// Get the target triple which is a string describing the target host.
  /// @returns a string containing the target triple.
  const std::string &getTargetTriple() const { return TargetTriple; }

  /// Get the global data context.
  /// @returns LLVMContext - a container for LLVM's global information
  LLVMContext &getContext() const { return Context; }

  /// Get any module-scope inline assembly blocks.
  /// @returns a string containing the module-scope inline assembly blocks.
  const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }

  /// Get a RandomNumberGenerator salted for use with this module. The
  /// RNG can be seeded via -rng-seed=<uint64> and is salted with the
  /// ModuleID and the provided pass salt. The returned RNG should not
  /// be shared across threads or passes.
  ///
  /// A unique RNG per pass ensures a reproducible random stream even
  /// when other randomness consuming passes are added or removed. In
  /// addition, the random stream will be reproducible across LLVM
  /// versions when the pass does not change.
  std::unique_ptr<RandomNumberGenerator> createRNG(const StringRef Name) const;

  /// Return true if the size-info optimization remark is enabled, false
  /// otherwise.
  bool shouldEmitInstrCountChangedRemark() {
    return getContext().getDiagHandlerPtr()->isAnalysisRemarkEnabled(
        "size-info");
  }

  /// @}
  /// @name Module Level Mutators
  /// @{

  /// Set the module identifier.
  void setModuleIdentifier(StringRef ID) { ModuleID = std::string(ID); }

  /// Set the module's original source file name.
  void setSourceFileName(StringRef Name) { SourceFileName = std::string(Name); }

  /// Set the data layout
  void setDataLayout(StringRef Desc);
  void setDataLayout(const DataLayout &Other);

  /// Set the target triple.
  void setTargetTriple(StringRef T) { TargetTriple = std::string(T); }

  /// Set the module-scope inline assembly blocks.
  /// A trailing newline is added if the input doesn't have one.
  void setModuleInlineAsm(StringRef Asm) {
    GlobalScopeAsm = std::string(Asm);
    if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
      GlobalScopeAsm += '\n';
  }

  /// Append to the module-scope inline assembly blocks.
  /// A trailing newline is added if the input doesn't have one.
  void appendModuleInlineAsm(StringRef Asm) {
    GlobalScopeAsm += Asm;
    if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
      GlobalScopeAsm += '\n';
  }

/// @}
/// @name Generic Value Accessors
/// @{

  /// Return the global value in the module with the specified name, of
  /// arbitrary type. This method returns null if a global with the specified
  /// name is not found.
  GlobalValue *getNamedValue(StringRef Name) const;

  /// Return the number of global values in the module.
  unsigned getNumNamedValues() const;

  /// Return a unique non-zero ID for the specified metadata kind. This ID is
  /// uniqued across modules in the current LLVMContext.
  unsigned getMDKindID(StringRef Name) const;

  /// Populate client-supplied SmallVector with the names for custom metadata IDs
  /// registered in this LLVMContext.
  void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;

  /// Populate client-supplied SmallVector with the bundle tags registered in
  /// this LLVMContext.  The bundle tags are ordered by increasing bundle IDs.
  /// \see LLVMContext::getOperandBundleTagID
  void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;

  std::vector<StructType *> getIdentifiedStructTypes() const;

  /// Return a unique name for an intrinsic whose mangling is based on an
  /// unnamed type. The Proto represents the function prototype.
  std::string getUniqueIntrinsicName(StringRef BaseName, Intrinsic::ID Id,
                                     const FunctionType *Proto);

/// @}
/// @name Function Accessors
/// @{

  /// Look up the specified function in the module symbol table. Four
  /// possibilities:
  ///   1. If it does not exist, add a prototype for the function and return it.
  ///   2. Otherwise, if the existing function has the correct prototype, return
  ///      the existing function.
  ///   3. Finally, the function exists but has the wrong prototype: return the
  ///      function with a constantexpr cast to the right prototype.
  ///
  /// In all cases, the returned value is a FunctionCallee wrapper around the
  /// 'FunctionType *T' passed in, as well as a 'Value *' that is either the
  /// Function itself or a bitcast of the Function to the requested type.
  ///
  /// Note: For library calls getOrInsertLibFunc() should be used instead.
  FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T,
                                     AttributeList AttributeList);

  FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T);

  /// Look up the specified function in the module symbol table. If it does not
  /// exist, add a prototype for the function and return it. This function
  /// guarantees to return a constant pointer to the specified function type,
  /// or a ConstantExpr bitcast of it if the named function has a
  /// different type. This version of the method takes a list of
  /// function arguments, which makes it easier for clients to use.
  template <typename... ArgsTy>
  FunctionCallee getOrInsertFunction(StringRef Name,
                                     AttributeList AttributeList, Type *RetTy,
                                     ArgsTy... Args) {
    SmallVector<Type*, sizeof...(ArgsTy)> ArgTys{Args...};
    return getOrInsertFunction(Name,
                               FunctionType::get(RetTy, ArgTys, false),
                               AttributeList);
  }

  /// Same as above, but without the attributes.
  template <typename... ArgsTy>
  FunctionCallee getOrInsertFunction(StringRef Name, Type *RetTy,
                                     ArgsTy... Args) {
    return getOrInsertFunction(Name, AttributeList{}, RetTy, Args...);
  }

  // Deleted overload: rejects passing a FunctionType where the return type
  // is expected, which would otherwise compile but do the wrong thing.
  template <typename... ArgsTy>
  FunctionCallee
  getOrInsertFunction(StringRef Name, AttributeList AttributeList,
                      FunctionType *Invalid, ArgsTy... Args) = delete;
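
  // Illustrative usage of the variadic overloads above (sketch; assumes an
  // IRBuilder `B` and a Module `M` already exist):
  //   FunctionCallee Puts =
  //       M.getOrInsertFunction("puts", B.getInt32Ty(), B.getInt8PtrTy());
  // Puts then carries both the FunctionType and the callable Value.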

  /// Look up the specified function in the module symbol table. If it does not
  /// exist, return null.
  Function *getFunction(StringRef Name) const;

/// @}
/// @name Global Variable Accessors
/// @{

  /// Look up the specified global variable in the module symbol table. If it
  /// does not exist, return null. If AllowInternal is set to true, this
  /// function will also return globals that have InternalLinkage. By default,
  /// such globals are not returned.
  GlobalVariable *getGlobalVariable(StringRef Name) const {
    return getGlobalVariable(Name, false);
  }

  GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal) const;

  GlobalVariable *getGlobalVariable(StringRef Name,
                                    bool AllowInternal = false) {
    return static_cast<const Module *>(this)->getGlobalVariable(Name,
                                                                AllowInternal);
  }

  /// Return the global variable in the module with the specified name, of
  /// arbitrary type. This method returns null if a global with the specified
  /// name is not found.
  const GlobalVariable *getNamedGlobal(StringRef Name) const {
    return getGlobalVariable(Name, true);
  }
  GlobalVariable *getNamedGlobal(StringRef Name) {
    return const_cast<GlobalVariable *>(
                       static_cast<const Module *>(this)->getNamedGlobal(Name));
  }

  /// Look up the specified global in the module symbol table.
  /// If it does not exist, invoke a callback to create a declaration of the
  /// global and return it. The global is constantexpr casted to the expected
  /// type if necessary.
  Constant *
  getOrInsertGlobal(StringRef Name, Type *Ty,
                    function_ref<GlobalVariable *()> CreateGlobalCallback);

  /// Look up the specified global in the module symbol table. If required, this
  /// overload constructs the global variable using its constructor's defaults.
  Constant *getOrInsertGlobal(StringRef Name, Type *Ty);

/// @}
/// @name Global Alias Accessors
/// @{

  /// Return the global alias in the module with the specified name, of
  /// arbitrary type. This method returns null if a global with the specified
  /// name is not found.
  GlobalAlias *getNamedAlias(StringRef Name) const;

/// @}
/// @name Global IFunc Accessors
/// @{

  /// Return the global ifunc in the module with the specified name, of
  /// arbitrary type. This method returns null if a global with the specified
  /// name is not found.
  GlobalIFunc *getNamedIFunc(StringRef Name) const;

/// @}
/// @name Named Metadata Accessors
/// @{

  /// Return the first NamedMDNode in the module with the specified name. This
  /// method returns null if a NamedMDNode with the specified name is not found.
  NamedMDNode *getNamedMetadata(const Twine &Name) const;

  /// Return the named MDNode in the module with the specified name. This method
  /// returns a new NamedMDNode if a NamedMDNode with the specified name is not
  /// found.
  NamedMDNode *getOrInsertNamedMetadata(StringRef Name);

  /// Remove the given NamedMDNode from this module and delete it.
  void eraseNamedMetadata(NamedMDNode *NMD);

/// @}
/// @name Comdat Accessors
/// @{

  /// Return the Comdat in the module with the specified name. It is created
  /// if it didn't already exist.
  Comdat *getOrInsertComdat(StringRef Name);

/// @}
/// @name Module Flags Accessors
/// @{

  /// Appends all module flags to the provided vector.
  void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const;

  /// Return the corresponding value if Key appears in module flags, otherwise
  /// return null.
  Metadata *getModuleFlag(StringRef Key) const;

  /// Returns the NamedMDNode in the module that represents module-level flags.
  /// This method returns null if there are no module-level flags.
  NamedMDNode *getModuleFlagsMetadata() const;

  /// Returns the NamedMDNode in the module that represents module-level flags.
  /// If module-level flags aren't found, it creates the named metadata that
  /// contains them.
  NamedMDNode *getOrInsertModuleFlagsMetadata();

  /// Add a module-level flag to the module-level flags metadata. It will create
  /// the module-level flags named metadata if it doesn't already exist.
  void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
  void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Constant *Val);
  void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
  void addModuleFlag(MDNode *Node);
  /// Like addModuleFlag but replaces the old module flag if it already exists.
  void setModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
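
  // Example (a hedged sketch): record a flag and read it back; the key
  // "my-feature" is illustrative only.
  //
  //   M.addModuleFlag(Module::Warning, "my-feature", 1);
  //   if (auto *V = mdconst::extract_or_null<ConstantInt>(
  //           M.getModuleFlag("my-feature")))
  //     unsigned Level = V->getZExtValue();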

  /// @}
  /// @name Materialization
  /// @{

  /// Sets the GVMaterializer to GVM. This module must not yet have a
  /// Materializer. To reset the materializer for a module that already has one,
  /// call materializeAll first. Destroying this module will destroy
  /// its materializer without materializing any more GlobalValues. Apart from
  /// destroying the Module, the only way to detach or destroy the materializer
  /// is to materialize all the GVs it controls; this avoids leaving orphaned,
  /// unmaterialized GVs.
  void setMaterializer(GVMaterializer *GVM);
  /// Retrieves the GVMaterializer, if any, for this Module.
  GVMaterializer *getMaterializer() const { return Materializer.get(); }
  bool isMaterialized() const { return !getMaterializer(); }

  /// Make sure the GlobalValue is fully read.
  llvm::Error materialize(GlobalValue *GV);

  /// Make sure all GlobalValues in this Module are fully read and clear the
  /// Materializer.
  llvm::Error materializeAll();

  llvm::Error materializeMetadata();

  /// Detach global variable \p GV from the list but don't delete it.
  void removeGlobalVariable(GlobalVariable *GV) { GlobalList.remove(GV); }
  /// Remove global variable \p GV from the list and delete it.
  void eraseGlobalVariable(GlobalVariable *GV) { GlobalList.erase(GV); }
  /// Insert global variable \p GV at the end of the global variable list and
  /// take ownership.
  void insertGlobalVariable(GlobalVariable *GV) {
    insertGlobalVariable(GlobalList.end(), GV);
  }
  /// Insert global variable \p GV into the global variable list before \p
  /// Where and take ownership.
  void insertGlobalVariable(GlobalListType::iterator Where, GlobalVariable *GV) {
    GlobalList.insert(Where, GV);
  }
  // Use global_size() to get the total number of global variables.
  // Use globals() to get the range of all global variables.

private:
/// @}
/// @name Direct access to the globals list, functions list, and symbol table
/// @{

  /// Get the Module's list of global variables (constant).
  const GlobalListType   &getGlobalList() const       { return GlobalList; }
  /// Get the Module's list of global variables.
  GlobalListType         &getGlobalList()             { return GlobalList; }

  static GlobalListType Module::*getSublistAccess(GlobalVariable*) {
    return &Module::GlobalList;
  }
  friend class llvm::SymbolTableListTraits<llvm::GlobalVariable>;

public:
  /// Get the Module's list of functions (constant).
  const FunctionListType &getFunctionList() const     { return FunctionList; }
  /// Get the Module's list of functions.
  FunctionListType       &getFunctionList()           { return FunctionList; }
  static FunctionListType Module::*getSublistAccess(Function*) {
    return &Module::FunctionList;
  }

  /// Detach \p Alias from the list but don't delete it.
  void removeAlias(GlobalAlias *Alias) { AliasList.remove(Alias); }
  /// Remove \p Alias from the list and delete it.
  void eraseAlias(GlobalAlias *Alias) { AliasList.erase(Alias); }
  /// Insert \p Alias at the end of the alias list and take ownership.
  void insertAlias(GlobalAlias *Alias) { AliasList.insert(AliasList.end(), Alias); }
  // Use alias_size() to get the size of AliasList.
  // Use aliases() to get a range of all Alias objects in AliasList.

  /// Detach \p IFunc from the list but don't delete it.
  void removeIFunc(GlobalIFunc *IFunc) { IFuncList.remove(IFunc); }
  /// Remove \p IFunc from the list and delete it.
  void eraseIFunc(GlobalIFunc *IFunc) { IFuncList.erase(IFunc); }
  /// Insert \p IFunc at the end of the ifunc list and take ownership.
  void insertIFunc(GlobalIFunc *IFunc) { IFuncList.push_back(IFunc); }
  // Use ifunc_size() to get the number of functions in IFuncList.
  // Use ifuncs() to get the range of all IFuncs.

  /// Detach \p MDNode from the list but don't delete it.
  void removeNamedMDNode(NamedMDNode *MDNode) { NamedMDList.remove(MDNode); }
  /// Remove \p MDNode from the list and delete it.
  void eraseNamedMDNode(NamedMDNode *MDNode) { NamedMDList.erase(MDNode); }
  /// Insert \p MDNode at the end of the named metadata list and take ownership.
  void insertNamedMDNode(NamedMDNode *MDNode) {
    NamedMDList.push_back(MDNode);
  }
  // Use named_metadata_size() to get the size of the named metadata list.
  // Use named_metadata() to get the range of all named metadata.

private: // Please use functions like insertAlias(), removeAlias() etc.
  /// Get the Module's list of aliases (constant).
  const AliasListType    &getAliasList() const        { return AliasList; }
  /// Get the Module's list of aliases.
  AliasListType          &getAliasList()              { return AliasList; }

  static AliasListType Module::*getSublistAccess(GlobalAlias*) {
    return &Module::AliasList;
  }
  friend class llvm::SymbolTableListTraits<llvm::GlobalAlias>;

  /// Get the Module's list of ifuncs (constant).
  const IFuncListType    &getIFuncList() const        { return IFuncList; }
  /// Get the Module's list of ifuncs.
  IFuncListType          &getIFuncList()              { return IFuncList; }

  static IFuncListType Module::*getSublistAccess(GlobalIFunc*) {
    return &Module::IFuncList;
  }
  friend class llvm::SymbolTableListTraits<llvm::GlobalIFunc>;

  /// Get the Module's list of named metadata (constant).
  const NamedMDListType  &getNamedMDList() const      { return NamedMDList; }
  /// Get the Module's list of named metadata.
  NamedMDListType        &getNamedMDList()            { return NamedMDList; }

  static NamedMDListType Module::*getSublistAccess(NamedMDNode*) {
    return &Module::NamedMDList;
  }

public:
  /// Get the Module's symbol table of global variable and function identifiers
  /// (constant).
  const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; }
  /// Get the Module's symbol table of global variable and function identifiers.
  ValueSymbolTable       &getValueSymbolTable()       { return *ValSymTab; }

  /// Get the Module's symbol table for COMDATs (constant).
  const ComdatSymTabType &getComdatSymbolTable() const { return ComdatSymTab; }
  /// Get the Module's symbol table for COMDATs.
  ComdatSymTabType &getComdatSymbolTable() { return ComdatSymTab; }

/// @}
/// @name Global Variable Iteration
/// @{

  global_iterator       global_begin()       { return GlobalList.begin(); }
  const_global_iterator global_begin() const { return GlobalList.begin(); }
  global_iterator       global_end  ()       { return GlobalList.end(); }
  const_global_iterator global_end  () const { return GlobalList.end(); }
  size_t                global_size () const { return GlobalList.size(); }
  bool                  global_empty() const { return GlobalList.empty(); }

  iterator_range<global_iterator> globals() {
    return make_range(global_begin(), global_end());
  }
  iterator_range<const_global_iterator> globals() const {
    return make_range(global_begin(), global_end());
  }

/// @}
/// @name Function Iteration
/// @{

  iterator                begin()       { return FunctionList.begin(); }
  const_iterator          begin() const { return FunctionList.begin(); }
  iterator                end  ()       { return FunctionList.end();   }
  const_iterator          end  () const { return FunctionList.end();   }
  reverse_iterator        rbegin()      { return FunctionList.rbegin(); }
  const_reverse_iterator  rbegin() const{ return FunctionList.rbegin(); }
  reverse_iterator        rend()        { return FunctionList.rend(); }
  const_reverse_iterator  rend() const  { return FunctionList.rend(); }
  size_t                  size() const  { return FunctionList.size(); }
  bool                    empty() const { return FunctionList.empty(); }

  iterator_range<iterator> functions() {
    return make_range(begin(), end());
  }
  iterator_range<const_iterator> functions() const {
    return make_range(begin(), end());
  }
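
  // Example (a hedged sketch): count the functions defined (not merely
  // declared) in a Module `M` using the range accessor above.
  //
  //   unsigned NumDefs = 0;
  //   for (const Function &F : M.functions())
  //     if (!F.isDeclaration())
  //       ++NumDefs;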

/// @}
/// @name Alias Iteration
/// @{

  alias_iterator       alias_begin()            { return AliasList.begin(); }
  const_alias_iterator alias_begin() const      { return AliasList.begin(); }
  alias_iterator       alias_end  ()            { return AliasList.end();   }
  const_alias_iterator alias_end  () const      { return AliasList.end();   }
  size_t               alias_size () const      { return AliasList.size();  }
  bool                 alias_empty() const      { return AliasList.empty(); }

  iterator_range<alias_iterator> aliases() {
    return make_range(alias_begin(), alias_end());
  }
  iterator_range<const_alias_iterator> aliases() const {
    return make_range(alias_begin(), alias_end());
  }

/// @}
/// @name IFunc Iteration
/// @{

  ifunc_iterator       ifunc_begin()            { return IFuncList.begin(); }
  const_ifunc_iterator ifunc_begin() const      { return IFuncList.begin(); }
  ifunc_iterator       ifunc_end  ()            { return IFuncList.end();   }
  const_ifunc_iterator ifunc_end  () const      { return IFuncList.end();   }
  size_t               ifunc_size () const      { return IFuncList.size();  }
  bool                 ifunc_empty() const      { return IFuncList.empty(); }

  iterator_range<ifunc_iterator> ifuncs() {
    return make_range(ifunc_begin(), ifunc_end());
  }
  iterator_range<const_ifunc_iterator> ifuncs() const {
    return make_range(ifunc_begin(), ifunc_end());
  }

  /// @}
  /// @name Convenience iterators
  /// @{

  using global_object_iterator =
      concat_iterator<GlobalObject, iterator, global_iterator>;
  using const_global_object_iterator =
      concat_iterator<const GlobalObject, const_iterator,
                      const_global_iterator>;

  iterator_range<global_object_iterator> global_objects();
  iterator_range<const_global_object_iterator> global_objects() const;

  using global_value_iterator =
      concat_iterator<GlobalValue, iterator, global_iterator, alias_iterator,
                      ifunc_iterator>;
  using const_global_value_iterator =
      concat_iterator<const GlobalValue, const_iterator, const_global_iterator,
                      const_alias_iterator, const_ifunc_iterator>;

  iterator_range<global_value_iterator> global_values();
  iterator_range<const_global_value_iterator> global_values() const;

  /// @}
  /// @name Named Metadata Iteration
  /// @{

  named_metadata_iterator named_metadata_begin() { return NamedMDList.begin(); }
  const_named_metadata_iterator named_metadata_begin() const {
    return NamedMDList.begin();
  }

  named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
  const_named_metadata_iterator named_metadata_end() const {
    return NamedMDList.end();
  }

  size_t named_metadata_size() const { return NamedMDList.size();  }
  bool named_metadata_empty() const { return NamedMDList.empty(); }

  iterator_range<named_metadata_iterator> named_metadata() {
    return make_range(named_metadata_begin(), named_metadata_end());
  }
  iterator_range<const_named_metadata_iterator> named_metadata() const {
    return make_range(named_metadata_begin(), named_metadata_end());
  }

  /// An iterator for DICompileUnits that skips those marked NoDebug.
  class debug_compile_units_iterator {
    NamedMDNode *CUs;
    unsigned Idx;

    void SkipNoDebugCUs();

  public:
    using iterator_category = std::input_iterator_tag;
    using value_type = DICompileUnit *;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    explicit debug_compile_units_iterator(NamedMDNode *CUs, unsigned Idx)
        : CUs(CUs), Idx(Idx) {
      SkipNoDebugCUs();
    }

    debug_compile_units_iterator &operator++() {
      ++Idx;
      SkipNoDebugCUs();
      return *this;
    }

    debug_compile_units_iterator operator++(int) {
      debug_compile_units_iterator T(*this);
      ++(*this); // Use prefix ++ so NoDebug CUs are also skipped here.
      return T;
    }

    bool operator==(const debug_compile_units_iterator &I) const {
      return Idx == I.Idx;
    }

    bool operator!=(const debug_compile_units_iterator &I) const {
      return Idx != I.Idx;
    }

    DICompileUnit *operator*() const;
    DICompileUnit *operator->() const;
  };

  debug_compile_units_iterator debug_compile_units_begin() const {
    auto *CUs = getNamedMetadata("llvm.dbg.cu");
    return debug_compile_units_iterator(CUs, 0);
  }

  debug_compile_units_iterator debug_compile_units_end() const {
    auto *CUs = getNamedMetadata("llvm.dbg.cu");
    return debug_compile_units_iterator(CUs, CUs ? CUs->getNumOperands() : 0);
  }

  /// Return an iterator range over all DICompileUnits listed in this Module's
  /// llvm.dbg.cu named metadata node that aren't explicitly marked as
  /// NoDebug.
  iterator_range<debug_compile_units_iterator> debug_compile_units() const {
    auto *CUs = getNamedMetadata("llvm.dbg.cu");
    return make_range(
        debug_compile_units_iterator(CUs, 0),
        debug_compile_units_iterator(CUs, CUs ? CUs->getNumOperands() : 0));
  }
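
  // Example (a hedged sketch): print the source file of each compile unit
  // that carries debug info, skipping NoDebug CUs as described above.
  //
  //   for (DICompileUnit *CU : M.debug_compile_units())
  //     errs() << CU->getFilename() << "\n";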
/// @}

  /// Destroy ConstantArrays in LLVMContext if they are not used.
  /// ConstantArrays constructed during linking can cause quadratic memory
  /// explosion. Releasing all unused constants can cause a 20% LTO compile-time
  /// slowdown for a large application.
  ///
  /// NOTE: Constants are currently owned by LLVMContext, so this should only
  /// be called where all uses of the LLVMContext are understood.
  void dropTriviallyDeadConstantArrays();

/// @name Utility functions for printing and dumping Module objects
/// @{

  /// Print the module to an output stream with an optional
  /// AssemblyAnnotationWriter.  If \c ShouldPreserveUseListOrder, then include
  /// uselistorder directives so that use-lists can be recreated when reading
  /// the assembly.
  void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW,
             bool ShouldPreserveUseListOrder = false,
             bool IsForDebug = false) const;

  /// Dump the module to stderr (for debugging).
  void dump() const;

  /// This function causes all the contained values to "let go" of all
  /// references that they are maintaining.  This allows one to 'delete' a
  /// whole module at a time, even though there may be circular references...
  /// first all references are dropped, and all use counts go to zero.  Then
  /// everything is delete'd for real.  Note that no operations are valid on
  /// an object that has "dropped all references", except operator delete.
  void dropAllReferences();

/// @}
/// @name Utility functions for querying Debug information.
/// @{

  /// Returns the number of register parameters by checking module flags.
  unsigned getNumberRegisterParameters() const;

  /// Returns the Dwarf Version by checking module flags.
  unsigned getDwarfVersion() const;

  /// Returns the DWARF format by checking module flags.
  bool isDwarf64() const;

  /// Returns the CodeView Version by checking module flags.
  /// Returns zero if not present in module.
  unsigned getCodeViewFlag() const;

/// @}
/// @name Utility functions for querying and setting PIC level
/// @{

  /// Returns the PIC level (small or large model)
  PICLevel::Level getPICLevel() const;

  /// Set the PIC level (small or large model)
  void setPICLevel(PICLevel::Level PL);
/// @}

/// @name Utility functions for querying and setting PIE level
/// @{

  /// Returns the PIE level (small or large model)
  PIELevel::Level getPIELevel() const;

  /// Set the PIE level (small or large model)
  void setPIELevel(PIELevel::Level PL);
/// @}

  /// @name Utility functions for querying and setting code model
  /// @{

  /// Returns the code model (tiny, small, kernel, medium or large model)
  std::optional<CodeModel::Model> getCodeModel() const;

  /// Set the code model (tiny, small, kernel, medium or large)
  void setCodeModel(CodeModel::Model CL);
  /// @}

  /// @name Utility functions for querying and setting PGO summary
  /// @{

  /// Attach profile summary metadata to this module.
  void setProfileSummary(Metadata *M, ProfileSummary::Kind Kind);

  /// Returns profile summary metadata. When IsCS is true, use the context
  /// sensitive profile summary.
  Metadata *getProfileSummary(bool IsCS) const;
  /// @}

  /// Returns whether semantic interposition is to be respected.
  bool getSemanticInterposition() const;

  /// Set whether semantic interposition is to be respected.
  void setSemanticInterposition(bool);

  /// Returns true if PLT should be avoided for RTLib calls.
  bool getRtLibUseGOT() const;

  /// Set that PLT should be avoided for RTLib calls.
  void setRtLibUseGOT();

  /// Get/set whether referencing global variables can use direct access
  /// relocations on ELF targets.
  bool getDirectAccessExternalData() const;
  void setDirectAccessExternalData(bool Value);

  /// Get/set whether synthesized functions should get the uwtable attribute.
  UWTableKind getUwtable() const;
  void setUwtable(UWTableKind Kind);

  /// Get/set whether synthesized functions should get the "frame-pointer"
  /// attribute.
  FramePointerKind getFramePointer() const;
  void setFramePointer(FramePointerKind Kind);

  /// Get/set what kind of stack protector guard to use.
  StringRef getStackProtectorGuard() const;
  void setStackProtectorGuard(StringRef Kind);

  /// Get/set which register to use as the stack protector guard register. The
  /// empty string is equivalent to "global". Other values may be "tls" or
  /// "sysreg".
  StringRef getStackProtectorGuardReg() const;
  void setStackProtectorGuardReg(StringRef Reg);

  /// Get/set a symbol to use as the stack protector guard.
  StringRef getStackProtectorGuardSymbol() const;
  void setStackProtectorGuardSymbol(StringRef Symbol);

  /// Get/set what offset from the stack protector to use.
  int getStackProtectorGuardOffset() const;
  void setStackProtectorGuardOffset(int Offset);

  /// Get/set the stack alignment overridden from the default.
  unsigned getOverrideStackAlignment() const;
  void setOverrideStackAlignment(unsigned Align);

  /// Returns the maximum alignment of TLS variables, as recorded in module
  /// flags; returns zero if not set.
  unsigned getMaxTLSAlignment() const;

  /// @name Utility functions for querying and setting the build SDK version
  /// @{

  /// Attach a build SDK version metadata to this module.
  void setSDKVersion(const VersionTuple &V);

  /// Get the build SDK version metadata.
  ///
  /// An empty version is returned if no such metadata is attached.
  VersionTuple getSDKVersion() const;
  /// @}

  /// Take ownership of the given memory buffer.
  void setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB);

  /// Set the partial sample profile ratio in the profile summary module flag,
  /// if applicable.
  void setPartialSampleProfileRatio(const ModuleSummaryIndex &Index);

  /// Get the target variant triple which is a string describing a variant of
  /// the target host platform. For example, Mac Catalyst can be a variant
  /// target triple for a macOS target.
  /// @returns a string containing the target variant triple.
  StringRef getDarwinTargetVariantTriple() const;

  /// Set the target variant triple which is a string describing a variant of
  /// the target host platform.
  void setDarwinTargetVariantTriple(StringRef T);

  /// Get the target variant version build SDK version metadata.
  ///
  /// An empty version is returned if no such metadata is attached.
  VersionTuple getDarwinTargetVariantSDKVersion() const;

  /// Set the target variant version build SDK version metadata.
  void setDarwinTargetVariantSDKVersion(VersionTuple Version);
};

/// Given "llvm.used" or "llvm.compiler.used" as a global name, collect the
/// initializer elements of that global in a SmallVector and return the global
/// itself.
GlobalVariable *collectUsedGlobalVariables(const Module &M,
                                           SmallVectorImpl<GlobalValue *> &Vec,
                                           bool CompilerUsed);

/// A raw_ostream inserter for modules.
inline raw_ostream &operator<<(raw_ostream &O, const Module &M) {
  M.print(O, nullptr);
  return O;
}

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef)

/* LLVMModuleProviderRef exists for historical reasons, but now just holds a
 * Module.
 */
inline Module *unwrap(LLVMModuleProviderRef MP) {
  return reinterpret_cast<Module*>(MP);
}

} // end namespace llvm

#endif // LLVM_IR_MODULE_H
//===-------- llvm/IR/Value.def - File that describes Values ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various LLVM values.  This is
// used as a central place for enumerating the different values.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
#if !(defined HANDLE_GLOBAL_VALUE || defined HANDLE_CONSTANT ||                \
      defined HANDLE_INSTRUCTION || defined HANDLE_INLINE_ASM_VALUE ||         \
      defined HANDLE_METADATA_VALUE || defined HANDLE_VALUE ||                 \
      defined HANDLE_CONSTANT_MARKER || defined HANDLE_MEMORY_VALUE)
#error "Missing macro definition of HANDLE_VALUE*"
#endif

// If the LLVM_C_API macro is set, then values handled via
// HANDLE_*_EXCLUDE_LLVM_C_API will not be expanded in areas where the
// HANDLE_* macro is used. If it is not set, then HANDLE_*_EXCLUDE_LLVM_C_API
// values are handled normally, as their HANDLE_* counterparts.
#ifndef LLVM_C_API
#define LLVM_C_API 0
#endif

#ifndef HANDLE_MEMORY_VALUE
#define HANDLE_MEMORY_VALUE(ValueName) HANDLE_VALUE(ValueName)
#endif

#ifndef HANDLE_GLOBAL_VALUE
#define HANDLE_GLOBAL_VALUE(ValueName) HANDLE_CONSTANT(ValueName)
#endif

#ifndef HANDLE_CONSTANT
#define HANDLE_CONSTANT(ValueName) HANDLE_VALUE(ValueName)
#endif

#ifndef HANDLE_INSTRUCTION
#define HANDLE_INSTRUCTION(ValueName) HANDLE_VALUE(ValueName)
#endif

#ifndef HANDLE_INLINE_ASM_VALUE
#define HANDLE_INLINE_ASM_VALUE(ValueName) HANDLE_VALUE(ValueName)
#endif

#ifndef HANDLE_METADATA_VALUE
#define HANDLE_METADATA_VALUE(ValueName) HANDLE_VALUE(ValueName)
#endif

#ifndef HANDLE_VALUE
#define HANDLE_VALUE(ValueName)
#endif

#ifndef HANDLE_CONSTANT_MARKER
#define HANDLE_CONSTANT_MARKER(MarkerName, ValueName)
#endif

#ifndef HANDLE_CONSTANT_EXCLUDE_LLVM_C_API
#define HANDLE_CONSTANT_EXCLUDE_LLVM_C_API(ValueName) HANDLE_CONSTANT(ValueName)
#endif

#if LLVM_C_API
#undef HANDLE_CONSTANT_EXCLUDE_LLVM_C_API
#define HANDLE_CONSTANT_EXCLUDE_LLVM_C_API(ValueName)
#endif
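
// Example usage (a hedged sketch): a client defines one or more HANDLE_*
// macros before including this file, and the defaults above fill in the rest.
// For instance, to map value IDs back to their class names:
//
//   #define HANDLE_VALUE(Name) case Value::Name##Val: return #Name;
//   #include "llvm/IR/Value.def"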

// Having constant first makes the range check for isa<Constant> faster
// and smaller by one operation.

// Constant
HANDLE_GLOBAL_VALUE(Function)
HANDLE_GLOBAL_VALUE(GlobalAlias)
HANDLE_GLOBAL_VALUE(GlobalIFunc)
HANDLE_GLOBAL_VALUE(GlobalVariable)
HANDLE_CONSTANT(BlockAddress)
HANDLE_CONSTANT(ConstantExpr)
HANDLE_CONSTANT_EXCLUDE_LLVM_C_API(DSOLocalEquivalent)
HANDLE_CONSTANT_EXCLUDE_LLVM_C_API(NoCFIValue)

// ConstantAggregate.
HANDLE_CONSTANT(ConstantArray)
HANDLE_CONSTANT(ConstantStruct)
HANDLE_CONSTANT(ConstantVector)

// ConstantData.
HANDLE_CONSTANT(UndefValue)
HANDLE_CONSTANT(PoisonValue)
HANDLE_CONSTANT(ConstantAggregateZero)
HANDLE_CONSTANT(ConstantDataArray)
HANDLE_CONSTANT(ConstantDataVector)
HANDLE_CONSTANT(ConstantInt)
HANDLE_CONSTANT(ConstantFP)
HANDLE_CONSTANT(ConstantTargetNone)
HANDLE_CONSTANT(ConstantPointerNull)
HANDLE_CONSTANT(ConstantTokenNone)

HANDLE_CONSTANT_MARKER(ConstantFirstVal, Function)
HANDLE_CONSTANT_MARKER(ConstantLastVal, ConstantTokenNone)
HANDLE_CONSTANT_MARKER(ConstantDataFirstVal, UndefValue)
HANDLE_CONSTANT_MARKER(ConstantDataLastVal, ConstantTokenNone)
HANDLE_CONSTANT_MARKER(ConstantAggregateFirstVal, ConstantArray)
HANDLE_CONSTANT_MARKER(ConstantAggregateLastVal, ConstantVector)

HANDLE_VALUE(Argument)
HANDLE_VALUE(BasicBlock)


HANDLE_METADATA_VALUE(MetadataAsValue)
HANDLE_INLINE_ASM_VALUE(InlineAsm)

// FIXME: It's awkward that Value.def knows about classes in Analysis. While
// this doesn't introduce a strict link or include dependency, we should remove
// the circular dependency eventually.
HANDLE_MEMORY_VALUE(MemoryUse)
HANDLE_MEMORY_VALUE(MemoryDef)
HANDLE_MEMORY_VALUE(MemoryPhi)

HANDLE_INSTRUCTION(Instruction)
// Enum values starting at InstructionVal are used for Instructions;
// don't add new values here!

#undef HANDLE_MEMORY_VALUE
#undef HANDLE_GLOBAL_VALUE
#undef HANDLE_CONSTANT
#undef HANDLE_INSTRUCTION
#undef HANDLE_METADATA_VALUE
#undef HANDLE_INLINE_ASM_VALUE
#undef HANDLE_VALUE
#undef HANDLE_CONSTANT_MARKER
#undef HANDLE_CONSTANT_EXCLUDE_LLVM_C_API
#undef LLVM_C_API
//===-------- llvm/GlobalAlias.h - GlobalAlias class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the GlobalAlias class, which
// represents a single function or variable alias in the IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALALIAS_H
#define LLVM_IR_GLOBALALIAS_H

#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Value.h"

namespace llvm {

class Twine;
class Module;
template <typename ValueSubClass> class SymbolTableListTraits;

class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
  friend class SymbolTableListTraits<GlobalAlias>;

  GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
              const Twine &Name, Constant *Aliasee, Module *Parent);

public:
  GlobalAlias(const GlobalAlias &) = delete;
  GlobalAlias &operator=(const GlobalAlias &) = delete;

  /// If a parent module is specified, the alias is automatically inserted into
  /// the end of the specified module's alias list.
  static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
                             LinkageTypes Linkage, const Twine &Name,
                             Constant *Aliasee, Module *Parent);

  // Without the Aliasee.
  static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
                             LinkageTypes Linkage, const Twine &Name,
                             Module *Parent);

  // The module is taken from the Aliasee.
  static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
                             LinkageTypes Linkage, const Twine &Name,
                             GlobalValue *Aliasee);

  // Type, Parent and AddressSpace taken from the Aliasee.
  static GlobalAlias *create(LinkageTypes Linkage, const Twine &Name,
                             GlobalValue *Aliasee);

  // Linkage, Type, Parent and AddressSpace taken from the Aliasee.
  static GlobalAlias *create(const Twine &Name, GlobalValue *Aliasee);
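
  // Example (a hedged sketch): alias an existing function `F` under a second
  // name; linkage, type, parent, and address space are taken from `F`.
  //
  //   GlobalAlias *GA = GlobalAlias::create("my_alias", F);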

  // allocate space for exactly one operand
  void *operator new(size_t S) { return User::operator new(S, 1); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);

  void copyAttributesFrom(const GlobalAlias *Src) {
    GlobalValue::copyAttributesFrom(Src);
  }

  /// removeFromParent - This method unlinks 'this' from the containing module,
  /// but does not delete it.
  ///
  void removeFromParent();

  /// eraseFromParent - This method unlinks 'this' from the containing module
  /// and deletes it.
  ///
  void eraseFromParent();

  /// These methods retrieve and set alias target.
  void setAliasee(Constant *Aliasee);
  const Constant *getAliasee() const {
    return static_cast<Constant *>(Op<0>().get());
  }
  Constant *getAliasee() { return static_cast<Constant *>(Op<0>().get()); }

  const GlobalObject *getAliaseeObject() const;
  GlobalObject *getAliaseeObject() {
    return const_cast<GlobalObject *>(
        static_cast<const GlobalAlias *>(this)->getAliaseeObject());
  }

  static bool isValidLinkage(LinkageTypes L) {
    return isExternalLinkage(L) || isLocalLinkage(L) || isWeakLinkage(L) ||
           isLinkOnceLinkage(L) || isAvailableExternallyLinkage(L);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::GlobalAliasVal;
  }
};

template <>
struct OperandTraits<GlobalAlias>
    : public FixedNumOperandTraits<GlobalAlias, 1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalAlias, Constant)

} // end namespace llvm

#endif // LLVM_IR_GLOBALALIAS_H
//===---- llvm/MDBuilder.h - Builder for LLVM metadata ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MDBuilder class, which is used as a convenient way to
// create LLVM metadata with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MDBUILDER_H
#define LLVM_IR_MDBUILDER_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/DataTypes.h"
#include <utility>

namespace llvm {

class APInt;
template <typename T> class ArrayRef;
class LLVMContext;
class Constant;
class ConstantAsMetadata;
class Function;
class MDNode;
class MDString;
class Metadata;

class MDBuilder {
  LLVMContext &Context;

public:
  MDBuilder(LLVMContext &context) : Context(context) {}

  /// Return the given string as metadata.
  MDString *createString(StringRef Str);

  /// Return the given constant as metadata.
  ConstantAsMetadata *createConstant(Constant *C);

  //===------------------------------------------------------------------===//
  // FPMath metadata.
  //===------------------------------------------------------------------===//

  /// Return metadata with the given settings.  The special value 0.0
  /// for the Accuracy parameter indicates the default (maximal precision)
  /// setting.
  MDNode *createFPMath(float Accuracy);

  //===------------------------------------------------------------------===//
  // Prof metadata.
  //===------------------------------------------------------------------===//

  /// Return metadata containing two branch weights.
  MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight);

  /// Return metadata containing a number of branch weights.
  MDNode *createBranchWeights(ArrayRef<uint32_t> Weights);
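
  // Example (a hedged sketch): attach 90/10 weights to a conditional branch
  // `Br` (a BranchInst *).
  //
  //   MDBuilder MDB(Br->getContext());
  //   Br->setMetadata(LLVMContext::MD_prof,
  //                   MDB.createBranchWeights(/*TrueWeight=*/90,
  //                                           /*FalseWeight=*/10));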

  /// Return metadata specifying that a branch or switch is unpredictable.
  MDNode *createUnpredictable();

  /// Return metadata containing the entry \p Count for a function, a boolean
  /// \p Synthetic indicating whether the counts were synthesized, and the
  /// GUIDs stored in \p Imports that need to be imported for sample PGO, to
  /// enable the same inlines as the profiled optimized binary.
  MDNode *createFunctionEntryCount(uint64_t Count, bool Synthetic,
                                   const DenseSet<GlobalValue::GUID> *Imports);

  /// Return metadata containing the section prefix for a function.
  MDNode *createFunctionSectionPrefix(StringRef Prefix);

  /// Return metadata containing the pseudo probe descriptor for a function.
  MDNode *createPseudoProbeDesc(uint64_t GUID, uint64_t Hash, StringRef FName);

  /// Return metadata containing llvm statistics.
  MDNode *
  createLLVMStats(ArrayRef<std::pair<StringRef, uint64_t>> LLVMStatsVec);

  //===------------------------------------------------------------------===//
  // Range metadata.
  //===------------------------------------------------------------------===//

  /// Return metadata describing the range [Lo, Hi).
  MDNode *createRange(const APInt &Lo, const APInt &Hi);

  /// Return metadata describing the range [Lo, Hi).
  MDNode *createRange(Constant *Lo, Constant *Hi);

  //===------------------------------------------------------------------===//
  // Callees metadata.
  //===------------------------------------------------------------------===//

  /// Return metadata indicating the possible callees of indirect
  /// calls.
  MDNode *createCallees(ArrayRef<Function *> Callees);

  //===------------------------------------------------------------------===//
  // Callback metadata.
  //===------------------------------------------------------------------===//

  /// Return metadata describing a callback (see llvm::AbstractCallSite).
  MDNode *createCallbackEncoding(unsigned CalleeArgNo, ArrayRef<int> Arguments,
                                 bool VarArgsArePassed);

  /// Merge the new callback encoding \p NewCB into \p ExistingCallbacks.
  MDNode *mergeCallbackEncodings(MDNode *ExistingCallbacks, MDNode *NewCB);

  /// Return metadata that tells CodeGen how to generate a function prologue
  /// for the "function" sanitizer.
  MDNode *createRTTIPointerPrologue(Constant *PrologueSig, Constant *RTTI);

  //===------------------------------------------------------------------===//
  // PC sections metadata.
  //===------------------------------------------------------------------===//

  /// A pair of a PC section name and auxiliary constant data.
  using PCSection = std::pair<StringRef, SmallVector<Constant *>>;

  /// Return metadata for PC sections.
  MDNode *createPCSections(ArrayRef<PCSection> Sections);

  //===------------------------------------------------------------------===//
  // AA metadata.
  //===------------------------------------------------------------------===//

protected:
  /// Return metadata appropriate for an AA root node (scope or TBAA).
  /// Each returned node is distinct from all other metadata and will never
  /// be identified (uniqued) with anything else.
  MDNode *createAnonymousAARoot(StringRef Name = StringRef(),
                                MDNode *Extra = nullptr);

public:
  /// Return metadata appropriate for a TBAA root node. Each returned
  /// node is distinct from all other metadata and will never be identified
  /// (uniqued) with anything else.
  MDNode *createAnonymousTBAARoot() {
    return createAnonymousAARoot();
  }

  /// Return metadata appropriate for an alias scope domain node.
  /// Each returned node is distinct from all other metadata and will never
  /// be identified (uniqued) with anything else.
  MDNode *createAnonymousAliasScopeDomain(StringRef Name = StringRef()) {
    return createAnonymousAARoot(Name);
  }

  /// Return metadata appropriate for an alias scope root node.
  /// Each returned node is distinct from all other metadata and will never
  /// be identified (uniqued) with anything else.
  MDNode *createAnonymousAliasScope(MDNode *Domain,
                                    StringRef Name = StringRef()) {
    return createAnonymousAARoot(Name, Domain);
  }

  /// Return metadata appropriate for a TBAA root node with the given
  /// name.  This may be identified (uniqued) with other roots with the same
  /// name.
  MDNode *createTBAARoot(StringRef Name);

  /// Return metadata appropriate for an alias scope domain node with
  /// the given name. This may be identified (uniqued) with other roots with
  /// the same name.
  MDNode *createAliasScopeDomain(StringRef Name);

  /// Return metadata appropriate for an alias scope node with
  /// the given name. This may be identified (uniqued) with other scopes with
  /// the same name and domain.
  MDNode *createAliasScope(StringRef Name, MDNode *Domain);

  /// Return metadata for a non-root TBAA node with the given name,
  /// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
  MDNode *createTBAANode(StringRef Name, MDNode *Parent,
                         bool isConstant = false);

  struct TBAAStructField {
    uint64_t Offset;
    uint64_t Size;
    MDNode *Type;
    TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *Type) :
      Offset(Offset), Size(Size), Type(Type) {}
  };

  /// Return metadata for a tbaa.struct node with the given
  /// struct field descriptions.
  MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields);

  /// Return metadata for a TBAA struct node in the type DAG with the given
  /// name and a list of (field type in the type DAG, offset) pairs.
  MDNode *
  createTBAAStructTypeNode(StringRef Name,
                           ArrayRef<std::pair<MDNode *, uint64_t>> Fields);

  /// Return metadata for a TBAA scalar type node with the
  /// given name, an offset and a parent in the TBAA type DAG.
  MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
                                   uint64_t Offset = 0);

  /// Return metadata for a TBAA tag node with the given
  /// base type, access type and offset relative to the base type.
  MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
                                  uint64_t Offset, bool IsConstant = false);
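
  // Example (a hedged sketch): build a minimal scalar TBAA tag for an `int`
  // access, suitable for Instruction::setMetadata(LLVMContext::MD_tbaa, ...).
  //
  //   MDNode *Root = MDB.createTBAARoot("example tbaa root");
  //   MDNode *IntTy = MDB.createTBAAScalarTypeNode("int", Root);
  //   MDNode *Tag = MDB.createTBAAStructTagNode(IntTy, IntTy, /*Offset=*/0);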

  /// Return metadata for a TBAA type node in the TBAA type DAG with the
  /// given parent type, size in bytes, type identifier and a list of fields.
  MDNode *createTBAATypeNode(MDNode *Parent, uint64_t Size, Metadata *Id,
                             ArrayRef<TBAAStructField> Fields =
                                 ArrayRef<TBAAStructField>());

  /// Return metadata for a TBAA access tag with the given base type,
  /// final access type, offset of the access relative to the base type, size of
  /// the access and flag indicating whether the accessed object can be
  /// considered immutable for the purposes of the TBAA analysis.
  MDNode *createTBAAAccessTag(MDNode *BaseType, MDNode *AccessType,
                              uint64_t Offset, uint64_t Size,
                              bool IsImmutable = false);

  /// Return mutable version of the given mutable or immutable TBAA
  /// access tag.
  MDNode *createMutableTBAAAccessTag(MDNode *Tag);

  /// Return metadata containing an irreducible loop header weight.
  MDNode *createIrrLoopHeaderWeight(uint64_t Weight);
};

} // end namespace llvm

#endif
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_ARM_ENUMS_H
#define LLVM_IR_INTRINSIC_ARM_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum ARMIntrinsics : unsigned {
// Enum values for intrinsics
    arm_cde_cx1 = 2663,                               // llvm.arm.cde.cx1
    arm_cde_cx1a,                              // llvm.arm.cde.cx1a
    arm_cde_cx1d,                              // llvm.arm.cde.cx1d
    arm_cde_cx1da,                             // llvm.arm.cde.cx1da
    arm_cde_cx2,                               // llvm.arm.cde.cx2
    arm_cde_cx2a,                              // llvm.arm.cde.cx2a
    arm_cde_cx2d,                              // llvm.arm.cde.cx2d
    arm_cde_cx2da,                             // llvm.arm.cde.cx2da
    arm_cde_cx3,                               // llvm.arm.cde.cx3
    arm_cde_cx3a,                              // llvm.arm.cde.cx3a
    arm_cde_cx3d,                              // llvm.arm.cde.cx3d
    arm_cde_cx3da,                             // llvm.arm.cde.cx3da
    arm_cde_vcx1,                              // llvm.arm.cde.vcx1
    arm_cde_vcx1a,                             // llvm.arm.cde.vcx1a
    arm_cde_vcx1q,                             // llvm.arm.cde.vcx1q
    arm_cde_vcx1q_predicated,                  // llvm.arm.cde.vcx1q.predicated
    arm_cde_vcx1qa,                            // llvm.arm.cde.vcx1qa
    arm_cde_vcx1qa_predicated,                 // llvm.arm.cde.vcx1qa.predicated
    arm_cde_vcx2,                              // llvm.arm.cde.vcx2
    arm_cde_vcx2a,                             // llvm.arm.cde.vcx2a
    arm_cde_vcx2q,                             // llvm.arm.cde.vcx2q
    arm_cde_vcx2q_predicated,                  // llvm.arm.cde.vcx2q.predicated
    arm_cde_vcx2qa,                            // llvm.arm.cde.vcx2qa
    arm_cde_vcx2qa_predicated,                 // llvm.arm.cde.vcx2qa.predicated
    arm_cde_vcx3,                              // llvm.arm.cde.vcx3
    arm_cde_vcx3a,                             // llvm.arm.cde.vcx3a
    arm_cde_vcx3q,                             // llvm.arm.cde.vcx3q
    arm_cde_vcx3q_predicated,                  // llvm.arm.cde.vcx3q.predicated
    arm_cde_vcx3qa,                            // llvm.arm.cde.vcx3qa
    arm_cde_vcx3qa_predicated,                 // llvm.arm.cde.vcx3qa.predicated
    arm_cdp,                                   // llvm.arm.cdp
    arm_cdp2,                                  // llvm.arm.cdp2
    arm_clrex,                                 // llvm.arm.clrex
    arm_cls,                                   // llvm.arm.cls
    arm_cls64,                                 // llvm.arm.cls64
    arm_cmse_tt,                               // llvm.arm.cmse.tt
    arm_cmse_tta,                              // llvm.arm.cmse.tta
    arm_cmse_ttat,                             // llvm.arm.cmse.ttat
    arm_cmse_ttt,                              // llvm.arm.cmse.ttt
    arm_crc32b,                                // llvm.arm.crc32b
    arm_crc32cb,                               // llvm.arm.crc32cb
    arm_crc32ch,                               // llvm.arm.crc32ch
    arm_crc32cw,                               // llvm.arm.crc32cw
    arm_crc32h,                                // llvm.arm.crc32h
    arm_crc32w,                                // llvm.arm.crc32w
    arm_dbg,                                   // llvm.arm.dbg
    arm_dmb,                                   // llvm.arm.dmb
    arm_dsb,                                   // llvm.arm.dsb
    arm_get_fpscr,                             // llvm.arm.get.fpscr
    arm_gnu_eabi_mcount,                       // llvm.arm.gnu.eabi.mcount
    arm_hint,                                  // llvm.arm.hint
    arm_isb,                                   // llvm.arm.isb
    arm_ldaex,                                 // llvm.arm.ldaex
    arm_ldaexd,                                // llvm.arm.ldaexd
    arm_ldc,                                   // llvm.arm.ldc
    arm_ldc2,                                  // llvm.arm.ldc2
    arm_ldc2l,                                 // llvm.arm.ldc2l
    arm_ldcl,                                  // llvm.arm.ldcl
    arm_ldrex,                                 // llvm.arm.ldrex
    arm_ldrexd,                                // llvm.arm.ldrexd
    arm_mcr,                                   // llvm.arm.mcr
    arm_mcr2,                                  // llvm.arm.mcr2
    arm_mcrr,                                  // llvm.arm.mcrr
    arm_mcrr2,                                 // llvm.arm.mcrr2
    arm_mrc,                                   // llvm.arm.mrc
    arm_mrc2,                                  // llvm.arm.mrc2
    arm_mrrc,                                  // llvm.arm.mrrc
    arm_mrrc2,                                 // llvm.arm.mrrc2
    arm_mve_abd_predicated,                    // llvm.arm.mve.abd.predicated
    arm_mve_abs_predicated,                    // llvm.arm.mve.abs.predicated
    arm_mve_add_predicated,                    // llvm.arm.mve.add.predicated
    arm_mve_addlv,                             // llvm.arm.mve.addlv
    arm_mve_addlv_predicated,                  // llvm.arm.mve.addlv.predicated
    arm_mve_addv,                              // llvm.arm.mve.addv
    arm_mve_addv_predicated,                   // llvm.arm.mve.addv.predicated
    arm_mve_and_predicated,                    // llvm.arm.mve.and.predicated
    arm_mve_asrl,                              // llvm.arm.mve.asrl
    arm_mve_bic_predicated,                    // llvm.arm.mve.bic.predicated
    arm_mve_cls_predicated,                    // llvm.arm.mve.cls.predicated
    arm_mve_clz_predicated,                    // llvm.arm.mve.clz.predicated
    arm_mve_eor_predicated,                    // llvm.arm.mve.eor.predicated
    arm_mve_fma_predicated,                    // llvm.arm.mve.fma.predicated
    arm_mve_hadd_predicated,                   // llvm.arm.mve.hadd.predicated
    arm_mve_hsub_predicated,                   // llvm.arm.mve.hsub.predicated
    arm_mve_lsll,                              // llvm.arm.mve.lsll
    arm_mve_max_predicated,                    // llvm.arm.mve.max.predicated
    arm_mve_maxav,                             // llvm.arm.mve.maxav
    arm_mve_maxav_predicated,                  // llvm.arm.mve.maxav.predicated
    arm_mve_maxnmav,                           // llvm.arm.mve.maxnmav
    arm_mve_maxnmav_predicated,                // llvm.arm.mve.maxnmav.predicated
    arm_mve_maxnmv,                            // llvm.arm.mve.maxnmv
    arm_mve_maxnmv_predicated,                 // llvm.arm.mve.maxnmv.predicated
    arm_mve_maxv,                              // llvm.arm.mve.maxv
    arm_mve_maxv_predicated,                   // llvm.arm.mve.maxv.predicated
    arm_mve_min_predicated,                    // llvm.arm.mve.min.predicated
    arm_mve_minav,                             // llvm.arm.mve.minav
    arm_mve_minav_predicated,                  // llvm.arm.mve.minav.predicated
    arm_mve_minnmav,                           // llvm.arm.mve.minnmav
    arm_mve_minnmav_predicated,                // llvm.arm.mve.minnmav.predicated
    arm_mve_minnmv,                            // llvm.arm.mve.minnmv
    arm_mve_minnmv_predicated,                 // llvm.arm.mve.minnmv.predicated
    arm_mve_minv,                              // llvm.arm.mve.minv
    arm_mve_minv_predicated,                   // llvm.arm.mve.minv.predicated
    arm_mve_mul_predicated,                    // llvm.arm.mve.mul.predicated
    arm_mve_mulh_predicated,                   // llvm.arm.mve.mulh.predicated
    arm_mve_mull_int_predicated,               // llvm.arm.mve.mull.int.predicated
    arm_mve_mull_poly_predicated,              // llvm.arm.mve.mull.poly.predicated
    arm_mve_mvn_predicated,                    // llvm.arm.mve.mvn.predicated
    arm_mve_neg_predicated,                    // llvm.arm.mve.neg.predicated
    arm_mve_orn_predicated,                    // llvm.arm.mve.orn.predicated
    arm_mve_orr_predicated,                    // llvm.arm.mve.orr.predicated
    arm_mve_pred_i2v,                          // llvm.arm.mve.pred.i2v
    arm_mve_pred_v2i,                          // llvm.arm.mve.pred.v2i
    arm_mve_qabs_predicated,                   // llvm.arm.mve.qabs.predicated
    arm_mve_qadd_predicated,                   // llvm.arm.mve.qadd.predicated
    arm_mve_qdmulh_predicated,                 // llvm.arm.mve.qdmulh.predicated
    arm_mve_qneg_predicated,                   // llvm.arm.mve.qneg.predicated
    arm_mve_qrdmulh_predicated,                // llvm.arm.mve.qrdmulh.predicated
    arm_mve_qsub_predicated,                   // llvm.arm.mve.qsub.predicated
    arm_mve_rhadd_predicated,                  // llvm.arm.mve.rhadd.predicated
    arm_mve_rmulh_predicated,                  // llvm.arm.mve.rmulh.predicated
    arm_mve_shl_imm_predicated,                // llvm.arm.mve.shl.imm.predicated
    arm_mve_shr_imm_predicated,                // llvm.arm.mve.shr.imm.predicated
    arm_mve_sqrshr,                            // llvm.arm.mve.sqrshr
    arm_mve_sqrshrl,                           // llvm.arm.mve.sqrshrl
    arm_mve_sqshl,                             // llvm.arm.mve.sqshl
    arm_mve_sqshll,                            // llvm.arm.mve.sqshll
    arm_mve_srshr,                             // llvm.arm.mve.srshr
    arm_mve_srshrl,                            // llvm.arm.mve.srshrl
    arm_mve_sub_predicated,                    // llvm.arm.mve.sub.predicated
    arm_mve_uqrshl,                            // llvm.arm.mve.uqrshl
    arm_mve_uqrshll,                           // llvm.arm.mve.uqrshll
    arm_mve_uqshl,                             // llvm.arm.mve.uqshl
    arm_mve_uqshll,                            // llvm.arm.mve.uqshll
    arm_mve_urshr,                             // llvm.arm.mve.urshr
    arm_mve_urshrl,                            // llvm.arm.mve.urshrl
    arm_mve_vabav,                             // llvm.arm.mve.vabav
    arm_mve_vabav_predicated,                  // llvm.arm.mve.vabav.predicated
    arm_mve_vabd,                              // llvm.arm.mve.vabd
    arm_mve_vadc,                              // llvm.arm.mve.vadc
    arm_mve_vadc_predicated,                   // llvm.arm.mve.vadc.predicated
    arm_mve_vbrsr,                             // llvm.arm.mve.vbrsr
    arm_mve_vbrsr_predicated,                  // llvm.arm.mve.vbrsr.predicated
    arm_mve_vcaddq,                            // llvm.arm.mve.vcaddq
    arm_mve_vcaddq_predicated,                 // llvm.arm.mve.vcaddq.predicated
    arm_mve_vcls,                              // llvm.arm.mve.vcls
    arm_mve_vcmlaq,                            // llvm.arm.mve.vcmlaq
    arm_mve_vcmlaq_predicated,                 // llvm.arm.mve.vcmlaq.predicated
    arm_mve_vcmulq,                            // llvm.arm.mve.vcmulq
    arm_mve_vcmulq_predicated,                 // llvm.arm.mve.vcmulq.predicated
    arm_mve_vctp16,                            // llvm.arm.mve.vctp16
    arm_mve_vctp32,                            // llvm.arm.mve.vctp32
    arm_mve_vctp64,                            // llvm.arm.mve.vctp64
    arm_mve_vctp8,                             // llvm.arm.mve.vctp8
    arm_mve_vcvt_fix,                          // llvm.arm.mve.vcvt.fix
    arm_mve_vcvt_fix_predicated,               // llvm.arm.mve.vcvt.fix.predicated
    arm_mve_vcvt_fp_int_predicated,            // llvm.arm.mve.vcvt.fp.int.predicated
    arm_mve_vcvt_narrow,                       // llvm.arm.mve.vcvt.narrow
    arm_mve_vcvt_narrow_predicated,            // llvm.arm.mve.vcvt.narrow.predicated
    arm_mve_vcvt_widen,                        // llvm.arm.mve.vcvt.widen
    arm_mve_vcvt_widen_predicated,             // llvm.arm.mve.vcvt.widen.predicated
    arm_mve_vcvta,                             // llvm.arm.mve.vcvta
    arm_mve_vcvta_predicated,                  // llvm.arm.mve.vcvta.predicated
    arm_mve_vcvtm,                             // llvm.arm.mve.vcvtm
    arm_mve_vcvtm_predicated,                  // llvm.arm.mve.vcvtm.predicated
    arm_mve_vcvtn,                             // llvm.arm.mve.vcvtn
    arm_mve_vcvtn_predicated,                  // llvm.arm.mve.vcvtn.predicated
    arm_mve_vcvtp,                             // llvm.arm.mve.vcvtp
    arm_mve_vcvtp_predicated,                  // llvm.arm.mve.vcvtp.predicated
    arm_mve_vddup,                             // llvm.arm.mve.vddup
    arm_mve_vddup_predicated,                  // llvm.arm.mve.vddup.predicated
    arm_mve_vdwdup,                            // llvm.arm.mve.vdwdup
    arm_mve_vdwdup_predicated,                 // llvm.arm.mve.vdwdup.predicated
    arm_mve_vhadd,                             // llvm.arm.mve.vhadd
    arm_mve_vhsub,                             // llvm.arm.mve.vhsub
    arm_mve_vidup,                             // llvm.arm.mve.vidup
    arm_mve_vidup_predicated,                  // llvm.arm.mve.vidup.predicated
    arm_mve_viwdup,                            // llvm.arm.mve.viwdup
    arm_mve_viwdup_predicated,                 // llvm.arm.mve.viwdup.predicated
    arm_mve_vld2q,                             // llvm.arm.mve.vld2q
    arm_mve_vld4q,                             // llvm.arm.mve.vld4q
    arm_mve_vldr_gather_base,                  // llvm.arm.mve.vldr.gather.base
    arm_mve_vldr_gather_base_predicated,       // llvm.arm.mve.vldr.gather.base.predicated
    arm_mve_vldr_gather_base_wb,               // llvm.arm.mve.vldr.gather.base.wb
    arm_mve_vldr_gather_base_wb_predicated,    // llvm.arm.mve.vldr.gather.base.wb.predicated
    arm_mve_vldr_gather_offset,                // llvm.arm.mve.vldr.gather.offset
    arm_mve_vldr_gather_offset_predicated,     // llvm.arm.mve.vldr.gather.offset.predicated
    arm_mve_vmaxa_predicated,                  // llvm.arm.mve.vmaxa.predicated
    arm_mve_vmaxnma_predicated,                // llvm.arm.mve.vmaxnma.predicated
    arm_mve_vmina_predicated,                  // llvm.arm.mve.vmina.predicated
    arm_mve_vminnma_predicated,                // llvm.arm.mve.vminnma.predicated
    arm_mve_vmla_n_predicated,                 // llvm.arm.mve.vmla.n.predicated
    arm_mve_vmlas_n_predicated,                // llvm.arm.mve.vmlas.n.predicated
    arm_mve_vmldava,                           // llvm.arm.mve.vmldava
    arm_mve_vmldava_predicated,                // llvm.arm.mve.vmldava.predicated
    arm_mve_vmlldava,                          // llvm.arm.mve.vmlldava
    arm_mve_vmlldava_predicated,               // llvm.arm.mve.vmlldava.predicated
    arm_mve_vmovl_predicated,                  // llvm.arm.mve.vmovl.predicated
    arm_mve_vmovn_predicated,                  // llvm.arm.mve.vmovn.predicated
    arm_mve_vmulh,                             // llvm.arm.mve.vmulh
    arm_mve_vmull,                             // llvm.arm.mve.vmull
    arm_mve_vmull_poly,                        // llvm.arm.mve.vmull.poly
    arm_mve_vqdmlad,                           // llvm.arm.mve.vqdmlad
    arm_mve_vqdmlad_predicated,                // llvm.arm.mve.vqdmlad.predicated
    arm_mve_vqdmlah,                           // llvm.arm.mve.vqdmlah
    arm_mve_vqdmlah_predicated,                // llvm.arm.mve.vqdmlah.predicated
    arm_mve_vqdmlash,                          // llvm.arm.mve.vqdmlash
    arm_mve_vqdmlash_predicated,               // llvm.arm.mve.vqdmlash.predicated
    arm_mve_vqdmulh,                           // llvm.arm.mve.vqdmulh
    arm_mve_vqdmull,                           // llvm.arm.mve.vqdmull
    arm_mve_vqdmull_predicated,                // llvm.arm.mve.vqdmull.predicated
    arm_mve_vqmovn,                            // llvm.arm.mve.vqmovn
    arm_mve_vqmovn_predicated,                 // llvm.arm.mve.vqmovn.predicated
    arm_mve_vqrdmlah,                          // llvm.arm.mve.vqrdmlah
    arm_mve_vqrdmlah_predicated,               // llvm.arm.mve.vqrdmlah.predicated
    arm_mve_vqrdmlash,                         // llvm.arm.mve.vqrdmlash
    arm_mve_vqrdmlash_predicated,              // llvm.arm.mve.vqrdmlash.predicated
    arm_mve_vqrdmulh,                          // llvm.arm.mve.vqrdmulh
    arm_mve_vqshl_imm,                         // llvm.arm.mve.vqshl.imm
    arm_mve_vqshl_imm_predicated,              // llvm.arm.mve.vqshl.imm.predicated
    arm_mve_vqshlu_imm,                        // llvm.arm.mve.vqshlu.imm
    arm_mve_vqshlu_imm_predicated,             // llvm.arm.mve.vqshlu.imm.predicated
    arm_mve_vreinterpretq,                     // llvm.arm.mve.vreinterpretq
    arm_mve_vrev_predicated,                   // llvm.arm.mve.vrev.predicated
    arm_mve_vrhadd,                            // llvm.arm.mve.vrhadd
    arm_mve_vrinta_predicated,                 // llvm.arm.mve.vrinta.predicated
    arm_mve_vrintm_predicated,                 // llvm.arm.mve.vrintm.predicated
    arm_mve_vrintn,                            // llvm.arm.mve.vrintn
    arm_mve_vrintn_predicated,                 // llvm.arm.mve.vrintn.predicated
    arm_mve_vrintp_predicated,                 // llvm.arm.mve.vrintp.predicated
    arm_mve_vrintx_predicated,                 // llvm.arm.mve.vrintx.predicated
    arm_mve_vrintz_predicated,                 // llvm.arm.mve.vrintz.predicated
    arm_mve_vrmlldavha,                        // llvm.arm.mve.vrmlldavha
    arm_mve_vrmlldavha_predicated,             // llvm.arm.mve.vrmlldavha.predicated
    arm_mve_vrmulh,                            // llvm.arm.mve.vrmulh
    arm_mve_vrshr_imm,                         // llvm.arm.mve.vrshr.imm
    arm_mve_vrshr_imm_predicated,              // llvm.arm.mve.vrshr.imm.predicated
    arm_mve_vsbc,                              // llvm.arm.mve.vsbc
    arm_mve_vsbc_predicated,                   // llvm.arm.mve.vsbc.predicated
    arm_mve_vshl_scalar,                       // llvm.arm.mve.vshl.scalar
    arm_mve_vshl_scalar_predicated,            // llvm.arm.mve.vshl.scalar.predicated
    arm_mve_vshl_vector,                       // llvm.arm.mve.vshl.vector
    arm_mve_vshl_vector_predicated,            // llvm.arm.mve.vshl.vector.predicated
    arm_mve_vshlc,                             // llvm.arm.mve.vshlc
    arm_mve_vshlc_predicated,                  // llvm.arm.mve.vshlc.predicated
    arm_mve_vshll_imm,                         // llvm.arm.mve.vshll.imm
    arm_mve_vshll_imm_predicated,              // llvm.arm.mve.vshll.imm.predicated
    arm_mve_vshrn,                             // llvm.arm.mve.vshrn
    arm_mve_vshrn_predicated,                  // llvm.arm.mve.vshrn.predicated
    arm_mve_vsli,                              // llvm.arm.mve.vsli
    arm_mve_vsli_predicated,                   // llvm.arm.mve.vsli.predicated
    arm_mve_vsri,                              // llvm.arm.mve.vsri
    arm_mve_vsri_predicated,                   // llvm.arm.mve.vsri.predicated
    arm_mve_vst2q,                             // llvm.arm.mve.vst2q
    arm_mve_vst4q,                             // llvm.arm.mve.vst4q
    arm_mve_vstr_scatter_base,                 // llvm.arm.mve.vstr.scatter.base
    arm_mve_vstr_scatter_base_predicated,      // llvm.arm.mve.vstr.scatter.base.predicated
    arm_mve_vstr_scatter_base_wb,              // llvm.arm.mve.vstr.scatter.base.wb
    arm_mve_vstr_scatter_base_wb_predicated,   // llvm.arm.mve.vstr.scatter.base.wb.predicated
    arm_mve_vstr_scatter_offset,               // llvm.arm.mve.vstr.scatter.offset
    arm_mve_vstr_scatter_offset_predicated,    // llvm.arm.mve.vstr.scatter.offset.predicated
    arm_neon_aesd,                             // llvm.arm.neon.aesd
    arm_neon_aese,                             // llvm.arm.neon.aese
    arm_neon_aesimc,                           // llvm.arm.neon.aesimc
    arm_neon_aesmc,                            // llvm.arm.neon.aesmc
    arm_neon_bfdot,                            // llvm.arm.neon.bfdot
    arm_neon_bfmlalb,                          // llvm.arm.neon.bfmlalb
    arm_neon_bfmlalt,                          // llvm.arm.neon.bfmlalt
    arm_neon_bfmmla,                           // llvm.arm.neon.bfmmla
    arm_neon_sdot,                             // llvm.arm.neon.sdot
    arm_neon_sha1c,                            // llvm.arm.neon.sha1c
    arm_neon_sha1h,                            // llvm.arm.neon.sha1h
    arm_neon_sha1m,                            // llvm.arm.neon.sha1m
    arm_neon_sha1p,                            // llvm.arm.neon.sha1p
    arm_neon_sha1su0,                          // llvm.arm.neon.sha1su0
    arm_neon_sha1su1,                          // llvm.arm.neon.sha1su1
    arm_neon_sha256h,                          // llvm.arm.neon.sha256h
    arm_neon_sha256h2,                         // llvm.arm.neon.sha256h2
    arm_neon_sha256su0,                        // llvm.arm.neon.sha256su0
    arm_neon_sha256su1,                        // llvm.arm.neon.sha256su1
    arm_neon_smmla,                            // llvm.arm.neon.smmla
    arm_neon_udot,                             // llvm.arm.neon.udot
    arm_neon_ummla,                            // llvm.arm.neon.ummla
    arm_neon_usdot,                            // llvm.arm.neon.usdot
    arm_neon_usmmla,                           // llvm.arm.neon.usmmla
    arm_neon_vabds,                            // llvm.arm.neon.vabds
    arm_neon_vabdu,                            // llvm.arm.neon.vabdu
    arm_neon_vabs,                             // llvm.arm.neon.vabs
    arm_neon_vacge,                            // llvm.arm.neon.vacge
    arm_neon_vacgt,                            // llvm.arm.neon.vacgt
    arm_neon_vbsl,                             // llvm.arm.neon.vbsl
    arm_neon_vcadd_rot270,                     // llvm.arm.neon.vcadd.rot270
    arm_neon_vcadd_rot90,                      // llvm.arm.neon.vcadd.rot90
    arm_neon_vcls,                             // llvm.arm.neon.vcls
    arm_neon_vcvtas,                           // llvm.arm.neon.vcvtas
    arm_neon_vcvtau,                           // llvm.arm.neon.vcvtau
    arm_neon_vcvtbfp2bf,                       // llvm.arm.neon.vcvtbfp2bf
    arm_neon_vcvtfp2bf,                        // llvm.arm.neon.vcvtfp2bf
    arm_neon_vcvtfp2fxs,                       // llvm.arm.neon.vcvtfp2fxs
    arm_neon_vcvtfp2fxu,                       // llvm.arm.neon.vcvtfp2fxu
    arm_neon_vcvtfp2hf,                        // llvm.arm.neon.vcvtfp2hf
    arm_neon_vcvtfxs2fp,                       // llvm.arm.neon.vcvtfxs2fp
    arm_neon_vcvtfxu2fp,                       // llvm.arm.neon.vcvtfxu2fp
    arm_neon_vcvthf2fp,                        // llvm.arm.neon.vcvthf2fp
    arm_neon_vcvtms,                           // llvm.arm.neon.vcvtms
    arm_neon_vcvtmu,                           // llvm.arm.neon.vcvtmu
    arm_neon_vcvtns,                           // llvm.arm.neon.vcvtns
    arm_neon_vcvtnu,                           // llvm.arm.neon.vcvtnu
    arm_neon_vcvtps,                           // llvm.arm.neon.vcvtps
    arm_neon_vcvtpu,                           // llvm.arm.neon.vcvtpu
    arm_neon_vhadds,                           // llvm.arm.neon.vhadds
    arm_neon_vhaddu,                           // llvm.arm.neon.vhaddu
    arm_neon_vhsubs,                           // llvm.arm.neon.vhsubs
    arm_neon_vhsubu,                           // llvm.arm.neon.vhsubu
    arm_neon_vld1,                             // llvm.arm.neon.vld1
    arm_neon_vld1x2,                           // llvm.arm.neon.vld1x2
    arm_neon_vld1x3,                           // llvm.arm.neon.vld1x3
    arm_neon_vld1x4,                           // llvm.arm.neon.vld1x4
    arm_neon_vld2,                             // llvm.arm.neon.vld2
    arm_neon_vld2dup,                          // llvm.arm.neon.vld2dup
    arm_neon_vld2lane,                         // llvm.arm.neon.vld2lane
    arm_neon_vld3,                             // llvm.arm.neon.vld3
    arm_neon_vld3dup,                          // llvm.arm.neon.vld3dup
    arm_neon_vld3lane,                         // llvm.arm.neon.vld3lane
    arm_neon_vld4,                             // llvm.arm.neon.vld4
    arm_neon_vld4dup,                          // llvm.arm.neon.vld4dup
    arm_neon_vld4lane,                         // llvm.arm.neon.vld4lane
    arm_neon_vmaxnm,                           // llvm.arm.neon.vmaxnm
    arm_neon_vmaxs,                            // llvm.arm.neon.vmaxs
    arm_neon_vmaxu,                            // llvm.arm.neon.vmaxu
    arm_neon_vminnm,                           // llvm.arm.neon.vminnm
    arm_neon_vmins,                            // llvm.arm.neon.vmins
    arm_neon_vminu,                            // llvm.arm.neon.vminu
    arm_neon_vmullp,                           // llvm.arm.neon.vmullp
    arm_neon_vmulls,                           // llvm.arm.neon.vmulls
    arm_neon_vmullu,                           // llvm.arm.neon.vmullu
    arm_neon_vmulp,                            // llvm.arm.neon.vmulp
    arm_neon_vpadals,                          // llvm.arm.neon.vpadals
    arm_neon_vpadalu,                          // llvm.arm.neon.vpadalu
    arm_neon_vpadd,                            // llvm.arm.neon.vpadd
    arm_neon_vpaddls,                          // llvm.arm.neon.vpaddls
    arm_neon_vpaddlu,                          // llvm.arm.neon.vpaddlu
    arm_neon_vpmaxs,                           // llvm.arm.neon.vpmaxs
    arm_neon_vpmaxu,                           // llvm.arm.neon.vpmaxu
    arm_neon_vpmins,                           // llvm.arm.neon.vpmins
    arm_neon_vpminu,                           // llvm.arm.neon.vpminu
    arm_neon_vqabs,                            // llvm.arm.neon.vqabs
    arm_neon_vqdmulh,                          // llvm.arm.neon.vqdmulh
    arm_neon_vqdmull,                          // llvm.arm.neon.vqdmull
    arm_neon_vqmovns,                          // llvm.arm.neon.vqmovns
    arm_neon_vqmovnsu,                         // llvm.arm.neon.vqmovnsu
    arm_neon_vqmovnu,                          // llvm.arm.neon.vqmovnu
    arm_neon_vqneg,                            // llvm.arm.neon.vqneg
    arm_neon_vqrdmlah,                         // llvm.arm.neon.vqrdmlah
    arm_neon_vqrdmlsh,                         // llvm.arm.neon.vqrdmlsh
    arm_neon_vqrdmulh,                         // llvm.arm.neon.vqrdmulh
    arm_neon_vqrshiftns,                       // llvm.arm.neon.vqrshiftns
    arm_neon_vqrshiftnsu,                      // llvm.arm.neon.vqrshiftnsu
    arm_neon_vqrshiftnu,                       // llvm.arm.neon.vqrshiftnu
    arm_neon_vqrshifts,                        // llvm.arm.neon.vqrshifts
    arm_neon_vqrshiftu,                        // llvm.arm.neon.vqrshiftu
    arm_neon_vqshiftns,                        // llvm.arm.neon.vqshiftns
    arm_neon_vqshiftnsu,                       // llvm.arm.neon.vqshiftnsu
    arm_neon_vqshiftnu,                        // llvm.arm.neon.vqshiftnu
    arm_neon_vqshifts,                         // llvm.arm.neon.vqshifts
    arm_neon_vqshiftsu,                        // llvm.arm.neon.vqshiftsu
    arm_neon_vqshiftu,                         // llvm.arm.neon.vqshiftu
    arm_neon_vraddhn,                          // llvm.arm.neon.vraddhn
    arm_neon_vrecpe,                           // llvm.arm.neon.vrecpe
    arm_neon_vrecps,                           // llvm.arm.neon.vrecps
    arm_neon_vrhadds,                          // llvm.arm.neon.vrhadds
    arm_neon_vrhaddu,                          // llvm.arm.neon.vrhaddu
    arm_neon_vrinta,                           // llvm.arm.neon.vrinta
    arm_neon_vrintm,                           // llvm.arm.neon.vrintm
    arm_neon_vrintn,                           // llvm.arm.neon.vrintn
    arm_neon_vrintp,                           // llvm.arm.neon.vrintp
    arm_neon_vrintx,                           // llvm.arm.neon.vrintx
    arm_neon_vrintz,                           // llvm.arm.neon.vrintz
    arm_neon_vrshiftn,                         // llvm.arm.neon.vrshiftn
    arm_neon_vrshifts,                         // llvm.arm.neon.vrshifts
    arm_neon_vrshiftu,                         // llvm.arm.neon.vrshiftu
    arm_neon_vrsqrte,                          // llvm.arm.neon.vrsqrte
    arm_neon_vrsqrts,                          // llvm.arm.neon.vrsqrts
    arm_neon_vrsubhn,                          // llvm.arm.neon.vrsubhn
    arm_neon_vshiftins,                        // llvm.arm.neon.vshiftins
    arm_neon_vshifts,                          // llvm.arm.neon.vshifts
    arm_neon_vshiftu,                          // llvm.arm.neon.vshiftu
    arm_neon_vst1,                             // llvm.arm.neon.vst1
    arm_neon_vst1x2,                           // llvm.arm.neon.vst1x2
    arm_neon_vst1x3,                           // llvm.arm.neon.vst1x3
    arm_neon_vst1x4,                           // llvm.arm.neon.vst1x4
    arm_neon_vst2,                             // llvm.arm.neon.vst2
    arm_neon_vst2lane,                         // llvm.arm.neon.vst2lane
    arm_neon_vst3,                             // llvm.arm.neon.vst3
    arm_neon_vst3lane,                         // llvm.arm.neon.vst3lane
    arm_neon_vst4,                             // llvm.arm.neon.vst4
    arm_neon_vst4lane,                         // llvm.arm.neon.vst4lane
    arm_neon_vtbl1,                            // llvm.arm.neon.vtbl1
    arm_neon_vtbl2,                            // llvm.arm.neon.vtbl2
    arm_neon_vtbl3,                            // llvm.arm.neon.vtbl3
    arm_neon_vtbl4,                            // llvm.arm.neon.vtbl4
    arm_neon_vtbx1,                            // llvm.arm.neon.vtbx1
    arm_neon_vtbx2,                            // llvm.arm.neon.vtbx2
    arm_neon_vtbx3,                            // llvm.arm.neon.vtbx3
    arm_neon_vtbx4,                            // llvm.arm.neon.vtbx4
    arm_qadd,                                  // llvm.arm.qadd
    arm_qadd16,                                // llvm.arm.qadd16
    arm_qadd8,                                 // llvm.arm.qadd8
    arm_qasx,                                  // llvm.arm.qasx
    arm_qsax,                                  // llvm.arm.qsax
    arm_qsub,                                  // llvm.arm.qsub
    arm_qsub16,                                // llvm.arm.qsub16
    arm_qsub8,                                 // llvm.arm.qsub8
    arm_sadd16,                                // llvm.arm.sadd16
    arm_sadd8,                                 // llvm.arm.sadd8
    arm_sasx,                                  // llvm.arm.sasx
    arm_sel,                                   // llvm.arm.sel
    arm_set_fpscr,                             // llvm.arm.set.fpscr
    arm_shadd16,                               // llvm.arm.shadd16
    arm_shadd8,                                // llvm.arm.shadd8
    arm_shasx,                                 // llvm.arm.shasx
    arm_shsax,                                 // llvm.arm.shsax
    arm_shsub16,                               // llvm.arm.shsub16
    arm_shsub8,                                // llvm.arm.shsub8
    arm_smlabb,                                // llvm.arm.smlabb
    arm_smlabt,                                // llvm.arm.smlabt
    arm_smlad,                                 // llvm.arm.smlad
    arm_smladx,                                // llvm.arm.smladx
    arm_smlald,                                // llvm.arm.smlald
    arm_smlaldx,                               // llvm.arm.smlaldx
    arm_smlatb,                                // llvm.arm.smlatb
    arm_smlatt,                                // llvm.arm.smlatt
    arm_smlawb,                                // llvm.arm.smlawb
    arm_smlawt,                                // llvm.arm.smlawt
    arm_smlsd,                                 // llvm.arm.smlsd
    arm_smlsdx,                                // llvm.arm.smlsdx
    arm_smlsld,                                // llvm.arm.smlsld
    arm_smlsldx,                               // llvm.arm.smlsldx
    arm_smuad,                                 // llvm.arm.smuad
    arm_smuadx,                                // llvm.arm.smuadx
    arm_smulbb,                                // llvm.arm.smulbb
    arm_smulbt,                                // llvm.arm.smulbt
    arm_smultb,                                // llvm.arm.smultb
    arm_smultt,                                // llvm.arm.smultt
    arm_smulwb,                                // llvm.arm.smulwb
    arm_smulwt,                                // llvm.arm.smulwt
    arm_smusd,                                 // llvm.arm.smusd
    arm_smusdx,                                // llvm.arm.smusdx
    arm_space,                                 // llvm.arm.space
    arm_ssat,                                  // llvm.arm.ssat
    arm_ssat16,                                // llvm.arm.ssat16
    arm_ssax,                                  // llvm.arm.ssax
    arm_ssub16,                                // llvm.arm.ssub16
    arm_ssub8,                                 // llvm.arm.ssub8
    arm_stc,                                   // llvm.arm.stc
    arm_stc2,                                  // llvm.arm.stc2
    arm_stc2l,                                 // llvm.arm.stc2l
    arm_stcl,                                  // llvm.arm.stcl
    arm_stlex,                                 // llvm.arm.stlex
    arm_stlexd,                                // llvm.arm.stlexd
    arm_strex,                                 // llvm.arm.strex
    arm_strexd,                                // llvm.arm.strexd
    arm_sxtab16,                               // llvm.arm.sxtab16
    arm_sxtb16,                                // llvm.arm.sxtb16
    arm_uadd16,                                // llvm.arm.uadd16
    arm_uadd8,                                 // llvm.arm.uadd8
    arm_uasx,                                  // llvm.arm.uasx
    arm_uhadd16,                               // llvm.arm.uhadd16
    arm_uhadd8,                                // llvm.arm.uhadd8
    arm_uhasx,                                 // llvm.arm.uhasx
    arm_uhsax,                                 // llvm.arm.uhsax
    arm_uhsub16,                               // llvm.arm.uhsub16
    arm_uhsub8,                                // llvm.arm.uhsub8
    arm_undefined,                             // llvm.arm.undefined
    arm_uqadd16,                               // llvm.arm.uqadd16
    arm_uqadd8,                                // llvm.arm.uqadd8
    arm_uqasx,                                 // llvm.arm.uqasx
    arm_uqsax,                                 // llvm.arm.uqsax
    arm_uqsub16,                               // llvm.arm.uqsub16
    arm_uqsub8,                                // llvm.arm.uqsub8
    arm_usad8,                                 // llvm.arm.usad8
    arm_usada8,                                // llvm.arm.usada8
    arm_usat,                                  // llvm.arm.usat
    arm_usat16,                                // llvm.arm.usat16
    arm_usax,                                  // llvm.arm.usax
    arm_usub16,                                // llvm.arm.usub16
    arm_usub8,                                 // llvm.arm.usub8
    arm_uxtab16,                               // llvm.arm.uxtab16
    arm_uxtb16,                                // llvm.arm.uxtb16
    arm_vcvtr,                                 // llvm.arm.vcvtr
    arm_vcvtru,                                // llvm.arm.vcvtru
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
//===- llvm/IR/StructuralHash.h - IR Hashing --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides hashing of the LLVM IR structure to be used to check
// Passes modification status.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_STRUCTURALHASH_H
#define LLVM_IR_STRUCTURALHASH_H

#include <cstdint>

namespace llvm {

class Function;
class Module;

uint64_t StructuralHash(const Function &F);
uint64_t StructuralHash(const Module &M);
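
// Example usage (a minimal sketch; `runSomePass` is a hypothetical
// transformation, not part of this header): a caller can detect whether a
// transformation actually modified the IR by comparing hashes taken before
// and after it runs.
//
//   uint64_t Before = StructuralHash(F);
//   runSomePass(F);
//   bool Changed = StructuralHash(F) != Before;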

} // end namespace llvm

#endif
//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_IRBUILDER_H
#define LLVM_IR_IRBUILDER_H

#include "llvm-c/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>
#include <utility>

namespace llvm {

class APInt;
class Use;

/// This provides the default implementation of the IRBuilder
/// 'InsertHelper' method that is called whenever an instruction is created by
/// IRBuilder and needs to be inserted.
///
/// By default, this inserts the instruction at the insertion point.
class IRBuilderDefaultInserter {
public:
  virtual ~IRBuilderDefaultInserter();

  virtual void InsertHelper(Instruction *I, const Twine &Name,
                            BasicBlock *BB,
                            BasicBlock::iterator InsertPt) const {
    if (BB)
      I->insertInto(BB, InsertPt);
    I->setName(Name);
  }
};

/// Provides an 'InsertHelper' that calls a user-provided callback after
/// performing the default insertion.
class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
  std::function<void(Instruction *)> Callback;

public:
  ~IRBuilderCallbackInserter() override;

  IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
      : Callback(std::move(Callback)) {}

  void InsertHelper(Instruction *I, const Twine &Name,
                    BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const override {
    IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
    Callback(I);
  }
};
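
// Example usage (a minimal sketch; `Ctx` is a placeholder LLVMContext): a
// builder configured with a callback inserter can record every instruction
// it creates, e.g. to feed a later cleanup or worklist step.
//
//   SmallVector<Instruction *, 8> NewInsts;
//   IRBuilder<ConstantFolder, IRBuilderCallbackInserter> Builder(
//       Ctx, ConstantFolder(),
//       IRBuilderCallbackInserter(
//           [&](Instruction *I) { NewInsts.push_back(I); }));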

/// Common base class shared among various IRBuilders.
class IRBuilderBase {
  /// Pairs of (metadata kind, MDNode *) that should be added to all newly
  /// created instructions, like !dbg metadata.
  SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;

  /// Add or update an entry (Kind, MD) in MetadataToCopy if \p MD is not
  /// null. If \p MD is null, remove the entry with \p Kind.
  void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
    if (!MD) {
      erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
        return KV.first == Kind;
      });
      return;
    }

    for (auto &KV : MetadataToCopy)
      if (KV.first == Kind) {
        KV.second = MD;
        return;
      }

    MetadataToCopy.emplace_back(Kind, MD);
  }

protected:
  BasicBlock *BB;
  BasicBlock::iterator InsertPt;
  LLVMContext &Context;
  const IRBuilderFolder &Folder;
  const IRBuilderDefaultInserter &Inserter;

  MDNode *DefaultFPMathTag;
  FastMathFlags FMF;

  bool IsFPConstrained = false;
  fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict;
  RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic;

  ArrayRef<OperandBundleDef> DefaultOperandBundles;

public:
  IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
                const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag,
                ArrayRef<OperandBundleDef> OpBundles)
      : Context(context), Folder(Folder), Inserter(Inserter),
        DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) {
    ClearInsertionPoint();
  }

  /// Insert and return the specified instruction.
  template<typename InstTy>
  InstTy *Insert(InstTy *I, const Twine &Name = "") const {
    Inserter.InsertHelper(I, Name, BB, InsertPt);
    AddMetadataToInst(I);
    return I;
  }

  /// No-op overload to handle constants.
  Constant *Insert(Constant *C, const Twine& = "") const {
    return C;
  }

  Value *Insert(Value *V, const Twine &Name = "") const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      return Insert(I, Name);
    assert(isa<Constant>(V));
    return V;
  }

  //===--------------------------------------------------------------------===//
  // Builder configuration methods
  //===--------------------------------------------------------------------===//

  /// Clear the insertion point: created instructions will not be
  /// inserted into a block.
  void ClearInsertionPoint() {
    BB = nullptr;
    InsertPt = BasicBlock::iterator();
  }

  BasicBlock *GetInsertBlock() const { return BB; }
  BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
  LLVMContext &getContext() const { return Context; }

  /// This specifies that created instructions should be appended to the
  /// end of the specified block.
  void SetInsertPoint(BasicBlock *TheBB) {
    BB = TheBB;
    InsertPt = BB->end();
  }

  /// This specifies that created instructions should be inserted before
  /// the specified instruction.
  void SetInsertPoint(Instruction *I) {
    BB = I->getParent();
    InsertPt = I->getIterator();
    assert(InsertPt != BB->end() && "Can't read debug loc from end()");
    SetCurrentDebugLocation(I->getDebugLoc());
  }

  /// This specifies that created instructions should be inserted at the
  /// specified point.
  void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
    BB = TheBB;
    InsertPt = IP;
    if (IP != TheBB->end())
      SetCurrentDebugLocation(IP->getDebugLoc());
  }

  /// This specifies that created instructions should be inserted at the
  /// beginning of the specified function, but after any static alloca
  /// instructions that already exist at the start.
  void SetInsertPointPastAllocas(Function *F) {
    BB = &F->getEntryBlock();
    InsertPt = BB->getFirstNonPHIOrDbgOrAlloca();
  }

  /// Set location information used by debugging information.
  void SetCurrentDebugLocation(DebugLoc L) {
    AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
  }

  /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
  /// added to all created instructions. Entries present in MetadataToCopy but
  /// not on \p Src will be dropped from MetadataToCopy.
  void CollectMetadataToCopy(Instruction *Src,
                             ArrayRef<unsigned> MetadataKinds) {
    for (unsigned K : MetadataKinds)
      AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
  }

  /// Get location information used by debugging information.
  DebugLoc getCurrentDebugLocation() const;

  /// If this builder has a current debug location, set it on the
  /// specified instruction.
  void SetInstDebugLocation(Instruction *I) const;

  /// Add all entries in MetadataToCopy to \p I.
  void AddMetadataToInst(Instruction *I) const {
    for (const auto &KV : MetadataToCopy)
      I->setMetadata(KV.first, KV.second);
  }

  /// Get the return type of the current function that we're emitting
  /// into.
  Type *getCurrentFunctionReturnType() const;

  /// InsertPoint - A saved insertion point.
  class InsertPoint {
    BasicBlock *Block = nullptr;
    BasicBlock::iterator Point;

  public:
    /// Creates a new insertion point which doesn't point to anything.
    InsertPoint() = default;

    /// Creates a new insertion point at the given location.
    InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
        : Block(InsertBlock), Point(InsertPoint) {}

    /// Returns true if this insert point is set.
    bool isSet() const { return (Block != nullptr); }

    BasicBlock *getBlock() const { return Block; }
    BasicBlock::iterator getPoint() const { return Point; }
  };

  /// Returns the current insert point.
  InsertPoint saveIP() const {
    return InsertPoint(GetInsertBlock(), GetInsertPoint());
  }

  /// Returns the current insert point, clearing it in the process.
  InsertPoint saveAndClearIP() {
    InsertPoint IP(GetInsertBlock(), GetInsertPoint());
    ClearInsertionPoint();
    return IP;
  }

  /// Sets the current insert point to a previously-saved location.
  void restoreIP(InsertPoint IP) {
    if (IP.isSet())
      SetInsertPoint(IP.getBlock(), IP.getPoint());
    else
      ClearInsertionPoint();
  }
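
  // Example usage (a minimal sketch; `OtherBB` is a placeholder block):
  // save the insertion point, emit code elsewhere, then return to the
  // original location.
  //
  //   IRBuilderBase::InsertPoint IP = Builder.saveIP();
  //   Builder.SetInsertPoint(OtherBB);
  //   ... emit instructions into OtherBB ...
  //   Builder.restoreIP(IP);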

  /// Get the floating point math metadata being used.
  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }

  /// Get the flags to be applied to created floating point ops
  FastMathFlags getFastMathFlags() const { return FMF; }

  FastMathFlags &getFastMathFlags() { return FMF; }

  /// Clear the fast-math flags.
  void clearFastMathFlags() { FMF.clear(); }

  /// Set the floating point math metadata to be used.
  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// Set the fast-math flags to be used with generated fp-math operators
  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  /// Enable/Disable use of constrained floating point math. When
  /// enabled, the CreateF<op>() calls instead create constrained
  /// floating point intrinsic calls. Fast math flags are unaffected
  /// by this setting.
  void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }

  /// Query for the use of constrained floating point math
  bool getIsFPConstrained() { return IsFPConstrained; }

  /// Set the exception handling to be used with constrained floating point
  void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
#ifndef NDEBUG
    std::optional<StringRef> ExceptStr =
        convertExceptionBehaviorToStr(NewExcept);
    assert(ExceptStr && "Garbage strict exception behavior!");
#endif
    DefaultConstrainedExcept = NewExcept;
  }

  /// Set the rounding mode handling to be used with constrained floating point
  void setDefaultConstrainedRounding(RoundingMode NewRounding) {
#ifndef NDEBUG
    std::optional<StringRef> RoundingStr =
        convertRoundingModeToStr(NewRounding);
    assert(RoundingStr && "Garbage strict rounding mode!");
#endif
    DefaultConstrainedRounding = NewRounding;
  }

  /// Get the exception handling used with constrained floating point
  fp::ExceptionBehavior getDefaultConstrainedExcept() {
    return DefaultConstrainedExcept;
  }

  /// Get the rounding mode handling used with constrained floating point
  RoundingMode getDefaultConstrainedRounding() {
    return DefaultConstrainedRounding;
  }
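
  // Example usage (a minimal sketch; `A` and `B` are placeholder float
  // values): once constrained mode is enabled, the ordinary CreateF<op>()
  // calls emit llvm.experimental.constrained.* intrinsics using the
  // defaults configured here.
  //
  //   Builder.setIsFPConstrained(true);
  //   Builder.setDefaultConstrainedExcept(fp::ebStrict);
  //   Builder.setDefaultConstrainedRounding(RoundingMode::Dynamic);
  //   Value *Sum = Builder.CreateFAdd(A, B); // constrained fadd intrinsic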

  void setConstrainedFPFunctionAttr() {
    assert(BB && "Must have a basic block to set any function attributes!");

    Function *F = BB->getParent();
    if (!F->hasFnAttribute(Attribute::StrictFP)) {
      F->addFnAttr(Attribute::StrictFP);
    }
  }

  void setConstrainedFPCallAttr(CallBase *I) {
    I->addFnAttr(Attribute::StrictFP);
  }

  void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
    DefaultOperandBundles = OpBundles;
  }

  //===--------------------------------------------------------------------===//
  // RAII helpers.
  //===--------------------------------------------------------------------===//

  // RAII object that stores the current insertion point and restores it
  // when the object is destroyed. This includes the debug location.
  class InsertPointGuard {
    IRBuilderBase &Builder;
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;

  public:
    InsertPointGuard(IRBuilderBase &B)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()) {}

    InsertPointGuard(const InsertPointGuard &) = delete;
    InsertPointGuard &operator=(const InsertPointGuard &) = delete;

    ~InsertPointGuard() {
      Builder.restoreIP(InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }
  };
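
  // Example usage (a minimal sketch; `Preheader` is a placeholder block):
  // the guard restores both the insertion point and the debug location
  // when it goes out of scope.
  //
  //   {
  //     IRBuilderBase::InsertPointGuard Guard(Builder);
  //     Builder.SetInsertPoint(Preheader->getTerminator());
  //     ... emit hoisted code ...
  //   } // insertion point and debug location restored here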

  // RAII object that stores the current fast math settings and restores
  // them when the object is destroyed.
  class FastMathFlagGuard {
    IRBuilderBase &Builder;
    FastMathFlags FMF;
    MDNode *FPMathTag;
    bool IsFPConstrained;
    fp::ExceptionBehavior DefaultConstrainedExcept;
    RoundingMode DefaultConstrainedRounding;

  public:
    FastMathFlagGuard(IRBuilderBase &B)
        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
          IsFPConstrained(B.IsFPConstrained),
          DefaultConstrainedExcept(B.DefaultConstrainedExcept),
          DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}

    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;

    ~FastMathFlagGuard() {
      Builder.FMF = FMF;
      Builder.DefaultFPMathTag = FPMathTag;
      Builder.IsFPConstrained = IsFPConstrained;
      Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
      Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
    }
  };
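
  // Example usage (a minimal sketch; `A` and `B` are placeholder float
  // values): temporarily emit operations with relaxed fast-math flags
  // without disturbing the builder's ambient configuration.
  //
  //   {
  //     IRBuilderBase::FastMathFlagGuard Guard(Builder);
  //     FastMathFlags FMF;
  //     FMF.setFast();
  //     Builder.setFastMathFlags(FMF);
  //     Value *R = Builder.CreateFAdd(A, B); // emitted with 'fast'
  //   } // previous fast-math configuration restored here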

  // RAII object that stores the current default operand bundles and restores
  // them when the object is destroyed.
  class OperandBundlesGuard {
    IRBuilderBase &Builder;
    ArrayRef<OperandBundleDef> DefaultOperandBundles;

  public:
    OperandBundlesGuard(IRBuilderBase &B)
        : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}

    OperandBundlesGuard(const OperandBundlesGuard &) = delete;
    OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;

    ~OperandBundlesGuard() {
      Builder.DefaultOperandBundles = DefaultOperandBundles;
    }
  };


  //===--------------------------------------------------------------------===//
  // Miscellaneous creation methods.
  //===--------------------------------------------------------------------===//

  /// Make a new global variable with initializer type i8*.
  ///
  /// Make a new global variable with an initializer of array-of-i8 type,
  /// filled in with the specified null-terminated string value.  The new
  /// global variable will be marked mergeable with any others of the same
  /// contents.  If Name is specified, it is the name of the global variable
  /// created.
  ///
  /// If no module is given via \p M, it is taken from the insertion point
  /// basic block.
  GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
                                     unsigned AddressSpace = 0,
                                     Module *M = nullptr);

  /// Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// Get a constant integer value.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }
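
  // Example usage (a minimal sketch): these helpers produce ConstantInt
  // values of the matching bit width.
  //
  //   Value *C32 = Builder.getInt32(42);                 // i32 42
  //   Value *CN  = Builder.getIntN(24, 7);               // i24 7
  //   Value *Big = Builder.getInt(APInt(128, 1) << 100); // i128 2^100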

  //===--------------------------------------------------------------------===//
  // Type creation methods
  //===--------------------------------------------------------------------===//

  /// Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }

  /// Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// Fetch the type representing a 16-bit brain floating point value.
  Type *getBFloatTy() {
    return Type::getBFloatTy(Context);
  }

  /// Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// Fetch the type representing a pointer.
  PointerType *getPtrTy(unsigned AddrSpace = 0) {
    return PointerType::get(Context, AddrSpace);
  }

  /// Fetch the type representing a pointer to an 8-bit integer value.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// Fetch the type of an integer with size at least as big as that of a
  /// pointer in the given address space.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }

  /// Fetch the type of an integer that should be used to index GEP operations
  /// within AddressSpace.
  IntegerType *getIndexTy(const DataLayout &DL, unsigned AddrSpace) {
    return DL.getIndexType(Context, AddrSpace);
  }

  //===--------------------------------------------------------------------===//
  // Intrinsic creation methods
  //===--------------------------------------------------------------------===//

  /// Create and insert a memset to the specified pointer and the
  /// specified value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
                         MaybeAlign Align, bool isVolatile = false,
                         MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
                        TBAATag, ScopeTag, NoAliasTag);
  }
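
  // Example usage (a minimal sketch; `Buf` is a placeholder pointer):
  // zero out a 64-byte buffer with 16-byte alignment.
  //
  //   Builder.CreateMemSet(Buf, Builder.getInt8(0), 64, MaybeAlign(16));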

  CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr);

  CallInst *CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, Value *Val,
                               Value *Size, bool IsVolatile = false,
                               MDNode *TBAATag = nullptr,
                               MDNode *ScopeTag = nullptr,
                               MDNode *NoAliasTag = nullptr);

  /// Create and insert an element unordered-atomic memset of the region of
  /// memory starting at the given pointer to the given value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
                                               uint64_t Size, Align Alignment,
                                               uint32_t ElementSize,
                                               MDNode *TBAATag = nullptr,
                                               MDNode *ScopeTag = nullptr,
                                               MDNode *NoAliasTag = nullptr) {
    return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
                                              Align(Alignment), ElementSize,
                                              TBAATag, ScopeTag, NoAliasTag);
  }

  CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
                                               Value *Size, Align Alignment,
                                               uint32_t ElementSize,
                                               MDNode *TBAATag = nullptr,
                                               MDNode *ScopeTag = nullptr,
                                               MDNode *NoAliasTag = nullptr);

  /// Create and insert a memcpy between the specified pointers.
  ///
  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, uint64_t Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                        isVolatile, TBAATag, TBAAStructTag, ScopeTag,
                        NoAliasTag);
  }

  CallInst *CreateMemTransferInst(
      Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
      MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);

  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, Value *Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
                                 SrcAlign, Size, isVolatile, TBAATag,
                                 TBAAStructTag, ScopeTag, NoAliasTag);
  }
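
  // Example usage (a minimal sketch; `Dst`, `Src`, and `NBytes` are
  // placeholders): copy a runtime-sized region between two buffers using
  // the Value-sized overload.
  //
  //   Builder.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), NBytes);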

  CallInst *
  CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
                     MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
                     MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
                     MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);

  /// Create and insert an element unordered-atomic memcpy between the
  /// specified pointers.
  ///
  /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
  ///
  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateElementUnorderedAtomicMemCpy(
      Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
      uint32_t ElementSize, MDNode *TBAATag = nullptr,
      MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
      MDNode *NoAliasTag = nullptr);

  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, uint64_t Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                         isVolatile, TBAATag, ScopeTag, NoAliasTag);
  }

  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, Value *Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr);

  /// \brief Create and insert an element unordered-atomic memmove between the
  /// specified pointers.
  ///
  /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
  /// respectively.
  ///
  /// If the pointers aren't i8*, they will be converted.  If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateElementUnorderedAtomicMemMove(
      Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
      uint32_t ElementSize, MDNode *TBAATag = nullptr,
      MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
      MDNode *NoAliasTag = nullptr);

private:
  CallInst *getReductionIntrinsic(Intrinsic::ID ID, Value *Src);

public:
  /// Create a sequential vector fadd reduction intrinsic of the source vector.
  /// The first parameter is a scalar accumulator value. An unordered reduction
  /// can be created by adding the reassoc fast-math flag to the resulting
  /// sequential reduction.
  CallInst *CreateFAddReduce(Value *Acc, Value *Src);
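
  // Example usage (a minimal sketch; `Vec` is a placeholder float vector,
  // `EltTy` its element type): an unordered reduction is formed by adding
  // the 'reassoc' flag to the sequential reduction returned here.
  //
  //   CallInst *R =
  //       Builder.CreateFAddReduce(ConstantFP::get(EltTy, 0.0), Vec);
  //   FastMathFlags FMF;
  //   FMF.setAllowReassoc();
  //   R->setFastMathFlags(FMF);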

  /// Create a sequential vector fmul reduction intrinsic of the source vector.
  /// The first parameter is a scalar accumulator value. An unordered reduction
  /// can be created by adding the reassoc fast-math flag to the resulting
  /// sequential reduction.
  CallInst *CreateFMulReduce(Value *Acc, Value *Src);

  /// Create a vector int add reduction intrinsic of the source vector.
  CallInst *CreateAddReduce(Value *Src);

  /// Create a vector int mul reduction intrinsic of the source vector.
  CallInst *CreateMulReduce(Value *Src);

  /// Create a vector int AND reduction intrinsic of the source vector.
  CallInst *CreateAndReduce(Value *Src);

  /// Create a vector int OR reduction intrinsic of the source vector.
  CallInst *CreateOrReduce(Value *Src);

  /// Create a vector int XOR reduction intrinsic of the source vector.
  CallInst *CreateXorReduce(Value *Src);

  /// Create a vector integer max reduction intrinsic of the source
  /// vector.
  CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);

  /// Create a vector integer min reduction intrinsic of the source
  /// vector.
  CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);

  /// Create a vector float max reduction intrinsic of the source
  /// vector.
  CallInst *CreateFPMaxReduce(Value *Src);

  /// Create a vector float min reduction intrinsic of the source
  /// vector.
  CallInst *CreateFPMinReduce(Value *Src);

  /// Create a vector float maximum reduction intrinsic of the source
  /// vector. This variant follows the NaN and signed zero semantics of
  /// the llvm.maximum intrinsic.
  CallInst *CreateFPMaximumReduce(Value *Src);

  /// Create a vector float minimum reduction intrinsic of the source
  /// vector. This variant follows the NaN and signed zero semantics of
  /// the llvm.minimum intrinsic.
  CallInst *CreateFPMinimumReduce(Value *Src);

  /// Create a lifetime.start intrinsic.
  ///
  /// If the pointer isn't i8* it will be converted.
  CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);

  /// Create a lifetime.end intrinsic.
  ///
  /// If the pointer isn't i8* it will be converted.
  CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);

  /// Create a call to invariant.start intrinsic.
  ///
  /// If the pointer isn't i8* it will be converted.
  CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);

  /// Create a call to llvm.threadlocal.address intrinsic.
  CallInst *CreateThreadLocalAddress(Value *Ptr);

  /// Create a call to Masked Load intrinsic
  CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
                             Value *PassThru = nullptr, const Twine &Name = "");

  /// Create a call to Masked Store intrinsic
  CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
                              Value *Mask);

  /// Create a call to Masked Gather intrinsic
  CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
                               Value *Mask = nullptr, Value *PassThru = nullptr,
                               const Twine &Name = "");

  /// Create a call to Masked Scatter intrinsic
  CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
                                Value *Mask = nullptr);

  /// Create a call to Masked Expand Load intrinsic
  CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask = nullptr,
                                   Value *PassThru = nullptr,
                                   const Twine &Name = "");

  /// Create a call to Masked Compress Store intrinsic
  CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                      Value *Mask = nullptr);

  /// Return an all true boolean vector (mask) with \p NumElts lanes.
  Value *getAllOnesMask(ElementCount NumElts) {
    VectorType *VTy = VectorType::get(Type::getInt1Ty(Context), NumElts);
    return Constant::getAllOnesValue(VTy);
  }
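
  // Example usage (a minimal sketch; `Ptr` is a placeholder pointer):
  // an unconditional masked load of <4 x i32> using an all-true mask.
  //
  //   auto *VTy = FixedVectorType::get(Builder.getInt32Ty(), 4);
  //   Value *Mask = Builder.getAllOnesMask(ElementCount::getFixed(4));
  //   CallInst *L = Builder.CreateMaskedLoad(VTy, Ptr, Align(16), Mask);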

  /// Create an assume intrinsic call that allows the optimizer to
  /// assume that the provided condition will be true.
  ///
  /// The optional argument \p OpBundles specifies operand bundles that are
  /// added to the call instruction.
  CallInst *
  CreateAssumption(Value *Cond,
                   ArrayRef<OperandBundleDef> OpBundles = std::nullopt);
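
  // Example usage (a minimal sketch; `Ptr` is a placeholder pointer):
  // inform the optimizer that a pointer is known to be non-null.
  //
  //   Value *Cond = Builder.CreateIsNotNull(Ptr);
  //   Builder.CreateAssumption(Cond);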

  /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
  Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
  Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
    return CreateNoAliasScopeDeclaration(
        MetadataAsValue::get(Context, ScopeTag));
  }

  /// Create a call to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee,
                                   ArrayRef<Value *> CallArgs,
                                   std::optional<ArrayRef<Value *>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Create a call to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee, uint32_t Flags,
                                   ArrayRef<Value *> CallArgs,
                                   std::optional<ArrayRef<Use>> TransitionArgs,
                                   std::optional<ArrayRef<Use>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Convenience function for the common case when CallArgs are filled
  /// in using ArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
  /// .get()'ed to get the Value pointer.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee,
                                   ArrayRef<Use> CallArgs,
                                   std::optional<ArrayRef<Value *>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           FunctionCallee ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
                           std::optional<ArrayRef<Value *>> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  InvokeInst *CreateGCStatepointInvoke(
      uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
      BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
      ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
      std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
      const Twine &Name = "");

  // Convenience function for the common case when CallArgs are filled in using
  // ArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
  // get the Value *.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           FunctionCallee ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
                           std::optional<ArrayRef<Value *>> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// Create a call to the experimental.gc.result intrinsic to extract
  /// the result from a call wrapped in a statepoint.
  CallInst *CreateGCResult(Instruction *Statepoint,
                           Type *ResultType,
                           const Twine &Name = "");

  /// Create a call to the experimental.gc.relocate intrinsics to
  /// project the relocated value of one pointer from the statepoint.
  CallInst *CreateGCRelocate(Instruction *Statepoint,
                             int BaseOffset,
                             int DerivedOffset,
                             Type *ResultType,
                             const Twine &Name = "");

  /// Create a call to the experimental.gc.pointer.base intrinsic to get the
  /// base pointer for the specified derived pointer.
  CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");

  /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
  /// the offset of the specified derived pointer from its base.
  CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");

  /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
  /// will be the same type as that of \p Scaling.
  Value *CreateVScale(Constant *Scaling, const Twine &Name = "");

  /// Create an expression which evaluates to the number of elements in \p EC
  /// at runtime.
  Value *CreateElementCount(Type *DstType, ElementCount EC);

  /// Create an expression which evaluates to the number of units in \p Size
  /// at runtime.  This works for both units of bits and bytes.
  Value *CreateTypeSize(Type *DstType, TypeSize Size);

  /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
  Value *CreateStepVector(Type *DstType, const Twine &Name = "");
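
  // Example (illustrative sketch; `B` is an IRBuilder<> with its insertion
  // point set): materialize the runtime element count of a scalable
  // <vscale x 4 x i32> vector as an i64, i.e. vscale * 4.
  //   Value *N = B.CreateElementCount(B.getInt64Ty(),
  //                                   ElementCount::getScalable(4));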

  /// Create a call to intrinsic \p ID with 1 operand, with the intrinsic
  /// name mangled on the operand's type.
  CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                 Instruction *FMFSource = nullptr,
                                 const Twine &Name = "");

  /// Create a call to intrinsic \p ID with 2 operands, with the intrinsic
  /// name mangled on the type of the first operand.
  CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
                                  Instruction *FMFSource = nullptr,
                                  const Twine &Name = "");

  /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types. If
  /// \p FMFSource is provided, copy fast-math-flags from that instruction to
  /// the intrinsic.
  CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
                            ArrayRef<Value *> Args,
                            Instruction *FMFSource = nullptr,
                            const Twine &Name = "");

  /// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
  /// \p FMFSource is provided, copy fast-math-flags from that instruction to
  /// the intrinsic.
  CallInst *CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                            ArrayRef<Value *> Args,
                            Instruction *FMFSource = nullptr,
                            const Twine &Name = "");

  /// Create call to the minnum intrinsic.
  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (IsFPConstrained) {
      return CreateConstrainedFPUnroundedBinOp(
          Intrinsic::experimental_constrained_minnum, LHS, RHS, nullptr, Name);
    }

    return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maxnum intrinsic.
  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (IsFPConstrained) {
      return CreateConstrainedFPUnroundedBinOp(
          Intrinsic::experimental_constrained_maxnum, LHS, RHS, nullptr, Name);
    }

    return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the minimum intrinsic.
  CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maximum intrinsic.
  CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the copysign intrinsic.
  CallInst *CreateCopySign(Value *LHS, Value *RHS,
                           Instruction *FMFSource = nullptr,
                           const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::copysign, LHS, RHS, FMFSource,
                                 Name);
  }

  /// Create a call to the arithmetic_fence intrinsic.
  CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
                                  const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
                           Name);
  }

  /// Create a call to the vector.extract intrinsic.
  CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
                                const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::vector_extract,
                           {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
                           Name);
  }

  /// Create a call to the vector.insert intrinsic.
  CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
                               Value *Idx, const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::vector_insert,
                           {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
                           nullptr, Name);
  }
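
  // Example (illustrative sketch; `ScalableVec` is assumed to have type
  // <vscale x 4 x i32>): extract its low four elements as a fixed vector.
  // The index must be a constant multiple of the result's minimum length.
  //   Type *FixedTy = FixedVectorType::get(B.getInt32Ty(), 4);
  //   Value *Lo = B.CreateExtractVector(FixedTy, ScalableVec, B.getInt64(0));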

private:
  /// Create a call to a masked intrinsic with given Id.
  CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
                                  ArrayRef<Type *> OverloadedTypes,
                                  const Twine &Name = "");

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Terminators
  //===--------------------------------------------------------------------===//

private:
  /// Helper to add branch weight and unpredictable metadata onto an
  /// instruction.
  /// \returns The annotated instruction.
  template <typename InstTy>
  InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
    if (Weights)
      I->setMetadata(LLVMContext::MD_prof, Weights);
    if (Unpredictable)
      I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
    return I;
  }

public:
  /// Create a 'ret void' instruction.
  ReturnInst *CreateRetVoid() {
    return Insert(ReturnInst::Create(Context));
  }

  /// Create a 'ret <val>' instruction.
  ReturnInst *CreateRet(Value *V) {
    return Insert(ReturnInst::Create(Context, V));
  }

  /// Create a sequence of N insertvalue instructions, each consuming one
  /// Value from the retVals array, that builds an aggregate return value
  /// one member at a time, plus a ret instruction to return the resulting
  /// aggregate value.
  ///
  /// This is a convenience function for code that uses aggregate return values
  /// as a vehicle for having multiple return values.
  ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
    Value *V = PoisonValue::get(getCurrentFunctionReturnType());
    for (unsigned i = 0; i != N; ++i)
      V = CreateInsertValue(V, retVals[i], i, "mrv");
    return Insert(ReturnInst::Create(Context, V));
  }
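
  // Example (illustrative sketch, for a function whose return type is the
  // aggregate {i32, i32}; X and Y are assumed i32 values):
  //   Value *RetVals[] = {X, Y};
  //   B.CreateAggregateRet(RetVals, 2);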

  /// Create an unconditional 'br label X' instruction.
  BranchInst *CreateBr(BasicBlock *Dest) {
    return Insert(BranchInst::Create(Dest));
  }

  /// Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction.
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
                                    BranchWeights, Unpredictable));
  }
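
  // Example (illustrative sketch; HotBB/ColdBB are assumed blocks): attach
  // branch weights built with MDBuilder to mark the true edge as hot.
  //   MDBuilder MDB(B.getContext());
  //   B.CreateCondBr(Cond, HotBB, ColdBB,
  //                  MDB.createBranchWeights(/*TrueWeight=*/95,
  //                                          /*FalseWeight=*/5));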

  /// Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction. Copy branch metadata from \p MDSrc if available.
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           Instruction *MDSrc) {
    BranchInst *Br = BranchInst::Create(True, False, Cond);
    if (MDSrc) {
      unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
                        LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
      Br->copyMetadata(*MDSrc, WL);
    }
    return Insert(Br);
  }

  /// Create a switch instruction with the specified value, default dest,
  /// and with a hint for the number of cases that will be added (for efficient
  /// allocation).
  SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
                                    BranchWeights, Unpredictable));
  }
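
  // Example (illustrative sketch; DefaultBB/ZeroBB/OneBB are assumed blocks):
  // cases are added on the returned instruction; NumCases is only a
  // reservation hint.
  //   SwitchInst *SI = B.CreateSwitch(V, DefaultBB, /*NumCases=*/2);
  //   SI->addCase(B.getInt32(0), ZeroBB);
  //   SI->addCase(B.getInt32(1), OneBB);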

  /// Create an indirect branch instruction with the specified address
  /// operand, with an optional hint for the number of destinations that will be
  /// added (for efficient allocation).
  IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
    return Insert(IndirectBrInst::Create(Addr, NumDests));
  }

  /// Create an invoke instruction.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }

  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, OpBundles, Name);
  }

  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, Name);
  }

  /// Create a callbr instruction.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
                                     Args), Name);
  }
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return Insert(
        CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
                           OpBundles), Name);
  }

  CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
                        DefaultDest, IndirectDests, Args, Name);
  }
  CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
                        DefaultDest, IndirectDests, Args, OpBundles, Name);
  }

  ResumeInst *CreateResume(Value *Exn) {
    return Insert(ResumeInst::Create(Exn));
  }

  CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
                                      BasicBlock *UnwindBB = nullptr) {
    return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
  }

  CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
                                     unsigned NumHandlers,
                                     const Twine &Name = "") {
    return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
                  Name);
  }

  CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
                               const Twine &Name = "") {
    return Insert(CatchPadInst::Create(ParentPad, Args), Name);
  }

  CleanupPadInst *CreateCleanupPad(Value *ParentPad,
                                   ArrayRef<Value *> Args = std::nullopt,
                                   const Twine &Name = "") {
    return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
  }

  CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
    return Insert(CatchReturnInst::Create(CatchPad, BB));
  }

  UnreachableInst *CreateUnreachable() {
    return Insert(new UnreachableInst(Context));
  }

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Binary Operators
  //===--------------------------------------------------------------------===//
private:
  BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
                                          Value *LHS, Value *RHS,
                                          const Twine &Name,
                                          bool HasNUW, bool HasNSW) {
    BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }

  Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
                          FastMathFlags FMF) const {
    if (!FPMD)
      FPMD = DefaultFPMathTag;
    if (FPMD)
      I->setMetadata(LLVMContext::MD_fpmath, FPMD);
    I->setFastMathFlags(FMF);
    return I;
  }

  Value *getConstrainedFPRounding(std::optional<RoundingMode> Rounding) {
    RoundingMode UseRounding = DefaultConstrainedRounding;

    if (Rounding)
      UseRounding = *Rounding;

    std::optional<StringRef> RoundingStr =
        convertRoundingModeToStr(UseRounding);
    assert(RoundingStr && "Garbage strict rounding mode!");
    auto *RoundingMDS = MDString::get(Context, *RoundingStr);

    return MetadataAsValue::get(Context, RoundingMDS);
  }

  Value *getConstrainedFPExcept(std::optional<fp::ExceptionBehavior> Except) {
    std::optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(
        Except.value_or(DefaultConstrainedExcept));
    assert(ExceptStr && "Garbage strict exception behavior!");
    auto *ExceptMDS = MDString::get(Context, *ExceptStr);

    return MetadataAsValue::get(Context, ExceptMDS);
  }

  Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
    assert(CmpInst::isFPPredicate(Predicate) &&
           Predicate != CmpInst::FCMP_FALSE &&
           Predicate != CmpInst::FCMP_TRUE &&
           "Invalid constrained FP comparison predicate!");

    StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
    auto *PredicateMDS = MDString::get(Context, PredicateStr);

    return MetadataAsValue::get(Context, PredicateMDS);
  }

public:
  Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Add, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }
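
  // Note that, like most Create* methods, this may fold through the builder's
  // folder and return a Value that is not a newly inserted instruction.
  // Example (illustrative sketch):
  //   Value *S = B.CreateAdd(B.getInt32(2), B.getInt32(3)); // ConstantInt 5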

  Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, false, true);
  }

  Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, true, false);
  }

  Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Sub, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }

  Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, false, true);
  }

  Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, true, false);
  }

  Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Mul, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }

  Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, false, true);
  }

  Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, true, false);
  }

  Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::UDiv, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
  }

  Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateUDiv(LHS, RHS, Name, true);
  }

  Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::SDiv, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
  }

  Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSDiv(LHS, RHS, Name, true);
  }

  Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::URem, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
  }

  Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::SRem, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
  }

  Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Shl, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }

  Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }

  Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::LShr, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
  }

  Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                      isExact);
  }

  Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                      isExact);
  }

  Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::AShr, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
  }

  Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                      isExact);
  }

  Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                      isExact);
  }

  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldBinOp(Instruction::And, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
  }

  Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  Value *CreateAnd(ArrayRef<Value*> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateAnd(Accum, Ops[i]);
    return Accum;
  }

  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldBinOp(Instruction::Or, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
  }

  Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  Value *CreateOr(ArrayRef<Value*> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateOr(Accum, Ops[i]);
    return Accum;
  }

  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::Xor, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
  }

  Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, FMFSource, Name);

    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr, FMF);
    return Insert(I, Name);
  }

  Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, FMFSource, Name);

    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr, FMF);
    return Insert(I, Name);
  }

  Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, FMFSource, Name);

    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr, FMF);
    return Insert(I, Name);
  }

  Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, FMFSource, Name);

    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr, FMF);
    return Insert(I, Name);
  }

  Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, FMFSource, Name);

    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr, FMF);
    return Insert(I, Name);
  }

  Value *CreateBinOp(Instruction::BinaryOps Opc,
                     Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
    if (Value *V = Folder.FoldBinOp(Opc, LHS, RHS)) return V;
    Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
    if (isa<FPMathOperator>(BinOp))
      setFPAttrs(BinOp, FPMathTag, FMF);
    return Insert(BinOp, Name);
  }

  Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1));
    return CreateSelect(Cond1, Cond2,
                        ConstantInt::getNullValue(Cond2->getType()), Name);
  }

  Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1));
    return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
                        Cond2, Name);
  }
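
  // Example (illustrative sketch; C1 and C2 are assumed i1 values): these
  // lower to selects rather than `and`/`or`, so poison in the second operand
  // does not propagate when that operand is not selected.
  //   Value *A = B.CreateLogicalAnd(C1, C2); // select i1 %c1, i1 %c2, false
  //   Value *O = B.CreateLogicalOr(C1, C2);  // select i1 %c1, true, i1 %c2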

  Value *CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2,
                         const Twine &Name = "") {
    switch (Opc) {
    case Instruction::And:
      return CreateLogicalAnd(Cond1, Cond2, Name);
    case Instruction::Or:
      return CreateLogicalOr(Cond1, Cond2, Name);
    default:
      break;
    }
    llvm_unreachable("Not a logical operation.");
  }

  // NOTE: this is a sequential, non-commutative, ordered reduction!
  Value *CreateLogicalOr(ArrayRef<Value *> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateLogicalOr(Accum, Ops[i]);
    return Accum;
  }

  CallInst *CreateConstrainedFPBinOp(
      Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
      const Twine &Name = "", MDNode *FPMathTag = nullptr,
      std::optional<RoundingMode> Rounding = std::nullopt,
      std::optional<fp::ExceptionBehavior> Except = std::nullopt);

  CallInst *CreateConstrainedFPUnroundedBinOp(
      Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
      const Twine &Name = "", MDNode *FPMathTag = nullptr,
      std::optional<fp::ExceptionBehavior> Except = std::nullopt);

  Value *CreateNeg(Value *V, const Twine &Name = "", bool HasNUW = false,
                   bool HasNSW = false) {
    return CreateSub(Constant::getNullValue(V->getType()), V, Name, HasNUW,
                     HasNSW);
  }

  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, false, true);
  }

  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, true, false);
  }

  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
      return Res;
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                  Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
                       const Twine &Name = "") {
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
      return Res;
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr, FMF),
                  Name);
  }

  Value *CreateNot(Value *V, const Twine &Name = "") {
    return CreateXor(V, Constant::getAllOnesValue(V->getType()), Name);
  }

  Value *CreateUnOp(Instruction::UnaryOps Opc,
                    Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (Value *Res = Folder.FoldUnOpFMF(Opc, V, FMF))
      return Res;
    Instruction *UnOp = UnaryOperator::Create(Opc, V);
    if (isa<FPMathOperator>(UnOp))
      setFPAttrs(UnOp, FPMathTag, FMF);
    return Insert(UnOp, Name);
  }

  /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
  /// The correct number of operands for \p Opc must be passed in \p Ops.
  Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                      const Twine &Name = "", MDNode *FPMathTag = nullptr);

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Memory Instructions
  //===--------------------------------------------------------------------===//

  AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
                           Value *ArraySize = nullptr, const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }

  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
                           const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    unsigned AddrSpace = DL.getAllocaAddrSpace();
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }

  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
  /// converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                       const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
  }

  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
    return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
  }
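
  // Example (illustrative sketch): a stack slot written and read back; the
  // alignment falls back to the DataLayout's preferred/ABI alignment when
  // none is given.
  //   AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), nullptr, "slot");
  //   B.CreateStore(B.getInt32(42), Slot);
  //   Value *V = B.CreateLoad(B.getInt32Ty(), Slot, "v");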

  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }

  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }

  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              bool isVolatile, const Twine &Name = "") {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = DL.getABITypeAlign(Ty);
    }
    return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
  }

  StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
                                bool isVolatile = false) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = DL.getABITypeAlign(Val->getType());
    }
    return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
  }
  FenceInst *CreateFence(AtomicOrdering Ordering,
                         SyncScope::ID SSID = SyncScope::System,
                         const Twine &Name = "") {
    return Insert(new FenceInst(Context, Ordering, SSID), Name);
  }

  AtomicCmpXchgInst *
  CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
                      AtomicOrdering SuccessOrdering,
                      AtomicOrdering FailureOrdering,
                      SyncScope::ID SSID = SyncScope::System) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
    }

    return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
                                        FailureOrdering, SSID));
  }

  AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
                                 Value *Val, MaybeAlign Align,
                                 AtomicOrdering Ordering,
                                 SyncScope::ID SSID = SyncScope::System) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
    }

    return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
  }
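
  // Example (illustrative sketch; Ptr is assumed to point at an i32): a
  // sequentially consistent fetch-and-add of 1. An empty MaybeAlign falls
  // back to the value type's store size.
  //   B.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, B.getInt32(1), MaybeAlign(),
  //                     AtomicOrdering::SequentiallyConsistent);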

  Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                   const Twine &Name = "", bool IsInBounds = false) {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, IsInBounds))
      return V;
    return Insert(IsInBounds
                      ? GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList)
                      : GetElementPtrInst::Create(Ty, Ptr, IdxList),
                  Name);
  }

  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                           const Twine &Name = "") {
    return CreateGEP(Ty, Ptr, IdxList, Name, /* IsInBounds */ true);
  }

  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    unsigned Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }

  Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    uint64_t Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }

  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
                         const Twine &Name = "") {
    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
  }
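
  // Example (illustrative sketch; PairTy is assumed to be the struct type
  // {i32, float} and P a pointer to such an object): address of field 1.
  //   // equivalent IR: getelementptr inbounds %Pair, ptr %P, i32 0, i32 1
  //   Value *FieldPtr = B.CreateStructGEP(PairTy, P, 1);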

  /// Same as CreateGlobalString, but returns a pointer with "i8*" type
  /// instead of a pointer to array of i8.
  ///
  /// If no module is given via \p M, it is taken from the insertion point
  /// basic block.
  Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
                                  unsigned AddressSpace = 0,
                                  Module *M = nullptr) {
    GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
    Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
    Constant *Indices[] = {Zero, Zero};
    return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
                                                  Indices);
  }
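
  // Example (illustrative sketch; `PutsFn` is a hypothetical FunctionCallee
  // for a puts-like runtime function):
  //   Constant *Msg = B.CreateGlobalStringPtr("hello\n");
  //   B.CreateCall(PutsFn, {Msg});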

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::Trunc, V, DestTy, Name);
  }

  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::ZExt, V, DestTy, Name);
  }

  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::SExt, V, DestTy, Name);
  }

  /// Create a ZExt or Trunc from the integer value V to DestTy. Return
  /// the value untouched if the type of V is already DestTy.
  Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    assert(V->getType()->isIntOrIntVectorTy() &&
           DestTy->isIntOrIntVectorTy() &&
           "Can only zero extend/truncate integers!");
    Type *VTy = V->getType();
    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
      return CreateZExt(V, DestTy, Name);
    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
      return CreateTrunc(V, DestTy, Name);
    return V;
  }

  /// Create a SExt or Trunc from the integer value V to DestTy. Return
  /// the value untouched if the type of V is already DestTy.
  Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    assert(V->getType()->isIntOrIntVectorTy() &&
           DestTy->isIntOrIntVectorTy() &&
           "Can only sign extend/truncate integers!");
    Type *VTy = V->getType();
    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
      return CreateSExt(V, DestTy, Name);
    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
      return CreateTrunc(V, DestTy, Name);
    return V;
  }
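
  // Example (illustrative sketch): normalize an index of unknown integer
  // width to i64; no instruction is emitted if the type already matches.
  //   Value *Idx64 = B.CreateZExtOrTrunc(Idx, B.getInt64Ty());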

  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToUI, V, DestTy, Name);
  }

  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToSI, V, DestTy, Name);
  }

  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::UIToFP, V, DestTy, Name);
  }

  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::SIToFP, V, DestTy, Name);
  }

  Value *CreateFPTrunc(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(
          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
          Name);
    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
  }

  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPExt, V, DestTy, Name);
  }

  Value *CreatePtrToInt(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
  }

  Value *CreateIntToPtr(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
  }

  Value *CreateBitCast(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    return CreateCast(Instruction::BitCast, V, DestTy, Name);
  }

  Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
  }

  Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
  }

  Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
  }

  Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
                              const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
  }

  Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
                    const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
    return Insert(CastInst::Create(Op, V, DestTy), Name);
  }

  Value *CreatePointerCast(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
    return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
  }

  Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
                                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;

    if (auto *VC = dyn_cast<Constant>(V)) {
      return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
                    Name);
    }

    return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
                  Name);
  }

  Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
                       const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
    return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
  }

  Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
                                const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
      return CreatePtrToInt(V, DestTy, Name);
    if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
      return CreateIntToPtr(V, DestTy, Name);

    return CreateBitCast(V, DestTy, Name);
  }

  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFPCast(VC, DestTy), Name);
    return Insert(CastInst::CreateFPCast(V, DestTy), Name);
  }

  CallInst *CreateConstrainedFPCast(
      Intrinsic::ID ID, Value *V, Type *DestTy,
      Instruction *FMFSource = nullptr, const Twine &Name = "",
      MDNode *FPMathTag = nullptr,
      std::optional<RoundingMode> Rounding = std::nullopt,
      std::optional<fp::ExceptionBehavior> Except = std::nullopt);

  // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
  // compile time error, instead of converting the string to bool for the
  // isSigned parameter.
  Value *CreateIntCast(Value *, Type *, const char *) = delete;

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Compare Instructions
  //===--------------------------------------------------------------------===//

  Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
  }

  Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
  }

  Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
  }

  Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
  }

  Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
  }

  Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
  }

  Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
  }

  Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
  }

  Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
  }

  Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
  }

  Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                    const Twine &Name = "") {
    if (auto *V = Folder.FoldICmp(P, LHS, RHS))
      return V;
    return Insert(new ICmpInst(P, LHS, RHS), Name);
  }

  // Create a quiet floating-point comparison (i.e. one that raises an FP
  // exception only in the case where an input is a signaling NaN).
  // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
  Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                    const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
  }

  Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                   const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CmpInst::isFPPredicate(Pred)
               ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
               : CreateICmp(Pred, LHS, RHS, Name);
  }
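
  // Example (illustrative sketch): CreateCmp dispatches on the predicate
  // kind, so one call site can handle integer and FP comparisons alike.
  //   Value *LT = B.CreateCmp(IsFP ? CmpInst::FCMP_OLT : CmpInst::ICMP_SLT,
  //                           L, R);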

  // Create a signaling floating-point comparison (i.e. one that raises an FP
  // exception whenever an input is any NaN, signaling or quiet).
  // Note that this differs from CreateFCmp only if IsFPConstrained is true.
  Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
                     const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
  }

private:
  // Helper routine to create either a signaling or a quiet FP comparison.
  Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
                          const Twine &Name, MDNode *FPMathTag,
                          bool IsSignaling);

public:
  CallInst *CreateConstrainedFPCmp(
      Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
      const Twine &Name = "",
      std::optional<fp::ExceptionBehavior> Except = std::nullopt);

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Other Instructions
  //===--------------------------------------------------------------------===//

  PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
                     const Twine &Name = "") {
    PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
    if (isa<FPMathOperator>(Phi))
      setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
    return Insert(Phi, Name);
  }

private:
  CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                             const Twine &Name = "",
                             Instruction *FMFSource = nullptr,
                             ArrayRef<OperandBundleDef> OpBundles = {});

public:
  CallInst *CreateCall(FunctionType *FTy, Value *Callee,
                       ArrayRef<Value *> Args = std::nullopt,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(CI);
    if (isa<FPMathOperator>(CI))
      setFPAttrs(CI, FPMathTag, FMF);
    return Insert(CI, Name);
  }

  CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> OpBundles,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(CI);
    if (isa<FPMathOperator>(CI))
      setFPAttrs(CI, FPMathTag, FMF);
    return Insert(CI, Name);
  }

  CallInst *CreateCall(FunctionCallee Callee,
                       ArrayRef<Value *> Args = std::nullopt,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
                      FPMathTag);
  }

  CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> OpBundles,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
                      OpBundles, Name, FPMathTag);
  }
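  // Example (a sketch; assumes IRBuilder<> Builder, Module &M, and
  // LLVMContext &Ctx are in scope):
  //   FunctionCallee Callee = M.getOrInsertFunction(
  //       "use_i32", Type::getVoidTy(Ctx), Type::getInt32Ty(Ctx));
  //   Builder.CreateCall(Callee, {Builder.getInt32(42)});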

  CallInst *CreateConstrainedFPCall(
      Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
      std::optional<RoundingMode> Rounding = std::nullopt,
      std::optional<fp::ExceptionBehavior> Except = std::nullopt);

  Value *CreateSelect(Value *C, Value *True, Value *False,
                      const Twine &Name = "", Instruction *MDFrom = nullptr);

  VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
    return Insert(new VAArgInst(List, Ty), Name);
  }

  Value *CreateExtractElement(Value *Vec, Value *Idx,
                              const Twine &Name = "") {
    if (Value *V = Folder.FoldExtractElement(Vec, Idx))
      return V;
    return Insert(ExtractElementInst::Create(Vec, Idx), Name);
  }

  Value *CreateExtractElement(Value *Vec, uint64_t Idx,
                              const Twine &Name = "") {
    return CreateExtractElement(Vec, getInt64(Idx), Name);
  }

  Value *CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
  }

  Value *CreateInsertElement(Type *VecTy, Value *NewElt, uint64_t Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
  }

  Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
                             const Twine &Name = "") {
    if (Value *V = Folder.FoldInsertElement(Vec, NewElt, Idx))
      return V;
    return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
  }

  Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
  }
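  // Example (a sketch; assumes IRBuilder<> Builder and a <4 x i32> type
  // VecTy): build a vector by inserting into an initially-poison value.
  //   Value *V = Builder.CreateInsertElement(VecTy, Builder.getInt32(7),
  //                                          uint64_t(0));
  //   V = Builder.CreateInsertElement(V, Builder.getInt32(9), uint64_t(1));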

  Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
                             const Twine &Name = "") {
    SmallVector<int, 16> IntMask;
    ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
    return CreateShuffleVector(V1, V2, IntMask, Name);
  }

  /// See class ShuffleVectorInst for a description of the mask representation.
  Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
                             const Twine &Name = "") {
    if (Value *V = Folder.FoldShuffleVector(V1, V2, Mask))
      return V;
    return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
  }

  /// Create a unary shuffle. The second vector operand of the IR instruction
  /// is poison.
  Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
                             const Twine &Name = "") {
    return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
  }
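  // Example (a sketch; assumes IRBuilder<> Builder and a <4 x i32> value V):
  //   Value *Rev = Builder.CreateShuffleVector(V, {3, 2, 1, 0});
  // emits a unary shufflevector that reverses V; its second operand is poison.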

  Value *CreateExtractValue(Value *Agg, ArrayRef<unsigned> Idxs,
                            const Twine &Name = "") {
    if (auto *V = Folder.FoldExtractValue(Agg, Idxs))
      return V;
    return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
  }

  Value *CreateInsertValue(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name = "") {
    if (auto *V = Folder.FoldInsertValue(Agg, Val, Idxs))
      return V;
    return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
  }

  LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
                                   const Twine &Name = "") {
    return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
  }

  Value *CreateFreeze(Value *V, const Twine &Name = "") {
    return Insert(new FreezeInst(V), Name);
  }

  //===--------------------------------------------------------------------===//
  // Utility creation methods
  //===--------------------------------------------------------------------===//

  /// Return a boolean value testing if \p Arg == 0.
  Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
    return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), Name);
  }

  /// Return a boolean value testing if \p Arg != 0.
  Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
    return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), Name);
  }

  /// Return a boolean value testing if \p Arg < 0.
  Value *CreateIsNeg(Value *Arg, const Twine &Name = "") {
    return CreateICmpSLT(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
  }

  /// Return a boolean value testing if \p Arg > -1.
  Value *CreateIsNotNeg(Value *Arg, const Twine &Name = "") {
    return CreateICmpSGT(Arg, ConstantInt::getAllOnesValue(Arg->getType()),
                         Name);
  }
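  // Example (a sketch; assumes IRBuilder<> Builder and an i32 value X):
  //   Value *IsZero = Builder.CreateIsNull(X);   // icmp eq X, 0
  //   Value *NonNeg = Builder.CreateIsNotNeg(X); // icmp sgt X, -1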

  /// Return the i64 difference between two pointer values, dividing out
  /// the size of the pointed-to objects.
  ///
  /// This is intended to implement C-style pointer subtraction. As such, the
  /// pointers must be appropriately aligned for their element types and
  /// pointing into the same object.
  Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                       const Twine &Name = "");
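  // Example (a sketch; assumes i32 pointers P and Q into the same array):
  //   Value *N = Builder.CreatePtrDiff(Builder.getInt32Ty(), P, Q);
  // N is the i64 element distance (P - Q) / sizeof(i32).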

  /// Create a launder.invariant.group intrinsic call. If the type of \p Ptr
  /// differs from pointer to i8, it is cast to pointer to i8 in the same
  /// address space before the call and the result is cast back to the type of
  /// \p Ptr after the call.
  Value *CreateLaunderInvariantGroup(Value *Ptr);

  /// \brief Create a strip.invariant.group intrinsic call. If the type of
  /// \p Ptr differs from pointer to i8, it is cast to pointer to i8 in the
  /// same address space before the call and the result is cast back to the
  /// type of \p Ptr after the call.
  Value *CreateStripInvariantGroup(Value *Ptr);

  /// Return a vector value that contains the elements of the vector \p V in
  /// reverse order.
  Value *CreateVectorReverse(Value *V, const Twine &Name = "");

  /// Return a vector splice intrinsic if using scalable vectors, otherwise
  /// return a shufflevector. If the immediate is positive, a vector is
  /// extracted from concat(V1, V2), starting at Imm. If the immediate
  /// is negative, we extract -Imm elements from V1 and the remaining
  /// elements from V2. Imm is a signed integer in the range
  /// -VL <= Imm < VL (where VL is the runtime vector length of the
  /// source/result vector).
  Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                            const Twine &Name = "");
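  // Example (a sketch; assumes <4 x i32> values V1 and V2):
  //   CreateVectorSplice(V1, V2, 1) yields elements 1..4 of concat(V1, V2);
  //   CreateVectorSplice(V1, V2, -1) yields the last element of V1 followed
  //   by the first three elements of V2.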

  /// Return a vector value that contains \p V broadcast to \p NumElts
  /// elements.
  Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");

  /// Return a vector value that contains \p V broadcast to \p EC
  /// elements.
  Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");

  /// Return a value that has been extracted from a larger integer type.
  Value *CreateExtractInteger(const DataLayout &DL, Value *From,
                              IntegerType *ExtractedTy, uint64_t Offset,
                              const Twine &Name);

  Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
                                        unsigned Dimension, unsigned LastIndex,
                                        MDNode *DbgInfo);

  Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
                                        MDNode *DbgInfo);

  Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
                                         unsigned Index, unsigned FieldIndex,
                                         MDNode *DbgInfo);

  Value *createIsFPClass(Value *FPNum, unsigned Test);

private:
  /// Helper function that creates an assume intrinsic call that
  /// represents an alignment assumption on the provided pointer \p PtrValue
  /// with offset \p OffsetValue and alignment value \p AlignValue.
  CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                            Value *PtrValue, Value *AlignValue,
                                            Value *OffsetValue);

public:
  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      unsigned Alignment,
                                      Value *OffsetValue = nullptr);
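  // Example (a sketch; assumes IRBuilder<> Builder, const DataLayout &DL, and
  // a pointer value Ptr):
  //   Builder.CreateAlignmentAssumption(DL, Ptr, 16);
  // emits a call to @llvm.assume carrying an "align" operand bundle that
  // asserts Ptr is 16-byte aligned.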

  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  ///
  /// This overload handles the condition where the Alignment is dependent
  /// on an existing value rather than a static value.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      Value *Alignment,
                                      Value *OffsetValue = nullptr);
};

/// This provides a uniform API for creating instructions and inserting
/// them into a basic block: either at the end of a BasicBlock, or at a specific
/// iterator location in a block.
///
/// Note that the builder does not expose the full generality of LLVM
/// instructions.  For access to extra instruction properties, use the mutators
/// (e.g. setVolatile) on the instructions after they have been
/// created. Convenience state exists to specify fast-math flags and fp-math
/// tags.
///
/// The first template argument specifies a class to use for creating constants.
/// This defaults to creating minimally folded constants.  The second template
/// argument allows clients to specify custom insertion hooks that are called on
/// every newly created instruction.
template <typename FolderTy = ConstantFolder,
          typename InserterTy = IRBuilderDefaultInserter>
class IRBuilder : public IRBuilderBase {
private:
  FolderTy Folder;
  InserterTy Inserter;

public:
  IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
        Folder(Folder), Inserter(Inserter) {}

  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}

  explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
                     MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles),
        Folder(Folder) {
    SetInsertPoint(TheBB);
  }

  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB);
  }

  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, FPMathTag,
                      OpBundles) {
    SetInsertPoint(IP);
  }

  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles),
        Folder(Folder) {
    SetInsertPoint(TheBB, IP);
  }

  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB, IP);
  }

  /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
  /// or FastMathFlagGuard instead.
  IRBuilder(const IRBuilder &) = delete;

  InserterTy &getInserter() { return Inserter; }
};
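// Example (a sketch; assumes a BasicBlock *BB to append instructions to):
//   IRBuilder<> Builder(BB);
//   Value *Sum = Builder.CreateAdd(Builder.getInt32(1), Builder.getInt32(2));
// Because the default ConstantFolder is used, CreateAdd here folds to the
// constant i32 3 instead of emitting an add instruction.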

template <typename FolderTy, typename InserterTy>
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *,
          ArrayRef<OperandBundleDef>) -> IRBuilder<FolderTy, InserterTy>;
IRBuilder(LLVMContext &, MDNode *, ArrayRef<OperandBundleDef>) -> IRBuilder<>;
template <typename FolderTy>
IRBuilder(BasicBlock *, FolderTy, MDNode *, ArrayRef<OperandBundleDef>)
    -> IRBuilder<FolderTy>;
IRBuilder(BasicBlock *, MDNode *, ArrayRef<OperandBundleDef>) -> IRBuilder<>;
IRBuilder(Instruction *, MDNode *, ArrayRef<OperandBundleDef>) -> IRBuilder<>;
template <typename FolderTy>
IRBuilder(BasicBlock *, BasicBlock::iterator, FolderTy, MDNode *,
          ArrayRef<OperandBundleDef>) -> IRBuilder<FolderTy>;
IRBuilder(BasicBlock *, BasicBlock::iterator, MDNode *,
          ArrayRef<OperandBundleDef>) -> IRBuilder<>;


// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)

} // end namespace llvm

#endif // LLVM_IR_IRBUILDER_H
//===- llvm/VectorBuilder.h - Builder for VP Intrinsics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the VectorBuilder class, which is used as a convenient way
// to create VP intrinsics as if they were LLVM instructions with a consistent
// and simplified interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VECTORBUILDER_H
#define LLVM_IR_VECTORBUILDER_H

#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/InstrTypes.h>
#include <llvm/IR/Instruction.h>
#include <llvm/IR/Value.h>

namespace llvm {

class VectorBuilder {
public:
  enum class Behavior {
    // Abort if the requested VP intrinsic could not be created.
    // This is useful for strict consistency.
    ReportAndAbort = 0,

    // Return a default-initialized value if the requested VP intrinsic could
    // not be created.
    // This is useful for a defensive fallback to non-VP code.
    SilentlyReturnNone = 1,
  };

private:
  IRBuilderBase &Builder;
  Behavior ErrorHandling;

  // Explicit mask parameter.
  Value *Mask;
  // Explicit vector length parameter.
  Value *ExplicitVectorLength;
  // Compile-time vector length.
  ElementCount StaticVectorLength;

  // Get mask/evl value handles for the current configuration.
  Value &requestMask();
  Value &requestEVL();

  void handleError(const char *ErrorMsg) const;
  template <typename RetType>
  RetType returnWithError(const char *ErrorMsg) const {
    handleError(ErrorMsg);
    return RetType();
  }

public:
  VectorBuilder(IRBuilderBase &Builder,
                Behavior ErrorHandling = Behavior::ReportAndAbort)
      : Builder(Builder), ErrorHandling(ErrorHandling), Mask(nullptr),
        ExplicitVectorLength(nullptr),
        StaticVectorLength(ElementCount::getFixed(0)) {}

  Module &getModule() const;
  LLVMContext &getContext() const { return Builder.getContext(); }

  // All-true mask for the currently configured explicit vector length.
  Value *getAllTrueMask();

  VectorBuilder &setMask(Value *NewMask) {
    Mask = NewMask;
    return *this;
  }
  VectorBuilder &setEVL(Value *NewExplicitVectorLength) {
    ExplicitVectorLength = NewExplicitVectorLength;
    return *this;
  }
  VectorBuilder &setStaticVL(unsigned NewFixedVL) {
    StaticVectorLength = ElementCount::getFixed(NewFixedVL);
    return *this;
  }
  // TODO: setStaticVL(ElementCount) for scalable types.

  // Emit a VP intrinsic call that mimics a regular instruction.
  // On failure, this operation behaves according to the configured
  // error-handling Behavior.
  // \p Opcode      The functional instruction opcode of the emitted intrinsic.
  // \p ReturnTy    The return type of the operation.
  // \p VecOpArray  The operand list.
  Value *createVectorInstruction(unsigned Opcode, Type *ReturnTy,
                                 ArrayRef<Value *> VecOpArray,
                                 const Twine &Name = Twine());
};
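// Example (a sketch; assumes an IRBuilderBase &B, a mask value M, an i32 EVL
// value EVL, and two operands A and X of vector type VecTy):
//   VectorBuilder VB(B);
//   VB.setMask(M).setEVL(EVL);
//   Value *R = VB.createVectorInstruction(Instruction::FAdd, VecTy, {A, X});
// This emits a call to the llvm.vp.fadd intrinsic governed by M and EVL.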

} // namespace llvm

#endif // LLVM_IR_VECTORBUILDER_H
//===-- llvm/Constant.h - Constant class definition -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Constant class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CONSTANT_H
#define LLVM_IR_CONSTANT_H

#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

namespace llvm {

class APInt;

/// This is an important base class in LLVM. It provides the common facilities
/// of all constant values in an LLVM program. A constant is a value that is
/// immutable at runtime. Functions are constants because their address is
/// immutable. The same is true of global variables.
///
/// All constants share the capabilities provided in this class. All constants
/// can have a null value. They can have an operand list. Constants can be
/// simple (integer and floating point values), complex (arrays and structures),
/// or expression based (computations yielding a constant value composed of
/// only certain operators and other constant values).
///
/// Note that Constants are immutable (once created they never change)
/// and are fully shared by structural equivalence.  This means that two
/// structurally equivalent constants will always have the same address.
/// Constants are created on demand as needed and never deleted: thus clients
/// don't have to worry about the lifetime of the objects.
/// LLVM Constant Representation
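/// For example (a sketch; assumes an LLVMContext &Ctx is in scope):
/// \code
///   Type *I32 = Type::getInt32Ty(Ctx);
///   assert(Constant::getNullValue(I32) == Constant::getNullValue(I32) &&
///          "structurally equivalent constants are uniqued");
/// \endcode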
class Constant : public User {
protected:
  Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
    : User(ty, vty, Ops, NumOps) {}

  ~Constant() = default;

public:
  void operator=(const Constant &) = delete;
  Constant(const Constant &) = delete;

  /// Return true if this is the value that would be returned by getNullValue.
  bool isNullValue() const;

  /// Returns true if the value is one.
  bool isOneValue() const;

  /// Return true if the value is not the one value, or,
  /// for vectors, does not contain one value elements.
  bool isNotOneValue() const;

  /// Return true if this is the value that would be returned by
  /// getAllOnesValue.
  bool isAllOnesValue() const;

  /// Return true if the value is what would be returned by
  /// getZeroValueForNegation.
  bool isNegativeZeroValue() const;

  /// Return true if the value is negative zero or null value.
  bool isZeroValue() const;

  /// Return true if the value is not the smallest signed value, or,
  /// for vectors, does not contain smallest signed value elements.
  bool isNotMinSignedValue() const;

  /// Return true if the value is the smallest signed value.
  bool isMinSignedValue() const;

  /// Return true if this is a finite and non-zero floating-point scalar
  /// constant or a fixed width vector constant with all finite and non-zero
  /// elements.
  bool isFiniteNonZeroFP() const;

  /// Return true if this is a normal (as opposed to denormal, infinity, nan,
  /// or zero) floating-point scalar constant or a vector constant with all
  /// normal elements. See APFloat::isNormal.
  bool isNormalFP() const;

  /// Return true if this scalar has an exact multiplicative inverse or this
  /// vector has an exact multiplicative inverse for each element in the vector.
  bool hasExactInverseFP() const;

  /// Return true if this is a floating-point NaN constant or a vector
  /// floating-point constant with all NaN elements.
  bool isNaN() const;

  /// Return true if this constant and a constant 'Y' are element-wise equal.
  /// This is identical to just comparing the pointers, with the exception that
  /// for vectors, if only one of the constants has an `undef` element in some
  /// lane, the constants still match.
  bool isElementWiseEqual(Value *Y) const;

  /// Return true if this is a vector constant that includes any undef or
  /// poison elements. Since it is impossible to inspect a scalable vector
  /// element-wise at compile time, this function returns true only if the
  /// entire vector is undef or poison.
  bool containsUndefOrPoisonElement() const;

  /// Return true if this is a vector constant that includes any poison
  /// elements.
  bool containsPoisonElement() const;

  /// Return true if this is a vector constant that includes any strictly undef
  /// (not poison) elements.
  bool containsUndefElement() const;

  /// Return true if this is a fixed width vector constant that includes
  /// any constant expressions.
  bool containsConstantExpression() const;

  /// Return true if the value can vary between threads.
  bool isThreadDependent() const;

  /// Return true if the value is dependent on a dllimport variable.
  bool isDLLImportDependent() const;

  /// Return true if the constant has users other than constant expressions and
  /// other dangling things.
  bool isConstantUsed() const;

  /// This method classifies the entry according to whether or not it may
  /// generate a relocation entry (either static or dynamic). This must be
  /// conservative, so if it might codegen to a relocatable entry, it should say
  /// so.
  ///
  /// FIXME: This really should not be in IR.
  bool needsRelocation() const;
  bool needsDynamicRelocation() const;

  /// For aggregates (struct/array/vector) return the constant that corresponds
  /// to the specified element if possible, or null if not. This can return null
  /// if the element index is a ConstantExpr, if 'this' is a constant
  /// expression, or if the constant does not fit into a uint64_t.
  Constant *getAggregateElement(unsigned Elt) const;
  Constant *getAggregateElement(Constant *Elt) const;

  /// If all elements of the vector constant have the same value, return that
  /// value. Otherwise, return nullptr. Ignore undefined elements by setting
  /// AllowUndefs to true.
  Constant *getSplatValue(bool AllowUndefs = false) const;

  /// If C is a constant integer then return its value, otherwise C must be a
  /// vector of constant integers, all equal, and the common value is returned.
  const APInt &getUniqueInteger() const;

  /// Called if some element of this constant is no longer valid.
  /// At this point only other constants may be on the use_list for this
  /// constant.  Any constants on our Use list must also be destroyed.  The
  /// implementation must be sure to remove the constant from the list of
  /// available cached constants.  Implementations should implement
  /// destroyConstantImpl to remove constants from any pools/maps they are
  /// contained in.
  void destroyConstant();

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    static_assert(ConstantFirstVal == 0, "V->getValueID() >= ConstantFirstVal always succeeds");
    return V->getValueID() <= ConstantLastVal;
  }

  /// This method is a special form of User::replaceUsesOfWith
  /// (which does not work on constants) that does work
  /// on constants.  Basically this method goes through the trouble of building
  /// a new constant that is equivalent to the current one, with all uses of
  /// From replaced with uses of To.  After this construction is completed, all
  /// of the users of 'this' are replaced to use the new constant, and then
  /// 'this' is deleted.  In general, you should not call this method, instead,
  /// use Value::replaceAllUsesWith, which automatically dispatches to this
  /// method as needed.
  ///
  void handleOperandChange(Value *, Value *);

  static Constant *getNullValue(Type* Ty);

  /// Get the all-ones value.
  /// @returns the value for an integer or vector of integer constant of the
  /// given type that has all its bits set to true.
  static Constant *getAllOnesValue(Type* Ty);

  /// Return the value for an integer or pointer constant, or a vector thereof,
  /// with the given scalar value.
  static Constant *getIntegerValue(Type *Ty, const APInt &V);

  /// If there are any dead constant users dangling off of this constant, remove
  /// them. This method is useful for clients that want to check to see if a
  /// global is unused, but don't want to deal with potentially dead constants
  /// hanging off of the globals.
  void removeDeadConstantUsers() const;

  /// Return true if the constant has exactly one live use.
  ///
  /// This returns the same result as calling Value::hasOneUse after
  /// Constant::removeDeadConstantUsers, but doesn't remove dead constants.
  bool hasOneLiveUse() const;

  /// Return true if the constant has no live uses.
  ///
  /// This returns the same result as calling Value::use_empty after
  /// Constant::removeDeadConstantUsers, but doesn't remove dead constants.
  bool hasZeroLiveUses() const;

  const Constant *stripPointerCasts() const {
    return cast<Constant>(Value::stripPointerCasts());
  }

  Constant *stripPointerCasts() {
    return const_cast<Constant*>(
                      static_cast<const Constant *>(this)->stripPointerCasts());
  }

  /// Try to replace undefined constant C or undefined elements in C with
  /// Replacement. If no changes are made, the constant C is returned.
  static Constant *replaceUndefsWith(Constant *C, Constant *Replacement);

  /// Merges undefs of a Constant with another Constant, along with the
  /// undefs already present. Other doesn't have to be the same type as C, but
  /// both must either be scalars or vectors with the same element count. If no
  /// changes are made, the constant C is returned.
  static Constant *mergeUndefsWith(Constant *C, Constant *Other);

  /// Return true if a constant is ConstantData or a ConstantAggregate or
  /// ConstantExpr that contain only ConstantData.
  bool isManifestConstant() const;

private:
  enum PossibleRelocationsTy {
    /// This constant requires no relocations. That is, it holds simple
    /// constants (like integrals).
    NoRelocation = 0,

    /// This constant holds static relocations that can be resolved by the
    /// static linker.
    LocalRelocation = 1,

    /// This constant holds dynamic relocations that the dynamic linker will
    /// need to resolve.
    GlobalRelocation = 2,
  };

  /// Determine what potential relocations may be needed by this constant.
  PossibleRelocationsTy getRelocationInfo() const;

  bool hasNLiveUses(unsigned N) const;
};

} // end namespace llvm

#endif // LLVM_IR_CONSTANT_H
//===- IntrinsicsRISCVXsf.td - SiFive intrinsics -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the SiFive vendor intrinsics for RISC-V.
//
//===----------------------------------------------------------------------===//

class VCIXSuffix<string range> {
  list<string> suffix = !cond(!eq(range, "c"): ["e8mf8", "e8mf4", "e8mf2", "e8m1", "e8m2", "e8m4", "e8m8"],
                              !eq(range, "s"): ["e16mf4", "e16mf2", "e16m1", "e16m2", "e16m4", "e16m8"],
                              !eq(range, "i"): ["e32mf2", "e32m1", "e32m2", "e32m4", "e32m8"],
                              !eq(range, "l"): ["e64m1", "e64m2", "e64m4", "e64m8"]);
}

let TargetPrefix = "riscv" in {
  // Output: (vector_out) or ()
  // Input: (bit<27-26>, bit<24-20>, scalar_in, vl) or
  //        (bit<27-26>, bit<24-20>, bit<11-7>, scalar_in, vl)
  class RISCVSFCustomVC_X<bit HasDst, bit HasSE, bit ImmScalar>
        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<1>],
                                            [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>]),
                                [llvm_any_ty, llvm_anyint_ty]),
                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>],    // bit<27-26> and bit<24-20>
                                !if(HasDst, [], [ImmArg<ArgIndex<2>>]),                   // Vd or bit<11-7>
                                !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
                                                           [ImmArg<ArgIndex<3>>]), []),   // ScalarOperand
                                !if(HasSE, [IntrHasSideEffects], []))>,
          RISCVVIntrinsic {
    let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
                              HasDst: 2,
                              true: 3);
    let VLOperand = !if(HasDst, 3, 4);
  }
  // Output: (vector_out) or ()
  // Input: (bit<27-26>, vector_in, vector_in/scalar_in, vl) or
  //        (bit<27-26>, bit<11-7>, vector_in, vector_in/scalar_in, vl)
  class RISCVSFCustomVC_XV<bit HasDst, bit HasSE, bit ImmScalar>
        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>],
                                            [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty]),
                                [llvm_any_ty, llvm_anyint_ty]),
                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
                                !if(HasDst, [], [ImmArg<ArgIndex<1>>]),                  // Vd or bit<11-7>
                                !if(ImmScalar, !if(HasDst, [ImmArg<ArgIndex<2>>],
                                                           [ImmArg<ArgIndex<3>>]), []),  // ScalarOperand
                                !if(HasSE, [IntrHasSideEffects], []))>,
          RISCVVIntrinsic {
    let ScalarOperand = !cond(ImmScalar: NoScalarOperand,
                              HasDst: 2,
                              true: 3);
    let VLOperand = !if(HasDst, 3, 4);
  }
  // Output: (vector_out) or ()
  // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or
  //        (bit<27-26>, vector_in, vector_in, vector_in/scalar_in, vl)
  class RISCVSFCustomVC_XVV<bit HasDst, bit HasSE, bit ImmScalar>
        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>],
                                            [llvm_anyint_ty, llvm_anyvector_ty, LLVMMatchType<1>]),
                                [llvm_any_ty, llvm_anyint_ty]),
                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
                                !if(ImmScalar, [ImmArg<ArgIndex<3>>], []),               // ScalarOperand
                                !if(HasSE, [IntrHasSideEffects], []))>,
          RISCVVIntrinsic {
    let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3);
    let VLOperand = 4;
  }
  // Output: (wvector_out) or ()
  // Input: (bit<27-26>, passthru, vector_in, vector_in/scalar_in, vl) or
  //        (bit<27-26>, wvector_in, vector_in, vector_in/scalar_in, vl)
  class RISCVSFCustomVC_XVW<bit HasDst, bit HasSE, bit ImmScalar>
        : Intrinsic<!if(HasDst, [llvm_anyvector_ty], []),
                    !listconcat(!if(HasDst, [llvm_anyint_ty, LLVMMatchType<0>, llvm_anyvector_ty],
                                            [llvm_anyint_ty, llvm_anyvector_ty, llvm_anyvector_ty]),
                                [llvm_any_ty, llvm_anyint_ty]),
                    !listconcat([IntrNoMem, ImmArg<ArgIndex<0>>],                        // bit<27-26>
                                !if(ImmScalar, [ImmArg<ArgIndex<3>>], []),               // ScalarOperand
                                !if(HasSE, [IntrHasSideEffects], []))>,
          RISCVVIntrinsic {
    let ScalarOperand = !if(ImmScalar, NoScalarOperand, 3);
    let VLOperand = 4;
  }

  multiclass RISCVSFCustomVC_X<list<string> type> {
    foreach t = type in {
      defvar ImmScalar = !eq(t, "i");
      defvar range = ["c", "s", "i", "l"];
      foreach r = range in {
        foreach s = VCIXSuffix<r>.suffix in {
          def "int_riscv_sf_vc_" # t # "_se_" # s : RISCVSFCustomVC_X</*HasDst*/0, /*HasSE*/1, ImmScalar>;
        }
      }
      def "int_riscv_sf_vc_v_" # t # "_se" : RISCVSFCustomVC_X</*HasDst*/1, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t         : RISCVSFCustomVC_X</*HasDst*/1, /*HasSE*/0, ImmScalar>;
    }
  }
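  // For example (a sketch of the expansion implied by the loops above),
  // RISCVSFCustomVC_X<["x"]> defines the side-effecting per-element-type
  // intrinsics int_riscv_sf_vc_x_se_e8mf8 through int_riscv_sf_vc_x_se_e64m8,
  // plus int_riscv_sf_vc_v_x_se and int_riscv_sf_vc_v_x.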

  multiclass RISCVSFCustomVC_XV<list<string> type> {
    foreach t = type in {
      defvar ImmScalar = !eq(t, "i");
      def "int_riscv_sf_vc_" # t # "v_se"   : RISCVSFCustomVC_XV</*HasDst*/0, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t # "v_se" : RISCVSFCustomVC_XV</*HasDst*/1, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t # "v"    : RISCVSFCustomVC_XV</*HasDst*/1, /*HasSE*/0, ImmScalar>;
    }
  }

  multiclass RISCVSFCustomVC_XVV<list<string> type> {
    foreach t = type in {
      defvar ImmScalar = !eq(t, "i");
      def "int_riscv_sf_vc_" # t # "vv_se"   : RISCVSFCustomVC_XVV</*HasDst*/0, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t # "vv_se" : RISCVSFCustomVC_XVV</*HasDst*/1, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t # "vv"    : RISCVSFCustomVC_XVV</*HasDst*/1, /*HasSE*/0, ImmScalar>;
    }
  }

  multiclass RISCVSFCustomVC_XVW<list<string> type> {
    foreach t = type in {
      defvar ImmScalar = !eq(t, "i");
      def "int_riscv_sf_vc_" # t # "vw_se"   : RISCVSFCustomVC_XVW</*HasDst*/0, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t # "vw_se" : RISCVSFCustomVC_XVW</*HasDst*/1, /*HasSE*/1, ImmScalar>;
      def "int_riscv_sf_vc_v_" # t # "vw"    : RISCVSFCustomVC_XVW</*HasDst*/1, /*HasSE*/0, ImmScalar>;
    }
  }

  defm "" : RISCVSFCustomVC_X<["x", "i"]>;
  defm "" : RISCVSFCustomVC_XV<["x", "i", "v", "f"]>;
  defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
  defm "" : RISCVSFCustomVC_XVW<["x", "i", "v", "f"]>;
} // TargetPrefix = "riscv"
//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file contains an interface for creating legacy passes to print out IR
/// in various granularities.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_IRPRINTINGPASSES_H
#define LLVM_IR_IRPRINTINGPASSES_H

#include <string>

namespace llvm {
class raw_ostream;
class StringRef;
class FunctionPass;
class ModulePass;
class Pass;

/// Create and return a pass that writes the module to the specified
/// \c raw_ostream.
ModulePass *createPrintModulePass(raw_ostream &OS,
                                  const std::string &Banner = "",
                                  bool ShouldPreserveUseListOrder = false);
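// Example (a sketch; assumes a legacy::PassManager PM is in scope):
//   PM.add(createPrintModulePass(errs(), "; module after my pass"));
// This prints the whole module to stderr (errs()) each time the pass runs.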

/// Create and return a pass that prints functions to the specified
/// \c raw_ostream as they are processed.
FunctionPass *createPrintFunctionPass(raw_ostream &OS,
                                      const std::string &Banner = "");

/// Print out a name of an LLVM value without any prefixes.
///
/// The name is surrounded with ""'s and escaped if it has any special or
/// non-printable characters in it.
void printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name);

/// Return true if a pass is for IR printing.
bool isIRPrintingPass(Pass *P);

} // namespace llvm

#endif
//===-- llvm/GlobalObject.h - Class to represent global objects -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This represents an independent object. That is, a function or a global
// variable, but not an alias.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALOBJECT_H
#define LLVM_IR_GLOBALOBJECT_H

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"

namespace llvm {

class Comdat;
class Metadata;

class GlobalObject : public GlobalValue {
public:
  // VCallVisibility - values for visibility metadata attached to vtables. This
  // describes the scope in which a virtual call could end up being dispatched
  // through this vtable.
  enum VCallVisibility {
    // Type is potentially visible to external code.
    VCallVisibilityPublic = 0,
    // Type is only visible to code which will be in the current Module after
    // LTO internalization.
    VCallVisibilityLinkageUnit = 1,
    // Type is only visible to code in the current Module.
    VCallVisibilityTranslationUnit = 2,
  };

protected:
  GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
               LinkageTypes Linkage, const Twine &Name,
               unsigned AddressSpace = 0)
      : GlobalValue(Ty, VTy, Ops, NumOps, Linkage, Name, AddressSpace) {
    setGlobalValueSubClassData(0);
  }
  ~GlobalObject();

  Comdat *ObjComdat = nullptr;
  enum {
    LastAlignmentBit = 5,
    HasSectionHashEntryBit,

    GlobalObjectBits,
  };
  static const unsigned GlobalObjectSubClassDataBits =
      GlobalValueSubClassDataBits - GlobalObjectBits;

private:
  static const unsigned AlignmentBits = LastAlignmentBit + 1;
  static const unsigned AlignmentMask = (1 << AlignmentBits) - 1;
  static const unsigned GlobalObjectMask = (1 << GlobalObjectBits) - 1;

public:
  GlobalObject(const GlobalObject &) = delete;

  /// FIXME: Remove this function once the transition to Align is over.
  uint64_t getAlignment() const {
    MaybeAlign Align = getAlign();
    return Align ? Align->value() : 0;
  }

  /// Returns the alignment of the given variable or function.
  ///
  /// Note that for functions this is the alignment of the code, not the
  /// alignment of a function pointer.
  MaybeAlign getAlign() const {
    unsigned Data = getGlobalValueSubClassData();
    unsigned AlignmentData = Data & AlignmentMask;
    return decodeMaybeAlign(AlignmentData);
  }

  /// Sets the alignment attribute of the GlobalObject.
  void setAlignment(Align Align);

  /// Sets the alignment attribute of the GlobalObject.
  /// This method will be deprecated as the alignment property should always be
  /// defined.
  void setAlignment(MaybeAlign Align);

  unsigned getGlobalObjectSubClassData() const {
    unsigned ValueData = getGlobalValueSubClassData();
    return ValueData >> GlobalObjectBits;
  }

  void setGlobalObjectSubClassData(unsigned Val) {
    unsigned OldData = getGlobalValueSubClassData();
    setGlobalValueSubClassData((OldData & GlobalObjectMask) |
                               (Val << GlobalObjectBits));
    assert(getGlobalObjectSubClassData() == Val && "representation error");
  }

  /// Check if this global has a custom object file section.
  ///
  /// This is more efficient than calling getSection() and checking for an empty
  /// string.
  bool hasSection() const {
    return getGlobalValueSubClassData() & (1 << HasSectionHashEntryBit);
  }

  /// Get the custom section of this global if it has one.
  ///
  /// If this global does not have a custom section, this will be empty and the
  /// default object file section (.text, .data, etc) will be used.
  StringRef getSection() const {
    return hasSection() ? getSectionImpl() : StringRef();
  }

  /// Change the section for this global.
  ///
  /// Setting the section to the empty string tells LLVM to choose an
  /// appropriate default object file section.
  void setSection(StringRef S);
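
  // Example (a sketch; assumes a GlobalVariable *GV):
  //   GV->setSection(".mydata");  // place GV in a custom section
  //   assert(GV->hasSection() && GV->getSection() == ".mydata");
  //   GV->setSection("");         // revert to the default section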

  bool hasComdat() const { return getComdat() != nullptr; }
  const Comdat *getComdat() const { return ObjComdat; }
  Comdat *getComdat() { return ObjComdat; }
  void setComdat(Comdat *C);

  using Value::addMetadata;
  using Value::clearMetadata;
  using Value::eraseMetadata;
  using Value::getAllMetadata;
  using Value::getMetadata;
  using Value::hasMetadata;
  using Value::setMetadata;

  /// Copy metadata from Src, adjusting offsets by Offset.
  void copyMetadata(const GlobalObject *Src, unsigned Offset);

  void addTypeMetadata(unsigned Offset, Metadata *TypeID);
  void setVCallVisibilityMetadata(VCallVisibility Visibility);
  VCallVisibility getVCallVisibility() const;

  /// Returns true if the alignment of the value can be unilaterally
  /// increased.
  ///
  /// Note that for functions this is the alignment of the code, not the
  /// alignment of a function pointer.
  bool canIncreaseAlignment() const;

protected:
  void copyAttributesFrom(const GlobalObject *Src);

public:
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::FunctionVal ||
           V->getValueID() == Value::GlobalVariableVal ||
           V->getValueID() == Value::GlobalIFuncVal;
  }

private:
  void setGlobalObjectFlag(unsigned Bit, bool Val) {
    unsigned Mask = 1 << Bit;
    setGlobalValueSubClassData((~Mask & getGlobalValueSubClassData()) |
                               (Val ? Mask : 0u));
  }

  StringRef getSectionImpl() const;
};

} // end namespace llvm

#endif // LLVM_IR_GLOBALOBJECT_H
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifdef GET_INTRINSIC_IITINFO
  IIT_Done = 0,
  IIT_I1 = 1,
  IIT_I8 = 2,
  IIT_I16 = 3,
  IIT_I32 = 4,
  IIT_I64 = 5,
  IIT_F16 = 6,
  IIT_F32 = 7,
  IIT_F64 = 8,
  IIT_V2 = 9,
  IIT_V4 = 10,
  IIT_V8 = 11,
  IIT_V16 = 12,
  IIT_V32 = 13,
  IIT_PTR = 14,
  IIT_ARG = 15,
  IIT_V64 = 16,
  IIT_MMX = 17,
  IIT_TOKEN = 18,
  IIT_METADATA = 19,
  IIT_EMPTYSTRUCT = 20,
  IIT_STRUCT2 = 21,
  IIT_STRUCT3 = 22,
  IIT_STRUCT4 = 23,
  IIT_STRUCT5 = 24,
  IIT_EXTEND_ARG = 25,
  IIT_TRUNC_ARG = 26,
  IIT_ANYPTR = 27,
  IIT_V1 = 28,
  IIT_VARARG = 29,
  IIT_HALF_VEC_ARG = 30,
  IIT_SAME_VEC_WIDTH_ARG = 31,
  IIT_VEC_OF_ANYPTRS_TO_ELT = 34,
  IIT_I128 = 35,
  IIT_V512 = 36,
  IIT_V1024 = 37,
  IIT_STRUCT6 = 38,
  IIT_STRUCT7 = 39,
  IIT_STRUCT8 = 40,
  IIT_F128 = 41,
  IIT_VEC_ELEMENT = 42,
  IIT_SCALABLE_VEC = 43,
  IIT_SUBDIVIDE2_ARG = 44,
  IIT_SUBDIVIDE4_ARG = 45,
  IIT_VEC_OF_BITCASTS_TO_INT = 46,
  IIT_V128 = 47,
  IIT_BF16 = 48,
  IIT_STRUCT9 = 49,
  IIT_V256 = 50,
  IIT_AMX = 51,
  IIT_PPCF128 = 52,
  IIT_V3 = 53,
  IIT_EXTERNREF = 54,
  IIT_FUNCREF = 55,
  IIT_I2 = 57,
  IIT_I4 = 58,
  IIT_AARCH64_SVCOUNT = 59,
#endif

// Target mapping
#ifdef GET_INTRINSIC_TARGET_DATA
struct IntrinsicTargetInfo {
  llvm::StringLiteral Name;
  size_t Offset;
  size_t Count;
};
static constexpr IntrinsicTargetInfo TargetInfos[] = {
  {llvm::StringLiteral(""), 0, 427},
  {llvm::StringLiteral("aarch64"), 427, 1384},
  {llvm::StringLiteral("amdgcn"), 1811, 851},
  {llvm::StringLiteral("arm"), 2662, 491},
  {llvm::StringLiteral("bpf"), 3153, 10},
  {llvm::StringLiteral("dx"), 3163, 5},
  {llvm::StringLiteral("hexagon"), 3168, 2009},
  {llvm::StringLiteral("loongarch"), 5177, 50},
  {llvm::StringLiteral("mips"), 5227, 671},
  {llvm::StringLiteral("nvvm"), 5898, 1648},
  {llvm::StringLiteral("ppc"), 7546, 614},
  {llvm::StringLiteral("r600"), 8160, 35},
  {llvm::StringLiteral("riscv"), 8195, 654},
  {llvm::StringLiteral("s390"), 8849, 233},
  {llvm::StringLiteral("spv"), 9082, 19},
  {llvm::StringLiteral("ve"), 9101, 1263},
  {llvm::StringLiteral("wasm"), 10364, 64},
  {llvm::StringLiteral("x86"), 10428, 1444},
  {llvm::StringLiteral("xcore"), 11872, 53},
};
#endif

// Intrinsic ID to name table
#ifdef GET_INTRINSIC_NAME_TABLE
  // Note that entry #0 is the invalid intrinsic!
  "llvm.abs",
  "llvm.addressofreturnaddress",
  "llvm.adjust.trampoline",
  "llvm.annotation",
  "llvm.arithmetic.fence",
  "llvm.asan.check.memaccess",
  "llvm.assume",
  "llvm.bitreverse",
  "llvm.bswap",
  "llvm.call.preallocated.arg",
  "llvm.call.preallocated.setup",
  "llvm.call.preallocated.teardown",
  "llvm.callbr.landingpad",
  "llvm.canonicalize",
  "llvm.ceil",
  "llvm.clear_cache",
  "llvm.codeview.annotation",
  "llvm.convert.from.fp16",
  "llvm.convert.to.fp16",
  "llvm.copysign",
  "llvm.coro.align",
  "llvm.coro.alloc",
  "llvm.coro.alloca.alloc",
  "llvm.coro.alloca.free",
  "llvm.coro.alloca.get",
  "llvm.coro.async.context.alloc",
  "llvm.coro.async.context.dealloc",
  "llvm.coro.async.resume",
  "llvm.coro.async.size.replace",
  "llvm.coro.begin",
  "llvm.coro.destroy",
  "llvm.coro.done",
  "llvm.coro.end",
  "llvm.coro.end.async",
  "llvm.coro.frame",
  "llvm.coro.free",
  "llvm.coro.id",
  "llvm.coro.id.async",
  "llvm.coro.id.retcon",
  "llvm.coro.id.retcon.once",
  "llvm.coro.noop",
  "llvm.coro.prepare.async",
  "llvm.coro.prepare.retcon",
  "llvm.coro.promise",
  "llvm.coro.resume",
  "llvm.coro.save",
  "llvm.coro.size",
  "llvm.coro.subfn.addr",
  "llvm.coro.suspend",
  "llvm.coro.suspend.async",
  "llvm.coro.suspend.retcon",
  "llvm.cos",
  "llvm.ctlz",
  "llvm.ctpop",
  "llvm.cttz",
  "llvm.dbg.assign",
  "llvm.dbg.declare",
  "llvm.dbg.label",
  "llvm.dbg.value",
  "llvm.debugtrap",
  "llvm.donothing",
  "llvm.eh.dwarf.cfa",
  "llvm.eh.exceptioncode",
  "llvm.eh.exceptionpointer",
  "llvm.eh.recoverfp",
  "llvm.eh.return.i32",
  "llvm.eh.return.i64",
  "llvm.eh.sjlj.callsite",
  "llvm.eh.sjlj.functioncontext",
  "llvm.eh.sjlj.longjmp",
  "llvm.eh.sjlj.lsda",
  "llvm.eh.sjlj.setjmp",
  "llvm.eh.sjlj.setup.dispatch",
  "llvm.eh.typeid.for",
  "llvm.eh.unwind.init",
  "llvm.exp",
  "llvm.exp2",
  "llvm.expect",
  "llvm.expect.with.probability",
  "llvm.experimental.constrained.ceil",
  "llvm.experimental.constrained.cos",
  "llvm.experimental.constrained.exp",
  "llvm.experimental.constrained.exp2",
  "llvm.experimental.constrained.fadd",
  "llvm.experimental.constrained.fcmp",
  "llvm.experimental.constrained.fcmps",
  "llvm.experimental.constrained.fdiv",
  "llvm.experimental.constrained.floor",
  "llvm.experimental.constrained.fma",
  "llvm.experimental.constrained.fmul",
  "llvm.experimental.constrained.fmuladd",
  "llvm.experimental.constrained.fpext",
  "llvm.experimental.constrained.fptosi",
  "llvm.experimental.constrained.fptoui",
  "llvm.experimental.constrained.fptrunc",
  "llvm.experimental.constrained.frem",
  "llvm.experimental.constrained.fsub",
  "llvm.experimental.constrained.ldexp",
  "llvm.experimental.constrained.llrint",
  "llvm.experimental.constrained.llround",
  "llvm.experimental.constrained.log",
  "llvm.experimental.constrained.log10",
  "llvm.experimental.constrained.log2",
  "llvm.experimental.constrained.lrint",
  "llvm.experimental.constrained.lround",
  "llvm.experimental.constrained.maximum",
  "llvm.experimental.constrained.maxnum",
  "llvm.experimental.constrained.minimum",
  "llvm.experimental.constrained.minnum",
  "llvm.experimental.constrained.nearbyint",
  "llvm.experimental.constrained.pow",
  "llvm.experimental.constrained.powi",
  "llvm.experimental.constrained.rint",
  "llvm.experimental.constrained.round",
  "llvm.experimental.constrained.roundeven",
  "llvm.experimental.constrained.sin",
  "llvm.experimental.constrained.sitofp",
  "llvm.experimental.constrained.sqrt",
  "llvm.experimental.constrained.trunc",
  "llvm.experimental.constrained.uitofp",
  "llvm.experimental.convergence.anchor",
  "llvm.experimental.convergence.entry",
  "llvm.experimental.convergence.loop",
  "llvm.experimental.deoptimize",
  "llvm.experimental.gc.get.pointer.base",
  "llvm.experimental.gc.get.pointer.offset",
  "llvm.experimental.gc.relocate",
  "llvm.experimental.gc.result",
  "llvm.experimental.gc.statepoint",
  "llvm.experimental.get.vector.length",
  "llvm.experimental.guard",
  "llvm.experimental.noalias.scope.decl",
  "llvm.experimental.patchpoint.i64",
  "llvm.experimental.patchpoint.void",
  "llvm.experimental.stackmap",
  "llvm.experimental.stepvector",
  "llvm.experimental.vector.deinterleave2",
  "llvm.experimental.vector.interleave2",
  "llvm.experimental.vector.reverse",
  "llvm.experimental.vector.splice",
  "llvm.experimental.vp.splice",
  "llvm.experimental.vp.strided.load",
  "llvm.experimental.vp.strided.store",
  "llvm.experimental.widenable.condition",
  "llvm.fabs",
  "llvm.floor",
  "llvm.fma",
  "llvm.fmuladd",
  "llvm.fptosi.sat",
  "llvm.fptoui.sat",
  "llvm.fptrunc.round",
  "llvm.frameaddress",
  "llvm.frexp",
  "llvm.fshl",
  "llvm.fshr",
  "llvm.gcread",
  "llvm.gcroot",
  "llvm.gcwrite",
  "llvm.get.active.lane.mask",
  "llvm.get.dynamic.area.offset",
  "llvm.get.fpenv",
  "llvm.get.rounding",
  "llvm.hwasan.check.memaccess",
  "llvm.hwasan.check.memaccess.shortgranules",
  "llvm.icall.branch.funnel",
  "llvm.init.trampoline",
  "llvm.instrprof.cover",
  "llvm.instrprof.increment",
  "llvm.instrprof.increment.step",
  "llvm.instrprof.timestamp",
  "llvm.instrprof.value.profile",
  "llvm.invariant.end",
  "llvm.invariant.start",
  "llvm.is.constant",
  "llvm.is.fpclass",
  "llvm.launder.invariant.group",
  "llvm.ldexp",
  "llvm.lifetime.end",
  "llvm.lifetime.start",
  "llvm.llrint",
  "llvm.llround",
  "llvm.load.relative",
  "llvm.localaddress",
  "llvm.localescape",
  "llvm.localrecover",
  "llvm.log",
  "llvm.log10",
  "llvm.log2",
  "llvm.loop.decrement",
  "llvm.loop.decrement.reg",
  "llvm.lrint",
  "llvm.lround",
  "llvm.masked.compressstore",
  "llvm.masked.expandload",
  "llvm.masked.gather",
  "llvm.masked.load",
  "llvm.masked.scatter",
  "llvm.masked.store",
  "llvm.matrix.column.major.load",
  "llvm.matrix.column.major.store",
  "llvm.matrix.multiply",
  "llvm.matrix.transpose",
  "llvm.maximum",
  "llvm.maxnum",
  "llvm.memcpy",
  "llvm.memcpy.element.unordered.atomic",
  "llvm.memcpy.inline",
  "llvm.memmove",
  "llvm.memmove.element.unordered.atomic",
  "llvm.memset",
  "llvm.memset.element.unordered.atomic",
  "llvm.memset.inline",
  "llvm.minimum",
  "llvm.minnum",
  "llvm.nearbyint",
  "llvm.objc.arc.annotation.bottomup.bbend",
  "llvm.objc.arc.annotation.bottomup.bbstart",
  "llvm.objc.arc.annotation.topdown.bbend",
  "llvm.objc.arc.annotation.topdown.bbstart",
  "llvm.objc.autorelease",
  "llvm.objc.autoreleasePoolPop",
  "llvm.objc.autoreleasePoolPush",
  "llvm.objc.autoreleaseReturnValue",
  "llvm.objc.clang.arc.noop.use",
  "llvm.objc.clang.arc.use",
  "llvm.objc.copyWeak",
  "llvm.objc.destroyWeak",
  "llvm.objc.initWeak",
  "llvm.objc.loadWeak",
  "llvm.objc.loadWeakRetained",
  "llvm.objc.moveWeak",
  "llvm.objc.release",
  "llvm.objc.retain",
  "llvm.objc.retain.autorelease",
  "llvm.objc.retainAutorelease",
  "llvm.objc.retainAutoreleaseReturnValue",
  "llvm.objc.retainAutoreleasedReturnValue",
  "llvm.objc.retainBlock",
  "llvm.objc.retainedObject",
  "llvm.objc.storeStrong",
  "llvm.objc.storeWeak",
  "llvm.objc.sync.enter",
  "llvm.objc.sync.exit",
  "llvm.objc.unretainedObject",
  "llvm.objc.unretainedPointer",
  "llvm.objc.unsafeClaimAutoreleasedReturnValue",
  "llvm.objectsize",
  "llvm.pcmarker",
  "llvm.pow",
  "llvm.powi",
  "llvm.prefetch",
  "llvm.preserve.array.access.index",
  "llvm.preserve.struct.access.index",
  "llvm.preserve.union.access.index",
  "llvm.pseudoprobe",
  "llvm.ptr.annotation",
  "llvm.ptrauth.auth",
  "llvm.ptrauth.blend",
  "llvm.ptrauth.resign",
  "llvm.ptrauth.sign",
  "llvm.ptrauth.sign.generic",
  "llvm.ptrauth.strip",
  "llvm.ptrmask",
  "llvm.public.type.test",
  "llvm.read_register",
  "llvm.read_volatile_register",
  "llvm.readcyclecounter",
  "llvm.reset.fpenv",
  "llvm.returnaddress",
  "llvm.rint",
  "llvm.round",
  "llvm.roundeven",
  "llvm.sadd.sat",
  "llvm.sadd.with.overflow",
  "llvm.sdiv.fix",
  "llvm.sdiv.fix.sat",
  "llvm.seh.scope.begin",
  "llvm.seh.scope.end",
  "llvm.seh.try.begin",
  "llvm.seh.try.end",
  "llvm.set.fpenv",
  "llvm.set.loop.iterations",
  "llvm.set.rounding",
  "llvm.sideeffect",
  "llvm.sin",
  "llvm.smax",
  "llvm.smin",
  "llvm.smul.fix",
  "llvm.smul.fix.sat",
  "llvm.smul.with.overflow",
  "llvm.sponentry",
  "llvm.sqrt",
  "llvm.ssa.copy",
  "llvm.sshl.sat",
  "llvm.ssub.sat",
  "llvm.ssub.with.overflow",
  "llvm.stackguard",
  "llvm.stackprotector",
  "llvm.stackrestore",
  "llvm.stacksave",
  "llvm.start.loop.iterations",
  "llvm.strip.invariant.group",
  "llvm.swift.async.context.addr",
  "llvm.test.set.loop.iterations",
  "llvm.test.start.loop.iterations",
  "llvm.thread.pointer",
  "llvm.threadlocal.address",
  "llvm.trap",
  "llvm.trunc",
  "llvm.type.checked.load",
  "llvm.type.checked.load.relative",
  "llvm.type.test",
  "llvm.uadd.sat",
  "llvm.uadd.with.overflow",
  "llvm.ubsantrap",
  "llvm.udiv.fix",
  "llvm.udiv.fix.sat",
  "llvm.umax",
  "llvm.umin",
  "llvm.umul.fix",
  "llvm.umul.fix.sat",
  "llvm.umul.with.overflow",
  "llvm.ushl.sat",
  "llvm.usub.sat",
  "llvm.usub.with.overflow",
  "llvm.va_copy",
  "llvm.va_end",
  "llvm.va_start",
  "llvm.var.annotation",
  "llvm.vector.extract",
  "llvm.vector.insert",
  "llvm.vector.reduce.add",
  "llvm.vector.reduce.and",
  "llvm.vector.reduce.fadd",
  "llvm.vector.reduce.fmax",
  "llvm.vector.reduce.fmaximum",
  "llvm.vector.reduce.fmin",
  "llvm.vector.reduce.fminimum",
  "llvm.vector.reduce.fmul",
  "llvm.vector.reduce.mul",
  "llvm.vector.reduce.or",
  "llvm.vector.reduce.smax",
  "llvm.vector.reduce.smin",
  "llvm.vector.reduce.umax",
  "llvm.vector.reduce.umin",
  "llvm.vector.reduce.xor",
  "llvm.vp.abs",
  "llvm.vp.add",
  "llvm.vp.and",
  "llvm.vp.ashr",
  "llvm.vp.bitreverse",
  "llvm.vp.bswap",
  "llvm.vp.ceil",
  "llvm.vp.copysign",
  "llvm.vp.ctlz",
  "llvm.vp.ctpop",
  "llvm.vp.cttz",
  "llvm.vp.fabs",
  "llvm.vp.fadd",
  "llvm.vp.fcmp",
  "llvm.vp.fdiv",
  "llvm.vp.floor",
  "llvm.vp.fma",
  "llvm.vp.fmul",
  "llvm.vp.fmuladd",
  "llvm.vp.fneg",
  "llvm.vp.fpext",
  "llvm.vp.fptosi",
  "llvm.vp.fptoui",
  "llvm.vp.fptrunc",
  "llvm.vp.frem",
  "llvm.vp.fshl",
  "llvm.vp.fshr",
  "llvm.vp.fsub",
  "llvm.vp.gather",
  "llvm.vp.icmp",
  "llvm.vp.inttoptr",
  "llvm.vp.load",
  "llvm.vp.lshr",
  "llvm.vp.maxnum",
  "llvm.vp.merge",
  "llvm.vp.minnum",
  "llvm.vp.mul",
  "llvm.vp.nearbyint",
  "llvm.vp.or",
  "llvm.vp.ptrtoint",
  "llvm.vp.reduce.add",
  "llvm.vp.reduce.and",
  "llvm.vp.reduce.fadd",
  "llvm.vp.reduce.fmax",
  "llvm.vp.reduce.fmin",
  "llvm.vp.reduce.fmul",
  "llvm.vp.reduce.mul",
  "llvm.vp.reduce.or",
  "llvm.vp.reduce.smax",
  "llvm.vp.reduce.smin",
  "llvm.vp.reduce.umax",
  "llvm.vp.reduce.umin",
  "llvm.vp.reduce.xor",
  "llvm.vp.rint",
  "llvm.vp.round",
  "llvm.vp.roundeven",
  "llvm.vp.roundtozero",
  "llvm.vp.scatter",
  "llvm.vp.sdiv",
  "llvm.vp.select",
  "llvm.vp.sext",
  "llvm.vp.shl",
  "llvm.vp.sitofp",
  "llvm.vp.smax",
  "llvm.vp.smin",
  "llvm.vp.sqrt",
  "llvm.vp.srem",
  "llvm.vp.store",
  "llvm.vp.sub",
  "llvm.vp.trunc",
  "llvm.vp.udiv",
  "llvm.vp.uitofp",
  "llvm.vp.umax",
  "llvm.vp.umin",
  "llvm.vp.urem",
  "llvm.vp.xor",
  "llvm.vp.zext",
  "llvm.vscale",
  "llvm.write_register",
  "llvm.xray.customevent",
  "llvm.xray.typedevent",
  "llvm.aarch64.addg",
  "llvm.aarch64.break",
  "llvm.aarch64.clrex",
  "llvm.aarch64.cls",
  "llvm.aarch64.cls64",
  "llvm.aarch64.crc32b",
  "llvm.aarch64.crc32cb",
  "llvm.aarch64.crc32ch",
  "llvm.aarch64.crc32cw",
  "llvm.aarch64.crc32cx",
  "llvm.aarch64.crc32h",
  "llvm.aarch64.crc32w",
  "llvm.aarch64.crc32x",
  "llvm.aarch64.crypto.aesd",
  "llvm.aarch64.crypto.aese",
  "llvm.aarch64.crypto.aesimc",
  "llvm.aarch64.crypto.aesmc",
  "llvm.aarch64.crypto.bcaxs",
  "llvm.aarch64.crypto.bcaxu",
  "llvm.aarch64.crypto.eor3s",
  "llvm.aarch64.crypto.eor3u",
  "llvm.aarch64.crypto.rax1",
  "llvm.aarch64.crypto.sha1c",
  "llvm.aarch64.crypto.sha1h",
  "llvm.aarch64.crypto.sha1m",
  "llvm.aarch64.crypto.sha1p",
  "llvm.aarch64.crypto.sha1su0",
  "llvm.aarch64.crypto.sha1su1",
  "llvm.aarch64.crypto.sha256h",
  "llvm.aarch64.crypto.sha256h2",
  "llvm.aarch64.crypto.sha256su0",
  "llvm.aarch64.crypto.sha256su1",
  "llvm.aarch64.crypto.sha512h",
  "llvm.aarch64.crypto.sha512h2",
  "llvm.aarch64.crypto.sha512su0",
  "llvm.aarch64.crypto.sha512su1",
  "llvm.aarch64.crypto.sm3partw1",
  "llvm.aarch64.crypto.sm3partw2",
  "llvm.aarch64.crypto.sm3ss1",
  "llvm.aarch64.crypto.sm3tt1a",
  "llvm.aarch64.crypto.sm3tt1b",
  "llvm.aarch64.crypto.sm3tt2a",
  "llvm.aarch64.crypto.sm3tt2b",
  "llvm.aarch64.crypto.sm4e",
  "llvm.aarch64.crypto.sm4ekey",
  "llvm.aarch64.crypto.xar",
  "llvm.aarch64.dmb",
  "llvm.aarch64.dsb",
  "llvm.aarch64.fjcvtzs",
  "llvm.aarch64.frint32x",
  "llvm.aarch64.frint32z",
  "llvm.aarch64.frint64x",
  "llvm.aarch64.frint64z",
  "llvm.aarch64.get.fpcr",
  "llvm.aarch64.gmi",
  "llvm.aarch64.hint",
  "llvm.aarch64.irg",
  "llvm.aarch64.irg.sp",
  "llvm.aarch64.isb",
  "llvm.aarch64.ld64b",
  "llvm.aarch64.ldaxp",
  "llvm.aarch64.ldaxr",
  "llvm.aarch64.ldg",
  "llvm.aarch64.ldxp",
  "llvm.aarch64.ldxr",
  "llvm.aarch64.mops.memset.tag",
  "llvm.aarch64.neon.abs",
  "llvm.aarch64.neon.addhn",
  "llvm.aarch64.neon.addp",
  "llvm.aarch64.neon.bfcvt",
  "llvm.aarch64.neon.bfcvtn",
  "llvm.aarch64.neon.bfcvtn2",
  "llvm.aarch64.neon.bfdot",
  "llvm.aarch64.neon.bfmlalb",
  "llvm.aarch64.neon.bfmlalt",
  "llvm.aarch64.neon.bfmmla",
  "llvm.aarch64.neon.cls",
  "llvm.aarch64.neon.fabd",
  "llvm.aarch64.neon.facge",
  "llvm.aarch64.neon.facgt",
  "llvm.aarch64.neon.faddp",
  "llvm.aarch64.neon.faddv",
  "llvm.aarch64.neon.fcvtas",
  "llvm.aarch64.neon.fcvtau",
  "llvm.aarch64.neon.fcvtms",
  "llvm.aarch64.neon.fcvtmu",
  "llvm.aarch64.neon.fcvtns",
  "llvm.aarch64.neon.fcvtnu",
  "llvm.aarch64.neon.fcvtps",
  "llvm.aarch64.neon.fcvtpu",
  "llvm.aarch64.neon.fcvtxn",
  "llvm.aarch64.neon.fcvtzs",
  "llvm.aarch64.neon.fcvtzu",
  "llvm.aarch64.neon.fmax",
  "llvm.aarch64.neon.fmaxnm",
  "llvm.aarch64.neon.fmaxnmp",
  "llvm.aarch64.neon.fmaxnmv",
  "llvm.aarch64.neon.fmaxp",
  "llvm.aarch64.neon.fmaxv",
  "llvm.aarch64.neon.fmin",
  "llvm.aarch64.neon.fminnm",
  "llvm.aarch64.neon.fminnmp",
  "llvm.aarch64.neon.fminnmv",
  "llvm.aarch64.neon.fminp",
  "llvm.aarch64.neon.fminv",
  "llvm.aarch64.neon.fmlal",
  "llvm.aarch64.neon.fmlal2",
  "llvm.aarch64.neon.fmlsl",
  "llvm.aarch64.neon.fmlsl2",
  "llvm.aarch64.neon.fmulx",
  "llvm.aarch64.neon.frecpe",
  "llvm.aarch64.neon.frecps",
  "llvm.aarch64.neon.frecpx",
  "llvm.aarch64.neon.frint32x",
  "llvm.aarch64.neon.frint32z",
  "llvm.aarch64.neon.frint64x",
  "llvm.aarch64.neon.frint64z",
  "llvm.aarch64.neon.frsqrte",
  "llvm.aarch64.neon.frsqrts",
  "llvm.aarch64.neon.ld1x2",
  "llvm.aarch64.neon.ld1x3",
  "llvm.aarch64.neon.ld1x4",
  "llvm.aarch64.neon.ld2",
  "llvm.aarch64.neon.ld2lane",
  "llvm.aarch64.neon.ld2r",
  "llvm.aarch64.neon.ld3",
  "llvm.aarch64.neon.ld3lane",
  "llvm.aarch64.neon.ld3r",
  "llvm.aarch64.neon.ld4",
  "llvm.aarch64.neon.ld4lane",
  "llvm.aarch64.neon.ld4r",
  "llvm.aarch64.neon.pmul",
  "llvm.aarch64.neon.pmull",
  "llvm.aarch64.neon.pmull64",
  "llvm.aarch64.neon.raddhn",
  "llvm.aarch64.neon.rshrn",
  "llvm.aarch64.neon.rsubhn",
  "llvm.aarch64.neon.sabd",
  "llvm.aarch64.neon.saddlp",
  "llvm.aarch64.neon.saddlv",
  "llvm.aarch64.neon.saddv",
  "llvm.aarch64.neon.scalar.sqxtn",
  "llvm.aarch64.neon.scalar.sqxtun",
  "llvm.aarch64.neon.scalar.uqxtn",
  "llvm.aarch64.neon.sdot",
  "llvm.aarch64.neon.shadd",
  "llvm.aarch64.neon.shll",
  "llvm.aarch64.neon.shsub",
  "llvm.aarch64.neon.smax",
  "llvm.aarch64.neon.smaxp",
  "llvm.aarch64.neon.smaxv",
  "llvm.aarch64.neon.smin",
  "llvm.aarch64.neon.sminp",
  "llvm.aarch64.neon.sminv",
  "llvm.aarch64.neon.smmla",
  "llvm.aarch64.neon.smull",
  "llvm.aarch64.neon.sqabs",
  "llvm.aarch64.neon.sqadd",
  "llvm.aarch64.neon.sqdmulh",
  "llvm.aarch64.neon.sqdmulh.lane",
  "llvm.aarch64.neon.sqdmulh.laneq",
  "llvm.aarch64.neon.sqdmull",
  "llvm.aarch64.neon.sqdmulls.scalar",
  "llvm.aarch64.neon.sqneg",
  "llvm.aarch64.neon.sqrdmlah",
  "llvm.aarch64.neon.sqrdmlsh",
  "llvm.aarch64.neon.sqrdmulh",
  "llvm.aarch64.neon.sqrdmulh.lane",
  "llvm.aarch64.neon.sqrdmulh.laneq",
  "llvm.aarch64.neon.sqrshl",
  "llvm.aarch64.neon.sqrshrn",
  "llvm.aarch64.neon.sqrshrun",
  "llvm.aarch64.neon.sqshl",
  "llvm.aarch64.neon.sqshlu",
  "llvm.aarch64.neon.sqshrn",
  "llvm.aarch64.neon.sqshrun",
  "llvm.aarch64.neon.sqsub",
  "llvm.aarch64.neon.sqxtn",
  "llvm.aarch64.neon.sqxtun",
  "llvm.aarch64.neon.srhadd",
  "llvm.aarch64.neon.srshl",
  "llvm.aarch64.neon.sshl",
  "llvm.aarch64.neon.sshll",
  "llvm.aarch64.neon.st1x2",
  "llvm.aarch64.neon.st1x3",
  "llvm.aarch64.neon.st1x4",
  "llvm.aarch64.neon.st2",
  "llvm.aarch64.neon.st2lane",
  "llvm.aarch64.neon.st3",
  "llvm.aarch64.neon.st3lane",
  "llvm.aarch64.neon.st4",
  "llvm.aarch64.neon.st4lane",
  "llvm.aarch64.neon.subhn",
  "llvm.aarch64.neon.suqadd",
  "llvm.aarch64.neon.tbl1",
  "llvm.aarch64.neon.tbl2",
  "llvm.aarch64.neon.tbl3",
  "llvm.aarch64.neon.tbl4",
  "llvm.aarch64.neon.tbx1",
  "llvm.aarch64.neon.tbx2",
  "llvm.aarch64.neon.tbx3",
  "llvm.aarch64.neon.tbx4",
  "llvm.aarch64.neon.uabd",
  "llvm.aarch64.neon.uaddlp",
  "llvm.aarch64.neon.uaddlv",
  "llvm.aarch64.neon.uaddv",
  "llvm.aarch64.neon.udot",
  "llvm.aarch64.neon.uhadd",
  "llvm.aarch64.neon.uhsub",
  "llvm.aarch64.neon.umax",
  "llvm.aarch64.neon.umaxp",
  "llvm.aarch64.neon.umaxv",
  "llvm.aarch64.neon.umin",
  "llvm.aarch64.neon.uminp",
  "llvm.aarch64.neon.uminv",
  "llvm.aarch64.neon.ummla",
  "llvm.aarch64.neon.umull",
  "llvm.aarch64.neon.uqadd",
  "llvm.aarch64.neon.uqrshl",
  "llvm.aarch64.neon.uqrshrn",
  "llvm.aarch64.neon.uqshl",
  "llvm.aarch64.neon.uqshrn",
  "llvm.aarch64.neon.uqsub",
  "llvm.aarch64.neon.uqxtn",
  "llvm.aarch64.neon.urecpe",
  "llvm.aarch64.neon.urhadd",
  "llvm.aarch64.neon.urshl",
  "llvm.aarch64.neon.ursqrte",
  "llvm.aarch64.neon.usdot",
  "llvm.aarch64.neon.ushl",
  "llvm.aarch64.neon.ushll",
  "llvm.aarch64.neon.usmmla",
  "llvm.aarch64.neon.usqadd",
  "llvm.aarch64.neon.vcadd.rot270",
  "llvm.aarch64.neon.vcadd.rot90",
  "llvm.aarch64.neon.vcmla.rot0",
  "llvm.aarch64.neon.vcmla.rot180",
  "llvm.aarch64.neon.vcmla.rot270",
  "llvm.aarch64.neon.vcmla.rot90",
  "llvm.aarch64.neon.vcopy.lane",
  "llvm.aarch64.neon.vcvtfp2fxs",
  "llvm.aarch64.neon.vcvtfp2fxu",
  "llvm.aarch64.neon.vcvtfp2hf",
  "llvm.aarch64.neon.vcvtfxs2fp",
  "llvm.aarch64.neon.vcvtfxu2fp",
  "llvm.aarch64.neon.vcvthf2fp",
  "llvm.aarch64.neon.vsli",
  "llvm.aarch64.neon.vsri",
  "llvm.aarch64.prefetch",
  "llvm.aarch64.rndr",
  "llvm.aarch64.rndrrs",
  "llvm.aarch64.sdiv",
  "llvm.aarch64.set.fpcr",
  "llvm.aarch64.settag",
  "llvm.aarch64.settag.zero",
  "llvm.aarch64.sisd.fabd",
  "llvm.aarch64.sisd.fcvtxn",
  "llvm.aarch64.sme.add.write.single.za.vg1x2",
  "llvm.aarch64.sme.add.write.single.za.vg1x4",
  "llvm.aarch64.sme.add.write.za.vg1x2",
  "llvm.aarch64.sme.add.write.za.vg1x4",
  "llvm.aarch64.sme.add.za32.vg1x2",
  "llvm.aarch64.sme.add.za32.vg1x4",
  "llvm.aarch64.sme.add.za64.vg1x2",
  "llvm.aarch64.sme.add.za64.vg1x4",
  "llvm.aarch64.sme.addha",
  "llvm.aarch64.sme.addva",
  "llvm.aarch64.sme.bmopa.za32",
  "llvm.aarch64.sme.bmops.za32",
  "llvm.aarch64.sme.cntsb",
  "llvm.aarch64.sme.cntsd",
  "llvm.aarch64.sme.cntsh",
  "llvm.aarch64.sme.cntsw",
  "llvm.aarch64.sme.fdot.lane.za32.vg1x2",
  "llvm.aarch64.sme.fdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.fdot.single.za32.vg1x2",
  "llvm.aarch64.sme.fdot.single.za32.vg1x4",
  "llvm.aarch64.sme.fdot.za32.vg1x2",
  "llvm.aarch64.sme.fdot.za32.vg1x4",
  "llvm.aarch64.sme.fmla.lane.vg1x2",
  "llvm.aarch64.sme.fmla.lane.vg1x4",
  "llvm.aarch64.sme.fmla.single.vg1x2",
  "llvm.aarch64.sme.fmla.single.vg1x4",
  "llvm.aarch64.sme.fmla.vg1x2",
  "llvm.aarch64.sme.fmla.vg1x4",
  "llvm.aarch64.sme.fmlal.lane.vg2x1",
  "llvm.aarch64.sme.fmlal.lane.vg2x2",
  "llvm.aarch64.sme.fmlal.lane.vg2x4",
  "llvm.aarch64.sme.fmlal.single.vg2x1",
  "llvm.aarch64.sme.fmlal.single.vg2x2",
  "llvm.aarch64.sme.fmlal.single.vg2x4",
  "llvm.aarch64.sme.fmlal.vg2x2",
  "llvm.aarch64.sme.fmlal.vg2x4",
  "llvm.aarch64.sme.fmls.lane.vg1x2",
  "llvm.aarch64.sme.fmls.lane.vg1x4",
  "llvm.aarch64.sme.fmls.single.vg1x2",
  "llvm.aarch64.sme.fmls.single.vg1x4",
  "llvm.aarch64.sme.fmls.vg1x2",
  "llvm.aarch64.sme.fmls.vg1x4",
  "llvm.aarch64.sme.fmlsl.lane.vg2x1",
  "llvm.aarch64.sme.fmlsl.lane.vg2x2",
  "llvm.aarch64.sme.fmlsl.lane.vg2x4",
  "llvm.aarch64.sme.fmlsl.single.vg2x1",
  "llvm.aarch64.sme.fmlsl.single.vg2x2",
  "llvm.aarch64.sme.fmlsl.single.vg2x4",
  "llvm.aarch64.sme.fmlsl.vg2x2",
  "llvm.aarch64.sme.fmlsl.vg2x4",
  "llvm.aarch64.sme.fvdot.lane.za32.vg1x2",
  "llvm.aarch64.sme.get.tpidr2",
  "llvm.aarch64.sme.ld1b.horiz",
  "llvm.aarch64.sme.ld1b.vert",
  "llvm.aarch64.sme.ld1d.horiz",
  "llvm.aarch64.sme.ld1d.vert",
  "llvm.aarch64.sme.ld1h.horiz",
  "llvm.aarch64.sme.ld1h.vert",
  "llvm.aarch64.sme.ld1q.horiz",
  "llvm.aarch64.sme.ld1q.vert",
  "llvm.aarch64.sme.ld1w.horiz",
  "llvm.aarch64.sme.ld1w.vert",
  "llvm.aarch64.sme.ldr",
  "llvm.aarch64.sme.mopa",
  "llvm.aarch64.sme.mopa.wide",
  "llvm.aarch64.sme.mops",
  "llvm.aarch64.sme.mops.wide",
  "llvm.aarch64.sme.read.hor.vg2",
  "llvm.aarch64.sme.read.hor.vg4",
  "llvm.aarch64.sme.read.horiz",
  "llvm.aarch64.sme.read.ver.vg2",
  "llvm.aarch64.sme.read.ver.vg4",
  "llvm.aarch64.sme.read.vert",
  "llvm.aarch64.sme.read.vg1x2",
  "llvm.aarch64.sme.read.vg1x4",
  "llvm.aarch64.sme.readq.horiz",
  "llvm.aarch64.sme.readq.vert",
  "llvm.aarch64.sme.sdot.lane.za32.vg1x2",
  "llvm.aarch64.sme.sdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.sdot.lane.za64.vg1x2",
  "llvm.aarch64.sme.sdot.lane.za64.vg1x4",
  "llvm.aarch64.sme.sdot.single.za32.vg1x2",
  "llvm.aarch64.sme.sdot.single.za32.vg1x4",
  "llvm.aarch64.sme.sdot.single.za64.vg1x2",
  "llvm.aarch64.sme.sdot.single.za64.vg1x4",
  "llvm.aarch64.sme.sdot.za32.vg1x2",
  "llvm.aarch64.sme.sdot.za32.vg1x4",
  "llvm.aarch64.sme.sdot.za64.vg1x2",
  "llvm.aarch64.sme.sdot.za64.vg1x4",
  "llvm.aarch64.sme.set.tpidr2",
  "llvm.aarch64.sme.smla.za32.lane.vg4x1",
  "llvm.aarch64.sme.smla.za32.lane.vg4x2",
  "llvm.aarch64.sme.smla.za32.lane.vg4x4",
  "llvm.aarch64.sme.smla.za32.single.vg4x1",
  "llvm.aarch64.sme.smla.za32.single.vg4x2",
  "llvm.aarch64.sme.smla.za32.single.vg4x4",
  "llvm.aarch64.sme.smla.za32.vg4x2",
  "llvm.aarch64.sme.smla.za32.vg4x4",
  "llvm.aarch64.sme.smla.za64.lane.vg4x1",
  "llvm.aarch64.sme.smla.za64.lane.vg4x2",
  "llvm.aarch64.sme.smla.za64.lane.vg4x4",
  "llvm.aarch64.sme.smla.za64.single.vg4x1",
  "llvm.aarch64.sme.smla.za64.single.vg4x2",
  "llvm.aarch64.sme.smla.za64.single.vg4x4",
  "llvm.aarch64.sme.smla.za64.vg4x2",
  "llvm.aarch64.sme.smla.za64.vg4x4",
  "llvm.aarch64.sme.smlal.lane.vg2x1",
  "llvm.aarch64.sme.smlal.lane.vg2x2",
  "llvm.aarch64.sme.smlal.lane.vg2x4",
  "llvm.aarch64.sme.smlal.single.vg2x1",
  "llvm.aarch64.sme.smlal.single.vg2x2",
  "llvm.aarch64.sme.smlal.single.vg2x4",
  "llvm.aarch64.sme.smlal.vg2x2",
  "llvm.aarch64.sme.smlal.vg2x4",
  "llvm.aarch64.sme.smls.za32.lane.vg4x1",
  "llvm.aarch64.sme.smls.za32.lane.vg4x2",
  "llvm.aarch64.sme.smls.za32.lane.vg4x4",
  "llvm.aarch64.sme.smls.za32.single.vg4x1",
  "llvm.aarch64.sme.smls.za32.single.vg4x2",
  "llvm.aarch64.sme.smls.za32.single.vg4x4",
  "llvm.aarch64.sme.smls.za32.vg4x2",
  "llvm.aarch64.sme.smls.za32.vg4x4",
  "llvm.aarch64.sme.smls.za64.lane.vg4x1",
  "llvm.aarch64.sme.smls.za64.lane.vg4x2",
  "llvm.aarch64.sme.smls.za64.lane.vg4x4",
  "llvm.aarch64.sme.smls.za64.single.vg4x1",
  "llvm.aarch64.sme.smls.za64.single.vg4x2",
  "llvm.aarch64.sme.smls.za64.single.vg4x4",
  "llvm.aarch64.sme.smls.za64.vg4x2",
  "llvm.aarch64.sme.smls.za64.vg4x4",
  "llvm.aarch64.sme.smlsl.lane.vg2x1",
  "llvm.aarch64.sme.smlsl.lane.vg2x2",
  "llvm.aarch64.sme.smlsl.lane.vg2x4",
  "llvm.aarch64.sme.smlsl.single.vg2x1",
  "llvm.aarch64.sme.smlsl.single.vg2x2",
  "llvm.aarch64.sme.smlsl.single.vg2x4",
  "llvm.aarch64.sme.smlsl.vg2x2",
  "llvm.aarch64.sme.smlsl.vg2x4",
  "llvm.aarch64.sme.smopa.wide",
  "llvm.aarch64.sme.smopa.za32",
  "llvm.aarch64.sme.smops.wide",
  "llvm.aarch64.sme.smops.za32",
  "llvm.aarch64.sme.st1b.horiz",
  "llvm.aarch64.sme.st1b.vert",
  "llvm.aarch64.sme.st1d.horiz",
  "llvm.aarch64.sme.st1d.vert",
  "llvm.aarch64.sme.st1h.horiz",
  "llvm.aarch64.sme.st1h.vert",
  "llvm.aarch64.sme.st1q.horiz",
  "llvm.aarch64.sme.st1q.vert",
  "llvm.aarch64.sme.st1w.horiz",
  "llvm.aarch64.sme.st1w.vert",
  "llvm.aarch64.sme.str",
  "llvm.aarch64.sme.sub.write.single.za.vg1x2",
  "llvm.aarch64.sme.sub.write.single.za.vg1x4",
  "llvm.aarch64.sme.sub.write.za.vg1x2",
  "llvm.aarch64.sme.sub.write.za.vg1x4",
  "llvm.aarch64.sme.sub.za32.vg1x2",
  "llvm.aarch64.sme.sub.za32.vg1x4",
  "llvm.aarch64.sme.sub.za64.vg1x2",
  "llvm.aarch64.sme.sub.za64.vg1x4",
  "llvm.aarch64.sme.sudot.lane.za32.vg1x2",
  "llvm.aarch64.sme.sudot.lane.za32.vg1x4",
  "llvm.aarch64.sme.sudot.single.za32.vg1x2",
  "llvm.aarch64.sme.sudot.single.za32.vg1x4",
  "llvm.aarch64.sme.sumla.za32.lane.vg4x1",
  "llvm.aarch64.sme.sumla.za32.lane.vg4x2",
  "llvm.aarch64.sme.sumla.za32.lane.vg4x4",
  "llvm.aarch64.sme.sumla.za32.single.vg4x2",
  "llvm.aarch64.sme.sumla.za32.single.vg4x4",
  "llvm.aarch64.sme.sumopa.wide",
  "llvm.aarch64.sme.sumops.wide",
  "llvm.aarch64.sme.suvdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.svdot.lane.za32.vg1x2",
  "llvm.aarch64.sme.svdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.svdot.lane.za64.vg1x4",
  "llvm.aarch64.sme.udot.lane.za32.vg1x2",
  "llvm.aarch64.sme.udot.lane.za32.vg1x4",
  "llvm.aarch64.sme.udot.lane.za64.vg1x2",
  "llvm.aarch64.sme.udot.lane.za64.vg1x4",
  "llvm.aarch64.sme.udot.single.za32.vg1x2",
  "llvm.aarch64.sme.udot.single.za32.vg1x4",
  "llvm.aarch64.sme.udot.single.za64.vg1x2",
  "llvm.aarch64.sme.udot.single.za64.vg1x4",
  "llvm.aarch64.sme.udot.za32.vg1x2",
  "llvm.aarch64.sme.udot.za32.vg1x4",
  "llvm.aarch64.sme.udot.za64.vg1x2",
  "llvm.aarch64.sme.udot.za64.vg1x4",
  "llvm.aarch64.sme.umla.za32.lane.vg4x1",
  "llvm.aarch64.sme.umla.za32.lane.vg4x2",
  "llvm.aarch64.sme.umla.za32.lane.vg4x4",
  "llvm.aarch64.sme.umla.za32.single.vg4x1",
  "llvm.aarch64.sme.umla.za32.single.vg4x2",
  "llvm.aarch64.sme.umla.za32.single.vg4x4",
  "llvm.aarch64.sme.umla.za32.vg4x2",
  "llvm.aarch64.sme.umla.za32.vg4x4",
  "llvm.aarch64.sme.umla.za64.lane.vg4x1",
  "llvm.aarch64.sme.umla.za64.lane.vg4x2",
  "llvm.aarch64.sme.umla.za64.lane.vg4x4",
  "llvm.aarch64.sme.umla.za64.single.vg4x1",
  "llvm.aarch64.sme.umla.za64.single.vg4x2",
  "llvm.aarch64.sme.umla.za64.single.vg4x4",
  "llvm.aarch64.sme.umla.za64.vg4x2",
  "llvm.aarch64.sme.umla.za64.vg4x4",
  "llvm.aarch64.sme.umlal.lane.vg2x1",
  "llvm.aarch64.sme.umlal.lane.vg2x2",
  "llvm.aarch64.sme.umlal.lane.vg2x4",
  "llvm.aarch64.sme.umlal.single.vg2x1",
  "llvm.aarch64.sme.umlal.single.vg2x2",
  "llvm.aarch64.sme.umlal.single.vg2x4",
  "llvm.aarch64.sme.umlal.vg2x2",
  "llvm.aarch64.sme.umlal.vg2x4",
  "llvm.aarch64.sme.umls.za32.lane.vg4x1",
  "llvm.aarch64.sme.umls.za32.lane.vg4x2",
  "llvm.aarch64.sme.umls.za32.lane.vg4x4",
  "llvm.aarch64.sme.umls.za32.single.vg4x1",
  "llvm.aarch64.sme.umls.za32.single.vg4x2",
  "llvm.aarch64.sme.umls.za32.single.vg4x4",
  "llvm.aarch64.sme.umls.za32.vg4x2",
  "llvm.aarch64.sme.umls.za32.vg4x4",
  "llvm.aarch64.sme.umls.za64.lane.vg4x1",
  "llvm.aarch64.sme.umls.za64.lane.vg4x2",
  "llvm.aarch64.sme.umls.za64.lane.vg4x4",
  "llvm.aarch64.sme.umls.za64.single.vg4x1",
  "llvm.aarch64.sme.umls.za64.single.vg4x2",
  "llvm.aarch64.sme.umls.za64.single.vg4x4",
  "llvm.aarch64.sme.umls.za64.vg4x2",
  "llvm.aarch64.sme.umls.za64.vg4x4",
  "llvm.aarch64.sme.umlsl.lane.vg2x1",
  "llvm.aarch64.sme.umlsl.lane.vg2x2",
  "llvm.aarch64.sme.umlsl.lane.vg2x4",
  "llvm.aarch64.sme.umlsl.single.vg2x1",
  "llvm.aarch64.sme.umlsl.single.vg2x2",
  "llvm.aarch64.sme.umlsl.single.vg2x4",
  "llvm.aarch64.sme.umlsl.vg2x2",
  "llvm.aarch64.sme.umlsl.vg2x4",
  "llvm.aarch64.sme.umopa.wide",
  "llvm.aarch64.sme.umopa.za32",
  "llvm.aarch64.sme.umops.wide",
  "llvm.aarch64.sme.umops.za32",
  "llvm.aarch64.sme.usdot.lane.za32.vg1x2",
  "llvm.aarch64.sme.usdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.usdot.single.za32.vg1x2",
  "llvm.aarch64.sme.usdot.single.za32.vg1x4",
  "llvm.aarch64.sme.usdot.za32.vg1x2",
  "llvm.aarch64.sme.usdot.za32.vg1x4",
  "llvm.aarch64.sme.usmla.za32.lane.vg4x1",
  "llvm.aarch64.sme.usmla.za32.lane.vg4x2",
  "llvm.aarch64.sme.usmla.za32.lane.vg4x4",
  "llvm.aarch64.sme.usmla.za32.single.vg4x1",
  "llvm.aarch64.sme.usmla.za32.single.vg4x2",
  "llvm.aarch64.sme.usmla.za32.single.vg4x4",
  "llvm.aarch64.sme.usmla.za32.vg4x2",
  "llvm.aarch64.sme.usmla.za32.vg4x4",
  "llvm.aarch64.sme.usmopa.wide",
  "llvm.aarch64.sme.usmops.wide",
  "llvm.aarch64.sme.usvdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.uvdot.lane.za32.vg1x2",
  "llvm.aarch64.sme.uvdot.lane.za32.vg1x4",
  "llvm.aarch64.sme.uvdot.lane.za64.vg1x4",
  "llvm.aarch64.sme.write.hor.vg2",
  "llvm.aarch64.sme.write.hor.vg4",
  "llvm.aarch64.sme.write.horiz",
  "llvm.aarch64.sme.write.ver.vg2",
  "llvm.aarch64.sme.write.ver.vg4",
  "llvm.aarch64.sme.write.vert",
  "llvm.aarch64.sme.write.vg1x2",
  "llvm.aarch64.sme.write.vg1x4",
  "llvm.aarch64.sme.writeq.horiz",
  "llvm.aarch64.sme.writeq.vert",
  "llvm.aarch64.sme.za.disable",
  "llvm.aarch64.sme.za.enable",
  "llvm.aarch64.sme.zero",
  "llvm.aarch64.space",
  "llvm.aarch64.st64b",
  "llvm.aarch64.st64bv",
  "llvm.aarch64.st64bv0",
  "llvm.aarch64.stg",
  "llvm.aarch64.stgp",
  "llvm.aarch64.stlxp",
  "llvm.aarch64.stlxr",
  "llvm.aarch64.stxp",
  "llvm.aarch64.stxr",
  "llvm.aarch64.subp",
  "llvm.aarch64.sve.abs",
  "llvm.aarch64.sve.adclb",
  "llvm.aarch64.sve.adclt",
  "llvm.aarch64.sve.add",
  "llvm.aarch64.sve.add.single.x2",
  "llvm.aarch64.sve.add.single.x4",
  "llvm.aarch64.sve.add.u",
  "llvm.aarch64.sve.addhnb",
  "llvm.aarch64.sve.addhnt",
  "llvm.aarch64.sve.addp",
  "llvm.aarch64.sve.adrb",
  "llvm.aarch64.sve.adrd",
  "llvm.aarch64.sve.adrh",
  "llvm.aarch64.sve.adrw",
  "llvm.aarch64.sve.aesd",
  "llvm.aarch64.sve.aese",
  "llvm.aarch64.sve.aesimc",
  "llvm.aarch64.sve.aesmc",
  "llvm.aarch64.sve.and",
  "llvm.aarch64.sve.and.u",
  "llvm.aarch64.sve.and.z",
  "llvm.aarch64.sve.andv",
  "llvm.aarch64.sve.asr",
  "llvm.aarch64.sve.asr.u",
  "llvm.aarch64.sve.asr.wide",
  "llvm.aarch64.sve.asrd",
  "llvm.aarch64.sve.bcax",
  "llvm.aarch64.sve.bdep.x",
  "llvm.aarch64.sve.bext.x",
  "llvm.aarch64.sve.bfcvt.x2",
  "llvm.aarch64.sve.bfcvtn.x2",
  "llvm.aarch64.sve.bfdot",
  "llvm.aarch64.sve.bfdot.lane.v2",
  "llvm.aarch64.sve.bfmlalb",
  "llvm.aarch64.sve.bfmlalb.lane.v2",
  "llvm.aarch64.sve.bfmlalt",
  "llvm.aarch64.sve.bfmlalt.lane.v2",
  "llvm.aarch64.sve.bfmmla",
  "llvm.aarch64.sve.bgrp.x",
  "llvm.aarch64.sve.bic",
  "llvm.aarch64.sve.bic.u",
  "llvm.aarch64.sve.bic.z",
  "llvm.aarch64.sve.brka",
  "llvm.aarch64.sve.brka.z",
  "llvm.aarch64.sve.brkb",
  "llvm.aarch64.sve.brkb.z",
  "llvm.aarch64.sve.brkn.z",
  "llvm.aarch64.sve.brkpa.z",
  "llvm.aarch64.sve.brkpb.z",
  "llvm.aarch64.sve.bsl",
  "llvm.aarch64.sve.bsl1n",
  "llvm.aarch64.sve.bsl2n",
  "llvm.aarch64.sve.cadd.x",
  "llvm.aarch64.sve.cdot",
  "llvm.aarch64.sve.cdot.lane",
  "llvm.aarch64.sve.clasta",
  "llvm.aarch64.sve.clasta.n",
  "llvm.aarch64.sve.clastb",
  "llvm.aarch64.sve.clastb.n",
  "llvm.aarch64.sve.cls",
  "llvm.aarch64.sve.clz",
  "llvm.aarch64.sve.cmla.lane.x",
  "llvm.aarch64.sve.cmla.x",
  "llvm.aarch64.sve.cmpeq",
  "llvm.aarch64.sve.cmpeq.wide",
  "llvm.aarch64.sve.cmpge",
  "llvm.aarch64.sve.cmpge.wide",
  "llvm.aarch64.sve.cmpgt",
  "llvm.aarch64.sve.cmpgt.wide",
  "llvm.aarch64.sve.cmphi",
  "llvm.aarch64.sve.cmphi.wide",
  "llvm.aarch64.sve.cmphs",
  "llvm.aarch64.sve.cmphs.wide",
  "llvm.aarch64.sve.cmple.wide",
  "llvm.aarch64.sve.cmplo.wide",
  "llvm.aarch64.sve.cmpls.wide",
  "llvm.aarch64.sve.cmplt.wide",
  "llvm.aarch64.sve.cmpne",
  "llvm.aarch64.sve.cmpne.wide",
  "llvm.aarch64.sve.cnot",
  "llvm.aarch64.sve.cnt",
  "llvm.aarch64.sve.cntb",
  "llvm.aarch64.sve.cntd",
  "llvm.aarch64.sve.cnth",
  "llvm.aarch64.sve.cntp",
  "llvm.aarch64.sve.cntp.c16",
  "llvm.aarch64.sve.cntp.c32",
  "llvm.aarch64.sve.cntp.c64",
  "llvm.aarch64.sve.cntp.c8",
  "llvm.aarch64.sve.cntw",
  "llvm.aarch64.sve.compact",
  "llvm.aarch64.sve.convert.from.svbool",
  "llvm.aarch64.sve.convert.to.svbool",
  "llvm.aarch64.sve.dup",
  "llvm.aarch64.sve.dup.x",
  "llvm.aarch64.sve.dupq.lane",
  "llvm.aarch64.sve.eor",
  "llvm.aarch64.sve.eor.u",
  "llvm.aarch64.sve.eor.z",
  "llvm.aarch64.sve.eor3",
  "llvm.aarch64.sve.eorbt",
  "llvm.aarch64.sve.eortb",
  "llvm.aarch64.sve.eorv",
  "llvm.aarch64.sve.ext",
  "llvm.aarch64.sve.fabd",
  "llvm.aarch64.sve.fabd.u",
  "llvm.aarch64.sve.fabs",
  "llvm.aarch64.sve.facge",
  "llvm.aarch64.sve.facgt",
  "llvm.aarch64.sve.fadd",
  "llvm.aarch64.sve.fadd.u",
  "llvm.aarch64.sve.fadda",
  "llvm.aarch64.sve.faddp",
  "llvm.aarch64.sve.faddv",
  "llvm.aarch64.sve.fcadd",
  "llvm.aarch64.sve.fclamp",
  "llvm.aarch64.sve.fclamp.single.x2",
  "llvm.aarch64.sve.fclamp.single.x4",
  "llvm.aarch64.sve.fcmla",
  "llvm.aarch64.sve.fcmla.lane",
  "llvm.aarch64.sve.fcmpeq",
  "llvm.aarch64.sve.fcmpge",
  "llvm.aarch64.sve.fcmpgt",
  "llvm.aarch64.sve.fcmpne",
  "llvm.aarch64.sve.fcmpuo",
  "llvm.aarch64.sve.fcvt",
  "llvm.aarch64.sve.fcvt.bf16f32",
  "llvm.aarch64.sve.fcvt.f16f32",
  "llvm.aarch64.sve.fcvt.f16f64",
  "llvm.aarch64.sve.fcvt.f32f16",
  "llvm.aarch64.sve.fcvt.f32f64",
  "llvm.aarch64.sve.fcvt.f64f16",
  "llvm.aarch64.sve.fcvt.f64f32",
  "llvm.aarch64.sve.fcvt.x2",
  "llvm.aarch64.sve.fcvtlt.f32f16",
  "llvm.aarch64.sve.fcvtlt.f64f32",
  "llvm.aarch64.sve.fcvtn.x2",
  "llvm.aarch64.sve.fcvtnt.bf16f32",
  "llvm.aarch64.sve.fcvtnt.f16f32",
  "llvm.aarch64.sve.fcvtnt.f32f64",
  "llvm.aarch64.sve.fcvts.x2",
  "llvm.aarch64.sve.fcvts.x4",
  "llvm.aarch64.sve.fcvtu.x2",
  "llvm.aarch64.sve.fcvtu.x4",
  "llvm.aarch64.sve.fcvtx.f32f64",
  "llvm.aarch64.sve.fcvtxnt.f32f64",
  "llvm.aarch64.sve.fcvtzs",
  "llvm.aarch64.sve.fcvtzs.i32f16",
  "llvm.aarch64.sve.fcvtzs.i32f64",
  "llvm.aarch64.sve.fcvtzs.i64f16",
  "llvm.aarch64.sve.fcvtzs.i64f32",
  "llvm.aarch64.sve.fcvtzu",
  "llvm.aarch64.sve.fcvtzu.i32f16",
  "llvm.aarch64.sve.fcvtzu.i32f64",
  "llvm.aarch64.sve.fcvtzu.i64f16",
  "llvm.aarch64.sve.fcvtzu.i64f32",
  "llvm.aarch64.sve.fdiv",
  "llvm.aarch64.sve.fdiv.u",
  "llvm.aarch64.sve.fdivr",
  "llvm.aarch64.sve.fdot.lane.x2",
  "llvm.aarch64.sve.fdot.x2",
  "llvm.aarch64.sve.fexpa.x",
  "llvm.aarch64.sve.flogb",
  "llvm.aarch64.sve.fmad",
  "llvm.aarch64.sve.fmax",
  "llvm.aarch64.sve.fmax.single.x2",
  "llvm.aarch64.sve.fmax.single.x4",
  "llvm.aarch64.sve.fmax.u",
  "llvm.aarch64.sve.fmax.x2",
  "llvm.aarch64.sve.fmax.x4",
  "llvm.aarch64.sve.fmaxnm",
  "llvm.aarch64.sve.fmaxnm.single.x2",
  "llvm.aarch64.sve.fmaxnm.single.x4",
  "llvm.aarch64.sve.fmaxnm.u",
  "llvm.aarch64.sve.fmaxnm.x2",
  "llvm.aarch64.sve.fmaxnm.x4",
  "llvm.aarch64.sve.fmaxnmp",
  "llvm.aarch64.sve.fmaxnmv",
  "llvm.aarch64.sve.fmaxp",
  "llvm.aarch64.sve.fmaxv",
  "llvm.aarch64.sve.fmin",
  "llvm.aarch64.sve.fmin.single.x2",
  "llvm.aarch64.sve.fmin.single.x4",
  "llvm.aarch64.sve.fmin.u",
  "llvm.aarch64.sve.fmin.x2",
  "llvm.aarch64.sve.fmin.x4",
  "llvm.aarch64.sve.fminnm",
  "llvm.aarch64.sve.fminnm.single.x2",
  "llvm.aarch64.sve.fminnm.single.x4",
  "llvm.aarch64.sve.fminnm.u",
  "llvm.aarch64.sve.fminnm.x2",
  "llvm.aarch64.sve.fminnm.x4",
  "llvm.aarch64.sve.fminnmp",
  "llvm.aarch64.sve.fminnmv",
  "llvm.aarch64.sve.fminp",
  "llvm.aarch64.sve.fminv",
  "llvm.aarch64.sve.fmla",
  "llvm.aarch64.sve.fmla.lane",
  "llvm.aarch64.sve.fmla.u",
  "llvm.aarch64.sve.fmlalb",
  "llvm.aarch64.sve.fmlalb.lane",
  "llvm.aarch64.sve.fmlalt",
  "llvm.aarch64.sve.fmlalt.lane",
  "llvm.aarch64.sve.fmls",
  "llvm.aarch64.sve.fmls.lane",
  "llvm.aarch64.sve.fmls.u",
  "llvm.aarch64.sve.fmlslb",
  "llvm.aarch64.sve.fmlslb.lane",
  "llvm.aarch64.sve.fmlslt",
  "llvm.aarch64.sve.fmlslt.lane",
  "llvm.aarch64.sve.fmmla",
  "llvm.aarch64.sve.fmsb",
  "llvm.aarch64.sve.fmul",
  "llvm.aarch64.sve.fmul.lane",
  "llvm.aarch64.sve.fmul.u",
  "llvm.aarch64.sve.fmulx",
  "llvm.aarch64.sve.fmulx.u",
  "llvm.aarch64.sve.fneg",
  "llvm.aarch64.sve.fnmad",
  "llvm.aarch64.sve.fnmla",
  "llvm.aarch64.sve.fnmla.u",
  "llvm.aarch64.sve.fnmls",
  "llvm.aarch64.sve.fnmls.u",
  "llvm.aarch64.sve.fnmsb",
  "llvm.aarch64.sve.frecpe.x",
  "llvm.aarch64.sve.frecps.x",
  "llvm.aarch64.sve.frecpx",
  "llvm.aarch64.sve.frinta",
  "llvm.aarch64.sve.frinta.x2",
  "llvm.aarch64.sve.frinta.x4",
  "llvm.aarch64.sve.frinti",
  "llvm.aarch64.sve.frintm",
  "llvm.aarch64.sve.frintm.x2",
  "llvm.aarch64.sve.frintm.x4",
  "llvm.aarch64.sve.frintn",
  "llvm.aarch64.sve.frintn.x2",
  "llvm.aarch64.sve.frintn.x4",
  "llvm.aarch64.sve.frintp",
  "llvm.aarch64.sve.frintp.x2",
  "llvm.aarch64.sve.frintp.x4",
  "llvm.aarch64.sve.frintx",
  "llvm.aarch64.sve.frintz",
  "llvm.aarch64.sve.frsqrte.x",
  "llvm.aarch64.sve.frsqrts.x",
  "llvm.aarch64.sve.fscale",
  "llvm.aarch64.sve.fsqrt",
  "llvm.aarch64.sve.fsub",
  "llvm.aarch64.sve.fsub.u",
  "llvm.aarch64.sve.fsubr",
  "llvm.aarch64.sve.ftmad.x",
  "llvm.aarch64.sve.ftsmul.x",
  "llvm.aarch64.sve.ftssel.x",
  "llvm.aarch64.sve.histcnt",
  "llvm.aarch64.sve.histseg",
  "llvm.aarch64.sve.index",
  "llvm.aarch64.sve.insr",
  "llvm.aarch64.sve.lasta",
  "llvm.aarch64.sve.lastb",
  "llvm.aarch64.sve.ld1",
  "llvm.aarch64.sve.ld1.gather",
  "llvm.aarch64.sve.ld1.gather.index",
  "llvm.aarch64.sve.ld1.gather.scalar.offset",
  "llvm.aarch64.sve.ld1.gather.sxtw",
  "llvm.aarch64.sve.ld1.gather.sxtw.index",
  "llvm.aarch64.sve.ld1.gather.uxtw",
  "llvm.aarch64.sve.ld1.gather.uxtw.index",
  "llvm.aarch64.sve.ld1.pn.x2",
  "llvm.aarch64.sve.ld1.pn.x4",
  "llvm.aarch64.sve.ld1ro",
  "llvm.aarch64.sve.ld1rq",
  "llvm.aarch64.sve.ld2.sret",
  "llvm.aarch64.sve.ld3.sret",
  "llvm.aarch64.sve.ld4.sret",
  "llvm.aarch64.sve.ldff1",
  "llvm.aarch64.sve.ldff1.gather",
  "llvm.aarch64.sve.ldff1.gather.index",
  "llvm.aarch64.sve.ldff1.gather.scalar.offset",
  "llvm.aarch64.sve.ldff1.gather.sxtw",
  "llvm.aarch64.sve.ldff1.gather.sxtw.index",
  "llvm.aarch64.sve.ldff1.gather.uxtw",
  "llvm.aarch64.sve.ldff1.gather.uxtw.index",
  "llvm.aarch64.sve.ldnf1",
  "llvm.aarch64.sve.ldnt1",
  "llvm.aarch64.sve.ldnt1.gather",
  "llvm.aarch64.sve.ldnt1.gather.index",
  "llvm.aarch64.sve.ldnt1.gather.scalar.offset",
  "llvm.aarch64.sve.ldnt1.gather.uxtw",
  "llvm.aarch64.sve.ldnt1.pn.x2",
  "llvm.aarch64.sve.ldnt1.pn.x4",
  "llvm.aarch64.sve.lsl",
  "llvm.aarch64.sve.lsl.u",
  "llvm.aarch64.sve.lsl.wide",
  "llvm.aarch64.sve.lsr",
  "llvm.aarch64.sve.lsr.u",
  "llvm.aarch64.sve.lsr.wide",
  "llvm.aarch64.sve.mad",
  "llvm.aarch64.sve.match",
  "llvm.aarch64.sve.mla",
  "llvm.aarch64.sve.mla.lane",
  "llvm.aarch64.sve.mla.u",
  "llvm.aarch64.sve.mls",
  "llvm.aarch64.sve.mls.lane",
  "llvm.aarch64.sve.mls.u",
  "llvm.aarch64.sve.msb",
  "llvm.aarch64.sve.mul",
  "llvm.aarch64.sve.mul.lane",
  "llvm.aarch64.sve.mul.u",
  "llvm.aarch64.sve.nand.z",
  "llvm.aarch64.sve.nbsl",
  "llvm.aarch64.sve.neg",
  "llvm.aarch64.sve.nmatch",
  "llvm.aarch64.sve.nor.z",
  "llvm.aarch64.sve.not",
  "llvm.aarch64.sve.orn.z",
  "llvm.aarch64.sve.orr",
  "llvm.aarch64.sve.orr.u",
  "llvm.aarch64.sve.orr.z",
  "llvm.aarch64.sve.orv",
  "llvm.aarch64.sve.pext",
  "llvm.aarch64.sve.pext.x2",
  "llvm.aarch64.sve.pfirst",
  "llvm.aarch64.sve.pmul",
  "llvm.aarch64.sve.pmullb.pair",
  "llvm.aarch64.sve.pmullt.pair",
  "llvm.aarch64.sve.pnext",
  "llvm.aarch64.sve.prf",
  "llvm.aarch64.sve.prfb.gather.index",
  "llvm.aarch64.sve.prfb.gather.scalar.offset",
  "llvm.aarch64.sve.prfb.gather.sxtw.index",
  "llvm.aarch64.sve.prfb.gather.uxtw.index",
  "llvm.aarch64.sve.prfd.gather.index",
  "llvm.aarch64.sve.prfd.gather.scalar.offset",
  "llvm.aarch64.sve.prfd.gather.sxtw.index",
  "llvm.aarch64.sve.prfd.gather.uxtw.index",
  "llvm.aarch64.sve.prfh.gather.index",
  "llvm.aarch64.sve.prfh.gather.scalar.offset",
  "llvm.aarch64.sve.prfh.gather.sxtw.index",
  "llvm.aarch64.sve.prfh.gather.uxtw.index",
  "llvm.aarch64.sve.prfw.gather.index",
  "llvm.aarch64.sve.prfw.gather.scalar.offset",
  "llvm.aarch64.sve.prfw.gather.sxtw.index",
  "llvm.aarch64.sve.prfw.gather.uxtw.index",
  "llvm.aarch64.sve.psel",
  "llvm.aarch64.sve.ptest.any",
  "llvm.aarch64.sve.ptest.first",
  "llvm.aarch64.sve.ptest.last",
  "llvm.aarch64.sve.ptrue",
  "llvm.aarch64.sve.ptrue.c16",
  "llvm.aarch64.sve.ptrue.c32",
  "llvm.aarch64.sve.ptrue.c64",
  "llvm.aarch64.sve.ptrue.c8",
  "llvm.aarch64.sve.punpkhi",
  "llvm.aarch64.sve.punpklo",
  "llvm.aarch64.sve.raddhnb",
  "llvm.aarch64.sve.raddhnt",
  "llvm.aarch64.sve.rax1",
  "llvm.aarch64.sve.rbit",
  "llvm.aarch64.sve.rdffr",
  "llvm.aarch64.sve.rdffr.z",
  "llvm.aarch64.sve.rev",
  "llvm.aarch64.sve.rev.b16",
  "llvm.aarch64.sve.rev.b32",
  "llvm.aarch64.sve.rev.b64",
  "llvm.aarch64.sve.revb",
  "llvm.aarch64.sve.revd",
  "llvm.aarch64.sve.revh",
  "llvm.aarch64.sve.revw",
  "llvm.aarch64.sve.rshrnb",
  "llvm.aarch64.sve.rshrnt",
  "llvm.aarch64.sve.rsubhnb",
  "llvm.aarch64.sve.rsubhnt",
  "llvm.aarch64.sve.saba",
  "llvm.aarch64.sve.sabalb",
  "llvm.aarch64.sve.sabalt",
  "llvm.aarch64.sve.sabd",
  "llvm.aarch64.sve.sabd.u",
  "llvm.aarch64.sve.sabdlb",
  "llvm.aarch64.sve.sabdlt",
  "llvm.aarch64.sve.sadalp",
  "llvm.aarch64.sve.saddlb",
  "llvm.aarch64.sve.saddlbt",
  "llvm.aarch64.sve.saddlt",
  "llvm.aarch64.sve.saddv",
  "llvm.aarch64.sve.saddwb",
  "llvm.aarch64.sve.saddwt",
  "llvm.aarch64.sve.sbclb",
  "llvm.aarch64.sve.sbclt",
  "llvm.aarch64.sve.sclamp",
  "llvm.aarch64.sve.sclamp.single.x2",
  "llvm.aarch64.sve.sclamp.single.x4",
  "llvm.aarch64.sve.scvtf",
  "llvm.aarch64.sve.scvtf.f16i32",
  "llvm.aarch64.sve.scvtf.f16i64",
  "llvm.aarch64.sve.scvtf.f32i64",
  "llvm.aarch64.sve.scvtf.f64i32",
  "llvm.aarch64.sve.scvtf.x2",
  "llvm.aarch64.sve.scvtf.x4",
  "llvm.aarch64.sve.sdiv",
  "llvm.aarch64.sve.sdiv.u",
  "llvm.aarch64.sve.sdivr",
  "llvm.aarch64.sve.sdot",
  "llvm.aarch64.sve.sdot.lane",
  "llvm.aarch64.sve.sdot.lane.x2",
  "llvm.aarch64.sve.sdot.x2",
  "llvm.aarch64.sve.sel",
  "llvm.aarch64.sve.sel.x2",
  "llvm.aarch64.sve.sel.x4",
  "llvm.aarch64.sve.setffr",
  "llvm.aarch64.sve.shadd",
  "llvm.aarch64.sve.shrnb",
  "llvm.aarch64.sve.shrnt",
  "llvm.aarch64.sve.shsub",
  "llvm.aarch64.sve.shsubr",
  "llvm.aarch64.sve.sli",
  "llvm.aarch64.sve.sm4e",
  "llvm.aarch64.sve.sm4ekey",
  "llvm.aarch64.sve.smax",
  "llvm.aarch64.sve.smax.single.x2",
  "llvm.aarch64.sve.smax.single.x4",
  "llvm.aarch64.sve.smax.u",
  "llvm.aarch64.sve.smax.x2",
  "llvm.aarch64.sve.smax.x4",
  "llvm.aarch64.sve.smaxp",
  "llvm.aarch64.sve.smaxv",
  "llvm.aarch64.sve.smin",
  "llvm.aarch64.sve.smin.single.x2",
  "llvm.aarch64.sve.smin.single.x4",
  "llvm.aarch64.sve.smin.u",
  "llvm.aarch64.sve.smin.x2",
  "llvm.aarch64.sve.smin.x4",
  "llvm.aarch64.sve.sminp",
  "llvm.aarch64.sve.sminv",
  "llvm.aarch64.sve.smlalb",
  "llvm.aarch64.sve.smlalb.lane",
  "llvm.aarch64.sve.smlalt",
  "llvm.aarch64.sve.smlalt.lane",
  "llvm.aarch64.sve.smlslb",
  "llvm.aarch64.sve.smlslb.lane",
  "llvm.aarch64.sve.smlslt",
  "llvm.aarch64.sve.smlslt.lane",
  "llvm.aarch64.sve.smmla",
  "llvm.aarch64.sve.smulh",
  "llvm.aarch64.sve.smulh.u",
  "llvm.aarch64.sve.smullb",
  "llvm.aarch64.sve.smullb.lane",
  "llvm.aarch64.sve.smullt",
  "llvm.aarch64.sve.smullt.lane",
  "llvm.aarch64.sve.splice",
  "llvm.aarch64.sve.sqabs",
  "llvm.aarch64.sve.sqadd",
  "llvm.aarch64.sve.sqadd.x",
  "llvm.aarch64.sve.sqcadd.x",
  "llvm.aarch64.sve.sqcvt.x2",
  "llvm.aarch64.sve.sqcvt.x4",
  "llvm.aarch64.sve.sqcvtn.x2",
  "llvm.aarch64.sve.sqcvtn.x4",
  "llvm.aarch64.sve.sqcvtu.x2",
  "llvm.aarch64.sve.sqcvtu.x4",
  "llvm.aarch64.sve.sqcvtun.x2",
  "llvm.aarch64.sve.sqcvtun.x4",
  "llvm.aarch64.sve.sqdecb.n32",
  "llvm.aarch64.sve.sqdecb.n64",
  "llvm.aarch64.sve.sqdecd",
  "llvm.aarch64.sve.sqdecd.n32",
  "llvm.aarch64.sve.sqdecd.n64",
  "llvm.aarch64.sve.sqdech",
  "llvm.aarch64.sve.sqdech.n32",
  "llvm.aarch64.sve.sqdech.n64",
  "llvm.aarch64.sve.sqdecp",
  "llvm.aarch64.sve.sqdecp.n32",
  "llvm.aarch64.sve.sqdecp.n64",
  "llvm.aarch64.sve.sqdecw",
  "llvm.aarch64.sve.sqdecw.n32",
  "llvm.aarch64.sve.sqdecw.n64",
  "llvm.aarch64.sve.sqdmlalb",
  "llvm.aarch64.sve.sqdmlalb.lane",
  "llvm.aarch64.sve.sqdmlalbt",
  "llvm.aarch64.sve.sqdmlalt",
  "llvm.aarch64.sve.sqdmlalt.lane",
  "llvm.aarch64.sve.sqdmlslb",
  "llvm.aarch64.sve.sqdmlslb.lane",
  "llvm.aarch64.sve.sqdmlslbt",
  "llvm.aarch64.sve.sqdmlslt",
  "llvm.aarch64.sve.sqdmlslt.lane",
  "llvm.aarch64.sve.sqdmulh",
  "llvm.aarch64.sve.sqdmulh.lane",
  "llvm.aarch64.sve.sqdmulh.single.vgx2",
  "llvm.aarch64.sve.sqdmulh.single.vgx4",
  "llvm.aarch64.sve.sqdmulh.vgx2",
  "llvm.aarch64.sve.sqdmulh.vgx4",
  "llvm.aarch64.sve.sqdmullb",
  "llvm.aarch64.sve.sqdmullb.lane",
  "llvm.aarch64.sve.sqdmullt",
  "llvm.aarch64.sve.sqdmullt.lane",
  "llvm.aarch64.sve.sqincb.n32",
  "llvm.aarch64.sve.sqincb.n64",
  "llvm.aarch64.sve.sqincd",
  "llvm.aarch64.sve.sqincd.n32",
  "llvm.aarch64.sve.sqincd.n64",
  "llvm.aarch64.sve.sqinch",
  "llvm.aarch64.sve.sqinch.n32",
  "llvm.aarch64.sve.sqinch.n64",
  "llvm.aarch64.sve.sqincp",
  "llvm.aarch64.sve.sqincp.n32",
  "llvm.aarch64.sve.sqincp.n64",
  "llvm.aarch64.sve.sqincw",
  "llvm.aarch64.sve.sqincw.n32",
  "llvm.aarch64.sve.sqincw.n64",
  "llvm.aarch64.sve.sqneg",
  "llvm.aarch64.sve.sqrdcmlah.lane.x",
  "llvm.aarch64.sve.sqrdcmlah.x",
  "llvm.aarch64.sve.sqrdmlah",
  "llvm.aarch64.sve.sqrdmlah.lane",
  "llvm.aarch64.sve.sqrdmlsh",
  "llvm.aarch64.sve.sqrdmlsh.lane",
  "llvm.aarch64.sve.sqrdmulh",
  "llvm.aarch64.sve.sqrdmulh.lane",
  "llvm.aarch64.sve.sqrshl",
  "llvm.aarch64.sve.sqrshr.x2",
  "llvm.aarch64.sve.sqrshr.x4",
  "llvm.aarch64.sve.sqrshrn.x2",
  "llvm.aarch64.sve.sqrshrn.x4",
  "llvm.aarch64.sve.sqrshrnb",
  "llvm.aarch64.sve.sqrshrnt",
  "llvm.aarch64.sve.sqrshru.x2",
  "llvm.aarch64.sve.sqrshru.x4",
  "llvm.aarch64.sve.sqrshrun.x2",
  "llvm.aarch64.sve.sqrshrun.x4",
  "llvm.aarch64.sve.sqrshrunb",
  "llvm.aarch64.sve.sqrshrunt",
  "llvm.aarch64.sve.sqshl",
  "llvm.aarch64.sve.sqshlu",
  "llvm.aarch64.sve.sqshrnb",
  "llvm.aarch64.sve.sqshrnt",
  "llvm.aarch64.sve.sqshrunb",
  "llvm.aarch64.sve.sqshrunt",
  "llvm.aarch64.sve.sqsub",
  "llvm.aarch64.sve.sqsub.u",
  "llvm.aarch64.sve.sqsub.x",
  "llvm.aarch64.sve.sqsubr",
  "llvm.aarch64.sve.sqxtnb",
  "llvm.aarch64.sve.sqxtnt",
  "llvm.aarch64.sve.sqxtunb",
  "llvm.aarch64.sve.sqxtunt",
  "llvm.aarch64.sve.srhadd",
  "llvm.aarch64.sve.sri",
  "llvm.aarch64.sve.srshl",
  "llvm.aarch64.sve.srshl.single.x2",
  "llvm.aarch64.sve.srshl.single.x4",
  "llvm.aarch64.sve.srshl.x2",
  "llvm.aarch64.sve.srshl.x4",
  "llvm.aarch64.sve.srshr",
  "llvm.aarch64.sve.srsra",
  "llvm.aarch64.sve.sshllb",
  "llvm.aarch64.sve.sshllt",
  "llvm.aarch64.sve.ssra",
  "llvm.aarch64.sve.ssublb",
  "llvm.aarch64.sve.ssublbt",
  "llvm.aarch64.sve.ssublt",
  "llvm.aarch64.sve.ssubltb",
  "llvm.aarch64.sve.ssubwb",
  "llvm.aarch64.sve.ssubwt",
  "llvm.aarch64.sve.st1",
  "llvm.aarch64.sve.st1.pn.x2",
  "llvm.aarch64.sve.st1.pn.x4",
  "llvm.aarch64.sve.st1.scatter",
  "llvm.aarch64.sve.st1.scatter.index",
  "llvm.aarch64.sve.st1.scatter.scalar.offset",
  "llvm.aarch64.sve.st1.scatter.sxtw",
  "llvm.aarch64.sve.st1.scatter.sxtw.index",
  "llvm.aarch64.sve.st1.scatter.uxtw",
  "llvm.aarch64.sve.st1.scatter.uxtw.index",
  "llvm.aarch64.sve.st2",
  "llvm.aarch64.sve.st3",
  "llvm.aarch64.sve.st4",
  "llvm.aarch64.sve.stnt1",
  "llvm.aarch64.sve.stnt1.pn.x2",
  "llvm.aarch64.sve.stnt1.pn.x4",
  "llvm.aarch64.sve.stnt1.scatter",
  "llvm.aarch64.sve.stnt1.scatter.index",
  "llvm.aarch64.sve.stnt1.scatter.scalar.offset",
  "llvm.aarch64.sve.stnt1.scatter.uxtw",
  "llvm.aarch64.sve.sub",
  "llvm.aarch64.sve.sub.u",
  "llvm.aarch64.sve.subhnb",
  "llvm.aarch64.sve.subhnt",
  "llvm.aarch64.sve.subr",
  "llvm.aarch64.sve.sudot.lane",
  "llvm.aarch64.sve.sunpk.x2",
  "llvm.aarch64.sve.sunpk.x4",
  "llvm.aarch64.sve.sunpkhi",
  "llvm.aarch64.sve.sunpklo",
  "llvm.aarch64.sve.suqadd",
  "llvm.aarch64.sve.sxtb",
  "llvm.aarch64.sve.sxth",
  "llvm.aarch64.sve.sxtw",
  "llvm.aarch64.sve.tbl",
  "llvm.aarch64.sve.tbl2",
  "llvm.aarch64.sve.tbx",
  "llvm.aarch64.sve.trn1",
  "llvm.aarch64.sve.trn1.b16",
  "llvm.aarch64.sve.trn1.b32",
  "llvm.aarch64.sve.trn1.b64",
  "llvm.aarch64.sve.trn1q",
  "llvm.aarch64.sve.trn2",
  "llvm.aarch64.sve.trn2.b16",
  "llvm.aarch64.sve.trn2.b32",
  "llvm.aarch64.sve.trn2.b64",
  "llvm.aarch64.sve.trn2q",
  "llvm.aarch64.sve.uaba",
  "llvm.aarch64.sve.uabalb",
  "llvm.aarch64.sve.uabalt",
  "llvm.aarch64.sve.uabd",
  "llvm.aarch64.sve.uabd.u",
  "llvm.aarch64.sve.uabdlb",
  "llvm.aarch64.sve.uabdlt",
  "llvm.aarch64.sve.uadalp",
  "llvm.aarch64.sve.uaddlb",
  "llvm.aarch64.sve.uaddlt",
  "llvm.aarch64.sve.uaddv",
  "llvm.aarch64.sve.uaddwb",
  "llvm.aarch64.sve.uaddwt",
  "llvm.aarch64.sve.uclamp",
  "llvm.aarch64.sve.uclamp.single.x2",
  "llvm.aarch64.sve.uclamp.single.x4",
  "llvm.aarch64.sve.ucvtf",
  "llvm.aarch64.sve.ucvtf.f16i32",
  "llvm.aarch64.sve.ucvtf.f16i64",
  "llvm.aarch64.sve.ucvtf.f32i64",
  "llvm.aarch64.sve.ucvtf.f64i32",
  "llvm.aarch64.sve.ucvtf.x2",
  "llvm.aarch64.sve.ucvtf.x4",
  "llvm.aarch64.sve.udiv",
  "llvm.aarch64.sve.udiv.u",
  "llvm.aarch64.sve.udivr",
  "llvm.aarch64.sve.udot",
  "llvm.aarch64.sve.udot.lane",
  "llvm.aarch64.sve.udot.lane.x2",
  "llvm.aarch64.sve.udot.x2",
  "llvm.aarch64.sve.uhadd",
  "llvm.aarch64.sve.uhsub",
  "llvm.aarch64.sve.uhsubr",
  "llvm.aarch64.sve.umax",
  "llvm.aarch64.sve.umax.single.x2",
  "llvm.aarch64.sve.umax.single.x4",
  "llvm.aarch64.sve.umax.u",
  "llvm.aarch64.sve.umax.x2",
  "llvm.aarch64.sve.umax.x4",
  "llvm.aarch64.sve.umaxp",
  "llvm.aarch64.sve.umaxv",
  "llvm.aarch64.sve.umin",
  "llvm.aarch64.sve.umin.single.x2",
  "llvm.aarch64.sve.umin.single.x4",
  "llvm.aarch64.sve.umin.u",
  "llvm.aarch64.sve.umin.x2",
  "llvm.aarch64.sve.umin.x4",
  "llvm.aarch64.sve.uminp",
  "llvm.aarch64.sve.uminv",
  "llvm.aarch64.sve.umlalb",
  "llvm.aarch64.sve.umlalb.lane",
  "llvm.aarch64.sve.umlalt",
  "llvm.aarch64.sve.umlalt.lane",
  "llvm.aarch64.sve.umlslb",
  "llvm.aarch64.sve.umlslb.lane",
  "llvm.aarch64.sve.umlslt",
  "llvm.aarch64.sve.umlslt.lane",
  "llvm.aarch64.sve.ummla",
  "llvm.aarch64.sve.umulh",
  "llvm.aarch64.sve.umulh.u",
  "llvm.aarch64.sve.umullb",
  "llvm.aarch64.sve.umullb.lane",
  "llvm.aarch64.sve.umullt",
  "llvm.aarch64.sve.umullt.lane",
  "llvm.aarch64.sve.uqadd",
  "llvm.aarch64.sve.uqadd.x",
  "llvm.aarch64.sve.uqcvt.x2",
  "llvm.aarch64.sve.uqcvt.x4",
  "llvm.aarch64.sve.uqcvtn.x2",
  "llvm.aarch64.sve.uqcvtn.x4",
  "llvm.aarch64.sve.uqdecb.n32",
  "llvm.aarch64.sve.uqdecb.n64",
  "llvm.aarch64.sve.uqdecd",
  "llvm.aarch64.sve.uqdecd.n32",
  "llvm.aarch64.sve.uqdecd.n64",
  "llvm.aarch64.sve.uqdech",
  "llvm.aarch64.sve.uqdech.n32",
  "llvm.aarch64.sve.uqdech.n64",
  "llvm.aarch64.sve.uqdecp",
  "llvm.aarch64.sve.uqdecp.n32",
  "llvm.aarch64.sve.uqdecp.n64",
  "llvm.aarch64.sve.uqdecw",
  "llvm.aarch64.sve.uqdecw.n32",
  "llvm.aarch64.sve.uqdecw.n64",
  "llvm.aarch64.sve.uqincb.n32",
  "llvm.aarch64.sve.uqincb.n64",
  "llvm.aarch64.sve.uqincd",
  "llvm.aarch64.sve.uqincd.n32",
  "llvm.aarch64.sve.uqincd.n64",
  "llvm.aarch64.sve.uqinch",
  "llvm.aarch64.sve.uqinch.n32",
  "llvm.aarch64.sve.uqinch.n64",
  "llvm.aarch64.sve.uqincp",
  "llvm.aarch64.sve.uqincp.n32",
  "llvm.aarch64.sve.uqincp.n64",
  "llvm.aarch64.sve.uqincw",
  "llvm.aarch64.sve.uqincw.n32",
  "llvm.aarch64.sve.uqincw.n64",
  "llvm.aarch64.sve.uqrshl",
  "llvm.aarch64.sve.uqrshr.x2",
  "llvm.aarch64.sve.uqrshr.x4",
  "llvm.aarch64.sve.uqrshrn.x2",
  "llvm.aarch64.sve.uqrshrn.x4",
  "llvm.aarch64.sve.uqrshrnb",
  "llvm.aarch64.sve.uqrshrnt",
  "llvm.aarch64.sve.uqshl",
  "llvm.aarch64.sve.uqshrnb",
  "llvm.aarch64.sve.uqshrnt",
  "llvm.aarch64.sve.uqsub",
  "llvm.aarch64.sve.uqsub.u",
  "llvm.aarch64.sve.uqsub.x",
  "llvm.aarch64.sve.uqsubr",
  "llvm.aarch64.sve.uqxtnb",
  "llvm.aarch64.sve.uqxtnt",
  "llvm.aarch64.sve.urecpe",
  "llvm.aarch64.sve.urhadd",
  "llvm.aarch64.sve.urshl",
  "llvm.aarch64.sve.urshl.single.x2",
  "llvm.aarch64.sve.urshl.single.x4",
  "llvm.aarch64.sve.urshl.x2",
  "llvm.aarch64.sve.urshl.x4",
  "llvm.aarch64.sve.urshr",
  "llvm.aarch64.sve.ursqrte",
  "llvm.aarch64.sve.ursra",
  "llvm.aarch64.sve.usdot",
  "llvm.aarch64.sve.usdot.lane",
  "llvm.aarch64.sve.ushllb",
  "llvm.aarch64.sve.ushllt",
  "llvm.aarch64.sve.usmmla",
  "llvm.aarch64.sve.usqadd",
  "llvm.aarch64.sve.usra",
  "llvm.aarch64.sve.usublb",
  "llvm.aarch64.sve.usublt",
  "llvm.aarch64.sve.usubwb",
  "llvm.aarch64.sve.usubwt",
  "llvm.aarch64.sve.uunpk.x2",
  "llvm.aarch64.sve.uunpk.x4",
  "llvm.aarch64.sve.uunpkhi",
  "llvm.aarch64.sve.uunpklo",
  "llvm.aarch64.sve.uxtb",
  "llvm.aarch64.sve.uxth",
  "llvm.aarch64.sve.uxtw",
  "llvm.aarch64.sve.uzp.x2",
  "llvm.aarch64.sve.uzp.x4",
  "llvm.aarch64.sve.uzp1",
  "llvm.aarch64.sve.uzp1.b16",
  "llvm.aarch64.sve.uzp1.b32",
  "llvm.aarch64.sve.uzp1.b64",
  "llvm.aarch64.sve.uzp1q",
  "llvm.aarch64.sve.uzp2",
  "llvm.aarch64.sve.uzp2.b16",
  "llvm.aarch64.sve.uzp2.b32",
  "llvm.aarch64.sve.uzp2.b64",
  "llvm.aarch64.sve.uzp2q",
  "llvm.aarch64.sve.uzpq.x2",
  "llvm.aarch64.sve.uzpq.x4",
  "llvm.aarch64.sve.whilege",
  "llvm.aarch64.sve.whilege.c16",
  "llvm.aarch64.sve.whilege.c32",
  "llvm.aarch64.sve.whilege.c64",
  "llvm.aarch64.sve.whilege.c8",
  "llvm.aarch64.sve.whilege.x2",
  "llvm.aarch64.sve.whilegt",
  "llvm.aarch64.sve.whilegt.c16",
  "llvm.aarch64.sve.whilegt.c32",
  "llvm.aarch64.sve.whilegt.c64",
  "llvm.aarch64.sve.whilegt.c8",
  "llvm.aarch64.sve.whilegt.x2",
  "llvm.aarch64.sve.whilehi",
  "llvm.aarch64.sve.whilehi.c16",
  "llvm.aarch64.sve.whilehi.c32",
  "llvm.aarch64.sve.whilehi.c64",
  "llvm.aarch64.sve.whilehi.c8",
  "llvm.aarch64.sve.whilehi.x2",
  "llvm.aarch64.sve.whilehs",
  "llvm.aarch64.sve.whilehs.c16",
  "llvm.aarch64.sve.whilehs.c32",
  "llvm.aarch64.sve.whilehs.c64",
  "llvm.aarch64.sve.whilehs.c8",
  "llvm.aarch64.sve.whilehs.x2",
  "llvm.aarch64.sve.whilele",
  "llvm.aarch64.sve.whilele.c16",
  "llvm.aarch64.sve.whilele.c32",
  "llvm.aarch64.sve.whilele.c64",
  "llvm.aarch64.sve.whilele.c8",
  "llvm.aarch64.sve.whilele.x2",
  "llvm.aarch64.sve.whilelo",
  "llvm.aarch64.sve.whilelo.c16",
  "llvm.aarch64.sve.whilelo.c32",
  "llvm.aarch64.sve.whilelo.c64",
  "llvm.aarch64.sve.whilelo.c8",
  "llvm.aarch64.sve.whilelo.x2",
  "llvm.aarch64.sve.whilels",
  "llvm.aarch64.sve.whilels.c16",
  "llvm.aarch64.sve.whilels.c32",
  "llvm.aarch64.sve.whilels.c64",
  "llvm.aarch64.sve.whilels.c8",
  "llvm.aarch64.sve.whilels.x2",
  "llvm.aarch64.sve.whilelt",
  "llvm.aarch64.sve.whilelt.c16",
  "llvm.aarch64.sve.whilelt.c32",
  "llvm.aarch64.sve.whilelt.c64",
  "llvm.aarch64.sve.whilelt.c8",
  "llvm.aarch64.sve.whilelt.x2",
  "llvm.aarch64.sve.whilerw.b",
  "llvm.aarch64.sve.whilerw.d",
  "llvm.aarch64.sve.whilerw.h",
  "llvm.aarch64.sve.whilerw.s",
  "llvm.aarch64.sve.whilewr.b",
  "llvm.aarch64.sve.whilewr.d",
  "llvm.aarch64.sve.whilewr.h",
  "llvm.aarch64.sve.whilewr.s",
  "llvm.aarch64.sve.wrffr",
  "llvm.aarch64.sve.xar",
  "llvm.aarch64.sve.zip.x2",
  "llvm.aarch64.sve.zip.x4",
  "llvm.aarch64.sve.zip1",
  "llvm.aarch64.sve.zip1.b16",
  "llvm.aarch64.sve.zip1.b32",
  "llvm.aarch64.sve.zip1.b64",
  "llvm.aarch64.sve.zip1q",
  "llvm.aarch64.sve.zip2",
  "llvm.aarch64.sve.zip2.b16",
  "llvm.aarch64.sve.zip2.b32",
  "llvm.aarch64.sve.zip2.b64",
  "llvm.aarch64.sve.zip2q",
  "llvm.aarch64.sve.zipq.x2",
  "llvm.aarch64.sve.zipq.x4",
  "llvm.aarch64.tagp",
  "llvm.aarch64.tcancel",
  "llvm.aarch64.tcommit",
  "llvm.aarch64.tstart",
  "llvm.aarch64.ttest",
  "llvm.aarch64.udiv",
  "llvm.amdgcn.alignbyte",
  "llvm.amdgcn.ballot",
  "llvm.amdgcn.buffer.atomic.add",
  "llvm.amdgcn.buffer.atomic.and",
  "llvm.amdgcn.buffer.atomic.cmpswap",
  "llvm.amdgcn.buffer.atomic.csub",
  "llvm.amdgcn.buffer.atomic.fadd",
  "llvm.amdgcn.buffer.atomic.or",
  "llvm.amdgcn.buffer.atomic.smax",
  "llvm.amdgcn.buffer.atomic.smin",
  "llvm.amdgcn.buffer.atomic.sub",
  "llvm.amdgcn.buffer.atomic.swap",
  "llvm.amdgcn.buffer.atomic.umax",
  "llvm.amdgcn.buffer.atomic.umin",
  "llvm.amdgcn.buffer.atomic.xor",
  "llvm.amdgcn.buffer.load",
  "llvm.amdgcn.buffer.load.format",
  "llvm.amdgcn.buffer.store",
  "llvm.amdgcn.buffer.store.format",
  "llvm.amdgcn.buffer.wbinvl1",
  "llvm.amdgcn.buffer.wbinvl1.sc",
  "llvm.amdgcn.buffer.wbinvl1.vol",
  "llvm.amdgcn.class",
  "llvm.amdgcn.cos",
  "llvm.amdgcn.cs.chain",
  "llvm.amdgcn.cubeid",
  "llvm.amdgcn.cubema",
  "llvm.amdgcn.cubesc",
  "llvm.amdgcn.cubetc",
  "llvm.amdgcn.cvt.f32.bf8",
  "llvm.amdgcn.cvt.f32.fp8",
  "llvm.amdgcn.cvt.pk.bf8.f32",
  "llvm.amdgcn.cvt.pk.f32.bf8",
  "llvm.amdgcn.cvt.pk.f32.fp8",
  "llvm.amdgcn.cvt.pk.fp8.f32",
  "llvm.amdgcn.cvt.pk.i16",
  "llvm.amdgcn.cvt.pk.u16",
  "llvm.amdgcn.cvt.pk.u8.f32",
  "llvm.amdgcn.cvt.pknorm.i16",
  "llvm.amdgcn.cvt.pknorm.u16",
  "llvm.amdgcn.cvt.pkrtz",
  "llvm.amdgcn.cvt.sr.bf8.f32",
  "llvm.amdgcn.cvt.sr.fp8.f32",
  "llvm.amdgcn.dispatch.id",
  "llvm.amdgcn.dispatch.ptr",
  "llvm.amdgcn.div.fixup",
  "llvm.amdgcn.div.fmas",
  "llvm.amdgcn.div.scale",
  "llvm.amdgcn.ds.add.gs.reg.rtn",
  "llvm.amdgcn.ds.append",
  "llvm.amdgcn.ds.bpermute",
  "llvm.amdgcn.ds.bvh.stack.rtn",
  "llvm.amdgcn.ds.consume",
  "llvm.amdgcn.ds.fadd",
  "llvm.amdgcn.ds.fadd.v2bf16",
  "llvm.amdgcn.ds.fmax",
  "llvm.amdgcn.ds.fmin",
  "llvm.amdgcn.ds.gws.barrier",
  "llvm.amdgcn.ds.gws.init",
  "llvm.amdgcn.ds.gws.sema.br",
  "llvm.amdgcn.ds.gws.sema.p",
  "llvm.amdgcn.ds.gws.sema.release.all",
  "llvm.amdgcn.ds.gws.sema.v",
  "llvm.amdgcn.ds.ordered.add",
  "llvm.amdgcn.ds.ordered.swap",
  "llvm.amdgcn.ds.permute",
  "llvm.amdgcn.ds.sub.gs.reg.rtn",
  "llvm.amdgcn.ds.swizzle",
  "llvm.amdgcn.else",
  "llvm.amdgcn.end.cf",
  "llvm.amdgcn.endpgm",
  "llvm.amdgcn.exp",
  "llvm.amdgcn.exp.compr",
  "llvm.amdgcn.exp.row",
  "llvm.amdgcn.exp2",
  "llvm.amdgcn.fcmp",
  "llvm.amdgcn.fdiv.fast",
  "llvm.amdgcn.fdot2",
  "llvm.amdgcn.fdot2.bf16.bf16",
  "llvm.amdgcn.fdot2.f16.f16",
  "llvm.amdgcn.fdot2.f32.bf16",
  "llvm.amdgcn.flat.atomic.fadd",
  "llvm.amdgcn.flat.atomic.fadd.v2bf16",
  "llvm.amdgcn.flat.atomic.fmax",
  "llvm.amdgcn.flat.atomic.fmin",
  "llvm.amdgcn.fma.legacy",
  "llvm.amdgcn.fmad.ftz",
  "llvm.amdgcn.fmed3",
  "llvm.amdgcn.fmul.legacy",
  "llvm.amdgcn.fract",
  "llvm.amdgcn.frexp.exp",
  "llvm.amdgcn.frexp.mant",
  "llvm.amdgcn.global.atomic.csub",
  "llvm.amdgcn.global.atomic.fadd",
  "llvm.amdgcn.global.atomic.fadd.v2bf16",
  "llvm.amdgcn.global.atomic.fmax",
  "llvm.amdgcn.global.atomic.fmin",
  "llvm.amdgcn.global.load.lds",
  "llvm.amdgcn.groupstaticsize",
  "llvm.amdgcn.icmp",
  "llvm.amdgcn.if",
  "llvm.amdgcn.if.break",
  "llvm.amdgcn.iglp.opt",
  "llvm.amdgcn.image.atomic.add.1d",
  "llvm.amdgcn.image.atomic.add.1darray",
  "llvm.amdgcn.image.atomic.add.2d",
  "llvm.amdgcn.image.atomic.add.2darray",
  "llvm.amdgcn.image.atomic.add.2darraymsaa",
  "llvm.amdgcn.image.atomic.add.2dmsaa",
  "llvm.amdgcn.image.atomic.add.3d",
  "llvm.amdgcn.image.atomic.add.cube",
  "llvm.amdgcn.image.atomic.and.1d",
  "llvm.amdgcn.image.atomic.and.1darray",
  "llvm.amdgcn.image.atomic.and.2d",
  "llvm.amdgcn.image.atomic.and.2darray",
  "llvm.amdgcn.image.atomic.and.2darraymsaa",
  "llvm.amdgcn.image.atomic.and.2dmsaa",
  "llvm.amdgcn.image.atomic.and.3d",
  "llvm.amdgcn.image.atomic.and.cube",
  "llvm.amdgcn.image.atomic.cmpswap.1d",
  "llvm.amdgcn.image.atomic.cmpswap.1darray",
  "llvm.amdgcn.image.atomic.cmpswap.2d",
  "llvm.amdgcn.image.atomic.cmpswap.2darray",
  "llvm.amdgcn.image.atomic.cmpswap.2darraymsaa",
  "llvm.amdgcn.image.atomic.cmpswap.2dmsaa",
  "llvm.amdgcn.image.atomic.cmpswap.3d",
  "llvm.amdgcn.image.atomic.cmpswap.cube",
  "llvm.amdgcn.image.atomic.dec.1d",
  "llvm.amdgcn.image.atomic.dec.1darray",
  "llvm.amdgcn.image.atomic.dec.2d",
  "llvm.amdgcn.image.atomic.dec.2darray",
  "llvm.amdgcn.image.atomic.dec.2darraymsaa",
  "llvm.amdgcn.image.atomic.dec.2dmsaa",
  "llvm.amdgcn.image.atomic.dec.3d",
  "llvm.amdgcn.image.atomic.dec.cube",
  "llvm.amdgcn.image.atomic.fmax.1d",
  "llvm.amdgcn.image.atomic.fmax.1darray",
  "llvm.amdgcn.image.atomic.fmax.2d",
  "llvm.amdgcn.image.atomic.fmax.2darray",
  "llvm.amdgcn.image.atomic.fmax.2darraymsaa",
  "llvm.amdgcn.image.atomic.fmax.2dmsaa",
  "llvm.amdgcn.image.atomic.fmax.3d",
  "llvm.amdgcn.image.atomic.fmax.cube",
  "llvm.amdgcn.image.atomic.fmin.1d",
  "llvm.amdgcn.image.atomic.fmin.1darray",
  "llvm.amdgcn.image.atomic.fmin.2d",
  "llvm.amdgcn.image.atomic.fmin.2darray",
  "llvm.amdgcn.image.atomic.fmin.2darraymsaa",
  "llvm.amdgcn.image.atomic.fmin.2dmsaa",
  "llvm.amdgcn.image.atomic.fmin.3d",
  "llvm.amdgcn.image.atomic.fmin.cube",
  "llvm.amdgcn.image.atomic.inc.1d",
  "llvm.amdgcn.image.atomic.inc.1darray",
  "llvm.amdgcn.image.atomic.inc.2d",
  "llvm.amdgcn.image.atomic.inc.2darray",
  "llvm.amdgcn.image.atomic.inc.2darraymsaa",
  "llvm.amdgcn.image.atomic.inc.2dmsaa",
  "llvm.amdgcn.image.atomic.inc.3d",
  "llvm.amdgcn.image.atomic.inc.cube",
  "llvm.amdgcn.image.atomic.or.1d",
  "llvm.amdgcn.image.atomic.or.1darray",
  "llvm.amdgcn.image.atomic.or.2d",
  "llvm.amdgcn.image.atomic.or.2darray",
  "llvm.amdgcn.image.atomic.or.2darraymsaa",
  "llvm.amdgcn.image.atomic.or.2dmsaa",
  "llvm.amdgcn.image.atomic.or.3d",
  "llvm.amdgcn.image.atomic.or.cube",
  "llvm.amdgcn.image.atomic.smax.1d",
  "llvm.amdgcn.image.atomic.smax.1darray",
  "llvm.amdgcn.image.atomic.smax.2d",
  "llvm.amdgcn.image.atomic.smax.2darray",
  "llvm.amdgcn.image.atomic.smax.2darraymsaa",
  "llvm.amdgcn.image.atomic.smax.2dmsaa",
  "llvm.amdgcn.image.atomic.smax.3d",
  "llvm.amdgcn.image.atomic.smax.cube",
  "llvm.amdgcn.image.atomic.smin.1d",
  "llvm.amdgcn.image.atomic.smin.1darray",
  "llvm.amdgcn.image.atomic.smin.2d",
  "llvm.amdgcn.image.atomic.smin.2darray",
  "llvm.amdgcn.image.atomic.smin.2darraymsaa",
  "llvm.amdgcn.image.atomic.smin.2dmsaa",
  "llvm.amdgcn.image.atomic.smin.3d",
  "llvm.amdgcn.image.atomic.smin.cube",
  "llvm.amdgcn.image.atomic.sub.1d",
  "llvm.amdgcn.image.atomic.sub.1darray",
  "llvm.amdgcn.image.atomic.sub.2d",
  "llvm.amdgcn.image.atomic.sub.2darray",
  "llvm.amdgcn.image.atomic.sub.2darraymsaa",
  "llvm.amdgcn.image.atomic.sub.2dmsaa",
  "llvm.amdgcn.image.atomic.sub.3d",
  "llvm.amdgcn.image.atomic.sub.cube",
  "llvm.amdgcn.image.atomic.swap.1d",
  "llvm.amdgcn.image.atomic.swap.1darray",
  "llvm.amdgcn.image.atomic.swap.2d",
  "llvm.amdgcn.image.atomic.swap.2darray",
  "llvm.amdgcn.image.atomic.swap.2darraymsaa",
  "llvm.amdgcn.image.atomic.swap.2dmsaa",
  "llvm.amdgcn.image.atomic.swap.3d",
  "llvm.amdgcn.image.atomic.swap.cube",
  "llvm.amdgcn.image.atomic.umax.1d",
  "llvm.amdgcn.image.atomic.umax.1darray",
  "llvm.amdgcn.image.atomic.umax.2d",
  "llvm.amdgcn.image.atomic.umax.2darray",
  "llvm.amdgcn.image.atomic.umax.2darraymsaa",
  "llvm.amdgcn.image.atomic.umax.2dmsaa",
  "llvm.amdgcn.image.atomic.umax.3d",
  "llvm.amdgcn.image.atomic.umax.cube",
  "llvm.amdgcn.image.atomic.umin.1d",
  "llvm.amdgcn.image.atomic.umin.1darray",
  "llvm.amdgcn.image.atomic.umin.2d",
  "llvm.amdgcn.image.atomic.umin.2darray",
  "llvm.amdgcn.image.atomic.umin.2darraymsaa",
  "llvm.amdgcn.image.atomic.umin.2dmsaa",
  "llvm.amdgcn.image.atomic.umin.3d",
  "llvm.amdgcn.image.atomic.umin.cube",
  "llvm.amdgcn.image.atomic.xor.1d",
  "llvm.amdgcn.image.atomic.xor.1darray",
  "llvm.amdgcn.image.atomic.xor.2d",
  "llvm.amdgcn.image.atomic.xor.2darray",
  "llvm.amdgcn.image.atomic.xor.2darraymsaa",
  "llvm.amdgcn.image.atomic.xor.2dmsaa",
  "llvm.amdgcn.image.atomic.xor.3d",
  "llvm.amdgcn.image.atomic.xor.cube",
  "llvm.amdgcn.image.bvh.intersect.ray",
  "llvm.amdgcn.image.gather4.2d",
  "llvm.amdgcn.image.gather4.2darray",
  "llvm.amdgcn.image.gather4.b.2d",
  "llvm.amdgcn.image.gather4.b.2darray",
  "llvm.amdgcn.image.gather4.b.cl.2d",
  "llvm.amdgcn.image.gather4.b.cl.2darray",
  "llvm.amdgcn.image.gather4.b.cl.cube",
  "llvm.amdgcn.image.gather4.b.cl.o.2d",
  "llvm.amdgcn.image.gather4.b.cl.o.2darray",
  "llvm.amdgcn.image.gather4.b.cl.o.cube",
  "llvm.amdgcn.image.gather4.b.cube",
  "llvm.amdgcn.image.gather4.b.o.2d",
  "llvm.amdgcn.image.gather4.b.o.2darray",
  "llvm.amdgcn.image.gather4.b.o.cube",
  "llvm.amdgcn.image.gather4.c.2d",
  "llvm.amdgcn.image.gather4.c.2darray",
  "llvm.amdgcn.image.gather4.c.b.2d",
  "llvm.amdgcn.image.gather4.c.b.2darray",
  "llvm.amdgcn.image.gather4.c.b.cl.2d",
  "llvm.amdgcn.image.gather4.c.b.cl.2darray",
  "llvm.amdgcn.image.gather4.c.b.cl.cube",
  "llvm.amdgcn.image.gather4.c.b.cl.o.2d",
  "llvm.amdgcn.image.gather4.c.b.cl.o.2darray",
  "llvm.amdgcn.image.gather4.c.b.cl.o.cube",
  "llvm.amdgcn.image.gather4.c.b.cube",
  "llvm.amdgcn.image.gather4.c.b.o.2d",
  "llvm.amdgcn.image.gather4.c.b.o.2darray",
  "llvm.amdgcn.image.gather4.c.b.o.cube",
  "llvm.amdgcn.image.gather4.c.cl.2d",
  "llvm.amdgcn.image.gather4.c.cl.2darray",
  "llvm.amdgcn.image.gather4.c.cl.cube",
  "llvm.amdgcn.image.gather4.c.cl.o.2d",
  "llvm.amdgcn.image.gather4.c.cl.o.2darray",
  "llvm.amdgcn.image.gather4.c.cl.o.cube",
  "llvm.amdgcn.image.gather4.c.cube",
  "llvm.amdgcn.image.gather4.c.l.2d",
  "llvm.amdgcn.image.gather4.c.l.2darray",
  "llvm.amdgcn.image.gather4.c.l.cube",
  "llvm.amdgcn.image.gather4.c.l.o.2d",
  "llvm.amdgcn.image.gather4.c.l.o.2darray",
  "llvm.amdgcn.image.gather4.c.l.o.cube",
  "llvm.amdgcn.image.gather4.c.lz.2d",
  "llvm.amdgcn.image.gather4.c.lz.2darray",
  "llvm.amdgcn.image.gather4.c.lz.cube",
  "llvm.amdgcn.image.gather4.c.lz.o.2d",
  "llvm.amdgcn.image.gather4.c.lz.o.2darray",
  "llvm.amdgcn.image.gather4.c.lz.o.cube",
  "llvm.amdgcn.image.gather4.c.o.2d",
  "llvm.amdgcn.image.gather4.c.o.2darray",
  "llvm.amdgcn.image.gather4.c.o.cube",
  "llvm.amdgcn.image.gather4.cl.2d",
  "llvm.amdgcn.image.gather4.cl.2darray",
  "llvm.amdgcn.image.gather4.cl.cube",
  "llvm.amdgcn.image.gather4.cl.o.2d",
  "llvm.amdgcn.image.gather4.cl.o.2darray",
  "llvm.amdgcn.image.gather4.cl.o.cube",
  "llvm.amdgcn.image.gather4.cube",
  "llvm.amdgcn.image.gather4.l.2d",
  "llvm.amdgcn.image.gather4.l.2darray",
  "llvm.amdgcn.image.gather4.l.cube",
  "llvm.amdgcn.image.gather4.l.o.2d",
  "llvm.amdgcn.image.gather4.l.o.2darray",
  "llvm.amdgcn.image.gather4.l.o.cube",
  "llvm.amdgcn.image.gather4.lz.2d",
  "llvm.amdgcn.image.gather4.lz.2darray",
  "llvm.amdgcn.image.gather4.lz.cube",
  "llvm.amdgcn.image.gather4.lz.o.2d",
  "llvm.amdgcn.image.gather4.lz.o.2darray",
  "llvm.amdgcn.image.gather4.lz.o.cube",
  "llvm.amdgcn.image.gather4.o.2d",
  "llvm.amdgcn.image.gather4.o.2darray",
  "llvm.amdgcn.image.gather4.o.cube",
  "llvm.amdgcn.image.getlod.1d",
  "llvm.amdgcn.image.getlod.1darray",
  "llvm.amdgcn.image.getlod.2d",
  "llvm.amdgcn.image.getlod.2darray",
  "llvm.amdgcn.image.getlod.3d",
  "llvm.amdgcn.image.getlod.cube",
  "llvm.amdgcn.image.getresinfo.1d",
  "llvm.amdgcn.image.getresinfo.1darray",
  "llvm.amdgcn.image.getresinfo.2d",
  "llvm.amdgcn.image.getresinfo.2darray",
  "llvm.amdgcn.image.getresinfo.2darraymsaa",
  "llvm.amdgcn.image.getresinfo.2dmsaa",
  "llvm.amdgcn.image.getresinfo.3d",
  "llvm.amdgcn.image.getresinfo.cube",
  "llvm.amdgcn.image.load.1d",
  "llvm.amdgcn.image.load.1darray",
  "llvm.amdgcn.image.load.2d",
  "llvm.amdgcn.image.load.2darray",
  "llvm.amdgcn.image.load.2darraymsaa",
  "llvm.amdgcn.image.load.2dmsaa",
  "llvm.amdgcn.image.load.3d",
  "llvm.amdgcn.image.load.cube",
  "llvm.amdgcn.image.load.mip.1d",
  "llvm.amdgcn.image.load.mip.1darray",
  "llvm.amdgcn.image.load.mip.2d",
  "llvm.amdgcn.image.load.mip.2darray",
  "llvm.amdgcn.image.load.mip.3d",
  "llvm.amdgcn.image.load.mip.cube",
  "llvm.amdgcn.image.msaa.load.2darraymsaa",
  "llvm.amdgcn.image.msaa.load.2dmsaa",
  "llvm.amdgcn.image.msaa.load.x.2darraymsaa",
  "llvm.amdgcn.image.msaa.load.x.2dmsaa",
  "llvm.amdgcn.image.sample.1d",
  "llvm.amdgcn.image.sample.1darray",
  "llvm.amdgcn.image.sample.2d",
  "llvm.amdgcn.image.sample.2darray",
  "llvm.amdgcn.image.sample.3d",
  "llvm.amdgcn.image.sample.b.1d",
  "llvm.amdgcn.image.sample.b.1darray",
  "llvm.amdgcn.image.sample.b.2d",
  "llvm.amdgcn.image.sample.b.2darray",
  "llvm.amdgcn.image.sample.b.3d",
  "llvm.amdgcn.image.sample.b.cl.1d",
  "llvm.amdgcn.image.sample.b.cl.1darray",
  "llvm.amdgcn.image.sample.b.cl.2d",
  "llvm.amdgcn.image.sample.b.cl.2darray",
  "llvm.amdgcn.image.sample.b.cl.3d",
  "llvm.amdgcn.image.sample.b.cl.cube",
  "llvm.amdgcn.image.sample.b.cl.o.1d",
  "llvm.amdgcn.image.sample.b.cl.o.1darray",
  "llvm.amdgcn.image.sample.b.cl.o.2d",
  "llvm.amdgcn.image.sample.b.cl.o.2darray",
  "llvm.amdgcn.image.sample.b.cl.o.3d",
  "llvm.amdgcn.image.sample.b.cl.o.cube",
  "llvm.amdgcn.image.sample.b.cube",
  "llvm.amdgcn.image.sample.b.o.1d",
  "llvm.amdgcn.image.sample.b.o.1darray",
  "llvm.amdgcn.image.sample.b.o.2d",
  "llvm.amdgcn.image.sample.b.o.2darray",
  "llvm.amdgcn.image.sample.b.o.3d",
  "llvm.amdgcn.image.sample.b.o.cube",
  "llvm.amdgcn.image.sample.c.1d",
  "llvm.amdgcn.image.sample.c.1darray",
  "llvm.amdgcn.image.sample.c.2d",
  "llvm.amdgcn.image.sample.c.2darray",
  "llvm.amdgcn.image.sample.c.3d",
  "llvm.amdgcn.image.sample.c.b.1d",
  "llvm.amdgcn.image.sample.c.b.1darray",
  "llvm.amdgcn.image.sample.c.b.2d",
  "llvm.amdgcn.image.sample.c.b.2darray",
  "llvm.amdgcn.image.sample.c.b.3d",
  "llvm.amdgcn.image.sample.c.b.cl.1d",
  "llvm.amdgcn.image.sample.c.b.cl.1darray",
  "llvm.amdgcn.image.sample.c.b.cl.2d",
  "llvm.amdgcn.image.sample.c.b.cl.2darray",
  "llvm.amdgcn.image.sample.c.b.cl.3d",
  "llvm.amdgcn.image.sample.c.b.cl.cube",
  "llvm.amdgcn.image.sample.c.b.cl.o.1d",
  "llvm.amdgcn.image.sample.c.b.cl.o.1darray",
  "llvm.amdgcn.image.sample.c.b.cl.o.2d",
  "llvm.amdgcn.image.sample.c.b.cl.o.2darray",
  "llvm.amdgcn.image.sample.c.b.cl.o.3d",
  "llvm.amdgcn.image.sample.c.b.cl.o.cube",
  "llvm.amdgcn.image.sample.c.b.cube",
  "llvm.amdgcn.image.sample.c.b.o.1d",
  "llvm.amdgcn.image.sample.c.b.o.1darray",
  "llvm.amdgcn.image.sample.c.b.o.2d",
  "llvm.amdgcn.image.sample.c.b.o.2darray",
  "llvm.amdgcn.image.sample.c.b.o.3d",
  "llvm.amdgcn.image.sample.c.b.o.cube",
  "llvm.amdgcn.image.sample.c.cd.1d",
  "llvm.amdgcn.image.sample.c.cd.1darray",
  "llvm.amdgcn.image.sample.c.cd.2d",
  "llvm.amdgcn.image.sample.c.cd.2darray",
  "llvm.amdgcn.image.sample.c.cd.3d",
  "llvm.amdgcn.image.sample.c.cd.cl.1d",
  "llvm.amdgcn.image.sample.c.cd.cl.1darray",
  "llvm.amdgcn.image.sample.c.cd.cl.2d",
  "llvm.amdgcn.image.sample.c.cd.cl.2darray",
  "llvm.amdgcn.image.sample.c.cd.cl.3d",
  "llvm.amdgcn.image.sample.c.cd.cl.cube",
  "llvm.amdgcn.image.sample.c.cd.cl.o.1d",
  "llvm.amdgcn.image.sample.c.cd.cl.o.1darray",
  "llvm.amdgcn.image.sample.c.cd.cl.o.2d",
  "llvm.amdgcn.image.sample.c.cd.cl.o.2darray",
  "llvm.amdgcn.image.sample.c.cd.cl.o.3d",
  "llvm.amdgcn.image.sample.c.cd.cl.o.cube",
  "llvm.amdgcn.image.sample.c.cd.cube",
  "llvm.amdgcn.image.sample.c.cd.o.1d",
  "llvm.amdgcn.image.sample.c.cd.o.1darray",
  "llvm.amdgcn.image.sample.c.cd.o.2d",
  "llvm.amdgcn.image.sample.c.cd.o.2darray",
  "llvm.amdgcn.image.sample.c.cd.o.3d",
  "llvm.amdgcn.image.sample.c.cd.o.cube",
  "llvm.amdgcn.image.sample.c.cl.1d",
  "llvm.amdgcn.image.sample.c.cl.1darray",
  "llvm.amdgcn.image.sample.c.cl.2d",
  "llvm.amdgcn.image.sample.c.cl.2darray",
  "llvm.amdgcn.image.sample.c.cl.3d",
  "llvm.amdgcn.image.sample.c.cl.cube",
  "llvm.amdgcn.image.sample.c.cl.o.1d",
  "llvm.amdgcn.image.sample.c.cl.o.1darray",
  "llvm.amdgcn.image.sample.c.cl.o.2d",
  "llvm.amdgcn.image.sample.c.cl.o.2darray",
  "llvm.amdgcn.image.sample.c.cl.o.3d",
  "llvm.amdgcn.image.sample.c.cl.o.cube",
  "llvm.amdgcn.image.sample.c.cube",
  "llvm.amdgcn.image.sample.c.d.1d",
  "llvm.amdgcn.image.sample.c.d.1darray",
  "llvm.amdgcn.image.sample.c.d.2d",
  "llvm.amdgcn.image.sample.c.d.2darray",
  "llvm.amdgcn.image.sample.c.d.3d",
  "llvm.amdgcn.image.sample.c.d.cl.1d",
  "llvm.amdgcn.image.sample.c.d.cl.1darray",
  "llvm.amdgcn.image.sample.c.d.cl.2d",
  "llvm.amdgcn.image.sample.c.d.cl.2darray",
  "llvm.amdgcn.image.sample.c.d.cl.3d",
  "llvm.amdgcn.image.sample.c.d.cl.cube",
  "llvm.amdgcn.image.sample.c.d.cl.o.1d",
  "llvm.amdgcn.image.sample.c.d.cl.o.1darray",
  "llvm.amdgcn.image.sample.c.d.cl.o.2d",
  "llvm.amdgcn.image.sample.c.d.cl.o.2darray",
  "llvm.amdgcn.image.sample.c.d.cl.o.3d",
  "llvm.amdgcn.image.sample.c.d.cl.o.cube",
  "llvm.amdgcn.image.sample.c.d.cube",
  "llvm.amdgcn.image.sample.c.d.o.1d",
  "llvm.amdgcn.image.sample.c.d.o.1darray",
  "llvm.amdgcn.image.sample.c.d.o.2d",
  "llvm.amdgcn.image.sample.c.d.o.2darray",
  "llvm.amdgcn.image.sample.c.d.o.3d",
  "llvm.amdgcn.image.sample.c.d.o.cube",
  "llvm.amdgcn.image.sample.c.l.1d",
  "llvm.amdgcn.image.sample.c.l.1darray",
  "llvm.amdgcn.image.sample.c.l.2d",
  "llvm.amdgcn.image.sample.c.l.2darray",
  "llvm.amdgcn.image.sample.c.l.3d",
  "llvm.amdgcn.image.sample.c.l.cube",
  "llvm.amdgcn.image.sample.c.l.o.1d",
  "llvm.amdgcn.image.sample.c.l.o.1darray",
  "llvm.amdgcn.image.sample.c.l.o.2d",
  "llvm.amdgcn.image.sample.c.l.o.2darray",
  "llvm.amdgcn.image.sample.c.l.o.3d",
  "llvm.amdgcn.image.sample.c.l.o.cube",
  "llvm.amdgcn.image.sample.c.lz.1d",
  "llvm.amdgcn.image.sample.c.lz.1darray",
  "llvm.amdgcn.image.sample.c.lz.2d",
  "llvm.amdgcn.image.sample.c.lz.2darray",
  "llvm.amdgcn.image.sample.c.lz.3d",
  "llvm.amdgcn.image.sample.c.lz.cube",
  "llvm.amdgcn.image.sample.c.lz.o.1d",
  "llvm.amdgcn.image.sample.c.lz.o.1darray",
  "llvm.amdgcn.image.sample.c.lz.o.2d",
  "llvm.amdgcn.image.sample.c.lz.o.2darray",
  "llvm.amdgcn.image.sample.c.lz.o.3d",
  "llvm.amdgcn.image.sample.c.lz.o.cube",
  "llvm.amdgcn.image.sample.c.o.1d",
  "llvm.amdgcn.image.sample.c.o.1darray",
  "llvm.amdgcn.image.sample.c.o.2d",
  "llvm.amdgcn.image.sample.c.o.2darray",
  "llvm.amdgcn.image.sample.c.o.3d",
  "llvm.amdgcn.image.sample.c.o.cube",
  "llvm.amdgcn.image.sample.cd.1d",
  "llvm.amdgcn.image.sample.cd.1darray",
  "llvm.amdgcn.image.sample.cd.2d",
  "llvm.amdgcn.image.sample.cd.2darray",
  "llvm.amdgcn.image.sample.cd.3d",
  "llvm.amdgcn.image.sample.cd.cl.1d",
  "llvm.amdgcn.image.sample.cd.cl.1darray",
  "llvm.amdgcn.image.sample.cd.cl.2d",
  "llvm.amdgcn.image.sample.cd.cl.2darray",
  "llvm.amdgcn.image.sample.cd.cl.3d",
  "llvm.amdgcn.image.sample.cd.cl.cube",
  "llvm.amdgcn.image.sample.cd.cl.o.1d",
  "llvm.amdgcn.image.sample.cd.cl.o.1darray",
  "llvm.amdgcn.image.sample.cd.cl.o.2d",
  "llvm.amdgcn.image.sample.cd.cl.o.2darray",
  "llvm.amdgcn.image.sample.cd.cl.o.3d",
  "llvm.amdgcn.image.sample.cd.cl.o.cube",
  "llvm.amdgcn.image.sample.cd.cube",
  "llvm.amdgcn.image.sample.cd.o.1d",
  "llvm.amdgcn.image.sample.cd.o.1darray",
  "llvm.amdgcn.image.sample.cd.o.2d",
  "llvm.amdgcn.image.sample.cd.o.2darray",
  "llvm.amdgcn.image.sample.cd.o.3d",
  "llvm.amdgcn.image.sample.cd.o.cube",
  "llvm.amdgcn.image.sample.cl.1d",
  "llvm.amdgcn.image.sample.cl.1darray",
  "llvm.amdgcn.image.sample.cl.2d",
  "llvm.amdgcn.image.sample.cl.2darray",
  "llvm.amdgcn.image.sample.cl.3d",
  "llvm.amdgcn.image.sample.cl.cube",
  "llvm.amdgcn.image.sample.cl.o.1d",
  "llvm.amdgcn.image.sample.cl.o.1darray",
  "llvm.amdgcn.image.sample.cl.o.2d",
  "llvm.amdgcn.image.sample.cl.o.2darray",
  "llvm.amdgcn.image.sample.cl.o.3d",
  "llvm.amdgcn.image.sample.cl.o.cube",
  "llvm.amdgcn.image.sample.cube",
  "llvm.amdgcn.image.sample.d.1d",
  "llvm.amdgcn.image.sample.d.1darray",
  "llvm.amdgcn.image.sample.d.2d",
  "llvm.amdgcn.image.sample.d.2darray",
  "llvm.amdgcn.image.sample.d.3d",
  "llvm.amdgcn.image.sample.d.cl.1d",
  "llvm.amdgcn.image.sample.d.cl.1darray",
  "llvm.amdgcn.image.sample.d.cl.2d",
  "llvm.amdgcn.image.sample.d.cl.2darray",
  "llvm.amdgcn.image.sample.d.cl.3d",
  "llvm.amdgcn.image.sample.d.cl.cube",
  "llvm.amdgcn.image.sample.d.cl.o.1d",
  "llvm.amdgcn.image.sample.d.cl.o.1darray",
  "llvm.amdgcn.image.sample.d.cl.o.2d",
  "llvm.amdgcn.image.sample.d.cl.o.2darray",
  "llvm.amdgcn.image.sample.d.cl.o.3d",
  "llvm.amdgcn.image.sample.d.cl.o.cube",
  "llvm.amdgcn.image.sample.d.cube",
  "llvm.amdgcn.image.sample.d.o.1d",
  "llvm.amdgcn.image.sample.d.o.1darray",
  "llvm.amdgcn.image.sample.d.o.2d",
  "llvm.amdgcn.image.sample.d.o.2darray",
  "llvm.amdgcn.image.sample.d.o.3d",
  "llvm.amdgcn.image.sample.d.o.cube",
  "llvm.amdgcn.image.sample.l.1d",
  "llvm.amdgcn.image.sample.l.1darray",
  "llvm.amdgcn.image.sample.l.2d",
  "llvm.amdgcn.image.sample.l.2darray",
  "llvm.amdgcn.image.sample.l.3d",
  "llvm.amdgcn.image.sample.l.cube",
  "llvm.amdgcn.image.sample.l.o.1d",
  "llvm.amdgcn.image.sample.l.o.1darray",
  "llvm.amdgcn.image.sample.l.o.2d",
  "llvm.amdgcn.image.sample.l.o.2darray",
  "llvm.amdgcn.image.sample.l.o.3d",
  "llvm.amdgcn.image.sample.l.o.cube",
  "llvm.amdgcn.image.sample.lz.1d",
  "llvm.amdgcn.image.sample.lz.1darray",
  "llvm.amdgcn.image.sample.lz.2d",
  "llvm.amdgcn.image.sample.lz.2darray",
  "llvm.amdgcn.image.sample.lz.3d",
  "llvm.amdgcn.image.sample.lz.cube",
  "llvm.amdgcn.image.sample.lz.o.1d",
  "llvm.amdgcn.image.sample.lz.o.1darray",
  "llvm.amdgcn.image.sample.lz.o.2d",
  "llvm.amdgcn.image.sample.lz.o.2darray",
  "llvm.amdgcn.image.sample.lz.o.3d",
  "llvm.amdgcn.image.sample.lz.o.cube",
  "llvm.amdgcn.image.sample.o.1d",
  "llvm.amdgcn.image.sample.o.1darray",
  "llvm.amdgcn.image.sample.o.2d",
  "llvm.amdgcn.image.sample.o.2darray",
  "llvm.amdgcn.image.sample.o.3d",
  "llvm.amdgcn.image.sample.o.cube",
  "llvm.amdgcn.image.store.1d",
  "llvm.amdgcn.image.store.1darray",
  "llvm.amdgcn.image.store.2d",
  "llvm.amdgcn.image.store.2darray",
  "llvm.amdgcn.image.store.2darraymsaa",
  "llvm.amdgcn.image.store.2dmsaa",
  "llvm.amdgcn.image.store.3d",
  "llvm.amdgcn.image.store.cube",
  "llvm.amdgcn.image.store.mip.1d",
  "llvm.amdgcn.image.store.mip.1darray",
  "llvm.amdgcn.image.store.mip.2d",
  "llvm.amdgcn.image.store.mip.2darray",
  "llvm.amdgcn.image.store.mip.3d",
  "llvm.amdgcn.image.store.mip.cube",
  "llvm.amdgcn.implicit.buffer.ptr",
  "llvm.amdgcn.implicitarg.ptr",
  "llvm.amdgcn.init.exec",
  "llvm.amdgcn.init.exec.from.input",
  "llvm.amdgcn.interp.inreg.p10",
  "llvm.amdgcn.interp.inreg.p10.f16",
  "llvm.amdgcn.interp.inreg.p2",
  "llvm.amdgcn.interp.inreg.p2.f16",
  "llvm.amdgcn.interp.mov",
  "llvm.amdgcn.interp.p1",
  "llvm.amdgcn.interp.p1.f16",
  "llvm.amdgcn.interp.p2",
  "llvm.amdgcn.interp.p2.f16",
  "llvm.amdgcn.inverse.ballot",
  "llvm.amdgcn.is.private",
  "llvm.amdgcn.is.shared",
  "llvm.amdgcn.kernarg.segment.ptr",
  "llvm.amdgcn.kill",
  "llvm.amdgcn.ldexp",
  "llvm.amdgcn.lds.direct.load",
  "llvm.amdgcn.lds.kernel.id",
  "llvm.amdgcn.lds.param.load",
  "llvm.amdgcn.lerp",
  "llvm.amdgcn.live.mask",
  "llvm.amdgcn.log",
  "llvm.amdgcn.log.clamp",
  "llvm.amdgcn.loop",
  "llvm.amdgcn.make.buffer.rsrc",
  "llvm.amdgcn.mbcnt.hi",
  "llvm.amdgcn.mbcnt.lo",
  "llvm.amdgcn.mfma.f32.16x16x16bf16.1k",
  "llvm.amdgcn.mfma.f32.16x16x16f16",
  "llvm.amdgcn.mfma.f32.16x16x1f32",
  "llvm.amdgcn.mfma.f32.16x16x2bf16",
  "llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8",
  "llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8",
  "llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8",
  "llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8",
  "llvm.amdgcn.mfma.f32.16x16x4bf16.1k",
  "llvm.amdgcn.mfma.f32.16x16x4f16",
  "llvm.amdgcn.mfma.f32.16x16x4f32",
  "llvm.amdgcn.mfma.f32.16x16x8.xf32",
  "llvm.amdgcn.mfma.f32.16x16x8bf16",
  "llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8",
  "llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8",
  "llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8",
  "llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8",
  "llvm.amdgcn.mfma.f32.32x32x1f32",
  "llvm.amdgcn.mfma.f32.32x32x2bf16",
  "llvm.amdgcn.mfma.f32.32x32x2f32",
  "llvm.amdgcn.mfma.f32.32x32x4.xf32",
  "llvm.amdgcn.mfma.f32.32x32x4bf16",
  "llvm.amdgcn.mfma.f32.32x32x4bf16.1k",
  "llvm.amdgcn.mfma.f32.32x32x4f16",
  "llvm.amdgcn.mfma.f32.32x32x8bf16.1k",
  "llvm.amdgcn.mfma.f32.32x32x8f16",
  "llvm.amdgcn.mfma.f32.4x4x1f32",
  "llvm.amdgcn.mfma.f32.4x4x2bf16",
  "llvm.amdgcn.mfma.f32.4x4x4bf16.1k",
  "llvm.amdgcn.mfma.f32.4x4x4f16",
  "llvm.amdgcn.mfma.f64.16x16x4f64",
  "llvm.amdgcn.mfma.f64.4x4x4f64",
  "llvm.amdgcn.mfma.i32.16x16x16i8",
  "llvm.amdgcn.mfma.i32.16x16x32.i8",
  "llvm.amdgcn.mfma.i32.16x16x4i8",
  "llvm.amdgcn.mfma.i32.32x32x16.i8",
  "llvm.amdgcn.mfma.i32.32x32x4i8",
  "llvm.amdgcn.mfma.i32.32x32x8i8",
  "llvm.amdgcn.mfma.i32.4x4x4i8",
  "llvm.amdgcn.mov.dpp",
  "llvm.amdgcn.mov.dpp8",
  "llvm.amdgcn.mqsad.pk.u16.u8",
  "llvm.amdgcn.mqsad.u32.u8",
  "llvm.amdgcn.msad.u8",
  "llvm.amdgcn.mul.i24",
  "llvm.amdgcn.mul.u24",
  "llvm.amdgcn.mulhi.i24",
  "llvm.amdgcn.mulhi.u24",
  "llvm.amdgcn.perm",
  "llvm.amdgcn.permlane16",
  "llvm.amdgcn.permlane64",
  "llvm.amdgcn.permlanex16",
  "llvm.amdgcn.ps.live",
  "llvm.amdgcn.qsad.pk.u16.u8",
  "llvm.amdgcn.queue.ptr",
  "llvm.amdgcn.raw.buffer.atomic.add",
  "llvm.amdgcn.raw.buffer.atomic.and",
  "llvm.amdgcn.raw.buffer.atomic.cmpswap",
  "llvm.amdgcn.raw.buffer.atomic.dec",
  "llvm.amdgcn.raw.buffer.atomic.fadd",
  "llvm.amdgcn.raw.buffer.atomic.fmax",
  "llvm.amdgcn.raw.buffer.atomic.fmin",
  "llvm.amdgcn.raw.buffer.atomic.inc",
  "llvm.amdgcn.raw.buffer.atomic.or",
  "llvm.amdgcn.raw.buffer.atomic.smax",
  "llvm.amdgcn.raw.buffer.atomic.smin",
  "llvm.amdgcn.raw.buffer.atomic.sub",
  "llvm.amdgcn.raw.buffer.atomic.swap",
  "llvm.amdgcn.raw.buffer.atomic.umax",
  "llvm.amdgcn.raw.buffer.atomic.umin",
  "llvm.amdgcn.raw.buffer.atomic.xor",
  "llvm.amdgcn.raw.buffer.load",
  "llvm.amdgcn.raw.buffer.load.format",
  "llvm.amdgcn.raw.buffer.load.lds",
  "llvm.amdgcn.raw.buffer.store",
  "llvm.amdgcn.raw.buffer.store.format",
  "llvm.amdgcn.raw.ptr.buffer.atomic.add",
  "llvm.amdgcn.raw.ptr.buffer.atomic.and",
  "llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap",
  "llvm.amdgcn.raw.ptr.buffer.atomic.dec",
  "llvm.amdgcn.raw.ptr.buffer.atomic.fadd",
  "llvm.amdgcn.raw.ptr.buffer.atomic.fmax",
  "llvm.amdgcn.raw.ptr.buffer.atomic.fmin",
  "llvm.amdgcn.raw.ptr.buffer.atomic.inc",
  "llvm.amdgcn.raw.ptr.buffer.atomic.or",
  "llvm.amdgcn.raw.ptr.buffer.atomic.smax",
  "llvm.amdgcn.raw.ptr.buffer.atomic.smin",
  "llvm.amdgcn.raw.ptr.buffer.atomic.sub",
  "llvm.amdgcn.raw.ptr.buffer.atomic.swap",
  "llvm.amdgcn.raw.ptr.buffer.atomic.umax",
  "llvm.amdgcn.raw.ptr.buffer.atomic.umin",
  "llvm.amdgcn.raw.ptr.buffer.atomic.xor",
  "llvm.amdgcn.raw.ptr.buffer.load",
  "llvm.amdgcn.raw.ptr.buffer.load.format",
  "llvm.amdgcn.raw.ptr.buffer.load.lds",
  "llvm.amdgcn.raw.ptr.buffer.store",
  "llvm.amdgcn.raw.ptr.buffer.store.format",
  "llvm.amdgcn.raw.ptr.tbuffer.load",
  "llvm.amdgcn.raw.ptr.tbuffer.store",
  "llvm.amdgcn.raw.tbuffer.load",
  "llvm.amdgcn.raw.tbuffer.store",
  "llvm.amdgcn.rcp",
  "llvm.amdgcn.rcp.legacy",
  "llvm.amdgcn.readfirstlane",
  "llvm.amdgcn.readlane",
  "llvm.amdgcn.reloc.constant",
  "llvm.amdgcn.rsq",
  "llvm.amdgcn.rsq.clamp",
  "llvm.amdgcn.rsq.legacy",
  "llvm.amdgcn.s.barrier",
  "llvm.amdgcn.s.buffer.load",
  "llvm.amdgcn.s.dcache.inv",
  "llvm.amdgcn.s.dcache.inv.vol",
  "llvm.amdgcn.s.dcache.wb",
  "llvm.amdgcn.s.dcache.wb.vol",
  "llvm.amdgcn.s.decperflevel",
  "llvm.amdgcn.s.get.waveid.in.workgroup",
  "llvm.amdgcn.s.getpc",
  "llvm.amdgcn.s.getreg",
  "llvm.amdgcn.s.incperflevel",
  "llvm.amdgcn.s.memrealtime",
  "llvm.amdgcn.s.memtime",
  "llvm.amdgcn.s.sendmsg",
  "llvm.amdgcn.s.sendmsg.rtn",
  "llvm.amdgcn.s.sendmsghalt",
  "llvm.amdgcn.s.sethalt",
  "llvm.amdgcn.s.setprio",
  "llvm.amdgcn.s.setreg",
  "llvm.amdgcn.s.sleep",
  "llvm.amdgcn.s.wait.event.export.ready",
  "llvm.amdgcn.s.waitcnt",
  "llvm.amdgcn.sad.hi.u8",
  "llvm.amdgcn.sad.u16",
  "llvm.amdgcn.sad.u8",
  "llvm.amdgcn.sbfe",
  "llvm.amdgcn.sched.barrier",
  "llvm.amdgcn.sched.group.barrier",
  "llvm.amdgcn.sdot2",
  "llvm.amdgcn.sdot4",
  "llvm.amdgcn.sdot8",
  "llvm.amdgcn.set.inactive",
  "llvm.amdgcn.sffbh",
  "llvm.amdgcn.sin",
  "llvm.amdgcn.smfmac.f32.16x16x32.bf16",
  "llvm.amdgcn.smfmac.f32.16x16x32.f16",
  "llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8",
  "llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8",
  "llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8",
  "llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8",
  "llvm.amdgcn.smfmac.f32.32x32x16.bf16",
  "llvm.amdgcn.smfmac.f32.32x32x16.f16",
  "llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8",
  "llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8",
  "llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8",
  "llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8",
  "llvm.amdgcn.smfmac.i32.16x16x64.i8",
  "llvm.amdgcn.smfmac.i32.32x32x32.i8",
  "llvm.amdgcn.softwqm",
  "llvm.amdgcn.sqrt",
  "llvm.amdgcn.strict.wqm",
  "llvm.amdgcn.strict.wwm",
  "llvm.amdgcn.struct.buffer.atomic.add",
  "llvm.amdgcn.struct.buffer.atomic.and",
  "llvm.amdgcn.struct.buffer.atomic.cmpswap",
  "llvm.amdgcn.struct.buffer.atomic.dec",
  "llvm.amdgcn.struct.buffer.atomic.fadd",
  "llvm.amdgcn.struct.buffer.atomic.fmax",
  "llvm.amdgcn.struct.buffer.atomic.fmin",
  "llvm.amdgcn.struct.buffer.atomic.inc",
  "llvm.amdgcn.struct.buffer.atomic.or",
  "llvm.amdgcn.struct.buffer.atomic.smax",
  "llvm.amdgcn.struct.buffer.atomic.smin",
  "llvm.amdgcn.struct.buffer.atomic.sub",
  "llvm.amdgcn.struct.buffer.atomic.swap",
  "llvm.amdgcn.struct.buffer.atomic.umax",
  "llvm.amdgcn.struct.buffer.atomic.umin",
  "llvm.amdgcn.struct.buffer.atomic.xor",
  "llvm.amdgcn.struct.buffer.load",
  "llvm.amdgcn.struct.buffer.load.format",
  "llvm.amdgcn.struct.buffer.load.lds",
  "llvm.amdgcn.struct.buffer.store",
  "llvm.amdgcn.struct.buffer.store.format",
  "llvm.amdgcn.struct.ptr.buffer.atomic.add",
  "llvm.amdgcn.struct.ptr.buffer.atomic.and",
  "llvm.amdgcn.struct.ptr.buffer.atomic.cmpswap",
  "llvm.amdgcn.struct.ptr.buffer.atomic.dec",
  "llvm.amdgcn.struct.ptr.buffer.atomic.fadd",
  "llvm.amdgcn.struct.ptr.buffer.atomic.fmax",
  "llvm.amdgcn.struct.ptr.buffer.atomic.fmin",
  "llvm.amdgcn.struct.ptr.buffer.atomic.inc",
  "llvm.amdgcn.struct.ptr.buffer.atomic.or",
  "llvm.amdgcn.struct.ptr.buffer.atomic.smax",
  "llvm.amdgcn.struct.ptr.buffer.atomic.smin",
  "llvm.amdgcn.struct.ptr.buffer.atomic.sub",
  "llvm.amdgcn.struct.ptr.buffer.atomic.swap",
  "llvm.amdgcn.struct.ptr.buffer.atomic.umax",
  "llvm.amdgcn.struct.ptr.buffer.atomic.umin",
  "llvm.amdgcn.struct.ptr.buffer.atomic.xor",
  "llvm.amdgcn.struct.ptr.buffer.load",
  "llvm.amdgcn.struct.ptr.buffer.load.format",
  "llvm.amdgcn.struct.ptr.buffer.load.lds",
  "llvm.amdgcn.struct.ptr.buffer.store",
  "llvm.amdgcn.struct.ptr.buffer.store.format",
  "llvm.amdgcn.struct.ptr.tbuffer.load",
  "llvm.amdgcn.struct.ptr.tbuffer.store",
  "llvm.amdgcn.struct.tbuffer.load",
  "llvm.amdgcn.struct.tbuffer.store",
  "llvm.amdgcn.sudot4",
  "llvm.amdgcn.sudot8",
  "llvm.amdgcn.tbuffer.load",
  "llvm.amdgcn.tbuffer.store",
  "llvm.amdgcn.trig.preop",
  "llvm.amdgcn.ubfe",
  "llvm.amdgcn.udot2",
  "llvm.amdgcn.udot4",
  "llvm.amdgcn.udot8",
  "llvm.amdgcn.unreachable",
  "llvm.amdgcn.update.dpp",
  "llvm.amdgcn.wave.barrier",
  "llvm.amdgcn.wave.reduce.umax",
  "llvm.amdgcn.wave.reduce.umin",
  "llvm.amdgcn.wavefrontsize",
  "llvm.amdgcn.wmma.bf16.16x16x16.bf16",
  "llvm.amdgcn.wmma.f16.16x16x16.f16",
  "llvm.amdgcn.wmma.f32.16x16x16.bf16",
  "llvm.amdgcn.wmma.f32.16x16x16.f16",
  "llvm.amdgcn.wmma.i32.16x16x16.iu4",
  "llvm.amdgcn.wmma.i32.16x16x16.iu8",
  "llvm.amdgcn.workgroup.id.x",
  "llvm.amdgcn.workgroup.id.y",
  "llvm.amdgcn.workgroup.id.z",
  "llvm.amdgcn.workitem.id.x",
  "llvm.amdgcn.workitem.id.y",
  "llvm.amdgcn.workitem.id.z",
  "llvm.amdgcn.wqm",
  "llvm.amdgcn.wqm.demote",
  "llvm.amdgcn.wqm.vote",
  "llvm.amdgcn.writelane",
  "llvm.amdgcn.wwm",
  "llvm.arm.cde.cx1",
  "llvm.arm.cde.cx1a",
  "llvm.arm.cde.cx1d",
  "llvm.arm.cde.cx1da",
  "llvm.arm.cde.cx2",
  "llvm.arm.cde.cx2a",
  "llvm.arm.cde.cx2d",
  "llvm.arm.cde.cx2da",
  "llvm.arm.cde.cx3",
  "llvm.arm.cde.cx3a",
  "llvm.arm.cde.cx3d",
  "llvm.arm.cde.cx3da",
  "llvm.arm.cde.vcx1",
  "llvm.arm.cde.vcx1a",
  "llvm.arm.cde.vcx1q",
  "llvm.arm.cde.vcx1q.predicated",
  "llvm.arm.cde.vcx1qa",
  "llvm.arm.cde.vcx1qa.predicated",
  "llvm.arm.cde.vcx2",
  "llvm.arm.cde.vcx2a",
  "llvm.arm.cde.vcx2q",
  "llvm.arm.cde.vcx2q.predicated",
  "llvm.arm.cde.vcx2qa",
  "llvm.arm.cde.vcx2qa.predicated",
  "llvm.arm.cde.vcx3",
  "llvm.arm.cde.vcx3a",
  "llvm.arm.cde.vcx3q",
  "llvm.arm.cde.vcx3q.predicated",
  "llvm.arm.cde.vcx3qa",
  "llvm.arm.cde.vcx3qa.predicated",
  "llvm.arm.cdp",
  "llvm.arm.cdp2",
  "llvm.arm.clrex",
  "llvm.arm.cls",
  "llvm.arm.cls64",
  "llvm.arm.cmse.tt",
  "llvm.arm.cmse.tta",
  "llvm.arm.cmse.ttat",
  "llvm.arm.cmse.ttt",
  "llvm.arm.crc32b",
  "llvm.arm.crc32cb",
  "llvm.arm.crc32ch",
  "llvm.arm.crc32cw",
  "llvm.arm.crc32h",
  "llvm.arm.crc32w",
  "llvm.arm.dbg",
  "llvm.arm.dmb",
  "llvm.arm.dsb",
  "llvm.arm.get.fpscr",
  "llvm.arm.gnu.eabi.mcount",
  "llvm.arm.hint",
  "llvm.arm.isb",
  "llvm.arm.ldaex",
  "llvm.arm.ldaexd",
  "llvm.arm.ldc",
  "llvm.arm.ldc2",
  "llvm.arm.ldc2l",
  "llvm.arm.ldcl",
  "llvm.arm.ldrex",
  "llvm.arm.ldrexd",
  "llvm.arm.mcr",
  "llvm.arm.mcr2",
  "llvm.arm.mcrr",
  "llvm.arm.mcrr2",
  "llvm.arm.mrc",
  "llvm.arm.mrc2",
  "llvm.arm.mrrc",
  "llvm.arm.mrrc2",
  "llvm.arm.mve.abd.predicated",
  "llvm.arm.mve.abs.predicated",
  "llvm.arm.mve.add.predicated",
  "llvm.arm.mve.addlv",
  "llvm.arm.mve.addlv.predicated",
  "llvm.arm.mve.addv",
  "llvm.arm.mve.addv.predicated",
  "llvm.arm.mve.and.predicated",
  "llvm.arm.mve.asrl",
  "llvm.arm.mve.bic.predicated",
  "llvm.arm.mve.cls.predicated",
  "llvm.arm.mve.clz.predicated",
  "llvm.arm.mve.eor.predicated",
  "llvm.arm.mve.fma.predicated",
  "llvm.arm.mve.hadd.predicated",
  "llvm.arm.mve.hsub.predicated",
  "llvm.arm.mve.lsll",
  "llvm.arm.mve.max.predicated",
  "llvm.arm.mve.maxav",
  "llvm.arm.mve.maxav.predicated",
  "llvm.arm.mve.maxnmav",
  "llvm.arm.mve.maxnmav.predicated",
  "llvm.arm.mve.maxnmv",
  "llvm.arm.mve.maxnmv.predicated",
  "llvm.arm.mve.maxv",
  "llvm.arm.mve.maxv.predicated",
  "llvm.arm.mve.min.predicated",
  "llvm.arm.mve.minav",
  "llvm.arm.mve.minav.predicated",
  "llvm.arm.mve.minnmav",
  "llvm.arm.mve.minnmav.predicated",
  "llvm.arm.mve.minnmv",
  "llvm.arm.mve.minnmv.predicated",
  "llvm.arm.mve.minv",
  "llvm.arm.mve.minv.predicated",
  "llvm.arm.mve.mul.predicated",
  "llvm.arm.mve.mulh.predicated",
  "llvm.arm.mve.mull.int.predicated",
  "llvm.arm.mve.mull.poly.predicated",
  "llvm.arm.mve.mvn.predicated",
  "llvm.arm.mve.neg.predicated",
  "llvm.arm.mve.orn.predicated",
  "llvm.arm.mve.orr.predicated",
  "llvm.arm.mve.pred.i2v",
  "llvm.arm.mve.pred.v2i",
  "llvm.arm.mve.qabs.predicated",
  "llvm.arm.mve.qadd.predicated",
  "llvm.arm.mve.qdmulh.predicated",
  "llvm.arm.mve.qneg.predicated",
  "llvm.arm.mve.qrdmulh.predicated",
  "llvm.arm.mve.qsub.predicated",
  "llvm.arm.mve.rhadd.predicated",
  "llvm.arm.mve.rmulh.predicated",
  "llvm.arm.mve.shl.imm.predicated",
  "llvm.arm.mve.shr.imm.predicated",
  "llvm.arm.mve.sqrshr",
  "llvm.arm.mve.sqrshrl",
  "llvm.arm.mve.sqshl",
  "llvm.arm.mve.sqshll",
  "llvm.arm.mve.srshr",
  "llvm.arm.mve.srshrl",
  "llvm.arm.mve.sub.predicated",
  "llvm.arm.mve.uqrshl",
  "llvm.arm.mve.uqrshll",
  "llvm.arm.mve.uqshl",
  "llvm.arm.mve.uqshll",
  "llvm.arm.mve.urshr",
  "llvm.arm.mve.urshrl",
  "llvm.arm.mve.vabav",
  "llvm.arm.mve.vabav.predicated",
  "llvm.arm.mve.vabd",
  "llvm.arm.mve.vadc",
  "llvm.arm.mve.vadc.predicated",
  "llvm.arm.mve.vbrsr",
  "llvm.arm.mve.vbrsr.predicated",
  "llvm.arm.mve.vcaddq",
  "llvm.arm.mve.vcaddq.predicated",
  "llvm.arm.mve.vcls",
  "llvm.arm.mve.vcmlaq",
  "llvm.arm.mve.vcmlaq.predicated",
  "llvm.arm.mve.vcmulq",
  "llvm.arm.mve.vcmulq.predicated",
  "llvm.arm.mve.vctp16",
  "llvm.arm.mve.vctp32",
  "llvm.arm.mve.vctp64",
  "llvm.arm.mve.vctp8",
  "llvm.arm.mve.vcvt.fix",
  "llvm.arm.mve.vcvt.fix.predicated",
  "llvm.arm.mve.vcvt.fp.int.predicated",
  "llvm.arm.mve.vcvt.narrow",
  "llvm.arm.mve.vcvt.narrow.predicated",
  "llvm.arm.mve.vcvt.widen",
  "llvm.arm.mve.vcvt.widen.predicated",
  "llvm.arm.mve.vcvta",
  "llvm.arm.mve.vcvta.predicated",
  "llvm.arm.mve.vcvtm",
  "llvm.arm.mve.vcvtm.predicated",
  "llvm.arm.mve.vcvtn",
  "llvm.arm.mve.vcvtn.predicated",
  "llvm.arm.mve.vcvtp",
  "llvm.arm.mve.vcvtp.predicated",
  "llvm.arm.mve.vddup",
  "llvm.arm.mve.vddup.predicated",
  "llvm.arm.mve.vdwdup",
  "llvm.arm.mve.vdwdup.predicated",
  "llvm.arm.mve.vhadd",
  "llvm.arm.mve.vhsub",
  "llvm.arm.mve.vidup",
  "llvm.arm.mve.vidup.predicated",
  "llvm.arm.mve.viwdup",
  "llvm.arm.mve.viwdup.predicated",
  "llvm.arm.mve.vld2q",
  "llvm.arm.mve.vld4q",
  "llvm.arm.mve.vldr.gather.base",
  "llvm.arm.mve.vldr.gather.base.predicated",
  "llvm.arm.mve.vldr.gather.base.wb",
  "llvm.arm.mve.vldr.gather.base.wb.predicated",
  "llvm.arm.mve.vldr.gather.offset",
  "llvm.arm.mve.vldr.gather.offset.predicated",
  "llvm.arm.mve.vmaxa.predicated",
  "llvm.arm.mve.vmaxnma.predicated",
  "llvm.arm.mve.vmina.predicated",
  "llvm.arm.mve.vminnma.predicated",
  "llvm.arm.mve.vmla.n.predicated",
  "llvm.arm.mve.vmlas.n.predicated",
  "llvm.arm.mve.vmldava",
  "llvm.arm.mve.vmldava.predicated",
  "llvm.arm.mve.vmlldava",
  "llvm.arm.mve.vmlldava.predicated",
  "llvm.arm.mve.vmovl.predicated",
  "llvm.arm.mve.vmovn.predicated",
  "llvm.arm.mve.vmulh",
  "llvm.arm.mve.vmull",
  "llvm.arm.mve.vmull.poly",
  "llvm.arm.mve.vqdmlad",
  "llvm.arm.mve.vqdmlad.predicated",
  "llvm.arm.mve.vqdmlah",
  "llvm.arm.mve.vqdmlah.predicated",
  "llvm.arm.mve.vqdmlash",
  "llvm.arm.mve.vqdmlash.predicated",
  "llvm.arm.mve.vqdmulh",
  "llvm.arm.mve.vqdmull",
  "llvm.arm.mve.vqdmull.predicated",
  "llvm.arm.mve.vqmovn",
  "llvm.arm.mve.vqmovn.predicated",
  "llvm.arm.mve.vqrdmlah",
  "llvm.arm.mve.vqrdmlah.predicated",
  "llvm.arm.mve.vqrdmlash",
  "llvm.arm.mve.vqrdmlash.predicated",
  "llvm.arm.mve.vqrdmulh",
  "llvm.arm.mve.vqshl.imm",
  "llvm.arm.mve.vqshl.imm.predicated",
  "llvm.arm.mve.vqshlu.imm",
  "llvm.arm.mve.vqshlu.imm.predicated",
  "llvm.arm.mve.vreinterpretq",
  "llvm.arm.mve.vrev.predicated",
  "llvm.arm.mve.vrhadd",
  "llvm.arm.mve.vrinta.predicated",
  "llvm.arm.mve.vrintm.predicated",
  "llvm.arm.mve.vrintn",
  "llvm.arm.mve.vrintn.predicated",
  "llvm.arm.mve.vrintp.predicated",
  "llvm.arm.mve.vrintx.predicated",
  "llvm.arm.mve.vrintz.predicated",
  "llvm.arm.mve.vrmlldavha",
  "llvm.arm.mve.vrmlldavha.predicated",
  "llvm.arm.mve.vrmulh",
  "llvm.arm.mve.vrshr.imm",
  "llvm.arm.mve.vrshr.imm.predicated",
  "llvm.arm.mve.vsbc",
  "llvm.arm.mve.vsbc.predicated",
  "llvm.arm.mve.vshl.scalar",
  "llvm.arm.mve.vshl.scalar.predicated",
  "llvm.arm.mve.vshl.vector",
  "llvm.arm.mve.vshl.vector.predicated",
  "llvm.arm.mve.vshlc",
  "llvm.arm.mve.vshlc.predicated",
  "llvm.arm.mve.vshll.imm",
  "llvm.arm.mve.vshll.imm.predicated",
  "llvm.arm.mve.vshrn",
  "llvm.arm.mve.vshrn.predicated",
  "llvm.arm.mve.vsli",
  "llvm.arm.mve.vsli.predicated",
  "llvm.arm.mve.vsri",
  "llvm.arm.mve.vsri.predicated",
  "llvm.arm.mve.vst2q",
  "llvm.arm.mve.vst4q",
  "llvm.arm.mve.vstr.scatter.base",
  "llvm.arm.mve.vstr.scatter.base.predicated",
  "llvm.arm.mve.vstr.scatter.base.wb",
  "llvm.arm.mve.vstr.scatter.base.wb.predicated",
  "llvm.arm.mve.vstr.scatter.offset",
  "llvm.arm.mve.vstr.scatter.offset.predicated",
  "llvm.arm.neon.aesd",
  "llvm.arm.neon.aese",
  "llvm.arm.neon.aesimc",
  "llvm.arm.neon.aesmc",
  "llvm.arm.neon.bfdot",
  "llvm.arm.neon.bfmlalb",
  "llvm.arm.neon.bfmlalt",
  "llvm.arm.neon.bfmmla",
  "llvm.arm.neon.sdot",
  "llvm.arm.neon.sha1c",
  "llvm.arm.neon.sha1h",
  "llvm.arm.neon.sha1m",
  "llvm.arm.neon.sha1p",
  "llvm.arm.neon.sha1su0",
  "llvm.arm.neon.sha1su1",
  "llvm.arm.neon.sha256h",
  "llvm.arm.neon.sha256h2",
  "llvm.arm.neon.sha256su0",
  "llvm.arm.neon.sha256su1",
  "llvm.arm.neon.smmla",
  "llvm.arm.neon.udot",
  "llvm.arm.neon.ummla",
  "llvm.arm.neon.usdot",
  "llvm.arm.neon.usmmla",
  "llvm.arm.neon.vabds",
  "llvm.arm.neon.vabdu",
  "llvm.arm.neon.vabs",
  "llvm.arm.neon.vacge",
  "llvm.arm.neon.vacgt",
  "llvm.arm.neon.vbsl",
  "llvm.arm.neon.vcadd.rot270",
  "llvm.arm.neon.vcadd.rot90",
  "llvm.arm.neon.vcls",
  "llvm.arm.neon.vcvtas",
  "llvm.arm.neon.vcvtau",
  "llvm.arm.neon.vcvtbfp2bf",
  "llvm.arm.neon.vcvtfp2bf",
  "llvm.arm.neon.vcvtfp2fxs",
  "llvm.arm.neon.vcvtfp2fxu",
  "llvm.arm.neon.vcvtfp2hf",
  "llvm.arm.neon.vcvtfxs2fp",
  "llvm.arm.neon.vcvtfxu2fp",
  "llvm.arm.neon.vcvthf2fp",
  "llvm.arm.neon.vcvtms",
  "llvm.arm.neon.vcvtmu",
  "llvm.arm.neon.vcvtns",
  "llvm.arm.neon.vcvtnu",
  "llvm.arm.neon.vcvtps",
  "llvm.arm.neon.vcvtpu",
  "llvm.arm.neon.vhadds",
  "llvm.arm.neon.vhaddu",
  "llvm.arm.neon.vhsubs",
  "llvm.arm.neon.vhsubu",
  "llvm.arm.neon.vld1",
  "llvm.arm.neon.vld1x2",
  "llvm.arm.neon.vld1x3",
  "llvm.arm.neon.vld1x4",
  "llvm.arm.neon.vld2",
  "llvm.arm.neon.vld2dup",
  "llvm.arm.neon.vld2lane",
  "llvm.arm.neon.vld3",
  "llvm.arm.neon.vld3dup",
  "llvm.arm.neon.vld3lane",
  "llvm.arm.neon.vld4",
  "llvm.arm.neon.vld4dup",
  "llvm.arm.neon.vld4lane",
  "llvm.arm.neon.vmaxnm",
  "llvm.arm.neon.vmaxs",
  "llvm.arm.neon.vmaxu",
  "llvm.arm.neon.vminnm",
  "llvm.arm.neon.vmins",
  "llvm.arm.neon.vminu",
  "llvm.arm.neon.vmullp",
  "llvm.arm.neon.vmulls",
  "llvm.arm.neon.vmullu",
  "llvm.arm.neon.vmulp",
  "llvm.arm.neon.vpadals",
  "llvm.arm.neon.vpadalu",
  "llvm.arm.neon.vpadd",
  "llvm.arm.neon.vpaddls",
  "llvm.arm.neon.vpaddlu",
  "llvm.arm.neon.vpmaxs",
  "llvm.arm.neon.vpmaxu",
  "llvm.arm.neon.vpmins",
  "llvm.arm.neon.vpminu",
  "llvm.arm.neon.vqabs",
  "llvm.arm.neon.vqdmulh",
  "llvm.arm.neon.vqdmull",
  "llvm.arm.neon.vqmovns",
  "llvm.arm.neon.vqmovnsu",
  "llvm.arm.neon.vqmovnu",
  "llvm.arm.neon.vqneg",
  "llvm.arm.neon.vqrdmlah",
  "llvm.arm.neon.vqrdmlsh",
  "llvm.arm.neon.vqrdmulh",
  "llvm.arm.neon.vqrshiftns",
  "llvm.arm.neon.vqrshiftnsu",
  "llvm.arm.neon.vqrshiftnu",
  "llvm.arm.neon.vqrshifts",
  "llvm.arm.neon.vqrshiftu",
  "llvm.arm.neon.vqshiftns",
  "llvm.arm.neon.vqshiftnsu",
  "llvm.arm.neon.vqshiftnu",
  "llvm.arm.neon.vqshifts",
  "llvm.arm.neon.vqshiftsu",
  "llvm.arm.neon.vqshiftu",
  "llvm.arm.neon.vraddhn",
  "llvm.arm.neon.vrecpe",
  "llvm.arm.neon.vrecps",
  "llvm.arm.neon.vrhadds",
  "llvm.arm.neon.vrhaddu",
  "llvm.arm.neon.vrinta",
  "llvm.arm.neon.vrintm",
  "llvm.arm.neon.vrintn",
  "llvm.arm.neon.vrintp",
  "llvm.arm.neon.vrintx",
  "llvm.arm.neon.vrintz",
  "llvm.arm.neon.vrshiftn",
  "llvm.arm.neon.vrshifts",
  "llvm.arm.neon.vrshiftu",
  "llvm.arm.neon.vrsqrte",
  "llvm.arm.neon.vrsqrts",
  "llvm.arm.neon.vrsubhn",
  "llvm.arm.neon.vshiftins",
  "llvm.arm.neon.vshifts",
  "llvm.arm.neon.vshiftu",
  "llvm.arm.neon.vst1",
  "llvm.arm.neon.vst1x2",
  "llvm.arm.neon.vst1x3",
  "llvm.arm.neon.vst1x4",
  "llvm.arm.neon.vst2",
  "llvm.arm.neon.vst2lane",
  "llvm.arm.neon.vst3",
  "llvm.arm.neon.vst3lane",
  "llvm.arm.neon.vst4",
  "llvm.arm.neon.vst4lane",
  "llvm.arm.neon.vtbl1",
  "llvm.arm.neon.vtbl2",
  "llvm.arm.neon.vtbl3",
  "llvm.arm.neon.vtbl4",
  "llvm.arm.neon.vtbx1",
  "llvm.arm.neon.vtbx2",
  "llvm.arm.neon.vtbx3",
  "llvm.arm.neon.vtbx4",
  "llvm.arm.qadd",
  "llvm.arm.qadd16",
  "llvm.arm.qadd8",
  "llvm.arm.qasx",
  "llvm.arm.qsax",
  "llvm.arm.qsub",
  "llvm.arm.qsub16",
  "llvm.arm.qsub8",
  "llvm.arm.sadd16",
  "llvm.arm.sadd8",
  "llvm.arm.sasx",
  "llvm.arm.sel",
  "llvm.arm.set.fpscr",
  "llvm.arm.shadd16",
  "llvm.arm.shadd8",
  "llvm.arm.shasx",
  "llvm.arm.shsax",
  "llvm.arm.shsub16",
  "llvm.arm.shsub8",
  "llvm.arm.smlabb",
  "llvm.arm.smlabt",
  "llvm.arm.smlad",
  "llvm.arm.smladx",
  "llvm.arm.smlald",
  "llvm.arm.smlaldx",
  "llvm.arm.smlatb",
  "llvm.arm.smlatt",
  "llvm.arm.smlawb",
  "llvm.arm.smlawt",
  "llvm.arm.smlsd",
  "llvm.arm.smlsdx",
  "llvm.arm.smlsld",
  "llvm.arm.smlsldx",
  "llvm.arm.smuad",
  "llvm.arm.smuadx",
  "llvm.arm.smulbb",
  "llvm.arm.smulbt",
  "llvm.arm.smultb",
  "llvm.arm.smultt",
  "llvm.arm.smulwb",
  "llvm.arm.smulwt",
  "llvm.arm.smusd",
  "llvm.arm.smusdx",
  "llvm.arm.space",
  "llvm.arm.ssat",
  "llvm.arm.ssat16",
  "llvm.arm.ssax",
  "llvm.arm.ssub16",
  "llvm.arm.ssub8",
  "llvm.arm.stc",
  "llvm.arm.stc2",
  "llvm.arm.stc2l",
  "llvm.arm.stcl",
  "llvm.arm.stlex",
  "llvm.arm.stlexd",
  "llvm.arm.strex",
  "llvm.arm.strexd",
  "llvm.arm.sxtab16",
  "llvm.arm.sxtb16",
  "llvm.arm.uadd16",
  "llvm.arm.uadd8",
  "llvm.arm.uasx",
  "llvm.arm.uhadd16",
  "llvm.arm.uhadd8",
  "llvm.arm.uhasx",
  "llvm.arm.uhsax",
  "llvm.arm.uhsub16",
  "llvm.arm.uhsub8",
  "llvm.arm.undefined",
  "llvm.arm.uqadd16",
  "llvm.arm.uqadd8",
  "llvm.arm.uqasx",
  "llvm.arm.uqsax",
  "llvm.arm.uqsub16",
  "llvm.arm.uqsub8",
  "llvm.arm.usad8",
  "llvm.arm.usada8",
  "llvm.arm.usat",
  "llvm.arm.usat16",
  "llvm.arm.usax",
  "llvm.arm.usub16",
  "llvm.arm.usub8",
  "llvm.arm.uxtab16",
  "llvm.arm.uxtb16",
  "llvm.arm.vcvtr",
  "llvm.arm.vcvtru",
  "llvm.bpf.btf.type.id",
  "llvm.bpf.compare",
  "llvm.bpf.load.byte",
  "llvm.bpf.load.half",
  "llvm.bpf.load.word",
  "llvm.bpf.passthrough",
  "llvm.bpf.preserve.enum.value",
  "llvm.bpf.preserve.field.info",
  "llvm.bpf.preserve.type.info",
  "llvm.bpf.pseudo",
  "llvm.dx.create.handle",
  "llvm.dx.flattened.thread.id.in.group",
  "llvm.dx.group.id",
  "llvm.dx.thread.id",
  "llvm.dx.thread.id.in.group",
  "llvm.hexagon.A2.abs",
  "llvm.hexagon.A2.absp",
  "llvm.hexagon.A2.abssat",
  "llvm.hexagon.A2.add",
  "llvm.hexagon.A2.addh.h16.hh",
  "llvm.hexagon.A2.addh.h16.hl",
  "llvm.hexagon.A2.addh.h16.lh",
  "llvm.hexagon.A2.addh.h16.ll",
  "llvm.hexagon.A2.addh.h16.sat.hh",
  "llvm.hexagon.A2.addh.h16.sat.hl",
  "llvm.hexagon.A2.addh.h16.sat.lh",
  "llvm.hexagon.A2.addh.h16.sat.ll",
  "llvm.hexagon.A2.addh.l16.hl",
  "llvm.hexagon.A2.addh.l16.ll",
  "llvm.hexagon.A2.addh.l16.sat.hl",
  "llvm.hexagon.A2.addh.l16.sat.ll",
  "llvm.hexagon.A2.addi",
  "llvm.hexagon.A2.addp",
  "llvm.hexagon.A2.addpsat",
  "llvm.hexagon.A2.addsat",
  "llvm.hexagon.A2.addsp",
  "llvm.hexagon.A2.and",
  "llvm.hexagon.A2.andir",
  "llvm.hexagon.A2.andp",
  "llvm.hexagon.A2.aslh",
  "llvm.hexagon.A2.asrh",
  "llvm.hexagon.A2.combine.hh",
  "llvm.hexagon.A2.combine.hl",
  "llvm.hexagon.A2.combine.lh",
  "llvm.hexagon.A2.combine.ll",
  "llvm.hexagon.A2.combineii",
  "llvm.hexagon.A2.combinew",
  "llvm.hexagon.A2.max",
  "llvm.hexagon.A2.maxp",
  "llvm.hexagon.A2.maxu",
  "llvm.hexagon.A2.maxup",
  "llvm.hexagon.A2.min",
  "llvm.hexagon.A2.minp",
  "llvm.hexagon.A2.minu",
  "llvm.hexagon.A2.minup",
  "llvm.hexagon.A2.neg",
  "llvm.hexagon.A2.negp",
  "llvm.hexagon.A2.negsat",
  "llvm.hexagon.A2.not",
  "llvm.hexagon.A2.notp",
  "llvm.hexagon.A2.or",
  "llvm.hexagon.A2.orir",
  "llvm.hexagon.A2.orp",
  "llvm.hexagon.A2.roundsat",
  "llvm.hexagon.A2.sat",
  "llvm.hexagon.A2.satb",
  "llvm.hexagon.A2.sath",
  "llvm.hexagon.A2.satub",
  "llvm.hexagon.A2.satuh",
  "llvm.hexagon.A2.sub",
  "llvm.hexagon.A2.subh.h16.hh",
  "llvm.hexagon.A2.subh.h16.hl",
  "llvm.hexagon.A2.subh.h16.lh",
  "llvm.hexagon.A2.subh.h16.ll",
  "llvm.hexagon.A2.subh.h16.sat.hh",
  "llvm.hexagon.A2.subh.h16.sat.hl",
  "llvm.hexagon.A2.subh.h16.sat.lh",
  "llvm.hexagon.A2.subh.h16.sat.ll",
  "llvm.hexagon.A2.subh.l16.hl",
  "llvm.hexagon.A2.subh.l16.ll",
  "llvm.hexagon.A2.subh.l16.sat.hl",
  "llvm.hexagon.A2.subh.l16.sat.ll",
  "llvm.hexagon.A2.subp",
  "llvm.hexagon.A2.subri",
  "llvm.hexagon.A2.subsat",
  "llvm.hexagon.A2.svaddh",
  "llvm.hexagon.A2.svaddhs",
  "llvm.hexagon.A2.svadduhs",
  "llvm.hexagon.A2.svavgh",
  "llvm.hexagon.A2.svavghs",
  "llvm.hexagon.A2.svnavgh",
  "llvm.hexagon.A2.svsubh",
  "llvm.hexagon.A2.svsubhs",
  "llvm.hexagon.A2.svsubuhs",
  "llvm.hexagon.A2.swiz",
  "llvm.hexagon.A2.sxtb",
  "llvm.hexagon.A2.sxth",
  "llvm.hexagon.A2.sxtw",
  "llvm.hexagon.A2.tfr",
  "llvm.hexagon.A2.tfrih",
  "llvm.hexagon.A2.tfril",
  "llvm.hexagon.A2.tfrp",
  "llvm.hexagon.A2.tfrpi",
  "llvm.hexagon.A2.tfrsi",
  "llvm.hexagon.A2.vabsh",
  "llvm.hexagon.A2.vabshsat",
  "llvm.hexagon.A2.vabsw",
  "llvm.hexagon.A2.vabswsat",
  "llvm.hexagon.A2.vaddb.map",
  "llvm.hexagon.A2.vaddh",
  "llvm.hexagon.A2.vaddhs",
  "llvm.hexagon.A2.vaddub",
  "llvm.hexagon.A2.vaddubs",
  "llvm.hexagon.A2.vadduhs",
  "llvm.hexagon.A2.vaddw",
  "llvm.hexagon.A2.vaddws",
  "llvm.hexagon.A2.vavgh",
  "llvm.hexagon.A2.vavghcr",
  "llvm.hexagon.A2.vavghr",
  "llvm.hexagon.A2.vavgub",
  "llvm.hexagon.A2.vavgubr",
  "llvm.hexagon.A2.vavguh",
  "llvm.hexagon.A2.vavguhr",
  "llvm.hexagon.A2.vavguw",
  "llvm.hexagon.A2.vavguwr",
  "llvm.hexagon.A2.vavgw",
  "llvm.hexagon.A2.vavgwcr",
  "llvm.hexagon.A2.vavgwr",
  "llvm.hexagon.A2.vcmpbeq",
  "llvm.hexagon.A2.vcmpbgtu",
  "llvm.hexagon.A2.vcmpheq",
  "llvm.hexagon.A2.vcmphgt",
  "llvm.hexagon.A2.vcmphgtu",
  "llvm.hexagon.A2.vcmpweq",
  "llvm.hexagon.A2.vcmpwgt",
  "llvm.hexagon.A2.vcmpwgtu",
  "llvm.hexagon.A2.vconj",
  "llvm.hexagon.A2.vmaxb",
  "llvm.hexagon.A2.vmaxh",
  "llvm.hexagon.A2.vmaxub",
  "llvm.hexagon.A2.vmaxuh",
  "llvm.hexagon.A2.vmaxuw",
  "llvm.hexagon.A2.vmaxw",
  "llvm.hexagon.A2.vminb",
  "llvm.hexagon.A2.vminh",
  "llvm.hexagon.A2.vminub",
  "llvm.hexagon.A2.vminuh",
  "llvm.hexagon.A2.vminuw",
  "llvm.hexagon.A2.vminw",
  "llvm.hexagon.A2.vnavgh",
  "llvm.hexagon.A2.vnavghcr",
  "llvm.hexagon.A2.vnavghr",
  "llvm.hexagon.A2.vnavgw",
  "llvm.hexagon.A2.vnavgwcr",
  "llvm.hexagon.A2.vnavgwr",
  "llvm.hexagon.A2.vraddub",
  "llvm.hexagon.A2.vraddub.acc",
  "llvm.hexagon.A2.vrsadub",
  "llvm.hexagon.A2.vrsadub.acc",
  "llvm.hexagon.A2.vsubb.map",
  "llvm.hexagon.A2.vsubh",
  "llvm.hexagon.A2.vsubhs",
  "llvm.hexagon.A2.vsubub",
  "llvm.hexagon.A2.vsububs",
  "llvm.hexagon.A2.vsubuhs",
  "llvm.hexagon.A2.vsubw",
  "llvm.hexagon.A2.vsubws",
  "llvm.hexagon.A2.xor",
  "llvm.hexagon.A2.xorp",
  "llvm.hexagon.A2.zxtb",
  "llvm.hexagon.A2.zxth",
  "llvm.hexagon.A4.andn",
  "llvm.hexagon.A4.andnp",
  "llvm.hexagon.A4.bitsplit",
  "llvm.hexagon.A4.bitspliti",
  "llvm.hexagon.A4.boundscheck",
  "llvm.hexagon.A4.cmpbeq",
  "llvm.hexagon.A4.cmpbeqi",
  "llvm.hexagon.A4.cmpbgt",
  "llvm.hexagon.A4.cmpbgti",
  "llvm.hexagon.A4.cmpbgtu",
  "llvm.hexagon.A4.cmpbgtui",
  "llvm.hexagon.A4.cmpheq",
  "llvm.hexagon.A4.cmpheqi",
  "llvm.hexagon.A4.cmphgt",
  "llvm.hexagon.A4.cmphgti",
  "llvm.hexagon.A4.cmphgtu",
  "llvm.hexagon.A4.cmphgtui",
  "llvm.hexagon.A4.combineir",
  "llvm.hexagon.A4.combineri",
  "llvm.hexagon.A4.cround.ri",
  "llvm.hexagon.A4.cround.rr",
  "llvm.hexagon.A4.modwrapu",
  "llvm.hexagon.A4.orn",
  "llvm.hexagon.A4.ornp",
  "llvm.hexagon.A4.rcmpeq",
  "llvm.hexagon.A4.rcmpeqi",
  "llvm.hexagon.A4.rcmpneq",
  "llvm.hexagon.A4.rcmpneqi",
  "llvm.hexagon.A4.round.ri",
  "llvm.hexagon.A4.round.ri.sat",
  "llvm.hexagon.A4.round.rr",
  "llvm.hexagon.A4.round.rr.sat",
  "llvm.hexagon.A4.tlbmatch",
  "llvm.hexagon.A4.vcmpbeq.any",
  "llvm.hexagon.A4.vcmpbeqi",
  "llvm.hexagon.A4.vcmpbgt",
  "llvm.hexagon.A4.vcmpbgti",
  "llvm.hexagon.A4.vcmpbgtui",
  "llvm.hexagon.A4.vcmpheqi",
  "llvm.hexagon.A4.vcmphgti",
  "llvm.hexagon.A4.vcmphgtui",
  "llvm.hexagon.A4.vcmpweqi",
  "llvm.hexagon.A4.vcmpwgti",
  "llvm.hexagon.A4.vcmpwgtui",
  "llvm.hexagon.A4.vrmaxh",
  "llvm.hexagon.A4.vrmaxuh",
  "llvm.hexagon.A4.vrmaxuw",
  "llvm.hexagon.A4.vrmaxw",
  "llvm.hexagon.A4.vrminh",
  "llvm.hexagon.A4.vrminuh",
  "llvm.hexagon.A4.vrminuw",
  "llvm.hexagon.A4.vrminw",
  "llvm.hexagon.A5.vaddhubs",
  "llvm.hexagon.A6.vcmpbeq.notany",
  "llvm.hexagon.A7.clip",
  "llvm.hexagon.A7.croundd.ri",
  "llvm.hexagon.A7.croundd.rr",
  "llvm.hexagon.A7.vclip",
  "llvm.hexagon.C2.all8",
  "llvm.hexagon.C2.and",
  "llvm.hexagon.C2.andn",
  "llvm.hexagon.C2.any8",
  "llvm.hexagon.C2.bitsclr",
  "llvm.hexagon.C2.bitsclri",
  "llvm.hexagon.C2.bitsset",
  "llvm.hexagon.C2.cmpeq",
  "llvm.hexagon.C2.cmpeqi",
  "llvm.hexagon.C2.cmpeqp",
  "llvm.hexagon.C2.cmpgei",
  "llvm.hexagon.C2.cmpgeui",
  "llvm.hexagon.C2.cmpgt",
  "llvm.hexagon.C2.cmpgti",
  "llvm.hexagon.C2.cmpgtp",
  "llvm.hexagon.C2.cmpgtu",
  "llvm.hexagon.C2.cmpgtui",
  "llvm.hexagon.C2.cmpgtup",
  "llvm.hexagon.C2.cmplt",
  "llvm.hexagon.C2.cmpltu",
  "llvm.hexagon.C2.mask",
  "llvm.hexagon.C2.mux",
  "llvm.hexagon.C2.muxii",
  "llvm.hexagon.C2.muxir",
  "llvm.hexagon.C2.muxri",
  "llvm.hexagon.C2.not",
  "llvm.hexagon.C2.or",
  "llvm.hexagon.C2.orn",
  "llvm.hexagon.C2.pxfer.map",
  "llvm.hexagon.C2.tfrpr",
  "llvm.hexagon.C2.tfrrp",
  "llvm.hexagon.C2.vitpack",
  "llvm.hexagon.C2.vmux",
  "llvm.hexagon.C2.xor",
  "llvm.hexagon.C4.and.and",
  "llvm.hexagon.C4.and.andn",
  "llvm.hexagon.C4.and.or",
  "llvm.hexagon.C4.and.orn",
  "llvm.hexagon.C4.cmplte",
  "llvm.hexagon.C4.cmpltei",
  "llvm.hexagon.C4.cmplteu",
  "llvm.hexagon.C4.cmplteui",
  "llvm.hexagon.C4.cmpneq",
  "llvm.hexagon.C4.cmpneqi",
  "llvm.hexagon.C4.fastcorner9",
  "llvm.hexagon.C4.fastcorner9.not",
  "llvm.hexagon.C4.nbitsclr",
  "llvm.hexagon.C4.nbitsclri",
  "llvm.hexagon.C4.nbitsset",
  "llvm.hexagon.C4.or.and",
  "llvm.hexagon.C4.or.andn",
  "llvm.hexagon.C4.or.or",
  "llvm.hexagon.C4.or.orn",
  "llvm.hexagon.F2.conv.d2df",
  "llvm.hexagon.F2.conv.d2sf",
  "llvm.hexagon.F2.conv.df2d",
  "llvm.hexagon.F2.conv.df2d.chop",
  "llvm.hexagon.F2.conv.df2sf",
  "llvm.hexagon.F2.conv.df2ud",
  "llvm.hexagon.F2.conv.df2ud.chop",
  "llvm.hexagon.F2.conv.df2uw",
  "llvm.hexagon.F2.conv.df2uw.chop",
  "llvm.hexagon.F2.conv.df2w",
  "llvm.hexagon.F2.conv.df2w.chop",
  "llvm.hexagon.F2.conv.sf2d",
  "llvm.hexagon.F2.conv.sf2d.chop",
  "llvm.hexagon.F2.conv.sf2df",
  "llvm.hexagon.F2.conv.sf2ud",
  "llvm.hexagon.F2.conv.sf2ud.chop",
  "llvm.hexagon.F2.conv.sf2uw",
  "llvm.hexagon.F2.conv.sf2uw.chop",
  "llvm.hexagon.F2.conv.sf2w",
  "llvm.hexagon.F2.conv.sf2w.chop",
  "llvm.hexagon.F2.conv.ud2df",
  "llvm.hexagon.F2.conv.ud2sf",
  "llvm.hexagon.F2.conv.uw2df",
  "llvm.hexagon.F2.conv.uw2sf",
  "llvm.hexagon.F2.conv.w2df",
  "llvm.hexagon.F2.conv.w2sf",
  "llvm.hexagon.F2.dfadd",
  "llvm.hexagon.F2.dfclass",
  "llvm.hexagon.F2.dfcmpeq",
  "llvm.hexagon.F2.dfcmpge",
  "llvm.hexagon.F2.dfcmpgt",
  "llvm.hexagon.F2.dfcmpuo",
  "llvm.hexagon.F2.dfimm.n",
  "llvm.hexagon.F2.dfimm.p",
  "llvm.hexagon.F2.dfmax",
  "llvm.hexagon.F2.dfmin",
  "llvm.hexagon.F2.dfmpyfix",
  "llvm.hexagon.F2.dfmpyhh",
  "llvm.hexagon.F2.dfmpylh",
  "llvm.hexagon.F2.dfmpyll",
  "llvm.hexagon.F2.dfsub",
  "llvm.hexagon.F2.sfadd",
  "llvm.hexagon.F2.sfclass",
  "llvm.hexagon.F2.sfcmpeq",
  "llvm.hexagon.F2.sfcmpge",
  "llvm.hexagon.F2.sfcmpgt",
  "llvm.hexagon.F2.sfcmpuo",
  "llvm.hexagon.F2.sffixupd",
  "llvm.hexagon.F2.sffixupn",
  "llvm.hexagon.F2.sffixupr",
  "llvm.hexagon.F2.sffma",
  "llvm.hexagon.F2.sffma.lib",
  "llvm.hexagon.F2.sffma.sc",
  "llvm.hexagon.F2.sffms",
  "llvm.hexagon.F2.sffms.lib",
  "llvm.hexagon.F2.sfimm.n",
  "llvm.hexagon.F2.sfimm.p",
  "llvm.hexagon.F2.sfmax",
  "llvm.hexagon.F2.sfmin",
  "llvm.hexagon.F2.sfmpy",
  "llvm.hexagon.F2.sfsub",
  "llvm.hexagon.L2.loadrb.pbr",
  "llvm.hexagon.L2.loadrb.pci",
  "llvm.hexagon.L2.loadrb.pcr",
  "llvm.hexagon.L2.loadrd.pbr",
  "llvm.hexagon.L2.loadrd.pci",
  "llvm.hexagon.L2.loadrd.pcr",
  "llvm.hexagon.L2.loadrh.pbr",
  "llvm.hexagon.L2.loadrh.pci",
  "llvm.hexagon.L2.loadrh.pcr",
  "llvm.hexagon.L2.loadri.pbr",
  "llvm.hexagon.L2.loadri.pci",
  "llvm.hexagon.L2.loadri.pcr",
  "llvm.hexagon.L2.loadrub.pbr",
  "llvm.hexagon.L2.loadrub.pci",
  "llvm.hexagon.L2.loadrub.pcr",
  "llvm.hexagon.L2.loadruh.pbr",
  "llvm.hexagon.L2.loadruh.pci",
  "llvm.hexagon.L2.loadruh.pcr",
  "llvm.hexagon.L2.loadw.locked",
  "llvm.hexagon.L4.loadd.locked",
  "llvm.hexagon.M2.acci",
  "llvm.hexagon.M2.accii",
  "llvm.hexagon.M2.cmaci.s0",
  "llvm.hexagon.M2.cmacr.s0",
  "llvm.hexagon.M2.cmacs.s0",
  "llvm.hexagon.M2.cmacs.s1",
  "llvm.hexagon.M2.cmacsc.s0",
  "llvm.hexagon.M2.cmacsc.s1",
  "llvm.hexagon.M2.cmpyi.s0",
  "llvm.hexagon.M2.cmpyr.s0",
  "llvm.hexagon.M2.cmpyrs.s0",
  "llvm.hexagon.M2.cmpyrs.s1",
  "llvm.hexagon.M2.cmpyrsc.s0",
  "llvm.hexagon.M2.cmpyrsc.s1",
  "llvm.hexagon.M2.cmpys.s0",
  "llvm.hexagon.M2.cmpys.s1",
  "llvm.hexagon.M2.cmpysc.s0",
  "llvm.hexagon.M2.cmpysc.s1",
  "llvm.hexagon.M2.cnacs.s0",
  "llvm.hexagon.M2.cnacs.s1",
  "llvm.hexagon.M2.cnacsc.s0",
  "llvm.hexagon.M2.cnacsc.s1",
  "llvm.hexagon.M2.dpmpyss.acc.s0",
  "llvm.hexagon.M2.dpmpyss.nac.s0",
  "llvm.hexagon.M2.dpmpyss.rnd.s0",
  "llvm.hexagon.M2.dpmpyss.s0",
  "llvm.hexagon.M2.dpmpyuu.acc.s0",
  "llvm.hexagon.M2.dpmpyuu.nac.s0",
  "llvm.hexagon.M2.dpmpyuu.s0",
  "llvm.hexagon.M2.hmmpyh.rs1",
  "llvm.hexagon.M2.hmmpyh.s1",
  "llvm.hexagon.M2.hmmpyl.rs1",
  "llvm.hexagon.M2.hmmpyl.s1",
  "llvm.hexagon.M2.maci",
  "llvm.hexagon.M2.macsin",
  "llvm.hexagon.M2.macsip",
  "llvm.hexagon.M2.mmachs.rs0",
  "llvm.hexagon.M2.mmachs.rs1",
  "llvm.hexagon.M2.mmachs.s0",
  "llvm.hexagon.M2.mmachs.s1",
  "llvm.hexagon.M2.mmacls.rs0",
  "llvm.hexagon.M2.mmacls.rs1",
  "llvm.hexagon.M2.mmacls.s0",
  "llvm.hexagon.M2.mmacls.s1",
  "llvm.hexagon.M2.mmacuhs.rs0",
  "llvm.hexagon.M2.mmacuhs.rs1",
  "llvm.hexagon.M2.mmacuhs.s0",
  "llvm.hexagon.M2.mmacuhs.s1",
  "llvm.hexagon.M2.mmaculs.rs0",
  "llvm.hexagon.M2.mmaculs.rs1",
  "llvm.hexagon.M2.mmaculs.s0",
  "llvm.hexagon.M2.mmaculs.s1",
  "llvm.hexagon.M2.mmpyh.rs0",
  "llvm.hexagon.M2.mmpyh.rs1",
  "llvm.hexagon.M2.mmpyh.s0",
  "llvm.hexagon.M2.mmpyh.s1",
  "llvm.hexagon.M2.mmpyl.rs0",
  "llvm.hexagon.M2.mmpyl.rs1",
  "llvm.hexagon.M2.mmpyl.s0",
  "llvm.hexagon.M2.mmpyl.s1",
  "llvm.hexagon.M2.mmpyuh.rs0",
  "llvm.hexagon.M2.mmpyuh.rs1",
  "llvm.hexagon.M2.mmpyuh.s0",
  "llvm.hexagon.M2.mmpyuh.s1",
  "llvm.hexagon.M2.mmpyul.rs0",
  "llvm.hexagon.M2.mmpyul.rs1",
  "llvm.hexagon.M2.mmpyul.s0",
  "llvm.hexagon.M2.mmpyul.s1",
  "llvm.hexagon.M2.mnaci",
  "llvm.hexagon.M2.mpy.acc.hh.s0",
  "llvm.hexagon.M2.mpy.acc.hh.s1",
  "llvm.hexagon.M2.mpy.acc.hl.s0",
  "llvm.hexagon.M2.mpy.acc.hl.s1",
  "llvm.hexagon.M2.mpy.acc.lh.s0",
  "llvm.hexagon.M2.mpy.acc.lh.s1",
  "llvm.hexagon.M2.mpy.acc.ll.s0",
  "llvm.hexagon.M2.mpy.acc.ll.s1",
  "llvm.hexagon.M2.mpy.acc.sat.hh.s0",
  "llvm.hexagon.M2.mpy.acc.sat.hh.s1",
  "llvm.hexagon.M2.mpy.acc.sat.hl.s0",
  "llvm.hexagon.M2.mpy.acc.sat.hl.s1",
  "llvm.hexagon.M2.mpy.acc.sat.lh.s0",
  "llvm.hexagon.M2.mpy.acc.sat.lh.s1",
  "llvm.hexagon.M2.mpy.acc.sat.ll.s0",
  "llvm.hexagon.M2.mpy.acc.sat.ll.s1",
  "llvm.hexagon.M2.mpy.hh.s0",
  "llvm.hexagon.M2.mpy.hh.s1",
  "llvm.hexagon.M2.mpy.hl.s0",
  "llvm.hexagon.M2.mpy.hl.s1",
  "llvm.hexagon.M2.mpy.lh.s0",
  "llvm.hexagon.M2.mpy.lh.s1",
  "llvm.hexagon.M2.mpy.ll.s0",
  "llvm.hexagon.M2.mpy.ll.s1",
  "llvm.hexagon.M2.mpy.nac.hh.s0",
  "llvm.hexagon.M2.mpy.nac.hh.s1",
  "llvm.hexagon.M2.mpy.nac.hl.s0",
  "llvm.hexagon.M2.mpy.nac.hl.s1",
  "llvm.hexagon.M2.mpy.nac.lh.s0",
  "llvm.hexagon.M2.mpy.nac.lh.s1",
  "llvm.hexagon.M2.mpy.nac.ll.s0",
  "llvm.hexagon.M2.mpy.nac.ll.s1",
  "llvm.hexagon.M2.mpy.nac.sat.hh.s0",
  "llvm.hexagon.M2.mpy.nac.sat.hh.s1",
  "llvm.hexagon.M2.mpy.nac.sat.hl.s0",
  "llvm.hexagon.M2.mpy.nac.sat.hl.s1",
  "llvm.hexagon.M2.mpy.nac.sat.lh.s0",
  "llvm.hexagon.M2.mpy.nac.sat.lh.s1",
  "llvm.hexagon.M2.mpy.nac.sat.ll.s0",
  "llvm.hexagon.M2.mpy.nac.sat.ll.s1",
  "llvm.hexagon.M2.mpy.rnd.hh.s0",
  "llvm.hexagon.M2.mpy.rnd.hh.s1",
  "llvm.hexagon.M2.mpy.rnd.hl.s0",
  "llvm.hexagon.M2.mpy.rnd.hl.s1",
  "llvm.hexagon.M2.mpy.rnd.lh.s0",
  "llvm.hexagon.M2.mpy.rnd.lh.s1",
  "llvm.hexagon.M2.mpy.rnd.ll.s0",
  "llvm.hexagon.M2.mpy.rnd.ll.s1",
  "llvm.hexagon.M2.mpy.sat.hh.s0",
  "llvm.hexagon.M2.mpy.sat.hh.s1",
  "llvm.hexagon.M2.mpy.sat.hl.s0",
  "llvm.hexagon.M2.mpy.sat.hl.s1",
  "llvm.hexagon.M2.mpy.sat.lh.s0",
  "llvm.hexagon.M2.mpy.sat.lh.s1",
  "llvm.hexagon.M2.mpy.sat.ll.s0",
  "llvm.hexagon.M2.mpy.sat.ll.s1",
  "llvm.hexagon.M2.mpy.sat.rnd.hh.s0",
  "llvm.hexagon.M2.mpy.sat.rnd.hh.s1",
  "llvm.hexagon.M2.mpy.sat.rnd.hl.s0",
  "llvm.hexagon.M2.mpy.sat.rnd.hl.s1",
  "llvm.hexagon.M2.mpy.sat.rnd.lh.s0",
  "llvm.hexagon.M2.mpy.sat.rnd.lh.s1",
  "llvm.hexagon.M2.mpy.sat.rnd.ll.s0",
  "llvm.hexagon.M2.mpy.sat.rnd.ll.s1",
  "llvm.hexagon.M2.mpy.up",
  "llvm.hexagon.M2.mpy.up.s1",
  "llvm.hexagon.M2.mpy.up.s1.sat",
  "llvm.hexagon.M2.mpyd.acc.hh.s0",
  "llvm.hexagon.M2.mpyd.acc.hh.s1",
  "llvm.hexagon.M2.mpyd.acc.hl.s0",
  "llvm.hexagon.M2.mpyd.acc.hl.s1",
  "llvm.hexagon.M2.mpyd.acc.lh.s0",
  "llvm.hexagon.M2.mpyd.acc.lh.s1",
  "llvm.hexagon.M2.mpyd.acc.ll.s0",
  "llvm.hexagon.M2.mpyd.acc.ll.s1",
  "llvm.hexagon.M2.mpyd.hh.s0",
  "llvm.hexagon.M2.mpyd.hh.s1",
  "llvm.hexagon.M2.mpyd.hl.s0",
  "llvm.hexagon.M2.mpyd.hl.s1",
  "llvm.hexagon.M2.mpyd.lh.s0",
  "llvm.hexagon.M2.mpyd.lh.s1",
  "llvm.hexagon.M2.mpyd.ll.s0",
  "llvm.hexagon.M2.mpyd.ll.s1",
  "llvm.hexagon.M2.mpyd.nac.hh.s0",
  "llvm.hexagon.M2.mpyd.nac.hh.s1",
  "llvm.hexagon.M2.mpyd.nac.hl.s0",
  "llvm.hexagon.M2.mpyd.nac.hl.s1",
  "llvm.hexagon.M2.mpyd.nac.lh.s0",
  "llvm.hexagon.M2.mpyd.nac.lh.s1",
  "llvm.hexagon.M2.mpyd.nac.ll.s0",
  "llvm.hexagon.M2.mpyd.nac.ll.s1",
  "llvm.hexagon.M2.mpyd.rnd.hh.s0",
  "llvm.hexagon.M2.mpyd.rnd.hh.s1",
  "llvm.hexagon.M2.mpyd.rnd.hl.s0",
  "llvm.hexagon.M2.mpyd.rnd.hl.s1",
  "llvm.hexagon.M2.mpyd.rnd.lh.s0",
  "llvm.hexagon.M2.mpyd.rnd.lh.s1",
  "llvm.hexagon.M2.mpyd.rnd.ll.s0",
  "llvm.hexagon.M2.mpyd.rnd.ll.s1",
  "llvm.hexagon.M2.mpyi",
  "llvm.hexagon.M2.mpysmi",
  "llvm.hexagon.M2.mpysu.up",
  "llvm.hexagon.M2.mpyu.acc.hh.s0",
  "llvm.hexagon.M2.mpyu.acc.hh.s1",
  "llvm.hexagon.M2.mpyu.acc.hl.s0",
  "llvm.hexagon.M2.mpyu.acc.hl.s1",
  "llvm.hexagon.M2.mpyu.acc.lh.s0",
  "llvm.hexagon.M2.mpyu.acc.lh.s1",
  "llvm.hexagon.M2.mpyu.acc.ll.s0",
  "llvm.hexagon.M2.mpyu.acc.ll.s1",
  "llvm.hexagon.M2.mpyu.hh.s0",
  "llvm.hexagon.M2.mpyu.hh.s1",
  "llvm.hexagon.M2.mpyu.hl.s0",
  "llvm.hexagon.M2.mpyu.hl.s1",
  "llvm.hexagon.M2.mpyu.lh.s0",
  "llvm.hexagon.M2.mpyu.lh.s1",
  "llvm.hexagon.M2.mpyu.ll.s0",
  "llvm.hexagon.M2.mpyu.ll.s1",
  "llvm.hexagon.M2.mpyu.nac.hh.s0",
  "llvm.hexagon.M2.mpyu.nac.hh.s1",
  "llvm.hexagon.M2.mpyu.nac.hl.s0",
  "llvm.hexagon.M2.mpyu.nac.hl.s1",
  "llvm.hexagon.M2.mpyu.nac.lh.s0",
  "llvm.hexagon.M2.mpyu.nac.lh.s1",
  "llvm.hexagon.M2.mpyu.nac.ll.s0",
  "llvm.hexagon.M2.mpyu.nac.ll.s1",
  "llvm.hexagon.M2.mpyu.up",
  "llvm.hexagon.M2.mpyud.acc.hh.s0",
  "llvm.hexagon.M2.mpyud.acc.hh.s1",
  "llvm.hexagon.M2.mpyud.acc.hl.s0",
  "llvm.hexagon.M2.mpyud.acc.hl.s1",
  "llvm.hexagon.M2.mpyud.acc.lh.s0",
  "llvm.hexagon.M2.mpyud.acc.lh.s1",
  "llvm.hexagon.M2.mpyud.acc.ll.s0",
  "llvm.hexagon.M2.mpyud.acc.ll.s1",
  "llvm.hexagon.M2.mpyud.hh.s0",
  "llvm.hexagon.M2.mpyud.hh.s1",
  "llvm.hexagon.M2.mpyud.hl.s0",
  "llvm.hexagon.M2.mpyud.hl.s1",
  "llvm.hexagon.M2.mpyud.lh.s0",
  "llvm.hexagon.M2.mpyud.lh.s1",
  "llvm.hexagon.M2.mpyud.ll.s0",
  "llvm.hexagon.M2.mpyud.ll.s1",
  "llvm.hexagon.M2.mpyud.nac.hh.s0",
  "llvm.hexagon.M2.mpyud.nac.hh.s1",
  "llvm.hexagon.M2.mpyud.nac.hl.s0",
  "llvm.hexagon.M2.mpyud.nac.hl.s1",
  "llvm.hexagon.M2.mpyud.nac.lh.s0",
  "llvm.hexagon.M2.mpyud.nac.lh.s1",
  "llvm.hexagon.M2.mpyud.nac.ll.s0",
  "llvm.hexagon.M2.mpyud.nac.ll.s1",
  "llvm.hexagon.M2.mpyui",
  "llvm.hexagon.M2.nacci",
  "llvm.hexagon.M2.naccii",
  "llvm.hexagon.M2.subacc",
  "llvm.hexagon.M2.vabsdiffh",
  "llvm.hexagon.M2.vabsdiffw",
  "llvm.hexagon.M2.vcmac.s0.sat.i",
  "llvm.hexagon.M2.vcmac.s0.sat.r",
  "llvm.hexagon.M2.vcmpy.s0.sat.i",
  "llvm.hexagon.M2.vcmpy.s0.sat.r",
  "llvm.hexagon.M2.vcmpy.s1.sat.i",
  "llvm.hexagon.M2.vcmpy.s1.sat.r",
  "llvm.hexagon.M2.vdmacs.s0",
  "llvm.hexagon.M2.vdmacs.s1",
  "llvm.hexagon.M2.vdmpyrs.s0",
  "llvm.hexagon.M2.vdmpyrs.s1",
  "llvm.hexagon.M2.vdmpys.s0",
  "llvm.hexagon.M2.vdmpys.s1",
  "llvm.hexagon.M2.vmac2",
  "llvm.hexagon.M2.vmac2es",
  "llvm.hexagon.M2.vmac2es.s0",
  "llvm.hexagon.M2.vmac2es.s1",
  "llvm.hexagon.M2.vmac2s.s0",
  "llvm.hexagon.M2.vmac2s.s1",
  "llvm.hexagon.M2.vmac2su.s0",
  "llvm.hexagon.M2.vmac2su.s1",
  "llvm.hexagon.M2.vmpy2es.s0",
  "llvm.hexagon.M2.vmpy2es.s1",
  "llvm.hexagon.M2.vmpy2s.s0",
  "llvm.hexagon.M2.vmpy2s.s0pack",
  "llvm.hexagon.M2.vmpy2s.s1",
  "llvm.hexagon.M2.vmpy2s.s1pack",
  "llvm.hexagon.M2.vmpy2su.s0",
  "llvm.hexagon.M2.vmpy2su.s1",
  "llvm.hexagon.M2.vraddh",
  "llvm.hexagon.M2.vradduh",
  "llvm.hexagon.M2.vrcmaci.s0",
  "llvm.hexagon.M2.vrcmaci.s0c",
  "llvm.hexagon.M2.vrcmacr.s0",
  "llvm.hexagon.M2.vrcmacr.s0c",
  "llvm.hexagon.M2.vrcmpyi.s0",
  "llvm.hexagon.M2.vrcmpyi.s0c",
  "llvm.hexagon.M2.vrcmpyr.s0",
  "llvm.hexagon.M2.vrcmpyr.s0c",
  "llvm.hexagon.M2.vrcmpys.acc.s1",
  "llvm.hexagon.M2.vrcmpys.s1",
  "llvm.hexagon.M2.vrcmpys.s1rp",
  "llvm.hexagon.M2.vrmac.s0",
  "llvm.hexagon.M2.vrmpy.s0",
  "llvm.hexagon.M2.xor.xacc",
  "llvm.hexagon.M4.and.and",
  "llvm.hexagon.M4.and.andn",
  "llvm.hexagon.M4.and.or",
  "llvm.hexagon.M4.and.xor",
  "llvm.hexagon.M4.cmpyi.wh",
  "llvm.hexagon.M4.cmpyi.whc",
  "llvm.hexagon.M4.cmpyr.wh",
  "llvm.hexagon.M4.cmpyr.whc",
  "llvm.hexagon.M4.mac.up.s1.sat",
  "llvm.hexagon.M4.mpyri.addi",
  "llvm.hexagon.M4.mpyri.addr",
  "llvm.hexagon.M4.mpyri.addr.u2",
  "llvm.hexagon.M4.mpyrr.addi",
  "llvm.hexagon.M4.mpyrr.addr",
  "llvm.hexagon.M4.nac.up.s1.sat",
  "llvm.hexagon.M4.or.and",
  "llvm.hexagon.M4.or.andn",
  "llvm.hexagon.M4.or.or",
  "llvm.hexagon.M4.or.xor",
  "llvm.hexagon.M4.pmpyw",
  "llvm.hexagon.M4.pmpyw.acc",
  "llvm.hexagon.M4.vpmpyh",
  "llvm.hexagon.M4.vpmpyh.acc",
  "llvm.hexagon.M4.vrmpyeh.acc.s0",
  "llvm.hexagon.M4.vrmpyeh.acc.s1",
  "llvm.hexagon.M4.vrmpyeh.s0",
  "llvm.hexagon.M4.vrmpyeh.s1",
  "llvm.hexagon.M4.vrmpyoh.acc.s0",
  "llvm.hexagon.M4.vrmpyoh.acc.s1",
  "llvm.hexagon.M4.vrmpyoh.s0",
  "llvm.hexagon.M4.vrmpyoh.s1",
  "llvm.hexagon.M4.xor.and",
  "llvm.hexagon.M4.xor.andn",
  "llvm.hexagon.M4.xor.or",
  "llvm.hexagon.M4.xor.xacc",
  "llvm.hexagon.M5.vdmacbsu",
  "llvm.hexagon.M5.vdmpybsu",
  "llvm.hexagon.M5.vmacbsu",
  "llvm.hexagon.M5.vmacbuu",
  "llvm.hexagon.M5.vmpybsu",
  "llvm.hexagon.M5.vmpybuu",
  "llvm.hexagon.M5.vrmacbsu",
  "llvm.hexagon.M5.vrmacbuu",
  "llvm.hexagon.M5.vrmpybsu",
  "llvm.hexagon.M5.vrmpybuu",
  "llvm.hexagon.M6.vabsdiffb",
  "llvm.hexagon.M6.vabsdiffub",
  "llvm.hexagon.M7.dcmpyiw",
  "llvm.hexagon.M7.dcmpyiw.acc",
  "llvm.hexagon.M7.dcmpyiwc",
  "llvm.hexagon.M7.dcmpyiwc.acc",
  "llvm.hexagon.M7.dcmpyrw",
  "llvm.hexagon.M7.dcmpyrw.acc",
  "llvm.hexagon.M7.dcmpyrwc",
  "llvm.hexagon.M7.dcmpyrwc.acc",
  "llvm.hexagon.M7.vdmpy",
  "llvm.hexagon.M7.vdmpy.acc",
  "llvm.hexagon.M7.wcmpyiw",
  "llvm.hexagon.M7.wcmpyiw.rnd",
  "llvm.hexagon.M7.wcmpyiwc",
  "llvm.hexagon.M7.wcmpyiwc.rnd",
  "llvm.hexagon.M7.wcmpyrw",
  "llvm.hexagon.M7.wcmpyrw.rnd",
  "llvm.hexagon.M7.wcmpyrwc",
  "llvm.hexagon.M7.wcmpyrwc.rnd",
  "llvm.hexagon.S2.addasl.rrri",
  "llvm.hexagon.S2.asl.i.p",
  "llvm.hexagon.S2.asl.i.p.acc",
  "llvm.hexagon.S2.asl.i.p.and",
  "llvm.hexagon.S2.asl.i.p.nac",
  "llvm.hexagon.S2.asl.i.p.or",
  "llvm.hexagon.S2.asl.i.p.xacc",
  "llvm.hexagon.S2.asl.i.r",
  "llvm.hexagon.S2.asl.i.r.acc",
  "llvm.hexagon.S2.asl.i.r.and",
  "llvm.hexagon.S2.asl.i.r.nac",
  "llvm.hexagon.S2.asl.i.r.or",
  "llvm.hexagon.S2.asl.i.r.sat",
  "llvm.hexagon.S2.asl.i.r.xacc",
  "llvm.hexagon.S2.asl.i.vh",
  "llvm.hexagon.S2.asl.i.vw",
  "llvm.hexagon.S2.asl.r.p",
  "llvm.hexagon.S2.asl.r.p.acc",
  "llvm.hexagon.S2.asl.r.p.and",
  "llvm.hexagon.S2.asl.r.p.nac",
  "llvm.hexagon.S2.asl.r.p.or",
  "llvm.hexagon.S2.asl.r.p.xor",
  "llvm.hexagon.S2.asl.r.r",
  "llvm.hexagon.S2.asl.r.r.acc",
  "llvm.hexagon.S2.asl.r.r.and",
  "llvm.hexagon.S2.asl.r.r.nac",
  "llvm.hexagon.S2.asl.r.r.or",
  "llvm.hexagon.S2.asl.r.r.sat",
  "llvm.hexagon.S2.asl.r.vh",
  "llvm.hexagon.S2.asl.r.vw",
  "llvm.hexagon.S2.asr.i.p",
  "llvm.hexagon.S2.asr.i.p.acc",
  "llvm.hexagon.S2.asr.i.p.and",
  "llvm.hexagon.S2.asr.i.p.nac",
  "llvm.hexagon.S2.asr.i.p.or",
  "llvm.hexagon.S2.asr.i.p.rnd",
  "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax",
  "llvm.hexagon.S2.asr.i.r",
  "llvm.hexagon.S2.asr.i.r.acc",
  "llvm.hexagon.S2.asr.i.r.and",
  "llvm.hexagon.S2.asr.i.r.nac",
  "llvm.hexagon.S2.asr.i.r.or",
  "llvm.hexagon.S2.asr.i.r.rnd",
  "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax",
  "llvm.hexagon.S2.asr.i.svw.trun",
  "llvm.hexagon.S2.asr.i.vh",
  "llvm.hexagon.S2.asr.i.vw",
  "llvm.hexagon.S2.asr.r.p",
  "llvm.hexagon.S2.asr.r.p.acc",
  "llvm.hexagon.S2.asr.r.p.and",
  "llvm.hexagon.S2.asr.r.p.nac",
  "llvm.hexagon.S2.asr.r.p.or",
  "llvm.hexagon.S2.asr.r.p.xor",
  "llvm.hexagon.S2.asr.r.r",
  "llvm.hexagon.S2.asr.r.r.acc",
  "llvm.hexagon.S2.asr.r.r.and",
  "llvm.hexagon.S2.asr.r.r.nac",
  "llvm.hexagon.S2.asr.r.r.or",
  "llvm.hexagon.S2.asr.r.r.sat",
  "llvm.hexagon.S2.asr.r.svw.trun",
  "llvm.hexagon.S2.asr.r.vh",
  "llvm.hexagon.S2.asr.r.vw",
  "llvm.hexagon.S2.brev",
  "llvm.hexagon.S2.brevp",
  "llvm.hexagon.S2.cl0",
  "llvm.hexagon.S2.cl0p",
  "llvm.hexagon.S2.cl1",
  "llvm.hexagon.S2.cl1p",
  "llvm.hexagon.S2.clb",
  "llvm.hexagon.S2.clbnorm",
  "llvm.hexagon.S2.clbp",
  "llvm.hexagon.S2.clrbit.i",
  "llvm.hexagon.S2.clrbit.r",
  "llvm.hexagon.S2.ct0",
  "llvm.hexagon.S2.ct0p",
  "llvm.hexagon.S2.ct1",
  "llvm.hexagon.S2.ct1p",
  "llvm.hexagon.S2.deinterleave",
  "llvm.hexagon.S2.extractu",
  "llvm.hexagon.S2.extractu.rp",
  "llvm.hexagon.S2.extractup",
  "llvm.hexagon.S2.extractup.rp",
  "llvm.hexagon.S2.insert",
  "llvm.hexagon.S2.insert.rp",
  "llvm.hexagon.S2.insertp",
  "llvm.hexagon.S2.insertp.rp",
  "llvm.hexagon.S2.interleave",
  "llvm.hexagon.S2.lfsp",
  "llvm.hexagon.S2.lsl.r.p",
  "llvm.hexagon.S2.lsl.r.p.acc",
  "llvm.hexagon.S2.lsl.r.p.and",
  "llvm.hexagon.S2.lsl.r.p.nac",
  "llvm.hexagon.S2.lsl.r.p.or",
  "llvm.hexagon.S2.lsl.r.p.xor",
  "llvm.hexagon.S2.lsl.r.r",
  "llvm.hexagon.S2.lsl.r.r.acc",
  "llvm.hexagon.S2.lsl.r.r.and",
  "llvm.hexagon.S2.lsl.r.r.nac",
  "llvm.hexagon.S2.lsl.r.r.or",
  "llvm.hexagon.S2.lsl.r.vh",
  "llvm.hexagon.S2.lsl.r.vw",
  "llvm.hexagon.S2.lsr.i.p",
  "llvm.hexagon.S2.lsr.i.p.acc",
  "llvm.hexagon.S2.lsr.i.p.and",
  "llvm.hexagon.S2.lsr.i.p.nac",
  "llvm.hexagon.S2.lsr.i.p.or",
  "llvm.hexagon.S2.lsr.i.p.xacc",
  "llvm.hexagon.S2.lsr.i.r",
  "llvm.hexagon.S2.lsr.i.r.acc",
  "llvm.hexagon.S2.lsr.i.r.and",
  "llvm.hexagon.S2.lsr.i.r.nac",
  "llvm.hexagon.S2.lsr.i.r.or",
  "llvm.hexagon.S2.lsr.i.r.xacc",
  "llvm.hexagon.S2.lsr.i.vh",
  "llvm.hexagon.S2.lsr.i.vw",
  "llvm.hexagon.S2.lsr.r.p",
  "llvm.hexagon.S2.lsr.r.p.acc",
  "llvm.hexagon.S2.lsr.r.p.and",
  "llvm.hexagon.S2.lsr.r.p.nac",
  "llvm.hexagon.S2.lsr.r.p.or",
  "llvm.hexagon.S2.lsr.r.p.xor",
  "llvm.hexagon.S2.lsr.r.r",
  "llvm.hexagon.S2.lsr.r.r.acc",
  "llvm.hexagon.S2.lsr.r.r.and",
  "llvm.hexagon.S2.lsr.r.r.nac",
  "llvm.hexagon.S2.lsr.r.r.or",
  "llvm.hexagon.S2.lsr.r.vh",
  "llvm.hexagon.S2.lsr.r.vw",
  "llvm.hexagon.S2.mask",
  "llvm.hexagon.S2.packhl",
  "llvm.hexagon.S2.parityp",
  "llvm.hexagon.S2.setbit.i",
  "llvm.hexagon.S2.setbit.r",
  "llvm.hexagon.S2.shuffeb",
  "llvm.hexagon.S2.shuffeh",
  "llvm.hexagon.S2.shuffob",
  "llvm.hexagon.S2.shuffoh",
  "llvm.hexagon.S2.storerb.pbr",
  "llvm.hexagon.S2.storerb.pci",
  "llvm.hexagon.S2.storerb.pcr",
  "llvm.hexagon.S2.storerd.pbr",
  "llvm.hexagon.S2.storerd.pci",
  "llvm.hexagon.S2.storerd.pcr",
  "llvm.hexagon.S2.storerf.pbr",
  "llvm.hexagon.S2.storerf.pci",
  "llvm.hexagon.S2.storerf.pcr",
  "llvm.hexagon.S2.storerh.pbr",
  "llvm.hexagon.S2.storerh.pci",
  "llvm.hexagon.S2.storerh.pcr",
  "llvm.hexagon.S2.storeri.pbr",
  "llvm.hexagon.S2.storeri.pci",
  "llvm.hexagon.S2.storeri.pcr",
  "llvm.hexagon.S2.storew.locked",
  "llvm.hexagon.S2.svsathb",
  "llvm.hexagon.S2.svsathub",
  "llvm.hexagon.S2.tableidxb.goodsyntax",
  "llvm.hexagon.S2.tableidxd.goodsyntax",
  "llvm.hexagon.S2.tableidxh.goodsyntax",
  "llvm.hexagon.S2.tableidxw.goodsyntax",
  "llvm.hexagon.S2.togglebit.i",
  "llvm.hexagon.S2.togglebit.r",
  "llvm.hexagon.S2.tstbit.i",
  "llvm.hexagon.S2.tstbit.r",
  "llvm.hexagon.S2.valignib",
  "llvm.hexagon.S2.valignrb",
  "llvm.hexagon.S2.vcnegh",
  "llvm.hexagon.S2.vcrotate",
  "llvm.hexagon.S2.vrcnegh",
  "llvm.hexagon.S2.vrndpackwh",
  "llvm.hexagon.S2.vrndpackwhs",
  "llvm.hexagon.S2.vsathb",
  "llvm.hexagon.S2.vsathb.nopack",
  "llvm.hexagon.S2.vsathub",
  "llvm.hexagon.S2.vsathub.nopack",
  "llvm.hexagon.S2.vsatwh",
  "llvm.hexagon.S2.vsatwh.nopack",
  "llvm.hexagon.S2.vsatwuh",
  "llvm.hexagon.S2.vsatwuh.nopack",
  "llvm.hexagon.S2.vsplatrb",
  "llvm.hexagon.S2.vsplatrh",
  "llvm.hexagon.S2.vspliceib",
  "llvm.hexagon.S2.vsplicerb",
  "llvm.hexagon.S2.vsxtbh",
  "llvm.hexagon.S2.vsxthw",
  "llvm.hexagon.S2.vtrunehb",
  "llvm.hexagon.S2.vtrunewh",
  "llvm.hexagon.S2.vtrunohb",
  "llvm.hexagon.S2.vtrunowh",
  "llvm.hexagon.S2.vzxtbh",
  "llvm.hexagon.S2.vzxthw",
  "llvm.hexagon.S4.addaddi",
  "llvm.hexagon.S4.addi.asl.ri",
  "llvm.hexagon.S4.addi.lsr.ri",
  "llvm.hexagon.S4.andi.asl.ri",
  "llvm.hexagon.S4.andi.lsr.ri",
  "llvm.hexagon.S4.clbaddi",
  "llvm.hexagon.S4.clbpaddi",
  "llvm.hexagon.S4.clbpnorm",
  "llvm.hexagon.S4.extract",
  "llvm.hexagon.S4.extract.rp",
  "llvm.hexagon.S4.extractp",
  "llvm.hexagon.S4.extractp.rp",
  "llvm.hexagon.S4.lsli",
  "llvm.hexagon.S4.ntstbit.i",
  "llvm.hexagon.S4.ntstbit.r",
  "llvm.hexagon.S4.or.andi",
  "llvm.hexagon.S4.or.andix",
  "llvm.hexagon.S4.or.ori",
  "llvm.hexagon.S4.ori.asl.ri",
  "llvm.hexagon.S4.ori.lsr.ri",
  "llvm.hexagon.S4.parity",
  "llvm.hexagon.S4.stored.locked",
  "llvm.hexagon.S4.subaddi",
  "llvm.hexagon.S4.subi.asl.ri",
  "llvm.hexagon.S4.subi.lsr.ri",
  "llvm.hexagon.S4.vrcrotate",
  "llvm.hexagon.S4.vrcrotate.acc",
  "llvm.hexagon.S4.vxaddsubh",
  "llvm.hexagon.S4.vxaddsubhr",
  "llvm.hexagon.S4.vxaddsubw",
  "llvm.hexagon.S4.vxsubaddh",
  "llvm.hexagon.S4.vxsubaddhr",
  "llvm.hexagon.S4.vxsubaddw",
  "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax",
  "llvm.hexagon.S5.asrhub.sat",
  "llvm.hexagon.S5.popcountp",
  "llvm.hexagon.S5.vasrhrnd.goodsyntax",
  "llvm.hexagon.S6.rol.i.p",
  "llvm.hexagon.S6.rol.i.p.acc",
  "llvm.hexagon.S6.rol.i.p.and",
  "llvm.hexagon.S6.rol.i.p.nac",
  "llvm.hexagon.S6.rol.i.p.or",
  "llvm.hexagon.S6.rol.i.p.xacc",
  "llvm.hexagon.S6.rol.i.r",
  "llvm.hexagon.S6.rol.i.r.acc",
  "llvm.hexagon.S6.rol.i.r.and",
  "llvm.hexagon.S6.rol.i.r.nac",
  "llvm.hexagon.S6.rol.i.r.or",
  "llvm.hexagon.S6.rol.i.r.xacc",
  "llvm.hexagon.S6.vsplatrbp",
  "llvm.hexagon.S6.vtrunehb.ppp",
  "llvm.hexagon.S6.vtrunohb.ppp",
  "llvm.hexagon.V6.extractw",
  "llvm.hexagon.V6.extractw.128B",
  "llvm.hexagon.V6.hi",
  "llvm.hexagon.V6.hi.128B",
  "llvm.hexagon.V6.lo",
  "llvm.hexagon.V6.lo.128B",
  "llvm.hexagon.V6.lvsplatb",
  "llvm.hexagon.V6.lvsplatb.128B",
  "llvm.hexagon.V6.lvsplath",
  "llvm.hexagon.V6.lvsplath.128B",
  "llvm.hexagon.V6.lvsplatw",
  "llvm.hexagon.V6.lvsplatw.128B",
  "llvm.hexagon.V6.pred.and",
  "llvm.hexagon.V6.pred.and.128B",
  "llvm.hexagon.V6.pred.and.n",
  "llvm.hexagon.V6.pred.and.n.128B",
  "llvm.hexagon.V6.pred.not",
  "llvm.hexagon.V6.pred.not.128B",
  "llvm.hexagon.V6.pred.or",
  "llvm.hexagon.V6.pred.or.128B",
  "llvm.hexagon.V6.pred.or.n",
  "llvm.hexagon.V6.pred.or.n.128B",
  "llvm.hexagon.V6.pred.scalar2",
  "llvm.hexagon.V6.pred.scalar2.128B",
  "llvm.hexagon.V6.pred.scalar2v2",
  "llvm.hexagon.V6.pred.scalar2v2.128B",
  "llvm.hexagon.V6.pred.typecast",
  "llvm.hexagon.V6.pred.typecast.128B",
  "llvm.hexagon.V6.pred.xor",
  "llvm.hexagon.V6.pred.xor.128B",
  "llvm.hexagon.V6.shuffeqh",
  "llvm.hexagon.V6.shuffeqh.128B",
  "llvm.hexagon.V6.shuffeqw",
  "llvm.hexagon.V6.shuffeqw.128B",
  "llvm.hexagon.V6.v6mpyhubs10",
  "llvm.hexagon.V6.v6mpyhubs10.128B",
  "llvm.hexagon.V6.v6mpyhubs10.vxx",
  "llvm.hexagon.V6.v6mpyhubs10.vxx.128B",
  "llvm.hexagon.V6.v6mpyvubs10",
  "llvm.hexagon.V6.v6mpyvubs10.128B",
  "llvm.hexagon.V6.v6mpyvubs10.vxx",
  "llvm.hexagon.V6.v6mpyvubs10.vxx.128B",
  "llvm.hexagon.V6.vL32b.npred.ai",
  "llvm.hexagon.V6.vL32b.npred.ai.128B",
  "llvm.hexagon.V6.vL32b.npred.pi",
  "llvm.hexagon.V6.vL32b.npred.pi.128B",
  "llvm.hexagon.V6.vL32b.npred.ppu",
  "llvm.hexagon.V6.vL32b.npred.ppu.128B",
  "llvm.hexagon.V6.vL32b.nt.npred.ai",
  "llvm.hexagon.V6.vL32b.nt.npred.ai.128B",
  "llvm.hexagon.V6.vL32b.nt.npred.pi",
  "llvm.hexagon.V6.vL32b.nt.npred.pi.128B",
  "llvm.hexagon.V6.vL32b.nt.npred.ppu",
  "llvm.hexagon.V6.vL32b.nt.npred.ppu.128B",
  "llvm.hexagon.V6.vL32b.nt.pred.ai",
  "llvm.hexagon.V6.vL32b.nt.pred.ai.128B",
  "llvm.hexagon.V6.vL32b.nt.pred.pi",
  "llvm.hexagon.V6.vL32b.nt.pred.pi.128B",
  "llvm.hexagon.V6.vL32b.nt.pred.ppu",
  "llvm.hexagon.V6.vL32b.nt.pred.ppu.128B",
  "llvm.hexagon.V6.vL32b.pred.ai",
  "llvm.hexagon.V6.vL32b.pred.ai.128B",
  "llvm.hexagon.V6.vL32b.pred.pi",
  "llvm.hexagon.V6.vL32b.pred.pi.128B",
  "llvm.hexagon.V6.vL32b.pred.ppu",
  "llvm.hexagon.V6.vL32b.pred.ppu.128B",
  "llvm.hexagon.V6.vS32Ub.npred.ai",
  "llvm.hexagon.V6.vS32Ub.npred.ai.128B",
  "llvm.hexagon.V6.vS32Ub.npred.pi",
  "llvm.hexagon.V6.vS32Ub.npred.pi.128B",
  "llvm.hexagon.V6.vS32Ub.npred.ppu",
  "llvm.hexagon.V6.vS32Ub.npred.ppu.128B",
  "llvm.hexagon.V6.vS32Ub.pred.ai",
  "llvm.hexagon.V6.vS32Ub.pred.ai.128B",
  "llvm.hexagon.V6.vS32Ub.pred.pi",
  "llvm.hexagon.V6.vS32Ub.pred.pi.128B",
  "llvm.hexagon.V6.vS32Ub.pred.ppu",
  "llvm.hexagon.V6.vS32Ub.pred.ppu.128B",
  "llvm.hexagon.V6.vS32b.npred.ai",
  "llvm.hexagon.V6.vS32b.npred.ai.128B",
  "llvm.hexagon.V6.vS32b.npred.pi",
  "llvm.hexagon.V6.vS32b.npred.pi.128B",
  "llvm.hexagon.V6.vS32b.npred.ppu",
  "llvm.hexagon.V6.vS32b.npred.ppu.128B",
  "llvm.hexagon.V6.vS32b.nqpred.ai",
  "llvm.hexagon.V6.vS32b.nqpred.ai.128B",
  "llvm.hexagon.V6.vS32b.nt.npred.ai",
  "llvm.hexagon.V6.vS32b.nt.npred.ai.128B",
  "llvm.hexagon.V6.vS32b.nt.npred.pi",
  "llvm.hexagon.V6.vS32b.nt.npred.pi.128B",
  "llvm.hexagon.V6.vS32b.nt.npred.ppu",
  "llvm.hexagon.V6.vS32b.nt.npred.ppu.128B",
  "llvm.hexagon.V6.vS32b.nt.nqpred.ai",
  "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B",
  "llvm.hexagon.V6.vS32b.nt.pred.ai",
  "llvm.hexagon.V6.vS32b.nt.pred.ai.128B",
  "llvm.hexagon.V6.vS32b.nt.pred.pi",
  "llvm.hexagon.V6.vS32b.nt.pred.pi.128B",
  "llvm.hexagon.V6.vS32b.nt.pred.ppu",
  "llvm.hexagon.V6.vS32b.nt.pred.ppu.128B",
  "llvm.hexagon.V6.vS32b.nt.qpred.ai",
  "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B",
  "llvm.hexagon.V6.vS32b.pred.ai",
  "llvm.hexagon.V6.vS32b.pred.ai.128B",
  "llvm.hexagon.V6.vS32b.pred.pi",
  "llvm.hexagon.V6.vS32b.pred.pi.128B",
  "llvm.hexagon.V6.vS32b.pred.ppu",
  "llvm.hexagon.V6.vS32b.pred.ppu.128B",
  "llvm.hexagon.V6.vS32b.qpred.ai",
  "llvm.hexagon.V6.vS32b.qpred.ai.128B",
  "llvm.hexagon.V6.vabs.hf",
  "llvm.hexagon.V6.vabs.hf.128B",
  "llvm.hexagon.V6.vabs.sf",
  "llvm.hexagon.V6.vabs.sf.128B",
  "llvm.hexagon.V6.vabsb",
  "llvm.hexagon.V6.vabsb.128B",
  "llvm.hexagon.V6.vabsb.sat",
  "llvm.hexagon.V6.vabsb.sat.128B",
  "llvm.hexagon.V6.vabsdiffh",
  "llvm.hexagon.V6.vabsdiffh.128B",
  "llvm.hexagon.V6.vabsdiffub",
  "llvm.hexagon.V6.vabsdiffub.128B",
  "llvm.hexagon.V6.vabsdiffuh",
  "llvm.hexagon.V6.vabsdiffuh.128B",
  "llvm.hexagon.V6.vabsdiffw",
  "llvm.hexagon.V6.vabsdiffw.128B",
  "llvm.hexagon.V6.vabsh",
  "llvm.hexagon.V6.vabsh.128B",
  "llvm.hexagon.V6.vabsh.sat",
  "llvm.hexagon.V6.vabsh.sat.128B",
  "llvm.hexagon.V6.vabsw",
  "llvm.hexagon.V6.vabsw.128B",
  "llvm.hexagon.V6.vabsw.sat",
  "llvm.hexagon.V6.vabsw.sat.128B",
  "llvm.hexagon.V6.vadd.hf",
  "llvm.hexagon.V6.vadd.hf.128B",
  "llvm.hexagon.V6.vadd.hf.hf",
  "llvm.hexagon.V6.vadd.hf.hf.128B",
  "llvm.hexagon.V6.vadd.qf16",
  "llvm.hexagon.V6.vadd.qf16.128B",
  "llvm.hexagon.V6.vadd.qf16.mix",
  "llvm.hexagon.V6.vadd.qf16.mix.128B",
  "llvm.hexagon.V6.vadd.qf32",
  "llvm.hexagon.V6.vadd.qf32.128B",
  "llvm.hexagon.V6.vadd.qf32.mix",
  "llvm.hexagon.V6.vadd.qf32.mix.128B",
  "llvm.hexagon.V6.vadd.sf",
  "llvm.hexagon.V6.vadd.sf.128B",
  "llvm.hexagon.V6.vadd.sf.bf",
  "llvm.hexagon.V6.vadd.sf.bf.128B",
  "llvm.hexagon.V6.vadd.sf.hf",
  "llvm.hexagon.V6.vadd.sf.hf.128B",
  "llvm.hexagon.V6.vadd.sf.sf",
  "llvm.hexagon.V6.vadd.sf.sf.128B",
  "llvm.hexagon.V6.vaddb",
  "llvm.hexagon.V6.vaddb.128B",
  "llvm.hexagon.V6.vaddb.dv",
  "llvm.hexagon.V6.vaddb.dv.128B",
  "llvm.hexagon.V6.vaddbnq",
  "llvm.hexagon.V6.vaddbnq.128B",
  "llvm.hexagon.V6.vaddbq",
  "llvm.hexagon.V6.vaddbq.128B",
  "llvm.hexagon.V6.vaddbsat",
  "llvm.hexagon.V6.vaddbsat.128B",
  "llvm.hexagon.V6.vaddbsat.dv",
  "llvm.hexagon.V6.vaddbsat.dv.128B",
  "llvm.hexagon.V6.vaddcarry",
  "llvm.hexagon.V6.vaddcarry.128B",
  "llvm.hexagon.V6.vaddcarryo",
  "llvm.hexagon.V6.vaddcarryo.128B",
  "llvm.hexagon.V6.vaddcarrysat",
  "llvm.hexagon.V6.vaddcarrysat.128B",
  "llvm.hexagon.V6.vaddclbh",
  "llvm.hexagon.V6.vaddclbh.128B",
  "llvm.hexagon.V6.vaddclbw",
  "llvm.hexagon.V6.vaddclbw.128B",
  "llvm.hexagon.V6.vaddh",
  "llvm.hexagon.V6.vaddh.128B",
  "llvm.hexagon.V6.vaddh.dv",
  "llvm.hexagon.V6.vaddh.dv.128B",
  "llvm.hexagon.V6.vaddhnq",
  "llvm.hexagon.V6.vaddhnq.128B",
  "llvm.hexagon.V6.vaddhq",
  "llvm.hexagon.V6.vaddhq.128B",
  "llvm.hexagon.V6.vaddhsat",
  "llvm.hexagon.V6.vaddhsat.128B",
  "llvm.hexagon.V6.vaddhsat.dv",
  "llvm.hexagon.V6.vaddhsat.dv.128B",
  "llvm.hexagon.V6.vaddhw",
  "llvm.hexagon.V6.vaddhw.128B",
  "llvm.hexagon.V6.vaddhw.acc",
  "llvm.hexagon.V6.vaddhw.acc.128B",
  "llvm.hexagon.V6.vaddubh",
  "llvm.hexagon.V6.vaddubh.128B",
  "llvm.hexagon.V6.vaddubh.acc",
  "llvm.hexagon.V6.vaddubh.acc.128B",
  "llvm.hexagon.V6.vaddubsat",
  "llvm.hexagon.V6.vaddubsat.128B",
  "llvm.hexagon.V6.vaddubsat.dv",
  "llvm.hexagon.V6.vaddubsat.dv.128B",
  "llvm.hexagon.V6.vaddububb.sat",
  "llvm.hexagon.V6.vaddububb.sat.128B",
  "llvm.hexagon.V6.vadduhsat",
  "llvm.hexagon.V6.vadduhsat.128B",
  "llvm.hexagon.V6.vadduhsat.dv",
  "llvm.hexagon.V6.vadduhsat.dv.128B",
  "llvm.hexagon.V6.vadduhw",
  "llvm.hexagon.V6.vadduhw.128B",
  "llvm.hexagon.V6.vadduhw.acc",
  "llvm.hexagon.V6.vadduhw.acc.128B",
  "llvm.hexagon.V6.vadduwsat",
  "llvm.hexagon.V6.vadduwsat.128B",
  "llvm.hexagon.V6.vadduwsat.dv",
  "llvm.hexagon.V6.vadduwsat.dv.128B",
  "llvm.hexagon.V6.vaddw",
  "llvm.hexagon.V6.vaddw.128B",
  "llvm.hexagon.V6.vaddw.dv",
  "llvm.hexagon.V6.vaddw.dv.128B",
  "llvm.hexagon.V6.vaddwnq",
  "llvm.hexagon.V6.vaddwnq.128B",
  "llvm.hexagon.V6.vaddwq",
  "llvm.hexagon.V6.vaddwq.128B",
  "llvm.hexagon.V6.vaddwsat",
  "llvm.hexagon.V6.vaddwsat.128B",
  "llvm.hexagon.V6.vaddwsat.dv",
  "llvm.hexagon.V6.vaddwsat.dv.128B",
  "llvm.hexagon.V6.valignb",
  "llvm.hexagon.V6.valignb.128B",
  "llvm.hexagon.V6.valignbi",
  "llvm.hexagon.V6.valignbi.128B",
  "llvm.hexagon.V6.vand",
  "llvm.hexagon.V6.vand.128B",
  "llvm.hexagon.V6.vandnqrt",
  "llvm.hexagon.V6.vandnqrt.128B",
  "llvm.hexagon.V6.vandnqrt.acc",
  "llvm.hexagon.V6.vandnqrt.acc.128B",
  "llvm.hexagon.V6.vandqrt",
  "llvm.hexagon.V6.vandqrt.128B",
  "llvm.hexagon.V6.vandqrt.acc",
  "llvm.hexagon.V6.vandqrt.acc.128B",
  "llvm.hexagon.V6.vandvnqv",
  "llvm.hexagon.V6.vandvnqv.128B",
  "llvm.hexagon.V6.vandvqv",
  "llvm.hexagon.V6.vandvqv.128B",
  "llvm.hexagon.V6.vandvrt",
  "llvm.hexagon.V6.vandvrt.128B",
  "llvm.hexagon.V6.vandvrt.acc",
  "llvm.hexagon.V6.vandvrt.acc.128B",
  "llvm.hexagon.V6.vaslh",
  "llvm.hexagon.V6.vaslh.128B",
  "llvm.hexagon.V6.vaslh.acc",
  "llvm.hexagon.V6.vaslh.acc.128B",
  "llvm.hexagon.V6.vaslhv",
  "llvm.hexagon.V6.vaslhv.128B",
  "llvm.hexagon.V6.vaslw",
  "llvm.hexagon.V6.vaslw.128B",
  "llvm.hexagon.V6.vaslw.acc",
  "llvm.hexagon.V6.vaslw.acc.128B",
  "llvm.hexagon.V6.vaslwv",
  "llvm.hexagon.V6.vaslwv.128B",
  "llvm.hexagon.V6.vasr.into",
  "llvm.hexagon.V6.vasr.into.128B",
  "llvm.hexagon.V6.vasrh",
  "llvm.hexagon.V6.vasrh.128B",
  "llvm.hexagon.V6.vasrh.acc",
  "llvm.hexagon.V6.vasrh.acc.128B",
  "llvm.hexagon.V6.vasrhbrndsat",
  "llvm.hexagon.V6.vasrhbrndsat.128B",
  "llvm.hexagon.V6.vasrhbsat",
  "llvm.hexagon.V6.vasrhbsat.128B",
  "llvm.hexagon.V6.vasrhubrndsat",
  "llvm.hexagon.V6.vasrhubrndsat.128B",
  "llvm.hexagon.V6.vasrhubsat",
  "llvm.hexagon.V6.vasrhubsat.128B",
  "llvm.hexagon.V6.vasrhv",
  "llvm.hexagon.V6.vasrhv.128B",
  "llvm.hexagon.V6.vasruhubrndsat",
  "llvm.hexagon.V6.vasruhubrndsat.128B",
  "llvm.hexagon.V6.vasruhubsat",
  "llvm.hexagon.V6.vasruhubsat.128B",
  "llvm.hexagon.V6.vasruwuhrndsat",
  "llvm.hexagon.V6.vasruwuhrndsat.128B",
  "llvm.hexagon.V6.vasruwuhsat",
  "llvm.hexagon.V6.vasruwuhsat.128B",
  "llvm.hexagon.V6.vasrvuhubrndsat",
  "llvm.hexagon.V6.vasrvuhubrndsat.128B",
  "llvm.hexagon.V6.vasrvuhubsat",
  "llvm.hexagon.V6.vasrvuhubsat.128B",
  "llvm.hexagon.V6.vasrvwuhrndsat",
  "llvm.hexagon.V6.vasrvwuhrndsat.128B",
  "llvm.hexagon.V6.vasrvwuhsat",
  "llvm.hexagon.V6.vasrvwuhsat.128B",
  "llvm.hexagon.V6.vasrw",
  "llvm.hexagon.V6.vasrw.128B",
  "llvm.hexagon.V6.vasrw.acc",
  "llvm.hexagon.V6.vasrw.acc.128B",
  "llvm.hexagon.V6.vasrwh",
  "llvm.hexagon.V6.vasrwh.128B",
  "llvm.hexagon.V6.vasrwhrndsat",
  "llvm.hexagon.V6.vasrwhrndsat.128B",
  "llvm.hexagon.V6.vasrwhsat",
  "llvm.hexagon.V6.vasrwhsat.128B",
  "llvm.hexagon.V6.vasrwuhrndsat",
  "llvm.hexagon.V6.vasrwuhrndsat.128B",
  "llvm.hexagon.V6.vasrwuhsat",
  "llvm.hexagon.V6.vasrwuhsat.128B",
  "llvm.hexagon.V6.vasrwv",
  "llvm.hexagon.V6.vasrwv.128B",
  "llvm.hexagon.V6.vassign",
  "llvm.hexagon.V6.vassign.128B",
  "llvm.hexagon.V6.vassign.fp",
  "llvm.hexagon.V6.vassign.fp.128B",
  "llvm.hexagon.V6.vassignp",
  "llvm.hexagon.V6.vassignp.128B",
  "llvm.hexagon.V6.vavgb",
  "llvm.hexagon.V6.vavgb.128B",
  "llvm.hexagon.V6.vavgbrnd",
  "llvm.hexagon.V6.vavgbrnd.128B",
  "llvm.hexagon.V6.vavgh",
  "llvm.hexagon.V6.vavgh.128B",
  "llvm.hexagon.V6.vavghrnd",
  "llvm.hexagon.V6.vavghrnd.128B",
  "llvm.hexagon.V6.vavgub",
  "llvm.hexagon.V6.vavgub.128B",
  "llvm.hexagon.V6.vavgubrnd",
  "llvm.hexagon.V6.vavgubrnd.128B",
  "llvm.hexagon.V6.vavguh",
  "llvm.hexagon.V6.vavguh.128B",
  "llvm.hexagon.V6.vavguhrnd",
  "llvm.hexagon.V6.vavguhrnd.128B",
  "llvm.hexagon.V6.vavguw",
  "llvm.hexagon.V6.vavguw.128B",
  "llvm.hexagon.V6.vavguwrnd",
  "llvm.hexagon.V6.vavguwrnd.128B",
  "llvm.hexagon.V6.vavgw",
  "llvm.hexagon.V6.vavgw.128B",
  "llvm.hexagon.V6.vavgwrnd",
  "llvm.hexagon.V6.vavgwrnd.128B",
  "llvm.hexagon.V6.vcl0h",
  "llvm.hexagon.V6.vcl0h.128B",
  "llvm.hexagon.V6.vcl0w",
  "llvm.hexagon.V6.vcl0w.128B",
  "llvm.hexagon.V6.vcombine",
  "llvm.hexagon.V6.vcombine.128B",
  "llvm.hexagon.V6.vconv.h.hf",
  "llvm.hexagon.V6.vconv.h.hf.128B",
  "llvm.hexagon.V6.vconv.hf.h",
  "llvm.hexagon.V6.vconv.hf.h.128B",
  "llvm.hexagon.V6.vconv.hf.qf16",
  "llvm.hexagon.V6.vconv.hf.qf16.128B",
  "llvm.hexagon.V6.vconv.hf.qf32",
  "llvm.hexagon.V6.vconv.hf.qf32.128B",
  "llvm.hexagon.V6.vconv.sf.qf32",
  "llvm.hexagon.V6.vconv.sf.qf32.128B",
  "llvm.hexagon.V6.vconv.sf.w",
  "llvm.hexagon.V6.vconv.sf.w.128B",
  "llvm.hexagon.V6.vconv.w.sf",
  "llvm.hexagon.V6.vconv.w.sf.128B",
  "llvm.hexagon.V6.vcvt.b.hf",
  "llvm.hexagon.V6.vcvt.b.hf.128B",
  "llvm.hexagon.V6.vcvt.bf.sf",
  "llvm.hexagon.V6.vcvt.bf.sf.128B",
  "llvm.hexagon.V6.vcvt.h.hf",
  "llvm.hexagon.V6.vcvt.h.hf.128B",
  "llvm.hexagon.V6.vcvt.hf.b",
  "llvm.hexagon.V6.vcvt.hf.b.128B",
  "llvm.hexagon.V6.vcvt.hf.h",
  "llvm.hexagon.V6.vcvt.hf.h.128B",
  "llvm.hexagon.V6.vcvt.hf.sf",
  "llvm.hexagon.V6.vcvt.hf.sf.128B",
  "llvm.hexagon.V6.vcvt.hf.ub",
  "llvm.hexagon.V6.vcvt.hf.ub.128B",
  "llvm.hexagon.V6.vcvt.hf.uh",
  "llvm.hexagon.V6.vcvt.hf.uh.128B",
  "llvm.hexagon.V6.vcvt.sf.hf",
  "llvm.hexagon.V6.vcvt.sf.hf.128B",
  "llvm.hexagon.V6.vcvt.ub.hf",
  "llvm.hexagon.V6.vcvt.ub.hf.128B",
  "llvm.hexagon.V6.vcvt.uh.hf",
  "llvm.hexagon.V6.vcvt.uh.hf.128B",
  "llvm.hexagon.V6.vd0",
  "llvm.hexagon.V6.vd0.128B",
  "llvm.hexagon.V6.vdd0",
  "llvm.hexagon.V6.vdd0.128B",
  "llvm.hexagon.V6.vdealb",
  "llvm.hexagon.V6.vdealb.128B",
  "llvm.hexagon.V6.vdealb4w",
  "llvm.hexagon.V6.vdealb4w.128B",
  "llvm.hexagon.V6.vdealh",
  "llvm.hexagon.V6.vdealh.128B",
  "llvm.hexagon.V6.vdealvdd",
  "llvm.hexagon.V6.vdealvdd.128B",
  "llvm.hexagon.V6.vdelta",
  "llvm.hexagon.V6.vdelta.128B",
  "llvm.hexagon.V6.vdmpy.sf.hf",
  "llvm.hexagon.V6.vdmpy.sf.hf.128B",
  "llvm.hexagon.V6.vdmpy.sf.hf.acc",
  "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B",
  "llvm.hexagon.V6.vdmpybus",
  "llvm.hexagon.V6.vdmpybus.128B",
  "llvm.hexagon.V6.vdmpybus.acc",
  "llvm.hexagon.V6.vdmpybus.acc.128B",
  "llvm.hexagon.V6.vdmpybus.dv",
  "llvm.hexagon.V6.vdmpybus.dv.128B",
  "llvm.hexagon.V6.vdmpybus.dv.acc",
  "llvm.hexagon.V6.vdmpybus.dv.acc.128B",
  "llvm.hexagon.V6.vdmpyhb",
  "llvm.hexagon.V6.vdmpyhb.128B",
  "llvm.hexagon.V6.vdmpyhb.acc",
  "llvm.hexagon.V6.vdmpyhb.acc.128B",
  "llvm.hexagon.V6.vdmpyhb.dv",
  "llvm.hexagon.V6.vdmpyhb.dv.128B",
  "llvm.hexagon.V6.vdmpyhb.dv.acc",
  "llvm.hexagon.V6.vdmpyhb.dv.acc.128B",
  "llvm.hexagon.V6.vdmpyhisat",
  "llvm.hexagon.V6.vdmpyhisat.128B",
  "llvm.hexagon.V6.vdmpyhisat.acc",
  "llvm.hexagon.V6.vdmpyhisat.acc.128B",
  "llvm.hexagon.V6.vdmpyhsat",
  "llvm.hexagon.V6.vdmpyhsat.128B",
  "llvm.hexagon.V6.vdmpyhsat.acc",
  "llvm.hexagon.V6.vdmpyhsat.acc.128B",
  "llvm.hexagon.V6.vdmpyhsuisat",
  "llvm.hexagon.V6.vdmpyhsuisat.128B",
  "llvm.hexagon.V6.vdmpyhsuisat.acc",
  "llvm.hexagon.V6.vdmpyhsuisat.acc.128B",
  "llvm.hexagon.V6.vdmpyhsusat",
  "llvm.hexagon.V6.vdmpyhsusat.128B",
  "llvm.hexagon.V6.vdmpyhsusat.acc",
  "llvm.hexagon.V6.vdmpyhsusat.acc.128B",
  "llvm.hexagon.V6.vdmpyhvsat",
  "llvm.hexagon.V6.vdmpyhvsat.128B",
  "llvm.hexagon.V6.vdmpyhvsat.acc",
  "llvm.hexagon.V6.vdmpyhvsat.acc.128B",
  "llvm.hexagon.V6.vdsaduh",
  "llvm.hexagon.V6.vdsaduh.128B",
  "llvm.hexagon.V6.vdsaduh.acc",
  "llvm.hexagon.V6.vdsaduh.acc.128B",
  "llvm.hexagon.V6.veqb",
  "llvm.hexagon.V6.veqb.128B",
  "llvm.hexagon.V6.veqb.and",
  "llvm.hexagon.V6.veqb.and.128B",
  "llvm.hexagon.V6.veqb.or",
  "llvm.hexagon.V6.veqb.or.128B",
  "llvm.hexagon.V6.veqb.xor",
  "llvm.hexagon.V6.veqb.xor.128B",
  "llvm.hexagon.V6.veqh",
  "llvm.hexagon.V6.veqh.128B",
  "llvm.hexagon.V6.veqh.and",
  "llvm.hexagon.V6.veqh.and.128B",
  "llvm.hexagon.V6.veqh.or",
  "llvm.hexagon.V6.veqh.or.128B",
  "llvm.hexagon.V6.veqh.xor",
  "llvm.hexagon.V6.veqh.xor.128B",
  "llvm.hexagon.V6.veqw",
  "llvm.hexagon.V6.veqw.128B",
  "llvm.hexagon.V6.veqw.and",
  "llvm.hexagon.V6.veqw.and.128B",
  "llvm.hexagon.V6.veqw.or",
  "llvm.hexagon.V6.veqw.or.128B",
  "llvm.hexagon.V6.veqw.xor",
  "llvm.hexagon.V6.veqw.xor.128B",
  "llvm.hexagon.V6.vfmax.hf",
  "llvm.hexagon.V6.vfmax.hf.128B",
  "llvm.hexagon.V6.vfmax.sf",
  "llvm.hexagon.V6.vfmax.sf.128B",
  "llvm.hexagon.V6.vfmin.hf",
  "llvm.hexagon.V6.vfmin.hf.128B",
  "llvm.hexagon.V6.vfmin.sf",
  "llvm.hexagon.V6.vfmin.sf.128B",
  "llvm.hexagon.V6.vfneg.hf",
  "llvm.hexagon.V6.vfneg.hf.128B",
  "llvm.hexagon.V6.vfneg.sf",
  "llvm.hexagon.V6.vfneg.sf.128B",
  "llvm.hexagon.V6.vgathermh",
  "llvm.hexagon.V6.vgathermh.128B",
  "llvm.hexagon.V6.vgathermhq",
  "llvm.hexagon.V6.vgathermhq.128B",
  "llvm.hexagon.V6.vgathermhw",
  "llvm.hexagon.V6.vgathermhw.128B",
  "llvm.hexagon.V6.vgathermhwq",
  "llvm.hexagon.V6.vgathermhwq.128B",
  "llvm.hexagon.V6.vgathermw",
  "llvm.hexagon.V6.vgathermw.128B",
  "llvm.hexagon.V6.vgathermwq",
  "llvm.hexagon.V6.vgathermwq.128B",
  "llvm.hexagon.V6.vgtb",
  "llvm.hexagon.V6.vgtb.128B",
  "llvm.hexagon.V6.vgtb.and",
  "llvm.hexagon.V6.vgtb.and.128B",
  "llvm.hexagon.V6.vgtb.or",
  "llvm.hexagon.V6.vgtb.or.128B",
  "llvm.hexagon.V6.vgtb.xor",
  "llvm.hexagon.V6.vgtb.xor.128B",
  "llvm.hexagon.V6.vgtbf",
  "llvm.hexagon.V6.vgtbf.128B",
  "llvm.hexagon.V6.vgtbf.and",
  "llvm.hexagon.V6.vgtbf.and.128B",
  "llvm.hexagon.V6.vgtbf.or",
  "llvm.hexagon.V6.vgtbf.or.128B",
  "llvm.hexagon.V6.vgtbf.xor",
  "llvm.hexagon.V6.vgtbf.xor.128B",
  "llvm.hexagon.V6.vgth",
  "llvm.hexagon.V6.vgth.128B",
  "llvm.hexagon.V6.vgth.and",
  "llvm.hexagon.V6.vgth.and.128B",
  "llvm.hexagon.V6.vgth.or",
  "llvm.hexagon.V6.vgth.or.128B",
  "llvm.hexagon.V6.vgth.xor",
  "llvm.hexagon.V6.vgth.xor.128B",
  "llvm.hexagon.V6.vgthf",
  "llvm.hexagon.V6.vgthf.128B",
  "llvm.hexagon.V6.vgthf.and",
  "llvm.hexagon.V6.vgthf.and.128B",
  "llvm.hexagon.V6.vgthf.or",
  "llvm.hexagon.V6.vgthf.or.128B",
  "llvm.hexagon.V6.vgthf.xor",
  "llvm.hexagon.V6.vgthf.xor.128B",
  "llvm.hexagon.V6.vgtsf",
  "llvm.hexagon.V6.vgtsf.128B",
  "llvm.hexagon.V6.vgtsf.and",
  "llvm.hexagon.V6.vgtsf.and.128B",
  "llvm.hexagon.V6.vgtsf.or",
  "llvm.hexagon.V6.vgtsf.or.128B",
  "llvm.hexagon.V6.vgtsf.xor",
  "llvm.hexagon.V6.vgtsf.xor.128B",
  "llvm.hexagon.V6.vgtub",
  "llvm.hexagon.V6.vgtub.128B",
  "llvm.hexagon.V6.vgtub.and",
  "llvm.hexagon.V6.vgtub.and.128B",
  "llvm.hexagon.V6.vgtub.or",
  "llvm.hexagon.V6.vgtub.or.128B",
  "llvm.hexagon.V6.vgtub.xor",
  "llvm.hexagon.V6.vgtub.xor.128B",
  "llvm.hexagon.V6.vgtuh",
  "llvm.hexagon.V6.vgtuh.128B",
  "llvm.hexagon.V6.vgtuh.and",
  "llvm.hexagon.V6.vgtuh.and.128B",
  "llvm.hexagon.V6.vgtuh.or",
  "llvm.hexagon.V6.vgtuh.or.128B",
  "llvm.hexagon.V6.vgtuh.xor",
  "llvm.hexagon.V6.vgtuh.xor.128B",
  "llvm.hexagon.V6.vgtuw",
  "llvm.hexagon.V6.vgtuw.128B",
  "llvm.hexagon.V6.vgtuw.and",
  "llvm.hexagon.V6.vgtuw.and.128B",
  "llvm.hexagon.V6.vgtuw.or",
  "llvm.hexagon.V6.vgtuw.or.128B",
  "llvm.hexagon.V6.vgtuw.xor",
  "llvm.hexagon.V6.vgtuw.xor.128B",
  "llvm.hexagon.V6.vgtw",
  "llvm.hexagon.V6.vgtw.128B",
  "llvm.hexagon.V6.vgtw.and",
  "llvm.hexagon.V6.vgtw.and.128B",
  "llvm.hexagon.V6.vgtw.or",
  "llvm.hexagon.V6.vgtw.or.128B",
  "llvm.hexagon.V6.vgtw.xor",
  "llvm.hexagon.V6.vgtw.xor.128B",
  "llvm.hexagon.V6.vinsertwr",
  "llvm.hexagon.V6.vinsertwr.128B",
  "llvm.hexagon.V6.vlalignb",
  "llvm.hexagon.V6.vlalignb.128B",
  "llvm.hexagon.V6.vlalignbi",
  "llvm.hexagon.V6.vlalignbi.128B",
  "llvm.hexagon.V6.vlsrb",
  "llvm.hexagon.V6.vlsrb.128B",
  "llvm.hexagon.V6.vlsrh",
  "llvm.hexagon.V6.vlsrh.128B",
  "llvm.hexagon.V6.vlsrhv",
  "llvm.hexagon.V6.vlsrhv.128B",
  "llvm.hexagon.V6.vlsrw",
  "llvm.hexagon.V6.vlsrw.128B",
  "llvm.hexagon.V6.vlsrwv",
  "llvm.hexagon.V6.vlsrwv.128B",
  "llvm.hexagon.V6.vlut4",
  "llvm.hexagon.V6.vlut4.128B",
  "llvm.hexagon.V6.vlutvvb",
  "llvm.hexagon.V6.vlutvvb.128B",
  "llvm.hexagon.V6.vlutvvb.nm",
  "llvm.hexagon.V6.vlutvvb.nm.128B",
  "llvm.hexagon.V6.vlutvvb.oracc",
  "llvm.hexagon.V6.vlutvvb.oracc.128B",
  "llvm.hexagon.V6.vlutvvb.oracci",
  "llvm.hexagon.V6.vlutvvb.oracci.128B",
  "llvm.hexagon.V6.vlutvvbi",
  "llvm.hexagon.V6.vlutvvbi.128B",
  "llvm.hexagon.V6.vlutvwh",
  "llvm.hexagon.V6.vlutvwh.128B",
  "llvm.hexagon.V6.vlutvwh.nm",
  "llvm.hexagon.V6.vlutvwh.nm.128B",
  "llvm.hexagon.V6.vlutvwh.oracc",
  "llvm.hexagon.V6.vlutvwh.oracc.128B",
  "llvm.hexagon.V6.vlutvwh.oracci",
  "llvm.hexagon.V6.vlutvwh.oracci.128B",
  "llvm.hexagon.V6.vlutvwhi",
  "llvm.hexagon.V6.vlutvwhi.128B",
  "llvm.hexagon.V6.vmaskedstorenq",
  "llvm.hexagon.V6.vmaskedstorenq.128B",
  "llvm.hexagon.V6.vmaskedstorentnq",
  "llvm.hexagon.V6.vmaskedstorentnq.128B",
  "llvm.hexagon.V6.vmaskedstorentq",
  "llvm.hexagon.V6.vmaskedstorentq.128B",
  "llvm.hexagon.V6.vmaskedstoreq",
  "llvm.hexagon.V6.vmaskedstoreq.128B",
  "llvm.hexagon.V6.vmax.bf",
  "llvm.hexagon.V6.vmax.bf.128B",
  "llvm.hexagon.V6.vmax.hf",
  "llvm.hexagon.V6.vmax.hf.128B",
  "llvm.hexagon.V6.vmax.sf",
  "llvm.hexagon.V6.vmax.sf.128B",
  "llvm.hexagon.V6.vmaxb",
  "llvm.hexagon.V6.vmaxb.128B",
  "llvm.hexagon.V6.vmaxh",
  "llvm.hexagon.V6.vmaxh.128B",
  "llvm.hexagon.V6.vmaxub",
  "llvm.hexagon.V6.vmaxub.128B",
  "llvm.hexagon.V6.vmaxuh",
  "llvm.hexagon.V6.vmaxuh.128B",
  "llvm.hexagon.V6.vmaxw",
  "llvm.hexagon.V6.vmaxw.128B",
  "llvm.hexagon.V6.vmin.bf",
  "llvm.hexagon.V6.vmin.bf.128B",
  "llvm.hexagon.V6.vmin.hf",
  "llvm.hexagon.V6.vmin.hf.128B",
  "llvm.hexagon.V6.vmin.sf",
  "llvm.hexagon.V6.vmin.sf.128B",
  "llvm.hexagon.V6.vminb",
  "llvm.hexagon.V6.vminb.128B",
  "llvm.hexagon.V6.vminh",
  "llvm.hexagon.V6.vminh.128B",
  "llvm.hexagon.V6.vminub",
  "llvm.hexagon.V6.vminub.128B",
  "llvm.hexagon.V6.vminuh",
  "llvm.hexagon.V6.vminuh.128B",
  "llvm.hexagon.V6.vminw",
  "llvm.hexagon.V6.vminw.128B",
  "llvm.hexagon.V6.vmpabus",
  "llvm.hexagon.V6.vmpabus.128B",
  "llvm.hexagon.V6.vmpabus.acc",
  "llvm.hexagon.V6.vmpabus.acc.128B",
  "llvm.hexagon.V6.vmpabusv",
  "llvm.hexagon.V6.vmpabusv.128B",
  "llvm.hexagon.V6.vmpabuu",
  "llvm.hexagon.V6.vmpabuu.128B",
  "llvm.hexagon.V6.vmpabuu.acc",
  "llvm.hexagon.V6.vmpabuu.acc.128B",
  "llvm.hexagon.V6.vmpabuuv",
  "llvm.hexagon.V6.vmpabuuv.128B",
  "llvm.hexagon.V6.vmpahb",
  "llvm.hexagon.V6.vmpahb.128B",
  "llvm.hexagon.V6.vmpahb.acc",
  "llvm.hexagon.V6.vmpahb.acc.128B",
  "llvm.hexagon.V6.vmpahhsat",
  "llvm.hexagon.V6.vmpahhsat.128B",
  "llvm.hexagon.V6.vmpauhb",
  "llvm.hexagon.V6.vmpauhb.128B",
  "llvm.hexagon.V6.vmpauhb.acc",
  "llvm.hexagon.V6.vmpauhb.acc.128B",
  "llvm.hexagon.V6.vmpauhuhsat",
  "llvm.hexagon.V6.vmpauhuhsat.128B",
  "llvm.hexagon.V6.vmpsuhuhsat",
  "llvm.hexagon.V6.vmpsuhuhsat.128B",
  "llvm.hexagon.V6.vmpy.hf.hf",
  "llvm.hexagon.V6.vmpy.hf.hf.128B",
  "llvm.hexagon.V6.vmpy.hf.hf.acc",
  "llvm.hexagon.V6.vmpy.hf.hf.acc.128B",
  "llvm.hexagon.V6.vmpy.qf16",
  "llvm.hexagon.V6.vmpy.qf16.128B",
  "llvm.hexagon.V6.vmpy.qf16.hf",
  "llvm.hexagon.V6.vmpy.qf16.hf.128B",
  "llvm.hexagon.V6.vmpy.qf16.mix.hf",
  "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B",
  "llvm.hexagon.V6.vmpy.qf32",
  "llvm.hexagon.V6.vmpy.qf32.128B",
  "llvm.hexagon.V6.vmpy.qf32.hf",
  "llvm.hexagon.V6.vmpy.qf32.hf.128B",
  "llvm.hexagon.V6.vmpy.qf32.mix.hf",
  "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B",
  "llvm.hexagon.V6.vmpy.qf32.qf16",
  "llvm.hexagon.V6.vmpy.qf32.qf16.128B",
  "llvm.hexagon.V6.vmpy.qf32.sf",
  "llvm.hexagon.V6.vmpy.qf32.sf.128B",
  "llvm.hexagon.V6.vmpy.sf.bf",
  "llvm.hexagon.V6.vmpy.sf.bf.128B",
  "llvm.hexagon.V6.vmpy.sf.bf.acc",
  "llvm.hexagon.V6.vmpy.sf.bf.acc.128B",
  "llvm.hexagon.V6.vmpy.sf.hf",
  "llvm.hexagon.V6.vmpy.sf.hf.128B",
  "llvm.hexagon.V6.vmpy.sf.hf.acc",
  "llvm.hexagon.V6.vmpy.sf.hf.acc.128B",
  "llvm.hexagon.V6.vmpy.sf.sf",
  "llvm.hexagon.V6.vmpy.sf.sf.128B",
  "llvm.hexagon.V6.vmpybus",
  "llvm.hexagon.V6.vmpybus.128B",
  "llvm.hexagon.V6.vmpybus.acc",
  "llvm.hexagon.V6.vmpybus.acc.128B",
  "llvm.hexagon.V6.vmpybusv",
  "llvm.hexagon.V6.vmpybusv.128B",
  "llvm.hexagon.V6.vmpybusv.acc",
  "llvm.hexagon.V6.vmpybusv.acc.128B",
  "llvm.hexagon.V6.vmpybv",
  "llvm.hexagon.V6.vmpybv.128B",
  "llvm.hexagon.V6.vmpybv.acc",
  "llvm.hexagon.V6.vmpybv.acc.128B",
  "llvm.hexagon.V6.vmpyewuh",
  "llvm.hexagon.V6.vmpyewuh.128B",
  "llvm.hexagon.V6.vmpyewuh.64",
  "llvm.hexagon.V6.vmpyewuh.64.128B",
  "llvm.hexagon.V6.vmpyh",
  "llvm.hexagon.V6.vmpyh.128B",
  "llvm.hexagon.V6.vmpyh.acc",
  "llvm.hexagon.V6.vmpyh.acc.128B",
  "llvm.hexagon.V6.vmpyhsat.acc",
  "llvm.hexagon.V6.vmpyhsat.acc.128B",
  "llvm.hexagon.V6.vmpyhsrs",
  "llvm.hexagon.V6.vmpyhsrs.128B",
  "llvm.hexagon.V6.vmpyhss",
  "llvm.hexagon.V6.vmpyhss.128B",
  "llvm.hexagon.V6.vmpyhus",
  "llvm.hexagon.V6.vmpyhus.128B",
  "llvm.hexagon.V6.vmpyhus.acc",
  "llvm.hexagon.V6.vmpyhus.acc.128B",
  "llvm.hexagon.V6.vmpyhv",
  "llvm.hexagon.V6.vmpyhv.128B",
  "llvm.hexagon.V6.vmpyhv.acc",
  "llvm.hexagon.V6.vmpyhv.acc.128B",
  "llvm.hexagon.V6.vmpyhvsrs",
  "llvm.hexagon.V6.vmpyhvsrs.128B",
  "llvm.hexagon.V6.vmpyieoh",
  "llvm.hexagon.V6.vmpyieoh.128B",
  "llvm.hexagon.V6.vmpyiewh.acc",
  "llvm.hexagon.V6.vmpyiewh.acc.128B",
  "llvm.hexagon.V6.vmpyiewuh",
  "llvm.hexagon.V6.vmpyiewuh.128B",
  "llvm.hexagon.V6.vmpyiewuh.acc",
  "llvm.hexagon.V6.vmpyiewuh.acc.128B",
  "llvm.hexagon.V6.vmpyih",
  "llvm.hexagon.V6.vmpyih.128B",
  "llvm.hexagon.V6.vmpyih.acc",
  "llvm.hexagon.V6.vmpyih.acc.128B",
  "llvm.hexagon.V6.vmpyihb",
  "llvm.hexagon.V6.vmpyihb.128B",
  "llvm.hexagon.V6.vmpyihb.acc",
  "llvm.hexagon.V6.vmpyihb.acc.128B",
  "llvm.hexagon.V6.vmpyiowh",
  "llvm.hexagon.V6.vmpyiowh.128B",
  "llvm.hexagon.V6.vmpyiwb",
  "llvm.hexagon.V6.vmpyiwb.128B",
  "llvm.hexagon.V6.vmpyiwb.acc",
  "llvm.hexagon.V6.vmpyiwb.acc.128B",
  "llvm.hexagon.V6.vmpyiwh",
  "llvm.hexagon.V6.vmpyiwh.128B",
  "llvm.hexagon.V6.vmpyiwh.acc",
  "llvm.hexagon.V6.vmpyiwh.acc.128B",
  "llvm.hexagon.V6.vmpyiwub",
  "llvm.hexagon.V6.vmpyiwub.128B",
  "llvm.hexagon.V6.vmpyiwub.acc",
  "llvm.hexagon.V6.vmpyiwub.acc.128B",
  "llvm.hexagon.V6.vmpyowh",
  "llvm.hexagon.V6.vmpyowh.128B",
  "llvm.hexagon.V6.vmpyowh.64.acc",
  "llvm.hexagon.V6.vmpyowh.64.acc.128B",
  "llvm.hexagon.V6.vmpyowh.rnd",
  "llvm.hexagon.V6.vmpyowh.rnd.128B",
  "llvm.hexagon.V6.vmpyowh.rnd.sacc",
  "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B",
  "llvm.hexagon.V6.vmpyowh.sacc",
  "llvm.hexagon.V6.vmpyowh.sacc.128B",
  "llvm.hexagon.V6.vmpyss.parts",
  "llvm.hexagon.V6.vmpyss.parts.128B",
  "llvm.hexagon.V6.vmpyub",
  "llvm.hexagon.V6.vmpyub.128B",
  "llvm.hexagon.V6.vmpyub.acc",
  "llvm.hexagon.V6.vmpyub.acc.128B",
  "llvm.hexagon.V6.vmpyubv",
  "llvm.hexagon.V6.vmpyubv.128B",
  "llvm.hexagon.V6.vmpyubv.acc",
  "llvm.hexagon.V6.vmpyubv.acc.128B",
  "llvm.hexagon.V6.vmpyuh",
  "llvm.hexagon.V6.vmpyuh.128B",
  "llvm.hexagon.V6.vmpyuh.acc",
  "llvm.hexagon.V6.vmpyuh.acc.128B",
  "llvm.hexagon.V6.vmpyuhe",
  "llvm.hexagon.V6.vmpyuhe.128B",
  "llvm.hexagon.V6.vmpyuhe.acc",
  "llvm.hexagon.V6.vmpyuhe.acc.128B",
  "llvm.hexagon.V6.vmpyuhv",
  "llvm.hexagon.V6.vmpyuhv.128B",
  "llvm.hexagon.V6.vmpyuhv.acc",
  "llvm.hexagon.V6.vmpyuhv.acc.128B",
  "llvm.hexagon.V6.vmpyuhvs",
  "llvm.hexagon.V6.vmpyuhvs.128B",
  "llvm.hexagon.V6.vmpyus.parts",
  "llvm.hexagon.V6.vmpyus.parts.128B",
  "llvm.hexagon.V6.vmpyuu.parts",
  "llvm.hexagon.V6.vmpyuu.parts.128B",
  "llvm.hexagon.V6.vmux",
  "llvm.hexagon.V6.vmux.128B",
  "llvm.hexagon.V6.vnavgb",
  "llvm.hexagon.V6.vnavgb.128B",
  "llvm.hexagon.V6.vnavgh",
  "llvm.hexagon.V6.vnavgh.128B",
  "llvm.hexagon.V6.vnavgub",
  "llvm.hexagon.V6.vnavgub.128B",
  "llvm.hexagon.V6.vnavgw",
  "llvm.hexagon.V6.vnavgw.128B",
  "llvm.hexagon.V6.vnormamth",
  "llvm.hexagon.V6.vnormamth.128B",
  "llvm.hexagon.V6.vnormamtw",
  "llvm.hexagon.V6.vnormamtw.128B",
  "llvm.hexagon.V6.vnot",
  "llvm.hexagon.V6.vnot.128B",
  "llvm.hexagon.V6.vor",
  "llvm.hexagon.V6.vor.128B",
  "llvm.hexagon.V6.vpackeb",
  "llvm.hexagon.V6.vpackeb.128B",
  "llvm.hexagon.V6.vpackeh",
  "llvm.hexagon.V6.vpackeh.128B",
  "llvm.hexagon.V6.vpackhb.sat",
  "llvm.hexagon.V6.vpackhb.sat.128B",
  "llvm.hexagon.V6.vpackhub.sat",
  "llvm.hexagon.V6.vpackhub.sat.128B",
  "llvm.hexagon.V6.vpackob",
  "llvm.hexagon.V6.vpackob.128B",
  "llvm.hexagon.V6.vpackoh",
  "llvm.hexagon.V6.vpackoh.128B",
  "llvm.hexagon.V6.vpackwh.sat",
  "llvm.hexagon.V6.vpackwh.sat.128B",
  "llvm.hexagon.V6.vpackwuh.sat",
  "llvm.hexagon.V6.vpackwuh.sat.128B",
  "llvm.hexagon.V6.vpopcounth",
  "llvm.hexagon.V6.vpopcounth.128B",
  "llvm.hexagon.V6.vprefixqb",
  "llvm.hexagon.V6.vprefixqb.128B",
  "llvm.hexagon.V6.vprefixqh",
  "llvm.hexagon.V6.vprefixqh.128B",
  "llvm.hexagon.V6.vprefixqw",
  "llvm.hexagon.V6.vprefixqw.128B",
  "llvm.hexagon.V6.vrdelta",
  "llvm.hexagon.V6.vrdelta.128B",
  "llvm.hexagon.V6.vrmpybub.rtt",
  "llvm.hexagon.V6.vrmpybub.rtt.128B",
  "llvm.hexagon.V6.vrmpybub.rtt.acc",
  "llvm.hexagon.V6.vrmpybub.rtt.acc.128B",
  "llvm.hexagon.V6.vrmpybus",
  "llvm.hexagon.V6.vrmpybus.128B",
  "llvm.hexagon.V6.vrmpybus.acc",
  "llvm.hexagon.V6.vrmpybus.acc.128B",
  "llvm.hexagon.V6.vrmpybusi",
  "llvm.hexagon.V6.vrmpybusi.128B",
  "llvm.hexagon.V6.vrmpybusi.acc",
  "llvm.hexagon.V6.vrmpybusi.acc.128B",
  "llvm.hexagon.V6.vrmpybusv",
  "llvm.hexagon.V6.vrmpybusv.128B",
  "llvm.hexagon.V6.vrmpybusv.acc",
  "llvm.hexagon.V6.vrmpybusv.acc.128B",
  "llvm.hexagon.V6.vrmpybv",
  "llvm.hexagon.V6.vrmpybv.128B",
  "llvm.hexagon.V6.vrmpybv.acc",
  "llvm.hexagon.V6.vrmpybv.acc.128B",
  "llvm.hexagon.V6.vrmpyub",
  "llvm.hexagon.V6.vrmpyub.128B",
  "llvm.hexagon.V6.vrmpyub.acc",
  "llvm.hexagon.V6.vrmpyub.acc.128B",
  "llvm.hexagon.V6.vrmpyub.rtt",
  "llvm.hexagon.V6.vrmpyub.rtt.128B",
  "llvm.hexagon.V6.vrmpyub.rtt.acc",
  "llvm.hexagon.V6.vrmpyub.rtt.acc.128B",
  "llvm.hexagon.V6.vrmpyubi",
  "llvm.hexagon.V6.vrmpyubi.128B",
  "llvm.hexagon.V6.vrmpyubi.acc",
  "llvm.hexagon.V6.vrmpyubi.acc.128B",
  "llvm.hexagon.V6.vrmpyubv",
  "llvm.hexagon.V6.vrmpyubv.128B",
  "llvm.hexagon.V6.vrmpyubv.acc",
  "llvm.hexagon.V6.vrmpyubv.acc.128B",
  "llvm.hexagon.V6.vror",
  "llvm.hexagon.V6.vror.128B",
  "llvm.hexagon.V6.vrotr",
  "llvm.hexagon.V6.vrotr.128B",
  "llvm.hexagon.V6.vroundhb",
  "llvm.hexagon.V6.vroundhb.128B",
  "llvm.hexagon.V6.vroundhub",
  "llvm.hexagon.V6.vroundhub.128B",
  "llvm.hexagon.V6.vrounduhub",
  "llvm.hexagon.V6.vrounduhub.128B",
  "llvm.hexagon.V6.vrounduwuh",
  "llvm.hexagon.V6.vrounduwuh.128B",
  "llvm.hexagon.V6.vroundwh",
  "llvm.hexagon.V6.vroundwh.128B",
  "llvm.hexagon.V6.vroundwuh",
  "llvm.hexagon.V6.vroundwuh.128B",
  "llvm.hexagon.V6.vrsadubi",
  "llvm.hexagon.V6.vrsadubi.128B",
  "llvm.hexagon.V6.vrsadubi.acc",
  "llvm.hexagon.V6.vrsadubi.acc.128B",
  "llvm.hexagon.V6.vsatdw",
  "llvm.hexagon.V6.vsatdw.128B",
  "llvm.hexagon.V6.vsathub",
  "llvm.hexagon.V6.vsathub.128B",
  "llvm.hexagon.V6.vsatuwuh",
  "llvm.hexagon.V6.vsatuwuh.128B",
  "llvm.hexagon.V6.vsatwh",
  "llvm.hexagon.V6.vsatwh.128B",
  "llvm.hexagon.V6.vsb",
  "llvm.hexagon.V6.vsb.128B",
  "llvm.hexagon.V6.vscattermh",
  "llvm.hexagon.V6.vscattermh.128B",
  "llvm.hexagon.V6.vscattermh.add",
  "llvm.hexagon.V6.vscattermh.add.128B",
  "llvm.hexagon.V6.vscattermhq",
  "llvm.hexagon.V6.vscattermhq.128B",
  "llvm.hexagon.V6.vscattermhw",
  "llvm.hexagon.V6.vscattermhw.128B",
  "llvm.hexagon.V6.vscattermhw.add",
  "llvm.hexagon.V6.vscattermhw.add.128B",
  "llvm.hexagon.V6.vscattermhwq",
  "llvm.hexagon.V6.vscattermhwq.128B",
  "llvm.hexagon.V6.vscattermw",
  "llvm.hexagon.V6.vscattermw.128B",
  "llvm.hexagon.V6.vscattermw.add",
  "llvm.hexagon.V6.vscattermw.add.128B",
  "llvm.hexagon.V6.vscattermwq",
  "llvm.hexagon.V6.vscattermwq.128B",
  "llvm.hexagon.V6.vsh",
  "llvm.hexagon.V6.vsh.128B",
  "llvm.hexagon.V6.vshufeh",
  "llvm.hexagon.V6.vshufeh.128B",
  "llvm.hexagon.V6.vshuffb",
  "llvm.hexagon.V6.vshuffb.128B",
  "llvm.hexagon.V6.vshuffeb",
  "llvm.hexagon.V6.vshuffeb.128B",
  "llvm.hexagon.V6.vshuffh",
  "llvm.hexagon.V6.vshuffh.128B",
  "llvm.hexagon.V6.vshuffob",
  "llvm.hexagon.V6.vshuffob.128B",
  "llvm.hexagon.V6.vshuffvdd",
  "llvm.hexagon.V6.vshuffvdd.128B",
  "llvm.hexagon.V6.vshufoeb",
  "llvm.hexagon.V6.vshufoeb.128B",
  "llvm.hexagon.V6.vshufoeh",
  "llvm.hexagon.V6.vshufoeh.128B",
  "llvm.hexagon.V6.vshufoh",
  "llvm.hexagon.V6.vshufoh.128B",
  "llvm.hexagon.V6.vsub.hf",
  "llvm.hexagon.V6.vsub.hf.128B",
  "llvm.hexagon.V6.vsub.hf.hf",
  "llvm.hexagon.V6.vsub.hf.hf.128B",
  "llvm.hexagon.V6.vsub.qf16",
  "llvm.hexagon.V6.vsub.qf16.128B",
  "llvm.hexagon.V6.vsub.qf16.mix",
  "llvm.hexagon.V6.vsub.qf16.mix.128B",
  "llvm.hexagon.V6.vsub.qf32",
  "llvm.hexagon.V6.vsub.qf32.128B",
  "llvm.hexagon.V6.vsub.qf32.mix",
  "llvm.hexagon.V6.vsub.qf32.mix.128B",
  "llvm.hexagon.V6.vsub.sf",
  "llvm.hexagon.V6.vsub.sf.128B",
  "llvm.hexagon.V6.vsub.sf.bf",
  "llvm.hexagon.V6.vsub.sf.bf.128B",
  "llvm.hexagon.V6.vsub.sf.hf",
  "llvm.hexagon.V6.vsub.sf.hf.128B",
  "llvm.hexagon.V6.vsub.sf.sf",
  "llvm.hexagon.V6.vsub.sf.sf.128B",
  "llvm.hexagon.V6.vsubb",
  "llvm.hexagon.V6.vsubb.128B",
  "llvm.hexagon.V6.vsubb.dv",
  "llvm.hexagon.V6.vsubb.dv.128B",
  "llvm.hexagon.V6.vsubbnq",
  "llvm.hexagon.V6.vsubbnq.128B",
  "llvm.hexagon.V6.vsubbq",
  "llvm.hexagon.V6.vsubbq.128B",
  "llvm.hexagon.V6.vsubbsat",
  "llvm.hexagon.V6.vsubbsat.128B",
  "llvm.hexagon.V6.vsubbsat.dv",
  "llvm.hexagon.V6.vsubbsat.dv.128B",
  "llvm.hexagon.V6.vsubcarry",
  "llvm.hexagon.V6.vsubcarry.128B",
  "llvm.hexagon.V6.vsubcarryo",
  "llvm.hexagon.V6.vsubcarryo.128B",
  "llvm.hexagon.V6.vsubh",
  "llvm.hexagon.V6.vsubh.128B",
  "llvm.hexagon.V6.vsubh.dv",
  "llvm.hexagon.V6.vsubh.dv.128B",
  "llvm.hexagon.V6.vsubhnq",
  "llvm.hexagon.V6.vsubhnq.128B",
  "llvm.hexagon.V6.vsubhq",
  "llvm.hexagon.V6.vsubhq.128B",
  "llvm.hexagon.V6.vsubhsat",
  "llvm.hexagon.V6.vsubhsat.128B",
  "llvm.hexagon.V6.vsubhsat.dv",
  "llvm.hexagon.V6.vsubhsat.dv.128B",
  "llvm.hexagon.V6.vsubhw",
  "llvm.hexagon.V6.vsubhw.128B",
  "llvm.hexagon.V6.vsububh",
  "llvm.hexagon.V6.vsububh.128B",
  "llvm.hexagon.V6.vsububsat",
  "llvm.hexagon.V6.vsububsat.128B",
  "llvm.hexagon.V6.vsububsat.dv",
  "llvm.hexagon.V6.vsububsat.dv.128B",
  "llvm.hexagon.V6.vsubububb.sat",
  "llvm.hexagon.V6.vsubububb.sat.128B",
  "llvm.hexagon.V6.vsubuhsat",
  "llvm.hexagon.V6.vsubuhsat.128B",
  "llvm.hexagon.V6.vsubuhsat.dv",
  "llvm.hexagon.V6.vsubuhsat.dv.128B",
  "llvm.hexagon.V6.vsubuhw",
  "llvm.hexagon.V6.vsubuhw.128B",
  "llvm.hexagon.V6.vsubuwsat",
  "llvm.hexagon.V6.vsubuwsat.128B",
  "llvm.hexagon.V6.vsubuwsat.dv",
  "llvm.hexagon.V6.vsubuwsat.dv.128B",
  "llvm.hexagon.V6.vsubw",
  "llvm.hexagon.V6.vsubw.128B",
  "llvm.hexagon.V6.vsubw.dv",
  "llvm.hexagon.V6.vsubw.dv.128B",
  "llvm.hexagon.V6.vsubwnq",
  "llvm.hexagon.V6.vsubwnq.128B",
  "llvm.hexagon.V6.vsubwq",
  "llvm.hexagon.V6.vsubwq.128B",
  "llvm.hexagon.V6.vsubwsat",
  "llvm.hexagon.V6.vsubwsat.128B",
  "llvm.hexagon.V6.vsubwsat.dv",
  "llvm.hexagon.V6.vsubwsat.dv.128B",
  "llvm.hexagon.V6.vswap",
  "llvm.hexagon.V6.vswap.128B",
  "llvm.hexagon.V6.vtmpyb",
  "llvm.hexagon.V6.vtmpyb.128B",
  "llvm.hexagon.V6.vtmpyb.acc",
  "llvm.hexagon.V6.vtmpyb.acc.128B",
  "llvm.hexagon.V6.vtmpybus",
  "llvm.hexagon.V6.vtmpybus.128B",
  "llvm.hexagon.V6.vtmpybus.acc",
  "llvm.hexagon.V6.vtmpybus.acc.128B",
  "llvm.hexagon.V6.vtmpyhb",
  "llvm.hexagon.V6.vtmpyhb.128B",
  "llvm.hexagon.V6.vtmpyhb.acc",
  "llvm.hexagon.V6.vtmpyhb.acc.128B",
  "llvm.hexagon.V6.vunpackb",
  "llvm.hexagon.V6.vunpackb.128B",
  "llvm.hexagon.V6.vunpackh",
  "llvm.hexagon.V6.vunpackh.128B",
  "llvm.hexagon.V6.vunpackob",
  "llvm.hexagon.V6.vunpackob.128B",
  "llvm.hexagon.V6.vunpackoh",
  "llvm.hexagon.V6.vunpackoh.128B",
  "llvm.hexagon.V6.vunpackub",
  "llvm.hexagon.V6.vunpackub.128B",
  "llvm.hexagon.V6.vunpackuh",
  "llvm.hexagon.V6.vunpackuh.128B",
  "llvm.hexagon.V6.vxor",
  "llvm.hexagon.V6.vxor.128B",
  "llvm.hexagon.V6.vzb",
  "llvm.hexagon.V6.vzb.128B",
  "llvm.hexagon.V6.vzh",
  "llvm.hexagon.V6.vzh.128B",
  "llvm.hexagon.Y2.dccleana",
  "llvm.hexagon.Y2.dccleaninva",
  "llvm.hexagon.Y2.dcfetch",
  "llvm.hexagon.Y2.dcinva",
  "llvm.hexagon.Y2.dczeroa",
  "llvm.hexagon.Y4.l2fetch",
  "llvm.hexagon.Y5.l2fetch",
  "llvm.hexagon.Y6.dmlink",
  "llvm.hexagon.Y6.dmpause",
  "llvm.hexagon.Y6.dmpoll",
  "llvm.hexagon.Y6.dmresume",
  "llvm.hexagon.Y6.dmstart",
  "llvm.hexagon.Y6.dmwait",
  "llvm.hexagon.circ.ldb",
  "llvm.hexagon.circ.ldd",
  "llvm.hexagon.circ.ldh",
  "llvm.hexagon.circ.ldub",
  "llvm.hexagon.circ.lduh",
  "llvm.hexagon.circ.ldw",
  "llvm.hexagon.circ.stb",
  "llvm.hexagon.circ.std",
  "llvm.hexagon.circ.sth",
  "llvm.hexagon.circ.sthhi",
  "llvm.hexagon.circ.stw",
  "llvm.hexagon.instrprof.custom",
  "llvm.hexagon.prefetch",
  "llvm.hexagon.vmemcpy",
  "llvm.hexagon.vmemset",
  "llvm.loongarch.asrtgt.d",
  "llvm.loongarch.asrtle.d",
  "llvm.loongarch.break",
  "llvm.loongarch.cacop.d",
  "llvm.loongarch.cacop.w",
  "llvm.loongarch.cpucfg",
  "llvm.loongarch.crc.w.b.w",
  "llvm.loongarch.crc.w.d.w",
  "llvm.loongarch.crc.w.h.w",
  "llvm.loongarch.crc.w.w.w",
  "llvm.loongarch.crcc.w.b.w",
  "llvm.loongarch.crcc.w.d.w",
  "llvm.loongarch.crcc.w.h.w",
  "llvm.loongarch.crcc.w.w.w",
  "llvm.loongarch.csrrd.d",
  "llvm.loongarch.csrrd.w",
  "llvm.loongarch.csrwr.d",
  "llvm.loongarch.csrwr.w",
  "llvm.loongarch.csrxchg.d",
  "llvm.loongarch.csrxchg.w",
  "llvm.loongarch.dbar",
  "llvm.loongarch.ibar",
  "llvm.loongarch.iocsrrd.b",
  "llvm.loongarch.iocsrrd.d",
  "llvm.loongarch.iocsrrd.h",
  "llvm.loongarch.iocsrrd.w",
  "llvm.loongarch.iocsrwr.b",
  "llvm.loongarch.iocsrwr.d",
  "llvm.loongarch.iocsrwr.h",
  "llvm.loongarch.iocsrwr.w",
  "llvm.loongarch.lddir.d",
  "llvm.loongarch.ldpte.d",
  "llvm.loongarch.masked.atomicrmw.add.i32",
  "llvm.loongarch.masked.atomicrmw.add.i64",
  "llvm.loongarch.masked.atomicrmw.max.i64",
  "llvm.loongarch.masked.atomicrmw.min.i64",
  "llvm.loongarch.masked.atomicrmw.nand.i32",
  "llvm.loongarch.masked.atomicrmw.nand.i64",
  "llvm.loongarch.masked.atomicrmw.sub.i32",
  "llvm.loongarch.masked.atomicrmw.sub.i64",
  "llvm.loongarch.masked.atomicrmw.umax.i32",
  "llvm.loongarch.masked.atomicrmw.umax.i64",
  "llvm.loongarch.masked.atomicrmw.umin.i32",
  "llvm.loongarch.masked.atomicrmw.umin.i64",
  "llvm.loongarch.masked.atomicrmw.xchg.i32",
  "llvm.loongarch.masked.atomicrmw.xchg.i64",
  "llvm.loongarch.masked.cmpxchg.i64",
  "llvm.loongarch.movfcsr2gr",
  "llvm.loongarch.movgr2fcsr",
  "llvm.loongarch.syscall",
  "llvm.mips.absq.s.ph",
  "llvm.mips.absq.s.qb",
  "llvm.mips.absq.s.w",
  "llvm.mips.add.a.b",
  "llvm.mips.add.a.d",
  "llvm.mips.add.a.h",
  "llvm.mips.add.a.w",
  "llvm.mips.addq.ph",
  "llvm.mips.addq.s.ph",
  "llvm.mips.addq.s.w",
  "llvm.mips.addqh.ph",
  "llvm.mips.addqh.r.ph",
  "llvm.mips.addqh.r.w",
  "llvm.mips.addqh.w",
  "llvm.mips.adds.a.b",
  "llvm.mips.adds.a.d",
  "llvm.mips.adds.a.h",
  "llvm.mips.adds.a.w",
  "llvm.mips.adds.s.b",
  "llvm.mips.adds.s.d",
  "llvm.mips.adds.s.h",
  "llvm.mips.adds.s.w",
  "llvm.mips.adds.u.b",
  "llvm.mips.adds.u.d",
  "llvm.mips.adds.u.h",
  "llvm.mips.adds.u.w",
  "llvm.mips.addsc",
  "llvm.mips.addu.ph",
  "llvm.mips.addu.qb",
  "llvm.mips.addu.s.ph",
  "llvm.mips.addu.s.qb",
  "llvm.mips.adduh.qb",
  "llvm.mips.adduh.r.qb",
  "llvm.mips.addv.b",
  "llvm.mips.addv.d",
  "llvm.mips.addv.h",
  "llvm.mips.addv.w",
  "llvm.mips.addvi.b",
  "llvm.mips.addvi.d",
  "llvm.mips.addvi.h",
  "llvm.mips.addvi.w",
  "llvm.mips.addwc",
  "llvm.mips.and.v",
  "llvm.mips.andi.b",
  "llvm.mips.append",
  "llvm.mips.asub.s.b",
  "llvm.mips.asub.s.d",
  "llvm.mips.asub.s.h",
  "llvm.mips.asub.s.w",
  "llvm.mips.asub.u.b",
  "llvm.mips.asub.u.d",
  "llvm.mips.asub.u.h",
  "llvm.mips.asub.u.w",
  "llvm.mips.ave.s.b",
  "llvm.mips.ave.s.d",
  "llvm.mips.ave.s.h",
  "llvm.mips.ave.s.w",
  "llvm.mips.ave.u.b",
  "llvm.mips.ave.u.d",
  "llvm.mips.ave.u.h",
  "llvm.mips.ave.u.w",
  "llvm.mips.aver.s.b",
  "llvm.mips.aver.s.d",
  "llvm.mips.aver.s.h",
  "llvm.mips.aver.s.w",
  "llvm.mips.aver.u.b",
  "llvm.mips.aver.u.d",
  "llvm.mips.aver.u.h",
  "llvm.mips.aver.u.w",
  "llvm.mips.balign",
  "llvm.mips.bclr.b",
  "llvm.mips.bclr.d",
  "llvm.mips.bclr.h",
  "llvm.mips.bclr.w",
  "llvm.mips.bclri.b",
  "llvm.mips.bclri.d",
  "llvm.mips.bclri.h",
  "llvm.mips.bclri.w",
  "llvm.mips.binsl.b",
  "llvm.mips.binsl.d",
  "llvm.mips.binsl.h",
  "llvm.mips.binsl.w",
  "llvm.mips.binsli.b",
  "llvm.mips.binsli.d",
  "llvm.mips.binsli.h",
  "llvm.mips.binsli.w",
  "llvm.mips.binsr.b",
  "llvm.mips.binsr.d",
  "llvm.mips.binsr.h",
  "llvm.mips.binsr.w",
  "llvm.mips.binsri.b",
  "llvm.mips.binsri.d",
  "llvm.mips.binsri.h",
  "llvm.mips.binsri.w",
  "llvm.mips.bitrev",
  "llvm.mips.bmnz.v",
  "llvm.mips.bmnzi.b",
  "llvm.mips.bmz.v",
  "llvm.mips.bmzi.b",
  "llvm.mips.bneg.b",
  "llvm.mips.bneg.d",
  "llvm.mips.bneg.h",
  "llvm.mips.bneg.w",
  "llvm.mips.bnegi.b",
  "llvm.mips.bnegi.d",
  "llvm.mips.bnegi.h",
  "llvm.mips.bnegi.w",
  "llvm.mips.bnz.b",
  "llvm.mips.bnz.d",
  "llvm.mips.bnz.h",
  "llvm.mips.bnz.v",
  "llvm.mips.bnz.w",
  "llvm.mips.bposge32",
  "llvm.mips.bsel.v",
  "llvm.mips.bseli.b",
  "llvm.mips.bset.b",
  "llvm.mips.bset.d",
  "llvm.mips.bset.h",
  "llvm.mips.bset.w",
  "llvm.mips.bseti.b",
  "llvm.mips.bseti.d",
  "llvm.mips.bseti.h",
  "llvm.mips.bseti.w",
  "llvm.mips.bz.b",
  "llvm.mips.bz.d",
  "llvm.mips.bz.h",
  "llvm.mips.bz.v",
  "llvm.mips.bz.w",
  "llvm.mips.ceq.b",
  "llvm.mips.ceq.d",
  "llvm.mips.ceq.h",
  "llvm.mips.ceq.w",
  "llvm.mips.ceqi.b",
  "llvm.mips.ceqi.d",
  "llvm.mips.ceqi.h",
  "llvm.mips.ceqi.w",
  "llvm.mips.cfcmsa",
  "llvm.mips.cle.s.b",
  "llvm.mips.cle.s.d",
  "llvm.mips.cle.s.h",
  "llvm.mips.cle.s.w",
  "llvm.mips.cle.u.b",
  "llvm.mips.cle.u.d",
  "llvm.mips.cle.u.h",
  "llvm.mips.cle.u.w",
  "llvm.mips.clei.s.b",
  "llvm.mips.clei.s.d",
  "llvm.mips.clei.s.h",
  "llvm.mips.clei.s.w",
  "llvm.mips.clei.u.b",
  "llvm.mips.clei.u.d",
  "llvm.mips.clei.u.h",
  "llvm.mips.clei.u.w",
  "llvm.mips.clt.s.b",
  "llvm.mips.clt.s.d",
  "llvm.mips.clt.s.h",
  "llvm.mips.clt.s.w",
  "llvm.mips.clt.u.b",
  "llvm.mips.clt.u.d",
  "llvm.mips.clt.u.h",
  "llvm.mips.clt.u.w",
  "llvm.mips.clti.s.b",
  "llvm.mips.clti.s.d",
  "llvm.mips.clti.s.h",
  "llvm.mips.clti.s.w",
  "llvm.mips.clti.u.b",
  "llvm.mips.clti.u.d",
  "llvm.mips.clti.u.h",
  "llvm.mips.clti.u.w",
  "llvm.mips.cmp.eq.ph",
  "llvm.mips.cmp.le.ph",
  "llvm.mips.cmp.lt.ph",
  "llvm.mips.cmpgdu.eq.qb",
  "llvm.mips.cmpgdu.le.qb",
  "llvm.mips.cmpgdu.lt.qb",
  "llvm.mips.cmpgu.eq.qb",
  "llvm.mips.cmpgu.le.qb",
  "llvm.mips.cmpgu.lt.qb",
  "llvm.mips.cmpu.eq.qb",
  "llvm.mips.cmpu.le.qb",
  "llvm.mips.cmpu.lt.qb",
  "llvm.mips.copy.s.b",
  "llvm.mips.copy.s.d",
  "llvm.mips.copy.s.h",
  "llvm.mips.copy.s.w",
  "llvm.mips.copy.u.b",
  "llvm.mips.copy.u.d",
  "llvm.mips.copy.u.h",
  "llvm.mips.copy.u.w",
  "llvm.mips.ctcmsa",
  "llvm.mips.div.s.b",
  "llvm.mips.div.s.d",
  "llvm.mips.div.s.h",
  "llvm.mips.div.s.w",
  "llvm.mips.div.u.b",
  "llvm.mips.div.u.d",
  "llvm.mips.div.u.h",
  "llvm.mips.div.u.w",
  "llvm.mips.dlsa",
  "llvm.mips.dotp.s.d",
  "llvm.mips.dotp.s.h",
  "llvm.mips.dotp.s.w",
  "llvm.mips.dotp.u.d",
  "llvm.mips.dotp.u.h",
  "llvm.mips.dotp.u.w",
  "llvm.mips.dpa.w.ph",
  "llvm.mips.dpadd.s.d",
  "llvm.mips.dpadd.s.h",
  "llvm.mips.dpadd.s.w",
  "llvm.mips.dpadd.u.d",
  "llvm.mips.dpadd.u.h",
  "llvm.mips.dpadd.u.w",
  "llvm.mips.dpaq.s.w.ph",
  "llvm.mips.dpaq.sa.l.w",
  "llvm.mips.dpaqx.s.w.ph",
  "llvm.mips.dpaqx.sa.w.ph",
  "llvm.mips.dpau.h.qbl",
  "llvm.mips.dpau.h.qbr",
  "llvm.mips.dpax.w.ph",
  "llvm.mips.dps.w.ph",
  "llvm.mips.dpsq.s.w.ph",
  "llvm.mips.dpsq.sa.l.w",
  "llvm.mips.dpsqx.s.w.ph",
  "llvm.mips.dpsqx.sa.w.ph",
  "llvm.mips.dpsu.h.qbl",
  "llvm.mips.dpsu.h.qbr",
  "llvm.mips.dpsub.s.d",
  "llvm.mips.dpsub.s.h",
  "llvm.mips.dpsub.s.w",
  "llvm.mips.dpsub.u.d",
  "llvm.mips.dpsub.u.h",
  "llvm.mips.dpsub.u.w",
  "llvm.mips.dpsx.w.ph",
  "llvm.mips.extp",
  "llvm.mips.extpdp",
  "llvm.mips.extr.r.w",
  "llvm.mips.extr.rs.w",
  "llvm.mips.extr.s.h",
  "llvm.mips.extr.w",
  "llvm.mips.fadd.d",
  "llvm.mips.fadd.w",
  "llvm.mips.fcaf.d",
  "llvm.mips.fcaf.w",
  "llvm.mips.fceq.d",
  "llvm.mips.fceq.w",
  "llvm.mips.fclass.d",
  "llvm.mips.fclass.w",
  "llvm.mips.fcle.d",
  "llvm.mips.fcle.w",
  "llvm.mips.fclt.d",
  "llvm.mips.fclt.w",
  "llvm.mips.fcne.d",
  "llvm.mips.fcne.w",
  "llvm.mips.fcor.d",
  "llvm.mips.fcor.w",
  "llvm.mips.fcueq.d",
  "llvm.mips.fcueq.w",
  "llvm.mips.fcule.d",
  "llvm.mips.fcule.w",
  "llvm.mips.fcult.d",
  "llvm.mips.fcult.w",
  "llvm.mips.fcun.d",
  "llvm.mips.fcun.w",
  "llvm.mips.fcune.d",
  "llvm.mips.fcune.w",
  "llvm.mips.fdiv.d",
  "llvm.mips.fdiv.w",
  "llvm.mips.fexdo.h",
  "llvm.mips.fexdo.w",
  "llvm.mips.fexp2.d",
  "llvm.mips.fexp2.w",
  "llvm.mips.fexupl.d",
  "llvm.mips.fexupl.w",
  "llvm.mips.fexupr.d",
  "llvm.mips.fexupr.w",
  "llvm.mips.ffint.s.d",
  "llvm.mips.ffint.s.w",
  "llvm.mips.ffint.u.d",
  "llvm.mips.ffint.u.w",
  "llvm.mips.ffql.d",
  "llvm.mips.ffql.w",
  "llvm.mips.ffqr.d",
  "llvm.mips.ffqr.w",
  "llvm.mips.fill.b",
  "llvm.mips.fill.d",
  "llvm.mips.fill.h",
  "llvm.mips.fill.w",
  "llvm.mips.flog2.d",
  "llvm.mips.flog2.w",
  "llvm.mips.fmadd.d",
  "llvm.mips.fmadd.w",
  "llvm.mips.fmax.a.d",
  "llvm.mips.fmax.a.w",
  "llvm.mips.fmax.d",
  "llvm.mips.fmax.w",
  "llvm.mips.fmin.a.d",
  "llvm.mips.fmin.a.w",
  "llvm.mips.fmin.d",
  "llvm.mips.fmin.w",
  "llvm.mips.fmsub.d",
  "llvm.mips.fmsub.w",
  "llvm.mips.fmul.d",
  "llvm.mips.fmul.w",
  "llvm.mips.frcp.d",
  "llvm.mips.frcp.w",
  "llvm.mips.frint.d",
  "llvm.mips.frint.w",
  "llvm.mips.frsqrt.d",
  "llvm.mips.frsqrt.w",
  "llvm.mips.fsaf.d",
  "llvm.mips.fsaf.w",
  "llvm.mips.fseq.d",
  "llvm.mips.fseq.w",
  "llvm.mips.fsle.d",
  "llvm.mips.fsle.w",
  "llvm.mips.fslt.d",
  "llvm.mips.fslt.w",
  "llvm.mips.fsne.d",
  "llvm.mips.fsne.w",
  "llvm.mips.fsor.d",
  "llvm.mips.fsor.w",
  "llvm.mips.fsqrt.d",
  "llvm.mips.fsqrt.w",
  "llvm.mips.fsub.d",
  "llvm.mips.fsub.w",
  "llvm.mips.fsueq.d",
  "llvm.mips.fsueq.w",
  "llvm.mips.fsule.d",
  "llvm.mips.fsule.w",
  "llvm.mips.fsult.d",
  "llvm.mips.fsult.w",
  "llvm.mips.fsun.d",
  "llvm.mips.fsun.w",
  "llvm.mips.fsune.d",
  "llvm.mips.fsune.w",
  "llvm.mips.ftint.s.d",
  "llvm.mips.ftint.s.w",
  "llvm.mips.ftint.u.d",
  "llvm.mips.ftint.u.w",
  "llvm.mips.ftq.h",
  "llvm.mips.ftq.w",
  "llvm.mips.ftrunc.s.d",
  "llvm.mips.ftrunc.s.w",
  "llvm.mips.ftrunc.u.d",
  "llvm.mips.ftrunc.u.w",
  "llvm.mips.hadd.s.d",
  "llvm.mips.hadd.s.h",
  "llvm.mips.hadd.s.w",
  "llvm.mips.hadd.u.d",
  "llvm.mips.hadd.u.h",
  "llvm.mips.hadd.u.w",
  "llvm.mips.hsub.s.d",
  "llvm.mips.hsub.s.h",
  "llvm.mips.hsub.s.w",
  "llvm.mips.hsub.u.d",
  "llvm.mips.hsub.u.h",
  "llvm.mips.hsub.u.w",
  "llvm.mips.ilvev.b",
  "llvm.mips.ilvev.d",
  "llvm.mips.ilvev.h",
  "llvm.mips.ilvev.w",
  "llvm.mips.ilvl.b",
  "llvm.mips.ilvl.d",
  "llvm.mips.ilvl.h",
  "llvm.mips.ilvl.w",
  "llvm.mips.ilvod.b",
  "llvm.mips.ilvod.d",
  "llvm.mips.ilvod.h",
  "llvm.mips.ilvod.w",
  "llvm.mips.ilvr.b",
  "llvm.mips.ilvr.d",
  "llvm.mips.ilvr.h",
  "llvm.mips.ilvr.w",
  "llvm.mips.insert.b",
  "llvm.mips.insert.d",
  "llvm.mips.insert.h",
  "llvm.mips.insert.w",
  "llvm.mips.insv",
  "llvm.mips.insve.b",
  "llvm.mips.insve.d",
  "llvm.mips.insve.h",
  "llvm.mips.insve.w",
  "llvm.mips.lbux",
  "llvm.mips.ld.b",
  "llvm.mips.ld.d",
  "llvm.mips.ld.h",
  "llvm.mips.ld.w",
  "llvm.mips.ldi.b",
  "llvm.mips.ldi.d",
  "llvm.mips.ldi.h",
  "llvm.mips.ldi.w",
  "llvm.mips.ldr.d",
  "llvm.mips.ldr.w",
  "llvm.mips.lhx",
  "llvm.mips.lsa",
  "llvm.mips.lwx",
  "llvm.mips.madd",
  "llvm.mips.madd.q.h",
  "llvm.mips.madd.q.w",
  "llvm.mips.maddr.q.h",
  "llvm.mips.maddr.q.w",
  "llvm.mips.maddu",
  "llvm.mips.maddv.b",
  "llvm.mips.maddv.d",
  "llvm.mips.maddv.h",
  "llvm.mips.maddv.w",
  "llvm.mips.maq.s.w.phl",
  "llvm.mips.maq.s.w.phr",
  "llvm.mips.maq.sa.w.phl",
  "llvm.mips.maq.sa.w.phr",
  "llvm.mips.max.a.b",
  "llvm.mips.max.a.d",
  "llvm.mips.max.a.h",
  "llvm.mips.max.a.w",
  "llvm.mips.max.s.b",
  "llvm.mips.max.s.d",
  "llvm.mips.max.s.h",
  "llvm.mips.max.s.w",
  "llvm.mips.max.u.b",
  "llvm.mips.max.u.d",
  "llvm.mips.max.u.h",
  "llvm.mips.max.u.w",
  "llvm.mips.maxi.s.b",
  "llvm.mips.maxi.s.d",
  "llvm.mips.maxi.s.h",
  "llvm.mips.maxi.s.w",
  "llvm.mips.maxi.u.b",
  "llvm.mips.maxi.u.d",
  "llvm.mips.maxi.u.h",
  "llvm.mips.maxi.u.w",
  "llvm.mips.min.a.b",
  "llvm.mips.min.a.d",
  "llvm.mips.min.a.h",
  "llvm.mips.min.a.w",
  "llvm.mips.min.s.b",
  "llvm.mips.min.s.d",
  "llvm.mips.min.s.h",
  "llvm.mips.min.s.w",
  "llvm.mips.min.u.b",
  "llvm.mips.min.u.d",
  "llvm.mips.min.u.h",
  "llvm.mips.min.u.w",
  "llvm.mips.mini.s.b",
  "llvm.mips.mini.s.d",
  "llvm.mips.mini.s.h",
  "llvm.mips.mini.s.w",
  "llvm.mips.mini.u.b",
  "llvm.mips.mini.u.d",
  "llvm.mips.mini.u.h",
  "llvm.mips.mini.u.w",
  "llvm.mips.mod.s.b",
  "llvm.mips.mod.s.d",
  "llvm.mips.mod.s.h",
  "llvm.mips.mod.s.w",
  "llvm.mips.mod.u.b",
  "llvm.mips.mod.u.d",
  "llvm.mips.mod.u.h",
  "llvm.mips.mod.u.w",
  "llvm.mips.modsub",
  "llvm.mips.move.v",
  "llvm.mips.msub",
  "llvm.mips.msub.q.h",
  "llvm.mips.msub.q.w",
  "llvm.mips.msubr.q.h",
  "llvm.mips.msubr.q.w",
  "llvm.mips.msubu",
  "llvm.mips.msubv.b",
  "llvm.mips.msubv.d",
  "llvm.mips.msubv.h",
  "llvm.mips.msubv.w",
  "llvm.mips.mthlip",
  "llvm.mips.mul.ph",
  "llvm.mips.mul.q.h",
  "llvm.mips.mul.q.w",
  "llvm.mips.mul.s.ph",
  "llvm.mips.muleq.s.w.phl",
  "llvm.mips.muleq.s.w.phr",
  "llvm.mips.muleu.s.ph.qbl",
  "llvm.mips.muleu.s.ph.qbr",
  "llvm.mips.mulq.rs.ph",
  "llvm.mips.mulq.rs.w",
  "llvm.mips.mulq.s.ph",
  "llvm.mips.mulq.s.w",
  "llvm.mips.mulr.q.h",
  "llvm.mips.mulr.q.w",
  "llvm.mips.mulsa.w.ph",
  "llvm.mips.mulsaq.s.w.ph",
  "llvm.mips.mult",
  "llvm.mips.multu",
  "llvm.mips.mulv.b",
  "llvm.mips.mulv.d",
  "llvm.mips.mulv.h",
  "llvm.mips.mulv.w",
  "llvm.mips.nloc.b",
  "llvm.mips.nloc.d",
  "llvm.mips.nloc.h",
  "llvm.mips.nloc.w",
  "llvm.mips.nlzc.b",
  "llvm.mips.nlzc.d",
  "llvm.mips.nlzc.h",
  "llvm.mips.nlzc.w",
  "llvm.mips.nor.v",
  "llvm.mips.nori.b",
  "llvm.mips.or.v",
  "llvm.mips.ori.b",
  "llvm.mips.packrl.ph",
  "llvm.mips.pckev.b",
  "llvm.mips.pckev.d",
  "llvm.mips.pckev.h",
  "llvm.mips.pckev.w",
  "llvm.mips.pckod.b",
  "llvm.mips.pckod.d",
  "llvm.mips.pckod.h",
  "llvm.mips.pckod.w",
  "llvm.mips.pcnt.b",
  "llvm.mips.pcnt.d",
  "llvm.mips.pcnt.h",
  "llvm.mips.pcnt.w",
  "llvm.mips.pick.ph",
  "llvm.mips.pick.qb",
  "llvm.mips.preceq.w.phl",
  "llvm.mips.preceq.w.phr",
  "llvm.mips.precequ.ph.qbl",
  "llvm.mips.precequ.ph.qbla",
  "llvm.mips.precequ.ph.qbr",
  "llvm.mips.precequ.ph.qbra",
  "llvm.mips.preceu.ph.qbl",
  "llvm.mips.preceu.ph.qbla",
  "llvm.mips.preceu.ph.qbr",
  "llvm.mips.preceu.ph.qbra",
  "llvm.mips.precr.qb.ph",
  "llvm.mips.precr.sra.ph.w",
  "llvm.mips.precr.sra.r.ph.w",
  "llvm.mips.precrq.ph.w",
  "llvm.mips.precrq.qb.ph",
  "llvm.mips.precrq.rs.ph.w",
  "llvm.mips.precrqu.s.qb.ph",
  "llvm.mips.prepend",
  "llvm.mips.raddu.w.qb",
  "llvm.mips.rddsp",
  "llvm.mips.repl.ph",
  "llvm.mips.repl.qb",
  "llvm.mips.sat.s.b",
  "llvm.mips.sat.s.d",
  "llvm.mips.sat.s.h",
  "llvm.mips.sat.s.w",
  "llvm.mips.sat.u.b",
  "llvm.mips.sat.u.d",
  "llvm.mips.sat.u.h",
  "llvm.mips.sat.u.w",
  "llvm.mips.shf.b",
  "llvm.mips.shf.h",
  "llvm.mips.shf.w",
  "llvm.mips.shilo",
  "llvm.mips.shll.ph",
  "llvm.mips.shll.qb",
  "llvm.mips.shll.s.ph",
  "llvm.mips.shll.s.w",
  "llvm.mips.shra.ph",
  "llvm.mips.shra.qb",
  "llvm.mips.shra.r.ph",
  "llvm.mips.shra.r.qb",
  "llvm.mips.shra.r.w",
  "llvm.mips.shrl.ph",
  "llvm.mips.shrl.qb",
  "llvm.mips.sld.b",
  "llvm.mips.sld.d",
  "llvm.mips.sld.h",
  "llvm.mips.sld.w",
  "llvm.mips.sldi.b",
  "llvm.mips.sldi.d",
  "llvm.mips.sldi.h",
  "llvm.mips.sldi.w",
  "llvm.mips.sll.b",
  "llvm.mips.sll.d",
  "llvm.mips.sll.h",
  "llvm.mips.sll.w",
  "llvm.mips.slli.b",
  "llvm.mips.slli.d",
  "llvm.mips.slli.h",
  "llvm.mips.slli.w",
  "llvm.mips.splat.b",
  "llvm.mips.splat.d",
  "llvm.mips.splat.h",
  "llvm.mips.splat.w",
  "llvm.mips.splati.b",
  "llvm.mips.splati.d",
  "llvm.mips.splati.h",
  "llvm.mips.splati.w",
  "llvm.mips.sra.b",
  "llvm.mips.sra.d",
  "llvm.mips.sra.h",
  "llvm.mips.sra.w",
  "llvm.mips.srai.b",
  "llvm.mips.srai.d",
  "llvm.mips.srai.h",
  "llvm.mips.srai.w",
  "llvm.mips.srar.b",
  "llvm.mips.srar.d",
  "llvm.mips.srar.h",
  "llvm.mips.srar.w",
  "llvm.mips.srari.b",
  "llvm.mips.srari.d",
  "llvm.mips.srari.h",
  "llvm.mips.srari.w",
  "llvm.mips.srl.b",
  "llvm.mips.srl.d",
  "llvm.mips.srl.h",
  "llvm.mips.srl.w",
  "llvm.mips.srli.b",
  "llvm.mips.srli.d",
  "llvm.mips.srli.h",
  "llvm.mips.srli.w",
  "llvm.mips.srlr.b",
  "llvm.mips.srlr.d",
  "llvm.mips.srlr.h",
  "llvm.mips.srlr.w",
  "llvm.mips.srlri.b",
  "llvm.mips.srlri.d",
  "llvm.mips.srlri.h",
  "llvm.mips.srlri.w",
  "llvm.mips.st.b",
  "llvm.mips.st.d",
  "llvm.mips.st.h",
  "llvm.mips.st.w",
  "llvm.mips.str.d",
  "llvm.mips.str.w",
  "llvm.mips.subq.ph",
  "llvm.mips.subq.s.ph",
  "llvm.mips.subq.s.w",
  "llvm.mips.subqh.ph",
  "llvm.mips.subqh.r.ph",
  "llvm.mips.subqh.r.w",
  "llvm.mips.subqh.w",
  "llvm.mips.subs.s.b",
  "llvm.mips.subs.s.d",
  "llvm.mips.subs.s.h",
  "llvm.mips.subs.s.w",
  "llvm.mips.subs.u.b",
  "llvm.mips.subs.u.d",
  "llvm.mips.subs.u.h",
  "llvm.mips.subs.u.w",
  "llvm.mips.subsus.u.b",
  "llvm.mips.subsus.u.d",
  "llvm.mips.subsus.u.h",
  "llvm.mips.subsus.u.w",
  "llvm.mips.subsuu.s.b",
  "llvm.mips.subsuu.s.d",
  "llvm.mips.subsuu.s.h",
  "llvm.mips.subsuu.s.w",
  "llvm.mips.subu.ph",
  "llvm.mips.subu.qb",
  "llvm.mips.subu.s.ph",
  "llvm.mips.subu.s.qb",
  "llvm.mips.subuh.qb",
  "llvm.mips.subuh.r.qb",
  "llvm.mips.subv.b",
  "llvm.mips.subv.d",
  "llvm.mips.subv.h",
  "llvm.mips.subv.w",
  "llvm.mips.subvi.b",
  "llvm.mips.subvi.d",
  "llvm.mips.subvi.h",
  "llvm.mips.subvi.w",
  "llvm.mips.vshf.b",
  "llvm.mips.vshf.d",
  "llvm.mips.vshf.h",
  "llvm.mips.vshf.w",
  "llvm.mips.wrdsp",
  "llvm.mips.xor.v",
  "llvm.mips.xori.b",
  "llvm.nvvm.abs.bf16",
  "llvm.nvvm.abs.bf16x2",
  "llvm.nvvm.add.rm.d",
  "llvm.nvvm.add.rm.f",
  "llvm.nvvm.add.rm.ftz.f",
  "llvm.nvvm.add.rn.d",
  "llvm.nvvm.add.rn.f",
  "llvm.nvvm.add.rn.ftz.f",
  "llvm.nvvm.add.rp.d",
  "llvm.nvvm.add.rp.f",
  "llvm.nvvm.add.rp.ftz.f",
  "llvm.nvvm.add.rz.d",
  "llvm.nvvm.add.rz.f",
  "llvm.nvvm.add.rz.ftz.f",
  "llvm.nvvm.atomic.add.gen.f.cta",
  "llvm.nvvm.atomic.add.gen.f.sys",
  "llvm.nvvm.atomic.add.gen.i.cta",
  "llvm.nvvm.atomic.add.gen.i.sys",
  "llvm.nvvm.atomic.and.gen.i.cta",
  "llvm.nvvm.atomic.and.gen.i.sys",
  "llvm.nvvm.atomic.cas.gen.i.cta",
  "llvm.nvvm.atomic.cas.gen.i.sys",
  "llvm.nvvm.atomic.dec.gen.i.cta",
  "llvm.nvvm.atomic.dec.gen.i.sys",
  "llvm.nvvm.atomic.exch.gen.i.cta",
  "llvm.nvvm.atomic.exch.gen.i.sys",
  "llvm.nvvm.atomic.inc.gen.i.cta",
  "llvm.nvvm.atomic.inc.gen.i.sys",
  "llvm.nvvm.atomic.load.dec.32",
  "llvm.nvvm.atomic.load.inc.32",
  "llvm.nvvm.atomic.max.gen.i.cta",
  "llvm.nvvm.atomic.max.gen.i.sys",
  "llvm.nvvm.atomic.min.gen.i.cta",
  "llvm.nvvm.atomic.min.gen.i.sys",
  "llvm.nvvm.atomic.or.gen.i.cta",
  "llvm.nvvm.atomic.or.gen.i.sys",
  "llvm.nvvm.atomic.xor.gen.i.cta",
  "llvm.nvvm.atomic.xor.gen.i.sys",
  "llvm.nvvm.bar.sync",
  "llvm.nvvm.bar.warp.sync",
  "llvm.nvvm.barrier",
  "llvm.nvvm.barrier.cluster.arrive",
  "llvm.nvvm.barrier.cluster.arrive.relaxed",
  "llvm.nvvm.barrier.cluster.wait",
  "llvm.nvvm.barrier.n",
  "llvm.nvvm.barrier.sync",
  "llvm.nvvm.barrier.sync.cnt",
  "llvm.nvvm.barrier0",
  "llvm.nvvm.barrier0.and",
  "llvm.nvvm.barrier0.or",
  "llvm.nvvm.barrier0.popc",
  "llvm.nvvm.bf2h.rn",
  "llvm.nvvm.bf2h.rn.ftz",
  "llvm.nvvm.bitcast.d2ll",
  "llvm.nvvm.bitcast.f2i",
  "llvm.nvvm.bitcast.i2f",
  "llvm.nvvm.bitcast.ll2d",
  "llvm.nvvm.ceil.d",
  "llvm.nvvm.ceil.f",
  "llvm.nvvm.ceil.ftz.f",
  "llvm.nvvm.compiler.error",
  "llvm.nvvm.compiler.warn",
  "llvm.nvvm.cos.approx.f",
  "llvm.nvvm.cos.approx.ftz.f",
  "llvm.nvvm.cp.async.ca.shared.global.16",
  "llvm.nvvm.cp.async.ca.shared.global.16.s",
  "llvm.nvvm.cp.async.ca.shared.global.4",
  "llvm.nvvm.cp.async.ca.shared.global.4.s",
  "llvm.nvvm.cp.async.ca.shared.global.8",
  "llvm.nvvm.cp.async.ca.shared.global.8.s",
  "llvm.nvvm.cp.async.cg.shared.global.16",
  "llvm.nvvm.cp.async.cg.shared.global.16.s",
  "llvm.nvvm.cp.async.commit.group",
  "llvm.nvvm.cp.async.mbarrier.arrive",
  "llvm.nvvm.cp.async.mbarrier.arrive.noinc",
  "llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared",
  "llvm.nvvm.cp.async.mbarrier.arrive.shared",
  "llvm.nvvm.cp.async.wait.all",
  "llvm.nvvm.cp.async.wait.group",
  "llvm.nvvm.d2f.rm",
  "llvm.nvvm.d2f.rm.ftz",
  "llvm.nvvm.d2f.rn",
  "llvm.nvvm.d2f.rn.ftz",
  "llvm.nvvm.d2f.rp",
  "llvm.nvvm.d2f.rp.ftz",
  "llvm.nvvm.d2f.rz",
  "llvm.nvvm.d2f.rz.ftz",
  "llvm.nvvm.d2i.hi",
  "llvm.nvvm.d2i.lo",
  "llvm.nvvm.d2i.rm",
  "llvm.nvvm.d2i.rn",
  "llvm.nvvm.d2i.rp",
  "llvm.nvvm.d2i.rz",
  "llvm.nvvm.d2ll.rm",
  "llvm.nvvm.d2ll.rn",
  "llvm.nvvm.d2ll.rp",
  "llvm.nvvm.d2ll.rz",
  "llvm.nvvm.d2ui.rm",
  "llvm.nvvm.d2ui.rn",
  "llvm.nvvm.d2ui.rp",
  "llvm.nvvm.d2ui.rz",
  "llvm.nvvm.d2ull.rm",
  "llvm.nvvm.d2ull.rn",
  "llvm.nvvm.d2ull.rp",
  "llvm.nvvm.d2ull.rz",
  "llvm.nvvm.div.approx.f",
  "llvm.nvvm.div.approx.ftz.f",
  "llvm.nvvm.div.rm.d",
  "llvm.nvvm.div.rm.f",
  "llvm.nvvm.div.rm.ftz.f",
  "llvm.nvvm.div.rn.d",
  "llvm.nvvm.div.rn.f",
  "llvm.nvvm.div.rn.ftz.f",
  "llvm.nvvm.div.rp.d",
  "llvm.nvvm.div.rp.f",
  "llvm.nvvm.div.rp.ftz.f",
  "llvm.nvvm.div.rz.d",
  "llvm.nvvm.div.rz.f",
  "llvm.nvvm.div.rz.ftz.f",
  "llvm.nvvm.ex2.approx.d",
  "llvm.nvvm.ex2.approx.f",
  "llvm.nvvm.ex2.approx.f16",
  "llvm.nvvm.ex2.approx.f16x2",
  "llvm.nvvm.ex2.approx.ftz.f",
  "llvm.nvvm.f2bf16.rn",
  "llvm.nvvm.f2bf16.rn.relu",
  "llvm.nvvm.f2bf16.rz",
  "llvm.nvvm.f2bf16.rz.relu",
  "llvm.nvvm.f2h.rn",
  "llvm.nvvm.f2h.rn.ftz",
  "llvm.nvvm.f2i.rm",
  "llvm.nvvm.f2i.rm.ftz",
  "llvm.nvvm.f2i.rn",
  "llvm.nvvm.f2i.rn.ftz",
  "llvm.nvvm.f2i.rp",
  "llvm.nvvm.f2i.rp.ftz",
  "llvm.nvvm.f2i.rz",
  "llvm.nvvm.f2i.rz.ftz",
  "llvm.nvvm.f2ll.rm",
  "llvm.nvvm.f2ll.rm.ftz",
  "llvm.nvvm.f2ll.rn",
  "llvm.nvvm.f2ll.rn.ftz",
  "llvm.nvvm.f2ll.rp",
  "llvm.nvvm.f2ll.rp.ftz",
  "llvm.nvvm.f2ll.rz",
  "llvm.nvvm.f2ll.rz.ftz",
  "llvm.nvvm.f2tf32.rna",
  "llvm.nvvm.f2ui.rm",
  "llvm.nvvm.f2ui.rm.ftz",
  "llvm.nvvm.f2ui.rn",
  "llvm.nvvm.f2ui.rn.ftz",
  "llvm.nvvm.f2ui.rp",
  "llvm.nvvm.f2ui.rp.ftz",
  "llvm.nvvm.f2ui.rz",
  "llvm.nvvm.f2ui.rz.ftz",
  "llvm.nvvm.f2ull.rm",
  "llvm.nvvm.f2ull.rm.ftz",
  "llvm.nvvm.f2ull.rn",
  "llvm.nvvm.f2ull.rn.ftz",
  "llvm.nvvm.f2ull.rp",
  "llvm.nvvm.f2ull.rp.ftz",
  "llvm.nvvm.f2ull.rz",
  "llvm.nvvm.f2ull.rz.ftz",
  "llvm.nvvm.fabs.d",
  "llvm.nvvm.fabs.f",
  "llvm.nvvm.fabs.ftz.f",
  "llvm.nvvm.fence.sc.cluster",
  "llvm.nvvm.ff2bf16x2.rn",
  "llvm.nvvm.ff2bf16x2.rn.relu",
  "llvm.nvvm.ff2bf16x2.rz",
  "llvm.nvvm.ff2bf16x2.rz.relu",
  "llvm.nvvm.ff2f16x2.rn",
  "llvm.nvvm.ff2f16x2.rn.relu",
  "llvm.nvvm.ff2f16x2.rz",
  "llvm.nvvm.ff2f16x2.rz.relu",
  "llvm.nvvm.floor.d",
  "llvm.nvvm.floor.f",
  "llvm.nvvm.floor.ftz.f",
  "llvm.nvvm.fma.rm.d",
  "llvm.nvvm.fma.rm.f",
  "llvm.nvvm.fma.rm.ftz.f",
  "llvm.nvvm.fma.rn.bf16",
  "llvm.nvvm.fma.rn.bf16x2",
  "llvm.nvvm.fma.rn.d",
  "llvm.nvvm.fma.rn.f",
  "llvm.nvvm.fma.rn.f16",
  "llvm.nvvm.fma.rn.f16x2",
  "llvm.nvvm.fma.rn.ftz.bf16",
  "llvm.nvvm.fma.rn.ftz.bf16x2",
  "llvm.nvvm.fma.rn.ftz.f",
  "llvm.nvvm.fma.rn.ftz.f16",
  "llvm.nvvm.fma.rn.ftz.f16x2",
  "llvm.nvvm.fma.rn.ftz.relu.bf16",
  "llvm.nvvm.fma.rn.ftz.relu.bf16x2",
  "llvm.nvvm.fma.rn.ftz.relu.f16",
  "llvm.nvvm.fma.rn.ftz.relu.f16x2",
  "llvm.nvvm.fma.rn.ftz.sat.bf16",
  "llvm.nvvm.fma.rn.ftz.sat.bf16x2",
  "llvm.nvvm.fma.rn.ftz.sat.f16",
  "llvm.nvvm.fma.rn.ftz.sat.f16x2",
  "llvm.nvvm.fma.rn.relu.bf16",
  "llvm.nvvm.fma.rn.relu.bf16x2",
  "llvm.nvvm.fma.rn.relu.f16",
  "llvm.nvvm.fma.rn.relu.f16x2",
  "llvm.nvvm.fma.rn.sat.bf16",
  "llvm.nvvm.fma.rn.sat.bf16x2",
  "llvm.nvvm.fma.rn.sat.f16",
  "llvm.nvvm.fma.rn.sat.f16x2",
  "llvm.nvvm.fma.rp.d",
  "llvm.nvvm.fma.rp.f",
  "llvm.nvvm.fma.rp.ftz.f",
  "llvm.nvvm.fma.rz.d",
  "llvm.nvvm.fma.rz.f",
  "llvm.nvvm.fma.rz.ftz.f",
  "llvm.nvvm.fmax.bf16",
  "llvm.nvvm.fmax.bf16x2",
  "llvm.nvvm.fmax.d",
  "llvm.nvvm.fmax.f",
  "llvm.nvvm.fmax.f16",
  "llvm.nvvm.fmax.f16x2",
  "llvm.nvvm.fmax.ftz.bf16",
  "llvm.nvvm.fmax.ftz.bf16x2",
  "llvm.nvvm.fmax.ftz.f",
  "llvm.nvvm.fmax.ftz.f16",
  "llvm.nvvm.fmax.ftz.f16x2",
  "llvm.nvvm.fmax.ftz.nan.bf16",
  "llvm.nvvm.fmax.ftz.nan.bf16x2",
  "llvm.nvvm.fmax.ftz.nan.f",
  "llvm.nvvm.fmax.ftz.nan.f16",
  "llvm.nvvm.fmax.ftz.nan.f16x2",
  "llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16",
  "llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16x2",
  "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f",
  "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16",
  "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2",
  "llvm.nvvm.fmax.ftz.xorsign.abs.bf16",
  "llvm.nvvm.fmax.ftz.xorsign.abs.bf16x2",
  "llvm.nvvm.fmax.ftz.xorsign.abs.f",
  "llvm.nvvm.fmax.ftz.xorsign.abs.f16",
  "llvm.nvvm.fmax.ftz.xorsign.abs.f16x2",
  "llvm.nvvm.fmax.nan.bf16",
  "llvm.nvvm.fmax.nan.bf16x2",
  "llvm.nvvm.fmax.nan.f",
  "llvm.nvvm.fmax.nan.f16",
  "llvm.nvvm.fmax.nan.f16x2",
  "llvm.nvvm.fmax.nan.xorsign.abs.bf16",
  "llvm.nvvm.fmax.nan.xorsign.abs.bf16x2",
  "llvm.nvvm.fmax.nan.xorsign.abs.f",
  "llvm.nvvm.fmax.nan.xorsign.abs.f16",
  "llvm.nvvm.fmax.nan.xorsign.abs.f16x2",
  "llvm.nvvm.fmax.xorsign.abs.bf16",
  "llvm.nvvm.fmax.xorsign.abs.bf16x2",
  "llvm.nvvm.fmax.xorsign.abs.f",
  "llvm.nvvm.fmax.xorsign.abs.f16",
  "llvm.nvvm.fmax.xorsign.abs.f16x2",
  "llvm.nvvm.fmin.bf16",
  "llvm.nvvm.fmin.bf16x2",
  "llvm.nvvm.fmin.d",
  "llvm.nvvm.fmin.f",
  "llvm.nvvm.fmin.f16",
  "llvm.nvvm.fmin.f16x2",
  "llvm.nvvm.fmin.ftz.bf16",
  "llvm.nvvm.fmin.ftz.bf16x2",
  "llvm.nvvm.fmin.ftz.f",
  "llvm.nvvm.fmin.ftz.f16",
  "llvm.nvvm.fmin.ftz.f16x2",
  "llvm.nvvm.fmin.ftz.nan.bf16",
  "llvm.nvvm.fmin.ftz.nan.bf16x2",
  "llvm.nvvm.fmin.ftz.nan.f",
  "llvm.nvvm.fmin.ftz.nan.f16",
  "llvm.nvvm.fmin.ftz.nan.f16x2",
  "llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16",
  "llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16x2",
  "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f",
  "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16",
  "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2",
  "llvm.nvvm.fmin.ftz.xorsign.abs.bf16",
  "llvm.nvvm.fmin.ftz.xorsign.abs.bf16x2",
  "llvm.nvvm.fmin.ftz.xorsign.abs.f",
  "llvm.nvvm.fmin.ftz.xorsign.abs.f16",
  "llvm.nvvm.fmin.ftz.xorsign.abs.f16x2",
  "llvm.nvvm.fmin.nan.bf16",
  "llvm.nvvm.fmin.nan.bf16x2",
  "llvm.nvvm.fmin.nan.f",
  "llvm.nvvm.fmin.nan.f16",
  "llvm.nvvm.fmin.nan.f16x2",
  "llvm.nvvm.fmin.nan.xorsign.abs.bf16",
  "llvm.nvvm.fmin.nan.xorsign.abs.bf16x2",
  "llvm.nvvm.fmin.nan.xorsign.abs.f",
  "llvm.nvvm.fmin.nan.xorsign.abs.f16",
  "llvm.nvvm.fmin.nan.xorsign.abs.f16x2",
  "llvm.nvvm.fmin.xorsign.abs.bf16",
  "llvm.nvvm.fmin.xorsign.abs.bf16x2",
  "llvm.nvvm.fmin.xorsign.abs.f",
  "llvm.nvvm.fmin.xorsign.abs.f16",
  "llvm.nvvm.fmin.xorsign.abs.f16x2",
  "llvm.nvvm.fns",
  "llvm.nvvm.getctarank",
  "llvm.nvvm.getctarank.shared.cluster",
  "llvm.nvvm.i2d.rm",
  "llvm.nvvm.i2d.rn",
  "llvm.nvvm.i2d.rp",
  "llvm.nvvm.i2d.rz",
  "llvm.nvvm.i2f.rm",
  "llvm.nvvm.i2f.rn",
  "llvm.nvvm.i2f.rp",
  "llvm.nvvm.i2f.rz",
  "llvm.nvvm.is_explicit_cluster",
  "llvm.nvvm.isspacep.const",
  "llvm.nvvm.isspacep.global",
  "llvm.nvvm.isspacep.local",
  "llvm.nvvm.isspacep.shared",
  "llvm.nvvm.isspacep.shared.cluster",
  "llvm.nvvm.istypep.sampler",
  "llvm.nvvm.istypep.surface",
  "llvm.nvvm.istypep.texture",
  "llvm.nvvm.ldg.global.f",
  "llvm.nvvm.ldg.global.i",
  "llvm.nvvm.ldg.global.p",
  "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16",
  "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16",
  "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16",
  "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16",
  "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16",
  "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16",
  "llvm.nvvm.ldu.global.f",
  "llvm.nvvm.ldu.global.i",
  "llvm.nvvm.ldu.global.p",
  "llvm.nvvm.lg2.approx.d",
  "llvm.nvvm.lg2.approx.f",
  "llvm.nvvm.lg2.approx.ftz.f",
  "llvm.nvvm.ll2d.rm",
  "llvm.nvvm.ll2d.rn",
  "llvm.nvvm.ll2d.rp",
  "llvm.nvvm.ll2d.rz",
  "llvm.nvvm.ll2f.rm",
  "llvm.nvvm.ll2f.rn",
  "llvm.nvvm.ll2f.rp",
  "llvm.nvvm.ll2f.rz",
  "llvm.nvvm.lohi.i2d",
  "llvm.nvvm.mapa",
  "llvm.nvvm.mapa.shared.cluster",
  "llvm.nvvm.match.all.sync.i32p",
  "llvm.nvvm.match.all.sync.i64p",
  "llvm.nvvm.match.any.sync.i32",
  "llvm.nvvm.match.any.sync.i64",
  "llvm.nvvm.mbarrier.arrive",
  "llvm.nvvm.mbarrier.arrive.drop",
  "llvm.nvvm.mbarrier.arrive.drop.noComplete",
  "llvm.nvvm.mbarrier.arrive.drop.noComplete.shared",
  "llvm.nvvm.mbarrier.arrive.drop.shared",
  "llvm.nvvm.mbarrier.arrive.noComplete",
  "llvm.nvvm.mbarrier.arrive.noComplete.shared",
  "llvm.nvvm.mbarrier.arrive.shared",
  "llvm.nvvm.mbarrier.init",
  "llvm.nvvm.mbarrier.init.shared",
  "llvm.nvvm.mbarrier.inval",
  "llvm.nvvm.mbarrier.inval.shared",
  "llvm.nvvm.mbarrier.pending.count",
  "llvm.nvvm.mbarrier.test.wait",
  "llvm.nvvm.mbarrier.test.wait.shared",
  "llvm.nvvm.membar.cta",
  "llvm.nvvm.membar.gl",
  "llvm.nvvm.membar.sys",
  "llvm.nvvm.mma.and.popc.m16n8k128.row.col.b1",
  "llvm.nvvm.mma.and.popc.m16n8k256.row.col.b1",
  "llvm.nvvm.mma.and.popc.m8n8k128.row.col.b1",
  "llvm.nvvm.mma.m16n8k16.row.col.bf16",
  "llvm.nvvm.mma.m16n8k16.row.col.f16.f16",
  "llvm.nvvm.mma.m16n8k16.row.col.f16.f32",
  "llvm.nvvm.mma.m16n8k16.row.col.f32.f16",
  "llvm.nvvm.mma.m16n8k16.row.col.f32.f32",
  "llvm.nvvm.mma.m16n8k16.row.col.s8",
  "llvm.nvvm.mma.m16n8k16.row.col.s8.u8",
  "llvm.nvvm.mma.m16n8k16.row.col.satfinite.s8",
  "llvm.nvvm.mma.m16n8k16.row.col.satfinite.s8.u8",
  "llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8",
  "llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8.s8",
  "llvm.nvvm.mma.m16n8k16.row.col.u8",
  "llvm.nvvm.mma.m16n8k16.row.col.u8.s8",
  "llvm.nvvm.mma.m16n8k32.row.col.s4",
  "llvm.nvvm.mma.m16n8k32.row.col.s4.u4",
  "llvm.nvvm.mma.m16n8k32.row.col.s8",
  "llvm.nvvm.mma.m16n8k32.row.col.s8.u8",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.s4",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.s4.u4",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.s8",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.s8.u8",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.u4",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.u4.s4",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8",
  "llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8.s8",
  "llvm.nvvm.mma.m16n8k32.row.col.u4",
  "llvm.nvvm.mma.m16n8k32.row.col.u4.s4",
  "llvm.nvvm.mma.m16n8k32.row.col.u8",
  "llvm.nvvm.mma.m16n8k32.row.col.u8.s8",
  "llvm.nvvm.mma.m16n8k4.row.col.tf32",
  "llvm.nvvm.mma.m16n8k64.row.col.s4",
  "llvm.nvvm.mma.m16n8k64.row.col.s4.u4",
  "llvm.nvvm.mma.m16n8k64.row.col.satfinite.s4",
  "llvm.nvvm.mma.m16n8k64.row.col.satfinite.s4.u4",
  "llvm.nvvm.mma.m16n8k64.row.col.satfinite.u4",
  "llvm.nvvm.mma.m16n8k64.row.col.satfinite.u4.s4",
  "llvm.nvvm.mma.m16n8k64.row.col.u4",
  "llvm.nvvm.mma.m16n8k64.row.col.u4.s4",
  "llvm.nvvm.mma.m16n8k8.row.col.bf16",
  "llvm.nvvm.mma.m16n8k8.row.col.f16.f16",
  "llvm.nvvm.mma.m16n8k8.row.col.f32.f32",
  "llvm.nvvm.mma.m16n8k8.row.col.tf32",
  "llvm.nvvm.mma.m8n8k16.row.col.s8",
  "llvm.nvvm.mma.m8n8k16.row.col.s8.u8",
  "llvm.nvvm.mma.m8n8k16.row.col.satfinite.s8",
  "llvm.nvvm.mma.m8n8k16.row.col.satfinite.s8.u8",
  "llvm.nvvm.mma.m8n8k16.row.col.satfinite.u8",
  "llvm.nvvm.mma.m8n8k16.row.col.satfinite.u8.s8",
  "llvm.nvvm.mma.m8n8k16.row.col.u8",
  "llvm.nvvm.mma.m8n8k16.row.col.u8.s8",
  "llvm.nvvm.mma.m8n8k32.row.col.s4",
  "llvm.nvvm.mma.m8n8k32.row.col.s4.u4",
  "llvm.nvvm.mma.m8n8k32.row.col.satfinite.s4",
  "llvm.nvvm.mma.m8n8k32.row.col.satfinite.s4.u4",
  "llvm.nvvm.mma.m8n8k32.row.col.satfinite.u4",
  "llvm.nvvm.mma.m8n8k32.row.col.satfinite.u4.s4",
  "llvm.nvvm.mma.m8n8k32.row.col.u4",
  "llvm.nvvm.mma.m8n8k32.row.col.u4.s4",
  "llvm.nvvm.mma.m8n8k4.col.col.f16.f16",
  "llvm.nvvm.mma.m8n8k4.col.col.f32.f16",
  "llvm.nvvm.mma.m8n8k4.col.col.f32.f32",
  "llvm.nvvm.mma.m8n8k4.col.row.f16.f16",
  "llvm.nvvm.mma.m8n8k4.col.row.f32.f16",
  "llvm.nvvm.mma.m8n8k4.col.row.f32.f32",
  "llvm.nvvm.mma.m8n8k4.row.col.f16.f16",
  "llvm.nvvm.mma.m8n8k4.row.col.f32.f16",
  "llvm.nvvm.mma.m8n8k4.row.col.f32.f32",
  "llvm.nvvm.mma.m8n8k4.row.col.f64",
  "llvm.nvvm.mma.m8n8k4.row.row.f16.f16",
  "llvm.nvvm.mma.m8n8k4.row.row.f32.f16",
  "llvm.nvvm.mma.m8n8k4.row.row.f32.f32",
  "llvm.nvvm.mma.xor.popc.m16n8k128.row.col.b1",
  "llvm.nvvm.mma.xor.popc.m16n8k256.row.col.b1",
  "llvm.nvvm.mma.xor.popc.m8n8k128.row.col.b1",
  "llvm.nvvm.move.double",
  "llvm.nvvm.move.float",
  "llvm.nvvm.move.i16",
  "llvm.nvvm.move.i32",
  "llvm.nvvm.move.i64",
  "llvm.nvvm.move.ptr",
  "llvm.nvvm.mul.rm.d",
  "llvm.nvvm.mul.rm.f",
  "llvm.nvvm.mul.rm.ftz.f",
  "llvm.nvvm.mul.rn.d",
  "llvm.nvvm.mul.rn.f",
  "llvm.nvvm.mul.rn.ftz.f",
  "llvm.nvvm.mul.rp.d",
  "llvm.nvvm.mul.rp.f",
  "llvm.nvvm.mul.rp.ftz.f",
  "llvm.nvvm.mul.rz.d",
  "llvm.nvvm.mul.rz.f",
  "llvm.nvvm.mul.rz.ftz.f",
  "llvm.nvvm.mul24.i",
  "llvm.nvvm.mul24.ui",
  "llvm.nvvm.mulhi.i",
  "llvm.nvvm.mulhi.ll",
  "llvm.nvvm.mulhi.ui",
  "llvm.nvvm.mulhi.ull",
  "llvm.nvvm.neg.bf16",
  "llvm.nvvm.neg.bf16x2",
  "llvm.nvvm.prmt",
  "llvm.nvvm.ptr.constant.to.gen",
  "llvm.nvvm.ptr.gen.to.constant",
  "llvm.nvvm.ptr.gen.to.global",
  "llvm.nvvm.ptr.gen.to.local",
  "llvm.nvvm.ptr.gen.to.param",
  "llvm.nvvm.ptr.gen.to.shared",
  "llvm.nvvm.ptr.global.to.gen",
  "llvm.nvvm.ptr.local.to.gen",
  "llvm.nvvm.ptr.shared.to.gen",
  "llvm.nvvm.rcp.approx.ftz.d",
  "llvm.nvvm.rcp.approx.ftz.f",
  "llvm.nvvm.rcp.rm.d",
  "llvm.nvvm.rcp.rm.f",
  "llvm.nvvm.rcp.rm.ftz.f",
  "llvm.nvvm.rcp.rn.d",
  "llvm.nvvm.rcp.rn.f",
  "llvm.nvvm.rcp.rn.ftz.f",
  "llvm.nvvm.rcp.rp.d",
  "llvm.nvvm.rcp.rp.f",
  "llvm.nvvm.rcp.rp.ftz.f",
  "llvm.nvvm.rcp.rz.d",
  "llvm.nvvm.rcp.rz.f",
  "llvm.nvvm.rcp.rz.ftz.f",
  "llvm.nvvm.read.ptx.sreg.clock",
  "llvm.nvvm.read.ptx.sreg.clock64",
  "llvm.nvvm.read.ptx.sreg.cluster.ctaid.w",
  "llvm.nvvm.read.ptx.sreg.cluster.ctaid.x",
  "llvm.nvvm.read.ptx.sreg.cluster.ctaid.y",
  "llvm.nvvm.read.ptx.sreg.cluster.ctaid.z",
  "llvm.nvvm.read.ptx.sreg.cluster.ctarank",
  "llvm.nvvm.read.ptx.sreg.cluster.nctaid.w",
  "llvm.nvvm.read.ptx.sreg.cluster.nctaid.x",
  "llvm.nvvm.read.ptx.sreg.cluster.nctaid.y",
  "llvm.nvvm.read.ptx.sreg.cluster.nctaid.z",
  "llvm.nvvm.read.ptx.sreg.cluster.nctarank",
  "llvm.nvvm.read.ptx.sreg.clusterid.w",
  "llvm.nvvm.read.ptx.sreg.clusterid.x",
  "llvm.nvvm.read.ptx.sreg.clusterid.y",
  "llvm.nvvm.read.ptx.sreg.clusterid.z",
  "llvm.nvvm.read.ptx.sreg.ctaid.w",
  "llvm.nvvm.read.ptx.sreg.ctaid.x",
  "llvm.nvvm.read.ptx.sreg.ctaid.y",
  "llvm.nvvm.read.ptx.sreg.ctaid.z",
  "llvm.nvvm.read.ptx.sreg.envreg0",
  "llvm.nvvm.read.ptx.sreg.envreg1",
  "llvm.nvvm.read.ptx.sreg.envreg10",
  "llvm.nvvm.read.ptx.sreg.envreg11",
  "llvm.nvvm.read.ptx.sreg.envreg12",
  "llvm.nvvm.read.ptx.sreg.envreg13",
  "llvm.nvvm.read.ptx.sreg.envreg14",
  "llvm.nvvm.read.ptx.sreg.envreg15",
  "llvm.nvvm.read.ptx.sreg.envreg16",
  "llvm.nvvm.read.ptx.sreg.envreg17",
  "llvm.nvvm.read.ptx.sreg.envreg18",
  "llvm.nvvm.read.ptx.sreg.envreg19",
  "llvm.nvvm.read.ptx.sreg.envreg2",
  "llvm.nvvm.read.ptx.sreg.envreg20",
  "llvm.nvvm.read.ptx.sreg.envreg21",
  "llvm.nvvm.read.ptx.sreg.envreg22",
  "llvm.nvvm.read.ptx.sreg.envreg23",
  "llvm.nvvm.read.ptx.sreg.envreg24",
  "llvm.nvvm.read.ptx.sreg.envreg25",
  "llvm.nvvm.read.ptx.sreg.envreg26",
  "llvm.nvvm.read.ptx.sreg.envreg27",
  "llvm.nvvm.read.ptx.sreg.envreg28",
  "llvm.nvvm.read.ptx.sreg.envreg29",
  "llvm.nvvm.read.ptx.sreg.envreg3",
  "llvm.nvvm.read.ptx.sreg.envreg30",
  "llvm.nvvm.read.ptx.sreg.envreg31",
  "llvm.nvvm.read.ptx.sreg.envreg4",
  "llvm.nvvm.read.ptx.sreg.envreg5",
  "llvm.nvvm.read.ptx.sreg.envreg6",
  "llvm.nvvm.read.ptx.sreg.envreg7",
  "llvm.nvvm.read.ptx.sreg.envreg8",
  "llvm.nvvm.read.ptx.sreg.envreg9",
  "llvm.nvvm.read.ptx.sreg.gridid",
  "llvm.nvvm.read.ptx.sreg.laneid",
  "llvm.nvvm.read.ptx.sreg.lanemask.eq",
  "llvm.nvvm.read.ptx.sreg.lanemask.ge",
  "llvm.nvvm.read.ptx.sreg.lanemask.gt",
  "llvm.nvvm.read.ptx.sreg.lanemask.le",
  "llvm.nvvm.read.ptx.sreg.lanemask.lt",
  "llvm.nvvm.read.ptx.sreg.nclusterid.w",
  "llvm.nvvm.read.ptx.sreg.nclusterid.x",
  "llvm.nvvm.read.ptx.sreg.nclusterid.y",
  "llvm.nvvm.read.ptx.sreg.nclusterid.z",
  "llvm.nvvm.read.ptx.sreg.nctaid.w",
  "llvm.nvvm.read.ptx.sreg.nctaid.x",
  "llvm.nvvm.read.ptx.sreg.nctaid.y",
  "llvm.nvvm.read.ptx.sreg.nctaid.z",
  "llvm.nvvm.read.ptx.sreg.nsmid",
  "llvm.nvvm.read.ptx.sreg.ntid.w",
  "llvm.nvvm.read.ptx.sreg.ntid.x",
  "llvm.nvvm.read.ptx.sreg.ntid.y",
  "llvm.nvvm.read.ptx.sreg.ntid.z",
  "llvm.nvvm.read.ptx.sreg.nwarpid",
  "llvm.nvvm.read.ptx.sreg.pm0",
  "llvm.nvvm.read.ptx.sreg.pm1",
  "llvm.nvvm.read.ptx.sreg.pm2",
  "llvm.nvvm.read.ptx.sreg.pm3",
  "llvm.nvvm.read.ptx.sreg.smid",
  "llvm.nvvm.read.ptx.sreg.tid.w",
  "llvm.nvvm.read.ptx.sreg.tid.x",
  "llvm.nvvm.read.ptx.sreg.tid.y",
  "llvm.nvvm.read.ptx.sreg.tid.z",
  "llvm.nvvm.read.ptx.sreg.warpid",
  "llvm.nvvm.read.ptx.sreg.warpsize",
  "llvm.nvvm.redux.sync.add",
  "llvm.nvvm.redux.sync.and",
  "llvm.nvvm.redux.sync.max",
  "llvm.nvvm.redux.sync.min",
  "llvm.nvvm.redux.sync.or",
  "llvm.nvvm.redux.sync.umax",
  "llvm.nvvm.redux.sync.umin",
  "llvm.nvvm.redux.sync.xor",
  "llvm.nvvm.reflect",
  "llvm.nvvm.rotate.b32",
  "llvm.nvvm.rotate.b64",
  "llvm.nvvm.rotate.right.b64",
  "llvm.nvvm.round.d",
  "llvm.nvvm.round.f",
  "llvm.nvvm.round.ftz.f",
  "llvm.nvvm.rsqrt.approx.d",
  "llvm.nvvm.rsqrt.approx.f",
  "llvm.nvvm.rsqrt.approx.ftz.f",
  "llvm.nvvm.sad.i",
  "llvm.nvvm.sad.ui",
  "llvm.nvvm.saturate.d",
  "llvm.nvvm.saturate.f",
  "llvm.nvvm.saturate.ftz.f",
  "llvm.nvvm.shfl.bfly.f32",
  "llvm.nvvm.shfl.bfly.f32p",
  "llvm.nvvm.shfl.bfly.i32",
  "llvm.nvvm.shfl.bfly.i32p",
  "llvm.nvvm.shfl.down.f32",
  "llvm.nvvm.shfl.down.f32p",
  "llvm.nvvm.shfl.down.i32",
  "llvm.nvvm.shfl.down.i32p",
  "llvm.nvvm.shfl.idx.f32",
  "llvm.nvvm.shfl.idx.f32p",
  "llvm.nvvm.shfl.idx.i32",
  "llvm.nvvm.shfl.idx.i32p",
  "llvm.nvvm.shfl.sync.bfly.f32",
  "llvm.nvvm.shfl.sync.bfly.f32p",
  "llvm.nvvm.shfl.sync.bfly.i32",
  "llvm.nvvm.shfl.sync.bfly.i32p",
  "llvm.nvvm.shfl.sync.down.f32",
  "llvm.nvvm.shfl.sync.down.f32p",
  "llvm.nvvm.shfl.sync.down.i32",
  "llvm.nvvm.shfl.sync.down.i32p",
  "llvm.nvvm.shfl.sync.idx.f32",
  "llvm.nvvm.shfl.sync.idx.f32p",
  "llvm.nvvm.shfl.sync.idx.i32",
  "llvm.nvvm.shfl.sync.idx.i32p",
  "llvm.nvvm.shfl.sync.up.f32",
  "llvm.nvvm.shfl.sync.up.f32p",
  "llvm.nvvm.shfl.sync.up.i32",
  "llvm.nvvm.shfl.sync.up.i32p",
  "llvm.nvvm.shfl.up.f32",
  "llvm.nvvm.shfl.up.f32p",
  "llvm.nvvm.shfl.up.i32",
  "llvm.nvvm.shfl.up.i32p",
  "llvm.nvvm.sin.approx.f",
  "llvm.nvvm.sin.approx.ftz.f",
  "llvm.nvvm.sqrt.approx.f",
  "llvm.nvvm.sqrt.approx.ftz.f",
  "llvm.nvvm.sqrt.f",
  "llvm.nvvm.sqrt.rm.d",
  "llvm.nvvm.sqrt.rm.f",
  "llvm.nvvm.sqrt.rm.ftz.f",
  "llvm.nvvm.sqrt.rn.d",
  "llvm.nvvm.sqrt.rn.f",
  "llvm.nvvm.sqrt.rn.ftz.f",
  "llvm.nvvm.sqrt.rp.d",
  "llvm.nvvm.sqrt.rp.f",
  "llvm.nvvm.sqrt.rp.ftz.f",
  "llvm.nvvm.sqrt.rz.d",
  "llvm.nvvm.sqrt.rz.f",
  "llvm.nvvm.sqrt.rz.ftz.f",
  "llvm.nvvm.suld.1d.array.i16.clamp",
  "llvm.nvvm.suld.1d.array.i16.trap",
  "llvm.nvvm.suld.1d.array.i16.zero",
  "llvm.nvvm.suld.1d.array.i32.clamp",
  "llvm.nvvm.suld.1d.array.i32.trap",
  "llvm.nvvm.suld.1d.array.i32.zero",
  "llvm.nvvm.suld.1d.array.i64.clamp",
  "llvm.nvvm.suld.1d.array.i64.trap",
  "llvm.nvvm.suld.1d.array.i64.zero",
  "llvm.nvvm.suld.1d.array.i8.clamp",
  "llvm.nvvm.suld.1d.array.i8.trap",
  "llvm.nvvm.suld.1d.array.i8.zero",
  "llvm.nvvm.suld.1d.array.v2i16.clamp",
  "llvm.nvvm.suld.1d.array.v2i16.trap",
  "llvm.nvvm.suld.1d.array.v2i16.zero",
  "llvm.nvvm.suld.1d.array.v2i32.clamp",
  "llvm.nvvm.suld.1d.array.v2i32.trap",
  "llvm.nvvm.suld.1d.array.v2i32.zero",
  "llvm.nvvm.suld.1d.array.v2i64.clamp",
  "llvm.nvvm.suld.1d.array.v2i64.trap",
  "llvm.nvvm.suld.1d.array.v2i64.zero",
  "llvm.nvvm.suld.1d.array.v2i8.clamp",
  "llvm.nvvm.suld.1d.array.v2i8.trap",
  "llvm.nvvm.suld.1d.array.v2i8.zero",
  "llvm.nvvm.suld.1d.array.v4i16.clamp",
  "llvm.nvvm.suld.1d.array.v4i16.trap",
  "llvm.nvvm.suld.1d.array.v4i16.zero",
  "llvm.nvvm.suld.1d.array.v4i32.clamp",
  "llvm.nvvm.suld.1d.array.v4i32.trap",
  "llvm.nvvm.suld.1d.array.v4i32.zero",
  "llvm.nvvm.suld.1d.array.v4i8.clamp",
  "llvm.nvvm.suld.1d.array.v4i8.trap",
  "llvm.nvvm.suld.1d.array.v4i8.zero",
  "llvm.nvvm.suld.1d.i16.clamp",
  "llvm.nvvm.suld.1d.i16.trap",
  "llvm.nvvm.suld.1d.i16.zero",
  "llvm.nvvm.suld.1d.i32.clamp",
  "llvm.nvvm.suld.1d.i32.trap",
  "llvm.nvvm.suld.1d.i32.zero",
  "llvm.nvvm.suld.1d.i64.clamp",
  "llvm.nvvm.suld.1d.i64.trap",
  "llvm.nvvm.suld.1d.i64.zero",
  "llvm.nvvm.suld.1d.i8.clamp",
  "llvm.nvvm.suld.1d.i8.trap",
  "llvm.nvvm.suld.1d.i8.zero",
  "llvm.nvvm.suld.1d.v2i16.clamp",
  "llvm.nvvm.suld.1d.v2i16.trap",
  "llvm.nvvm.suld.1d.v2i16.zero",
  "llvm.nvvm.suld.1d.v2i32.clamp",
  "llvm.nvvm.suld.1d.v2i32.trap",
  "llvm.nvvm.suld.1d.v2i32.zero",
  "llvm.nvvm.suld.1d.v2i64.clamp",
  "llvm.nvvm.suld.1d.v2i64.trap",
  "llvm.nvvm.suld.1d.v2i64.zero",
  "llvm.nvvm.suld.1d.v2i8.clamp",
  "llvm.nvvm.suld.1d.v2i8.trap",
  "llvm.nvvm.suld.1d.v2i8.zero",
  "llvm.nvvm.suld.1d.v4i16.clamp",
  "llvm.nvvm.suld.1d.v4i16.trap",
  "llvm.nvvm.suld.1d.v4i16.zero",
  "llvm.nvvm.suld.1d.v4i32.clamp",
  "llvm.nvvm.suld.1d.v4i32.trap",
  "llvm.nvvm.suld.1d.v4i32.zero",
  "llvm.nvvm.suld.1d.v4i8.clamp",
  "llvm.nvvm.suld.1d.v4i8.trap",
  "llvm.nvvm.suld.1d.v4i8.zero",
  "llvm.nvvm.suld.2d.array.i16.clamp",
  "llvm.nvvm.suld.2d.array.i16.trap",
  "llvm.nvvm.suld.2d.array.i16.zero",
  "llvm.nvvm.suld.2d.array.i32.clamp",
  "llvm.nvvm.suld.2d.array.i32.trap",
  "llvm.nvvm.suld.2d.array.i32.zero",
  "llvm.nvvm.suld.2d.array.i64.clamp",
  "llvm.nvvm.suld.2d.array.i64.trap",
  "llvm.nvvm.suld.2d.array.i64.zero",
  "llvm.nvvm.suld.2d.array.i8.clamp",
  "llvm.nvvm.suld.2d.array.i8.trap",
  "llvm.nvvm.suld.2d.array.i8.zero",
  "llvm.nvvm.suld.2d.array.v2i16.clamp",
  "llvm.nvvm.suld.2d.array.v2i16.trap",
  "llvm.nvvm.suld.2d.array.v2i16.zero",
  "llvm.nvvm.suld.2d.array.v2i32.clamp",
  "llvm.nvvm.suld.2d.array.v2i32.trap",
  "llvm.nvvm.suld.2d.array.v2i32.zero",
  "llvm.nvvm.suld.2d.array.v2i64.clamp",
  "llvm.nvvm.suld.2d.array.v2i64.trap",
  "llvm.nvvm.suld.2d.array.v2i64.zero",
  "llvm.nvvm.suld.2d.array.v2i8.clamp",
  "llvm.nvvm.suld.2d.array.v2i8.trap",
  "llvm.nvvm.suld.2d.array.v2i8.zero",
  "llvm.nvvm.suld.2d.array.v4i16.clamp",
  "llvm.nvvm.suld.2d.array.v4i16.trap",
  "llvm.nvvm.suld.2d.array.v4i16.zero",
  "llvm.nvvm.suld.2d.array.v4i32.clamp",
  "llvm.nvvm.suld.2d.array.v4i32.trap",
  "llvm.nvvm.suld.2d.array.v4i32.zero",
  "llvm.nvvm.suld.2d.array.v4i8.clamp",
  "llvm.nvvm.suld.2d.array.v4i8.trap",
  "llvm.nvvm.suld.2d.array.v4i8.zero",
  "llvm.nvvm.suld.2d.i16.clamp",
  "llvm.nvvm.suld.2d.i16.trap",
  "llvm.nvvm.suld.2d.i16.zero",
  "llvm.nvvm.suld.2d.i32.clamp",
  "llvm.nvvm.suld.2d.i32.trap",
  "llvm.nvvm.suld.2d.i32.zero",
  "llvm.nvvm.suld.2d.i64.clamp",
  "llvm.nvvm.suld.2d.i64.trap",
  "llvm.nvvm.suld.2d.i64.zero",
  "llvm.nvvm.suld.2d.i8.clamp",
  "llvm.nvvm.suld.2d.i8.trap",
  "llvm.nvvm.suld.2d.i8.zero",
  "llvm.nvvm.suld.2d.v2i16.clamp",
  "llvm.nvvm.suld.2d.v2i16.trap",
  "llvm.nvvm.suld.2d.v2i16.zero",
  "llvm.nvvm.suld.2d.v2i32.clamp",
  "llvm.nvvm.suld.2d.v2i32.trap",
  "llvm.nvvm.suld.2d.v2i32.zero",
  "llvm.nvvm.suld.2d.v2i64.clamp",
  "llvm.nvvm.suld.2d.v2i64.trap",
  "llvm.nvvm.suld.2d.v2i64.zero",
  "llvm.nvvm.suld.2d.v2i8.clamp",
  "llvm.nvvm.suld.2d.v2i8.trap",
  "llvm.nvvm.suld.2d.v2i8.zero",
  "llvm.nvvm.suld.2d.v4i16.clamp",
  "llvm.nvvm.suld.2d.v4i16.trap",
  "llvm.nvvm.suld.2d.v4i16.zero",
  "llvm.nvvm.suld.2d.v4i32.clamp",
  "llvm.nvvm.suld.2d.v4i32.trap",
  "llvm.nvvm.suld.2d.v4i32.zero",
  "llvm.nvvm.suld.2d.v4i8.clamp",
  "llvm.nvvm.suld.2d.v4i8.trap",
  "llvm.nvvm.suld.2d.v4i8.zero",
  "llvm.nvvm.suld.3d.i16.clamp",
  "llvm.nvvm.suld.3d.i16.trap",
  "llvm.nvvm.suld.3d.i16.zero",
  "llvm.nvvm.suld.3d.i32.clamp",
  "llvm.nvvm.suld.3d.i32.trap",
  "llvm.nvvm.suld.3d.i32.zero",
  "llvm.nvvm.suld.3d.i64.clamp",
  "llvm.nvvm.suld.3d.i64.trap",
  "llvm.nvvm.suld.3d.i64.zero",
  "llvm.nvvm.suld.3d.i8.clamp",
  "llvm.nvvm.suld.3d.i8.trap",
  "llvm.nvvm.suld.3d.i8.zero",
  "llvm.nvvm.suld.3d.v2i16.clamp",
  "llvm.nvvm.suld.3d.v2i16.trap",
  "llvm.nvvm.suld.3d.v2i16.zero",
  "llvm.nvvm.suld.3d.v2i32.clamp",
  "llvm.nvvm.suld.3d.v2i32.trap",
  "llvm.nvvm.suld.3d.v2i32.zero",
  "llvm.nvvm.suld.3d.v2i64.clamp",
  "llvm.nvvm.suld.3d.v2i64.trap",
  "llvm.nvvm.suld.3d.v2i64.zero",
  "llvm.nvvm.suld.3d.v2i8.clamp",
  "llvm.nvvm.suld.3d.v2i8.trap",
  "llvm.nvvm.suld.3d.v2i8.zero",
  "llvm.nvvm.suld.3d.v4i16.clamp",
  "llvm.nvvm.suld.3d.v4i16.trap",
  "llvm.nvvm.suld.3d.v4i16.zero",
  "llvm.nvvm.suld.3d.v4i32.clamp",
  "llvm.nvvm.suld.3d.v4i32.trap",
  "llvm.nvvm.suld.3d.v4i32.zero",
  "llvm.nvvm.suld.3d.v4i8.clamp",
  "llvm.nvvm.suld.3d.v4i8.trap",
  "llvm.nvvm.suld.3d.v4i8.zero",
  "llvm.nvvm.suq.array.size",
  "llvm.nvvm.suq.channel.data.type",
  "llvm.nvvm.suq.channel.order",
  "llvm.nvvm.suq.depth",
  "llvm.nvvm.suq.height",
  "llvm.nvvm.suq.width",
  "llvm.nvvm.sust.b.1d.array.i16.clamp",
  "llvm.nvvm.sust.b.1d.array.i16.trap",
  "llvm.nvvm.sust.b.1d.array.i16.zero",
  "llvm.nvvm.sust.b.1d.array.i32.clamp",
  "llvm.nvvm.sust.b.1d.array.i32.trap",
  "llvm.nvvm.sust.b.1d.array.i32.zero",
  "llvm.nvvm.sust.b.1d.array.i64.clamp",
  "llvm.nvvm.sust.b.1d.array.i64.trap",
  "llvm.nvvm.sust.b.1d.array.i64.zero",
  "llvm.nvvm.sust.b.1d.array.i8.clamp",
  "llvm.nvvm.sust.b.1d.array.i8.trap",
  "llvm.nvvm.sust.b.1d.array.i8.zero",
  "llvm.nvvm.sust.b.1d.array.v2i16.clamp",
  "llvm.nvvm.sust.b.1d.array.v2i16.trap",
  "llvm.nvvm.sust.b.1d.array.v2i16.zero",
  "llvm.nvvm.sust.b.1d.array.v2i32.clamp",
  "llvm.nvvm.sust.b.1d.array.v2i32.trap",
  "llvm.nvvm.sust.b.1d.array.v2i32.zero",
  "llvm.nvvm.sust.b.1d.array.v2i64.clamp",
  "llvm.nvvm.sust.b.1d.array.v2i64.trap",
  "llvm.nvvm.sust.b.1d.array.v2i64.zero",
  "llvm.nvvm.sust.b.1d.array.v2i8.clamp",
  "llvm.nvvm.sust.b.1d.array.v2i8.trap",
  "llvm.nvvm.sust.b.1d.array.v2i8.zero",
  "llvm.nvvm.sust.b.1d.array.v4i16.clamp",
  "llvm.nvvm.sust.b.1d.array.v4i16.trap",
  "llvm.nvvm.sust.b.1d.array.v4i16.zero",
  "llvm.nvvm.sust.b.1d.array.v4i32.clamp",
  "llvm.nvvm.sust.b.1d.array.v4i32.trap",
  "llvm.nvvm.sust.b.1d.array.v4i32.zero",
  "llvm.nvvm.sust.b.1d.array.v4i8.clamp",
  "llvm.nvvm.sust.b.1d.array.v4i8.trap",
  "llvm.nvvm.sust.b.1d.array.v4i8.zero",
  "llvm.nvvm.sust.b.1d.i16.clamp",
  "llvm.nvvm.sust.b.1d.i16.trap",
  "llvm.nvvm.sust.b.1d.i16.zero",
  "llvm.nvvm.sust.b.1d.i32.clamp",
  "llvm.nvvm.sust.b.1d.i32.trap",
  "llvm.nvvm.sust.b.1d.i32.zero",
  "llvm.nvvm.sust.b.1d.i64.clamp",
  "llvm.nvvm.sust.b.1d.i64.trap",
  "llvm.nvvm.sust.b.1d.i64.zero",
  "llvm.nvvm.sust.b.1d.i8.clamp",
  "llvm.nvvm.sust.b.1d.i8.trap",
  "llvm.nvvm.sust.b.1d.i8.zero",
  "llvm.nvvm.sust.b.1d.v2i16.clamp",
  "llvm.nvvm.sust.b.1d.v2i16.trap",
  "llvm.nvvm.sust.b.1d.v2i16.zero",
  "llvm.nvvm.sust.b.1d.v2i32.clamp",
  "llvm.nvvm.sust.b.1d.v2i32.trap",
  "llvm.nvvm.sust.b.1d.v2i32.zero",
  "llvm.nvvm.sust.b.1d.v2i64.clamp",
  "llvm.nvvm.sust.b.1d.v2i64.trap",
  "llvm.nvvm.sust.b.1d.v2i64.zero",
  "llvm.nvvm.sust.b.1d.v2i8.clamp",
  "llvm.nvvm.sust.b.1d.v2i8.trap",
  "llvm.nvvm.sust.b.1d.v2i8.zero",
  "llvm.nvvm.sust.b.1d.v4i16.clamp",
  "llvm.nvvm.sust.b.1d.v4i16.trap",
  "llvm.nvvm.sust.b.1d.v4i16.zero",
  "llvm.nvvm.sust.b.1d.v4i32.clamp",
  "llvm.nvvm.sust.b.1d.v4i32.trap",
  "llvm.nvvm.sust.b.1d.v4i32.zero",
  "llvm.nvvm.sust.b.1d.v4i8.clamp",
  "llvm.nvvm.sust.b.1d.v4i8.trap",
  "llvm.nvvm.sust.b.1d.v4i8.zero",
  "llvm.nvvm.sust.b.2d.array.i16.clamp",
  "llvm.nvvm.sust.b.2d.array.i16.trap",
  "llvm.nvvm.sust.b.2d.array.i16.zero",
  "llvm.nvvm.sust.b.2d.array.i32.clamp",
  "llvm.nvvm.sust.b.2d.array.i32.trap",
  "llvm.nvvm.sust.b.2d.array.i32.zero",
  "llvm.nvvm.sust.b.2d.array.i64.clamp",
  "llvm.nvvm.sust.b.2d.array.i64.trap",
  "llvm.nvvm.sust.b.2d.array.i64.zero",
  "llvm.nvvm.sust.b.2d.array.i8.clamp",
  "llvm.nvvm.sust.b.2d.array.i8.trap",
  "llvm.nvvm.sust.b.2d.array.i8.zero",
  "llvm.nvvm.sust.b.2d.array.v2i16.clamp",
  "llvm.nvvm.sust.b.2d.array.v2i16.trap",
  "llvm.nvvm.sust.b.2d.array.v2i16.zero",
  "llvm.nvvm.sust.b.2d.array.v2i32.clamp",
  "llvm.nvvm.sust.b.2d.array.v2i32.trap",
  "llvm.nvvm.sust.b.2d.array.v2i32.zero",
  "llvm.nvvm.sust.b.2d.array.v2i64.clamp",
  "llvm.nvvm.sust.b.2d.array.v2i64.trap",
  "llvm.nvvm.sust.b.2d.array.v2i64.zero",
  "llvm.nvvm.sust.b.2d.array.v2i8.clamp",
  "llvm.nvvm.sust.b.2d.array.v2i8.trap",
  "llvm.nvvm.sust.b.2d.array.v2i8.zero",
  "llvm.nvvm.sust.b.2d.array.v4i16.clamp",
  "llvm.nvvm.sust.b.2d.array.v4i16.trap",
  "llvm.nvvm.sust.b.2d.array.v4i16.zero",
  "llvm.nvvm.sust.b.2d.array.v4i32.clamp",
  "llvm.nvvm.sust.b.2d.array.v4i32.trap",
  "llvm.nvvm.sust.b.2d.array.v4i32.zero",
  "llvm.nvvm.sust.b.2d.array.v4i8.clamp",
  "llvm.nvvm.sust.b.2d.array.v4i8.trap",
  "llvm.nvvm.sust.b.2d.array.v4i8.zero",
  "llvm.nvvm.sust.b.2d.i16.clamp",
  "llvm.nvvm.sust.b.2d.i16.trap",
  "llvm.nvvm.sust.b.2d.i16.zero",
  "llvm.nvvm.sust.b.2d.i32.clamp",
  "llvm.nvvm.sust.b.2d.i32.trap",
  "llvm.nvvm.sust.b.2d.i32.zero",
  "llvm.nvvm.sust.b.2d.i64.clamp",
  "llvm.nvvm.sust.b.2d.i64.trap",
  "llvm.nvvm.sust.b.2d.i64.zero",
  "llvm.nvvm.sust.b.2d.i8.clamp",
  "llvm.nvvm.sust.b.2d.i8.trap",
  "llvm.nvvm.sust.b.2d.i8.zero",
  "llvm.nvvm.sust.b.2d.v2i16.clamp",
  "llvm.nvvm.sust.b.2d.v2i16.trap",
  "llvm.nvvm.sust.b.2d.v2i16.zero",
  "llvm.nvvm.sust.b.2d.v2i32.clamp",
  "llvm.nvvm.sust.b.2d.v2i32.trap",
  "llvm.nvvm.sust.b.2d.v2i32.zero",
  "llvm.nvvm.sust.b.2d.v2i64.clamp",
  "llvm.nvvm.sust.b.2d.v2i64.trap",
  "llvm.nvvm.sust.b.2d.v2i64.zero",
  "llvm.nvvm.sust.b.2d.v2i8.clamp",
  "llvm.nvvm.sust.b.2d.v2i8.trap",
  "llvm.nvvm.sust.b.2d.v2i8.zero",
  "llvm.nvvm.sust.b.2d.v4i16.clamp",
  "llvm.nvvm.sust.b.2d.v4i16.trap",
  "llvm.nvvm.sust.b.2d.v4i16.zero",
  "llvm.nvvm.sust.b.2d.v4i32.clamp",
  "llvm.nvvm.sust.b.2d.v4i32.trap",
  "llvm.nvvm.sust.b.2d.v4i32.zero",
  "llvm.nvvm.sust.b.2d.v4i8.clamp",
  "llvm.nvvm.sust.b.2d.v4i8.trap",
  "llvm.nvvm.sust.b.2d.v4i8.zero",
  "llvm.nvvm.sust.b.3d.i16.clamp",
  "llvm.nvvm.sust.b.3d.i16.trap",
  "llvm.nvvm.sust.b.3d.i16.zero",
  "llvm.nvvm.sust.b.3d.i32.clamp",
  "llvm.nvvm.sust.b.3d.i32.trap",
  "llvm.nvvm.sust.b.3d.i32.zero",
  "llvm.nvvm.sust.b.3d.i64.clamp",
  "llvm.nvvm.sust.b.3d.i64.trap",
  "llvm.nvvm.sust.b.3d.i64.zero",
  "llvm.nvvm.sust.b.3d.i8.clamp",
  "llvm.nvvm.sust.b.3d.i8.trap",
  "llvm.nvvm.sust.b.3d.i8.zero",
  "llvm.nvvm.sust.b.3d.v2i16.clamp",
  "llvm.nvvm.sust.b.3d.v2i16.trap",
  "llvm.nvvm.sust.b.3d.v2i16.zero",
  "llvm.nvvm.sust.b.3d.v2i32.clamp",
  "llvm.nvvm.sust.b.3d.v2i32.trap",
  "llvm.nvvm.sust.b.3d.v2i32.zero",
  "llvm.nvvm.sust.b.3d.v2i64.clamp",
  "llvm.nvvm.sust.b.3d.v2i64.trap",
  "llvm.nvvm.sust.b.3d.v2i64.zero",
  "llvm.nvvm.sust.b.3d.v2i8.clamp",
  "llvm.nvvm.sust.b.3d.v2i8.trap",
  "llvm.nvvm.sust.b.3d.v2i8.zero",
  "llvm.nvvm.sust.b.3d.v4i16.clamp",
  "llvm.nvvm.sust.b.3d.v4i16.trap",
  "llvm.nvvm.sust.b.3d.v4i16.zero",
  "llvm.nvvm.sust.b.3d.v4i32.clamp",
  "llvm.nvvm.sust.b.3d.v4i32.trap",
  "llvm.nvvm.sust.b.3d.v4i32.zero",
  "llvm.nvvm.sust.b.3d.v4i8.clamp",
  "llvm.nvvm.sust.b.3d.v4i8.trap",
  "llvm.nvvm.sust.b.3d.v4i8.zero",
  "llvm.nvvm.sust.p.1d.array.i16.trap",
  "llvm.nvvm.sust.p.1d.array.i32.trap",
  "llvm.nvvm.sust.p.1d.array.i8.trap",
  "llvm.nvvm.sust.p.1d.array.v2i16.trap",
  "llvm.nvvm.sust.p.1d.array.v2i32.trap",
  "llvm.nvvm.sust.p.1d.array.v2i8.trap",
  "llvm.nvvm.sust.p.1d.array.v4i16.trap",
  "llvm.nvvm.sust.p.1d.array.v4i32.trap",
  "llvm.nvvm.sust.p.1d.array.v4i8.trap",
  "llvm.nvvm.sust.p.1d.i16.trap",
  "llvm.nvvm.sust.p.1d.i32.trap",
  "llvm.nvvm.sust.p.1d.i8.trap",
  "llvm.nvvm.sust.p.1d.v2i16.trap",
  "llvm.nvvm.sust.p.1d.v2i32.trap",
  "llvm.nvvm.sust.p.1d.v2i8.trap",
  "llvm.nvvm.sust.p.1d.v4i16.trap",
  "llvm.nvvm.sust.p.1d.v4i32.trap",
  "llvm.nvvm.sust.p.1d.v4i8.trap",
  "llvm.nvvm.sust.p.2d.array.i16.trap",
  "llvm.nvvm.sust.p.2d.array.i32.trap",
  "llvm.nvvm.sust.p.2d.array.i8.trap",
  "llvm.nvvm.sust.p.2d.array.v2i16.trap",
  "llvm.nvvm.sust.p.2d.array.v2i32.trap",
  "llvm.nvvm.sust.p.2d.array.v2i8.trap",
  "llvm.nvvm.sust.p.2d.array.v4i16.trap",
  "llvm.nvvm.sust.p.2d.array.v4i32.trap",
  "llvm.nvvm.sust.p.2d.array.v4i8.trap",
  "llvm.nvvm.sust.p.2d.i16.trap",
  "llvm.nvvm.sust.p.2d.i32.trap",
  "llvm.nvvm.sust.p.2d.i8.trap",
  "llvm.nvvm.sust.p.2d.v2i16.trap",
  "llvm.nvvm.sust.p.2d.v2i32.trap",
  "llvm.nvvm.sust.p.2d.v2i8.trap",
  "llvm.nvvm.sust.p.2d.v4i16.trap",
  "llvm.nvvm.sust.p.2d.v4i32.trap",
  "llvm.nvvm.sust.p.2d.v4i8.trap",
  "llvm.nvvm.sust.p.3d.i16.trap",
  "llvm.nvvm.sust.p.3d.i32.trap",
  "llvm.nvvm.sust.p.3d.i8.trap",
  "llvm.nvvm.sust.p.3d.v2i16.trap",
  "llvm.nvvm.sust.p.3d.v2i32.trap",
  "llvm.nvvm.sust.p.3d.v2i8.trap",
  "llvm.nvvm.sust.p.3d.v4i16.trap",
  "llvm.nvvm.sust.p.3d.v4i32.trap",
  "llvm.nvvm.sust.p.3d.v4i8.trap",
  "llvm.nvvm.swap.lo.hi.b64",
  "llvm.nvvm.tex.1d.array.grad.v4f32.f32",
  "llvm.nvvm.tex.1d.array.grad.v4s32.f32",
  "llvm.nvvm.tex.1d.array.grad.v4u32.f32",
  "llvm.nvvm.tex.1d.array.level.v4f32.f32",
  "llvm.nvvm.tex.1d.array.level.v4s32.f32",
  "llvm.nvvm.tex.1d.array.level.v4u32.f32",
  "llvm.nvvm.tex.1d.array.v4f32.f32",
  "llvm.nvvm.tex.1d.array.v4f32.s32",
  "llvm.nvvm.tex.1d.array.v4s32.f32",
  "llvm.nvvm.tex.1d.array.v4s32.s32",
  "llvm.nvvm.tex.1d.array.v4u32.f32",
  "llvm.nvvm.tex.1d.array.v4u32.s32",
  "llvm.nvvm.tex.1d.grad.v4f32.f32",
  "llvm.nvvm.tex.1d.grad.v4s32.f32",
  "llvm.nvvm.tex.1d.grad.v4u32.f32",
  "llvm.nvvm.tex.1d.level.v4f32.f32",
  "llvm.nvvm.tex.1d.level.v4s32.f32",
  "llvm.nvvm.tex.1d.level.v4u32.f32",
  "llvm.nvvm.tex.1d.v4f32.f32",
  "llvm.nvvm.tex.1d.v4f32.s32",
  "llvm.nvvm.tex.1d.v4s32.f32",
  "llvm.nvvm.tex.1d.v4s32.s32",
  "llvm.nvvm.tex.1d.v4u32.f32",
  "llvm.nvvm.tex.1d.v4u32.s32",
  "llvm.nvvm.tex.2d.array.grad.v4f32.f32",
  "llvm.nvvm.tex.2d.array.grad.v4s32.f32",
  "llvm.nvvm.tex.2d.array.grad.v4u32.f32",
  "llvm.nvvm.tex.2d.array.level.v4f32.f32",
  "llvm.nvvm.tex.2d.array.level.v4s32.f32",
  "llvm.nvvm.tex.2d.array.level.v4u32.f32",
  "llvm.nvvm.tex.2d.array.v4f32.f32",
  "llvm.nvvm.tex.2d.array.v4f32.s32",
  "llvm.nvvm.tex.2d.array.v4s32.f32",
  "llvm.nvvm.tex.2d.array.v4s32.s32",
  "llvm.nvvm.tex.2d.array.v4u32.f32",
  "llvm.nvvm.tex.2d.array.v4u32.s32",
  "llvm.nvvm.tex.2d.grad.v4f32.f32",
  "llvm.nvvm.tex.2d.grad.v4s32.f32",
  "llvm.nvvm.tex.2d.grad.v4u32.f32",
  "llvm.nvvm.tex.2d.level.v4f32.f32",
  "llvm.nvvm.tex.2d.level.v4s32.f32",
  "llvm.nvvm.tex.2d.level.v4u32.f32",
  "llvm.nvvm.tex.2d.v4f32.f32",
  "llvm.nvvm.tex.2d.v4f32.s32",
  "llvm.nvvm.tex.2d.v4s32.f32",
  "llvm.nvvm.tex.2d.v4s32.s32",
  "llvm.nvvm.tex.2d.v4u32.f32",
  "llvm.nvvm.tex.2d.v4u32.s32",
  "llvm.nvvm.tex.3d.grad.v4f32.f32",
  "llvm.nvvm.tex.3d.grad.v4s32.f32",
  "llvm.nvvm.tex.3d.grad.v4u32.f32",
  "llvm.nvvm.tex.3d.level.v4f32.f32",
  "llvm.nvvm.tex.3d.level.v4s32.f32",
  "llvm.nvvm.tex.3d.level.v4u32.f32",
  "llvm.nvvm.tex.3d.v4f32.f32",
  "llvm.nvvm.tex.3d.v4f32.s32",
  "llvm.nvvm.tex.3d.v4s32.f32",
  "llvm.nvvm.tex.3d.v4s32.s32",
  "llvm.nvvm.tex.3d.v4u32.f32",
  "llvm.nvvm.tex.3d.v4u32.s32",
  "llvm.nvvm.tex.cube.array.level.v4f32.f32",
  "llvm.nvvm.tex.cube.array.level.v4s32.f32",
  "llvm.nvvm.tex.cube.array.level.v4u32.f32",
  "llvm.nvvm.tex.cube.array.v4f32.f32",
  "llvm.nvvm.tex.cube.array.v4s32.f32",
  "llvm.nvvm.tex.cube.array.v4u32.f32",
  "llvm.nvvm.tex.cube.level.v4f32.f32",
  "llvm.nvvm.tex.cube.level.v4s32.f32",
  "llvm.nvvm.tex.cube.level.v4u32.f32",
  "llvm.nvvm.tex.cube.v4f32.f32",
  "llvm.nvvm.tex.cube.v4s32.f32",
  "llvm.nvvm.tex.cube.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32",
  "llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32",
  "llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.array.level.v4f32.f32",
  "llvm.nvvm.tex.unified.1d.array.level.v4s32.f32",
  "llvm.nvvm.tex.unified.1d.array.level.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.array.v4f32.f32",
  "llvm.nvvm.tex.unified.1d.array.v4f32.s32",
  "llvm.nvvm.tex.unified.1d.array.v4s32.f32",
  "llvm.nvvm.tex.unified.1d.array.v4s32.s32",
  "llvm.nvvm.tex.unified.1d.array.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.array.v4u32.s32",
  "llvm.nvvm.tex.unified.1d.grad.v4f32.f32",
  "llvm.nvvm.tex.unified.1d.grad.v4s32.f32",
  "llvm.nvvm.tex.unified.1d.grad.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.level.v4f32.f32",
  "llvm.nvvm.tex.unified.1d.level.v4s32.f32",
  "llvm.nvvm.tex.unified.1d.level.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.v4f32.f32",
  "llvm.nvvm.tex.unified.1d.v4f32.s32",
  "llvm.nvvm.tex.unified.1d.v4s32.f32",
  "llvm.nvvm.tex.unified.1d.v4s32.s32",
  "llvm.nvvm.tex.unified.1d.v4u32.f32",
  "llvm.nvvm.tex.unified.1d.v4u32.s32",
  "llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32",
  "llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32",
  "llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32",
  "llvm.nvvm.tex.unified.2d.array.level.v4f32.f32",
  "llvm.nvvm.tex.unified.2d.array.level.v4s32.f32",
  "llvm.nvvm.tex.unified.2d.array.level.v4u32.f32",
  "llvm.nvvm.tex.unified.2d.array.v4f32.f32",
  "llvm.nvvm.tex.unified.2d.array.v4f32.s32",
  "llvm.nvvm.tex.unified.2d.array.v4s32.f32",
  "llvm.nvvm.tex.unified.2d.array.v4s32.s32",
  "llvm.nvvm.tex.unified.2d.array.v4u32.f32",
  "llvm.nvvm.tex.unified.2d.array.v4u32.s32",
  "llvm.nvvm.tex.unified.2d.grad.v4f32.f32",
  "llvm.nvvm.tex.unified.2d.grad.v4s32.f32",
  "llvm.nvvm.tex.unified.2d.grad.v4u32.f32",
  "llvm.nvvm.tex.unified.2d.level.v4f32.f32",
  "llvm.nvvm.tex.unified.2d.level.v4s32.f32",
  "llvm.nvvm.tex.unified.2d.level.v4u32.f32",
  "llvm.nvvm.tex.unified.2d.v4f32.f32",
  "llvm.nvvm.tex.unified.2d.v4f32.s32",
  "llvm.nvvm.tex.unified.2d.v4s32.f32",
  "llvm.nvvm.tex.unified.2d.v4s32.s32",
  "llvm.nvvm.tex.unified.2d.v4u32.f32",
  "llvm.nvvm.tex.unified.2d.v4u32.s32",
  "llvm.nvvm.tex.unified.3d.grad.v4f32.f32",
  "llvm.nvvm.tex.unified.3d.grad.v4s32.f32",
  "llvm.nvvm.tex.unified.3d.grad.v4u32.f32",
  "llvm.nvvm.tex.unified.3d.level.v4f32.f32",
  "llvm.nvvm.tex.unified.3d.level.v4s32.f32",
  "llvm.nvvm.tex.unified.3d.level.v4u32.f32",
  "llvm.nvvm.tex.unified.3d.v4f32.f32",
  "llvm.nvvm.tex.unified.3d.v4f32.s32",
  "llvm.nvvm.tex.unified.3d.v4s32.f32",
  "llvm.nvvm.tex.unified.3d.v4s32.s32",
  "llvm.nvvm.tex.unified.3d.v4u32.f32",
  "llvm.nvvm.tex.unified.3d.v4u32.s32",
  "llvm.nvvm.tex.unified.cube.array.level.v4f32.f32",
  "llvm.nvvm.tex.unified.cube.array.level.v4s32.f32",
  "llvm.nvvm.tex.unified.cube.array.level.v4u32.f32",
  "llvm.nvvm.tex.unified.cube.array.v4f32.f32",
  "llvm.nvvm.tex.unified.cube.array.v4s32.f32",
  "llvm.nvvm.tex.unified.cube.array.v4u32.f32",
  "llvm.nvvm.tex.unified.cube.level.v4f32.f32",
  "llvm.nvvm.tex.unified.cube.level.v4s32.f32",
  "llvm.nvvm.tex.unified.cube.level.v4u32.f32",
  "llvm.nvvm.tex.unified.cube.v4f32.f32",
  "llvm.nvvm.tex.unified.cube.v4s32.f32",
  "llvm.nvvm.tex.unified.cube.v4u32.f32",
  "llvm.nvvm.texsurf.handle",
  "llvm.nvvm.texsurf.handle.internal",
  "llvm.nvvm.tld4.a.2d.v4f32.f32",
  "llvm.nvvm.tld4.a.2d.v4s32.f32",
  "llvm.nvvm.tld4.a.2d.v4u32.f32",
  "llvm.nvvm.tld4.b.2d.v4f32.f32",
  "llvm.nvvm.tld4.b.2d.v4s32.f32",
  "llvm.nvvm.tld4.b.2d.v4u32.f32",
  "llvm.nvvm.tld4.g.2d.v4f32.f32",
  "llvm.nvvm.tld4.g.2d.v4s32.f32",
  "llvm.nvvm.tld4.g.2d.v4u32.f32",
  "llvm.nvvm.tld4.r.2d.v4f32.f32",
  "llvm.nvvm.tld4.r.2d.v4s32.f32",
  "llvm.nvvm.tld4.r.2d.v4u32.f32",
  "llvm.nvvm.tld4.unified.a.2d.v4f32.f32",
  "llvm.nvvm.tld4.unified.a.2d.v4s32.f32",
  "llvm.nvvm.tld4.unified.a.2d.v4u32.f32",
  "llvm.nvvm.tld4.unified.b.2d.v4f32.f32",
  "llvm.nvvm.tld4.unified.b.2d.v4s32.f32",
  "llvm.nvvm.tld4.unified.b.2d.v4u32.f32",
  "llvm.nvvm.tld4.unified.g.2d.v4f32.f32",
  "llvm.nvvm.tld4.unified.g.2d.v4s32.f32",
  "llvm.nvvm.tld4.unified.g.2d.v4u32.f32",
  "llvm.nvvm.tld4.unified.r.2d.v4f32.f32",
  "llvm.nvvm.tld4.unified.r.2d.v4s32.f32",
  "llvm.nvvm.tld4.unified.r.2d.v4u32.f32",
  "llvm.nvvm.trunc.d",
  "llvm.nvvm.trunc.f",
  "llvm.nvvm.trunc.ftz.f",
  "llvm.nvvm.txq.array.size",
  "llvm.nvvm.txq.channel.data.type",
  "llvm.nvvm.txq.channel.order",
  "llvm.nvvm.txq.depth",
  "llvm.nvvm.txq.height",
  "llvm.nvvm.txq.num.mipmap.levels",
  "llvm.nvvm.txq.num.samples",
  "llvm.nvvm.txq.width",
  "llvm.nvvm.ui2d.rm",
  "llvm.nvvm.ui2d.rn",
  "llvm.nvvm.ui2d.rp",
  "llvm.nvvm.ui2d.rz",
  "llvm.nvvm.ui2f.rm",
  "llvm.nvvm.ui2f.rn",
  "llvm.nvvm.ui2f.rp",
  "llvm.nvvm.ui2f.rz",
  "llvm.nvvm.ull2d.rm",
  "llvm.nvvm.ull2d.rn",
  "llvm.nvvm.ull2d.rp",
  "llvm.nvvm.ull2d.rz",
  "llvm.nvvm.ull2f.rm",
  "llvm.nvvm.ull2f.rn",
  "llvm.nvvm.ull2f.rp",
  "llvm.nvvm.ull2f.rz",
  "llvm.nvvm.vote.all",
  "llvm.nvvm.vote.all.sync",
  "llvm.nvvm.vote.any",
  "llvm.nvvm.vote.any.sync",
  "llvm.nvvm.vote.ballot",
  "llvm.nvvm.vote.ballot.sync",
  "llvm.nvvm.vote.uni",
  "llvm.nvvm.vote.uni.sync",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.f16",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.s8",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8",
  "llvm.nvvm.wmma.m16n16k16.load.a.col.u8",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.f16",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.s8",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8",
  "llvm.nvvm.wmma.m16n16k16.load.a.row.u8",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.f16",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.s8",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8",
  "llvm.nvvm.wmma.m16n16k16.load.b.col.u8",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.f16",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.s8",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8",
  "llvm.nvvm.wmma.m16n16k16.load.b.row.u8",
  "llvm.nvvm.wmma.m16n16k16.load.c.col.f16",
  "llvm.nvvm.wmma.m16n16k16.load.c.col.f32",
  "llvm.nvvm.wmma.m16n16k16.load.c.col.s32",
  "llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32",
  "llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32",
  "llvm.nvvm.wmma.m16n16k16.load.c.row.f16",
  "llvm.nvvm.wmma.m16n16k16.load.c.row.f32",
  "llvm.nvvm.wmma.m16n16k16.load.c.row.s32",
  "llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32",
  "llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.s8",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.u8",
  "llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.bf16",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.s8",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.u8",
  "llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.bf16",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.s8",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.u8",
  "llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.s8",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.u8",
  "llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite",
  "llvm.nvvm.wmma.m16n16k16.store.d.col.f16",
  "llvm.nvvm.wmma.m16n16k16.store.d.col.f32",
  "llvm.nvvm.wmma.m16n16k16.store.d.col.s32",
  "llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32",
  "llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32",
  "llvm.nvvm.wmma.m16n16k16.store.d.row.f16",
  "llvm.nvvm.wmma.m16n16k16.store.d.row.f32",
  "llvm.nvvm.wmma.m16n16k16.store.d.row.s32",
  "llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16",
  "llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32",
  "llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32",
  "llvm.nvvm.wmma.m16n16k8.load.a.col.stride.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.a.col.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.a.row.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.b.col.stride.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.b.col.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.b.row.stride.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.b.row.tf32",
  "llvm.nvvm.wmma.m16n16k8.load.c.col.f32",
  "llvm.nvvm.wmma.m16n16k8.load.c.col.stride.f32",
  "llvm.nvvm.wmma.m16n16k8.load.c.row.f32",
  "llvm.nvvm.wmma.m16n16k8.load.c.row.stride.f32",
  "llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32",
  "llvm.nvvm.wmma.m16n16k8.mma.col.row.tf32",
  "llvm.nvvm.wmma.m16n16k8.mma.row.col.tf32",
  "llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32",
  "llvm.nvvm.wmma.m16n16k8.store.d.col.f32",
  "llvm.nvvm.wmma.m16n16k8.store.d.col.stride.f32",
  "llvm.nvvm.wmma.m16n16k8.store.d.row.f32",
  "llvm.nvvm.wmma.m16n16k8.store.d.row.stride.f32",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.f16",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.s8",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8",
  "llvm.nvvm.wmma.m32n8k16.load.a.col.u8",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.f16",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.s8",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8",
  "llvm.nvvm.wmma.m32n8k16.load.a.row.u8",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.f16",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.s8",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8",
  "llvm.nvvm.wmma.m32n8k16.load.b.col.u8",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.f16",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.s8",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8",
  "llvm.nvvm.wmma.m32n8k16.load.b.row.u8",
  "llvm.nvvm.wmma.m32n8k16.load.c.col.f16",
  "llvm.nvvm.wmma.m32n8k16.load.c.col.f32",
  "llvm.nvvm.wmma.m32n8k16.load.c.col.s32",
  "llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32",
  "llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32",
  "llvm.nvvm.wmma.m32n8k16.load.c.row.f16",
  "llvm.nvvm.wmma.m32n8k16.load.c.row.f32",
  "llvm.nvvm.wmma.m32n8k16.load.c.row.s32",
  "llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32",
  "llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.s8",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.u8",
  "llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.bf16",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.s8",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.u8",
  "llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.bf16",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.s8",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.u8",
  "llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.s8",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.u8",
  "llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite",
  "llvm.nvvm.wmma.m32n8k16.store.d.col.f16",
  "llvm.nvvm.wmma.m32n8k16.store.d.col.f32",
  "llvm.nvvm.wmma.m32n8k16.store.d.col.s32",
  "llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32",
  "llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32",
  "llvm.nvvm.wmma.m32n8k16.store.d.row.f16",
  "llvm.nvvm.wmma.m32n8k16.store.d.row.f32",
  "llvm.nvvm.wmma.m32n8k16.store.d.row.s32",
  "llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16",
  "llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32",
  "llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.f16",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.s8",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8",
  "llvm.nvvm.wmma.m8n32k16.load.a.col.u8",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.f16",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.s8",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8",
  "llvm.nvvm.wmma.m8n32k16.load.a.row.u8",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.f16",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.s8",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8",
  "llvm.nvvm.wmma.m8n32k16.load.b.col.u8",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.f16",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.s8",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8",
  "llvm.nvvm.wmma.m8n32k16.load.b.row.u8",
  "llvm.nvvm.wmma.m8n32k16.load.c.col.f16",
  "llvm.nvvm.wmma.m8n32k16.load.c.col.f32",
  "llvm.nvvm.wmma.m8n32k16.load.c.col.s32",
  "llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32",
  "llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32",
  "llvm.nvvm.wmma.m8n32k16.load.c.row.f16",
  "llvm.nvvm.wmma.m8n32k16.load.c.row.f32",
  "llvm.nvvm.wmma.m8n32k16.load.c.row.s32",
  "llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32",
  "llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.s8",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.u8",
  "llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.bf16",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.s8",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.u8",
  "llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.bf16",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.s8",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.u8",
  "llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.s8",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.u8",
  "llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite",
  "llvm.nvvm.wmma.m8n32k16.store.d.col.f16",
  "llvm.nvvm.wmma.m8n32k16.store.d.col.f32",
  "llvm.nvvm.wmma.m8n32k16.store.d.col.s32",
  "llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32",
  "llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32",
  "llvm.nvvm.wmma.m8n32k16.store.d.row.f16",
  "llvm.nvvm.wmma.m8n32k16.store.d.row.f32",
  "llvm.nvvm.wmma.m8n32k16.store.d.row.s32",
  "llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16",
  "llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32",
  "llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32",
  "llvm.nvvm.wmma.m8n8k128.load.a.row.b1",
  "llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1",
  "llvm.nvvm.wmma.m8n8k128.load.b.col.b1",
  "llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1",
  "llvm.nvvm.wmma.m8n8k128.load.c.col.s32",
  "llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32",
  "llvm.nvvm.wmma.m8n8k128.load.c.row.s32",
  "llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32",
  "llvm.nvvm.wmma.m8n8k128.mma.and.popc.row.col.b1",
  "llvm.nvvm.wmma.m8n8k128.mma.xor.popc.row.col.b1",
  "llvm.nvvm.wmma.m8n8k128.store.d.col.s32",
  "llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32",
  "llvm.nvvm.wmma.m8n8k128.store.d.row.s32",
  "llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32",
  "llvm.nvvm.wmma.m8n8k32.load.a.row.s4",
  "llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4",
  "llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4",
  "llvm.nvvm.wmma.m8n8k32.load.a.row.u4",
  "llvm.nvvm.wmma.m8n8k32.load.b.col.s4",
  "llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4",
  "llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4",
  "llvm.nvvm.wmma.m8n8k32.load.b.col.u4",
  "llvm.nvvm.wmma.m8n8k32.load.c.col.s32",
  "llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32",
  "llvm.nvvm.wmma.m8n8k32.load.c.row.s32",
  "llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32",
  "llvm.nvvm.wmma.m8n8k32.mma.row.col.s4",
  "llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite",
  "llvm.nvvm.wmma.m8n8k32.mma.row.col.u4",
  "llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite",
  "llvm.nvvm.wmma.m8n8k32.store.d.col.s32",
  "llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32",
  "llvm.nvvm.wmma.m8n8k32.store.d.row.s32",
  "llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32",
  "llvm.nvvm.wmma.m8n8k4.load.a.col.f64",
  "llvm.nvvm.wmma.m8n8k4.load.a.col.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.load.a.row.f64",
  "llvm.nvvm.wmma.m8n8k4.load.a.row.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.load.b.col.f64",
  "llvm.nvvm.wmma.m8n8k4.load.b.col.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.load.b.row.f64",
  "llvm.nvvm.wmma.m8n8k4.load.b.row.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.load.c.col.f64",
  "llvm.nvvm.wmma.m8n8k4.load.c.col.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.load.c.row.f64",
  "llvm.nvvm.wmma.m8n8k4.load.c.row.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.col.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.col.rm.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.col.rn.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.col.rp.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.col.rz.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.row.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.row.rm.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.row.rn.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.row.rp.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.col.row.rz.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.col.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.col.rm.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.col.rn.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.col.rp.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.col.rz.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.row.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.row.rm.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.row.rn.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.row.rp.f64",
  "llvm.nvvm.wmma.m8n8k4.mma.row.row.rz.f64",
  "llvm.nvvm.wmma.m8n8k4.store.d.col.f64",
  "llvm.nvvm.wmma.m8n8k4.store.d.col.stride.f64",
  "llvm.nvvm.wmma.m8n8k4.store.d.row.f64",
  "llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64",
  "llvm.ppc.addex",
  "llvm.ppc.addf128.round.to.odd",
  "llvm.ppc.altivec.crypto.vcipher",
  "llvm.ppc.altivec.crypto.vcipherlast",
  "llvm.ppc.altivec.crypto.vncipher",
  "llvm.ppc.altivec.crypto.vncipherlast",
  "llvm.ppc.altivec.crypto.vpermxor",
  "llvm.ppc.altivec.crypto.vpermxor.be",
  "llvm.ppc.altivec.crypto.vpmsumb",
  "llvm.ppc.altivec.crypto.vpmsumd",
  "llvm.ppc.altivec.crypto.vpmsumh",
  "llvm.ppc.altivec.crypto.vpmsumw",
  "llvm.ppc.altivec.crypto.vsbox",
  "llvm.ppc.altivec.crypto.vshasigmad",
  "llvm.ppc.altivec.crypto.vshasigmaw",
  "llvm.ppc.altivec.dss",
  "llvm.ppc.altivec.dssall",
  "llvm.ppc.altivec.dst",
  "llvm.ppc.altivec.dstst",
  "llvm.ppc.altivec.dststt",
  "llvm.ppc.altivec.dstt",
  "llvm.ppc.altivec.lvebx",
  "llvm.ppc.altivec.lvehx",
  "llvm.ppc.altivec.lvewx",
  "llvm.ppc.altivec.lvsl",
  "llvm.ppc.altivec.lvsr",
  "llvm.ppc.altivec.lvx",
  "llvm.ppc.altivec.lvxl",
  "llvm.ppc.altivec.mfvscr",
  "llvm.ppc.altivec.mtvscr",
  "llvm.ppc.altivec.mtvsrbm",
  "llvm.ppc.altivec.mtvsrdm",
  "llvm.ppc.altivec.mtvsrhm",
  "llvm.ppc.altivec.mtvsrqm",
  "llvm.ppc.altivec.mtvsrwm",
  "llvm.ppc.altivec.stvebx",
  "llvm.ppc.altivec.stvehx",
  "llvm.ppc.altivec.stvewx",
  "llvm.ppc.altivec.stvx",
  "llvm.ppc.altivec.stvxl",
  "llvm.ppc.altivec.vabsdub",
  "llvm.ppc.altivec.vabsduh",
  "llvm.ppc.altivec.vabsduw",
  "llvm.ppc.altivec.vaddcuq",
  "llvm.ppc.altivec.vaddcuw",
  "llvm.ppc.altivec.vaddecuq",
  "llvm.ppc.altivec.vaddeuqm",
  "llvm.ppc.altivec.vaddsbs",
  "llvm.ppc.altivec.vaddshs",
  "llvm.ppc.altivec.vaddsws",
  "llvm.ppc.altivec.vaddubs",
  "llvm.ppc.altivec.vadduhs",
  "llvm.ppc.altivec.vadduws",
  "llvm.ppc.altivec.vavgsb",
  "llvm.ppc.altivec.vavgsh",
  "llvm.ppc.altivec.vavgsw",
  "llvm.ppc.altivec.vavgub",
  "llvm.ppc.altivec.vavguh",
  "llvm.ppc.altivec.vavguw",
  "llvm.ppc.altivec.vbpermd",
  "llvm.ppc.altivec.vbpermq",
  "llvm.ppc.altivec.vcfsx",
  "llvm.ppc.altivec.vcfuged",
  "llvm.ppc.altivec.vcfux",
  "llvm.ppc.altivec.vclrlb",
  "llvm.ppc.altivec.vclrrb",
  "llvm.ppc.altivec.vclzdm",
  "llvm.ppc.altivec.vclzlsbb",
  "llvm.ppc.altivec.vcmpbfp",
  "llvm.ppc.altivec.vcmpbfp.p",
  "llvm.ppc.altivec.vcmpeqfp",
  "llvm.ppc.altivec.vcmpeqfp.p",
  "llvm.ppc.altivec.vcmpequb",
  "llvm.ppc.altivec.vcmpequb.p",
  "llvm.ppc.altivec.vcmpequd",
  "llvm.ppc.altivec.vcmpequd.p",
  "llvm.ppc.altivec.vcmpequh",
  "llvm.ppc.altivec.vcmpequh.p",
  "llvm.ppc.altivec.vcmpequq",
  "llvm.ppc.altivec.vcmpequq.p",
  "llvm.ppc.altivec.vcmpequw",
  "llvm.ppc.altivec.vcmpequw.p",
  "llvm.ppc.altivec.vcmpgefp",
  "llvm.ppc.altivec.vcmpgefp.p",
  "llvm.ppc.altivec.vcmpgtfp",
  "llvm.ppc.altivec.vcmpgtfp.p",
  "llvm.ppc.altivec.vcmpgtsb",
  "llvm.ppc.altivec.vcmpgtsb.p",
  "llvm.ppc.altivec.vcmpgtsd",
  "llvm.ppc.altivec.vcmpgtsd.p",
  "llvm.ppc.altivec.vcmpgtsh",
  "llvm.ppc.altivec.vcmpgtsh.p",
  "llvm.ppc.altivec.vcmpgtsq",
  "llvm.ppc.altivec.vcmpgtsq.p",
  "llvm.ppc.altivec.vcmpgtsw",
  "llvm.ppc.altivec.vcmpgtsw.p",
  "llvm.ppc.altivec.vcmpgtub",
  "llvm.ppc.altivec.vcmpgtub.p",
  "llvm.ppc.altivec.vcmpgtud",
  "llvm.ppc.altivec.vcmpgtud.p",
  "llvm.ppc.altivec.vcmpgtuh",
  "llvm.ppc.altivec.vcmpgtuh.p",
  "llvm.ppc.altivec.vcmpgtuq",
  "llvm.ppc.altivec.vcmpgtuq.p",
  "llvm.ppc.altivec.vcmpgtuw",
  "llvm.ppc.altivec.vcmpgtuw.p",
  "llvm.ppc.altivec.vcmpneb",
  "llvm.ppc.altivec.vcmpneb.p",
  "llvm.ppc.altivec.vcmpneh",
  "llvm.ppc.altivec.vcmpneh.p",
  "llvm.ppc.altivec.vcmpnew",
  "llvm.ppc.altivec.vcmpnew.p",
  "llvm.ppc.altivec.vcmpnezb",
  "llvm.ppc.altivec.vcmpnezb.p",
  "llvm.ppc.altivec.vcmpnezh",
  "llvm.ppc.altivec.vcmpnezh.p",
  "llvm.ppc.altivec.vcmpnezw",
  "llvm.ppc.altivec.vcmpnezw.p",
  "llvm.ppc.altivec.vcntmbb",
  "llvm.ppc.altivec.vcntmbd",
  "llvm.ppc.altivec.vcntmbh",
  "llvm.ppc.altivec.vcntmbw",
  "llvm.ppc.altivec.vctsxs",
  "llvm.ppc.altivec.vctuxs",
  "llvm.ppc.altivec.vctzdm",
  "llvm.ppc.altivec.vctzlsbb",
  "llvm.ppc.altivec.vdivesd",
  "llvm.ppc.altivec.vdivesq",
  "llvm.ppc.altivec.vdivesw",
  "llvm.ppc.altivec.vdiveud",
  "llvm.ppc.altivec.vdiveuq",
  "llvm.ppc.altivec.vdiveuw",
  "llvm.ppc.altivec.vexpandbm",
  "llvm.ppc.altivec.vexpanddm",
  "llvm.ppc.altivec.vexpandhm",
  "llvm.ppc.altivec.vexpandqm",
  "llvm.ppc.altivec.vexpandwm",
  "llvm.ppc.altivec.vexptefp",
  "llvm.ppc.altivec.vextddvlx",
  "llvm.ppc.altivec.vextddvrx",
  "llvm.ppc.altivec.vextdubvlx",
  "llvm.ppc.altivec.vextdubvrx",
  "llvm.ppc.altivec.vextduhvlx",
  "llvm.ppc.altivec.vextduhvrx",
  "llvm.ppc.altivec.vextduwvlx",
  "llvm.ppc.altivec.vextduwvrx",
  "llvm.ppc.altivec.vextractbm",
  "llvm.ppc.altivec.vextractdm",
  "llvm.ppc.altivec.vextracthm",
  "llvm.ppc.altivec.vextractqm",
  "llvm.ppc.altivec.vextractwm",
  "llvm.ppc.altivec.vextsb2d",
  "llvm.ppc.altivec.vextsb2w",
  "llvm.ppc.altivec.vextsd2q",
  "llvm.ppc.altivec.vextsh2d",
  "llvm.ppc.altivec.vextsh2w",
  "llvm.ppc.altivec.vextsw2d",
  "llvm.ppc.altivec.vgbbd",
  "llvm.ppc.altivec.vgnb",
  "llvm.ppc.altivec.vinsblx",
  "llvm.ppc.altivec.vinsbrx",
  "llvm.ppc.altivec.vinsbvlx",
  "llvm.ppc.altivec.vinsbvrx",
  "llvm.ppc.altivec.vinsd",
  "llvm.ppc.altivec.vinsdlx",
  "llvm.ppc.altivec.vinsdrx",
  "llvm.ppc.altivec.vinshlx",
  "llvm.ppc.altivec.vinshrx",
  "llvm.ppc.altivec.vinshvlx",
  "llvm.ppc.altivec.vinshvrx",
  "llvm.ppc.altivec.vinsw",
  "llvm.ppc.altivec.vinswlx",
  "llvm.ppc.altivec.vinswrx",
  "llvm.ppc.altivec.vinswvlx",
  "llvm.ppc.altivec.vinswvrx",
  "llvm.ppc.altivec.vlogefp",
  "llvm.ppc.altivec.vmaddfp",
  "llvm.ppc.altivec.vmaxfp",
  "llvm.ppc.altivec.vmaxsb",
  "llvm.ppc.altivec.vmaxsd",
  "llvm.ppc.altivec.vmaxsh",
  "llvm.ppc.altivec.vmaxsw",
  "llvm.ppc.altivec.vmaxub",
  "llvm.ppc.altivec.vmaxud",
  "llvm.ppc.altivec.vmaxuh",
  "llvm.ppc.altivec.vmaxuw",
  "llvm.ppc.altivec.vmhaddshs",
  "llvm.ppc.altivec.vmhraddshs",
  "llvm.ppc.altivec.vminfp",
  "llvm.ppc.altivec.vminsb",
  "llvm.ppc.altivec.vminsd",
  "llvm.ppc.altivec.vminsh",
  "llvm.ppc.altivec.vminsw",
  "llvm.ppc.altivec.vminub",
  "llvm.ppc.altivec.vminud",
  "llvm.ppc.altivec.vminuh",
  "llvm.ppc.altivec.vminuw",
  "llvm.ppc.altivec.vmladduhm",
  "llvm.ppc.altivec.vmsumcud",
  "llvm.ppc.altivec.vmsummbm",
  "llvm.ppc.altivec.vmsumshm",
  "llvm.ppc.altivec.vmsumshs",
  "llvm.ppc.altivec.vmsumubm",
  "llvm.ppc.altivec.vmsumudm",
  "llvm.ppc.altivec.vmsumuhm",
  "llvm.ppc.altivec.vmsumuhs",
  "llvm.ppc.altivec.vmulesb",
  "llvm.ppc.altivec.vmulesd",
  "llvm.ppc.altivec.vmulesh",
  "llvm.ppc.altivec.vmulesw",
  "llvm.ppc.altivec.vmuleub",
  "llvm.ppc.altivec.vmuleud",
  "llvm.ppc.altivec.vmuleuh",
  "llvm.ppc.altivec.vmuleuw",
  "llvm.ppc.altivec.vmulhsd",
  "llvm.ppc.altivec.vmulhsw",
  "llvm.ppc.altivec.vmulhud",
  "llvm.ppc.altivec.vmulhuw",
  "llvm.ppc.altivec.vmulosb",
  "llvm.ppc.altivec.vmulosd",
  "llvm.ppc.altivec.vmulosh",
  "llvm.ppc.altivec.vmulosw",
  "llvm.ppc.altivec.vmuloub",
  "llvm.ppc.altivec.vmuloud",
  "llvm.ppc.altivec.vmulouh",
  "llvm.ppc.altivec.vmulouw",
  "llvm.ppc.altivec.vnmsubfp",
  "llvm.ppc.altivec.vpdepd",
  "llvm.ppc.altivec.vperm",
  "llvm.ppc.altivec.vpextd",
  "llvm.ppc.altivec.vpkpx",
  "llvm.ppc.altivec.vpksdss",
  "llvm.ppc.altivec.vpksdus",
  "llvm.ppc.altivec.vpkshss",
  "llvm.ppc.altivec.vpkshus",
  "llvm.ppc.altivec.vpkswss",
  "llvm.ppc.altivec.vpkswus",
  "llvm.ppc.altivec.vpkudus",
  "llvm.ppc.altivec.vpkuhus",
  "llvm.ppc.altivec.vpkuwus",
  "llvm.ppc.altivec.vprtybd",
  "llvm.ppc.altivec.vprtybq",
  "llvm.ppc.altivec.vprtybw",
  "llvm.ppc.altivec.vrefp",
  "llvm.ppc.altivec.vrfim",
  "llvm.ppc.altivec.vrfin",
  "llvm.ppc.altivec.vrfip",
  "llvm.ppc.altivec.vrfiz",
  "llvm.ppc.altivec.vrlb",
  "llvm.ppc.altivec.vrld",
  "llvm.ppc.altivec.vrldmi",
  "llvm.ppc.altivec.vrldnm",
  "llvm.ppc.altivec.vrlh",
  "llvm.ppc.altivec.vrlqmi",
  "llvm.ppc.altivec.vrlqnm",
  "llvm.ppc.altivec.vrlw",
  "llvm.ppc.altivec.vrlwmi",
  "llvm.ppc.altivec.vrlwnm",
  "llvm.ppc.altivec.vrsqrtefp",
  "llvm.ppc.altivec.vsel",
  "llvm.ppc.altivec.vsl",
  "llvm.ppc.altivec.vslb",
  "llvm.ppc.altivec.vsldbi",
  "llvm.ppc.altivec.vslh",
  "llvm.ppc.altivec.vslo",
  "llvm.ppc.altivec.vslv",
  "llvm.ppc.altivec.vslw",
  "llvm.ppc.altivec.vsr",
  "llvm.ppc.altivec.vsrab",
  "llvm.ppc.altivec.vsrah",
  "llvm.ppc.altivec.vsraw",
  "llvm.ppc.altivec.vsrb",
  "llvm.ppc.altivec.vsrdbi",
  "llvm.ppc.altivec.vsrh",
  "llvm.ppc.altivec.vsro",
  "llvm.ppc.altivec.vsrv",
  "llvm.ppc.altivec.vsrw",
  "llvm.ppc.altivec.vstribl",
  "llvm.ppc.altivec.vstribl.p",
  "llvm.ppc.altivec.vstribr",
  "llvm.ppc.altivec.vstribr.p",
  "llvm.ppc.altivec.vstrihl",
  "llvm.ppc.altivec.vstrihl.p",
  "llvm.ppc.altivec.vstrihr",
  "llvm.ppc.altivec.vstrihr.p",
  "llvm.ppc.altivec.vsubcuq",
  "llvm.ppc.altivec.vsubcuw",
  "llvm.ppc.altivec.vsubecuq",
  "llvm.ppc.altivec.vsubeuqm",
  "llvm.ppc.altivec.vsubsbs",
  "llvm.ppc.altivec.vsubshs",
  "llvm.ppc.altivec.vsubsws",
  "llvm.ppc.altivec.vsububs",
  "llvm.ppc.altivec.vsubuhs",
  "llvm.ppc.altivec.vsubuws",
  "llvm.ppc.altivec.vsum2sws",
  "llvm.ppc.altivec.vsum4sbs",
  "llvm.ppc.altivec.vsum4shs",
  "llvm.ppc.altivec.vsum4ubs",
  "llvm.ppc.altivec.vsumsws",
  "llvm.ppc.altivec.vupkhpx",
  "llvm.ppc.altivec.vupkhsb",
  "llvm.ppc.altivec.vupkhsh",
  "llvm.ppc.altivec.vupkhsw",
  "llvm.ppc.altivec.vupklpx",
  "llvm.ppc.altivec.vupklsb",
  "llvm.ppc.altivec.vupklsh",
  "llvm.ppc.altivec.vupklsw",
  "llvm.ppc.atomic.load.i128",
  "llvm.ppc.atomic.store.i128",
  "llvm.ppc.atomicrmw.add.i128",
  "llvm.ppc.atomicrmw.and.i128",
  "llvm.ppc.atomicrmw.nand.i128",
  "llvm.ppc.atomicrmw.or.i128",
  "llvm.ppc.atomicrmw.sub.i128",
  "llvm.ppc.atomicrmw.xchg.i128",
  "llvm.ppc.atomicrmw.xor.i128",
  "llvm.ppc.bcdadd",
  "llvm.ppc.bcdadd.p",
  "llvm.ppc.bcdsub",
  "llvm.ppc.bcdsub.p",
  "llvm.ppc.bpermd",
  "llvm.ppc.cfence",
  "llvm.ppc.cfuged",
  "llvm.ppc.cmpb",
  "llvm.ppc.cmpeqb",
  "llvm.ppc.cmprb",
  "llvm.ppc.cmpxchg.i128",
  "llvm.ppc.cntlzdm",
  "llvm.ppc.cnttzdm",
  "llvm.ppc.compare.exp.eq",
  "llvm.ppc.compare.exp.gt",
  "llvm.ppc.compare.exp.lt",
  "llvm.ppc.compare.exp.uo",
  "llvm.ppc.convert.f128.to.ppcf128",
  "llvm.ppc.convert.ppcf128.to.f128",
  "llvm.ppc.darn",
  "llvm.ppc.darn32",
  "llvm.ppc.darnraw",
  "llvm.ppc.dcba",
  "llvm.ppc.dcbf",
  "llvm.ppc.dcbfl",
  "llvm.ppc.dcbflp",
  "llvm.ppc.dcbfps",
  "llvm.ppc.dcbi",
  "llvm.ppc.dcbst",
  "llvm.ppc.dcbstps",
  "llvm.ppc.dcbt",
  "llvm.ppc.dcbt.with.hint",
  "llvm.ppc.dcbtst",
  "llvm.ppc.dcbtst.with.hint",
  "llvm.ppc.dcbtstt",
  "llvm.ppc.dcbtt",
  "llvm.ppc.dcbz",
  "llvm.ppc.dcbzl",
  "llvm.ppc.divde",
  "llvm.ppc.divdeu",
  "llvm.ppc.divf128.round.to.odd",
  "llvm.ppc.divwe",
  "llvm.ppc.divweu",
  "llvm.ppc.eieio",
  "llvm.ppc.extract.exp",
  "llvm.ppc.extract.sig",
  "llvm.ppc.fcfid",
  "llvm.ppc.fcfud",
  "llvm.ppc.fctid",
  "llvm.ppc.fctidz",
  "llvm.ppc.fctiw",
  "llvm.ppc.fctiwz",
  "llvm.ppc.fctudz",
  "llvm.ppc.fctuwz",
  "llvm.ppc.fmaf128.round.to.odd",
  "llvm.ppc.fmsub",
  "llvm.ppc.fmsubs",
  "llvm.ppc.fnabs",
  "llvm.ppc.fnabss",
  "llvm.ppc.fnmadd",
  "llvm.ppc.fnmadds",
  "llvm.ppc.fnmsub",
  "llvm.ppc.fre",
  "llvm.ppc.fres",
  "llvm.ppc.frsqrte",
  "llvm.ppc.frsqrtes",
  "llvm.ppc.fsel",
  "llvm.ppc.fsels",
  "llvm.ppc.get.texasr",
  "llvm.ppc.get.texasru",
  "llvm.ppc.get.tfhar",
  "llvm.ppc.get.tfiar",
  "llvm.ppc.icbt",
  "llvm.ppc.insert.exp",
  "llvm.ppc.iospace.eieio",
  "llvm.ppc.iospace.lwsync",
  "llvm.ppc.iospace.sync",
  "llvm.ppc.isync",
  "llvm.ppc.load2r",
  "llvm.ppc.load4r",
  "llvm.ppc.load8r",
  "llvm.ppc.lwsync",
  "llvm.ppc.maddhd",
  "llvm.ppc.maddhdu",
  "llvm.ppc.maddld",
  "llvm.ppc.maxfe",
  "llvm.ppc.maxfl",
  "llvm.ppc.maxfs",
  "llvm.ppc.mfmsr",
  "llvm.ppc.mfspr",
  "llvm.ppc.mftbu",
  "llvm.ppc.minfe",
  "llvm.ppc.minfl",
  "llvm.ppc.minfs",
  "llvm.ppc.mma.assemble.acc",
  "llvm.ppc.mma.disassemble.acc",
  "llvm.ppc.mma.pmxvbf16ger2",
  "llvm.ppc.mma.pmxvbf16ger2nn",
  "llvm.ppc.mma.pmxvbf16ger2np",
  "llvm.ppc.mma.pmxvbf16ger2pn",
  "llvm.ppc.mma.pmxvbf16ger2pp",
  "llvm.ppc.mma.pmxvf16ger2",
  "llvm.ppc.mma.pmxvf16ger2nn",
  "llvm.ppc.mma.pmxvf16ger2np",
  "llvm.ppc.mma.pmxvf16ger2pn",
  "llvm.ppc.mma.pmxvf16ger2pp",
  "llvm.ppc.mma.pmxvf32ger",
  "llvm.ppc.mma.pmxvf32gernn",
  "llvm.ppc.mma.pmxvf32gernp",
  "llvm.ppc.mma.pmxvf32gerpn",
  "llvm.ppc.mma.pmxvf32gerpp",
  "llvm.ppc.mma.pmxvf64ger",
  "llvm.ppc.mma.pmxvf64gernn",
  "llvm.ppc.mma.pmxvf64gernp",
  "llvm.ppc.mma.pmxvf64gerpn",
  "llvm.ppc.mma.pmxvf64gerpp",
  "llvm.ppc.mma.pmxvi16ger2",
  "llvm.ppc.mma.pmxvi16ger2pp",
  "llvm.ppc.mma.pmxvi16ger2s",
  "llvm.ppc.mma.pmxvi16ger2spp",
  "llvm.ppc.mma.pmxvi4ger8",
  "llvm.ppc.mma.pmxvi4ger8pp",
  "llvm.ppc.mma.pmxvi8ger4",
  "llvm.ppc.mma.pmxvi8ger4pp",
  "llvm.ppc.mma.pmxvi8ger4spp",
  "llvm.ppc.mma.xvbf16ger2",
  "llvm.ppc.mma.xvbf16ger2nn",
  "llvm.ppc.mma.xvbf16ger2np",
  "llvm.ppc.mma.xvbf16ger2pn",
  "llvm.ppc.mma.xvbf16ger2pp",
  "llvm.ppc.mma.xvf16ger2",
  "llvm.ppc.mma.xvf16ger2nn",
  "llvm.ppc.mma.xvf16ger2np",
  "llvm.ppc.mma.xvf16ger2pn",
  "llvm.ppc.mma.xvf16ger2pp",
  "llvm.ppc.mma.xvf32ger",
  "llvm.ppc.mma.xvf32gernn",
  "llvm.ppc.mma.xvf32gernp",
  "llvm.ppc.mma.xvf32gerpn",
  "llvm.ppc.mma.xvf32gerpp",
  "llvm.ppc.mma.xvf64ger",
  "llvm.ppc.mma.xvf64gernn",
  "llvm.ppc.mma.xvf64gernp",
  "llvm.ppc.mma.xvf64gerpn",
  "llvm.ppc.mma.xvf64gerpp",
  "llvm.ppc.mma.xvi16ger2",
  "llvm.ppc.mma.xvi16ger2pp",
  "llvm.ppc.mma.xvi16ger2s",
  "llvm.ppc.mma.xvi16ger2spp",
  "llvm.ppc.mma.xvi4ger8",
  "llvm.ppc.mma.xvi4ger8pp",
  "llvm.ppc.mma.xvi8ger4",
  "llvm.ppc.mma.xvi8ger4pp",
  "llvm.ppc.mma.xvi8ger4spp",
  "llvm.ppc.mma.xxmfacc",
  "llvm.ppc.mma.xxmtacc",
  "llvm.ppc.mma.xxsetaccz",
  "llvm.ppc.mtfsb0",
  "llvm.ppc.mtfsb1",
  "llvm.ppc.mtfsf",
  "llvm.ppc.mtfsfi",
  "llvm.ppc.mtmsr",
  "llvm.ppc.mtspr",
  "llvm.ppc.mulf128.round.to.odd",
  "llvm.ppc.mulhd",
  "llvm.ppc.mulhdu",
  "llvm.ppc.mulhw",
  "llvm.ppc.mulhwu",
  "llvm.ppc.pack.longdouble",
  "llvm.ppc.pdepd",
  "llvm.ppc.pextd",
  "llvm.ppc.popcntb",
  "llvm.ppc.readflm",
  "llvm.ppc.scalar.extract.expq",
  "llvm.ppc.scalar.insert.exp.qp",
  "llvm.ppc.set.texasr",
  "llvm.ppc.set.texasru",
  "llvm.ppc.set.tfhar",
  "llvm.ppc.set.tfiar",
  "llvm.ppc.setb",
  "llvm.ppc.setflm",
  "llvm.ppc.setrnd",
  "llvm.ppc.sqrtf128.round.to.odd",
  "llvm.ppc.stbcx",
  "llvm.ppc.stdcx",
  "llvm.ppc.stfiw",
  "llvm.ppc.sthcx",
  "llvm.ppc.store2r",
  "llvm.ppc.store4r",
  "llvm.ppc.store8r",
  "llvm.ppc.stwcx",
  "llvm.ppc.subf128.round.to.odd",
  "llvm.ppc.sync",
  "llvm.ppc.tabort",
  "llvm.ppc.tabortdc",
  "llvm.ppc.tabortdci",
  "llvm.ppc.tabortwc",
  "llvm.ppc.tabortwci",
  "llvm.ppc.tbegin",
  "llvm.ppc.tcheck",
  "llvm.ppc.tdw",
  "llvm.ppc.tend",
  "llvm.ppc.tendall",
  "llvm.ppc.test.data.class",
  "llvm.ppc.trap",
  "llvm.ppc.trapd",
  "llvm.ppc.trechkpt",
  "llvm.ppc.treclaim",
  "llvm.ppc.tresume",
  "llvm.ppc.truncf128.round.to.odd",
  "llvm.ppc.tsr",
  "llvm.ppc.tsuspend",
  "llvm.ppc.ttest",
  "llvm.ppc.tw",
  "llvm.ppc.unpack.longdouble",
  "llvm.ppc.vsx.assemble.pair",
  "llvm.ppc.vsx.disassemble.pair",
  "llvm.ppc.vsx.lxvd2x",
  "llvm.ppc.vsx.lxvd2x.be",
  "llvm.ppc.vsx.lxvl",
  "llvm.ppc.vsx.lxvll",
  "llvm.ppc.vsx.lxvp",
  "llvm.ppc.vsx.lxvw4x",
  "llvm.ppc.vsx.lxvw4x.be",
  "llvm.ppc.vsx.stxvd2x",
  "llvm.ppc.vsx.stxvd2x.be",
  "llvm.ppc.vsx.stxvl",
  "llvm.ppc.vsx.stxvll",
  "llvm.ppc.vsx.stxvp",
  "llvm.ppc.vsx.stxvw4x",
  "llvm.ppc.vsx.stxvw4x.be",
  "llvm.ppc.vsx.xsmaxdp",
  "llvm.ppc.vsx.xsmindp",
  "llvm.ppc.vsx.xvcmpeqdp",
  "llvm.ppc.vsx.xvcmpeqdp.p",
  "llvm.ppc.vsx.xvcmpeqsp",
  "llvm.ppc.vsx.xvcmpeqsp.p",
  "llvm.ppc.vsx.xvcmpgedp",
  "llvm.ppc.vsx.xvcmpgedp.p",
  "llvm.ppc.vsx.xvcmpgesp",
  "llvm.ppc.vsx.xvcmpgesp.p",
  "llvm.ppc.vsx.xvcmpgtdp",
  "llvm.ppc.vsx.xvcmpgtdp.p",
  "llvm.ppc.vsx.xvcmpgtsp",
  "llvm.ppc.vsx.xvcmpgtsp.p",
  "llvm.ppc.vsx.xvcvbf16spn",
  "llvm.ppc.vsx.xvcvdpsp",
  "llvm.ppc.vsx.xvcvdpsxws",
  "llvm.ppc.vsx.xvcvdpuxws",
  "llvm.ppc.vsx.xvcvhpsp",
  "llvm.ppc.vsx.xvcvspbf16",
  "llvm.ppc.vsx.xvcvspdp",
  "llvm.ppc.vsx.xvcvsphp",
  "llvm.ppc.vsx.xvcvspsxds",
  "llvm.ppc.vsx.xvcvspuxds",
  "llvm.ppc.vsx.xvcvsxdsp",
  "llvm.ppc.vsx.xvcvsxwdp",
  "llvm.ppc.vsx.xvcvuxdsp",
  "llvm.ppc.vsx.xvcvuxwdp",
  "llvm.ppc.vsx.xvdivdp",
  "llvm.ppc.vsx.xvdivsp",
  "llvm.ppc.vsx.xviexpdp",
  "llvm.ppc.vsx.xviexpsp",
  "llvm.ppc.vsx.xvmaxdp",
  "llvm.ppc.vsx.xvmaxsp",
  "llvm.ppc.vsx.xvmindp",
  "llvm.ppc.vsx.xvminsp",
  "llvm.ppc.vsx.xvrdpip",
  "llvm.ppc.vsx.xvredp",
  "llvm.ppc.vsx.xvresp",
  "llvm.ppc.vsx.xvrspip",
  "llvm.ppc.vsx.xvrsqrtedp",
  "llvm.ppc.vsx.xvrsqrtesp",
  "llvm.ppc.vsx.xvtdivdp",
  "llvm.ppc.vsx.xvtdivsp",
  "llvm.ppc.vsx.xvtlsbb",
  "llvm.ppc.vsx.xvtsqrtdp",
  "llvm.ppc.vsx.xvtsqrtsp",
  "llvm.ppc.vsx.xvtstdcdp",
  "llvm.ppc.vsx.xvtstdcsp",
  "llvm.ppc.vsx.xvxexpdp",
  "llvm.ppc.vsx.xvxexpsp",
  "llvm.ppc.vsx.xvxsigdp",
  "llvm.ppc.vsx.xvxsigsp",
  "llvm.ppc.vsx.xxblendvb",
  "llvm.ppc.vsx.xxblendvd",
  "llvm.ppc.vsx.xxblendvh",
  "llvm.ppc.vsx.xxblendvw",
  "llvm.ppc.vsx.xxeval",
  "llvm.ppc.vsx.xxextractuw",
  "llvm.ppc.vsx.xxgenpcvbm",
  "llvm.ppc.vsx.xxgenpcvdm",
  "llvm.ppc.vsx.xxgenpcvhm",
  "llvm.ppc.vsx.xxgenpcvwm",
  "llvm.ppc.vsx.xxinsertw",
  "llvm.ppc.vsx.xxleqv",
  "llvm.ppc.vsx.xxpermx",
  "llvm.r600.cube",
  "llvm.r600.ddx",
  "llvm.r600.ddy",
  "llvm.r600.dot4",
  "llvm.r600.group.barrier",
  "llvm.r600.implicitarg.ptr",
  "llvm.r600.kill",
  "llvm.r600.rat.store.typed",
  "llvm.r600.read.global.size.x",
  "llvm.r600.read.global.size.y",
  "llvm.r600.read.global.size.z",
  "llvm.r600.read.local.size.x",
  "llvm.r600.read.local.size.y",
  "llvm.r600.read.local.size.z",
  "llvm.r600.read.ngroups.x",
  "llvm.r600.read.ngroups.y",
  "llvm.r600.read.ngroups.z",
  "llvm.r600.read.tgid.x",
  "llvm.r600.read.tgid.y",
  "llvm.r600.read.tgid.z",
  "llvm.r600.read.tidig.x",
  "llvm.r600.read.tidig.y",
  "llvm.r600.read.tidig.z",
  "llvm.r600.recipsqrt.clamped",
  "llvm.r600.recipsqrt.ieee",
  "llvm.r600.store.stream.output",
  "llvm.r600.store.swizzle",
  "llvm.r600.tex",
  "llvm.r600.texc",
  "llvm.r600.txb",
  "llvm.r600.txbc",
  "llvm.r600.txf",
  "llvm.r600.txl",
  "llvm.r600.txlc",
  "llvm.r600.txq",
  "llvm.riscv.aes32dsi",
  "llvm.riscv.aes32dsmi",
  "llvm.riscv.aes32esi",
  "llvm.riscv.aes32esmi",
  "llvm.riscv.aes64ds",
  "llvm.riscv.aes64dsm",
  "llvm.riscv.aes64es",
  "llvm.riscv.aes64esm",
  "llvm.riscv.aes64im",
  "llvm.riscv.aes64ks1i",
  "llvm.riscv.aes64ks2",
  "llvm.riscv.brev8",
  "llvm.riscv.clmul",
  "llvm.riscv.clmulh",
  "llvm.riscv.clmulr",
  "llvm.riscv.masked.atomicrmw.add.i32",
  "llvm.riscv.masked.atomicrmw.add.i64",
  "llvm.riscv.masked.atomicrmw.max.i32",
  "llvm.riscv.masked.atomicrmw.max.i64",
  "llvm.riscv.masked.atomicrmw.min.i32",
  "llvm.riscv.masked.atomicrmw.min.i64",
  "llvm.riscv.masked.atomicrmw.nand.i32",
  "llvm.riscv.masked.atomicrmw.nand.i64",
  "llvm.riscv.masked.atomicrmw.sub.i32",
  "llvm.riscv.masked.atomicrmw.sub.i64",
  "llvm.riscv.masked.atomicrmw.umax.i32",
  "llvm.riscv.masked.atomicrmw.umax.i64",
  "llvm.riscv.masked.atomicrmw.umin.i32",
  "llvm.riscv.masked.atomicrmw.umin.i64",
  "llvm.riscv.masked.atomicrmw.xchg.i32",
  "llvm.riscv.masked.atomicrmw.xchg.i64",
  "llvm.riscv.masked.cmpxchg.i32",
  "llvm.riscv.masked.cmpxchg.i64",
  "llvm.riscv.masked.strided.load",
  "llvm.riscv.masked.strided.store",
  "llvm.riscv.orc.b",
  "llvm.riscv.seg2.load",
  "llvm.riscv.seg2.store",
  "llvm.riscv.seg3.load",
  "llvm.riscv.seg3.store",
  "llvm.riscv.seg4.load",
  "llvm.riscv.seg4.store",
  "llvm.riscv.seg5.load",
  "llvm.riscv.seg5.store",
  "llvm.riscv.seg6.load",
  "llvm.riscv.seg6.store",
  "llvm.riscv.seg7.load",
  "llvm.riscv.seg7.store",
  "llvm.riscv.seg8.load",
  "llvm.riscv.seg8.store",
  "llvm.riscv.sf.vc.fv.se",
  "llvm.riscv.sf.vc.fvv.se",
  "llvm.riscv.sf.vc.fvw.se",
  "llvm.riscv.sf.vc.i.se.e16m1",
  "llvm.riscv.sf.vc.i.se.e16m2",
  "llvm.riscv.sf.vc.i.se.e16m4",
  "llvm.riscv.sf.vc.i.se.e16m8",
  "llvm.riscv.sf.vc.i.se.e16mf2",
  "llvm.riscv.sf.vc.i.se.e16mf4",
  "llvm.riscv.sf.vc.i.se.e32m1",
  "llvm.riscv.sf.vc.i.se.e32m2",
  "llvm.riscv.sf.vc.i.se.e32m4",
  "llvm.riscv.sf.vc.i.se.e32m8",
  "llvm.riscv.sf.vc.i.se.e32mf2",
  "llvm.riscv.sf.vc.i.se.e64m1",
  "llvm.riscv.sf.vc.i.se.e64m2",
  "llvm.riscv.sf.vc.i.se.e64m4",
  "llvm.riscv.sf.vc.i.se.e64m8",
  "llvm.riscv.sf.vc.i.se.e8m1",
  "llvm.riscv.sf.vc.i.se.e8m2",
  "llvm.riscv.sf.vc.i.se.e8m4",
  "llvm.riscv.sf.vc.i.se.e8m8",
  "llvm.riscv.sf.vc.i.se.e8mf2",
  "llvm.riscv.sf.vc.i.se.e8mf4",
  "llvm.riscv.sf.vc.i.se.e8mf8",
  "llvm.riscv.sf.vc.iv.se",
  "llvm.riscv.sf.vc.ivv.se",
  "llvm.riscv.sf.vc.ivw.se",
  "llvm.riscv.sf.vc.v.fv",
  "llvm.riscv.sf.vc.v.fv.se",
  "llvm.riscv.sf.vc.v.fvv",
  "llvm.riscv.sf.vc.v.fvv.se",
  "llvm.riscv.sf.vc.v.fvw",
  "llvm.riscv.sf.vc.v.fvw.se",
  "llvm.riscv.sf.vc.v.i",
  "llvm.riscv.sf.vc.v.i.se",
  "llvm.riscv.sf.vc.v.iv",
  "llvm.riscv.sf.vc.v.iv.se",
  "llvm.riscv.sf.vc.v.ivv",
  "llvm.riscv.sf.vc.v.ivv.se",
  "llvm.riscv.sf.vc.v.ivw",
  "llvm.riscv.sf.vc.v.ivw.se",
  "llvm.riscv.sf.vc.v.vv",
  "llvm.riscv.sf.vc.v.vv.se",
  "llvm.riscv.sf.vc.v.vvv",
  "llvm.riscv.sf.vc.v.vvv.se",
  "llvm.riscv.sf.vc.v.vvw",
  "llvm.riscv.sf.vc.v.vvw.se",
  "llvm.riscv.sf.vc.v.x",
  "llvm.riscv.sf.vc.v.x.se",
  "llvm.riscv.sf.vc.v.xv",
  "llvm.riscv.sf.vc.v.xv.se",
  "llvm.riscv.sf.vc.v.xvv",
  "llvm.riscv.sf.vc.v.xvv.se",
  "llvm.riscv.sf.vc.v.xvw",
  "llvm.riscv.sf.vc.v.xvw.se",
  "llvm.riscv.sf.vc.vv.se",
  "llvm.riscv.sf.vc.vvv.se",
  "llvm.riscv.sf.vc.vvw.se",
  "llvm.riscv.sf.vc.x.se.e16m1",
  "llvm.riscv.sf.vc.x.se.e16m2",
  "llvm.riscv.sf.vc.x.se.e16m4",
  "llvm.riscv.sf.vc.x.se.e16m8",
  "llvm.riscv.sf.vc.x.se.e16mf2",
  "llvm.riscv.sf.vc.x.se.e16mf4",
  "llvm.riscv.sf.vc.x.se.e32m1",
  "llvm.riscv.sf.vc.x.se.e32m2",
  "llvm.riscv.sf.vc.x.se.e32m4",
  "llvm.riscv.sf.vc.x.se.e32m8",
  "llvm.riscv.sf.vc.x.se.e32mf2",
  "llvm.riscv.sf.vc.x.se.e64m1",
  "llvm.riscv.sf.vc.x.se.e64m2",
  "llvm.riscv.sf.vc.x.se.e64m4",
  "llvm.riscv.sf.vc.x.se.e64m8",
  "llvm.riscv.sf.vc.x.se.e8m1",
  "llvm.riscv.sf.vc.x.se.e8m2",
  "llvm.riscv.sf.vc.x.se.e8m4",
  "llvm.riscv.sf.vc.x.se.e8m8",
  "llvm.riscv.sf.vc.x.se.e8mf2",
  "llvm.riscv.sf.vc.x.se.e8mf4",
  "llvm.riscv.sf.vc.x.se.e8mf8",
  "llvm.riscv.sf.vc.xv.se",
  "llvm.riscv.sf.vc.xvv.se",
  "llvm.riscv.sf.vc.xvw.se",
  "llvm.riscv.sha256sig0",
  "llvm.riscv.sha256sig1",
  "llvm.riscv.sha256sum0",
  "llvm.riscv.sha256sum1",
  "llvm.riscv.sha512sig0",
  "llvm.riscv.sha512sig0h",
  "llvm.riscv.sha512sig0l",
  "llvm.riscv.sha512sig1",
  "llvm.riscv.sha512sig1h",
  "llvm.riscv.sha512sig1l",
  "llvm.riscv.sha512sum0",
  "llvm.riscv.sha512sum0r",
  "llvm.riscv.sha512sum1",
  "llvm.riscv.sha512sum1r",
  "llvm.riscv.sm3p0",
  "llvm.riscv.sm3p1",
  "llvm.riscv.sm4ed",
  "llvm.riscv.sm4ks",
  "llvm.riscv.th.vmaqa",
  "llvm.riscv.th.vmaqa.mask",
  "llvm.riscv.th.vmaqasu",
  "llvm.riscv.th.vmaqasu.mask",
  "llvm.riscv.th.vmaqau",
  "llvm.riscv.th.vmaqau.mask",
  "llvm.riscv.th.vmaqaus",
  "llvm.riscv.th.vmaqaus.mask",
  "llvm.riscv.unzip",
  "llvm.riscv.vaadd",
  "llvm.riscv.vaadd.mask",
  "llvm.riscv.vaaddu",
  "llvm.riscv.vaaddu.mask",
  "llvm.riscv.vadc",
  "llvm.riscv.vadd",
  "llvm.riscv.vadd.mask",
  "llvm.riscv.vand",
  "llvm.riscv.vand.mask",
  "llvm.riscv.vasub",
  "llvm.riscv.vasub.mask",
  "llvm.riscv.vasubu",
  "llvm.riscv.vasubu.mask",
  "llvm.riscv.vcompress",
  "llvm.riscv.vcpop",
  "llvm.riscv.vcpop.mask",
  "llvm.riscv.vdiv",
  "llvm.riscv.vdiv.mask",
  "llvm.riscv.vdivu",
  "llvm.riscv.vdivu.mask",
  "llvm.riscv.vfadd",
  "llvm.riscv.vfadd.mask",
  "llvm.riscv.vfclass",
  "llvm.riscv.vfclass.mask",
  "llvm.riscv.vfcvt.f.x.v",
  "llvm.riscv.vfcvt.f.x.v.mask",
  "llvm.riscv.vfcvt.f.xu.v",
  "llvm.riscv.vfcvt.f.xu.v.mask",
  "llvm.riscv.vfcvt.rtz.x.f.v",
  "llvm.riscv.vfcvt.rtz.x.f.v.mask",
  "llvm.riscv.vfcvt.rtz.xu.f.v",
  "llvm.riscv.vfcvt.rtz.xu.f.v.mask",
  "llvm.riscv.vfcvt.x.f.v",
  "llvm.riscv.vfcvt.x.f.v.mask",
  "llvm.riscv.vfcvt.xu.f.v",
  "llvm.riscv.vfcvt.xu.f.v.mask",
  "llvm.riscv.vfdiv",
  "llvm.riscv.vfdiv.mask",
  "llvm.riscv.vfirst",
  "llvm.riscv.vfirst.mask",
  "llvm.riscv.vfmacc",
  "llvm.riscv.vfmacc.mask",
  "llvm.riscv.vfmadd",
  "llvm.riscv.vfmadd.mask",
  "llvm.riscv.vfmax",
  "llvm.riscv.vfmax.mask",
  "llvm.riscv.vfmerge",
  "llvm.riscv.vfmin",
  "llvm.riscv.vfmin.mask",
  "llvm.riscv.vfmsac",
  "llvm.riscv.vfmsac.mask",
  "llvm.riscv.vfmsub",
  "llvm.riscv.vfmsub.mask",
  "llvm.riscv.vfmul",
  "llvm.riscv.vfmul.mask",
  "llvm.riscv.vfmv.f.s",
  "llvm.riscv.vfmv.s.f",
  "llvm.riscv.vfmv.v.f",
  "llvm.riscv.vfncvt.f.f.w",
  "llvm.riscv.vfncvt.f.f.w.mask",
  "llvm.riscv.vfncvt.f.x.w",
  "llvm.riscv.vfncvt.f.x.w.mask",
  "llvm.riscv.vfncvt.f.xu.w",
  "llvm.riscv.vfncvt.f.xu.w.mask",
  "llvm.riscv.vfncvt.rod.f.f.w",
  "llvm.riscv.vfncvt.rod.f.f.w.mask",
  "llvm.riscv.vfncvt.rtz.x.f.w",
  "llvm.riscv.vfncvt.rtz.x.f.w.mask",
  "llvm.riscv.vfncvt.rtz.xu.f.w",
  "llvm.riscv.vfncvt.rtz.xu.f.w.mask",
  "llvm.riscv.vfncvt.x.f.w",
  "llvm.riscv.vfncvt.x.f.w.mask",
  "llvm.riscv.vfncvt.xu.f.w",
  "llvm.riscv.vfncvt.xu.f.w.mask",
  "llvm.riscv.vfnmacc",
  "llvm.riscv.vfnmacc.mask",
  "llvm.riscv.vfnmadd",
  "llvm.riscv.vfnmadd.mask",
  "llvm.riscv.vfnmsac",
  "llvm.riscv.vfnmsac.mask",
  "llvm.riscv.vfnmsub",
  "llvm.riscv.vfnmsub.mask",
  "llvm.riscv.vfrdiv",
  "llvm.riscv.vfrdiv.mask",
  "llvm.riscv.vfrec7",
  "llvm.riscv.vfrec7.mask",
  "llvm.riscv.vfredmax",
  "llvm.riscv.vfredmax.mask",
  "llvm.riscv.vfredmin",
  "llvm.riscv.vfredmin.mask",
  "llvm.riscv.vfredosum",
  "llvm.riscv.vfredosum.mask",
  "llvm.riscv.vfredusum",
  "llvm.riscv.vfredusum.mask",
  "llvm.riscv.vfrsqrt7",
  "llvm.riscv.vfrsqrt7.mask",
  "llvm.riscv.vfrsub",
  "llvm.riscv.vfrsub.mask",
  "llvm.riscv.vfsgnj",
  "llvm.riscv.vfsgnj.mask",
  "llvm.riscv.vfsgnjn",
  "llvm.riscv.vfsgnjn.mask",
  "llvm.riscv.vfsgnjx",
  "llvm.riscv.vfsgnjx.mask",
  "llvm.riscv.vfslide1down",
  "llvm.riscv.vfslide1down.mask",
  "llvm.riscv.vfslide1up",
  "llvm.riscv.vfslide1up.mask",
  "llvm.riscv.vfsqrt",
  "llvm.riscv.vfsqrt.mask",
  "llvm.riscv.vfsub",
  "llvm.riscv.vfsub.mask",
  "llvm.riscv.vfwadd",
  "llvm.riscv.vfwadd.mask",
  "llvm.riscv.vfwadd.w",
  "llvm.riscv.vfwadd.w.mask",
  "llvm.riscv.vfwcvt.f.f.v",
  "llvm.riscv.vfwcvt.f.f.v.mask",
  "llvm.riscv.vfwcvt.f.x.v",
  "llvm.riscv.vfwcvt.f.x.v.mask",
  "llvm.riscv.vfwcvt.f.xu.v",
  "llvm.riscv.vfwcvt.f.xu.v.mask",
  "llvm.riscv.vfwcvt.rtz.x.f.v",
  "llvm.riscv.vfwcvt.rtz.x.f.v.mask",
  "llvm.riscv.vfwcvt.rtz.xu.f.v",
  "llvm.riscv.vfwcvt.rtz.xu.f.v.mask",
  "llvm.riscv.vfwcvt.x.f.v",
  "llvm.riscv.vfwcvt.x.f.v.mask",
  "llvm.riscv.vfwcvt.xu.f.v",
  "llvm.riscv.vfwcvt.xu.f.v.mask",
  "llvm.riscv.vfwmacc",
  "llvm.riscv.vfwmacc.mask",
  "llvm.riscv.vfwmsac",
  "llvm.riscv.vfwmsac.mask",
  "llvm.riscv.vfwmul",
  "llvm.riscv.vfwmul.mask",
  "llvm.riscv.vfwnmacc",
  "llvm.riscv.vfwnmacc.mask",
  "llvm.riscv.vfwnmsac",
  "llvm.riscv.vfwnmsac.mask",
  "llvm.riscv.vfwredosum",
  "llvm.riscv.vfwredosum.mask",
  "llvm.riscv.vfwredusum",
  "llvm.riscv.vfwredusum.mask",
  "llvm.riscv.vfwsub",
  "llvm.riscv.vfwsub.mask",
  "llvm.riscv.vfwsub.w",
  "llvm.riscv.vfwsub.w.mask",
  "llvm.riscv.vid",
  "llvm.riscv.vid.mask",
  "llvm.riscv.viota",
  "llvm.riscv.viota.mask",
  "llvm.riscv.vle",
  "llvm.riscv.vle.mask",
  "llvm.riscv.vleff",
  "llvm.riscv.vleff.mask",
  "llvm.riscv.vlm",
  "llvm.riscv.vloxei",
  "llvm.riscv.vloxei.mask",
  "llvm.riscv.vloxseg2",
  "llvm.riscv.vloxseg2.mask",
  "llvm.riscv.vloxseg3",
  "llvm.riscv.vloxseg3.mask",
  "llvm.riscv.vloxseg4",
  "llvm.riscv.vloxseg4.mask",
  "llvm.riscv.vloxseg5",
  "llvm.riscv.vloxseg5.mask",
  "llvm.riscv.vloxseg6",
  "llvm.riscv.vloxseg6.mask",
  "llvm.riscv.vloxseg7",
  "llvm.riscv.vloxseg7.mask",
  "llvm.riscv.vloxseg8",
  "llvm.riscv.vloxseg8.mask",
  "llvm.riscv.vlse",
  "llvm.riscv.vlse.mask",
  "llvm.riscv.vlseg2",
  "llvm.riscv.vlseg2.mask",
  "llvm.riscv.vlseg2ff",
  "llvm.riscv.vlseg2ff.mask",
  "llvm.riscv.vlseg3",
  "llvm.riscv.vlseg3.mask",
  "llvm.riscv.vlseg3ff",
  "llvm.riscv.vlseg3ff.mask",
  "llvm.riscv.vlseg4",
  "llvm.riscv.vlseg4.mask",
  "llvm.riscv.vlseg4ff",
  "llvm.riscv.vlseg4ff.mask",
  "llvm.riscv.vlseg5",
  "llvm.riscv.vlseg5.mask",
  "llvm.riscv.vlseg5ff",
  "llvm.riscv.vlseg5ff.mask",
  "llvm.riscv.vlseg6",
  "llvm.riscv.vlseg6.mask",
  "llvm.riscv.vlseg6ff",
  "llvm.riscv.vlseg6ff.mask",
  "llvm.riscv.vlseg7",
  "llvm.riscv.vlseg7.mask",
  "llvm.riscv.vlseg7ff",
  "llvm.riscv.vlseg7ff.mask",
  "llvm.riscv.vlseg8",
  "llvm.riscv.vlseg8.mask",
  "llvm.riscv.vlseg8ff",
  "llvm.riscv.vlseg8ff.mask",
  "llvm.riscv.vlsseg2",
  "llvm.riscv.vlsseg2.mask",
  "llvm.riscv.vlsseg3",
  "llvm.riscv.vlsseg3.mask",
  "llvm.riscv.vlsseg4",
  "llvm.riscv.vlsseg4.mask",
  "llvm.riscv.vlsseg5",
  "llvm.riscv.vlsseg5.mask",
  "llvm.riscv.vlsseg6",
  "llvm.riscv.vlsseg6.mask",
  "llvm.riscv.vlsseg7",
  "llvm.riscv.vlsseg7.mask",
  "llvm.riscv.vlsseg8",
  "llvm.riscv.vlsseg8.mask",
  "llvm.riscv.vluxei",
  "llvm.riscv.vluxei.mask",
  "llvm.riscv.vluxseg2",
  "llvm.riscv.vluxseg2.mask",
  "llvm.riscv.vluxseg3",
  "llvm.riscv.vluxseg3.mask",
  "llvm.riscv.vluxseg4",
  "llvm.riscv.vluxseg4.mask",
  "llvm.riscv.vluxseg5",
  "llvm.riscv.vluxseg5.mask",
  "llvm.riscv.vluxseg6",
  "llvm.riscv.vluxseg6.mask",
  "llvm.riscv.vluxseg7",
  "llvm.riscv.vluxseg7.mask",
  "llvm.riscv.vluxseg8",
  "llvm.riscv.vluxseg8.mask",
  "llvm.riscv.vmacc",
  "llvm.riscv.vmacc.mask",
  "llvm.riscv.vmadc",
  "llvm.riscv.vmadc.carry.in",
  "llvm.riscv.vmadd",
  "llvm.riscv.vmadd.mask",
  "llvm.riscv.vmand",
  "llvm.riscv.vmandn",
  "llvm.riscv.vmax",
  "llvm.riscv.vmax.mask",
  "llvm.riscv.vmaxu",
  "llvm.riscv.vmaxu.mask",
  "llvm.riscv.vmclr",
  "llvm.riscv.vmerge",
  "llvm.riscv.vmfeq",
  "llvm.riscv.vmfeq.mask",
  "llvm.riscv.vmfge",
  "llvm.riscv.vmfge.mask",
  "llvm.riscv.vmfgt",
  "llvm.riscv.vmfgt.mask",
  "llvm.riscv.vmfle",
  "llvm.riscv.vmfle.mask",
  "llvm.riscv.vmflt",
  "llvm.riscv.vmflt.mask",
  "llvm.riscv.vmfne",
  "llvm.riscv.vmfne.mask",
  "llvm.riscv.vmin",
  "llvm.riscv.vmin.mask",
  "llvm.riscv.vminu",
  "llvm.riscv.vminu.mask",
  "llvm.riscv.vmnand",
  "llvm.riscv.vmnor",
  "llvm.riscv.vmor",
  "llvm.riscv.vmorn",
  "llvm.riscv.vmsbc",
  "llvm.riscv.vmsbc.borrow.in",
  "llvm.riscv.vmsbf",
  "llvm.riscv.vmsbf.mask",
  "llvm.riscv.vmseq",
  "llvm.riscv.vmseq.mask",
  "llvm.riscv.vmset",
  "llvm.riscv.vmsge",
  "llvm.riscv.vmsge.mask",
  "llvm.riscv.vmsgeu",
  "llvm.riscv.vmsgeu.mask",
  "llvm.riscv.vmsgt",
  "llvm.riscv.vmsgt.mask",
  "llvm.riscv.vmsgtu",
  "llvm.riscv.vmsgtu.mask",
  "llvm.riscv.vmsif",
  "llvm.riscv.vmsif.mask",
  "llvm.riscv.vmsle",
  "llvm.riscv.vmsle.mask",
  "llvm.riscv.vmsleu",
  "llvm.riscv.vmsleu.mask",
  "llvm.riscv.vmslt",
  "llvm.riscv.vmslt.mask",
  "llvm.riscv.vmsltu",
  "llvm.riscv.vmsltu.mask",
  "llvm.riscv.vmsne",
  "llvm.riscv.vmsne.mask",
  "llvm.riscv.vmsof",
  "llvm.riscv.vmsof.mask",
  "llvm.riscv.vmul",
  "llvm.riscv.vmul.mask",
  "llvm.riscv.vmulh",
  "llvm.riscv.vmulh.mask",
  "llvm.riscv.vmulhsu",
  "llvm.riscv.vmulhsu.mask",
  "llvm.riscv.vmulhu",
  "llvm.riscv.vmulhu.mask",
  "llvm.riscv.vmv.s.x",
  "llvm.riscv.vmv.v.v",
  "llvm.riscv.vmv.v.x",
  "llvm.riscv.vmv.x.s",
  "llvm.riscv.vmxnor",
  "llvm.riscv.vmxor",
  "llvm.riscv.vnclip",
  "llvm.riscv.vnclip.mask",
  "llvm.riscv.vnclipu",
  "llvm.riscv.vnclipu.mask",
  "llvm.riscv.vnmsac",
  "llvm.riscv.vnmsac.mask",
  "llvm.riscv.vnmsub",
  "llvm.riscv.vnmsub.mask",
  "llvm.riscv.vnsra",
  "llvm.riscv.vnsra.mask",
  "llvm.riscv.vnsrl",
  "llvm.riscv.vnsrl.mask",
  "llvm.riscv.vor",
  "llvm.riscv.vor.mask",
  "llvm.riscv.vredand",
  "llvm.riscv.vredand.mask",
  "llvm.riscv.vredmax",
  "llvm.riscv.vredmax.mask",
  "llvm.riscv.vredmaxu",
  "llvm.riscv.vredmaxu.mask",
  "llvm.riscv.vredmin",
  "llvm.riscv.vredmin.mask",
  "llvm.riscv.vredminu",
  "llvm.riscv.vredminu.mask",
  "llvm.riscv.vredor",
  "llvm.riscv.vredor.mask",
  "llvm.riscv.vredsum",
  "llvm.riscv.vredsum.mask",
  "llvm.riscv.vredxor",
  "llvm.riscv.vredxor.mask",
  "llvm.riscv.vrem",
  "llvm.riscv.vrem.mask",
  "llvm.riscv.vremu",
  "llvm.riscv.vremu.mask",
  "llvm.riscv.vrgather.vv",
  "llvm.riscv.vrgather.vv.mask",
  "llvm.riscv.vrgather.vx",
  "llvm.riscv.vrgather.vx.mask",
  "llvm.riscv.vrgatherei16.vv",
  "llvm.riscv.vrgatherei16.vv.mask",
  "llvm.riscv.vrsub",
  "llvm.riscv.vrsub.mask",
  "llvm.riscv.vsadd",
  "llvm.riscv.vsadd.mask",
  "llvm.riscv.vsaddu",
  "llvm.riscv.vsaddu.mask",
  "llvm.riscv.vsbc",
  "llvm.riscv.vse",
  "llvm.riscv.vse.mask",
  "llvm.riscv.vsetvli",
  "llvm.riscv.vsetvlimax",
  "llvm.riscv.vsext",
  "llvm.riscv.vsext.mask",
  "llvm.riscv.vslide1down",
  "llvm.riscv.vslide1down.mask",
  "llvm.riscv.vslide1up",
  "llvm.riscv.vslide1up.mask",
  "llvm.riscv.vslidedown",
  "llvm.riscv.vslidedown.mask",
  "llvm.riscv.vslideup",
  "llvm.riscv.vslideup.mask",
  "llvm.riscv.vsll",
  "llvm.riscv.vsll.mask",
  "llvm.riscv.vsm",
  "llvm.riscv.vsmul",
  "llvm.riscv.vsmul.mask",
  "llvm.riscv.vsoxei",
  "llvm.riscv.vsoxei.mask",
  "llvm.riscv.vsoxseg2",
  "llvm.riscv.vsoxseg2.mask",
  "llvm.riscv.vsoxseg3",
  "llvm.riscv.vsoxseg3.mask",
  "llvm.riscv.vsoxseg4",
  "llvm.riscv.vsoxseg4.mask",
  "llvm.riscv.vsoxseg5",
  "llvm.riscv.vsoxseg5.mask",
  "llvm.riscv.vsoxseg6",
  "llvm.riscv.vsoxseg6.mask",
  "llvm.riscv.vsoxseg7",
  "llvm.riscv.vsoxseg7.mask",
  "llvm.riscv.vsoxseg8",
  "llvm.riscv.vsoxseg8.mask",
  "llvm.riscv.vsra",
  "llvm.riscv.vsra.mask",
  "llvm.riscv.vsrl",
  "llvm.riscv.vsrl.mask",
  "llvm.riscv.vsse",
  "llvm.riscv.vsse.mask",
  "llvm.riscv.vsseg2",
  "llvm.riscv.vsseg2.mask",
  "llvm.riscv.vsseg3",
  "llvm.riscv.vsseg3.mask",
  "llvm.riscv.vsseg4",
  "llvm.riscv.vsseg4.mask",
  "llvm.riscv.vsseg5",
  "llvm.riscv.vsseg5.mask",
  "llvm.riscv.vsseg6",
  "llvm.riscv.vsseg6.mask",
  "llvm.riscv.vsseg7",
  "llvm.riscv.vsseg7.mask",
  "llvm.riscv.vsseg8",
  "llvm.riscv.vsseg8.mask",
  "llvm.riscv.vssra",
  "llvm.riscv.vssra.mask",
  "llvm.riscv.vssrl",
  "llvm.riscv.vssrl.mask",
  "llvm.riscv.vssseg2",
  "llvm.riscv.vssseg2.mask",
  "llvm.riscv.vssseg3",
  "llvm.riscv.vssseg3.mask",
  "llvm.riscv.vssseg4",
  "llvm.riscv.vssseg4.mask",
  "llvm.riscv.vssseg5",
  "llvm.riscv.vssseg5.mask",
  "llvm.riscv.vssseg6",
  "llvm.riscv.vssseg6.mask",
  "llvm.riscv.vssseg7",
  "llvm.riscv.vssseg7.mask",
  "llvm.riscv.vssseg8",
  "llvm.riscv.vssseg8.mask",
  "llvm.riscv.vssub",
  "llvm.riscv.vssub.mask",
  "llvm.riscv.vssubu",
  "llvm.riscv.vssubu.mask",
  "llvm.riscv.vsub",
  "llvm.riscv.vsub.mask",
  "llvm.riscv.vsuxei",
  "llvm.riscv.vsuxei.mask",
  "llvm.riscv.vsuxseg2",
  "llvm.riscv.vsuxseg2.mask",
  "llvm.riscv.vsuxseg3",
  "llvm.riscv.vsuxseg3.mask",
  "llvm.riscv.vsuxseg4",
  "llvm.riscv.vsuxseg4.mask",
  "llvm.riscv.vsuxseg5",
  "llvm.riscv.vsuxseg5.mask",
  "llvm.riscv.vsuxseg6",
  "llvm.riscv.vsuxseg6.mask",
  "llvm.riscv.vsuxseg7",
  "llvm.riscv.vsuxseg7.mask",
  "llvm.riscv.vsuxseg8",
  "llvm.riscv.vsuxseg8.mask",
  "llvm.riscv.vwadd",
  "llvm.riscv.vwadd.mask",
  "llvm.riscv.vwadd.w",
  "llvm.riscv.vwadd.w.mask",
  "llvm.riscv.vwaddu",
  "llvm.riscv.vwaddu.mask",
  "llvm.riscv.vwaddu.w",
  "llvm.riscv.vwaddu.w.mask",
  "llvm.riscv.vwmacc",
  "llvm.riscv.vwmacc.mask",
  "llvm.riscv.vwmaccsu",
  "llvm.riscv.vwmaccsu.mask",
  "llvm.riscv.vwmaccu",
  "llvm.riscv.vwmaccu.mask",
  "llvm.riscv.vwmaccus",
  "llvm.riscv.vwmaccus.mask",
  "llvm.riscv.vwmul",
  "llvm.riscv.vwmul.mask",
  "llvm.riscv.vwmulsu",
  "llvm.riscv.vwmulsu.mask",
  "llvm.riscv.vwmulu",
  "llvm.riscv.vwmulu.mask",
  "llvm.riscv.vwredsum",
  "llvm.riscv.vwredsum.mask",
  "llvm.riscv.vwredsumu",
  "llvm.riscv.vwredsumu.mask",
  "llvm.riscv.vwsub",
  "llvm.riscv.vwsub.mask",
  "llvm.riscv.vwsub.w",
  "llvm.riscv.vwsub.w.mask",
  "llvm.riscv.vwsubu",
  "llvm.riscv.vwsubu.mask",
  "llvm.riscv.vwsubu.w",
  "llvm.riscv.vwsubu.w.mask",
  "llvm.riscv.vxor",
  "llvm.riscv.vxor.mask",
  "llvm.riscv.vzext",
  "llvm.riscv.vzext.mask",
  "llvm.riscv.xperm4",
  "llvm.riscv.xperm8",
  "llvm.riscv.zip",
  "llvm.s390.efpc",
  "llvm.s390.etnd",
  "llvm.s390.lcbb",
  "llvm.s390.ntstg",
  "llvm.s390.ppa.txassist",
  "llvm.s390.sfpc",
  "llvm.s390.tabort",
  "llvm.s390.tbegin",
  "llvm.s390.tbegin.nofloat",
  "llvm.s390.tbeginc",
  "llvm.s390.tdc",
  "llvm.s390.tend",
  "llvm.s390.vaccb",
  "llvm.s390.vacccq",
  "llvm.s390.vaccf",
  "llvm.s390.vaccg",
  "llvm.s390.vacch",
  "llvm.s390.vaccq",
  "llvm.s390.vacq",
  "llvm.s390.vaq",
  "llvm.s390.vavgb",
  "llvm.s390.vavgf",
  "llvm.s390.vavgg",
  "llvm.s390.vavgh",
  "llvm.s390.vavglb",
  "llvm.s390.vavglf",
  "llvm.s390.vavglg",
  "llvm.s390.vavglh",
  "llvm.s390.vbperm",
  "llvm.s390.vceqbs",
  "llvm.s390.vceqfs",
  "llvm.s390.vceqgs",
  "llvm.s390.vceqhs",
  "llvm.s390.vcfn",
  "llvm.s390.vchbs",
  "llvm.s390.vchfs",
  "llvm.s390.vchgs",
  "llvm.s390.vchhs",
  "llvm.s390.vchlbs",
  "llvm.s390.vchlfs",
  "llvm.s390.vchlgs",
  "llvm.s390.vchlhs",
  "llvm.s390.vcksm",
  "llvm.s390.vclfnhs",
  "llvm.s390.vclfnls",
  "llvm.s390.vcnf",
  "llvm.s390.vcrnfs",
  "llvm.s390.verimb",
  "llvm.s390.verimf",
  "llvm.s390.verimg",
  "llvm.s390.verimh",
  "llvm.s390.verllb",
  "llvm.s390.verllf",
  "llvm.s390.verllg",
  "llvm.s390.verllh",
  "llvm.s390.verllvb",
  "llvm.s390.verllvf",
  "llvm.s390.verllvg",
  "llvm.s390.verllvh",
  "llvm.s390.vfaeb",
  "llvm.s390.vfaebs",
  "llvm.s390.vfaef",
  "llvm.s390.vfaefs",
  "llvm.s390.vfaeh",
  "llvm.s390.vfaehs",
  "llvm.s390.vfaezb",
  "llvm.s390.vfaezbs",
  "llvm.s390.vfaezf",
  "llvm.s390.vfaezfs",
  "llvm.s390.vfaezh",
  "llvm.s390.vfaezhs",
  "llvm.s390.vfcedbs",
  "llvm.s390.vfcesbs",
  "llvm.s390.vfchdbs",
  "llvm.s390.vfchedbs",
  "llvm.s390.vfchesbs",
  "llvm.s390.vfchsbs",
  "llvm.s390.vfeeb",
  "llvm.s390.vfeebs",
  "llvm.s390.vfeef",
  "llvm.s390.vfeefs",
  "llvm.s390.vfeeh",
  "llvm.s390.vfeehs",
  "llvm.s390.vfeezb",
  "llvm.s390.vfeezbs",
  "llvm.s390.vfeezf",
  "llvm.s390.vfeezfs",
  "llvm.s390.vfeezh",
  "llvm.s390.vfeezhs",
  "llvm.s390.vfeneb",
  "llvm.s390.vfenebs",
  "llvm.s390.vfenef",
  "llvm.s390.vfenefs",
  "llvm.s390.vfeneh",
  "llvm.s390.vfenehs",
  "llvm.s390.vfenezb",
  "llvm.s390.vfenezbs",
  "llvm.s390.vfenezf",
  "llvm.s390.vfenezfs",
  "llvm.s390.vfenezh",
  "llvm.s390.vfenezhs",
  "llvm.s390.vfidb",
  "llvm.s390.vfisb",
  "llvm.s390.vfmaxdb",
  "llvm.s390.vfmaxsb",
  "llvm.s390.vfmindb",
  "llvm.s390.vfminsb",
  "llvm.s390.vftcidb",
  "llvm.s390.vftcisb",
  "llvm.s390.vgfmab",
  "llvm.s390.vgfmaf",
  "llvm.s390.vgfmag",
  "llvm.s390.vgfmah",
  "llvm.s390.vgfmb",
  "llvm.s390.vgfmf",
  "llvm.s390.vgfmg",
  "llvm.s390.vgfmh",
  "llvm.s390.vistrb",
  "llvm.s390.vistrbs",
  "llvm.s390.vistrf",
  "llvm.s390.vistrfs",
  "llvm.s390.vistrh",
  "llvm.s390.vistrhs",
  "llvm.s390.vlbb",
  "llvm.s390.vll",
  "llvm.s390.vlrl",
  "llvm.s390.vmaeb",
  "llvm.s390.vmaef",
  "llvm.s390.vmaeh",
  "llvm.s390.vmahb",
  "llvm.s390.vmahf",
  "llvm.s390.vmahh",
  "llvm.s390.vmaleb",
  "llvm.s390.vmalef",
  "llvm.s390.vmaleh",
  "llvm.s390.vmalhb",
  "llvm.s390.vmalhf",
  "llvm.s390.vmalhh",
  "llvm.s390.vmalob",
  "llvm.s390.vmalof",
  "llvm.s390.vmaloh",
  "llvm.s390.vmaob",
  "llvm.s390.vmaof",
  "llvm.s390.vmaoh",
  "llvm.s390.vmeb",
  "llvm.s390.vmef",
  "llvm.s390.vmeh",
  "llvm.s390.vmhb",
  "llvm.s390.vmhf",
  "llvm.s390.vmhh",
  "llvm.s390.vmleb",
  "llvm.s390.vmlef",
  "llvm.s390.vmleh",
  "llvm.s390.vmlhb",
  "llvm.s390.vmlhf",
  "llvm.s390.vmlhh",
  "llvm.s390.vmlob",
  "llvm.s390.vmlof",
  "llvm.s390.vmloh",
  "llvm.s390.vmob",
  "llvm.s390.vmof",
  "llvm.s390.vmoh",
  "llvm.s390.vmslg",
  "llvm.s390.vpdi",
  "llvm.s390.vperm",
  "llvm.s390.vpklsf",
  "llvm.s390.vpklsfs",
  "llvm.s390.vpklsg",
  "llvm.s390.vpklsgs",
  "llvm.s390.vpklsh",
  "llvm.s390.vpklshs",
  "llvm.s390.vpksf",
  "llvm.s390.vpksfs",
  "llvm.s390.vpksg",
  "llvm.s390.vpksgs",
  "llvm.s390.vpksh",
  "llvm.s390.vpkshs",
  "llvm.s390.vsbcbiq",
  "llvm.s390.vsbiq",
  "llvm.s390.vscbib",
  "llvm.s390.vscbif",
  "llvm.s390.vscbig",
  "llvm.s390.vscbih",
  "llvm.s390.vscbiq",
  "llvm.s390.vsl",
  "llvm.s390.vslb",
  "llvm.s390.vsld",
  "llvm.s390.vsldb",
  "llvm.s390.vsq",
  "llvm.s390.vsra",
  "llvm.s390.vsrab",
  "llvm.s390.vsrd",
  "llvm.s390.vsrl",
  "llvm.s390.vsrlb",
  "llvm.s390.vstl",
  "llvm.s390.vstrcb",
  "llvm.s390.vstrcbs",
  "llvm.s390.vstrcf",
  "llvm.s390.vstrcfs",
  "llvm.s390.vstrch",
  "llvm.s390.vstrchs",
  "llvm.s390.vstrczb",
  "llvm.s390.vstrczbs",
  "llvm.s390.vstrczf",
  "llvm.s390.vstrczfs",
  "llvm.s390.vstrczh",
  "llvm.s390.vstrczhs",
  "llvm.s390.vstrl",
  "llvm.s390.vstrsb",
  "llvm.s390.vstrsf",
  "llvm.s390.vstrsh",
  "llvm.s390.vstrszb",
  "llvm.s390.vstrszf",
  "llvm.s390.vstrszh",
  "llvm.s390.vsumb",
  "llvm.s390.vsumgf",
  "llvm.s390.vsumgh",
  "llvm.s390.vsumh",
  "llvm.s390.vsumqf",
  "llvm.s390.vsumqg",
  "llvm.s390.vtm",
  "llvm.s390.vuphb",
  "llvm.s390.vuphf",
  "llvm.s390.vuphh",
  "llvm.s390.vuplb",
  "llvm.s390.vuplf",
  "llvm.s390.vuplhb",
  "llvm.s390.vuplhf",
  "llvm.s390.vuplhh",
  "llvm.s390.vuplhw",
  "llvm.s390.vupllb",
  "llvm.s390.vupllf",
  "llvm.s390.vupllh",
  "llvm.spv.alloca",
  "llvm.spv.assign.name",
  "llvm.spv.assign.type",
  "llvm.spv.bitcast",
  "llvm.spv.cmpxchg",
  "llvm.spv.const.composite",
  "llvm.spv.extractelt",
  "llvm.spv.extractv",
  "llvm.spv.gep",
  "llvm.spv.init.global",
  "llvm.spv.insertelt",
  "llvm.spv.insertv",
  "llvm.spv.load",
  "llvm.spv.store",
  "llvm.spv.switch",
  "llvm.spv.track.constant",
  "llvm.spv.undef",
  "llvm.spv.unreachable",
  "llvm.spv.unref.global",
  "llvm.ve.vl.andm.MMM",
  "llvm.ve.vl.andm.mmm",
  "llvm.ve.vl.eqvm.MMM",
  "llvm.ve.vl.eqvm.mmm",
  "llvm.ve.vl.extract.vm512l",
  "llvm.ve.vl.extract.vm512u",
  "llvm.ve.vl.fencec.s",
  "llvm.ve.vl.fencei",
  "llvm.ve.vl.fencem.s",
  "llvm.ve.vl.fidcr.sss",
  "llvm.ve.vl.insert.vm512l",
  "llvm.ve.vl.insert.vm512u",
  "llvm.ve.vl.lcr.sss",
  "llvm.ve.vl.lsv.vvss",
  "llvm.ve.vl.lvm.MMss",
  "llvm.ve.vl.lvm.mmss",
  "llvm.ve.vl.lvsd.svs",
  "llvm.ve.vl.lvsl.svs",
  "llvm.ve.vl.lvss.svs",
  "llvm.ve.vl.lzvm.sml",
  "llvm.ve.vl.negm.MM",
  "llvm.ve.vl.negm.mm",
  "llvm.ve.vl.nndm.MMM",
  "llvm.ve.vl.nndm.mmm",
  "llvm.ve.vl.orm.MMM",
  "llvm.ve.vl.orm.mmm",
  "llvm.ve.vl.pack.f32a",
  "llvm.ve.vl.pack.f32p",
  "llvm.ve.vl.pcvm.sml",
  "llvm.ve.vl.pfchv.ssl",
  "llvm.ve.vl.pfchvnc.ssl",
  "llvm.ve.vl.pvadds.vsvMvl",
  "llvm.ve.vl.pvadds.vsvl",
  "llvm.ve.vl.pvadds.vsvvl",
  "llvm.ve.vl.pvadds.vvvMvl",
  "llvm.ve.vl.pvadds.vvvl",
  "llvm.ve.vl.pvadds.vvvvl",
  "llvm.ve.vl.pvaddu.vsvMvl",
  "llvm.ve.vl.pvaddu.vsvl",
  "llvm.ve.vl.pvaddu.vsvvl",
  "llvm.ve.vl.pvaddu.vvvMvl",
  "llvm.ve.vl.pvaddu.vvvl",
  "llvm.ve.vl.pvaddu.vvvvl",
  "llvm.ve.vl.pvand.vsvMvl",
  "llvm.ve.vl.pvand.vsvl",
  "llvm.ve.vl.pvand.vsvvl",
  "llvm.ve.vl.pvand.vvvMvl",
  "llvm.ve.vl.pvand.vvvl",
  "llvm.ve.vl.pvand.vvvvl",
  "llvm.ve.vl.pvbrd.vsMvl",
  "llvm.ve.vl.pvbrd.vsl",
  "llvm.ve.vl.pvbrd.vsvl",
  "llvm.ve.vl.pvbrv.vvMvl",
  "llvm.ve.vl.pvbrv.vvl",
  "llvm.ve.vl.pvbrv.vvvl",
  "llvm.ve.vl.pvbrvlo.vvl",
  "llvm.ve.vl.pvbrvlo.vvmvl",
  "llvm.ve.vl.pvbrvlo.vvvl",
  "llvm.ve.vl.pvbrvup.vvl",
  "llvm.ve.vl.pvbrvup.vvmvl",
  "llvm.ve.vl.pvbrvup.vvvl",
  "llvm.ve.vl.pvcmps.vsvMvl",
  "llvm.ve.vl.pvcmps.vsvl",
  "llvm.ve.vl.pvcmps.vsvvl",
  "llvm.ve.vl.pvcmps.vvvMvl",
  "llvm.ve.vl.pvcmps.vvvl",
  "llvm.ve.vl.pvcmps.vvvvl",
  "llvm.ve.vl.pvcmpu.vsvMvl",
  "llvm.ve.vl.pvcmpu.vsvl",
  "llvm.ve.vl.pvcmpu.vsvvl",
  "llvm.ve.vl.pvcmpu.vvvMvl",
  "llvm.ve.vl.pvcmpu.vvvl",
  "llvm.ve.vl.pvcmpu.vvvvl",
  "llvm.ve.vl.pvcvtsw.vvl",
  "llvm.ve.vl.pvcvtsw.vvvl",
  "llvm.ve.vl.pvcvtws.vvMvl",
  "llvm.ve.vl.pvcvtws.vvl",
  "llvm.ve.vl.pvcvtws.vvvl",
  "llvm.ve.vl.pvcvtwsrz.vvMvl",
  "llvm.ve.vl.pvcvtwsrz.vvl",
  "llvm.ve.vl.pvcvtwsrz.vvvl",
  "llvm.ve.vl.pveqv.vsvMvl",
  "llvm.ve.vl.pveqv.vsvl",
  "llvm.ve.vl.pveqv.vsvvl",
  "llvm.ve.vl.pveqv.vvvMvl",
  "llvm.ve.vl.pveqv.vvvl",
  "llvm.ve.vl.pveqv.vvvvl",
  "llvm.ve.vl.pvfadd.vsvMvl",
  "llvm.ve.vl.pvfadd.vsvl",
  "llvm.ve.vl.pvfadd.vsvvl",
  "llvm.ve.vl.pvfadd.vvvMvl",
  "llvm.ve.vl.pvfadd.vvvl",
  "llvm.ve.vl.pvfadd.vvvvl",
  "llvm.ve.vl.pvfcmp.vsvMvl",
  "llvm.ve.vl.pvfcmp.vsvl",
  "llvm.ve.vl.pvfcmp.vsvvl",
  "llvm.ve.vl.pvfcmp.vvvMvl",
  "llvm.ve.vl.pvfcmp.vvvl",
  "llvm.ve.vl.pvfcmp.vvvvl",
  "llvm.ve.vl.pvfmad.vsvvMvl",
  "llvm.ve.vl.pvfmad.vsvvl",
  "llvm.ve.vl.pvfmad.vsvvvl",
  "llvm.ve.vl.pvfmad.vvsvMvl",
  "llvm.ve.vl.pvfmad.vvsvl",
  "llvm.ve.vl.pvfmad.vvsvvl",
  "llvm.ve.vl.pvfmad.vvvvMvl",
  "llvm.ve.vl.pvfmad.vvvvl",
  "llvm.ve.vl.pvfmad.vvvvvl",
  "llvm.ve.vl.pvfmax.vsvMvl",
  "llvm.ve.vl.pvfmax.vsvl",
  "llvm.ve.vl.pvfmax.vsvvl",
  "llvm.ve.vl.pvfmax.vvvMvl",
  "llvm.ve.vl.pvfmax.vvvl",
  "llvm.ve.vl.pvfmax.vvvvl",
  "llvm.ve.vl.pvfmin.vsvMvl",
  "llvm.ve.vl.pvfmin.vsvl",
  "llvm.ve.vl.pvfmin.vsvvl",
  "llvm.ve.vl.pvfmin.vvvMvl",
  "llvm.ve.vl.pvfmin.vvvl",
  "llvm.ve.vl.pvfmin.vvvvl",
  "llvm.ve.vl.pvfmkaf.Ml",
  "llvm.ve.vl.pvfmkat.Ml",
  "llvm.ve.vl.pvfmkseq.MvMl",
  "llvm.ve.vl.pvfmkseq.Mvl",
  "llvm.ve.vl.pvfmkseqnan.MvMl",
  "llvm.ve.vl.pvfmkseqnan.Mvl",
  "llvm.ve.vl.pvfmksge.MvMl",
  "llvm.ve.vl.pvfmksge.Mvl",
  "llvm.ve.vl.pvfmksgenan.MvMl",
  "llvm.ve.vl.pvfmksgenan.Mvl",
  "llvm.ve.vl.pvfmksgt.MvMl",
  "llvm.ve.vl.pvfmksgt.Mvl",
  "llvm.ve.vl.pvfmksgtnan.MvMl",
  "llvm.ve.vl.pvfmksgtnan.Mvl",
  "llvm.ve.vl.pvfmksle.MvMl",
  "llvm.ve.vl.pvfmksle.Mvl",
  "llvm.ve.vl.pvfmkslenan.MvMl",
  "llvm.ve.vl.pvfmkslenan.Mvl",
  "llvm.ve.vl.pvfmksloeq.mvl",
  "llvm.ve.vl.pvfmksloeq.mvml",
  "llvm.ve.vl.pvfmksloeqnan.mvl",
  "llvm.ve.vl.pvfmksloeqnan.mvml",
  "llvm.ve.vl.pvfmksloge.mvl",
  "llvm.ve.vl.pvfmksloge.mvml",
  "llvm.ve.vl.pvfmkslogenan.mvl",
  "llvm.ve.vl.pvfmkslogenan.mvml",
  "llvm.ve.vl.pvfmkslogt.mvl",
  "llvm.ve.vl.pvfmkslogt.mvml",
  "llvm.ve.vl.pvfmkslogtnan.mvl",
  "llvm.ve.vl.pvfmkslogtnan.mvml",
  "llvm.ve.vl.pvfmkslole.mvl",
  "llvm.ve.vl.pvfmkslole.mvml",
  "llvm.ve.vl.pvfmkslolenan.mvl",
  "llvm.ve.vl.pvfmkslolenan.mvml",
  "llvm.ve.vl.pvfmkslolt.mvl",
  "llvm.ve.vl.pvfmkslolt.mvml",
  "llvm.ve.vl.pvfmksloltnan.mvl",
  "llvm.ve.vl.pvfmksloltnan.mvml",
  "llvm.ve.vl.pvfmkslonan.mvl",
  "llvm.ve.vl.pvfmkslonan.mvml",
  "llvm.ve.vl.pvfmkslone.mvl",
  "llvm.ve.vl.pvfmkslone.mvml",
  "llvm.ve.vl.pvfmkslonenan.mvl",
  "llvm.ve.vl.pvfmkslonenan.mvml",
  "llvm.ve.vl.pvfmkslonum.mvl",
  "llvm.ve.vl.pvfmkslonum.mvml",
  "llvm.ve.vl.pvfmkslt.MvMl",
  "llvm.ve.vl.pvfmkslt.Mvl",
  "llvm.ve.vl.pvfmksltnan.MvMl",
  "llvm.ve.vl.pvfmksltnan.Mvl",
  "llvm.ve.vl.pvfmksnan.MvMl",
  "llvm.ve.vl.pvfmksnan.Mvl",
  "llvm.ve.vl.pvfmksne.MvMl",
  "llvm.ve.vl.pvfmksne.Mvl",
  "llvm.ve.vl.pvfmksnenan.MvMl",
  "llvm.ve.vl.pvfmksnenan.Mvl",
  "llvm.ve.vl.pvfmksnum.MvMl",
  "llvm.ve.vl.pvfmksnum.Mvl",
  "llvm.ve.vl.pvfmksupeq.mvl",
  "llvm.ve.vl.pvfmksupeq.mvml",
  "llvm.ve.vl.pvfmksupeqnan.mvl",
  "llvm.ve.vl.pvfmksupeqnan.mvml",
  "llvm.ve.vl.pvfmksupge.mvl",
  "llvm.ve.vl.pvfmksupge.mvml",
  "llvm.ve.vl.pvfmksupgenan.mvl",
  "llvm.ve.vl.pvfmksupgenan.mvml",
  "llvm.ve.vl.pvfmksupgt.mvl",
  "llvm.ve.vl.pvfmksupgt.mvml",
  "llvm.ve.vl.pvfmksupgtnan.mvl",
  "llvm.ve.vl.pvfmksupgtnan.mvml",
  "llvm.ve.vl.pvfmksuple.mvl",
  "llvm.ve.vl.pvfmksuple.mvml",
  "llvm.ve.vl.pvfmksuplenan.mvl",
  "llvm.ve.vl.pvfmksuplenan.mvml",
  "llvm.ve.vl.pvfmksuplt.mvl",
  "llvm.ve.vl.pvfmksuplt.mvml",
  "llvm.ve.vl.pvfmksupltnan.mvl",
  "llvm.ve.vl.pvfmksupltnan.mvml",
  "llvm.ve.vl.pvfmksupnan.mvl",
  "llvm.ve.vl.pvfmksupnan.mvml",
  "llvm.ve.vl.pvfmksupne.mvl",
  "llvm.ve.vl.pvfmksupne.mvml",
  "llvm.ve.vl.pvfmksupnenan.mvl",
  "llvm.ve.vl.pvfmksupnenan.mvml",
  "llvm.ve.vl.pvfmksupnum.mvl",
  "llvm.ve.vl.pvfmksupnum.mvml",
  "llvm.ve.vl.pvfmkweq.MvMl",
  "llvm.ve.vl.pvfmkweq.Mvl",
  "llvm.ve.vl.pvfmkweqnan.MvMl",
  "llvm.ve.vl.pvfmkweqnan.Mvl",
  "llvm.ve.vl.pvfmkwge.MvMl",
  "llvm.ve.vl.pvfmkwge.Mvl",
  "llvm.ve.vl.pvfmkwgenan.MvMl",
  "llvm.ve.vl.pvfmkwgenan.Mvl",
  "llvm.ve.vl.pvfmkwgt.MvMl",
  "llvm.ve.vl.pvfmkwgt.Mvl",
  "llvm.ve.vl.pvfmkwgtnan.MvMl",
  "llvm.ve.vl.pvfmkwgtnan.Mvl",
  "llvm.ve.vl.pvfmkwle.MvMl",
  "llvm.ve.vl.pvfmkwle.Mvl",
  "llvm.ve.vl.pvfmkwlenan.MvMl",
  "llvm.ve.vl.pvfmkwlenan.Mvl",
  "llvm.ve.vl.pvfmkwloeq.mvl",
  "llvm.ve.vl.pvfmkwloeq.mvml",
  "llvm.ve.vl.pvfmkwloeqnan.mvl",
  "llvm.ve.vl.pvfmkwloeqnan.mvml",
  "llvm.ve.vl.pvfmkwloge.mvl",
  "llvm.ve.vl.pvfmkwloge.mvml",
  "llvm.ve.vl.pvfmkwlogenan.mvl",
  "llvm.ve.vl.pvfmkwlogenan.mvml",
  "llvm.ve.vl.pvfmkwlogt.mvl",
  "llvm.ve.vl.pvfmkwlogt.mvml",
  "llvm.ve.vl.pvfmkwlogtnan.mvl",
  "llvm.ve.vl.pvfmkwlogtnan.mvml",
  "llvm.ve.vl.pvfmkwlole.mvl",
  "llvm.ve.vl.pvfmkwlole.mvml",
  "llvm.ve.vl.pvfmkwlolenan.mvl",
  "llvm.ve.vl.pvfmkwlolenan.mvml",
  "llvm.ve.vl.pvfmkwlolt.mvl",
  "llvm.ve.vl.pvfmkwlolt.mvml",
  "llvm.ve.vl.pvfmkwloltnan.mvl",
  "llvm.ve.vl.pvfmkwloltnan.mvml",
  "llvm.ve.vl.pvfmkwlonan.mvl",
  "llvm.ve.vl.pvfmkwlonan.mvml",
  "llvm.ve.vl.pvfmkwlone.mvl",
  "llvm.ve.vl.pvfmkwlone.mvml",
  "llvm.ve.vl.pvfmkwlonenan.mvl",
  "llvm.ve.vl.pvfmkwlonenan.mvml",
  "llvm.ve.vl.pvfmkwlonum.mvl",
  "llvm.ve.vl.pvfmkwlonum.mvml",
  "llvm.ve.vl.pvfmkwlt.MvMl",
  "llvm.ve.vl.pvfmkwlt.Mvl",
  "llvm.ve.vl.pvfmkwltnan.MvMl",
  "llvm.ve.vl.pvfmkwltnan.Mvl",
  "llvm.ve.vl.pvfmkwnan.MvMl",
  "llvm.ve.vl.pvfmkwnan.Mvl",
  "llvm.ve.vl.pvfmkwne.MvMl",
  "llvm.ve.vl.pvfmkwne.Mvl",
  "llvm.ve.vl.pvfmkwnenan.MvMl",
  "llvm.ve.vl.pvfmkwnenan.Mvl",
  "llvm.ve.vl.pvfmkwnum.MvMl",
  "llvm.ve.vl.pvfmkwnum.Mvl",
  "llvm.ve.vl.pvfmkwupeq.mvl",
  "llvm.ve.vl.pvfmkwupeq.mvml",
  "llvm.ve.vl.pvfmkwupeqnan.mvl",
  "llvm.ve.vl.pvfmkwupeqnan.mvml",
  "llvm.ve.vl.pvfmkwupge.mvl",
  "llvm.ve.vl.pvfmkwupge.mvml",
  "llvm.ve.vl.pvfmkwupgenan.mvl",
  "llvm.ve.vl.pvfmkwupgenan.mvml",
  "llvm.ve.vl.pvfmkwupgt.mvl",
  "llvm.ve.vl.pvfmkwupgt.mvml",
  "llvm.ve.vl.pvfmkwupgtnan.mvl",
  "llvm.ve.vl.pvfmkwupgtnan.mvml",
  "llvm.ve.vl.pvfmkwuple.mvl",
  "llvm.ve.vl.pvfmkwuple.mvml",
  "llvm.ve.vl.pvfmkwuplenan.mvl",
  "llvm.ve.vl.pvfmkwuplenan.mvml",
  "llvm.ve.vl.pvfmkwuplt.mvl",
  "llvm.ve.vl.pvfmkwuplt.mvml",
  "llvm.ve.vl.pvfmkwupltnan.mvl",
  "llvm.ve.vl.pvfmkwupltnan.mvml",
  "llvm.ve.vl.pvfmkwupnan.mvl",
  "llvm.ve.vl.pvfmkwupnan.mvml",
  "llvm.ve.vl.pvfmkwupne.mvl",
  "llvm.ve.vl.pvfmkwupne.mvml",
  "llvm.ve.vl.pvfmkwupnenan.mvl",
  "llvm.ve.vl.pvfmkwupnenan.mvml",
  "llvm.ve.vl.pvfmkwupnum.mvl",
  "llvm.ve.vl.pvfmkwupnum.mvml",
  "llvm.ve.vl.pvfmsb.vsvvMvl",
  "llvm.ve.vl.pvfmsb.vsvvl",
  "llvm.ve.vl.pvfmsb.vsvvvl",
  "llvm.ve.vl.pvfmsb.vvsvMvl",
  "llvm.ve.vl.pvfmsb.vvsvl",
  "llvm.ve.vl.pvfmsb.vvsvvl",
  "llvm.ve.vl.pvfmsb.vvvvMvl",
  "llvm.ve.vl.pvfmsb.vvvvl",
  "llvm.ve.vl.pvfmsb.vvvvvl",
  "llvm.ve.vl.pvfmul.vsvMvl",
  "llvm.ve.vl.pvfmul.vsvl",
  "llvm.ve.vl.pvfmul.vsvvl",
  "llvm.ve.vl.pvfmul.vvvMvl",
  "llvm.ve.vl.pvfmul.vvvl",
  "llvm.ve.vl.pvfmul.vvvvl",
  "llvm.ve.vl.pvfnmad.vsvvMvl",
  "llvm.ve.vl.pvfnmad.vsvvl",
  "llvm.ve.vl.pvfnmad.vsvvvl",
  "llvm.ve.vl.pvfnmad.vvsvMvl",
  "llvm.ve.vl.pvfnmad.vvsvl",
  "llvm.ve.vl.pvfnmad.vvsvvl",
  "llvm.ve.vl.pvfnmad.vvvvMvl",
  "llvm.ve.vl.pvfnmad.vvvvl",
  "llvm.ve.vl.pvfnmad.vvvvvl",
  "llvm.ve.vl.pvfnmsb.vsvvMvl",
  "llvm.ve.vl.pvfnmsb.vsvvl",
  "llvm.ve.vl.pvfnmsb.vsvvvl",
  "llvm.ve.vl.pvfnmsb.vvsvMvl",
  "llvm.ve.vl.pvfnmsb.vvsvl",
  "llvm.ve.vl.pvfnmsb.vvsvvl",
  "llvm.ve.vl.pvfnmsb.vvvvMvl",
  "llvm.ve.vl.pvfnmsb.vvvvl",
  "llvm.ve.vl.pvfnmsb.vvvvvl",
  "llvm.ve.vl.pvfsub.vsvMvl",
  "llvm.ve.vl.pvfsub.vsvl",
  "llvm.ve.vl.pvfsub.vsvvl",
  "llvm.ve.vl.pvfsub.vvvMvl",
  "llvm.ve.vl.pvfsub.vvvl",
  "llvm.ve.vl.pvfsub.vvvvl",
  "llvm.ve.vl.pvldz.vvMvl",
  "llvm.ve.vl.pvldz.vvl",
  "llvm.ve.vl.pvldz.vvvl",
  "llvm.ve.vl.pvldzlo.vvl",
  "llvm.ve.vl.pvldzlo.vvmvl",
  "llvm.ve.vl.pvldzlo.vvvl",
  "llvm.ve.vl.pvldzup.vvl",
  "llvm.ve.vl.pvldzup.vvmvl",
  "llvm.ve.vl.pvldzup.vvvl",
  "llvm.ve.vl.pvmaxs.vsvMvl",
  "llvm.ve.vl.pvmaxs.vsvl",
  "llvm.ve.vl.pvmaxs.vsvvl",
  "llvm.ve.vl.pvmaxs.vvvMvl",
  "llvm.ve.vl.pvmaxs.vvvl",
  "llvm.ve.vl.pvmaxs.vvvvl",
  "llvm.ve.vl.pvmins.vsvMvl",
  "llvm.ve.vl.pvmins.vsvl",
  "llvm.ve.vl.pvmins.vsvvl",
  "llvm.ve.vl.pvmins.vvvMvl",
  "llvm.ve.vl.pvmins.vvvl",
  "llvm.ve.vl.pvmins.vvvvl",
  "llvm.ve.vl.pvor.vsvMvl",
  "llvm.ve.vl.pvor.vsvl",
  "llvm.ve.vl.pvor.vsvvl",
  "llvm.ve.vl.pvor.vvvMvl",
  "llvm.ve.vl.pvor.vvvl",
  "llvm.ve.vl.pvor.vvvvl",
  "llvm.ve.vl.pvpcnt.vvMvl",
  "llvm.ve.vl.pvpcnt.vvl",
  "llvm.ve.vl.pvpcnt.vvvl",
  "llvm.ve.vl.pvpcntlo.vvl",
  "llvm.ve.vl.pvpcntlo.vvmvl",
  "llvm.ve.vl.pvpcntlo.vvvl",
  "llvm.ve.vl.pvpcntup.vvl",
  "llvm.ve.vl.pvpcntup.vvmvl",
  "llvm.ve.vl.pvpcntup.vvvl",
  "llvm.ve.vl.pvrcp.vvl",
  "llvm.ve.vl.pvrcp.vvvl",
  "llvm.ve.vl.pvrsqrt.vvl",
  "llvm.ve.vl.pvrsqrt.vvvl",
  "llvm.ve.vl.pvrsqrtnex.vvl",
  "llvm.ve.vl.pvrsqrtnex.vvvl",
  "llvm.ve.vl.pvseq.vl",
  "llvm.ve.vl.pvseq.vvl",
  "llvm.ve.vl.pvseqlo.vl",
  "llvm.ve.vl.pvseqlo.vvl",
  "llvm.ve.vl.pvsequp.vl",
  "llvm.ve.vl.pvsequp.vvl",
  "llvm.ve.vl.pvsla.vvsMvl",
  "llvm.ve.vl.pvsla.vvsl",
  "llvm.ve.vl.pvsla.vvsvl",
  "llvm.ve.vl.pvsla.vvvMvl",
  "llvm.ve.vl.pvsla.vvvl",
  "llvm.ve.vl.pvsla.vvvvl",
  "llvm.ve.vl.pvsll.vvsMvl",
  "llvm.ve.vl.pvsll.vvsl",
  "llvm.ve.vl.pvsll.vvsvl",
  "llvm.ve.vl.pvsll.vvvMvl",
  "llvm.ve.vl.pvsll.vvvl",
  "llvm.ve.vl.pvsll.vvvvl",
  "llvm.ve.vl.pvsra.vvsMvl",
  "llvm.ve.vl.pvsra.vvsl",
  "llvm.ve.vl.pvsra.vvsvl",
  "llvm.ve.vl.pvsra.vvvMvl",
  "llvm.ve.vl.pvsra.vvvl",
  "llvm.ve.vl.pvsra.vvvvl",
  "llvm.ve.vl.pvsrl.vvsMvl",
  "llvm.ve.vl.pvsrl.vvsl",
  "llvm.ve.vl.pvsrl.vvsvl",
  "llvm.ve.vl.pvsrl.vvvMvl",
  "llvm.ve.vl.pvsrl.vvvl",
  "llvm.ve.vl.pvsrl.vvvvl",
  "llvm.ve.vl.pvsubs.vsvMvl",
  "llvm.ve.vl.pvsubs.vsvl",
  "llvm.ve.vl.pvsubs.vsvvl",
  "llvm.ve.vl.pvsubs.vvvMvl",
  "llvm.ve.vl.pvsubs.vvvl",
  "llvm.ve.vl.pvsubs.vvvvl",
  "llvm.ve.vl.pvsubu.vsvMvl",
  "llvm.ve.vl.pvsubu.vsvl",
  "llvm.ve.vl.pvsubu.vsvvl",
  "llvm.ve.vl.pvsubu.vvvMvl",
  "llvm.ve.vl.pvsubu.vvvl",
  "llvm.ve.vl.pvsubu.vvvvl",
  "llvm.ve.vl.pvxor.vsvMvl",
  "llvm.ve.vl.pvxor.vsvl",
  "llvm.ve.vl.pvxor.vsvvl",
  "llvm.ve.vl.pvxor.vvvMvl",
  "llvm.ve.vl.pvxor.vvvl",
  "llvm.ve.vl.pvxor.vvvvl",
  "llvm.ve.vl.scr.sss",
  "llvm.ve.vl.svm.sMs",
  "llvm.ve.vl.svm.sms",
  "llvm.ve.vl.svob",
  "llvm.ve.vl.tovm.sml",
  "llvm.ve.vl.tscr.ssss",
  "llvm.ve.vl.vaddsl.vsvl",
  "llvm.ve.vl.vaddsl.vsvmvl",
  "llvm.ve.vl.vaddsl.vsvvl",
  "llvm.ve.vl.vaddsl.vvvl",
  "llvm.ve.vl.vaddsl.vvvmvl",
  "llvm.ve.vl.vaddsl.vvvvl",
  "llvm.ve.vl.vaddswsx.vsvl",
  "llvm.ve.vl.vaddswsx.vsvmvl",
  "llvm.ve.vl.vaddswsx.vsvvl",
  "llvm.ve.vl.vaddswsx.vvvl",
  "llvm.ve.vl.vaddswsx.vvvmvl",
  "llvm.ve.vl.vaddswsx.vvvvl",
  "llvm.ve.vl.vaddswzx.vsvl",
  "llvm.ve.vl.vaddswzx.vsvmvl",
  "llvm.ve.vl.vaddswzx.vsvvl",
  "llvm.ve.vl.vaddswzx.vvvl",
  "llvm.ve.vl.vaddswzx.vvvmvl",
  "llvm.ve.vl.vaddswzx.vvvvl",
  "llvm.ve.vl.vaddul.vsvl",
  "llvm.ve.vl.vaddul.vsvmvl",
  "llvm.ve.vl.vaddul.vsvvl",
  "llvm.ve.vl.vaddul.vvvl",
  "llvm.ve.vl.vaddul.vvvmvl",
  "llvm.ve.vl.vaddul.vvvvl",
  "llvm.ve.vl.vadduw.vsvl",
  "llvm.ve.vl.vadduw.vsvmvl",
  "llvm.ve.vl.vadduw.vsvvl",
  "llvm.ve.vl.vadduw.vvvl",
  "llvm.ve.vl.vadduw.vvvmvl",
  "llvm.ve.vl.vadduw.vvvvl",
  "llvm.ve.vl.vand.vsvl",
  "llvm.ve.vl.vand.vsvmvl",
  "llvm.ve.vl.vand.vsvvl",
  "llvm.ve.vl.vand.vvvl",
  "llvm.ve.vl.vand.vvvmvl",
  "llvm.ve.vl.vand.vvvvl",
  "llvm.ve.vl.vbrdd.vsl",
  "llvm.ve.vl.vbrdd.vsmvl",
  "llvm.ve.vl.vbrdd.vsvl",
  "llvm.ve.vl.vbrdl.vsl",
  "llvm.ve.vl.vbrdl.vsmvl",
  "llvm.ve.vl.vbrdl.vsvl",
  "llvm.ve.vl.vbrds.vsl",
  "llvm.ve.vl.vbrds.vsmvl",
  "llvm.ve.vl.vbrds.vsvl",
  "llvm.ve.vl.vbrdw.vsl",
  "llvm.ve.vl.vbrdw.vsmvl",
  "llvm.ve.vl.vbrdw.vsvl",
  "llvm.ve.vl.vbrv.vvl",
  "llvm.ve.vl.vbrv.vvmvl",
  "llvm.ve.vl.vbrv.vvvl",
  "llvm.ve.vl.vcmpsl.vsvl",
  "llvm.ve.vl.vcmpsl.vsvmvl",
  "llvm.ve.vl.vcmpsl.vsvvl",
  "llvm.ve.vl.vcmpsl.vvvl",
  "llvm.ve.vl.vcmpsl.vvvmvl",
  "llvm.ve.vl.vcmpsl.vvvvl",
  "llvm.ve.vl.vcmpswsx.vsvl",
  "llvm.ve.vl.vcmpswsx.vsvmvl",
  "llvm.ve.vl.vcmpswsx.vsvvl",
  "llvm.ve.vl.vcmpswsx.vvvl",
  "llvm.ve.vl.vcmpswsx.vvvmvl",
  "llvm.ve.vl.vcmpswsx.vvvvl",
  "llvm.ve.vl.vcmpswzx.vsvl",
  "llvm.ve.vl.vcmpswzx.vsvmvl",
  "llvm.ve.vl.vcmpswzx.vsvvl",
  "llvm.ve.vl.vcmpswzx.vvvl",
  "llvm.ve.vl.vcmpswzx.vvvmvl",
  "llvm.ve.vl.vcmpswzx.vvvvl",
  "llvm.ve.vl.vcmpul.vsvl",
  "llvm.ve.vl.vcmpul.vsvmvl",
  "llvm.ve.vl.vcmpul.vsvvl",
  "llvm.ve.vl.vcmpul.vvvl",
  "llvm.ve.vl.vcmpul.vvvmvl",
  "llvm.ve.vl.vcmpul.vvvvl",
  "llvm.ve.vl.vcmpuw.vsvl",
  "llvm.ve.vl.vcmpuw.vsvmvl",
  "llvm.ve.vl.vcmpuw.vsvvl",
  "llvm.ve.vl.vcmpuw.vvvl",
  "llvm.ve.vl.vcmpuw.vvvmvl",
  "llvm.ve.vl.vcmpuw.vvvvl",
  "llvm.ve.vl.vcp.vvmvl",
  "llvm.ve.vl.vcvtdl.vvl",
  "llvm.ve.vl.vcvtdl.vvvl",
  "llvm.ve.vl.vcvtds.vvl",
  "llvm.ve.vl.vcvtds.vvvl",
  "llvm.ve.vl.vcvtdw.vvl",
  "llvm.ve.vl.vcvtdw.vvvl",
  "llvm.ve.vl.vcvtld.vvl",
  "llvm.ve.vl.vcvtld.vvmvl",
  "llvm.ve.vl.vcvtld.vvvl",
  "llvm.ve.vl.vcvtldrz.vvl",
  "llvm.ve.vl.vcvtldrz.vvmvl",
  "llvm.ve.vl.vcvtldrz.vvvl",
  "llvm.ve.vl.vcvtsd.vvl",
  "llvm.ve.vl.vcvtsd.vvvl",
  "llvm.ve.vl.vcvtsw.vvl",
  "llvm.ve.vl.vcvtsw.vvvl",
  "llvm.ve.vl.vcvtwdsx.vvl",
  "llvm.ve.vl.vcvtwdsx.vvmvl",
  "llvm.ve.vl.vcvtwdsx.vvvl",
  "llvm.ve.vl.vcvtwdsxrz.vvl",
  "llvm.ve.vl.vcvtwdsxrz.vvmvl",
  "llvm.ve.vl.vcvtwdsxrz.vvvl",
  "llvm.ve.vl.vcvtwdzx.vvl",
  "llvm.ve.vl.vcvtwdzx.vvmvl",
  "llvm.ve.vl.vcvtwdzx.vvvl",
  "llvm.ve.vl.vcvtwdzxrz.vvl",
  "llvm.ve.vl.vcvtwdzxrz.vvmvl",
  "llvm.ve.vl.vcvtwdzxrz.vvvl",
  "llvm.ve.vl.vcvtwssx.vvl",
  "llvm.ve.vl.vcvtwssx.vvmvl",
  "llvm.ve.vl.vcvtwssx.vvvl",
  "llvm.ve.vl.vcvtwssxrz.vvl",
  "llvm.ve.vl.vcvtwssxrz.vvmvl",
  "llvm.ve.vl.vcvtwssxrz.vvvl",
  "llvm.ve.vl.vcvtwszx.vvl",
  "llvm.ve.vl.vcvtwszx.vvmvl",
  "llvm.ve.vl.vcvtwszx.vvvl",
  "llvm.ve.vl.vcvtwszxrz.vvl",
  "llvm.ve.vl.vcvtwszxrz.vvmvl",
  "llvm.ve.vl.vcvtwszxrz.vvvl",
  "llvm.ve.vl.vdivsl.vsvl",
  "llvm.ve.vl.vdivsl.vsvmvl",
  "llvm.ve.vl.vdivsl.vsvvl",
  "llvm.ve.vl.vdivsl.vvsl",
  "llvm.ve.vl.vdivsl.vvsmvl",
  "llvm.ve.vl.vdivsl.vvsvl",
  "llvm.ve.vl.vdivsl.vvvl",
  "llvm.ve.vl.vdivsl.vvvmvl",
  "llvm.ve.vl.vdivsl.vvvvl",
  "llvm.ve.vl.vdivswsx.vsvl",
  "llvm.ve.vl.vdivswsx.vsvmvl",
  "llvm.ve.vl.vdivswsx.vsvvl",
  "llvm.ve.vl.vdivswsx.vvsl",
  "llvm.ve.vl.vdivswsx.vvsmvl",
  "llvm.ve.vl.vdivswsx.vvsvl",
  "llvm.ve.vl.vdivswsx.vvvl",
  "llvm.ve.vl.vdivswsx.vvvmvl",
  "llvm.ve.vl.vdivswsx.vvvvl",
  "llvm.ve.vl.vdivswzx.vsvl",
  "llvm.ve.vl.vdivswzx.vsvmvl",
  "llvm.ve.vl.vdivswzx.vsvvl",
  "llvm.ve.vl.vdivswzx.vvsl",
  "llvm.ve.vl.vdivswzx.vvsmvl",
  "llvm.ve.vl.vdivswzx.vvsvl",
  "llvm.ve.vl.vdivswzx.vvvl",
  "llvm.ve.vl.vdivswzx.vvvmvl",
  "llvm.ve.vl.vdivswzx.vvvvl",
  "llvm.ve.vl.vdivul.vsvl",
  "llvm.ve.vl.vdivul.vsvmvl",
  "llvm.ve.vl.vdivul.vsvvl",
  "llvm.ve.vl.vdivul.vvsl",
  "llvm.ve.vl.vdivul.vvsmvl",
  "llvm.ve.vl.vdivul.vvsvl",
  "llvm.ve.vl.vdivul.vvvl",
  "llvm.ve.vl.vdivul.vvvmvl",
  "llvm.ve.vl.vdivul.vvvvl",
  "llvm.ve.vl.vdivuw.vsvl",
  "llvm.ve.vl.vdivuw.vsvmvl",
  "llvm.ve.vl.vdivuw.vsvvl",
  "llvm.ve.vl.vdivuw.vvsl",
  "llvm.ve.vl.vdivuw.vvsmvl",
  "llvm.ve.vl.vdivuw.vvsvl",
  "llvm.ve.vl.vdivuw.vvvl",
  "llvm.ve.vl.vdivuw.vvvmvl",
  "llvm.ve.vl.vdivuw.vvvvl",
  "llvm.ve.vl.veqv.vsvl",
  "llvm.ve.vl.veqv.vsvmvl",
  "llvm.ve.vl.veqv.vsvvl",
  "llvm.ve.vl.veqv.vvvl",
  "llvm.ve.vl.veqv.vvvmvl",
  "llvm.ve.vl.veqv.vvvvl",
  "llvm.ve.vl.vex.vvmvl",
  "llvm.ve.vl.vfaddd.vsvl",
  "llvm.ve.vl.vfaddd.vsvmvl",
  "llvm.ve.vl.vfaddd.vsvvl",
  "llvm.ve.vl.vfaddd.vvvl",
  "llvm.ve.vl.vfaddd.vvvmvl",
  "llvm.ve.vl.vfaddd.vvvvl",
  "llvm.ve.vl.vfadds.vsvl",
  "llvm.ve.vl.vfadds.vsvmvl",
  "llvm.ve.vl.vfadds.vsvvl",
  "llvm.ve.vl.vfadds.vvvl",
  "llvm.ve.vl.vfadds.vvvmvl",
  "llvm.ve.vl.vfadds.vvvvl",
  "llvm.ve.vl.vfcmpd.vsvl",
  "llvm.ve.vl.vfcmpd.vsvmvl",
  "llvm.ve.vl.vfcmpd.vsvvl",
  "llvm.ve.vl.vfcmpd.vvvl",
  "llvm.ve.vl.vfcmpd.vvvmvl",
  "llvm.ve.vl.vfcmpd.vvvvl",
  "llvm.ve.vl.vfcmps.vsvl",
  "llvm.ve.vl.vfcmps.vsvmvl",
  "llvm.ve.vl.vfcmps.vsvvl",
  "llvm.ve.vl.vfcmps.vvvl",
  "llvm.ve.vl.vfcmps.vvvmvl",
  "llvm.ve.vl.vfcmps.vvvvl",
  "llvm.ve.vl.vfdivd.vsvl",
  "llvm.ve.vl.vfdivd.vsvmvl",
  "llvm.ve.vl.vfdivd.vsvvl",
  "llvm.ve.vl.vfdivd.vvvl",
  "llvm.ve.vl.vfdivd.vvvmvl",
  "llvm.ve.vl.vfdivd.vvvvl",
  "llvm.ve.vl.vfdivs.vsvl",
  "llvm.ve.vl.vfdivs.vsvmvl",
  "llvm.ve.vl.vfdivs.vsvvl",
  "llvm.ve.vl.vfdivs.vvvl",
  "llvm.ve.vl.vfdivs.vvvmvl",
  "llvm.ve.vl.vfdivs.vvvvl",
  "llvm.ve.vl.vfmadd.vsvvl",
  "llvm.ve.vl.vfmadd.vsvvmvl",
  "llvm.ve.vl.vfmadd.vsvvvl",
  "llvm.ve.vl.vfmadd.vvsvl",
  "llvm.ve.vl.vfmadd.vvsvmvl",
  "llvm.ve.vl.vfmadd.vvsvvl",
  "llvm.ve.vl.vfmadd.vvvvl",
  "llvm.ve.vl.vfmadd.vvvvmvl",
  "llvm.ve.vl.vfmadd.vvvvvl",
  "llvm.ve.vl.vfmads.vsvvl",
  "llvm.ve.vl.vfmads.vsvvmvl",
  "llvm.ve.vl.vfmads.vsvvvl",
  "llvm.ve.vl.vfmads.vvsvl",
  "llvm.ve.vl.vfmads.vvsvmvl",
  "llvm.ve.vl.vfmads.vvsvvl",
  "llvm.ve.vl.vfmads.vvvvl",
  "llvm.ve.vl.vfmads.vvvvmvl",
  "llvm.ve.vl.vfmads.vvvvvl",
  "llvm.ve.vl.vfmaxd.vsvl",
  "llvm.ve.vl.vfmaxd.vsvmvl",
  "llvm.ve.vl.vfmaxd.vsvvl",
  "llvm.ve.vl.vfmaxd.vvvl",
  "llvm.ve.vl.vfmaxd.vvvmvl",
  "llvm.ve.vl.vfmaxd.vvvvl",
  "llvm.ve.vl.vfmaxs.vsvl",
  "llvm.ve.vl.vfmaxs.vsvmvl",
  "llvm.ve.vl.vfmaxs.vsvvl",
  "llvm.ve.vl.vfmaxs.vvvl",
  "llvm.ve.vl.vfmaxs.vvvmvl",
  "llvm.ve.vl.vfmaxs.vvvvl",
  "llvm.ve.vl.vfmind.vsvl",
  "llvm.ve.vl.vfmind.vsvmvl",
  "llvm.ve.vl.vfmind.vsvvl",
  "llvm.ve.vl.vfmind.vvvl",
  "llvm.ve.vl.vfmind.vvvmvl",
  "llvm.ve.vl.vfmind.vvvvl",
  "llvm.ve.vl.vfmins.vsvl",
  "llvm.ve.vl.vfmins.vsvmvl",
  "llvm.ve.vl.vfmins.vsvvl",
  "llvm.ve.vl.vfmins.vvvl",
  "llvm.ve.vl.vfmins.vvvmvl",
  "llvm.ve.vl.vfmins.vvvvl",
  "llvm.ve.vl.vfmkdeq.mvl",
  "llvm.ve.vl.vfmkdeq.mvml",
  "llvm.ve.vl.vfmkdeqnan.mvl",
  "llvm.ve.vl.vfmkdeqnan.mvml",
  "llvm.ve.vl.vfmkdge.mvl",
  "llvm.ve.vl.vfmkdge.mvml",
  "llvm.ve.vl.vfmkdgenan.mvl",
  "llvm.ve.vl.vfmkdgenan.mvml",
  "llvm.ve.vl.vfmkdgt.mvl",
  "llvm.ve.vl.vfmkdgt.mvml",
  "llvm.ve.vl.vfmkdgtnan.mvl",
  "llvm.ve.vl.vfmkdgtnan.mvml",
  "llvm.ve.vl.vfmkdle.mvl",
  "llvm.ve.vl.vfmkdle.mvml",
  "llvm.ve.vl.vfmkdlenan.mvl",
  "llvm.ve.vl.vfmkdlenan.mvml",
  "llvm.ve.vl.vfmkdlt.mvl",
  "llvm.ve.vl.vfmkdlt.mvml",
  "llvm.ve.vl.vfmkdltnan.mvl",
  "llvm.ve.vl.vfmkdltnan.mvml",
  "llvm.ve.vl.vfmkdnan.mvl",
  "llvm.ve.vl.vfmkdnan.mvml",
  "llvm.ve.vl.vfmkdne.mvl",
  "llvm.ve.vl.vfmkdne.mvml",
  "llvm.ve.vl.vfmkdnenan.mvl",
  "llvm.ve.vl.vfmkdnenan.mvml",
  "llvm.ve.vl.vfmkdnum.mvl",
  "llvm.ve.vl.vfmkdnum.mvml",
  "llvm.ve.vl.vfmklaf.ml",
  "llvm.ve.vl.vfmklat.ml",
  "llvm.ve.vl.vfmkleq.mvl",
  "llvm.ve.vl.vfmkleq.mvml",
  "llvm.ve.vl.vfmkleqnan.mvl",
  "llvm.ve.vl.vfmkleqnan.mvml",
  "llvm.ve.vl.vfmklge.mvl",
  "llvm.ve.vl.vfmklge.mvml",
  "llvm.ve.vl.vfmklgenan.mvl",
  "llvm.ve.vl.vfmklgenan.mvml",
  "llvm.ve.vl.vfmklgt.mvl",
  "llvm.ve.vl.vfmklgt.mvml",
  "llvm.ve.vl.vfmklgtnan.mvl",
  "llvm.ve.vl.vfmklgtnan.mvml",
  "llvm.ve.vl.vfmklle.mvl",
  "llvm.ve.vl.vfmklle.mvml",
  "llvm.ve.vl.vfmkllenan.mvl",
  "llvm.ve.vl.vfmkllenan.mvml",
  "llvm.ve.vl.vfmkllt.mvl",
  "llvm.ve.vl.vfmkllt.mvml",
  "llvm.ve.vl.vfmklltnan.mvl",
  "llvm.ve.vl.vfmklltnan.mvml",
  "llvm.ve.vl.vfmklnan.mvl",
  "llvm.ve.vl.vfmklnan.mvml",
  "llvm.ve.vl.vfmklne.mvl",
  "llvm.ve.vl.vfmklne.mvml",
  "llvm.ve.vl.vfmklnenan.mvl",
  "llvm.ve.vl.vfmklnenan.mvml",
  "llvm.ve.vl.vfmklnum.mvl",
  "llvm.ve.vl.vfmklnum.mvml",
  "llvm.ve.vl.vfmkseq.mvl",
  "llvm.ve.vl.vfmkseq.mvml",
  "llvm.ve.vl.vfmkseqnan.mvl",
  "llvm.ve.vl.vfmkseqnan.mvml",
  "llvm.ve.vl.vfmksge.mvl",
  "llvm.ve.vl.vfmksge.mvml",
  "llvm.ve.vl.vfmksgenan.mvl",
  "llvm.ve.vl.vfmksgenan.mvml",
  "llvm.ve.vl.vfmksgt.mvl",
  "llvm.ve.vl.vfmksgt.mvml",
  "llvm.ve.vl.vfmksgtnan.mvl",
  "llvm.ve.vl.vfmksgtnan.mvml",
  "llvm.ve.vl.vfmksle.mvl",
  "llvm.ve.vl.vfmksle.mvml",
  "llvm.ve.vl.vfmkslenan.mvl",
  "llvm.ve.vl.vfmkslenan.mvml",
  "llvm.ve.vl.vfmkslt.mvl",
  "llvm.ve.vl.vfmkslt.mvml",
  "llvm.ve.vl.vfmksltnan.mvl",
  "llvm.ve.vl.vfmksltnan.mvml",
  "llvm.ve.vl.vfmksnan.mvl",
  "llvm.ve.vl.vfmksnan.mvml",
  "llvm.ve.vl.vfmksne.mvl",
  "llvm.ve.vl.vfmksne.mvml",
  "llvm.ve.vl.vfmksnenan.mvl",
  "llvm.ve.vl.vfmksnenan.mvml",
  "llvm.ve.vl.vfmksnum.mvl",
  "llvm.ve.vl.vfmksnum.mvml",
  "llvm.ve.vl.vfmkweq.mvl",
  "llvm.ve.vl.vfmkweq.mvml",
  "llvm.ve.vl.vfmkweqnan.mvl",
  "llvm.ve.vl.vfmkweqnan.mvml",
  "llvm.ve.vl.vfmkwge.mvl",
  "llvm.ve.vl.vfmkwge.mvml",
  "llvm.ve.vl.vfmkwgenan.mvl",
  "llvm.ve.vl.vfmkwgenan.mvml",
  "llvm.ve.vl.vfmkwgt.mvl",
  "llvm.ve.vl.vfmkwgt.mvml",
  "llvm.ve.vl.vfmkwgtnan.mvl",
  "llvm.ve.vl.vfmkwgtnan.mvml",
  "llvm.ve.vl.vfmkwle.mvl",
  "llvm.ve.vl.vfmkwle.mvml",
  "llvm.ve.vl.vfmkwlenan.mvl",
  "llvm.ve.vl.vfmkwlenan.mvml",
  "llvm.ve.vl.vfmkwlt.mvl",
  "llvm.ve.vl.vfmkwlt.mvml",
  "llvm.ve.vl.vfmkwltnan.mvl",
  "llvm.ve.vl.vfmkwltnan.mvml",
  "llvm.ve.vl.vfmkwnan.mvl",
  "llvm.ve.vl.vfmkwnan.mvml",
  "llvm.ve.vl.vfmkwne.mvl",
  "llvm.ve.vl.vfmkwne.mvml",
  "llvm.ve.vl.vfmkwnenan.mvl",
  "llvm.ve.vl.vfmkwnenan.mvml",
  "llvm.ve.vl.vfmkwnum.mvl",
  "llvm.ve.vl.vfmkwnum.mvml",
  "llvm.ve.vl.vfmsbd.vsvvl",
  "llvm.ve.vl.vfmsbd.vsvvmvl",
  "llvm.ve.vl.vfmsbd.vsvvvl",
  "llvm.ve.vl.vfmsbd.vvsvl",
  "llvm.ve.vl.vfmsbd.vvsvmvl",
  "llvm.ve.vl.vfmsbd.vvsvvl",
  "llvm.ve.vl.vfmsbd.vvvvl",
  "llvm.ve.vl.vfmsbd.vvvvmvl",
  "llvm.ve.vl.vfmsbd.vvvvvl",
  "llvm.ve.vl.vfmsbs.vsvvl",
  "llvm.ve.vl.vfmsbs.vsvvmvl",
  "llvm.ve.vl.vfmsbs.vsvvvl",
  "llvm.ve.vl.vfmsbs.vvsvl",
  "llvm.ve.vl.vfmsbs.vvsvmvl",
  "llvm.ve.vl.vfmsbs.vvsvvl",
  "llvm.ve.vl.vfmsbs.vvvvl",
  "llvm.ve.vl.vfmsbs.vvvvmvl",
  "llvm.ve.vl.vfmsbs.vvvvvl",
  "llvm.ve.vl.vfmuld.vsvl",
  "llvm.ve.vl.vfmuld.vsvmvl",
  "llvm.ve.vl.vfmuld.vsvvl",
  "llvm.ve.vl.vfmuld.vvvl",
  "llvm.ve.vl.vfmuld.vvvmvl",
  "llvm.ve.vl.vfmuld.vvvvl",
  "llvm.ve.vl.vfmuls.vsvl",
  "llvm.ve.vl.vfmuls.vsvmvl",
  "llvm.ve.vl.vfmuls.vsvvl",
  "llvm.ve.vl.vfmuls.vvvl",
  "llvm.ve.vl.vfmuls.vvvmvl",
  "llvm.ve.vl.vfmuls.vvvvl",
  "llvm.ve.vl.vfnmadd.vsvvl",
  "llvm.ve.vl.vfnmadd.vsvvmvl",
  "llvm.ve.vl.vfnmadd.vsvvvl",
  "llvm.ve.vl.vfnmadd.vvsvl",
  "llvm.ve.vl.vfnmadd.vvsvmvl",
  "llvm.ve.vl.vfnmadd.vvsvvl",
  "llvm.ve.vl.vfnmadd.vvvvl",
  "llvm.ve.vl.vfnmadd.vvvvmvl",
  "llvm.ve.vl.vfnmadd.vvvvvl",
  "llvm.ve.vl.vfnmads.vsvvl",
  "llvm.ve.vl.vfnmads.vsvvmvl",
  "llvm.ve.vl.vfnmads.vsvvvl",
  "llvm.ve.vl.vfnmads.vvsvl",
  "llvm.ve.vl.vfnmads.vvsvmvl",
  "llvm.ve.vl.vfnmads.vvsvvl",
  "llvm.ve.vl.vfnmads.vvvvl",
  "llvm.ve.vl.vfnmads.vvvvmvl",
  "llvm.ve.vl.vfnmads.vvvvvl",
  "llvm.ve.vl.vfnmsbd.vsvvl",
  "llvm.ve.vl.vfnmsbd.vsvvmvl",
  "llvm.ve.vl.vfnmsbd.vsvvvl",
  "llvm.ve.vl.vfnmsbd.vvsvl",
  "llvm.ve.vl.vfnmsbd.vvsvmvl",
  "llvm.ve.vl.vfnmsbd.vvsvvl",
  "llvm.ve.vl.vfnmsbd.vvvvl",
  "llvm.ve.vl.vfnmsbd.vvvvmvl",
  "llvm.ve.vl.vfnmsbd.vvvvvl",
  "llvm.ve.vl.vfnmsbs.vsvvl",
  "llvm.ve.vl.vfnmsbs.vsvvmvl",
  "llvm.ve.vl.vfnmsbs.vsvvvl",
  "llvm.ve.vl.vfnmsbs.vvsvl",
  "llvm.ve.vl.vfnmsbs.vvsvmvl",
  "llvm.ve.vl.vfnmsbs.vvsvvl",
  "llvm.ve.vl.vfnmsbs.vvvvl",
  "llvm.ve.vl.vfnmsbs.vvvvmvl",
  "llvm.ve.vl.vfnmsbs.vvvvvl",
  "llvm.ve.vl.vfrmaxdfst.vvl",
  "llvm.ve.vl.vfrmaxdfst.vvvl",
  "llvm.ve.vl.vfrmaxdlst.vvl",
  "llvm.ve.vl.vfrmaxdlst.vvvl",
  "llvm.ve.vl.vfrmaxsfst.vvl",
  "llvm.ve.vl.vfrmaxsfst.vvvl",
  "llvm.ve.vl.vfrmaxslst.vvl",
  "llvm.ve.vl.vfrmaxslst.vvvl",
  "llvm.ve.vl.vfrmindfst.vvl",
  "llvm.ve.vl.vfrmindfst.vvvl",
  "llvm.ve.vl.vfrmindlst.vvl",
  "llvm.ve.vl.vfrmindlst.vvvl",
  "llvm.ve.vl.vfrminsfst.vvl",
  "llvm.ve.vl.vfrminsfst.vvvl",
  "llvm.ve.vl.vfrminslst.vvl",
  "llvm.ve.vl.vfrminslst.vvvl",
  "llvm.ve.vl.vfsqrtd.vvl",
  "llvm.ve.vl.vfsqrtd.vvvl",
  "llvm.ve.vl.vfsqrts.vvl",
  "llvm.ve.vl.vfsqrts.vvvl",
  "llvm.ve.vl.vfsubd.vsvl",
  "llvm.ve.vl.vfsubd.vsvmvl",
  "llvm.ve.vl.vfsubd.vsvvl",
  "llvm.ve.vl.vfsubd.vvvl",
  "llvm.ve.vl.vfsubd.vvvmvl",
  "llvm.ve.vl.vfsubd.vvvvl",
  "llvm.ve.vl.vfsubs.vsvl",
  "llvm.ve.vl.vfsubs.vsvmvl",
  "llvm.ve.vl.vfsubs.vsvvl",
  "llvm.ve.vl.vfsubs.vvvl",
  "llvm.ve.vl.vfsubs.vvvmvl",
  "llvm.ve.vl.vfsubs.vvvvl",
  "llvm.ve.vl.vfsumd.vvl",
  "llvm.ve.vl.vfsumd.vvml",
  "llvm.ve.vl.vfsums.vvl",
  "llvm.ve.vl.vfsums.vvml",
  "llvm.ve.vl.vgt.vvssl",
  "llvm.ve.vl.vgt.vvssml",
  "llvm.ve.vl.vgt.vvssmvl",
  "llvm.ve.vl.vgt.vvssvl",
  "llvm.ve.vl.vgtlsx.vvssl",
  "llvm.ve.vl.vgtlsx.vvssml",
  "llvm.ve.vl.vgtlsx.vvssmvl",
  "llvm.ve.vl.vgtlsx.vvssvl",
  "llvm.ve.vl.vgtlsxnc.vvssl",
  "llvm.ve.vl.vgtlsxnc.vvssml",
  "llvm.ve.vl.vgtlsxnc.vvssmvl",
  "llvm.ve.vl.vgtlsxnc.vvssvl",
  "llvm.ve.vl.vgtlzx.vvssl",
  "llvm.ve.vl.vgtlzx.vvssml",
  "llvm.ve.vl.vgtlzx.vvssmvl",
  "llvm.ve.vl.vgtlzx.vvssvl",
  "llvm.ve.vl.vgtlzxnc.vvssl",
  "llvm.ve.vl.vgtlzxnc.vvssml",
  "llvm.ve.vl.vgtlzxnc.vvssmvl",
  "llvm.ve.vl.vgtlzxnc.vvssvl",
  "llvm.ve.vl.vgtnc.vvssl",
  "llvm.ve.vl.vgtnc.vvssml",
  "llvm.ve.vl.vgtnc.vvssmvl",
  "llvm.ve.vl.vgtnc.vvssvl",
  "llvm.ve.vl.vgtu.vvssl",
  "llvm.ve.vl.vgtu.vvssml",
  "llvm.ve.vl.vgtu.vvssmvl",
  "llvm.ve.vl.vgtu.vvssvl",
  "llvm.ve.vl.vgtunc.vvssl",
  "llvm.ve.vl.vgtunc.vvssml",
  "llvm.ve.vl.vgtunc.vvssmvl",
  "llvm.ve.vl.vgtunc.vvssvl",
  "llvm.ve.vl.vld.vssl",
  "llvm.ve.vl.vld.vssvl",
  "llvm.ve.vl.vld2d.vssl",
  "llvm.ve.vl.vld2d.vssvl",
  "llvm.ve.vl.vld2dnc.vssl",
  "llvm.ve.vl.vld2dnc.vssvl",
  "llvm.ve.vl.vldl2dsx.vssl",
  "llvm.ve.vl.vldl2dsx.vssvl",
  "llvm.ve.vl.vldl2dsxnc.vssl",
  "llvm.ve.vl.vldl2dsxnc.vssvl",
  "llvm.ve.vl.vldl2dzx.vssl",
  "llvm.ve.vl.vldl2dzx.vssvl",
  "llvm.ve.vl.vldl2dzxnc.vssl",
  "llvm.ve.vl.vldl2dzxnc.vssvl",
  "llvm.ve.vl.vldlsx.vssl",
  "llvm.ve.vl.vldlsx.vssvl",
  "llvm.ve.vl.vldlsxnc.vssl",
  "llvm.ve.vl.vldlsxnc.vssvl",
  "llvm.ve.vl.vldlzx.vssl",
  "llvm.ve.vl.vldlzx.vssvl",
  "llvm.ve.vl.vldlzxnc.vssl",
  "llvm.ve.vl.vldlzxnc.vssvl",
  "llvm.ve.vl.vldnc.vssl",
  "llvm.ve.vl.vldnc.vssvl",
  "llvm.ve.vl.vldu.vssl",
  "llvm.ve.vl.vldu.vssvl",
  "llvm.ve.vl.vldu2d.vssl",
  "llvm.ve.vl.vldu2d.vssvl",
  "llvm.ve.vl.vldu2dnc.vssl",
  "llvm.ve.vl.vldu2dnc.vssvl",
  "llvm.ve.vl.vldunc.vssl",
  "llvm.ve.vl.vldunc.vssvl",
  "llvm.ve.vl.vldz.vvl",
  "llvm.ve.vl.vldz.vvmvl",
  "llvm.ve.vl.vldz.vvvl",
  "llvm.ve.vl.vmaxsl.vsvl",
  "llvm.ve.vl.vmaxsl.vsvmvl",
  "llvm.ve.vl.vmaxsl.vsvvl",
  "llvm.ve.vl.vmaxsl.vvvl",
  "llvm.ve.vl.vmaxsl.vvvmvl",
  "llvm.ve.vl.vmaxsl.vvvvl",
  "llvm.ve.vl.vmaxswsx.vsvl",
  "llvm.ve.vl.vmaxswsx.vsvmvl",
  "llvm.ve.vl.vmaxswsx.vsvvl",
  "llvm.ve.vl.vmaxswsx.vvvl",
  "llvm.ve.vl.vmaxswsx.vvvmvl",
  "llvm.ve.vl.vmaxswsx.vvvvl",
  "llvm.ve.vl.vmaxswzx.vsvl",
  "llvm.ve.vl.vmaxswzx.vsvmvl",
  "llvm.ve.vl.vmaxswzx.vsvvl",
  "llvm.ve.vl.vmaxswzx.vvvl",
  "llvm.ve.vl.vmaxswzx.vvvmvl",
  "llvm.ve.vl.vmaxswzx.vvvvl",
  "llvm.ve.vl.vminsl.vsvl",
  "llvm.ve.vl.vminsl.vsvmvl",
  "llvm.ve.vl.vminsl.vsvvl",
  "llvm.ve.vl.vminsl.vvvl",
  "llvm.ve.vl.vminsl.vvvmvl",
  "llvm.ve.vl.vminsl.vvvvl",
  "llvm.ve.vl.vminswsx.vsvl",
  "llvm.ve.vl.vminswsx.vsvmvl",
  "llvm.ve.vl.vminswsx.vsvvl",
  "llvm.ve.vl.vminswsx.vvvl",
  "llvm.ve.vl.vminswsx.vvvmvl",
  "llvm.ve.vl.vminswsx.vvvvl",
  "llvm.ve.vl.vminswzx.vsvl",
  "llvm.ve.vl.vminswzx.vsvmvl",
  "llvm.ve.vl.vminswzx.vsvvl",
  "llvm.ve.vl.vminswzx.vvvl",
  "llvm.ve.vl.vminswzx.vvvmvl",
  "llvm.ve.vl.vminswzx.vvvvl",
  "llvm.ve.vl.vmrg.vsvml",
  "llvm.ve.vl.vmrg.vsvmvl",
  "llvm.ve.vl.vmrg.vvvml",
  "llvm.ve.vl.vmrg.vvvmvl",
  "llvm.ve.vl.vmrgw.vsvMl",
  "llvm.ve.vl.vmrgw.vsvMvl",
  "llvm.ve.vl.vmrgw.vvvMl",
  "llvm.ve.vl.vmrgw.vvvMvl",
  "llvm.ve.vl.vmulsl.vsvl",
  "llvm.ve.vl.vmulsl.vsvmvl",
  "llvm.ve.vl.vmulsl.vsvvl",
  "llvm.ve.vl.vmulsl.vvvl",
  "llvm.ve.vl.vmulsl.vvvmvl",
  "llvm.ve.vl.vmulsl.vvvvl",
  "llvm.ve.vl.vmulslw.vsvl",
  "llvm.ve.vl.vmulslw.vsvvl",
  "llvm.ve.vl.vmulslw.vvvl",
  "llvm.ve.vl.vmulslw.vvvvl",
  "llvm.ve.vl.vmulswsx.vsvl",
  "llvm.ve.vl.vmulswsx.vsvmvl",
  "llvm.ve.vl.vmulswsx.vsvvl",
  "llvm.ve.vl.vmulswsx.vvvl",
  "llvm.ve.vl.vmulswsx.vvvmvl",
  "llvm.ve.vl.vmulswsx.vvvvl",
  "llvm.ve.vl.vmulswzx.vsvl",
  "llvm.ve.vl.vmulswzx.vsvmvl",
  "llvm.ve.vl.vmulswzx.vsvvl",
  "llvm.ve.vl.vmulswzx.vvvl",
  "llvm.ve.vl.vmulswzx.vvvmvl",
  "llvm.ve.vl.vmulswzx.vvvvl",
  "llvm.ve.vl.vmulul.vsvl",
  "llvm.ve.vl.vmulul.vsvmvl",
  "llvm.ve.vl.vmulul.vsvvl",
  "llvm.ve.vl.vmulul.vvvl",
  "llvm.ve.vl.vmulul.vvvmvl",
  "llvm.ve.vl.vmulul.vvvvl",
  "llvm.ve.vl.vmuluw.vsvl",
  "llvm.ve.vl.vmuluw.vsvmvl",
  "llvm.ve.vl.vmuluw.vsvvl",
  "llvm.ve.vl.vmuluw.vvvl",
  "llvm.ve.vl.vmuluw.vvvmvl",
  "llvm.ve.vl.vmuluw.vvvvl",
  "llvm.ve.vl.vmv.vsvl",
  "llvm.ve.vl.vmv.vsvmvl",
  "llvm.ve.vl.vmv.vsvvl",
  "llvm.ve.vl.vor.vsvl",
  "llvm.ve.vl.vor.vsvmvl",
  "llvm.ve.vl.vor.vsvvl",
  "llvm.ve.vl.vor.vvvl",
  "llvm.ve.vl.vor.vvvmvl",
  "llvm.ve.vl.vor.vvvvl",
  "llvm.ve.vl.vpcnt.vvl",
  "llvm.ve.vl.vpcnt.vvmvl",
  "llvm.ve.vl.vpcnt.vvvl",
  "llvm.ve.vl.vrand.vvl",
  "llvm.ve.vl.vrand.vvml",
  "llvm.ve.vl.vrcpd.vvl",
  "llvm.ve.vl.vrcpd.vvvl",
  "llvm.ve.vl.vrcps.vvl",
  "llvm.ve.vl.vrcps.vvvl",
  "llvm.ve.vl.vrmaxslfst.vvl",
  "llvm.ve.vl.vrmaxslfst.vvvl",
  "llvm.ve.vl.vrmaxsllst.vvl",
  "llvm.ve.vl.vrmaxsllst.vvvl",
  "llvm.ve.vl.vrmaxswfstsx.vvl",
  "llvm.ve.vl.vrmaxswfstsx.vvvl",
  "llvm.ve.vl.vrmaxswfstzx.vvl",
  "llvm.ve.vl.vrmaxswfstzx.vvvl",
  "llvm.ve.vl.vrmaxswlstsx.vvl",
  "llvm.ve.vl.vrmaxswlstsx.vvvl",
  "llvm.ve.vl.vrmaxswlstzx.vvl",
  "llvm.ve.vl.vrmaxswlstzx.vvvl",
  "llvm.ve.vl.vrminslfst.vvl",
  "llvm.ve.vl.vrminslfst.vvvl",
  "llvm.ve.vl.vrminsllst.vvl",
  "llvm.ve.vl.vrminsllst.vvvl",
  "llvm.ve.vl.vrminswfstsx.vvl",
  "llvm.ve.vl.vrminswfstsx.vvvl",
  "llvm.ve.vl.vrminswfstzx.vvl",
  "llvm.ve.vl.vrminswfstzx.vvvl",
  "llvm.ve.vl.vrminswlstsx.vvl",
  "llvm.ve.vl.vrminswlstsx.vvvl",
  "llvm.ve.vl.vrminswlstzx.vvl",
  "llvm.ve.vl.vrminswlstzx.vvvl",
  "llvm.ve.vl.vror.vvl",
  "llvm.ve.vl.vror.vvml",
  "llvm.ve.vl.vrsqrtd.vvl",
  "llvm.ve.vl.vrsqrtd.vvvl",
  "llvm.ve.vl.vrsqrtdnex.vvl",
  "llvm.ve.vl.vrsqrtdnex.vvvl",
  "llvm.ve.vl.vrsqrts.vvl",
  "llvm.ve.vl.vrsqrts.vvvl",
  "llvm.ve.vl.vrsqrtsnex.vvl",
  "llvm.ve.vl.vrsqrtsnex.vvvl",
  "llvm.ve.vl.vrxor.vvl",
  "llvm.ve.vl.vrxor.vvml",
  "llvm.ve.vl.vsc.vvssl",
  "llvm.ve.vl.vsc.vvssml",
  "llvm.ve.vl.vscl.vvssl",
  "llvm.ve.vl.vscl.vvssml",
  "llvm.ve.vl.vsclnc.vvssl",
  "llvm.ve.vl.vsclnc.vvssml",
  "llvm.ve.vl.vsclncot.vvssl",
  "llvm.ve.vl.vsclncot.vvssml",
  "llvm.ve.vl.vsclot.vvssl",
  "llvm.ve.vl.vsclot.vvssml",
  "llvm.ve.vl.vscnc.vvssl",
  "llvm.ve.vl.vscnc.vvssml",
  "llvm.ve.vl.vscncot.vvssl",
  "llvm.ve.vl.vscncot.vvssml",
  "llvm.ve.vl.vscot.vvssl",
  "llvm.ve.vl.vscot.vvssml",
  "llvm.ve.vl.vscu.vvssl",
  "llvm.ve.vl.vscu.vvssml",
  "llvm.ve.vl.vscunc.vvssl",
  "llvm.ve.vl.vscunc.vvssml",
  "llvm.ve.vl.vscuncot.vvssl",
  "llvm.ve.vl.vscuncot.vvssml",
  "llvm.ve.vl.vscuot.vvssl",
  "llvm.ve.vl.vscuot.vvssml",
  "llvm.ve.vl.vseq.vl",
  "llvm.ve.vl.vseq.vvl",
  "llvm.ve.vl.vsfa.vvssl",
  "llvm.ve.vl.vsfa.vvssmvl",
  "llvm.ve.vl.vsfa.vvssvl",
  "llvm.ve.vl.vshf.vvvsl",
  "llvm.ve.vl.vshf.vvvsvl",
  "llvm.ve.vl.vslal.vvsl",
  "llvm.ve.vl.vslal.vvsmvl",
  "llvm.ve.vl.vslal.vvsvl",
  "llvm.ve.vl.vslal.vvvl",
  "llvm.ve.vl.vslal.vvvmvl",
  "llvm.ve.vl.vslal.vvvvl",
  "llvm.ve.vl.vslawsx.vvsl",
  "llvm.ve.vl.vslawsx.vvsmvl",
  "llvm.ve.vl.vslawsx.vvsvl",
  "llvm.ve.vl.vslawsx.vvvl",
  "llvm.ve.vl.vslawsx.vvvmvl",
  "llvm.ve.vl.vslawsx.vvvvl",
  "llvm.ve.vl.vslawzx.vvsl",
  "llvm.ve.vl.vslawzx.vvsmvl",
  "llvm.ve.vl.vslawzx.vvsvl",
  "llvm.ve.vl.vslawzx.vvvl",
  "llvm.ve.vl.vslawzx.vvvmvl",
  "llvm.ve.vl.vslawzx.vvvvl",
  "llvm.ve.vl.vsll.vvsl",
  "llvm.ve.vl.vsll.vvsmvl",
  "llvm.ve.vl.vsll.vvsvl",
  "llvm.ve.vl.vsll.vvvl",
  "llvm.ve.vl.vsll.vvvmvl",
  "llvm.ve.vl.vsll.vvvvl",
  "llvm.ve.vl.vsral.vvsl",
  "llvm.ve.vl.vsral.vvsmvl",
  "llvm.ve.vl.vsral.vvsvl",
  "llvm.ve.vl.vsral.vvvl",
  "llvm.ve.vl.vsral.vvvmvl",
  "llvm.ve.vl.vsral.vvvvl",
  "llvm.ve.vl.vsrawsx.vvsl",
  "llvm.ve.vl.vsrawsx.vvsmvl",
  "llvm.ve.vl.vsrawsx.vvsvl",
  "llvm.ve.vl.vsrawsx.vvvl",
  "llvm.ve.vl.vsrawsx.vvvmvl",
  "llvm.ve.vl.vsrawsx.vvvvl",
  "llvm.ve.vl.vsrawzx.vvsl",
  "llvm.ve.vl.vsrawzx.vvsmvl",
  "llvm.ve.vl.vsrawzx.vvsvl",
  "llvm.ve.vl.vsrawzx.vvvl",
  "llvm.ve.vl.vsrawzx.vvvmvl",
  "llvm.ve.vl.vsrawzx.vvvvl",
  "llvm.ve.vl.vsrl.vvsl",
  "llvm.ve.vl.vsrl.vvsmvl",
  "llvm.ve.vl.vsrl.vvsvl",
  "llvm.ve.vl.vsrl.vvvl",
  "llvm.ve.vl.vsrl.vvvmvl",
  "llvm.ve.vl.vsrl.vvvvl",
  "llvm.ve.vl.vst.vssl",
  "llvm.ve.vl.vst.vssml",
  "llvm.ve.vl.vst2d.vssl",
  "llvm.ve.vl.vst2d.vssml",
  "llvm.ve.vl.vst2dnc.vssl",
  "llvm.ve.vl.vst2dnc.vssml",
  "llvm.ve.vl.vst2dncot.vssl",
  "llvm.ve.vl.vst2dncot.vssml",
  "llvm.ve.vl.vst2dot.vssl",
  "llvm.ve.vl.vst2dot.vssml",
  "llvm.ve.vl.vstl.vssl",
  "llvm.ve.vl.vstl.vssml",
  "llvm.ve.vl.vstl2d.vssl",
  "llvm.ve.vl.vstl2d.vssml",
  "llvm.ve.vl.vstl2dnc.vssl",
  "llvm.ve.vl.vstl2dnc.vssml",
  "llvm.ve.vl.vstl2dncot.vssl",
  "llvm.ve.vl.vstl2dncot.vssml",
  "llvm.ve.vl.vstl2dot.vssl",
  "llvm.ve.vl.vstl2dot.vssml",
  "llvm.ve.vl.vstlnc.vssl",
  "llvm.ve.vl.vstlnc.vssml",
  "llvm.ve.vl.vstlncot.vssl",
  "llvm.ve.vl.vstlncot.vssml",
  "llvm.ve.vl.vstlot.vssl",
  "llvm.ve.vl.vstlot.vssml",
  "llvm.ve.vl.vstnc.vssl",
  "llvm.ve.vl.vstnc.vssml",
  "llvm.ve.vl.vstncot.vssl",
  "llvm.ve.vl.vstncot.vssml",
  "llvm.ve.vl.vstot.vssl",
  "llvm.ve.vl.vstot.vssml",
  "llvm.ve.vl.vstu.vssl",
  "llvm.ve.vl.vstu.vssml",
  "llvm.ve.vl.vstu2d.vssl",
  "llvm.ve.vl.vstu2d.vssml",
  "llvm.ve.vl.vstu2dnc.vssl",
  "llvm.ve.vl.vstu2dnc.vssml",
  "llvm.ve.vl.vstu2dncot.vssl",
  "llvm.ve.vl.vstu2dncot.vssml",
  "llvm.ve.vl.vstu2dot.vssl",
  "llvm.ve.vl.vstu2dot.vssml",
  "llvm.ve.vl.vstunc.vssl",
  "llvm.ve.vl.vstunc.vssml",
  "llvm.ve.vl.vstuncot.vssl",
  "llvm.ve.vl.vstuncot.vssml",
  "llvm.ve.vl.vstuot.vssl",
  "llvm.ve.vl.vstuot.vssml",
  "llvm.ve.vl.vsubsl.vsvl",
  "llvm.ve.vl.vsubsl.vsvmvl",
  "llvm.ve.vl.vsubsl.vsvvl",
  "llvm.ve.vl.vsubsl.vvvl",
  "llvm.ve.vl.vsubsl.vvvmvl",
  "llvm.ve.vl.vsubsl.vvvvl",
  "llvm.ve.vl.vsubswsx.vsvl",
  "llvm.ve.vl.vsubswsx.vsvmvl",
  "llvm.ve.vl.vsubswsx.vsvvl",
  "llvm.ve.vl.vsubswsx.vvvl",
  "llvm.ve.vl.vsubswsx.vvvmvl",
  "llvm.ve.vl.vsubswsx.vvvvl",
  "llvm.ve.vl.vsubswzx.vsvl",
  "llvm.ve.vl.vsubswzx.vsvmvl",
  "llvm.ve.vl.vsubswzx.vsvvl",
  "llvm.ve.vl.vsubswzx.vvvl",
  "llvm.ve.vl.vsubswzx.vvvmvl",
  "llvm.ve.vl.vsubswzx.vvvvl",
  "llvm.ve.vl.vsubul.vsvl",
  "llvm.ve.vl.vsubul.vsvmvl",
  "llvm.ve.vl.vsubul.vsvvl",
  "llvm.ve.vl.vsubul.vvvl",
  "llvm.ve.vl.vsubul.vvvmvl",
  "llvm.ve.vl.vsubul.vvvvl",
  "llvm.ve.vl.vsubuw.vsvl",
  "llvm.ve.vl.vsubuw.vsvmvl",
  "llvm.ve.vl.vsubuw.vsvvl",
  "llvm.ve.vl.vsubuw.vvvl",
  "llvm.ve.vl.vsubuw.vvvmvl",
  "llvm.ve.vl.vsubuw.vvvvl",
  "llvm.ve.vl.vsuml.vvl",
  "llvm.ve.vl.vsuml.vvml",
  "llvm.ve.vl.vsumwsx.vvl",
  "llvm.ve.vl.vsumwsx.vvml",
  "llvm.ve.vl.vsumwzx.vvl",
  "llvm.ve.vl.vsumwzx.vvml",
  "llvm.ve.vl.vxor.vsvl",
  "llvm.ve.vl.vxor.vsvmvl",
  "llvm.ve.vl.vxor.vsvvl",
  "llvm.ve.vl.vxor.vvvl",
  "llvm.ve.vl.vxor.vvvmvl",
  "llvm.ve.vl.vxor.vvvvl",
  "llvm.ve.vl.xorm.MMM",
  "llvm.ve.vl.xorm.mmm",
  "llvm.wasm.alltrue",
  "llvm.wasm.anytrue",
  "llvm.wasm.avgr.unsigned",
  "llvm.wasm.bitmask",
  "llvm.wasm.bitselect",
  "llvm.wasm.catch",
  "llvm.wasm.dot",
  "llvm.wasm.extadd.pairwise.signed",
  "llvm.wasm.extadd.pairwise.unsigned",
  "llvm.wasm.get.ehselector",
  "llvm.wasm.get.exception",
  "llvm.wasm.landingpad.index",
  "llvm.wasm.lsda",
  "llvm.wasm.memory.atomic.notify",
  "llvm.wasm.memory.atomic.wait32",
  "llvm.wasm.memory.atomic.wait64",
  "llvm.wasm.memory.grow",
  "llvm.wasm.memory.size",
  "llvm.wasm.narrow.signed",
  "llvm.wasm.narrow.unsigned",
  "llvm.wasm.pmax",
  "llvm.wasm.pmin",
  "llvm.wasm.q15mulr.sat.signed",
  "llvm.wasm.ref.is_null.extern",
  "llvm.wasm.ref.is_null.func",
  "llvm.wasm.ref.null.extern",
  "llvm.wasm.ref.null.func",
  "llvm.wasm.relaxed.dot.bf16x8.add.f32",
  "llvm.wasm.relaxed.dot.i8x16.i7x16.add.signed",
  "llvm.wasm.relaxed.dot.i8x16.i7x16.signed",
  "llvm.wasm.relaxed.laneselect",
  "llvm.wasm.relaxed.madd",
  "llvm.wasm.relaxed.max",
  "llvm.wasm.relaxed.min",
  "llvm.wasm.relaxed.nmadd",
  "llvm.wasm.relaxed.q15mulr.signed",
  "llvm.wasm.relaxed.swizzle",
  "llvm.wasm.relaxed.trunc.signed",
  "llvm.wasm.relaxed.trunc.signed.zero",
  "llvm.wasm.relaxed.trunc.unsigned",
  "llvm.wasm.relaxed.trunc.unsigned.zero",
  "llvm.wasm.rethrow",
  "llvm.wasm.shuffle",
  "llvm.wasm.sub.sat.signed",
  "llvm.wasm.sub.sat.unsigned",
  "llvm.wasm.swizzle",
  "llvm.wasm.table.copy",
  "llvm.wasm.table.fill.externref",
  "llvm.wasm.table.fill.funcref",
  "llvm.wasm.table.get.externref",
  "llvm.wasm.table.get.funcref",
  "llvm.wasm.table.grow.externref",
  "llvm.wasm.table.grow.funcref",
  "llvm.wasm.table.set.externref",
  "llvm.wasm.table.set.funcref",
  "llvm.wasm.table.size",
  "llvm.wasm.throw",
  "llvm.wasm.tls.align",
  "llvm.wasm.tls.base",
  "llvm.wasm.tls.size",
  "llvm.wasm.trunc.saturate.signed",
  "llvm.wasm.trunc.saturate.unsigned",
  "llvm.wasm.trunc.signed",
  "llvm.wasm.trunc.unsigned",
  "llvm.x86.3dnow.pavgusb",
  "llvm.x86.3dnow.pf2id",
  "llvm.x86.3dnow.pfacc",
  "llvm.x86.3dnow.pfadd",
  "llvm.x86.3dnow.pfcmpeq",
  "llvm.x86.3dnow.pfcmpge",
  "llvm.x86.3dnow.pfcmpgt",
  "llvm.x86.3dnow.pfmax",
  "llvm.x86.3dnow.pfmin",
  "llvm.x86.3dnow.pfmul",
  "llvm.x86.3dnow.pfrcp",
  "llvm.x86.3dnow.pfrcpit1",
  "llvm.x86.3dnow.pfrcpit2",
  "llvm.x86.3dnow.pfrsqit1",
  "llvm.x86.3dnow.pfrsqrt",
  "llvm.x86.3dnow.pfsub",
  "llvm.x86.3dnow.pfsubr",
  "llvm.x86.3dnow.pi2fd",
  "llvm.x86.3dnow.pmulhrw",
  "llvm.x86.3dnowa.pf2iw",
  "llvm.x86.3dnowa.pfnacc",
  "llvm.x86.3dnowa.pfpnacc",
  "llvm.x86.3dnowa.pi2fw",
  "llvm.x86.3dnowa.pswapd",
  "llvm.x86.aadd32",
  "llvm.x86.aadd64",
  "llvm.x86.aand32",
  "llvm.x86.aand64",
  "llvm.x86.addcarry.32",
  "llvm.x86.addcarry.64",
  "llvm.x86.aesdec128kl",
  "llvm.x86.aesdec256kl",
  "llvm.x86.aesdecwide128kl",
  "llvm.x86.aesdecwide256kl",
  "llvm.x86.aesenc128kl",
  "llvm.x86.aesenc256kl",
  "llvm.x86.aesencwide128kl",
  "llvm.x86.aesencwide256kl",
  "llvm.x86.aesni.aesdec",
  "llvm.x86.aesni.aesdec.256",
  "llvm.x86.aesni.aesdec.512",
  "llvm.x86.aesni.aesdeclast",
  "llvm.x86.aesni.aesdeclast.256",
  "llvm.x86.aesni.aesdeclast.512",
  "llvm.x86.aesni.aesenc",
  "llvm.x86.aesni.aesenc.256",
  "llvm.x86.aesni.aesenc.512",
  "llvm.x86.aesni.aesenclast",
  "llvm.x86.aesni.aesenclast.256",
  "llvm.x86.aesni.aesenclast.512",
  "llvm.x86.aesni.aesimc",
  "llvm.x86.aesni.aeskeygenassist",
  "llvm.x86.aor32",
  "llvm.x86.aor64",
  "llvm.x86.atomic.add.cc",
  "llvm.x86.atomic.and.cc",
  "llvm.x86.atomic.btc",
  "llvm.x86.atomic.btc.rm",
  "llvm.x86.atomic.btr",
  "llvm.x86.atomic.btr.rm",
  "llvm.x86.atomic.bts",
  "llvm.x86.atomic.bts.rm",
  "llvm.x86.atomic.or.cc",
  "llvm.x86.atomic.sub.cc",
  "llvm.x86.atomic.xor.cc",
  "llvm.x86.avx.addsub.pd.256",
  "llvm.x86.avx.addsub.ps.256",
  "llvm.x86.avx.blendv.pd.256",
  "llvm.x86.avx.blendv.ps.256",
  "llvm.x86.avx.cmp.pd.256",
  "llvm.x86.avx.cmp.ps.256",
  "llvm.x86.avx.cvt.pd2.ps.256",
  "llvm.x86.avx.cvt.pd2dq.256",
  "llvm.x86.avx.cvt.ps2dq.256",
  "llvm.x86.avx.cvtt.pd2dq.256",
  "llvm.x86.avx.cvtt.ps2dq.256",
  "llvm.x86.avx.dp.ps.256",
  "llvm.x86.avx.hadd.pd.256",
  "llvm.x86.avx.hadd.ps.256",
  "llvm.x86.avx.hsub.pd.256",
  "llvm.x86.avx.hsub.ps.256",
  "llvm.x86.avx.ldu.dq.256",
  "llvm.x86.avx.maskload.pd",
  "llvm.x86.avx.maskload.pd.256",
  "llvm.x86.avx.maskload.ps",
  "llvm.x86.avx.maskload.ps.256",
  "llvm.x86.avx.maskstore.pd",
  "llvm.x86.avx.maskstore.pd.256",
  "llvm.x86.avx.maskstore.ps",
  "llvm.x86.avx.maskstore.ps.256",
  "llvm.x86.avx.max.pd.256",
  "llvm.x86.avx.max.ps.256",
  "llvm.x86.avx.min.pd.256",
  "llvm.x86.avx.min.ps.256",
  "llvm.x86.avx.movmsk.pd.256",
  "llvm.x86.avx.movmsk.ps.256",
  "llvm.x86.avx.ptestc.256",
  "llvm.x86.avx.ptestnzc.256",
  "llvm.x86.avx.ptestz.256",
  "llvm.x86.avx.rcp.ps.256",
  "llvm.x86.avx.round.pd.256",
  "llvm.x86.avx.round.ps.256",
  "llvm.x86.avx.rsqrt.ps.256",
  "llvm.x86.avx.vpermilvar.pd",
  "llvm.x86.avx.vpermilvar.pd.256",
  "llvm.x86.avx.vpermilvar.ps",
  "llvm.x86.avx.vpermilvar.ps.256",
  "llvm.x86.avx.vtestc.pd",
  "llvm.x86.avx.vtestc.pd.256",
  "llvm.x86.avx.vtestc.ps",
  "llvm.x86.avx.vtestc.ps.256",
  "llvm.x86.avx.vtestnzc.pd",
  "llvm.x86.avx.vtestnzc.pd.256",
  "llvm.x86.avx.vtestnzc.ps",
  "llvm.x86.avx.vtestnzc.ps.256",
  "llvm.x86.avx.vtestz.pd",
  "llvm.x86.avx.vtestz.pd.256",
  "llvm.x86.avx.vtestz.ps",
  "llvm.x86.avx.vtestz.ps.256",
  "llvm.x86.avx.vzeroall",
  "llvm.x86.avx.vzeroupper",
  "llvm.x86.avx2.gather.d.d",
  "llvm.x86.avx2.gather.d.d.256",
  "llvm.x86.avx2.gather.d.pd",
  "llvm.x86.avx2.gather.d.pd.256",
  "llvm.x86.avx2.gather.d.ps",
  "llvm.x86.avx2.gather.d.ps.256",
  "llvm.x86.avx2.gather.d.q",
  "llvm.x86.avx2.gather.d.q.256",
  "llvm.x86.avx2.gather.q.d",
  "llvm.x86.avx2.gather.q.d.256",
  "llvm.x86.avx2.gather.q.pd",
  "llvm.x86.avx2.gather.q.pd.256",
  "llvm.x86.avx2.gather.q.ps",
  "llvm.x86.avx2.gather.q.ps.256",
  "llvm.x86.avx2.gather.q.q",
  "llvm.x86.avx2.gather.q.q.256",
  "llvm.x86.avx2.maskload.d",
  "llvm.x86.avx2.maskload.d.256",
  "llvm.x86.avx2.maskload.q",
  "llvm.x86.avx2.maskload.q.256",
  "llvm.x86.avx2.maskstore.d",
  "llvm.x86.avx2.maskstore.d.256",
  "llvm.x86.avx2.maskstore.q",
  "llvm.x86.avx2.maskstore.q.256",
  "llvm.x86.avx2.mpsadbw",
  "llvm.x86.avx2.packssdw",
  "llvm.x86.avx2.packsswb",
  "llvm.x86.avx2.packusdw",
  "llvm.x86.avx2.packuswb",
  "llvm.x86.avx2.pavg.b",
  "llvm.x86.avx2.pavg.w",
  "llvm.x86.avx2.pblendvb",
  "llvm.x86.avx2.permd",
  "llvm.x86.avx2.permps",
  "llvm.x86.avx2.phadd.d",
  "llvm.x86.avx2.phadd.sw",
  "llvm.x86.avx2.phadd.w",
  "llvm.x86.avx2.phsub.d",
  "llvm.x86.avx2.phsub.sw",
  "llvm.x86.avx2.phsub.w",
  "llvm.x86.avx2.pmadd.ub.sw",
  "llvm.x86.avx2.pmadd.wd",
  "llvm.x86.avx2.pmovmskb",
  "llvm.x86.avx2.pmul.hr.sw",
  "llvm.x86.avx2.pmulh.w",
  "llvm.x86.avx2.pmulhu.w",
  "llvm.x86.avx2.psad.bw",
  "llvm.x86.avx2.pshuf.b",
  "llvm.x86.avx2.psign.b",
  "llvm.x86.avx2.psign.d",
  "llvm.x86.avx2.psign.w",
  "llvm.x86.avx2.psll.d",
  "llvm.x86.avx2.psll.q",
  "llvm.x86.avx2.psll.w",
  "llvm.x86.avx2.pslli.d",
  "llvm.x86.avx2.pslli.q",
  "llvm.x86.avx2.pslli.w",
  "llvm.x86.avx2.psllv.d",
  "llvm.x86.avx2.psllv.d.256",
  "llvm.x86.avx2.psllv.q",
  "llvm.x86.avx2.psllv.q.256",
  "llvm.x86.avx2.psra.d",
  "llvm.x86.avx2.psra.w",
  "llvm.x86.avx2.psrai.d",
  "llvm.x86.avx2.psrai.w",
  "llvm.x86.avx2.psrav.d",
  "llvm.x86.avx2.psrav.d.256",
  "llvm.x86.avx2.psrl.d",
  "llvm.x86.avx2.psrl.q",
  "llvm.x86.avx2.psrl.w",
  "llvm.x86.avx2.psrli.d",
  "llvm.x86.avx2.psrli.q",
  "llvm.x86.avx2.psrli.w",
  "llvm.x86.avx2.psrlv.d",
  "llvm.x86.avx2.psrlv.d.256",
  "llvm.x86.avx2.psrlv.q",
  "llvm.x86.avx2.psrlv.q.256",
  "llvm.x86.avx2.vpdpbssd.128",
  "llvm.x86.avx2.vpdpbssd.256",
  "llvm.x86.avx2.vpdpbssds.128",
  "llvm.x86.avx2.vpdpbssds.256",
  "llvm.x86.avx2.vpdpbsud.128",
  "llvm.x86.avx2.vpdpbsud.256",
  "llvm.x86.avx2.vpdpbsuds.128",
  "llvm.x86.avx2.vpdpbsuds.256",
  "llvm.x86.avx2.vpdpbuud.128",
  "llvm.x86.avx2.vpdpbuud.256",
  "llvm.x86.avx2.vpdpbuuds.128",
  "llvm.x86.avx2.vpdpbuuds.256",
  "llvm.x86.avx2.vpdpwsud.128",
  "llvm.x86.avx2.vpdpwsud.256",
  "llvm.x86.avx2.vpdpwsuds.128",
  "llvm.x86.avx2.vpdpwsuds.256",
  "llvm.x86.avx2.vpdpwusd.128",
  "llvm.x86.avx2.vpdpwusd.256",
  "llvm.x86.avx2.vpdpwusds.128",
  "llvm.x86.avx2.vpdpwusds.256",
  "llvm.x86.avx2.vpdpwuud.128",
  "llvm.x86.avx2.vpdpwuud.256",
  "llvm.x86.avx2.vpdpwuuds.128",
  "llvm.x86.avx2.vpdpwuuds.256",
  "llvm.x86.avx512.add.pd.512",
  "llvm.x86.avx512.add.ps.512",
  "llvm.x86.avx512.broadcastmb.128",
  "llvm.x86.avx512.broadcastmb.256",
  "llvm.x86.avx512.broadcastmb.512",
  "llvm.x86.avx512.broadcastmw.128",
  "llvm.x86.avx512.broadcastmw.256",
  "llvm.x86.avx512.broadcastmw.512",
  "llvm.x86.avx512.conflict.d.128",
  "llvm.x86.avx512.conflict.d.256",
  "llvm.x86.avx512.conflict.d.512",
  "llvm.x86.avx512.conflict.q.128",
  "llvm.x86.avx512.conflict.q.256",
  "llvm.x86.avx512.conflict.q.512",
  "llvm.x86.avx512.cvtsi2sd64",
  "llvm.x86.avx512.cvtsi2ss32",
  "llvm.x86.avx512.cvtsi2ss64",
  "llvm.x86.avx512.cvttsd2si",
  "llvm.x86.avx512.cvttsd2si64",
  "llvm.x86.avx512.cvttsd2usi",
  "llvm.x86.avx512.cvttsd2usi64",
  "llvm.x86.avx512.cvttss2si",
  "llvm.x86.avx512.cvttss2si64",
  "llvm.x86.avx512.cvttss2usi",
  "llvm.x86.avx512.cvttss2usi64",
  "llvm.x86.avx512.cvtusi2ss",
  "llvm.x86.avx512.cvtusi642sd",
  "llvm.x86.avx512.cvtusi642ss",
  "llvm.x86.avx512.dbpsadbw.128",
  "llvm.x86.avx512.dbpsadbw.256",
  "llvm.x86.avx512.dbpsadbw.512",
  "llvm.x86.avx512.div.pd.512",
  "llvm.x86.avx512.div.ps.512",
  "llvm.x86.avx512.exp2.pd",
  "llvm.x86.avx512.exp2.ps",
  "llvm.x86.avx512.fpclass.pd.128",
  "llvm.x86.avx512.fpclass.pd.256",
  "llvm.x86.avx512.fpclass.pd.512",
  "llvm.x86.avx512.fpclass.ps.128",
  "llvm.x86.avx512.fpclass.ps.256",
  "llvm.x86.avx512.fpclass.ps.512",
  "llvm.x86.avx512.gather.dpd.512",
  "llvm.x86.avx512.gather.dpi.512",
  "llvm.x86.avx512.gather.dpq.512",
  "llvm.x86.avx512.gather.dps.512",
  "llvm.x86.avx512.gather.qpd.512",
  "llvm.x86.avx512.gather.qpi.512",
  "llvm.x86.avx512.gather.qpq.512",
  "llvm.x86.avx512.gather.qps.512",
  "llvm.x86.avx512.gather3div2.df",
  "llvm.x86.avx512.gather3div2.di",
  "llvm.x86.avx512.gather3div4.df",
  "llvm.x86.avx512.gather3div4.di",
  "llvm.x86.avx512.gather3div4.sf",
  "llvm.x86.avx512.gather3div4.si",
  "llvm.x86.avx512.gather3div8.sf",
  "llvm.x86.avx512.gather3div8.si",
  "llvm.x86.avx512.gather3siv2.df",
  "llvm.x86.avx512.gather3siv2.di",
  "llvm.x86.avx512.gather3siv4.df",
  "llvm.x86.avx512.gather3siv4.di",
  "llvm.x86.avx512.gather3siv4.sf",
  "llvm.x86.avx512.gather3siv4.si",
  "llvm.x86.avx512.gather3siv8.sf",
  "llvm.x86.avx512.gather3siv8.si",
  "llvm.x86.avx512.gatherpf.dpd.512",
  "llvm.x86.avx512.gatherpf.dps.512",
  "llvm.x86.avx512.gatherpf.qpd.512",
  "llvm.x86.avx512.gatherpf.qps.512",
  "llvm.x86.avx512.kadd.b",
  "llvm.x86.avx512.kadd.d",
  "llvm.x86.avx512.kadd.q",
  "llvm.x86.avx512.kadd.w",
  "llvm.x86.avx512.ktestc.b",
  "llvm.x86.avx512.ktestc.d",
  "llvm.x86.avx512.ktestc.q",
  "llvm.x86.avx512.ktestc.w",
  "llvm.x86.avx512.ktestz.b",
  "llvm.x86.avx512.ktestz.d",
  "llvm.x86.avx512.ktestz.q",
  "llvm.x86.avx512.ktestz.w",
  "llvm.x86.avx512.mask.add.sd.round",
  "llvm.x86.avx512.mask.add.ss.round",
  "llvm.x86.avx512.mask.cmp.pd.128",
  "llvm.x86.avx512.mask.cmp.pd.256",
  "llvm.x86.avx512.mask.cmp.pd.512",
  "llvm.x86.avx512.mask.cmp.ps.128",
  "llvm.x86.avx512.mask.cmp.ps.256",
  "llvm.x86.avx512.mask.cmp.ps.512",
  "llvm.x86.avx512.mask.cmp.sd",
  "llvm.x86.avx512.mask.cmp.ss",
  "llvm.x86.avx512.mask.compress",
  "llvm.x86.avx512.mask.cvtpd2dq.128",
  "llvm.x86.avx512.mask.cvtpd2dq.512",
  "llvm.x86.avx512.mask.cvtpd2ps",
  "llvm.x86.avx512.mask.cvtpd2ps.512",
  "llvm.x86.avx512.mask.cvtpd2qq.128",
  "llvm.x86.avx512.mask.cvtpd2qq.256",
  "llvm.x86.avx512.mask.cvtpd2qq.512",
  "llvm.x86.avx512.mask.cvtpd2udq.128",
  "llvm.x86.avx512.mask.cvtpd2udq.256",
  "llvm.x86.avx512.mask.cvtpd2udq.512",
  "llvm.x86.avx512.mask.cvtpd2uqq.128",
  "llvm.x86.avx512.mask.cvtpd2uqq.256",
  "llvm.x86.avx512.mask.cvtpd2uqq.512",
  "llvm.x86.avx512.mask.cvtps2dq.128",
  "llvm.x86.avx512.mask.cvtps2dq.256",
  "llvm.x86.avx512.mask.cvtps2dq.512",
  "llvm.x86.avx512.mask.cvtps2pd.512",
  "llvm.x86.avx512.mask.cvtps2qq.128",
  "llvm.x86.avx512.mask.cvtps2qq.256",
  "llvm.x86.avx512.mask.cvtps2qq.512",
  "llvm.x86.avx512.mask.cvtps2udq.128",
  "llvm.x86.avx512.mask.cvtps2udq.256",
  "llvm.x86.avx512.mask.cvtps2udq.512",
  "llvm.x86.avx512.mask.cvtps2uqq.128",
  "llvm.x86.avx512.mask.cvtps2uqq.256",
  "llvm.x86.avx512.mask.cvtps2uqq.512",
  "llvm.x86.avx512.mask.cvtqq2ps.128",
  "llvm.x86.avx512.mask.cvtsd2ss.round",
  "llvm.x86.avx512.mask.cvtss2sd.round",
  "llvm.x86.avx512.mask.cvttpd2dq.128",
  "llvm.x86.avx512.mask.cvttpd2dq.512",
  "llvm.x86.avx512.mask.cvttpd2qq.128",
  "llvm.x86.avx512.mask.cvttpd2qq.256",
  "llvm.x86.avx512.mask.cvttpd2qq.512",
  "llvm.x86.avx512.mask.cvttpd2udq.128",
  "llvm.x86.avx512.mask.cvttpd2udq.256",
  "llvm.x86.avx512.mask.cvttpd2udq.512",
  "llvm.x86.avx512.mask.cvttpd2uqq.128",
  "llvm.x86.avx512.mask.cvttpd2uqq.256",
  "llvm.x86.avx512.mask.cvttpd2uqq.512",
  "llvm.x86.avx512.mask.cvttps2dq.512",
  "llvm.x86.avx512.mask.cvttps2qq.128",
  "llvm.x86.avx512.mask.cvttps2qq.256",
  "llvm.x86.avx512.mask.cvttps2qq.512",
  "llvm.x86.avx512.mask.cvttps2udq.128",
  "llvm.x86.avx512.mask.cvttps2udq.256",
  "llvm.x86.avx512.mask.cvttps2udq.512",
  "llvm.x86.avx512.mask.cvttps2uqq.128",
  "llvm.x86.avx512.mask.cvttps2uqq.256",
  "llvm.x86.avx512.mask.cvttps2uqq.512",
  "llvm.x86.avx512.mask.cvtuqq2ps.128",
  "llvm.x86.avx512.mask.div.sd.round",
  "llvm.x86.avx512.mask.div.ss.round",
  "llvm.x86.avx512.mask.expand",
  "llvm.x86.avx512.mask.fixupimm.pd.128",
  "llvm.x86.avx512.mask.fixupimm.pd.256",
  "llvm.x86.avx512.mask.fixupimm.pd.512",
  "llvm.x86.avx512.mask.fixupimm.ps.128",
  "llvm.x86.avx512.mask.fixupimm.ps.256",
  "llvm.x86.avx512.mask.fixupimm.ps.512",
  "llvm.x86.avx512.mask.fixupimm.sd",
  "llvm.x86.avx512.mask.fixupimm.ss",
  "llvm.x86.avx512.mask.fpclass.sd",
  "llvm.x86.avx512.mask.fpclass.ss",
  "llvm.x86.avx512.mask.gather.dpd.512",
  "llvm.x86.avx512.mask.gather.dpi.512",
  "llvm.x86.avx512.mask.gather.dpq.512",
  "llvm.x86.avx512.mask.gather.dps.512",
  "llvm.x86.avx512.mask.gather.qpd.512",
  "llvm.x86.avx512.mask.gather.qpi.512",
  "llvm.x86.avx512.mask.gather.qpq.512",
  "llvm.x86.avx512.mask.gather.qps.512",
  "llvm.x86.avx512.mask.gather3div2.df",
  "llvm.x86.avx512.mask.gather3div2.di",
  "llvm.x86.avx512.mask.gather3div4.df",
  "llvm.x86.avx512.mask.gather3div4.di",
  "llvm.x86.avx512.mask.gather3div4.sf",
  "llvm.x86.avx512.mask.gather3div4.si",
  "llvm.x86.avx512.mask.gather3div8.sf",
  "llvm.x86.avx512.mask.gather3div8.si",
  "llvm.x86.avx512.mask.gather3siv2.df",
  "llvm.x86.avx512.mask.gather3siv2.di",
  "llvm.x86.avx512.mask.gather3siv4.df",
  "llvm.x86.avx512.mask.gather3siv4.di",
  "llvm.x86.avx512.mask.gather3siv4.sf",
  "llvm.x86.avx512.mask.gather3siv4.si",
  "llvm.x86.avx512.mask.gather3siv8.sf",
  "llvm.x86.avx512.mask.gather3siv8.si",
  "llvm.x86.avx512.mask.getexp.pd.128",
  "llvm.x86.avx512.mask.getexp.pd.256",
  "llvm.x86.avx512.mask.getexp.pd.512",
  "llvm.x86.avx512.mask.getexp.ps.128",
  "llvm.x86.avx512.mask.getexp.ps.256",
  "llvm.x86.avx512.mask.getexp.ps.512",
  "llvm.x86.avx512.mask.getexp.sd",
  "llvm.x86.avx512.mask.getexp.ss",
  "llvm.x86.avx512.mask.getmant.pd.128",
  "llvm.x86.avx512.mask.getmant.pd.256",
  "llvm.x86.avx512.mask.getmant.pd.512",
  "llvm.x86.avx512.mask.getmant.ps.128",
  "llvm.x86.avx512.mask.getmant.ps.256",
  "llvm.x86.avx512.mask.getmant.ps.512",
  "llvm.x86.avx512.mask.getmant.sd",
  "llvm.x86.avx512.mask.getmant.ss",
  "llvm.x86.avx512.mask.max.sd.round",
  "llvm.x86.avx512.mask.max.ss.round",
  "llvm.x86.avx512.mask.min.sd.round",
  "llvm.x86.avx512.mask.min.ss.round",
  "llvm.x86.avx512.mask.mul.sd.round",
  "llvm.x86.avx512.mask.mul.ss.round",
  "llvm.x86.avx512.mask.pmov.db.128",
  "llvm.x86.avx512.mask.pmov.db.256",
  "llvm.x86.avx512.mask.pmov.db.512",
  "llvm.x86.avx512.mask.pmov.db.mem.128",
  "llvm.x86.avx512.mask.pmov.db.mem.256",
  "llvm.x86.avx512.mask.pmov.db.mem.512",
  "llvm.x86.avx512.mask.pmov.dw.128",
  "llvm.x86.avx512.mask.pmov.dw.256",
  "llvm.x86.avx512.mask.pmov.dw.512",
  "llvm.x86.avx512.mask.pmov.dw.mem.128",
  "llvm.x86.avx512.mask.pmov.dw.mem.256",
  "llvm.x86.avx512.mask.pmov.dw.mem.512",
  "llvm.x86.avx512.mask.pmov.qb.128",
  "llvm.x86.avx512.mask.pmov.qb.256",
  "llvm.x86.avx512.mask.pmov.qb.512",
  "llvm.x86.avx512.mask.pmov.qb.mem.128",
  "llvm.x86.avx512.mask.pmov.qb.mem.256",
  "llvm.x86.avx512.mask.pmov.qb.mem.512",
  "llvm.x86.avx512.mask.pmov.qd.128",
  "llvm.x86.avx512.mask.pmov.qd.mem.128",
  "llvm.x86.avx512.mask.pmov.qd.mem.256",
  "llvm.x86.avx512.mask.pmov.qd.mem.512",
  "llvm.x86.avx512.mask.pmov.qw.128",
  "llvm.x86.avx512.mask.pmov.qw.256",
  "llvm.x86.avx512.mask.pmov.qw.512",
  "llvm.x86.avx512.mask.pmov.qw.mem.128",
  "llvm.x86.avx512.mask.pmov.qw.mem.256",
  "llvm.x86.avx512.mask.pmov.qw.mem.512",
  "llvm.x86.avx512.mask.pmov.wb.128",
  "llvm.x86.avx512.mask.pmov.wb.mem.128",
  "llvm.x86.avx512.mask.pmov.wb.mem.256",
  "llvm.x86.avx512.mask.pmov.wb.mem.512",
  "llvm.x86.avx512.mask.pmovs.db.128",
  "llvm.x86.avx512.mask.pmovs.db.256",
  "llvm.x86.avx512.mask.pmovs.db.512",
  "llvm.x86.avx512.mask.pmovs.db.mem.128",
  "llvm.x86.avx512.mask.pmovs.db.mem.256",
  "llvm.x86.avx512.mask.pmovs.db.mem.512",
  "llvm.x86.avx512.mask.pmovs.dw.128",
  "llvm.x86.avx512.mask.pmovs.dw.256",
  "llvm.x86.avx512.mask.pmovs.dw.512",
  "llvm.x86.avx512.mask.pmovs.dw.mem.128",
  "llvm.x86.avx512.mask.pmovs.dw.mem.256",
  "llvm.x86.avx512.mask.pmovs.dw.mem.512",
  "llvm.x86.avx512.mask.pmovs.qb.128",
  "llvm.x86.avx512.mask.pmovs.qb.256",
  "llvm.x86.avx512.mask.pmovs.qb.512",
  "llvm.x86.avx512.mask.pmovs.qb.mem.128",
  "llvm.x86.avx512.mask.pmovs.qb.mem.256",
  "llvm.x86.avx512.mask.pmovs.qb.mem.512",
  "llvm.x86.avx512.mask.pmovs.qd.128",
  "llvm.x86.avx512.mask.pmovs.qd.256",
  "llvm.x86.avx512.mask.pmovs.qd.512",
  "llvm.x86.avx512.mask.pmovs.qd.mem.128",
  "llvm.x86.avx512.mask.pmovs.qd.mem.256",
  "llvm.x86.avx512.mask.pmovs.qd.mem.512",
  "llvm.x86.avx512.mask.pmovs.qw.128",
  "llvm.x86.avx512.mask.pmovs.qw.256",
  "llvm.x86.avx512.mask.pmovs.qw.512",
  "llvm.x86.avx512.mask.pmovs.qw.mem.128",
  "llvm.x86.avx512.mask.pmovs.qw.mem.256",
  "llvm.x86.avx512.mask.pmovs.qw.mem.512",
  "llvm.x86.avx512.mask.pmovs.wb.128",
  "llvm.x86.avx512.mask.pmovs.wb.256",
  "llvm.x86.avx512.mask.pmovs.wb.512",
  "llvm.x86.avx512.mask.pmovs.wb.mem.128",
  "llvm.x86.avx512.mask.pmovs.wb.mem.256",
  "llvm.x86.avx512.mask.pmovs.wb.mem.512",
  "llvm.x86.avx512.mask.pmovus.db.128",
  "llvm.x86.avx512.mask.pmovus.db.256",
  "llvm.x86.avx512.mask.pmovus.db.512",
  "llvm.x86.avx512.mask.pmovus.db.mem.128",
  "llvm.x86.avx512.mask.pmovus.db.mem.256",
  "llvm.x86.avx512.mask.pmovus.db.mem.512",
  "llvm.x86.avx512.mask.pmovus.dw.128",
  "llvm.x86.avx512.mask.pmovus.dw.256",
  "llvm.x86.avx512.mask.pmovus.dw.512",
  "llvm.x86.avx512.mask.pmovus.dw.mem.128",
  "llvm.x86.avx512.mask.pmovus.dw.mem.256",
  "llvm.x86.avx512.mask.pmovus.dw.mem.512",
  "llvm.x86.avx512.mask.pmovus.qb.128",
  "llvm.x86.avx512.mask.pmovus.qb.256",
  "llvm.x86.avx512.mask.pmovus.qb.512",
  "llvm.x86.avx512.mask.pmovus.qb.mem.128",
  "llvm.x86.avx512.mask.pmovus.qb.mem.256",
  "llvm.x86.avx512.mask.pmovus.qb.mem.512",
  "llvm.x86.avx512.mask.pmovus.qd.128",
  "llvm.x86.avx512.mask.pmovus.qd.256",
  "llvm.x86.avx512.mask.pmovus.qd.512",
  "llvm.x86.avx512.mask.pmovus.qd.mem.128",
  "llvm.x86.avx512.mask.pmovus.qd.mem.256",
  "llvm.x86.avx512.mask.pmovus.qd.mem.512",
  "llvm.x86.avx512.mask.pmovus.qw.128",
  "llvm.x86.avx512.mask.pmovus.qw.256",
  "llvm.x86.avx512.mask.pmovus.qw.512",
  "llvm.x86.avx512.mask.pmovus.qw.mem.128",
  "llvm.x86.avx512.mask.pmovus.qw.mem.256",
  "llvm.x86.avx512.mask.pmovus.qw.mem.512",
  "llvm.x86.avx512.mask.pmovus.wb.128",
  "llvm.x86.avx512.mask.pmovus.wb.256",
  "llvm.x86.avx512.mask.pmovus.wb.512",
  "llvm.x86.avx512.mask.pmovus.wb.mem.128",
  "llvm.x86.avx512.mask.pmovus.wb.mem.256",
  "llvm.x86.avx512.mask.pmovus.wb.mem.512",
  "llvm.x86.avx512.mask.range.pd.128",
  "llvm.x86.avx512.mask.range.pd.256",
  "llvm.x86.avx512.mask.range.pd.512",
  "llvm.x86.avx512.mask.range.ps.128",
  "llvm.x86.avx512.mask.range.ps.256",
  "llvm.x86.avx512.mask.range.ps.512",
  "llvm.x86.avx512.mask.range.sd",
  "llvm.x86.avx512.mask.range.ss",
  "llvm.x86.avx512.mask.reduce.pd.128",
  "llvm.x86.avx512.mask.reduce.pd.256",
  "llvm.x86.avx512.mask.reduce.pd.512",
  "llvm.x86.avx512.mask.reduce.ps.128",
  "llvm.x86.avx512.mask.reduce.ps.256",
  "llvm.x86.avx512.mask.reduce.ps.512",
  "llvm.x86.avx512.mask.reduce.sd",
  "llvm.x86.avx512.mask.reduce.ss",
  "llvm.x86.avx512.mask.rndscale.pd.128",
  "llvm.x86.avx512.mask.rndscale.pd.256",
  "llvm.x86.avx512.mask.rndscale.pd.512",
  "llvm.x86.avx512.mask.rndscale.ps.128",
  "llvm.x86.avx512.mask.rndscale.ps.256",
  "llvm.x86.avx512.mask.rndscale.ps.512",
  "llvm.x86.avx512.mask.rndscale.sd",
  "llvm.x86.avx512.mask.rndscale.ss",
  "llvm.x86.avx512.mask.scalef.pd.128",
  "llvm.x86.avx512.mask.scalef.pd.256",
  "llvm.x86.avx512.mask.scalef.pd.512",
  "llvm.x86.avx512.mask.scalef.ps.128",
  "llvm.x86.avx512.mask.scalef.ps.256",
  "llvm.x86.avx512.mask.scalef.ps.512",
  "llvm.x86.avx512.mask.scalef.sd",
  "llvm.x86.avx512.mask.scalef.ss",
  "llvm.x86.avx512.mask.scatter.dpd.512",
  "llvm.x86.avx512.mask.scatter.dpi.512",
  "llvm.x86.avx512.mask.scatter.dpq.512",
  "llvm.x86.avx512.mask.scatter.dps.512",
  "llvm.x86.avx512.mask.scatter.qpd.512",
  "llvm.x86.avx512.mask.scatter.qpi.512",
  "llvm.x86.avx512.mask.scatter.qpq.512",
  "llvm.x86.avx512.mask.scatter.qps.512",
  "llvm.x86.avx512.mask.scatterdiv2.df",
  "llvm.x86.avx512.mask.scatterdiv2.di",
  "llvm.x86.avx512.mask.scatterdiv4.df",
  "llvm.x86.avx512.mask.scatterdiv4.di",
  "llvm.x86.avx512.mask.scatterdiv4.sf",
  "llvm.x86.avx512.mask.scatterdiv4.si",
  "llvm.x86.avx512.mask.scatterdiv8.sf",
  "llvm.x86.avx512.mask.scatterdiv8.si",
  "llvm.x86.avx512.mask.scattersiv2.df",
  "llvm.x86.avx512.mask.scattersiv2.di",
  "llvm.x86.avx512.mask.scattersiv4.df",
  "llvm.x86.avx512.mask.scattersiv4.di",
  "llvm.x86.avx512.mask.scattersiv4.sf",
  "llvm.x86.avx512.mask.scattersiv4.si",
  "llvm.x86.avx512.mask.scattersiv8.sf",
  "llvm.x86.avx512.mask.scattersiv8.si",
  "llvm.x86.avx512.mask.sqrt.sd",
  "llvm.x86.avx512.mask.sqrt.ss",
  "llvm.x86.avx512.mask.sub.sd.round",
  "llvm.x86.avx512.mask.sub.ss.round",
  "llvm.x86.avx512.mask.vcvtph2ps.512",
  "llvm.x86.avx512.mask.vcvtps2ph.128",
  "llvm.x86.avx512.mask.vcvtps2ph.256",
  "llvm.x86.avx512.mask.vcvtps2ph.512",
  "llvm.x86.avx512.maskz.fixupimm.pd.128",
  "llvm.x86.avx512.maskz.fixupimm.pd.256",
  "llvm.x86.avx512.maskz.fixupimm.pd.512",
  "llvm.x86.avx512.maskz.fixupimm.ps.128",
  "llvm.x86.avx512.maskz.fixupimm.ps.256",
  "llvm.x86.avx512.maskz.fixupimm.ps.512",
  "llvm.x86.avx512.maskz.fixupimm.sd",
  "llvm.x86.avx512.maskz.fixupimm.ss",
  "llvm.x86.avx512.max.pd.512",
  "llvm.x86.avx512.max.ps.512",
  "llvm.x86.avx512.min.pd.512",
  "llvm.x86.avx512.min.ps.512",
  "llvm.x86.avx512.mul.pd.512",
  "llvm.x86.avx512.mul.ps.512",
  "llvm.x86.avx512.packssdw.512",
  "llvm.x86.avx512.packsswb.512",
  "llvm.x86.avx512.packusdw.512",
  "llvm.x86.avx512.packuswb.512",
  "llvm.x86.avx512.pavg.b.512",
  "llvm.x86.avx512.pavg.w.512",
  "llvm.x86.avx512.permvar.df.256",
  "llvm.x86.avx512.permvar.df.512",
  "llvm.x86.avx512.permvar.di.256",
  "llvm.x86.avx512.permvar.di.512",
  "llvm.x86.avx512.permvar.hi.128",
  "llvm.x86.avx512.permvar.hi.256",
  "llvm.x86.avx512.permvar.hi.512",
  "llvm.x86.avx512.permvar.qi.128",
  "llvm.x86.avx512.permvar.qi.256",
  "llvm.x86.avx512.permvar.qi.512",
  "llvm.x86.avx512.permvar.sf.512",
  "llvm.x86.avx512.permvar.si.512",
  "llvm.x86.avx512.pmaddubs.w.512",
  "llvm.x86.avx512.pmaddw.d.512",
  "llvm.x86.avx512.pmul.hr.sw.512",
  "llvm.x86.avx512.pmulh.w.512",
  "llvm.x86.avx512.pmulhu.w.512",
  "llvm.x86.avx512.pmultishift.qb.128",
  "llvm.x86.avx512.pmultishift.qb.256",
  "llvm.x86.avx512.pmultishift.qb.512",
  "llvm.x86.avx512.psad.bw.512",
  "llvm.x86.avx512.pshuf.b.512",
  "llvm.x86.avx512.psll.d.512",
  "llvm.x86.avx512.psll.q.512",
  "llvm.x86.avx512.psll.w.512",
  "llvm.x86.avx512.pslli.d.512",
  "llvm.x86.avx512.pslli.q.512",
  "llvm.x86.avx512.pslli.w.512",
  "llvm.x86.avx512.psllv.d.512",
  "llvm.x86.avx512.psllv.q.512",
  "llvm.x86.avx512.psllv.w.128",
  "llvm.x86.avx512.psllv.w.256",
  "llvm.x86.avx512.psllv.w.512",
  "llvm.x86.avx512.psra.d.512",
  "llvm.x86.avx512.psra.q.128",
  "llvm.x86.avx512.psra.q.256",
  "llvm.x86.avx512.psra.q.512",
  "llvm.x86.avx512.psra.w.512",
  "llvm.x86.avx512.psrai.d.512",
  "llvm.x86.avx512.psrai.q.128",
  "llvm.x86.avx512.psrai.q.256",
  "llvm.x86.avx512.psrai.q.512",
  "llvm.x86.avx512.psrai.w.512",
  "llvm.x86.avx512.psrav.d.512",
  "llvm.x86.avx512.psrav.q.128",
  "llvm.x86.avx512.psrav.q.256",
  "llvm.x86.avx512.psrav.q.512",
  "llvm.x86.avx512.psrav.w.128",
  "llvm.x86.avx512.psrav.w.256",
  "llvm.x86.avx512.psrav.w.512",
  "llvm.x86.avx512.psrl.d.512",
  "llvm.x86.avx512.psrl.q.512",
  "llvm.x86.avx512.psrl.w.512",
  "llvm.x86.avx512.psrli.d.512",
  "llvm.x86.avx512.psrli.q.512",
  "llvm.x86.avx512.psrli.w.512",
  "llvm.x86.avx512.psrlv.d.512",
  "llvm.x86.avx512.psrlv.q.512",
  "llvm.x86.avx512.psrlv.w.128",
  "llvm.x86.avx512.psrlv.w.256",
  "llvm.x86.avx512.psrlv.w.512",
  "llvm.x86.avx512.pternlog.d.128",
  "llvm.x86.avx512.pternlog.d.256",
  "llvm.x86.avx512.pternlog.d.512",
  "llvm.x86.avx512.pternlog.q.128",
  "llvm.x86.avx512.pternlog.q.256",
  "llvm.x86.avx512.pternlog.q.512",
  "llvm.x86.avx512.rcp14.pd.128",
  "llvm.x86.avx512.rcp14.pd.256",
  "llvm.x86.avx512.rcp14.pd.512",
  "llvm.x86.avx512.rcp14.ps.128",
  "llvm.x86.avx512.rcp14.ps.256",
  "llvm.x86.avx512.rcp14.ps.512",
  "llvm.x86.avx512.rcp14.sd",
  "llvm.x86.avx512.rcp14.ss",
  "llvm.x86.avx512.rcp28.pd",
  "llvm.x86.avx512.rcp28.ps",
  "llvm.x86.avx512.rcp28.sd",
  "llvm.x86.avx512.rcp28.ss",
  "llvm.x86.avx512.rsqrt14.pd.128",
  "llvm.x86.avx512.rsqrt14.pd.256",
  "llvm.x86.avx512.rsqrt14.pd.512",
  "llvm.x86.avx512.rsqrt14.ps.128",
  "llvm.x86.avx512.rsqrt14.ps.256",
  "llvm.x86.avx512.rsqrt14.ps.512",
  "llvm.x86.avx512.rsqrt14.sd",
  "llvm.x86.avx512.rsqrt14.ss",
  "llvm.x86.avx512.rsqrt28.pd",
  "llvm.x86.avx512.rsqrt28.ps",
  "llvm.x86.avx512.rsqrt28.sd",
  "llvm.x86.avx512.rsqrt28.ss",
  "llvm.x86.avx512.scatter.dpd.512",
  "llvm.x86.avx512.scatter.dpi.512",
  "llvm.x86.avx512.scatter.dpq.512",
  "llvm.x86.avx512.scatter.dps.512",
  "llvm.x86.avx512.scatter.qpd.512",
  "llvm.x86.avx512.scatter.qpi.512",
  "llvm.x86.avx512.scatter.qpq.512",
  "llvm.x86.avx512.scatter.qps.512",
  "llvm.x86.avx512.scatterdiv2.df",
  "llvm.x86.avx512.scatterdiv2.di",
  "llvm.x86.avx512.scatterdiv4.df",
  "llvm.x86.avx512.scatterdiv4.di",
  "llvm.x86.avx512.scatterdiv4.sf",
  "llvm.x86.avx512.scatterdiv4.si",
  "llvm.x86.avx512.scatterdiv8.sf",
  "llvm.x86.avx512.scatterdiv8.si",
  "llvm.x86.avx512.scatterpf.dpd.512",
  "llvm.x86.avx512.scatterpf.dps.512",
  "llvm.x86.avx512.scatterpf.qpd.512",
  "llvm.x86.avx512.scatterpf.qps.512",
  "llvm.x86.avx512.scattersiv2.df",
  "llvm.x86.avx512.scattersiv2.di",
  "llvm.x86.avx512.scattersiv4.df",
  "llvm.x86.avx512.scattersiv4.di",
  "llvm.x86.avx512.scattersiv4.sf",
  "llvm.x86.avx512.scattersiv4.si",
  "llvm.x86.avx512.scattersiv8.sf",
  "llvm.x86.avx512.scattersiv8.si",
  "llvm.x86.avx512.sitofp.round",
  "llvm.x86.avx512.sqrt.pd.512",
  "llvm.x86.avx512.sqrt.ps.512",
  "llvm.x86.avx512.sub.pd.512",
  "llvm.x86.avx512.sub.ps.512",
  "llvm.x86.avx512.uitofp.round",
  "llvm.x86.avx512.vcomi.sd",
  "llvm.x86.avx512.vcomi.ss",
  "llvm.x86.avx512.vcvtsd2si32",
  "llvm.x86.avx512.vcvtsd2si64",
  "llvm.x86.avx512.vcvtsd2usi32",
  "llvm.x86.avx512.vcvtsd2usi64",
  "llvm.x86.avx512.vcvtss2si32",
  "llvm.x86.avx512.vcvtss2si64",
  "llvm.x86.avx512.vcvtss2usi32",
  "llvm.x86.avx512.vcvtss2usi64",
  "llvm.x86.avx512.vfmadd.f32",
  "llvm.x86.avx512.vfmadd.f64",
  "llvm.x86.avx512.vfmadd.pd.512",
  "llvm.x86.avx512.vfmadd.ps.512",
  "llvm.x86.avx512.vfmaddsub.pd.512",
  "llvm.x86.avx512.vfmaddsub.ps.512",
  "llvm.x86.avx512.vp2intersect.d.128",
  "llvm.x86.avx512.vp2intersect.d.256",
  "llvm.x86.avx512.vp2intersect.d.512",
  "llvm.x86.avx512.vp2intersect.q.128",
  "llvm.x86.avx512.vp2intersect.q.256",
  "llvm.x86.avx512.vp2intersect.q.512",
  "llvm.x86.avx512.vpdpbusd.128",
  "llvm.x86.avx512.vpdpbusd.256",
  "llvm.x86.avx512.vpdpbusd.512",
  "llvm.x86.avx512.vpdpbusds.128",
  "llvm.x86.avx512.vpdpbusds.256",
  "llvm.x86.avx512.vpdpbusds.512",
  "llvm.x86.avx512.vpdpwssd.128",
  "llvm.x86.avx512.vpdpwssd.256",
  "llvm.x86.avx512.vpdpwssd.512",
  "llvm.x86.avx512.vpdpwssds.128",
  "llvm.x86.avx512.vpdpwssds.256",
  "llvm.x86.avx512.vpdpwssds.512",
  "llvm.x86.avx512.vpermi2var.d.128",
  "llvm.x86.avx512.vpermi2var.d.256",
  "llvm.x86.avx512.vpermi2var.d.512",
  "llvm.x86.avx512.vpermi2var.hi.128",
  "llvm.x86.avx512.vpermi2var.hi.256",
  "llvm.x86.avx512.vpermi2var.hi.512",
  "llvm.x86.avx512.vpermi2var.pd.128",
  "llvm.x86.avx512.vpermi2var.pd.256",
  "llvm.x86.avx512.vpermi2var.pd.512",
  "llvm.x86.avx512.vpermi2var.ps.128",
  "llvm.x86.avx512.vpermi2var.ps.256",
  "llvm.x86.avx512.vpermi2var.ps.512",
  "llvm.x86.avx512.vpermi2var.q.128",
  "llvm.x86.avx512.vpermi2var.q.256",
  "llvm.x86.avx512.vpermi2var.q.512",
  "llvm.x86.avx512.vpermi2var.qi.128",
  "llvm.x86.avx512.vpermi2var.qi.256",
  "llvm.x86.avx512.vpermi2var.qi.512",
  "llvm.x86.avx512.vpermilvar.pd.512",
  "llvm.x86.avx512.vpermilvar.ps.512",
  "llvm.x86.avx512.vpmadd52h.uq.128",
  "llvm.x86.avx512.vpmadd52h.uq.256",
  "llvm.x86.avx512.vpmadd52h.uq.512",
  "llvm.x86.avx512.vpmadd52l.uq.128",
  "llvm.x86.avx512.vpmadd52l.uq.256",
  "llvm.x86.avx512.vpmadd52l.uq.512",
  "llvm.x86.avx512.vpshufbitqmb.128",
  "llvm.x86.avx512.vpshufbitqmb.256",
  "llvm.x86.avx512.vpshufbitqmb.512",
  "llvm.x86.avx512bf16.cvtne2ps2bf16.128",
  "llvm.x86.avx512bf16.cvtne2ps2bf16.256",
  "llvm.x86.avx512bf16.cvtne2ps2bf16.512",
  "llvm.x86.avx512bf16.cvtneps2bf16.256",
  "llvm.x86.avx512bf16.cvtneps2bf16.512",
  "llvm.x86.avx512bf16.dpbf16ps.128",
  "llvm.x86.avx512bf16.dpbf16ps.256",
  "llvm.x86.avx512bf16.dpbf16ps.512",
  "llvm.x86.avx512bf16.mask.cvtneps2bf16.128",
  "llvm.x86.avx512fp16.add.ph.512",
  "llvm.x86.avx512fp16.div.ph.512",
  "llvm.x86.avx512fp16.fpclass.ph.128",
  "llvm.x86.avx512fp16.fpclass.ph.256",
  "llvm.x86.avx512fp16.fpclass.ph.512",
  "llvm.x86.avx512fp16.mask.add.sh.round",
  "llvm.x86.avx512fp16.mask.cmp.ph.128",
  "llvm.x86.avx512fp16.mask.cmp.ph.256",
  "llvm.x86.avx512fp16.mask.cmp.ph.512",
  "llvm.x86.avx512fp16.mask.cmp.sh",
  "llvm.x86.avx512fp16.mask.div.sh.round",
  "llvm.x86.avx512fp16.mask.fpclass.sh",
  "llvm.x86.avx512fp16.mask.getexp.ph.128",
  "llvm.x86.avx512fp16.mask.getexp.ph.256",
  "llvm.x86.avx512fp16.mask.getexp.ph.512",
  "llvm.x86.avx512fp16.mask.getexp.sh",
  "llvm.x86.avx512fp16.mask.getmant.ph.128",
  "llvm.x86.avx512fp16.mask.getmant.ph.256",
  "llvm.x86.avx512fp16.mask.getmant.ph.512",
  "llvm.x86.avx512fp16.mask.getmant.sh",
  "llvm.x86.avx512fp16.mask.max.sh.round",
  "llvm.x86.avx512fp16.mask.min.sh.round",
  "llvm.x86.avx512fp16.mask.mul.sh.round",
  "llvm.x86.avx512fp16.mask.rcp.ph.128",
  "llvm.x86.avx512fp16.mask.rcp.ph.256",
  "llvm.x86.avx512fp16.mask.rcp.ph.512",
  "llvm.x86.avx512fp16.mask.rcp.sh",
  "llvm.x86.avx512fp16.mask.reduce.ph.128",
  "llvm.x86.avx512fp16.mask.reduce.ph.256",
  "llvm.x86.avx512fp16.mask.reduce.ph.512",
  "llvm.x86.avx512fp16.mask.reduce.sh",
  "llvm.x86.avx512fp16.mask.rndscale.ph.128",
  "llvm.x86.avx512fp16.mask.rndscale.ph.256",
  "llvm.x86.avx512fp16.mask.rndscale.ph.512",
  "llvm.x86.avx512fp16.mask.rndscale.sh",
  "llvm.x86.avx512fp16.mask.rsqrt.ph.128",
  "llvm.x86.avx512fp16.mask.rsqrt.ph.256",
  "llvm.x86.avx512fp16.mask.rsqrt.ph.512",
  "llvm.x86.avx512fp16.mask.rsqrt.sh",
  "llvm.x86.avx512fp16.mask.scalef.ph.128",
  "llvm.x86.avx512fp16.mask.scalef.ph.256",
  "llvm.x86.avx512fp16.mask.scalef.ph.512",
  "llvm.x86.avx512fp16.mask.scalef.sh",
  "llvm.x86.avx512fp16.mask.sqrt.sh",
  "llvm.x86.avx512fp16.mask.sub.sh.round",
  "llvm.x86.avx512fp16.mask.vcvtdq2ph.128",
  "llvm.x86.avx512fp16.mask.vcvtpd2ph.128",
  "llvm.x86.avx512fp16.mask.vcvtpd2ph.256",
  "llvm.x86.avx512fp16.mask.vcvtpd2ph.512",
  "llvm.x86.avx512fp16.mask.vcvtph2dq.128",
  "llvm.x86.avx512fp16.mask.vcvtph2dq.256",
  "llvm.x86.avx512fp16.mask.vcvtph2dq.512",
  "llvm.x86.avx512fp16.mask.vcvtph2pd.128",
  "llvm.x86.avx512fp16.mask.vcvtph2pd.256",
  "llvm.x86.avx512fp16.mask.vcvtph2pd.512",
  "llvm.x86.avx512fp16.mask.vcvtph2psx.128",
  "llvm.x86.avx512fp16.mask.vcvtph2psx.256",
  "llvm.x86.avx512fp16.mask.vcvtph2psx.512",
  "llvm.x86.avx512fp16.mask.vcvtph2qq.128",
  "llvm.x86.avx512fp16.mask.vcvtph2qq.256",
  "llvm.x86.avx512fp16.mask.vcvtph2qq.512",
  "llvm.x86.avx512fp16.mask.vcvtph2udq.128",
  "llvm.x86.avx512fp16.mask.vcvtph2udq.256",
  "llvm.x86.avx512fp16.mask.vcvtph2udq.512",
  "llvm.x86.avx512fp16.mask.vcvtph2uqq.128",
  "llvm.x86.avx512fp16.mask.vcvtph2uqq.256",
  "llvm.x86.avx512fp16.mask.vcvtph2uqq.512",
  "llvm.x86.avx512fp16.mask.vcvtph2uw.128",
  "llvm.x86.avx512fp16.mask.vcvtph2uw.256",
  "llvm.x86.avx512fp16.mask.vcvtph2uw.512",
  "llvm.x86.avx512fp16.mask.vcvtph2w.128",
  "llvm.x86.avx512fp16.mask.vcvtph2w.256",
  "llvm.x86.avx512fp16.mask.vcvtph2w.512",
  "llvm.x86.avx512fp16.mask.vcvtps2phx.128",
  "llvm.x86.avx512fp16.mask.vcvtps2phx.256",
  "llvm.x86.avx512fp16.mask.vcvtps2phx.512",
  "llvm.x86.avx512fp16.mask.vcvtqq2ph.128",
  "llvm.x86.avx512fp16.mask.vcvtqq2ph.256",
  "llvm.x86.avx512fp16.mask.vcvtsd2sh.round",
  "llvm.x86.avx512fp16.mask.vcvtsh2sd.round",
  "llvm.x86.avx512fp16.mask.vcvtsh2ss.round",
  "llvm.x86.avx512fp16.mask.vcvtss2sh.round",
  "llvm.x86.avx512fp16.mask.vcvttph2dq.128",
  "llvm.x86.avx512fp16.mask.vcvttph2dq.256",
  "llvm.x86.avx512fp16.mask.vcvttph2dq.512",
  "llvm.x86.avx512fp16.mask.vcvttph2qq.128",
  "llvm.x86.avx512fp16.mask.vcvttph2qq.256",
  "llvm.x86.avx512fp16.mask.vcvttph2qq.512",
  "llvm.x86.avx512fp16.mask.vcvttph2udq.128",
  "llvm.x86.avx512fp16.mask.vcvttph2udq.256",
  "llvm.x86.avx512fp16.mask.vcvttph2udq.512",
  "llvm.x86.avx512fp16.mask.vcvttph2uqq.128",
  "llvm.x86.avx512fp16.mask.vcvttph2uqq.256",
  "llvm.x86.avx512fp16.mask.vcvttph2uqq.512",
  "llvm.x86.avx512fp16.mask.vcvttph2uw.128",
  "llvm.x86.avx512fp16.mask.vcvttph2uw.256",
  "llvm.x86.avx512fp16.mask.vcvttph2uw.512",
  "llvm.x86.avx512fp16.mask.vcvttph2w.128",
  "llvm.x86.avx512fp16.mask.vcvttph2w.256",
  "llvm.x86.avx512fp16.mask.vcvttph2w.512",
  "llvm.x86.avx512fp16.mask.vcvtudq2ph.128",
  "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128",
  "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256",
  "llvm.x86.avx512fp16.mask.vfcmadd.cph.128",
  "llvm.x86.avx512fp16.mask.vfcmadd.cph.256",
  "llvm.x86.avx512fp16.mask.vfcmadd.cph.512",
  "llvm.x86.avx512fp16.mask.vfcmadd.csh",
  "llvm.x86.avx512fp16.mask.vfcmul.cph.128",
  "llvm.x86.avx512fp16.mask.vfcmul.cph.256",
  "llvm.x86.avx512fp16.mask.vfcmul.cph.512",
  "llvm.x86.avx512fp16.mask.vfcmul.csh",
  "llvm.x86.avx512fp16.mask.vfmadd.cph.128",
  "llvm.x86.avx512fp16.mask.vfmadd.cph.256",
  "llvm.x86.avx512fp16.mask.vfmadd.cph.512",
  "llvm.x86.avx512fp16.mask.vfmadd.csh",
  "llvm.x86.avx512fp16.mask.vfmul.cph.128",
  "llvm.x86.avx512fp16.mask.vfmul.cph.256",
  "llvm.x86.avx512fp16.mask.vfmul.cph.512",
  "llvm.x86.avx512fp16.mask.vfmul.csh",
  "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128",
  "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256",
  "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512",
  "llvm.x86.avx512fp16.maskz.vfcmadd.csh",
  "llvm.x86.avx512fp16.maskz.vfmadd.cph.128",
  "llvm.x86.avx512fp16.maskz.vfmadd.cph.256",
  "llvm.x86.avx512fp16.maskz.vfmadd.cph.512",
  "llvm.x86.avx512fp16.maskz.vfmadd.csh",
  "llvm.x86.avx512fp16.max.ph.128",
  "llvm.x86.avx512fp16.max.ph.256",
  "llvm.x86.avx512fp16.max.ph.512",
  "llvm.x86.avx512fp16.min.ph.128",
  "llvm.x86.avx512fp16.min.ph.256",
  "llvm.x86.avx512fp16.min.ph.512",
  "llvm.x86.avx512fp16.mul.ph.512",
  "llvm.x86.avx512fp16.sqrt.ph.512",
  "llvm.x86.avx512fp16.sub.ph.512",
  "llvm.x86.avx512fp16.vcomi.sh",
  "llvm.x86.avx512fp16.vcvtsh2si32",
  "llvm.x86.avx512fp16.vcvtsh2si64",
  "llvm.x86.avx512fp16.vcvtsh2usi32",
  "llvm.x86.avx512fp16.vcvtsh2usi64",
  "llvm.x86.avx512fp16.vcvtsi2sh",
  "llvm.x86.avx512fp16.vcvtsi642sh",
  "llvm.x86.avx512fp16.vcvttsh2si32",
  "llvm.x86.avx512fp16.vcvttsh2si64",
  "llvm.x86.avx512fp16.vcvttsh2usi32",
  "llvm.x86.avx512fp16.vcvttsh2usi64",
  "llvm.x86.avx512fp16.vcvtusi2sh",
  "llvm.x86.avx512fp16.vcvtusi642sh",
  "llvm.x86.avx512fp16.vfmadd.f16",
  "llvm.x86.avx512fp16.vfmadd.ph.512",
  "llvm.x86.avx512fp16.vfmaddsub.ph.128",
  "llvm.x86.avx512fp16.vfmaddsub.ph.256",
  "llvm.x86.avx512fp16.vfmaddsub.ph.512",
  "llvm.x86.axor32",
  "llvm.x86.axor64",
  "llvm.x86.bmi.bextr.32",
  "llvm.x86.bmi.bextr.64",
  "llvm.x86.bmi.bzhi.32",
  "llvm.x86.bmi.bzhi.64",
  "llvm.x86.bmi.pdep.32",
  "llvm.x86.bmi.pdep.64",
  "llvm.x86.bmi.pext.32",
  "llvm.x86.bmi.pext.64",
  "llvm.x86.cast.tile.to.vector",
  "llvm.x86.cast.vector.to.tile",
  "llvm.x86.cldemote",
  "llvm.x86.clflushopt",
  "llvm.x86.clrssbsy",
  "llvm.x86.clui",
  "llvm.x86.clwb",
  "llvm.x86.clzero",
  "llvm.x86.cmpccxadd32",
  "llvm.x86.cmpccxadd64",
  "llvm.x86.directstore32",
  "llvm.x86.directstore64",
  "llvm.x86.encodekey128",
  "llvm.x86.encodekey256",
  "llvm.x86.enqcmd",
  "llvm.x86.enqcmds",
  "llvm.x86.flags.read.u32",
  "llvm.x86.flags.read.u64",
  "llvm.x86.flags.write.u32",
  "llvm.x86.flags.write.u64",
  "llvm.x86.fma.vfmaddsub.pd",
  "llvm.x86.fma.vfmaddsub.pd.256",
  "llvm.x86.fma.vfmaddsub.ps",
  "llvm.x86.fma.vfmaddsub.ps.256",
  "llvm.x86.fxrstor",
  "llvm.x86.fxrstor64",
  "llvm.x86.fxsave",
  "llvm.x86.fxsave64",
  "llvm.x86.incsspd",
  "llvm.x86.incsspq",
  "llvm.x86.int",
  "llvm.x86.invpcid",
  "llvm.x86.ldtilecfg",
  "llvm.x86.ldtilecfg.internal",
  "llvm.x86.llwpcb",
  "llvm.x86.loadiwkey",
  "llvm.x86.lwpins32",
  "llvm.x86.lwpins64",
  "llvm.x86.lwpval32",
  "llvm.x86.lwpval64",
  "llvm.x86.mmx.emms",
  "llvm.x86.mmx.femms",
  "llvm.x86.mmx.maskmovq",
  "llvm.x86.mmx.movnt.dq",
  "llvm.x86.mmx.packssdw",
  "llvm.x86.mmx.packsswb",
  "llvm.x86.mmx.packuswb",
  "llvm.x86.mmx.padd.b",
  "llvm.x86.mmx.padd.d",
  "llvm.x86.mmx.padd.q",
  "llvm.x86.mmx.padd.w",
  "llvm.x86.mmx.padds.b",
  "llvm.x86.mmx.padds.w",
  "llvm.x86.mmx.paddus.b",
  "llvm.x86.mmx.paddus.w",
  "llvm.x86.mmx.palignr.b",
  "llvm.x86.mmx.pand",
  "llvm.x86.mmx.pandn",
  "llvm.x86.mmx.pavg.b",
  "llvm.x86.mmx.pavg.w",
  "llvm.x86.mmx.pcmpeq.b",
  "llvm.x86.mmx.pcmpeq.d",
  "llvm.x86.mmx.pcmpeq.w",
  "llvm.x86.mmx.pcmpgt.b",
  "llvm.x86.mmx.pcmpgt.d",
  "llvm.x86.mmx.pcmpgt.w",
  "llvm.x86.mmx.pextr.w",
  "llvm.x86.mmx.pinsr.w",
  "llvm.x86.mmx.pmadd.wd",
  "llvm.x86.mmx.pmaxs.w",
  "llvm.x86.mmx.pmaxu.b",
  "llvm.x86.mmx.pmins.w",
  "llvm.x86.mmx.pminu.b",
  "llvm.x86.mmx.pmovmskb",
  "llvm.x86.mmx.pmulh.w",
  "llvm.x86.mmx.pmulhu.w",
  "llvm.x86.mmx.pmull.w",
  "llvm.x86.mmx.pmulu.dq",
  "llvm.x86.mmx.por",
  "llvm.x86.mmx.psad.bw",
  "llvm.x86.mmx.psll.d",
  "llvm.x86.mmx.psll.q",
  "llvm.x86.mmx.psll.w",
  "llvm.x86.mmx.pslli.d",
  "llvm.x86.mmx.pslli.q",
  "llvm.x86.mmx.pslli.w",
  "llvm.x86.mmx.psra.d",
  "llvm.x86.mmx.psra.w",
  "llvm.x86.mmx.psrai.d",
  "llvm.x86.mmx.psrai.w",
  "llvm.x86.mmx.psrl.d",
  "llvm.x86.mmx.psrl.q",
  "llvm.x86.mmx.psrl.w",
  "llvm.x86.mmx.psrli.d",
  "llvm.x86.mmx.psrli.q",
  "llvm.x86.mmx.psrli.w",
  "llvm.x86.mmx.psub.b",
  "llvm.x86.mmx.psub.d",
  "llvm.x86.mmx.psub.q",
  "llvm.x86.mmx.psub.w",
  "llvm.x86.mmx.psubs.b",
  "llvm.x86.mmx.psubs.w",
  "llvm.x86.mmx.psubus.b",
  "llvm.x86.mmx.psubus.w",
  "llvm.x86.mmx.punpckhbw",
  "llvm.x86.mmx.punpckhdq",
  "llvm.x86.mmx.punpckhwd",
  "llvm.x86.mmx.punpcklbw",
  "llvm.x86.mmx.punpckldq",
  "llvm.x86.mmx.punpcklwd",
  "llvm.x86.mmx.pxor",
  "llvm.x86.monitorx",
  "llvm.x86.movdir64b",
  "llvm.x86.mwaitx",
  "llvm.x86.pclmulqdq",
  "llvm.x86.pclmulqdq.256",
  "llvm.x86.pclmulqdq.512",
  "llvm.x86.ptwrite32",
  "llvm.x86.ptwrite64",
  "llvm.x86.rdfsbase.32",
  "llvm.x86.rdfsbase.64",
  "llvm.x86.rdgsbase.32",
  "llvm.x86.rdgsbase.64",
  "llvm.x86.rdpid",
  "llvm.x86.rdpkru",
  "llvm.x86.rdpmc",
  "llvm.x86.rdpru",
  "llvm.x86.rdrand.16",
  "llvm.x86.rdrand.32",
  "llvm.x86.rdrand.64",
  "llvm.x86.rdseed.16",
  "llvm.x86.rdseed.32",
  "llvm.x86.rdseed.64",
  "llvm.x86.rdsspd",
  "llvm.x86.rdsspq",
  "llvm.x86.rdtsc",
  "llvm.x86.rdtscp",
  "llvm.x86.rstorssp",
  "llvm.x86.saveprevssp",
  "llvm.x86.seh.ehguard",
  "llvm.x86.seh.ehregnode",
  "llvm.x86.seh.lsda",
  "llvm.x86.senduipi",
  "llvm.x86.serialize",
  "llvm.x86.setssbsy",
  "llvm.x86.sha1msg1",
  "llvm.x86.sha1msg2",
  "llvm.x86.sha1nexte",
  "llvm.x86.sha1rnds4",
  "llvm.x86.sha256msg1",
  "llvm.x86.sha256msg2",
  "llvm.x86.sha256rnds2",
  "llvm.x86.slwpcb",
  "llvm.x86.sse.cmp.ps",
  "llvm.x86.sse.cmp.ss",
  "llvm.x86.sse.comieq.ss",
  "llvm.x86.sse.comige.ss",
  "llvm.x86.sse.comigt.ss",
  "llvm.x86.sse.comile.ss",
  "llvm.x86.sse.comilt.ss",
  "llvm.x86.sse.comineq.ss",
  "llvm.x86.sse.cvtpd2pi",
  "llvm.x86.sse.cvtpi2pd",
  "llvm.x86.sse.cvtpi2ps",
  "llvm.x86.sse.cvtps2pi",
  "llvm.x86.sse.cvtss2si",
  "llvm.x86.sse.cvtss2si64",
  "llvm.x86.sse.cvttpd2pi",
  "llvm.x86.sse.cvttps2pi",
  "llvm.x86.sse.cvttss2si",
  "llvm.x86.sse.cvttss2si64",
  "llvm.x86.sse.ldmxcsr",
  "llvm.x86.sse.max.ps",
  "llvm.x86.sse.max.ss",
  "llvm.x86.sse.min.ps",
  "llvm.x86.sse.min.ss",
  "llvm.x86.sse.movmsk.ps",
  "llvm.x86.sse.pshuf.w",
  "llvm.x86.sse.rcp.ps",
  "llvm.x86.sse.rcp.ss",
  "llvm.x86.sse.rsqrt.ps",
  "llvm.x86.sse.rsqrt.ss",
  "llvm.x86.sse.sfence",
  "llvm.x86.sse.stmxcsr",
  "llvm.x86.sse.ucomieq.ss",
  "llvm.x86.sse.ucomige.ss",
  "llvm.x86.sse.ucomigt.ss",
  "llvm.x86.sse.ucomile.ss",
  "llvm.x86.sse.ucomilt.ss",
  "llvm.x86.sse.ucomineq.ss",
  "llvm.x86.sse2.clflush",
  "llvm.x86.sse2.cmp.pd",
  "llvm.x86.sse2.cmp.sd",
  "llvm.x86.sse2.comieq.sd",
  "llvm.x86.sse2.comige.sd",
  "llvm.x86.sse2.comigt.sd",
  "llvm.x86.sse2.comile.sd",
  "llvm.x86.sse2.comilt.sd",
  "llvm.x86.sse2.comineq.sd",
  "llvm.x86.sse2.cvtpd2dq",
  "llvm.x86.sse2.cvtpd2ps",
  "llvm.x86.sse2.cvtps2dq",
  "llvm.x86.sse2.cvtsd2si",
  "llvm.x86.sse2.cvtsd2si64",
  "llvm.x86.sse2.cvtsd2ss",
  "llvm.x86.sse2.cvttpd2dq",
  "llvm.x86.sse2.cvttps2dq",
  "llvm.x86.sse2.cvttsd2si",
  "llvm.x86.sse2.cvttsd2si64",
  "llvm.x86.sse2.lfence",
  "llvm.x86.sse2.maskmov.dqu",
  "llvm.x86.sse2.max.pd",
  "llvm.x86.sse2.max.sd",
  "llvm.x86.sse2.mfence",
  "llvm.x86.sse2.min.pd",
  "llvm.x86.sse2.min.sd",
  "llvm.x86.sse2.movmsk.pd",
  "llvm.x86.sse2.packssdw.128",
  "llvm.x86.sse2.packsswb.128",
  "llvm.x86.sse2.packuswb.128",
  "llvm.x86.sse2.pause",
  "llvm.x86.sse2.pavg.b",
  "llvm.x86.sse2.pavg.w",
  "llvm.x86.sse2.pmadd.wd",
  "llvm.x86.sse2.pmovmskb.128",
  "llvm.x86.sse2.pmulh.w",
  "llvm.x86.sse2.pmulhu.w",
  "llvm.x86.sse2.psad.bw",
  "llvm.x86.sse2.psll.d",
  "llvm.x86.sse2.psll.q",
  "llvm.x86.sse2.psll.w",
  "llvm.x86.sse2.pslli.d",
  "llvm.x86.sse2.pslli.q",
  "llvm.x86.sse2.pslli.w",
  "llvm.x86.sse2.psra.d",
  "llvm.x86.sse2.psra.w",
  "llvm.x86.sse2.psrai.d",
  "llvm.x86.sse2.psrai.w",
  "llvm.x86.sse2.psrl.d",
  "llvm.x86.sse2.psrl.q",
  "llvm.x86.sse2.psrl.w",
  "llvm.x86.sse2.psrli.d",
  "llvm.x86.sse2.psrli.q",
  "llvm.x86.sse2.psrli.w",
  "llvm.x86.sse2.ucomieq.sd",
  "llvm.x86.sse2.ucomige.sd",
  "llvm.x86.sse2.ucomigt.sd",
  "llvm.x86.sse2.ucomile.sd",
  "llvm.x86.sse2.ucomilt.sd",
  "llvm.x86.sse2.ucomineq.sd",
  "llvm.x86.sse3.addsub.pd",
  "llvm.x86.sse3.addsub.ps",
  "llvm.x86.sse3.hadd.pd",
  "llvm.x86.sse3.hadd.ps",
  "llvm.x86.sse3.hsub.pd",
  "llvm.x86.sse3.hsub.ps",
  "llvm.x86.sse3.ldu.dq",
  "llvm.x86.sse3.monitor",
  "llvm.x86.sse3.mwait",
  "llvm.x86.sse41.blendvpd",
  "llvm.x86.sse41.blendvps",
  "llvm.x86.sse41.dppd",
  "llvm.x86.sse41.dpps",
  "llvm.x86.sse41.insertps",
  "llvm.x86.sse41.mpsadbw",
  "llvm.x86.sse41.packusdw",
  "llvm.x86.sse41.pblendvb",
  "llvm.x86.sse41.phminposuw",
  "llvm.x86.sse41.ptestc",
  "llvm.x86.sse41.ptestnzc",
  "llvm.x86.sse41.ptestz",
  "llvm.x86.sse41.round.pd",
  "llvm.x86.sse41.round.ps",
  "llvm.x86.sse41.round.sd",
  "llvm.x86.sse41.round.ss",
  "llvm.x86.sse42.crc32.32.16",
  "llvm.x86.sse42.crc32.32.32",
  "llvm.x86.sse42.crc32.32.8",
  "llvm.x86.sse42.crc32.64.64",
  "llvm.x86.sse42.pcmpestri128",
  "llvm.x86.sse42.pcmpestria128",
  "llvm.x86.sse42.pcmpestric128",
  "llvm.x86.sse42.pcmpestrio128",
  "llvm.x86.sse42.pcmpestris128",
  "llvm.x86.sse42.pcmpestriz128",
  "llvm.x86.sse42.pcmpestrm128",
  "llvm.x86.sse42.pcmpistri128",
  "llvm.x86.sse42.pcmpistria128",
  "llvm.x86.sse42.pcmpistric128",
  "llvm.x86.sse42.pcmpistrio128",
  "llvm.x86.sse42.pcmpistris128",
  "llvm.x86.sse42.pcmpistriz128",
  "llvm.x86.sse42.pcmpistrm128",
  "llvm.x86.sse4a.extrq",
  "llvm.x86.sse4a.extrqi",
  "llvm.x86.sse4a.insertq",
  "llvm.x86.sse4a.insertqi",
  "llvm.x86.ssse3.pabs.b",
  "llvm.x86.ssse3.pabs.d",
  "llvm.x86.ssse3.pabs.w",
  "llvm.x86.ssse3.phadd.d",
  "llvm.x86.ssse3.phadd.d.128",
  "llvm.x86.ssse3.phadd.sw",
  "llvm.x86.ssse3.phadd.sw.128",
  "llvm.x86.ssse3.phadd.w",
  "llvm.x86.ssse3.phadd.w.128",
  "llvm.x86.ssse3.phsub.d",
  "llvm.x86.ssse3.phsub.d.128",
  "llvm.x86.ssse3.phsub.sw",
  "llvm.x86.ssse3.phsub.sw.128",
  "llvm.x86.ssse3.phsub.w",
  "llvm.x86.ssse3.phsub.w.128",
  "llvm.x86.ssse3.pmadd.ub.sw",
  "llvm.x86.ssse3.pmadd.ub.sw.128",
  "llvm.x86.ssse3.pmul.hr.sw",
  "llvm.x86.ssse3.pmul.hr.sw.128",
  "llvm.x86.ssse3.pshuf.b",
  "llvm.x86.ssse3.pshuf.b.128",
  "llvm.x86.ssse3.psign.b",
  "llvm.x86.ssse3.psign.b.128",
  "llvm.x86.ssse3.psign.d",
  "llvm.x86.ssse3.psign.d.128",
  "llvm.x86.ssse3.psign.w",
  "llvm.x86.ssse3.psign.w.128",
  "llvm.x86.sttilecfg",
  "llvm.x86.stui",
  "llvm.x86.subborrow.32",
  "llvm.x86.subborrow.64",
  "llvm.x86.tbm.bextri.u32",
  "llvm.x86.tbm.bextri.u64",
  "llvm.x86.tcmmimfp16ps",
  "llvm.x86.tcmmimfp16ps.internal",
  "llvm.x86.tcmmrlfp16ps",
  "llvm.x86.tcmmrlfp16ps.internal",
  "llvm.x86.tdpbf16ps",
  "llvm.x86.tdpbf16ps.internal",
  "llvm.x86.tdpbssd",
  "llvm.x86.tdpbssd.internal",
  "llvm.x86.tdpbsud",
  "llvm.x86.tdpbsud.internal",
  "llvm.x86.tdpbusd",
  "llvm.x86.tdpbusd.internal",
  "llvm.x86.tdpbuud",
  "llvm.x86.tdpbuud.internal",
  "llvm.x86.tdpfp16ps",
  "llvm.x86.tdpfp16ps.internal",
  "llvm.x86.testui",
  "llvm.x86.tileloadd64",
  "llvm.x86.tileloadd64.internal",
  "llvm.x86.tileloaddt164",
  "llvm.x86.tileloaddt164.internal",
  "llvm.x86.tilerelease",
  "llvm.x86.tilestored64",
  "llvm.x86.tilestored64.internal",
  "llvm.x86.tilezero",
  "llvm.x86.tilezero.internal",
  "llvm.x86.tpause",
  "llvm.x86.umonitor",
  "llvm.x86.umwait",
  "llvm.x86.vbcstnebf162ps128",
  "llvm.x86.vbcstnebf162ps256",
  "llvm.x86.vbcstnesh2ps128",
  "llvm.x86.vbcstnesh2ps256",
  "llvm.x86.vcvtneebf162ps128",
  "llvm.x86.vcvtneebf162ps256",
  "llvm.x86.vcvtneeph2ps128",
  "llvm.x86.vcvtneeph2ps256",
  "llvm.x86.vcvtneobf162ps128",
  "llvm.x86.vcvtneobf162ps256",
  "llvm.x86.vcvtneoph2ps128",
  "llvm.x86.vcvtneoph2ps256",
  "llvm.x86.vcvtneps2bf16128",
  "llvm.x86.vcvtneps2bf16256",
  "llvm.x86.vcvtps2ph.128",
  "llvm.x86.vcvtps2ph.256",
  "llvm.x86.vgf2p8affineinvqb.128",
  "llvm.x86.vgf2p8affineinvqb.256",
  "llvm.x86.vgf2p8affineinvqb.512",
  "llvm.x86.vgf2p8affineqb.128",
  "llvm.x86.vgf2p8affineqb.256",
  "llvm.x86.vgf2p8affineqb.512",
  "llvm.x86.vgf2p8mulb.128",
  "llvm.x86.vgf2p8mulb.256",
  "llvm.x86.vgf2p8mulb.512",
  "llvm.x86.vsha512msg1",
  "llvm.x86.vsha512msg2",
  "llvm.x86.vsha512rnds2",
  "llvm.x86.vsm3msg1",
  "llvm.x86.vsm3msg2",
  "llvm.x86.vsm3rnds2",
  "llvm.x86.vsm4key4128",
  "llvm.x86.vsm4key4256",
  "llvm.x86.vsm4rnds4128",
  "llvm.x86.vsm4rnds4256",
  "llvm.x86.wbinvd",
  "llvm.x86.wbnoinvd",
  "llvm.x86.wrfsbase.32",
  "llvm.x86.wrfsbase.64",
  "llvm.x86.wrgsbase.32",
  "llvm.x86.wrgsbase.64",
  "llvm.x86.wrpkru",
  "llvm.x86.wrssd",
  "llvm.x86.wrssq",
  "llvm.x86.wrussd",
  "llvm.x86.wrussq",
  "llvm.x86.xabort",
  "llvm.x86.xbegin",
  "llvm.x86.xend",
  "llvm.x86.xgetbv",
  "llvm.x86.xop.vfrcz.pd",
  "llvm.x86.xop.vfrcz.pd.256",
  "llvm.x86.xop.vfrcz.ps",
  "llvm.x86.xop.vfrcz.ps.256",
  "llvm.x86.xop.vfrcz.sd",
  "llvm.x86.xop.vfrcz.ss",
  "llvm.x86.xop.vpermil2pd",
  "llvm.x86.xop.vpermil2pd.256",
  "llvm.x86.xop.vpermil2ps",
  "llvm.x86.xop.vpermil2ps.256",
  "llvm.x86.xop.vphaddbd",
  "llvm.x86.xop.vphaddbq",
  "llvm.x86.xop.vphaddbw",
  "llvm.x86.xop.vphadddq",
  "llvm.x86.xop.vphaddubd",
  "llvm.x86.xop.vphaddubq",
  "llvm.x86.xop.vphaddubw",
  "llvm.x86.xop.vphaddudq",
  "llvm.x86.xop.vphadduwd",
  "llvm.x86.xop.vphadduwq",
  "llvm.x86.xop.vphaddwd",
  "llvm.x86.xop.vphaddwq",
  "llvm.x86.xop.vphsubbw",
  "llvm.x86.xop.vphsubdq",
  "llvm.x86.xop.vphsubwd",
  "llvm.x86.xop.vpmacsdd",
  "llvm.x86.xop.vpmacsdqh",
  "llvm.x86.xop.vpmacsdql",
  "llvm.x86.xop.vpmacssdd",
  "llvm.x86.xop.vpmacssdqh",
  "llvm.x86.xop.vpmacssdql",
  "llvm.x86.xop.vpmacsswd",
  "llvm.x86.xop.vpmacssww",
  "llvm.x86.xop.vpmacswd",
  "llvm.x86.xop.vpmacsww",
  "llvm.x86.xop.vpmadcsswd",
  "llvm.x86.xop.vpmadcswd",
  "llvm.x86.xop.vpperm",
  "llvm.x86.xop.vpshab",
  "llvm.x86.xop.vpshad",
  "llvm.x86.xop.vpshaq",
  "llvm.x86.xop.vpshaw",
  "llvm.x86.xop.vpshlb",
  "llvm.x86.xop.vpshld",
  "llvm.x86.xop.vpshlq",
  "llvm.x86.xop.vpshlw",
  "llvm.x86.xresldtrk",
  "llvm.x86.xrstor",
  "llvm.x86.xrstor64",
  "llvm.x86.xrstors",
  "llvm.x86.xrstors64",
  "llvm.x86.xsave",
  "llvm.x86.xsave64",
  "llvm.x86.xsavec",
  "llvm.x86.xsavec64",
  "llvm.x86.xsaveopt",
  "llvm.x86.xsaveopt64",
  "llvm.x86.xsaves",
  "llvm.x86.xsaves64",
  "llvm.x86.xsetbv",
  "llvm.x86.xsusldtrk",
  "llvm.x86.xtest",
  "llvm.xcore.bitrev",
  "llvm.xcore.checkevent",
  "llvm.xcore.chkct",
  "llvm.xcore.clre",
  "llvm.xcore.clrpt",
  "llvm.xcore.clrsr",
  "llvm.xcore.crc32",
  "llvm.xcore.crc8",
  "llvm.xcore.edu",
  "llvm.xcore.eeu",
  "llvm.xcore.endin",
  "llvm.xcore.freer",
  "llvm.xcore.geted",
  "llvm.xcore.getet",
  "llvm.xcore.getid",
  "llvm.xcore.getps",
  "llvm.xcore.getr",
  "llvm.xcore.getst",
  "llvm.xcore.getts",
  "llvm.xcore.in",
  "llvm.xcore.inct",
  "llvm.xcore.initcp",
  "llvm.xcore.initdp",
  "llvm.xcore.initlr",
  "llvm.xcore.initpc",
  "llvm.xcore.initsp",
  "llvm.xcore.inshr",
  "llvm.xcore.int",
  "llvm.xcore.mjoin",
  "llvm.xcore.msync",
  "llvm.xcore.out",
  "llvm.xcore.outct",
  "llvm.xcore.outshr",
  "llvm.xcore.outt",
  "llvm.xcore.peek",
  "llvm.xcore.setc",
  "llvm.xcore.setclk",
  "llvm.xcore.setd",
  "llvm.xcore.setev",
  "llvm.xcore.setps",
  "llvm.xcore.setpsc",
  "llvm.xcore.setpt",
  "llvm.xcore.setrdy",
  "llvm.xcore.setsr",
  "llvm.xcore.settw",
  "llvm.xcore.setv",
  "llvm.xcore.sext",
  "llvm.xcore.ssync",
  "llvm.xcore.syncr",
  "llvm.xcore.testct",
  "llvm.xcore.testwct",
  "llvm.xcore.waitevent",
  "llvm.xcore.zext",
#endif
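// (Annotation, not generated output.) End of the intrinsic name table.
// The including code in LLVM prepends a "not_intrinsic" entry at slot #0,
// after which the array is indexed directly by intrinsic ID, so the string
// order above mirrors the ID enum.  Mapping a textual name such as
// "llvm.x86.rdtsc" back to its ID goes through Intrinsic::lookupIntrinsicID(),
// which searches a table like this via lookupLLVMIntrinsicByName(); see
// llvm/IR/Intrinsics.h.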

// Intrinsic ID to overload bitset
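// (Annotation, not generated output.) Each byte below packs the
// "is overloaded" flag for eight consecutive intrinsic IDs, least-significant
// bit first, so entry i covers IDs 8*i through 8*i+7 and a query is one
// index, one shift, and one mask.  The generated file typically defines the
// accessor along these lines (sketch, assuming the num_intrinsics constant
// emitted elsewhere in this header):
//
//   bool Intrinsic::isOverloaded(ID id) {
//     assert(id < num_intrinsics && "Invalid intrinsic ID!");
//     return OTable[id / 8] & (1 << (id % 8));  // test this ID's bit
//   }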
#ifdef GET_INTRINSIC_OVERLOAD_TABLE
static const uint8_t OTable[] = {
  0 | (1<<1) | (1<<2) | (1<<4) | (1<<5),
  0 | (1<<0) | (1<<1) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
  0,
  0,
  0 | (1<<7),
  0 | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0,
  0 | (1<<0),
  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<7),
  0 | (1<<0) | (1<<1),
  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0,
  0,
  0,
  0 | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<0) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<2) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<5) | (1<<6),
  0 | (1<<0) | (1<<1) | (1<<3) | (1<<5),
  0 | (1<<1) | (1<<2) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1),
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0),
  0,
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0),
  0 | (1<<1) | (1<<4) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<4),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
  0 | (1<<0) | (1<<2) | (1<<3) | (1<<7),
  0 | (1<<3) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0,
  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0 | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0 | (1<<5) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5),
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<5),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<6),
  0 | (1<<1) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<3),
  0 | (1<<0) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<5),
  0 | (1<<0) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<7),
  0 | (1<<2) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<1) | (1<<4) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<4) | (1<<5),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<2) | (1<<5),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
  0 | (1<<0) | (1<<3) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<6) | (1<<7),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<2) | (1<<7),
  0 | (1<<0) | (1<<5) | (1<<6),
  0 | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<2) | (1<<7),
  0 | (1<<0) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<6) | (1<<7),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<3) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<2) | (1<<3) | (1<<4),
  0,
  0,
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5),
  0 | (1<<0) | (1<<1) | (1<<3) | (1<<4),
  0 | (1<<6),
  0 | (1<<0) | (1<<1) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<2) | (1<<3) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1),
  0 | (1<<7),
  0 | (1<<4) | (1<<5),
  0 | (1<<2) | (1<<3) | (1<<4) | (1<<5),
  0,
  0,
  0,
  0,
  0 | (1<<7),
  0 | (1<<0),
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5),
  0 | (1<<2) | (1<<3) | (1<<6),
  0,
  0 | (1<<3),
  0 | (1<<6),
  0 | (1<<4) | (1<<5) | (1<<6),
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<3) | (1<<4) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
  0 | (1<<2) | (1<<6),
  0,
  0 | (1<<3) | (1<<4) | (1<<6),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<4) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<2) | (1<<4),
  0,
  0,
  0 | (1<<3),
  0 | (1<<1),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1),
  0 | (1<<0) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<2) | (1<<3),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
  0 | (1<<0) | (1<<4),
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<4) | (1<<5) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<1) | (1<<3),
  0,
  0,
  0,
  0 | (1<<0) | (1<<1) | (1<<3) | (1<<7),
  0 | (1<<1),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<0) | (1<<1),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<6) | (1<<7),
  0,
  0,
  0 | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0),
  0,
  0 | (1<<7),
  0 | (1<<0),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<1),
  0,
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<2),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<0) | (1<<1),
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0),
  0,
  0,
  0,
  0,
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0),
  0,
  0,
  0,
  0,
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0,
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<5) | (1<<7),
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<5),
  0,
  0,
  0,
  0 | (1<<1),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<2),
  0 | (1<<3),
  0,
  0,
  0,
  0 | (1<<3),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<0) | (1<<1),
  0,
  0 | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1),
  0,
  0 | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1),
  0 | (1<<4),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<5),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<4) | (1<<5),
  0 | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2),
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0,
  0 | (1<<0) | (1<<1),
  0 | (1<<6),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<6),
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<4),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<0) | (1<<5),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<2) | (1<<3),
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0 | (1<<3) | (1<<5),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<0) | (1<<1) | (1<<2) | (1<<3) | (1<<4) | (1<<5) | (1<<6) | (1<<7),
  0 | (1<<1) | (1<<2) | (1<<3) | (1<<5) | (1<<6),
  0 | (1<<1) | (1<<2) | (1<<3)
};

// Test membership: each OTable entry packs eight flags, one per bit,
// so flag `id` lives at bit (id % 8) of byte (id / 8).
return (OTable[id/8] & (1 << (id%8))) != 0;
#endif
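
// Illustrative sketch (not part of the generated output): how a packed
// bit table like OTable is built and queried. The helper names
// `buildBitTable` and `testBit` are hypothetical, for exposition only;
// the lookup mirrors the OTable[id/8] & (1 << (id%8)) test above.
#if 0
#include <cstdint>
#include <vector>

// Pack one flag per bit, eight flags per byte.
static std::vector<uint8_t> buildBitTable(const std::vector<bool> &Flags) {
  std::vector<uint8_t> Table((Flags.size() + 7) / 8, 0);
  for (size_t id = 0; id < Flags.size(); ++id)
    if (Flags[id])
      Table[id / 8] |= uint8_t(1 << (id % 8));
  return Table;
}

// Constant-time membership test: one byte load plus a mask.
static bool testBit(const std::vector<uint8_t> &Table, size_t id) {
  return (Table[id / 8] & (1 << (id % 8))) != 0;
}
#endif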

// Global intrinsic function declaration type table.
#ifdef GET_INTRINSIC_GENERATOR_GLOBAL
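// Each IIT_Table entry packs an intrinsic's type signature as a sequence
// of 4-bit codes read from the low bits upward. Entries written as
// (1U<<31) | N instead store an offset N into a separate long-encoding
// table (IIT_LongEncodingTable in LLVM's generated headers) for
// signatures too long to fit in one 32-bit word. A minimal decode sketch
// of the short form, assuming that layout (`decodeShortEntry` is a
// hypothetical name):
#if 0
#include <cstdint>
#include <vector>

// Expand one in-word entry into its 4-bit codes, low nibble first.
// An entry with the high bit set would be resolved through the long
// table instead of being decoded here.
static std::vector<uint8_t> decodeShortEntry(uint32_t Entry) {
  std::vector<uint8_t> Codes;
  do {
    Codes.push_back(Entry & 0xF);
    Entry >>= 4;
  } while (Entry);
  return Codes;
}
#endif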
static const unsigned IIT_Table[] = {
  0x17f1f, 0x4f, 0xee, (1U<<31) | 7012, 0x7f2f, 0x4e0, 0x10, 
  0x7f1f, 0x7f1f, (1U<<31) | 7100, (1U<<31) | 7097, (1U<<31) | 13078, 0x7f0f, 0x7f2f, 0x7f2f, 
  0xee0, (1U<<31) | 13100, 0x32f, 0x2f3, 0x7f7f2f, 0x1f, (1U<<31) | 13081, (1U<<31) | 1617, 
  (1U<<31) | 13078, (1U<<31) | 13088, 0xeee, 0xe0, 0xe, 0xee0, (1U<<31) | 10639, 0xe0, 
  0xe1, 0x1e1, (1U<<31) | 13886, 0xe, (1U<<31) | 10639, (1U<<31) | 10620, (1U<<31) | 10555, (1U<<31) | 10626, 
  (1U<<31) | 10626, 0xe, 0xee, 0xee, 0x14ee, 0xe0, (1U<<31) | 10640, 0x1f, 
  0x2ee, (1U<<31) | 418, (1U<<31) | 13955, (1U<<31) | 13872, 0x7f2f, 0x17f1f, 0x7f1f, 0x17f1f, 
  (1U<<31) | 13328, (1U<<31) | 13323, (1U<<31) | 13100, (1U<<31) | 13323, 0x0, 0x0, 0x4e, (1U<<31) | 13085, 
  (1U<<31) | 13084, 0xeee, 0xe40, 0xe50, 0x40, 0xe0, 0xe0, 0xe, 
  0xe4, 0x0, 0xe4, 0x0, 0x7f2f, 0x7f2f, 0x7f7f1f, 0x87f7f1f, 
  (1U<<31) | 13114, (1U<<31) | 13266, (1U<<31) | 13266, (1U<<31) | 13266, (1U<<31) | 13273, (1U<<31) | 13263, (1U<<31) | 13263, (1U<<31) | 13273, 
  (1U<<31) | 13114, (1U<<31) | 13282, (1U<<31) | 13273, (1U<<31) | 13282, (1U<<31) | 13140, (1U<<31) | 13134, (1U<<31) | 13134, (1U<<31) | 13316, 
  (1U<<31) | 13273, (1U<<31) | 13273, (1U<<31) | 13300, (1U<<31) | 13309, (1U<<31) | 13134, (1U<<31) | 13266, (1U<<31) | 13266, (1U<<31) | 13266, 
  (1U<<31) | 13309, (1U<<31) | 13134, (1U<<31) | 13120, (1U<<31) | 13120, (1U<<31) | 13120, (1U<<31) | 13120, (1U<<31) | 13266, (1U<<31) | 13273, 
  (1U<<31) | 13255, (1U<<31) | 13266, (1U<<31) | 13114, (1U<<31) | 13114, (1U<<31) | 13266, (1U<<31) | 13293, (1U<<31) | 13266, (1U<<31) | 13114, 
  (1U<<31) | 13293, (1U<<31) | 13079, (1U<<31) | 13079, (1U<<31) | 13079, (1U<<31) | 13872, 0xcf4f, 0x4f5, (1U<<31) | 5657, 
  (1U<<31) | 13077, (1U<<31) | 13896, 0x141f4, (1U<<31) | 13882, (1U<<31) | 13100, (1U<<31) | 13917, (1U<<31) | 13910, (1U<<31) | 13905, 
  0x3f, (1U<<31) | 1149, (1U<<31) | 9315, 0x7f3f, 0x47f7f3f, (1U<<31) | 2345, (1U<<31) | 1282, (1U<<31) | 1281, 
  0x1, 0x7f2f, 0x7f2f, 0x7f7f7f2f, 0x7f7f7f2f, 0xaf1f, 0xaf1f, (1U<<31) | 13140, 
  0x44f, (1U<<31) | 9159, 0x7f7f7f1f, 0x7f7f7f1f, 0xeee, 0xee0, 0xeee0, 0xff9f3f, 
  0x1f, 0x1f, 0x4, 0x4ee0, 0x4ee0, (1U<<31) | 13873, 0xeee0, 0x445e0, 
  0x445e0, 0x5445e0, 0x445e0, 0x4455e0, 0x4f5e0, 0x4f5e, 0xf1, (1U<<31) | 2185, 
  0x7f4f, 0x9f7f2f, 0x4f50, 0x4f50, 0xaf1f, 0xaf1f, 0x1fee, 0xe, 
  (1U<<31) | 13873, 0x4eee, 0x7f2f, 0x7f2f, 0x7f2f, 0x1f1, 0x7f7f1f, 0xaf1f, 
  0xaf1f, (1U<<31) | 120, (1U<<31) | 8753, (1U<<31) | 8720, (1U<<31) | 8732, (1U<<31) | 81, (1U<<31) | 92, 0x4419fe3f, 
  (1U<<31) | 3750, (1U<<31) | 5075, 0x447f3f, 0x7f7f2f, 0x7f7f2f, (1U<<31) | 409, (1U<<31) | 7083, (1U<<31) | 409, 
  (1U<<31) | 409, (1U<<31) | 7083, 0x19f24f0, 0x49f24f0, 0x19f24f0, 0x7f7f2f, 0x7f7f2f, 0x7f2f, 
  0xee0, 0xee0, 0xee0, 0xee0, 0xee, 0xe0, 0xe, 0xee, 
  (1U<<31) | 13873, (1U<<31) | 13873, 0xee0, 0xe0, 0xeee, 0xee, 0xee, 0xee0, 
  0xe0, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 
  0xee0, 0xeee, 0xe4, 0xe4, 0xee, 0xee, 0xee, 0x111cf1f, 
  0x40, 0x7f7f2f, 0x9f7f2f, 0x4444f0, 0x44cf4f, 0x44cf4f, 0x4cf4f, 0x54550, 
  (1U<<31) | 10846, 0x5455, 0x555, 0x545455, 0x5455, 0x555, 0x455, 0x9f7f4f, 
  (1U<<31) | 13146, (1U<<31) | 13103, (1U<<31) | 13103, 0x5, 0x0, 0x4e, 0x7f2f, 0x7f2f, 
  0x7f2f, 0x7f7f1f, (1U<<31) | 8868, 0x47f7f1f, 0x47f7f1f, 0x0, 0x0, 0x0, 
  0x0, 0x1f0, 0x1f0, 0x40, 0x0, 0x7f2f, 0x7f7f1f, 0x7f7f1f, 
  0x47f7f1f, 0x47f7f1f, (1U<<31) | 8868, 0x4f, 0x7f2f, 0x7f0f, 0x7f7f1f, 0x7f7f1f, 
  (1U<<31) | 8868, 0xe, 0xee0, 0xe0, 0xe, 0x7f1f, 0x7f4f, 0xe, 
  0x1f1, (1U<<31) | 8705, 0xe, 0x7f4f, 0x0, 0x7f2f, (1U<<31) | 13107, (1U<<31) | 13107, 
  (1U<<31) | 13146, 0x7f7f1f, (1U<<31) | 8868, 0x20, 0x47f7f1f, 0x47f7f1f, 0x7f7f1f, 0x7f7f1f, 
  0x47f7f1f, 0x47f7f1f, (1U<<31) | 8868, 0x7f7f1f, 0x7f7f1f, (1U<<31) | 8868, 0xee0, 0xe0, 
  0xe0, (1U<<31) | 10835, 0x5bf3f, 0x5bf7f3f, (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1167, (1U<<31) | 1162, 
  (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1167, (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1162, 
  (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1162, (1U<<31) | 1208, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 1229, 
  (1U<<31) | 1229, (1U<<31) | 1229, (1U<<31) | 1238, (1U<<31) | 1208, (1U<<31) | 1229, (1U<<31) | 1208, (1U<<31) | 1229, (1U<<31) | 1238, 
  (1U<<31) | 1293, (1U<<31) | 1238, (1U<<31) | 1229, (1U<<31) | 1249, (1U<<31) | 1238, (1U<<31) | 1249, (1U<<31) | 1229, (1U<<31) | 1262, 
  (1U<<31) | 1262, (1U<<31) | 1262, (1U<<31) | 1262, (1U<<31) | 1238, (1U<<31) | 1249, (1U<<31) | 1249, (1U<<31) | 1238, (1U<<31) | 1198, 
  (1U<<31) | 1293, (1U<<31) | 1262, (1U<<31) | 1272, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 6006, (1U<<31) | 1238, (1U<<31) | 1238, 
  (1U<<31) | 1229, (1U<<31) | 1238, (1U<<31) | 1262, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, 
  (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, (1U<<31) | 1218, 
  (1U<<31) | 1229, (1U<<31) | 1229, (1U<<31) | 1229, (1U<<31) | 1229, (1U<<31) | 1197, (1U<<31) | 1238, (1U<<31) | 6006, (1U<<31) | 1262, 
  (1U<<31) | 1238, (1U<<31) | 1262, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 1229, (1U<<31) | 1238, (1U<<31) | 1271, (1U<<31) | 1238, 
  (1U<<31) | 1262, (1U<<31) | 1238, (1U<<31) | 1262, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 1238, (1U<<31) | 1262, 
  0x1f, (1U<<31) | 370, 0x5e0, 0x5e50, 0x5ee, 0x40, 0x0, 0x44, 
  0x54, 0x444, 0x444, 0x444, 0x444, 0x544, 0x444, 0x444, 
  0x544, 0x2c2c2c, 0x2c2c2c, 0x2c2c, 0x2c2c, 0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f7f3f, 
  0x7f7f7f3f, 0x595959, 0x4a44a4a, 0x44, 0x4a44a4a, 0x4a44a4a, 0x4a4a4a4a, 0x4a4a4a, 
  0x4a4a4a4a, 0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 0x59595959, 0x59595959, 0x595959, 0x59595959, 
  0x4a4a4a4a, 0x4a4a4a4a, 0x4a4a4a4a, (1U<<31) | 7212, (1U<<31) | 7212, (1U<<31) | 7212, (1U<<31) | 7212, 0x4a4a4a, 
  0x4a4a4a, 0x5595959, 0x40, 0x40, 0x84, 0x7f2f, 0x7f2f, 0x7f2f, 
  0x7f2f, 0x5, 0x5e5, 0x40, 0x5ee, 0x5e, 0x40, (1U<<31) | 10596, 
  (1U<<31) | 10607, 0x4f5, 0xeee, (1U<<31) | 10607, 0x4f5, 0x52ee, 0x7f0f, (1U<<31) | 9293, 
  0x7f7f3f, (1U<<31) | 9472, (1U<<31) | 8676, (1U<<31) | 8674, (1U<<31) | 11497, (1U<<31) | 14186, (1U<<31) | 14186, (1U<<31) | 14186, 
  0x7f3f, 0x7f7f3f, 0xffaf1f, 0xffaf1f, 0x7f7f3f, 0xbf2f, 0xaf1f, 0xaf1f, 
  0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xbf3f, 0xaf1f, 
  0xaf1f, 0x7f7f2f, 0x7f7f2f, 0x7f7f3f, 0xbf2f, 0x7f7f3f, 0xbf2f, 0x7f7f2f, 
  0x7f7f2f, 0x7f7f3f, 0xbf2f, 0x7f7f3f, 0xbf2f, (1U<<31) | 11497, (1U<<31) | 11497, (1U<<31) | 11497, 
  (1U<<31) | 11497, 0x7f7f2f, 0x7f2f, 0x7f7f2f, 0x7f2f, 0x7f2f, 0x7f2f, 0x7f2f, 
  0x7f2f, 0x7f2f, 0x7f7f2f, (1U<<31) | 10326, (1U<<31) | 10316, (1U<<31) | 10304, (1U<<31) | 10326, (1U<<31) | 10372, 
  (1U<<31) | 10326, (1U<<31) | 10316, (1U<<31) | 10355, (1U<<31) | 10316, (1U<<31) | 10304, (1U<<31) | 10334, (1U<<31) | 10304, 0x7f7f3f, 
  (1U<<31) | 9305, 0x552c, (1U<<31) | 9293, (1U<<31) | 6098, (1U<<31) | 9293, 0x7f7f3f, 0xbf3f, 0xbf1f, 
  0xbf1f, 0x8f0f, 0x8f0f, 0x8f0f, (1U<<31) | 11497, 0x7f7f3f, (1U<<31) | 9300, 0x7f7f3f, 
  0x7f7f3f, 0x7f7f3f, 0xbf1f, 0x7f7f3f, 0x7f7f3f, 0xbf1f, (1U<<31) | 11497, (1U<<31) | 9305, 
  0x7f1f, 0x7f7f1f, 0x7f7f1f, 0x49f7f1f, 0x49f7f1f, (1U<<31) | 9305, 0x445, 0x7f1f, 
  0x7f7f7f1f, 0x7f7f7f1f, 0x7f7f1f, 0x49f7f1f, 0x49f7f1f, 0x7f7f1f, (1U<<31) | 6098, (1U<<31) | 6098, 
  0x7f7f1f, 0x7f7f1f, (1U<<31) | 6098, (1U<<31) | 6098, 0x7f7f1f, (1U<<31) | 9283, (1U<<31) | 9283, 0x7f7f3f, 
  0x7f7f1f, 0x7f7f1f, (1U<<31) | 6104, 0xcf7f3f0, (1U<<31) | 10426, (1U<<31) | 10446, 0xcf7f3f0, (1U<<31) | 10385, 
  (1U<<31) | 10426, (1U<<31) | 10394, (1U<<31) | 10446, (1U<<31) | 10405, (1U<<31) | 9293, 0x7f7f1f, 0x7f2c3f, 0x7f2c2c3f, 
  (1U<<31) | 8790, (1U<<31) | 8762, 0x7f2c7f3f, (1U<<31) | 8814, (1U<<31) | 8801, (1U<<31) | 8775, 0x7f7f3f, 0xbf3f, 
  0xbf1f, 0xbf1f, (1U<<31) | 11497, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 0xbf1f, 
  0x7f7f3f, 0x7f7f3f, 0xbf1f, (1U<<31) | 11497, (1U<<31) | 9305, 0x7f7f1f, 0x7f7f1f, (1U<<31) | 6098, 
  0x7f7f1f, (1U<<31) | 6098, 0x7f7f1f, (1U<<31) | 9283, 0x7f3f, 0x7f7f3f, 0x7f7f1f, 0x7f3f, 
  (1U<<31) | 11497, 0x7f7f1f, (1U<<31) | 6104, (1U<<31) | 11497, 0x7f7f1f, 0x7f7f3f, 0x7f7f3f, 0x7f7f7f3f, 
  0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f7f3f, 0x57f5bf3f, 0x4af1f, 0x4af1f, 0x7a3a, 0x49f2f, 
  0x49f2f, 0x3a7a, 0x47f7f3f, 0x47f7f3f, 0x4444e0, (1U<<31) | 192, (1U<<31) | 192, 0x7f7f1f, 
  0x50, 0x5e0, 0x5e0, 0x7f7f2f, 0x87, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x7f3f40, (1U<<31) | 8901, 0x7f3f40, (1U<<31) | 8901, (1U<<31) | 1091, (1U<<31) | 1091, (1U<<31) | 1136, 
  (1U<<31) | 1136, 0x5, 0x5, 0x5, 0x5, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f7f3f40, 
  (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, (1U<<31) | 6017, 
  0x5, (1U<<31) | 5499, (1U<<31) | 5499, (1U<<31) | 5475, (1U<<31) | 5475, (1U<<31) | 5491, (1U<<31) | 5491, (1U<<31) | 5507, 
  (1U<<31) | 5507, (1U<<31) | 5483, (1U<<31) | 5483, 0xe40, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 1136, 
  (1U<<31) | 5208, (1U<<31) | 5216, (1U<<31) | 2358, (1U<<31) | 5208, (1U<<31) | 5216, (1U<<31) | 2358, (1U<<31) | 5999, (1U<<31) | 6062, 
  (1U<<31) | 2358, (1U<<31) | 2358, (1U<<31) | 6017, (1U<<31) | 6084, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f7f3f40, (1U<<31) | 8973, 
  0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, (1U<<31) | 8901, (1U<<31) | 9012, 0x50, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, 
  (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, (1U<<31) | 1136, 
  (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 5499, (1U<<31) | 5499, (1U<<31) | 5475, (1U<<31) | 5475, (1U<<31) | 5491, 
  (1U<<31) | 5491, (1U<<31) | 5507, (1U<<31) | 5507, (1U<<31) | 5483, (1U<<31) | 5483, 0xe40, 0x7f7f3f40, (1U<<31) | 8973, 
  (1U<<31) | 8901, (1U<<31) | 9012, 0x7f3f40, (1U<<31) | 8901, 0x7f3f40, (1U<<31) | 8901, (1U<<31) | 6017, (1U<<31) | 6084, 
  0x7f7f3f40, (1U<<31) | 8973, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 1136, 
  (1U<<31) | 1136, (1U<<31) | 6084, (1U<<31) | 6017, (1U<<31) | 6084, (1U<<31) | 6084, (1U<<31) | 6017, (1U<<31) | 6084, (1U<<31) | 6017, 
  (1U<<31) | 6084, 0x7f7f3f40, (1U<<31) | 8973, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, (1U<<31) | 8973, (1U<<31) | 8901, 
  (1U<<31) | 9012, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f7f3f40, 
  (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, 0x47f3f40, (1U<<31) | 6017, (1U<<31) | 6084, 0x7f3f40, 0x7f7f3f40, 
  (1U<<31) | 8973, (1U<<31) | 8901, (1U<<31) | 9012, (1U<<31) | 1136, (1U<<31) | 1136, (1U<<31) | 6084, (1U<<31) | 6017, (1U<<31) | 6084, 
  (1U<<31) | 6084, 0x7f3f440, (1U<<31) | 8912, (1U<<31) | 1102, 0x7f3f440, (1U<<31) | 8912, (1U<<31) | 1102, 0x7f3f40, 
  (1U<<31) | 8901, (1U<<31) | 1102, (1U<<31) | 1102, 0x0, 0x0, 0x40, 0x545, (1U<<31) | 7251, 
  (1U<<31) | 7262, (1U<<31) | 7262, 0xee0, 0x55e0, 0xe554, 0x4f54, 0xe554, 0x4f54, 
  0xee5, (1U<<31) | 8743, 0x7f7f7f3f, 0x7f7f7f3f, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, 
  (1U<<31) | 8851, (1U<<31) | 8849, (1U<<31) | 8879, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, (1U<<31) | 978, 
  (1U<<31) | 978, (1U<<31) | 981, (1U<<31) | 981, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 7461, (1U<<31) | 5982, 0x7f7f7f3f, 0x7f7f3f, 0x7f7f3f, (1U<<31) | 8664, (1U<<31) | 8664, 
  (1U<<31) | 14173, (1U<<31) | 7112, (1U<<31) | 14173, (1U<<31) | 7112, (1U<<31) | 14173, (1U<<31) | 7112, (1U<<31) | 14173, 0x7f7f3f, 
  (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8743, (1U<<31) | 8712, (1U<<31) | 8743, (1U<<31) | 8712, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 8879, 0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f7f3f, 0x47f7f3f, (1U<<31) | 6134, (1U<<31) | 5282, 
  (1U<<31) | 8879, (1U<<31) | 1157, (1U<<31) | 8879, (1U<<31) | 1157, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 5217, (1U<<31) | 6052, 
  (1U<<31) | 8825, (1U<<31) | 7439, (1U<<31) | 8825, (1U<<31) | 7439, (1U<<31) | 8825, (1U<<31) | 7439, (1U<<31) | 8825, (1U<<31) | 7439, 
  (1U<<31) | 8825, (1U<<31) | 7439, (1U<<31) | 7439, (1U<<31) | 7439, (1U<<31) | 7439, (1U<<31) | 7439, (1U<<31) | 8825, (1U<<31) | 7439, 
  (1U<<31) | 8743, (1U<<31) | 1126, 0x45, 0x45, 0x45, 0x7f3f5, (1U<<31) | 7161, (1U<<31) | 7161, 
  (1U<<31) | 7161, (1U<<31) | 7161, 0x45, (1U<<31) | 8712, (1U<<31) | 349, (1U<<31) | 0, (1U<<31) | 9322, (1U<<31) | 9332, 
  0x57f3f, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, 0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f7f3f, (1U<<31) | 1118, 
  0x47f7f3f, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8743, (1U<<31) | 8825, (1U<<31) | 8825, (1U<<31) | 8879, (1U<<31) | 8879, 
  (1U<<31) | 1157, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 6006, 0x7f7f7f3f, (1U<<31) | 8998, (1U<<31) | 9069, (1U<<31) | 6037, 
  (1U<<31) | 5217, (1U<<31) | 8825, (1U<<31) | 8825, (1U<<31) | 8825, (1U<<31) | 8825, (1U<<31) | 8825, (1U<<31) | 10108, (1U<<31) | 8651, 
  (1U<<31) | 8638, (1U<<31) | 9531, (1U<<31) | 7779, (1U<<31) | 9544, (1U<<31) | 7753, (1U<<31) | 8625, (1U<<31) | 8851, (1U<<31) | 7779, 
  (1U<<31) | 8625, (1U<<31) | 8851, (1U<<31) | 8651, (1U<<31) | 8638, (1U<<31) | 9544, (1U<<31) | 9444, (1U<<31) | 9454, (1U<<31) | 9444, 
  (1U<<31) | 9454, (1U<<31) | 9544, (1U<<31) | 9544, (1U<<31) | 10118, (1U<<31) | 7766, (1U<<31) | 9518, (1U<<31) | 7740, (1U<<31) | 8612, 
  (1U<<31) | 10118, (1U<<31) | 7766, (1U<<31) | 9518, (1U<<31) | 7740, (1U<<31) | 8612, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, 
  (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 9413, (1U<<31) | 1126, (1U<<31) | 8889, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, 
  (1U<<31) | 8879, (1U<<31) | 8998, (1U<<31) | 9091, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, (1U<<31) | 8998, 
  (1U<<31) | 9091, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, 
  (1U<<31) | 8879, (1U<<31) | 8998, (1U<<31) | 9091, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, (1U<<31) | 8998, 
  (1U<<31) | 9091, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 8889, (1U<<31) | 6052, (1U<<31) | 8889, 
  (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 8889, (1U<<31) | 6052, (1U<<31) | 8889, (1U<<31) | 9381, 
  (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 6124, 0x7f7f7f3f, (1U<<31) | 8889, (1U<<31) | 8879, 0x47f7f3f, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8743, (1U<<31) | 8889, (1U<<31) | 8889, (1U<<31) | 8889, (1U<<31) | 8889, (1U<<31) | 8889, 
  (1U<<31) | 8889, 0x7f3f, 0x7f7f3f, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 8953, (1U<<31) | 9031, (1U<<31) | 8743, 
  (1U<<31) | 8743, (1U<<31) | 8953, (1U<<31) | 9031, (1U<<31) | 8743, (1U<<31) | 8953, (1U<<31) | 9031, (1U<<31) | 8743, (1U<<31) | 8953, 
  (1U<<31) | 9031, (1U<<31) | 8743, (1U<<31) | 8743, 0x7f3f, 0x7f7f3f, (1U<<31) | 9418, (1U<<31) | 8743, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 8879, 0x47f7f3f, (1U<<31) | 9428, (1U<<31) | 9428, (1U<<31) | 8879, 0x7f7f3f, (1U<<31) | 9344, 
  (1U<<31) | 9337, (1U<<31) | 1118, (1U<<31) | 1118, (1U<<31) | 10471, (1U<<31) | 7189, (1U<<31) | 7189, (1U<<31) | 7493, (1U<<31) | 2335, 
  (1U<<31) | 2335, (1U<<31) | 2335, (1U<<31) | 2335, (1U<<31) | 10651, (1U<<31) | 10671, (1U<<31) | 10471, (1U<<31) | 10471, (1U<<31) | 10488, 
  (1U<<31) | 10510, (1U<<31) | 10536, (1U<<31) | 10471, (1U<<31) | 7189, (1U<<31) | 7189, (1U<<31) | 7493, (1U<<31) | 2335, (1U<<31) | 2335, 
  (1U<<31) | 2335, (1U<<31) | 2335, (1U<<31) | 10471, (1U<<31) | 10471, (1U<<31) | 7189, (1U<<31) | 7189, (1U<<31) | 7493, (1U<<31) | 2335, 
  (1U<<31) | 10651, (1U<<31) | 10671, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 7461, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 7461, 
  (1U<<31) | 8889, (1U<<31) | 8825, (1U<<31) | 8889, (1U<<31) | 6052, (1U<<31) | 8889, (1U<<31) | 8889, (1U<<31) | 6052, (1U<<31) | 8889, 
  (1U<<31) | 8889, (1U<<31) | 8879, 0x47f7f3f, (1U<<31) | 8879, (1U<<31) | 8879, 0x7f7f7f3f, (1U<<31) | 8743, (1U<<31) | 8825, 
  (1U<<31) | 8879, (1U<<31) | 8743, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 7156, 
  (1U<<31) | 7165, (1U<<31) | 8712, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, (1U<<31) | 8712, 0x4e3f0, (1U<<31) | 2305, 
  (1U<<31) | 5691, (1U<<31) | 2305, (1U<<31) | 2305, (1U<<31) | 2305, (1U<<31) | 5691, (1U<<31) | 2305, (1U<<31) | 2305, (1U<<31) | 2305, 
  (1U<<31) | 5691, (1U<<31) | 2305, (1U<<31) | 2305, (1U<<31) | 2305, (1U<<31) | 5691, (1U<<31) | 2305, (1U<<31) | 2305, (1U<<31) | 2287, 
  0x7f3f1, 0x7f3f1, 0x7f3f1, 0x43f, (1U<<31) | 14252, (1U<<31) | 14252, (1U<<31) | 14252, (1U<<31) | 14252, 
  (1U<<31) | 1152, (1U<<31) | 1152, (1U<<31) | 8851, (1U<<31) | 8849, (1U<<31) | 7451, (1U<<31) | 8743, (1U<<31) | 351, (1U<<31) | 358, 
  0x7f3f, (1U<<31) | 358, (1U<<31) | 358, (1U<<31) | 358, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 8743, 
  (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 8851, (1U<<31) | 8849, 0x7f7f7f3f, (1U<<31) | 9381, (1U<<31) | 9381, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 9374, (1U<<31) | 9374, (1U<<31) | 9356, (1U<<31) | 9374, (1U<<31) | 9374, (1U<<31) | 9374, (1U<<31) | 1111, 
  (1U<<31) | 9367, (1U<<31) | 9367, 0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f7f3f, (1U<<31) | 8998, (1U<<31) | 9069, (1U<<31) | 10108, 
  (1U<<31) | 6634, (1U<<31) | 7413, (1U<<31) | 7426, (1U<<31) | 6621, (1U<<31) | 8858, (1U<<31) | 8935, (1U<<31) | 8879, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 9404, (1U<<31) | 6134, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 8879, (1U<<31) | 9144, (1U<<31) | 9117, 
  0x0, (1U<<31) | 8879, (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 8879, (1U<<31) | 8879, 0x47f7f3f, (1U<<31) | 6647, 
  (1U<<31) | 6647, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, (1U<<31) | 8998, (1U<<31) | 9091, (1U<<31) | 8879, 
  (1U<<31) | 1118, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, (1U<<31) | 8998, (1U<<31) | 9091, (1U<<31) | 8879, 
  (1U<<31) | 1118, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, 
  (1U<<31) | 6124, (1U<<31) | 9404, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 9374, (1U<<31) | 6116, (1U<<31) | 9374, (1U<<31) | 6116, 
  (1U<<31) | 8879, (1U<<31) | 8743, (1U<<31) | 8879, 0x7f7f3f, 0x47f7f3f, (1U<<31) | 8851, (1U<<31) | 8924, (1U<<31) | 8851, 
  (1U<<31) | 8924, (1U<<31) | 8851, (1U<<31) | 8924, (1U<<31) | 8851, (1U<<31) | 8924, 0x4444, 0x4455, 0x447f3f, 
  0x4444, 0x4455, 0x447f3f, 0x4444, 0x4455, (1U<<31) | 102, 0x3f44, 0x3f55, 
  0x447f3f, 0x4444, 0x4455, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 9381, (1U<<31) | 6124, 
  (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 9381, (1U<<31) | 6124, 0x7f7f3f, 0x47f7f3f, (1U<<31) | 8986, 
  (1U<<31) | 9049, (1U<<31) | 8998, (1U<<31) | 9091, (1U<<31) | 9374, (1U<<31) | 6116, (1U<<31) | 9374, (1U<<31) | 6116, 0x4444, 
  0x4455, 0x447f3f, 0x4444, 0x4455, 0x447f3f, 0x4444, 0x4455, (1U<<31) | 102, 
  0x3f44, 0x3f55, 0x447f3f, 0x4444, 0x4455, (1U<<31) | 8743, (1U<<31) | 5217, (1U<<31) | 6052, 
  0x7f7f7f3f, (1U<<31) | 6052, 0x7f7f7f3f, (1U<<31) | 6052, 0x7f7f3f, 0x47f7f3f, (1U<<31) | 8879, (1U<<31) | 5991, 
  (1U<<31) | 6050, (1U<<31) | 5991, (1U<<31) | 6050, (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 5991, (1U<<31) | 6050, (1U<<31) | 5991, 
  (1U<<31) | 6050, (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 8879, (1U<<31) | 5982, (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 2299, 
  (1U<<31) | 2297, (1U<<31) | 8879, (1U<<31) | 8879, 0x7f7f3f, (1U<<31) | 8879, (1U<<31) | 1176, (1U<<31) | 1174, (1U<<31) | 1176, 
  (1U<<31) | 1174, (1U<<31) | 8879, 0x47f7f3f, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8998, (1U<<31) | 9091, 
  (1U<<31) | 5982, 0x47f7f3f, (1U<<31) | 6110, (1U<<31) | 6110, 0x47f7f3f, (1U<<31) | 9374, (1U<<31) | 9374, (1U<<31) | 9374, 
  (1U<<31) | 9374, (1U<<31) | 9367, (1U<<31) | 9367, (1U<<31) | 10470, (1U<<31) | 10643, (1U<<31) | 10659, (1U<<31) | 7188, (1U<<31) | 7188, 
  (1U<<31) | 7492, (1U<<31) | 2334, (1U<<31) | 2334, (1U<<31) | 2334, (1U<<31) | 2334, (1U<<31) | 10478, (1U<<31) | 10498, (1U<<31) | 10522, 
  (1U<<31) | 10470, (1U<<31) | 10643, (1U<<31) | 10659, (1U<<31) | 7188, (1U<<31) | 7188, (1U<<31) | 7492, (1U<<31) | 2334, (1U<<31) | 8879, 
  (1U<<31) | 8879, (1U<<31) | 8851, (1U<<31) | 8849, (1U<<31) | 8879, (1U<<31) | 6134, (1U<<31) | 9366, (1U<<31) | 9390, (1U<<31) | 9351, 
  (1U<<31) | 9351, (1U<<31) | 8879, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 9428, (1U<<31) | 9435, (1U<<31) | 9435, 
  0x7f7f3f, (1U<<31) | 355, (1U<<31) | 355, (1U<<31) | 355, 0x7f7f3f, 0x7f7f3f, (1U<<31) | 355, (1U<<31) | 355, 
  (1U<<31) | 355, 0x7f7f3f, 0x7f7f7f3f, (1U<<31) | 9381, (1U<<31) | 9381, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 9374, 
  (1U<<31) | 9374, (1U<<31) | 9356, (1U<<31) | 9374, (1U<<31) | 9374, (1U<<31) | 1111, (1U<<31) | 9367, (1U<<31) | 9367, 0x7f7f7f3f, 
  (1U<<31) | 8998, (1U<<31) | 9069, (1U<<31) | 10108, (1U<<31) | 6634, (1U<<31) | 7413, (1U<<31) | 7426, (1U<<31) | 6621, (1U<<31) | 8858, 
  (1U<<31) | 8935, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 9404, (1U<<31) | 6134, (1U<<31) | 6124, (1U<<31) | 9381, 
  (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, (1U<<31) | 8998, 
  (1U<<31) | 9091, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 8879, (1U<<31) | 8986, (1U<<31) | 9049, (1U<<31) | 8879, (1U<<31) | 8998, 
  (1U<<31) | 9091, (1U<<31) | 8879, (1U<<31) | 1118, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9381, 
  (1U<<31) | 6124, (1U<<31) | 9381, (1U<<31) | 6124, (1U<<31) | 9404, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 9374, (1U<<31) | 6116, 
  (1U<<31) | 9374, (1U<<31) | 6116, (1U<<31) | 8879, 0x7f7f3f, (1U<<31) | 8851, (1U<<31) | 8924, (1U<<31) | 8851, (1U<<31) | 8924, 
  0x4444, 0x4455, 0x447f3f, 0x4444, 0x4455, 0x447f3f, 0x4444, 0x4455, 
  (1U<<31) | 102, 0x3f44, 0x3f55, 0x447f3f, 0x4444, 0x4455, 0x4444, 0x4455, 
  0x447f3f, 0x4444, 0x4455, 0x447f3f, 0x4444, 0x4455, (1U<<31) | 102, 0x3f44, 
  0x3f55, 0x447f3f, 0x4444, 0x4455, (1U<<31) | 8879, (1U<<31) | 5991, (1U<<31) | 6050, (1U<<31) | 5991, 
  (1U<<31) | 6050, (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 8879, (1U<<31) | 2299, (1U<<31) | 2297, (1U<<31) | 8879, (1U<<31) | 8879, 
  0x7f7f3f, (1U<<31) | 8879, (1U<<31) | 1176, (1U<<31) | 1174, (1U<<31) | 8743, (1U<<31) | 8879, (1U<<31) | 8879, (1U<<31) | 8986, 
  (1U<<31) | 9049, (1U<<31) | 8998, (1U<<31) | 9091, (1U<<31) | 5982, (1U<<31) | 8743, 0x47f7f3f, (1U<<31) | 9404, (1U<<31) | 6134, 
  (1U<<31) | 6110, (1U<<31) | 6110, (1U<<31) | 9404, (1U<<31) | 8879, 0x47f7f3f, (1U<<31) | 9374, (1U<<31) | 9374, (1U<<31) | 9367, 
  (1U<<31) | 9367, (1U<<31) | 9366, (1U<<31) | 9390, (1U<<31) | 9351, (1U<<31) | 9351, (1U<<31) | 8743, (1U<<31) | 8743, (1U<<31) | 8743, 
  (1U<<31) | 8953, (1U<<31) | 9031, 0x7f7f3f, (1U<<31) | 355, (1U<<31) | 355, (1U<<31) | 355, 0x7f7f3f, 0x7f7f3f, 
  (1U<<31) | 355, (1U<<31) | 355, (1U<<31) | 355, 0x7f7f3f, (1U<<31) | 8953, (1U<<31) | 9031, 0xff9f3f, (1U<<31) | 5747, 
  (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 7282, 0xff9f3f, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, 
  (1U<<31) | 5747, (1U<<31) | 7282, 0xff9f3f, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 7282, 
  0xff9f3f, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 7282, 0xff9f3f, (1U<<31) | 5747, 
  (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 7282, 0xff9f3f, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, 
  (1U<<31) | 5747, (1U<<31) | 7282, 0xff9f3f, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 7282, 
  0xff9f3f, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 5747, (1U<<31) | 7282, 0xffcf3f, 0xffcf3f, 
  0xffcf3f, 0xffcf3f, 0xffcf3f, 0xffcf3f, 0xffcf3f, 0xffcf3f, (1U<<31) | 350, 0x47f7f3f, 
  (1U<<31) | 8953, (1U<<31) | 9031, 0x7f7f3f, (1U<<31) | 355, (1U<<31) | 355, (1U<<31) | 355, 0x7f7f3f, 0x7f7f3f, 
  (1U<<31) | 355, (1U<<31) | 355, (1U<<31) | 355, 0x7f7f3f, (1U<<31) | 8953, (1U<<31) | 9031, 0x5e7f4f, 0x50, 
  0x0, 0x5, 0x5, 0x7f7f1f, 0x4444, 0x11f, (1U<<31) | 162, (1U<<31) | 162, 
  0x1444a444, (1U<<31) | 162, (1U<<31) | 172, (1U<<31) | 162, (1U<<31) | 162, (1U<<31) | 162, (1U<<31) | 162, (1U<<31) | 162, 
  (1U<<31) | 162, (1U<<31) | 162, (1U<<31) | 162, 0x11444a0f, 0x11444a2f, (1U<<31) | 31, (1U<<31) | 41, 0x0, 
  0x0, 0x0, 0x42f1, 0x7f2f, (1U<<31) | 13924, 0x7777, 0x7777, 0x7777, 
  0x7777, 0x447, 0x447, 0x14774, 0x1479, 0x1479, 0x14774, 0x4439, 
  0x4439, 0x4474, 0x7739, 0x7739, 0x7769, 0x44474, 0x44474, 0x5, 
  (1U<<31) | 7104, 0x7f7f7f2f, (1U<<31) | 250, (1U<<31) | 240, 0x441f, 0x14f4, 0x444, (1U<<31) | 5300, 
  0x14f4, (1U<<31) | 182, (1U<<31) | 1052, (1U<<31) | 182, (1U<<31) | 182, 0x440, 0x440, 0x440, 
  0x40, 0x40, 0x40, (1U<<31) | 6, (1U<<31) | 6, 0x444, 0x441f, 0x444, 
  (1U<<31) | 9742, 0x1f0, 0x0, (1U<<31) | 61, (1U<<31) | 51, (1U<<31) | 1306, 0x7f2f, 0x4ffaf1f, 
  0x777, 0x1769697, 0x339393, 0x669696, 0x1739397, (1U<<31) | 13091, 0x394f39, (1U<<31) | 13091, 
  (1U<<31) | 13091, 0x7777, 0x7f7f7f2f, 0x7f7f7f2f, 0x777, 0x7f2f, 0xaf1f, 0x7f2f, 
  0x44f4, (1U<<31) | 13091, 0x394f39, (1U<<31) | 13091, (1U<<31) | 13091, (1U<<31) | 3868, 0x4, 0x4ff9f1f, 
  (1U<<31) | 75, 0x7f11f, 0x40, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4815, (1U<<31) | 4889, (1U<<31) | 4889, (1U<<31) | 4963, (1U<<31) | 5047, 
  (1U<<31) | 4963, (1U<<31) | 4963, (1U<<31) | 4963, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4804, (1U<<31) | 4876, (1U<<31) | 4876, (1U<<31) | 4948, (1U<<31) | 5030, 
  (1U<<31) | 4948, (1U<<31) | 4948, (1U<<31) | 4948, (1U<<31) | 4804, (1U<<31) | 4876, (1U<<31) | 4876, (1U<<31) | 4948, (1U<<31) | 5030, 
  (1U<<31) | 4948, (1U<<31) | 4948, (1U<<31) | 4948, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4793, (1U<<31) | 4863, (1U<<31) | 4863, (1U<<31) | 4933, (1U<<31) | 5013, 
  (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 4933, (1U<<31) | 6607, (1U<<31) | 2424, (1U<<31) | 2488, (1U<<31) | 2813, (1U<<31) | 3065, 
  (1U<<31) | 3065, (1U<<31) | 3461, (1U<<31) | 3461, (1U<<31) | 3084, (1U<<31) | 3482, (1U<<31) | 3482, (1U<<31) | 3065, (1U<<31) | 2830, 
  (1U<<31) | 3084, (1U<<31) | 3084, (1U<<31) | 2455, (1U<<31) | 2523, (1U<<31) | 2776, (1U<<31) | 3024, (1U<<31) | 3024, (1U<<31) | 3416, 
  (1U<<31) | 3416, (1U<<31) | 3044, (1U<<31) | 3438, (1U<<31) | 3438, (1U<<31) | 3024, (1U<<31) | 2794, (1U<<31) | 3044, (1U<<31) | 3044, 
  (1U<<31) | 2523, (1U<<31) | 2599, (1U<<31) | 2599, (1U<<31) | 2541, (1U<<31) | 2619, (1U<<31) | 2619, (1U<<31) | 2523, (1U<<31) | 2523, 
  (1U<<31) | 2599, (1U<<31) | 2599, (1U<<31) | 2541, (1U<<31) | 2619, (1U<<31) | 2619, (1U<<31) | 2455, (1U<<31) | 2523, (1U<<31) | 2523, 
  (1U<<31) | 2471, (1U<<31) | 2541, (1U<<31) | 2541, (1U<<31) | 2471, (1U<<31) | 2541, (1U<<31) | 2541, (1U<<31) | 2488, (1U<<31) | 2560, 
  (1U<<31) | 2560, (1U<<31) | 2505, (1U<<31) | 2579, (1U<<31) | 2579, (1U<<31) | 2488, (1U<<31) | 2488, (1U<<31) | 2560, (1U<<31) | 2560, 
  (1U<<31) | 2505, (1U<<31) | 2579, (1U<<31) | 2579, (1U<<31) | 2424, (1U<<31) | 2488, (1U<<31) | 2488, (1U<<31) | 2439, (1U<<31) | 2505, 
  (1U<<31) | 2505, (1U<<31) | 2439, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2368, (1U<<31) | 2424, (1U<<31) | 2424, (1U<<31) | 2488, 
  (1U<<31) | 2488, (1U<<31) | 2488, (1U<<31) | 4783, (1U<<31) | 4783, (1U<<31) | 4783, (1U<<31) | 4783, (1U<<31) | 4783, (1U<<31) | 4783, 
  (1U<<31) | 4783, (1U<<31) | 4783, (1U<<31) | 4772, (1U<<31) | 4838, (1U<<31) | 4838, (1U<<31) | 4904, (1U<<31) | 4980, (1U<<31) | 4904, 
  (1U<<31) | 4904, (1U<<31) | 4904, (1U<<31) | 4838, (1U<<31) | 4904, (1U<<31) | 4904, (1U<<31) | 4980, (1U<<31) | 4980, (1U<<31) | 4980, 
  (1U<<31) | 4980, (1U<<31) | 4904, (1U<<31) | 4980, (1U<<31) | 4904, (1U<<31) | 2368, (1U<<31) | 2424, (1U<<31) | 2424, (1U<<31) | 2488, 
  (1U<<31) | 2488, (1U<<31) | 2673, (1U<<31) | 2813, (1U<<31) | 2813, (1U<<31) | 3065, (1U<<31) | 3065, (1U<<31) | 2813, (1U<<31) | 3065, 
  (1U<<31) | 3065, (1U<<31) | 3461, (1U<<31) | 3461, (1U<<31) | 3461, (1U<<31) | 2830, (1U<<31) | 3084, (1U<<31) | 3084, (1U<<31) | 3482, 
  (1U<<31) | 3482, (1U<<31) | 3482, (1U<<31) | 3065, (1U<<31) | 2688, (1U<<31) | 2830, (1U<<31) | 2830, (1U<<31) | 3084, (1U<<31) | 3084, 
  (1U<<31) | 3084, (1U<<31) | 2395, (1U<<31) | 2455, (1U<<31) | 2455, (1U<<31) | 2523, (1U<<31) | 2523, (1U<<31) | 2640, (1U<<31) | 2776, 
  (1U<<31) | 2776, (1U<<31) | 3024, (1U<<31) | 3024, (1U<<31) | 2776, (1U<<31) | 3024, (1U<<31) | 3024, (1U<<31) | 3416, (1U<<31) | 3416, 
  (1U<<31) | 3416, (1U<<31) | 2794, (1U<<31) | 3044, (1U<<31) | 3044, (1U<<31) | 3438, (1U<<31) | 3438, (1U<<31) | 3438, (1U<<31) | 3024, 
  (1U<<31) | 2656, (1U<<31) | 2794, (1U<<31) | 2794, (1U<<31) | 3044, (1U<<31) | 3044, (1U<<31) | 3044, (1U<<31) | 2739, (1U<<31) | 2887, 
  (1U<<31) | 2975, (1U<<31) | 3243, (1U<<31) | 3355, (1U<<31) | 2887, (1U<<31) | 3147, (1U<<31) | 3243, (1U<<31) | 3559, (1U<<31) | 3679, 
  (1U<<31) | 3559, (1U<<31) | 2907, (1U<<31) | 3169, (1U<<31) | 3269, (1U<<31) | 3587, (1U<<31) | 3711, (1U<<31) | 3587, (1U<<31) | 3243, 
  (1U<<31) | 2757, (1U<<31) | 2907, (1U<<31) | 2999, (1U<<31) | 3269, (1U<<31) | 3385, (1U<<31) | 3269, (1U<<31) | 2455, (1U<<31) | 2523, 
  (1U<<31) | 2523, (1U<<31) | 2599, (1U<<31) | 2599, (1U<<31) | 2599, (1U<<31) | 2471, (1U<<31) | 2541, (1U<<31) | 2541, (1U<<31) | 2619, 
  (1U<<31) | 2619, (1U<<31) | 2619, (1U<<31) | 2523, (1U<<31) | 2739, (1U<<31) | 2887, (1U<<31) | 2975, (1U<<31) | 3243, (1U<<31) | 3355, 
  (1U<<31) | 2887, (1U<<31) | 3147, (1U<<31) | 3243, (1U<<31) | 3559, (1U<<31) | 3679, (1U<<31) | 3559, (1U<<31) | 2907, (1U<<31) | 3169, 
  (1U<<31) | 3269, (1U<<31) | 3587, (1U<<31) | 3711, (1U<<31) | 3587, (1U<<31) | 3243, (1U<<31) | 2757, (1U<<31) | 2907, (1U<<31) | 2999, 
  (1U<<31) | 3269, (1U<<31) | 3385, (1U<<31) | 3269, (1U<<31) | 2455, (1U<<31) | 2523, (1U<<31) | 2523, (1U<<31) | 2599, (1U<<31) | 2599, 
  (1U<<31) | 2599, (1U<<31) | 2471, (1U<<31) | 2541, (1U<<31) | 2541, (1U<<31) | 2619, (1U<<31) | 2619, (1U<<31) | 2619, (1U<<31) | 2395, 
  (1U<<31) | 2455, (1U<<31) | 2455, (1U<<31) | 2523, (1U<<31) | 2523, (1U<<31) | 2523, (1U<<31) | 2409, (1U<<31) | 2471, (1U<<31) | 2471, 
  (1U<<31) | 2541, (1U<<31) | 2541, (1U<<31) | 2541, (1U<<31) | 2409, (1U<<31) | 2471, (1U<<31) | 2471, (1U<<31) | 2541, (1U<<31) | 2541, 
  (1U<<31) | 2541, (1U<<31) | 2704, (1U<<31) | 2848, (1U<<31) | 2928, (1U<<31) | 3192, (1U<<31) | 3296, (1U<<31) | 2848, (1U<<31) | 3104, 
  (1U<<31) | 3192, (1U<<31) | 3504, (1U<<31) | 3616, (1U<<31) | 3504, (1U<<31) | 2867, (1U<<31) | 3125, (1U<<31) | 3217, (1U<<31) | 3531, 
  (1U<<31) | 3647, (1U<<31) | 3531, (1U<<31) | 3192, (1U<<31) | 2721, (1U<<31) | 2867, (1U<<31) | 2951, (1U<<31) | 3217, (1U<<31) | 3325, 
  (1U<<31) | 3217, (1U<<31) | 2424, (1U<<31) | 2488, (1U<<31) | 2488, (1U<<31) | 2560, (1U<<31) | 2560, (1U<<31) | 2560, (1U<<31) | 2439, 
  (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2579, (1U<<31) | 2579, (1U<<31) | 2579, (1U<<31) | 2488, (1U<<31) | 2704, (1U<<31) | 2848, 
  (1U<<31) | 2928, (1U<<31) | 3192, (1U<<31) | 3296, (1U<<31) | 2848, (1U<<31) | 3104, (1U<<31) | 3192, (1U<<31) | 3504, (1U<<31) | 3616, 
  (1U<<31) | 3504, (1U<<31) | 2867, (1U<<31) | 3125, (1U<<31) | 3217, (1U<<31) | 3531, (1U<<31) | 3647, (1U<<31) | 3531, (1U<<31) | 3192, 
  (1U<<31) | 2721, (1U<<31) | 2867, (1U<<31) | 2951, (1U<<31) | 3217, (1U<<31) | 3325, (1U<<31) | 3217, (1U<<31) | 2424, (1U<<31) | 2488, 
  (1U<<31) | 2488, (1U<<31) | 2560, (1U<<31) | 2560, (1U<<31) | 2560, (1U<<31) | 2439, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2579, 
  (1U<<31) | 2579, (1U<<31) | 2579, (1U<<31) | 2368, (1U<<31) | 2424, (1U<<31) | 2424, (1U<<31) | 2488, (1U<<31) | 2488, (1U<<31) | 2488, 
  (1U<<31) | 2381, (1U<<31) | 2439, (1U<<31) | 2439, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2381, (1U<<31) | 2439, 
  (1U<<31) | 2439, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 2505, (1U<<31) | 4782, (1U<<31) | 4850, (1U<<31) | 4850, (1U<<31) | 4918, 
  (1U<<31) | 4996, (1U<<31) | 4918, (1U<<31) | 4918, (1U<<31) | 4918, (1U<<31) | 4850, (1U<<31) | 4918, (1U<<31) | 4918, (1U<<31) | 4996, 
  (1U<<31) | 4996, (1U<<31) | 4996, (1U<<31) | 7104, (1U<<31) | 7104, 0x50, 0x440, 0x7777, 0x17777, 
  0x7777, 0x17776, 0x44447, 0x44477, 0x414477, 0x444777, 0x4144776, 0x1f1, 
  0xe1, 0xe1, (1U<<31) | 7104, 0x10, 0x47f2f, 0x40f, 0x4, 0x4447, 
  0x4444, 0x1, 0x7f2f, 0x7f2f, 0x1f1, (1U<<31) | 3825, 0x444, 0x444, 
  (1U<<31) | 4458, (1U<<31) | 4504, (1U<<31) | 4632, (1U<<31) | 4550, (1U<<31) | 4494, (1U<<31) | 4494, (1U<<31) | 4494, (1U<<31) | 4494, 
  (1U<<31) | 4562, (1U<<31) | 4608, (1U<<31) | 4528, (1U<<31) | 4538, (1U<<31) | 4446, (1U<<31) | 4598, (1U<<31) | 4598, (1U<<31) | 4598, 
  (1U<<31) | 4598, (1U<<31) | 4690, (1U<<31) | 4654, (1U<<31) | 4632, (1U<<31) | 4642, (1U<<31) | 4550, (1U<<31) | 4666, (1U<<31) | 4678, 
  (1U<<31) | 4562, (1U<<31) | 4608, (1U<<31) | 4528, (1U<<31) | 4446, (1U<<31) | 4458, (1U<<31) | 4504, (1U<<31) | 4712, 0x4448888, 
  (1U<<31) | 4258, (1U<<31) | 4280, (1U<<31) | 4322, (1U<<31) | 4344, (1U<<31) | 4354, (1U<<31) | 4322, (1U<<31) | 4258, 0x14447f1f, 
  0x47f1f, 0x5455, 0x4a454a, 0x4444, 0x444, 0x444, 0x444, 0x444, 
  0x4444, 0x1144444, 0x44, 0x1144444, 0x1, 0x5455, (1U<<31) | 7104, (1U<<31) | 4290, 
  (1U<<31) | 4290, (1U<<31) | 4310, (1U<<31) | 4290, (1U<<31) | 4300, (1U<<31) | 4300, (1U<<31) | 4300, (1U<<31) | 4290, (1U<<31) | 4290, 
  (1U<<31) | 4290, (1U<<31) | 4290, (1U<<31) | 4290, (1U<<31) | 4290, (1U<<31) | 4290, (1U<<31) | 4290, (1U<<31) | 4290, 0x4444a0f, 
  0x4444a2f, (1U<<31) | 3893, 0x4444a0f0, 0x4444a2f0, (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4760, (1U<<31) | 4740, 
  (1U<<31) | 4750, (1U<<31) | 4750, (1U<<31) | 4750, (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4740, 
  (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4740, (1U<<31) | 4723, (1U<<31) | 4732, (1U<<31) | 3904, (1U<<31) | 4722, 
  (1U<<31) | 4731, (1U<<31) | 4214, (1U<<31) | 4213, 0x44444a0f, (1U<<31) | 4161, 0x7f2f, 0x77, 0x44, 
  0x444, (1U<<31) | 13111, 0x7f2f, 0x7f2f, 0x77, 0x0, 0x444a0f, 0x0, 
  0x0, 0x0, 0x0, 0x40, 0x4, 0x5, 0x44, 0x40, 
  0x5, 0x5, 0x440, 0x41f, 0x440, 0x40, 0x30, 0x440, 
  0x40, 0x0, 0x40, 0x4444, 0x4444, 0x4444, 0x447f1f, 0x40, 
  0x4440, 0x1439394, 0x14444, 0x14444, 0x7f7f1f, 0x7f1f, 0x7f2f, (1U<<31) | 4470, 
  (1U<<31) | 4516, (1U<<31) | 4482, (1U<<31) | 4482, (1U<<31) | 4482, (1U<<31) | 4482, (1U<<31) | 4574, (1U<<31) | 4620, (1U<<31) | 4586, 
  (1U<<31) | 4586, (1U<<31) | 4586, (1U<<31) | 4586, (1U<<31) | 4268, (1U<<31) | 4332, 0x7f0f, 0x7f2f, 0x7f0f, 
  0x7f0f, (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4193, (1U<<31) | 4171, (1U<<31) | 4182, (1U<<31) | 4182, (1U<<31) | 4182, 
  (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4171, (1U<<31) | 4171, 
  (1U<<31) | 4171, 0x44444a0f, 0x44444a0f, (1U<<31) | 3915, (1U<<31) | 4161, (1U<<31) | 4161, (1U<<31) | 4223, (1U<<31) | 4223, 
  (1U<<31) | 4245, (1U<<31) | 4223, (1U<<31) | 4234, (1U<<31) | 4234, (1U<<31) | 4234, (1U<<31) | 4223, (1U<<31) | 4223, (1U<<31) | 4223, 
  (1U<<31) | 4223, (1U<<31) | 4223, (1U<<31) | 4223, (1U<<31) | 4223, (1U<<31) | 4223, (1U<<31) | 4223, (1U<<31) | 4214, (1U<<31) | 4214, 
  (1U<<31) | 3927, (1U<<31) | 4213, (1U<<31) | 4213, (1U<<31) | 4151, (1U<<31) | 4150, (1U<<31) | 4119, (1U<<31) | 4118, 0x1441414, 
  0x1441414, (1U<<31) | 18, (1U<<31) | 17, 0x47f2f, 0x447f1f, 0x1439394, 0x14444, 0x14444, 
  0x0, (1U<<31) | 151, 0x0, 0x47f1f, 0x47f1f, 0x4, (1U<<31) | 196, (1U<<31) | 230, 
  0x7f3c3c2f, 0x7f6c6c2f, (1U<<31) | 206, (1U<<31) | 218, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x7f0f, 0x10, 0x11, 0x4444, 0x7f0f, 0x444, 
  0x4444, (1U<<31) | 4364, (1U<<31) | 4142, 0x4444, 0x44444, (1U<<31) | 4206, (1U<<31) | 4109, 0x44444, 
  0x444444, (1U<<31) | 4142, (1U<<31) | 4089, 0x442f, 0x47f42f, 0x442c, (1U<<31) | 10211, 0x42c42c, 
  (1U<<31) | 10211, 0x47f42f, 0x47f7f42f, 0x42c42c, (1U<<31) | 10141, 0x42c2c42c, (1U<<31) | 10141, 0x47f7f42f, 
  (1U<<31) | 6073, 0x42c2c42c, (1U<<31) | 10128, (1U<<31) | 2154, (1U<<31) | 10128, 0x4444440, 0x4444440, 0x0, 
  0x44, 0x54, 0xe4, 0xe4, 0xe4, 0xe4, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x40, 0x40, 0x40, 0x4, 
  0x0, 0x40, 0x40, 0x4f4, (1U<<31) | 10577, 0xe440, 0xe440, 0xe440, 
  0xe440, 0x4f4, (1U<<31) | 10577, 0x4444440, 0x4444440, 0x444440, 0x444440, 0x444444, 
  0x444444, (1U<<31) | 4206, (1U<<31) | 4206, (1U<<31) | 9188, 0x7fbf7f3f, (1U<<31) | 9200, 0x43f5, 0xbf43f5, 
  0x43f4, 0xbf43f4, (1U<<31) | 9200, (1U<<31) | 4206, (1U<<31) | 9200, 0x7fbf7f3f, 0x7fbf7f3f, (1U<<31) | 9200, 
  (1U<<31) | 10268, (1U<<31) | 9188, (1U<<31) | 9188, (1U<<31) | 4206, (1U<<31) | 9188, 0x3f44, 0xbf3f44, 0xbf7f2f, 
  (1U<<31) | 13236, 0xbf7f2f, (1U<<31) | 13236, 0x43f44, 0xbf43f44, (1U<<31) | 9188, 0x3f44, 0xbf3f44, 
  0xbf7f2f, (1U<<31) | 13236, 0xbf7f2f, (1U<<31) | 13236, 0x43f44, 0xbf43f44, (1U<<31) | 9200, (1U<<31) | 9188, 
  (1U<<31) | 9248, (1U<<31) | 9271, 0x7fbf7f3f, 0x7fbf7f3f, (1U<<31) | 9200, (1U<<31) | 9200, 0x43f, 0x3f4, 
  0x7fbf7f3f, (1U<<31) | 9188, (1U<<31) | 9200, 0x7fbf7f3f, (1U<<31) | 9200, (1U<<31) | 9188, (1U<<31) | 9188, (1U<<31) | 9188, 
  (1U<<31) | 9178, (1U<<31) | 9167, 0x444, (1U<<31) | 4142, 0x444, (1U<<31) | 4206, 0x444, (1U<<31) | 4206, 
  (1U<<31) | 9200, 0x444, (1U<<31) | 4142, 0x444, (1U<<31) | 4206, 0x444, (1U<<31) | 4206, 0x7f3f444, 
  (1U<<31) | 10249, 0x47f7f3f, (1U<<31) | 6027, (1U<<31) | 10230, 0x47f3f, (1U<<31) | 10220, 0x7f7f443f, (1U<<31) | 10291, 
  0x7f3f, (1U<<31) | 8963, (1U<<31) | 10279, 0x7f7f43f, (1U<<31) | 10279, 0x41b, 0x41a, 0x419, 
  0x41c, 0x4bf43f, (1U<<31) | 13195, (1U<<31) | 9261, 0x47a6b6b, (1U<<31) | 280, 0x46b7a, (1U<<31) | 270, 
  0xbf43f, (1U<<31) | 13245, 0xbf43f, (1U<<31) | 13245, 0xbf43f, (1U<<31) | 13245, 0xbf43f, (1U<<31) | 13245, 
  (1U<<31) | 3861, (1U<<31) | 10200, (1U<<31) | 3885, (1U<<31) | 10163, 0x47f7f3f, 0x47f7f3f, (1U<<31) | 3861, (1U<<31) | 10200, 
  (1U<<31) | 3885, (1U<<31) | 10163, (1U<<31) | 10418, (1U<<31) | 10458, 0x4bf3f, (1U<<31) | 13177, (1U<<31) | 7003, (1U<<31) | 13206, 
  (1U<<31) | 5085, (1U<<31) | 13847, (1U<<31) | 10259, (1U<<31) | 10259, (1U<<31) | 10259, (1U<<31) | 10259, (1U<<31) | 10220, (1U<<31) | 10220, 
  (1U<<31) | 8839, (1U<<31) | 10247, (1U<<31) | 8836, (1U<<31) | 10244, (1U<<31) | 9237, (1U<<31) | 13185, 0x47f7f3f, 0x44ffbf3f, 
  0x4ffbf3f, (1U<<31) | 4700, (1U<<31) | 10175, 0x47f7f3f, (1U<<31) | 10220, 0x47f7f3f, (1U<<31) | 10220, 0x7f7f3f, 
  0x4ffbf3f, (1U<<31) | 9271, (1U<<31) | 4828, (1U<<31) | 13165, 0x47f7f3f, (1U<<31) | 10220, 0x47f7f3f, (1U<<31) | 10220, 
  0x7f7f3f, 0x447f3f, (1U<<31) | 9167, 0x47f3f, (1U<<31) | 9178, 0xbf3f, (1U<<31) | 9178, 0x47f7f3f, 
  0x7fbf7f3f, 0x7fbf7f3f, 0x7f3f, 0x7fbf7f3f, 0x7fbf7f3f, 0x7fbf7f3f, 0x7fbf7f3f, (1U<<31) | 8836, 
  (1U<<31) | 10244, 0x47f7f3f, 0x447f3f, (1U<<31) | 9167, (1U<<31) | 6027, (1U<<31) | 10230, 0x44447f3f, (1U<<31) | 10152, 
  (1U<<31) | 4828, (1U<<31) | 9223, (1U<<31) | 5199, (1U<<31) | 10189, 0x444bf3f, (1U<<31) | 9211, (1U<<31) | 4129, (1U<<31) | 13150, 
  0x47f7f3f, (1U<<31) | 10220, 0x47f7f3f, (1U<<31) | 10220, 0x4ffbf4f0, (1U<<31) | 7033, 0xbf43f0, (1U<<31) | 13217, 
  0xbf47f3f, (1U<<31) | 13226, (1U<<31) | 5663, (1U<<31) | 13859, 0x2c2c2c, 0x2c2c2c, 0x2c2c, 0x2c2c, 
  (1U<<31) | 11497, (1U<<31) | 14186, (1U<<31) | 14186, (1U<<31) | 14186, (1U<<31) | 11497, 0x4a44a4a, 0x44, 0x4a44a4a, 
  0x4a44a4a, 0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, 0x4a4a4a4a, 0x4a4a4a, 0x4a4a4a4a, (1U<<31) | 11497, 
  (1U<<31) | 11497, (1U<<31) | 11497, (1U<<31) | 11497, (1U<<31) | 11497, 0x7f7f3f, 0x7f7f3f, 0x7f3f, 0xffbf3f, 
  0xffbf3f, 0x7f7f7f3f, 0x7f7f3f, 0x7f7f3f, 0x7f3f, 0xbf3f, 0xbf3f, (1U<<31) | 9472, 
  0x7a3f, 0x4af1f, 0x4af1f, 0x7a3a, 0x49f2f, 0x49f2f, 0x3a7a, 0xbf3f, 
  0xbf3f, 0xbf3f, 0xbf3f, 0xbf3f, 0xbf3f, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 
  0x7f7f3f, 0x4cf3f, (1U<<31) | 10418, (1U<<31) | 10436, (1U<<31) | 10458, (1U<<31) | 6748, (1U<<31) | 6748, (1U<<31) | 5268, 
  (1U<<31) | 6757, (1U<<31) | 6757, (1U<<31) | 5250, (1U<<31) | 6768, (1U<<31) | 6768, (1U<<31) | 5228, 0x7f7f3f, 0x7f7f3f, 
  0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, (1U<<31) | 9305, (1U<<31) | 9305, (1U<<31) | 9305, 0x7f7f3f, 
  0xbf7f3f, 0xbf7f3f, 0x7f7f3f, 0xbf3f, 0xbf3f, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 
  0x7f7f3f, 0x7f3f, 0x7f7f3f, (1U<<31) | 9305, (1U<<31) | 9288, (1U<<31) | 9288, (1U<<31) | 9288, 0x7f3f, 
  0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f3f, (1U<<31) | 9293, (1U<<31) | 9293, (1U<<31) | 9293, 0x7f7f3f, 0x7f7f3f, 
  (1U<<31) | 9293, (1U<<31) | 9293, (1U<<31) | 9293, 0x7f7f3f, 0x7f7f3f, 0x7f7f3f, (1U<<31) | 9293, 0x7f3f, 
  0x7f7f3f, 0x7f7f3f, 0x7f7f3f, 0x7f3f, 0x7f3f, 0x7f2f, 0x7f3f, 0x7f3f, 
  0x7f3f, (1U<<31) | 9293, 0x7f7f3f, 0x7f7f3f, 0x7f3f, 0x7f7f3f, (1U<<31) | 9293, 0x7f7f7f3f, 
  0x7f7f3f, 0x7f7f3f, 0x4bf4f0, 0xffbf4f0, (1U<<31) | 12230, (1U<<31) | 12240, 0x4ffbf4f0, (1U<<31) | 5598, 
  (1U<<31) | 7022, (1U<<31) | 5608, (1U<<31) | 7033, (1U<<31) | 5620, 0x2b2b2b, 0x2b2b2b2b, (1U<<31) | 882, (1U<<31) | 880, 
  0x2b2b2b2b, (1U<<31) | 882, (1U<<31) | 880, (1U<<31) | 878, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x40, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x4444, 
  0x4444, 0x4444, 0x4444, 0x5445, 0x5445, 0x4444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x4444, 0x5445, 0x5445, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0xe440, 0xe440, 0xe440, 
  0xe440, 0x4f44, 0xe444, 0x4f44, 0xe444, 0x444, 0x44, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x40, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x4444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x44, 
  0x2f7, 0x2f7, 0x545, 0x9f1f41, 0x5e5, 0x5e5, 0x5e5, 0x8f40f, 
  0x5e45, 0x54f4, 0x544, 0x555, 0x2e, 0x4, 0x44, 0x44, 
  0x44, 0x44, 0x55, 0x44, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x555, 0x555, 0x444, 0x545, 0x444, 0x444, 
  0x555, 0x44, 0x44, 0x444, 0x444, 0x444, 0x444, 0x445, 
  0x445, 0x444, 0x555, 0x444, 0x555, 0x444, 0x555, 0x444, 
  0x555, 0x44, 0x55, 0x44, 0x44, 0x55, 0x444, 0x444, 
  0x555, 0x54, 0x54, 0x44, 0x44, 0x44, 0x44, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x555, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x44, 0x44, 0x44, 0x45, 0x44, 0x444, 0x444, 0x55, 
  0x45, 0x44, 0x55, 0x55, 0x55, 0x55, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x555, 0x554, 0x554, 0x554, 0x554, 0x554, 0x554, 
  0x554, 0x554, 0x55, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x5555, 0x555, 
  0x5555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x444, 0x555, 0x44, 0x44, 0x444, 0x555, 0x445, 
  0x445, 0x544, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x445, 0x445, 
  0x444, 0x444, 0x444, 0x444, 0x555, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x454, 0x554, 0x454, 
  0x554, 0x454, 0x454, 0x454, 0x454, 0x454, 0x454, 0x454, 
  0x454, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 
  0x4555, 0x554, 0x554, 0x444, 0x455, 0x455, 0x455, 0x44, 
  0x444, 0x444, 0x44, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x554, 0x444, 0x444, 0x444, 0x444, 0x554, 0x444, 0x444, 
  0x554, 0x444, 0x444, 0x45, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x44, 0x444, 0x444, 0x44, 0x44, 0x44, 0x444, 0x5545, 
  0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x58, 0x57, 0x85, 0x85, 
  0x87, 0x85, 0x85, 0x84, 0x84, 0x84, 0x84, 0x75, 
  0x75, 0x78, 0x75, 0x75, 0x74, 0x74, 0x74, 0x74, 
  0x58, 0x57, 0x48, 0x47, 0x48, 0x47, 0x888, 0x484, 
  0x884, 0x884, 0x884, 0x884, 0x48, 0x48, 0x888, 0x888, 
  0x888, 0x8888, 0x8888, 0x888, 0x888, 0x777, 0x474, 0x774, 
  0x774, 0x774, 0x774, 0x777, 0x777, 0x77, 0x7777, 0x7777, 
  0x47777, 0x7777, 0x7777, 0x47, 0x47, 0x777, 0x777, 0x777, 
  0x777, (1U<<31) | 6915, (1U<<31) | 10561, (1U<<31) | 10582, (1U<<31) | 6921, (1U<<31) | 10569, (1U<<31) | 10589, (1U<<31) | 6915, 
  (1U<<31) | 10561, (1U<<31) | 10582, (1U<<31) | 6915, (1U<<31) | 10561, (1U<<31) | 10582, (1U<<31) | 6915, (1U<<31) | 10561, (1U<<31) | 10582, 
  (1U<<31) | 6915, (1U<<31) | 10561, (1U<<31) | 10582, 0xe4, 0xe5, 0x4444, 0x4444, 0x4455, 
  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x445, 0x445, 0x444, 
  0x444, 0x444, 0x444, 0x445, 0x445, 0x445, 0x445, 0x4455, 
  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x444, 0x445, 0x4455, 
  0x4455, 0x445, 0x444, 0x444, 0x444, 0x444, 0x4444, 0x4444, 
  0x4444, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 
  0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 0x5555, 
  0x5555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 0x555, 
  0x555, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x4455, 0x4455, 0x4455, 
  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x445, 0x445, 0x445, 
  0x445, 0x445, 0x445, 0x445, 0x445, 0x4455, 0x4455, 0x4455, 
  0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x445, 0x445, 0x445, 
  0x445, 0x445, 0x445, 0x445, 0x445, 0x444, 0x444, 0x444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x444, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 
  0x4455, 0x445, 0x445, 0x445, 0x445, 0x445, 0x445, 0x445, 
  0x445, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 0x4455, 
  0x4455, 0x444, 0x4444, 0x4444, 0x4444, 0x555, 0x555, 0x5555, 
  0x5555, 0x555, 0x555, 0x555, 0x555, 0x5555, 0x5555, 0x554, 
  0x554, 0x555, 0x555, 0x4455, 0x5555, 0x5555, 0x5555, 0x4455, 
  0x4455, 0x4455, 0x4455, 0x555, 0x555, 0x445, 0x444, 0x445, 
  0x444, 0x445, 0x445, 0x554, 0x554, 0x5555, 0x5555, 0x5555, 
  0x5555, 0x555, 0x555, 0x555, 0x555, 0x4555, 0x455, 0x454, 
  0x5555, 0x555, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x454, 
  0x454, 0x454, 0x454, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x445, 0x4455, 
  0x445, 0x4455, 0x5555, 0x5555, 0x555, 0x555, 0x5555, 0x5555, 
  0x555, 0x555, 0x4444, 0x4444, 0x4444, 0x5555, 0x5555, 0x555, 
  0x4455, 0x4455, 0x445, 0x445, 0x5555, 0x5555, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x5555, 0x555, 0x5555, 0x555, 0x5555, 
  0x555, 0x5555, 0x555, 0x5555, 0x554, 0x554, 0x554, 0x554, 
  0x554, 0x554, 0x554, 0x554, 0x4444, 0x455, 0x4555, 0x4555, 
  0x4555, 0x4555, 0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x444, 0x4444, 0x455, 0x455, 0x455, 0x4555, 0x4555, 0x4555, 
  0x4555, 0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 
  0x455, 0x455, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 0x455, 
  0x455, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x444, 
  0x454, 0x455, 0x455, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 
  0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x454, 
  0x455, 0x455, 0x44, 0x55, 0x44, 0x54, 0x44, 0x54, 
  0x44, 0x44, 0x54, 0x444, 0x444, 0x44, 0x54, 0x44, 
  0x54, 0x55, 0x4444, 0x544, 0x4455, 0x555, 0x44444, 0x5444, 
  0x44555, 0x5555, 0x55, 0x555, 0x455, 0x4555, 0x4555, 0x4555, 
  0x4555, 0x4555, 0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x455, 
  0x455, 0x455, 0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x444, 
  0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x455, 0x455, 0x455, 
  0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x455, 0x455, 0x444, 0x445, 0x554, 0x444, 
  0x444, 0x555, 0x555, 0x555, 0x555, 0x44ee, 0xe444ee, 0xe44ee, 
  0x45ee, 0xe544ee, 0xe54ee, 0x44ee, 0xe444ee, 0xe44ee, 0x44ee, 0xe444ee, 
  0xe44ee, 0x44ee, 0xe444ee, 0xe44ee, 0x4e4, 0x44, 0x44, 0x44444, 
  0x44444, 0x44444, 0x44444, 0x444, 0x444, 0x444, 0x444, 0x4555, 
  0x4555, 0x455, 0x455, 0x4555, 0x54, 0x54, 0x54, 0x55, 
  0x54, 0x55, 0x54, 0x55, 0x54, 0x55, 0x44, 0x45, 
  0x4555, 0x4555, 0x45, 0x45, 0x54, 0x555, 0x54, 0x555, 
  0x45, 0x45, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 
  0x454, 0x54, 0x4444, 0x544, 0x4455, 0x555, 0x444, 0x444, 
  0x444, 0x4444, 0x4444, 0x4444, 0x4444, 0x4444, 0x444, 0x5e4, 
  0x4444, 0x4444, 0x4444, 0x4455, 0x44555, 0x555, 0x555, 0x555, 
  0x555, 0x555, 0x555, 0x454, 0x454, 0x54, 0x455, 0x455, 
  0x4555, 0x4555, 0x4555, 0x4555, 0x4555, 0x444, 0x4444, 0x4444, 
  0x4444, 0x4444, 0x4444, 0x45, 0x555, 0x555, 0x44c4, 0x44d4, 
  0x4d4c, (1U<<31) | 7067, 0x4d4c, (1U<<31) | 7067, 0x44c, 0x44d, 0x44c, 0x44d, 
  0x44c, 0x44d, (1U<<31) | 375, (1U<<31) | 456, (1U<<31) | 375, (1U<<31) | 456, (1U<<31) | 377, (1U<<31) | 458, 
  (1U<<31) | 375, (1U<<31) | 456, (1U<<31) | 375, (1U<<31) | 456, (1U<<31) | 1626, (1U<<31) | 1678, (1U<<31) | 1626, (1U<<31) | 1678, 
  0xbf3f, 0xbf3f, (1U<<31) | 375, (1U<<31) | 456, (1U<<31) | 375, (1U<<31) | 456, (1U<<31) | 375, (1U<<31) | 456, 
  0x44d4d4d, (1U<<31) | 5644, (1U<<31) | 5447, (1U<<31) | 5642, 0x44d4d4d, (1U<<31) | 5644, (1U<<31) | 5447, (1U<<31) | 5642, 
  0x4e14c, 0x4e14d, (1U<<31) | 6892, (1U<<31) | 6900, (1U<<31) | 6892, (1U<<31) | 6900, 0x4e14c, 0x4e14d, 
  (1U<<31) | 6892, (1U<<31) | 6900, (1U<<31) | 6892, (1U<<31) | 6900, 0x4e14c, 0x4e14d, (1U<<31) | 6892, (1U<<31) | 6900, 
  (1U<<31) | 6892, (1U<<31) | 6900, 0x4e14c, 0x4e14d, (1U<<31) | 6892, (1U<<31) | 6900, (1U<<31) | 6892, (1U<<31) | 6900, 
  0x4c4e10, 0x4d4e10, 0x4c4e1e, 0x4d4e1e, 0x4c4e1e, 0x4d4e1e, 0x4c4e10, 0x4d4e10, 
  0x4c4e1e, 0x4d4e1e, 0x4c4e1e, 0x4d4e1e, 0x4c4e10, 0x4d4e10, 0x4c4e1e, 0x4d4e1e, 
  0x4c4e1e, 0x4d4e1e, (1U<<31) | 6741, (1U<<31) | 6885, 0x4c4e10, 0x4d4e10, 0x4c4e1e, 0x4d4e1e, 
  0x4c4e1e, 0x4d4e1e, (1U<<31) | 6741, (1U<<31) | 6885, 0x4c4e10, 0x4d4e10, 0x4c4e1e, 0x4d4e1e, 
  0x4c4e1e, 0x4d4e1e, (1U<<31) | 6741, (1U<<31) | 6885, 0x4c4e10, 0x4d4e10, 0x4c4e1e, 0x4d4e1e, 
  0x4c4e1e, 0x4d4e1e, (1U<<31) | 6741, (1U<<31) | 6885, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 
  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 
  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 6703, (1U<<31) | 6816, (1U<<31) | 6703, (1U<<31) | 6816, 
  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 388, (1U<<31) | 463, (1U<<31) | 6702, (1U<<31) | 6815, 
  (1U<<31) | 400, (1U<<31) | 475, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 6703, (1U<<31) | 6816, (1U<<31) | 6703, (1U<<31) | 6816, 0x4c4c4c, 0x4d4d4d, 
  0x4d4d4d, (1U<<31) | 7072, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4d, (1U<<31) | 6846, 
  0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 
  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 
  (1U<<31) | 6703, (1U<<31) | 6816, (1U<<31) | 6703, (1U<<31) | 6816, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 
  0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, (1U<<31) | 1624, (1U<<31) | 1676, 
  (1U<<31) | 1622, (1U<<31) | 1674, (1U<<31) | 1624, (1U<<31) | 1676, (1U<<31) | 1622, (1U<<31) | 1674, (1U<<31) | 6667, (1U<<31) | 6781, 
  (1U<<31) | 6667, (1U<<31) | 6781, (1U<<31) | 5403, (1U<<31) | 5441, (1U<<31) | 5401, (1U<<31) | 5439, 0x44c4c, 0x44d4d, 
  0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4d4d, (1U<<31) | 6844, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c4c, 0x44d4d4d, 0x4c4d4c, (1U<<31) | 6871, 0x4c4d4c, (1U<<31) | 6871, 0x4c4d4c, (1U<<31) | 6871, 
  0x4c4d4c, (1U<<31) | 6871, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c4c, 0x44d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4d4d, (1U<<31) | 7074, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c, 0x4d4d, 
  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4d4c, (1U<<31) | 7067, 0x4c4c, 0x4d4d, 
  0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c, 0x4d4d, 0x4c4d, (1U<<31) | 6856, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4d, (1U<<31) | 6856, 0x4c4c, 0x4d4d, 0x4c4d, (1U<<31) | 6856, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c, 0x4d4d, 0x4c, 0x4d, 0x4d, (1U<<31) | 7057, 0x4c4c, 0x4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x44c4c4d, (1U<<31) | 5459, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 0x44d4c, (1U<<31) | 5636, 0x44d4c4c, (1U<<31) | 5634, 
  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x44d4c, (1U<<31) | 5636, 0x44d4c4c, (1U<<31) | 5634, 
  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 
  0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 
  0x4c44e0, 0x4d44e0, (1U<<31) | 6674, (1U<<31) | 6797, 0x4d44e0, (1U<<31) | 7060, (1U<<31) | 6788, (1U<<31) | 7051, 
  0x4c44e0, 0x4d44e0, (1U<<31) | 6674, (1U<<31) | 6797, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6695, (1U<<31) | 6808, (1U<<31) | 6693, (1U<<31) | 6806, 
  (1U<<31) | 6693, (1U<<31) | 6806, (1U<<31) | 6693, (1U<<31) | 6806, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c4c, 0x44d4d4d, 0x44c4c, 0x44d4d, 0x44c4c, 0x44d4d, 0x4c4c4c, 0x4d4d4d, 
  0x44c4c, 0x44d4d, 0x4c4c4c, 0x4d4d4d, 0x54c4c, 0x54d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c4c, 0x44d4d4d, (1U<<31) | 5419, (1U<<31) | 5447, (1U<<31) | 5419, (1U<<31) | 5447, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c4d, (1U<<31) | 5459, 0x44c4c4d, (1U<<31) | 5459, (1U<<31) | 5429, (1U<<31) | 5457, (1U<<31) | 5429, (1U<<31) | 5457, 
  0x44c4c4d, (1U<<31) | 5459, (1U<<31) | 6741, (1U<<31) | 6885, (1U<<31) | 6741, (1U<<31) | 6885, (1U<<31) | 6741, (1U<<31) | 6885, 
  (1U<<31) | 6741, (1U<<31) | 6885, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 0x4d4d4d, (1U<<31) | 7072, 
  0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 0x4d4d4d, (1U<<31) | 7072, 0x44d4d, (1U<<31) | 5646, 
  0x44d4d4d, (1U<<31) | 5644, 0x54c4c4c, 0x54d4d4d, 0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 
  0x54c4c4c, 0x54d4d4d, 0x54c4c4c, 0x54d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 
  0x4c4c4c, 0x4d4d4d, 0x44c4d, (1U<<31) | 5469, 0x44c4d4d, (1U<<31) | 5467, 0x4c4c4d, (1U<<31) | 6846, 
  0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4d, (1U<<31) | 6846, 0x44c4d, (1U<<31) | 5469, 0x44c4d4d, (1U<<31) | 5467, 0x44c4d4d, (1U<<31) | 5467, 
  0x44c4c, 0x44d4d, 0x44c4c, 0x44d4d, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 
  0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c4c, 0x4d4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4d4d, (1U<<31) | 6844, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, (1U<<31) | 8953, (1U<<31) | 8953, 
  0x44c4d, (1U<<31) | 5469, 0x44c4d4d, (1U<<31) | 5467, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 
  0x44c4d, (1U<<31) | 5469, 0x44c4d4d, (1U<<31) | 5467, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 
  0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d4d, (1U<<31) | 6844, 0x4c4c4c, 0x4d4d4d, (1U<<31) | 8953, (1U<<31) | 8953, 
  (1U<<31) | 8953, (1U<<31) | 8953, (1U<<31) | 6703, (1U<<31) | 6816, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4c4c, 0x4d4d, 
  0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, (1U<<31) | 395, (1U<<31) | 470, 
  (1U<<31) | 395, (1U<<31) | 470, (1U<<31) | 395, (1U<<31) | 470, 0x4c4c4c, 0x4d4d4d, 0x54c4d, (1U<<31) | 7224, 
  0x54c4d4d, (1U<<31) | 7222, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x444d4d, (1U<<31) | 5068, 
  0x444d4d4d, (1U<<31) | 5066, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c4c, 0x4d4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x44c4c4c, 0x44d4d4d, 0x54c4d, (1U<<31) | 7224, 
  0x54c4d4d, (1U<<31) | 7222, 0x444d4d, (1U<<31) | 5068, 0x444d4d4d, (1U<<31) | 5066, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c4c, 0x4d4d4d4d, 0x44c4c, 0x44d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x444d4d, (1U<<31) | 5068, 0x444d4d4d, (1U<<31) | 5066, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4d, (1U<<31) | 6856, 
  0x4c4c440, 0x4d4d440, 0x4c4c440, 0x4d4d440, (1U<<31) | 6721, (1U<<31) | 6834, 0x4c4d440, (1U<<31) | 6853, 
  0x4c4d440, (1U<<31) | 6853, (1U<<31) | 6731, (1U<<31) | 6861, 0x4c4c440, 0x4d4d440, 0x4c4c440, 0x4d4d440, 
  (1U<<31) | 6721, (1U<<31) | 6834, 0x4c4d, (1U<<31) | 6856, 0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c, 0x4d4d, 0x4c4c4c, 0x4d4d4d, 0x44c4c4d, (1U<<31) | 5459, 
  0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4d, (1U<<31) | 6846, 
  0x4c4c4c, 0x4d4d4d, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 6703, (1U<<31) | 6816, 
  (1U<<31) | 6703, (1U<<31) | 6816, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 388, (1U<<31) | 463, 
  (1U<<31) | 6702, (1U<<31) | 6815, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 6703, (1U<<31) | 6816, 
  (1U<<31) | 6703, (1U<<31) | 6816, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 0x4c4c4d, (1U<<31) | 6846, 
  0x4c4c4d, (1U<<31) | 6846, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 0x4c4c4c, 0x4d4d4d, 
  0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, 0x4c4c4d, (1U<<31) | 6846, 0x4c4c4c, 0x4d4d4d, 
  0x4d4d4d, (1U<<31) | 7072, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 6703, (1U<<31) | 6816, 
  (1U<<31) | 6703, (1U<<31) | 6816, 0x4c4c4c, 0x4d4d4d, 0x4d4d4d, (1U<<31) | 7072, (1U<<31) | 6712, (1U<<31) | 6825, 
  0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 
  0x44d4d, (1U<<31) | 5646, 0x44d4d4d, (1U<<31) | 5644, 0x4c4d, (1U<<31) | 6856, 0x4c4d, (1U<<31) | 6856, 
  0x4c4d4d, (1U<<31) | 6878, 0x4c4d4d, (1U<<31) | 6878, 0x4c4d, (1U<<31) | 6856, 0x4c4d, (1U<<31) | 6856, 
  0x4c4c4c, 0x4d4d4d, 0x4c4d, (1U<<31) | 6856, 0x4c4d, (1U<<31) | 6856, 0xe0, 0xe0, 
  0xe0, 0xe0, 0xe0, 0x4e0, 0x5e0, 0xee0, 0x4, 0x4, 
  0xe0, 0xe0, 0x4, 0x44eee, 0x44eee, 0x44eee, 0x44eee, 0x44eee, 
  0x44eee, 0x444ee, 0x445ee, 0x444ee, 0x444ee, 0x444ee, 0x4e0, 0xe0, 
  0x4ee0, 0x44e0, 0x550, 0x550, 0x40, 0x5550, 0x4440, 0x44, 
  0x444, 0x454, 0x444, 0x444, 0x444, 0x454, 0x444, 0x444, 
  0x45, 0x44, 0x455, 0x444, 0x4555, 0x4444, 0x40, 0x40, 
  0x44, 0x45, 0x44, 0x44, 0x440, 0x450, 0x440, 0x440, 
  0x555, 0x550, 0x4444f4, 0x5554f5, 0x55554f5, 0x55554f5, 0x4444f4, 0x5554f5, 
  0x4444f4, 0x5554f5, 0x4444f4, 0x5554f5, 0x4444f4, 0x5554f5, 0x4444f4, 0x5554f5, 
  0x55554f5, 0x44, 0x440, 0x40, 0x3939, 0x2a2a, 0x44, 0x2c2c2c, 
  0x595959, 0x3b3b3b, 0x4a4a4a, 0x393939, 0x393939, 0x444, 0x393939, 0x393939, 
  0x444, 0x444, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x444, 0x393939, 
  0x2a2a2a, 0x393939, 0x2a2a2a, 0x2a2a2a, 0x2a2a2a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x444, 0x2c2c2c, 0x42c2c, 
  0x4444, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x4444, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 
  0x43b3b, 0x44a4a, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 0x42c2c2c, 0x4595959, 
  0x43b3b3b, 0x44a4a4a, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 0x42c2c2c, 0x4595959, 
  0x43b3b3b, 0x44a4a4a, 0x44, 0x2c2c2c2c, 0x42c2c2c, 0x2c2c2c2c, 0x42c2c2c, 0x2c2c2c, 
  0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c4, 
  0x594, 0x3b4, 0x2c4, 0x4a4, 0x4, 0x2c2c2c2c, 0x42c2c2c, 0x2c2c2c, 
  0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c4, 
  0x594, 0x3b4, 0x2c4, 0x4a4, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x44, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x39390, 0x39390, 0x39390, 
  0x2a2a4, 0x2a2a4, 0x2a2a4, 0x2a2a4, 0x2a2a4, 0x2a2a4, 0x2a2a0, 0x2a2a0, 
  0x2a2a0, 0x42c4, 0x4595, 0x43b4, 0x44a4, 0x42c4, 0x4595, 0x43b4, 
  0x44a4, 0x440, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x4555, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 
  0x3b3b4a, 0x393955, 0x4a4a5959, 0x2c2c3b3b, 0x3b3b4a4a, 0x4a4a5959, 0x2c2c3b3b, 0x3b3b4a4a, 
  0x393955, 0x4455, 0x393955, 0x393955, 0x2a2a55, 0x2a2a55, 0x393955, 0x393955, 
  0x393955, 0x4455, 0x393955, 0x393955, 0x2a2a55, 0x2a2a55, 0x4a4a5959, 0x2c2c3b3b, 
  0x3b3b4a4a, 0x4a4a5959, 0x2c2c3b3b, 0x3b3b4a4a, 0x393955, 0x454, 0x454, 0x454, 
  0x454, 0x454, 0x454, 0x898989, 0x7a7a7a, 0x898959, 0x7a7a4a, 0x898959, 
  0x7a7a4a, 0x8959, 0x7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 
  0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 
  0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898989, 0x7a7a7a, 0x7a7a6b, 
  0x89897a, 0x598989, 0x4a7a7a, 0x7a89, 0x6b7a, 0x7a89, 0x6b7a, 0x5989, 
  0x4a7a, 0x5989, 0x4a7a, 0x4a89, 0x3b7a, 0x4a89, 0x3b7a, 0x42c, 
  0x559, 0x43b, 0x44a, 0x8989, 0x7a7a, (1U<<31) | 9505, 0x7a7a7a7a, 0x898989, 
  0x7a7a7a, 0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, (1U<<31) | 9505, 
  0x7a7a7a7a, 0x898989, 0x7a7a7a, 0x8989, 0x7a7a, 0x8989, 0x7a7a, 0x8989, 
  0x7a7a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 
  0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x8989, 0x7a7a, 0x898989, 
  0x7a7a7a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 0x7a7a4a, 0x898959, 
  0x7a7a4a, 0x898959, 0x7a7a4a, 0x8959, 0x7a4a, 0x8959, 0x7a4a, 0x7a7a3b, 
  0x89894a, 0x8959, 0x7a4a, 0x8959, 0x7a4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 
  0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, 
  0x3b3b4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x442c2c, 0x545959, 0x443b3b, 0x444a4a, 0x444, 0x2c42c2c, 0x5945959, 
  0x3b43b3b, 0x4a44a4a, 0x4e4, 0x4e2c, 0x4e59, 0x4e3b, 0x4e4a, 0x42c, 
  0x459, 0x43b, 0x44a, 0x4e59, 0x4e4a, 0x4e4, 0x4444, 0x4e4, 
  0x4455, 0x3b3b3b3b, 0x4a4a4a4a, 0x3b3b3b3b, 0x4a4a4a4a, 0x4455, 0x2c2c2c2c, 0x59595959, 
  0x3b3b3b3b, 0x4a4a4a4a, 0x393955, 0x393955, 0x393955, 0x393955, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 
  0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 
  0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x444, 0x2c2c, 
  0x4455, 0x3b3b3b3b, 0x4a4a4a4a, 0x3b3b3b3b, 0x4a4a4a4a, 0x4455, 0x2c2c2c2c, 0x59595959, 
  0x3b3b3b3b, 0x4a4a4a4a, 0x455, 0x393939, 0x3b3b3b, 0x4a4a4a, 0x393939, 0x39394, 
  0x39394, 0x392a39, 0x392a39, 0x393939, 0x444, 0x393939, 0x444, 0x3b3b3b, 
  0x4a4a4a, 0x393955, 0x393955, 0x445, 0x445, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c, 0x5959, 0x3b3b, 0x4a4a, 0x2c2c, 0x5959, 0x3b3b, 
  0x4a4a, 0x2c2c2c, 0x42c2c, 0x2c2c2c, 0x42c2c, 0x393939, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c, 0x5959, 
  0x3b3b, 0x4a4a, 0x393939, 0x2a2a2a, 0x394, 0x394, 0x2a39, 0x2a39, 
  0x2a39, 0x2a39, 0x2a39, 0x2a39, 0x2a39, 0x2a39, 0x39392a, 0x44439, 
  0x44439, 0x4439, 0x39392a, 0x4439, 0x39392a, 0x4444, 0x2a4, 0x44, 
  0x439, 0x42a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 
  0x43b3b, 0x44a4a, 0x42c2c, 0x43b3b, 0x44a4a, 0x455, 0x43939, 0x42a2a, 
  0x43939, 0x444, 0x43939, 0x42a2a, 0x43939, 0x42a2a, 0x444, 0x43939, 
  0x42a2a, 0x42c2c2c, 0x4595959, 0x43b3b3b, 0x44a4a4a, 0x42c2c2c, 0x4595959, 0x43b3b3b, 
  0x44a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x42c2c, 0x45959, 0x43b3b, 
  0x44a4a, 0x4e2c0, 0x4e590, 0x4e3b0, 0x4e4a0, 0x4e590, 0x4e4a0, 0x393939, 
  0x393939, 0x444, 0x393939, 0x393939, 0x444, 0x444, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x393939, 0x2a2a2a, 
  0x393939, 0x2a2a2a, 0x2a2a2a, 0x2a2a2a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
  0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 
  0x440, 0x2c2c2c, 0x42c2c, (1U<<31) | 14215, (1U<<31) | 14168, 0x888, 0x777, 0x777, 
  0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 0x888, 0x777, 
  0x777, 0x7fcf2f, 0x7fcf2f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7f7fcf1f, 
  0x7f7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x44f4, 
  0x44f4, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 0x7fcf1f, 
  0x7fcf1f, 0x40, 0x40, 0x440, 0x0, 0x0, 0x0, 0x40, 
  0x40, 0x440, 0x0, 0x44, 0x44, 0x44, (1U<<31) | 14161, (1U<<31) | 14161, 
  0x85, 0x74, 0x47, 0x58, 0x88, 0x77, 0x77, 0x4f0, 
  0x4f0, 0x77, 0x77, (1U<<31) | 422, (1U<<31) | 1630, (1U<<31) | 422, (1U<<31) | 1630, (1U<<31) | 422, 
  (1U<<31) | 1630, (1U<<31) | 422, (1U<<31) | 1630, 0x0, 0xe0, 0xe0, (1U<<31) | 1185, (1U<<31) | 1185, 
  0x0, 0x40, 0x87, 0x87, 0x87, 0x87, 0x87, 0x87, 
  0x87, 0x87, 0x84, 0x84, 0x84, 0x84, 0x84, 0x84, 
  0x85, 0x85, 0x85, 0x85, 0x84, 0x84, 0x84, 0x84, 
  0x85, 0x85, 0x85, 0x85, 0x777, 0x777, 0x888, 0x777, 
  0x777, 0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 0x888, 
  0x777, 0x777, 0x88, 0x77, 0x66, 0x6969, 0x77, (1U<<31) | 9472, 
  (1U<<31) | 9472, (1U<<31) | 9472, (1U<<31) | 9472, 0x73, 0x73, 0x74, 0x74, 0x74, 
  0x74, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 0x75, 
  0x75, 0x75, 0x75, 0x75, 0x75, 0x74, 0x74, 0x74, 
  0x74, 0x74, 0x74, 0x74, 0x74, 0x74, 0x75, 0x75, 
  0x75, 0x75, 0x75, 0x75, 0x75, 0x75, 0x88, 0x77, 
  0x77, 0x0, (1U<<31) | 8587, (1U<<31) | 8587, (1U<<31) | 8587, (1U<<31) | 8587, 0x7769, 0x7769, 
  0x7769, 0x7769, 0x88, 0x77, 0x77, 0x8888, 0x7777, 0x7777, 
  (1U<<31) | 14213, (1U<<31) | 14164, 0x8888, 0x7777, 0x6666, 0x69696969, (1U<<31) | 14213, (1U<<31) | 14164, 
  0x7777, 0x6666, 0x69696969, (1U<<31) | 14213, (1U<<31) | 14164, 0x6666, 0x69696969, (1U<<31) | 14213, 
  (1U<<31) | 14164, 0x6666, 0x69696969, (1U<<31) | 14213, (1U<<31) | 14164, 0x6666, 0x69696969, (1U<<31) | 14213, 
  (1U<<31) | 14164, 0x6666, 0x69696969, 0x8888, 0x7777, 0x7777, 0x8888, 0x7777, 
  0x7777, (1U<<31) | 14214, (1U<<31) | 14166, 0x888, 0x777, 0x666, 0x696969, (1U<<31) | 14214, 
  (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 
  0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 
  0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 0x696969, 
  (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 
  0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x888, 0x777, 0x666, 0x696969, 
  (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 
  0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, 
  (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 
  0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 0x777, 0x666, 0x696969, (1U<<31) | 14214, (1U<<31) | 14166, 
  0x777, 0x666, 0x696969, 0x4444, 0xe4, (1U<<31) | 1189, 0x48, 0x48, 
  0x48, 0x48, 0x47, 0x47, 0x47, 0x47, 0x1, 0xe1, 
  0xe1, 0xe1, 0xe1, 0xe1, 0x51, 0x51, 0x51, 0x4cf2f, 
  0x4cf1f, 0x4cf4f, 0x4f4, 0x4f4, (1U<<31) | 6947, (1U<<31) | 6947, (1U<<31) | 6939, (1U<<31) | 6939, 
  0x4cf2f, 0x4cf1f, 0x4cf4f, 0x88, 0x77, 0x77, 0x58, 0x58, 
  0x58, 0x58, 0x57, 0x57, 0x57, 0x57, 0x448, 0x4ee, 
  (1U<<31) | 2323, (1U<<31) | 3744, (1U<<31) | 7199, 0x444, 0x544, 0xe5, 0xe5, 0x4e5, 
  (1U<<31) | 2329, (1U<<31) | 1193, 0x4e5, (1U<<31) | 2329, (1U<<31) | 1193, 0x4e0, (1U<<31) | 2318, 0xe0, 
  (1U<<31) | 1185, 0x54, 0x5e1, (1U<<31) | 7183, 0x0, 0x0, 0x0, (1U<<31) | 4023, 
  (1U<<31) | 4007, (1U<<31) | 4142, (1U<<31) | 8032, (1U<<31) | 7654, (1U<<31) | 8157, (1U<<31) | 7702, (1U<<31) | 8179, (1U<<31) | 4023, 
  (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4023, 
  (1U<<31) | 4023, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4023, 
  (1U<<31) | 4023, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4023, (1U<<31) | 4023, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 8048, 
  (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4007, (1U<<31) | 4007, 
  (1U<<31) | 8048, (1U<<31) | 7724, (1U<<31) | 8201, (1U<<31) | 8032, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, 
  (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, 
  (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 7628, (1U<<31) | 7676, (1U<<31) | 8495, (1U<<31) | 7628, 
  (1U<<31) | 7676, (1U<<31) | 8495, (1U<<31) | 7628, (1U<<31) | 7676, (1U<<31) | 8495, (1U<<31) | 9475, (1U<<31) | 7628, (1U<<31) | 7676, 
  (1U<<31) | 8495, (1U<<31) | 4023, (1U<<31) | 4007, (1U<<31) | 4142, 0x88, 0x77, 0x33, 0x44, 
  0x55, 0xcf4f, 0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 
  0x888, 0x777, 0x777, 0x888, 0x777, 0x777, 0x444, 0x444, 
  0x444, 0x555, 0x444, 0x555, (1U<<31) | 14215, (1U<<31) | 14168, 0x4444, 0xcf4f, 
  0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 0xcf4f, 
  0x88, 0x77, 0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 
  0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 0x4, 0x5, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x444, 0x444, 0x444, 0x444, 0x444, 0x444, 
  0x444, 0x444, 0x4f4, 0x444, 0x455, 0x455, 0x88, 0x77, 
  0x77, 0x88, 0x77, 0x77, 0x4444, 0x4444, 0x88, 0x77, 
  0x77, 0x4477, (1U<<31) | 5184, 0x4444, (1U<<31) | 3833, 0x4477, (1U<<31) | 5184, 0x4444, 
  (1U<<31) | 3833, 0x4477, (1U<<31) | 5184, 0x4444, (1U<<31) | 3833, 0x44747, (1U<<31) | 5191, 0x44444, 
  (1U<<31) | 3877, 0x44747, (1U<<31) | 5191, 0x44444, (1U<<31) | 3877, 0x44747, (1U<<31) | 5191, 0x44444, 
  (1U<<31) | 3877, 0x44747, (1U<<31) | 5191, 0x44444, (1U<<31) | 3877, 0x4477, (1U<<31) | 5184, 0x4444, 
  (1U<<31) | 3833, 0x77, 0x77, 0x77, 0x77, 0x77, 0x88, 0x77, 
  0x77, 0x88, 0x77, 0x77, 0x88, 0x77, 0x77, 0x88, 
  0x77, 0x77, 0x4453, 0x4453, 0x4453, 0x4454, 0x4454, 0x4454, 
  0x4455, 0x4455, 0x4455, 0x4453, 0x4453, 0x4453, (1U<<31) | 5104, (1U<<31) | 5104, 
  (1U<<31) | 5104, (1U<<31) | 5120, (1U<<31) | 5120, (1U<<31) | 5120, (1U<<31) | 5137, (1U<<31) | 5137, (1U<<31) | 5137, (1U<<31) | 5104, 
  (1U<<31) | 5104, (1U<<31) | 5104, (1U<<31) | 5095, (1U<<31) | 5095, (1U<<31) | 5095, (1U<<31) | 5111, (1U<<31) | 5111, (1U<<31) | 5111, 
  (1U<<31) | 5095, (1U<<31) | 5095, (1U<<31) | 5095, 0x453, 0x453, 0x453, 0x454, 0x454, 
  0x454, 0x455, 0x455, 0x455, 0x453, 0x453, 0x453, (1U<<31) | 5685, 
  (1U<<31) | 5685, (1U<<31) | 5685, (1U<<31) | 5708, (1U<<31) | 5708, (1U<<31) | 5708, (1U<<31) | 5723, (1U<<31) | 5723, (1U<<31) | 5723, 
  (1U<<31) | 5685, (1U<<31) | 5685, (1U<<31) | 5685, (1U<<31) | 5677, (1U<<31) | 5677, (1U<<31) | 5677, (1U<<31) | 5700, (1U<<31) | 5700, 
  (1U<<31) | 5700, (1U<<31) | 5677, (1U<<31) | 5677, (1U<<31) | 5677, 0x44453, 0x44453, 0x44453, 0x44454, 
  0x44454, 0x44454, 0x44455, 0x44455, 0x44455, 0x44453, 0x44453, 0x44453, 
  (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4398, (1U<<31) | 4398, (1U<<31) | 4398, (1U<<31) | 4417, (1U<<31) | 4417, 
  (1U<<31) | 4417, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4370, (1U<<31) | 4370, (1U<<31) | 4370, (1U<<31) | 4388, 
  (1U<<31) | 4388, (1U<<31) | 4388, (1U<<31) | 4370, (1U<<31) | 4370, (1U<<31) | 4370, 0x4453, 0x4453, 0x4453, 
  0x4454, 0x4454, 0x4454, 0x4455, 0x4455, 0x4455, 0x4453, 0x4453, 
  0x4453, (1U<<31) | 5104, (1U<<31) | 5104, (1U<<31) | 5104, (1U<<31) | 5120, (1U<<31) | 5120, (1U<<31) | 5120, (1U<<31) | 5137, 
  (1U<<31) | 5137, (1U<<31) | 5137, (1U<<31) | 5104, (1U<<31) | 5104, (1U<<31) | 5104, (1U<<31) | 5095, (1U<<31) | 5095, (1U<<31) | 5095, 
  (1U<<31) | 5111, (1U<<31) | 5111, (1U<<31) | 5111, (1U<<31) | 5095, (1U<<31) | 5095, (1U<<31) | 5095, 0x44453, 0x44453, 
  0x44453, 0x44454, 0x44454, 0x44454, 0x44455, 0x44455, 0x44455, 0x44453, 
  0x44453, 0x44453, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4398, (1U<<31) | 4398, (1U<<31) | 4398, 
  (1U<<31) | 4417, (1U<<31) | 4417, (1U<<31) | 4417, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4380, (1U<<31) | 4370, (1U<<31) | 4370, 
  (1U<<31) | 4370, (1U<<31) | 4388, (1U<<31) | 4388, (1U<<31) | 4388, (1U<<31) | 4370, (1U<<31) | 4370, (1U<<31) | 4370, 0x54, 
  0x54, 0x54, 0x54, 0x54, 0x54, 0x34450, 0x34450, 0x34450, 
  0x44450, 0x44450, 0x44450, 0x54450, 0x54450, 0x54450, 0x34450, 0x34450, 
  0x34450, 0x334450, 0x334450, 0x334450, 0x444450, 0x444450, 0x444450, 0x554450, 
  0x554450, 0x554450, 0x334450, 0x334450, 0x334450, 0x33334450, 0x33334450, 0x33334450, 
  0x44444450, 0x44444450, 0x44444450, 0x33334450, 0x33334450, 0x33334450, 0x3450, 0x3450, 
  0x3450, 0x4450, 0x4450, 0x4450, 0x5450, 0x5450, 0x5450, 0x3450, 
  0x3450, 0x3450, 0x33450, 0x33450, 0x33450, 0x44450, 0x44450, 0x44450, 
  0x55450, 0x55450, 0x55450, 0x33450, 0x33450, 0x33450, 0x3333450, 0x3333450, 
  0x3333450, 0x4444450, 0x4444450, 0x4444450, 0x3333450, 0x3333450, 0x3333450, 0x344450, 
  0x344450, 0x344450, 0x444450, 0x444450, 0x444450, 0x544450, 0x544450, 0x544450, 
  0x344450, 0x344450, 0x344450, 0x3344450, 0x3344450, 0x3344450, 0x4444450, 0x4444450, 
  0x4444450, 0x5544450, 0x5544450, 0x5544450, 0x3344450, 0x3344450, 0x3344450, (1U<<31) | 1028, 
  (1U<<31) | 1028, (1U<<31) | 1028, (1U<<31) | 4099, (1U<<31) | 4099, (1U<<31) | 4099, (1U<<31) | 1028, (1U<<31) | 1028, (1U<<31) | 1028, 
  0x34450, 0x34450, 0x34450, 0x44450, 0x44450, 0x44450, 0x54450, 0x54450, 
  0x54450, 0x34450, 0x34450, 0x34450, 0x334450, 0x334450, 0x334450, 0x444450, 
  0x444450, 0x444450, 0x554450, 0x554450, 0x554450, 0x334450, 0x334450, 0x334450, 
  0x33334450, 0x33334450, 0x33334450, 0x44444450, 0x44444450, 0x44444450, 0x33334450, 0x33334450, 
  0x33334450, 0x344450, 0x344450, 0x344450, 0x444450, 0x444450, 0x444450, 0x544450, 
  0x544450, 0x544450, 0x344450, 0x344450, 0x344450, 0x3344450, 0x3344450, 0x3344450, 
  0x4444450, 0x4444450, 0x4444450, 0x5544450, 0x5544450, 0x5544450, 0x3344450, 0x3344450, 
  0x3344450, (1U<<31) | 1028, (1U<<31) | 1028, (1U<<31) | 1028, (1U<<31) | 4099, (1U<<31) | 4099, (1U<<31) | 4099, (1U<<31) | 1028, 
  (1U<<31) | 1028, (1U<<31) | 1028, 0x34450, 0x44450, 0x34450, 0x334450, 0x444450, 0x334450, 
  0x33334450, 0x44444450, 0x33334450, 0x3450, 0x4450, 0x3450, 0x33450, 0x44450, 
  0x33450, 0x3333450, 0x4444450, 0x3333450, 0x344450, 0x444450, 0x344450, 0x3344450, 
  0x4444450, 0x3344450, (1U<<31) | 1028, (1U<<31) | 4099, (1U<<31) | 1028, 0x34450, 0x44450, 0x34450, 
  0x334450, 0x444450, 0x334450, 0x33334450, 0x44444450, 0x33334450, 0x344450, 0x444450, 
  0x344450, 0x3344450, 0x4444450, 0x3344450, (1U<<31) | 1028, (1U<<31) | 4099, (1U<<31) | 1028, 0x55, 
  (1U<<31) | 7967, (1U<<31) | 7955, (1U<<31) | 7955, (1U<<31) | 7885, (1U<<31) | 7874, (1U<<31) | 7874, (1U<<31) | 7811, (1U<<31) | 5144, 
  (1U<<31) | 7801, (1U<<31) | 5127, (1U<<31) | 7801, (1U<<31) | 5127, (1U<<31) | 8011, (1U<<31) | 8000, (1U<<31) | 8000, (1U<<31) | 7925, 
  (1U<<31) | 7915, (1U<<31) | 7915, (1U<<31) | 7847, (1U<<31) | 5729, (1U<<31) | 7838, (1U<<31) | 5714, (1U<<31) | 7838, (1U<<31) | 5714, 
  (1U<<31) | 8246, (1U<<31) | 8231, (1U<<31) | 8231, (1U<<31) | 7967, (1U<<31) | 7955, (1U<<31) | 7955, (1U<<31) | 7885, (1U<<31) | 4425, 
  (1U<<31) | 7874, (1U<<31) | 4406, (1U<<31) | 7874, (1U<<31) | 4406, (1U<<31) | 8302, (1U<<31) | 8288, (1U<<31) | 8288, (1U<<31) | 8011, 
  (1U<<31) | 8000, (1U<<31) | 8000, (1U<<31) | 7925, (1U<<31) | 5144, (1U<<31) | 7915, (1U<<31) | 5127, (1U<<31) | 7915, (1U<<31) | 5127, 
  (1U<<31) | 8554, (1U<<31) | 8537, (1U<<31) | 8537, (1U<<31) | 8134, (1U<<31) | 8122, (1U<<31) | 8122, (1U<<31) | 8011, (1U<<31) | 4425, 
  (1U<<31) | 8000, (1U<<31) | 4406, (1U<<31) | 8000, (1U<<31) | 4406, (1U<<31) | 8086, (1U<<31) | 8073, (1U<<31) | 8073, (1U<<31) | 7967, 
  (1U<<31) | 7955, (1U<<31) | 7955, (1U<<31) | 8134, (1U<<31) | 8122, (1U<<31) | 8122, (1U<<31) | 8011, (1U<<31) | 8000, (1U<<31) | 8000, 
  (1U<<31) | 7979, (1U<<31) | 7944, (1U<<31) | 7944, (1U<<31) | 7896, (1U<<31) | 7864, (1U<<31) | 7864, (1U<<31) | 7821, (1U<<31) | 5154, 
  (1U<<31) | 7792, (1U<<31) | 5111, (1U<<31) | 7792, (1U<<31) | 5111, (1U<<31) | 8022, (1U<<31) | 7990, (1U<<31) | 7990, (1U<<31) | 7935, 
  (1U<<31) | 7906, (1U<<31) | 7906, (1U<<31) | 7856, (1U<<31) | 5752, (1U<<31) | 7830, (1U<<31) | 5700, (1U<<31) | 7830, (1U<<31) | 5700, 
  (1U<<31) | 8261, (1U<<31) | 8217, (1U<<31) | 8217, (1U<<31) | 7979, (1U<<31) | 7944, (1U<<31) | 7944, (1U<<31) | 7896, (1U<<31) | 4436, 
  (1U<<31) | 7864, (1U<<31) | 4388, (1U<<31) | 7864, (1U<<31) | 4388, (1U<<31) | 8316, (1U<<31) | 8275, (1U<<31) | 8275, (1U<<31) | 8022, 
  (1U<<31) | 7990, (1U<<31) | 7990, (1U<<31) | 7935, (1U<<31) | 5154, (1U<<31) | 7906, (1U<<31) | 5111, (1U<<31) | 7906, (1U<<31) | 5111, 
  (1U<<31) | 8571, (1U<<31) | 8521, (1U<<31) | 8521, (1U<<31) | 8146, (1U<<31) | 8111, (1U<<31) | 8111, (1U<<31) | 8022, (1U<<31) | 4436, 
  (1U<<31) | 7990, (1U<<31) | 4388, (1U<<31) | 7990, (1U<<31) | 4388, (1U<<31) | 8099, (1U<<31) | 8061, (1U<<31) | 8061, (1U<<31) | 7979, 
  (1U<<31) | 7944, (1U<<31) | 7944, (1U<<31) | 8146, (1U<<31) | 8111, (1U<<31) | 8111, (1U<<31) | 8022, (1U<<31) | 7990, (1U<<31) | 7990, 
  (1U<<31) | 7046, 0x4f5, (1U<<31) | 7925, (1U<<31) | 7915, (1U<<31) | 7915, (1U<<31) | 7925, (1U<<31) | 7915, (1U<<31) | 7915, 
  (1U<<31) | 7925, (1U<<31) | 7915, (1U<<31) | 7915, (1U<<31) | 7925, (1U<<31) | 7915, (1U<<31) | 7915, (1U<<31) | 7935, (1U<<31) | 7906, 
  (1U<<31) | 7906, (1U<<31) | 7935, (1U<<31) | 7906, (1U<<31) | 7906, (1U<<31) | 7935, (1U<<31) | 7906, (1U<<31) | 7906, (1U<<31) | 7935, 
  (1U<<31) | 7906, (1U<<31) | 7906, 0x88, 0x77, 0x77, 0x54, 0x54, 0x54, 
  0x54, 0x54, 0x54, 0x54, 0x54, 0x48, 0x48, 0x48, 
  0x48, 0x47, 0x47, 0x47, 0x47, 0x58, 0x58, 0x58, 
  0x58, 0x57, 0x57, 0x57, 0x57, 0x11, 0x141, 0x11, 
  0x141, 0x14, 0x144, 0x11, 0x141, (1U<<31) | 6939, (1U<<31) | 6953, (1U<<31) | 6947, 
  (1U<<31) | 5528, (1U<<31) | 5544, (1U<<31) | 5537, (1U<<31) | 5537, (1U<<31) | 6947, (1U<<31) | 6939, (1U<<31) | 6953, (1U<<31) | 6947, 
  (1U<<31) | 5528, (1U<<31) | 5544, (1U<<31) | 5537, (1U<<31) | 5537, (1U<<31) | 6947, (1U<<31) | 6939, (1U<<31) | 6953, (1U<<31) | 6947, 
  (1U<<31) | 5528, (1U<<31) | 5544, (1U<<31) | 5537, (1U<<31) | 5537, (1U<<31) | 6947, (1U<<31) | 6939, (1U<<31) | 6953, (1U<<31) | 6947, 
  (1U<<31) | 5528, (1U<<31) | 5544, (1U<<31) | 5537, (1U<<31) | 5537, (1U<<31) | 6947, (1U<<31) | 6973, (1U<<31) | 6985, (1U<<31) | 6927, 
  (1U<<31) | 5565, (1U<<31) | 5578, (1U<<31) | 5515, (1U<<31) | 6973, (1U<<31) | 6985, (1U<<31) | 6927, (1U<<31) | 5565, (1U<<31) | 5578, 
  (1U<<31) | 5515, (1U<<31) | 8357, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, 
  (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 8357, (1U<<31) | 7528, 
  (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3985, 
  (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 8357, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, 
  (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, 
  (1U<<31) | 8357, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, 
  (1U<<31) | 8445, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 3985, (1U<<31) | 7516, (1U<<31) | 8383, (1U<<31) | 4077, 
  (1U<<31) | 5863, (1U<<31) | 5886, (1U<<31) | 4050, (1U<<31) | 7516, (1U<<31) | 8383, (1U<<31) | 4077, (1U<<31) | 5863, (1U<<31) | 5886, 
  (1U<<31) | 4050, (1U<<31) | 5528, (1U<<31) | 6939, (1U<<31) | 5528, (1U<<31) | 6939, (1U<<31) | 5528, (1U<<31) | 6939, (1U<<31) | 5528, 
  (1U<<31) | 6939, (1U<<31) | 6985, (1U<<31) | 5578, (1U<<31) | 6985, (1U<<31) | 5578, (1U<<31) | 8357, (1U<<31) | 8357, (1U<<31) | 8357, 
  (1U<<31) | 8357, (1U<<31) | 8383, (1U<<31) | 5886, (1U<<31) | 8383, (1U<<31) | 5886, (1U<<31) | 6927, (1U<<31) | 6953, (1U<<31) | 6939, 
  (1U<<31) | 5515, (1U<<31) | 5544, (1U<<31) | 5528, (1U<<31) | 5528, (1U<<31) | 6939, (1U<<31) | 6927, (1U<<31) | 6953, (1U<<31) | 6939, 
  (1U<<31) | 5515, (1U<<31) | 5544, (1U<<31) | 5528, (1U<<31) | 5528, (1U<<31) | 6939, (1U<<31) | 6947, (1U<<31) | 6953, 0x4f4, 
  (1U<<31) | 5537, (1U<<31) | 5544, 0x44f4, 0x44f4, 0x4f4, (1U<<31) | 6947, (1U<<31) | 6953, 0x4f4, 
  (1U<<31) | 5537, (1U<<31) | 5544, 0x44f4, 0x44f4, 0x4f4, (1U<<31) | 6973, (1U<<31) | 6985, (1U<<31) | 6927, 
  (1U<<31) | 5565, (1U<<31) | 5578, (1U<<31) | 5515, (1U<<31) | 6973, (1U<<31) | 6985, (1U<<31) | 6927, (1U<<31) | 5565, (1U<<31) | 5578, 
  (1U<<31) | 5515, (1U<<31) | 8329, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, 
  (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 8329, (1U<<31) | 7528, 
  (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3962, 
  (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 8329, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, 
  (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, 
  (1U<<31) | 8329, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, 
  (1U<<31) | 8445, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 7516, (1U<<31) | 8383, (1U<<31) | 4077, 
  (1U<<31) | 5863, (1U<<31) | 5886, (1U<<31) | 4050, (1U<<31) | 7516, (1U<<31) | 8383, (1U<<31) | 4077, (1U<<31) | 5863, (1U<<31) | 5886, 
  (1U<<31) | 4050, (1U<<31) | 6947, (1U<<31) | 6953, 0x4f4, (1U<<31) | 5537, (1U<<31) | 5544, 0x44f4, 0x44f4, 
  0x4f4, (1U<<31) | 6947, (1U<<31) | 6953, 0x4f4, (1U<<31) | 5537, (1U<<31) | 5544, 0x44f4, 0x44f4, 
  0x4f4, (1U<<31) | 6927, (1U<<31) | 6953, (1U<<31) | 6939, (1U<<31) | 5515, (1U<<31) | 5544, (1U<<31) | 5528, (1U<<31) | 5528, 
  (1U<<31) | 6939, (1U<<31) | 6927, (1U<<31) | 6953, (1U<<31) | 6939, (1U<<31) | 5515, (1U<<31) | 5544, (1U<<31) | 5528, (1U<<31) | 5528, 
  (1U<<31) | 6939, (1U<<31) | 6973, (1U<<31) | 6985, (1U<<31) | 6927, (1U<<31) | 5565, (1U<<31) | 5578, (1U<<31) | 5515, (1U<<31) | 6973, 
  (1U<<31) | 6985, (1U<<31) | 6927, (1U<<31) | 5565, (1U<<31) | 5578, (1U<<31) | 5515, (1U<<31) | 8329, (1U<<31) | 7528, (1U<<31) | 7528, 
  (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3962, (1U<<31) | 3962, 
  (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 8329, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, 
  (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 8329, 
  (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, 
  (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 8329, (1U<<31) | 7528, (1U<<31) | 7528, (1U<<31) | 8395, 
  (1U<<31) | 8395, (1U<<31) | 7578, (1U<<31) | 7578, (1U<<31) | 8445, (1U<<31) | 8445, (1U<<31) | 3962, (1U<<31) | 3962, (1U<<31) | 3962, 
  (1U<<31) | 3962, (1U<<31) | 7516, (1U<<31) | 8383, (1U<<31) | 4077, (1U<<31) | 5863, (1U<<31) | 5886, (1U<<31) | 4050, (1U<<31) | 7516, 
  (1U<<31) | 8383, (1U<<31) | 4077, (1U<<31) | 5863, (1U<<31) | 5886, (1U<<31) | 4050, 0x4f4, 0x44f4, 0x4f4, 
  0x44f4, (1U<<31) | 6947, (1U<<31) | 5537, (1U<<31) | 6947, (1U<<31) | 5537, (1U<<31) | 4142, (1U<<31) | 4142, 0x444f0, 
  0x4444f0, 0x444f0, 0x4444f0, 0x4f4, 0x44f4, 0x44f4, 0x4f4, 0x4f4, 
  0x44f4, 0x44f4, 0x4f4, (1U<<31) | 6947, (1U<<31) | 5537, (1U<<31) | 6947, (1U<<31) | 5537, (1U<<31) | 4142, 
  (1U<<31) | 4142, (1U<<31) | 4142, (1U<<31) | 4142, 0x444f0, 0x4444f0, 0x444f0, 0x4444f0, 0x4f8, 
  0x44f8, 0x4f8, 0x44f8, 0x4f8, 0x44f8, 0x4f8, 0x44f8, (1U<<31) | 6997, 
  (1U<<31) | 5591, (1U<<31) | 6997, (1U<<31) | 5591, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, 
  (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, 
  (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, (1U<<31) | 9475, 0x884f0, 
  0x4884f0, 0x884f0, 0x4884f0, 0x4555, (1U<<31) | 14154, 0x595959, 0x595959, 0x595959, 
  0x595959, 0x2c2c2c2c, 0x2c2c2c2c, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x5959, 
  0x445959, 0x444a4a, 0x40, 0x0, 0x44e0, 0x44e0, 0x44e0, 0x44e0, 
  0xe2c, 0xe3b, 0xe4a, 0xe2c, 0xe2c, 0xe4a, 0xe4a, 0x3b, 
  0x4a0, 0x52c, 0x559, 0x53b, (1U<<31) | 7508, 0x54a, 0xe2c0, 0xe3b0, 
  0xe4a0, 0xe4a0, 0xe4a0, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, (1U<<31) | 14140, 0x4a4a4a, 
  (1U<<31) | 14138, (1U<<31) | 14138, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 
  0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c5959, 0x2c2c59, 
  0x44a7a, 0x595959, 0x44a7a, 0x42c2c, 0x42c2c, 0x595959, 0x2c4, 0x7a7a4a, 
  0x7a7a44, 0x7a7a4a, 0x7a7a44, 0x2c2c2c, 0x2c2c44, 0x595959, 0x595944, 0x3b3b3b, 
  0x3b3b44, (1U<<31) | 14140, (1U<<31) | 14131, 0x4a4a4a, 0x4a4a44, 0x7a7a4a, 0x7a7a44, 0x7a7a4a, 
  0x7a7a44, 0x2c2c2c, 0x2c2c44, 0x595959, 0x595944, 0x3b3b3b, 0x3b3b44, (1U<<31) | 14140, 
  (1U<<31) | 14131, 0x4a4a4a, 0x4a4a44, 0x2c2c2c, 0x2c2c44, 0x595959, 0x595944, 0x3b3b3b, 
  0x3b3b44, (1U<<31) | 14140, (1U<<31) | 14131, 0x4a4a4a, 0x4a4a44, 0x2c2c2c, 0x2c2c44, 0x3b3b3b, 
  0x3b3b44, 0x4a4a4a, 0x4a4a44, 0x2c2c2c, 0x2c2c44, 0x3b3b3b, 0x3b3b44, 0x4a4a4a, 
  0x4a4a44, 0x42c5, 0x4595, 0x43b5, 0x44a5, 0x47a4a, 0x47a4a, 0x595959, 
  0x2c4, 0x595959, (1U<<31) | 14140, 0x4a4a4a, 0x595959, (1U<<31) | 14140, 0x4a4a4a, 0x2c2c, 
  0x5959, 0x3b3b, (1U<<31) | 14133, 0x4a4a, 0x7a7a, 0x4595959, 0x4595959, 0x42c2c59, 
  0x42c2c59, 0x43b3b59, 0x43b3b59, 0x44a4a59, 0x44a4a59, 0x2c4, 0x594, 0x3b4, 
  (1U<<31) | 14118, 0x4a4, 0x2c59, 0x2c4a, (1U<<31) | 7408, 0x3b59, 0x3b4a, 0x4a59, 
  0x2c2c, (1U<<31) | 7107, 0x442c2c, 0x442c2c, 0x2c42c2c, 0x2c42c2c, 0x455959, 0x555959, 
  0x555959, 0x443b3b, 0x443b3b, 0x3b43b3b, 0x3b43b3b, 0x444a4a, 0x444a4a, 0x444a4a, 
  0x4a44a4a, 0x4a44a4a, 0x7a7a, 0x7a7a7a7a, 0x7a7a7a, 0x2c2c2c, 0x595959, 0x3b3b3b, 
  0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x3b3b3b3b, 0x3b3b3b3b, 0x7a7a7a, 
  0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x595959, 0x3b3b3b, 0x4a4a4a, 
  0x3b3b3b3b, (1U<<31) | 14122, 0x4a2c2c4a, 0x4a3b3b4a, 0x4a3b3b4a, 0x4a2c2c4a, (1U<<31) | 14122, 0x4a3b3b4a, 
  0x4a3b3b4a, 0x2c2c3b, (1U<<31) | 7401, 0x3b3b4a, 0x4a4a59, 0x2c2c3b, (1U<<31) | 7401, 0x3b3b4a, 
  0x4a4a59, 0x595959, 0x4a4a4a, 0x595959, 0x4a4a4a, 0x2c2c3b, (1U<<31) | 7401, 0x3b3b4a, 
  0x4a4a59, 0x2c2c3b, (1U<<31) | 7401, 0x3b3b4a, 0x4a4a59, 0x7a7a7a7a, 0x595959, 0x2c4a4a4a, 
  0x595959, 0x4a4a3b, 0x59594a, 0x59594a, 0x3b3b2c, 0x3b3b2c, 0x4a4a3b, 0x4a4a3b, 
  0x59594a, 0x3b3b2c, 0x4a4a3b, 0x5959, (1U<<31) | 14133, 0x4a4a, 0x7a7a, 0x7a7a, 
  0x7a7a, 0x7a7a, 0x7a7a, 0x2c2c2c, 0x595959, 0x59595959, 0x595959, 0x3b3b3b, 
  (1U<<31) | 14138, (1U<<31) | 14140, 0x4a4a4a, 0x4a4a4a4a, 0x4a4a4a, 0x7a7a, 0x4a4a4a4a, 0x4a4a4a, 
  0x2c2c2c, 0x42c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x4a4a4a, 0x4a4a4a, 0x2c2c2c, 
  0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x42c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 0x4a4a4a, 
  0x2c2c, 0x2c44, 0x2c2c, 0x2c44, 0x3b3b, 0x3b44, 0x3b3b, 0x3b44, 
  (1U<<31) | 14140, 0x4a4a4a, (1U<<31) | 14138, (1U<<31) | 14138, 0x2c2c2c, 0x3b3b3b, 0x4a4a4a, 0x2c2c2c, 
  0x3b3b3b, 0x4a4a4a, 0x4a4a4a, 0x4a2c4a, 0x4a3b4a, 0x4a2c4a, 0x4a4a4a, 0x3b4a, 
  0x2c3b, 0x3b4a, 0x4a59, 0x3b4a, 0x2c3b, 0x3b4a, 0x4a59, (1U<<31) | 10607, 
  0xe550, (1U<<31) | 7290, (1U<<31) | 7290, (1U<<31) | 7290, (1U<<31) | 7290, (1U<<31) | 7290, (1U<<31) | 7290, (1U<<31) | 7290, 
  0x42c2c2c, 0x2c2c44, 0x42c2c2c, 0x2c2c44, 0x555, 0xf0, 0x555, (1U<<31) | 12516, 
  0x555, 0x4444, (1U<<31) | 7273, 0x555, 0x555, 0x884, 0x884, 0x884, 
  0x884, (1U<<31) | 14158, (1U<<31) | 14237, 0x5, 0x4, 0x5, 0xe0, 0xe0, 
  0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0x4e0, 
  0xe0, 0x4e0, 0xe0, 0xe0, 0xe0, 0xe0, 0x555, 0x555, 
  (1U<<31) | 14154, 0x444, 0x444, 0x0, 0x84, 0x85, 0x88, 0x88, 
  0x88, 0x88, 0x88, 0x88, 0x88, 0x88, (1U<<31) | 14153, 0x8888, 
  0x7777, 0x88, 0x77, 0x8888, 0x7777, 0x7f7f7f2f, 0x88, 0x77, 
  0x88, 0x77, 0x8888, 0x7777, 0x5, 0x5, 0x5, 0x5, 
  0xe0, 0x588, 0x0, 0x0, 0x0, 0x0, 0xe4, 0xe4, 
  0xe5, 0x0, 0x5555, 0x5555, 0x5555, (1U<<31) | 13962, (1U<<31) | 13942, (1U<<31) | 13936, 
  0x4, 0x41f, 0x4, (1U<<31) | 13962, (1U<<31) | 13942, (1U<<31) | 13936, (1U<<31) | 918, (1U<<31) | 444, 
  (1U<<31) | 3851, (1U<<31) | 3849, (1U<<31) | 3849, (1U<<31) | 3849, (1U<<31) | 3849, (1U<<31) | 3851, (1U<<31) | 3849, (1U<<31) | 3849, 
  (1U<<31) | 3849, (1U<<31) | 3849, (1U<<31) | 3816, (1U<<31) | 3814, (1U<<31) | 3814, (1U<<31) | 3814, (1U<<31) | 3814, (1U<<31) | 3805, 
  (1U<<31) | 3803, (1U<<31) | 3803, (1U<<31) | 3803, (1U<<31) | 3803, (1U<<31) | 3851, (1U<<31) | 3849, (1U<<31) | 3851, (1U<<31) | 3849, 
  (1U<<31) | 3851, (1U<<31) | 3849, (1U<<31) | 3851, (1U<<31) | 3849, (1U<<31) | 3849, (1U<<31) | 904, (1U<<31) | 902, (1U<<31) | 902, 
  (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 904, (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 904, 
  (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 895, (1U<<31) | 893, (1U<<31) | 893, (1U<<31) | 893, 
  (1U<<31) | 893, (1U<<31) | 904, (1U<<31) | 902, (1U<<31) | 904, (1U<<31) | 902, (1U<<31) | 904, (1U<<31) | 902, (1U<<31) | 904, 
  (1U<<31) | 902, (1U<<31) | 902, (1U<<31) | 434, (1U<<31) | 434, (1U<<31) | 436, 0x40, 0x40, 0x840, 
  0x440, 0x40, 0x1f40, (1U<<31) | 14154, 0x555, 0x555, 0x444, 0x444, 
  (1U<<31) | 9483, 0x555, 0x555, 0x9f1f, 0x8, (1U<<31) | 14147, (1U<<31) | 7512, 0x50, 
  0x50, 0x50, 0x50, 0x555, 0x88, 0x48, (1U<<31) | 14155, 0x4e4, 
  0x5e4, 0x8e0, 0x4e4, 0xe40, 0xe40, 0xe50, 0x4e4, (1U<<31) | 14154, 
  0x0, 0x44, 0x4444, 0x4444, 0x4444, 0x4444, 0x44, 0x4, 
  0x4550, 0x44, 0x4, 0x42f4, 0x40, 0x50, 0x4, 0x44, 
  0x4, (1U<<31) | 14150, 0x44, 0x4, 0x5, 0x4440, (1U<<31) | 7126, (1U<<31) | 911, 
  (1U<<31) | 498, 0xe89, 0xe89, 0x5e4a, 0x5e4a, (1U<<31) | 10551, 0xe4a, 0xe4a, 
  0xe890, 0xe890, 0x5e4a0, 0x5e4a0, (1U<<31) | 10550, 0xe4a0, 0xe4a0, 0x888, 
  0x888, 0x898959, 0x898944, 0x7a7a4a, 0x7a7a44, 0x898959, 0x898944, 0x7a7a4a, 
  0x7a7a44, 0x898959, 0x898944, 0x7a7a4a, 0x7a7a44, 0x2c2c, 0x897a, 0x894a, 
  0x894a, 0x3b7a, 0x2c2c, 0x7a89, 0x7a7a, 0x7a59, 0x7a59, 0x597a, 
  0x4a89, 0x597a, 0x4a89, 0x898989, 0x7a7a7a, 0x595989, 0x4a4a7a, 0x898989, 
  0x7a7a7a, 0x898989, 0x7a7a7a, 0x8989, 0x8989, 0x7a7a, 0x7a7a, 0x8989, 
  0x7a7a, 0x89894, 0x7a7a4, 0x42c4, 0x894, 0x7a4, 0x48959, 0x47a4a, 
  0x8959, 0x7a4a, 0x8959, 0x7a4a, 0x2c2c2c2c, 0x59595959, 0x3b3b3b3b, 0x4a4a4a4a, 
  (1U<<31) | 5789, 0x45959, 0x42c2c, 0x45959, 0x43b3b, 0x44a4a, 0x4594a4a, 0x4a4a4a, 
  (1U<<31) | 2143, 0x7a7a, (1U<<31) | 4063, (1U<<31) | 4063, 0x7a7a7, 0x0, (1U<<31) | 9312, 0x70, 
  0x44a4a0, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 0x4, 
  0x7f2f, 0x7f2f, 0x4447a0, 0x447a0, (1U<<31) | 4063, (1U<<31) | 4063, (1U<<31) | 4063, (1U<<31) | 4063, 
  (1U<<31) | 4036, (1U<<31) | 4063, (1U<<31) | 4063, (1U<<31) | 4036, 0x4444, 0x4444, 0x4444, 0x4444, 
  0x555, 0x555, 0x555, 0x555, 0x55, 0x455, 0x555, 0x7f0f, 
  0x7f7f0f, 0x7f7f0f, 0x7f7f0f, 0x4444f4, 0x5554f5, 0x44444f4, 0x55554f5, 0x44444f4, 
  0x55554f5, 0x4444f4, 0x5554f5, 0x4444f4, 0x5554f5, 0x4444f4, 0x5554f5, 0x4444f4, 
  0x5554f5, 0x4444f4, 0x5554f5, 0x44444f4, 0x55554f5, (1U<<31) | 139, (1U<<31) | 128, 0x7f0f, 
  (1U<<31) | 12859, (1U<<31) | 12849, (1U<<31) | 12881, (1U<<31) | 12869, (1U<<31) | 12907, (1U<<31) | 12893, (1U<<31) | 12937, (1U<<31) | 12921, 
  (1U<<31) | 12971, (1U<<31) | 12953, (1U<<31) | 13009, (1U<<31) | 12989, (1U<<31) | 13051, (1U<<31) | 13029, (1U<<31) | 13801, (1U<<31) | 13835, 
  (1U<<31) | 14106, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, 
  (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, 
  (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 13801, 
  (1U<<31) | 13835, (1U<<31) | 14106, (1U<<31) | 13781, (1U<<31) | 13781, (1U<<31) | 13768, (1U<<31) | 13768, (1U<<31) | 14093, (1U<<31) | 14093, 
  (1U<<31) | 13824, (1U<<31) | 13824, (1U<<31) | 13781, (1U<<31) | 13781, (1U<<31) | 13768, (1U<<31) | 13768, (1U<<31) | 14093, (1U<<31) | 14093, 
  (1U<<31) | 13781, (1U<<31) | 13781, (1U<<31) | 13768, (1U<<31) | 13768, (1U<<31) | 14093, (1U<<31) | 14093, (1U<<31) | 13824, (1U<<31) | 13824, 
  (1U<<31) | 13781, (1U<<31) | 13781, (1U<<31) | 13768, (1U<<31) | 13768, (1U<<31) | 14093, (1U<<31) | 14093, (1U<<31) | 13801, (1U<<31) | 13835, 
  (1U<<31) | 14106, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, 
  (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, 
  (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 12493, (1U<<31) | 13801, 
  (1U<<31) | 13835, (1U<<31) | 14106, 0x44, 0x44, 0x44, 0x44, 0x55, 0x444, 
  0x444, 0x55, 0x444, 0x444, 0x55, 0x444, 0x55, 0x444, 
  0x44, 0x44, 0x4444, 0x4444, (1U<<31) | 14029, (1U<<31) | 14000, (1U<<31) | 14029, (1U<<31) | 14000, 
  (1U<<31) | 14029, (1U<<31) | 14000, (1U<<31) | 14029, (1U<<31) | 14000, 0x7f0f, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 13677, 
  (1U<<31) | 13719, (1U<<31) | 12285, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 13677, (1U<<31) | 13719, 
  (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 9594, 0x9f3fff, (1U<<31) | 9770, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 9761, (1U<<31) | 10890, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 13690, 
  (1U<<31) | 13737, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 13690, 
  (1U<<31) | 13737, (1U<<31) | 13677, (1U<<31) | 13719, 0x9f3fff, (1U<<31) | 9770, (1U<<31) | 13753, (1U<<31) | 13701, (1U<<31) | 13753, 
  (1U<<31) | 13701, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12285, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 13753, (1U<<31) | 13701, 
  (1U<<31) | 13753, (1U<<31) | 13701, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 988, (1U<<31) | 9808, (1U<<31) | 9808, (1U<<31) | 13690, 
  (1U<<31) | 13737, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 12532, 
  (1U<<31) | 13368, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 13753, 
  (1U<<31) | 13701, (1U<<31) | 13753, (1U<<31) | 13701, (1U<<31) | 13753, (1U<<31) | 13701, (1U<<31) | 13753, (1U<<31) | 13701, (1U<<31) | 13677, 
  (1U<<31) | 13719, (1U<<31) | 11169, (1U<<31) | 12201, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 13664, 
  (1U<<31) | 13621, (1U<<31) | 13664, (1U<<31) | 13621, (1U<<31) | 9779, (1U<<31) | 10916, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 11169, (1U<<31) | 12201, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 14016, (1U<<31) | 14042, (1U<<31) | 13677, 
  (1U<<31) | 13719, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 12532, 
  (1U<<31) | 13368, (1U<<31) | 12532, (1U<<31) | 13368, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 13690, (1U<<31) | 13737, (1U<<31) | 14078, 
  (1U<<31) | 14060, (1U<<31) | 14078, (1U<<31) | 14060, (1U<<31) | 14016, (1U<<31) | 14042, (1U<<31) | 14078, (1U<<31) | 14060, (1U<<31) | 14078, 
  (1U<<31) | 14060, (1U<<31) | 13664, (1U<<31) | 13621, (1U<<31) | 13664, (1U<<31) | 13621, (1U<<31) | 14016, (1U<<31) | 14042, (1U<<31) | 13677, 
  (1U<<31) | 13719, 0x9f7f3f, (1U<<31) | 10904, (1U<<31) | 9584, (1U<<31) | 10858, 0x9fe7f3f, (1U<<31) | 10946, (1U<<31) | 11695, 
  (1U<<31) | 12185, 0x9fe3f, (1U<<31) | 12542, (1U<<31) | 13382, (1U<<31) | 12580, (1U<<31) | 13397, (1U<<31) | 12631, (1U<<31) | 13417, 
  (1U<<31) | 12694, (1U<<31) | 13441, (1U<<31) | 12717, (1U<<31) | 13469, (1U<<31) | 12744, (1U<<31) | 13501, (1U<<31) | 12775, (1U<<31) | 13537, 
  (1U<<31) | 12810, (1U<<31) | 13577, (1U<<31) | 11190, (1U<<31) | 11722, (1U<<31) | 9861, (1U<<31) | 10959, (1U<<31) | 11680, (1U<<31) | 12165, 
  (1U<<31) | 9906, (1U<<31) | 10977, (1U<<31) | 11661, (1U<<31) | 12141, (1U<<31) | 9963, (1U<<31) | 10999, (1U<<31) | 11638, (1U<<31) | 12113, 
  (1U<<31) | 9984, (1U<<31) | 11025, (1U<<31) | 11611, (1U<<31) | 12081, (1U<<31) | 10009, (1U<<31) | 11055, (1U<<31) | 11580, (1U<<31) | 12045, 
  (1U<<31) | 10038, (1U<<31) | 11089, (1U<<31) | 11545, (1U<<31) | 12005, (1U<<31) | 10071, (1U<<31) | 11127, (1U<<31) | 11506, (1U<<31) | 11961, 
  (1U<<31) | 11228, (1U<<31) | 11737, (1U<<31) | 11279, (1U<<31) | 11757, (1U<<31) | 11342, (1U<<31) | 11781, (1U<<31) | 11365, (1U<<31) | 11809, 
  (1U<<31) | 11392, (1U<<31) | 11841, (1U<<31) | 11423, (1U<<31) | 11877, (1U<<31) | 11458, (1U<<31) | 11917, (1U<<31) | 12542, (1U<<31) | 13382, 
  (1U<<31) | 12580, (1U<<31) | 13397, (1U<<31) | 12631, (1U<<31) | 13417, (1U<<31) | 12694, (1U<<31) | 13441, (1U<<31) | 12717, (1U<<31) | 13469, 
  (1U<<31) | 12744, (1U<<31) | 13501, (1U<<31) | 12775, (1U<<31) | 13537, (1U<<31) | 12810, (1U<<31) | 13577, (1U<<31) | 13651, (1U<<31) | 13336, 
  (1U<<31) | 12483, (1U<<31) | 12272, (1U<<31) | 13651, (1U<<31) | 13336, (1U<<31) | 9779, (1U<<31) | 9779, (1U<<31) | 12505, (1U<<31) | 13352, 
  (1U<<31) | 12505, (1U<<31) | 13352, 0x9f3f, (1U<<31) | 12285, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, 
  (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, 
  (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 9779, (1U<<31) | 9779, (1U<<31) | 9779, (1U<<31) | 9779, 
  (1U<<31) | 12483, (1U<<31) | 12272, 0x9f7f3f, (1U<<31) | 9788, (1U<<31) | 12483, (1U<<31) | 12269, 0x9f3f, (1U<<31) | 12483, 
  (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, 0x9f7f3f, 
  (1U<<31) | 9788, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, (1U<<31) | 12483, 
  (1U<<31) | 12269, (1U<<31) | 12483, (1U<<31) | 12269, 0x9f7f3f, (1U<<31) | 9788, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 9799, (1U<<31) | 9779, (1U<<31) | 9799, 
  (1U<<31) | 365, (1U<<31) | 9779, (1U<<31) | 9779, (1U<<31) | 14016, (1U<<31) | 14042, (1U<<31) | 14016, (1U<<31) | 14042, (1U<<31) | 13651, 
  (1U<<31) | 13336, (1U<<31) | 13651, (1U<<31) | 13336, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, 
  (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 12465, 
  (1U<<31) | 12451, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 9817, (1U<<31) | 10930, (1U<<31) | 11169, 
  (1U<<31) | 11706, (1U<<31) | 9749, (1U<<31) | 10873, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 12285, 0x9fe3f0, (1U<<31) | 9606, 0x7f7f7f1f, 0x7f7f1f, (1U<<31) | 12532, (1U<<31) | 13637, 
  (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12217, (1U<<31) | 11706, (1U<<31) | 12217, (1U<<31) | 11706, 
  (1U<<31) | 12505, (1U<<31) | 13352, 0x9fe3f0, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 12523, (1U<<31) | 12299, (1U<<31) | 12541, 
  (1U<<31) | 12311, (1U<<31) | 12552, (1U<<31) | 12325, (1U<<31) | 12565, (1U<<31) | 12341, (1U<<31) | 12595, (1U<<31) | 12359, (1U<<31) | 12612, 
  (1U<<31) | 12379, (1U<<31) | 12650, (1U<<31) | 12401, (1U<<31) | 12671, (1U<<31) | 12425, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 11180, (1U<<31) | 10683, (1U<<31) | 9828, (1U<<31) | 9616, (1U<<31) | 9837, (1U<<31) | 9628, (1U<<31) | 9848, 
  (1U<<31) | 9642, (1U<<31) | 9874, (1U<<31) | 9658, (1U<<31) | 9889, (1U<<31) | 9676, (1U<<31) | 9923, (1U<<31) | 9696, (1U<<31) | 9942, 
  (1U<<31) | 9718, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 13677, (1U<<31) | 13719, (1U<<31) | 11189, (1U<<31) | 10695, (1U<<31) | 11200, 
  (1U<<31) | 10709, (1U<<31) | 11213, (1U<<31) | 10725, (1U<<31) | 11243, (1U<<31) | 10743, (1U<<31) | 11260, (1U<<31) | 10763, (1U<<31) | 11298, 
  (1U<<31) | 10785, (1U<<31) | 11319, (1U<<31) | 10809, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 12523, (1U<<31) | 12299, (1U<<31) | 12541, (1U<<31) | 12311, (1U<<31) | 12552, (1U<<31) | 12325, (1U<<31) | 12565, 
  (1U<<31) | 12341, (1U<<31) | 12595, (1U<<31) | 12359, (1U<<31) | 12612, (1U<<31) | 12379, (1U<<31) | 12650, (1U<<31) | 12401, (1U<<31) | 12671, 
  (1U<<31) | 12425, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 12505, 
  (1U<<31) | 13352, (1U<<31) | 14029, (1U<<31) | 13984, (1U<<31) | 14029, (1U<<31) | 13984, (1U<<31) | 14029, (1U<<31) | 13984, (1U<<31) | 14029, 
  (1U<<31) | 13984, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 12465, 
  (1U<<31) | 12451, (1U<<31) | 12465, (1U<<31) | 12451, (1U<<31) | 13813, (1U<<31) | 13968, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 13813, 
  (1U<<31) | 13968, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12505, (1U<<31) | 13352, (1U<<31) | 12532, (1U<<31) | 13637, 0x7f7f0f, 
  0x7f7f0f, 0x7f0f, 0x4, 0x4, 0x4e4, 0xe50, 0x40, 0x40, 
  0x50, 0x4e4, 0x4e4, 0x4e0, 0x52f4, 0x4, 0x2c2c2c, 0x2c2c2c2c, 
  0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c2c, 0x2c2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x4a4a4a, 
  0x595959, 0x3b3b3b, 0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c59, (1U<<31) | 940, 
  (1U<<31) | 6598, (1U<<31) | 7356, (1U<<31) | 1068, 0x43b3b, (1U<<31) | 940, (1U<<31) | 6598, (1U<<31) | 7356, (1U<<31) | 1068, 
  (1U<<31) | 940, (1U<<31) | 6598, (1U<<31) | 7356, (1U<<31) | 1068, 0x4a4a4a, 0x43b7a, 0x43b7a, 0x43b3b, 
  0x47a7a3b, (1U<<31) | 2143, (1U<<31) | 5341, (1U<<31) | 5789, (1U<<31) | 2255, 0x42c2c, 0x44a4a, 0x45959, 
  0x43b3b, 0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x42c2c2c, (1U<<31) | 2165, 0x44a4a4a, 
  (1U<<31) | 5319, 0x43b3b3b, (1U<<31) | 2277, 0x42c2c2c, (1U<<31) | 2165, 0x44a4a4a, (1U<<31) | 5319, 0x43b3b3b, 
  (1U<<31) | 2277, (1U<<31) | 9496, (1U<<31) | 8592, (1U<<31) | 9496, (1U<<31) | 9496, (1U<<31) | 8592, (1U<<31) | 8592, 0x2c2c2c, 
  (1U<<31) | 940, 0x4a4a4a, (1U<<31) | 6598, 0x3b3b3b, (1U<<31) | 1068, 0x2c2c2c, (1U<<31) | 940, 0x4a4a4a, 
  (1U<<31) | 6598, 0x3b3b3b, (1U<<31) | 1068, 0x2c2c2c, (1U<<31) | 940, 0x4a4a4a, (1U<<31) | 6598, 0x3b3b3b, 
  (1U<<31) | 1068, 0x2c2c2c, (1U<<31) | 940, 0x4a4a4a, (1U<<31) | 6598, 0x3b3b3b, (1U<<31) | 1068, 0x448989, 
  0x447a7a, 0x4898989, 0x47a7a7a, 0x4898989, 0x47a7a7a, (1U<<31) | 6149, (1U<<31) | 5904, 0x3b2c2c3b, 
  0x594a4a59, 0x2c59592c, 0x4a3b3b4a, 0x2c2c3b, 0x4a4a59, 0x59592c, 0x3b3b4a, 0x2c2c, 
  (1U<<31) | 960, 0x4a4a, (1U<<31) | 6572, 0x3b3b, (1U<<31) | 1077, 0x4e2c, 0xe42c, 0xe42c, 
  0x3b2c2c3b, 0x594a4a59, 0x4a3b3b4a, 0x2c2c2c2c, 0x4a4a4a4a, 0x3b3b3b3b, 0x3b2c2c3b, 0x594a4a59, 
  0x4a3b3b4a, 0x2c2c2c2c, 0x4a4a4a4a, 0x3b3b3b3b, 0x3b2c2c3b, 0x594a4a59, 0x4a3b3b4a, 0x3b2c2c3b, 
  0x594a4a59, 0x4a3b3b4a, 0x2c2c3b, 0x4a4a59, 0x3b3b4a, 0x2c2c2c, 0x4a4a4a, 0x3b3b3b, 
  0x2c2c3b, 0x4a4a59, 0x3b3b4a, 0x2c2c2c, 0x4a4a4a, 0x3b3b3b, 0x2c2c3b, 0x4a4a59, 
  0x3b3b4a, 0x2c2c3b, 0x4a4a59, 0x3b3b4a, (1U<<31) | 2175, 0x4595959, 0x2c2c2c2c, 0x4a4a3b, 
  (1U<<31) | 6589, 0x59594a, (1U<<31) | 7325, 0x3b3b2c, (1U<<31) | 1059, 0x4a4a3b, (1U<<31) | 6589, 0x59594a, 
  (1U<<31) | 7325, 0x3b3b2c, (1U<<31) | 1059, 0x2c2c2c2c, 0x2c2c2c2c, 0x2c2c2c, 0x4a4a4a, 0x595959, 
  0x3b3b3b, 0x2c2c2c, 0x2c2c2c, 0x2c2c2c, 0x42c2c2c, 0x42c2c2c, 0x2c2c2c, 0x2c2c2c, 
  0x2c2c2c, 0x42c2c2c, 0x2c2c2c, 0x2c2c2c, 0xe42c0, (1U<<31) | 2143, (1U<<31) | 2153, (1U<<31) | 5341, 
  (1U<<31) | 5329, (1U<<31) | 2255, (1U<<31) | 2265, (1U<<31) | 2143, (1U<<31) | 2153, (1U<<31) | 5341, (1U<<31) | 5329, (1U<<31) | 2255, 
  (1U<<31) | 2265, 0xe42c0, (1U<<31) | 929, (1U<<31) | 967, (1U<<31) | 949, (1U<<31) | 929, (1U<<31) | 967, (1U<<31) | 949, 
  0x2c2c4a, 0x4a4a59, 0x3b3b59, 0x3b3b4a, 0x4a4a2c, 0x59592c, 0x2c2c4, 0x2c3b, 
  0x4a59, 0x3b4a, 0x2c3b, 0x4a59, 0x2c3b, 0x4a59, 0x3b4a, 0x3b4a, 
  0x2c3b, 0x4a59, 0x3b4a, 0xf, (1U<<31) | 13871, (1U<<31) | 13098, 0x8f0f, (1U<<31) | 13877, 
  (1U<<31) | 13893, (1U<<31) | 12476, (1U<<31) | 13891, (1U<<31) | 13948, 0x8f0f0, (1U<<31) | 13792, (1U<<31) | 13876, 0x234f4, 
  0x23cf0f0, (1U<<31) | 13871, (1U<<31) | 13128, 0x4, 0x0, 0xf0, (1U<<31) | 432, (1U<<31) | 491, 
  (1U<<31) | 432, (1U<<31) | 491, (1U<<31) | 439, (1U<<31) | 439, 0x40, 0x0, 0x40, 0x455, 
  (1U<<31) | 484, (1U<<31) | 484, 0x555, (1U<<31) | 7205, (1U<<31) | 7230, (1U<<31) | 7237, (1U<<31) | 6472, (1U<<31) | 6443, 
  (1U<<31) | 6463, (1U<<31) | 1688, (1U<<31) | 434, (1U<<31) | 493, (1U<<31) | 432, (1U<<31) | 491, (1U<<31) | 432, (1U<<31) | 491, 
  0xe5, 0xee5, (1U<<31) | 1688, 0x4e50, 0x4e50, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, 
  (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, 
  (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6229, 
  (1U<<31) | 5764, (1U<<31) | 6452, (1U<<31) | 6265, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, 
  (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6480, 
  (1U<<31) | 6522, (1U<<31) | 6265, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6265, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6251, 
  (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, 
  (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, 
  (1U<<31) | 6554, (1U<<31) | 6262, (1U<<31) | 6488, (1U<<31) | 6519, (1U<<31) | 6249, (1U<<31) | 6450, (1U<<31) | 6486, (1U<<31) | 6275, 
  (1U<<31) | 6554, (1U<<31) | 6552, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, 
  (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 1651, (1U<<31) | 1651, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, 
  (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 1647, (1U<<31) | 6232, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, 
  (1U<<31) | 6262, (1U<<31) | 6488, (1U<<31) | 6519, (1U<<31) | 6249, (1U<<31) | 6450, (1U<<31) | 6486, (1U<<31) | 6275, (1U<<31) | 6554, 
  (1U<<31) | 6552, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6262, 
  (1U<<31) | 6488, (1U<<31) | 6519, (1U<<31) | 6249, (1U<<31) | 6450, (1U<<31) | 6486, (1U<<31) | 6275, (1U<<31) | 6554, (1U<<31) | 6552, 
  (1U<<31) | 6262, (1U<<31) | 6488, (1U<<31) | 6519, (1U<<31) | 6249, (1U<<31) | 6450, (1U<<31) | 6486, (1U<<31) | 6275, (1U<<31) | 6554, 
  (1U<<31) | 6552, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6265, 
  (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, 
  (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, 
  (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, 
  (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6265, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, 
  (1U<<31) | 6522, (1U<<31) | 6234, (1U<<31) | 6480, (1U<<31) | 6234, (1U<<31) | 6480, (1U<<31) | 6234, (1U<<31) | 6480, (1U<<31) | 6227, 
  (1U<<31) | 5762, (1U<<31) | 6450, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6227, (1U<<31) | 5762, (1U<<31) | 6450, 
  (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6227, (1U<<31) | 5762, (1U<<31) | 6450, (1U<<31) | 6277, (1U<<31) | 6522, 
  (1U<<31) | 6554, (1U<<31) | 6227, (1U<<31) | 5762, (1U<<31) | 6450, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, 
  (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, 
  (1U<<31) | 6277, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6251, (1U<<31) | 6452, (1U<<31) | 6488, (1U<<31) | 6277, (1U<<31) | 6522, 
  (1U<<31) | 6554, 0x5550, (1U<<31) | 7173, (1U<<31) | 7178, 0x0, (1U<<31) | 1688, 0x5555, (1U<<31) | 6452, 
  (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, 
  (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6144, (1U<<31) | 6332, (1U<<31) | 6470, (1U<<31) | 5764, (1U<<31) | 6314, 
  (1U<<31) | 6452, (1U<<31) | 5899, (1U<<31) | 6323, (1U<<31) | 6461, (1U<<31) | 5295, (1U<<31) | 6291, (1U<<31) | 6431, (1U<<31) | 6480, 
  (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, 
  (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, 
  (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, 
  (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, 
  (1U<<31) | 6392, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, 
  (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, 
  (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, 
  (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6392, 
  (1U<<31) | 6522, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 5762, (1U<<31) | 6312, (1U<<31) | 6450, (1U<<31) | 6522, 
  (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 5293, (1U<<31) | 6289, (1U<<31) | 6429, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 5293, (1U<<31) | 6289, 
  (1U<<31) | 6429, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 5762, 
  (1U<<31) | 6312, (1U<<31) | 6450, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, 
  (1U<<31) | 5293, (1U<<31) | 6289, (1U<<31) | 6429, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6452, (1U<<31) | 6352, 
  (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6392, (1U<<31) | 6470, (1U<<31) | 6378, (1U<<31) | 6510, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6461, (1U<<31) | 6365, (1U<<31) | 6499, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6470, (1U<<31) | 6378, (1U<<31) | 6510, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6461, 
  (1U<<31) | 6365, (1U<<31) | 6499, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6470, (1U<<31) | 6378, (1U<<31) | 6510, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6461, (1U<<31) | 6365, (1U<<31) | 6499, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6510, (1U<<31) | 6402, (1U<<31) | 6541, (1U<<31) | 6468, (1U<<31) | 6376, (1U<<31) | 6508, (1U<<31) | 6554, 
  (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6499, (1U<<31) | 6389, (1U<<31) | 6530, (1U<<31) | 6459, (1U<<31) | 6363, (1U<<31) | 6497, 
  (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6470, (1U<<31) | 6378, (1U<<31) | 6510, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6461, (1U<<31) | 6365, (1U<<31) | 6499, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6470, 
  (1U<<31) | 6378, (1U<<31) | 6510, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6461, (1U<<31) | 6365, (1U<<31) | 6499, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 1689, 
  (1U<<31) | 1689, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, 
  (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6294, (1U<<31) | 1693, (1U<<31) | 6510, (1U<<31) | 6402, (1U<<31) | 6541, 
  (1U<<31) | 6468, (1U<<31) | 6376, (1U<<31) | 6508, (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6499, (1U<<31) | 6389, 
  (1U<<31) | 6530, (1U<<31) | 6459, (1U<<31) | 6363, (1U<<31) | 6497, (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6470, 
  (1U<<31) | 6378, (1U<<31) | 6510, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6461, (1U<<31) | 6365, (1U<<31) | 6499, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6510, (1U<<31) | 6402, (1U<<31) | 6541, (1U<<31) | 6468, (1U<<31) | 6376, 
  (1U<<31) | 6508, (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6499, (1U<<31) | 6389, (1U<<31) | 6530, (1U<<31) | 6459, 
  (1U<<31) | 6363, (1U<<31) | 6497, (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6510, (1U<<31) | 6402, (1U<<31) | 6541, 
  (1U<<31) | 6468, (1U<<31) | 6376, (1U<<31) | 6508, (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6499, (1U<<31) | 6389, 
  (1U<<31) | 6530, (1U<<31) | 6459, (1U<<31) | 6363, (1U<<31) | 6497, (1U<<31) | 6554, (1U<<31) | 6415, (1U<<31) | 6552, (1U<<31) | 6480, 
  (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, 
  (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, 
  (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6470, (1U<<31) | 6378, (1U<<31) | 6510, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6461, (1U<<31) | 6365, (1U<<31) | 6499, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6480, 
  (1U<<31) | 1712, (1U<<31) | 6480, (1U<<31) | 1712, (1U<<31) | 5739, (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, 
  (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, 
  (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, 
  (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5739, 
  (1U<<31) | 1683, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, 
  (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, 
  (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, 
  (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6909, 
  (1U<<31) | 6564, (1U<<31) | 6909, (1U<<31) | 6564, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, (1U<<31) | 6452, (1U<<31) | 6352, 
  (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, 
  (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, 
  (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, 
  (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, 
  (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 1701, (1U<<31) | 6352, (1U<<31) | 1710, (1U<<31) | 6417, (1U<<31) | 1655, (1U<<31) | 6238, 
  (1U<<31) | 1664, (1U<<31) | 6277, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, 
  (1U<<31) | 6431, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, 
  (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, 
  (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, 
  (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6452, 
  (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6480, (1U<<31) | 6392, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 1712, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 1712, 
  (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, (1U<<31) | 6480, (1U<<31) | 6522, 
  (1U<<31) | 6480, (1U<<31) | 1712, (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, 
  (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, 
  (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 5738, (1U<<31) | 1682, 
  (1U<<31) | 5738, (1U<<31) | 1682, (1U<<31) | 6234, (1U<<31) | 6480, (1U<<31) | 5739, (1U<<31) | 6300, (1U<<31) | 6438, (1U<<31) | 5760, 
  (1U<<31) | 6448, (1U<<31) | 5762, (1U<<31) | 6312, (1U<<31) | 6450, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 5293, 
  (1U<<31) | 6289, (1U<<31) | 6429, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 5293, (1U<<31) | 6289, (1U<<31) | 6429, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 5762, (1U<<31) | 6312, (1U<<31) | 6450, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 5762, (1U<<31) | 6312, (1U<<31) | 6450, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 5293, 
  (1U<<31) | 6289, (1U<<31) | 6429, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 5293, (1U<<31) | 6289, (1U<<31) | 6429, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 5762, (1U<<31) | 6312, (1U<<31) | 6450, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, 
  (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, 
  (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, 
  (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, 
  (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, 
  (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, (1U<<31) | 1720, (1U<<31) | 6908, 
  (1U<<31) | 1720, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, 
  (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, (1U<<31) | 6522, (1U<<31) | 6417, 
  (1U<<31) | 6554, (1U<<31) | 6431, (1U<<31) | 6341, (1U<<31) | 6477, (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 6480, 
  (1U<<31) | 1712, (1U<<31) | 6480, (1U<<31) | 1712, (1U<<31) | 6480, (1U<<31) | 1712, (1U<<31) | 6452, (1U<<31) | 6352, (1U<<31) | 6488, 
  (1U<<31) | 6522, (1U<<31) | 6417, (1U<<31) | 6554, (1U<<31) | 432, (1U<<31) | 491, 0x3f4, 0x3f4, 0x7f7f3f, 
  0x3f4, 0x7f7f7f3f, 0x4e, 0x3b3b4a, (1U<<31) | 9351, (1U<<31) | 9351, (1U<<31) | 13085, (1U<<31) | 13088, 
  (1U<<31) | 7096, 0xe, 0x4e4, 0x54e4, 0x55e4, 0x7f41f, 0x41f, 0xffbf3f, 
  0xffbf3f, 0x7f7f3f, 0x7f7f3f, 0x3b3b3b, (1U<<31) | 14243, (1U<<31) | 14249, (1U<<31) | 14244, (1U<<31) | 14250, 
  0x7a3b3b7a, 0x4a2c2c4a, 0x2c2c3b, 0x7f7f7f3f, 0x7f7f7f3f, 0x7f7f3f, 0x7f7f3f, 0x7f7f7f3f, 
  0x3b3b3b, 0x2c2c2c, 0x7a4a, 0x894a, 0x7a4a, 0x894a, 0x0, (1U<<31) | 3939, 
  0x7f7f3f, 0x7f7f3f, 0x2c2c2c, (1U<<31) | 3840, (1U<<31) | 7136, (1U<<31) | 7149, (1U<<31) | 1637, (1U<<31) | 1642, 
  (1U<<31) | 7130, (1U<<31) | 7143, (1U<<31) | 14240, (1U<<31) | 14246, (1U<<31) | 428, 0xe40, 0x1f, 0xe, 
  0x1f, 0xaf1f, 0xaf1f, 0xaf1f, 0xaf1f, (1U<<31) | 13073, (1U<<31) | 13074, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13074, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13074, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13074, (1U<<31) | 13073, 
  (1U<<31) | 13074, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13074, (1U<<31) | 13074, 0x4e0, 0x5e0, 0x4e0, 
  0x5e0, (1U<<31) | 3760, (1U<<31) | 7244, (1U<<31) | 10612, (1U<<31) | 10612, (1U<<31) | 7365, (1U<<31) | 7365, (1U<<31) | 10612, 
  (1U<<31) | 10612, (1U<<31) | 7365, (1U<<31) | 7365, 0x595959, 0x5a5a5a, 0x5b5b5b, 0x595959, 0x5a5a5a, 
  0x5b5b5b, 0x595959, 0x5a5a5a, 0x5b5b5b, 0x595959, 0x5a5a5a, 0x5b5b5b, 0x5959, 
  0x25959, 0x4e0, 0x5e0, 0x41fe2, 0x41fe2, 0x2e1f, 0x1fe2, 0x2e1f, 
  0x1fe2, 0x2e1f, 0x1fe2, 0x41fe2, 0x41fe2, 0x41fe2, 0x8a8a8a, 0x7b7b7b, 
  (1U<<31) | 9566, 0x7b7b7b7b, 0x28a8a8a, 0x27b7b7b, 0x8a7a, 0x8a4a, 0x7b4b, 0x8a4a, 
  0x7b4b, 0x27b7b7b, 0x8a8a8a, 0x7b7b7b, 0x8a8a8a, 0x7b7b7b, 0xe2d, 0x59e89, 
  0x5ae8a, 0x4ae7a, 0x4be7b, 0x8959e0, 0x8a5ae0, 0x7a4ae0, 0x7b4be0, 0x8a8a8a, 
  0x7b7b7b, 0x8a8a8a, 0x7b7b7b, 0x8a4, 0x7b4, 0x5a5a4, 0x5a5a4, 0x5a5a4, 
  0x7b7b, 0x48a8a, 0x47b7b, 0x7b7b, 0x598989, 0x5a8a8a, 0x4a7a7a, 0x4b7b7b, 
  0x89894, 0x8a8a4, 0x7a7a4, 0x7b7b4, 0x89894, 0x8a8a4, 0x7a7a4, 0x7b7b4, 
  0x89894, 0x8a8a4, 0x7a7a4, 0x7b7b4, 0x0, 0x0, (1U<<31) | 568, (1U<<31) | 611, 
  (1U<<31) | 803, (1U<<31) | 846, (1U<<31) | 717, (1U<<31) | 771, (1U<<31) | 632, (1U<<31) | 664, (1U<<31) | 579, (1U<<31) | 590, 
  (1U<<31) | 814, (1U<<31) | 857, (1U<<31) | 728, (1U<<31) | 739, (1U<<31) | 643, (1U<<31) | 675, 0x4ae4a, 0x4be4b, 
  0x59e59, 0x5ae5a, 0x4a4ae0, 0x4b4be0, 0x5959e0, 0x5a5ae0, 0x22d2d3c, 0x4b4b3c, 
  0x3c3c2d, 0x4b4b3c, 0x3c3c2d, 0x2d2d2d, 0x3c3c3c, 0x2d2d2d2d, 0x4b4b4b, 0x4b7b7b, 
  0x4b4b4b, 0x3c3c3c, 0x3c3c3c, 0x4b4b4b, 0x3c3c3c, 0x3c3c3c, 0x2d2d3c, 0x3c3c4b, 
  0x2d4, 0x3c3c3c, 0x3c3c3c, 0x3c3c3c, 0x2d2d5a, 0x2d2d2d, 0x2d2d2d, 0x4b4b4b, 
  0x3c3c3c, 0x4a4b4b, 0x595a5a, 0x3b3c3c, 0x44b4b, 0x45a5a, 0x43c3c, 0x4a4a4a, 
  0x4b4b4b, 0x595959, 0x5a5a5a, 0x4a4b4b, 0x3b3c3c, 0x44b4b, 0x43c3c, 0x4a4a4a, 
  0x4b4b4b, 0x4a4b4b, 0x595a5a, 0x3b3c3c, 0x44b4b, 0x45a5a, 0x43c3c, 0x4a4a4a, 
  0x4b4b4b, 0x595959, 0x5a5a5a, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 
  0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 
  0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x4a4a4a4a, 
  0x4b4b4b4b, 0x4a4a4a4a, 0x4b4b4b4b, 0x48b8b8b, 0x47c7c7c, 0x259, 0x25a, 0x25b, 
  0x34a, 0x34b, 0x34c, 0x4a4a, 0x4b4b, 0x4c4c, 0x5959, 0x5a5a, 
  0x5b5b, 0x458989, 0x447a7a, 0x457a7a, 0x4894, 0x4895, 0x4894, 0x4895, 
  0x47a4, 0x47a5, 0x47a4, 0x47a5, 0x447a7a, 0x458989, 0x457a7a, 0x42c2c3b, 
  0x42d2d3c, (1U<<31) | 2192, 0x48b8b8b, 0x47c7c7c, 0x428b8b8b, 0x437c7c7c, 0x48919, 0x48a1a, 
  0x48b1b, 0x47a1a, 0x47b1b, 0x47c1c, (1U<<31) | 1855, (1U<<31) | 2212, (1U<<31) | 1835, (1U<<31) | 2222, 
  (1U<<31) | 1975, (1U<<31) | 1945, (1U<<31) | 1955, (1U<<31) | 1965, (1U<<31) | 1895, (1U<<31) | 1875, (1U<<31) | 1935, (1U<<31) | 1915, 
  (1U<<31) | 1885, (1U<<31) | 1865, (1U<<31) | 1925, (1U<<31) | 1905, (1U<<31) | 1805, (1U<<31) | 1775, (1U<<31) | 1815, (1U<<31) | 1785, 
  (1U<<31) | 1795, (1U<<31) | 1765, (1U<<31) | 1845, (1U<<31) | 1825, 0x44e4b20, 0x44e4c30, 0x44e5b20, 0x44e5b20, 
  0x1b1b1b, 0x1d1d1d, (1U<<31) | 375, 0x1c1c1c, 0x1b1b4, 0x1d1d4, (1U<<31) | 382, 0x1c1c4, 
  0x1b1b4, 0x1d1d4, (1U<<31) | 382, 0x1c1c4, (1U<<31) | 2109, (1U<<31) | 2053, (1U<<31) | 260, (1U<<31) | 300, 
  (1U<<31) | 1474, (1U<<31) | 290, (1U<<31) | 329, (1U<<31) | 1573, 0x42489892, 0x4247a7a2, (1U<<31) | 110, 0x24a894a, 
  0x424b8b4b, 0x27a897a, 0x427b8b7b, 0x2598959, 0x25a8a5a, 0x425b8b5b, 0x24a894a, 0x24a8a4a, 
  0x424b8b4b, 0x2598959, 0x25a8a5a, 0x425b8b5b, 0x24a7a4a, 0x24b7b4b, 0x434c7c4c, 0x428b7b8b, 
  0x2597a59, 0x25a7a5a, 0x425b7b5b, 0x24a7a4a, 0x24b7b4b, 0x434c7c4c, 0x2597a59, 0x25a7a5a, 
  0x425b7b5b, 0x27a597a, (1U<<31) | 2064, (1U<<31) | 2098, 0x24a894a, 0x424b8b4b, 0x2598959, 0x25a8a5a, 
  0x425b8b5b, 0x24a894a, 0x24a8a4a, 0x424b8b4b, 0x2598959, 0x25a8a5a, 0x425b8b5b, 0x434c7c4c, 
  0x2597a59, 0x25a7a5a, 0x425b7b5b, 0x24a7a4a, 0x24b7b4b, 0x434c7c4c, 0x2597a59, 0x25a7a5a, 
  0x425b7b5b, 0x27a597a, (1U<<31) | 2109, (1U<<31) | 2053, (1U<<31) | 110, (1U<<31) | 546, (1U<<31) | 557, (1U<<31) | 1753, 
  (1U<<31) | 524, (1U<<31) | 535, (1U<<31) | 2200, (1U<<31) | 1741, (1U<<31) | 1729, 0x24892, 0x247a2, (1U<<31) | 1518, 
  (1U<<31) | 1584, (1U<<31) | 1496, (1U<<31) | 1595, (1U<<31) | 1562, (1U<<31) | 1529, (1U<<31) | 1540, (1U<<31) | 1551, (1U<<31) | 1375, 
  (1U<<31) | 1353, (1U<<31) | 1463, (1U<<31) | 1441, (1U<<31) | 1364, (1U<<31) | 1342, (1U<<31) | 1452, (1U<<31) | 1430, (1U<<31) | 1331, 
  (1U<<31) | 1320, (1U<<31) | 1419, (1U<<31) | 1397, (1U<<31) | 1408, (1U<<31) | 1386, (1U<<31) | 1507, (1U<<31) | 1485, 0x2898989, 
  0x28a8a8a, 0x428b8b8b, 0x27a7a7a, 0x27b7b7b, 0x437c7c7c, (1U<<31) | 2109, (1U<<31) | 2053, 0x28948989, 
  0x28a48a8a, (1U<<31) | 2122, 0x27a47a7a, 0x27b47b7b, (1U<<31) | 2234, (1U<<31) | 2075, (1U<<31) | 2030, (1U<<31) | 2109, 
  (1U<<31) | 2053, (1U<<31) | 2109, (1U<<31) | 2053, (1U<<31) | 2109, (1U<<31) | 2053, 0x22c4a2c, 0x22c4b2c, 0x32c4c2c, 
  0x24ae0, 0x24be0, 0x34ce0, 0x23b4a3b, 0x23b4b3b, 0x33c4c3c, 0x24ae0, 0x24be0, 
  0x34ce0, 0x22c592c, 0x22c5a2c, 0x22c5b2c, 0x259e0, 0x25ae0, 0x25be0, 0x24a594a, 
  0x259e0, 0x25ae0, 0x25be0, 0x23b593b, 0x23b5a3b, 0x23b5b3b, 0x259e0, 0x25ae0, 
  0x25be0, 0x22c3b2c, 0x23be0, 0x33ce0, 0x43de0, 0x22c4a2c, 0x22c4b2c, 0x32c4c2c, 
  0x24ae0, 0x24be0, 0x34ce0, 0x23b4a3b, 0x23b4b3b, 0x33c4c3c, 0x24ae0, 0x24be0, 
  0x34ce0, 0x22c592c, 0x22c5a2c, 0x22c5b2c, 0x259e0, 0x25ae0, 0x25be0, 0x24a594a, 
  0x24a5a4a, 0x24b5b4b, 0x259e0, 0x25ae0, 0x25be0, 0x23b593b, 0x23b5a3b, 0x23b5b3b, 
  0x259e0, 0x25ae0, 0x25be0, 0x22c3b2c, 0x32c3c2c, 0x42d3d2d, 0x23be0, 0x33ce0, 
  0x43de0, 0x22c4a2c, 0x22c4b2c, 0x32c4c2c, 0x24ae0, 0x24be0, 0x34ce0, 0x23b4a3b, 
  0x23b4b3b, 0x33c4c3c, 0x24ae0, 0x24be0, 0x34ce0, 0x22c592c, 0x22c5a2c, 0x22c5b2c, 
  0x259e0, 0x25ae0, 0x25be0, 0x24a594a, 0x24a5a4a, 0x24b5b4b, 0x259e0, 0x25ae0, 
  0x25be0, 0x23b593b, 0x23b5a3b, 0x23b5b3b, 0x259e0, 0x25ae0, 0x25be0, 0x22c3b2c, 
  0x32c3c2c, 0x42d3d2d, 0x23be0, 0x33ce0, 0x43de0, (1U<<31) | 792, (1U<<31) | 835, (1U<<31) | 2120, 
  (1U<<31) | 706, (1U<<31) | 760, (1U<<31) | 2232, (1U<<31) | 3791, (1U<<31) | 3779, 0x28948989, 0x28a48a8a, (1U<<31) | 2122, 
  0x27a47a7a, 0x27b47b7b, (1U<<31) | 2234, (1U<<31) | 3791, (1U<<31) | 3779, 0x28948989, 0x28a48a8a, (1U<<31) | 2122, 
  0x27a47a7a, 0x27b47b7b, (1U<<31) | 2234, (1U<<31) | 3791, (1U<<31) | 3779, (1U<<31) | 825, (1U<<31) | 868, (1U<<31) | 2132, 
  (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, (1U<<31) | 2109, (1U<<31) | 2053, (1U<<31) | 6197, (1U<<31) | 5409, (1U<<31) | 5829, 
  (1U<<31) | 5962, (1U<<31) | 6207, (1U<<31) | 5391, (1U<<31) | 5839, (1U<<31) | 5952, (1U<<31) | 6167, (1U<<31) | 5779, (1U<<31) | 6187, 
  (1U<<31) | 5809, (1U<<31) | 5922, (1U<<31) | 5351, (1U<<31) | 5932, (1U<<31) | 5361, (1U<<31) | 6157, (1U<<31) | 5769, (1U<<31) | 6177, 
  (1U<<31) | 5799, (1U<<31) | 5912, (1U<<31) | 5309, (1U<<31) | 5942, (1U<<31) | 5371, (1U<<31) | 2109, (1U<<31) | 2053, (1U<<31) | 2109, 
  (1U<<31) | 2053, 0x437c3c7c, 0x23b47a3b, 0x23b47b3b, 0x33c47c3c, (1U<<31) | 546, (1U<<31) | 557, (1U<<31) | 1753, 
  (1U<<31) | 524, (1U<<31) | 535, (1U<<31) | 2200, (1U<<31) | 1741, (1U<<31) | 1729, 0x48b8b8b, 0x47c7c7c, 0x48b8b8b, 
  0x47c7c7c, 0x48b8b8b, 0x47c7c7c, 0x4c4c3d, (1U<<31) | 1084, 0x4c4c3d, (1U<<31) | 1084, (1U<<31) | 1002, 
  0x3d3d3d, 0x5a8a8a, 0x5b8b8b, 0x5a5a5a, 0x5b5b5b, 0x3b3b3b, 0x3c3c3c, 0x3d3d3d, 
  0x2c2c2c, 0x2d2d2d, (1U<<31) | 1002, 0x4c7c7c, 0x4c4c4c, (1U<<31) | 1009, 0x3d3d4c, 0x3d3d3d, 
  0x3d3d3d, 0x3d3d3d, 0x2c2c2c, 0x2d2d2d, (1U<<31) | 1002, (1U<<31) | 1016, (1U<<31) | 1002, 0x4a4c4c, 
  0x595b5b, 0x3b3d3d, 0x44c4c, 0x45b5b, 0x43d3d, 0x4c4c4c, 0x5b5b5b, 0x3b3b3b, 
  0x3c3c3c, 0x3d3d3d, 0x4a4c4c, 0x595959, 0x595a5a, 0x595b5b, 0x3b3d3d, 0x44c4c, 
  0x45959, 0x45a5a, 0x45b5b, 0x43d3d, 0x4c4c4c, 0x595959, 0x5a5a5a, 0x5b5b5b, 
  0x3b3b3b, 0x3c3c3c, 0x3d3d3d, 0x4a4c4c, 0x595b5b, 0x3b3d3d, 0x44c4c, 0x45b5b, 
  0x43d3d, 0x4c4c4c, 0x5b5b5b, 0x3b3b3b, 0x3c3c3c, 0x3d3d3d, (1U<<31) | 5341, (1U<<31) | 5381, 
  (1U<<31) | 5419, (1U<<31) | 5789, (1U<<31) | 5819, (1U<<31) | 5849, 0x2898989, 0x28a8a8a, 0x28b8b8b, 0x27a7a7a, 
  0x27b7b7b, 0x37c7c7c, (1U<<31) | 825, (1U<<31) | 750, 0x428b8b8b, 0x437c7c7c, (1U<<31) | 2109, (1U<<31) | 2053, 
  0x2898989, 0x28a8a8a, 0x28b8b8b, 0x27a7a7a, 0x27b7b7b, 0x37c7c7c, (1U<<31) | 825, (1U<<31) | 750, 
  0x428b8b8b, 0x437c7c7c, (1U<<31) | 2109, (1U<<31) | 2053, 0x48b4b2e0, 0x44c4c3e0, 0x45b4b2e0, 0x47c4c3e0, 
  0x48b5b2e0, 0x44b5b2e0, 0x45b5b2e0, 0x47b5b2e0, 0x489592e0, 0x459592e0, 0x48a5a2e0, 0x45a5a2e0, 
  0x47a592e0, 0x44a592e0, 0x47a5a2e0, 0x44a5a2e0, 0x44e4b20, 0x44e4c30, 0x44e5b20, 0x44e5b20, 
  0x4894a2e0, 0x4594a2e0, 0x48a4a2e0, 0x45a4a2e0, 0x47a4a2e0, 0x44a4a2e0, 0x47b4b2e0, 0x44b4b2e0, 
  0x49f2f, 0x48b8b, 0x47c7c, 0x48b8b8b, 0x47c7c7c, 0x49f2f, 0x4489894, 0x447a7a4, 
  0x4894, 0x4895, 0x4894, 0x4895, 0x47a4, 0x47a5, 0x47a4, 0x47a5, 
  0x47777, 0x48888, (1U<<31) | 6217, (1U<<31) | 5972, (1U<<31) | 6217, (1U<<31) | 5972, (1U<<31) | 6579, (1U<<31) | 6657, 
  (1U<<31) | 6683, (1U<<31) | 7315, (1U<<31) | 7472, (1U<<31) | 7482, 0x4a4a4a4a, 0x4b4b4b4b, 0x4c4c4c4c, 0x4a4a4a4a, 
  0x4b4b4b4b, 0x4c4c4c4c, 0x4a4a4a4a, 0x4b4b4b4b, 0x4c4c4c4c, 0x4a4a4a4a, 0x4b4b4b4b, 0x4c4c4c4c, 
  0x4a4a4a4a, 0x4b4b4b4b, 0x4c4c4c4c, 0x3b3b3b3b, 0x3c3c3c3c, 0x3d3d3d3d, (1U<<31) | 9487, (1U<<31) | 9557, 
  (1U<<31) | 9575, 0x7a4a7a7a, 0x7b4b7b7b, 0x7c4c7c7c, 0x59595959, 0x5a5a5a5a, 0x5b5b5b5b, 0x2c2c2c2c, 
  0x2d2d2d2d, (1U<<31) | 1000, 0x5b8b8b, 0x4c7c7c, 0x59595959, 0x5a5a5a5a, 0x5b5b5b5b, 0x59595959, 
  0x5a5a5a5a, 0x5b5b5b5b, 0x2c2c1c, 0x2d2d1d, (1U<<31) | 993, (1U<<31) | 8601, (1U<<31) | 8681, (1U<<31) | 8693, 
  (1U<<31) | 8688, (1U<<31) | 8700, (1U<<31) | 14186, (1U<<31) | 14195, (1U<<31) | 14204, (1U<<31) | 310, 0x46d6d6d, 0x46d6d6d, 
  0x46b1b, 0x46c1c, 0x46d1d, (1U<<31) | 1997, (1U<<31) | 319, (1U<<31) | 339, (1U<<31) | 1606, 0x4246b6b2, 
  (1U<<31) | 1997, 0x246b2, 0x26b6b6b, 0x36c6c6c, 0x446d6d6d, (1U<<31) | 1997, 0x26b46b6b, 0x36c46c6c, 
  (1U<<31) | 5163, (1U<<31) | 1985, (1U<<31) | 1997, (1U<<31) | 1997, (1U<<31) | 1997, 0x26b6b6b, 0x36c6c6c, 0x46d6d6d, 
  (1U<<31) | 696, 0x26b46b6b, 0x36c46c6c, (1U<<31) | 5163, (1U<<31) | 3767, 0x26b46b6b, 0x36c46c6c, (1U<<31) | 5163, 
  (1U<<31) | 3767, 0x26b6b6b, 0x36c6c6c, 0x46d6d6d, (1U<<31) | 696, (1U<<31) | 696, (1U<<31) | 1042, (1U<<31) | 5173, 
  (1U<<31) | 1997, (1U<<31) | 1997, (1U<<31) | 1997, 0x26b4a6b, 0x26b896b, 0x26b8a6b, 0x426b8b6b, 0x24a6b4a, 
  0x24b6b4b, 0x434c6c4c, 0x2896b89, 0x28a6b8a, 0x428b6b8b, 0x27a6b7a, 0x27b6b7b, 0x437c6c7c, 
  0x2596b59, 0x25a6b5a, 0x425b6b5b, 0x24a6b4a, 0x24b6b4b, 0x434c6c4c, 0x2596b59, 0x25a6b5a, 
  0x425b6b5b, 0x23b6b3b, 0x33c6c3c, 0x443d6d3d, 0x23b6b3b, 0x33c6c3c, 0x443d6d3d, 0x26b7a6b, 
  0x26b7b6b, 0x436c7c6c, 0x26b596b, 0x26b5a6b, (1U<<31) | 2019, (1U<<31) | 2087, (1U<<31) | 2042, (1U<<31) | 2008, 
  0x24a6b4a, 0x24b6b4b, 0x434c6c4c, 0x2596b59, 0x25a6b5a, 0x425b6b5b, 0x24a6b4a, 0x24b6b4b, 
  0x434c6c4c, 0x2596b59, 0x25a6b5a, 0x425b6b5b, 0x23b6b3b, 0x33c6c3c, 0x443d6d3d, 0x23b6b3b, 
  0x33c6c3c, 0x443d6d3d, 0x26b4a6b, 0x26b596b, 0x26b5a6b, (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, 
  (1U<<31) | 2053, (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, (1U<<31) | 2053, (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, 
  (1U<<31) | 2053, (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, (1U<<31) | 2053, (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, 
  (1U<<31) | 2053, (1U<<31) | 750, (1U<<31) | 782, (1U<<31) | 2244, (1U<<31) | 2053, 0x6b6b6b, 0x6c6c6c, 0x46d6d6d, 
  0x6b6b6b, 0x6c6c6c, 0x46d6d6d, 0x46d6d6d, 0x46d6d, 0x46d6d6d, 0x446b6b4, 0x46b4, 
  0x46b5, 0x46b4, 0x46b5, 0x446b6b, 0x456b6b, 0x46b4, 0x46b5, 0x46b4, 
  0x46b5, 0x446b6b, 0x456b6b, 0x46666, (1U<<31) | 5876, 0x6b6b6b6b, 0x6c6c6c6c, (1U<<31) | 5876, 
  0x4e0, 0x5e0, 0x444, 0x555, 0x444, 0x555, 0x444, 0x555, 
  0x444, 0x555, (1U<<31) | 14218, (1U<<31) | 1181, 0xe0, 0xe0, 0xe0, 0x0, 
  0xe0, 0xe0, 0x444e4, 0x455e5, 0x4e0, 0x5e0, (1U<<31) | 7297, (1U<<31) | 7334, 
  0xee2, 0xee2, 0x4, 0x5, 0x40, 0x50, (1U<<31) | 9505, (1U<<31) | 9566, 
  0x7a7a7a7a, 0x7b7b7b7b, 0xe0, 0xe0, 0xe0, 0xe0, 0x40, 0x50, 
  0x20, 0xe40, 0xe0, 0xe0, 0xe0, 0x45959590, 0x4442, 0x4452, 
  0x4440, 0x4450, 0x0, 0x0, (1U<<31) | 10634, (1U<<31) | 12265, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 1023, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 7079, (1U<<31) | 5652, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 12252, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 7092, (1U<<31) | 7092, (1U<<31) | 7092, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 7092, (1U<<31) | 7092, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 7092, 
  (1U<<31) | 7092, (1U<<31) | 7092, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, (1U<<31) | 13073, 
  (1U<<31) | 13073, 0x44e0, 0xee0, 0x4440, 0x2595959, 0x25a5a5a, 0x25b5b5b, 0x40, 
  0x50, 0x4, 0x5, 0x4, 0x5, 0x4, 0x4, 0x45, 
  0x45, (1U<<31) | 2314, (1U<<31) | 5673, (1U<<31) | 5859, (1U<<31) | 2314, (1U<<31) | 5673, (1U<<31) | 5859, 0x44, 
  0x55, 0x5, (1U<<31) | 5859, 0xe0, 0x0, 0xe0, 0xe0, 0xee, 
  0x50, 0x0, 0x0, 0x4a4a4a, 0x4a4a4a, 0x4a4a4a, 0x24a4a4a, 0x4a4a4a, 
  0x4a4a4a, 0x4a4a4a4a, 0xe, 0x27a7a7a, 0x27a7a7a, 0x7a7a4, 0x7a7a4, 0x7a7a4, 
  0x7a7a4, 0x7a7a4, 0x7a7a4, (1U<<31) | 9514, (1U<<31) | 12261, (1U<<31) | 12255, (1U<<31) | 8608, 0x7a4, 
  0x7a5, (1U<<31) | 9514, (1U<<31) | 8608, 0x7a4, 0x7a5, 0xe0, 0x7a7a7a, 0x7a7a7a, 
  0x7a7a7a, 0x7a7a7a, 0x7a4, (1U<<31) | 1024, 0x7a7a, 0x7a7a, 0x7a7a, 0x7a7a, 
  0x0, 0xe0, 0x7a7a4, 0x7a7a4, 0x7a7a4, 0x7a7a4, 0x7a7a4, 0x7a7a4, 
  0xe0, 0x2898989, 0x2898989, 0x89894, 0x89894, 0x89894, 0x89894, 0x89894, 
  0x89894, 0x894a, 0x897a, 0x7a4a, 0x894, 0x895, 0x897a7a, 0x894a, 
  0x7a4a, 0x894, 0x895, 0x0, 0xe2c2c0, 0x898989, 0x898989, 0x0, 
  0x898989, 0x898989, 0x894, 0x4a4a3b, 0x3b3b2c, 0x3b3b2c, 0x0, 0x2c2c2c, 
  0x3b3b3b, 0x3b3b4a, 0x2c4, 0x3b3b3b, 0x3b3b3b, 0x2c2c59, 0x4a4a4a, 0x595959, 
  0x3b3b3b, 0x44a4a, 0x45959, 0x43b3b, 0x4a4a4a, 0x3b3b3b, 0x44a4a, 0x43b3b, 
  0x4a4a4a, 0x595959, 0x3b3b3b, 0x44a4a, 0x45959, 0x43b3b, 0x89894, 0x89894, 
  0x89894, 0x89894, 0x89894, 0x89894, 0x898989, 0x7a7a7a, 0x898989, 0x7a7a7a, 
  0x898989, 0x7a7a7a, 0xe2c, 0x44e0, 0x440, (1U<<31) | 9505, 0x7a7a7a7a, 0x2898989, 
  0x27a7a7a, 0x27a7a7a, 0x22c2c3b, 0x4a4a3b, 0x2c2c2c2c, 0x3b3b, 0x59594, 0x59594, 
  0x59594, 0x48989, 0x47a7a, 0x4898989, 0x47a7a7a, 0x344, 0x444, 0x244, 
  0x555, 0x242c42c4, 0x242c42c4, 0x242c42c4, 0x242c42c4, 0x242c42c4, 0x242c42c4, (1U<<31) | 514, 
  0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c4, 0x22c2c2c, 0x2c5959, 
  0x225959, 0x595959, 0x22595959, (1U<<31) | 13074, (1U<<31) | 13074, (1U<<31) | 13074, (1U<<31) | 13073, 0x4a4a4a, 
  (1U<<31) | 13073, 0x3b3b3b, (1U<<31) | 13073, 0x3b3b3b, (1U<<31) | 13073, 0x4a4a4a, (1U<<31) | 13073, 0x3b3b3b, 
  (1U<<31) | 13073, 0x3b3b3b, (1U<<31) | 13073, 0x2c2c3b, (1U<<31) | 13073, 0x3b3b3b, (1U<<31) | 13073, 0x2c2c2c, 
  (1U<<31) | 13073, 0x2c2c2c, (1U<<31) | 13073, 0x4a4a4a, (1U<<31) | 13073, 0x3b3b3b, 0xe0, 0x0, 
  (1U<<31) | 3760, (1U<<31) | 7244, 0x444, 0x555, 0x2220, (1U<<31) | 14229, 0x2220, (1U<<31) | 14229, 
  0x2220, (1U<<31) | 14229, 0x2220, (1U<<31) | 14229, 0x2220, (1U<<31) | 14229, 0x2220, (1U<<31) | 14229, 
  0x2220, (1U<<31) | 14229, 0x2220, (1U<<31) | 14229, 0x2, 0x5e20, (1U<<31) | 7502, 0x5e20, 
  (1U<<31) | 7502, 0x0, 0x5e20, (1U<<31) | 14222, 0x20, (1U<<31) | 1038, 0x4442, 0xe0, 
  0x4442, 0xe7a, 0xe7b, 0xe7a, 0xe7b, 0xe7a, 0xe7b, 0xe7a, 
  0xe7b, 0xe7a, 0xe7b, 0xe7a, 0xe7b, (1U<<31) | 8676, (1U<<31) | 8688, 0x47a3b, 
  0x47b3b, 0x22c2c2c, 0x22d2d2d, (1U<<31) | 506, 0x22c2c2c, 0x22d2d2d, (1U<<31) | 506, 0x2c2c2c, 
  0x2d2d2d, (1U<<31) | 1002, 0x595a5a, 0x5a5a5a, 0x595a5a5a, 0x4a4a4a4a, 0x4a4a4a4a, (1U<<31) | 5341, 
  0x4a4a4a, 0x4b4b4b, 0x4a4a4a, 0x4b4b4b, 0x0, 0x0, 0x40, 0x50, 
  0x40, 0x50, 0x40, 0xe40, 0xe50, 0xe40, 0xe50, 0x20, 
  0x4, 0x0, 0x45, 0x8989, 0x8a8a, 0x7a7a, 0x7b7b, 0x8989, 
  0x7a7a, (1U<<31) | 654, (1U<<31) | 686, (1U<<31) | 601, (1U<<31) | 622, 0x2c4a, 0x2c59, 0x2c3b, 
  0x4a59, 0x2c4a, 0x2c59, 0x2c3b, 0x4a59, 0x3b4a, 0x3b59, 0x3b4a, 
  0x3b59, 0x2c3b, 0x4a59, 0x3b4a, 0x4a4a4a4a, 0x594a4a59, 0x594a4a59, 0x4a4a4a4a, 
  0x594a4a59, 0x594a4a59, 0x4a3b3b4a, 0x3b3b3b3b, 0x4a3b3b4a, 0x3b3b3b3b, 0x4a3b3b4a, 0x4a3b3b4a, 
  0x2c2c2c2c, 0x2c2c2c, 0x4a4a4a, 0x595959, 0x3b3b3b, 0x2c2c2c, 0x4a4a4a, 0x595959, 
  0x3b3b3b, 0x0, 0x44e0, 0x44e0, 0x44e0, 0x44e0, 0x44e0, 0x44e0, 
  0x44e0, 0x44e0, 0x44e0, 0x44e0, 0x44e0, 0x44e0, 0x4440, 0x0, 
  0x4, 0x44, 0xee, 0x44f0, 0x0, 0x4f0, 0x40, 0x4444, 
  (1U<<31) | 4206, 0x4f0, 0x4f0, 0x4f4, 0x4f0, 0x4, 0x4, 0x4, 
  0x44, 0x44f, 0xcf4f, 0x4f4, 0x4f4, 0x4f4, 0xe4f0, 0xe4f0, 
  0xe4f0, 0xe4f0, 0xe4f0, 0x44f4, 0x4f4, 0x4f0, 0x4f0, 0x44f0, 
  0x44f0, 0x44f4, 0x44f0, 0x4f4, 0x44f0, 0xcf4f0, 0x44f0, 0xe4f0, 
  0x440, 0x44f0, 0x44f0, 0xcf4f0, 0x40, 0x44f0, 0xe4f0, 0x444, 
  0x0, 0x4f0, 0x4f4, 0x4f4, 0xe, 0x444, 0
};
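
// Note on the encoding (an editorial sketch, assuming these tables follow the
// layout of LLVM's TableGen-generated IntrinsicImpl.inc): each IIT_Table entry
// above either packs a short intrinsic type signature directly as 4-bit
// nibbles, or, when the sentinel bit (1U<<31) is set, stores an offset into
// IIT_LongEncodingTable below, whose byte runs are terminated by a 0. The
// /* N */ markers in that table are the offsets the IIT_Table entries refer
// to. A minimal decoder under that assumption (decode() is a hypothetical
// callback for each descriptor byte, not part of this file):
//
//   unsigned TableVal = IIT_Table[ID - 1];
//   if (TableVal >> 31) {
//     // Long form: the low 31 bits index into IIT_LongEncodingTable; the
//     // run ends at the first 0 byte.
//     for (unsigned I = TableVal & 0x7fffffffu; IIT_LongEncodingTable[I]; ++I)
//       decode(IIT_LongEncodingTable[I]);
//   } else {
//     // Short form: successive low nibbles carry the descriptor values
//     // until the remaining bits are exhausted.
//     do {
//       decode(TableVal & 0xF);
//       TableVal >>= 4;
//     } while (TableVal);
//   }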

static const unsigned char IIT_LongEncodingTable[] = {
  /* 0 */ 43, 12, 1, 15, 0, 0,
  /* 6 */ 4, 27, 2, 4, 4, 4, 1, 4, 1, 1, 0,
  /* 17 */ 0, 15, 0, 10, 4, 4, 4, 4, 4, 4, 4, 1, 1, 0,
  /* 31 */ 0, 15, 0, 10, 4, 4, 4, 1, 1, 0,
  /* 41 */ 0, 15, 2, 10, 4, 4, 4, 1, 1, 0,
  /* 51 */ 0, 4, 4, 15, 3, 15, 7, 1, 1, 0,
  /* 61 */ 0, 4, 4, 15, 0, 15, 7, 15, 7, 15, 7, 1, 1, 0,
  /* 75 */ 21, 1, 15, 1, 1, 0,
  /* 81 */ 0, 15, 3, 34, 1, 0, 4, 31, 3, 1, 0,
  /* 92 */ 0, 15, 3, 15, 12, 4, 31, 3, 1, 0,
  /* 102 */ 15, 3, 15, 7, 31, 3, 1, 0,
  /* 110 */ 15, 3, 15, 7, 15, 7, 31, 3, 1, 0,
  /* 120 */ 0, 15, 3, 14, 31, 3, 1, 0,
  /* 128 */ 0, 15, 3, 15, 12, 15, 17, 31, 3, 1, 0,
  /* 139 */ 15, 3, 15, 7, 15, 12, 15, 17, 31, 3, 1, 0,
  /* 151 */ 15, 1, 15, 7, 15, 7, 4, 4, 4, 1, 0,
  /* 162 */ 15, 1, 15, 7, 10, 4, 4, 4, 1, 0,
  /* 172 */ 15, 2, 15, 7, 10, 4, 4, 4, 1, 0,
  /* 182 */ 15, 0, 27, 3, 15, 7, 4, 4, 1, 0,
  /* 192 */ 21, 5, 1, 0,
  /* 196 */ 15, 1, 12, 3, 12, 3, 15, 7, 1, 0,
  /* 206 */ 15, 1, 1, 9, 4, 1, 9, 4, 15, 7, 1, 0,
  /* 218 */ 15, 1, 1, 10, 4, 1, 10, 4, 15, 7, 1, 0,
  /* 230 */ 15, 2, 12, 6, 12, 6, 15, 7, 1, 0,
  /* 240 */ 21, 15, 2, 1, 15, 7, 15, 7, 1, 0,
  /* 250 */ 15, 2, 15, 7, 15, 7, 15, 7, 1, 0,
  /* 260 */ 9, 1, 9, 8, 9, 8, 4, 9, 1, 0,
  /* 270 */ 10, 7, 10, 7, 11, 6, 4, 10, 1, 0,
  /* 280 */ 11, 6, 11, 6, 10, 7, 4, 10, 1, 0,
  /* 290 */ 10, 1, 10, 7, 10, 7, 4, 10, 1, 0,
  /* 300 */ 10, 1, 10, 8, 10, 8, 4, 10, 1, 0,
  /* 310 */ 11, 48, 10, 7, 11, 48, 10, 1, 0,
  /* 319 */ 11, 1, 11, 6, 11, 6, 4, 11, 1, 0,
  /* 329 */ 11, 1, 11, 7, 11, 7, 4, 11, 1, 0,
  /* 339 */ 12, 1, 12, 6, 12, 6, 4, 12, 1, 0,
  /* 349 */ 15, 0, 43, 12, 1, 0,
  /* 355 */ 43, 12, 1, 43, 12, 1, 43, 12, 1, 0,
  /* 365 */ 42, 7, 15, 1, 0,
  /* 370 */ 0, 19, 15, 1, 0,
  /* 375 */ 16, 1, 16, 1, 16, 1, 0,
  /* 382 */ 4, 16, 1, 16, 1, 0,
  /* 388 */ 21, 12, 4, 16, 1, 12, 4, 12, 4, 16, 1, 0,
  /* 400 */ 12, 4, 12, 4, 12, 4, 16, 1, 0,
  /* 409 */ 0, 15, 4, 15, 12, 15, 17, 1, 0,
  /* 418 */ 2, 18, 1, 0,
  /* 422 */ 0, 27, 3, 27, 1, 0,
  /* 428 */ 4, 27, 1, 0,
  /* 432 */ 36, 1, 36, 1, 36, 1, 0,
  /* 439 */ 50, 1, 36, 1, 0,
  /* 444 */ 23, 12, 2, 12, 2, 12, 2, 12, 2, 36, 1, 0,
  /* 456 */ 47, 1, 47, 1, 47, 1, 0,
  /* 463 */ 21, 13, 4, 47, 1, 13, 4, 13, 4, 47, 1, 0,
  /* 475 */ 13, 4, 13, 4, 13, 4, 47, 1, 0,
  /* 484 */ 36, 1, 36, 1, 50, 1, 0,
  /* 491 */ 50, 1, 50, 1, 50, 1, 0,
  /* 498 */ 21, 12, 2, 12, 2, 50, 1, 0,
  /* 506 */ 16, 2, 16, 2, 16, 2, 2, 0,
  /* 514 */ 12, 2, 12, 2, 4, 12, 2, 4, 2, 0,
  /* 524 */ 10, 7, 10, 7, 10, 7, 10, 4, 4, 2, 0,
  /* 535 */ 11, 7, 11, 7, 11, 7, 11, 4, 4, 2, 0,
  /* 546 */ 9, 8, 9, 8, 9, 8, 9, 5, 4, 2, 0,
  /* 557 */ 10, 8, 10, 8, 10, 8, 10, 5, 4, 2, 0,
  /* 568 */ 10, 4, 10, 4, 14, 10, 4, 10, 4, 2, 0,
  /* 579 */ 10, 4, 10, 4, 14, 9, 5, 10, 4, 2, 0,
  /* 590 */ 10, 4, 10, 4, 14, 10, 5, 10, 4, 2, 0,
  /* 601 */ 10, 7, 10, 7, 10, 7, 10, 4, 2, 0,
  /* 611 */ 11, 4, 11, 4, 14, 11, 4, 11, 4, 2, 0,
  /* 622 */ 11, 7, 11, 7, 11, 7, 11, 4, 2, 0,
  /* 632 */ 9, 5, 9, 5, 14, 10, 4, 9, 5, 2, 0,
  /* 643 */ 9, 5, 9, 5, 14, 9, 5, 9, 5, 2, 0,
  /* 654 */ 9, 8, 9, 8, 9, 8, 9, 5, 2, 0,
  /* 664 */ 10, 5, 10, 5, 14, 10, 4, 10, 5, 2, 0,
  /* 675 */ 10, 5, 10, 5, 14, 10, 5, 10, 5, 2, 0,
  /* 686 */ 10, 8, 10, 8, 10, 8, 10, 5, 2, 0,
  /* 696 */ 11, 6, 11, 6, 11, 6, 11, 6, 2, 0,
  /* 706 */ 10, 7, 10, 7, 10, 7, 4, 10, 7, 2, 0,
  /* 717 */ 10, 7, 10, 7, 14, 10, 4, 10, 7, 2, 0,
  /* 728 */ 10, 7, 10, 7, 14, 9, 5, 10, 7, 2, 0,
  /* 739 */ 10, 7, 10, 7, 14, 10, 5, 10, 7, 2, 0,
  /* 750 */ 10, 7, 10, 7, 10, 7, 10, 7, 2, 0,
  /* 760 */ 11, 7, 11, 7, 11, 7, 4, 11, 7, 2, 0,
  /* 771 */ 11, 7, 11, 7, 14, 11, 4, 11, 7, 2, 0,
  /* 782 */ 11, 7, 11, 7, 11, 7, 11, 7, 2, 0,
  /* 792 */ 9, 8, 9, 8, 9, 8, 4, 9, 8, 2, 0,
  /* 803 */ 9, 8, 9, 8, 14, 10, 4, 9, 8, 2, 0,
  /* 814 */ 9, 8, 9, 8, 14, 9, 5, 9, 8, 2, 0,
  /* 825 */ 9, 8, 9, 8, 9, 8, 9, 8, 2, 0,
  /* 835 */ 10, 8, 10, 8, 10, 8, 4, 10, 8, 2, 0,
  /* 846 */ 10, 8, 10, 8, 14, 10, 4, 10, 8, 2, 0,
  /* 857 */ 10, 8, 10, 8, 14, 10, 5, 10, 8, 2, 0,
  /* 868 */ 10, 8, 10, 8, 10, 8, 10, 8, 2, 0,
  /* 878 */ 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 11, 2, 0,
  /* 893 */ 36, 1, 36, 1, 50, 1, 12, 2, 0,
  /* 902 */ 36, 1, 36, 1, 12, 2, 12, 2, 0,
  /* 911 */ 50, 1, 12, 2, 12, 2, 0,
  /* 918 */ 36, 1, 12, 2, 12, 2, 12, 2, 12, 2, 0,
  /* 929 */ 21, 12, 2, 4, 12, 2, 12, 2, 12, 2, 0,
  /* 940 */ 21, 12, 2, 4, 12, 2, 12, 2, 0,
  /* 949 */ 21, 12, 2, 4, 11, 3, 11, 3, 12, 2, 0,
  /* 960 */ 21, 12, 2, 4, 12, 2, 0,
  /* 967 */ 21, 12, 2, 4, 10, 4, 10, 4, 12, 2, 0,
  /* 978 */ 43, 12, 2, 43, 12, 2, 43, 12, 2, 0,
  /* 988 */ 42, 7, 15, 2, 0,
  /* 993 */ 16, 1, 16, 2, 16, 2, 0,
  /* 1000 */ 16, 2, 16, 2, 16, 2, 16, 2, 0,
  /* 1009 */ 13, 3, 16, 2, 16, 2, 0,
  /* 1016 */ 11, 5, 16, 2, 16, 2, 0,
  /* 1023 */ 17, 17, 17, 2, 0,
  /* 1028 */ 0, 5, 4, 4, 4, 3, 3, 3, 3, 0,
  /* 1038 */ 51, 3, 3, 0,
  /* 1042 */ 12, 6, 12, 6, 12, 6, 12, 6, 3, 0,
  /* 1052 */ 9, 3, 27, 3, 9, 3, 0,
  /* 1059 */ 21, 12, 2, 4, 11, 3, 11, 3, 0,
  /* 1068 */ 21, 11, 3, 4, 11, 3, 11, 3, 0,
  /* 1077 */ 21, 11, 3, 4, 11, 3, 0,
  /* 1084 */ 16, 2, 13, 3, 13, 3, 0,
  /* 1091 */ 0, 4, 31, 3, 1, 31, 3, 1, 15, 3, 0,
  /* 1102 */ 0, 4, 4, 31, 3, 1, 15, 3, 0,
  /* 1111 */ 5, 31, 3, 1, 15, 3, 0,
  /* 1118 */ 42, 7, 31, 3, 1, 15, 3, 0,
  /* 1126 */ 46, 7, 46, 7, 31, 3, 1, 15, 3, 0,
  /* 1136 */ 0, 4, 31, 3, 1, 31, 3, 1, 15, 7, 15, 3, 0,
  /* 1149 */ 21, 30, 7, 30, 7, 15, 3, 0,
  /* 1157 */ 42, 7, 31, 3, 1, 42, 7, 15, 3, 0,
  /* 1167 */ 42, 7, 42, 7, 15, 3, 0,
  /* 1174 */ 44, 7, 44, 7, 15, 3, 0,
  /* 1181 */ 51, 15, 3, 0,
  /* 1185 */ 0, 27, 3, 0,
  /* 1189 */ 4, 27, 3, 0,
  /* 1193 */ 5, 27, 3, 0,
  /* 1197 */ 0, 15, 3, 34, 1, 0, 31, 3, 1, 4, 0,
  /* 1208 */ 15, 3, 15, 7, 1, 31, 3, 1, 4, 0,
  /* 1218 */ 42, 7, 42, 7, 15, 3, 31, 3, 1, 4, 0,
  /* 1229 */ 15, 3, 15, 7, 31, 3, 1, 4, 0,
  /* 1238 */ 15, 3, 15, 7, 15, 7, 31, 3, 1, 4, 0,
  /* 1249 */ 15, 3, 15, 7, 15, 7, 15, 7, 31, 3, 1, 4, 0,
  /* 1262 */ 15, 3, 15, 11, 31, 3, 1, 4, 0,
  /* 1271 */ 0, 15, 3, 15, 12, 31, 3, 1, 4, 0,
  /* 1281 */ 0, 15, 3, 15, 12, 15, 17, 31, 3, 1, 4, 0,
  /* 1293 */ 31, 3, 1, 15, 3, 15, 7, 19, 31, 3, 1, 4, 0,
  /* 1306 */ 0, 4, 4, 15, 0, 15, 7, 15, 7, 15, 7, 1, 4, 0,
  /* 1320 */ 9, 5, 9, 5, 14, 10, 4, 9, 1, 4, 0,
  /* 1331 */ 9, 8, 9, 8, 14, 10, 4, 9, 1, 4, 0,
  /* 1342 */ 10, 4, 10, 4, 14, 9, 5, 9, 1, 4, 0,
  /* 1353 */ 9, 5, 9, 5, 14, 9, 5, 9, 1, 4, 0,
  /* 1364 */ 10, 7, 10, 7, 14, 9, 5, 9, 1, 4, 0,
  /* 1375 */ 9, 8, 9, 8, 14, 9, 5, 9, 1, 4, 0,
  /* 1386 */ 10, 4, 10, 4, 14, 10, 4, 10, 1, 4, 0,
  /* 1397 */ 10, 5, 10, 5, 14, 10, 4, 10, 1, 4, 0,
  /* 1408 */ 10, 7, 10, 7, 14, 10, 4, 10, 1, 4, 0,
  /* 1419 */ 10, 8, 10, 8, 14, 10, 4, 10, 1, 4, 0,
  /* 1430 */ 10, 4, 10, 4, 14, 10, 5, 10, 1, 4, 0,
  /* 1441 */ 10, 5, 10, 5, 14, 10, 5, 10, 1, 4, 0,
  /* 1452 */ 10, 7, 10, 7, 14, 10, 5, 10, 1, 4, 0,
  /* 1463 */ 10, 8, 10, 8, 14, 10, 5, 10, 1, 4, 0,
  /* 1474 */ 11, 1, 11, 8, 11, 8, 4, 11, 1, 4, 0,
  /* 1485 */ 11, 4, 11, 4, 14, 11, 4, 11, 1, 4, 0,
  /* 1496 */ 11, 5, 11, 5, 14, 11, 4, 11, 1, 4, 0,
  /* 1507 */ 11, 7, 11, 7, 14, 11, 4, 11, 1, 4, 0,
  /* 1518 */ 11, 8, 11, 8, 14, 11, 4, 11, 1, 4, 0,
  /* 1529 */ 11, 4, 11, 4, 14, 11, 5, 11, 1, 4, 0,
  /* 1540 */ 11, 5, 11, 5, 14, 11, 5, 11, 1, 4, 0,
  /* 1551 */ 11, 7, 11, 7, 14, 11, 5, 11, 1, 4, 0,
  /* 1562 */ 11, 8, 11, 8, 14, 11, 5, 11, 1, 4, 0,
  /* 1573 */ 12, 1, 12, 7, 12, 7, 4, 12, 1, 4, 0,
  /* 1584 */ 12, 4, 12, 4, 14, 12, 4, 12, 1, 4, 0,
  /* 1595 */ 12, 7, 12, 7, 14, 12, 4, 12, 1, 4, 0,
  /* 1606 */ 13, 1, 13, 6, 13, 6, 4, 13, 1, 4, 0,
  /* 1617 */ 18, 15, 1, 4, 0,
  /* 1622 */ 12, 4, 12, 4, 16, 1, 4, 0,
  /* 1630 */ 0, 27, 3, 27, 1, 4, 0,
  /* 1637 */ 54, 27, 1, 4, 0,
  /* 1642 */ 55, 27, 1, 4, 0,
  /* 1647 */ 36, 1, 50, 8, 36, 1, 4, 0,
  /* 1655 */ 50, 8, 4, 50, 8, 36, 1, 4, 0,
  /* 1664 */ 50, 8, 50, 8, 50, 8, 36, 1, 4, 0,
  /* 1674 */ 13, 4, 13, 4, 47, 1, 4, 0,
  /* 1682 */ 0, 50, 8, 50, 8, 5, 5, 50, 1, 4, 0,
  /* 1693 */ 50, 1, 50, 8, 50, 1, 4, 0,
  /* 1701 */ 50, 8, 5, 50, 8, 50, 1, 4, 0,
  /* 1710 */ 50, 8, 50, 8, 50, 8, 50, 1, 4, 0,
  /* 1720 */ 0, 50, 8, 5, 14, 50, 1, 4, 0,
  /* 1729 */ 10, 7, 10, 7, 10, 7, 10, 4, 4, 2, 4, 0,
  /* 1741 */ 9, 8, 9, 8, 9, 8, 9, 5, 4, 2, 4, 0,
  /* 1753 */ 11, 8, 11, 8, 11, 8, 11, 5, 4, 2, 4, 0,
  /* 1765 */ 10, 4, 10, 4, 14, 10, 4, 2, 4, 0,
  /* 1775 */ 9, 5, 9, 5, 14, 10, 4, 2, 4, 0,
  /* 1785 */ 10, 5, 10, 5, 14, 10, 4, 2, 4, 0,
  /* 1795 */ 10, 7, 10, 7, 14, 10, 4, 2, 4, 0,
  /* 1805 */ 9, 8, 9, 8, 14, 10, 4, 2, 4, 0,
  /* 1815 */ 10, 8, 10, 8, 14, 10, 4, 2, 4, 0,
  /* 1825 */ 11, 4, 11, 4, 14, 11, 4, 2, 4, 0,
  /* 1835 */ 11, 5, 11, 5, 14, 11, 4, 2, 4, 0,
  /* 1845 */ 11, 7, 11, 7, 14, 11, 4, 2, 4, 0,
  /* 1855 */ 11, 8, 11, 8, 14, 11, 4, 2, 4, 0,
  /* 1865 */ 10, 4, 10, 4, 14, 9, 5, 2, 4, 0,
  /* 1875 */ 9, 5, 9, 5, 14, 9, 5, 2, 4, 0,
  /* 1885 */ 10, 7, 10, 7, 14, 9, 5, 2, 4, 0,
  /* 1895 */ 9, 8, 9, 8, 14, 9, 5, 2, 4, 0,
  /* 1905 */ 10, 4, 10, 4, 14, 10, 5, 2, 4, 0,
  /* 1915 */ 10, 5, 10, 5, 14, 10, 5, 2, 4, 0,
  /* 1925 */ 10, 7, 10, 7, 14, 10, 5, 2, 4, 0,
  /* 1935 */ 10, 8, 10, 8, 14, 10, 5, 2, 4, 0,
  /* 1945 */ 11, 4, 11, 4, 14, 11, 5, 2, 4, 0,
  /* 1955 */ 11, 5, 11, 5, 14, 11, 5, 2, 4, 0,
  /* 1965 */ 11, 7, 11, 7, 14, 11, 5, 2, 4, 0,
  /* 1975 */ 11, 8, 11, 8, 14, 11, 5, 2, 4, 0,
  /* 1985 */ 11, 6, 11, 6, 11, 6, 4, 11, 6, 2, 4, 0,
  /* 1997 */ 11, 6, 11, 6, 11, 6, 11, 6, 2, 4, 0,
  /* 2008 */ 11, 6, 11, 6, 10, 7, 11, 6, 2, 4, 0,
  /* 2019 */ 11, 6, 11, 6, 9, 8, 11, 6, 2, 4, 0,
  /* 2030 */ 10, 7, 10, 7, 10, 7, 4, 10, 7, 2, 4, 0,
  /* 2042 */ 10, 7, 10, 7, 11, 6, 10, 7, 2, 4, 0,
  /* 2053 */ 10, 7, 10, 7, 10, 7, 10, 7, 2, 4, 0,
  /* 2064 */ 10, 7, 10, 7, 9, 8, 10, 7, 2, 4, 0,
  /* 2075 */ 9, 8, 9, 8, 9, 8, 4, 9, 8, 2, 4, 0,
  /* 2087 */ 9, 8, 9, 8, 11, 6, 9, 8, 2, 4, 0,
  /* 2098 */ 9, 8, 9, 8, 10, 7, 9, 8, 2, 4, 0,
  /* 2109 */ 9, 8, 9, 8, 9, 8, 9, 8, 2, 4, 0,
  /* 2120 */ 11, 8, 11, 8, 11, 8, 4, 11, 8, 2, 4, 0,
  /* 2132 */ 11, 8, 11, 8, 11, 8, 11, 8, 2, 4, 0,
  /* 2143 */ 12, 2, 12, 2, 12, 2, 12, 2, 4, 0,
  /* 2153 */ 21, 12, 2, 4, 12, 2, 12, 2, 12, 2, 4, 0,
  /* 2165 */ 21, 12, 2, 4, 12, 2, 12, 2, 4, 0,
  /* 2175 */ 12, 2, 9, 5, 9, 5, 12, 2, 4, 0,
  /* 2185 */ 31, 2, 1, 15, 2, 4, 0,
  /* 2192 */ 13, 3, 16, 2, 16, 2, 4, 0,
  /* 2200 */ 12, 7, 12, 7, 12, 7, 12, 4, 4, 3, 4, 0,
  /* 2212 */ 12, 4, 12, 4, 14, 12, 4, 3, 4, 0,
  /* 2222 */ 12, 7, 12, 7, 14, 12, 4, 3, 4, 0,
  /* 2232 */ 12, 7, 12, 7, 12, 7, 4, 12, 7, 3, 4, 0,
  /* 2244 */ 12, 7, 12, 7, 12, 7, 12, 7, 3, 4, 0,
  /* 2255 */ 11, 3, 11, 3, 11, 3, 11, 3, 4, 0,
  /* 2265 */ 21, 11, 3, 4, 11, 3, 11, 3, 11, 3, 4, 0,
  /* 2277 */ 21, 11, 3, 4, 11, 3, 11, 3, 4, 0,
  /* 2287 */ 43, 12, 1, 43, 12, 1, 15, 3, 4, 0,
  /* 2297 */ 44, 7, 44, 7, 15, 3, 4, 0,
  /* 2305 */ 0, 31, 3, 1, 14, 15, 3, 4, 0,
  /* 2314 */ 21, 3, 4, 0,
  /* 2318 */ 0, 27, 3, 4, 0,
  /* 2323 */ 27, 3, 27, 3, 4, 0,
  /* 2329 */ 5, 27, 3, 4, 0,
  /* 2334 */ 0, 15, 3, 31, 3, 1, 14, 31, 3, 4, 0,
  /* 2345 */ 15, 3, 15, 7, 15, 7, 4, 31, 3, 1, 4, 4, 0,
  /* 2358 */ 15, 3, 15, 7, 31, 3, 1, 4, 4, 0,
  /* 2368 */ 15, 0, 4, 15, 10, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2381 */ 15, 0, 4, 4, 15, 10, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2395 */ 15, 0, 4, 7, 15, 10, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2409 */ 15, 0, 4, 4, 7, 15, 10, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2424 */ 15, 0, 4, 15, 10, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2439 */ 15, 0, 4, 4, 15, 10, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2455 */ 15, 0, 4, 7, 15, 10, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2471 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2488 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2505 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2523 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2541 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2560 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2579 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2599 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2619 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2640 */ 15, 0, 4, 15, 10, 7, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2656 */ 15, 0, 4, 4, 15, 10, 7, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2673 */ 15, 0, 4, 15, 10, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2688 */ 15, 0, 4, 4, 15, 10, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2704 */ 15, 0, 4, 15, 10, 15, 15, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2721 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2739 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2757 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 18, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2776 */ 15, 0, 4, 15, 10, 7, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2794 */ 15, 0, 4, 4, 15, 10, 7, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2813 */ 15, 0, 4, 15, 10, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2830 */ 15, 0, 4, 4, 15, 10, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2848 */ 15, 0, 4, 15, 10, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2867 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2887 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2907 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2928 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2951 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2975 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 2999 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3024 */ 15, 0, 4, 15, 10, 7, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3044 */ 15, 0, 4, 4, 15, 10, 7, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3065 */ 15, 0, 4, 15, 10, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3084 */ 15, 0, 4, 4, 15, 10, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3104 */ 15, 0, 4, 15, 10, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3125 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3147 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3169 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3192 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3217 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3243 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3269 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3296 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3325 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3355 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3385 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3416 */ 15, 0, 4, 15, 10, 7, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3438 */ 15, 0, 4, 4, 15, 10, 7, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3461 */ 15, 0, 4, 15, 10, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3482 */ 15, 0, 4, 4, 15, 10, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3504 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3531 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3559 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3587 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3616 */ 15, 0, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3647 */ 15, 0, 4, 4, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3679 */ 15, 0, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3711 */ 15, 0, 4, 4, 7, 15, 10, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 18, 15, 23, 15, 23, 15, 23, 11, 4, 10, 4, 1, 4, 4, 0,
  /* 3744 */ 21, 4, 1, 4, 4, 0,
  /* 3750 */ 0, 15, 3, 14, 15, 9, 1, 4, 4, 0,
  /* 3760 */ 21, 2, 4, 2, 4, 4, 0,
  /* 3767 */ 11, 6, 11, 6, 11, 6, 11, 6, 2, 4, 4, 0,
  /* 3779 */ 10, 7, 10, 7, 10, 7, 10, 7, 2, 4, 4, 0,
  /* 3791 */ 9, 8, 9, 8, 9, 8, 9, 8, 2, 4, 4, 0,
  /* 3803 */ 36, 1, 36, 1, 50, 1, 12, 2, 4, 4, 0,
  /* 3814 */ 36, 1, 36, 1, 12, 2, 12, 2, 4, 4, 0,
  /* 3825 */ 27, 8, 15, 4, 3, 4, 4, 0,
  /* 3833 */ 21, 4, 1, 4, 4, 4, 0,
  /* 3840 */ 0, 27, 1, 27, 1, 4, 4, 4, 0,
  /* 3849 */ 36, 1, 36, 1, 12, 2, 12, 2, 4, 4, 4, 0,
  /* 3861 */ 21, 15, 3, 4, 4, 4, 0,
  /* 3868 */ 0, 27, 1, 27, 3, 4, 4, 4, 0,
  /* 3877 */ 21, 4, 1, 4, 4, 4, 4, 0,
  /* 3885 */ 21, 15, 3, 4, 4, 4, 4, 0,
  /* 3893 */ 0, 10, 4, 27, 3, 4, 4, 4, 4, 4, 0,
  /* 3904 */ 0, 27, 8, 27, 3, 4, 4, 4, 4, 4, 0,
  /* 3915 */ 0, 10, 4, 27, 3, 4, 4, 4, 4, 4, 4, 0,
  /* 3927 */ 0, 27, 8, 27, 3, 4, 4, 4, 4, 4, 4, 0,
  /* 3939 */ 12, 2, 12, 2, 12, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 3962 */ 40, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 3985 */ 40, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4007 */ 23, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4023 */ 23, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4036 */ 10, 4, 10, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4050 */ 0, 15, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4063 */ 10, 7, 10, 7, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4077 */ 0, 15, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4089 */ 21, 4, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4099 */ 0, 5, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4109 */ 21, 4, 4, 4, 4, 4, 4, 4, 0,
  /* 4118 */ 0, 15, 0, 10, 4, 4, 4, 4, 4, 4, 0,
  /* 4129 */ 15, 3, 15, 7, 15, 11, 4, 4, 4, 4, 4, 4, 0,
  /* 4142 */ 21, 4, 4, 4, 4, 4, 4, 0,
  /* 4150 */ 0, 15, 0, 27, 8, 4, 4, 4, 4, 4, 0,
  /* 4161 */ 0, 15, 0, 10, 4, 4, 4, 4, 4, 0,
  /* 4171 */ 15, 0, 15, 7, 10, 4, 4, 4, 4, 4, 0,
  /* 4182 */ 15, 2, 15, 7, 10, 4, 4, 4, 4, 4, 0,
  /* 4193 */ 15, 1, 15, 7, 15, 7, 10, 4, 4, 4, 4, 4, 0,
  /* 4206 */ 21, 4, 4, 4, 4, 4, 0,
  /* 4213 */ 0, 15, 0, 27, 8, 4, 4, 4, 4, 0,
  /* 4223 */ 15, 0, 15, 7, 27, 8, 4, 4, 4, 4, 0,
  /* 4234 */ 15, 2, 15, 7, 27, 8, 4, 4, 4, 4, 0,
  /* 4245 */ 15, 1, 15, 7, 15, 7, 27, 8, 4, 4, 4, 4, 0,
  /* 4258 */ 10, 4, 4, 4, 10, 4, 4, 4, 4, 0,
  /* 4268 */ 10, 4, 9, 4, 10, 4, 10, 4, 4, 4, 4, 0,
  /* 4280 */ 10, 4, 5, 5, 10, 4, 4, 4, 4, 0,
  /* 4290 */ 15, 0, 15, 7, 10, 4, 4, 4, 4, 0,
  /* 4300 */ 15, 2, 15, 7, 10, 4, 4, 4, 4, 0,
  /* 4310 */ 15, 1, 15, 7, 15, 7, 10, 4, 4, 4, 4, 0,
  /* 4322 */ 12, 4, 4, 4, 12, 4, 4, 4, 4, 0,
  /* 4332 */ 12, 4, 9, 4, 10, 4, 12, 4, 4, 4, 4, 0,
  /* 4344 */ 12, 4, 5, 5, 12, 4, 4, 4, 4, 0,
  /* 4354 */ 13, 4, 4, 4, 13, 4, 4, 4, 4, 0,
  /* 4364 */ 21, 4, 4, 4, 4, 0,
  /* 4370 */ 23, 3, 3, 3, 3, 5, 4, 4, 4, 0,
  /* 4380 */ 21, 3, 3, 5, 4, 4, 4, 0,
  /* 4388 */ 23, 4, 4, 4, 4, 5, 4, 4, 4, 0,
  /* 4398 */ 21, 4, 4, 5, 4, 4, 4, 0,
  /* 4406 */ 23, 4, 4, 4, 4, 5, 5, 4, 4, 4, 0,
  /* 4417 */ 21, 5, 5, 5, 4, 4, 4, 0,
  /* 4425 */ 23, 7, 7, 7, 7, 5, 5, 4, 4, 4, 0,
  /* 4436 */ 23, 7, 7, 7, 7, 5, 4, 4, 4, 0,
  /* 4446 */ 10, 7, 9, 3, 9, 3, 10, 7, 4, 4, 4, 0,
  /* 4458 */ 10, 7, 10, 3, 10, 3, 10, 7, 4, 4, 4, 0,
  /* 4470 */ 10, 7, 10, 3, 11, 3, 10, 7, 4, 4, 4, 0,
  /* 4482 */ 10, 7, 9, 4, 10, 4, 10, 7, 4, 4, 4, 0,
  /* 4494 */ 10, 7, 5, 5, 10, 7, 4, 4, 4, 0,
  /* 4504 */ 10, 7, 10, 6, 10, 6, 10, 7, 4, 4, 4, 0,
  /* 4516 */ 10, 7, 10, 6, 11, 6, 10, 7, 4, 4, 4, 0,
  /* 4528 */ 10, 7, 7, 7, 10, 7, 4, 4, 4, 0,
  /* 4538 */ 10, 7, 9, 7, 9, 7, 10, 7, 4, 4, 4, 0,
  /* 4550 */ 12, 7, 9, 3, 9, 3, 12, 7, 4, 4, 4, 0,
  /* 4562 */ 12, 7, 10, 3, 10, 3, 12, 7, 4, 4, 4, 0,
  /* 4574 */ 12, 7, 10, 3, 11, 3, 12, 7, 4, 4, 4, 0,
  /* 4586 */ 12, 7, 9, 4, 10, 4, 12, 7, 4, 4, 4, 0,
  /* 4598 */ 12, 7, 5, 5, 12, 7, 4, 4, 4, 0,
  /* 4608 */ 12, 7, 10, 6, 10, 6, 12, 7, 4, 4, 4, 0,
  /* 4620 */ 12, 7, 10, 6, 11, 6, 12, 7, 4, 4, 4, 0,
  /* 4632 */ 12, 7, 7, 7, 12, 7, 4, 4, 4, 0,
  /* 4642 */ 12, 7, 9, 7, 9, 7, 12, 7, 4, 4, 4, 0,
  /* 4654 */ 13, 7, 9, 3, 9, 3, 13, 7, 4, 4, 4, 0,
  /* 4666 */ 13, 7, 10, 3, 10, 3, 13, 7, 4, 4, 4, 0,
  /* 4678 */ 13, 7, 10, 6, 10, 6, 13, 7, 4, 4, 4, 0,
  /* 4690 */ 13, 7, 7, 7, 13, 7, 4, 4, 4, 0,
  /* 4700 */ 15, 3, 15, 7, 15, 7, 15, 7, 4, 4, 4, 0,
  /* 4712 */ 10, 8, 8, 8, 10, 8, 4, 4, 4, 0,
  /* 4722 */ 0, 15, 0, 27, 8, 4, 4, 4, 0,
  /* 4731 */ 0, 15, 2, 27, 8, 4, 4, 4, 0,
  /* 4740 */ 15, 0, 15, 7, 27, 8, 4, 4, 4, 0,
  /* 4750 */ 15, 2, 15, 7, 27, 8, 4, 4, 4, 0,
  /* 4760 */ 15, 1, 15, 7, 15, 7, 27, 8, 4, 4, 4, 0,
  /* 4772 */ 15, 0, 4, 15, 9, 11, 4, 4, 4, 0,
  /* 4782 */ 0, 15, 2, 4, 15, 9, 11, 4, 4, 4, 0,
  /* 4793 */ 15, 1, 15, 7, 15, 9, 11, 4, 4, 4, 0,
  /* 4804 */ 15, 2, 15, 7, 15, 9, 11, 4, 4, 4, 0,
  /* 4815 */ 15, 1, 15, 7, 15, 7, 15, 9, 11, 4, 4, 4, 0,
  /* 4828 */ 15, 3, 15, 7, 15, 11, 4, 4, 4, 0,
  /* 4838 */ 15, 0, 4, 15, 9, 15, 15, 11, 4, 4, 4, 0,
  /* 4850 */ 0, 15, 2, 4, 15, 9, 15, 15, 11, 4, 4, 4, 0,
  /* 4863 */ 15, 1, 15, 7, 15, 9, 15, 15, 11, 4, 4, 4, 0,
  /* 4876 */ 15, 2, 15, 7, 15, 9, 15, 15, 11, 4, 4, 4, 0,
  /* 4889 */ 15, 1, 15, 7, 15, 7, 15, 9, 15, 15, 11, 4, 4, 4, 0,
  /* 4904 */ 15, 0, 4, 15, 9, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 4918 */ 0, 15, 2, 4, 15, 9, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 4933 */ 15, 1, 15, 7, 15, 9, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 4948 */ 15, 2, 15, 7, 15, 9, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 4963 */ 15, 1, 15, 7, 15, 7, 15, 9, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 4980 */ 15, 0, 4, 15, 9, 15, 15, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 4996 */ 0, 15, 2, 4, 15, 9, 15, 15, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 5013 */ 15, 1, 15, 7, 15, 9, 15, 15, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 5030 */ 15, 2, 15, 7, 15, 9, 15, 15, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 5047 */ 15, 1, 15, 7, 15, 7, 15, 9, 15, 15, 15, 15, 15, 15, 11, 4, 4, 4, 0,
  /* 5066 */ 16, 4, 16, 4, 16, 4, 4, 4, 0,
  /* 5075 */ 15, 3, 15, 11, 15, 19, 4, 4, 4, 0,
  /* 5085 */ 15, 3, 15, 12, 15, 19, 4, 4, 4, 0,
  /* 5095 */ 23, 3, 3, 3, 3, 5, 4, 4, 0,
  /* 5104 */ 21, 3, 3, 5, 4, 4, 0,
  /* 5111 */ 23, 4, 4, 4, 4, 5, 4, 4, 0,
  /* 5120 */ 21, 4, 4, 5, 4, 4, 0,
  /* 5127 */ 23, 4, 4, 4, 4, 5, 5, 4, 4, 0,
  /* 5137 */ 21, 5, 5, 5, 4, 4, 0,
  /* 5144 */ 23, 7, 7, 7, 7, 5, 5, 4, 4, 0,
  /* 5154 */ 23, 7, 7, 7, 7, 5, 4, 4, 0,
  /* 5163 */ 13, 6, 13, 6, 4, 13, 6, 4, 4, 0,
  /* 5173 */ 13, 6, 13, 6, 13, 6, 13, 6, 4, 4, 0,
  /* 5184 */ 21, 7, 1, 7, 4, 4, 0,
  /* 5191 */ 21, 7, 1, 4, 7, 4, 4, 0,
  /* 5199 */ 21, 4, 15, 3, 15, 7, 4, 4, 0,
  /* 5208 */ 21, 15, 3, 15, 7, 4, 4, 0,
  /* 5216 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 4, 4, 0,
  /* 5228 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 12, 15, 7, 15, 7, 15, 7, 15, 7, 4, 4, 0,
  /* 5250 */ 22, 15, 3, 15, 7, 15, 7, 15, 12, 15, 7, 15, 7, 15, 7, 4, 4, 0,
  /* 5268 */ 21, 15, 3, 15, 7, 15, 12, 15, 7, 15, 7, 4, 4, 0,
  /* 5282 */ 15, 3, 15, 7, 45, 7, 45, 7, 4, 4, 0,
  /* 5293 */ 50, 8, 50, 8, 4, 4, 0,
  /* 5300 */ 21, 4, 4, 4, 4, 10, 4, 4, 0,
  /* 5309 */ 0, 14, 10, 1, 10, 4, 10, 4, 4, 0,
  /* 5319 */ 21, 10, 4, 4, 10, 4, 10, 4, 4, 0,
  /* 5329 */ 21, 10, 4, 4, 10, 4, 10, 4, 10, 4, 4, 0,
  /* 5341 */ 10, 4, 10, 4, 10, 4, 10, 4, 4, 0,
  /* 5351 */ 0, 14, 9, 1, 9, 5, 10, 4, 4, 0,
  /* 5361 */ 0, 14, 10, 1, 10, 5, 10, 4, 4, 0,
  /* 5371 */ 0, 14, 11, 1, 11, 4, 11, 4, 4, 0,
  /* 5381 */ 11, 4, 11, 4, 11, 4, 11, 4, 4, 0,
  /* 5391 */ 0, 14, 11, 1, 11, 5, 11, 4, 4, 0,
  /* 5401 */ 16, 1, 16, 1, 12, 4, 4, 0,
  /* 5409 */ 0, 14, 12, 1, 12, 4, 12, 4, 4, 0,
  /* 5419 */ 12, 4, 12, 4, 12, 4, 12, 4, 4, 0,
  /* 5429 */ 13, 4, 13, 4, 12, 4, 12, 4, 4, 0,
  /* 5439 */ 47, 1, 47, 1, 13, 4, 4, 0,
  /* 5447 */ 13, 4, 13, 4, 13, 4, 13, 4, 4, 0,
  /* 5457 */ 16, 4, 16, 4, 13, 4, 13, 4, 4, 0,
  /* 5467 */ 16, 4, 16, 4, 13, 4, 4, 0,
  /* 5475 */ 0, 43, 9, 1, 14, 4, 4, 0,
  /* 5483 */ 0, 43, 10, 1, 14, 4, 4, 0,
  /* 5491 */ 0, 43, 11, 1, 14, 4, 4, 0,
  /* 5499 */ 0, 43, 12, 1, 14, 4, 4, 0,
  /* 5507 */ 0, 43, 28, 1, 14, 4, 4, 0,
  /* 5515 */ 40, 4, 4, 4, 4, 4, 4, 4, 4, 15, 4, 4, 0,
  /* 5528 */ 23, 4, 4, 4, 4, 15, 4, 4, 0,
  /* 5537 */ 21, 4, 4, 15, 4, 4, 0,
  /* 5544 */ 40, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 4, 0,
  /* 5565 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 4, 0,
  /* 5578 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 15, 4, 4, 0,
  /* 5591 */ 21, 8, 8, 15, 4, 4, 0,
  /* 5598 */ 0, 15, 4, 15, 11, 15, 15, 4, 4, 0,
  /* 5608 */ 0, 15, 4, 15, 11, 15, 15, 15, 15, 4, 4, 0,
  /* 5620 */ 0, 15, 4, 15, 11, 15, 15, 15, 15, 15, 15, 4, 4, 0,
  /* 5634 */ 13, 4, 13, 4, 16, 4, 4, 0,
  /* 5642 */ 16, 4, 16, 4, 16, 4, 16, 4, 4, 0,
  /* 5652 */ 17, 17, 4, 4, 0,
  /* 5657 */ 15, 0, 18, 4, 4, 0,
  /* 5663 */ 0, 15, 4, 15, 11, 15, 19, 4, 4, 0,
  /* 5673 */ 21, 4, 4, 0,
  /* 5677 */ 23, 3, 3, 3, 3, 5, 4, 0,
  /* 5685 */ 21, 3, 3, 5, 4, 0,
  /* 5691 */ 0, 31, 3, 1, 15, 3, 5, 4, 0,
  /* 5700 */ 23, 4, 4, 4, 4, 5, 4, 0,
  /* 5708 */ 21, 4, 4, 5, 4, 0,
  /* 5714 */ 23, 4, 4, 4, 4, 5, 5, 4, 0,
  /* 5723 */ 21, 5, 5, 5, 4, 0,
  /* 5729 */ 23, 7, 7, 7, 7, 5, 5, 4, 0,
  /* 5738 */ 0, 50, 8, 50, 8, 5, 5, 4, 0,
  /* 5747 */ 59, 5, 5, 4, 0,
  /* 5752 */ 23, 7, 7, 7, 7, 5, 4, 0,
  /* 5760 */ 50, 8, 50, 8, 50, 8, 5, 4, 0,
  /* 5769 */ 0, 14, 9, 1, 10, 4, 9, 5, 4, 0,
  /* 5779 */ 0, 14, 9, 1, 9, 5, 9, 5, 4, 0,
  /* 5789 */ 9, 5, 9, 5, 9, 5, 9, 5, 4, 0,
  /* 5799 */ 0, 14, 10, 1, 10, 4, 10, 5, 4, 0,
  /* 5809 */ 0, 14, 10, 1, 10, 5, 10, 5, 4, 0,
  /* 5819 */ 10, 5, 10, 5, 10, 5, 10, 5, 4, 0,
  /* 5829 */ 0, 14, 11, 1, 11, 4, 11, 5, 4, 0,
  /* 5839 */ 0, 14, 11, 1, 11, 5, 11, 5, 4, 0,
  /* 5849 */ 11, 5, 11, 5, 11, 5, 11, 5, 4, 0,
  /* 5859 */ 21, 5, 4, 0,
  /* 5863 */ 0, 15, 4, 9, 6, 9, 6, 9, 6, 9, 6, 4, 0,
  /* 5876 */ 13, 6, 13, 6, 13, 6, 13, 6, 4, 0,
  /* 5886 */ 0, 15, 4, 7, 7, 7, 7, 7, 7, 7, 7, 4, 0,
  /* 5899 */ 50, 8, 7, 4, 0,
  /* 5904 */ 21, 10, 4, 4, 10, 7, 4, 0,
  /* 5912 */ 0, 14, 10, 1, 10, 4, 10, 7, 4, 0,
  /* 5922 */ 0, 14, 9, 1, 9, 5, 10, 7, 4, 0,
  /* 5932 */ 0, 14, 10, 1, 10, 5, 10, 7, 4, 0,
  /* 5942 */ 0, 14, 11, 1, 11, 4, 11, 7, 4, 0,
  /* 5952 */ 0, 14, 11, 1, 11, 5, 11, 7, 4, 0,
  /* 5962 */ 0, 14, 12, 1, 12, 4, 12, 7, 4, 0,
  /* 5972 */ 12, 7, 12, 7, 12, 7, 12, 7, 4, 0,
  /* 5982 */ 15, 3, 31, 3, 1, 15, 7, 4, 0,
  /* 5991 */ 44, 7, 15, 3, 15, 7, 4, 0,
  /* 5999 */ 21, 15, 3, 15, 7, 4, 0,
  /* 6006 */ 15, 3, 31, 3, 1, 15, 7, 15, 7, 4, 0,
  /* 6017 */ 0, 4, 15, 3, 15, 7, 15, 7, 4, 0,
  /* 6027 */ 21, 15, 3, 4, 15, 7, 15, 7, 4, 0,
  /* 6037 */ 15, 3, 31, 3, 1, 15, 7, 15, 7, 15, 7, 4, 0,
  /* 6050 */ 45, 7, 15, 3, 15, 7, 15, 7, 15, 7, 4, 0,
  /* 6062 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 4, 0,
  /* 6073 */ 15, 2, 4, 15, 7, 15, 7, 15, 7, 4, 0,
  /* 6084 */ 0, 4, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 4, 0,
  /* 6098 */ 15, 1, 25, 7, 4, 0,
  /* 6104 */ 15, 3, 26, 7, 4, 0,
  /* 6110 */ 15, 3, 44, 7, 4, 0,
  /* 6116 */ 15, 3, 44, 7, 44, 7, 4, 0,
  /* 6124 */ 15, 3, 15, 7, 44, 7, 44, 7, 4, 0,
  /* 6134 */ 15, 3, 15, 7, 45, 7, 45, 7, 4, 0,
  /* 6144 */ 50, 8, 8, 4, 0,
  /* 6149 */ 21, 9, 5, 4, 9, 8, 4, 0,
  /* 6157 */ 0, 14, 9, 1, 10, 4, 9, 8, 4, 0,
  /* 6167 */ 0, 14, 9, 1, 9, 5, 9, 8, 4, 0,
  /* 6177 */ 0, 14, 10, 1, 10, 4, 10, 8, 4, 0,
  /* 6187 */ 0, 14, 10, 1, 10, 5, 10, 8, 4, 0,
  /* 6197 */ 0, 14, 11, 1, 11, 4, 11, 8, 4, 0,
  /* 6207 */ 0, 14, 11, 1, 11, 5, 11, 8, 4, 0,
  /* 6217 */ 11, 8, 11, 8, 11, 8, 11, 8, 4, 0,
  /* 6227 */ 50, 8, 50, 8, 5, 36, 1, 50, 8, 4, 0,
  /* 6238 */ 50, 8, 4, 50, 8, 36, 1, 50, 8, 4, 0,
  /* 6249 */ 50, 8, 50, 8, 5, 50, 8, 36, 1, 50, 8, 4, 0,
  /* 6262 */ 50, 8, 5, 50, 8, 50, 8, 36, 1, 50, 8, 4, 0,
  /* 6275 */ 50, 8, 50, 8, 50, 8, 50, 8, 36, 1, 50, 8, 4, 0,
  /* 6289 */ 50, 8, 50, 8, 4, 50, 1, 50, 8, 4, 0,
  /* 6300 */ 50, 8, 50, 8, 5, 5, 50, 1, 50, 8, 4, 0,
  /* 6312 */ 50, 8, 50, 8, 5, 50, 1, 50, 8, 4, 0,
  /* 6323 */ 50, 8, 7, 50, 1, 50, 8, 4, 0,
  /* 6332 */ 50, 8, 8, 50, 1, 50, 8, 4, 0,
  /* 6341 */ 50, 8, 4, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6352 */ 50, 8, 5, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6363 */ 50, 8, 50, 8, 7, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6376 */ 50, 8, 50, 8, 8, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6389 */ 50, 8, 7, 50, 8, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6402 */ 50, 8, 8, 50, 8, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6415 */ 50, 8, 50, 8, 50, 8, 50, 8, 50, 1, 50, 8, 4, 0,
  /* 6429 */ 50, 8, 50, 8, 4, 50, 8, 4, 0,
  /* 6438 */ 50, 8, 50, 8, 5, 5, 50, 8, 4, 0,
  /* 6448 */ 50, 8, 50, 8, 50, 8, 5, 50, 8, 4, 0,
  /* 6459 */ 50, 8, 50, 8, 7, 50, 8, 4, 0,
  /* 6468 */ 50, 8, 50, 8, 8, 50, 8, 4, 0,
  /* 6477 */ 50, 8, 4, 50, 8, 50, 8, 4, 0,
  /* 6486 */ 50, 8, 50, 8, 5, 50, 8, 50, 8, 4, 0,
  /* 6497 */ 50, 8, 50, 8, 7, 50, 8, 50, 8, 4, 0,
  /* 6508 */ 50, 8, 50, 8, 8, 50, 8, 50, 8, 4, 0,
  /* 6519 */ 50, 8, 5, 50, 8, 50, 8, 50, 8, 4, 0,
  /* 6530 */ 50, 8, 7, 50, 8, 50, 8, 50, 8, 4, 0,
  /* 6541 */ 50, 8, 8, 50, 8, 50, 8, 50, 8, 4, 0,
  /* 6552 */ 50, 8, 50, 8, 50, 8, 50, 8, 50, 8, 4, 0,
  /* 6564 */ 50, 8, 5, 14, 50, 8, 4, 0,
  /* 6572 */ 21, 10, 4, 4, 10, 4, 0,
  /* 6579 */ 21, 10, 1, 10, 1, 10, 4, 10, 4, 0,
  /* 6589 */ 21, 11, 3, 4, 10, 4, 10, 4, 0,
  /* 6598 */ 21, 10, 4, 4, 10, 4, 10, 4, 0,
  /* 6607 */ 10, 4, 15, 1, 7, 53, 7, 15, 11, 15, 15, 10, 4, 0,
  /* 6621 */ 43, 9, 8, 43, 9, 8, 43, 9, 1, 43, 10, 4, 0,
  /* 6634 */ 43, 11, 6, 43, 11, 6, 43, 10, 1, 43, 10, 4, 0,
  /* 6647 */ 43, 10, 4, 43, 10, 4, 43, 10, 4, 0,
  /* 6657 */ 21, 11, 1, 11, 1, 11, 4, 11, 4, 0,
  /* 6667 */ 12, 4, 16, 1, 12, 4, 0,
  /* 6674 */ 0, 14, 16, 1, 4, 4, 12, 4, 0,
  /* 6683 */ 21, 12, 1, 12, 1, 12, 4, 12, 4, 0,
  /* 6693 */ 16, 1, 16, 1, 12, 4, 12, 4, 0,
  /* 6702 */ 21, 12, 4, 16, 1, 12, 4, 12, 4, 0,
  /* 6712 */ 13, 4, 16, 1, 12, 4, 12, 4, 0,
  /* 6721 */ 0, 16, 1, 4, 4, 12, 4, 12, 4, 0,
  /* 6731 */ 0, 16, 1, 4, 4, 13, 4, 12, 4, 0,
  /* 6741 */ 0, 16, 1, 14, 12, 4, 0,
  /* 6748 */ 21, 15, 3, 15, 7, 15, 12, 4, 0,
  /* 6757 */ 22, 15, 3, 15, 7, 15, 7, 15, 12, 4, 0,
  /* 6768 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 12, 4, 0,
  /* 6781 */ 13, 4, 47, 1, 13, 4, 0,
  /* 6788 */ 0, 14, 16, 1, 4, 4, 13, 4, 0,
  /* 6797 */ 0, 14, 47, 1, 4, 4, 13, 4, 0,
  /* 6806 */ 47, 1, 47, 1, 13, 4, 13, 4, 0,
  /* 6815 */ 21, 13, 4, 47, 1, 13, 4, 13, 4, 0,
  /* 6825 */ 16, 4, 47, 1, 13, 4, 13, 4, 0,
  /* 6834 */ 0, 47, 1, 4, 4, 13, 4, 13, 4, 0,
  /* 6844 */ 16, 4, 16, 4, 13, 4, 13, 4, 0,
  /* 6853 */ 0, 4, 4, 16, 4, 13, 4, 0,
  /* 6861 */ 0, 47, 1, 4, 4, 16, 4, 13, 4, 0,
  /* 6871 */ 13, 4, 16, 4, 13, 4, 0,
  /* 6878 */ 16, 4, 16, 4, 13, 4, 0,
  /* 6885 */ 0, 47, 1, 14, 13, 4, 0,
  /* 6892 */ 21, 12, 4, 14, 1, 14, 4, 0,
  /* 6900 */ 21, 13, 4, 14, 1, 14, 4, 0,
  /* 6908 */ 0, 50, 8, 5, 14, 4, 0,
  /* 6915 */ 21, 4, 14, 14, 4, 0,
  /* 6921 */ 21, 5, 14, 14, 4, 0,
  /* 6927 */ 40, 4, 4, 4, 4, 4, 4, 4, 4, 15, 4, 0,
  /* 6939 */ 23, 4, 4, 4, 4, 15, 4, 0,
  /* 6947 */ 21, 4, 4, 15, 4, 0,
  /* 6953 */ 40, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 0,
  /* 6973 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 15, 4, 0,
  /* 6985 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 15, 4, 0,
  /* 6997 */ 21, 8, 8, 15, 4, 0,
  /* 7003 */ 21, 15, 3, 15, 11, 15, 15, 4, 0,
  /* 7012 */ 15, 1, 15, 7, 15, 12, 15, 15, 4, 0,
  /* 7022 */ 0, 15, 4, 15, 11, 15, 15, 15, 15, 4, 0,
  /* 7033 */ 0, 15, 4, 15, 11, 15, 15, 15, 15, 15, 15, 4, 0,
  /* 7046 */ 5, 19, 15, 4, 0,
  /* 7051 */ 0, 14, 47, 1, 4, 4, 16, 4, 0,
  /* 7060 */ 0, 14, 4, 4, 16, 4, 0,
  /* 7067 */ 13, 4, 16, 4, 0,
  /* 7072 */ 16, 4, 16, 4, 16, 4, 0,
  /* 7079 */ 4, 17, 4, 0,
  /* 7083 */ 0, 15, 4, 15, 12, 15, 17, 4, 0,
  /* 7092 */ 17, 17, 4, 0,
  /* 7096 */ 0, 18, 4, 0,
  /* 7100 */ 14, 18, 4, 0,
  /* 7104 */ 27, 4, 0,
  /* 7107 */ 5, 28, 35, 4, 0,
  /* 7112 */ 43, 10, 7, 43, 10, 7, 43, 11, 48, 43, 11, 48, 4, 0,
  /* 7126 */ 8, 52, 4, 0,
  /* 7130 */ 4, 27, 1, 54, 4, 0,
  /* 7136 */ 0, 27, 1, 4, 54, 4, 0,
  /* 7143 */ 4, 27, 1, 55, 4, 0,
  /* 7149 */ 0, 27, 1, 4, 55, 4, 0,
  /* 7156 */ 15, 3, 59, 4, 0,
  /* 7161 */ 5, 59, 4, 0,
  /* 7165 */ 21, 15, 3, 15, 7, 59, 4, 0,
  /* 7173 */ 5, 36, 1, 5, 0,
  /* 7178 */ 5, 50, 1, 5, 0,
  /* 7183 */ 1, 27, 3, 5, 0,
  /* 7188 */ 0, 15, 3, 31, 3, 1, 14, 31, 3, 5, 0,
  /* 7199 */ 21, 4, 1, 4, 5, 0,
  /* 7205 */ 50, 8, 50, 8, 4, 5, 0,
  /* 7212 */ 10, 4, 10, 4, 10, 4, 10, 4, 5, 0,
  /* 7222 */ 16, 4, 16, 4, 13, 4, 5, 0,
  /* 7230 */ 36, 1, 36, 1, 5, 5, 0,
  /* 7237 */ 50, 1, 50, 1, 5, 5, 0,
  /* 7244 */ 21, 2, 5, 2, 5, 5, 0,
  /* 7251 */ 0, 14, 5, 5, 5, 5, 5, 5, 5, 5, 0,
  /* 7262 */ 5, 14, 5, 5, 5, 5, 5, 5, 5, 5, 0,
  /* 7273 */ 21, 5, 5, 14, 5, 5, 5, 5, 0,
  /* 7282 */ 21, 15, 3, 15, 7, 5, 5, 0,
  /* 7290 */ 21, 5, 5, 14, 5, 5, 0,
  /* 7297 */ 39, 4, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 4, 9, 5, 0,
  /* 7315 */ 21, 9, 1, 9, 1, 9, 5, 9, 5, 0,
  /* 7325 */ 21, 10, 4, 4, 9, 5, 9, 5, 0,
  /* 7334 */ 40, 4, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 4, 9, 5, 9, 5, 0,
  /* 7356 */ 21, 9, 5, 4, 9, 5, 9, 5, 0,
  /* 7365 */ 49, 2, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 14, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 9, 5, 0,
  /* 7401 */ 28, 35, 9, 5, 9, 5, 0,
  /* 7408 */ 28, 35, 9, 5, 0,
  /* 7413 */ 43, 11, 6, 43, 11, 6, 43, 9, 1, 43, 9, 5, 0,
  /* 7426 */ 43, 10, 7, 43, 10, 7, 43, 9, 1, 43, 9, 5, 0,
  /* 7439 */ 31, 3, 1, 31, 3, 1, 15, 3, 43, 9, 5, 0,
  /* 7451 */ 43, 9, 5, 43, 9, 5, 43, 9, 5, 0,
  /* 7461 */ 15, 3, 31, 3, 1, 15, 7, 43, 9, 5, 0,
  /* 7472 */ 21, 10, 1, 10, 1, 10, 5, 10, 5, 0,
  /* 7482 */ 21, 11, 1, 11, 1, 11, 5, 11, 5, 0,
  /* 7492 */ 0, 15, 3, 31, 3, 1, 15, 11, 5, 0,
  /* 7502 */ 51, 3, 3, 14, 5, 0,
  /* 7508 */ 28, 35, 5, 0,
  /* 7512 */ 41, 41, 5, 0,
  /* 7516 */ 0, 15, 4, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7528 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7578 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7628 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7654 */ 21, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7676 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7702 */ 23, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7724 */ 21, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 0,
  /* 7740 */ 43, 9, 5, 43, 9, 5, 43, 9, 1, 43, 11, 6, 0,
  /* 7753 */ 43, 9, 8, 43, 9, 8, 43, 9, 1, 43, 11, 6, 0,
  /* 7766 */ 43, 10, 4, 43, 10, 4, 43, 10, 1, 43, 11, 6, 0,
  /* 7779 */ 43, 10, 7, 43, 10, 7, 43, 10, 1, 43, 11, 6, 0,
  /* 7792 */ 23, 4, 4, 4, 4, 5, 4, 7, 0,
  /* 7801 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 0,
  /* 7811 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 0,
  /* 7821 */ 23, 7, 7, 7, 7, 5, 4, 7, 0,
  /* 7830 */ 23, 4, 4, 4, 4, 5, 7, 0,
  /* 7838 */ 23, 4, 4, 4, 4, 5, 5, 7, 0,
  /* 7847 */ 23, 7, 7, 7, 7, 5, 5, 7, 0,
  /* 7856 */ 23, 7, 7, 7, 7, 5, 7, 0,
  /* 7864 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 0,
  /* 7874 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 0,
  /* 7885 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 0,
  /* 7896 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 0,
  /* 7906 */ 23, 4, 4, 4, 4, 5, 7, 7, 0,
  /* 7915 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 0,
  /* 7925 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 0,
  /* 7935 */ 23, 7, 7, 7, 7, 5, 7, 7, 0,
  /* 7944 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 7, 0,
  /* 7955 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 7, 0,
  /* 7967 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 7, 0,
  /* 7979 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 7, 0,
  /* 7990 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 0,
  /* 8000 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 0,
  /* 8011 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 0,
  /* 8022 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 0,
  /* 8032 */ 23, 7, 7, 7, 7, 4, 4, 4, 4, 4, 4, 7, 7, 7, 7, 0,
  /* 8048 */ 23, 7, 7, 7, 7, 4, 4, 4, 7, 7, 7, 7, 0,
  /* 8061 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 7, 7, 0,
  /* 8073 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 7, 7, 0,
  /* 8086 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 7, 7, 0,
  /* 8099 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 7, 7, 0,
  /* 8111 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 7, 0,
  /* 8122 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 7, 0,
  /* 8134 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 7, 0,
  /* 8146 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 7, 0,
  /* 8157 */ 21, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 0,
  /* 8179 */ 23, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 0,
  /* 8201 */ 23, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 0,
  /* 8217 */ 23, 4, 4, 4, 4, 5, 4, 7, 7, 7, 7, 7, 7, 0,
  /* 8231 */ 23, 4, 4, 4, 4, 5, 5, 4, 7, 7, 7, 7, 7, 7, 0,
  /* 8246 */ 23, 7, 7, 7, 7, 5, 5, 4, 7, 7, 7, 7, 7, 7, 0,
  /* 8261 */ 23, 7, 7, 7, 7, 5, 4, 7, 7, 7, 7, 7, 7, 0,
  /* 8275 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 0,
  /* 8288 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 7, 7, 7, 0,
  /* 8302 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 7, 7, 7, 0,
  /* 8316 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 7, 7, 7, 0,
  /* 8329 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8357 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 4, 4, 4, 4, 4, 4, 4, 4, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8383 */ 0, 15, 4, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8395 */ 23, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8445 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8495 */ 40, 7, 7, 7, 7, 7, 7, 7, 7, 9, 6, 9, 6, 9, 6, 9, 6, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8521 */ 23, 4, 4, 4, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8537 */ 23, 4, 4, 4, 4, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8554 */ 23, 7, 7, 7, 7, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8571 */ 23, 7, 7, 7, 7, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0,
  /* 8587 */ 9, 48, 7, 7, 0,
  /* 8592 */ 21, 10, 4, 4, 10, 7, 10, 7, 0,
  /* 8601 */ 11, 48, 10, 7, 10, 7, 0,
  /* 8608 */ 17, 10, 7, 0,
  /* 8612 */ 43, 9, 5, 43, 9, 5, 43, 9, 1, 43, 10, 7, 0,
  /* 8625 */ 43, 9, 8, 43, 9, 8, 43, 9, 1, 43, 10, 7, 0,
  /* 8638 */ 43, 11, 6, 43, 11, 6, 43, 10, 1, 43, 10, 7, 0,
  /* 8651 */ 43, 11, 48, 43, 11, 48, 43, 11, 1, 43, 10, 7, 0,
  /* 8664 */ 43, 11, 48, 43, 10, 7, 43, 10, 7, 0,
  /* 8674 */ 11, 48, 11, 48, 10, 7, 0,
  /* 8681 */ 12, 48, 11, 7, 11, 7, 0,
  /* 8688 */ 11, 48, 11, 7, 0,
  /* 8693 */ 13, 48, 12, 7, 12, 7, 0,
  /* 8700 */ 12, 48, 12, 7, 0,
  /* 8705 */ 21, 15, 1, 1, 15, 7, 0,
  /* 8712 */ 15, 3, 31, 3, 1, 15, 7, 0,
  /* 8720 */ 15, 3, 34, 1, 0, 4, 31, 3, 1, 15, 7, 0,
  /* 8732 */ 15, 3, 15, 12, 4, 31, 3, 1, 15, 7, 0,
  /* 8743 */ 15, 3, 15, 7, 31, 3, 1, 15, 7, 0,
  /* 8753 */ 15, 3, 14, 31, 3, 1, 15, 7, 0,
  /* 8762 */ 15, 3, 12, 2, 12, 2, 12, 2, 12, 2, 15, 7, 0,
  /* 8775 */ 15, 3, 15, 7, 12, 2, 12, 2, 12, 2, 12, 2, 15, 7, 0,
  /* 8790 */ 15, 3, 12, 2, 12, 2, 12, 2, 15, 7, 0,
  /* 8801 */ 15, 3, 15, 7, 12, 2, 12, 2, 12, 2, 15, 7, 0,
  /* 8814 */ 15, 3, 15, 7, 12, 2, 12, 2, 15, 7, 0,
  /* 8825 */ 31, 3, 1, 31, 3, 1, 15, 3, 15, 7, 0,
  /* 8836 */ 21, 4, 4, 4, 4, 4, 4, 4, 15, 3, 15, 7, 0,
  /* 8849 */ 44, 7, 44, 7, 15, 3, 15, 7, 0,
  /* 8858 */ 21, 46, 7, 46, 7, 15, 3, 15, 7, 0,
  /* 8868 */ 21, 15, 1, 31, 1, 1, 15, 7, 15, 7, 0,
  /* 8879 */ 15, 3, 31, 3, 1, 15, 7, 15, 7, 0,
  /* 8889 */ 15, 3, 31, 3, 1, 15, 7, 15, 7, 15, 7, 0,
  /* 8901 */ 0, 4, 15, 3, 15, 7, 15, 7, 15, 7, 0,
  /* 8912 */ 0, 4, 4, 15, 3, 15, 7, 15, 7, 15, 7, 0,
  /* 8924 */ 45, 7, 15, 3, 15, 7, 15, 7, 15, 7, 0,
  /* 8935 */ 23, 46, 7, 46, 7, 46, 7, 46, 7, 15, 3, 15, 7, 15, 7, 15, 7, 0,
  /* 8953 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 0,
  /* 8963 */ 15, 3, 4, 15, 7, 15, 7, 15, 7, 0,
  /* 8973 */ 0, 4, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 8986 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 8998 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9012 */ 0, 4, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9031 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9049 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9069 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9091 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9117 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 59, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9144 */ 21, 15, 3, 15, 7, 59, 15, 7, 15, 7, 15, 7, 15, 7, 0,
  /* 9159 */ 21, 15, 2, 15, 9, 15, 7, 0,
  /* 9167 */ 15, 3, 15, 7, 4, 4, 15, 11, 15, 7, 0,
  /* 9178 */ 15, 3, 15, 7, 4, 15, 11, 15, 7, 0,
  /* 9188 */ 15, 3, 15, 7, 15, 7, 4, 15, 11, 15, 7, 0,
  /* 9200 */ 15, 3, 15, 7, 15, 7, 15, 11, 15, 7, 0,
  /* 9211 */ 15, 3, 15, 11, 4, 4, 4, 15, 19, 15, 7, 0,
  /* 9223 */ 15, 3, 15, 7, 15, 11, 4, 4, 4, 15, 19, 15, 7, 0,
  /* 9237 */ 15, 3, 15, 11, 4, 4, 15, 19, 15, 7, 0,
  /* 9248 */ 15, 3, 15, 11, 15, 15, 4, 4, 15, 19, 15, 7, 0,
  /* 9261 */ 15, 3, 15, 11, 4, 15, 19, 15, 7, 0,
  /* 9271 */ 15, 3, 15, 11, 15, 15, 4, 15, 19, 15, 7, 0,
  /* 9283 */ 15, 1, 25, 7, 0,
  /* 9288 */ 15, 3, 25, 7, 0,
  /* 9293 */ 15, 3, 25, 7, 25, 7, 0,
  /* 9300 */ 15, 3, 26, 7, 0,
  /* 9305 */ 15, 3, 26, 7, 26, 7, 0,
  /* 9312 */ 27, 7, 0,
  /* 9315 */ 15, 3, 30, 7, 30, 7, 0,
  /* 9322 */ 15, 3, 15, 7, 31, 3, 1, 42, 7, 0,
  /* 9332 */ 15, 3, 42, 7, 0,
  /* 9337 */ 15, 3, 15, 7, 42, 7, 0,
  /* 9344 */ 15, 3, 42, 7, 42, 7, 0,
  /* 9351 */ 15, 3, 44, 7, 0,
  /* 9356 */ 15, 3, 31, 3, 1, 15, 7, 44, 7, 0,
  /* 9366 */ 21, 15, 3, 15, 7, 44, 7, 0,
  /* 9374 */ 15, 3, 44, 7, 44, 7, 0,
  /* 9381 */ 15, 3, 15, 7, 44, 7, 44, 7, 0,
  /* 9390 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 44, 7, 44, 7, 0,
  /* 9404 */ 15, 3, 15, 7, 45, 7, 45, 7, 0,
  /* 9413 */ 15, 3, 46, 7, 0,
  /* 9418 */ 15, 3, 31, 3, 1, 15, 7, 46, 7, 0,
  /* 9428 */ 15, 3, 15, 7, 46, 7, 0,
  /* 9435 */ 15, 3, 15, 7, 15, 7, 46, 7, 0,
  /* 9444 */ 21, 15, 3, 15, 7, 46, 7, 46, 7, 0,
  /* 9454 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 46, 7, 46, 7, 46, 7, 46, 7, 0,
  /* 9472 */ 48, 7, 0,
  /* 9475 */ 21, 8, 8, 8, 8, 8, 8, 0,
  /* 9483 */ 52, 8, 8, 0,
  /* 9487 */ 9, 8, 9, 8, 9, 5, 9, 8, 0,
  /* 9496 */ 21, 9, 5, 4, 9, 8, 9, 8, 0,
  /* 9505 */ 9, 8, 9, 8, 9, 8, 9, 8, 0,
  /* 9514 */ 17, 9, 8, 0,
  /* 9518 */ 43, 10, 4, 43, 10, 4, 43, 9, 1, 43, 9, 8, 0,
  /* 9531 */ 43, 11, 6, 43, 11, 6, 43, 9, 1, 43, 9, 8, 0,
  /* 9544 */ 43, 10, 7, 43, 10, 7, 43, 9, 1, 43, 9, 8, 0,
  /* 9557 */ 10, 8, 10, 8, 10, 5, 10, 8, 0,
  /* 9566 */ 10, 8, 10, 8, 10, 8, 10, 8, 0,
  /* 9575 */ 11, 8, 11, 8, 11, 5, 11, 8, 0,
  /* 9584 */ 15, 3, 15, 7, 31, 3, 1, 15, 9, 0,
  /* 9594 */ 15, 3, 15, 7, 15, 7, 31, 3, 1, 15, 9, 0,
  /* 9606 */ 0, 15, 3, 14, 31, 3, 1, 15, 9, 0,
  /* 9616 */ 0, 15, 3, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9628 */ 0, 15, 3, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9642 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9658 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9676 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9696 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9718 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 0,
  /* 9742 */ 21, 1, 15, 1, 15, 9, 0,
  /* 9749 */ 15, 3, 15, 7, 15, 7, 31, 3, 3, 15, 9, 0,
  /* 9761 */ 46, 7, 46, 7, 15, 3, 15, 9, 0,
  /* 9770 */ 15, 15, 15, 3, 15, 7, 15, 9, 0,
  /* 9779 */ 15, 3, 15, 7, 15, 7, 15, 9, 0,
  /* 9788 */ 15, 1, 15, 7, 15, 7, 15, 7, 15, 9, 0,
  /* 9799 */ 15, 1, 15, 7, 42, 7, 15, 9, 0,
  /* 9808 */ 15, 2, 15, 7, 42, 7, 15, 9, 0,
  /* 9817 */ 15, 3, 15, 7, 15, 7, 46, 7, 15, 9, 0,
  /* 9828 */ 0, 15, 3, 15, 7, 14, 15, 9, 0,
  /* 9837 */ 0, 15, 3, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9848 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9861 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9874 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9889 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9906 */ 22, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9923 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9942 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9963 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 9984 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 10009 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 10038 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 10071 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 0,
  /* 10108 */ 15, 3, 15, 7, 31, 3, 1, 15, 11, 0,
  /* 10118 */ 15, 3, 46, 7, 31, 3, 1, 15, 11, 0,
  /* 10128 */ 15, 3, 4, 15, 7, 12, 2, 12, 2, 4, 15, 11, 0,
  /* 10141 */ 15, 3, 4, 15, 7, 12, 2, 4, 15, 11, 0,
  /* 10152 */ 15, 3, 15, 7, 4, 4, 4, 4, 15, 11, 0,
  /* 10163 */ 21, 15, 3, 4, 15, 7, 4, 4, 4, 15, 11, 0,
  /* 10175 */ 15, 3, 15, 7, 15, 7, 15, 7, 4, 4, 4, 15, 11, 0,
  /* 10189 */ 21, 4, 15, 3, 15, 7, 4, 4, 15, 11, 0,
  /* 10200 */ 21, 15, 3, 4, 15, 7, 4, 4, 15, 11, 0,
  /* 10211 */ 15, 3, 4, 15, 7, 4, 15, 11, 0,
  /* 10220 */ 15, 3, 15, 7, 15, 7, 4, 15, 11, 0,
  /* 10230 */ 21, 15, 3, 4, 15, 7, 15, 7, 15, 7, 4, 15, 11, 0,
  /* 10244 */ 21, 4, 4, 4, 4, 4, 4, 4, 15, 3, 15, 7, 15, 11, 0,
  /* 10259 */ 15, 3, 15, 7, 15, 7, 15, 11, 0,
  /* 10268 */ 15, 3, 15, 7, 15, 7, 15, 7, 15, 11, 0,
  /* 10279 */ 15, 3, 4, 15, 7, 15, 7, 15, 7, 15, 11, 0,
  /* 10291 */ 15, 3, 4, 4, 15, 7, 15, 7, 15, 7, 15, 11, 0,
  /* 10304 */ 23, 15, 7, 15, 7, 15, 7, 15, 3, 15, 12, 0,
  /* 10316 */ 22, 15, 7, 15, 7, 15, 3, 15, 12, 0,
  /* 10326 */ 21, 15, 7, 15, 3, 15, 12, 0,
  /* 10334 */ 23, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 3, 5, 15, 12, 0,
  /* 10355 */ 22, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 3, 5, 15, 12, 0,
  /* 10372 */ 21, 15, 7, 15, 7, 15, 7, 15, 3, 5, 15, 12, 0,
  /* 10385 */ 0, 15, 3, 15, 7, 5, 15, 12, 0,
  /* 10394 */ 0, 15, 3, 15, 7, 15, 7, 5, 15, 12, 0,
  /* 10405 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 5, 15, 12, 0,
  /* 10418 */ 21, 15, 3, 15, 7, 15, 12, 0,
  /* 10426 */ 0, 15, 3, 15, 7, 15, 7, 15, 12, 0,
  /* 10436 */ 22, 15, 3, 15, 7, 15, 7, 15, 12, 0,
  /* 10446 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 12, 0,
  /* 10458 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 12, 0,
  /* 10470 */ 0, 15, 3, 31, 3, 1, 14, 0,
  /* 10478 */ 0, 15, 3, 15, 7, 31, 3, 1, 14, 0,
  /* 10488 */ 21, 15, 3, 15, 7, 31, 3, 1, 14, 0,
  /* 10498 */ 0, 15, 3, 15, 7, 15, 7, 31, 3, 1, 14, 0,
  /* 10510 */ 22, 15, 3, 15, 7, 15, 7, 31, 3, 1, 14, 0,
  /* 10522 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 31, 3, 1, 14, 0,
  /* 10536 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 31, 3, 1, 14, 0,
  /* 10550 */ 0, 50, 1, 14, 0,
  /* 10555 */ 18, 4, 4, 4, 14, 0,
  /* 10561 */ 21, 4, 14, 14, 4, 4, 14, 0,
  /* 10569 */ 21, 5, 14, 14, 4, 4, 14, 0,
  /* 10577 */ 21, 4, 4, 14, 0,
  /* 10582 */ 21, 4, 14, 14, 4, 14, 0,
  /* 10589 */ 21, 5, 14, 14, 4, 14, 0,
  /* 10596 */ 40, 5, 5, 5, 5, 5, 5, 5, 5, 14, 0,
  /* 10607 */ 21, 5, 5, 14, 0,
  /* 10612 */ 21, 2, 9, 5, 9, 5, 14, 0,
  /* 10620 */ 18, 4, 14, 14, 14, 0,
  /* 10626 */ 18, 4, 4, 14, 14, 14, 14, 0,
  /* 10634 */ 0, 17, 17, 14, 0,
  /* 10639 */ 14, 18, 14, 0,
  /* 10643 */ 0, 15, 3, 15, 7, 59, 14, 0,
  /* 10651 */ 21, 15, 3, 15, 7, 59, 14, 0,
  /* 10659 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 59, 14, 0,
  /* 10671 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 59, 14, 0,
  /* 10683 */ 0, 15, 3, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10695 */ 0, 15, 3, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10709 */ 0, 15, 3, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10725 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10743 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10763 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10785 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10809 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 0,
  /* 10835 */ 0, 15, 4, 15, 12, 15, 15, 4, 15, 15, 0,
  /* 10846 */ 15, 4, 15, 7, 15, 12, 15, 15, 4, 15, 15, 0,
  /* 10858 */ 15, 3, 15, 7, 31, 3, 1, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10873 */ 15, 3, 15, 7, 15, 7, 31, 3, 3, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10890 */ 46, 7, 46, 7, 15, 3, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10904 */ 15, 3, 15, 7, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10916 */ 15, 3, 15, 7, 15, 7, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10930 */ 15, 3, 15, 7, 15, 7, 46, 7, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10946 */ 15, 3, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10959 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10977 */ 22, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 10999 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 11025 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 11055 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 11089 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 11127 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 9, 15, 15, 0,
  /* 11169 */ 15, 3, 15, 7, 15, 7, 15, 9, 15, 15, 0,
  /* 11180 */ 0, 15, 3, 14, 15, 9, 15, 15, 0,
  /* 11189 */ 0, 15, 3, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11200 */ 0, 15, 3, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11213 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11228 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11243 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11260 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11279 */ 22, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11298 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11319 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11342 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11365 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11392 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11423 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11458 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 15, 15, 0,
  /* 11497 */ 15, 3, 15, 7, 15, 11, 15, 15, 0,
  /* 11506 */ 49, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11545 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11580 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11611 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11638 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11661 */ 23, 15, 3, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11680 */ 22, 15, 3, 15, 7, 15, 9, 15, 7, 15, 7, 14, 15, 15, 0,
  /* 11695 */ 21, 15, 3, 15, 9, 15, 7, 14, 15, 15, 0,
  /* 11706 */ 15, 3, 15, 7, 15, 7, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11722 */ 15, 3, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11737 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11757 */ 22, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11781 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11809 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11841 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11877 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11917 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 9, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 11961 */ 49, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12005 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12045 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12081 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12113 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12141 */ 23, 15, 3, 15, 7, 15, 7, 15, 9, 15, 7, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12165 */ 22, 15, 3, 15, 7, 15, 9, 15, 7, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12185 */ 21, 15, 3, 15, 9, 15, 7, 14, 31, 3, 1, 15, 15, 15, 15, 0,
  /* 12201 */ 15, 3, 15, 7, 15, 7, 31, 3, 1, 15, 9, 15, 15, 15, 15, 0,
  /* 12217 */ 15, 3, 15, 7, 15, 7, 15, 9, 15, 15, 15, 15, 0,
  /* 12230 */ 0, 15, 4, 15, 11, 15, 15, 15, 15, 0,
  /* 12240 */ 0, 15, 4, 15, 11, 15, 15, 15, 15, 15, 15, 0,
  /* 12252 */ 4, 17, 0,
  /* 12255 */ 10, 7, 10, 7, 17, 0,
  /* 12261 */ 9, 8, 17, 0,
  /* 12265 */ 0, 14, 17, 0,
  /* 12269 */ 31, 3, 1, 31, 3, 1, 15, 3, 15, 8, 31, 3, 1, 15, 17, 0,
  /* 12285 */ 15, 3, 15, 7, 15, 7, 15, 8, 31, 3, 1, 15, 17, 0,
  /* 12299 */ 0, 15, 3, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12311 */ 0, 15, 3, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12325 */ 0, 15, 3, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12341 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12359 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12379 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12401 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12425 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 0,
  /* 12451 */ 15, 3, 15, 7, 15, 11, 15, 7, 31, 11, 1, 15, 17, 0,
  /* 12465 */ 15, 3, 15, 7, 15, 11, 15, 7, 15, 17, 0,
  /* 12476 */ 15, 0, 15, 8, 15, 17, 0,
  /* 12483 */ 31, 3, 1, 15, 3, 15, 8, 15, 17, 0,
  /* 12493 */ 0, 15, 1, 15, 7, 15, 7, 15, 8, 15, 17, 0,
  /* 12505 */ 15, 3, 15, 7, 15, 7, 15, 8, 15, 17, 0,
  /* 12516 */ 15, 1, 15, 9, 15, 17, 0,
  /* 12523 */ 0, 15, 3, 14, 15, 9, 15, 17, 0,
  /* 12532 */ 15, 3, 15, 7, 15, 11, 15, 17, 0,
  /* 12541 */ 0, 15, 3, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12552 */ 0, 15, 3, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12565 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12580 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12595 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12612 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12631 */ 22, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12650 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12671 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12694 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12717 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12744 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12775 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12810 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 15, 17, 0,
  /* 12849 */ 0, 15, 3, 15, 7, 15, 12, 15, 17, 0,
  /* 12859 */ 21, 15, 3, 15, 7, 15, 12, 15, 17, 0,
  /* 12869 */ 0, 15, 3, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12881 */ 22, 15, 3, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12893 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12907 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12921 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12937 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12953 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12971 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 12989 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 13009 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 13029 */ 0, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 13051 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 12, 15, 17, 0,
  /* 13073 */ 17, 17, 17, 0,
  /* 13077 */ 15, 0, 18, 0,
  /* 13081 */ 1, 18, 0,
  /* 13084 */ 15, 4, 18, 0,
  /* 13088 */ 14, 18, 0,
  /* 13091 */ 15, 2, 15, 12, 15, 18, 0,
  /* 13098 */ 0, 15, 0, 19, 0,
  /* 13103 */ 15, 1, 19, 0,
  /* 13107 */ 21, 14, 1, 14, 4, 19, 0,
  /* 13114 */ 15, 2, 15, 7, 19, 0,
  /* 13120 */ 15, 2, 15, 7, 15, 7, 19, 0,
  /* 13128 */ 15, 0, 15, 8, 19, 0,
  /* 13134 */ 15, 1, 15, 10, 19, 0,
  /* 13140 */ 15, 2, 15, 10, 19, 0,
  /* 13146 */ 1, 14, 19, 0,
  /* 13150 */ 15, 3, 15, 7, 15, 11, 4, 4, 4, 4, 4, 4, 15, 19, 0,
  /* 13165 */ 15, 3, 15, 7, 15, 11, 4, 4, 4, 15, 19, 0,
  /* 13177 */ 15, 3, 15, 11, 4, 15, 19, 0,
  /* 13185 */ 15, 3, 15, 7, 15, 11, 4, 15, 19, 0,
  /* 13195 */ 15, 3, 4, 15, 7, 15, 11, 4, 15, 19, 0,
  /* 13206 */ 21, 15, 3, 15, 11, 15, 15, 4, 15, 19, 0,
  /* 13217 */ 0, 15, 3, 4, 15, 11, 15, 19, 0,
  /* 13226 */ 15, 3, 15, 7, 4, 15, 11, 15, 19, 0,
  /* 13236 */ 15, 2, 15, 7, 15, 11, 15, 19, 0,
  /* 13245 */ 15, 3, 4, 15, 7, 15, 11, 15, 19, 0,
  /* 13255 */ 15, 2, 15, 7, 4, 19, 19, 0,
  /* 13263 */ 31, 2, 1, 15, 2, 15, 7, 19, 19, 0,
  /* 13273 */ 15, 2, 15, 7, 15, 7, 19, 19, 0,
  /* 13282 */ 15, 2, 15, 7, 15, 7, 15, 7, 19, 19, 0,
  /* 13293 */ 15, 2, 15, 9, 19, 19, 0,
  /* 13300 */ 15, 2, 15, 7, 15, 9, 19, 19, 0,
  /* 13309 */ 15, 1, 15, 10, 19, 19, 0,
  /* 13316 */ 15, 2, 15, 10, 19, 19, 0,
  /* 13323 */ 0, 19, 19, 19, 0,
  /* 13328 */ 0, 19, 19, 19, 19, 19, 19, 0,
  /* 13336 */ 15, 3, 15, 7, 15, 8, 15, 7, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13352 */ 15, 3, 15, 7, 15, 7, 15, 8, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13368 */ 15, 3, 15, 7, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13382 */ 15, 3, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13397 */ 21, 15, 3, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13417 */ 22, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13441 */ 23, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13469 */ 24, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13501 */ 38, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13537 */ 39, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13577 */ 40, 15, 3, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 15, 7, 14, 15, 11, 31, 3, 1, 15, 17, 15, 23, 0,
  /* 13621 */ 15, 3, 15, 7, 15, 11, 15, 7, 31, 11, 1, 15, 17, 15, 23, 0,
  /* 13637 */ 15, 3, 15, 7, 15, 11, 31, 11, 1, 15, 17, 15, 23, 0,
  /* 13651 */ 15, 3, 15, 7, 15, 8, 15, 7, 15, 17, 15, 23, 0,
  /* 13664 */ 15, 3, 15, 7, 15, 11, 15, 7, 15, 17, 15, 23, 0,
  /* 13677 */ 15, 3, 15, 7, 15, 7, 15, 8, 15, 17, 15, 23, 0,
  /* 13690 */ 15, 3, 15, 7, 15, 11, 15, 17, 15, 23, 0,
  /* 13701 */ 15, 3, 15, 7, 15, 8, 15, 7, 31, 3, 1, 15, 17, 15, 23, 15, 23, 0,
  /* 13719 */ 15, 3, 15, 7, 15, 7, 15, 8, 31, 3, 1, 15, 17, 15, 23, 15, 23, 0,
  /* 13737 */ 15, 3, 15, 7, 15, 11, 31, 3, 1, 15, 17, 15, 23, 15, 23, 0,
  /* 13753 */ 15, 3, 15, 7, 15, 8, 15, 7, 15, 17, 15, 23, 15, 23, 0,
  /* 13768 */ 15, 3, 15, 9, 15, 7, 15, 7, 15, 16, 15, 25, 0,
  /* 13781 */ 15, 3, 15, 9, 15, 7, 15, 16, 15, 25, 0,
  /* 13792 */ 15, 0, 15, 8, 15, 16, 15, 25, 0,
  /* 13801 */ 0, 15, 1, 15, 7, 15, 11, 15, 16, 15, 25, 0,
  /* 13813 */ 15, 3, 15, 7, 15, 11, 15, 16, 15, 25, 0,
  /* 13824 */ 15, 3, 15, 9, 15, 15, 15, 16, 15, 25, 0,
  /* 13835 */ 0, 15, 1, 15, 11, 15, 15, 15, 16, 15, 25, 0,
  /* 13847 */ 15, 3, 15, 12, 15, 19, 4, 4, 4, 15, 27, 0,
  /* 13859 */ 0, 15, 4, 15, 11, 15, 19, 4, 4, 15, 27, 0,
  /* 13871 */ 0, 15, 0, 29, 0,
  /* 13876 */ 4, 4, 15, 0, 29, 0,
  /* 13882 */ 0, 1, 29, 0,
  /* 13886 */ 1, 14, 1, 29, 0,
  /* 13891 */ 15, 0, 4, 29, 0,
  /* 13896 */ 18, 5, 4, 15, 4, 4, 4, 29, 0,
  /* 13905 */ 0, 5, 4, 29, 0,
  /* 13910 */ 0, 5, 4, 14, 4, 29, 0,
  /* 13917 */ 5, 5, 4, 14, 4, 29, 0,
  /* 13924 */ 0, 15, 4, 15, 9, 15, 16, 15, 24, 4, 29, 0,
  /* 13936 */ 7, 7, 7, 7, 29, 0,
  /* 13942 */ 8, 8, 8, 8, 29, 0,
  /* 13948 */ 15, 4, 1, 15, 8, 29, 0,
  /* 13955 */ 15, 0, 4, 14, 14, 29, 0,
  /* 13962 */ 52, 52, 52, 52, 29, 0,
  /* 13968 */ 15, 3, 15, 7, 15, 11, 15, 16, 31, 3, 1, 15, 25, 15, 31, 0,
  /* 13984 */ 15, 3, 15, 7, 15, 8, 15, 19, 31, 3, 1, 15, 25, 15, 31, 0,
  /* 14000 */ 15, 3, 15, 7, 15, 8, 15, 19, 31, 19, 1, 15, 25, 15, 31, 0,
  /* 14016 */ 15, 3, 15, 7, 15, 11, 15, 16, 15, 25, 15, 31, 0,
  /* 14029 */ 15, 3, 15, 7, 15, 8, 15, 19, 15, 25, 15, 31, 0,
  /* 14042 */ 15, 3, 15, 7, 15, 11, 15, 16, 31, 3, 1, 15, 25, 15, 31, 15, 31, 0,
  /* 14060 */ 15, 3, 15, 7, 15, 8, 15, 19, 31, 3, 1, 15, 25, 15, 31, 15, 31, 0,
  /* 14078 */ 15, 3, 15, 7, 15, 8, 15, 19, 15, 25, 15, 31, 15, 31, 0,
  /* 14093 */ 15, 3, 15, 9, 15, 7, 15, 19, 15, 24, 15, 33, 0,
  /* 14106 */ 0, 15, 1, 15, 11, 15, 19, 15, 24, 15, 33, 0,
  /* 14118 */ 4, 28, 35, 0,
  /* 14122 */ 28, 35, 9, 5, 9, 5, 28, 35, 0,
  /* 14131 */ 4, 4, 28, 35, 28, 35, 0,
  /* 14138 */ 28, 35, 28, 35, 28, 35, 28, 35, 0,
  /* 14147 */ 5, 41, 0,
  /* 14150 */ 8, 41, 0,
  /* 14153 */ 41, 41, 41, 41, 0,
  /* 14158 */ 52, 41, 0,
  /* 14161 */ 3, 48, 0,
  /* 14164 */ 9, 48, 9, 48, 9, 48, 9, 48, 0,
  /* 14173 */ 43, 10, 7, 43, 10, 7, 43, 11, 48, 43, 11, 48, 0,
  /* 14186 */ 10, 7, 10, 7, 11, 48, 11, 48, 0,
  /* 14195 */ 11, 7, 11, 7, 12, 48, 12, 48, 0,
  /* 14204 */ 12, 7, 12, 7, 13, 48, 13, 48, 0,
  /* 14213 */ 48, 48, 48, 48, 0,
  /* 14218 */ 15, 3, 51, 0,
  /* 14222 */ 0, 3, 3, 14, 5, 51, 0,
  /* 14229 */ 51, 3, 3, 3, 51, 51, 51, 0,
  /* 14237 */ 41, 52, 0,
  /* 14240 */ 0, 27, 1, 4, 54, 0,
  /* 14246 */ 0, 27, 1, 4, 55, 0,
  /* 14252 */ 59, 0,
  255
};

#endif

// Parameter attribute sets for attributes that are not common to all intrinsics.
#ifdef GET_INTRINSIC_ATTRIBUTES
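// Maps a TableGen-assigned attribute-set number to the concrete AttributeSet
// for a single intrinsic parameter. The numbering is private to this
// generated file.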
static AttributeSet getIntrinsicArgAttributeSet(LLVMContext &C, unsigned ID) {
  switch (ID) {
  default: llvm_unreachable("Invalid attribute set number");
  case 0:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::ImmArg),
    });
  case 1:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUndef),
    });
  case 2:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::WriteOnly),
    });
  case 3:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCapture),
      Attribute::get(C, Attribute::ReadOnly),
    });
  case 4:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::ReadNone),
    });
  case 5:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCapture),
    });
  case 6:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCapture),
      Attribute::get(C, Attribute::ReadNone),
    });
  case 7:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCapture),
      Attribute::get(C, Attribute::WriteOnly),
    });
  case 8:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCapture),
      Attribute::get(C, Attribute::NoAlias),
      Attribute::get(C, Attribute::WriteOnly),
    });
  case 9:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCapture),
      Attribute::get(C, Attribute::NoAlias),
      Attribute::get(C, Attribute::ReadOnly),
    });
  case 10:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::Returned),
    });
  case 11:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NonNull),
    });
  case 12:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::ReadOnly),
    });
  case 13:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::Alignment, 4),
    });
  case 14:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoAlias),
      Attribute::get(C, Attribute::WriteOnly),
    });
  case 15:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoAlias),
      Attribute::get(C, Attribute::ReadOnly),
    });
  }
}
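
// A minimal usage sketch (illustrative only; `Ctx` stands for an assumed
// LLVMContext, and set number 3 is the NoCapture+ReadOnly entry above):
//
//   AttributeSet AS = getIntrinsicArgAttributeSet(Ctx, 3);
//   bool IsReadOnly  = AS.hasAttribute(Attribute::ReadOnly);  // true
//   bool IsNoCapture = AS.hasAttribute(Attribute::NoCapture); // true
//
// AttributeSet::hasAttribute is the standard query from llvm/IR/Attributes.h.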

static AttributeSet getIntrinsicFnAttributeSet(LLVMContext &C, unsigned ID) {
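  // Maps a TableGen-assigned attribute-set number to the function-level
  // AttributeSet for an intrinsic: nounwind/willreturn-style attributes
  // plus the memory effects encoded via createFromIntValue below.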
  switch (ID) {
  default: llvm_unreachable("Invalid attribute set number");
  case 0:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Speculatable),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 1:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 2:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(1)),
    });
  case 3:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 4:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
    });
  case 5:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 6:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoMerge),
    });
  case 7:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::NoDuplicate),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 8:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 9:
    return AttributeSet::get(C, {
    });
  case 10:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 11:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(1)),
    });
  case 12:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoReturn),
    });
  case 13:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::StrictFP),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 14:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 15:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 16:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 17:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Speculatable),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 18:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 19:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::NoDuplicate),
    });
  case 20:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(21)),
    });
  case 21:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(42)),
    });
  case 22:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 23:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 24:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 25:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 26:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(15)),
    });
  case 27:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
    });
  case 28:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(42)),
    });
  case 29:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoReturn),
      Attribute::get(C, Attribute::Cold),
    });
  case 30:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
    });
  case 31:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(42)),
    });
  case 32:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoReturn),
      Attribute::get(C, Attribute::Cold),
    });
  case 33:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 34:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 35:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(15)),
    });
  case 36:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(4)),
    });
  case 37:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(8)),
    });
  case 38:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 39:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 40:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 41:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 42:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoReturn),
      Attribute::get(C, Attribute::Convergent),
    });
  case 43:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 44:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 45:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 46:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(8)),
    });
  case 47:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
    });
  case 48:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoReturn),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::Cold),
    });
  case 49:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
    });
  case 50:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
    });
  case 51:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Speculatable),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(21)),
    });
  case 52:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Speculatable),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 53:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 54:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
    });
  case 55:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
      Attribute::get(C, Attribute::Speculatable),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 56:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::Convergent),
    });
  case 57:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoFree),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(8)),
    });
  case 58:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(21)),
    });
  case 59:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 60:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 61:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 62:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(3)),
    });
  case 63:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::Convergent),
    });
  case 64:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 65:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(1)),
    });
  case 66:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 67:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 68:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::Convergent),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 69:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::Speculatable),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(0)),
    });
  case 70:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 71:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 72:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 73:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::NoMerge),
    });
  case 74:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoCallback),
      Attribute::get(C, Attribute::NoSync),
      Attribute::get(C, Attribute::NoFree),
      Attribute::get(C, Attribute::WillReturn),
    });
  case 75:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoDuplicate),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  case 76:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::WillReturn),
      Attribute::get(C, Attribute::Convergent),
    });
  case 77:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoReturn),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(42)),
    });
  case 78:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::get(C, Attribute::NoDuplicate),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(42)),
    });
  case 79:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(15)),
    });
  case 80:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(12)),
    });
  case 81:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(15)),
    });
  case 82:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoReturn),
    });
  case 83:
    return AttributeSet::get(C, {
      Attribute::get(C, Attribute::NoUnwind),
      Attribute::getWithMemoryEffects(C, MemoryEffects::createFromIntValue(2)),
    });
  }
}
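
// Note on MemoryEffects::createFromIntValue: the argument is the raw
// bitfield form of MemoryEffects. Assuming the in-tree encoding of two
// ModRef bits per location, ordered ArgMem, InaccessibleMem, Other
// (Ref = 1, Mod = 2), the values used above decode as:
//
//   0  -> no access                   memory(none)
//   1  -> Ref on ArgMem               memory(argmem: read)
//   3  -> ModRef on ArgMem            memory(argmem: readwrite)
//   12 -> ModRef on InaccessibleMem   memory(inaccessiblemem: readwrite)
//   21 -> Ref everywhere (0b010101)   memory(read)
//   42 -> Mod everywhere (0b101010)   memory(write)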

AttributeList Intrinsic::getAttributes(LLVMContext &C, ID id) {
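  // One entry per intrinsic, in the same order as the llvm.* names in the
  // comments; the stored number selects the attribute table used to build
  // the returned AttributeList. A typical (illustrative) call site, where
  // `M` is an assumed Module and `id` a valid non-overloaded Intrinsic::ID:
  //
  //   Function *F = M.getFunction(Intrinsic::getName(id));
  //   if (F)
  //     F->setAttributes(Intrinsic::getAttributes(M.getContext(), id));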
  static const uint16_t IntrinsicsToAttributesMap[] = {
    1, // llvm.abs
    2, // llvm.addressofreturnaddress
    3, // llvm.adjust.trampoline
    4, // llvm.annotation
    5, // llvm.arithmetic.fence
    6, // llvm.asan.check.memaccess
    7, // llvm.assume
    5, // llvm.bitreverse
    5, // llvm.bswap
    8, // llvm.call.preallocated.arg
    8, // llvm.call.preallocated.setup
    8, // llvm.call.preallocated.teardown
    9, // llvm.callbr.landingpad
    5, // llvm.canonicalize
    5, // llvm.ceil
    10, // llvm.clear_cache
    11, // llvm.codeview.annotation
    2, // llvm.convert.from.fp16
    2, // llvm.convert.to.fp16
    5, // llvm.copysign
    12, // llvm.coro.align
    10, // llvm.coro.alloc
    10, // llvm.coro.alloca.alloc
    10, // llvm.coro.alloca.free
    10, // llvm.coro.alloca.get
    10, // llvm.coro.async.context.alloc
    10, // llvm.coro.async.context.dealloc
    9, // llvm.coro.async.resume
    10, // llvm.coro.async.size.replace
    13, // llvm.coro.begin
    14, // llvm.coro.destroy
    15, // llvm.coro.done
    10, // llvm.coro.end
    10, // llvm.coro.end.async
    12, // llvm.coro.frame
    16, // llvm.coro.free
    17, // llvm.coro.id
    10, // llvm.coro.id.async
    10, // llvm.coro.id.retcon
    10, // llvm.coro.id.retcon.once
    12, // llvm.coro.noop
    12, // llvm.coro.prepare.async
    12, // llvm.coro.prepare.retcon
    18, // llvm.coro.promise
    14, // llvm.coro.resume
    9, // llvm.coro.save
    12, // llvm.coro.size
    19, // llvm.coro.subfn.addr
    10, // llvm.coro.suspend
    9, // llvm.coro.suspend.async
    10, // llvm.coro.suspend.retcon
    5, // llvm.cos
    1, // llvm.ctlz
    5, // llvm.ctpop
    1, // llvm.cttz
    5, // llvm.dbg.assign
    5, // llvm.dbg.declare
    5, // llvm.dbg.label
    5, // llvm.dbg.value
    10, // llvm.debugtrap
    2, // llvm.donothing
    10, // llvm.eh.dwarf.cfa
    12, // llvm.eh.exceptioncode
    12, // llvm.eh.exceptionpointer
    2, // llvm.eh.recoverfp
    10, // llvm.eh.return.i32
    10, // llvm.eh.return.i64
    20, // llvm.eh.sjlj.callsite
    10, // llvm.eh.sjlj.functioncontext
    21, // llvm.eh.sjlj.longjmp
    12, // llvm.eh.sjlj.lsda
    10, // llvm.eh.sjlj.setjmp
    10, // llvm.eh.sjlj.setup.dispatch
    12, // llvm.eh.typeid.for
    10, // llvm.eh.unwind.init
    5, // llvm.exp
    5, // llvm.exp2
    2, // llvm.expect
    22, // llvm.expect.with.probability
    23, // llvm.experimental.constrained.ceil
    23, // llvm.experimental.constrained.cos
    23, // llvm.experimental.constrained.exp
    23, // llvm.experimental.constrained.exp2
    23, // llvm.experimental.constrained.fadd
    23, // llvm.experimental.constrained.fcmp
    23, // llvm.experimental.constrained.fcmps
    23, // llvm.experimental.constrained.fdiv
    23, // llvm.experimental.constrained.floor
    23, // llvm.experimental.constrained.fma
    23, // llvm.experimental.constrained.fmul
    23, // llvm.experimental.constrained.fmuladd
    23, // llvm.experimental.constrained.fpext
    23, // llvm.experimental.constrained.fptosi
    23, // llvm.experimental.constrained.fptoui
    23, // llvm.experimental.constrained.fptrunc
    23, // llvm.experimental.constrained.frem
    23, // llvm.experimental.constrained.fsub
    23, // llvm.experimental.constrained.ldexp
    23, // llvm.experimental.constrained.llrint
    23, // llvm.experimental.constrained.llround
    23, // llvm.experimental.constrained.log
    23, // llvm.experimental.constrained.log10
    23, // llvm.experimental.constrained.log2
    23, // llvm.experimental.constrained.lrint
    23, // llvm.experimental.constrained.lround
    23, // llvm.experimental.constrained.maximum
    23, // llvm.experimental.constrained.maxnum
    23, // llvm.experimental.constrained.minimum
    23, // llvm.experimental.constrained.minnum
    23, // llvm.experimental.constrained.nearbyint
    23, // llvm.experimental.constrained.pow
    23, // llvm.experimental.constrained.powi
    23, // llvm.experimental.constrained.rint
    23, // llvm.experimental.constrained.round
    23, // llvm.experimental.constrained.roundeven
    23, // llvm.experimental.constrained.sin
    23, // llvm.experimental.constrained.sitofp
    23, // llvm.experimental.constrained.sqrt
    23, // llvm.experimental.constrained.trunc
    23, // llvm.experimental.constrained.uitofp
    24, // llvm.experimental.convergence.anchor
    24, // llvm.experimental.convergence.entry
    24, // llvm.experimental.convergence.loop
    14, // llvm.experimental.deoptimize
    25, // llvm.experimental.gc.get.pointer.base
    25, // llvm.experimental.gc.get.pointer.offset
    26, // llvm.experimental.gc.relocate
    2, // llvm.experimental.gc.result
    27, // llvm.experimental.gc.statepoint
    26, // llvm.experimental.get.vector.length
    28, // llvm.experimental.guard
    4, // llvm.experimental.noalias.scope.decl
    14, // llvm.experimental.patchpoint.i64
    14, // llvm.experimental.patchpoint.void
    28, // llvm.experimental.stackmap
    2, // llvm.experimental.stepvector
    2, // llvm.experimental.vector.deinterleave2
    2, // llvm.experimental.vector.interleave2
    2, // llvm.experimental.vector.reverse
    22, // llvm.experimental.vector.splice
    22, // llvm.experimental.vp.splice
    29, // llvm.experimental.vp.strided.load
    30, // llvm.experimental.vp.strided.store
    31, // llvm.experimental.widenable.condition
    5, // llvm.fabs
    5, // llvm.floor
    5, // llvm.fma
    5, // llvm.fmuladd
    5, // llvm.fptosi.sat
    5, // llvm.fptoui.sat
    5, // llvm.fptrunc.round
    32, // llvm.frameaddress
    5, // llvm.frexp
    5, // llvm.fshl
    5, // llvm.fshr
    33, // llvm.gcread
    10, // llvm.gcroot
    34, // llvm.gcwrite
    2, // llvm.get.active.lane.mask
    8, // llvm.get.dynamic.area.offset
    4, // llvm.get.fpenv
    4, // llvm.get.rounding
    35, // llvm.hwasan.check.memaccess
    35, // llvm.hwasan.check.memaccess.shortgranules
    8, // llvm.icall.branch.funnel
    36, // llvm.init.trampoline
    10, // llvm.instrprof.cover
    10, // llvm.instrprof.increment
    10, // llvm.instrprof.increment.step
    10, // llvm.instrprof.timestamp
    10, // llvm.instrprof.value.profile
    37, // llvm.invariant.end
    38, // llvm.invariant.start
    24, // llvm.is.constant
    1, // llvm.is.fpclass
    39, // llvm.launder.invariant.group
    5, // llvm.ldexp
    38, // llvm.lifetime.end
    38, // llvm.lifetime.start
    5, // llvm.llrint
    5, // llvm.llround
    3, // llvm.load.relative
    2, // llvm.localaddress
    8, // llvm.localescape
    22, // llvm.localrecover
    5, // llvm.log
    5, // llvm.log10
    5, // llvm.log2
    40, // llvm.loop.decrement
    40, // llvm.loop.decrement.reg
    5, // llvm.lrint
    5, // llvm.lround
    30, // llvm.masked.compressstore
    41, // llvm.masked.expandload
    42, // llvm.masked.gather
    43, // llvm.masked.load
    44, // llvm.masked.scatter
    45, // llvm.masked.store
    46, // llvm.matrix.column.major.load
    47, // llvm.matrix.column.major.store
    48, // llvm.matrix.multiply
    49, // llvm.matrix.transpose
    5, // llvm.maximum
    5, // llvm.maxnum
    50, // llvm.memcpy
    51, // llvm.memcpy.element.unordered.atomic
    52, // llvm.memcpy.inline
    53, // llvm.memmove
    51, // llvm.memmove.element.unordered.atomic
    54, // llvm.memset
    55, // llvm.memset.element.unordered.atomic
    56, // llvm.memset.inline
    5, // llvm.minimum
    5, // llvm.minnum
    5, // llvm.nearbyint
    10, // llvm.objc.arc.annotation.bottomup.bbend
    10, // llvm.objc.arc.annotation.bottomup.bbstart
    10, // llvm.objc.arc.annotation.topdown.bbend
    10, // llvm.objc.arc.annotation.topdown.bbstart
    10, // llvm.objc.autorelease
    10, // llvm.objc.autoreleasePoolPop
    10, // llvm.objc.autoreleasePoolPush
    10, // llvm.objc.autoreleaseReturnValue
    4, // llvm.objc.clang.arc.noop.use
    10, // llvm.objc.clang.arc.use
    10, // llvm.objc.copyWeak
    10, // llvm.objc.destroyWeak
    10, // llvm.objc.initWeak
    10, // llvm.objc.loadWeak
    10, // llvm.objc.loadWeakRetained
    10, // llvm.objc.moveWeak
    10, // llvm.objc.release
    10, // llvm.objc.retain
    10, // llvm.objc.retain.autorelease
    10, // llvm.objc.retainAutorelease
    10, // llvm.objc.retainAutoreleaseReturnValue
    10, // llvm.objc.retainAutoreleasedReturnValue
    10, // llvm.objc.retainBlock
    10, // llvm.objc.retainedObject
    10, // llvm.objc.storeStrong
    10, // llvm.objc.storeWeak
    10, // llvm.objc.sync.enter
    10, // llvm.objc.sync.exit
    10, // llvm.objc.unretainedObject
    10, // llvm.objc.unretainedPointer
    10, // llvm.objc.unsafeClaimAutoreleasedReturnValue
    57, // llvm.objectsize
    8, // llvm.pcmarker
    5, // llvm.pow
    5, // llvm.powi
    58, // llvm.prefetch
    26, // llvm.preserve.array.access.index
    26, // llvm.preserve.struct.access.index
    59, // llvm.preserve.union.access.index
    4, // llvm.pseudoprobe
    4, // llvm.ptr.annotation
    60, // llvm.ptrauth.auth
    2, // llvm.ptrauth.blend
    61, // llvm.ptrauth.resign
    59, // llvm.ptrauth.sign
    2, // llvm.ptrauth.sign.generic
    59, // llvm.ptrauth.strip
    5, // llvm.ptrmask
    5, // llvm.public.type.test
    62, // llvm.read_register
    63, // llvm.read_volatile_register
    8, // llvm.readcyclecounter
    4, // llvm.reset.fpenv
    32, // llvm.returnaddress
    5, // llvm.rint
    5, // llvm.round
    5, // llvm.roundeven
    5, // llvm.sadd.sat
    5, // llvm.sadd.with.overflow
    22, // llvm.sdiv.fix
    22, // llvm.sdiv.fix.sat
    12, // llvm.seh.scope.begin
    12, // llvm.seh.scope.end
    64, // llvm.seh.try.begin
    64, // llvm.seh.try.end
    4, // llvm.set.fpenv
    40, // llvm.set.loop.iterations
    4, // llvm.set.rounding
    4, // llvm.sideeffect
    5, // llvm.sin
    5, // llvm.smax
    5, // llvm.smin
    65, // llvm.smul.fix
    65, // llvm.smul.fix.sat
    5, // llvm.smul.with.overflow
    2, // llvm.sponentry
    5, // llvm.sqrt
    66, // llvm.ssa.copy
    5, // llvm.sshl.sat
    5, // llvm.ssub.sat
    5, // llvm.ssub.with.overflow
    8, // llvm.stackguard
    8, // llvm.stackprotector
    8, // llvm.stackrestore
    8, // llvm.stacksave
    40, // llvm.start.loop.iterations
    5, // llvm.strip.invariant.group
    10, // llvm.swift.async.context.addr
    40, // llvm.test.set.loop.iterations
    40, // llvm.test.start.loop.iterations
    2, // llvm.thread.pointer
    67, // llvm.threadlocal.address
    68, // llvm.trap
    5, // llvm.trunc
    2, // llvm.type.checked.load
    2, // llvm.type.checked.load.relative
    5, // llvm.type.test
    5, // llvm.uadd.sat
    5, // llvm.uadd.with.overflow
    69, // llvm.ubsantrap
    22, // llvm.udiv.fix
    22, // llvm.udiv.fix.sat
    5, // llvm.umax
    5, // llvm.umin
    65, // llvm.umul.fix
    65, // llvm.umul.fix.sat
    5, // llvm.umul.with.overflow
    5, // llvm.ushl.sat
    5, // llvm.usub.sat
    5, // llvm.usub.with.overflow
    8, // llvm.va_copy
    8, // llvm.va_end
    8, // llvm.va_start
    4, // llvm.var.annotation
    1, // llvm.vector.extract
    65, // llvm.vector.insert
    5, // llvm.vector.reduce.add
    5, // llvm.vector.reduce.and
    5, // llvm.vector.reduce.fadd
    5, // llvm.vector.reduce.fmax
    5, // llvm.vector.reduce.fmaximum
    5, // llvm.vector.reduce.fmin
    5, // llvm.vector.reduce.fminimum
    5, // llvm.vector.reduce.fmul
    5, // llvm.vector.reduce.mul
    5, // llvm.vector.reduce.or
    5, // llvm.vector.reduce.smax
    5, // llvm.vector.reduce.smin
    5, // llvm.vector.reduce.umax
    5, // llvm.vector.reduce.umin
    5, // llvm.vector.reduce.xor
    2, // llvm.vp.abs
    2, // llvm.vp.add
    2, // llvm.vp.and
    2, // llvm.vp.ashr
    2, // llvm.vp.bitreverse
    2, // llvm.vp.bswap
    2, // llvm.vp.ceil
    2, // llvm.vp.copysign
    59, // llvm.vp.ctlz
    2, // llvm.vp.ctpop
    59, // llvm.vp.cttz
    2, // llvm.vp.fabs
    2, // llvm.vp.fadd
    2, // llvm.vp.fcmp
    2, // llvm.vp.fdiv
    2, // llvm.vp.floor
    2, // llvm.vp.fma
    2, // llvm.vp.fmul
    2, // llvm.vp.fmuladd
    2, // llvm.vp.fneg
    2, // llvm.vp.fpext
    2, // llvm.vp.fptosi
    2, // llvm.vp.fptoui
    2, // llvm.vp.fptrunc
    2, // llvm.vp.frem
    2, // llvm.vp.fshl
    2, // llvm.vp.fshr
    2, // llvm.vp.fsub
    62, // llvm.vp.gather
    2, // llvm.vp.icmp
    2, // llvm.vp.inttoptr
    29, // llvm.vp.load
    2, // llvm.vp.lshr
    2, // llvm.vp.maxnum
    2, // llvm.vp.merge
    2, // llvm.vp.minnum
    2, // llvm.vp.mul
    2, // llvm.vp.nearbyint
    2, // llvm.vp.or
    2, // llvm.vp.ptrtoint
    2, // llvm.vp.reduce.add
    2, // llvm.vp.reduce.and
    2, // llvm.vp.reduce.fadd
    2, // llvm.vp.reduce.fmax
    2, // llvm.vp.reduce.fmin
    2, // llvm.vp.reduce.fmul
    2, // llvm.vp.reduce.mul
    2, // llvm.vp.reduce.or
    2, // llvm.vp.reduce.smax
    2, // llvm.vp.reduce.smin
    2, // llvm.vp.reduce.umax
    2, // llvm.vp.reduce.umin
    2, // llvm.vp.reduce.xor
    2, // llvm.vp.rint
    2, // llvm.vp.round
    2, // llvm.vp.roundeven
    2, // llvm.vp.roundtozero
    8, // llvm.vp.scatter
    2, // llvm.vp.sdiv
    2, // llvm.vp.select
    2, // llvm.vp.sext
    2, // llvm.vp.shl
    2, // llvm.vp.sitofp
    2, // llvm.vp.smax
    2, // llvm.vp.smin
    2, // llvm.vp.sqrt
    2, // llvm.vp.srem
    30, // llvm.vp.store
    2, // llvm.vp.sub
    2, // llvm.vp.trunc
    2, // llvm.vp.udiv
    2, // llvm.vp.uitofp
    2, // llvm.vp.umax
    2, // llvm.vp.umin
    2, // llvm.vp.urem
    2, // llvm.vp.xor
    2, // llvm.vp.zext
    2, // llvm.vscale
    70, // llvm.write_register
    71, // llvm.xray.customevent
    72, // llvm.xray.typedevent
    2, // llvm.aarch64.addg
    73, // llvm.aarch64.break
    10, // llvm.aarch64.clrex
    2, // llvm.aarch64.cls
    2, // llvm.aarch64.cls64
    2, // llvm.aarch64.crc32b
    2, // llvm.aarch64.crc32cb
    2, // llvm.aarch64.crc32ch
    2, // llvm.aarch64.crc32cw
    2, // llvm.aarch64.crc32cx
    2, // llvm.aarch64.crc32h
    2, // llvm.aarch64.crc32w
    2, // llvm.aarch64.crc32x
    2, // llvm.aarch64.crypto.aesd
    2, // llvm.aarch64.crypto.aese
    2, // llvm.aarch64.crypto.aesimc
    2, // llvm.aarch64.crypto.aesmc
    2, // llvm.aarch64.crypto.bcaxs
    2, // llvm.aarch64.crypto.bcaxu
    2, // llvm.aarch64.crypto.eor3s
    2, // llvm.aarch64.crypto.eor3u
    2, // llvm.aarch64.crypto.rax1
    2, // llvm.aarch64.crypto.sha1c
    2, // llvm.aarch64.crypto.sha1h
    2, // llvm.aarch64.crypto.sha1m
    2, // llvm.aarch64.crypto.sha1p
    2, // llvm.aarch64.crypto.sha1su0
    2, // llvm.aarch64.crypto.sha1su1
    2, // llvm.aarch64.crypto.sha256h
    2, // llvm.aarch64.crypto.sha256h2
    2, // llvm.aarch64.crypto.sha256su0
    2, // llvm.aarch64.crypto.sha256su1
    2, // llvm.aarch64.crypto.sha512h
    2, // llvm.aarch64.crypto.sha512h2
    2, // llvm.aarch64.crypto.sha512su0
    2, // llvm.aarch64.crypto.sha512su1
    12, // llvm.aarch64.crypto.sm3partw1
    12, // llvm.aarch64.crypto.sm3partw2
    12, // llvm.aarch64.crypto.sm3ss1
    74, // llvm.aarch64.crypto.sm3tt1a
    74, // llvm.aarch64.crypto.sm3tt1b
    74, // llvm.aarch64.crypto.sm3tt2a
    74, // llvm.aarch64.crypto.sm3tt2b
    12, // llvm.aarch64.crypto.sm4e
    12, // llvm.aarch64.crypto.sm4ekey
    22, // llvm.aarch64.crypto.xar
    75, // llvm.aarch64.dmb
    75, // llvm.aarch64.dsb
    2, // llvm.aarch64.fjcvtzs
    2, // llvm.aarch64.frint32x
    2, // llvm.aarch64.frint32z
    2, // llvm.aarch64.frint64x
    2, // llvm.aarch64.frint64z
    76, // llvm.aarch64.get.fpcr
    2, // llvm.aarch64.gmi
    8, // llvm.aarch64.hint
    76, // llvm.aarch64.irg
    76, // llvm.aarch64.irg.sp
    75, // llvm.aarch64.isb
    10, // llvm.aarch64.ld64b
    75, // llvm.aarch64.ldaxp
    75, // llvm.aarch64.ldaxr
    62, // llvm.aarch64.ldg
    75, // llvm.aarch64.ldxp
    75, // llvm.aarch64.ldxr
    77, // llvm.aarch64.mops.memset.tag
    2, // llvm.aarch64.neon.abs
    2, // llvm.aarch64.neon.addhn
    2, // llvm.aarch64.neon.addp
    2, // llvm.aarch64.neon.bfcvt
    2, // llvm.aarch64.neon.bfcvtn
    2, // llvm.aarch64.neon.bfcvtn2
    2, // llvm.aarch64.neon.bfdot
    2, // llvm.aarch64.neon.bfmlalb
    2, // llvm.aarch64.neon.bfmlalt
    2, // llvm.aarch64.neon.bfmmla
    2, // llvm.aarch64.neon.cls
    2, // llvm.aarch64.neon.fabd
    2, // llvm.aarch64.neon.facge
    2, // llvm.aarch64.neon.facgt
    2, // llvm.aarch64.neon.faddp
    2, // llvm.aarch64.neon.faddv
    2, // llvm.aarch64.neon.fcvtas
    2, // llvm.aarch64.neon.fcvtau
    2, // llvm.aarch64.neon.fcvtms
    2, // llvm.aarch64.neon.fcvtmu
    2, // llvm.aarch64.neon.fcvtns
    2, // llvm.aarch64.neon.fcvtnu
    2, // llvm.aarch64.neon.fcvtps
    2, // llvm.aarch64.neon.fcvtpu
    2, // llvm.aarch64.neon.fcvtxn
    2, // llvm.aarch64.neon.fcvtzs
    2, // llvm.aarch64.neon.fcvtzu
    2, // llvm.aarch64.neon.fmax
    2, // llvm.aarch64.neon.fmaxnm
    2, // llvm.aarch64.neon.fmaxnmp
    2, // llvm.aarch64.neon.fmaxnmv
    2, // llvm.aarch64.neon.fmaxp
    2, // llvm.aarch64.neon.fmaxv
    2, // llvm.aarch64.neon.fmin
    2, // llvm.aarch64.neon.fminnm
    2, // llvm.aarch64.neon.fminnmp
    2, // llvm.aarch64.neon.fminnmv
    2, // llvm.aarch64.neon.fminp
    2, // llvm.aarch64.neon.fminv
    2, // llvm.aarch64.neon.fmlal
    2, // llvm.aarch64.neon.fmlal2
    2, // llvm.aarch64.neon.fmlsl
    2, // llvm.aarch64.neon.fmlsl2
    2, // llvm.aarch64.neon.fmulx
    2, // llvm.aarch64.neon.frecpe
    2, // llvm.aarch64.neon.frecps
    2, // llvm.aarch64.neon.frecpx
    2, // llvm.aarch64.neon.frint32x
    2, // llvm.aarch64.neon.frint32z
    2, // llvm.aarch64.neon.frint64x
    2, // llvm.aarch64.neon.frint64z
    2, // llvm.aarch64.neon.frsqrte
    2, // llvm.aarch64.neon.frsqrts
    3, // llvm.aarch64.neon.ld1x2
    3, // llvm.aarch64.neon.ld1x3
    3, // llvm.aarch64.neon.ld1x4
    3, // llvm.aarch64.neon.ld2
    3, // llvm.aarch64.neon.ld2lane
    3, // llvm.aarch64.neon.ld2r
    3, // llvm.aarch64.neon.ld3
    3, // llvm.aarch64.neon.ld3lane
    3, // llvm.aarch64.neon.ld3r
    3, // llvm.aarch64.neon.ld4
    3, // llvm.aarch64.neon.ld4lane
    3, // llvm.aarch64.neon.ld4r
    2, // llvm.aarch64.neon.pmul
    2, // llvm.aarch64.neon.pmull
    2, // llvm.aarch64.neon.pmull64
    2, // llvm.aarch64.neon.raddhn
    2, // llvm.aarch64.neon.rshrn
    2, // llvm.aarch64.neon.rsubhn
    2, // llvm.aarch64.neon.sabd
    2, // llvm.aarch64.neon.saddlp
    2, // llvm.aarch64.neon.saddlv
    2, // llvm.aarch64.neon.saddv
    2, // llvm.aarch64.neon.scalar.sqxtn
    2, // llvm.aarch64.neon.scalar.sqxtun
    2, // llvm.aarch64.neon.scalar.uqxtn
    2, // llvm.aarch64.neon.sdot
    2, // llvm.aarch64.neon.shadd
    2, // llvm.aarch64.neon.shll
    2, // llvm.aarch64.neon.shsub
    2, // llvm.aarch64.neon.smax
    2, // llvm.aarch64.neon.smaxp
    2, // llvm.aarch64.neon.smaxv
    2, // llvm.aarch64.neon.smin
    2, // llvm.aarch64.neon.sminp
    2, // llvm.aarch64.neon.sminv
    2, // llvm.aarch64.neon.smmla
    2, // llvm.aarch64.neon.smull
    2, // llvm.aarch64.neon.sqabs
    2, // llvm.aarch64.neon.sqadd
    2, // llvm.aarch64.neon.sqdmulh
    2, // llvm.aarch64.neon.sqdmulh.lane
    2, // llvm.aarch64.neon.sqdmulh.laneq
    2, // llvm.aarch64.neon.sqdmull
    2, // llvm.aarch64.neon.sqdmulls.scalar
    2, // llvm.aarch64.neon.sqneg
    2, // llvm.aarch64.neon.sqrdmlah
    2, // llvm.aarch64.neon.sqrdmlsh
    2, // llvm.aarch64.neon.sqrdmulh
    2, // llvm.aarch64.neon.sqrdmulh.lane
    2, // llvm.aarch64.neon.sqrdmulh.laneq
    2, // llvm.aarch64.neon.sqrshl
    2, // llvm.aarch64.neon.sqrshrn
    2, // llvm.aarch64.neon.sqrshrun
    2, // llvm.aarch64.neon.sqshl
    2, // llvm.aarch64.neon.sqshlu
    2, // llvm.aarch64.neon.sqshrn
    2, // llvm.aarch64.neon.sqshrun
    2, // llvm.aarch64.neon.sqsub
    2, // llvm.aarch64.neon.sqxtn
    2, // llvm.aarch64.neon.sqxtun
    2, // llvm.aarch64.neon.srhadd
    2, // llvm.aarch64.neon.srshl
    2, // llvm.aarch64.neon.sshl
    2, // llvm.aarch64.neon.sshll
    78, // llvm.aarch64.neon.st1x2
    79, // llvm.aarch64.neon.st1x3
    80, // llvm.aarch64.neon.st1x4
    78, // llvm.aarch64.neon.st2
    79, // llvm.aarch64.neon.st2lane
    79, // llvm.aarch64.neon.st3
    80, // llvm.aarch64.neon.st3lane
    80, // llvm.aarch64.neon.st4
    81, // llvm.aarch64.neon.st4lane
    2, // llvm.aarch64.neon.subhn
    2, // llvm.aarch64.neon.suqadd
    2, // llvm.aarch64.neon.tbl1
    2, // llvm.aarch64.neon.tbl2
    2, // llvm.aarch64.neon.tbl3
    2, // llvm.aarch64.neon.tbl4
    2, // llvm.aarch64.neon.tbx1
    2, // llvm.aarch64.neon.tbx2
    2, // llvm.aarch64.neon.tbx3
    2, // llvm.aarch64.neon.tbx4
    2, // llvm.aarch64.neon.uabd
    2, // llvm.aarch64.neon.uaddlp
    2, // llvm.aarch64.neon.uaddlv
    2, // llvm.aarch64.neon.uaddv
    2, // llvm.aarch64.neon.udot
    2, // llvm.aarch64.neon.uhadd
    2, // llvm.aarch64.neon.uhsub
    2, // llvm.aarch64.neon.umax
    2, // llvm.aarch64.neon.umaxp
    2, // llvm.aarch64.neon.umaxv
    2, // llvm.aarch64.neon.umin
    2, // llvm.aarch64.neon.uminp
    2, // llvm.aarch64.neon.uminv
    2, // llvm.aarch64.neon.ummla
    2, // llvm.aarch64.neon.umull
    2, // llvm.aarch64.neon.uqadd
    2, // llvm.aarch64.neon.uqrshl
    2, // llvm.aarch64.neon.uqrshrn
    2, // llvm.aarch64.neon.uqshl
    2, // llvm.aarch64.neon.uqshrn
    2, // llvm.aarch64.neon.uqsub
    2, // llvm.aarch64.neon.uqxtn
    2, // llvm.aarch64.neon.urecpe
    2, // llvm.aarch64.neon.urhadd
    2, // llvm.aarch64.neon.urshl
    2, // llvm.aarch64.neon.ursqrte
    2, // llvm.aarch64.neon.usdot
    2, // llvm.aarch64.neon.ushl
    2, // llvm.aarch64.neon.ushll
    2, // llvm.aarch64.neon.usmmla
    2, // llvm.aarch64.neon.usqadd
    2, // llvm.aarch64.neon.vcadd.rot270
    2, // llvm.aarch64.neon.vcadd.rot90
    2, // llvm.aarch64.neon.vcmla.rot0
    2, // llvm.aarch64.neon.vcmla.rot180
    2, // llvm.aarch64.neon.vcmla.rot270
    2, // llvm.aarch64.neon.vcmla.rot90
    2, // llvm.aarch64.neon.vcopy.lane
    2, // llvm.aarch64.neon.vcvtfp2fxs
    2, // llvm.aarch64.neon.vcvtfp2fxu
    2, // llvm.aarch64.neon.vcvtfp2hf
    2, // llvm.aarch64.neon.vcvtfxs2fp
    2, // llvm.aarch64.neon.vcvtfxu2fp
    2, // llvm.aarch64.neon.vcvthf2fp
    2, // llvm.aarch64.neon.vsli
    2, // llvm.aarch64.neon.vsri
    82, // llvm.aarch64.prefetch
    76, // llvm.aarch64.rndr
    76, // llvm.aarch64.rndrrs
    2, // llvm.aarch64.sdiv
    76, // llvm.aarch64.set.fpcr
    77, // llvm.aarch64.settag
    77, // llvm.aarch64.settag.zero
    2, // llvm.aarch64.sisd.fabd
    2, // llvm.aarch64.sisd.fcvtxn
    8, // llvm.aarch64.sme.add.write.single.za.vg1x2
    8, // llvm.aarch64.sme.add.write.single.za.vg1x4
    8, // llvm.aarch64.sme.add.write.za.vg1x2
    8, // llvm.aarch64.sme.add.write.za.vg1x4
    8, // llvm.aarch64.sme.add.za32.vg1x2
    8, // llvm.aarch64.sme.add.za32.vg1x4
    8, // llvm.aarch64.sme.add.za64.vg1x2
    8, // llvm.aarch64.sme.add.za64.vg1x4
    83, // llvm.aarch64.sme.addha
    83, // llvm.aarch64.sme.addva
    83, // llvm.aarch64.sme.bmopa.za32
    83, // llvm.aarch64.sme.bmops.za32
    2, // llvm.aarch64.sme.cntsb
    2, // llvm.aarch64.sme.cntsd
    2, // llvm.aarch64.sme.cntsh
    2, // llvm.aarch64.sme.cntsw
    84, // llvm.aarch64.sme.fdot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.fdot.lane.za32.vg1x4
    8, // llvm.aarch64.sme.fdot.single.za32.vg1x2
    8, // llvm.aarch64.sme.fdot.single.za32.vg1x4
    8, // llvm.aarch64.sme.fdot.za32.vg1x2
    8, // llvm.aarch64.sme.fdot.za32.vg1x4
    84, // llvm.aarch64.sme.fmla.lane.vg1x2
    85, // llvm.aarch64.sme.fmla.lane.vg1x4
    8, // llvm.aarch64.sme.fmla.single.vg1x2
    8, // llvm.aarch64.sme.fmla.single.vg1x4
    8, // llvm.aarch64.sme.fmla.vg1x2
    8, // llvm.aarch64.sme.fmla.vg1x4
    86, // llvm.aarch64.sme.fmlal.lane.vg2x1
    84, // llvm.aarch64.sme.fmlal.lane.vg2x2
    85, // llvm.aarch64.sme.fmlal.lane.vg2x4
    8, // llvm.aarch64.sme.fmlal.single.vg2x1
    8, // llvm.aarch64.sme.fmlal.single.vg2x2
    8, // llvm.aarch64.sme.fmlal.single.vg2x4
    8, // llvm.aarch64.sme.fmlal.vg2x2
    8, // llvm.aarch64.sme.fmlal.vg2x4
    84, // llvm.aarch64.sme.fmls.lane.vg1x2
    85, // llvm.aarch64.sme.fmls.lane.vg1x4
    8, // llvm.aarch64.sme.fmls.single.vg1x2
    8, // llvm.aarch64.sme.fmls.single.vg1x4
    8, // llvm.aarch64.sme.fmls.vg1x2
    8, // llvm.aarch64.sme.fmls.vg1x4
    86, // llvm.aarch64.sme.fmlsl.lane.vg2x1
    84, // llvm.aarch64.sme.fmlsl.lane.vg2x2
    85, // llvm.aarch64.sme.fmlsl.lane.vg2x4
    8, // llvm.aarch64.sme.fmlsl.single.vg2x1
    8, // llvm.aarch64.sme.fmlsl.single.vg2x2
    8, // llvm.aarch64.sme.fmlsl.single.vg2x4
    8, // llvm.aarch64.sme.fmlsl.vg2x2
    8, // llvm.aarch64.sme.fmlsl.vg2x4
    84, // llvm.aarch64.sme.fvdot.lane.za32.vg1x2
    76, // llvm.aarch64.sme.get.tpidr2
    87, // llvm.aarch64.sme.ld1b.horiz
    87, // llvm.aarch64.sme.ld1b.vert
    87, // llvm.aarch64.sme.ld1d.horiz
    87, // llvm.aarch64.sme.ld1d.vert
    87, // llvm.aarch64.sme.ld1h.horiz
    87, // llvm.aarch64.sme.ld1h.vert
    87, // llvm.aarch64.sme.ld1q.horiz
    87, // llvm.aarch64.sme.ld1q.vert
    87, // llvm.aarch64.sme.ld1w.horiz
    87, // llvm.aarch64.sme.ld1w.vert
    8, // llvm.aarch64.sme.ldr
    83, // llvm.aarch64.sme.mopa
    83, // llvm.aarch64.sme.mopa.wide
    83, // llvm.aarch64.sme.mops
    83, // llvm.aarch64.sme.mops.wide
    8, // llvm.aarch64.sme.read.hor.vg2
    8, // llvm.aarch64.sme.read.hor.vg4
    87, // llvm.aarch64.sme.read.horiz
    8, // llvm.aarch64.sme.read.ver.vg2
    8, // llvm.aarch64.sme.read.ver.vg4
    87, // llvm.aarch64.sme.read.vert
    8, // llvm.aarch64.sme.read.vg1x2
    8, // llvm.aarch64.sme.read.vg1x4
    87, // llvm.aarch64.sme.readq.horiz
    87, // llvm.aarch64.sme.readq.vert
    84, // llvm.aarch64.sme.sdot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.sdot.lane.za32.vg1x4
    84, // llvm.aarch64.sme.sdot.lane.za64.vg1x2
    85, // llvm.aarch64.sme.sdot.lane.za64.vg1x4
    8, // llvm.aarch64.sme.sdot.single.za32.vg1x2
    8, // llvm.aarch64.sme.sdot.single.za32.vg1x4
    8, // llvm.aarch64.sme.sdot.single.za64.vg1x2
    8, // llvm.aarch64.sme.sdot.single.za64.vg1x4
    8, // llvm.aarch64.sme.sdot.za32.vg1x2
    8, // llvm.aarch64.sme.sdot.za32.vg1x4
    8, // llvm.aarch64.sme.sdot.za64.vg1x2
    8, // llvm.aarch64.sme.sdot.za64.vg1x4
    76, // llvm.aarch64.sme.set.tpidr2
    86, // llvm.aarch64.sme.smla.za32.lane.vg4x1
    84, // llvm.aarch64.sme.smla.za32.lane.vg4x2
    85, // llvm.aarch64.sme.smla.za32.lane.vg4x4
    8, // llvm.aarch64.sme.smla.za32.single.vg4x1
    8, // llvm.aarch64.sme.smla.za32.single.vg4x2
    8, // llvm.aarch64.sme.smla.za32.single.vg4x4
    8, // llvm.aarch64.sme.smla.za32.vg4x2
    8, // llvm.aarch64.sme.smla.za32.vg4x4
    86, // llvm.aarch64.sme.smla.za64.lane.vg4x1
    84, // llvm.aarch64.sme.smla.za64.lane.vg4x2
    85, // llvm.aarch64.sme.smla.za64.lane.vg4x4
    8, // llvm.aarch64.sme.smla.za64.single.vg4x1
    8, // llvm.aarch64.sme.smla.za64.single.vg4x2
    8, // llvm.aarch64.sme.smla.za64.single.vg4x4
    8, // llvm.aarch64.sme.smla.za64.vg4x2
    8, // llvm.aarch64.sme.smla.za64.vg4x4
    86, // llvm.aarch64.sme.smlal.lane.vg2x1
    84, // llvm.aarch64.sme.smlal.lane.vg2x2
    85, // llvm.aarch64.sme.smlal.lane.vg2x4
    8, // llvm.aarch64.sme.smlal.single.vg2x1
    8, // llvm.aarch64.sme.smlal.single.vg2x2
    8, // llvm.aarch64.sme.smlal.single.vg2x4
    8, // llvm.aarch64.sme.smlal.vg2x2
    8, // llvm.aarch64.sme.smlal.vg2x4
    86, // llvm.aarch64.sme.smls.za32.lane.vg4x1
    84, // llvm.aarch64.sme.smls.za32.lane.vg4x2
    85, // llvm.aarch64.sme.smls.za32.lane.vg4x4
    8, // llvm.aarch64.sme.smls.za32.single.vg4x1
    8, // llvm.aarch64.sme.smls.za32.single.vg4x2
    8, // llvm.aarch64.sme.smls.za32.single.vg4x4
    8, // llvm.aarch64.sme.smls.za32.vg4x2
    8, // llvm.aarch64.sme.smls.za32.vg4x4
    86, // llvm.aarch64.sme.smls.za64.lane.vg4x1
    84, // llvm.aarch64.sme.smls.za64.lane.vg4x2
    85, // llvm.aarch64.sme.smls.za64.lane.vg4x4
    8, // llvm.aarch64.sme.smls.za64.single.vg4x1
    8, // llvm.aarch64.sme.smls.za64.single.vg4x2
    8, // llvm.aarch64.sme.smls.za64.single.vg4x4
    8, // llvm.aarch64.sme.smls.za64.vg4x2
    8, // llvm.aarch64.sme.smls.za64.vg4x4
    86, // llvm.aarch64.sme.smlsl.lane.vg2x1
    84, // llvm.aarch64.sme.smlsl.lane.vg2x2
    85, // llvm.aarch64.sme.smlsl.lane.vg2x4
    8, // llvm.aarch64.sme.smlsl.single.vg2x1
    8, // llvm.aarch64.sme.smlsl.single.vg2x2
    8, // llvm.aarch64.sme.smlsl.single.vg2x4
    8, // llvm.aarch64.sme.smlsl.vg2x2
    8, // llvm.aarch64.sme.smlsl.vg2x4
    83, // llvm.aarch64.sme.smopa.wide
    83, // llvm.aarch64.sme.smopa.za32
    83, // llvm.aarch64.sme.smops.wide
    83, // llvm.aarch64.sme.smops.za32
    87, // llvm.aarch64.sme.st1b.horiz
    87, // llvm.aarch64.sme.st1b.vert
    87, // llvm.aarch64.sme.st1d.horiz
    87, // llvm.aarch64.sme.st1d.vert
    87, // llvm.aarch64.sme.st1h.horiz
    87, // llvm.aarch64.sme.st1h.vert
    87, // llvm.aarch64.sme.st1q.horiz
    87, // llvm.aarch64.sme.st1q.vert
    87, // llvm.aarch64.sme.st1w.horiz
    87, // llvm.aarch64.sme.st1w.vert
    8, // llvm.aarch64.sme.str
    8, // llvm.aarch64.sme.sub.write.single.za.vg1x2
    8, // llvm.aarch64.sme.sub.write.single.za.vg1x4
    8, // llvm.aarch64.sme.sub.write.za.vg1x2
    8, // llvm.aarch64.sme.sub.write.za.vg1x4
    8, // llvm.aarch64.sme.sub.za32.vg1x2
    8, // llvm.aarch64.sme.sub.za32.vg1x4
    8, // llvm.aarch64.sme.sub.za64.vg1x2
    8, // llvm.aarch64.sme.sub.za64.vg1x4
    84, // llvm.aarch64.sme.sudot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.sudot.lane.za32.vg1x4
    8, // llvm.aarch64.sme.sudot.single.za32.vg1x2
    8, // llvm.aarch64.sme.sudot.single.za32.vg1x4
    86, // llvm.aarch64.sme.sumla.za32.lane.vg4x1
    84, // llvm.aarch64.sme.sumla.za32.lane.vg4x2
    85, // llvm.aarch64.sme.sumla.za32.lane.vg4x4
    8, // llvm.aarch64.sme.sumla.za32.single.vg4x2
    8, // llvm.aarch64.sme.sumla.za32.single.vg4x4
    83, // llvm.aarch64.sme.sumopa.wide
    83, // llvm.aarch64.sme.sumops.wide
    85, // llvm.aarch64.sme.suvdot.lane.za32.vg1x4
    84, // llvm.aarch64.sme.svdot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.svdot.lane.za32.vg1x4
    85, // llvm.aarch64.sme.svdot.lane.za64.vg1x4
    84, // llvm.aarch64.sme.udot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.udot.lane.za32.vg1x4
    84, // llvm.aarch64.sme.udot.lane.za64.vg1x2
    85, // llvm.aarch64.sme.udot.lane.za64.vg1x4
    8, // llvm.aarch64.sme.udot.single.za32.vg1x2
    8, // llvm.aarch64.sme.udot.single.za32.vg1x4
    8, // llvm.aarch64.sme.udot.single.za64.vg1x2
    8, // llvm.aarch64.sme.udot.single.za64.vg1x4
    8, // llvm.aarch64.sme.udot.za32.vg1x2
    8, // llvm.aarch64.sme.udot.za32.vg1x4
    8, // llvm.aarch64.sme.udot.za64.vg1x2
    8, // llvm.aarch64.sme.udot.za64.vg1x4
    86, // llvm.aarch64.sme.umla.za32.lane.vg4x1
    84, // llvm.aarch64.sme.umla.za32.lane.vg4x2
    85, // llvm.aarch64.sme.umla.za32.lane.vg4x4
    8, // llvm.aarch64.sme.umla.za32.single.vg4x1
    8, // llvm.aarch64.sme.umla.za32.single.vg4x2
    8, // llvm.aarch64.sme.umla.za32.single.vg4x4
    8, // llvm.aarch64.sme.umla.za32.vg4x2
    8, // llvm.aarch64.sme.umla.za32.vg4x4
    86, // llvm.aarch64.sme.umla.za64.lane.vg4x1
    84, // llvm.aarch64.sme.umla.za64.lane.vg4x2
    85, // llvm.aarch64.sme.umla.za64.lane.vg4x4
    8, // llvm.aarch64.sme.umla.za64.single.vg4x1
    8, // llvm.aarch64.sme.umla.za64.single.vg4x2
    8, // llvm.aarch64.sme.umla.za64.single.vg4x4
    8, // llvm.aarch64.sme.umla.za64.vg4x2
    8, // llvm.aarch64.sme.umla.za64.vg4x4
    86, // llvm.aarch64.sme.umlal.lane.vg2x1
    84, // llvm.aarch64.sme.umlal.lane.vg2x2
    85, // llvm.aarch64.sme.umlal.lane.vg2x4
    8, // llvm.aarch64.sme.umlal.single.vg2x1
    8, // llvm.aarch64.sme.umlal.single.vg2x2
    8, // llvm.aarch64.sme.umlal.single.vg2x4
    8, // llvm.aarch64.sme.umlal.vg2x2
    8, // llvm.aarch64.sme.umlal.vg2x4
    86, // llvm.aarch64.sme.umls.za32.lane.vg4x1
    84, // llvm.aarch64.sme.umls.za32.lane.vg4x2
    85, // llvm.aarch64.sme.umls.za32.lane.vg4x4
    8, // llvm.aarch64.sme.umls.za32.single.vg4x1
    8, // llvm.aarch64.sme.umls.za32.single.vg4x2
    8, // llvm.aarch64.sme.umls.za32.single.vg4x4
    8, // llvm.aarch64.sme.umls.za32.vg4x2
    8, // llvm.aarch64.sme.umls.za32.vg4x4
    86, // llvm.aarch64.sme.umls.za64.lane.vg4x1
    84, // llvm.aarch64.sme.umls.za64.lane.vg4x2
    85, // llvm.aarch64.sme.umls.za64.lane.vg4x4
    8, // llvm.aarch64.sme.umls.za64.single.vg4x1
    8, // llvm.aarch64.sme.umls.za64.single.vg4x2
    8, // llvm.aarch64.sme.umls.za64.single.vg4x4
    8, // llvm.aarch64.sme.umls.za64.vg4x2
    8, // llvm.aarch64.sme.umls.za64.vg4x4
    86, // llvm.aarch64.sme.umlsl.lane.vg2x1
    84, // llvm.aarch64.sme.umlsl.lane.vg2x2
    85, // llvm.aarch64.sme.umlsl.lane.vg2x4
    8, // llvm.aarch64.sme.umlsl.single.vg2x1
    8, // llvm.aarch64.sme.umlsl.single.vg2x2
    8, // llvm.aarch64.sme.umlsl.single.vg2x4
    8, // llvm.aarch64.sme.umlsl.vg2x2
    8, // llvm.aarch64.sme.umlsl.vg2x4
    83, // llvm.aarch64.sme.umopa.wide
    83, // llvm.aarch64.sme.umopa.za32
    83, // llvm.aarch64.sme.umops.wide
    83, // llvm.aarch64.sme.umops.za32
    84, // llvm.aarch64.sme.usdot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.usdot.lane.za32.vg1x4
    8, // llvm.aarch64.sme.usdot.single.za32.vg1x2
    8, // llvm.aarch64.sme.usdot.single.za32.vg1x4
    8, // llvm.aarch64.sme.usdot.za32.vg1x2
    8, // llvm.aarch64.sme.usdot.za32.vg1x4
    86, // llvm.aarch64.sme.usmla.za32.lane.vg4x1
    84, // llvm.aarch64.sme.usmla.za32.lane.vg4x2
    85, // llvm.aarch64.sme.usmla.za32.lane.vg4x4
    8, // llvm.aarch64.sme.usmla.za32.single.vg4x1
    8, // llvm.aarch64.sme.usmla.za32.single.vg4x2
    8, // llvm.aarch64.sme.usmla.za32.single.vg4x4
    8, // llvm.aarch64.sme.usmla.za32.vg4x2
    8, // llvm.aarch64.sme.usmla.za32.vg4x4
    83, // llvm.aarch64.sme.usmopa.wide
    83, // llvm.aarch64.sme.usmops.wide
    85, // llvm.aarch64.sme.usvdot.lane.za32.vg1x4
    84, // llvm.aarch64.sme.uvdot.lane.za32.vg1x2
    85, // llvm.aarch64.sme.uvdot.lane.za32.vg1x4
    85, // llvm.aarch64.sme.uvdot.lane.za64.vg1x4
    83, // llvm.aarch64.sme.write.hor.vg2
    83, // llvm.aarch64.sme.write.hor.vg4
    83, // llvm.aarch64.sme.write.horiz
    83, // llvm.aarch64.sme.write.ver.vg2
    83, // llvm.aarch64.sme.write.ver.vg4
    83, // llvm.aarch64.sme.write.vert
    8, // llvm.aarch64.sme.write.vg1x2
    8, // llvm.aarch64.sme.write.vg1x4
    83, // llvm.aarch64.sme.writeq.horiz
    83, // llvm.aarch64.sme.writeq.vert
    76, // llvm.aarch64.sme.za.disable
    76, // llvm.aarch64.sme.za.enable
    83, // llvm.aarch64.sme.zero
    8, // llvm.aarch64.space
    10, // llvm.aarch64.st64b
    10, // llvm.aarch64.st64bv
    10, // llvm.aarch64.st64bv0
    88, // llvm.aarch64.stg
    77, // llvm.aarch64.stgp
    75, // llvm.aarch64.stlxp
    75, // llvm.aarch64.stlxr
    75, // llvm.aarch64.stxp
    75, // llvm.aarch64.stxr
    2, // llvm.aarch64.subp
    2, // llvm.aarch64.sve.abs
    2, // llvm.aarch64.sve.adclb
    2, // llvm.aarch64.sve.adclt
    2, // llvm.aarch64.sve.add
    2, // llvm.aarch64.sve.add.single.x2
    2, // llvm.aarch64.sve.add.single.x4
    2, // llvm.aarch64.sve.add.u
    2, // llvm.aarch64.sve.addhnb
    2, // llvm.aarch64.sve.addhnt
    2, // llvm.aarch64.sve.addp
    2, // llvm.aarch64.sve.adrb
    2, // llvm.aarch64.sve.adrd
    2, // llvm.aarch64.sve.adrh
    2, // llvm.aarch64.sve.adrw
    2, // llvm.aarch64.sve.aesd
    2, // llvm.aarch64.sve.aese
    2, // llvm.aarch64.sve.aesimc
    2, // llvm.aarch64.sve.aesmc
    2, // llvm.aarch64.sve.and
    2, // llvm.aarch64.sve.and.u
    2, // llvm.aarch64.sve.and.z
    2, // llvm.aarch64.sve.andv
    2, // llvm.aarch64.sve.asr
    2, // llvm.aarch64.sve.asr.u
    2, // llvm.aarch64.sve.asr.wide
    22, // llvm.aarch64.sve.asrd
    2, // llvm.aarch64.sve.bcax
    2, // llvm.aarch64.sve.bdep.x
    2, // llvm.aarch64.sve.bext.x
    2, // llvm.aarch64.sve.bfcvt.x2
    2, // llvm.aarch64.sve.bfcvtn.x2
    2, // llvm.aarch64.sve.bfdot
    89, // llvm.aarch64.sve.bfdot.lane.v2
    2, // llvm.aarch64.sve.bfmlalb
    89, // llvm.aarch64.sve.bfmlalb.lane.v2
    2, // llvm.aarch64.sve.bfmlalt
    89, // llvm.aarch64.sve.bfmlalt.lane.v2
    2, // llvm.aarch64.sve.bfmmla
    2, // llvm.aarch64.sve.bgrp.x
    2, // llvm.aarch64.sve.bic
    2, // llvm.aarch64.sve.bic.u
    2, // llvm.aarch64.sve.bic.z
    2, // llvm.aarch64.sve.brka
    2, // llvm.aarch64.sve.brka.z
    2, // llvm.aarch64.sve.brkb
    2, // llvm.aarch64.sve.brkb.z
    2, // llvm.aarch64.sve.brkn.z
    2, // llvm.aarch64.sve.brkpa.z
    2, // llvm.aarch64.sve.brkpb.z
    2, // llvm.aarch64.sve.bsl
    2, // llvm.aarch64.sve.bsl1n
    2, // llvm.aarch64.sve.bsl2n
    22, // llvm.aarch64.sve.cadd.x
    89, // llvm.aarch64.sve.cdot
    90, // llvm.aarch64.sve.cdot.lane
    2, // llvm.aarch64.sve.clasta
    2, // llvm.aarch64.sve.clasta.n
    2, // llvm.aarch64.sve.clastb
    2, // llvm.aarch64.sve.clastb.n
    2, // llvm.aarch64.sve.cls
    2, // llvm.aarch64.sve.clz
    90, // llvm.aarch64.sve.cmla.lane.x
    89, // llvm.aarch64.sve.cmla.x
    2, // llvm.aarch64.sve.cmpeq
    2, // llvm.aarch64.sve.cmpeq.wide
    2, // llvm.aarch64.sve.cmpge
    2, // llvm.aarch64.sve.cmpge.wide
    2, // llvm.aarch64.sve.cmpgt
    2, // llvm.aarch64.sve.cmpgt.wide
    2, // llvm.aarch64.sve.cmphi
    2, // llvm.aarch64.sve.cmphi.wide
    2, // llvm.aarch64.sve.cmphs
    2, // llvm.aarch64.sve.cmphs.wide
    2, // llvm.aarch64.sve.cmple.wide
    2, // llvm.aarch64.sve.cmplo.wide
    2, // llvm.aarch64.sve.cmpls.wide
    2, // llvm.aarch64.sve.cmplt.wide
    2, // llvm.aarch64.sve.cmpne
    2, // llvm.aarch64.sve.cmpne.wide
    2, // llvm.aarch64.sve.cnot
    2, // llvm.aarch64.sve.cnt
    32, // llvm.aarch64.sve.cntb
    32, // llvm.aarch64.sve.cntd
    32, // llvm.aarch64.sve.cnth
    2, // llvm.aarch64.sve.cntp
    59, // llvm.aarch64.sve.cntp.c16
    59, // llvm.aarch64.sve.cntp.c32
    59, // llvm.aarch64.sve.cntp.c64
    59, // llvm.aarch64.sve.cntp.c8
    32, // llvm.aarch64.sve.cntw
    2, // llvm.aarch64.sve.compact
    2, // llvm.aarch64.sve.convert.from.svbool
    2, // llvm.aarch64.sve.convert.to.svbool
    2, // llvm.aarch64.sve.dup
    2, // llvm.aarch64.sve.dup.x
    2, // llvm.aarch64.sve.dupq.lane
    2, // llvm.aarch64.sve.eor
    2, // llvm.aarch64.sve.eor.u
    2, // llvm.aarch64.sve.eor.z
    2, // llvm.aarch64.sve.eor3
    2, // llvm.aarch64.sve.eorbt
    2, // llvm.aarch64.sve.eortb
    2, // llvm.aarch64.sve.eorv
    22, // llvm.aarch64.sve.ext
    2, // llvm.aarch64.sve.fabd
    2, // llvm.aarch64.sve.fabd.u
    2, // llvm.aarch64.sve.fabs
    2, // llvm.aarch64.sve.facge
    2, // llvm.aarch64.sve.facgt
    2, // llvm.aarch64.sve.fadd
    2, // llvm.aarch64.sve.fadd.u
    2, // llvm.aarch64.sve.fadda
    2, // llvm.aarch64.sve.faddp
    2, // llvm.aarch64.sve.faddv
    89, // llvm.aarch64.sve.fcadd
    2, // llvm.aarch64.sve.fclamp
    2, // llvm.aarch64.sve.fclamp.single.x2
    2, // llvm.aarch64.sve.fclamp.single.x4
    91, // llvm.aarch64.sve.fcmla
    90, // llvm.aarch64.sve.fcmla.lane
    2, // llvm.aarch64.sve.fcmpeq
    2, // llvm.aarch64.sve.fcmpge
    2, // llvm.aarch64.sve.fcmpgt
    2, // llvm.aarch64.sve.fcmpne
    2, // llvm.aarch64.sve.fcmpuo
    2, // llvm.aarch64.sve.fcvt
    2, // llvm.aarch64.sve.fcvt.bf16f32
    2, // llvm.aarch64.sve.fcvt.f16f32
    2, // llvm.aarch64.sve.fcvt.f16f64
    2, // llvm.aarch64.sve.fcvt.f32f16
    2, // llvm.aarch64.sve.fcvt.f32f64
    2, // llvm.aarch64.sve.fcvt.f64f16
    2, // llvm.aarch64.sve.fcvt.f64f32
    2, // llvm.aarch64.sve.fcvt.x2
    2, // llvm.aarch64.sve.fcvtlt.f32f16
    2, // llvm.aarch64.sve.fcvtlt.f64f32
    2, // llvm.aarch64.sve.fcvtn.x2
    2, // llvm.aarch64.sve.fcvtnt.bf16f32
    2, // llvm.aarch64.sve.fcvtnt.f16f32
    2, // llvm.aarch64.sve.fcvtnt.f32f64
    2, // llvm.aarch64.sve.fcvts.x2
    2, // llvm.aarch64.sve.fcvts.x4
    2, // llvm.aarch64.sve.fcvtu.x2
    2, // llvm.aarch64.sve.fcvtu.x4
    2, // llvm.aarch64.sve.fcvtx.f32f64
    2, // llvm.aarch64.sve.fcvtxnt.f32f64
    2, // llvm.aarch64.sve.fcvtzs
    2, // llvm.aarch64.sve.fcvtzs.i32f16
    2, // llvm.aarch64.sve.fcvtzs.i32f64
    2, // llvm.aarch64.sve.fcvtzs.i64f16
    2, // llvm.aarch64.sve.fcvtzs.i64f32
    2, // llvm.aarch64.sve.fcvtzu
    2, // llvm.aarch64.sve.fcvtzu.i32f16
    2, // llvm.aarch64.sve.fcvtzu.i32f64
    2, // llvm.aarch64.sve.fcvtzu.i64f16
    2, // llvm.aarch64.sve.fcvtzu.i64f32
    2, // llvm.aarch64.sve.fdiv
    2, // llvm.aarch64.sve.fdiv.u
    2, // llvm.aarch64.sve.fdivr
    89, // llvm.aarch64.sve.fdot.lane.x2
    2, // llvm.aarch64.sve.fdot.x2
    2, // llvm.aarch64.sve.fexpa.x
    2, // llvm.aarch64.sve.flogb
    2, // llvm.aarch64.sve.fmad
    2, // llvm.aarch64.sve.fmax
    2, // llvm.aarch64.sve.fmax.single.x2
    2, // llvm.aarch64.sve.fmax.single.x4
    2, // llvm.aarch64.sve.fmax.u
    2, // llvm.aarch64.sve.fmax.x2
    2, // llvm.aarch64.sve.fmax.x4
    2, // llvm.aarch64.sve.fmaxnm
    2, // llvm.aarch64.sve.fmaxnm.single.x2
    2, // llvm.aarch64.sve.fmaxnm.single.x4
    2, // llvm.aarch64.sve.fmaxnm.u
    2, // llvm.aarch64.sve.fmaxnm.x2
    2, // llvm.aarch64.sve.fmaxnm.x4
    2, // llvm.aarch64.sve.fmaxnmp
    2, // llvm.aarch64.sve.fmaxnmv
    2, // llvm.aarch64.sve.fmaxp
    2, // llvm.aarch64.sve.fmaxv
    2, // llvm.aarch64.sve.fmin
    2, // llvm.aarch64.sve.fmin.single.x2
    2, // llvm.aarch64.sve.fmin.single.x4
    2, // llvm.aarch64.sve.fmin.u
    2, // llvm.aarch64.sve.fmin.x2
    2, // llvm.aarch64.sve.fmin.x4
    2, // llvm.aarch64.sve.fminnm
    2, // llvm.aarch64.sve.fminnm.single.x2
    2, // llvm.aarch64.sve.fminnm.single.x4
    2, // llvm.aarch64.sve.fminnm.u
    2, // llvm.aarch64.sve.fminnm.x2
    2, // llvm.aarch64.sve.fminnm.x4
    2, // llvm.aarch64.sve.fminnmp
    2, // llvm.aarch64.sve.fminnmv
    2, // llvm.aarch64.sve.fminp
    2, // llvm.aarch64.sve.fminv
    2, // llvm.aarch64.sve.fmla
    89, // llvm.aarch64.sve.fmla.lane
    2, // llvm.aarch64.sve.fmla.u
    2, // llvm.aarch64.sve.fmlalb
    89, // llvm.aarch64.sve.fmlalb.lane
    2, // llvm.aarch64.sve.fmlalt
    89, // llvm.aarch64.sve.fmlalt.lane
    2, // llvm.aarch64.sve.fmls
    89, // llvm.aarch64.sve.fmls.lane
    2, // llvm.aarch64.sve.fmls.u
    2, // llvm.aarch64.sve.fmlslb
    89, // llvm.aarch64.sve.fmlslb.lane
    2, // llvm.aarch64.sve.fmlslt
    89, // llvm.aarch64.sve.fmlslt.lane
    2, // llvm.aarch64.sve.fmmla
    2, // llvm.aarch64.sve.fmsb
    2, // llvm.aarch64.sve.fmul
    22, // llvm.aarch64.sve.fmul.lane
    2, // llvm.aarch64.sve.fmul.u
    2, // llvm.aarch64.sve.fmulx
    2, // llvm.aarch64.sve.fmulx.u
    2, // llvm.aarch64.sve.fneg
    2, // llvm.aarch64.sve.fnmad
    2, // llvm.aarch64.sve.fnmla
    2, // llvm.aarch64.sve.fnmla.u
    2, // llvm.aarch64.sve.fnmls
    2, // llvm.aarch64.sve.fnmls.u
    2, // llvm.aarch64.sve.fnmsb
    2, // llvm.aarch64.sve.frecpe.x
    2, // llvm.aarch64.sve.frecps.x
    2, // llvm.aarch64.sve.frecpx
    2, // llvm.aarch64.sve.frinta
    2, // llvm.aarch64.sve.frinta.x2
    2, // llvm.aarch64.sve.frinta.x4
    2, // llvm.aarch64.sve.frinti
    2, // llvm.aarch64.sve.frintm
    2, // llvm.aarch64.sve.frintm.x2
    2, // llvm.aarch64.sve.frintm.x4
    2, // llvm.aarch64.sve.frintn
    2, // llvm.aarch64.sve.frintn.x2
    2, // llvm.aarch64.sve.frintn.x4
    2, // llvm.aarch64.sve.frintp
    2, // llvm.aarch64.sve.frintp.x2
    2, // llvm.aarch64.sve.frintp.x4
    2, // llvm.aarch64.sve.frintx
    2, // llvm.aarch64.sve.frintz
    2, // llvm.aarch64.sve.frsqrte.x
    2, // llvm.aarch64.sve.frsqrts.x
    2, // llvm.aarch64.sve.fscale
    2, // llvm.aarch64.sve.fsqrt
    2, // llvm.aarch64.sve.fsub
    2, // llvm.aarch64.sve.fsub.u
    2, // llvm.aarch64.sve.fsubr
    22, // llvm.aarch64.sve.ftmad.x
    2, // llvm.aarch64.sve.ftsmul.x
    2, // llvm.aarch64.sve.ftssel.x
    2, // llvm.aarch64.sve.histcnt
    2, // llvm.aarch64.sve.histseg
    2, // llvm.aarch64.sve.index
    2, // llvm.aarch64.sve.insr
    2, // llvm.aarch64.sve.lasta
    2, // llvm.aarch64.sve.lastb
    3, // llvm.aarch64.sve.ld1
    3, // llvm.aarch64.sve.ld1.gather
    3, // llvm.aarch64.sve.ld1.gather.index
    62, // llvm.aarch64.sve.ld1.gather.scalar.offset
    3, // llvm.aarch64.sve.ld1.gather.sxtw
    3, // llvm.aarch64.sve.ld1.gather.sxtw.index
    3, // llvm.aarch64.sve.ld1.gather.uxtw
    3, // llvm.aarch64.sve.ld1.gather.uxtw.index
    3, // llvm.aarch64.sve.ld1.pn.x2
    3, // llvm.aarch64.sve.ld1.pn.x4
    3, // llvm.aarch64.sve.ld1ro
    3, // llvm.aarch64.sve.ld1rq
    3, // llvm.aarch64.sve.ld2.sret
    3, // llvm.aarch64.sve.ld3.sret
    3, // llvm.aarch64.sve.ld4.sret
    92, // llvm.aarch64.sve.ldff1
    92, // llvm.aarch64.sve.ldff1.gather
    92, // llvm.aarch64.sve.ldff1.gather.index
    92, // llvm.aarch64.sve.ldff1.gather.scalar.offset
    92, // llvm.aarch64.sve.ldff1.gather.sxtw
    92, // llvm.aarch64.sve.ldff1.gather.sxtw.index
    92, // llvm.aarch64.sve.ldff1.gather.uxtw
    92, // llvm.aarch64.sve.ldff1.gather.uxtw.index
    92, // llvm.aarch64.sve.ldnf1
    3, // llvm.aarch64.sve.ldnt1
    3, // llvm.aarch64.sve.ldnt1.gather
    3, // llvm.aarch64.sve.ldnt1.gather.index
    62, // llvm.aarch64.sve.ldnt1.gather.scalar.offset
    3, // llvm.aarch64.sve.ldnt1.gather.uxtw
    3, // llvm.aarch64.sve.ldnt1.pn.x2
    3, // llvm.aarch64.sve.ldnt1.pn.x4
    2, // llvm.aarch64.sve.lsl
    2, // llvm.aarch64.sve.lsl.u
    2, // llvm.aarch64.sve.lsl.wide
    2, // llvm.aarch64.sve.lsr
    2, // llvm.aarch64.sve.lsr.u
    2, // llvm.aarch64.sve.lsr.wide
    2, // llvm.aarch64.sve.mad
    2, // llvm.aarch64.sve.match
    2, // llvm.aarch64.sve.mla
    89, // llvm.aarch64.sve.mla.lane
    2, // llvm.aarch64.sve.mla.u
    2, // llvm.aarch64.sve.mls
    89, // llvm.aarch64.sve.mls.lane
    2, // llvm.aarch64.sve.mls.u
    2, // llvm.aarch64.sve.msb
    2, // llvm.aarch64.sve.mul
    22, // llvm.aarch64.sve.mul.lane
    2, // llvm.aarch64.sve.mul.u
    2, // llvm.aarch64.sve.nand.z
    2, // llvm.aarch64.sve.nbsl
    2, // llvm.aarch64.sve.neg
    2, // llvm.aarch64.sve.nmatch
    2, // llvm.aarch64.sve.nor.z
    2, // llvm.aarch64.sve.not
    2, // llvm.aarch64.sve.orn.z
    2, // llvm.aarch64.sve.orr
    2, // llvm.aarch64.sve.orr.u
    2, // llvm.aarch64.sve.orr.z
    2, // llvm.aarch64.sve.orv
    59, // llvm.aarch64.sve.pext
    59, // llvm.aarch64.sve.pext.x2
    2, // llvm.aarch64.sve.pfirst
    2, // llvm.aarch64.sve.pmul
    2, // llvm.aarch64.sve.pmullb.pair
    2, // llvm.aarch64.sve.pmullt.pair
    2, // llvm.aarch64.sve.pnext
    93, // llvm.aarch64.sve.prf
    94, // llvm.aarch64.sve.prfb.gather.index
    95, // llvm.aarch64.sve.prfb.gather.scalar.offset
    94, // llvm.aarch64.sve.prfb.gather.sxtw.index
    94, // llvm.aarch64.sve.prfb.gather.uxtw.index
    94, // llvm.aarch64.sve.prfd.gather.index
    95, // llvm.aarch64.sve.prfd.gather.scalar.offset
    94, // llvm.aarch64.sve.prfd.gather.sxtw.index
    94, // llvm.aarch64.sve.prfd.gather.uxtw.index
    94, // llvm.aarch64.sve.prfh.gather.index
    95, // llvm.aarch64.sve.prfh.gather.scalar.offset
    94, // llvm.aarch64.sve.prfh.gather.sxtw.index
    94, // llvm.aarch64.sve.prfh.gather.uxtw.index
    94, // llvm.aarch64.sve.prfw.gather.index
    95, // llvm.aarch64.sve.prfw.gather.scalar.offset
    94, // llvm.aarch64.sve.prfw.gather.sxtw.index
    94, // llvm.aarch64.sve.prfw.gather.uxtw.index
    2, // llvm.aarch64.sve.psel
    2, // llvm.aarch64.sve.ptest.any
    2, // llvm.aarch64.sve.ptest.first
    2, // llvm.aarch64.sve.ptest.last
    32, // llvm.aarch64.sve.ptrue
    2, // llvm.aarch64.sve.ptrue.c16
    2, // llvm.aarch64.sve.ptrue.c32
    2, // llvm.aarch64.sve.ptrue.c64
    2, // llvm.aarch64.sve.ptrue.c8
    2, // llvm.aarch64.sve.punpkhi
    2, // llvm.aarch64.sve.punpklo
    2, // llvm.aarch64.sve.raddhnb
    2, // llvm.aarch64.sve.raddhnt
    2, // llvm.aarch64.sve.rax1
    2, // llvm.aarch64.sve.rbit
    96, // llvm.aarch64.sve.rdffr
    96, // llvm.aarch64.sve.rdffr.z
    2, // llvm.aarch64.sve.rev
    2, // llvm.aarch64.sve.rev.b16
    2, // llvm.aarch64.sve.rev.b32
    2, // llvm.aarch64.sve.rev.b64
    2, // llvm.aarch64.sve.revb
    2, // llvm.aarch64.sve.revd
    2, // llvm.aarch64.sve.revh
    2, // llvm.aarch64.sve.revw
    59, // llvm.aarch64.sve.rshrnb
    22, // llvm.aarch64.sve.rshrnt
    2, // llvm.aarch64.sve.rsubhnb
    2, // llvm.aarch64.sve.rsubhnt
    2, // llvm.aarch64.sve.saba
    2, // llvm.aarch64.sve.sabalb
    2, // llvm.aarch64.sve.sabalt
    2, // llvm.aarch64.sve.sabd
    2, // llvm.aarch64.sve.sabd.u
    2, // llvm.aarch64.sve.sabdlb
    2, // llvm.aarch64.sve.sabdlt
    2, // llvm.aarch64.sve.sadalp
    2, // llvm.aarch64.sve.saddlb
    2, // llvm.aarch64.sve.saddlbt
    2, // llvm.aarch64.sve.saddlt
    2, // llvm.aarch64.sve.saddv
    2, // llvm.aarch64.sve.saddwb
    2, // llvm.aarch64.sve.saddwt
    2, // llvm.aarch64.sve.sbclb
    2, // llvm.aarch64.sve.sbclt
    2, // llvm.aarch64.sve.sclamp
    2, // llvm.aarch64.sve.sclamp.single.x2
    2, // llvm.aarch64.sve.sclamp.single.x4
    2, // llvm.aarch64.sve.scvtf
    2, // llvm.aarch64.sve.scvtf.f16i32
    2, // llvm.aarch64.sve.scvtf.f16i64
    2, // llvm.aarch64.sve.scvtf.f32i64
    2, // llvm.aarch64.sve.scvtf.f64i32
    2, // llvm.aarch64.sve.scvtf.x2
    2, // llvm.aarch64.sve.scvtf.x4
    2, // llvm.aarch64.sve.sdiv
    2, // llvm.aarch64.sve.sdiv.u
    2, // llvm.aarch64.sve.sdivr
    2, // llvm.aarch64.sve.sdot
    89, // llvm.aarch64.sve.sdot.lane
    89, // llvm.aarch64.sve.sdot.lane.x2
    2, // llvm.aarch64.sve.sdot.x2
    2, // llvm.aarch64.sve.sel
    2, // llvm.aarch64.sve.sel.x2
    2, // llvm.aarch64.sve.sel.x4
    97, // llvm.aarch64.sve.setffr
    2, // llvm.aarch64.sve.shadd
    59, // llvm.aarch64.sve.shrnb
    22, // llvm.aarch64.sve.shrnt
    2, // llvm.aarch64.sve.shsub
    2, // llvm.aarch64.sve.shsubr
    22, // llvm.aarch64.sve.sli
    2, // llvm.aarch64.sve.sm4e
    2, // llvm.aarch64.sve.sm4ekey
    2, // llvm.aarch64.sve.smax
    2, // llvm.aarch64.sve.smax.single.x2
    2, // llvm.aarch64.sve.smax.single.x4
    2, // llvm.aarch64.sve.smax.u
    2, // llvm.aarch64.sve.smax.x2
    2, // llvm.aarch64.sve.smax.x4
    2, // llvm.aarch64.sve.smaxp
    2, // llvm.aarch64.sve.smaxv
    2, // llvm.aarch64.sve.smin
    2, // llvm.aarch64.sve.smin.single.x2
    2, // llvm.aarch64.sve.smin.single.x4
    2, // llvm.aarch64.sve.smin.u
    2, // llvm.aarch64.sve.smin.x2
    2, // llvm.aarch64.sve.smin.x4
    2, // llvm.aarch64.sve.sminp
    2, // llvm.aarch64.sve.sminv
    2, // llvm.aarch64.sve.smlalb
    89, // llvm.aarch64.sve.smlalb.lane
    2, // llvm.aarch64.sve.smlalt
    89, // llvm.aarch64.sve.smlalt.lane
    2, // llvm.aarch64.sve.smlslb
    89, // llvm.aarch64.sve.smlslb.lane
    2, // llvm.aarch64.sve.smlslt
    89, // llvm.aarch64.sve.smlslt.lane
    2, // llvm.aarch64.sve.smmla
    2, // llvm.aarch64.sve.smulh
    2, // llvm.aarch64.sve.smulh.u
    2, // llvm.aarch64.sve.smullb
    22, // llvm.aarch64.sve.smullb.lane
    2, // llvm.aarch64.sve.smullt
    22, // llvm.aarch64.sve.smullt.lane
    2, // llvm.aarch64.sve.splice
    2, // llvm.aarch64.sve.sqabs
    2, // llvm.aarch64.sve.sqadd
    2, // llvm.aarch64.sve.sqadd.x
    22, // llvm.aarch64.sve.sqcadd.x
    2, // llvm.aarch64.sve.sqcvt.x2
    2, // llvm.aarch64.sve.sqcvt.x4
    2, // llvm.aarch64.sve.sqcvtn.x2
    2, // llvm.aarch64.sve.sqcvtn.x4
    2, // llvm.aarch64.sve.sqcvtu.x2
    2, // llvm.aarch64.sve.sqcvtu.x4
    2, // llvm.aarch64.sve.sqcvtun.x2
    2, // llvm.aarch64.sve.sqcvtun.x4
    26, // llvm.aarch64.sve.sqdecb.n32
    26, // llvm.aarch64.sve.sqdecb.n64
    26, // llvm.aarch64.sve.sqdecd
    26, // llvm.aarch64.sve.sqdecd.n32
    26, // llvm.aarch64.sve.sqdecd.n64
    26, // llvm.aarch64.sve.sqdech
    26, // llvm.aarch64.sve.sqdech.n32
    26, // llvm.aarch64.sve.sqdech.n64
    2, // llvm.aarch64.sve.sqdecp
    2, // llvm.aarch64.sve.sqdecp.n32
    2, // llvm.aarch64.sve.sqdecp.n64
    26, // llvm.aarch64.sve.sqdecw
    26, // llvm.aarch64.sve.sqdecw.n32
    26, // llvm.aarch64.sve.sqdecw.n64
    2, // llvm.aarch64.sve.sqdmlalb
    89, // llvm.aarch64.sve.sqdmlalb.lane
    2, // llvm.aarch64.sve.sqdmlalbt
    2, // llvm.aarch64.sve.sqdmlalt
    89, // llvm.aarch64.sve.sqdmlalt.lane
    2, // llvm.aarch64.sve.sqdmlslb
    89, // llvm.aarch64.sve.sqdmlslb.lane
    2, // llvm.aarch64.sve.sqdmlslbt
    2, // llvm.aarch64.sve.sqdmlslt
    89, // llvm.aarch64.sve.sqdmlslt.lane
    2, // llvm.aarch64.sve.sqdmulh
    22, // llvm.aarch64.sve.sqdmulh.lane
    2, // llvm.aarch64.sve.sqdmulh.single.vgx2
    2, // llvm.aarch64.sve.sqdmulh.single.vgx4
    2, // llvm.aarch64.sve.sqdmulh.vgx2
    2, // llvm.aarch64.sve.sqdmulh.vgx4
    2, // llvm.aarch64.sve.sqdmullb
    22, // llvm.aarch64.sve.sqdmullb.lane
    2, // llvm.aarch64.sve.sqdmullt
    22, // llvm.aarch64.sve.sqdmullt.lane
    26, // llvm.aarch64.sve.sqincb.n32
    26, // llvm.aarch64.sve.sqincb.n64
    26, // llvm.aarch64.sve.sqincd
    26, // llvm.aarch64.sve.sqincd.n32
    26, // llvm.aarch64.sve.sqincd.n64
    26, // llvm.aarch64.sve.sqinch
    26, // llvm.aarch64.sve.sqinch.n32
    26, // llvm.aarch64.sve.sqinch.n64
    2, // llvm.aarch64.sve.sqincp
    2, // llvm.aarch64.sve.sqincp.n32
    2, // llvm.aarch64.sve.sqincp.n64
    26, // llvm.aarch64.sve.sqincw
    26, // llvm.aarch64.sve.sqincw.n32
    26, // llvm.aarch64.sve.sqincw.n64
    2, // llvm.aarch64.sve.sqneg
    90, // llvm.aarch64.sve.sqrdcmlah.lane.x
    89, // llvm.aarch64.sve.sqrdcmlah.x
    2, // llvm.aarch64.sve.sqrdmlah
    89, // llvm.aarch64.sve.sqrdmlah.lane
    2, // llvm.aarch64.sve.sqrdmlsh
    89, // llvm.aarch64.sve.sqrdmlsh.lane
    2, // llvm.aarch64.sve.sqrdmulh
    22, // llvm.aarch64.sve.sqrdmulh.lane
    2, // llvm.aarch64.sve.sqrshl
    22, // llvm.aarch64.sve.sqrshr.x2
    91, // llvm.aarch64.sve.sqrshr.x4
    22, // llvm.aarch64.sve.sqrshrn.x2
    91, // llvm.aarch64.sve.sqrshrn.x4
    59, // llvm.aarch64.sve.sqrshrnb
    22, // llvm.aarch64.sve.sqrshrnt
    22, // llvm.aarch64.sve.sqrshru.x2
    91, // llvm.aarch64.sve.sqrshru.x4
    22, // llvm.aarch64.sve.sqrshrun.x2
    91, // llvm.aarch64.sve.sqrshrun.x4
    59, // llvm.aarch64.sve.sqrshrunb
    22, // llvm.aarch64.sve.sqrshrunt
    2, // llvm.aarch64.sve.sqshl
    22, // llvm.aarch64.sve.sqshlu
    59, // llvm.aarch64.sve.sqshrnb
    22, // llvm.aarch64.sve.sqshrnt
    59, // llvm.aarch64.sve.sqshrunb
    22, // llvm.aarch64.sve.sqshrunt
    2, // llvm.aarch64.sve.sqsub
    2, // llvm.aarch64.sve.sqsub.u
    2, // llvm.aarch64.sve.sqsub.x
    2, // llvm.aarch64.sve.sqsubr
    2, // llvm.aarch64.sve.sqxtnb
    2, // llvm.aarch64.sve.sqxtnt
    2, // llvm.aarch64.sve.sqxtunb
    2, // llvm.aarch64.sve.sqxtunt
    2, // llvm.aarch64.sve.srhadd
    22, // llvm.aarch64.sve.sri
    2, // llvm.aarch64.sve.srshl
    2, // llvm.aarch64.sve.srshl.single.x2
    2, // llvm.aarch64.sve.srshl.single.x4
    2, // llvm.aarch64.sve.srshl.x2
    2, // llvm.aarch64.sve.srshl.x4
    22, // llvm.aarch64.sve.srshr
    22, // llvm.aarch64.sve.srsra
    59, // llvm.aarch64.sve.sshllb
    59, // llvm.aarch64.sve.sshllt
    22, // llvm.aarch64.sve.ssra
    2, // llvm.aarch64.sve.ssublb
    2, // llvm.aarch64.sve.ssublbt
    2, // llvm.aarch64.sve.ssublt
    2, // llvm.aarch64.sve.ssubltb
    2, // llvm.aarch64.sve.ssubwb
    2, // llvm.aarch64.sve.ssubwt
    78, // llvm.aarch64.sve.st1
    98, // llvm.aarch64.sve.st1.pn.x2
    98, // llvm.aarch64.sve.st1.pn.x4
    98, // llvm.aarch64.sve.st1.scatter
    98, // llvm.aarch64.sve.st1.scatter.index
    88, // llvm.aarch64.sve.st1.scatter.scalar.offset
    98, // llvm.aarch64.sve.st1.scatter.sxtw
    98, // llvm.aarch64.sve.st1.scatter.sxtw.index
    98, // llvm.aarch64.sve.st1.scatter.uxtw
    98, // llvm.aarch64.sve.st1.scatter.uxtw.index
    79, // llvm.aarch64.sve.st2
    80, // llvm.aarch64.sve.st3
    81, // llvm.aarch64.sve.st4
    78, // llvm.aarch64.sve.stnt1
    98, // llvm.aarch64.sve.stnt1.pn.x2
    98, // llvm.aarch64.sve.stnt1.pn.x4
    98, // llvm.aarch64.sve.stnt1.scatter
    98, // llvm.aarch64.sve.stnt1.scatter.index
    88, // llvm.aarch64.sve.stnt1.scatter.scalar.offset
    98, // llvm.aarch64.sve.stnt1.scatter.uxtw
    2, // llvm.aarch64.sve.sub
    2, // llvm.aarch64.sve.sub.u
    2, // llvm.aarch64.sve.subhnb
    2, // llvm.aarch64.sve.subhnt
    2, // llvm.aarch64.sve.subr
    89, // llvm.aarch64.sve.sudot.lane
    2, // llvm.aarch64.sve.sunpk.x2
    2, // llvm.aarch64.sve.sunpk.x4
    2, // llvm.aarch64.sve.sunpkhi
    2, // llvm.aarch64.sve.sunpklo
    2, // llvm.aarch64.sve.suqadd
    2, // llvm.aarch64.sve.sxtb
    2, // llvm.aarch64.sve.sxth
    2, // llvm.aarch64.sve.sxtw
    2, // llvm.aarch64.sve.tbl
    2, // llvm.aarch64.sve.tbl2
    2, // llvm.aarch64.sve.tbx
    2, // llvm.aarch64.sve.trn1
    2, // llvm.aarch64.sve.trn1.b16
    2, // llvm.aarch64.sve.trn1.b32
    2, // llvm.aarch64.sve.trn1.b64
    2, // llvm.aarch64.sve.trn1q
    2, // llvm.aarch64.sve.trn2
    2, // llvm.aarch64.sve.trn2.b16
    2, // llvm.aarch64.sve.trn2.b32
    2, // llvm.aarch64.sve.trn2.b64
    2, // llvm.aarch64.sve.trn2q
    2, // llvm.aarch64.sve.uaba
    2, // llvm.aarch64.sve.uabalb
    2, // llvm.aarch64.sve.uabalt
    2, // llvm.aarch64.sve.uabd
    2, // llvm.aarch64.sve.uabd.u
    2, // llvm.aarch64.sve.uabdlb
    2, // llvm.aarch64.sve.uabdlt
    2, // llvm.aarch64.sve.uadalp
    2, // llvm.aarch64.sve.uaddlb
    2, // llvm.aarch64.sve.uaddlt
    2, // llvm.aarch64.sve.uaddv
    2, // llvm.aarch64.sve.uaddwb
    2, // llvm.aarch64.sve.uaddwt
    2, // llvm.aarch64.sve.uclamp
    2, // llvm.aarch64.sve.uclamp.single.x2
    2, // llvm.aarch64.sve.uclamp.single.x4
    2, // llvm.aarch64.sve.ucvtf
    2, // llvm.aarch64.sve.ucvtf.f16i32
    2, // llvm.aarch64.sve.ucvtf.f16i64
    2, // llvm.aarch64.sve.ucvtf.f32i64
    2, // llvm.aarch64.sve.ucvtf.f64i32
    2, // llvm.aarch64.sve.ucvtf.x2
    2, // llvm.aarch64.sve.ucvtf.x4
    2, // llvm.aarch64.sve.udiv
    2, // llvm.aarch64.sve.udiv.u
    2, // llvm.aarch64.sve.udivr
    2, // llvm.aarch64.sve.udot
    89, // llvm.aarch64.sve.udot.lane
    89, // llvm.aarch64.sve.udot.lane.x2
    2, // llvm.aarch64.sve.udot.x2
    2, // llvm.aarch64.sve.uhadd
    2, // llvm.aarch64.sve.uhsub
    2, // llvm.aarch64.sve.uhsubr
    2, // llvm.aarch64.sve.umax
    2, // llvm.aarch64.sve.umax.single.x2
    2, // llvm.aarch64.sve.umax.single.x4
    2, // llvm.aarch64.sve.umax.u
    2, // llvm.aarch64.sve.umax.x2
    2, // llvm.aarch64.sve.umax.x4
    2, // llvm.aarch64.sve.umaxp
    2, // llvm.aarch64.sve.umaxv
    2, // llvm.aarch64.sve.umin
    2, // llvm.aarch64.sve.umin.single.x2
    2, // llvm.aarch64.sve.umin.single.x4
    2, // llvm.aarch64.sve.umin.u
    2, // llvm.aarch64.sve.umin.x2
    2, // llvm.aarch64.sve.umin.x4
    2, // llvm.aarch64.sve.uminp
    2, // llvm.aarch64.sve.uminv
    2, // llvm.aarch64.sve.umlalb
    89, // llvm.aarch64.sve.umlalb.lane
    2, // llvm.aarch64.sve.umlalt
    89, // llvm.aarch64.sve.umlalt.lane
    2, // llvm.aarch64.sve.umlslb
    89, // llvm.aarch64.sve.umlslb.lane
    2, // llvm.aarch64.sve.umlslt
    89, // llvm.aarch64.sve.umlslt.lane
    2, // llvm.aarch64.sve.ummla
    2, // llvm.aarch64.sve.umulh
    2, // llvm.aarch64.sve.umulh.u
    2, // llvm.aarch64.sve.umullb
    22, // llvm.aarch64.sve.umullb.lane
    2, // llvm.aarch64.sve.umullt
    22, // llvm.aarch64.sve.umullt.lane
    2, // llvm.aarch64.sve.uqadd
    2, // llvm.aarch64.sve.uqadd.x
    2, // llvm.aarch64.sve.uqcvt.x2
    2, // llvm.aarch64.sve.uqcvt.x4
    2, // llvm.aarch64.sve.uqcvtn.x2
    2, // llvm.aarch64.sve.uqcvtn.x4
    26, // llvm.aarch64.sve.uqdecb.n32
    26, // llvm.aarch64.sve.uqdecb.n64
    26, // llvm.aarch64.sve.uqdecd
    26, // llvm.aarch64.sve.uqdecd.n32
    26, // llvm.aarch64.sve.uqdecd.n64
    26, // llvm.aarch64.sve.uqdech
    26, // llvm.aarch64.sve.uqdech.n32
    26, // llvm.aarch64.sve.uqdech.n64
    2, // llvm.aarch64.sve.uqdecp
    2, // llvm.aarch64.sve.uqdecp.n32
    2, // llvm.aarch64.sve.uqdecp.n64
    26, // llvm.aarch64.sve.uqdecw
    26, // llvm.aarch64.sve.uqdecw.n32
    26, // llvm.aarch64.sve.uqdecw.n64
    26, // llvm.aarch64.sve.uqincb.n32
    26, // llvm.aarch64.sve.uqincb.n64
    26, // llvm.aarch64.sve.uqincd
    26, // llvm.aarch64.sve.uqincd.n32
    26, // llvm.aarch64.sve.uqincd.n64
    26, // llvm.aarch64.sve.uqinch
    26, // llvm.aarch64.sve.uqinch.n32
    26, // llvm.aarch64.sve.uqinch.n64
    2, // llvm.aarch64.sve.uqincp
    2, // llvm.aarch64.sve.uqincp.n32
    2, // llvm.aarch64.sve.uqincp.n64
    26, // llvm.aarch64.sve.uqincw
    26, // llvm.aarch64.sve.uqincw.n32
    26, // llvm.aarch64.sve.uqincw.n64
    2, // llvm.aarch64.sve.uqrshl
    22, // llvm.aarch64.sve.uqrshr.x2
    91, // llvm.aarch64.sve.uqrshr.x4
    22, // llvm.aarch64.sve.uqrshrn.x2
    91, // llvm.aarch64.sve.uqrshrn.x4
    59, // llvm.aarch64.sve.uqrshrnb
    22, // llvm.aarch64.sve.uqrshrnt
    2, // llvm.aarch64.sve.uqshl
    59, // llvm.aarch64.sve.uqshrnb
    22, // llvm.aarch64.sve.uqshrnt
    2, // llvm.aarch64.sve.uqsub
    2, // llvm.aarch64.sve.uqsub.u
    2, // llvm.aarch64.sve.uqsub.x
    2, // llvm.aarch64.sve.uqsubr
    2, // llvm.aarch64.sve.uqxtnb
    2, // llvm.aarch64.sve.uqxtnt
    2, // llvm.aarch64.sve.urecpe
    2, // llvm.aarch64.sve.urhadd
    2, // llvm.aarch64.sve.urshl
    2, // llvm.aarch64.sve.urshl.single.x2
    2, // llvm.aarch64.sve.urshl.single.x4
    2, // llvm.aarch64.sve.urshl.x2
    2, // llvm.aarch64.sve.urshl.x4
    22, // llvm.aarch64.sve.urshr
    2, // llvm.aarch64.sve.ursqrte
    22, // llvm.aarch64.sve.ursra
    2, // llvm.aarch64.sve.usdot
    89, // llvm.aarch64.sve.usdot.lane
    59, // llvm.aarch64.sve.ushllb
    59, // llvm.aarch64.sve.ushllt
    2, // llvm.aarch64.sve.usmmla
    2, // llvm.aarch64.sve.usqadd
    22, // llvm.aarch64.sve.usra
    2, // llvm.aarch64.sve.usublb
    2, // llvm.aarch64.sve.usublt
    2, // llvm.aarch64.sve.usubwb
    2, // llvm.aarch64.sve.usubwt
    2, // llvm.aarch64.sve.uunpk.x2
    2, // llvm.aarch64.sve.uunpk.x4
    2, // llvm.aarch64.sve.uunpkhi
    2, // llvm.aarch64.sve.uunpklo
    2, // llvm.aarch64.sve.uxtb
    2, // llvm.aarch64.sve.uxth
    2, // llvm.aarch64.sve.uxtw
    2, // llvm.aarch64.sve.uzp.x2
    2, // llvm.aarch64.sve.uzp.x4
    2, // llvm.aarch64.sve.uzp1
    2, // llvm.aarch64.sve.uzp1.b16
    2, // llvm.aarch64.sve.uzp1.b32
    2, // llvm.aarch64.sve.uzp1.b64
    2, // llvm.aarch64.sve.uzp1q
    2, // llvm.aarch64.sve.uzp2
    2, // llvm.aarch64.sve.uzp2.b16
    2, // llvm.aarch64.sve.uzp2.b32
    2, // llvm.aarch64.sve.uzp2.b64
    2, // llvm.aarch64.sve.uzp2q
    2, // llvm.aarch64.sve.uzpq.x2
    2, // llvm.aarch64.sve.uzpq.x4
    2, // llvm.aarch64.sve.whilege
    22, // llvm.aarch64.sve.whilege.c16
    22, // llvm.aarch64.sve.whilege.c32
    22, // llvm.aarch64.sve.whilege.c64
    22, // llvm.aarch64.sve.whilege.c8
    2, // llvm.aarch64.sve.whilege.x2
    2, // llvm.aarch64.sve.whilegt
    22, // llvm.aarch64.sve.whilegt.c16
    22, // llvm.aarch64.sve.whilegt.c32
    22, // llvm.aarch64.sve.whilegt.c64
    22, // llvm.aarch64.sve.whilegt.c8
    2, // llvm.aarch64.sve.whilegt.x2
    2, // llvm.aarch64.sve.whilehi
    22, // llvm.aarch64.sve.whilehi.c16
    22, // llvm.aarch64.sve.whilehi.c32
    22, // llvm.aarch64.sve.whilehi.c64
    22, // llvm.aarch64.sve.whilehi.c8
    2, // llvm.aarch64.sve.whilehi.x2
    2, // llvm.aarch64.sve.whilehs
    22, // llvm.aarch64.sve.whilehs.c16
    22, // llvm.aarch64.sve.whilehs.c32
    22, // llvm.aarch64.sve.whilehs.c64
    22, // llvm.aarch64.sve.whilehs.c8
    2, // llvm.aarch64.sve.whilehs.x2
    2, // llvm.aarch64.sve.whilele
    22, // llvm.aarch64.sve.whilele.c16
    22, // llvm.aarch64.sve.whilele.c32
    22, // llvm.aarch64.sve.whilele.c64
    22, // llvm.aarch64.sve.whilele.c8
    2, // llvm.aarch64.sve.whilele.x2
    2, // llvm.aarch64.sve.whilelo
    22, // llvm.aarch64.sve.whilelo.c16
    22, // llvm.aarch64.sve.whilelo.c32
    22, // llvm.aarch64.sve.whilelo.c64
    22, // llvm.aarch64.sve.whilelo.c8
    2, // llvm.aarch64.sve.whilelo.x2
    2, // llvm.aarch64.sve.whilels
    22, // llvm.aarch64.sve.whilels.c16
    22, // llvm.aarch64.sve.whilels.c32
    22, // llvm.aarch64.sve.whilels.c64
    22, // llvm.aarch64.sve.whilels.c8
    2, // llvm.aarch64.sve.whilels.x2
    2, // llvm.aarch64.sve.whilelt
    22, // llvm.aarch64.sve.whilelt.c16
    22, // llvm.aarch64.sve.whilelt.c32
    22, // llvm.aarch64.sve.whilelt.c64
    22, // llvm.aarch64.sve.whilelt.c8
    2, // llvm.aarch64.sve.whilelt.x2
    2, // llvm.aarch64.sve.whilerw.b
    2, // llvm.aarch64.sve.whilerw.d
    2, // llvm.aarch64.sve.whilerw.h
    2, // llvm.aarch64.sve.whilerw.s
    2, // llvm.aarch64.sve.whilewr.b
    2, // llvm.aarch64.sve.whilewr.d
    2, // llvm.aarch64.sve.whilewr.h
    2, // llvm.aarch64.sve.whilewr.s
    97, // llvm.aarch64.sve.wrffr
    22, // llvm.aarch64.sve.xar
    2, // llvm.aarch64.sve.zip.x2
    2, // llvm.aarch64.sve.zip.x4
    2, // llvm.aarch64.sve.zip1
    2, // llvm.aarch64.sve.zip1.b16
    2, // llvm.aarch64.sve.zip1.b32
    2, // llvm.aarch64.sve.zip1.b64
    2, // llvm.aarch64.sve.zip1q
    2, // llvm.aarch64.sve.zip2
    2, // llvm.aarch64.sve.zip2.b16
    2, // llvm.aarch64.sve.zip2.b32
    2, // llvm.aarch64.sve.zip2.b64
    2, // llvm.aarch64.sve.zip2q
    2, // llvm.aarch64.sve.zipq.x2
    2, // llvm.aarch64.sve.zipq.x4
    22, // llvm.aarch64.tagp
    99, // llvm.aarch64.tcancel
    100, // llvm.aarch64.tcommit
    100, // llvm.aarch64.tstart
    101, // llvm.aarch64.ttest
    2, // llvm.aarch64.udiv
    5, // llvm.amdgcn.alignbyte
    102, // llvm.amdgcn.ballot
    103, // llvm.amdgcn.buffer.atomic.add
    103, // llvm.amdgcn.buffer.atomic.and
    104, // llvm.amdgcn.buffer.atomic.cmpswap
    103, // llvm.amdgcn.buffer.atomic.csub
    103, // llvm.amdgcn.buffer.atomic.fadd
    103, // llvm.amdgcn.buffer.atomic.or
    103, // llvm.amdgcn.buffer.atomic.smax
    103, // llvm.amdgcn.buffer.atomic.smin
    103, // llvm.amdgcn.buffer.atomic.sub
    103, // llvm.amdgcn.buffer.atomic.swap
    103, // llvm.amdgcn.buffer.atomic.umax
    103, // llvm.amdgcn.buffer.atomic.umin
    103, // llvm.amdgcn.buffer.atomic.xor
    105, // llvm.amdgcn.buffer.load
    105, // llvm.amdgcn.buffer.load.format
    106, // llvm.amdgcn.buffer.store
    106, // llvm.amdgcn.buffer.store.format
    76, // llvm.amdgcn.buffer.wbinvl1
    76, // llvm.amdgcn.buffer.wbinvl1.sc
    76, // llvm.amdgcn.buffer.wbinvl1.vol
    5, // llvm.amdgcn.class
    5, // llvm.amdgcn.cos
    107, // llvm.amdgcn.cs.chain
    5, // llvm.amdgcn.cubeid
    5, // llvm.amdgcn.cubema
    5, // llvm.amdgcn.cubesc
    5, // llvm.amdgcn.cubetc
    59, // llvm.amdgcn.cvt.f32.bf8
    59, // llvm.amdgcn.cvt.f32.fp8
    89, // llvm.amdgcn.cvt.pk.bf8.f32
    59, // llvm.amdgcn.cvt.pk.f32.bf8
    59, // llvm.amdgcn.cvt.pk.f32.fp8
    89, // llvm.amdgcn.cvt.pk.fp8.f32
    5, // llvm.amdgcn.cvt.pk.i16
    5, // llvm.amdgcn.cvt.pk.u16
    5, // llvm.amdgcn.cvt.pk.u8.f32
    5, // llvm.amdgcn.cvt.pknorm.i16
    5, // llvm.amdgcn.cvt.pknorm.u16
    5, // llvm.amdgcn.cvt.pkrtz
    89, // llvm.amdgcn.cvt.sr.bf8.f32
    89, // llvm.amdgcn.cvt.sr.fp8.f32
    5, // llvm.amdgcn.dispatch.id
    108, // llvm.amdgcn.dispatch.ptr
    5, // llvm.amdgcn.div.fixup
    5, // llvm.amdgcn.div.fmas
    65, // llvm.amdgcn.div.scale
    109, // llvm.amdgcn.ds.add.gs.reg.rtn
    110, // llvm.amdgcn.ds.append
    102, // llvm.amdgcn.ds.bpermute
    111, // llvm.amdgcn.ds.bvh.stack.rtn
    110, // llvm.amdgcn.ds.consume
    112, // llvm.amdgcn.ds.fadd
    113, // llvm.amdgcn.ds.fadd.v2bf16
    112, // llvm.amdgcn.ds.fmax
    112, // llvm.amdgcn.ds.fmin
    114, // llvm.amdgcn.ds.gws.barrier
    115, // llvm.amdgcn.ds.gws.init
    114, // llvm.amdgcn.ds.gws.sema.br
    114, // llvm.amdgcn.ds.gws.sema.p
    114, // llvm.amdgcn.ds.gws.sema.release.all
    114, // llvm.amdgcn.ds.gws.sema.v
    116, // llvm.amdgcn.ds.ordered.add
    116, // llvm.amdgcn.ds.ordered.swap
    102, // llvm.amdgcn.ds.permute
    109, // llvm.amdgcn.ds.sub.gs.reg.rtn
    117, // llvm.amdgcn.ds.swizzle
    118, // llvm.amdgcn.else
    118, // llvm.amdgcn.end.cf
    119, // llvm.amdgcn.endpgm
    120, // llvm.amdgcn.exp
    121, // llvm.amdgcn.exp.compr
    122, // llvm.amdgcn.exp.row
    5, // llvm.amdgcn.exp2
    123, // llvm.amdgcn.fcmp
    5, // llvm.amdgcn.fdiv.fast
    124, // llvm.amdgcn.fdot2
    5, // llvm.amdgcn.fdot2.bf16.bf16
    5, // llvm.amdgcn.fdot2.f16.f16
    124, // llvm.amdgcn.fdot2.f32.bf16
    125, // llvm.amdgcn.flat.atomic.fadd
    125, // llvm.amdgcn.flat.atomic.fadd.v2bf16
    125, // llvm.amdgcn.flat.atomic.fmax
    125, // llvm.amdgcn.flat.atomic.fmin
    5, // llvm.amdgcn.fma.legacy
    5, // llvm.amdgcn.fmad.ftz
    5, // llvm.amdgcn.fmed3
    5, // llvm.amdgcn.fmul.legacy
    5, // llvm.amdgcn.fract
    5, // llvm.amdgcn.frexp.exp
    5, // llvm.amdgcn.frexp.mant
    125, // llvm.amdgcn.global.atomic.csub
    125, // llvm.amdgcn.global.atomic.fadd
    125, // llvm.amdgcn.global.atomic.fadd.v2bf16
    125, // llvm.amdgcn.global.atomic.fmax
    125, // llvm.amdgcn.global.atomic.fmin
    126, // llvm.amdgcn.global.load.lds
    5, // llvm.amdgcn.groupstaticsize
    123, // llvm.amdgcn.icmp
    118, // llvm.amdgcn.if
    102, // llvm.amdgcn.if.break
    127, // llvm.amdgcn.iglp.opt
    128, // llvm.amdgcn.image.atomic.add.1d
    129, // llvm.amdgcn.image.atomic.add.1darray
    129, // llvm.amdgcn.image.atomic.add.2d
    130, // llvm.amdgcn.image.atomic.add.2darray
    131, // llvm.amdgcn.image.atomic.add.2darraymsaa
    130, // llvm.amdgcn.image.atomic.add.2dmsaa
    130, // llvm.amdgcn.image.atomic.add.3d
    130, // llvm.amdgcn.image.atomic.add.cube
    128, // llvm.amdgcn.image.atomic.and.1d
    129, // llvm.amdgcn.image.atomic.and.1darray
    129, // llvm.amdgcn.image.atomic.and.2d
    130, // llvm.amdgcn.image.atomic.and.2darray
    131, // llvm.amdgcn.image.atomic.and.2darraymsaa
    130, // llvm.amdgcn.image.atomic.and.2dmsaa
    130, // llvm.amdgcn.image.atomic.and.3d
    130, // llvm.amdgcn.image.atomic.and.cube
    129, // llvm.amdgcn.image.atomic.cmpswap.1d
    130, // llvm.amdgcn.image.atomic.cmpswap.1darray
    130, // llvm.amdgcn.image.atomic.cmpswap.2d
    131, // llvm.amdgcn.image.atomic.cmpswap.2darray
    132, // llvm.amdgcn.image.atomic.cmpswap.2darraymsaa
    131, // llvm.amdgcn.image.atomic.cmpswap.2dmsaa
    131, // llvm.amdgcn.image.atomic.cmpswap.3d
    131, // llvm.amdgcn.image.atomic.cmpswap.cube
    128, // llvm.amdgcn.image.atomic.dec.1d
    129, // llvm.amdgcn.image.atomic.dec.1darray
    129, // llvm.amdgcn.image.atomic.dec.2d
    130, // llvm.amdgcn.image.atomic.dec.2darray
    131, // llvm.amdgcn.image.atomic.dec.2darraymsaa
    130, // llvm.amdgcn.image.atomic.dec.2dmsaa
    130, // llvm.amdgcn.image.atomic.dec.3d
    130, // llvm.amdgcn.image.atomic.dec.cube
    128, // llvm.amdgcn.image.atomic.fmax.1d
    129, // llvm.amdgcn.image.atomic.fmax.1darray
    129, // llvm.amdgcn.image.atomic.fmax.2d
    130, // llvm.amdgcn.image.atomic.fmax.2darray
    131, // llvm.amdgcn.image.atomic.fmax.2darraymsaa
    130, // llvm.amdgcn.image.atomic.fmax.2dmsaa
    130, // llvm.amdgcn.image.atomic.fmax.3d
    130, // llvm.amdgcn.image.atomic.fmax.cube
    128, // llvm.amdgcn.image.atomic.fmin.1d
    129, // llvm.amdgcn.image.atomic.fmin.1darray
    129, // llvm.amdgcn.image.atomic.fmin.2d
    130, // llvm.amdgcn.image.atomic.fmin.2darray
    131, // llvm.amdgcn.image.atomic.fmin.2darraymsaa
    130, // llvm.amdgcn.image.atomic.fmin.2dmsaa
    130, // llvm.amdgcn.image.atomic.fmin.3d
    130, // llvm.amdgcn.image.atomic.fmin.cube
    128, // llvm.amdgcn.image.atomic.inc.1d
    129, // llvm.amdgcn.image.atomic.inc.1darray
    129, // llvm.amdgcn.image.atomic.inc.2d
    130, // llvm.amdgcn.image.atomic.inc.2darray
    131, // llvm.amdgcn.image.atomic.inc.2darraymsaa
    130, // llvm.amdgcn.image.atomic.inc.2dmsaa
    130, // llvm.amdgcn.image.atomic.inc.3d
    130, // llvm.amdgcn.image.atomic.inc.cube
    128, // llvm.amdgcn.image.atomic.or.1d
    129, // llvm.amdgcn.image.atomic.or.1darray
    129, // llvm.amdgcn.image.atomic.or.2d
    130, // llvm.amdgcn.image.atomic.or.2darray
    131, // llvm.amdgcn.image.atomic.or.2darraymsaa
    130, // llvm.amdgcn.image.atomic.or.2dmsaa
    130, // llvm.amdgcn.image.atomic.or.3d
    130, // llvm.amdgcn.image.atomic.or.cube
    128, // llvm.amdgcn.image.atomic.smax.1d
    129, // llvm.amdgcn.image.atomic.smax.1darray
    129, // llvm.amdgcn.image.atomic.smax.2d
    130, // llvm.amdgcn.image.atomic.smax.2darray
    131, // llvm.amdgcn.image.atomic.smax.2darraymsaa
    130, // llvm.amdgcn.image.atomic.smax.2dmsaa
    130, // llvm.amdgcn.image.atomic.smax.3d
    130, // llvm.amdgcn.image.atomic.smax.cube
    128, // llvm.amdgcn.image.atomic.smin.1d
    129, // llvm.amdgcn.image.atomic.smin.1darray
    129, // llvm.amdgcn.image.atomic.smin.2d
    130, // llvm.amdgcn.image.atomic.smin.2darray
    131, // llvm.amdgcn.image.atomic.smin.2darraymsaa
    130, // llvm.amdgcn.image.atomic.smin.2dmsaa
    130, // llvm.amdgcn.image.atomic.smin.3d
    130, // llvm.amdgcn.image.atomic.smin.cube
    128, // llvm.amdgcn.image.atomic.sub.1d
    129, // llvm.amdgcn.image.atomic.sub.1darray
    129, // llvm.amdgcn.image.atomic.sub.2d
    130, // llvm.amdgcn.image.atomic.sub.2darray
    131, // llvm.amdgcn.image.atomic.sub.2darraymsaa
    130, // llvm.amdgcn.image.atomic.sub.2dmsaa
    130, // llvm.amdgcn.image.atomic.sub.3d
    130, // llvm.amdgcn.image.atomic.sub.cube
    128, // llvm.amdgcn.image.atomic.swap.1d
    129, // llvm.amdgcn.image.atomic.swap.1darray
    129, // llvm.amdgcn.image.atomic.swap.2d
    130, // llvm.amdgcn.image.atomic.swap.2darray
    131, // llvm.amdgcn.image.atomic.swap.2darraymsaa
    130, // llvm.amdgcn.image.atomic.swap.2dmsaa
    130, // llvm.amdgcn.image.atomic.swap.3d
    130, // llvm.amdgcn.image.atomic.swap.cube
    128, // llvm.amdgcn.image.atomic.umax.1d
    129, // llvm.amdgcn.image.atomic.umax.1darray
    129, // llvm.amdgcn.image.atomic.umax.2d
    130, // llvm.amdgcn.image.atomic.umax.2darray
    131, // llvm.amdgcn.image.atomic.umax.2darraymsaa
    130, // llvm.amdgcn.image.atomic.umax.2dmsaa
    130, // llvm.amdgcn.image.atomic.umax.3d
    130, // llvm.amdgcn.image.atomic.umax.cube
    128, // llvm.amdgcn.image.atomic.umin.1d
    129, // llvm.amdgcn.image.atomic.umin.1darray
    129, // llvm.amdgcn.image.atomic.umin.2d
    130, // llvm.amdgcn.image.atomic.umin.2darray
    131, // llvm.amdgcn.image.atomic.umin.2darraymsaa
    130, // llvm.amdgcn.image.atomic.umin.2dmsaa
    130, // llvm.amdgcn.image.atomic.umin.3d
    130, // llvm.amdgcn.image.atomic.umin.cube
    128, // llvm.amdgcn.image.atomic.xor.1d
    129, // llvm.amdgcn.image.atomic.xor.1darray
    129, // llvm.amdgcn.image.atomic.xor.2d
    130, // llvm.amdgcn.image.atomic.xor.2darray
    131, // llvm.amdgcn.image.atomic.xor.2darraymsaa
    130, // llvm.amdgcn.image.atomic.xor.2dmsaa
    130, // llvm.amdgcn.image.atomic.xor.3d
    130, // llvm.amdgcn.image.atomic.xor.cube
    62, // llvm.amdgcn.image.bvh.intersect.ray
    133, // llvm.amdgcn.image.gather4.2d
    134, // llvm.amdgcn.image.gather4.2darray
    134, // llvm.amdgcn.image.gather4.b.2d
    135, // llvm.amdgcn.image.gather4.b.2darray
    135, // llvm.amdgcn.image.gather4.b.cl.2d
    136, // llvm.amdgcn.image.gather4.b.cl.2darray
    136, // llvm.amdgcn.image.gather4.b.cl.cube
    136, // llvm.amdgcn.image.gather4.b.cl.o.2d
    137, // llvm.amdgcn.image.gather4.b.cl.o.2darray
    137, // llvm.amdgcn.image.gather4.b.cl.o.cube
    135, // llvm.amdgcn.image.gather4.b.cube
    135, // llvm.amdgcn.image.gather4.b.o.2d
    136, // llvm.amdgcn.image.gather4.b.o.2darray
    136, // llvm.amdgcn.image.gather4.b.o.cube
    134, // llvm.amdgcn.image.gather4.c.2d
    135, // llvm.amdgcn.image.gather4.c.2darray
    135, // llvm.amdgcn.image.gather4.c.b.2d
    136, // llvm.amdgcn.image.gather4.c.b.2darray
    136, // llvm.amdgcn.image.gather4.c.b.cl.2d
    137, // llvm.amdgcn.image.gather4.c.b.cl.2darray
    137, // llvm.amdgcn.image.gather4.c.b.cl.cube
    137, // llvm.amdgcn.image.gather4.c.b.cl.o.2d
    138, // llvm.amdgcn.image.gather4.c.b.cl.o.2darray
    138, // llvm.amdgcn.image.gather4.c.b.cl.o.cube
    136, // llvm.amdgcn.image.gather4.c.b.cube
    136, // llvm.amdgcn.image.gather4.c.b.o.2d
    137, // llvm.amdgcn.image.gather4.c.b.o.2darray
    137, // llvm.amdgcn.image.gather4.c.b.o.cube
    135, // llvm.amdgcn.image.gather4.c.cl.2d
    136, // llvm.amdgcn.image.gather4.c.cl.2darray
    136, // llvm.amdgcn.image.gather4.c.cl.cube
    136, // llvm.amdgcn.image.gather4.c.cl.o.2d
    137, // llvm.amdgcn.image.gather4.c.cl.o.2darray
    137, // llvm.amdgcn.image.gather4.c.cl.o.cube
    135, // llvm.amdgcn.image.gather4.c.cube
    135, // llvm.amdgcn.image.gather4.c.l.2d
    136, // llvm.amdgcn.image.gather4.c.l.2darray
    136, // llvm.amdgcn.image.gather4.c.l.cube
    136, // llvm.amdgcn.image.gather4.c.l.o.2d
    137, // llvm.amdgcn.image.gather4.c.l.o.2darray
    137, // llvm.amdgcn.image.gather4.c.l.o.cube
    134, // llvm.amdgcn.image.gather4.c.lz.2d
    135, // llvm.amdgcn.image.gather4.c.lz.2darray
    135, // llvm.amdgcn.image.gather4.c.lz.cube
    135, // llvm.amdgcn.image.gather4.c.lz.o.2d
    136, // llvm.amdgcn.image.gather4.c.lz.o.2darray
    136, // llvm.amdgcn.image.gather4.c.lz.o.cube
    135, // llvm.amdgcn.image.gather4.c.o.2d
    136, // llvm.amdgcn.image.gather4.c.o.2darray
    136, // llvm.amdgcn.image.gather4.c.o.cube
    134, // llvm.amdgcn.image.gather4.cl.2d
    135, // llvm.amdgcn.image.gather4.cl.2darray
    135, // llvm.amdgcn.image.gather4.cl.cube
    135, // llvm.amdgcn.image.gather4.cl.o.2d
    136, // llvm.amdgcn.image.gather4.cl.o.2darray
    136, // llvm.amdgcn.image.gather4.cl.o.cube
    134, // llvm.amdgcn.image.gather4.cube
    134, // llvm.amdgcn.image.gather4.l.2d
    135, // llvm.amdgcn.image.gather4.l.2darray
    135, // llvm.amdgcn.image.gather4.l.cube
    135, // llvm.amdgcn.image.gather4.l.o.2d
    136, // llvm.amdgcn.image.gather4.l.o.2darray
    136, // llvm.amdgcn.image.gather4.l.o.cube
    133, // llvm.amdgcn.image.gather4.lz.2d
    134, // llvm.amdgcn.image.gather4.lz.2darray
    134, // llvm.amdgcn.image.gather4.lz.cube
    134, // llvm.amdgcn.image.gather4.lz.o.2d
    135, // llvm.amdgcn.image.gather4.lz.o.2darray
    135, // llvm.amdgcn.image.gather4.lz.o.cube
    134, // llvm.amdgcn.image.gather4.o.2d
    135, // llvm.amdgcn.image.gather4.o.2darray
    135, // llvm.amdgcn.image.gather4.o.cube
    139, // llvm.amdgcn.image.getlod.1d
    140, // llvm.amdgcn.image.getlod.1darray
    140, // llvm.amdgcn.image.getlod.2d
    141, // llvm.amdgcn.image.getlod.2darray
    141, // llvm.amdgcn.image.getlod.3d
    141, // llvm.amdgcn.image.getlod.cube
    142, // llvm.amdgcn.image.getresinfo.1d
    142, // llvm.amdgcn.image.getresinfo.1darray
    142, // llvm.amdgcn.image.getresinfo.2d
    142, // llvm.amdgcn.image.getresinfo.2darray
    142, // llvm.amdgcn.image.getresinfo.2darraymsaa
    142, // llvm.amdgcn.image.getresinfo.2dmsaa
    142, // llvm.amdgcn.image.getresinfo.3d
    142, // llvm.amdgcn.image.getresinfo.cube
    143, // llvm.amdgcn.image.load.1d
    144, // llvm.amdgcn.image.load.1darray
    144, // llvm.amdgcn.image.load.2d
    145, // llvm.amdgcn.image.load.2darray
    146, // llvm.amdgcn.image.load.2darraymsaa
    145, // llvm.amdgcn.image.load.2dmsaa
    145, // llvm.amdgcn.image.load.3d
    145, // llvm.amdgcn.image.load.cube
    144, // llvm.amdgcn.image.load.mip.1d
    145, // llvm.amdgcn.image.load.mip.1darray
    145, // llvm.amdgcn.image.load.mip.2d
    146, // llvm.amdgcn.image.load.mip.2darray
    146, // llvm.amdgcn.image.load.mip.3d
    146, // llvm.amdgcn.image.load.mip.cube
    146, // llvm.amdgcn.image.msaa.load.2darraymsaa
    145, // llvm.amdgcn.image.msaa.load.2dmsaa
    146, // llvm.amdgcn.image.msaa.load.x.2darraymsaa
    145, // llvm.amdgcn.image.msaa.load.x.2dmsaa
    147, // llvm.amdgcn.image.sample.1d
    133, // llvm.amdgcn.image.sample.1darray
    133, // llvm.amdgcn.image.sample.2d
    134, // llvm.amdgcn.image.sample.2darray
    134, // llvm.amdgcn.image.sample.3d
    133, // llvm.amdgcn.image.sample.b.1d
    134, // llvm.amdgcn.image.sample.b.1darray
    134, // llvm.amdgcn.image.sample.b.2d
    135, // llvm.amdgcn.image.sample.b.2darray
    135, // llvm.amdgcn.image.sample.b.3d
    134, // llvm.amdgcn.image.sample.b.cl.1d
    135, // llvm.amdgcn.image.sample.b.cl.1darray
    135, // llvm.amdgcn.image.sample.b.cl.2d
    136, // llvm.amdgcn.image.sample.b.cl.2darray
    136, // llvm.amdgcn.image.sample.b.cl.3d
    136, // llvm.amdgcn.image.sample.b.cl.cube
    135, // llvm.amdgcn.image.sample.b.cl.o.1d
    136, // llvm.amdgcn.image.sample.b.cl.o.1darray
    136, // llvm.amdgcn.image.sample.b.cl.o.2d
    137, // llvm.amdgcn.image.sample.b.cl.o.2darray
    137, // llvm.amdgcn.image.sample.b.cl.o.3d
    137, // llvm.amdgcn.image.sample.b.cl.o.cube
    135, // llvm.amdgcn.image.sample.b.cube
    134, // llvm.amdgcn.image.sample.b.o.1d
    135, // llvm.amdgcn.image.sample.b.o.1darray
    135, // llvm.amdgcn.image.sample.b.o.2d
    136, // llvm.amdgcn.image.sample.b.o.2darray
    136, // llvm.amdgcn.image.sample.b.o.3d
    136, // llvm.amdgcn.image.sample.b.o.cube
    133, // llvm.amdgcn.image.sample.c.1d
    134, // llvm.amdgcn.image.sample.c.1darray
    134, // llvm.amdgcn.image.sample.c.2d
    135, // llvm.amdgcn.image.sample.c.2darray
    135, // llvm.amdgcn.image.sample.c.3d
    134, // llvm.amdgcn.image.sample.c.b.1d
    135, // llvm.amdgcn.image.sample.c.b.1darray
    135, // llvm.amdgcn.image.sample.c.b.2d
    136, // llvm.amdgcn.image.sample.c.b.2darray
    136, // llvm.amdgcn.image.sample.c.b.3d
    135, // llvm.amdgcn.image.sample.c.b.cl.1d
    136, // llvm.amdgcn.image.sample.c.b.cl.1darray
    136, // llvm.amdgcn.image.sample.c.b.cl.2d
    137, // llvm.amdgcn.image.sample.c.b.cl.2darray
    137, // llvm.amdgcn.image.sample.c.b.cl.3d
    137, // llvm.amdgcn.image.sample.c.b.cl.cube
    136, // llvm.amdgcn.image.sample.c.b.cl.o.1d
    137, // llvm.amdgcn.image.sample.c.b.cl.o.1darray
    137, // llvm.amdgcn.image.sample.c.b.cl.o.2d
    138, // llvm.amdgcn.image.sample.c.b.cl.o.2darray
    138, // llvm.amdgcn.image.sample.c.b.cl.o.3d
    138, // llvm.amdgcn.image.sample.c.b.cl.o.cube
    136, // llvm.amdgcn.image.sample.c.b.cube
    135, // llvm.amdgcn.image.sample.c.b.o.1d
    136, // llvm.amdgcn.image.sample.c.b.o.1darray
    136, // llvm.amdgcn.image.sample.c.b.o.2d
    137, // llvm.amdgcn.image.sample.c.b.o.2darray
    137, // llvm.amdgcn.image.sample.c.b.o.3d
    137, // llvm.amdgcn.image.sample.c.b.o.cube
    135, // llvm.amdgcn.image.sample.c.cd.1d
    136, // llvm.amdgcn.image.sample.c.cd.1darray
    138, // llvm.amdgcn.image.sample.c.cd.2d
    148, // llvm.amdgcn.image.sample.c.cd.2darray
    149, // llvm.amdgcn.image.sample.c.cd.3d
    136, // llvm.amdgcn.image.sample.c.cd.cl.1d
    137, // llvm.amdgcn.image.sample.c.cd.cl.1darray
    148, // llvm.amdgcn.image.sample.c.cd.cl.2d
    150, // llvm.amdgcn.image.sample.c.cd.cl.2darray
    151, // llvm.amdgcn.image.sample.c.cd.cl.3d
    150, // llvm.amdgcn.image.sample.c.cd.cl.cube
    137, // llvm.amdgcn.image.sample.c.cd.cl.o.1d
    138, // llvm.amdgcn.image.sample.c.cd.cl.o.1darray
    150, // llvm.amdgcn.image.sample.c.cd.cl.o.2d
    149, // llvm.amdgcn.image.sample.c.cd.cl.o.2darray
    152, // llvm.amdgcn.image.sample.c.cd.cl.o.3d
    149, // llvm.amdgcn.image.sample.c.cd.cl.o.cube
    148, // llvm.amdgcn.image.sample.c.cd.cube
    136, // llvm.amdgcn.image.sample.c.cd.o.1d
    137, // llvm.amdgcn.image.sample.c.cd.o.1darray
    148, // llvm.amdgcn.image.sample.c.cd.o.2d
    150, // llvm.amdgcn.image.sample.c.cd.o.2darray
    151, // llvm.amdgcn.image.sample.c.cd.o.3d
    150, // llvm.amdgcn.image.sample.c.cd.o.cube
    134, // llvm.amdgcn.image.sample.c.cl.1d
    135, // llvm.amdgcn.image.sample.c.cl.1darray
    135, // llvm.amdgcn.image.sample.c.cl.2d
    136, // llvm.amdgcn.image.sample.c.cl.2darray
    136, // llvm.amdgcn.image.sample.c.cl.3d
    136, // llvm.amdgcn.image.sample.c.cl.cube
    135, // llvm.amdgcn.image.sample.c.cl.o.1d
    136, // llvm.amdgcn.image.sample.c.cl.o.1darray
    136, // llvm.amdgcn.image.sample.c.cl.o.2d
    137, // llvm.amdgcn.image.sample.c.cl.o.2darray
    137, // llvm.amdgcn.image.sample.c.cl.o.3d
    137, // llvm.amdgcn.image.sample.c.cl.o.cube
    135, // llvm.amdgcn.image.sample.c.cube
    135, // llvm.amdgcn.image.sample.c.d.1d
    136, // llvm.amdgcn.image.sample.c.d.1darray
    138, // llvm.amdgcn.image.sample.c.d.2d
    148, // llvm.amdgcn.image.sample.c.d.2darray
    149, // llvm.amdgcn.image.sample.c.d.3d
    136, // llvm.amdgcn.image.sample.c.d.cl.1d
    137, // llvm.amdgcn.image.sample.c.d.cl.1darray
    148, // llvm.amdgcn.image.sample.c.d.cl.2d
    150, // llvm.amdgcn.image.sample.c.d.cl.2darray
    151, // llvm.amdgcn.image.sample.c.d.cl.3d
    150, // llvm.amdgcn.image.sample.c.d.cl.cube
    137, // llvm.amdgcn.image.sample.c.d.cl.o.1d
    138, // llvm.amdgcn.image.sample.c.d.cl.o.1darray
    150, // llvm.amdgcn.image.sample.c.d.cl.o.2d
    149, // llvm.amdgcn.image.sample.c.d.cl.o.2darray
    152, // llvm.amdgcn.image.sample.c.d.cl.o.3d
    149, // llvm.amdgcn.image.sample.c.d.cl.o.cube
    148, // llvm.amdgcn.image.sample.c.d.cube
    136, // llvm.amdgcn.image.sample.c.d.o.1d
    137, // llvm.amdgcn.image.sample.c.d.o.1darray
    148, // llvm.amdgcn.image.sample.c.d.o.2d
    150, // llvm.amdgcn.image.sample.c.d.o.2darray
    151, // llvm.amdgcn.image.sample.c.d.o.3d
    150, // llvm.amdgcn.image.sample.c.d.o.cube
    134, // llvm.amdgcn.image.sample.c.l.1d
    135, // llvm.amdgcn.image.sample.c.l.1darray
    135, // llvm.amdgcn.image.sample.c.l.2d
    136, // llvm.amdgcn.image.sample.c.l.2darray
    136, // llvm.amdgcn.image.sample.c.l.3d
    136, // llvm.amdgcn.image.sample.c.l.cube
    135, // llvm.amdgcn.image.sample.c.l.o.1d
    136, // llvm.amdgcn.image.sample.c.l.o.1darray
    136, // llvm.amdgcn.image.sample.c.l.o.2d
    137, // llvm.amdgcn.image.sample.c.l.o.2darray
    137, // llvm.amdgcn.image.sample.c.l.o.3d
    137, // llvm.amdgcn.image.sample.c.l.o.cube
    133, // llvm.amdgcn.image.sample.c.lz.1d
    134, // llvm.amdgcn.image.sample.c.lz.1darray
    134, // llvm.amdgcn.image.sample.c.lz.2d
    135, // llvm.amdgcn.image.sample.c.lz.2darray
    135, // llvm.amdgcn.image.sample.c.lz.3d
    135, // llvm.amdgcn.image.sample.c.lz.cube
    134, // llvm.amdgcn.image.sample.c.lz.o.1d
    135, // llvm.amdgcn.image.sample.c.lz.o.1darray
    135, // llvm.amdgcn.image.sample.c.lz.o.2d
    136, // llvm.amdgcn.image.sample.c.lz.o.2darray
    136, // llvm.amdgcn.image.sample.c.lz.o.3d
    136, // llvm.amdgcn.image.sample.c.lz.o.cube
    134, // llvm.amdgcn.image.sample.c.o.1d
    135, // llvm.amdgcn.image.sample.c.o.1darray
    135, // llvm.amdgcn.image.sample.c.o.2d
    136, // llvm.amdgcn.image.sample.c.o.2darray
    136, // llvm.amdgcn.image.sample.c.o.3d
    136, // llvm.amdgcn.image.sample.c.o.cube
    134, // llvm.amdgcn.image.sample.cd.1d
    135, // llvm.amdgcn.image.sample.cd.1darray
    137, // llvm.amdgcn.image.sample.cd.2d
    138, // llvm.amdgcn.image.sample.cd.2darray
    150, // llvm.amdgcn.image.sample.cd.3d
    135, // llvm.amdgcn.image.sample.cd.cl.1d
    136, // llvm.amdgcn.image.sample.cd.cl.1darray
    138, // llvm.amdgcn.image.sample.cd.cl.2d
    148, // llvm.amdgcn.image.sample.cd.cl.2darray
    149, // llvm.amdgcn.image.sample.cd.cl.3d
    148, // llvm.amdgcn.image.sample.cd.cl.cube
    136, // llvm.amdgcn.image.sample.cd.cl.o.1d
    137, // llvm.amdgcn.image.sample.cd.cl.o.1darray
    148, // llvm.amdgcn.image.sample.cd.cl.o.2d
    150, // llvm.amdgcn.image.sample.cd.cl.o.2darray
    151, // llvm.amdgcn.image.sample.cd.cl.o.3d
    150, // llvm.amdgcn.image.sample.cd.cl.o.cube
    138, // llvm.amdgcn.image.sample.cd.cube
    135, // llvm.amdgcn.image.sample.cd.o.1d
    136, // llvm.amdgcn.image.sample.cd.o.1darray
    138, // llvm.amdgcn.image.sample.cd.o.2d
    148, // llvm.amdgcn.image.sample.cd.o.2darray
    149, // llvm.amdgcn.image.sample.cd.o.3d
    148, // llvm.amdgcn.image.sample.cd.o.cube
    133, // llvm.amdgcn.image.sample.cl.1d
    134, // llvm.amdgcn.image.sample.cl.1darray
    134, // llvm.amdgcn.image.sample.cl.2d
    135, // llvm.amdgcn.image.sample.cl.2darray
    135, // llvm.amdgcn.image.sample.cl.3d
    135, // llvm.amdgcn.image.sample.cl.cube
    134, // llvm.amdgcn.image.sample.cl.o.1d
    135, // llvm.amdgcn.image.sample.cl.o.1darray
    135, // llvm.amdgcn.image.sample.cl.o.2d
    136, // llvm.amdgcn.image.sample.cl.o.2darray
    136, // llvm.amdgcn.image.sample.cl.o.3d
    136, // llvm.amdgcn.image.sample.cl.o.cube
    134, // llvm.amdgcn.image.sample.cube
    134, // llvm.amdgcn.image.sample.d.1d
    135, // llvm.amdgcn.image.sample.d.1darray
    137, // llvm.amdgcn.image.sample.d.2d
    138, // llvm.amdgcn.image.sample.d.2darray
    150, // llvm.amdgcn.image.sample.d.3d
    135, // llvm.amdgcn.image.sample.d.cl.1d
    136, // llvm.amdgcn.image.sample.d.cl.1darray
    138, // llvm.amdgcn.image.sample.d.cl.2d
    148, // llvm.amdgcn.image.sample.d.cl.2darray
    149, // llvm.amdgcn.image.sample.d.cl.3d
    148, // llvm.amdgcn.image.sample.d.cl.cube
    136, // llvm.amdgcn.image.sample.d.cl.o.1d
    137, // llvm.amdgcn.image.sample.d.cl.o.1darray
    148, // llvm.amdgcn.image.sample.d.cl.o.2d
    150, // llvm.amdgcn.image.sample.d.cl.o.2darray
    151, // llvm.amdgcn.image.sample.d.cl.o.3d
    150, // llvm.amdgcn.image.sample.d.cl.o.cube
    138, // llvm.amdgcn.image.sample.d.cube
    135, // llvm.amdgcn.image.sample.d.o.1d
    136, // llvm.amdgcn.image.sample.d.o.1darray
    138, // llvm.amdgcn.image.sample.d.o.2d
    148, // llvm.amdgcn.image.sample.d.o.2darray
    149, // llvm.amdgcn.image.sample.d.o.3d
    148, // llvm.amdgcn.image.sample.d.o.cube
    133, // llvm.amdgcn.image.sample.l.1d
    134, // llvm.amdgcn.image.sample.l.1darray
    134, // llvm.amdgcn.image.sample.l.2d
    135, // llvm.amdgcn.image.sample.l.2darray
    135, // llvm.amdgcn.image.sample.l.3d
    135, // llvm.amdgcn.image.sample.l.cube
    134, // llvm.amdgcn.image.sample.l.o.1d
    135, // llvm.amdgcn.image.sample.l.o.1darray
    135, // llvm.amdgcn.image.sample.l.o.2d
    136, // llvm.amdgcn.image.sample.l.o.2darray
    136, // llvm.amdgcn.image.sample.l.o.3d
    136, // llvm.amdgcn.image.sample.l.o.cube
    147, // llvm.amdgcn.image.sample.lz.1d
    133, // llvm.amdgcn.image.sample.lz.1darray
    133, // llvm.amdgcn.image.sample.lz.2d
    134, // llvm.amdgcn.image.sample.lz.2darray
    134, // llvm.amdgcn.image.sample.lz.3d
    134, // llvm.amdgcn.image.sample.lz.cube
    133, // llvm.amdgcn.image.sample.lz.o.1d
    134, // llvm.amdgcn.image.sample.lz.o.1darray
    134, // llvm.amdgcn.image.sample.lz.o.2d
    135, // llvm.amdgcn.image.sample.lz.o.2darray
    135, // llvm.amdgcn.image.sample.lz.o.3d
    135, // llvm.amdgcn.image.sample.lz.o.cube
    133, // llvm.amdgcn.image.sample.o.1d
    134, // llvm.amdgcn.image.sample.o.1darray
    134, // llvm.amdgcn.image.sample.o.2d
    135, // llvm.amdgcn.image.sample.o.2darray
    135, // llvm.amdgcn.image.sample.o.3d
    135, // llvm.amdgcn.image.sample.o.cube
    153, // llvm.amdgcn.image.store.1d
    154, // llvm.amdgcn.image.store.1darray
    154, // llvm.amdgcn.image.store.2d
    155, // llvm.amdgcn.image.store.2darray
    156, // llvm.amdgcn.image.store.2darraymsaa
    155, // llvm.amdgcn.image.store.2dmsaa
    155, // llvm.amdgcn.image.store.3d
    155, // llvm.amdgcn.image.store.cube
    154, // llvm.amdgcn.image.store.mip.1d
    155, // llvm.amdgcn.image.store.mip.1darray
    155, // llvm.amdgcn.image.store.mip.2d
    156, // llvm.amdgcn.image.store.mip.2darray
    156, // llvm.amdgcn.image.store.mip.3d
    156, // llvm.amdgcn.image.store.mip.cube
    108, // llvm.amdgcn.implicit.buffer.ptr
    108, // llvm.amdgcn.implicitarg.ptr
    127, // llvm.amdgcn.init.exec
    157, // llvm.amdgcn.init.exec.from.input
    5, // llvm.amdgcn.interp.inreg.p10
    124, // llvm.amdgcn.interp.inreg.p10.f16
    5, // llvm.amdgcn.interp.inreg.p2
    124, // llvm.amdgcn.interp.inreg.p2.f16
    158, // llvm.amdgcn.interp.mov
    49, // llvm.amdgcn.interp.p1
    57, // llvm.amdgcn.interp.p1.f16
    159, // llvm.amdgcn.interp.p2
    48, // llvm.amdgcn.interp.p2.f16
    102, // llvm.amdgcn.inverse.ballot
    160, // llvm.amdgcn.is.private
    160, // llvm.amdgcn.is.shared
    108, // llvm.amdgcn.kernarg.segment.ptr
    161, // llvm.amdgcn.kill
    5, // llvm.amdgcn.ldexp
    162, // llvm.amdgcn.lds.direct.load
    5, // llvm.amdgcn.lds.kernel.id
    163, // llvm.amdgcn.lds.param.load
    5, // llvm.amdgcn.lerp
    96, // llvm.amdgcn.live.mask
    5, // llvm.amdgcn.log
    5, // llvm.amdgcn.log.clamp
    118, // llvm.amdgcn.loop
    164, // llvm.amdgcn.make.buffer.rsrc
    2, // llvm.amdgcn.mbcnt.hi
    2, // llvm.amdgcn.mbcnt.lo
    165, // llvm.amdgcn.mfma.f32.16x16x16bf16.1k
    165, // llvm.amdgcn.mfma.f32.16x16x16f16
    165, // llvm.amdgcn.mfma.f32.16x16x1f32
    165, // llvm.amdgcn.mfma.f32.16x16x2bf16
    165, // llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8
    165, // llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8
    165, // llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8
    165, // llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8
    165, // llvm.amdgcn.mfma.f32.16x16x4bf16.1k
    165, // llvm.amdgcn.mfma.f32.16x16x4f16
    165, // llvm.amdgcn.mfma.f32.16x16x4f32
    165, // llvm.amdgcn.mfma.f32.16x16x8.xf32
    165, // llvm.amdgcn.mfma.f32.16x16x8bf16
    165, // llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8
    165, // llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8
    165, // llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8
    165, // llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8
    165, // llvm.amdgcn.mfma.f32.32x32x1f32
    165, // llvm.amdgcn.mfma.f32.32x32x2bf16
    165, // llvm.amdgcn.mfma.f32.32x32x2f32
    165, // llvm.amdgcn.mfma.f32.32x32x4.xf32
    165, // llvm.amdgcn.mfma.f32.32x32x4bf16
    165, // llvm.amdgcn.mfma.f32.32x32x4bf16.1k
    165, // llvm.amdgcn.mfma.f32.32x32x4f16
    165, // llvm.amdgcn.mfma.f32.32x32x8bf16.1k
    165, // llvm.amdgcn.mfma.f32.32x32x8f16
    165, // llvm.amdgcn.mfma.f32.4x4x1f32
    165, // llvm.amdgcn.mfma.f32.4x4x2bf16
    165, // llvm.amdgcn.mfma.f32.4x4x4bf16.1k
    165, // llvm.amdgcn.mfma.f32.4x4x4f16
    165, // llvm.amdgcn.mfma.f64.16x16x4f64
    165, // llvm.amdgcn.mfma.f64.4x4x4f64
    165, // llvm.amdgcn.mfma.i32.16x16x16i8
    165, // llvm.amdgcn.mfma.i32.16x16x32.i8
    165, // llvm.amdgcn.mfma.i32.16x16x4i8
    165, // llvm.amdgcn.mfma.i32.32x32x16.i8
    165, // llvm.amdgcn.mfma.i32.32x32x4i8
    165, // llvm.amdgcn.mfma.i32.32x32x8i8
    165, // llvm.amdgcn.mfma.i32.4x4x4i8
    166, // llvm.amdgcn.mov.dpp
    117, // llvm.amdgcn.mov.dpp8
    5, // llvm.amdgcn.mqsad.pk.u16.u8
    5, // llvm.amdgcn.mqsad.u32.u8
    5, // llvm.amdgcn.msad.u8
    5, // llvm.amdgcn.mul.i24
    5, // llvm.amdgcn.mul.u24
    5, // llvm.amdgcn.mulhi.i24
    5, // llvm.amdgcn.mulhi.u24
    167, // llvm.amdgcn.perm
    168, // llvm.amdgcn.permlane16
    102, // llvm.amdgcn.permlane64
    168, // llvm.amdgcn.permlanex16
    2, // llvm.amdgcn.ps.live
    5, // llvm.amdgcn.qsad.pk.u16.u8
    108, // llvm.amdgcn.queue.ptr
    103, // llvm.amdgcn.raw.buffer.atomic.add
    103, // llvm.amdgcn.raw.buffer.atomic.and
    104, // llvm.amdgcn.raw.buffer.atomic.cmpswap
    103, // llvm.amdgcn.raw.buffer.atomic.dec
    103, // llvm.amdgcn.raw.buffer.atomic.fadd
    103, // llvm.amdgcn.raw.buffer.atomic.fmax
    103, // llvm.amdgcn.raw.buffer.atomic.fmin
    103, // llvm.amdgcn.raw.buffer.atomic.inc
    103, // llvm.amdgcn.raw.buffer.atomic.or
    103, // llvm.amdgcn.raw.buffer.atomic.smax
    103, // llvm.amdgcn.raw.buffer.atomic.smin
    103, // llvm.amdgcn.raw.buffer.atomic.sub
    103, // llvm.amdgcn.raw.buffer.atomic.swap
    103, // llvm.amdgcn.raw.buffer.atomic.umax
    103, // llvm.amdgcn.raw.buffer.atomic.umin
    103, // llvm.amdgcn.raw.buffer.atomic.xor
    169, // llvm.amdgcn.raw.buffer.load
    169, // llvm.amdgcn.raw.buffer.load.format
    170, // llvm.amdgcn.raw.buffer.load.lds
    171, // llvm.amdgcn.raw.buffer.store
    171, // llvm.amdgcn.raw.buffer.store.format
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.add
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.and
    173, // llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.dec
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.fadd
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.fmax
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.fmin
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.inc
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.or
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.smax
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.smin
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.sub
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.swap
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.umax
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.umin
    172, // llvm.amdgcn.raw.ptr.buffer.atomic.xor
    174, // llvm.amdgcn.raw.ptr.buffer.load
    174, // llvm.amdgcn.raw.ptr.buffer.load.format
    175, // llvm.amdgcn.raw.ptr.buffer.load.lds
    176, // llvm.amdgcn.raw.ptr.buffer.store
    176, // llvm.amdgcn.raw.ptr.buffer.store.format
    177, // llvm.amdgcn.raw.ptr.tbuffer.load
    178, // llvm.amdgcn.raw.ptr.tbuffer.store
    105, // llvm.amdgcn.raw.tbuffer.load
    106, // llvm.amdgcn.raw.tbuffer.store
    5, // llvm.amdgcn.rcp
    5, // llvm.amdgcn.rcp.legacy
    102, // llvm.amdgcn.readfirstlane
    102, // llvm.amdgcn.readlane
    5, // llvm.amdgcn.reloc.constant
    5, // llvm.amdgcn.rsq
    5, // llvm.amdgcn.rsq.clamp
    5, // llvm.amdgcn.rsq.legacy
    179, // llvm.amdgcn.s.barrier
    22, // llvm.amdgcn.s.buffer.load
    76, // llvm.amdgcn.s.dcache.inv
    76, // llvm.amdgcn.s.dcache.inv.vol
    180, // llvm.amdgcn.s.dcache.wb
    180, // llvm.amdgcn.s.dcache.wb.vol
    181, // llvm.amdgcn.s.decperflevel
    180, // llvm.amdgcn.s.get.waveid.in.workgroup
    5, // llvm.amdgcn.s.getpc
    181, // llvm.amdgcn.s.getreg
    181, // llvm.amdgcn.s.incperflevel
    180, // llvm.amdgcn.s.memrealtime
    76, // llvm.amdgcn.s.memtime
    182, // llvm.amdgcn.s.sendmsg
    182, // llvm.amdgcn.s.sendmsg.rtn
    182, // llvm.amdgcn.s.sendmsghalt
    181, // llvm.amdgcn.s.sethalt
    181, // llvm.amdgcn.s.setprio
    181, // llvm.amdgcn.s.setreg
    181, // llvm.amdgcn.s.sleep
    101, // llvm.amdgcn.s.wait.event.export.ready
    183, // llvm.amdgcn.s.waitcnt
    5, // llvm.amdgcn.sad.hi.u8
    5, // llvm.amdgcn.sad.u16
    5, // llvm.amdgcn.sad.u8
    5, // llvm.amdgcn.sbfe
    127, // llvm.amdgcn.sched.barrier
    184, // llvm.amdgcn.sched.group.barrier
    124, // llvm.amdgcn.sdot2
    124, // llvm.amdgcn.sdot4
    124, // llvm.amdgcn.sdot8
    102, // llvm.amdgcn.set.inactive
    5, // llvm.amdgcn.sffbh
    5, // llvm.amdgcn.sin
    185, // llvm.amdgcn.smfmac.f32.16x16x32.bf16
    185, // llvm.amdgcn.smfmac.f32.16x16x32.f16
    185, // llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8
    185, // llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8
    185, // llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8
    185, // llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8
    185, // llvm.amdgcn.smfmac.f32.32x32x16.bf16
    185, // llvm.amdgcn.smfmac.f32.32x32x16.f16
    185, // llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8
    185, // llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8
    185, // llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8
    185, // llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8
    185, // llvm.amdgcn.smfmac.i32.16x16x64.i8
    185, // llvm.amdgcn.smfmac.i32.32x32x32.i8
    167, // llvm.amdgcn.softwqm
    5, // llvm.amdgcn.sqrt
    186, // llvm.amdgcn.strict.wqm
    186, // llvm.amdgcn.strict.wwm
    104, // llvm.amdgcn.struct.buffer.atomic.add
    104, // llvm.amdgcn.struct.buffer.atomic.and
    187, // llvm.amdgcn.struct.buffer.atomic.cmpswap
    104, // llvm.amdgcn.struct.buffer.atomic.dec
    104, // llvm.amdgcn.struct.buffer.atomic.fadd
    104, // llvm.amdgcn.struct.buffer.atomic.fmax
    104, // llvm.amdgcn.struct.buffer.atomic.fmin
    104, // llvm.amdgcn.struct.buffer.atomic.inc
    104, // llvm.amdgcn.struct.buffer.atomic.or
    104, // llvm.amdgcn.struct.buffer.atomic.smax
    104, // llvm.amdgcn.struct.buffer.atomic.smin
    104, // llvm.amdgcn.struct.buffer.atomic.sub
    104, // llvm.amdgcn.struct.buffer.atomic.swap
    104, // llvm.amdgcn.struct.buffer.atomic.umax
    104, // llvm.amdgcn.struct.buffer.atomic.umin
    104, // llvm.amdgcn.struct.buffer.atomic.xor
    188, // llvm.amdgcn.struct.buffer.load
    188, // llvm.amdgcn.struct.buffer.load.format
    189, // llvm.amdgcn.struct.buffer.load.lds
    190, // llvm.amdgcn.struct.buffer.store
    190, // llvm.amdgcn.struct.buffer.store.format
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.add
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.and
    192, // llvm.amdgcn.struct.ptr.buffer.atomic.cmpswap
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.dec
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.fadd
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.fmax
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.fmin
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.inc
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.or
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.smax
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.smin
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.sub
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.swap
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.umax
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.umin
    191, // llvm.amdgcn.struct.ptr.buffer.atomic.xor
    193, // llvm.amdgcn.struct.ptr.buffer.load
    193, // llvm.amdgcn.struct.ptr.buffer.load.format
    194, // llvm.amdgcn.struct.ptr.buffer.load.lds
    195, // llvm.amdgcn.struct.ptr.buffer.store
    195, // llvm.amdgcn.struct.ptr.buffer.store.format
    196, // llvm.amdgcn.struct.ptr.tbuffer.load
    197, // llvm.amdgcn.struct.ptr.tbuffer.store
    198, // llvm.amdgcn.struct.tbuffer.load
    199, // llvm.amdgcn.struct.tbuffer.store
    200, // llvm.amdgcn.sudot4
    200, // llvm.amdgcn.sudot8
    201, // llvm.amdgcn.tbuffer.load
    202, // llvm.amdgcn.tbuffer.store
    5, // llvm.amdgcn.trig.preop
    5, // llvm.amdgcn.ubfe
    124, // llvm.amdgcn.udot2
    124, // llvm.amdgcn.udot4
    124, // llvm.amdgcn.udot8
    203, // llvm.amdgcn.unreachable
    204, // llvm.amdgcn.update.dpp
    179, // llvm.amdgcn.wave.barrier
    117, // llvm.amdgcn.wave.reduce.umax
    117, // llvm.amdgcn.wave.reduce.umin
    5, // llvm.amdgcn.wavefrontsize
    205, // llvm.amdgcn.wmma.bf16.16x16x16.bf16
    205, // llvm.amdgcn.wmma.f16.16x16x16.f16
    102, // llvm.amdgcn.wmma.f32.16x16x16.bf16
    102, // llvm.amdgcn.wmma.f32.16x16x16.f16
    206, // llvm.amdgcn.wmma.i32.16x16x16.iu4
    206, // llvm.amdgcn.wmma.i32.16x16x16.iu8
    5, // llvm.amdgcn.workgroup.id.x
    5, // llvm.amdgcn.workgroup.id.y
    5, // llvm.amdgcn.workgroup.id.z
    5, // llvm.amdgcn.workitem.id.x
    5, // llvm.amdgcn.workitem.id.y
    5, // llvm.amdgcn.workitem.id.z
    167, // llvm.amdgcn.wqm
    207, // llvm.amdgcn.wqm.demote
    102, // llvm.amdgcn.wqm.vote
    102, // llvm.amdgcn.writelane
    186, // llvm.amdgcn.wwm
    208, // llvm.arm.cde.cx1
    209, // llvm.arm.cde.cx1a
    208, // llvm.arm.cde.cx1d
    210, // llvm.arm.cde.cx1da
    209, // llvm.arm.cde.cx2
    210, // llvm.arm.cde.cx2a
    209, // llvm.arm.cde.cx2d
    211, // llvm.arm.cde.cx2da
    210, // llvm.arm.cde.cx3
    211, // llvm.arm.cde.cx3a
    210, // llvm.arm.cde.cx3d
    212, // llvm.arm.cde.cx3da
    208, // llvm.arm.cde.vcx1
    209, // llvm.arm.cde.vcx1a
    208, // llvm.arm.cde.vcx1q
    209, // llvm.arm.cde.vcx1q.predicated
    209, // llvm.arm.cde.vcx1qa
    209, // llvm.arm.cde.vcx1qa.predicated
    209, // llvm.arm.cde.vcx2
    210, // llvm.arm.cde.vcx2a
    209, // llvm.arm.cde.vcx2q
    210, // llvm.arm.cde.vcx2q.predicated
    210, // llvm.arm.cde.vcx2qa
    210, // llvm.arm.cde.vcx2qa.predicated
    210, // llvm.arm.cde.vcx3
    211, // llvm.arm.cde.vcx3a
    210, // llvm.arm.cde.vcx3q
    211, // llvm.arm.cde.vcx3q.predicated
    211, // llvm.arm.cde.vcx3qa
    211, // llvm.arm.cde.vcx3qa.predicated
    213, // llvm.arm.cdp
    213, // llvm.arm.cdp2
    10, // llvm.arm.clrex
    2, // llvm.arm.cls
    2, // llvm.arm.cls64
    12, // llvm.arm.cmse.tt
    12, // llvm.arm.cmse.tta
    12, // llvm.arm.cmse.ttat
    12, // llvm.arm.cmse.ttt
    2, // llvm.arm.crc32b
    2, // llvm.arm.crc32cb
    2, // llvm.arm.crc32ch
    2, // llvm.arm.crc32cw
    2, // llvm.arm.crc32h
    2, // llvm.arm.crc32w
    10, // llvm.arm.dbg
    10, // llvm.arm.dmb
    10, // llvm.arm.dsb
    8, // llvm.arm.get.fpscr
    10, // llvm.arm.gnu.eabi.mcount
    10, // llvm.arm.hint
    10, // llvm.arm.isb
    10, // llvm.arm.ldaex
    10, // llvm.arm.ldaexd
    214, // llvm.arm.ldc
    214, // llvm.arm.ldc2
    214, // llvm.arm.ldc2l
    214, // llvm.arm.ldcl
    10, // llvm.arm.ldrex
    10, // llvm.arm.ldrexd
    215, // llvm.arm.mcr
    215, // llvm.arm.mcr2
    216, // llvm.arm.mcrr
    216, // llvm.arm.mcrr2
    217, // llvm.arm.mrc
    217, // llvm.arm.mrc2
    218, // llvm.arm.mrrc
    218, // llvm.arm.mrrc2
    2, // llvm.arm.mve.abd.predicated
    2, // llvm.arm.mve.abs.predicated
    2, // llvm.arm.mve.add.predicated
    2, // llvm.arm.mve.addlv
    2, // llvm.arm.mve.addlv.predicated
    2, // llvm.arm.mve.addv
    2, // llvm.arm.mve.addv.predicated
    2, // llvm.arm.mve.and.predicated
    2, // llvm.arm.mve.asrl
    2, // llvm.arm.mve.bic.predicated
    2, // llvm.arm.mve.cls.predicated
    2, // llvm.arm.mve.clz.predicated
    2, // llvm.arm.mve.eor.predicated
    2, // llvm.arm.mve.fma.predicated
    2, // llvm.arm.mve.hadd.predicated
    2, // llvm.arm.mve.hsub.predicated
    2, // llvm.arm.mve.lsll
    2, // llvm.arm.mve.max.predicated
    2, // llvm.arm.mve.maxav
    2, // llvm.arm.mve.maxav.predicated
    2, // llvm.arm.mve.maxnmav
    2, // llvm.arm.mve.maxnmav.predicated
    2, // llvm.arm.mve.maxnmv
    2, // llvm.arm.mve.maxnmv.predicated
    2, // llvm.arm.mve.maxv
    2, // llvm.arm.mve.maxv.predicated
    2, // llvm.arm.mve.min.predicated
    2, // llvm.arm.mve.minav
    2, // llvm.arm.mve.minav.predicated
    2, // llvm.arm.mve.minnmav
    2, // llvm.arm.mve.minnmav.predicated
    2, // llvm.arm.mve.minnmv
    2, // llvm.arm.mve.minnmv.predicated
    2, // llvm.arm.mve.minv
    2, // llvm.arm.mve.minv.predicated
    2, // llvm.arm.mve.mul.predicated
    2, // llvm.arm.mve.mulh.predicated
    2, // llvm.arm.mve.mull.int.predicated
    2, // llvm.arm.mve.mull.poly.predicated
    2, // llvm.arm.mve.mvn.predicated
    2, // llvm.arm.mve.neg.predicated
    2, // llvm.arm.mve.orn.predicated
    2, // llvm.arm.mve.orr.predicated
    2, // llvm.arm.mve.pred.i2v
    2, // llvm.arm.mve.pred.v2i
    2, // llvm.arm.mve.qabs.predicated
    2, // llvm.arm.mve.qadd.predicated
    2, // llvm.arm.mve.qdmulh.predicated
    2, // llvm.arm.mve.qneg.predicated
    2, // llvm.arm.mve.qrdmulh.predicated
    2, // llvm.arm.mve.qsub.predicated
    2, // llvm.arm.mve.rhadd.predicated
    2, // llvm.arm.mve.rmulh.predicated
    2, // llvm.arm.mve.shl.imm.predicated
    2, // llvm.arm.mve.shr.imm.predicated
    2, // llvm.arm.mve.sqrshr
    2, // llvm.arm.mve.sqrshrl
    2, // llvm.arm.mve.sqshl
    2, // llvm.arm.mve.sqshll
    2, // llvm.arm.mve.srshr
    2, // llvm.arm.mve.srshrl
    2, // llvm.arm.mve.sub.predicated
    2, // llvm.arm.mve.uqrshl
    2, // llvm.arm.mve.uqrshll
    2, // llvm.arm.mve.uqshl
    2, // llvm.arm.mve.uqshll
    2, // llvm.arm.mve.urshr
    2, // llvm.arm.mve.urshrl
    2, // llvm.arm.mve.vabav
    2, // llvm.arm.mve.vabav.predicated
    2, // llvm.arm.mve.vabd
    2, // llvm.arm.mve.vadc
    2, // llvm.arm.mve.vadc.predicated
    2, // llvm.arm.mve.vbrsr
    2, // llvm.arm.mve.vbrsr.predicated
    2, // llvm.arm.mve.vcaddq
    2, // llvm.arm.mve.vcaddq.predicated
    2, // llvm.arm.mve.vcls
    2, // llvm.arm.mve.vcmlaq
    2, // llvm.arm.mve.vcmlaq.predicated
    2, // llvm.arm.mve.vcmulq
    2, // llvm.arm.mve.vcmulq.predicated
    2, // llvm.arm.mve.vctp16
    2, // llvm.arm.mve.vctp32
    2, // llvm.arm.mve.vctp64
    2, // llvm.arm.mve.vctp8
    2, // llvm.arm.mve.vcvt.fix
    2, // llvm.arm.mve.vcvt.fix.predicated
    2, // llvm.arm.mve.vcvt.fp.int.predicated
    2, // llvm.arm.mve.vcvt.narrow
    2, // llvm.arm.mve.vcvt.narrow.predicated
    2, // llvm.arm.mve.vcvt.widen
    2, // llvm.arm.mve.vcvt.widen.predicated
    2, // llvm.arm.mve.vcvta
    2, // llvm.arm.mve.vcvta.predicated
    2, // llvm.arm.mve.vcvtm
    2, // llvm.arm.mve.vcvtm.predicated
    2, // llvm.arm.mve.vcvtn
    2, // llvm.arm.mve.vcvtn.predicated
    2, // llvm.arm.mve.vcvtp
    2, // llvm.arm.mve.vcvtp.predicated
    2, // llvm.arm.mve.vddup
    2, // llvm.arm.mve.vddup.predicated
    2, // llvm.arm.mve.vdwdup
    2, // llvm.arm.mve.vdwdup.predicated
    2, // llvm.arm.mve.vhadd
    2, // llvm.arm.mve.vhsub
    2, // llvm.arm.mve.vidup
    2, // llvm.arm.mve.vidup.predicated
    2, // llvm.arm.mve.viwdup
    2, // llvm.arm.mve.viwdup.predicated
    3, // llvm.arm.mve.vld2q
    3, // llvm.arm.mve.vld4q
    62, // llvm.arm.mve.vldr.gather.base
    62, // llvm.arm.mve.vldr.gather.base.predicated
    62, // llvm.arm.mve.vldr.gather.base.wb
    62, // llvm.arm.mve.vldr.gather.base.wb.predicated
    62, // llvm.arm.mve.vldr.gather.offset
    62, // llvm.arm.mve.vldr.gather.offset.predicated
    2, // llvm.arm.mve.vmaxa.predicated
    2, // llvm.arm.mve.vmaxnma.predicated
    2, // llvm.arm.mve.vmina.predicated
    2, // llvm.arm.mve.vminnma.predicated
    2, // llvm.arm.mve.vmla.n.predicated
    2, // llvm.arm.mve.vmlas.n.predicated
    2, // llvm.arm.mve.vmldava
    2, // llvm.arm.mve.vmldava.predicated
    2, // llvm.arm.mve.vmlldava
    2, // llvm.arm.mve.vmlldava.predicated
    2, // llvm.arm.mve.vmovl.predicated
    2, // llvm.arm.mve.vmovn.predicated
    2, // llvm.arm.mve.vmulh
    2, // llvm.arm.mve.vmull
    2, // llvm.arm.mve.vmull.poly
    2, // llvm.arm.mve.vqdmlad
    2, // llvm.arm.mve.vqdmlad.predicated
    2, // llvm.arm.mve.vqdmlah
    2, // llvm.arm.mve.vqdmlah.predicated
    2, // llvm.arm.mve.vqdmlash
    2, // llvm.arm.mve.vqdmlash.predicated
    2, // llvm.arm.mve.vqdmulh
    2, // llvm.arm.mve.vqdmull
    2, // llvm.arm.mve.vqdmull.predicated
    2, // llvm.arm.mve.vqmovn
    2, // llvm.arm.mve.vqmovn.predicated
    2, // llvm.arm.mve.vqrdmlah
    2, // llvm.arm.mve.vqrdmlah.predicated
    2, // llvm.arm.mve.vqrdmlash
    2, // llvm.arm.mve.vqrdmlash.predicated
    2, // llvm.arm.mve.vqrdmulh
    2, // llvm.arm.mve.vqshl.imm
    2, // llvm.arm.mve.vqshl.imm.predicated
    2, // llvm.arm.mve.vqshlu.imm
    2, // llvm.arm.mve.vqshlu.imm.predicated
    2, // llvm.arm.mve.vreinterpretq
    2, // llvm.arm.mve.vrev.predicated
    2, // llvm.arm.mve.vrhadd
    2, // llvm.arm.mve.vrinta.predicated
    2, // llvm.arm.mve.vrintm.predicated
    2, // llvm.arm.mve.vrintn
    2, // llvm.arm.mve.vrintn.predicated
    2, // llvm.arm.mve.vrintp.predicated
    2, // llvm.arm.mve.vrintx.predicated
    2, // llvm.arm.mve.vrintz.predicated
    2, // llvm.arm.mve.vrmlldavha
    2, // llvm.arm.mve.vrmlldavha.predicated
    2, // llvm.arm.mve.vrmulh
    2, // llvm.arm.mve.vrshr.imm
    2, // llvm.arm.mve.vrshr.imm.predicated
    2, // llvm.arm.mve.vsbc
    2, // llvm.arm.mve.vsbc.predicated
    2, // llvm.arm.mve.vshl.scalar
    2, // llvm.arm.mve.vshl.scalar.predicated
    2, // llvm.arm.mve.vshl.vector
    2, // llvm.arm.mve.vshl.vector.predicated
    2, // llvm.arm.mve.vshlc
    2, // llvm.arm.mve.vshlc.predicated
    2, // llvm.arm.mve.vshll.imm
    2, // llvm.arm.mve.vshll.imm.predicated
    2, // llvm.arm.mve.vshrn
    2, // llvm.arm.mve.vshrn.predicated
    2, // llvm.arm.mve.vsli
    2, // llvm.arm.mve.vsli.predicated
    2, // llvm.arm.mve.vsri
    2, // llvm.arm.mve.vsri.predicated
    98, // llvm.arm.mve.vst2q
    98, // llvm.arm.mve.vst4q
    88, // llvm.arm.mve.vstr.scatter.base
    88, // llvm.arm.mve.vstr.scatter.base.predicated
    88, // llvm.arm.mve.vstr.scatter.base.wb
    88, // llvm.arm.mve.vstr.scatter.base.wb.predicated
    88, // llvm.arm.mve.vstr.scatter.offset
    88, // llvm.arm.mve.vstr.scatter.offset.predicated
    2, // llvm.arm.neon.aesd
    2, // llvm.arm.neon.aese
    2, // llvm.arm.neon.aesimc
    2, // llvm.arm.neon.aesmc
    2, // llvm.arm.neon.bfdot
    2, // llvm.arm.neon.bfmlalb
    2, // llvm.arm.neon.bfmlalt
    2, // llvm.arm.neon.bfmmla
    2, // llvm.arm.neon.sdot
    2, // llvm.arm.neon.sha1c
    2, // llvm.arm.neon.sha1h
    2, // llvm.arm.neon.sha1m
    2, // llvm.arm.neon.sha1p
    2, // llvm.arm.neon.sha1su0
    2, // llvm.arm.neon.sha1su1
    2, // llvm.arm.neon.sha256h
    2, // llvm.arm.neon.sha256h2
    2, // llvm.arm.neon.sha256su0
    2, // llvm.arm.neon.sha256su1
    2, // llvm.arm.neon.smmla
    2, // llvm.arm.neon.udot
    2, // llvm.arm.neon.ummla
    2, // llvm.arm.neon.usdot
    2, // llvm.arm.neon.usmmla
    2, // llvm.arm.neon.vabds
    2, // llvm.arm.neon.vabdu
    2, // llvm.arm.neon.vabs
    2, // llvm.arm.neon.vacge
    2, // llvm.arm.neon.vacgt
    2, // llvm.arm.neon.vbsl
    2, // llvm.arm.neon.vcadd.rot270
    2, // llvm.arm.neon.vcadd.rot90
    2, // llvm.arm.neon.vcls
    2, // llvm.arm.neon.vcvtas
    2, // llvm.arm.neon.vcvtau
    2, // llvm.arm.neon.vcvtbfp2bf
    2, // llvm.arm.neon.vcvtfp2bf
    2, // llvm.arm.neon.vcvtfp2fxs
    2, // llvm.arm.neon.vcvtfp2fxu
    2, // llvm.arm.neon.vcvtfp2hf
    2, // llvm.arm.neon.vcvtfxs2fp
    2, // llvm.arm.neon.vcvtfxu2fp
    2, // llvm.arm.neon.vcvthf2fp
    2, // llvm.arm.neon.vcvtms
    2, // llvm.arm.neon.vcvtmu
    2, // llvm.arm.neon.vcvtns
    2, // llvm.arm.neon.vcvtnu
    2, // llvm.arm.neon.vcvtps
    2, // llvm.arm.neon.vcvtpu
    2, // llvm.arm.neon.vhadds
    2, // llvm.arm.neon.vhaddu
    2, // llvm.arm.neon.vhsubs
    2, // llvm.arm.neon.vhsubu
    3, // llvm.arm.neon.vld1
    3, // llvm.arm.neon.vld1x2
    3, // llvm.arm.neon.vld1x3
    3, // llvm.arm.neon.vld1x4
    3, // llvm.arm.neon.vld2
    3, // llvm.arm.neon.vld2dup
    3, // llvm.arm.neon.vld2lane
    3, // llvm.arm.neon.vld3
    3, // llvm.arm.neon.vld3dup
    3, // llvm.arm.neon.vld3lane
    3, // llvm.arm.neon.vld4
    3, // llvm.arm.neon.vld4dup
    3, // llvm.arm.neon.vld4lane
    2, // llvm.arm.neon.vmaxnm
    2, // llvm.arm.neon.vmaxs
    2, // llvm.arm.neon.vmaxu
    2, // llvm.arm.neon.vminnm
    2, // llvm.arm.neon.vmins
    2, // llvm.arm.neon.vminu
    2, // llvm.arm.neon.vmullp
    2, // llvm.arm.neon.vmulls
    2, // llvm.arm.neon.vmullu
    2, // llvm.arm.neon.vmulp
    2, // llvm.arm.neon.vpadals
    2, // llvm.arm.neon.vpadalu
    2, // llvm.arm.neon.vpadd
    2, // llvm.arm.neon.vpaddls
    2, // llvm.arm.neon.vpaddlu
    2, // llvm.arm.neon.vpmaxs
    2, // llvm.arm.neon.vpmaxu
    2, // llvm.arm.neon.vpmins
    2, // llvm.arm.neon.vpminu
    2, // llvm.arm.neon.vqabs
    2, // llvm.arm.neon.vqdmulh
    2, // llvm.arm.neon.vqdmull
    2, // llvm.arm.neon.vqmovns
    2, // llvm.arm.neon.vqmovnsu
    2, // llvm.arm.neon.vqmovnu
    2, // llvm.arm.neon.vqneg
    2, // llvm.arm.neon.vqrdmlah
    2, // llvm.arm.neon.vqrdmlsh
    2, // llvm.arm.neon.vqrdmulh
    2, // llvm.arm.neon.vqrshiftns
    2, // llvm.arm.neon.vqrshiftnsu
    2, // llvm.arm.neon.vqrshiftnu
    2, // llvm.arm.neon.vqrshifts
    2, // llvm.arm.neon.vqrshiftu
    2, // llvm.arm.neon.vqshiftns
    2, // llvm.arm.neon.vqshiftnsu
    2, // llvm.arm.neon.vqshiftnu
    2, // llvm.arm.neon.vqshifts
    2, // llvm.arm.neon.vqshiftsu
    2, // llvm.arm.neon.vqshiftu
    2, // llvm.arm.neon.vraddhn
    2, // llvm.arm.neon.vrecpe
    2, // llvm.arm.neon.vrecps
    2, // llvm.arm.neon.vrhadds
    2, // llvm.arm.neon.vrhaddu
    2, // llvm.arm.neon.vrinta
    2, // llvm.arm.neon.vrintm
    2, // llvm.arm.neon.vrintn
    2, // llvm.arm.neon.vrintp
    2, // llvm.arm.neon.vrintx
    2, // llvm.arm.neon.vrintz
    2, // llvm.arm.neon.vrshiftn
    2, // llvm.arm.neon.vrshifts
    2, // llvm.arm.neon.vrshiftu
    2, // llvm.arm.neon.vrsqrte
    2, // llvm.arm.neon.vrsqrts
    2, // llvm.arm.neon.vrsubhn
    2, // llvm.arm.neon.vshiftins
    2, // llvm.arm.neon.vshifts
    2, // llvm.arm.neon.vshiftu
    219, // llvm.arm.neon.vst1
    113, // llvm.arm.neon.vst1x2
    113, // llvm.arm.neon.vst1x3
    113, // llvm.arm.neon.vst1x4
    219, // llvm.arm.neon.vst2
    219, // llvm.arm.neon.vst2lane
    219, // llvm.arm.neon.vst3
    219, // llvm.arm.neon.vst3lane
    219, // llvm.arm.neon.vst4
    219, // llvm.arm.neon.vst4lane
    2, // llvm.arm.neon.vtbl1
    2, // llvm.arm.neon.vtbl2
    2, // llvm.arm.neon.vtbl3
    2, // llvm.arm.neon.vtbl4
    2, // llvm.arm.neon.vtbx1
    2, // llvm.arm.neon.vtbx2
    2, // llvm.arm.neon.vtbx3
    2, // llvm.arm.neon.vtbx4
    2, // llvm.arm.qadd
    2, // llvm.arm.qadd16
    2, // llvm.arm.qadd8
    2, // llvm.arm.qasx
    2, // llvm.arm.qsax
    2, // llvm.arm.qsub
    2, // llvm.arm.qsub16
    2, // llvm.arm.qsub8
    8, // llvm.arm.sadd16
    8, // llvm.arm.sadd8
    8, // llvm.arm.sasx
    62, // llvm.arm.sel
    8, // llvm.arm.set.fpscr
    2, // llvm.arm.shadd16
    2, // llvm.arm.shadd8
    2, // llvm.arm.shasx
    2, // llvm.arm.shsax
    2, // llvm.arm.shsub16
    2, // llvm.arm.shsub8
    2, // llvm.arm.smlabb
    2, // llvm.arm.smlabt
    2, // llvm.arm.smlad
    2, // llvm.arm.smladx
    2, // llvm.arm.smlald
    2, // llvm.arm.smlaldx
    2, // llvm.arm.smlatb
    2, // llvm.arm.smlatt
    2, // llvm.arm.smlawb
    2, // llvm.arm.smlawt
    2, // llvm.arm.smlsd
    2, // llvm.arm.smlsdx
    2, // llvm.arm.smlsld
    2, // llvm.arm.smlsldx
    2, // llvm.arm.smuad
    2, // llvm.arm.smuadx
    2, // llvm.arm.smulbb
    2, // llvm.arm.smulbt
    2, // llvm.arm.smultb
    2, // llvm.arm.smultt
    2, // llvm.arm.smulwb
    2, // llvm.arm.smulwt
    2, // llvm.arm.smusd
    2, // llvm.arm.smusdx
    220, // llvm.arm.space
    2, // llvm.arm.ssat
    2, // llvm.arm.ssat16
    8, // llvm.arm.ssax
    8, // llvm.arm.ssub16
    8, // llvm.arm.ssub8
    214, // llvm.arm.stc
    214, // llvm.arm.stc2
    214, // llvm.arm.stc2l
    214, // llvm.arm.stcl
    10, // llvm.arm.stlex
    10, // llvm.arm.stlexd
    10, // llvm.arm.strex
    10, // llvm.arm.strexd
    2, // llvm.arm.sxtab16
    2, // llvm.arm.sxtb16
    8, // llvm.arm.uadd16
    8, // llvm.arm.uadd8
    8, // llvm.arm.uasx
    2, // llvm.arm.uhadd16
    2, // llvm.arm.uhadd8
    2, // llvm.arm.uhasx
    2, // llvm.arm.uhsax
    2, // llvm.arm.uhsub16
    2, // llvm.arm.uhsub8
    10, // llvm.arm.undefined
    2, // llvm.arm.uqadd16
    2, // llvm.arm.uqadd8
    2, // llvm.arm.uqasx
    2, // llvm.arm.uqsax
    2, // llvm.arm.uqsub16
    2, // llvm.arm.uqsub8
    2, // llvm.arm.usad8
    2, // llvm.arm.usada8
    2, // llvm.arm.usat
    2, // llvm.arm.usat16
    8, // llvm.arm.usax
    8, // llvm.arm.usub16
    8, // llvm.arm.usub8
    2, // llvm.arm.uxtab16
    2, // llvm.arm.uxtb16
    2, // llvm.arm.vcvtr
    2, // llvm.arm.vcvtru
    12, // llvm.bpf.btf.type.id
    12, // llvm.bpf.compare
    221, // llvm.bpf.load.byte
    221, // llvm.bpf.load.half
    221, // llvm.bpf.load.word
    12, // llvm.bpf.passthrough
    12, // llvm.bpf.preserve.enum.value
    60, // llvm.bpf.preserve.field.info
    12, // llvm.bpf.preserve.type.info
    10, // llvm.bpf.pseudo
    100, // llvm.dx.create.handle
    222, // llvm.dx.flattened.thread.id.in.group
    222, // llvm.dx.group.id
    222, // llvm.dx.thread.id
    222, // llvm.dx.thread.id.in.group
    2, // llvm.hexagon.A2.abs
    2, // llvm.hexagon.A2.absp
    2, // llvm.hexagon.A2.abssat
    2, // llvm.hexagon.A2.add
    2, // llvm.hexagon.A2.addh.h16.hh
    2, // llvm.hexagon.A2.addh.h16.hl
    2, // llvm.hexagon.A2.addh.h16.lh
    2, // llvm.hexagon.A2.addh.h16.ll
    2, // llvm.hexagon.A2.addh.h16.sat.hh
    2, // llvm.hexagon.A2.addh.h16.sat.hl
    2, // llvm.hexagon.A2.addh.h16.sat.lh
    2, // llvm.hexagon.A2.addh.h16.sat.ll
    2, // llvm.hexagon.A2.addh.l16.hl
    2, // llvm.hexagon.A2.addh.l16.ll
    2, // llvm.hexagon.A2.addh.l16.sat.hl
    2, // llvm.hexagon.A2.addh.l16.sat.ll
    59, // llvm.hexagon.A2.addi
    2, // llvm.hexagon.A2.addp
    2, // llvm.hexagon.A2.addpsat
    2, // llvm.hexagon.A2.addsat
    2, // llvm.hexagon.A2.addsp
    2, // llvm.hexagon.A2.and
    59, // llvm.hexagon.A2.andir
    2, // llvm.hexagon.A2.andp
    2, // llvm.hexagon.A2.aslh
    2, // llvm.hexagon.A2.asrh
    2, // llvm.hexagon.A2.combine.hh
    2, // llvm.hexagon.A2.combine.hl
    2, // llvm.hexagon.A2.combine.lh
    2, // llvm.hexagon.A2.combine.ll
    208, // llvm.hexagon.A2.combineii
    2, // llvm.hexagon.A2.combinew
    2, // llvm.hexagon.A2.max
    2, // llvm.hexagon.A2.maxp
    2, // llvm.hexagon.A2.maxu
    2, // llvm.hexagon.A2.maxup
    2, // llvm.hexagon.A2.min
    2, // llvm.hexagon.A2.minp
    2, // llvm.hexagon.A2.minu
    2, // llvm.hexagon.A2.minup
    2, // llvm.hexagon.A2.neg
    2, // llvm.hexagon.A2.negp
    2, // llvm.hexagon.A2.negsat
    2, // llvm.hexagon.A2.not
    2, // llvm.hexagon.A2.notp
    2, // llvm.hexagon.A2.or
    59, // llvm.hexagon.A2.orir
    2, // llvm.hexagon.A2.orp
    2, // llvm.hexagon.A2.roundsat
    2, // llvm.hexagon.A2.sat
    2, // llvm.hexagon.A2.satb
    2, // llvm.hexagon.A2.sath
    2, // llvm.hexagon.A2.satub
    2, // llvm.hexagon.A2.satuh
    2, // llvm.hexagon.A2.sub
    2, // llvm.hexagon.A2.subh.h16.hh
    2, // llvm.hexagon.A2.subh.h16.hl
    2, // llvm.hexagon.A2.subh.h16.lh
    2, // llvm.hexagon.A2.subh.h16.ll
    2, // llvm.hexagon.A2.subh.h16.sat.hh
    2, // llvm.hexagon.A2.subh.h16.sat.hl
    2, // llvm.hexagon.A2.subh.h16.sat.lh
    2, // llvm.hexagon.A2.subh.h16.sat.ll
    2, // llvm.hexagon.A2.subh.l16.hl
    2, // llvm.hexagon.A2.subh.l16.ll
    2, // llvm.hexagon.A2.subh.l16.sat.hl
    2, // llvm.hexagon.A2.subh.l16.sat.ll
    2, // llvm.hexagon.A2.subp
    32, // llvm.hexagon.A2.subri
    2, // llvm.hexagon.A2.subsat
    2, // llvm.hexagon.A2.svaddh
    2, // llvm.hexagon.A2.svaddhs
    2, // llvm.hexagon.A2.svadduhs
    2, // llvm.hexagon.A2.svavgh
    2, // llvm.hexagon.A2.svavghs
    2, // llvm.hexagon.A2.svnavgh
    2, // llvm.hexagon.A2.svsubh
    2, // llvm.hexagon.A2.svsubhs
    2, // llvm.hexagon.A2.svsubuhs
    2, // llvm.hexagon.A2.swiz
    2, // llvm.hexagon.A2.sxtb
    2, // llvm.hexagon.A2.sxth
    2, // llvm.hexagon.A2.sxtw
    2, // llvm.hexagon.A2.tfr
    59, // llvm.hexagon.A2.tfrih
    59, // llvm.hexagon.A2.tfril
    2, // llvm.hexagon.A2.tfrp
    32, // llvm.hexagon.A2.tfrpi
    32, // llvm.hexagon.A2.tfrsi
    2, // llvm.hexagon.A2.vabsh
    2, // llvm.hexagon.A2.vabshsat
    2, // llvm.hexagon.A2.vabsw
    2, // llvm.hexagon.A2.vabswsat
    2, // llvm.hexagon.A2.vaddb.map
    2, // llvm.hexagon.A2.vaddh
    2, // llvm.hexagon.A2.vaddhs
    2, // llvm.hexagon.A2.vaddub
    2, // llvm.hexagon.A2.vaddubs
    2, // llvm.hexagon.A2.vadduhs
    2, // llvm.hexagon.A2.vaddw
    2, // llvm.hexagon.A2.vaddws
    2, // llvm.hexagon.A2.vavgh
    2, // llvm.hexagon.A2.vavghcr
    2, // llvm.hexagon.A2.vavghr
    2, // llvm.hexagon.A2.vavgub
    2, // llvm.hexagon.A2.vavgubr
    2, // llvm.hexagon.A2.vavguh
    2, // llvm.hexagon.A2.vavguhr
    2, // llvm.hexagon.A2.vavguw
    2, // llvm.hexagon.A2.vavguwr
    2, // llvm.hexagon.A2.vavgw
    2, // llvm.hexagon.A2.vavgwcr
    2, // llvm.hexagon.A2.vavgwr
    2, // llvm.hexagon.A2.vcmpbeq
    2, // llvm.hexagon.A2.vcmpbgtu
    2, // llvm.hexagon.A2.vcmpheq
    2, // llvm.hexagon.A2.vcmphgt
    2, // llvm.hexagon.A2.vcmphgtu
    2, // llvm.hexagon.A2.vcmpweq
    2, // llvm.hexagon.A2.vcmpwgt
    2, // llvm.hexagon.A2.vcmpwgtu
    2, // llvm.hexagon.A2.vconj
    2, // llvm.hexagon.A2.vmaxb
    2, // llvm.hexagon.A2.vmaxh
    2, // llvm.hexagon.A2.vmaxub
    2, // llvm.hexagon.A2.vmaxuh
    2, // llvm.hexagon.A2.vmaxuw
    2, // llvm.hexagon.A2.vmaxw
    2, // llvm.hexagon.A2.vminb
    2, // llvm.hexagon.A2.vminh
    2, // llvm.hexagon.A2.vminub
    2, // llvm.hexagon.A2.vminuh
    2, // llvm.hexagon.A2.vminuw
    2, // llvm.hexagon.A2.vminw
    2, // llvm.hexagon.A2.vnavgh
    2, // llvm.hexagon.A2.vnavghcr
    2, // llvm.hexagon.A2.vnavghr
    2, // llvm.hexagon.A2.vnavgw
    2, // llvm.hexagon.A2.vnavgwcr
    2, // llvm.hexagon.A2.vnavgwr
    2, // llvm.hexagon.A2.vraddub
    2, // llvm.hexagon.A2.vraddub.acc
    2, // llvm.hexagon.A2.vrsadub
    2, // llvm.hexagon.A2.vrsadub.acc
    2, // llvm.hexagon.A2.vsubb.map
    2, // llvm.hexagon.A2.vsubh
    2, // llvm.hexagon.A2.vsubhs
    2, // llvm.hexagon.A2.vsubub
    2, // llvm.hexagon.A2.vsububs
    2, // llvm.hexagon.A2.vsubuhs
    2, // llvm.hexagon.A2.vsubw
    2, // llvm.hexagon.A2.vsubws
    2, // llvm.hexagon.A2.xor
    2, // llvm.hexagon.A2.xorp
    2, // llvm.hexagon.A2.zxtb
    2, // llvm.hexagon.A2.zxth
    2, // llvm.hexagon.A4.andn
    2, // llvm.hexagon.A4.andnp
    2, // llvm.hexagon.A4.bitsplit
    59, // llvm.hexagon.A4.bitspliti
    2, // llvm.hexagon.A4.boundscheck
    2, // llvm.hexagon.A4.cmpbeq
    59, // llvm.hexagon.A4.cmpbeqi
    2, // llvm.hexagon.A4.cmpbgt
    59, // llvm.hexagon.A4.cmpbgti
    2, // llvm.hexagon.A4.cmpbgtu
    59, // llvm.hexagon.A4.cmpbgtui
    2, // llvm.hexagon.A4.cmpheq
    59, // llvm.hexagon.A4.cmpheqi
    2, // llvm.hexagon.A4.cmphgt
    59, // llvm.hexagon.A4.cmphgti
    2, // llvm.hexagon.A4.cmphgtu
    59, // llvm.hexagon.A4.cmphgtui
    32, // llvm.hexagon.A4.combineir
    59, // llvm.hexagon.A4.combineri
    59, // llvm.hexagon.A4.cround.ri
    2, // llvm.hexagon.A4.cround.rr
    2, // llvm.hexagon.A4.modwrapu
    2, // llvm.hexagon.A4.orn
    2, // llvm.hexagon.A4.ornp
    2, // llvm.hexagon.A4.rcmpeq
    59, // llvm.hexagon.A4.rcmpeqi
    2, // llvm.hexagon.A4.rcmpneq
    59, // llvm.hexagon.A4.rcmpneqi
    59, // llvm.hexagon.A4.round.ri
    59, // llvm.hexagon.A4.round.ri.sat
    2, // llvm.hexagon.A4.round.rr
    2, // llvm.hexagon.A4.round.rr.sat
    2, // llvm.hexagon.A4.tlbmatch
    2, // llvm.hexagon.A4.vcmpbeq.any
    59, // llvm.hexagon.A4.vcmpbeqi
    2, // llvm.hexagon.A4.vcmpbgt
    59, // llvm.hexagon.A4.vcmpbgti
    59, // llvm.hexagon.A4.vcmpbgtui
    59, // llvm.hexagon.A4.vcmpheqi
    59, // llvm.hexagon.A4.vcmphgti
    59, // llvm.hexagon.A4.vcmphgtui
    59, // llvm.hexagon.A4.vcmpweqi
    59, // llvm.hexagon.A4.vcmpwgti
    59, // llvm.hexagon.A4.vcmpwgtui
    2, // llvm.hexagon.A4.vrmaxh
    2, // llvm.hexagon.A4.vrmaxuh
    2, // llvm.hexagon.A4.vrmaxuw
    2, // llvm.hexagon.A4.vrmaxw
    2, // llvm.hexagon.A4.vrminh
    2, // llvm.hexagon.A4.vrminuh
    2, // llvm.hexagon.A4.vrminuw
    2, // llvm.hexagon.A4.vrminw
    2, // llvm.hexagon.A5.vaddhubs
    2, // llvm.hexagon.A6.vcmpbeq.notany
    59, // llvm.hexagon.A7.clip
    59, // llvm.hexagon.A7.croundd.ri
    2, // llvm.hexagon.A7.croundd.rr
    59, // llvm.hexagon.A7.vclip
    2, // llvm.hexagon.C2.all8
    2, // llvm.hexagon.C2.and
    2, // llvm.hexagon.C2.andn
    2, // llvm.hexagon.C2.any8
    2, // llvm.hexagon.C2.bitsclr
    59, // llvm.hexagon.C2.bitsclri
    2, // llvm.hexagon.C2.bitsset
    2, // llvm.hexagon.C2.cmpeq
    59, // llvm.hexagon.C2.cmpeqi
    2, // llvm.hexagon.C2.cmpeqp
    59, // llvm.hexagon.C2.cmpgei
    59, // llvm.hexagon.C2.cmpgeui
    2, // llvm.hexagon.C2.cmpgt
    59, // llvm.hexagon.C2.cmpgti
    2, // llvm.hexagon.C2.cmpgtp
    2, // llvm.hexagon.C2.cmpgtu
    59, // llvm.hexagon.C2.cmpgtui
    2, // llvm.hexagon.C2.cmpgtup
    2, // llvm.hexagon.C2.cmplt
    2, // llvm.hexagon.C2.cmpltu
    2, // llvm.hexagon.C2.mask
    2, // llvm.hexagon.C2.mux
    26, // llvm.hexagon.C2.muxii
    22, // llvm.hexagon.C2.muxir
    59, // llvm.hexagon.C2.muxri
    2, // llvm.hexagon.C2.not
    2, // llvm.hexagon.C2.or
    2, // llvm.hexagon.C2.orn
    2, // llvm.hexagon.C2.pxfer.map
    2, // llvm.hexagon.C2.tfrpr
    2, // llvm.hexagon.C2.tfrrp
    2, // llvm.hexagon.C2.vitpack
    2, // llvm.hexagon.C2.vmux
    2, // llvm.hexagon.C2.xor
    2, // llvm.hexagon.C4.and.and
    2, // llvm.hexagon.C4.and.andn
    2, // llvm.hexagon.C4.and.or
    2, // llvm.hexagon.C4.and.orn
    2, // llvm.hexagon.C4.cmplte
    59, // llvm.hexagon.C4.cmpltei
    2, // llvm.hexagon.C4.cmplteu
    59, // llvm.hexagon.C4.cmplteui
    2, // llvm.hexagon.C4.cmpneq
    59, // llvm.hexagon.C4.cmpneqi
    2, // llvm.hexagon.C4.fastcorner9
    2, // llvm.hexagon.C4.fastcorner9.not
    2, // llvm.hexagon.C4.nbitsclr
    59, // llvm.hexagon.C4.nbitsclri
    2, // llvm.hexagon.C4.nbitsset
    2, // llvm.hexagon.C4.or.and
    2, // llvm.hexagon.C4.or.andn
    2, // llvm.hexagon.C4.or.or
    2, // llvm.hexagon.C4.or.orn
    2, // llvm.hexagon.F2.conv.d2df
    2, // llvm.hexagon.F2.conv.d2sf
    2, // llvm.hexagon.F2.conv.df2d
    2, // llvm.hexagon.F2.conv.df2d.chop
    2, // llvm.hexagon.F2.conv.df2sf
    2, // llvm.hexagon.F2.conv.df2ud
    2, // llvm.hexagon.F2.conv.df2ud.chop
    2, // llvm.hexagon.F2.conv.df2uw
    2, // llvm.hexagon.F2.conv.df2uw.chop
    2, // llvm.hexagon.F2.conv.df2w
    2, // llvm.hexagon.F2.conv.df2w.chop
    2, // llvm.hexagon.F2.conv.sf2d
    2, // llvm.hexagon.F2.conv.sf2d.chop
    2, // llvm.hexagon.F2.conv.sf2df
    2, // llvm.hexagon.F2.conv.sf2ud
    2, // llvm.hexagon.F2.conv.sf2ud.chop
    2, // llvm.hexagon.F2.conv.sf2uw
    2, // llvm.hexagon.F2.conv.sf2uw.chop
    2, // llvm.hexagon.F2.conv.sf2w
    2, // llvm.hexagon.F2.conv.sf2w.chop
    2, // llvm.hexagon.F2.conv.ud2df
    2, // llvm.hexagon.F2.conv.ud2sf
    2, // llvm.hexagon.F2.conv.uw2df
    2, // llvm.hexagon.F2.conv.uw2sf
    2, // llvm.hexagon.F2.conv.w2df
    2, // llvm.hexagon.F2.conv.w2sf
    223, // llvm.hexagon.F2.dfadd
    224, // llvm.hexagon.F2.dfclass
    223, // llvm.hexagon.F2.dfcmpeq
    223, // llvm.hexagon.F2.dfcmpge
    223, // llvm.hexagon.F2.dfcmpgt
    223, // llvm.hexagon.F2.dfcmpuo
    225, // llvm.hexagon.F2.dfimm.n
    225, // llvm.hexagon.F2.dfimm.p
    223, // llvm.hexagon.F2.dfmax
    223, // llvm.hexagon.F2.dfmin
    223, // llvm.hexagon.F2.dfmpyfix
    223, // llvm.hexagon.F2.dfmpyhh
    223, // llvm.hexagon.F2.dfmpylh
    223, // llvm.hexagon.F2.dfmpyll
    223, // llvm.hexagon.F2.dfsub
    223, // llvm.hexagon.F2.sfadd
    224, // llvm.hexagon.F2.sfclass
    223, // llvm.hexagon.F2.sfcmpeq
    223, // llvm.hexagon.F2.sfcmpge
    223, // llvm.hexagon.F2.sfcmpgt
    223, // llvm.hexagon.F2.sfcmpuo
    223, // llvm.hexagon.F2.sffixupd
    223, // llvm.hexagon.F2.sffixupn
    223, // llvm.hexagon.F2.sffixupr
    223, // llvm.hexagon.F2.sffma
    223, // llvm.hexagon.F2.sffma.lib
    223, // llvm.hexagon.F2.sffma.sc
    223, // llvm.hexagon.F2.sffms
    223, // llvm.hexagon.F2.sffms.lib
    225, // llvm.hexagon.F2.sfimm.n
    225, // llvm.hexagon.F2.sfimm.p
    223, // llvm.hexagon.F2.sfmax
    223, // llvm.hexagon.F2.sfmin
    223, // llvm.hexagon.F2.sfmpy
    223, // llvm.hexagon.F2.sfsub
    62, // llvm.hexagon.L2.loadrb.pbr
    79, // llvm.hexagon.L2.loadrb.pci
    78, // llvm.hexagon.L2.loadrb.pcr
    62, // llvm.hexagon.L2.loadrd.pbr
    79, // llvm.hexagon.L2.loadrd.pci
    78, // llvm.hexagon.L2.loadrd.pcr
    62, // llvm.hexagon.L2.loadrh.pbr
    79, // llvm.hexagon.L2.loadrh.pci
    78, // llvm.hexagon.L2.loadrh.pcr
    62, // llvm.hexagon.L2.loadri.pbr
    79, // llvm.hexagon.L2.loadri.pci
    78, // llvm.hexagon.L2.loadri.pcr
    62, // llvm.hexagon.L2.loadrub.pbr
    79, // llvm.hexagon.L2.loadrub.pci
    78, // llvm.hexagon.L2.loadrub.pcr
    62, // llvm.hexagon.L2.loadruh.pbr
    79, // llvm.hexagon.L2.loadruh.pci
    78, // llvm.hexagon.L2.loadruh.pcr
    226, // llvm.hexagon.L2.loadw.locked
    226, // llvm.hexagon.L4.loadd.locked
    2, // llvm.hexagon.M2.acci
    22, // llvm.hexagon.M2.accii
    2, // llvm.hexagon.M2.cmaci.s0
    2, // llvm.hexagon.M2.cmacr.s0
    2, // llvm.hexagon.M2.cmacs.s0
    2, // llvm.hexagon.M2.cmacs.s1
    2, // llvm.hexagon.M2.cmacsc.s0
    2, // llvm.hexagon.M2.cmacsc.s1
    2, // llvm.hexagon.M2.cmpyi.s0
    2, // llvm.hexagon.M2.cmpyr.s0
    2, // llvm.hexagon.M2.cmpyrs.s0
    2, // llvm.hexagon.M2.cmpyrs.s1
    2, // llvm.hexagon.M2.cmpyrsc.s0
    2, // llvm.hexagon.M2.cmpyrsc.s1
    2, // llvm.hexagon.M2.cmpys.s0
    2, // llvm.hexagon.M2.cmpys.s1
    2, // llvm.hexagon.M2.cmpysc.s0
    2, // llvm.hexagon.M2.cmpysc.s1
    2, // llvm.hexagon.M2.cnacs.s0
    2, // llvm.hexagon.M2.cnacs.s1
    2, // llvm.hexagon.M2.cnacsc.s0
    2, // llvm.hexagon.M2.cnacsc.s1
    2, // llvm.hexagon.M2.dpmpyss.acc.s0
    2, // llvm.hexagon.M2.dpmpyss.nac.s0
    2, // llvm.hexagon.M2.dpmpyss.rnd.s0
    2, // llvm.hexagon.M2.dpmpyss.s0
    2, // llvm.hexagon.M2.dpmpyuu.acc.s0
    2, // llvm.hexagon.M2.dpmpyuu.nac.s0
    2, // llvm.hexagon.M2.dpmpyuu.s0
    2, // llvm.hexagon.M2.hmmpyh.rs1
    2, // llvm.hexagon.M2.hmmpyh.s1
    2, // llvm.hexagon.M2.hmmpyl.rs1
    2, // llvm.hexagon.M2.hmmpyl.s1
    2, // llvm.hexagon.M2.maci
    22, // llvm.hexagon.M2.macsin
    22, // llvm.hexagon.M2.macsip
    2, // llvm.hexagon.M2.mmachs.rs0
    2, // llvm.hexagon.M2.mmachs.rs1
    2, // llvm.hexagon.M2.mmachs.s0
    2, // llvm.hexagon.M2.mmachs.s1
    2, // llvm.hexagon.M2.mmacls.rs0
    2, // llvm.hexagon.M2.mmacls.rs1
    2, // llvm.hexagon.M2.mmacls.s0
    2, // llvm.hexagon.M2.mmacls.s1
    2, // llvm.hexagon.M2.mmacuhs.rs0
    2, // llvm.hexagon.M2.mmacuhs.rs1
    2, // llvm.hexagon.M2.mmacuhs.s0
    2, // llvm.hexagon.M2.mmacuhs.s1
    2, // llvm.hexagon.M2.mmaculs.rs0
    2, // llvm.hexagon.M2.mmaculs.rs1
    2, // llvm.hexagon.M2.mmaculs.s0
    2, // llvm.hexagon.M2.mmaculs.s1
    2, // llvm.hexagon.M2.mmpyh.rs0
    2, // llvm.hexagon.M2.mmpyh.rs1
    2, // llvm.hexagon.M2.mmpyh.s0
    2, // llvm.hexagon.M2.mmpyh.s1
    2, // llvm.hexagon.M2.mmpyl.rs0
    2, // llvm.hexagon.M2.mmpyl.rs1
    2, // llvm.hexagon.M2.mmpyl.s0
    2, // llvm.hexagon.M2.mmpyl.s1
    2, // llvm.hexagon.M2.mmpyuh.rs0
    2, // llvm.hexagon.M2.mmpyuh.rs1
    2, // llvm.hexagon.M2.mmpyuh.s0
    2, // llvm.hexagon.M2.mmpyuh.s1
    2, // llvm.hexagon.M2.mmpyul.rs0
    2, // llvm.hexagon.M2.mmpyul.rs1
    2, // llvm.hexagon.M2.mmpyul.s0
    2, // llvm.hexagon.M2.mmpyul.s1
    2, // llvm.hexagon.M2.mnaci
    2, // llvm.hexagon.M2.mpy.acc.hh.s0
    2, // llvm.hexagon.M2.mpy.acc.hh.s1
    2, // llvm.hexagon.M2.mpy.acc.hl.s0
    2, // llvm.hexagon.M2.mpy.acc.hl.s1
    2, // llvm.hexagon.M2.mpy.acc.lh.s0
    2, // llvm.hexagon.M2.mpy.acc.lh.s1
    2, // llvm.hexagon.M2.mpy.acc.ll.s0
    2, // llvm.hexagon.M2.mpy.acc.ll.s1
    2, // llvm.hexagon.M2.mpy.acc.sat.hh.s0
    2, // llvm.hexagon.M2.mpy.acc.sat.hh.s1
    2, // llvm.hexagon.M2.mpy.acc.sat.hl.s0
    2, // llvm.hexagon.M2.mpy.acc.sat.hl.s1
    2, // llvm.hexagon.M2.mpy.acc.sat.lh.s0
    2, // llvm.hexagon.M2.mpy.acc.sat.lh.s1
    2, // llvm.hexagon.M2.mpy.acc.sat.ll.s0
    2, // llvm.hexagon.M2.mpy.acc.sat.ll.s1
    2, // llvm.hexagon.M2.mpy.hh.s0
    2, // llvm.hexagon.M2.mpy.hh.s1
    2, // llvm.hexagon.M2.mpy.hl.s0
    2, // llvm.hexagon.M2.mpy.hl.s1
    2, // llvm.hexagon.M2.mpy.lh.s0
    2, // llvm.hexagon.M2.mpy.lh.s1
    2, // llvm.hexagon.M2.mpy.ll.s0
    2, // llvm.hexagon.M2.mpy.ll.s1
    2, // llvm.hexagon.M2.mpy.nac.hh.s0
    2, // llvm.hexagon.M2.mpy.nac.hh.s1
    2, // llvm.hexagon.M2.mpy.nac.hl.s0
    2, // llvm.hexagon.M2.mpy.nac.hl.s1
    2, // llvm.hexagon.M2.mpy.nac.lh.s0
    2, // llvm.hexagon.M2.mpy.nac.lh.s1
    2, // llvm.hexagon.M2.mpy.nac.ll.s0
    2, // llvm.hexagon.M2.mpy.nac.ll.s1
    2, // llvm.hexagon.M2.mpy.nac.sat.hh.s0
    2, // llvm.hexagon.M2.mpy.nac.sat.hh.s1
    2, // llvm.hexagon.M2.mpy.nac.sat.hl.s0
    2, // llvm.hexagon.M2.mpy.nac.sat.hl.s1
    2, // llvm.hexagon.M2.mpy.nac.sat.lh.s0
    2, // llvm.hexagon.M2.mpy.nac.sat.lh.s1
    2, // llvm.hexagon.M2.mpy.nac.sat.ll.s0
    2, // llvm.hexagon.M2.mpy.nac.sat.ll.s1
    2, // llvm.hexagon.M2.mpy.rnd.hh.s0
    2, // llvm.hexagon.M2.mpy.rnd.hh.s1
    2, // llvm.hexagon.M2.mpy.rnd.hl.s0
    2, // llvm.hexagon.M2.mpy.rnd.hl.s1
    2, // llvm.hexagon.M2.mpy.rnd.lh.s0
    2, // llvm.hexagon.M2.mpy.rnd.lh.s1
    2, // llvm.hexagon.M2.mpy.rnd.ll.s0
    2, // llvm.hexagon.M2.mpy.rnd.ll.s1
    2, // llvm.hexagon.M2.mpy.sat.hh.s0
    2, // llvm.hexagon.M2.mpy.sat.hh.s1
    2, // llvm.hexagon.M2.mpy.sat.hl.s0
    2, // llvm.hexagon.M2.mpy.sat.hl.s1
    2, // llvm.hexagon.M2.mpy.sat.lh.s0
    2, // llvm.hexagon.M2.mpy.sat.lh.s1
    2, // llvm.hexagon.M2.mpy.sat.ll.s0
    2, // llvm.hexagon.M2.mpy.sat.ll.s1
    2, // llvm.hexagon.M2.mpy.sat.rnd.hh.s0
    2, // llvm.hexagon.M2.mpy.sat.rnd.hh.s1
    2, // llvm.hexagon.M2.mpy.sat.rnd.hl.s0
    2, // llvm.hexagon.M2.mpy.sat.rnd.hl.s1
    2, // llvm.hexagon.M2.mpy.sat.rnd.lh.s0
    2, // llvm.hexagon.M2.mpy.sat.rnd.lh.s1
    2, // llvm.hexagon.M2.mpy.sat.rnd.ll.s0
    2, // llvm.hexagon.M2.mpy.sat.rnd.ll.s1
    2, // llvm.hexagon.M2.mpy.up
    2, // llvm.hexagon.M2.mpy.up.s1
    2, // llvm.hexagon.M2.mpy.up.s1.sat
    2, // llvm.hexagon.M2.mpyd.acc.hh.s0
    2, // llvm.hexagon.M2.mpyd.acc.hh.s1
    2, // llvm.hexagon.M2.mpyd.acc.hl.s0
    2, // llvm.hexagon.M2.mpyd.acc.hl.s1
    2, // llvm.hexagon.M2.mpyd.acc.lh.s0
    2, // llvm.hexagon.M2.mpyd.acc.lh.s1
    2, // llvm.hexagon.M2.mpyd.acc.ll.s0
    2, // llvm.hexagon.M2.mpyd.acc.ll.s1
    2, // llvm.hexagon.M2.mpyd.hh.s0
    2, // llvm.hexagon.M2.mpyd.hh.s1
    2, // llvm.hexagon.M2.mpyd.hl.s0
    2, // llvm.hexagon.M2.mpyd.hl.s1
    2, // llvm.hexagon.M2.mpyd.lh.s0
    2, // llvm.hexagon.M2.mpyd.lh.s1
    2, // llvm.hexagon.M2.mpyd.ll.s0
    2, // llvm.hexagon.M2.mpyd.ll.s1
    2, // llvm.hexagon.M2.mpyd.nac.hh.s0
    2, // llvm.hexagon.M2.mpyd.nac.hh.s1
    2, // llvm.hexagon.M2.mpyd.nac.hl.s0
    2, // llvm.hexagon.M2.mpyd.nac.hl.s1
    2, // llvm.hexagon.M2.mpyd.nac.lh.s0
    2, // llvm.hexagon.M2.mpyd.nac.lh.s1
    2, // llvm.hexagon.M2.mpyd.nac.ll.s0
    2, // llvm.hexagon.M2.mpyd.nac.ll.s1
    2, // llvm.hexagon.M2.mpyd.rnd.hh.s0
    2, // llvm.hexagon.M2.mpyd.rnd.hh.s1
    2, // llvm.hexagon.M2.mpyd.rnd.hl.s0
    2, // llvm.hexagon.M2.mpyd.rnd.hl.s1
    2, // llvm.hexagon.M2.mpyd.rnd.lh.s0
    2, // llvm.hexagon.M2.mpyd.rnd.lh.s1
    2, // llvm.hexagon.M2.mpyd.rnd.ll.s0
    2, // llvm.hexagon.M2.mpyd.rnd.ll.s1
    2, // llvm.hexagon.M2.mpyi
    59, // llvm.hexagon.M2.mpysmi
    2, // llvm.hexagon.M2.mpysu.up
    2, // llvm.hexagon.M2.mpyu.acc.hh.s0
    2, // llvm.hexagon.M2.mpyu.acc.hh.s1
    2, // llvm.hexagon.M2.mpyu.acc.hl.s0
    2, // llvm.hexagon.M2.mpyu.acc.hl.s1
    2, // llvm.hexagon.M2.mpyu.acc.lh.s0
    2, // llvm.hexagon.M2.mpyu.acc.lh.s1
    2, // llvm.hexagon.M2.mpyu.acc.ll.s0
    2, // llvm.hexagon.M2.mpyu.acc.ll.s1
    2, // llvm.hexagon.M2.mpyu.hh.s0
    2, // llvm.hexagon.M2.mpyu.hh.s1
    2, // llvm.hexagon.M2.mpyu.hl.s0
    2, // llvm.hexagon.M2.mpyu.hl.s1
    2, // llvm.hexagon.M2.mpyu.lh.s0
    2, // llvm.hexagon.M2.mpyu.lh.s1
    2, // llvm.hexagon.M2.mpyu.ll.s0
    2, // llvm.hexagon.M2.mpyu.ll.s1
    2, // llvm.hexagon.M2.mpyu.nac.hh.s0
    2, // llvm.hexagon.M2.mpyu.nac.hh.s1
    2, // llvm.hexagon.M2.mpyu.nac.hl.s0
    2, // llvm.hexagon.M2.mpyu.nac.hl.s1
    2, // llvm.hexagon.M2.mpyu.nac.lh.s0
    2, // llvm.hexagon.M2.mpyu.nac.lh.s1
    2, // llvm.hexagon.M2.mpyu.nac.ll.s0
    2, // llvm.hexagon.M2.mpyu.nac.ll.s1
    2, // llvm.hexagon.M2.mpyu.up
    2, // llvm.hexagon.M2.mpyud.acc.hh.s0
    2, // llvm.hexagon.M2.mpyud.acc.hh.s1
    2, // llvm.hexagon.M2.mpyud.acc.hl.s0
    2, // llvm.hexagon.M2.mpyud.acc.hl.s1
    2, // llvm.hexagon.M2.mpyud.acc.lh.s0
    2, // llvm.hexagon.M2.mpyud.acc.lh.s1
    2, // llvm.hexagon.M2.mpyud.acc.ll.s0
    2, // llvm.hexagon.M2.mpyud.acc.ll.s1
    2, // llvm.hexagon.M2.mpyud.hh.s0
    2, // llvm.hexagon.M2.mpyud.hh.s1
    2, // llvm.hexagon.M2.mpyud.hl.s0
    2, // llvm.hexagon.M2.mpyud.hl.s1
    2, // llvm.hexagon.M2.mpyud.lh.s0
    2, // llvm.hexagon.M2.mpyud.lh.s1
    2, // llvm.hexagon.M2.mpyud.ll.s0
    2, // llvm.hexagon.M2.mpyud.ll.s1
    2, // llvm.hexagon.M2.mpyud.nac.hh.s0
    2, // llvm.hexagon.M2.mpyud.nac.hh.s1
    2, // llvm.hexagon.M2.mpyud.nac.hl.s0
    2, // llvm.hexagon.M2.mpyud.nac.hl.s1
    2, // llvm.hexagon.M2.mpyud.nac.lh.s0
    2, // llvm.hexagon.M2.mpyud.nac.lh.s1
    2, // llvm.hexagon.M2.mpyud.nac.ll.s0
    2, // llvm.hexagon.M2.mpyud.nac.ll.s1
    2, // llvm.hexagon.M2.mpyui
    2, // llvm.hexagon.M2.nacci
    22, // llvm.hexagon.M2.naccii
    2, // llvm.hexagon.M2.subacc
    2, // llvm.hexagon.M2.vabsdiffh
    2, // llvm.hexagon.M2.vabsdiffw
    2, // llvm.hexagon.M2.vcmac.s0.sat.i
    2, // llvm.hexagon.M2.vcmac.s0.sat.r
    2, // llvm.hexagon.M2.vcmpy.s0.sat.i
    2, // llvm.hexagon.M2.vcmpy.s0.sat.r
    2, // llvm.hexagon.M2.vcmpy.s1.sat.i
    2, // llvm.hexagon.M2.vcmpy.s1.sat.r
    2, // llvm.hexagon.M2.vdmacs.s0
    2, // llvm.hexagon.M2.vdmacs.s1
    2, // llvm.hexagon.M2.vdmpyrs.s0
    2, // llvm.hexagon.M2.vdmpyrs.s1
    2, // llvm.hexagon.M2.vdmpys.s0
    2, // llvm.hexagon.M2.vdmpys.s1
    2, // llvm.hexagon.M2.vmac2
    2, // llvm.hexagon.M2.vmac2es
    2, // llvm.hexagon.M2.vmac2es.s0
    2, // llvm.hexagon.M2.vmac2es.s1
    2, // llvm.hexagon.M2.vmac2s.s0
    2, // llvm.hexagon.M2.vmac2s.s1
    2, // llvm.hexagon.M2.vmac2su.s0
    2, // llvm.hexagon.M2.vmac2su.s1
    2, // llvm.hexagon.M2.vmpy2es.s0
    2, // llvm.hexagon.M2.vmpy2es.s1
    2, // llvm.hexagon.M2.vmpy2s.s0
    2, // llvm.hexagon.M2.vmpy2s.s0pack
    2, // llvm.hexagon.M2.vmpy2s.s1
    2, // llvm.hexagon.M2.vmpy2s.s1pack
    2, // llvm.hexagon.M2.vmpy2su.s0
    2, // llvm.hexagon.M2.vmpy2su.s1
    2, // llvm.hexagon.M2.vraddh
    2, // llvm.hexagon.M2.vradduh
    2, // llvm.hexagon.M2.vrcmaci.s0
    2, // llvm.hexagon.M2.vrcmaci.s0c
    2, // llvm.hexagon.M2.vrcmacr.s0
    2, // llvm.hexagon.M2.vrcmacr.s0c
    2, // llvm.hexagon.M2.vrcmpyi.s0
    2, // llvm.hexagon.M2.vrcmpyi.s0c
    2, // llvm.hexagon.M2.vrcmpyr.s0
    2, // llvm.hexagon.M2.vrcmpyr.s0c
    2, // llvm.hexagon.M2.vrcmpys.acc.s1
    2, // llvm.hexagon.M2.vrcmpys.s1
    2, // llvm.hexagon.M2.vrcmpys.s1rp
    2, // llvm.hexagon.M2.vrmac.s0
    2, // llvm.hexagon.M2.vrmpy.s0
    2, // llvm.hexagon.M2.xor.xacc
    2, // llvm.hexagon.M4.and.and
    2, // llvm.hexagon.M4.and.andn
    2, // llvm.hexagon.M4.and.or
    2, // llvm.hexagon.M4.and.xor
    2, // llvm.hexagon.M4.cmpyi.wh
    2, // llvm.hexagon.M4.cmpyi.whc
    2, // llvm.hexagon.M4.cmpyr.wh
    2, // llvm.hexagon.M4.cmpyr.whc
    2, // llvm.hexagon.M4.mac.up.s1.sat
    209, // llvm.hexagon.M4.mpyri.addi
    22, // llvm.hexagon.M4.mpyri.addr
    59, // llvm.hexagon.M4.mpyri.addr.u2
    32, // llvm.hexagon.M4.mpyrr.addi
    2, // llvm.hexagon.M4.mpyrr.addr
    2, // llvm.hexagon.M4.nac.up.s1.sat
    2, // llvm.hexagon.M4.or.and
    2, // llvm.hexagon.M4.or.andn
    2, // llvm.hexagon.M4.or.or
    2, // llvm.hexagon.M4.or.xor
    2, // llvm.hexagon.M4.pmpyw
    2, // llvm.hexagon.M4.pmpyw.acc
    2, // llvm.hexagon.M4.vpmpyh
    2, // llvm.hexagon.M4.vpmpyh.acc
    2, // llvm.hexagon.M4.vrmpyeh.acc.s0
    2, // llvm.hexagon.M4.vrmpyeh.acc.s1
    2, // llvm.hexagon.M4.vrmpyeh.s0
    2, // llvm.hexagon.M4.vrmpyeh.s1
    2, // llvm.hexagon.M4.vrmpyoh.acc.s0
    2, // llvm.hexagon.M4.vrmpyoh.acc.s1
    2, // llvm.hexagon.M4.vrmpyoh.s0
    2, // llvm.hexagon.M4.vrmpyoh.s1
    2, // llvm.hexagon.M4.xor.and
    2, // llvm.hexagon.M4.xor.andn
    2, // llvm.hexagon.M4.xor.or
    2, // llvm.hexagon.M4.xor.xacc
    2, // llvm.hexagon.M5.vdmacbsu
    2, // llvm.hexagon.M5.vdmpybsu
    2, // llvm.hexagon.M5.vmacbsu
    2, // llvm.hexagon.M5.vmacbuu
    2, // llvm.hexagon.M5.vmpybsu
    2, // llvm.hexagon.M5.vmpybuu
    2, // llvm.hexagon.M5.vrmacbsu
    2, // llvm.hexagon.M5.vrmacbuu
    2, // llvm.hexagon.M5.vrmpybsu
    2, // llvm.hexagon.M5.vrmpybuu
    2, // llvm.hexagon.M6.vabsdiffb
    2, // llvm.hexagon.M6.vabsdiffub
    2, // llvm.hexagon.M7.dcmpyiw
    2, // llvm.hexagon.M7.dcmpyiw.acc
    2, // llvm.hexagon.M7.dcmpyiwc
    2, // llvm.hexagon.M7.dcmpyiwc.acc
    2, // llvm.hexagon.M7.dcmpyrw
    2, // llvm.hexagon.M7.dcmpyrw.acc
    2, // llvm.hexagon.M7.dcmpyrwc
    2, // llvm.hexagon.M7.dcmpyrwc.acc
    2, // llvm.hexagon.M7.vdmpy
    2, // llvm.hexagon.M7.vdmpy.acc
    2, // llvm.hexagon.M7.wcmpyiw
    2, // llvm.hexagon.M7.wcmpyiw.rnd
    2, // llvm.hexagon.M7.wcmpyiwc
    2, // llvm.hexagon.M7.wcmpyiwc.rnd
    2, // llvm.hexagon.M7.wcmpyrw
    2, // llvm.hexagon.M7.wcmpyrw.rnd
    2, // llvm.hexagon.M7.wcmpyrwc
    2, // llvm.hexagon.M7.wcmpyrwc.rnd
    22, // llvm.hexagon.S2.addasl.rrri
    59, // llvm.hexagon.S2.asl.i.p
    22, // llvm.hexagon.S2.asl.i.p.acc
    22, // llvm.hexagon.S2.asl.i.p.and
    22, // llvm.hexagon.S2.asl.i.p.nac
    22, // llvm.hexagon.S2.asl.i.p.or
    22, // llvm.hexagon.S2.asl.i.p.xacc
    59, // llvm.hexagon.S2.asl.i.r
    22, // llvm.hexagon.S2.asl.i.r.acc
    22, // llvm.hexagon.S2.asl.i.r.and
    22, // llvm.hexagon.S2.asl.i.r.nac
    22, // llvm.hexagon.S2.asl.i.r.or
    59, // llvm.hexagon.S2.asl.i.r.sat
    22, // llvm.hexagon.S2.asl.i.r.xacc
    59, // llvm.hexagon.S2.asl.i.vh
    59, // llvm.hexagon.S2.asl.i.vw
    2, // llvm.hexagon.S2.asl.r.p
    2, // llvm.hexagon.S2.asl.r.p.acc
    2, // llvm.hexagon.S2.asl.r.p.and
    2, // llvm.hexagon.S2.asl.r.p.nac
    2, // llvm.hexagon.S2.asl.r.p.or
    2, // llvm.hexagon.S2.asl.r.p.xor
    2, // llvm.hexagon.S2.asl.r.r
    2, // llvm.hexagon.S2.asl.r.r.acc
    2, // llvm.hexagon.S2.asl.r.r.and
    2, // llvm.hexagon.S2.asl.r.r.nac
    2, // llvm.hexagon.S2.asl.r.r.or
    2, // llvm.hexagon.S2.asl.r.r.sat
    2, // llvm.hexagon.S2.asl.r.vh
    2, // llvm.hexagon.S2.asl.r.vw
    59, // llvm.hexagon.S2.asr.i.p
    22, // llvm.hexagon.S2.asr.i.p.acc
    22, // llvm.hexagon.S2.asr.i.p.and
    22, // llvm.hexagon.S2.asr.i.p.nac
    22, // llvm.hexagon.S2.asr.i.p.or
    59, // llvm.hexagon.S2.asr.i.p.rnd
    59, // llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
    59, // llvm.hexagon.S2.asr.i.r
    22, // llvm.hexagon.S2.asr.i.r.acc
    22, // llvm.hexagon.S2.asr.i.r.and
    22, // llvm.hexagon.S2.asr.i.r.nac
    22, // llvm.hexagon.S2.asr.i.r.or
    59, // llvm.hexagon.S2.asr.i.r.rnd
    59, // llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
    59, // llvm.hexagon.S2.asr.i.svw.trun
    59, // llvm.hexagon.S2.asr.i.vh
    59, // llvm.hexagon.S2.asr.i.vw
    2, // llvm.hexagon.S2.asr.r.p
    2, // llvm.hexagon.S2.asr.r.p.acc
    2, // llvm.hexagon.S2.asr.r.p.and
    2, // llvm.hexagon.S2.asr.r.p.nac
    2, // llvm.hexagon.S2.asr.r.p.or
    2, // llvm.hexagon.S2.asr.r.p.xor
    2, // llvm.hexagon.S2.asr.r.r
    2, // llvm.hexagon.S2.asr.r.r.acc
    2, // llvm.hexagon.S2.asr.r.r.and
    2, // llvm.hexagon.S2.asr.r.r.nac
    2, // llvm.hexagon.S2.asr.r.r.or
    2, // llvm.hexagon.S2.asr.r.r.sat
    2, // llvm.hexagon.S2.asr.r.svw.trun
    2, // llvm.hexagon.S2.asr.r.vh
    2, // llvm.hexagon.S2.asr.r.vw
    2, // llvm.hexagon.S2.brev
    2, // llvm.hexagon.S2.brevp
    2, // llvm.hexagon.S2.cl0
    2, // llvm.hexagon.S2.cl0p
    2, // llvm.hexagon.S2.cl1
    2, // llvm.hexagon.S2.cl1p
    2, // llvm.hexagon.S2.clb
    2, // llvm.hexagon.S2.clbnorm
    2, // llvm.hexagon.S2.clbp
    59, // llvm.hexagon.S2.clrbit.i
    2, // llvm.hexagon.S2.clrbit.r
    2, // llvm.hexagon.S2.ct0
    2, // llvm.hexagon.S2.ct0p
    2, // llvm.hexagon.S2.ct1
    2, // llvm.hexagon.S2.ct1p
    2, // llvm.hexagon.S2.deinterleave
    26, // llvm.hexagon.S2.extractu
    2, // llvm.hexagon.S2.extractu.rp
    26, // llvm.hexagon.S2.extractup
    2, // llvm.hexagon.S2.extractup.rp
    227, // llvm.hexagon.S2.insert
    2, // llvm.hexagon.S2.insert.rp
    227, // llvm.hexagon.S2.insertp
    2, // llvm.hexagon.S2.insertp.rp
    2, // llvm.hexagon.S2.interleave
    2, // llvm.hexagon.S2.lfsp
    2, // llvm.hexagon.S2.lsl.r.p
    2, // llvm.hexagon.S2.lsl.r.p.acc
    2, // llvm.hexagon.S2.lsl.r.p.and
    2, // llvm.hexagon.S2.lsl.r.p.nac
    2, // llvm.hexagon.S2.lsl.r.p.or
    2, // llvm.hexagon.S2.lsl.r.p.xor
    2, // llvm.hexagon.S2.lsl.r.r
    2, // llvm.hexagon.S2.lsl.r.r.acc
    2, // llvm.hexagon.S2.lsl.r.r.and
    2, // llvm.hexagon.S2.lsl.r.r.nac
    2, // llvm.hexagon.S2.lsl.r.r.or
    2, // llvm.hexagon.S2.lsl.r.vh
    2, // llvm.hexagon.S2.lsl.r.vw
    59, // llvm.hexagon.S2.lsr.i.p
    22, // llvm.hexagon.S2.lsr.i.p.acc
    22, // llvm.hexagon.S2.lsr.i.p.and
    22, // llvm.hexagon.S2.lsr.i.p.nac
    22, // llvm.hexagon.S2.lsr.i.p.or
    22, // llvm.hexagon.S2.lsr.i.p.xacc
    59, // llvm.hexagon.S2.lsr.i.r
    22, // llvm.hexagon.S2.lsr.i.r.acc
    22, // llvm.hexagon.S2.lsr.i.r.and
    22, // llvm.hexagon.S2.lsr.i.r.nac
    22, // llvm.hexagon.S2.lsr.i.r.or
    22, // llvm.hexagon.S2.lsr.i.r.xacc
    59, // llvm.hexagon.S2.lsr.i.vh
    59, // llvm.hexagon.S2.lsr.i.vw
    2, // llvm.hexagon.S2.lsr.r.p
    2, // llvm.hexagon.S2.lsr.r.p.acc
    2, // llvm.hexagon.S2.lsr.r.p.and
    2, // llvm.hexagon.S2.lsr.r.p.nac
    2, // llvm.hexagon.S2.lsr.r.p.or
    2, // llvm.hexagon.S2.lsr.r.p.xor
    2, // llvm.hexagon.S2.lsr.r.r
    2, // llvm.hexagon.S2.lsr.r.r.acc
    2, // llvm.hexagon.S2.lsr.r.r.and
    2, // llvm.hexagon.S2.lsr.r.r.nac
    2, // llvm.hexagon.S2.lsr.r.r.or
    2, // llvm.hexagon.S2.lsr.r.vh
    2, // llvm.hexagon.S2.lsr.r.vw
    208, // llvm.hexagon.S2.mask
    2, // llvm.hexagon.S2.packhl
    2, // llvm.hexagon.S2.parityp
    59, // llvm.hexagon.S2.setbit.i
    2, // llvm.hexagon.S2.setbit.r
    2, // llvm.hexagon.S2.shuffeb
    2, // llvm.hexagon.S2.shuffeh
    2, // llvm.hexagon.S2.shuffob
    2, // llvm.hexagon.S2.shuffoh
    88, // llvm.hexagon.S2.storerb.pbr
    80, // llvm.hexagon.S2.storerb.pci
    79, // llvm.hexagon.S2.storerb.pcr
    88, // llvm.hexagon.S2.storerd.pbr
    80, // llvm.hexagon.S2.storerd.pci
    79, // llvm.hexagon.S2.storerd.pcr
    88, // llvm.hexagon.S2.storerf.pbr
    80, // llvm.hexagon.S2.storerf.pci
    79, // llvm.hexagon.S2.storerf.pcr
    88, // llvm.hexagon.S2.storerh.pbr
    80, // llvm.hexagon.S2.storerh.pci
    79, // llvm.hexagon.S2.storerh.pcr
    88, // llvm.hexagon.S2.storeri.pbr
    80, // llvm.hexagon.S2.storeri.pci
    79, // llvm.hexagon.S2.storeri.pcr
    226, // llvm.hexagon.S2.storew.locked
    2, // llvm.hexagon.S2.svsathb
    2, // llvm.hexagon.S2.svsathub
    227, // llvm.hexagon.S2.tableidxb.goodsyntax
    227, // llvm.hexagon.S2.tableidxd.goodsyntax
    227, // llvm.hexagon.S2.tableidxh.goodsyntax
    227, // llvm.hexagon.S2.tableidxw.goodsyntax
    59, // llvm.hexagon.S2.togglebit.i
    2, // llvm.hexagon.S2.togglebit.r
    59, // llvm.hexagon.S2.tstbit.i
    2, // llvm.hexagon.S2.tstbit.r
    22, // llvm.hexagon.S2.valignib
    2, // llvm.hexagon.S2.valignrb
    2, // llvm.hexagon.S2.vcnegh
    2, // llvm.hexagon.S2.vcrotate
    2, // llvm.hexagon.S2.vrcnegh
    2, // llvm.hexagon.S2.vrndpackwh
    2, // llvm.hexagon.S2.vrndpackwhs
    2, // llvm.hexagon.S2.vsathb
    2, // llvm.hexagon.S2.vsathb.nopack
    2, // llvm.hexagon.S2.vsathub
    2, // llvm.hexagon.S2.vsathub.nopack
    2, // llvm.hexagon.S2.vsatwh
    2, // llvm.hexagon.S2.vsatwh.nopack
    2, // llvm.hexagon.S2.vsatwuh
    2, // llvm.hexagon.S2.vsatwuh.nopack
    2, // llvm.hexagon.S2.vsplatrb
    2, // llvm.hexagon.S2.vsplatrh
    22, // llvm.hexagon.S2.vspliceib
    2, // llvm.hexagon.S2.vsplicerb
    2, // llvm.hexagon.S2.vsxtbh
    2, // llvm.hexagon.S2.vsxthw
    2, // llvm.hexagon.S2.vtrunehb
    2, // llvm.hexagon.S2.vtrunewh
    2, // llvm.hexagon.S2.vtrunohb
    2, // llvm.hexagon.S2.vtrunowh
    2, // llvm.hexagon.S2.vzxtbh
    2, // llvm.hexagon.S2.vzxthw
    22, // llvm.hexagon.S4.addaddi
    209, // llvm.hexagon.S4.addi.asl.ri
    209, // llvm.hexagon.S4.addi.lsr.ri
    209, // llvm.hexagon.S4.andi.asl.ri
    209, // llvm.hexagon.S4.andi.lsr.ri
    59, // llvm.hexagon.S4.clbaddi
    59, // llvm.hexagon.S4.clbpaddi
    2, // llvm.hexagon.S4.clbpnorm
    26, // llvm.hexagon.S4.extract
    2, // llvm.hexagon.S4.extract.rp
    26, // llvm.hexagon.S4.extractp
    2, // llvm.hexagon.S4.extractp.rp
    32, // llvm.hexagon.S4.lsli
    59, // llvm.hexagon.S4.ntstbit.i
    2, // llvm.hexagon.S4.ntstbit.r
    22, // llvm.hexagon.S4.or.andi
    22, // llvm.hexagon.S4.or.andix
    22, // llvm.hexagon.S4.or.ori
    209, // llvm.hexagon.S4.ori.asl.ri
    209, // llvm.hexagon.S4.ori.lsr.ri
    2, // llvm.hexagon.S4.parity
    226, // llvm.hexagon.S4.stored.locked
    59, // llvm.hexagon.S4.subaddi
    209, // llvm.hexagon.S4.subi.asl.ri
    209, // llvm.hexagon.S4.subi.lsr.ri
    22, // llvm.hexagon.S4.vrcrotate
    89, // llvm.hexagon.S4.vrcrotate.acc
    2, // llvm.hexagon.S4.vxaddsubh
    2, // llvm.hexagon.S4.vxaddsubhr
    2, // llvm.hexagon.S4.vxaddsubw
    2, // llvm.hexagon.S4.vxsubaddh
    2, // llvm.hexagon.S4.vxsubaddhr
    2, // llvm.hexagon.S4.vxsubaddw
    59, // llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
    59, // llvm.hexagon.S5.asrhub.sat
    2, // llvm.hexagon.S5.popcountp
    59, // llvm.hexagon.S5.vasrhrnd.goodsyntax
    59, // llvm.hexagon.S6.rol.i.p
    22, // llvm.hexagon.S6.rol.i.p.acc
    22, // llvm.hexagon.S6.rol.i.p.and
    22, // llvm.hexagon.S6.rol.i.p.nac
    22, // llvm.hexagon.S6.rol.i.p.or
    22, // llvm.hexagon.S6.rol.i.p.xacc
    59, // llvm.hexagon.S6.rol.i.r
    22, // llvm.hexagon.S6.rol.i.r.acc
    22, // llvm.hexagon.S6.rol.i.r.and
    22, // llvm.hexagon.S6.rol.i.r.nac
    22, // llvm.hexagon.S6.rol.i.r.or
    22, // llvm.hexagon.S6.rol.i.r.xacc
    2, // llvm.hexagon.S6.vsplatrbp
    2, // llvm.hexagon.S6.vtrunehb.ppp
    2, // llvm.hexagon.S6.vtrunohb.ppp
    2, // llvm.hexagon.V6.extractw
    2, // llvm.hexagon.V6.extractw.128B
    2, // llvm.hexagon.V6.hi
    2, // llvm.hexagon.V6.hi.128B
    2, // llvm.hexagon.V6.lo
    2, // llvm.hexagon.V6.lo.128B
    2, // llvm.hexagon.V6.lvsplatb
    2, // llvm.hexagon.V6.lvsplatb.128B
    2, // llvm.hexagon.V6.lvsplath
    2, // llvm.hexagon.V6.lvsplath.128B
    2, // llvm.hexagon.V6.lvsplatw
    2, // llvm.hexagon.V6.lvsplatw.128B
    2, // llvm.hexagon.V6.pred.and
    2, // llvm.hexagon.V6.pred.and.128B
    2, // llvm.hexagon.V6.pred.and.n
    2, // llvm.hexagon.V6.pred.and.n.128B
    2, // llvm.hexagon.V6.pred.not
    2, // llvm.hexagon.V6.pred.not.128B
    2, // llvm.hexagon.V6.pred.or
    2, // llvm.hexagon.V6.pred.or.128B
    2, // llvm.hexagon.V6.pred.or.n
    2, // llvm.hexagon.V6.pred.or.n.128B
    2, // llvm.hexagon.V6.pred.scalar2
    2, // llvm.hexagon.V6.pred.scalar2.128B
    2, // llvm.hexagon.V6.pred.scalar2v2
    2, // llvm.hexagon.V6.pred.scalar2v2.128B
    2, // llvm.hexagon.V6.pred.typecast
    2, // llvm.hexagon.V6.pred.typecast.128B
    2, // llvm.hexagon.V6.pred.xor
    2, // llvm.hexagon.V6.pred.xor.128B
    2, // llvm.hexagon.V6.shuffeqh
    2, // llvm.hexagon.V6.shuffeqh.128B
    2, // llvm.hexagon.V6.shuffeqw
    2, // llvm.hexagon.V6.shuffeqw.128B
    22, // llvm.hexagon.V6.v6mpyhubs10
    22, // llvm.hexagon.V6.v6mpyhubs10.128B
    89, // llvm.hexagon.V6.v6mpyhubs10.vxx
    89, // llvm.hexagon.V6.v6mpyhubs10.vxx.128B
    22, // llvm.hexagon.V6.v6mpyvubs10
    22, // llvm.hexagon.V6.v6mpyvubs10.128B
    89, // llvm.hexagon.V6.v6mpyvubs10.vxx
    89, // llvm.hexagon.V6.v6mpyvubs10.vxx.128B
    228, // llvm.hexagon.V6.vL32b.npred.ai
    228, // llvm.hexagon.V6.vL32b.npred.ai.128B
    228, // llvm.hexagon.V6.vL32b.npred.pi
    228, // llvm.hexagon.V6.vL32b.npred.pi.128B
    229, // llvm.hexagon.V6.vL32b.npred.ppu
    229, // llvm.hexagon.V6.vL32b.npred.ppu.128B
    228, // llvm.hexagon.V6.vL32b.nt.npred.ai
    228, // llvm.hexagon.V6.vL32b.nt.npred.ai.128B
    228, // llvm.hexagon.V6.vL32b.nt.npred.pi
    228, // llvm.hexagon.V6.vL32b.nt.npred.pi.128B
    229, // llvm.hexagon.V6.vL32b.nt.npred.ppu
    229, // llvm.hexagon.V6.vL32b.nt.npred.ppu.128B
    228, // llvm.hexagon.V6.vL32b.nt.pred.ai
    228, // llvm.hexagon.V6.vL32b.nt.pred.ai.128B
    228, // llvm.hexagon.V6.vL32b.nt.pred.pi
    228, // llvm.hexagon.V6.vL32b.nt.pred.pi.128B
    229, // llvm.hexagon.V6.vL32b.nt.pred.ppu
    229, // llvm.hexagon.V6.vL32b.nt.pred.ppu.128B
    228, // llvm.hexagon.V6.vL32b.pred.ai
    228, // llvm.hexagon.V6.vL32b.pred.ai.128B
    228, // llvm.hexagon.V6.vL32b.pred.pi
    228, // llvm.hexagon.V6.vL32b.pred.pi.128B
    229, // llvm.hexagon.V6.vL32b.pred.ppu
    229, // llvm.hexagon.V6.vL32b.pred.ppu.128B
    45, // llvm.hexagon.V6.vS32Ub.npred.ai
    45, // llvm.hexagon.V6.vS32Ub.npred.ai.128B
    45, // llvm.hexagon.V6.vS32Ub.npred.pi
    45, // llvm.hexagon.V6.vS32Ub.npred.pi.128B
    30, // llvm.hexagon.V6.vS32Ub.npred.ppu
    30, // llvm.hexagon.V6.vS32Ub.npred.ppu.128B
    45, // llvm.hexagon.V6.vS32Ub.pred.ai
    45, // llvm.hexagon.V6.vS32Ub.pred.ai.128B
    45, // llvm.hexagon.V6.vS32Ub.pred.pi
    45, // llvm.hexagon.V6.vS32Ub.pred.pi.128B
    30, // llvm.hexagon.V6.vS32Ub.pred.ppu
    30, // llvm.hexagon.V6.vS32Ub.pred.ppu.128B
    45, // llvm.hexagon.V6.vS32b.npred.ai
    45, // llvm.hexagon.V6.vS32b.npred.ai.128B
    45, // llvm.hexagon.V6.vS32b.npred.pi
    45, // llvm.hexagon.V6.vS32b.npred.pi.128B
    30, // llvm.hexagon.V6.vS32b.npred.ppu
    30, // llvm.hexagon.V6.vS32b.npred.ppu.128B
    88, // llvm.hexagon.V6.vS32b.nqpred.ai
    88, // llvm.hexagon.V6.vS32b.nqpred.ai.128B
    45, // llvm.hexagon.V6.vS32b.nt.npred.ai
    45, // llvm.hexagon.V6.vS32b.nt.npred.ai.128B
    45, // llvm.hexagon.V6.vS32b.nt.npred.pi
    45, // llvm.hexagon.V6.vS32b.nt.npred.pi.128B
    30, // llvm.hexagon.V6.vS32b.nt.npred.ppu
    30, // llvm.hexagon.V6.vS32b.nt.npred.ppu.128B
    88, // llvm.hexagon.V6.vS32b.nt.nqpred.ai
    88, // llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B
    45, // llvm.hexagon.V6.vS32b.nt.pred.ai
    45, // llvm.hexagon.V6.vS32b.nt.pred.ai.128B
    45, // llvm.hexagon.V6.vS32b.nt.pred.pi
    45, // llvm.hexagon.V6.vS32b.nt.pred.pi.128B
    30, // llvm.hexagon.V6.vS32b.nt.pred.ppu
    30, // llvm.hexagon.V6.vS32b.nt.pred.ppu.128B
    88, // llvm.hexagon.V6.vS32b.nt.qpred.ai
    88, // llvm.hexagon.V6.vS32b.nt.qpred.ai.128B
    45, // llvm.hexagon.V6.vS32b.pred.ai
    45, // llvm.hexagon.V6.vS32b.pred.ai.128B
    45, // llvm.hexagon.V6.vS32b.pred.pi
    45, // llvm.hexagon.V6.vS32b.pred.pi.128B
    30, // llvm.hexagon.V6.vS32b.pred.ppu
    30, // llvm.hexagon.V6.vS32b.pred.ppu.128B
    88, // llvm.hexagon.V6.vS32b.qpred.ai
    88, // llvm.hexagon.V6.vS32b.qpred.ai.128B
    2, // llvm.hexagon.V6.vabs.hf
    2, // llvm.hexagon.V6.vabs.hf.128B
    2, // llvm.hexagon.V6.vabs.sf
    2, // llvm.hexagon.V6.vabs.sf.128B
    2, // llvm.hexagon.V6.vabsb
    2, // llvm.hexagon.V6.vabsb.128B
    2, // llvm.hexagon.V6.vabsb.sat
    2, // llvm.hexagon.V6.vabsb.sat.128B
    2, // llvm.hexagon.V6.vabsdiffh
    2, // llvm.hexagon.V6.vabsdiffh.128B
    2, // llvm.hexagon.V6.vabsdiffub
    2, // llvm.hexagon.V6.vabsdiffub.128B
    2, // llvm.hexagon.V6.vabsdiffuh
    2, // llvm.hexagon.V6.vabsdiffuh.128B
    2, // llvm.hexagon.V6.vabsdiffw
    2, // llvm.hexagon.V6.vabsdiffw.128B
    2, // llvm.hexagon.V6.vabsh
    2, // llvm.hexagon.V6.vabsh.128B
    2, // llvm.hexagon.V6.vabsh.sat
    2, // llvm.hexagon.V6.vabsh.sat.128B
    2, // llvm.hexagon.V6.vabsw
    2, // llvm.hexagon.V6.vabsw.128B
    2, // llvm.hexagon.V6.vabsw.sat
    2, // llvm.hexagon.V6.vabsw.sat.128B
    2, // llvm.hexagon.V6.vadd.hf
    2, // llvm.hexagon.V6.vadd.hf.128B
    2, // llvm.hexagon.V6.vadd.hf.hf
    2, // llvm.hexagon.V6.vadd.hf.hf.128B
    2, // llvm.hexagon.V6.vadd.qf16
    2, // llvm.hexagon.V6.vadd.qf16.128B
    2, // llvm.hexagon.V6.vadd.qf16.mix
    2, // llvm.hexagon.V6.vadd.qf16.mix.128B
    2, // llvm.hexagon.V6.vadd.qf32
    2, // llvm.hexagon.V6.vadd.qf32.128B
    2, // llvm.hexagon.V6.vadd.qf32.mix
    2, // llvm.hexagon.V6.vadd.qf32.mix.128B
    2, // llvm.hexagon.V6.vadd.sf
    2, // llvm.hexagon.V6.vadd.sf.128B
    2, // llvm.hexagon.V6.vadd.sf.bf
    2, // llvm.hexagon.V6.vadd.sf.bf.128B
    2, // llvm.hexagon.V6.vadd.sf.hf
    2, // llvm.hexagon.V6.vadd.sf.hf.128B
    2, // llvm.hexagon.V6.vadd.sf.sf
    2, // llvm.hexagon.V6.vadd.sf.sf.128B
    2, // llvm.hexagon.V6.vaddb
    2, // llvm.hexagon.V6.vaddb.128B
    2, // llvm.hexagon.V6.vaddb.dv
    2, // llvm.hexagon.V6.vaddb.dv.128B
    2, // llvm.hexagon.V6.vaddbnq
    2, // llvm.hexagon.V6.vaddbnq.128B
    2, // llvm.hexagon.V6.vaddbq
    2, // llvm.hexagon.V6.vaddbq.128B
    2, // llvm.hexagon.V6.vaddbsat
    2, // llvm.hexagon.V6.vaddbsat.128B
    2, // llvm.hexagon.V6.vaddbsat.dv
    2, // llvm.hexagon.V6.vaddbsat.dv.128B
    2, // llvm.hexagon.V6.vaddcarry
    2, // llvm.hexagon.V6.vaddcarry.128B
    2, // llvm.hexagon.V6.vaddcarryo
    2, // llvm.hexagon.V6.vaddcarryo.128B
    2, // llvm.hexagon.V6.vaddcarrysat
    2, // llvm.hexagon.V6.vaddcarrysat.128B
    2, // llvm.hexagon.V6.vaddclbh
    2, // llvm.hexagon.V6.vaddclbh.128B
    2, // llvm.hexagon.V6.vaddclbw
    2, // llvm.hexagon.V6.vaddclbw.128B
    2, // llvm.hexagon.V6.vaddh
    2, // llvm.hexagon.V6.vaddh.128B
    2, // llvm.hexagon.V6.vaddh.dv
    2, // llvm.hexagon.V6.vaddh.dv.128B
    2, // llvm.hexagon.V6.vaddhnq
    2, // llvm.hexagon.V6.vaddhnq.128B
    2, // llvm.hexagon.V6.vaddhq
    2, // llvm.hexagon.V6.vaddhq.128B
    2, // llvm.hexagon.V6.vaddhsat
    2, // llvm.hexagon.V6.vaddhsat.128B
    2, // llvm.hexagon.V6.vaddhsat.dv
    2, // llvm.hexagon.V6.vaddhsat.dv.128B
    2, // llvm.hexagon.V6.vaddhw
    2, // llvm.hexagon.V6.vaddhw.128B
    2, // llvm.hexagon.V6.vaddhw.acc
    2, // llvm.hexagon.V6.vaddhw.acc.128B
    2, // llvm.hexagon.V6.vaddubh
    2, // llvm.hexagon.V6.vaddubh.128B
    2, // llvm.hexagon.V6.vaddubh.acc
    2, // llvm.hexagon.V6.vaddubh.acc.128B
    2, // llvm.hexagon.V6.vaddubsat
    2, // llvm.hexagon.V6.vaddubsat.128B
    2, // llvm.hexagon.V6.vaddubsat.dv
    2, // llvm.hexagon.V6.vaddubsat.dv.128B
    2, // llvm.hexagon.V6.vaddububb.sat
    2, // llvm.hexagon.V6.vaddububb.sat.128B
    2, // llvm.hexagon.V6.vadduhsat
    2, // llvm.hexagon.V6.vadduhsat.128B
    2, // llvm.hexagon.V6.vadduhsat.dv
    2, // llvm.hexagon.V6.vadduhsat.dv.128B
    2, // llvm.hexagon.V6.vadduhw
    2, // llvm.hexagon.V6.vadduhw.128B
    2, // llvm.hexagon.V6.vadduhw.acc
    2, // llvm.hexagon.V6.vadduhw.acc.128B
    2, // llvm.hexagon.V6.vadduwsat
    2, // llvm.hexagon.V6.vadduwsat.128B
    2, // llvm.hexagon.V6.vadduwsat.dv
    2, // llvm.hexagon.V6.vadduwsat.dv.128B
    2, // llvm.hexagon.V6.vaddw
    2, // llvm.hexagon.V6.vaddw.128B
    2, // llvm.hexagon.V6.vaddw.dv
    2, // llvm.hexagon.V6.vaddw.dv.128B
    2, // llvm.hexagon.V6.vaddwnq
    2, // llvm.hexagon.V6.vaddwnq.128B
    2, // llvm.hexagon.V6.vaddwq
    2, // llvm.hexagon.V6.vaddwq.128B
    2, // llvm.hexagon.V6.vaddwsat
    2, // llvm.hexagon.V6.vaddwsat.128B
    2, // llvm.hexagon.V6.vaddwsat.dv
    2, // llvm.hexagon.V6.vaddwsat.dv.128B
    2, // llvm.hexagon.V6.valignb
    2, // llvm.hexagon.V6.valignb.128B
    22, // llvm.hexagon.V6.valignbi
    22, // llvm.hexagon.V6.valignbi.128B
    2, // llvm.hexagon.V6.vand
    2, // llvm.hexagon.V6.vand.128B
    2, // llvm.hexagon.V6.vandnqrt
    2, // llvm.hexagon.V6.vandnqrt.128B
    2, // llvm.hexagon.V6.vandnqrt.acc
    2, // llvm.hexagon.V6.vandnqrt.acc.128B
    2, // llvm.hexagon.V6.vandqrt
    2, // llvm.hexagon.V6.vandqrt.128B
    2, // llvm.hexagon.V6.vandqrt.acc
    2, // llvm.hexagon.V6.vandqrt.acc.128B
    2, // llvm.hexagon.V6.vandvnqv
    2, // llvm.hexagon.V6.vandvnqv.128B
    2, // llvm.hexagon.V6.vandvqv
    2, // llvm.hexagon.V6.vandvqv.128B
    2, // llvm.hexagon.V6.vandvrt
    2, // llvm.hexagon.V6.vandvrt.128B
    2, // llvm.hexagon.V6.vandvrt.acc
    2, // llvm.hexagon.V6.vandvrt.acc.128B
    2, // llvm.hexagon.V6.vaslh
    2, // llvm.hexagon.V6.vaslh.128B
    2, // llvm.hexagon.V6.vaslh.acc
    2, // llvm.hexagon.V6.vaslh.acc.128B
    2, // llvm.hexagon.V6.vaslhv
    2, // llvm.hexagon.V6.vaslhv.128B
    2, // llvm.hexagon.V6.vaslw
    2, // llvm.hexagon.V6.vaslw.128B
    2, // llvm.hexagon.V6.vaslw.acc
    2, // llvm.hexagon.V6.vaslw.acc.128B
    2, // llvm.hexagon.V6.vaslwv
    2, // llvm.hexagon.V6.vaslwv.128B
    2, // llvm.hexagon.V6.vasr.into
    2, // llvm.hexagon.V6.vasr.into.128B
    2, // llvm.hexagon.V6.vasrh
    2, // llvm.hexagon.V6.vasrh.128B
    2, // llvm.hexagon.V6.vasrh.acc
    2, // llvm.hexagon.V6.vasrh.acc.128B
    2, // llvm.hexagon.V6.vasrhbrndsat
    2, // llvm.hexagon.V6.vasrhbrndsat.128B
    2, // llvm.hexagon.V6.vasrhbsat
    2, // llvm.hexagon.V6.vasrhbsat.128B
    2, // llvm.hexagon.V6.vasrhubrndsat
    2, // llvm.hexagon.V6.vasrhubrndsat.128B
    2, // llvm.hexagon.V6.vasrhubsat
    2, // llvm.hexagon.V6.vasrhubsat.128B
    2, // llvm.hexagon.V6.vasrhv
    2, // llvm.hexagon.V6.vasrhv.128B
    2, // llvm.hexagon.V6.vasruhubrndsat
    2, // llvm.hexagon.V6.vasruhubrndsat.128B
    2, // llvm.hexagon.V6.vasruhubsat
    2, // llvm.hexagon.V6.vasruhubsat.128B
    2, // llvm.hexagon.V6.vasruwuhrndsat
    2, // llvm.hexagon.V6.vasruwuhrndsat.128B
    2, // llvm.hexagon.V6.vasruwuhsat
    2, // llvm.hexagon.V6.vasruwuhsat.128B
    2, // llvm.hexagon.V6.vasrvuhubrndsat
    2, // llvm.hexagon.V6.vasrvuhubrndsat.128B
    2, // llvm.hexagon.V6.vasrvuhubsat
    2, // llvm.hexagon.V6.vasrvuhubsat.128B
    2, // llvm.hexagon.V6.vasrvwuhrndsat
    2, // llvm.hexagon.V6.vasrvwuhrndsat.128B
    2, // llvm.hexagon.V6.vasrvwuhsat
    2, // llvm.hexagon.V6.vasrvwuhsat.128B
    2, // llvm.hexagon.V6.vasrw
    2, // llvm.hexagon.V6.vasrw.128B
    2, // llvm.hexagon.V6.vasrw.acc
    2, // llvm.hexagon.V6.vasrw.acc.128B
    2, // llvm.hexagon.V6.vasrwh
    2, // llvm.hexagon.V6.vasrwh.128B
    2, // llvm.hexagon.V6.vasrwhrndsat
    2, // llvm.hexagon.V6.vasrwhrndsat.128B
    2, // llvm.hexagon.V6.vasrwhsat
    2, // llvm.hexagon.V6.vasrwhsat.128B
    2, // llvm.hexagon.V6.vasrwuhrndsat
    2, // llvm.hexagon.V6.vasrwuhrndsat.128B
    2, // llvm.hexagon.V6.vasrwuhsat
    2, // llvm.hexagon.V6.vasrwuhsat.128B
    2, // llvm.hexagon.V6.vasrwv
    2, // llvm.hexagon.V6.vasrwv.128B
    2, // llvm.hexagon.V6.vassign
    2, // llvm.hexagon.V6.vassign.128B
    2, // llvm.hexagon.V6.vassign.fp
    2, // llvm.hexagon.V6.vassign.fp.128B
    2, // llvm.hexagon.V6.vassignp
    2, // llvm.hexagon.V6.vassignp.128B
    2, // llvm.hexagon.V6.vavgb
    2, // llvm.hexagon.V6.vavgb.128B
    2, // llvm.hexagon.V6.vavgbrnd
    2, // llvm.hexagon.V6.vavgbrnd.128B
    2, // llvm.hexagon.V6.vavgh
    2, // llvm.hexagon.V6.vavgh.128B
    2, // llvm.hexagon.V6.vavghrnd
    2, // llvm.hexagon.V6.vavghrnd.128B
    2, // llvm.hexagon.V6.vavgub
    2, // llvm.hexagon.V6.vavgub.128B
    2, // llvm.hexagon.V6.vavgubrnd
    2, // llvm.hexagon.V6.vavgubrnd.128B
    2, // llvm.hexagon.V6.vavguh
    2, // llvm.hexagon.V6.vavguh.128B
    2, // llvm.hexagon.V6.vavguhrnd
    2, // llvm.hexagon.V6.vavguhrnd.128B
    2, // llvm.hexagon.V6.vavguw
    2, // llvm.hexagon.V6.vavguw.128B
    2, // llvm.hexagon.V6.vavguwrnd
    2, // llvm.hexagon.V6.vavguwrnd.128B
    2, // llvm.hexagon.V6.vavgw
    2, // llvm.hexagon.V6.vavgw.128B
    2, // llvm.hexagon.V6.vavgwrnd
    2, // llvm.hexagon.V6.vavgwrnd.128B
    2, // llvm.hexagon.V6.vcl0h
    2, // llvm.hexagon.V6.vcl0h.128B
    2, // llvm.hexagon.V6.vcl0w
    2, // llvm.hexagon.V6.vcl0w.128B
    2, // llvm.hexagon.V6.vcombine
    2, // llvm.hexagon.V6.vcombine.128B
    2, // llvm.hexagon.V6.vconv.h.hf
    2, // llvm.hexagon.V6.vconv.h.hf.128B
    2, // llvm.hexagon.V6.vconv.hf.h
    2, // llvm.hexagon.V6.vconv.hf.h.128B
    2, // llvm.hexagon.V6.vconv.hf.qf16
    2, // llvm.hexagon.V6.vconv.hf.qf16.128B
    2, // llvm.hexagon.V6.vconv.hf.qf32
    2, // llvm.hexagon.V6.vconv.hf.qf32.128B
    2, // llvm.hexagon.V6.vconv.sf.qf32
    2, // llvm.hexagon.V6.vconv.sf.qf32.128B
    2, // llvm.hexagon.V6.vconv.sf.w
    2, // llvm.hexagon.V6.vconv.sf.w.128B
    2, // llvm.hexagon.V6.vconv.w.sf
    2, // llvm.hexagon.V6.vconv.w.sf.128B
    2, // llvm.hexagon.V6.vcvt.b.hf
    2, // llvm.hexagon.V6.vcvt.b.hf.128B
    2, // llvm.hexagon.V6.vcvt.bf.sf
    2, // llvm.hexagon.V6.vcvt.bf.sf.128B
    2, // llvm.hexagon.V6.vcvt.h.hf
    2, // llvm.hexagon.V6.vcvt.h.hf.128B
    2, // llvm.hexagon.V6.vcvt.hf.b
    2, // llvm.hexagon.V6.vcvt.hf.b.128B
    2, // llvm.hexagon.V6.vcvt.hf.h
    2, // llvm.hexagon.V6.vcvt.hf.h.128B
    2, // llvm.hexagon.V6.vcvt.hf.sf
    2, // llvm.hexagon.V6.vcvt.hf.sf.128B
    2, // llvm.hexagon.V6.vcvt.hf.ub
    2, // llvm.hexagon.V6.vcvt.hf.ub.128B
    2, // llvm.hexagon.V6.vcvt.hf.uh
    2, // llvm.hexagon.V6.vcvt.hf.uh.128B
    2, // llvm.hexagon.V6.vcvt.sf.hf
    2, // llvm.hexagon.V6.vcvt.sf.hf.128B
    2, // llvm.hexagon.V6.vcvt.ub.hf
    2, // llvm.hexagon.V6.vcvt.ub.hf.128B
    2, // llvm.hexagon.V6.vcvt.uh.hf
    2, // llvm.hexagon.V6.vcvt.uh.hf.128B
    2, // llvm.hexagon.V6.vd0
    2, // llvm.hexagon.V6.vd0.128B
    2, // llvm.hexagon.V6.vdd0
    2, // llvm.hexagon.V6.vdd0.128B
    2, // llvm.hexagon.V6.vdealb
    2, // llvm.hexagon.V6.vdealb.128B
    2, // llvm.hexagon.V6.vdealb4w
    2, // llvm.hexagon.V6.vdealb4w.128B
    2, // llvm.hexagon.V6.vdealh
    2, // llvm.hexagon.V6.vdealh.128B
    2, // llvm.hexagon.V6.vdealvdd
    2, // llvm.hexagon.V6.vdealvdd.128B
    2, // llvm.hexagon.V6.vdelta
    2, // llvm.hexagon.V6.vdelta.128B
    2, // llvm.hexagon.V6.vdmpy.sf.hf
    2, // llvm.hexagon.V6.vdmpy.sf.hf.128B
    2, // llvm.hexagon.V6.vdmpy.sf.hf.acc
    2, // llvm.hexagon.V6.vdmpy.sf.hf.acc.128B
    2, // llvm.hexagon.V6.vdmpybus
    2, // llvm.hexagon.V6.vdmpybus.128B
    2, // llvm.hexagon.V6.vdmpybus.acc
    2, // llvm.hexagon.V6.vdmpybus.acc.128B
    2, // llvm.hexagon.V6.vdmpybus.dv
    2, // llvm.hexagon.V6.vdmpybus.dv.128B
    2, // llvm.hexagon.V6.vdmpybus.dv.acc
    2, // llvm.hexagon.V6.vdmpybus.dv.acc.128B
    2, // llvm.hexagon.V6.vdmpyhb
    2, // llvm.hexagon.V6.vdmpyhb.128B
    2, // llvm.hexagon.V6.vdmpyhb.acc
    2, // llvm.hexagon.V6.vdmpyhb.acc.128B
    2, // llvm.hexagon.V6.vdmpyhb.dv
    2, // llvm.hexagon.V6.vdmpyhb.dv.128B
    2, // llvm.hexagon.V6.vdmpyhb.dv.acc
    2, // llvm.hexagon.V6.vdmpyhb.dv.acc.128B
    2, // llvm.hexagon.V6.vdmpyhisat
    2, // llvm.hexagon.V6.vdmpyhisat.128B
    2, // llvm.hexagon.V6.vdmpyhisat.acc
    2, // llvm.hexagon.V6.vdmpyhisat.acc.128B
    2, // llvm.hexagon.V6.vdmpyhsat
    2, // llvm.hexagon.V6.vdmpyhsat.128B
    2, // llvm.hexagon.V6.vdmpyhsat.acc
    2, // llvm.hexagon.V6.vdmpyhsat.acc.128B
    2, // llvm.hexagon.V6.vdmpyhsuisat
    2, // llvm.hexagon.V6.vdmpyhsuisat.128B
    2, // llvm.hexagon.V6.vdmpyhsuisat.acc
    2, // llvm.hexagon.V6.vdmpyhsuisat.acc.128B
    2, // llvm.hexagon.V6.vdmpyhsusat
    2, // llvm.hexagon.V6.vdmpyhsusat.128B
    2, // llvm.hexagon.V6.vdmpyhsusat.acc
    2, // llvm.hexagon.V6.vdmpyhsusat.acc.128B
    2, // llvm.hexagon.V6.vdmpyhvsat
    2, // llvm.hexagon.V6.vdmpyhvsat.128B
    2, // llvm.hexagon.V6.vdmpyhvsat.acc
    2, // llvm.hexagon.V6.vdmpyhvsat.acc.128B
    2, // llvm.hexagon.V6.vdsaduh
    2, // llvm.hexagon.V6.vdsaduh.128B
    2, // llvm.hexagon.V6.vdsaduh.acc
    2, // llvm.hexagon.V6.vdsaduh.acc.128B
    2, // llvm.hexagon.V6.veqb
    2, // llvm.hexagon.V6.veqb.128B
    2, // llvm.hexagon.V6.veqb.and
    2, // llvm.hexagon.V6.veqb.and.128B
    2, // llvm.hexagon.V6.veqb.or
    2, // llvm.hexagon.V6.veqb.or.128B
    2, // llvm.hexagon.V6.veqb.xor
    2, // llvm.hexagon.V6.veqb.xor.128B
    2, // llvm.hexagon.V6.veqh
    2, // llvm.hexagon.V6.veqh.128B
    2, // llvm.hexagon.V6.veqh.and
    2, // llvm.hexagon.V6.veqh.and.128B
    2, // llvm.hexagon.V6.veqh.or
    2, // llvm.hexagon.V6.veqh.or.128B
    2, // llvm.hexagon.V6.veqh.xor
    2, // llvm.hexagon.V6.veqh.xor.128B
    2, // llvm.hexagon.V6.veqw
    2, // llvm.hexagon.V6.veqw.128B
    2, // llvm.hexagon.V6.veqw.and
    2, // llvm.hexagon.V6.veqw.and.128B
    2, // llvm.hexagon.V6.veqw.or
    2, // llvm.hexagon.V6.veqw.or.128B
    2, // llvm.hexagon.V6.veqw.xor
    2, // llvm.hexagon.V6.veqw.xor.128B
    2, // llvm.hexagon.V6.vfmax.hf
    2, // llvm.hexagon.V6.vfmax.hf.128B
    2, // llvm.hexagon.V6.vfmax.sf
    2, // llvm.hexagon.V6.vfmax.sf.128B
    2, // llvm.hexagon.V6.vfmin.hf
    2, // llvm.hexagon.V6.vfmin.hf.128B
    2, // llvm.hexagon.V6.vfmin.sf
    2, // llvm.hexagon.V6.vfmin.sf.128B
    2, // llvm.hexagon.V6.vfneg.hf
    2, // llvm.hexagon.V6.vfneg.hf.128B
    2, // llvm.hexagon.V6.vfneg.sf
    2, // llvm.hexagon.V6.vfneg.sf.128B
    219, // llvm.hexagon.V6.vgathermh
    219, // llvm.hexagon.V6.vgathermh.128B
    219, // llvm.hexagon.V6.vgathermhq
    219, // llvm.hexagon.V6.vgathermhq.128B
    219, // llvm.hexagon.V6.vgathermhw
    219, // llvm.hexagon.V6.vgathermhw.128B
    219, // llvm.hexagon.V6.vgathermhwq
    219, // llvm.hexagon.V6.vgathermhwq.128B
    219, // llvm.hexagon.V6.vgathermw
    219, // llvm.hexagon.V6.vgathermw.128B
    219, // llvm.hexagon.V6.vgathermwq
    219, // llvm.hexagon.V6.vgathermwq.128B
    2, // llvm.hexagon.V6.vgtb
    2, // llvm.hexagon.V6.vgtb.128B
    2, // llvm.hexagon.V6.vgtb.and
    2, // llvm.hexagon.V6.vgtb.and.128B
    2, // llvm.hexagon.V6.vgtb.or
    2, // llvm.hexagon.V6.vgtb.or.128B
    2, // llvm.hexagon.V6.vgtb.xor
    2, // llvm.hexagon.V6.vgtb.xor.128B
    2, // llvm.hexagon.V6.vgtbf
    2, // llvm.hexagon.V6.vgtbf.128B
    2, // llvm.hexagon.V6.vgtbf.and
    2, // llvm.hexagon.V6.vgtbf.and.128B
    2, // llvm.hexagon.V6.vgtbf.or
    2, // llvm.hexagon.V6.vgtbf.or.128B
    2, // llvm.hexagon.V6.vgtbf.xor
    2, // llvm.hexagon.V6.vgtbf.xor.128B
    2, // llvm.hexagon.V6.vgth
    2, // llvm.hexagon.V6.vgth.128B
    2, // llvm.hexagon.V6.vgth.and
    2, // llvm.hexagon.V6.vgth.and.128B
    2, // llvm.hexagon.V6.vgth.or
    2, // llvm.hexagon.V6.vgth.or.128B
    2, // llvm.hexagon.V6.vgth.xor
    2, // llvm.hexagon.V6.vgth.xor.128B
    2, // llvm.hexagon.V6.vgthf
    2, // llvm.hexagon.V6.vgthf.128B
    2, // llvm.hexagon.V6.vgthf.and
    2, // llvm.hexagon.V6.vgthf.and.128B
    2, // llvm.hexagon.V6.vgthf.or
    2, // llvm.hexagon.V6.vgthf.or.128B
    2, // llvm.hexagon.V6.vgthf.xor
    2, // llvm.hexagon.V6.vgthf.xor.128B
    2, // llvm.hexagon.V6.vgtsf
    2, // llvm.hexagon.V6.vgtsf.128B
    2, // llvm.hexagon.V6.vgtsf.and
    2, // llvm.hexagon.V6.vgtsf.and.128B
    2, // llvm.hexagon.V6.vgtsf.or
    2, // llvm.hexagon.V6.vgtsf.or.128B
    2, // llvm.hexagon.V6.vgtsf.xor
    2, // llvm.hexagon.V6.vgtsf.xor.128B
    2, // llvm.hexagon.V6.vgtub
    2, // llvm.hexagon.V6.vgtub.128B
    2, // llvm.hexagon.V6.vgtub.and
    2, // llvm.hexagon.V6.vgtub.and.128B
    2, // llvm.hexagon.V6.vgtub.or
    2, // llvm.hexagon.V6.vgtub.or.128B
    2, // llvm.hexagon.V6.vgtub.xor
    2, // llvm.hexagon.V6.vgtub.xor.128B
    2, // llvm.hexagon.V6.vgtuh
    2, // llvm.hexagon.V6.vgtuh.128B
    2, // llvm.hexagon.V6.vgtuh.and
    2, // llvm.hexagon.V6.vgtuh.and.128B
    2, // llvm.hexagon.V6.vgtuh.or
    2, // llvm.hexagon.V6.vgtuh.or.128B
    2, // llvm.hexagon.V6.vgtuh.xor
    2, // llvm.hexagon.V6.vgtuh.xor.128B
    2, // llvm.hexagon.V6.vgtuw
    2, // llvm.hexagon.V6.vgtuw.128B
    2, // llvm.hexagon.V6.vgtuw.and
    2, // llvm.hexagon.V6.vgtuw.and.128B
    2, // llvm.hexagon.V6.vgtuw.or
    2, // llvm.hexagon.V6.vgtuw.or.128B
    2, // llvm.hexagon.V6.vgtuw.xor
    2, // llvm.hexagon.V6.vgtuw.xor.128B
    2, // llvm.hexagon.V6.vgtw
    2, // llvm.hexagon.V6.vgtw.128B
    2, // llvm.hexagon.V6.vgtw.and
    2, // llvm.hexagon.V6.vgtw.and.128B
    2, // llvm.hexagon.V6.vgtw.or
    2, // llvm.hexagon.V6.vgtw.or.128B
    2, // llvm.hexagon.V6.vgtw.xor
    2, // llvm.hexagon.V6.vgtw.xor.128B
    2, // llvm.hexagon.V6.vinsertwr
    2, // llvm.hexagon.V6.vinsertwr.128B
    2, // llvm.hexagon.V6.vlalignb
    2, // llvm.hexagon.V6.vlalignb.128B
    22, // llvm.hexagon.V6.vlalignbi
    22, // llvm.hexagon.V6.vlalignbi.128B
    2, // llvm.hexagon.V6.vlsrb
    2, // llvm.hexagon.V6.vlsrb.128B
    2, // llvm.hexagon.V6.vlsrh
    2, // llvm.hexagon.V6.vlsrh.128B
    2, // llvm.hexagon.V6.vlsrhv
    2, // llvm.hexagon.V6.vlsrhv.128B
    2, // llvm.hexagon.V6.vlsrw
    2, // llvm.hexagon.V6.vlsrw.128B
    2, // llvm.hexagon.V6.vlsrwv
    2, // llvm.hexagon.V6.vlsrwv.128B
    2, // llvm.hexagon.V6.vlut4
    2, // llvm.hexagon.V6.vlut4.128B
    2, // llvm.hexagon.V6.vlutvvb
    2, // llvm.hexagon.V6.vlutvvb.128B
    2, // llvm.hexagon.V6.vlutvvb.nm
    2, // llvm.hexagon.V6.vlutvvb.nm.128B
    2, // llvm.hexagon.V6.vlutvvb.oracc
    2, // llvm.hexagon.V6.vlutvvb.oracc.128B
    89, // llvm.hexagon.V6.vlutvvb.oracci
    89, // llvm.hexagon.V6.vlutvvb.oracci.128B
    22, // llvm.hexagon.V6.vlutvvbi
    22, // llvm.hexagon.V6.vlutvvbi.128B
    2, // llvm.hexagon.V6.vlutvwh
    2, // llvm.hexagon.V6.vlutvwh.128B
    2, // llvm.hexagon.V6.vlutvwh.nm
    2, // llvm.hexagon.V6.vlutvwh.nm.128B
    2, // llvm.hexagon.V6.vlutvwh.oracc
    2, // llvm.hexagon.V6.vlutvwh.oracc.128B
    89, // llvm.hexagon.V6.vlutvwh.oracci
    89, // llvm.hexagon.V6.vlutvwh.oracci.128B
    22, // llvm.hexagon.V6.vlutvwhi
    22, // llvm.hexagon.V6.vlutvwhi.128B
    88, // llvm.hexagon.V6.vmaskedstorenq
    88, // llvm.hexagon.V6.vmaskedstorenq.128B
    88, // llvm.hexagon.V6.vmaskedstorentnq
    88, // llvm.hexagon.V6.vmaskedstorentnq.128B
    88, // llvm.hexagon.V6.vmaskedstorentq
    88, // llvm.hexagon.V6.vmaskedstorentq.128B
    88, // llvm.hexagon.V6.vmaskedstoreq
    88, // llvm.hexagon.V6.vmaskedstoreq.128B
    2, // llvm.hexagon.V6.vmax.bf
    2, // llvm.hexagon.V6.vmax.bf.128B
    2, // llvm.hexagon.V6.vmax.hf
    2, // llvm.hexagon.V6.vmax.hf.128B
    2, // llvm.hexagon.V6.vmax.sf
    2, // llvm.hexagon.V6.vmax.sf.128B
    2, // llvm.hexagon.V6.vmaxb
    2, // llvm.hexagon.V6.vmaxb.128B
    2, // llvm.hexagon.V6.vmaxh
    2, // llvm.hexagon.V6.vmaxh.128B
    2, // llvm.hexagon.V6.vmaxub
    2, // llvm.hexagon.V6.vmaxub.128B
    2, // llvm.hexagon.V6.vmaxuh
    2, // llvm.hexagon.V6.vmaxuh.128B
    2, // llvm.hexagon.V6.vmaxw
    2, // llvm.hexagon.V6.vmaxw.128B
    2, // llvm.hexagon.V6.vmin.bf
    2, // llvm.hexagon.V6.vmin.bf.128B
    2, // llvm.hexagon.V6.vmin.hf
    2, // llvm.hexagon.V6.vmin.hf.128B
    2, // llvm.hexagon.V6.vmin.sf
    2, // llvm.hexagon.V6.vmin.sf.128B
    2, // llvm.hexagon.V6.vminb
    2, // llvm.hexagon.V6.vminb.128B
    2, // llvm.hexagon.V6.vminh
    2, // llvm.hexagon.V6.vminh.128B
    2, // llvm.hexagon.V6.vminub
    2, // llvm.hexagon.V6.vminub.128B
    2, // llvm.hexagon.V6.vminuh
    2, // llvm.hexagon.V6.vminuh.128B
    2, // llvm.hexagon.V6.vminw
    2, // llvm.hexagon.V6.vminw.128B
    2, // llvm.hexagon.V6.vmpabus
    2, // llvm.hexagon.V6.vmpabus.128B
    2, // llvm.hexagon.V6.vmpabus.acc
    2, // llvm.hexagon.V6.vmpabus.acc.128B
    2, // llvm.hexagon.V6.vmpabusv
    2, // llvm.hexagon.V6.vmpabusv.128B
    2, // llvm.hexagon.V6.vmpabuu
    2, // llvm.hexagon.V6.vmpabuu.128B
    2, // llvm.hexagon.V6.vmpabuu.acc
    2, // llvm.hexagon.V6.vmpabuu.acc.128B
    2, // llvm.hexagon.V6.vmpabuuv
    2, // llvm.hexagon.V6.vmpabuuv.128B
    2, // llvm.hexagon.V6.vmpahb
    2, // llvm.hexagon.V6.vmpahb.128B
    2, // llvm.hexagon.V6.vmpahb.acc
    2, // llvm.hexagon.V6.vmpahb.acc.128B
    2, // llvm.hexagon.V6.vmpahhsat
    2, // llvm.hexagon.V6.vmpahhsat.128B
    2, // llvm.hexagon.V6.vmpauhb
    2, // llvm.hexagon.V6.vmpauhb.128B
    2, // llvm.hexagon.V6.vmpauhb.acc
    2, // llvm.hexagon.V6.vmpauhb.acc.128B
    2, // llvm.hexagon.V6.vmpauhuhsat
    2, // llvm.hexagon.V6.vmpauhuhsat.128B
    2, // llvm.hexagon.V6.vmpsuhuhsat
    2, // llvm.hexagon.V6.vmpsuhuhsat.128B
    2, // llvm.hexagon.V6.vmpy.hf.hf
    2, // llvm.hexagon.V6.vmpy.hf.hf.128B
    2, // llvm.hexagon.V6.vmpy.hf.hf.acc
    2, // llvm.hexagon.V6.vmpy.hf.hf.acc.128B
    2, // llvm.hexagon.V6.vmpy.qf16
    2, // llvm.hexagon.V6.vmpy.qf16.128B
    2, // llvm.hexagon.V6.vmpy.qf16.hf
    2, // llvm.hexagon.V6.vmpy.qf16.hf.128B
    2, // llvm.hexagon.V6.vmpy.qf16.mix.hf
    2, // llvm.hexagon.V6.vmpy.qf16.mix.hf.128B
    2, // llvm.hexagon.V6.vmpy.qf32
    2, // llvm.hexagon.V6.vmpy.qf32.128B
    2, // llvm.hexagon.V6.vmpy.qf32.hf
    2, // llvm.hexagon.V6.vmpy.qf32.hf.128B
    2, // llvm.hexagon.V6.vmpy.qf32.mix.hf
    2, // llvm.hexagon.V6.vmpy.qf32.mix.hf.128B
    2, // llvm.hexagon.V6.vmpy.qf32.qf16
    2, // llvm.hexagon.V6.vmpy.qf32.qf16.128B
    2, // llvm.hexagon.V6.vmpy.qf32.sf
    2, // llvm.hexagon.V6.vmpy.qf32.sf.128B
    2, // llvm.hexagon.V6.vmpy.sf.bf
    2, // llvm.hexagon.V6.vmpy.sf.bf.128B
    2, // llvm.hexagon.V6.vmpy.sf.bf.acc
    2, // llvm.hexagon.V6.vmpy.sf.bf.acc.128B
    2, // llvm.hexagon.V6.vmpy.sf.hf
    2, // llvm.hexagon.V6.vmpy.sf.hf.128B
    2, // llvm.hexagon.V6.vmpy.sf.hf.acc
    2, // llvm.hexagon.V6.vmpy.sf.hf.acc.128B
    2, // llvm.hexagon.V6.vmpy.sf.sf
    2, // llvm.hexagon.V6.vmpy.sf.sf.128B
    2, // llvm.hexagon.V6.vmpybus
    2, // llvm.hexagon.V6.vmpybus.128B
    2, // llvm.hexagon.V6.vmpybus.acc
    2, // llvm.hexagon.V6.vmpybus.acc.128B
    2, // llvm.hexagon.V6.vmpybusv
    2, // llvm.hexagon.V6.vmpybusv.128B
    2, // llvm.hexagon.V6.vmpybusv.acc
    2, // llvm.hexagon.V6.vmpybusv.acc.128B
    2, // llvm.hexagon.V6.vmpybv
    2, // llvm.hexagon.V6.vmpybv.128B
    2, // llvm.hexagon.V6.vmpybv.acc
    2, // llvm.hexagon.V6.vmpybv.acc.128B
    2, // llvm.hexagon.V6.vmpyewuh
    2, // llvm.hexagon.V6.vmpyewuh.128B
    2, // llvm.hexagon.V6.vmpyewuh.64
    2, // llvm.hexagon.V6.vmpyewuh.64.128B
    2, // llvm.hexagon.V6.vmpyh
    2, // llvm.hexagon.V6.vmpyh.128B
    2, // llvm.hexagon.V6.vmpyh.acc
    2, // llvm.hexagon.V6.vmpyh.acc.128B
    2, // llvm.hexagon.V6.vmpyhsat.acc
    2, // llvm.hexagon.V6.vmpyhsat.acc.128B
    2, // llvm.hexagon.V6.vmpyhsrs
    2, // llvm.hexagon.V6.vmpyhsrs.128B
    2, // llvm.hexagon.V6.vmpyhss
    2, // llvm.hexagon.V6.vmpyhss.128B
    2, // llvm.hexagon.V6.vmpyhus
    2, // llvm.hexagon.V6.vmpyhus.128B
    2, // llvm.hexagon.V6.vmpyhus.acc
    2, // llvm.hexagon.V6.vmpyhus.acc.128B
    2, // llvm.hexagon.V6.vmpyhv
    2, // llvm.hexagon.V6.vmpyhv.128B
    2, // llvm.hexagon.V6.vmpyhv.acc
    2, // llvm.hexagon.V6.vmpyhv.acc.128B
    2, // llvm.hexagon.V6.vmpyhvsrs
    2, // llvm.hexagon.V6.vmpyhvsrs.128B
    2, // llvm.hexagon.V6.vmpyieoh
    2, // llvm.hexagon.V6.vmpyieoh.128B
    2, // llvm.hexagon.V6.vmpyiewh.acc
    2, // llvm.hexagon.V6.vmpyiewh.acc.128B
    2, // llvm.hexagon.V6.vmpyiewuh
    2, // llvm.hexagon.V6.vmpyiewuh.128B
    2, // llvm.hexagon.V6.vmpyiewuh.acc
    2, // llvm.hexagon.V6.vmpyiewuh.acc.128B
    2, // llvm.hexagon.V6.vmpyih
    2, // llvm.hexagon.V6.vmpyih.128B
    2, // llvm.hexagon.V6.vmpyih.acc
    2, // llvm.hexagon.V6.vmpyih.acc.128B
    2, // llvm.hexagon.V6.vmpyihb
    2, // llvm.hexagon.V6.vmpyihb.128B
    2, // llvm.hexagon.V6.vmpyihb.acc
    2, // llvm.hexagon.V6.vmpyihb.acc.128B
    2, // llvm.hexagon.V6.vmpyiowh
    2, // llvm.hexagon.V6.vmpyiowh.128B
    2, // llvm.hexagon.V6.vmpyiwb
    2, // llvm.hexagon.V6.vmpyiwb.128B
    2, // llvm.hexagon.V6.vmpyiwb.acc
    2, // llvm.hexagon.V6.vmpyiwb.acc.128B
    2, // llvm.hexagon.V6.vmpyiwh
    2, // llvm.hexagon.V6.vmpyiwh.128B
    2, // llvm.hexagon.V6.vmpyiwh.acc
    2, // llvm.hexagon.V6.vmpyiwh.acc.128B
    2, // llvm.hexagon.V6.vmpyiwub
    2, // llvm.hexagon.V6.vmpyiwub.128B
    2, // llvm.hexagon.V6.vmpyiwub.acc
    2, // llvm.hexagon.V6.vmpyiwub.acc.128B
    2, // llvm.hexagon.V6.vmpyowh
    2, // llvm.hexagon.V6.vmpyowh.128B
    2, // llvm.hexagon.V6.vmpyowh.64.acc
    2, // llvm.hexagon.V6.vmpyowh.64.acc.128B
    2, // llvm.hexagon.V6.vmpyowh.rnd
    2, // llvm.hexagon.V6.vmpyowh.rnd.128B
    2, // llvm.hexagon.V6.vmpyowh.rnd.sacc
    2, // llvm.hexagon.V6.vmpyowh.rnd.sacc.128B
    2, // llvm.hexagon.V6.vmpyowh.sacc
    2, // llvm.hexagon.V6.vmpyowh.sacc.128B
    2, // llvm.hexagon.V6.vmpyss.parts
    2, // llvm.hexagon.V6.vmpyss.parts.128B
    2, // llvm.hexagon.V6.vmpyub
    2, // llvm.hexagon.V6.vmpyub.128B
    2, // llvm.hexagon.V6.vmpyub.acc
    2, // llvm.hexagon.V6.vmpyub.acc.128B
    2, // llvm.hexagon.V6.vmpyubv
    2, // llvm.hexagon.V6.vmpyubv.128B
    2, // llvm.hexagon.V6.vmpyubv.acc
    2, // llvm.hexagon.V6.vmpyubv.acc.128B
    2, // llvm.hexagon.V6.vmpyuh
    2, // llvm.hexagon.V6.vmpyuh.128B
    2, // llvm.hexagon.V6.vmpyuh.acc
    2, // llvm.hexagon.V6.vmpyuh.acc.128B
    2, // llvm.hexagon.V6.vmpyuhe
    2, // llvm.hexagon.V6.vmpyuhe.128B
    2, // llvm.hexagon.V6.vmpyuhe.acc
    2, // llvm.hexagon.V6.vmpyuhe.acc.128B
    2, // llvm.hexagon.V6.vmpyuhv
    2, // llvm.hexagon.V6.vmpyuhv.128B
    2, // llvm.hexagon.V6.vmpyuhv.acc
    2, // llvm.hexagon.V6.vmpyuhv.acc.128B
    2, // llvm.hexagon.V6.vmpyuhvs
    2, // llvm.hexagon.V6.vmpyuhvs.128B
    2, // llvm.hexagon.V6.vmpyus.parts
    2, // llvm.hexagon.V6.vmpyus.parts.128B
    2, // llvm.hexagon.V6.vmpyuu.parts
    2, // llvm.hexagon.V6.vmpyuu.parts.128B
    2, // llvm.hexagon.V6.vmux
    2, // llvm.hexagon.V6.vmux.128B
    2, // llvm.hexagon.V6.vnavgb
    2, // llvm.hexagon.V6.vnavgb.128B
    2, // llvm.hexagon.V6.vnavgh
    2, // llvm.hexagon.V6.vnavgh.128B
    2, // llvm.hexagon.V6.vnavgub
    2, // llvm.hexagon.V6.vnavgub.128B
    2, // llvm.hexagon.V6.vnavgw
    2, // llvm.hexagon.V6.vnavgw.128B
    2, // llvm.hexagon.V6.vnormamth
    2, // llvm.hexagon.V6.vnormamth.128B
    2, // llvm.hexagon.V6.vnormamtw
    2, // llvm.hexagon.V6.vnormamtw.128B
    2, // llvm.hexagon.V6.vnot
    2, // llvm.hexagon.V6.vnot.128B
    2, // llvm.hexagon.V6.vor
    2, // llvm.hexagon.V6.vor.128B
    2, // llvm.hexagon.V6.vpackeb
    2, // llvm.hexagon.V6.vpackeb.128B
    2, // llvm.hexagon.V6.vpackeh
    2, // llvm.hexagon.V6.vpackeh.128B
    2, // llvm.hexagon.V6.vpackhb.sat
    2, // llvm.hexagon.V6.vpackhb.sat.128B
    2, // llvm.hexagon.V6.vpackhub.sat
    2, // llvm.hexagon.V6.vpackhub.sat.128B
    2, // llvm.hexagon.V6.vpackob
    2, // llvm.hexagon.V6.vpackob.128B
    2, // llvm.hexagon.V6.vpackoh
    2, // llvm.hexagon.V6.vpackoh.128B
    2, // llvm.hexagon.V6.vpackwh.sat
    2, // llvm.hexagon.V6.vpackwh.sat.128B
    2, // llvm.hexagon.V6.vpackwuh.sat
    2, // llvm.hexagon.V6.vpackwuh.sat.128B
    2, // llvm.hexagon.V6.vpopcounth
    2, // llvm.hexagon.V6.vpopcounth.128B
    2, // llvm.hexagon.V6.vprefixqb
    2, // llvm.hexagon.V6.vprefixqb.128B
    2, // llvm.hexagon.V6.vprefixqh
    2, // llvm.hexagon.V6.vprefixqh.128B
    2, // llvm.hexagon.V6.vprefixqw
    2, // llvm.hexagon.V6.vprefixqw.128B
    2, // llvm.hexagon.V6.vrdelta
    2, // llvm.hexagon.V6.vrdelta.128B
    2, // llvm.hexagon.V6.vrmpybub.rtt
    2, // llvm.hexagon.V6.vrmpybub.rtt.128B
    2, // llvm.hexagon.V6.vrmpybub.rtt.acc
    2, // llvm.hexagon.V6.vrmpybub.rtt.acc.128B
    2, // llvm.hexagon.V6.vrmpybus
    2, // llvm.hexagon.V6.vrmpybus.128B
    2, // llvm.hexagon.V6.vrmpybus.acc
    2, // llvm.hexagon.V6.vrmpybus.acc.128B
    22, // llvm.hexagon.V6.vrmpybusi
    22, // llvm.hexagon.V6.vrmpybusi.128B
    89, // llvm.hexagon.V6.vrmpybusi.acc
    89, // llvm.hexagon.V6.vrmpybusi.acc.128B
    2, // llvm.hexagon.V6.vrmpybusv
    2, // llvm.hexagon.V6.vrmpybusv.128B
    2, // llvm.hexagon.V6.vrmpybusv.acc
    2, // llvm.hexagon.V6.vrmpybusv.acc.128B
    2, // llvm.hexagon.V6.vrmpybv
    2, // llvm.hexagon.V6.vrmpybv.128B
    2, // llvm.hexagon.V6.vrmpybv.acc
    2, // llvm.hexagon.V6.vrmpybv.acc.128B
    2, // llvm.hexagon.V6.vrmpyub
    2, // llvm.hexagon.V6.vrmpyub.128B
    2, // llvm.hexagon.V6.vrmpyub.acc
    2, // llvm.hexagon.V6.vrmpyub.acc.128B
    2, // llvm.hexagon.V6.vrmpyub.rtt
    2, // llvm.hexagon.V6.vrmpyub.rtt.128B
    2, // llvm.hexagon.V6.vrmpyub.rtt.acc
    2, // llvm.hexagon.V6.vrmpyub.rtt.acc.128B
    22, // llvm.hexagon.V6.vrmpyubi
    22, // llvm.hexagon.V6.vrmpyubi.128B
    89, // llvm.hexagon.V6.vrmpyubi.acc
    89, // llvm.hexagon.V6.vrmpyubi.acc.128B
    2, // llvm.hexagon.V6.vrmpyubv
    2, // llvm.hexagon.V6.vrmpyubv.128B
    2, // llvm.hexagon.V6.vrmpyubv.acc
    2, // llvm.hexagon.V6.vrmpyubv.acc.128B
    2, // llvm.hexagon.V6.vror
    2, // llvm.hexagon.V6.vror.128B
    2, // llvm.hexagon.V6.vrotr
    2, // llvm.hexagon.V6.vrotr.128B
    2, // llvm.hexagon.V6.vroundhb
    2, // llvm.hexagon.V6.vroundhb.128B
    2, // llvm.hexagon.V6.vroundhub
    2, // llvm.hexagon.V6.vroundhub.128B
    2, // llvm.hexagon.V6.vrounduhub
    2, // llvm.hexagon.V6.vrounduhub.128B
    2, // llvm.hexagon.V6.vrounduwuh
    2, // llvm.hexagon.V6.vrounduwuh.128B
    2, // llvm.hexagon.V6.vroundwh
    2, // llvm.hexagon.V6.vroundwh.128B
    2, // llvm.hexagon.V6.vroundwuh
    2, // llvm.hexagon.V6.vroundwuh.128B
    22, // llvm.hexagon.V6.vrsadubi
    22, // llvm.hexagon.V6.vrsadubi.128B
    89, // llvm.hexagon.V6.vrsadubi.acc
    89, // llvm.hexagon.V6.vrsadubi.acc.128B
    2, // llvm.hexagon.V6.vsatdw
    2, // llvm.hexagon.V6.vsatdw.128B
    2, // llvm.hexagon.V6.vsathub
    2, // llvm.hexagon.V6.vsathub.128B
    2, // llvm.hexagon.V6.vsatuwuh
    2, // llvm.hexagon.V6.vsatuwuh.128B
    2, // llvm.hexagon.V6.vsatwh
    2, // llvm.hexagon.V6.vsatwh.128B
    2, // llvm.hexagon.V6.vsb
    2, // llvm.hexagon.V6.vsb.128B
    88, // llvm.hexagon.V6.vscattermh
    88, // llvm.hexagon.V6.vscattermh.128B
    88, // llvm.hexagon.V6.vscattermh.add
    88, // llvm.hexagon.V6.vscattermh.add.128B
    88, // llvm.hexagon.V6.vscattermhq
    88, // llvm.hexagon.V6.vscattermhq.128B
    88, // llvm.hexagon.V6.vscattermhw
    88, // llvm.hexagon.V6.vscattermhw.128B
    88, // llvm.hexagon.V6.vscattermhw.add
    88, // llvm.hexagon.V6.vscattermhw.add.128B
    88, // llvm.hexagon.V6.vscattermhwq
    88, // llvm.hexagon.V6.vscattermhwq.128B
    88, // llvm.hexagon.V6.vscattermw
    88, // llvm.hexagon.V6.vscattermw.128B
    88, // llvm.hexagon.V6.vscattermw.add
    88, // llvm.hexagon.V6.vscattermw.add.128B
    88, // llvm.hexagon.V6.vscattermwq
    88, // llvm.hexagon.V6.vscattermwq.128B
    2, // llvm.hexagon.V6.vsh
    2, // llvm.hexagon.V6.vsh.128B
    2, // llvm.hexagon.V6.vshufeh
    2, // llvm.hexagon.V6.vshufeh.128B
    2, // llvm.hexagon.V6.vshuffb
    2, // llvm.hexagon.V6.vshuffb.128B
    2, // llvm.hexagon.V6.vshuffeb
    2, // llvm.hexagon.V6.vshuffeb.128B
    2, // llvm.hexagon.V6.vshuffh
    2, // llvm.hexagon.V6.vshuffh.128B
    2, // llvm.hexagon.V6.vshuffob
    2, // llvm.hexagon.V6.vshuffob.128B
    2, // llvm.hexagon.V6.vshuffvdd
    2, // llvm.hexagon.V6.vshuffvdd.128B
    2, // llvm.hexagon.V6.vshufoeb
    2, // llvm.hexagon.V6.vshufoeb.128B
    2, // llvm.hexagon.V6.vshufoeh
    2, // llvm.hexagon.V6.vshufoeh.128B
    2, // llvm.hexagon.V6.vshufoh
    2, // llvm.hexagon.V6.vshufoh.128B
    2, // llvm.hexagon.V6.vsub.hf
    2, // llvm.hexagon.V6.vsub.hf.128B
    2, // llvm.hexagon.V6.vsub.hf.hf
    2, // llvm.hexagon.V6.vsub.hf.hf.128B
    2, // llvm.hexagon.V6.vsub.qf16
    2, // llvm.hexagon.V6.vsub.qf16.128B
    2, // llvm.hexagon.V6.vsub.qf16.mix
    2, // llvm.hexagon.V6.vsub.qf16.mix.128B
    2, // llvm.hexagon.V6.vsub.qf32
    2, // llvm.hexagon.V6.vsub.qf32.128B
    2, // llvm.hexagon.V6.vsub.qf32.mix
    2, // llvm.hexagon.V6.vsub.qf32.mix.128B
    2, // llvm.hexagon.V6.vsub.sf
    2, // llvm.hexagon.V6.vsub.sf.128B
    2, // llvm.hexagon.V6.vsub.sf.bf
    2, // llvm.hexagon.V6.vsub.sf.bf.128B
    2, // llvm.hexagon.V6.vsub.sf.hf
    2, // llvm.hexagon.V6.vsub.sf.hf.128B
    2, // llvm.hexagon.V6.vsub.sf.sf
    2, // llvm.hexagon.V6.vsub.sf.sf.128B
    2, // llvm.hexagon.V6.vsubb
    2, // llvm.hexagon.V6.vsubb.128B
    2, // llvm.hexagon.V6.vsubb.dv
    2, // llvm.hexagon.V6.vsubb.dv.128B
    2, // llvm.hexagon.V6.vsubbnq
    2, // llvm.hexagon.V6.vsubbnq.128B
    2, // llvm.hexagon.V6.vsubbq
    2, // llvm.hexagon.V6.vsubbq.128B
    2, // llvm.hexagon.V6.vsubbsat
    2, // llvm.hexagon.V6.vsubbsat.128B
    2, // llvm.hexagon.V6.vsubbsat.dv
    2, // llvm.hexagon.V6.vsubbsat.dv.128B
    2, // llvm.hexagon.V6.vsubcarry
    2, // llvm.hexagon.V6.vsubcarry.128B
    2, // llvm.hexagon.V6.vsubcarryo
    2, // llvm.hexagon.V6.vsubcarryo.128B
    2, // llvm.hexagon.V6.vsubh
    2, // llvm.hexagon.V6.vsubh.128B
    2, // llvm.hexagon.V6.vsubh.dv
    2, // llvm.hexagon.V6.vsubh.dv.128B
    2, // llvm.hexagon.V6.vsubhnq
    2, // llvm.hexagon.V6.vsubhnq.128B
    2, // llvm.hexagon.V6.vsubhq
    2, // llvm.hexagon.V6.vsubhq.128B
    2, // llvm.hexagon.V6.vsubhsat
    2, // llvm.hexagon.V6.vsubhsat.128B
    2, // llvm.hexagon.V6.vsubhsat.dv
    2, // llvm.hexagon.V6.vsubhsat.dv.128B
    2, // llvm.hexagon.V6.vsubhw
    2, // llvm.hexagon.V6.vsubhw.128B
    2, // llvm.hexagon.V6.vsububh
    2, // llvm.hexagon.V6.vsububh.128B
    2, // llvm.hexagon.V6.vsububsat
    2, // llvm.hexagon.V6.vsububsat.128B
    2, // llvm.hexagon.V6.vsububsat.dv
    2, // llvm.hexagon.V6.vsububsat.dv.128B
    2, // llvm.hexagon.V6.vsubububb.sat
    2, // llvm.hexagon.V6.vsubububb.sat.128B
    2, // llvm.hexagon.V6.vsubuhsat
    2, // llvm.hexagon.V6.vsubuhsat.128B
    2, // llvm.hexagon.V6.vsubuhsat.dv
    2, // llvm.hexagon.V6.vsubuhsat.dv.128B
    2, // llvm.hexagon.V6.vsubuhw
    2, // llvm.hexagon.V6.vsubuhw.128B
    2, // llvm.hexagon.V6.vsubuwsat
    2, // llvm.hexagon.V6.vsubuwsat.128B
    2, // llvm.hexagon.V6.vsubuwsat.dv
    2, // llvm.hexagon.V6.vsubuwsat.dv.128B
    2, // llvm.hexagon.V6.vsubw
    2, // llvm.hexagon.V6.vsubw.128B
    2, // llvm.hexagon.V6.vsubw.dv
    2, // llvm.hexagon.V6.vsubw.dv.128B
    2, // llvm.hexagon.V6.vsubwnq
    2, // llvm.hexagon.V6.vsubwnq.128B
    2, // llvm.hexagon.V6.vsubwq
    2, // llvm.hexagon.V6.vsubwq.128B
    2, // llvm.hexagon.V6.vsubwsat
    2, // llvm.hexagon.V6.vsubwsat.128B
    2, // llvm.hexagon.V6.vsubwsat.dv
    2, // llvm.hexagon.V6.vsubwsat.dv.128B
    2, // llvm.hexagon.V6.vswap
    2, // llvm.hexagon.V6.vswap.128B
    2, // llvm.hexagon.V6.vtmpyb
    2, // llvm.hexagon.V6.vtmpyb.128B
    2, // llvm.hexagon.V6.vtmpyb.acc
    2, // llvm.hexagon.V6.vtmpyb.acc.128B
    2, // llvm.hexagon.V6.vtmpybus
    2, // llvm.hexagon.V6.vtmpybus.128B
    2, // llvm.hexagon.V6.vtmpybus.acc
    2, // llvm.hexagon.V6.vtmpybus.acc.128B
    2, // llvm.hexagon.V6.vtmpyhb
    2, // llvm.hexagon.V6.vtmpyhb.128B
    2, // llvm.hexagon.V6.vtmpyhb.acc
    2, // llvm.hexagon.V6.vtmpyhb.acc.128B
    2, // llvm.hexagon.V6.vunpackb
    2, // llvm.hexagon.V6.vunpackb.128B
    2, // llvm.hexagon.V6.vunpackh
    2, // llvm.hexagon.V6.vunpackh.128B
    2, // llvm.hexagon.V6.vunpackob
    2, // llvm.hexagon.V6.vunpackob.128B
    2, // llvm.hexagon.V6.vunpackoh
    2, // llvm.hexagon.V6.vunpackoh.128B
    2, // llvm.hexagon.V6.vunpackub
    2, // llvm.hexagon.V6.vunpackub.128B
    2, // llvm.hexagon.V6.vunpackuh
    2, // llvm.hexagon.V6.vunpackuh.128B
    2, // llvm.hexagon.V6.vxor
    2, // llvm.hexagon.V6.vxor.128B
    2, // llvm.hexagon.V6.vzb
    2, // llvm.hexagon.V6.vzb.128B
    2, // llvm.hexagon.V6.vzh
    2, // llvm.hexagon.V6.vzh.128B
    8, // llvm.hexagon.Y2.dccleana
    8, // llvm.hexagon.Y2.dccleaninva
    8, // llvm.hexagon.Y2.dcfetch
    8, // llvm.hexagon.Y2.dcinva
    8, // llvm.hexagon.Y2.dczeroa
    8, // llvm.hexagon.Y4.l2fetch
    8, // llvm.hexagon.Y5.l2fetch
    230, // llvm.hexagon.Y6.dmlink
    230, // llvm.hexagon.Y6.dmpause
    230, // llvm.hexagon.Y6.dmpoll
    230, // llvm.hexagon.Y6.dmresume
    230, // llvm.hexagon.Y6.dmstart
    230, // llvm.hexagon.Y6.dmwait
    231, // llvm.hexagon.circ.ldb
    231, // llvm.hexagon.circ.ldd
    231, // llvm.hexagon.circ.ldh
    231, // llvm.hexagon.circ.ldub
    231, // llvm.hexagon.circ.lduh
    231, // llvm.hexagon.circ.ldw
    232, // llvm.hexagon.circ.stb
    232, // llvm.hexagon.circ.std
    232, // llvm.hexagon.circ.sth
    232, // llvm.hexagon.circ.sthhi
    232, // llvm.hexagon.circ.stw
    4, // llvm.hexagon.instrprof.custom
    8, // llvm.hexagon.prefetch
    233, // llvm.hexagon.vmemcpy
    234, // llvm.hexagon.vmemset
    10, // llvm.loongarch.asrtgt.d
    10, // llvm.loongarch.asrtle.d
    220, // llvm.loongarch.break
    235, // llvm.loongarch.cacop.d
    235, // llvm.loongarch.cacop.w
    10, // llvm.loongarch.cpucfg
    10, // llvm.loongarch.crc.w.b.w
    10, // llvm.loongarch.crc.w.d.w
    10, // llvm.loongarch.crc.w.h.w
    10, // llvm.loongarch.crc.w.w.w
    10, // llvm.loongarch.crcc.w.b.w
    10, // llvm.loongarch.crcc.w.d.w
    10, // llvm.loongarch.crcc.w.h.w
    10, // llvm.loongarch.crcc.w.w.w
    220, // llvm.loongarch.csrrd.d
    220, // llvm.loongarch.csrrd.w
    6, // llvm.loongarch.csrwr.d
    6, // llvm.loongarch.csrwr.w
    35, // llvm.loongarch.csrxchg.d
    35, // llvm.loongarch.csrxchg.w
    220, // llvm.loongarch.dbar
    220, // llvm.loongarch.ibar
    10, // llvm.loongarch.iocsrrd.b
    10, // llvm.loongarch.iocsrrd.d
    10, // llvm.loongarch.iocsrrd.h
    10, // llvm.loongarch.iocsrrd.w
    10, // llvm.loongarch.iocsrwr.b
    10, // llvm.loongarch.iocsrwr.d
    10, // llvm.loongarch.iocsrwr.h
    10, // llvm.loongarch.iocsrwr.w
    6, // llvm.loongarch.lddir.d
    6, // llvm.loongarch.ldpte.d
    236, // llvm.loongarch.masked.atomicrmw.add.i32
    236, // llvm.loongarch.masked.atomicrmw.add.i64
    237, // llvm.loongarch.masked.atomicrmw.max.i64
    237, // llvm.loongarch.masked.atomicrmw.min.i64
    236, // llvm.loongarch.masked.atomicrmw.nand.i32
    236, // llvm.loongarch.masked.atomicrmw.nand.i64
    236, // llvm.loongarch.masked.atomicrmw.sub.i32
    236, // llvm.loongarch.masked.atomicrmw.sub.i64
    236, // llvm.loongarch.masked.atomicrmw.umax.i32
    236, // llvm.loongarch.masked.atomicrmw.umax.i64
    236, // llvm.loongarch.masked.atomicrmw.umin.i32
    236, // llvm.loongarch.masked.atomicrmw.umin.i64
    236, // llvm.loongarch.masked.atomicrmw.xchg.i32
    236, // llvm.loongarch.masked.atomicrmw.xchg.i64
    237, // llvm.loongarch.masked.cmpxchg.i64
    220, // llvm.loongarch.movfcsr2gr
    220, // llvm.loongarch.movgr2fcsr
    220, // llvm.loongarch.syscall
    10, // llvm.mips.absq.s.ph
    10, // llvm.mips.absq.s.qb
    10, // llvm.mips.absq.s.w
    12, // llvm.mips.add.a.b
    12, // llvm.mips.add.a.d
    12, // llvm.mips.add.a.h
    12, // llvm.mips.add.a.w
    12, // llvm.mips.addq.ph
    12, // llvm.mips.addq.s.ph
    10, // llvm.mips.addq.s.w
    12, // llvm.mips.addqh.ph
    12, // llvm.mips.addqh.r.ph
    12, // llvm.mips.addqh.r.w
    12, // llvm.mips.addqh.w
    12, // llvm.mips.adds.a.b
    12, // llvm.mips.adds.a.d
    12, // llvm.mips.adds.a.h
    12, // llvm.mips.adds.a.w
    12, // llvm.mips.adds.s.b
    12, // llvm.mips.adds.s.d
    12, // llvm.mips.adds.s.h
    12, // llvm.mips.adds.s.w
    12, // llvm.mips.adds.u.b
    12, // llvm.mips.adds.u.d
    12, // llvm.mips.adds.u.h
    12, // llvm.mips.adds.u.w
    10, // llvm.mips.addsc
    10, // llvm.mips.addu.ph
    12, // llvm.mips.addu.qb
    10, // llvm.mips.addu.s.ph
    12, // llvm.mips.addu.s.qb
    12, // llvm.mips.adduh.qb
    12, // llvm.mips.adduh.r.qb
    12, // llvm.mips.addv.b
    12, // llvm.mips.addv.d
    12, // llvm.mips.addv.h
    12, // llvm.mips.addv.w
    60, // llvm.mips.addvi.b
    60, // llvm.mips.addvi.d
    60, // llvm.mips.addvi.h
    60, // llvm.mips.addvi.w
    10, // llvm.mips.addwc
    12, // llvm.mips.and.v
    60, // llvm.mips.andi.b
    238, // llvm.mips.append
    12, // llvm.mips.asub.s.b
    12, // llvm.mips.asub.s.d
    12, // llvm.mips.asub.s.h
    12, // llvm.mips.asub.s.w
    12, // llvm.mips.asub.u.b
    12, // llvm.mips.asub.u.d
    12, // llvm.mips.asub.u.h
    12, // llvm.mips.asub.u.w
    12, // llvm.mips.ave.s.b
    12, // llvm.mips.ave.s.d
    12, // llvm.mips.ave.s.h
    12, // llvm.mips.ave.s.w
    12, // llvm.mips.ave.u.b
    12, // llvm.mips.ave.u.d
    12, // llvm.mips.ave.u.h
    12, // llvm.mips.ave.u.w
    12, // llvm.mips.aver.s.b
    12, // llvm.mips.aver.s.d
    12, // llvm.mips.aver.s.h
    12, // llvm.mips.aver.s.w
    12, // llvm.mips.aver.u.b
    12, // llvm.mips.aver.u.d
    12, // llvm.mips.aver.u.h
    12, // llvm.mips.aver.u.w
    238, // llvm.mips.balign
    12, // llvm.mips.bclr.b
    12, // llvm.mips.bclr.d
    12, // llvm.mips.bclr.h
    12, // llvm.mips.bclr.w
    60, // llvm.mips.bclri.b
    60, // llvm.mips.bclri.d
    60, // llvm.mips.bclri.h
    60, // llvm.mips.bclri.w
    12, // llvm.mips.binsl.b
    12, // llvm.mips.binsl.d
    12, // llvm.mips.binsl.h
    12, // llvm.mips.binsl.w
    238, // llvm.mips.binsli.b
    238, // llvm.mips.binsli.d
    238, // llvm.mips.binsli.h
    238, // llvm.mips.binsli.w
    12, // llvm.mips.binsr.b
    12, // llvm.mips.binsr.d
    12, // llvm.mips.binsr.h
    12, // llvm.mips.binsr.w
    238, // llvm.mips.binsri.b
    238, // llvm.mips.binsri.d
    238, // llvm.mips.binsri.h
    238, // llvm.mips.binsri.w
    12, // llvm.mips.bitrev
    12, // llvm.mips.bmnz.v
    238, // llvm.mips.bmnzi.b
    12, // llvm.mips.bmz.v
    238, // llvm.mips.bmzi.b
    12, // llvm.mips.bneg.b
    12, // llvm.mips.bneg.d
    12, // llvm.mips.bneg.h
    12, // llvm.mips.bneg.w
    60, // llvm.mips.bnegi.b
    60, // llvm.mips.bnegi.d
    60, // llvm.mips.bnegi.h
    60, // llvm.mips.bnegi.w
    12, // llvm.mips.bnz.b
    12, // llvm.mips.bnz.d
    12, // llvm.mips.bnz.h
    12, // llvm.mips.bnz.v
    12, // llvm.mips.bnz.w
    221, // llvm.mips.bposge32
    12, // llvm.mips.bsel.v
    238, // llvm.mips.bseli.b
    12, // llvm.mips.bset.b
    12, // llvm.mips.bset.d
    12, // llvm.mips.bset.h
    12, // llvm.mips.bset.w
    60, // llvm.mips.bseti.b
    60, // llvm.mips.bseti.d
    60, // llvm.mips.bseti.h
    60, // llvm.mips.bseti.w
    12, // llvm.mips.bz.b
    12, // llvm.mips.bz.d
    12, // llvm.mips.bz.h
    12, // llvm.mips.bz.v
    12, // llvm.mips.bz.w
    12, // llvm.mips.ceq.b
    12, // llvm.mips.ceq.d
    12, // llvm.mips.ceq.h
    12, // llvm.mips.ceq.w
    60, // llvm.mips.ceqi.b
    60, // llvm.mips.ceqi.d
    60, // llvm.mips.ceqi.h
    60, // llvm.mips.ceqi.w
    220, // llvm.mips.cfcmsa
    12, // llvm.mips.cle.s.b
    12, // llvm.mips.cle.s.d
    12, // llvm.mips.cle.s.h
    12, // llvm.mips.cle.s.w
    12, // llvm.mips.cle.u.b
    12, // llvm.mips.cle.u.d
    12, // llvm.mips.cle.u.h
    12, // llvm.mips.cle.u.w
    60, // llvm.mips.clei.s.b
    60, // llvm.mips.clei.s.d
    60, // llvm.mips.clei.s.h
    60, // llvm.mips.clei.s.w
    60, // llvm.mips.clei.u.b
    60, // llvm.mips.clei.u.d
    60, // llvm.mips.clei.u.h
    60, // llvm.mips.clei.u.w
    12, // llvm.mips.clt.s.b
    12, // llvm.mips.clt.s.d
    12, // llvm.mips.clt.s.h
    12, // llvm.mips.clt.s.w
    12, // llvm.mips.clt.u.b
    12, // llvm.mips.clt.u.d
    12, // llvm.mips.clt.u.h
    12, // llvm.mips.clt.u.w
    60, // llvm.mips.clti.s.b
    60, // llvm.mips.clti.s.d
    60, // llvm.mips.clti.s.h
    60, // llvm.mips.clti.s.w
    60, // llvm.mips.clti.u.b
    60, // llvm.mips.clti.u.d
    60, // llvm.mips.clti.u.h
    60, // llvm.mips.clti.u.w
    10, // llvm.mips.cmp.eq.ph
    10, // llvm.mips.cmp.le.ph
    10, // llvm.mips.cmp.lt.ph
    10, // llvm.mips.cmpgdu.eq.qb
    10, // llvm.mips.cmpgdu.le.qb
    10, // llvm.mips.cmpgdu.lt.qb
    10, // llvm.mips.cmpgu.eq.qb
    10, // llvm.mips.cmpgu.le.qb
    10, // llvm.mips.cmpgu.lt.qb
    10, // llvm.mips.cmpu.eq.qb
    10, // llvm.mips.cmpu.le.qb
    10, // llvm.mips.cmpu.lt.qb
    12, // llvm.mips.copy.s.b
    12, // llvm.mips.copy.s.d
    12, // llvm.mips.copy.s.h
    12, // llvm.mips.copy.s.w
    12, // llvm.mips.copy.u.b
    12, // llvm.mips.copy.u.d
    12, // llvm.mips.copy.u.h
    12, // llvm.mips.copy.u.w
    220, // llvm.mips.ctcmsa
    12, // llvm.mips.div.s.b
    12, // llvm.mips.div.s.d
    12, // llvm.mips.div.s.h
    12, // llvm.mips.div.s.w
    12, // llvm.mips.div.u.b
    12, // llvm.mips.div.u.d
    12, // llvm.mips.div.u.h
    12, // llvm.mips.div.u.w
    12, // llvm.mips.dlsa
    12, // llvm.mips.dotp.s.d
    12, // llvm.mips.dotp.s.h
    12, // llvm.mips.dotp.s.w
    12, // llvm.mips.dotp.u.d
    12, // llvm.mips.dotp.u.h
    12, // llvm.mips.dotp.u.w
    12, // llvm.mips.dpa.w.ph
    12, // llvm.mips.dpadd.s.d
    12, // llvm.mips.dpadd.s.h
    12, // llvm.mips.dpadd.s.w
    12, // llvm.mips.dpadd.u.d
    12, // llvm.mips.dpadd.u.h
    12, // llvm.mips.dpadd.u.w
    10, // llvm.mips.dpaq.s.w.ph
    10, // llvm.mips.dpaq.sa.l.w
    10, // llvm.mips.dpaqx.s.w.ph
    10, // llvm.mips.dpaqx.sa.w.ph
    12, // llvm.mips.dpau.h.qbl
    12, // llvm.mips.dpau.h.qbr
    12, // llvm.mips.dpax.w.ph
    12, // llvm.mips.dps.w.ph
    10, // llvm.mips.dpsq.s.w.ph
    10, // llvm.mips.dpsq.sa.l.w
    10, // llvm.mips.dpsqx.s.w.ph
    10, // llvm.mips.dpsqx.sa.w.ph
    12, // llvm.mips.dpsu.h.qbl
    12, // llvm.mips.dpsu.h.qbr
    12, // llvm.mips.dpsub.s.d
    12, // llvm.mips.dpsub.s.h
    12, // llvm.mips.dpsub.s.w
    12, // llvm.mips.dpsub.u.d
    12, // llvm.mips.dpsub.u.h
    12, // llvm.mips.dpsub.u.w
    12, // llvm.mips.dpsx.w.ph
    10, // llvm.mips.extp
    10, // llvm.mips.extpdp
    10, // llvm.mips.extr.r.w
    10, // llvm.mips.extr.rs.w
    10, // llvm.mips.extr.s.h
    10, // llvm.mips.extr.w
    12, // llvm.mips.fadd.d
    12, // llvm.mips.fadd.w
    12, // llvm.mips.fcaf.d
    12, // llvm.mips.fcaf.w
    12, // llvm.mips.fceq.d
    12, // llvm.mips.fceq.w
    12, // llvm.mips.fclass.d
    12, // llvm.mips.fclass.w
    12, // llvm.mips.fcle.d
    12, // llvm.mips.fcle.w
    12, // llvm.mips.fclt.d
    12, // llvm.mips.fclt.w
    12, // llvm.mips.fcne.d
    12, // llvm.mips.fcne.w
    12, // llvm.mips.fcor.d
    12, // llvm.mips.fcor.w
    12, // llvm.mips.fcueq.d
    12, // llvm.mips.fcueq.w
    12, // llvm.mips.fcule.d
    12, // llvm.mips.fcule.w
    12, // llvm.mips.fcult.d
    12, // llvm.mips.fcult.w
    12, // llvm.mips.fcun.d
    12, // llvm.mips.fcun.w
    12, // llvm.mips.fcune.d
    12, // llvm.mips.fcune.w
    12, // llvm.mips.fdiv.d
    12, // llvm.mips.fdiv.w
    12, // llvm.mips.fexdo.h
    12, // llvm.mips.fexdo.w
    12, // llvm.mips.fexp2.d
    12, // llvm.mips.fexp2.w
    12, // llvm.mips.fexupl.d
    12, // llvm.mips.fexupl.w
    12, // llvm.mips.fexupr.d
    12, // llvm.mips.fexupr.w
    12, // llvm.mips.ffint.s.d
    12, // llvm.mips.ffint.s.w
    12, // llvm.mips.ffint.u.d
    12, // llvm.mips.ffint.u.w
    12, // llvm.mips.ffql.d
    12, // llvm.mips.ffql.w
    12, // llvm.mips.ffqr.d
    12, // llvm.mips.ffqr.w
    12, // llvm.mips.fill.b
    12, // llvm.mips.fill.d
    12, // llvm.mips.fill.h
    12, // llvm.mips.fill.w
    12, // llvm.mips.flog2.d
    12, // llvm.mips.flog2.w
    12, // llvm.mips.fmadd.d
    12, // llvm.mips.fmadd.w
    12, // llvm.mips.fmax.a.d
    12, // llvm.mips.fmax.a.w
    12, // llvm.mips.fmax.d
    12, // llvm.mips.fmax.w
    12, // llvm.mips.fmin.a.d
    12, // llvm.mips.fmin.a.w
    12, // llvm.mips.fmin.d
    12, // llvm.mips.fmin.w
    12, // llvm.mips.fmsub.d
    12, // llvm.mips.fmsub.w
    12, // llvm.mips.fmul.d
    12, // llvm.mips.fmul.w
    12, // llvm.mips.frcp.d
    12, // llvm.mips.frcp.w
    12, // llvm.mips.frint.d
    12, // llvm.mips.frint.w
    12, // llvm.mips.frsqrt.d
    12, // llvm.mips.frsqrt.w
    12, // llvm.mips.fsaf.d
    12, // llvm.mips.fsaf.w
    12, // llvm.mips.fseq.d
    12, // llvm.mips.fseq.w
    12, // llvm.mips.fsle.d
    12, // llvm.mips.fsle.w
    12, // llvm.mips.fslt.d
    12, // llvm.mips.fslt.w
    12, // llvm.mips.fsne.d
    12, // llvm.mips.fsne.w
    12, // llvm.mips.fsor.d
    12, // llvm.mips.fsor.w
    12, // llvm.mips.fsqrt.d
    12, // llvm.mips.fsqrt.w
    12, // llvm.mips.fsub.d
    12, // llvm.mips.fsub.w
    12, // llvm.mips.fsueq.d
    12, // llvm.mips.fsueq.w
    12, // llvm.mips.fsule.d
    12, // llvm.mips.fsule.w
    12, // llvm.mips.fsult.d
    12, // llvm.mips.fsult.w
    12, // llvm.mips.fsun.d
    12, // llvm.mips.fsun.w
    12, // llvm.mips.fsune.d
    12, // llvm.mips.fsune.w
    12, // llvm.mips.ftint.s.d
    12, // llvm.mips.ftint.s.w
    12, // llvm.mips.ftint.u.d
    12, // llvm.mips.ftint.u.w
    12, // llvm.mips.ftq.h
    12, // llvm.mips.ftq.w
    12, // llvm.mips.ftrunc.s.d
    12, // llvm.mips.ftrunc.s.w
    12, // llvm.mips.ftrunc.u.d
    12, // llvm.mips.ftrunc.u.w
    12, // llvm.mips.hadd.s.d
    12, // llvm.mips.hadd.s.h
    12, // llvm.mips.hadd.s.w
    12, // llvm.mips.hadd.u.d
    12, // llvm.mips.hadd.u.h
    12, // llvm.mips.hadd.u.w
    12, // llvm.mips.hsub.s.d
    12, // llvm.mips.hsub.s.h
    12, // llvm.mips.hsub.s.w
    12, // llvm.mips.hsub.u.d
    12, // llvm.mips.hsub.u.h
    12, // llvm.mips.hsub.u.w
    12, // llvm.mips.ilvev.b
    12, // llvm.mips.ilvev.d
    12, // llvm.mips.ilvev.h
    12, // llvm.mips.ilvev.w
    12, // llvm.mips.ilvl.b
    12, // llvm.mips.ilvl.d
    12, // llvm.mips.ilvl.h
    12, // llvm.mips.ilvl.w
    12, // llvm.mips.ilvod.b
    12, // llvm.mips.ilvod.d
    12, // llvm.mips.ilvod.h
    12, // llvm.mips.ilvod.w
    12, // llvm.mips.ilvr.b
    12, // llvm.mips.ilvr.d
    12, // llvm.mips.ilvr.h
    12, // llvm.mips.ilvr.w
    12, // llvm.mips.insert.b
    12, // llvm.mips.insert.d
    12, // llvm.mips.insert.h
    12, // llvm.mips.insert.w
    221, // llvm.mips.insv
    60, // llvm.mips.insve.b
    60, // llvm.mips.insve.d
    60, // llvm.mips.insve.h
    60, // llvm.mips.insve.w
    33, // llvm.mips.lbux
    33, // llvm.mips.ld.b
    33, // llvm.mips.ld.d
    33, // llvm.mips.ld.h
    33, // llvm.mips.ld.w
    20, // llvm.mips.ldi.b
    20, // llvm.mips.ldi.d
    20, // llvm.mips.ldi.h
    20, // llvm.mips.ldi.w
    33, // llvm.mips.ldr.d
    33, // llvm.mips.ldr.w
    33, // llvm.mips.lhx
    12, // llvm.mips.lsa
    33, // llvm.mips.lwx
    12, // llvm.mips.madd
    12, // llvm.mips.madd.q.h
    12, // llvm.mips.madd.q.w
    12, // llvm.mips.maddr.q.h
    12, // llvm.mips.maddr.q.w
    12, // llvm.mips.maddu
    12, // llvm.mips.maddv.b
    12, // llvm.mips.maddv.d
    12, // llvm.mips.maddv.h
    12, // llvm.mips.maddv.w
    10, // llvm.mips.maq.s.w.phl
    10, // llvm.mips.maq.s.w.phr
    10, // llvm.mips.maq.sa.w.phl
    10, // llvm.mips.maq.sa.w.phr
    12, // llvm.mips.max.a.b
    12, // llvm.mips.max.a.d
    12, // llvm.mips.max.a.h
    12, // llvm.mips.max.a.w
    12, // llvm.mips.max.s.b
    12, // llvm.mips.max.s.d
    12, // llvm.mips.max.s.h
    12, // llvm.mips.max.s.w
    12, // llvm.mips.max.u.b
    12, // llvm.mips.max.u.d
    12, // llvm.mips.max.u.h
    12, // llvm.mips.max.u.w
    60, // llvm.mips.maxi.s.b
    60, // llvm.mips.maxi.s.d
    60, // llvm.mips.maxi.s.h
    60, // llvm.mips.maxi.s.w
    60, // llvm.mips.maxi.u.b
    60, // llvm.mips.maxi.u.d
    60, // llvm.mips.maxi.u.h
    60, // llvm.mips.maxi.u.w
    12, // llvm.mips.min.a.b
    12, // llvm.mips.min.a.d
    12, // llvm.mips.min.a.h
    12, // llvm.mips.min.a.w
    12, // llvm.mips.min.s.b
    12, // llvm.mips.min.s.d
    12, // llvm.mips.min.s.h
    12, // llvm.mips.min.s.w
    12, // llvm.mips.min.u.b
    12, // llvm.mips.min.u.d
    12, // llvm.mips.min.u.h
    12, // llvm.mips.min.u.w
    60, // llvm.mips.mini.s.b
    60, // llvm.mips.mini.s.d
    60, // llvm.mips.mini.s.h
    60, // llvm.mips.mini.s.w
    60, // llvm.mips.mini.u.b
    60, // llvm.mips.mini.u.d
    60, // llvm.mips.mini.u.h
    60, // llvm.mips.mini.u.w
    12, // llvm.mips.mod.s.b
    12, // llvm.mips.mod.s.d
    12, // llvm.mips.mod.s.h
    12, // llvm.mips.mod.s.w
    12, // llvm.mips.mod.u.b
    12, // llvm.mips.mod.u.d
    12, // llvm.mips.mod.u.h
    12, // llvm.mips.mod.u.w
    12, // llvm.mips.modsub
    12, // llvm.mips.move.v
    12, // llvm.mips.msub
    12, // llvm.mips.msub.q.h
    12, // llvm.mips.msub.q.w
    12, // llvm.mips.msubr.q.h
    12, // llvm.mips.msubr.q.w
    12, // llvm.mips.msubu
    12, // llvm.mips.msubv.b
    12, // llvm.mips.msubv.d
    12, // llvm.mips.msubv.h
    12, // llvm.mips.msubv.w
    10, // llvm.mips.mthlip
    10, // llvm.mips.mul.ph
    12, // llvm.mips.mul.q.h
    12, // llvm.mips.mul.q.w
    10, // llvm.mips.mul.s.ph
    10, // llvm.mips.muleq.s.w.phl
    10, // llvm.mips.muleq.s.w.phr
    10, // llvm.mips.muleu.s.ph.qbl
    10, // llvm.mips.muleu.s.ph.qbr
    10, // llvm.mips.mulq.rs.ph
    10, // llvm.mips.mulq.rs.w
    10, // llvm.mips.mulq.s.ph
    10, // llvm.mips.mulq.s.w
    12, // llvm.mips.mulr.q.h
    12, // llvm.mips.mulr.q.w
    12, // llvm.mips.mulsa.w.ph
    10, // llvm.mips.mulsaq.s.w.ph
    12, // llvm.mips.mult
    12, // llvm.mips.multu
    12, // llvm.mips.mulv.b
    12, // llvm.mips.mulv.d
    12, // llvm.mips.mulv.h
    12, // llvm.mips.mulv.w
    12, // llvm.mips.nloc.b
    12, // llvm.mips.nloc.d
    12, // llvm.mips.nloc.h
    12, // llvm.mips.nloc.w
    12, // llvm.mips.nlzc.b
    12, // llvm.mips.nlzc.d
    12, // llvm.mips.nlzc.h
    12, // llvm.mips.nlzc.w
    12, // llvm.mips.nor.v
    60, // llvm.mips.nori.b
    12, // llvm.mips.or.v
    60, // llvm.mips.ori.b
    12, // llvm.mips.packrl.ph
    12, // llvm.mips.pckev.b
    12, // llvm.mips.pckev.d
    12, // llvm.mips.pckev.h
    12, // llvm.mips.pckev.w
    12, // llvm.mips.pckod.b
    12, // llvm.mips.pckod.d
    12, // llvm.mips.pckod.h
    12, // llvm.mips.pckod.w
    12, // llvm.mips.pcnt.b
    12, // llvm.mips.pcnt.d
    12, // llvm.mips.pcnt.h
    12, // llvm.mips.pcnt.w
    221, // llvm.mips.pick.ph
    221, // llvm.mips.pick.qb
    12, // llvm.mips.preceq.w.phl
    12, // llvm.mips.preceq.w.phr
    12, // llvm.mips.precequ.ph.qbl
    12, // llvm.mips.precequ.ph.qbla
    12, // llvm.mips.precequ.ph.qbr
    12, // llvm.mips.precequ.ph.qbra
    12, // llvm.mips.preceu.ph.qbl
    12, // llvm.mips.preceu.ph.qbla
    12, // llvm.mips.preceu.ph.qbr
    12, // llvm.mips.preceu.ph.qbra
    10, // llvm.mips.precr.qb.ph
    238, // llvm.mips.precr.sra.ph.w
    238, // llvm.mips.precr.sra.r.ph.w
    12, // llvm.mips.precrq.ph.w
    12, // llvm.mips.precrq.qb.ph
    10, // llvm.mips.precrq.rs.ph.w
    10, // llvm.mips.precrqu.s.qb.ph
    238, // llvm.mips.prepend
    12, // llvm.mips.raddu.w.qb
    239, // llvm.mips.rddsp
    12, // llvm.mips.repl.ph
    12, // llvm.mips.repl.qb
    60, // llvm.mips.sat.s.b
    60, // llvm.mips.sat.s.d
    60, // llvm.mips.sat.s.h
    60, // llvm.mips.sat.s.w
    60, // llvm.mips.sat.u.b
    60, // llvm.mips.sat.u.d
    60, // llvm.mips.sat.u.h
    60, // llvm.mips.sat.u.w
    60, // llvm.mips.shf.b
    60, // llvm.mips.shf.h
    60, // llvm.mips.shf.w
    12, // llvm.mips.shilo
    10, // llvm.mips.shll.ph
    10, // llvm.mips.shll.qb
    10, // llvm.mips.shll.s.ph
    10, // llvm.mips.shll.s.w
    12, // llvm.mips.shra.ph
    12, // llvm.mips.shra.qb
    12, // llvm.mips.shra.r.ph
    12, // llvm.mips.shra.r.qb
    12, // llvm.mips.shra.r.w
    12, // llvm.mips.shrl.ph
    12, // llvm.mips.shrl.qb
    12, // llvm.mips.sld.b
    12, // llvm.mips.sld.d
    12, // llvm.mips.sld.h
    12, // llvm.mips.sld.w
    238, // llvm.mips.sldi.b
    238, // llvm.mips.sldi.d
    238, // llvm.mips.sldi.h
    238, // llvm.mips.sldi.w
    12, // llvm.mips.sll.b
    12, // llvm.mips.sll.d
    12, // llvm.mips.sll.h
    12, // llvm.mips.sll.w
    60, // llvm.mips.slli.b
    60, // llvm.mips.slli.d
    60, // llvm.mips.slli.h
    60, // llvm.mips.slli.w
    12, // llvm.mips.splat.b
    12, // llvm.mips.splat.d
    12, // llvm.mips.splat.h
    12, // llvm.mips.splat.w
    60, // llvm.mips.splati.b
    60, // llvm.mips.splati.d
    60, // llvm.mips.splati.h
    60, // llvm.mips.splati.w
    12, // llvm.mips.sra.b
    12, // llvm.mips.sra.d
    12, // llvm.mips.sra.h
    12, // llvm.mips.sra.w
    60, // llvm.mips.srai.b
    60, // llvm.mips.srai.d
    60, // llvm.mips.srai.h
    60, // llvm.mips.srai.w
    12, // llvm.mips.srar.b
    12, // llvm.mips.srar.d
    12, // llvm.mips.srar.h
    12, // llvm.mips.srar.w
    60, // llvm.mips.srari.b
    60, // llvm.mips.srari.d
    60, // llvm.mips.srari.h
    60, // llvm.mips.srari.w
    12, // llvm.mips.srl.b
    12, // llvm.mips.srl.d
    12, // llvm.mips.srl.h
    12, // llvm.mips.srl.w
    60, // llvm.mips.srli.b
    60, // llvm.mips.srli.d
    60, // llvm.mips.srli.h
    60, // llvm.mips.srli.w
    12, // llvm.mips.srlr.b
    12, // llvm.mips.srlr.d
    12, // llvm.mips.srlr.h
    12, // llvm.mips.srlr.w
    60, // llvm.mips.srlri.b
    60, // llvm.mips.srlri.d
    60, // llvm.mips.srlri.h
    60, // llvm.mips.srlri.w
    240, // llvm.mips.st.b
    240, // llvm.mips.st.d
    240, // llvm.mips.st.h
    240, // llvm.mips.st.w
    240, // llvm.mips.str.d
    240, // llvm.mips.str.w
    12, // llvm.mips.subq.ph
    12, // llvm.mips.subq.s.ph
    10, // llvm.mips.subq.s.w
    12, // llvm.mips.subqh.ph
    12, // llvm.mips.subqh.r.ph
    12, // llvm.mips.subqh.r.w
    12, // llvm.mips.subqh.w
    12, // llvm.mips.subs.s.b
    12, // llvm.mips.subs.s.d
    12, // llvm.mips.subs.s.h
    12, // llvm.mips.subs.s.w
    12, // llvm.mips.subs.u.b
    12, // llvm.mips.subs.u.d
    12, // llvm.mips.subs.u.h
    12, // llvm.mips.subs.u.w
    12, // llvm.mips.subsus.u.b
    12, // llvm.mips.subsus.u.d
    12, // llvm.mips.subsus.u.h
    12, // llvm.mips.subsus.u.w
    12, // llvm.mips.subsuu.s.b
    12, // llvm.mips.subsuu.s.d
    12, // llvm.mips.subsuu.s.h
    12, // llvm.mips.subsuu.s.w
    10, // llvm.mips.subu.ph
    12, // llvm.mips.subu.qb
    10, // llvm.mips.subu.s.ph
    12, // llvm.mips.subu.s.qb
    12, // llvm.mips.subuh.qb
    12, // llvm.mips.subuh.r.qb
    12, // llvm.mips.subv.b
    12, // llvm.mips.subv.d
    12, // llvm.mips.subv.h
    12, // llvm.mips.subv.w
    60, // llvm.mips.subvi.b
    60, // llvm.mips.subvi.d
    60, // llvm.mips.subvi.h
    60, // llvm.mips.subvi.w
    12, // llvm.mips.vshf.b
    12, // llvm.mips.vshf.d
    12, // llvm.mips.vshf.h
    12, // llvm.mips.vshf.w
    6, // llvm.mips.wrdsp
    12, // llvm.mips.xor.v
    60, // llvm.mips.xori.b
    2, // llvm.nvvm.abs.bf16
    2, // llvm.nvvm.abs.bf16x2
    5, // llvm.nvvm.add.rm.d
    5, // llvm.nvvm.add.rm.f
    5, // llvm.nvvm.add.rm.ftz.f
    5, // llvm.nvvm.add.rn.d
    5, // llvm.nvvm.add.rn.f
    5, // llvm.nvvm.add.rn.ftz.f
    5, // llvm.nvvm.add.rp.d
    5, // llvm.nvvm.add.rp.f
    5, // llvm.nvvm.add.rp.ftz.f
    5, // llvm.nvvm.add.rz.d
    5, // llvm.nvvm.add.rz.f
    5, // llvm.nvvm.add.rz.ftz.f
    241, // llvm.nvvm.atomic.add.gen.f.cta
    241, // llvm.nvvm.atomic.add.gen.f.sys
    241, // llvm.nvvm.atomic.add.gen.i.cta
    241, // llvm.nvvm.atomic.add.gen.i.sys
    241, // llvm.nvvm.atomic.and.gen.i.cta
    241, // llvm.nvvm.atomic.and.gen.i.sys
    241, // llvm.nvvm.atomic.cas.gen.i.cta
    241, // llvm.nvvm.atomic.cas.gen.i.sys
    241, // llvm.nvvm.atomic.dec.gen.i.cta
    241, // llvm.nvvm.atomic.dec.gen.i.sys
    241, // llvm.nvvm.atomic.exch.gen.i.cta
    241, // llvm.nvvm.atomic.exch.gen.i.sys
    241, // llvm.nvvm.atomic.inc.gen.i.cta
    241, // llvm.nvvm.atomic.inc.gen.i.sys
    241, // llvm.nvvm.atomic.load.dec.32
    241, // llvm.nvvm.atomic.load.inc.32
    241, // llvm.nvvm.atomic.max.gen.i.cta
    241, // llvm.nvvm.atomic.max.gen.i.sys
    241, // llvm.nvvm.atomic.min.gen.i.cta
    241, // llvm.nvvm.atomic.min.gen.i.sys
    241, // llvm.nvvm.atomic.or.gen.i.cta
    241, // llvm.nvvm.atomic.or.gen.i.sys
    241, // llvm.nvvm.atomic.xor.gen.i.cta
    241, // llvm.nvvm.atomic.xor.gen.i.sys
    242, // llvm.nvvm.bar.sync
    242, // llvm.nvvm.bar.warp.sync
    242, // llvm.nvvm.barrier
    242, // llvm.nvvm.barrier.cluster.arrive
    242, // llvm.nvvm.barrier.cluster.arrive.relaxed
    242, // llvm.nvvm.barrier.cluster.wait
    242, // llvm.nvvm.barrier.n
    242, // llvm.nvvm.barrier.sync
    242, // llvm.nvvm.barrier.sync.cnt
    242, // llvm.nvvm.barrier0
    242, // llvm.nvvm.barrier0.and
    242, // llvm.nvvm.barrier0.or
    242, // llvm.nvvm.barrier0.popc
    5, // llvm.nvvm.bf2h.rn
    5, // llvm.nvvm.bf2h.rn.ftz
    5, // llvm.nvvm.bitcast.d2ll
    5, // llvm.nvvm.bitcast.f2i
    5, // llvm.nvvm.bitcast.i2f
    5, // llvm.nvvm.bitcast.ll2d
    5, // llvm.nvvm.ceil.d
    5, // llvm.nvvm.ceil.f
    5, // llvm.nvvm.ceil.ftz.f
    10, // llvm.nvvm.compiler.error
    10, // llvm.nvvm.compiler.warn
    2, // llvm.nvvm.cos.approx.f
    2, // llvm.nvvm.cos.approx.ftz.f
    243, // llvm.nvvm.cp.async.ca.shared.global.16
    243, // llvm.nvvm.cp.async.ca.shared.global.16.s
    243, // llvm.nvvm.cp.async.ca.shared.global.4
    243, // llvm.nvvm.cp.async.ca.shared.global.4.s
    243, // llvm.nvvm.cp.async.ca.shared.global.8
    243, // llvm.nvvm.cp.async.ca.shared.global.8.s
    243, // llvm.nvvm.cp.async.cg.shared.global.16
    243, // llvm.nvvm.cp.async.cg.shared.global.16.s
    10, // llvm.nvvm.cp.async.commit.group
    242, // llvm.nvvm.cp.async.mbarrier.arrive
    242, // llvm.nvvm.cp.async.mbarrier.arrive.noinc
    242, // llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared
    242, // llvm.nvvm.cp.async.mbarrier.arrive.shared
    10, // llvm.nvvm.cp.async.wait.all
    220, // llvm.nvvm.cp.async.wait.group
    5, // llvm.nvvm.d2f.rm
    5, // llvm.nvvm.d2f.rm.ftz
    5, // llvm.nvvm.d2f.rn
    5, // llvm.nvvm.d2f.rn.ftz
    5, // llvm.nvvm.d2f.rp
    5, // llvm.nvvm.d2f.rp.ftz
    5, // llvm.nvvm.d2f.rz
    5, // llvm.nvvm.d2f.rz.ftz
    5, // llvm.nvvm.d2i.hi
    5, // llvm.nvvm.d2i.lo
    5, // llvm.nvvm.d2i.rm
    5, // llvm.nvvm.d2i.rn
    5, // llvm.nvvm.d2i.rp
    5, // llvm.nvvm.d2i.rz
    5, // llvm.nvvm.d2ll.rm
    5, // llvm.nvvm.d2ll.rn
    5, // llvm.nvvm.d2ll.rp
    5, // llvm.nvvm.d2ll.rz
    5, // llvm.nvvm.d2ui.rm
    5, // llvm.nvvm.d2ui.rn
    5, // llvm.nvvm.d2ui.rp
    5, // llvm.nvvm.d2ui.rz
    5, // llvm.nvvm.d2ull.rm
    5, // llvm.nvvm.d2ull.rn
    5, // llvm.nvvm.d2ull.rp
    5, // llvm.nvvm.d2ull.rz
    2, // llvm.nvvm.div.approx.f
    2, // llvm.nvvm.div.approx.ftz.f
    2, // llvm.nvvm.div.rm.d
    2, // llvm.nvvm.div.rm.f
    2, // llvm.nvvm.div.rm.ftz.f
    2, // llvm.nvvm.div.rn.d
    2, // llvm.nvvm.div.rn.f
    2, // llvm.nvvm.div.rn.ftz.f
    2, // llvm.nvvm.div.rp.d
    2, // llvm.nvvm.div.rp.f
    2, // llvm.nvvm.div.rp.ftz.f
    2, // llvm.nvvm.div.rz.d
    2, // llvm.nvvm.div.rz.f
    2, // llvm.nvvm.div.rz.ftz.f
    2, // llvm.nvvm.ex2.approx.d
    2, // llvm.nvvm.ex2.approx.f
    2, // llvm.nvvm.ex2.approx.f16
    2, // llvm.nvvm.ex2.approx.f16x2
    2, // llvm.nvvm.ex2.approx.ftz.f
    244, // llvm.nvvm.f2bf16.rn
    244, // llvm.nvvm.f2bf16.rn.relu
    244, // llvm.nvvm.f2bf16.rz
    244, // llvm.nvvm.f2bf16.rz.relu
    5, // llvm.nvvm.f2h.rn
    5, // llvm.nvvm.f2h.rn.ftz
    5, // llvm.nvvm.f2i.rm
    5, // llvm.nvvm.f2i.rm.ftz
    5, // llvm.nvvm.f2i.rn
    5, // llvm.nvvm.f2i.rn.ftz
    5, // llvm.nvvm.f2i.rp
    5, // llvm.nvvm.f2i.rp.ftz
    5, // llvm.nvvm.f2i.rz
    5, // llvm.nvvm.f2i.rz.ftz
    5, // llvm.nvvm.f2ll.rm
    5, // llvm.nvvm.f2ll.rm.ftz
    5, // llvm.nvvm.f2ll.rn
    5, // llvm.nvvm.f2ll.rn.ftz
    5, // llvm.nvvm.f2ll.rp
    5, // llvm.nvvm.f2ll.rp.ftz
    5, // llvm.nvvm.f2ll.rz
    5, // llvm.nvvm.f2ll.rz.ftz
    244, // llvm.nvvm.f2tf32.rna
    5, // llvm.nvvm.f2ui.rm
    5, // llvm.nvvm.f2ui.rm.ftz
    5, // llvm.nvvm.f2ui.rn
    5, // llvm.nvvm.f2ui.rn.ftz
    5, // llvm.nvvm.f2ui.rp
    5, // llvm.nvvm.f2ui.rp.ftz
    5, // llvm.nvvm.f2ui.rz
    5, // llvm.nvvm.f2ui.rz.ftz
    5, // llvm.nvvm.f2ull.rm
    5, // llvm.nvvm.f2ull.rm.ftz
    5, // llvm.nvvm.f2ull.rn
    5, // llvm.nvvm.f2ull.rn.ftz
    5, // llvm.nvvm.f2ull.rp
    5, // llvm.nvvm.f2ull.rp.ftz
    5, // llvm.nvvm.f2ull.rz
    5, // llvm.nvvm.f2ull.rz.ftz
    5, // llvm.nvvm.fabs.d
    5, // llvm.nvvm.fabs.f
    5, // llvm.nvvm.fabs.ftz.f
    70, // llvm.nvvm.fence.sc.cluster
    244, // llvm.nvvm.ff2bf16x2.rn
    244, // llvm.nvvm.ff2bf16x2.rn.relu
    244, // llvm.nvvm.ff2bf16x2.rz
    244, // llvm.nvvm.ff2bf16x2.rz.relu
    244, // llvm.nvvm.ff2f16x2.rn
    244, // llvm.nvvm.ff2f16x2.rn.relu
    244, // llvm.nvvm.ff2f16x2.rz
    244, // llvm.nvvm.ff2f16x2.rz.relu
    5, // llvm.nvvm.floor.d
    5, // llvm.nvvm.floor.f
    5, // llvm.nvvm.floor.ftz.f
    5, // llvm.nvvm.fma.rm.d
    5, // llvm.nvvm.fma.rm.f
    5, // llvm.nvvm.fma.rm.ftz.f
    5, // llvm.nvvm.fma.rn.bf16
    5, // llvm.nvvm.fma.rn.bf16x2
    5, // llvm.nvvm.fma.rn.d
    5, // llvm.nvvm.fma.rn.f
    5, // llvm.nvvm.fma.rn.f16
    5, // llvm.nvvm.fma.rn.f16x2
    5, // llvm.nvvm.fma.rn.ftz.bf16
    5, // llvm.nvvm.fma.rn.ftz.bf16x2
    5, // llvm.nvvm.fma.rn.ftz.f
    5, // llvm.nvvm.fma.rn.ftz.f16
    5, // llvm.nvvm.fma.rn.ftz.f16x2
    5, // llvm.nvvm.fma.rn.ftz.relu.bf16
    5, // llvm.nvvm.fma.rn.ftz.relu.bf16x2
    5, // llvm.nvvm.fma.rn.ftz.relu.f16
    5, // llvm.nvvm.fma.rn.ftz.relu.f16x2
    5, // llvm.nvvm.fma.rn.ftz.sat.bf16
    5, // llvm.nvvm.fma.rn.ftz.sat.bf16x2
    5, // llvm.nvvm.fma.rn.ftz.sat.f16
    5, // llvm.nvvm.fma.rn.ftz.sat.f16x2
    5, // llvm.nvvm.fma.rn.relu.bf16
    5, // llvm.nvvm.fma.rn.relu.bf16x2
    5, // llvm.nvvm.fma.rn.relu.f16
    5, // llvm.nvvm.fma.rn.relu.f16x2
    5, // llvm.nvvm.fma.rn.sat.bf16
    5, // llvm.nvvm.fma.rn.sat.bf16x2
    5, // llvm.nvvm.fma.rn.sat.f16
    5, // llvm.nvvm.fma.rn.sat.f16x2
    5, // llvm.nvvm.fma.rp.d
    5, // llvm.nvvm.fma.rp.f
    5, // llvm.nvvm.fma.rp.ftz.f
    5, // llvm.nvvm.fma.rz.d
    5, // llvm.nvvm.fma.rz.f
    5, // llvm.nvvm.fma.rz.ftz.f
    5, // llvm.nvvm.fmax.bf16
    5, // llvm.nvvm.fmax.bf16x2
    5, // llvm.nvvm.fmax.d
    5, // llvm.nvvm.fmax.f
    5, // llvm.nvvm.fmax.f16
    5, // llvm.nvvm.fmax.f16x2
    5, // llvm.nvvm.fmax.ftz.bf16
    5, // llvm.nvvm.fmax.ftz.bf16x2
    5, // llvm.nvvm.fmax.ftz.f
    5, // llvm.nvvm.fmax.ftz.f16
    5, // llvm.nvvm.fmax.ftz.f16x2
    5, // llvm.nvvm.fmax.ftz.nan.bf16
    5, // llvm.nvvm.fmax.ftz.nan.bf16x2
    5, // llvm.nvvm.fmax.ftz.nan.f
    5, // llvm.nvvm.fmax.ftz.nan.f16
    5, // llvm.nvvm.fmax.ftz.nan.f16x2
    5, // llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16
    5, // llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmax.ftz.nan.xorsign.abs.f
    5, // llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16
    5, // llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2
    5, // llvm.nvvm.fmax.ftz.xorsign.abs.bf16
    5, // llvm.nvvm.fmax.ftz.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmax.ftz.xorsign.abs.f
    5, // llvm.nvvm.fmax.ftz.xorsign.abs.f16
    5, // llvm.nvvm.fmax.ftz.xorsign.abs.f16x2
    5, // llvm.nvvm.fmax.nan.bf16
    5, // llvm.nvvm.fmax.nan.bf16x2
    5, // llvm.nvvm.fmax.nan.f
    5, // llvm.nvvm.fmax.nan.f16
    5, // llvm.nvvm.fmax.nan.f16x2
    5, // llvm.nvvm.fmax.nan.xorsign.abs.bf16
    5, // llvm.nvvm.fmax.nan.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmax.nan.xorsign.abs.f
    5, // llvm.nvvm.fmax.nan.xorsign.abs.f16
    5, // llvm.nvvm.fmax.nan.xorsign.abs.f16x2
    5, // llvm.nvvm.fmax.xorsign.abs.bf16
    5, // llvm.nvvm.fmax.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmax.xorsign.abs.f
    5, // llvm.nvvm.fmax.xorsign.abs.f16
    5, // llvm.nvvm.fmax.xorsign.abs.f16x2
    5, // llvm.nvvm.fmin.bf16
    5, // llvm.nvvm.fmin.bf16x2
    5, // llvm.nvvm.fmin.d
    5, // llvm.nvvm.fmin.f
    5, // llvm.nvvm.fmin.f16
    5, // llvm.nvvm.fmin.f16x2
    5, // llvm.nvvm.fmin.ftz.bf16
    5, // llvm.nvvm.fmin.ftz.bf16x2
    5, // llvm.nvvm.fmin.ftz.f
    5, // llvm.nvvm.fmin.ftz.f16
    5, // llvm.nvvm.fmin.ftz.f16x2
    5, // llvm.nvvm.fmin.ftz.nan.bf16
    5, // llvm.nvvm.fmin.ftz.nan.bf16x2
    5, // llvm.nvvm.fmin.ftz.nan.f
    5, // llvm.nvvm.fmin.ftz.nan.f16
    5, // llvm.nvvm.fmin.ftz.nan.f16x2
    5, // llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16
    5, // llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmin.ftz.nan.xorsign.abs.f
    5, // llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16
    5, // llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2
    5, // llvm.nvvm.fmin.ftz.xorsign.abs.bf16
    5, // llvm.nvvm.fmin.ftz.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmin.ftz.xorsign.abs.f
    5, // llvm.nvvm.fmin.ftz.xorsign.abs.f16
    5, // llvm.nvvm.fmin.ftz.xorsign.abs.f16x2
    5, // llvm.nvvm.fmin.nan.bf16
    5, // llvm.nvvm.fmin.nan.bf16x2
    5, // llvm.nvvm.fmin.nan.f
    5, // llvm.nvvm.fmin.nan.f16
    5, // llvm.nvvm.fmin.nan.f16x2
    5, // llvm.nvvm.fmin.nan.xorsign.abs.bf16
    5, // llvm.nvvm.fmin.nan.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmin.nan.xorsign.abs.f
    5, // llvm.nvvm.fmin.nan.xorsign.abs.f16
    5, // llvm.nvvm.fmin.nan.xorsign.abs.f16x2
    5, // llvm.nvvm.fmin.xorsign.abs.bf16
    5, // llvm.nvvm.fmin.xorsign.abs.bf16x2
    5, // llvm.nvvm.fmin.xorsign.abs.f
    5, // llvm.nvvm.fmin.xorsign.abs.f16
    5, // llvm.nvvm.fmin.xorsign.abs.f16x2
    2, // llvm.nvvm.fns
    160, // llvm.nvvm.getctarank
    160, // llvm.nvvm.getctarank.shared.cluster
    5, // llvm.nvvm.i2d.rm
    5, // llvm.nvvm.i2d.rn
    5, // llvm.nvvm.i2d.rp
    5, // llvm.nvvm.i2d.rz
    5, // llvm.nvvm.i2f.rm
    5, // llvm.nvvm.i2f.rn
    5, // llvm.nvvm.i2f.rp
    5, // llvm.nvvm.i2f.rz
    245, // llvm.nvvm.is_explicit_cluster
    160, // llvm.nvvm.isspacep.const
    160, // llvm.nvvm.isspacep.global
    160, // llvm.nvvm.isspacep.local
    160, // llvm.nvvm.isspacep.shared
    160, // llvm.nvvm.isspacep.shared.cluster
    12, // llvm.nvvm.istypep.sampler
    12, // llvm.nvvm.istypep.surface
    12, // llvm.nvvm.istypep.texture
    246, // llvm.nvvm.ldg.global.f
    246, // llvm.nvvm.ldg.global.i
    246, // llvm.nvvm.ldg.global.p
    247, // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16
    247, // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16
    247, // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16
    247, // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16
    247, // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16
    247, // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16
    246, // llvm.nvvm.ldu.global.f
    246, // llvm.nvvm.ldu.global.i
    246, // llvm.nvvm.ldu.global.p
    2, // llvm.nvvm.lg2.approx.d
    2, // llvm.nvvm.lg2.approx.f
    2, // llvm.nvvm.lg2.approx.ftz.f
    5, // llvm.nvvm.ll2d.rm
    5, // llvm.nvvm.ll2d.rn
    5, // llvm.nvvm.ll2d.rp
    5, // llvm.nvvm.ll2d.rz
    5, // llvm.nvvm.ll2f.rm
    5, // llvm.nvvm.ll2f.rn
    5, // llvm.nvvm.ll2f.rp
    5, // llvm.nvvm.ll2f.rz
    5, // llvm.nvvm.lohi.i2d
    160, // llvm.nvvm.mapa
    160, // llvm.nvvm.mapa.shared.cluster
    248, // llvm.nvvm.match.all.sync.i32p
    248, // llvm.nvvm.match.all.sync.i64p
    248, // llvm.nvvm.match.any.sync.i32
    248, // llvm.nvvm.match.any.sync.i64
    242, // llvm.nvvm.mbarrier.arrive
    242, // llvm.nvvm.mbarrier.arrive.drop
    242, // llvm.nvvm.mbarrier.arrive.drop.noComplete
    242, // llvm.nvvm.mbarrier.arrive.drop.noComplete.shared
    242, // llvm.nvvm.mbarrier.arrive.drop.shared
    242, // llvm.nvvm.mbarrier.arrive.noComplete
    242, // llvm.nvvm.mbarrier.arrive.noComplete.shared
    242, // llvm.nvvm.mbarrier.arrive.shared
    242, // llvm.nvvm.mbarrier.init
    242, // llvm.nvvm.mbarrier.init.shared
    249, // llvm.nvvm.mbarrier.inval
    249, // llvm.nvvm.mbarrier.inval.shared
    250, // llvm.nvvm.mbarrier.pending.count
    242, // llvm.nvvm.mbarrier.test.wait
    242, // llvm.nvvm.mbarrier.test.wait.shared
    70, // llvm.nvvm.membar.cta
    70, // llvm.nvvm.membar.gl
    70, // llvm.nvvm.membar.sys
    244, // llvm.nvvm.mma.and.popc.m16n8k128.row.col.b1
    244, // llvm.nvvm.mma.and.popc.m16n8k256.row.col.b1
    244, // llvm.nvvm.mma.and.popc.m8n8k128.row.col.b1
    244, // llvm.nvvm.mma.m16n8k16.row.col.bf16
    244, // llvm.nvvm.mma.m16n8k16.row.col.f16.f16
    244, // llvm.nvvm.mma.m16n8k16.row.col.f16.f32
    244, // llvm.nvvm.mma.m16n8k16.row.col.f32.f16
    244, // llvm.nvvm.mma.m16n8k16.row.col.f32.f32
    244, // llvm.nvvm.mma.m16n8k16.row.col.s8
    244, // llvm.nvvm.mma.m16n8k16.row.col.s8.u8
    244, // llvm.nvvm.mma.m16n8k16.row.col.satfinite.s8
    244, // llvm.nvvm.mma.m16n8k16.row.col.satfinite.s8.u8
    244, // llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8
    244, // llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8.s8
    244, // llvm.nvvm.mma.m16n8k16.row.col.u8
    244, // llvm.nvvm.mma.m16n8k16.row.col.u8.s8
    244, // llvm.nvvm.mma.m16n8k32.row.col.s4
    244, // llvm.nvvm.mma.m16n8k32.row.col.s4.u4
    244, // llvm.nvvm.mma.m16n8k32.row.col.s8
    244, // llvm.nvvm.mma.m16n8k32.row.col.s8.u8
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s4
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s4.u4
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s8
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s8.u8
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u4
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u4.s4
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8
    244, // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8.s8
    244, // llvm.nvvm.mma.m16n8k32.row.col.u4
    244, // llvm.nvvm.mma.m16n8k32.row.col.u4.s4
    244, // llvm.nvvm.mma.m16n8k32.row.col.u8
    244, // llvm.nvvm.mma.m16n8k32.row.col.u8.s8
    244, // llvm.nvvm.mma.m16n8k4.row.col.tf32
    244, // llvm.nvvm.mma.m16n8k64.row.col.s4
    244, // llvm.nvvm.mma.m16n8k64.row.col.s4.u4
    244, // llvm.nvvm.mma.m16n8k64.row.col.satfinite.s4
    244, // llvm.nvvm.mma.m16n8k64.row.col.satfinite.s4.u4
    244, // llvm.nvvm.mma.m16n8k64.row.col.satfinite.u4
    244, // llvm.nvvm.mma.m16n8k64.row.col.satfinite.u4.s4
    244, // llvm.nvvm.mma.m16n8k64.row.col.u4
    244, // llvm.nvvm.mma.m16n8k64.row.col.u4.s4
    244, // llvm.nvvm.mma.m16n8k8.row.col.bf16
    244, // llvm.nvvm.mma.m16n8k8.row.col.f16.f16
    244, // llvm.nvvm.mma.m16n8k8.row.col.f32.f32
    244, // llvm.nvvm.mma.m16n8k8.row.col.tf32
    244, // llvm.nvvm.mma.m8n8k16.row.col.s8
    244, // llvm.nvvm.mma.m8n8k16.row.col.s8.u8
    244, // llvm.nvvm.mma.m8n8k16.row.col.satfinite.s8
    244, // llvm.nvvm.mma.m8n8k16.row.col.satfinite.s8.u8
    244, // llvm.nvvm.mma.m8n8k16.row.col.satfinite.u8
    244, // llvm.nvvm.mma.m8n8k16.row.col.satfinite.u8.s8
    244, // llvm.nvvm.mma.m8n8k16.row.col.u8
    244, // llvm.nvvm.mma.m8n8k16.row.col.u8.s8
    244, // llvm.nvvm.mma.m8n8k32.row.col.s4
    244, // llvm.nvvm.mma.m8n8k32.row.col.s4.u4
    244, // llvm.nvvm.mma.m8n8k32.row.col.satfinite.s4
    244, // llvm.nvvm.mma.m8n8k32.row.col.satfinite.s4.u4
    244, // llvm.nvvm.mma.m8n8k32.row.col.satfinite.u4
    244, // llvm.nvvm.mma.m8n8k32.row.col.satfinite.u4.s4
    244, // llvm.nvvm.mma.m8n8k32.row.col.u4
    244, // llvm.nvvm.mma.m8n8k32.row.col.u4.s4
    244, // llvm.nvvm.mma.m8n8k4.col.col.f16.f16
    244, // llvm.nvvm.mma.m8n8k4.col.col.f32.f16
    244, // llvm.nvvm.mma.m8n8k4.col.col.f32.f32
    244, // llvm.nvvm.mma.m8n8k4.col.row.f16.f16
    244, // llvm.nvvm.mma.m8n8k4.col.row.f32.f16
    244, // llvm.nvvm.mma.m8n8k4.col.row.f32.f32
    244, // llvm.nvvm.mma.m8n8k4.row.col.f16.f16
    244, // llvm.nvvm.mma.m8n8k4.row.col.f32.f16
    244, // llvm.nvvm.mma.m8n8k4.row.col.f32.f32
    244, // llvm.nvvm.mma.m8n8k4.row.col.f64
    244, // llvm.nvvm.mma.m8n8k4.row.row.f16.f16
    244, // llvm.nvvm.mma.m8n8k4.row.row.f32.f16
    244, // llvm.nvvm.mma.m8n8k4.row.row.f32.f32
    244, // llvm.nvvm.mma.xor.popc.m16n8k128.row.col.b1
    244, // llvm.nvvm.mma.xor.popc.m16n8k256.row.col.b1
    244, // llvm.nvvm.mma.xor.popc.m8n8k128.row.col.b1
    12, // llvm.nvvm.move.double
    12, // llvm.nvvm.move.float
    12, // llvm.nvvm.move.i16
    12, // llvm.nvvm.move.i32
    12, // llvm.nvvm.move.i64
    18, // llvm.nvvm.move.ptr
    5, // llvm.nvvm.mul.rm.d
    5, // llvm.nvvm.mul.rm.f
    5, // llvm.nvvm.mul.rm.ftz.f
    5, // llvm.nvvm.mul.rn.d
    5, // llvm.nvvm.mul.rn.f
    5, // llvm.nvvm.mul.rn.ftz.f
    5, // llvm.nvvm.mul.rp.d
    5, // llvm.nvvm.mul.rp.f
    5, // llvm.nvvm.mul.rp.ftz.f
    5, // llvm.nvvm.mul.rz.d
    5, // llvm.nvvm.mul.rz.f
    5, // llvm.nvvm.mul.rz.ftz.f
    5, // llvm.nvvm.mul24.i
    5, // llvm.nvvm.mul24.ui
    5, // llvm.nvvm.mulhi.i
    5, // llvm.nvvm.mulhi.ll
    5, // llvm.nvvm.mulhi.ui
    5, // llvm.nvvm.mulhi.ull
    2, // llvm.nvvm.neg.bf16
    2, // llvm.nvvm.neg.bf16x2
    5, // llvm.nvvm.prmt
    5, // llvm.nvvm.ptr.constant.to.gen
    5, // llvm.nvvm.ptr.gen.to.constant
    5, // llvm.nvvm.ptr.gen.to.global
    5, // llvm.nvvm.ptr.gen.to.local
    251, // llvm.nvvm.ptr.gen.to.param
    5, // llvm.nvvm.ptr.gen.to.shared
    5, // llvm.nvvm.ptr.global.to.gen
    5, // llvm.nvvm.ptr.local.to.gen
    5, // llvm.nvvm.ptr.shared.to.gen
    2, // llvm.nvvm.rcp.approx.ftz.d
    2, // llvm.nvvm.rcp.approx.ftz.f
    2, // llvm.nvvm.rcp.rm.d
    2, // llvm.nvvm.rcp.rm.f
    2, // llvm.nvvm.rcp.rm.ftz.f
    2, // llvm.nvvm.rcp.rn.d
    2, // llvm.nvvm.rcp.rn.f
    2, // llvm.nvvm.rcp.rn.ftz.f
    2, // llvm.nvvm.rcp.rp.d
    2, // llvm.nvvm.rcp.rp.f
    2, // llvm.nvvm.rcp.rp.ftz.f
    2, // llvm.nvvm.rcp.rz.d
    2, // llvm.nvvm.rcp.rz.f
    2, // llvm.nvvm.rcp.rz.ftz.f
    252, // llvm.nvvm.read.ptx.sreg.clock
    252, // llvm.nvvm.read.ptx.sreg.clock64
    245, // llvm.nvvm.read.ptx.sreg.cluster.ctaid.w
    245, // llvm.nvvm.read.ptx.sreg.cluster.ctaid.x
    245, // llvm.nvvm.read.ptx.sreg.cluster.ctaid.y
    245, // llvm.nvvm.read.ptx.sreg.cluster.ctaid.z
    245, // llvm.nvvm.read.ptx.sreg.cluster.ctarank
    245, // llvm.nvvm.read.ptx.sreg.cluster.nctaid.w
    245, // llvm.nvvm.read.ptx.sreg.cluster.nctaid.x
    245, // llvm.nvvm.read.ptx.sreg.cluster.nctaid.y
    245, // llvm.nvvm.read.ptx.sreg.cluster.nctaid.z
    245, // llvm.nvvm.read.ptx.sreg.cluster.nctarank
    245, // llvm.nvvm.read.ptx.sreg.clusterid.w
    245, // llvm.nvvm.read.ptx.sreg.clusterid.x
    245, // llvm.nvvm.read.ptx.sreg.clusterid.y
    245, // llvm.nvvm.read.ptx.sreg.clusterid.z
    245, // llvm.nvvm.read.ptx.sreg.ctaid.w
    245, // llvm.nvvm.read.ptx.sreg.ctaid.x
    245, // llvm.nvvm.read.ptx.sreg.ctaid.y
    245, // llvm.nvvm.read.ptx.sreg.ctaid.z
    245, // llvm.nvvm.read.ptx.sreg.envreg0
    245, // llvm.nvvm.read.ptx.sreg.envreg1
    245, // llvm.nvvm.read.ptx.sreg.envreg10
    245, // llvm.nvvm.read.ptx.sreg.envreg11
    245, // llvm.nvvm.read.ptx.sreg.envreg12
    245, // llvm.nvvm.read.ptx.sreg.envreg13
    245, // llvm.nvvm.read.ptx.sreg.envreg14
    245, // llvm.nvvm.read.ptx.sreg.envreg15
    245, // llvm.nvvm.read.ptx.sreg.envreg16
    245, // llvm.nvvm.read.ptx.sreg.envreg17
    245, // llvm.nvvm.read.ptx.sreg.envreg18
    245, // llvm.nvvm.read.ptx.sreg.envreg19
    245, // llvm.nvvm.read.ptx.sreg.envreg2
    245, // llvm.nvvm.read.ptx.sreg.envreg20
    245, // llvm.nvvm.read.ptx.sreg.envreg21
    245, // llvm.nvvm.read.ptx.sreg.envreg22
    245, // llvm.nvvm.read.ptx.sreg.envreg23
    245, // llvm.nvvm.read.ptx.sreg.envreg24
    245, // llvm.nvvm.read.ptx.sreg.envreg25
    245, // llvm.nvvm.read.ptx.sreg.envreg26
    245, // llvm.nvvm.read.ptx.sreg.envreg27
    245, // llvm.nvvm.read.ptx.sreg.envreg28
    245, // llvm.nvvm.read.ptx.sreg.envreg29
    245, // llvm.nvvm.read.ptx.sreg.envreg3
    245, // llvm.nvvm.read.ptx.sreg.envreg30
    245, // llvm.nvvm.read.ptx.sreg.envreg31
    245, // llvm.nvvm.read.ptx.sreg.envreg4
    245, // llvm.nvvm.read.ptx.sreg.envreg5
    245, // llvm.nvvm.read.ptx.sreg.envreg6
    245, // llvm.nvvm.read.ptx.sreg.envreg7
    245, // llvm.nvvm.read.ptx.sreg.envreg8
    245, // llvm.nvvm.read.ptx.sreg.envreg9
    245, // llvm.nvvm.read.ptx.sreg.gridid
    245, // llvm.nvvm.read.ptx.sreg.laneid
    245, // llvm.nvvm.read.ptx.sreg.lanemask.eq
    245, // llvm.nvvm.read.ptx.sreg.lanemask.ge
    245, // llvm.nvvm.read.ptx.sreg.lanemask.gt
    245, // llvm.nvvm.read.ptx.sreg.lanemask.le
    245, // llvm.nvvm.read.ptx.sreg.lanemask.lt
    245, // llvm.nvvm.read.ptx.sreg.nclusterid.w
    245, // llvm.nvvm.read.ptx.sreg.nclusterid.x
    245, // llvm.nvvm.read.ptx.sreg.nclusterid.y
    245, // llvm.nvvm.read.ptx.sreg.nclusterid.z
    245, // llvm.nvvm.read.ptx.sreg.nctaid.w
    245, // llvm.nvvm.read.ptx.sreg.nctaid.x
    245, // llvm.nvvm.read.ptx.sreg.nctaid.y
    245, // llvm.nvvm.read.ptx.sreg.nctaid.z
    245, // llvm.nvvm.read.ptx.sreg.nsmid
    245, // llvm.nvvm.read.ptx.sreg.ntid.w
    245, // llvm.nvvm.read.ptx.sreg.ntid.x
    245, // llvm.nvvm.read.ptx.sreg.ntid.y
    245, // llvm.nvvm.read.ptx.sreg.ntid.z
    245, // llvm.nvvm.read.ptx.sreg.nwarpid
    252, // llvm.nvvm.read.ptx.sreg.pm0
    252, // llvm.nvvm.read.ptx.sreg.pm1
    252, // llvm.nvvm.read.ptx.sreg.pm2
    252, // llvm.nvvm.read.ptx.sreg.pm3
    245, // llvm.nvvm.read.ptx.sreg.smid
    245, // llvm.nvvm.read.ptx.sreg.tid.w
    245, // llvm.nvvm.read.ptx.sreg.tid.x
    245, // llvm.nvvm.read.ptx.sreg.tid.y
    245, // llvm.nvvm.read.ptx.sreg.tid.z
    245, // llvm.nvvm.read.ptx.sreg.warpid
    245, // llvm.nvvm.read.ptx.sreg.warpsize
    248, // llvm.nvvm.redux.sync.add
    248, // llvm.nvvm.redux.sync.and
    248, // llvm.nvvm.redux.sync.max
    248, // llvm.nvvm.redux.sync.min
    248, // llvm.nvvm.redux.sync.or
    248, // llvm.nvvm.redux.sync.umax
    248, // llvm.nvvm.redux.sync.umin
    248, // llvm.nvvm.redux.sync.xor
    12, // llvm.nvvm.reflect
    5, // llvm.nvvm.rotate.b32
    5, // llvm.nvvm.rotate.b64
    5, // llvm.nvvm.rotate.right.b64
    5, // llvm.nvvm.round.d
    5, // llvm.nvvm.round.f
    5, // llvm.nvvm.round.ftz.f
    2, // llvm.nvvm.rsqrt.approx.d
    2, // llvm.nvvm.rsqrt.approx.f
    2, // llvm.nvvm.rsqrt.approx.ftz.f
    2, // llvm.nvvm.sad.i
    2, // llvm.nvvm.sad.ui
    5, // llvm.nvvm.saturate.d
    5, // llvm.nvvm.saturate.f
    5, // llvm.nvvm.saturate.ftz.f
    248, // llvm.nvvm.shfl.bfly.f32
    248, // llvm.nvvm.shfl.bfly.f32p
    248, // llvm.nvvm.shfl.bfly.i32
    248, // llvm.nvvm.shfl.bfly.i32p
    248, // llvm.nvvm.shfl.down.f32
    248, // llvm.nvvm.shfl.down.f32p
    248, // llvm.nvvm.shfl.down.i32
    248, // llvm.nvvm.shfl.down.i32p
    248, // llvm.nvvm.shfl.idx.f32
    248, // llvm.nvvm.shfl.idx.f32p
    248, // llvm.nvvm.shfl.idx.i32
    248, // llvm.nvvm.shfl.idx.i32p
    248, // llvm.nvvm.shfl.sync.bfly.f32
    248, // llvm.nvvm.shfl.sync.bfly.f32p
    248, // llvm.nvvm.shfl.sync.bfly.i32
    248, // llvm.nvvm.shfl.sync.bfly.i32p
    248, // llvm.nvvm.shfl.sync.down.f32
    248, // llvm.nvvm.shfl.sync.down.f32p
    248, // llvm.nvvm.shfl.sync.down.i32
    248, // llvm.nvvm.shfl.sync.down.i32p
    248, // llvm.nvvm.shfl.sync.idx.f32
    248, // llvm.nvvm.shfl.sync.idx.f32p
    248, // llvm.nvvm.shfl.sync.idx.i32
    248, // llvm.nvvm.shfl.sync.idx.i32p
    248, // llvm.nvvm.shfl.sync.up.f32
    248, // llvm.nvvm.shfl.sync.up.f32p
    248, // llvm.nvvm.shfl.sync.up.i32
    248, // llvm.nvvm.shfl.sync.up.i32p
    248, // llvm.nvvm.shfl.up.f32
    248, // llvm.nvvm.shfl.up.f32p
    248, // llvm.nvvm.shfl.up.i32
    248, // llvm.nvvm.shfl.up.i32p
    2, // llvm.nvvm.sin.approx.f
    2, // llvm.nvvm.sin.approx.ftz.f
    2, // llvm.nvvm.sqrt.approx.f
    2, // llvm.nvvm.sqrt.approx.ftz.f
    2, // llvm.nvvm.sqrt.f
    2, // llvm.nvvm.sqrt.rm.d
    2, // llvm.nvvm.sqrt.rm.f
    2, // llvm.nvvm.sqrt.rm.ftz.f
    2, // llvm.nvvm.sqrt.rn.d
    2, // llvm.nvvm.sqrt.rn.f
    2, // llvm.nvvm.sqrt.rn.ftz.f
    2, // llvm.nvvm.sqrt.rp.d
    2, // llvm.nvvm.sqrt.rp.f
    2, // llvm.nvvm.sqrt.rp.ftz.f
    2, // llvm.nvvm.sqrt.rz.d
    2, // llvm.nvvm.sqrt.rz.f
    2, // llvm.nvvm.sqrt.rz.ftz.f
    10, // llvm.nvvm.suld.1d.array.i16.clamp
    10, // llvm.nvvm.suld.1d.array.i16.trap
    10, // llvm.nvvm.suld.1d.array.i16.zero
    10, // llvm.nvvm.suld.1d.array.i32.clamp
    10, // llvm.nvvm.suld.1d.array.i32.trap
    10, // llvm.nvvm.suld.1d.array.i32.zero
    10, // llvm.nvvm.suld.1d.array.i64.clamp
    10, // llvm.nvvm.suld.1d.array.i64.trap
    10, // llvm.nvvm.suld.1d.array.i64.zero
    10, // llvm.nvvm.suld.1d.array.i8.clamp
    10, // llvm.nvvm.suld.1d.array.i8.trap
    10, // llvm.nvvm.suld.1d.array.i8.zero
    10, // llvm.nvvm.suld.1d.array.v2i16.clamp
    10, // llvm.nvvm.suld.1d.array.v2i16.trap
    10, // llvm.nvvm.suld.1d.array.v2i16.zero
    10, // llvm.nvvm.suld.1d.array.v2i32.clamp
    10, // llvm.nvvm.suld.1d.array.v2i32.trap
    10, // llvm.nvvm.suld.1d.array.v2i32.zero
    10, // llvm.nvvm.suld.1d.array.v2i64.clamp
    10, // llvm.nvvm.suld.1d.array.v2i64.trap
    10, // llvm.nvvm.suld.1d.array.v2i64.zero
    10, // llvm.nvvm.suld.1d.array.v2i8.clamp
    10, // llvm.nvvm.suld.1d.array.v2i8.trap
    10, // llvm.nvvm.suld.1d.array.v2i8.zero
    10, // llvm.nvvm.suld.1d.array.v4i16.clamp
    10, // llvm.nvvm.suld.1d.array.v4i16.trap
    10, // llvm.nvvm.suld.1d.array.v4i16.zero
    10, // llvm.nvvm.suld.1d.array.v4i32.clamp
    10, // llvm.nvvm.suld.1d.array.v4i32.trap
    10, // llvm.nvvm.suld.1d.array.v4i32.zero
    10, // llvm.nvvm.suld.1d.array.v4i8.clamp
    10, // llvm.nvvm.suld.1d.array.v4i8.trap
    10, // llvm.nvvm.suld.1d.array.v4i8.zero
    10, // llvm.nvvm.suld.1d.i16.clamp
    10, // llvm.nvvm.suld.1d.i16.trap
    10, // llvm.nvvm.suld.1d.i16.zero
    10, // llvm.nvvm.suld.1d.i32.clamp
    10, // llvm.nvvm.suld.1d.i32.trap
    10, // llvm.nvvm.suld.1d.i32.zero
    10, // llvm.nvvm.suld.1d.i64.clamp
    10, // llvm.nvvm.suld.1d.i64.trap
    10, // llvm.nvvm.suld.1d.i64.zero
    10, // llvm.nvvm.suld.1d.i8.clamp
    10, // llvm.nvvm.suld.1d.i8.trap
    10, // llvm.nvvm.suld.1d.i8.zero
    10, // llvm.nvvm.suld.1d.v2i16.clamp
    10, // llvm.nvvm.suld.1d.v2i16.trap
    10, // llvm.nvvm.suld.1d.v2i16.zero
    10, // llvm.nvvm.suld.1d.v2i32.clamp
    10, // llvm.nvvm.suld.1d.v2i32.trap
    10, // llvm.nvvm.suld.1d.v2i32.zero
    10, // llvm.nvvm.suld.1d.v2i64.clamp
    10, // llvm.nvvm.suld.1d.v2i64.trap
    10, // llvm.nvvm.suld.1d.v2i64.zero
    10, // llvm.nvvm.suld.1d.v2i8.clamp
    10, // llvm.nvvm.suld.1d.v2i8.trap
    10, // llvm.nvvm.suld.1d.v2i8.zero
    10, // llvm.nvvm.suld.1d.v4i16.clamp
    10, // llvm.nvvm.suld.1d.v4i16.trap
    10, // llvm.nvvm.suld.1d.v4i16.zero
    10, // llvm.nvvm.suld.1d.v4i32.clamp
    10, // llvm.nvvm.suld.1d.v4i32.trap
    10, // llvm.nvvm.suld.1d.v4i32.zero
    10, // llvm.nvvm.suld.1d.v4i8.clamp
    10, // llvm.nvvm.suld.1d.v4i8.trap
    10, // llvm.nvvm.suld.1d.v4i8.zero
    10, // llvm.nvvm.suld.2d.array.i16.clamp
    10, // llvm.nvvm.suld.2d.array.i16.trap
    10, // llvm.nvvm.suld.2d.array.i16.zero
    10, // llvm.nvvm.suld.2d.array.i32.clamp
    10, // llvm.nvvm.suld.2d.array.i32.trap
    10, // llvm.nvvm.suld.2d.array.i32.zero
    10, // llvm.nvvm.suld.2d.array.i64.clamp
    10, // llvm.nvvm.suld.2d.array.i64.trap
    10, // llvm.nvvm.suld.2d.array.i64.zero
    10, // llvm.nvvm.suld.2d.array.i8.clamp
    10, // llvm.nvvm.suld.2d.array.i8.trap
    10, // llvm.nvvm.suld.2d.array.i8.zero
    10, // llvm.nvvm.suld.2d.array.v2i16.clamp
    10, // llvm.nvvm.suld.2d.array.v2i16.trap
    10, // llvm.nvvm.suld.2d.array.v2i16.zero
    10, // llvm.nvvm.suld.2d.array.v2i32.clamp
    10, // llvm.nvvm.suld.2d.array.v2i32.trap
    10, // llvm.nvvm.suld.2d.array.v2i32.zero
    10, // llvm.nvvm.suld.2d.array.v2i64.clamp
    10, // llvm.nvvm.suld.2d.array.v2i64.trap
    10, // llvm.nvvm.suld.2d.array.v2i64.zero
    10, // llvm.nvvm.suld.2d.array.v2i8.clamp
    10, // llvm.nvvm.suld.2d.array.v2i8.trap
    10, // llvm.nvvm.suld.2d.array.v2i8.zero
    10, // llvm.nvvm.suld.2d.array.v4i16.clamp
    10, // llvm.nvvm.suld.2d.array.v4i16.trap
    10, // llvm.nvvm.suld.2d.array.v4i16.zero
    10, // llvm.nvvm.suld.2d.array.v4i32.clamp
    10, // llvm.nvvm.suld.2d.array.v4i32.trap
    10, // llvm.nvvm.suld.2d.array.v4i32.zero
    10, // llvm.nvvm.suld.2d.array.v4i8.clamp
    10, // llvm.nvvm.suld.2d.array.v4i8.trap
    10, // llvm.nvvm.suld.2d.array.v4i8.zero
    10, // llvm.nvvm.suld.2d.i16.clamp
    10, // llvm.nvvm.suld.2d.i16.trap
    10, // llvm.nvvm.suld.2d.i16.zero
    10, // llvm.nvvm.suld.2d.i32.clamp
    10, // llvm.nvvm.suld.2d.i32.trap
    10, // llvm.nvvm.suld.2d.i32.zero
    10, // llvm.nvvm.suld.2d.i64.clamp
    10, // llvm.nvvm.suld.2d.i64.trap
    10, // llvm.nvvm.suld.2d.i64.zero
    10, // llvm.nvvm.suld.2d.i8.clamp
    10, // llvm.nvvm.suld.2d.i8.trap
    10, // llvm.nvvm.suld.2d.i8.zero
    10, // llvm.nvvm.suld.2d.v2i16.clamp
    10, // llvm.nvvm.suld.2d.v2i16.trap
    10, // llvm.nvvm.suld.2d.v2i16.zero
    10, // llvm.nvvm.suld.2d.v2i32.clamp
    10, // llvm.nvvm.suld.2d.v2i32.trap
    10, // llvm.nvvm.suld.2d.v2i32.zero
    10, // llvm.nvvm.suld.2d.v2i64.clamp
    10, // llvm.nvvm.suld.2d.v2i64.trap
    10, // llvm.nvvm.suld.2d.v2i64.zero
    10, // llvm.nvvm.suld.2d.v2i8.clamp
    10, // llvm.nvvm.suld.2d.v2i8.trap
    10, // llvm.nvvm.suld.2d.v2i8.zero
    10, // llvm.nvvm.suld.2d.v4i16.clamp
    10, // llvm.nvvm.suld.2d.v4i16.trap
    10, // llvm.nvvm.suld.2d.v4i16.zero
    10, // llvm.nvvm.suld.2d.v4i32.clamp
    10, // llvm.nvvm.suld.2d.v4i32.trap
    10, // llvm.nvvm.suld.2d.v4i32.zero
    10, // llvm.nvvm.suld.2d.v4i8.clamp
    10, // llvm.nvvm.suld.2d.v4i8.trap
    10, // llvm.nvvm.suld.2d.v4i8.zero
    10, // llvm.nvvm.suld.3d.i16.clamp
    10, // llvm.nvvm.suld.3d.i16.trap
    10, // llvm.nvvm.suld.3d.i16.zero
    10, // llvm.nvvm.suld.3d.i32.clamp
    10, // llvm.nvvm.suld.3d.i32.trap
    10, // llvm.nvvm.suld.3d.i32.zero
    10, // llvm.nvvm.suld.3d.i64.clamp
    10, // llvm.nvvm.suld.3d.i64.trap
    10, // llvm.nvvm.suld.3d.i64.zero
    10, // llvm.nvvm.suld.3d.i8.clamp
    10, // llvm.nvvm.suld.3d.i8.trap
    10, // llvm.nvvm.suld.3d.i8.zero
    10, // llvm.nvvm.suld.3d.v2i16.clamp
    10, // llvm.nvvm.suld.3d.v2i16.trap
    10, // llvm.nvvm.suld.3d.v2i16.zero
    10, // llvm.nvvm.suld.3d.v2i32.clamp
    10, // llvm.nvvm.suld.3d.v2i32.trap
    10, // llvm.nvvm.suld.3d.v2i32.zero
    10, // llvm.nvvm.suld.3d.v2i64.clamp
    10, // llvm.nvvm.suld.3d.v2i64.trap
    10, // llvm.nvvm.suld.3d.v2i64.zero
    10, // llvm.nvvm.suld.3d.v2i8.clamp
    10, // llvm.nvvm.suld.3d.v2i8.trap
    10, // llvm.nvvm.suld.3d.v2i8.zero
    10, // llvm.nvvm.suld.3d.v4i16.clamp
    10, // llvm.nvvm.suld.3d.v4i16.trap
    10, // llvm.nvvm.suld.3d.v4i16.zero
    10, // llvm.nvvm.suld.3d.v4i32.clamp
    10, // llvm.nvvm.suld.3d.v4i32.trap
    10, // llvm.nvvm.suld.3d.v4i32.zero
    10, // llvm.nvvm.suld.3d.v4i8.clamp
    10, // llvm.nvvm.suld.3d.v4i8.trap
    10, // llvm.nvvm.suld.3d.v4i8.zero
    12, // llvm.nvvm.suq.array.size
    12, // llvm.nvvm.suq.channel.data.type
    12, // llvm.nvvm.suq.channel.order
    12, // llvm.nvvm.suq.depth
    12, // llvm.nvvm.suq.height
    12, // llvm.nvvm.suq.width
    10, // llvm.nvvm.sust.b.1d.array.i16.clamp
    10, // llvm.nvvm.sust.b.1d.array.i16.trap
    10, // llvm.nvvm.sust.b.1d.array.i16.zero
    10, // llvm.nvvm.sust.b.1d.array.i32.clamp
    10, // llvm.nvvm.sust.b.1d.array.i32.trap
    10, // llvm.nvvm.sust.b.1d.array.i32.zero
    10, // llvm.nvvm.sust.b.1d.array.i64.clamp
    10, // llvm.nvvm.sust.b.1d.array.i64.trap
    10, // llvm.nvvm.sust.b.1d.array.i64.zero
    10, // llvm.nvvm.sust.b.1d.array.i8.clamp
    10, // llvm.nvvm.sust.b.1d.array.i8.trap
    10, // llvm.nvvm.sust.b.1d.array.i8.zero
    10, // llvm.nvvm.sust.b.1d.array.v2i16.clamp
    10, // llvm.nvvm.sust.b.1d.array.v2i16.trap
    10, // llvm.nvvm.sust.b.1d.array.v2i16.zero
    10, // llvm.nvvm.sust.b.1d.array.v2i32.clamp
    10, // llvm.nvvm.sust.b.1d.array.v2i32.trap
    10, // llvm.nvvm.sust.b.1d.array.v2i32.zero
    10, // llvm.nvvm.sust.b.1d.array.v2i64.clamp
    10, // llvm.nvvm.sust.b.1d.array.v2i64.trap
    10, // llvm.nvvm.sust.b.1d.array.v2i64.zero
    10, // llvm.nvvm.sust.b.1d.array.v2i8.clamp
    10, // llvm.nvvm.sust.b.1d.array.v2i8.trap
    10, // llvm.nvvm.sust.b.1d.array.v2i8.zero
    10, // llvm.nvvm.sust.b.1d.array.v4i16.clamp
    10, // llvm.nvvm.sust.b.1d.array.v4i16.trap
    10, // llvm.nvvm.sust.b.1d.array.v4i16.zero
    10, // llvm.nvvm.sust.b.1d.array.v4i32.clamp
    10, // llvm.nvvm.sust.b.1d.array.v4i32.trap
    10, // llvm.nvvm.sust.b.1d.array.v4i32.zero
    10, // llvm.nvvm.sust.b.1d.array.v4i8.clamp
    10, // llvm.nvvm.sust.b.1d.array.v4i8.trap
    10, // llvm.nvvm.sust.b.1d.array.v4i8.zero
    10, // llvm.nvvm.sust.b.1d.i16.clamp
    10, // llvm.nvvm.sust.b.1d.i16.trap
    10, // llvm.nvvm.sust.b.1d.i16.zero
    10, // llvm.nvvm.sust.b.1d.i32.clamp
    10, // llvm.nvvm.sust.b.1d.i32.trap
    10, // llvm.nvvm.sust.b.1d.i32.zero
    10, // llvm.nvvm.sust.b.1d.i64.clamp
    10, // llvm.nvvm.sust.b.1d.i64.trap
    10, // llvm.nvvm.sust.b.1d.i64.zero
    10, // llvm.nvvm.sust.b.1d.i8.clamp
    10, // llvm.nvvm.sust.b.1d.i8.trap
    10, // llvm.nvvm.sust.b.1d.i8.zero
    10, // llvm.nvvm.sust.b.1d.v2i16.clamp
    10, // llvm.nvvm.sust.b.1d.v2i16.trap
    10, // llvm.nvvm.sust.b.1d.v2i16.zero
    10, // llvm.nvvm.sust.b.1d.v2i32.clamp
    10, // llvm.nvvm.sust.b.1d.v2i32.trap
    10, // llvm.nvvm.sust.b.1d.v2i32.zero
    10, // llvm.nvvm.sust.b.1d.v2i64.clamp
    10, // llvm.nvvm.sust.b.1d.v2i64.trap
    10, // llvm.nvvm.sust.b.1d.v2i64.zero
    10, // llvm.nvvm.sust.b.1d.v2i8.clamp
    10, // llvm.nvvm.sust.b.1d.v2i8.trap
    10, // llvm.nvvm.sust.b.1d.v2i8.zero
    10, // llvm.nvvm.sust.b.1d.v4i16.clamp
    10, // llvm.nvvm.sust.b.1d.v4i16.trap
    10, // llvm.nvvm.sust.b.1d.v4i16.zero
    10, // llvm.nvvm.sust.b.1d.v4i32.clamp
    10, // llvm.nvvm.sust.b.1d.v4i32.trap
    10, // llvm.nvvm.sust.b.1d.v4i32.zero
    10, // llvm.nvvm.sust.b.1d.v4i8.clamp
    10, // llvm.nvvm.sust.b.1d.v4i8.trap
    10, // llvm.nvvm.sust.b.1d.v4i8.zero
    10, // llvm.nvvm.sust.b.2d.array.i16.clamp
    10, // llvm.nvvm.sust.b.2d.array.i16.trap
    10, // llvm.nvvm.sust.b.2d.array.i16.zero
    10, // llvm.nvvm.sust.b.2d.array.i32.clamp
    10, // llvm.nvvm.sust.b.2d.array.i32.trap
    10, // llvm.nvvm.sust.b.2d.array.i32.zero
    10, // llvm.nvvm.sust.b.2d.array.i64.clamp
    10, // llvm.nvvm.sust.b.2d.array.i64.trap
    10, // llvm.nvvm.sust.b.2d.array.i64.zero
    10, // llvm.nvvm.sust.b.2d.array.i8.clamp
    10, // llvm.nvvm.sust.b.2d.array.i8.trap
    10, // llvm.nvvm.sust.b.2d.array.i8.zero
    10, // llvm.nvvm.sust.b.2d.array.v2i16.clamp
    10, // llvm.nvvm.sust.b.2d.array.v2i16.trap
    10, // llvm.nvvm.sust.b.2d.array.v2i16.zero
    10, // llvm.nvvm.sust.b.2d.array.v2i32.clamp
    10, // llvm.nvvm.sust.b.2d.array.v2i32.trap
    10, // llvm.nvvm.sust.b.2d.array.v2i32.zero
    10, // llvm.nvvm.sust.b.2d.array.v2i64.clamp
    10, // llvm.nvvm.sust.b.2d.array.v2i64.trap
    10, // llvm.nvvm.sust.b.2d.array.v2i64.zero
    10, // llvm.nvvm.sust.b.2d.array.v2i8.clamp
    10, // llvm.nvvm.sust.b.2d.array.v2i8.trap
    10, // llvm.nvvm.sust.b.2d.array.v2i8.zero
    10, // llvm.nvvm.sust.b.2d.array.v4i16.clamp
    10, // llvm.nvvm.sust.b.2d.array.v4i16.trap
    10, // llvm.nvvm.sust.b.2d.array.v4i16.zero
    10, // llvm.nvvm.sust.b.2d.array.v4i32.clamp
    10, // llvm.nvvm.sust.b.2d.array.v4i32.trap
    10, // llvm.nvvm.sust.b.2d.array.v4i32.zero
    10, // llvm.nvvm.sust.b.2d.array.v4i8.clamp
    10, // llvm.nvvm.sust.b.2d.array.v4i8.trap
    10, // llvm.nvvm.sust.b.2d.array.v4i8.zero
    10, // llvm.nvvm.sust.b.2d.i16.clamp
    10, // llvm.nvvm.sust.b.2d.i16.trap
    10, // llvm.nvvm.sust.b.2d.i16.zero
    10, // llvm.nvvm.sust.b.2d.i32.clamp
    10, // llvm.nvvm.sust.b.2d.i32.trap
    10, // llvm.nvvm.sust.b.2d.i32.zero
    10, // llvm.nvvm.sust.b.2d.i64.clamp
    10, // llvm.nvvm.sust.b.2d.i64.trap
    10, // llvm.nvvm.sust.b.2d.i64.zero
    10, // llvm.nvvm.sust.b.2d.i8.clamp
    10, // llvm.nvvm.sust.b.2d.i8.trap
    10, // llvm.nvvm.sust.b.2d.i8.zero
    10, // llvm.nvvm.sust.b.2d.v2i16.clamp
    10, // llvm.nvvm.sust.b.2d.v2i16.trap
    10, // llvm.nvvm.sust.b.2d.v2i16.zero
    10, // llvm.nvvm.sust.b.2d.v2i32.clamp
    10, // llvm.nvvm.sust.b.2d.v2i32.trap
    10, // llvm.nvvm.sust.b.2d.v2i32.zero
    10, // llvm.nvvm.sust.b.2d.v2i64.clamp
    10, // llvm.nvvm.sust.b.2d.v2i64.trap
    10, // llvm.nvvm.sust.b.2d.v2i64.zero
    10, // llvm.nvvm.sust.b.2d.v2i8.clamp
    10, // llvm.nvvm.sust.b.2d.v2i8.trap
    10, // llvm.nvvm.sust.b.2d.v2i8.zero
    10, // llvm.nvvm.sust.b.2d.v4i16.clamp
    10, // llvm.nvvm.sust.b.2d.v4i16.trap
    10, // llvm.nvvm.sust.b.2d.v4i16.zero
    10, // llvm.nvvm.sust.b.2d.v4i32.clamp
    10, // llvm.nvvm.sust.b.2d.v4i32.trap
    10, // llvm.nvvm.sust.b.2d.v4i32.zero
    10, // llvm.nvvm.sust.b.2d.v4i8.clamp
    10, // llvm.nvvm.sust.b.2d.v4i8.trap
    10, // llvm.nvvm.sust.b.2d.v4i8.zero
    10, // llvm.nvvm.sust.b.3d.i16.clamp
    10, // llvm.nvvm.sust.b.3d.i16.trap
    10, // llvm.nvvm.sust.b.3d.i16.zero
    10, // llvm.nvvm.sust.b.3d.i32.clamp
    10, // llvm.nvvm.sust.b.3d.i32.trap
    10, // llvm.nvvm.sust.b.3d.i32.zero
    10, // llvm.nvvm.sust.b.3d.i64.clamp
    10, // llvm.nvvm.sust.b.3d.i64.trap
    10, // llvm.nvvm.sust.b.3d.i64.zero
    10, // llvm.nvvm.sust.b.3d.i8.clamp
    10, // llvm.nvvm.sust.b.3d.i8.trap
    10, // llvm.nvvm.sust.b.3d.i8.zero
    10, // llvm.nvvm.sust.b.3d.v2i16.clamp
    10, // llvm.nvvm.sust.b.3d.v2i16.trap
    10, // llvm.nvvm.sust.b.3d.v2i16.zero
    10, // llvm.nvvm.sust.b.3d.v2i32.clamp
    10, // llvm.nvvm.sust.b.3d.v2i32.trap
    10, // llvm.nvvm.sust.b.3d.v2i32.zero
    10, // llvm.nvvm.sust.b.3d.v2i64.clamp
    10, // llvm.nvvm.sust.b.3d.v2i64.trap
    10, // llvm.nvvm.sust.b.3d.v2i64.zero
    10, // llvm.nvvm.sust.b.3d.v2i8.clamp
    10, // llvm.nvvm.sust.b.3d.v2i8.trap
    10, // llvm.nvvm.sust.b.3d.v2i8.zero
    10, // llvm.nvvm.sust.b.3d.v4i16.clamp
    10, // llvm.nvvm.sust.b.3d.v4i16.trap
    10, // llvm.nvvm.sust.b.3d.v4i16.zero
    10, // llvm.nvvm.sust.b.3d.v4i32.clamp
    10, // llvm.nvvm.sust.b.3d.v4i32.trap
    10, // llvm.nvvm.sust.b.3d.v4i32.zero
    10, // llvm.nvvm.sust.b.3d.v4i8.clamp
    10, // llvm.nvvm.sust.b.3d.v4i8.trap
    10, // llvm.nvvm.sust.b.3d.v4i8.zero
    10, // llvm.nvvm.sust.p.1d.array.i16.trap
    10, // llvm.nvvm.sust.p.1d.array.i32.trap
    10, // llvm.nvvm.sust.p.1d.array.i8.trap
    10, // llvm.nvvm.sust.p.1d.array.v2i16.trap
    10, // llvm.nvvm.sust.p.1d.array.v2i32.trap
    10, // llvm.nvvm.sust.p.1d.array.v2i8.trap
    10, // llvm.nvvm.sust.p.1d.array.v4i16.trap
    10, // llvm.nvvm.sust.p.1d.array.v4i32.trap
    10, // llvm.nvvm.sust.p.1d.array.v4i8.trap
    10, // llvm.nvvm.sust.p.1d.i16.trap
    10, // llvm.nvvm.sust.p.1d.i32.trap
    10, // llvm.nvvm.sust.p.1d.i8.trap
    10, // llvm.nvvm.sust.p.1d.v2i16.trap
    10, // llvm.nvvm.sust.p.1d.v2i32.trap
    10, // llvm.nvvm.sust.p.1d.v2i8.trap
    10, // llvm.nvvm.sust.p.1d.v4i16.trap
    10, // llvm.nvvm.sust.p.1d.v4i32.trap
    10, // llvm.nvvm.sust.p.1d.v4i8.trap
    10, // llvm.nvvm.sust.p.2d.array.i16.trap
    10, // llvm.nvvm.sust.p.2d.array.i32.trap
    10, // llvm.nvvm.sust.p.2d.array.i8.trap
    10, // llvm.nvvm.sust.p.2d.array.v2i16.trap
    10, // llvm.nvvm.sust.p.2d.array.v2i32.trap
    10, // llvm.nvvm.sust.p.2d.array.v2i8.trap
    10, // llvm.nvvm.sust.p.2d.array.v4i16.trap
    10, // llvm.nvvm.sust.p.2d.array.v4i32.trap
    10, // llvm.nvvm.sust.p.2d.array.v4i8.trap
    10, // llvm.nvvm.sust.p.2d.i16.trap
    10, // llvm.nvvm.sust.p.2d.i32.trap
    10, // llvm.nvvm.sust.p.2d.i8.trap
    10, // llvm.nvvm.sust.p.2d.v2i16.trap
    10, // llvm.nvvm.sust.p.2d.v2i32.trap
    10, // llvm.nvvm.sust.p.2d.v2i8.trap
    10, // llvm.nvvm.sust.p.2d.v4i16.trap
    10, // llvm.nvvm.sust.p.2d.v4i32.trap
    10, // llvm.nvvm.sust.p.2d.v4i8.trap
    10, // llvm.nvvm.sust.p.3d.i16.trap
    10, // llvm.nvvm.sust.p.3d.i32.trap
    10, // llvm.nvvm.sust.p.3d.i8.trap
    10, // llvm.nvvm.sust.p.3d.v2i16.trap
    10, // llvm.nvvm.sust.p.3d.v2i32.trap
    10, // llvm.nvvm.sust.p.3d.v2i8.trap
    10, // llvm.nvvm.sust.p.3d.v4i16.trap
    10, // llvm.nvvm.sust.p.3d.v4i32.trap
    10, // llvm.nvvm.sust.p.3d.v4i8.trap
    5, // llvm.nvvm.swap.lo.hi.b64
    10, // llvm.nvvm.tex.1d.array.grad.v4f32.f32
    10, // llvm.nvvm.tex.1d.array.grad.v4s32.f32
    10, // llvm.nvvm.tex.1d.array.grad.v4u32.f32
    10, // llvm.nvvm.tex.1d.array.level.v4f32.f32
    10, // llvm.nvvm.tex.1d.array.level.v4s32.f32
    10, // llvm.nvvm.tex.1d.array.level.v4u32.f32
    10, // llvm.nvvm.tex.1d.array.v4f32.f32
    10, // llvm.nvvm.tex.1d.array.v4f32.s32
    10, // llvm.nvvm.tex.1d.array.v4s32.f32
    10, // llvm.nvvm.tex.1d.array.v4s32.s32
    10, // llvm.nvvm.tex.1d.array.v4u32.f32
    10, // llvm.nvvm.tex.1d.array.v4u32.s32
    10, // llvm.nvvm.tex.1d.grad.v4f32.f32
    10, // llvm.nvvm.tex.1d.grad.v4s32.f32
    10, // llvm.nvvm.tex.1d.grad.v4u32.f32
    10, // llvm.nvvm.tex.1d.level.v4f32.f32
    10, // llvm.nvvm.tex.1d.level.v4s32.f32
    10, // llvm.nvvm.tex.1d.level.v4u32.f32
    10, // llvm.nvvm.tex.1d.v4f32.f32
    10, // llvm.nvvm.tex.1d.v4f32.s32
    10, // llvm.nvvm.tex.1d.v4s32.f32
    10, // llvm.nvvm.tex.1d.v4s32.s32
    10, // llvm.nvvm.tex.1d.v4u32.f32
    10, // llvm.nvvm.tex.1d.v4u32.s32
    10, // llvm.nvvm.tex.2d.array.grad.v4f32.f32
    10, // llvm.nvvm.tex.2d.array.grad.v4s32.f32
    10, // llvm.nvvm.tex.2d.array.grad.v4u32.f32
    10, // llvm.nvvm.tex.2d.array.level.v4f32.f32
    10, // llvm.nvvm.tex.2d.array.level.v4s32.f32
    10, // llvm.nvvm.tex.2d.array.level.v4u32.f32
    10, // llvm.nvvm.tex.2d.array.v4f32.f32
    10, // llvm.nvvm.tex.2d.array.v4f32.s32
    10, // llvm.nvvm.tex.2d.array.v4s32.f32
    10, // llvm.nvvm.tex.2d.array.v4s32.s32
    10, // llvm.nvvm.tex.2d.array.v4u32.f32
    10, // llvm.nvvm.tex.2d.array.v4u32.s32
    10, // llvm.nvvm.tex.2d.grad.v4f32.f32
    10, // llvm.nvvm.tex.2d.grad.v4s32.f32
    10, // llvm.nvvm.tex.2d.grad.v4u32.f32
    10, // llvm.nvvm.tex.2d.level.v4f32.f32
    10, // llvm.nvvm.tex.2d.level.v4s32.f32
    10, // llvm.nvvm.tex.2d.level.v4u32.f32
    10, // llvm.nvvm.tex.2d.v4f32.f32
    10, // llvm.nvvm.tex.2d.v4f32.s32
    10, // llvm.nvvm.tex.2d.v4s32.f32
    10, // llvm.nvvm.tex.2d.v4s32.s32
    10, // llvm.nvvm.tex.2d.v4u32.f32
    10, // llvm.nvvm.tex.2d.v4u32.s32
    10, // llvm.nvvm.tex.3d.grad.v4f32.f32
    10, // llvm.nvvm.tex.3d.grad.v4s32.f32
    10, // llvm.nvvm.tex.3d.grad.v4u32.f32
    10, // llvm.nvvm.tex.3d.level.v4f32.f32
    10, // llvm.nvvm.tex.3d.level.v4s32.f32
    10, // llvm.nvvm.tex.3d.level.v4u32.f32
    10, // llvm.nvvm.tex.3d.v4f32.f32
    10, // llvm.nvvm.tex.3d.v4f32.s32
    10, // llvm.nvvm.tex.3d.v4s32.f32
    10, // llvm.nvvm.tex.3d.v4s32.s32
    10, // llvm.nvvm.tex.3d.v4u32.f32
    10, // llvm.nvvm.tex.3d.v4u32.s32
    10, // llvm.nvvm.tex.cube.array.level.v4f32.f32
    10, // llvm.nvvm.tex.cube.array.level.v4s32.f32
    10, // llvm.nvvm.tex.cube.array.level.v4u32.f32
    10, // llvm.nvvm.tex.cube.array.v4f32.f32
    10, // llvm.nvvm.tex.cube.array.v4s32.f32
    10, // llvm.nvvm.tex.cube.array.v4u32.f32
    10, // llvm.nvvm.tex.cube.level.v4f32.f32
    10, // llvm.nvvm.tex.cube.level.v4s32.f32
    10, // llvm.nvvm.tex.cube.level.v4u32.f32
    10, // llvm.nvvm.tex.cube.v4f32.f32
    10, // llvm.nvvm.tex.cube.v4s32.f32
    10, // llvm.nvvm.tex.cube.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32
    10, // llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32
    10, // llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.array.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.1d.array.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.1d.array.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.array.v4f32.f32
    10, // llvm.nvvm.tex.unified.1d.array.v4f32.s32
    10, // llvm.nvvm.tex.unified.1d.array.v4s32.f32
    10, // llvm.nvvm.tex.unified.1d.array.v4s32.s32
    10, // llvm.nvvm.tex.unified.1d.array.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.array.v4u32.s32
    10, // llvm.nvvm.tex.unified.1d.grad.v4f32.f32
    10, // llvm.nvvm.tex.unified.1d.grad.v4s32.f32
    10, // llvm.nvvm.tex.unified.1d.grad.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.1d.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.1d.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.v4f32.f32
    10, // llvm.nvvm.tex.unified.1d.v4f32.s32
    10, // llvm.nvvm.tex.unified.1d.v4s32.f32
    10, // llvm.nvvm.tex.unified.1d.v4s32.s32
    10, // llvm.nvvm.tex.unified.1d.v4u32.f32
    10, // llvm.nvvm.tex.unified.1d.v4u32.s32
    10, // llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32
    10, // llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32
    10, // llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32
    10, // llvm.nvvm.tex.unified.2d.array.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.2d.array.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.2d.array.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.2d.array.v4f32.f32
    10, // llvm.nvvm.tex.unified.2d.array.v4f32.s32
    10, // llvm.nvvm.tex.unified.2d.array.v4s32.f32
    10, // llvm.nvvm.tex.unified.2d.array.v4s32.s32
    10, // llvm.nvvm.tex.unified.2d.array.v4u32.f32
    10, // llvm.nvvm.tex.unified.2d.array.v4u32.s32
    10, // llvm.nvvm.tex.unified.2d.grad.v4f32.f32
    10, // llvm.nvvm.tex.unified.2d.grad.v4s32.f32
    10, // llvm.nvvm.tex.unified.2d.grad.v4u32.f32
    10, // llvm.nvvm.tex.unified.2d.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.2d.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.2d.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.2d.v4f32.f32
    10, // llvm.nvvm.tex.unified.2d.v4f32.s32
    10, // llvm.nvvm.tex.unified.2d.v4s32.f32
    10, // llvm.nvvm.tex.unified.2d.v4s32.s32
    10, // llvm.nvvm.tex.unified.2d.v4u32.f32
    10, // llvm.nvvm.tex.unified.2d.v4u32.s32
    10, // llvm.nvvm.tex.unified.3d.grad.v4f32.f32
    10, // llvm.nvvm.tex.unified.3d.grad.v4s32.f32
    10, // llvm.nvvm.tex.unified.3d.grad.v4u32.f32
    10, // llvm.nvvm.tex.unified.3d.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.3d.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.3d.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.3d.v4f32.f32
    10, // llvm.nvvm.tex.unified.3d.v4f32.s32
    10, // llvm.nvvm.tex.unified.3d.v4s32.f32
    10, // llvm.nvvm.tex.unified.3d.v4s32.s32
    10, // llvm.nvvm.tex.unified.3d.v4u32.f32
    10, // llvm.nvvm.tex.unified.3d.v4u32.s32
    10, // llvm.nvvm.tex.unified.cube.array.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.cube.array.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.cube.array.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.cube.array.v4f32.f32
    10, // llvm.nvvm.tex.unified.cube.array.v4s32.f32
    10, // llvm.nvvm.tex.unified.cube.array.v4u32.f32
    10, // llvm.nvvm.tex.unified.cube.level.v4f32.f32
    10, // llvm.nvvm.tex.unified.cube.level.v4s32.f32
    10, // llvm.nvvm.tex.unified.cube.level.v4u32.f32
    10, // llvm.nvvm.tex.unified.cube.v4f32.f32
    10, // llvm.nvvm.tex.unified.cube.v4s32.f32
    10, // llvm.nvvm.tex.unified.cube.v4u32.f32
    12, // llvm.nvvm.texsurf.handle
    12, // llvm.nvvm.texsurf.handle.internal
    10, // llvm.nvvm.tld4.a.2d.v4f32.f32
    10, // llvm.nvvm.tld4.a.2d.v4s32.f32
    10, // llvm.nvvm.tld4.a.2d.v4u32.f32
    10, // llvm.nvvm.tld4.b.2d.v4f32.f32
    10, // llvm.nvvm.tld4.b.2d.v4s32.f32
    10, // llvm.nvvm.tld4.b.2d.v4u32.f32
    10, // llvm.nvvm.tld4.g.2d.v4f32.f32
    10, // llvm.nvvm.tld4.g.2d.v4s32.f32
    10, // llvm.nvvm.tld4.g.2d.v4u32.f32
    10, // llvm.nvvm.tld4.r.2d.v4f32.f32
    10, // llvm.nvvm.tld4.r.2d.v4s32.f32
    10, // llvm.nvvm.tld4.r.2d.v4u32.f32
    10, // llvm.nvvm.tld4.unified.a.2d.v4f32.f32
    10, // llvm.nvvm.tld4.unified.a.2d.v4s32.f32
    10, // llvm.nvvm.tld4.unified.a.2d.v4u32.f32
    10, // llvm.nvvm.tld4.unified.b.2d.v4f32.f32
    10, // llvm.nvvm.tld4.unified.b.2d.v4s32.f32
    10, // llvm.nvvm.tld4.unified.b.2d.v4u32.f32
    10, // llvm.nvvm.tld4.unified.g.2d.v4f32.f32
    10, // llvm.nvvm.tld4.unified.g.2d.v4s32.f32
    10, // llvm.nvvm.tld4.unified.g.2d.v4u32.f32
    10, // llvm.nvvm.tld4.unified.r.2d.v4f32.f32
    10, // llvm.nvvm.tld4.unified.r.2d.v4s32.f32
    10, // llvm.nvvm.tld4.unified.r.2d.v4u32.f32
    5, // llvm.nvvm.trunc.d
    5, // llvm.nvvm.trunc.f
    5, // llvm.nvvm.trunc.ftz.f
    12, // llvm.nvvm.txq.array.size
    12, // llvm.nvvm.txq.channel.data.type
    12, // llvm.nvvm.txq.channel.order
    12, // llvm.nvvm.txq.depth
    12, // llvm.nvvm.txq.height
    12, // llvm.nvvm.txq.num.mipmap.levels
    12, // llvm.nvvm.txq.num.samples
    12, // llvm.nvvm.txq.width
    5, // llvm.nvvm.ui2d.rm
    5, // llvm.nvvm.ui2d.rn
    5, // llvm.nvvm.ui2d.rp
    5, // llvm.nvvm.ui2d.rz
    5, // llvm.nvvm.ui2f.rm
    5, // llvm.nvvm.ui2f.rn
    5, // llvm.nvvm.ui2f.rp
    5, // llvm.nvvm.ui2f.rz
    5, // llvm.nvvm.ull2d.rm
    5, // llvm.nvvm.ull2d.rn
    5, // llvm.nvvm.ull2d.rp
    5, // llvm.nvvm.ull2d.rz
    5, // llvm.nvvm.ull2f.rm
    5, // llvm.nvvm.ull2f.rn
    5, // llvm.nvvm.ull2f.rp
    5, // llvm.nvvm.ull2f.rz
    248, // llvm.nvvm.vote.all
    248, // llvm.nvvm.vote.all.sync
    248, // llvm.nvvm.vote.any
    248, // llvm.nvvm.vote.any.sync
    248, // llvm.nvvm.vote.ballot
    248, // llvm.nvvm.vote.ballot.sync
    248, // llvm.nvvm.vote.uni
    248, // llvm.nvvm.vote.uni.sync
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.col.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.a.row.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.col.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.b.row.u8
    247, // llvm.nvvm.wmma.m16n16k16.load.c.col.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.c.col.f32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.col.s32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.row.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.c.row.f32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.row.s32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16
    247, // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32
    247, // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.s8
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.u8
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.bf16
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.s8
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.u8
    244, // llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.bf16
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.s8
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.u8
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.s8
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.u8
    244, // llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite
    253, // llvm.nvvm.wmma.m16n16k16.store.d.col.f16
    253, // llvm.nvvm.wmma.m16n16k16.store.d.col.f32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.col.s32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16
    253, // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.row.f16
    253, // llvm.nvvm.wmma.m16n16k16.store.d.row.f32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.row.s32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16
    253, // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32
    253, // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32
    247, // llvm.nvvm.wmma.m16n16k8.load.a.col.stride.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.a.col.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.a.row.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.b.col.stride.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.b.col.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.b.row.stride.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.b.row.tf32
    247, // llvm.nvvm.wmma.m16n16k8.load.c.col.f32
    247, // llvm.nvvm.wmma.m16n16k8.load.c.col.stride.f32
    247, // llvm.nvvm.wmma.m16n16k8.load.c.row.f32
    247, // llvm.nvvm.wmma.m16n16k8.load.c.row.stride.f32
    244, // llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32
    244, // llvm.nvvm.wmma.m16n16k8.mma.col.row.tf32
    244, // llvm.nvvm.wmma.m16n16k8.mma.row.col.tf32
    244, // llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32
    253, // llvm.nvvm.wmma.m16n16k8.store.d.col.f32
    253, // llvm.nvvm.wmma.m16n16k8.store.d.col.stride.f32
    253, // llvm.nvvm.wmma.m16n16k8.store.d.row.f32
    253, // llvm.nvvm.wmma.m16n16k8.store.d.row.stride.f32
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.col.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.a.row.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.col.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.b.row.u8
    247, // llvm.nvvm.wmma.m32n8k16.load.c.col.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.c.col.f32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.col.s32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.row.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.c.row.f32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.row.s32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16
    247, // llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32
    247, // llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.s8
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.u8
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.bf16
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.s8
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.u8
    244, // llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.bf16
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.s8
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.u8
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.s8
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.u8
    244, // llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite
    253, // llvm.nvvm.wmma.m32n8k16.store.d.col.f16
    253, // llvm.nvvm.wmma.m32n8k16.store.d.col.f32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.col.s32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16
    253, // llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.row.f16
    253, // llvm.nvvm.wmma.m32n8k16.store.d.row.f32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.row.s32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16
    253, // llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32
    253, // llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.col.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.a.row.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.col.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.b.row.u8
    247, // llvm.nvvm.wmma.m8n32k16.load.c.col.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.c.col.f32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.col.s32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.row.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.c.row.f32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.row.s32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16
    247, // llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32
    247, // llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.s8
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.u8
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.bf16
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.s8
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.u8
    244, // llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.bf16
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.s8
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.u8
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.s8
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.u8
    244, // llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite
    253, // llvm.nvvm.wmma.m8n32k16.store.d.col.f16
    253, // llvm.nvvm.wmma.m8n32k16.store.d.col.f32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.col.s32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16
    253, // llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.row.f16
    253, // llvm.nvvm.wmma.m8n32k16.store.d.row.f32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.row.s32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16
    253, // llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32
    253, // llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32
    247, // llvm.nvvm.wmma.m8n8k128.load.a.row.b1
    247, // llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1
    247, // llvm.nvvm.wmma.m8n8k128.load.b.col.b1
    247, // llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1
    247, // llvm.nvvm.wmma.m8n8k128.load.c.col.s32
    247, // llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32
    247, // llvm.nvvm.wmma.m8n8k128.load.c.row.s32
    247, // llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32
    244, // llvm.nvvm.wmma.m8n8k128.mma.and.popc.row.col.b1
    244, // llvm.nvvm.wmma.m8n8k128.mma.xor.popc.row.col.b1
    253, // llvm.nvvm.wmma.m8n8k128.store.d.col.s32
    253, // llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32
    253, // llvm.nvvm.wmma.m8n8k128.store.d.row.s32
    253, // llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32
    247, // llvm.nvvm.wmma.m8n8k32.load.a.row.s4
    247, // llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4
    247, // llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4
    247, // llvm.nvvm.wmma.m8n8k32.load.a.row.u4
    247, // llvm.nvvm.wmma.m8n8k32.load.b.col.s4
    247, // llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4
    247, // llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4
    247, // llvm.nvvm.wmma.m8n8k32.load.b.col.u4
    247, // llvm.nvvm.wmma.m8n8k32.load.c.col.s32
    247, // llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32
    247, // llvm.nvvm.wmma.m8n8k32.load.c.row.s32
    247, // llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32
    244, // llvm.nvvm.wmma.m8n8k32.mma.row.col.s4
    244, // llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite
    244, // llvm.nvvm.wmma.m8n8k32.mma.row.col.u4
    244, // llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite
    253, // llvm.nvvm.wmma.m8n8k32.store.d.col.s32
    253, // llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32
    253, // llvm.nvvm.wmma.m8n8k32.store.d.row.s32
    253, // llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32
    247, // llvm.nvvm.wmma.m8n8k4.load.a.col.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.a.col.stride.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.a.row.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.a.row.stride.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.b.col.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.b.col.stride.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.b.row.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.b.row.stride.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.c.col.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.c.col.stride.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.c.row.f64
    247, // llvm.nvvm.wmma.m8n8k4.load.c.row.stride.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.col.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.col.rm.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.col.rn.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.col.rp.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.col.rz.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.row.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.row.rm.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.row.rn.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.row.rp.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.col.row.rz.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.col.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.col.rm.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.col.rn.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.col.rp.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.col.rz.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.row.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.row.rm.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.row.rn.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.row.rp.f64
    244, // llvm.nvvm.wmma.m8n8k4.mma.row.row.rz.f64
    253, // llvm.nvvm.wmma.m8n8k4.store.d.col.f64
    253, // llvm.nvvm.wmma.m8n8k4.store.d.col.stride.f64
    253, // llvm.nvvm.wmma.m8n8k4.store.d.row.f64
    253, // llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64
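    // --- PowerPC (llvm.ppc.*) intrinsic entries follow. ---
    // Each value in this generated table appears to be an index into the
    // attribute-group switch emitted elsewhere in this file (the layout
    // matches LLVM's TableGen-produced IntrinsicImpl.inc); intrinsics that
    // carry the same value share one attribute set.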
    254, // llvm.ppc.addex
    2, // llvm.ppc.addf128.round.to.odd
    2, // llvm.ppc.altivec.crypto.vcipher
    2, // llvm.ppc.altivec.crypto.vcipherlast
    2, // llvm.ppc.altivec.crypto.vncipher
    2, // llvm.ppc.altivec.crypto.vncipherlast
    2, // llvm.ppc.altivec.crypto.vpermxor
    2, // llvm.ppc.altivec.crypto.vpermxor.be
    2, // llvm.ppc.altivec.crypto.vpmsumb
    2, // llvm.ppc.altivec.crypto.vpmsumd
    2, // llvm.ppc.altivec.crypto.vpmsumh
    2, // llvm.ppc.altivec.crypto.vpmsumw
    2, // llvm.ppc.altivec.crypto.vsbox
    26, // llvm.ppc.altivec.crypto.vshasigmad
    26, // llvm.ppc.altivec.crypto.vshasigmaw
    10, // llvm.ppc.altivec.dss
    10, // llvm.ppc.altivec.dssall
    10, // llvm.ppc.altivec.dst
    10, // llvm.ppc.altivec.dstst
    10, // llvm.ppc.altivec.dststt
    10, // llvm.ppc.altivec.dstt
    3, // llvm.ppc.altivec.lvebx
    3, // llvm.ppc.altivec.lvehx
    3, // llvm.ppc.altivec.lvewx
    2, // llvm.ppc.altivec.lvsl
    2, // llvm.ppc.altivec.lvsr
    3, // llvm.ppc.altivec.lvx
    3, // llvm.ppc.altivec.lvxl
    255, // llvm.ppc.altivec.mfvscr
    255, // llvm.ppc.altivec.mtvscr
    2, // llvm.ppc.altivec.mtvsrbm
    2, // llvm.ppc.altivec.mtvsrdm
    2, // llvm.ppc.altivec.mtvsrhm
    2, // llvm.ppc.altivec.mtvsrqm
    2, // llvm.ppc.altivec.mtvsrwm
    98, // llvm.ppc.altivec.stvebx
    98, // llvm.ppc.altivec.stvehx
    98, // llvm.ppc.altivec.stvewx
    98, // llvm.ppc.altivec.stvx
    98, // llvm.ppc.altivec.stvxl
    2, // llvm.ppc.altivec.vabsdub
    2, // llvm.ppc.altivec.vabsduh
    2, // llvm.ppc.altivec.vabsduw
    2, // llvm.ppc.altivec.vaddcuq
    2, // llvm.ppc.altivec.vaddcuw
    2, // llvm.ppc.altivec.vaddecuq
    2, // llvm.ppc.altivec.vaddeuqm
    2, // llvm.ppc.altivec.vaddsbs
    2, // llvm.ppc.altivec.vaddshs
    2, // llvm.ppc.altivec.vaddsws
    2, // llvm.ppc.altivec.vaddubs
    2, // llvm.ppc.altivec.vadduhs
    2, // llvm.ppc.altivec.vadduws
    2, // llvm.ppc.altivec.vavgsb
    2, // llvm.ppc.altivec.vavgsh
    2, // llvm.ppc.altivec.vavgsw
    2, // llvm.ppc.altivec.vavgub
    2, // llvm.ppc.altivec.vavguh
    2, // llvm.ppc.altivec.vavguw
    2, // llvm.ppc.altivec.vbpermd
    2, // llvm.ppc.altivec.vbpermq
    59, // llvm.ppc.altivec.vcfsx
    2, // llvm.ppc.altivec.vcfuged
    59, // llvm.ppc.altivec.vcfux
    2, // llvm.ppc.altivec.vclrlb
    2, // llvm.ppc.altivec.vclrrb
    2, // llvm.ppc.altivec.vclzdm
    2, // llvm.ppc.altivec.vclzlsbb
    2, // llvm.ppc.altivec.vcmpbfp
    2, // llvm.ppc.altivec.vcmpbfp.p
    2, // llvm.ppc.altivec.vcmpeqfp
    2, // llvm.ppc.altivec.vcmpeqfp.p
    2, // llvm.ppc.altivec.vcmpequb
    2, // llvm.ppc.altivec.vcmpequb.p
    2, // llvm.ppc.altivec.vcmpequd
    2, // llvm.ppc.altivec.vcmpequd.p
    2, // llvm.ppc.altivec.vcmpequh
    2, // llvm.ppc.altivec.vcmpequh.p
    2, // llvm.ppc.altivec.vcmpequq
    2, // llvm.ppc.altivec.vcmpequq.p
    2, // llvm.ppc.altivec.vcmpequw
    2, // llvm.ppc.altivec.vcmpequw.p
    2, // llvm.ppc.altivec.vcmpgefp
    2, // llvm.ppc.altivec.vcmpgefp.p
    2, // llvm.ppc.altivec.vcmpgtfp
    2, // llvm.ppc.altivec.vcmpgtfp.p
    2, // llvm.ppc.altivec.vcmpgtsb
    2, // llvm.ppc.altivec.vcmpgtsb.p
    2, // llvm.ppc.altivec.vcmpgtsd
    2, // llvm.ppc.altivec.vcmpgtsd.p
    2, // llvm.ppc.altivec.vcmpgtsh
    2, // llvm.ppc.altivec.vcmpgtsh.p
    2, // llvm.ppc.altivec.vcmpgtsq
    2, // llvm.ppc.altivec.vcmpgtsq.p
    2, // llvm.ppc.altivec.vcmpgtsw
    2, // llvm.ppc.altivec.vcmpgtsw.p
    2, // llvm.ppc.altivec.vcmpgtub
    2, // llvm.ppc.altivec.vcmpgtub.p
    2, // llvm.ppc.altivec.vcmpgtud
    2, // llvm.ppc.altivec.vcmpgtud.p
    2, // llvm.ppc.altivec.vcmpgtuh
    2, // llvm.ppc.altivec.vcmpgtuh.p
    2, // llvm.ppc.altivec.vcmpgtuq
    2, // llvm.ppc.altivec.vcmpgtuq.p
    2, // llvm.ppc.altivec.vcmpgtuw
    2, // llvm.ppc.altivec.vcmpgtuw.p
    2, // llvm.ppc.altivec.vcmpneb
    2, // llvm.ppc.altivec.vcmpneb.p
    2, // llvm.ppc.altivec.vcmpneh
    2, // llvm.ppc.altivec.vcmpneh.p
    2, // llvm.ppc.altivec.vcmpnew
    2, // llvm.ppc.altivec.vcmpnew.p
    2, // llvm.ppc.altivec.vcmpnezb
    2, // llvm.ppc.altivec.vcmpnezb.p
    2, // llvm.ppc.altivec.vcmpnezh
    2, // llvm.ppc.altivec.vcmpnezh.p
    2, // llvm.ppc.altivec.vcmpnezw
    2, // llvm.ppc.altivec.vcmpnezw.p
    59, // llvm.ppc.altivec.vcntmbb
    59, // llvm.ppc.altivec.vcntmbd
    59, // llvm.ppc.altivec.vcntmbh
    59, // llvm.ppc.altivec.vcntmbw
    59, // llvm.ppc.altivec.vctsxs
    59, // llvm.ppc.altivec.vctuxs
    2, // llvm.ppc.altivec.vctzdm
    2, // llvm.ppc.altivec.vctzlsbb
    2, // llvm.ppc.altivec.vdivesd
    2, // llvm.ppc.altivec.vdivesq
    2, // llvm.ppc.altivec.vdivesw
    2, // llvm.ppc.altivec.vdiveud
    2, // llvm.ppc.altivec.vdiveuq
    2, // llvm.ppc.altivec.vdiveuw
    2, // llvm.ppc.altivec.vexpandbm
    2, // llvm.ppc.altivec.vexpanddm
    2, // llvm.ppc.altivec.vexpandhm
    2, // llvm.ppc.altivec.vexpandqm
    2, // llvm.ppc.altivec.vexpandwm
    2, // llvm.ppc.altivec.vexptefp
    2, // llvm.ppc.altivec.vextddvlx
    2, // llvm.ppc.altivec.vextddvrx
    2, // llvm.ppc.altivec.vextdubvlx
    2, // llvm.ppc.altivec.vextdubvrx
    2, // llvm.ppc.altivec.vextduhvlx
    2, // llvm.ppc.altivec.vextduhvrx
    2, // llvm.ppc.altivec.vextduwvlx
    2, // llvm.ppc.altivec.vextduwvrx
    2, // llvm.ppc.altivec.vextractbm
    2, // llvm.ppc.altivec.vextractdm
    2, // llvm.ppc.altivec.vextracthm
    2, // llvm.ppc.altivec.vextractqm
    2, // llvm.ppc.altivec.vextractwm
    2, // llvm.ppc.altivec.vextsb2d
    2, // llvm.ppc.altivec.vextsb2w
    2, // llvm.ppc.altivec.vextsd2q
    2, // llvm.ppc.altivec.vextsh2d
    2, // llvm.ppc.altivec.vextsh2w
    2, // llvm.ppc.altivec.vextsw2d
    2, // llvm.ppc.altivec.vgbbd
    59, // llvm.ppc.altivec.vgnb
    2, // llvm.ppc.altivec.vinsblx
    2, // llvm.ppc.altivec.vinsbrx
    2, // llvm.ppc.altivec.vinsbvlx
    2, // llvm.ppc.altivec.vinsbvrx
    22, // llvm.ppc.altivec.vinsd
    2, // llvm.ppc.altivec.vinsdlx
    2, // llvm.ppc.altivec.vinsdrx
    2, // llvm.ppc.altivec.vinshlx
    2, // llvm.ppc.altivec.vinshrx
    2, // llvm.ppc.altivec.vinshvlx
    2, // llvm.ppc.altivec.vinshvrx
    22, // llvm.ppc.altivec.vinsw
    2, // llvm.ppc.altivec.vinswlx
    2, // llvm.ppc.altivec.vinswrx
    2, // llvm.ppc.altivec.vinswvlx
    2, // llvm.ppc.altivec.vinswvrx
    2, // llvm.ppc.altivec.vlogefp
    2, // llvm.ppc.altivec.vmaddfp
    2, // llvm.ppc.altivec.vmaxfp
    2, // llvm.ppc.altivec.vmaxsb
    2, // llvm.ppc.altivec.vmaxsd
    2, // llvm.ppc.altivec.vmaxsh
    2, // llvm.ppc.altivec.vmaxsw
    2, // llvm.ppc.altivec.vmaxub
    2, // llvm.ppc.altivec.vmaxud
    2, // llvm.ppc.altivec.vmaxuh
    2, // llvm.ppc.altivec.vmaxuw
    76, // llvm.ppc.altivec.vmhaddshs
    76, // llvm.ppc.altivec.vmhraddshs
    2, // llvm.ppc.altivec.vminfp
    2, // llvm.ppc.altivec.vminsb
    2, // llvm.ppc.altivec.vminsd
    2, // llvm.ppc.altivec.vminsh
    2, // llvm.ppc.altivec.vminsw
    2, // llvm.ppc.altivec.vminub
    2, // llvm.ppc.altivec.vminud
    2, // llvm.ppc.altivec.vminuh
    2, // llvm.ppc.altivec.vminuw
    2, // llvm.ppc.altivec.vmladduhm
    2, // llvm.ppc.altivec.vmsumcud
    2, // llvm.ppc.altivec.vmsummbm
    2, // llvm.ppc.altivec.vmsumshm
    76, // llvm.ppc.altivec.vmsumshs
    2, // llvm.ppc.altivec.vmsumubm
    2, // llvm.ppc.altivec.vmsumudm
    2, // llvm.ppc.altivec.vmsumuhm
    76, // llvm.ppc.altivec.vmsumuhs
    2, // llvm.ppc.altivec.vmulesb
    2, // llvm.ppc.altivec.vmulesd
    2, // llvm.ppc.altivec.vmulesh
    2, // llvm.ppc.altivec.vmulesw
    2, // llvm.ppc.altivec.vmuleub
    2, // llvm.ppc.altivec.vmuleud
    2, // llvm.ppc.altivec.vmuleuh
    2, // llvm.ppc.altivec.vmuleuw
    2, // llvm.ppc.altivec.vmulhsd
    2, // llvm.ppc.altivec.vmulhsw
    2, // llvm.ppc.altivec.vmulhud
    2, // llvm.ppc.altivec.vmulhuw
    2, // llvm.ppc.altivec.vmulosb
    2, // llvm.ppc.altivec.vmulosd
    2, // llvm.ppc.altivec.vmulosh
    2, // llvm.ppc.altivec.vmulosw
    2, // llvm.ppc.altivec.vmuloub
    2, // llvm.ppc.altivec.vmuloud
    2, // llvm.ppc.altivec.vmulouh
    2, // llvm.ppc.altivec.vmulouw
    2, // llvm.ppc.altivec.vnmsubfp
    2, // llvm.ppc.altivec.vpdepd
    2, // llvm.ppc.altivec.vperm
    2, // llvm.ppc.altivec.vpextd
    2, // llvm.ppc.altivec.vpkpx
    76, // llvm.ppc.altivec.vpksdss
    76, // llvm.ppc.altivec.vpksdus
    76, // llvm.ppc.altivec.vpkshss
    76, // llvm.ppc.altivec.vpkshus
    76, // llvm.ppc.altivec.vpkswss
    76, // llvm.ppc.altivec.vpkswus
    76, // llvm.ppc.altivec.vpkudus
    76, // llvm.ppc.altivec.vpkuhus
    76, // llvm.ppc.altivec.vpkuwus
    2, // llvm.ppc.altivec.vprtybd
    2, // llvm.ppc.altivec.vprtybq
    2, // llvm.ppc.altivec.vprtybw
    2, // llvm.ppc.altivec.vrefp
    2, // llvm.ppc.altivec.vrfim
    2, // llvm.ppc.altivec.vrfin
    2, // llvm.ppc.altivec.vrfip
    2, // llvm.ppc.altivec.vrfiz
    2, // llvm.ppc.altivec.vrlb
    2, // llvm.ppc.altivec.vrld
    2, // llvm.ppc.altivec.vrldmi
    2, // llvm.ppc.altivec.vrldnm
    2, // llvm.ppc.altivec.vrlh
    2, // llvm.ppc.altivec.vrlqmi
    2, // llvm.ppc.altivec.vrlqnm
    2, // llvm.ppc.altivec.vrlw
    2, // llvm.ppc.altivec.vrlwmi
    2, // llvm.ppc.altivec.vrlwnm
    2, // llvm.ppc.altivec.vrsqrtefp
    2, // llvm.ppc.altivec.vsel
    2, // llvm.ppc.altivec.vsl
    2, // llvm.ppc.altivec.vslb
    22, // llvm.ppc.altivec.vsldbi
    2, // llvm.ppc.altivec.vslh
    2, // llvm.ppc.altivec.vslo
    2, // llvm.ppc.altivec.vslv
    2, // llvm.ppc.altivec.vslw
    2, // llvm.ppc.altivec.vsr
    2, // llvm.ppc.altivec.vsrab
    2, // llvm.ppc.altivec.vsrah
    2, // llvm.ppc.altivec.vsraw
    2, // llvm.ppc.altivec.vsrb
    22, // llvm.ppc.altivec.vsrdbi
    2, // llvm.ppc.altivec.vsrh
    2, // llvm.ppc.altivec.vsro
    2, // llvm.ppc.altivec.vsrv
    2, // llvm.ppc.altivec.vsrw
    2, // llvm.ppc.altivec.vstribl
    2, // llvm.ppc.altivec.vstribl.p
    2, // llvm.ppc.altivec.vstribr
    2, // llvm.ppc.altivec.vstribr.p
    2, // llvm.ppc.altivec.vstrihl
    2, // llvm.ppc.altivec.vstrihl.p
    2, // llvm.ppc.altivec.vstrihr
    2, // llvm.ppc.altivec.vstrihr.p
    2, // llvm.ppc.altivec.vsubcuq
    2, // llvm.ppc.altivec.vsubcuw
    2, // llvm.ppc.altivec.vsubecuq
    2, // llvm.ppc.altivec.vsubeuqm
    2, // llvm.ppc.altivec.vsubsbs
    2, // llvm.ppc.altivec.vsubshs
    2, // llvm.ppc.altivec.vsubsws
    2, // llvm.ppc.altivec.vsububs
    2, // llvm.ppc.altivec.vsubuhs
    2, // llvm.ppc.altivec.vsubuws
    76, // llvm.ppc.altivec.vsum2sws
    76, // llvm.ppc.altivec.vsum4sbs
    76, // llvm.ppc.altivec.vsum4shs
    76, // llvm.ppc.altivec.vsum4ubs
    76, // llvm.ppc.altivec.vsumsws
    2, // llvm.ppc.altivec.vupkhpx
    2, // llvm.ppc.altivec.vupkhsb
    2, // llvm.ppc.altivec.vupkhsh
    2, // llvm.ppc.altivec.vupkhsw
    2, // llvm.ppc.altivec.vupklpx
    2, // llvm.ppc.altivec.vupklsb
    2, // llvm.ppc.altivec.vupklsh
    2, // llvm.ppc.altivec.vupklsw
    256, // llvm.ppc.atomic.load.i128
    257, // llvm.ppc.atomic.store.i128
    226, // llvm.ppc.atomicrmw.add.i128
    226, // llvm.ppc.atomicrmw.and.i128
    226, // llvm.ppc.atomicrmw.nand.i128
    226, // llvm.ppc.atomicrmw.or.i128
    226, // llvm.ppc.atomicrmw.sub.i128
    226, // llvm.ppc.atomicrmw.xchg.i128
    226, // llvm.ppc.atomicrmw.xor.i128
    22, // llvm.ppc.bcdadd
    32, // llvm.ppc.bcdadd.p
    22, // llvm.ppc.bcdsub
    32, // llvm.ppc.bcdsub.p
    2, // llvm.ppc.bpermd
    10, // llvm.ppc.cfence
    2, // llvm.ppc.cfuged
    2, // llvm.ppc.cmpb
    2, // llvm.ppc.cmpeqb
    32, // llvm.ppc.cmprb
    226, // llvm.ppc.cmpxchg.i128
    2, // llvm.ppc.cntlzdm
    2, // llvm.ppc.cnttzdm
    2, // llvm.ppc.compare.exp.eq
    2, // llvm.ppc.compare.exp.gt
    2, // llvm.ppc.compare.exp.lt
    2, // llvm.ppc.compare.exp.uo
    2, // llvm.ppc.convert.f128.to.ppcf128
    2, // llvm.ppc.convert.ppcf128.to.f128
    258, // llvm.ppc.darn
    258, // llvm.ppc.darn32
    258, // llvm.ppc.darnraw
    10, // llvm.ppc.dcba
    240, // llvm.ppc.dcbf
    240, // llvm.ppc.dcbfl
    240, // llvm.ppc.dcbflp
    240, // llvm.ppc.dcbfps
    10, // llvm.ppc.dcbi
    10, // llvm.ppc.dcbst
    240, // llvm.ppc.dcbstps
    226, // llvm.ppc.dcbt
    259, // llvm.ppc.dcbt.with.hint
    226, // llvm.ppc.dcbtst
    259, // llvm.ppc.dcbtst.with.hint
    226, // llvm.ppc.dcbtstt
    226, // llvm.ppc.dcbtt
    10, // llvm.ppc.dcbz
    10, // llvm.ppc.dcbzl
    2, // llvm.ppc.divde
    2, // llvm.ppc.divdeu
    2, // llvm.ppc.divf128.round.to.odd
    2, // llvm.ppc.divwe
    2, // llvm.ppc.divweu
    10, // llvm.ppc.eieio
    2, // llvm.ppc.extract.exp
    2, // llvm.ppc.extract.sig
    2, // llvm.ppc.fcfid
    2, // llvm.ppc.fcfud
    2, // llvm.ppc.fctid
    2, // llvm.ppc.fctidz
    2, // llvm.ppc.fctiw
    2, // llvm.ppc.fctiwz
    2, // llvm.ppc.fctudz
    2, // llvm.ppc.fctuwz
    2, // llvm.ppc.fmaf128.round.to.odd
    2, // llvm.ppc.fmsub
    2, // llvm.ppc.fmsubs
    2, // llvm.ppc.fnabs
    2, // llvm.ppc.fnabss
    2, // llvm.ppc.fnmadd
    2, // llvm.ppc.fnmadds
    2, // llvm.ppc.fnmsub
    2, // llvm.ppc.fre
    2, // llvm.ppc.fres
    2, // llvm.ppc.frsqrte
    2, // llvm.ppc.frsqrtes
    2, // llvm.ppc.fsel
    2, // llvm.ppc.fsels
    10, // llvm.ppc.get.texasr
    10, // llvm.ppc.get.texasru
    10, // llvm.ppc.get.tfhar
    10, // llvm.ppc.get.tfiar
    10, // llvm.ppc.icbt
    2, // llvm.ppc.insert.exp
    10, // llvm.ppc.iospace.eieio
    10, // llvm.ppc.iospace.lwsync
    10, // llvm.ppc.iospace.sync
    10, // llvm.ppc.isync
    3, // llvm.ppc.load2r
    3, // llvm.ppc.load4r
    3, // llvm.ppc.load8r
    10, // llvm.ppc.lwsync
    2, // llvm.ppc.maddhd
    2, // llvm.ppc.maddhdu
    2, // llvm.ppc.maddld
    2, // llvm.ppc.maxfe
    2, // llvm.ppc.maxfl
    2, // llvm.ppc.maxfs
    2, // llvm.ppc.mfmsr
    83, // llvm.ppc.mfspr
    2, // llvm.ppc.mftbu
    2, // llvm.ppc.minfe
    2, // llvm.ppc.minfl
    2, // llvm.ppc.minfs
    2, // llvm.ppc.mma.assemble.acc
    2, // llvm.ppc.mma.disassemble.acc
    2, // llvm.ppc.mma.pmxvbf16ger2
    2, // llvm.ppc.mma.pmxvbf16ger2nn
    2, // llvm.ppc.mma.pmxvbf16ger2np
    2, // llvm.ppc.mma.pmxvbf16ger2pn
    2, // llvm.ppc.mma.pmxvbf16ger2pp
    2, // llvm.ppc.mma.pmxvf16ger2
    2, // llvm.ppc.mma.pmxvf16ger2nn
    2, // llvm.ppc.mma.pmxvf16ger2np
    2, // llvm.ppc.mma.pmxvf16ger2pn
    2, // llvm.ppc.mma.pmxvf16ger2pp
    2, // llvm.ppc.mma.pmxvf32ger
    2, // llvm.ppc.mma.pmxvf32gernn
    2, // llvm.ppc.mma.pmxvf32gernp
    2, // llvm.ppc.mma.pmxvf32gerpn
    2, // llvm.ppc.mma.pmxvf32gerpp
    2, // llvm.ppc.mma.pmxvf64ger
    2, // llvm.ppc.mma.pmxvf64gernn
    2, // llvm.ppc.mma.pmxvf64gernp
    2, // llvm.ppc.mma.pmxvf64gerpn
    2, // llvm.ppc.mma.pmxvf64gerpp
    2, // llvm.ppc.mma.pmxvi16ger2
    2, // llvm.ppc.mma.pmxvi16ger2pp
    2, // llvm.ppc.mma.pmxvi16ger2s
    2, // llvm.ppc.mma.pmxvi16ger2spp
    2, // llvm.ppc.mma.pmxvi4ger8
    2, // llvm.ppc.mma.pmxvi4ger8pp
    2, // llvm.ppc.mma.pmxvi8ger4
    2, // llvm.ppc.mma.pmxvi8ger4pp
    2, // llvm.ppc.mma.pmxvi8ger4spp
    2, // llvm.ppc.mma.xvbf16ger2
    2, // llvm.ppc.mma.xvbf16ger2nn
    2, // llvm.ppc.mma.xvbf16ger2np
    2, // llvm.ppc.mma.xvbf16ger2pn
    2, // llvm.ppc.mma.xvbf16ger2pp
    2, // llvm.ppc.mma.xvf16ger2
    2, // llvm.ppc.mma.xvf16ger2nn
    2, // llvm.ppc.mma.xvf16ger2np
    2, // llvm.ppc.mma.xvf16ger2pn
    2, // llvm.ppc.mma.xvf16ger2pp
    2, // llvm.ppc.mma.xvf32ger
    2, // llvm.ppc.mma.xvf32gernn
    2, // llvm.ppc.mma.xvf32gernp
    2, // llvm.ppc.mma.xvf32gerpn
    2, // llvm.ppc.mma.xvf32gerpp
    2, // llvm.ppc.mma.xvf64ger
    2, // llvm.ppc.mma.xvf64gernn
    2, // llvm.ppc.mma.xvf64gernp
    2, // llvm.ppc.mma.xvf64gerpn
    2, // llvm.ppc.mma.xvf64gerpp
    2, // llvm.ppc.mma.xvi16ger2
    2, // llvm.ppc.mma.xvi16ger2pp
    2, // llvm.ppc.mma.xvi16ger2s
    2, // llvm.ppc.mma.xvi16ger2spp
    2, // llvm.ppc.mma.xvi4ger8
    2, // llvm.ppc.mma.xvi4ger8pp
    2, // llvm.ppc.mma.xvi8ger4
    2, // llvm.ppc.mma.xvi8ger4pp
    2, // llvm.ppc.mma.xvi8ger4spp
    2, // llvm.ppc.mma.xxmfacc
    2, // llvm.ppc.mma.xxmtacc
    2, // llvm.ppc.mma.xxsetaccz
    181, // llvm.ppc.mtfsb0
    181, // llvm.ppc.mtfsb1
    181, // llvm.ppc.mtfsf
    260, // llvm.ppc.mtfsfi
    10, // llvm.ppc.mtmsr
    83, // llvm.ppc.mtspr
    2, // llvm.ppc.mulf128.round.to.odd
    2, // llvm.ppc.mulhd
    2, // llvm.ppc.mulhdu
    2, // llvm.ppc.mulhw
    2, // llvm.ppc.mulhwu
    2, // llvm.ppc.pack.longdouble
    2, // llvm.ppc.pdepd
    2, // llvm.ppc.pextd
    2, // llvm.ppc.popcntb
    258, // llvm.ppc.readflm
    2, // llvm.ppc.scalar.extract.expq
    2, // llvm.ppc.scalar.insert.exp.qp
    10, // llvm.ppc.set.texasr
    10, // llvm.ppc.set.texasru
    10, // llvm.ppc.set.tfhar
    10, // llvm.ppc.set.tfiar
    2, // llvm.ppc.setb
    261, // llvm.ppc.setflm
    261, // llvm.ppc.setrnd
    2, // llvm.ppc.sqrtf128.round.to.odd
    262, // llvm.ppc.stbcx
    262, // llvm.ppc.stdcx
    88, // llvm.ppc.stfiw
    262, // llvm.ppc.sthcx
    263, // llvm.ppc.store2r
    263, // llvm.ppc.store4r
    263, // llvm.ppc.store8r
    264, // llvm.ppc.stwcx
    2, // llvm.ppc.subf128.round.to.odd
    10, // llvm.ppc.sync
    10, // llvm.ppc.tabort
    10, // llvm.ppc.tabortdc
    10, // llvm.ppc.tabortdci
    10, // llvm.ppc.tabortwc
    10, // llvm.ppc.tabortwci
    220, // llvm.ppc.tbegin
    10, // llvm.ppc.tcheck
    35, // llvm.ppc.tdw
    220, // llvm.ppc.tend
    10, // llvm.ppc.tendall
    59, // llvm.ppc.test.data.class
    10, // llvm.ppc.trap
    10, // llvm.ppc.trapd
    10, // llvm.ppc.trechkpt
    10, // llvm.ppc.treclaim
    10, // llvm.ppc.tresume
    2, // llvm.ppc.truncf128.round.to.odd
    10, // llvm.ppc.tsr
    10, // llvm.ppc.tsuspend
    10, // llvm.ppc.ttest
    35, // llvm.ppc.tw
    2, // llvm.ppc.unpack.longdouble
    2, // llvm.ppc.vsx.assemble.pair
    2, // llvm.ppc.vsx.disassemble.pair
    3, // llvm.ppc.vsx.lxvd2x
    3, // llvm.ppc.vsx.lxvd2x.be
    3, // llvm.ppc.vsx.lxvl
    3, // llvm.ppc.vsx.lxvll
    3, // llvm.ppc.vsx.lxvp
    3, // llvm.ppc.vsx.lxvw4x
    3, // llvm.ppc.vsx.lxvw4x.be
    264, // llvm.ppc.vsx.stxvd2x
    264, // llvm.ppc.vsx.stxvd2x.be
    264, // llvm.ppc.vsx.stxvl
    264, // llvm.ppc.vsx.stxvll
    264, // llvm.ppc.vsx.stxvp
    264, // llvm.ppc.vsx.stxvw4x
    264, // llvm.ppc.vsx.stxvw4x.be
    2, // llvm.ppc.vsx.xsmaxdp
    2, // llvm.ppc.vsx.xsmindp
    2, // llvm.ppc.vsx.xvcmpeqdp
    2, // llvm.ppc.vsx.xvcmpeqdp.p
    2, // llvm.ppc.vsx.xvcmpeqsp
    2, // llvm.ppc.vsx.xvcmpeqsp.p
    2, // llvm.ppc.vsx.xvcmpgedp
    2, // llvm.ppc.vsx.xvcmpgedp.p
    2, // llvm.ppc.vsx.xvcmpgesp
    2, // llvm.ppc.vsx.xvcmpgesp.p
    2, // llvm.ppc.vsx.xvcmpgtdp
    2, // llvm.ppc.vsx.xvcmpgtdp.p
    2, // llvm.ppc.vsx.xvcmpgtsp
    2, // llvm.ppc.vsx.xvcmpgtsp.p
    2, // llvm.ppc.vsx.xvcvbf16spn
    2, // llvm.ppc.vsx.xvcvdpsp
    2, // llvm.ppc.vsx.xvcvdpsxws
    2, // llvm.ppc.vsx.xvcvdpuxws
    2, // llvm.ppc.vsx.xvcvhpsp
    2, // llvm.ppc.vsx.xvcvspbf16
    2, // llvm.ppc.vsx.xvcvspdp
    2, // llvm.ppc.vsx.xvcvsphp
    2, // llvm.ppc.vsx.xvcvspsxds
    2, // llvm.ppc.vsx.xvcvspuxds
    2, // llvm.ppc.vsx.xvcvsxdsp
    2, // llvm.ppc.vsx.xvcvsxwdp
    2, // llvm.ppc.vsx.xvcvuxdsp
    2, // llvm.ppc.vsx.xvcvuxwdp
    2, // llvm.ppc.vsx.xvdivdp
    2, // llvm.ppc.vsx.xvdivsp
    2, // llvm.ppc.vsx.xviexpdp
    2, // llvm.ppc.vsx.xviexpsp
    2, // llvm.ppc.vsx.xvmaxdp
    2, // llvm.ppc.vsx.xvmaxsp
    2, // llvm.ppc.vsx.xvmindp
    2, // llvm.ppc.vsx.xvminsp
    2, // llvm.ppc.vsx.xvrdpip
    2, // llvm.ppc.vsx.xvredp
    2, // llvm.ppc.vsx.xvresp
    2, // llvm.ppc.vsx.xvrspip
    2, // llvm.ppc.vsx.xvrsqrtedp
    2, // llvm.ppc.vsx.xvrsqrtesp
    2, // llvm.ppc.vsx.xvtdivdp
    2, // llvm.ppc.vsx.xvtdivsp
    2, // llvm.ppc.vsx.xvtlsbb
    2, // llvm.ppc.vsx.xvtsqrtdp
    2, // llvm.ppc.vsx.xvtsqrtsp
    59, // llvm.ppc.vsx.xvtstdcdp
    59, // llvm.ppc.vsx.xvtstdcsp
    2, // llvm.ppc.vsx.xvxexpdp
    2, // llvm.ppc.vsx.xvxexpsp
    2, // llvm.ppc.vsx.xvxsigdp
    2, // llvm.ppc.vsx.xvxsigsp
    2, // llvm.ppc.vsx.xxblendvb
    2, // llvm.ppc.vsx.xxblendvd
    2, // llvm.ppc.vsx.xxblendvh
    2, // llvm.ppc.vsx.xxblendvw
    89, // llvm.ppc.vsx.xxeval
    2, // llvm.ppc.vsx.xxextractuw
    2, // llvm.ppc.vsx.xxgenpcvbm
    2, // llvm.ppc.vsx.xxgenpcvdm
    2, // llvm.ppc.vsx.xxgenpcvhm
    2, // llvm.ppc.vsx.xxgenpcvwm
    2, // llvm.ppc.vsx.xxinsertw
    2, // llvm.ppc.vsx.xxleqv
    89, // llvm.ppc.vsx.xxpermx
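    // --- AMD R600 (llvm.r600.*) intrinsic entries follow. ---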
    5, // llvm.r600.cube
    2, // llvm.r600.ddx
    2, // llvm.r600.ddy
    5, // llvm.r600.dot4
    265, // llvm.r600.group.barrier
    5, // llvm.r600.implicitarg.ptr
    8, // llvm.r600.kill
    8, // llvm.r600.rat.store.typed
    5, // llvm.r600.read.global.size.x
    5, // llvm.r600.read.global.size.y
    5, // llvm.r600.read.global.size.z
    5, // llvm.r600.read.local.size.x
    5, // llvm.r600.read.local.size.y
    5, // llvm.r600.read.local.size.z
    5, // llvm.r600.read.ngroups.x
    5, // llvm.r600.read.ngroups.y
    5, // llvm.r600.read.ngroups.z
    5, // llvm.r600.read.tgid.x
    5, // llvm.r600.read.tgid.y
    5, // llvm.r600.read.tgid.z
    5, // llvm.r600.read.tidig.x
    5, // llvm.r600.read.tidig.y
    5, // llvm.r600.read.tidig.z
    5, // llvm.r600.recipsqrt.clamped
    5, // llvm.r600.recipsqrt.ieee
    8, // llvm.r600.store.stream.output
    266, // llvm.r600.store.swizzle
    2, // llvm.r600.tex
    2, // llvm.r600.texc
    2, // llvm.r600.txb
    2, // llvm.r600.txbc
    2, // llvm.r600.txf
    2, // llvm.r600.txl
    2, // llvm.r600.txlc
    2, // llvm.r600.txq
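    // --- RISC-V (llvm.riscv.*) intrinsic entries follow. ---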
    65, // llvm.riscv.aes32dsi
    65, // llvm.riscv.aes32dsmi
    65, // llvm.riscv.aes32esi
    65, // llvm.riscv.aes32esmi
    5, // llvm.riscv.aes64ds
    5, // llvm.riscv.aes64dsm
    5, // llvm.riscv.aes64es
    5, // llvm.riscv.aes64esm
    5, // llvm.riscv.aes64im
    1, // llvm.riscv.aes64ks1i
    5, // llvm.riscv.aes64ks2
    5, // llvm.riscv.brev8
    5, // llvm.riscv.clmul
    5, // llvm.riscv.clmulh
    5, // llvm.riscv.clmulr
    236, // llvm.riscv.masked.atomicrmw.add.i32
    236, // llvm.riscv.masked.atomicrmw.add.i64
    237, // llvm.riscv.masked.atomicrmw.max.i32
    237, // llvm.riscv.masked.atomicrmw.max.i64
    237, // llvm.riscv.masked.atomicrmw.min.i32
    237, // llvm.riscv.masked.atomicrmw.min.i64
    236, // llvm.riscv.masked.atomicrmw.nand.i32
    236, // llvm.riscv.masked.atomicrmw.nand.i64
    236, // llvm.riscv.masked.atomicrmw.sub.i32
    236, // llvm.riscv.masked.atomicrmw.sub.i64
    236, // llvm.riscv.masked.atomicrmw.umax.i32
    236, // llvm.riscv.masked.atomicrmw.umax.i64
    236, // llvm.riscv.masked.atomicrmw.umin.i32
    236, // llvm.riscv.masked.atomicrmw.umin.i64
    236, // llvm.riscv.masked.atomicrmw.xchg.i32
    236, // llvm.riscv.masked.atomicrmw.xchg.i64
    237, // llvm.riscv.masked.cmpxchg.i32
    237, // llvm.riscv.masked.cmpxchg.i64
    267, // llvm.riscv.masked.strided.load
    268, // llvm.riscv.masked.strided.store
    5, // llvm.riscv.orc.b
    41, // llvm.riscv.seg2.load
    269, // llvm.riscv.seg2.store
    41, // llvm.riscv.seg3.load
    270, // llvm.riscv.seg3.store
    41, // llvm.riscv.seg4.load
    271, // llvm.riscv.seg4.store
    41, // llvm.riscv.seg5.load
    272, // llvm.riscv.seg5.store
    41, // llvm.riscv.seg6.load
    273, // llvm.riscv.seg6.store
    41, // llvm.riscv.seg7.load
    274, // llvm.riscv.seg7.store
    41, // llvm.riscv.seg8.load
    275, // llvm.riscv.seg8.store
    276, // llvm.riscv.sf.vc.fv.se
    182, // llvm.riscv.sf.vc.fvv.se
    182, // llvm.riscv.sf.vc.fvw.se
    277, // llvm.riscv.sf.vc.i.se.e16m1
    277, // llvm.riscv.sf.vc.i.se.e16m2
    277, // llvm.riscv.sf.vc.i.se.e16m4
    277, // llvm.riscv.sf.vc.i.se.e16m8
    277, // llvm.riscv.sf.vc.i.se.e16mf2
    277, // llvm.riscv.sf.vc.i.se.e16mf4
    277, // llvm.riscv.sf.vc.i.se.e32m1
    277, // llvm.riscv.sf.vc.i.se.e32m2
    277, // llvm.riscv.sf.vc.i.se.e32m4
    277, // llvm.riscv.sf.vc.i.se.e32m8
    277, // llvm.riscv.sf.vc.i.se.e32mf2
    277, // llvm.riscv.sf.vc.i.se.e64m1
    277, // llvm.riscv.sf.vc.i.se.e64m2
    277, // llvm.riscv.sf.vc.i.se.e64m4
    277, // llvm.riscv.sf.vc.i.se.e64m8
    277, // llvm.riscv.sf.vc.i.se.e8m1
    277, // llvm.riscv.sf.vc.i.se.e8m2
    277, // llvm.riscv.sf.vc.i.se.e8m4
    277, // llvm.riscv.sf.vc.i.se.e8m8
    277, // llvm.riscv.sf.vc.i.se.e8mf2
    277, // llvm.riscv.sf.vc.i.se.e8mf4
    277, // llvm.riscv.sf.vc.i.se.e8mf8
    278, // llvm.riscv.sf.vc.iv.se
    279, // llvm.riscv.sf.vc.ivv.se
    279, // llvm.riscv.sf.vc.ivw.se
    20, // llvm.riscv.sf.vc.v.fv
    182, // llvm.riscv.sf.vc.v.fv.se
    20, // llvm.riscv.sf.vc.v.fvv
    182, // llvm.riscv.sf.vc.v.fvv.se
    20, // llvm.riscv.sf.vc.v.fvw
    182, // llvm.riscv.sf.vc.v.fvw.se
    280, // llvm.riscv.sf.vc.v.i
    281, // llvm.riscv.sf.vc.v.i.se
    282, // llvm.riscv.sf.vc.v.iv
    283, // llvm.riscv.sf.vc.v.iv.se
    284, // llvm.riscv.sf.vc.v.ivv
    279, // llvm.riscv.sf.vc.v.ivv.se
    284, // llvm.riscv.sf.vc.v.ivw
    279, // llvm.riscv.sf.vc.v.ivw.se
    20, // llvm.riscv.sf.vc.v.vv
    182, // llvm.riscv.sf.vc.v.vv.se
    20, // llvm.riscv.sf.vc.v.vvv
    182, // llvm.riscv.sf.vc.v.vvv.se
    20, // llvm.riscv.sf.vc.v.vvw
    182, // llvm.riscv.sf.vc.v.vvw.se
    285, // llvm.riscv.sf.vc.v.x
    276, // llvm.riscv.sf.vc.v.x.se
    20, // llvm.riscv.sf.vc.v.xv
    182, // llvm.riscv.sf.vc.v.xv.se
    20, // llvm.riscv.sf.vc.v.xvv
    182, // llvm.riscv.sf.vc.v.xvv.se
    20, // llvm.riscv.sf.vc.v.xvw
    182, // llvm.riscv.sf.vc.v.xvw.se
    276, // llvm.riscv.sf.vc.vv.se
    182, // llvm.riscv.sf.vc.vvv.se
    182, // llvm.riscv.sf.vc.vvw.se
    281, // llvm.riscv.sf.vc.x.se.e16m1
    281, // llvm.riscv.sf.vc.x.se.e16m2
    281, // llvm.riscv.sf.vc.x.se.e16m4
    281, // llvm.riscv.sf.vc.x.se.e16m8
    281, // llvm.riscv.sf.vc.x.se.e16mf2
    281, // llvm.riscv.sf.vc.x.se.e16mf4
    281, // llvm.riscv.sf.vc.x.se.e32m1
    281, // llvm.riscv.sf.vc.x.se.e32m2
    281, // llvm.riscv.sf.vc.x.se.e32m4
    281, // llvm.riscv.sf.vc.x.se.e32m8
    281, // llvm.riscv.sf.vc.x.se.e32mf2
    281, // llvm.riscv.sf.vc.x.se.e64m1
    281, // llvm.riscv.sf.vc.x.se.e64m2
    281, // llvm.riscv.sf.vc.x.se.e64m4
    281, // llvm.riscv.sf.vc.x.se.e64m8
    281, // llvm.riscv.sf.vc.x.se.e8m1
    281, // llvm.riscv.sf.vc.x.se.e8m2
    281, // llvm.riscv.sf.vc.x.se.e8m4
    281, // llvm.riscv.sf.vc.x.se.e8m8
    281, // llvm.riscv.sf.vc.x.se.e8mf2
    281, // llvm.riscv.sf.vc.x.se.e8mf4
    281, // llvm.riscv.sf.vc.x.se.e8mf8
    276, // llvm.riscv.sf.vc.xv.se
    182, // llvm.riscv.sf.vc.xvv.se
    182, // llvm.riscv.sf.vc.xvw.se
    5, // llvm.riscv.sha256sig0
    5, // llvm.riscv.sha256sig1
    5, // llvm.riscv.sha256sum0
    5, // llvm.riscv.sha256sum1
    5, // llvm.riscv.sha512sig0
    5, // llvm.riscv.sha512sig0h
    5, // llvm.riscv.sha512sig0l
    5, // llvm.riscv.sha512sig1
    5, // llvm.riscv.sha512sig1h
    5, // llvm.riscv.sha512sig1l
    5, // llvm.riscv.sha512sum0
    5, // llvm.riscv.sha512sum0r
    5, // llvm.riscv.sha512sum1
    5, // llvm.riscv.sha512sum1r
    5, // llvm.riscv.sm3p0
    5, // llvm.riscv.sm3p1
    65, // llvm.riscv.sm4ed
    65, // llvm.riscv.sm4ks
    91, // llvm.riscv.th.vmaqa
    286, // llvm.riscv.th.vmaqa.mask
    91, // llvm.riscv.th.vmaqasu
    286, // llvm.riscv.th.vmaqasu.mask
    91, // llvm.riscv.th.vmaqau
    286, // llvm.riscv.th.vmaqau.mask
    91, // llvm.riscv.th.vmaqaus
    286, // llvm.riscv.th.vmaqaus.mask
    5, // llvm.riscv.unzip
    74, // llvm.riscv.vaadd
    287, // llvm.riscv.vaadd.mask
    74, // llvm.riscv.vaaddu
    287, // llvm.riscv.vaaddu.mask
    2, // llvm.riscv.vadc
    2, // llvm.riscv.vadd
    286, // llvm.riscv.vadd.mask
    2, // llvm.riscv.vand
    286, // llvm.riscv.vand.mask
    74, // llvm.riscv.vasub
    287, // llvm.riscv.vasub.mask
    74, // llvm.riscv.vasubu
    287, // llvm.riscv.vasubu.mask
    2, // llvm.riscv.vcompress
    2, // llvm.riscv.vcpop
    2, // llvm.riscv.vcpop.mask
    2, // llvm.riscv.vdiv
    286, // llvm.riscv.vdiv.mask
    2, // llvm.riscv.vdivu
    286, // llvm.riscv.vdivu.mask
    89, // llvm.riscv.vfadd
    288, // llvm.riscv.vfadd.mask
    2, // llvm.riscv.vfclass
    91, // llvm.riscv.vfclass.mask
    22, // llvm.riscv.vfcvt.f.x.v
    289, // llvm.riscv.vfcvt.f.x.v.mask
    22, // llvm.riscv.vfcvt.f.xu.v
    289, // llvm.riscv.vfcvt.f.xu.v.mask
    2, // llvm.riscv.vfcvt.rtz.x.f.v
    91, // llvm.riscv.vfcvt.rtz.x.f.v.mask
    2, // llvm.riscv.vfcvt.rtz.xu.f.v
    91, // llvm.riscv.vfcvt.rtz.xu.f.v.mask
    22, // llvm.riscv.vfcvt.x.f.v
    289, // llvm.riscv.vfcvt.x.f.v.mask
    22, // llvm.riscv.vfcvt.xu.f.v
    289, // llvm.riscv.vfcvt.xu.f.v.mask
    89, // llvm.riscv.vfdiv
    288, // llvm.riscv.vfdiv.mask
    2, // llvm.riscv.vfirst
    2, // llvm.riscv.vfirst.mask
    289, // llvm.riscv.vfmacc
    288, // llvm.riscv.vfmacc.mask
    289, // llvm.riscv.vfmadd
    288, // llvm.riscv.vfmadd.mask
    2, // llvm.riscv.vfmax
    286, // llvm.riscv.vfmax.mask
    2, // llvm.riscv.vfmerge
    2, // llvm.riscv.vfmin
    286, // llvm.riscv.vfmin.mask
    289, // llvm.riscv.vfmsac
    288, // llvm.riscv.vfmsac.mask
    289, // llvm.riscv.vfmsub
    288, // llvm.riscv.vfmsub.mask
    89, // llvm.riscv.vfmul
    288, // llvm.riscv.vfmul.mask
    2, // llvm.riscv.vfmv.f.s
    2, // llvm.riscv.vfmv.s.f
    2, // llvm.riscv.vfmv.v.f
    22, // llvm.riscv.vfncvt.f.f.w
    289, // llvm.riscv.vfncvt.f.f.w.mask
    22, // llvm.riscv.vfncvt.f.x.w
    289, // llvm.riscv.vfncvt.f.x.w.mask
    22, // llvm.riscv.vfncvt.f.xu.w
    289, // llvm.riscv.vfncvt.f.xu.w.mask
    2, // llvm.riscv.vfncvt.rod.f.f.w
    91, // llvm.riscv.vfncvt.rod.f.f.w.mask
    2, // llvm.riscv.vfncvt.rtz.x.f.w
    91, // llvm.riscv.vfncvt.rtz.x.f.w.mask
    2, // llvm.riscv.vfncvt.rtz.xu.f.w
    91, // llvm.riscv.vfncvt.rtz.xu.f.w.mask
    22, // llvm.riscv.vfncvt.x.f.w
    289, // llvm.riscv.vfncvt.x.f.w.mask
    22, // llvm.riscv.vfncvt.xu.f.w
    289, // llvm.riscv.vfncvt.xu.f.w.mask
    289, // llvm.riscv.vfnmacc
    288, // llvm.riscv.vfnmacc.mask
    289, // llvm.riscv.vfnmadd
    288, // llvm.riscv.vfnmadd.mask
    289, // llvm.riscv.vfnmsac
    288, // llvm.riscv.vfnmsac.mask
    289, // llvm.riscv.vfnmsub
    288, // llvm.riscv.vfnmsub.mask
    89, // llvm.riscv.vfrdiv
    288, // llvm.riscv.vfrdiv.mask
    22, // llvm.riscv.vfrec7
    289, // llvm.riscv.vfrec7.mask
    2, // llvm.riscv.vfredmax
    2, // llvm.riscv.vfredmax.mask
    2, // llvm.riscv.vfredmin
    2, // llvm.riscv.vfredmin.mask
    89, // llvm.riscv.vfredosum
    91, // llvm.riscv.vfredosum.mask
    89, // llvm.riscv.vfredusum
    91, // llvm.riscv.vfredusum.mask
    2, // llvm.riscv.vfrsqrt7
    91, // llvm.riscv.vfrsqrt7.mask
    89, // llvm.riscv.vfrsub
    288, // llvm.riscv.vfrsub.mask
    2, // llvm.riscv.vfsgnj
    286, // llvm.riscv.vfsgnj.mask
    2, // llvm.riscv.vfsgnjn
    286, // llvm.riscv.vfsgnjn.mask
    2, // llvm.riscv.vfsgnjx
    286, // llvm.riscv.vfsgnjx.mask
    2, // llvm.riscv.vfslide1down
    286, // llvm.riscv.vfslide1down.mask
    2, // llvm.riscv.vfslide1up
    286, // llvm.riscv.vfslide1up.mask
    22, // llvm.riscv.vfsqrt
    289, // llvm.riscv.vfsqrt.mask
    89, // llvm.riscv.vfsub
    288, // llvm.riscv.vfsub.mask
    89, // llvm.riscv.vfwadd
    288, // llvm.riscv.vfwadd.mask
    89, // llvm.riscv.vfwadd.w
    288, // llvm.riscv.vfwadd.w.mask
    2, // llvm.riscv.vfwcvt.f.f.v
    91, // llvm.riscv.vfwcvt.f.f.v.mask
    2, // llvm.riscv.vfwcvt.f.x.v
    91, // llvm.riscv.vfwcvt.f.x.v.mask
    2, // llvm.riscv.vfwcvt.f.xu.v
    91, // llvm.riscv.vfwcvt.f.xu.v.mask
    2, // llvm.riscv.vfwcvt.rtz.x.f.v
    91, // llvm.riscv.vfwcvt.rtz.x.f.v.mask
    2, // llvm.riscv.vfwcvt.rtz.xu.f.v
    91, // llvm.riscv.vfwcvt.rtz.xu.f.v.mask
    22, // llvm.riscv.vfwcvt.x.f.v
    289, // llvm.riscv.vfwcvt.x.f.v.mask
    22, // llvm.riscv.vfwcvt.xu.f.v
    289, // llvm.riscv.vfwcvt.xu.f.v.mask
    289, // llvm.riscv.vfwmacc
    288, // llvm.riscv.vfwmacc.mask
    289, // llvm.riscv.vfwmsac
    288, // llvm.riscv.vfwmsac.mask
    89, // llvm.riscv.vfwmul
    288, // llvm.riscv.vfwmul.mask
    289, // llvm.riscv.vfwnmacc
    288, // llvm.riscv.vfwnmacc.mask
    289, // llvm.riscv.vfwnmsac
    288, // llvm.riscv.vfwnmsac.mask
    89, // llvm.riscv.vfwredosum
    91, // llvm.riscv.vfwredosum.mask
    89, // llvm.riscv.vfwredusum
    91, // llvm.riscv.vfwredusum.mask
    89, // llvm.riscv.vfwsub
    288, // llvm.riscv.vfwsub.mask
    89, // llvm.riscv.vfwsub.w
    288, // llvm.riscv.vfwsub.w.mask
    2, // llvm.riscv.vid
    89, // llvm.riscv.vid.mask
    2, // llvm.riscv.viota
    91, // llvm.riscv.viota.mask
    267, // llvm.riscv.vle
    290, // llvm.riscv.vle.mask
    291, // llvm.riscv.vleff
    292, // llvm.riscv.vleff.mask
    41, // llvm.riscv.vlm
    267, // llvm.riscv.vloxei
    293, // llvm.riscv.vloxei.mask
    294, // llvm.riscv.vloxseg2
    295, // llvm.riscv.vloxseg2.mask
    296, // llvm.riscv.vloxseg3
    297, // llvm.riscv.vloxseg3.mask
    298, // llvm.riscv.vloxseg4
    299, // llvm.riscv.vloxseg4.mask
    300, // llvm.riscv.vloxseg5
    301, // llvm.riscv.vloxseg5.mask
    302, // llvm.riscv.vloxseg6
    303, // llvm.riscv.vloxseg6.mask
    304, // llvm.riscv.vloxseg7
    305, // llvm.riscv.vloxseg7.mask
    306, // llvm.riscv.vloxseg8
    307, // llvm.riscv.vloxseg8.mask
    267, // llvm.riscv.vlse
    293, // llvm.riscv.vlse.mask
    294, // llvm.riscv.vlseg2
    308, // llvm.riscv.vlseg2.mask
    309, // llvm.riscv.vlseg2ff
    310, // llvm.riscv.vlseg2ff.mask
    296, // llvm.riscv.vlseg3
    311, // llvm.riscv.vlseg3.mask
    312, // llvm.riscv.vlseg3ff
    313, // llvm.riscv.vlseg3ff.mask
    298, // llvm.riscv.vlseg4
    314, // llvm.riscv.vlseg4.mask
    315, // llvm.riscv.vlseg4ff
    316, // llvm.riscv.vlseg4ff.mask
    300, // llvm.riscv.vlseg5
    317, // llvm.riscv.vlseg5.mask
    318, // llvm.riscv.vlseg5ff
    319, // llvm.riscv.vlseg5ff.mask
    302, // llvm.riscv.vlseg6
    320, // llvm.riscv.vlseg6.mask
    321, // llvm.riscv.vlseg6ff
    322, // llvm.riscv.vlseg6ff.mask
    304, // llvm.riscv.vlseg7
    323, // llvm.riscv.vlseg7.mask
    324, // llvm.riscv.vlseg7ff
    325, // llvm.riscv.vlseg7ff.mask
    306, // llvm.riscv.vlseg8
    326, // llvm.riscv.vlseg8.mask
    327, // llvm.riscv.vlseg8ff
    328, // llvm.riscv.vlseg8ff.mask
    294, // llvm.riscv.vlsseg2
    295, // llvm.riscv.vlsseg2.mask
    296, // llvm.riscv.vlsseg3
    297, // llvm.riscv.vlsseg3.mask
    298, // llvm.riscv.vlsseg4
    299, // llvm.riscv.vlsseg4.mask
    300, // llvm.riscv.vlsseg5
    301, // llvm.riscv.vlsseg5.mask
    302, // llvm.riscv.vlsseg6
    303, // llvm.riscv.vlsseg6.mask
    304, // llvm.riscv.vlsseg7
    305, // llvm.riscv.vlsseg7.mask
    306, // llvm.riscv.vlsseg8
    307, // llvm.riscv.vlsseg8.mask
    267, // llvm.riscv.vluxei
    293, // llvm.riscv.vluxei.mask
    294, // llvm.riscv.vluxseg2
    295, // llvm.riscv.vluxseg2.mask
    296, // llvm.riscv.vluxseg3
    297, // llvm.riscv.vluxseg3.mask
    298, // llvm.riscv.vluxseg4
    299, // llvm.riscv.vluxseg4.mask
    300, // llvm.riscv.vluxseg5
    301, // llvm.riscv.vluxseg5.mask
    302, // llvm.riscv.vluxseg6
    303, // llvm.riscv.vluxseg6.mask
    304, // llvm.riscv.vluxseg7
    305, // llvm.riscv.vluxseg7.mask
    306, // llvm.riscv.vluxseg8
    307, // llvm.riscv.vluxseg8.mask
    91, // llvm.riscv.vmacc
    286, // llvm.riscv.vmacc.mask
    2, // llvm.riscv.vmadc
    2, // llvm.riscv.vmadc.carry.in
    91, // llvm.riscv.vmadd
    286, // llvm.riscv.vmadd.mask
    2, // llvm.riscv.vmand
    2, // llvm.riscv.vmandn
    2, // llvm.riscv.vmax
    286, // llvm.riscv.vmax.mask
    2, // llvm.riscv.vmaxu
    286, // llvm.riscv.vmaxu.mask
    2, // llvm.riscv.vmclr
    2, // llvm.riscv.vmerge
    2, // llvm.riscv.vmfeq
    2, // llvm.riscv.vmfeq.mask
    2, // llvm.riscv.vmfge
    2, // llvm.riscv.vmfge.mask
    2, // llvm.riscv.vmfgt
    2, // llvm.riscv.vmfgt.mask
    2, // llvm.riscv.vmfle
    2, // llvm.riscv.vmfle.mask
    2, // llvm.riscv.vmflt
    2, // llvm.riscv.vmflt.mask
    2, // llvm.riscv.vmfne
    2, // llvm.riscv.vmfne.mask
    2, // llvm.riscv.vmin
    286, // llvm.riscv.vmin.mask
    2, // llvm.riscv.vminu
    286, // llvm.riscv.vminu.mask
    2, // llvm.riscv.vmnand
    2, // llvm.riscv.vmnor
    2, // llvm.riscv.vmor
    2, // llvm.riscv.vmorn
    2, // llvm.riscv.vmsbc
    2, // llvm.riscv.vmsbc.borrow.in
    2, // llvm.riscv.vmsbf
    2, // llvm.riscv.vmsbf.mask
    2, // llvm.riscv.vmseq
    2, // llvm.riscv.vmseq.mask
    2, // llvm.riscv.vmset
    2, // llvm.riscv.vmsge
    2, // llvm.riscv.vmsge.mask
    2, // llvm.riscv.vmsgeu
    2, // llvm.riscv.vmsgeu.mask
    2, // llvm.riscv.vmsgt
    2, // llvm.riscv.vmsgt.mask
    2, // llvm.riscv.vmsgtu
    2, // llvm.riscv.vmsgtu.mask
    2, // llvm.riscv.vmsif
    2, // llvm.riscv.vmsif.mask
    2, // llvm.riscv.vmsle
    2, // llvm.riscv.vmsle.mask
    2, // llvm.riscv.vmsleu
    2, // llvm.riscv.vmsleu.mask
    2, // llvm.riscv.vmslt
    2, // llvm.riscv.vmslt.mask
    2, // llvm.riscv.vmsltu
    2, // llvm.riscv.vmsltu.mask
    2, // llvm.riscv.vmsne
    2, // llvm.riscv.vmsne.mask
    2, // llvm.riscv.vmsof
    2, // llvm.riscv.vmsof.mask
    2, // llvm.riscv.vmul
    286, // llvm.riscv.vmul.mask
    2, // llvm.riscv.vmulh
    286, // llvm.riscv.vmulh.mask
    2, // llvm.riscv.vmulhsu
    286, // llvm.riscv.vmulhsu.mask
    2, // llvm.riscv.vmulhu
    286, // llvm.riscv.vmulhu.mask
    2, // llvm.riscv.vmv.s.x
    2, // llvm.riscv.vmv.v.v
    2, // llvm.riscv.vmv.v.x
    2, // llvm.riscv.vmv.x.s
    2, // llvm.riscv.vmxnor
    2, // llvm.riscv.vmxor
    329, // llvm.riscv.vnclip
    330, // llvm.riscv.vnclip.mask
    329, // llvm.riscv.vnclipu
    330, // llvm.riscv.vnclipu.mask
    91, // llvm.riscv.vnmsac
    286, // llvm.riscv.vnmsac.mask
    91, // llvm.riscv.vnmsub
    286, // llvm.riscv.vnmsub.mask
    2, // llvm.riscv.vnsra
    286, // llvm.riscv.vnsra.mask
    2, // llvm.riscv.vnsrl
    286, // llvm.riscv.vnsrl.mask
    2, // llvm.riscv.vor
    286, // llvm.riscv.vor.mask
    2, // llvm.riscv.vredand
    2, // llvm.riscv.vredand.mask
    2, // llvm.riscv.vredmax
    2, // llvm.riscv.vredmax.mask
    2, // llvm.riscv.vredmaxu
    2, // llvm.riscv.vredmaxu.mask
    2, // llvm.riscv.vredmin
    2, // llvm.riscv.vredmin.mask
    2, // llvm.riscv.vredminu
    2, // llvm.riscv.vredminu.mask
    2, // llvm.riscv.vredor
    2, // llvm.riscv.vredor.mask
    2, // llvm.riscv.vredsum
    2, // llvm.riscv.vredsum.mask
    2, // llvm.riscv.vredxor
    2, // llvm.riscv.vredxor.mask
    2, // llvm.riscv.vrem
    286, // llvm.riscv.vrem.mask
    2, // llvm.riscv.vremu
    286, // llvm.riscv.vremu.mask
    2, // llvm.riscv.vrgather.vv
    286, // llvm.riscv.vrgather.vv.mask
    2, // llvm.riscv.vrgather.vx
    286, // llvm.riscv.vrgather.vx.mask
    2, // llvm.riscv.vrgatherei16.vv
    286, // llvm.riscv.vrgatherei16.vv.mask
    2, // llvm.riscv.vrsub
    286, // llvm.riscv.vrsub.mask
    76, // llvm.riscv.vsadd
    331, // llvm.riscv.vsadd.mask
    76, // llvm.riscv.vsaddu
    331, // llvm.riscv.vsaddu.mask
    2, // llvm.riscv.vsbc
    268, // llvm.riscv.vse
    268, // llvm.riscv.vse.mask
    332, // llvm.riscv.vsetvli
    285, // llvm.riscv.vsetvlimax
    2, // llvm.riscv.vsext
    91, // llvm.riscv.vsext.mask
    2, // llvm.riscv.vslide1down
    286, // llvm.riscv.vslide1down.mask
    2, // llvm.riscv.vslide1up
    286, // llvm.riscv.vslide1up.mask
    91, // llvm.riscv.vslidedown
    286, // llvm.riscv.vslidedown.mask
    91, // llvm.riscv.vslideup
    286, // llvm.riscv.vslideup.mask
    2, // llvm.riscv.vsll
    286, // llvm.riscv.vsll.mask
    268, // llvm.riscv.vsm
    74, // llvm.riscv.vsmul
    287, // llvm.riscv.vsmul.mask
    268, // llvm.riscv.vsoxei
    268, // llvm.riscv.vsoxei.mask
    269, // llvm.riscv.vsoxseg2
    269, // llvm.riscv.vsoxseg2.mask
    270, // llvm.riscv.vsoxseg3
    270, // llvm.riscv.vsoxseg3.mask
    271, // llvm.riscv.vsoxseg4
    271, // llvm.riscv.vsoxseg4.mask
    272, // llvm.riscv.vsoxseg5
    272, // llvm.riscv.vsoxseg5.mask
    273, // llvm.riscv.vsoxseg6
    273, // llvm.riscv.vsoxseg6.mask
    274, // llvm.riscv.vsoxseg7
    274, // llvm.riscv.vsoxseg7.mask
    275, // llvm.riscv.vsoxseg8
    275, // llvm.riscv.vsoxseg8.mask
    2, // llvm.riscv.vsra
    286, // llvm.riscv.vsra.mask
    2, // llvm.riscv.vsrl
    286, // llvm.riscv.vsrl.mask
    268, // llvm.riscv.vsse
    268, // llvm.riscv.vsse.mask
    269, // llvm.riscv.vsseg2
    269, // llvm.riscv.vsseg2.mask
    270, // llvm.riscv.vsseg3
    270, // llvm.riscv.vsseg3.mask
    271, // llvm.riscv.vsseg4
    271, // llvm.riscv.vsseg4.mask
    272, // llvm.riscv.vsseg5
    272, // llvm.riscv.vsseg5.mask
    273, // llvm.riscv.vsseg6
    273, // llvm.riscv.vsseg6.mask
    274, // llvm.riscv.vsseg7
    274, // llvm.riscv.vsseg7.mask
    275, // llvm.riscv.vsseg8
    275, // llvm.riscv.vsseg8.mask
    329, // llvm.riscv.vssra
    330, // llvm.riscv.vssra.mask
    329, // llvm.riscv.vssrl
    330, // llvm.riscv.vssrl.mask
    269, // llvm.riscv.vssseg2
    269, // llvm.riscv.vssseg2.mask
    270, // llvm.riscv.vssseg3
    270, // llvm.riscv.vssseg3.mask
    271, // llvm.riscv.vssseg4
    271, // llvm.riscv.vssseg4.mask
    272, // llvm.riscv.vssseg5
    272, // llvm.riscv.vssseg5.mask
    273, // llvm.riscv.vssseg6
    273, // llvm.riscv.vssseg6.mask
    274, // llvm.riscv.vssseg7
    274, // llvm.riscv.vssseg7.mask
    275, // llvm.riscv.vssseg8
    275, // llvm.riscv.vssseg8.mask
    76, // llvm.riscv.vssub
    331, // llvm.riscv.vssub.mask
    76, // llvm.riscv.vssubu
    331, // llvm.riscv.vssubu.mask
    2, // llvm.riscv.vsub
    286, // llvm.riscv.vsub.mask
    268, // llvm.riscv.vsuxei
    268, // llvm.riscv.vsuxei.mask
    269, // llvm.riscv.vsuxseg2
    269, // llvm.riscv.vsuxseg2.mask
    270, // llvm.riscv.vsuxseg3
    270, // llvm.riscv.vsuxseg3.mask
    271, // llvm.riscv.vsuxseg4
    271, // llvm.riscv.vsuxseg4.mask
    272, // llvm.riscv.vsuxseg5
    272, // llvm.riscv.vsuxseg5.mask
    273, // llvm.riscv.vsuxseg6
    273, // llvm.riscv.vsuxseg6.mask
    274, // llvm.riscv.vsuxseg7
    274, // llvm.riscv.vsuxseg7.mask
    275, // llvm.riscv.vsuxseg8
    275, // llvm.riscv.vsuxseg8.mask
    2, // llvm.riscv.vwadd
    286, // llvm.riscv.vwadd.mask
    2, // llvm.riscv.vwadd.w
    286, // llvm.riscv.vwadd.w.mask
    2, // llvm.riscv.vwaddu
    286, // llvm.riscv.vwaddu.mask
    2, // llvm.riscv.vwaddu.w
    286, // llvm.riscv.vwaddu.w.mask
    91, // llvm.riscv.vwmacc
    286, // llvm.riscv.vwmacc.mask
    91, // llvm.riscv.vwmaccsu
    286, // llvm.riscv.vwmaccsu.mask
    91, // llvm.riscv.vwmaccu
    286, // llvm.riscv.vwmaccu.mask
    91, // llvm.riscv.vwmaccus
    286, // llvm.riscv.vwmaccus.mask
    2, // llvm.riscv.vwmul
    286, // llvm.riscv.vwmul.mask
    2, // llvm.riscv.vwmulsu
    286, // llvm.riscv.vwmulsu.mask
    2, // llvm.riscv.vwmulu
    286, // llvm.riscv.vwmulu.mask
    2, // llvm.riscv.vwredsum
    2, // llvm.riscv.vwredsum.mask
    2, // llvm.riscv.vwredsumu
    2, // llvm.riscv.vwredsumu.mask
    2, // llvm.riscv.vwsub
    286, // llvm.riscv.vwsub.mask
    2, // llvm.riscv.vwsub.w
    286, // llvm.riscv.vwsub.w.mask
    2, // llvm.riscv.vwsubu
    286, // llvm.riscv.vwsubu.mask
    2, // llvm.riscv.vwsubu.w
    286, // llvm.riscv.vwsubu.w.mask
    2, // llvm.riscv.vxor
    286, // llvm.riscv.vxor.mask
    2, // llvm.riscv.vzext
    91, // llvm.riscv.vzext.mask
    5, // llvm.riscv.xperm4
    5, // llvm.riscv.xperm8
    5, // llvm.riscv.zip
    10, // llvm.s390.efpc
    12, // llvm.s390.etnd
    60, // llvm.s390.lcbb
    264, // llvm.s390.ntstg
    10, // llvm.s390.ppa.txassist
    10, // llvm.s390.sfpc
    333, // llvm.s390.tabort
    334, // llvm.s390.tbegin
    334, // llvm.s390.tbegin.nofloat
    334, // llvm.s390.tbeginc
    12, // llvm.s390.tdc
    10, // llvm.s390.tend
    12, // llvm.s390.vaccb
    12, // llvm.s390.vacccq
    12, // llvm.s390.vaccf
    12, // llvm.s390.vaccg
    12, // llvm.s390.vacch
    12, // llvm.s390.vaccq
    12, // llvm.s390.vacq
    12, // llvm.s390.vaq
    12, // llvm.s390.vavgb
    12, // llvm.s390.vavgf
    12, // llvm.s390.vavgg
    12, // llvm.s390.vavgh
    12, // llvm.s390.vavglb
    12, // llvm.s390.vavglf
    12, // llvm.s390.vavglg
    12, // llvm.s390.vavglh
    12, // llvm.s390.vbperm
    12, // llvm.s390.vceqbs
    12, // llvm.s390.vceqfs
    12, // llvm.s390.vceqgs
    12, // llvm.s390.vceqhs
    60, // llvm.s390.vcfn
    12, // llvm.s390.vchbs
    12, // llvm.s390.vchfs
    12, // llvm.s390.vchgs
    12, // llvm.s390.vchhs
    12, // llvm.s390.vchlbs
    12, // llvm.s390.vchlfs
    12, // llvm.s390.vchlgs
    12, // llvm.s390.vchlhs
    12, // llvm.s390.vcksm
    60, // llvm.s390.vclfnhs
    60, // llvm.s390.vclfnls
    60, // llvm.s390.vcnf
    238, // llvm.s390.vcrnfs
    74, // llvm.s390.verimb
    74, // llvm.s390.verimf
    74, // llvm.s390.verimg
    74, // llvm.s390.verimh
    12, // llvm.s390.verllb
    12, // llvm.s390.verllf
    12, // llvm.s390.verllg
    12, // llvm.s390.verllh
    12, // llvm.s390.verllvb
    12, // llvm.s390.verllvf
    12, // llvm.s390.verllvg
    12, // llvm.s390.verllvh
    238, // llvm.s390.vfaeb
    238, // llvm.s390.vfaebs
    238, // llvm.s390.vfaef
    238, // llvm.s390.vfaefs
    238, // llvm.s390.vfaeh
    238, // llvm.s390.vfaehs
    238, // llvm.s390.vfaezb
    238, // llvm.s390.vfaezbs
    238, // llvm.s390.vfaezf
    238, // llvm.s390.vfaezfs
    238, // llvm.s390.vfaezh
    238, // llvm.s390.vfaezhs
    12, // llvm.s390.vfcedbs
    12, // llvm.s390.vfcesbs
    12, // llvm.s390.vfchdbs
    12, // llvm.s390.vfchedbs
    12, // llvm.s390.vfchesbs
    12, // llvm.s390.vfchsbs
    12, // llvm.s390.vfeeb
    12, // llvm.s390.vfeebs
    12, // llvm.s390.vfeef
    12, // llvm.s390.vfeefs
    12, // llvm.s390.vfeeh
    12, // llvm.s390.vfeehs
    12, // llvm.s390.vfeezb
    12, // llvm.s390.vfeezbs
    12, // llvm.s390.vfeezf
    12, // llvm.s390.vfeezfs
    12, // llvm.s390.vfeezh
    12, // llvm.s390.vfeezhs
    12, // llvm.s390.vfeneb
    12, // llvm.s390.vfenebs
    12, // llvm.s390.vfenef
    12, // llvm.s390.vfenefs
    12, // llvm.s390.vfeneh
    12, // llvm.s390.vfenehs
    12, // llvm.s390.vfenezb
    12, // llvm.s390.vfenezbs
    12, // llvm.s390.vfenezf
    12, // llvm.s390.vfenezfs
    12, // llvm.s390.vfenezh
    12, // llvm.s390.vfenezhs
    332, // llvm.s390.vfidb
    332, // llvm.s390.vfisb
    238, // llvm.s390.vfmaxdb
    238, // llvm.s390.vfmaxsb
    238, // llvm.s390.vfmindb
    238, // llvm.s390.vfminsb
    60, // llvm.s390.vftcidb
    60, // llvm.s390.vftcisb
    12, // llvm.s390.vgfmab
    12, // llvm.s390.vgfmaf
    12, // llvm.s390.vgfmag
    12, // llvm.s390.vgfmah
    12, // llvm.s390.vgfmb
    12, // llvm.s390.vgfmf
    12, // llvm.s390.vgfmg
    12, // llvm.s390.vgfmh
    12, // llvm.s390.vistrb
    12, // llvm.s390.vistrbs
    12, // llvm.s390.vistrf
    12, // llvm.s390.vistrfs
    12, // llvm.s390.vistrh
    12, // llvm.s390.vistrhs
    335, // llvm.s390.vlbb
    33, // llvm.s390.vll
    33, // llvm.s390.vlrl
    12, // llvm.s390.vmaeb
    12, // llvm.s390.vmaef
    12, // llvm.s390.vmaeh
    12, // llvm.s390.vmahb
    12, // llvm.s390.vmahf
    12, // llvm.s390.vmahh
    12, // llvm.s390.vmaleb
    12, // llvm.s390.vmalef
    12, // llvm.s390.vmaleh
    12, // llvm.s390.vmalhb
    12, // llvm.s390.vmalhf
    12, // llvm.s390.vmalhh
    12, // llvm.s390.vmalob
    12, // llvm.s390.vmalof
    12, // llvm.s390.vmaloh
    12, // llvm.s390.vmaob
    12, // llvm.s390.vmaof
    12, // llvm.s390.vmaoh
    12, // llvm.s390.vmeb
    12, // llvm.s390.vmef
    12, // llvm.s390.vmeh
    12, // llvm.s390.vmhb
    12, // llvm.s390.vmhf
    12, // llvm.s390.vmhh
    12, // llvm.s390.vmleb
    12, // llvm.s390.vmlef
    12, // llvm.s390.vmleh
    12, // llvm.s390.vmlhb
    12, // llvm.s390.vmlhf
    12, // llvm.s390.vmlhh
    12, // llvm.s390.vmlob
    12, // llvm.s390.vmlof
    12, // llvm.s390.vmloh
    12, // llvm.s390.vmob
    12, // llvm.s390.vmof
    12, // llvm.s390.vmoh
    74, // llvm.s390.vmslg
    238, // llvm.s390.vpdi
    12, // llvm.s390.vperm
    12, // llvm.s390.vpklsf
    12, // llvm.s390.vpklsfs
    12, // llvm.s390.vpklsg
    12, // llvm.s390.vpklsgs
    12, // llvm.s390.vpklsh
    12, // llvm.s390.vpklshs
    12, // llvm.s390.vpksf
    12, // llvm.s390.vpksfs
    12, // llvm.s390.vpksg
    12, // llvm.s390.vpksgs
    12, // llvm.s390.vpksh
    12, // llvm.s390.vpkshs
    12, // llvm.s390.vsbcbiq
    12, // llvm.s390.vsbiq
    12, // llvm.s390.vscbib
    12, // llvm.s390.vscbif
    12, // llvm.s390.vscbig
    12, // llvm.s390.vscbih
    12, // llvm.s390.vscbiq
    12, // llvm.s390.vsl
    12, // llvm.s390.vslb
    238, // llvm.s390.vsld
    238, // llvm.s390.vsldb
    12, // llvm.s390.vsq
    12, // llvm.s390.vsra
    12, // llvm.s390.vsrab
    238, // llvm.s390.vsrd
    12, // llvm.s390.vsrl
    12, // llvm.s390.vsrlb
    264, // llvm.s390.vstl
    74, // llvm.s390.vstrcb
    74, // llvm.s390.vstrcbs
    74, // llvm.s390.vstrcf
    74, // llvm.s390.vstrcfs
    74, // llvm.s390.vstrch
    74, // llvm.s390.vstrchs
    74, // llvm.s390.vstrczb
    74, // llvm.s390.vstrczbs
    74, // llvm.s390.vstrczf
    74, // llvm.s390.vstrczfs
    74, // llvm.s390.vstrczh
    74, // llvm.s390.vstrczhs
    264, // llvm.s390.vstrl
    12, // llvm.s390.vstrsb
    12, // llvm.s390.vstrsf
    12, // llvm.s390.vstrsh
    12, // llvm.s390.vstrszb
    12, // llvm.s390.vstrszf
    12, // llvm.s390.vstrszh
    12, // llvm.s390.vsumb
    12, // llvm.s390.vsumgf
    12, // llvm.s390.vsumgh
    12, // llvm.s390.vsumh
    12, // llvm.s390.vsumqf
    12, // llvm.s390.vsumqg
    12, // llvm.s390.vtm
    12, // llvm.s390.vuphb
    12, // llvm.s390.vuphf
    12, // llvm.s390.vuphh
    12, // llvm.s390.vuplb
    12, // llvm.s390.vuplf
    12, // llvm.s390.vuplhb
    12, // llvm.s390.vuplhf
    12, // llvm.s390.vuplhh
    12, // llvm.s390.vuplhw
    12, // llvm.s390.vupllb
    12, // llvm.s390.vupllf
    12, // llvm.s390.vupllh
    10, // llvm.spv.alloca
    10, // llvm.spv.assign.name
    10, // llvm.spv.assign.type
    10, // llvm.spv.bitcast
    10, // llvm.spv.cmpxchg
    10, // llvm.spv.const.composite
    10, // llvm.spv.extractelt
    10, // llvm.spv.extractv
    220, // llvm.spv.gep
    10, // llvm.spv.init.global
    10, // llvm.spv.insertelt
    10, // llvm.spv.insertv
    336, // llvm.spv.load
    337, // llvm.spv.store
    10, // llvm.spv.switch
    10, // llvm.spv.track.constant
    10, // llvm.spv.undef
    10, // llvm.spv.unreachable
    10, // llvm.spv.unref.global
    12, // llvm.ve.vl.andm.MMM
    12, // llvm.ve.vl.andm.mmm
    12, // llvm.ve.vl.eqvm.MMM
    12, // llvm.ve.vl.eqvm.mmm
    12, // llvm.ve.vl.extract.vm512l
    12, // llvm.ve.vl.extract.vm512u
    255, // llvm.ve.vl.fencec.s
    255, // llvm.ve.vl.fencei
    255, // llvm.ve.vl.fencem.s
    255, // llvm.ve.vl.fidcr.sss
    12, // llvm.ve.vl.insert.vm512l
    12, // llvm.ve.vl.insert.vm512u
    12, // llvm.ve.vl.lcr.sss
    12, // llvm.ve.vl.lsv.vvss
    12, // llvm.ve.vl.lvm.MMss
    12, // llvm.ve.vl.lvm.mmss
    12, // llvm.ve.vl.lvsd.svs
    12, // llvm.ve.vl.lvsl.svs
    12, // llvm.ve.vl.lvss.svs
    12, // llvm.ve.vl.lzvm.sml
    12, // llvm.ve.vl.negm.MM
    12, // llvm.ve.vl.negm.mm
    12, // llvm.ve.vl.nndm.MMM
    12, // llvm.ve.vl.nndm.mmm
    12, // llvm.ve.vl.orm.MMM
    12, // llvm.ve.vl.orm.mmm
    221, // llvm.ve.vl.pack.f32a
    221, // llvm.ve.vl.pack.f32p
    12, // llvm.ve.vl.pcvm.sml
    338, // llvm.ve.vl.pfchv.ssl
    338, // llvm.ve.vl.pfchvnc.ssl
    12, // llvm.ve.vl.pvadds.vsvMvl
    12, // llvm.ve.vl.pvadds.vsvl
    12, // llvm.ve.vl.pvadds.vsvvl
    12, // llvm.ve.vl.pvadds.vvvMvl
    12, // llvm.ve.vl.pvadds.vvvl
    12, // llvm.ve.vl.pvadds.vvvvl
    12, // llvm.ve.vl.pvaddu.vsvMvl
    12, // llvm.ve.vl.pvaddu.vsvl
    12, // llvm.ve.vl.pvaddu.vsvvl
    12, // llvm.ve.vl.pvaddu.vvvMvl
    12, // llvm.ve.vl.pvaddu.vvvl
    12, // llvm.ve.vl.pvaddu.vvvvl
    12, // llvm.ve.vl.pvand.vsvMvl
    12, // llvm.ve.vl.pvand.vsvl
    12, // llvm.ve.vl.pvand.vsvvl
    12, // llvm.ve.vl.pvand.vvvMvl
    12, // llvm.ve.vl.pvand.vvvl
    12, // llvm.ve.vl.pvand.vvvvl
    12, // llvm.ve.vl.pvbrd.vsMvl
    12, // llvm.ve.vl.pvbrd.vsl
    12, // llvm.ve.vl.pvbrd.vsvl
    12, // llvm.ve.vl.pvbrv.vvMvl
    12, // llvm.ve.vl.pvbrv.vvl
    12, // llvm.ve.vl.pvbrv.vvvl
    12, // llvm.ve.vl.pvbrvlo.vvl
    12, // llvm.ve.vl.pvbrvlo.vvmvl
    12, // llvm.ve.vl.pvbrvlo.vvvl
    12, // llvm.ve.vl.pvbrvup.vvl
    12, // llvm.ve.vl.pvbrvup.vvmvl
    12, // llvm.ve.vl.pvbrvup.vvvl
    12, // llvm.ve.vl.pvcmps.vsvMvl
    12, // llvm.ve.vl.pvcmps.vsvl
    12, // llvm.ve.vl.pvcmps.vsvvl
    12, // llvm.ve.vl.pvcmps.vvvMvl
    12, // llvm.ve.vl.pvcmps.vvvl
    12, // llvm.ve.vl.pvcmps.vvvvl
    12, // llvm.ve.vl.pvcmpu.vsvMvl
    12, // llvm.ve.vl.pvcmpu.vsvl
    12, // llvm.ve.vl.pvcmpu.vsvvl
    12, // llvm.ve.vl.pvcmpu.vvvMvl
    12, // llvm.ve.vl.pvcmpu.vvvl
    12, // llvm.ve.vl.pvcmpu.vvvvl
    12, // llvm.ve.vl.pvcvtsw.vvl
    12, // llvm.ve.vl.pvcvtsw.vvvl
    12, // llvm.ve.vl.pvcvtws.vvMvl
    12, // llvm.ve.vl.pvcvtws.vvl
    12, // llvm.ve.vl.pvcvtws.vvvl
    12, // llvm.ve.vl.pvcvtwsrz.vvMvl
    12, // llvm.ve.vl.pvcvtwsrz.vvl
    12, // llvm.ve.vl.pvcvtwsrz.vvvl
    12, // llvm.ve.vl.pveqv.vsvMvl
    12, // llvm.ve.vl.pveqv.vsvl
    12, // llvm.ve.vl.pveqv.vsvvl
    12, // llvm.ve.vl.pveqv.vvvMvl
    12, // llvm.ve.vl.pveqv.vvvl
    12, // llvm.ve.vl.pveqv.vvvvl
    12, // llvm.ve.vl.pvfadd.vsvMvl
    12, // llvm.ve.vl.pvfadd.vsvl
    12, // llvm.ve.vl.pvfadd.vsvvl
    12, // llvm.ve.vl.pvfadd.vvvMvl
    12, // llvm.ve.vl.pvfadd.vvvl
    12, // llvm.ve.vl.pvfadd.vvvvl
    12, // llvm.ve.vl.pvfcmp.vsvMvl
    12, // llvm.ve.vl.pvfcmp.vsvl
    12, // llvm.ve.vl.pvfcmp.vsvvl
    12, // llvm.ve.vl.pvfcmp.vvvMvl
    12, // llvm.ve.vl.pvfcmp.vvvl
    12, // llvm.ve.vl.pvfcmp.vvvvl
    12, // llvm.ve.vl.pvfmad.vsvvMvl
    12, // llvm.ve.vl.pvfmad.vsvvl
    12, // llvm.ve.vl.pvfmad.vsvvvl
    12, // llvm.ve.vl.pvfmad.vvsvMvl
    12, // llvm.ve.vl.pvfmad.vvsvl
    12, // llvm.ve.vl.pvfmad.vvsvvl
    12, // llvm.ve.vl.pvfmad.vvvvMvl
    12, // llvm.ve.vl.pvfmad.vvvvl
    12, // llvm.ve.vl.pvfmad.vvvvvl
    12, // llvm.ve.vl.pvfmax.vsvMvl
    12, // llvm.ve.vl.pvfmax.vsvl
    12, // llvm.ve.vl.pvfmax.vsvvl
    12, // llvm.ve.vl.pvfmax.vvvMvl
    12, // llvm.ve.vl.pvfmax.vvvl
    12, // llvm.ve.vl.pvfmax.vvvvl
    12, // llvm.ve.vl.pvfmin.vsvMvl
    12, // llvm.ve.vl.pvfmin.vsvl
    12, // llvm.ve.vl.pvfmin.vsvvl
    12, // llvm.ve.vl.pvfmin.vvvMvl
    12, // llvm.ve.vl.pvfmin.vvvl
    12, // llvm.ve.vl.pvfmin.vvvvl
    12, // llvm.ve.vl.pvfmkaf.Ml
    12, // llvm.ve.vl.pvfmkat.Ml
    12, // llvm.ve.vl.pvfmkseq.MvMl
    12, // llvm.ve.vl.pvfmkseq.Mvl
    12, // llvm.ve.vl.pvfmkseqnan.MvMl
    12, // llvm.ve.vl.pvfmkseqnan.Mvl
    12, // llvm.ve.vl.pvfmksge.MvMl
    12, // llvm.ve.vl.pvfmksge.Mvl
    12, // llvm.ve.vl.pvfmksgenan.MvMl
    12, // llvm.ve.vl.pvfmksgenan.Mvl
    12, // llvm.ve.vl.pvfmksgt.MvMl
    12, // llvm.ve.vl.pvfmksgt.Mvl
    12, // llvm.ve.vl.pvfmksgtnan.MvMl
    12, // llvm.ve.vl.pvfmksgtnan.Mvl
    12, // llvm.ve.vl.pvfmksle.MvMl
    12, // llvm.ve.vl.pvfmksle.Mvl
    12, // llvm.ve.vl.pvfmkslenan.MvMl
    12, // llvm.ve.vl.pvfmkslenan.Mvl
    12, // llvm.ve.vl.pvfmksloeq.mvl
    12, // llvm.ve.vl.pvfmksloeq.mvml
    12, // llvm.ve.vl.pvfmksloeqnan.mvl
    12, // llvm.ve.vl.pvfmksloeqnan.mvml
    12, // llvm.ve.vl.pvfmksloge.mvl
    12, // llvm.ve.vl.pvfmksloge.mvml
    12, // llvm.ve.vl.pvfmkslogenan.mvl
    12, // llvm.ve.vl.pvfmkslogenan.mvml
    12, // llvm.ve.vl.pvfmkslogt.mvl
    12, // llvm.ve.vl.pvfmkslogt.mvml
    12, // llvm.ve.vl.pvfmkslogtnan.mvl
    12, // llvm.ve.vl.pvfmkslogtnan.mvml
    12, // llvm.ve.vl.pvfmkslole.mvl
    12, // llvm.ve.vl.pvfmkslole.mvml
    12, // llvm.ve.vl.pvfmkslolenan.mvl
    12, // llvm.ve.vl.pvfmkslolenan.mvml
    12, // llvm.ve.vl.pvfmkslolt.mvl
    12, // llvm.ve.vl.pvfmkslolt.mvml
    12, // llvm.ve.vl.pvfmksloltnan.mvl
    12, // llvm.ve.vl.pvfmksloltnan.mvml
    12, // llvm.ve.vl.pvfmkslonan.mvl
    12, // llvm.ve.vl.pvfmkslonan.mvml
    12, // llvm.ve.vl.pvfmkslone.mvl
    12, // llvm.ve.vl.pvfmkslone.mvml
    12, // llvm.ve.vl.pvfmkslonenan.mvl
    12, // llvm.ve.vl.pvfmkslonenan.mvml
    12, // llvm.ve.vl.pvfmkslonum.mvl
    12, // llvm.ve.vl.pvfmkslonum.mvml
    12, // llvm.ve.vl.pvfmkslt.MvMl
    12, // llvm.ve.vl.pvfmkslt.Mvl
    12, // llvm.ve.vl.pvfmksltnan.MvMl
    12, // llvm.ve.vl.pvfmksltnan.Mvl
    12, // llvm.ve.vl.pvfmksnan.MvMl
    12, // llvm.ve.vl.pvfmksnan.Mvl
    12, // llvm.ve.vl.pvfmksne.MvMl
    12, // llvm.ve.vl.pvfmksne.Mvl
    12, // llvm.ve.vl.pvfmksnenan.MvMl
    12, // llvm.ve.vl.pvfmksnenan.Mvl
    12, // llvm.ve.vl.pvfmksnum.MvMl
    12, // llvm.ve.vl.pvfmksnum.Mvl
    12, // llvm.ve.vl.pvfmksupeq.mvl
    12, // llvm.ve.vl.pvfmksupeq.mvml
    12, // llvm.ve.vl.pvfmksupeqnan.mvl
    12, // llvm.ve.vl.pvfmksupeqnan.mvml
    12, // llvm.ve.vl.pvfmksupge.mvl
    12, // llvm.ve.vl.pvfmksupge.mvml
    12, // llvm.ve.vl.pvfmksupgenan.mvl
    12, // llvm.ve.vl.pvfmksupgenan.mvml
    12, // llvm.ve.vl.pvfmksupgt.mvl
    12, // llvm.ve.vl.pvfmksupgt.mvml
    12, // llvm.ve.vl.pvfmksupgtnan.mvl
    12, // llvm.ve.vl.pvfmksupgtnan.mvml
    12, // llvm.ve.vl.pvfmksuple.mvl
    12, // llvm.ve.vl.pvfmksuple.mvml
    12, // llvm.ve.vl.pvfmksuplenan.mvl
    12, // llvm.ve.vl.pvfmksuplenan.mvml
    12, // llvm.ve.vl.pvfmksuplt.mvl
    12, // llvm.ve.vl.pvfmksuplt.mvml
    12, // llvm.ve.vl.pvfmksupltnan.mvl
    12, // llvm.ve.vl.pvfmksupltnan.mvml
    12, // llvm.ve.vl.pvfmksupnan.mvl
    12, // llvm.ve.vl.pvfmksupnan.mvml
    12, // llvm.ve.vl.pvfmksupne.mvl
    12, // llvm.ve.vl.pvfmksupne.mvml
    12, // llvm.ve.vl.pvfmksupnenan.mvl
    12, // llvm.ve.vl.pvfmksupnenan.mvml
    12, // llvm.ve.vl.pvfmksupnum.mvl
    12, // llvm.ve.vl.pvfmksupnum.mvml
    12, // llvm.ve.vl.pvfmkweq.MvMl
    12, // llvm.ve.vl.pvfmkweq.Mvl
    12, // llvm.ve.vl.pvfmkweqnan.MvMl
    12, // llvm.ve.vl.pvfmkweqnan.Mvl
    12, // llvm.ve.vl.pvfmkwge.MvMl
    12, // llvm.ve.vl.pvfmkwge.Mvl
    12, // llvm.ve.vl.pvfmkwgenan.MvMl
    12, // llvm.ve.vl.pvfmkwgenan.Mvl
    12, // llvm.ve.vl.pvfmkwgt.MvMl
    12, // llvm.ve.vl.pvfmkwgt.Mvl
    12, // llvm.ve.vl.pvfmkwgtnan.MvMl
    12, // llvm.ve.vl.pvfmkwgtnan.Mvl
    12, // llvm.ve.vl.pvfmkwle.MvMl
    12, // llvm.ve.vl.pvfmkwle.Mvl
    12, // llvm.ve.vl.pvfmkwlenan.MvMl
    12, // llvm.ve.vl.pvfmkwlenan.Mvl
    12, // llvm.ve.vl.pvfmkwloeq.mvl
    12, // llvm.ve.vl.pvfmkwloeq.mvml
    12, // llvm.ve.vl.pvfmkwloeqnan.mvl
    12, // llvm.ve.vl.pvfmkwloeqnan.mvml
    12, // llvm.ve.vl.pvfmkwloge.mvl
    12, // llvm.ve.vl.pvfmkwloge.mvml
    12, // llvm.ve.vl.pvfmkwlogenan.mvl
    12, // llvm.ve.vl.pvfmkwlogenan.mvml
    12, // llvm.ve.vl.pvfmkwlogt.mvl
    12, // llvm.ve.vl.pvfmkwlogt.mvml
    12, // llvm.ve.vl.pvfmkwlogtnan.mvl
    12, // llvm.ve.vl.pvfmkwlogtnan.mvml
    12, // llvm.ve.vl.pvfmkwlole.mvl
    12, // llvm.ve.vl.pvfmkwlole.mvml
    12, // llvm.ve.vl.pvfmkwlolenan.mvl
    12, // llvm.ve.vl.pvfmkwlolenan.mvml
    12, // llvm.ve.vl.pvfmkwlolt.mvl
    12, // llvm.ve.vl.pvfmkwlolt.mvml
    12, // llvm.ve.vl.pvfmkwloltnan.mvl
    12, // llvm.ve.vl.pvfmkwloltnan.mvml
    12, // llvm.ve.vl.pvfmkwlonan.mvl
    12, // llvm.ve.vl.pvfmkwlonan.mvml
    12, // llvm.ve.vl.pvfmkwlone.mvl
    12, // llvm.ve.vl.pvfmkwlone.mvml
    12, // llvm.ve.vl.pvfmkwlonenan.mvl
    12, // llvm.ve.vl.pvfmkwlonenan.mvml
    12, // llvm.ve.vl.pvfmkwlonum.mvl
    12, // llvm.ve.vl.pvfmkwlonum.mvml
    12, // llvm.ve.vl.pvfmkwlt.MvMl
    12, // llvm.ve.vl.pvfmkwlt.Mvl
    12, // llvm.ve.vl.pvfmkwltnan.MvMl
    12, // llvm.ve.vl.pvfmkwltnan.Mvl
    12, // llvm.ve.vl.pvfmkwnan.MvMl
    12, // llvm.ve.vl.pvfmkwnan.Mvl
    12, // llvm.ve.vl.pvfmkwne.MvMl
    12, // llvm.ve.vl.pvfmkwne.Mvl
    12, // llvm.ve.vl.pvfmkwnenan.MvMl
    12, // llvm.ve.vl.pvfmkwnenan.Mvl
    12, // llvm.ve.vl.pvfmkwnum.MvMl
    12, // llvm.ve.vl.pvfmkwnum.Mvl
    12, // llvm.ve.vl.pvfmkwupeq.mvl
    12, // llvm.ve.vl.pvfmkwupeq.mvml
    12, // llvm.ve.vl.pvfmkwupeqnan.mvl
    12, // llvm.ve.vl.pvfmkwupeqnan.mvml
    12, // llvm.ve.vl.pvfmkwupge.mvl
    12, // llvm.ve.vl.pvfmkwupge.mvml
    12, // llvm.ve.vl.pvfmkwupgenan.mvl
    12, // llvm.ve.vl.pvfmkwupgenan.mvml
    12, // llvm.ve.vl.pvfmkwupgt.mvl
    12, // llvm.ve.vl.pvfmkwupgt.mvml
    12, // llvm.ve.vl.pvfmkwupgtnan.mvl
    12, // llvm.ve.vl.pvfmkwupgtnan.mvml
    12, // llvm.ve.vl.pvfmkwuple.mvl
    12, // llvm.ve.vl.pvfmkwuple.mvml
    12, // llvm.ve.vl.pvfmkwuplenan.mvl
    12, // llvm.ve.vl.pvfmkwuplenan.mvml
    12, // llvm.ve.vl.pvfmkwuplt.mvl
    12, // llvm.ve.vl.pvfmkwuplt.mvml
    12, // llvm.ve.vl.pvfmkwupltnan.mvl
    12, // llvm.ve.vl.pvfmkwupltnan.mvml
    12, // llvm.ve.vl.pvfmkwupnan.mvl
    12, // llvm.ve.vl.pvfmkwupnan.mvml
    12, // llvm.ve.vl.pvfmkwupne.mvl
    12, // llvm.ve.vl.pvfmkwupne.mvml
    12, // llvm.ve.vl.pvfmkwupnenan.mvl
    12, // llvm.ve.vl.pvfmkwupnenan.mvml
    12, // llvm.ve.vl.pvfmkwupnum.mvl
    12, // llvm.ve.vl.pvfmkwupnum.mvml
    12, // llvm.ve.vl.pvfmsb.vsvvMvl
    12, // llvm.ve.vl.pvfmsb.vsvvl
    12, // llvm.ve.vl.pvfmsb.vsvvvl
    12, // llvm.ve.vl.pvfmsb.vvsvMvl
    12, // llvm.ve.vl.pvfmsb.vvsvl
    12, // llvm.ve.vl.pvfmsb.vvsvvl
    12, // llvm.ve.vl.pvfmsb.vvvvMvl
    12, // llvm.ve.vl.pvfmsb.vvvvl
    12, // llvm.ve.vl.pvfmsb.vvvvvl
    12, // llvm.ve.vl.pvfmul.vsvMvl
    12, // llvm.ve.vl.pvfmul.vsvl
    12, // llvm.ve.vl.pvfmul.vsvvl
    12, // llvm.ve.vl.pvfmul.vvvMvl
    12, // llvm.ve.vl.pvfmul.vvvl
    12, // llvm.ve.vl.pvfmul.vvvvl
    12, // llvm.ve.vl.pvfnmad.vsvvMvl
    12, // llvm.ve.vl.pvfnmad.vsvvl
    12, // llvm.ve.vl.pvfnmad.vsvvvl
    12, // llvm.ve.vl.pvfnmad.vvsvMvl
    12, // llvm.ve.vl.pvfnmad.vvsvl
    12, // llvm.ve.vl.pvfnmad.vvsvvl
    12, // llvm.ve.vl.pvfnmad.vvvvMvl
    12, // llvm.ve.vl.pvfnmad.vvvvl
    12, // llvm.ve.vl.pvfnmad.vvvvvl
    12, // llvm.ve.vl.pvfnmsb.vsvvMvl
    12, // llvm.ve.vl.pvfnmsb.vsvvl
    12, // llvm.ve.vl.pvfnmsb.vsvvvl
    12, // llvm.ve.vl.pvfnmsb.vvsvMvl
    12, // llvm.ve.vl.pvfnmsb.vvsvl
    12, // llvm.ve.vl.pvfnmsb.vvsvvl
    12, // llvm.ve.vl.pvfnmsb.vvvvMvl
    12, // llvm.ve.vl.pvfnmsb.vvvvl
    12, // llvm.ve.vl.pvfnmsb.vvvvvl
    12, // llvm.ve.vl.pvfsub.vsvMvl
    12, // llvm.ve.vl.pvfsub.vsvl
    12, // llvm.ve.vl.pvfsub.vsvvl
    12, // llvm.ve.vl.pvfsub.vvvMvl
    12, // llvm.ve.vl.pvfsub.vvvl
    12, // llvm.ve.vl.pvfsub.vvvvl
    12, // llvm.ve.vl.pvldz.vvMvl
    12, // llvm.ve.vl.pvldz.vvl
    12, // llvm.ve.vl.pvldz.vvvl
    12, // llvm.ve.vl.pvldzlo.vvl
    12, // llvm.ve.vl.pvldzlo.vvmvl
    12, // llvm.ve.vl.pvldzlo.vvvl
    12, // llvm.ve.vl.pvldzup.vvl
    12, // llvm.ve.vl.pvldzup.vvmvl
    12, // llvm.ve.vl.pvldzup.vvvl
    12, // llvm.ve.vl.pvmaxs.vsvMvl
    12, // llvm.ve.vl.pvmaxs.vsvl
    12, // llvm.ve.vl.pvmaxs.vsvvl
    12, // llvm.ve.vl.pvmaxs.vvvMvl
    12, // llvm.ve.vl.pvmaxs.vvvl
    12, // llvm.ve.vl.pvmaxs.vvvvl
    12, // llvm.ve.vl.pvmins.vsvMvl
    12, // llvm.ve.vl.pvmins.vsvl
    12, // llvm.ve.vl.pvmins.vsvvl
    12, // llvm.ve.vl.pvmins.vvvMvl
    12, // llvm.ve.vl.pvmins.vvvl
    12, // llvm.ve.vl.pvmins.vvvvl
    12, // llvm.ve.vl.pvor.vsvMvl
    12, // llvm.ve.vl.pvor.vsvl
    12, // llvm.ve.vl.pvor.vsvvl
    12, // llvm.ve.vl.pvor.vvvMvl
    12, // llvm.ve.vl.pvor.vvvl
    12, // llvm.ve.vl.pvor.vvvvl
    12, // llvm.ve.vl.pvpcnt.vvMvl
    12, // llvm.ve.vl.pvpcnt.vvl
    12, // llvm.ve.vl.pvpcnt.vvvl
    12, // llvm.ve.vl.pvpcntlo.vvl
    12, // llvm.ve.vl.pvpcntlo.vvmvl
    12, // llvm.ve.vl.pvpcntlo.vvvl
    12, // llvm.ve.vl.pvpcntup.vvl
    12, // llvm.ve.vl.pvpcntup.vvmvl
    12, // llvm.ve.vl.pvpcntup.vvvl
    12, // llvm.ve.vl.pvrcp.vvl
    12, // llvm.ve.vl.pvrcp.vvvl
    12, // llvm.ve.vl.pvrsqrt.vvl
    12, // llvm.ve.vl.pvrsqrt.vvvl
    12, // llvm.ve.vl.pvrsqrtnex.vvl
    12, // llvm.ve.vl.pvrsqrtnex.vvvl
    12, // llvm.ve.vl.pvseq.vl
    12, // llvm.ve.vl.pvseq.vvl
    12, // llvm.ve.vl.pvseqlo.vl
    12, // llvm.ve.vl.pvseqlo.vvl
    12, // llvm.ve.vl.pvsequp.vl
    12, // llvm.ve.vl.pvsequp.vvl
    12, // llvm.ve.vl.pvsla.vvsMvl
    12, // llvm.ve.vl.pvsla.vvsl
    12, // llvm.ve.vl.pvsla.vvsvl
    12, // llvm.ve.vl.pvsla.vvvMvl
    12, // llvm.ve.vl.pvsla.vvvl
    12, // llvm.ve.vl.pvsla.vvvvl
    12, // llvm.ve.vl.pvsll.vvsMvl
    12, // llvm.ve.vl.pvsll.vvsl
    12, // llvm.ve.vl.pvsll.vvsvl
    12, // llvm.ve.vl.pvsll.vvvMvl
    12, // llvm.ve.vl.pvsll.vvvl
    12, // llvm.ve.vl.pvsll.vvvvl
    12, // llvm.ve.vl.pvsra.vvsMvl
    12, // llvm.ve.vl.pvsra.vvsl
    12, // llvm.ve.vl.pvsra.vvsvl
    12, // llvm.ve.vl.pvsra.vvvMvl
    12, // llvm.ve.vl.pvsra.vvvl
    12, // llvm.ve.vl.pvsra.vvvvl
    12, // llvm.ve.vl.pvsrl.vvsMvl
    12, // llvm.ve.vl.pvsrl.vvsl
    12, // llvm.ve.vl.pvsrl.vvsvl
    12, // llvm.ve.vl.pvsrl.vvvMvl
    12, // llvm.ve.vl.pvsrl.vvvl
    12, // llvm.ve.vl.pvsrl.vvvvl
    12, // llvm.ve.vl.pvsubs.vsvMvl
    12, // llvm.ve.vl.pvsubs.vsvl
    12, // llvm.ve.vl.pvsubs.vsvvl
    12, // llvm.ve.vl.pvsubs.vvvMvl
    12, // llvm.ve.vl.pvsubs.vvvl
    12, // llvm.ve.vl.pvsubs.vvvvl
    12, // llvm.ve.vl.pvsubu.vsvMvl
    12, // llvm.ve.vl.pvsubu.vsvl
    12, // llvm.ve.vl.pvsubu.vsvvl
    12, // llvm.ve.vl.pvsubu.vvvMvl
    12, // llvm.ve.vl.pvsubu.vvvl
    12, // llvm.ve.vl.pvsubu.vvvvl
    12, // llvm.ve.vl.pvxor.vsvMvl
    12, // llvm.ve.vl.pvxor.vsvl
    12, // llvm.ve.vl.pvxor.vsvvl
    12, // llvm.ve.vl.pvxor.vvvMvl
    12, // llvm.ve.vl.pvxor.vvvl
    12, // llvm.ve.vl.pvxor.vvvvl
    255, // llvm.ve.vl.scr.sss
    12, // llvm.ve.vl.svm.sMs
    12, // llvm.ve.vl.svm.sms
    255, // llvm.ve.vl.svob
    12, // llvm.ve.vl.tovm.sml
    255, // llvm.ve.vl.tscr.ssss
    12, // llvm.ve.vl.vaddsl.vsvl
    12, // llvm.ve.vl.vaddsl.vsvmvl
    12, // llvm.ve.vl.vaddsl.vsvvl
    12, // llvm.ve.vl.vaddsl.vvvl
    12, // llvm.ve.vl.vaddsl.vvvmvl
    12, // llvm.ve.vl.vaddsl.vvvvl
    12, // llvm.ve.vl.vaddswsx.vsvl
    12, // llvm.ve.vl.vaddswsx.vsvmvl
    12, // llvm.ve.vl.vaddswsx.vsvvl
    12, // llvm.ve.vl.vaddswsx.vvvl
    12, // llvm.ve.vl.vaddswsx.vvvmvl
    12, // llvm.ve.vl.vaddswsx.vvvvl
    12, // llvm.ve.vl.vaddswzx.vsvl
    12, // llvm.ve.vl.vaddswzx.vsvmvl
    12, // llvm.ve.vl.vaddswzx.vsvvl
    12, // llvm.ve.vl.vaddswzx.vvvl
    12, // llvm.ve.vl.vaddswzx.vvvmvl
    12, // llvm.ve.vl.vaddswzx.vvvvl
    12, // llvm.ve.vl.vaddul.vsvl
    12, // llvm.ve.vl.vaddul.vsvmvl
    12, // llvm.ve.vl.vaddul.vsvvl
    12, // llvm.ve.vl.vaddul.vvvl
    12, // llvm.ve.vl.vaddul.vvvmvl
    12, // llvm.ve.vl.vaddul.vvvvl
    12, // llvm.ve.vl.vadduw.vsvl
    12, // llvm.ve.vl.vadduw.vsvmvl
    12, // llvm.ve.vl.vadduw.vsvvl
    12, // llvm.ve.vl.vadduw.vvvl
    12, // llvm.ve.vl.vadduw.vvvmvl
    12, // llvm.ve.vl.vadduw.vvvvl
    12, // llvm.ve.vl.vand.vsvl
    12, // llvm.ve.vl.vand.vsvmvl
    12, // llvm.ve.vl.vand.vsvvl
    12, // llvm.ve.vl.vand.vvvl
    12, // llvm.ve.vl.vand.vvvmvl
    12, // llvm.ve.vl.vand.vvvvl
    12, // llvm.ve.vl.vbrdd.vsl
    12, // llvm.ve.vl.vbrdd.vsmvl
    12, // llvm.ve.vl.vbrdd.vsvl
    12, // llvm.ve.vl.vbrdl.vsl
    12, // llvm.ve.vl.vbrdl.vsmvl
    12, // llvm.ve.vl.vbrdl.vsvl
    12, // llvm.ve.vl.vbrds.vsl
    12, // llvm.ve.vl.vbrds.vsmvl
    12, // llvm.ve.vl.vbrds.vsvl
    12, // llvm.ve.vl.vbrdw.vsl
    12, // llvm.ve.vl.vbrdw.vsmvl
    12, // llvm.ve.vl.vbrdw.vsvl
    12, // llvm.ve.vl.vbrv.vvl
    12, // llvm.ve.vl.vbrv.vvmvl
    12, // llvm.ve.vl.vbrv.vvvl
    12, // llvm.ve.vl.vcmpsl.vsvl
    12, // llvm.ve.vl.vcmpsl.vsvmvl
    12, // llvm.ve.vl.vcmpsl.vsvvl
    12, // llvm.ve.vl.vcmpsl.vvvl
    12, // llvm.ve.vl.vcmpsl.vvvmvl
    12, // llvm.ve.vl.vcmpsl.vvvvl
    12, // llvm.ve.vl.vcmpswsx.vsvl
    12, // llvm.ve.vl.vcmpswsx.vsvmvl
    12, // llvm.ve.vl.vcmpswsx.vsvvl
    12, // llvm.ve.vl.vcmpswsx.vvvl
    12, // llvm.ve.vl.vcmpswsx.vvvmvl
    12, // llvm.ve.vl.vcmpswsx.vvvvl
    12, // llvm.ve.vl.vcmpswzx.vsvl
    12, // llvm.ve.vl.vcmpswzx.vsvmvl
    12, // llvm.ve.vl.vcmpswzx.vsvvl
    12, // llvm.ve.vl.vcmpswzx.vvvl
    12, // llvm.ve.vl.vcmpswzx.vvvmvl
    12, // llvm.ve.vl.vcmpswzx.vvvvl
    12, // llvm.ve.vl.vcmpul.vsvl
    12, // llvm.ve.vl.vcmpul.vsvmvl
    12, // llvm.ve.vl.vcmpul.vsvvl
    12, // llvm.ve.vl.vcmpul.vvvl
    12, // llvm.ve.vl.vcmpul.vvvmvl
    12, // llvm.ve.vl.vcmpul.vvvvl
    12, // llvm.ve.vl.vcmpuw.vsvl
    12, // llvm.ve.vl.vcmpuw.vsvmvl
    12, // llvm.ve.vl.vcmpuw.vsvvl
    12, // llvm.ve.vl.vcmpuw.vvvl
    12, // llvm.ve.vl.vcmpuw.vvvmvl
    12, // llvm.ve.vl.vcmpuw.vvvvl
    12, // llvm.ve.vl.vcp.vvmvl
    12, // llvm.ve.vl.vcvtdl.vvl
    12, // llvm.ve.vl.vcvtdl.vvvl
    12, // llvm.ve.vl.vcvtds.vvl
    12, // llvm.ve.vl.vcvtds.vvvl
    12, // llvm.ve.vl.vcvtdw.vvl
    12, // llvm.ve.vl.vcvtdw.vvvl
    12, // llvm.ve.vl.vcvtld.vvl
    12, // llvm.ve.vl.vcvtld.vvmvl
    12, // llvm.ve.vl.vcvtld.vvvl
    12, // llvm.ve.vl.vcvtldrz.vvl
    12, // llvm.ve.vl.vcvtldrz.vvmvl
    12, // llvm.ve.vl.vcvtldrz.vvvl
    12, // llvm.ve.vl.vcvtsd.vvl
    12, // llvm.ve.vl.vcvtsd.vvvl
    12, // llvm.ve.vl.vcvtsw.vvl
    12, // llvm.ve.vl.vcvtsw.vvvl
    12, // llvm.ve.vl.vcvtwdsx.vvl
    12, // llvm.ve.vl.vcvtwdsx.vvmvl
    12, // llvm.ve.vl.vcvtwdsx.vvvl
    12, // llvm.ve.vl.vcvtwdsxrz.vvl
    12, // llvm.ve.vl.vcvtwdsxrz.vvmvl
    12, // llvm.ve.vl.vcvtwdsxrz.vvvl
    12, // llvm.ve.vl.vcvtwdzx.vvl
    12, // llvm.ve.vl.vcvtwdzx.vvmvl
    12, // llvm.ve.vl.vcvtwdzx.vvvl
    12, // llvm.ve.vl.vcvtwdzxrz.vvl
    12, // llvm.ve.vl.vcvtwdzxrz.vvmvl
    12, // llvm.ve.vl.vcvtwdzxrz.vvvl
    12, // llvm.ve.vl.vcvtwssx.vvl
    12, // llvm.ve.vl.vcvtwssx.vvmvl
    12, // llvm.ve.vl.vcvtwssx.vvvl
    12, // llvm.ve.vl.vcvtwssxrz.vvl
    12, // llvm.ve.vl.vcvtwssxrz.vvmvl
    12, // llvm.ve.vl.vcvtwssxrz.vvvl
    12, // llvm.ve.vl.vcvtwszx.vvl
    12, // llvm.ve.vl.vcvtwszx.vvmvl
    12, // llvm.ve.vl.vcvtwszx.vvvl
    12, // llvm.ve.vl.vcvtwszxrz.vvl
    12, // llvm.ve.vl.vcvtwszxrz.vvmvl
    12, // llvm.ve.vl.vcvtwszxrz.vvvl
    12, // llvm.ve.vl.vdivsl.vsvl
    12, // llvm.ve.vl.vdivsl.vsvmvl
    12, // llvm.ve.vl.vdivsl.vsvvl
    12, // llvm.ve.vl.vdivsl.vvsl
    12, // llvm.ve.vl.vdivsl.vvsmvl
    12, // llvm.ve.vl.vdivsl.vvsvl
    12, // llvm.ve.vl.vdivsl.vvvl
    12, // llvm.ve.vl.vdivsl.vvvmvl
    12, // llvm.ve.vl.vdivsl.vvvvl
    12, // llvm.ve.vl.vdivswsx.vsvl
    12, // llvm.ve.vl.vdivswsx.vsvmvl
    12, // llvm.ve.vl.vdivswsx.vsvvl
    12, // llvm.ve.vl.vdivswsx.vvsl
    12, // llvm.ve.vl.vdivswsx.vvsmvl
    12, // llvm.ve.vl.vdivswsx.vvsvl
    12, // llvm.ve.vl.vdivswsx.vvvl
    12, // llvm.ve.vl.vdivswsx.vvvmvl
    12, // llvm.ve.vl.vdivswsx.vvvvl
    12, // llvm.ve.vl.vdivswzx.vsvl
    12, // llvm.ve.vl.vdivswzx.vsvmvl
    12, // llvm.ve.vl.vdivswzx.vsvvl
    12, // llvm.ve.vl.vdivswzx.vvsl
    12, // llvm.ve.vl.vdivswzx.vvsmvl
    12, // llvm.ve.vl.vdivswzx.vvsvl
    12, // llvm.ve.vl.vdivswzx.vvvl
    12, // llvm.ve.vl.vdivswzx.vvvmvl
    12, // llvm.ve.vl.vdivswzx.vvvvl
    12, // llvm.ve.vl.vdivul.vsvl
    12, // llvm.ve.vl.vdivul.vsvmvl
    12, // llvm.ve.vl.vdivul.vsvvl
    12, // llvm.ve.vl.vdivul.vvsl
    12, // llvm.ve.vl.vdivul.vvsmvl
    12, // llvm.ve.vl.vdivul.vvsvl
    12, // llvm.ve.vl.vdivul.vvvl
    12, // llvm.ve.vl.vdivul.vvvmvl
    12, // llvm.ve.vl.vdivul.vvvvl
    12, // llvm.ve.vl.vdivuw.vsvl
    12, // llvm.ve.vl.vdivuw.vsvmvl
    12, // llvm.ve.vl.vdivuw.vsvvl
    12, // llvm.ve.vl.vdivuw.vvsl
    12, // llvm.ve.vl.vdivuw.vvsmvl
    12, // llvm.ve.vl.vdivuw.vvsvl
    12, // llvm.ve.vl.vdivuw.vvvl
    12, // llvm.ve.vl.vdivuw.vvvmvl
    12, // llvm.ve.vl.vdivuw.vvvvl
    12, // llvm.ve.vl.veqv.vsvl
    12, // llvm.ve.vl.veqv.vsvmvl
    12, // llvm.ve.vl.veqv.vsvvl
    12, // llvm.ve.vl.veqv.vvvl
    12, // llvm.ve.vl.veqv.vvvmvl
    12, // llvm.ve.vl.veqv.vvvvl
    12, // llvm.ve.vl.vex.vvmvl
    12, // llvm.ve.vl.vfaddd.vsvl
    12, // llvm.ve.vl.vfaddd.vsvmvl
    12, // llvm.ve.vl.vfaddd.vsvvl
    12, // llvm.ve.vl.vfaddd.vvvl
    12, // llvm.ve.vl.vfaddd.vvvmvl
    12, // llvm.ve.vl.vfaddd.vvvvl
    12, // llvm.ve.vl.vfadds.vsvl
    12, // llvm.ve.vl.vfadds.vsvmvl
    12, // llvm.ve.vl.vfadds.vsvvl
    12, // llvm.ve.vl.vfadds.vvvl
    12, // llvm.ve.vl.vfadds.vvvmvl
    12, // llvm.ve.vl.vfadds.vvvvl
    12, // llvm.ve.vl.vfcmpd.vsvl
    12, // llvm.ve.vl.vfcmpd.vsvmvl
    12, // llvm.ve.vl.vfcmpd.vsvvl
    12, // llvm.ve.vl.vfcmpd.vvvl
    12, // llvm.ve.vl.vfcmpd.vvvmvl
    12, // llvm.ve.vl.vfcmpd.vvvvl
    12, // llvm.ve.vl.vfcmps.vsvl
    12, // llvm.ve.vl.vfcmps.vsvmvl
    12, // llvm.ve.vl.vfcmps.vsvvl
    12, // llvm.ve.vl.vfcmps.vvvl
    12, // llvm.ve.vl.vfcmps.vvvmvl
    12, // llvm.ve.vl.vfcmps.vvvvl
    12, // llvm.ve.vl.vfdivd.vsvl
    12, // llvm.ve.vl.vfdivd.vsvmvl
    12, // llvm.ve.vl.vfdivd.vsvvl
    12, // llvm.ve.vl.vfdivd.vvvl
    12, // llvm.ve.vl.vfdivd.vvvmvl
    12, // llvm.ve.vl.vfdivd.vvvvl
    12, // llvm.ve.vl.vfdivs.vsvl
    12, // llvm.ve.vl.vfdivs.vsvmvl
    12, // llvm.ve.vl.vfdivs.vsvvl
    12, // llvm.ve.vl.vfdivs.vvvl
    12, // llvm.ve.vl.vfdivs.vvvmvl
    12, // llvm.ve.vl.vfdivs.vvvvl
    12, // llvm.ve.vl.vfmadd.vsvvl
    12, // llvm.ve.vl.vfmadd.vsvvmvl
    12, // llvm.ve.vl.vfmadd.vsvvvl
    12, // llvm.ve.vl.vfmadd.vvsvl
    12, // llvm.ve.vl.vfmadd.vvsvmvl
    12, // llvm.ve.vl.vfmadd.vvsvvl
    12, // llvm.ve.vl.vfmadd.vvvvl
    12, // llvm.ve.vl.vfmadd.vvvvmvl
    12, // llvm.ve.vl.vfmadd.vvvvvl
    12, // llvm.ve.vl.vfmads.vsvvl
    12, // llvm.ve.vl.vfmads.vsvvmvl
    12, // llvm.ve.vl.vfmads.vsvvvl
    12, // llvm.ve.vl.vfmads.vvsvl
    12, // llvm.ve.vl.vfmads.vvsvmvl
    12, // llvm.ve.vl.vfmads.vvsvvl
    12, // llvm.ve.vl.vfmads.vvvvl
    12, // llvm.ve.vl.vfmads.vvvvmvl
    12, // llvm.ve.vl.vfmads.vvvvvl
    12, // llvm.ve.vl.vfmaxd.vsvl
    12, // llvm.ve.vl.vfmaxd.vsvmvl
    12, // llvm.ve.vl.vfmaxd.vsvvl
    12, // llvm.ve.vl.vfmaxd.vvvl
    12, // llvm.ve.vl.vfmaxd.vvvmvl
    12, // llvm.ve.vl.vfmaxd.vvvvl
    12, // llvm.ve.vl.vfmaxs.vsvl
    12, // llvm.ve.vl.vfmaxs.vsvmvl
    12, // llvm.ve.vl.vfmaxs.vsvvl
    12, // llvm.ve.vl.vfmaxs.vvvl
    12, // llvm.ve.vl.vfmaxs.vvvmvl
    12, // llvm.ve.vl.vfmaxs.vvvvl
    12, // llvm.ve.vl.vfmind.vsvl
    12, // llvm.ve.vl.vfmind.vsvmvl
    12, // llvm.ve.vl.vfmind.vsvvl
    12, // llvm.ve.vl.vfmind.vvvl
    12, // llvm.ve.vl.vfmind.vvvmvl
    12, // llvm.ve.vl.vfmind.vvvvl
    12, // llvm.ve.vl.vfmins.vsvl
    12, // llvm.ve.vl.vfmins.vsvmvl
    12, // llvm.ve.vl.vfmins.vsvvl
    12, // llvm.ve.vl.vfmins.vvvl
    12, // llvm.ve.vl.vfmins.vvvmvl
    12, // llvm.ve.vl.vfmins.vvvvl
    12, // llvm.ve.vl.vfmkdeq.mvl
    12, // llvm.ve.vl.vfmkdeq.mvml
    12, // llvm.ve.vl.vfmkdeqnan.mvl
    12, // llvm.ve.vl.vfmkdeqnan.mvml
    12, // llvm.ve.vl.vfmkdge.mvl
    12, // llvm.ve.vl.vfmkdge.mvml
    12, // llvm.ve.vl.vfmkdgenan.mvl
    12, // llvm.ve.vl.vfmkdgenan.mvml
    12, // llvm.ve.vl.vfmkdgt.mvl
    12, // llvm.ve.vl.vfmkdgt.mvml
    12, // llvm.ve.vl.vfmkdgtnan.mvl
    12, // llvm.ve.vl.vfmkdgtnan.mvml
    12, // llvm.ve.vl.vfmkdle.mvl
    12, // llvm.ve.vl.vfmkdle.mvml
    12, // llvm.ve.vl.vfmkdlenan.mvl
    12, // llvm.ve.vl.vfmkdlenan.mvml
    12, // llvm.ve.vl.vfmkdlt.mvl
    12, // llvm.ve.vl.vfmkdlt.mvml
    12, // llvm.ve.vl.vfmkdltnan.mvl
    12, // llvm.ve.vl.vfmkdltnan.mvml
    12, // llvm.ve.vl.vfmkdnan.mvl
    12, // llvm.ve.vl.vfmkdnan.mvml
    12, // llvm.ve.vl.vfmkdne.mvl
    12, // llvm.ve.vl.vfmkdne.mvml
    12, // llvm.ve.vl.vfmkdnenan.mvl
    12, // llvm.ve.vl.vfmkdnenan.mvml
    12, // llvm.ve.vl.vfmkdnum.mvl
    12, // llvm.ve.vl.vfmkdnum.mvml
    12, // llvm.ve.vl.vfmklaf.ml
    12, // llvm.ve.vl.vfmklat.ml
    12, // llvm.ve.vl.vfmkleq.mvl
    12, // llvm.ve.vl.vfmkleq.mvml
    12, // llvm.ve.vl.vfmkleqnan.mvl
    12, // llvm.ve.vl.vfmkleqnan.mvml
    12, // llvm.ve.vl.vfmklge.mvl
    12, // llvm.ve.vl.vfmklge.mvml
    12, // llvm.ve.vl.vfmklgenan.mvl
    12, // llvm.ve.vl.vfmklgenan.mvml
    12, // llvm.ve.vl.vfmklgt.mvl
    12, // llvm.ve.vl.vfmklgt.mvml
    12, // llvm.ve.vl.vfmklgtnan.mvl
    12, // llvm.ve.vl.vfmklgtnan.mvml
    12, // llvm.ve.vl.vfmklle.mvl
    12, // llvm.ve.vl.vfmklle.mvml
    12, // llvm.ve.vl.vfmkllenan.mvl
    12, // llvm.ve.vl.vfmkllenan.mvml
    12, // llvm.ve.vl.vfmkllt.mvl
    12, // llvm.ve.vl.vfmkllt.mvml
    12, // llvm.ve.vl.vfmklltnan.mvl
    12, // llvm.ve.vl.vfmklltnan.mvml
    12, // llvm.ve.vl.vfmklnan.mvl
    12, // llvm.ve.vl.vfmklnan.mvml
    12, // llvm.ve.vl.vfmklne.mvl
    12, // llvm.ve.vl.vfmklne.mvml
    12, // llvm.ve.vl.vfmklnenan.mvl
    12, // llvm.ve.vl.vfmklnenan.mvml
    12, // llvm.ve.vl.vfmklnum.mvl
    12, // llvm.ve.vl.vfmklnum.mvml
    12, // llvm.ve.vl.vfmkseq.mvl
    12, // llvm.ve.vl.vfmkseq.mvml
    12, // llvm.ve.vl.vfmkseqnan.mvl
    12, // llvm.ve.vl.vfmkseqnan.mvml
    12, // llvm.ve.vl.vfmksge.mvl
    12, // llvm.ve.vl.vfmksge.mvml
    12, // llvm.ve.vl.vfmksgenan.mvl
    12, // llvm.ve.vl.vfmksgenan.mvml
    12, // llvm.ve.vl.vfmksgt.mvl
    12, // llvm.ve.vl.vfmksgt.mvml
    12, // llvm.ve.vl.vfmksgtnan.mvl
    12, // llvm.ve.vl.vfmksgtnan.mvml
    12, // llvm.ve.vl.vfmksle.mvl
    12, // llvm.ve.vl.vfmksle.mvml
    12, // llvm.ve.vl.vfmkslenan.mvl
    12, // llvm.ve.vl.vfmkslenan.mvml
    12, // llvm.ve.vl.vfmkslt.mvl
    12, // llvm.ve.vl.vfmkslt.mvml
    12, // llvm.ve.vl.vfmksltnan.mvl
    12, // llvm.ve.vl.vfmksltnan.mvml
    12, // llvm.ve.vl.vfmksnan.mvl
    12, // llvm.ve.vl.vfmksnan.mvml
    12, // llvm.ve.vl.vfmksne.mvl
    12, // llvm.ve.vl.vfmksne.mvml
    12, // llvm.ve.vl.vfmksnenan.mvl
    12, // llvm.ve.vl.vfmksnenan.mvml
    12, // llvm.ve.vl.vfmksnum.mvl
    12, // llvm.ve.vl.vfmksnum.mvml
    12, // llvm.ve.vl.vfmkweq.mvl
    12, // llvm.ve.vl.vfmkweq.mvml
    12, // llvm.ve.vl.vfmkweqnan.mvl
    12, // llvm.ve.vl.vfmkweqnan.mvml
    12, // llvm.ve.vl.vfmkwge.mvl
    12, // llvm.ve.vl.vfmkwge.mvml
    12, // llvm.ve.vl.vfmkwgenan.mvl
    12, // llvm.ve.vl.vfmkwgenan.mvml
    12, // llvm.ve.vl.vfmkwgt.mvl
    12, // llvm.ve.vl.vfmkwgt.mvml
    12, // llvm.ve.vl.vfmkwgtnan.mvl
    12, // llvm.ve.vl.vfmkwgtnan.mvml
    12, // llvm.ve.vl.vfmkwle.mvl
    12, // llvm.ve.vl.vfmkwle.mvml
    12, // llvm.ve.vl.vfmkwlenan.mvl
    12, // llvm.ve.vl.vfmkwlenan.mvml
    12, // llvm.ve.vl.vfmkwlt.mvl
    12, // llvm.ve.vl.vfmkwlt.mvml
    12, // llvm.ve.vl.vfmkwltnan.mvl
    12, // llvm.ve.vl.vfmkwltnan.mvml
    12, // llvm.ve.vl.vfmkwnan.mvl
    12, // llvm.ve.vl.vfmkwnan.mvml
    12, // llvm.ve.vl.vfmkwne.mvl
    12, // llvm.ve.vl.vfmkwne.mvml
    12, // llvm.ve.vl.vfmkwnenan.mvl
    12, // llvm.ve.vl.vfmkwnenan.mvml
    12, // llvm.ve.vl.vfmkwnum.mvl
    12, // llvm.ve.vl.vfmkwnum.mvml
    12, // llvm.ve.vl.vfmsbd.vsvvl
    12, // llvm.ve.vl.vfmsbd.vsvvmvl
    12, // llvm.ve.vl.vfmsbd.vsvvvl
    12, // llvm.ve.vl.vfmsbd.vvsvl
    12, // llvm.ve.vl.vfmsbd.vvsvmvl
    12, // llvm.ve.vl.vfmsbd.vvsvvl
    12, // llvm.ve.vl.vfmsbd.vvvvl
    12, // llvm.ve.vl.vfmsbd.vvvvmvl
    12, // llvm.ve.vl.vfmsbd.vvvvvl
    12, // llvm.ve.vl.vfmsbs.vsvvl
    12, // llvm.ve.vl.vfmsbs.vsvvmvl
    12, // llvm.ve.vl.vfmsbs.vsvvvl
    12, // llvm.ve.vl.vfmsbs.vvsvl
    12, // llvm.ve.vl.vfmsbs.vvsvmvl
    12, // llvm.ve.vl.vfmsbs.vvsvvl
    12, // llvm.ve.vl.vfmsbs.vvvvl
    12, // llvm.ve.vl.vfmsbs.vvvvmvl
    12, // llvm.ve.vl.vfmsbs.vvvvvl
    12, // llvm.ve.vl.vfmuld.vsvl
    12, // llvm.ve.vl.vfmuld.vsvmvl
    12, // llvm.ve.vl.vfmuld.vsvvl
    12, // llvm.ve.vl.vfmuld.vvvl
    12, // llvm.ve.vl.vfmuld.vvvmvl
    12, // llvm.ve.vl.vfmuld.vvvvl
    12, // llvm.ve.vl.vfmuls.vsvl
    12, // llvm.ve.vl.vfmuls.vsvmvl
    12, // llvm.ve.vl.vfmuls.vsvvl
    12, // llvm.ve.vl.vfmuls.vvvl
    12, // llvm.ve.vl.vfmuls.vvvmvl
    12, // llvm.ve.vl.vfmuls.vvvvl
    12, // llvm.ve.vl.vfnmadd.vsvvl
    12, // llvm.ve.vl.vfnmadd.vsvvmvl
    12, // llvm.ve.vl.vfnmadd.vsvvvl
    12, // llvm.ve.vl.vfnmadd.vvsvl
    12, // llvm.ve.vl.vfnmadd.vvsvmvl
    12, // llvm.ve.vl.vfnmadd.vvsvvl
    12, // llvm.ve.vl.vfnmadd.vvvvl
    12, // llvm.ve.vl.vfnmadd.vvvvmvl
    12, // llvm.ve.vl.vfnmadd.vvvvvl
    12, // llvm.ve.vl.vfnmads.vsvvl
    12, // llvm.ve.vl.vfnmads.vsvvmvl
    12, // llvm.ve.vl.vfnmads.vsvvvl
    12, // llvm.ve.vl.vfnmads.vvsvl
    12, // llvm.ve.vl.vfnmads.vvsvmvl
    12, // llvm.ve.vl.vfnmads.vvsvvl
    12, // llvm.ve.vl.vfnmads.vvvvl
    12, // llvm.ve.vl.vfnmads.vvvvmvl
    12, // llvm.ve.vl.vfnmads.vvvvvl
    12, // llvm.ve.vl.vfnmsbd.vsvvl
    12, // llvm.ve.vl.vfnmsbd.vsvvmvl
    12, // llvm.ve.vl.vfnmsbd.vsvvvl
    12, // llvm.ve.vl.vfnmsbd.vvsvl
    12, // llvm.ve.vl.vfnmsbd.vvsvmvl
    12, // llvm.ve.vl.vfnmsbd.vvsvvl
    12, // llvm.ve.vl.vfnmsbd.vvvvl
    12, // llvm.ve.vl.vfnmsbd.vvvvmvl
    12, // llvm.ve.vl.vfnmsbd.vvvvvl
    12, // llvm.ve.vl.vfnmsbs.vsvvl
    12, // llvm.ve.vl.vfnmsbs.vsvvmvl
    12, // llvm.ve.vl.vfnmsbs.vsvvvl
    12, // llvm.ve.vl.vfnmsbs.vvsvl
    12, // llvm.ve.vl.vfnmsbs.vvsvmvl
    12, // llvm.ve.vl.vfnmsbs.vvsvvl
    12, // llvm.ve.vl.vfnmsbs.vvvvl
    12, // llvm.ve.vl.vfnmsbs.vvvvmvl
    12, // llvm.ve.vl.vfnmsbs.vvvvvl
    12, // llvm.ve.vl.vfrmaxdfst.vvl
    12, // llvm.ve.vl.vfrmaxdfst.vvvl
    12, // llvm.ve.vl.vfrmaxdlst.vvl
    12, // llvm.ve.vl.vfrmaxdlst.vvvl
    12, // llvm.ve.vl.vfrmaxsfst.vvl
    12, // llvm.ve.vl.vfrmaxsfst.vvvl
    12, // llvm.ve.vl.vfrmaxslst.vvl
    12, // llvm.ve.vl.vfrmaxslst.vvvl
    12, // llvm.ve.vl.vfrmindfst.vvl
    12, // llvm.ve.vl.vfrmindfst.vvvl
    12, // llvm.ve.vl.vfrmindlst.vvl
    12, // llvm.ve.vl.vfrmindlst.vvvl
    12, // llvm.ve.vl.vfrminsfst.vvl
    12, // llvm.ve.vl.vfrminsfst.vvvl
    12, // llvm.ve.vl.vfrminslst.vvl
    12, // llvm.ve.vl.vfrminslst.vvvl
    12, // llvm.ve.vl.vfsqrtd.vvl
    12, // llvm.ve.vl.vfsqrtd.vvvl
    12, // llvm.ve.vl.vfsqrts.vvl
    12, // llvm.ve.vl.vfsqrts.vvvl
    12, // llvm.ve.vl.vfsubd.vsvl
    12, // llvm.ve.vl.vfsubd.vsvmvl
    12, // llvm.ve.vl.vfsubd.vsvvl
    12, // llvm.ve.vl.vfsubd.vvvl
    12, // llvm.ve.vl.vfsubd.vvvmvl
    12, // llvm.ve.vl.vfsubd.vvvvl
    12, // llvm.ve.vl.vfsubs.vsvl
    12, // llvm.ve.vl.vfsubs.vsvmvl
    12, // llvm.ve.vl.vfsubs.vsvvl
    12, // llvm.ve.vl.vfsubs.vvvl
    12, // llvm.ve.vl.vfsubs.vvvmvl
    12, // llvm.ve.vl.vfsubs.vvvvl
    12, // llvm.ve.vl.vfsumd.vvl
    12, // llvm.ve.vl.vfsumd.vvml
    12, // llvm.ve.vl.vfsums.vvl
    12, // llvm.ve.vl.vfsums.vvml
    221, // llvm.ve.vl.vgt.vvssl
    221, // llvm.ve.vl.vgt.vvssml
    221, // llvm.ve.vl.vgt.vvssmvl
    221, // llvm.ve.vl.vgt.vvssvl
    221, // llvm.ve.vl.vgtlsx.vvssl
    221, // llvm.ve.vl.vgtlsx.vvssml
    221, // llvm.ve.vl.vgtlsx.vvssmvl
    221, // llvm.ve.vl.vgtlsx.vvssvl
    221, // llvm.ve.vl.vgtlsxnc.vvssl
    221, // llvm.ve.vl.vgtlsxnc.vvssml
    221, // llvm.ve.vl.vgtlsxnc.vvssmvl
    221, // llvm.ve.vl.vgtlsxnc.vvssvl
    221, // llvm.ve.vl.vgtlzx.vvssl
    221, // llvm.ve.vl.vgtlzx.vvssml
    221, // llvm.ve.vl.vgtlzx.vvssmvl
    221, // llvm.ve.vl.vgtlzx.vvssvl
    221, // llvm.ve.vl.vgtlzxnc.vvssl
    221, // llvm.ve.vl.vgtlzxnc.vvssml
    221, // llvm.ve.vl.vgtlzxnc.vvssmvl
    221, // llvm.ve.vl.vgtlzxnc.vvssvl
    221, // llvm.ve.vl.vgtnc.vvssl
    221, // llvm.ve.vl.vgtnc.vvssml
    221, // llvm.ve.vl.vgtnc.vvssmvl
    221, // llvm.ve.vl.vgtnc.vvssvl
    221, // llvm.ve.vl.vgtu.vvssl
    221, // llvm.ve.vl.vgtu.vvssml
    221, // llvm.ve.vl.vgtu.vvssmvl
    221, // llvm.ve.vl.vgtu.vvssvl
    221, // llvm.ve.vl.vgtunc.vvssl
    221, // llvm.ve.vl.vgtunc.vvssml
    221, // llvm.ve.vl.vgtunc.vvssmvl
    221, // llvm.ve.vl.vgtunc.vvssvl
    221, // llvm.ve.vl.vld.vssl
    221, // llvm.ve.vl.vld.vssvl
    221, // llvm.ve.vl.vld2d.vssl
    221, // llvm.ve.vl.vld2d.vssvl
    221, // llvm.ve.vl.vld2dnc.vssl
    221, // llvm.ve.vl.vld2dnc.vssvl
    221, // llvm.ve.vl.vldl2dsx.vssl
    221, // llvm.ve.vl.vldl2dsx.vssvl
    221, // llvm.ve.vl.vldl2dsxnc.vssl
    221, // llvm.ve.vl.vldl2dsxnc.vssvl
    221, // llvm.ve.vl.vldl2dzx.vssl
    221, // llvm.ve.vl.vldl2dzx.vssvl
    221, // llvm.ve.vl.vldl2dzxnc.vssl
    221, // llvm.ve.vl.vldl2dzxnc.vssvl
    221, // llvm.ve.vl.vldlsx.vssl
    221, // llvm.ve.vl.vldlsx.vssvl
    221, // llvm.ve.vl.vldlsxnc.vssl
    221, // llvm.ve.vl.vldlsxnc.vssvl
    221, // llvm.ve.vl.vldlzx.vssl
    221, // llvm.ve.vl.vldlzx.vssvl
    221, // llvm.ve.vl.vldlzxnc.vssl
    221, // llvm.ve.vl.vldlzxnc.vssvl
    221, // llvm.ve.vl.vldnc.vssl
    221, // llvm.ve.vl.vldnc.vssvl
    221, // llvm.ve.vl.vldu.vssl
    221, // llvm.ve.vl.vldu.vssvl
    221, // llvm.ve.vl.vldu2d.vssl
    221, // llvm.ve.vl.vldu2d.vssvl
    221, // llvm.ve.vl.vldu2dnc.vssl
    221, // llvm.ve.vl.vldu2dnc.vssvl
    221, // llvm.ve.vl.vldunc.vssl
    221, // llvm.ve.vl.vldunc.vssvl
    12, // llvm.ve.vl.vldz.vvl
    12, // llvm.ve.vl.vldz.vvmvl
    12, // llvm.ve.vl.vldz.vvvl
    12, // llvm.ve.vl.vmaxsl.vsvl
    12, // llvm.ve.vl.vmaxsl.vsvmvl
    12, // llvm.ve.vl.vmaxsl.vsvvl
    12, // llvm.ve.vl.vmaxsl.vvvl
    12, // llvm.ve.vl.vmaxsl.vvvmvl
    12, // llvm.ve.vl.vmaxsl.vvvvl
    12, // llvm.ve.vl.vmaxswsx.vsvl
    12, // llvm.ve.vl.vmaxswsx.vsvmvl
    12, // llvm.ve.vl.vmaxswsx.vsvvl
    12, // llvm.ve.vl.vmaxswsx.vvvl
    12, // llvm.ve.vl.vmaxswsx.vvvmvl
    12, // llvm.ve.vl.vmaxswsx.vvvvl
    12, // llvm.ve.vl.vmaxswzx.vsvl
    12, // llvm.ve.vl.vmaxswzx.vsvmvl
    12, // llvm.ve.vl.vmaxswzx.vsvvl
    12, // llvm.ve.vl.vmaxswzx.vvvl
    12, // llvm.ve.vl.vmaxswzx.vvvmvl
    12, // llvm.ve.vl.vmaxswzx.vvvvl
    12, // llvm.ve.vl.vminsl.vsvl
    12, // llvm.ve.vl.vminsl.vsvmvl
    12, // llvm.ve.vl.vminsl.vsvvl
    12, // llvm.ve.vl.vminsl.vvvl
    12, // llvm.ve.vl.vminsl.vvvmvl
    12, // llvm.ve.vl.vminsl.vvvvl
    12, // llvm.ve.vl.vminswsx.vsvl
    12, // llvm.ve.vl.vminswsx.vsvmvl
    12, // llvm.ve.vl.vminswsx.vsvvl
    12, // llvm.ve.vl.vminswsx.vvvl
    12, // llvm.ve.vl.vminswsx.vvvmvl
    12, // llvm.ve.vl.vminswsx.vvvvl
    12, // llvm.ve.vl.vminswzx.vsvl
    12, // llvm.ve.vl.vminswzx.vsvmvl
    12, // llvm.ve.vl.vminswzx.vsvvl
    12, // llvm.ve.vl.vminswzx.vvvl
    12, // llvm.ve.vl.vminswzx.vvvmvl
    12, // llvm.ve.vl.vminswzx.vvvvl
    12, // llvm.ve.vl.vmrg.vsvml
    12, // llvm.ve.vl.vmrg.vsvmvl
    12, // llvm.ve.vl.vmrg.vvvml
    12, // llvm.ve.vl.vmrg.vvvmvl
    12, // llvm.ve.vl.vmrgw.vsvMl
    12, // llvm.ve.vl.vmrgw.vsvMvl
    12, // llvm.ve.vl.vmrgw.vvvMl
    12, // llvm.ve.vl.vmrgw.vvvMvl
    12, // llvm.ve.vl.vmulsl.vsvl
    12, // llvm.ve.vl.vmulsl.vsvmvl
    12, // llvm.ve.vl.vmulsl.vsvvl
    12, // llvm.ve.vl.vmulsl.vvvl
    12, // llvm.ve.vl.vmulsl.vvvmvl
    12, // llvm.ve.vl.vmulsl.vvvvl
    12, // llvm.ve.vl.vmulslw.vsvl
    12, // llvm.ve.vl.vmulslw.vsvvl
    12, // llvm.ve.vl.vmulslw.vvvl
    12, // llvm.ve.vl.vmulslw.vvvvl
    12, // llvm.ve.vl.vmulswsx.vsvl
    12, // llvm.ve.vl.vmulswsx.vsvmvl
    12, // llvm.ve.vl.vmulswsx.vsvvl
    12, // llvm.ve.vl.vmulswsx.vvvl
    12, // llvm.ve.vl.vmulswsx.vvvmvl
    12, // llvm.ve.vl.vmulswsx.vvvvl
    12, // llvm.ve.vl.vmulswzx.vsvl
    12, // llvm.ve.vl.vmulswzx.vsvmvl
    12, // llvm.ve.vl.vmulswzx.vsvvl
    12, // llvm.ve.vl.vmulswzx.vvvl
    12, // llvm.ve.vl.vmulswzx.vvvmvl
    12, // llvm.ve.vl.vmulswzx.vvvvl
    12, // llvm.ve.vl.vmulul.vsvl
    12, // llvm.ve.vl.vmulul.vsvmvl
    12, // llvm.ve.vl.vmulul.vsvvl
    12, // llvm.ve.vl.vmulul.vvvl
    12, // llvm.ve.vl.vmulul.vvvmvl
    12, // llvm.ve.vl.vmulul.vvvvl
    12, // llvm.ve.vl.vmuluw.vsvl
    12, // llvm.ve.vl.vmuluw.vsvmvl
    12, // llvm.ve.vl.vmuluw.vsvvl
    12, // llvm.ve.vl.vmuluw.vvvl
    12, // llvm.ve.vl.vmuluw.vvvmvl
    12, // llvm.ve.vl.vmuluw.vvvvl
    12, // llvm.ve.vl.vmv.vsvl
    12, // llvm.ve.vl.vmv.vsvmvl
    12, // llvm.ve.vl.vmv.vsvvl
    12, // llvm.ve.vl.vor.vsvl
    12, // llvm.ve.vl.vor.vsvmvl
    12, // llvm.ve.vl.vor.vsvvl
    12, // llvm.ve.vl.vor.vvvl
    12, // llvm.ve.vl.vor.vvvmvl
    12, // llvm.ve.vl.vor.vvvvl
    12, // llvm.ve.vl.vpcnt.vvl
    12, // llvm.ve.vl.vpcnt.vvmvl
    12, // llvm.ve.vl.vpcnt.vvvl
    12, // llvm.ve.vl.vrand.vvl
    12, // llvm.ve.vl.vrand.vvml
    12, // llvm.ve.vl.vrcpd.vvl
    12, // llvm.ve.vl.vrcpd.vvvl
    12, // llvm.ve.vl.vrcps.vvl
    12, // llvm.ve.vl.vrcps.vvvl
    12, // llvm.ve.vl.vrmaxslfst.vvl
    12, // llvm.ve.vl.vrmaxslfst.vvvl
    12, // llvm.ve.vl.vrmaxsllst.vvl
    12, // llvm.ve.vl.vrmaxsllst.vvvl
    12, // llvm.ve.vl.vrmaxswfstsx.vvl
    12, // llvm.ve.vl.vrmaxswfstsx.vvvl
    12, // llvm.ve.vl.vrmaxswfstzx.vvl
    12, // llvm.ve.vl.vrmaxswfstzx.vvvl
    12, // llvm.ve.vl.vrmaxswlstsx.vvl
    12, // llvm.ve.vl.vrmaxswlstsx.vvvl
    12, // llvm.ve.vl.vrmaxswlstzx.vvl
    12, // llvm.ve.vl.vrmaxswlstzx.vvvl
    12, // llvm.ve.vl.vrminslfst.vvl
    12, // llvm.ve.vl.vrminslfst.vvvl
    12, // llvm.ve.vl.vrminsllst.vvl
    12, // llvm.ve.vl.vrminsllst.vvvl
    12, // llvm.ve.vl.vrminswfstsx.vvl
    12, // llvm.ve.vl.vrminswfstsx.vvvl
    12, // llvm.ve.vl.vrminswfstzx.vvl
    12, // llvm.ve.vl.vrminswfstzx.vvvl
    12, // llvm.ve.vl.vrminswlstsx.vvl
    12, // llvm.ve.vl.vrminswlstsx.vvvl
    12, // llvm.ve.vl.vrminswlstzx.vvl
    12, // llvm.ve.vl.vrminswlstzx.vvvl
    12, // llvm.ve.vl.vror.vvl
    12, // llvm.ve.vl.vror.vvml
    12, // llvm.ve.vl.vrsqrtd.vvl
    12, // llvm.ve.vl.vrsqrtd.vvvl
    12, // llvm.ve.vl.vrsqrtdnex.vvl
    12, // llvm.ve.vl.vrsqrtdnex.vvvl
    12, // llvm.ve.vl.vrsqrts.vvl
    12, // llvm.ve.vl.vrsqrts.vvvl
    12, // llvm.ve.vl.vrsqrtsnex.vvl
    12, // llvm.ve.vl.vrsqrtsnex.vvvl
    12, // llvm.ve.vl.vrxor.vvl
    12, // llvm.ve.vl.vrxor.vvml
    263, // llvm.ve.vl.vsc.vvssl
    263, // llvm.ve.vl.vsc.vvssml
    263, // llvm.ve.vl.vscl.vvssl
    263, // llvm.ve.vl.vscl.vvssml
    263, // llvm.ve.vl.vsclnc.vvssl
    263, // llvm.ve.vl.vsclnc.vvssml
    263, // llvm.ve.vl.vsclncot.vvssl
    263, // llvm.ve.vl.vsclncot.vvssml
    263, // llvm.ve.vl.vsclot.vvssl
    263, // llvm.ve.vl.vsclot.vvssml
    263, // llvm.ve.vl.vscnc.vvssl
    263, // llvm.ve.vl.vscnc.vvssml
    263, // llvm.ve.vl.vscncot.vvssl
    263, // llvm.ve.vl.vscncot.vvssml
    263, // llvm.ve.vl.vscot.vvssl
    263, // llvm.ve.vl.vscot.vvssml
    263, // llvm.ve.vl.vscu.vvssl
    263, // llvm.ve.vl.vscu.vvssml
    263, // llvm.ve.vl.vscunc.vvssl
    263, // llvm.ve.vl.vscunc.vvssml
    263, // llvm.ve.vl.vscuncot.vvssl
    263, // llvm.ve.vl.vscuncot.vvssml
    263, // llvm.ve.vl.vscuot.vvssl
    263, // llvm.ve.vl.vscuot.vvssml
    12, // llvm.ve.vl.vseq.vl
    12, // llvm.ve.vl.vseq.vvl
    12, // llvm.ve.vl.vsfa.vvssl
    12, // llvm.ve.vl.vsfa.vvssmvl
    12, // llvm.ve.vl.vsfa.vvssvl
    12, // llvm.ve.vl.vshf.vvvsl
    12, // llvm.ve.vl.vshf.vvvsvl
    12, // llvm.ve.vl.vslal.vvsl
    12, // llvm.ve.vl.vslal.vvsmvl
    12, // llvm.ve.vl.vslal.vvsvl
    12, // llvm.ve.vl.vslal.vvvl
    12, // llvm.ve.vl.vslal.vvvmvl
    12, // llvm.ve.vl.vslal.vvvvl
    12, // llvm.ve.vl.vslawsx.vvsl
    12, // llvm.ve.vl.vslawsx.vvsmvl
    12, // llvm.ve.vl.vslawsx.vvsvl
    12, // llvm.ve.vl.vslawsx.vvvl
    12, // llvm.ve.vl.vslawsx.vvvmvl
    12, // llvm.ve.vl.vslawsx.vvvvl
    12, // llvm.ve.vl.vslawzx.vvsl
    12, // llvm.ve.vl.vslawzx.vvsmvl
    12, // llvm.ve.vl.vslawzx.vvsvl
    12, // llvm.ve.vl.vslawzx.vvvl
    12, // llvm.ve.vl.vslawzx.vvvmvl
    12, // llvm.ve.vl.vslawzx.vvvvl
    12, // llvm.ve.vl.vsll.vvsl
    12, // llvm.ve.vl.vsll.vvsmvl
    12, // llvm.ve.vl.vsll.vvsvl
    12, // llvm.ve.vl.vsll.vvvl
    12, // llvm.ve.vl.vsll.vvvmvl
    12, // llvm.ve.vl.vsll.vvvvl
    12, // llvm.ve.vl.vsral.vvsl
    12, // llvm.ve.vl.vsral.vvsmvl
    12, // llvm.ve.vl.vsral.vvsvl
    12, // llvm.ve.vl.vsral.vvvl
    12, // llvm.ve.vl.vsral.vvvmvl
    12, // llvm.ve.vl.vsral.vvvvl
    12, // llvm.ve.vl.vsrawsx.vvsl
    12, // llvm.ve.vl.vsrawsx.vvsmvl
    12, // llvm.ve.vl.vsrawsx.vvsvl
    12, // llvm.ve.vl.vsrawsx.vvvl
    12, // llvm.ve.vl.vsrawsx.vvvmvl
    12, // llvm.ve.vl.vsrawsx.vvvvl
    12, // llvm.ve.vl.vsrawzx.vvsl
    12, // llvm.ve.vl.vsrawzx.vvsmvl
    12, // llvm.ve.vl.vsrawzx.vvsvl
    12, // llvm.ve.vl.vsrawzx.vvvl
    12, // llvm.ve.vl.vsrawzx.vvvmvl
    12, // llvm.ve.vl.vsrawzx.vvvvl
    12, // llvm.ve.vl.vsrl.vvsl
    12, // llvm.ve.vl.vsrl.vvsmvl
    12, // llvm.ve.vl.vsrl.vvsvl
    12, // llvm.ve.vl.vsrl.vvvl
    12, // llvm.ve.vl.vsrl.vvvmvl
    12, // llvm.ve.vl.vsrl.vvvvl
    263, // llvm.ve.vl.vst.vssl
    263, // llvm.ve.vl.vst.vssml
    263, // llvm.ve.vl.vst2d.vssl
    263, // llvm.ve.vl.vst2d.vssml
    263, // llvm.ve.vl.vst2dnc.vssl
    263, // llvm.ve.vl.vst2dnc.vssml
    263, // llvm.ve.vl.vst2dncot.vssl
    263, // llvm.ve.vl.vst2dncot.vssml
    263, // llvm.ve.vl.vst2dot.vssl
    263, // llvm.ve.vl.vst2dot.vssml
    263, // llvm.ve.vl.vstl.vssl
    263, // llvm.ve.vl.vstl.vssml
    263, // llvm.ve.vl.vstl2d.vssl
    263, // llvm.ve.vl.vstl2d.vssml
    263, // llvm.ve.vl.vstl2dnc.vssl
    263, // llvm.ve.vl.vstl2dnc.vssml
    263, // llvm.ve.vl.vstl2dncot.vssl
    263, // llvm.ve.vl.vstl2dncot.vssml
    263, // llvm.ve.vl.vstl2dot.vssl
    263, // llvm.ve.vl.vstl2dot.vssml
    263, // llvm.ve.vl.vstlnc.vssl
    263, // llvm.ve.vl.vstlnc.vssml
    263, // llvm.ve.vl.vstlncot.vssl
    263, // llvm.ve.vl.vstlncot.vssml
    263, // llvm.ve.vl.vstlot.vssl
    263, // llvm.ve.vl.vstlot.vssml
    263, // llvm.ve.vl.vstnc.vssl
    263, // llvm.ve.vl.vstnc.vssml
    263, // llvm.ve.vl.vstncot.vssl
    263, // llvm.ve.vl.vstncot.vssml
    263, // llvm.ve.vl.vstot.vssl
    263, // llvm.ve.vl.vstot.vssml
    263, // llvm.ve.vl.vstu.vssl
    263, // llvm.ve.vl.vstu.vssml
    263, // llvm.ve.vl.vstu2d.vssl
    263, // llvm.ve.vl.vstu2d.vssml
    263, // llvm.ve.vl.vstu2dnc.vssl
    263, // llvm.ve.vl.vstu2dnc.vssml
    263, // llvm.ve.vl.vstu2dncot.vssl
    263, // llvm.ve.vl.vstu2dncot.vssml
    263, // llvm.ve.vl.vstu2dot.vssl
    263, // llvm.ve.vl.vstu2dot.vssml
    263, // llvm.ve.vl.vstunc.vssl
    263, // llvm.ve.vl.vstunc.vssml
    263, // llvm.ve.vl.vstuncot.vssl
    263, // llvm.ve.vl.vstuncot.vssml
    263, // llvm.ve.vl.vstuot.vssl
    263, // llvm.ve.vl.vstuot.vssml
    12, // llvm.ve.vl.vsubsl.vsvl
    12, // llvm.ve.vl.vsubsl.vsvmvl
    12, // llvm.ve.vl.vsubsl.vsvvl
    12, // llvm.ve.vl.vsubsl.vvvl
    12, // llvm.ve.vl.vsubsl.vvvmvl
    12, // llvm.ve.vl.vsubsl.vvvvl
    12, // llvm.ve.vl.vsubswsx.vsvl
    12, // llvm.ve.vl.vsubswsx.vsvmvl
    12, // llvm.ve.vl.vsubswsx.vsvvl
    12, // llvm.ve.vl.vsubswsx.vvvl
    12, // llvm.ve.vl.vsubswsx.vvvmvl
    12, // llvm.ve.vl.vsubswsx.vvvvl
    12, // llvm.ve.vl.vsubswzx.vsvl
    12, // llvm.ve.vl.vsubswzx.vsvmvl
    12, // llvm.ve.vl.vsubswzx.vsvvl
    12, // llvm.ve.vl.vsubswzx.vvvl
    12, // llvm.ve.vl.vsubswzx.vvvmvl
    12, // llvm.ve.vl.vsubswzx.vvvvl
    12, // llvm.ve.vl.vsubul.vsvl
    12, // llvm.ve.vl.vsubul.vsvmvl
    12, // llvm.ve.vl.vsubul.vsvvl
    12, // llvm.ve.vl.vsubul.vvvl
    12, // llvm.ve.vl.vsubul.vvvmvl
    12, // llvm.ve.vl.vsubul.vvvvl
    12, // llvm.ve.vl.vsubuw.vsvl
    12, // llvm.ve.vl.vsubuw.vsvmvl
    12, // llvm.ve.vl.vsubuw.vsvvl
    12, // llvm.ve.vl.vsubuw.vvvl
    12, // llvm.ve.vl.vsubuw.vvvmvl
    12, // llvm.ve.vl.vsubuw.vvvvl
    12, // llvm.ve.vl.vsuml.vvl
    12, // llvm.ve.vl.vsuml.vvml
    12, // llvm.ve.vl.vsumwsx.vvl
    12, // llvm.ve.vl.vsumwsx.vvml
    12, // llvm.ve.vl.vsumwzx.vvl
    12, // llvm.ve.vl.vsumwzx.vvml
    12, // llvm.ve.vl.vxor.vsvl
    12, // llvm.ve.vl.vxor.vsvmvl
    12, // llvm.ve.vl.vxor.vsvvl
    12, // llvm.ve.vl.vxor.vvvl
    12, // llvm.ve.vl.vxor.vvvmvl
    12, // llvm.ve.vl.vxor.vvvvl
    12, // llvm.ve.vl.xorm.MMM
    12, // llvm.ve.vl.xorm.mmm
    5, // llvm.wasm.alltrue
    5, // llvm.wasm.anytrue
    5, // llvm.wasm.avgr.unsigned
    5, // llvm.wasm.bitmask
    5, // llvm.wasm.bitselect
    339, // llvm.wasm.catch
    5, // llvm.wasm.dot
    5, // llvm.wasm.extadd.pairwise.signed
    5, // llvm.wasm.extadd.pairwise.unsigned
    261, // llvm.wasm.get.ehselector
    261, // llvm.wasm.get.exception
    59, // llvm.wasm.landingpad.index
    2, // llvm.wasm.lsda
    340, // llvm.wasm.memory.atomic.notify
    341, // llvm.wasm.memory.atomic.wait32
    341, // llvm.wasm.memory.atomic.wait64
    8, // llvm.wasm.memory.grow
    62, // llvm.wasm.memory.size
    5, // llvm.wasm.narrow.signed
    5, // llvm.wasm.narrow.unsigned
    5, // llvm.wasm.pmax
    5, // llvm.wasm.pmin
    5, // llvm.wasm.q15mulr.sat.signed
    2, // llvm.wasm.ref.is_null.extern
    2, // llvm.wasm.ref.is_null.func
    2, // llvm.wasm.ref.null.extern
    2, // llvm.wasm.ref.null.func
    5, // llvm.wasm.relaxed.dot.bf16x8.add.f32
    5, // llvm.wasm.relaxed.dot.i8x16.i7x16.add.signed
    5, // llvm.wasm.relaxed.dot.i8x16.i7x16.signed
    5, // llvm.wasm.relaxed.laneselect
    5, // llvm.wasm.relaxed.madd
    5, // llvm.wasm.relaxed.max
    5, // llvm.wasm.relaxed.min
    5, // llvm.wasm.relaxed.nmadd
    5, // llvm.wasm.relaxed.q15mulr.signed
    5, // llvm.wasm.relaxed.swizzle
    5, // llvm.wasm.relaxed.trunc.signed
    5, // llvm.wasm.relaxed.trunc.signed.zero
    5, // llvm.wasm.relaxed.trunc.unsigned
    5, // llvm.wasm.relaxed.trunc.unsigned.zero
    342, // llvm.wasm.rethrow
    343, // llvm.wasm.shuffle
    5, // llvm.wasm.sub.sat.signed
    5, // llvm.wasm.sub.sat.unsigned
    5, // llvm.wasm.swizzle
    8, // llvm.wasm.table.copy
    8, // llvm.wasm.table.fill.externref
    8, // llvm.wasm.table.fill.funcref
    62, // llvm.wasm.table.get.externref
    62, // llvm.wasm.table.get.funcref
    8, // llvm.wasm.table.grow.externref
    8, // llvm.wasm.table.grow.funcref
    88, // llvm.wasm.table.set.externref
    88, // llvm.wasm.table.set.funcref
    62, // llvm.wasm.table.size
    344, // llvm.wasm.throw
    5, // llvm.wasm.tls.align
    62, // llvm.wasm.tls.base
    5, // llvm.wasm.tls.size
    5, // llvm.wasm.trunc.saturate.signed
    5, // llvm.wasm.trunc.saturate.unsigned
    12, // llvm.wasm.trunc.signed
    12, // llvm.wasm.trunc.unsigned
    2, // llvm.x86.3dnow.pavgusb
    2, // llvm.x86.3dnow.pf2id
    2, // llvm.x86.3dnow.pfacc
    2, // llvm.x86.3dnow.pfadd
    2, // llvm.x86.3dnow.pfcmpeq
    2, // llvm.x86.3dnow.pfcmpge
    2, // llvm.x86.3dnow.pfcmpgt
    2, // llvm.x86.3dnow.pfmax
    2, // llvm.x86.3dnow.pfmin
    2, // llvm.x86.3dnow.pfmul
    2, // llvm.x86.3dnow.pfrcp
    2, // llvm.x86.3dnow.pfrcpit1
    2, // llvm.x86.3dnow.pfrcpit2
    2, // llvm.x86.3dnow.pfrsqit1
    2, // llvm.x86.3dnow.pfrsqrt
    2, // llvm.x86.3dnow.pfsub
    2, // llvm.x86.3dnow.pfsubr
    2, // llvm.x86.3dnow.pi2fd
    2, // llvm.x86.3dnow.pmulhrw
    2, // llvm.x86.3dnowa.pf2iw
    2, // llvm.x86.3dnowa.pfnacc
    2, // llvm.x86.3dnowa.pfpnacc
    2, // llvm.x86.3dnowa.pi2fw
    2, // llvm.x86.3dnowa.pswapd
    240, // llvm.x86.aadd32
    240, // llvm.x86.aadd64
    240, // llvm.x86.aand32
    240, // llvm.x86.aand64
    2, // llvm.x86.addcarry.32
    2, // llvm.x86.addcarry.64
    10, // llvm.x86.aesdec128kl
    10, // llvm.x86.aesdec256kl
    10, // llvm.x86.aesdecwide128kl
    10, // llvm.x86.aesdecwide256kl
    10, // llvm.x86.aesenc128kl
    10, // llvm.x86.aesenc256kl
    10, // llvm.x86.aesencwide128kl
    10, // llvm.x86.aesencwide256kl
    2, // llvm.x86.aesni.aesdec
    2, // llvm.x86.aesni.aesdec.256
    2, // llvm.x86.aesni.aesdec.512
    2, // llvm.x86.aesni.aesdeclast
    2, // llvm.x86.aesni.aesdeclast.256
    2, // llvm.x86.aesni.aesdeclast.512
    2, // llvm.x86.aesni.aesenc
    2, // llvm.x86.aesni.aesenc.256
    2, // llvm.x86.aesni.aesenc.512
    2, // llvm.x86.aesni.aesenclast
    2, // llvm.x86.aesni.aesenclast.256
    2, // llvm.x86.aesni.aesenclast.512
    2, // llvm.x86.aesni.aesimc
    59, // llvm.x86.aesni.aeskeygenassist
    240, // llvm.x86.aor32
    240, // llvm.x86.aor64
    35, // llvm.x86.atomic.add.cc
    35, // llvm.x86.atomic.and.cc
    6, // llvm.x86.atomic.btc
    10, // llvm.x86.atomic.btc.rm
    6, // llvm.x86.atomic.btr
    10, // llvm.x86.atomic.btr.rm
    6, // llvm.x86.atomic.bts
    10, // llvm.x86.atomic.bts.rm
    35, // llvm.x86.atomic.or.cc
    35, // llvm.x86.atomic.sub.cc
    35, // llvm.x86.atomic.xor.cc
    2, // llvm.x86.avx.addsub.pd.256
    2, // llvm.x86.avx.addsub.ps.256
    2, // llvm.x86.avx.blendv.pd.256
    2, // llvm.x86.avx.blendv.ps.256
    22, // llvm.x86.avx.cmp.pd.256
    22, // llvm.x86.avx.cmp.ps.256
    2, // llvm.x86.avx.cvt.pd2.ps.256
    2, // llvm.x86.avx.cvt.pd2dq.256
    2, // llvm.x86.avx.cvt.ps2dq.256
    2, // llvm.x86.avx.cvtt.pd2dq.256
    2, // llvm.x86.avx.cvtt.ps2dq.256
    22, // llvm.x86.avx.dp.ps.256
    2, // llvm.x86.avx.hadd.pd.256
    2, // llvm.x86.avx.hadd.ps.256
    2, // llvm.x86.avx.hsub.pd.256
    2, // llvm.x86.avx.hsub.ps.256
    62, // llvm.x86.avx.ldu.dq.256
    3, // llvm.x86.avx.maskload.pd
    3, // llvm.x86.avx.maskload.pd.256
    3, // llvm.x86.avx.maskload.ps
    3, // llvm.x86.avx.maskload.ps.256
    240, // llvm.x86.avx.maskstore.pd
    240, // llvm.x86.avx.maskstore.pd.256
    240, // llvm.x86.avx.maskstore.ps
    240, // llvm.x86.avx.maskstore.ps.256
    2, // llvm.x86.avx.max.pd.256
    2, // llvm.x86.avx.max.ps.256
    2, // llvm.x86.avx.min.pd.256
    2, // llvm.x86.avx.min.ps.256
    2, // llvm.x86.avx.movmsk.pd.256
    2, // llvm.x86.avx.movmsk.ps.256
    2, // llvm.x86.avx.ptestc.256
    2, // llvm.x86.avx.ptestnzc.256
    2, // llvm.x86.avx.ptestz.256
    2, // llvm.x86.avx.rcp.ps.256
    59, // llvm.x86.avx.round.pd.256
    59, // llvm.x86.avx.round.ps.256
    2, // llvm.x86.avx.rsqrt.ps.256
    2, // llvm.x86.avx.vpermilvar.pd
    2, // llvm.x86.avx.vpermilvar.pd.256
    2, // llvm.x86.avx.vpermilvar.ps
    2, // llvm.x86.avx.vpermilvar.ps.256
    2, // llvm.x86.avx.vtestc.pd
    2, // llvm.x86.avx.vtestc.pd.256
    2, // llvm.x86.avx.vtestc.ps
    2, // llvm.x86.avx.vtestc.ps.256
    2, // llvm.x86.avx.vtestnzc.pd
    2, // llvm.x86.avx.vtestnzc.pd.256
    2, // llvm.x86.avx.vtestnzc.ps
    2, // llvm.x86.avx.vtestnzc.ps.256
    2, // llvm.x86.avx.vtestz.pd
    2, // llvm.x86.avx.vtestz.pd.256
    2, // llvm.x86.avx.vtestz.ps
    2, // llvm.x86.avx.vtestz.ps.256
    255, // llvm.x86.avx.vzeroall
    255, // llvm.x86.avx.vzeroupper
    188, // llvm.x86.avx2.gather.d.d
    188, // llvm.x86.avx2.gather.d.d.256
    188, // llvm.x86.avx2.gather.d.pd
    188, // llvm.x86.avx2.gather.d.pd.256
    188, // llvm.x86.avx2.gather.d.ps
    188, // llvm.x86.avx2.gather.d.ps.256
    188, // llvm.x86.avx2.gather.d.q
    188, // llvm.x86.avx2.gather.d.q.256
    188, // llvm.x86.avx2.gather.q.d
    188, // llvm.x86.avx2.gather.q.d.256
    188, // llvm.x86.avx2.gather.q.pd
    188, // llvm.x86.avx2.gather.q.pd.256
    188, // llvm.x86.avx2.gather.q.ps
    188, // llvm.x86.avx2.gather.q.ps.256
    188, // llvm.x86.avx2.gather.q.q
    188, // llvm.x86.avx2.gather.q.q.256
    3, // llvm.x86.avx2.maskload.d
    3, // llvm.x86.avx2.maskload.d.256
    3, // llvm.x86.avx2.maskload.q
    3, // llvm.x86.avx2.maskload.q.256
    240, // llvm.x86.avx2.maskstore.d
    240, // llvm.x86.avx2.maskstore.d.256
    240, // llvm.x86.avx2.maskstore.q
    240, // llvm.x86.avx2.maskstore.q.256
    22, // llvm.x86.avx2.mpsadbw
    2, // llvm.x86.avx2.packssdw
    2, // llvm.x86.avx2.packsswb
    2, // llvm.x86.avx2.packusdw
    2, // llvm.x86.avx2.packuswb
    2, // llvm.x86.avx2.pavg.b
    2, // llvm.x86.avx2.pavg.w
    2, // llvm.x86.avx2.pblendvb
    2, // llvm.x86.avx2.permd
    2, // llvm.x86.avx2.permps
    2, // llvm.x86.avx2.phadd.d
    2, // llvm.x86.avx2.phadd.sw
    2, // llvm.x86.avx2.phadd.w
    2, // llvm.x86.avx2.phsub.d
    2, // llvm.x86.avx2.phsub.sw
    2, // llvm.x86.avx2.phsub.w
    2, // llvm.x86.avx2.pmadd.ub.sw
    2, // llvm.x86.avx2.pmadd.wd
    2, // llvm.x86.avx2.pmovmskb
    2, // llvm.x86.avx2.pmul.hr.sw
    2, // llvm.x86.avx2.pmulh.w
    2, // llvm.x86.avx2.pmulhu.w
    2, // llvm.x86.avx2.psad.bw
    2, // llvm.x86.avx2.pshuf.b
    2, // llvm.x86.avx2.psign.b
    2, // llvm.x86.avx2.psign.d
    2, // llvm.x86.avx2.psign.w
    2, // llvm.x86.avx2.psll.d
    2, // llvm.x86.avx2.psll.q
    2, // llvm.x86.avx2.psll.w
    2, // llvm.x86.avx2.pslli.d
    2, // llvm.x86.avx2.pslli.q
    2, // llvm.x86.avx2.pslli.w
    2, // llvm.x86.avx2.psllv.d
    2, // llvm.x86.avx2.psllv.d.256
    2, // llvm.x86.avx2.psllv.q
    2, // llvm.x86.avx2.psllv.q.256
    2, // llvm.x86.avx2.psra.d
    2, // llvm.x86.avx2.psra.w
    2, // llvm.x86.avx2.psrai.d
    2, // llvm.x86.avx2.psrai.w
    2, // llvm.x86.avx2.psrav.d
    2, // llvm.x86.avx2.psrav.d.256
    2, // llvm.x86.avx2.psrl.d
    2, // llvm.x86.avx2.psrl.q
    2, // llvm.x86.avx2.psrl.w
    2, // llvm.x86.avx2.psrli.d
    2, // llvm.x86.avx2.psrli.q
    2, // llvm.x86.avx2.psrli.w
    2, // llvm.x86.avx2.psrlv.d
    2, // llvm.x86.avx2.psrlv.d.256
    2, // llvm.x86.avx2.psrlv.q
    2, // llvm.x86.avx2.psrlv.q.256
    2, // llvm.x86.avx2.vpdpbssd.128
    2, // llvm.x86.avx2.vpdpbssd.256
    2, // llvm.x86.avx2.vpdpbssds.128
    2, // llvm.x86.avx2.vpdpbssds.256
    2, // llvm.x86.avx2.vpdpbsud.128
    2, // llvm.x86.avx2.vpdpbsud.256
    2, // llvm.x86.avx2.vpdpbsuds.128
    2, // llvm.x86.avx2.vpdpbsuds.256
    2, // llvm.x86.avx2.vpdpbuud.128
    2, // llvm.x86.avx2.vpdpbuud.256
    2, // llvm.x86.avx2.vpdpbuuds.128
    2, // llvm.x86.avx2.vpdpbuuds.256
    2, // llvm.x86.avx2.vpdpwsud.128
    2, // llvm.x86.avx2.vpdpwsud.256
    2, // llvm.x86.avx2.vpdpwsuds.128
    2, // llvm.x86.avx2.vpdpwsuds.256
    2, // llvm.x86.avx2.vpdpwusd.128
    2, // llvm.x86.avx2.vpdpwusd.256
    2, // llvm.x86.avx2.vpdpwusds.128
    2, // llvm.x86.avx2.vpdpwusds.256
    2, // llvm.x86.avx2.vpdpwuud.128
    2, // llvm.x86.avx2.vpdpwuud.256
    2, // llvm.x86.avx2.vpdpwuuds.128
    2, // llvm.x86.avx2.vpdpwuuds.256
    22, // llvm.x86.avx512.add.pd.512
    22, // llvm.x86.avx512.add.ps.512
    2, // llvm.x86.avx512.broadcastmb.128
    2, // llvm.x86.avx512.broadcastmb.256
    2, // llvm.x86.avx512.broadcastmb.512
    2, // llvm.x86.avx512.broadcastmw.128
    2, // llvm.x86.avx512.broadcastmw.256
    2, // llvm.x86.avx512.broadcastmw.512
    2, // llvm.x86.avx512.conflict.d.128
    2, // llvm.x86.avx512.conflict.d.256
    2, // llvm.x86.avx512.conflict.d.512
    2, // llvm.x86.avx512.conflict.q.128
    2, // llvm.x86.avx512.conflict.q.256
    2, // llvm.x86.avx512.conflict.q.512
    22, // llvm.x86.avx512.cvtsi2sd64
    22, // llvm.x86.avx512.cvtsi2ss32
    22, // llvm.x86.avx512.cvtsi2ss64
    59, // llvm.x86.avx512.cvttsd2si
    59, // llvm.x86.avx512.cvttsd2si64
    59, // llvm.x86.avx512.cvttsd2usi
    59, // llvm.x86.avx512.cvttsd2usi64
    59, // llvm.x86.avx512.cvttss2si
    59, // llvm.x86.avx512.cvttss2si64
    59, // llvm.x86.avx512.cvttss2usi
    59, // llvm.x86.avx512.cvttss2usi64
    22, // llvm.x86.avx512.cvtusi2ss
    22, // llvm.x86.avx512.cvtusi642sd
    22, // llvm.x86.avx512.cvtusi642ss
    22, // llvm.x86.avx512.dbpsadbw.128
    22, // llvm.x86.avx512.dbpsadbw.256
    22, // llvm.x86.avx512.dbpsadbw.512
    22, // llvm.x86.avx512.div.pd.512
    22, // llvm.x86.avx512.div.ps.512
    89, // llvm.x86.avx512.exp2.pd
    89, // llvm.x86.avx512.exp2.ps
    59, // llvm.x86.avx512.fpclass.pd.128
    59, // llvm.x86.avx512.fpclass.pd.256
    59, // llvm.x86.avx512.fpclass.pd.512
    59, // llvm.x86.avx512.fpclass.ps.128
    59, // llvm.x86.avx512.fpclass.ps.256
    59, // llvm.x86.avx512.fpclass.ps.512
    188, // llvm.x86.avx512.gather.dpd.512
    188, // llvm.x86.avx512.gather.dpi.512
    188, // llvm.x86.avx512.gather.dpq.512
    188, // llvm.x86.avx512.gather.dps.512
    188, // llvm.x86.avx512.gather.qpd.512
    188, // llvm.x86.avx512.gather.qpi.512
    188, // llvm.x86.avx512.gather.qpq.512
    188, // llvm.x86.avx512.gather.qps.512
    188, // llvm.x86.avx512.gather3div2.df
    188, // llvm.x86.avx512.gather3div2.di
    188, // llvm.x86.avx512.gather3div4.df
    188, // llvm.x86.avx512.gather3div4.di
    188, // llvm.x86.avx512.gather3div4.sf
    188, // llvm.x86.avx512.gather3div4.si
    188, // llvm.x86.avx512.gather3div8.sf
    188, // llvm.x86.avx512.gather3div8.si
    188, // llvm.x86.avx512.gather3siv2.df
    188, // llvm.x86.avx512.gather3siv2.di
    188, // llvm.x86.avx512.gather3siv4.df
    188, // llvm.x86.avx512.gather3siv4.di
    188, // llvm.x86.avx512.gather3siv4.sf
    188, // llvm.x86.avx512.gather3siv4.si
    188, // llvm.x86.avx512.gather3siv8.sf
    188, // llvm.x86.avx512.gather3siv8.si
    345, // llvm.x86.avx512.gatherpf.dpd.512
    345, // llvm.x86.avx512.gatherpf.dps.512
    345, // llvm.x86.avx512.gatherpf.qpd.512
    345, // llvm.x86.avx512.gatherpf.qps.512
    2, // llvm.x86.avx512.kadd.b
    2, // llvm.x86.avx512.kadd.d
    2, // llvm.x86.avx512.kadd.q
    2, // llvm.x86.avx512.kadd.w
    2, // llvm.x86.avx512.ktestc.b
    2, // llvm.x86.avx512.ktestc.d
    2, // llvm.x86.avx512.ktestc.q
    2, // llvm.x86.avx512.ktestc.w
    2, // llvm.x86.avx512.ktestz.b
    2, // llvm.x86.avx512.ktestz.d
    2, // llvm.x86.avx512.ktestz.q
    2, // llvm.x86.avx512.ktestz.w
    91, // llvm.x86.avx512.mask.add.sd.round
    91, // llvm.x86.avx512.mask.add.ss.round
    22, // llvm.x86.avx512.mask.cmp.pd.128
    22, // llvm.x86.avx512.mask.cmp.pd.256
    346, // llvm.x86.avx512.mask.cmp.pd.512
    22, // llvm.x86.avx512.mask.cmp.ps.128
    22, // llvm.x86.avx512.mask.cmp.ps.256
    346, // llvm.x86.avx512.mask.cmp.ps.512
    346, // llvm.x86.avx512.mask.cmp.sd
    346, // llvm.x86.avx512.mask.cmp.ss
    2, // llvm.x86.avx512.mask.compress
    2, // llvm.x86.avx512.mask.cvtpd2dq.128
    89, // llvm.x86.avx512.mask.cvtpd2dq.512
    2, // llvm.x86.avx512.mask.cvtpd2ps
    89, // llvm.x86.avx512.mask.cvtpd2ps.512
    2, // llvm.x86.avx512.mask.cvtpd2qq.128
    2, // llvm.x86.avx512.mask.cvtpd2qq.256
    89, // llvm.x86.avx512.mask.cvtpd2qq.512
    2, // llvm.x86.avx512.mask.cvtpd2udq.128
    2, // llvm.x86.avx512.mask.cvtpd2udq.256
    89, // llvm.x86.avx512.mask.cvtpd2udq.512
    2, // llvm.x86.avx512.mask.cvtpd2uqq.128
    2, // llvm.x86.avx512.mask.cvtpd2uqq.256
    89, // llvm.x86.avx512.mask.cvtpd2uqq.512
    2, // llvm.x86.avx512.mask.cvtps2dq.128
    2, // llvm.x86.avx512.mask.cvtps2dq.256
    89, // llvm.x86.avx512.mask.cvtps2dq.512
    89, // llvm.x86.avx512.mask.cvtps2pd.512
    2, // llvm.x86.avx512.mask.cvtps2qq.128
    2, // llvm.x86.avx512.mask.cvtps2qq.256
    89, // llvm.x86.avx512.mask.cvtps2qq.512
    2, // llvm.x86.avx512.mask.cvtps2udq.128
    2, // llvm.x86.avx512.mask.cvtps2udq.256
    89, // llvm.x86.avx512.mask.cvtps2udq.512
    2, // llvm.x86.avx512.mask.cvtps2uqq.128
    2, // llvm.x86.avx512.mask.cvtps2uqq.256
    89, // llvm.x86.avx512.mask.cvtps2uqq.512
    2, // llvm.x86.avx512.mask.cvtqq2ps.128
    91, // llvm.x86.avx512.mask.cvtsd2ss.round
    91, // llvm.x86.avx512.mask.cvtss2sd.round
    2, // llvm.x86.avx512.mask.cvttpd2dq.128
    89, // llvm.x86.avx512.mask.cvttpd2dq.512
    2, // llvm.x86.avx512.mask.cvttpd2qq.128
    2, // llvm.x86.avx512.mask.cvttpd2qq.256
    89, // llvm.x86.avx512.mask.cvttpd2qq.512
    2, // llvm.x86.avx512.mask.cvttpd2udq.128
    2, // llvm.x86.avx512.mask.cvttpd2udq.256
    89, // llvm.x86.avx512.mask.cvttpd2udq.512
    2, // llvm.x86.avx512.mask.cvttpd2uqq.128
    2, // llvm.x86.avx512.mask.cvttpd2uqq.256
    89, // llvm.x86.avx512.mask.cvttpd2uqq.512
    89, // llvm.x86.avx512.mask.cvttps2dq.512
    2, // llvm.x86.avx512.mask.cvttps2qq.128
    2, // llvm.x86.avx512.mask.cvttps2qq.256
    89, // llvm.x86.avx512.mask.cvttps2qq.512
    2, // llvm.x86.avx512.mask.cvttps2udq.128
    2, // llvm.x86.avx512.mask.cvttps2udq.256
    89, // llvm.x86.avx512.mask.cvttps2udq.512
    2, // llvm.x86.avx512.mask.cvttps2uqq.128
    2, // llvm.x86.avx512.mask.cvttps2uqq.256
    89, // llvm.x86.avx512.mask.cvttps2uqq.512
    2, // llvm.x86.avx512.mask.cvtuqq2ps.128
    91, // llvm.x86.avx512.mask.div.sd.round
    91, // llvm.x86.avx512.mask.div.ss.round
    2, // llvm.x86.avx512.mask.expand
    89, // llvm.x86.avx512.mask.fixupimm.pd.128
    89, // llvm.x86.avx512.mask.fixupimm.pd.256
    289, // llvm.x86.avx512.mask.fixupimm.pd.512
    89, // llvm.x86.avx512.mask.fixupimm.ps.128
    89, // llvm.x86.avx512.mask.fixupimm.ps.256
    289, // llvm.x86.avx512.mask.fixupimm.ps.512
    289, // llvm.x86.avx512.mask.fixupimm.sd
    289, // llvm.x86.avx512.mask.fixupimm.ss
    59, // llvm.x86.avx512.mask.fpclass.sd
    59, // llvm.x86.avx512.mask.fpclass.ss
    188, // llvm.x86.avx512.mask.gather.dpd.512
    188, // llvm.x86.avx512.mask.gather.dpi.512
    188, // llvm.x86.avx512.mask.gather.dpq.512
    188, // llvm.x86.avx512.mask.gather.dps.512
    188, // llvm.x86.avx512.mask.gather.qpd.512
    188, // llvm.x86.avx512.mask.gather.qpi.512
    188, // llvm.x86.avx512.mask.gather.qpq.512
    188, // llvm.x86.avx512.mask.gather.qps.512
    188, // llvm.x86.avx512.mask.gather3div2.df
    188, // llvm.x86.avx512.mask.gather3div2.di
    188, // llvm.x86.avx512.mask.gather3div4.df
    188, // llvm.x86.avx512.mask.gather3div4.di
    188, // llvm.x86.avx512.mask.gather3div4.sf
    188, // llvm.x86.avx512.mask.gather3div4.si
    188, // llvm.x86.avx512.mask.gather3div8.sf
    188, // llvm.x86.avx512.mask.gather3div8.si
    188, // llvm.x86.avx512.mask.gather3siv2.df
    188, // llvm.x86.avx512.mask.gather3siv2.di
    188, // llvm.x86.avx512.mask.gather3siv4.df
    188, // llvm.x86.avx512.mask.gather3siv4.di
    188, // llvm.x86.avx512.mask.gather3siv4.sf
    188, // llvm.x86.avx512.mask.gather3siv4.si
    188, // llvm.x86.avx512.mask.gather3siv8.sf
    188, // llvm.x86.avx512.mask.gather3siv8.si
    2, // llvm.x86.avx512.mask.getexp.pd.128
    2, // llvm.x86.avx512.mask.getexp.pd.256
    89, // llvm.x86.avx512.mask.getexp.pd.512
    2, // llvm.x86.avx512.mask.getexp.ps.128
    2, // llvm.x86.avx512.mask.getexp.ps.256
    89, // llvm.x86.avx512.mask.getexp.ps.512
    91, // llvm.x86.avx512.mask.getexp.sd
    91, // llvm.x86.avx512.mask.getexp.ss
    59, // llvm.x86.avx512.mask.getmant.pd.128
    59, // llvm.x86.avx512.mask.getmant.pd.256
    347, // llvm.x86.avx512.mask.getmant.pd.512
    59, // llvm.x86.avx512.mask.getmant.ps.128
    59, // llvm.x86.avx512.mask.getmant.ps.256
    347, // llvm.x86.avx512.mask.getmant.ps.512
    348, // llvm.x86.avx512.mask.getmant.sd
    348, // llvm.x86.avx512.mask.getmant.ss
    91, // llvm.x86.avx512.mask.max.sd.round
    91, // llvm.x86.avx512.mask.max.ss.round
    91, // llvm.x86.avx512.mask.min.sd.round
    91, // llvm.x86.avx512.mask.min.ss.round
    91, // llvm.x86.avx512.mask.mul.sd.round
    91, // llvm.x86.avx512.mask.mul.ss.round
    2, // llvm.x86.avx512.mask.pmov.db.128
    2, // llvm.x86.avx512.mask.pmov.db.256
    2, // llvm.x86.avx512.mask.pmov.db.512
    219, // llvm.x86.avx512.mask.pmov.db.mem.128
    219, // llvm.x86.avx512.mask.pmov.db.mem.256
    219, // llvm.x86.avx512.mask.pmov.db.mem.512
    2, // llvm.x86.avx512.mask.pmov.dw.128
    2, // llvm.x86.avx512.mask.pmov.dw.256
    2, // llvm.x86.avx512.mask.pmov.dw.512
    219, // llvm.x86.avx512.mask.pmov.dw.mem.128
    219, // llvm.x86.avx512.mask.pmov.dw.mem.256
    219, // llvm.x86.avx512.mask.pmov.dw.mem.512
    2, // llvm.x86.avx512.mask.pmov.qb.128
    2, // llvm.x86.avx512.mask.pmov.qb.256
    2, // llvm.x86.avx512.mask.pmov.qb.512
    219, // llvm.x86.avx512.mask.pmov.qb.mem.128
    219, // llvm.x86.avx512.mask.pmov.qb.mem.256
    219, // llvm.x86.avx512.mask.pmov.qb.mem.512
    2, // llvm.x86.avx512.mask.pmov.qd.128
    219, // llvm.x86.avx512.mask.pmov.qd.mem.128
    219, // llvm.x86.avx512.mask.pmov.qd.mem.256
    219, // llvm.x86.avx512.mask.pmov.qd.mem.512
    2, // llvm.x86.avx512.mask.pmov.qw.128
    2, // llvm.x86.avx512.mask.pmov.qw.256
    2, // llvm.x86.avx512.mask.pmov.qw.512
    219, // llvm.x86.avx512.mask.pmov.qw.mem.128
    219, // llvm.x86.avx512.mask.pmov.qw.mem.256
    219, // llvm.x86.avx512.mask.pmov.qw.mem.512
    2, // llvm.x86.avx512.mask.pmov.wb.128
    219, // llvm.x86.avx512.mask.pmov.wb.mem.128
    219, // llvm.x86.avx512.mask.pmov.wb.mem.256
    219, // llvm.x86.avx512.mask.pmov.wb.mem.512
    2, // llvm.x86.avx512.mask.pmovs.db.128
    2, // llvm.x86.avx512.mask.pmovs.db.256
    2, // llvm.x86.avx512.mask.pmovs.db.512
    219, // llvm.x86.avx512.mask.pmovs.db.mem.128
    219, // llvm.x86.avx512.mask.pmovs.db.mem.256
    219, // llvm.x86.avx512.mask.pmovs.db.mem.512
    2, // llvm.x86.avx512.mask.pmovs.dw.128
    2, // llvm.x86.avx512.mask.pmovs.dw.256
    2, // llvm.x86.avx512.mask.pmovs.dw.512
    219, // llvm.x86.avx512.mask.pmovs.dw.mem.128
    219, // llvm.x86.avx512.mask.pmovs.dw.mem.256
    219, // llvm.x86.avx512.mask.pmovs.dw.mem.512
    2, // llvm.x86.avx512.mask.pmovs.qb.128
    2, // llvm.x86.avx512.mask.pmovs.qb.256
    2, // llvm.x86.avx512.mask.pmovs.qb.512
    219, // llvm.x86.avx512.mask.pmovs.qb.mem.128
    219, // llvm.x86.avx512.mask.pmovs.qb.mem.256
    219, // llvm.x86.avx512.mask.pmovs.qb.mem.512
    2, // llvm.x86.avx512.mask.pmovs.qd.128
    2, // llvm.x86.avx512.mask.pmovs.qd.256
    2, // llvm.x86.avx512.mask.pmovs.qd.512
    219, // llvm.x86.avx512.mask.pmovs.qd.mem.128
    219, // llvm.x86.avx512.mask.pmovs.qd.mem.256
    219, // llvm.x86.avx512.mask.pmovs.qd.mem.512
    2, // llvm.x86.avx512.mask.pmovs.qw.128
    2, // llvm.x86.avx512.mask.pmovs.qw.256
    2, // llvm.x86.avx512.mask.pmovs.qw.512
    219, // llvm.x86.avx512.mask.pmovs.qw.mem.128
    219, // llvm.x86.avx512.mask.pmovs.qw.mem.256
    219, // llvm.x86.avx512.mask.pmovs.qw.mem.512
    2, // llvm.x86.avx512.mask.pmovs.wb.128
    2, // llvm.x86.avx512.mask.pmovs.wb.256
    2, // llvm.x86.avx512.mask.pmovs.wb.512
    219, // llvm.x86.avx512.mask.pmovs.wb.mem.128
    219, // llvm.x86.avx512.mask.pmovs.wb.mem.256
    219, // llvm.x86.avx512.mask.pmovs.wb.mem.512
    2, // llvm.x86.avx512.mask.pmovus.db.128
    2, // llvm.x86.avx512.mask.pmovus.db.256
    2, // llvm.x86.avx512.mask.pmovus.db.512
    219, // llvm.x86.avx512.mask.pmovus.db.mem.128
    219, // llvm.x86.avx512.mask.pmovus.db.mem.256
    219, // llvm.x86.avx512.mask.pmovus.db.mem.512
    2, // llvm.x86.avx512.mask.pmovus.dw.128
    2, // llvm.x86.avx512.mask.pmovus.dw.256
    2, // llvm.x86.avx512.mask.pmovus.dw.512
    219, // llvm.x86.avx512.mask.pmovus.dw.mem.128
    219, // llvm.x86.avx512.mask.pmovus.dw.mem.256
    219, // llvm.x86.avx512.mask.pmovus.dw.mem.512
    2, // llvm.x86.avx512.mask.pmovus.qb.128
    2, // llvm.x86.avx512.mask.pmovus.qb.256
    2, // llvm.x86.avx512.mask.pmovus.qb.512
    219, // llvm.x86.avx512.mask.pmovus.qb.mem.128
    219, // llvm.x86.avx512.mask.pmovus.qb.mem.256
    219, // llvm.x86.avx512.mask.pmovus.qb.mem.512
    2, // llvm.x86.avx512.mask.pmovus.qd.128
    2, // llvm.x86.avx512.mask.pmovus.qd.256
    2, // llvm.x86.avx512.mask.pmovus.qd.512
    219, // llvm.x86.avx512.mask.pmovus.qd.mem.128
    219, // llvm.x86.avx512.mask.pmovus.qd.mem.256
    219, // llvm.x86.avx512.mask.pmovus.qd.mem.512
    2, // llvm.x86.avx512.mask.pmovus.qw.128
    2, // llvm.x86.avx512.mask.pmovus.qw.256
    2, // llvm.x86.avx512.mask.pmovus.qw.512
    219, // llvm.x86.avx512.mask.pmovus.qw.mem.128
    219, // llvm.x86.avx512.mask.pmovus.qw.mem.256
    219, // llvm.x86.avx512.mask.pmovus.qw.mem.512
    2, // llvm.x86.avx512.mask.pmovus.wb.128
    2, // llvm.x86.avx512.mask.pmovus.wb.256
    2, // llvm.x86.avx512.mask.pmovus.wb.512
    219, // llvm.x86.avx512.mask.pmovus.wb.mem.128
    219, // llvm.x86.avx512.mask.pmovus.wb.mem.256
    219, // llvm.x86.avx512.mask.pmovus.wb.mem.512
    22, // llvm.x86.avx512.mask.range.pd.128
    22, // llvm.x86.avx512.mask.range.pd.256
    348, // llvm.x86.avx512.mask.range.pd.512
    22, // llvm.x86.avx512.mask.range.ps.128
    22, // llvm.x86.avx512.mask.range.ps.256
    348, // llvm.x86.avx512.mask.range.ps.512
    349, // llvm.x86.avx512.mask.range.sd
    349, // llvm.x86.avx512.mask.range.ss
    59, // llvm.x86.avx512.mask.reduce.pd.128
    59, // llvm.x86.avx512.mask.reduce.pd.256
    347, // llvm.x86.avx512.mask.reduce.pd.512
    59, // llvm.x86.avx512.mask.reduce.ps.128
    59, // llvm.x86.avx512.mask.reduce.ps.256
    347, // llvm.x86.avx512.mask.reduce.ps.512
    349, // llvm.x86.avx512.mask.reduce.sd
    349, // llvm.x86.avx512.mask.reduce.ss
    59, // llvm.x86.avx512.mask.rndscale.pd.128
    59, // llvm.x86.avx512.mask.rndscale.pd.256
    347, // llvm.x86.avx512.mask.rndscale.pd.512
    59, // llvm.x86.avx512.mask.rndscale.ps.128
    59, // llvm.x86.avx512.mask.rndscale.ps.256
    347, // llvm.x86.avx512.mask.rndscale.ps.512
    349, // llvm.x86.avx512.mask.rndscale.sd
    349, // llvm.x86.avx512.mask.rndscale.ss
    2, // llvm.x86.avx512.mask.scalef.pd.128
    2, // llvm.x86.avx512.mask.scalef.pd.256
    91, // llvm.x86.avx512.mask.scalef.pd.512
    2, // llvm.x86.avx512.mask.scalef.ps.128
    2, // llvm.x86.avx512.mask.scalef.ps.256
    91, // llvm.x86.avx512.mask.scalef.ps.512
    91, // llvm.x86.avx512.mask.scalef.sd
    91, // llvm.x86.avx512.mask.scalef.ss
    350, // llvm.x86.avx512.mask.scatter.dpd.512
    350, // llvm.x86.avx512.mask.scatter.dpi.512
    350, // llvm.x86.avx512.mask.scatter.dpq.512
    350, // llvm.x86.avx512.mask.scatter.dps.512
    350, // llvm.x86.avx512.mask.scatter.qpd.512
    350, // llvm.x86.avx512.mask.scatter.qpi.512
    350, // llvm.x86.avx512.mask.scatter.qpq.512
    350, // llvm.x86.avx512.mask.scatter.qps.512
    350, // llvm.x86.avx512.mask.scatterdiv2.df
    350, // llvm.x86.avx512.mask.scatterdiv2.di
    350, // llvm.x86.avx512.mask.scatterdiv4.df
    350, // llvm.x86.avx512.mask.scatterdiv4.di
    350, // llvm.x86.avx512.mask.scatterdiv4.sf
    350, // llvm.x86.avx512.mask.scatterdiv4.si
    350, // llvm.x86.avx512.mask.scatterdiv8.sf
    350, // llvm.x86.avx512.mask.scatterdiv8.si
    350, // llvm.x86.avx512.mask.scattersiv2.df
    350, // llvm.x86.avx512.mask.scattersiv2.di
    350, // llvm.x86.avx512.mask.scattersiv4.df
    350, // llvm.x86.avx512.mask.scattersiv4.di
    350, // llvm.x86.avx512.mask.scattersiv4.sf
    350, // llvm.x86.avx512.mask.scattersiv4.si
    350, // llvm.x86.avx512.mask.scattersiv8.sf
    350, // llvm.x86.avx512.mask.scattersiv8.si
    91, // llvm.x86.avx512.mask.sqrt.sd
    91, // llvm.x86.avx512.mask.sqrt.ss
    91, // llvm.x86.avx512.mask.sub.sd.round
    91, // llvm.x86.avx512.mask.sub.ss.round
    89, // llvm.x86.avx512.mask.vcvtph2ps.512
    59, // llvm.x86.avx512.mask.vcvtps2ph.128
    59, // llvm.x86.avx512.mask.vcvtps2ph.256
    59, // llvm.x86.avx512.mask.vcvtps2ph.512
    89, // llvm.x86.avx512.maskz.fixupimm.pd.128
    89, // llvm.x86.avx512.maskz.fixupimm.pd.256
    289, // llvm.x86.avx512.maskz.fixupimm.pd.512
    89, // llvm.x86.avx512.maskz.fixupimm.ps.128
    89, // llvm.x86.avx512.maskz.fixupimm.ps.256
    289, // llvm.x86.avx512.maskz.fixupimm.ps.512
    289, // llvm.x86.avx512.maskz.fixupimm.sd
    289, // llvm.x86.avx512.maskz.fixupimm.ss
    22, // llvm.x86.avx512.max.pd.512
    22, // llvm.x86.avx512.max.ps.512
    22, // llvm.x86.avx512.min.pd.512
    22, // llvm.x86.avx512.min.ps.512
    22, // llvm.x86.avx512.mul.pd.512
    22, // llvm.x86.avx512.mul.ps.512
    2, // llvm.x86.avx512.packssdw.512
    2, // llvm.x86.avx512.packsswb.512
    2, // llvm.x86.avx512.packusdw.512
    2, // llvm.x86.avx512.packuswb.512
    2, // llvm.x86.avx512.pavg.b.512
    2, // llvm.x86.avx512.pavg.w.512
    2, // llvm.x86.avx512.permvar.df.256
    2, // llvm.x86.avx512.permvar.df.512
    2, // llvm.x86.avx512.permvar.di.256
    2, // llvm.x86.avx512.permvar.di.512
    2, // llvm.x86.avx512.permvar.hi.128
    2, // llvm.x86.avx512.permvar.hi.256
    2, // llvm.x86.avx512.permvar.hi.512
    2, // llvm.x86.avx512.permvar.qi.128
    2, // llvm.x86.avx512.permvar.qi.256
    2, // llvm.x86.avx512.permvar.qi.512
    2, // llvm.x86.avx512.permvar.sf.512
    2, // llvm.x86.avx512.permvar.si.512
    2, // llvm.x86.avx512.pmaddubs.w.512
    2, // llvm.x86.avx512.pmaddw.d.512
    2, // llvm.x86.avx512.pmul.hr.sw.512
    2, // llvm.x86.avx512.pmulh.w.512
    2, // llvm.x86.avx512.pmulhu.w.512
    2, // llvm.x86.avx512.pmultishift.qb.128
    2, // llvm.x86.avx512.pmultishift.qb.256
    2, // llvm.x86.avx512.pmultishift.qb.512
    2, // llvm.x86.avx512.psad.bw.512
    2, // llvm.x86.avx512.pshuf.b.512
    2, // llvm.x86.avx512.psll.d.512
    2, // llvm.x86.avx512.psll.q.512
    2, // llvm.x86.avx512.psll.w.512
    2, // llvm.x86.avx512.pslli.d.512
    2, // llvm.x86.avx512.pslli.q.512
    2, // llvm.x86.avx512.pslli.w.512
    2, // llvm.x86.avx512.psllv.d.512
    2, // llvm.x86.avx512.psllv.q.512
    2, // llvm.x86.avx512.psllv.w.128
    2, // llvm.x86.avx512.psllv.w.256
    2, // llvm.x86.avx512.psllv.w.512
    2, // llvm.x86.avx512.psra.d.512
    2, // llvm.x86.avx512.psra.q.128
    2, // llvm.x86.avx512.psra.q.256
    2, // llvm.x86.avx512.psra.q.512
    2, // llvm.x86.avx512.psra.w.512
    2, // llvm.x86.avx512.psrai.d.512
    2, // llvm.x86.avx512.psrai.q.128
    2, // llvm.x86.avx512.psrai.q.256
    2, // llvm.x86.avx512.psrai.q.512
    2, // llvm.x86.avx512.psrai.w.512
    2, // llvm.x86.avx512.psrav.d.512
    2, // llvm.x86.avx512.psrav.q.128
    2, // llvm.x86.avx512.psrav.q.256
    2, // llvm.x86.avx512.psrav.q.512
    2, // llvm.x86.avx512.psrav.w.128
    2, // llvm.x86.avx512.psrav.w.256
    2, // llvm.x86.avx512.psrav.w.512
    2, // llvm.x86.avx512.psrl.d.512
    2, // llvm.x86.avx512.psrl.q.512
    2, // llvm.x86.avx512.psrl.w.512
    2, // llvm.x86.avx512.psrli.d.512
    2, // llvm.x86.avx512.psrli.q.512
    2, // llvm.x86.avx512.psrli.w.512
    2, // llvm.x86.avx512.psrlv.d.512
    2, // llvm.x86.avx512.psrlv.q.512
    2, // llvm.x86.avx512.psrlv.w.128
    2, // llvm.x86.avx512.psrlv.w.256
    2, // llvm.x86.avx512.psrlv.w.512
    89, // llvm.x86.avx512.pternlog.d.128
    89, // llvm.x86.avx512.pternlog.d.256
    89, // llvm.x86.avx512.pternlog.d.512
    89, // llvm.x86.avx512.pternlog.q.128
    89, // llvm.x86.avx512.pternlog.q.256
    89, // llvm.x86.avx512.pternlog.q.512
    2, // llvm.x86.avx512.rcp14.pd.128
    2, // llvm.x86.avx512.rcp14.pd.256
    2, // llvm.x86.avx512.rcp14.pd.512
    2, // llvm.x86.avx512.rcp14.ps.128
    2, // llvm.x86.avx512.rcp14.ps.256
    2, // llvm.x86.avx512.rcp14.ps.512
    2, // llvm.x86.avx512.rcp14.sd
    2, // llvm.x86.avx512.rcp14.ss
    89, // llvm.x86.avx512.rcp28.pd
    89, // llvm.x86.avx512.rcp28.ps
    91, // llvm.x86.avx512.rcp28.sd
    91, // llvm.x86.avx512.rcp28.ss
    2, // llvm.x86.avx512.rsqrt14.pd.128
    2, // llvm.x86.avx512.rsqrt14.pd.256
    2, // llvm.x86.avx512.rsqrt14.pd.512
    2, // llvm.x86.avx512.rsqrt14.ps.128
    2, // llvm.x86.avx512.rsqrt14.ps.256
    2, // llvm.x86.avx512.rsqrt14.ps.512
    2, // llvm.x86.avx512.rsqrt14.sd
    2, // llvm.x86.avx512.rsqrt14.ss
    89, // llvm.x86.avx512.rsqrt28.pd
    89, // llvm.x86.avx512.rsqrt28.ps
    91, // llvm.x86.avx512.rsqrt28.sd
    91, // llvm.x86.avx512.rsqrt28.ss
    350, // llvm.x86.avx512.scatter.dpd.512
    350, // llvm.x86.avx512.scatter.dpi.512
    350, // llvm.x86.avx512.scatter.dpq.512
    350, // llvm.x86.avx512.scatter.dps.512
    350, // llvm.x86.avx512.scatter.qpd.512
    350, // llvm.x86.avx512.scatter.qpi.512
    350, // llvm.x86.avx512.scatter.qpq.512
    350, // llvm.x86.avx512.scatter.qps.512
    350, // llvm.x86.avx512.scatterdiv2.df
    350, // llvm.x86.avx512.scatterdiv2.di
    350, // llvm.x86.avx512.scatterdiv4.df
    350, // llvm.x86.avx512.scatterdiv4.di
    350, // llvm.x86.avx512.scatterdiv4.sf
    350, // llvm.x86.avx512.scatterdiv4.si
    350, // llvm.x86.avx512.scatterdiv8.sf
    350, // llvm.x86.avx512.scatterdiv8.si
    345, // llvm.x86.avx512.scatterpf.dpd.512
    345, // llvm.x86.avx512.scatterpf.dps.512
    345, // llvm.x86.avx512.scatterpf.qpd.512
    345, // llvm.x86.avx512.scatterpf.qps.512
    350, // llvm.x86.avx512.scattersiv2.df
    350, // llvm.x86.avx512.scattersiv2.di
    350, // llvm.x86.avx512.scattersiv4.df
    350, // llvm.x86.avx512.scattersiv4.di
    350, // llvm.x86.avx512.scattersiv4.sf
    350, // llvm.x86.avx512.scattersiv4.si
    350, // llvm.x86.avx512.scattersiv8.sf
    350, // llvm.x86.avx512.scattersiv8.si
    59, // llvm.x86.avx512.sitofp.round
    59, // llvm.x86.avx512.sqrt.pd.512
    59, // llvm.x86.avx512.sqrt.ps.512
    22, // llvm.x86.avx512.sub.pd.512
    22, // llvm.x86.avx512.sub.ps.512
    59, // llvm.x86.avx512.uitofp.round
    227, // llvm.x86.avx512.vcomi.sd
    227, // llvm.x86.avx512.vcomi.ss
    59, // llvm.x86.avx512.vcvtsd2si32
    59, // llvm.x86.avx512.vcvtsd2si64
    59, // llvm.x86.avx512.vcvtsd2usi32
    59, // llvm.x86.avx512.vcvtsd2usi64
    59, // llvm.x86.avx512.vcvtss2si32
    59, // llvm.x86.avx512.vcvtss2si64
    59, // llvm.x86.avx512.vcvtss2usi32
    59, // llvm.x86.avx512.vcvtss2usi64
    89, // llvm.x86.avx512.vfmadd.f32
    89, // llvm.x86.avx512.vfmadd.f64
    89, // llvm.x86.avx512.vfmadd.pd.512
    89, // llvm.x86.avx512.vfmadd.ps.512
    89, // llvm.x86.avx512.vfmaddsub.pd.512
    89, // llvm.x86.avx512.vfmaddsub.ps.512
    2, // llvm.x86.avx512.vp2intersect.d.128
    2, // llvm.x86.avx512.vp2intersect.d.256
    2, // llvm.x86.avx512.vp2intersect.d.512
    2, // llvm.x86.avx512.vp2intersect.q.128
    2, // llvm.x86.avx512.vp2intersect.q.256
    2, // llvm.x86.avx512.vp2intersect.q.512
    2, // llvm.x86.avx512.vpdpbusd.128
    2, // llvm.x86.avx512.vpdpbusd.256
    2, // llvm.x86.avx512.vpdpbusd.512
    2, // llvm.x86.avx512.vpdpbusds.128
    2, // llvm.x86.avx512.vpdpbusds.256
    2, // llvm.x86.avx512.vpdpbusds.512
    2, // llvm.x86.avx512.vpdpwssd.128
    2, // llvm.x86.avx512.vpdpwssd.256
    2, // llvm.x86.avx512.vpdpwssd.512
    2, // llvm.x86.avx512.vpdpwssds.128
    2, // llvm.x86.avx512.vpdpwssds.256
    2, // llvm.x86.avx512.vpdpwssds.512
    2, // llvm.x86.avx512.vpermi2var.d.128
    2, // llvm.x86.avx512.vpermi2var.d.256
    2, // llvm.x86.avx512.vpermi2var.d.512
    2, // llvm.x86.avx512.vpermi2var.hi.128
    2, // llvm.x86.avx512.vpermi2var.hi.256
    2, // llvm.x86.avx512.vpermi2var.hi.512
    2, // llvm.x86.avx512.vpermi2var.pd.128
    2, // llvm.x86.avx512.vpermi2var.pd.256
    2, // llvm.x86.avx512.vpermi2var.pd.512
    2, // llvm.x86.avx512.vpermi2var.ps.128
    2, // llvm.x86.avx512.vpermi2var.ps.256
    2, // llvm.x86.avx512.vpermi2var.ps.512
    2, // llvm.x86.avx512.vpermi2var.q.128
    2, // llvm.x86.avx512.vpermi2var.q.256
    2, // llvm.x86.avx512.vpermi2var.q.512
    2, // llvm.x86.avx512.vpermi2var.qi.128
    2, // llvm.x86.avx512.vpermi2var.qi.256
    2, // llvm.x86.avx512.vpermi2var.qi.512
    2, // llvm.x86.avx512.vpermilvar.pd.512
    2, // llvm.x86.avx512.vpermilvar.ps.512
    2, // llvm.x86.avx512.vpmadd52h.uq.128
    2, // llvm.x86.avx512.vpmadd52h.uq.256
    2, // llvm.x86.avx512.vpmadd52h.uq.512
    2, // llvm.x86.avx512.vpmadd52l.uq.128
    2, // llvm.x86.avx512.vpmadd52l.uq.256
    2, // llvm.x86.avx512.vpmadd52l.uq.512
    2, // llvm.x86.avx512.vpshufbitqmb.128
    2, // llvm.x86.avx512.vpshufbitqmb.256
    2, // llvm.x86.avx512.vpshufbitqmb.512
    2, // llvm.x86.avx512bf16.cvtne2ps2bf16.128
    2, // llvm.x86.avx512bf16.cvtne2ps2bf16.256
    2, // llvm.x86.avx512bf16.cvtne2ps2bf16.512
    2, // llvm.x86.avx512bf16.cvtneps2bf16.256
    2, // llvm.x86.avx512bf16.cvtneps2bf16.512
    2, // llvm.x86.avx512bf16.dpbf16ps.128
    2, // llvm.x86.avx512bf16.dpbf16ps.256
    2, // llvm.x86.avx512bf16.dpbf16ps.512
    2, // llvm.x86.avx512bf16.mask.cvtneps2bf16.128
    22, // llvm.x86.avx512fp16.add.ph.512
    22, // llvm.x86.avx512fp16.div.ph.512
    59, // llvm.x86.avx512fp16.fpclass.ph.128
    59, // llvm.x86.avx512fp16.fpclass.ph.256
    59, // llvm.x86.avx512fp16.fpclass.ph.512
    91, // llvm.x86.avx512fp16.mask.add.sh.round
    22, // llvm.x86.avx512fp16.mask.cmp.ph.128
    22, // llvm.x86.avx512fp16.mask.cmp.ph.256
    346, // llvm.x86.avx512fp16.mask.cmp.ph.512
    346, // llvm.x86.avx512fp16.mask.cmp.sh
    91, // llvm.x86.avx512fp16.mask.div.sh.round
    59, // llvm.x86.avx512fp16.mask.fpclass.sh
    2, // llvm.x86.avx512fp16.mask.getexp.ph.128
    2, // llvm.x86.avx512fp16.mask.getexp.ph.256
    89, // llvm.x86.avx512fp16.mask.getexp.ph.512
    91, // llvm.x86.avx512fp16.mask.getexp.sh
    59, // llvm.x86.avx512fp16.mask.getmant.ph.128
    59, // llvm.x86.avx512fp16.mask.getmant.ph.256
    347, // llvm.x86.avx512fp16.mask.getmant.ph.512
    348, // llvm.x86.avx512fp16.mask.getmant.sh
    91, // llvm.x86.avx512fp16.mask.max.sh.round
    91, // llvm.x86.avx512fp16.mask.min.sh.round
    91, // llvm.x86.avx512fp16.mask.mul.sh.round
    2, // llvm.x86.avx512fp16.mask.rcp.ph.128
    2, // llvm.x86.avx512fp16.mask.rcp.ph.256
    2, // llvm.x86.avx512fp16.mask.rcp.ph.512
    2, // llvm.x86.avx512fp16.mask.rcp.sh
    59, // llvm.x86.avx512fp16.mask.reduce.ph.128
    59, // llvm.x86.avx512fp16.mask.reduce.ph.256
    347, // llvm.x86.avx512fp16.mask.reduce.ph.512
    349, // llvm.x86.avx512fp16.mask.reduce.sh
    59, // llvm.x86.avx512fp16.mask.rndscale.ph.128
    59, // llvm.x86.avx512fp16.mask.rndscale.ph.256
    347, // llvm.x86.avx512fp16.mask.rndscale.ph.512
    349, // llvm.x86.avx512fp16.mask.rndscale.sh
    2, // llvm.x86.avx512fp16.mask.rsqrt.ph.128
    2, // llvm.x86.avx512fp16.mask.rsqrt.ph.256
    2, // llvm.x86.avx512fp16.mask.rsqrt.ph.512
    2, // llvm.x86.avx512fp16.mask.rsqrt.sh
    2, // llvm.x86.avx512fp16.mask.scalef.ph.128
    2, // llvm.x86.avx512fp16.mask.scalef.ph.256
    91, // llvm.x86.avx512fp16.mask.scalef.ph.512
    91, // llvm.x86.avx512fp16.mask.scalef.sh
    91, // llvm.x86.avx512fp16.mask.sqrt.sh
    91, // llvm.x86.avx512fp16.mask.sub.sh.round
    2, // llvm.x86.avx512fp16.mask.vcvtdq2ph.128
    2, // llvm.x86.avx512fp16.mask.vcvtpd2ph.128
    2, // llvm.x86.avx512fp16.mask.vcvtpd2ph.256
    89, // llvm.x86.avx512fp16.mask.vcvtpd2ph.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2dq.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2dq.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2dq.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2pd.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2pd.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2pd.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2psx.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2psx.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2psx.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2qq.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2qq.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2qq.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2udq.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2udq.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2udq.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2uqq.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2uqq.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2uqq.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2uw.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2uw.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2uw.512
    2, // llvm.x86.avx512fp16.mask.vcvtph2w.128
    2, // llvm.x86.avx512fp16.mask.vcvtph2w.256
    89, // llvm.x86.avx512fp16.mask.vcvtph2w.512
    2, // llvm.x86.avx512fp16.mask.vcvtps2phx.128
    2, // llvm.x86.avx512fp16.mask.vcvtps2phx.256
    89, // llvm.x86.avx512fp16.mask.vcvtps2phx.512
    2, // llvm.x86.avx512fp16.mask.vcvtqq2ph.128
    2, // llvm.x86.avx512fp16.mask.vcvtqq2ph.256
    91, // llvm.x86.avx512fp16.mask.vcvtsd2sh.round
    91, // llvm.x86.avx512fp16.mask.vcvtsh2sd.round
    91, // llvm.x86.avx512fp16.mask.vcvtsh2ss.round
    91, // llvm.x86.avx512fp16.mask.vcvtss2sh.round
    2, // llvm.x86.avx512fp16.mask.vcvttph2dq.128
    2, // llvm.x86.avx512fp16.mask.vcvttph2dq.256
    89, // llvm.x86.avx512fp16.mask.vcvttph2dq.512
    2, // llvm.x86.avx512fp16.mask.vcvttph2qq.128
    2, // llvm.x86.avx512fp16.mask.vcvttph2qq.256
    89, // llvm.x86.avx512fp16.mask.vcvttph2qq.512
    2, // llvm.x86.avx512fp16.mask.vcvttph2udq.128
    2, // llvm.x86.avx512fp16.mask.vcvttph2udq.256
    89, // llvm.x86.avx512fp16.mask.vcvttph2udq.512
    2, // llvm.x86.avx512fp16.mask.vcvttph2uqq.128
    2, // llvm.x86.avx512fp16.mask.vcvttph2uqq.256
    89, // llvm.x86.avx512fp16.mask.vcvttph2uqq.512
    2, // llvm.x86.avx512fp16.mask.vcvttph2uw.128
    2, // llvm.x86.avx512fp16.mask.vcvttph2uw.256
    89, // llvm.x86.avx512fp16.mask.vcvttph2uw.512
    2, // llvm.x86.avx512fp16.mask.vcvttph2w.128
    2, // llvm.x86.avx512fp16.mask.vcvttph2w.256
    89, // llvm.x86.avx512fp16.mask.vcvttph2w.512
    2, // llvm.x86.avx512fp16.mask.vcvtudq2ph.128
    2, // llvm.x86.avx512fp16.mask.vcvtuqq2ph.128
    2, // llvm.x86.avx512fp16.mask.vcvtuqq2ph.256
    2, // llvm.x86.avx512fp16.mask.vfcmadd.cph.128
    2, // llvm.x86.avx512fp16.mask.vfcmadd.cph.256
    91, // llvm.x86.avx512fp16.mask.vfcmadd.cph.512
    91, // llvm.x86.avx512fp16.mask.vfcmadd.csh
    2, // llvm.x86.avx512fp16.mask.vfcmul.cph.128
    2, // llvm.x86.avx512fp16.mask.vfcmul.cph.256
    91, // llvm.x86.avx512fp16.mask.vfcmul.cph.512
    91, // llvm.x86.avx512fp16.mask.vfcmul.csh
    2, // llvm.x86.avx512fp16.mask.vfmadd.cph.128
    2, // llvm.x86.avx512fp16.mask.vfmadd.cph.256
    91, // llvm.x86.avx512fp16.mask.vfmadd.cph.512
    91, // llvm.x86.avx512fp16.mask.vfmadd.csh
    2, // llvm.x86.avx512fp16.mask.vfmul.cph.128
    2, // llvm.x86.avx512fp16.mask.vfmul.cph.256
    91, // llvm.x86.avx512fp16.mask.vfmul.cph.512
    91, // llvm.x86.avx512fp16.mask.vfmul.csh
    2, // llvm.x86.avx512fp16.maskz.vfcmadd.cph.128
    2, // llvm.x86.avx512fp16.maskz.vfcmadd.cph.256
    91, // llvm.x86.avx512fp16.maskz.vfcmadd.cph.512
    91, // llvm.x86.avx512fp16.maskz.vfcmadd.csh
    2, // llvm.x86.avx512fp16.maskz.vfmadd.cph.128
    2, // llvm.x86.avx512fp16.maskz.vfmadd.cph.256
    91, // llvm.x86.avx512fp16.maskz.vfmadd.cph.512
    91, // llvm.x86.avx512fp16.maskz.vfmadd.csh
    2, // llvm.x86.avx512fp16.max.ph.128
    2, // llvm.x86.avx512fp16.max.ph.256
    22, // llvm.x86.avx512fp16.max.ph.512
    2, // llvm.x86.avx512fp16.min.ph.128
    2, // llvm.x86.avx512fp16.min.ph.256
    22, // llvm.x86.avx512fp16.min.ph.512
    22, // llvm.x86.avx512fp16.mul.ph.512
    59, // llvm.x86.avx512fp16.sqrt.ph.512
    22, // llvm.x86.avx512fp16.sub.ph.512
    227, // llvm.x86.avx512fp16.vcomi.sh
    59, // llvm.x86.avx512fp16.vcvtsh2si32
    59, // llvm.x86.avx512fp16.vcvtsh2si64
    59, // llvm.x86.avx512fp16.vcvtsh2usi32
    59, // llvm.x86.avx512fp16.vcvtsh2usi64
    22, // llvm.x86.avx512fp16.vcvtsi2sh
    22, // llvm.x86.avx512fp16.vcvtsi642sh
    59, // llvm.x86.avx512fp16.vcvttsh2si32
    59, // llvm.x86.avx512fp16.vcvttsh2si64
    59, // llvm.x86.avx512fp16.vcvttsh2usi32
    59, // llvm.x86.avx512fp16.vcvttsh2usi64
    22, // llvm.x86.avx512fp16.vcvtusi2sh
    22, // llvm.x86.avx512fp16.vcvtusi642sh
    89, // llvm.x86.avx512fp16.vfmadd.f16
    89, // llvm.x86.avx512fp16.vfmadd.ph.512
    2, // llvm.x86.avx512fp16.vfmaddsub.ph.128
    2, // llvm.x86.avx512fp16.vfmaddsub.ph.256
    89, // llvm.x86.avx512fp16.vfmaddsub.ph.512
    240, // llvm.x86.axor32
    240, // llvm.x86.axor64
    2, // llvm.x86.bmi.bextr.32
    2, // llvm.x86.bmi.bextr.64
    2, // llvm.x86.bmi.bzhi.32
    2, // llvm.x86.bmi.bzhi.64
    2, // llvm.x86.bmi.pdep.32
    2, // llvm.x86.bmi.pdep.64
    2, // llvm.x86.bmi.pext.32
    2, // llvm.x86.bmi.pext.64
    2, // llvm.x86.cast.tile.to.vector
    2, // llvm.x86.cast.vector.to.tile
    10, // llvm.x86.cldemote
    10, // llvm.x86.clflushopt
    10, // llvm.x86.clrssbsy
    10, // llvm.x86.clui
    10, // llvm.x86.clwb
    10, // llvm.x86.clzero
    351, // llvm.x86.cmpccxadd32
    351, // llvm.x86.cmpccxadd64
    10, // llvm.x86.directstore32
    10, // llvm.x86.directstore64
    10, // llvm.x86.encodekey128
    10, // llvm.x86.encodekey256
    10, // llvm.x86.enqcmd
    10, // llvm.x86.enqcmds
    10, // llvm.x86.flags.read.u32
    10, // llvm.x86.flags.read.u64
    10, // llvm.x86.flags.write.u32
    10, // llvm.x86.flags.write.u64
    2, // llvm.x86.fma.vfmaddsub.pd
    2, // llvm.x86.fma.vfmaddsub.pd.256
    2, // llvm.x86.fma.vfmaddsub.ps
    2, // llvm.x86.fma.vfmaddsub.ps.256
    10, // llvm.x86.fxrstor
    10, // llvm.x86.fxrstor64
    10, // llvm.x86.fxsave
    10, // llvm.x86.fxsave64
    10, // llvm.x86.incsspd
    10, // llvm.x86.incsspq
    220, // llvm.x86.int
    10, // llvm.x86.invpcid
    10, // llvm.x86.ldtilecfg
    10, // llvm.x86.ldtilecfg.internal
    10, // llvm.x86.llwpcb
    10, // llvm.x86.loadiwkey
    35, // llvm.x86.lwpins32
    35, // llvm.x86.lwpins64
    35, // llvm.x86.lwpval32
    35, // llvm.x86.lwpval64
    10, // llvm.x86.mmx.emms
    10, // llvm.x86.mmx.femms
    10, // llvm.x86.mmx.maskmovq
    10, // llvm.x86.mmx.movnt.dq
    2, // llvm.x86.mmx.packssdw
    2, // llvm.x86.mmx.packsswb
    2, // llvm.x86.mmx.packuswb
    2, // llvm.x86.mmx.padd.b
    2, // llvm.x86.mmx.padd.d
    2, // llvm.x86.mmx.padd.q
    2, // llvm.x86.mmx.padd.w
    2, // llvm.x86.mmx.padds.b
    2, // llvm.x86.mmx.padds.w
    2, // llvm.x86.mmx.paddus.b
    2, // llvm.x86.mmx.paddus.w
    22, // llvm.x86.mmx.palignr.b
    2, // llvm.x86.mmx.pand
    2, // llvm.x86.mmx.pandn
    2, // llvm.x86.mmx.pavg.b
    2, // llvm.x86.mmx.pavg.w
    2, // llvm.x86.mmx.pcmpeq.b
    2, // llvm.x86.mmx.pcmpeq.d
    2, // llvm.x86.mmx.pcmpeq.w
    2, // llvm.x86.mmx.pcmpgt.b
    2, // llvm.x86.mmx.pcmpgt.d
    2, // llvm.x86.mmx.pcmpgt.w
    59, // llvm.x86.mmx.pextr.w
    22, // llvm.x86.mmx.pinsr.w
    2, // llvm.x86.mmx.pmadd.wd
    2, // llvm.x86.mmx.pmaxs.w
    2, // llvm.x86.mmx.pmaxu.b
    2, // llvm.x86.mmx.pmins.w
    2, // llvm.x86.mmx.pminu.b
    2, // llvm.x86.mmx.pmovmskb
    2, // llvm.x86.mmx.pmulh.w
    2, // llvm.x86.mmx.pmulhu.w
    2, // llvm.x86.mmx.pmull.w
    2, // llvm.x86.mmx.pmulu.dq
    2, // llvm.x86.mmx.por
    2, // llvm.x86.mmx.psad.bw
    2, // llvm.x86.mmx.psll.d
    2, // llvm.x86.mmx.psll.q
    2, // llvm.x86.mmx.psll.w
    2, // llvm.x86.mmx.pslli.d
    2, // llvm.x86.mmx.pslli.q
    2, // llvm.x86.mmx.pslli.w
    2, // llvm.x86.mmx.psra.d
    2, // llvm.x86.mmx.psra.w
    2, // llvm.x86.mmx.psrai.d
    2, // llvm.x86.mmx.psrai.w
    2, // llvm.x86.mmx.psrl.d
    2, // llvm.x86.mmx.psrl.q
    2, // llvm.x86.mmx.psrl.w
    2, // llvm.x86.mmx.psrli.d
    2, // llvm.x86.mmx.psrli.q
    2, // llvm.x86.mmx.psrli.w
    2, // llvm.x86.mmx.psub.b
    2, // llvm.x86.mmx.psub.d
    2, // llvm.x86.mmx.psub.q
    2, // llvm.x86.mmx.psub.w
    2, // llvm.x86.mmx.psubs.b
    2, // llvm.x86.mmx.psubs.w
    2, // llvm.x86.mmx.psubus.b
    2, // llvm.x86.mmx.psubus.w
    2, // llvm.x86.mmx.punpckhbw
    2, // llvm.x86.mmx.punpckhdq
    2, // llvm.x86.mmx.punpckhwd
    2, // llvm.x86.mmx.punpcklbw
    2, // llvm.x86.mmx.punpckldq
    2, // llvm.x86.mmx.punpcklwd
    2, // llvm.x86.mmx.pxor
    10, // llvm.x86.monitorx
    10, // llvm.x86.movdir64b
    10, // llvm.x86.mwaitx
    22, // llvm.x86.pclmulqdq
    22, // llvm.x86.pclmulqdq.256
    22, // llvm.x86.pclmulqdq.512
    10, // llvm.x86.ptwrite32
    10, // llvm.x86.ptwrite64
    10, // llvm.x86.rdfsbase.32
    10, // llvm.x86.rdfsbase.64
    10, // llvm.x86.rdgsbase.32
    10, // llvm.x86.rdgsbase.64
    10, // llvm.x86.rdpid
    10, // llvm.x86.rdpkru
    10, // llvm.x86.rdpmc
    10, // llvm.x86.rdpru
    10, // llvm.x86.rdrand.16
    10, // llvm.x86.rdrand.32
    10, // llvm.x86.rdrand.64
    10, // llvm.x86.rdseed.16
    10, // llvm.x86.rdseed.32
    10, // llvm.x86.rdseed.64
    10, // llvm.x86.rdsspd
    10, // llvm.x86.rdsspq
    10, // llvm.x86.rdtsc
    10, // llvm.x86.rdtscp
    10, // llvm.x86.rstorssp
    10, // llvm.x86.saveprevssp
    10, // llvm.x86.seh.ehguard
    10, // llvm.x86.seh.ehregnode
    12, // llvm.x86.seh.lsda
    10, // llvm.x86.senduipi
    10, // llvm.x86.serialize
    10, // llvm.x86.setssbsy
    2, // llvm.x86.sha1msg1
    2, // llvm.x86.sha1msg2
    2, // llvm.x86.sha1nexte
    22, // llvm.x86.sha1rnds4
    2, // llvm.x86.sha256msg1
    2, // llvm.x86.sha256msg2
    2, // llvm.x86.sha256rnds2
    10, // llvm.x86.slwpcb
    22, // llvm.x86.sse.cmp.ps
    22, // llvm.x86.sse.cmp.ss
    2, // llvm.x86.sse.comieq.ss
    2, // llvm.x86.sse.comige.ss
    2, // llvm.x86.sse.comigt.ss
    2, // llvm.x86.sse.comile.ss
    2, // llvm.x86.sse.comilt.ss
    2, // llvm.x86.sse.comineq.ss
    2, // llvm.x86.sse.cvtpd2pi
    2, // llvm.x86.sse.cvtpi2pd
    2, // llvm.x86.sse.cvtpi2ps
    2, // llvm.x86.sse.cvtps2pi
    2, // llvm.x86.sse.cvtss2si
    2, // llvm.x86.sse.cvtss2si64
    2, // llvm.x86.sse.cvttpd2pi
    2, // llvm.x86.sse.cvttps2pi
    2, // llvm.x86.sse.cvttss2si
    2, // llvm.x86.sse.cvttss2si64
    63, // llvm.x86.sse.ldmxcsr
    2, // llvm.x86.sse.max.ps
    2, // llvm.x86.sse.max.ss
    2, // llvm.x86.sse.min.ps
    2, // llvm.x86.sse.min.ss
    2, // llvm.x86.sse.movmsk.ps
    59, // llvm.x86.sse.pshuf.w
    2, // llvm.x86.sse.rcp.ps
    2, // llvm.x86.sse.rcp.ss
    2, // llvm.x86.sse.rsqrt.ps
    2, // llvm.x86.sse.rsqrt.ss
    10, // llvm.x86.sse.sfence
    352, // llvm.x86.sse.stmxcsr
    2, // llvm.x86.sse.ucomieq.ss
    2, // llvm.x86.sse.ucomige.ss
    2, // llvm.x86.sse.ucomigt.ss
    2, // llvm.x86.sse.ucomile.ss
    2, // llvm.x86.sse.ucomilt.ss
    2, // llvm.x86.sse.ucomineq.ss
    10, // llvm.x86.sse2.clflush
    22, // llvm.x86.sse2.cmp.pd
    22, // llvm.x86.sse2.cmp.sd
    2, // llvm.x86.sse2.comieq.sd
    2, // llvm.x86.sse2.comige.sd
    2, // llvm.x86.sse2.comigt.sd
    2, // llvm.x86.sse2.comile.sd
    2, // llvm.x86.sse2.comilt.sd
    2, // llvm.x86.sse2.comineq.sd
    2, // llvm.x86.sse2.cvtpd2dq
    2, // llvm.x86.sse2.cvtpd2ps
    2, // llvm.x86.sse2.cvtps2dq
    2, // llvm.x86.sse2.cvtsd2si
    2, // llvm.x86.sse2.cvtsd2si64
    2, // llvm.x86.sse2.cvtsd2ss
    2, // llvm.x86.sse2.cvttpd2dq
    2, // llvm.x86.sse2.cvttps2dq
    2, // llvm.x86.sse2.cvttsd2si
    2, // llvm.x86.sse2.cvttsd2si64
    10, // llvm.x86.sse2.lfence
    10, // llvm.x86.sse2.maskmov.dqu
    2, // llvm.x86.sse2.max.pd
    2, // llvm.x86.sse2.max.sd
    10, // llvm.x86.sse2.mfence
    2, // llvm.x86.sse2.min.pd
    2, // llvm.x86.sse2.min.sd
    2, // llvm.x86.sse2.movmsk.pd
    2, // llvm.x86.sse2.packssdw.128
    2, // llvm.x86.sse2.packsswb.128
    2, // llvm.x86.sse2.packuswb.128
    10, // llvm.x86.sse2.pause
    2, // llvm.x86.sse2.pavg.b
    2, // llvm.x86.sse2.pavg.w
    2, // llvm.x86.sse2.pmadd.wd
    2, // llvm.x86.sse2.pmovmskb.128
    2, // llvm.x86.sse2.pmulh.w
    2, // llvm.x86.sse2.pmulhu.w
    2, // llvm.x86.sse2.psad.bw
    2, // llvm.x86.sse2.psll.d
    2, // llvm.x86.sse2.psll.q
    2, // llvm.x86.sse2.psll.w
    2, // llvm.x86.sse2.pslli.d
    2, // llvm.x86.sse2.pslli.q
    2, // llvm.x86.sse2.pslli.w
    2, // llvm.x86.sse2.psra.d
    2, // llvm.x86.sse2.psra.w
    2, // llvm.x86.sse2.psrai.d
    2, // llvm.x86.sse2.psrai.w
    2, // llvm.x86.sse2.psrl.d
    2, // llvm.x86.sse2.psrl.q
    2, // llvm.x86.sse2.psrl.w
    2, // llvm.x86.sse2.psrli.d
    2, // llvm.x86.sse2.psrli.q
    2, // llvm.x86.sse2.psrli.w
    2, // llvm.x86.sse2.ucomieq.sd
    2, // llvm.x86.sse2.ucomige.sd
    2, // llvm.x86.sse2.ucomigt.sd
    2, // llvm.x86.sse2.ucomile.sd
    2, // llvm.x86.sse2.ucomilt.sd
    2, // llvm.x86.sse2.ucomineq.sd
    2, // llvm.x86.sse3.addsub.pd
    2, // llvm.x86.sse3.addsub.ps
    2, // llvm.x86.sse3.hadd.pd
    2, // llvm.x86.sse3.hadd.ps
    2, // llvm.x86.sse3.hsub.pd
    2, // llvm.x86.sse3.hsub.ps
    62, // llvm.x86.sse3.ldu.dq
    10, // llvm.x86.sse3.monitor
    10, // llvm.x86.sse3.mwait
    2, // llvm.x86.sse41.blendvpd
    2, // llvm.x86.sse41.blendvps
    22, // llvm.x86.sse41.dppd
    22, // llvm.x86.sse41.dpps
    22, // llvm.x86.sse41.insertps
    22, // llvm.x86.sse41.mpsadbw
    2, // llvm.x86.sse41.packusdw
    2, // llvm.x86.sse41.pblendvb
    2, // llvm.x86.sse41.phminposuw
    2, // llvm.x86.sse41.ptestc
    2, // llvm.x86.sse41.ptestnzc
    2, // llvm.x86.sse41.ptestz
    59, // llvm.x86.sse41.round.pd
    59, // llvm.x86.sse41.round.ps
    22, // llvm.x86.sse41.round.sd
    22, // llvm.x86.sse41.round.ss
    2, // llvm.x86.sse42.crc32.32.16
    2, // llvm.x86.sse42.crc32.32.32
    2, // llvm.x86.sse42.crc32.32.8
    2, // llvm.x86.sse42.crc32.64.64
    91, // llvm.x86.sse42.pcmpestri128
    91, // llvm.x86.sse42.pcmpestria128
    91, // llvm.x86.sse42.pcmpestric128
    91, // llvm.x86.sse42.pcmpestrio128
    91, // llvm.x86.sse42.pcmpestris128
    91, // llvm.x86.sse42.pcmpestriz128
    91, // llvm.x86.sse42.pcmpestrm128
    22, // llvm.x86.sse42.pcmpistri128
    22, // llvm.x86.sse42.pcmpistria128
    22, // llvm.x86.sse42.pcmpistric128
    22, // llvm.x86.sse42.pcmpistrio128
    22, // llvm.x86.sse42.pcmpistris128
    22, // llvm.x86.sse42.pcmpistriz128
    22, // llvm.x86.sse42.pcmpistrm128
    2, // llvm.x86.sse4a.extrq
    26, // llvm.x86.sse4a.extrqi
    2, // llvm.x86.sse4a.insertq
    227, // llvm.x86.sse4a.insertqi
    2, // llvm.x86.ssse3.pabs.b
    2, // llvm.x86.ssse3.pabs.d
    2, // llvm.x86.ssse3.pabs.w
    2, // llvm.x86.ssse3.phadd.d
    2, // llvm.x86.ssse3.phadd.d.128
    2, // llvm.x86.ssse3.phadd.sw
    2, // llvm.x86.ssse3.phadd.sw.128
    2, // llvm.x86.ssse3.phadd.w
    2, // llvm.x86.ssse3.phadd.w.128
    2, // llvm.x86.ssse3.phsub.d
    2, // llvm.x86.ssse3.phsub.d.128
    2, // llvm.x86.ssse3.phsub.sw
    2, // llvm.x86.ssse3.phsub.sw.128
    2, // llvm.x86.ssse3.phsub.w
    2, // llvm.x86.ssse3.phsub.w.128
    2, // llvm.x86.ssse3.pmadd.ub.sw
    2, // llvm.x86.ssse3.pmadd.ub.sw.128
    2, // llvm.x86.ssse3.pmul.hr.sw
    2, // llvm.x86.ssse3.pmul.hr.sw.128
    2, // llvm.x86.ssse3.pshuf.b
    2, // llvm.x86.ssse3.pshuf.b.128
    2, // llvm.x86.ssse3.psign.b
    2, // llvm.x86.ssse3.psign.b.128
    2, // llvm.x86.ssse3.psign.d
    2, // llvm.x86.ssse3.psign.d.128
    2, // llvm.x86.ssse3.psign.w
    2, // llvm.x86.ssse3.psign.w.128
    10, // llvm.x86.sttilecfg
    10, // llvm.x86.stui
    2, // llvm.x86.subborrow.32
    2, // llvm.x86.subborrow.64
    59, // llvm.x86.tbm.bextri.u32
    59, // llvm.x86.tbm.bextri.u64
    218, // llvm.x86.tcmmimfp16ps
    10, // llvm.x86.tcmmimfp16ps.internal
    218, // llvm.x86.tcmmrlfp16ps
    10, // llvm.x86.tcmmrlfp16ps.internal
    218, // llvm.x86.tdpbf16ps
    10, // llvm.x86.tdpbf16ps.internal
    218, // llvm.x86.tdpbssd
    10, // llvm.x86.tdpbssd.internal
    218, // llvm.x86.tdpbsud
    10, // llvm.x86.tdpbsud.internal
    218, // llvm.x86.tdpbusd
    10, // llvm.x86.tdpbusd.internal
    218, // llvm.x86.tdpbuud
    10, // llvm.x86.tdpbuud.internal
    218, // llvm.x86.tdpfp16ps
    10, // llvm.x86.tdpfp16ps.internal
    10, // llvm.x86.testui
    220, // llvm.x86.tileloadd64
    10, // llvm.x86.tileloadd64.internal
    220, // llvm.x86.tileloaddt164
    10, // llvm.x86.tileloaddt164.internal
    10, // llvm.x86.tilerelease
    220, // llvm.x86.tilestored64
    10, // llvm.x86.tilestored64.internal
    220, // llvm.x86.tilezero
    10, // llvm.x86.tilezero.internal
    10, // llvm.x86.tpause
    10, // llvm.x86.umonitor
    10, // llvm.x86.umwait
    3, // llvm.x86.vbcstnebf162ps128
    3, // llvm.x86.vbcstnebf162ps256
    3, // llvm.x86.vbcstnesh2ps128
    3, // llvm.x86.vbcstnesh2ps256
    3, // llvm.x86.vcvtneebf162ps128
    3, // llvm.x86.vcvtneebf162ps256
    3, // llvm.x86.vcvtneeph2ps128
    3, // llvm.x86.vcvtneeph2ps256
    3, // llvm.x86.vcvtneobf162ps128
    3, // llvm.x86.vcvtneobf162ps256
    3, // llvm.x86.vcvtneoph2ps128
    3, // llvm.x86.vcvtneoph2ps256
    2, // llvm.x86.vcvtneps2bf16128
    2, // llvm.x86.vcvtneps2bf16256
    59, // llvm.x86.vcvtps2ph.128
    59, // llvm.x86.vcvtps2ph.256
    22, // llvm.x86.vgf2p8affineinvqb.128
    22, // llvm.x86.vgf2p8affineinvqb.256
    22, // llvm.x86.vgf2p8affineinvqb.512
    22, // llvm.x86.vgf2p8affineqb.128
    22, // llvm.x86.vgf2p8affineqb.256
    22, // llvm.x86.vgf2p8affineqb.512
    2, // llvm.x86.vgf2p8mulb.128
    2, // llvm.x86.vgf2p8mulb.256
    2, // llvm.x86.vgf2p8mulb.512
    2, // llvm.x86.vsha512msg1
    2, // llvm.x86.vsha512msg2
    2, // llvm.x86.vsha512rnds2
    2, // llvm.x86.vsm3msg1
    2, // llvm.x86.vsm3msg2
    89, // llvm.x86.vsm3rnds2
    2, // llvm.x86.vsm4key4128
    2, // llvm.x86.vsm4key4256
    2, // llvm.x86.vsm4rnds4128
    2, // llvm.x86.vsm4rnds4256
    10, // llvm.x86.wbinvd
    10, // llvm.x86.wbnoinvd
    10, // llvm.x86.wrfsbase.32
    10, // llvm.x86.wrfsbase.64
    10, // llvm.x86.wrgsbase.32
    10, // llvm.x86.wrgsbase.64
    10, // llvm.x86.wrpkru
    10, // llvm.x86.wrssd
    10, // llvm.x86.wrssq
    10, // llvm.x86.wrussd
    10, // llvm.x86.wrussq
    220, // llvm.x86.xabort
    10, // llvm.x86.xbegin
    10, // llvm.x86.xend
    10, // llvm.x86.xgetbv
    2, // llvm.x86.xop.vfrcz.pd
    2, // llvm.x86.xop.vfrcz.pd.256
    2, // llvm.x86.xop.vfrcz.ps
    2, // llvm.x86.xop.vfrcz.ps.256
    2, // llvm.x86.xop.vfrcz.sd
    2, // llvm.x86.xop.vfrcz.ss
    89, // llvm.x86.xop.vpermil2pd
    89, // llvm.x86.xop.vpermil2pd.256
    89, // llvm.x86.xop.vpermil2ps
    89, // llvm.x86.xop.vpermil2ps.256
    2, // llvm.x86.xop.vphaddbd
    2, // llvm.x86.xop.vphaddbq
    2, // llvm.x86.xop.vphaddbw
    2, // llvm.x86.xop.vphadddq
    2, // llvm.x86.xop.vphaddubd
    2, // llvm.x86.xop.vphaddubq
    2, // llvm.x86.xop.vphaddubw
    2, // llvm.x86.xop.vphaddudq
    2, // llvm.x86.xop.vphadduwd
    2, // llvm.x86.xop.vphadduwq
    2, // llvm.x86.xop.vphaddwd
    2, // llvm.x86.xop.vphaddwq
    2, // llvm.x86.xop.vphsubbw
    2, // llvm.x86.xop.vphsubdq
    2, // llvm.x86.xop.vphsubwd
    2, // llvm.x86.xop.vpmacsdd
    2, // llvm.x86.xop.vpmacsdqh
    2, // llvm.x86.xop.vpmacsdql
    2, // llvm.x86.xop.vpmacssdd
    2, // llvm.x86.xop.vpmacssdqh
    2, // llvm.x86.xop.vpmacssdql
    2, // llvm.x86.xop.vpmacsswd
    2, // llvm.x86.xop.vpmacssww
    2, // llvm.x86.xop.vpmacswd
    2, // llvm.x86.xop.vpmacsww
    2, // llvm.x86.xop.vpmadcsswd
    2, // llvm.x86.xop.vpmadcswd
    2, // llvm.x86.xop.vpperm
    2, // llvm.x86.xop.vpshab
    2, // llvm.x86.xop.vpshad
    2, // llvm.x86.xop.vpshaq
    2, // llvm.x86.xop.vpshaw
    2, // llvm.x86.xop.vpshlb
    2, // llvm.x86.xop.vpshld
    2, // llvm.x86.xop.vpshlq
    2, // llvm.x86.xop.vpshlw
    10, // llvm.x86.xresldtrk
    10, // llvm.x86.xrstor
    10, // llvm.x86.xrstor64
    10, // llvm.x86.xrstors
    10, // llvm.x86.xrstors64
    10, // llvm.x86.xsave
    10, // llvm.x86.xsave64
    10, // llvm.x86.xsavec
    10, // llvm.x86.xsavec64
    10, // llvm.x86.xsaveopt
    10, // llvm.x86.xsaveopt64
    10, // llvm.x86.xsaves
    10, // llvm.x86.xsaves64
    10, // llvm.x86.xsetbv
    10, // llvm.x86.xsusldtrk
    10, // llvm.x86.xtest
    12, // llvm.xcore.bitrev
    10, // llvm.xcore.checkevent
    353, // llvm.xcore.chkct
    10, // llvm.xcore.clre
    353, // llvm.xcore.clrpt
    10, // llvm.xcore.clrsr
    12, // llvm.xcore.crc32
    12, // llvm.xcore.crc8
    353, // llvm.xcore.edu
    353, // llvm.xcore.eeu
    353, // llvm.xcore.endin
    353, // llvm.xcore.freer
    10, // llvm.xcore.geted
    10, // llvm.xcore.getet
    12, // llvm.xcore.getid
    10, // llvm.xcore.getps
    10, // llvm.xcore.getr
    353, // llvm.xcore.getst
    353, // llvm.xcore.getts
    353, // llvm.xcore.in
    353, // llvm.xcore.inct
    353, // llvm.xcore.initcp
    353, // llvm.xcore.initdp
    353, // llvm.xcore.initlr
    353, // llvm.xcore.initpc
    353, // llvm.xcore.initsp
    353, // llvm.xcore.inshr
    353, // llvm.xcore.int
    353, // llvm.xcore.mjoin
    353, // llvm.xcore.msync
    353, // llvm.xcore.out
    353, // llvm.xcore.outct
    353, // llvm.xcore.outshr
    353, // llvm.xcore.outt
    353, // llvm.xcore.peek
    353, // llvm.xcore.setc
    354, // llvm.xcore.setclk
    353, // llvm.xcore.setd
    353, // llvm.xcore.setev
    10, // llvm.xcore.setps
    353, // llvm.xcore.setpsc
    353, // llvm.xcore.setpt
    354, // llvm.xcore.setrdy
    10, // llvm.xcore.setsr
    353, // llvm.xcore.settw
    353, // llvm.xcore.setv
    12, // llvm.xcore.sext
    10, // llvm.xcore.ssync
    353, // llvm.xcore.syncr
    353, // llvm.xcore.testct
    353, // llvm.xcore.testwct
    221, // llvm.xcore.waitevent
    12, // llvm.xcore.zext
  };
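  // Each case of the switch below fills AS[] with (attribute index,
  // AttributeSet) pairs and records the live count in NumAttrs.
  // AttributeList::FunctionIndex rows attach function-level attribute
  // sets; small integer indices attach attribute sets to the
  // corresponding call arguments.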

  std::pair<unsigned, AttributeSet> AS[20];
  unsigned NumAttrs = 0;
  if (id != 0) {
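    // Intrinsic IDs are 1-based (id 0 is not_intrinsic), hence the
    // off-by-one lookup into the map.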
    switch (IntrinsicsToAttributesMap[id - 1]) {
    default: llvm_unreachable("Invalid attribute number");
    case 10: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 1;
      break;
    }
    case 350: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 2;
      break;
    }
    case 345: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 3;
      break;
    }
    case 35: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 2;
      break;
    }
    case 337: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 3;
      break;
    }
    case 13: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 2)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 2;
      break;
    }
    case 6: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 2;
      break;
    }
    case 336: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 3;
      break;
    }
    case 353: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 2;
      break;
    }
    case 354: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 3;
      break;
    }
    case 220: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 2;
      break;
    }
    case 235: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 3;
      break;
    }
    case 214: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 3;
      break;
    }
    case 216: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 4;
      break;
    }
    case 215: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 6;
      break;
    }
    case 218: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 4;
      break;
    }
    case 217: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 6;
      break;
    }
    case 213: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[6] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 4)};
      NumAttrs = 7;
      break;
    }
    case 263: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 31)};
      NumAttrs = 1;
      break;
    }
    case 72: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 31)};
      NumAttrs = 2;
      break;
    }
    case 71: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 31)};
      NumAttrs = 2;
      break;
    }
    case 221: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 58)};
      NumAttrs = 1;
      break;
    }
    case 239: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 58)};
      NumAttrs = 2;
      break;
    }
    case 338: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 79)};
      NumAttrs = 1;
      break;
    }
    case 240: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 1;
      break;
    }
    case 351: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 2;
      break;
    }
    case 34: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 3;
      break;
    }
    case 226: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 2;
      break;
    }
    case 237: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 3;
      break;
    }
    case 236: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 3;
      break;
    }
    case 259: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 3;
      break;
    }
    case 15: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 10)};
      NumAttrs = 2;
      break;
    }
    case 264: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 72)};
      NumAttrs = 1;
      break;
    }
    case 257: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 72)};
      NumAttrs = 2;
      break;
    }
    case 33: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 11)};
      NumAttrs = 1;
      break;
    }
    case 16: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 11)};
      NumAttrs = 2;
      break;
    }
    case 335: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 11)};
      NumAttrs = 2;
      break;
    }
    case 256: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 11)};
      NumAttrs = 2;
      break;
    }
    case 12: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 1;
      break;
    }
    case 287: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 3;
      break;
    }
    case 74: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 2;
      break;
    }
    case 238: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 2;
      break;
    }
    case 60: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 2;
      break;
    }
    case 61: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 3;
      break;
    }
    case 332: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 3;
      break;
    }
    case 18: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 2;
      break;
    }
    case 20: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 2;
      break;
    }
    case 284: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 3;
      break;
    }
    case 282: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 3;
      break;
    }
    case 285: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 3;
      break;
    }
    case 280: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 8)};
      NumAttrs = 4;
      break;
    }
    case 63: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 27)};
      NumAttrs = 1;
      break;
    }
    case 341: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 81)};
      NumAttrs = 2;
      break;
    }
    case 340: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 80)};
      NumAttrs = 2;
      break;
    }
    case 352: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 83)};
      NumAttrs = 1;
      break;
    }
    case 255: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 1;
      break;
    }
    case 182: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 2;
      break;
    }
    case 279: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 3;
      break;
    }
    case 283: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 3;
      break;
    }
    case 276: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 3;
      break;
    }
    case 278: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 4;
      break;
    }
    case 281: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 4;
      break;
    }
    case 277: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 54)};
      NumAttrs = 5;
      break;
    }
    case 100: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 38)};
      NumAttrs = 1;
      break;
    }
    case 99: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 38)};
      NumAttrs = 2;
      break;
    }
    case 64: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 28)};
      NumAttrs = 1;
      break;
    }
    case 82: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 12)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 35)};
      NumAttrs = 6;
      break;
    }
    case 222: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 59)};
      NumAttrs = 1;
      break;
    }
    case 101: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 39)};
      NumAttrs = 1;
      break;
    }
    case 265: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 76)};
      NumAttrs = 1;
      break;
    }
    case 75: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 33)};
      NumAttrs = 1;
      break;
    }
    case 51: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 3)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 23)};
      NumAttrs = 4;
      break;
    }
    case 55: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 25)};
      NumAttrs = 3;
      break;
    }
    case 70: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 30)};
      NumAttrs = 1;
      break;
    }
    case 252: {
      AS[0] = {0, getIntrinsicArgAttributeSet(C, 1)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 70)};
      NumAttrs = 2;
      break;
    }
    case 241: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 62)};
      NumAttrs = 2;
      break;
    }
    case 243: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 14)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 15)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 62)};
      NumAttrs = 3;
      break;
    }
    case 253: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 71)};
      NumAttrs = 2;
      break;
    }
    case 246: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 65)};
      NumAttrs = 2;
      break;
    }
    case 247: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 65)};
      NumAttrs = 2;
      break;
    }
    case 244: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 64)};
      NumAttrs = 1;
      break;
    }
    case 251: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 69)};
      NumAttrs = 1;
      break;
    }
    case 242: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 63)};
      NumAttrs = 1;
      break;
    }
    case 248: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 66)};
      NumAttrs = 1;
      break;
    }
    case 249: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 67)};
      NumAttrs = 2;
      break;
    }
    case 250: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 68)};
      NumAttrs = 1;
      break;
    }
    case 161: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 50)};
      NumAttrs = 1;
      break;
    }
    case 207: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 57)};
      NumAttrs = 1;
      break;
    }
    case 203: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 56)};
      NumAttrs = 1;
      break;
    }
    case 266: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 1;
      break;
    }
    case 187: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 2;
      break;
    }
    case 104: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 2;
      break;
    }
    case 103: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 2;
      break;
    }
    case 111: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 2;
      break;
    }
    case 189: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 5;
      break;
    }
    case 170: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 5;
      break;
    }
    case 116: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[6] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[7] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 8;
      break;
    }
    case 126: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 41)};
      NumAttrs = 6;
      break;
    }
    case 192: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 3;
      break;
    }
    case 173: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 3;
      break;
    }
    case 191: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 3;
      break;
    }
    case 172: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 3;
      break;
    }
    case 125: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 2;
      break;
    }
    case 112: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 5;
      break;
    }
    case 50: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 8)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 9)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 4;
      break;
    }
    case 52: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 8)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 9)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 5;
      break;
    }
    case 194: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 6;
      break;
    }
    case 175: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 6;
      break;
    }
    case 53: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 3)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 22)};
      NumAttrs = 4;
      break;
    }
    case 54: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 24)};
      NumAttrs = 3;
      break;
    }
    case 56: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 24)};
      NumAttrs = 4;
      break;
    }
    case 109: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 43)};
      NumAttrs = 2;
      break;
    }
    case 180: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 53)};
      NumAttrs = 1;
      break;
    }
    case 183: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 53)};
      NumAttrs = 2;
      break;
    }
    case 167: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 52)};
      NumAttrs = 1;
      break;
    }
    case 118: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 47)};
      NumAttrs = 1;
      break;
    }
    case 114: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 45)};
      NumAttrs = 1;
      break;
    }
    case 115: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 46)};
      NumAttrs = 1;
      break;
    }
    case 110: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 44)};
      NumAttrs = 3;
      break;
    }
    case 102: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 1;
      break;
    }
    case 168: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 3;
      break;
    }
    case 205: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 2;
      break;
    }
    case 123: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 2;
      break;
    }
    case 204: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 5;
      break;
    }
    case 117: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 2;
      break;
    }
    case 166: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 5;
      break;
    }
    case 206: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 40)};
      NumAttrs = 4;
      break;
    }
    case 179: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 49)};
      NumAttrs = 1;
      break;
    }
    case 157: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 49)};
      NumAttrs = 2;
      break;
    }
    case 127: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 49)};
      NumAttrs = 2;
      break;
    }
    case 184: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 49)};
      NumAttrs = 4;
      break;
    }
    case 186: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 55)};
      NumAttrs = 1;
      break;
    }
    case 8: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 1;
      break;
    }
    case 327: {
      AS[0] = {9, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 328: {
      AS[0] = {9, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 324: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 325: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 132: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 321: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 322: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 85: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 131: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 318: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 319: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 130: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 315: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 316: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 84: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 129: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 312: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 313: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 86: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 128: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 309: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 310: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 87: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 291: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 292: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 3;
      break;
    }
    case 83: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 5)};
      NumAttrs = 2;
      break;
    }
    case 88: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 1;
      break;
    }
    case 275: {
      AS[0] = {9, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 274: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 273: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 272: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 190: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 199: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 3;
      break;
    }
    case 202: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 6;
      break;
    }
    case 271: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 171: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 106: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 3;
      break;
    }
    case 270: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 232: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 269: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 44: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 268: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 2;
      break;
    }
    case 156: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 4;
      break;
    }
    case 155: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 4;
      break;
    }
    case 154: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 4;
      break;
    }
    case 153: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 21)};
      NumAttrs = 4;
      break;
    }
    case 62: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 1;
      break;
    }
    case 306: {
      AS[0] = {9, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 307: {
      AS[0] = {9, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {13, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 326: {
      AS[0] = {9, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 304: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 305: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 323: {
      AS[0] = {8, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 302: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 303: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 320: {
      AS[0] = {7, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 300: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 301: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 317: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 298: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 299: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 314: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 188: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 198: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 201: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 6;
      break;
    }
    case 296: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 297: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 311: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 169: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 105: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 294: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 295: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 308: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 267: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 293: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 290: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 3;
      break;
    }
    case 42: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 41: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 2;
      break;
    }
    case 152: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {16, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {17, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {18, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 151: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {15, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {16, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {17, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 149: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {14, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {15, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {16, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 150: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {13, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {14, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {15, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 148: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {13, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {14, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 138: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {13, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 137: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 136: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 135: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 146: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 4;
      break;
    }
    case 134: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 145: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 4;
      break;
    }
    case 133: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 144: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 4;
      break;
    }
    case 147: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 5;
      break;
    }
    case 143: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 20)};
      NumAttrs = 4;
      break;
    }
    case 92: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 26)};
      NumAttrs = 1;
      break;
    }
    case 95: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 26)};
      NumAttrs = 2;
      break;
    }
    case 94: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 26)};
      NumAttrs = 3;
      break;
    }
    case 58: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 26)};
      NumAttrs = 5;
      break;
    }
    case 4: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 3)};
      NumAttrs = 1;
      break;
    }
    case 7: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 1)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 3)};
      NumAttrs = 2;
      break;
    }
    case 97: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 37)};
      NumAttrs = 1;
      break;
    }
    case 122: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 37)};
      NumAttrs = 4;
      break;
    }
    case 120: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 37)};
      NumAttrs = 5;
      break;
    }
    case 121: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 37)};
      NumAttrs = 5;
      break;
    }
    case 96: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 36)};
      NumAttrs = 1;
      break;
    }
    case 219: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 1;
      break;
    }
    case 81: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 80: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 79: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 231: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 78: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 93: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 37: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 5)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 3;
      break;
    }
    case 113: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 234: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 2;
      break;
    }
    case 233: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 3)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 3;
      break;
    }
    case 36: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 4)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 4)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 4;
      break;
    }
    case 38: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 18)};
      NumAttrs = 3;
      break;
    }
    case 98: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 1;
      break;
    }
    case 30: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 2;
      break;
    }
    case 45: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 3;
      break;
    }
    case 195: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 3;
      break;
    }
    case 197: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 4;
      break;
    }
    case 176: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 3;
      break;
    }
    case 178: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 4;
      break;
    }
    case 47: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 5;
      break;
    }
    case 77: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 7)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 16)};
      NumAttrs = 2;
      break;
    }
    case 3: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 1;
      break;
    }
    case 229: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 2;
      break;
    }
    case 228: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 3;
      break;
    }
    case 17: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 4)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 3)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 3;
      break;
    }
    case 29: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 2;
      break;
    }
    case 46: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 5;
      break;
    }
    case 43: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 3;
      break;
    }
    case 19: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 2;
      break;
    }
    case 193: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 3;
      break;
    }
    case 196: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 4;
      break;
    }
    case 174: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 3;
      break;
    }
    case 177: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 3)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 2)};
      NumAttrs = 4;
      break;
    }
    case 2: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 1;
      break;
    }
    case 286: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 91: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 288: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 349: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 89: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 289: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 90: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 22: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 348: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 346: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 227: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 59: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 347: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 26: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 25: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 6)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 66: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 10)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 32: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 2;
      break;
    }
    case 141: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 5;
      break;
    }
    case 212: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 140: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 5;
      break;
    }
    case 211: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 139: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 5;
      break;
    }
    case 210: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 142: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 4;
      break;
    }
    case 209: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 208: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 1)};
      NumAttrs = 3;
      break;
    }
    case 23: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 13)};
      NumAttrs = 1;
      break;
    }
    case 261: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 74)};
      NumAttrs = 1;
      break;
    }
    case 339: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 74)};
      NumAttrs = 2;
      break;
    }
    case 230: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 61)};
      NumAttrs = 1;
      break;
    }
    case 76: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 1;
      break;
    }
    case 331: {
      AS[0] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 2;
      break;
    }
    case 330: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 3;
      break;
    }
    case 329: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 2;
      break;
    }
    case 254: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 2;
      break;
    }
    case 181: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 2;
      break;
    }
    case 260: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 34)};
      NumAttrs = 3;
      break;
    }
    case 162: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 51)};
      NumAttrs = 1;
      break;
    }
    case 39: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 17)};
      NumAttrs = 1;
      break;
    }
    case 31: {
      AS[0] = {0, getIntrinsicArgAttributeSet(C, 1)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 17)};
      NumAttrs = 2;
      break;
    }
    case 5: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 1;
      break;
    }
    case 124: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 65: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 159: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 3;
      break;
    }
    case 48: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 4;
      break;
    }
    case 343: {
      AS[0] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[4] = {7, getIntrinsicArgAttributeSet(C, 0)};
      AS[5] = {8, getIntrinsicArgAttributeSet(C, 0)};
      AS[6] = {9, getIntrinsicArgAttributeSet(C, 0)};
      AS[7] = {10, getIntrinsicArgAttributeSet(C, 0)};
      AS[8] = {11, getIntrinsicArgAttributeSet(C, 0)};
      AS[9] = {12, getIntrinsicArgAttributeSet(C, 0)};
      AS[10] = {13, getIntrinsicArgAttributeSet(C, 0)};
      AS[11] = {14, getIntrinsicArgAttributeSet(C, 0)};
      AS[12] = {15, getIntrinsicArgAttributeSet(C, 0)};
      AS[13] = {16, getIntrinsicArgAttributeSet(C, 0)};
      AS[14] = {17, getIntrinsicArgAttributeSet(C, 0)};
      AS[15] = {18, getIntrinsicArgAttributeSet(C, 0)};
      AS[16] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 17;
      break;
    }
    case 1: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 49: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 3;
      break;
    }
    case 57: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 4;
      break;
    }
    case 160: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 5)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 164: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 6)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 200: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 4;
      break;
    }
    case 163: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 3;
      break;
    }
    case 158: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {3, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 4;
      break;
    }
    case 245: {
      AS[0] = {0, getIntrinsicArgAttributeSet(C, 1)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 67: {
      AS[0] = {0, getIntrinsicArgAttributeSet(C, 11)};
      AS[1] = {1, getIntrinsicArgAttributeSet(C, 11)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 3;
      break;
    }
    case 108: {
      AS[0] = {0, getIntrinsicArgAttributeSet(C, 13)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 0)};
      NumAttrs = 2;
      break;
    }
    case 24: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 14)};
      NumAttrs = 1;
      break;
    }
    case 185: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 14)};
      NumAttrs = 3;
      break;
    }
    case 165: {
      AS[0] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {6, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 14)};
      NumAttrs = 4;
      break;
    }
    case 21: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 12)};
      NumAttrs = 1;
      break;
    }
    case 107: {
      AS[0] = {5, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 42)};
      NumAttrs = 2;
      break;
    }
    case 68: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 29)};
      NumAttrs = 1;
      break;
    }
    case 69: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 29)};
      NumAttrs = 2;
      break;
    }
    case 73: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 32)};
      NumAttrs = 2;
      break;
    }
    case 119: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 48)};
      NumAttrs = 1;
      break;
    }
    case 9: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 6)};
      NumAttrs = 1;
      break;
    }
    case 258: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 73)};
      NumAttrs = 1;
      break;
    }
    case 334: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 78)};
      NumAttrs = 1;
      break;
    }
    case 262: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 75)};
      NumAttrs = 1;
      break;
    }
    case 40: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 19)};
      NumAttrs = 1;
      break;
    }
    case 11: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 7)};
      NumAttrs = 1;
      break;
    }
    case 14: {
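      // No attribute sets apply to this intrinsic; return an empty list.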
      return AttributeList();
    }
    case 27: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[2] = {4, getIntrinsicArgAttributeSet(C, 0)};
      AS[3] = {5, getIntrinsicArgAttributeSet(C, 0)};
      NumAttrs = 4;
      break;
    }
    case 28: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 15)};
      NumAttrs = 1;
      break;
    }
    case 223: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 60)};
      NumAttrs = 1;
      break;
    }
    case 224: {
      AS[0] = {2, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 60)};
      NumAttrs = 2;
      break;
    }
    case 225: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 60)};
      NumAttrs = 2;
      break;
    }
    case 342: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 82)};
      NumAttrs = 1;
      break;
    }
    case 344: {
      AS[0] = {1, getIntrinsicArgAttributeSet(C, 0)};
      AS[1] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 82)};
      NumAttrs = 2;
      break;
    }
    case 333: {
      AS[0] = {AttributeList::FunctionIndex, getIntrinsicFnAttributeSet(C, 77)};
      NumAttrs = 1;
      break;
    }
    }
  }
  return AttributeList::get(C, ArrayRef(AS, NumAttrs));
}
#endif // GET_INTRINSIC_ATTRIBUTES
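// A minimal usage sketch of the attribute table above (not part of the
// generated file; the EXAMPLE_INTRINSIC_ATTRIBUTE_USAGE guard is
// hypothetical). The switch emitted above is reached through
// Intrinsic::getAttributes, whose result can be stamped onto an intrinsic
// declaration -- something LLVM normally does automatically when the
// declaration is first created.
#ifdef EXAMPLE_INTRINSIC_ATTRIBUTE_USAGE
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// Declare an intrinsic in M and (re-)apply its generated attribute list.
static llvm::Function *declareWithAttributes(llvm::Module &M,
                                             llvm::Intrinsic::ID ID) {
  llvm::Function *F = llvm::Intrinsic::getDeclaration(&M, ID);
  // Intrinsic::getAttributes dispatches into the switch emitted above.
  F->setAttributes(llvm::Intrinsic::getAttributes(M.getContext(), ID));
  return F;
}
#endif // EXAMPLE_INTRINSIC_ATTRIBUTE_USAGE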

// Get the LLVM intrinsic that corresponds to a builtin. This is used by the
// C front-end. The builtin name is passed in as BuiltinNameStr, and a target
// prefix (e.g. 'ppc') is passed in as TargetPrefixStr. The matching intrinsic
// ID is returned; Intrinsic::not_intrinsic indicates that no intrinsic
// corresponds to the builtin.
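// The BuiltinNames table below packs every builtin name into one char array,
// back to back, each entry terminated by '\0'. A minimal sketch of scanning
// such a null-separated table (hypothetical helper, not emitted by TableGen;
// the EXAMPLE_BUILTIN_NAME_SCAN guard is likewise hypothetical):
#ifdef EXAMPLE_BUILTIN_NAME_SCAN
#include <cstring>

// Return the 0-based index of Name within the null-separated table, or -1
// if Name does not appear.
static int scanNameTable(const char *Table, size_t TableLen,
                         const char *Name) {
  int Index = 0;
  for (size_t I = 0; I < TableLen; ++Index) {
    if (std::strcmp(Table + I, Name) == 0)
      return Index;
    I += std::strlen(Table + I) + 1; // skip past this entry's terminator
  }
  return -1;
}
#endif // EXAMPLE_BUILTIN_NAME_SCAN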
#ifdef GET_LLVM_INTRINSIC_FOR_CLANG_BUILTIN
Intrinsic::ID Intrinsic::getIntrinsicForClangBuiltin(const char *TargetPrefixStr, StringRef BuiltinNameStr) {
  static const char BuiltinNames[] = {
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'd', 'j', 'u', 's',
  't', '_', 't', 'r', 'a', 'm', 'p', 'o', 'l', 'i', 'n', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'e', 'b', 'u', 'g', 't', 'r',
  'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'u', 'n',
  'w', 'i', 'n', 'd', '_', 'i', 'n', 'i', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'n', 'i', 't', '_', 't', 'r', 'a', 'm', 'p',
  'o', 'l', 'i', 'n', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'o', 'b', 'j', 'e', 'c', 't', '_', 's', 'i', 'z', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 't', 'a', 'c', 'k', '_', 'r',
  'e', 's', 't', 'o', 'r', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', 't', 'a', 'c', 'k', '_', 's', 'a', 'v', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'h', 'r', 'e', 'a', 'd', '_',
  'p', 'o', 'i', 'n', 't', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 'd', 'm', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'd', 's', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'i', 's',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 'p', 'r', 'e', 'f', 'e', 't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', 'v', 'e', '_', 's', 'v', 'a', 'e', 's', 'd',
  '_', 'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  'v', 'e', '_', 's', 'v', 'a', 'e', 's', 'e', '_', 'u', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'v', 'e', '_', 's', 'v', 'a',
  'e', 's', 'i', 'm', 'c', '_', 'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', 'v', 'e', '_', 's', 'v', 'a', 'e', 's', 'm', 'c',
  '_', 'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  'v', 'e', '_', 's', 'v', 'r', 'a', 'x', '1', '_', 'u', '6', '4', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'v', 'e', '_', 's', 'v',
  'r', 'd', 'f', 'f', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', 'v', 'e', '_', 's', 'v', 'r', 'd', 'f', 'f', 'r', '_', 'z', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'v', 'e', '_', 's',
  'v', 's', 'e', 't', 'f', 'f', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', 'v', 'e', '_', 's', 'v', 's', 'm', '4', 'e', '_', 'u',
  '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'v',
  'e', '_', 's', 'v', 's', 'm', '4', 'e', 'k', 'e', 'y', '_', 'u', '3', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'v', 'e', '_',
  's', 'v', 'w', 'r', 'f', 'f', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 't', 'c', 'a', 'n', 'c', 'e', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 't',
  'c', 'o', 'm', 'm', 'i', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 't', 's', 't', 'a', 'r', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 't', 't', 'e',
  's', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'a', 'l', 'i', 'g', 'n', 'b', 'y', 't', 'e', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'w', 'b', 'i', 'n', 'v', 'l',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'b', 'u', 'f', 'f', 'e', 'r', '_', 'w', 'b', 'i', 'n',
  'v', 'l', '1', '_', 's', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'b', 'u', 'f', 'f', 'e', 'r',
  '_', 'w', 'b', 'i', 'n', 'v', 'l', '1', '_', 'v', 'o', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'c', 'u', 'b', 'e', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'u', 'b', 'e', 'm', 'a',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'c', 'u', 'b', 'e', 's', 'c', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'u', 'b',
  'e', 't', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'f', '3', '2', '_', 'b',
  'f', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'f', '3', '2', '_', 'f', 'p',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p', 'k', '_', 'b', 'f', '8', '_',
  'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p', 'k', '_', 'f', '3',
  '2', '_', 'b', 'f', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p', 'k', '_',
  'f', '3', '2', '_', 'f', 'p', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p',
  'k', '_', 'f', 'p', '8', '_', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't',
  '_', 'p', 'k', '_', 'i', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p',
  'k', '_', 'u', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p', 'k', '_',
  'u', '8', '_', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 'p', 'k',
  'n', 'o', 'r', 'm', '_', 'i', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_',
  'p', 'k', 'n', 'o', 'r', 'm', '_', 'u', '1', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v',
  't', '_', 'p', 'k', 'r', 't', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't', '_', 's',
  'r', '_', 'b', 'f', '8', '_', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'c', 'v', 't',
  '_', 's', 'r', '_', 'f', 'p', '8', '_', 'f', '3', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd',
  'i', 's', 'p', 'a', 't', 'c', 'h', '_', 'i', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's',
  '_', 'a', 'd', 'd', '_', 'g', 's', '_', 'r', 'e', 'g', '_', 'r', 't', 'n',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'd', 's', '_', 'b', 'p', 'e', 'r', 'm', 'u', 't', 'e', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'd', 's', '_', 'a', 't', 'o', 'm', 'i', 'c', '_', 'f', 'a', 'd',
  'd', '_', 'v', '2', 'b', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_', 'g',
  'w', 's', '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's',
  '_', 'g', 'w', 's', '_', 'i', 'n', 'i', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_',
  'g', 'w', 's', '_', 's', 'e', 'm', 'a', '_', 'b', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd',
  's', '_', 'g', 'w', 's', '_', 's', 'e', 'm', 'a', '_', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'd', 's', '_', 'g', 'w', 's', '_', 's', 'e', 'm', 'a', '_', 'r', 'e', 'l',
  'e', 'a', 's', 'e', '_', 'a', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_', 'g',
  'w', 's', '_', 's', 'e', 'm', 'a', '_', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_',
  'p', 'e', 'r', 'm', 'u', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'd', 's', '_', 's', 'u',
  'b', '_', 'g', 's', '_', 'r', 'e', 'g', '_', 'r', 't', 'n', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'd', 's', '_', 's', 'w', 'i', 'z', 'z', 'l', 'e', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'e', 'n',
  'd', 'p', 'g', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 'f', 'd', 'o', 't', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'f', 'd', 'o', 't', '2', '_', 'b', 'f', '1', '6', '_', 'b', 'f', '1', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'f', 'd', 'o', 't', '2', '_', 'f', '1', '6', '_', 'f', '1',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'f', 'd', 'o', 't', '2', '_', 'f', '3', '2', '_', 'b',
  'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'f', 'm', 'u', 'l', '_', 'l', 'e', 'g', 'a',
  'c', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'g', 'r', 'o', 'u', 'p', 's', 't', 'a', 't', 'i',
  'c', 's', 'i', 'z', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'g', 'l', 'p', '_', 'o', 'p',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', '_', 'b', 'u',
  'f', 'f', 'e', 'r', '_', 'p', 't', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'm', 'p', 'l',
  'i', 'c', 'i', 't', 'a', 'r', 'g', '_', 'p', 't', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i',
  'n', 't', 'e', 'r', 'p', '_', 'm', 'o', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'n', 't',
  'e', 'r', 'p', '_', 'p', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'n', 't', 'e', 'r', 'p',
  '_', 'p', '1', '_', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'n', 't', 'e', 'r',
  'p', '_', 'p', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 'n', 't', 'e', 'r', 'p', '_', 'p',
  '2', '_', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'i', 's', '_', 'p', 'r', 'i', 'v',
  'a', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'i', 's', '_', 's', 'h', 'a', 'r', 'e', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'k', 'e', 'r', 'n', 'a', 'r', 'g', '_', 's', 'e', 'g', 'm',
  'e', 'n', 't', '_', 'p', 't', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'l', 'e', 'r', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'b', 'c', 'n', 't', '_', 'h', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'b',
  'c', 'n', 't', '_', 'l', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f',
  '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '1', '6', 'b', 'f', '1', '6',
  '_', '1', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_',
  '1', '6', 'x', '1', '6', 'x', '1', '6', 'f', '1', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm',
  'f', 'm', 'a', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '1',
  'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_',
  '1', '6', 'x', '1', '6', 'x', '2', 'b', 'f', '1', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm',
  'f', 'm', 'a', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '3',
  '2', '_', 'b', 'f', '8', '_', 'b', 'f', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm',
  'a', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '3', '2', '_',
  'b', 'f', '8', '_', 'f', 'p', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_',
  'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '3', '2', '_', 'f', 'p',
  '8', '_', 'b', 'f', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3',
  '2', '_', '1', '6', 'x', '1', '6', 'x', '3', '2', '_', 'f', 'p', '8', '_',
  'f', 'p', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_',
  '1', '6', 'x', '1', '6', 'x', '4', 'b', 'f', '1', '6', '_', '1', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '1', '6', 'x', '1',
  '6', 'x', '4', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f',
  '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '4', 'f', '3', '2', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6',
  'x', '8', '_', 'x', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_',
  'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '8', 'b', 'f', '1', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x',
  '3', '2', 'x', '1', '6', '_', 'b', 'f', '8', '_', 'b', 'f', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x', '3', '2',
  'x', '1', '6', '_', 'b', 'f', '8', '_', 'f', 'p', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm',
  'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '1',
  '6', '_', 'f', 'p', '8', '_', 'b', 'f', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm',
  'a', '_', 'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '1', '6', '_',
  'f', 'p', '8', '_', 'f', 'p', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_',
  'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '1', 'f', '3', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x', '3',
  '2', 'x', '2', 'b', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_',
  'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '2', 'f', '3', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x', '3',
  '2', 'x', '4', '_', 'x', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a',
  '_', 'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '4', 'b', 'f', '1',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2',
  'x', '3', '2', 'x', '4', 'b', 'f', '1', '6', '_', '1', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x',
  '4', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2',
  '_', '3', '2', 'x', '3', '2', 'x', '8', 'b', 'f', '1', '6', '_', '1', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '3', '2', 'x',
  '3', '2', 'x', '8', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_',
  'f', '3', '2', '_', '4', 'x', '4', 'x', '1', 'f', '3', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '4', 'x', '4', 'x', '2', 'b',
  'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_',
  '4', 'x', '4', 'x', '4', 'b', 'f', '1', '6', '_', '1', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'm', 'f', 'm', 'a', '_', 'f', '3', '2', '_', '4', 'x', '4', 'x', '4', 'f',
  '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'f', '6', '4', '_', '1',
  '6', 'x', '1', '6', 'x', '4', 'f', '6', '4', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm',
  'a', '_', 'f', '6', '4', '_', '4', 'x', '4', 'x', '4', 'f', '6', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'f', 'm', 'a', '_', 'i', '3', '2', '_', '1', '6', 'x', '1',
  '6', 'x', '1', '6', 'i', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'i',
  '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '3', '2', '_', 'i', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'f', 'm', 'a', '_', 'i', '3', '2', '_', '1', '6', 'x', '1',
  '6', 'x', '4', 'i', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'i', '3',
  '2', '_', '3', '2', 'x', '3', '2', 'x', '1', '6', '_', 'i', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 'm', 'f', 'm', 'a', '_', 'i', '3', '2', '_', '3', '2', 'x', '3', '2',
  'x', '4', 'i', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f', 'm', 'a', '_', 'i', '3', '2',
  '_', '3', '2', 'x', '3', '2', 'x', '8', 'i', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'm', 'f',
  'm', 'a', '_', 'i', '3', '2', '_', '4', 'x', '4', 'x', '4', 'i', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'm', 'q', 's', 'a', 'd', '_', 'p', 'k', '_', 'u', '1', '6', '_',
  'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'm', 'q', 's', 'a', 'd', '_', 'u', '3', '2', '_',
  'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'm', 's', 'a', 'd', '_', 'u', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'p', 'e', 'r', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 'p', 'e', 'r', 'm', 'l', 'a', 'n', 'e',
  '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'p', 'e', 'r', 'm', 'l', 'a', 'n', 'e', '6', '4',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 'p', 'e', 'r', 'm', 'l', 'a', 'n', 'e', 'x', '1', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 'q', 's', 'a', 'd', '_', 'p', 'k', '_', 'u', '1', '6', '_', 'u',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'q', 'u', 'e', 'u', 'e', '_', 'p', 't', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 'r', 'c', 'p', '_', 'l', 'e', 'g', 'a', 'c', 'y', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'r',
  'e', 'a', 'd', 'f', 'i', 'r', 's', 't', 'l', 'a', 'n', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  'r', 'e', 'a', 'd', 'l', 'a', 'n', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'r', 's', 'q', '_',
  'l', 'e', 'g', 'a', 'c', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'b', 'a', 'r', 'r',
  'i', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'd', 'c', 'a', 'c', 'h', 'e', '_',
  'i', 'n', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'd', 'c', 'a', 'c', 'h', 'e', '_',
  'i', 'n', 'v', '_', 'v', 'o', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'd', 'c', 'a',
  'c', 'h', 'e', '_', 'w', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'd', 'c', 'a', 'c',
  'h', 'e', '_', 'w', 'b', '_', 'v', 'o', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'd',
  'e', 'c', 'p', 'e', 'r', 'f', 'l', 'e', 'v', 'e', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's',
  '_', 'g', 'e', 't', '_', 'w', 'a', 'v', 'e', 'i', 'd', '_', 'i', 'n', '_',
  'w', 'o', 'r', 'k', 'g', 'r', 'o', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'g',
  'e', 't', 'p', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'g', 'e', 't', 'r', 'e', 'g',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', '_', 'i', 'n', 'c', 'p', 'e', 'r', 'f', 'l', 'e', 'v',
  'e', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 's', '_', 'm', 'e', 'm', 'r', 'e', 'a', 'l', 't',
  'i', 'm', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 's', '_', 'm', 'e', 'm', 't', 'i', 'm', 'e',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', '_', 's', 'e', 'n', 'd', 'm', 's', 'g', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  's', '_', 's', 'e', 'n', 'd', 'm', 's', 'g', 'h', 'a', 'l', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 's', '_', 's', 'e', 't', 'p', 'r', 'i', 'o', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_',
  's', 'e', 't', 'r', 'e', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', '_', 's', 'l', 'e', 'e',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 's', '_', 'w', 'a', 'i', 't', '_', 'e', 'v', 'e', 'n',
  't', '_', 'e', 'x', 'p', 'o', 'r', 't', '_', 'r', 'e', 'a', 'd', 'y', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 's', '_', 'w', 'a', 'i', 't', 'c', 'n', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's',
  'a', 'd', '_', 'h', 'i', '_', 'u', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'a', 'd', '_',
  'u', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 's', 'a', 'd', '_', 'u', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_',
  's', 'c', 'h', 'e', 'd', '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 's', 'c', 'h', 'e', 'd', '_', 'g', 'r', 'o', 'u', 'p', '_', 'b', 'a',
  'r', 'r', 'i', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'd', 'o', 't', '2', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n',
  '_', 's', 'd', 'o', 't', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'd', 'o', 't', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'f', '3', '2', '_', '1', '6',
  'x', '1', '6', 'x', '3', '2', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's',
  'm', 'f', 'm', 'a', 'c', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6',
  'x', '3', '2', '_', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm', 'f', 'm', 'a',
  'c', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '6', '4', '_',
  'b', 'f', '8', '_', 'b', 'f', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm', 'f', 'm', 'a',
  'c', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '6', '4', '_',
  'b', 'f', '8', '_', 'f', 'p', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm', 'f', 'm', 'a',
  'c', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '6', '4', '_',
  'f', 'p', '8', '_', 'b', 'f', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm', 'f', 'm', 'a',
  'c', '_', 'f', '3', '2', '_', '1', '6', 'x', '1', '6', 'x', '6', '4', '_',
  'f', 'p', '8', '_', 'f', 'p', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm', 'f', 'm', 'a',
  'c', '_', 'f', '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '1', '6', '_',
  'b', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'f',
  '3', '2', '_', '3', '2', 'x', '3', '2', 'x', '1', '6', '_', 'f', '1', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'f', '3', '2', '_', '3',
  '2', 'x', '3', '2', 'x', '3', '2', '_', 'b', 'f', '8', '_', 'b', 'f', '8',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'f', '3', '2', '_', '3',
  '2', 'x', '3', '2', 'x', '3', '2', '_', 'b', 'f', '8', '_', 'f', 'p', '8',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'f', '3', '2', '_', '3',
  '2', 'x', '3', '2', 'x', '3', '2', '_', 'f', 'p', '8', '_', 'b', 'f', '8',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'f', '3', '2', '_', '3',
  '2', 'x', '3', '2', 'x', '3', '2', '_', 'f', 'p', '8', '_', 'f', 'p', '8',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g',
  'c', 'n', '_', 's', 'm', 'f', 'm', 'a', 'c', '_', 'i', '3', '2', '_', '1',
  '6', 'x', '1', '6', 'x', '6', '4', '_', 'i', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'm',
  'f', 'm', 'a', 'c', '_', 'i', '3', '2', '_', '3', '2', 'x', '3', '2', 'x',
  '3', '2', '_', 'i', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 's', 'u', 'd', 'o', 't', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c',
  'n', '_', 's', 'u', 'd', 'o', 't', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'u', 'd', 'o', 't',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm', 'd',
  'g', 'c', 'n', '_', 'u', 'd', 'o', 't', '4', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'u', 'd', 'o',
  't', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'w', 'a', 'v', 'e', '_', 'b', 'a', 'r', 'r', 'i',
  'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'm',
  'd', 'g', 'c', 'n', '_', 'w', 'a', 'v', 'e', 'f', 'r', 'o', 'n', 't', 's',
  'i', 'z', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'm', 'd', 'g', 'c', 'n', '_', 'w', 'o', 'r', 'k', 'g', 'r', 'o', 'u', 'p',
  '_', 'i', 'd', '_', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'w', 'o', 'r', 'k', 'g', 'r', 'o',
  'u', 'p', '_', 'i', 'd', '_', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'w', 'o', 'r', 'k', 'g',
  'r', 'o', 'u', 'p', '_', 'i', 'd', '_', 'z', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'm', 'd', 'g', 'c', 'n', '_', 'w', 'r', 'i',
  't', 'e', 'l', 'a', 'n', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 'c', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'c', 'd', 'p', '2', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'c', 'm',
  's', 'e', '_', 'T', 'T', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'r', 'm', '_', 'c', 'm', 's', 'e', '_', 'T', 'T', 'A', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'c', 'm',
  's', 'e', '_', 'T', 'T', 'A', 'T', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 'c', 'm', 's', 'e', '_', 'T', 'T', 'T',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
  'g', 'e', 't', '_', 'f', 'p', 's', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'l', 'd', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'l', 'd', 'c',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 'l', 'd', 'c', '2', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 'l', 'd', 'c', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'm', 'c', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'm', 'c',
  'r', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 'm', 'r', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'r', 'm', '_', 'm', 'r', 'c', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 'a', 'd', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 'a',
  'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 'q', 'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 'a', 's', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 's',
  'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 'q', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 'q', 's', 'u', 'b', '1', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'q', 's', 'u',
  'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 's', 'a', 'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'a', 'd', 'd', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'a',
  's', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 's', 'e', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'r', 'm', '_', 's', 'e', 't', '_', 'f', 'p', 's', 'c', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's',
  'h', 'a', 'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 's', 'h', 'a', 'd', 'd', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'h', 'a',
  's', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 's', 'h', 's', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'h', 's', 'u', 'b', '1', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's',
  'h', 's', 'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'b', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a',
  'b', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 's', 'm', 'l', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'd', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm',
  'l', 'a', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'l', 'd', 'x', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a',
  't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 's', 'm', 'l', 'a', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 'a', 'w', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's',
  'm', 'l', 'a', 'w', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 's', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 's', 'd',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 's', 'm', 'l', 's', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'l', 's', 'l', 'd', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's',
  'm', 'u', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 's', 'm', 'u', 'a', 'd', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 'b',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 's', 'm', 'u', 'l', 'b', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 't', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm',
  'u', 'l', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 'w', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 'l', 'w',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 's', 'm', 'u', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 's', 'm', 'u', 's', 'd', 'x', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's', 'a',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 's', 's', 'a', 't', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's', 'a', 'x', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 's', 'u', 'b',
  '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 's', 's', 'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'r', 'm', '_', 's', 't', 'c', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 't', 'c', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's',
  't', 'c', '2', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 's', 't', 'c', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 's', 'x', 't', 'a', 'b', '1', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
  's', 'x', 't', 'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 'u', 'a', 'd', 'd', '1', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'a', 'd',
  'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r',
  'm', '_', 'u', 'a', 's', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 'u', 'h', 'a', 'd', 'd', '1', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'h',
  'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 'u', 'h', 'a', 's', 'x', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'h', 's', 'a', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u',
  'h', 's', 'u', 'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'r', 'm', '_', 'u', 'h', 's', 'u', 'b', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 'a',
  'd', 'd', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 'u', 'q', 'a', 'd', 'd', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 'a', 's', 'x',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_',
  'u', 'q', 's', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'r', 'm', '_', 'u', 'q', 's', 'u', 'b', '1', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'q', 's',
  'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'r', 'm', '_', 'u', 's', 'a', 'd', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 's', 'a', 'd', 'a', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'r', 'm', '_', 'u', 's', 'a', 't', '1', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 's', 'a', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 's',
  'u', 'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'r', 'm', '_', 'u', 's', 'u', 'b', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'r', 'm', '_', 'u', 'x', 't', 'a', 'b', '1',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'r', 'm',
  '_', 'u', 'x', 't', 'b', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'b', 'p', 'f', '_', 'b', 't', 'f', '_', 't', 'y', 'p', 'e',
  '_', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b',
  'p', 'f', '_', 'c', 'o', 'm', 'p', 'a', 'r', 'e', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'f', '_', 'l', 'o', 'a', 'd', '_',
  'b', 'y', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'b', 'p', 'f', '_', 'l', 'o', 'a', 'd', '_', 'h', 'a', 'l', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'f', '_', 'l', 'o',
  'a', 'd', '_', 'w', 'o', 'r', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'b', 'p', 'f', '_', 'p', 'a', 's', 's', 't', 'h', 'r', 'o',
  'u', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b',
  'p', 'f', '_', 'p', 'r', 'e', 's', 'e', 'r', 'v', 'e', '_', 'e', 'n', 'u',
  'm', '_', 'v', 'a', 'l', 'u', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'b', 'p', 'f', '_', 'p', 'r', 'e', 's', 'e', 'r', 'v', 'e',
  '_', 'f', 'i', 'e', 'l', 'd', '_', 'i', 'n', 'f', 'o', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'f', '_', 'p', 'r', 'e', 's',
  'e', 'r', 'v', 'e', '_', 't', 'y', 'p', 'e', '_', 'i', 'n', 'f', 'o', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'p', 'f', '_', 'p',
  's', 'e', 'u', 'd', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'h', 'l', 's', 'l', '_', 'c', 'r', 'e', 'a', 't', 'e', '_', 'h', 'a',
  'n', 'd', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'b', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'a', 'b', 's', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'a', 'b', 's', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_',
  'h', '1', '6', '_', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd',
  'd', 'h', '_', 'h', '1', '6', '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
  '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_', 'l', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_', 'l', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6',
  '_', 's', 'a', 't', '_', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a',
  'd', 'd', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'h', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'h', '1', '6', '_',
  's', 'a', 't', '_', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd',
  'd', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'l', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'l', '1', '6', '_', 'h',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'l', '1',
  '6', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'h',
  '_', 'l', '1', '6', '_', 's', 'a', 't', '_', 'h', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'a', 'd', 'd', 'h', '_', 'l', '1', '6', '_', 's', 'a', 't',
  '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'a', 'd', 'd', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'a', 'd', 'd', 'p', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
  '_', 'a', 'd', 'd', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a',
  'd', 'd', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'a', 'n', 'd', 'i', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'a', 'n', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a',
  's', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'a', 's', 'r', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm', 'b', 'i', 'n', 'e', '_', 'h',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm', 'b', 'i', 'n', 'e',
  '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm', 'b', 'i',
  'n', 'e', '_', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c', 'o', 'm',
  'b', 'i', 'n', 'e', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'c',
  'o', 'm', 'b', 'i', 'n', 'e', 'i', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'c', 'o', 'm', 'b', 'i', 'n', 'e', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'm', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'm', 'a', 'x', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'm', 'a', 'x', 'u', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'm', 'a', 'x', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'm',
  'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'm', 'i', 'n', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'm', 'i', 'n', 'u', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
  '_', 'm', 'i', 'n', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'n', 'e',
  'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'n', 'e', 'g', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '2', '_', 'n', 'e', 'g', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'n', 'o', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'n', 'o', 't',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'o', 'r', 'i', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'o', 'r',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'r', 'o', 'u', 'n', 'd', 's', 'a',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 's', 'a', 't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's',
  'a', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'a', 't', 'u', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 's', 'a', 't', 'u', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '2', '_', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's',
  'u', 'b', 'h', '_', 'h', '1', '6', '_', 'h', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 'h', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 'l',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1',
  '6', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h',
  '_', 'h', '1', '6', '_', 's', 'a', 't', '_', 'h', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't',
  '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_',
  'h', '1', '6', '_', 's', 'a', 't', '_', 'l', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 's', 'u', 'b', 'h', '_', 'h', '1', '6', '_', 's', 'a', 't', '_',
  'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'l',
  '1', '6', '_', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b',
  'h', '_', 'l', '1', '6', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  's', 'u', 'b', 'h', '_', 'l', '1', '6', '_', 's', 'a', 't', '_', 'h', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'h', '_', 'l', '1', '6',
  '_', 's', 'a', 't', '_', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's',
  'u', 'b', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 'r', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 's', 'u', 'b', 's', 'a', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 's', 'v', 'a', 'd', 'd', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 's', 'v', 'a', 'd', 'd', 'h', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 's', 'v', 'a', 'd', 'd', 'u', 'h', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 's', 'v', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  's', 'v', 'a', 'v', 'g', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's',
  'v', 'n', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'v',
  's', 'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'v', 's', 'u',
  'b', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'v', 's', 'u', 'b',
  'u', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'w', 'i', 'z', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 's', 'x', 't', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 's', 'x', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 's', 'x',
  't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '2', '_', 't', 'f', 'r', 'i', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
  '_', 't', 'f', 'r', 'i', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 't', 'f',
  'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', 'p', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 't', 'f', 'r', 's', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'v', 'a', 'b', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'v', 'a', 'b', 's', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'v', 'a', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'b',
  's', 'w', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd',
  'd', 'b', '_', 'm', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a',
  'd', 'd', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'h',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'u', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '2', '_', 'v', 'a', 'd', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2',
  '_', 'v', 'a', 'd', 'd', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
  'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g',
  'h', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'h',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'b', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'v', 'a', 'v', 'g', 'u', 'h', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'v', 'a', 'v', 'g', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'v', 'a', 'v', 'g', 'u', 'w', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
  'a', 'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g',
  'w', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'a', 'v', 'g', 'w',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'b', 'e', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'b', 'g', 't', 'u',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'h', 'e', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'h', 'g', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'h', 'g', 't', 'u', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'w', 'e', 'q', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '2', '_', 'v', 'c', 'm', 'p', 'w', 'g', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'v', 'c', 'm', 'p', 'w', 'g', 't', 'u', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'v', 'c', 'o', 'n', 'j', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'v', 'm', 'a', 'x', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'a',
  'x', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x', 'u', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x', 'u', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 'm', 'a', 'x', 'u', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'v', 'm', 'a', 'x', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'v', 'm', 'i', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'i',
  'n', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n', 'u', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n', 'u', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 'm', 'i', 'n', 'u', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '2', '_', 'v', 'm', 'i', 'n', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'v', 'n', 'a', 'v', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n',
  'a', 'v', 'g', 'h', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n',
  'a', 'v', 'g', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n', 'a',
  'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n', 'a', 'v', 'g',
  'w', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'n', 'a', 'v', 'g',
  'w', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 'a', 'd', 'd', 'u',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 'a', 'd', 'd', 'u', 'b',
  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 's', 'a',
  'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 'r', 's', 'a', 'd',
  'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's',
  'u', 'b', 'b', '_', 'm', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v',
  's', 'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b',
  'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'u', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'u', 'b', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'u', 'h', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '2', '_', 'v', 's', 'u', 'b', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'v', 's', 'u', 'b', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_',
  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '2', '_', 'x', 'o', 'r', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '2', '_', 'z', 'x', 't', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '2', '_', 'z', 'x', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'a', 'n',
  'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'a', 'n', 'd', 'n', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '4', '_', 'b', 'i', 't', 's', 'p', 'l', 'i', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '4', '_', 'b', 'i', 't', 's', 'p', 'l', 'i', 't', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '4', '_', 'b', 'o', 'u', 'n', 'd', 's', 'c', 'h',
  'e', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'e',
  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'e', 'q', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', 'u', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '4', '_', 'c', 'm', 'p', 'b', 'g', 't', 'u', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '4', '_', 'c', 'm', 'p', 'h', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4',
  '_', 'c', 'm', 'p', 'h', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
  'c', 'm', 'p', 'h', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm',
  'p', 'h', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p',
  'h', 'g', 't', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'm', 'p', 'h',
  'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'o', 'm', 'b',
  'i', 'n', 'e', 'i', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'o', 'm',
  'b', 'i', 'n', 'e', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c', 'r',
  'o', 'u', 'n', 'd', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'c',
  'r', 'o', 'u', 'n', 'd', '_', 'r', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
  'm', 'o', 'd', 'w', 'r', 'a', 'p', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
  'o', 'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'o', 'r', 'n', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '4', '_', 'r', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'A', '4', '_', 'r', 'c', 'm', 'p', 'e', 'q', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '4', '_', 'r', 'c', 'm', 'p', 'n', 'e', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '4', '_', 'r', 'c', 'm', 'p', 'n', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A',
  '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'i', '_', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '4', '_', 'r', 'o', 'u', 'n', 'd', '_', 'r', 'r', '_',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 't', 'l', 'b', 'm', 'a',
  't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c', 'm', 'p', 'b',
  'e', 'q', '_', 'a', 'n', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c',
  'm', 'p', 'b', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c',
  'm', 'p', 'b', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c', 'm',
  'p', 'b', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c', 'm',
  'p', 'b', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c',
  'm', 'p', 'h', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c',
  'm', 'p', 'h', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'c',
  'm', 'p', 'h', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v',
  'c', 'm', 'p', 'w', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v',
  'c', 'm', 'p', 'w', 'g', 't', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v',
  'c', 'm', 'p', 'w', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_',
  'v', 'r', 'm', 'a', 'x', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r',
  'm', 'a', 'x', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm',
  'a', 'x', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'a',
  'x', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'u', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'u', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '4', '_', 'v', 'r', 'm', 'i', 'n', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '5', '_', 'v', 'a', 'd', 'd', 'h', 'u', 'b', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'A', '6', '_', 'v', 'c', 'm', 'p', 'b', 'e', 'q', '_', 'n', 'o', 't', 'a',
  'n', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '7', '_', 'c', 'l', 'i', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'A', '7', '_', 'c', 'r', 'o', 'u', 'n', 'd', 'd', '_', 'r', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'A', '7', '_', 'c', 'r', 'o', 'u', 'n', 'd', 'd', '_',
  'r', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'A', '7', '_', 'v', 'c', 'l', 'i', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'C', '2', '_', 'a', 'l', 'l', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '2', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'a', 'n', 'd',
  'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'a', 'n', 'y', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'C', '2', '_', 'b', 'i', 't', 's', 'c', 'l', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'C', '2', '_', 'b', 'i', 't', 's', 'c', 'l', 'r', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'C', '2', '_', 'b', 'i', 't', 's', 's', 'e', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '2', '_', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c',
  'm', 'p', 'e', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p',
  'e', 'q', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 'e',
  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 'e', 'u', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'C', '2', '_', 'c', 'm', 'p', 'g', 't', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '2', '_', 'c', 'm', 'p', 'g', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_',
  'c', 'm', 'p', 'g', 't', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm',
  'p', 'g', 't', 'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p',
  'g', 't', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'l',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'c', 'm', 'p', 'l', 't', 'u', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'C', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '2', '_', 'm', 'u', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'm', 'u', 'x',
  'i', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'm', 'u', 'x', 'i', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'C', '2', '_', 'm', 'u', 'x', 'r', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'C', '2', '_', 'n', 'o', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'o', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'C', '2', '_', 'o', 'r', 'n', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '2', '_', 'p', 'x', 'f', 'e', 'r', '_', 'm', 'a', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'C', '2', '_', 't', 'f', 'r', 'p', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_',
  't', 'f', 'r', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'v', 'i', 't',
  'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '2', '_', 'v', 'm', 'u', 'x',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'C', '2', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '4', '_', 'a', 'n', 'd', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4',
  '_', 'a', 'n', 'd', '_', 'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4',
  '_', 'a', 'n', 'd', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'a',
  'n', 'd', '_', 'o', 'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm',
  'p', 'l', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'l',
  't', 'e', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'l', 't',
  'e', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'l', 't', 'e',
  'u', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'n', 'e', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'C', '4', '_', 'c', 'm', 'p', 'n', 'e', 'q', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'C', '4', '_', 'f', 'a', 's', 't', 'c', 'o', 'r', 'n', 'e',
  'r', '9', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'f', 'a', 's', 't', 'c', 'o',
  'r', 'n', 'e', 'r', '9', '_', 'n', 'o', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4',
  '_', 'n', 'b', 'i', 't', 's', 'c', 'l', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4',
  '_', 'n', 'b', 'i', 't', 's', 'c', 'l', 'r', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '4', '_', 'n', 'b', 'i', 't', 's', 's', 'e', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C',
  '4', '_', 'o', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_',
  'o', 'r', '_', 'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'o',
  'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'C', '4', '_', 'o', 'r', '_', 'o',
  'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd',
  '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_',
  'd', '2', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v',
  '_', 'd', 'f', '2', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n',
  'v', '_', 'd', 'f', '2', 'd', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 's', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 'u', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2',
  'u', 'd', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c',
  'o', 'n', 'v', '_', 'd', 'f', '2', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2',
  '_', 'c', 'o', 'n', 'v', '_', 'd', 'f', '2', 'u', 'w', '_', 'c', 'h', 'o',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd', 'f',
  '2', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'd',
  'f', '2', 'w', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
  'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2',
  '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'd', '_', 'c', 'h', 'o', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2',
  'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's',
  'f', '2', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v',
  '_', 's', 'f', '2', 'u', 'd', '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'u', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 's', 'f', '2', 'u', 'w',
  '_', 'c', 'h', 'o', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n',
  'v', '_', 's', 'f', '2', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o',
  'n', 'v', '_', 's', 'f', '2', 'w', '_', 'c', 'h', 'o', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'u', 'd', '2', 'd', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'u', 'd', '2', 's',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_', 'u', 'w',
  '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n', 'v', '_',
  'u', 'w', '2', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o', 'n',
  'v', '_', 'w', '2', 'd', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'c', 'o',
  'n', 'v', '_', 'w', '2', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd',
  'f', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c', 'l',
  'a', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c', 'm', 'p',
  'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c', 'm', 'p', 'g',
  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c', 'm', 'p', 'g', 't',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'c', 'm', 'p', 'u', 'o', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'i', 'm', 'm', '_', 'n', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'F', '2', '_', 'd', 'f', 'i', 'm', 'm', '_', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'F', '2', '_', 'd', 'f', 'm', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2',
  '_', 'd', 'f', 'm', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f',
  'm', 'p', 'y', 'f', 'i', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f',
  'm', 'p', 'y', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'm',
  'p', 'y', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 'm', 'p',
  'y', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 'd', 'f', 's', 'u', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'a', 'd', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'F', '2', '_', 's', 'f', 'c', 'l', 'a', 's', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'F', '2', '_', 's', 'f', 'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F',
  '2', '_', 's', 'f', 'c', 'm', 'p', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2',
  '_', 's', 'f', 'c', 'm', 'p', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_',
  's', 'f', 'c', 'm', 'p', 'u', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's',
  'f', 'f', 'i', 'x', 'u', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's',
  'f', 'f', 'i', 'x', 'u', 'p', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's',
  'f', 'f', 'i', 'x', 'u', 'p', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's',
  'f', 'f', 'm', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'f', 'm',
  'a', '_', 'l', 'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'f',
  'm', 'a', '_', 's', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'f',
  'm', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'f', 'm', 's', '_',
  'l', 'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'i', 'm', 'm',
  '_', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'i', 'm', 'm', '_',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's', 'f', 'm', 'a', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'F', '2', '_', 's', 'f', 'm', 'i', 'n', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F',
  '2', '_', 's', 'f', 'm', 'p', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'F', '2', '_', 's',
  'f', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'L', '2', '_', 'l', 'o', 'a', 'd',
  'w', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'L', '4', '_',
  'l', 'o', 'a', 'd', 'd', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'a', 'c', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'a', 'c', 'c', 'i', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
  'a', 'c', 'i', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
  'a', 'c', 'r', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
  'a', 'c', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
  'a', 'c', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm',
  'a', 'c', 's', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c',
  'm', 'a', 'c', 's', 'c', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'c', 'm', 'p', 'y', 'i', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'c', 'm', 'p', 'y', 'r', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'c', 'm', 'p', 'y', 'r', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'c', 'm', 'p', 'y', 'r', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'c', 'm', 'p', 'y', 'r', 's', 'c', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 'r', 's', 'c', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', '_', 's', '0', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', 'c', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'm', 'p', 'y', 's', 'c', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', 'c', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'c', 'n', 'a', 'c', 's', 'c',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm', 'p', 'y',
  's', 's', '_', 'a', 'c', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'd', 'p', 'm', 'p', 'y', 's', 's', '_', 'n', 'a', 'c', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm', 'p', 'y', 's', 's', '_',
  'r', 'n', 'd', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p',
  'm', 'p', 'y', 's', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'd', 'p', 'm', 'p', 'y', 'u', 'u', '_', 'a', 'c', 'c', '_', 's', '0', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm', 'p', 'y', 'u', 'u', '_', 'n',
  'a', 'c', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'd', 'p', 'm',
  'p', 'y', 'u', 'u', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'h',
  'm', 'm', 'p', 'y', 'h', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'h', 'm', 'm', 'p', 'y', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'h', 'm', 'm', 'p', 'y', 'l', '_', 'r', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'h', 'm', 'm', 'p', 'y', 'l', '_', 's', '1', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'a', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'a', 'c', 's', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'a', 'c', 's', 'i', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a',
  'c', 'h', 's', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'm', 'a', 'c', 'h', 's', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'm', 'a', 'c', 'h', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'm', 'a', 'c', 'h', 's', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_', 'r', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_', 'r', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'l', 's', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u', 'h',
  's', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a',
  'c', 'u', 'h', 's', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'm', 'a', 'c', 'u', 'h', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'm', 'a', 'c', 'u', 'h', 's', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u', 'l', 's', '_', 'r', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u', 'l', 's', '_',
  'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a', 'c', 'u',
  'l', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'a',
  'c', 'u', 'l', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'm', 'p', 'y', 'h', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'm', 'p', 'y', 'h', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'm', 'p', 'y', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'm', 'p', 'y', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'm', 'p', 'y', 'l', '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'm', 'p', 'y', 'l', '_', 'r', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'm', 'p', 'y', 'l', '_', 's', '0', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'm', 'p', 'y', 'l', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_', 'r', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_', 'r', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'h', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y', 'u', 'l',
  '_', 'r', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm', 'p', 'y',
  'u', 'l', '_', 'r', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'm',
  'p', 'y', 'u', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'm', 'p', 'y', 'u', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'n', 'a', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_',
  'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_',
  'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_',
  's', 'a', 't', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't', '_', 'h', 'h',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a',
  'c', 'c', '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't',
  '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
  'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't', '_', 'l', 'h', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_',
  's', 'a', 't', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', '_', 'a', 'c', 'c', '_', 's', 'a', 't', '_', 'l', 'l',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'a',
  'c', 'c', '_', 's', 'a', 't', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'h', '_', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'h', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'l', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'h', 'l', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'l', 'h', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'l', 'h',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'l',
  'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_',
  'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_',
  'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_',
  'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_',
  's', 'a', 't', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't', '_', 'h', 'h',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n',
  'a', 'c', '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't',
  '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
  'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't', '_', 'l', 'h', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_',
  's', 'a', 't', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', '_', 'n', 'a', 'c', '_', 's', 'a', 't', '_', 'l', 'l',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'n',
  'a', 'c', '_', 's', 'a', 't', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_', 'h', 'h', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n',
  'd', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', '_', 'r', 'n', 'd', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_', 'h', 'l', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n',
  'd', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n', 'd', '_', 'l', 'l', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'r', 'n',
  'd', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', '_', 's', 'a', 't', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'h', 'h', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a',
  't', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', '_', 's', 'a', 't', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'l', 'h', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a',
  't', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', '_', 's', 'a', 't', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'l', 'l', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a',
  't', '_', 'r', 'n', 'd', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_',
  'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'h', 'l', '_', 's', '0', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r',
  'n', 'd', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'l', 'h', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a',
  't', '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'p', 'y', '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_',
  'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  '_', 's', 'a', 't', '_', 'r', 'n', 'd', '_', 'l', 'l', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'u', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'u', 'p', '_', 's', '1', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', '_', 'u', 'p', '_', 's', '1', '_',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_',
  'a', 'c', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c',
  '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
  'y', 'd', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c', '_', 'l', 'h',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_',
  'a', 'c', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'a', 'c', 'c',
  '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
  'y', 'd', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'p', 'y', 'd', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'd', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'h', 'l', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'l', 'h', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'l', 'h',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_',
  'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  'd', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'h',
  'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
  '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a',
  'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'n', 'a', 'c', '_', 'l',
  'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
  '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'h', 'h', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n',
  'd', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'h', 'l', '_', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'h',
  'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd',
  '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'l', 'h', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'd', '_', 'r', 'n',
  'd', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'd', '_', 'r', 'n', 'd', '_', 'l', 'l', '_', 's', '1', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 's', 'm', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 's', 'u', '_', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'h',
  'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u',
  '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c',
  'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'a', 'c', 'c', '_', 'l',
  'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u',
  '_', 'a', 'c', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'u', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'h', 'h', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'h', 'l', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'h', 'l',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_',
  'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  'u', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'u', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 'u', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'h', 'h', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n',
  'a', 'c', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's', '0', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_',
  'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  'u', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'l', 'h', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'n',
  'a', 'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'p', 'y', 'u', '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', '_', 'u', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_',
  'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y',
  'u', 'd', '_', 'a', 'c', 'c', '_', 'h', 'h', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_', 'h',
  'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u',
  'd', '_', 'a', 'c', 'c', '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_', 'l', 'h',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd',
  '_', 'a', 'c', 'c', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'a', 'c', 'c', '_', 'l', 'l', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_',
  'a', 'c', 'c', '_', 'l', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 'u', 'd', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'h', 'h', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'h', 'l',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd',
  '_', 'h', 'l', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p',
  'y', 'u', 'd', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 'u', 'd', '_', 'l', 'h', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'l', 'l', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'l', 'l',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd',
  '_', 'n', 'a', 'c', '_', 'h', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'h', 'h', '_',
  's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_',
  'n', 'a', 'c', '_', 'h', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'h', 'l', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n',
  'a', 'c', '_', 'l', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'm', 'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'l', 'h', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'd', '_', 'n', 'a',
  'c', '_', 'l', 'l', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'm',
  'p', 'y', 'u', 'd', '_', 'n', 'a', 'c', '_', 'l', 'l', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'm', 'p', 'y', 'u', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'n', 'a', 'c', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'n', 'a', 'c', 'c', 'i', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 's', 'u',
  'b', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'a', 'b', 's',
  'd', 'i', 'f', 'f', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'a', 'b',
  's', 'd', 'i', 'f', 'f', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c',
  'm', 'a', 'c', '_', 's', '0', '_', 's', 'a', 't', '_', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'v', 'c', 'm', 'a', 'c', '_', 's', '0', '_', 's', 'a',
  't', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c', 'm', 'p', 'y',
  '_', 's', '0', '_', 's', 'a', 't', '_', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'v', 'c', 'm', 'p', 'y', '_', 's', '0', '_', 's', 'a', 't', '_', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c', 'm', 'p', 'y', '_', 's', '1',
  '_', 's', 'a', 't', '_', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'c',
  'm', 'p', 'y', '_', 's', '1', '_', 's', 'a', 't', '_', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'v', 'd', 'm', 'a', 'c', 's', '_', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'a', 'c', 's', '_', 's', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'p', 'y', 'r', 's', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'p', 'y', 'r', 's',
  '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'p', 'y',
  's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'd', 'm', 'p',
  'y', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a',
  'c', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a', 'c', '2', 'e',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a', 'c', '2', 'e', 's',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a', 'c', '2',
  'e', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'a',
  'c', '2', 's', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm',
  'a', 'c', '2', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v',
  'm', 'a', 'c', '2', 's', 'u', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'v', 'm', 'a', 'c', '2', 's', 'u', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'v', 'm', 'p', 'y', '2', 'e', 's', '_', 's', '0', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 'e', 's', '_', 's', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 's', '_', 's',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 's', '_',
  's', '0', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'm',
  'p', 'y', '2', 's', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v',
  'm', 'p', 'y', '2', 's', '_', 's', '1', 'p', 'a', 'c', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 's', 'u', '_', 's', '0', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'v', 'm', 'p', 'y', '2', 's', 'u', '_', 's',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'a', 'd', 'd', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'a', 'd', 'd', 'u', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'a', 'c', 'i', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'a', 'c', 'i', '_',
  's', '0', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'a',
  'c', 'r', '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c',
  'm', 'a', 'c', 'r', '_', 's', '0', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_',
  'v', 'r', 'c', 'm', 'p', 'y', 'i', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 'i', '_', 's', '0', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 'r', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 'r', '_',
  's', '0', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'c', 'm', 'p',
  'y', 's', '_', 'a', 'c', 'c', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '2',
  '_', 'v', 'r', 'c', 'm', 'p', 'y', 's', '_', 's', '1', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '2', '_', 'v', 'r', 'c', 'm', 'p', 'y', 's', '_', 's', '1', 'r', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'm', 'a', 'c', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'v', 'r', 'm', 'p', 'y', '_', 's', '0',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '2', '_', 'x', 'o', 'r', '_', 'x', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'a', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'a', 'n', 'd', 'n', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'o', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'a', 'n', 'd', '_', 'x', 'o', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '4', '_', 'c', 'm', 'p', 'y', 'i', '_', 'w', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '4', '_', 'c', 'm', 'p', 'y', 'i', '_', 'w', 'h', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'c', 'm', 'p', 'y', 'r', '_', 'w', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'c', 'm', 'p', 'y', 'r', '_', 'w', 'h', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '4', '_', 'm', 'a', 'c', '_', 'u', 'p', '_', 's', '1', '_',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'm', 'p', 'y', 'r', 'i',
  '_', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'm', 'p', 'y',
  'r', 'i', '_', 'a', 'd', 'd', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'm',
  'p', 'y', 'r', 'i', '_', 'a', 'd', 'd', 'r', '_', 'u', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'm', 'p', 'y', 'r', 'r', '_', 'a', 'd', 'd', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '4', '_', 'm', 'p', 'y', 'r', 'r', '_', 'a', 'd', 'd',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'n', 'a', 'c', '_', 'u', 'p', '_',
  's', '1', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'o', 'r',
  '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'o', 'r', '_', 'a',
  'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'o', 'r', '_', 'o', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '4', '_', 'o', 'r', '_', 'x', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '4', '_', 'p', 'm', 'p', 'y', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '4', '_', 'p', 'm', 'p', 'y', 'w', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '4', '_', 'v', 'p', 'm', 'p', 'y', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4',
  '_', 'v', 'p', 'm', 'p', 'y', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'e', 'h', '_', 'a', 'c', 'c', '_',
  's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'e',
  'h', '_', 'a', 'c', 'c', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_',
  'v', 'r', 'm', 'p', 'y', 'e', 'h', '_', 's', '0', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '4', '_', 'v', 'r', 'm', 'p', 'y', 'e', 'h', '_', 's', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'o', 'h', '_', 'a', 'c', 'c',
  '_', 's', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4', '_', 'v', 'r', 'm', 'p', 'y',
  'o', 'h', '_', 'a', 'c', 'c', '_', 's', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '4',
  '_', 'v', 'r', 'm', 'p', 'y', 'o', 'h', '_', 's', '0', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '4', '_', 'v', 'r', 'm', 'p', 'y', 'o', 'h', '_', 's', '1', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'a', 'n', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'a', 'n', 'd', 'n', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '4', '_', 'x', 'o', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '4', '_', 'x', 'o', 'r', '_', 'x', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '5', '_', 'v', 'd', 'm', 'a', 'c', 'b', 's', 'u', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '5', '_', 'v', 'd', 'm', 'p', 'y', 'b', 's', 'u', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '5', '_', 'v', 'm', 'a', 'c', 'b', 's', 'u', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5',
  '_', 'v', 'm', 'a', 'c', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5', '_',
  'v', 'm', 'p', 'y', 'b', 's', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5', '_', 'v',
  'm', 'p', 'y', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5', '_', 'v', 'r',
  'm', 'a', 'c', 'b', 's', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5', '_', 'v', 'r',
  'm', 'a', 'c', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5', '_', 'v', 'r',
  'm', 'p', 'y', 'b', 's', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '5', '_', 'v', 'r',
  'm', 'p', 'y', 'b', 'u', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '6', '_', 'v', 'a',
  'b', 's', 'd', 'i', 'f', 'f', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '6', '_', 'v',
  'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7',
  '_', 'd', 'c', 'm', 'p', 'y', 'i', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_',
  'd', 'c', 'm', 'p', 'y', 'i', 'w', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '7', '_', 'd', 'c', 'm', 'p', 'y', 'i', 'w', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'M', '7', '_', 'd', 'c', 'm', 'p', 'y', 'i', 'w', 'c', '_', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'M', '7', '_', 'd', 'c', 'm', 'p', 'y', 'r', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'M', '7', '_', 'd', 'c', 'm', 'p', 'y', 'r', 'w', '_', 'a',
  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'd', 'c', 'm', 'p', 'y', 'r',
  'w', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'd', 'c', 'm', 'p', 'y', 'r',
  'w', 'c', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'v', 'd',
  'm', 'p', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'v', 'd', 'm', 'p', 'y',
  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'w', 'c', 'm', 'p',
  'y', 'i', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'w', 'c', 'm', 'p', 'y',
  'i', 'w', '_', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'w', 'c',
  'm', 'p', 'y', 'i', 'w', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7', '_', 'w', 'c',
  'm', 'p', 'y', 'i', 'w', 'c', '_', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M',
  '7', '_', 'w', 'c', 'm', 'p', 'y', 'r', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'M', '7',
  '_', 'w', 'c', 'm', 'p', 'y', 'r', 'w', '_', 'r', 'n', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '7', '_', 'w', 'c', 'm', 'p', 'y', 'r', 'w', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'M', '7', '_', 'w', 'c', 'm', 'p', 'y', 'r', 'w', 'c', '_', 'r', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 'd', 'd', 'a', 's', 'l', '_',
  'r', 'r', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_',
  'i', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i',
  '_', 'p', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's',
  'l', '_', 'i', '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 'a', 's', 'l', '_', 'i', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'p', '_', 'o', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'p', '_', 'x',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i',
  '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_',
  'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l',
  '_', 'i', '_', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'a', 's', 'l', '_', 'i', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r', '_', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r', '_', 's', 'a',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'i', '_', 'r',
  '_', 'x', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l',
  '_', 'i', '_', 'v', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l',
  '_', 'i', '_', 'v', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l',
  '_', 'r', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_',
  'r', '_', 'p', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
  's', 'l', '_', 'r', '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'a', 's', 'l', '_', 'r', '_', 'p', '_', 'n', 'a', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'p', '_', 'o', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'p', '_',
  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r',
  '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_',
  'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l',
  '_', 'r', '_', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'a', 's', 'l', '_', 'r', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'r', '_', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'r', '_', 's', 'a',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'v',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'l', '_', 'r', '_', 'v',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i',
  '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's',
  'r', '_', 'i', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_', 'r', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'p', '_', 'r',
  'n', 'd', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'r', '_', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'r', '_',
  'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i',
  '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's',
  'r', '_', 'i', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'a', 's', 'r', '_', 'i', '_', 'r', '_', 'r', 'n', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'r', '_', 'r', 'n', 'd', '_',
  'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'a', 's', 'r', '_', 'i', '_', 's', 'v', 'w', '_', 't', 'r', 'u',
  'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'v',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'i', '_', 'v',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'p', '_',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r',
  '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's',
  'r', '_', 'r', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 'a', 's', 'r', '_', 'r', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'p', '_', 'x', 'o', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'r', '_', 'a', 'c',
  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 'r',
  '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_',
  'r', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a',
  's', 'r', '_', 'r', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 'a', 's', 'r', '_', 'r', '_', 'r', '_', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r', '_', 's', 'v', 'w', '_', 't',
  'r', 'u', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r',
  '_', 'v', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'a', 's', 'r', '_', 'r',
  '_', 'v', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'b', 'r', 'e', 'v', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'b', 'r', 'e', 'v', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'c', 'l', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l',
  '0', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'c', 'l', '1', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'c', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', 'b', 'n', 'o',
  'r', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 'l', 'b', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'c', 'l', 'r', 'b', 'i', 't', '_', 'i', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'c', 'l', 'r', 'b', 'i', 't', '_', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'c', 't', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'c', 't', '0', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'c', 't', '1', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'c', 't', '1', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'd', 'e', 'i', 'n', 't', 'e', 'r', 'l', 'e', 'a', 'v', 'e', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'u', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', 'u', '_',
  'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a', 'c',
  't', 'u', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'e', 'x', 't', 'r', 'a',
  'c', 't', 'u', 'p', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i',
  'n', 's', 'e', 'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n', 's',
  'e', 'r', 't', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n',
  's', 'e', 'r', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i', 'n', 's',
  'e', 'r', 't', 'p', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'i',
  'n', 't', 'e', 'r', 'l', 'e', 'a', 'v', 'e', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 'l', 'f', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l',
  '_', 'r', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_',
  'r', '_', 'p', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l',
  's', 'l', '_', 'r', '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'l', 's', 'l', '_', 'r', '_', 'p', '_', 'n', 'a', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'p', '_', 'o', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'p', '_',
  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r',
  '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_',
  'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'l',
  '_', 'r', '_', 'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'l', 's', 'l', '_', 'r', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'r', '_', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'v', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'l', 's', 'l', '_', 'r', '_', 'v', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'p', '_', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'p', '_',
  'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i',
  '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's',
  'r', '_', 'i', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'l', 's', 'r', '_', 'i', '_', 'p', '_', 'x', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r', '_', 'a', 'c', 'c', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'r', '_', 'a',
  'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'i', '_',
  'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r',
  '_', 'i', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l',
  's', 'r', '_', 'i', '_', 'r', '_', 'x', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'v', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'l', 's', 'r', '_', 'i', '_', 'v', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p', '_', 'a', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'p',
  '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_',
  'r', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's',
  'r', '_', 'r', '_', 'p', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 'l', 's', 'r', '_', 'r', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'l', 's', 'r', '_', 'r', '_', 'r', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'r', '_', 'a', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_', 'r', '_', 'n',
  'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_', 'r', '_',
  'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_',
  'r', '_', 'v', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'l', 's', 'r', '_',
  'r', '_', 'v', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'p', 'a', 'c', 'k', 'h', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'p', 'a', 'r', 'i', 't', 'y', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 's', 'e', 't', 'b', 'i', 't', '_', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 's', 'e', 't', 'b', 'i', 't', '_', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 's', 'h', 'u', 'f', 'f', 'e', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 's', 'h', 'u', 'f', 'f', 'e', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 's', 'h', 'u', 'f', 'f', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2',
  '_', 's', 'h', 'u', 'f', 'f', 'o', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'b', 'r', 'e', 'v', '_', 's', 't', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e', 'v', '_', 's', 't',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e',
  'v', '_', 's', 't', 'h', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'b', 'r', 'e', 'v', '_', 's', 't', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'r', 'e', 'v', '_', 's', 't', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 's', 't', 'o', 'r', 'e', 'w', '_', 'l',
  'o', 'c', 'k', 'e', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 's', 'v', 's',
  'a', 't', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 's', 'v', 's', 'a',
  't', 'h', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't', 'a', 'b', 'l',
  'e', 'i', 'd', 'x', 'b', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't', 'a', 'b', 'l', 'e', 'i', 'd',
  'x', 'd', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 't', 'a', 'b', 'l', 'e', 'i', 'd', 'x', 'h', '_',
  'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 't', 'a', 'b', 'l', 'e', 'i', 'd', 'x', 'w', '_', 'g', 'o', 'o',
  'd', 's', 'y', 'n', 't', 'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 't',
  'o', 'g', 'g', 'l', 'e', 'b', 'i', 't', '_', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 't', 'o', 'g', 'g', 'l', 'e', 'b', 'i', 't', '_', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 't', 's', 't', 'b', 'i', 't', '_', 'i', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 't', 's', 't', 'b', 'i', 't', '_', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'i', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'r', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'v', 'c', 'n', 'e', 'g', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'v', 'c', 'r', 'o', 't', 'a', 't', 'e', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'v', 'r', 'c', 'n', 'e', 'g', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'v', 'r', 'n', 'd', 'p', 'a', 'c', 'k', 'w', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'v', 'r', 'n', 'd', 'p', 'a', 'c', 'k', 'w', 'h', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'h', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'h', 'b', '_', 'n', 'o', 'p',
  'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'h',
  'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'a', 't', 'h', 'u',
  'b', '_', 'n', 'o', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_',
  'v', 's', 'a', 't', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's',
  'a', 't', 'w', 'h', '_', 'n', 'o', 'p', 'a', 'c', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '2', '_', 'v', 's', 'a', 't', 'w', 'u', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '2', '_', 'v', 's', 'a', 't', 'w', 'u', 'h', '_', 'n', 'o', 'p', 'a', 'c',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l', 'a', 't', 'r',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l', 'a', 't', 'r',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l', 'i', 'c', 'e',
  'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'p', 'l', 'i', 'c',
  'e', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'x', 't', 'b',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '2', '_', 'v', 's', 'x', 't', 'h', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'e', 'h', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'e', 'w', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'o', 'h', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'v', 't', 'r', 'u', 'n', 'o', 'w', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '2', '_', 'v', 'z', 'x', 't', 'b', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '2', '_', 'v', 'z', 'x', 't', 'h', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '4', '_', 'a', 'd', 'd', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
  '_', 'a', 'd', 'd', 'i', '_', 'a', 's', 'l', '_', 'r', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '4', '_', 'a', 'd', 'd', 'i', '_', 'l', 's', 'r', '_', 'r', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '4', '_', 'a', 'n', 'd', 'i', '_', 'a', 's', 'l',
  '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'a', 'n', 'd', 'i', '_',
  'l', 's', 'r', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'c', 'l',
  'b', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'c', 'l', 'b',
  'p', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'c', 'l', 'b',
  'p', 'n', 'o', 'r', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'e', 'x', 't',
  'r', 'a', 'c', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'e', 'x', 't', 'r',
  'a', 'c', 't', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'e', 'x',
  't', 'r', 'a', 'c', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'e', 'x',
  't', 'r', 'a', 'c', 't', 'p', '_', 'r', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4',
  '_', 'l', 's', 'l', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'n', 't', 's',
  't', 'b', 'i', 't', '_', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'n', 't',
  's', 't', 'b', 'i', 't', '_', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'o',
  'r', '_', 'a', 'n', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'o', 'r',
  '_', 'a', 'n', 'd', 'i', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'o', 'r',
  '_', 'o', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'o', 'r', 'i', '_',
  'a', 's', 'l', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'o', 'r',
  'i', '_', 'l', 's', 'r', '_', 'r', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_',
  'p', 'a', 'r', 'i', 't', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 's', 't',
  'o', 'r', 'e', 'd', '_', 'l', 'o', 'c', 'k', 'e', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '4', '_', 's', 'u', 'b', 'a', 'd', 'd', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '4', '_', 's', 'u', 'b', 'i', '_', 'a', 's', 'l', '_', 'r', 'i', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'S', '4', '_', 's', 'u', 'b', 'i', '_', 'l', 's', 'r', '_', 'r',
  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'v', 'r', 'c', 'r', 'o', 't', 'a',
  't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'v', 'r', 'c', 'r', 'o', 't',
  'a', 't', 'e', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_', 'v',
  'x', 'a', 'd', 'd', 's', 'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '4', '_',
  'v', 'x', 'a', 'd', 'd', 's', 'u', 'b', 'h', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '4', '_', 'v', 'x', 'a', 'd', 'd', 's', 'u', 'b', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '4', '_', 'v', 'x', 's', 'u', 'b', 'a', 'd', 'd', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'S', '4', '_', 'v', 'x', 's', 'u', 'b', 'a', 'd', 'd', 'h', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '4', '_', 'v', 'x', 's', 'u', 'b', 'a', 'd', 'd', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '5', '_', 'a', 's', 'r', 'h', 'u', 'b', '_', 'r',
  'n', 'd', '_', 's', 'a', 't', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't',
  'a', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_', 'a', 's', 'r', 'h', 'u', 'b',
  '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_', 'p', 'o', 'p', 'c',
  'o', 'u', 'n', 't', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '5', '_', 'v', 'a', 's',
  'r', 'h', 'r', 'n', 'd', '_', 'g', 'o', 'o', 'd', 's', 'y', 'n', 't', 'a',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'p', '_',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i',
  '_', 'p', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o',
  'l', '_', 'i', '_', 'p', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6',
  '_', 'r', 'o', 'l', '_', 'i', '_', 'p', '_', 'o', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'p', '_', 'x', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'r', '_', 'a',
  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l', '_', 'i', '_',
  'r', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'r', 'o', 'l',
  '_', 'i', '_', 'r', '_', 'n', 'a', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_',
  'r', 'o', 'l', '_', 'i', '_', 'r', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S',
  '6', '_', 'r', 'o', 'l', '_', 'i', '_', 'r', '_', 'x', 'a', 'c', 'c', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'S', '6', '_', 'v', 's', 'p', 'l', 'a', 't', 'r', 'b', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'S', '6', '_', 'v', 't', 'r', 'u', 'n', 'e', 'h', 'b',
  '_', 'p', 'p', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'S', '6', '_', 'v', 't', 'r', 'u',
  'n', 'o', 'h', 'b', '_', 'p', 'p', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'e', 'x', 't', 'r', 'a', 'c', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'e', 'x', 't', 'r', 'a', 'c', 't', 'w', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'h',
  'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'o',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'o', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'l', 'v', 's', 'p', 'l', 'a', 't', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'l', 'v', 's', 'p', 'l', 'a', 't', 'b', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'v', 's', 'p',
  'l', 'a', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'l', 'v', 's', 'p',
  'l', 'a', 't', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'l', 'v', 's', 'p', 'l', 'a', 't', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'l', 'v', 's', 'p', 'l', 'a', 't', 'w', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'a', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'a', 'n', 'd', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd',
  '_', 'a', 'n', 'd', '_', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r',
  'e', 'd', '_', 'a', 'n', 'd', '_', 'n', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'n', 'o', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'n', 'o', 't', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_',
  'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'o',
  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r',
  'e', 'd', '_', 'o', 'r', '_', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p',
  'r', 'e', 'd', '_', 'o', 'r', '_', 'n', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 's', 'c', 'a', 'l', 'a',
  'r', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 's',
  'c', 'a', 'l', 'a', 'r', '2', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'p', 'r', 'e', 'd', '_', 's', 'c', 'a', 'l', 'a', 'r', '2',
  'v', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 's',
  'c', 'a', 'l', 'a', 'r', '2', 'v', '2', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'x', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'p', 'r', 'e', 'd', '_', 'x', 'o', 'r', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 's', 'h', 'u', 'f', 'f',
  'e', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 's', 'h', 'u', 'f', 'f',
  'e', 'q', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  's', 'h', 'u', 'f', 'f', 'e', 'q', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  's', 'h', 'u', 'f', 'f', 'e', 'q', 'w', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', '6', 'm', 'p', 'y', 'h', 'u', 'b', 's', '1',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', '6', 'm', 'p', 'y', 'h', 'u',
  'b', 's', '1', '0', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', '6', 'm', 'p', 'y', 'h', 'u', 'b', 's', '1', '0', '_', 'v', 'x',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', '6', 'm', 'p', 'y', 'h', 'u',
  'b', 's', '1', '0', '_', 'v', 'x', 'x', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', '6', 'm', 'p', 'y', 'v', 'u', 'b', 's', '1',
  '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', '6', 'm', 'p', 'y', 'v', 'u',
  'b', 's', '1', '0', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', '6', 'm', 'p', 'y', 'v', 'u', 'b', 's', '1', '0', '_', 'v', 'x',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', '6', 'm', 'p', 'y', 'v', 'u',
  'b', 's', '1', '0', '_', 'v', 'x', 'x', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'n', 'q', 'p', 'r',
  'e', 'd', '_', 'a', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3',
  '2', 'b', '_', 'n', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_',
  'n', 't', '_', 'n', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'n', 't', '_', 'n', 'q',
  'p', 'r', 'e', 'd', '_', 'a', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'n', 't', '_', 'q', 'p',
  'r', 'e', 'd', '_', 'a', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S',
  '3', '2', 'b', '_', 'n', 't', '_', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'S', '3',
  '2', 'b', '_', 'q', 'p', 'r', 'e', 'd', '_', 'a', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'S', '3', '2', 'b', '_', 'q', 'p', 'r', 'e', 'd', '_',
  'a', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'b', 's', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'b', 's', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 'b', 's', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'b', 's', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 'b', 's', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'b', '_', 's', 'a', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'b', '_', 's', 'a', 't', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's',
  'd', 'i', 'f', 'f', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b',
  's', 'd', 'i', 'f', 'f', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'b',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b',
  's', 'd', 'i', 'f', 'f', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'b', 's', 'd', 'i', 'f', 'f', 'u', 'h', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'd', 'i', 'f', 'f',
  'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'b', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'h',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'b',
  's', 'h', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'b', 's', 'h', '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'b', 's', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'b', 's', 'w', '_', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'b', 's', 'w', '_', 's', 'a', 't', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_',
  'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 'h',
  'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'd', 'd', '_', 'h', 'f', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', '_', 'h', 'f', '_', 'h', 'f', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 'q', 'f', '1',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 'q', 'f',
  '1', '6', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'd', 'd', '_', 'q', 'f', '1', '6', '_', 'm', 'i', 'x', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 'q', 'f', '1', '6', '_', 'm',
  'i', 'x', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'd', 'd', '_', 'q', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', '_', 'q', 'f', '3', '2', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 'q', 'f', '3', '2',
  '_', 'm', 'i', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  '_', 'q', 'f', '3', '2', '_', 'm', 'i', 'x', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 's', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 's', 'f', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 's',
  'f', '_', 'b', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  '_', 's', 'f', '_', 'b', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', '_', 's', 'f', '_', 'h', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', '_', 's', 'f', '_', 'h', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', '_', 's', 'f', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'd', 'd', '_', 's', 'f', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '_', 'd', 'v', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', '_', 'd', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b',
  'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', 'n',
  'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'd', 'd', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  'b', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'd', 'd', 'b', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'd', 'd', 'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', 's', 'a', 't', '_', 'd', 'v',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'b', 's', 'a', 't',
  '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'c', 'a', 'r', 'r', 'y', 's', 'a',
  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'd', 'd', 'c', 'l', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'd', 'd', 'c', 'l', 'b', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'c', 'l', 'b', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'c', 'l', 'b', 'w', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', '_', 'd', 'v',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', '_', 'd', 'v',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', 'h', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  'h', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', 'h', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'd', 'd', 'h', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'd', 'd', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'd', 'd', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 's', 'a', 't', '_',
  'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 's',
  'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 'd', 'd', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', 'h', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 'w', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'h', 'w', '_', 'a', 'c', 'c',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', 'u', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  'u', 'b', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', 'u', 'b', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 'h', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  'u', 'b', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', 'u', 'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', 'a', 't', '_', 'd', 'v',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', 'a',
  't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'd', 'd', 'u', 'b', 'u', 'b', 'b', '_', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'b', 'u', 'b', 'b',
  '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 's',
  'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', 'u', 'h', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'h',
  'w', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', 'u', 'h', 'w', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'w', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'u', 'w', 's', 'a', 't',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd',
  'd', 'u', 'w', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'd', 'd', 'u', 'w', 's', 'a', 't', '_', 'd', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', '_',
  'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', '_',
  'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'd', 'd', 'w', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'd', 'd', 'w', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 'd', 'd', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'd', 'd', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 's', 'a', 't', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd', 'w', 's', 'a',
  't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'd', 'd',
  'w', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'b', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'b', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'l', 'i', 'g', 'n', 'b', 'i',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'n', 'q',
  'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'n', 'q',
  'r', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'n', 'd', 'n', 'q', 'r', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'n', 'd', 'n', 'q', 'r', 't', '_', 'a', 'c', 'c',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n',
  'd', 'q', 'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd',
  'q', 'r', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'n', 'd', 'q', 'r', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'n', 'd', 'q', 'r', 't', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd',
  'v', 'n', 'q', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd',
  'v', 'n', 'q', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'n', 'd', 'v', 'q', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'n', 'd', 'v', 'q', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v', 'r', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 'n', 'd', 'v', 'r', 't', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v', 'r', 't', '_',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'n', 'd', 'v',
  'r', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'l', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 'l', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'l', 'h', '_', 'a', 'c', 'c', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'h', 'v',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'h', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w', '_',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'l', 'w',
  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 's', 'l', 'w', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 's', 'l', 'w', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 'r', '_', 'i', 'n', 't', 'o', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'r', '_', 'i', 'n', 't', 'o', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', '_', 'a',
  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', '_',
  'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'h', 'b', 'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'b', 'r', 'n', 'd', 's', 'a',
  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  's', 'r', 'h', 'b', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 's', 'r', 'h', 'b', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'u', 'b', 'r', 'n', 'd',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h',
  'u', 'b', 'r', 'n', 'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'u', 'b', 's', 'a', 't',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'h', 'u', 'b', 's',
  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 's', 'r', 'h', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's',
  'r', 'h', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'u', 'h', 'u', 'b', 'r', 'n', 'd', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'h', 'u', 'b', 'r',
  'n', 'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 'r', 'u', 'h', 'u', 'b', 's', 'a', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'h', 'u', 'b', 's', 'a',
  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  's', 'r', 'u', 'w', 'u', 'h', 'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'w', 'u', 'h', 'r', 'n', 'd',
  's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'u', 'w', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'r', 'u', 'w', 'u', 'h', 's', 'a', 't', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
  'v', 'u', 'h', 'u', 'b', 'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'r', 'v', 'u', 'h', 'u', 'b', 'r', 'n', 'd',
  's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'v', 'u', 'h', 'u', 'b', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'v', 'u', 'h', 'u', 'b', 's', 'a',
  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  's', 'r', 'v', 'w', 'u', 'h', 'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'v', 'w', 'u', 'h', 'r', 'n', 'd',
  's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'v', 'w', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'r', 'v', 'w', 'u', 'h', 's', 'a', 't', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w',
  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r',
  'w', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 'r', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', 'r', 'n', 'd', 's', 'a', 't',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'h', 'r', 'n',
  'd', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 's', 'r', 'w', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 'r', 'w', 'h', 's', 'a', 't', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'u', 'h',
  'r', 'n', 'd', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  's', 'r', 'w', 'u', 'h', 'r', 'n', 'd', 's', 'a', 't', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w', 'u', 'h',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 'r', 'w',
  'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 'r', 'w', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 'r', 'w', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g', 'n', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 's', 's', 'i', 'g', 'n', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g', 'n', '_', 'f',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 's', 's', 'i', 'g', 'n',
  '_', 'f', 'p', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 's', 'i', 'g', 'n', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 's', 's', 'i', 'g', 'n', 'p', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'a', 'v', 'g', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'b', 'r', 'n', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'b', 'r', 'n', 'd', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'h', 'r', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'h', 'r', 'n',
  'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a',
  'v', 'g', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g',
  'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'v', 'g', 'u', 'b', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'v', 'g', 'u', 'b', 'r', 'n', 'd', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'h', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'h', 'r', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'u', 'h', 'r',
  'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'a', 'v', 'g', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v',
  'g', 'u', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'a', 'v', 'g', 'u', 'w', 'r', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'a', 'v', 'g', 'u', 'w', 'r', 'n', 'd', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'w', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'w', 'r', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'a', 'v', 'g', 'w', 'r', 'n', 'd', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'l', '0',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'l', '0', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'l', '0', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'l', '0', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'm', 'b', 'i',
  'n', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'm', 'b', 'i',
  'n', 'e', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'c', 'o', 'n', 'v', '_', 'h', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'c', 'o', 'n', 'v', '_', 'h', '_', 'h', 'f', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 'h',
  'f', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'n', 'v',
  '_', 'h', 'f', '_', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 'h', 'f', '_', 'q', 'f', '1', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 'h', 'f',
  '_', 'q', 'f', '1', '6', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 'h', 'f', '_', 'q', 'f', '3', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 'h', 'f',
  '_', 'q', 'f', '3', '2', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 's', 'f', '_', 'q', 'f', '3', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 's', 'f',
  '_', 'q', 'f', '3', '2', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 's', 'f', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'c', 'o', 'n', 'v', '_', 's', 'f', '_', 'w', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'o', 'n',
  'v', '_', 'w', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c',
  'o', 'n', 'v', '_', 'w', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'b', '_', 'h', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'b', '_', 'h', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v',
  't', '_', 'b', 'f', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'c', 'v', 't', '_', 'b', 'f', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'h', '_', 'h', 'f',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'h', '_', 'h',
  'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c',
  'v', 't', '_', 'h', 'f', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'c', 'v', 't', '_', 'h', 'f', '_', 'b', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'h', 'f', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'h', 'f', '_', 'h',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v',
  't', '_', 'h', 'f', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'c', 'v', 't', '_', 'h', 'f', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'h', 'f', '_', 'u',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'h', 'f',
  '_', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'c', 'v', 't', '_', 'h', 'f', '_', 'u', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'c', 'v', 't', '_', 'h', 'f', '_', 'u', 'h', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 's',
  'f', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't',
  '_', 's', 'f', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'c', 'v', 't', '_', 'u', 'b', '_', 'h', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'c', 'v', 't', '_', 'u', 'b', '_', 'h', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'c', 'v',
  't', '_', 'u', 'h', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'c', 'v', 't', '_', 'u', 'h', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'd', '0', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', '0', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', 'd', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'd',
  '0', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
  'e', 'a', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'a',
  'l', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'd', 'e', 'a', 'l', 'b', '4', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'd', 'e', 'a', 'l', 'b', '4', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'd', 'e', 'a', 'l', 'h', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'v', 'd', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'a', 'l', 'v', 'd', 'd', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'l', 't',
  'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'e', 'l', 't', 'a', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p',
  'y', '_', 's', 'f', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'd', 'm', 'p', 'y', '_', 's', 'f', '_', 'h', 'f', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', '_', 's', 'f',
  '_', 'h', 'f', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'd', 'm', 'p', 'y', '_', 's', 'f', '_', 'h', 'f', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p',
  'y', 'b', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p',
  'y', 'b', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'a',
  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'd', 'v', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'd', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y',
  'b', 'u', 's', '_', 'd', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'd', 'm', 'p', 'y', 'b', 'u', 's', '_', 'd', 'v', '_', 'a',
  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'd', 'm', 'p', 'y', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
  'm', 'p', 'y', 'h', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
  'm', 'p', 'y', 'h', 'b', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'd', 'm', 'p', 'y', 'h', 'b', '_', 'd', 'v', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'b', '_',
  'd', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd',
  'm', 'p', 'y', 'h', 'b', '_', 'd', 'v', '_', 'a', 'c', 'c', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h',
  'i', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p',
  'y', 'h', 'i', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'i', 's', 'a', 't', '_', 'a',
  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h',
  'i', 's', 'a', 't', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'a', 't',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm',
  'p', 'y', 'h', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'a', 't', '_', 'a', 'c', 'c',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm',
  'p', 'y', 'h', 's', 'u', 'i', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 'i', 's', 'a', 't', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y',
  'h', 's', 'u', 'i', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 'i', 's', 'a', 't',
  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 's', 'a', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 's', 'a', 't',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm',
  'p', 'y', 'h', 's', 'u', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 's', 'u', 's', 'a', 't',
  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', 'm', 'p', 'y', 'h', 'v', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'd', 'm', 'p', 'y', 'h', 'v', 's', 'a', 't', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 'm', 'p', 'y',
  'h', 'v', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'd', 'm', 'p', 'y', 'h', 'v', 's', 'a', 't', '_', 'a', 'c', 'c',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 's',
  'a', 'd', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'd', 's', 'a',
  'd', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'd', 's', 'a', 'd', 'u', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'd', 's', 'a', 'd', 'u', 'h', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b', '_', 'a', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'b', '_', 'a', 'n',
  'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e',
  'q', 'b', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q',
  'b', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'e', 'q', 'b', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'e', 'q', 'b', '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'e', 'q', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'a', 'n', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'a', 'n', 'd', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'o', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h', '_', 'o', 'r', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h',
  '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'h',
  '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'e', 'q', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q',
  'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e',
  'q', 'w', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'e',
  'q', 'w', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'e', 'q', 'w', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'x', 'o', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'e', 'q', 'w', '_', 'x', 'o', 'r', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'm', 'a',
  'x', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'm', 'a',
  'x', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'f', 'm', 'a', 'x', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'f', 'm', 'a', 'x', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'm', 'i', 'n', '_', 'h', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'm', 'i', 'n', '_', 'h', 'f', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'm', 'i',
  'n', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'm', 'i',
  'n', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'f', 'n', 'e', 'g', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'f', 'n', 'e', 'g', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'n', 'e', 'g', '_', 's', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'f', 'n', 'e', 'g', '_', 's', 'f', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't',
  'h', 'e', 'r', 'm', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a',
  't', 'h', 'e', 'r', 'm', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', 'q',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a',
  't', 'h', 'e', 'r', 'm', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 'a', 't', 'h', 'e', 'r', 'm', 'h', 'w', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'h',
  'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e',
  'r', 'm', 'h', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e', 'r', 'm', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't', 'h', 'e',
  'r', 'm', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 'a', 't',
  'h', 'e', 'r', 'm', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'g', 't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'g', 't', 'b', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'g', 't', 'b', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_', 'o', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_', 'o', 'r', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_', 'x', 'o',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', '_', 'x', 'o',
  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 'b', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
  'b', 'f', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 'b', 'f', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'g', 't', 'b', 'f', '_', 'o', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'g', 't', 'b', 'f', '_', 'o', 'r', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', 'f', '_',
  'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'b', 'f',
  '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'g', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 'h', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 'h', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'g', 't', 'h', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'h', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', '_', 'x', 'o', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', '_', 'x', 'o', 'r', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', 'f', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', 'f',
  '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h',
  'f', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'h', 'f', '_', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'h', 'f', '_', 'o', 'r', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', 'f', '_', 'x', 'o',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'h', 'f', '_', 'x',
  'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 's',
  'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 's', 'f', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 's', 'f', '_', 'a', 'n', 'd', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 's', 'f', '_', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 's', 'f', '_', 'o', 'r', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 's', 'f',
  '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 's',
  'f', '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'g', 't', 'u', 'b', '_', 'a', 'n', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'u', 'b', '_', 'a', 'n', 'd', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b', '_', 'o',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'b', '_', 'o',
  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 'u', 'b', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 'u', 'b', '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h', '_', 'a', 'n', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'h', '_', 'a', 'n', 'd', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u',
  'h', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u',
  'h', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'g', 't', 'u', 'h', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'g', 't', 'u', 'h', '_', 'x', 'o', 'r', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', 'a', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', 'a',
  'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 'u', 'w', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'g', 't', 'u', 'w', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', 'x', 'o', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'g', 't', 'u', 'w', '_', 'x', 'o', 'r', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', 'a', 'n',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't', 'w', '_', 'a', 'n',
  'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g',
  't', 'w', '_', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'g', 't',
  'w', '_', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'g', 't', 'w', '_', 'x', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'g', 't', 'w', '_', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'i', 'n', 's', 'e', 'r', 't', 'w', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'i', 'n', 's', 'e', 'r', 't', 'w',
  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l',
  'a', 'l', 'i', 'g', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l',
  'a', 'l', 'i', 'g', 'n', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'l', 'a', 'l', 'i', 'g', 'n', 'b', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'l', 'a', 'l', 'i', 'g', 'n', 'b', 'i', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'b', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'h', 'v', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'h', 'v', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'w', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'w', 'v', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 's', 'r', 'w', 'v', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', '4', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't',
  'v', 'v', 'b', '_', 'n', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l',
  'u', 't', 'v', 'v', 'b', '_', 'n', 'm', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_', 'o', 'r',
  'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v',
  'v', 'b', '_', 'o', 'r', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', '_', 'o', 'r',
  'a', 'c', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't',
  'v', 'v', 'b', '_', 'o', 'r', 'a', 'c', 'c', 'i', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'v', 'b', 'i',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u',
  't', 'v', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't',
  'v', 'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'l', 'u', 't', 'v', 'w', 'h', '_', 'n', 'm', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'l', 'u', 't', 'v', 'w', 'h', '_', 'n', 'm', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w',
  'h', '_', 'o', 'r', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'l', 'u', 't', 'v', 'w', 'h', '_', 'o', 'r', 'a', 'c', 'c', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't', 'v', 'w',
  'h', '_', 'o', 'r', 'a', 'c', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'l', 'u', 't', 'v', 'w', 'h', '_', 'o', 'r', 'a', 'c', 'c', 'i', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't',
  'v', 'w', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'l', 'u', 't',
  'v', 'w', 'h', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'a', 'x', '_', 'b', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'a', 'x', '_', 'b', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'a', 'x', '_', 'h', 'f', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'a', 'x', '_', 'h', 'f', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', '_', 's', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', '_', 's', 'f', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'b', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'b', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'u', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'a', 'x', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'i', 'n', '_', 'b',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'i', 'n', '_', 'b', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'i',
  'n', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'i', 'n',
  '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'i', 'n', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'i', 'n', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'i', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'i', 'n', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'i', 'n', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'i', 'n', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'i', 'n', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'i', 'n', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'i', 'n', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'i', 'n', 'u', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'i', 'n', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'i', 'n', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', '_', 'a', 'c',
  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 's',
  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'a', 'b', 'u', 's', 'v', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u',
  'u', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'a', 'b', 'u', 'u', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', 'v', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'b', 'u', 'u', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h', 'b', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a',
  'h', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'a', 'h', 'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h', 'h', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a', 'h', 'h', 's', 'a', 't',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'a', 'u', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a',
  'u', 'h', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'a', 'u', 'h', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'a', 'u', 'h', 'b', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'a',
  'u', 'h', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'a', 'u', 'h', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 's', 'u', 'h', 'u', 'h',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 's', 'u',
  'h', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'h', 'f', '_', 'h', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'h', 'f', '_', 'h', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', '_', 'h', 'f', '_', 'h', 'f', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'h', 'f', '_', 'h', 'f', '_', 'a',
  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', '_', 'q', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', '_', 'q', 'f', '1', '6', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f', '1', '6',
  '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_',
  'q', 'f', '1', '6', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f', '1', '6', '_', 'm',
  'i', 'x', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', '_', 'q', 'f', '1', '6', '_', 'm', 'i', 'x', '_', 'h', 'f', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_',
  'q', 'f', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  '_', 'q', 'f', '3', '2', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f', '3', '2', '_', 'h', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f', '3', '2',
  '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', '_', 'q', 'f', '3', '2', '_', 'm', 'i', 'x', '_', 'h',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f',
  '3', '2', '_', 'm', 'i', 'x', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f', '3', '2',
  '_', 'q', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', '_', 'q', 'f', '3', '2', '_', 'q', 'f', '1', '6', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 'q', 'f',
  '3', '2', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', '_', 'q', 'f', '3', '2', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 's', 'f', '_', 'b',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 's', 'f',
  '_', 'b', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', '_', 's', 'f', '_', 'b', 'f', '_', 'a', 'c', 'c', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 's', 'f', '_', 'b',
  'f', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', '_', 's', 'f', '_', 'h', 'f', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', '_', 's', 'f', '_', 'h', 'f', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  '_', 's', 'f', '_', 'h', 'f', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', '_', 's', 'f', '_', 'h', 'f', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', '_', 's', 'f', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', '_', 's', 'f', '_', 's', 'f', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b',
  'u', 's', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'b', 'u', 's', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', 'v', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'u', 's', 'v', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'b', 'u', 's', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', 'b', 'u', 's', 'v', '_', 'a', 'c', 'c', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'v',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'b',
  'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'b', 'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'e', 'w', 'u', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'e', 'w', 'u', 'h', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'e', 'w',
  'u', 'h', '_', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'e', 'w', 'u', 'h', '_', '6', '4', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'h', 's', 'a', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', 'h', 's', 'a', 't', '_', 'a', 'c', 'c', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 's',
  'r', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 's',
  'r', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', 'h', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'h', 's', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', 'h', 'u', 's', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'y', 'h', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 'u', 's', '_', 'a', 'c',
  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 'u', 's',
  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'y', 'h', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', 'h', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', 'h', 'v', '_', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'h', 'v', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'h', 'v', 's', 'r', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'h', 'v', 's', 'r', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'o', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'o', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w',
  'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'i', 'e', 'w', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
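  // NUL ('\000')-separated table of GCC-style builtin names, likely
  // TableGen-generated. Each HVX (V6) entry names the 64-byte vector
  // mode and is immediately followed by its "_128B" variant, the same
  // operation in 128-byte vector mode.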
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'u', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'u',
  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'i', 'e', 'w', 'u', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'e', 'w', 'u', 'h', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'i', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'i', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', 'i', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'y', 'i', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'h', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'h', 'b', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'i', 'h', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', 'i', 'h', 'b', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'o', 'w', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'o', 'w', 'h',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'i', 'w', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'i', 'w', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', 'i', 'w', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'b', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y',
  'i', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i',
  'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', 'i', 'w', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'h', '_', 'a', 'c', 'c', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i',
  'w', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i',
  'w', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', 'i', 'w', 'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'i', 'w', 'u', 'b', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'o', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'o', 'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', '6', '4', '_', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_',
  '6', '4', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 'r', 'n', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 'r',
  'n', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'm', 'p', 'y', 'o', 'w', 'h', '_', 'r', 'n', 'd', '_', 's', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_',
  'r', 'n', 'd', '_', 's', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w', 'h', '_', 's', 'a',
  'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'o', 'w',
  'h', '_', 's', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'y', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', '_', 'a', 'c', 'c', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'u', 'b', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'u', 'b', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'm', 'p', 'y', 'u', 'b', 'v', '_', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'b', 'v', '_', 'a', 'c', 'c',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u',
  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'u', 'h', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'm', 'p', 'y', 'u', 'h', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'e', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'e', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u',
  'h', 'e', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm',
  'p', 'y', 'u', 'h', 'e', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h',
  'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'p',
  'y', 'u', 'h', 'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'm', 'p', 'y', 'u', 'h', 'v', 's', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'u', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'm', 'u', 'x', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'b', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'h', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'u',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g', 'u', 'b',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a',
  'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'a', 'v', 'g',
  'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n',
  'o', 'r', 'm', 'a', 'm', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'n', 'o', 'r', 'm', 'a', 'm', 't', 'h', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'n', 'o', 'r', 'm', 'a', 'm', 't', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'o', 'r', 'm', 'a', 'm', 't', 'w',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'o',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'n', 'o', 't', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'o', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'e', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'e', 'b', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'e', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'e', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k',
  'h', 'b', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p',
  'a', 'c', 'k', 'h', 'b', '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'h', 'u', 'b', '_',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k',
  'h', 'u', 'b', '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'o', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'o', 'b', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'o', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'o', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k',
  'w', 'h', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p',
  'a', 'c', 'k', 'w', 'h', '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k', 'w', 'u', 'h', '_',
  's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'a', 'c', 'k',
  'w', 'u', 'h', '_', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'p', 'o', 'p', 'c', 'o', 'u', 'n', 't', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'o', 'p', 'c', 'o', 'u', 'n', 't',
  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p',
  'r', 'e', 'f', 'i', 'x', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'p', 'r', 'e', 'f', 'i', 'x', 'q', 'b', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'p', 'r', 'e', 'f', 'i', 'x', 'q', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'r', 'e', 'f', 'i', 'x', 'q', 'h',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p', 'r',
  'e', 'f', 'i', 'x', 'q', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'p',
  'r', 'e', 'f', 'i', 'x', 'q', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 'd', 'e', 'l', 't', 'a', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'r', 'd', 'e', 'l', 't', 'a', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 'b',
  '_', 'r', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'b', 'u', 'b', '_', 'r', 't', 't', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 'b', '_', 'r',
  't', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'm', 'p', 'y', 'b', 'u', 'b', '_', 'r', 't', 't', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'b', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'b', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', '_', 'a',
  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'r', 'm', 'p', 'y', 'b', 'u', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'i', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'i',
  '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'b', 'u', 's', 'i', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'v',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's',
  'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'm', 'p', 'y', 'b', 'u', 's', 'v', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'u', 's', 'v', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'm', 'p', 'y', 'b', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm',
  'p', 'y', 'b', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'r', 'm', 'p', 'y', 'b', 'v', '_', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'b', 'v', '_', 'a', 'c', 'c',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm',
  'p', 'y', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'r', 'm', 'p', 'y', 'u', 'b', '_', 'a', 'c', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'u', 'b', '_', 'r', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'r', 'm', 'p', 'y', 'u', 'b', '_', 'r', 't', 't', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', '_',
  'r', 't', 't', '_', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'r', 'm', 'p', 'y', 'u', 'b', '_', 'r', 't', 't', '_', 'a', 'c', 'c', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'u', 'b', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p',
  'y', 'u', 'b', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'i', '_', 'a', 'c', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'i', '_', 'a',
  'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'r', 'm', 'p', 'y', 'u', 'b', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'r', 'm', 'p', 'y', 'u', 'b', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b', 'v', '_', 'a', 'c',
  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'm', 'p', 'y', 'u', 'b',
  'v', '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'r', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'o', 'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  'r', 'o', 't', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 't',
  'r', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'o', 'u', 'n', 'd', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'o', 'u', 'n', 'd', 'h', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'h', 'u', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'h', 'u', 'b', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n',
  'd', 'u', 'h', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o',
  'u', 'n', 'd', 'u', 'h', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'u', 'w', 'u', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'u', 'w', 'u',
  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'o', 'u', 'n', 'd', 'w', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r',
  'o', 'u', 'n', 'd', 'w', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'w', 'u', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 'o', 'u', 'n', 'd', 'w', 'u', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 's', 'a', 'd',
  'u', 'b', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'r', 's', 'a', 'd',
  'u', 'b', 'i', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'r', 's', 'a', 'd', 'u', 'b', 'i', '_', 'a', 'c', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'r', 's', 'a', 'd', 'u', 'b', 'i', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'a', 't', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'a', 't',
  'd', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'a', 't', 'h', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'a', 't', 'h', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'a', 't', 'u', 'w', 'u', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'a', 't', 'u', 'w', 'u', 'h', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'a', 't', 'w', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'a', 't', 'w', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c',
  'a', 't', 't', 'e', 'r', 'm', 'h', '_', 'a', 'd', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', '_', 'a',
  'd', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'q', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't',
  't', 'e', 'r', 'm', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'w', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm',
  'h', 'w', '_', 'a', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'w', '_', 'a', 'd', 'd', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't',
  't', 'e', 'r', 'm', 'h', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'h', 'w', 'q', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e',
  'r', 'm', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't',
  't', 'e', 'r', 'm', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'w', '_', 'a', 'd',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e',
  'r', 'm', 'w', '_', 'a', 'd', 'd', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r', 'm', 'w', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'c', 'a', 't', 't', 'e', 'r',
  'm', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f',
  'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'e',
  'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'h', 'u', 'f', 'f', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h',
  'u', 'f', 'f', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'h', 'u', 'f', 'f', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'h', 'u', 'f', 'f', 'e', 'b', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f', 'h', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f',
  'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'f',
  'o', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'h', 'u', 'f', 'f', 'v', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'h', 'u', 'f', 'f', 'v', 'd', 'd', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'o', 'e', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u', 'f', 'o', 'e', 'b', '_',
  '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u',
  'f', 'o', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'h', 'u',
  'f', 'o', 'e', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'h', 'u', 'f', 'o', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'h', 'u', 'f', 'o', 'h', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'h', 'f', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', '_', 'h', 'f', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'h', 'f', '_',
  'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'h',
  'f', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'u', 'b', '_', 'q', 'f', '1', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'u', 'b', '_', 'q', 'f', '1', '6', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'q', 'f',
  '1', '6', '_', 'm', 'i', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'u', 'b', '_', 'q', 'f', '1', '6', '_', 'm', 'i', 'x', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'q', 'f',
  '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'q',
  'f', '3', '2', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'u', 'b', '_', 'q', 'f', '3', '2', '_', 'm', 'i', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 'q', 'f', '3', '2', '_',
  'm', 'i', 'x', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'u', 'b', '_', 's', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', '_', 's', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', '_', 's', 'f', '_', 'b', 'f', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 's', 'f', '_', 'b', 'f',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
  'b', '_', 's', 'f', '_', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', '_', 's', 'f', '_', 'h', 'f', '_', '1', '2', '8', 'B', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 's', 'f', '_', 's',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', '_', 's', 'f',
  '_', 's', 'f', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'u', 'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
  'b', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', 'b', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', 'b', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 'n', 'q', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', 'b', 'n', 'q', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 'q', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 's', 'a',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'b', 's', 'a',
  't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'u', 'b', 'b', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'u', 'b', 'b', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', '_', 'd',
  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', '_', 'd',
  'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'u', 'b', 'h', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
  'b', 'h', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'u', 'b', 'h', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', 'h', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'u', 'b', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'u', 'b', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h', 's', 'a', 't',
  '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'h',
  's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'u', 'b', 'h', 'w', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 'h', '_', '1', '2', '8', 'B',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 's', 'a',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 's',
  'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', 'u', 'b', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 's', 'a', 't', '_', 'd', 'v',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
  'b', 'u', 'b', 'u', 'b', 'b', '_', 's', 'a', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'u', 'b', 'u', 'b', 'u', 'b', 'b', '_', 's', 'a', 't',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
  'b', 'u', 'h', 's', 'a', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'u', 'b', 'u', 'h', 's', 'a', 't', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 's', 'a', 't', '_', 'd',
  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 's',
  'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'u', 'b', 'u', 'h', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'u', 'b', 'u', 'h', 'w', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'w', 's', 'a', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'u', 'w', 's', 'a', 't',
  '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u',
  'b', 'u', 'w', 's', 'a', 't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'u', 'b', 'u', 'w', 's', 'a', 't', '_', 'd', 'v', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', '_',
  'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', '_',
  'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v',
  's', 'u', 'b', 'w', 'n', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's',
  'u', 'b', 'w', 'n', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 's', 'u', 'b', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 's', 'u', 'b', 'w', 'q', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', 'w', 's', 'a', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 's', 'u', 'b', 'w', 's', 'a', 't', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b', 'w', 's', 'a',
  't', '_', 'd', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 's', 'u', 'b',
  'w', 's', 'a', 't', '_', 'd', 'v', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 's', 'w', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 's', 'w', 'a', 'p', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 't', 'm', 'p', 'y', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', '_', 'a', 'c', 'c', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G',
  'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', '_', 'a', 'c',
  'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't',
  'm', 'p', 'y', 'b', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't',
  'm', 'p', 'y', 'b', 'u', 's', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', 'u', 's', '_', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'b', 'u', 's',
  '_', 'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 't', 'm', 'p', 'y', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 't', 'm', 'p', 'y', 'h', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'h', 'b', '_', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'V', '6', '_', 'v', 't', 'm', 'p', 'y', 'h', 'b', '_',
  'a', 'c', 'c', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'u', 'n', 'p', 'a', 'c', 'k', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'u', 'n', 'p', 'a', 'c', 'k', 'b', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a',
  'c', 'k', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p',
  'a', 'c', 'k', 'o', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'o', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_',
  'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'o', 'h', '_', '1', '2',
  '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a', 'c',
  'k', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'u', 'n', 'p', 'a',
  'c', 'k', 'u', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6',
  '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'u', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V',
  '6', '_', 'v', 'u', 'n', 'p', 'a', 'c', 'k', 'u', 'h', '_', '1', '2', '8',
  'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'x', 'o', 'r', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'V', '6', '_', 'v', 'x', 'o', 'r', '_', '1', '2', '8', 'B', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'V', '6', '_', 'v', 'z', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'z', 'b', '_', '1', '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_',
  'v', 'z', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'V', '6', '_', 'v', 'z', 'h', '_', '1',
  '2', '8', 'B', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd', 'c', 'c', 'l', 'e',
  'a', 'n', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd', 'c', 'c', 'l', 'e',
  'a', 'n', 'i', 'n', 'v', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd', 'c',
  'f', 'e', 't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd', 'c', 'i',
  'n', 'v', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H',
  'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '2', '_', 'd', 'c', 'z', 'e', 'r',
  'o', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E',
  'X', 'A', 'G', 'O', 'N', '_', 'Y', '4', '_', 'l', '2', 'f', 'e', 't', 'c',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X',
  'A', 'G', 'O', 'N', '_', 'Y', '5', '_', 'l', '2', 'f', 'e', 't', 'c', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A',
  'G', 'O', 'N', '_', 'Y', '6', '_', 'd', 'm', 'l', 'i', 'n', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O',
  'N', '_', 'Y', '6', '_', 'd', 'm', 'p', 'a', 'u', 's', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N',
  '_', 'Y', '6', '_', 'd', 'm', 'p', 'o', 'l', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y',
  '6', '_', 'd', 'm', 'r', 'e', 's', 'u', 'm', 'e', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y',
  '6', '_', 'd', 'm', 's', 't', 'a', 'r', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'Y', '6',
  '_', 'd', 'm', 'w', 'a', 'i', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l', 'd', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l', 'd', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c',
  '_', 'l', 'd', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'c', 'i', 'r', 'c', '_', 'l', 'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 'l', 'd', 'u', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_',
  'l', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c',
  'i', 'r', 'c', '_', 's', 't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 's', 't', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c', '_', 's', 't', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'c', 'i', 'r', 'c',
  '_', 's', 't', 'h', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'c', 'i', 'r', 'c', '_', 's', 't', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'H', 'E', 'X', 'A', 'G', 'O', 'N', '_', 'p',
  'r', 'e', 'f', 'e', 't', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'h', 'e', 'x', 'a', 'g', 'o', 'n', '_', 'v', 'm', 'e', 'm',
  'c', 'p', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'h',
  'e', 'x', 'a', 'g', 'o', 'n', '_', 'v', 'm', 'e', 'm', 's', 'e', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'a', 'b', 's', 'q', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'b', 's', 'q', '_',
  's', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'a', 'b', 's', 'q', '_', 's', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd',
  'd', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'a', 'd', 'd', '_', 'a', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd',
  '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'a', 'd', 'd', '_', 'a', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd',
  'q', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'q', '_', 's', '_', 'p', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'a', 'd', 'd', 'q', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'q', 'h', '_',
  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 'a', 'd', 'd', 'q', 'h', '_', 'r', '_', 'p', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a',
  'd', 'd', 'q', 'h', '_', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'q', 'h', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'a', 'd', 'd', 's', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 'a',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'a', 'd', 'd', 's', '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_',
  'a', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'a', 'd', 'd', 's', '_', 's', '_', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's',
  '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 's', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd',
  's', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 'u', '_', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd',
  'd', 's', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 's', '_', 'u', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a',
  'd', 'd', 's', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 's', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a',
  'd', 'd', 'u', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'u', '_', 'q', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'a', 'd', 'd', 'u', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'u', '_',
  's', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'a', 'd', 'd', 'u', 'h', '_', 'q', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'a',
  'd', 'd', 'u', 'h', '_', 'r', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'a', 'd', 'd', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd',
  'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'a', 'd', 'd', 'v', 'i', '_', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', 'i',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'a', 'd', 'd', 'v', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'd', 'd', 'v', 'i', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
  's', '_', 'a', 'd', 'd', 'w', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'n', 'd', '_', 'v', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'n', 'd',
  'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  'i', 'p', 's', '_', 'a', 'p', 'p', 'e', 'n', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_',
  's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'a', 's', 'u', 'b', '_', 's', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b',
  '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_', 's', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u',
  'b', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_', 'u', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 's',
  'u', 'b', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'a', 's', 'u', 'b', '_', 'u', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a',
  'v', 'e', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', '_', 's', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v',
  'e', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'a', 'v', 'e', '_', 's', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e',
  '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'a', 'v', 'e', '_', 'u', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', '_',
  'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'a', 'v', 'e', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_',
  's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'a', 'v', 'e', 'r', '_', 's', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r',
  '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_', 's', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e',
  'r', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_', 'u', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'a', 'v',
  'e', 'r', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'a', 'v', 'e', 'r', '_', 'u', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'b', 'a', 'l', 'i', 'g', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l',
  'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'b', 'c', 'l', 'r', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'b', 'c', 'l', 'r', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', 'i', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
  'c', 'l', 'r', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'b', 'c', 'l', 'r', 'i', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i',
  'n', 's', 'l', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
  's', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's',
  'l', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', 'i', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
  's', 'l', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'l', 'i', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i',
  'n', 's', 'r', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
  's', 'r', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's',
  'r', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', 'i', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'i', 'n',
  's', 'r', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 'i', 'n', 's', 'r', 'i', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'b',
  'i', 't', 'r', 'e', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 'm', 'n', 'z', '_', 'v', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'm', 'n', 'z',
  'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'b', 'm', 'z', '_', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'm', 'z', 'i', '_', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
  'n', 'e', 'g', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'b', 'n', 'e', 'g', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g', 'i', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'b', 'n', 'e', 'g', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'e', 'g', 'i', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
  'n', 'e', 'g', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'b', 'n', 'z', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_', 'v', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'n', 'z', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
  's', '_', 'b', 'p', 'o', 's', 'g', 'e', '3', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 'l', '_',
  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'b', 's', 'e', 'l', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', '_', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b',
  's', 'e', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'b', 's', 'e', 't', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', 'i', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'b', 's', 'e', 't', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 's', 'e', 't', 'i', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'b', 'z', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'b', 'z', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'z', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'b', 'z', '_',
  'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'b', 'z', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'c', 'e', 'q', '_', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e', 'q', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'c', 'e', 'q', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'c', 'e', 'q', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e', 'q', 'i', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'c', 'e', 'q', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e', 'q', 'i', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'e',
  'q', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'c', 'f', 'c', 'm', 's', 'a', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 's',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'c', 'l', 'e', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 's', '_',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'c', 'l', 'e', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 'u', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'c', 'l', 'e', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', '_', 'u', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
  'l', 'e', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 's', '_', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
  'l', 'e', 'i', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 's', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'c', 'l', 'e', 'i', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 'u', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'c', 'l', 'e', 'i', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 'e', 'i', '_', 'u',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'c', 'l', 'e', 'i', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 's',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'c', 'l', 't', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 's', '_',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'c', 'l', 't', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 'u', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'c', 'l', 't', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', '_', 'u', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
  'l', 't', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 's', '_', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
  'l', 't', 'i', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 's', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'c', 'l', 't', 'i', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 'u', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'c', 'l', 't', 'i', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'l', 't', 'i', '_', 'u',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'c', 'l', 't', 'i', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', '_',
  'e', 'q', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', '_', 'l', 'e', '_', 'p', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
  '_', 'c', 'm', 'p', '_', 'l', 't', '_', 'p', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g',
  'd', 'u', '_', 'e', 'q', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g', 'd', 'u',
  '_', 'l', 'e', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g', 'd', 'u', '_', 'l',
  't', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'g', 'u', '_', 'e', 'q', '_', 'q',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
  's', '_', 'c', 'm', 'p', 'g', 'u', '_', 'l', 'e', '_', 'q', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c',
  'm', 'p', 'g', 'u', '_', 'l', 't', '_', 'q', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'u',
  '_', 'e', 'q', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'c', 'm', 'p', 'u', '_', 'l', 'e', '_',
  'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 'c', 'm', 'p', 'u', '_', 'l', 't', '_', 'q', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o',
  'p', 'y', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 's', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c',
  'o', 'p', 'y', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 's', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'c', 'o', 'p', 'y', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 'u', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'c', 'o', 'p', 'y', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'c', 'o', 'p', 'y', '_', 'u',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'c', 't', 'c', 'm', 's', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 's', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'd', 'i', 'v', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 's', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd',
  'i', 'v', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 'u', '_', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i',
  'v', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'd', 'i', 'v', '_', 'u', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'i', 'v',
  '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'd', 'l', 's', 'a', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'o', 't', 'p', '_', 's',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'd', 'o', 't', 'p', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'o', 't', 'p', '_',
  's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'd', 'o', 't', 'p', '_', 'u', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'o', 't', 'p',
  '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'd', 'o', 't', 'p', '_', 'u', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p',
  'a', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_', 's', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'd', 'p', 'a', 'd', 'd', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_',
  's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_', 'u', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a',
  'd', 'd', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'd', 'p', 'a', 'd', 'd', '_', 'u', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
  '_', 'd', 'p', 'a', 'q', '_', 's', '_', 'w', '_', 'p', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p',
  'a', 'q', '_', 's', 'a', '_', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 'a', 'q', 'x',
  '_', 's', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 'a', 'q', 'x', '_', 's',
  'a', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 'a', 'u', '_', 'h', '_', 'q',
  'b', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 'd', 'p', 'a', 'u', '_', 'h', '_', 'q', 'b', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd',
  'p', 'a', 'x', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', '_', 'w', '_',
  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 'd', 'p', 's', 'q', '_', 's', '_', 'w', '_', 'p', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'd', 'p', 's', 'q', '_', 's', 'a', '_', 'l', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's',
  'q', 'x', '_', 's', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'q', 'x',
  '_', 's', 'a', '_', 'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'u', '_', 'h',
  '_', 'q', 'b', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'u', '_', 'h', '_', 'q', 'b', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'd', 'p', 's', 'u', 'b', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 's', 'u', 'b', '_',
  's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'd', 'p', 's', 'u', 'b', '_', 's', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'd', 'p', 's',
  'u', 'b', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'd', 'p', 's', 'u', 'b', '_', 'u', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'd', 'p', 's', 'u', 'b', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'd', 'p', 's', 'x', '_',
  'w', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'p', 'd',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
  's', '_', 'e', 'x', 't', 'r', '_', 'r', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'r',
  '_', 'r', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 'i', 'p', 's', '_', 'e', 'x', 't', 'r', '_', 's', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'e', 'x', 't', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'a', 'd', 'd', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'a', 'd',
  'd', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'f', 'c', 'a', 'f', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'a', 'f', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 'c', 'e', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'e', 'q', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'l',
  'a', 's', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 'c', 'l', 'a', 's', 's', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c',
  'l', 'e', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 'c', 'l', 'e', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'l', 't', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'f', 'c', 'l', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'n', 'e', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c',
  'n', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 'c', 'o', 'r', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'o', 'r', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'f', 'c', 'u', 'e', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'e', 'q', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 'c', 'u', 'l', 'e', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'l', 'e', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  'c', 'u', 'l', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'l', 't', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c',
  'u', 'n', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 'c', 'u', 'n', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'c', 'u', 'n', 'e',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'f', 'c', 'u', 'n', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'd', 'i', 'v', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 'd', 'i', 'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'e', 'x', 'd', 'o', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e',
  'x', 'd', 'o', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 'e', 'x', 'p', '2', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e', 'x',
  'p', '2', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 'e', 'x', 'u', 'p', 'l', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e', 'x',
  'u', 'p', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 'e', 'x', 'u', 'p', 'r', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'e',
  'x', 'u', 'p', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'i', 'n', 't', '_', 's', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 'f', 'i', 'n', 't', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'i', 'n', 't', '_',
  'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'f', 'f', 'i', 'n', 't', '_', 'u', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'q',
  'l', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'f', 'f', 'q', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'f', 'q', 'r', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 'f', 'q', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'i', 'l', 'l', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'i', 'l',
  'l', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'f', 'i', 'l', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'i', 'l', 'l', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 'l', 'o', 'g', '2', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'l', 'o', 'g', '2', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  'm', 'a', 'd', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'a', 'd', 'd', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm',
  'a', 'x', '_', 'a', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'a', 'x', '_', 'a', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  'm', 'a', 'x', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 'm', 'a', 'x', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'i', 'n',
  '_', 'a', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 'm', 'i', 'n', '_', 'a', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'i',
  'n', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'f', 'm', 'i', 'n', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 's', 'u', 'b', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'f', 'm', 's', 'u', 'b', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'm', 'u', 'l', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  'm', 'u', 'l', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 'r', 'c', 'p', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'r', 'c', 'p',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'f', 'r', 'i', 'n', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'r', 'i', 'n', 't', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'f', 'r', 's', 'q', 'r', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 'r', 's', 'q', 'r', 't',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'f', 's', 'a', 'f', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'a', 'f', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  's', 'e', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 's', 'e', 'q', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'l', 'e',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'f', 's', 'l', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'l', 't', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  's', 'l', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'f', 's', 'n', 'e', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'n', 'e',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'f', 's', 'o', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'o', 'r', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  's', 'q', 'r', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 's', 'q', 'r', 't', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's',
  'u', 'b', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 's', 'u', 'b', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'e', 'q',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'f', 's', 'u', 'e', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'l', 'e', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'f', 's', 'u', 'l', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'l', 't', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 's', 'u', 'l', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'n', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's',
  'u', 'n', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 's', 'u', 'n', 'e', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 's', 'u', 'n',
  'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'f', 't', 'i', 'n', 't', '_', 's', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 't', 'i',
  'n', 't', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'f', 't', 'i', 'n', 't', '_', 'u', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'f', 't', 'i', 'n', 't', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 't', 'q', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  't', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 't', 'r', 'u', 'n', 'c', '_', 's', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f',
  't', 'r', 'u', 'n', 'c', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'f', 't', 'r', 'u', 'n', 'c',
  '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'f', 't', 'r', 'u', 'n', 'c', '_', 'u', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h',
  'a', 'd', 'd', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'h', 'a', 'd', 'd', '_', 's', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'h', 'a', 'd', 'd', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 'a', 'd', 'd', '_', 'u', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'h', 'a', 'd', 'd', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 'a', 'd', 'd', '_', 'u',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'h', 's', 'u', 'b', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 's', 'u', 'b', '_',
  's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'h', 's', 'u', 'b', '_', 's', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 's', 'u', 'b',
  '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'h', 's', 'u', 'b', '_', 'u', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'h', 's', 'u',
  'b', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'e', 'v', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v',
  'e', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'i', 'l', 'v', 'e', 'v', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'e',
  'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'i', 'l', 'v', 'l', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'l', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'i', 'l', 'v', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'l', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v',
  'o', 'd', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'i', 'l', 'v', 'o', 'd', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'o',
  'd', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'i', 'l', 'v', 'o', 'd', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'r', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'i', 'l', 'v', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l', 'v', 'r', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'l',
  'v', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'i', 'n', 's', 'e', 'r', 't', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n', 's',
  'e', 'r', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'i', 'n', 's', 'e', 'r', 't', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n',
  's', 'e', 'r', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'i', 'n', 's', 'v', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n', 's', 'v',
  'e', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'i', 'n', 's', 'v', 'e', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'i', 'n', 's', 'v', 'e',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'i', 'n', 's', 'v', 'e', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'l', 'b', 'u', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l',
  'd', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'l', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'l', 'd', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'l', 'd', 'i', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', 'i', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'l', 'd', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'l', 'd', 'r', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'l', 'd', 'r', '_', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
  '_', 'l', 'h', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'l', 's', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'l', 'w', 'x', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a',
  'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'a', 'd', 'd', '_', 'q', '_', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd', 'd', '_',
  'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'a', 'd', 'd', 'r', '_', 'q', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd',
  'd', 'r', '_', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a', 'd', 'd', 'u', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd',
  'd', 'v', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'm', 'a', 'd', 'd', 'v', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'd', 'd',
  'v', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'a', 'd', 'd', 'v', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a', 'q', '_',
  's', '_', 'w', '_', 'p', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'a', 'q', '_', 's', '_', 'w',
  '_', 'p', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'm', 'a', 'q', '_', 's', 'a', '_', 'w', '_', 'p',
  'h', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 'm', 'a', 'q', '_', 's', 'a', '_', 'w', '_', 'p', 'h', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'm', 'a', 'x', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'a', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
  'a', 'x', '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'a', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a',
  'x', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 's', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x',
  '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'm', 'a', 'x', '_', 's', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_',
  'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'a', 'x', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', '_', 'u',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'a', 'x', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_', 's',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'a', 'x', 'i', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_',
  's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'a', 'x', 'i', '_', 's', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i',
  '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_', 'u', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'a', 'x',
  'i', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'm', 'a', 'x', 'i', '_', 'u', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i',
  'n', '_', 'a', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'a', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n',
  '_', 'a', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'a', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_',
  's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'i', 'n', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 's',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'i', 'n', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'u', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'm', 'i', 'n', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', '_', 'u', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'm', 'i', 'n', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 's', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'm', 'i', 'n', 'i', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 's', '_',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'm', 'i', 'n', 'i', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_', 'u',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'i', 'n', 'i', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'i', 'n', 'i', '_',
  'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'i', 'n', 'i', '_', 'u', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_',
  's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'o', 'd', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 's',
  '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'o', 'd', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 'u', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'm', 'o', 'd', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'o', 'd', '_', 'u', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'm', 'o', 'd', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'o', 'd', 's', 'u', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm',
  'o', 'v', 'e', '_', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 'i', 'p', 's', '_', 'm', 's', 'u', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', '_',
  'q', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 's', 'u', 'b', '_', 'q', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b',
  'r', '_', 'q', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', 'r', '_', 'q', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'm', 's', 'u', 'b', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b', 'v', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u',
  'b', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'm', 's', 'u', 'b', 'v', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 's', 'u', 'b',
  'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  'i', 'p', 's', '_', 'm', 't', 'h', 'l', 'i', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', '_',
  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 'm', 'u', 'l', '_', 'q', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', '_', 'q', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
  's', '_', 'm', 'u', 'l', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'e',
  'q', '_', 's', '_', 'w', '_', 'p', 'h', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'e', 'q',
  '_', 's', '_', 'w', '_', 'p', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'e', 'u', '_',
  's', '_', 'p', 'h', '_', 'q', 'b', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'e', 'u', '_',
  's', '_', 'p', 'h', '_', 'q', 'b', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'q', '_', 'r',
  's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'q', '_', 'r', 's', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'm', 'u', 'l', 'q', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 'q', '_',
  's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'm', 'u', 'l', 'r', '_', 'q', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', 'r',
  '_', 'q', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 's', 'a', '_', 'w', '_', 'p', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
  '_', 'm', 'u', 'l', 's', 'a', 'q', '_', 's', '_', 'w', '_', 'p', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'm', 'u', 'l', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'm', 'u', 'l', 't', 'u', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', 'v', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'm', 'u', 'l', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u', 'l', 'v', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'm', 'u',
  'l', 'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'n', 'l', 'o', 'c', '_', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'o', 'c', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'n', 'l', 'o', 'c', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'o', 'c', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l',
  'z', 'c', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'n', 'l', 'z', 'c', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'l', 'z', 'c', '_',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'n', 'l', 'z', 'c', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'o', 'r', '_', 'v', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'n', 'o', 'r',
  'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'o', 'r', '_', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'o', 'r', 'i', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'a',
  'c', 'k', 'r', 'l', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'e', 'v', '_', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p',
  'c', 'k', 'e', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'e', 'v', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c',
  'k', 'e', 'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'o', 'd', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k',
  'o', 'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'p', 'c', 'k', 'o', 'd', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'k', 'o',
  'd', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 'p', 'c', 'n', 't', '_', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'n', 't', '_', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  'p', 'c', 'n', 't', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'p', 'c', 'n', 't', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'i',
  'c', 'k', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 'i', 'p', 's', '_', 'p', 'i', 'c', 'k', '_', 'q', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p',
  'r', 'e', 'c', 'e', 'q', '_', 'w', '_', 'p', 'h', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e',
  'c', 'e', 'q', '_', 'w', '_', 'p', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'e',
  'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'e',
  'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', 'a', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c',
  'e', 'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c',
  'e', 'q', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r', 'a', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e',
  'c', 'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c',
  'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'l', 'a', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c',
  'e', 'u', '_', 'p', 'h', '_', 'q', 'b', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'e',
  'u', '_', 'p', 'h', '_', 'q', 'b', 'r', 'a', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r',
  '_', 'q', 'b', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', '_', 's', 'r',
  'a', '_', 'p', 'h', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', '_', 's', 'r',
  'a', '_', 'r', '_', 'p', 'h', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', 'q',
  '_', 'p', 'h', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', 'q', '_', 'q', 'b',
  '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', 'q', '_', 'r', 's', '_', 'p',
  'h', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  'i', 'p', 's', '_', 'p', 'r', 'e', 'c', 'r', 'q', 'u', '_', 's', '_', 'q',
  'b', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 'p', 'r', 'e', 'p', 'e', 'n', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'r', 'a',
  'd', 'd', 'u', '_', 'w', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'r', 'd', 'd', 's', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  'r', 'e', 'p', 'l', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'r', 'e', 'p', 'l', '_', 'q', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  's', 'a', 't', '_', 's', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a', 't', '_', 's', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
  'a', 't', '_', 's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'a', 't', '_', 's', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a',
  't', '_', 'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'a', 't', '_', 'u', '_', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'a', 't',
  '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 's', 'a', 't', '_', 'u', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'h', 'f', '_',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 's', 'h', 'f', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'h', 'f', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'i',
  'l', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 's', 'h', 'l', 'l', '_', 'p', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'l', 'l',
  '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  'i', 'p', 's', '_', 's', 'h', 'l', 'l', '_', 's', '_', 'p', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's',
  'h', 'l', 'l', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'r', 'a', '_', 'p', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
  '_', 's', 'h', 'r', 'a', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'r', 'a', '_', 'r',
  '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  'i', 'p', 's', '_', 's', 'h', 'r', 'a', '_', 'r', '_', 'q', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's',
  'h', 'r', 'a', '_', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'h', 'r', 'l', '_', 'p', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's',
  '_', 's', 'h', 'r', 'l', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'd', '_', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l',
  'd', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 'l', 'd', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'd', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l',
  'd', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 's', 'l', 'd', 'i', '_', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'd', 'i', '_',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 's', 'l', 'd', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'l', 'l', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l',
  'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 'l', 'l', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'l', 'l', 'i', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  's', 'l', 'l', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't', '_', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p',
  'l', 'a', 't', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l',
  'a', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't', 'i', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p', 'l',
  'a', 't', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'p', 'l', 'a', 't', 'i', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'p',
  'l', 'a', 't', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', '_', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', '_',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 's', 'r', 'a', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'i',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'r', 'a', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'i', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
  'r', 'a', 'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', '_', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'r', 'a', 'r', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
  'r', 'a', 'r', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', 'i', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r',
  'a', 'r', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'r', 'a', 'r', 'i', '_', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'r', 'l', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l',
  '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'r', 'l', 'i', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'i', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
  'r', 'l', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'i', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r',
  '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'r', 'l', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r', '_', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
  'r', 'l', 'r', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r', 'i', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l',
  'r', 'i', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 's', 'r', 'l', 'r', 'i', '_', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'r', 'l', 'r',
  'i', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 't', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 's', 't', '_', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 't', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  's', 't', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 's', 't', 'r', '_', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 't', 'r', '_', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  's', 'u', 'b', 'q', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'q', '_', 's', '_',
  'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i',
  'p', 's', '_', 's', 'u', 'b', 'q', '_', 's', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b',
  'q', 'h', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'q', 'h', '_', 'r', '_', 'p',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p',
  's', '_', 's', 'u', 'b', 'q', 'h', '_', 'r', '_', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b',
  'q', 'h', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 's', '_', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b',
  's', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 's', '_', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
  'b', 's', '_', 's', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 'u', '_', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's',
  'u', 'b', 's', '_', 'u', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', '_', 'u', '_', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  's', 'u', 'b', 's', '_', 'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', 'u', 's', '_',
  'u', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 'u', 'b', 's', 'u', 's', '_', 'u', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
  'b', 's', 'u', 's', '_', 'u', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', 'u', 's', '_',
  'u', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 'u', 'b', 's', 'u', 'u', '_', 's', '_', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u',
  'b', 's', 'u', 'u', '_', 's', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 's', 'u', 'u', '_',
  's', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 'u', 'b', 's', 'u', 'u', '_', 's', '_', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's',
  'u', 'b', 'u', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', '_', 'q', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_',
  's', 'u', 'b', 'u', '_', 's', '_', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', '_',
  's', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 'i', 'p', 's', '_', 's', 'u', 'b', 'u', 'h', '_', 'q', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 's',
  'u', 'b', 'u', 'h', '_', 'r', '_', 'q', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', '_', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_',
  's', 'u', 'b', 'v', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', '_', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b',
  'v', '_', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm',
  's', 'a', '_', 's', 'u', 'b', 'v', 'i', '_', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', 'i',
  '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's',
  'a', '_', 's', 'u', 'b', 'v', 'i', '_', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 's', 'u', 'b', 'v', 'i', '_',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'v', 's', 'h', 'f', '_', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'm', 's', 'a', '_', 'v', 's', 'h', 'f', '_', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a', '_', 'v', 's',
  'h', 'f', '_', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'm', 's', 'a', '_', 'v', 's', 'h', 'f', '_', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'm', 'i', 'p', 's', '_', 'w', 'r', 'd', 's',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'm', 's', 'a',
  '_', 'x', 'o', 'r', '_', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'm', 's', 'a', '_', 'x', 'o', 'r', 'i', '_', 'b', '\000', '_', '_',
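  /* End of __builtin_mips_* / __builtin_msa_* name data; __nvvm_* (NVPTX) builtin names follow. */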
  'n', 'v', 'v', 'm', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', 'x',
  '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'm',
  '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r',
  'm', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_',
  'r', 'm', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'a', 'd', 'd', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'a', 'd', 'd', '_', 'r', 'n', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'f',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'p', '_',
  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r', 'p',
  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'a', 'd', 'd', '_', 'r',
  'p', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'a', 'd', 'd', '_', 'r', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'a', 'd', 'd', '_', 'r', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'a', 'd', 'd', '_', 'r', 'z', '_', 'f', 't', 'z', '_', 'f', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '_', 's', 'y', 'n', 'c',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '_', 'w', 'a', 'r',
  'p', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b',
  'a', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '_', 'n',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r',
  '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a',
  'r', 'r', 'i', 'e', 'r', '_', 's', 'y', 'n', 'c', '_', 'c', 'n', 't', '\000',
  '_', '_', 's', 'y', 'n', 'c', 't', 'h', 'r', 'e', 'a', 'd', 's', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '0', '_', 'a', 'n', 'd', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '0', '_', 'o', 'r', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'a', 'r', '0', '_', 'p', 'o', 'p',
  'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'f', '2', 'h', '_', 'r',
  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'f', '2', 'h', '_', 'r',
  'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'i',
  't', 'c', 'a', 's', 't', '_', 'd', '2', 'l', 'l', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'b', 'i', 't', 'c', 'a', 's', 't', '_', 'f', '2', 'i', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'i', 't', 'c', 'a', 's', 't', '_',
  'i', '2', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'b', 'i', 't', 'c',
  'a', 's', 't', '_', 'l', 'l', '2', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'c', 'e', 'i', 'l', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'c', 'e', 'i', 'l', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'c',
  'e', 'i', 'l', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'c', 'o', 's', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'c', 'o', 's', '_', 'a', 'p', 'p', 'r',
  'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'c', 'p', '_', 'a', 's', 'y', 'n', 'c', '_', 'c', 'o', 'm', 'm', 'i',
  't', '_', 'g', 'r', 'o', 'u', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'c', 'p', '_', 'a', 's', 'y', 'n', 'c', '_', 'm', 'b', 'a', 'r', 'r', 'i',
  'e', 'r', '_', 'a', 'r', 'r', 'i', 'v', 'e', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'c', 'p', '_', 'a', 's', 'y', 'n', 'c', '_', 'm', 'b', 'a', 'r',
  'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i', 'v', 'e', '_', 'n', 'o', 'i',
  'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'c', 'p', '_', 'a', 's',
  'y', 'n', 'c', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'a', 'r',
  'r', 'i', 'v', 'e', '_', 'n', 'o', 'i', 'n', 'c', '_', 's', 'h', 'a', 'r',
  'e', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'c', 'p', '_', 'a', 's',
  'y', 'n', 'c', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'a', 'r',
  'r', 'i', 'v', 'e', '_', 's', 'h', 'a', 'r', 'e', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'c', 'p', '_', 'a', 's', 'y', 'n', 'c', '_', 'w', 'a',
  'i', 't', '_', 'a', 'l', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'c',
  'p', '_', 'a', 's', 'y', 'n', 'c', '_', 'w', 'a', 'i', 't', '_', 'g', 'r',
  'o', 'u', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_',
  'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f', '_', 'r',
  'm', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'f', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2', 'f',
  '_', 'r', 'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'd', '2', 'f', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd',
  '2', 'f', '_', 'r', 'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'd', '2', 'f', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'd', '2', 'f', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'd', '2', 'i', '_', 'h', 'i', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'd', '2', 'i', '_', 'l', 'o', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'd', '2', 'i', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'd', '2', 'i', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'd', '2', 'i', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd',
  '2', 'i', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'l', 'l', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'l', 'l', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'l', 'l', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'l', 'l', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'u', 'i', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'u', 'i', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'u', 'i', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'u', 'i', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', '2',
  'u', 'l', 'l', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd',
  '2', 'u', 'l', 'l', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'd', '2', 'u', 'l', 'l', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'd', '2', 'u', 'l', 'l', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'd', 'i', 'v', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'a', 'p', 'p', 'r',
  'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'd', 'i', 'v', '_', 'r', 'm', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'd', 'i', 'v', '_', 'r', 'm', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'm', '_', 'f', 't', 'z', '_', 'f',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'n', '_',
  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'n',
  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r',
  'n', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'd', 'i', 'v', '_', 'r', 'p', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'd', 'i', 'v', '_', 'r', 'p', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'd', 'i', 'v', '_', 'r', 'p', '_', 'f', 't', 'z', '_', 'f', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'z', '_', 'd',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'z', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'd', 'i', 'v', '_', 'r', 'z',
  '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'e',
  'x', '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'e', 'x', '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'e', 'x', '2', '_', 'a', 'p',
  'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', '2', 'b', 'f', '1', '6', '_', 'r', 'n', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', '2', 'b', 'f', '1', '6', '_', 'r', 'n', '_',
  'r', 'e', 'l', 'u', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'b',
  'f', '1', '6', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  '2', 'b', 'f', '1', '6', '_', 'r', 'z', '_', 'r', 'e', 'l', 'u', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'h', '_', 'r', 'n', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', '2', 'h', '_', 'r', 'n', '_', 'f', 't', 'z',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i', '_', 'r', 'm', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i', '_', 'r', 'm', '_', 'f',
  't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i', '_', 'r',
  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i', '_', 'r', 'n',
  '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i',
  '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'i', '_',
  'r', 'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  '2', 'i', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
  'i', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', '2', 'l', 'l', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', '2', 'l', 'l', '_', 'r', 'm', '_', 'f', 't', 'z', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r', 'n', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r', 'n', '_', 'f', 't',
  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'l', 'l', '_', 'r',
  'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
  'l', 'l', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
  'l', 'l', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', '2', 't', 'f', '3', '2', '_', 'r', 'n', 'a', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'm', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'm', '_', 'f', 't',
  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r',
  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'i', '_', 'r',
  'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
  'u', 'i', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
  'u', 'i', '_', 'r', 'p', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', '2', 'u', 'i', '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'm', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'm',
  '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u',
  'l', 'l', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2',
  'u', 'l', 'l', '_', 'r', 'n', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_', 'r', 'p', '_', 'f', 't',
  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l', '_',
  'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', '2', 'u', 'l', 'l',
  '_', 'r', 'z', '_', 'f', 't', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'f', 'a', 'b', 's', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  'a', 'b', 's', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'a',
  'b', 's', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'f', '2', 'b', 'f', '1', '6', 'x', '2', '_', 'r', 'n', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'f', '2', 'b', 'f', '1', '6', 'x', '2',
  '_', 'r', 'n', '_', 'r', 'e', 'l', 'u', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'f', '2', 'b', 'f', '1', '6', 'x', '2', '_', 'r', 'z', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'f', '2', 'b', 'f', '1', '6', 'x', '2',
  '_', 'r', 'z', '_', 'r', 'e', 'l', 'u', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'f', '2', 'f', '1', '6', 'x', '2', '_', 'r', 'n', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'f', '2', 'f', '1', '6', 'x', '2', '_', 'r',
  'n', '_', 'r', 'e', 'l', 'u', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  'f', '2', 'f', '1', '6', 'x', '2', '_', 'r', 'z', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', 'f', '2', 'f', '1', '6', 'x', '2', '_', 'r', 'z', '_',
  'r', 'e', 'l', 'u', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'l', 'o',
  'o', 'r', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'l', 'o',
  'o', 'r', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'l', 'o',
  'o', 'r', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'm', 'a', '_', 'r', 'm', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'm', 'a', '_', 'r', 'm', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'm', '_', 'f', 't', 'z', '_', 'f',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_',
  'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  '_', 'r', 'n', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_', 'f', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_', 'f', 't', 'z',
  '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
  'a', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'b', 'f', '1', '6', 'x', '2',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_',
  'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
  'a', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'r', 'e', 'l', 'u', '_', 'b',
  'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_',
  'r', 'n', '_', 'f', 't', 'z', '_', 'r', 'e', 'l', 'u', '_', 'b', 'f', '1',
  '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_',
  'r', 'n', '_', 'f', 't', 'z', '_', 's', 'a', 't', '_', 'b', 'f', '1', '6',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_',
  'f', 't', 'z', '_', 's', 'a', 't', '_', 'b', 'f', '1', '6', 'x', '2', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_', 'r',
  'e', 'l', 'u', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'm', 'a', '_', 'r', 'n', '_', 'r', 'e', 'l', 'u', '_', 'b', 'f',
  '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  '_', 'r', 'n', '_', 's', 'a', 't', '_', 'b', 'f', '1', '6', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'n', '_', 's', 'a', 't',
  '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'f', 'm', 'a', '_', 'r', 'p', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'm', 'a', '_', 'r', 'p', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'm', 'a', '_', 'r', 'p', '_', 'f', 't', 'z', '_', 'f', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'z', '_', 'd',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'z', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', '_', 'r', 'z',
  '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  'm', 'a', 'x', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'm', 'a', 'x', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'b', 'f', '1',
  '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f',
  't', 'z', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'n',
  'a', 'n', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'n', 'a', 'n', '_', 'b', 'f',
  '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  'x', '_', 'f', 't', 'z', '_', 'n', 'a', 'n', '_', 'f', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'n', 'a',
  'n', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b',
  'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x',
  '_', 'f', 't', 'z', '_', 'n', 'a', 'n', '_', 'x', 'o', 'r', 's', 'i', 'g',
  'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'n',
  'a', 'n', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'f',
  't', 'z', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_',
  'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  'x', '_', 'f', 't', 'z', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a',
  'b', 's', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'm', 'a', 'x', '_', 'f', 't', 'z', '_', 'x', 'o', 'r', 's',
  'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'm', 'a', 'x', '_', 'n', 'a', 'n', '_', 'b', 'f', '1', '6',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'n', 'a',
  'n', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'm', 'a', 'x', '_', 'n', 'a', 'n', '_', 'f', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'n', 'a', 'n', '_', 'x', 'o',
  'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x', '_', 'n', 'a', 'n',
  '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f',
  '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  'x', '_', 'n', 'a', 'n', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a',
  'b', 's', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  'x', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b',
  'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a', 'x',
  '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f',
  '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'a',
  'x', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'f',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'b', 'f',
  '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_',
  'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  'm', 'i', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
  'i', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i',
  'n', '_', 'f', 't', 'z', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', 't', 'z', '_', 'b', 'f', '1',
  '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n',
  '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  'm', 'i', 'n', '_', 'f', 't', 'z', '_', 'n', 'a', 'n', '_', 'b', 'f', '1',
  '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f',
  't', 'z', '_', 'n', 'a', 'n', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', 't', 'z', '_',
  'n', 'a', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
  'i', 'n', '_', 'f', 't', 'z', '_', 'n', 'a', 'n', '_', 'x', 'o', 'r', 's',
  'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', 't', 'z', '_', 'n',
  'a', 'n', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_',
  'b', 'f', '1', '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f',
  'm', 'i', 'n', '_', 'f', 't', 'z', '_', 'n', 'a', 'n', '_', 'x', 'o', 'r',
  's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', 't', 'z', '_', 'x', 'o', 'r',
  's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'f', 't', 'z', '_',
  'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1',
  '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n',
  '_', 'f', 't', 'z', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b',
  's', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n',
  '_', 'n', 'a', 'n', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'm', 'i', 'n', '_', 'n', 'a', 'n', '_', 'b', 'f', '1', '6',
  'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_',
  'n', 'a', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'f', 'm',
  'i', 'n', '_', 'n', 'a', 'n', '_', 'x', 'o', 'r', 's', 'i', 'g', 'n', '_',
  'a', 'b', 's', '_', 'b', 'f', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'f', 'm', 'i', 'n', '_', 'n', 'a', 'n', '_', 'x', 'o', 'r', 's', 'i',
  'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'n', 'a', 'n', '_',
  'x', 'o', 'r', 's', 'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'f', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'x', 'o', 'r', 's',
  'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'x', 'o', 'r', 's', 'i',
  'g', 'n', '_', 'a', 'b', 's', '_', 'b', 'f', '1', '6', 'x', '2', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'f', 'm', 'i', 'n', '_', 'x', 'o', 'r', 's',
  'i', 'g', 'n', '_', 'a', 'b', 's', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'f', 'n', 's', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2',
  'd', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'd',
  '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'd', '_',
  'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'd', '_', 'r',
  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f', '_', 'r', 'm',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f', '_', 'r', 'n', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f', '_', 'r', 'p', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'i', '2', 'f', '_', 'r', 'z', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'i', 's', 's', 'p', 'a', 'c', 'e', 'p', '_', 'c',
  'o', 'n', 's', 't', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 's',
  'p', 'a', 'c', 'e', 'p', '_', 'g', 'l', 'o', 'b', 'a', 'l', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'i', 's', 's', 'p', 'a', 'c', 'e', 'p', '_', 'l',
  'o', 'c', 'a', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 's',
  'p', 'a', 'c', 'e', 'p', '_', 's', 'h', 'a', 'r', 'e', 'd', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'i', 's', 't', 'y', 'p', 'e', 'p', '_', 's', 'a',
  'm', 'p', 'l', 'e', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'i', 's',
  't', 'y', 'p', 'e', 'p', '_', 's', 'u', 'r', 'f', 'a', 'c', 'e', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'i', 's', 't', 'y', 'p', 'e', 'p', '_', 't',
  'e', 'x', 't', 'u', 'r', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'l',
  'g', '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'l', 'g', '2', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'l', 'g', '2', '_', 'a', 'p',
  'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'm', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'n', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'd', '_', 'r', 'z', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'm', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'n', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'l', '2', 'f', '_', 'r', 'z', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'l', 'o', 'h', 'i', '_', 'i', '2', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'm', 'a', 't', 'c', 'h', '_', 'a', 'n', 'y', '_', 's',
  'y', 'n', 'c', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'm', 'a', 't', 'c', 'h', '_', 'a', 'n', 'y', '_', 's', 'y', 'n', 'c', '_',
  'i', '6', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a', 'r',
  'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i', 'v', 'e', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'a', 'r',
  'r', 'i', 'v', 'e', '_', 'd', 'r', 'o', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i',
  'v', 'e', '_', 'd', 'r', 'o', 'p', '_', 'n', 'o', 'C', 'o', 'm', 'p', 'l',
  'e', 't', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a', 'r',
  'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i', 'v', 'e', '_', 'd', 'r', 'o',
  'p', '_', 'n', 'o', 'C', 'o', 'm', 'p', 'l', 'e', 't', 'e', '_', 's', 'h',
  'a', 'r', 'e', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a',
  'r', 'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i', 'v', 'e', '_', 'd', 'r',
  'o', 'p', '_', 's', 'h', 'a', 'r', 'e', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i',
  'v', 'e', '_', 'n', 'o', 'C', 'o', 'm', 'p', 'l', 'e', 't', 'e', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_',
  'a', 'r', 'r', 'i', 'v', 'e', '_', 'n', 'o', 'C', 'o', 'm', 'p', 'l', 'e',
  't', 'e', '_', 's', 'h', 'a', 'r', 'e', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'a', 'r', 'r', 'i',
  'v', 'e', '_', 's', 'h', 'a', 'r', 'e', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_', 'i', 'n', 'i', 't',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e',
  'r', '_', 'i', 'n', 'i', 't', '_', 's', 'h', 'a', 'r', 'e', 'd', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e', 'r', '_',
  'i', 'n', 'v', 'a', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b',
  'a', 'r', 'r', 'i', 'e', 'r', '_', 'i', 'n', 'v', 'a', 'l', '_', 's', 'h',
  'a', 'r', 'e', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a',
  'r', 'r', 'i', 'e', 'r', '_', 'p', 'e', 'n', 'd', 'i', 'n', 'g', '_', 'c',
  'o', 'u', 'n', 't', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a',
  'r', 'r', 'i', 'e', 'r', '_', 't', 'e', 's', 't', '_', 'w', 'a', 'i', 't',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'b', 'a', 'r', 'r', 'i', 'e',
  'r', '_', 't', 'e', 's', 't', '_', 'w', 'a', 'i', 't', '_', 's', 'h', 'a',
  'r', 'e', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'e', 'm', 'b',
  'a', 'r', '_', 'c', 't', 'a', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm',
  'e', 'm', 'b', 'a', 'r', '_', 'g', 'l', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'm', 'e', 'm', 'b', 'a', 'r', '_', 's', 'y', 's', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'm', '_', 'd', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'm', '_', 'f', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'm', '_', 'f', 't',
  'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_',
  'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l',
  '_', 'r', 'n', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u',
  'l', '_', 'r', 'n', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'p', '_', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'p', '_', 'f', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r', 'p', '_', 'f', 't', 'z',
  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_', 'r',
  'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', '_',
  'r', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'm', 'u', 'l',
  '_', 'r', 'z', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'm', 'u', 'l', '2', '4', '_', 'i', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'm', 'u', 'l', '2', '4', '_', 'u', 'i', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'i', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'l', 'l', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'u', 'i', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'm', 'u', 'l', 'h', 'i', '_', 'u', 'l', 'l', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'n', 'e', 'g', '_', 'b', 'f', '1', '6',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'n', 'e', 'g', '_', 'b', 'f', '1',
  '6', 'x', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'p', 'r', 'm', 't',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'a', 'p', 'p',
  'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'c', 'p', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't',
  'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_',
  'r', 'm', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p',
  '_', 'r', 'm', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c',
  'p', '_', 'r', 'm', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'n', '_', 'f', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'n', '_', 'f', 't', 'z',
  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r',
  'p', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p', '_',
  'r', 'p', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'c', 'p',
  '_', 'r', 'p', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'c', 'p', '_', 'r', 'z', '_', 'd', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'z', '_', 'f', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'r', 'c', 'p', '_', 'r', 'z', '_', 'f', 't', 'z', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p',
  't', 'x', '_', 's', 'r', 'e', 'g', '_', 'c', 'l', 'o', 'c', 'k', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_',
  's', 'r', 'e', 'g', '_', 'c', 'l', 'o', 'c', 'k', '6', '4', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
  'r', 'e', 'g', '_', 'c', 't', 'a', 'i', 'd', '_', 'w', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
  'e', 'g', '_', 'c', 't', 'a', 'i', 'd', '_', 'x', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e',
  'g', '_', 'c', 't', 'a', 'i', 'd', '_', 'y', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'c', 't', 'a', 'i', 'd', '_', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_',
  'e', 'n', 'v', 'r', 'e', 'g', '0', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e',
  'n', 'v', 'r', 'e', 'g', '1', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '0', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '1', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '3', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '5', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '7', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '8', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '1', '9', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n',
  'v', 'r', 'e', 'g', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '0', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '1', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '3', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '5', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '7', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '8', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '2', '9', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e',
  'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v',
  'r', 'e', 'g', '3', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r',
  'e', 'g', '3', '0', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r',
  'e', 'g', '3', '1', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r',
  'e', 'g', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e',
  'g', '5', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_',
  'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g',
  '6', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p',
  't', 'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '7',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
  'x', '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '8', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x',
  '_', 's', 'r', 'e', 'g', '_', 'e', 'n', 'v', 'r', 'e', 'g', '9', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_',
  's', 'r', 'e', 'g', '_', 'g', 'r', 'i', 'd', 'i', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r',
  'e', 'g', '_', 'l', 'a', 'n', 'e', 'i', 'd', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'l', 'a', 'n', 'e', 'm', 'a', 's', 'k', '_', 'e', 'q', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
  'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'm', 'a', 's', 'k', '_', 'g', 'e',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
  'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'm', 'a', 's', 'k',
  '_', 'g', 't', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a', 'n', 'e', 'm',
  'a', 's', 'k', '_', 'l', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'l', 'a',
  'n', 'e', 'm', 'a', 's', 'k', '_', 'l', 't', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'w', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'x', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'y', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'n', 'c', 't', 'a', 'i', 'd', '_', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 'n', 's', 'm', 'i', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'n', 't',
  'i', 'd', '_', 'w', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'n', 't', 'i', 'd',
  '_', 'x', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_',
  'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'n', 't', 'i', 'd', '_', 'y',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
  'x', '_', 's', 'r', 'e', 'g', '_', 'n', 't', 'i', 'd', '_', 'z', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_',
  's', 'r', 'e', 'g', '_', 'n', 'w', 'a', 'r', 'p', 'i', 'd', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
  'r', 'e', 'g', '_', 'p', 'm', '0', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'p',
  'm', '1', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_',
  'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'p', 'm', '2', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
  'r', 'e', 'g', '_', 'p', 'm', '3', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 's',
  'm', 'i', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd',
  '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 't', 'i', 'd', '_', 'w',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't',
  'x', '_', 's', 'r', 'e', 'g', '_', 't', 'i', 'd', '_', 'x', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's',
  'r', 'e', 'g', '_', 't', 'i', 'd', '_', 'y', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g',
  '_', 't', 'i', 'd', '_', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r',
  'e', 'a', 'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'w', 'a',
  'r', 'p', 'i', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'a',
  'd', '_', 'p', 't', 'x', '_', 's', 'r', 'e', 'g', '_', 'w', 'a', 'r', 'p',
  's', 'i', 'z', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'd',
  'u', 'x', '_', 's', 'y', 'n', 'c', '_', 'a', 'd', 'd', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 'r', 'e', 'd', 'u', 'x', '_', 's', 'y', 'n', 'c', '_',
  'a', 'n', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'd', 'u',
  'x', '_', 's', 'y', 'n', 'c', '_', 'm', 'a', 'x', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'r', 'e', 'd', 'u', 'x', '_', 's', 'y', 'n', 'c', '_', 'm',
  'i', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'd', 'u', 'x',
  '_', 's', 'y', 'n', 'c', '_', 'o', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'r', 'e', 'd', 'u', 'x', '_', 's', 'y', 'n', 'c', '_', 'u', 'm', 'a',
  'x', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'e', 'd', 'u', 'x', '_',
  's', 'y', 'n', 'c', '_', 'u', 'm', 'i', 'n', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'r', 'e', 'd', 'u', 'x', '_', 's', 'y', 'n', 'c', '_', 'x', 'o',
  'r', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 't', 'a', 't', 'e',
  '_', 'b', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 't',
  'a', 't', 'e', '_', 'b', '6', '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'r', 'o', 't', 'a', 't', 'e', '_', 'r', 'i', 'g', 'h', 't', '_', 'b', '6',
  '4', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 'u', 'n', 'd', '_',
  'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 'u', 'n', 'd', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 'o', 'u', 'n', 'd', '_',
  'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 's',
  'q', 'r', 't', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'd', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'r', 's', 'q', 'r', 't', '_', 'a', 'p', 'p', 'r',
  'o', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'r', 's', 'q',
  'r', 't', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'a', 'd', '_', 'i', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'a', 'd', '_', 'u', 'i', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'a', 't', 'u', 'r', 'a', 't', 'e', '_', 'd',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'a', 't', 'u', 'r', 'a', 't',
  'e', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'a', 't', 'u',
  'r', 'a', 't', 'e', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'b', 'f', 'l', 'y', '_', 'f', '3',
  '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'b',
  'f', 'l', 'y', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'h', 'f', 'l', '_', 'd', 'o', 'w', 'n', '_', 'f', '3', '2', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'd', 'o', 'w', 'n',
  '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f',
  'l', '_', 'i', 'd', 'x', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'h', 'f', 'l', '_', 'i', 'd', 'x', '_', 'i', '3', '2', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n',
  'c', '_', 'b', 'f', 'l', 'y', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'b', 'f',
  'l', 'y', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'd', 'o', 'w', 'n', '_', 'f',
  '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_',
  's', 'y', 'n', 'c', '_', 'd', 'o', 'w', 'n', '_', 'i', '3', '2', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c',
  '_', 'i', 'd', 'x', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_', 'i', 'd', 'x', '_',
  'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l',
  '_', 's', 'y', 'n', 'c', '_', 'u', 'p', '_', 'f', '3', '2', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'h', 'f', 'l', '_', 's', 'y', 'n', 'c', '_',
  'u', 'p', '_', 'i', '3', '2', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'h', 'f', 'l', '_', 'u', 'p', '_', 'f', '3', '2', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'h', 'f', 'l', '_', 'u', 'p', '_', 'i', '3', '2', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'i', 'n', '_', 'a', 'p', 'p', 'r',
  'o', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'i', 'n',
  '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'a', 'p', 'p', 'r',
  'o', 'x', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r',
  't', '_', 'a', 'p', 'p', 'r', 'o', 'x', '_', 'f', 't', 'z', '_', 'f', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'f', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'm', '_', 'd',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'm',
  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_',
  'r', 'm', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'q', 'r', 't', '_', 'r', 'n', '_', 'd', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'n', '_', 'f', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'n', '_', 'f', 't',
  'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't',
  '_', 'r', 'p', '_', 'd', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q',
  'r', 't', '_', 'r', 'p', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'q', 'r', 't', '_', 'r', 'p', '_', 'f', 't', 'z', '_', 'f', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'z', '_', 'd',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_', 'r', 'z',
  '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'q', 'r', 't', '_',
  'r', 'z', '_', 'f', 't', 'z', '_', 'f', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 'q', '_', 'a', 'r', 'r', 'a', 'y', '_', 's', 'i', 'z', 'e',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 'q', '_', 'c', 'h', 'a',
  'n', 'n', 'e', 'l', '_', 'd', 'a', 't', 'a', '_', 't', 'y', 'p', 'e', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 'q', '_', 'c', 'h', 'a', 'n',
  'n', 'e', 'l', '_', 'o', 'r', 'd', 'e', 'r', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 'q', '_', 'd', 'e', 'p', 't', 'h', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 'q', '_', 'h', 'e', 'i', 'g', 'h', 't', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 'q', '_', 'w', 'i', 'd', 't',
  'h', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 'c',
  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1',
  '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
  'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a',
  'y', '_', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd',
  '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 'z', 'e', 'r', 'o',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '6', '4', '_', 'c', 'l',
  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '6', '4',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i',
  '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
  'a', 'y', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r',
  'r', 'a', 'y', '_', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6', '_', 'c', 'l', 'a', 'm',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
  '2', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
  'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i',
  '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '2', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
  'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '6', '4', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '6', '4',
  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
  '2', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
  'a', 'y', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_',
  'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 'z', 'e', 'r', 'o',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6', '_',
  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
  '4', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r',
  'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd',
  '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2', '_', 'c', 'l',
  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i',
  '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '4', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i',
  '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '1', '6', '_', 'c', 'l',
  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '3',
  '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '3', '2', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i',
  '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '6', '4', '_', 'z',
  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'i', '8', '_',
  'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '1', '6', '_', 'c', 'l',
  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '1', 'd', '_', 'v', '2', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'v', '2', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd',
  '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v',
  '2', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i',
  '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '6',
  '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '6', '4', '_',
  'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '1', 'd', '_', 'v', '2', 'i', '8', '_', 'c', 'l', 'a',
  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'b', '_', '1', 'd', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'v', '2', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v',
  '4', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4',
  'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '1',
  '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '3', '2', '_',
  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '3', '2', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '1', 'd', '_', 'v', '4', 'i', '3', '2', '_', 'z', 'e', 'r',
  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '1', 'd', '_', 'v', '4', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1',
  'd', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '1', 'd', '_', 'v',
  '4', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a',
  'y', '_', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd',
  '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_', 'z', 'e', 'r', 'o',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2', '_', 'c', 'l',
  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3', '2',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i',
  '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
  'r', 'a', 'y', '_', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
  'a', 'r', 'r', 'a', 'y', '_', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_', 'c', 'l', 'a', 'm',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_', 't', 'r',
  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '8', '_', 'z',
  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i',
  '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a',
  'y', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
  'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '1', '6', '_', 'z', 'e', 'r',
  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2',
  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
  'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
  'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '6', '4', '_', 'c',
  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2',
  'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a',
  'y', '_', 'v', '2', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
  'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 'c', 'l', 'a', 'm',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_',
  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2',
  'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '4', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
  'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '1', '6',
  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v',
  '4', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
  'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
  'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '3', '2', '_', 'z',
  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i',
  '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'a', 'r',
  'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd',
  '_', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '1',
  '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '1', '6', '_', 'z', 'e',
  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'b', '_', '2', 'd', '_', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
  'd', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '3',
  '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '6', '4', '_', 'c', 'l',
  'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2',
  'd', '_', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '8',
  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'i', '8', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '2', 'd', '_', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v',
  '2', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2',
  'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '1',
  '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '3', '2', '_',
  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '3', '2', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '3', '2', '_', 'z', 'e', 'r',
  'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '2', 'd', '_', 'v', '2', 'i', '6', '4', '_', 'c', 'l', 'a', 'm', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '2', 'd', '_', 'v', '2', 'i', '6', '4', '_', 't', 'r', 'a', 'p', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd',
  '_', 'v', '2', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v',
  '2', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i',
  '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '2', 'i', '8', '_', 'z',
  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '1', '6', '_', 'c', 'l', 'a',
  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'b', '_', '2', 'd', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r', 'a', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '2', 'd', '_', 'v', '4', 'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd',
  '_', 'v', '4', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_',
  'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '4',
  'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '8',
  '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '8', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '2', 'd', '_', 'v', '4', 'i', '8', '_', 'z', 'e', 'r', 'o',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '3', 'd', '_', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
  'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '1', '6', '_',
  'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '3', 'd', '_', 'i', '3', '2', '_', 'c', 'l', 'a', 'm',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '3', 'd', '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
  'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '6', '4', '_',
  'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '6', '4', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b',
  '_', '3', 'd', '_', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
  'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'i', '8', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '3', 'd', '_', 'i', '8', '_', 'z', 'e', 'r', 'o', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd',
  '_', 'v', '2', 'i', '1', '6', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
  'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2',
  'i', '1', '6', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '3',
  '2', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '3', '2',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '3', '2', '_', 'z',
  'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '6', '4', '_', 'c', 'l', 'a',
  'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'b', '_', '3', 'd', '_', 'v', '2', 'i', '6', '4', '_', 't', 'r', 'a', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '3', 'd', '_', 'v', '2', 'i', '6', '4', '_', 'z', 'e', 'r', 'o', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd',
  '_', 'v', '2', 'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v',
  '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '2', 'i', '8',
  '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '1', '6', '_', 'c',
  'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '1', '6', '_', 't', 'r',
  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'b', '_', '3', 'd', '_', 'v', '4', 'i', '1', '6', '_', 'z', 'e', 'r', 'o',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_',
  '3', 'd', '_', 'v', '4', 'i', '3', '2', '_', 'c', 'l', 'a', 'm', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3',
  'd', '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_',
  'v', '4', 'i', '3', '2', '_', 'z', 'e', 'r', 'o', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4',
  'i', '8', '_', 'c', 'l', 'a', 'm', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '8',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'b', '_', '3', 'd', '_', 'v', '4', 'i', '8', '_', 'z', 'e',
  'r', 'o', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_',
  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3',
  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
  'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
  '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i',
  '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1',
  'd', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'i', '3',
  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'i', '8', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
  '_', '1', 'd', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1',
  'd', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_',
  'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'v', '4', 'i',
  '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'p', '_', '1', 'd', '_', 'v', '4', 'i', '3', '2',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'p', '_', '1', 'd', '_', 'v', '4', 'i', '8', '_', 't', 'r',
  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '1', '6', '_',
  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'i', '3',
  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_',
  'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
  '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '2', 'i', '8', '_', 't',
  'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't',
  '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y', '_', 'v', '4', 'i',
  '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a', 'r', 'r', 'a', 'y',
  '_', 'v', '4', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n',
  'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'a',
  'r', 'r', 'a', 'y', '_', 'v', '4', 'i', '8', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2',
  'd', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'i', '3',
  '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's',
  'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'i', '8', '_', 't', 'r', 'a',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p',
  '_', '2', 'd', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2',
  'd', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_',
  'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'v', '4', 'i',
  '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  's', 'u', 's', 't', '_', 'p', '_', '2', 'd', '_', 'v', '4', 'i', '3', '2',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u',
  's', 't', '_', 'p', '_', '2', 'd', '_', 'v', '4', 'i', '8', '_', 't', 'r',
  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'p', '_', '3', 'd', '_', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_',
  '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd',
  '_', 'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'i', '8', '_',
  't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's',
  't', '_', 'p', '_', '3', 'd', '_', 'v', '2', 'i', '1', '6', '_', 't', 'r',
  'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_',
  'p', '_', '3', 'd', '_', 'v', '2', 'i', '3', '2', '_', 't', 'r', 'a', 'p',
  '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_',
  '3', 'd', '_', 'v', '2', 'i', '8', '_', 't', 'r', 'a', 'p', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_',
  'v', '4', 'i', '1', '6', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'v', '4',
  'i', '3', '2', '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 's', 'u', 's', 't', '_', 'p', '_', '3', 'd', '_', 'v', '4', 'i', '8',
  '_', 't', 'r', 'a', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 's', 'w',
  'a', 'p', '_', 'l', 'o', '_', 'h', 'i', '_', 'b', '6', '4', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 't', 'r', 'u', 'n', 'c', '_', 'd', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 't', 'r', 'u', 'n', 'c', '_', 'f', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 't', 'r', 'u', 'n', 'c', '_', 'f', 't', 'z', '_',
  'f', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'a', 'r',
  'r', 'a', 'y', '_', 's', 'i', 'z', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 't', 'x', 'q', '_', 'c', 'h', 'a', 'n', 'n', 'e', 'l', '_', 'd', 'a',
  't', 'a', '_', 't', 'y', 'p', 'e', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  't', 'x', 'q', '_', 'c', 'h', 'a', 'n', 'n', 'e', 'l', '_', 'o', 'r', 'd',
  'e', 'r', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'd',
  'e', 'p', 't', 'h', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q',
  '_', 'h', 'e', 'i', 'g', 'h', 't', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  't', 'x', 'q', '_', 'n', 'u', 'm', '_', 'm', 'i', 'p', 'm', 'a', 'p', '_',
  'l', 'e', 'v', 'e', 'l', 's', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 't',
  'x', 'q', '_', 'n', 'u', 'm', '_', 's', 'a', 'm', 'p', 'l', 'e', 's', '\000',
  '_', '_', 'n', 'v', 'v', 'm', '_', 't', 'x', 'q', '_', 'w', 'i', 'd', 't',
  'h', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r',
  'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r',
  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'd', '_', 'r',
  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r',
  'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r',
  'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r',
  'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'i', '2', 'f', '_', 'r',
  'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'd', '_',
  'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2', 'd',
  '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l', '2',
  'd', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l', 'l',
  '2', 'd', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u', 'l',
  'l', '2', 'f', '_', 'r', 'm', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'u',
  'l', 'l', '2', 'f', '_', 'r', 'n', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'u', 'l', 'l', '2', 'f', '_', 'r', 'p', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'u', 'l', 'l', '2', 'f', '_', 'r', 'z', '\000', '_', '_', 'n', 'v', 'v',
  'm', '_', 'v', 'o', 't', 'e', '_', 'a', 'l', 'l', '\000', '_', '_', 'n', 'v',
  'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'a', 'l', 'l', '_', 's', 'y', 'n',
  'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'a',
  'n', 'y', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_',
  'a', 'n', 'y', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm',
  '_', 'v', 'o', 't', 'e', '_', 'b', 'a', 'l', 'l', 'o', 't', '\000', '_', '_',
  'n', 'v', 'v', 'm', '_', 'v', 'o', 't', 'e', '_', 'b', 'a', 'l', 'l', 'o',
  't', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'n', 'v', 'v', 'm', '_', 'v',
  'o', 't', 'e', '_', 'u', 'n', 'i', '\000', '_', '_', 'n', 'v', 'v', 'm', '_',
  'v', 'o', 't', 'e', '_', 'u', 'n', 'i', '_', 's', 'y', 'n', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'a', 'd',
  'd', 'e', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'd', 'd', 'f', '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 't', 'o',
  '_', 'o', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_',
  'v', 'c', 'i', 'p', 'h', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p',
  't', 'o', '_', 'v', 'c', 'i', 'p', 'h', 'e', 'r', 'l', 'a', 's', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'n', 'c', 'i', 'p',
  'h', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v',
  'n', 'c', 'i', 'p', 'h', 'e', 'r', 'l', 'a', 's', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'e', 'r', 'm', 'x', 'o', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'e', 'r',
  'm', 'x', 'o', 'r', '_', 'b', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p',
  't', 'o', '_', 'v', 'p', 'm', 's', 'u', 'm', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c',
  'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'm', 's', 'u', 'm', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'm', 's', 'u', 'm',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 'p', 'm',
  's', 'u', 'm', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o', '_',
  'v', 's', 'b', 'o', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'c', 'r', 'y', 'p', 't', 'o',
  '_', 'v', 's', 'h', 'a', 's', 'i', 'g', 'm', 'a', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'c', 'r', 'y', 'p', 't', 'o', '_', 'v', 's', 'h', 'a', 's', 'i', 'g', 'm',
  'a', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'd', 's', 's', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'd', 's',
  's', 'a', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'd', 's', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'd', 's', 't', 's', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'd', 's', 't', 's', 't', 't',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'd', 's', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'm', 'f', 'v',
  's', 'c', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'm', 't', 'v', 's', 'c', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'm', 't', 'v', 's', 'r', 'b', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'm', 't',
  'v', 's', 'r', 'd', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'm', 't', 'v', 's', 'r', 'h',
  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'm', 't', 'v', 's', 'r', 'q', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'm', 't', 'v', 's', 'r', 'w', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'b',
  's', 'd', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'b', 's', 'd', 'u', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'a', 'b', 's', 'd', 'u', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'a', 'd', 'd', 'c', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd',
  'c', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 'e', 'c', 'u', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 'e', 'u', 'q', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'a', 'd', 'd', 's', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd',
  'd', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 's', 'w', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'a', 'd', 'd', 'u', 'b', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'a', 'd', 'd', 'u', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'd', 'd',
  'u', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'v', 'g', 's', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'a', 'v', 'g', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'v',
  'g', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'v', 'g', 'u', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'a', 'v', 'g', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'a', 'v',
  'g', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'b', 'p', 'e', 'r', 'm', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'b', 'p', 'e', 'r', 'm', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'c', 'f', 's', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'f', 'u', 'g', 'e', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'c', 'f', 'u', 'x', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
  'l', 'r', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'l', 'r', 'r', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'c', 'l', 'z', 'd', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
  'l', 'z', 'l', 's', 'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'b',
  'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'b', 'f', 'p', '_', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'f', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'c', 'm', 'p', 'e', 'q', 'f', 'p', '_', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'c', 'm', 'p', 'e', 'q', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
  'p', 'e', 'q', 'u', 'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p',
  'e', 'q', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u',
  'd', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'h', '_', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'c', 'm', 'p', 'e', 'q', 'u', 'q', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'c', 'm', 'p', 'e', 'q', 'u', 'q', '_', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'c', 'm', 'p', 'e', 'q', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p',
  'e', 'q', 'u', 'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g',
  'e', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 'e', 'f', 'p',
  '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'f', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'f', 'p', '_', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'c', 'm', 'p', 'g', 't', 's', 'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
  'm', 'p', 'g', 't', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g',
  't', 's', 'd', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't',
  's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'h', '_',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'q', '_', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'c', 'm', 'p', 'g', 't', 's', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c',
  'm', 'p', 'g', 't', 's', 'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
  'p', 'g', 't', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't',
  'u', 'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'd', '_', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'c', 'm', 'p', 'g', 't', 'u', 'h', '_', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'c', 'm', 'p', 'g', 't', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
  'p', 'g', 't', 'u', 'q', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p',
  'g', 't', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'g', 't', 'u',
  'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'b', '_', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'c', 'm', 'p', 'n', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
  'p', 'n', 'e', 'h', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n',
  'e', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'w', '_', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z', 'b', '_', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'c', 'm', 'p', 'n', 'e', 'z', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm',
  'p', 'n', 'e', 'z', 'h', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p',
  'n', 'e', 'z', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'm', 'p', 'n', 'e', 'z',
  'w', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'n', 't', 'm', 'b', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'c', 'n', 't', 'm', 'b', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'c', 'n', 't', 'm', 'b', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 'n', 't', 'm',
  'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 't', 's', 'x', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'c', 't', 'u', 'x', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 't', 'z',
  'd', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'c', 't', 'z', 'l', 's', 'b', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'd', 'i', 'v', 'e', 's', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'd', 'i', 'v', 'e', 's', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'd', 'i', 'v', 'e',
  's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'd', 'i', 'v', 'e', 'u', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'd', 'i', 'v', 'e', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'd',
  'i', 'v', 'e', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 'p', 'a', 'n',
  'd', 'b', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 'p', 'a', 'n', 'd', 'd',
  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 'p', 'a', 'n', 'd', 'h', 'm', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'e', 'x', 'p', 'a', 'n', 'd', 'q', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'e', 'x', 'p', 'a', 'n', 'd', 'w', 'm', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'e', 'x', 'p', 't', 'e', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't',
  'd', 'd', 'v', 'l', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 'd', 'd',
  'v', 'r', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 'd', 'u', 'b', 'v',
  'l', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 'd', 'u', 'b', 'v', 'r',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 'd', 'u', 'h', 'v', 'l', 'x',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 'd', 'u', 'h', 'v', 'r', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'e', 'x', 't', 'd', 'u', 'w', 'v', 'l', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'e', 'x', 't', 'd', 'u', 'w', 'v', 'r', 'x', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'e', 'x', 't', 'r', 'a', 'c', 't', 'b', 'm', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'e', 'x', 't', 'r', 'a', 'c', 't', 'd', 'm', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'e', 'x', 't', 'r', 'a', 'c', 't', 'h', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e',
  'x', 't', 'r', 'a', 'c', 't', 'q', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x',
  't', 'r', 'a', 'c', 't', 'w', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't',
  's', 'b', '2', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 's', 'b', '2',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't', 's', 'd', '2', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'e', 'x', 't', 's', 'h', '2', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'e', 'x', 't', 's', 'h', '2', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'e', 'x', 't',
  's', 'w', '2', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'g', 'b', 'b', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'g', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'b',
  'l', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'b', 'r', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'i', 'n', 's', 'b', 'v', 'l', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'i', 'n', 's', 'b', 'v', 'r', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's',
  'd', 'l', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'd', 'r', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'i', 'n', 's', 'h', 'l', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'i', 'n', 's', 'h', 'r', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'h',
  'v', 'l', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'h', 'v', 'r', 'x',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'w', 'l', 'x', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'i', 'n', 's', 'w', 'r', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's',
  'w', 'v', 'l', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'i', 'n', 's', 'w', 'v', 'r',
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'l', 'o', 'g', 'e', 'f', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'm', 'a', 'd', 'd', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a',
  'x', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a', 'x', 's', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'm', 'a', 'x', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a',
  'x', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a', 'x', 's', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'm', 'a', 'x', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a',
  'x', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'a', 'x', 'u', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'm', 'a', 'x', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'h',
  'a', 'd', 'd', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'h', 'r', 'a',
  'd', 'd', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 'f', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 's', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'm', 'i', 'n', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 's', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 's', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'm', 'i', 'n', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 'u', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'm', 'i', 'n', 'u', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'm', 'i', 'n', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'l', 'a', 'd', 'd',
  'u', 'h', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 'c', 'u', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 'm', 'b', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'm', 's', 'u', 'm', 's', 'h', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
  's', 'u', 'm', 's', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm',
  'u', 'b', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 'u', 'd', 'm',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'm', 's', 'u', 'm', 'u', 'h', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'm', 's', 'u', 'm', 'u', 'h', 's', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
  'u', 'l', 'e', 's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 's',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 's', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'm', 'u', 'l', 'e', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u',
  'l', 'e', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 'u', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'e', 'u', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'm', 'u', 'l', 'e', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l',
  'h', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'h', 's', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'm', 'u', 'l', 'h', 'u', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'm', 'u', 'l', 'h', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o',
  's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o', 's', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'm', 'u', 'l', 'o', 's', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm',
  'u', 'l', 'o', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o', 'u',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'm', 'u', 'l', 'o', 'u', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'm', 'u', 'l', 'o', 'u', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'm', 'u',
  'l', 'o', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'n', 'm', 's', 'u', 'b', 'f',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'p', 'd', 'e', 'p', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'p', 'e', 'r', 'm', '_', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p',
  'e', 'x', 't', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 'p', 'x', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'p', 'k', 's', 'd', 's', 's', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p',
  'k', 's', 'd', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 's', 'h', 's',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 's', 'h', 'u', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'p', 'k', 's', 'w', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k',
  's', 'w', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'k', 'u', 'd', 'u', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 'p', 'k', 'u', 'h', 'u', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'p', 'k', 'u', 'w', 'u', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'r', 't',
  'y', 'b', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'p', 'r', 't', 'y', 'b', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'p', 'r', 't', 'y', 'b', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'r', 'e', 'f', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'f', 'i', 'm', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'r', 'f', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'f', 'i',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'r', 'f', 'i', 'z', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'r', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 'r', 'l', 'd', 'm', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'd', 'n',
  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r',
  'l', 'q', 'm', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'q', 'n', 'm', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 'r', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'w',
  'm', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'r', 'l', 'w', 'n', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'r', 's', 'q', 'r', 't', 'e', 'f', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  's', 'e', 'l', '_', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 's', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', 'd',
  'b', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  's', 'l', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'l', 'v', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 's', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 's', 'r', 'a', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'a', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 's', 'r', 'a', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'd', 'b', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 's', 'r', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'o', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 's', 'r', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'r', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 's', 't', 'r', 'i', 'b', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's',
  't', 'r', 'i', 'b', 'l', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 't', 'r',
  'i', 'b', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a',
  'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 't', 'r', 'i', 'b', 'r', '_',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 's', 't', 'r', 'i', 'h', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 's', 't', 'r', 'i', 'h', 'l', '_', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  's', 't', 'r', 'i', 'h', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 't', 'r', 'i',
  'h', 'r', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 'c', 'u', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 'c', 'u', 'w', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 's', 'u', 'b', 'e', 'c', 'u', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u',
  'b', 'e', 'u', 'q', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 's', 'b',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 's', 'h', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 's', 'u', 'b', 's', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u',
  'b', 'u', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 'u', 'h', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i',
  'v', 'e', 'c', '_', 'v', 's', 'u', 'b', 'u', 'w', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_',
  'v', 's', 'u', 'm', '2', 's', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u',
  'm', '4', 's', 'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'm', '4', 's',
  'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 's', 'u', 'm', '4', 'u', 'b', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v',
  'e', 'c', '_', 'v', 's', 'u', 'm', 's', 'w', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v',
  'u', 'p', 'k', 'h', 'p', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'h',
  's', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l',
  't', 'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'h', 's', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e',
  'c', '_', 'v', 'u', 'p', 'k', 'h', 's', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u',
  'p', 'k', 'l', 'p', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'a', 'l', 't', 'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'l', 's',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't',
  'i', 'v', 'e', 'c', '_', 'v', 'u', 'p', 'k', 'l', 's', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'a', 'l', 't', 'i', 'v', 'e', 'c',
  '_', 'v', 'u', 'p', 'k', 'l', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'b', 'c', 'd', 'a', 'd', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'b',
  'c', 'd', 'a', 'd', 'd', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'p', 'p', 'c', '_', 'b', 'c', 'd', 's', 'u', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'b', 'c',
  'd', 's', 'u', 'b', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'b', 'p', 'e', 'r', 'm', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'c', 'f', 'u', 'g', 'e', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'c', 'm', 'p', 'e', 'q',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c',
  '_', 'c', 'm', 'p', 'r', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'c', 'n', 't', 'l', 'z', 'd', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'c', 'n', 't', 't', 'z', 'd', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'c', 'o', 'm',
  'p', 'a', 'r', 'e', '_', 'e', 'x', 'p', '_', 'e', 'q', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'c', 'o', 'm', 'p',
  'a', 'r', 'e', '_', 'e', 'x', 'p', '_', 'g', 't', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'c', 'o', 'm', 'p', 'a',
  'r', 'e', '_', 'e', 'x', 'p', '_', 'l', 't', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'c', 'o', 'm', 'p', 'a', 'r',
  'e', '_', 'e', 'x', 'p', '_', 'u', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'd', 'a', 'r', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'd', 'a', 'r', 'n', '_', '3', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'a', 'r', 'n', '_', 'r', 'a', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'c', 'b', 'f',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_',
  'd', 'c', 'b', 'f', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 'd', 'c', 'b', 'f', 'l', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'd', 'c', 'b', 's',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c',
  '_', 'd', 'c', 'b', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 'd', 'c', 'b', 't', 's', 't', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'd', 'c', 'b', 't',
  's', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p',
  'p', 'c', '_', 'd', 'c', 'b', 't', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'd', 'c', 'b', 'z', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v', 'd', 'e', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v', 'd', 'e', 'u',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v', 'f',
  '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 't', 'o', '_', 'o', 'd',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i', 'v',
  'w', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'd', 'i',
  'v', 'w', 'e', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'p', 'p', 'c', '_', 'e', 'i', 'e', 'i', 'o', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'e', 'x', 't', 'r', 'a', 'c',
  't', '_', 'e', 'x', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', '_', 's', 'i',
  'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c',
  '_', 'f', 'c', 'f', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'p', 'p', 'c', '_', 'f', 'c', 'f', 'u', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'c', 't', 'i',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c',
  '_', 'f', 'c', 't', 'i', 'd', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'c', 't', 'i', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'c', 't',
  'i', 'w', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p',
  'p', 'c', '_', 'f', 'c', 't', 'u', 'd', 'z', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'c', 't', 'u', 'w', 'z',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'f', 'm', 'a', 'f',
  '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 't', 'o', '_', 'o', 'd',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c',
  '_', 'f', 'm', 's', 'u', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'p', 'p', 'c', '_', 'f', 'm', 's', 'u', 'b', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'n', 'a',
  'b', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p',
  'c', '_', 'f', 'n', 'a', 'b', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'n', 'm', 'a', 'd', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f',
  'n', 'm', 'a', 'd', 'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'p', 'p', 'c', '_', 'f', 'r', 'e', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'r', 'e', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 'r',
  's', 'q', 'r', 't', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 'f', 'r', 's', 'q', 'r', 't', 'e', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'f', 's',
  'e', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p',
  'c', '_', 'f', 's', 'e', 'l', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'g', 'e', 't', '_', 't', 'e', 'x', 'a', 's', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'g', 'e', 't', '_', 't', 'e',
  'x', 'a', 's', 'r', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'g', 'e', 't', '_', 't', 'f', 'h', 'a', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'g', 'e', 't', '_', 't', 'f', 'i', 'a', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_',
  'i', 'c', 'b', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'p', 'p', 'c', '_', 'i', 'n', 's', 'e', 'r', 't', '_', 'e', 'x', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'i',
  'o', 's', 'p', 'a', 'c', 'e', '_', 'e', 'i', 'e', 'i', 'o', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'i', 'o', 's',
  'p', 'a', 'c', 'e', '_', 'l', 'w', 's', 'y', 'n', 'c', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'i', 'o', 's', 'p',
  'a', 'c', 'e', '_', 's', 'y', 'n', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'i', 's', 'y', 'n', 'c', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'l', 'o',
  'a', 'd', '4', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'p', 'p', 'c', '_', 'l', 'o', 'a', 'd', '8', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'l', 'w', 's', 'y', 'n',
  'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c',
  '_', 'm', 'a', 'd', 'd', 'h', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'p', 'p', 'c', '_', 'm', 'a', 'd', 'd', 'h', 'd', 'u', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'm',
  'a', 'd', 'd', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 'm', 'f', 'm', 's', 'r', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'm', 'f', 't', 'b', 'u',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_',
  'm', 't', 'f', 's', 'b', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'p', 'p', 'c', '_', 'm', 't', 'f', 's', 'b', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'm', 't', 'f',
  's', 'f', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p',
  'p', 'c', '_', 'm', 't', 'm', 's', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'm', 'u', 'l', 'f', '1', '2', '8', '_', 'r', 'o', 'u',
  'n', 'd', '_', 't', 'o', '_', 'o', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'm', 'u', 'l', 'h', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'm',
  'u', 'l', 'h', 'd', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 'm', 'u', 'l', 'h', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 'm', 'u', 'l', 'h', 'w',
  'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'a', 'c',
  'k', '_', 'l', 'o', 'n', 'g', 'd', 'o', 'u', 'b', 'l', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'd', 'e', 'p', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'e', 'x', 't', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'e', 'a', 'd', 'f',
  'l', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's',
  'x', '_', 's', 'c', 'a', 'l', 'a', 'r', '_', 'e', 'x', 't', 'r', 'a', 'c',
  't', '_', 'e', 'x', 'p', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 's', 'x', '_', 's', 'c', 'a', 'l', 'a', 'r', '_', 'i', 'n',
  's', 'e', 'r', 't', '_', 'e', 'x', 'p', '_', 'q', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'e', 't', '_', 't', 'e', 'x', 'a',
  's', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'e',
  't', '_', 't', 'e', 'x', 'a', 's', 'r', 'u', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', 'e', 't', '_', 't', 'f', 'h', 'a', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'e', 't', '_', 't',
  'f', 'i', 'a', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'p', 'p', 'c', '_', 's', 'e', 't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', 'e', 't', 'f', 'l', 'm', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', 'e', 't', 'r', 'n', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', 'q', 'r', 't', 'f', '1', '2',
  '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 't', 'o', '_', 'o', 'd', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 's',
  't', 'b', 'c', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'p', 'p', 'c', '_', 's', 't', 'd', 'c', 'x', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 's', 't', 'f', 'i', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 's',
  't', 'o', 'r', 'e', '2', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'p', 'p', 'c', '_', 's', 't', 'o', 'r', 'e', '4', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 's', 't',
  'o', 'r', 'e', '8', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'p', 'p', 'c', '_', 's', 't', 'w', 'c', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', 'u', 'b', 'f', '1', '2', '8', '_', 'r',
  'o', 'u', 'n', 'd', '_', 't', 'o', '_', 'o', 'd', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 's', 'y', 'n', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b', 'o',
  'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a',
  'b', 'o', 'r', 't', 'd', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 't', 'a', 'b', 'o', 'r', 't', 'd', 'c', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b', 'o', 'r', 't', 'w', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'a', 'b', 'o',
  'r', 't', 'w', 'c', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 't', 'b', 'e', 'g', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 't', 'c', 'h', 'e', 'c', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 't', 'd', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'e', 'n', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'e', 'n', 'd', 'a', 'l', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_',
  't', 'r', 'a', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'p', 'p', 'c', '_', 't', 'r', 'a', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 't', 'r', 'e', 'c', 'h', 'k', 'p', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'r', 'e', 'c', 'l', 'a',
  'i', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 't', 'r',
  'e', 's', 'u', 'm', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 't', 'r', 'u', 'n', 'c', 'f', '1', '2', '8', '_', 'r', 'o', 'u', 'n',
  'd', '_', 't', 'o', '_', 'o', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 't', 's', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 't', 's', 'u', 's', 'p', 'e', 'n', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 't', 't', 'e', 's', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'p', 'p', 'c', '_', 't', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'u', 'n', 'p', 'a', 'c',
  'k', '_', 'l', 'o', 'n', 'g', 'd', 'o', 'u', 'b', 'l', 'e', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 's', 'm',
  'a', 'x', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 's', 'x', '_', 'x', 's', 'm', 'i', 'n', 'd', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm',
  'p', 'e', 'q', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'e', 'q', 'd', 'p', '_',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x',
  '_', 'x', 'v', 'c', 'm', 'p', 'e', 'q', 's', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p',
  'e', 'q', 's', 'p', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g', 'e', 'd', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
  'x', 'v', 'c', 'm', 'p', 'g', 'e', 'd', 'p', '_', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm',
  'p', 'g', 'e', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g', 'e', 's', 'p', '_',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x',
  '_', 'x', 'v', 'c', 'm', 'p', 'g', 't', 'd', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p',
  'g', 't', 'd', 'p', '_', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'm', 'p', 'g', 't', 's', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
  'x', 'v', 'c', 'm', 'p', 'g', 't', 's', 'p', '_', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v',
  'b', 'f', '1', '6', 's', 'p', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 'd', 'p', 's', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
  'x', 'v', 'c', 'v', 'd', 'p', 's', 'x', 'w', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 'd',
  'p', 'u', 'x', 'w', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 'h', 'p', 's', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v',
  'c', 'v', 's', 'p', 'b', 'f', '1', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 's', 'p', 'd',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x',
  '_', 'x', 'v', 'c', 'v', 's', 'p', 'h', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 's', 'p',
  's', 'x', 'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 's', 'p', 'u', 'x', 'd', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
  'v', 'c', 'v', 's', 'x', 'd', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v', 's', 'x', 'w',
  'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's',
  'x', '_', 'x', 'v', 'c', 'v', 'u', 'x', 'd', 's', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'c', 'v',
  'u', 'x', 'w', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 'd', 'i', 'v', 'd', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'd',
  'i', 'v', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 's', 'x', '_', 'x', 'v', 'i', 'e', 'x', 'p', 'd', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'i',
  'e', 'x', 'p', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 'm', 'a', 'x', 'd', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'm',
  'a', 'x', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 's', 'x', '_', 'x', 'v', 'm', 'i', 'n', 'd', 'p', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'm', 'i',
  'n', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  's', 'x', '_', 'x', 'v', 'r', 'e', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'r', 'e', 's', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
  'x', 'v', 'r', 's', 'q', 'r', 't', 'e', 'd', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'r', 's', 'q',
  'r', 't', 'e', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 't', 'd', 'i', 'v', 'd', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v',
  't', 'd', 'i', 'v', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 's', 'x', '_', 'x', 'v', 't', 'l', 's', 'b', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v',
  't', 's', 'q', 'r', 't', 'd', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 't', 's', 'q', 'r', 't', 's',
  'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x',
  '_', 'x', 'v', 't', 's', 't', 'd', 'c', 'd', 'p', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 't', 's', 't',
  'd', 'c', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 's', 'x', '_', 'x', 'v', 'x', 'e', 'x', 'p', 'd', 'p', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v', 'x',
  'e', 'x', 'p', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 's', 'x', '_', 'x', 'v', 'x', 's', 'i', 'g', 'd', 'p', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'v',
  'x', 's', 'i', 'g', 's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'b', 'l', 'e', 'n', 'd', 'v', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_',
  'x', 'x', 'b', 'l', 'e', 'n', 'd', 'v', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'b', 'l', 'e', 'n',
  'd', 'v', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  's', 'x', '_', 'x', 'x', 'b', 'l', 'e', 'n', 'd', 'v', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'e',
  'v', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  's', 'x', '_', 'x', 'x', 'e', 'x', 't', 'r', 'a', 'c', 't', 'u', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
  'x', 'g', 'e', 'n', 'p', 'c', 'v', 'b', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'g', 'e', 'n', 'p',
  'c', 'v', 'd', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 's', 'x', '_', 'x', 'x', 'g', 'e', 'n', 'p', 'c', 'v', 'h', 'm', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x',
  'x', 'g', 'e', 'n', 'p', 'c', 'v', 'w', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'i', 'n', 's', 'e',
  'r', 't', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  's', 'x', '_', 'x', 'x', 'l', 'e', 'q', 'v', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 's', 'x', '_', 'x', 'x', 'p', 'e', 'r', 'm',
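  // End of the __builtin_vsx_* (PowerPC VSX) names; the __builtin_r600_*
  // (AMDGPU R600) names begin within the next line. Names in this generated
  // string table are NUL ('\000') separated.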
  'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0',
  '0', '_', 'g', 'r', 'o', 'u', 'p', '_', 'b', 'a', 'r', 'r', 'i', 'e', 'r',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0',
  '_', 'i', 'm', 'p', 'l', 'i', 'c', 'i', 't', 'a', 'r', 'g', '_', 'p', 't',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0',
  '0', '_', 'r', 'a', 't', '_', 's', 't', 'o', 'r', 'e', '_', 't', 'y', 'p',
  'e', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6',
  '0', '0', '_', 'r', 'e', 'a', 'd', '_', 'g', 'l', 'o', 'b', 'a', 'l', '_',
  's', 'i', 'z', 'e', '_', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 'g', 'l', 'o',
  'b', 'a', 'l', '_', 's', 'i', 'z', 'e', '_', 'y', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd',
  '_', 'g', 'l', 'o', 'b', 'a', 'l', '_', 's', 'i', 'z', 'e', '_', 'z', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_',
  'r', 'e', 'a', 'd', '_', 'n', 'g', 'r', 'o', 'u', 'p', 's', '_', 'x', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_',
  'r', 'e', 'a', 'd', '_', 'n', 'g', 'r', 'o', 'u', 'p', 's', '_', 'y', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_',
  'r', 'e', 'a', 'd', '_', 'n', 'g', 'r', 'o', 'u', 'p', 's', '_', 'z', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_',
  'r', 'e', 'a', 'd', '_', 't', 'g', 'i', 'd', '_', 'x', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a',
  'd', '_', 't', 'g', 'i', 'd', '_', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'r', '6', '0', '0', '_', 'r', 'e', 'a', 'd', '_', 't',
  'g', 'i', 'd', '_', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
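  // __builtin_riscv_* names follow (RISC-V scalar crypto: aes32*, aes64*,
  // sha512*).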
  '_', 'r', 'i', 's', 'c', 'v', '_', 'a', 'e', 's', '3', '2', 'd', 's', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c',
  'v', '_', 'a', 'e', 's', '3', '2', 'd', 's', 'm', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 'a', 'e',
  's', '3', '2', 'e', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'r', 'i', 's', 'c', 'v', '_', 'a', 'e', 's', '3', '2', 'e', 's',
  'm', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i',
  's', 'c', 'v', '_', 'a', 'e', 's', '6', '4', 'd', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 'a', 'e',
  's', '6', '4', 'd', 's', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'r', 'i', 's', 'c', 'v', '_', 'a', 'e', 's', '6', '4', 'e', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c',
  'v', '_', 'a', 'e', 's', '6', '4', 'e', 's', 'm', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 'a', 'e', 's',
  '6', '4', 'i', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'r', 'i', 's', 'c', 'v', '_', 'a', 'e', 's', '6', '4', 'k', 's', '1', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c',
  'v', '_', 'a', 'e', 's', '6', '4', 'k', 's', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 's', 'h', 'a',
  '5', '1', '2', 's', 'i', 'g', '0', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 's', 'h', 'a', '5', '1', '2',
  's', 'i', 'g', '0', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'r', 'i', 's', 'c', 'v', '_', 's', 'h', 'a', '5', '1', '2', 's', 'i',
  'g', '0', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r',
  'i', 's', 'c', 'v', '_', 's', 'h', 'a', '5', '1', '2', 's', 'i', 'g', '1',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c',
  'v', '_', 's', 'h', 'a', '5', '1', '2', 's', 'i', 'g', '1', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_',
  's', 'h', 'a', '5', '1', '2', 's', 'i', 'g', '1', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 's', 'h',
  'a', '5', '1', '2', 's', 'u', 'm', '0', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'r', 'i', 's', 'c', 'v', '_', 's', 'h', 'a', '5', '1',
  '2', 's', 'u', 'm', '0', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'r', 'i', 's', 'c', 'v', '_', 's', 'h', 'a', '5', '1', '2', 's',
  'u', 'm', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'r',
  'i', 's', 'c', 'v', '_', 's', 'h', 'a', '5', '1', '2', 's', 'u', 'm', '1',
  'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
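  // SystemZ names follow: __builtin_s390_* interleaved with the
  // transactional-execution __builtin_tx_* builtins.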
  '0', '_', 'e', 'f', 'p', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 't', 'x', '_', 'n', 'e', 's', 't', 'i', 'n', 'g', '_', 'd', 'e',
  'p', 't', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'l', 'c', 'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 't', 'x', '_', 'a', 's', 's', 'i', 's', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 's',
  'f', 'p', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 'a', 'c', 'c', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'c', 'c', 'c',
  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'a', 'c', 'c', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'c', 'c', 'g', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'a', 'c', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'a', 'c', 'c', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'c', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 'a', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'v', 'g',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'a', 'v', 'g', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'a', 'v', 'g', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'l', 'f', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'a',
  'v', 'g', 'l', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'a', 'v', 'g', 'l', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'b', 'p',
  'e', 'r', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 'c', 'f', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'c', 'k', 's', 'm', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 'c', 'l', 'f', 'n', 'h', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'c', 'l', 'f', 'n', 'l', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 'c', 'n', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'c', 'r', 'n', 'f', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e',
  'r', 'i', 'm', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'e', 'r', 'i', 'm', 'f', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r',
  'i', 'm', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 'e', 'r', 'i', 'm', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'l',
  'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'f', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'l', 'l',
  'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'e', 'r', 'l', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'v',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'e', 'r', 'l', 'l', 'v', 'f', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'e', 'r', 'l', 'l',
  'v', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'e', 'r', 'l', 'l', 'v', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'a', 'e',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'f', 'a', 'e', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'a', 'e', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'f', 'a', 'e', 'z', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'f', 'a', 'e', 'z', 'f', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f',
  'a', 'e', 'z', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'f', 'e', 'e', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'e',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'f', 'e', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'e', 'z', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 'f', 'e', 'e', 'z', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'e', 'z', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'f', 'e', 'n', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'n', 'e', 'f', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f',
  'e', 'n', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'f', 'e', 'n', 'e', 'z', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'f',
  'e', 'n', 'e', 'z', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'f', 'e', 'n', 'e', 'z', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'g', 'f', 'm', 'a', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'a', 'f', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'g',
  'f', 'm', 'a', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'a', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'g', 'f',
  'm', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'g', 'f', 'm', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'g', 'f', 'm', 'g', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 'g', 'f', 'm', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'i', 's', 't', 'r', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'i',
  's', 't', 'r', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'i', 's', 't', 'r', 'h', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'l', 'b',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'l', 'r', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'e',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'm', 'a', 'e', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'e', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'm', 'a', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'm', 'a', 'h', 'f', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'h',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'm', 'a', 'l', 'e', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'e', 'f',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 'm', 'a', 'l', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'h', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 'm', 'a', 'l', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'h', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'm', 'a', 'l', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'l', 'o', 'f', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm',
  'a', 'l', 'o', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'm', 'a', 'o', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'a', 'o',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'm', 'a', 'o', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'e', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm',
  'e', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'm', 'e', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'h', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm',
  'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'm', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'e', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'm', 'l', 'e', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'm', 'l', 'e', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'h',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'm', 'l', 'h', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'h', 'h', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  'm', 'l', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 'm', 'l', 'o', 'f', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'l', 'o',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'm', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'o', 'f', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'm', 'o',
  'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'm', 's', 'l', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'd', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p',
  'e', 'r', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 'p', 'k', 'l', 's', 'f', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'k', 'l',
  's', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'p', 'k', 'l', 's', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'k', 's', 'f',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 'p', 'k', 's', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 'p', 'k', 's', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's',
  'b', 'c', 'b', 'i', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 's', 'b', 'i', 'q', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'c',
  'b', 'i', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 's', 'c', 'b', 'i', 'f', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'c', 'b',
  'i', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 's', 'c', 'b', 'i', 'h', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'c', 'b', 'i',
  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 's', 'l', 'b', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'l', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 's', 'l', 'd', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'r', 'a',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 's', 'r', 'a', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'r', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'r',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 's', 'r', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's',
  't', 'r', 'c', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 's', 't', 'r', 'c', 'f', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't',
  'r', 'c', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 's', 't', 'r', 'c', 'z', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 't',
  'r', 'c', 'z', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  's', '3', '9', '0', '_', 'v', 's', 't', 'r', 'c', 'z', 'h', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's',
  't', 'r', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's',
  '3', '9', '0', '_', 'v', 's', 'u', 'm', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'u', 'm', 'g',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 's', 'u', 'm', 'g', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'u', 'm', 'h', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 's', 'u', 'm', 'q', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', '3', '9', '0', '_', 'v', 's', 'u', 'm', 'q', 'g', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v',
  't', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'u', 'p', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'h', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 'u', 'p', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p',
  'l', 'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3',
  '9', '0', '_', 'v', 'u', 'p', 'l', 'h', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'h',
  'f', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9',
  '0', '_', 'v', 'u', 'p', 'l', 'h', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'h', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0',
  '_', 'v', 'u', 'p', 'l', 'l', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 's', '3', '9', '0', '_', 'v', 'u', 'p', 'l', 'l', 'f', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 's', '3', '9', '0', '_',
  'v', 'u', 'p', 'l', 'l', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
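  // NEC SX-Aurora VE names follow (__builtin_ve_vl_*). The trailing letters
  // (e.g. _vvvl, _MvMl) appear to encode operand kinds: v = vector,
  // s = scalar, m/M = mask, l = vector length.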
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'a', 'n', 'd', 'm', '_', 'M', 'M',
  'M', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'a', 'n', 'd', 'm', '_', 'm', 'm', 'm', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'e', 'q',
  'v', 'm', '_', 'M', 'M', 'M', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'e', 'q', 'v', 'm', '_', 'm', 'm',
  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', '_', 'v', 'm', '5', '1',
  '2', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'e', 'x', 't', 'r', 'a', 'c', 't', '_', 'v', 'm', '5',
  '1', '2', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'f', 'e', 'n', 'c', 'e', 'c', '_', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'f', 'e', 'n', 'c', 'e', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'f', 'e', 'n', 'c', 'e', 'm', '_',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'f', 'i', 'd', 'c', 'r', '_', 's', 's', 's', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'i',
  'n', 's', 'e', 'r', 't', '_', 'v', 'm', '5', '1', '2', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'i',
  'n', 's', 'e', 'r', 't', '_', 'v', 'm', '5', '1', '2', 'u', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'l',
  'c', 'r', '_', 's', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'l', 's', 'v', '_', 'v', 'v', 's',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'l', 'v', 'm', '_', 'M', 'M', 's', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'l', 'v',
  'm', '_', 'm', 'm', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'l', 'v', 's', 'd', '_', 's', 'v',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'l', 'v', 's', 'l', '_', 's', 'v', 's', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'l', 'v',
  's', 's', '_', 's', 'v', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'l', 'z', 'v', 'm', '_', 's', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'n', 'e', 'g', 'm', '_', 'M', 'M', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'n', 'e', 'g',
  'm', '_', 'm', 'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'n', 'n', 'd', 'm', '_', 'M', 'M', 'M', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'n', 'n', 'd', 'm', '_', 'm', 'm', 'm', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'o', 'r', 'm', '_',
  'M', 'M', 'M', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'o', 'r', 'm', '_', 'm', 'm', 'm', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'a', 'c', 'k', '_', 'f', '3', '2', 'a', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'a', 'c', 'k', '_',
  'f', '3', '2', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'c', 'v', 'm', '_', 's', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'f', 'c', 'h', 'v', '_', 's', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'f', 'c',
  'h', 'v', 'n', 'c', '_', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd',
  's', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd',
  's', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd', 's', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd', 's', '_', 'v',
  'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd', 's', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd', 's', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd', 'u', '_', 'v', 's', 'v', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'a', 'd', 'd', 'u', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'a', 'd', 'd', 'u', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'a', 'd', 'd', 'u', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'a', 'd', 'd', 'u', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'a', 'd', 'd', 'u', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'a', 'n', 'd', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a',
  'n', 'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'n', 'd', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'n', 'd', '_', 'v', 'v',
  'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'a', 'n', 'd', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'a', 'n', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'b', 'r', 'd', '_', 'v', 's', 'M', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'b', 'r', 'd', '_', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r', 'd',
  '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r', 'v', '_', 'v', 'v',
  'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r', 'v', '_', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'b', 'r', 'v', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'b', 'r', 'v', 'l', 'o', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r',
  'v', 'l', 'o', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r',
  'v', 'l', 'o', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r', 'v',
  'u', 'p', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r', 'v', 'u', 'p',
  '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'b', 'r', 'v', 'u', 'p',
  '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'm', 'p', 's', '_', 'v',
  's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'm', 'p', 's', '_', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'm', 'p', 's', '_', 'v', 's', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'c', 'm', 'p', 's', '_', 'v', 'v', 'v', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'c', 'm', 'p', 's', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'c', 'm', 'p', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'c', 'm', 'p', 'u', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'c', 'm', 'p', 'u', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'c', 'm', 'p', 'u', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'c', 'm', 'p', 'u', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'c', 'm', 'p', 'u', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'm',
  'p', 'u', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't',
  's', 'w', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 's', 'w',
  '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 'w', 's', '_',
  'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 'w', 's', '_',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 'w', 's', '_', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 'w', 's', 'r', 'z', '_', 'v',
  'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 'w', 's', 'r', 'z',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'c', 'v', 't', 'w', 's', 'r', 'z',
  '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'e', 'q', 'v', '_', 'v', 's',
  'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'e', 'q', 'v', '_', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'e', 'q', 'v', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'e', 'q', 'v', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'e', 'q', 'v', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'e',
  'q', 'v', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'a', 'd',
  'd', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'a', 'd',
  'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'a', 'd', 'd', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'a', 'd', 'd', '_', 'v',
  'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'a', 'd', 'd', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'a', 'd', 'd', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'c', 'm', 'p', '_', 'v', 's', 'v', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'c', 'm', 'p', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'c', 'm', 'p', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'c', 'm', 'p', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'c', 'm', 'p', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'c', 'm', 'p', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'd', '_', 'v', 's', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'a', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'd', '_', 'v', 's', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'd', '_', 'v', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'a', 'd', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'd', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'd', '_', 'v', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'a', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'd', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'x', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'a', 'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'a', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'a',
  'x', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'a',
  'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'a', 'x', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'i', 'n', '_', 'v',
  's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'i', 'n', '_', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'i', 'n', '_', 'v', 's', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'i', 'n', '_', 'v', 'v', 'v', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'i', 'n', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'i', 'n', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 'a', 'f', '_', 'M', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'a', 't', '_', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  's', 'e', 'q', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  's', 'e', 'q', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's',
  'e', 'q', 'n', 'a', 'n', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 's', 'e', 'q', 'n', 'a', 'n', '_', 'M', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 's', 'g', 'e', '_', 'M', 'v', 'M', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 's', 'g', 'e', '_', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'g', 'e', 'n', 'a', 'n', '_', 'M', 'v', 'M', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 's', 'g', 'e', 'n', 'a', 'n', '_', 'M', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'g', 't', '_', 'M', 'v', 'M',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'g', 't', '_', 'M', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'g', 't', 'n', 'a', 'n', '_', 'M',
  'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'g', 't', 'n', 'a',
  'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'e',
  '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'e',
  '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'e', 'n',
  'a', 'n', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's',
  'l', 'e', 'n', 'a', 'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 's', 'l', 'o', 'e', 'q', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 's', 'l', 'o', 'e', 'q', '_', 'm', 'v', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 's', 'l', 'o', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'e', 'q', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l',
  'o', 'g', 'e', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's',
  'l', 'o', 'g', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 's', 'l', 'o', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'g', 'e', 'n', 'a', 'n', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'g', 't',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'g',
  't', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l',
  'o', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 's', 'l', 'o', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'l', 'e', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'l', 'e', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'l', 'e',
  'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's',
  'l', 'o', 'l', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 's', 'l', 'o', 'l', 't', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'l', 't', '_', 'm', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'l', 't', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'l',
  't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 's', 'l', 'o', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'l', 'o', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'n', 'e', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'n', 'e', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'n', 'e', 'n',
  'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l',
  'o', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'l', 'o', 'n', 'u', 'm', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 's', 'l', 'o', 'n', 'u', 'm', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 't', '_', 'M', 'v', 'M',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 't', '_', 'M', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 't', 'n', 'a', 'n', '_', 'M',
  'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'l', 't', 'n', 'a',
  'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'n', 'a',
  'n', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'n',
  'a', 'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'n',
  'e', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'n',
  'e', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'n', 'e',
  'n', 'a', 'n', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  's', 'n', 'e', 'n', 'a', 'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 's', 'n', 'u', 'm', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'n', 'u', 'm', '_', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'u', 'p', 'e', 'q', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 's', 'u', 'p', 'e', 'q', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'e', 'q', 'n', 'a', 'n', '_',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'e', 'q',
  'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  's', 'u', 'p', 'g', 'e', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 's', 'u', 'p', 'g', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'u', 'p', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'g', 'e', 'n', 'a', 'n',
  '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p',
  'g', 't', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u',
  'p', 'g', 't', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  's', 'u', 'p', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 's', 'u', 'p', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'l', 'e', '_',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'l', 'e',
  '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p',
  'l', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 's', 'u', 'p', 'l', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'l', 't', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'l', 't', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'l', 't', 'n',
  'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u',
  'p', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 's', 'u', 'p', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'n', 'a', 'n', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'n', 'e', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'n', 'e', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'n',
  'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  's', 'u', 'p', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'n', 'u', 'm', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 's', 'u', 'p', 'n', 'u', 'm', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'e', 'q', '_', 'M',
  'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'e', 'q', '_', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'e', 'q', 'n', 'a', 'n',
  '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'e', 'q',
  'n', 'a', 'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w',
  'g', 'e', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w',
  'g', 'e', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'g',
  'e', 'n', 'a', 'n', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 'w', 'g', 'e', 'n', 'a', 'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'g', 't', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'g', 't', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'g', 't', 'n', 'a', 'n', '_', 'M', 'v', 'M', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 'w', 'g', 't', 'n', 'a', 'n', '_', 'M', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'e', '_', 'M', 'v', 'M', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'e', '_', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'e', 'n', 'a', 'n', '_', 'M', 'v',
  'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'e', 'n', 'a', 'n',
  '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'e',
  'q', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o',
  'e', 'q', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w',
  'l', 'o', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'l', 'o', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'g', 'e', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'g', 'e', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'g',
  'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  'w', 'l', 'o', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'g', 't', '_', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'g', 't', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'g', 't', 'n', 'a',
  'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o',
  'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'l', 'o', 'l', 'e', '_', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'l', 'o', 'l', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'l', 'e', 'n', 'a', 'n', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'l', 'e', 'n',
  'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w',
  'l', 'o', 'l', 't', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  'w', 'l', 'o', 'l', 't', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'l', 'o', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'l', 't', 'n', 'a', 'n', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'n',
  'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l',
  'o', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 'w', 'l', 'o', 'n', 'e', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'l', 'o', 'n', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 'w', 'l', 'o', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l', 'o', 'n', 'e', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'l',
  'o', 'n', 'u', 'm', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  'w', 'l', 'o', 'n', 'u', 'm', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'l', 't', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'l', 't', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'l', 't', 'n', 'a', 'n', '_', 'M', 'v', 'M', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 'w', 'l', 't', 'n', 'a', 'n', '_', 'M', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'a', 'n', '_', 'M', 'v', 'M',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'a', 'n', '_', 'M', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'e', '_', 'M', 'v', 'M',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'e', '_', 'M', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'e', 'n', 'a', 'n', '_', 'M',
  'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'e', 'n', 'a',
  'n', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n', 'u',
  'm', '_', 'M', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'n',
  'u', 'm', '_', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u',
  'p', 'e', 'q', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w',
  'u', 'p', 'e', 'q', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 'w', 'u', 'p', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'e', 'q', 'n', 'a', 'n', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'g', 'e',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'g',
  'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u',
  'p', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'u', 'p', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'g', 't', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'g', 't', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'g', 't',
  'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w',
  'u', 'p', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 'w', 'u', 'p', 'l', 'e', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'l', 'e', '_', 'm', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'l', 'e', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'l',
  'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm',
  'k', 'w', 'u', 'p', 'l', 't', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'u', 'p', 'l', 't', '_', 'm', 'v', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 'w', 'u', 'p', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'l', 't', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u',
  'p', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  'w', 'u', 'p', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'k', 'w', 'u', 'p', 'n', 'e', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'm', 'k', 'w', 'u', 'p', 'n', 'e', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'n', 'e', 'n', 'a', 'n', '_',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k', 'w', 'u', 'p', 'n', 'e',
  'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'k',
  'w', 'u', 'p', 'n', 'u', 'm', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'k', 'w', 'u', 'p', 'n', 'u', 'm', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 's', 'v', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 's', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 's', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 'v', 's', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 'v', 'v', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 's', 'b', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'u', 'l', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'm', 'u', 'l', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'f', 'm', 'u', 'l', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'u', 'l', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'm', 'u', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'm', 'u',
  'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 'a',
  'd', '_', 'v', 's', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n',
  'm', 'a', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n',
  'm', 'a', 'd', '_', 'v', 's', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  'n', 'm', 'a', 'd', '_', 'v', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'n', 'm', 'a', 'd', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'f', 'n', 'm', 'a', 'd', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'f', 'n', 'm', 'a', 'd', '_', 'v', 'v', 'v', 'v', 'M', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'n', 'm', 'a', 'd', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'f', 'n', 'm', 'a', 'd', '_', 'v', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's', 'b', '_', 'v', 's', 'v', 'v',
  'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's', 'b', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's', 'b', '_', 'v', 's',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's', 'b', '_', 'v',
  'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's', 'b',
  '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's', 'b',
  '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n', 'm', 's',
  'b', '_', 'v', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n',
  'm', 's', 'b', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 'n',
  'm', 's', 'b', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  's', 'u', 'b', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f',
  's', 'u', 'b', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 's', 'u',
  'b', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 's', 'u', 'b',
  '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 's', 'u', 'b',
  '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'f', 's', 'u', 'b', '_', 'v',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z', '_', 'v', 'v', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'l', 'd', 'z', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l',
  'd', 'z', 'l', 'o', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z',
  'l', 'o', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z',
  'l', 'o', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z', 'u',
  'p', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z', 'u', 'p', '_',
  'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'l', 'd', 'z', 'u', 'p', '_',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'm', 'a', 'x', 's', '_', 'v', 's',
  'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'm', 'a', 'x', 's', '_', 'v', 's',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'm', 'a', 'x', 's', '_', 'v', 's', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'm', 'a', 'x', 's', '_', 'v', 'v', 'v', 'M', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'm', 'a', 'x', 's', '_', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 'm', 'a', 'x', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'm', 'i', 'n', 's', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'm', 'i', 'n', 's', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'm', 'i', 'n', 's', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'm',
  'i', 'n', 's', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'm',
  'i', 'n', 's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'm', 'i', 'n',
  's', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'o', 'r', '_', 'v',
  's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'o', 'r', '_', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'o', 'r', '_', 'v', 's', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'o', 'r', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'o', 'r', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'o', 'r', '_', 'v',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'p', 'c', 'n', 't', '_', 'v', 'v',
  'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'p', 'c', 'n', 't', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'p', 'v', 'p', 'c', 'n', 't', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'p', 'c', 'n', 't', 'l', 'o', '_', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'p', 'c', 'n', 't', 'l', 'o', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'p', 'c', 'n', 't', 'l', 'o', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'p', 'c', 'n', 't', 'u', 'p', '_', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 'p', 'c', 'n', 't', 'u', 'p', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'p', 'c', 'n', 't', 'u', 'p', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'r', 'c', 'p', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'r', 'c',
  'p', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'r', 's', 'q', 'r', 't',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'r', 's', 'q', 'r', 't', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 'r', 's', 'q', 'r', 't', 'n', 'e', 'x',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'r', 's', 'q', 'r', 't', 'n', 'e',
  'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'e', 'q', '_', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 's', 'e', 'q', '_', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 's', 'e', 'q', 'l', 'o', '_', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'e',
  'q', 'l', 'o', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'e', 'q', 'u',
  'p', '_', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'e', 'q', 'u', 'p', '_', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 's', 'l', 'a', '_', 'v', 'v', 's', 'M', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 's', 'l', 'a', '_', 'v', 'v', 's', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 's', 'l', 'a', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  's', 'l', 'a', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's',
  'l', 'a', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'l', 'a', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'l', 'l', '_', 'v', 'v',
  's', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'l', 'l', '_', 'v', 'v', 's',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 's', 'l', 'l', '_', 'v', 'v', 's', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 's', 'l', 'l', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 's', 'l', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's',
  'l', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'a',
  '_', 'v', 'v', 's', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'a', '_',
  'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'a', '_', 'v', 'v', 's',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'a', '_', 'v', 'v', 'v', 'M', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 's', 'r', 'a', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 's', 'r', 'a', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  's', 'r', 'l', '_', 'v', 'v', 's', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's',
  'r', 'l', '_', 'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'l', '_',
  'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'l', '_', 'v', 'v',
  'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'r', 'l', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 's', 'r', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 's', 'u', 'b', 's', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'p', 'v', 's', 'u', 'b', 's', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p',
  'v', 's', 'u', 'b', 's', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  's', 'u', 'b', 's', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  's', 'u', 'b', 's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u',
  'b', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u', 'b',
  'u', '_', 'v', 's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u', 'b',
  'u', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u', 'b', 'u', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u', 'b', 'u', '_', 'v',
  'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u', 'b', 'u', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'p', 'v', 's', 'u', 'b', 'u', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'p', 'v', 'x', 'o', 'r', '_', 'v', 's', 'v', 'M', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'p', 'v', 'x', 'o', 'r', '_', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'p', 'v', 'x', 'o', 'r', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v',
  'x', 'o', 'r', '_', 'v', 'v', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'x',
  'o', 'r', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'p', 'v', 'x', 'o', 'r', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 's', 'c', 'r', '_', 's', 's', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 's', 'v', 'm', '_', 's', 'M', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 's', 'v', 'm', '_', 's',
  'm', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 's', 'v', 'o', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 't', 'o', 'v', 'm', '_',
  's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 't', 's', 'c', 'r', '_', 's', 's', 's', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'a', 'd', 'd', 's', 'l', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'a', 'd', 'd', 's', 'l', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'a', 'd', 'd', 's', 'l', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a',
  'd', 'd', 's', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd',
  's', 'l', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd',
  's', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 's',
  'w', 's', 'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 's',
  'w', 's', 'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd',
  'd', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a',
  'd', 'd', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a',
  'd', 'd', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'a', 'd', 'd', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'a', 'd', 'd', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'a', 'd', 'd', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'a', 'd', 'd', 's', 'w', 'z', 'x', '_', 'v', 's', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 's', 'w', 'z', 'x', '_', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 's', 'w', 'z', 'x', '_', 'v', 'v',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 's', 'w', 'z', 'x', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 'u', 'l', '_', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 'u', 'l', '_', 'v', 's', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 'u', 'l', '_', 'v', 's', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'a', 'd', 'd', 'u', 'l', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'a', 'd', 'd', 'u', 'l', '_', 'v', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'a', 'd', 'd', 'u', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'a', 'd', 'd', 'u', 'w', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'a', 'd', 'd', 'u', 'w', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'a', 'd', 'd', 'u', 'w', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a',
  'd', 'd', 'u', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd',
  'u', 'w', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'd', 'd',
  'u', 'w', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'n', 'd', '_',
  'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'n', 'd', '_', 'v', 's', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'a', 'n', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'a', 'n', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'n',
  'd', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'a', 'n', 'd', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 'd', '_', 'v', 's',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'b', 'r', 'd', 'd', '_', 'v', 's', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'b', 'r', 'd', 'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b',
  'r', 'd', 'l', '_', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 'l', '_',
  'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 'l', '_', 'v', 's',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 's', '_', 'v', 's', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'b', 'r', 'd', 's', '_', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b',
  'r', 'd', 's', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 'w',
  '_', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 'w', '_', 'v', 's', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'b', 'r', 'd', 'w', '_', 'v', 's', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'b', 'r', 'v', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'v',
  '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'b', 'r', 'v', '_', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'l', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'c', 'm', 'p', 's', 'l', '_', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'c', 'm', 'p', 's', 'l', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'c', 'm', 'p', 's', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'm', 'p', 's', 'l', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'm', 'p', 's', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c',
  'm', 'p', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c',
  'm', 'p', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'c', 'm', 'p', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'c', 'm', 'p', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'c', 'm', 'p', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w', 's', 'x', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w', 'z', 'x', '_', 'v', 's',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w', 'z', 'x', '_', 'v', 's',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w', 'z', 'x', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w', 'z', 'x',
  '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w', 'z', 'x',
  '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 's', 'w',
  'z', 'x', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u',
  'l', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u', 'l', '_',
  'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u', 'l', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u', 'l', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u', 'l', '_', 'v', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u', 'l', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'c', 'm', 'p', 'u', 'w', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'c', 'm', 'p', 'u', 'w', '_', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'c', 'm', 'p', 'u', 'w', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'c', 'm', 'p', 'u', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'm', 'p', 'u', 'w', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'm', 'p', 'u', 'w', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c',
  'p', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'd', 'l',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'd', 'l', '_', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'd', 's', '_', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'c', 'v', 't', 'd', 's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'v', 't', 'd', 'w', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't',
  'd', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'l', 'd',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'l', 'd', '_', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'l', 'd', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'c', 'v', 't', 'l', 'd', 'r', 'z', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'c', 'v', 't', 'l', 'd', 'r', 'z', '_', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'c', 'v', 't', 'l', 'd', 'r', 'z', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'c', 'v', 't', 's', 'd', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'c', 'v', 't', 's', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c',
  'v', 't', 's', 'w', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 's',
  'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 'd', 's',
  'x', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 'd', 's', 'x',
  '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 'd', 's',
  'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 'd', 's',
  'x', 'r', 'z', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 'd',
  's', 'x', 'r', 'z', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v',
  't', 'w', 'd', 's', 'x', 'r', 'z', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'v', 't', 'w', 'd', 'z', 'x', '_', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c',
  'v', 't', 'w', 'd', 'z', 'x', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'v', 't', 'w', 'd', 'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'c', 'v', 't', 'w', 'd', 'z', 'x', 'r', 'z', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'c', 'v', 't', 'w', 'd', 'z', 'x', 'r', 'z', '_', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 'd', 'z', 'x', 'r', 'z', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 's', 's', 'x', '_', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 's', 's', 'x', '_', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 's', 's', 'x', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 's', 's', 'x', 'r', 'z',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 's', 's', 'x', 'r',
  'z', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w', 's',
  's', 'x', 'r', 'z', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't',
  'w', 's', 'z', 'x', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't', 'w',
  's', 'z', 'x', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't',
  'w', 's', 'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v', 't',
  'w', 's', 'z', 'x', 'r', 'z', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'c', 'v',
  't', 'w', 's', 'z', 'x', 'r', 'z', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'c', 'v', 't', 'w', 's', 'z', 'x', 'r', 'z', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'd', 'i', 'v', 's', 'l', '_', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'd', 'i', 'v', 's', 'l', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'd', 'i', 'v', 's', 'l', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'd', 'i', 'v', 's', 'l', '_', 'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i',
  'v', 's', 'l', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i',
  'v', 's', 'l', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v',
  's', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'l',
  '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'l',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 's',
  'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 's',
  'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's',
  'w', 's', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v',
  's', 'w', 's', 'x', '_', 'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v',
  's', 'w', 's', 'x', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd',
  'i', 'v', 's', 'w', 's', 'x', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'd', 'i', 'v', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'd', 'i', 'v', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'd', 'i', 'v', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z', 'x', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z', 'x', '_', 'v',
  'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z', 'x', '_', 'v',
  'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z', 'x',
  '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z',
  'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's', 'w', 'z',
  'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 's',
  'w', 'z', 'x', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v',
  'u', 'l', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l',
  '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l',
  '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l', '_',
  'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l', '_', 'v', 'v',
  's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l', '_', 'v', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'l', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'd', 'i', 'v', 'u', 'w', '_', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'd', 'i', 'v', 'u', 'w', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'd', 'i', 'v', 'u', 'w', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'd', 'i', 'v', 'u', 'w', '_', 'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i',
  'v', 'u', 'w', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i',
  'v', 'u', 'w', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v',
  'u', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'w',
  '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'd', 'i', 'v', 'u', 'w',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'e', 'q', 'v', '_', 'v', 's',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'e', 'q', 'v', '_', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'e', 'q', 'v', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'e', 'q', 'v', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'e', 'q', 'v', '_',
  'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'e', 'q', 'v', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'e', 'x', '_', 'v', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'a', 'd', 'd', 'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'a', 'd', 'd', 'd', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'a', 'd', 'd', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'a', 'd', 'd', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd',
  'd', 'd', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd',
  'd', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd', 'd',
  's', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd', 'd', 's', '_',
  'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd', 'd', 's', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd', 'd', 's', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd', 'd', 's', '_', 'v', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'a', 'd', 'd', 's', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'c', 'm', 'p', 'd', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'c', 'm', 'p', 'd', '_', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'c', 'm', 'p', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'c', 'm', 'p', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'c', 'm', 'p', 'd', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'c', 'm', 'p', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'c', 'm', 'p', 's', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'c', 'm',
  'p', 's', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'c', 'm',
  'p', 's', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'c', 'm', 'p',
  's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'c', 'm', 'p', 's', '_',
  'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'c', 'm', 'p', 's', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'd', 'i', 'v', 'd', '_', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'd', 'i', 'v', 'd', '_', 'v', 's', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'd', 'i', 'v', 'd', '_', 'v', 's', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'd', 'i', 'v', 'd', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'd', 'i', 'v', 'd', '_', 'v', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'd', 'i', 'v', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'd', 'i', 'v', 's', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'd', 'i', 'v', 's', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'd', 'i', 'v', 's', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'd', 'i', 'v', 's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'd', 'i',
  'v', 's', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'd', 'i',
  'v', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd', 'd',
  '_', 'v', 's', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  'd', '_', 'v', 's', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  'd', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd', 'd',
  '_', 'v', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  'd', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd', 'd',
  '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  'd', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  's', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd', 's',
  '_', 'v', 's', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  's', '_', 'v', 's', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  's', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd', 's',
  '_', 'v', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  's', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  's', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd', 's',
  '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'd',
  's', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x',
  'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x', 'd', '_',
  'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x', 'd', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x', 'd', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x', 'd', '_', 'v', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x', 'd', '_', 'v', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'a', 'x', 's', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'a', 'x', 's', '_', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'a', 'x', 's', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'a', 'x', 's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'a', 'x', 's', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'a', 'x', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'i', 'n', 'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i',
  'n', 'd', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i',
  'n', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n',
  'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n', 'd', '_',
  'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n', 'd', '_',
  'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n', 's', '_', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n', 's', '_', 'v', 's', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n', 's', '_', 'v', 's', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'i', 'n', 's', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'i', 'n', 's', '_', 'v', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'i', 'n', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'd', 'e', 'q', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'e', 'q', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'd', 'g', 'e', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'g', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'd', 'g', 't', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'g', 't', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'd', 'l', 'e', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'l', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'l', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'l', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'd', 'l', 't', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'l', 't', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'd', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'm', 'k', 'd', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'm', 'k', 'd', 'n', 'e', '_', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'n', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm',
  'k', 'd', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'm', 'k', 'd', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'm', 'k', 'd', 'n', 'u', 'm', '_', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'd', 'n', 'u', 'm', '_', 'm', 'v', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'k', 'l', 'a', 'f', '_', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k',
  'l', 'a', 't', '_', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'e', 'q',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'e', 'q', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'e', 'q', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'e', 'q', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 'e',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 'e', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 'e', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 'e', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 't',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 't', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 't', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'g', 't', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 'e',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 'e', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 'e', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 'e', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 't',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 't', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 't', 'n', 'a', 'n',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'l', 't', 'n', 'a',
  'n', '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'a',
  'n', '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'a', 'n',
  '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'e', '_',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'e', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'e', 'n', 'a', 'n', '_',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'e', 'n', 'a', 'n',
  '_', 'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'u', 'm',
  '_', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'l', 'n', 'u', 'm', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'e', 'q', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'e', 'q', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'e', 'q', 'n', 'a', 'n', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'e', 'q', 'n', 'a', 'n', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 'e', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 'e', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 'e', 'n', 'a', 'n', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 'e', 'n', 'a', 'n', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 't', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 't', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 't', 'n', 'a', 'n', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'g', 't', 'n', 'a', 'n', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 'e', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 'e', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 'e', 'n', 'a', 'n', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 'e', 'n', 'a', 'n', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 't', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 't', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 't', 'n', 'a', 'n', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'l', 't', 'n', 'a', 'n', '_',
  'm', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'a', 'n', '_',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'a', 'n', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'e', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'e', '_', 'm', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'e', 'n', 'a', 'n', '_', 'm',
  'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'u', 'm', '_', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 's', 'n', 'u', 'm', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'e', 'q', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'e', 'q', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'e', 'q', 'n', 'a', 'n', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'g', 'e', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'g', 'e', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'g', 'e', 'n', 'a', 'n', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'g', 't', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'g', 't', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'g', 't', 'n', 'a', 'n', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'l', 'e', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'l', 'e', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'l', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'l', 'e', 'n', 'a', 'n', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'l', 't', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'l', 't', '_', 'm', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'l', 't', 'n', 'a', 'n', '_', 'm', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'n', 'a', 'n', '_', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'n', 'a', 'n', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'n', 'e', '_', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'n', 'e', '_', 'm', 'v', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'm', 'k', 'w', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 'k', 'w', 'n', 'e', 'n', 'a', 'n', '_', 'm', 'v', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'm', 'k', 'w', 'n', 'u', 'm', '_', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'k', 'w', 'n', 'u', 'm', '_', 'm', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 's', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 's', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 'v', 's', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 'd', '_', 'v', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 's', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 's', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 'v', 's', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 's', 'b', 's', '_', 'v', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'm', 'u', 'l', 'd', '_', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'm', 'u', 'l', 'd', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'm', 'u', 'l', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'm', 'u', 'l', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm',
  'u', 'l', 'd', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm',
  'u', 'l', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'u',
  'l', 's', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'u', 'l', 's',
  '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'u', 'l', 's',
  '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'u', 'l', 's', '_',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'u', 'l', 's', '_', 'v', 'v',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'm', 'u', 'l', 's', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd', '_', 'v', 's',
  'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd', '_',
  'v', 's', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd',
  '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 'd',
  '_', 'v', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a',
  'd', 'd', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm',
  'a', 'd', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm',
  'a', 'd', 'd', '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'n', 'm', 'a', 'd', 'd', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 's', 'v', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 's', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 'v', 's', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_', 'v', 'v',
  'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 'a', 'd', 's', '_',
  'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 'd',
  '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 'd',
  '_', 'v', 's', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's',
  'b', 'd', '_', 'v', 's', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm',
  's', 'b', 'd', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm',
  's', 'b', 'd', '_', 'v', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f',
  'n', 'm', 's', 'b', 'd', '_', 'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'n', 'm', 's', 'b', 'd', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'n', 'm', 's', 'b', 'd', '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'f', 'n', 'm', 's', 'b', 'd', '_', 'v', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's', '_', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's', '_', 'v', 's', 'v', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's', '_', 'v', 's', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's', '_', 'v', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's', '_', 'v', 'v',
  's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's', '_',
  'v', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's', 'b', 's',
  '_', 'v', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'n', 'm', 's',
  'b', 's', '_', 'v', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r', 'm',
  'a', 'x', 'd', 'f', 's', 't', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r',
  'm', 'a', 'x', 'd', 'f', 's', 't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'r', 'm', 'a', 'x', 'd', 'l', 's', 't', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'r', 'm', 'a', 'x', 'd', 'l', 's', 't', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'r', 'm', 'a', 'x', 's', 'f', 's', 't', '_', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'r', 'm', 'a', 'x', 's', 'f', 's', 't', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 'r', 'm', 'a', 'x', 's', 'l', 's', 't',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r', 'm', 'a', 'x', 's', 'l', 's',
  't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r', 'm', 'i', 'n', 'd',
  'f', 's', 't', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r', 'm', 'i', 'n',
  'd', 'f', 's', 't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r', 'm',
  'i', 'n', 'd', 'l', 's', 't', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 'r',
  'm', 'i', 'n', 'd', 'l', 's', 't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 'r', 'm', 'i', 'n', 's', 'f', 's', 't', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 'r', 'm', 'i', 'n', 's', 'f', 's', 't', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 'r', 'm', 'i', 'n', 's', 'l', 's', 't', '_', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 'r', 'm', 'i', 'n', 's', 'l', 's', 't', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'q', 'r', 't', 'd', '_', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 's', 'q', 'r', 't', 'd', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 's', 'q', 'r', 't', 's', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'f', 's', 'q', 'r', 't', 's', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 's', 'u', 'b', 'd', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's',
  'u', 'b', 'd', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's',
  'u', 'b', 'd', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u',
  'b', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 'd',
  '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 'd',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 's', '_',
  'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 's', '_', 'v', 's',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 's', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 's', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 's', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'f', 's', 'u', 'b', 's', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'f', 's', 'u', 'm', 'd', '_', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'f', 's', 'u', 'm', 'd', '_', 'v', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's',
  'u', 'm', 's', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'f', 's', 'u', 'm', 's',
  '_', 'v', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', '_', 'v', 'v', 's', 's',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'g', 't', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'g', 't', '_', 'v', 'v', 's', 's', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g',
  't', '_', 'v', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 's',
  'x', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 's', 'x',
  '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 's', 'x',
  '_', 'v', 'v', 's', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 's',
  'x', '_', 'v', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 's',
  'x', 'n', 'c', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l',
  's', 'x', 'n', 'c', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g',
  't', 'l', 's', 'x', 'n', 'c', '_', 'v', 'v', 's', 's', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'g', 't', 'l', 's', 'x', 'n', 'c', '_', 'v', 'v', 's', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'g', 't', 'l', 'z', 'x', '_', 'v', 'v', 's', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'g', 't', 'l', 'z', 'x', '_', 'v', 'v', 's', 's', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'g', 't', 'l', 'z', 'x', '_', 'v', 'v', 's', 's', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'g', 't', 'l', 'z', 'x', '_', 'v', 'v', 's', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'g', 't', 'l', 'z', 'x', 'n', 'c', '_', 'v', 'v', 's',
  's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 'z', 'x', 'n', 'c', '_', 'v', 'v',
  's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 'z', 'x', 'n', 'c', '_',
  'v', 'v', 's', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'l', 'z', 'x',
  'n', 'c', '_', 'v', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'n',
  'c', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'n', 'c', '_',
  'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'n', 'c', '_', 'v',
  'v', 's', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'n', 'c', '_', 'v',
  'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g', 't', 'u', '_', 'v', 'v', 's',
  's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'g', 't', 'u', '_', 'v', 'v', 's', 's', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'g', 't', 'u', '_', 'v', 'v', 's', 's', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'g', 't', 'u', '_', 'v', 'v', 's', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'g', 't', 'u', 'n', 'c', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g',
  't', 'u', 'n', 'c', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'g',
  't', 'u', 'n', 'c', '_', 'v', 'v', 's', 's', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'g', 't', 'u', 'n', 'c', '_', 'v', 'v', 's', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'l', 'd', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', '_', 'v', 's',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'l', 'd', '2', 'd', '_', 'v', 's', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'l', 'd', '2', 'd', '_', 'v', 's', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'l', 'd', '2', 'd', 'n', 'c', '_', 'v', 's', 's', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'l', 'd', '2', 'd', 'n', 'c', '_', 'v', 's', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'l', 'd', 'l', '2', 'd', 's', 'x', '_', 'v', 's', 's', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'l', 'd', 'l', '2', 'd', 's', 'x', '_', 'v', 's', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'l', 'd', 'l', '2', 'd', 's', 'x', 'n', 'c', '_', 'v', 's', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'l', 'd', 'l', '2', 'd', 's', 'x', 'n', 'c', '_', 'v', 's',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', '2', 'd', 'z', 'x', '_', 'v',
  's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', '2', 'd', 'z', 'x', '_', 'v',
  's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', '2', 'd', 'z', 'x', 'n',
  'c', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', '2', 'd', 'z',
  'x', 'n', 'c', '_', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l',
  's', 'x', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 's', 'x',
  '_', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 's', 'x', 'n',
  'c', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 's', 'x', 'n',
  'c', '_', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 'z', 'x',
  '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 'z', 'x', '_', 'v',
  's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 'z', 'x', 'n', 'c', '_',
  'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'l', 'z', 'x', 'n', 'c', '_',
  'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'n', 'c', '_', 'v', 's',
  's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'l', 'd', 'n', 'c', '_', 'v', 's', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'l', 'd', 'u', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l',
  'd', 'u', '_', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'u', '2',
  'd', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'u', '2', 'd', '_',
  'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'u', '2', 'd', 'n', 'c',
  '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'u', '2', 'd', 'n', 'c',
  '_', 'v', 's', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'u', 'n', 'c', '_',
  'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'u', 'n', 'c', '_', 'v', 's',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'l', 'd', 'z', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'l', 'd', 'z', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'l', 'd',
  'z', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'l', '_',
  'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'l', '_', 'v', 's',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'l', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'l', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'l', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'l', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 's', 'x', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 's', 'x', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 's', 'x', '_', 'v',
  'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 's', 'x',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 'z',
  'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's', 'w', 'z',
  'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x', 's',
  'w', 'z', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x',
  's', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'a', 'x',
  's', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm',
  'a', 'x', 's', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'i', 'n', 's', 'l', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i',
  'n', 's', 'l', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i',
  'n', 's', 'l', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n',
  's', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n', 's', 'l',
  '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n', 's', 'l',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n', 's', 'w', 's',
  'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n', 's', 'w', 's',
  'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n', 's',
  'w', 's', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n',
  's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'i', 'n',
  's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm',
  'i', 'n', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'i', 'n', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'i', 'n', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'm', 'i', 'n', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'i', 'n', 's', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'i', 'n', 's', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'm', 'i', 'n', 's', 'w', 'z', 'x', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'r', 'g', '_', 'v', 's', 'v', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'r', 'g', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'm', 'r', 'g', '_', 'v', 'v', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'r',
  'g', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'r', 'g', 'w',
  '_', 'v', 's', 'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'r', 'g', 'w', '_', 'v',
  's', 'v', 'M', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'r', 'g', 'w', '_', 'v', 'v',
  'v', 'M', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'r', 'g', 'w', '_', 'v', 'v', 'v', 'M',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 's', 'l', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'u', 'l', 's', 'l', '_', 'v', 's', 'v', 'm', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'u', 'l', 's', 'l', '_', 'v', 's', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'm', 'u', 'l', 's', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'u', 'l', 's', 'l', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'u', 'l', 's', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm',
  'u', 'l', 's', 'l', 'w', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u',
  'l', 's', 'l', 'w', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u',
  'l', 's', 'l', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l',
  's', 'l', 'w', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l',
  's', 'w', 's', 'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l',
  's', 'w', 's', 'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm',
  'u', 'l', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'u', 'l', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'u', 'l', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'm', 'u', 'l', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'u', 'l', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'u', 'l', 's', 'w', 'z', 'x', '_', 'v', 's', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 's', 'w', 'z', 'x', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 's', 'w', 'z', 'x', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 's', 'w', 'z', 'x', '_', 'v',
  'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 's', 'w', 'z', 'x',
  '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 'u', 'l', '_',
  'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 'u', 'l', '_', 'v', 's',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 'u', 'l', '_', 'v', 's',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'm', 'u', 'l', 'u', 'l', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'm', 'u', 'l', 'u', 'l', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'm', 'u', 'l', 'u', 'l', '_', 'v', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'm', 'u', 'l', 'u', 'w', '_', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'm', 'u', 'l', 'u', 'w', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'm', 'u', 'l', 'u', 'w', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'm', 'u', 'l', 'u', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u',
  'l', 'u', 'w', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'u',
  'l', 'u', 'w', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'v', '_',
  'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'm', 'v', '_', 'v', 's', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'm', 'v', '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'o', 'r', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'o', 'r', '_', 'v', 's',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'o', 'r', '_', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'o', 'r', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'o', 'r',
  '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'o', 'r', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'p', 'c', 'n', 't', '_', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'p', 'c', 'n', 't', '_', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'p', 'c', 'n', 't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'a', 'n',
  'd', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'a', 'n', 'd', '_', 'v', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'r', 'c', 'p', 'd', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'r', 'c', 'p', 'd', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'c',
  'p', 's', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'c', 'p', 's', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'l', 'f', 's', 't',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'l', 'f', 's',
  't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'l',
  'l', 's', 't', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's',
  'l', 'l', 's', 't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a',
  'x', 's', 'w', 'f', 's', 't', 's', 'x', '_', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'r', 'm', 'a', 'x', 's', 'w', 'f', 's', 't', 's', 'x', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'w', 'f', 's', 't', 'z', 'x',
  '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'w', 'f', 's',
  't', 'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x',
  's', 'w', 'l', 's', 't', 's', 'x', '_', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r',
  'm', 'a', 'x', 's', 'w', 'l', 's', 't', 's', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'w', 'l', 's', 't', 'z', 'x', '_',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'a', 'x', 's', 'w', 'l', 's', 't',
  'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'i', 'n', 's',
  'l', 'f', 's', 't', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'i', 'n',
  's', 'l', 'f', 's', 't', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm',
  'i', 'n', 's', 'l', 'l', 's', 't', '_', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r',
  'm', 'i', 'n', 's', 'l', 'l', 's', 't', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'r', 'm', 'i', 'n', 's', 'w', 'f', 's', 't', 's', 'x', '_', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'r', 'm', 'i', 'n', 's', 'w', 'f', 's', 't', 's', 'x',
  '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'i', 'n', 's', 'w', 'f',
  's', 't', 'z', 'x', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'i', 'n',
  's', 'w', 'f', 's', 't', 'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  'r', 'm', 'i', 'n', 's', 'w', 'l', 's', 't', 's', 'x', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'r', 'm', 'i', 'n', 's', 'w', 'l', 's', 't', 's', 'x', '_',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'i', 'n', 's', 'w', 'l', 's',
  't', 'z', 'x', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'm', 'i', 'n', 's',
  'w', 'l', 's', 't', 'z', 'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r',
  'o', 'r', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'r', 'o', 'r', '_', 'v', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'r', 's', 'q', 'r', 't', 'd', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'r', 's', 'q', 'r', 't', 'd', '_', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'r', 's', 'q', 'r', 't', 'd', 'n', 'e', 'x', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'r', 's', 'q', 'r', 't', 'd', 'n', 'e', 'x', '_', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'r', 's', 'q', 'r', 't', 's', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'r', 's', 'q', 'r', 't', 's', '_', 'v', 'v', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'r', 's', 'q', 'r', 't', 's', 'n', 'e', 'x', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 'r', 's', 'q', 'r', 't', 's', 'n', 'e', 'x', '_', 'v', 'v',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 'r', 'x', 'o', 'r', '_', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 'r', 'x', 'o', 'r', '_', 'v', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c',
  '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', '_', 'v', 'v', 's',
  's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'l', '_', 'v', 'v', 's', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'c', 'l', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'c', 'l', 'n', 'c', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'c', 'l', 'n', 'c', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'c', 'l', 'n', 'c', 'o', 't', '_', 'v', 'v', 's', 's', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'c', 'l', 'n', 'c', 'o', 't', '_', 'v', 'v', 's', 's', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'c', 'l', 'o', 't', '_', 'v', 'v', 's', 's', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 's', 'c', 'l', 'o', 't', '_', 'v', 'v', 's', 's', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 's', 'c', 'n', 'c', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'c', 'n', 'c', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's',
  'c', 'n', 'c', 'o', 't', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's',
  'c', 'n', 'c', 'o', 't', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'c', 'o', 't', '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c',
  'o', 't', '_', 'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u',
  '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', '_', 'v', 'v',
  's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', 'n', 'c', '_', 'v', 'v',
  's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', 'n', 'c', '_', 'v', 'v', 's',
  's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', 'n', 'c', 'o', 't', '_', 'v',
  'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', 'n', 'c', 'o', 't', '_',
  'v', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', 'o', 't', '_',
  'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'c', 'u', 'o', 't', '_', 'v',
  'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'e', 'q', '_', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 's', 'e', 'q', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'f', 'a',
  '_', 'v', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'f', 'a', '_', 'v', 'v',
  's', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'f', 'a', '_', 'v', 'v', 's',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'h', 'f', '_', 'v', 'v', 'v', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'h', 'f', '_', 'v', 'v', 'v', 's', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'l', 'a', 'l', '_', 'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'l',
  'a', 'l', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'l', 'a',
  'l', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'l', 'a', 'l', '_',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'l', 'a', 'l', '_', 'v', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'l', 'a', 'l', '_', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 's', 'x', '_', 'v', 'v', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'l', 'a', 'w', 's', 'x', '_', 'v', 'v', 's', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 's', 'x', '_', 'v', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'l', 'a', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 's', 'x', '_', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 'z', 'x', '_', 'v', 'v', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'l', 'a', 'w', 'z', 'x', '_', 'v', 'v', 's', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 'z', 'x', '_', 'v', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'l', 'a', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'a', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'l', '_', 'v', 'v', 's', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'l', 'l', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'l',
  'l', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'l', 'l', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'l', 'l', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'l', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'r', 'a', 'l', '_', 'v', 'v', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'r',
  'a', 'l', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'r', 'a',
  'l', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'r', 'a', 'l', '_',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'r', 'a', 'l', '_', 'v', 'v', 'v',
  'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'r', 'a', 'l', '_', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 's', 'x', '_', 'v', 'v', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'r', 'a', 'w', 's', 'x', '_', 'v', 'v', 's', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 's', 'x', '_', 'v', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'r', 'a', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 's', 'x', '_', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 'z', 'x', '_', 'v', 'v', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'r', 'a', 'w', 'z', 'x', '_', 'v', 'v', 's', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 'z', 'x', '_', 'v', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'r', 'a', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'a', 'w', 'z', 'x', '_', 'v', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'l', '_', 'v', 'v', 's', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'r', 'l', '_', 'v', 'v', 's', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'r',
  'l', '_', 'v', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'r', 'l', '_', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'r', 'l', '_', 'v', 'v', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'r', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 't', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', '_', 'v',
  's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', '2', 'd', '_', 'v', 's', 's',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 't', '2', 'd', '_', 'v', 's', 's', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 's', 't', '2', 'd', 'n', 'c', '_', 'v', 's', 's', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 't', '2', 'd', 'n', 'c', '_', 'v', 's', 's', 'm', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 't', '2', 'd', 'n', 'c', 'o', 't', '_', 'v', 's', 's', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 's', 't', '2', 'd', 'n', 'c', 'o', 't', '_', 'v', 's', 's', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 't', '2', 'd', 'o', 't', '_', 'v', 's', 's', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 't', '2', 'd', 'o', 't', '_', 'v', 's', 's', 'm', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 't', 'l', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's',
  't', 'l', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', '2',
  'd', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', '2', 'd', '_',
  'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', '2', 'd', 'n', 'c',
  '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', '2', 'd', 'n', 'c',
  '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', '2', 'd', 'n',
  'c', 'o', 't', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', '2',
  'd', 'n', 'c', 'o', 't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's',
  't', 'l', '2', 'd', 'o', 't', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's',
  't', 'l', '2', 'd', 'o', 't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 't', 'l', 'n', 'c', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't',
  'l', 'n', 'c', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l',
  'n', 'c', 'o', 't', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l',
  'n', 'c', 'o', 't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't',
  'l', 'o', 't', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'l', 'o',
  't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'n', 'c', '_',
  'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'n', 'c', '_', 'v', 's', 's',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 's', 't', 'n', 'c', 'o', 't', '_', 'v', 's', 's',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 't', 'n', 'c', 'o', 't', '_', 'v', 's', 's', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 't', 'o', 't', '_', 'v', 's', 's', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 't', 'o', 't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's',
  't', 'u', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', '_', 'v',
  's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd', '_', 'v', 's',
  's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd', '_', 'v', 's', 's', 'm',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd', 'n', 'c', '_', 'v', 's', 's',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd', 'n', 'c', '_', 'v', 's', 's',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd', 'n', 'c', 'o', 't', '_',
  'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd', 'n', 'c', 'o',
  't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd',
  'o', 't', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', '2', 'd',
  'o', 't', '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', 'n',
  'c', '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', 'n', 'c', '_',
  'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', 'n', 'c', 'o', 't',
  '_', 'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', 'n', 'c', 'o', 't',
  '_', 'v', 's', 's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', 'o', 't', '_',
  'v', 's', 's', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 't', 'u', 'o', 't', '_', 'v', 's',
  's', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'l', '_', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'l', '_', 'v', 's', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'l', '_', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'u', 'b', 's', 'l', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'u', 'b', 's', 'l', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'u', 'b', 's', 'l', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'u', 'b', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'u', 'b', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 's', 'u', 'b', 's', 'w', 's', 'x', '_', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'u', 'b', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'u', 'b', 's', 'w', 's', 'x', '_', 'v', 'v', 'v', 'm',
  'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'w', 's', 'x', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'w', 'z', 'x', '_', 'v',
  's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'w', 'z', 'x', '_', 'v',
  's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'w', 'z', 'x',
  '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'w', 'z',
  'x', '_', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's', 'w', 'z',
  'x', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 's',
  'w', 'z', 'x', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b',
  'u', 'l', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'l',
  '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'l',
  '_', 'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'l', '_',
  'v', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'l', '_', 'v', 'v',
  'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'l', '_', 'v', 'v',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'w', '_', 'v', 's', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'w', '_', 'v', 's', 'v', 'm', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 's', 'u', 'b', 'u', 'w', '_', 'v', 's', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'u', 'b', 'u', 'w', '_', 'v', 'v', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'u', 'b', 'u', 'w', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_',
  'v', 's', 'u', 'b', 'u', 'w', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v',
  's', 'u', 'm', 'l', '_', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'm', 'l',
  '_', 'v', 'v', 'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'm', 'w', 's', 'x', '_',
  'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v',
  'e', '_', 'v', 'l', '_', 'v', 's', 'u', 'm', 'w', 's', 'x', '_', 'v', 'v',
  'm', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e',
  '_', 'v', 'l', '_', 'v', 's', 'u', 'm', 'w', 'z', 'x', '_', 'v', 'v', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v',
  'l', '_', 'v', 's', 'u', 'm', 'w', 'z', 'x', '_', 'v', 'v', 'm', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'x', 'o', 'r', '_', 'v', 's', 'v', 'l', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'x', 'o',
  'r', '_', 'v', 's', 'v', 'm', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'x', 'o', 'r', '_',
  'v', 's', 'v', 'v', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'v', 'e', '_', 'v', 'l', '_', 'v', 'x', 'o', 'r', '_', 'v', 'v', 'v',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_',
  'v', 'l', '_', 'v', 'x', 'o', 'r', '_', 'v', 'v', 'v', 'm', 'v', 'l', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l',
  '_', 'v', 'x', 'o', 'r', '_', 'v', 'v', 'v', 'v', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'x', 'o',
  'r', 'm', '_', 'M', 'M', 'M', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'v', 'e', '_', 'v', 'l', '_', 'x', 'o', 'r', 'm', '_', 'm', 'm',
  'm', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
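  // From roughly this point the table switches from __builtin_ve_vl_* names
  // to x86 __builtin_ia32_* names, starting with 3DNow! intrinsics
  // (pavgusb, pf2id, pfacc, ...) and continuing through AES and AVX entries.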
  '2', '_', 'p', 'a', 'v', 'g', 'u', 's', 'b', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', '2', 'i', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'f', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'a', 'd', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f',
  'c', 'm', 'p', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'c', 'm', 'p', 'g', 'e', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'f', 'c', 'm', 'p', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'm', 'a', 'x', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f',
  'm', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'f', 'm', 'u', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'r', 'c', 'p',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'f', 'r', 'c', 'p', 'i', 't', '1', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'r', 'c', 'p',
  'i', 't', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'f', 'r', 's', 'q', 'i', 't', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f',
  'r', 's', 'q', 'r', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'f', 's', 'u', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 's',
  'u', 'b', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'i', '2', 'f', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h',
  'r', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'f', '2', 'i', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'f', 'n', 'a', 'c', 'c',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'f', 'p', 'n', 'a', 'c', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'i', '2', 'f', 'w', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'a', 'a', 'd', 'd', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'a', 'a', 'd', 'd', '6', '4', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a',
  'a', 'n', 'd', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'a', 'a', 'n', 'd', '6', '4', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e',
  's', 'd', 'e', 'c', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'd', 'e', 'c', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'a', 'e', 's', 'd', 'e', 'c', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e',
  's', 'd', 'e', 'c', 'l', 'a', 's', 't', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's',
  'd', 'e', 'c', 'l', 'a', 's', 't', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'd',
  'e', 'c', 'l', 'a', 's', 't', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n',
  'c', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n', 'c', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'a', 'e', 's', 'e', 'n', 'c', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n',
  'c', 'l', 'a', 's', 't', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n', 'c',
  'l', 'a', 's', 't', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'e', 'n', 'c', 'l',
  'a', 's', 't', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'a', 'e', 's', 'i', 'm', 'c', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'a', 'e', 's', 'k', 'e', 'y', 'g', 'e', 'n', 'a', 's', 's', 'i',
  's', 't', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'a', 'o', 'r', '3', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'o', 'r',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'l', 'e',
  'n', 'd', 'v', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'l', 'e', 'n', 'd', 'v',
  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p', 's', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 'p', 's', '2', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  't', 'p', 'd', '2', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p',
  's', '2', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'p', 's', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'h', 'a', 'd', 'd', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'h', 'a', 'd', 'd',
  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'h', 's', 'u', 'b', 'p', 'd', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'h', 's', 'u', 'b', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'd', 'd', 'q',
  'u', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'p', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'p', 'd', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'p', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k',
  'l', 'o', 'a', 'd', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's',
  't', 'o', 'r', 'e', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r',
  'e', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r',
  'e', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'p', 's',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'm', 'a', 'x', 'p', 'd', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a',
  'x', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 'd', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'm', 'i', 'n', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's',
  'k', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's', 'k', 'p', 's',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 't', 'e', 's', 't', 'c', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  't', 'e', 's', 't', 'n', 'z', 'c', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 's',
  't', 'z', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 'p', 's', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 'o', 'u', 'n', 'd', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'o', 'u', 'n',
  'd', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', 'p', 's', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v', 'a', 'r', 'p', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v', 'a', 'r', 'p', 'd', '2', '5',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v', 'a', 'r', 'p', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'e', 'r', 'm', 'i', 'l', 'v', 'a', 'r', 'p', 's', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 't', 'e', 's', 't', 'c', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't',
  'c', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'c', 'p', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 't', 'e', 's', 't', 'c', 'p', 's', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't',
  'e', 's', 't', 'n', 'z', 'c', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'n',
  'z', 'c', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'n', 'z',
  'c', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'n', 'z', 'c', 'p', 's', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 't', 'e', 's', 't', 'z', 'p', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e',
  's', 't', 'z', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 't', 'e', 's', 't', 'z',
  'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 't', 'e', 's', 't', 'z', 'p', 's', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'z', 'e', 'r', 'o', 'a', 'l', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'z', 'e', 'r', 'o', 'u',
  'p', 'p', 'e', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'd', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a',
  't', 'h', 'e', 'r', 'd', '_', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
  'd', '_', 'p', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd',
  '_', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'p', 's', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'd', '_', 'q', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a',
  't', 'h', 'e', 'r', 'd', '_', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h',
  'e', 'r', 'q', '_', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'd',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'p', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'p', 'd', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
  'a', 't', 'h', 'e', 'r', 'q', '_', 'p', 's', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e',
  'r', 'q', '_', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r',
  'q', '_', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'q', '_', 'q', '2', '5',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's',
  'k', 'l', 'o', 'a', 'd', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'l',
  'o', 'a', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 'l', 'o', 'a', 'd', 'q', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'm', 'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'a', 's', 'k', 's', 't', 'o', 'r', 'e', 'd', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a',
  's', 'k', 's', 't', 'o', 'r', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's', 'k', 's', 't',
  'o', 'r', 'e', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'p', 's', 'a', 'd', 'b', 'w',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'd', 'w', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'a', 'c', 'k', 's', 's', 'w', 'b', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
  'c', 'k', 'u', 's', 'd', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 'u',
  's', 'w', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'b', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'a', 'v', 'g', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'b', 'l', 'e', 'n',
  'd', 'v', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 's',
  'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 's', 'f', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'd', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h',
  'a', 'd', 'd', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'w',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b', 'd', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'h', 's', 'u', 'b', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b',
  'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'u', 'b', 's', 'w', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'w', 'd', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'o', 'v', 'm', 's', 'k', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l',
  'h', 'r', 's', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'w', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'u', 'w', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'a', 'd', 'b', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'h', 'u', 'f', 'b',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'b', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'i', 'g', 'n', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'w',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
  'l', 'l', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 's', 'l', 'l', 'd', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l',
  'q', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', 'i', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 's', 'l', 'l', 'v', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v',
  '8', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v', '2', 'd', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
  'l', 'l', 'v', '4', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 's', 'r', 'a', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd',
  'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', 'i', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'a', 'v', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '8',
  's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'r', 'l', 'd', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
  'l', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'l', 'd', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q',
  'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', 'i', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'l', 'v', '4', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '8',
  's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'r', 'l', 'v', '2', 'd', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
  'l', 'v', '4', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 's', 's', 'd', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 's', 's', 'd', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'd', 'p', 'b', 's', 's', 'd', 's', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'd', 'p', 'b', 's', 's', 'd', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p',
  'b', 's', 'u', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 's', 'u',
  'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 's', 'u', 'd', 's', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 's', 'u', 'd', 's', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'd', 'p', 'b', 'u', 'u', 'd', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'd', 'p', 'b', 'u', 'u', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b',
  'u', 'u', 'd', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 'u',
  'd', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 'u', 'd', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 'u', 'd', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'd', 'p', 'w', 's', 'u', 'd', 's', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'd', 'p', 'w', 's', 'u', 'd', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p',
  'w', 'u', 's', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 'u', 's',
  'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 'u', 's', 'd', 's', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 'u', 's', 'd', 's', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'd', 'p', 'w', 'u', 'u', 'd', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'd', 'p', 'w', 'u', 'u', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w',
  'u', 'u', 'd', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 'u', 'u',
  'd', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 'p', 'd', '5', '1', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'a', 'd', 'd', 'p', 's', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c',
  'a', 's', 't', 'm', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c',
  'a', 's', 't', 'm', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c',
  'a', 's', 't', 'm', 'b', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c',
  'a', 's', 't', 'm', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c',
  'a', 's', 't', 'm', 'w', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'r', 'o', 'a', 'd', 'c',
  'a', 's', 't', 'm', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'n', 'f',
  'l', 'i', 'c', 't', 's', 'i', '_', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'c', 'o',
  'n', 'f', 'l', 'i', 'c', 't', 's', 'i', '_', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 's', 'i', '_', '5', '1', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 'd', 'i', '_', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'p', 'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 'd', 'i', '_',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'c', 'o', 'n', 'f', 'l', 'i', 'c', 't', 'd',
  'i', '_', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 'i', '2', 's', 'd', '6',
  '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'v', 't', 's', 'i', '2', 's', 's', '3', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
  't', 's', 'i', '2', 's', 's', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's',
  'd', '2', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 'd', '2',
  's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 'd', '2', 'u', 's',
  'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 'd', '2', 'u', 's', 'i',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 't', 's', 's', '2', 's', 'i', '3', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'c', 'v', 't', 't', 's', 's', '2', 's', 'i', '6', '4', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'c', 'v', 't', 't', 's', 's', '2', 'u', 's', 'i', '3', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 't', 's', 's', '2', 'u', 's', 'i', '6', '4', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  'u', 's', 'i', '2', 's', 's', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 's', 'i',
  '2', 's', 'd', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'u', 's', 'i', '2', 's', 's',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'd', 'b', 'p', 's', 'a', 'd', 'b', 'w', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'd', 'b', 'p', 's', 'a', 'd', 'b', 'w', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'b', 'p',
  's', 'a', 'd', 'b', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'i', 'v', 'p', 'd', '5',
  '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'd', 'i', 'v', 'p', 's', '5', '1', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p',
  '2', 'p', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 'p', '2', 'p', 's',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'p', 'f', 'd',
  'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'g', 'a', 't', 'h', 'e', 'r', 'p', 'f', 'd', 'p', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'g', 'a', 't', 'h', 'e', 'r', 'p', 'f', 'q', 'p', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'a', 't',
  'h', 'e', 'r', 'p', 'f', 'q', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's', 'd', '_',
  'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's',
  's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'm',
  'p', 's', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'm', 'p', 's', 's', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'd', 'q',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2',
  'p', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p',
  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd',
  '2', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  'p', 'd', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
  'v', 't', 'p', 'd', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'c', 'v', 't', 'p', 'd', '2', 'u', 'd', 'q', '1', '2', '8', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'u', 'd', 'q', '2', '5',
  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'u', 'd',
  'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd',
  '2', 'u', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
  't', 'p', 'd', '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'c', 'v', 't', 'p', 'd', '2', 'u', 'q', 'q', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd', 'q', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd', 'q', '2',
  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'd',
  'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's',
  '2', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  'p', 's', '2', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
  'v', 't', 'p', 's', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'c', 'v', 't', 'p', 's', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'd', 'q', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'd', 'q',
  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 's', '2',
  'u', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  'p', 's', '2', 'u', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 'p', 's', '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'u', 'q', 'q', '5', '1', '2',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'q', 'q', '2', 'p', 's', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 'd', '2', 's',
  's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
  't', 's', 's', '2', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'd', 'q', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'd', 'q',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd',
  '2', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  't', 'p', 'd', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 't', 'p', 'd', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'u', 'd', 'q', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'u',
  'd', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't',
  'p', 'd', '2', 'u', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 't', 'p', 'd', '2', 'u', 'q', 'q', '1', '2', '8', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'u', 'q', 'q', '2',
  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2',
  'u', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  't', 'p', 's', '2', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 't', 'p', 's', '2', 'q', 'q', '1', '2', '8', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'q', 'q', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'q', 'q',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's',
  '2', 'u', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
  't', 't', 'p', 's', '2', 'u', 'd', 'q', '2', '5', '6', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'u', 'd', 'q', '5', '1', '2',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'u', 'q',
  'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p',
  's', '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
  'v', 't', 't', 'p', 's', '2', 'u', 'q', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 'u', 'q', 'q', '2', 'p', 's', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'd', 'i', 'v', 's', 'd', '_', 'r', 'o', 'u',
  'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'i', 'v', 's', 's', '_', 'r',
  'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p',
  'i', 'm', 'm', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f',
  'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '2', '5', '6', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '5', '1',
  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm',
  'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u',
  'p', 'i', 'm', 'm', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 's', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 's', 'd', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 's', 's',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's', 's', 's', 'd',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's', 's', 's', 's',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 'p', 'd', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 'p',
  'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x',
  'p', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't',
  'e', 'x', 'p', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g',
  'e', 't', 'e', 'x', 'p', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'g', 'e', 't', 'e', 'x', 'p', 'p', 's', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 's', 'd', '1', '2', '8', '_',
  'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e',
  'x', 'p', 's', 's', '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p', 'd', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p',
  'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a',
  'n', 't', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e',
  't', 'm', 'a', 'n', 't', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p', 's', '2', '5', '6', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p', 's', '5', '1',
  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 's',
  'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e',
  't', 'm', 'a', 'n', 't', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'm', 'a', 'x', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 's', '_', 'r', 'o', 'u',
  'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 's', 'd', '_', 'r',
  'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 's', 's',
  '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l',
  's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'u', 'l', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'd', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'd', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'd', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'b', '2', '5', '6', 'm', 'e',
  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd', 'b', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
  'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
  'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'd',
  'w', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 'd', 'w', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'd', 'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'b', '1', '2', '8', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'b', '2', '5', '6', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'b', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'b', '1', '2', '8', 'm', 'e',
  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'b', '2', '5',
  '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q',
  'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 'q', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 'q', 'd', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'q', 'd', '2', '5', '6', 'm', 'e', 'm', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'd', '5', '1', '2', 'm', 'e',
  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'w', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'w', '2', '5',
  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q', 'w', '1', '2',
  '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'q',
  'w', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 'q', 'w', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'w', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'w', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'w', 'b', '2', '5', '6', 'm', 'e',
  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'w', 'b', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's',
  'd', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  's', 'd', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 's', 'd', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 's', 'd', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'b', '2', '5', '6', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'b', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's',
  'd', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  's', 'd', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 's', 'd', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 's', 'd', 'w', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'w', '2', '5', '6', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'd', 'w', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's',
  'q', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  's', 'q', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 's', 'q', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 's', 'q', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'b', '2', '5', '6', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'b', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's',
  'q', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  's', 'q', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 's', 'q', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 's', 'q', 'd', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'd', '2', '5', '6', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'd', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's',
  'q', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  's', 'q', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 's', 'q', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 's', 'q', 'w', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'w', '2', '5', '6', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'q', 'w', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's',
  'w', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  's', 'w', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 's', 'w', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 's', 'w', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 's', 'w', 'b', '2', '5', '6', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 's', 'w', 'b', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u',
  's', 'd', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 'u', 's', 'd', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'o', 'v', 'u', 's', 'd', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'b', '1', '2', '8', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'b', '2',
  '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  'u', 's', 'd', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w', '1', '2', '8', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w', '2', '5', '6', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w', '5', '1',
  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w',
  '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 'u', 's', 'd', 'w', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'd', 'w', '5', '1', '2', 'm', 'e',
  'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b',
  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's',
  'q', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  'u', 's', 'q', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 'u', 's', 'q', 'b', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'b', '2', '5', '6', 'm',
  'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q',
  'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'o', 'v', 'u', 's', 'q', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'm', 'o', 'v', 'u', 's', 'q', 'd', '2', '5', '6', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'd', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'd', '1', '2', '8',
  'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's',
  'q', 'd', '2', '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'o', 'v', 'u', 's', 'q', 'd', '5', '1', '2', 'm', 'e', 'm', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '2',
  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q',
  'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u',
  's', 'q', 'w', '1', '2', '8', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '2', '5', '6', 'm', 'e', 'm', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'q', 'w', '5', '1',
  '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u',
  's', 'w', 'b', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 'u', 's', 'w', 'b', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'o', 'v', 'u', 's', 'w', 'b', '5', '1', '2', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'm', 'o', 'v', 'u', 's', 'w', 'b', '1', '2', '8', 'm', 'e', 'm',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v', 'u', 's', 'w', 'b', '2',
  '5', '6', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o', 'v',
  'u', 's', 'w', 'b', '5', '1', '2', 'm', 'e', 'm', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 'a', 'n', 'g', 'e', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'r', 'a', 'n', 'g', 'e', 'p', 'd', '2', '5', '6', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'r', 'a', 'n', 'g', 'e', 'p', 'd', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'r', 'a', 'n', 'g', 'e', 'p', 's', '1', '2', '8', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'a', 'n', 'g', 'e', 'p', 's', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'a', 'n', 'g', 'e', 'p', 's', '5', '1',
  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'a', 'n', 'g', 'e', 's', 'd', '1',
  '2', '8', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
  'a', 'n', 'g', 'e', 's', 's', '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 'd', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p',
  'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c',
  'e', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd',
  'u', 'c', 'e', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
  'e', 'd', 'u', 'c', 'e', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 's', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 's', 'd', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 's', 's', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p', 'd', '_', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p',
  'd', '_', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's',
  'c', 'a', 'l', 'e', 'p', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd',
  's', 'c', 'a', 'l', 'e', 'p', 's', '_', '1', '2', '8', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p', 's', '_', '2', '5',
  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e',
  'p', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l',
  'e', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 's', 's', '_', 'r', 'o', 'u', 'n',
  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 'd',
  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f',
  'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l',
  'e', 'f', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c',
  'a', 'l', 'e', 'f', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  's', 'c', 'a', 'l', 'e', 'f', 'p', 's', '2', '5', '6', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 's', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 's', 'd', '_', 'r', 'o',
  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f',
  's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
  'u', 'b', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 's', 'u', 'b', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p', 'h', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p', 'h', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p', 'h',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i',
  'm', 'm', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f',
  'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '2', '5', '6', '_', 'm', 'a',
  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 'd', '5',
  '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i',
  'm', 'm', 'p', 's', '1', '2', '8', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f',
  'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 's', '2', '5', '6', '_', 'm', 'a',
  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i', 'm', 'm', 'p', 's', '5',
  '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u', 'p', 'i',
  'm', 'm', 's', 'd', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'i', 'x', 'u',
  'p', 'i', 'm', 'm', 's', 's', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a',
  'x', 'p', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 'p', 's', '5', '1', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'm', 'i', 'n', 'p', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 's',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'm', 'u', 'l', 'p', 'd', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u',
  'l', 'p', 's', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'd', 'w',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'w', 'b', '5', '1', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'a', 'c', 'k', 'u', 's', 'd', 'w', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
  'c', 'k', 'u', 's', 'w', 'b', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'b',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'w', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e',
  'r', 'm', 'v', 'a', 'r', 'd', 'f', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm',
  'v', 'a', 'r', 'd', 'f', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a',
  'r', 'd', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'd',
  'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'h', 'i', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'h', 'i', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 'h', 'i', '5', '1', '2', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'e', 'r', 'm', 'v', 'a', 'r', 'q', 'i', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r',
  'm', 'v', 'a', 'r', 'q', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v',
  'a', 'r', 'q', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r',
  's', 'f', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'r', 'm', 'v', 'a', 'r', 's', 'i',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'u', 'b', 's', 'w', '5', '1',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'm', 'a', 'd', 'd', 'w', 'd', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'u', 'l', 'h', 'r', 's', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h',
  'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'u', 'w', '5', '1', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'm', 'u', 'l', 't', 'i', 's', 'h', 'i', 'f', 't', 'q', 'b',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'm', 'u', 'l', 't', 'i', 's', 'h', 'i', 'f',
  't', 'q', 'b', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'u', 'l', 't', 'i', 's',
  'h', 'i', 'f', 't', 'q', 'b', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'a', 'd', 'b',
  'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'h', 'u', 'f', 'b', '5', '1', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'l', 'l', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', '5',
  '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'l', 'l', 'w', '5', '1', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
  'l', 'd', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', 'i', '5', '1',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'l', 'l', 'w', 'i', '5', '1', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
  'l', 'v', '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v', '8', 'd', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 's', 'l', 'l', 'v', '8', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v',
  '1', '6', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'v', '3', '2', 'h', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'a', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'q', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'r', 'a', 'q', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
  'a', 'q', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', '5', '1', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'a', 'd', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'q',
  'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'q', 'i', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'a', 'q', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w',
  'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '1', '6', 's', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'a', 'v', 'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v',
  'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '8', 'd', 'i', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'r', 'a', 'v', '8', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'v', '1', '6',
  'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'r', 'a', 'v', '3', '2', 'h', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
  'r', 'l', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q', '5', '1', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 's', 'r', 'l', 'w', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'd',
  'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q', 'i', '5', '1', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'l', 'w', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v',
  '1', '6', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '8', 'd', 'i', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'r', 'l', 'v', '8', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'v', '1', '6',
  'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'r', 'l', 'v', '3', '2', 'h', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't',
  'e', 'r', 'n', 'l', 'o', 'g', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r',
  'n', 'l', 'o', 'g', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l',
  'o', 'g', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g',
  'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'q', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 't', 'e', 'r', 'n', 'l', 'o', 'g', 'q', '5', '1', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 'c', 'p', '1', '4', 'p', 'd', '1', '2', '8', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'r', 'c', 'p', '1', '4', 'p', 'd', '2', '5', '6', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 'd', '5', '1', '2', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 's', '1', '2', '8', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 's', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 'p', 's', '5', '1',
  '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 's', 'd', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'c', 'p', '1', '4', 's', 's', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'r', 'c', 'p', '2', '8', 'p', 'd', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 'c', 'p', '2', '8', 'p', 's', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
  'c', 'p', '2', '8', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'r', 'c', 'p', '2', '8', 's', 's', '_', 'r', 'o', 'u', 'n',
  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', '1', '4', 'p',
  'd', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't',
  '1', '4', 'p', 'd', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's',
  'q', 'r', 't', '1', '4', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 's', 'q', 'r', 't', '1', '4', 'p', 's', '1', '2', '8', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', '1', '4', 'p', 's', '2', '5',
  '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', '1', '4', 'p',
  's', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't',
  '1', '4', 's', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't',
  '1', '4', 's', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't',
  '2', '8', 'p', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't',
  '2', '8', 'p', 's', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't',
  '2', '8', 's', 'd', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 's', 'q', 'r', 't', '2', '8', 's', 's', '_', 'r', 'o', 'u', 'n',
  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'p',
  'f', 'd', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'p', 'f', 'd',
  'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'p', 'f', 'q', 'p', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 's', 'c', 'a', 't', 't', 'e', 'r', 'p', 'f', 'q', 'p', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
  'u', 'b', 'p', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'u', 'b', 'p', 's', '5', '1',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'c', 'o', 'm', 'i', 's', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'o', 'm', 'i',
  's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 's', 'd', '2', 's', 'i', '3', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 's', 'd', '2', 's', 'i', '6', '4', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 's', 'd', '2', 'u', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's',
  'd', '2', 'u', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 's', '2',
  's', 'i', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 's', '2', 's', 'i', '6',
  '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'c', 'v', 't', 's', 's', '2', 'u', 's', 'i', '3', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 's', 's', '2', 'u', 's', 'i', '6', '4', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'd', 'p', 'b', 'u', 's', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b',
  'u', 's', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', 's', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', 's', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'd', 'p', 'b', 'u', 's', 'd', 's', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'd', 'p', 'w', 's', 's', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w',
  's', 's', 'd', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', 's', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'p', 'd', 'p', 'w', 's', 's', 'd', 's', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'd', 'p', 'w', 's', 's', 'd', 's', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'd', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'd', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'd', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'h', 'i', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'h', 'i', '2', '5', '6', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'h', 'i', '5', '1', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p', 'd', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p', 'd', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p', 'd',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r', 'p',
  's', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a', 'r',
  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v', 'a',
  'r', 'p', 's', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
  'a', 'r', 'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
  'a', 'r', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
  'a', 'r', 'q', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2', 'v',
  'a', 'r', 'q', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', '2',
  'v', 'a', 'r', 'q', 'i', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i',
  '2', 'v', 'a', 'r', 'q', 'i', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm',
  'i', 'l', 'v', 'a', 'r', 'p', 'd', '5', '1', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r',
  'm', 'i', 'l', 'v', 'a', 'r', 'p', 's', '5', '1', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
  'a', 'd', 'd', '5', '2', 'h', 'u', 'q', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
  'a', 'd', 'd', '5', '2', 'h', 'u', 'q', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
  'a', 'd', 'd', '5', '2', 'h', 'u', 'q', '5', '1', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
  'a', 'd', 'd', '5', '2', 'l', 'u', 'q', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
  'a', 'd', 'd', '5', '2', 'l', 'u', 'q', '2', '5', '6', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm',
  'a', 'd', 'd', '5', '2', 'l', 'u', 'q', '5', '1', '2', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  'n', 'e', '2', 'p', 's', '2', 'b', 'f', '1', '6', '_', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 'n', 'e', '2', 'p', 's', '2', 'b', 'f', '1', '6', '_', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 'n', 'e', '2', 'p', 's', '2', 'b', 'f', '1',
  '6', '_', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'n', 'e', 'p', 's', '2', 'b',
  'f', '1', '6', '_', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'n', 'e', 'p', 's',
  '2', 'b', 'f', '1', '6', '_', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'b', 'f', '1',
  '6', 'p', 's', '_', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'b', 'f', '1', '6', 'p',
  's', '_', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'b', 'f', '1', '6', 'p', 's', '_',
  '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'a', 'd', 'd', 'p', 'h', '5', '1', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'i',
  'v', 'p', 'h', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's', 'h', '_', 'r', 'o',
  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'm', 'p', 's', 'h', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'd', 'i', 'v', 's', 'h', '_', 'r', 'o', 'u', 'n',
  'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'f', 'p', 'c', 'l', 'a', 's', 's', 's',
  'h', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p', 'p', 'h',
  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e', 'x', 'p',
  'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'e',
  'x', 'p', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e',
  't', 'e', 'x', 'p', 's', 'h', '1', '2', '8', '_', 'r', 'o', 'u', 'n', 'd',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n', 't', 'p', 'h',
  '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't', 'm', 'a', 'n',
  't', 'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'g', 'e', 't',
  'm', 'a', 'n', 't', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'g', 'e', 't', 'm', 'a', 'n', 't', 's', 'h', '_', 'r', 'o', 'u', 'n', 'd',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 'h', '_', 'r', 'o', 'u',
  'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 's', 'h', '_', 'r',
  'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l', 's', 'h',
  '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p',
  'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 'p',
  'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 'p', 'h',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 's', 'h', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 'h', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e', 'p', 'h',
  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u', 'c', 'e',
  'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'e', 'd', 'u',
  'c', 'e', 's', 'h', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c',
  'a', 'l', 'e', 'p', 'h', '_', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p', 'h', '_', '2', '5', '6', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 'p', 'h',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'n', 'd', 's', 'c', 'a', 'l', 'e', 's',
  'h', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's',
  'q', 'r', 't', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
  's', 'q', 'r', 't', 'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 's', 'q', 'r', 't', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 's', 'q', 'r', 't', 's', 'h', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's',
  'c', 'a', 'l', 'e', 'f', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 'h', '2', '5', '6', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 'p', 'h', '5', '1', '2', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 's', 'c', 'a', 'l', 'e', 'f', 's', 'h', '_', 'r',
  'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'u', 'b', 's', 'h',
  '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'd', 'q', '2', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 'p', 'd', '2', 'p', 'h', '1', '2', '8', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'd', '2', 'p', 'h', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'd', '2', 'p', 'h',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h',
  '2', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'p', 'h', '2', 'd', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 'p', 'h', '2', 'd', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 'd', '1', '2', '8',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 'd',
  '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h',
  '2', 'p', 'd', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'p', 'h', '2', 'p', 's', 'x', '1', '2', '8', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 's', 'x', '2', '5', '6', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'p', 's', 'x',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h',
  '2', 'q', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'p', 'h', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 'p', 'h', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'u', 'd', 'q', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'u',
  'd', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
  'p', 'h', '2', 'u', 'd', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 'p', 'h', '2', 'u', 'q', 'q', '1', '2', '8', '_', 'm',
  'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'u', 'q', 'q', '2',
  '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2',
  'u', 'q', 'q', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'p', 'h', '2', 'u', 'w', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 'p', 'h', '2', 'u', 'w', '2', '5', '6', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'u', 'w', '5', '1', '2',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2', 'w', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 'h', '2',
  'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p',
  'h', '2', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'p', 's', '2', 'p', 'h', 'x', '1', '2', '8', '_', 'm', 'a', 's', 'k',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p', 'h', 'x', '2', '5', '6', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2', 'p', 'h', 'x',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'q', 'q',
  '2', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 'q', 'q', '2', 'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 's', 'd', '2', 's', 'h', '_', 'r', 'o', 'u', 'n', 'd',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'h', '2', 's', 'd',
  '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 's', 'h', '2', 's', 's', '_', 'r', 'o', 'u', 'n', 'd', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 's', 's', '2', 's', 'h', '_', 'r', 'o',
  'u', 'n', 'd', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p',
  'h', '2', 'd', 'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 't', 'p', 'h', '2', 'd', 'q', '2', '5', '6', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'd', 'q', '5', '1', '2',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'q',
  'q', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't',
  'p', 'h', '2', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'c', 'v', 't', 't', 'p', 'h', '2', 'q', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'u', 'd', 'q', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h',
  '2', 'u', 'd', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 't', 'p', 'h', '2', 'u', 'd', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'u', 'q', 'q', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h',
  '2', 'u', 'q', 'q', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 't', 'p', 'h', '2', 'u', 'q', 'q', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'u', 'w', '1', '2',
  '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2',
  'u', 'w', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
  't', 'p', 'h', '2', 'u', 'w', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'w', '1', '2', '8', '_', 'm', 'a',
  's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'w', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 't', 'p', 'h', '2', 'w',
  '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'u', 'd',
  'q', '2', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 'u', 'q', 'q', '2', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'c', 'v', 't', 'u', 'q', 'q', '2', 'p', 'h', '2', '5', '6',
  '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'a', 'd', 'd', 'c', 'p',
  'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'a',
  'd', 'd', 'c', 'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'f', 'c', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '5', '1', '2', '_', 'm', 'a',
  's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'a', 'd', 'd', 'c', 's', 'h', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'u', 'l', 'c', 'p', 'h', '1',
  '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'u', 'l', 'c',
  'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'c', 'm',
  'u', 'l', 'c', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'f', 'c', 'm', 'u', 'l', 'c', 's', 'h', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'f', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '1', '2', '8', '_', 'm', 'a', 's',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '2', '5', '6', '_',
  'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '5',
  '1', '2', '_', 'm', 'a', 's', 'k', '3', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd',
  'c', 's', 'h', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'u', 'l', 'c',
  'p', 'h', '1', '2', '8', '_', 'm', 'a', 's', 'k', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'u',
  'l', 'c', 'p', 'h', '2', '5', '6', '_', 'm', 'a', 's', 'k', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
  'm', 'u', 'l', 'c', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'f', 'm', 'u', 'l', 'c', 's', 'h', '_', 'm', 'a', 's', 'k', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'f', 'c', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '1', '2', '8', '_', 'm', 'a',
  's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '2',
  '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'c', 'm', 'a', 'd',
  'd', 'c', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'f', 'c', 'm', 'a', 'd', 'd', 'c', 's', 'h', '_', 'm', 'a', 's', 'k', 'z',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'f', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '1', '2', '8', '_', 'm',
  'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 'c', 'p', 'h', '2',
  '5', '6', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd',
  'c', 'p', 'h', '5', '1', '2', '_', 'm', 'a', 's', 'k', 'z', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f',
  'm', 'a', 'd', 'd', 'c', 's', 'h', '_', 'm', 'a', 's', 'k', 'z', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'a', 'x', 'p', 'h', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 'p', 'h', '2', '5',
  '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'm', 'a', 'x', 'p', 'h', '5', '1', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p',
  'h', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p', 'h', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'i', 'n', 'p', 'h', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'u', 'l', 'p', 'h', '5', '1',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 's', 'u', 'b', 'p', 'h', '5', '1', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'o', 'm',
  'i', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'h', '2', 's', 'i', '3', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'c', 'v', 't', 's', 'h', '2', 's', 'i', '6', '4', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 's', 'h', '2', 'u', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
  's', 'h', '2', 'u', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'i',
  '2', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'c', 'v', 't', 's', 'i', '6', '4', '2', 's', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'c', 'v', 't', 't', 's', 'h', '2', 's', 'i', '3', '2', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'c', 'v', 't', 't', 's', 'h', '2', 's', 'i', '6', '4', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v',
  't', 't', 's', 'h', '2', 'u', 's', 'i', '3', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
  't', 's', 'h', '2', 'u', 's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'u',
  's', 'i', '2', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'u', 's', 'i', '6', '4',
  '2', 's', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'h',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'h', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'a', 'x', 'o', 'r', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'x', 'o', 'r', '6', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'b', 'e', 'x', 't', 'r', '_', 'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'e', 'x', 't', 'r',
  '_', 'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'b', 'z', 'h', 'i', '_', 's', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'z',
  'h', 'i', '_', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'd', 'e', 'p', '_', 's', 'i', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'd', 'e', 'p', '_', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'e', 'x', 't', '_', 's', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'e', 'x', 't', '_', 'd', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l', 'd', 'e', 'm', 'o', 't',
  'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'l', 'f', 'l', 'u', 's', 'h', 'o', 'p', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l',
  'r', 's', 's', 'b', 's', 'y', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l', 'u', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'l', 'w',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'l', 'z', 'e', 'r', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'm', 'p', 'c', 'c', 'x',
  'a', 'd', 'd', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'm', 'p', 'c', 'c', 'x', 'a', 'd', 'd',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'd', 'i', 'r', 'e', 'c', 't', 's', 't', 'o', 'r', 'e', '_',
  'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'd', 'i', 'r', 'e', 'c', 't', 's', 't', 'o', 'r', 'e',
  '_', 'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'e', 'n', 'q', 'c', 'm', 'd', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'n', 'q',
  'c', 'm', 'd', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'e', 'a', 'd', 'e', 'f', 'l', 'a', 'g', 's',
  '_', 'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'r', 'e', 'a', 'd', 'e', 'f', 'l', 'a', 'g', 's',
  '_', 'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'w', 'r', 'i', 't', 'e', 'e', 'f', 'l', 'a', 'g',
  's', '_', 'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'i', 't', 'e', 'e', 'f', 'l', 'a',
  'g', 's', '_', 'u', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u',
  'b', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 'd',
  '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'f', 'm', 'a', 'd', 'd', 's', 'u', 'b', 'p', 's', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'f', 'x', 'r', 's', 't', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'x', 'r', 's', 't', 'o',
  'r', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'f', 'x', 's', 'a', 'v', 'e', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'x', 's', 'a',
  'v', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'i', 'n', 'c', 's', 's', 'p', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'i', 'n',
  'c', 's', 's', 'p', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'i', 'n', 'v', 'p', 'c', 'i', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't',
  'i', 'l', 'e', '_', 'l', 'o', 'a', 'd', 'c', 'o', 'n', 'f', 'i', 'g', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  't', 'i', 'l', 'e', '_', 'l', 'o', 'a', 'd', 'c', 'o', 'n', 'f', 'i', 'g',
  '_', 'i', 'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'l', 'w', 'p', 'c',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'l', 'o', 'a', 'd', 'i', 'w', 'k', 'e', 'y', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'w', 'p',
  'i', 'n', 's', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'l', 'w', 'p', 'i', 'n', 's', '6', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'l', 'w', 'p', 'v', 'a', 'l', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'l', 'w', 'p', 'v', 'a', 'l',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'e', 'm', 'm', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'f', 'e', 'm', 'm', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'a', 's', 'k', 'm', 'o', 'v', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'n', 't', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'a', 'c', 'k', 's', 's', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's',
  'w', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'a', 'c', 'k', 'u', 's', 'w', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd',
  'd', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'a', 'd', 'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'a', 'd', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 's', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
  'd', 'd', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'a', 'd', 'd', 'u', 's', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a',
  'd', 'd', 'u', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'l', 'i', 'g', 'n', 'r', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'a', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'a', 'n', 'd', 'n', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'a', 'v', 'g', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 'q', 'b', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'c', 'm', 'p', 'e', 'q', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 'q', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 'c', 'm', 'p', 'g', 't', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'g', 't',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'c', 'm', 'p', 'g', 't', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'e', 'c', '_', 'e',
  'x', 't', '_', 'v', '4', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'e', 'c', '_', 's', 'e', 't',
  '_', 'v', '4', 'h', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'w', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'a', 'x', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'x', 'u', 'b', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'i', 'n', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'm', 'i', 'n', 'u', 'b', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'o',
  'v', 'm', 's', 'k', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'u', 'l', 'h', 'u', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'l', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm',
  'u', 'l', 'u', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'a', 'd', 'b',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'l', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'l', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
  'l', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'l', 'l', 'w', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'r', 'a', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'r', 'a', 'w', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
  'r', 'l', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'd',
  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'r', 'l', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', 'i',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'p', 's', 'u', 'b', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
  'u', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'u', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 's',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'u', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'u', 'b', 'u', 's',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'u', 'b', 'u', 's', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c',
  'k', 'h', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c', 'k', 'h', 'd', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'u', 'n', 'p', 'c', 'k', 'h', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c',
  'k', 'l', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'u', 'n', 'p', 'c', 'k', 'l', 'd', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'u', 'n', 'p', 'c', 'k', 'l', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'x', 'o', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'm', 'o', 'n', 'i', 't', 'o', 'r', 'x', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'd', 'i', 'r',
  '6', '4', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'm', 'w', 'a', 'i', 't', 'x', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'l', 'm',
  'u', 'l', 'q', 'd', 'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'l', 'm', 'u', 'l',
  'q', 'd', 'q', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'l', 'm', 'u', 'l', 'q', 'd',
  'q', '5', '1', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 't', 'w', 'r', 'i', 't', 'e', '3', '2', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 't', 'w', 'r', 'i', 't', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'f', 's', 'b',
  'a', 's', 'e', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'f', 's', 'b', 'a', 's', 'e', '6',
  '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'r', 'd', 'g', 's', 'b', 'a', 's', 'e', '3', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd',
  'g', 's', 'b', 'a', 's', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'p', 'i', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 'd', 'p', 'k', 'r', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 'p', 'm', 'c', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd',
  'p', 'r', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'r', 'd', 's', 's', 'p', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'd', 's', 's',
  'p', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'r', 'd', 't', 's', 'c', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 's', 't', 'o', 'r', 's',
  's', 'p', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 's', 'a', 'v', 'e', 'p', 'r', 'e', 'v', 's', 's', 'p', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  's', 'e', 'n', 'd', 'u', 'i', 'p', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'e', 'r', 'i', 'a', 'l',
  'i', 'z', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 's', 'e', 't', 's', 's', 'b', 's', 'y', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'h',
  'a', '1', 'm', 's', 'g', '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 's', 'h', 'a', '1', 'm', 's', 'g', '2',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 's', 'h', 'a', '1', 'n', 'e', 'x', 't', 'e', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'h', 'a', '1',
  'r', 'n', 'd', 's', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 's', 'h', 'a', '2', '5', '6', 'm', 's', 'g',
  '1', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 's', 'h', 'a', '2', '5', '6', 'm', 's', 'g', '2', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'h',
  'a', '2', '5', '6', 'r', 'n', 'd', 's', '2', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 's', 'l', 'w', 'p', 'c',
  'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'm', 'p', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 'e', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'o', 'm', 'i', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 'g', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
  'o', 'm', 'i', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 'l', 't', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o',
  'm', 'i', 'n', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 'p', 'i', '2', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'i', '2',
  'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 'p', 's', '2', 'p', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  's', 's', '2', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 's', '2', 's', 'i', '6',
  '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2', 'p', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  't', 'p', 's', '2', 'p', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 's', 's', '2', 's',
  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'v', 't', 't', 's', 's', '2', 's', 'i', '6', '4', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'a', 'x', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'm', 'a', 'x', 's', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n', 'p',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'm', 'i', 'n', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's', 'k', 'p',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'h', 'u', 'f', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'c', 'p', 'p', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 'c', 'p', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'r', 's', 'q', 'r', 't', 'p', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r',
  's', 'q', 'r', 't', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 's', 'f', 'e', 'n', 'c', 'e', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u',
  'c', 'o', 'm', 'i', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 'g', 'e', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'u', 'c', 'o', 'm', 'i', 'g', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 'l', 'e',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'u', 'c', 'o', 'm', 'i', 'l', 't', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 'n',
  'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'l', 'f', 'l', 'u', 's', 'h', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'm', 'p', 's',
  'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'e', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i',
  's', 'd', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'g', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
  'o', 'm', 'i', 's', 'd', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'l',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'o', 'm', 'i', 's', 'd', 'n', 'e', 'q', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't',
  'p', 'd', '2', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 'p', 'd', '2', 'p', 's', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'c', 'v', 't', 'p', 's', '2', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 's', 'd', '2',
  's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 's', 'd', '2', 's', 'i', '6', '4', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c',
  'v', 't', 's', 'd', '2', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 'p', 'd', '2',
  'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'v', 't', 't', 'p', 's', '2', 'd', 'q', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v',
  't', 't', 's', 'd', '2', 's', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'v', 't', 't', 's', 'd', '2',
  's', 'i', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'l', 'f', 'e', 'n', 'c', 'e', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 's',
  'k', 'm', 'o', 'v', 'd', 'q', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'a', 'x', 'p', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm',
  'a', 'x', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'm', 'f', 'e', 'n', 'c', 'e', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'i', 'n',
  'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'm', 'i', 'n', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'v', 'm', 's', 'k',
  'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'a', 'c', 'k', 's', 's', 'd', 'w', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'a', 'c', 'k', 's', 's', 'w', 'b', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c',
  'k', 'u', 's', 'w', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'u', 's', 'e', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'a', 'v', 'g', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'v', 'g', 'w', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'w', 'd', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'o', 'v', 'm', 's', 'k', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l',
  'h', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'u', 'w', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'a', 'd', 'b', 'w', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l',
  'l', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'l', 'l', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'l', 'l', 'd', 'i',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'l', 'l', 'q', 'i', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'l', 'l', 'w', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'd', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'r', 'a', 'w', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r',
  'a', 'd', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'a', 'w', 'i', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'r', 'l', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l',
  'q', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  's', 'r', 'l', 'd', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'r', 'l', 'q', 'i',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 's', 'r', 'l', 'w', 'i', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u',
  'c', 'o', 'm', 'i', 's', 'd', 'e', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 's',
  'd', 'g', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 's', 'd', 'g', 't', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u',
  'c', 'o', 'm', 'i', 's', 'd', 'l', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 's',
  'd', 'l', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'u', 'c', 'o', 'm', 'i', 's', 'd', 'n', 'e', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'a', 'd', 'd', 's', 'u', 'b', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'a', 'd', 'd', 's', 'u', 'b',
  'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'h', 'a', 'd', 'd', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'h', 'a', 'd', 'd', 'p',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'h', 's', 'u', 'b', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'h', 's', 'u', 'b', 'p', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'l', 'd', 'd', 'q', 'u', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'm', 'o', 'n', 'i', 't', 'o', 'r', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'm', 'w', 'a', 'i', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'b', 'l', 'e', 'n', 'd', 'v', 'p', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'b', 'l', 'e', 'n', 'd', 'v', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd', 'p', 'p', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'd',
  'p', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'i', 'n', 's', 'e', 'r', 't', 'p', 's', '1', '2', '8',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'm', 'p', 's', 'a', 'd', 'b', 'w', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'c',
  'k', 'u', 's', 'd', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'b', 'l', 'e', 'n', 'd',
  'v', 'b', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'm', 'i', 'n', 'p', 'o', 's', 'u',
  'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 't', 'e', 's', 't', 'c', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 't', 'e', 's', 't', 'n', 'z', 'c', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 't', 'e',
  's', 't', 'z', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'r', 'o', 'u', 'n', 'd', 'p', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'r', 'o', 'u', 'n', 'd', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'r', 'o', 'u', 'n', 'd', 's', 'd',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'r', 'o', 'u', 'n', 'd', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'r', 'c', '3', '2', 'h',
  'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'c', 'r', 'c', '3', '2', 's', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'c', 'r', 'c', '3', '2',
  'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'c', 'r', 'c', '3', '2', 'd', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p',
  'e', 's', 't', 'r', 'i', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 's',
  't', 'r', 'i', 'a', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 's', 't',
  'r', 'i', 'c', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 's', 't', 'r',
  'i', 'o', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 's', 't', 'r', 'i',
  's', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 's', 't', 'r', 'i', 'z',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'e', 's', 't', 'r', 'm', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 'c', 'm', 'p', 'i', 's', 't', 'r', 'i', 'a', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'c', 'm', 'p', 'i', 's', 't', 'r', 'i', 'c', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c',
  'm', 'p', 'i', 's', 't', 'r', 'i', 'o', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm',
  'p', 'i', 's', 't', 'r', 'i', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p',
  'i', 's', 't', 'r', 'i', 'z', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'c', 'm', 'p', 'i',
  's', 't', 'r', 'm', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e', 'x', 't', 'r', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'e',
  'x', 't', 'r', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'i', 'n', 's', 'e', 'r', 't', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'i',
  'n', 's', 'e', 'r', 't', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'a', 'b', 's', 'b', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'a', 'b', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'a', 'b', 's', 'w', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd',
  'd', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'd', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h',
  'a', 'd', 'd', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 's', 'w', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'h', 'a', 'd', 'd', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 'a', 'd', 'd', 'w',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b', 'd', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u',
  'b', 'd', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b', 's', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'h', 's', 'u', 'b', 's', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'h', 's', 'u', 'b',
  'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 'h', 's', 'u', 'b', 'w', '1', '2', '8', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a',
  'd', 'd', 'u', 'b', 's', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 'm', 'a', 'd', 'd', 'u', 'b', 's',
  'w', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 'm', 'u', 'l', 'h', 'r', 's', 'w', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p',
  'm', 'u', 'l', 'h', 'r', 's', 'w', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'h', 'u',
  'f', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'p', 's', 'h', 'u', 'f', 'b', '1', '2', '8', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's',
  'i', 'g', 'n', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'b', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'p', 's', 'i', 'g', 'n', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'd', '1', '2',
  '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'p', 's', 'i', 'g', 'n', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'p', 's', 'i', 'g', 'n', 'w',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 't', 'i', 'l', 'e', '_', 's', 't', 'o', 'r', 'e', 'c',
  'o', 'n', 'f', 'i', 'g', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 's', 't', 'u', 'i', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'b', 'e', 'x', 't',
  'r', 'i', '_', 'u', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'b', 'e', 'x', 't', 'r', 'i', '_', 'u',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 't', 'c', 'm', 'm', 'i', 'm', 'f', 'p', '1', '6', 'p', 's',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 't', 'c', 'm', 'm', 'i', 'm', 'f', 'p', '1', '6', 'p', 's', '_', 'i',
  'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'c', 'm', 'm', 'r', 'l', 'f',
  'p', '1', '6', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 't', 'c', 'm', 'm', 'r', 'l', 'f', 'p', '1',
  '6', 'p', 's', '_', 'i', 'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'd',
  'p', 'b', 'f', '1', '6', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'd', 'p', 'b', 'f', '1', '6',
  'p', 's', '_', 'i', 'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'd', 'p',
  'b', 's', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 't', 'd', 'p', 'b', 's', 's', 'd', '_', 'i', 'n',
  't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 't', 'd', 'p', 'b', 's', 'u', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  't', 'd', 'p', 'b', 's', 'u', 'd', '_', 'i', 'n', 't', 'e', 'r', 'n', 'a',
  'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 't', 'd', 'p', 'b', 'u', 's', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'd', 'p', 'b', 'u',
  's', 'd', '_', 'i', 'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'd', 'p',
  'b', 'u', 'u', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 't', 'd', 'p', 'b', 'u', 'u', 'd', '_', 'i', 'n',
  't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 't', 'd', 'p', 'f', 'p', '1', '6', 'p',
  's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 't', 'd', 'p', 'f', 'p', '1', '6', 'p', 's', '_', 'i', 'n', 't',
  'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 't', 'e', 's', 't', 'u', 'i', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'i',
  'l', 'e', 'l', 'o', 'a', 'd', 'd', '6', '4', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'i', 'l', 'e', 'l',
  'o', 'a', 'd', 'd', '6', '4', '_', 'i', 'n', 't', 'e', 'r', 'n', 'a', 'l',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 't', 'i', 'l', 'e', 'l', 'o', 'a', 'd', 'd', 't', '1', '6', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  't', 'i', 'l', 'e', 'l', 'o', 'a', 'd', 'd', 't', '1', '6', '4', '_', 'i',
  'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'i', 'l', 'e', 'r', 'e', 'l',
  'e', 'a', 's', 'e', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 't', 'i', 'l', 'e', 's', 't', 'o', 'r', 'e', 'd',
  '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 't', 'i', 'l', 'e', 's', 't', 'o', 'r', 'e', 'd', '6', '4',
  '_', 'i', 'n', 't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 't', 'i', 'l', 'e', 'z',
  'e', 'r', 'o', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 't', 'i', 'l', 'e', 'z', 'e', 'r', 'o', '_', 'i', 'n',
  't', 'e', 'r', 'n', 'a', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 't', 'p', 'a', 'u', 's', 'e', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u',
  'm', 'o', 'n', 'i', 't', 'o', 'r', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'u', 'm', 'w', 'a', 'i', 't', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'b', 'c', 's', 't', 'n', 'e', 'b', 'f', '1', '6', '2', 'p', 's', '1',
  '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'b', 'c', 's', 't', 'n', 'e', 'b', 'f', '1', '6', '2',
  'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'b', 'c', 's', 't', 'n', 'e', 's', 'h',
  '2', 'p', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'b', 'c', 's', 't', 'n', 'e', 's',
  'h', '2', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'n', 'e', 'e',
  'b', 'f', '1', '6', '2', 'p', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
  'n', 'e', 'e', 'b', 'f', '1', '6', '2', 'p', 's', '2', '5', '6', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'c', 'v', 't', 'n', 'e', 'e', 'p', 'h', '2', 'p', 's', '1', '2', '8', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'c', 'v', 't', 'n', 'e', 'e', 'p', 'h', '2', 'p', 's', '2', '5', '6',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'c', 'v', 't', 'n', 'e', 'o', 'b', 'f', '1', '6', '2', 'p', 's',
  '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'c', 'v', 't', 'n', 'e', 'o', 'b', 'f', '1', '6',
  '2', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'n', 'e', 'o', 'p',
  'h', '2', 'p', 's', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'n', 'e', 'o',
  'p', 'h', '2', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'n', 'e',
  'p', 's', '2', 'b', 'f', '1', '6', '1', '2', '8', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't',
  'n', 'e', 'p', 's', '2', 'b', 'f', '1', '6', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c',
  'v', 't', 'p', 's', '2', 'p', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'c', 'v', 't', 'p', 's', '2',
  'p', 'h', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f',
  'i', 'n', 'e', 'i', 'n', 'v', 'q', 'b', '_', 'v', '1', '6', 'q', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n', 'e', 'i', 'n', 'v',
  'q', 'b', '_', 'v', '3', '2', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8',
  'a', 'f', 'f', 'i', 'n', 'e', 'i', 'n', 'v', 'q', 'b', '_', 'v', '6', '4',
  'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n', 'e',
  'q', 'b', '_', 'v', '1', '6', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8',
  'a', 'f', 'f', 'i', 'n', 'e', 'q', 'b', '_', 'v', '3', '2', 'q', 'i', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'g', 'f', '2', 'p', '8', 'a', 'f', 'f', 'i', 'n', 'e', 'q', 'b', '_',
  'v', '6', '4', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'm', 'u', 'l',
  'b', '_', 'v', '1', '6', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p', '8', 'm',
  'u', 'l', 'b', '_', 'v', '3', '2', 'q', 'i', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'g', 'f', '2', 'p',
  '8', 'm', 'u', 'l', 'b', '_', 'v', '6', '4', 'q', 'i', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 's', 'h',
  'a', '5', '1', '2', 'm', 's', 'g', '1', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 's', 'h', 'a', '5', '1',
  '2', 'm', 's', 'g', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'i', 'a', '3', '2', '_', 'v', 's', 'h', 'a', '5', '1', '2', 'r', 'n',
  'd', 's', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 's', 'm', '3', 'm', 's', 'g', '1', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 's',
  'm', '3', 'm', 's', 'g', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 's', 'm', '3', 'r', 'n', 'd', 's',
  '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 's', 'm', '4', 'k', 'e', 'y', '4', '1', '2', '8', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  's', 'm', '4', 'k', 'e', 'y', '4', '2', '5', '6', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 's', 'm', '4',
  'r', 'n', 'd', 's', '4', '1', '2', '8', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 's', 'm', '4', 'r', 'n',
  'd', 's', '4', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'w', 'b', 'i', 'n', 'v', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w',
  'b', 'n', 'o', 'i', 'n', 'v', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'f', 's', 'b', 'a', 's',
  'e', '3', '2', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'w', 'r', 'f', 's', 'b', 'a', 's', 'e', '6', '4', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'w', 'r', 'g', 's', 'b', 'a', 's', 'e', '3', '2', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'g', 's',
  'b', 'a', 's', 'e', '6', '4', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'p', 'k', 'r', 'u', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w',
  'r', 's', 's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'w', 'r', 's', 's', 'q', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'w', 'r', 'u', 's',
  's', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'w', 'r', 'u', 's', 's', 'q', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'x', 'a', 'b', 'o', 'r',
  't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'x', 'b', 'e', 'g', 'i', 'n', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'x', 'e', 'n', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'f', 'r', 'c', 'z', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'r', 'c', 'z', 'p', 'd', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'f', 'r', 'c', 'z', 'p', 's', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'r', 'c',
  'z', 'p', 's', '2', '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'f', 'r', 'c', 'z', 's', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'f', 'r', 'c', 'z', 's', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l',
  '2', 'p', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', '2', 'p', 'd', '2',
  '5', '6', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'e', 'r', 'm', 'i', 'l', '2', 'p', 's', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'p', 'e', 'r', 'm', 'i', 'l', '2', 'p', 's', '2', '5', '6', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'h', 'a', 'd', 'd', 'b', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'b', 'q',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'h', 'a', 'd', 'd', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd',
  'd', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'u', 'b', 'd', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'p', 'h', 'a', 'd', 'd', 'u', 'b', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd',
  'u', 'b', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'u', 'd', 'q', '\000', '_',
  '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v',
  'p', 'h', 'a', 'd', 'd', 'u', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd',
  'u', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'h', 'a', 'd', 'd', 'w', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'h', 'a', 'd', 'd', 'w', 'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 's', 'u', 'b', 'b', 'w',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 'h', 's', 'u', 'b', 'd', 'q', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'h', 's', 'u',
  'b', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 'd', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'm', 'a', 'c', 's', 'd', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 'd',
  'q', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 's', 'd', 'd', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'm', 'a', 'c', 's', 's', 'd', 'q', 'h', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's',
  's', 'd', 'q', 'l', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 's', 'w', 'd', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 'm', 'a', 'c', 's', 's', 'w', 'w', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c',
  's', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'c', 's', 'w', 'w', '\000', '_', '_',
  'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p',
  'm', 'a', 'd', 'c', 's', 's', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 'm', 'a', 'd', 'c',
  's', 'w', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i',
  'a', '3', '2', '_', 'v', 'p', 'p', 'e', 'r', 'm', '\000', '_', '_', 'b', 'u',
  'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h',
  'a', 'b', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a',
  '3', '2', '_', 'v', 'p', 's', 'h', 'a', 'd', '\000', '_', '_', 'b', 'u', 'i',
  'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'a',
  'q', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'v', 'p', 's', 'h', 'a', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l',
  't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'b',
  '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2',
  '_', 'v', 'p', 's', 'h', 'l', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't',
  'i', 'n', '_', 'i', 'a', '3', '2', '_', 'v', 'p', 's', 'h', 'l', 'q', '\000',
  '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_',
  'v', 'p', 's', 'h', 'l', 'w', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 'i', 'a', '3', '2', '_', 'x', 'r', 'e', 's', 'l', 'd', 't', 'r',
  'k', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3',
  '2', '_', 'x', 's', 'u', 's', 'l', 'd', 't', 'r', 'k', '\000', '_', '_', 'b',
  'u', 'i', 'l', 't', 'i', 'n', '_', 'i', 'a', '3', '2', '_', 'x', 't', 'e',
  's', 't', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_', 'b', 'i',
  't', 'r', 'e', 'v', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n', '_',
  'g', 'e', 't', 'i', 'd', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i', 'n',
  '_', 'g', 'e', 't', 'p', 's', '\000', '_', '_', 'b', 'u', 'i', 'l', 't', 'i',
  'n', '_', 's', 'e', 't', 'p', 's', '\000',
  };
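  // BuiltinNames is one NUL-separated string table: builtin names are stored
  // back to back, each terminated by '\000', and the entries below refer to
  // them only by byte offset into this array.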

  struct BuiltinEntry {
    Intrinsic::ID IntrinID;
    unsigned StrTabOffset;
    const char *getName() const {
      return &BuiltinNames[StrTabOffset];
    }
    bool operator<(StringRef RHS) const {
      return strncmp(getName(), RHS.data(), RHS.size()) < 0;
    }
  };
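  // Each lookup table below is sorted by builtin name, so std::lower_bound
  // can binary-search it via BuiltinEntry::operator< above. Note that
  // operator< compares only the first RHS.size() bytes (strncmp), so a table
  // name that merely extends the query string still compares equivalent; the
  // exact-match test (I->getName() == BuiltinNameStr) after each search is
  // what rules those out.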
  StringRef TargetPrefix(TargetPrefixStr);

  /* Target Independent Builtins */ {
    static const BuiltinEntry Names[] = {
      {Intrinsic::adjust_trampoline, 0}, // __builtin_adjust_trampoline
      {Intrinsic::debugtrap, 28}, // __builtin_debugtrap
      {Intrinsic::init_trampoline, 70}, // __builtin_init_trampoline
      {Intrinsic::objectsize, 96}, // __builtin_object_size
      {Intrinsic::stackrestore, 118}, // __builtin_stack_restore
      {Intrinsic::stacksave, 142}, // __builtin_stack_save
      {Intrinsic::thread_pointer, 163}, // __builtin_thread_pointer
      {Intrinsic::trap, 188}, // __builtin_trap
      {Intrinsic::eh_unwind_init, 48}, // __builtin_unwind_init
    };
    auto I = std::lower_bound(std::begin(Names),
                              std::end(Names),
                              BuiltinNameStr);
    if (I != std::end(Names) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
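  // A hedged sketch of a call reaching the per-target tables that follow
  // (assuming, as the TargetPrefixStr/BuiltinNameStr parameters suggest, that
  // this is the TableGen-generated builtin-name-to-intrinsic lookup routine;
  // its actual signature sits above this excerpt). The function name below is
  // hypothetical, used only for illustration:
  //
  //   Intrinsic::ID ID = lookupIntrinsicForBuiltin("arm", "__builtin_arm_qadd");
  //   // TargetPrefix == "arm" selects the armNames table further down, the
  //   // binary search lands on the "__builtin_arm_qadd" entry, and the exact
  //   // match returns Intrinsic::arm_qadd. A name missing from every table
  //   // fails each exact-match test and falls through all of these blocks.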
  if (TargetPrefix == "aarch64") {
    static const BuiltinEntry aarch64Names[] = {
      {Intrinsic::aarch64_dmb, 203}, // __builtin_arm_dmb
      {Intrinsic::aarch64_dsb, 221}, // __builtin_arm_dsb
      {Intrinsic::aarch64_isb, 239}, // __builtin_arm_isb
      {Intrinsic::aarch64_prefetch, 257}, // __builtin_arm_prefetch
      {Intrinsic::aarch64_tcancel, 548}, // __builtin_arm_tcancel
      {Intrinsic::aarch64_tcommit, 570}, // __builtin_arm_tcommit
      {Intrinsic::aarch64_tstart, 592}, // __builtin_arm_tstart
      {Intrinsic::aarch64_ttest, 613}, // __builtin_arm_ttest
      {Intrinsic::aarch64_sve_aesd, 280}, // __builtin_sve_svaesd_u8
      {Intrinsic::aarch64_sve_aese, 304}, // __builtin_sve_svaese_u8
      {Intrinsic::aarch64_sve_aesimc, 328}, // __builtin_sve_svaesimc_u8
      {Intrinsic::aarch64_sve_aesmc, 354}, // __builtin_sve_svaesmc_u8
      {Intrinsic::aarch64_sve_rax1, 379}, // __builtin_sve_svrax1_u64
      {Intrinsic::aarch64_sve_rdffr, 404}, // __builtin_sve_svrdffr
      {Intrinsic::aarch64_sve_rdffr_z, 426}, // __builtin_sve_svrdffr_z
      {Intrinsic::aarch64_sve_setffr, 450}, // __builtin_sve_svsetffr
      {Intrinsic::aarch64_sve_sm4e, 473}, // __builtin_sve_svsm4e_u32
      {Intrinsic::aarch64_sve_sm4ekey, 498}, // __builtin_sve_svsm4ekey_u32
      {Intrinsic::aarch64_sve_wrffr, 526}, // __builtin_sve_svwrffr
    };
    auto I = std::lower_bound(std::begin(aarch64Names),
                              std::end(aarch64Names),
                              BuiltinNameStr);
    if (I != std::end(aarch64Names) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
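  // Note that the aarch64 table above reuses the __builtin_arm_dmb/dsb/isb
  // strings at offsets 203/221/239; the "arm" table below references the same
  // offsets, so identical builtin names are stored once in BuiltinNames and
  // shared across target tables.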
  if (TargetPrefix == "amdgcn") {
    static const BuiltinEntry amdgcnNames[] = {
      {Intrinsic::amdgcn_alignbyte, 633}, // __builtin_amdgcn_alignbyte
      {Intrinsic::amdgcn_buffer_wbinvl1, 660}, // __builtin_amdgcn_buffer_wbinvl1
      {Intrinsic::amdgcn_buffer_wbinvl1_sc, 692}, // __builtin_amdgcn_buffer_wbinvl1_sc
      {Intrinsic::amdgcn_buffer_wbinvl1_vol, 727}, // __builtin_amdgcn_buffer_wbinvl1_vol
      {Intrinsic::amdgcn_cubeid, 763}, // __builtin_amdgcn_cubeid
      {Intrinsic::amdgcn_cubema, 787}, // __builtin_amdgcn_cubema
      {Intrinsic::amdgcn_cubesc, 811}, // __builtin_amdgcn_cubesc
      {Intrinsic::amdgcn_cubetc, 835}, // __builtin_amdgcn_cubetc
      {Intrinsic::amdgcn_cvt_f32_bf8, 859}, // __builtin_amdgcn_cvt_f32_bf8
      {Intrinsic::amdgcn_cvt_f32_fp8, 888}, // __builtin_amdgcn_cvt_f32_fp8
      {Intrinsic::amdgcn_cvt_pk_bf8_f32, 917}, // __builtin_amdgcn_cvt_pk_bf8_f32
      {Intrinsic::amdgcn_cvt_pk_f32_bf8, 949}, // __builtin_amdgcn_cvt_pk_f32_bf8
      {Intrinsic::amdgcn_cvt_pk_f32_fp8, 981}, // __builtin_amdgcn_cvt_pk_f32_fp8
      {Intrinsic::amdgcn_cvt_pk_fp8_f32, 1013}, // __builtin_amdgcn_cvt_pk_fp8_f32
      {Intrinsic::amdgcn_cvt_pk_i16, 1045}, // __builtin_amdgcn_cvt_pk_i16
      {Intrinsic::amdgcn_cvt_pk_u16, 1073}, // __builtin_amdgcn_cvt_pk_u16
      {Intrinsic::amdgcn_cvt_pk_u8_f32, 1101}, // __builtin_amdgcn_cvt_pk_u8_f32
      {Intrinsic::amdgcn_cvt_pknorm_i16, 1132}, // __builtin_amdgcn_cvt_pknorm_i16
      {Intrinsic::amdgcn_cvt_pknorm_u16, 1164}, // __builtin_amdgcn_cvt_pknorm_u16
      {Intrinsic::amdgcn_cvt_pkrtz, 1196}, // __builtin_amdgcn_cvt_pkrtz
      {Intrinsic::amdgcn_cvt_sr_bf8_f32, 1223}, // __builtin_amdgcn_cvt_sr_bf8_f32
      {Intrinsic::amdgcn_cvt_sr_fp8_f32, 1255}, // __builtin_amdgcn_cvt_sr_fp8_f32
      {Intrinsic::amdgcn_dispatch_id, 1287}, // __builtin_amdgcn_dispatch_id
      {Intrinsic::amdgcn_ds_add_gs_reg_rtn, 1316}, // __builtin_amdgcn_ds_add_gs_reg_rtn
      {Intrinsic::amdgcn_ds_fadd_v2bf16, 1380}, // __builtin_amdgcn_ds_atomic_fadd_v2bf16
      {Intrinsic::amdgcn_ds_bpermute, 1351}, // __builtin_amdgcn_ds_bpermute
      {Intrinsic::amdgcn_ds_gws_barrier, 1419}, // __builtin_amdgcn_ds_gws_barrier
      {Intrinsic::amdgcn_ds_gws_init, 1451}, // __builtin_amdgcn_ds_gws_init
      {Intrinsic::amdgcn_ds_gws_sema_br, 1480}, // __builtin_amdgcn_ds_gws_sema_br
      {Intrinsic::amdgcn_ds_gws_sema_p, 1512}, // __builtin_amdgcn_ds_gws_sema_p
      {Intrinsic::amdgcn_ds_gws_sema_release_all, 1543}, // __builtin_amdgcn_ds_gws_sema_release_all
      {Intrinsic::amdgcn_ds_gws_sema_v, 1584}, // __builtin_amdgcn_ds_gws_sema_v
      {Intrinsic::amdgcn_ds_permute, 1615}, // __builtin_amdgcn_ds_permute
      {Intrinsic::amdgcn_ds_sub_gs_reg_rtn, 1643}, // __builtin_amdgcn_ds_sub_gs_reg_rtn
      {Intrinsic::amdgcn_ds_swizzle, 1678}, // __builtin_amdgcn_ds_swizzle
      {Intrinsic::amdgcn_endpgm, 1706}, // __builtin_amdgcn_endpgm
      {Intrinsic::amdgcn_fdot2, 1730}, // __builtin_amdgcn_fdot2
      {Intrinsic::amdgcn_fdot2_bf16_bf16, 1753}, // __builtin_amdgcn_fdot2_bf16_bf16
      {Intrinsic::amdgcn_fdot2_f16_f16, 1786}, // __builtin_amdgcn_fdot2_f16_f16
      {Intrinsic::amdgcn_fdot2_f32_bf16, 1817}, // __builtin_amdgcn_fdot2_f32_bf16
      {Intrinsic::amdgcn_fmul_legacy, 1849}, // __builtin_amdgcn_fmul_legacy
      {Intrinsic::amdgcn_groupstaticsize, 1878}, // __builtin_amdgcn_groupstaticsize
      {Intrinsic::amdgcn_iglp_opt, 1911}, // __builtin_amdgcn_iglp_opt
      {Intrinsic::amdgcn_implicit_buffer_ptr, 1937}, // __builtin_amdgcn_implicit_buffer_ptr
      {Intrinsic::amdgcn_implicitarg_ptr, 1974}, // __builtin_amdgcn_implicitarg_ptr
      {Intrinsic::amdgcn_interp_mov, 2007}, // __builtin_amdgcn_interp_mov
      {Intrinsic::amdgcn_interp_p1, 2035}, // __builtin_amdgcn_interp_p1
      {Intrinsic::amdgcn_interp_p1_f16, 2062}, // __builtin_amdgcn_interp_p1_f16
      {Intrinsic::amdgcn_interp_p2, 2093}, // __builtin_amdgcn_interp_p2
      {Intrinsic::amdgcn_interp_p2_f16, 2120}, // __builtin_amdgcn_interp_p2_f16
      {Intrinsic::amdgcn_is_private, 2151}, // __builtin_amdgcn_is_private
      {Intrinsic::amdgcn_is_shared, 2179}, // __builtin_amdgcn_is_shared
      {Intrinsic::amdgcn_kernarg_segment_ptr, 2206}, // __builtin_amdgcn_kernarg_segment_ptr
      {Intrinsic::amdgcn_lerp, 2243}, // __builtin_amdgcn_lerp
      {Intrinsic::amdgcn_mbcnt_hi, 2265}, // __builtin_amdgcn_mbcnt_hi
      {Intrinsic::amdgcn_mbcnt_lo, 2291}, // __builtin_amdgcn_mbcnt_lo
      {Intrinsic::amdgcn_mfma_f32_16x16x16bf16_1k, 2317}, // __builtin_amdgcn_mfma_f32_16x16x16bf16_1k
      {Intrinsic::amdgcn_mfma_f32_16x16x16f16, 2359}, // __builtin_amdgcn_mfma_f32_16x16x16f16
      {Intrinsic::amdgcn_mfma_f32_16x16x1f32, 2397}, // __builtin_amdgcn_mfma_f32_16x16x1f32
      {Intrinsic::amdgcn_mfma_f32_16x16x2bf16, 2434}, // __builtin_amdgcn_mfma_f32_16x16x2bf16
      {Intrinsic::amdgcn_mfma_f32_16x16x32_bf8_bf8, 2472}, // __builtin_amdgcn_mfma_f32_16x16x32_bf8_bf8
      {Intrinsic::amdgcn_mfma_f32_16x16x32_bf8_fp8, 2515}, // __builtin_amdgcn_mfma_f32_16x16x32_bf8_fp8
      {Intrinsic::amdgcn_mfma_f32_16x16x32_fp8_bf8, 2558}, // __builtin_amdgcn_mfma_f32_16x16x32_fp8_bf8
      {Intrinsic::amdgcn_mfma_f32_16x16x32_fp8_fp8, 2601}, // __builtin_amdgcn_mfma_f32_16x16x32_fp8_fp8
      {Intrinsic::amdgcn_mfma_f32_16x16x4bf16_1k, 2644}, // __builtin_amdgcn_mfma_f32_16x16x4bf16_1k
      {Intrinsic::amdgcn_mfma_f32_16x16x4f16, 2685}, // __builtin_amdgcn_mfma_f32_16x16x4f16
      {Intrinsic::amdgcn_mfma_f32_16x16x4f32, 2722}, // __builtin_amdgcn_mfma_f32_16x16x4f32
      {Intrinsic::amdgcn_mfma_f32_16x16x8_xf32, 2759}, // __builtin_amdgcn_mfma_f32_16x16x8_xf32
      {Intrinsic::amdgcn_mfma_f32_16x16x8bf16, 2798}, // __builtin_amdgcn_mfma_f32_16x16x8bf16
      {Intrinsic::amdgcn_mfma_f32_32x32x16_bf8_bf8, 2836}, // __builtin_amdgcn_mfma_f32_32x32x16_bf8_bf8
      {Intrinsic::amdgcn_mfma_f32_32x32x16_bf8_fp8, 2879}, // __builtin_amdgcn_mfma_f32_32x32x16_bf8_fp8
      {Intrinsic::amdgcn_mfma_f32_32x32x16_fp8_bf8, 2922}, // __builtin_amdgcn_mfma_f32_32x32x16_fp8_bf8
      {Intrinsic::amdgcn_mfma_f32_32x32x16_fp8_fp8, 2965}, // __builtin_amdgcn_mfma_f32_32x32x16_fp8_fp8
      {Intrinsic::amdgcn_mfma_f32_32x32x1f32, 3008}, // __builtin_amdgcn_mfma_f32_32x32x1f32
      {Intrinsic::amdgcn_mfma_f32_32x32x2bf16, 3045}, // __builtin_amdgcn_mfma_f32_32x32x2bf16
      {Intrinsic::amdgcn_mfma_f32_32x32x2f32, 3083}, // __builtin_amdgcn_mfma_f32_32x32x2f32
      {Intrinsic::amdgcn_mfma_f32_32x32x4_xf32, 3120}, // __builtin_amdgcn_mfma_f32_32x32x4_xf32
      {Intrinsic::amdgcn_mfma_f32_32x32x4bf16, 3159}, // __builtin_amdgcn_mfma_f32_32x32x4bf16
      {Intrinsic::amdgcn_mfma_f32_32x32x4bf16_1k, 3197}, // __builtin_amdgcn_mfma_f32_32x32x4bf16_1k
      {Intrinsic::amdgcn_mfma_f32_32x32x4f16, 3238}, // __builtin_amdgcn_mfma_f32_32x32x4f16
      {Intrinsic::amdgcn_mfma_f32_32x32x8bf16_1k, 3275}, // __builtin_amdgcn_mfma_f32_32x32x8bf16_1k
      {Intrinsic::amdgcn_mfma_f32_32x32x8f16, 3316}, // __builtin_amdgcn_mfma_f32_32x32x8f16
      {Intrinsic::amdgcn_mfma_f32_4x4x1f32, 3353}, // __builtin_amdgcn_mfma_f32_4x4x1f32
      {Intrinsic::amdgcn_mfma_f32_4x4x2bf16, 3388}, // __builtin_amdgcn_mfma_f32_4x4x2bf16
      {Intrinsic::amdgcn_mfma_f32_4x4x4bf16_1k, 3424}, // __builtin_amdgcn_mfma_f32_4x4x4bf16_1k
      {Intrinsic::amdgcn_mfma_f32_4x4x4f16, 3463}, // __builtin_amdgcn_mfma_f32_4x4x4f16
      {Intrinsic::amdgcn_mfma_f64_16x16x4f64, 3498}, // __builtin_amdgcn_mfma_f64_16x16x4f64
      {Intrinsic::amdgcn_mfma_f64_4x4x4f64, 3535}, // __builtin_amdgcn_mfma_f64_4x4x4f64
      {Intrinsic::amdgcn_mfma_i32_16x16x16i8, 3570}, // __builtin_amdgcn_mfma_i32_16x16x16i8
      {Intrinsic::amdgcn_mfma_i32_16x16x32_i8, 3607}, // __builtin_amdgcn_mfma_i32_16x16x32_i8
      {Intrinsic::amdgcn_mfma_i32_16x16x4i8, 3645}, // __builtin_amdgcn_mfma_i32_16x16x4i8
      {Intrinsic::amdgcn_mfma_i32_32x32x16_i8, 3681}, // __builtin_amdgcn_mfma_i32_32x32x16_i8
      {Intrinsic::amdgcn_mfma_i32_32x32x4i8, 3719}, // __builtin_amdgcn_mfma_i32_32x32x4i8
      {Intrinsic::amdgcn_mfma_i32_32x32x8i8, 3755}, // __builtin_amdgcn_mfma_i32_32x32x8i8
      {Intrinsic::amdgcn_mfma_i32_4x4x4i8, 3791}, // __builtin_amdgcn_mfma_i32_4x4x4i8
      {Intrinsic::amdgcn_mqsad_pk_u16_u8, 3825}, // __builtin_amdgcn_mqsad_pk_u16_u8
      {Intrinsic::amdgcn_mqsad_u32_u8, 3858}, // __builtin_amdgcn_mqsad_u32_u8
      {Intrinsic::amdgcn_msad_u8, 3888}, // __builtin_amdgcn_msad_u8
      {Intrinsic::amdgcn_perm, 3913}, // __builtin_amdgcn_perm
      {Intrinsic::amdgcn_permlane16, 3935}, // __builtin_amdgcn_permlane16
      {Intrinsic::amdgcn_permlane64, 3963}, // __builtin_amdgcn_permlane64
      {Intrinsic::amdgcn_permlanex16, 3991}, // __builtin_amdgcn_permlanex16
      {Intrinsic::amdgcn_qsad_pk_u16_u8, 4020}, // __builtin_amdgcn_qsad_pk_u16_u8
      {Intrinsic::amdgcn_queue_ptr, 4052}, // __builtin_amdgcn_queue_ptr
      {Intrinsic::amdgcn_rcp_legacy, 4079}, // __builtin_amdgcn_rcp_legacy
      {Intrinsic::amdgcn_readfirstlane, 4107}, // __builtin_amdgcn_readfirstlane
      {Intrinsic::amdgcn_readlane, 4138}, // __builtin_amdgcn_readlane
      {Intrinsic::amdgcn_rsq_legacy, 4164}, // __builtin_amdgcn_rsq_legacy
      {Intrinsic::amdgcn_s_barrier, 4192}, // __builtin_amdgcn_s_barrier
      {Intrinsic::amdgcn_s_dcache_inv, 4219}, // __builtin_amdgcn_s_dcache_inv
      {Intrinsic::amdgcn_s_dcache_inv_vol, 4249}, // __builtin_amdgcn_s_dcache_inv_vol
      {Intrinsic::amdgcn_s_dcache_wb, 4283}, // __builtin_amdgcn_s_dcache_wb
      {Intrinsic::amdgcn_s_dcache_wb_vol, 4312}, // __builtin_amdgcn_s_dcache_wb_vol
      {Intrinsic::amdgcn_s_decperflevel, 4345}, // __builtin_amdgcn_s_decperflevel
      {Intrinsic::amdgcn_s_get_waveid_in_workgroup, 4377}, // __builtin_amdgcn_s_get_waveid_in_workgroup
      {Intrinsic::amdgcn_s_getpc, 4420}, // __builtin_amdgcn_s_getpc
      {Intrinsic::amdgcn_s_getreg, 4445}, // __builtin_amdgcn_s_getreg
      {Intrinsic::amdgcn_s_incperflevel, 4471}, // __builtin_amdgcn_s_incperflevel
      {Intrinsic::amdgcn_s_memrealtime, 4503}, // __builtin_amdgcn_s_memrealtime
      {Intrinsic::amdgcn_s_memtime, 4534}, // __builtin_amdgcn_s_memtime
      {Intrinsic::amdgcn_s_sendmsg, 4561}, // __builtin_amdgcn_s_sendmsg
      {Intrinsic::amdgcn_s_sendmsghalt, 4588}, // __builtin_amdgcn_s_sendmsghalt
      {Intrinsic::amdgcn_s_setprio, 4619}, // __builtin_amdgcn_s_setprio
      {Intrinsic::amdgcn_s_setreg, 4646}, // __builtin_amdgcn_s_setreg
      {Intrinsic::amdgcn_s_sleep, 4672}, // __builtin_amdgcn_s_sleep
      {Intrinsic::amdgcn_s_wait_event_export_ready, 4697}, // __builtin_amdgcn_s_wait_event_export_ready
      {Intrinsic::amdgcn_s_waitcnt, 4740}, // __builtin_amdgcn_s_waitcnt
      {Intrinsic::amdgcn_sad_hi_u8, 4767}, // __builtin_amdgcn_sad_hi_u8
      {Intrinsic::amdgcn_sad_u16, 4794}, // __builtin_amdgcn_sad_u16
      {Intrinsic::amdgcn_sad_u8, 4819}, // __builtin_amdgcn_sad_u8
      {Intrinsic::amdgcn_sched_barrier, 4843}, // __builtin_amdgcn_sched_barrier
      {Intrinsic::amdgcn_sched_group_barrier, 4874}, // __builtin_amdgcn_sched_group_barrier
      {Intrinsic::amdgcn_sdot2, 4911}, // __builtin_amdgcn_sdot2
      {Intrinsic::amdgcn_sdot4, 4934}, // __builtin_amdgcn_sdot4
      {Intrinsic::amdgcn_sdot8, 4957}, // __builtin_amdgcn_sdot8
      {Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16, 4980}, // __builtin_amdgcn_smfmac_f32_16x16x32_bf16
      {Intrinsic::amdgcn_smfmac_f32_16x16x32_f16, 5022}, // __builtin_amdgcn_smfmac_f32_16x16x32_f16
      {Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8, 5063}, // __builtin_amdgcn_smfmac_f32_16x16x64_bf8_bf8
      {Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8, 5108}, // __builtin_amdgcn_smfmac_f32_16x16x64_bf8_fp8
      {Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8, 5153}, // __builtin_amdgcn_smfmac_f32_16x16x64_fp8_bf8
      {Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8, 5198}, // __builtin_amdgcn_smfmac_f32_16x16x64_fp8_fp8
      {Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16, 5243}, // __builtin_amdgcn_smfmac_f32_32x32x16_bf16
      {Intrinsic::amdgcn_smfmac_f32_32x32x16_f16, 5285}, // __builtin_amdgcn_smfmac_f32_32x32x16_f16
      {Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8, 5326}, // __builtin_amdgcn_smfmac_f32_32x32x32_bf8_bf8
      {Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8, 5371}, // __builtin_amdgcn_smfmac_f32_32x32x32_bf8_fp8
      {Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8, 5416}, // __builtin_amdgcn_smfmac_f32_32x32x32_fp8_bf8
      {Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8, 5461}, // __builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8
      {Intrinsic::amdgcn_smfmac_i32_16x16x64_i8, 5506}, // __builtin_amdgcn_smfmac_i32_16x16x64_i8
      {Intrinsic::amdgcn_smfmac_i32_32x32x32_i8, 5546}, // __builtin_amdgcn_smfmac_i32_32x32x32_i8
      {Intrinsic::amdgcn_sudot4, 5586}, // __builtin_amdgcn_sudot4
      {Intrinsic::amdgcn_sudot8, 5610}, // __builtin_amdgcn_sudot8
      {Intrinsic::amdgcn_udot2, 5634}, // __builtin_amdgcn_udot2
      {Intrinsic::amdgcn_udot4, 5657}, // __builtin_amdgcn_udot4
      {Intrinsic::amdgcn_udot8, 5680}, // __builtin_amdgcn_udot8
      {Intrinsic::amdgcn_wave_barrier, 5703}, // __builtin_amdgcn_wave_barrier
      {Intrinsic::amdgcn_wavefrontsize, 5733}, // __builtin_amdgcn_wavefrontsize
      {Intrinsic::amdgcn_workgroup_id_x, 5764}, // __builtin_amdgcn_workgroup_id_x
      {Intrinsic::amdgcn_workgroup_id_y, 5796}, // __builtin_amdgcn_workgroup_id_y
      {Intrinsic::amdgcn_workgroup_id_z, 5828}, // __builtin_amdgcn_workgroup_id_z
      {Intrinsic::amdgcn_writelane, 5860}, // __builtin_amdgcn_writelane
    };
    auto I = std::lower_bound(std::begin(amdgcnNames),
                              std::end(amdgcnNames),
                              BuiltinNameStr);
    if (I != std::end(amdgcnNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  if (TargetPrefix == "arm") {
    static const BuiltinEntry armNames[] = {
      {Intrinsic::arm_cdp, 5887}, // __builtin_arm_cdp
      {Intrinsic::arm_cdp2, 5905}, // __builtin_arm_cdp2
      {Intrinsic::arm_cmse_tt, 5924}, // __builtin_arm_cmse_TT
      {Intrinsic::arm_cmse_tta, 5946}, // __builtin_arm_cmse_TTA
      {Intrinsic::arm_cmse_ttat, 5969}, // __builtin_arm_cmse_TTAT
      {Intrinsic::arm_cmse_ttt, 5993}, // __builtin_arm_cmse_TTT
      {Intrinsic::arm_dmb, 203}, // __builtin_arm_dmb
      {Intrinsic::arm_dsb, 221}, // __builtin_arm_dsb
      {Intrinsic::arm_get_fpscr, 6016}, // __builtin_arm_get_fpscr
      {Intrinsic::arm_isb, 239}, // __builtin_arm_isb
      {Intrinsic::arm_ldc, 6040}, // __builtin_arm_ldc
      {Intrinsic::arm_ldc2, 6058}, // __builtin_arm_ldc2
      {Intrinsic::arm_ldc2l, 6077}, // __builtin_arm_ldc2l
      {Intrinsic::arm_ldcl, 6097}, // __builtin_arm_ldcl
      {Intrinsic::arm_mcr, 6116}, // __builtin_arm_mcr
      {Intrinsic::arm_mcr2, 6134}, // __builtin_arm_mcr2
      {Intrinsic::arm_mrc, 6153}, // __builtin_arm_mrc
      {Intrinsic::arm_mrc2, 6171}, // __builtin_arm_mrc2
      {Intrinsic::arm_qadd, 6190}, // __builtin_arm_qadd
      {Intrinsic::arm_qadd16, 6209}, // __builtin_arm_qadd16
      {Intrinsic::arm_qadd8, 6230}, // __builtin_arm_qadd8
      {Intrinsic::arm_qasx, 6250}, // __builtin_arm_qasx
      {Intrinsic::arm_qsax, 6269}, // __builtin_arm_qsax
      {Intrinsic::arm_qsub, 6288}, // __builtin_arm_qsub
      {Intrinsic::arm_qsub16, 6307}, // __builtin_arm_qsub16
      {Intrinsic::arm_qsub8, 6328}, // __builtin_arm_qsub8
      {Intrinsic::arm_sadd16, 6348}, // __builtin_arm_sadd16
      {Intrinsic::arm_sadd8, 6369}, // __builtin_arm_sadd8
      {Intrinsic::arm_sasx, 6389}, // __builtin_arm_sasx
      {Intrinsic::arm_sel, 6408}, // __builtin_arm_sel
      {Intrinsic::arm_set_fpscr, 6426}, // __builtin_arm_set_fpscr
      {Intrinsic::arm_shadd16, 6450}, // __builtin_arm_shadd16
      {Intrinsic::arm_shadd8, 6472}, // __builtin_arm_shadd8
      {Intrinsic::arm_shasx, 6493}, // __builtin_arm_shasx
      {Intrinsic::arm_shsax, 6513}, // __builtin_arm_shsax
      {Intrinsic::arm_shsub16, 6533}, // __builtin_arm_shsub16
      {Intrinsic::arm_shsub8, 6555}, // __builtin_arm_shsub8
      {Intrinsic::arm_smlabb, 6576}, // __builtin_arm_smlabb
      {Intrinsic::arm_smlabt, 6597}, // __builtin_arm_smlabt
      {Intrinsic::arm_smlad, 6618}, // __builtin_arm_smlad
      {Intrinsic::arm_smladx, 6638}, // __builtin_arm_smladx
      {Intrinsic::arm_smlald, 6659}, // __builtin_arm_smlald
      {Intrinsic::arm_smlaldx, 6680}, // __builtin_arm_smlaldx
      {Intrinsic::arm_smlatb, 6702}, // __builtin_arm_smlatb
      {Intrinsic::arm_smlatt, 6723}, // __builtin_arm_smlatt
      {Intrinsic::arm_smlawb, 6744}, // __builtin_arm_smlawb
      {Intrinsic::arm_smlawt, 6765}, // __builtin_arm_smlawt
      {Intrinsic::arm_smlsd, 6786}, // __builtin_arm_smlsd
      {Intrinsic::arm_smlsdx, 6806}, // __builtin_arm_smlsdx
      {Intrinsic::arm_smlsld, 6827}, // __builtin_arm_smlsld
      {Intrinsic::arm_smlsldx, 6848}, // __builtin_arm_smlsldx
      {Intrinsic::arm_smuad, 6870}, // __builtin_arm_smuad
      {Intrinsic::arm_smuadx, 6890}, // __builtin_arm_smuadx
      {Intrinsic::arm_smulbb, 6911}, // __builtin_arm_smulbb
      {Intrinsic::arm_smulbt, 6932}, // __builtin_arm_smulbt
      {Intrinsic::arm_smultb, 6953}, // __builtin_arm_smultb
      {Intrinsic::arm_smultt, 6974}, // __builtin_arm_smultt
      {Intrinsic::arm_smulwb, 6995}, // __builtin_arm_smulwb
      {Intrinsic::arm_smulwt, 7016}, // __builtin_arm_smulwt
      {Intrinsic::arm_smusd, 7037}, // __builtin_arm_smusd
      {Intrinsic::arm_smusdx, 7057}, // __builtin_arm_smusdx
      {Intrinsic::arm_ssat, 7078}, // __builtin_arm_ssat
      {Intrinsic::arm_ssat16, 7097}, // __builtin_arm_ssat16
      {Intrinsic::arm_ssax, 7118}, // __builtin_arm_ssax
      {Intrinsic::arm_ssub16, 7137}, // __builtin_arm_ssub16
      {Intrinsic::arm_ssub8, 7158}, // __builtin_arm_ssub8
      {Intrinsic::arm_stc, 7178}, // __builtin_arm_stc
      {Intrinsic::arm_stc2, 7196}, // __builtin_arm_stc2
      {Intrinsic::arm_stc2l, 7215}, // __builtin_arm_stc2l
      {Intrinsic::arm_stcl, 7235}, // __builtin_arm_stcl
      {Intrinsic::arm_sxtab16, 7254}, // __builtin_arm_sxtab16
      {Intrinsic::arm_sxtb16, 7276}, // __builtin_arm_sxtb16
      {Intrinsic::arm_uadd16, 7297}, // __builtin_arm_uadd16
      {Intrinsic::arm_uadd8, 7318}, // __builtin_arm_uadd8
      {Intrinsic::arm_uasx, 7338}, // __builtin_arm_uasx
      {Intrinsic::arm_uhadd16, 7357}, // __builtin_arm_uhadd16
      {Intrinsic::arm_uhadd8, 7379}, // __builtin_arm_uhadd8
      {Intrinsic::arm_uhasx, 7400}, // __builtin_arm_uhasx
      {Intrinsic::arm_uhsax, 7420}, // __builtin_arm_uhsax
      {Intrinsic::arm_uhsub16, 7440}, // __builtin_arm_uhsub16
      {Intrinsic::arm_uhsub8, 7462}, // __builtin_arm_uhsub8
      {Intrinsic::arm_uqadd16, 7483}, // __builtin_arm_uqadd16
      {Intrinsic::arm_uqadd8, 7505}, // __builtin_arm_uqadd8
      {Intrinsic::arm_uqasx, 7526}, // __builtin_arm_uqasx
      {Intrinsic::arm_uqsax, 7546}, // __builtin_arm_uqsax
      {Intrinsic::arm_uqsub16, 7566}, // __builtin_arm_uqsub16
      {Intrinsic::arm_uqsub8, 7588}, // __builtin_arm_uqsub8
      {Intrinsic::arm_usad8, 7609}, // __builtin_arm_usad8
      {Intrinsic::arm_usada8, 7629}, // __builtin_arm_usada8
      {Intrinsic::arm_usat, 7650}, // __builtin_arm_usat
      {Intrinsic::arm_usat16, 7669}, // __builtin_arm_usat16
      {Intrinsic::arm_usax, 7690}, // __builtin_arm_usax
      {Intrinsic::arm_usub16, 7709}, // __builtin_arm_usub16
      {Intrinsic::arm_usub8, 7730}, // __builtin_arm_usub8
      {Intrinsic::arm_uxtab16, 7750}, // __builtin_arm_uxtab16
      {Intrinsic::arm_uxtb16, 7772}, // __builtin_arm_uxtb16
    };
    auto I = std::lower_bound(std::begin(armNames),
                              std::end(armNames),
                              BuiltinNameStr);
    if (I != std::end(armNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  if (TargetPrefix == "bpf") {
    static const BuiltinEntry bpfNames[] = {
      {Intrinsic::bpf_btf_type_id, 7793}, // __builtin_bpf_btf_type_id
      {Intrinsic::bpf_compare, 7819}, // __builtin_bpf_compare
      {Intrinsic::bpf_load_byte, 7841}, // __builtin_bpf_load_byte
      {Intrinsic::bpf_load_half, 7865}, // __builtin_bpf_load_half
      {Intrinsic::bpf_load_word, 7889}, // __builtin_bpf_load_word
      {Intrinsic::bpf_passthrough, 7913}, // __builtin_bpf_passthrough
      {Intrinsic::bpf_preserve_enum_value, 7939}, // __builtin_bpf_preserve_enum_value
      {Intrinsic::bpf_preserve_field_info, 7973}, // __builtin_bpf_preserve_field_info
      {Intrinsic::bpf_preserve_type_info, 8007}, // __builtin_bpf_preserve_type_info
      {Intrinsic::bpf_pseudo, 8040}, // __builtin_bpf_pseudo
    };
    auto I = std::lower_bound(std::begin(bpfNames),
                              std::end(bpfNames),
                              BuiltinNameStr);
    if (I != std::end(bpfNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  if (TargetPrefix == "dx") {
    static const BuiltinEntry dxNames[] = {
      {Intrinsic::dx_create_handle, 8061}, // __builtin_hlsl_create_handle
    };
    auto I = std::lower_bound(std::begin(dxNames),
                              std::end(dxNames),
                              BuiltinNameStr);
    if (I != std::end(dxNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
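  // Even this one-entry "dx" table goes through the same std::lower_bound
  // path as the large tables above; presumably the generator favors one
  // uniform lookup shape over special-casing tiny tables.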
  if (TargetPrefix == "hexagon") {
    static const BuiltinEntry hexagonNames[] = {
      {Intrinsic::hexagon_A2_abs, 8090}, // __builtin_HEXAGON_A2_abs
      {Intrinsic::hexagon_A2_absp, 8115}, // __builtin_HEXAGON_A2_absp
      {Intrinsic::hexagon_A2_abssat, 8141}, // __builtin_HEXAGON_A2_abssat
      {Intrinsic::hexagon_A2_add, 8169}, // __builtin_HEXAGON_A2_add
      {Intrinsic::hexagon_A2_addh_h16_hh, 8194}, // __builtin_HEXAGON_A2_addh_h16_hh
      {Intrinsic::hexagon_A2_addh_h16_hl, 8227}, // __builtin_HEXAGON_A2_addh_h16_hl
      {Intrinsic::hexagon_A2_addh_h16_lh, 8260}, // __builtin_HEXAGON_A2_addh_h16_lh
      {Intrinsic::hexagon_A2_addh_h16_ll, 8293}, // __builtin_HEXAGON_A2_addh_h16_ll
      {Intrinsic::hexagon_A2_addh_h16_sat_hh, 8326}, // __builtin_HEXAGON_A2_addh_h16_sat_hh
      {Intrinsic::hexagon_A2_addh_h16_sat_hl, 8363}, // __builtin_HEXAGON_A2_addh_h16_sat_hl
      {Intrinsic::hexagon_A2_addh_h16_sat_lh, 8400}, // __builtin_HEXAGON_A2_addh_h16_sat_lh
      {Intrinsic::hexagon_A2_addh_h16_sat_ll, 8437}, // __builtin_HEXAGON_A2_addh_h16_sat_ll
      {Intrinsic::hexagon_A2_addh_l16_hl, 8474}, // __builtin_HEXAGON_A2_addh_l16_hl
      {Intrinsic::hexagon_A2_addh_l16_ll, 8507}, // __builtin_HEXAGON_A2_addh_l16_ll
      {Intrinsic::hexagon_A2_addh_l16_sat_hl, 8540}, // __builtin_HEXAGON_A2_addh_l16_sat_hl
      {Intrinsic::hexagon_A2_addh_l16_sat_ll, 8577}, // __builtin_HEXAGON_A2_addh_l16_sat_ll
      {Intrinsic::hexagon_A2_addi, 8614}, // __builtin_HEXAGON_A2_addi
      {Intrinsic::hexagon_A2_addp, 8640}, // __builtin_HEXAGON_A2_addp
      {Intrinsic::hexagon_A2_addpsat, 8666}, // __builtin_HEXAGON_A2_addpsat
      {Intrinsic::hexagon_A2_addsat, 8695}, // __builtin_HEXAGON_A2_addsat
      {Intrinsic::hexagon_A2_addsp, 8723}, // __builtin_HEXAGON_A2_addsp
      {Intrinsic::hexagon_A2_and, 8750}, // __builtin_HEXAGON_A2_and
      {Intrinsic::hexagon_A2_andir, 8775}, // __builtin_HEXAGON_A2_andir
      {Intrinsic::hexagon_A2_andp, 8802}, // __builtin_HEXAGON_A2_andp
      {Intrinsic::hexagon_A2_aslh, 8828}, // __builtin_HEXAGON_A2_aslh
      {Intrinsic::hexagon_A2_asrh, 8854}, // __builtin_HEXAGON_A2_asrh
      {Intrinsic::hexagon_A2_combine_hh, 8880}, // __builtin_HEXAGON_A2_combine_hh
      {Intrinsic::hexagon_A2_combine_hl, 8912}, // __builtin_HEXAGON_A2_combine_hl
      {Intrinsic::hexagon_A2_combine_lh, 8944}, // __builtin_HEXAGON_A2_combine_lh
      {Intrinsic::hexagon_A2_combine_ll, 8976}, // __builtin_HEXAGON_A2_combine_ll
      {Intrinsic::hexagon_A2_combineii, 9008}, // __builtin_HEXAGON_A2_combineii
      {Intrinsic::hexagon_A2_combinew, 9039}, // __builtin_HEXAGON_A2_combinew
      {Intrinsic::hexagon_A2_max, 9069}, // __builtin_HEXAGON_A2_max
      {Intrinsic::hexagon_A2_maxp, 9094}, // __builtin_HEXAGON_A2_maxp
      {Intrinsic::hexagon_A2_maxu, 9120}, // __builtin_HEXAGON_A2_maxu
      {Intrinsic::hexagon_A2_maxup, 9146}, // __builtin_HEXAGON_A2_maxup
      {Intrinsic::hexagon_A2_min, 9173}, // __builtin_HEXAGON_A2_min
      {Intrinsic::hexagon_A2_minp, 9198}, // __builtin_HEXAGON_A2_minp
      {Intrinsic::hexagon_A2_minu, 9224}, // __builtin_HEXAGON_A2_minu
      {Intrinsic::hexagon_A2_minup, 9250}, // __builtin_HEXAGON_A2_minup
      {Intrinsic::hexagon_A2_neg, 9277}, // __builtin_HEXAGON_A2_neg
      {Intrinsic::hexagon_A2_negp, 9302}, // __builtin_HEXAGON_A2_negp
      {Intrinsic::hexagon_A2_negsat, 9328}, // __builtin_HEXAGON_A2_negsat
      {Intrinsic::hexagon_A2_not, 9356}, // __builtin_HEXAGON_A2_not
      {Intrinsic::hexagon_A2_notp, 9381}, // __builtin_HEXAGON_A2_notp
      {Intrinsic::hexagon_A2_or, 9407}, // __builtin_HEXAGON_A2_or
      {Intrinsic::hexagon_A2_orir, 9431}, // __builtin_HEXAGON_A2_orir
      {Intrinsic::hexagon_A2_orp, 9457}, // __builtin_HEXAGON_A2_orp
      {Intrinsic::hexagon_A2_roundsat, 9482}, // __builtin_HEXAGON_A2_roundsat
      {Intrinsic::hexagon_A2_sat, 9512}, // __builtin_HEXAGON_A2_sat
      {Intrinsic::hexagon_A2_satb, 9537}, // __builtin_HEXAGON_A2_satb
      {Intrinsic::hexagon_A2_sath, 9563}, // __builtin_HEXAGON_A2_sath
      {Intrinsic::hexagon_A2_satub, 9589}, // __builtin_HEXAGON_A2_satub
      {Intrinsic::hexagon_A2_satuh, 9616}, // __builtin_HEXAGON_A2_satuh
      {Intrinsic::hexagon_A2_sub, 9643}, // __builtin_HEXAGON_A2_sub
      {Intrinsic::hexagon_A2_subh_h16_hh, 9668}, // __builtin_HEXAGON_A2_subh_h16_hh
      {Intrinsic::hexagon_A2_subh_h16_hl, 9701}, // __builtin_HEXAGON_A2_subh_h16_hl
      {Intrinsic::hexagon_A2_subh_h16_lh, 9734}, // __builtin_HEXAGON_A2_subh_h16_lh
      {Intrinsic::hexagon_A2_subh_h16_ll, 9767}, // __builtin_HEXAGON_A2_subh_h16_ll
      {Intrinsic::hexagon_A2_subh_h16_sat_hh, 9800}, // __builtin_HEXAGON_A2_subh_h16_sat_hh
      {Intrinsic::hexagon_A2_subh_h16_sat_hl, 9837}, // __builtin_HEXAGON_A2_subh_h16_sat_hl
      {Intrinsic::hexagon_A2_subh_h16_sat_lh, 9874}, // __builtin_HEXAGON_A2_subh_h16_sat_lh
      {Intrinsic::hexagon_A2_subh_h16_sat_ll, 9911}, // __builtin_HEXAGON_A2_subh_h16_sat_ll
      {Intrinsic::hexagon_A2_subh_l16_hl, 9948}, // __builtin_HEXAGON_A2_subh_l16_hl
      {Intrinsic::hexagon_A2_subh_l16_ll, 9981}, // __builtin_HEXAGON_A2_subh_l16_ll
      {Intrinsic::hexagon_A2_subh_l16_sat_hl, 10014}, // __builtin_HEXAGON_A2_subh_l16_sat_hl
      {Intrinsic::hexagon_A2_subh_l16_sat_ll, 10051}, // __builtin_HEXAGON_A2_subh_l16_sat_ll
      {Intrinsic::hexagon_A2_subp, 10088}, // __builtin_HEXAGON_A2_subp
      {Intrinsic::hexagon_A2_subri, 10114}, // __builtin_HEXAGON_A2_subri
      {Intrinsic::hexagon_A2_subsat, 10141}, // __builtin_HEXAGON_A2_subsat
      {Intrinsic::hexagon_A2_svaddh, 10169}, // __builtin_HEXAGON_A2_svaddh
      {Intrinsic::hexagon_A2_svaddhs, 10197}, // __builtin_HEXAGON_A2_svaddhs
      {Intrinsic::hexagon_A2_svadduhs, 10226}, // __builtin_HEXAGON_A2_svadduhs
      {Intrinsic::hexagon_A2_svavgh, 10256}, // __builtin_HEXAGON_A2_svavgh
      {Intrinsic::hexagon_A2_svavghs, 10284}, // __builtin_HEXAGON_A2_svavghs
      {Intrinsic::hexagon_A2_svnavgh, 10313}, // __builtin_HEXAGON_A2_svnavgh
      {Intrinsic::hexagon_A2_svsubh, 10342}, // __builtin_HEXAGON_A2_svsubh
      {Intrinsic::hexagon_A2_svsubhs, 10370}, // __builtin_HEXAGON_A2_svsubhs
      {Intrinsic::hexagon_A2_svsubuhs, 10399}, // __builtin_HEXAGON_A2_svsubuhs
      {Intrinsic::hexagon_A2_swiz, 10429}, // __builtin_HEXAGON_A2_swiz
      {Intrinsic::hexagon_A2_sxtb, 10455}, // __builtin_HEXAGON_A2_sxtb
      {Intrinsic::hexagon_A2_sxth, 10481}, // __builtin_HEXAGON_A2_sxth
      {Intrinsic::hexagon_A2_sxtw, 10507}, // __builtin_HEXAGON_A2_sxtw
      {Intrinsic::hexagon_A2_tfr, 10533}, // __builtin_HEXAGON_A2_tfr
      {Intrinsic::hexagon_A2_tfrih, 10558}, // __builtin_HEXAGON_A2_tfrih
      {Intrinsic::hexagon_A2_tfril, 10585}, // __builtin_HEXAGON_A2_tfril
      {Intrinsic::hexagon_A2_tfrp, 10612}, // __builtin_HEXAGON_A2_tfrp
      {Intrinsic::hexagon_A2_tfrpi, 10638}, // __builtin_HEXAGON_A2_tfrpi
      {Intrinsic::hexagon_A2_tfrsi, 10665}, // __builtin_HEXAGON_A2_tfrsi
      {Intrinsic::hexagon_A2_vabsh, 10692}, // __builtin_HEXAGON_A2_vabsh
      {Intrinsic::hexagon_A2_vabshsat, 10719}, // __builtin_HEXAGON_A2_vabshsat
      {Intrinsic::hexagon_A2_vabsw, 10749}, // __builtin_HEXAGON_A2_vabsw
      {Intrinsic::hexagon_A2_vabswsat, 10776}, // __builtin_HEXAGON_A2_vabswsat
      {Intrinsic::hexagon_A2_vaddb_map, 10806}, // __builtin_HEXAGON_A2_vaddb_map
      {Intrinsic::hexagon_A2_vaddh, 10837}, // __builtin_HEXAGON_A2_vaddh
      {Intrinsic::hexagon_A2_vaddhs, 10864}, // __builtin_HEXAGON_A2_vaddhs
      {Intrinsic::hexagon_A2_vaddub, 10892}, // __builtin_HEXAGON_A2_vaddub
      {Intrinsic::hexagon_A2_vaddubs, 10920}, // __builtin_HEXAGON_A2_vaddubs
      {Intrinsic::hexagon_A2_vadduhs, 10949}, // __builtin_HEXAGON_A2_vadduhs
      {Intrinsic::hexagon_A2_vaddw, 10978}, // __builtin_HEXAGON_A2_vaddw
      {Intrinsic::hexagon_A2_vaddws, 11005}, // __builtin_HEXAGON_A2_vaddws
      {Intrinsic::hexagon_A2_vavgh, 11033}, // __builtin_HEXAGON_A2_vavgh
      {Intrinsic::hexagon_A2_vavghcr, 11060}, // __builtin_HEXAGON_A2_vavghcr
      {Intrinsic::hexagon_A2_vavghr, 11089}, // __builtin_HEXAGON_A2_vavghr
      {Intrinsic::hexagon_A2_vavgub, 11117}, // __builtin_HEXAGON_A2_vavgub
      {Intrinsic::hexagon_A2_vavgubr, 11145}, // __builtin_HEXAGON_A2_vavgubr
      {Intrinsic::hexagon_A2_vavguh, 11174}, // __builtin_HEXAGON_A2_vavguh
      {Intrinsic::hexagon_A2_vavguhr, 11202}, // __builtin_HEXAGON_A2_vavguhr
      {Intrinsic::hexagon_A2_vavguw, 11231}, // __builtin_HEXAGON_A2_vavguw
      {Intrinsic::hexagon_A2_vavguwr, 11259}, // __builtin_HEXAGON_A2_vavguwr
      {Intrinsic::hexagon_A2_vavgw, 11288}, // __builtin_HEXAGON_A2_vavgw
      {Intrinsic::hexagon_A2_vavgwcr, 11315}, // __builtin_HEXAGON_A2_vavgwcr
      {Intrinsic::hexagon_A2_vavgwr, 11344}, // __builtin_HEXAGON_A2_vavgwr
      {Intrinsic::hexagon_A2_vcmpbeq, 11372}, // __builtin_HEXAGON_A2_vcmpbeq
      {Intrinsic::hexagon_A2_vcmpbgtu, 11401}, // __builtin_HEXAGON_A2_vcmpbgtu
      {Intrinsic::hexagon_A2_vcmpheq, 11431}, // __builtin_HEXAGON_A2_vcmpheq
      {Intrinsic::hexagon_A2_vcmphgt, 11460}, // __builtin_HEXAGON_A2_vcmphgt
      {Intrinsic::hexagon_A2_vcmphgtu, 11489}, // __builtin_HEXAGON_A2_vcmphgtu
      {Intrinsic::hexagon_A2_vcmpweq, 11519}, // __builtin_HEXAGON_A2_vcmpweq
      {Intrinsic::hexagon_A2_vcmpwgt, 11548}, // __builtin_HEXAGON_A2_vcmpwgt
      {Intrinsic::hexagon_A2_vcmpwgtu, 11577}, // __builtin_HEXAGON_A2_vcmpwgtu
      {Intrinsic::hexagon_A2_vconj, 11607}, // __builtin_HEXAGON_A2_vconj
      {Intrinsic::hexagon_A2_vmaxb, 11634}, // __builtin_HEXAGON_A2_vmaxb
      {Intrinsic::hexagon_A2_vmaxh, 11661}, // __builtin_HEXAGON_A2_vmaxh
      {Intrinsic::hexagon_A2_vmaxub, 11688}, // __builtin_HEXAGON_A2_vmaxub
      {Intrinsic::hexagon_A2_vmaxuh, 11716}, // __builtin_HEXAGON_A2_vmaxuh
      {Intrinsic::hexagon_A2_vmaxuw, 11744}, // __builtin_HEXAGON_A2_vmaxuw
      {Intrinsic::hexagon_A2_vmaxw, 11772}, // __builtin_HEXAGON_A2_vmaxw
      {Intrinsic::hexagon_A2_vminb, 11799}, // __builtin_HEXAGON_A2_vminb
      {Intrinsic::hexagon_A2_vminh, 11826}, // __builtin_HEXAGON_A2_vminh
      {Intrinsic::hexagon_A2_vminub, 11853}, // __builtin_HEXAGON_A2_vminub
      {Intrinsic::hexagon_A2_vminuh, 11881}, // __builtin_HEXAGON_A2_vminuh
      {Intrinsic::hexagon_A2_vminuw, 11909}, // __builtin_HEXAGON_A2_vminuw
      {Intrinsic::hexagon_A2_vminw, 11937}, // __builtin_HEXAGON_A2_vminw
      {Intrinsic::hexagon_A2_vnavgh, 11964}, // __builtin_HEXAGON_A2_vnavgh
      {Intrinsic::hexagon_A2_vnavghcr, 11992}, // __builtin_HEXAGON_A2_vnavghcr
      {Intrinsic::hexagon_A2_vnavghr, 12022}, // __builtin_HEXAGON_A2_vnavghr
      {Intrinsic::hexagon_A2_vnavgw, 12051}, // __builtin_HEXAGON_A2_vnavgw
      {Intrinsic::hexagon_A2_vnavgwcr, 12079}, // __builtin_HEXAGON_A2_vnavgwcr
      {Intrinsic::hexagon_A2_vnavgwr, 12109}, // __builtin_HEXAGON_A2_vnavgwr
      {Intrinsic::hexagon_A2_vraddub, 12138}, // __builtin_HEXAGON_A2_vraddub
      {Intrinsic::hexagon_A2_vraddub_acc, 12167}, // __builtin_HEXAGON_A2_vraddub_acc
      {Intrinsic::hexagon_A2_vrsadub, 12200}, // __builtin_HEXAGON_A2_vrsadub
      {Intrinsic::hexagon_A2_vrsadub_acc, 12229}, // __builtin_HEXAGON_A2_vrsadub_acc
      {Intrinsic::hexagon_A2_vsubb_map, 12262}, // __builtin_HEXAGON_A2_vsubb_map
      {Intrinsic::hexagon_A2_vsubh, 12293}, // __builtin_HEXAGON_A2_vsubh
      {Intrinsic::hexagon_A2_vsubhs, 12320}, // __builtin_HEXAGON_A2_vsubhs
      {Intrinsic::hexagon_A2_vsubub, 12348}, // __builtin_HEXAGON_A2_vsubub
      {Intrinsic::hexagon_A2_vsububs, 12376}, // __builtin_HEXAGON_A2_vsububs
      {Intrinsic::hexagon_A2_vsubuhs, 12405}, // __builtin_HEXAGON_A2_vsubuhs
      {Intrinsic::hexagon_A2_vsubw, 12434}, // __builtin_HEXAGON_A2_vsubw
      {Intrinsic::hexagon_A2_vsubws, 12461}, // __builtin_HEXAGON_A2_vsubws
      {Intrinsic::hexagon_A2_xor, 12489}, // __builtin_HEXAGON_A2_xor
      {Intrinsic::hexagon_A2_xorp, 12514}, // __builtin_HEXAGON_A2_xorp
      {Intrinsic::hexagon_A2_zxtb, 12540}, // __builtin_HEXAGON_A2_zxtb
      {Intrinsic::hexagon_A2_zxth, 12566}, // __builtin_HEXAGON_A2_zxth
      {Intrinsic::hexagon_A4_andn, 12592}, // __builtin_HEXAGON_A4_andn
      {Intrinsic::hexagon_A4_andnp, 12618}, // __builtin_HEXAGON_A4_andnp
      {Intrinsic::hexagon_A4_bitsplit, 12645}, // __builtin_HEXAGON_A4_bitsplit
      {Intrinsic::hexagon_A4_bitspliti, 12675}, // __builtin_HEXAGON_A4_bitspliti
      {Intrinsic::hexagon_A4_boundscheck, 12706}, // __builtin_HEXAGON_A4_boundscheck
      {Intrinsic::hexagon_A4_cmpbeq, 12739}, // __builtin_HEXAGON_A4_cmpbeq
      {Intrinsic::hexagon_A4_cmpbeqi, 12767}, // __builtin_HEXAGON_A4_cmpbeqi
      {Intrinsic::hexagon_A4_cmpbgt, 12796}, // __builtin_HEXAGON_A4_cmpbgt
      {Intrinsic::hexagon_A4_cmpbgti, 12824}, // __builtin_HEXAGON_A4_cmpbgti
      {Intrinsic::hexagon_A4_cmpbgtu, 12853}, // __builtin_HEXAGON_A4_cmpbgtu
      {Intrinsic::hexagon_A4_cmpbgtui, 12882}, // __builtin_HEXAGON_A4_cmpbgtui
      {Intrinsic::hexagon_A4_cmpheq, 12912}, // __builtin_HEXAGON_A4_cmpheq
      {Intrinsic::hexagon_A4_cmpheqi, 12940}, // __builtin_HEXAGON_A4_cmpheqi
      {Intrinsic::hexagon_A4_cmphgt, 12969}, // __builtin_HEXAGON_A4_cmphgt
      {Intrinsic::hexagon_A4_cmphgti, 12997}, // __builtin_HEXAGON_A4_cmphgti
      {Intrinsic::hexagon_A4_cmphgtu, 13026}, // __builtin_HEXAGON_A4_cmphgtu
      {Intrinsic::hexagon_A4_cmphgtui, 13055}, // __builtin_HEXAGON_A4_cmphgtui
      {Intrinsic::hexagon_A4_combineir, 13085}, // __builtin_HEXAGON_A4_combineir
      {Intrinsic::hexagon_A4_combineri, 13116}, // __builtin_HEXAGON_A4_combineri
      {Intrinsic::hexagon_A4_cround_ri, 13147}, // __builtin_HEXAGON_A4_cround_ri
      {Intrinsic::hexagon_A4_cround_rr, 13178}, // __builtin_HEXAGON_A4_cround_rr
      {Intrinsic::hexagon_A4_modwrapu, 13209}, // __builtin_HEXAGON_A4_modwrapu
      {Intrinsic::hexagon_A4_orn, 13239}, // __builtin_HEXAGON_A4_orn
      {Intrinsic::hexagon_A4_ornp, 13264}, // __builtin_HEXAGON_A4_ornp
      {Intrinsic::hexagon_A4_rcmpeq, 13290}, // __builtin_HEXAGON_A4_rcmpeq
      {Intrinsic::hexagon_A4_rcmpeqi, 13318}, // __builtin_HEXAGON_A4_rcmpeqi
      {Intrinsic::hexagon_A4_rcmpneq, 13347}, // __builtin_HEXAGON_A4_rcmpneq
      {Intrinsic::hexagon_A4_rcmpneqi, 13376}, // __builtin_HEXAGON_A4_rcmpneqi
      {Intrinsic::hexagon_A4_round_ri, 13406}, // __builtin_HEXAGON_A4_round_ri
      {Intrinsic::hexagon_A4_round_ri_sat, 13436}, // __builtin_HEXAGON_A4_round_ri_sat
      {Intrinsic::hexagon_A4_round_rr, 13470}, // __builtin_HEXAGON_A4_round_rr
      {Intrinsic::hexagon_A4_round_rr_sat, 13500}, // __builtin_HEXAGON_A4_round_rr_sat
      {Intrinsic::hexagon_A4_tlbmatch, 13534}, // __builtin_HEXAGON_A4_tlbmatch
      {Intrinsic::hexagon_A4_vcmpbeq_any, 13564}, // __builtin_HEXAGON_A4_vcmpbeq_any
      {Intrinsic::hexagon_A4_vcmpbeqi, 13597}, // __builtin_HEXAGON_A4_vcmpbeqi
      {Intrinsic::hexagon_A4_vcmpbgt, 13627}, // __builtin_HEXAGON_A4_vcmpbgt
      {Intrinsic::hexagon_A4_vcmpbgti, 13656}, // __builtin_HEXAGON_A4_vcmpbgti
      {Intrinsic::hexagon_A4_vcmpbgtui, 13686}, // __builtin_HEXAGON_A4_vcmpbgtui
      {Intrinsic::hexagon_A4_vcmpheqi, 13717}, // __builtin_HEXAGON_A4_vcmpheqi
      {Intrinsic::hexagon_A4_vcmphgti, 13747}, // __builtin_HEXAGON_A4_vcmphgti
      {Intrinsic::hexagon_A4_vcmphgtui, 13777}, // __builtin_HEXAGON_A4_vcmphgtui
      {Intrinsic::hexagon_A4_vcmpweqi, 13808}, // __builtin_HEXAGON_A4_vcmpweqi
      {Intrinsic::hexagon_A4_vcmpwgti, 13838}, // __builtin_HEXAGON_A4_vcmpwgti
      {Intrinsic::hexagon_A4_vcmpwgtui, 13868}, // __builtin_HEXAGON_A4_vcmpwgtui
      {Intrinsic::hexagon_A4_vrmaxh, 13899}, // __builtin_HEXAGON_A4_vrmaxh
      {Intrinsic::hexagon_A4_vrmaxuh, 13927}, // __builtin_HEXAGON_A4_vrmaxuh
      {Intrinsic::hexagon_A4_vrmaxuw, 13956}, // __builtin_HEXAGON_A4_vrmaxuw
      {Intrinsic::hexagon_A4_vrmaxw, 13985}, // __builtin_HEXAGON_A4_vrmaxw
      {Intrinsic::hexagon_A4_vrminh, 14013}, // __builtin_HEXAGON_A4_vrminh
      {Intrinsic::hexagon_A4_vrminuh, 14041}, // __builtin_HEXAGON_A4_vrminuh
      {Intrinsic::hexagon_A4_vrminuw, 14070}, // __builtin_HEXAGON_A4_vrminuw
      {Intrinsic::hexagon_A4_vrminw, 14099}, // __builtin_HEXAGON_A4_vrminw
      {Intrinsic::hexagon_A5_vaddhubs, 14127}, // __builtin_HEXAGON_A5_vaddhubs
      {Intrinsic::hexagon_A6_vcmpbeq_notany, 14157}, // __builtin_HEXAGON_A6_vcmpbeq_notany
      {Intrinsic::hexagon_A7_clip, 14193}, // __builtin_HEXAGON_A7_clip
      {Intrinsic::hexagon_A7_croundd_ri, 14219}, // __builtin_HEXAGON_A7_croundd_ri
      {Intrinsic::hexagon_A7_croundd_rr, 14251}, // __builtin_HEXAGON_A7_croundd_rr
      {Intrinsic::hexagon_A7_vclip, 14283}, // __builtin_HEXAGON_A7_vclip
      {Intrinsic::hexagon_C2_all8, 14310}, // __builtin_HEXAGON_C2_all8
      {Intrinsic::hexagon_C2_and, 14336}, // __builtin_HEXAGON_C2_and
      {Intrinsic::hexagon_C2_andn, 14361}, // __builtin_HEXAGON_C2_andn
      {Intrinsic::hexagon_C2_any8, 14387}, // __builtin_HEXAGON_C2_any8
      {Intrinsic::hexagon_C2_bitsclr, 14413}, // __builtin_HEXAGON_C2_bitsclr
      {Intrinsic::hexagon_C2_bitsclri, 14442}, // __builtin_HEXAGON_C2_bitsclri
      {Intrinsic::hexagon_C2_bitsset, 14472}, // __builtin_HEXAGON_C2_bitsset
      {Intrinsic::hexagon_C2_cmpeq, 14501}, // __builtin_HEXAGON_C2_cmpeq
      {Intrinsic::hexagon_C2_cmpeqi, 14528}, // __builtin_HEXAGON_C2_cmpeqi
      {Intrinsic::hexagon_C2_cmpeqp, 14556}, // __builtin_HEXAGON_C2_cmpeqp
      {Intrinsic::hexagon_C2_cmpgei, 14584}, // __builtin_HEXAGON_C2_cmpgei
      {Intrinsic::hexagon_C2_cmpgeui, 14612}, // __builtin_HEXAGON_C2_cmpgeui
      {Intrinsic::hexagon_C2_cmpgt, 14641}, // __builtin_HEXAGON_C2_cmpgt
      {Intrinsic::hexagon_C2_cmpgti, 14668}, // __builtin_HEXAGON_C2_cmpgti
      {Intrinsic::hexagon_C2_cmpgtp, 14696}, // __builtin_HEXAGON_C2_cmpgtp
      {Intrinsic::hexagon_C2_cmpgtu, 14724}, // __builtin_HEXAGON_C2_cmpgtu
      {Intrinsic::hexagon_C2_cmpgtui, 14752}, // __builtin_HEXAGON_C2_cmpgtui
      {Intrinsic::hexagon_C2_cmpgtup, 14781}, // __builtin_HEXAGON_C2_cmpgtup
      {Intrinsic::hexagon_C2_cmplt, 14810}, // __builtin_HEXAGON_C2_cmplt
      {Intrinsic::hexagon_C2_cmpltu, 14837}, // __builtin_HEXAGON_C2_cmpltu
      {Intrinsic::hexagon_C2_mask, 14865}, // __builtin_HEXAGON_C2_mask
      {Intrinsic::hexagon_C2_mux, 14891}, // __builtin_HEXAGON_C2_mux
      {Intrinsic::hexagon_C2_muxii, 14916}, // __builtin_HEXAGON_C2_muxii
      {Intrinsic::hexagon_C2_muxir, 14943}, // __builtin_HEXAGON_C2_muxir
      {Intrinsic::hexagon_C2_muxri, 14970}, // __builtin_HEXAGON_C2_muxri
      {Intrinsic::hexagon_C2_not, 14997}, // __builtin_HEXAGON_C2_not
      {Intrinsic::hexagon_C2_or, 15022}, // __builtin_HEXAGON_C2_or
      {Intrinsic::hexagon_C2_orn, 15046}, // __builtin_HEXAGON_C2_orn
      {Intrinsic::hexagon_C2_pxfer_map, 15071}, // __builtin_HEXAGON_C2_pxfer_map
      {Intrinsic::hexagon_C2_tfrpr, 15102}, // __builtin_HEXAGON_C2_tfrpr
      {Intrinsic::hexagon_C2_tfrrp, 15129}, // __builtin_HEXAGON_C2_tfrrp
      {Intrinsic::hexagon_C2_vitpack, 15156}, // __builtin_HEXAGON_C2_vitpack
      {Intrinsic::hexagon_C2_vmux, 15185}, // __builtin_HEXAGON_C2_vmux
      {Intrinsic::hexagon_C2_xor, 15211}, // __builtin_HEXAGON_C2_xor
      {Intrinsic::hexagon_C4_and_and, 15236}, // __builtin_HEXAGON_C4_and_and
      {Intrinsic::hexagon_C4_and_andn, 15265}, // __builtin_HEXAGON_C4_and_andn
      {Intrinsic::hexagon_C4_and_or, 15295}, // __builtin_HEXAGON_C4_and_or
      {Intrinsic::hexagon_C4_and_orn, 15323}, // __builtin_HEXAGON_C4_and_orn
      {Intrinsic::hexagon_C4_cmplte, 15352}, // __builtin_HEXAGON_C4_cmplte
      {Intrinsic::hexagon_C4_cmpltei, 15380}, // __builtin_HEXAGON_C4_cmpltei
      {Intrinsic::hexagon_C4_cmplteu, 15409}, // __builtin_HEXAGON_C4_cmplteu
      {Intrinsic::hexagon_C4_cmplteui, 15438}, // __builtin_HEXAGON_C4_cmplteui
      {Intrinsic::hexagon_C4_cmpneq, 15468}, // __builtin_HEXAGON_C4_cmpneq
      {Intrinsic::hexagon_C4_cmpneqi, 15496}, // __builtin_HEXAGON_C4_cmpneqi
      {Intrinsic::hexagon_C4_fastcorner9, 15525}, // __builtin_HEXAGON_C4_fastcorner9
      {Intrinsic::hexagon_C4_fastcorner9_not, 15558}, // __builtin_HEXAGON_C4_fastcorner9_not
      {Intrinsic::hexagon_C4_nbitsclr, 15595}, // __builtin_HEXAGON_C4_nbitsclr
      {Intrinsic::hexagon_C4_nbitsclri, 15625}, // __builtin_HEXAGON_C4_nbitsclri
      {Intrinsic::hexagon_C4_nbitsset, 15656}, // __builtin_HEXAGON_C4_nbitsset
      {Intrinsic::hexagon_C4_or_and, 15686}, // __builtin_HEXAGON_C4_or_and
      {Intrinsic::hexagon_C4_or_andn, 15714}, // __builtin_HEXAGON_C4_or_andn
      {Intrinsic::hexagon_C4_or_or, 15743}, // __builtin_HEXAGON_C4_or_or
      {Intrinsic::hexagon_C4_or_orn, 15770}, // __builtin_HEXAGON_C4_or_orn
      {Intrinsic::hexagon_F2_conv_d2df, 15798}, // __builtin_HEXAGON_F2_conv_d2df
      {Intrinsic::hexagon_F2_conv_d2sf, 15829}, // __builtin_HEXAGON_F2_conv_d2sf
      {Intrinsic::hexagon_F2_conv_df2d, 15860}, // __builtin_HEXAGON_F2_conv_df2d
      {Intrinsic::hexagon_F2_conv_df2d_chop, 15891}, // __builtin_HEXAGON_F2_conv_df2d_chop
      {Intrinsic::hexagon_F2_conv_df2sf, 15927}, // __builtin_HEXAGON_F2_conv_df2sf
      {Intrinsic::hexagon_F2_conv_df2ud, 15959}, // __builtin_HEXAGON_F2_conv_df2ud
      {Intrinsic::hexagon_F2_conv_df2ud_chop, 15991}, // __builtin_HEXAGON_F2_conv_df2ud_chop
      {Intrinsic::hexagon_F2_conv_df2uw, 16028}, // __builtin_HEXAGON_F2_conv_df2uw
      {Intrinsic::hexagon_F2_conv_df2uw_chop, 16060}, // __builtin_HEXAGON_F2_conv_df2uw_chop
      {Intrinsic::hexagon_F2_conv_df2w, 16097}, // __builtin_HEXAGON_F2_conv_df2w
      {Intrinsic::hexagon_F2_conv_df2w_chop, 16128}, // __builtin_HEXAGON_F2_conv_df2w_chop
      {Intrinsic::hexagon_F2_conv_sf2d, 16164}, // __builtin_HEXAGON_F2_conv_sf2d
      {Intrinsic::hexagon_F2_conv_sf2d_chop, 16195}, // __builtin_HEXAGON_F2_conv_sf2d_chop
      {Intrinsic::hexagon_F2_conv_sf2df, 16231}, // __builtin_HEXAGON_F2_conv_sf2df
      {Intrinsic::hexagon_F2_conv_sf2ud, 16263}, // __builtin_HEXAGON_F2_conv_sf2ud
      {Intrinsic::hexagon_F2_conv_sf2ud_chop, 16295}, // __builtin_HEXAGON_F2_conv_sf2ud_chop
      {Intrinsic::hexagon_F2_conv_sf2uw, 16332}, // __builtin_HEXAGON_F2_conv_sf2uw
      {Intrinsic::hexagon_F2_conv_sf2uw_chop, 16364}, // __builtin_HEXAGON_F2_conv_sf2uw_chop
      {Intrinsic::hexagon_F2_conv_sf2w, 16401}, // __builtin_HEXAGON_F2_conv_sf2w
      {Intrinsic::hexagon_F2_conv_sf2w_chop, 16432}, // __builtin_HEXAGON_F2_conv_sf2w_chop
      {Intrinsic::hexagon_F2_conv_ud2df, 16468}, // __builtin_HEXAGON_F2_conv_ud2df
      {Intrinsic::hexagon_F2_conv_ud2sf, 16500}, // __builtin_HEXAGON_F2_conv_ud2sf
      {Intrinsic::hexagon_F2_conv_uw2df, 16532}, // __builtin_HEXAGON_F2_conv_uw2df
      {Intrinsic::hexagon_F2_conv_uw2sf, 16564}, // __builtin_HEXAGON_F2_conv_uw2sf
      {Intrinsic::hexagon_F2_conv_w2df, 16596}, // __builtin_HEXAGON_F2_conv_w2df
      {Intrinsic::hexagon_F2_conv_w2sf, 16627}, // __builtin_HEXAGON_F2_conv_w2sf
      {Intrinsic::hexagon_F2_dfadd, 16658}, // __builtin_HEXAGON_F2_dfadd
      {Intrinsic::hexagon_F2_dfclass, 16685}, // __builtin_HEXAGON_F2_dfclass
      {Intrinsic::hexagon_F2_dfcmpeq, 16714}, // __builtin_HEXAGON_F2_dfcmpeq
      {Intrinsic::hexagon_F2_dfcmpge, 16743}, // __builtin_HEXAGON_F2_dfcmpge
      {Intrinsic::hexagon_F2_dfcmpgt, 16772}, // __builtin_HEXAGON_F2_dfcmpgt
      {Intrinsic::hexagon_F2_dfcmpuo, 16801}, // __builtin_HEXAGON_F2_dfcmpuo
      {Intrinsic::hexagon_F2_dfimm_n, 16830}, // __builtin_HEXAGON_F2_dfimm_n
      {Intrinsic::hexagon_F2_dfimm_p, 16859}, // __builtin_HEXAGON_F2_dfimm_p
      {Intrinsic::hexagon_F2_dfmax, 16888}, // __builtin_HEXAGON_F2_dfmax
      {Intrinsic::hexagon_F2_dfmin, 16915}, // __builtin_HEXAGON_F2_dfmin
      {Intrinsic::hexagon_F2_dfmpyfix, 16942}, // __builtin_HEXAGON_F2_dfmpyfix
      {Intrinsic::hexagon_F2_dfmpyhh, 16972}, // __builtin_HEXAGON_F2_dfmpyhh
      {Intrinsic::hexagon_F2_dfmpylh, 17001}, // __builtin_HEXAGON_F2_dfmpylh
      {Intrinsic::hexagon_F2_dfmpyll, 17030}, // __builtin_HEXAGON_F2_dfmpyll
      {Intrinsic::hexagon_F2_dfsub, 17059}, // __builtin_HEXAGON_F2_dfsub
      {Intrinsic::hexagon_F2_sfadd, 17086}, // __builtin_HEXAGON_F2_sfadd
      {Intrinsic::hexagon_F2_sfclass, 17113}, // __builtin_HEXAGON_F2_sfclass
      {Intrinsic::hexagon_F2_sfcmpeq, 17142}, // __builtin_HEXAGON_F2_sfcmpeq
      {Intrinsic::hexagon_F2_sfcmpge, 17171}, // __builtin_HEXAGON_F2_sfcmpge
      {Intrinsic::hexagon_F2_sfcmpgt, 17200}, // __builtin_HEXAGON_F2_sfcmpgt
      {Intrinsic::hexagon_F2_sfcmpuo, 17229}, // __builtin_HEXAGON_F2_sfcmpuo
      {Intrinsic::hexagon_F2_sffixupd, 17258}, // __builtin_HEXAGON_F2_sffixupd
      {Intrinsic::hexagon_F2_sffixupn, 17288}, // __builtin_HEXAGON_F2_sffixupn
      {Intrinsic::hexagon_F2_sffixupr, 17318}, // __builtin_HEXAGON_F2_sffixupr
      {Intrinsic::hexagon_F2_sffma, 17348}, // __builtin_HEXAGON_F2_sffma
      {Intrinsic::hexagon_F2_sffma_lib, 17375}, // __builtin_HEXAGON_F2_sffma_lib
      {Intrinsic::hexagon_F2_sffma_sc, 17406}, // __builtin_HEXAGON_F2_sffma_sc
      {Intrinsic::hexagon_F2_sffms, 17436}, // __builtin_HEXAGON_F2_sffms
      {Intrinsic::hexagon_F2_sffms_lib, 17463}, // __builtin_HEXAGON_F2_sffms_lib
      {Intrinsic::hexagon_F2_sfimm_n, 17494}, // __builtin_HEXAGON_F2_sfimm_n
      {Intrinsic::hexagon_F2_sfimm_p, 17523}, // __builtin_HEXAGON_F2_sfimm_p
      {Intrinsic::hexagon_F2_sfmax, 17552}, // __builtin_HEXAGON_F2_sfmax
      {Intrinsic::hexagon_F2_sfmin, 17579}, // __builtin_HEXAGON_F2_sfmin
      {Intrinsic::hexagon_F2_sfmpy, 17606}, // __builtin_HEXAGON_F2_sfmpy
      {Intrinsic::hexagon_F2_sfsub, 17633}, // __builtin_HEXAGON_F2_sfsub
      {Intrinsic::hexagon_L2_loadw_locked, 17660}, // __builtin_HEXAGON_L2_loadw_locked
      {Intrinsic::hexagon_M2_acci, 17729}, // __builtin_HEXAGON_M2_acci
      {Intrinsic::hexagon_M2_accii, 17755}, // __builtin_HEXAGON_M2_accii
      {Intrinsic::hexagon_M2_cmaci_s0, 17782}, // __builtin_HEXAGON_M2_cmaci_s0
      {Intrinsic::hexagon_M2_cmacr_s0, 17812}, // __builtin_HEXAGON_M2_cmacr_s0
      {Intrinsic::hexagon_M2_cmacs_s0, 17842}, // __builtin_HEXAGON_M2_cmacs_s0
      {Intrinsic::hexagon_M2_cmacs_s1, 17872}, // __builtin_HEXAGON_M2_cmacs_s1
      {Intrinsic::hexagon_M2_cmacsc_s0, 17902}, // __builtin_HEXAGON_M2_cmacsc_s0
      {Intrinsic::hexagon_M2_cmacsc_s1, 17933}, // __builtin_HEXAGON_M2_cmacsc_s1
      {Intrinsic::hexagon_M2_cmpyi_s0, 17964}, // __builtin_HEXAGON_M2_cmpyi_s0
      {Intrinsic::hexagon_M2_cmpyr_s0, 17994}, // __builtin_HEXAGON_M2_cmpyr_s0
      {Intrinsic::hexagon_M2_cmpyrs_s0, 18024}, // __builtin_HEXAGON_M2_cmpyrs_s0
      {Intrinsic::hexagon_M2_cmpyrs_s1, 18055}, // __builtin_HEXAGON_M2_cmpyrs_s1
      {Intrinsic::hexagon_M2_cmpyrsc_s0, 18086}, // __builtin_HEXAGON_M2_cmpyrsc_s0
      {Intrinsic::hexagon_M2_cmpyrsc_s1, 18118}, // __builtin_HEXAGON_M2_cmpyrsc_s1
      {Intrinsic::hexagon_M2_cmpys_s0, 18150}, // __builtin_HEXAGON_M2_cmpys_s0
      {Intrinsic::hexagon_M2_cmpys_s1, 18180}, // __builtin_HEXAGON_M2_cmpys_s1
      {Intrinsic::hexagon_M2_cmpysc_s0, 18210}, // __builtin_HEXAGON_M2_cmpysc_s0
      {Intrinsic::hexagon_M2_cmpysc_s1, 18241}, // __builtin_HEXAGON_M2_cmpysc_s1
      {Intrinsic::hexagon_M2_cnacs_s0, 18272}, // __builtin_HEXAGON_M2_cnacs_s0
      {Intrinsic::hexagon_M2_cnacs_s1, 18302}, // __builtin_HEXAGON_M2_cnacs_s1
      {Intrinsic::hexagon_M2_cnacsc_s0, 18332}, // __builtin_HEXAGON_M2_cnacsc_s0
      {Intrinsic::hexagon_M2_cnacsc_s1, 18363}, // __builtin_HEXAGON_M2_cnacsc_s1
      {Intrinsic::hexagon_M2_dpmpyss_acc_s0, 18394}, // __builtin_HEXAGON_M2_dpmpyss_acc_s0
      {Intrinsic::hexagon_M2_dpmpyss_nac_s0, 18430}, // __builtin_HEXAGON_M2_dpmpyss_nac_s0
      {Intrinsic::hexagon_M2_dpmpyss_rnd_s0, 18466}, // __builtin_HEXAGON_M2_dpmpyss_rnd_s0
      {Intrinsic::hexagon_M2_dpmpyss_s0, 18502}, // __builtin_HEXAGON_M2_dpmpyss_s0
      {Intrinsic::hexagon_M2_dpmpyuu_acc_s0, 18534}, // __builtin_HEXAGON_M2_dpmpyuu_acc_s0
      {Intrinsic::hexagon_M2_dpmpyuu_nac_s0, 18570}, // __builtin_HEXAGON_M2_dpmpyuu_nac_s0
      {Intrinsic::hexagon_M2_dpmpyuu_s0, 18606}, // __builtin_HEXAGON_M2_dpmpyuu_s0
      {Intrinsic::hexagon_M2_hmmpyh_rs1, 18638}, // __builtin_HEXAGON_M2_hmmpyh_rs1
      {Intrinsic::hexagon_M2_hmmpyh_s1, 18670}, // __builtin_HEXAGON_M2_hmmpyh_s1
      {Intrinsic::hexagon_M2_hmmpyl_rs1, 18701}, // __builtin_HEXAGON_M2_hmmpyl_rs1
      {Intrinsic::hexagon_M2_hmmpyl_s1, 18733}, // __builtin_HEXAGON_M2_hmmpyl_s1
      {Intrinsic::hexagon_M2_maci, 18764}, // __builtin_HEXAGON_M2_maci
      {Intrinsic::hexagon_M2_macsin, 18790}, // __builtin_HEXAGON_M2_macsin
      {Intrinsic::hexagon_M2_macsip, 18818}, // __builtin_HEXAGON_M2_macsip
      {Intrinsic::hexagon_M2_mmachs_rs0, 18846}, // __builtin_HEXAGON_M2_mmachs_rs0
      {Intrinsic::hexagon_M2_mmachs_rs1, 18878}, // __builtin_HEXAGON_M2_mmachs_rs1
      {Intrinsic::hexagon_M2_mmachs_s0, 18910}, // __builtin_HEXAGON_M2_mmachs_s0
      {Intrinsic::hexagon_M2_mmachs_s1, 18941}, // __builtin_HEXAGON_M2_mmachs_s1
      {Intrinsic::hexagon_M2_mmacls_rs0, 18972}, // __builtin_HEXAGON_M2_mmacls_rs0
      {Intrinsic::hexagon_M2_mmacls_rs1, 19004}, // __builtin_HEXAGON_M2_mmacls_rs1
      {Intrinsic::hexagon_M2_mmacls_s0, 19036}, // __builtin_HEXAGON_M2_mmacls_s0
      {Intrinsic::hexagon_M2_mmacls_s1, 19067}, // __builtin_HEXAGON_M2_mmacls_s1
      {Intrinsic::hexagon_M2_mmacuhs_rs0, 19098}, // __builtin_HEXAGON_M2_mmacuhs_rs0
      {Intrinsic::hexagon_M2_mmacuhs_rs1, 19131}, // __builtin_HEXAGON_M2_mmacuhs_rs1
      {Intrinsic::hexagon_M2_mmacuhs_s0, 19164}, // __builtin_HEXAGON_M2_mmacuhs_s0
      {Intrinsic::hexagon_M2_mmacuhs_s1, 19196}, // __builtin_HEXAGON_M2_mmacuhs_s1
      {Intrinsic::hexagon_M2_mmaculs_rs0, 19228}, // __builtin_HEXAGON_M2_mmaculs_rs0
      {Intrinsic::hexagon_M2_mmaculs_rs1, 19261}, // __builtin_HEXAGON_M2_mmaculs_rs1
      {Intrinsic::hexagon_M2_mmaculs_s0, 19294}, // __builtin_HEXAGON_M2_mmaculs_s0
      {Intrinsic::hexagon_M2_mmaculs_s1, 19326}, // __builtin_HEXAGON_M2_mmaculs_s1
      {Intrinsic::hexagon_M2_mmpyh_rs0, 19358}, // __builtin_HEXAGON_M2_mmpyh_rs0
      {Intrinsic::hexagon_M2_mmpyh_rs1, 19389}, // __builtin_HEXAGON_M2_mmpyh_rs1
      {Intrinsic::hexagon_M2_mmpyh_s0, 19420}, // __builtin_HEXAGON_M2_mmpyh_s0
      {Intrinsic::hexagon_M2_mmpyh_s1, 19450}, // __builtin_HEXAGON_M2_mmpyh_s1
      {Intrinsic::hexagon_M2_mmpyl_rs0, 19480}, // __builtin_HEXAGON_M2_mmpyl_rs0
      {Intrinsic::hexagon_M2_mmpyl_rs1, 19511}, // __builtin_HEXAGON_M2_mmpyl_rs1
      {Intrinsic::hexagon_M2_mmpyl_s0, 19542}, // __builtin_HEXAGON_M2_mmpyl_s0
      {Intrinsic::hexagon_M2_mmpyl_s1, 19572}, // __builtin_HEXAGON_M2_mmpyl_s1
      {Intrinsic::hexagon_M2_mmpyuh_rs0, 19602}, // __builtin_HEXAGON_M2_mmpyuh_rs0
      {Intrinsic::hexagon_M2_mmpyuh_rs1, 19634}, // __builtin_HEXAGON_M2_mmpyuh_rs1
      {Intrinsic::hexagon_M2_mmpyuh_s0, 19666}, // __builtin_HEXAGON_M2_mmpyuh_s0
      {Intrinsic::hexagon_M2_mmpyuh_s1, 19697}, // __builtin_HEXAGON_M2_mmpyuh_s1
      {Intrinsic::hexagon_M2_mmpyul_rs0, 19728}, // __builtin_HEXAGON_M2_mmpyul_rs0
      {Intrinsic::hexagon_M2_mmpyul_rs1, 19760}, // __builtin_HEXAGON_M2_mmpyul_rs1
      {Intrinsic::hexagon_M2_mmpyul_s0, 19792}, // __builtin_HEXAGON_M2_mmpyul_s0
      {Intrinsic::hexagon_M2_mmpyul_s1, 19823}, // __builtin_HEXAGON_M2_mmpyul_s1
      {Intrinsic::hexagon_M2_mnaci, 19854}, // __builtin_HEXAGON_M2_mnaci
      {Intrinsic::hexagon_M2_mpy_acc_hh_s0, 19881}, // __builtin_HEXAGON_M2_mpy_acc_hh_s0
      {Intrinsic::hexagon_M2_mpy_acc_hh_s1, 19916}, // __builtin_HEXAGON_M2_mpy_acc_hh_s1
      {Intrinsic::hexagon_M2_mpy_acc_hl_s0, 19951}, // __builtin_HEXAGON_M2_mpy_acc_hl_s0
      {Intrinsic::hexagon_M2_mpy_acc_hl_s1, 19986}, // __builtin_HEXAGON_M2_mpy_acc_hl_s1
      {Intrinsic::hexagon_M2_mpy_acc_lh_s0, 20021}, // __builtin_HEXAGON_M2_mpy_acc_lh_s0
      {Intrinsic::hexagon_M2_mpy_acc_lh_s1, 20056}, // __builtin_HEXAGON_M2_mpy_acc_lh_s1
      {Intrinsic::hexagon_M2_mpy_acc_ll_s0, 20091}, // __builtin_HEXAGON_M2_mpy_acc_ll_s0
      {Intrinsic::hexagon_M2_mpy_acc_ll_s1, 20126}, // __builtin_HEXAGON_M2_mpy_acc_ll_s1
      {Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0, 20161}, // __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0
      {Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1, 20200}, // __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1
      {Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0, 20239}, // __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0
      {Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1, 20278}, // __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1
      {Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0, 20317}, // __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0
      {Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1, 20356}, // __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1
      {Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0, 20395}, // __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0
      {Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1, 20434}, // __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1
      {Intrinsic::hexagon_M2_mpy_hh_s0, 20473}, // __builtin_HEXAGON_M2_mpy_hh_s0
      {Intrinsic::hexagon_M2_mpy_hh_s1, 20504}, // __builtin_HEXAGON_M2_mpy_hh_s1
      {Intrinsic::hexagon_M2_mpy_hl_s0, 20535}, // __builtin_HEXAGON_M2_mpy_hl_s0
      {Intrinsic::hexagon_M2_mpy_hl_s1, 20566}, // __builtin_HEXAGON_M2_mpy_hl_s1
      {Intrinsic::hexagon_M2_mpy_lh_s0, 20597}, // __builtin_HEXAGON_M2_mpy_lh_s0
      {Intrinsic::hexagon_M2_mpy_lh_s1, 20628}, // __builtin_HEXAGON_M2_mpy_lh_s1
      {Intrinsic::hexagon_M2_mpy_ll_s0, 20659}, // __builtin_HEXAGON_M2_mpy_ll_s0
      {Intrinsic::hexagon_M2_mpy_ll_s1, 20690}, // __builtin_HEXAGON_M2_mpy_ll_s1
      {Intrinsic::hexagon_M2_mpy_nac_hh_s0, 20721}, // __builtin_HEXAGON_M2_mpy_nac_hh_s0
      {Intrinsic::hexagon_M2_mpy_nac_hh_s1, 20756}, // __builtin_HEXAGON_M2_mpy_nac_hh_s1
      {Intrinsic::hexagon_M2_mpy_nac_hl_s0, 20791}, // __builtin_HEXAGON_M2_mpy_nac_hl_s0
      {Intrinsic::hexagon_M2_mpy_nac_hl_s1, 20826}, // __builtin_HEXAGON_M2_mpy_nac_hl_s1
      {Intrinsic::hexagon_M2_mpy_nac_lh_s0, 20861}, // __builtin_HEXAGON_M2_mpy_nac_lh_s0
      {Intrinsic::hexagon_M2_mpy_nac_lh_s1, 20896}, // __builtin_HEXAGON_M2_mpy_nac_lh_s1
      {Intrinsic::hexagon_M2_mpy_nac_ll_s0, 20931}, // __builtin_HEXAGON_M2_mpy_nac_ll_s0
      {Intrinsic::hexagon_M2_mpy_nac_ll_s1, 20966}, // __builtin_HEXAGON_M2_mpy_nac_ll_s1
      {Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0, 21001}, // __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0
      {Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1, 21040}, // __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1
      {Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0, 21079}, // __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0
      {Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1, 21118}, // __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1
      {Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0, 21157}, // __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0
      {Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1, 21196}, // __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1
      {Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0, 21235}, // __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0
      {Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1, 21274}, // __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1
      {Intrinsic::hexagon_M2_mpy_rnd_hh_s0, 21313}, // __builtin_HEXAGON_M2_mpy_rnd_hh_s0
      {Intrinsic::hexagon_M2_mpy_rnd_hh_s1, 21348}, // __builtin_HEXAGON_M2_mpy_rnd_hh_s1
      {Intrinsic::hexagon_M2_mpy_rnd_hl_s0, 21383}, // __builtin_HEXAGON_M2_mpy_rnd_hl_s0
      {Intrinsic::hexagon_M2_mpy_rnd_hl_s1, 21418}, // __builtin_HEXAGON_M2_mpy_rnd_hl_s1
      {Intrinsic::hexagon_M2_mpy_rnd_lh_s0, 21453}, // __builtin_HEXAGON_M2_mpy_rnd_lh_s0
      {Intrinsic::hexagon_M2_mpy_rnd_lh_s1, 21488}, // __builtin_HEXAGON_M2_mpy_rnd_lh_s1
      {Intrinsic::hexagon_M2_mpy_rnd_ll_s0, 21523}, // __builtin_HEXAGON_M2_mpy_rnd_ll_s0
      {Intrinsic::hexagon_M2_mpy_rnd_ll_s1, 21558}, // __builtin_HEXAGON_M2_mpy_rnd_ll_s1
      {Intrinsic::hexagon_M2_mpy_sat_hh_s0, 21593}, // __builtin_HEXAGON_M2_mpy_sat_hh_s0
      {Intrinsic::hexagon_M2_mpy_sat_hh_s1, 21628}, // __builtin_HEXAGON_M2_mpy_sat_hh_s1
      {Intrinsic::hexagon_M2_mpy_sat_hl_s0, 21663}, // __builtin_HEXAGON_M2_mpy_sat_hl_s0
      {Intrinsic::hexagon_M2_mpy_sat_hl_s1, 21698}, // __builtin_HEXAGON_M2_mpy_sat_hl_s1
      {Intrinsic::hexagon_M2_mpy_sat_lh_s0, 21733}, // __builtin_HEXAGON_M2_mpy_sat_lh_s0
      {Intrinsic::hexagon_M2_mpy_sat_lh_s1, 21768}, // __builtin_HEXAGON_M2_mpy_sat_lh_s1
      {Intrinsic::hexagon_M2_mpy_sat_ll_s0, 21803}, // __builtin_HEXAGON_M2_mpy_sat_ll_s0
      {Intrinsic::hexagon_M2_mpy_sat_ll_s1, 21838}, // __builtin_HEXAGON_M2_mpy_sat_ll_s1
      {Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0, 21873}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0
      {Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1, 21912}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1
      {Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0, 21951}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0
      {Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1, 21990}, // __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1
      {Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0, 22029}, // __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0
      {Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1, 22068}, // __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1
      {Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0, 22107}, // __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0
      {Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1, 22146}, // __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1
      {Intrinsic::hexagon_M2_mpy_up, 22185}, // __builtin_HEXAGON_M2_mpy_up
      {Intrinsic::hexagon_M2_mpy_up_s1, 22213}, // __builtin_HEXAGON_M2_mpy_up_s1
      {Intrinsic::hexagon_M2_mpy_up_s1_sat, 22244}, // __builtin_HEXAGON_M2_mpy_up_s1_sat
      {Intrinsic::hexagon_M2_mpyd_acc_hh_s0, 22279}, // __builtin_HEXAGON_M2_mpyd_acc_hh_s0
      {Intrinsic::hexagon_M2_mpyd_acc_hh_s1, 22315}, // __builtin_HEXAGON_M2_mpyd_acc_hh_s1
      {Intrinsic::hexagon_M2_mpyd_acc_hl_s0, 22351}, // __builtin_HEXAGON_M2_mpyd_acc_hl_s0
      {Intrinsic::hexagon_M2_mpyd_acc_hl_s1, 22387}, // __builtin_HEXAGON_M2_mpyd_acc_hl_s1
      {Intrinsic::hexagon_M2_mpyd_acc_lh_s0, 22423}, // __builtin_HEXAGON_M2_mpyd_acc_lh_s0
      {Intrinsic::hexagon_M2_mpyd_acc_lh_s1, 22459}, // __builtin_HEXAGON_M2_mpyd_acc_lh_s1
      {Intrinsic::hexagon_M2_mpyd_acc_ll_s0, 22495}, // __builtin_HEXAGON_M2_mpyd_acc_ll_s0
      {Intrinsic::hexagon_M2_mpyd_acc_ll_s1, 22531}, // __builtin_HEXAGON_M2_mpyd_acc_ll_s1
      {Intrinsic::hexagon_M2_mpyd_hh_s0, 22567}, // __builtin_HEXAGON_M2_mpyd_hh_s0
      {Intrinsic::hexagon_M2_mpyd_hh_s1, 22599}, // __builtin_HEXAGON_M2_mpyd_hh_s1
      {Intrinsic::hexagon_M2_mpyd_hl_s0, 22631}, // __builtin_HEXAGON_M2_mpyd_hl_s0
      {Intrinsic::hexagon_M2_mpyd_hl_s1, 22663}, // __builtin_HEXAGON_M2_mpyd_hl_s1
      {Intrinsic::hexagon_M2_mpyd_lh_s0, 22695}, // __builtin_HEXAGON_M2_mpyd_lh_s0
      {Intrinsic::hexagon_M2_mpyd_lh_s1, 22727}, // __builtin_HEXAGON_M2_mpyd_lh_s1
      {Intrinsic::hexagon_M2_mpyd_ll_s0, 22759}, // __builtin_HEXAGON_M2_mpyd_ll_s0
      {Intrinsic::hexagon_M2_mpyd_ll_s1, 22791}, // __builtin_HEXAGON_M2_mpyd_ll_s1
      {Intrinsic::hexagon_M2_mpyd_nac_hh_s0, 22823}, // __builtin_HEXAGON_M2_mpyd_nac_hh_s0
      {Intrinsic::hexagon_M2_mpyd_nac_hh_s1, 22859}, // __builtin_HEXAGON_M2_mpyd_nac_hh_s1
      {Intrinsic::hexagon_M2_mpyd_nac_hl_s0, 22895}, // __builtin_HEXAGON_M2_mpyd_nac_hl_s0
      {Intrinsic::hexagon_M2_mpyd_nac_hl_s1, 22931}, // __builtin_HEXAGON_M2_mpyd_nac_hl_s1
      {Intrinsic::hexagon_M2_mpyd_nac_lh_s0, 22967}, // __builtin_HEXAGON_M2_mpyd_nac_lh_s0
      {Intrinsic::hexagon_M2_mpyd_nac_lh_s1, 23003}, // __builtin_HEXAGON_M2_mpyd_nac_lh_s1
      {Intrinsic::hexagon_M2_mpyd_nac_ll_s0, 23039}, // __builtin_HEXAGON_M2_mpyd_nac_ll_s0
      {Intrinsic::hexagon_M2_mpyd_nac_ll_s1, 23075}, // __builtin_HEXAGON_M2_mpyd_nac_ll_s1
      {Intrinsic::hexagon_M2_mpyd_rnd_hh_s0, 23111}, // __builtin_HEXAGON_M2_mpyd_rnd_hh_s0
      {Intrinsic::hexagon_M2_mpyd_rnd_hh_s1, 23147}, // __builtin_HEXAGON_M2_mpyd_rnd_hh_s1
      {Intrinsic::hexagon_M2_mpyd_rnd_hl_s0, 23183}, // __builtin_HEXAGON_M2_mpyd_rnd_hl_s0
      {Intrinsic::hexagon_M2_mpyd_rnd_hl_s1, 23219}, // __builtin_HEXAGON_M2_mpyd_rnd_hl_s1
      {Intrinsic::hexagon_M2_mpyd_rnd_lh_s0, 23255}, // __builtin_HEXAGON_M2_mpyd_rnd_lh_s0
      {Intrinsic::hexagon_M2_mpyd_rnd_lh_s1, 23291}, // __builtin_HEXAGON_M2_mpyd_rnd_lh_s1
      {Intrinsic::hexagon_M2_mpyd_rnd_ll_s0, 23327}, // __builtin_HEXAGON_M2_mpyd_rnd_ll_s0
      {Intrinsic::hexagon_M2_mpyd_rnd_ll_s1, 23363}, // __builtin_HEXAGON_M2_mpyd_rnd_ll_s1
      {Intrinsic::hexagon_M2_mpyi, 23399}, // __builtin_HEXAGON_M2_mpyi
      {Intrinsic::hexagon_M2_mpysmi, 23425}, // __builtin_HEXAGON_M2_mpysmi
      {Intrinsic::hexagon_M2_mpysu_up, 23453}, // __builtin_HEXAGON_M2_mpysu_up
      {Intrinsic::hexagon_M2_mpyu_acc_hh_s0, 23483}, // __builtin_HEXAGON_M2_mpyu_acc_hh_s0
      {Intrinsic::hexagon_M2_mpyu_acc_hh_s1, 23519}, // __builtin_HEXAGON_M2_mpyu_acc_hh_s1
      {Intrinsic::hexagon_M2_mpyu_acc_hl_s0, 23555}, // __builtin_HEXAGON_M2_mpyu_acc_hl_s0
      {Intrinsic::hexagon_M2_mpyu_acc_hl_s1, 23591}, // __builtin_HEXAGON_M2_mpyu_acc_hl_s1
      {Intrinsic::hexagon_M2_mpyu_acc_lh_s0, 23627}, // __builtin_HEXAGON_M2_mpyu_acc_lh_s0
      {Intrinsic::hexagon_M2_mpyu_acc_lh_s1, 23663}, // __builtin_HEXAGON_M2_mpyu_acc_lh_s1
      {Intrinsic::hexagon_M2_mpyu_acc_ll_s0, 23699}, // __builtin_HEXAGON_M2_mpyu_acc_ll_s0
      {Intrinsic::hexagon_M2_mpyu_acc_ll_s1, 23735}, // __builtin_HEXAGON_M2_mpyu_acc_ll_s1
      {Intrinsic::hexagon_M2_mpyu_hh_s0, 23771}, // __builtin_HEXAGON_M2_mpyu_hh_s0
      {Intrinsic::hexagon_M2_mpyu_hh_s1, 23803}, // __builtin_HEXAGON_M2_mpyu_hh_s1
      {Intrinsic::hexagon_M2_mpyu_hl_s0, 23835}, // __builtin_HEXAGON_M2_mpyu_hl_s0
      {Intrinsic::hexagon_M2_mpyu_hl_s1, 23867}, // __builtin_HEXAGON_M2_mpyu_hl_s1
      {Intrinsic::hexagon_M2_mpyu_lh_s0, 23899}, // __builtin_HEXAGON_M2_mpyu_lh_s0
      {Intrinsic::hexagon_M2_mpyu_lh_s1, 23931}, // __builtin_HEXAGON_M2_mpyu_lh_s1
      {Intrinsic::hexagon_M2_mpyu_ll_s0, 23963}, // __builtin_HEXAGON_M2_mpyu_ll_s0
      {Intrinsic::hexagon_M2_mpyu_ll_s1, 23995}, // __builtin_HEXAGON_M2_mpyu_ll_s1
      {Intrinsic::hexagon_M2_mpyu_nac_hh_s0, 24027}, // __builtin_HEXAGON_M2_mpyu_nac_hh_s0
      {Intrinsic::hexagon_M2_mpyu_nac_hh_s1, 24063}, // __builtin_HEXAGON_M2_mpyu_nac_hh_s1
      {Intrinsic::hexagon_M2_mpyu_nac_hl_s0, 24099}, // __builtin_HEXAGON_M2_mpyu_nac_hl_s0
      {Intrinsic::hexagon_M2_mpyu_nac_hl_s1, 24135}, // __builtin_HEXAGON_M2_mpyu_nac_hl_s1
      {Intrinsic::hexagon_M2_mpyu_nac_lh_s0, 24171}, // __builtin_HEXAGON_M2_mpyu_nac_lh_s0
      {Intrinsic::hexagon_M2_mpyu_nac_lh_s1, 24207}, // __builtin_HEXAGON_M2_mpyu_nac_lh_s1
      {Intrinsic::hexagon_M2_mpyu_nac_ll_s0, 24243}, // __builtin_HEXAGON_M2_mpyu_nac_ll_s0
      {Intrinsic::hexagon_M2_mpyu_nac_ll_s1, 24279}, // __builtin_HEXAGON_M2_mpyu_nac_ll_s1
      {Intrinsic::hexagon_M2_mpyu_up, 24315}, // __builtin_HEXAGON_M2_mpyu_up
      {Intrinsic::hexagon_M2_mpyud_acc_hh_s0, 24344}, // __builtin_HEXAGON_M2_mpyud_acc_hh_s0
      {Intrinsic::hexagon_M2_mpyud_acc_hh_s1, 24381}, // __builtin_HEXAGON_M2_mpyud_acc_hh_s1
      {Intrinsic::hexagon_M2_mpyud_acc_hl_s0, 24418}, // __builtin_HEXAGON_M2_mpyud_acc_hl_s0
      {Intrinsic::hexagon_M2_mpyud_acc_hl_s1, 24455}, // __builtin_HEXAGON_M2_mpyud_acc_hl_s1
      {Intrinsic::hexagon_M2_mpyud_acc_lh_s0, 24492}, // __builtin_HEXAGON_M2_mpyud_acc_lh_s0
      {Intrinsic::hexagon_M2_mpyud_acc_lh_s1, 24529}, // __builtin_HEXAGON_M2_mpyud_acc_lh_s1
      {Intrinsic::hexagon_M2_mpyud_acc_ll_s0, 24566}, // __builtin_HEXAGON_M2_mpyud_acc_ll_s0
      {Intrinsic::hexagon_M2_mpyud_acc_ll_s1, 24603}, // __builtin_HEXAGON_M2_mpyud_acc_ll_s1
      {Intrinsic::hexagon_M2_mpyud_hh_s0, 24640}, // __builtin_HEXAGON_M2_mpyud_hh_s0
      {Intrinsic::hexagon_M2_mpyud_hh_s1, 24673}, // __builtin_HEXAGON_M2_mpyud_hh_s1
      {Intrinsic::hexagon_M2_mpyud_hl_s0, 24706}, // __builtin_HEXAGON_M2_mpyud_hl_s0
      {Intrinsic::hexagon_M2_mpyud_hl_s1, 24739}, // __builtin_HEXAGON_M2_mpyud_hl_s1
      {Intrinsic::hexagon_M2_mpyud_lh_s0, 24772}, // __builtin_HEXAGON_M2_mpyud_lh_s0
      {Intrinsic::hexagon_M2_mpyud_lh_s1, 24805}, // __builtin_HEXAGON_M2_mpyud_lh_s1
      {Intrinsic::hexagon_M2_mpyud_ll_s0, 24838}, // __builtin_HEXAGON_M2_mpyud_ll_s0
      {Intrinsic::hexagon_M2_mpyud_ll_s1, 24871}, // __builtin_HEXAGON_M2_mpyud_ll_s1
      {Intrinsic::hexagon_M2_mpyud_nac_hh_s0, 24904}, // __builtin_HEXAGON_M2_mpyud_nac_hh_s0
      {Intrinsic::hexagon_M2_mpyud_nac_hh_s1, 24941}, // __builtin_HEXAGON_M2_mpyud_nac_hh_s1
      {Intrinsic::hexagon_M2_mpyud_nac_hl_s0, 24978}, // __builtin_HEXAGON_M2_mpyud_nac_hl_s0
      {Intrinsic::hexagon_M2_mpyud_nac_hl_s1, 25015}, // __builtin_HEXAGON_M2_mpyud_nac_hl_s1
      {Intrinsic::hexagon_M2_mpyud_nac_lh_s0, 25052}, // __builtin_HEXAGON_M2_mpyud_nac_lh_s0
      {Intrinsic::hexagon_M2_mpyud_nac_lh_s1, 25089}, // __builtin_HEXAGON_M2_mpyud_nac_lh_s1
      {Intrinsic::hexagon_M2_mpyud_nac_ll_s0, 25126}, // __builtin_HEXAGON_M2_mpyud_nac_ll_s0
      {Intrinsic::hexagon_M2_mpyud_nac_ll_s1, 25163}, // __builtin_HEXAGON_M2_mpyud_nac_ll_s1
      {Intrinsic::hexagon_M2_mpyui, 25200}, // __builtin_HEXAGON_M2_mpyui
      {Intrinsic::hexagon_M2_nacci, 25227}, // __builtin_HEXAGON_M2_nacci
      {Intrinsic::hexagon_M2_naccii, 25254}, // __builtin_HEXAGON_M2_naccii
      {Intrinsic::hexagon_M2_subacc, 25282}, // __builtin_HEXAGON_M2_subacc
      {Intrinsic::hexagon_M2_vabsdiffh, 25310}, // __builtin_HEXAGON_M2_vabsdiffh
      {Intrinsic::hexagon_M2_vabsdiffw, 25341}, // __builtin_HEXAGON_M2_vabsdiffw
      {Intrinsic::hexagon_M2_vcmac_s0_sat_i, 25372}, // __builtin_HEXAGON_M2_vcmac_s0_sat_i
      {Intrinsic::hexagon_M2_vcmac_s0_sat_r, 25408}, // __builtin_HEXAGON_M2_vcmac_s0_sat_r
      {Intrinsic::hexagon_M2_vcmpy_s0_sat_i, 25444}, // __builtin_HEXAGON_M2_vcmpy_s0_sat_i
      {Intrinsic::hexagon_M2_vcmpy_s0_sat_r, 25480}, // __builtin_HEXAGON_M2_vcmpy_s0_sat_r
      {Intrinsic::hexagon_M2_vcmpy_s1_sat_i, 25516}, // __builtin_HEXAGON_M2_vcmpy_s1_sat_i
      {Intrinsic::hexagon_M2_vcmpy_s1_sat_r, 25552}, // __builtin_HEXAGON_M2_vcmpy_s1_sat_r
      {Intrinsic::hexagon_M2_vdmacs_s0, 25588}, // __builtin_HEXAGON_M2_vdmacs_s0
      {Intrinsic::hexagon_M2_vdmacs_s1, 25619}, // __builtin_HEXAGON_M2_vdmacs_s1
      {Intrinsic::hexagon_M2_vdmpyrs_s0, 25650}, // __builtin_HEXAGON_M2_vdmpyrs_s0
      {Intrinsic::hexagon_M2_vdmpyrs_s1, 25682}, // __builtin_HEXAGON_M2_vdmpyrs_s1
      {Intrinsic::hexagon_M2_vdmpys_s0, 25714}, // __builtin_HEXAGON_M2_vdmpys_s0
      {Intrinsic::hexagon_M2_vdmpys_s1, 25745}, // __builtin_HEXAGON_M2_vdmpys_s1
      {Intrinsic::hexagon_M2_vmac2, 25776}, // __builtin_HEXAGON_M2_vmac2
      {Intrinsic::hexagon_M2_vmac2es, 25803}, // __builtin_HEXAGON_M2_vmac2es
      {Intrinsic::hexagon_M2_vmac2es_s0, 25832}, // __builtin_HEXAGON_M2_vmac2es_s0
      {Intrinsic::hexagon_M2_vmac2es_s1, 25864}, // __builtin_HEXAGON_M2_vmac2es_s1
      {Intrinsic::hexagon_M2_vmac2s_s0, 25896}, // __builtin_HEXAGON_M2_vmac2s_s0
      {Intrinsic::hexagon_M2_vmac2s_s1, 25927}, // __builtin_HEXAGON_M2_vmac2s_s1
      {Intrinsic::hexagon_M2_vmac2su_s0, 25958}, // __builtin_HEXAGON_M2_vmac2su_s0
      {Intrinsic::hexagon_M2_vmac2su_s1, 25990}, // __builtin_HEXAGON_M2_vmac2su_s1
      {Intrinsic::hexagon_M2_vmpy2es_s0, 26022}, // __builtin_HEXAGON_M2_vmpy2es_s0
      {Intrinsic::hexagon_M2_vmpy2es_s1, 26054}, // __builtin_HEXAGON_M2_vmpy2es_s1
      {Intrinsic::hexagon_M2_vmpy2s_s0, 26086}, // __builtin_HEXAGON_M2_vmpy2s_s0
      {Intrinsic::hexagon_M2_vmpy2s_s0pack, 26117}, // __builtin_HEXAGON_M2_vmpy2s_s0pack
      {Intrinsic::hexagon_M2_vmpy2s_s1, 26152}, // __builtin_HEXAGON_M2_vmpy2s_s1
      {Intrinsic::hexagon_M2_vmpy2s_s1pack, 26183}, // __builtin_HEXAGON_M2_vmpy2s_s1pack
      {Intrinsic::hexagon_M2_vmpy2su_s0, 26218}, // __builtin_HEXAGON_M2_vmpy2su_s0
      {Intrinsic::hexagon_M2_vmpy2su_s1, 26250}, // __builtin_HEXAGON_M2_vmpy2su_s1
      {Intrinsic::hexagon_M2_vraddh, 26282}, // __builtin_HEXAGON_M2_vraddh
      {Intrinsic::hexagon_M2_vradduh, 26310}, // __builtin_HEXAGON_M2_vradduh
      {Intrinsic::hexagon_M2_vrcmaci_s0, 26339}, // __builtin_HEXAGON_M2_vrcmaci_s0
      {Intrinsic::hexagon_M2_vrcmaci_s0c, 26371}, // __builtin_HEXAGON_M2_vrcmaci_s0c
      {Intrinsic::hexagon_M2_vrcmacr_s0, 26404}, // __builtin_HEXAGON_M2_vrcmacr_s0
      {Intrinsic::hexagon_M2_vrcmacr_s0c, 26436}, // __builtin_HEXAGON_M2_vrcmacr_s0c
      {Intrinsic::hexagon_M2_vrcmpyi_s0, 26469}, // __builtin_HEXAGON_M2_vrcmpyi_s0
      {Intrinsic::hexagon_M2_vrcmpyi_s0c, 26501}, // __builtin_HEXAGON_M2_vrcmpyi_s0c
      {Intrinsic::hexagon_M2_vrcmpyr_s0, 26534}, // __builtin_HEXAGON_M2_vrcmpyr_s0
      {Intrinsic::hexagon_M2_vrcmpyr_s0c, 26566}, // __builtin_HEXAGON_M2_vrcmpyr_s0c
      {Intrinsic::hexagon_M2_vrcmpys_acc_s1, 26599}, // __builtin_HEXAGON_M2_vrcmpys_acc_s1
      {Intrinsic::hexagon_M2_vrcmpys_s1, 26635}, // __builtin_HEXAGON_M2_vrcmpys_s1
      {Intrinsic::hexagon_M2_vrcmpys_s1rp, 26667}, // __builtin_HEXAGON_M2_vrcmpys_s1rp
      {Intrinsic::hexagon_M2_vrmac_s0, 26701}, // __builtin_HEXAGON_M2_vrmac_s0
      {Intrinsic::hexagon_M2_vrmpy_s0, 26731}, // __builtin_HEXAGON_M2_vrmpy_s0
      {Intrinsic::hexagon_M2_xor_xacc, 26761}, // __builtin_HEXAGON_M2_xor_xacc
      {Intrinsic::hexagon_M4_and_and, 26791}, // __builtin_HEXAGON_M4_and_and
      {Intrinsic::hexagon_M4_and_andn, 26820}, // __builtin_HEXAGON_M4_and_andn
      {Intrinsic::hexagon_M4_and_or, 26850}, // __builtin_HEXAGON_M4_and_or
      {Intrinsic::hexagon_M4_and_xor, 26878}, // __builtin_HEXAGON_M4_and_xor
      {Intrinsic::hexagon_M4_cmpyi_wh, 26907}, // __builtin_HEXAGON_M4_cmpyi_wh
      {Intrinsic::hexagon_M4_cmpyi_whc, 26937}, // __builtin_HEXAGON_M4_cmpyi_whc
      {Intrinsic::hexagon_M4_cmpyr_wh, 26968}, // __builtin_HEXAGON_M4_cmpyr_wh
      {Intrinsic::hexagon_M4_cmpyr_whc, 26998}, // __builtin_HEXAGON_M4_cmpyr_whc
      {Intrinsic::hexagon_M4_mac_up_s1_sat, 27029}, // __builtin_HEXAGON_M4_mac_up_s1_sat
      {Intrinsic::hexagon_M4_mpyri_addi, 27064}, // __builtin_HEXAGON_M4_mpyri_addi
      {Intrinsic::hexagon_M4_mpyri_addr, 27096}, // __builtin_HEXAGON_M4_mpyri_addr
      {Intrinsic::hexagon_M4_mpyri_addr_u2, 27128}, // __builtin_HEXAGON_M4_mpyri_addr_u2
      {Intrinsic::hexagon_M4_mpyrr_addi, 27163}, // __builtin_HEXAGON_M4_mpyrr_addi
      {Intrinsic::hexagon_M4_mpyrr_addr, 27195}, // __builtin_HEXAGON_M4_mpyrr_addr
      {Intrinsic::hexagon_M4_nac_up_s1_sat, 27227}, // __builtin_HEXAGON_M4_nac_up_s1_sat
      {Intrinsic::hexagon_M4_or_and, 27262}, // __builtin_HEXAGON_M4_or_and
      {Intrinsic::hexagon_M4_or_andn, 27290}, // __builtin_HEXAGON_M4_or_andn
      {Intrinsic::hexagon_M4_or_or, 27319}, // __builtin_HEXAGON_M4_or_or
      {Intrinsic::hexagon_M4_or_xor, 27346}, // __builtin_HEXAGON_M4_or_xor
      {Intrinsic::hexagon_M4_pmpyw, 27374}, // __builtin_HEXAGON_M4_pmpyw
      {Intrinsic::hexagon_M4_pmpyw_acc, 27401}, // __builtin_HEXAGON_M4_pmpyw_acc
      {Intrinsic::hexagon_M4_vpmpyh, 27432}, // __builtin_HEXAGON_M4_vpmpyh
      {Intrinsic::hexagon_M4_vpmpyh_acc, 27460}, // __builtin_HEXAGON_M4_vpmpyh_acc
      {Intrinsic::hexagon_M4_vrmpyeh_acc_s0, 27492}, // __builtin_HEXAGON_M4_vrmpyeh_acc_s0
      {Intrinsic::hexagon_M4_vrmpyeh_acc_s1, 27528}, // __builtin_HEXAGON_M4_vrmpyeh_acc_s1
      {Intrinsic::hexagon_M4_vrmpyeh_s0, 27564}, // __builtin_HEXAGON_M4_vrmpyeh_s0
      {Intrinsic::hexagon_M4_vrmpyeh_s1, 27596}, // __builtin_HEXAGON_M4_vrmpyeh_s1
      {Intrinsic::hexagon_M4_vrmpyoh_acc_s0, 27628}, // __builtin_HEXAGON_M4_vrmpyoh_acc_s0
      {Intrinsic::hexagon_M4_vrmpyoh_acc_s1, 27664}, // __builtin_HEXAGON_M4_vrmpyoh_acc_s1
      {Intrinsic::hexagon_M4_vrmpyoh_s0, 27700}, // __builtin_HEXAGON_M4_vrmpyoh_s0
      {Intrinsic::hexagon_M4_vrmpyoh_s1, 27732}, // __builtin_HEXAGON_M4_vrmpyoh_s1
      {Intrinsic::hexagon_M4_xor_and, 27764}, // __builtin_HEXAGON_M4_xor_and
      {Intrinsic::hexagon_M4_xor_andn, 27793}, // __builtin_HEXAGON_M4_xor_andn
      {Intrinsic::hexagon_M4_xor_or, 27823}, // __builtin_HEXAGON_M4_xor_or
      {Intrinsic::hexagon_M4_xor_xacc, 27851}, // __builtin_HEXAGON_M4_xor_xacc
      {Intrinsic::hexagon_M5_vdmacbsu, 27881}, // __builtin_HEXAGON_M5_vdmacbsu
      {Intrinsic::hexagon_M5_vdmpybsu, 27911}, // __builtin_HEXAGON_M5_vdmpybsu
      {Intrinsic::hexagon_M5_vmacbsu, 27941}, // __builtin_HEXAGON_M5_vmacbsu
      {Intrinsic::hexagon_M5_vmacbuu, 27970}, // __builtin_HEXAGON_M5_vmacbuu
      {Intrinsic::hexagon_M5_vmpybsu, 27999}, // __builtin_HEXAGON_M5_vmpybsu
      {Intrinsic::hexagon_M5_vmpybuu, 28028}, // __builtin_HEXAGON_M5_vmpybuu
      {Intrinsic::hexagon_M5_vrmacbsu, 28057}, // __builtin_HEXAGON_M5_vrmacbsu
      {Intrinsic::hexagon_M5_vrmacbuu, 28087}, // __builtin_HEXAGON_M5_vrmacbuu
      {Intrinsic::hexagon_M5_vrmpybsu, 28117}, // __builtin_HEXAGON_M5_vrmpybsu
      {Intrinsic::hexagon_M5_vrmpybuu, 28147}, // __builtin_HEXAGON_M5_vrmpybuu
      {Intrinsic::hexagon_M6_vabsdiffb, 28177}, // __builtin_HEXAGON_M6_vabsdiffb
      {Intrinsic::hexagon_M6_vabsdiffub, 28208}, // __builtin_HEXAGON_M6_vabsdiffub
      {Intrinsic::hexagon_M7_dcmpyiw, 28240}, // __builtin_HEXAGON_M7_dcmpyiw
      {Intrinsic::hexagon_M7_dcmpyiw_acc, 28269}, // __builtin_HEXAGON_M7_dcmpyiw_acc
      {Intrinsic::hexagon_M7_dcmpyiwc, 28302}, // __builtin_HEXAGON_M7_dcmpyiwc
      {Intrinsic::hexagon_M7_dcmpyiwc_acc, 28332}, // __builtin_HEXAGON_M7_dcmpyiwc_acc
      {Intrinsic::hexagon_M7_dcmpyrw, 28366}, // __builtin_HEXAGON_M7_dcmpyrw
      {Intrinsic::hexagon_M7_dcmpyrw_acc, 28395}, // __builtin_HEXAGON_M7_dcmpyrw_acc
      {Intrinsic::hexagon_M7_dcmpyrwc, 28428}, // __builtin_HEXAGON_M7_dcmpyrwc
      {Intrinsic::hexagon_M7_dcmpyrwc_acc, 28458}, // __builtin_HEXAGON_M7_dcmpyrwc_acc
      {Intrinsic::hexagon_M7_vdmpy, 28492}, // __builtin_HEXAGON_M7_vdmpy
      {Intrinsic::hexagon_M7_vdmpy_acc, 28519}, // __builtin_HEXAGON_M7_vdmpy_acc
      {Intrinsic::hexagon_M7_wcmpyiw, 28550}, // __builtin_HEXAGON_M7_wcmpyiw
      {Intrinsic::hexagon_M7_wcmpyiw_rnd, 28579}, // __builtin_HEXAGON_M7_wcmpyiw_rnd
      {Intrinsic::hexagon_M7_wcmpyiwc, 28612}, // __builtin_HEXAGON_M7_wcmpyiwc
      {Intrinsic::hexagon_M7_wcmpyiwc_rnd, 28642}, // __builtin_HEXAGON_M7_wcmpyiwc_rnd
      {Intrinsic::hexagon_M7_wcmpyrw, 28676}, // __builtin_HEXAGON_M7_wcmpyrw
      {Intrinsic::hexagon_M7_wcmpyrw_rnd, 28705}, // __builtin_HEXAGON_M7_wcmpyrw_rnd
      {Intrinsic::hexagon_M7_wcmpyrwc, 28738}, // __builtin_HEXAGON_M7_wcmpyrwc
      {Intrinsic::hexagon_M7_wcmpyrwc_rnd, 28768}, // __builtin_HEXAGON_M7_wcmpyrwc_rnd
      {Intrinsic::hexagon_S2_addasl_rrri, 28802}, // __builtin_HEXAGON_S2_addasl_rrri
      {Intrinsic::hexagon_S2_asl_i_p, 28835}, // __builtin_HEXAGON_S2_asl_i_p
      {Intrinsic::hexagon_S2_asl_i_p_acc, 28864}, // __builtin_HEXAGON_S2_asl_i_p_acc
      {Intrinsic::hexagon_S2_asl_i_p_and, 28897}, // __builtin_HEXAGON_S2_asl_i_p_and
      {Intrinsic::hexagon_S2_asl_i_p_nac, 28930}, // __builtin_HEXAGON_S2_asl_i_p_nac
      {Intrinsic::hexagon_S2_asl_i_p_or, 28963}, // __builtin_HEXAGON_S2_asl_i_p_or
      {Intrinsic::hexagon_S2_asl_i_p_xacc, 28995}, // __builtin_HEXAGON_S2_asl_i_p_xacc
      {Intrinsic::hexagon_S2_asl_i_r, 29029}, // __builtin_HEXAGON_S2_asl_i_r
      {Intrinsic::hexagon_S2_asl_i_r_acc, 29058}, // __builtin_HEXAGON_S2_asl_i_r_acc
      {Intrinsic::hexagon_S2_asl_i_r_and, 29091}, // __builtin_HEXAGON_S2_asl_i_r_and
      {Intrinsic::hexagon_S2_asl_i_r_nac, 29124}, // __builtin_HEXAGON_S2_asl_i_r_nac
      {Intrinsic::hexagon_S2_asl_i_r_or, 29157}, // __builtin_HEXAGON_S2_asl_i_r_or
      {Intrinsic::hexagon_S2_asl_i_r_sat, 29189}, // __builtin_HEXAGON_S2_asl_i_r_sat
      {Intrinsic::hexagon_S2_asl_i_r_xacc, 29222}, // __builtin_HEXAGON_S2_asl_i_r_xacc
      {Intrinsic::hexagon_S2_asl_i_vh, 29256}, // __builtin_HEXAGON_S2_asl_i_vh
      {Intrinsic::hexagon_S2_asl_i_vw, 29286}, // __builtin_HEXAGON_S2_asl_i_vw
      {Intrinsic::hexagon_S2_asl_r_p, 29316}, // __builtin_HEXAGON_S2_asl_r_p
      {Intrinsic::hexagon_S2_asl_r_p_acc, 29345}, // __builtin_HEXAGON_S2_asl_r_p_acc
      {Intrinsic::hexagon_S2_asl_r_p_and, 29378}, // __builtin_HEXAGON_S2_asl_r_p_and
      {Intrinsic::hexagon_S2_asl_r_p_nac, 29411}, // __builtin_HEXAGON_S2_asl_r_p_nac
      {Intrinsic::hexagon_S2_asl_r_p_or, 29444}, // __builtin_HEXAGON_S2_asl_r_p_or
      {Intrinsic::hexagon_S2_asl_r_p_xor, 29476}, // __builtin_HEXAGON_S2_asl_r_p_xor
      {Intrinsic::hexagon_S2_asl_r_r, 29509}, // __builtin_HEXAGON_S2_asl_r_r
      {Intrinsic::hexagon_S2_asl_r_r_acc, 29538}, // __builtin_HEXAGON_S2_asl_r_r_acc
      {Intrinsic::hexagon_S2_asl_r_r_and, 29571}, // __builtin_HEXAGON_S2_asl_r_r_and
      {Intrinsic::hexagon_S2_asl_r_r_nac, 29604}, // __builtin_HEXAGON_S2_asl_r_r_nac
      {Intrinsic::hexagon_S2_asl_r_r_or, 29637}, // __builtin_HEXAGON_S2_asl_r_r_or
      {Intrinsic::hexagon_S2_asl_r_r_sat, 29669}, // __builtin_HEXAGON_S2_asl_r_r_sat
      {Intrinsic::hexagon_S2_asl_r_vh, 29702}, // __builtin_HEXAGON_S2_asl_r_vh
      {Intrinsic::hexagon_S2_asl_r_vw, 29732}, // __builtin_HEXAGON_S2_asl_r_vw
      {Intrinsic::hexagon_S2_asr_i_p, 29762}, // __builtin_HEXAGON_S2_asr_i_p
      {Intrinsic::hexagon_S2_asr_i_p_acc, 29791}, // __builtin_HEXAGON_S2_asr_i_p_acc
      {Intrinsic::hexagon_S2_asr_i_p_and, 29824}, // __builtin_HEXAGON_S2_asr_i_p_and
      {Intrinsic::hexagon_S2_asr_i_p_nac, 29857}, // __builtin_HEXAGON_S2_asr_i_p_nac
      {Intrinsic::hexagon_S2_asr_i_p_or, 29890}, // __builtin_HEXAGON_S2_asr_i_p_or
      {Intrinsic::hexagon_S2_asr_i_p_rnd, 29922}, // __builtin_HEXAGON_S2_asr_i_p_rnd
      {Intrinsic::hexagon_S2_asr_i_p_rnd_goodsyntax, 29955}, // __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax
      {Intrinsic::hexagon_S2_asr_i_r, 29999}, // __builtin_HEXAGON_S2_asr_i_r
      {Intrinsic::hexagon_S2_asr_i_r_acc, 30028}, // __builtin_HEXAGON_S2_asr_i_r_acc
      {Intrinsic::hexagon_S2_asr_i_r_and, 30061}, // __builtin_HEXAGON_S2_asr_i_r_and
      {Intrinsic::hexagon_S2_asr_i_r_nac, 30094}, // __builtin_HEXAGON_S2_asr_i_r_nac
      {Intrinsic::hexagon_S2_asr_i_r_or, 30127}, // __builtin_HEXAGON_S2_asr_i_r_or
      {Intrinsic::hexagon_S2_asr_i_r_rnd, 30159}, // __builtin_HEXAGON_S2_asr_i_r_rnd
      {Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax, 30192}, // __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax
      {Intrinsic::hexagon_S2_asr_i_svw_trun, 30236}, // __builtin_HEXAGON_S2_asr_i_svw_trun
      {Intrinsic::hexagon_S2_asr_i_vh, 30272}, // __builtin_HEXAGON_S2_asr_i_vh
      {Intrinsic::hexagon_S2_asr_i_vw, 30302}, // __builtin_HEXAGON_S2_asr_i_vw
      {Intrinsic::hexagon_S2_asr_r_p, 30332}, // __builtin_HEXAGON_S2_asr_r_p
      {Intrinsic::hexagon_S2_asr_r_p_acc, 30361}, // __builtin_HEXAGON_S2_asr_r_p_acc
      {Intrinsic::hexagon_S2_asr_r_p_and, 30394}, // __builtin_HEXAGON_S2_asr_r_p_and
      {Intrinsic::hexagon_S2_asr_r_p_nac, 30427}, // __builtin_HEXAGON_S2_asr_r_p_nac
      {Intrinsic::hexagon_S2_asr_r_p_or, 30460}, // __builtin_HEXAGON_S2_asr_r_p_or
      {Intrinsic::hexagon_S2_asr_r_p_xor, 30492}, // __builtin_HEXAGON_S2_asr_r_p_xor
      {Intrinsic::hexagon_S2_asr_r_r, 30525}, // __builtin_HEXAGON_S2_asr_r_r
      {Intrinsic::hexagon_S2_asr_r_r_acc, 30554}, // __builtin_HEXAGON_S2_asr_r_r_acc
      {Intrinsic::hexagon_S2_asr_r_r_and, 30587}, // __builtin_HEXAGON_S2_asr_r_r_and
      {Intrinsic::hexagon_S2_asr_r_r_nac, 30620}, // __builtin_HEXAGON_S2_asr_r_r_nac
      {Intrinsic::hexagon_S2_asr_r_r_or, 30653}, // __builtin_HEXAGON_S2_asr_r_r_or
      {Intrinsic::hexagon_S2_asr_r_r_sat, 30685}, // __builtin_HEXAGON_S2_asr_r_r_sat
      {Intrinsic::hexagon_S2_asr_r_svw_trun, 30718}, // __builtin_HEXAGON_S2_asr_r_svw_trun
      {Intrinsic::hexagon_S2_asr_r_vh, 30754}, // __builtin_HEXAGON_S2_asr_r_vh
      {Intrinsic::hexagon_S2_asr_r_vw, 30784}, // __builtin_HEXAGON_S2_asr_r_vw
      {Intrinsic::hexagon_S2_brev, 30814}, // __builtin_HEXAGON_S2_brev
      {Intrinsic::hexagon_S2_brevp, 30840}, // __builtin_HEXAGON_S2_brevp
      {Intrinsic::hexagon_S2_cl0, 30867}, // __builtin_HEXAGON_S2_cl0
      {Intrinsic::hexagon_S2_cl0p, 30892}, // __builtin_HEXAGON_S2_cl0p
      {Intrinsic::hexagon_S2_cl1, 30918}, // __builtin_HEXAGON_S2_cl1
      {Intrinsic::hexagon_S2_cl1p, 30943}, // __builtin_HEXAGON_S2_cl1p
      {Intrinsic::hexagon_S2_clb, 30969}, // __builtin_HEXAGON_S2_clb
      {Intrinsic::hexagon_S2_clbnorm, 30994}, // __builtin_HEXAGON_S2_clbnorm
      {Intrinsic::hexagon_S2_clbp, 31023}, // __builtin_HEXAGON_S2_clbp
      {Intrinsic::hexagon_S2_clrbit_i, 31049}, // __builtin_HEXAGON_S2_clrbit_i
      {Intrinsic::hexagon_S2_clrbit_r, 31079}, // __builtin_HEXAGON_S2_clrbit_r
      {Intrinsic::hexagon_S2_ct0, 31109}, // __builtin_HEXAGON_S2_ct0
      {Intrinsic::hexagon_S2_ct0p, 31134}, // __builtin_HEXAGON_S2_ct0p
      {Intrinsic::hexagon_S2_ct1, 31160}, // __builtin_HEXAGON_S2_ct1
      {Intrinsic::hexagon_S2_ct1p, 31185}, // __builtin_HEXAGON_S2_ct1p
      {Intrinsic::hexagon_S2_deinterleave, 31211}, // __builtin_HEXAGON_S2_deinterleave
      {Intrinsic::hexagon_S2_extractu, 31245}, // __builtin_HEXAGON_S2_extractu
      {Intrinsic::hexagon_S2_extractu_rp, 31275}, // __builtin_HEXAGON_S2_extractu_rp
      {Intrinsic::hexagon_S2_extractup, 31308}, // __builtin_HEXAGON_S2_extractup
      {Intrinsic::hexagon_S2_extractup_rp, 31339}, // __builtin_HEXAGON_S2_extractup_rp
      {Intrinsic::hexagon_S2_insert, 31373}, // __builtin_HEXAGON_S2_insert
      {Intrinsic::hexagon_S2_insert_rp, 31401}, // __builtin_HEXAGON_S2_insert_rp
      {Intrinsic::hexagon_S2_insertp, 31432}, // __builtin_HEXAGON_S2_insertp
      {Intrinsic::hexagon_S2_insertp_rp, 31461}, // __builtin_HEXAGON_S2_insertp_rp
      {Intrinsic::hexagon_S2_interleave, 31493}, // __builtin_HEXAGON_S2_interleave
      {Intrinsic::hexagon_S2_lfsp, 31525}, // __builtin_HEXAGON_S2_lfsp
      {Intrinsic::hexagon_S2_lsl_r_p, 31551}, // __builtin_HEXAGON_S2_lsl_r_p
      {Intrinsic::hexagon_S2_lsl_r_p_acc, 31580}, // __builtin_HEXAGON_S2_lsl_r_p_acc
      {Intrinsic::hexagon_S2_lsl_r_p_and, 31613}, // __builtin_HEXAGON_S2_lsl_r_p_and
      {Intrinsic::hexagon_S2_lsl_r_p_nac, 31646}, // __builtin_HEXAGON_S2_lsl_r_p_nac
      {Intrinsic::hexagon_S2_lsl_r_p_or, 31679}, // __builtin_HEXAGON_S2_lsl_r_p_or
      {Intrinsic::hexagon_S2_lsl_r_p_xor, 31711}, // __builtin_HEXAGON_S2_lsl_r_p_xor
      {Intrinsic::hexagon_S2_lsl_r_r, 31744}, // __builtin_HEXAGON_S2_lsl_r_r
      {Intrinsic::hexagon_S2_lsl_r_r_acc, 31773}, // __builtin_HEXAGON_S2_lsl_r_r_acc
      {Intrinsic::hexagon_S2_lsl_r_r_and, 31806}, // __builtin_HEXAGON_S2_lsl_r_r_and
      {Intrinsic::hexagon_S2_lsl_r_r_nac, 31839}, // __builtin_HEXAGON_S2_lsl_r_r_nac
      {Intrinsic::hexagon_S2_lsl_r_r_or, 31872}, // __builtin_HEXAGON_S2_lsl_r_r_or
      {Intrinsic::hexagon_S2_lsl_r_vh, 31904}, // __builtin_HEXAGON_S2_lsl_r_vh
      {Intrinsic::hexagon_S2_lsl_r_vw, 31934}, // __builtin_HEXAGON_S2_lsl_r_vw
      {Intrinsic::hexagon_S2_lsr_i_p, 31964}, // __builtin_HEXAGON_S2_lsr_i_p
      {Intrinsic::hexagon_S2_lsr_i_p_acc, 31993}, // __builtin_HEXAGON_S2_lsr_i_p_acc
      {Intrinsic::hexagon_S2_lsr_i_p_and, 32026}, // __builtin_HEXAGON_S2_lsr_i_p_and
      {Intrinsic::hexagon_S2_lsr_i_p_nac, 32059}, // __builtin_HEXAGON_S2_lsr_i_p_nac
      {Intrinsic::hexagon_S2_lsr_i_p_or, 32092}, // __builtin_HEXAGON_S2_lsr_i_p_or
      {Intrinsic::hexagon_S2_lsr_i_p_xacc, 32124}, // __builtin_HEXAGON_S2_lsr_i_p_xacc
      {Intrinsic::hexagon_S2_lsr_i_r, 32158}, // __builtin_HEXAGON_S2_lsr_i_r
      {Intrinsic::hexagon_S2_lsr_i_r_acc, 32187}, // __builtin_HEXAGON_S2_lsr_i_r_acc
      {Intrinsic::hexagon_S2_lsr_i_r_and, 32220}, // __builtin_HEXAGON_S2_lsr_i_r_and
      {Intrinsic::hexagon_S2_lsr_i_r_nac, 32253}, // __builtin_HEXAGON_S2_lsr_i_r_nac
      {Intrinsic::hexagon_S2_lsr_i_r_or, 32286}, // __builtin_HEXAGON_S2_lsr_i_r_or
      {Intrinsic::hexagon_S2_lsr_i_r_xacc, 32318}, // __builtin_HEXAGON_S2_lsr_i_r_xacc
      {Intrinsic::hexagon_S2_lsr_i_vh, 32352}, // __builtin_HEXAGON_S2_lsr_i_vh
      {Intrinsic::hexagon_S2_lsr_i_vw, 32382}, // __builtin_HEXAGON_S2_lsr_i_vw
      {Intrinsic::hexagon_S2_lsr_r_p, 32412}, // __builtin_HEXAGON_S2_lsr_r_p
      {Intrinsic::hexagon_S2_lsr_r_p_acc, 32441}, // __builtin_HEXAGON_S2_lsr_r_p_acc
      {Intrinsic::hexagon_S2_lsr_r_p_and, 32474}, // __builtin_HEXAGON_S2_lsr_r_p_and
      {Intrinsic::hexagon_S2_lsr_r_p_nac, 32507}, // __builtin_HEXAGON_S2_lsr_r_p_nac
      {Intrinsic::hexagon_S2_lsr_r_p_or, 32540}, // __builtin_HEXAGON_S2_lsr_r_p_or
      {Intrinsic::hexagon_S2_lsr_r_p_xor, 32572}, // __builtin_HEXAGON_S2_lsr_r_p_xor
      {Intrinsic::hexagon_S2_lsr_r_r, 32605}, // __builtin_HEXAGON_S2_lsr_r_r
      {Intrinsic::hexagon_S2_lsr_r_r_acc, 32634}, // __builtin_HEXAGON_S2_lsr_r_r_acc
      {Intrinsic::hexagon_S2_lsr_r_r_and, 32667}, // __builtin_HEXAGON_S2_lsr_r_r_and
      {Intrinsic::hexagon_S2_lsr_r_r_nac, 32700}, // __builtin_HEXAGON_S2_lsr_r_r_nac
      {Intrinsic::hexagon_S2_lsr_r_r_or, 32733}, // __builtin_HEXAGON_S2_lsr_r_r_or
      {Intrinsic::hexagon_S2_lsr_r_vh, 32765}, // __builtin_HEXAGON_S2_lsr_r_vh
      {Intrinsic::hexagon_S2_lsr_r_vw, 32795}, // __builtin_HEXAGON_S2_lsr_r_vw
      {Intrinsic::hexagon_S2_mask, 32825}, // __builtin_HEXAGON_S2_mask
      {Intrinsic::hexagon_S2_packhl, 32851}, // __builtin_HEXAGON_S2_packhl
      {Intrinsic::hexagon_S2_parityp, 32879}, // __builtin_HEXAGON_S2_parityp
      {Intrinsic::hexagon_S2_setbit_i, 32908}, // __builtin_HEXAGON_S2_setbit_i
      {Intrinsic::hexagon_S2_setbit_r, 32938}, // __builtin_HEXAGON_S2_setbit_r
      {Intrinsic::hexagon_S2_shuffeb, 32968}, // __builtin_HEXAGON_S2_shuffeb
      {Intrinsic::hexagon_S2_shuffeh, 32997}, // __builtin_HEXAGON_S2_shuffeh
      {Intrinsic::hexagon_S2_shuffob, 33026}, // __builtin_HEXAGON_S2_shuffob
      {Intrinsic::hexagon_S2_shuffoh, 33055}, // __builtin_HEXAGON_S2_shuffoh
      {Intrinsic::hexagon_S2_storew_locked, 33181}, // __builtin_HEXAGON_S2_storew_locked
      {Intrinsic::hexagon_S2_svsathb, 33216}, // __builtin_HEXAGON_S2_svsathb
      {Intrinsic::hexagon_S2_svsathub, 33245}, // __builtin_HEXAGON_S2_svsathub
      {Intrinsic::hexagon_S2_tableidxb_goodsyntax, 33275}, // __builtin_HEXAGON_S2_tableidxb_goodsyntax
      {Intrinsic::hexagon_S2_tableidxd_goodsyntax, 33317}, // __builtin_HEXAGON_S2_tableidxd_goodsyntax
      {Intrinsic::hexagon_S2_tableidxh_goodsyntax, 33359}, // __builtin_HEXAGON_S2_tableidxh_goodsyntax
      {Intrinsic::hexagon_S2_tableidxw_goodsyntax, 33401}, // __builtin_HEXAGON_S2_tableidxw_goodsyntax
      {Intrinsic::hexagon_S2_togglebit_i, 33443}, // __builtin_HEXAGON_S2_togglebit_i
      {Intrinsic::hexagon_S2_togglebit_r, 33476}, // __builtin_HEXAGON_S2_togglebit_r
      {Intrinsic::hexagon_S2_tstbit_i, 33509}, // __builtin_HEXAGON_S2_tstbit_i
      {Intrinsic::hexagon_S2_tstbit_r, 33539}, // __builtin_HEXAGON_S2_tstbit_r
      {Intrinsic::hexagon_S2_valignib, 33569}, // __builtin_HEXAGON_S2_valignib
      {Intrinsic::hexagon_S2_valignrb, 33599}, // __builtin_HEXAGON_S2_valignrb
      {Intrinsic::hexagon_S2_vcnegh, 33629}, // __builtin_HEXAGON_S2_vcnegh
      {Intrinsic::hexagon_S2_vcrotate, 33657}, // __builtin_HEXAGON_S2_vcrotate
      {Intrinsic::hexagon_S2_vrcnegh, 33687}, // __builtin_HEXAGON_S2_vrcnegh
      {Intrinsic::hexagon_S2_vrndpackwh, 33716}, // __builtin_HEXAGON_S2_vrndpackwh
      {Intrinsic::hexagon_S2_vrndpackwhs, 33748}, // __builtin_HEXAGON_S2_vrndpackwhs
      {Intrinsic::hexagon_S2_vsathb, 33781}, // __builtin_HEXAGON_S2_vsathb
      {Intrinsic::hexagon_S2_vsathb_nopack, 33809}, // __builtin_HEXAGON_S2_vsathb_nopack
      {Intrinsic::hexagon_S2_vsathub, 33844}, // __builtin_HEXAGON_S2_vsathub
      {Intrinsic::hexagon_S2_vsathub_nopack, 33873}, // __builtin_HEXAGON_S2_vsathub_nopack
      {Intrinsic::hexagon_S2_vsatwh, 33909}, // __builtin_HEXAGON_S2_vsatwh
      {Intrinsic::hexagon_S2_vsatwh_nopack, 33937}, // __builtin_HEXAGON_S2_vsatwh_nopack
      {Intrinsic::hexagon_S2_vsatwuh, 33972}, // __builtin_HEXAGON_S2_vsatwuh
      {Intrinsic::hexagon_S2_vsatwuh_nopack, 34001}, // __builtin_HEXAGON_S2_vsatwuh_nopack
      {Intrinsic::hexagon_S2_vsplatrb, 34037}, // __builtin_HEXAGON_S2_vsplatrb
      {Intrinsic::hexagon_S2_vsplatrh, 34067}, // __builtin_HEXAGON_S2_vsplatrh
      {Intrinsic::hexagon_S2_vspliceib, 34097}, // __builtin_HEXAGON_S2_vspliceib
      {Intrinsic::hexagon_S2_vsplicerb, 34128}, // __builtin_HEXAGON_S2_vsplicerb
      {Intrinsic::hexagon_S2_vsxtbh, 34159}, // __builtin_HEXAGON_S2_vsxtbh
      {Intrinsic::hexagon_S2_vsxthw, 34187}, // __builtin_HEXAGON_S2_vsxthw
      {Intrinsic::hexagon_S2_vtrunehb, 34215}, // __builtin_HEXAGON_S2_vtrunehb
      {Intrinsic::hexagon_S2_vtrunewh, 34245}, // __builtin_HEXAGON_S2_vtrunewh
      {Intrinsic::hexagon_S2_vtrunohb, 34275}, // __builtin_HEXAGON_S2_vtrunohb
      {Intrinsic::hexagon_S2_vtrunowh, 34305}, // __builtin_HEXAGON_S2_vtrunowh
      {Intrinsic::hexagon_S2_vzxtbh, 34335}, // __builtin_HEXAGON_S2_vzxtbh
      {Intrinsic::hexagon_S2_vzxthw, 34363}, // __builtin_HEXAGON_S2_vzxthw
      {Intrinsic::hexagon_S4_addaddi, 34391}, // __builtin_HEXAGON_S4_addaddi
      {Intrinsic::hexagon_S4_addi_asl_ri, 34420}, // __builtin_HEXAGON_S4_addi_asl_ri
      {Intrinsic::hexagon_S4_addi_lsr_ri, 34453}, // __builtin_HEXAGON_S4_addi_lsr_ri
      {Intrinsic::hexagon_S4_andi_asl_ri, 34486}, // __builtin_HEXAGON_S4_andi_asl_ri
      {Intrinsic::hexagon_S4_andi_lsr_ri, 34519}, // __builtin_HEXAGON_S4_andi_lsr_ri
      {Intrinsic::hexagon_S4_clbaddi, 34552}, // __builtin_HEXAGON_S4_clbaddi
      {Intrinsic::hexagon_S4_clbpaddi, 34581}, // __builtin_HEXAGON_S4_clbpaddi
      {Intrinsic::hexagon_S4_clbpnorm, 34611}, // __builtin_HEXAGON_S4_clbpnorm
      {Intrinsic::hexagon_S4_extract, 34641}, // __builtin_HEXAGON_S4_extract
      {Intrinsic::hexagon_S4_extract_rp, 34670}, // __builtin_HEXAGON_S4_extract_rp
      {Intrinsic::hexagon_S4_extractp, 34702}, // __builtin_HEXAGON_S4_extractp
      {Intrinsic::hexagon_S4_extractp_rp, 34732}, // __builtin_HEXAGON_S4_extractp_rp
      {Intrinsic::hexagon_S4_lsli, 34765}, // __builtin_HEXAGON_S4_lsli
      {Intrinsic::hexagon_S4_ntstbit_i, 34791}, // __builtin_HEXAGON_S4_ntstbit_i
      {Intrinsic::hexagon_S4_ntstbit_r, 34822}, // __builtin_HEXAGON_S4_ntstbit_r
      {Intrinsic::hexagon_S4_or_andi, 34853}, // __builtin_HEXAGON_S4_or_andi
      {Intrinsic::hexagon_S4_or_andix, 34882}, // __builtin_HEXAGON_S4_or_andix
      {Intrinsic::hexagon_S4_or_ori, 34912}, // __builtin_HEXAGON_S4_or_ori
      {Intrinsic::hexagon_S4_ori_asl_ri, 34940}, // __builtin_HEXAGON_S4_ori_asl_ri
      {Intrinsic::hexagon_S4_ori_lsr_ri, 34972}, // __builtin_HEXAGON_S4_ori_lsr_ri
      {Intrinsic::hexagon_S4_parity, 35004}, // __builtin_HEXAGON_S4_parity
      {Intrinsic::hexagon_S4_stored_locked, 35032}, // __builtin_HEXAGON_S4_stored_locked
      {Intrinsic::hexagon_S4_subaddi, 35067}, // __builtin_HEXAGON_S4_subaddi
      {Intrinsic::hexagon_S4_subi_asl_ri, 35096}, // __builtin_HEXAGON_S4_subi_asl_ri
      {Intrinsic::hexagon_S4_subi_lsr_ri, 35129}, // __builtin_HEXAGON_S4_subi_lsr_ri
      {Intrinsic::hexagon_S4_vrcrotate, 35162}, // __builtin_HEXAGON_S4_vrcrotate
      {Intrinsic::hexagon_S4_vrcrotate_acc, 35193}, // __builtin_HEXAGON_S4_vrcrotate_acc
      {Intrinsic::hexagon_S4_vxaddsubh, 35228}, // __builtin_HEXAGON_S4_vxaddsubh
      {Intrinsic::hexagon_S4_vxaddsubhr, 35259}, // __builtin_HEXAGON_S4_vxaddsubhr
      {Intrinsic::hexagon_S4_vxaddsubw, 35291}, // __builtin_HEXAGON_S4_vxaddsubw
      {Intrinsic::hexagon_S4_vxsubaddh, 35322}, // __builtin_HEXAGON_S4_vxsubaddh
      {Intrinsic::hexagon_S4_vxsubaddhr, 35353}, // __builtin_HEXAGON_S4_vxsubaddhr
      {Intrinsic::hexagon_S4_vxsubaddw, 35385}, // __builtin_HEXAGON_S4_vxsubaddw
      {Intrinsic::hexagon_S5_asrhub_rnd_sat_goodsyntax, 35416}, // __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax
      {Intrinsic::hexagon_S5_asrhub_sat, 35463}, // __builtin_HEXAGON_S5_asrhub_sat
      {Intrinsic::hexagon_S5_popcountp, 35495}, // __builtin_HEXAGON_S5_popcountp
      {Intrinsic::hexagon_S5_vasrhrnd_goodsyntax, 35526}, // __builtin_HEXAGON_S5_vasrhrnd_goodsyntax
      {Intrinsic::hexagon_S6_rol_i_p, 35567}, // __builtin_HEXAGON_S6_rol_i_p
      {Intrinsic::hexagon_S6_rol_i_p_acc, 35596}, // __builtin_HEXAGON_S6_rol_i_p_acc
      {Intrinsic::hexagon_S6_rol_i_p_and, 35629}, // __builtin_HEXAGON_S6_rol_i_p_and
      {Intrinsic::hexagon_S6_rol_i_p_nac, 35662}, // __builtin_HEXAGON_S6_rol_i_p_nac
      {Intrinsic::hexagon_S6_rol_i_p_or, 35695}, // __builtin_HEXAGON_S6_rol_i_p_or
      {Intrinsic::hexagon_S6_rol_i_p_xacc, 35727}, // __builtin_HEXAGON_S6_rol_i_p_xacc
      {Intrinsic::hexagon_S6_rol_i_r, 35761}, // __builtin_HEXAGON_S6_rol_i_r
      {Intrinsic::hexagon_S6_rol_i_r_acc, 35790}, // __builtin_HEXAGON_S6_rol_i_r_acc
      {Intrinsic::hexagon_S6_rol_i_r_and, 35823}, // __builtin_HEXAGON_S6_rol_i_r_and
      {Intrinsic::hexagon_S6_rol_i_r_nac, 35856}, // __builtin_HEXAGON_S6_rol_i_r_nac
      {Intrinsic::hexagon_S6_rol_i_r_or, 35889}, // __builtin_HEXAGON_S6_rol_i_r_or
      {Intrinsic::hexagon_S6_rol_i_r_xacc, 35921}, // __builtin_HEXAGON_S6_rol_i_r_xacc
      {Intrinsic::hexagon_S6_vsplatrbp, 35955}, // __builtin_HEXAGON_S6_vsplatrbp
      {Intrinsic::hexagon_S6_vtrunehb_ppp, 35986}, // __builtin_HEXAGON_S6_vtrunehb_ppp
      {Intrinsic::hexagon_S6_vtrunohb_ppp, 36020}, // __builtin_HEXAGON_S6_vtrunohb_ppp
      {Intrinsic::hexagon_V6_extractw, 36054}, // __builtin_HEXAGON_V6_extractw
      {Intrinsic::hexagon_V6_extractw_128B, 36084}, // __builtin_HEXAGON_V6_extractw_128B
      {Intrinsic::hexagon_V6_hi, 36119}, // __builtin_HEXAGON_V6_hi
      {Intrinsic::hexagon_V6_hi_128B, 36143}, // __builtin_HEXAGON_V6_hi_128B
      {Intrinsic::hexagon_V6_lo, 36172}, // __builtin_HEXAGON_V6_lo
      {Intrinsic::hexagon_V6_lo_128B, 36196}, // __builtin_HEXAGON_V6_lo_128B
      {Intrinsic::hexagon_V6_lvsplatb, 36225}, // __builtin_HEXAGON_V6_lvsplatb
      {Intrinsic::hexagon_V6_lvsplatb_128B, 36255}, // __builtin_HEXAGON_V6_lvsplatb_128B
      {Intrinsic::hexagon_V6_lvsplath, 36290}, // __builtin_HEXAGON_V6_lvsplath
      {Intrinsic::hexagon_V6_lvsplath_128B, 36320}, // __builtin_HEXAGON_V6_lvsplath_128B
      {Intrinsic::hexagon_V6_lvsplatw, 36355}, // __builtin_HEXAGON_V6_lvsplatw
      {Intrinsic::hexagon_V6_lvsplatw_128B, 36385}, // __builtin_HEXAGON_V6_lvsplatw_128B
      {Intrinsic::hexagon_V6_pred_and, 36420}, // __builtin_HEXAGON_V6_pred_and
      {Intrinsic::hexagon_V6_pred_and_128B, 36450}, // __builtin_HEXAGON_V6_pred_and_128B
      {Intrinsic::hexagon_V6_pred_and_n, 36485}, // __builtin_HEXAGON_V6_pred_and_n
      {Intrinsic::hexagon_V6_pred_and_n_128B, 36517}, // __builtin_HEXAGON_V6_pred_and_n_128B
      {Intrinsic::hexagon_V6_pred_not, 36554}, // __builtin_HEXAGON_V6_pred_not
      {Intrinsic::hexagon_V6_pred_not_128B, 36584}, // __builtin_HEXAGON_V6_pred_not_128B
      {Intrinsic::hexagon_V6_pred_or, 36619}, // __builtin_HEXAGON_V6_pred_or
      {Intrinsic::hexagon_V6_pred_or_128B, 36648}, // __builtin_HEXAGON_V6_pred_or_128B
      {Intrinsic::hexagon_V6_pred_or_n, 36682}, // __builtin_HEXAGON_V6_pred_or_n
      {Intrinsic::hexagon_V6_pred_or_n_128B, 36713}, // __builtin_HEXAGON_V6_pred_or_n_128B
      {Intrinsic::hexagon_V6_pred_scalar2, 36749}, // __builtin_HEXAGON_V6_pred_scalar2
      {Intrinsic::hexagon_V6_pred_scalar2_128B, 36783}, // __builtin_HEXAGON_V6_pred_scalar2_128B
      {Intrinsic::hexagon_V6_pred_scalar2v2, 36822}, // __builtin_HEXAGON_V6_pred_scalar2v2
      {Intrinsic::hexagon_V6_pred_scalar2v2_128B, 36858}, // __builtin_HEXAGON_V6_pred_scalar2v2_128B
      {Intrinsic::hexagon_V6_pred_xor, 36899}, // __builtin_HEXAGON_V6_pred_xor
      {Intrinsic::hexagon_V6_pred_xor_128B, 36929}, // __builtin_HEXAGON_V6_pred_xor_128B
      {Intrinsic::hexagon_V6_shuffeqh, 36964}, // __builtin_HEXAGON_V6_shuffeqh
      {Intrinsic::hexagon_V6_shuffeqh_128B, 36994}, // __builtin_HEXAGON_V6_shuffeqh_128B
      {Intrinsic::hexagon_V6_shuffeqw, 37029}, // __builtin_HEXAGON_V6_shuffeqw
      {Intrinsic::hexagon_V6_shuffeqw_128B, 37059}, // __builtin_HEXAGON_V6_shuffeqw_128B
      {Intrinsic::hexagon_V6_v6mpyhubs10, 37094}, // __builtin_HEXAGON_V6_v6mpyhubs10
      {Intrinsic::hexagon_V6_v6mpyhubs10_128B, 37127}, // __builtin_HEXAGON_V6_v6mpyhubs10_128B
      {Intrinsic::hexagon_V6_v6mpyhubs10_vxx, 37165}, // __builtin_HEXAGON_V6_v6mpyhubs10_vxx
      {Intrinsic::hexagon_V6_v6mpyhubs10_vxx_128B, 37202}, // __builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B
      {Intrinsic::hexagon_V6_v6mpyvubs10, 37244}, // __builtin_HEXAGON_V6_v6mpyvubs10
      {Intrinsic::hexagon_V6_v6mpyvubs10_128B, 37277}, // __builtin_HEXAGON_V6_v6mpyvubs10_128B
      {Intrinsic::hexagon_V6_v6mpyvubs10_vxx, 37315}, // __builtin_HEXAGON_V6_v6mpyvubs10_vxx
      {Intrinsic::hexagon_V6_v6mpyvubs10_vxx_128B, 37352}, // __builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B
      {Intrinsic::hexagon_V6_vS32b_nqpred_ai, 37394}, // __builtin_HEXAGON_V6_vS32b_nqpred_ai
      {Intrinsic::hexagon_V6_vS32b_nqpred_ai_128B, 37431}, // __builtin_HEXAGON_V6_vS32b_nqpred_ai_128B
      {Intrinsic::hexagon_V6_vS32b_nt_nqpred_ai, 37473}, // __builtin_HEXAGON_V6_vS32b_nt_nqpred_ai
      {Intrinsic::hexagon_V6_vS32b_nt_nqpred_ai_128B, 37513}, // __builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B
      {Intrinsic::hexagon_V6_vS32b_nt_qpred_ai, 37558}, // __builtin_HEXAGON_V6_vS32b_nt_qpred_ai
      {Intrinsic::hexagon_V6_vS32b_nt_qpred_ai_128B, 37597}, // __builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B
      {Intrinsic::hexagon_V6_vS32b_qpred_ai, 37641}, // __builtin_HEXAGON_V6_vS32b_qpred_ai
      {Intrinsic::hexagon_V6_vS32b_qpred_ai_128B, 37677}, // __builtin_HEXAGON_V6_vS32b_qpred_ai_128B
      {Intrinsic::hexagon_V6_vabs_hf, 37718}, // __builtin_HEXAGON_V6_vabs_hf
      {Intrinsic::hexagon_V6_vabs_hf_128B, 37747}, // __builtin_HEXAGON_V6_vabs_hf_128B
      {Intrinsic::hexagon_V6_vabs_sf, 37781}, // __builtin_HEXAGON_V6_vabs_sf
      {Intrinsic::hexagon_V6_vabs_sf_128B, 37810}, // __builtin_HEXAGON_V6_vabs_sf_128B
      {Intrinsic::hexagon_V6_vabsb, 37844}, // __builtin_HEXAGON_V6_vabsb
      {Intrinsic::hexagon_V6_vabsb_128B, 37871}, // __builtin_HEXAGON_V6_vabsb_128B
      {Intrinsic::hexagon_V6_vabsb_sat, 37903}, // __builtin_HEXAGON_V6_vabsb_sat
      {Intrinsic::hexagon_V6_vabsb_sat_128B, 37934}, // __builtin_HEXAGON_V6_vabsb_sat_128B
      {Intrinsic::hexagon_V6_vabsdiffh, 37970}, // __builtin_HEXAGON_V6_vabsdiffh
      {Intrinsic::hexagon_V6_vabsdiffh_128B, 38001}, // __builtin_HEXAGON_V6_vabsdiffh_128B
      {Intrinsic::hexagon_V6_vabsdiffub, 38037}, // __builtin_HEXAGON_V6_vabsdiffub
      {Intrinsic::hexagon_V6_vabsdiffub_128B, 38069}, // __builtin_HEXAGON_V6_vabsdiffub_128B
      {Intrinsic::hexagon_V6_vabsdiffuh, 38106}, // __builtin_HEXAGON_V6_vabsdiffuh
      {Intrinsic::hexagon_V6_vabsdiffuh_128B, 38138}, // __builtin_HEXAGON_V6_vabsdiffuh_128B
      {Intrinsic::hexagon_V6_vabsdiffw, 38175}, // __builtin_HEXAGON_V6_vabsdiffw
      {Intrinsic::hexagon_V6_vabsdiffw_128B, 38206}, // __builtin_HEXAGON_V6_vabsdiffw_128B
      {Intrinsic::hexagon_V6_vabsh, 38242}, // __builtin_HEXAGON_V6_vabsh
      {Intrinsic::hexagon_V6_vabsh_128B, 38269}, // __builtin_HEXAGON_V6_vabsh_128B
      {Intrinsic::hexagon_V6_vabsh_sat, 38301}, // __builtin_HEXAGON_V6_vabsh_sat
      {Intrinsic::hexagon_V6_vabsh_sat_128B, 38332}, // __builtin_HEXAGON_V6_vabsh_sat_128B
      {Intrinsic::hexagon_V6_vabsw, 38368}, // __builtin_HEXAGON_V6_vabsw
      {Intrinsic::hexagon_V6_vabsw_128B, 38395}, // __builtin_HEXAGON_V6_vabsw_128B
      {Intrinsic::hexagon_V6_vabsw_sat, 38427}, // __builtin_HEXAGON_V6_vabsw_sat
      {Intrinsic::hexagon_V6_vabsw_sat_128B, 38458}, // __builtin_HEXAGON_V6_vabsw_sat_128B
      {Intrinsic::hexagon_V6_vadd_hf, 38494}, // __builtin_HEXAGON_V6_vadd_hf
      {Intrinsic::hexagon_V6_vadd_hf_128B, 38523}, // __builtin_HEXAGON_V6_vadd_hf_128B
      {Intrinsic::hexagon_V6_vadd_hf_hf, 38557}, // __builtin_HEXAGON_V6_vadd_hf_hf
      {Intrinsic::hexagon_V6_vadd_hf_hf_128B, 38589}, // __builtin_HEXAGON_V6_vadd_hf_hf_128B
      {Intrinsic::hexagon_V6_vadd_qf16, 38626}, // __builtin_HEXAGON_V6_vadd_qf16
      {Intrinsic::hexagon_V6_vadd_qf16_128B, 38657}, // __builtin_HEXAGON_V6_vadd_qf16_128B
      {Intrinsic::hexagon_V6_vadd_qf16_mix, 38693}, // __builtin_HEXAGON_V6_vadd_qf16_mix
      {Intrinsic::hexagon_V6_vadd_qf16_mix_128B, 38728}, // __builtin_HEXAGON_V6_vadd_qf16_mix_128B
      {Intrinsic::hexagon_V6_vadd_qf32, 38768}, // __builtin_HEXAGON_V6_vadd_qf32
      {Intrinsic::hexagon_V6_vadd_qf32_128B, 38799}, // __builtin_HEXAGON_V6_vadd_qf32_128B
      {Intrinsic::hexagon_V6_vadd_qf32_mix, 38835}, // __builtin_HEXAGON_V6_vadd_qf32_mix
      {Intrinsic::hexagon_V6_vadd_qf32_mix_128B, 38870}, // __builtin_HEXAGON_V6_vadd_qf32_mix_128B
      {Intrinsic::hexagon_V6_vadd_sf, 38910}, // __builtin_HEXAGON_V6_vadd_sf
      {Intrinsic::hexagon_V6_vadd_sf_128B, 38939}, // __builtin_HEXAGON_V6_vadd_sf_128B
      {Intrinsic::hexagon_V6_vadd_sf_bf, 38973}, // __builtin_HEXAGON_V6_vadd_sf_bf
      {Intrinsic::hexagon_V6_vadd_sf_bf_128B, 39005}, // __builtin_HEXAGON_V6_vadd_sf_bf_128B
      {Intrinsic::hexagon_V6_vadd_sf_hf, 39042}, // __builtin_HEXAGON_V6_vadd_sf_hf
      {Intrinsic::hexagon_V6_vadd_sf_hf_128B, 39074}, // __builtin_HEXAGON_V6_vadd_sf_hf_128B
      {Intrinsic::hexagon_V6_vadd_sf_sf, 39111}, // __builtin_HEXAGON_V6_vadd_sf_sf
      {Intrinsic::hexagon_V6_vadd_sf_sf_128B, 39143}, // __builtin_HEXAGON_V6_vadd_sf_sf_128B
      {Intrinsic::hexagon_V6_vaddb, 39180}, // __builtin_HEXAGON_V6_vaddb
      {Intrinsic::hexagon_V6_vaddb_128B, 39207}, // __builtin_HEXAGON_V6_vaddb_128B
      {Intrinsic::hexagon_V6_vaddb_dv, 39239}, // __builtin_HEXAGON_V6_vaddb_dv
      {Intrinsic::hexagon_V6_vaddb_dv_128B, 39269}, // __builtin_HEXAGON_V6_vaddb_dv_128B
      {Intrinsic::hexagon_V6_vaddbnq, 39304}, // __builtin_HEXAGON_V6_vaddbnq
      {Intrinsic::hexagon_V6_vaddbnq_128B, 39333}, // __builtin_HEXAGON_V6_vaddbnq_128B
      {Intrinsic::hexagon_V6_vaddbq, 39367}, // __builtin_HEXAGON_V6_vaddbq
      {Intrinsic::hexagon_V6_vaddbq_128B, 39395}, // __builtin_HEXAGON_V6_vaddbq_128B
      {Intrinsic::hexagon_V6_vaddbsat, 39428}, // __builtin_HEXAGON_V6_vaddbsat
      {Intrinsic::hexagon_V6_vaddbsat_128B, 39458}, // __builtin_HEXAGON_V6_vaddbsat_128B
      {Intrinsic::hexagon_V6_vaddbsat_dv, 39493}, // __builtin_HEXAGON_V6_vaddbsat_dv
      {Intrinsic::hexagon_V6_vaddbsat_dv_128B, 39526}, // __builtin_HEXAGON_V6_vaddbsat_dv_128B
      {Intrinsic::hexagon_V6_vaddcarrysat, 39564}, // __builtin_HEXAGON_V6_vaddcarrysat
      {Intrinsic::hexagon_V6_vaddcarrysat_128B, 39598}, // __builtin_HEXAGON_V6_vaddcarrysat_128B
      {Intrinsic::hexagon_V6_vaddclbh, 39637}, // __builtin_HEXAGON_V6_vaddclbh
      {Intrinsic::hexagon_V6_vaddclbh_128B, 39667}, // __builtin_HEXAGON_V6_vaddclbh_128B
      {Intrinsic::hexagon_V6_vaddclbw, 39702}, // __builtin_HEXAGON_V6_vaddclbw
      {Intrinsic::hexagon_V6_vaddclbw_128B, 39732}, // __builtin_HEXAGON_V6_vaddclbw_128B
      {Intrinsic::hexagon_V6_vaddh, 39767}, // __builtin_HEXAGON_V6_vaddh
      {Intrinsic::hexagon_V6_vaddh_128B, 39794}, // __builtin_HEXAGON_V6_vaddh_128B
      {Intrinsic::hexagon_V6_vaddh_dv, 39826}, // __builtin_HEXAGON_V6_vaddh_dv
      {Intrinsic::hexagon_V6_vaddh_dv_128B, 39856}, // __builtin_HEXAGON_V6_vaddh_dv_128B
      {Intrinsic::hexagon_V6_vaddhnq, 39891}, // __builtin_HEXAGON_V6_vaddhnq
      {Intrinsic::hexagon_V6_vaddhnq_128B, 39920}, // __builtin_HEXAGON_V6_vaddhnq_128B
      {Intrinsic::hexagon_V6_vaddhq, 39954}, // __builtin_HEXAGON_V6_vaddhq
      {Intrinsic::hexagon_V6_vaddhq_128B, 39982}, // __builtin_HEXAGON_V6_vaddhq_128B
      {Intrinsic::hexagon_V6_vaddhsat, 40015}, // __builtin_HEXAGON_V6_vaddhsat
      {Intrinsic::hexagon_V6_vaddhsat_128B, 40045}, // __builtin_HEXAGON_V6_vaddhsat_128B
      {Intrinsic::hexagon_V6_vaddhsat_dv, 40080}, // __builtin_HEXAGON_V6_vaddhsat_dv
      {Intrinsic::hexagon_V6_vaddhsat_dv_128B, 40113}, // __builtin_HEXAGON_V6_vaddhsat_dv_128B
      {Intrinsic::hexagon_V6_vaddhw, 40151}, // __builtin_HEXAGON_V6_vaddhw
      {Intrinsic::hexagon_V6_vaddhw_128B, 40179}, // __builtin_HEXAGON_V6_vaddhw_128B
      {Intrinsic::hexagon_V6_vaddhw_acc, 40212}, // __builtin_HEXAGON_V6_vaddhw_acc
      {Intrinsic::hexagon_V6_vaddhw_acc_128B, 40244}, // __builtin_HEXAGON_V6_vaddhw_acc_128B
      {Intrinsic::hexagon_V6_vaddubh, 40281}, // __builtin_HEXAGON_V6_vaddubh
      {Intrinsic::hexagon_V6_vaddubh_128B, 40310}, // __builtin_HEXAGON_V6_vaddubh_128B
      {Intrinsic::hexagon_V6_vaddubh_acc, 40344}, // __builtin_HEXAGON_V6_vaddubh_acc
      {Intrinsic::hexagon_V6_vaddubh_acc_128B, 40377}, // __builtin_HEXAGON_V6_vaddubh_acc_128B
      {Intrinsic::hexagon_V6_vaddubsat, 40415}, // __builtin_HEXAGON_V6_vaddubsat
      {Intrinsic::hexagon_V6_vaddubsat_128B, 40446}, // __builtin_HEXAGON_V6_vaddubsat_128B
      {Intrinsic::hexagon_V6_vaddubsat_dv, 40482}, // __builtin_HEXAGON_V6_vaddubsat_dv
      {Intrinsic::hexagon_V6_vaddubsat_dv_128B, 40516}, // __builtin_HEXAGON_V6_vaddubsat_dv_128B
      {Intrinsic::hexagon_V6_vaddububb_sat, 40555}, // __builtin_HEXAGON_V6_vaddububb_sat
      {Intrinsic::hexagon_V6_vaddububb_sat_128B, 40590}, // __builtin_HEXAGON_V6_vaddububb_sat_128B
      {Intrinsic::hexagon_V6_vadduhsat, 40630}, // __builtin_HEXAGON_V6_vadduhsat
      {Intrinsic::hexagon_V6_vadduhsat_128B, 40661}, // __builtin_HEXAGON_V6_vadduhsat_128B
      {Intrinsic::hexagon_V6_vadduhsat_dv, 40697}, // __builtin_HEXAGON_V6_vadduhsat_dv
      {Intrinsic::hexagon_V6_vadduhsat_dv_128B, 40731}, // __builtin_HEXAGON_V6_vadduhsat_dv_128B
      {Intrinsic::hexagon_V6_vadduhw, 40770}, // __builtin_HEXAGON_V6_vadduhw
      {Intrinsic::hexagon_V6_vadduhw_128B, 40799}, // __builtin_HEXAGON_V6_vadduhw_128B
      {Intrinsic::hexagon_V6_vadduhw_acc, 40833}, // __builtin_HEXAGON_V6_vadduhw_acc
      {Intrinsic::hexagon_V6_vadduhw_acc_128B, 40866}, // __builtin_HEXAGON_V6_vadduhw_acc_128B
      {Intrinsic::hexagon_V6_vadduwsat, 40904}, // __builtin_HEXAGON_V6_vadduwsat
      {Intrinsic::hexagon_V6_vadduwsat_128B, 40935}, // __builtin_HEXAGON_V6_vadduwsat_128B
      {Intrinsic::hexagon_V6_vadduwsat_dv, 40971}, // __builtin_HEXAGON_V6_vadduwsat_dv
      {Intrinsic::hexagon_V6_vadduwsat_dv_128B, 41005}, // __builtin_HEXAGON_V6_vadduwsat_dv_128B
      {Intrinsic::hexagon_V6_vaddw, 41044}, // __builtin_HEXAGON_V6_vaddw
      {Intrinsic::hexagon_V6_vaddw_128B, 41071}, // __builtin_HEXAGON_V6_vaddw_128B
      {Intrinsic::hexagon_V6_vaddw_dv, 41103}, // __builtin_HEXAGON_V6_vaddw_dv
      {Intrinsic::hexagon_V6_vaddw_dv_128B, 41133}, // __builtin_HEXAGON_V6_vaddw_dv_128B
      {Intrinsic::hexagon_V6_vaddwnq, 41168}, // __builtin_HEXAGON_V6_vaddwnq
      {Intrinsic::hexagon_V6_vaddwnq_128B, 41197}, // __builtin_HEXAGON_V6_vaddwnq_128B
      {Intrinsic::hexagon_V6_vaddwq, 41231}, // __builtin_HEXAGON_V6_vaddwq
      {Intrinsic::hexagon_V6_vaddwq_128B, 41259}, // __builtin_HEXAGON_V6_vaddwq_128B
      {Intrinsic::hexagon_V6_vaddwsat, 41292}, // __builtin_HEXAGON_V6_vaddwsat
      {Intrinsic::hexagon_V6_vaddwsat_128B, 41322}, // __builtin_HEXAGON_V6_vaddwsat_128B
      {Intrinsic::hexagon_V6_vaddwsat_dv, 41357}, // __builtin_HEXAGON_V6_vaddwsat_dv
      {Intrinsic::hexagon_V6_vaddwsat_dv_128B, 41390}, // __builtin_HEXAGON_V6_vaddwsat_dv_128B
      {Intrinsic::hexagon_V6_valignb, 41428}, // __builtin_HEXAGON_V6_valignb
      {Intrinsic::hexagon_V6_valignb_128B, 41457}, // __builtin_HEXAGON_V6_valignb_128B
      {Intrinsic::hexagon_V6_valignbi, 41491}, // __builtin_HEXAGON_V6_valignbi
      {Intrinsic::hexagon_V6_valignbi_128B, 41521}, // __builtin_HEXAGON_V6_valignbi_128B
      {Intrinsic::hexagon_V6_vand, 41556}, // __builtin_HEXAGON_V6_vand
      {Intrinsic::hexagon_V6_vand_128B, 41582}, // __builtin_HEXAGON_V6_vand_128B
      {Intrinsic::hexagon_V6_vandnqrt, 41613}, // __builtin_HEXAGON_V6_vandnqrt
      {Intrinsic::hexagon_V6_vandnqrt_128B, 41643}, // __builtin_HEXAGON_V6_vandnqrt_128B
      {Intrinsic::hexagon_V6_vandnqrt_acc, 41678}, // __builtin_HEXAGON_V6_vandnqrt_acc
      {Intrinsic::hexagon_V6_vandnqrt_acc_128B, 41712}, // __builtin_HEXAGON_V6_vandnqrt_acc_128B
      {Intrinsic::hexagon_V6_vandqrt, 41751}, // __builtin_HEXAGON_V6_vandqrt
      {Intrinsic::hexagon_V6_vandqrt_128B, 41780}, // __builtin_HEXAGON_V6_vandqrt_128B
      {Intrinsic::hexagon_V6_vandqrt_acc, 41814}, // __builtin_HEXAGON_V6_vandqrt_acc
      {Intrinsic::hexagon_V6_vandqrt_acc_128B, 41847}, // __builtin_HEXAGON_V6_vandqrt_acc_128B
      {Intrinsic::hexagon_V6_vandvnqv, 41885}, // __builtin_HEXAGON_V6_vandvnqv
      {Intrinsic::hexagon_V6_vandvnqv_128B, 41915}, // __builtin_HEXAGON_V6_vandvnqv_128B
      {Intrinsic::hexagon_V6_vandvqv, 41950}, // __builtin_HEXAGON_V6_vandvqv
      {Intrinsic::hexagon_V6_vandvqv_128B, 41979}, // __builtin_HEXAGON_V6_vandvqv_128B
      {Intrinsic::hexagon_V6_vandvrt, 42013}, // __builtin_HEXAGON_V6_vandvrt
      {Intrinsic::hexagon_V6_vandvrt_128B, 42042}, // __builtin_HEXAGON_V6_vandvrt_128B
      {Intrinsic::hexagon_V6_vandvrt_acc, 42076}, // __builtin_HEXAGON_V6_vandvrt_acc
      {Intrinsic::hexagon_V6_vandvrt_acc_128B, 42109}, // __builtin_HEXAGON_V6_vandvrt_acc_128B
      {Intrinsic::hexagon_V6_vaslh, 42147}, // __builtin_HEXAGON_V6_vaslh
      {Intrinsic::hexagon_V6_vaslh_128B, 42174}, // __builtin_HEXAGON_V6_vaslh_128B
      {Intrinsic::hexagon_V6_vaslh_acc, 42206}, // __builtin_HEXAGON_V6_vaslh_acc
      {Intrinsic::hexagon_V6_vaslh_acc_128B, 42237}, // __builtin_HEXAGON_V6_vaslh_acc_128B
      {Intrinsic::hexagon_V6_vaslhv, 42273}, // __builtin_HEXAGON_V6_vaslhv
      {Intrinsic::hexagon_V6_vaslhv_128B, 42301}, // __builtin_HEXAGON_V6_vaslhv_128B
      {Intrinsic::hexagon_V6_vaslw, 42334}, // __builtin_HEXAGON_V6_vaslw
      {Intrinsic::hexagon_V6_vaslw_128B, 42361}, // __builtin_HEXAGON_V6_vaslw_128B
      {Intrinsic::hexagon_V6_vaslw_acc, 42393}, // __builtin_HEXAGON_V6_vaslw_acc
      {Intrinsic::hexagon_V6_vaslw_acc_128B, 42424}, // __builtin_HEXAGON_V6_vaslw_acc_128B
      {Intrinsic::hexagon_V6_vaslwv, 42460}, // __builtin_HEXAGON_V6_vaslwv
      {Intrinsic::hexagon_V6_vaslwv_128B, 42488}, // __builtin_HEXAGON_V6_vaslwv_128B
      {Intrinsic::hexagon_V6_vasr_into, 42521}, // __builtin_HEXAGON_V6_vasr_into
      {Intrinsic::hexagon_V6_vasr_into_128B, 42552}, // __builtin_HEXAGON_V6_vasr_into_128B
      {Intrinsic::hexagon_V6_vasrh, 42588}, // __builtin_HEXAGON_V6_vasrh
      {Intrinsic::hexagon_V6_vasrh_128B, 42615}, // __builtin_HEXAGON_V6_vasrh_128B
      {Intrinsic::hexagon_V6_vasrh_acc, 42647}, // __builtin_HEXAGON_V6_vasrh_acc
      {Intrinsic::hexagon_V6_vasrh_acc_128B, 42678}, // __builtin_HEXAGON_V6_vasrh_acc_128B
      {Intrinsic::hexagon_V6_vasrhbrndsat, 42714}, // __builtin_HEXAGON_V6_vasrhbrndsat
      {Intrinsic::hexagon_V6_vasrhbrndsat_128B, 42748}, // __builtin_HEXAGON_V6_vasrhbrndsat_128B
      {Intrinsic::hexagon_V6_vasrhbsat, 42787}, // __builtin_HEXAGON_V6_vasrhbsat
      {Intrinsic::hexagon_V6_vasrhbsat_128B, 42818}, // __builtin_HEXAGON_V6_vasrhbsat_128B
      {Intrinsic::hexagon_V6_vasrhubrndsat, 42854}, // __builtin_HEXAGON_V6_vasrhubrndsat
      {Intrinsic::hexagon_V6_vasrhubrndsat_128B, 42889}, // __builtin_HEXAGON_V6_vasrhubrndsat_128B
      {Intrinsic::hexagon_V6_vasrhubsat, 42929}, // __builtin_HEXAGON_V6_vasrhubsat
      {Intrinsic::hexagon_V6_vasrhubsat_128B, 42961}, // __builtin_HEXAGON_V6_vasrhubsat_128B
      {Intrinsic::hexagon_V6_vasrhv, 42998}, // __builtin_HEXAGON_V6_vasrhv
      {Intrinsic::hexagon_V6_vasrhv_128B, 43026}, // __builtin_HEXAGON_V6_vasrhv_128B
      {Intrinsic::hexagon_V6_vasruhubrndsat, 43059}, // __builtin_HEXAGON_V6_vasruhubrndsat
      {Intrinsic::hexagon_V6_vasruhubrndsat_128B, 43095}, // __builtin_HEXAGON_V6_vasruhubrndsat_128B
      {Intrinsic::hexagon_V6_vasruhubsat, 43136}, // __builtin_HEXAGON_V6_vasruhubsat
      {Intrinsic::hexagon_V6_vasruhubsat_128B, 43169}, // __builtin_HEXAGON_V6_vasruhubsat_128B
      {Intrinsic::hexagon_V6_vasruwuhrndsat, 43207}, // __builtin_HEXAGON_V6_vasruwuhrndsat
      {Intrinsic::hexagon_V6_vasruwuhrndsat_128B, 43243}, // __builtin_HEXAGON_V6_vasruwuhrndsat_128B
      {Intrinsic::hexagon_V6_vasruwuhsat, 43284}, // __builtin_HEXAGON_V6_vasruwuhsat
      {Intrinsic::hexagon_V6_vasruwuhsat_128B, 43317}, // __builtin_HEXAGON_V6_vasruwuhsat_128B
      {Intrinsic::hexagon_V6_vasrvuhubrndsat, 43355}, // __builtin_HEXAGON_V6_vasrvuhubrndsat
      {Intrinsic::hexagon_V6_vasrvuhubrndsat_128B, 43392}, // __builtin_HEXAGON_V6_vasrvuhubrndsat_128B
      {Intrinsic::hexagon_V6_vasrvuhubsat, 43434}, // __builtin_HEXAGON_V6_vasrvuhubsat
      {Intrinsic::hexagon_V6_vasrvuhubsat_128B, 43468}, // __builtin_HEXAGON_V6_vasrvuhubsat_128B
      {Intrinsic::hexagon_V6_vasrvwuhrndsat, 43507}, // __builtin_HEXAGON_V6_vasrvwuhrndsat
      {Intrinsic::hexagon_V6_vasrvwuhrndsat_128B, 43543}, // __builtin_HEXAGON_V6_vasrvwuhrndsat_128B
      {Intrinsic::hexagon_V6_vasrvwuhsat, 43584}, // __builtin_HEXAGON_V6_vasrvwuhsat
      {Intrinsic::hexagon_V6_vasrvwuhsat_128B, 43617}, // __builtin_HEXAGON_V6_vasrvwuhsat_128B
      {Intrinsic::hexagon_V6_vasrw, 43655}, // __builtin_HEXAGON_V6_vasrw
      {Intrinsic::hexagon_V6_vasrw_128B, 43682}, // __builtin_HEXAGON_V6_vasrw_128B
      {Intrinsic::hexagon_V6_vasrw_acc, 43714}, // __builtin_HEXAGON_V6_vasrw_acc
      {Intrinsic::hexagon_V6_vasrw_acc_128B, 43745}, // __builtin_HEXAGON_V6_vasrw_acc_128B
      {Intrinsic::hexagon_V6_vasrwh, 43781}, // __builtin_HEXAGON_V6_vasrwh
      {Intrinsic::hexagon_V6_vasrwh_128B, 43809}, // __builtin_HEXAGON_V6_vasrwh_128B
      {Intrinsic::hexagon_V6_vasrwhrndsat, 43842}, // __builtin_HEXAGON_V6_vasrwhrndsat
      {Intrinsic::hexagon_V6_vasrwhrndsat_128B, 43876}, // __builtin_HEXAGON_V6_vasrwhrndsat_128B
      {Intrinsic::hexagon_V6_vasrwhsat, 43915}, // __builtin_HEXAGON_V6_vasrwhsat
      {Intrinsic::hexagon_V6_vasrwhsat_128B, 43946}, // __builtin_HEXAGON_V6_vasrwhsat_128B
      {Intrinsic::hexagon_V6_vasrwuhrndsat, 43982}, // __builtin_HEXAGON_V6_vasrwuhrndsat
      {Intrinsic::hexagon_V6_vasrwuhrndsat_128B, 44017}, // __builtin_HEXAGON_V6_vasrwuhrndsat_128B
      {Intrinsic::hexagon_V6_vasrwuhsat, 44057}, // __builtin_HEXAGON_V6_vasrwuhsat
      {Intrinsic::hexagon_V6_vasrwuhsat_128B, 44089}, // __builtin_HEXAGON_V6_vasrwuhsat_128B
      {Intrinsic::hexagon_V6_vasrwv, 44126}, // __builtin_HEXAGON_V6_vasrwv
      {Intrinsic::hexagon_V6_vasrwv_128B, 44154}, // __builtin_HEXAGON_V6_vasrwv_128B
      {Intrinsic::hexagon_V6_vassign, 44187}, // __builtin_HEXAGON_V6_vassign
      {Intrinsic::hexagon_V6_vassign_128B, 44216}, // __builtin_HEXAGON_V6_vassign_128B
      {Intrinsic::hexagon_V6_vassign_fp, 44250}, // __builtin_HEXAGON_V6_vassign_fp
      {Intrinsic::hexagon_V6_vassign_fp_128B, 44282}, // __builtin_HEXAGON_V6_vassign_fp_128B
      {Intrinsic::hexagon_V6_vassignp, 44319}, // __builtin_HEXAGON_V6_vassignp
      {Intrinsic::hexagon_V6_vassignp_128B, 44349}, // __builtin_HEXAGON_V6_vassignp_128B
      {Intrinsic::hexagon_V6_vavgb, 44384}, // __builtin_HEXAGON_V6_vavgb
      {Intrinsic::hexagon_V6_vavgb_128B, 44411}, // __builtin_HEXAGON_V6_vavgb_128B
      {Intrinsic::hexagon_V6_vavgbrnd, 44443}, // __builtin_HEXAGON_V6_vavgbrnd
      {Intrinsic::hexagon_V6_vavgbrnd_128B, 44473}, // __builtin_HEXAGON_V6_vavgbrnd_128B
      {Intrinsic::hexagon_V6_vavgh, 44508}, // __builtin_HEXAGON_V6_vavgh
      {Intrinsic::hexagon_V6_vavgh_128B, 44535}, // __builtin_HEXAGON_V6_vavgh_128B
      {Intrinsic::hexagon_V6_vavghrnd, 44567}, // __builtin_HEXAGON_V6_vavghrnd
      {Intrinsic::hexagon_V6_vavghrnd_128B, 44597}, // __builtin_HEXAGON_V6_vavghrnd_128B
      {Intrinsic::hexagon_V6_vavgub, 44632}, // __builtin_HEXAGON_V6_vavgub
      {Intrinsic::hexagon_V6_vavgub_128B, 44660}, // __builtin_HEXAGON_V6_vavgub_128B
      {Intrinsic::hexagon_V6_vavgubrnd, 44693}, // __builtin_HEXAGON_V6_vavgubrnd
      {Intrinsic::hexagon_V6_vavgubrnd_128B, 44724}, // __builtin_HEXAGON_V6_vavgubrnd_128B
      {Intrinsic::hexagon_V6_vavguh, 44760}, // __builtin_HEXAGON_V6_vavguh
      {Intrinsic::hexagon_V6_vavguh_128B, 44788}, // __builtin_HEXAGON_V6_vavguh_128B
      {Intrinsic::hexagon_V6_vavguhrnd, 44821}, // __builtin_HEXAGON_V6_vavguhrnd
      {Intrinsic::hexagon_V6_vavguhrnd_128B, 44852}, // __builtin_HEXAGON_V6_vavguhrnd_128B
      {Intrinsic::hexagon_V6_vavguw, 44888}, // __builtin_HEXAGON_V6_vavguw
      {Intrinsic::hexagon_V6_vavguw_128B, 44916}, // __builtin_HEXAGON_V6_vavguw_128B
      {Intrinsic::hexagon_V6_vavguwrnd, 44949}, // __builtin_HEXAGON_V6_vavguwrnd
      {Intrinsic::hexagon_V6_vavguwrnd_128B, 44980}, // __builtin_HEXAGON_V6_vavguwrnd_128B
      {Intrinsic::hexagon_V6_vavgw, 45016}, // __builtin_HEXAGON_V6_vavgw
      {Intrinsic::hexagon_V6_vavgw_128B, 45043}, // __builtin_HEXAGON_V6_vavgw_128B
      {Intrinsic::hexagon_V6_vavgwrnd, 45075}, // __builtin_HEXAGON_V6_vavgwrnd
      {Intrinsic::hexagon_V6_vavgwrnd_128B, 45105}, // __builtin_HEXAGON_V6_vavgwrnd_128B
      {Intrinsic::hexagon_V6_vcl0h, 45140}, // __builtin_HEXAGON_V6_vcl0h
      {Intrinsic::hexagon_V6_vcl0h_128B, 45167}, // __builtin_HEXAGON_V6_vcl0h_128B
      {Intrinsic::hexagon_V6_vcl0w, 45199}, // __builtin_HEXAGON_V6_vcl0w
      {Intrinsic::hexagon_V6_vcl0w_128B, 45226}, // __builtin_HEXAGON_V6_vcl0w_128B
      {Intrinsic::hexagon_V6_vcombine, 45258}, // __builtin_HEXAGON_V6_vcombine
      {Intrinsic::hexagon_V6_vcombine_128B, 45288}, // __builtin_HEXAGON_V6_vcombine_128B
      {Intrinsic::hexagon_V6_vconv_h_hf, 45323}, // __builtin_HEXAGON_V6_vconv_h_hf
      {Intrinsic::hexagon_V6_vconv_h_hf_128B, 45355}, // __builtin_HEXAGON_V6_vconv_h_hf_128B
      {Intrinsic::hexagon_V6_vconv_hf_h, 45392}, // __builtin_HEXAGON_V6_vconv_hf_h
      {Intrinsic::hexagon_V6_vconv_hf_h_128B, 45424}, // __builtin_HEXAGON_V6_vconv_hf_h_128B
      {Intrinsic::hexagon_V6_vconv_hf_qf16, 45461}, // __builtin_HEXAGON_V6_vconv_hf_qf16
      {Intrinsic::hexagon_V6_vconv_hf_qf16_128B, 45496}, // __builtin_HEXAGON_V6_vconv_hf_qf16_128B
      {Intrinsic::hexagon_V6_vconv_hf_qf32, 45536}, // __builtin_HEXAGON_V6_vconv_hf_qf32
      {Intrinsic::hexagon_V6_vconv_hf_qf32_128B, 45571}, // __builtin_HEXAGON_V6_vconv_hf_qf32_128B
      {Intrinsic::hexagon_V6_vconv_sf_qf32, 45611}, // __builtin_HEXAGON_V6_vconv_sf_qf32
      {Intrinsic::hexagon_V6_vconv_sf_qf32_128B, 45646}, // __builtin_HEXAGON_V6_vconv_sf_qf32_128B
      {Intrinsic::hexagon_V6_vconv_sf_w, 45686}, // __builtin_HEXAGON_V6_vconv_sf_w
      {Intrinsic::hexagon_V6_vconv_sf_w_128B, 45718}, // __builtin_HEXAGON_V6_vconv_sf_w_128B
      {Intrinsic::hexagon_V6_vconv_w_sf, 45755}, // __builtin_HEXAGON_V6_vconv_w_sf
      {Intrinsic::hexagon_V6_vconv_w_sf_128B, 45787}, // __builtin_HEXAGON_V6_vconv_w_sf_128B
      {Intrinsic::hexagon_V6_vcvt_b_hf, 45824}, // __builtin_HEXAGON_V6_vcvt_b_hf
      {Intrinsic::hexagon_V6_vcvt_b_hf_128B, 45855}, // __builtin_HEXAGON_V6_vcvt_b_hf_128B
      {Intrinsic::hexagon_V6_vcvt_bf_sf, 45891}, // __builtin_HEXAGON_V6_vcvt_bf_sf
      {Intrinsic::hexagon_V6_vcvt_bf_sf_128B, 45923}, // __builtin_HEXAGON_V6_vcvt_bf_sf_128B
      {Intrinsic::hexagon_V6_vcvt_h_hf, 45960}, // __builtin_HEXAGON_V6_vcvt_h_hf
      {Intrinsic::hexagon_V6_vcvt_h_hf_128B, 45991}, // __builtin_HEXAGON_V6_vcvt_h_hf_128B
      {Intrinsic::hexagon_V6_vcvt_hf_b, 46027}, // __builtin_HEXAGON_V6_vcvt_hf_b
      {Intrinsic::hexagon_V6_vcvt_hf_b_128B, 46058}, // __builtin_HEXAGON_V6_vcvt_hf_b_128B
      {Intrinsic::hexagon_V6_vcvt_hf_h, 46094}, // __builtin_HEXAGON_V6_vcvt_hf_h
      {Intrinsic::hexagon_V6_vcvt_hf_h_128B, 46125}, // __builtin_HEXAGON_V6_vcvt_hf_h_128B
      {Intrinsic::hexagon_V6_vcvt_hf_sf, 46161}, // __builtin_HEXAGON_V6_vcvt_hf_sf
      {Intrinsic::hexagon_V6_vcvt_hf_sf_128B, 46193}, // __builtin_HEXAGON_V6_vcvt_hf_sf_128B
      {Intrinsic::hexagon_V6_vcvt_hf_ub, 46230}, // __builtin_HEXAGON_V6_vcvt_hf_ub
      {Intrinsic::hexagon_V6_vcvt_hf_ub_128B, 46262}, // __builtin_HEXAGON_V6_vcvt_hf_ub_128B
      {Intrinsic::hexagon_V6_vcvt_hf_uh, 46299}, // __builtin_HEXAGON_V6_vcvt_hf_uh
      {Intrinsic::hexagon_V6_vcvt_hf_uh_128B, 46331}, // __builtin_HEXAGON_V6_vcvt_hf_uh_128B
      {Intrinsic::hexagon_V6_vcvt_sf_hf, 46368}, // __builtin_HEXAGON_V6_vcvt_sf_hf
      {Intrinsic::hexagon_V6_vcvt_sf_hf_128B, 46400}, // __builtin_HEXAGON_V6_vcvt_sf_hf_128B
      {Intrinsic::hexagon_V6_vcvt_ub_hf, 46437}, // __builtin_HEXAGON_V6_vcvt_ub_hf
      {Intrinsic::hexagon_V6_vcvt_ub_hf_128B, 46469}, // __builtin_HEXAGON_V6_vcvt_ub_hf_128B
      {Intrinsic::hexagon_V6_vcvt_uh_hf, 46506}, // __builtin_HEXAGON_V6_vcvt_uh_hf
      {Intrinsic::hexagon_V6_vcvt_uh_hf_128B, 46538}, // __builtin_HEXAGON_V6_vcvt_uh_hf_128B
      {Intrinsic::hexagon_V6_vd0, 46575}, // __builtin_HEXAGON_V6_vd0
      {Intrinsic::hexagon_V6_vd0_128B, 46600}, // __builtin_HEXAGON_V6_vd0_128B
      {Intrinsic::hexagon_V6_vdd0, 46630}, // __builtin_HEXAGON_V6_vdd0
      {Intrinsic::hexagon_V6_vdd0_128B, 46656}, // __builtin_HEXAGON_V6_vdd0_128B
      {Intrinsic::hexagon_V6_vdealb, 46687}, // __builtin_HEXAGON_V6_vdealb
      {Intrinsic::hexagon_V6_vdealb4w, 46748}, // __builtin_HEXAGON_V6_vdealb4w
      {Intrinsic::hexagon_V6_vdealb4w_128B, 46778}, // __builtin_HEXAGON_V6_vdealb4w_128B
      {Intrinsic::hexagon_V6_vdealb_128B, 46715}, // __builtin_HEXAGON_V6_vdealb_128B
      {Intrinsic::hexagon_V6_vdealh, 46813}, // __builtin_HEXAGON_V6_vdealh
      {Intrinsic::hexagon_V6_vdealh_128B, 46841}, // __builtin_HEXAGON_V6_vdealh_128B
      {Intrinsic::hexagon_V6_vdealvdd, 46874}, // __builtin_HEXAGON_V6_vdealvdd
      {Intrinsic::hexagon_V6_vdealvdd_128B, 46904}, // __builtin_HEXAGON_V6_vdealvdd_128B
      {Intrinsic::hexagon_V6_vdelta, 46939}, // __builtin_HEXAGON_V6_vdelta
      {Intrinsic::hexagon_V6_vdelta_128B, 46967}, // __builtin_HEXAGON_V6_vdelta_128B
      {Intrinsic::hexagon_V6_vdmpy_sf_hf, 47000}, // __builtin_HEXAGON_V6_vdmpy_sf_hf
      {Intrinsic::hexagon_V6_vdmpy_sf_hf_128B, 47033}, // __builtin_HEXAGON_V6_vdmpy_sf_hf_128B
      {Intrinsic::hexagon_V6_vdmpy_sf_hf_acc, 47071}, // __builtin_HEXAGON_V6_vdmpy_sf_hf_acc
      {Intrinsic::hexagon_V6_vdmpy_sf_hf_acc_128B, 47108}, // __builtin_HEXAGON_V6_vdmpy_sf_hf_acc_128B
      {Intrinsic::hexagon_V6_vdmpybus, 47150}, // __builtin_HEXAGON_V6_vdmpybus
      {Intrinsic::hexagon_V6_vdmpybus_128B, 47180}, // __builtin_HEXAGON_V6_vdmpybus_128B
      {Intrinsic::hexagon_V6_vdmpybus_acc, 47215}, // __builtin_HEXAGON_V6_vdmpybus_acc
      {Intrinsic::hexagon_V6_vdmpybus_acc_128B, 47249}, // __builtin_HEXAGON_V6_vdmpybus_acc_128B
      {Intrinsic::hexagon_V6_vdmpybus_dv, 47288}, // __builtin_HEXAGON_V6_vdmpybus_dv
      {Intrinsic::hexagon_V6_vdmpybus_dv_128B, 47321}, // __builtin_HEXAGON_V6_vdmpybus_dv_128B
      {Intrinsic::hexagon_V6_vdmpybus_dv_acc, 47359}, // __builtin_HEXAGON_V6_vdmpybus_dv_acc
      {Intrinsic::hexagon_V6_vdmpybus_dv_acc_128B, 47396}, // __builtin_HEXAGON_V6_vdmpybus_dv_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhb, 47438}, // __builtin_HEXAGON_V6_vdmpyhb
      {Intrinsic::hexagon_V6_vdmpyhb_128B, 47467}, // __builtin_HEXAGON_V6_vdmpyhb_128B
      {Intrinsic::hexagon_V6_vdmpyhb_acc, 47501}, // __builtin_HEXAGON_V6_vdmpyhb_acc
      {Intrinsic::hexagon_V6_vdmpyhb_acc_128B, 47534}, // __builtin_HEXAGON_V6_vdmpyhb_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhb_dv, 47572}, // __builtin_HEXAGON_V6_vdmpyhb_dv
      {Intrinsic::hexagon_V6_vdmpyhb_dv_128B, 47604}, // __builtin_HEXAGON_V6_vdmpyhb_dv_128B
      {Intrinsic::hexagon_V6_vdmpyhb_dv_acc, 47641}, // __builtin_HEXAGON_V6_vdmpyhb_dv_acc
      {Intrinsic::hexagon_V6_vdmpyhb_dv_acc_128B, 47677}, // __builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhisat, 47718}, // __builtin_HEXAGON_V6_vdmpyhisat
      {Intrinsic::hexagon_V6_vdmpyhisat_128B, 47750}, // __builtin_HEXAGON_V6_vdmpyhisat_128B
      {Intrinsic::hexagon_V6_vdmpyhisat_acc, 47787}, // __builtin_HEXAGON_V6_vdmpyhisat_acc
      {Intrinsic::hexagon_V6_vdmpyhisat_acc_128B, 47823}, // __builtin_HEXAGON_V6_vdmpyhisat_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhsat, 47864}, // __builtin_HEXAGON_V6_vdmpyhsat
      {Intrinsic::hexagon_V6_vdmpyhsat_128B, 47895}, // __builtin_HEXAGON_V6_vdmpyhsat_128B
      {Intrinsic::hexagon_V6_vdmpyhsat_acc, 47931}, // __builtin_HEXAGON_V6_vdmpyhsat_acc
      {Intrinsic::hexagon_V6_vdmpyhsat_acc_128B, 47966}, // __builtin_HEXAGON_V6_vdmpyhsat_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhsuisat, 48006}, // __builtin_HEXAGON_V6_vdmpyhsuisat
      {Intrinsic::hexagon_V6_vdmpyhsuisat_128B, 48040}, // __builtin_HEXAGON_V6_vdmpyhsuisat_128B
      {Intrinsic::hexagon_V6_vdmpyhsuisat_acc, 48079}, // __builtin_HEXAGON_V6_vdmpyhsuisat_acc
      {Intrinsic::hexagon_V6_vdmpyhsuisat_acc_128B, 48117}, // __builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhsusat, 48160}, // __builtin_HEXAGON_V6_vdmpyhsusat
      {Intrinsic::hexagon_V6_vdmpyhsusat_128B, 48193}, // __builtin_HEXAGON_V6_vdmpyhsusat_128B
      {Intrinsic::hexagon_V6_vdmpyhsusat_acc, 48231}, // __builtin_HEXAGON_V6_vdmpyhsusat_acc
      {Intrinsic::hexagon_V6_vdmpyhsusat_acc_128B, 48268}, // __builtin_HEXAGON_V6_vdmpyhsusat_acc_128B
      {Intrinsic::hexagon_V6_vdmpyhvsat, 48310}, // __builtin_HEXAGON_V6_vdmpyhvsat
      {Intrinsic::hexagon_V6_vdmpyhvsat_128B, 48342}, // __builtin_HEXAGON_V6_vdmpyhvsat_128B
      {Intrinsic::hexagon_V6_vdmpyhvsat_acc, 48379}, // __builtin_HEXAGON_V6_vdmpyhvsat_acc
      {Intrinsic::hexagon_V6_vdmpyhvsat_acc_128B, 48415}, // __builtin_HEXAGON_V6_vdmpyhvsat_acc_128B
      {Intrinsic::hexagon_V6_vdsaduh, 48456}, // __builtin_HEXAGON_V6_vdsaduh
      {Intrinsic::hexagon_V6_vdsaduh_128B, 48485}, // __builtin_HEXAGON_V6_vdsaduh_128B
      {Intrinsic::hexagon_V6_vdsaduh_acc, 48519}, // __builtin_HEXAGON_V6_vdsaduh_acc
      {Intrinsic::hexagon_V6_vdsaduh_acc_128B, 48552}, // __builtin_HEXAGON_V6_vdsaduh_acc_128B
      {Intrinsic::hexagon_V6_veqb, 48590}, // __builtin_HEXAGON_V6_veqb
      {Intrinsic::hexagon_V6_veqb_128B, 48616}, // __builtin_HEXAGON_V6_veqb_128B
      {Intrinsic::hexagon_V6_veqb_and, 48647}, // __builtin_HEXAGON_V6_veqb_and
      {Intrinsic::hexagon_V6_veqb_and_128B, 48677}, // __builtin_HEXAGON_V6_veqb_and_128B
      {Intrinsic::hexagon_V6_veqb_or, 48712}, // __builtin_HEXAGON_V6_veqb_or
      {Intrinsic::hexagon_V6_veqb_or_128B, 48741}, // __builtin_HEXAGON_V6_veqb_or_128B
      {Intrinsic::hexagon_V6_veqb_xor, 48775}, // __builtin_HEXAGON_V6_veqb_xor
      {Intrinsic::hexagon_V6_veqb_xor_128B, 48805}, // __builtin_HEXAGON_V6_veqb_xor_128B
      {Intrinsic::hexagon_V6_veqh, 48840}, // __builtin_HEXAGON_V6_veqh
      {Intrinsic::hexagon_V6_veqh_128B, 48866}, // __builtin_HEXAGON_V6_veqh_128B
      {Intrinsic::hexagon_V6_veqh_and, 48897}, // __builtin_HEXAGON_V6_veqh_and
      {Intrinsic::hexagon_V6_veqh_and_128B, 48927}, // __builtin_HEXAGON_V6_veqh_and_128B
      {Intrinsic::hexagon_V6_veqh_or, 48962}, // __builtin_HEXAGON_V6_veqh_or
      {Intrinsic::hexagon_V6_veqh_or_128B, 48991}, // __builtin_HEXAGON_V6_veqh_or_128B
      {Intrinsic::hexagon_V6_veqh_xor, 49025}, // __builtin_HEXAGON_V6_veqh_xor
      {Intrinsic::hexagon_V6_veqh_xor_128B, 49055}, // __builtin_HEXAGON_V6_veqh_xor_128B
      {Intrinsic::hexagon_V6_veqw, 49090}, // __builtin_HEXAGON_V6_veqw
      {Intrinsic::hexagon_V6_veqw_128B, 49116}, // __builtin_HEXAGON_V6_veqw_128B
      {Intrinsic::hexagon_V6_veqw_and, 49147}, // __builtin_HEXAGON_V6_veqw_and
      {Intrinsic::hexagon_V6_veqw_and_128B, 49177}, // __builtin_HEXAGON_V6_veqw_and_128B
      {Intrinsic::hexagon_V6_veqw_or, 49212}, // __builtin_HEXAGON_V6_veqw_or
      {Intrinsic::hexagon_V6_veqw_or_128B, 49241}, // __builtin_HEXAGON_V6_veqw_or_128B
      {Intrinsic::hexagon_V6_veqw_xor, 49275}, // __builtin_HEXAGON_V6_veqw_xor
      {Intrinsic::hexagon_V6_veqw_xor_128B, 49305}, // __builtin_HEXAGON_V6_veqw_xor_128B
      {Intrinsic::hexagon_V6_vfmax_hf, 49340}, // __builtin_HEXAGON_V6_vfmax_hf
      {Intrinsic::hexagon_V6_vfmax_hf_128B, 49370}, // __builtin_HEXAGON_V6_vfmax_hf_128B
      {Intrinsic::hexagon_V6_vfmax_sf, 49405}, // __builtin_HEXAGON_V6_vfmax_sf
      {Intrinsic::hexagon_V6_vfmax_sf_128B, 49435}, // __builtin_HEXAGON_V6_vfmax_sf_128B
      {Intrinsic::hexagon_V6_vfmin_hf, 49470}, // __builtin_HEXAGON_V6_vfmin_hf
      {Intrinsic::hexagon_V6_vfmin_hf_128B, 49500}, // __builtin_HEXAGON_V6_vfmin_hf_128B
      {Intrinsic::hexagon_V6_vfmin_sf, 49535}, // __builtin_HEXAGON_V6_vfmin_sf
      {Intrinsic::hexagon_V6_vfmin_sf_128B, 49565}, // __builtin_HEXAGON_V6_vfmin_sf_128B
      {Intrinsic::hexagon_V6_vfneg_hf, 49600}, // __builtin_HEXAGON_V6_vfneg_hf
      {Intrinsic::hexagon_V6_vfneg_hf_128B, 49630}, // __builtin_HEXAGON_V6_vfneg_hf_128B
      {Intrinsic::hexagon_V6_vfneg_sf, 49665}, // __builtin_HEXAGON_V6_vfneg_sf
      {Intrinsic::hexagon_V6_vfneg_sf_128B, 49695}, // __builtin_HEXAGON_V6_vfneg_sf_128B
      {Intrinsic::hexagon_V6_vgathermh, 49730}, // __builtin_HEXAGON_V6_vgathermh
      {Intrinsic::hexagon_V6_vgathermh_128B, 49761}, // __builtin_HEXAGON_V6_vgathermh_128B
      {Intrinsic::hexagon_V6_vgathermhq, 49797}, // __builtin_HEXAGON_V6_vgathermhq
      {Intrinsic::hexagon_V6_vgathermhq_128B, 49829}, // __builtin_HEXAGON_V6_vgathermhq_128B
      {Intrinsic::hexagon_V6_vgathermhw, 49866}, // __builtin_HEXAGON_V6_vgathermhw
      {Intrinsic::hexagon_V6_vgathermhw_128B, 49898}, // __builtin_HEXAGON_V6_vgathermhw_128B
      {Intrinsic::hexagon_V6_vgathermhwq, 49935}, // __builtin_HEXAGON_V6_vgathermhwq
      {Intrinsic::hexagon_V6_vgathermhwq_128B, 49968}, // __builtin_HEXAGON_V6_vgathermhwq_128B
      {Intrinsic::hexagon_V6_vgathermw, 50006}, // __builtin_HEXAGON_V6_vgathermw
      {Intrinsic::hexagon_V6_vgathermw_128B, 50037}, // __builtin_HEXAGON_V6_vgathermw_128B
      {Intrinsic::hexagon_V6_vgathermwq, 50073}, // __builtin_HEXAGON_V6_vgathermwq
      {Intrinsic::hexagon_V6_vgathermwq_128B, 50105}, // __builtin_HEXAGON_V6_vgathermwq_128B
      {Intrinsic::hexagon_V6_vgtb, 50142}, // __builtin_HEXAGON_V6_vgtb
      {Intrinsic::hexagon_V6_vgtb_128B, 50168}, // __builtin_HEXAGON_V6_vgtb_128B
      {Intrinsic::hexagon_V6_vgtb_and, 50199}, // __builtin_HEXAGON_V6_vgtb_and
      {Intrinsic::hexagon_V6_vgtb_and_128B, 50229}, // __builtin_HEXAGON_V6_vgtb_and_128B
      {Intrinsic::hexagon_V6_vgtb_or, 50264}, // __builtin_HEXAGON_V6_vgtb_or
      {Intrinsic::hexagon_V6_vgtb_or_128B, 50293}, // __builtin_HEXAGON_V6_vgtb_or_128B
      {Intrinsic::hexagon_V6_vgtb_xor, 50327}, // __builtin_HEXAGON_V6_vgtb_xor
      {Intrinsic::hexagon_V6_vgtb_xor_128B, 50357}, // __builtin_HEXAGON_V6_vgtb_xor_128B
      {Intrinsic::hexagon_V6_vgtbf, 50392}, // __builtin_HEXAGON_V6_vgtbf
      {Intrinsic::hexagon_V6_vgtbf_128B, 50419}, // __builtin_HEXAGON_V6_vgtbf_128B
      {Intrinsic::hexagon_V6_vgtbf_and, 50451}, // __builtin_HEXAGON_V6_vgtbf_and
      {Intrinsic::hexagon_V6_vgtbf_and_128B, 50482}, // __builtin_HEXAGON_V6_vgtbf_and_128B
      {Intrinsic::hexagon_V6_vgtbf_or, 50518}, // __builtin_HEXAGON_V6_vgtbf_or
      {Intrinsic::hexagon_V6_vgtbf_or_128B, 50548}, // __builtin_HEXAGON_V6_vgtbf_or_128B
      {Intrinsic::hexagon_V6_vgtbf_xor, 50583}, // __builtin_HEXAGON_V6_vgtbf_xor
      {Intrinsic::hexagon_V6_vgtbf_xor_128B, 50614}, // __builtin_HEXAGON_V6_vgtbf_xor_128B
      {Intrinsic::hexagon_V6_vgth, 50650}, // __builtin_HEXAGON_V6_vgth
      {Intrinsic::hexagon_V6_vgth_128B, 50676}, // __builtin_HEXAGON_V6_vgth_128B
      {Intrinsic::hexagon_V6_vgth_and, 50707}, // __builtin_HEXAGON_V6_vgth_and
      {Intrinsic::hexagon_V6_vgth_and_128B, 50737}, // __builtin_HEXAGON_V6_vgth_and_128B
      {Intrinsic::hexagon_V6_vgth_or, 50772}, // __builtin_HEXAGON_V6_vgth_or
      {Intrinsic::hexagon_V6_vgth_or_128B, 50801}, // __builtin_HEXAGON_V6_vgth_or_128B
      {Intrinsic::hexagon_V6_vgth_xor, 50835}, // __builtin_HEXAGON_V6_vgth_xor
      {Intrinsic::hexagon_V6_vgth_xor_128B, 50865}, // __builtin_HEXAGON_V6_vgth_xor_128B
      {Intrinsic::hexagon_V6_vgthf, 50900}, // __builtin_HEXAGON_V6_vgthf
      {Intrinsic::hexagon_V6_vgthf_128B, 50927}, // __builtin_HEXAGON_V6_vgthf_128B
      {Intrinsic::hexagon_V6_vgthf_and, 50959}, // __builtin_HEXAGON_V6_vgthf_and
      {Intrinsic::hexagon_V6_vgthf_and_128B, 50990}, // __builtin_HEXAGON_V6_vgthf_and_128B
      {Intrinsic::hexagon_V6_vgthf_or, 51026}, // __builtin_HEXAGON_V6_vgthf_or
      {Intrinsic::hexagon_V6_vgthf_or_128B, 51056}, // __builtin_HEXAGON_V6_vgthf_or_128B
      {Intrinsic::hexagon_V6_vgthf_xor, 51091}, // __builtin_HEXAGON_V6_vgthf_xor
      {Intrinsic::hexagon_V6_vgthf_xor_128B, 51122}, // __builtin_HEXAGON_V6_vgthf_xor_128B
      {Intrinsic::hexagon_V6_vgtsf, 51158}, // __builtin_HEXAGON_V6_vgtsf
      {Intrinsic::hexagon_V6_vgtsf_128B, 51185}, // __builtin_HEXAGON_V6_vgtsf_128B
      {Intrinsic::hexagon_V6_vgtsf_and, 51217}, // __builtin_HEXAGON_V6_vgtsf_and
      {Intrinsic::hexagon_V6_vgtsf_and_128B, 51248}, // __builtin_HEXAGON_V6_vgtsf_and_128B
      {Intrinsic::hexagon_V6_vgtsf_or, 51284}, // __builtin_HEXAGON_V6_vgtsf_or
      {Intrinsic::hexagon_V6_vgtsf_or_128B, 51314}, // __builtin_HEXAGON_V6_vgtsf_or_128B
      {Intrinsic::hexagon_V6_vgtsf_xor, 51349}, // __builtin_HEXAGON_V6_vgtsf_xor
      {Intrinsic::hexagon_V6_vgtsf_xor_128B, 51380}, // __builtin_HEXAGON_V6_vgtsf_xor_128B
      {Intrinsic::hexagon_V6_vgtub, 51416}, // __builtin_HEXAGON_V6_vgtub
      {Intrinsic::hexagon_V6_vgtub_128B, 51443}, // __builtin_HEXAGON_V6_vgtub_128B
      {Intrinsic::hexagon_V6_vgtub_and, 51475}, // __builtin_HEXAGON_V6_vgtub_and
      {Intrinsic::hexagon_V6_vgtub_and_128B, 51506}, // __builtin_HEXAGON_V6_vgtub_and_128B
      {Intrinsic::hexagon_V6_vgtub_or, 51542}, // __builtin_HEXAGON_V6_vgtub_or
      {Intrinsic::hexagon_V6_vgtub_or_128B, 51572}, // __builtin_HEXAGON_V6_vgtub_or_128B
      {Intrinsic::hexagon_V6_vgtub_xor, 51607}, // __builtin_HEXAGON_V6_vgtub_xor
      {Intrinsic::hexagon_V6_vgtub_xor_128B, 51638}, // __builtin_HEXAGON_V6_vgtub_xor_128B
      {Intrinsic::hexagon_V6_vgtuh, 51674}, // __builtin_HEXAGON_V6_vgtuh
      {Intrinsic::hexagon_V6_vgtuh_128B, 51701}, // __builtin_HEXAGON_V6_vgtuh_128B
      {Intrinsic::hexagon_V6_vgtuh_and, 51733}, // __builtin_HEXAGON_V6_vgtuh_and
      {Intrinsic::hexagon_V6_vgtuh_and_128B, 51764}, // __builtin_HEXAGON_V6_vgtuh_and_128B
      {Intrinsic::hexagon_V6_vgtuh_or, 51800}, // __builtin_HEXAGON_V6_vgtuh_or
      {Intrinsic::hexagon_V6_vgtuh_or_128B, 51830}, // __builtin_HEXAGON_V6_vgtuh_or_128B
      {Intrinsic::hexagon_V6_vgtuh_xor, 51865}, // __builtin_HEXAGON_V6_vgtuh_xor
      {Intrinsic::hexagon_V6_vgtuh_xor_128B, 51896}, // __builtin_HEXAGON_V6_vgtuh_xor_128B
      {Intrinsic::hexagon_V6_vgtuw, 51932}, // __builtin_HEXAGON_V6_vgtuw
      {Intrinsic::hexagon_V6_vgtuw_128B, 51959}, // __builtin_HEXAGON_V6_vgtuw_128B
      {Intrinsic::hexagon_V6_vgtuw_and, 51991}, // __builtin_HEXAGON_V6_vgtuw_and
      {Intrinsic::hexagon_V6_vgtuw_and_128B, 52022}, // __builtin_HEXAGON_V6_vgtuw_and_128B
      {Intrinsic::hexagon_V6_vgtuw_or, 52058}, // __builtin_HEXAGON_V6_vgtuw_or
      {Intrinsic::hexagon_V6_vgtuw_or_128B, 52088}, // __builtin_HEXAGON_V6_vgtuw_or_128B
      {Intrinsic::hexagon_V6_vgtuw_xor, 52123}, // __builtin_HEXAGON_V6_vgtuw_xor
      {Intrinsic::hexagon_V6_vgtuw_xor_128B, 52154}, // __builtin_HEXAGON_V6_vgtuw_xor_128B
      {Intrinsic::hexagon_V6_vgtw, 52190}, // __builtin_HEXAGON_V6_vgtw
      {Intrinsic::hexagon_V6_vgtw_128B, 52216}, // __builtin_HEXAGON_V6_vgtw_128B
      {Intrinsic::hexagon_V6_vgtw_and, 52247}, // __builtin_HEXAGON_V6_vgtw_and
      {Intrinsic::hexagon_V6_vgtw_and_128B, 52277}, // __builtin_HEXAGON_V6_vgtw_and_128B
      {Intrinsic::hexagon_V6_vgtw_or, 52312}, // __builtin_HEXAGON_V6_vgtw_or
      {Intrinsic::hexagon_V6_vgtw_or_128B, 52341}, // __builtin_HEXAGON_V6_vgtw_or_128B
      {Intrinsic::hexagon_V6_vgtw_xor, 52375}, // __builtin_HEXAGON_V6_vgtw_xor
      {Intrinsic::hexagon_V6_vgtw_xor_128B, 52405}, // __builtin_HEXAGON_V6_vgtw_xor_128B
      {Intrinsic::hexagon_V6_vinsertwr, 52440}, // __builtin_HEXAGON_V6_vinsertwr
      {Intrinsic::hexagon_V6_vinsertwr_128B, 52471}, // __builtin_HEXAGON_V6_vinsertwr_128B
      {Intrinsic::hexagon_V6_vlalignb, 52507}, // __builtin_HEXAGON_V6_vlalignb
      {Intrinsic::hexagon_V6_vlalignb_128B, 52537}, // __builtin_HEXAGON_V6_vlalignb_128B
      {Intrinsic::hexagon_V6_vlalignbi, 52572}, // __builtin_HEXAGON_V6_vlalignbi
      {Intrinsic::hexagon_V6_vlalignbi_128B, 52603}, // __builtin_HEXAGON_V6_vlalignbi_128B
      {Intrinsic::hexagon_V6_vlsrb, 52639}, // __builtin_HEXAGON_V6_vlsrb
      {Intrinsic::hexagon_V6_vlsrb_128B, 52666}, // __builtin_HEXAGON_V6_vlsrb_128B
      {Intrinsic::hexagon_V6_vlsrh, 52698}, // __builtin_HEXAGON_V6_vlsrh
      {Intrinsic::hexagon_V6_vlsrh_128B, 52725}, // __builtin_HEXAGON_V6_vlsrh_128B
      {Intrinsic::hexagon_V6_vlsrhv, 52757}, // __builtin_HEXAGON_V6_vlsrhv
      {Intrinsic::hexagon_V6_vlsrhv_128B, 52785}, // __builtin_HEXAGON_V6_vlsrhv_128B
      {Intrinsic::hexagon_V6_vlsrw, 52818}, // __builtin_HEXAGON_V6_vlsrw
      {Intrinsic::hexagon_V6_vlsrw_128B, 52845}, // __builtin_HEXAGON_V6_vlsrw_128B
      {Intrinsic::hexagon_V6_vlsrwv, 52877}, // __builtin_HEXAGON_V6_vlsrwv
      {Intrinsic::hexagon_V6_vlsrwv_128B, 52905}, // __builtin_HEXAGON_V6_vlsrwv_128B
      {Intrinsic::hexagon_V6_vlut4, 52938}, // __builtin_HEXAGON_V6_vlut4
      {Intrinsic::hexagon_V6_vlut4_128B, 52965}, // __builtin_HEXAGON_V6_vlut4_128B
      {Intrinsic::hexagon_V6_vlutvvb, 52997}, // __builtin_HEXAGON_V6_vlutvvb
      {Intrinsic::hexagon_V6_vlutvvb_128B, 53026}, // __builtin_HEXAGON_V6_vlutvvb_128B
      {Intrinsic::hexagon_V6_vlutvvb_nm, 53060}, // __builtin_HEXAGON_V6_vlutvvb_nm
      {Intrinsic::hexagon_V6_vlutvvb_nm_128B, 53092}, // __builtin_HEXAGON_V6_vlutvvb_nm_128B
      {Intrinsic::hexagon_V6_vlutvvb_oracc, 53129}, // __builtin_HEXAGON_V6_vlutvvb_oracc
      {Intrinsic::hexagon_V6_vlutvvb_oracc_128B, 53164}, // __builtin_HEXAGON_V6_vlutvvb_oracc_128B
      {Intrinsic::hexagon_V6_vlutvvb_oracci, 53204}, // __builtin_HEXAGON_V6_vlutvvb_oracci
      {Intrinsic::hexagon_V6_vlutvvb_oracci_128B, 53240}, // __builtin_HEXAGON_V6_vlutvvb_oracci_128B
      {Intrinsic::hexagon_V6_vlutvvbi, 53281}, // __builtin_HEXAGON_V6_vlutvvbi
      {Intrinsic::hexagon_V6_vlutvvbi_128B, 53311}, // __builtin_HEXAGON_V6_vlutvvbi_128B
      {Intrinsic::hexagon_V6_vlutvwh, 53346}, // __builtin_HEXAGON_V6_vlutvwh
      {Intrinsic::hexagon_V6_vlutvwh_128B, 53375}, // __builtin_HEXAGON_V6_vlutvwh_128B
      {Intrinsic::hexagon_V6_vlutvwh_nm, 53409}, // __builtin_HEXAGON_V6_vlutvwh_nm
      {Intrinsic::hexagon_V6_vlutvwh_nm_128B, 53441}, // __builtin_HEXAGON_V6_vlutvwh_nm_128B
      {Intrinsic::hexagon_V6_vlutvwh_oracc, 53478}, // __builtin_HEXAGON_V6_vlutvwh_oracc
      {Intrinsic::hexagon_V6_vlutvwh_oracc_128B, 53513}, // __builtin_HEXAGON_V6_vlutvwh_oracc_128B
      {Intrinsic::hexagon_V6_vlutvwh_oracci, 53553}, // __builtin_HEXAGON_V6_vlutvwh_oracci
      {Intrinsic::hexagon_V6_vlutvwh_oracci_128B, 53589}, // __builtin_HEXAGON_V6_vlutvwh_oracci_128B
      {Intrinsic::hexagon_V6_vlutvwhi, 53630}, // __builtin_HEXAGON_V6_vlutvwhi
      {Intrinsic::hexagon_V6_vlutvwhi_128B, 53660}, // __builtin_HEXAGON_V6_vlutvwhi_128B
      {Intrinsic::hexagon_V6_vmax_bf, 53695}, // __builtin_HEXAGON_V6_vmax_bf
      {Intrinsic::hexagon_V6_vmax_bf_128B, 53724}, // __builtin_HEXAGON_V6_vmax_bf_128B
      {Intrinsic::hexagon_V6_vmax_hf, 53758}, // __builtin_HEXAGON_V6_vmax_hf
      {Intrinsic::hexagon_V6_vmax_hf_128B, 53787}, // __builtin_HEXAGON_V6_vmax_hf_128B
      {Intrinsic::hexagon_V6_vmax_sf, 53821}, // __builtin_HEXAGON_V6_vmax_sf
      {Intrinsic::hexagon_V6_vmax_sf_128B, 53850}, // __builtin_HEXAGON_V6_vmax_sf_128B
      {Intrinsic::hexagon_V6_vmaxb, 53884}, // __builtin_HEXAGON_V6_vmaxb
      {Intrinsic::hexagon_V6_vmaxb_128B, 53911}, // __builtin_HEXAGON_V6_vmaxb_128B
      {Intrinsic::hexagon_V6_vmaxh, 53943}, // __builtin_HEXAGON_V6_vmaxh
      {Intrinsic::hexagon_V6_vmaxh_128B, 53970}, // __builtin_HEXAGON_V6_vmaxh_128B
      {Intrinsic::hexagon_V6_vmaxub, 54002}, // __builtin_HEXAGON_V6_vmaxub
      {Intrinsic::hexagon_V6_vmaxub_128B, 54030}, // __builtin_HEXAGON_V6_vmaxub_128B
      {Intrinsic::hexagon_V6_vmaxuh, 54063}, // __builtin_HEXAGON_V6_vmaxuh
      {Intrinsic::hexagon_V6_vmaxuh_128B, 54091}, // __builtin_HEXAGON_V6_vmaxuh_128B
      {Intrinsic::hexagon_V6_vmaxw, 54124}, // __builtin_HEXAGON_V6_vmaxw
      {Intrinsic::hexagon_V6_vmaxw_128B, 54151}, // __builtin_HEXAGON_V6_vmaxw_128B
      {Intrinsic::hexagon_V6_vmin_bf, 54183}, // __builtin_HEXAGON_V6_vmin_bf
      {Intrinsic::hexagon_V6_vmin_bf_128B, 54212}, // __builtin_HEXAGON_V6_vmin_bf_128B
      {Intrinsic::hexagon_V6_vmin_hf, 54246}, // __builtin_HEXAGON_V6_vmin_hf
      {Intrinsic::hexagon_V6_vmin_hf_128B, 54275}, // __builtin_HEXAGON_V6_vmin_hf_128B
      {Intrinsic::hexagon_V6_vmin_sf, 54309}, // __builtin_HEXAGON_V6_vmin_sf
      {Intrinsic::hexagon_V6_vmin_sf_128B, 54338}, // __builtin_HEXAGON_V6_vmin_sf_128B
      {Intrinsic::hexagon_V6_vminb, 54372}, // __builtin_HEXAGON_V6_vminb
      {Intrinsic::hexagon_V6_vminb_128B, 54399}, // __builtin_HEXAGON_V6_vminb_128B
      {Intrinsic::hexagon_V6_vminh, 54431}, // __builtin_HEXAGON_V6_vminh
      {Intrinsic::hexagon_V6_vminh_128B, 54458}, // __builtin_HEXAGON_V6_vminh_128B
      {Intrinsic::hexagon_V6_vminub, 54490}, // __builtin_HEXAGON_V6_vminub
      {Intrinsic::hexagon_V6_vminub_128B, 54518}, // __builtin_HEXAGON_V6_vminub_128B
      {Intrinsic::hexagon_V6_vminuh, 54551}, // __builtin_HEXAGON_V6_vminuh
      {Intrinsic::hexagon_V6_vminuh_128B, 54579}, // __builtin_HEXAGON_V6_vminuh_128B
      {Intrinsic::hexagon_V6_vminw, 54612}, // __builtin_HEXAGON_V6_vminw
      {Intrinsic::hexagon_V6_vminw_128B, 54639}, // __builtin_HEXAGON_V6_vminw_128B
      {Intrinsic::hexagon_V6_vmpabus, 54671}, // __builtin_HEXAGON_V6_vmpabus
      {Intrinsic::hexagon_V6_vmpabus_128B, 54700}, // __builtin_HEXAGON_V6_vmpabus_128B
      {Intrinsic::hexagon_V6_vmpabus_acc, 54734}, // __builtin_HEXAGON_V6_vmpabus_acc
      {Intrinsic::hexagon_V6_vmpabus_acc_128B, 54767}, // __builtin_HEXAGON_V6_vmpabus_acc_128B
      {Intrinsic::hexagon_V6_vmpabusv, 54805}, // __builtin_HEXAGON_V6_vmpabusv
      {Intrinsic::hexagon_V6_vmpabusv_128B, 54835}, // __builtin_HEXAGON_V6_vmpabusv_128B
      {Intrinsic::hexagon_V6_vmpabuu, 54870}, // __builtin_HEXAGON_V6_vmpabuu
      {Intrinsic::hexagon_V6_vmpabuu_128B, 54899}, // __builtin_HEXAGON_V6_vmpabuu_128B
      {Intrinsic::hexagon_V6_vmpabuu_acc, 54933}, // __builtin_HEXAGON_V6_vmpabuu_acc
      {Intrinsic::hexagon_V6_vmpabuu_acc_128B, 54966}, // __builtin_HEXAGON_V6_vmpabuu_acc_128B
      {Intrinsic::hexagon_V6_vmpabuuv, 55004}, // __builtin_HEXAGON_V6_vmpabuuv
      {Intrinsic::hexagon_V6_vmpabuuv_128B, 55034}, // __builtin_HEXAGON_V6_vmpabuuv_128B
      {Intrinsic::hexagon_V6_vmpahb, 55069}, // __builtin_HEXAGON_V6_vmpahb
      {Intrinsic::hexagon_V6_vmpahb_128B, 55097}, // __builtin_HEXAGON_V6_vmpahb_128B
      {Intrinsic::hexagon_V6_vmpahb_acc, 55130}, // __builtin_HEXAGON_V6_vmpahb_acc
      {Intrinsic::hexagon_V6_vmpahb_acc_128B, 55162}, // __builtin_HEXAGON_V6_vmpahb_acc_128B
      {Intrinsic::hexagon_V6_vmpahhsat, 55199}, // __builtin_HEXAGON_V6_vmpahhsat
      {Intrinsic::hexagon_V6_vmpahhsat_128B, 55230}, // __builtin_HEXAGON_V6_vmpahhsat_128B
      {Intrinsic::hexagon_V6_vmpauhb, 55266}, // __builtin_HEXAGON_V6_vmpauhb
      {Intrinsic::hexagon_V6_vmpauhb_128B, 55295}, // __builtin_HEXAGON_V6_vmpauhb_128B
      {Intrinsic::hexagon_V6_vmpauhb_acc, 55329}, // __builtin_HEXAGON_V6_vmpauhb_acc
      {Intrinsic::hexagon_V6_vmpauhb_acc_128B, 55362}, // __builtin_HEXAGON_V6_vmpauhb_acc_128B
      {Intrinsic::hexagon_V6_vmpauhuhsat, 55400}, // __builtin_HEXAGON_V6_vmpauhuhsat
      {Intrinsic::hexagon_V6_vmpauhuhsat_128B, 55433}, // __builtin_HEXAGON_V6_vmpauhuhsat_128B
      {Intrinsic::hexagon_V6_vmpsuhuhsat, 55471}, // __builtin_HEXAGON_V6_vmpsuhuhsat
      {Intrinsic::hexagon_V6_vmpsuhuhsat_128B, 55504}, // __builtin_HEXAGON_V6_vmpsuhuhsat_128B
      {Intrinsic::hexagon_V6_vmpy_hf_hf, 55542}, // __builtin_HEXAGON_V6_vmpy_hf_hf
      {Intrinsic::hexagon_V6_vmpy_hf_hf_128B, 55574}, // __builtin_HEXAGON_V6_vmpy_hf_hf_128B
      {Intrinsic::hexagon_V6_vmpy_hf_hf_acc, 55611}, // __builtin_HEXAGON_V6_vmpy_hf_hf_acc
      {Intrinsic::hexagon_V6_vmpy_hf_hf_acc_128B, 55647}, // __builtin_HEXAGON_V6_vmpy_hf_hf_acc_128B
      {Intrinsic::hexagon_V6_vmpy_qf16, 55688}, // __builtin_HEXAGON_V6_vmpy_qf16
      {Intrinsic::hexagon_V6_vmpy_qf16_128B, 55719}, // __builtin_HEXAGON_V6_vmpy_qf16_128B
      {Intrinsic::hexagon_V6_vmpy_qf16_hf, 55755}, // __builtin_HEXAGON_V6_vmpy_qf16_hf
      {Intrinsic::hexagon_V6_vmpy_qf16_hf_128B, 55789}, // __builtin_HEXAGON_V6_vmpy_qf16_hf_128B
      {Intrinsic::hexagon_V6_vmpy_qf16_mix_hf, 55828}, // __builtin_HEXAGON_V6_vmpy_qf16_mix_hf
      {Intrinsic::hexagon_V6_vmpy_qf16_mix_hf_128B, 55866}, // __builtin_HEXAGON_V6_vmpy_qf16_mix_hf_128B
      {Intrinsic::hexagon_V6_vmpy_qf32, 55909}, // __builtin_HEXAGON_V6_vmpy_qf32
      {Intrinsic::hexagon_V6_vmpy_qf32_128B, 55940}, // __builtin_HEXAGON_V6_vmpy_qf32_128B
      {Intrinsic::hexagon_V6_vmpy_qf32_hf, 55976}, // __builtin_HEXAGON_V6_vmpy_qf32_hf
      {Intrinsic::hexagon_V6_vmpy_qf32_hf_128B, 56010}, // __builtin_HEXAGON_V6_vmpy_qf32_hf_128B
      {Intrinsic::hexagon_V6_vmpy_qf32_mix_hf, 56049}, // __builtin_HEXAGON_V6_vmpy_qf32_mix_hf
      {Intrinsic::hexagon_V6_vmpy_qf32_mix_hf_128B, 56087}, // __builtin_HEXAGON_V6_vmpy_qf32_mix_hf_128B
      {Intrinsic::hexagon_V6_vmpy_qf32_qf16, 56130}, // __builtin_HEXAGON_V6_vmpy_qf32_qf16
      {Intrinsic::hexagon_V6_vmpy_qf32_qf16_128B, 56166}, // __builtin_HEXAGON_V6_vmpy_qf32_qf16_128B
      {Intrinsic::hexagon_V6_vmpy_qf32_sf, 56207}, // __builtin_HEXAGON_V6_vmpy_qf32_sf
      {Intrinsic::hexagon_V6_vmpy_qf32_sf_128B, 56241}, // __builtin_HEXAGON_V6_vmpy_qf32_sf_128B
      {Intrinsic::hexagon_V6_vmpy_sf_bf, 56280}, // __builtin_HEXAGON_V6_vmpy_sf_bf
      {Intrinsic::hexagon_V6_vmpy_sf_bf_128B, 56312}, // __builtin_HEXAGON_V6_vmpy_sf_bf_128B
      {Intrinsic::hexagon_V6_vmpy_sf_bf_acc, 56349}, // __builtin_HEXAGON_V6_vmpy_sf_bf_acc
      {Intrinsic::hexagon_V6_vmpy_sf_bf_acc_128B, 56385}, // __builtin_HEXAGON_V6_vmpy_sf_bf_acc_128B
      {Intrinsic::hexagon_V6_vmpy_sf_hf, 56426}, // __builtin_HEXAGON_V6_vmpy_sf_hf
      {Intrinsic::hexagon_V6_vmpy_sf_hf_128B, 56458}, // __builtin_HEXAGON_V6_vmpy_sf_hf_128B
      {Intrinsic::hexagon_V6_vmpy_sf_hf_acc, 56495}, // __builtin_HEXAGON_V6_vmpy_sf_hf_acc
      {Intrinsic::hexagon_V6_vmpy_sf_hf_acc_128B, 56531}, // __builtin_HEXAGON_V6_vmpy_sf_hf_acc_128B
      {Intrinsic::hexagon_V6_vmpy_sf_sf, 56572}, // __builtin_HEXAGON_V6_vmpy_sf_sf
      {Intrinsic::hexagon_V6_vmpy_sf_sf_128B, 56604}, // __builtin_HEXAGON_V6_vmpy_sf_sf_128B
      {Intrinsic::hexagon_V6_vmpybus, 56641}, // __builtin_HEXAGON_V6_vmpybus
      {Intrinsic::hexagon_V6_vmpybus_128B, 56670}, // __builtin_HEXAGON_V6_vmpybus_128B
      {Intrinsic::hexagon_V6_vmpybus_acc, 56704}, // __builtin_HEXAGON_V6_vmpybus_acc
      {Intrinsic::hexagon_V6_vmpybus_acc_128B, 56737}, // __builtin_HEXAGON_V6_vmpybus_acc_128B
      {Intrinsic::hexagon_V6_vmpybusv, 56775}, // __builtin_HEXAGON_V6_vmpybusv
      {Intrinsic::hexagon_V6_vmpybusv_128B, 56805}, // __builtin_HEXAGON_V6_vmpybusv_128B
      {Intrinsic::hexagon_V6_vmpybusv_acc, 56840}, // __builtin_HEXAGON_V6_vmpybusv_acc
      {Intrinsic::hexagon_V6_vmpybusv_acc_128B, 56874}, // __builtin_HEXAGON_V6_vmpybusv_acc_128B
      {Intrinsic::hexagon_V6_vmpybv, 56913}, // __builtin_HEXAGON_V6_vmpybv
      {Intrinsic::hexagon_V6_vmpybv_128B, 56941}, // __builtin_HEXAGON_V6_vmpybv_128B
      {Intrinsic::hexagon_V6_vmpybv_acc, 56974}, // __builtin_HEXAGON_V6_vmpybv_acc
      {Intrinsic::hexagon_V6_vmpybv_acc_128B, 57006}, // __builtin_HEXAGON_V6_vmpybv_acc_128B
      {Intrinsic::hexagon_V6_vmpyewuh, 57043}, // __builtin_HEXAGON_V6_vmpyewuh
      {Intrinsic::hexagon_V6_vmpyewuh_128B, 57073}, // __builtin_HEXAGON_V6_vmpyewuh_128B
      {Intrinsic::hexagon_V6_vmpyewuh_64, 57108}, // __builtin_HEXAGON_V6_vmpyewuh_64
      {Intrinsic::hexagon_V6_vmpyewuh_64_128B, 57141}, // __builtin_HEXAGON_V6_vmpyewuh_64_128B
      {Intrinsic::hexagon_V6_vmpyh, 57179}, // __builtin_HEXAGON_V6_vmpyh
      {Intrinsic::hexagon_V6_vmpyh_128B, 57206}, // __builtin_HEXAGON_V6_vmpyh_128B
      {Intrinsic::hexagon_V6_vmpyh_acc, 57238}, // __builtin_HEXAGON_V6_vmpyh_acc
      {Intrinsic::hexagon_V6_vmpyh_acc_128B, 57269}, // __builtin_HEXAGON_V6_vmpyh_acc_128B
      {Intrinsic::hexagon_V6_vmpyhsat_acc, 57305}, // __builtin_HEXAGON_V6_vmpyhsat_acc
      {Intrinsic::hexagon_V6_vmpyhsat_acc_128B, 57339}, // __builtin_HEXAGON_V6_vmpyhsat_acc_128B
      {Intrinsic::hexagon_V6_vmpyhsrs, 57378}, // __builtin_HEXAGON_V6_vmpyhsrs
      {Intrinsic::hexagon_V6_vmpyhsrs_128B, 57408}, // __builtin_HEXAGON_V6_vmpyhsrs_128B
      {Intrinsic::hexagon_V6_vmpyhss, 57443}, // __builtin_HEXAGON_V6_vmpyhss
      {Intrinsic::hexagon_V6_vmpyhss_128B, 57472}, // __builtin_HEXAGON_V6_vmpyhss_128B
      {Intrinsic::hexagon_V6_vmpyhus, 57506}, // __builtin_HEXAGON_V6_vmpyhus
      {Intrinsic::hexagon_V6_vmpyhus_128B, 57535}, // __builtin_HEXAGON_V6_vmpyhus_128B
      {Intrinsic::hexagon_V6_vmpyhus_acc, 57569}, // __builtin_HEXAGON_V6_vmpyhus_acc
      {Intrinsic::hexagon_V6_vmpyhus_acc_128B, 57602}, // __builtin_HEXAGON_V6_vmpyhus_acc_128B
      {Intrinsic::hexagon_V6_vmpyhv, 57640}, // __builtin_HEXAGON_V6_vmpyhv
      {Intrinsic::hexagon_V6_vmpyhv_128B, 57668}, // __builtin_HEXAGON_V6_vmpyhv_128B
      {Intrinsic::hexagon_V6_vmpyhv_acc, 57701}, // __builtin_HEXAGON_V6_vmpyhv_acc
      {Intrinsic::hexagon_V6_vmpyhv_acc_128B, 57733}, // __builtin_HEXAGON_V6_vmpyhv_acc_128B
      {Intrinsic::hexagon_V6_vmpyhvsrs, 57770}, // __builtin_HEXAGON_V6_vmpyhvsrs
      {Intrinsic::hexagon_V6_vmpyhvsrs_128B, 57801}, // __builtin_HEXAGON_V6_vmpyhvsrs_128B
      {Intrinsic::hexagon_V6_vmpyieoh, 57837}, // __builtin_HEXAGON_V6_vmpyieoh
      {Intrinsic::hexagon_V6_vmpyieoh_128B, 57867}, // __builtin_HEXAGON_V6_vmpyieoh_128B
      {Intrinsic::hexagon_V6_vmpyiewh_acc, 57902}, // __builtin_HEXAGON_V6_vmpyiewh_acc
      {Intrinsic::hexagon_V6_vmpyiewh_acc_128B, 57936}, // __builtin_HEXAGON_V6_vmpyiewh_acc_128B
      {Intrinsic::hexagon_V6_vmpyiewuh, 57975}, // __builtin_HEXAGON_V6_vmpyiewuh
      {Intrinsic::hexagon_V6_vmpyiewuh_128B, 58006}, // __builtin_HEXAGON_V6_vmpyiewuh_128B
      {Intrinsic::hexagon_V6_vmpyiewuh_acc, 58042}, // __builtin_HEXAGON_V6_vmpyiewuh_acc
      {Intrinsic::hexagon_V6_vmpyiewuh_acc_128B, 58077}, // __builtin_HEXAGON_V6_vmpyiewuh_acc_128B
      {Intrinsic::hexagon_V6_vmpyih, 58117}, // __builtin_HEXAGON_V6_vmpyih
      {Intrinsic::hexagon_V6_vmpyih_128B, 58145}, // __builtin_HEXAGON_V6_vmpyih_128B
      {Intrinsic::hexagon_V6_vmpyih_acc, 58178}, // __builtin_HEXAGON_V6_vmpyih_acc
      {Intrinsic::hexagon_V6_vmpyih_acc_128B, 58210}, // __builtin_HEXAGON_V6_vmpyih_acc_128B
      {Intrinsic::hexagon_V6_vmpyihb, 58247}, // __builtin_HEXAGON_V6_vmpyihb
      {Intrinsic::hexagon_V6_vmpyihb_128B, 58276}, // __builtin_HEXAGON_V6_vmpyihb_128B
      {Intrinsic::hexagon_V6_vmpyihb_acc, 58310}, // __builtin_HEXAGON_V6_vmpyihb_acc
      {Intrinsic::hexagon_V6_vmpyihb_acc_128B, 58343}, // __builtin_HEXAGON_V6_vmpyihb_acc_128B
      {Intrinsic::hexagon_V6_vmpyiowh, 58381}, // __builtin_HEXAGON_V6_vmpyiowh
      {Intrinsic::hexagon_V6_vmpyiowh_128B, 58411}, // __builtin_HEXAGON_V6_vmpyiowh_128B
      {Intrinsic::hexagon_V6_vmpyiwb, 58446}, // __builtin_HEXAGON_V6_vmpyiwb
      {Intrinsic::hexagon_V6_vmpyiwb_128B, 58475}, // __builtin_HEXAGON_V6_vmpyiwb_128B
      {Intrinsic::hexagon_V6_vmpyiwb_acc, 58509}, // __builtin_HEXAGON_V6_vmpyiwb_acc
      {Intrinsic::hexagon_V6_vmpyiwb_acc_128B, 58542}, // __builtin_HEXAGON_V6_vmpyiwb_acc_128B
      {Intrinsic::hexagon_V6_vmpyiwh, 58580}, // __builtin_HEXAGON_V6_vmpyiwh
      {Intrinsic::hexagon_V6_vmpyiwh_128B, 58609}, // __builtin_HEXAGON_V6_vmpyiwh_128B
      {Intrinsic::hexagon_V6_vmpyiwh_acc, 58643}, // __builtin_HEXAGON_V6_vmpyiwh_acc
      {Intrinsic::hexagon_V6_vmpyiwh_acc_128B, 58676}, // __builtin_HEXAGON_V6_vmpyiwh_acc_128B
      {Intrinsic::hexagon_V6_vmpyiwub, 58714}, // __builtin_HEXAGON_V6_vmpyiwub
      {Intrinsic::hexagon_V6_vmpyiwub_128B, 58744}, // __builtin_HEXAGON_V6_vmpyiwub_128B
      {Intrinsic::hexagon_V6_vmpyiwub_acc, 58779}, // __builtin_HEXAGON_V6_vmpyiwub_acc
      {Intrinsic::hexagon_V6_vmpyiwub_acc_128B, 58813}, // __builtin_HEXAGON_V6_vmpyiwub_acc_128B
      {Intrinsic::hexagon_V6_vmpyowh, 58852}, // __builtin_HEXAGON_V6_vmpyowh
      {Intrinsic::hexagon_V6_vmpyowh_128B, 58881}, // __builtin_HEXAGON_V6_vmpyowh_128B
      {Intrinsic::hexagon_V6_vmpyowh_64_acc, 58915}, // __builtin_HEXAGON_V6_vmpyowh_64_acc
      {Intrinsic::hexagon_V6_vmpyowh_64_acc_128B, 58951}, // __builtin_HEXAGON_V6_vmpyowh_64_acc_128B
      {Intrinsic::hexagon_V6_vmpyowh_rnd, 58992}, // __builtin_HEXAGON_V6_vmpyowh_rnd
      {Intrinsic::hexagon_V6_vmpyowh_rnd_128B, 59025}, // __builtin_HEXAGON_V6_vmpyowh_rnd_128B
      {Intrinsic::hexagon_V6_vmpyowh_rnd_sacc, 59063}, // __builtin_HEXAGON_V6_vmpyowh_rnd_sacc
      {Intrinsic::hexagon_V6_vmpyowh_rnd_sacc_128B, 59101}, // __builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B
      {Intrinsic::hexagon_V6_vmpyowh_sacc, 59144}, // __builtin_HEXAGON_V6_vmpyowh_sacc
      {Intrinsic::hexagon_V6_vmpyowh_sacc_128B, 59178}, // __builtin_HEXAGON_V6_vmpyowh_sacc_128B
      {Intrinsic::hexagon_V6_vmpyub, 59217}, // __builtin_HEXAGON_V6_vmpyub
      {Intrinsic::hexagon_V6_vmpyub_128B, 59245}, // __builtin_HEXAGON_V6_vmpyub_128B
      {Intrinsic::hexagon_V6_vmpyub_acc, 59278}, // __builtin_HEXAGON_V6_vmpyub_acc
      {Intrinsic::hexagon_V6_vmpyub_acc_128B, 59310}, // __builtin_HEXAGON_V6_vmpyub_acc_128B
      {Intrinsic::hexagon_V6_vmpyubv, 59347}, // __builtin_HEXAGON_V6_vmpyubv
      {Intrinsic::hexagon_V6_vmpyubv_128B, 59376}, // __builtin_HEXAGON_V6_vmpyubv_128B
      {Intrinsic::hexagon_V6_vmpyubv_acc, 59410}, // __builtin_HEXAGON_V6_vmpyubv_acc
      {Intrinsic::hexagon_V6_vmpyubv_acc_128B, 59443}, // __builtin_HEXAGON_V6_vmpyubv_acc_128B
      {Intrinsic::hexagon_V6_vmpyuh, 59481}, // __builtin_HEXAGON_V6_vmpyuh
      {Intrinsic::hexagon_V6_vmpyuh_128B, 59509}, // __builtin_HEXAGON_V6_vmpyuh_128B
      {Intrinsic::hexagon_V6_vmpyuh_acc, 59542}, // __builtin_HEXAGON_V6_vmpyuh_acc
      {Intrinsic::hexagon_V6_vmpyuh_acc_128B, 59574}, // __builtin_HEXAGON_V6_vmpyuh_acc_128B
      {Intrinsic::hexagon_V6_vmpyuhe, 59611}, // __builtin_HEXAGON_V6_vmpyuhe
      {Intrinsic::hexagon_V6_vmpyuhe_128B, 59640}, // __builtin_HEXAGON_V6_vmpyuhe_128B
      {Intrinsic::hexagon_V6_vmpyuhe_acc, 59674}, // __builtin_HEXAGON_V6_vmpyuhe_acc
      {Intrinsic::hexagon_V6_vmpyuhe_acc_128B, 59707}, // __builtin_HEXAGON_V6_vmpyuhe_acc_128B
      {Intrinsic::hexagon_V6_vmpyuhv, 59745}, // __builtin_HEXAGON_V6_vmpyuhv
      {Intrinsic::hexagon_V6_vmpyuhv_128B, 59774}, // __builtin_HEXAGON_V6_vmpyuhv_128B
      {Intrinsic::hexagon_V6_vmpyuhv_acc, 59808}, // __builtin_HEXAGON_V6_vmpyuhv_acc
      {Intrinsic::hexagon_V6_vmpyuhv_acc_128B, 59841}, // __builtin_HEXAGON_V6_vmpyuhv_acc_128B
      {Intrinsic::hexagon_V6_vmpyuhvs, 59879}, // __builtin_HEXAGON_V6_vmpyuhvs
      {Intrinsic::hexagon_V6_vmpyuhvs_128B, 59909}, // __builtin_HEXAGON_V6_vmpyuhvs_128B
      {Intrinsic::hexagon_V6_vmux, 59944}, // __builtin_HEXAGON_V6_vmux
      {Intrinsic::hexagon_V6_vmux_128B, 59970}, // __builtin_HEXAGON_V6_vmux_128B
      {Intrinsic::hexagon_V6_vnavgb, 60001}, // __builtin_HEXAGON_V6_vnavgb
      {Intrinsic::hexagon_V6_vnavgb_128B, 60029}, // __builtin_HEXAGON_V6_vnavgb_128B
      {Intrinsic::hexagon_V6_vnavgh, 60062}, // __builtin_HEXAGON_V6_vnavgh
      {Intrinsic::hexagon_V6_vnavgh_128B, 60090}, // __builtin_HEXAGON_V6_vnavgh_128B
      {Intrinsic::hexagon_V6_vnavgub, 60123}, // __builtin_HEXAGON_V6_vnavgub
      {Intrinsic::hexagon_V6_vnavgub_128B, 60152}, // __builtin_HEXAGON_V6_vnavgub_128B
      {Intrinsic::hexagon_V6_vnavgw, 60186}, // __builtin_HEXAGON_V6_vnavgw
      {Intrinsic::hexagon_V6_vnavgw_128B, 60214}, // __builtin_HEXAGON_V6_vnavgw_128B
      {Intrinsic::hexagon_V6_vnormamth, 60247}, // __builtin_HEXAGON_V6_vnormamth
      {Intrinsic::hexagon_V6_vnormamth_128B, 60278}, // __builtin_HEXAGON_V6_vnormamth_128B
      {Intrinsic::hexagon_V6_vnormamtw, 60314}, // __builtin_HEXAGON_V6_vnormamtw
      {Intrinsic::hexagon_V6_vnormamtw_128B, 60345}, // __builtin_HEXAGON_V6_vnormamtw_128B
      {Intrinsic::hexagon_V6_vnot, 60381}, // __builtin_HEXAGON_V6_vnot
      {Intrinsic::hexagon_V6_vnot_128B, 60407}, // __builtin_HEXAGON_V6_vnot_128B
      {Intrinsic::hexagon_V6_vor, 60438}, // __builtin_HEXAGON_V6_vor
      {Intrinsic::hexagon_V6_vor_128B, 60463}, // __builtin_HEXAGON_V6_vor_128B
      {Intrinsic::hexagon_V6_vpackeb, 60493}, // __builtin_HEXAGON_V6_vpackeb
      {Intrinsic::hexagon_V6_vpackeb_128B, 60522}, // __builtin_HEXAGON_V6_vpackeb_128B
      {Intrinsic::hexagon_V6_vpackeh, 60556}, // __builtin_HEXAGON_V6_vpackeh
      {Intrinsic::hexagon_V6_vpackeh_128B, 60585}, // __builtin_HEXAGON_V6_vpackeh_128B
      {Intrinsic::hexagon_V6_vpackhb_sat, 60619}, // __builtin_HEXAGON_V6_vpackhb_sat
      {Intrinsic::hexagon_V6_vpackhb_sat_128B, 60652}, // __builtin_HEXAGON_V6_vpackhb_sat_128B
      {Intrinsic::hexagon_V6_vpackhub_sat, 60690}, // __builtin_HEXAGON_V6_vpackhub_sat
      {Intrinsic::hexagon_V6_vpackhub_sat_128B, 60724}, // __builtin_HEXAGON_V6_vpackhub_sat_128B
      {Intrinsic::hexagon_V6_vpackob, 60763}, // __builtin_HEXAGON_V6_vpackob
      {Intrinsic::hexagon_V6_vpackob_128B, 60792}, // __builtin_HEXAGON_V6_vpackob_128B
      {Intrinsic::hexagon_V6_vpackoh, 60826}, // __builtin_HEXAGON_V6_vpackoh
      {Intrinsic::hexagon_V6_vpackoh_128B, 60855}, // __builtin_HEXAGON_V6_vpackoh_128B
      {Intrinsic::hexagon_V6_vpackwh_sat, 60889}, // __builtin_HEXAGON_V6_vpackwh_sat
      {Intrinsic::hexagon_V6_vpackwh_sat_128B, 60922}, // __builtin_HEXAGON_V6_vpackwh_sat_128B
      {Intrinsic::hexagon_V6_vpackwuh_sat, 60960}, // __builtin_HEXAGON_V6_vpackwuh_sat
      {Intrinsic::hexagon_V6_vpackwuh_sat_128B, 60994}, // __builtin_HEXAGON_V6_vpackwuh_sat_128B
      {Intrinsic::hexagon_V6_vpopcounth, 61033}, // __builtin_HEXAGON_V6_vpopcounth
      {Intrinsic::hexagon_V6_vpopcounth_128B, 61065}, // __builtin_HEXAGON_V6_vpopcounth_128B
      {Intrinsic::hexagon_V6_vprefixqb, 61102}, // __builtin_HEXAGON_V6_vprefixqb
      {Intrinsic::hexagon_V6_vprefixqb_128B, 61133}, // __builtin_HEXAGON_V6_vprefixqb_128B
      {Intrinsic::hexagon_V6_vprefixqh, 61169}, // __builtin_HEXAGON_V6_vprefixqh
      {Intrinsic::hexagon_V6_vprefixqh_128B, 61200}, // __builtin_HEXAGON_V6_vprefixqh_128B
      {Intrinsic::hexagon_V6_vprefixqw, 61236}, // __builtin_HEXAGON_V6_vprefixqw
      {Intrinsic::hexagon_V6_vprefixqw_128B, 61267}, // __builtin_HEXAGON_V6_vprefixqw_128B
      {Intrinsic::hexagon_V6_vrdelta, 61303}, // __builtin_HEXAGON_V6_vrdelta
      {Intrinsic::hexagon_V6_vrdelta_128B, 61332}, // __builtin_HEXAGON_V6_vrdelta_128B
      {Intrinsic::hexagon_V6_vrmpybub_rtt, 61366}, // __builtin_HEXAGON_V6_vrmpybub_rtt
      {Intrinsic::hexagon_V6_vrmpybub_rtt_128B, 61400}, // __builtin_HEXAGON_V6_vrmpybub_rtt_128B
      {Intrinsic::hexagon_V6_vrmpybub_rtt_acc, 61439}, // __builtin_HEXAGON_V6_vrmpybub_rtt_acc
      {Intrinsic::hexagon_V6_vrmpybub_rtt_acc_128B, 61477}, // __builtin_HEXAGON_V6_vrmpybub_rtt_acc_128B
      {Intrinsic::hexagon_V6_vrmpybus, 61520}, // __builtin_HEXAGON_V6_vrmpybus
      {Intrinsic::hexagon_V6_vrmpybus_128B, 61550}, // __builtin_HEXAGON_V6_vrmpybus_128B
      {Intrinsic::hexagon_V6_vrmpybus_acc, 61585}, // __builtin_HEXAGON_V6_vrmpybus_acc
      {Intrinsic::hexagon_V6_vrmpybus_acc_128B, 61619}, // __builtin_HEXAGON_V6_vrmpybus_acc_128B
      {Intrinsic::hexagon_V6_vrmpybusi, 61658}, // __builtin_HEXAGON_V6_vrmpybusi
      {Intrinsic::hexagon_V6_vrmpybusi_128B, 61689}, // __builtin_HEXAGON_V6_vrmpybusi_128B
      {Intrinsic::hexagon_V6_vrmpybusi_acc, 61725}, // __builtin_HEXAGON_V6_vrmpybusi_acc
      {Intrinsic::hexagon_V6_vrmpybusi_acc_128B, 61760}, // __builtin_HEXAGON_V6_vrmpybusi_acc_128B
      {Intrinsic::hexagon_V6_vrmpybusv, 61800}, // __builtin_HEXAGON_V6_vrmpybusv
      {Intrinsic::hexagon_V6_vrmpybusv_128B, 61831}, // __builtin_HEXAGON_V6_vrmpybusv_128B
      {Intrinsic::hexagon_V6_vrmpybusv_acc, 61867}, // __builtin_HEXAGON_V6_vrmpybusv_acc
      {Intrinsic::hexagon_V6_vrmpybusv_acc_128B, 61902}, // __builtin_HEXAGON_V6_vrmpybusv_acc_128B
      {Intrinsic::hexagon_V6_vrmpybv, 61942}, // __builtin_HEXAGON_V6_vrmpybv
      {Intrinsic::hexagon_V6_vrmpybv_128B, 61971}, // __builtin_HEXAGON_V6_vrmpybv_128B
      {Intrinsic::hexagon_V6_vrmpybv_acc, 62005}, // __builtin_HEXAGON_V6_vrmpybv_acc
      {Intrinsic::hexagon_V6_vrmpybv_acc_128B, 62038}, // __builtin_HEXAGON_V6_vrmpybv_acc_128B
      {Intrinsic::hexagon_V6_vrmpyub, 62076}, // __builtin_HEXAGON_V6_vrmpyub
      {Intrinsic::hexagon_V6_vrmpyub_128B, 62105}, // __builtin_HEXAGON_V6_vrmpyub_128B
      {Intrinsic::hexagon_V6_vrmpyub_acc, 62139}, // __builtin_HEXAGON_V6_vrmpyub_acc
      {Intrinsic::hexagon_V6_vrmpyub_acc_128B, 62172}, // __builtin_HEXAGON_V6_vrmpyub_acc_128B
      {Intrinsic::hexagon_V6_vrmpyub_rtt, 62210}, // __builtin_HEXAGON_V6_vrmpyub_rtt
      {Intrinsic::hexagon_V6_vrmpyub_rtt_128B, 62243}, // __builtin_HEXAGON_V6_vrmpyub_rtt_128B
      {Intrinsic::hexagon_V6_vrmpyub_rtt_acc, 62281}, // __builtin_HEXAGON_V6_vrmpyub_rtt_acc
      {Intrinsic::hexagon_V6_vrmpyub_rtt_acc_128B, 62318}, // __builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B
      {Intrinsic::hexagon_V6_vrmpyubi, 62360}, // __builtin_HEXAGON_V6_vrmpyubi
      {Intrinsic::hexagon_V6_vrmpyubi_128B, 62390}, // __builtin_HEXAGON_V6_vrmpyubi_128B
      {Intrinsic::hexagon_V6_vrmpyubi_acc, 62425}, // __builtin_HEXAGON_V6_vrmpyubi_acc
      {Intrinsic::hexagon_V6_vrmpyubi_acc_128B, 62459}, // __builtin_HEXAGON_V6_vrmpyubi_acc_128B
      {Intrinsic::hexagon_V6_vrmpyubv, 62498}, // __builtin_HEXAGON_V6_vrmpyubv
      {Intrinsic::hexagon_V6_vrmpyubv_128B, 62528}, // __builtin_HEXAGON_V6_vrmpyubv_128B
      {Intrinsic::hexagon_V6_vrmpyubv_acc, 62563}, // __builtin_HEXAGON_V6_vrmpyubv_acc
      {Intrinsic::hexagon_V6_vrmpyubv_acc_128B, 62597}, // __builtin_HEXAGON_V6_vrmpyubv_acc_128B
      {Intrinsic::hexagon_V6_vror, 62636}, // __builtin_HEXAGON_V6_vror
      {Intrinsic::hexagon_V6_vror_128B, 62662}, // __builtin_HEXAGON_V6_vror_128B
      {Intrinsic::hexagon_V6_vrotr, 62693}, // __builtin_HEXAGON_V6_vrotr
      {Intrinsic::hexagon_V6_vrotr_128B, 62720}, // __builtin_HEXAGON_V6_vrotr_128B
      {Intrinsic::hexagon_V6_vroundhb, 62752}, // __builtin_HEXAGON_V6_vroundhb
      {Intrinsic::hexagon_V6_vroundhb_128B, 62782}, // __builtin_HEXAGON_V6_vroundhb_128B
      {Intrinsic::hexagon_V6_vroundhub, 62817}, // __builtin_HEXAGON_V6_vroundhub
      {Intrinsic::hexagon_V6_vroundhub_128B, 62848}, // __builtin_HEXAGON_V6_vroundhub_128B
      {Intrinsic::hexagon_V6_vrounduhub, 62884}, // __builtin_HEXAGON_V6_vrounduhub
      {Intrinsic::hexagon_V6_vrounduhub_128B, 62916}, // __builtin_HEXAGON_V6_vrounduhub_128B
      {Intrinsic::hexagon_V6_vrounduwuh, 62953}, // __builtin_HEXAGON_V6_vrounduwuh
      {Intrinsic::hexagon_V6_vrounduwuh_128B, 62985}, // __builtin_HEXAGON_V6_vrounduwuh_128B
      {Intrinsic::hexagon_V6_vroundwh, 63022}, // __builtin_HEXAGON_V6_vroundwh
      {Intrinsic::hexagon_V6_vroundwh_128B, 63052}, // __builtin_HEXAGON_V6_vroundwh_128B
      {Intrinsic::hexagon_V6_vroundwuh, 63087}, // __builtin_HEXAGON_V6_vroundwuh
      {Intrinsic::hexagon_V6_vroundwuh_128B, 63118}, // __builtin_HEXAGON_V6_vroundwuh_128B
      {Intrinsic::hexagon_V6_vrsadubi, 63154}, // __builtin_HEXAGON_V6_vrsadubi
      {Intrinsic::hexagon_V6_vrsadubi_128B, 63184}, // __builtin_HEXAGON_V6_vrsadubi_128B
      {Intrinsic::hexagon_V6_vrsadubi_acc, 63219}, // __builtin_HEXAGON_V6_vrsadubi_acc
      {Intrinsic::hexagon_V6_vrsadubi_acc_128B, 63253}, // __builtin_HEXAGON_V6_vrsadubi_acc_128B
      {Intrinsic::hexagon_V6_vsatdw, 63292}, // __builtin_HEXAGON_V6_vsatdw
      {Intrinsic::hexagon_V6_vsatdw_128B, 63320}, // __builtin_HEXAGON_V6_vsatdw_128B
      {Intrinsic::hexagon_V6_vsathub, 63353}, // __builtin_HEXAGON_V6_vsathub
      {Intrinsic::hexagon_V6_vsathub_128B, 63382}, // __builtin_HEXAGON_V6_vsathub_128B
      {Intrinsic::hexagon_V6_vsatuwuh, 63416}, // __builtin_HEXAGON_V6_vsatuwuh
      {Intrinsic::hexagon_V6_vsatuwuh_128B, 63446}, // __builtin_HEXAGON_V6_vsatuwuh_128B
      {Intrinsic::hexagon_V6_vsatwh, 63481}, // __builtin_HEXAGON_V6_vsatwh
      {Intrinsic::hexagon_V6_vsatwh_128B, 63509}, // __builtin_HEXAGON_V6_vsatwh_128B
      {Intrinsic::hexagon_V6_vsb, 63542}, // __builtin_HEXAGON_V6_vsb
      {Intrinsic::hexagon_V6_vsb_128B, 63567}, // __builtin_HEXAGON_V6_vsb_128B
      {Intrinsic::hexagon_V6_vscattermh, 63597}, // __builtin_HEXAGON_V6_vscattermh
      {Intrinsic::hexagon_V6_vscattermh_128B, 63629}, // __builtin_HEXAGON_V6_vscattermh_128B
      {Intrinsic::hexagon_V6_vscattermh_add, 63666}, // __builtin_HEXAGON_V6_vscattermh_add
      {Intrinsic::hexagon_V6_vscattermh_add_128B, 63702}, // __builtin_HEXAGON_V6_vscattermh_add_128B
      {Intrinsic::hexagon_V6_vscattermhq, 63743}, // __builtin_HEXAGON_V6_vscattermhq
      {Intrinsic::hexagon_V6_vscattermhq_128B, 63776}, // __builtin_HEXAGON_V6_vscattermhq_128B
      {Intrinsic::hexagon_V6_vscattermhw, 63814}, // __builtin_HEXAGON_V6_vscattermhw
      {Intrinsic::hexagon_V6_vscattermhw_128B, 63847}, // __builtin_HEXAGON_V6_vscattermhw_128B
      {Intrinsic::hexagon_V6_vscattermhw_add, 63885}, // __builtin_HEXAGON_V6_vscattermhw_add
      {Intrinsic::hexagon_V6_vscattermhw_add_128B, 63922}, // __builtin_HEXAGON_V6_vscattermhw_add_128B
      {Intrinsic::hexagon_V6_vscattermhwq, 63964}, // __builtin_HEXAGON_V6_vscattermhwq
      {Intrinsic::hexagon_V6_vscattermhwq_128B, 63998}, // __builtin_HEXAGON_V6_vscattermhwq_128B
      {Intrinsic::hexagon_V6_vscattermw, 64037}, // __builtin_HEXAGON_V6_vscattermw
      {Intrinsic::hexagon_V6_vscattermw_128B, 64069}, // __builtin_HEXAGON_V6_vscattermw_128B
      {Intrinsic::hexagon_V6_vscattermw_add, 64106}, // __builtin_HEXAGON_V6_vscattermw_add
      {Intrinsic::hexagon_V6_vscattermw_add_128B, 64142}, // __builtin_HEXAGON_V6_vscattermw_add_128B
      {Intrinsic::hexagon_V6_vscattermwq, 64183}, // __builtin_HEXAGON_V6_vscattermwq
      {Intrinsic::hexagon_V6_vscattermwq_128B, 64216}, // __builtin_HEXAGON_V6_vscattermwq_128B
      {Intrinsic::hexagon_V6_vsh, 64254}, // __builtin_HEXAGON_V6_vsh
      {Intrinsic::hexagon_V6_vsh_128B, 64279}, // __builtin_HEXAGON_V6_vsh_128B
      {Intrinsic::hexagon_V6_vshufeh, 64309}, // __builtin_HEXAGON_V6_vshufeh
      {Intrinsic::hexagon_V6_vshufeh_128B, 64338}, // __builtin_HEXAGON_V6_vshufeh_128B
      {Intrinsic::hexagon_V6_vshuffb, 64372}, // __builtin_HEXAGON_V6_vshuffb
      {Intrinsic::hexagon_V6_vshuffb_128B, 64401}, // __builtin_HEXAGON_V6_vshuffb_128B
      {Intrinsic::hexagon_V6_vshuffeb, 64435}, // __builtin_HEXAGON_V6_vshuffeb
      {Intrinsic::hexagon_V6_vshuffeb_128B, 64465}, // __builtin_HEXAGON_V6_vshuffeb_128B
      {Intrinsic::hexagon_V6_vshuffh, 64500}, // __builtin_HEXAGON_V6_vshuffh
      {Intrinsic::hexagon_V6_vshuffh_128B, 64529}, // __builtin_HEXAGON_V6_vshuffh_128B
      {Intrinsic::hexagon_V6_vshuffob, 64563}, // __builtin_HEXAGON_V6_vshuffob
      {Intrinsic::hexagon_V6_vshuffob_128B, 64593}, // __builtin_HEXAGON_V6_vshuffob_128B
      {Intrinsic::hexagon_V6_vshuffvdd, 64628}, // __builtin_HEXAGON_V6_vshuffvdd
      {Intrinsic::hexagon_V6_vshuffvdd_128B, 64659}, // __builtin_HEXAGON_V6_vshuffvdd_128B
      {Intrinsic::hexagon_V6_vshufoeb, 64695}, // __builtin_HEXAGON_V6_vshufoeb
      {Intrinsic::hexagon_V6_vshufoeb_128B, 64725}, // __builtin_HEXAGON_V6_vshufoeb_128B
      {Intrinsic::hexagon_V6_vshufoeh, 64760}, // __builtin_HEXAGON_V6_vshufoeh
      {Intrinsic::hexagon_V6_vshufoeh_128B, 64790}, // __builtin_HEXAGON_V6_vshufoeh_128B
      {Intrinsic::hexagon_V6_vshufoh, 64825}, // __builtin_HEXAGON_V6_vshufoh
      {Intrinsic::hexagon_V6_vshufoh_128B, 64854}, // __builtin_HEXAGON_V6_vshufoh_128B
      {Intrinsic::hexagon_V6_vsub_hf, 64888}, // __builtin_HEXAGON_V6_vsub_hf
      {Intrinsic::hexagon_V6_vsub_hf_128B, 64917}, // __builtin_HEXAGON_V6_vsub_hf_128B
      {Intrinsic::hexagon_V6_vsub_hf_hf, 64951}, // __builtin_HEXAGON_V6_vsub_hf_hf
      {Intrinsic::hexagon_V6_vsub_hf_hf_128B, 64983}, // __builtin_HEXAGON_V6_vsub_hf_hf_128B
      {Intrinsic::hexagon_V6_vsub_qf16, 65020}, // __builtin_HEXAGON_V6_vsub_qf16
      {Intrinsic::hexagon_V6_vsub_qf16_128B, 65051}, // __builtin_HEXAGON_V6_vsub_qf16_128B
      {Intrinsic::hexagon_V6_vsub_qf16_mix, 65087}, // __builtin_HEXAGON_V6_vsub_qf16_mix
      {Intrinsic::hexagon_V6_vsub_qf16_mix_128B, 65122}, // __builtin_HEXAGON_V6_vsub_qf16_mix_128B
      {Intrinsic::hexagon_V6_vsub_qf32, 65162}, // __builtin_HEXAGON_V6_vsub_qf32
      {Intrinsic::hexagon_V6_vsub_qf32_128B, 65193}, // __builtin_HEXAGON_V6_vsub_qf32_128B
      {Intrinsic::hexagon_V6_vsub_qf32_mix, 65229}, // __builtin_HEXAGON_V6_vsub_qf32_mix
      {Intrinsic::hexagon_V6_vsub_qf32_mix_128B, 65264}, // __builtin_HEXAGON_V6_vsub_qf32_mix_128B
      {Intrinsic::hexagon_V6_vsub_sf, 65304}, // __builtin_HEXAGON_V6_vsub_sf
      {Intrinsic::hexagon_V6_vsub_sf_128B, 65333}, // __builtin_HEXAGON_V6_vsub_sf_128B
      {Intrinsic::hexagon_V6_vsub_sf_bf, 65367}, // __builtin_HEXAGON_V6_vsub_sf_bf
      {Intrinsic::hexagon_V6_vsub_sf_bf_128B, 65399}, // __builtin_HEXAGON_V6_vsub_sf_bf_128B
      {Intrinsic::hexagon_V6_vsub_sf_hf, 65436}, // __builtin_HEXAGON_V6_vsub_sf_hf
      {Intrinsic::hexagon_V6_vsub_sf_hf_128B, 65468}, // __builtin_HEXAGON_V6_vsub_sf_hf_128B
      {Intrinsic::hexagon_V6_vsub_sf_sf, 65505}, // __builtin_HEXAGON_V6_vsub_sf_sf
      {Intrinsic::hexagon_V6_vsub_sf_sf_128B, 65537}, // __builtin_HEXAGON_V6_vsub_sf_sf_128B
      {Intrinsic::hexagon_V6_vsubb, 65574}, // __builtin_HEXAGON_V6_vsubb
      {Intrinsic::hexagon_V6_vsubb_128B, 65601}, // __builtin_HEXAGON_V6_vsubb_128B
      {Intrinsic::hexagon_V6_vsubb_dv, 65633}, // __builtin_HEXAGON_V6_vsubb_dv
      {Intrinsic::hexagon_V6_vsubb_dv_128B, 65663}, // __builtin_HEXAGON_V6_vsubb_dv_128B
      {Intrinsic::hexagon_V6_vsubbnq, 65698}, // __builtin_HEXAGON_V6_vsubbnq
      {Intrinsic::hexagon_V6_vsubbnq_128B, 65727}, // __builtin_HEXAGON_V6_vsubbnq_128B
      {Intrinsic::hexagon_V6_vsubbq, 65761}, // __builtin_HEXAGON_V6_vsubbq
      {Intrinsic::hexagon_V6_vsubbq_128B, 65789}, // __builtin_HEXAGON_V6_vsubbq_128B
      {Intrinsic::hexagon_V6_vsubbsat, 65822}, // __builtin_HEXAGON_V6_vsubbsat
      {Intrinsic::hexagon_V6_vsubbsat_128B, 65852}, // __builtin_HEXAGON_V6_vsubbsat_128B
      {Intrinsic::hexagon_V6_vsubbsat_dv, 65887}, // __builtin_HEXAGON_V6_vsubbsat_dv
      {Intrinsic::hexagon_V6_vsubbsat_dv_128B, 65920}, // __builtin_HEXAGON_V6_vsubbsat_dv_128B
      {Intrinsic::hexagon_V6_vsubh, 65958}, // __builtin_HEXAGON_V6_vsubh
      {Intrinsic::hexagon_V6_vsubh_128B, 65985}, // __builtin_HEXAGON_V6_vsubh_128B
      {Intrinsic::hexagon_V6_vsubh_dv, 66017}, // __builtin_HEXAGON_V6_vsubh_dv
      {Intrinsic::hexagon_V6_vsubh_dv_128B, 66047}, // __builtin_HEXAGON_V6_vsubh_dv_128B
      {Intrinsic::hexagon_V6_vsubhnq, 66082}, // __builtin_HEXAGON_V6_vsubhnq
      {Intrinsic::hexagon_V6_vsubhnq_128B, 66111}, // __builtin_HEXAGON_V6_vsubhnq_128B
      {Intrinsic::hexagon_V6_vsubhq, 66145}, // __builtin_HEXAGON_V6_vsubhq
      {Intrinsic::hexagon_V6_vsubhq_128B, 66173}, // __builtin_HEXAGON_V6_vsubhq_128B
      {Intrinsic::hexagon_V6_vsubhsat, 66206}, // __builtin_HEXAGON_V6_vsubhsat
      {Intrinsic::hexagon_V6_vsubhsat_128B, 66236}, // __builtin_HEXAGON_V6_vsubhsat_128B
      {Intrinsic::hexagon_V6_vsubhsat_dv, 66271}, // __builtin_HEXAGON_V6_vsubhsat_dv
      {Intrinsic::hexagon_V6_vsubhsat_dv_128B, 66304}, // __builtin_HEXAGON_V6_vsubhsat_dv_128B
      {Intrinsic::hexagon_V6_vsubhw, 66342}, // __builtin_HEXAGON_V6_vsubhw
      {Intrinsic::hexagon_V6_vsubhw_128B, 66370}, // __builtin_HEXAGON_V6_vsubhw_128B
      {Intrinsic::hexagon_V6_vsububh, 66403}, // __builtin_HEXAGON_V6_vsububh
      {Intrinsic::hexagon_V6_vsububh_128B, 66432}, // __builtin_HEXAGON_V6_vsububh_128B
      {Intrinsic::hexagon_V6_vsububsat, 66466}, // __builtin_HEXAGON_V6_vsububsat
      {Intrinsic::hexagon_V6_vsububsat_128B, 66497}, // __builtin_HEXAGON_V6_vsububsat_128B
      {Intrinsic::hexagon_V6_vsububsat_dv, 66533}, // __builtin_HEXAGON_V6_vsububsat_dv
      {Intrinsic::hexagon_V6_vsububsat_dv_128B, 66567}, // __builtin_HEXAGON_V6_vsububsat_dv_128B
      {Intrinsic::hexagon_V6_vsubububb_sat, 66606}, // __builtin_HEXAGON_V6_vsubububb_sat
      {Intrinsic::hexagon_V6_vsubububb_sat_128B, 66641}, // __builtin_HEXAGON_V6_vsubububb_sat_128B
      {Intrinsic::hexagon_V6_vsubuhsat, 66681}, // __builtin_HEXAGON_V6_vsubuhsat
      {Intrinsic::hexagon_V6_vsubuhsat_128B, 66712}, // __builtin_HEXAGON_V6_vsubuhsat_128B
      {Intrinsic::hexagon_V6_vsubuhsat_dv, 66748}, // __builtin_HEXAGON_V6_vsubuhsat_dv
      {Intrinsic::hexagon_V6_vsubuhsat_dv_128B, 66782}, // __builtin_HEXAGON_V6_vsubuhsat_dv_128B
      {Intrinsic::hexagon_V6_vsubuhw, 66821}, // __builtin_HEXAGON_V6_vsubuhw
      {Intrinsic::hexagon_V6_vsubuhw_128B, 66850}, // __builtin_HEXAGON_V6_vsubuhw_128B
      {Intrinsic::hexagon_V6_vsubuwsat, 66884}, // __builtin_HEXAGON_V6_vsubuwsat
      {Intrinsic::hexagon_V6_vsubuwsat_128B, 66915}, // __builtin_HEXAGON_V6_vsubuwsat_128B
      {Intrinsic::hexagon_V6_vsubuwsat_dv, 66951}, // __builtin_HEXAGON_V6_vsubuwsat_dv
      {Intrinsic::hexagon_V6_vsubuwsat_dv_128B, 66985}, // __builtin_HEXAGON_V6_vsubuwsat_dv_128B
      {Intrinsic::hexagon_V6_vsubw, 67024}, // __builtin_HEXAGON_V6_vsubw
      {Intrinsic::hexagon_V6_vsubw_128B, 67051}, // __builtin_HEXAGON_V6_vsubw_128B
      {Intrinsic::hexagon_V6_vsubw_dv, 67083}, // __builtin_HEXAGON_V6_vsubw_dv
      {Intrinsic::hexagon_V6_vsubw_dv_128B, 67113}, // __builtin_HEXAGON_V6_vsubw_dv_128B
      {Intrinsic::hexagon_V6_vsubwnq, 67148}, // __builtin_HEXAGON_V6_vsubwnq
      {Intrinsic::hexagon_V6_vsubwnq_128B, 67177}, // __builtin_HEXAGON_V6_vsubwnq_128B
      {Intrinsic::hexagon_V6_vsubwq, 67211}, // __builtin_HEXAGON_V6_vsubwq
      {Intrinsic::hexagon_V6_vsubwq_128B, 67239}, // __builtin_HEXAGON_V6_vsubwq_128B
      {Intrinsic::hexagon_V6_vsubwsat, 67272}, // __builtin_HEXAGON_V6_vsubwsat
      {Intrinsic::hexagon_V6_vsubwsat_128B, 67302}, // __builtin_HEXAGON_V6_vsubwsat_128B
      {Intrinsic::hexagon_V6_vsubwsat_dv, 67337}, // __builtin_HEXAGON_V6_vsubwsat_dv
      {Intrinsic::hexagon_V6_vsubwsat_dv_128B, 67370}, // __builtin_HEXAGON_V6_vsubwsat_dv_128B
      {Intrinsic::hexagon_V6_vswap, 67408}, // __builtin_HEXAGON_V6_vswap
      {Intrinsic::hexagon_V6_vswap_128B, 67435}, // __builtin_HEXAGON_V6_vswap_128B
      {Intrinsic::hexagon_V6_vtmpyb, 67467}, // __builtin_HEXAGON_V6_vtmpyb
      {Intrinsic::hexagon_V6_vtmpyb_128B, 67495}, // __builtin_HEXAGON_V6_vtmpyb_128B
      {Intrinsic::hexagon_V6_vtmpyb_acc, 67528}, // __builtin_HEXAGON_V6_vtmpyb_acc
      {Intrinsic::hexagon_V6_vtmpyb_acc_128B, 67560}, // __builtin_HEXAGON_V6_vtmpyb_acc_128B
      {Intrinsic::hexagon_V6_vtmpybus, 67597}, // __builtin_HEXAGON_V6_vtmpybus
      {Intrinsic::hexagon_V6_vtmpybus_128B, 67627}, // __builtin_HEXAGON_V6_vtmpybus_128B
      {Intrinsic::hexagon_V6_vtmpybus_acc, 67662}, // __builtin_HEXAGON_V6_vtmpybus_acc
      {Intrinsic::hexagon_V6_vtmpybus_acc_128B, 67696}, // __builtin_HEXAGON_V6_vtmpybus_acc_128B
      {Intrinsic::hexagon_V6_vtmpyhb, 67735}, // __builtin_HEXAGON_V6_vtmpyhb
      {Intrinsic::hexagon_V6_vtmpyhb_128B, 67764}, // __builtin_HEXAGON_V6_vtmpyhb_128B
      {Intrinsic::hexagon_V6_vtmpyhb_acc, 67798}, // __builtin_HEXAGON_V6_vtmpyhb_acc
      {Intrinsic::hexagon_V6_vtmpyhb_acc_128B, 67831}, // __builtin_HEXAGON_V6_vtmpyhb_acc_128B
      {Intrinsic::hexagon_V6_vunpackb, 67869}, // __builtin_HEXAGON_V6_vunpackb
      {Intrinsic::hexagon_V6_vunpackb_128B, 67899}, // __builtin_HEXAGON_V6_vunpackb_128B
      {Intrinsic::hexagon_V6_vunpackh, 67934}, // __builtin_HEXAGON_V6_vunpackh
      {Intrinsic::hexagon_V6_vunpackh_128B, 67964}, // __builtin_HEXAGON_V6_vunpackh_128B
      {Intrinsic::hexagon_V6_vunpackob, 67999}, // __builtin_HEXAGON_V6_vunpackob
      {Intrinsic::hexagon_V6_vunpackob_128B, 68030}, // __builtin_HEXAGON_V6_vunpackob_128B
      {Intrinsic::hexagon_V6_vunpackoh, 68066}, // __builtin_HEXAGON_V6_vunpackoh
      {Intrinsic::hexagon_V6_vunpackoh_128B, 68097}, // __builtin_HEXAGON_V6_vunpackoh_128B
      {Intrinsic::hexagon_V6_vunpackub, 68133}, // __builtin_HEXAGON_V6_vunpackub
      {Intrinsic::hexagon_V6_vunpackub_128B, 68164}, // __builtin_HEXAGON_V6_vunpackub_128B
      {Intrinsic::hexagon_V6_vunpackuh, 68200}, // __builtin_HEXAGON_V6_vunpackuh
      {Intrinsic::hexagon_V6_vunpackuh_128B, 68231}, // __builtin_HEXAGON_V6_vunpackuh_128B
      {Intrinsic::hexagon_V6_vxor, 68267}, // __builtin_HEXAGON_V6_vxor
      {Intrinsic::hexagon_V6_vxor_128B, 68293}, // __builtin_HEXAGON_V6_vxor_128B
      {Intrinsic::hexagon_V6_vzb, 68324}, // __builtin_HEXAGON_V6_vzb
      {Intrinsic::hexagon_V6_vzb_128B, 68349}, // __builtin_HEXAGON_V6_vzb_128B
      {Intrinsic::hexagon_V6_vzh, 68379}, // __builtin_HEXAGON_V6_vzh
      {Intrinsic::hexagon_V6_vzh_128B, 68404}, // __builtin_HEXAGON_V6_vzh_128B
      {Intrinsic::hexagon_Y2_dccleana, 68434}, // __builtin_HEXAGON_Y2_dccleana
      {Intrinsic::hexagon_Y2_dccleaninva, 68464}, // __builtin_HEXAGON_Y2_dccleaninva
      {Intrinsic::hexagon_Y2_dcfetch, 68497}, // __builtin_HEXAGON_Y2_dcfetch
      {Intrinsic::hexagon_Y2_dcinva, 68526}, // __builtin_HEXAGON_Y2_dcinva
      {Intrinsic::hexagon_Y2_dczeroa, 68554}, // __builtin_HEXAGON_Y2_dczeroa
      {Intrinsic::hexagon_Y4_l2fetch, 68583}, // __builtin_HEXAGON_Y4_l2fetch
      {Intrinsic::hexagon_Y5_l2fetch, 68612}, // __builtin_HEXAGON_Y5_l2fetch
      {Intrinsic::hexagon_Y6_dmlink, 68641}, // __builtin_HEXAGON_Y6_dmlink
      {Intrinsic::hexagon_Y6_dmpause, 68669}, // __builtin_HEXAGON_Y6_dmpause
      {Intrinsic::hexagon_Y6_dmpoll, 68698}, // __builtin_HEXAGON_Y6_dmpoll
      {Intrinsic::hexagon_Y6_dmresume, 68726}, // __builtin_HEXAGON_Y6_dmresume
      {Intrinsic::hexagon_Y6_dmstart, 68756}, // __builtin_HEXAGON_Y6_dmstart
      {Intrinsic::hexagon_Y6_dmwait, 68785}, // __builtin_HEXAGON_Y6_dmwait
      {Intrinsic::hexagon_prefetch, 69026}, // __builtin_HEXAGON_prefetch
      {Intrinsic::hexagon_L4_loadd_locked, 17694}, // __builtin__HEXAGON_L4_loadd_locked
      {Intrinsic::hexagon_S2_storerb_pbr, 33084}, // __builtin_brev_stb
      {Intrinsic::hexagon_S2_storerd_pbr, 33103}, // __builtin_brev_std
      {Intrinsic::hexagon_S2_storerh_pbr, 33143}, // __builtin_brev_sth
      {Intrinsic::hexagon_S2_storerf_pbr, 33122}, // __builtin_brev_sthhi
      {Intrinsic::hexagon_S2_storeri_pbr, 33162}, // __builtin_brev_stw
      {Intrinsic::hexagon_circ_ldb, 68813}, // __builtin_circ_ldb
      {Intrinsic::hexagon_circ_ldd, 68832}, // __builtin_circ_ldd
      {Intrinsic::hexagon_circ_ldh, 68851}, // __builtin_circ_ldh
      {Intrinsic::hexagon_circ_ldub, 68870}, // __builtin_circ_ldub
      {Intrinsic::hexagon_circ_lduh, 68890}, // __builtin_circ_lduh
      {Intrinsic::hexagon_circ_ldw, 68910}, // __builtin_circ_ldw
      {Intrinsic::hexagon_circ_stb, 68929}, // __builtin_circ_stb
      {Intrinsic::hexagon_circ_std, 68948}, // __builtin_circ_std
      {Intrinsic::hexagon_circ_sth, 68967}, // __builtin_circ_sth
      {Intrinsic::hexagon_circ_sthhi, 68986}, // __builtin_circ_sthhi
      {Intrinsic::hexagon_circ_stw, 69007}, // __builtin_circ_stw
      {Intrinsic::hexagon_vmemcpy, 69053}, // __builtin_hexagon_vmemcpy
      {Intrinsic::hexagon_vmemset, 69079}, // __builtin_hexagon_vmemset
    };
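    // NOTE: each BuiltinEntry pairs an intrinsic ID with an offset into a
    // shared builtin-name string table, and the array above is sorted by
    // builtin *name*, not by offset (hence the non-monotonic offsets near the
    // end) -- std::lower_bound below depends on that ordering. I->getName()
    // is assumed to resolve the stored offset back into the name table for
    // the exact-match check.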
    auto I = std::lower_bound(std::begin(hexagonNames),
                              std::end(hexagonNames),
                              BuiltinNameStr);
    if (I != std::end(hexagonNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  if (TargetPrefix == "mips") {
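    // The "mips" table below also covers MSA builtins (__builtin_msa_*),
    // which map to Intrinsic::mips_* IDs; they interleave with the plain
    // __builtin_mips_* names purely by lexicographic order.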
    static const BuiltinEntry mipsNames[] = {
      {Intrinsic::mips_absq_s_ph, 69105}, // __builtin_mips_absq_s_ph
      {Intrinsic::mips_absq_s_qb, 69130}, // __builtin_mips_absq_s_qb
      {Intrinsic::mips_absq_s_w, 69155}, // __builtin_mips_absq_s_w
      {Intrinsic::mips_addq_ph, 69267}, // __builtin_mips_addq_ph
      {Intrinsic::mips_addq_s_ph, 69290}, // __builtin_mips_addq_s_ph
      {Intrinsic::mips_addq_s_w, 69315}, // __builtin_mips_addq_s_w
      {Intrinsic::mips_addqh_ph, 69339}, // __builtin_mips_addqh_ph
      {Intrinsic::mips_addqh_r_ph, 69363}, // __builtin_mips_addqh_r_ph
      {Intrinsic::mips_addqh_r_w, 69389}, // __builtin_mips_addqh_r_w
      {Intrinsic::mips_addqh_w, 69414}, // __builtin_mips_addqh_w
      {Intrinsic::mips_addsc, 69713}, // __builtin_mips_addsc
      {Intrinsic::mips_addu_ph, 69734}, // __builtin_mips_addu_ph
      {Intrinsic::mips_addu_qb, 69757}, // __builtin_mips_addu_qb
      {Intrinsic::mips_addu_s_ph, 69780}, // __builtin_mips_addu_s_ph
      {Intrinsic::mips_addu_s_qb, 69805}, // __builtin_mips_addu_s_qb
      {Intrinsic::mips_adduh_qb, 69830}, // __builtin_mips_adduh_qb
      {Intrinsic::mips_adduh_r_qb, 69854}, // __builtin_mips_adduh_r_qb
      {Intrinsic::mips_addwc, 70052}, // __builtin_mips_addwc
      {Intrinsic::mips_append, 70114}, // __builtin_mips_append
      {Intrinsic::mips_balign, 70680}, // __builtin_mips_balign
      {Intrinsic::mips_bitrev, 71234}, // __builtin_mips_bitrev
      {Intrinsic::mips_bposge32, 71612}, // __builtin_mips_bposge32
      {Intrinsic::mips_cmp_eq_ph, 72851}, // __builtin_mips_cmp_eq_ph
      {Intrinsic::mips_cmp_le_ph, 72876}, // __builtin_mips_cmp_le_ph
      {Intrinsic::mips_cmp_lt_ph, 72901}, // __builtin_mips_cmp_lt_ph
      {Intrinsic::mips_cmpgdu_eq_qb, 72926}, // __builtin_mips_cmpgdu_eq_qb
      {Intrinsic::mips_cmpgdu_le_qb, 72954}, // __builtin_mips_cmpgdu_le_qb
      {Intrinsic::mips_cmpgdu_lt_qb, 72982}, // __builtin_mips_cmpgdu_lt_qb
      {Intrinsic::mips_cmpgu_eq_qb, 73010}, // __builtin_mips_cmpgu_eq_qb
      {Intrinsic::mips_cmpgu_le_qb, 73037}, // __builtin_mips_cmpgu_le_qb
      {Intrinsic::mips_cmpgu_lt_qb, 73064}, // __builtin_mips_cmpgu_lt_qb
      {Intrinsic::mips_cmpu_eq_qb, 73091}, // __builtin_mips_cmpu_eq_qb
      {Intrinsic::mips_cmpu_le_qb, 73117}, // __builtin_mips_cmpu_le_qb
      {Intrinsic::mips_cmpu_lt_qb, 73143}, // __builtin_mips_cmpu_lt_qb
      {Intrinsic::mips_dlsa, 73550}, // __builtin_mips_dlsa
      {Intrinsic::mips_dpa_w_ph, 73708}, // __builtin_mips_dpa_w_ph
      {Intrinsic::mips_dpaq_s_w_ph, 73876}, // __builtin_mips_dpaq_s_w_ph
      {Intrinsic::mips_dpaq_sa_l_w, 73903}, // __builtin_mips_dpaq_sa_l_w
      {Intrinsic::mips_dpaqx_s_w_ph, 73930}, // __builtin_mips_dpaqx_s_w_ph
      {Intrinsic::mips_dpaqx_sa_w_ph, 73958}, // __builtin_mips_dpaqx_sa_w_ph
      {Intrinsic::mips_dpau_h_qbl, 73987}, // __builtin_mips_dpau_h_qbl
      {Intrinsic::mips_dpau_h_qbr, 74013}, // __builtin_mips_dpau_h_qbr
      {Intrinsic::mips_dpax_w_ph, 74039}, // __builtin_mips_dpax_w_ph
      {Intrinsic::mips_dps_w_ph, 74064}, // __builtin_mips_dps_w_ph
      {Intrinsic::mips_dpsq_s_w_ph, 74088}, // __builtin_mips_dpsq_s_w_ph
      {Intrinsic::mips_dpsq_sa_l_w, 74115}, // __builtin_mips_dpsq_sa_l_w
      {Intrinsic::mips_dpsqx_s_w_ph, 74142}, // __builtin_mips_dpsqx_s_w_ph
      {Intrinsic::mips_dpsqx_sa_w_ph, 74170}, // __builtin_mips_dpsqx_sa_w_ph
      {Intrinsic::mips_dpsu_h_qbl, 74199}, // __builtin_mips_dpsu_h_qbl
      {Intrinsic::mips_dpsu_h_qbr, 74225}, // __builtin_mips_dpsu_h_qbr
      {Intrinsic::mips_dpsx_w_ph, 74395}, // __builtin_mips_dpsx_w_ph
      {Intrinsic::mips_extp, 74420}, // __builtin_mips_extp
      {Intrinsic::mips_extpdp, 74440}, // __builtin_mips_extpdp
      {Intrinsic::mips_extr_r_w, 74462}, // __builtin_mips_extr_r_w
      {Intrinsic::mips_extr_rs_w, 74486}, // __builtin_mips_extr_rs_w
      {Intrinsic::mips_extr_s_h, 74511}, // __builtin_mips_extr_s_h
      {Intrinsic::mips_extr_w, 74535}, // __builtin_mips_extr_w
      {Intrinsic::mips_insv, 77587}, // __builtin_mips_insv
      {Intrinsic::mips_lbux, 77695}, // __builtin_mips_lbux
      {Intrinsic::mips_lhx, 77911}, // __builtin_mips_lhx
      {Intrinsic::mips_lsa, 77930}, // __builtin_mips_lsa
      {Intrinsic::mips_lwx, 77949}, // __builtin_mips_lwx
      {Intrinsic::mips_madd, 77968}, // __builtin_mips_madd
      {Intrinsic::mips_maddu, 78082}, // __builtin_mips_maddu
      {Intrinsic::mips_maq_s_w_phl, 78191}, // __builtin_mips_maq_s_w_phl
      {Intrinsic::mips_maq_s_w_phr, 78218}, // __builtin_mips_maq_s_w_phr
      {Intrinsic::mips_maq_sa_w_phl, 78245}, // __builtin_mips_maq_sa_w_phl
      {Intrinsic::mips_maq_sa_w_phr, 78273}, // __builtin_mips_maq_sa_w_phr
      {Intrinsic::mips_modsub, 79373}, // __builtin_mips_modsub
      {Intrinsic::mips_msub, 79416}, // __builtin_mips_msub
      {Intrinsic::mips_msubu, 79530}, // __builtin_mips_msubu
      {Intrinsic::mips_mthlip, 79639}, // __builtin_mips_mthlip
      {Intrinsic::mips_mul_ph, 79661}, // __builtin_mips_mul_ph
      {Intrinsic::mips_mul_s_ph, 79727}, // __builtin_mips_mul_s_ph
      {Intrinsic::mips_muleq_s_w_phl, 79751}, // __builtin_mips_muleq_s_w_phl
      {Intrinsic::mips_muleq_s_w_phr, 79780}, // __builtin_mips_muleq_s_w_phr
      {Intrinsic::mips_muleu_s_ph_qbl, 79809}, // __builtin_mips_muleu_s_ph_qbl
      {Intrinsic::mips_muleu_s_ph_qbr, 79839}, // __builtin_mips_muleu_s_ph_qbr
      {Intrinsic::mips_mulq_rs_ph, 79869}, // __builtin_mips_mulq_rs_ph
      {Intrinsic::mips_mulq_rs_w, 79895}, // __builtin_mips_mulq_rs_w
      {Intrinsic::mips_mulq_s_ph, 79920}, // __builtin_mips_mulq_s_ph
      {Intrinsic::mips_mulq_s_w, 79945}, // __builtin_mips_mulq_s_w
      {Intrinsic::mips_mulsa_w_ph, 80015}, // __builtin_mips_mulsa_w_ph
      {Intrinsic::mips_mulsaq_s_w_ph, 80041}, // __builtin_mips_mulsaq_s_w_ph
      {Intrinsic::mips_mult, 80070}, // __builtin_mips_mult
      {Intrinsic::mips_multu, 80090}, // __builtin_mips_multu
      {Intrinsic::mips_packrl_ph, 80443}, // __builtin_mips_packrl_ph
      {Intrinsic::mips_pick_ph, 80728}, // __builtin_mips_pick_ph
      {Intrinsic::mips_pick_qb, 80751}, // __builtin_mips_pick_qb
      {Intrinsic::mips_preceq_w_phl, 80774}, // __builtin_mips_preceq_w_phl
      {Intrinsic::mips_preceq_w_phr, 80802}, // __builtin_mips_preceq_w_phr
      {Intrinsic::mips_precequ_ph_qbl, 80830}, // __builtin_mips_precequ_ph_qbl
      {Intrinsic::mips_precequ_ph_qbla, 80860}, // __builtin_mips_precequ_ph_qbla
      {Intrinsic::mips_precequ_ph_qbr, 80891}, // __builtin_mips_precequ_ph_qbr
      {Intrinsic::mips_precequ_ph_qbra, 80921}, // __builtin_mips_precequ_ph_qbra
      {Intrinsic::mips_preceu_ph_qbl, 80952}, // __builtin_mips_preceu_ph_qbl
      {Intrinsic::mips_preceu_ph_qbla, 80981}, // __builtin_mips_preceu_ph_qbla
      {Intrinsic::mips_preceu_ph_qbr, 81011}, // __builtin_mips_preceu_ph_qbr
      {Intrinsic::mips_preceu_ph_qbra, 81040}, // __builtin_mips_preceu_ph_qbra
      {Intrinsic::mips_precr_qb_ph, 81070}, // __builtin_mips_precr_qb_ph
      {Intrinsic::mips_precr_sra_ph_w, 81097}, // __builtin_mips_precr_sra_ph_w
      {Intrinsic::mips_precr_sra_r_ph_w, 81127}, // __builtin_mips_precr_sra_r_ph_w
      {Intrinsic::mips_precrq_ph_w, 81159}, // __builtin_mips_precrq_ph_w
      {Intrinsic::mips_precrq_qb_ph, 81186}, // __builtin_mips_precrq_qb_ph
      {Intrinsic::mips_precrq_rs_ph_w, 81214}, // __builtin_mips_precrq_rs_ph_w
      {Intrinsic::mips_precrqu_s_qb_ph, 81244}, // __builtin_mips_precrqu_s_qb_ph
      {Intrinsic::mips_prepend, 81275}, // __builtin_mips_prepend
      {Intrinsic::mips_raddu_w_qb, 81298}, // __builtin_mips_raddu_w_qb
      {Intrinsic::mips_rddsp, 81324}, // __builtin_mips_rddsp
      {Intrinsic::mips_repl_ph, 81345}, // __builtin_mips_repl_ph
      {Intrinsic::mips_repl_qb, 81368}, // __builtin_mips_repl_qb
      {Intrinsic::mips_shilo, 81627}, // __builtin_mips_shilo
      {Intrinsic::mips_shll_ph, 81648}, // __builtin_mips_shll_ph
      {Intrinsic::mips_shll_qb, 81671}, // __builtin_mips_shll_qb
      {Intrinsic::mips_shll_s_ph, 81694}, // __builtin_mips_shll_s_ph
      {Intrinsic::mips_shll_s_w, 81719}, // __builtin_mips_shll_s_w
      {Intrinsic::mips_shra_ph, 81743}, // __builtin_mips_shra_ph
      {Intrinsic::mips_shra_qb, 81766}, // __builtin_mips_shra_qb
      {Intrinsic::mips_shra_r_ph, 81789}, // __builtin_mips_shra_r_ph
      {Intrinsic::mips_shra_r_qb, 81814}, // __builtin_mips_shra_r_qb
      {Intrinsic::mips_shra_r_w, 81839}, // __builtin_mips_shra_r_w
      {Intrinsic::mips_shrl_ph, 81863}, // __builtin_mips_shrl_ph
      {Intrinsic::mips_shrl_qb, 81886}, // __builtin_mips_shrl_qb
      {Intrinsic::mips_subq_ph, 83205}, // __builtin_mips_subq_ph
      {Intrinsic::mips_subq_s_ph, 83228}, // __builtin_mips_subq_s_ph
      {Intrinsic::mips_subq_s_w, 83253}, // __builtin_mips_subq_s_w
      {Intrinsic::mips_subqh_ph, 83277}, // __builtin_mips_subqh_ph
      {Intrinsic::mips_subqh_r_ph, 83301}, // __builtin_mips_subqh_r_ph
      {Intrinsic::mips_subqh_r_w, 83327}, // __builtin_mips_subqh_r_w
      {Intrinsic::mips_subqh_w, 83352}, // __builtin_mips_subqh_w
      {Intrinsic::mips_subu_ph, 83759}, // __builtin_mips_subu_ph
      {Intrinsic::mips_subu_qb, 83782}, // __builtin_mips_subu_qb
      {Intrinsic::mips_subu_s_ph, 83805}, // __builtin_mips_subu_s_ph
      {Intrinsic::mips_subu_s_qb, 83830}, // __builtin_mips_subu_s_qb
      {Intrinsic::mips_subuh_qb, 83855}, // __builtin_mips_subuh_qb
      {Intrinsic::mips_subuh_r_qb, 83879}, // __builtin_mips_subuh_r_qb
      {Intrinsic::mips_wrdsp, 84161}, // __builtin_mips_wrdsp
      {Intrinsic::mips_add_a_b, 69179}, // __builtin_msa_add_a_b
      {Intrinsic::mips_add_a_d, 69201}, // __builtin_msa_add_a_d
      {Intrinsic::mips_add_a_h, 69223}, // __builtin_msa_add_a_h
      {Intrinsic::mips_add_a_w, 69245}, // __builtin_msa_add_a_w
      {Intrinsic::mips_adds_a_b, 69437}, // __builtin_msa_adds_a_b
      {Intrinsic::mips_adds_a_d, 69460}, // __builtin_msa_adds_a_d
      {Intrinsic::mips_adds_a_h, 69483}, // __builtin_msa_adds_a_h
      {Intrinsic::mips_adds_a_w, 69506}, // __builtin_msa_adds_a_w
      {Intrinsic::mips_adds_s_b, 69529}, // __builtin_msa_adds_s_b
      {Intrinsic::mips_adds_s_d, 69552}, // __builtin_msa_adds_s_d
      {Intrinsic::mips_adds_s_h, 69575}, // __builtin_msa_adds_s_h
      {Intrinsic::mips_adds_s_w, 69598}, // __builtin_msa_adds_s_w
      {Intrinsic::mips_adds_u_b, 69621}, // __builtin_msa_adds_u_b
      {Intrinsic::mips_adds_u_d, 69644}, // __builtin_msa_adds_u_d
      {Intrinsic::mips_adds_u_h, 69667}, // __builtin_msa_adds_u_h
      {Intrinsic::mips_adds_u_w, 69690}, // __builtin_msa_adds_u_w
      {Intrinsic::mips_addv_b, 69880}, // __builtin_msa_addv_b
      {Intrinsic::mips_addv_d, 69901}, // __builtin_msa_addv_d
      {Intrinsic::mips_addv_h, 69922}, // __builtin_msa_addv_h
      {Intrinsic::mips_addv_w, 69943}, // __builtin_msa_addv_w
      {Intrinsic::mips_addvi_b, 69964}, // __builtin_msa_addvi_b
      {Intrinsic::mips_addvi_d, 69986}, // __builtin_msa_addvi_d
      {Intrinsic::mips_addvi_h, 70008}, // __builtin_msa_addvi_h
      {Intrinsic::mips_addvi_w, 70030}, // __builtin_msa_addvi_w
      {Intrinsic::mips_and_v, 70073}, // __builtin_msa_and_v
      {Intrinsic::mips_andi_b, 70093}, // __builtin_msa_andi_b
      {Intrinsic::mips_asub_s_b, 70136}, // __builtin_msa_asub_s_b
      {Intrinsic::mips_asub_s_d, 70159}, // __builtin_msa_asub_s_d
      {Intrinsic::mips_asub_s_h, 70182}, // __builtin_msa_asub_s_h
      {Intrinsic::mips_asub_s_w, 70205}, // __builtin_msa_asub_s_w
      {Intrinsic::mips_asub_u_b, 70228}, // __builtin_msa_asub_u_b
      {Intrinsic::mips_asub_u_d, 70251}, // __builtin_msa_asub_u_d
      {Intrinsic::mips_asub_u_h, 70274}, // __builtin_msa_asub_u_h
      {Intrinsic::mips_asub_u_w, 70297}, // __builtin_msa_asub_u_w
      {Intrinsic::mips_ave_s_b, 70320}, // __builtin_msa_ave_s_b
      {Intrinsic::mips_ave_s_d, 70342}, // __builtin_msa_ave_s_d
      {Intrinsic::mips_ave_s_h, 70364}, // __builtin_msa_ave_s_h
      {Intrinsic::mips_ave_s_w, 70386}, // __builtin_msa_ave_s_w
      {Intrinsic::mips_ave_u_b, 70408}, // __builtin_msa_ave_u_b
      {Intrinsic::mips_ave_u_d, 70430}, // __builtin_msa_ave_u_d
      {Intrinsic::mips_ave_u_h, 70452}, // __builtin_msa_ave_u_h
      {Intrinsic::mips_ave_u_w, 70474}, // __builtin_msa_ave_u_w
      {Intrinsic::mips_aver_s_b, 70496}, // __builtin_msa_aver_s_b
      {Intrinsic::mips_aver_s_d, 70519}, // __builtin_msa_aver_s_d
      {Intrinsic::mips_aver_s_h, 70542}, // __builtin_msa_aver_s_h
      {Intrinsic::mips_aver_s_w, 70565}, // __builtin_msa_aver_s_w
      {Intrinsic::mips_aver_u_b, 70588}, // __builtin_msa_aver_u_b
      {Intrinsic::mips_aver_u_d, 70611}, // __builtin_msa_aver_u_d
      {Intrinsic::mips_aver_u_h, 70634}, // __builtin_msa_aver_u_h
      {Intrinsic::mips_aver_u_w, 70657}, // __builtin_msa_aver_u_w
      {Intrinsic::mips_bclr_b, 70702}, // __builtin_msa_bclr_b
      {Intrinsic::mips_bclr_d, 70723}, // __builtin_msa_bclr_d
      {Intrinsic::mips_bclr_h, 70744}, // __builtin_msa_bclr_h
      {Intrinsic::mips_bclr_w, 70765}, // __builtin_msa_bclr_w
      {Intrinsic::mips_bclri_b, 70786}, // __builtin_msa_bclri_b
      {Intrinsic::mips_bclri_d, 70808}, // __builtin_msa_bclri_d
      {Intrinsic::mips_bclri_h, 70830}, // __builtin_msa_bclri_h
      {Intrinsic::mips_bclri_w, 70852}, // __builtin_msa_bclri_w
      {Intrinsic::mips_binsl_b, 70874}, // __builtin_msa_binsl_b
      {Intrinsic::mips_binsl_d, 70896}, // __builtin_msa_binsl_d
      {Intrinsic::mips_binsl_h, 70918}, // __builtin_msa_binsl_h
      {Intrinsic::mips_binsl_w, 70940}, // __builtin_msa_binsl_w
      {Intrinsic::mips_binsli_b, 70962}, // __builtin_msa_binsli_b
      {Intrinsic::mips_binsli_d, 70985}, // __builtin_msa_binsli_d
      {Intrinsic::mips_binsli_h, 71008}, // __builtin_msa_binsli_h
      {Intrinsic::mips_binsli_w, 71031}, // __builtin_msa_binsli_w
      {Intrinsic::mips_binsr_b, 71054}, // __builtin_msa_binsr_b
      {Intrinsic::mips_binsr_d, 71076}, // __builtin_msa_binsr_d
      {Intrinsic::mips_binsr_h, 71098}, // __builtin_msa_binsr_h
      {Intrinsic::mips_binsr_w, 71120}, // __builtin_msa_binsr_w
      {Intrinsic::mips_binsri_b, 71142}, // __builtin_msa_binsri_b
      {Intrinsic::mips_binsri_d, 71165}, // __builtin_msa_binsri_d
      {Intrinsic::mips_binsri_h, 71188}, // __builtin_msa_binsri_h
      {Intrinsic::mips_binsri_w, 71211}, // __builtin_msa_binsri_w
      {Intrinsic::mips_bmnz_v, 71256}, // __builtin_msa_bmnz_v
      {Intrinsic::mips_bmnzi_b, 71277}, // __builtin_msa_bmnzi_b
      {Intrinsic::mips_bmz_v, 71299}, // __builtin_msa_bmz_v
      {Intrinsic::mips_bmzi_b, 71319}, // __builtin_msa_bmzi_b
      {Intrinsic::mips_bneg_b, 71340}, // __builtin_msa_bneg_b
      {Intrinsic::mips_bneg_d, 71361}, // __builtin_msa_bneg_d
      {Intrinsic::mips_bneg_h, 71382}, // __builtin_msa_bneg_h
      {Intrinsic::mips_bneg_w, 71403}, // __builtin_msa_bneg_w
      {Intrinsic::mips_bnegi_b, 71424}, // __builtin_msa_bnegi_b
      {Intrinsic::mips_bnegi_d, 71446}, // __builtin_msa_bnegi_d
      {Intrinsic::mips_bnegi_h, 71468}, // __builtin_msa_bnegi_h
      {Intrinsic::mips_bnegi_w, 71490}, // __builtin_msa_bnegi_w
      {Intrinsic::mips_bnz_b, 71512}, // __builtin_msa_bnz_b
      {Intrinsic::mips_bnz_d, 71532}, // __builtin_msa_bnz_d
      {Intrinsic::mips_bnz_h, 71552}, // __builtin_msa_bnz_h
      {Intrinsic::mips_bnz_v, 71572}, // __builtin_msa_bnz_v
      {Intrinsic::mips_bnz_w, 71592}, // __builtin_msa_bnz_w
      {Intrinsic::mips_bsel_v, 71636}, // __builtin_msa_bsel_v
      {Intrinsic::mips_bseli_b, 71657}, // __builtin_msa_bseli_b
      {Intrinsic::mips_bset_b, 71679}, // __builtin_msa_bset_b
      {Intrinsic::mips_bset_d, 71700}, // __builtin_msa_bset_d
      {Intrinsic::mips_bset_h, 71721}, // __builtin_msa_bset_h
      {Intrinsic::mips_bset_w, 71742}, // __builtin_msa_bset_w
      {Intrinsic::mips_bseti_b, 71763}, // __builtin_msa_bseti_b
      {Intrinsic::mips_bseti_d, 71785}, // __builtin_msa_bseti_d
      {Intrinsic::mips_bseti_h, 71807}, // __builtin_msa_bseti_h
      {Intrinsic::mips_bseti_w, 71829}, // __builtin_msa_bseti_w
      {Intrinsic::mips_bz_b, 71851}, // __builtin_msa_bz_b
      {Intrinsic::mips_bz_d, 71870}, // __builtin_msa_bz_d
      {Intrinsic::mips_bz_h, 71889}, // __builtin_msa_bz_h
      {Intrinsic::mips_bz_v, 71908}, // __builtin_msa_bz_v
      {Intrinsic::mips_bz_w, 71927}, // __builtin_msa_bz_w
      {Intrinsic::mips_ceq_b, 71946}, // __builtin_msa_ceq_b
      {Intrinsic::mips_ceq_d, 71966}, // __builtin_msa_ceq_d
      {Intrinsic::mips_ceq_h, 71986}, // __builtin_msa_ceq_h
      {Intrinsic::mips_ceq_w, 72006}, // __builtin_msa_ceq_w
      {Intrinsic::mips_ceqi_b, 72026}, // __builtin_msa_ceqi_b
      {Intrinsic::mips_ceqi_d, 72047}, // __builtin_msa_ceqi_d
      {Intrinsic::mips_ceqi_h, 72068}, // __builtin_msa_ceqi_h
      {Intrinsic::mips_ceqi_w, 72089}, // __builtin_msa_ceqi_w
      {Intrinsic::mips_cfcmsa, 72110}, // __builtin_msa_cfcmsa
      {Intrinsic::mips_cle_s_b, 72131}, // __builtin_msa_cle_s_b
      {Intrinsic::mips_cle_s_d, 72153}, // __builtin_msa_cle_s_d
      {Intrinsic::mips_cle_s_h, 72175}, // __builtin_msa_cle_s_h
      {Intrinsic::mips_cle_s_w, 72197}, // __builtin_msa_cle_s_w
      {Intrinsic::mips_cle_u_b, 72219}, // __builtin_msa_cle_u_b
      {Intrinsic::mips_cle_u_d, 72241}, // __builtin_msa_cle_u_d
      {Intrinsic::mips_cle_u_h, 72263}, // __builtin_msa_cle_u_h
      {Intrinsic::mips_cle_u_w, 72285}, // __builtin_msa_cle_u_w
      {Intrinsic::mips_clei_s_b, 72307}, // __builtin_msa_clei_s_b
      {Intrinsic::mips_clei_s_d, 72330}, // __builtin_msa_clei_s_d
      {Intrinsic::mips_clei_s_h, 72353}, // __builtin_msa_clei_s_h
      {Intrinsic::mips_clei_s_w, 72376}, // __builtin_msa_clei_s_w
      {Intrinsic::mips_clei_u_b, 72399}, // __builtin_msa_clei_u_b
      {Intrinsic::mips_clei_u_d, 72422}, // __builtin_msa_clei_u_d
      {Intrinsic::mips_clei_u_h, 72445}, // __builtin_msa_clei_u_h
      {Intrinsic::mips_clei_u_w, 72468}, // __builtin_msa_clei_u_w
      {Intrinsic::mips_clt_s_b, 72491}, // __builtin_msa_clt_s_b
      {Intrinsic::mips_clt_s_d, 72513}, // __builtin_msa_clt_s_d
      {Intrinsic::mips_clt_s_h, 72535}, // __builtin_msa_clt_s_h
      {Intrinsic::mips_clt_s_w, 72557}, // __builtin_msa_clt_s_w
      {Intrinsic::mips_clt_u_b, 72579}, // __builtin_msa_clt_u_b
      {Intrinsic::mips_clt_u_d, 72601}, // __builtin_msa_clt_u_d
      {Intrinsic::mips_clt_u_h, 72623}, // __builtin_msa_clt_u_h
      {Intrinsic::mips_clt_u_w, 72645}, // __builtin_msa_clt_u_w
      {Intrinsic::mips_clti_s_b, 72667}, // __builtin_msa_clti_s_b
      {Intrinsic::mips_clti_s_d, 72690}, // __builtin_msa_clti_s_d
      {Intrinsic::mips_clti_s_h, 72713}, // __builtin_msa_clti_s_h
      {Intrinsic::mips_clti_s_w, 72736}, // __builtin_msa_clti_s_w
      {Intrinsic::mips_clti_u_b, 72759}, // __builtin_msa_clti_u_b
      {Intrinsic::mips_clti_u_d, 72782}, // __builtin_msa_clti_u_d
      {Intrinsic::mips_clti_u_h, 72805}, // __builtin_msa_clti_u_h
      {Intrinsic::mips_clti_u_w, 72828}, // __builtin_msa_clti_u_w
      {Intrinsic::mips_copy_s_b, 73169}, // __builtin_msa_copy_s_b
      {Intrinsic::mips_copy_s_d, 73192}, // __builtin_msa_copy_s_d
      {Intrinsic::mips_copy_s_h, 73215}, // __builtin_msa_copy_s_h
      {Intrinsic::mips_copy_s_w, 73238}, // __builtin_msa_copy_s_w
      {Intrinsic::mips_copy_u_b, 73261}, // __builtin_msa_copy_u_b
      {Intrinsic::mips_copy_u_d, 73284}, // __builtin_msa_copy_u_d
      {Intrinsic::mips_copy_u_h, 73307}, // __builtin_msa_copy_u_h
      {Intrinsic::mips_copy_u_w, 73330}, // __builtin_msa_copy_u_w
      {Intrinsic::mips_ctcmsa, 73353}, // __builtin_msa_ctcmsa
      {Intrinsic::mips_div_s_b, 73374}, // __builtin_msa_div_s_b
      {Intrinsic::mips_div_s_d, 73396}, // __builtin_msa_div_s_d
      {Intrinsic::mips_div_s_h, 73418}, // __builtin_msa_div_s_h
      {Intrinsic::mips_div_s_w, 73440}, // __builtin_msa_div_s_w
      {Intrinsic::mips_div_u_b, 73462}, // __builtin_msa_div_u_b
      {Intrinsic::mips_div_u_d, 73484}, // __builtin_msa_div_u_d
      {Intrinsic::mips_div_u_h, 73506}, // __builtin_msa_div_u_h
      {Intrinsic::mips_div_u_w, 73528}, // __builtin_msa_div_u_w
      {Intrinsic::mips_dotp_s_d, 73570}, // __builtin_msa_dotp_s_d
      {Intrinsic::mips_dotp_s_h, 73593}, // __builtin_msa_dotp_s_h
      {Intrinsic::mips_dotp_s_w, 73616}, // __builtin_msa_dotp_s_w
      {Intrinsic::mips_dotp_u_d, 73639}, // __builtin_msa_dotp_u_d
      {Intrinsic::mips_dotp_u_h, 73662}, // __builtin_msa_dotp_u_h
      {Intrinsic::mips_dotp_u_w, 73685}, // __builtin_msa_dotp_u_w
      {Intrinsic::mips_dpadd_s_d, 73732}, // __builtin_msa_dpadd_s_d
      {Intrinsic::mips_dpadd_s_h, 73756}, // __builtin_msa_dpadd_s_h
      {Intrinsic::mips_dpadd_s_w, 73780}, // __builtin_msa_dpadd_s_w
      {Intrinsic::mips_dpadd_u_d, 73804}, // __builtin_msa_dpadd_u_d
      {Intrinsic::mips_dpadd_u_h, 73828}, // __builtin_msa_dpadd_u_h
      {Intrinsic::mips_dpadd_u_w, 73852}, // __builtin_msa_dpadd_u_w
      {Intrinsic::mips_dpsub_s_d, 74251}, // __builtin_msa_dpsub_s_d
      {Intrinsic::mips_dpsub_s_h, 74275}, // __builtin_msa_dpsub_s_h
      {Intrinsic::mips_dpsub_s_w, 74299}, // __builtin_msa_dpsub_s_w
      {Intrinsic::mips_dpsub_u_d, 74323}, // __builtin_msa_dpsub_u_d
      {Intrinsic::mips_dpsub_u_h, 74347}, // __builtin_msa_dpsub_u_h
      {Intrinsic::mips_dpsub_u_w, 74371}, // __builtin_msa_dpsub_u_w
      {Intrinsic::mips_fadd_d, 74557}, // __builtin_msa_fadd_d
      {Intrinsic::mips_fadd_w, 74578}, // __builtin_msa_fadd_w
      {Intrinsic::mips_fcaf_d, 74599}, // __builtin_msa_fcaf_d
      {Intrinsic::mips_fcaf_w, 74620}, // __builtin_msa_fcaf_w
      {Intrinsic::mips_fceq_d, 74641}, // __builtin_msa_fceq_d
      {Intrinsic::mips_fceq_w, 74662}, // __builtin_msa_fceq_w
      {Intrinsic::mips_fclass_d, 74683}, // __builtin_msa_fclass_d
      {Intrinsic::mips_fclass_w, 74706}, // __builtin_msa_fclass_w
      {Intrinsic::mips_fcle_d, 74729}, // __builtin_msa_fcle_d
      {Intrinsic::mips_fcle_w, 74750}, // __builtin_msa_fcle_w
      {Intrinsic::mips_fclt_d, 74771}, // __builtin_msa_fclt_d
      {Intrinsic::mips_fclt_w, 74792}, // __builtin_msa_fclt_w
      {Intrinsic::mips_fcne_d, 74813}, // __builtin_msa_fcne_d
      {Intrinsic::mips_fcne_w, 74834}, // __builtin_msa_fcne_w
      {Intrinsic::mips_fcor_d, 74855}, // __builtin_msa_fcor_d
      {Intrinsic::mips_fcor_w, 74876}, // __builtin_msa_fcor_w
      {Intrinsic::mips_fcueq_d, 74897}, // __builtin_msa_fcueq_d
      {Intrinsic::mips_fcueq_w, 74919}, // __builtin_msa_fcueq_w
      {Intrinsic::mips_fcule_d, 74941}, // __builtin_msa_fcule_d
      {Intrinsic::mips_fcule_w, 74963}, // __builtin_msa_fcule_w
      {Intrinsic::mips_fcult_d, 74985}, // __builtin_msa_fcult_d
      {Intrinsic::mips_fcult_w, 75007}, // __builtin_msa_fcult_w
      {Intrinsic::mips_fcun_d, 75029}, // __builtin_msa_fcun_d
      {Intrinsic::mips_fcun_w, 75050}, // __builtin_msa_fcun_w
      {Intrinsic::mips_fcune_d, 75071}, // __builtin_msa_fcune_d
      {Intrinsic::mips_fcune_w, 75093}, // __builtin_msa_fcune_w
      {Intrinsic::mips_fdiv_d, 75115}, // __builtin_msa_fdiv_d
      {Intrinsic::mips_fdiv_w, 75136}, // __builtin_msa_fdiv_w
      {Intrinsic::mips_fexdo_h, 75157}, // __builtin_msa_fexdo_h
      {Intrinsic::mips_fexdo_w, 75179}, // __builtin_msa_fexdo_w
      {Intrinsic::mips_fexp2_d, 75201}, // __builtin_msa_fexp2_d
      {Intrinsic::mips_fexp2_w, 75223}, // __builtin_msa_fexp2_w
      {Intrinsic::mips_fexupl_d, 75245}, // __builtin_msa_fexupl_d
      {Intrinsic::mips_fexupl_w, 75268}, // __builtin_msa_fexupl_w
      {Intrinsic::mips_fexupr_d, 75291}, // __builtin_msa_fexupr_d
      {Intrinsic::mips_fexupr_w, 75314}, // __builtin_msa_fexupr_w
      {Intrinsic::mips_ffint_s_d, 75337}, // __builtin_msa_ffint_s_d
      {Intrinsic::mips_ffint_s_w, 75361}, // __builtin_msa_ffint_s_w
      {Intrinsic::mips_ffint_u_d, 75385}, // __builtin_msa_ffint_u_d
      {Intrinsic::mips_ffint_u_w, 75409}, // __builtin_msa_ffint_u_w
      {Intrinsic::mips_ffql_d, 75433}, // __builtin_msa_ffql_d
      {Intrinsic::mips_ffql_w, 75454}, // __builtin_msa_ffql_w
      {Intrinsic::mips_ffqr_d, 75475}, // __builtin_msa_ffqr_d
      {Intrinsic::mips_ffqr_w, 75496}, // __builtin_msa_ffqr_w
      {Intrinsic::mips_fill_b, 75517}, // __builtin_msa_fill_b
      {Intrinsic::mips_fill_d, 75538}, // __builtin_msa_fill_d
      {Intrinsic::mips_fill_h, 75559}, // __builtin_msa_fill_h
      {Intrinsic::mips_fill_w, 75580}, // __builtin_msa_fill_w
      {Intrinsic::mips_flog2_d, 75601}, // __builtin_msa_flog2_d
      {Intrinsic::mips_flog2_w, 75623}, // __builtin_msa_flog2_w
      {Intrinsic::mips_fmadd_d, 75645}, // __builtin_msa_fmadd_d
      {Intrinsic::mips_fmadd_w, 75667}, // __builtin_msa_fmadd_w
      {Intrinsic::mips_fmax_a_d, 75689}, // __builtin_msa_fmax_a_d
      {Intrinsic::mips_fmax_a_w, 75712}, // __builtin_msa_fmax_a_w
      {Intrinsic::mips_fmax_d, 75735}, // __builtin_msa_fmax_d
      {Intrinsic::mips_fmax_w, 75756}, // __builtin_msa_fmax_w
      {Intrinsic::mips_fmin_a_d, 75777}, // __builtin_msa_fmin_a_d
      {Intrinsic::mips_fmin_a_w, 75800}, // __builtin_msa_fmin_a_w
      {Intrinsic::mips_fmin_d, 75823}, // __builtin_msa_fmin_d
      {Intrinsic::mips_fmin_w, 75844}, // __builtin_msa_fmin_w
      {Intrinsic::mips_fmsub_d, 75865}, // __builtin_msa_fmsub_d
      {Intrinsic::mips_fmsub_w, 75887}, // __builtin_msa_fmsub_w
      {Intrinsic::mips_fmul_d, 75909}, // __builtin_msa_fmul_d
      {Intrinsic::mips_fmul_w, 75930}, // __builtin_msa_fmul_w
      {Intrinsic::mips_frcp_d, 75951}, // __builtin_msa_frcp_d
      {Intrinsic::mips_frcp_w, 75972}, // __builtin_msa_frcp_w
      {Intrinsic::mips_frint_d, 75993}, // __builtin_msa_frint_d
      {Intrinsic::mips_frint_w, 76015}, // __builtin_msa_frint_w
      {Intrinsic::mips_frsqrt_d, 76037}, // __builtin_msa_frsqrt_d
      {Intrinsic::mips_frsqrt_w, 76060}, // __builtin_msa_frsqrt_w
      {Intrinsic::mips_fsaf_d, 76083}, // __builtin_msa_fsaf_d
      {Intrinsic::mips_fsaf_w, 76104}, // __builtin_msa_fsaf_w
      {Intrinsic::mips_fseq_d, 76125}, // __builtin_msa_fseq_d
      {Intrinsic::mips_fseq_w, 76146}, // __builtin_msa_fseq_w
      {Intrinsic::mips_fsle_d, 76167}, // __builtin_msa_fsle_d
      {Intrinsic::mips_fsle_w, 76188}, // __builtin_msa_fsle_w
      {Intrinsic::mips_fslt_d, 76209}, // __builtin_msa_fslt_d
      {Intrinsic::mips_fslt_w, 76230}, // __builtin_msa_fslt_w
      {Intrinsic::mips_fsne_d, 76251}, // __builtin_msa_fsne_d
      {Intrinsic::mips_fsne_w, 76272}, // __builtin_msa_fsne_w
      {Intrinsic::mips_fsor_d, 76293}, // __builtin_msa_fsor_d
      {Intrinsic::mips_fsor_w, 76314}, // __builtin_msa_fsor_w
      {Intrinsic::mips_fsqrt_d, 76335}, // __builtin_msa_fsqrt_d
      {Intrinsic::mips_fsqrt_w, 76357}, // __builtin_msa_fsqrt_w
      {Intrinsic::mips_fsub_d, 76379}, // __builtin_msa_fsub_d
      {Intrinsic::mips_fsub_w, 76400}, // __builtin_msa_fsub_w
      {Intrinsic::mips_fsueq_d, 76421}, // __builtin_msa_fsueq_d
      {Intrinsic::mips_fsueq_w, 76443}, // __builtin_msa_fsueq_w
      {Intrinsic::mips_fsule_d, 76465}, // __builtin_msa_fsule_d
      {Intrinsic::mips_fsule_w, 76487}, // __builtin_msa_fsule_w
      {Intrinsic::mips_fsult_d, 76509}, // __builtin_msa_fsult_d
      {Intrinsic::mips_fsult_w, 76531}, // __builtin_msa_fsult_w
      {Intrinsic::mips_fsun_d, 76553}, // __builtin_msa_fsun_d
      {Intrinsic::mips_fsun_w, 76574}, // __builtin_msa_fsun_w
      {Intrinsic::mips_fsune_d, 76595}, // __builtin_msa_fsune_d
      {Intrinsic::mips_fsune_w, 76617}, // __builtin_msa_fsune_w
      {Intrinsic::mips_ftint_s_d, 76639}, // __builtin_msa_ftint_s_d
      {Intrinsic::mips_ftint_s_w, 76663}, // __builtin_msa_ftint_s_w
      {Intrinsic::mips_ftint_u_d, 76687}, // __builtin_msa_ftint_u_d
      {Intrinsic::mips_ftint_u_w, 76711}, // __builtin_msa_ftint_u_w
      {Intrinsic::mips_ftq_h, 76735}, // __builtin_msa_ftq_h
      {Intrinsic::mips_ftq_w, 76755}, // __builtin_msa_ftq_w
      {Intrinsic::mips_ftrunc_s_d, 76775}, // __builtin_msa_ftrunc_s_d
      {Intrinsic::mips_ftrunc_s_w, 76800}, // __builtin_msa_ftrunc_s_w
      {Intrinsic::mips_ftrunc_u_d, 76825}, // __builtin_msa_ftrunc_u_d
      {Intrinsic::mips_ftrunc_u_w, 76850}, // __builtin_msa_ftrunc_u_w
      {Intrinsic::mips_hadd_s_d, 76875}, // __builtin_msa_hadd_s_d
      {Intrinsic::mips_hadd_s_h, 76898}, // __builtin_msa_hadd_s_h
      {Intrinsic::mips_hadd_s_w, 76921}, // __builtin_msa_hadd_s_w
      {Intrinsic::mips_hadd_u_d, 76944}, // __builtin_msa_hadd_u_d
      {Intrinsic::mips_hadd_u_h, 76967}, // __builtin_msa_hadd_u_h
      {Intrinsic::mips_hadd_u_w, 76990}, // __builtin_msa_hadd_u_w
      {Intrinsic::mips_hsub_s_d, 77013}, // __builtin_msa_hsub_s_d
      {Intrinsic::mips_hsub_s_h, 77036}, // __builtin_msa_hsub_s_h
      {Intrinsic::mips_hsub_s_w, 77059}, // __builtin_msa_hsub_s_w
      {Intrinsic::mips_hsub_u_d, 77082}, // __builtin_msa_hsub_u_d
      {Intrinsic::mips_hsub_u_h, 77105}, // __builtin_msa_hsub_u_h
      {Intrinsic::mips_hsub_u_w, 77128}, // __builtin_msa_hsub_u_w
      {Intrinsic::mips_ilvev_b, 77151}, // __builtin_msa_ilvev_b
      {Intrinsic::mips_ilvev_d, 77173}, // __builtin_msa_ilvev_d
      {Intrinsic::mips_ilvev_h, 77195}, // __builtin_msa_ilvev_h
      {Intrinsic::mips_ilvev_w, 77217}, // __builtin_msa_ilvev_w
      {Intrinsic::mips_ilvl_b, 77239}, // __builtin_msa_ilvl_b
      {Intrinsic::mips_ilvl_d, 77260}, // __builtin_msa_ilvl_d
      {Intrinsic::mips_ilvl_h, 77281}, // __builtin_msa_ilvl_h
      {Intrinsic::mips_ilvl_w, 77302}, // __builtin_msa_ilvl_w
      {Intrinsic::mips_ilvod_b, 77323}, // __builtin_msa_ilvod_b
      {Intrinsic::mips_ilvod_d, 77345}, // __builtin_msa_ilvod_d
      {Intrinsic::mips_ilvod_h, 77367}, // __builtin_msa_ilvod_h
      {Intrinsic::mips_ilvod_w, 77389}, // __builtin_msa_ilvod_w
      {Intrinsic::mips_ilvr_b, 77411}, // __builtin_msa_ilvr_b
      {Intrinsic::mips_ilvr_d, 77432}, // __builtin_msa_ilvr_d
      {Intrinsic::mips_ilvr_h, 77453}, // __builtin_msa_ilvr_h
      {Intrinsic::mips_ilvr_w, 77474}, // __builtin_msa_ilvr_w
      {Intrinsic::mips_insert_b, 77495}, // __builtin_msa_insert_b
      {Intrinsic::mips_insert_d, 77518}, // __builtin_msa_insert_d
      {Intrinsic::mips_insert_h, 77541}, // __builtin_msa_insert_h
      {Intrinsic::mips_insert_w, 77564}, // __builtin_msa_insert_w
      {Intrinsic::mips_insve_b, 77607}, // __builtin_msa_insve_b
      {Intrinsic::mips_insve_d, 77629}, // __builtin_msa_insve_d
      {Intrinsic::mips_insve_h, 77651}, // __builtin_msa_insve_h
      {Intrinsic::mips_insve_w, 77673}, // __builtin_msa_insve_w
      {Intrinsic::mips_ld_b, 77715}, // __builtin_msa_ld_b
      {Intrinsic::mips_ld_d, 77734}, // __builtin_msa_ld_d
      {Intrinsic::mips_ld_h, 77753}, // __builtin_msa_ld_h
      {Intrinsic::mips_ld_w, 77772}, // __builtin_msa_ld_w
      {Intrinsic::mips_ldi_b, 77791}, // __builtin_msa_ldi_b
      {Intrinsic::mips_ldi_d, 77811}, // __builtin_msa_ldi_d
      {Intrinsic::mips_ldi_h, 77831}, // __builtin_msa_ldi_h
      {Intrinsic::mips_ldi_w, 77851}, // __builtin_msa_ldi_w
      {Intrinsic::mips_ldr_d, 77871}, // __builtin_msa_ldr_d
      {Intrinsic::mips_ldr_w, 77891}, // __builtin_msa_ldr_w
      {Intrinsic::mips_madd_q_h, 77988}, // __builtin_msa_madd_q_h
      {Intrinsic::mips_madd_q_w, 78011}, // __builtin_msa_madd_q_w
      {Intrinsic::mips_maddr_q_h, 78034}, // __builtin_msa_maddr_q_h
      {Intrinsic::mips_maddr_q_w, 78058}, // __builtin_msa_maddr_q_w
      {Intrinsic::mips_maddv_b, 78103}, // __builtin_msa_maddv_b
      {Intrinsic::mips_maddv_d, 78125}, // __builtin_msa_maddv_d
      {Intrinsic::mips_maddv_h, 78147}, // __builtin_msa_maddv_h
      {Intrinsic::mips_maddv_w, 78169}, // __builtin_msa_maddv_w
      {Intrinsic::mips_max_a_b, 78301}, // __builtin_msa_max_a_b
      {Intrinsic::mips_max_a_d, 78323}, // __builtin_msa_max_a_d
      {Intrinsic::mips_max_a_h, 78345}, // __builtin_msa_max_a_h
      {Intrinsic::mips_max_a_w, 78367}, // __builtin_msa_max_a_w
      {Intrinsic::mips_max_s_b, 78389}, // __builtin_msa_max_s_b
      {Intrinsic::mips_max_s_d, 78411}, // __builtin_msa_max_s_d
      {Intrinsic::mips_max_s_h, 78433}, // __builtin_msa_max_s_h
      {Intrinsic::mips_max_s_w, 78455}, // __builtin_msa_max_s_w
      {Intrinsic::mips_max_u_b, 78477}, // __builtin_msa_max_u_b
      {Intrinsic::mips_max_u_d, 78499}, // __builtin_msa_max_u_d
      {Intrinsic::mips_max_u_h, 78521}, // __builtin_msa_max_u_h
      {Intrinsic::mips_max_u_w, 78543}, // __builtin_msa_max_u_w
      {Intrinsic::mips_maxi_s_b, 78565}, // __builtin_msa_maxi_s_b
      {Intrinsic::mips_maxi_s_d, 78588}, // __builtin_msa_maxi_s_d
      {Intrinsic::mips_maxi_s_h, 78611}, // __builtin_msa_maxi_s_h
      {Intrinsic::mips_maxi_s_w, 78634}, // __builtin_msa_maxi_s_w
      {Intrinsic::mips_maxi_u_b, 78657}, // __builtin_msa_maxi_u_b
      {Intrinsic::mips_maxi_u_d, 78680}, // __builtin_msa_maxi_u_d
      {Intrinsic::mips_maxi_u_h, 78703}, // __builtin_msa_maxi_u_h
      {Intrinsic::mips_maxi_u_w, 78726}, // __builtin_msa_maxi_u_w
      {Intrinsic::mips_min_a_b, 78749}, // __builtin_msa_min_a_b
      {Intrinsic::mips_min_a_d, 78771}, // __builtin_msa_min_a_d
      {Intrinsic::mips_min_a_h, 78793}, // __builtin_msa_min_a_h
      {Intrinsic::mips_min_a_w, 78815}, // __builtin_msa_min_a_w
      {Intrinsic::mips_min_s_b, 78837}, // __builtin_msa_min_s_b
      {Intrinsic::mips_min_s_d, 78859}, // __builtin_msa_min_s_d
      {Intrinsic::mips_min_s_h, 78881}, // __builtin_msa_min_s_h
      {Intrinsic::mips_min_s_w, 78903}, // __builtin_msa_min_s_w
      {Intrinsic::mips_min_u_b, 78925}, // __builtin_msa_min_u_b
      {Intrinsic::mips_min_u_d, 78947}, // __builtin_msa_min_u_d
      {Intrinsic::mips_min_u_h, 78969}, // __builtin_msa_min_u_h
      {Intrinsic::mips_min_u_w, 78991}, // __builtin_msa_min_u_w
      {Intrinsic::mips_mini_s_b, 79013}, // __builtin_msa_mini_s_b
      {Intrinsic::mips_mini_s_d, 79036}, // __builtin_msa_mini_s_d
      {Intrinsic::mips_mini_s_h, 79059}, // __builtin_msa_mini_s_h
      {Intrinsic::mips_mini_s_w, 79082}, // __builtin_msa_mini_s_w
      {Intrinsic::mips_mini_u_b, 79105}, // __builtin_msa_mini_u_b
      {Intrinsic::mips_mini_u_d, 79128}, // __builtin_msa_mini_u_d
      {Intrinsic::mips_mini_u_h, 79151}, // __builtin_msa_mini_u_h
      {Intrinsic::mips_mini_u_w, 79174}, // __builtin_msa_mini_u_w
      {Intrinsic::mips_mod_s_b, 79197}, // __builtin_msa_mod_s_b
      {Intrinsic::mips_mod_s_d, 79219}, // __builtin_msa_mod_s_d
      {Intrinsic::mips_mod_s_h, 79241}, // __builtin_msa_mod_s_h
      {Intrinsic::mips_mod_s_w, 79263}, // __builtin_msa_mod_s_w
      {Intrinsic::mips_mod_u_b, 79285}, // __builtin_msa_mod_u_b
      {Intrinsic::mips_mod_u_d, 79307}, // __builtin_msa_mod_u_d
      {Intrinsic::mips_mod_u_h, 79329}, // __builtin_msa_mod_u_h
      {Intrinsic::mips_mod_u_w, 79351}, // __builtin_msa_mod_u_w
      {Intrinsic::mips_move_v, 79395}, // __builtin_msa_move_v
      {Intrinsic::mips_msub_q_h, 79436}, // __builtin_msa_msub_q_h
      {Intrinsic::mips_msub_q_w, 79459}, // __builtin_msa_msub_q_w
      {Intrinsic::mips_msubr_q_h, 79482}, // __builtin_msa_msubr_q_h
      {Intrinsic::mips_msubr_q_w, 79506}, // __builtin_msa_msubr_q_w
      {Intrinsic::mips_msubv_b, 79551}, // __builtin_msa_msubv_b
      {Intrinsic::mips_msubv_d, 79573}, // __builtin_msa_msubv_d
      {Intrinsic::mips_msubv_h, 79595}, // __builtin_msa_msubv_h
      {Intrinsic::mips_msubv_w, 79617}, // __builtin_msa_msubv_w
      {Intrinsic::mips_mul_q_h, 79683}, // __builtin_msa_mul_q_h
      {Intrinsic::mips_mul_q_w, 79705}, // __builtin_msa_mul_q_w
      {Intrinsic::mips_mulr_q_h, 79969}, // __builtin_msa_mulr_q_h
      {Intrinsic::mips_mulr_q_w, 79992}, // __builtin_msa_mulr_q_w
      {Intrinsic::mips_mulv_b, 80111}, // __builtin_msa_mulv_b
      {Intrinsic::mips_mulv_d, 80132}, // __builtin_msa_mulv_d
      {Intrinsic::mips_mulv_h, 80153}, // __builtin_msa_mulv_h
      {Intrinsic::mips_mulv_w, 80174}, // __builtin_msa_mulv_w
      {Intrinsic::mips_nloc_b, 80195}, // __builtin_msa_nloc_b
      {Intrinsic::mips_nloc_d, 80216}, // __builtin_msa_nloc_d
      {Intrinsic::mips_nloc_h, 80237}, // __builtin_msa_nloc_h
      {Intrinsic::mips_nloc_w, 80258}, // __builtin_msa_nloc_w
      {Intrinsic::mips_nlzc_b, 80279}, // __builtin_msa_nlzc_b
      {Intrinsic::mips_nlzc_d, 80300}, // __builtin_msa_nlzc_d
      {Intrinsic::mips_nlzc_h, 80321}, // __builtin_msa_nlzc_h
      {Intrinsic::mips_nlzc_w, 80342}, // __builtin_msa_nlzc_w
      {Intrinsic::mips_nor_v, 80363}, // __builtin_msa_nor_v
      {Intrinsic::mips_nori_b, 80383}, // __builtin_msa_nori_b
      {Intrinsic::mips_or_v, 80404}, // __builtin_msa_or_v
      {Intrinsic::mips_ori_b, 80423}, // __builtin_msa_ori_b
      {Intrinsic::mips_pckev_b, 80468}, // __builtin_msa_pckev_b
      {Intrinsic::mips_pckev_d, 80490}, // __builtin_msa_pckev_d
      {Intrinsic::mips_pckev_h, 80512}, // __builtin_msa_pckev_h
      {Intrinsic::mips_pckev_w, 80534}, // __builtin_msa_pckev_w
      {Intrinsic::mips_pckod_b, 80556}, // __builtin_msa_pckod_b
      {Intrinsic::mips_pckod_d, 80578}, // __builtin_msa_pckod_d
      {Intrinsic::mips_pckod_h, 80600}, // __builtin_msa_pckod_h
      {Intrinsic::mips_pckod_w, 80622}, // __builtin_msa_pckod_w
      {Intrinsic::mips_pcnt_b, 80644}, // __builtin_msa_pcnt_b
      {Intrinsic::mips_pcnt_d, 80665}, // __builtin_msa_pcnt_d
      {Intrinsic::mips_pcnt_h, 80686}, // __builtin_msa_pcnt_h
      {Intrinsic::mips_pcnt_w, 80707}, // __builtin_msa_pcnt_w
      {Intrinsic::mips_sat_s_b, 81391}, // __builtin_msa_sat_s_b
      {Intrinsic::mips_sat_s_d, 81413}, // __builtin_msa_sat_s_d
      {Intrinsic::mips_sat_s_h, 81435}, // __builtin_msa_sat_s_h
      {Intrinsic::mips_sat_s_w, 81457}, // __builtin_msa_sat_s_w
      {Intrinsic::mips_sat_u_b, 81479}, // __builtin_msa_sat_u_b
      {Intrinsic::mips_sat_u_d, 81501}, // __builtin_msa_sat_u_d
      {Intrinsic::mips_sat_u_h, 81523}, // __builtin_msa_sat_u_h
      {Intrinsic::mips_sat_u_w, 81545}, // __builtin_msa_sat_u_w
      {Intrinsic::mips_shf_b, 81567}, // __builtin_msa_shf_b
      {Intrinsic::mips_shf_h, 81587}, // __builtin_msa_shf_h
      {Intrinsic::mips_shf_w, 81607}, // __builtin_msa_shf_w
      {Intrinsic::mips_sld_b, 81909}, // __builtin_msa_sld_b
      {Intrinsic::mips_sld_d, 81929}, // __builtin_msa_sld_d
      {Intrinsic::mips_sld_h, 81949}, // __builtin_msa_sld_h
      {Intrinsic::mips_sld_w, 81969}, // __builtin_msa_sld_w
      {Intrinsic::mips_sldi_b, 81989}, // __builtin_msa_sldi_b
      {Intrinsic::mips_sldi_d, 82010}, // __builtin_msa_sldi_d
      {Intrinsic::mips_sldi_h, 82031}, // __builtin_msa_sldi_h
      {Intrinsic::mips_sldi_w, 82052}, // __builtin_msa_sldi_w
      {Intrinsic::mips_sll_b, 82073}, // __builtin_msa_sll_b
      {Intrinsic::mips_sll_d, 82093}, // __builtin_msa_sll_d
      {Intrinsic::mips_sll_h, 82113}, // __builtin_msa_sll_h
      {Intrinsic::mips_sll_w, 82133}, // __builtin_msa_sll_w
      {Intrinsic::mips_slli_b, 82153}, // __builtin_msa_slli_b
      {Intrinsic::mips_slli_d, 82174}, // __builtin_msa_slli_d
      {Intrinsic::mips_slli_h, 82195}, // __builtin_msa_slli_h
      {Intrinsic::mips_slli_w, 82216}, // __builtin_msa_slli_w
      {Intrinsic::mips_splat_b, 82237}, // __builtin_msa_splat_b
      {Intrinsic::mips_splat_d, 82259}, // __builtin_msa_splat_d
      {Intrinsic::mips_splat_h, 82281}, // __builtin_msa_splat_h
      {Intrinsic::mips_splat_w, 82303}, // __builtin_msa_splat_w
      {Intrinsic::mips_splati_b, 82325}, // __builtin_msa_splati_b
      {Intrinsic::mips_splati_d, 82348}, // __builtin_msa_splati_d
      {Intrinsic::mips_splati_h, 82371}, // __builtin_msa_splati_h
      {Intrinsic::mips_splati_w, 82394}, // __builtin_msa_splati_w
      {Intrinsic::mips_sra_b, 82417}, // __builtin_msa_sra_b
      {Intrinsic::mips_sra_d, 82437}, // __builtin_msa_sra_d
      {Intrinsic::mips_sra_h, 82457}, // __builtin_msa_sra_h
      {Intrinsic::mips_sra_w, 82477}, // __builtin_msa_sra_w
      {Intrinsic::mips_srai_b, 82497}, // __builtin_msa_srai_b
      {Intrinsic::mips_srai_d, 82518}, // __builtin_msa_srai_d
      {Intrinsic::mips_srai_h, 82539}, // __builtin_msa_srai_h
      {Intrinsic::mips_srai_w, 82560}, // __builtin_msa_srai_w
      {Intrinsic::mips_srar_b, 82581}, // __builtin_msa_srar_b
      {Intrinsic::mips_srar_d, 82602}, // __builtin_msa_srar_d
      {Intrinsic::mips_srar_h, 82623}, // __builtin_msa_srar_h
      {Intrinsic::mips_srar_w, 82644}, // __builtin_msa_srar_w
      {Intrinsic::mips_srari_b, 82665}, // __builtin_msa_srari_b
      {Intrinsic::mips_srari_d, 82687}, // __builtin_msa_srari_d
      {Intrinsic::mips_srari_h, 82709}, // __builtin_msa_srari_h
      {Intrinsic::mips_srari_w, 82731}, // __builtin_msa_srari_w
      {Intrinsic::mips_srl_b, 82753}, // __builtin_msa_srl_b
      {Intrinsic::mips_srl_d, 82773}, // __builtin_msa_srl_d
      {Intrinsic::mips_srl_h, 82793}, // __builtin_msa_srl_h
      {Intrinsic::mips_srl_w, 82813}, // __builtin_msa_srl_w
      {Intrinsic::mips_srli_b, 82833}, // __builtin_msa_srli_b
      {Intrinsic::mips_srli_d, 82854}, // __builtin_msa_srli_d
      {Intrinsic::mips_srli_h, 82875}, // __builtin_msa_srli_h
      {Intrinsic::mips_srli_w, 82896}, // __builtin_msa_srli_w
      {Intrinsic::mips_srlr_b, 82917}, // __builtin_msa_srlr_b
      {Intrinsic::mips_srlr_d, 82938}, // __builtin_msa_srlr_d
      {Intrinsic::mips_srlr_h, 82959}, // __builtin_msa_srlr_h
      {Intrinsic::mips_srlr_w, 82980}, // __builtin_msa_srlr_w
      {Intrinsic::mips_srlri_b, 83001}, // __builtin_msa_srlri_b
      {Intrinsic::mips_srlri_d, 83023}, // __builtin_msa_srlri_d
      {Intrinsic::mips_srlri_h, 83045}, // __builtin_msa_srlri_h
      {Intrinsic::mips_srlri_w, 83067}, // __builtin_msa_srlri_w
      {Intrinsic::mips_st_b, 83089}, // __builtin_msa_st_b
      {Intrinsic::mips_st_d, 83108}, // __builtin_msa_st_d
      {Intrinsic::mips_st_h, 83127}, // __builtin_msa_st_h
      {Intrinsic::mips_st_w, 83146}, // __builtin_msa_st_w
      {Intrinsic::mips_str_d, 83165}, // __builtin_msa_str_d
      {Intrinsic::mips_str_w, 83185}, // __builtin_msa_str_w
      {Intrinsic::mips_subs_s_b, 83375}, // __builtin_msa_subs_s_b
      {Intrinsic::mips_subs_s_d, 83398}, // __builtin_msa_subs_s_d
      {Intrinsic::mips_subs_s_h, 83421}, // __builtin_msa_subs_s_h
      {Intrinsic::mips_subs_s_w, 83444}, // __builtin_msa_subs_s_w
      {Intrinsic::mips_subs_u_b, 83467}, // __builtin_msa_subs_u_b
      {Intrinsic::mips_subs_u_d, 83490}, // __builtin_msa_subs_u_d
      {Intrinsic::mips_subs_u_h, 83513}, // __builtin_msa_subs_u_h
      {Intrinsic::mips_subs_u_w, 83536}, // __builtin_msa_subs_u_w
      {Intrinsic::mips_subsus_u_b, 83559}, // __builtin_msa_subsus_u_b
      {Intrinsic::mips_subsus_u_d, 83584}, // __builtin_msa_subsus_u_d
      {Intrinsic::mips_subsus_u_h, 83609}, // __builtin_msa_subsus_u_h
      {Intrinsic::mips_subsus_u_w, 83634}, // __builtin_msa_subsus_u_w
      {Intrinsic::mips_subsuu_s_b, 83659}, // __builtin_msa_subsuu_s_b
      {Intrinsic::mips_subsuu_s_d, 83684}, // __builtin_msa_subsuu_s_d
      {Intrinsic::mips_subsuu_s_h, 83709}, // __builtin_msa_subsuu_s_h
      {Intrinsic::mips_subsuu_s_w, 83734}, // __builtin_msa_subsuu_s_w
      {Intrinsic::mips_subv_b, 83905}, // __builtin_msa_subv_b
      {Intrinsic::mips_subv_d, 83926}, // __builtin_msa_subv_d
      {Intrinsic::mips_subv_h, 83947}, // __builtin_msa_subv_h
      {Intrinsic::mips_subv_w, 83968}, // __builtin_msa_subv_w
      {Intrinsic::mips_subvi_b, 83989}, // __builtin_msa_subvi_b
      {Intrinsic::mips_subvi_d, 84011}, // __builtin_msa_subvi_d
      {Intrinsic::mips_subvi_h, 84033}, // __builtin_msa_subvi_h
      {Intrinsic::mips_subvi_w, 84055}, // __builtin_msa_subvi_w
      {Intrinsic::mips_vshf_b, 84077}, // __builtin_msa_vshf_b
      {Intrinsic::mips_vshf_d, 84098}, // __builtin_msa_vshf_d
      {Intrinsic::mips_vshf_h, 84119}, // __builtin_msa_vshf_h
      {Intrinsic::mips_vshf_w, 84140}, // __builtin_msa_vshf_w
      {Intrinsic::mips_xor_v, 84182}, // __builtin_msa_xor_v
      {Intrinsic::mips_xori_b, 84202}, // __builtin_msa_xori_b
    };
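    // The lookup below binary-searches mipsNames, which is kept sorted by
    // builtin name (the string in each trailing comment).  That only works
    // if BuiltinEntry orders itself against the probe string; the struct is
    // defined earlier in this generated file and is not visible in this
    // excerpt, but a minimal sketch of the comparator shape being assumed:
    //
    //   struct BuiltinEntry {
    //     Intrinsic::ID IntrinID;
    //     unsigned StrTabOffset;  // offset of the name in the string table
    //     const char *getName() const { return &BuiltinNames[StrTabOffset]; }
    //     bool operator<(StringRef RHS) const {
    //       return strncmp(getName(), RHS.data(), RHS.size()) < 0;
    //     }
    //   };
    //
    // A comparator of this shape treats a table name that merely begins
    // with the probe as "not less than" it, so lower_bound alone is not
    // enough: the exact getName() == BuiltinNameStr check below is what
    // rejects prefix-only matches.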
    auto I = std::lower_bound(std::begin(mipsNames),
                              std::end(mipsNames),
                              BuiltinNameStr);
    if (I != std::end(mipsNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
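  // Each target-prefix block in this function repeats the same idiom: a
  // name-sorted table, std::lower_bound to find the candidate row, and an
  // exact string compare before returning the intrinsic ID.  A
  // self-contained illustration of the idiom, using hypothetical names
  // that are not part of this generated file:
  //
  //   #include <algorithm>
  //   #include <iterator>
  //   #include <string_view>
  //
  //   struct Entry {
  //     int ID;
  //     const char *Name;
  //     bool operator<(std::string_view RHS) const {
  //       return std::string_view(Name) < RHS;
  //     }
  //   };
  //
  //   // Must stay sorted by Name for lower_bound to be valid.
  //   constexpr Entry Table[] = {
  //       {1, "__builtin_bar"}, {2, "__builtin_baz"}, {3, "__builtin_foo"}};
  //
  //   int lookup(std::string_view N) {
  //     auto I = std::lower_bound(std::begin(Table), std::end(Table), N);
  //     // Exact match required; 0 plays the role of "not an intrinsic".
  //     return (I != std::end(Table) && std::string_view(I->Name) == N)
  //                ? I->ID
  //                : 0;
  //   }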
  if (TargetPrefix == "nvvm") {
    static const BuiltinEntry nvvmNames[] = {
      {Intrinsic::nvvm_abs_bf16, 84223}, // __nvvm_abs_bf16
      {Intrinsic::nvvm_abs_bf16x2, 84239}, // __nvvm_abs_bf16x2
      {Intrinsic::nvvm_add_rm_d, 84257}, // __nvvm_add_rm_d
      {Intrinsic::nvvm_add_rm_f, 84273}, // __nvvm_add_rm_f
      {Intrinsic::nvvm_add_rm_ftz_f, 84289}, // __nvvm_add_rm_ftz_f
      {Intrinsic::nvvm_add_rn_d, 84309}, // __nvvm_add_rn_d
      {Intrinsic::nvvm_add_rn_f, 84325}, // __nvvm_add_rn_f
      {Intrinsic::nvvm_add_rn_ftz_f, 84341}, // __nvvm_add_rn_ftz_f
      {Intrinsic::nvvm_add_rp_d, 84361}, // __nvvm_add_rp_d
      {Intrinsic::nvvm_add_rp_f, 84377}, // __nvvm_add_rp_f
      {Intrinsic::nvvm_add_rp_ftz_f, 84393}, // __nvvm_add_rp_ftz_f
      {Intrinsic::nvvm_add_rz_d, 84413}, // __nvvm_add_rz_d
      {Intrinsic::nvvm_add_rz_f, 84429}, // __nvvm_add_rz_f
      {Intrinsic::nvvm_add_rz_ftz_f, 84445}, // __nvvm_add_rz_ftz_f
      {Intrinsic::nvvm_barrier, 84502}, // __nvvm_bar
      {Intrinsic::nvvm_barrier0_and, 84584}, // __nvvm_bar0_and
      {Intrinsic::nvvm_barrier0_or, 84600}, // __nvvm_bar0_or
      {Intrinsic::nvvm_barrier0_popc, 84615}, // __nvvm_bar0_popc
      {Intrinsic::nvvm_barrier_n, 84513}, // __nvvm_bar_n
      {Intrinsic::nvvm_bar_sync, 84465}, // __nvvm_bar_sync
      {Intrinsic::nvvm_bar_warp_sync, 84481}, // __nvvm_bar_warp_sync
      {Intrinsic::nvvm_barrier_sync, 84526}, // __nvvm_barrier_sync
      {Intrinsic::nvvm_barrier_sync_cnt, 84546}, // __nvvm_barrier_sync_cnt
      {Intrinsic::nvvm_bf2h_rn, 84632}, // __nvvm_bf2h_rn
      {Intrinsic::nvvm_bf2h_rn_ftz, 84647}, // __nvvm_bf2h_rn_ftz
      {Intrinsic::nvvm_bitcast_d2ll, 84666}, // __nvvm_bitcast_d2ll
      {Intrinsic::nvvm_bitcast_f2i, 84686}, // __nvvm_bitcast_f2i
      {Intrinsic::nvvm_bitcast_i2f, 84705}, // __nvvm_bitcast_i2f
      {Intrinsic::nvvm_bitcast_ll2d, 84724}, // __nvvm_bitcast_ll2d
      {Intrinsic::nvvm_ceil_d, 84744}, // __nvvm_ceil_d
      {Intrinsic::nvvm_ceil_f, 84758}, // __nvvm_ceil_f
      {Intrinsic::nvvm_ceil_ftz_f, 84772}, // __nvvm_ceil_ftz_f
      {Intrinsic::nvvm_cos_approx_f, 84790}, // __nvvm_cos_approx_f
      {Intrinsic::nvvm_cos_approx_ftz_f, 84810}, // __nvvm_cos_approx_ftz_f
      {Intrinsic::nvvm_cp_async_commit_group, 84834}, // __nvvm_cp_async_commit_group
      {Intrinsic::nvvm_cp_async_mbarrier_arrive, 84863}, // __nvvm_cp_async_mbarrier_arrive
      {Intrinsic::nvvm_cp_async_mbarrier_arrive_noinc, 84895}, // __nvvm_cp_async_mbarrier_arrive_noinc
      {Intrinsic::nvvm_cp_async_mbarrier_arrive_noinc_shared, 84933}, // __nvvm_cp_async_mbarrier_arrive_noinc_shared
      {Intrinsic::nvvm_cp_async_mbarrier_arrive_shared, 84978}, // __nvvm_cp_async_mbarrier_arrive_shared
      {Intrinsic::nvvm_cp_async_wait_all, 85017}, // __nvvm_cp_async_wait_all
      {Intrinsic::nvvm_cp_async_wait_group, 85042}, // __nvvm_cp_async_wait_group
      {Intrinsic::nvvm_d2f_rm, 85069}, // __nvvm_d2f_rm
      {Intrinsic::nvvm_d2f_rm_ftz, 85083}, // __nvvm_d2f_rm_ftz
      {Intrinsic::nvvm_d2f_rn, 85101}, // __nvvm_d2f_rn
      {Intrinsic::nvvm_d2f_rn_ftz, 85115}, // __nvvm_d2f_rn_ftz
      {Intrinsic::nvvm_d2f_rp, 85133}, // __nvvm_d2f_rp
      {Intrinsic::nvvm_d2f_rp_ftz, 85147}, // __nvvm_d2f_rp_ftz
      {Intrinsic::nvvm_d2f_rz, 85165}, // __nvvm_d2f_rz
      {Intrinsic::nvvm_d2f_rz_ftz, 85179}, // __nvvm_d2f_rz_ftz
      {Intrinsic::nvvm_d2i_hi, 85197}, // __nvvm_d2i_hi
      {Intrinsic::nvvm_d2i_lo, 85211}, // __nvvm_d2i_lo
      {Intrinsic::nvvm_d2i_rm, 85225}, // __nvvm_d2i_rm
      {Intrinsic::nvvm_d2i_rn, 85239}, // __nvvm_d2i_rn
      {Intrinsic::nvvm_d2i_rp, 85253}, // __nvvm_d2i_rp
      {Intrinsic::nvvm_d2i_rz, 85267}, // __nvvm_d2i_rz
      {Intrinsic::nvvm_d2ll_rm, 85281}, // __nvvm_d2ll_rm
      {Intrinsic::nvvm_d2ll_rn, 85296}, // __nvvm_d2ll_rn
      {Intrinsic::nvvm_d2ll_rp, 85311}, // __nvvm_d2ll_rp
      {Intrinsic::nvvm_d2ll_rz, 85326}, // __nvvm_d2ll_rz
      {Intrinsic::nvvm_d2ui_rm, 85341}, // __nvvm_d2ui_rm
      {Intrinsic::nvvm_d2ui_rn, 85356}, // __nvvm_d2ui_rn
      {Intrinsic::nvvm_d2ui_rp, 85371}, // __nvvm_d2ui_rp
      {Intrinsic::nvvm_d2ui_rz, 85386}, // __nvvm_d2ui_rz
      {Intrinsic::nvvm_d2ull_rm, 85401}, // __nvvm_d2ull_rm
      {Intrinsic::nvvm_d2ull_rn, 85417}, // __nvvm_d2ull_rn
      {Intrinsic::nvvm_d2ull_rp, 85433}, // __nvvm_d2ull_rp
      {Intrinsic::nvvm_d2ull_rz, 85449}, // __nvvm_d2ull_rz
      {Intrinsic::nvvm_div_approx_f, 85465}, // __nvvm_div_approx_f
      {Intrinsic::nvvm_div_approx_ftz_f, 85485}, // __nvvm_div_approx_ftz_f
      {Intrinsic::nvvm_div_rm_d, 85509}, // __nvvm_div_rm_d
      {Intrinsic::nvvm_div_rm_f, 85525}, // __nvvm_div_rm_f
      {Intrinsic::nvvm_div_rm_ftz_f, 85541}, // __nvvm_div_rm_ftz_f
      {Intrinsic::nvvm_div_rn_d, 85561}, // __nvvm_div_rn_d
      {Intrinsic::nvvm_div_rn_f, 85577}, // __nvvm_div_rn_f
      {Intrinsic::nvvm_div_rn_ftz_f, 85593}, // __nvvm_div_rn_ftz_f
      {Intrinsic::nvvm_div_rp_d, 85613}, // __nvvm_div_rp_d
      {Intrinsic::nvvm_div_rp_f, 85629}, // __nvvm_div_rp_f
      {Intrinsic::nvvm_div_rp_ftz_f, 85645}, // __nvvm_div_rp_ftz_f
      {Intrinsic::nvvm_div_rz_d, 85665}, // __nvvm_div_rz_d
      {Intrinsic::nvvm_div_rz_f, 85681}, // __nvvm_div_rz_f
      {Intrinsic::nvvm_div_rz_ftz_f, 85697}, // __nvvm_div_rz_ftz_f
      {Intrinsic::nvvm_ex2_approx_d, 85717}, // __nvvm_ex2_approx_d
      {Intrinsic::nvvm_ex2_approx_f, 85737}, // __nvvm_ex2_approx_f
      {Intrinsic::nvvm_ex2_approx_ftz_f, 85757}, // __nvvm_ex2_approx_ftz_f
      {Intrinsic::nvvm_f2bf16_rn, 85781}, // __nvvm_f2bf16_rn
      {Intrinsic::nvvm_f2bf16_rn_relu, 85798}, // __nvvm_f2bf16_rn_relu
      {Intrinsic::nvvm_f2bf16_rz, 85820}, // __nvvm_f2bf16_rz
      {Intrinsic::nvvm_f2bf16_rz_relu, 85837}, // __nvvm_f2bf16_rz_relu
      {Intrinsic::nvvm_f2h_rn, 85859}, // __nvvm_f2h_rn
      {Intrinsic::nvvm_f2h_rn_ftz, 85873}, // __nvvm_f2h_rn_ftz
      {Intrinsic::nvvm_f2i_rm, 85891}, // __nvvm_f2i_rm
      {Intrinsic::nvvm_f2i_rm_ftz, 85905}, // __nvvm_f2i_rm_ftz
      {Intrinsic::nvvm_f2i_rn, 85923}, // __nvvm_f2i_rn
      {Intrinsic::nvvm_f2i_rn_ftz, 85937}, // __nvvm_f2i_rn_ftz
      {Intrinsic::nvvm_f2i_rp, 85955}, // __nvvm_f2i_rp
      {Intrinsic::nvvm_f2i_rp_ftz, 85969}, // __nvvm_f2i_rp_ftz
      {Intrinsic::nvvm_f2i_rz, 85987}, // __nvvm_f2i_rz
      {Intrinsic::nvvm_f2i_rz_ftz, 86001}, // __nvvm_f2i_rz_ftz
      {Intrinsic::nvvm_f2ll_rm, 86019}, // __nvvm_f2ll_rm
      {Intrinsic::nvvm_f2ll_rm_ftz, 86034}, // __nvvm_f2ll_rm_ftz
      {Intrinsic::nvvm_f2ll_rn, 86053}, // __nvvm_f2ll_rn
      {Intrinsic::nvvm_f2ll_rn_ftz, 86068}, // __nvvm_f2ll_rn_ftz
      {Intrinsic::nvvm_f2ll_rp, 86087}, // __nvvm_f2ll_rp
      {Intrinsic::nvvm_f2ll_rp_ftz, 86102}, // __nvvm_f2ll_rp_ftz
      {Intrinsic::nvvm_f2ll_rz, 86121}, // __nvvm_f2ll_rz
      {Intrinsic::nvvm_f2ll_rz_ftz, 86136}, // __nvvm_f2ll_rz_ftz
      {Intrinsic::nvvm_f2tf32_rna, 86155}, // __nvvm_f2tf32_rna
      {Intrinsic::nvvm_f2ui_rm, 86173}, // __nvvm_f2ui_rm
      {Intrinsic::nvvm_f2ui_rm_ftz, 86188}, // __nvvm_f2ui_rm_ftz
      {Intrinsic::nvvm_f2ui_rn, 86207}, // __nvvm_f2ui_rn
      {Intrinsic::nvvm_f2ui_rn_ftz, 86222}, // __nvvm_f2ui_rn_ftz
      {Intrinsic::nvvm_f2ui_rp, 86241}, // __nvvm_f2ui_rp
      {Intrinsic::nvvm_f2ui_rp_ftz, 86256}, // __nvvm_f2ui_rp_ftz
      {Intrinsic::nvvm_f2ui_rz, 86275}, // __nvvm_f2ui_rz
      {Intrinsic::nvvm_f2ui_rz_ftz, 86290}, // __nvvm_f2ui_rz_ftz
      {Intrinsic::nvvm_f2ull_rm, 86309}, // __nvvm_f2ull_rm
      {Intrinsic::nvvm_f2ull_rm_ftz, 86325}, // __nvvm_f2ull_rm_ftz
      {Intrinsic::nvvm_f2ull_rn, 86345}, // __nvvm_f2ull_rn
      {Intrinsic::nvvm_f2ull_rn_ftz, 86361}, // __nvvm_f2ull_rn_ftz
      {Intrinsic::nvvm_f2ull_rp, 86381}, // __nvvm_f2ull_rp
      {Intrinsic::nvvm_f2ull_rp_ftz, 86397}, // __nvvm_f2ull_rp_ftz
      {Intrinsic::nvvm_f2ull_rz, 86417}, // __nvvm_f2ull_rz
      {Intrinsic::nvvm_f2ull_rz_ftz, 86433}, // __nvvm_f2ull_rz_ftz
      {Intrinsic::nvvm_fabs_d, 86453}, // __nvvm_fabs_d
      {Intrinsic::nvvm_fabs_f, 86467}, // __nvvm_fabs_f
      {Intrinsic::nvvm_fabs_ftz_f, 86481}, // __nvvm_fabs_ftz_f
      {Intrinsic::nvvm_ff2bf16x2_rn, 86499}, // __nvvm_ff2bf16x2_rn
      {Intrinsic::nvvm_ff2bf16x2_rn_relu, 86519}, // __nvvm_ff2bf16x2_rn_relu
      {Intrinsic::nvvm_ff2bf16x2_rz, 86544}, // __nvvm_ff2bf16x2_rz
      {Intrinsic::nvvm_ff2bf16x2_rz_relu, 86564}, // __nvvm_ff2bf16x2_rz_relu
      {Intrinsic::nvvm_ff2f16x2_rn, 86589}, // __nvvm_ff2f16x2_rn
      {Intrinsic::nvvm_ff2f16x2_rn_relu, 86608}, // __nvvm_ff2f16x2_rn_relu
      {Intrinsic::nvvm_ff2f16x2_rz, 86632}, // __nvvm_ff2f16x2_rz
      {Intrinsic::nvvm_ff2f16x2_rz_relu, 86651}, // __nvvm_ff2f16x2_rz_relu
      {Intrinsic::nvvm_floor_d, 86675}, // __nvvm_floor_d
      {Intrinsic::nvvm_floor_f, 86690}, // __nvvm_floor_f
      {Intrinsic::nvvm_floor_ftz_f, 86705}, // __nvvm_floor_ftz_f
      {Intrinsic::nvvm_fma_rm_d, 86724}, // __nvvm_fma_rm_d
      {Intrinsic::nvvm_fma_rm_f, 86740}, // __nvvm_fma_rm_f
      {Intrinsic::nvvm_fma_rm_ftz_f, 86756}, // __nvvm_fma_rm_ftz_f
      {Intrinsic::nvvm_fma_rn_bf16, 86776}, // __nvvm_fma_rn_bf16
      {Intrinsic::nvvm_fma_rn_bf16x2, 86795}, // __nvvm_fma_rn_bf16x2
      {Intrinsic::nvvm_fma_rn_d, 86816}, // __nvvm_fma_rn_d
      {Intrinsic::nvvm_fma_rn_f, 86832}, // __nvvm_fma_rn_f
      {Intrinsic::nvvm_fma_rn_ftz_bf16, 86848}, // __nvvm_fma_rn_ftz_bf16
      {Intrinsic::nvvm_fma_rn_ftz_bf16x2, 86871}, // __nvvm_fma_rn_ftz_bf16x2
      {Intrinsic::nvvm_fma_rn_ftz_f, 86896}, // __nvvm_fma_rn_ftz_f
      {Intrinsic::nvvm_fma_rn_ftz_relu_bf16, 86916}, // __nvvm_fma_rn_ftz_relu_bf16
      {Intrinsic::nvvm_fma_rn_ftz_relu_bf16x2, 86944}, // __nvvm_fma_rn_ftz_relu_bf16x2
      {Intrinsic::nvvm_fma_rn_ftz_sat_bf16, 86974}, // __nvvm_fma_rn_ftz_sat_bf16
      {Intrinsic::nvvm_fma_rn_ftz_sat_bf16x2, 87001}, // __nvvm_fma_rn_ftz_sat_bf16x2
      {Intrinsic::nvvm_fma_rn_relu_bf16, 87030}, // __nvvm_fma_rn_relu_bf16
      {Intrinsic::nvvm_fma_rn_relu_bf16x2, 87054}, // __nvvm_fma_rn_relu_bf16x2
      {Intrinsic::nvvm_fma_rn_sat_bf16, 87080}, // __nvvm_fma_rn_sat_bf16
      {Intrinsic::nvvm_fma_rn_sat_bf16x2, 87103}, // __nvvm_fma_rn_sat_bf16x2
      {Intrinsic::nvvm_fma_rp_d, 87128}, // __nvvm_fma_rp_d
      {Intrinsic::nvvm_fma_rp_f, 87144}, // __nvvm_fma_rp_f
      {Intrinsic::nvvm_fma_rp_ftz_f, 87160}, // __nvvm_fma_rp_ftz_f
      {Intrinsic::nvvm_fma_rz_d, 87180}, // __nvvm_fma_rz_d
      {Intrinsic::nvvm_fma_rz_f, 87196}, // __nvvm_fma_rz_f
      {Intrinsic::nvvm_fma_rz_ftz_f, 87212}, // __nvvm_fma_rz_ftz_f
      {Intrinsic::nvvm_fmax_bf16, 87232}, // __nvvm_fmax_bf16
      {Intrinsic::nvvm_fmax_bf16x2, 87249}, // __nvvm_fmax_bf16x2
      {Intrinsic::nvvm_fmax_d, 87268}, // __nvvm_fmax_d
      {Intrinsic::nvvm_fmax_f, 87282}, // __nvvm_fmax_f
      {Intrinsic::nvvm_fmax_ftz_bf16, 87296}, // __nvvm_fmax_ftz_bf16
      {Intrinsic::nvvm_fmax_ftz_bf16x2, 87317}, // __nvvm_fmax_ftz_bf16x2
      {Intrinsic::nvvm_fmax_ftz_f, 87340}, // __nvvm_fmax_ftz_f
      {Intrinsic::nvvm_fmax_ftz_nan_bf16, 87358}, // __nvvm_fmax_ftz_nan_bf16
      {Intrinsic::nvvm_fmax_ftz_nan_bf16x2, 87383}, // __nvvm_fmax_ftz_nan_bf16x2
      {Intrinsic::nvvm_fmax_ftz_nan_f, 87410}, // __nvvm_fmax_ftz_nan_f
      {Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16, 87432}, // __nvvm_fmax_ftz_nan_xorsign_abs_bf16
      {Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_bf16x2, 87469}, // __nvvm_fmax_ftz_nan_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f, 87508}, // __nvvm_fmax_ftz_nan_xorsign_abs_f
      {Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16, 87542}, // __nvvm_fmax_ftz_xorsign_abs_bf16
      {Intrinsic::nvvm_fmax_ftz_xorsign_abs_bf16x2, 87575}, // __nvvm_fmax_ftz_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmax_ftz_xorsign_abs_f, 87610}, // __nvvm_fmax_ftz_xorsign_abs_f
      {Intrinsic::nvvm_fmax_nan_bf16, 87640}, // __nvvm_fmax_nan_bf16
      {Intrinsic::nvvm_fmax_nan_bf16x2, 87661}, // __nvvm_fmax_nan_bf16x2
      {Intrinsic::nvvm_fmax_nan_f, 87684}, // __nvvm_fmax_nan_f
      {Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16, 87702}, // __nvvm_fmax_nan_xorsign_abs_bf16
      {Intrinsic::nvvm_fmax_nan_xorsign_abs_bf16x2, 87735}, // __nvvm_fmax_nan_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmax_nan_xorsign_abs_f, 87770}, // __nvvm_fmax_nan_xorsign_abs_f
      {Intrinsic::nvvm_fmax_xorsign_abs_bf16, 87800}, // __nvvm_fmax_xorsign_abs_bf16
      {Intrinsic::nvvm_fmax_xorsign_abs_bf16x2, 87829}, // __nvvm_fmax_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmax_xorsign_abs_f, 87860}, // __nvvm_fmax_xorsign_abs_f
      {Intrinsic::nvvm_fmin_bf16, 87886}, // __nvvm_fmin_bf16
      {Intrinsic::nvvm_fmin_bf16x2, 87903}, // __nvvm_fmin_bf16x2
      {Intrinsic::nvvm_fmin_d, 87922}, // __nvvm_fmin_d
      {Intrinsic::nvvm_fmin_f, 87936}, // __nvvm_fmin_f
      {Intrinsic::nvvm_fmin_ftz_bf16, 87950}, // __nvvm_fmin_ftz_bf16
      {Intrinsic::nvvm_fmin_ftz_bf16x2, 87971}, // __nvvm_fmin_ftz_bf16x2
      {Intrinsic::nvvm_fmin_ftz_f, 87994}, // __nvvm_fmin_ftz_f
      {Intrinsic::nvvm_fmin_ftz_nan_bf16, 88012}, // __nvvm_fmin_ftz_nan_bf16
      {Intrinsic::nvvm_fmin_ftz_nan_bf16x2, 88037}, // __nvvm_fmin_ftz_nan_bf16x2
      {Intrinsic::nvvm_fmin_ftz_nan_f, 88064}, // __nvvm_fmin_ftz_nan_f
      {Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16, 88086}, // __nvvm_fmin_ftz_nan_xorsign_abs_bf16
      {Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_bf16x2, 88123}, // __nvvm_fmin_ftz_nan_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f, 88162}, // __nvvm_fmin_ftz_nan_xorsign_abs_f
      {Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16, 88196}, // __nvvm_fmin_ftz_xorsign_abs_bf16
      {Intrinsic::nvvm_fmin_ftz_xorsign_abs_bf16x2, 88229}, // __nvvm_fmin_ftz_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmin_ftz_xorsign_abs_f, 88264}, // __nvvm_fmin_ftz_xorsign_abs_f
      {Intrinsic::nvvm_fmin_nan_bf16, 88294}, // __nvvm_fmin_nan_bf16
      {Intrinsic::nvvm_fmin_nan_bf16x2, 88315}, // __nvvm_fmin_nan_bf16x2
      {Intrinsic::nvvm_fmin_nan_f, 88338}, // __nvvm_fmin_nan_f
      {Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16, 88356}, // __nvvm_fmin_nan_xorsign_abs_bf16
      {Intrinsic::nvvm_fmin_nan_xorsign_abs_bf16x2, 88389}, // __nvvm_fmin_nan_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmin_nan_xorsign_abs_f, 88424}, // __nvvm_fmin_nan_xorsign_abs_f
      {Intrinsic::nvvm_fmin_xorsign_abs_bf16, 88454}, // __nvvm_fmin_xorsign_abs_bf16
      {Intrinsic::nvvm_fmin_xorsign_abs_bf16x2, 88483}, // __nvvm_fmin_xorsign_abs_bf16x2
      {Intrinsic::nvvm_fmin_xorsign_abs_f, 88514}, // __nvvm_fmin_xorsign_abs_f
      {Intrinsic::nvvm_fns, 88540}, // __nvvm_fns
      {Intrinsic::nvvm_i2d_rm, 88551}, // __nvvm_i2d_rm
      {Intrinsic::nvvm_i2d_rn, 88565}, // __nvvm_i2d_rn
      {Intrinsic::nvvm_i2d_rp, 88579}, // __nvvm_i2d_rp
      {Intrinsic::nvvm_i2d_rz, 88593}, // __nvvm_i2d_rz
      {Intrinsic::nvvm_i2f_rm, 88607}, // __nvvm_i2f_rm
      {Intrinsic::nvvm_i2f_rn, 88621}, // __nvvm_i2f_rn
      {Intrinsic::nvvm_i2f_rp, 88635}, // __nvvm_i2f_rp
      {Intrinsic::nvvm_i2f_rz, 88649}, // __nvvm_i2f_rz
      {Intrinsic::nvvm_isspacep_const, 88663}, // __nvvm_isspacep_const
      {Intrinsic::nvvm_isspacep_global, 88685}, // __nvvm_isspacep_global
      {Intrinsic::nvvm_isspacep_local, 88708}, // __nvvm_isspacep_local
      {Intrinsic::nvvm_isspacep_shared, 88730}, // __nvvm_isspacep_shared
      {Intrinsic::nvvm_istypep_sampler, 88753}, // __nvvm_istypep_sampler
      {Intrinsic::nvvm_istypep_surface, 88776}, // __nvvm_istypep_surface
      {Intrinsic::nvvm_istypep_texture, 88799}, // __nvvm_istypep_texture
      {Intrinsic::nvvm_lg2_approx_d, 88822}, // __nvvm_lg2_approx_d
      {Intrinsic::nvvm_lg2_approx_f, 88842}, // __nvvm_lg2_approx_f
      {Intrinsic::nvvm_lg2_approx_ftz_f, 88862}, // __nvvm_lg2_approx_ftz_f
      {Intrinsic::nvvm_ll2d_rm, 88886}, // __nvvm_ll2d_rm
      {Intrinsic::nvvm_ll2d_rn, 88901}, // __nvvm_ll2d_rn
      {Intrinsic::nvvm_ll2d_rp, 88916}, // __nvvm_ll2d_rp
      {Intrinsic::nvvm_ll2d_rz, 88931}, // __nvvm_ll2d_rz
      {Intrinsic::nvvm_ll2f_rm, 88946}, // __nvvm_ll2f_rm
      {Intrinsic::nvvm_ll2f_rn, 88961}, // __nvvm_ll2f_rn
      {Intrinsic::nvvm_ll2f_rp, 88976}, // __nvvm_ll2f_rp
      {Intrinsic::nvvm_ll2f_rz, 88991}, // __nvvm_ll2f_rz
      {Intrinsic::nvvm_lohi_i2d, 89006}, // __nvvm_lohi_i2d
      {Intrinsic::nvvm_match_any_sync_i32, 89022}, // __nvvm_match_any_sync_i32
      {Intrinsic::nvvm_match_any_sync_i64, 89048}, // __nvvm_match_any_sync_i64
      {Intrinsic::nvvm_mbarrier_arrive, 89074}, // __nvvm_mbarrier_arrive
      {Intrinsic::nvvm_mbarrier_arrive_drop, 89097}, // __nvvm_mbarrier_arrive_drop
      {Intrinsic::nvvm_mbarrier_arrive_drop_noComplete, 89125}, // __nvvm_mbarrier_arrive_drop_noComplete
      {Intrinsic::nvvm_mbarrier_arrive_drop_noComplete_shared, 89164}, // __nvvm_mbarrier_arrive_drop_noComplete_shared
      {Intrinsic::nvvm_mbarrier_arrive_drop_shared, 89210}, // __nvvm_mbarrier_arrive_drop_shared
      {Intrinsic::nvvm_mbarrier_arrive_noComplete, 89245}, // __nvvm_mbarrier_arrive_noComplete
      {Intrinsic::nvvm_mbarrier_arrive_noComplete_shared, 89279}, // __nvvm_mbarrier_arrive_noComplete_shared
      {Intrinsic::nvvm_mbarrier_arrive_shared, 89320}, // __nvvm_mbarrier_arrive_shared
      {Intrinsic::nvvm_mbarrier_init, 89350}, // __nvvm_mbarrier_init
      {Intrinsic::nvvm_mbarrier_init_shared, 89371}, // __nvvm_mbarrier_init_shared
      {Intrinsic::nvvm_mbarrier_inval, 89399}, // __nvvm_mbarrier_inval
      {Intrinsic::nvvm_mbarrier_inval_shared, 89421}, // __nvvm_mbarrier_inval_shared
      {Intrinsic::nvvm_mbarrier_pending_count, 89450}, // __nvvm_mbarrier_pending_count
      {Intrinsic::nvvm_mbarrier_test_wait, 89480}, // __nvvm_mbarrier_test_wait
      {Intrinsic::nvvm_mbarrier_test_wait_shared, 89506}, // __nvvm_mbarrier_test_wait_shared
      {Intrinsic::nvvm_membar_cta, 89539}, // __nvvm_membar_cta
      {Intrinsic::nvvm_membar_gl, 89557}, // __nvvm_membar_gl
      {Intrinsic::nvvm_membar_sys, 89574}, // __nvvm_membar_sys
      {Intrinsic::nvvm_mul24_i, 89800}, // __nvvm_mul24_i
      {Intrinsic::nvvm_mul24_ui, 89815}, // __nvvm_mul24_ui
      {Intrinsic::nvvm_mul_rm_d, 89592}, // __nvvm_mul_rm_d
      {Intrinsic::nvvm_mul_rm_f, 89608}, // __nvvm_mul_rm_f
      {Intrinsic::nvvm_mul_rm_ftz_f, 89624}, // __nvvm_mul_rm_ftz_f
      {Intrinsic::nvvm_mul_rn_d, 89644}, // __nvvm_mul_rn_d
      {Intrinsic::nvvm_mul_rn_f, 89660}, // __nvvm_mul_rn_f
      {Intrinsic::nvvm_mul_rn_ftz_f, 89676}, // __nvvm_mul_rn_ftz_f
      {Intrinsic::nvvm_mul_rp_d, 89696}, // __nvvm_mul_rp_d
      {Intrinsic::nvvm_mul_rp_f, 89712}, // __nvvm_mul_rp_f
      {Intrinsic::nvvm_mul_rp_ftz_f, 89728}, // __nvvm_mul_rp_ftz_f
      {Intrinsic::nvvm_mul_rz_d, 89748}, // __nvvm_mul_rz_d
      {Intrinsic::nvvm_mul_rz_f, 89764}, // __nvvm_mul_rz_f
      {Intrinsic::nvvm_mul_rz_ftz_f, 89780}, // __nvvm_mul_rz_ftz_f
      {Intrinsic::nvvm_mulhi_i, 89831}, // __nvvm_mulhi_i
      {Intrinsic::nvvm_mulhi_ll, 89846}, // __nvvm_mulhi_ll
      {Intrinsic::nvvm_mulhi_ui, 89862}, // __nvvm_mulhi_ui
      {Intrinsic::nvvm_mulhi_ull, 89878}, // __nvvm_mulhi_ull
      {Intrinsic::nvvm_neg_bf16, 89895}, // __nvvm_neg_bf16
      {Intrinsic::nvvm_neg_bf16x2, 89911}, // __nvvm_neg_bf16x2
      {Intrinsic::nvvm_prmt, 89929}, // __nvvm_prmt
      {Intrinsic::nvvm_rcp_approx_ftz_d, 89941}, // __nvvm_rcp_approx_ftz_d
      {Intrinsic::nvvm_rcp_approx_ftz_f, 89965}, // __nvvm_rcp_approx_ftz_f
      {Intrinsic::nvvm_rcp_rm_d, 89989}, // __nvvm_rcp_rm_d
      {Intrinsic::nvvm_rcp_rm_f, 90005}, // __nvvm_rcp_rm_f
      {Intrinsic::nvvm_rcp_rm_ftz_f, 90021}, // __nvvm_rcp_rm_ftz_f
      {Intrinsic::nvvm_rcp_rn_d, 90041}, // __nvvm_rcp_rn_d
      {Intrinsic::nvvm_rcp_rn_f, 90057}, // __nvvm_rcp_rn_f
      {Intrinsic::nvvm_rcp_rn_ftz_f, 90073}, // __nvvm_rcp_rn_ftz_f
      {Intrinsic::nvvm_rcp_rp_d, 90093}, // __nvvm_rcp_rp_d
      {Intrinsic::nvvm_rcp_rp_f, 90109}, // __nvvm_rcp_rp_f
      {Intrinsic::nvvm_rcp_rp_ftz_f, 90125}, // __nvvm_rcp_rp_ftz_f
      {Intrinsic::nvvm_rcp_rz_d, 90145}, // __nvvm_rcp_rz_d
      {Intrinsic::nvvm_rcp_rz_f, 90161}, // __nvvm_rcp_rz_f
      {Intrinsic::nvvm_rcp_rz_ftz_f, 90177}, // __nvvm_rcp_rz_ftz_f
      {Intrinsic::nvvm_read_ptx_sreg_clock, 90197}, // __nvvm_read_ptx_sreg_clock
      {Intrinsic::nvvm_read_ptx_sreg_clock64, 90224}, // __nvvm_read_ptx_sreg_clock64
      {Intrinsic::nvvm_read_ptx_sreg_ctaid_w, 90253}, // __nvvm_read_ptx_sreg_ctaid_w
      {Intrinsic::nvvm_read_ptx_sreg_ctaid_x, 90282}, // __nvvm_read_ptx_sreg_ctaid_x
      {Intrinsic::nvvm_read_ptx_sreg_ctaid_y, 90311}, // __nvvm_read_ptx_sreg_ctaid_y
      {Intrinsic::nvvm_read_ptx_sreg_ctaid_z, 90340}, // __nvvm_read_ptx_sreg_ctaid_z
      {Intrinsic::nvvm_read_ptx_sreg_envreg0, 90369}, // __nvvm_read_ptx_sreg_envreg0
      {Intrinsic::nvvm_read_ptx_sreg_envreg1, 90398}, // __nvvm_read_ptx_sreg_envreg1
      {Intrinsic::nvvm_read_ptx_sreg_envreg10, 90427}, // __nvvm_read_ptx_sreg_envreg10
      {Intrinsic::nvvm_read_ptx_sreg_envreg11, 90457}, // __nvvm_read_ptx_sreg_envreg11
      {Intrinsic::nvvm_read_ptx_sreg_envreg12, 90487}, // __nvvm_read_ptx_sreg_envreg12
      {Intrinsic::nvvm_read_ptx_sreg_envreg13, 90517}, // __nvvm_read_ptx_sreg_envreg13
      {Intrinsic::nvvm_read_ptx_sreg_envreg14, 90547}, // __nvvm_read_ptx_sreg_envreg14
      {Intrinsic::nvvm_read_ptx_sreg_envreg15, 90577}, // __nvvm_read_ptx_sreg_envreg15
      {Intrinsic::nvvm_read_ptx_sreg_envreg16, 90607}, // __nvvm_read_ptx_sreg_envreg16
      {Intrinsic::nvvm_read_ptx_sreg_envreg17, 90637}, // __nvvm_read_ptx_sreg_envreg17
      {Intrinsic::nvvm_read_ptx_sreg_envreg18, 90667}, // __nvvm_read_ptx_sreg_envreg18
      {Intrinsic::nvvm_read_ptx_sreg_envreg19, 90697}, // __nvvm_read_ptx_sreg_envreg19
      {Intrinsic::nvvm_read_ptx_sreg_envreg2, 90727}, // __nvvm_read_ptx_sreg_envreg2
      {Intrinsic::nvvm_read_ptx_sreg_envreg20, 90756}, // __nvvm_read_ptx_sreg_envreg20
      {Intrinsic::nvvm_read_ptx_sreg_envreg21, 90786}, // __nvvm_read_ptx_sreg_envreg21
      {Intrinsic::nvvm_read_ptx_sreg_envreg22, 90816}, // __nvvm_read_ptx_sreg_envreg22
      {Intrinsic::nvvm_read_ptx_sreg_envreg23, 90846}, // __nvvm_read_ptx_sreg_envreg23
      {Intrinsic::nvvm_read_ptx_sreg_envreg24, 90876}, // __nvvm_read_ptx_sreg_envreg24
      {Intrinsic::nvvm_read_ptx_sreg_envreg25, 90906}, // __nvvm_read_ptx_sreg_envreg25
      {Intrinsic::nvvm_read_ptx_sreg_envreg26, 90936}, // __nvvm_read_ptx_sreg_envreg26
      {Intrinsic::nvvm_read_ptx_sreg_envreg27, 90966}, // __nvvm_read_ptx_sreg_envreg27
      {Intrinsic::nvvm_read_ptx_sreg_envreg28, 90996}, // __nvvm_read_ptx_sreg_envreg28
      {Intrinsic::nvvm_read_ptx_sreg_envreg29, 91026}, // __nvvm_read_ptx_sreg_envreg29
      {Intrinsic::nvvm_read_ptx_sreg_envreg3, 91056}, // __nvvm_read_ptx_sreg_envreg3
      {Intrinsic::nvvm_read_ptx_sreg_envreg30, 91085}, // __nvvm_read_ptx_sreg_envreg30
      {Intrinsic::nvvm_read_ptx_sreg_envreg31, 91115}, // __nvvm_read_ptx_sreg_envreg31
      {Intrinsic::nvvm_read_ptx_sreg_envreg4, 91145}, // __nvvm_read_ptx_sreg_envreg4
      {Intrinsic::nvvm_read_ptx_sreg_envreg5, 91174}, // __nvvm_read_ptx_sreg_envreg5
      {Intrinsic::nvvm_read_ptx_sreg_envreg6, 91203}, // __nvvm_read_ptx_sreg_envreg6
      {Intrinsic::nvvm_read_ptx_sreg_envreg7, 91232}, // __nvvm_read_ptx_sreg_envreg7
      {Intrinsic::nvvm_read_ptx_sreg_envreg8, 91261}, // __nvvm_read_ptx_sreg_envreg8
      {Intrinsic::nvvm_read_ptx_sreg_envreg9, 91290}, // __nvvm_read_ptx_sreg_envreg9
      {Intrinsic::nvvm_read_ptx_sreg_gridid, 91319}, // __nvvm_read_ptx_sreg_gridid
      {Intrinsic::nvvm_read_ptx_sreg_laneid, 91347}, // __nvvm_read_ptx_sreg_laneid
      {Intrinsic::nvvm_read_ptx_sreg_lanemask_eq, 91375}, // __nvvm_read_ptx_sreg_lanemask_eq
      {Intrinsic::nvvm_read_ptx_sreg_lanemask_ge, 91408}, // __nvvm_read_ptx_sreg_lanemask_ge
      {Intrinsic::nvvm_read_ptx_sreg_lanemask_gt, 91441}, // __nvvm_read_ptx_sreg_lanemask_gt
      {Intrinsic::nvvm_read_ptx_sreg_lanemask_le, 91474}, // __nvvm_read_ptx_sreg_lanemask_le
      {Intrinsic::nvvm_read_ptx_sreg_lanemask_lt, 91507}, // __nvvm_read_ptx_sreg_lanemask_lt
      {Intrinsic::nvvm_read_ptx_sreg_nctaid_w, 91540}, // __nvvm_read_ptx_sreg_nctaid_w
      {Intrinsic::nvvm_read_ptx_sreg_nctaid_x, 91570}, // __nvvm_read_ptx_sreg_nctaid_x
      {Intrinsic::nvvm_read_ptx_sreg_nctaid_y, 91600}, // __nvvm_read_ptx_sreg_nctaid_y
      {Intrinsic::nvvm_read_ptx_sreg_nctaid_z, 91630}, // __nvvm_read_ptx_sreg_nctaid_z
      {Intrinsic::nvvm_read_ptx_sreg_nsmid, 91660}, // __nvvm_read_ptx_sreg_nsmid
      {Intrinsic::nvvm_read_ptx_sreg_ntid_w, 91687}, // __nvvm_read_ptx_sreg_ntid_w
      {Intrinsic::nvvm_read_ptx_sreg_ntid_x, 91715}, // __nvvm_read_ptx_sreg_ntid_x
      {Intrinsic::nvvm_read_ptx_sreg_ntid_y, 91743}, // __nvvm_read_ptx_sreg_ntid_y
      {Intrinsic::nvvm_read_ptx_sreg_ntid_z, 91771}, // __nvvm_read_ptx_sreg_ntid_z
      {Intrinsic::nvvm_read_ptx_sreg_nwarpid, 91799}, // __nvvm_read_ptx_sreg_nwarpid
      {Intrinsic::nvvm_read_ptx_sreg_pm0, 91828}, // __nvvm_read_ptx_sreg_pm0
      {Intrinsic::nvvm_read_ptx_sreg_pm1, 91853}, // __nvvm_read_ptx_sreg_pm1
      {Intrinsic::nvvm_read_ptx_sreg_pm2, 91878}, // __nvvm_read_ptx_sreg_pm2
      {Intrinsic::nvvm_read_ptx_sreg_pm3, 91903}, // __nvvm_read_ptx_sreg_pm3
      {Intrinsic::nvvm_read_ptx_sreg_smid, 91928}, // __nvvm_read_ptx_sreg_smid
      {Intrinsic::nvvm_read_ptx_sreg_tid_w, 91954}, // __nvvm_read_ptx_sreg_tid_w
      {Intrinsic::nvvm_read_ptx_sreg_tid_x, 91981}, // __nvvm_read_ptx_sreg_tid_x
      {Intrinsic::nvvm_read_ptx_sreg_tid_y, 92008}, // __nvvm_read_ptx_sreg_tid_y
      {Intrinsic::nvvm_read_ptx_sreg_tid_z, 92035}, // __nvvm_read_ptx_sreg_tid_z
      {Intrinsic::nvvm_read_ptx_sreg_warpid, 92062}, // __nvvm_read_ptx_sreg_warpid
      {Intrinsic::nvvm_read_ptx_sreg_warpsize, 92090}, // __nvvm_read_ptx_sreg_warpsize
      {Intrinsic::nvvm_redux_sync_add, 92120}, // __nvvm_redux_sync_add
      {Intrinsic::nvvm_redux_sync_and, 92142}, // __nvvm_redux_sync_and
      {Intrinsic::nvvm_redux_sync_max, 92164}, // __nvvm_redux_sync_max
      {Intrinsic::nvvm_redux_sync_min, 92186}, // __nvvm_redux_sync_min
      {Intrinsic::nvvm_redux_sync_or, 92208}, // __nvvm_redux_sync_or
      {Intrinsic::nvvm_redux_sync_umax, 92229}, // __nvvm_redux_sync_umax
      {Intrinsic::nvvm_redux_sync_umin, 92252}, // __nvvm_redux_sync_umin
      {Intrinsic::nvvm_redux_sync_xor, 92275}, // __nvvm_redux_sync_xor
      {Intrinsic::nvvm_rotate_b32, 92297}, // __nvvm_rotate_b32
      {Intrinsic::nvvm_rotate_b64, 92315}, // __nvvm_rotate_b64
      {Intrinsic::nvvm_rotate_right_b64, 92333}, // __nvvm_rotate_right_b64
      {Intrinsic::nvvm_round_d, 92357}, // __nvvm_round_d
      {Intrinsic::nvvm_round_f, 92372}, // __nvvm_round_f
      {Intrinsic::nvvm_round_ftz_f, 92387}, // __nvvm_round_ftz_f
      {Intrinsic::nvvm_rsqrt_approx_d, 92406}, // __nvvm_rsqrt_approx_d
      {Intrinsic::nvvm_rsqrt_approx_f, 92428}, // __nvvm_rsqrt_approx_f
      {Intrinsic::nvvm_rsqrt_approx_ftz_f, 92450}, // __nvvm_rsqrt_approx_ftz_f
      {Intrinsic::nvvm_sad_i, 92476}, // __nvvm_sad_i
      {Intrinsic::nvvm_sad_ui, 92489}, // __nvvm_sad_ui
      {Intrinsic::nvvm_saturate_d, 92503}, // __nvvm_saturate_d
      {Intrinsic::nvvm_saturate_f, 92521}, // __nvvm_saturate_f
      {Intrinsic::nvvm_saturate_ftz_f, 92539}, // __nvvm_saturate_ftz_f
      {Intrinsic::nvvm_shfl_bfly_f32, 92561}, // __nvvm_shfl_bfly_f32
      {Intrinsic::nvvm_shfl_bfly_i32, 92582}, // __nvvm_shfl_bfly_i32
      {Intrinsic::nvvm_shfl_down_f32, 92603}, // __nvvm_shfl_down_f32
      {Intrinsic::nvvm_shfl_down_i32, 92624}, // __nvvm_shfl_down_i32
      {Intrinsic::nvvm_shfl_idx_f32, 92645}, // __nvvm_shfl_idx_f32
      {Intrinsic::nvvm_shfl_idx_i32, 92665}, // __nvvm_shfl_idx_i32
      {Intrinsic::nvvm_shfl_sync_bfly_f32, 92685}, // __nvvm_shfl_sync_bfly_f32
      {Intrinsic::nvvm_shfl_sync_bfly_i32, 92711}, // __nvvm_shfl_sync_bfly_i32
      {Intrinsic::nvvm_shfl_sync_down_f32, 92737}, // __nvvm_shfl_sync_down_f32
      {Intrinsic::nvvm_shfl_sync_down_i32, 92763}, // __nvvm_shfl_sync_down_i32
      {Intrinsic::nvvm_shfl_sync_idx_f32, 92789}, // __nvvm_shfl_sync_idx_f32
      {Intrinsic::nvvm_shfl_sync_idx_i32, 92814}, // __nvvm_shfl_sync_idx_i32
      {Intrinsic::nvvm_shfl_sync_up_f32, 92839}, // __nvvm_shfl_sync_up_f32
      {Intrinsic::nvvm_shfl_sync_up_i32, 92863}, // __nvvm_shfl_sync_up_i32
      {Intrinsic::nvvm_shfl_up_f32, 92887}, // __nvvm_shfl_up_f32
      {Intrinsic::nvvm_shfl_up_i32, 92906}, // __nvvm_shfl_up_i32
      {Intrinsic::nvvm_sin_approx_f, 92925}, // __nvvm_sin_approx_f
      {Intrinsic::nvvm_sin_approx_ftz_f, 92945}, // __nvvm_sin_approx_ftz_f
      {Intrinsic::nvvm_sqrt_approx_f, 92969}, // __nvvm_sqrt_approx_f
      {Intrinsic::nvvm_sqrt_approx_ftz_f, 92990}, // __nvvm_sqrt_approx_ftz_f
      {Intrinsic::nvvm_sqrt_f, 93015}, // __nvvm_sqrt_f
      {Intrinsic::nvvm_sqrt_rm_d, 93029}, // __nvvm_sqrt_rm_d
      {Intrinsic::nvvm_sqrt_rm_f, 93046}, // __nvvm_sqrt_rm_f
      {Intrinsic::nvvm_sqrt_rm_ftz_f, 93063}, // __nvvm_sqrt_rm_ftz_f
      {Intrinsic::nvvm_sqrt_rn_d, 93084}, // __nvvm_sqrt_rn_d
      {Intrinsic::nvvm_sqrt_rn_f, 93101}, // __nvvm_sqrt_rn_f
      {Intrinsic::nvvm_sqrt_rn_ftz_f, 93118}, // __nvvm_sqrt_rn_ftz_f
      {Intrinsic::nvvm_sqrt_rp_d, 93139}, // __nvvm_sqrt_rp_d
      {Intrinsic::nvvm_sqrt_rp_f, 93156}, // __nvvm_sqrt_rp_f
      {Intrinsic::nvvm_sqrt_rp_ftz_f, 93173}, // __nvvm_sqrt_rp_ftz_f
      {Intrinsic::nvvm_sqrt_rz_d, 93194}, // __nvvm_sqrt_rz_d
      {Intrinsic::nvvm_sqrt_rz_f, 93211}, // __nvvm_sqrt_rz_f
      {Intrinsic::nvvm_sqrt_rz_ftz_f, 93228}, // __nvvm_sqrt_rz_ftz_f
      {Intrinsic::nvvm_suq_array_size, 93249}, // __nvvm_suq_array_size
      {Intrinsic::nvvm_suq_channel_data_type, 93271}, // __nvvm_suq_channel_data_type
      {Intrinsic::nvvm_suq_channel_order, 93300}, // __nvvm_suq_channel_order
      {Intrinsic::nvvm_suq_depth, 93325}, // __nvvm_suq_depth
      {Intrinsic::nvvm_suq_height, 93342}, // __nvvm_suq_height
      {Intrinsic::nvvm_suq_width, 93360}, // __nvvm_suq_width
      {Intrinsic::nvvm_sust_b_1d_array_i16_clamp, 93377}, // __nvvm_sust_b_1d_array_i16_clamp
      {Intrinsic::nvvm_sust_b_1d_array_i16_trap, 93410}, // __nvvm_sust_b_1d_array_i16_trap
      {Intrinsic::nvvm_sust_b_1d_array_i16_zero, 93442}, // __nvvm_sust_b_1d_array_i16_zero
      {Intrinsic::nvvm_sust_b_1d_array_i32_clamp, 93474}, // __nvvm_sust_b_1d_array_i32_clamp
      {Intrinsic::nvvm_sust_b_1d_array_i32_trap, 93507}, // __nvvm_sust_b_1d_array_i32_trap
      {Intrinsic::nvvm_sust_b_1d_array_i32_zero, 93539}, // __nvvm_sust_b_1d_array_i32_zero
      {Intrinsic::nvvm_sust_b_1d_array_i64_clamp, 93571}, // __nvvm_sust_b_1d_array_i64_clamp
      {Intrinsic::nvvm_sust_b_1d_array_i64_trap, 93604}, // __nvvm_sust_b_1d_array_i64_trap
      {Intrinsic::nvvm_sust_b_1d_array_i64_zero, 93636}, // __nvvm_sust_b_1d_array_i64_zero
      {Intrinsic::nvvm_sust_b_1d_array_i8_clamp, 93668}, // __nvvm_sust_b_1d_array_i8_clamp
      {Intrinsic::nvvm_sust_b_1d_array_i8_trap, 93700}, // __nvvm_sust_b_1d_array_i8_trap
      {Intrinsic::nvvm_sust_b_1d_array_i8_zero, 93731}, // __nvvm_sust_b_1d_array_i8_zero
      {Intrinsic::nvvm_sust_b_1d_array_v2i16_clamp, 93762}, // __nvvm_sust_b_1d_array_v2i16_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v2i16_trap, 93797}, // __nvvm_sust_b_1d_array_v2i16_trap
      {Intrinsic::nvvm_sust_b_1d_array_v2i16_zero, 93831}, // __nvvm_sust_b_1d_array_v2i16_zero
      {Intrinsic::nvvm_sust_b_1d_array_v2i32_clamp, 93865}, // __nvvm_sust_b_1d_array_v2i32_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v2i32_trap, 93900}, // __nvvm_sust_b_1d_array_v2i32_trap
      {Intrinsic::nvvm_sust_b_1d_array_v2i32_zero, 93934}, // __nvvm_sust_b_1d_array_v2i32_zero
      {Intrinsic::nvvm_sust_b_1d_array_v2i64_clamp, 93968}, // __nvvm_sust_b_1d_array_v2i64_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v2i64_trap, 94003}, // __nvvm_sust_b_1d_array_v2i64_trap
      {Intrinsic::nvvm_sust_b_1d_array_v2i64_zero, 94037}, // __nvvm_sust_b_1d_array_v2i64_zero
      {Intrinsic::nvvm_sust_b_1d_array_v2i8_clamp, 94071}, // __nvvm_sust_b_1d_array_v2i8_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v2i8_trap, 94105}, // __nvvm_sust_b_1d_array_v2i8_trap
      {Intrinsic::nvvm_sust_b_1d_array_v2i8_zero, 94138}, // __nvvm_sust_b_1d_array_v2i8_zero
      {Intrinsic::nvvm_sust_b_1d_array_v4i16_clamp, 94171}, // __nvvm_sust_b_1d_array_v4i16_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v4i16_trap, 94206}, // __nvvm_sust_b_1d_array_v4i16_trap
      {Intrinsic::nvvm_sust_b_1d_array_v4i16_zero, 94240}, // __nvvm_sust_b_1d_array_v4i16_zero
      {Intrinsic::nvvm_sust_b_1d_array_v4i32_clamp, 94274}, // __nvvm_sust_b_1d_array_v4i32_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v4i32_trap, 94309}, // __nvvm_sust_b_1d_array_v4i32_trap
      {Intrinsic::nvvm_sust_b_1d_array_v4i32_zero, 94343}, // __nvvm_sust_b_1d_array_v4i32_zero
      {Intrinsic::nvvm_sust_b_1d_array_v4i8_clamp, 94377}, // __nvvm_sust_b_1d_array_v4i8_clamp
      {Intrinsic::nvvm_sust_b_1d_array_v4i8_trap, 94411}, // __nvvm_sust_b_1d_array_v4i8_trap
      {Intrinsic::nvvm_sust_b_1d_array_v4i8_zero, 94444}, // __nvvm_sust_b_1d_array_v4i8_zero
      {Intrinsic::nvvm_sust_b_1d_i16_clamp, 94477}, // __nvvm_sust_b_1d_i16_clamp
      {Intrinsic::nvvm_sust_b_1d_i16_trap, 94504}, // __nvvm_sust_b_1d_i16_trap
      {Intrinsic::nvvm_sust_b_1d_i16_zero, 94530}, // __nvvm_sust_b_1d_i16_zero
      {Intrinsic::nvvm_sust_b_1d_i32_clamp, 94556}, // __nvvm_sust_b_1d_i32_clamp
      {Intrinsic::nvvm_sust_b_1d_i32_trap, 94583}, // __nvvm_sust_b_1d_i32_trap
      {Intrinsic::nvvm_sust_b_1d_i32_zero, 94609}, // __nvvm_sust_b_1d_i32_zero
      {Intrinsic::nvvm_sust_b_1d_i64_clamp, 94635}, // __nvvm_sust_b_1d_i64_clamp
      {Intrinsic::nvvm_sust_b_1d_i64_trap, 94662}, // __nvvm_sust_b_1d_i64_trap
      {Intrinsic::nvvm_sust_b_1d_i64_zero, 94688}, // __nvvm_sust_b_1d_i64_zero
      {Intrinsic::nvvm_sust_b_1d_i8_clamp, 94714}, // __nvvm_sust_b_1d_i8_clamp
      {Intrinsic::nvvm_sust_b_1d_i8_trap, 94740}, // __nvvm_sust_b_1d_i8_trap
      {Intrinsic::nvvm_sust_b_1d_i8_zero, 94765}, // __nvvm_sust_b_1d_i8_zero
      {Intrinsic::nvvm_sust_b_1d_v2i16_clamp, 94790}, // __nvvm_sust_b_1d_v2i16_clamp
      {Intrinsic::nvvm_sust_b_1d_v2i16_trap, 94819}, // __nvvm_sust_b_1d_v2i16_trap
      {Intrinsic::nvvm_sust_b_1d_v2i16_zero, 94847}, // __nvvm_sust_b_1d_v2i16_zero
      {Intrinsic::nvvm_sust_b_1d_v2i32_clamp, 94875}, // __nvvm_sust_b_1d_v2i32_clamp
      {Intrinsic::nvvm_sust_b_1d_v2i32_trap, 94904}, // __nvvm_sust_b_1d_v2i32_trap
      {Intrinsic::nvvm_sust_b_1d_v2i32_zero, 94932}, // __nvvm_sust_b_1d_v2i32_zero
      {Intrinsic::nvvm_sust_b_1d_v2i64_clamp, 94960}, // __nvvm_sust_b_1d_v2i64_clamp
      {Intrinsic::nvvm_sust_b_1d_v2i64_trap, 94989}, // __nvvm_sust_b_1d_v2i64_trap
      {Intrinsic::nvvm_sust_b_1d_v2i64_zero, 95017}, // __nvvm_sust_b_1d_v2i64_zero
      {Intrinsic::nvvm_sust_b_1d_v2i8_clamp, 95045}, // __nvvm_sust_b_1d_v2i8_clamp
      {Intrinsic::nvvm_sust_b_1d_v2i8_trap, 95073}, // __nvvm_sust_b_1d_v2i8_trap
      {Intrinsic::nvvm_sust_b_1d_v2i8_zero, 95100}, // __nvvm_sust_b_1d_v2i8_zero
      {Intrinsic::nvvm_sust_b_1d_v4i16_clamp, 95127}, // __nvvm_sust_b_1d_v4i16_clamp
      {Intrinsic::nvvm_sust_b_1d_v4i16_trap, 95156}, // __nvvm_sust_b_1d_v4i16_trap
      {Intrinsic::nvvm_sust_b_1d_v4i16_zero, 95184}, // __nvvm_sust_b_1d_v4i16_zero
      {Intrinsic::nvvm_sust_b_1d_v4i32_clamp, 95212}, // __nvvm_sust_b_1d_v4i32_clamp
      {Intrinsic::nvvm_sust_b_1d_v4i32_trap, 95241}, // __nvvm_sust_b_1d_v4i32_trap
      {Intrinsic::nvvm_sust_b_1d_v4i32_zero, 95269}, // __nvvm_sust_b_1d_v4i32_zero
      {Intrinsic::nvvm_sust_b_1d_v4i8_clamp, 95297}, // __nvvm_sust_b_1d_v4i8_clamp
      {Intrinsic::nvvm_sust_b_1d_v4i8_trap, 95325}, // __nvvm_sust_b_1d_v4i8_trap
      {Intrinsic::nvvm_sust_b_1d_v4i8_zero, 95352}, // __nvvm_sust_b_1d_v4i8_zero
      {Intrinsic::nvvm_sust_b_2d_array_i16_clamp, 95379}, // __nvvm_sust_b_2d_array_i16_clamp
      {Intrinsic::nvvm_sust_b_2d_array_i16_trap, 95412}, // __nvvm_sust_b_2d_array_i16_trap
      {Intrinsic::nvvm_sust_b_2d_array_i16_zero, 95444}, // __nvvm_sust_b_2d_array_i16_zero
      {Intrinsic::nvvm_sust_b_2d_array_i32_clamp, 95476}, // __nvvm_sust_b_2d_array_i32_clamp
      {Intrinsic::nvvm_sust_b_2d_array_i32_trap, 95509}, // __nvvm_sust_b_2d_array_i32_trap
      {Intrinsic::nvvm_sust_b_2d_array_i32_zero, 95541}, // __nvvm_sust_b_2d_array_i32_zero
      {Intrinsic::nvvm_sust_b_2d_array_i64_clamp, 95573}, // __nvvm_sust_b_2d_array_i64_clamp
      {Intrinsic::nvvm_sust_b_2d_array_i64_trap, 95606}, // __nvvm_sust_b_2d_array_i64_trap
      {Intrinsic::nvvm_sust_b_2d_array_i64_zero, 95638}, // __nvvm_sust_b_2d_array_i64_zero
      {Intrinsic::nvvm_sust_b_2d_array_i8_clamp, 95670}, // __nvvm_sust_b_2d_array_i8_clamp
      {Intrinsic::nvvm_sust_b_2d_array_i8_trap, 95702}, // __nvvm_sust_b_2d_array_i8_trap
      {Intrinsic::nvvm_sust_b_2d_array_i8_zero, 95733}, // __nvvm_sust_b_2d_array_i8_zero
      {Intrinsic::nvvm_sust_b_2d_array_v2i16_clamp, 95764}, // __nvvm_sust_b_2d_array_v2i16_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v2i16_trap, 95799}, // __nvvm_sust_b_2d_array_v2i16_trap
      {Intrinsic::nvvm_sust_b_2d_array_v2i16_zero, 95833}, // __nvvm_sust_b_2d_array_v2i16_zero
      {Intrinsic::nvvm_sust_b_2d_array_v2i32_clamp, 95867}, // __nvvm_sust_b_2d_array_v2i32_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v2i32_trap, 95902}, // __nvvm_sust_b_2d_array_v2i32_trap
      {Intrinsic::nvvm_sust_b_2d_array_v2i32_zero, 95936}, // __nvvm_sust_b_2d_array_v2i32_zero
      {Intrinsic::nvvm_sust_b_2d_array_v2i64_clamp, 95970}, // __nvvm_sust_b_2d_array_v2i64_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v2i64_trap, 96005}, // __nvvm_sust_b_2d_array_v2i64_trap
      {Intrinsic::nvvm_sust_b_2d_array_v2i64_zero, 96039}, // __nvvm_sust_b_2d_array_v2i64_zero
      {Intrinsic::nvvm_sust_b_2d_array_v2i8_clamp, 96073}, // __nvvm_sust_b_2d_array_v2i8_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v2i8_trap, 96107}, // __nvvm_sust_b_2d_array_v2i8_trap
      {Intrinsic::nvvm_sust_b_2d_array_v2i8_zero, 96140}, // __nvvm_sust_b_2d_array_v2i8_zero
      {Intrinsic::nvvm_sust_b_2d_array_v4i16_clamp, 96173}, // __nvvm_sust_b_2d_array_v4i16_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v4i16_trap, 96208}, // __nvvm_sust_b_2d_array_v4i16_trap
      {Intrinsic::nvvm_sust_b_2d_array_v4i16_zero, 96242}, // __nvvm_sust_b_2d_array_v4i16_zero
      {Intrinsic::nvvm_sust_b_2d_array_v4i32_clamp, 96276}, // __nvvm_sust_b_2d_array_v4i32_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v4i32_trap, 96311}, // __nvvm_sust_b_2d_array_v4i32_trap
      {Intrinsic::nvvm_sust_b_2d_array_v4i32_zero, 96345}, // __nvvm_sust_b_2d_array_v4i32_zero
      {Intrinsic::nvvm_sust_b_2d_array_v4i8_clamp, 96379}, // __nvvm_sust_b_2d_array_v4i8_clamp
      {Intrinsic::nvvm_sust_b_2d_array_v4i8_trap, 96413}, // __nvvm_sust_b_2d_array_v4i8_trap
      {Intrinsic::nvvm_sust_b_2d_array_v4i8_zero, 96446}, // __nvvm_sust_b_2d_array_v4i8_zero
      {Intrinsic::nvvm_sust_b_2d_i16_clamp, 96479}, // __nvvm_sust_b_2d_i16_clamp
      {Intrinsic::nvvm_sust_b_2d_i16_trap, 96506}, // __nvvm_sust_b_2d_i16_trap
      {Intrinsic::nvvm_sust_b_2d_i16_zero, 96532}, // __nvvm_sust_b_2d_i16_zero
      {Intrinsic::nvvm_sust_b_2d_i32_clamp, 96558}, // __nvvm_sust_b_2d_i32_clamp
      {Intrinsic::nvvm_sust_b_2d_i32_trap, 96585}, // __nvvm_sust_b_2d_i32_trap
      {Intrinsic::nvvm_sust_b_2d_i32_zero, 96611}, // __nvvm_sust_b_2d_i32_zero
      {Intrinsic::nvvm_sust_b_2d_i64_clamp, 96637}, // __nvvm_sust_b_2d_i64_clamp
      {Intrinsic::nvvm_sust_b_2d_i64_trap, 96664}, // __nvvm_sust_b_2d_i64_trap
      {Intrinsic::nvvm_sust_b_2d_i64_zero, 96690}, // __nvvm_sust_b_2d_i64_zero
      {Intrinsic::nvvm_sust_b_2d_i8_clamp, 96716}, // __nvvm_sust_b_2d_i8_clamp
      {Intrinsic::nvvm_sust_b_2d_i8_trap, 96742}, // __nvvm_sust_b_2d_i8_trap
      {Intrinsic::nvvm_sust_b_2d_i8_zero, 96767}, // __nvvm_sust_b_2d_i8_zero
      {Intrinsic::nvvm_sust_b_2d_v2i16_clamp, 96792}, // __nvvm_sust_b_2d_v2i16_clamp
      {Intrinsic::nvvm_sust_b_2d_v2i16_trap, 96821}, // __nvvm_sust_b_2d_v2i16_trap
      {Intrinsic::nvvm_sust_b_2d_v2i16_zero, 96849}, // __nvvm_sust_b_2d_v2i16_zero
      {Intrinsic::nvvm_sust_b_2d_v2i32_clamp, 96877}, // __nvvm_sust_b_2d_v2i32_clamp
      {Intrinsic::nvvm_sust_b_2d_v2i32_trap, 96906}, // __nvvm_sust_b_2d_v2i32_trap
      {Intrinsic::nvvm_sust_b_2d_v2i32_zero, 96934}, // __nvvm_sust_b_2d_v2i32_zero
      {Intrinsic::nvvm_sust_b_2d_v2i64_clamp, 96962}, // __nvvm_sust_b_2d_v2i64_clamp
      {Intrinsic::nvvm_sust_b_2d_v2i64_trap, 96991}, // __nvvm_sust_b_2d_v2i64_trap
      {Intrinsic::nvvm_sust_b_2d_v2i64_zero, 97019}, // __nvvm_sust_b_2d_v2i64_zero
      {Intrinsic::nvvm_sust_b_2d_v2i8_clamp, 97047}, // __nvvm_sust_b_2d_v2i8_clamp
      {Intrinsic::nvvm_sust_b_2d_v2i8_trap, 97075}, // __nvvm_sust_b_2d_v2i8_trap
      {Intrinsic::nvvm_sust_b_2d_v2i8_zero, 97102}, // __nvvm_sust_b_2d_v2i8_zero
      {Intrinsic::nvvm_sust_b_2d_v4i16_clamp, 97129}, // __nvvm_sust_b_2d_v4i16_clamp
      {Intrinsic::nvvm_sust_b_2d_v4i16_trap, 97158}, // __nvvm_sust_b_2d_v4i16_trap
      {Intrinsic::nvvm_sust_b_2d_v4i16_zero, 97186}, // __nvvm_sust_b_2d_v4i16_zero
      {Intrinsic::nvvm_sust_b_2d_v4i32_clamp, 97214}, // __nvvm_sust_b_2d_v4i32_clamp
      {Intrinsic::nvvm_sust_b_2d_v4i32_trap, 97243}, // __nvvm_sust_b_2d_v4i32_trap
      {Intrinsic::nvvm_sust_b_2d_v4i32_zero, 97271}, // __nvvm_sust_b_2d_v4i32_zero
      {Intrinsic::nvvm_sust_b_2d_v4i8_clamp, 97299}, // __nvvm_sust_b_2d_v4i8_clamp
      {Intrinsic::nvvm_sust_b_2d_v4i8_trap, 97327}, // __nvvm_sust_b_2d_v4i8_trap
      {Intrinsic::nvvm_sust_b_2d_v4i8_zero, 97354}, // __nvvm_sust_b_2d_v4i8_zero
      {Intrinsic::nvvm_sust_b_3d_i16_clamp, 97381}, // __nvvm_sust_b_3d_i16_clamp
      {Intrinsic::nvvm_sust_b_3d_i16_trap, 97408}, // __nvvm_sust_b_3d_i16_trap
      {Intrinsic::nvvm_sust_b_3d_i16_zero, 97434}, // __nvvm_sust_b_3d_i16_zero
      {Intrinsic::nvvm_sust_b_3d_i32_clamp, 97460}, // __nvvm_sust_b_3d_i32_clamp
      {Intrinsic::nvvm_sust_b_3d_i32_trap, 97487}, // __nvvm_sust_b_3d_i32_trap
      {Intrinsic::nvvm_sust_b_3d_i32_zero, 97513}, // __nvvm_sust_b_3d_i32_zero
      {Intrinsic::nvvm_sust_b_3d_i64_clamp, 97539}, // __nvvm_sust_b_3d_i64_clamp
      {Intrinsic::nvvm_sust_b_3d_i64_trap, 97566}, // __nvvm_sust_b_3d_i64_trap
      {Intrinsic::nvvm_sust_b_3d_i64_zero, 97592}, // __nvvm_sust_b_3d_i64_zero
      {Intrinsic::nvvm_sust_b_3d_i8_clamp, 97618}, // __nvvm_sust_b_3d_i8_clamp
      {Intrinsic::nvvm_sust_b_3d_i8_trap, 97644}, // __nvvm_sust_b_3d_i8_trap
      {Intrinsic::nvvm_sust_b_3d_i8_zero, 97669}, // __nvvm_sust_b_3d_i8_zero
      {Intrinsic::nvvm_sust_b_3d_v2i16_clamp, 97694}, // __nvvm_sust_b_3d_v2i16_clamp
      {Intrinsic::nvvm_sust_b_3d_v2i16_trap, 97723}, // __nvvm_sust_b_3d_v2i16_trap
      {Intrinsic::nvvm_sust_b_3d_v2i16_zero, 97751}, // __nvvm_sust_b_3d_v2i16_zero
      {Intrinsic::nvvm_sust_b_3d_v2i32_clamp, 97779}, // __nvvm_sust_b_3d_v2i32_clamp
      {Intrinsic::nvvm_sust_b_3d_v2i32_trap, 97808}, // __nvvm_sust_b_3d_v2i32_trap
      {Intrinsic::nvvm_sust_b_3d_v2i32_zero, 97836}, // __nvvm_sust_b_3d_v2i32_zero
      {Intrinsic::nvvm_sust_b_3d_v2i64_clamp, 97864}, // __nvvm_sust_b_3d_v2i64_clamp
      {Intrinsic::nvvm_sust_b_3d_v2i64_trap, 97893}, // __nvvm_sust_b_3d_v2i64_trap
      {Intrinsic::nvvm_sust_b_3d_v2i64_zero, 97921}, // __nvvm_sust_b_3d_v2i64_zero
      {Intrinsic::nvvm_sust_b_3d_v2i8_clamp, 97949}, // __nvvm_sust_b_3d_v2i8_clamp
      {Intrinsic::nvvm_sust_b_3d_v2i8_trap, 97977}, // __nvvm_sust_b_3d_v2i8_trap
      {Intrinsic::nvvm_sust_b_3d_v2i8_zero, 98004}, // __nvvm_sust_b_3d_v2i8_zero
      {Intrinsic::nvvm_sust_b_3d_v4i16_clamp, 98031}, // __nvvm_sust_b_3d_v4i16_clamp
      {Intrinsic::nvvm_sust_b_3d_v4i16_trap, 98060}, // __nvvm_sust_b_3d_v4i16_trap
      {Intrinsic::nvvm_sust_b_3d_v4i16_zero, 98088}, // __nvvm_sust_b_3d_v4i16_zero
      {Intrinsic::nvvm_sust_b_3d_v4i32_clamp, 98116}, // __nvvm_sust_b_3d_v4i32_clamp
      {Intrinsic::nvvm_sust_b_3d_v4i32_trap, 98145}, // __nvvm_sust_b_3d_v4i32_trap
      {Intrinsic::nvvm_sust_b_3d_v4i32_zero, 98173}, // __nvvm_sust_b_3d_v4i32_zero
      {Intrinsic::nvvm_sust_b_3d_v4i8_clamp, 98201}, // __nvvm_sust_b_3d_v4i8_clamp
      {Intrinsic::nvvm_sust_b_3d_v4i8_trap, 98229}, // __nvvm_sust_b_3d_v4i8_trap
      {Intrinsic::nvvm_sust_b_3d_v4i8_zero, 98256}, // __nvvm_sust_b_3d_v4i8_zero
      {Intrinsic::nvvm_sust_p_1d_array_i16_trap, 98283}, // __nvvm_sust_p_1d_array_i16_trap
      {Intrinsic::nvvm_sust_p_1d_array_i32_trap, 98315}, // __nvvm_sust_p_1d_array_i32_trap
      {Intrinsic::nvvm_sust_p_1d_array_i8_trap, 98347}, // __nvvm_sust_p_1d_array_i8_trap
      {Intrinsic::nvvm_sust_p_1d_array_v2i16_trap, 98378}, // __nvvm_sust_p_1d_array_v2i16_trap
      {Intrinsic::nvvm_sust_p_1d_array_v2i32_trap, 98412}, // __nvvm_sust_p_1d_array_v2i32_trap
      {Intrinsic::nvvm_sust_p_1d_array_v2i8_trap, 98446}, // __nvvm_sust_p_1d_array_v2i8_trap
      {Intrinsic::nvvm_sust_p_1d_array_v4i16_trap, 98479}, // __nvvm_sust_p_1d_array_v4i16_trap
      {Intrinsic::nvvm_sust_p_1d_array_v4i32_trap, 98513}, // __nvvm_sust_p_1d_array_v4i32_trap
      {Intrinsic::nvvm_sust_p_1d_array_v4i8_trap, 98547}, // __nvvm_sust_p_1d_array_v4i8_trap
      {Intrinsic::nvvm_sust_p_1d_i16_trap, 98580}, // __nvvm_sust_p_1d_i16_trap
      {Intrinsic::nvvm_sust_p_1d_i32_trap, 98606}, // __nvvm_sust_p_1d_i32_trap
      {Intrinsic::nvvm_sust_p_1d_i8_trap, 98632}, // __nvvm_sust_p_1d_i8_trap
      {Intrinsic::nvvm_sust_p_1d_v2i16_trap, 98657}, // __nvvm_sust_p_1d_v2i16_trap
      {Intrinsic::nvvm_sust_p_1d_v2i32_trap, 98685}, // __nvvm_sust_p_1d_v2i32_trap
      {Intrinsic::nvvm_sust_p_1d_v2i8_trap, 98713}, // __nvvm_sust_p_1d_v2i8_trap
      {Intrinsic::nvvm_sust_p_1d_v4i16_trap, 98740}, // __nvvm_sust_p_1d_v4i16_trap
      {Intrinsic::nvvm_sust_p_1d_v4i32_trap, 98768}, // __nvvm_sust_p_1d_v4i32_trap
      {Intrinsic::nvvm_sust_p_1d_v4i8_trap, 98796}, // __nvvm_sust_p_1d_v4i8_trap
      {Intrinsic::nvvm_sust_p_2d_array_i16_trap, 98823}, // __nvvm_sust_p_2d_array_i16_trap
      {Intrinsic::nvvm_sust_p_2d_array_i32_trap, 98855}, // __nvvm_sust_p_2d_array_i32_trap
      {Intrinsic::nvvm_sust_p_2d_array_i8_trap, 98887}, // __nvvm_sust_p_2d_array_i8_trap
      {Intrinsic::nvvm_sust_p_2d_array_v2i16_trap, 98918}, // __nvvm_sust_p_2d_array_v2i16_trap
      {Intrinsic::nvvm_sust_p_2d_array_v2i32_trap, 98952}, // __nvvm_sust_p_2d_array_v2i32_trap
      {Intrinsic::nvvm_sust_p_2d_array_v2i8_trap, 98986}, // __nvvm_sust_p_2d_array_v2i8_trap
      {Intrinsic::nvvm_sust_p_2d_array_v4i16_trap, 99019}, // __nvvm_sust_p_2d_array_v4i16_trap
      {Intrinsic::nvvm_sust_p_2d_array_v4i32_trap, 99053}, // __nvvm_sust_p_2d_array_v4i32_trap
      {Intrinsic::nvvm_sust_p_2d_array_v4i8_trap, 99087}, // __nvvm_sust_p_2d_array_v4i8_trap
      {Intrinsic::nvvm_sust_p_2d_i16_trap, 99120}, // __nvvm_sust_p_2d_i16_trap
      {Intrinsic::nvvm_sust_p_2d_i32_trap, 99146}, // __nvvm_sust_p_2d_i32_trap
      {Intrinsic::nvvm_sust_p_2d_i8_trap, 99172}, // __nvvm_sust_p_2d_i8_trap
      {Intrinsic::nvvm_sust_p_2d_v2i16_trap, 99197}, // __nvvm_sust_p_2d_v2i16_trap
      {Intrinsic::nvvm_sust_p_2d_v2i32_trap, 99225}, // __nvvm_sust_p_2d_v2i32_trap
      {Intrinsic::nvvm_sust_p_2d_v2i8_trap, 99253}, // __nvvm_sust_p_2d_v2i8_trap
      {Intrinsic::nvvm_sust_p_2d_v4i16_trap, 99280}, // __nvvm_sust_p_2d_v4i16_trap
      {Intrinsic::nvvm_sust_p_2d_v4i32_trap, 99308}, // __nvvm_sust_p_2d_v4i32_trap
      {Intrinsic::nvvm_sust_p_2d_v4i8_trap, 99336}, // __nvvm_sust_p_2d_v4i8_trap
      {Intrinsic::nvvm_sust_p_3d_i16_trap, 99363}, // __nvvm_sust_p_3d_i16_trap
      {Intrinsic::nvvm_sust_p_3d_i32_trap, 99389}, // __nvvm_sust_p_3d_i32_trap
      {Intrinsic::nvvm_sust_p_3d_i8_trap, 99415}, // __nvvm_sust_p_3d_i8_trap
      {Intrinsic::nvvm_sust_p_3d_v2i16_trap, 99440}, // __nvvm_sust_p_3d_v2i16_trap
      {Intrinsic::nvvm_sust_p_3d_v2i32_trap, 99468}, // __nvvm_sust_p_3d_v2i32_trap
      {Intrinsic::nvvm_sust_p_3d_v2i8_trap, 99496}, // __nvvm_sust_p_3d_v2i8_trap
      {Intrinsic::nvvm_sust_p_3d_v4i16_trap, 99523}, // __nvvm_sust_p_3d_v4i16_trap
      {Intrinsic::nvvm_sust_p_3d_v4i32_trap, 99551}, // __nvvm_sust_p_3d_v4i32_trap
      {Intrinsic::nvvm_sust_p_3d_v4i8_trap, 99579}, // __nvvm_sust_p_3d_v4i8_trap
      {Intrinsic::nvvm_swap_lo_hi_b64, 99606}, // __nvvm_swap_lo_hi_b64
      {Intrinsic::nvvm_trunc_d, 99628}, // __nvvm_trunc_d
      {Intrinsic::nvvm_trunc_f, 99643}, // __nvvm_trunc_f
      {Intrinsic::nvvm_trunc_ftz_f, 99658}, // __nvvm_trunc_ftz_f
      {Intrinsic::nvvm_txq_array_size, 99677}, // __nvvm_txq_array_size
      {Intrinsic::nvvm_txq_channel_data_type, 99699}, // __nvvm_txq_channel_data_type
      {Intrinsic::nvvm_txq_channel_order, 99728}, // __nvvm_txq_channel_order
      {Intrinsic::nvvm_txq_depth, 99753}, // __nvvm_txq_depth
      {Intrinsic::nvvm_txq_height, 99770}, // __nvvm_txq_height
      {Intrinsic::nvvm_txq_num_mipmap_levels, 99788}, // __nvvm_txq_num_mipmap_levels
      {Intrinsic::nvvm_txq_num_samples, 99817}, // __nvvm_txq_num_samples
      {Intrinsic::nvvm_txq_width, 99840}, // __nvvm_txq_width
      {Intrinsic::nvvm_ui2d_rm, 99857}, // __nvvm_ui2d_rm
      {Intrinsic::nvvm_ui2d_rn, 99872}, // __nvvm_ui2d_rn
      {Intrinsic::nvvm_ui2d_rp, 99887}, // __nvvm_ui2d_rp
      {Intrinsic::nvvm_ui2d_rz, 99902}, // __nvvm_ui2d_rz
      {Intrinsic::nvvm_ui2f_rm, 99917}, // __nvvm_ui2f_rm
      {Intrinsic::nvvm_ui2f_rn, 99932}, // __nvvm_ui2f_rn
      {Intrinsic::nvvm_ui2f_rp, 99947}, // __nvvm_ui2f_rp
      {Intrinsic::nvvm_ui2f_rz, 99962}, // __nvvm_ui2f_rz
      {Intrinsic::nvvm_ull2d_rm, 99977}, // __nvvm_ull2d_rm
      {Intrinsic::nvvm_ull2d_rn, 99993}, // __nvvm_ull2d_rn
      {Intrinsic::nvvm_ull2d_rp, 100009}, // __nvvm_ull2d_rp
      {Intrinsic::nvvm_ull2d_rz, 100025}, // __nvvm_ull2d_rz
      {Intrinsic::nvvm_ull2f_rm, 100041}, // __nvvm_ull2f_rm
      {Intrinsic::nvvm_ull2f_rn, 100057}, // __nvvm_ull2f_rn
      {Intrinsic::nvvm_ull2f_rp, 100073}, // __nvvm_ull2f_rp
      {Intrinsic::nvvm_ull2f_rz, 100089}, // __nvvm_ull2f_rz
      {Intrinsic::nvvm_vote_all, 100105}, // __nvvm_vote_all
      {Intrinsic::nvvm_vote_all_sync, 100121}, // __nvvm_vote_all_sync
      {Intrinsic::nvvm_vote_any, 100142}, // __nvvm_vote_any
      {Intrinsic::nvvm_vote_any_sync, 100158}, // __nvvm_vote_any_sync
      {Intrinsic::nvvm_vote_ballot, 100179}, // __nvvm_vote_ballot
      {Intrinsic::nvvm_vote_ballot_sync, 100198}, // __nvvm_vote_ballot_sync
      {Intrinsic::nvvm_vote_uni, 100222}, // __nvvm_vote_uni
      {Intrinsic::nvvm_vote_uni_sync, 100238}, // __nvvm_vote_uni_sync
      {Intrinsic::nvvm_barrier0, 84570}, // __syncthreads
    };
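    // Note that nvvmNames is sorted by the builtin-name string, not by
    // intrinsic name: __syncthreads (Intrinsic::nvvm_barrier0) legitimately
    // sorts after every __nvvm_* entry above.  The second field is an
    // offset into the shared name table, which is why it is not monotonic
    // down the table.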
    auto I = std::lower_bound(std::begin(nvvmNames),
                              std::end(nvvmNames),
                              BuiltinNameStr);
    if (I != std::end(nvvmNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
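  // Hypothetical usage sketch (assuming this is the body of the generated
  // Intrinsic::getIntrinsicForClangBuiltin; the enclosing signature is not
  // visible in this excerpt):
  //
  //   Intrinsic::ID ID =
  //       Intrinsic::getIntrinsicForClangBuiltin("nvvm", "__nvvm_bar_sync");
  //   if (ID != Intrinsic::not_intrinsic)
  //     /* lower the Clang builtin to a call of the LLVM intrinsic */;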
  if (TargetPrefix == "ppc") {
    static const BuiltinEntry ppcNames[] = {
      {Intrinsic::ppc_addf128_round_to_odd, 100279}, // __builtin_addf128_round_to_odd
      {Intrinsic::ppc_altivec_crypto_vcipher, 100310}, // __builtin_altivec_crypto_vcipher
      {Intrinsic::ppc_altivec_crypto_vcipherlast, 100343}, // __builtin_altivec_crypto_vcipherlast
      {Intrinsic::ppc_altivec_crypto_vncipher, 100380}, // __builtin_altivec_crypto_vncipher
      {Intrinsic::ppc_altivec_crypto_vncipherlast, 100414}, // __builtin_altivec_crypto_vncipherlast
      {Intrinsic::ppc_altivec_crypto_vpermxor, 100452}, // __builtin_altivec_crypto_vpermxor
      {Intrinsic::ppc_altivec_crypto_vpermxor_be, 100486}, // __builtin_altivec_crypto_vpermxor_be
      {Intrinsic::ppc_altivec_crypto_vpmsumb, 100523}, // __builtin_altivec_crypto_vpmsumb
      {Intrinsic::ppc_altivec_crypto_vpmsumd, 100556}, // __builtin_altivec_crypto_vpmsumd
      {Intrinsic::ppc_altivec_crypto_vpmsumh, 100589}, // __builtin_altivec_crypto_vpmsumh
      {Intrinsic::ppc_altivec_crypto_vpmsumw, 100622}, // __builtin_altivec_crypto_vpmsumw
      {Intrinsic::ppc_altivec_crypto_vsbox, 100655}, // __builtin_altivec_crypto_vsbox
      {Intrinsic::ppc_altivec_crypto_vshasigmad, 100686}, // __builtin_altivec_crypto_vshasigmad
      {Intrinsic::ppc_altivec_crypto_vshasigmaw, 100722}, // __builtin_altivec_crypto_vshasigmaw
      {Intrinsic::ppc_altivec_dss, 100758}, // __builtin_altivec_dss
      {Intrinsic::ppc_altivec_dssall, 100780}, // __builtin_altivec_dssall
      {Intrinsic::ppc_altivec_dst, 100805}, // __builtin_altivec_dst
      {Intrinsic::ppc_altivec_dstst, 100827}, // __builtin_altivec_dstst
      {Intrinsic::ppc_altivec_dststt, 100851}, // __builtin_altivec_dststt
      {Intrinsic::ppc_altivec_dstt, 100876}, // __builtin_altivec_dstt
      {Intrinsic::ppc_altivec_mfvscr, 100899}, // __builtin_altivec_mfvscr
      {Intrinsic::ppc_altivec_mtvscr, 100924}, // __builtin_altivec_mtvscr
      {Intrinsic::ppc_altivec_mtvsrbm, 100949}, // __builtin_altivec_mtvsrbm
      {Intrinsic::ppc_altivec_mtvsrdm, 100975}, // __builtin_altivec_mtvsrdm
      {Intrinsic::ppc_altivec_mtvsrhm, 101001}, // __builtin_altivec_mtvsrhm
      {Intrinsic::ppc_altivec_mtvsrqm, 101027}, // __builtin_altivec_mtvsrqm
      {Intrinsic::ppc_altivec_mtvsrwm, 101053}, // __builtin_altivec_mtvsrwm
      {Intrinsic::ppc_altivec_vabsdub, 101079}, // __builtin_altivec_vabsdub
      {Intrinsic::ppc_altivec_vabsduh, 101105}, // __builtin_altivec_vabsduh
      {Intrinsic::ppc_altivec_vabsduw, 101131}, // __builtin_altivec_vabsduw
      {Intrinsic::ppc_altivec_vaddcuq, 101157}, // __builtin_altivec_vaddcuq
      {Intrinsic::ppc_altivec_vaddcuw, 101183}, // __builtin_altivec_vaddcuw
      {Intrinsic::ppc_altivec_vaddecuq, 101209}, // __builtin_altivec_vaddecuq
      {Intrinsic::ppc_altivec_vaddeuqm, 101236}, // __builtin_altivec_vaddeuqm
      {Intrinsic::ppc_altivec_vaddsbs, 101263}, // __builtin_altivec_vaddsbs
      {Intrinsic::ppc_altivec_vaddshs, 101289}, // __builtin_altivec_vaddshs
      {Intrinsic::ppc_altivec_vaddsws, 101315}, // __builtin_altivec_vaddsws
      {Intrinsic::ppc_altivec_vaddubs, 101341}, // __builtin_altivec_vaddubs
      {Intrinsic::ppc_altivec_vadduhs, 101367}, // __builtin_altivec_vadduhs
      {Intrinsic::ppc_altivec_vadduws, 101393}, // __builtin_altivec_vadduws
      {Intrinsic::ppc_altivec_vavgsb, 101419}, // __builtin_altivec_vavgsb
      {Intrinsic::ppc_altivec_vavgsh, 101444}, // __builtin_altivec_vavgsh
      {Intrinsic::ppc_altivec_vavgsw, 101469}, // __builtin_altivec_vavgsw
      {Intrinsic::ppc_altivec_vavgub, 101494}, // __builtin_altivec_vavgub
      {Intrinsic::ppc_altivec_vavguh, 101519}, // __builtin_altivec_vavguh
      {Intrinsic::ppc_altivec_vavguw, 101544}, // __builtin_altivec_vavguw
      {Intrinsic::ppc_altivec_vbpermd, 101569}, // __builtin_altivec_vbpermd
      {Intrinsic::ppc_altivec_vbpermq, 101595}, // __builtin_altivec_vbpermq
      {Intrinsic::ppc_altivec_vcfsx, 101621}, // __builtin_altivec_vcfsx
      {Intrinsic::ppc_altivec_vcfuged, 101645}, // __builtin_altivec_vcfuged
      {Intrinsic::ppc_altivec_vcfux, 101671}, // __builtin_altivec_vcfux
      {Intrinsic::ppc_altivec_vclrlb, 101695}, // __builtin_altivec_vclrlb
      {Intrinsic::ppc_altivec_vclrrb, 101720}, // __builtin_altivec_vclrrb
      {Intrinsic::ppc_altivec_vclzdm, 101745}, // __builtin_altivec_vclzdm
      {Intrinsic::ppc_altivec_vclzlsbb, 101770}, // __builtin_altivec_vclzlsbb
      {Intrinsic::ppc_altivec_vcmpbfp, 101797}, // __builtin_altivec_vcmpbfp
      {Intrinsic::ppc_altivec_vcmpbfp_p, 101823}, // __builtin_altivec_vcmpbfp_p
      {Intrinsic::ppc_altivec_vcmpeqfp, 101851}, // __builtin_altivec_vcmpeqfp
      {Intrinsic::ppc_altivec_vcmpeqfp_p, 101878}, // __builtin_altivec_vcmpeqfp_p
      {Intrinsic::ppc_altivec_vcmpequb, 101907}, // __builtin_altivec_vcmpequb
      {Intrinsic::ppc_altivec_vcmpequb_p, 101934}, // __builtin_altivec_vcmpequb_p
      {Intrinsic::ppc_altivec_vcmpequd, 101963}, // __builtin_altivec_vcmpequd
      {Intrinsic::ppc_altivec_vcmpequd_p, 101990}, // __builtin_altivec_vcmpequd_p
      {Intrinsic::ppc_altivec_vcmpequh, 102019}, // __builtin_altivec_vcmpequh
      {Intrinsic::ppc_altivec_vcmpequh_p, 102046}, // __builtin_altivec_vcmpequh_p
      {Intrinsic::ppc_altivec_vcmpequq, 102075}, // __builtin_altivec_vcmpequq
      {Intrinsic::ppc_altivec_vcmpequq_p, 102102}, // __builtin_altivec_vcmpequq_p
      {Intrinsic::ppc_altivec_vcmpequw, 102131}, // __builtin_altivec_vcmpequw
      {Intrinsic::ppc_altivec_vcmpequw_p, 102158}, // __builtin_altivec_vcmpequw_p
      {Intrinsic::ppc_altivec_vcmpgefp, 102187}, // __builtin_altivec_vcmpgefp
      {Intrinsic::ppc_altivec_vcmpgefp_p, 102214}, // __builtin_altivec_vcmpgefp_p
      {Intrinsic::ppc_altivec_vcmpgtfp, 102243}, // __builtin_altivec_vcmpgtfp
      {Intrinsic::ppc_altivec_vcmpgtfp_p, 102270}, // __builtin_altivec_vcmpgtfp_p
      {Intrinsic::ppc_altivec_vcmpgtsb, 102299}, // __builtin_altivec_vcmpgtsb
      {Intrinsic::ppc_altivec_vcmpgtsb_p, 102326}, // __builtin_altivec_vcmpgtsb_p
      {Intrinsic::ppc_altivec_vcmpgtsd, 102355}, // __builtin_altivec_vcmpgtsd
      {Intrinsic::ppc_altivec_vcmpgtsd_p, 102382}, // __builtin_altivec_vcmpgtsd_p
      {Intrinsic::ppc_altivec_vcmpgtsh, 102411}, // __builtin_altivec_vcmpgtsh
      {Intrinsic::ppc_altivec_vcmpgtsh_p, 102438}, // __builtin_altivec_vcmpgtsh_p
      {Intrinsic::ppc_altivec_vcmpgtsq, 102467}, // __builtin_altivec_vcmpgtsq
      {Intrinsic::ppc_altivec_vcmpgtsq_p, 102494}, // __builtin_altivec_vcmpgtsq_p
      {Intrinsic::ppc_altivec_vcmpgtsw, 102523}, // __builtin_altivec_vcmpgtsw
      {Intrinsic::ppc_altivec_vcmpgtsw_p, 102550}, // __builtin_altivec_vcmpgtsw_p
      {Intrinsic::ppc_altivec_vcmpgtub, 102579}, // __builtin_altivec_vcmpgtub
      {Intrinsic::ppc_altivec_vcmpgtub_p, 102606}, // __builtin_altivec_vcmpgtub_p
      {Intrinsic::ppc_altivec_vcmpgtud, 102635}, // __builtin_altivec_vcmpgtud
      {Intrinsic::ppc_altivec_vcmpgtud_p, 102662}, // __builtin_altivec_vcmpgtud_p
      {Intrinsic::ppc_altivec_vcmpgtuh, 102691}, // __builtin_altivec_vcmpgtuh
      {Intrinsic::ppc_altivec_vcmpgtuh_p, 102718}, // __builtin_altivec_vcmpgtuh_p
      {Intrinsic::ppc_altivec_vcmpgtuq, 102747}, // __builtin_altivec_vcmpgtuq
      {Intrinsic::ppc_altivec_vcmpgtuq_p, 102774}, // __builtin_altivec_vcmpgtuq_p
      {Intrinsic::ppc_altivec_vcmpgtuw, 102803}, // __builtin_altivec_vcmpgtuw
      {Intrinsic::ppc_altivec_vcmpgtuw_p, 102830}, // __builtin_altivec_vcmpgtuw_p
      {Intrinsic::ppc_altivec_vcmpneb, 102859}, // __builtin_altivec_vcmpneb
      {Intrinsic::ppc_altivec_vcmpneb_p, 102885}, // __builtin_altivec_vcmpneb_p
      {Intrinsic::ppc_altivec_vcmpneh, 102913}, // __builtin_altivec_vcmpneh
      {Intrinsic::ppc_altivec_vcmpneh_p, 102939}, // __builtin_altivec_vcmpneh_p
      {Intrinsic::ppc_altivec_vcmpnew, 102967}, // __builtin_altivec_vcmpnew
      {Intrinsic::ppc_altivec_vcmpnew_p, 102993}, // __builtin_altivec_vcmpnew_p
      {Intrinsic::ppc_altivec_vcmpnezb, 103021}, // __builtin_altivec_vcmpnezb
      {Intrinsic::ppc_altivec_vcmpnezb_p, 103048}, // __builtin_altivec_vcmpnezb_p
      {Intrinsic::ppc_altivec_vcmpnezh, 103077}, // __builtin_altivec_vcmpnezh
      {Intrinsic::ppc_altivec_vcmpnezh_p, 103104}, // __builtin_altivec_vcmpnezh_p
      {Intrinsic::ppc_altivec_vcmpnezw, 103133}, // __builtin_altivec_vcmpnezw
      {Intrinsic::ppc_altivec_vcmpnezw_p, 103160}, // __builtin_altivec_vcmpnezw_p
      {Intrinsic::ppc_altivec_vcntmbb, 103189}, // __builtin_altivec_vcntmbb
      {Intrinsic::ppc_altivec_vcntmbd, 103215}, // __builtin_altivec_vcntmbd
      {Intrinsic::ppc_altivec_vcntmbh, 103241}, // __builtin_altivec_vcntmbh
      {Intrinsic::ppc_altivec_vcntmbw, 103267}, // __builtin_altivec_vcntmbw
      {Intrinsic::ppc_altivec_vctsxs, 103293}, // __builtin_altivec_vctsxs
      {Intrinsic::ppc_altivec_vctuxs, 103318}, // __builtin_altivec_vctuxs
      {Intrinsic::ppc_altivec_vctzdm, 103343}, // __builtin_altivec_vctzdm
      {Intrinsic::ppc_altivec_vctzlsbb, 103368}, // __builtin_altivec_vctzlsbb
      {Intrinsic::ppc_altivec_vdivesd, 103395}, // __builtin_altivec_vdivesd
      {Intrinsic::ppc_altivec_vdivesq, 103421}, // __builtin_altivec_vdivesq
      {Intrinsic::ppc_altivec_vdivesw, 103447}, // __builtin_altivec_vdivesw
      {Intrinsic::ppc_altivec_vdiveud, 103473}, // __builtin_altivec_vdiveud
      {Intrinsic::ppc_altivec_vdiveuq, 103499}, // __builtin_altivec_vdiveuq
      {Intrinsic::ppc_altivec_vdiveuw, 103525}, // __builtin_altivec_vdiveuw
      {Intrinsic::ppc_altivec_vexpandbm, 103551}, // __builtin_altivec_vexpandbm
      {Intrinsic::ppc_altivec_vexpanddm, 103579}, // __builtin_altivec_vexpanddm
      {Intrinsic::ppc_altivec_vexpandhm, 103607}, // __builtin_altivec_vexpandhm
      {Intrinsic::ppc_altivec_vexpandqm, 103635}, // __builtin_altivec_vexpandqm
      {Intrinsic::ppc_altivec_vexpandwm, 103663}, // __builtin_altivec_vexpandwm
      {Intrinsic::ppc_altivec_vexptefp, 103691}, // __builtin_altivec_vexptefp
      {Intrinsic::ppc_altivec_vextddvlx, 103718}, // __builtin_altivec_vextddvlx
      {Intrinsic::ppc_altivec_vextddvrx, 103746}, // __builtin_altivec_vextddvrx
      {Intrinsic::ppc_altivec_vextdubvlx, 103774}, // __builtin_altivec_vextdubvlx
      {Intrinsic::ppc_altivec_vextdubvrx, 103803}, // __builtin_altivec_vextdubvrx
      {Intrinsic::ppc_altivec_vextduhvlx, 103832}, // __builtin_altivec_vextduhvlx
      {Intrinsic::ppc_altivec_vextduhvrx, 103861}, // __builtin_altivec_vextduhvrx
      {Intrinsic::ppc_altivec_vextduwvlx, 103890}, // __builtin_altivec_vextduwvlx
      {Intrinsic::ppc_altivec_vextduwvrx, 103919}, // __builtin_altivec_vextduwvrx
      {Intrinsic::ppc_altivec_vextractbm, 103948}, // __builtin_altivec_vextractbm
      {Intrinsic::ppc_altivec_vextractdm, 103977}, // __builtin_altivec_vextractdm
      {Intrinsic::ppc_altivec_vextracthm, 104006}, // __builtin_altivec_vextracthm
      {Intrinsic::ppc_altivec_vextractqm, 104035}, // __builtin_altivec_vextractqm
      {Intrinsic::ppc_altivec_vextractwm, 104064}, // __builtin_altivec_vextractwm
      {Intrinsic::ppc_altivec_vextsb2d, 104093}, // __builtin_altivec_vextsb2d
      {Intrinsic::ppc_altivec_vextsb2w, 104120}, // __builtin_altivec_vextsb2w
      {Intrinsic::ppc_altivec_vextsd2q, 104147}, // __builtin_altivec_vextsd2q
      {Intrinsic::ppc_altivec_vextsh2d, 104174}, // __builtin_altivec_vextsh2d
      {Intrinsic::ppc_altivec_vextsh2w, 104201}, // __builtin_altivec_vextsh2w
      {Intrinsic::ppc_altivec_vextsw2d, 104228}, // __builtin_altivec_vextsw2d
      {Intrinsic::ppc_altivec_vgbbd, 104255}, // __builtin_altivec_vgbbd
      {Intrinsic::ppc_altivec_vgnb, 104279}, // __builtin_altivec_vgnb
      {Intrinsic::ppc_altivec_vinsblx, 104302}, // __builtin_altivec_vinsblx
      {Intrinsic::ppc_altivec_vinsbrx, 104328}, // __builtin_altivec_vinsbrx
      {Intrinsic::ppc_altivec_vinsbvlx, 104354}, // __builtin_altivec_vinsbvlx
      {Intrinsic::ppc_altivec_vinsbvrx, 104381}, // __builtin_altivec_vinsbvrx
      {Intrinsic::ppc_altivec_vinsdlx, 104408}, // __builtin_altivec_vinsdlx
      {Intrinsic::ppc_altivec_vinsdrx, 104434}, // __builtin_altivec_vinsdrx
      {Intrinsic::ppc_altivec_vinshlx, 104460}, // __builtin_altivec_vinshlx
      {Intrinsic::ppc_altivec_vinshrx, 104486}, // __builtin_altivec_vinshrx
      {Intrinsic::ppc_altivec_vinshvlx, 104512}, // __builtin_altivec_vinshvlx
      {Intrinsic::ppc_altivec_vinshvrx, 104539}, // __builtin_altivec_vinshvrx
      {Intrinsic::ppc_altivec_vinswlx, 104566}, // __builtin_altivec_vinswlx
      {Intrinsic::ppc_altivec_vinswrx, 104592}, // __builtin_altivec_vinswrx
      {Intrinsic::ppc_altivec_vinswvlx, 104618}, // __builtin_altivec_vinswvlx
      {Intrinsic::ppc_altivec_vinswvrx, 104645}, // __builtin_altivec_vinswvrx
      {Intrinsic::ppc_altivec_vlogefp, 104672}, // __builtin_altivec_vlogefp
      {Intrinsic::ppc_altivec_vmaddfp, 104698}, // __builtin_altivec_vmaddfp
      {Intrinsic::ppc_altivec_vmaxfp, 104724}, // __builtin_altivec_vmaxfp
      {Intrinsic::ppc_altivec_vmaxsb, 104749}, // __builtin_altivec_vmaxsb
      {Intrinsic::ppc_altivec_vmaxsd, 104774}, // __builtin_altivec_vmaxsd
      {Intrinsic::ppc_altivec_vmaxsh, 104799}, // __builtin_altivec_vmaxsh
      {Intrinsic::ppc_altivec_vmaxsw, 104824}, // __builtin_altivec_vmaxsw
      {Intrinsic::ppc_altivec_vmaxub, 104849}, // __builtin_altivec_vmaxub
      {Intrinsic::ppc_altivec_vmaxud, 104874}, // __builtin_altivec_vmaxud
      {Intrinsic::ppc_altivec_vmaxuh, 104899}, // __builtin_altivec_vmaxuh
      {Intrinsic::ppc_altivec_vmaxuw, 104924}, // __builtin_altivec_vmaxuw
      {Intrinsic::ppc_altivec_vmhaddshs, 104949}, // __builtin_altivec_vmhaddshs
      {Intrinsic::ppc_altivec_vmhraddshs, 104977}, // __builtin_altivec_vmhraddshs
      {Intrinsic::ppc_altivec_vminfp, 105006}, // __builtin_altivec_vminfp
      {Intrinsic::ppc_altivec_vminsb, 105031}, // __builtin_altivec_vminsb
      {Intrinsic::ppc_altivec_vminsd, 105056}, // __builtin_altivec_vminsd
      {Intrinsic::ppc_altivec_vminsh, 105081}, // __builtin_altivec_vminsh
      {Intrinsic::ppc_altivec_vminsw, 105106}, // __builtin_altivec_vminsw
      {Intrinsic::ppc_altivec_vminub, 105131}, // __builtin_altivec_vminub
      {Intrinsic::ppc_altivec_vminud, 105156}, // __builtin_altivec_vminud
      {Intrinsic::ppc_altivec_vminuh, 105181}, // __builtin_altivec_vminuh
      {Intrinsic::ppc_altivec_vminuw, 105206}, // __builtin_altivec_vminuw
      {Intrinsic::ppc_altivec_vmladduhm, 105231}, // __builtin_altivec_vmladduhm
      {Intrinsic::ppc_altivec_vmsumcud, 105259}, // __builtin_altivec_vmsumcud
      {Intrinsic::ppc_altivec_vmsummbm, 105286}, // __builtin_altivec_vmsummbm
      {Intrinsic::ppc_altivec_vmsumshm, 105313}, // __builtin_altivec_vmsumshm
      {Intrinsic::ppc_altivec_vmsumshs, 105340}, // __builtin_altivec_vmsumshs
      {Intrinsic::ppc_altivec_vmsumubm, 105367}, // __builtin_altivec_vmsumubm
      {Intrinsic::ppc_altivec_vmsumudm, 105394}, // __builtin_altivec_vmsumudm
      {Intrinsic::ppc_altivec_vmsumuhm, 105421}, // __builtin_altivec_vmsumuhm
      {Intrinsic::ppc_altivec_vmsumuhs, 105448}, // __builtin_altivec_vmsumuhs
      {Intrinsic::ppc_altivec_vmulesb, 105475}, // __builtin_altivec_vmulesb
      {Intrinsic::ppc_altivec_vmulesd, 105501}, // __builtin_altivec_vmulesd
      {Intrinsic::ppc_altivec_vmulesh, 105527}, // __builtin_altivec_vmulesh
      {Intrinsic::ppc_altivec_vmulesw, 105553}, // __builtin_altivec_vmulesw
      {Intrinsic::ppc_altivec_vmuleub, 105579}, // __builtin_altivec_vmuleub
      {Intrinsic::ppc_altivec_vmuleud, 105605}, // __builtin_altivec_vmuleud
      {Intrinsic::ppc_altivec_vmuleuh, 105631}, // __builtin_altivec_vmuleuh
      {Intrinsic::ppc_altivec_vmuleuw, 105657}, // __builtin_altivec_vmuleuw
      {Intrinsic::ppc_altivec_vmulhsd, 105683}, // __builtin_altivec_vmulhsd
      {Intrinsic::ppc_altivec_vmulhsw, 105709}, // __builtin_altivec_vmulhsw
      {Intrinsic::ppc_altivec_vmulhud, 105735}, // __builtin_altivec_vmulhud
      {Intrinsic::ppc_altivec_vmulhuw, 105761}, // __builtin_altivec_vmulhuw
      {Intrinsic::ppc_altivec_vmulosb, 105787}, // __builtin_altivec_vmulosb
      {Intrinsic::ppc_altivec_vmulosd, 105813}, // __builtin_altivec_vmulosd
      {Intrinsic::ppc_altivec_vmulosh, 105839}, // __builtin_altivec_vmulosh
      {Intrinsic::ppc_altivec_vmulosw, 105865}, // __builtin_altivec_vmulosw
      {Intrinsic::ppc_altivec_vmuloub, 105891}, // __builtin_altivec_vmuloub
      {Intrinsic::ppc_altivec_vmuloud, 105917}, // __builtin_altivec_vmuloud
      {Intrinsic::ppc_altivec_vmulouh, 105943}, // __builtin_altivec_vmulouh
      {Intrinsic::ppc_altivec_vmulouw, 105969}, // __builtin_altivec_vmulouw
      {Intrinsic::ppc_altivec_vnmsubfp, 105995}, // __builtin_altivec_vnmsubfp
      {Intrinsic::ppc_altivec_vpdepd, 106022}, // __builtin_altivec_vpdepd
      {Intrinsic::ppc_altivec_vperm, 106047}, // __builtin_altivec_vperm_4si
      {Intrinsic::ppc_altivec_vpextd, 106075}, // __builtin_altivec_vpextd
      {Intrinsic::ppc_altivec_vpkpx, 106100}, // __builtin_altivec_vpkpx
      {Intrinsic::ppc_altivec_vpksdss, 106124}, // __builtin_altivec_vpksdss
      {Intrinsic::ppc_altivec_vpksdus, 106150}, // __builtin_altivec_vpksdus
      {Intrinsic::ppc_altivec_vpkshss, 106176}, // __builtin_altivec_vpkshss
      {Intrinsic::ppc_altivec_vpkshus, 106202}, // __builtin_altivec_vpkshus
      {Intrinsic::ppc_altivec_vpkswss, 106228}, // __builtin_altivec_vpkswss
      {Intrinsic::ppc_altivec_vpkswus, 106254}, // __builtin_altivec_vpkswus
      {Intrinsic::ppc_altivec_vpkudus, 106280}, // __builtin_altivec_vpkudus
      {Intrinsic::ppc_altivec_vpkuhus, 106306}, // __builtin_altivec_vpkuhus
      {Intrinsic::ppc_altivec_vpkuwus, 106332}, // __builtin_altivec_vpkuwus
      {Intrinsic::ppc_altivec_vprtybd, 106358}, // __builtin_altivec_vprtybd
      {Intrinsic::ppc_altivec_vprtybq, 106384}, // __builtin_altivec_vprtybq
      {Intrinsic::ppc_altivec_vprtybw, 106410}, // __builtin_altivec_vprtybw
      {Intrinsic::ppc_altivec_vrefp, 106436}, // __builtin_altivec_vrefp
      {Intrinsic::ppc_altivec_vrfim, 106460}, // __builtin_altivec_vrfim
      {Intrinsic::ppc_altivec_vrfin, 106484}, // __builtin_altivec_vrfin
      {Intrinsic::ppc_altivec_vrfip, 106508}, // __builtin_altivec_vrfip
      {Intrinsic::ppc_altivec_vrfiz, 106532}, // __builtin_altivec_vrfiz
      {Intrinsic::ppc_altivec_vrlb, 106556}, // __builtin_altivec_vrlb
      {Intrinsic::ppc_altivec_vrld, 106579}, // __builtin_altivec_vrld
      {Intrinsic::ppc_altivec_vrldmi, 106602}, // __builtin_altivec_vrldmi
      {Intrinsic::ppc_altivec_vrldnm, 106627}, // __builtin_altivec_vrldnm
      {Intrinsic::ppc_altivec_vrlh, 106652}, // __builtin_altivec_vrlh
      {Intrinsic::ppc_altivec_vrlqmi, 106675}, // __builtin_altivec_vrlqmi
      {Intrinsic::ppc_altivec_vrlqnm, 106700}, // __builtin_altivec_vrlqnm
      {Intrinsic::ppc_altivec_vrlw, 106725}, // __builtin_altivec_vrlw
      {Intrinsic::ppc_altivec_vrlwmi, 106748}, // __builtin_altivec_vrlwmi
      {Intrinsic::ppc_altivec_vrlwnm, 106773}, // __builtin_altivec_vrlwnm
      {Intrinsic::ppc_altivec_vrsqrtefp, 106798}, // __builtin_altivec_vrsqrtefp
      {Intrinsic::ppc_altivec_vsel, 106826}, // __builtin_altivec_vsel_4si
      {Intrinsic::ppc_altivec_vsl, 106853}, // __builtin_altivec_vsl
      {Intrinsic::ppc_altivec_vslb, 106875}, // __builtin_altivec_vslb
      {Intrinsic::ppc_altivec_vsldbi, 106898}, // __builtin_altivec_vsldbi
      {Intrinsic::ppc_altivec_vslh, 106923}, // __builtin_altivec_vslh
      {Intrinsic::ppc_altivec_vslo, 106946}, // __builtin_altivec_vslo
      {Intrinsic::ppc_altivec_vslv, 106969}, // __builtin_altivec_vslv
      {Intrinsic::ppc_altivec_vslw, 106992}, // __builtin_altivec_vslw
      {Intrinsic::ppc_altivec_vsr, 107015}, // __builtin_altivec_vsr
      {Intrinsic::ppc_altivec_vsrab, 107037}, // __builtin_altivec_vsrab
      {Intrinsic::ppc_altivec_vsrah, 107061}, // __builtin_altivec_vsrah
      {Intrinsic::ppc_altivec_vsraw, 107085}, // __builtin_altivec_vsraw
      {Intrinsic::ppc_altivec_vsrb, 107109}, // __builtin_altivec_vsrb
      {Intrinsic::ppc_altivec_vsrdbi, 107132}, // __builtin_altivec_vsrdbi
      {Intrinsic::ppc_altivec_vsrh, 107157}, // __builtin_altivec_vsrh
      {Intrinsic::ppc_altivec_vsro, 107180}, // __builtin_altivec_vsro
      {Intrinsic::ppc_altivec_vsrv, 107203}, // __builtin_altivec_vsrv
      {Intrinsic::ppc_altivec_vsrw, 107226}, // __builtin_altivec_vsrw
      {Intrinsic::ppc_altivec_vstribl, 107249}, // __builtin_altivec_vstribl
      {Intrinsic::ppc_altivec_vstribl_p, 107275}, // __builtin_altivec_vstribl_p
      {Intrinsic::ppc_altivec_vstribr, 107303}, // __builtin_altivec_vstribr
      {Intrinsic::ppc_altivec_vstribr_p, 107329}, // __builtin_altivec_vstribr_p
      {Intrinsic::ppc_altivec_vstrihl, 107357}, // __builtin_altivec_vstrihl
      {Intrinsic::ppc_altivec_vstrihl_p, 107383}, // __builtin_altivec_vstrihl_p
      {Intrinsic::ppc_altivec_vstrihr, 107411}, // __builtin_altivec_vstrihr
      {Intrinsic::ppc_altivec_vstrihr_p, 107437}, // __builtin_altivec_vstrihr_p
      {Intrinsic::ppc_altivec_vsubcuq, 107465}, // __builtin_altivec_vsubcuq
      {Intrinsic::ppc_altivec_vsubcuw, 107491}, // __builtin_altivec_vsubcuw
      {Intrinsic::ppc_altivec_vsubecuq, 107517}, // __builtin_altivec_vsubecuq
      {Intrinsic::ppc_altivec_vsubeuqm, 107544}, // __builtin_altivec_vsubeuqm
      {Intrinsic::ppc_altivec_vsubsbs, 107571}, // __builtin_altivec_vsubsbs
      {Intrinsic::ppc_altivec_vsubshs, 107597}, // __builtin_altivec_vsubshs
      {Intrinsic::ppc_altivec_vsubsws, 107623}, // __builtin_altivec_vsubsws
      {Intrinsic::ppc_altivec_vsububs, 107649}, // __builtin_altivec_vsububs
      {Intrinsic::ppc_altivec_vsubuhs, 107675}, // __builtin_altivec_vsubuhs
      {Intrinsic::ppc_altivec_vsubuws, 107701}, // __builtin_altivec_vsubuws
      {Intrinsic::ppc_altivec_vsum2sws, 107727}, // __builtin_altivec_vsum2sws
      {Intrinsic::ppc_altivec_vsum4sbs, 107754}, // __builtin_altivec_vsum4sbs
      {Intrinsic::ppc_altivec_vsum4shs, 107781}, // __builtin_altivec_vsum4shs
      {Intrinsic::ppc_altivec_vsum4ubs, 107808}, // __builtin_altivec_vsum4ubs
      {Intrinsic::ppc_altivec_vsumsws, 107835}, // __builtin_altivec_vsumsws
      {Intrinsic::ppc_altivec_vupkhpx, 107861}, // __builtin_altivec_vupkhpx
      {Intrinsic::ppc_altivec_vupkhsb, 107887}, // __builtin_altivec_vupkhsb
      {Intrinsic::ppc_altivec_vupkhsh, 107913}, // __builtin_altivec_vupkhsh
      {Intrinsic::ppc_altivec_vupkhsw, 107939}, // __builtin_altivec_vupkhsw
      {Intrinsic::ppc_altivec_vupklpx, 107965}, // __builtin_altivec_vupklpx
      {Intrinsic::ppc_altivec_vupklsb, 107991}, // __builtin_altivec_vupklsb
      {Intrinsic::ppc_altivec_vupklsh, 108017}, // __builtin_altivec_vupklsh
      {Intrinsic::ppc_altivec_vupklsw, 108043}, // __builtin_altivec_vupklsw
      {Intrinsic::ppc_bpermd, 108157}, // __builtin_bpermd
      {Intrinsic::ppc_cfuged, 108174}, // __builtin_cfuged
      {Intrinsic::ppc_cntlzdm, 108232}, // __builtin_cntlzdm
      {Intrinsic::ppc_cnttzdm, 108250}, // __builtin_cnttzdm
      {Intrinsic::ppc_darn, 108384}, // __builtin_darn
      {Intrinsic::ppc_darn32, 108399}, // __builtin_darn_32
      {Intrinsic::ppc_darnraw, 108417}, // __builtin_darn_raw
      {Intrinsic::ppc_dcbf, 108436}, // __builtin_dcbf
      {Intrinsic::ppc_divde, 108613}, // __builtin_divde
      {Intrinsic::ppc_divdeu, 108629}, // __builtin_divdeu
      {Intrinsic::ppc_divf128_round_to_odd, 108646}, // __builtin_divf128_round_to_odd
      {Intrinsic::ppc_divwe, 108677}, // __builtin_divwe
      {Intrinsic::ppc_divweu, 108693}, // __builtin_divweu
      {Intrinsic::ppc_fmaf128_round_to_odd, 108946}, // __builtin_fmaf128_round_to_odd
      {Intrinsic::ppc_get_texasr, 109223}, // __builtin_get_texasr
      {Intrinsic::ppc_get_texasru, 109244}, // __builtin_get_texasru
      {Intrinsic::ppc_get_tfhar, 109266}, // __builtin_get_tfhar
      {Intrinsic::ppc_get_tfiar, 109286}, // __builtin_get_tfiar
      {Intrinsic::ppc_mulf128_round_to_odd, 109704}, // __builtin_mulf128_round_to_odd
      {Intrinsic::ppc_pack_longdouble, 109817}, // __builtin_pack_longdouble
      {Intrinsic::ppc_pdepd, 109843}, // __builtin_pdepd
      {Intrinsic::ppc_pextd, 109859}, // __builtin_pextd
      {Intrinsic::ppc_addex, 100259}, // __builtin_ppc_addex
      {Intrinsic::ppc_bcdadd, 108069}, // __builtin_ppc_bcdadd
      {Intrinsic::ppc_bcdadd_p, 108090}, // __builtin_ppc_bcdadd_p
      {Intrinsic::ppc_bcdsub, 108113}, // __builtin_ppc_bcdsub
      {Intrinsic::ppc_bcdsub_p, 108134}, // __builtin_ppc_bcdsub_p
      {Intrinsic::ppc_cmpeqb, 108191}, // __builtin_ppc_cmpeqb
      {Intrinsic::ppc_cmprb, 108212}, // __builtin_ppc_cmprb
      {Intrinsic::ppc_compare_exp_eq, 108268}, // __builtin_ppc_compare_exp_eq
      {Intrinsic::ppc_compare_exp_gt, 108297}, // __builtin_ppc_compare_exp_gt
      {Intrinsic::ppc_compare_exp_lt, 108326}, // __builtin_ppc_compare_exp_lt
      {Intrinsic::ppc_compare_exp_uo, 108355}, // __builtin_ppc_compare_exp_uo
      {Intrinsic::ppc_dcbfl, 108451}, // __builtin_ppc_dcbfl
      {Intrinsic::ppc_dcbflp, 108471}, // __builtin_ppc_dcbflp
      {Intrinsic::ppc_dcbst, 108492}, // __builtin_ppc_dcbst
      {Intrinsic::ppc_dcbt, 108512}, // __builtin_ppc_dcbt
      {Intrinsic::ppc_dcbtst, 108531}, // __builtin_ppc_dcbtst
      {Intrinsic::ppc_dcbtstt, 108552}, // __builtin_ppc_dcbtstt
      {Intrinsic::ppc_dcbtt, 108574}, // __builtin_ppc_dcbtt
      {Intrinsic::ppc_dcbz, 108594}, // __builtin_ppc_dcbz
      {Intrinsic::ppc_eieio, 108710}, // __builtin_ppc_eieio
      {Intrinsic::ppc_extract_exp, 108730}, // __builtin_ppc_extract_exp
      {Intrinsic::ppc_extract_sig, 108756}, // __builtin_ppc_extract_sig
      {Intrinsic::ppc_fcfid, 108782}, // __builtin_ppc_fcfid
      {Intrinsic::ppc_fcfud, 108802}, // __builtin_ppc_fcfud
      {Intrinsic::ppc_fctid, 108822}, // __builtin_ppc_fctid
      {Intrinsic::ppc_fctidz, 108842}, // __builtin_ppc_fctidz
      {Intrinsic::ppc_fctiw, 108863}, // __builtin_ppc_fctiw
      {Intrinsic::ppc_fctiwz, 108883}, // __builtin_ppc_fctiwz
      {Intrinsic::ppc_fctudz, 108904}, // __builtin_ppc_fctudz
      {Intrinsic::ppc_fctuwz, 108925}, // __builtin_ppc_fctuwz
      {Intrinsic::ppc_fmsub, 108977}, // __builtin_ppc_fmsub
      {Intrinsic::ppc_fmsubs, 108997}, // __builtin_ppc_fmsubs
      {Intrinsic::ppc_fnabs, 109018}, // __builtin_ppc_fnabs
      {Intrinsic::ppc_fnabss, 109038}, // __builtin_ppc_fnabss
      {Intrinsic::ppc_fnmadd, 109059}, // __builtin_ppc_fnmadd
      {Intrinsic::ppc_fnmadds, 109080}, // __builtin_ppc_fnmadds
      {Intrinsic::ppc_fre, 109102}, // __builtin_ppc_fre
      {Intrinsic::ppc_fres, 109120}, // __builtin_ppc_fres
      {Intrinsic::ppc_frsqrte, 109139}, // __builtin_ppc_frsqrte
      {Intrinsic::ppc_frsqrtes, 109161}, // __builtin_ppc_frsqrtes
      {Intrinsic::ppc_fsel, 109184}, // __builtin_ppc_fsel
      {Intrinsic::ppc_fsels, 109203}, // __builtin_ppc_fsels
      {Intrinsic::ppc_icbt, 109306}, // __builtin_ppc_icbt
      {Intrinsic::ppc_insert_exp, 109325}, // __builtin_ppc_insert_exp
      {Intrinsic::ppc_iospace_eieio, 109350}, // __builtin_ppc_iospace_eieio
      {Intrinsic::ppc_iospace_lwsync, 109378}, // __builtin_ppc_iospace_lwsync
      {Intrinsic::ppc_iospace_sync, 109407}, // __builtin_ppc_iospace_sync
      {Intrinsic::ppc_isync, 109434}, // __builtin_ppc_isync
      {Intrinsic::ppc_load4r, 109454}, // __builtin_ppc_load4r
      {Intrinsic::ppc_load8r, 109475}, // __builtin_ppc_load8r
      {Intrinsic::ppc_lwsync, 109496}, // __builtin_ppc_lwsync
      {Intrinsic::ppc_maddhd, 109517}, // __builtin_ppc_maddhd
      {Intrinsic::ppc_maddhdu, 109538}, // __builtin_ppc_maddhdu
      {Intrinsic::ppc_maddld, 109560}, // __builtin_ppc_maddld
      {Intrinsic::ppc_mfmsr, 109581}, // __builtin_ppc_mfmsr
      {Intrinsic::ppc_mftbu, 109601}, // __builtin_ppc_mftbu
      {Intrinsic::ppc_mtfsb0, 109621}, // __builtin_ppc_mtfsb0
      {Intrinsic::ppc_mtfsb1, 109642}, // __builtin_ppc_mtfsb1
      {Intrinsic::ppc_mtfsfi, 109663}, // __builtin_ppc_mtfsfi
      {Intrinsic::ppc_mtmsr, 109684}, // __builtin_ppc_mtmsr
      {Intrinsic::ppc_mulhd, 109735}, // __builtin_ppc_mulhd
      {Intrinsic::ppc_mulhdu, 109755}, // __builtin_ppc_mulhdu
      {Intrinsic::ppc_mulhw, 109776}, // __builtin_ppc_mulhw
      {Intrinsic::ppc_mulhwu, 109796}, // __builtin_ppc_mulhwu
      {Intrinsic::ppc_setb, 110045}, // __builtin_ppc_setb
      {Intrinsic::ppc_stbcx, 110130}, // __builtin_ppc_stbcx
      {Intrinsic::ppc_stdcx, 110150}, // __builtin_ppc_stdcx
      {Intrinsic::ppc_stfiw, 110170}, // __builtin_ppc_stfiw
      {Intrinsic::ppc_store2r, 110190}, // __builtin_ppc_store2r
      {Intrinsic::ppc_store4r, 110212}, // __builtin_ppc_store4r
      {Intrinsic::ppc_store8r, 110234}, // __builtin_ppc_store8r
      {Intrinsic::ppc_stwcx, 110256}, // __builtin_ppc_stwcx
      {Intrinsic::ppc_sync, 110307}, // __builtin_ppc_sync
      {Intrinsic::ppc_tdw, 110455}, // __builtin_ppc_tdw
      {Intrinsic::ppc_trap, 110506}, // __builtin_ppc_trap
      {Intrinsic::ppc_trapd, 110525}, // __builtin_ppc_trapd
      {Intrinsic::ppc_tw, 110683}, // __builtin_ppc_tw
      {Intrinsic::ppc_readflm, 109875}, // __builtin_readflm
      {Intrinsic::ppc_set_texasr, 109962}, // __builtin_set_texasr
      {Intrinsic::ppc_set_texasru, 109983}, // __builtin_set_texasru
      {Intrinsic::ppc_set_tfhar, 110005}, // __builtin_set_tfhar
      {Intrinsic::ppc_set_tfiar, 110025}, // __builtin_set_tfiar
      {Intrinsic::ppc_setflm, 110064}, // __builtin_setflm
      {Intrinsic::ppc_setrnd, 110081}, // __builtin_setrnd
      {Intrinsic::ppc_sqrtf128_round_to_odd, 110098}, // __builtin_sqrtf128_round_to_odd
      {Intrinsic::ppc_subf128_round_to_odd, 110276}, // __builtin_subf128_round_to_odd
      {Intrinsic::ppc_tabort, 110326}, // __builtin_tabort
      {Intrinsic::ppc_tabortdc, 110343}, // __builtin_tabortdc
      {Intrinsic::ppc_tabortdci, 110362}, // __builtin_tabortdci
      {Intrinsic::ppc_tabortwc, 110382}, // __builtin_tabortwc
      {Intrinsic::ppc_tabortwci, 110401}, // __builtin_tabortwci
      {Intrinsic::ppc_tbegin, 110421}, // __builtin_tbegin
      {Intrinsic::ppc_tcheck, 110438}, // __builtin_tcheck
      {Intrinsic::ppc_tend, 110473}, // __builtin_tend
      {Intrinsic::ppc_tendall, 110488}, // __builtin_tendall
      {Intrinsic::ppc_trechkpt, 110545}, // __builtin_trechkpt
      {Intrinsic::ppc_treclaim, 110564}, // __builtin_treclaim
      {Intrinsic::ppc_tresume, 110583}, // __builtin_tresume
      {Intrinsic::ppc_truncf128_round_to_odd, 110601}, // __builtin_truncf128_round_to_odd
      {Intrinsic::ppc_tsr, 110634}, // __builtin_tsr
      {Intrinsic::ppc_tsuspend, 110648}, // __builtin_tsuspend
      {Intrinsic::ppc_ttest, 110667}, // __builtin_ttest
      {Intrinsic::ppc_unpack_longdouble, 110700}, // __builtin_unpack_longdouble
      {Intrinsic::ppc_scalar_extract_expq, 109893}, // __builtin_vsx_scalar_extract_expq
      {Intrinsic::ppc_scalar_insert_exp_qp, 109927}, // __builtin_vsx_scalar_insert_exp_qp
      {Intrinsic::ppc_vsx_xsmaxdp, 110728}, // __builtin_vsx_xsmaxdp
      {Intrinsic::ppc_vsx_xsmindp, 110750}, // __builtin_vsx_xsmindp
      {Intrinsic::ppc_vsx_xvcmpeqdp, 110772}, // __builtin_vsx_xvcmpeqdp
      {Intrinsic::ppc_vsx_xvcmpeqdp_p, 110796}, // __builtin_vsx_xvcmpeqdp_p
      {Intrinsic::ppc_vsx_xvcmpeqsp, 110822}, // __builtin_vsx_xvcmpeqsp
      {Intrinsic::ppc_vsx_xvcmpeqsp_p, 110846}, // __builtin_vsx_xvcmpeqsp_p
      {Intrinsic::ppc_vsx_xvcmpgedp, 110872}, // __builtin_vsx_xvcmpgedp
      {Intrinsic::ppc_vsx_xvcmpgedp_p, 110896}, // __builtin_vsx_xvcmpgedp_p
      {Intrinsic::ppc_vsx_xvcmpgesp, 110922}, // __builtin_vsx_xvcmpgesp
      {Intrinsic::ppc_vsx_xvcmpgesp_p, 110946}, // __builtin_vsx_xvcmpgesp_p
      {Intrinsic::ppc_vsx_xvcmpgtdp, 110972}, // __builtin_vsx_xvcmpgtdp
      {Intrinsic::ppc_vsx_xvcmpgtdp_p, 110996}, // __builtin_vsx_xvcmpgtdp_p
      {Intrinsic::ppc_vsx_xvcmpgtsp, 111022}, // __builtin_vsx_xvcmpgtsp
      {Intrinsic::ppc_vsx_xvcmpgtsp_p, 111046}, // __builtin_vsx_xvcmpgtsp_p
      {Intrinsic::ppc_vsx_xvcvbf16spn, 111072}, // __builtin_vsx_xvcvbf16spn
      {Intrinsic::ppc_vsx_xvcvdpsp, 111098}, // __builtin_vsx_xvcvdpsp
      {Intrinsic::ppc_vsx_xvcvdpsxws, 111121}, // __builtin_vsx_xvcvdpsxws
      {Intrinsic::ppc_vsx_xvcvdpuxws, 111146}, // __builtin_vsx_xvcvdpuxws
      {Intrinsic::ppc_vsx_xvcvhpsp, 111171}, // __builtin_vsx_xvcvhpsp
      {Intrinsic::ppc_vsx_xvcvspbf16, 111194}, // __builtin_vsx_xvcvspbf16
      {Intrinsic::ppc_vsx_xvcvspdp, 111219}, // __builtin_vsx_xvcvspdp
      {Intrinsic::ppc_vsx_xvcvsphp, 111242}, // __builtin_vsx_xvcvsphp
      {Intrinsic::ppc_vsx_xvcvspsxds, 111265}, // __builtin_vsx_xvcvspsxds
      {Intrinsic::ppc_vsx_xvcvspuxds, 111290}, // __builtin_vsx_xvcvspuxds
      {Intrinsic::ppc_vsx_xvcvsxdsp, 111315}, // __builtin_vsx_xvcvsxdsp
      {Intrinsic::ppc_vsx_xvcvsxwdp, 111339}, // __builtin_vsx_xvcvsxwdp
      {Intrinsic::ppc_vsx_xvcvuxdsp, 111363}, // __builtin_vsx_xvcvuxdsp
      {Intrinsic::ppc_vsx_xvcvuxwdp, 111387}, // __builtin_vsx_xvcvuxwdp
      {Intrinsic::ppc_vsx_xvdivdp, 111411}, // __builtin_vsx_xvdivdp
      {Intrinsic::ppc_vsx_xvdivsp, 111433}, // __builtin_vsx_xvdivsp
      {Intrinsic::ppc_vsx_xviexpdp, 111455}, // __builtin_vsx_xviexpdp
      {Intrinsic::ppc_vsx_xviexpsp, 111478}, // __builtin_vsx_xviexpsp
      {Intrinsic::ppc_vsx_xvmaxdp, 111501}, // __builtin_vsx_xvmaxdp
      {Intrinsic::ppc_vsx_xvmaxsp, 111523}, // __builtin_vsx_xvmaxsp
      {Intrinsic::ppc_vsx_xvmindp, 111545}, // __builtin_vsx_xvmindp
      {Intrinsic::ppc_vsx_xvminsp, 111567}, // __builtin_vsx_xvminsp
      {Intrinsic::ppc_vsx_xvredp, 111589}, // __builtin_vsx_xvredp
      {Intrinsic::ppc_vsx_xvresp, 111610}, // __builtin_vsx_xvresp
      {Intrinsic::ppc_vsx_xvrsqrtedp, 111631}, // __builtin_vsx_xvrsqrtedp
      {Intrinsic::ppc_vsx_xvrsqrtesp, 111656}, // __builtin_vsx_xvrsqrtesp
      {Intrinsic::ppc_vsx_xvtdivdp, 111681}, // __builtin_vsx_xvtdivdp
      {Intrinsic::ppc_vsx_xvtdivsp, 111704}, // __builtin_vsx_xvtdivsp
      {Intrinsic::ppc_vsx_xvtlsbb, 111727}, // __builtin_vsx_xvtlsbb
      {Intrinsic::ppc_vsx_xvtsqrtdp, 111749}, // __builtin_vsx_xvtsqrtdp
      {Intrinsic::ppc_vsx_xvtsqrtsp, 111773}, // __builtin_vsx_xvtsqrtsp
      {Intrinsic::ppc_vsx_xvtstdcdp, 111797}, // __builtin_vsx_xvtstdcdp
      {Intrinsic::ppc_vsx_xvtstdcsp, 111821}, // __builtin_vsx_xvtstdcsp
      {Intrinsic::ppc_vsx_xvxexpdp, 111845}, // __builtin_vsx_xvxexpdp
      {Intrinsic::ppc_vsx_xvxexpsp, 111868}, // __builtin_vsx_xvxexpsp
      {Intrinsic::ppc_vsx_xvxsigdp, 111891}, // __builtin_vsx_xvxsigdp
      {Intrinsic::ppc_vsx_xvxsigsp, 111914}, // __builtin_vsx_xvxsigsp
      {Intrinsic::ppc_vsx_xxblendvb, 111937}, // __builtin_vsx_xxblendvb
      {Intrinsic::ppc_vsx_xxblendvd, 111961}, // __builtin_vsx_xxblendvd
      {Intrinsic::ppc_vsx_xxblendvh, 111985}, // __builtin_vsx_xxblendvh
      {Intrinsic::ppc_vsx_xxblendvw, 112009}, // __builtin_vsx_xxblendvw
      {Intrinsic::ppc_vsx_xxeval, 112033}, // __builtin_vsx_xxeval
      {Intrinsic::ppc_vsx_xxextractuw, 112054}, // __builtin_vsx_xxextractuw
      {Intrinsic::ppc_vsx_xxgenpcvbm, 112080}, // __builtin_vsx_xxgenpcvbm
      {Intrinsic::ppc_vsx_xxgenpcvdm, 112105}, // __builtin_vsx_xxgenpcvdm
      {Intrinsic::ppc_vsx_xxgenpcvhm, 112130}, // __builtin_vsx_xxgenpcvhm
      {Intrinsic::ppc_vsx_xxgenpcvwm, 112155}, // __builtin_vsx_xxgenpcvwm
      {Intrinsic::ppc_vsx_xxinsertw, 112180}, // __builtin_vsx_xxinsertw
      {Intrinsic::ppc_vsx_xxleqv, 112204}, // __builtin_vsx_xxleqv
      {Intrinsic::ppc_vsx_xxpermx, 112225}, // __builtin_vsx_xxpermx
    };
    auto I = std::lower_bound(std::begin(ppcNames),
                              std::end(ppcNames),
                              BuiltinNameStr);
    if (I != std::end(ppcNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
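  // The ppc table shows the name-ordering rule more starkly: rows such as
  // {Intrinsic::ppc_addex, 100259} sit far out of offset order because
  // "__builtin_ppc_addex" sorts between "__builtin_pextd" and
  // "__builtin_ppc_bcdadd", and alias rows like
  // __builtin_vsx_scalar_extract_expq map back to
  // Intrinsic::ppc_scalar_extract_expq. Only the name column participates in
  // the search; the offset is just an index into the name blob.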
  if (TargetPrefix == "r600") {
    static const BuiltinEntry r600Names[] = {
      {Intrinsic::r600_group_barrier, 112247}, // __builtin_r600_group_barrier
      {Intrinsic::r600_implicitarg_ptr, 112276}, // __builtin_r600_implicitarg_ptr
      {Intrinsic::r600_rat_store_typed, 112307}, // __builtin_r600_rat_store_typed
      {Intrinsic::r600_read_global_size_x, 112338}, // __builtin_r600_read_global_size_x
      {Intrinsic::r600_read_global_size_y, 112372}, // __builtin_r600_read_global_size_y
      {Intrinsic::r600_read_global_size_z, 112406}, // __builtin_r600_read_global_size_z
      {Intrinsic::r600_read_ngroups_x, 112440}, // __builtin_r600_read_ngroups_x
      {Intrinsic::r600_read_ngroups_y, 112470}, // __builtin_r600_read_ngroups_y
      {Intrinsic::r600_read_ngroups_z, 112500}, // __builtin_r600_read_ngroups_z
      {Intrinsic::r600_read_tgid_x, 112530}, // __builtin_r600_read_tgid_x
      {Intrinsic::r600_read_tgid_y, 112557}, // __builtin_r600_read_tgid_y
      {Intrinsic::r600_read_tgid_z, 112584}, // __builtin_r600_read_tgid_z
    };
    auto I = std::lower_bound(std::begin(r600Names),
                              std::end(r600Names),
                              BuiltinNameStr);
    if (I != std::end(r600Names) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
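  // When no per-target block matches (or a block's search misses), control
  // falls through past all of these if-statements; the generated tail,
  // outside this excerpt, presumably returns Intrinsic::not_intrinsic.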
  if (TargetPrefix == "riscv") {
    static const BuiltinEntry riscvNames[] = {
      {Intrinsic::riscv_aes32dsi, 112611}, // __builtin_riscv_aes32dsi
      {Intrinsic::riscv_aes32dsmi, 112636}, // __builtin_riscv_aes32dsmi
      {Intrinsic::riscv_aes32esi, 112662}, // __builtin_riscv_aes32esi
      {Intrinsic::riscv_aes32esmi, 112687}, // __builtin_riscv_aes32esmi
      {Intrinsic::riscv_aes64ds, 112713}, // __builtin_riscv_aes64ds
      {Intrinsic::riscv_aes64dsm, 112737}, // __builtin_riscv_aes64dsm
      {Intrinsic::riscv_aes64es, 112762}, // __builtin_riscv_aes64es
      {Intrinsic::riscv_aes64esm, 112786}, // __builtin_riscv_aes64esm
      {Intrinsic::riscv_aes64im, 112811}, // __builtin_riscv_aes64im
      {Intrinsic::riscv_aes64ks1i, 112835}, // __builtin_riscv_aes64ks1i
      {Intrinsic::riscv_aes64ks2, 112861}, // __builtin_riscv_aes64ks2
      {Intrinsic::riscv_sha512sig0, 112886}, // __builtin_riscv_sha512sig0
      {Intrinsic::riscv_sha512sig0h, 112913}, // __builtin_riscv_sha512sig0h
      {Intrinsic::riscv_sha512sig0l, 112941}, // __builtin_riscv_sha512sig0l
      {Intrinsic::riscv_sha512sig1, 112969}, // __builtin_riscv_sha512sig1
      {Intrinsic::riscv_sha512sig1h, 112996}, // __builtin_riscv_sha512sig1h
      {Intrinsic::riscv_sha512sig1l, 113024}, // __builtin_riscv_sha512sig1l
      {Intrinsic::riscv_sha512sum0, 113052}, // __builtin_riscv_sha512sum0
      {Intrinsic::riscv_sha512sum0r, 113079}, // __builtin_riscv_sha512sum0r
      {Intrinsic::riscv_sha512sum1, 113107}, // __builtin_riscv_sha512sum1
      {Intrinsic::riscv_sha512sum1r, 113134}, // __builtin_riscv_sha512sum1r
    };
    auto I = std::lower_bound(std::begin(riscvNames),
                              std::end(riscvNames),
                              BuiltinNameStr);
    if (I != std::end(riscvNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
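  // A minimal caller-side sketch, assuming this is the body of the generated
  // Intrinsic::getIntrinsicForClangBuiltin (the enclosing signature is not
  // visible in this excerpt):
  //
  //   // e.g. in a frontend mapping a Clang builtin to a target intrinsic:
  //   Intrinsic::ID ID =
  //       Intrinsic::getIntrinsicForClangBuiltin("riscv",
  //                                              "__builtin_riscv_aes64es");
  //   // On a hit, ID == Intrinsic::riscv_aes64es; on a miss the function
  //   // is expected to yield Intrinsic::not_intrinsic.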
  if (TargetPrefix == "s390") {
    static const BuiltinEntry s390Names[] = {
      {Intrinsic::s390_efpc, 113162}, // __builtin_s390_efpc
      {Intrinsic::s390_lcbb, 113209}, // __builtin_s390_lcbb
      {Intrinsic::s390_sfpc, 113249}, // __builtin_s390_sfpc
      {Intrinsic::s390_vaccb, 113269}, // __builtin_s390_vaccb
      {Intrinsic::s390_vacccq, 113290}, // __builtin_s390_vacccq
      {Intrinsic::s390_vaccf, 113312}, // __builtin_s390_vaccf
      {Intrinsic::s390_vaccg, 113333}, // __builtin_s390_vaccg
      {Intrinsic::s390_vacch, 113354}, // __builtin_s390_vacch
      {Intrinsic::s390_vaccq, 113375}, // __builtin_s390_vaccq
      {Intrinsic::s390_vacq, 113396}, // __builtin_s390_vacq
      {Intrinsic::s390_vaq, 113416}, // __builtin_s390_vaq
      {Intrinsic::s390_vavgb, 113435}, // __builtin_s390_vavgb
      {Intrinsic::s390_vavgf, 113456}, // __builtin_s390_vavgf
      {Intrinsic::s390_vavgg, 113477}, // __builtin_s390_vavgg
      {Intrinsic::s390_vavgh, 113498}, // __builtin_s390_vavgh
      {Intrinsic::s390_vavglb, 113519}, // __builtin_s390_vavglb
      {Intrinsic::s390_vavglf, 113541}, // __builtin_s390_vavglf
      {Intrinsic::s390_vavglg, 113563}, // __builtin_s390_vavglg
      {Intrinsic::s390_vavglh, 113585}, // __builtin_s390_vavglh
      {Intrinsic::s390_vbperm, 113607}, // __builtin_s390_vbperm
      {Intrinsic::s390_vcfn, 113629}, // __builtin_s390_vcfn
      {Intrinsic::s390_vcksm, 113649}, // __builtin_s390_vcksm
      {Intrinsic::s390_vclfnhs, 113670}, // __builtin_s390_vclfnhs
      {Intrinsic::s390_vclfnls, 113693}, // __builtin_s390_vclfnls
      {Intrinsic::s390_vcnf, 113716}, // __builtin_s390_vcnf
      {Intrinsic::s390_vcrnfs, 113736}, // __builtin_s390_vcrnfs
      {Intrinsic::s390_verimb, 113758}, // __builtin_s390_verimb
      {Intrinsic::s390_verimf, 113780}, // __builtin_s390_verimf
      {Intrinsic::s390_verimg, 113802}, // __builtin_s390_verimg
      {Intrinsic::s390_verimh, 113824}, // __builtin_s390_verimh
      {Intrinsic::s390_verllb, 113846}, // __builtin_s390_verllb
      {Intrinsic::s390_verllf, 113868}, // __builtin_s390_verllf
      {Intrinsic::s390_verllg, 113890}, // __builtin_s390_verllg
      {Intrinsic::s390_verllh, 113912}, // __builtin_s390_verllh
      {Intrinsic::s390_verllvb, 113934}, // __builtin_s390_verllvb
      {Intrinsic::s390_verllvf, 113957}, // __builtin_s390_verllvf
      {Intrinsic::s390_verllvg, 113980}, // __builtin_s390_verllvg
      {Intrinsic::s390_verllvh, 114003}, // __builtin_s390_verllvh
      {Intrinsic::s390_vfaeb, 114026}, // __builtin_s390_vfaeb
      {Intrinsic::s390_vfaef, 114047}, // __builtin_s390_vfaef
      {Intrinsic::s390_vfaeh, 114068}, // __builtin_s390_vfaeh
      {Intrinsic::s390_vfaezb, 114089}, // __builtin_s390_vfaezb
      {Intrinsic::s390_vfaezf, 114111}, // __builtin_s390_vfaezf
      {Intrinsic::s390_vfaezh, 114133}, // __builtin_s390_vfaezh
      {Intrinsic::s390_vfeeb, 114155}, // __builtin_s390_vfeeb
      {Intrinsic::s390_vfeef, 114176}, // __builtin_s390_vfeef
      {Intrinsic::s390_vfeeh, 114197}, // __builtin_s390_vfeeh
      {Intrinsic::s390_vfeezb, 114218}, // __builtin_s390_vfeezb
      {Intrinsic::s390_vfeezf, 114240}, // __builtin_s390_vfeezf
      {Intrinsic::s390_vfeezh, 114262}, // __builtin_s390_vfeezh
      {Intrinsic::s390_vfeneb, 114284}, // __builtin_s390_vfeneb
      {Intrinsic::s390_vfenef, 114306}, // __builtin_s390_vfenef
      {Intrinsic::s390_vfeneh, 114328}, // __builtin_s390_vfeneh
      {Intrinsic::s390_vfenezb, 114350}, // __builtin_s390_vfenezb
      {Intrinsic::s390_vfenezf, 114373}, // __builtin_s390_vfenezf
      {Intrinsic::s390_vfenezh, 114396}, // __builtin_s390_vfenezh
      {Intrinsic::s390_vgfmab, 114419}, // __builtin_s390_vgfmab
      {Intrinsic::s390_vgfmaf, 114441}, // __builtin_s390_vgfmaf
      {Intrinsic::s390_vgfmag, 114463}, // __builtin_s390_vgfmag
      {Intrinsic::s390_vgfmah, 114485}, // __builtin_s390_vgfmah
      {Intrinsic::s390_vgfmb, 114507}, // __builtin_s390_vgfmb
      {Intrinsic::s390_vgfmf, 114528}, // __builtin_s390_vgfmf
      {Intrinsic::s390_vgfmg, 114549}, // __builtin_s390_vgfmg
      {Intrinsic::s390_vgfmh, 114570}, // __builtin_s390_vgfmh
      {Intrinsic::s390_vistrb, 114591}, // __builtin_s390_vistrb
      {Intrinsic::s390_vistrf, 114613}, // __builtin_s390_vistrf
      {Intrinsic::s390_vistrh, 114635}, // __builtin_s390_vistrh
      {Intrinsic::s390_vlbb, 114657}, // __builtin_s390_vlbb
      {Intrinsic::s390_vll, 114677}, // __builtin_s390_vll
      {Intrinsic::s390_vlrl, 114696}, // __builtin_s390_vlrl
      {Intrinsic::s390_vmaeb, 114716}, // __builtin_s390_vmaeb
      {Intrinsic::s390_vmaef, 114737}, // __builtin_s390_vmaef
      {Intrinsic::s390_vmaeh, 114758}, // __builtin_s390_vmaeh
      {Intrinsic::s390_vmahb, 114779}, // __builtin_s390_vmahb
      {Intrinsic::s390_vmahf, 114800}, // __builtin_s390_vmahf
      {Intrinsic::s390_vmahh, 114821}, // __builtin_s390_vmahh
      {Intrinsic::s390_vmaleb, 114842}, // __builtin_s390_vmaleb
      {Intrinsic::s390_vmalef, 114864}, // __builtin_s390_vmalef
      {Intrinsic::s390_vmaleh, 114886}, // __builtin_s390_vmaleh
      {Intrinsic::s390_vmalhb, 114908}, // __builtin_s390_vmalhb
      {Intrinsic::s390_vmalhf, 114930}, // __builtin_s390_vmalhf
      {Intrinsic::s390_vmalhh, 114952}, // __builtin_s390_vmalhh
      {Intrinsic::s390_vmalob, 114974}, // __builtin_s390_vmalob
      {Intrinsic::s390_vmalof, 114996}, // __builtin_s390_vmalof
      {Intrinsic::s390_vmaloh, 115018}, // __builtin_s390_vmaloh
      {Intrinsic::s390_vmaob, 115040}, // __builtin_s390_vmaob
      {Intrinsic::s390_vmaof, 115061}, // __builtin_s390_vmaof
      {Intrinsic::s390_vmaoh, 115082}, // __builtin_s390_vmaoh
      {Intrinsic::s390_vmeb, 115103}, // __builtin_s390_vmeb
      {Intrinsic::s390_vmef, 115123}, // __builtin_s390_vmef
      {Intrinsic::s390_vmeh, 115143}, // __builtin_s390_vmeh
      {Intrinsic::s390_vmhb, 115163}, // __builtin_s390_vmhb
      {Intrinsic::s390_vmhf, 115183}, // __builtin_s390_vmhf
      {Intrinsic::s390_vmhh, 115203}, // __builtin_s390_vmhh
      {Intrinsic::s390_vmleb, 115223}, // __builtin_s390_vmleb
      {Intrinsic::s390_vmlef, 115244}, // __builtin_s390_vmlef
      {Intrinsic::s390_vmleh, 115265}, // __builtin_s390_vmleh
      {Intrinsic::s390_vmlhb, 115286}, // __builtin_s390_vmlhb
      {Intrinsic::s390_vmlhf, 115307}, // __builtin_s390_vmlhf
      {Intrinsic::s390_vmlhh, 115328}, // __builtin_s390_vmlhh
      {Intrinsic::s390_vmlob, 115349}, // __builtin_s390_vmlob
      {Intrinsic::s390_vmlof, 115370}, // __builtin_s390_vmlof
      {Intrinsic::s390_vmloh, 115391}, // __builtin_s390_vmloh
      {Intrinsic::s390_vmob, 115412}, // __builtin_s390_vmob
      {Intrinsic::s390_vmof, 115432}, // __builtin_s390_vmof
      {Intrinsic::s390_vmoh, 115452}, // __builtin_s390_vmoh
      {Intrinsic::s390_vmslg, 115472}, // __builtin_s390_vmslg
      {Intrinsic::s390_vpdi, 115493}, // __builtin_s390_vpdi
      {Intrinsic::s390_vperm, 115513}, // __builtin_s390_vperm
      {Intrinsic::s390_vpklsf, 115534}, // __builtin_s390_vpklsf
      {Intrinsic::s390_vpklsg, 115556}, // __builtin_s390_vpklsg
      {Intrinsic::s390_vpklsh, 115578}, // __builtin_s390_vpklsh
      {Intrinsic::s390_vpksf, 115600}, // __builtin_s390_vpksf
      {Intrinsic::s390_vpksg, 115621}, // __builtin_s390_vpksg
      {Intrinsic::s390_vpksh, 115642}, // __builtin_s390_vpksh
      {Intrinsic::s390_vsbcbiq, 115663}, // __builtin_s390_vsbcbiq
      {Intrinsic::s390_vsbiq, 115686}, // __builtin_s390_vsbiq
      {Intrinsic::s390_vscbib, 115707}, // __builtin_s390_vscbib
      {Intrinsic::s390_vscbif, 115729}, // __builtin_s390_vscbif
      {Intrinsic::s390_vscbig, 115751}, // __builtin_s390_vscbig
      {Intrinsic::s390_vscbih, 115773}, // __builtin_s390_vscbih
      {Intrinsic::s390_vscbiq, 115795}, // __builtin_s390_vscbiq
      {Intrinsic::s390_vsl, 115817}, // __builtin_s390_vsl
      {Intrinsic::s390_vslb, 115836}, // __builtin_s390_vslb
      {Intrinsic::s390_vsld, 115856}, // __builtin_s390_vsld
      {Intrinsic::s390_vsldb, 115876}, // __builtin_s390_vsldb
      {Intrinsic::s390_vsq, 115897}, // __builtin_s390_vsq
      {Intrinsic::s390_vsra, 115916}, // __builtin_s390_vsra
      {Intrinsic::s390_vsrab, 115936}, // __builtin_s390_vsrab
      {Intrinsic::s390_vsrd, 115957}, // __builtin_s390_vsrd
      {Intrinsic::s390_vsrl, 115977}, // __builtin_s390_vsrl
      {Intrinsic::s390_vsrlb, 115997}, // __builtin_s390_vsrlb
      {Intrinsic::s390_vstl, 116018}, // __builtin_s390_vstl
      {Intrinsic::s390_vstrcb, 116038}, // __builtin_s390_vstrcb
      {Intrinsic::s390_vstrcf, 116060}, // __builtin_s390_vstrcf
      {Intrinsic::s390_vstrch, 116082}, // __builtin_s390_vstrch
      {Intrinsic::s390_vstrczb, 116104}, // __builtin_s390_vstrczb
      {Intrinsic::s390_vstrczf, 116127}, // __builtin_s390_vstrczf
      {Intrinsic::s390_vstrczh, 116150}, // __builtin_s390_vstrczh
      {Intrinsic::s390_vstrl, 116173}, // __builtin_s390_vstrl
      {Intrinsic::s390_vsumb, 116194}, // __builtin_s390_vsumb
      {Intrinsic::s390_vsumgf, 116215}, // __builtin_s390_vsumgf
      {Intrinsic::s390_vsumgh, 116237}, // __builtin_s390_vsumgh
      {Intrinsic::s390_vsumh, 116259}, // __builtin_s390_vsumh
      {Intrinsic::s390_vsumqf, 116280}, // __builtin_s390_vsumqf
      {Intrinsic::s390_vsumqg, 116302}, // __builtin_s390_vsumqg
      {Intrinsic::s390_vtm, 116324}, // __builtin_s390_vtm
      {Intrinsic::s390_vuphb, 116343}, // __builtin_s390_vuphb
      {Intrinsic::s390_vuphf, 116364}, // __builtin_s390_vuphf
      {Intrinsic::s390_vuphh, 116385}, // __builtin_s390_vuphh
      {Intrinsic::s390_vuplb, 116406}, // __builtin_s390_vuplb
      {Intrinsic::s390_vuplf, 116427}, // __builtin_s390_vuplf
      {Intrinsic::s390_vuplhb, 116448}, // __builtin_s390_vuplhb
      {Intrinsic::s390_vuplhf, 116470}, // __builtin_s390_vuplhf
      {Intrinsic::s390_vuplhh, 116492}, // __builtin_s390_vuplhh
      {Intrinsic::s390_vuplhw, 116514}, // __builtin_s390_vuplhw
      {Intrinsic::s390_vupllb, 116536}, // __builtin_s390_vupllb
      {Intrinsic::s390_vupllf, 116558}, // __builtin_s390_vupllf
      {Intrinsic::s390_vupllh, 116580}, // __builtin_s390_vupllh
      {Intrinsic::s390_tend, 110473}, // __builtin_tend
      {Intrinsic::s390_ppa_txassist, 113229}, // __builtin_tx_assist
      {Intrinsic::s390_etnd, 113182}, // __builtin_tx_nesting_depth
    };
    auto I = std::lower_bound(std::begin(s390Names),
                              std::end(s390Names),
                              BuiltinNameStr);
    if (I != std::end(s390Names) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
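  // Two details visible in the s390 table: its final rows (__builtin_tend,
  // __builtin_tx_assist, __builtin_tx_nesting_depth) lack the
  // __builtin_s390_ prefix yet still sort correctly by full name, and
  // __builtin_tend reuses offset 110473 from the ppc table above, which
  // suggests the shared string table de-duplicates identical builtin names.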
  if (TargetPrefix == "ve") {
    static const BuiltinEntry veNames[] = {
      {Intrinsic::ve_vl_andm_MMM, 116602}, // __builtin_ve_vl_andm_MMM
      {Intrinsic::ve_vl_andm_mmm, 116627}, // __builtin_ve_vl_andm_mmm
      {Intrinsic::ve_vl_eqvm_MMM, 116652}, // __builtin_ve_vl_eqvm_MMM
      {Intrinsic::ve_vl_eqvm_mmm, 116677}, // __builtin_ve_vl_eqvm_mmm
      {Intrinsic::ve_vl_extract_vm512l, 116702}, // __builtin_ve_vl_extract_vm512l
      {Intrinsic::ve_vl_extract_vm512u, 116733}, // __builtin_ve_vl_extract_vm512u
      {Intrinsic::ve_vl_fencec_s, 116764}, // __builtin_ve_vl_fencec_s
      {Intrinsic::ve_vl_fencei, 116789}, // __builtin_ve_vl_fencei
      {Intrinsic::ve_vl_fencem_s, 116812}, // __builtin_ve_vl_fencem_s
      {Intrinsic::ve_vl_fidcr_sss, 116837}, // __builtin_ve_vl_fidcr_sss
      {Intrinsic::ve_vl_insert_vm512l, 116863}, // __builtin_ve_vl_insert_vm512l
      {Intrinsic::ve_vl_insert_vm512u, 116893}, // __builtin_ve_vl_insert_vm512u
      {Intrinsic::ve_vl_lcr_sss, 116923}, // __builtin_ve_vl_lcr_sss
      {Intrinsic::ve_vl_lsv_vvss, 116947}, // __builtin_ve_vl_lsv_vvss
      {Intrinsic::ve_vl_lvm_MMss, 116972}, // __builtin_ve_vl_lvm_MMss
      {Intrinsic::ve_vl_lvm_mmss, 116997}, // __builtin_ve_vl_lvm_mmss
      {Intrinsic::ve_vl_lvsd_svs, 117022}, // __builtin_ve_vl_lvsd_svs
      {Intrinsic::ve_vl_lvsl_svs, 117047}, // __builtin_ve_vl_lvsl_svs
      {Intrinsic::ve_vl_lvss_svs, 117072}, // __builtin_ve_vl_lvss_svs
      {Intrinsic::ve_vl_lzvm_sml, 117097}, // __builtin_ve_vl_lzvm_sml
      {Intrinsic::ve_vl_negm_MM, 117122}, // __builtin_ve_vl_negm_MM
      {Intrinsic::ve_vl_negm_mm, 117146}, // __builtin_ve_vl_negm_mm
      {Intrinsic::ve_vl_nndm_MMM, 117170}, // __builtin_ve_vl_nndm_MMM
      {Intrinsic::ve_vl_nndm_mmm, 117195}, // __builtin_ve_vl_nndm_mmm
      {Intrinsic::ve_vl_orm_MMM, 117220}, // __builtin_ve_vl_orm_MMM
      {Intrinsic::ve_vl_orm_mmm, 117244}, // __builtin_ve_vl_orm_mmm
      {Intrinsic::ve_vl_pack_f32a, 117268}, // __builtin_ve_vl_pack_f32a
      {Intrinsic::ve_vl_pack_f32p, 117294}, // __builtin_ve_vl_pack_f32p
      {Intrinsic::ve_vl_pcvm_sml, 117320}, // __builtin_ve_vl_pcvm_sml
      {Intrinsic::ve_vl_pfchv_ssl, 117345}, // __builtin_ve_vl_pfchv_ssl
      {Intrinsic::ve_vl_pfchvnc_ssl, 117371}, // __builtin_ve_vl_pfchvnc_ssl
      {Intrinsic::ve_vl_pvadds_vsvMvl, 117399}, // __builtin_ve_vl_pvadds_vsvMvl
      {Intrinsic::ve_vl_pvadds_vsvl, 117429}, // __builtin_ve_vl_pvadds_vsvl
      {Intrinsic::ve_vl_pvadds_vsvvl, 117457}, // __builtin_ve_vl_pvadds_vsvvl
      {Intrinsic::ve_vl_pvadds_vvvMvl, 117486}, // __builtin_ve_vl_pvadds_vvvMvl
      {Intrinsic::ve_vl_pvadds_vvvl, 117516}, // __builtin_ve_vl_pvadds_vvvl
      {Intrinsic::ve_vl_pvadds_vvvvl, 117544}, // __builtin_ve_vl_pvadds_vvvvl
      {Intrinsic::ve_vl_pvaddu_vsvMvl, 117573}, // __builtin_ve_vl_pvaddu_vsvMvl
      {Intrinsic::ve_vl_pvaddu_vsvl, 117603}, // __builtin_ve_vl_pvaddu_vsvl
      {Intrinsic::ve_vl_pvaddu_vsvvl, 117631}, // __builtin_ve_vl_pvaddu_vsvvl
      {Intrinsic::ve_vl_pvaddu_vvvMvl, 117660}, // __builtin_ve_vl_pvaddu_vvvMvl
      {Intrinsic::ve_vl_pvaddu_vvvl, 117690}, // __builtin_ve_vl_pvaddu_vvvl
      {Intrinsic::ve_vl_pvaddu_vvvvl, 117718}, // __builtin_ve_vl_pvaddu_vvvvl
      {Intrinsic::ve_vl_pvand_vsvMvl, 117747}, // __builtin_ve_vl_pvand_vsvMvl
      {Intrinsic::ve_vl_pvand_vsvl, 117776}, // __builtin_ve_vl_pvand_vsvl
      {Intrinsic::ve_vl_pvand_vsvvl, 117803}, // __builtin_ve_vl_pvand_vsvvl
      {Intrinsic::ve_vl_pvand_vvvMvl, 117831}, // __builtin_ve_vl_pvand_vvvMvl
      {Intrinsic::ve_vl_pvand_vvvl, 117860}, // __builtin_ve_vl_pvand_vvvl
      {Intrinsic::ve_vl_pvand_vvvvl, 117887}, // __builtin_ve_vl_pvand_vvvvl
      {Intrinsic::ve_vl_pvbrd_vsMvl, 117915}, // __builtin_ve_vl_pvbrd_vsMvl
      {Intrinsic::ve_vl_pvbrd_vsl, 117943}, // __builtin_ve_vl_pvbrd_vsl
      {Intrinsic::ve_vl_pvbrd_vsvl, 117969}, // __builtin_ve_vl_pvbrd_vsvl
      {Intrinsic::ve_vl_pvbrv_vvMvl, 117996}, // __builtin_ve_vl_pvbrv_vvMvl
      {Intrinsic::ve_vl_pvbrv_vvl, 118024}, // __builtin_ve_vl_pvbrv_vvl
      {Intrinsic::ve_vl_pvbrv_vvvl, 118050}, // __builtin_ve_vl_pvbrv_vvvl
      {Intrinsic::ve_vl_pvbrvlo_vvl, 118077}, // __builtin_ve_vl_pvbrvlo_vvl
      {Intrinsic::ve_vl_pvbrvlo_vvmvl, 118105}, // __builtin_ve_vl_pvbrvlo_vvmvl
      {Intrinsic::ve_vl_pvbrvlo_vvvl, 118135}, // __builtin_ve_vl_pvbrvlo_vvvl
      {Intrinsic::ve_vl_pvbrvup_vvl, 118164}, // __builtin_ve_vl_pvbrvup_vvl
      {Intrinsic::ve_vl_pvbrvup_vvmvl, 118192}, // __builtin_ve_vl_pvbrvup_vvmvl
      {Intrinsic::ve_vl_pvbrvup_vvvl, 118222}, // __builtin_ve_vl_pvbrvup_vvvl
      {Intrinsic::ve_vl_pvcmps_vsvMvl, 118251}, // __builtin_ve_vl_pvcmps_vsvMvl
      {Intrinsic::ve_vl_pvcmps_vsvl, 118281}, // __builtin_ve_vl_pvcmps_vsvl
      {Intrinsic::ve_vl_pvcmps_vsvvl, 118309}, // __builtin_ve_vl_pvcmps_vsvvl
      {Intrinsic::ve_vl_pvcmps_vvvMvl, 118338}, // __builtin_ve_vl_pvcmps_vvvMvl
      {Intrinsic::ve_vl_pvcmps_vvvl, 118368}, // __builtin_ve_vl_pvcmps_vvvl
      {Intrinsic::ve_vl_pvcmps_vvvvl, 118396}, // __builtin_ve_vl_pvcmps_vvvvl
      {Intrinsic::ve_vl_pvcmpu_vsvMvl, 118425}, // __builtin_ve_vl_pvcmpu_vsvMvl
      {Intrinsic::ve_vl_pvcmpu_vsvl, 118455}, // __builtin_ve_vl_pvcmpu_vsvl
      {Intrinsic::ve_vl_pvcmpu_vsvvl, 118483}, // __builtin_ve_vl_pvcmpu_vsvvl
      {Intrinsic::ve_vl_pvcmpu_vvvMvl, 118512}, // __builtin_ve_vl_pvcmpu_vvvMvl
      {Intrinsic::ve_vl_pvcmpu_vvvl, 118542}, // __builtin_ve_vl_pvcmpu_vvvl
      {Intrinsic::ve_vl_pvcmpu_vvvvl, 118570}, // __builtin_ve_vl_pvcmpu_vvvvl
      {Intrinsic::ve_vl_pvcvtsw_vvl, 118599}, // __builtin_ve_vl_pvcvtsw_vvl
      {Intrinsic::ve_vl_pvcvtsw_vvvl, 118627}, // __builtin_ve_vl_pvcvtsw_vvvl
      {Intrinsic::ve_vl_pvcvtws_vvMvl, 118656}, // __builtin_ve_vl_pvcvtws_vvMvl
      {Intrinsic::ve_vl_pvcvtws_vvl, 118686}, // __builtin_ve_vl_pvcvtws_vvl
      {Intrinsic::ve_vl_pvcvtws_vvvl, 118714}, // __builtin_ve_vl_pvcvtws_vvvl
      {Intrinsic::ve_vl_pvcvtwsrz_vvMvl, 118743}, // __builtin_ve_vl_pvcvtwsrz_vvMvl
      {Intrinsic::ve_vl_pvcvtwsrz_vvl, 118775}, // __builtin_ve_vl_pvcvtwsrz_vvl
      {Intrinsic::ve_vl_pvcvtwsrz_vvvl, 118805}, // __builtin_ve_vl_pvcvtwsrz_vvvl
      {Intrinsic::ve_vl_pveqv_vsvMvl, 118836}, // __builtin_ve_vl_pveqv_vsvMvl
      {Intrinsic::ve_vl_pveqv_vsvl, 118865}, // __builtin_ve_vl_pveqv_vsvl
      {Intrinsic::ve_vl_pveqv_vsvvl, 118892}, // __builtin_ve_vl_pveqv_vsvvl
      {Intrinsic::ve_vl_pveqv_vvvMvl, 118920}, // __builtin_ve_vl_pveqv_vvvMvl
      {Intrinsic::ve_vl_pveqv_vvvl, 118949}, // __builtin_ve_vl_pveqv_vvvl
      {Intrinsic::ve_vl_pveqv_vvvvl, 118976}, // __builtin_ve_vl_pveqv_vvvvl
      {Intrinsic::ve_vl_pvfadd_vsvMvl, 119004}, // __builtin_ve_vl_pvfadd_vsvMvl
      {Intrinsic::ve_vl_pvfadd_vsvl, 119034}, // __builtin_ve_vl_pvfadd_vsvl
      {Intrinsic::ve_vl_pvfadd_vsvvl, 119062}, // __builtin_ve_vl_pvfadd_vsvvl
      {Intrinsic::ve_vl_pvfadd_vvvMvl, 119091}, // __builtin_ve_vl_pvfadd_vvvMvl
      {Intrinsic::ve_vl_pvfadd_vvvl, 119121}, // __builtin_ve_vl_pvfadd_vvvl
      {Intrinsic::ve_vl_pvfadd_vvvvl, 119149}, // __builtin_ve_vl_pvfadd_vvvvl
      {Intrinsic::ve_vl_pvfcmp_vsvMvl, 119178}, // __builtin_ve_vl_pvfcmp_vsvMvl
      {Intrinsic::ve_vl_pvfcmp_vsvl, 119208}, // __builtin_ve_vl_pvfcmp_vsvl
      {Intrinsic::ve_vl_pvfcmp_vsvvl, 119236}, // __builtin_ve_vl_pvfcmp_vsvvl
      {Intrinsic::ve_vl_pvfcmp_vvvMvl, 119265}, // __builtin_ve_vl_pvfcmp_vvvMvl
      {Intrinsic::ve_vl_pvfcmp_vvvl, 119295}, // __builtin_ve_vl_pvfcmp_vvvl
      {Intrinsic::ve_vl_pvfcmp_vvvvl, 119323}, // __builtin_ve_vl_pvfcmp_vvvvl
      {Intrinsic::ve_vl_pvfmad_vsvvMvl, 119352}, // __builtin_ve_vl_pvfmad_vsvvMvl
      {Intrinsic::ve_vl_pvfmad_vsvvl, 119383}, // __builtin_ve_vl_pvfmad_vsvvl
      {Intrinsic::ve_vl_pvfmad_vsvvvl, 119412}, // __builtin_ve_vl_pvfmad_vsvvvl
      {Intrinsic::ve_vl_pvfmad_vvsvMvl, 119442}, // __builtin_ve_vl_pvfmad_vvsvMvl
      {Intrinsic::ve_vl_pvfmad_vvsvl, 119473}, // __builtin_ve_vl_pvfmad_vvsvl
      {Intrinsic::ve_vl_pvfmad_vvsvvl, 119502}, // __builtin_ve_vl_pvfmad_vvsvvl
      {Intrinsic::ve_vl_pvfmad_vvvvMvl, 119532}, // __builtin_ve_vl_pvfmad_vvvvMvl
      {Intrinsic::ve_vl_pvfmad_vvvvl, 119563}, // __builtin_ve_vl_pvfmad_vvvvl
      {Intrinsic::ve_vl_pvfmad_vvvvvl, 119592}, // __builtin_ve_vl_pvfmad_vvvvvl
      {Intrinsic::ve_vl_pvfmax_vsvMvl, 119622}, // __builtin_ve_vl_pvfmax_vsvMvl
      {Intrinsic::ve_vl_pvfmax_vsvl, 119652}, // __builtin_ve_vl_pvfmax_vsvl
      {Intrinsic::ve_vl_pvfmax_vsvvl, 119680}, // __builtin_ve_vl_pvfmax_vsvvl
      {Intrinsic::ve_vl_pvfmax_vvvMvl, 119709}, // __builtin_ve_vl_pvfmax_vvvMvl
      {Intrinsic::ve_vl_pvfmax_vvvl, 119739}, // __builtin_ve_vl_pvfmax_vvvl
      {Intrinsic::ve_vl_pvfmax_vvvvl, 119767}, // __builtin_ve_vl_pvfmax_vvvvl
      {Intrinsic::ve_vl_pvfmin_vsvMvl, 119796}, // __builtin_ve_vl_pvfmin_vsvMvl
      {Intrinsic::ve_vl_pvfmin_vsvl, 119826}, // __builtin_ve_vl_pvfmin_vsvl
      {Intrinsic::ve_vl_pvfmin_vsvvl, 119854}, // __builtin_ve_vl_pvfmin_vsvvl
      {Intrinsic::ve_vl_pvfmin_vvvMvl, 119883}, // __builtin_ve_vl_pvfmin_vvvMvl
      {Intrinsic::ve_vl_pvfmin_vvvl, 119913}, // __builtin_ve_vl_pvfmin_vvvl
      {Intrinsic::ve_vl_pvfmin_vvvvl, 119941}, // __builtin_ve_vl_pvfmin_vvvvl
      {Intrinsic::ve_vl_pvfmkaf_Ml, 119970}, // __builtin_ve_vl_pvfmkaf_Ml
      {Intrinsic::ve_vl_pvfmkat_Ml, 119997}, // __builtin_ve_vl_pvfmkat_Ml
      {Intrinsic::ve_vl_pvfmkseq_MvMl, 120024}, // __builtin_ve_vl_pvfmkseq_MvMl
      {Intrinsic::ve_vl_pvfmkseq_Mvl, 120054}, // __builtin_ve_vl_pvfmkseq_Mvl
      {Intrinsic::ve_vl_pvfmkseqnan_MvMl, 120083}, // __builtin_ve_vl_pvfmkseqnan_MvMl
      {Intrinsic::ve_vl_pvfmkseqnan_Mvl, 120116}, // __builtin_ve_vl_pvfmkseqnan_Mvl
      {Intrinsic::ve_vl_pvfmksge_MvMl, 120148}, // __builtin_ve_vl_pvfmksge_MvMl
      {Intrinsic::ve_vl_pvfmksge_Mvl, 120178}, // __builtin_ve_vl_pvfmksge_Mvl
      {Intrinsic::ve_vl_pvfmksgenan_MvMl, 120207}, // __builtin_ve_vl_pvfmksgenan_MvMl
      {Intrinsic::ve_vl_pvfmksgenan_Mvl, 120240}, // __builtin_ve_vl_pvfmksgenan_Mvl
      {Intrinsic::ve_vl_pvfmksgt_MvMl, 120272}, // __builtin_ve_vl_pvfmksgt_MvMl
      {Intrinsic::ve_vl_pvfmksgt_Mvl, 120302}, // __builtin_ve_vl_pvfmksgt_Mvl
      {Intrinsic::ve_vl_pvfmksgtnan_MvMl, 120331}, // __builtin_ve_vl_pvfmksgtnan_MvMl
      {Intrinsic::ve_vl_pvfmksgtnan_Mvl, 120364}, // __builtin_ve_vl_pvfmksgtnan_Mvl
      {Intrinsic::ve_vl_pvfmksle_MvMl, 120396}, // __builtin_ve_vl_pvfmksle_MvMl
      {Intrinsic::ve_vl_pvfmksle_Mvl, 120426}, // __builtin_ve_vl_pvfmksle_Mvl
      {Intrinsic::ve_vl_pvfmkslenan_MvMl, 120455}, // __builtin_ve_vl_pvfmkslenan_MvMl
      {Intrinsic::ve_vl_pvfmkslenan_Mvl, 120488}, // __builtin_ve_vl_pvfmkslenan_Mvl
      {Intrinsic::ve_vl_pvfmksloeq_mvl, 120520}, // __builtin_ve_vl_pvfmksloeq_mvl
      {Intrinsic::ve_vl_pvfmksloeq_mvml, 120551}, // __builtin_ve_vl_pvfmksloeq_mvml
      {Intrinsic::ve_vl_pvfmksloeqnan_mvl, 120583}, // __builtin_ve_vl_pvfmksloeqnan_mvl
      {Intrinsic::ve_vl_pvfmksloeqnan_mvml, 120617}, // __builtin_ve_vl_pvfmksloeqnan_mvml
      {Intrinsic::ve_vl_pvfmksloge_mvl, 120652}, // __builtin_ve_vl_pvfmksloge_mvl
      {Intrinsic::ve_vl_pvfmksloge_mvml, 120683}, // __builtin_ve_vl_pvfmksloge_mvml
      {Intrinsic::ve_vl_pvfmkslogenan_mvl, 120715}, // __builtin_ve_vl_pvfmkslogenan_mvl
      {Intrinsic::ve_vl_pvfmkslogenan_mvml, 120749}, // __builtin_ve_vl_pvfmkslogenan_mvml
      {Intrinsic::ve_vl_pvfmkslogt_mvl, 120784}, // __builtin_ve_vl_pvfmkslogt_mvl
      {Intrinsic::ve_vl_pvfmkslogt_mvml, 120815}, // __builtin_ve_vl_pvfmkslogt_mvml
      {Intrinsic::ve_vl_pvfmkslogtnan_mvl, 120847}, // __builtin_ve_vl_pvfmkslogtnan_mvl
      {Intrinsic::ve_vl_pvfmkslogtnan_mvml, 120881}, // __builtin_ve_vl_pvfmkslogtnan_mvml
      {Intrinsic::ve_vl_pvfmkslole_mvl, 120916}, // __builtin_ve_vl_pvfmkslole_mvl
      {Intrinsic::ve_vl_pvfmkslole_mvml, 120947}, // __builtin_ve_vl_pvfmkslole_mvml
      {Intrinsic::ve_vl_pvfmkslolenan_mvl, 120979}, // __builtin_ve_vl_pvfmkslolenan_mvl
      {Intrinsic::ve_vl_pvfmkslolenan_mvml, 121013}, // __builtin_ve_vl_pvfmkslolenan_mvml
      {Intrinsic::ve_vl_pvfmkslolt_mvl, 121048}, // __builtin_ve_vl_pvfmkslolt_mvl
      {Intrinsic::ve_vl_pvfmkslolt_mvml, 121079}, // __builtin_ve_vl_pvfmkslolt_mvml
      {Intrinsic::ve_vl_pvfmksloltnan_mvl, 121111}, // __builtin_ve_vl_pvfmksloltnan_mvl
      {Intrinsic::ve_vl_pvfmksloltnan_mvml, 121145}, // __builtin_ve_vl_pvfmksloltnan_mvml
      {Intrinsic::ve_vl_pvfmkslonan_mvl, 121180}, // __builtin_ve_vl_pvfmkslonan_mvl
      {Intrinsic::ve_vl_pvfmkslonan_mvml, 121212}, // __builtin_ve_vl_pvfmkslonan_mvml
      {Intrinsic::ve_vl_pvfmkslone_mvl, 121245}, // __builtin_ve_vl_pvfmkslone_mvl
      {Intrinsic::ve_vl_pvfmkslone_mvml, 121276}, // __builtin_ve_vl_pvfmkslone_mvml
      {Intrinsic::ve_vl_pvfmkslonenan_mvl, 121308}, // __builtin_ve_vl_pvfmkslonenan_mvl
      {Intrinsic::ve_vl_pvfmkslonenan_mvml, 121342}, // __builtin_ve_vl_pvfmkslonenan_mvml
      {Intrinsic::ve_vl_pvfmkslonum_mvl, 121377}, // __builtin_ve_vl_pvfmkslonum_mvl
      {Intrinsic::ve_vl_pvfmkslonum_mvml, 121409}, // __builtin_ve_vl_pvfmkslonum_mvml
      {Intrinsic::ve_vl_pvfmkslt_MvMl, 121442}, // __builtin_ve_vl_pvfmkslt_MvMl
      {Intrinsic::ve_vl_pvfmkslt_Mvl, 121472}, // __builtin_ve_vl_pvfmkslt_Mvl
      {Intrinsic::ve_vl_pvfmksltnan_MvMl, 121501}, // __builtin_ve_vl_pvfmksltnan_MvMl
      {Intrinsic::ve_vl_pvfmksltnan_Mvl, 121534}, // __builtin_ve_vl_pvfmksltnan_Mvl
      {Intrinsic::ve_vl_pvfmksnan_MvMl, 121566}, // __builtin_ve_vl_pvfmksnan_MvMl
      {Intrinsic::ve_vl_pvfmksnan_Mvl, 121597}, // __builtin_ve_vl_pvfmksnan_Mvl
      {Intrinsic::ve_vl_pvfmksne_MvMl, 121627}, // __builtin_ve_vl_pvfmksne_MvMl
      {Intrinsic::ve_vl_pvfmksne_Mvl, 121657}, // __builtin_ve_vl_pvfmksne_Mvl
      {Intrinsic::ve_vl_pvfmksnenan_MvMl, 121686}, // __builtin_ve_vl_pvfmksnenan_MvMl
      {Intrinsic::ve_vl_pvfmksnenan_Mvl, 121719}, // __builtin_ve_vl_pvfmksnenan_Mvl
      {Intrinsic::ve_vl_pvfmksnum_MvMl, 121751}, // __builtin_ve_vl_pvfmksnum_MvMl
      {Intrinsic::ve_vl_pvfmksnum_Mvl, 121782}, // __builtin_ve_vl_pvfmksnum_Mvl
      {Intrinsic::ve_vl_pvfmksupeq_mvl, 121812}, // __builtin_ve_vl_pvfmksupeq_mvl
      {Intrinsic::ve_vl_pvfmksupeq_mvml, 121843}, // __builtin_ve_vl_pvfmksupeq_mvml
      {Intrinsic::ve_vl_pvfmksupeqnan_mvl, 121875}, // __builtin_ve_vl_pvfmksupeqnan_mvl
      {Intrinsic::ve_vl_pvfmksupeqnan_mvml, 121909}, // __builtin_ve_vl_pvfmksupeqnan_mvml
      {Intrinsic::ve_vl_pvfmksupge_mvl, 121944}, // __builtin_ve_vl_pvfmksupge_mvl
      {Intrinsic::ve_vl_pvfmksupge_mvml, 121975}, // __builtin_ve_vl_pvfmksupge_mvml
      {Intrinsic::ve_vl_pvfmksupgenan_mvl, 122007}, // __builtin_ve_vl_pvfmksupgenan_mvl
      {Intrinsic::ve_vl_pvfmksupgenan_mvml, 122041}, // __builtin_ve_vl_pvfmksupgenan_mvml
      {Intrinsic::ve_vl_pvfmksupgt_mvl, 122076}, // __builtin_ve_vl_pvfmksupgt_mvl
      {Intrinsic::ve_vl_pvfmksupgt_mvml, 122107}, // __builtin_ve_vl_pvfmksupgt_mvml
      {Intrinsic::ve_vl_pvfmksupgtnan_mvl, 122139}, // __builtin_ve_vl_pvfmksupgtnan_mvl
      {Intrinsic::ve_vl_pvfmksupgtnan_mvml, 122173}, // __builtin_ve_vl_pvfmksupgtnan_mvml
      {Intrinsic::ve_vl_pvfmksuple_mvl, 122208}, // __builtin_ve_vl_pvfmksuple_mvl
      {Intrinsic::ve_vl_pvfmksuple_mvml, 122239}, // __builtin_ve_vl_pvfmksuple_mvml
      {Intrinsic::ve_vl_pvfmksuplenan_mvl, 122271}, // __builtin_ve_vl_pvfmksuplenan_mvl
      {Intrinsic::ve_vl_pvfmksuplenan_mvml, 122305}, // __builtin_ve_vl_pvfmksuplenan_mvml
      {Intrinsic::ve_vl_pvfmksuplt_mvl, 122340}, // __builtin_ve_vl_pvfmksuplt_mvl
      {Intrinsic::ve_vl_pvfmksuplt_mvml, 122371}, // __builtin_ve_vl_pvfmksuplt_mvml
      {Intrinsic::ve_vl_pvfmksupltnan_mvl, 122403}, // __builtin_ve_vl_pvfmksupltnan_mvl
      {Intrinsic::ve_vl_pvfmksupltnan_mvml, 122437}, // __builtin_ve_vl_pvfmksupltnan_mvml
      {Intrinsic::ve_vl_pvfmksupnan_mvl, 122472}, // __builtin_ve_vl_pvfmksupnan_mvl
      {Intrinsic::ve_vl_pvfmksupnan_mvml, 122504}, // __builtin_ve_vl_pvfmksupnan_mvml
      {Intrinsic::ve_vl_pvfmksupne_mvl, 122537}, // __builtin_ve_vl_pvfmksupne_mvl
      {Intrinsic::ve_vl_pvfmksupne_mvml, 122568}, // __builtin_ve_vl_pvfmksupne_mvml
      {Intrinsic::ve_vl_pvfmksupnenan_mvl, 122600}, // __builtin_ve_vl_pvfmksupnenan_mvl
      {Intrinsic::ve_vl_pvfmksupnenan_mvml, 122634}, // __builtin_ve_vl_pvfmksupnenan_mvml
      {Intrinsic::ve_vl_pvfmksupnum_mvl, 122669}, // __builtin_ve_vl_pvfmksupnum_mvl
      {Intrinsic::ve_vl_pvfmksupnum_mvml, 122701}, // __builtin_ve_vl_pvfmksupnum_mvml
      {Intrinsic::ve_vl_pvfmkweq_MvMl, 122734}, // __builtin_ve_vl_pvfmkweq_MvMl
      {Intrinsic::ve_vl_pvfmkweq_Mvl, 122764}, // __builtin_ve_vl_pvfmkweq_Mvl
      {Intrinsic::ve_vl_pvfmkweqnan_MvMl, 122793}, // __builtin_ve_vl_pvfmkweqnan_MvMl
      {Intrinsic::ve_vl_pvfmkweqnan_Mvl, 122826}, // __builtin_ve_vl_pvfmkweqnan_Mvl
      {Intrinsic::ve_vl_pvfmkwge_MvMl, 122858}, // __builtin_ve_vl_pvfmkwge_MvMl
      {Intrinsic::ve_vl_pvfmkwge_Mvl, 122888}, // __builtin_ve_vl_pvfmkwge_Mvl
      {Intrinsic::ve_vl_pvfmkwgenan_MvMl, 122917}, // __builtin_ve_vl_pvfmkwgenan_MvMl
      {Intrinsic::ve_vl_pvfmkwgenan_Mvl, 122950}, // __builtin_ve_vl_pvfmkwgenan_Mvl
      {Intrinsic::ve_vl_pvfmkwgt_MvMl, 122982}, // __builtin_ve_vl_pvfmkwgt_MvMl
      {Intrinsic::ve_vl_pvfmkwgt_Mvl, 123012}, // __builtin_ve_vl_pvfmkwgt_Mvl
      {Intrinsic::ve_vl_pvfmkwgtnan_MvMl, 123041}, // __builtin_ve_vl_pvfmkwgtnan_MvMl
      {Intrinsic::ve_vl_pvfmkwgtnan_Mvl, 123074}, // __builtin_ve_vl_pvfmkwgtnan_Mvl
      {Intrinsic::ve_vl_pvfmkwle_MvMl, 123106}, // __builtin_ve_vl_pvfmkwle_MvMl
      {Intrinsic::ve_vl_pvfmkwle_Mvl, 123136}, // __builtin_ve_vl_pvfmkwle_Mvl
      {Intrinsic::ve_vl_pvfmkwlenan_MvMl, 123165}, // __builtin_ve_vl_pvfmkwlenan_MvMl
      {Intrinsic::ve_vl_pvfmkwlenan_Mvl, 123198}, // __builtin_ve_vl_pvfmkwlenan_Mvl
      {Intrinsic::ve_vl_pvfmkwloeq_mvl, 123230}, // __builtin_ve_vl_pvfmkwloeq_mvl
      {Intrinsic::ve_vl_pvfmkwloeq_mvml, 123261}, // __builtin_ve_vl_pvfmkwloeq_mvml
      {Intrinsic::ve_vl_pvfmkwloeqnan_mvl, 123293}, // __builtin_ve_vl_pvfmkwloeqnan_mvl
      {Intrinsic::ve_vl_pvfmkwloeqnan_mvml, 123327}, // __builtin_ve_vl_pvfmkwloeqnan_mvml
      {Intrinsic::ve_vl_pvfmkwloge_mvl, 123362}, // __builtin_ve_vl_pvfmkwloge_mvl
      {Intrinsic::ve_vl_pvfmkwloge_mvml, 123393}, // __builtin_ve_vl_pvfmkwloge_mvml
      {Intrinsic::ve_vl_pvfmkwlogenan_mvl, 123425}, // __builtin_ve_vl_pvfmkwlogenan_mvl
      {Intrinsic::ve_vl_pvfmkwlogenan_mvml, 123459}, // __builtin_ve_vl_pvfmkwlogenan_mvml
      {Intrinsic::ve_vl_pvfmkwlogt_mvl, 123494}, // __builtin_ve_vl_pvfmkwlogt_mvl
      {Intrinsic::ve_vl_pvfmkwlogt_mvml, 123525}, // __builtin_ve_vl_pvfmkwlogt_mvml
      {Intrinsic::ve_vl_pvfmkwlogtnan_mvl, 123557}, // __builtin_ve_vl_pvfmkwlogtnan_mvl
      {Intrinsic::ve_vl_pvfmkwlogtnan_mvml, 123591}, // __builtin_ve_vl_pvfmkwlogtnan_mvml
      {Intrinsic::ve_vl_pvfmkwlole_mvl, 123626}, // __builtin_ve_vl_pvfmkwlole_mvl
      {Intrinsic::ve_vl_pvfmkwlole_mvml, 123657}, // __builtin_ve_vl_pvfmkwlole_mvml
      {Intrinsic::ve_vl_pvfmkwlolenan_mvl, 123689}, // __builtin_ve_vl_pvfmkwlolenan_mvl
      {Intrinsic::ve_vl_pvfmkwlolenan_mvml, 123723}, // __builtin_ve_vl_pvfmkwlolenan_mvml
      {Intrinsic::ve_vl_pvfmkwlolt_mvl, 123758}, // __builtin_ve_vl_pvfmkwlolt_mvl
      {Intrinsic::ve_vl_pvfmkwlolt_mvml, 123789}, // __builtin_ve_vl_pvfmkwlolt_mvml
      {Intrinsic::ve_vl_pvfmkwloltnan_mvl, 123821}, // __builtin_ve_vl_pvfmkwloltnan_mvl
      {Intrinsic::ve_vl_pvfmkwloltnan_mvml, 123855}, // __builtin_ve_vl_pvfmkwloltnan_mvml
      {Intrinsic::ve_vl_pvfmkwlonan_mvl, 123890}, // __builtin_ve_vl_pvfmkwlonan_mvl
      {Intrinsic::ve_vl_pvfmkwlonan_mvml, 123922}, // __builtin_ve_vl_pvfmkwlonan_mvml
      {Intrinsic::ve_vl_pvfmkwlone_mvl, 123955}, // __builtin_ve_vl_pvfmkwlone_mvl
      {Intrinsic::ve_vl_pvfmkwlone_mvml, 123986}, // __builtin_ve_vl_pvfmkwlone_mvml
      {Intrinsic::ve_vl_pvfmkwlonenan_mvl, 124018}, // __builtin_ve_vl_pvfmkwlonenan_mvl
      {Intrinsic::ve_vl_pvfmkwlonenan_mvml, 124052}, // __builtin_ve_vl_pvfmkwlonenan_mvml
      {Intrinsic::ve_vl_pvfmkwlonum_mvl, 124087}, // __builtin_ve_vl_pvfmkwlonum_mvl
      {Intrinsic::ve_vl_pvfmkwlonum_mvml, 124119}, // __builtin_ve_vl_pvfmkwlonum_mvml
      {Intrinsic::ve_vl_pvfmkwlt_MvMl, 124152}, // __builtin_ve_vl_pvfmkwlt_MvMl
      {Intrinsic::ve_vl_pvfmkwlt_Mvl, 124182}, // __builtin_ve_vl_pvfmkwlt_Mvl
      {Intrinsic::ve_vl_pvfmkwltnan_MvMl, 124211}, // __builtin_ve_vl_pvfmkwltnan_MvMl
      {Intrinsic::ve_vl_pvfmkwltnan_Mvl, 124244}, // __builtin_ve_vl_pvfmkwltnan_Mvl
      {Intrinsic::ve_vl_pvfmkwnan_MvMl, 124276}, // __builtin_ve_vl_pvfmkwnan_MvMl
      {Intrinsic::ve_vl_pvfmkwnan_Mvl, 124307}, // __builtin_ve_vl_pvfmkwnan_Mvl
      {Intrinsic::ve_vl_pvfmkwne_MvMl, 124337}, // __builtin_ve_vl_pvfmkwne_MvMl
      {Intrinsic::ve_vl_pvfmkwne_Mvl, 124367}, // __builtin_ve_vl_pvfmkwne_Mvl
      {Intrinsic::ve_vl_pvfmkwnenan_MvMl, 124396}, // __builtin_ve_vl_pvfmkwnenan_MvMl
      {Intrinsic::ve_vl_pvfmkwnenan_Mvl, 124429}, // __builtin_ve_vl_pvfmkwnenan_Mvl
      {Intrinsic::ve_vl_pvfmkwnum_MvMl, 124461}, // __builtin_ve_vl_pvfmkwnum_MvMl
      {Intrinsic::ve_vl_pvfmkwnum_Mvl, 124492}, // __builtin_ve_vl_pvfmkwnum_Mvl
      {Intrinsic::ve_vl_pvfmkwupeq_mvl, 124522}, // __builtin_ve_vl_pvfmkwupeq_mvl
      {Intrinsic::ve_vl_pvfmkwupeq_mvml, 124553}, // __builtin_ve_vl_pvfmkwupeq_mvml
      {Intrinsic::ve_vl_pvfmkwupeqnan_mvl, 124585}, // __builtin_ve_vl_pvfmkwupeqnan_mvl
      {Intrinsic::ve_vl_pvfmkwupeqnan_mvml, 124619}, // __builtin_ve_vl_pvfmkwupeqnan_mvml
      {Intrinsic::ve_vl_pvfmkwupge_mvl, 124654}, // __builtin_ve_vl_pvfmkwupge_mvl
      {Intrinsic::ve_vl_pvfmkwupge_mvml, 124685}, // __builtin_ve_vl_pvfmkwupge_mvml
      {Intrinsic::ve_vl_pvfmkwupgenan_mvl, 124717}, // __builtin_ve_vl_pvfmkwupgenan_mvl
      {Intrinsic::ve_vl_pvfmkwupgenan_mvml, 124751}, // __builtin_ve_vl_pvfmkwupgenan_mvml
      {Intrinsic::ve_vl_pvfmkwupgt_mvl, 124786}, // __builtin_ve_vl_pvfmkwupgt_mvl
      {Intrinsic::ve_vl_pvfmkwupgt_mvml, 124817}, // __builtin_ve_vl_pvfmkwupgt_mvml
      {Intrinsic::ve_vl_pvfmkwupgtnan_mvl, 124849}, // __builtin_ve_vl_pvfmkwupgtnan_mvl
      {Intrinsic::ve_vl_pvfmkwupgtnan_mvml, 124883}, // __builtin_ve_vl_pvfmkwupgtnan_mvml
      {Intrinsic::ve_vl_pvfmkwuple_mvl, 124918}, // __builtin_ve_vl_pvfmkwuple_mvl
      {Intrinsic::ve_vl_pvfmkwuple_mvml, 124949}, // __builtin_ve_vl_pvfmkwuple_mvml
      {Intrinsic::ve_vl_pvfmkwuplenan_mvl, 124981}, // __builtin_ve_vl_pvfmkwuplenan_mvl
      {Intrinsic::ve_vl_pvfmkwuplenan_mvml, 125015}, // __builtin_ve_vl_pvfmkwuplenan_mvml
      {Intrinsic::ve_vl_pvfmkwuplt_mvl, 125050}, // __builtin_ve_vl_pvfmkwuplt_mvl
      {Intrinsic::ve_vl_pvfmkwuplt_mvml, 125081}, // __builtin_ve_vl_pvfmkwuplt_mvml
      {Intrinsic::ve_vl_pvfmkwupltnan_mvl, 125113}, // __builtin_ve_vl_pvfmkwupltnan_mvl
      {Intrinsic::ve_vl_pvfmkwupltnan_mvml, 125147}, // __builtin_ve_vl_pvfmkwupltnan_mvml
      {Intrinsic::ve_vl_pvfmkwupnan_mvl, 125182}, // __builtin_ve_vl_pvfmkwupnan_mvl
      {Intrinsic::ve_vl_pvfmkwupnan_mvml, 125214}, // __builtin_ve_vl_pvfmkwupnan_mvml
      {Intrinsic::ve_vl_pvfmkwupne_mvl, 125247}, // __builtin_ve_vl_pvfmkwupne_mvl
      {Intrinsic::ve_vl_pvfmkwupne_mvml, 125278}, // __builtin_ve_vl_pvfmkwupne_mvml
      {Intrinsic::ve_vl_pvfmkwupnenan_mvl, 125310}, // __builtin_ve_vl_pvfmkwupnenan_mvl
      {Intrinsic::ve_vl_pvfmkwupnenan_mvml, 125344}, // __builtin_ve_vl_pvfmkwupnenan_mvml
      {Intrinsic::ve_vl_pvfmkwupnum_mvl, 125379}, // __builtin_ve_vl_pvfmkwupnum_mvl
      {Intrinsic::ve_vl_pvfmkwupnum_mvml, 125411}, // __builtin_ve_vl_pvfmkwupnum_mvml
      {Intrinsic::ve_vl_pvfmsb_vsvvMvl, 125444}, // __builtin_ve_vl_pvfmsb_vsvvMvl
      {Intrinsic::ve_vl_pvfmsb_vsvvl, 125475}, // __builtin_ve_vl_pvfmsb_vsvvl
      {Intrinsic::ve_vl_pvfmsb_vsvvvl, 125504}, // __builtin_ve_vl_pvfmsb_vsvvvl
      {Intrinsic::ve_vl_pvfmsb_vvsvMvl, 125534}, // __builtin_ve_vl_pvfmsb_vvsvMvl
      {Intrinsic::ve_vl_pvfmsb_vvsvl, 125565}, // __builtin_ve_vl_pvfmsb_vvsvl
      {Intrinsic::ve_vl_pvfmsb_vvsvvl, 125594}, // __builtin_ve_vl_pvfmsb_vvsvvl
      {Intrinsic::ve_vl_pvfmsb_vvvvMvl, 125624}, // __builtin_ve_vl_pvfmsb_vvvvMvl
      {Intrinsic::ve_vl_pvfmsb_vvvvl, 125655}, // __builtin_ve_vl_pvfmsb_vvvvl
      {Intrinsic::ve_vl_pvfmsb_vvvvvl, 125684}, // __builtin_ve_vl_pvfmsb_vvvvvl
      {Intrinsic::ve_vl_pvfmul_vsvMvl, 125714}, // __builtin_ve_vl_pvfmul_vsvMvl
      {Intrinsic::ve_vl_pvfmul_vsvl, 125744}, // __builtin_ve_vl_pvfmul_vsvl
      {Intrinsic::ve_vl_pvfmul_vsvvl, 125772}, // __builtin_ve_vl_pvfmul_vsvvl
      {Intrinsic::ve_vl_pvfmul_vvvMvl, 125801}, // __builtin_ve_vl_pvfmul_vvvMvl
      {Intrinsic::ve_vl_pvfmul_vvvl, 125831}, // __builtin_ve_vl_pvfmul_vvvl
      {Intrinsic::ve_vl_pvfmul_vvvvl, 125859}, // __builtin_ve_vl_pvfmul_vvvvl
      {Intrinsic::ve_vl_pvfnmad_vsvvMvl, 125888}, // __builtin_ve_vl_pvfnmad_vsvvMvl
      {Intrinsic::ve_vl_pvfnmad_vsvvl, 125920}, // __builtin_ve_vl_pvfnmad_vsvvl
      {Intrinsic::ve_vl_pvfnmad_vsvvvl, 125950}, // __builtin_ve_vl_pvfnmad_vsvvvl
      {Intrinsic::ve_vl_pvfnmad_vvsvMvl, 125981}, // __builtin_ve_vl_pvfnmad_vvsvMvl
      {Intrinsic::ve_vl_pvfnmad_vvsvl, 126013}, // __builtin_ve_vl_pvfnmad_vvsvl
      {Intrinsic::ve_vl_pvfnmad_vvsvvl, 126043}, // __builtin_ve_vl_pvfnmad_vvsvvl
      {Intrinsic::ve_vl_pvfnmad_vvvvMvl, 126074}, // __builtin_ve_vl_pvfnmad_vvvvMvl
      {Intrinsic::ve_vl_pvfnmad_vvvvl, 126106}, // __builtin_ve_vl_pvfnmad_vvvvl
      {Intrinsic::ve_vl_pvfnmad_vvvvvl, 126136}, // __builtin_ve_vl_pvfnmad_vvvvvl
      {Intrinsic::ve_vl_pvfnmsb_vsvvMvl, 126167}, // __builtin_ve_vl_pvfnmsb_vsvvMvl
      {Intrinsic::ve_vl_pvfnmsb_vsvvl, 126199}, // __builtin_ve_vl_pvfnmsb_vsvvl
      {Intrinsic::ve_vl_pvfnmsb_vsvvvl, 126229}, // __builtin_ve_vl_pvfnmsb_vsvvvl
      {Intrinsic::ve_vl_pvfnmsb_vvsvMvl, 126260}, // __builtin_ve_vl_pvfnmsb_vvsvMvl
      {Intrinsic::ve_vl_pvfnmsb_vvsvl, 126292}, // __builtin_ve_vl_pvfnmsb_vvsvl
      {Intrinsic::ve_vl_pvfnmsb_vvsvvl, 126322}, // __builtin_ve_vl_pvfnmsb_vvsvvl
      {Intrinsic::ve_vl_pvfnmsb_vvvvMvl, 126353}, // __builtin_ve_vl_pvfnmsb_vvvvMvl
      {Intrinsic::ve_vl_pvfnmsb_vvvvl, 126385}, // __builtin_ve_vl_pvfnmsb_vvvvl
      {Intrinsic::ve_vl_pvfnmsb_vvvvvl, 126415}, // __builtin_ve_vl_pvfnmsb_vvvvvl
      {Intrinsic::ve_vl_pvfsub_vsvMvl, 126446}, // __builtin_ve_vl_pvfsub_vsvMvl
      {Intrinsic::ve_vl_pvfsub_vsvl, 126476}, // __builtin_ve_vl_pvfsub_vsvl
      {Intrinsic::ve_vl_pvfsub_vsvvl, 126504}, // __builtin_ve_vl_pvfsub_vsvvl
      {Intrinsic::ve_vl_pvfsub_vvvMvl, 126533}, // __builtin_ve_vl_pvfsub_vvvMvl
      {Intrinsic::ve_vl_pvfsub_vvvl, 126563}, // __builtin_ve_vl_pvfsub_vvvl
      {Intrinsic::ve_vl_pvfsub_vvvvl, 126591}, // __builtin_ve_vl_pvfsub_vvvvl
      {Intrinsic::ve_vl_pvldz_vvMvl, 126620}, // __builtin_ve_vl_pvldz_vvMvl
      {Intrinsic::ve_vl_pvldz_vvl, 126648}, // __builtin_ve_vl_pvldz_vvl
      {Intrinsic::ve_vl_pvldz_vvvl, 126674}, // __builtin_ve_vl_pvldz_vvvl
      {Intrinsic::ve_vl_pvldzlo_vvl, 126701}, // __builtin_ve_vl_pvldzlo_vvl
      {Intrinsic::ve_vl_pvldzlo_vvmvl, 126729}, // __builtin_ve_vl_pvldzlo_vvmvl
      {Intrinsic::ve_vl_pvldzlo_vvvl, 126759}, // __builtin_ve_vl_pvldzlo_vvvl
      {Intrinsic::ve_vl_pvldzup_vvl, 126788}, // __builtin_ve_vl_pvldzup_vvl
      {Intrinsic::ve_vl_pvldzup_vvmvl, 126816}, // __builtin_ve_vl_pvldzup_vvmvl
      {Intrinsic::ve_vl_pvldzup_vvvl, 126846}, // __builtin_ve_vl_pvldzup_vvvl
      {Intrinsic::ve_vl_pvmaxs_vsvMvl, 126875}, // __builtin_ve_vl_pvmaxs_vsvMvl
      {Intrinsic::ve_vl_pvmaxs_vsvl, 126905}, // __builtin_ve_vl_pvmaxs_vsvl
      {Intrinsic::ve_vl_pvmaxs_vsvvl, 126933}, // __builtin_ve_vl_pvmaxs_vsvvl
      {Intrinsic::ve_vl_pvmaxs_vvvMvl, 126962}, // __builtin_ve_vl_pvmaxs_vvvMvl
      {Intrinsic::ve_vl_pvmaxs_vvvl, 126992}, // __builtin_ve_vl_pvmaxs_vvvl
      {Intrinsic::ve_vl_pvmaxs_vvvvl, 127020}, // __builtin_ve_vl_pvmaxs_vvvvl
      {Intrinsic::ve_vl_pvmins_vsvMvl, 127049}, // __builtin_ve_vl_pvmins_vsvMvl
      {Intrinsic::ve_vl_pvmins_vsvl, 127079}, // __builtin_ve_vl_pvmins_vsvl
      {Intrinsic::ve_vl_pvmins_vsvvl, 127107}, // __builtin_ve_vl_pvmins_vsvvl
      {Intrinsic::ve_vl_pvmins_vvvMvl, 127136}, // __builtin_ve_vl_pvmins_vvvMvl
      {Intrinsic::ve_vl_pvmins_vvvl, 127166}, // __builtin_ve_vl_pvmins_vvvl
      {Intrinsic::ve_vl_pvmins_vvvvl, 127194}, // __builtin_ve_vl_pvmins_vvvvl
      {Intrinsic::ve_vl_pvor_vsvMvl, 127223}, // __builtin_ve_vl_pvor_vsvMvl
      {Intrinsic::ve_vl_pvor_vsvl, 127251}, // __builtin_ve_vl_pvor_vsvl
      {Intrinsic::ve_vl_pvor_vsvvl, 127277}, // __builtin_ve_vl_pvor_vsvvl
      {Intrinsic::ve_vl_pvor_vvvMvl, 127304}, // __builtin_ve_vl_pvor_vvvMvl
      {Intrinsic::ve_vl_pvor_vvvl, 127332}, // __builtin_ve_vl_pvor_vvvl
      {Intrinsic::ve_vl_pvor_vvvvl, 127358}, // __builtin_ve_vl_pvor_vvvvl
      {Intrinsic::ve_vl_pvpcnt_vvMvl, 127385}, // __builtin_ve_vl_pvpcnt_vvMvl
      {Intrinsic::ve_vl_pvpcnt_vvl, 127414}, // __builtin_ve_vl_pvpcnt_vvl
      {Intrinsic::ve_vl_pvpcnt_vvvl, 127441}, // __builtin_ve_vl_pvpcnt_vvvl
      {Intrinsic::ve_vl_pvpcntlo_vvl, 127469}, // __builtin_ve_vl_pvpcntlo_vvl
      {Intrinsic::ve_vl_pvpcntlo_vvmvl, 127498}, // __builtin_ve_vl_pvpcntlo_vvmvl
      {Intrinsic::ve_vl_pvpcntlo_vvvl, 127529}, // __builtin_ve_vl_pvpcntlo_vvvl
      {Intrinsic::ve_vl_pvpcntup_vvl, 127559}, // __builtin_ve_vl_pvpcntup_vvl
      {Intrinsic::ve_vl_pvpcntup_vvmvl, 127588}, // __builtin_ve_vl_pvpcntup_vvmvl
      {Intrinsic::ve_vl_pvpcntup_vvvl, 127619}, // __builtin_ve_vl_pvpcntup_vvvl
      {Intrinsic::ve_vl_pvrcp_vvl, 127649}, // __builtin_ve_vl_pvrcp_vvl
      {Intrinsic::ve_vl_pvrcp_vvvl, 127675}, // __builtin_ve_vl_pvrcp_vvvl
      {Intrinsic::ve_vl_pvrsqrt_vvl, 127702}, // __builtin_ve_vl_pvrsqrt_vvl
      {Intrinsic::ve_vl_pvrsqrt_vvvl, 127730}, // __builtin_ve_vl_pvrsqrt_vvvl
      {Intrinsic::ve_vl_pvrsqrtnex_vvl, 127759}, // __builtin_ve_vl_pvrsqrtnex_vvl
      {Intrinsic::ve_vl_pvrsqrtnex_vvvl, 127790}, // __builtin_ve_vl_pvrsqrtnex_vvvl
      {Intrinsic::ve_vl_pvseq_vl, 127822}, // __builtin_ve_vl_pvseq_vl
      {Intrinsic::ve_vl_pvseq_vvl, 127847}, // __builtin_ve_vl_pvseq_vvl
      {Intrinsic::ve_vl_pvseqlo_vl, 127873}, // __builtin_ve_vl_pvseqlo_vl
      {Intrinsic::ve_vl_pvseqlo_vvl, 127900}, // __builtin_ve_vl_pvseqlo_vvl
      {Intrinsic::ve_vl_pvsequp_vl, 127928}, // __builtin_ve_vl_pvsequp_vl
      {Intrinsic::ve_vl_pvsequp_vvl, 127955}, // __builtin_ve_vl_pvsequp_vvl
      {Intrinsic::ve_vl_pvsla_vvsMvl, 127983}, // __builtin_ve_vl_pvsla_vvsMvl
      {Intrinsic::ve_vl_pvsla_vvsl, 128012}, // __builtin_ve_vl_pvsla_vvsl
      {Intrinsic::ve_vl_pvsla_vvsvl, 128039}, // __builtin_ve_vl_pvsla_vvsvl
      {Intrinsic::ve_vl_pvsla_vvvMvl, 128067}, // __builtin_ve_vl_pvsla_vvvMvl
      {Intrinsic::ve_vl_pvsla_vvvl, 128096}, // __builtin_ve_vl_pvsla_vvvl
      {Intrinsic::ve_vl_pvsla_vvvvl, 128123}, // __builtin_ve_vl_pvsla_vvvvl
      {Intrinsic::ve_vl_pvsll_vvsMvl, 128151}, // __builtin_ve_vl_pvsll_vvsMvl
      {Intrinsic::ve_vl_pvsll_vvsl, 128180}, // __builtin_ve_vl_pvsll_vvsl
      {Intrinsic::ve_vl_pvsll_vvsvl, 128207}, // __builtin_ve_vl_pvsll_vvsvl
      {Intrinsic::ve_vl_pvsll_vvvMvl, 128235}, // __builtin_ve_vl_pvsll_vvvMvl
      {Intrinsic::ve_vl_pvsll_vvvl, 128264}, // __builtin_ve_vl_pvsll_vvvl
      {Intrinsic::ve_vl_pvsll_vvvvl, 128291}, // __builtin_ve_vl_pvsll_vvvvl
      {Intrinsic::ve_vl_pvsra_vvsMvl, 128319}, // __builtin_ve_vl_pvsra_vvsMvl
      {Intrinsic::ve_vl_pvsra_vvsl, 128348}, // __builtin_ve_vl_pvsra_vvsl
      {Intrinsic::ve_vl_pvsra_vvsvl, 128375}, // __builtin_ve_vl_pvsra_vvsvl
      {Intrinsic::ve_vl_pvsra_vvvMvl, 128403}, // __builtin_ve_vl_pvsra_vvvMvl
      {Intrinsic::ve_vl_pvsra_vvvl, 128432}, // __builtin_ve_vl_pvsra_vvvl
      {Intrinsic::ve_vl_pvsra_vvvvl, 128459}, // __builtin_ve_vl_pvsra_vvvvl
      {Intrinsic::ve_vl_pvsrl_vvsMvl, 128487}, // __builtin_ve_vl_pvsrl_vvsMvl
      {Intrinsic::ve_vl_pvsrl_vvsl, 128516}, // __builtin_ve_vl_pvsrl_vvsl
      {Intrinsic::ve_vl_pvsrl_vvsvl, 128543}, // __builtin_ve_vl_pvsrl_vvsvl
      {Intrinsic::ve_vl_pvsrl_vvvMvl, 128571}, // __builtin_ve_vl_pvsrl_vvvMvl
      {Intrinsic::ve_vl_pvsrl_vvvl, 128600}, // __builtin_ve_vl_pvsrl_vvvl
      {Intrinsic::ve_vl_pvsrl_vvvvl, 128627}, // __builtin_ve_vl_pvsrl_vvvvl
      {Intrinsic::ve_vl_pvsubs_vsvMvl, 128655}, // __builtin_ve_vl_pvsubs_vsvMvl
      {Intrinsic::ve_vl_pvsubs_vsvl, 128685}, // __builtin_ve_vl_pvsubs_vsvl
      {Intrinsic::ve_vl_pvsubs_vsvvl, 128713}, // __builtin_ve_vl_pvsubs_vsvvl
      {Intrinsic::ve_vl_pvsubs_vvvMvl, 128742}, // __builtin_ve_vl_pvsubs_vvvMvl
      {Intrinsic::ve_vl_pvsubs_vvvl, 128772}, // __builtin_ve_vl_pvsubs_vvvl
      {Intrinsic::ve_vl_pvsubs_vvvvl, 128800}, // __builtin_ve_vl_pvsubs_vvvvl
      {Intrinsic::ve_vl_pvsubu_vsvMvl, 128829}, // __builtin_ve_vl_pvsubu_vsvMvl
      {Intrinsic::ve_vl_pvsubu_vsvl, 128859}, // __builtin_ve_vl_pvsubu_vsvl
      {Intrinsic::ve_vl_pvsubu_vsvvl, 128887}, // __builtin_ve_vl_pvsubu_vsvvl
      {Intrinsic::ve_vl_pvsubu_vvvMvl, 128916}, // __builtin_ve_vl_pvsubu_vvvMvl
      {Intrinsic::ve_vl_pvsubu_vvvl, 128946}, // __builtin_ve_vl_pvsubu_vvvl
      {Intrinsic::ve_vl_pvsubu_vvvvl, 128974}, // __builtin_ve_vl_pvsubu_vvvvl
      {Intrinsic::ve_vl_pvxor_vsvMvl, 129003}, // __builtin_ve_vl_pvxor_vsvMvl
      {Intrinsic::ve_vl_pvxor_vsvl, 129032}, // __builtin_ve_vl_pvxor_vsvl
      {Intrinsic::ve_vl_pvxor_vsvvl, 129059}, // __builtin_ve_vl_pvxor_vsvvl
      {Intrinsic::ve_vl_pvxor_vvvMvl, 129087}, // __builtin_ve_vl_pvxor_vvvMvl
      {Intrinsic::ve_vl_pvxor_vvvl, 129116}, // __builtin_ve_vl_pvxor_vvvl
      {Intrinsic::ve_vl_pvxor_vvvvl, 129143}, // __builtin_ve_vl_pvxor_vvvvl
      {Intrinsic::ve_vl_scr_sss, 129171}, // __builtin_ve_vl_scr_sss
      {Intrinsic::ve_vl_svm_sMs, 129195}, // __builtin_ve_vl_svm_sMs
      {Intrinsic::ve_vl_svm_sms, 129219}, // __builtin_ve_vl_svm_sms
      {Intrinsic::ve_vl_svob, 129243}, // __builtin_ve_vl_svob
      {Intrinsic::ve_vl_tovm_sml, 129264}, // __builtin_ve_vl_tovm_sml
      {Intrinsic::ve_vl_tscr_ssss, 129289}, // __builtin_ve_vl_tscr_ssss
      {Intrinsic::ve_vl_vaddsl_vsvl, 129315}, // __builtin_ve_vl_vaddsl_vsvl
      {Intrinsic::ve_vl_vaddsl_vsvmvl, 129343}, // __builtin_ve_vl_vaddsl_vsvmvl
      {Intrinsic::ve_vl_vaddsl_vsvvl, 129373}, // __builtin_ve_vl_vaddsl_vsvvl
      {Intrinsic::ve_vl_vaddsl_vvvl, 129402}, // __builtin_ve_vl_vaddsl_vvvl
      {Intrinsic::ve_vl_vaddsl_vvvmvl, 129430}, // __builtin_ve_vl_vaddsl_vvvmvl
      {Intrinsic::ve_vl_vaddsl_vvvvl, 129460}, // __builtin_ve_vl_vaddsl_vvvvl
      {Intrinsic::ve_vl_vaddswsx_vsvl, 129489}, // __builtin_ve_vl_vaddswsx_vsvl
      {Intrinsic::ve_vl_vaddswsx_vsvmvl, 129519}, // __builtin_ve_vl_vaddswsx_vsvmvl
      {Intrinsic::ve_vl_vaddswsx_vsvvl, 129551}, // __builtin_ve_vl_vaddswsx_vsvvl
      {Intrinsic::ve_vl_vaddswsx_vvvl, 129582}, // __builtin_ve_vl_vaddswsx_vvvl
      {Intrinsic::ve_vl_vaddswsx_vvvmvl, 129612}, // __builtin_ve_vl_vaddswsx_vvvmvl
      {Intrinsic::ve_vl_vaddswsx_vvvvl, 129644}, // __builtin_ve_vl_vaddswsx_vvvvl
      {Intrinsic::ve_vl_vaddswzx_vsvl, 129675}, // __builtin_ve_vl_vaddswzx_vsvl
      {Intrinsic::ve_vl_vaddswzx_vsvmvl, 129705}, // __builtin_ve_vl_vaddswzx_vsvmvl
      {Intrinsic::ve_vl_vaddswzx_vsvvl, 129737}, // __builtin_ve_vl_vaddswzx_vsvvl
      {Intrinsic::ve_vl_vaddswzx_vvvl, 129768}, // __builtin_ve_vl_vaddswzx_vvvl
      {Intrinsic::ve_vl_vaddswzx_vvvmvl, 129798}, // __builtin_ve_vl_vaddswzx_vvvmvl
      {Intrinsic::ve_vl_vaddswzx_vvvvl, 129830}, // __builtin_ve_vl_vaddswzx_vvvvl
      {Intrinsic::ve_vl_vaddul_vsvl, 129861}, // __builtin_ve_vl_vaddul_vsvl
      {Intrinsic::ve_vl_vaddul_vsvmvl, 129889}, // __builtin_ve_vl_vaddul_vsvmvl
      {Intrinsic::ve_vl_vaddul_vsvvl, 129919}, // __builtin_ve_vl_vaddul_vsvvl
      {Intrinsic::ve_vl_vaddul_vvvl, 129948}, // __builtin_ve_vl_vaddul_vvvl
      {Intrinsic::ve_vl_vaddul_vvvmvl, 129976}, // __builtin_ve_vl_vaddul_vvvmvl
      {Intrinsic::ve_vl_vaddul_vvvvl, 130006}, // __builtin_ve_vl_vaddul_vvvvl
      {Intrinsic::ve_vl_vadduw_vsvl, 130035}, // __builtin_ve_vl_vadduw_vsvl
      {Intrinsic::ve_vl_vadduw_vsvmvl, 130063}, // __builtin_ve_vl_vadduw_vsvmvl
      {Intrinsic::ve_vl_vadduw_vsvvl, 130093}, // __builtin_ve_vl_vadduw_vsvvl
      {Intrinsic::ve_vl_vadduw_vvvl, 130122}, // __builtin_ve_vl_vadduw_vvvl
      {Intrinsic::ve_vl_vadduw_vvvmvl, 130150}, // __builtin_ve_vl_vadduw_vvvmvl
      {Intrinsic::ve_vl_vadduw_vvvvl, 130180}, // __builtin_ve_vl_vadduw_vvvvl
      {Intrinsic::ve_vl_vand_vsvl, 130209}, // __builtin_ve_vl_vand_vsvl
      {Intrinsic::ve_vl_vand_vsvmvl, 130235}, // __builtin_ve_vl_vand_vsvmvl
      {Intrinsic::ve_vl_vand_vsvvl, 130263}, // __builtin_ve_vl_vand_vsvvl
      {Intrinsic::ve_vl_vand_vvvl, 130290}, // __builtin_ve_vl_vand_vvvl
      {Intrinsic::ve_vl_vand_vvvmvl, 130316}, // __builtin_ve_vl_vand_vvvmvl
      {Intrinsic::ve_vl_vand_vvvvl, 130344}, // __builtin_ve_vl_vand_vvvvl
      {Intrinsic::ve_vl_vbrdd_vsl, 130371}, // __builtin_ve_vl_vbrdd_vsl
      {Intrinsic::ve_vl_vbrdd_vsmvl, 130397}, // __builtin_ve_vl_vbrdd_vsmvl
      {Intrinsic::ve_vl_vbrdd_vsvl, 130425}, // __builtin_ve_vl_vbrdd_vsvl
      {Intrinsic::ve_vl_vbrdl_vsl, 130452}, // __builtin_ve_vl_vbrdl_vsl
      {Intrinsic::ve_vl_vbrdl_vsmvl, 130478}, // __builtin_ve_vl_vbrdl_vsmvl
      {Intrinsic::ve_vl_vbrdl_vsvl, 130506}, // __builtin_ve_vl_vbrdl_vsvl
      {Intrinsic::ve_vl_vbrds_vsl, 130533}, // __builtin_ve_vl_vbrds_vsl
      {Intrinsic::ve_vl_vbrds_vsmvl, 130559}, // __builtin_ve_vl_vbrds_vsmvl
      {Intrinsic::ve_vl_vbrds_vsvl, 130587}, // __builtin_ve_vl_vbrds_vsvl
      {Intrinsic::ve_vl_vbrdw_vsl, 130614}, // __builtin_ve_vl_vbrdw_vsl
      {Intrinsic::ve_vl_vbrdw_vsmvl, 130640}, // __builtin_ve_vl_vbrdw_vsmvl
      {Intrinsic::ve_vl_vbrdw_vsvl, 130668}, // __builtin_ve_vl_vbrdw_vsvl
      {Intrinsic::ve_vl_vbrv_vvl, 130695}, // __builtin_ve_vl_vbrv_vvl
      {Intrinsic::ve_vl_vbrv_vvmvl, 130720}, // __builtin_ve_vl_vbrv_vvmvl
      {Intrinsic::ve_vl_vbrv_vvvl, 130747}, // __builtin_ve_vl_vbrv_vvvl
      {Intrinsic::ve_vl_vcmpsl_vsvl, 130773}, // __builtin_ve_vl_vcmpsl_vsvl
      {Intrinsic::ve_vl_vcmpsl_vsvmvl, 130801}, // __builtin_ve_vl_vcmpsl_vsvmvl
      {Intrinsic::ve_vl_vcmpsl_vsvvl, 130831}, // __builtin_ve_vl_vcmpsl_vsvvl
      {Intrinsic::ve_vl_vcmpsl_vvvl, 130860}, // __builtin_ve_vl_vcmpsl_vvvl
      {Intrinsic::ve_vl_vcmpsl_vvvmvl, 130888}, // __builtin_ve_vl_vcmpsl_vvvmvl
      {Intrinsic::ve_vl_vcmpsl_vvvvl, 130918}, // __builtin_ve_vl_vcmpsl_vvvvl
      {Intrinsic::ve_vl_vcmpswsx_vsvl, 130947}, // __builtin_ve_vl_vcmpswsx_vsvl
      {Intrinsic::ve_vl_vcmpswsx_vsvmvl, 130977}, // __builtin_ve_vl_vcmpswsx_vsvmvl
      {Intrinsic::ve_vl_vcmpswsx_vsvvl, 131009}, // __builtin_ve_vl_vcmpswsx_vsvvl
      {Intrinsic::ve_vl_vcmpswsx_vvvl, 131040}, // __builtin_ve_vl_vcmpswsx_vvvl
      {Intrinsic::ve_vl_vcmpswsx_vvvmvl, 131070}, // __builtin_ve_vl_vcmpswsx_vvvmvl
      {Intrinsic::ve_vl_vcmpswsx_vvvvl, 131102}, // __builtin_ve_vl_vcmpswsx_vvvvl
      {Intrinsic::ve_vl_vcmpswzx_vsvl, 131133}, // __builtin_ve_vl_vcmpswzx_vsvl
      {Intrinsic::ve_vl_vcmpswzx_vsvmvl, 131163}, // __builtin_ve_vl_vcmpswzx_vsvmvl
      {Intrinsic::ve_vl_vcmpswzx_vsvvl, 131195}, // __builtin_ve_vl_vcmpswzx_vsvvl
      {Intrinsic::ve_vl_vcmpswzx_vvvl, 131226}, // __builtin_ve_vl_vcmpswzx_vvvl
      {Intrinsic::ve_vl_vcmpswzx_vvvmvl, 131256}, // __builtin_ve_vl_vcmpswzx_vvvmvl
      {Intrinsic::ve_vl_vcmpswzx_vvvvl, 131288}, // __builtin_ve_vl_vcmpswzx_vvvvl
      {Intrinsic::ve_vl_vcmpul_vsvl, 131319}, // __builtin_ve_vl_vcmpul_vsvl
      {Intrinsic::ve_vl_vcmpul_vsvmvl, 131347}, // __builtin_ve_vl_vcmpul_vsvmvl
      {Intrinsic::ve_vl_vcmpul_vsvvl, 131377}, // __builtin_ve_vl_vcmpul_vsvvl
      {Intrinsic::ve_vl_vcmpul_vvvl, 131406}, // __builtin_ve_vl_vcmpul_vvvl
      {Intrinsic::ve_vl_vcmpul_vvvmvl, 131434}, // __builtin_ve_vl_vcmpul_vvvmvl
      {Intrinsic::ve_vl_vcmpul_vvvvl, 131464}, // __builtin_ve_vl_vcmpul_vvvvl
      {Intrinsic::ve_vl_vcmpuw_vsvl, 131493}, // __builtin_ve_vl_vcmpuw_vsvl
      {Intrinsic::ve_vl_vcmpuw_vsvmvl, 131521}, // __builtin_ve_vl_vcmpuw_vsvmvl
      {Intrinsic::ve_vl_vcmpuw_vsvvl, 131551}, // __builtin_ve_vl_vcmpuw_vsvvl
      {Intrinsic::ve_vl_vcmpuw_vvvl, 131580}, // __builtin_ve_vl_vcmpuw_vvvl
      {Intrinsic::ve_vl_vcmpuw_vvvmvl, 131608}, // __builtin_ve_vl_vcmpuw_vvvmvl
      {Intrinsic::ve_vl_vcmpuw_vvvvl, 131638}, // __builtin_ve_vl_vcmpuw_vvvvl
      {Intrinsic::ve_vl_vcp_vvmvl, 131667}, // __builtin_ve_vl_vcp_vvmvl
      {Intrinsic::ve_vl_vcvtdl_vvl, 131693}, // __builtin_ve_vl_vcvtdl_vvl
      {Intrinsic::ve_vl_vcvtdl_vvvl, 131720}, // __builtin_ve_vl_vcvtdl_vvvl
      {Intrinsic::ve_vl_vcvtds_vvl, 131748}, // __builtin_ve_vl_vcvtds_vvl
      {Intrinsic::ve_vl_vcvtds_vvvl, 131775}, // __builtin_ve_vl_vcvtds_vvvl
      {Intrinsic::ve_vl_vcvtdw_vvl, 131803}, // __builtin_ve_vl_vcvtdw_vvl
      {Intrinsic::ve_vl_vcvtdw_vvvl, 131830}, // __builtin_ve_vl_vcvtdw_vvvl
      {Intrinsic::ve_vl_vcvtld_vvl, 131858}, // __builtin_ve_vl_vcvtld_vvl
      {Intrinsic::ve_vl_vcvtld_vvmvl, 131885}, // __builtin_ve_vl_vcvtld_vvmvl
      {Intrinsic::ve_vl_vcvtld_vvvl, 131914}, // __builtin_ve_vl_vcvtld_vvvl
      {Intrinsic::ve_vl_vcvtldrz_vvl, 131942}, // __builtin_ve_vl_vcvtldrz_vvl
      {Intrinsic::ve_vl_vcvtldrz_vvmvl, 131971}, // __builtin_ve_vl_vcvtldrz_vvmvl
      {Intrinsic::ve_vl_vcvtldrz_vvvl, 132002}, // __builtin_ve_vl_vcvtldrz_vvvl
      {Intrinsic::ve_vl_vcvtsd_vvl, 132032}, // __builtin_ve_vl_vcvtsd_vvl
      {Intrinsic::ve_vl_vcvtsd_vvvl, 132059}, // __builtin_ve_vl_vcvtsd_vvvl
      {Intrinsic::ve_vl_vcvtsw_vvl, 132087}, // __builtin_ve_vl_vcvtsw_vvl
      {Intrinsic::ve_vl_vcvtsw_vvvl, 132114}, // __builtin_ve_vl_vcvtsw_vvvl
      {Intrinsic::ve_vl_vcvtwdsx_vvl, 132142}, // __builtin_ve_vl_vcvtwdsx_vvl
      {Intrinsic::ve_vl_vcvtwdsx_vvmvl, 132171}, // __builtin_ve_vl_vcvtwdsx_vvmvl
      {Intrinsic::ve_vl_vcvtwdsx_vvvl, 132202}, // __builtin_ve_vl_vcvtwdsx_vvvl
      {Intrinsic::ve_vl_vcvtwdsxrz_vvl, 132232}, // __builtin_ve_vl_vcvtwdsxrz_vvl
      {Intrinsic::ve_vl_vcvtwdsxrz_vvmvl, 132263}, // __builtin_ve_vl_vcvtwdsxrz_vvmvl
      {Intrinsic::ve_vl_vcvtwdsxrz_vvvl, 132296}, // __builtin_ve_vl_vcvtwdsxrz_vvvl
      {Intrinsic::ve_vl_vcvtwdzx_vvl, 132328}, // __builtin_ve_vl_vcvtwdzx_vvl
      {Intrinsic::ve_vl_vcvtwdzx_vvmvl, 132357}, // __builtin_ve_vl_vcvtwdzx_vvmvl
      {Intrinsic::ve_vl_vcvtwdzx_vvvl, 132388}, // __builtin_ve_vl_vcvtwdzx_vvvl
      {Intrinsic::ve_vl_vcvtwdzxrz_vvl, 132418}, // __builtin_ve_vl_vcvtwdzxrz_vvl
      {Intrinsic::ve_vl_vcvtwdzxrz_vvmvl, 132449}, // __builtin_ve_vl_vcvtwdzxrz_vvmvl
      {Intrinsic::ve_vl_vcvtwdzxrz_vvvl, 132482}, // __builtin_ve_vl_vcvtwdzxrz_vvvl
      {Intrinsic::ve_vl_vcvtwssx_vvl, 132514}, // __builtin_ve_vl_vcvtwssx_vvl
      {Intrinsic::ve_vl_vcvtwssx_vvmvl, 132543}, // __builtin_ve_vl_vcvtwssx_vvmvl
      {Intrinsic::ve_vl_vcvtwssx_vvvl, 132574}, // __builtin_ve_vl_vcvtwssx_vvvl
      {Intrinsic::ve_vl_vcvtwssxrz_vvl, 132604}, // __builtin_ve_vl_vcvtwssxrz_vvl
      {Intrinsic::ve_vl_vcvtwssxrz_vvmvl, 132635}, // __builtin_ve_vl_vcvtwssxrz_vvmvl
      {Intrinsic::ve_vl_vcvtwssxrz_vvvl, 132668}, // __builtin_ve_vl_vcvtwssxrz_vvvl
      {Intrinsic::ve_vl_vcvtwszx_vvl, 132700}, // __builtin_ve_vl_vcvtwszx_vvl
      {Intrinsic::ve_vl_vcvtwszx_vvmvl, 132729}, // __builtin_ve_vl_vcvtwszx_vvmvl
      {Intrinsic::ve_vl_vcvtwszx_vvvl, 132760}, // __builtin_ve_vl_vcvtwszx_vvvl
      {Intrinsic::ve_vl_vcvtwszxrz_vvl, 132790}, // __builtin_ve_vl_vcvtwszxrz_vvl
      {Intrinsic::ve_vl_vcvtwszxrz_vvmvl, 132821}, // __builtin_ve_vl_vcvtwszxrz_vvmvl
      {Intrinsic::ve_vl_vcvtwszxrz_vvvl, 132854}, // __builtin_ve_vl_vcvtwszxrz_vvvl
      {Intrinsic::ve_vl_vdivsl_vsvl, 132886}, // __builtin_ve_vl_vdivsl_vsvl
      {Intrinsic::ve_vl_vdivsl_vsvmvl, 132914}, // __builtin_ve_vl_vdivsl_vsvmvl
      {Intrinsic::ve_vl_vdivsl_vsvvl, 132944}, // __builtin_ve_vl_vdivsl_vsvvl
      {Intrinsic::ve_vl_vdivsl_vvsl, 132973}, // __builtin_ve_vl_vdivsl_vvsl
      {Intrinsic::ve_vl_vdivsl_vvsmvl, 133001}, // __builtin_ve_vl_vdivsl_vvsmvl
      {Intrinsic::ve_vl_vdivsl_vvsvl, 133031}, // __builtin_ve_vl_vdivsl_vvsvl
      {Intrinsic::ve_vl_vdivsl_vvvl, 133060}, // __builtin_ve_vl_vdivsl_vvvl
      {Intrinsic::ve_vl_vdivsl_vvvmvl, 133088}, // __builtin_ve_vl_vdivsl_vvvmvl
      {Intrinsic::ve_vl_vdivsl_vvvvl, 133118}, // __builtin_ve_vl_vdivsl_vvvvl
      {Intrinsic::ve_vl_vdivswsx_vsvl, 133147}, // __builtin_ve_vl_vdivswsx_vsvl
      {Intrinsic::ve_vl_vdivswsx_vsvmvl, 133177}, // __builtin_ve_vl_vdivswsx_vsvmvl
      {Intrinsic::ve_vl_vdivswsx_vsvvl, 133209}, // __builtin_ve_vl_vdivswsx_vsvvl
      {Intrinsic::ve_vl_vdivswsx_vvsl, 133240}, // __builtin_ve_vl_vdivswsx_vvsl
      {Intrinsic::ve_vl_vdivswsx_vvsmvl, 133270}, // __builtin_ve_vl_vdivswsx_vvsmvl
      {Intrinsic::ve_vl_vdivswsx_vvsvl, 133302}, // __builtin_ve_vl_vdivswsx_vvsvl
      {Intrinsic::ve_vl_vdivswsx_vvvl, 133333}, // __builtin_ve_vl_vdivswsx_vvvl
      {Intrinsic::ve_vl_vdivswsx_vvvmvl, 133363}, // __builtin_ve_vl_vdivswsx_vvvmvl
      {Intrinsic::ve_vl_vdivswsx_vvvvl, 133395}, // __builtin_ve_vl_vdivswsx_vvvvl
      {Intrinsic::ve_vl_vdivswzx_vsvl, 133426}, // __builtin_ve_vl_vdivswzx_vsvl
      {Intrinsic::ve_vl_vdivswzx_vsvmvl, 133456}, // __builtin_ve_vl_vdivswzx_vsvmvl
      {Intrinsic::ve_vl_vdivswzx_vsvvl, 133488}, // __builtin_ve_vl_vdivswzx_vsvvl
      {Intrinsic::ve_vl_vdivswzx_vvsl, 133519}, // __builtin_ve_vl_vdivswzx_vvsl
      {Intrinsic::ve_vl_vdivswzx_vvsmvl, 133549}, // __builtin_ve_vl_vdivswzx_vvsmvl
      {Intrinsic::ve_vl_vdivswzx_vvsvl, 133581}, // __builtin_ve_vl_vdivswzx_vvsvl
      {Intrinsic::ve_vl_vdivswzx_vvvl, 133612}, // __builtin_ve_vl_vdivswzx_vvvl
      {Intrinsic::ve_vl_vdivswzx_vvvmvl, 133642}, // __builtin_ve_vl_vdivswzx_vvvmvl
      {Intrinsic::ve_vl_vdivswzx_vvvvl, 133674}, // __builtin_ve_vl_vdivswzx_vvvvl
      {Intrinsic::ve_vl_vdivul_vsvl, 133705}, // __builtin_ve_vl_vdivul_vsvl
      {Intrinsic::ve_vl_vdivul_vsvmvl, 133733}, // __builtin_ve_vl_vdivul_vsvmvl
      {Intrinsic::ve_vl_vdivul_vsvvl, 133763}, // __builtin_ve_vl_vdivul_vsvvl
      {Intrinsic::ve_vl_vdivul_vvsl, 133792}, // __builtin_ve_vl_vdivul_vvsl
      {Intrinsic::ve_vl_vdivul_vvsmvl, 133820}, // __builtin_ve_vl_vdivul_vvsmvl
      {Intrinsic::ve_vl_vdivul_vvsvl, 133850}, // __builtin_ve_vl_vdivul_vvsvl
      {Intrinsic::ve_vl_vdivul_vvvl, 133879}, // __builtin_ve_vl_vdivul_vvvl
      {Intrinsic::ve_vl_vdivul_vvvmvl, 133907}, // __builtin_ve_vl_vdivul_vvvmvl
      {Intrinsic::ve_vl_vdivul_vvvvl, 133937}, // __builtin_ve_vl_vdivul_vvvvl
      {Intrinsic::ve_vl_vdivuw_vsvl, 133966}, // __builtin_ve_vl_vdivuw_vsvl
      {Intrinsic::ve_vl_vdivuw_vsvmvl, 133994}, // __builtin_ve_vl_vdivuw_vsvmvl
      {Intrinsic::ve_vl_vdivuw_vsvvl, 134024}, // __builtin_ve_vl_vdivuw_vsvvl
      {Intrinsic::ve_vl_vdivuw_vvsl, 134053}, // __builtin_ve_vl_vdivuw_vvsl
      {Intrinsic::ve_vl_vdivuw_vvsmvl, 134081}, // __builtin_ve_vl_vdivuw_vvsmvl
      {Intrinsic::ve_vl_vdivuw_vvsvl, 134111}, // __builtin_ve_vl_vdivuw_vvsvl
      {Intrinsic::ve_vl_vdivuw_vvvl, 134140}, // __builtin_ve_vl_vdivuw_vvvl
      {Intrinsic::ve_vl_vdivuw_vvvmvl, 134168}, // __builtin_ve_vl_vdivuw_vvvmvl
      {Intrinsic::ve_vl_vdivuw_vvvvl, 134198}, // __builtin_ve_vl_vdivuw_vvvvl
      {Intrinsic::ve_vl_veqv_vsvl, 134227}, // __builtin_ve_vl_veqv_vsvl
      {Intrinsic::ve_vl_veqv_vsvmvl, 134253}, // __builtin_ve_vl_veqv_vsvmvl
      {Intrinsic::ve_vl_veqv_vsvvl, 134281}, // __builtin_ve_vl_veqv_vsvvl
      {Intrinsic::ve_vl_veqv_vvvl, 134308}, // __builtin_ve_vl_veqv_vvvl
      {Intrinsic::ve_vl_veqv_vvvmvl, 134334}, // __builtin_ve_vl_veqv_vvvmvl
      {Intrinsic::ve_vl_veqv_vvvvl, 134362}, // __builtin_ve_vl_veqv_vvvvl
      {Intrinsic::ve_vl_vex_vvmvl, 134389}, // __builtin_ve_vl_vex_vvmvl
      {Intrinsic::ve_vl_vfaddd_vsvl, 134415}, // __builtin_ve_vl_vfaddd_vsvl
      {Intrinsic::ve_vl_vfaddd_vsvmvl, 134443}, // __builtin_ve_vl_vfaddd_vsvmvl
      {Intrinsic::ve_vl_vfaddd_vsvvl, 134473}, // __builtin_ve_vl_vfaddd_vsvvl
      {Intrinsic::ve_vl_vfaddd_vvvl, 134502}, // __builtin_ve_vl_vfaddd_vvvl
      {Intrinsic::ve_vl_vfaddd_vvvmvl, 134530}, // __builtin_ve_vl_vfaddd_vvvmvl
      {Intrinsic::ve_vl_vfaddd_vvvvl, 134560}, // __builtin_ve_vl_vfaddd_vvvvl
      {Intrinsic::ve_vl_vfadds_vsvl, 134589}, // __builtin_ve_vl_vfadds_vsvl
      {Intrinsic::ve_vl_vfadds_vsvmvl, 134617}, // __builtin_ve_vl_vfadds_vsvmvl
      {Intrinsic::ve_vl_vfadds_vsvvl, 134647}, // __builtin_ve_vl_vfadds_vsvvl
      {Intrinsic::ve_vl_vfadds_vvvl, 134676}, // __builtin_ve_vl_vfadds_vvvl
      {Intrinsic::ve_vl_vfadds_vvvmvl, 134704}, // __builtin_ve_vl_vfadds_vvvmvl
      {Intrinsic::ve_vl_vfadds_vvvvl, 134734}, // __builtin_ve_vl_vfadds_vvvvl
      {Intrinsic::ve_vl_vfcmpd_vsvl, 134763}, // __builtin_ve_vl_vfcmpd_vsvl
      {Intrinsic::ve_vl_vfcmpd_vsvmvl, 134791}, // __builtin_ve_vl_vfcmpd_vsvmvl
      {Intrinsic::ve_vl_vfcmpd_vsvvl, 134821}, // __builtin_ve_vl_vfcmpd_vsvvl
      {Intrinsic::ve_vl_vfcmpd_vvvl, 134850}, // __builtin_ve_vl_vfcmpd_vvvl
      {Intrinsic::ve_vl_vfcmpd_vvvmvl, 134878}, // __builtin_ve_vl_vfcmpd_vvvmvl
      {Intrinsic::ve_vl_vfcmpd_vvvvl, 134908}, // __builtin_ve_vl_vfcmpd_vvvvl
      {Intrinsic::ve_vl_vfcmps_vsvl, 134937}, // __builtin_ve_vl_vfcmps_vsvl
      {Intrinsic::ve_vl_vfcmps_vsvmvl, 134965}, // __builtin_ve_vl_vfcmps_vsvmvl
      {Intrinsic::ve_vl_vfcmps_vsvvl, 134995}, // __builtin_ve_vl_vfcmps_vsvvl
      {Intrinsic::ve_vl_vfcmps_vvvl, 135024}, // __builtin_ve_vl_vfcmps_vvvl
      {Intrinsic::ve_vl_vfcmps_vvvmvl, 135052}, // __builtin_ve_vl_vfcmps_vvvmvl
      {Intrinsic::ve_vl_vfcmps_vvvvl, 135082}, // __builtin_ve_vl_vfcmps_vvvvl
      {Intrinsic::ve_vl_vfdivd_vsvl, 135111}, // __builtin_ve_vl_vfdivd_vsvl
      {Intrinsic::ve_vl_vfdivd_vsvmvl, 135139}, // __builtin_ve_vl_vfdivd_vsvmvl
      {Intrinsic::ve_vl_vfdivd_vsvvl, 135169}, // __builtin_ve_vl_vfdivd_vsvvl
      {Intrinsic::ve_vl_vfdivd_vvvl, 135198}, // __builtin_ve_vl_vfdivd_vvvl
      {Intrinsic::ve_vl_vfdivd_vvvmvl, 135226}, // __builtin_ve_vl_vfdivd_vvvmvl
      {Intrinsic::ve_vl_vfdivd_vvvvl, 135256}, // __builtin_ve_vl_vfdivd_vvvvl
      {Intrinsic::ve_vl_vfdivs_vsvl, 135285}, // __builtin_ve_vl_vfdivs_vsvl
      {Intrinsic::ve_vl_vfdivs_vsvmvl, 135313}, // __builtin_ve_vl_vfdivs_vsvmvl
      {Intrinsic::ve_vl_vfdivs_vsvvl, 135343}, // __builtin_ve_vl_vfdivs_vsvvl
      {Intrinsic::ve_vl_vfdivs_vvvl, 135372}, // __builtin_ve_vl_vfdivs_vvvl
      {Intrinsic::ve_vl_vfdivs_vvvmvl, 135400}, // __builtin_ve_vl_vfdivs_vvvmvl
      {Intrinsic::ve_vl_vfdivs_vvvvl, 135430}, // __builtin_ve_vl_vfdivs_vvvvl
      {Intrinsic::ve_vl_vfmadd_vsvvl, 135459}, // __builtin_ve_vl_vfmadd_vsvvl
      {Intrinsic::ve_vl_vfmadd_vsvvmvl, 135488}, // __builtin_ve_vl_vfmadd_vsvvmvl
      {Intrinsic::ve_vl_vfmadd_vsvvvl, 135519}, // __builtin_ve_vl_vfmadd_vsvvvl
      {Intrinsic::ve_vl_vfmadd_vvsvl, 135549}, // __builtin_ve_vl_vfmadd_vvsvl
      {Intrinsic::ve_vl_vfmadd_vvsvmvl, 135578}, // __builtin_ve_vl_vfmadd_vvsvmvl
      {Intrinsic::ve_vl_vfmadd_vvsvvl, 135609}, // __builtin_ve_vl_vfmadd_vvsvvl
      {Intrinsic::ve_vl_vfmadd_vvvvl, 135639}, // __builtin_ve_vl_vfmadd_vvvvl
      {Intrinsic::ve_vl_vfmadd_vvvvmvl, 135668}, // __builtin_ve_vl_vfmadd_vvvvmvl
      {Intrinsic::ve_vl_vfmadd_vvvvvl, 135699}, // __builtin_ve_vl_vfmadd_vvvvvl
      {Intrinsic::ve_vl_vfmads_vsvvl, 135729}, // __builtin_ve_vl_vfmads_vsvvl
      {Intrinsic::ve_vl_vfmads_vsvvmvl, 135758}, // __builtin_ve_vl_vfmads_vsvvmvl
      {Intrinsic::ve_vl_vfmads_vsvvvl, 135789}, // __builtin_ve_vl_vfmads_vsvvvl
      {Intrinsic::ve_vl_vfmads_vvsvl, 135819}, // __builtin_ve_vl_vfmads_vvsvl
      {Intrinsic::ve_vl_vfmads_vvsvmvl, 135848}, // __builtin_ve_vl_vfmads_vvsvmvl
      {Intrinsic::ve_vl_vfmads_vvsvvl, 135879}, // __builtin_ve_vl_vfmads_vvsvvl
      {Intrinsic::ve_vl_vfmads_vvvvl, 135909}, // __builtin_ve_vl_vfmads_vvvvl
      {Intrinsic::ve_vl_vfmads_vvvvmvl, 135938}, // __builtin_ve_vl_vfmads_vvvvmvl
      {Intrinsic::ve_vl_vfmads_vvvvvl, 135969}, // __builtin_ve_vl_vfmads_vvvvvl
      {Intrinsic::ve_vl_vfmaxd_vsvl, 135999}, // __builtin_ve_vl_vfmaxd_vsvl
      {Intrinsic::ve_vl_vfmaxd_vsvmvl, 136027}, // __builtin_ve_vl_vfmaxd_vsvmvl
      {Intrinsic::ve_vl_vfmaxd_vsvvl, 136057}, // __builtin_ve_vl_vfmaxd_vsvvl
      {Intrinsic::ve_vl_vfmaxd_vvvl, 136086}, // __builtin_ve_vl_vfmaxd_vvvl
      {Intrinsic::ve_vl_vfmaxd_vvvmvl, 136114}, // __builtin_ve_vl_vfmaxd_vvvmvl
      {Intrinsic::ve_vl_vfmaxd_vvvvl, 136144}, // __builtin_ve_vl_vfmaxd_vvvvl
      {Intrinsic::ve_vl_vfmaxs_vsvl, 136173}, // __builtin_ve_vl_vfmaxs_vsvl
      {Intrinsic::ve_vl_vfmaxs_vsvmvl, 136201}, // __builtin_ve_vl_vfmaxs_vsvmvl
      {Intrinsic::ve_vl_vfmaxs_vsvvl, 136231}, // __builtin_ve_vl_vfmaxs_vsvvl
      {Intrinsic::ve_vl_vfmaxs_vvvl, 136260}, // __builtin_ve_vl_vfmaxs_vvvl
      {Intrinsic::ve_vl_vfmaxs_vvvmvl, 136288}, // __builtin_ve_vl_vfmaxs_vvvmvl
      {Intrinsic::ve_vl_vfmaxs_vvvvl, 136318}, // __builtin_ve_vl_vfmaxs_vvvvl
      {Intrinsic::ve_vl_vfmind_vsvl, 136347}, // __builtin_ve_vl_vfmind_vsvl
      {Intrinsic::ve_vl_vfmind_vsvmvl, 136375}, // __builtin_ve_vl_vfmind_vsvmvl
      {Intrinsic::ve_vl_vfmind_vsvvl, 136405}, // __builtin_ve_vl_vfmind_vsvvl
      {Intrinsic::ve_vl_vfmind_vvvl, 136434}, // __builtin_ve_vl_vfmind_vvvl
      {Intrinsic::ve_vl_vfmind_vvvmvl, 136462}, // __builtin_ve_vl_vfmind_vvvmvl
      {Intrinsic::ve_vl_vfmind_vvvvl, 136492}, // __builtin_ve_vl_vfmind_vvvvl
      {Intrinsic::ve_vl_vfmins_vsvl, 136521}, // __builtin_ve_vl_vfmins_vsvl
      {Intrinsic::ve_vl_vfmins_vsvmvl, 136549}, // __builtin_ve_vl_vfmins_vsvmvl
      {Intrinsic::ve_vl_vfmins_vsvvl, 136579}, // __builtin_ve_vl_vfmins_vsvvl
      {Intrinsic::ve_vl_vfmins_vvvl, 136608}, // __builtin_ve_vl_vfmins_vvvl
      {Intrinsic::ve_vl_vfmins_vvvmvl, 136636}, // __builtin_ve_vl_vfmins_vvvmvl
      {Intrinsic::ve_vl_vfmins_vvvvl, 136666}, // __builtin_ve_vl_vfmins_vvvvl
      {Intrinsic::ve_vl_vfmkdeq_mvl, 136695}, // __builtin_ve_vl_vfmkdeq_mvl
      {Intrinsic::ve_vl_vfmkdeq_mvml, 136723}, // __builtin_ve_vl_vfmkdeq_mvml
      {Intrinsic::ve_vl_vfmkdeqnan_mvl, 136752}, // __builtin_ve_vl_vfmkdeqnan_mvl
      {Intrinsic::ve_vl_vfmkdeqnan_mvml, 136783}, // __builtin_ve_vl_vfmkdeqnan_mvml
      {Intrinsic::ve_vl_vfmkdge_mvl, 136815}, // __builtin_ve_vl_vfmkdge_mvl
      {Intrinsic::ve_vl_vfmkdge_mvml, 136843}, // __builtin_ve_vl_vfmkdge_mvml
      {Intrinsic::ve_vl_vfmkdgenan_mvl, 136872}, // __builtin_ve_vl_vfmkdgenan_mvl
      {Intrinsic::ve_vl_vfmkdgenan_mvml, 136903}, // __builtin_ve_vl_vfmkdgenan_mvml
      {Intrinsic::ve_vl_vfmkdgt_mvl, 136935}, // __builtin_ve_vl_vfmkdgt_mvl
      {Intrinsic::ve_vl_vfmkdgt_mvml, 136963}, // __builtin_ve_vl_vfmkdgt_mvml
      {Intrinsic::ve_vl_vfmkdgtnan_mvl, 136992}, // __builtin_ve_vl_vfmkdgtnan_mvl
      {Intrinsic::ve_vl_vfmkdgtnan_mvml, 137023}, // __builtin_ve_vl_vfmkdgtnan_mvml
      {Intrinsic::ve_vl_vfmkdle_mvl, 137055}, // __builtin_ve_vl_vfmkdle_mvl
      {Intrinsic::ve_vl_vfmkdle_mvml, 137083}, // __builtin_ve_vl_vfmkdle_mvml
      {Intrinsic::ve_vl_vfmkdlenan_mvl, 137112}, // __builtin_ve_vl_vfmkdlenan_mvl
      {Intrinsic::ve_vl_vfmkdlenan_mvml, 137143}, // __builtin_ve_vl_vfmkdlenan_mvml
      {Intrinsic::ve_vl_vfmkdlt_mvl, 137175}, // __builtin_ve_vl_vfmkdlt_mvl
      {Intrinsic::ve_vl_vfmkdlt_mvml, 137203}, // __builtin_ve_vl_vfmkdlt_mvml
      {Intrinsic::ve_vl_vfmkdltnan_mvl, 137232}, // __builtin_ve_vl_vfmkdltnan_mvl
      {Intrinsic::ve_vl_vfmkdltnan_mvml, 137263}, // __builtin_ve_vl_vfmkdltnan_mvml
      {Intrinsic::ve_vl_vfmkdnan_mvl, 137295}, // __builtin_ve_vl_vfmkdnan_mvl
      {Intrinsic::ve_vl_vfmkdnan_mvml, 137324}, // __builtin_ve_vl_vfmkdnan_mvml
      {Intrinsic::ve_vl_vfmkdne_mvl, 137354}, // __builtin_ve_vl_vfmkdne_mvl
      {Intrinsic::ve_vl_vfmkdne_mvml, 137382}, // __builtin_ve_vl_vfmkdne_mvml
      {Intrinsic::ve_vl_vfmkdnenan_mvl, 137411}, // __builtin_ve_vl_vfmkdnenan_mvl
      {Intrinsic::ve_vl_vfmkdnenan_mvml, 137442}, // __builtin_ve_vl_vfmkdnenan_mvml
      {Intrinsic::ve_vl_vfmkdnum_mvl, 137474}, // __builtin_ve_vl_vfmkdnum_mvl
      {Intrinsic::ve_vl_vfmkdnum_mvml, 137503}, // __builtin_ve_vl_vfmkdnum_mvml
      {Intrinsic::ve_vl_vfmklaf_ml, 137533}, // __builtin_ve_vl_vfmklaf_ml
      {Intrinsic::ve_vl_vfmklat_ml, 137560}, // __builtin_ve_vl_vfmklat_ml
      {Intrinsic::ve_vl_vfmkleq_mvl, 137587}, // __builtin_ve_vl_vfmkleq_mvl
      {Intrinsic::ve_vl_vfmkleq_mvml, 137615}, // __builtin_ve_vl_vfmkleq_mvml
      {Intrinsic::ve_vl_vfmkleqnan_mvl, 137644}, // __builtin_ve_vl_vfmkleqnan_mvl
      {Intrinsic::ve_vl_vfmkleqnan_mvml, 137675}, // __builtin_ve_vl_vfmkleqnan_mvml
      {Intrinsic::ve_vl_vfmklge_mvl, 137707}, // __builtin_ve_vl_vfmklge_mvl
      {Intrinsic::ve_vl_vfmklge_mvml, 137735}, // __builtin_ve_vl_vfmklge_mvml
      {Intrinsic::ve_vl_vfmklgenan_mvl, 137764}, // __builtin_ve_vl_vfmklgenan_mvl
      {Intrinsic::ve_vl_vfmklgenan_mvml, 137795}, // __builtin_ve_vl_vfmklgenan_mvml
      {Intrinsic::ve_vl_vfmklgt_mvl, 137827}, // __builtin_ve_vl_vfmklgt_mvl
      {Intrinsic::ve_vl_vfmklgt_mvml, 137855}, // __builtin_ve_vl_vfmklgt_mvml
      {Intrinsic::ve_vl_vfmklgtnan_mvl, 137884}, // __builtin_ve_vl_vfmklgtnan_mvl
      {Intrinsic::ve_vl_vfmklgtnan_mvml, 137915}, // __builtin_ve_vl_vfmklgtnan_mvml
      {Intrinsic::ve_vl_vfmklle_mvl, 137947}, // __builtin_ve_vl_vfmklle_mvl
      {Intrinsic::ve_vl_vfmklle_mvml, 137975}, // __builtin_ve_vl_vfmklle_mvml
      {Intrinsic::ve_vl_vfmkllenan_mvl, 138004}, // __builtin_ve_vl_vfmkllenan_mvl
      {Intrinsic::ve_vl_vfmkllenan_mvml, 138035}, // __builtin_ve_vl_vfmkllenan_mvml
      {Intrinsic::ve_vl_vfmkllt_mvl, 138067}, // __builtin_ve_vl_vfmkllt_mvl
      {Intrinsic::ve_vl_vfmkllt_mvml, 138095}, // __builtin_ve_vl_vfmkllt_mvml
      {Intrinsic::ve_vl_vfmklltnan_mvl, 138124}, // __builtin_ve_vl_vfmklltnan_mvl
      {Intrinsic::ve_vl_vfmklltnan_mvml, 138155}, // __builtin_ve_vl_vfmklltnan_mvml
      {Intrinsic::ve_vl_vfmklnan_mvl, 138187}, // __builtin_ve_vl_vfmklnan_mvl
      {Intrinsic::ve_vl_vfmklnan_mvml, 138216}, // __builtin_ve_vl_vfmklnan_mvml
      {Intrinsic::ve_vl_vfmklne_mvl, 138246}, // __builtin_ve_vl_vfmklne_mvl
      {Intrinsic::ve_vl_vfmklne_mvml, 138274}, // __builtin_ve_vl_vfmklne_mvml
      {Intrinsic::ve_vl_vfmklnenan_mvl, 138303}, // __builtin_ve_vl_vfmklnenan_mvl
      {Intrinsic::ve_vl_vfmklnenan_mvml, 138334}, // __builtin_ve_vl_vfmklnenan_mvml
      {Intrinsic::ve_vl_vfmklnum_mvl, 138366}, // __builtin_ve_vl_vfmklnum_mvl
      {Intrinsic::ve_vl_vfmklnum_mvml, 138395}, // __builtin_ve_vl_vfmklnum_mvml
      {Intrinsic::ve_vl_vfmkseq_mvl, 138425}, // __builtin_ve_vl_vfmkseq_mvl
      {Intrinsic::ve_vl_vfmkseq_mvml, 138453}, // __builtin_ve_vl_vfmkseq_mvml
      {Intrinsic::ve_vl_vfmkseqnan_mvl, 138482}, // __builtin_ve_vl_vfmkseqnan_mvl
      {Intrinsic::ve_vl_vfmkseqnan_mvml, 138513}, // __builtin_ve_vl_vfmkseqnan_mvml
      {Intrinsic::ve_vl_vfmksge_mvl, 138545}, // __builtin_ve_vl_vfmksge_mvl
      {Intrinsic::ve_vl_vfmksge_mvml, 138573}, // __builtin_ve_vl_vfmksge_mvml
      {Intrinsic::ve_vl_vfmksgenan_mvl, 138602}, // __builtin_ve_vl_vfmksgenan_mvl
      {Intrinsic::ve_vl_vfmksgenan_mvml, 138633}, // __builtin_ve_vl_vfmksgenan_mvml
      {Intrinsic::ve_vl_vfmksgt_mvl, 138665}, // __builtin_ve_vl_vfmksgt_mvl
      {Intrinsic::ve_vl_vfmksgt_mvml, 138693}, // __builtin_ve_vl_vfmksgt_mvml
      {Intrinsic::ve_vl_vfmksgtnan_mvl, 138722}, // __builtin_ve_vl_vfmksgtnan_mvl
      {Intrinsic::ve_vl_vfmksgtnan_mvml, 138753}, // __builtin_ve_vl_vfmksgtnan_mvml
      {Intrinsic::ve_vl_vfmksle_mvl, 138785}, // __builtin_ve_vl_vfmksle_mvl
      {Intrinsic::ve_vl_vfmksle_mvml, 138813}, // __builtin_ve_vl_vfmksle_mvml
      {Intrinsic::ve_vl_vfmkslenan_mvl, 138842}, // __builtin_ve_vl_vfmkslenan_mvl
      {Intrinsic::ve_vl_vfmkslenan_mvml, 138873}, // __builtin_ve_vl_vfmkslenan_mvml
      {Intrinsic::ve_vl_vfmkslt_mvl, 138905}, // __builtin_ve_vl_vfmkslt_mvl
      {Intrinsic::ve_vl_vfmkslt_mvml, 138933}, // __builtin_ve_vl_vfmkslt_mvml
      {Intrinsic::ve_vl_vfmksltnan_mvl, 138962}, // __builtin_ve_vl_vfmksltnan_mvl
      {Intrinsic::ve_vl_vfmksltnan_mvml, 138993}, // __builtin_ve_vl_vfmksltnan_mvml
      {Intrinsic::ve_vl_vfmksnan_mvl, 139025}, // __builtin_ve_vl_vfmksnan_mvl
      {Intrinsic::ve_vl_vfmksnan_mvml, 139054}, // __builtin_ve_vl_vfmksnan_mvml
      {Intrinsic::ve_vl_vfmksne_mvl, 139084}, // __builtin_ve_vl_vfmksne_mvl
      {Intrinsic::ve_vl_vfmksne_mvml, 139112}, // __builtin_ve_vl_vfmksne_mvml
      {Intrinsic::ve_vl_vfmksnenan_mvl, 139141}, // __builtin_ve_vl_vfmksnenan_mvl
      {Intrinsic::ve_vl_vfmksnenan_mvml, 139172}, // __builtin_ve_vl_vfmksnenan_mvml
      {Intrinsic::ve_vl_vfmksnum_mvl, 139204}, // __builtin_ve_vl_vfmksnum_mvl
      {Intrinsic::ve_vl_vfmksnum_mvml, 139233}, // __builtin_ve_vl_vfmksnum_mvml
      {Intrinsic::ve_vl_vfmkweq_mvl, 139263}, // __builtin_ve_vl_vfmkweq_mvl
      {Intrinsic::ve_vl_vfmkweq_mvml, 139291}, // __builtin_ve_vl_vfmkweq_mvml
      {Intrinsic::ve_vl_vfmkweqnan_mvl, 139320}, // __builtin_ve_vl_vfmkweqnan_mvl
      {Intrinsic::ve_vl_vfmkweqnan_mvml, 139351}, // __builtin_ve_vl_vfmkweqnan_mvml
      {Intrinsic::ve_vl_vfmkwge_mvl, 139383}, // __builtin_ve_vl_vfmkwge_mvl
      {Intrinsic::ve_vl_vfmkwge_mvml, 139411}, // __builtin_ve_vl_vfmkwge_mvml
      {Intrinsic::ve_vl_vfmkwgenan_mvl, 139440}, // __builtin_ve_vl_vfmkwgenan_mvl
      {Intrinsic::ve_vl_vfmkwgenan_mvml, 139471}, // __builtin_ve_vl_vfmkwgenan_mvml
      {Intrinsic::ve_vl_vfmkwgt_mvl, 139503}, // __builtin_ve_vl_vfmkwgt_mvl
      {Intrinsic::ve_vl_vfmkwgt_mvml, 139531}, // __builtin_ve_vl_vfmkwgt_mvml
      {Intrinsic::ve_vl_vfmkwgtnan_mvl, 139560}, // __builtin_ve_vl_vfmkwgtnan_mvl
      {Intrinsic::ve_vl_vfmkwgtnan_mvml, 139591}, // __builtin_ve_vl_vfmkwgtnan_mvml
      {Intrinsic::ve_vl_vfmkwle_mvl, 139623}, // __builtin_ve_vl_vfmkwle_mvl
      {Intrinsic::ve_vl_vfmkwle_mvml, 139651}, // __builtin_ve_vl_vfmkwle_mvml
      {Intrinsic::ve_vl_vfmkwlenan_mvl, 139680}, // __builtin_ve_vl_vfmkwlenan_mvl
      {Intrinsic::ve_vl_vfmkwlenan_mvml, 139711}, // __builtin_ve_vl_vfmkwlenan_mvml
      {Intrinsic::ve_vl_vfmkwlt_mvl, 139743}, // __builtin_ve_vl_vfmkwlt_mvl
      {Intrinsic::ve_vl_vfmkwlt_mvml, 139771}, // __builtin_ve_vl_vfmkwlt_mvml
      {Intrinsic::ve_vl_vfmkwltnan_mvl, 139800}, // __builtin_ve_vl_vfmkwltnan_mvl
      {Intrinsic::ve_vl_vfmkwltnan_mvml, 139831}, // __builtin_ve_vl_vfmkwltnan_mvml
      {Intrinsic::ve_vl_vfmkwnan_mvl, 139863}, // __builtin_ve_vl_vfmkwnan_mvl
      {Intrinsic::ve_vl_vfmkwnan_mvml, 139892}, // __builtin_ve_vl_vfmkwnan_mvml
      {Intrinsic::ve_vl_vfmkwne_mvl, 139922}, // __builtin_ve_vl_vfmkwne_mvl
      {Intrinsic::ve_vl_vfmkwne_mvml, 139950}, // __builtin_ve_vl_vfmkwne_mvml
      {Intrinsic::ve_vl_vfmkwnenan_mvl, 139979}, // __builtin_ve_vl_vfmkwnenan_mvl
      {Intrinsic::ve_vl_vfmkwnenan_mvml, 140010}, // __builtin_ve_vl_vfmkwnenan_mvml
      {Intrinsic::ve_vl_vfmkwnum_mvl, 140042}, // __builtin_ve_vl_vfmkwnum_mvl
      {Intrinsic::ve_vl_vfmkwnum_mvml, 140071}, // __builtin_ve_vl_vfmkwnum_mvml
      {Intrinsic::ve_vl_vfmsbd_vsvvl, 140101}, // __builtin_ve_vl_vfmsbd_vsvvl
      {Intrinsic::ve_vl_vfmsbd_vsvvmvl, 140130}, // __builtin_ve_vl_vfmsbd_vsvvmvl
      {Intrinsic::ve_vl_vfmsbd_vsvvvl, 140161}, // __builtin_ve_vl_vfmsbd_vsvvvl
      {Intrinsic::ve_vl_vfmsbd_vvsvl, 140191}, // __builtin_ve_vl_vfmsbd_vvsvl
      {Intrinsic::ve_vl_vfmsbd_vvsvmvl, 140220}, // __builtin_ve_vl_vfmsbd_vvsvmvl
      {Intrinsic::ve_vl_vfmsbd_vvsvvl, 140251}, // __builtin_ve_vl_vfmsbd_vvsvvl
      {Intrinsic::ve_vl_vfmsbd_vvvvl, 140281}, // __builtin_ve_vl_vfmsbd_vvvvl
      {Intrinsic::ve_vl_vfmsbd_vvvvmvl, 140310}, // __builtin_ve_vl_vfmsbd_vvvvmvl
      {Intrinsic::ve_vl_vfmsbd_vvvvvl, 140341}, // __builtin_ve_vl_vfmsbd_vvvvvl
      {Intrinsic::ve_vl_vfmsbs_vsvvl, 140371}, // __builtin_ve_vl_vfmsbs_vsvvl
      {Intrinsic::ve_vl_vfmsbs_vsvvmvl, 140400}, // __builtin_ve_vl_vfmsbs_vsvvmvl
      {Intrinsic::ve_vl_vfmsbs_vsvvvl, 140431}, // __builtin_ve_vl_vfmsbs_vsvvvl
      {Intrinsic::ve_vl_vfmsbs_vvsvl, 140461}, // __builtin_ve_vl_vfmsbs_vvsvl
      {Intrinsic::ve_vl_vfmsbs_vvsvmvl, 140490}, // __builtin_ve_vl_vfmsbs_vvsvmvl
      {Intrinsic::ve_vl_vfmsbs_vvsvvl, 140521}, // __builtin_ve_vl_vfmsbs_vvsvvl
      {Intrinsic::ve_vl_vfmsbs_vvvvl, 140551}, // __builtin_ve_vl_vfmsbs_vvvvl
      {Intrinsic::ve_vl_vfmsbs_vvvvmvl, 140580}, // __builtin_ve_vl_vfmsbs_vvvvmvl
      {Intrinsic::ve_vl_vfmsbs_vvvvvl, 140611}, // __builtin_ve_vl_vfmsbs_vvvvvl
      {Intrinsic::ve_vl_vfmuld_vsvl, 140641}, // __builtin_ve_vl_vfmuld_vsvl
      {Intrinsic::ve_vl_vfmuld_vsvmvl, 140669}, // __builtin_ve_vl_vfmuld_vsvmvl
      {Intrinsic::ve_vl_vfmuld_vsvvl, 140699}, // __builtin_ve_vl_vfmuld_vsvvl
      {Intrinsic::ve_vl_vfmuld_vvvl, 140728}, // __builtin_ve_vl_vfmuld_vvvl
      {Intrinsic::ve_vl_vfmuld_vvvmvl, 140756}, // __builtin_ve_vl_vfmuld_vvvmvl
      {Intrinsic::ve_vl_vfmuld_vvvvl, 140786}, // __builtin_ve_vl_vfmuld_vvvvl
      {Intrinsic::ve_vl_vfmuls_vsvl, 140815}, // __builtin_ve_vl_vfmuls_vsvl
      {Intrinsic::ve_vl_vfmuls_vsvmvl, 140843}, // __builtin_ve_vl_vfmuls_vsvmvl
      {Intrinsic::ve_vl_vfmuls_vsvvl, 140873}, // __builtin_ve_vl_vfmuls_vsvvl
      {Intrinsic::ve_vl_vfmuls_vvvl, 140902}, // __builtin_ve_vl_vfmuls_vvvl
      {Intrinsic::ve_vl_vfmuls_vvvmvl, 140930}, // __builtin_ve_vl_vfmuls_vvvmvl
      {Intrinsic::ve_vl_vfmuls_vvvvl, 140960}, // __builtin_ve_vl_vfmuls_vvvvl
      {Intrinsic::ve_vl_vfnmadd_vsvvl, 140989}, // __builtin_ve_vl_vfnmadd_vsvvl
      {Intrinsic::ve_vl_vfnmadd_vsvvmvl, 141019}, // __builtin_ve_vl_vfnmadd_vsvvmvl
      {Intrinsic::ve_vl_vfnmadd_vsvvvl, 141051}, // __builtin_ve_vl_vfnmadd_vsvvvl
      {Intrinsic::ve_vl_vfnmadd_vvsvl, 141082}, // __builtin_ve_vl_vfnmadd_vvsvl
      {Intrinsic::ve_vl_vfnmadd_vvsvmvl, 141112}, // __builtin_ve_vl_vfnmadd_vvsvmvl
      {Intrinsic::ve_vl_vfnmadd_vvsvvl, 141144}, // __builtin_ve_vl_vfnmadd_vvsvvl
      {Intrinsic::ve_vl_vfnmadd_vvvvl, 141175}, // __builtin_ve_vl_vfnmadd_vvvvl
      {Intrinsic::ve_vl_vfnmadd_vvvvmvl, 141205}, // __builtin_ve_vl_vfnmadd_vvvvmvl
      {Intrinsic::ve_vl_vfnmadd_vvvvvl, 141237}, // __builtin_ve_vl_vfnmadd_vvvvvl
      {Intrinsic::ve_vl_vfnmads_vsvvl, 141268}, // __builtin_ve_vl_vfnmads_vsvvl
      {Intrinsic::ve_vl_vfnmads_vsvvmvl, 141298}, // __builtin_ve_vl_vfnmads_vsvvmvl
      {Intrinsic::ve_vl_vfnmads_vsvvvl, 141330}, // __builtin_ve_vl_vfnmads_vsvvvl
      {Intrinsic::ve_vl_vfnmads_vvsvl, 141361}, // __builtin_ve_vl_vfnmads_vvsvl
      {Intrinsic::ve_vl_vfnmads_vvsvmvl, 141391}, // __builtin_ve_vl_vfnmads_vvsvmvl
      {Intrinsic::ve_vl_vfnmads_vvsvvl, 141423}, // __builtin_ve_vl_vfnmads_vvsvvl
      {Intrinsic::ve_vl_vfnmads_vvvvl, 141454}, // __builtin_ve_vl_vfnmads_vvvvl
      {Intrinsic::ve_vl_vfnmads_vvvvmvl, 141484}, // __builtin_ve_vl_vfnmads_vvvvmvl
      {Intrinsic::ve_vl_vfnmads_vvvvvl, 141516}, // __builtin_ve_vl_vfnmads_vvvvvl
      {Intrinsic::ve_vl_vfnmsbd_vsvvl, 141547}, // __builtin_ve_vl_vfnmsbd_vsvvl
      {Intrinsic::ve_vl_vfnmsbd_vsvvmvl, 141577}, // __builtin_ve_vl_vfnmsbd_vsvvmvl
      {Intrinsic::ve_vl_vfnmsbd_vsvvvl, 141609}, // __builtin_ve_vl_vfnmsbd_vsvvvl
      {Intrinsic::ve_vl_vfnmsbd_vvsvl, 141640}, // __builtin_ve_vl_vfnmsbd_vvsvl
      {Intrinsic::ve_vl_vfnmsbd_vvsvmvl, 141670}, // __builtin_ve_vl_vfnmsbd_vvsvmvl
      {Intrinsic::ve_vl_vfnmsbd_vvsvvl, 141702}, // __builtin_ve_vl_vfnmsbd_vvsvvl
      {Intrinsic::ve_vl_vfnmsbd_vvvvl, 141733}, // __builtin_ve_vl_vfnmsbd_vvvvl
      {Intrinsic::ve_vl_vfnmsbd_vvvvmvl, 141763}, // __builtin_ve_vl_vfnmsbd_vvvvmvl
      {Intrinsic::ve_vl_vfnmsbd_vvvvvl, 141795}, // __builtin_ve_vl_vfnmsbd_vvvvvl
      {Intrinsic::ve_vl_vfnmsbs_vsvvl, 141826}, // __builtin_ve_vl_vfnmsbs_vsvvl
      {Intrinsic::ve_vl_vfnmsbs_vsvvmvl, 141856}, // __builtin_ve_vl_vfnmsbs_vsvvmvl
      {Intrinsic::ve_vl_vfnmsbs_vsvvvl, 141888}, // __builtin_ve_vl_vfnmsbs_vsvvvl
      {Intrinsic::ve_vl_vfnmsbs_vvsvl, 141919}, // __builtin_ve_vl_vfnmsbs_vvsvl
      {Intrinsic::ve_vl_vfnmsbs_vvsvmvl, 141949}, // __builtin_ve_vl_vfnmsbs_vvsvmvl
      {Intrinsic::ve_vl_vfnmsbs_vvsvvl, 141981}, // __builtin_ve_vl_vfnmsbs_vvsvvl
      {Intrinsic::ve_vl_vfnmsbs_vvvvl, 142012}, // __builtin_ve_vl_vfnmsbs_vvvvl
      {Intrinsic::ve_vl_vfnmsbs_vvvvmvl, 142042}, // __builtin_ve_vl_vfnmsbs_vvvvmvl
      {Intrinsic::ve_vl_vfnmsbs_vvvvvl, 142074}, // __builtin_ve_vl_vfnmsbs_vvvvvl
      {Intrinsic::ve_vl_vfrmaxdfst_vvl, 142105}, // __builtin_ve_vl_vfrmaxdfst_vvl
      {Intrinsic::ve_vl_vfrmaxdfst_vvvl, 142136}, // __builtin_ve_vl_vfrmaxdfst_vvvl
      {Intrinsic::ve_vl_vfrmaxdlst_vvl, 142168}, // __builtin_ve_vl_vfrmaxdlst_vvl
      {Intrinsic::ve_vl_vfrmaxdlst_vvvl, 142199}, // __builtin_ve_vl_vfrmaxdlst_vvvl
      {Intrinsic::ve_vl_vfrmaxsfst_vvl, 142231}, // __builtin_ve_vl_vfrmaxsfst_vvl
      {Intrinsic::ve_vl_vfrmaxsfst_vvvl, 142262}, // __builtin_ve_vl_vfrmaxsfst_vvvl
      {Intrinsic::ve_vl_vfrmaxslst_vvl, 142294}, // __builtin_ve_vl_vfrmaxslst_vvl
      {Intrinsic::ve_vl_vfrmaxslst_vvvl, 142325}, // __builtin_ve_vl_vfrmaxslst_vvvl
      {Intrinsic::ve_vl_vfrmindfst_vvl, 142357}, // __builtin_ve_vl_vfrmindfst_vvl
      {Intrinsic::ve_vl_vfrmindfst_vvvl, 142388}, // __builtin_ve_vl_vfrmindfst_vvvl
      {Intrinsic::ve_vl_vfrmindlst_vvl, 142420}, // __builtin_ve_vl_vfrmindlst_vvl
      {Intrinsic::ve_vl_vfrmindlst_vvvl, 142451}, // __builtin_ve_vl_vfrmindlst_vvvl
      {Intrinsic::ve_vl_vfrminsfst_vvl, 142483}, // __builtin_ve_vl_vfrminsfst_vvl
      {Intrinsic::ve_vl_vfrminsfst_vvvl, 142514}, // __builtin_ve_vl_vfrminsfst_vvvl
      {Intrinsic::ve_vl_vfrminslst_vvl, 142546}, // __builtin_ve_vl_vfrminslst_vvl
      {Intrinsic::ve_vl_vfrminslst_vvvl, 142577}, // __builtin_ve_vl_vfrminslst_vvvl
      {Intrinsic::ve_vl_vfsqrtd_vvl, 142609}, // __builtin_ve_vl_vfsqrtd_vvl
      {Intrinsic::ve_vl_vfsqrtd_vvvl, 142637}, // __builtin_ve_vl_vfsqrtd_vvvl
      {Intrinsic::ve_vl_vfsqrts_vvl, 142666}, // __builtin_ve_vl_vfsqrts_vvl
      {Intrinsic::ve_vl_vfsqrts_vvvl, 142694}, // __builtin_ve_vl_vfsqrts_vvvl
      {Intrinsic::ve_vl_vfsubd_vsvl, 142723}, // __builtin_ve_vl_vfsubd_vsvl
      {Intrinsic::ve_vl_vfsubd_vsvmvl, 142751}, // __builtin_ve_vl_vfsubd_vsvmvl
      {Intrinsic::ve_vl_vfsubd_vsvvl, 142781}, // __builtin_ve_vl_vfsubd_vsvvl
      {Intrinsic::ve_vl_vfsubd_vvvl, 142810}, // __builtin_ve_vl_vfsubd_vvvl
      {Intrinsic::ve_vl_vfsubd_vvvmvl, 142838}, // __builtin_ve_vl_vfsubd_vvvmvl
      {Intrinsic::ve_vl_vfsubd_vvvvl, 142868}, // __builtin_ve_vl_vfsubd_vvvvl
      {Intrinsic::ve_vl_vfsubs_vsvl, 142897}, // __builtin_ve_vl_vfsubs_vsvl
      {Intrinsic::ve_vl_vfsubs_vsvmvl, 142925}, // __builtin_ve_vl_vfsubs_vsvmvl
      {Intrinsic::ve_vl_vfsubs_vsvvl, 142955}, // __builtin_ve_vl_vfsubs_vsvvl
      {Intrinsic::ve_vl_vfsubs_vvvl, 142984}, // __builtin_ve_vl_vfsubs_vvvl
      {Intrinsic::ve_vl_vfsubs_vvvmvl, 143012}, // __builtin_ve_vl_vfsubs_vvvmvl
      {Intrinsic::ve_vl_vfsubs_vvvvl, 143042}, // __builtin_ve_vl_vfsubs_vvvvl
      {Intrinsic::ve_vl_vfsumd_vvl, 143071}, // __builtin_ve_vl_vfsumd_vvl
      {Intrinsic::ve_vl_vfsumd_vvml, 143098}, // __builtin_ve_vl_vfsumd_vvml
      {Intrinsic::ve_vl_vfsums_vvl, 143126}, // __builtin_ve_vl_vfsums_vvl
      {Intrinsic::ve_vl_vfsums_vvml, 143153}, // __builtin_ve_vl_vfsums_vvml
      {Intrinsic::ve_vl_vgt_vvssl, 143181}, // __builtin_ve_vl_vgt_vvssl
      {Intrinsic::ve_vl_vgt_vvssml, 143207}, // __builtin_ve_vl_vgt_vvssml
      {Intrinsic::ve_vl_vgt_vvssmvl, 143234}, // __builtin_ve_vl_vgt_vvssmvl
      {Intrinsic::ve_vl_vgt_vvssvl, 143262}, // __builtin_ve_vl_vgt_vvssvl
      {Intrinsic::ve_vl_vgtlsx_vvssl, 143289}, // __builtin_ve_vl_vgtlsx_vvssl
      {Intrinsic::ve_vl_vgtlsx_vvssml, 143318}, // __builtin_ve_vl_vgtlsx_vvssml
      {Intrinsic::ve_vl_vgtlsx_vvssmvl, 143348}, // __builtin_ve_vl_vgtlsx_vvssmvl
      {Intrinsic::ve_vl_vgtlsx_vvssvl, 143379}, // __builtin_ve_vl_vgtlsx_vvssvl
      {Intrinsic::ve_vl_vgtlsxnc_vvssl, 143409}, // __builtin_ve_vl_vgtlsxnc_vvssl
      {Intrinsic::ve_vl_vgtlsxnc_vvssml, 143440}, // __builtin_ve_vl_vgtlsxnc_vvssml
      {Intrinsic::ve_vl_vgtlsxnc_vvssmvl, 143472}, // __builtin_ve_vl_vgtlsxnc_vvssmvl
      {Intrinsic::ve_vl_vgtlsxnc_vvssvl, 143505}, // __builtin_ve_vl_vgtlsxnc_vvssvl
      {Intrinsic::ve_vl_vgtlzx_vvssl, 143537}, // __builtin_ve_vl_vgtlzx_vvssl
      {Intrinsic::ve_vl_vgtlzx_vvssml, 143566}, // __builtin_ve_vl_vgtlzx_vvssml
      {Intrinsic::ve_vl_vgtlzx_vvssmvl, 143596}, // __builtin_ve_vl_vgtlzx_vvssmvl
      {Intrinsic::ve_vl_vgtlzx_vvssvl, 143627}, // __builtin_ve_vl_vgtlzx_vvssvl
      {Intrinsic::ve_vl_vgtlzxnc_vvssl, 143657}, // __builtin_ve_vl_vgtlzxnc_vvssl
      {Intrinsic::ve_vl_vgtlzxnc_vvssml, 143688}, // __builtin_ve_vl_vgtlzxnc_vvssml
      {Intrinsic::ve_vl_vgtlzxnc_vvssmvl, 143720}, // __builtin_ve_vl_vgtlzxnc_vvssmvl
      {Intrinsic::ve_vl_vgtlzxnc_vvssvl, 143753}, // __builtin_ve_vl_vgtlzxnc_vvssvl
      {Intrinsic::ve_vl_vgtnc_vvssl, 143785}, // __builtin_ve_vl_vgtnc_vvssl
      {Intrinsic::ve_vl_vgtnc_vvssml, 143813}, // __builtin_ve_vl_vgtnc_vvssml
      {Intrinsic::ve_vl_vgtnc_vvssmvl, 143842}, // __builtin_ve_vl_vgtnc_vvssmvl
      {Intrinsic::ve_vl_vgtnc_vvssvl, 143872}, // __builtin_ve_vl_vgtnc_vvssvl
      {Intrinsic::ve_vl_vgtu_vvssl, 143901}, // __builtin_ve_vl_vgtu_vvssl
      {Intrinsic::ve_vl_vgtu_vvssml, 143928}, // __builtin_ve_vl_vgtu_vvssml
      {Intrinsic::ve_vl_vgtu_vvssmvl, 143956}, // __builtin_ve_vl_vgtu_vvssmvl
      {Intrinsic::ve_vl_vgtu_vvssvl, 143985}, // __builtin_ve_vl_vgtu_vvssvl
      {Intrinsic::ve_vl_vgtunc_vvssl, 144013}, // __builtin_ve_vl_vgtunc_vvssl
      {Intrinsic::ve_vl_vgtunc_vvssml, 144042}, // __builtin_ve_vl_vgtunc_vvssml
      {Intrinsic::ve_vl_vgtunc_vvssmvl, 144072}, // __builtin_ve_vl_vgtunc_vvssmvl
      {Intrinsic::ve_vl_vgtunc_vvssvl, 144103}, // __builtin_ve_vl_vgtunc_vvssvl
      {Intrinsic::ve_vl_vld2d_vssl, 144184}, // __builtin_ve_vl_vld2d_vssl
      {Intrinsic::ve_vl_vld2d_vssvl, 144211}, // __builtin_ve_vl_vld2d_vssvl
      {Intrinsic::ve_vl_vld2dnc_vssl, 144239}, // __builtin_ve_vl_vld2dnc_vssl
      {Intrinsic::ve_vl_vld2dnc_vssvl, 144268}, // __builtin_ve_vl_vld2dnc_vssvl
      {Intrinsic::ve_vl_vld_vssl, 144133}, // __builtin_ve_vl_vld_vssl
      {Intrinsic::ve_vl_vld_vssvl, 144158}, // __builtin_ve_vl_vld_vssvl
      {Intrinsic::ve_vl_vldl2dsx_vssl, 144298}, // __builtin_ve_vl_vldl2dsx_vssl
      {Intrinsic::ve_vl_vldl2dsx_vssvl, 144328}, // __builtin_ve_vl_vldl2dsx_vssvl
      {Intrinsic::ve_vl_vldl2dsxnc_vssl, 144359}, // __builtin_ve_vl_vldl2dsxnc_vssl
      {Intrinsic::ve_vl_vldl2dsxnc_vssvl, 144391}, // __builtin_ve_vl_vldl2dsxnc_vssvl
      {Intrinsic::ve_vl_vldl2dzx_vssl, 144424}, // __builtin_ve_vl_vldl2dzx_vssl
      {Intrinsic::ve_vl_vldl2dzx_vssvl, 144454}, // __builtin_ve_vl_vldl2dzx_vssvl
      {Intrinsic::ve_vl_vldl2dzxnc_vssl, 144485}, // __builtin_ve_vl_vldl2dzxnc_vssl
      {Intrinsic::ve_vl_vldl2dzxnc_vssvl, 144517}, // __builtin_ve_vl_vldl2dzxnc_vssvl
      {Intrinsic::ve_vl_vldlsx_vssl, 144550}, // __builtin_ve_vl_vldlsx_vssl
      {Intrinsic::ve_vl_vldlsx_vssvl, 144578}, // __builtin_ve_vl_vldlsx_vssvl
      {Intrinsic::ve_vl_vldlsxnc_vssl, 144607}, // __builtin_ve_vl_vldlsxnc_vssl
      {Intrinsic::ve_vl_vldlsxnc_vssvl, 144637}, // __builtin_ve_vl_vldlsxnc_vssvl
      {Intrinsic::ve_vl_vldlzx_vssl, 144668}, // __builtin_ve_vl_vldlzx_vssl
      {Intrinsic::ve_vl_vldlzx_vssvl, 144696}, // __builtin_ve_vl_vldlzx_vssvl
      {Intrinsic::ve_vl_vldlzxnc_vssl, 144725}, // __builtin_ve_vl_vldlzxnc_vssl
      {Intrinsic::ve_vl_vldlzxnc_vssvl, 144755}, // __builtin_ve_vl_vldlzxnc_vssvl
      {Intrinsic::ve_vl_vldnc_vssl, 144786}, // __builtin_ve_vl_vldnc_vssl
      {Intrinsic::ve_vl_vldnc_vssvl, 144813}, // __builtin_ve_vl_vldnc_vssvl
      {Intrinsic::ve_vl_vldu2d_vssl, 144894}, // __builtin_ve_vl_vldu2d_vssl
      {Intrinsic::ve_vl_vldu2d_vssvl, 144922}, // __builtin_ve_vl_vldu2d_vssvl
      {Intrinsic::ve_vl_vldu2dnc_vssl, 144951}, // __builtin_ve_vl_vldu2dnc_vssl
      {Intrinsic::ve_vl_vldu2dnc_vssvl, 144981}, // __builtin_ve_vl_vldu2dnc_vssvl
      {Intrinsic::ve_vl_vldu_vssl, 144841}, // __builtin_ve_vl_vldu_vssl
      {Intrinsic::ve_vl_vldu_vssvl, 144867}, // __builtin_ve_vl_vldu_vssvl
      {Intrinsic::ve_vl_vldunc_vssl, 145012}, // __builtin_ve_vl_vldunc_vssl
      {Intrinsic::ve_vl_vldunc_vssvl, 145040}, // __builtin_ve_vl_vldunc_vssvl
      {Intrinsic::ve_vl_vldz_vvl, 145069}, // __builtin_ve_vl_vldz_vvl
      {Intrinsic::ve_vl_vldz_vvmvl, 145094}, // __builtin_ve_vl_vldz_vvmvl
      {Intrinsic::ve_vl_vldz_vvvl, 145121}, // __builtin_ve_vl_vldz_vvvl
      {Intrinsic::ve_vl_vmaxsl_vsvl, 145147}, // __builtin_ve_vl_vmaxsl_vsvl
      {Intrinsic::ve_vl_vmaxsl_vsvmvl, 145175}, // __builtin_ve_vl_vmaxsl_vsvmvl
      {Intrinsic::ve_vl_vmaxsl_vsvvl, 145205}, // __builtin_ve_vl_vmaxsl_vsvvl
      {Intrinsic::ve_vl_vmaxsl_vvvl, 145234}, // __builtin_ve_vl_vmaxsl_vvvl
      {Intrinsic::ve_vl_vmaxsl_vvvmvl, 145262}, // __builtin_ve_vl_vmaxsl_vvvmvl
      {Intrinsic::ve_vl_vmaxsl_vvvvl, 145292}, // __builtin_ve_vl_vmaxsl_vvvvl
      {Intrinsic::ve_vl_vmaxswsx_vsvl, 145321}, // __builtin_ve_vl_vmaxswsx_vsvl
      {Intrinsic::ve_vl_vmaxswsx_vsvmvl, 145351}, // __builtin_ve_vl_vmaxswsx_vsvmvl
      {Intrinsic::ve_vl_vmaxswsx_vsvvl, 145383}, // __builtin_ve_vl_vmaxswsx_vsvvl
      {Intrinsic::ve_vl_vmaxswsx_vvvl, 145414}, // __builtin_ve_vl_vmaxswsx_vvvl
      {Intrinsic::ve_vl_vmaxswsx_vvvmvl, 145444}, // __builtin_ve_vl_vmaxswsx_vvvmvl
      {Intrinsic::ve_vl_vmaxswsx_vvvvl, 145476}, // __builtin_ve_vl_vmaxswsx_vvvvl
      {Intrinsic::ve_vl_vmaxswzx_vsvl, 145507}, // __builtin_ve_vl_vmaxswzx_vsvl
      {Intrinsic::ve_vl_vmaxswzx_vsvmvl, 145537}, // __builtin_ve_vl_vmaxswzx_vsvmvl
      {Intrinsic::ve_vl_vmaxswzx_vsvvl, 145569}, // __builtin_ve_vl_vmaxswzx_vsvvl
      {Intrinsic::ve_vl_vmaxswzx_vvvl, 145600}, // __builtin_ve_vl_vmaxswzx_vvvl
      {Intrinsic::ve_vl_vmaxswzx_vvvmvl, 145630}, // __builtin_ve_vl_vmaxswzx_vvvmvl
      {Intrinsic::ve_vl_vmaxswzx_vvvvl, 145662}, // __builtin_ve_vl_vmaxswzx_vvvvl
      {Intrinsic::ve_vl_vminsl_vsvl, 145693}, // __builtin_ve_vl_vminsl_vsvl
      {Intrinsic::ve_vl_vminsl_vsvmvl, 145721}, // __builtin_ve_vl_vminsl_vsvmvl
      {Intrinsic::ve_vl_vminsl_vsvvl, 145751}, // __builtin_ve_vl_vminsl_vsvvl
      {Intrinsic::ve_vl_vminsl_vvvl, 145780}, // __builtin_ve_vl_vminsl_vvvl
      {Intrinsic::ve_vl_vminsl_vvvmvl, 145808}, // __builtin_ve_vl_vminsl_vvvmvl
      {Intrinsic::ve_vl_vminsl_vvvvl, 145838}, // __builtin_ve_vl_vminsl_vvvvl
      {Intrinsic::ve_vl_vminswsx_vsvl, 145867}, // __builtin_ve_vl_vminswsx_vsvl
      {Intrinsic::ve_vl_vminswsx_vsvmvl, 145897}, // __builtin_ve_vl_vminswsx_vsvmvl
      {Intrinsic::ve_vl_vminswsx_vsvvl, 145929}, // __builtin_ve_vl_vminswsx_vsvvl
      {Intrinsic::ve_vl_vminswsx_vvvl, 145960}, // __builtin_ve_vl_vminswsx_vvvl
      {Intrinsic::ve_vl_vminswsx_vvvmvl, 145990}, // __builtin_ve_vl_vminswsx_vvvmvl
      {Intrinsic::ve_vl_vminswsx_vvvvl, 146022}, // __builtin_ve_vl_vminswsx_vvvvl
      {Intrinsic::ve_vl_vminswzx_vsvl, 146053}, // __builtin_ve_vl_vminswzx_vsvl
      {Intrinsic::ve_vl_vminswzx_vsvmvl, 146083}, // __builtin_ve_vl_vminswzx_vsvmvl
      {Intrinsic::ve_vl_vminswzx_vsvvl, 146115}, // __builtin_ve_vl_vminswzx_vsvvl
      {Intrinsic::ve_vl_vminswzx_vvvl, 146146}, // __builtin_ve_vl_vminswzx_vvvl
      {Intrinsic::ve_vl_vminswzx_vvvmvl, 146176}, // __builtin_ve_vl_vminswzx_vvvmvl
      {Intrinsic::ve_vl_vminswzx_vvvvl, 146208}, // __builtin_ve_vl_vminswzx_vvvvl
      {Intrinsic::ve_vl_vmrg_vsvml, 146239}, // __builtin_ve_vl_vmrg_vsvml
      {Intrinsic::ve_vl_vmrg_vsvmvl, 146266}, // __builtin_ve_vl_vmrg_vsvmvl
      {Intrinsic::ve_vl_vmrg_vvvml, 146294}, // __builtin_ve_vl_vmrg_vvvml
      {Intrinsic::ve_vl_vmrg_vvvmvl, 146321}, // __builtin_ve_vl_vmrg_vvvmvl
      {Intrinsic::ve_vl_vmrgw_vsvMl, 146349}, // __builtin_ve_vl_vmrgw_vsvMl
      {Intrinsic::ve_vl_vmrgw_vsvMvl, 146377}, // __builtin_ve_vl_vmrgw_vsvMvl
      {Intrinsic::ve_vl_vmrgw_vvvMl, 146406}, // __builtin_ve_vl_vmrgw_vvvMl
      {Intrinsic::ve_vl_vmrgw_vvvMvl, 146434}, // __builtin_ve_vl_vmrgw_vvvMvl
      {Intrinsic::ve_vl_vmulsl_vsvl, 146463}, // __builtin_ve_vl_vmulsl_vsvl
      {Intrinsic::ve_vl_vmulsl_vsvmvl, 146491}, // __builtin_ve_vl_vmulsl_vsvmvl
      {Intrinsic::ve_vl_vmulsl_vsvvl, 146521}, // __builtin_ve_vl_vmulsl_vsvvl
      {Intrinsic::ve_vl_vmulsl_vvvl, 146550}, // __builtin_ve_vl_vmulsl_vvvl
      {Intrinsic::ve_vl_vmulsl_vvvmvl, 146578}, // __builtin_ve_vl_vmulsl_vvvmvl
      {Intrinsic::ve_vl_vmulsl_vvvvl, 146608}, // __builtin_ve_vl_vmulsl_vvvvl
      {Intrinsic::ve_vl_vmulslw_vsvl, 146637}, // __builtin_ve_vl_vmulslw_vsvl
      {Intrinsic::ve_vl_vmulslw_vsvvl, 146666}, // __builtin_ve_vl_vmulslw_vsvvl
      {Intrinsic::ve_vl_vmulslw_vvvl, 146696}, // __builtin_ve_vl_vmulslw_vvvl
      {Intrinsic::ve_vl_vmulslw_vvvvl, 146725}, // __builtin_ve_vl_vmulslw_vvvvl
      {Intrinsic::ve_vl_vmulswsx_vsvl, 146755}, // __builtin_ve_vl_vmulswsx_vsvl
      {Intrinsic::ve_vl_vmulswsx_vsvmvl, 146785}, // __builtin_ve_vl_vmulswsx_vsvmvl
      {Intrinsic::ve_vl_vmulswsx_vsvvl, 146817}, // __builtin_ve_vl_vmulswsx_vsvvl
      {Intrinsic::ve_vl_vmulswsx_vvvl, 146848}, // __builtin_ve_vl_vmulswsx_vvvl
      {Intrinsic::ve_vl_vmulswsx_vvvmvl, 146878}, // __builtin_ve_vl_vmulswsx_vvvmvl
      {Intrinsic::ve_vl_vmulswsx_vvvvl, 146910}, // __builtin_ve_vl_vmulswsx_vvvvl
      {Intrinsic::ve_vl_vmulswzx_vsvl, 146941}, // __builtin_ve_vl_vmulswzx_vsvl
      {Intrinsic::ve_vl_vmulswzx_vsvmvl, 146971}, // __builtin_ve_vl_vmulswzx_vsvmvl
      {Intrinsic::ve_vl_vmulswzx_vsvvl, 147003}, // __builtin_ve_vl_vmulswzx_vsvvl
      {Intrinsic::ve_vl_vmulswzx_vvvl, 147034}, // __builtin_ve_vl_vmulswzx_vvvl
      {Intrinsic::ve_vl_vmulswzx_vvvmvl, 147064}, // __builtin_ve_vl_vmulswzx_vvvmvl
      {Intrinsic::ve_vl_vmulswzx_vvvvl, 147096}, // __builtin_ve_vl_vmulswzx_vvvvl
      {Intrinsic::ve_vl_vmulul_vsvl, 147127}, // __builtin_ve_vl_vmulul_vsvl
      {Intrinsic::ve_vl_vmulul_vsvmvl, 147155}, // __builtin_ve_vl_vmulul_vsvmvl
      {Intrinsic::ve_vl_vmulul_vsvvl, 147185}, // __builtin_ve_vl_vmulul_vsvvl
      {Intrinsic::ve_vl_vmulul_vvvl, 147214}, // __builtin_ve_vl_vmulul_vvvl
      {Intrinsic::ve_vl_vmulul_vvvmvl, 147242}, // __builtin_ve_vl_vmulul_vvvmvl
      {Intrinsic::ve_vl_vmulul_vvvvl, 147272}, // __builtin_ve_vl_vmulul_vvvvl
      {Intrinsic::ve_vl_vmuluw_vsvl, 147301}, // __builtin_ve_vl_vmuluw_vsvl
      {Intrinsic::ve_vl_vmuluw_vsvmvl, 147329}, // __builtin_ve_vl_vmuluw_vsvmvl
      {Intrinsic::ve_vl_vmuluw_vsvvl, 147359}, // __builtin_ve_vl_vmuluw_vsvvl
      {Intrinsic::ve_vl_vmuluw_vvvl, 147388}, // __builtin_ve_vl_vmuluw_vvvl
      {Intrinsic::ve_vl_vmuluw_vvvmvl, 147416}, // __builtin_ve_vl_vmuluw_vvvmvl
      {Intrinsic::ve_vl_vmuluw_vvvvl, 147446}, // __builtin_ve_vl_vmuluw_vvvvl
      {Intrinsic::ve_vl_vmv_vsvl, 147475}, // __builtin_ve_vl_vmv_vsvl
      {Intrinsic::ve_vl_vmv_vsvmvl, 147500}, // __builtin_ve_vl_vmv_vsvmvl
      {Intrinsic::ve_vl_vmv_vsvvl, 147527}, // __builtin_ve_vl_vmv_vsvvl
      {Intrinsic::ve_vl_vor_vsvl, 147553}, // __builtin_ve_vl_vor_vsvl
      {Intrinsic::ve_vl_vor_vsvmvl, 147578}, // __builtin_ve_vl_vor_vsvmvl
      {Intrinsic::ve_vl_vor_vsvvl, 147605}, // __builtin_ve_vl_vor_vsvvl
      {Intrinsic::ve_vl_vor_vvvl, 147631}, // __builtin_ve_vl_vor_vvvl
      {Intrinsic::ve_vl_vor_vvvmvl, 147656}, // __builtin_ve_vl_vor_vvvmvl
      {Intrinsic::ve_vl_vor_vvvvl, 147683}, // __builtin_ve_vl_vor_vvvvl
      {Intrinsic::ve_vl_vpcnt_vvl, 147709}, // __builtin_ve_vl_vpcnt_vvl
      {Intrinsic::ve_vl_vpcnt_vvmvl, 147735}, // __builtin_ve_vl_vpcnt_vvmvl
      {Intrinsic::ve_vl_vpcnt_vvvl, 147763}, // __builtin_ve_vl_vpcnt_vvvl
      {Intrinsic::ve_vl_vrand_vvl, 147790}, // __builtin_ve_vl_vrand_vvl
      {Intrinsic::ve_vl_vrand_vvml, 147816}, // __builtin_ve_vl_vrand_vvml
      {Intrinsic::ve_vl_vrcpd_vvl, 147843}, // __builtin_ve_vl_vrcpd_vvl
      {Intrinsic::ve_vl_vrcpd_vvvl, 147869}, // __builtin_ve_vl_vrcpd_vvvl
      {Intrinsic::ve_vl_vrcps_vvl, 147896}, // __builtin_ve_vl_vrcps_vvl
      {Intrinsic::ve_vl_vrcps_vvvl, 147922}, // __builtin_ve_vl_vrcps_vvvl
      {Intrinsic::ve_vl_vrmaxslfst_vvl, 147949}, // __builtin_ve_vl_vrmaxslfst_vvl
      {Intrinsic::ve_vl_vrmaxslfst_vvvl, 147980}, // __builtin_ve_vl_vrmaxslfst_vvvl
      {Intrinsic::ve_vl_vrmaxsllst_vvl, 148012}, // __builtin_ve_vl_vrmaxsllst_vvl
      {Intrinsic::ve_vl_vrmaxsllst_vvvl, 148043}, // __builtin_ve_vl_vrmaxsllst_vvvl
      {Intrinsic::ve_vl_vrmaxswfstsx_vvl, 148075}, // __builtin_ve_vl_vrmaxswfstsx_vvl
      {Intrinsic::ve_vl_vrmaxswfstsx_vvvl, 148108}, // __builtin_ve_vl_vrmaxswfstsx_vvvl
      {Intrinsic::ve_vl_vrmaxswfstzx_vvl, 148142}, // __builtin_ve_vl_vrmaxswfstzx_vvl
      {Intrinsic::ve_vl_vrmaxswfstzx_vvvl, 148175}, // __builtin_ve_vl_vrmaxswfstzx_vvvl
      {Intrinsic::ve_vl_vrmaxswlstsx_vvl, 148209}, // __builtin_ve_vl_vrmaxswlstsx_vvl
      {Intrinsic::ve_vl_vrmaxswlstsx_vvvl, 148242}, // __builtin_ve_vl_vrmaxswlstsx_vvvl
      {Intrinsic::ve_vl_vrmaxswlstzx_vvl, 148276}, // __builtin_ve_vl_vrmaxswlstzx_vvl
      {Intrinsic::ve_vl_vrmaxswlstzx_vvvl, 148309}, // __builtin_ve_vl_vrmaxswlstzx_vvvl
      {Intrinsic::ve_vl_vrminslfst_vvl, 148343}, // __builtin_ve_vl_vrminslfst_vvl
      {Intrinsic::ve_vl_vrminslfst_vvvl, 148374}, // __builtin_ve_vl_vrminslfst_vvvl
      {Intrinsic::ve_vl_vrminsllst_vvl, 148406}, // __builtin_ve_vl_vrminsllst_vvl
      {Intrinsic::ve_vl_vrminsllst_vvvl, 148437}, // __builtin_ve_vl_vrminsllst_vvvl
      {Intrinsic::ve_vl_vrminswfstsx_vvl, 148469}, // __builtin_ve_vl_vrminswfstsx_vvl
      {Intrinsic::ve_vl_vrminswfstsx_vvvl, 148502}, // __builtin_ve_vl_vrminswfstsx_vvvl
      {Intrinsic::ve_vl_vrminswfstzx_vvl, 148536}, // __builtin_ve_vl_vrminswfstzx_vvl
      {Intrinsic::ve_vl_vrminswfstzx_vvvl, 148569}, // __builtin_ve_vl_vrminswfstzx_vvvl
      {Intrinsic::ve_vl_vrminswlstsx_vvl, 148603}, // __builtin_ve_vl_vrminswlstsx_vvl
      {Intrinsic::ve_vl_vrminswlstsx_vvvl, 148636}, // __builtin_ve_vl_vrminswlstsx_vvvl
      {Intrinsic::ve_vl_vrminswlstzx_vvl, 148670}, // __builtin_ve_vl_vrminswlstzx_vvl
      {Intrinsic::ve_vl_vrminswlstzx_vvvl, 148703}, // __builtin_ve_vl_vrminswlstzx_vvvl
      {Intrinsic::ve_vl_vror_vvl, 148737}, // __builtin_ve_vl_vror_vvl
      {Intrinsic::ve_vl_vror_vvml, 148762}, // __builtin_ve_vl_vror_vvml
      {Intrinsic::ve_vl_vrsqrtd_vvl, 148788}, // __builtin_ve_vl_vrsqrtd_vvl
      {Intrinsic::ve_vl_vrsqrtd_vvvl, 148816}, // __builtin_ve_vl_vrsqrtd_vvvl
      {Intrinsic::ve_vl_vrsqrtdnex_vvl, 148845}, // __builtin_ve_vl_vrsqrtdnex_vvl
      {Intrinsic::ve_vl_vrsqrtdnex_vvvl, 148876}, // __builtin_ve_vl_vrsqrtdnex_vvvl
      {Intrinsic::ve_vl_vrsqrts_vvl, 148908}, // __builtin_ve_vl_vrsqrts_vvl
      {Intrinsic::ve_vl_vrsqrts_vvvl, 148936}, // __builtin_ve_vl_vrsqrts_vvvl
      {Intrinsic::ve_vl_vrsqrtsnex_vvl, 148965}, // __builtin_ve_vl_vrsqrtsnex_vvl
      {Intrinsic::ve_vl_vrsqrtsnex_vvvl, 148996}, // __builtin_ve_vl_vrsqrtsnex_vvvl
      {Intrinsic::ve_vl_vrxor_vvl, 149028}, // __builtin_ve_vl_vrxor_vvl
      {Intrinsic::ve_vl_vrxor_vvml, 149054}, // __builtin_ve_vl_vrxor_vvml
      {Intrinsic::ve_vl_vsc_vvssl, 149081}, // __builtin_ve_vl_vsc_vvssl
      {Intrinsic::ve_vl_vsc_vvssml, 149107}, // __builtin_ve_vl_vsc_vvssml
      {Intrinsic::ve_vl_vscl_vvssl, 149134}, // __builtin_ve_vl_vscl_vvssl
      {Intrinsic::ve_vl_vscl_vvssml, 149161}, // __builtin_ve_vl_vscl_vvssml
      {Intrinsic::ve_vl_vsclnc_vvssl, 149189}, // __builtin_ve_vl_vsclnc_vvssl
      {Intrinsic::ve_vl_vsclnc_vvssml, 149218}, // __builtin_ve_vl_vsclnc_vvssml
      {Intrinsic::ve_vl_vsclncot_vvssl, 149248}, // __builtin_ve_vl_vsclncot_vvssl
      {Intrinsic::ve_vl_vsclncot_vvssml, 149279}, // __builtin_ve_vl_vsclncot_vvssml
      {Intrinsic::ve_vl_vsclot_vvssl, 149311}, // __builtin_ve_vl_vsclot_vvssl
      {Intrinsic::ve_vl_vsclot_vvssml, 149340}, // __builtin_ve_vl_vsclot_vvssml
      {Intrinsic::ve_vl_vscnc_vvssl, 149370}, // __builtin_ve_vl_vscnc_vvssl
      {Intrinsic::ve_vl_vscnc_vvssml, 149398}, // __builtin_ve_vl_vscnc_vvssml
      {Intrinsic::ve_vl_vscncot_vvssl, 149427}, // __builtin_ve_vl_vscncot_vvssl
      {Intrinsic::ve_vl_vscncot_vvssml, 149457}, // __builtin_ve_vl_vscncot_vvssml
      {Intrinsic::ve_vl_vscot_vvssl, 149488}, // __builtin_ve_vl_vscot_vvssl
      {Intrinsic::ve_vl_vscot_vvssml, 149516}, // __builtin_ve_vl_vscot_vvssml
      {Intrinsic::ve_vl_vscu_vvssl, 149545}, // __builtin_ve_vl_vscu_vvssl
      {Intrinsic::ve_vl_vscu_vvssml, 149572}, // __builtin_ve_vl_vscu_vvssml
      {Intrinsic::ve_vl_vscunc_vvssl, 149600}, // __builtin_ve_vl_vscunc_vvssl
      {Intrinsic::ve_vl_vscunc_vvssml, 149629}, // __builtin_ve_vl_vscunc_vvssml
      {Intrinsic::ve_vl_vscuncot_vvssl, 149659}, // __builtin_ve_vl_vscuncot_vvssl
      {Intrinsic::ve_vl_vscuncot_vvssml, 149690}, // __builtin_ve_vl_vscuncot_vvssml
      {Intrinsic::ve_vl_vscuot_vvssl, 149722}, // __builtin_ve_vl_vscuot_vvssl
      {Intrinsic::ve_vl_vscuot_vvssml, 149751}, // __builtin_ve_vl_vscuot_vvssml
      {Intrinsic::ve_vl_vseq_vl, 149781}, // __builtin_ve_vl_vseq_vl
      {Intrinsic::ve_vl_vseq_vvl, 149805}, // __builtin_ve_vl_vseq_vvl
      {Intrinsic::ve_vl_vsfa_vvssl, 149830}, // __builtin_ve_vl_vsfa_vvssl
      {Intrinsic::ve_vl_vsfa_vvssmvl, 149857}, // __builtin_ve_vl_vsfa_vvssmvl
      {Intrinsic::ve_vl_vsfa_vvssvl, 149886}, // __builtin_ve_vl_vsfa_vvssvl
      {Intrinsic::ve_vl_vshf_vvvsl, 149914}, // __builtin_ve_vl_vshf_vvvsl
      {Intrinsic::ve_vl_vshf_vvvsvl, 149941}, // __builtin_ve_vl_vshf_vvvsvl
      {Intrinsic::ve_vl_vslal_vvsl, 149969}, // __builtin_ve_vl_vslal_vvsl
      {Intrinsic::ve_vl_vslal_vvsmvl, 149996}, // __builtin_ve_vl_vslal_vvsmvl
      {Intrinsic::ve_vl_vslal_vvsvl, 150025}, // __builtin_ve_vl_vslal_vvsvl
      {Intrinsic::ve_vl_vslal_vvvl, 150053}, // __builtin_ve_vl_vslal_vvvl
      {Intrinsic::ve_vl_vslal_vvvmvl, 150080}, // __builtin_ve_vl_vslal_vvvmvl
      {Intrinsic::ve_vl_vslal_vvvvl, 150109}, // __builtin_ve_vl_vslal_vvvvl
      {Intrinsic::ve_vl_vslawsx_vvsl, 150137}, // __builtin_ve_vl_vslawsx_vvsl
      {Intrinsic::ve_vl_vslawsx_vvsmvl, 150166}, // __builtin_ve_vl_vslawsx_vvsmvl
      {Intrinsic::ve_vl_vslawsx_vvsvl, 150197}, // __builtin_ve_vl_vslawsx_vvsvl
      {Intrinsic::ve_vl_vslawsx_vvvl, 150227}, // __builtin_ve_vl_vslawsx_vvvl
      {Intrinsic::ve_vl_vslawsx_vvvmvl, 150256}, // __builtin_ve_vl_vslawsx_vvvmvl
      {Intrinsic::ve_vl_vslawsx_vvvvl, 150287}, // __builtin_ve_vl_vslawsx_vvvvl
      {Intrinsic::ve_vl_vslawzx_vvsl, 150317}, // __builtin_ve_vl_vslawzx_vvsl
      {Intrinsic::ve_vl_vslawzx_vvsmvl, 150346}, // __builtin_ve_vl_vslawzx_vvsmvl
      {Intrinsic::ve_vl_vslawzx_vvsvl, 150377}, // __builtin_ve_vl_vslawzx_vvsvl
      {Intrinsic::ve_vl_vslawzx_vvvl, 150407}, // __builtin_ve_vl_vslawzx_vvvl
      {Intrinsic::ve_vl_vslawzx_vvvmvl, 150436}, // __builtin_ve_vl_vslawzx_vvvmvl
      {Intrinsic::ve_vl_vslawzx_vvvvl, 150467}, // __builtin_ve_vl_vslawzx_vvvvl
      {Intrinsic::ve_vl_vsll_vvsl, 150497}, // __builtin_ve_vl_vsll_vvsl
      {Intrinsic::ve_vl_vsll_vvsmvl, 150523}, // __builtin_ve_vl_vsll_vvsmvl
      {Intrinsic::ve_vl_vsll_vvsvl, 150551}, // __builtin_ve_vl_vsll_vvsvl
      {Intrinsic::ve_vl_vsll_vvvl, 150578}, // __builtin_ve_vl_vsll_vvvl
      {Intrinsic::ve_vl_vsll_vvvmvl, 150604}, // __builtin_ve_vl_vsll_vvvmvl
      {Intrinsic::ve_vl_vsll_vvvvl, 150632}, // __builtin_ve_vl_vsll_vvvvl
      {Intrinsic::ve_vl_vsral_vvsl, 150659}, // __builtin_ve_vl_vsral_vvsl
      {Intrinsic::ve_vl_vsral_vvsmvl, 150686}, // __builtin_ve_vl_vsral_vvsmvl
      {Intrinsic::ve_vl_vsral_vvsvl, 150715}, // __builtin_ve_vl_vsral_vvsvl
      {Intrinsic::ve_vl_vsral_vvvl, 150743}, // __builtin_ve_vl_vsral_vvvl
      {Intrinsic::ve_vl_vsral_vvvmvl, 150770}, // __builtin_ve_vl_vsral_vvvmvl
      {Intrinsic::ve_vl_vsral_vvvvl, 150799}, // __builtin_ve_vl_vsral_vvvvl
      {Intrinsic::ve_vl_vsrawsx_vvsl, 150827}, // __builtin_ve_vl_vsrawsx_vvsl
      {Intrinsic::ve_vl_vsrawsx_vvsmvl, 150856}, // __builtin_ve_vl_vsrawsx_vvsmvl
      {Intrinsic::ve_vl_vsrawsx_vvsvl, 150887}, // __builtin_ve_vl_vsrawsx_vvsvl
      {Intrinsic::ve_vl_vsrawsx_vvvl, 150917}, // __builtin_ve_vl_vsrawsx_vvvl
      {Intrinsic::ve_vl_vsrawsx_vvvmvl, 150946}, // __builtin_ve_vl_vsrawsx_vvvmvl
      {Intrinsic::ve_vl_vsrawsx_vvvvl, 150977}, // __builtin_ve_vl_vsrawsx_vvvvl
      {Intrinsic::ve_vl_vsrawzx_vvsl, 151007}, // __builtin_ve_vl_vsrawzx_vvsl
      {Intrinsic::ve_vl_vsrawzx_vvsmvl, 151036}, // __builtin_ve_vl_vsrawzx_vvsmvl
      {Intrinsic::ve_vl_vsrawzx_vvsvl, 151067}, // __builtin_ve_vl_vsrawzx_vvsvl
      {Intrinsic::ve_vl_vsrawzx_vvvl, 151097}, // __builtin_ve_vl_vsrawzx_vvvl
      {Intrinsic::ve_vl_vsrawzx_vvvmvl, 151126}, // __builtin_ve_vl_vsrawzx_vvvmvl
      {Intrinsic::ve_vl_vsrawzx_vvvvl, 151157}, // __builtin_ve_vl_vsrawzx_vvvvl
      {Intrinsic::ve_vl_vsrl_vvsl, 151187}, // __builtin_ve_vl_vsrl_vvsl
      {Intrinsic::ve_vl_vsrl_vvsmvl, 151213}, // __builtin_ve_vl_vsrl_vvsmvl
      {Intrinsic::ve_vl_vsrl_vvsvl, 151241}, // __builtin_ve_vl_vsrl_vvsvl
      {Intrinsic::ve_vl_vsrl_vvvl, 151268}, // __builtin_ve_vl_vsrl_vvvl
      {Intrinsic::ve_vl_vsrl_vvvmvl, 151294}, // __builtin_ve_vl_vsrl_vvvmvl
      {Intrinsic::ve_vl_vsrl_vvvvl, 151322}, // __builtin_ve_vl_vsrl_vvvvl
      {Intrinsic::ve_vl_vst2d_vssl, 151400}, // __builtin_ve_vl_vst2d_vssl
      {Intrinsic::ve_vl_vst2d_vssml, 151427}, // __builtin_ve_vl_vst2d_vssml
      {Intrinsic::ve_vl_vst2dnc_vssl, 151455}, // __builtin_ve_vl_vst2dnc_vssl
      {Intrinsic::ve_vl_vst2dnc_vssml, 151484}, // __builtin_ve_vl_vst2dnc_vssml
      {Intrinsic::ve_vl_vst2dncot_vssl, 151514}, // __builtin_ve_vl_vst2dncot_vssl
      {Intrinsic::ve_vl_vst2dncot_vssml, 151545}, // __builtin_ve_vl_vst2dncot_vssml
      {Intrinsic::ve_vl_vst2dot_vssl, 151577}, // __builtin_ve_vl_vst2dot_vssl
      {Intrinsic::ve_vl_vst2dot_vssml, 151606}, // __builtin_ve_vl_vst2dot_vssml
      {Intrinsic::ve_vl_vst_vssl, 151349}, // __builtin_ve_vl_vst_vssl
      {Intrinsic::ve_vl_vst_vssml, 151374}, // __builtin_ve_vl_vst_vssml
      {Intrinsic::ve_vl_vstl2d_vssl, 151689}, // __builtin_ve_vl_vstl2d_vssl
      {Intrinsic::ve_vl_vstl2d_vssml, 151717}, // __builtin_ve_vl_vstl2d_vssml
      {Intrinsic::ve_vl_vstl2dnc_vssl, 151746}, // __builtin_ve_vl_vstl2dnc_vssl
      {Intrinsic::ve_vl_vstl2dnc_vssml, 151776}, // __builtin_ve_vl_vstl2dnc_vssml
      {Intrinsic::ve_vl_vstl2dncot_vssl, 151807}, // __builtin_ve_vl_vstl2dncot_vssl
      {Intrinsic::ve_vl_vstl2dncot_vssml, 151839}, // __builtin_ve_vl_vstl2dncot_vssml
      {Intrinsic::ve_vl_vstl2dot_vssl, 151872}, // __builtin_ve_vl_vstl2dot_vssl
      {Intrinsic::ve_vl_vstl2dot_vssml, 151902}, // __builtin_ve_vl_vstl2dot_vssml
      {Intrinsic::ve_vl_vstl_vssl, 151636}, // __builtin_ve_vl_vstl_vssl
      {Intrinsic::ve_vl_vstl_vssml, 151662}, // __builtin_ve_vl_vstl_vssml
      {Intrinsic::ve_vl_vstlnc_vssl, 151933}, // __builtin_ve_vl_vstlnc_vssl
      {Intrinsic::ve_vl_vstlnc_vssml, 151961}, // __builtin_ve_vl_vstlnc_vssml
      {Intrinsic::ve_vl_vstlncot_vssl, 151990}, // __builtin_ve_vl_vstlncot_vssl
      {Intrinsic::ve_vl_vstlncot_vssml, 152020}, // __builtin_ve_vl_vstlncot_vssml
      {Intrinsic::ve_vl_vstlot_vssl, 152051}, // __builtin_ve_vl_vstlot_vssl
      {Intrinsic::ve_vl_vstlot_vssml, 152079}, // __builtin_ve_vl_vstlot_vssml
      {Intrinsic::ve_vl_vstnc_vssl, 152108}, // __builtin_ve_vl_vstnc_vssl
      {Intrinsic::ve_vl_vstnc_vssml, 152135}, // __builtin_ve_vl_vstnc_vssml
      {Intrinsic::ve_vl_vstncot_vssl, 152163}, // __builtin_ve_vl_vstncot_vssl
      {Intrinsic::ve_vl_vstncot_vssml, 152192}, // __builtin_ve_vl_vstncot_vssml
      {Intrinsic::ve_vl_vstot_vssl, 152222}, // __builtin_ve_vl_vstot_vssl
      {Intrinsic::ve_vl_vstot_vssml, 152249}, // __builtin_ve_vl_vstot_vssml
      {Intrinsic::ve_vl_vstu2d_vssl, 152330}, // __builtin_ve_vl_vstu2d_vssl
      {Intrinsic::ve_vl_vstu2d_vssml, 152358}, // __builtin_ve_vl_vstu2d_vssml
      {Intrinsic::ve_vl_vstu2dnc_vssl, 152387}, // __builtin_ve_vl_vstu2dnc_vssl
      {Intrinsic::ve_vl_vstu2dnc_vssml, 152417}, // __builtin_ve_vl_vstu2dnc_vssml
      {Intrinsic::ve_vl_vstu2dncot_vssl, 152448}, // __builtin_ve_vl_vstu2dncot_vssl
      {Intrinsic::ve_vl_vstu2dncot_vssml, 152480}, // __builtin_ve_vl_vstu2dncot_vssml
      {Intrinsic::ve_vl_vstu2dot_vssl, 152513}, // __builtin_ve_vl_vstu2dot_vssl
      {Intrinsic::ve_vl_vstu2dot_vssml, 152543}, // __builtin_ve_vl_vstu2dot_vssml
      {Intrinsic::ve_vl_vstu_vssl, 152277}, // __builtin_ve_vl_vstu_vssl
      {Intrinsic::ve_vl_vstu_vssml, 152303}, // __builtin_ve_vl_vstu_vssml
      {Intrinsic::ve_vl_vstunc_vssl, 152574}, // __builtin_ve_vl_vstunc_vssl
      {Intrinsic::ve_vl_vstunc_vssml, 152602}, // __builtin_ve_vl_vstunc_vssml
      {Intrinsic::ve_vl_vstuncot_vssl, 152631}, // __builtin_ve_vl_vstuncot_vssl
      {Intrinsic::ve_vl_vstuncot_vssml, 152661}, // __builtin_ve_vl_vstuncot_vssml
      {Intrinsic::ve_vl_vstuot_vssl, 152692}, // __builtin_ve_vl_vstuot_vssl
      {Intrinsic::ve_vl_vstuot_vssml, 152720}, // __builtin_ve_vl_vstuot_vssml
      {Intrinsic::ve_vl_vsubsl_vsvl, 152749}, // __builtin_ve_vl_vsubsl_vsvl
      {Intrinsic::ve_vl_vsubsl_vsvmvl, 152777}, // __builtin_ve_vl_vsubsl_vsvmvl
      {Intrinsic::ve_vl_vsubsl_vsvvl, 152807}, // __builtin_ve_vl_vsubsl_vsvvl
      {Intrinsic::ve_vl_vsubsl_vvvl, 152836}, // __builtin_ve_vl_vsubsl_vvvl
      {Intrinsic::ve_vl_vsubsl_vvvmvl, 152864}, // __builtin_ve_vl_vsubsl_vvvmvl
      {Intrinsic::ve_vl_vsubsl_vvvvl, 152894}, // __builtin_ve_vl_vsubsl_vvvvl
      {Intrinsic::ve_vl_vsubswsx_vsvl, 152923}, // __builtin_ve_vl_vsubswsx_vsvl
      {Intrinsic::ve_vl_vsubswsx_vsvmvl, 152953}, // __builtin_ve_vl_vsubswsx_vsvmvl
      {Intrinsic::ve_vl_vsubswsx_vsvvl, 152985}, // __builtin_ve_vl_vsubswsx_vsvvl
      {Intrinsic::ve_vl_vsubswsx_vvvl, 153016}, // __builtin_ve_vl_vsubswsx_vvvl
      {Intrinsic::ve_vl_vsubswsx_vvvmvl, 153046}, // __builtin_ve_vl_vsubswsx_vvvmvl
      {Intrinsic::ve_vl_vsubswsx_vvvvl, 153078}, // __builtin_ve_vl_vsubswsx_vvvvl
      {Intrinsic::ve_vl_vsubswzx_vsvl, 153109}, // __builtin_ve_vl_vsubswzx_vsvl
      {Intrinsic::ve_vl_vsubswzx_vsvmvl, 153139}, // __builtin_ve_vl_vsubswzx_vsvmvl
      {Intrinsic::ve_vl_vsubswzx_vsvvl, 153171}, // __builtin_ve_vl_vsubswzx_vsvvl
      {Intrinsic::ve_vl_vsubswzx_vvvl, 153202}, // __builtin_ve_vl_vsubswzx_vvvl
      {Intrinsic::ve_vl_vsubswzx_vvvmvl, 153232}, // __builtin_ve_vl_vsubswzx_vvvmvl
      {Intrinsic::ve_vl_vsubswzx_vvvvl, 153264}, // __builtin_ve_vl_vsubswzx_vvvvl
      {Intrinsic::ve_vl_vsubul_vsvl, 153295}, // __builtin_ve_vl_vsubul_vsvl
      {Intrinsic::ve_vl_vsubul_vsvmvl, 153323}, // __builtin_ve_vl_vsubul_vsvmvl
      {Intrinsic::ve_vl_vsubul_vsvvl, 153353}, // __builtin_ve_vl_vsubul_vsvvl
      {Intrinsic::ve_vl_vsubul_vvvl, 153382}, // __builtin_ve_vl_vsubul_vvvl
      {Intrinsic::ve_vl_vsubul_vvvmvl, 153410}, // __builtin_ve_vl_vsubul_vvvmvl
      {Intrinsic::ve_vl_vsubul_vvvvl, 153440}, // __builtin_ve_vl_vsubul_vvvvl
      {Intrinsic::ve_vl_vsubuw_vsvl, 153469}, // __builtin_ve_vl_vsubuw_vsvl
      {Intrinsic::ve_vl_vsubuw_vsvmvl, 153497}, // __builtin_ve_vl_vsubuw_vsvmvl
      {Intrinsic::ve_vl_vsubuw_vsvvl, 153527}, // __builtin_ve_vl_vsubuw_vsvvl
      {Intrinsic::ve_vl_vsubuw_vvvl, 153556}, // __builtin_ve_vl_vsubuw_vvvl
      {Intrinsic::ve_vl_vsubuw_vvvmvl, 153584}, // __builtin_ve_vl_vsubuw_vvvmvl
      {Intrinsic::ve_vl_vsubuw_vvvvl, 153614}, // __builtin_ve_vl_vsubuw_vvvvl
      {Intrinsic::ve_vl_vsuml_vvl, 153643}, // __builtin_ve_vl_vsuml_vvl
      {Intrinsic::ve_vl_vsuml_vvml, 153669}, // __builtin_ve_vl_vsuml_vvml
      {Intrinsic::ve_vl_vsumwsx_vvl, 153696}, // __builtin_ve_vl_vsumwsx_vvl
      {Intrinsic::ve_vl_vsumwsx_vvml, 153724}, // __builtin_ve_vl_vsumwsx_vvml
      {Intrinsic::ve_vl_vsumwzx_vvl, 153753}, // __builtin_ve_vl_vsumwzx_vvl
      {Intrinsic::ve_vl_vsumwzx_vvml, 153781}, // __builtin_ve_vl_vsumwzx_vvml
      {Intrinsic::ve_vl_vxor_vsvl, 153810}, // __builtin_ve_vl_vxor_vsvl
      {Intrinsic::ve_vl_vxor_vsvmvl, 153836}, // __builtin_ve_vl_vxor_vsvmvl
      {Intrinsic::ve_vl_vxor_vsvvl, 153864}, // __builtin_ve_vl_vxor_vsvvl
      {Intrinsic::ve_vl_vxor_vvvl, 153891}, // __builtin_ve_vl_vxor_vvvl
      {Intrinsic::ve_vl_vxor_vvvmvl, 153917}, // __builtin_ve_vl_vxor_vvvmvl
      {Intrinsic::ve_vl_vxor_vvvvl, 153945}, // __builtin_ve_vl_vxor_vvvvl
      {Intrinsic::ve_vl_xorm_MMM, 153972}, // __builtin_ve_vl_xorm_MMM
      {Intrinsic::ve_vl_xorm_mmm, 153997}, // __builtin_ve_vl_xorm_mmm
    };
    // The table above is sorted by builtin name (byte-wise ASCII order of the
    // string each offset points at), so the query can be resolved with a
    // binary search; BuiltinEntry compares less-than against the name string.
    auto I = std::lower_bound(std::begin(veNames),
                              std::end(veNames),
                              BuiltinNameStr);
    // lower_bound only yields the insertion point, so confirm an exact name
    // match before returning the intrinsic ID.
    if (I != std::end(veNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
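  // A minimal, self-contained sketch (illustrative only, excluded from
  // compilation) of the sorted-table lookup each target block above performs.
  // DemoEntry, DemoNames, and demoLookup are assumed names invented for this
  // sketch; the real tables store string-table offsets rather than inline
  // pointers, and return Intrinsic::not_intrinsic on a miss.
#if 0
#include <algorithm>
#include <cstring>
#include <iterator>
#include <string>

struct DemoEntry {
  unsigned ID;      // stands in for Intrinsic::ID
  const char *Name; // inline name; the real tables use a string-table offset
  bool operator<(const std::string &RHS) const {
    return std::strcmp(Name, RHS.c_str()) < 0;
  }
};

// Must be sorted by Name in byte-wise ASCII order, as veNames/x86Names are.
static const DemoEntry DemoNames[] = {
    {1, "__builtin_demo_add"},
    {2, "__builtin_demo_mul"},
    {3, "__builtin_demo_sub"},
};

static unsigned demoLookup(const std::string &BuiltinName) {
  // Binary search for the first entry not less than the query...
  auto I = std::lower_bound(std::begin(DemoNames), std::end(DemoNames),
                            BuiltinName);
  // ...then require an exact match, since lower_bound alone only gives the
  // insertion point.
  if (I != std::end(DemoNames) && BuiltinName == I->Name)
    return I->ID;
  return 0; // not found
}
#endif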
  if (TargetPrefix == "x86") {
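    // As with veNames, entries are sorted by the builtin name shown in the
    // trailing comment (byte-wise ASCII order, so e.g. "aadd64" precedes
    // "aand32"); the std::lower_bound probe after the table depends on this.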
    static const BuiltinEntry x86Names[] = {
      {Intrinsic::x86_aadd32, 154530}, // __builtin_ia32_aadd32
      {Intrinsic::x86_aadd64, 154552}, // __builtin_ia32_aadd64
      {Intrinsic::x86_aand32, 154574}, // __builtin_ia32_aand32
      {Intrinsic::x86_aand64, 154596}, // __builtin_ia32_aand64
      {Intrinsic::x86_avx512_add_pd_512, 159111}, // __builtin_ia32_addpd512
      {Intrinsic::x86_avx512fp16_add_ph_512, 172504}, // __builtin_ia32_addph512
      {Intrinsic::x86_avx512_add_ps_512, 159135}, // __builtin_ia32_addps512
      {Intrinsic::x86_avx512_mask_add_sd_round, 160209}, // __builtin_ia32_addsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_add_sh_round, 172552}, // __builtin_ia32_addsh_round_mask
      {Intrinsic::x86_avx512_mask_add_ss_round, 160241}, // __builtin_ia32_addss_round_mask
      {Intrinsic::x86_sse3_addsub_pd, 182775}, // __builtin_ia32_addsubpd
      {Intrinsic::x86_avx_addsub_pd_256, 155043}, // __builtin_ia32_addsubpd256
      {Intrinsic::x86_sse3_addsub_ps, 182799}, // __builtin_ia32_addsubps
      {Intrinsic::x86_avx_addsub_ps_256, 155070}, // __builtin_ia32_addsubps256
      {Intrinsic::x86_aesni_aesdec, 154618}, // __builtin_ia32_aesdec128
      {Intrinsic::x86_aesni_aesdec_256, 154643}, // __builtin_ia32_aesdec256
      {Intrinsic::x86_aesni_aesdec_512, 154668}, // __builtin_ia32_aesdec512
      {Intrinsic::x86_aesni_aesdeclast, 154693}, // __builtin_ia32_aesdeclast128
      {Intrinsic::x86_aesni_aesdeclast_256, 154722}, // __builtin_ia32_aesdeclast256
      {Intrinsic::x86_aesni_aesdeclast_512, 154751}, // __builtin_ia32_aesdeclast512
      {Intrinsic::x86_aesni_aesenc, 154780}, // __builtin_ia32_aesenc128
      {Intrinsic::x86_aesni_aesenc_256, 154805}, // __builtin_ia32_aesenc256
      {Intrinsic::x86_aesni_aesenc_512, 154830}, // __builtin_ia32_aesenc512
      {Intrinsic::x86_aesni_aesenclast, 154855}, // __builtin_ia32_aesenclast128
      {Intrinsic::x86_aesni_aesenclast_256, 154884}, // __builtin_ia32_aesenclast256
      {Intrinsic::x86_aesni_aesenclast_512, 154913}, // __builtin_ia32_aesenclast512
      {Intrinsic::x86_aesni_aesimc, 154942}, // __builtin_ia32_aesimc128
      {Intrinsic::x86_aesni_aeskeygenassist, 154967}, // __builtin_ia32_aeskeygenassist128
      {Intrinsic::x86_aor32, 155001}, // __builtin_ia32_aor32
      {Intrinsic::x86_aor64, 155022}, // __builtin_ia32_aor64
      {Intrinsic::x86_axor32, 177046}, // __builtin_ia32_axor32
      {Intrinsic::x86_axor64, 177068}, // __builtin_ia32_axor64
      {Intrinsic::x86_bmi_bextr_32, 177090}, // __builtin_ia32_bextr_u32
      {Intrinsic::x86_bmi_bextr_64, 177115}, // __builtin_ia32_bextr_u64
      {Intrinsic::x86_tbm_bextri_u32, 184646}, // __builtin_ia32_bextri_u32
      {Intrinsic::x86_tbm_bextri_u64, 184672}, // __builtin_ia32_bextri_u64
      {Intrinsic::x86_sse41_blendvpd, 182976}, // __builtin_ia32_blendvpd
      {Intrinsic::x86_avx_blendv_pd_256, 155097}, // __builtin_ia32_blendvpd256
      {Intrinsic::x86_sse41_blendvps, 183000}, // __builtin_ia32_blendvps
      {Intrinsic::x86_avx_blendv_ps_256, 155124}, // __builtin_ia32_blendvps256
      {Intrinsic::x86_avx512_broadcastmb_128, 159159}, // __builtin_ia32_broadcastmb128
      {Intrinsic::x86_avx512_broadcastmb_256, 159189}, // __builtin_ia32_broadcastmb256
      {Intrinsic::x86_avx512_broadcastmb_512, 159219}, // __builtin_ia32_broadcastmb512
      {Intrinsic::x86_avx512_broadcastmw_128, 159249}, // __builtin_ia32_broadcastmw128
      {Intrinsic::x86_avx512_broadcastmw_256, 159279}, // __builtin_ia32_broadcastmw256
      {Intrinsic::x86_avx512_broadcastmw_512, 159309}, // __builtin_ia32_broadcastmw512
      {Intrinsic::x86_bmi_bzhi_64, 177163}, // __builtin_ia32_bzhi_di
      {Intrinsic::x86_bmi_bzhi_32, 177140}, // __builtin_ia32_bzhi_si
      {Intrinsic::x86_cldemote, 177278}, // __builtin_ia32_cldemote
      {Intrinsic::x86_sse2_clflush, 181338}, // __builtin_ia32_clflush
      {Intrinsic::x86_clflushopt, 177302}, // __builtin_ia32_clflushopt
      {Intrinsic::x86_clrssbsy, 177328}, // __builtin_ia32_clrssbsy
      {Intrinsic::x86_clui, 177352}, // __builtin_ia32_clui
      {Intrinsic::x86_clwb, 177372}, // __builtin_ia32_clwb
      {Intrinsic::x86_clzero, 177392}, // __builtin_ia32_clzero
      {Intrinsic::x86_cmpccxadd32, 177414}, // __builtin_ia32_cmpccxadd32
      {Intrinsic::x86_cmpccxadd64, 177441}, // __builtin_ia32_cmpccxadd64
      {Intrinsic::x86_sse2_cmp_sd, 181361}, // __builtin_ia32_cmpsd
      {Intrinsic::x86_avx512_mask_cmp_sd, 160273}, // __builtin_ia32_cmpsd_mask
      {Intrinsic::x86_avx512fp16_mask_cmp_sh, 172584}, // __builtin_ia32_cmpsh_mask
      {Intrinsic::x86_sse_cmp_ss, 180557}, // __builtin_ia32_cmpss
      {Intrinsic::x86_avx512_mask_cmp_ss, 160299}, // __builtin_ia32_cmpss_mask
      {Intrinsic::x86_sse_comieq_ss, 180578}, // __builtin_ia32_comieq
      {Intrinsic::x86_sse_comige_ss, 180600}, // __builtin_ia32_comige
      {Intrinsic::x86_sse_comigt_ss, 180622}, // __builtin_ia32_comigt
      {Intrinsic::x86_sse_comile_ss, 180644}, // __builtin_ia32_comile
      {Intrinsic::x86_sse_comilt_ss, 180666}, // __builtin_ia32_comilt
      {Intrinsic::x86_sse_comineq_ss, 180688}, // __builtin_ia32_comineq
      {Intrinsic::x86_sse2_comieq_sd, 181382}, // __builtin_ia32_comisdeq
      {Intrinsic::x86_sse2_comige_sd, 181406}, // __builtin_ia32_comisdge
      {Intrinsic::x86_sse2_comigt_sd, 181430}, // __builtin_ia32_comisdgt
      {Intrinsic::x86_sse2_comile_sd, 181454}, // __builtin_ia32_comisdle
      {Intrinsic::x86_sse2_comilt_sd, 181478}, // __builtin_ia32_comisdlt
      {Intrinsic::x86_sse2_comineq_sd, 181502}, // __builtin_ia32_comisdneq
      {Intrinsic::x86_sse42_crc32_64_64, 183438}, // __builtin_ia32_crc32di
      {Intrinsic::x86_sse42_crc32_32_16, 183369}, // __builtin_ia32_crc32hi
      {Intrinsic::x86_sse42_crc32_32_8, 183415}, // __builtin_ia32_crc32qi
      {Intrinsic::x86_sse42_crc32_32_32, 183392}, // __builtin_ia32_crc32si
      {Intrinsic::x86_avx512bf16_cvtne2ps2bf16_128, 172257}, // __builtin_ia32_cvtne2ps2bf16_128
      {Intrinsic::x86_avx512bf16_cvtne2ps2bf16_256, 172290}, // __builtin_ia32_cvtne2ps2bf16_256
      {Intrinsic::x86_avx512bf16_cvtne2ps2bf16_512, 172323}, // __builtin_ia32_cvtne2ps2bf16_512
      {Intrinsic::x86_avx512bf16_cvtneps2bf16_256, 172356}, // __builtin_ia32_cvtneps2bf16_256
      {Intrinsic::x86_avx512bf16_cvtneps2bf16_512, 172388}, // __builtin_ia32_cvtneps2bf16_512
      {Intrinsic::x86_sse2_cvtpd2dq, 181527}, // __builtin_ia32_cvtpd2dq
      {Intrinsic::x86_avx512_mask_cvtpd2dq_128, 160325}, // __builtin_ia32_cvtpd2dq128_mask
      {Intrinsic::x86_avx_cvt_pd2dq_256, 155178}, // __builtin_ia32_cvtpd2dq256
      {Intrinsic::x86_avx512_mask_cvtpd2dq_512, 160357}, // __builtin_ia32_cvtpd2dq512_mask
      {Intrinsic::x86_sse_cvtpd2pi, 180711}, // __builtin_ia32_cvtpd2pi
      {Intrinsic::x86_sse2_cvtpd2ps, 181551}, // __builtin_ia32_cvtpd2ps
      {Intrinsic::x86_avx_cvt_pd2_ps_256, 155151}, // __builtin_ia32_cvtpd2ps256
      {Intrinsic::x86_avx512_mask_cvtpd2ps_512, 160418}, // __builtin_ia32_cvtpd2ps512_mask
      {Intrinsic::x86_avx512_mask_cvtpd2ps, 160389}, // __builtin_ia32_cvtpd2ps_mask
      {Intrinsic::x86_avx512_mask_cvtpd2qq_128, 160450}, // __builtin_ia32_cvtpd2qq128_mask
      {Intrinsic::x86_avx512_mask_cvtpd2qq_256, 160482}, // __builtin_ia32_cvtpd2qq256_mask
      {Intrinsic::x86_avx512_mask_cvtpd2qq_512, 160514}, // __builtin_ia32_cvtpd2qq512_mask
      {Intrinsic::x86_avx512_mask_cvtpd2udq_128, 160546}, // __builtin_ia32_cvtpd2udq128_mask
      {Intrinsic::x86_avx512_mask_cvtpd2udq_256, 160579}, // __builtin_ia32_cvtpd2udq256_mask
      {Intrinsic::x86_avx512_mask_cvtpd2udq_512, 160612}, // __builtin_ia32_cvtpd2udq512_mask
      {Intrinsic::x86_avx512_mask_cvtpd2uqq_128, 160645}, // __builtin_ia32_cvtpd2uqq128_mask
      {Intrinsic::x86_avx512_mask_cvtpd2uqq_256, 160678}, // __builtin_ia32_cvtpd2uqq256_mask
      {Intrinsic::x86_avx512_mask_cvtpd2uqq_512, 160711}, // __builtin_ia32_cvtpd2uqq512_mask
      {Intrinsic::x86_sse_cvtpi2pd, 180735}, // __builtin_ia32_cvtpi2pd
      {Intrinsic::x86_sse_cvtpi2ps, 180759}, // __builtin_ia32_cvtpi2ps
      {Intrinsic::x86_sse2_cvtps2dq, 181575}, // __builtin_ia32_cvtps2dq
      {Intrinsic::x86_avx512_mask_cvtps2dq_128, 160744}, // __builtin_ia32_cvtps2dq128_mask
      {Intrinsic::x86_avx_cvt_ps2dq_256, 155205}, // __builtin_ia32_cvtps2dq256
      {Intrinsic::x86_avx512_mask_cvtps2dq_256, 160776}, // __builtin_ia32_cvtps2dq256_mask
      {Intrinsic::x86_avx512_mask_cvtps2dq_512, 160808}, // __builtin_ia32_cvtps2dq512_mask
      {Intrinsic::x86_avx512_mask_cvtps2pd_512, 160840}, // __builtin_ia32_cvtps2pd512_mask
      {Intrinsic::x86_sse_cvtps2pi, 180783}, // __builtin_ia32_cvtps2pi
      {Intrinsic::x86_avx512_mask_cvtps2qq_128, 160872}, // __builtin_ia32_cvtps2qq128_mask
      {Intrinsic::x86_avx512_mask_cvtps2qq_256, 160904}, // __builtin_ia32_cvtps2qq256_mask
      {Intrinsic::x86_avx512_mask_cvtps2qq_512, 160936}, // __builtin_ia32_cvtps2qq512_mask
      {Intrinsic::x86_avx512_mask_cvtps2udq_128, 160968}, // __builtin_ia32_cvtps2udq128_mask
      {Intrinsic::x86_avx512_mask_cvtps2udq_256, 161001}, // __builtin_ia32_cvtps2udq256_mask
      {Intrinsic::x86_avx512_mask_cvtps2udq_512, 161034}, // __builtin_ia32_cvtps2udq512_mask
      {Intrinsic::x86_avx512_mask_cvtps2uqq_128, 161067}, // __builtin_ia32_cvtps2uqq128_mask
      {Intrinsic::x86_avx512_mask_cvtps2uqq_256, 161100}, // __builtin_ia32_cvtps2uqq256_mask
      {Intrinsic::x86_avx512_mask_cvtps2uqq_512, 161133}, // __builtin_ia32_cvtps2uqq512_mask
      {Intrinsic::x86_avx512_mask_cvtqq2ps_128, 161166}, // __builtin_ia32_cvtqq2ps128_mask
      {Intrinsic::x86_sse2_cvtsd2si, 181599}, // __builtin_ia32_cvtsd2si
      {Intrinsic::x86_sse2_cvtsd2si64, 181623}, // __builtin_ia32_cvtsd2si64
      {Intrinsic::x86_sse2_cvtsd2ss, 181649}, // __builtin_ia32_cvtsd2ss
      {Intrinsic::x86_avx512_mask_cvtsd2ss_round, 161198}, // __builtin_ia32_cvtsd2ss_round_mask
      {Intrinsic::x86_avx512_cvtsi2sd64, 159531}, // __builtin_ia32_cvtsi2sd64
      {Intrinsic::x86_avx512_cvtsi2ss32, 159557}, // __builtin_ia32_cvtsi2ss32
      {Intrinsic::x86_avx512_cvtsi2ss64, 159583}, // __builtin_ia32_cvtsi2ss64
      {Intrinsic::x86_avx512_mask_cvtss2sd_round, 161233}, // __builtin_ia32_cvtss2sd_round_mask
      {Intrinsic::x86_sse_cvtss2si, 180807}, // __builtin_ia32_cvtss2si
      {Intrinsic::x86_sse_cvtss2si64, 180831}, // __builtin_ia32_cvtss2si64
      {Intrinsic::x86_sse2_cvttpd2dq, 181673}, // __builtin_ia32_cvttpd2dq
      {Intrinsic::x86_avx512_mask_cvttpd2dq_128, 161268}, // __builtin_ia32_cvttpd2dq128_mask
      {Intrinsic::x86_avx_cvtt_pd2dq_256, 155232}, // __builtin_ia32_cvttpd2dq256
      {Intrinsic::x86_avx512_mask_cvttpd2dq_512, 161301}, // __builtin_ia32_cvttpd2dq512_mask
      {Intrinsic::x86_sse_cvttpd2pi, 180857}, // __builtin_ia32_cvttpd2pi
      {Intrinsic::x86_avx512_mask_cvttpd2qq_128, 161334}, // __builtin_ia32_cvttpd2qq128_mask
      {Intrinsic::x86_avx512_mask_cvttpd2qq_256, 161367}, // __builtin_ia32_cvttpd2qq256_mask
      {Intrinsic::x86_avx512_mask_cvttpd2qq_512, 161400}, // __builtin_ia32_cvttpd2qq512_mask
      {Intrinsic::x86_avx512_mask_cvttpd2udq_128, 161433}, // __builtin_ia32_cvttpd2udq128_mask
      {Intrinsic::x86_avx512_mask_cvttpd2udq_256, 161467}, // __builtin_ia32_cvttpd2udq256_mask
      {Intrinsic::x86_avx512_mask_cvttpd2udq_512, 161501}, // __builtin_ia32_cvttpd2udq512_mask
      {Intrinsic::x86_avx512_mask_cvttpd2uqq_128, 161535}, // __builtin_ia32_cvttpd2uqq128_mask
      {Intrinsic::x86_avx512_mask_cvttpd2uqq_256, 161569}, // __builtin_ia32_cvttpd2uqq256_mask
      {Intrinsic::x86_avx512_mask_cvttpd2uqq_512, 161603}, // __builtin_ia32_cvttpd2uqq512_mask
      {Intrinsic::x86_sse2_cvttps2dq, 181698}, // __builtin_ia32_cvttps2dq
      {Intrinsic::x86_avx_cvtt_ps2dq_256, 155260}, // __builtin_ia32_cvttps2dq256
      {Intrinsic::x86_avx512_mask_cvttps2dq_512, 161637}, // __builtin_ia32_cvttps2dq512_mask
      {Intrinsic::x86_sse_cvttps2pi, 180882}, // __builtin_ia32_cvttps2pi
      {Intrinsic::x86_avx512_mask_cvttps2qq_128, 161670}, // __builtin_ia32_cvttps2qq128_mask
      {Intrinsic::x86_avx512_mask_cvttps2qq_256, 161703}, // __builtin_ia32_cvttps2qq256_mask
      {Intrinsic::x86_avx512_mask_cvttps2qq_512, 161736}, // __builtin_ia32_cvttps2qq512_mask
      {Intrinsic::x86_avx512_mask_cvttps2udq_128, 161769}, // __builtin_ia32_cvttps2udq128_mask
      {Intrinsic::x86_avx512_mask_cvttps2udq_256, 161803}, // __builtin_ia32_cvttps2udq256_mask
      {Intrinsic::x86_avx512_mask_cvttps2udq_512, 161837}, // __builtin_ia32_cvttps2udq512_mask
      {Intrinsic::x86_avx512_mask_cvttps2uqq_128, 161871}, // __builtin_ia32_cvttps2uqq128_mask
      {Intrinsic::x86_avx512_mask_cvttps2uqq_256, 161905}, // __builtin_ia32_cvttps2uqq256_mask
      {Intrinsic::x86_avx512_mask_cvttps2uqq_512, 161939}, // __builtin_ia32_cvttps2uqq512_mask
      {Intrinsic::x86_sse2_cvttsd2si, 181723}, // __builtin_ia32_cvttsd2si
      {Intrinsic::x86_sse2_cvttsd2si64, 181748}, // __builtin_ia32_cvttsd2si64
      {Intrinsic::x86_sse_cvttss2si, 180907}, // __builtin_ia32_cvttss2si
      {Intrinsic::x86_sse_cvttss2si64, 180932}, // __builtin_ia32_cvttss2si64
      {Intrinsic::x86_avx512_mask_cvtuqq2ps_128, 161973}, // __builtin_ia32_cvtuqq2ps128_mask
      {Intrinsic::x86_avx512_cvtusi642sd, 159864}, // __builtin_ia32_cvtusi2sd64
      {Intrinsic::x86_avx512_cvtusi2ss, 159837}, // __builtin_ia32_cvtusi2ss32
      {Intrinsic::x86_avx512_cvtusi642ss, 159891}, // __builtin_ia32_cvtusi2ss64
      {Intrinsic::x86_avx512_dbpsadbw_128, 159918}, // __builtin_ia32_dbpsadbw128
      {Intrinsic::x86_avx512_dbpsadbw_256, 159945}, // __builtin_ia32_dbpsadbw256
      {Intrinsic::x86_avx512_dbpsadbw_512, 159972}, // __builtin_ia32_dbpsadbw512
      {Intrinsic::x86_directstore32, 177468}, // __builtin_ia32_directstore_u32
      {Intrinsic::x86_directstore64, 177499}, // __builtin_ia32_directstore_u64
      {Intrinsic::x86_avx512_div_pd_512, 159999}, // __builtin_ia32_divpd512
      {Intrinsic::x86_avx512fp16_div_ph_512, 172528}, // __builtin_ia32_divph512
      {Intrinsic::x86_avx512_div_ps_512, 160023}, // __builtin_ia32_divps512
      {Intrinsic::x86_avx512_mask_div_sd_round, 162006}, // __builtin_ia32_divsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_div_sh_round, 172610}, // __builtin_ia32_divsh_round_mask
      {Intrinsic::x86_avx512_mask_div_ss_round, 162038}, // __builtin_ia32_divss_round_mask
      {Intrinsic::x86_avx512bf16_dpbf16ps_128, 172420}, // __builtin_ia32_dpbf16ps_128
      {Intrinsic::x86_avx512bf16_dpbf16ps_256, 172448}, // __builtin_ia32_dpbf16ps_256
      {Intrinsic::x86_avx512bf16_dpbf16ps_512, 172476}, // __builtin_ia32_dpbf16ps_512
      {Intrinsic::x86_sse41_dppd, 183024}, // __builtin_ia32_dppd
      {Intrinsic::x86_sse41_dpps, 183044}, // __builtin_ia32_dpps
      {Intrinsic::x86_avx_dp_ps_256, 155288}, // __builtin_ia32_dpps256
      {Intrinsic::x86_mmx_emms, 178188}, // __builtin_ia32_emms
      {Intrinsic::x86_enqcmd, 177530}, // __builtin_ia32_enqcmd
      {Intrinsic::x86_enqcmds, 177552}, // __builtin_ia32_enqcmds
      {Intrinsic::x86_avx512_exp2_pd, 160047}, // __builtin_ia32_exp2pd_mask
      {Intrinsic::x86_avx512_exp2_ps, 160074}, // __builtin_ia32_exp2ps_mask
      {Intrinsic::x86_sse4a_extrq, 183863}, // __builtin_ia32_extrq
      {Intrinsic::x86_sse4a_extrqi, 183884}, // __builtin_ia32_extrqi
      {Intrinsic::x86_mmx_femms, 178208}, // __builtin_ia32_femms
      {Intrinsic::x86_avx512_mask_fixupimm_pd_128, 162070}, // __builtin_ia32_fixupimmpd128_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_pd_128, 167634}, // __builtin_ia32_fixupimmpd128_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_pd_256, 162104}, // __builtin_ia32_fixupimmpd256_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_pd_256, 167669}, // __builtin_ia32_fixupimmpd256_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_pd_512, 162138}, // __builtin_ia32_fixupimmpd512_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_pd_512, 167704}, // __builtin_ia32_fixupimmpd512_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_ps_128, 162172}, // __builtin_ia32_fixupimmps128_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_ps_128, 167739}, // __builtin_ia32_fixupimmps128_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_ps_256, 162206}, // __builtin_ia32_fixupimmps256_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_ps_256, 167774}, // __builtin_ia32_fixupimmps256_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_ps_512, 162240}, // __builtin_ia32_fixupimmps512_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_ps_512, 167809}, // __builtin_ia32_fixupimmps512_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_sd, 162274}, // __builtin_ia32_fixupimmsd_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_sd, 167844}, // __builtin_ia32_fixupimmsd_maskz
      {Intrinsic::x86_avx512_mask_fixupimm_ss, 162305}, // __builtin_ia32_fixupimmss_mask
      {Intrinsic::x86_avx512_maskz_fixupimm_ss, 167876}, // __builtin_ia32_fixupimmss_maskz
      {Intrinsic::x86_avx512_mask_fpclass_sd, 162336}, // __builtin_ia32_fpclasssd_mask
      {Intrinsic::x86_avx512fp16_mask_fpclass_sh, 172642}, // __builtin_ia32_fpclasssh_mask
      {Intrinsic::x86_avx512_mask_fpclass_ss, 162366}, // __builtin_ia32_fpclassss_mask
      {Intrinsic::x86_fxrstor, 177811}, // __builtin_ia32_fxrstor
      {Intrinsic::x86_fxrstor64, 177834}, // __builtin_ia32_fxrstor64
      {Intrinsic::x86_fxsave, 177859}, // __builtin_ia32_fxsave
      {Intrinsic::x86_fxsave64, 177881}, // __builtin_ia32_fxsave64
      {Intrinsic::x86_avx2_gather_d_d, 156470}, // __builtin_ia32_gatherd_d
      {Intrinsic::x86_avx2_gather_d_d_256, 156495}, // __builtin_ia32_gatherd_d256
      {Intrinsic::x86_avx2_gather_d_pd, 156523}, // __builtin_ia32_gatherd_pd
      {Intrinsic::x86_avx2_gather_d_pd_256, 156549}, // __builtin_ia32_gatherd_pd256
      {Intrinsic::x86_avx2_gather_d_ps, 156578}, // __builtin_ia32_gatherd_ps
      {Intrinsic::x86_avx2_gather_d_ps_256, 156604}, // __builtin_ia32_gatherd_ps256
      {Intrinsic::x86_avx2_gather_d_q, 156633}, // __builtin_ia32_gatherd_q
      {Intrinsic::x86_avx2_gather_d_q_256, 156658}, // __builtin_ia32_gatherd_q256
      {Intrinsic::x86_avx512_gatherpf_dpd_512, 160101}, // __builtin_ia32_gatherpfdpd
      {Intrinsic::x86_avx512_gatherpf_dps_512, 160128}, // __builtin_ia32_gatherpfdps
      {Intrinsic::x86_avx512_gatherpf_qpd_512, 160155}, // __builtin_ia32_gatherpfqpd
      {Intrinsic::x86_avx512_gatherpf_qps_512, 160182}, // __builtin_ia32_gatherpfqps
      {Intrinsic::x86_avx2_gather_q_d, 156686}, // __builtin_ia32_gatherq_d
      {Intrinsic::x86_avx2_gather_q_d_256, 156711}, // __builtin_ia32_gatherq_d256
      {Intrinsic::x86_avx2_gather_q_pd, 156739}, // __builtin_ia32_gatherq_pd
      {Intrinsic::x86_avx2_gather_q_pd_256, 156765}, // __builtin_ia32_gatherq_pd256
      {Intrinsic::x86_avx2_gather_q_ps, 156794}, // __builtin_ia32_gatherq_ps
      {Intrinsic::x86_avx2_gather_q_ps_256, 156820}, // __builtin_ia32_gatherq_ps256
      {Intrinsic::x86_avx2_gather_q_q, 156849}, // __builtin_ia32_gatherq_q
      {Intrinsic::x86_avx2_gather_q_q_256, 156874}, // __builtin_ia32_gatherq_q256
      {Intrinsic::x86_avx512_mask_getexp_pd_128, 162396}, // __builtin_ia32_getexppd128_mask
      {Intrinsic::x86_avx512_mask_getexp_pd_256, 162428}, // __builtin_ia32_getexppd256_mask
      {Intrinsic::x86_avx512_mask_getexp_pd_512, 162460}, // __builtin_ia32_getexppd512_mask
      {Intrinsic::x86_avx512fp16_mask_getexp_ph_128, 172672}, // __builtin_ia32_getexpph128_mask
      {Intrinsic::x86_avx512fp16_mask_getexp_ph_256, 172704}, // __builtin_ia32_getexpph256_mask
      {Intrinsic::x86_avx512fp16_mask_getexp_ph_512, 172736}, // __builtin_ia32_getexpph512_mask
      {Intrinsic::x86_avx512_mask_getexp_ps_128, 162492}, // __builtin_ia32_getexpps128_mask
      {Intrinsic::x86_avx512_mask_getexp_ps_256, 162524}, // __builtin_ia32_getexpps256_mask
      {Intrinsic::x86_avx512_mask_getexp_ps_512, 162556}, // __builtin_ia32_getexpps512_mask
      {Intrinsic::x86_avx512_mask_getexp_sd, 162588}, // __builtin_ia32_getexpsd128_round_mask
      {Intrinsic::x86_avx512fp16_mask_getexp_sh, 172768}, // __builtin_ia32_getexpsh128_round_mask
      {Intrinsic::x86_avx512_mask_getexp_ss, 162626}, // __builtin_ia32_getexpss128_round_mask
      {Intrinsic::x86_avx512_mask_getmant_pd_128, 162664}, // __builtin_ia32_getmantpd128_mask
      {Intrinsic::x86_avx512_mask_getmant_pd_256, 162697}, // __builtin_ia32_getmantpd256_mask
      {Intrinsic::x86_avx512_mask_getmant_pd_512, 162730}, // __builtin_ia32_getmantpd512_mask
      {Intrinsic::x86_avx512fp16_mask_getmant_ph_128, 172806}, // __builtin_ia32_getmantph128_mask
      {Intrinsic::x86_avx512fp16_mask_getmant_ph_256, 172839}, // __builtin_ia32_getmantph256_mask
      {Intrinsic::x86_avx512fp16_mask_getmant_ph_512, 172872}, // __builtin_ia32_getmantph512_mask
      {Intrinsic::x86_avx512_mask_getmant_ps_128, 162763}, // __builtin_ia32_getmantps128_mask
      {Intrinsic::x86_avx512_mask_getmant_ps_256, 162796}, // __builtin_ia32_getmantps256_mask
      {Intrinsic::x86_avx512_mask_getmant_ps_512, 162829}, // __builtin_ia32_getmantps512_mask
      {Intrinsic::x86_avx512_mask_getmant_sd, 162862}, // __builtin_ia32_getmantsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_getmant_sh, 172905}, // __builtin_ia32_getmantsh_round_mask
      {Intrinsic::x86_avx512_mask_getmant_ss, 162898}, // __builtin_ia32_getmantss_round_mask
      {Intrinsic::x86_sse3_hadd_pd, 182823}, // __builtin_ia32_haddpd
      {Intrinsic::x86_avx_hadd_pd_256, 155311}, // __builtin_ia32_haddpd256
      {Intrinsic::x86_sse3_hadd_ps, 182845}, // __builtin_ia32_haddps
      {Intrinsic::x86_avx_hadd_ps_256, 155336}, // __builtin_ia32_haddps256
      {Intrinsic::x86_sse3_hsub_pd, 182867}, // __builtin_ia32_hsubpd
      {Intrinsic::x86_avx_hsub_pd_256, 155361}, // __builtin_ia32_hsubpd256
      {Intrinsic::x86_sse3_hsub_ps, 182889}, // __builtin_ia32_hsubps
      {Intrinsic::x86_avx_hsub_ps_256, 155386}, // __builtin_ia32_hsubps256
      {Intrinsic::x86_incsspd, 177905}, // __builtin_ia32_incsspd
      {Intrinsic::x86_incsspq, 177928}, // __builtin_ia32_incsspq
      {Intrinsic::x86_sse41_insertps, 183064}, // __builtin_ia32_insertps128
      {Intrinsic::x86_sse4a_insertq, 183906}, // __builtin_ia32_insertq
      {Intrinsic::x86_sse4a_insertqi, 183929}, // __builtin_ia32_insertqi
      {Intrinsic::x86_invpcid, 177951}, // __builtin_ia32_invpcid
      {Intrinsic::x86_sse3_ldu_dq, 182911}, // __builtin_ia32_lddqu
      {Intrinsic::x86_avx_ldu_dq_256, 155411}, // __builtin_ia32_lddqu256
      {Intrinsic::x86_sse2_lfence, 181775}, // __builtin_ia32_lfence
      {Intrinsic::x86_llwpcb, 178045}, // __builtin_ia32_llwpcb
      {Intrinsic::x86_loadiwkey, 178067}, // __builtin_ia32_loadiwkey
      {Intrinsic::x86_lwpins32, 178092}, // __builtin_ia32_lwpins32
      {Intrinsic::x86_lwpins64, 178116}, // __builtin_ia32_lwpins64
      {Intrinsic::x86_lwpval32, 178140}, // __builtin_ia32_lwpval32
      {Intrinsic::x86_lwpval64, 178164}, // __builtin_ia32_lwpval64
      {Intrinsic::x86_avx2_maskload_d, 156902}, // __builtin_ia32_maskloadd
      {Intrinsic::x86_avx2_maskload_d_256, 156927}, // __builtin_ia32_maskloadd256
      {Intrinsic::x86_avx_maskload_pd, 155435}, // __builtin_ia32_maskloadpd
      {Intrinsic::x86_avx_maskload_pd_256, 155461}, // __builtin_ia32_maskloadpd256
      {Intrinsic::x86_avx_maskload_ps, 155490}, // __builtin_ia32_maskloadps
      {Intrinsic::x86_avx_maskload_ps_256, 155516}, // __builtin_ia32_maskloadps256
      {Intrinsic::x86_avx2_maskload_q, 156955}, // __builtin_ia32_maskloadq
      {Intrinsic::x86_avx2_maskload_q_256, 156980}, // __builtin_ia32_maskloadq256
      {Intrinsic::x86_sse2_maskmov_dqu, 181797}, // __builtin_ia32_maskmovdqu
      {Intrinsic::x86_mmx_maskmovq, 178229}, // __builtin_ia32_maskmovq
      {Intrinsic::x86_avx2_maskstore_d, 157008}, // __builtin_ia32_maskstored
      {Intrinsic::x86_avx2_maskstore_d_256, 157034}, // __builtin_ia32_maskstored256
      {Intrinsic::x86_avx_maskstore_pd, 155545}, // __builtin_ia32_maskstorepd
      {Intrinsic::x86_avx_maskstore_pd_256, 155572}, // __builtin_ia32_maskstorepd256
      {Intrinsic::x86_avx_maskstore_ps, 155602}, // __builtin_ia32_maskstoreps
      {Intrinsic::x86_avx_maskstore_ps_256, 155629}, // __builtin_ia32_maskstoreps256
      {Intrinsic::x86_avx2_maskstore_q, 157063}, // __builtin_ia32_maskstoreq
      {Intrinsic::x86_avx2_maskstore_q_256, 157089}, // __builtin_ia32_maskstoreq256
      {Intrinsic::x86_sse2_max_pd, 181823}, // __builtin_ia32_maxpd
      {Intrinsic::x86_avx_max_pd_256, 155659}, // __builtin_ia32_maxpd256
      {Intrinsic::x86_avx512_max_pd_512, 167908}, // __builtin_ia32_maxpd512
      {Intrinsic::x86_avx512fp16_max_ph_128, 176444}, // __builtin_ia32_maxph128
      {Intrinsic::x86_avx512fp16_max_ph_256, 176468}, // __builtin_ia32_maxph256
      {Intrinsic::x86_avx512fp16_max_ph_512, 176492}, // __builtin_ia32_maxph512
      {Intrinsic::x86_sse_max_ps, 180959}, // __builtin_ia32_maxps
      {Intrinsic::x86_avx_max_ps_256, 155683}, // __builtin_ia32_maxps256
      {Intrinsic::x86_avx512_max_ps_512, 167932}, // __builtin_ia32_maxps512
      {Intrinsic::x86_sse2_max_sd, 181844}, // __builtin_ia32_maxsd
      {Intrinsic::x86_avx512_mask_max_sd_round, 162934}, // __builtin_ia32_maxsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_max_sh_round, 172941}, // __builtin_ia32_maxsh_round_mask
      {Intrinsic::x86_sse_max_ss, 180980}, // __builtin_ia32_maxss
      {Intrinsic::x86_avx512_mask_max_ss_round, 162966}, // __builtin_ia32_maxss_round_mask
      {Intrinsic::x86_sse2_mfence, 181865}, // __builtin_ia32_mfence
      {Intrinsic::x86_sse2_min_pd, 181887}, // __builtin_ia32_minpd
      {Intrinsic::x86_avx_min_pd_256, 155707}, // __builtin_ia32_minpd256
      {Intrinsic::x86_avx512_min_pd_512, 167956}, // __builtin_ia32_minpd512
      {Intrinsic::x86_avx512fp16_min_ph_128, 176516}, // __builtin_ia32_minph128
      {Intrinsic::x86_avx512fp16_min_ph_256, 176540}, // __builtin_ia32_minph256
      {Intrinsic::x86_avx512fp16_min_ph_512, 176564}, // __builtin_ia32_minph512
      {Intrinsic::x86_sse_min_ps, 181001}, // __builtin_ia32_minps
      {Intrinsic::x86_avx_min_ps_256, 155731}, // __builtin_ia32_minps256
      {Intrinsic::x86_avx512_min_ps_512, 167980}, // __builtin_ia32_minps512
      {Intrinsic::x86_sse2_min_sd, 181908}, // __builtin_ia32_minsd
      {Intrinsic::x86_avx512_mask_min_sd_round, 162998}, // __builtin_ia32_minsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_min_sh_round, 172973}, // __builtin_ia32_minsh_round_mask
      {Intrinsic::x86_sse_min_ss, 181022}, // __builtin_ia32_minss
      {Intrinsic::x86_avx512_mask_min_ss_round, 163030}, // __builtin_ia32_minss_round_mask
      {Intrinsic::x86_sse3_monitor, 182932}, // __builtin_ia32_monitor
      {Intrinsic::x86_monitorx, 179775}, // __builtin_ia32_monitorx
      {Intrinsic::x86_movdir64b, 179799}, // __builtin_ia32_movdir64b
      {Intrinsic::x86_sse2_movmsk_pd, 181929}, // __builtin_ia32_movmskpd
      {Intrinsic::x86_avx_movmsk_pd_256, 155755}, // __builtin_ia32_movmskpd256
      {Intrinsic::x86_sse_movmsk_ps, 181043}, // __builtin_ia32_movmskps
      {Intrinsic::x86_avx_movmsk_ps_256, 155782}, // __builtin_ia32_movmskps256
      {Intrinsic::x86_mmx_movnt_dq, 178253}, // __builtin_ia32_movntq
      {Intrinsic::x86_sse41_mpsadbw, 183091}, // __builtin_ia32_mpsadbw128
      {Intrinsic::x86_avx2_mpsadbw, 157118}, // __builtin_ia32_mpsadbw256
      {Intrinsic::x86_avx512_mul_pd_512, 168004}, // __builtin_ia32_mulpd512
      {Intrinsic::x86_avx512fp16_mul_ph_512, 176588}, // __builtin_ia32_mulph512
      {Intrinsic::x86_avx512_mul_ps_512, 168028}, // __builtin_ia32_mulps512
      {Intrinsic::x86_avx512_mask_mul_sd_round, 163062}, // __builtin_ia32_mulsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_mul_sh_round, 173005}, // __builtin_ia32_mulsh_round_mask
      {Intrinsic::x86_avx512_mask_mul_ss_round, 163094}, // __builtin_ia32_mulss_round_mask
      {Intrinsic::x86_sse3_mwait, 182955}, // __builtin_ia32_mwait
      {Intrinsic::x86_mwaitx, 179824}, // __builtin_ia32_mwaitx
      {Intrinsic::x86_ssse3_pabs_b, 183953}, // __builtin_ia32_pabsb
      {Intrinsic::x86_ssse3_pabs_d, 183974}, // __builtin_ia32_pabsd
      {Intrinsic::x86_ssse3_pabs_w, 183995}, // __builtin_ia32_pabsw
      {Intrinsic::x86_mmx_packssdw, 178275}, // __builtin_ia32_packssdw
      {Intrinsic::x86_sse2_packssdw_128, 181953}, // __builtin_ia32_packssdw128
      {Intrinsic::x86_avx2_packssdw, 157144}, // __builtin_ia32_packssdw256
      {Intrinsic::x86_avx512_packssdw_512, 168052}, // __builtin_ia32_packssdw512
      {Intrinsic::x86_mmx_packsswb, 178299}, // __builtin_ia32_packsswb
      {Intrinsic::x86_sse2_packsswb_128, 181980}, // __builtin_ia32_packsswb128
      {Intrinsic::x86_avx2_packsswb, 157171}, // __builtin_ia32_packsswb256
      {Intrinsic::x86_avx512_packsswb_512, 168079}, // __builtin_ia32_packsswb512
      {Intrinsic::x86_sse41_packusdw, 183117}, // __builtin_ia32_packusdw128
      {Intrinsic::x86_avx2_packusdw, 157198}, // __builtin_ia32_packusdw256
      {Intrinsic::x86_avx512_packusdw_512, 168106}, // __builtin_ia32_packusdw512
      {Intrinsic::x86_mmx_packuswb, 178323}, // __builtin_ia32_packuswb
      {Intrinsic::x86_sse2_packuswb_128, 182007}, // __builtin_ia32_packuswb128
      {Intrinsic::x86_avx2_packuswb, 157225}, // __builtin_ia32_packuswb256
      {Intrinsic::x86_avx512_packuswb_512, 168133}, // __builtin_ia32_packuswb512
      {Intrinsic::x86_mmx_padd_b, 178347}, // __builtin_ia32_paddb
      {Intrinsic::x86_mmx_padd_d, 178368}, // __builtin_ia32_paddd
      {Intrinsic::x86_mmx_padd_q, 178389}, // __builtin_ia32_paddq
      {Intrinsic::x86_mmx_padds_b, 178431}, // __builtin_ia32_paddsb
      {Intrinsic::x86_mmx_padds_w, 178453}, // __builtin_ia32_paddsw
      {Intrinsic::x86_mmx_paddus_b, 178475}, // __builtin_ia32_paddusb
      {Intrinsic::x86_mmx_paddus_w, 178498}, // __builtin_ia32_paddusw
      {Intrinsic::x86_mmx_padd_w, 178410}, // __builtin_ia32_paddw
      {Intrinsic::x86_mmx_palignr_b, 178521}, // __builtin_ia32_palignr
      {Intrinsic::x86_mmx_pand, 178544}, // __builtin_ia32_pand
      {Intrinsic::x86_mmx_pandn, 178564}, // __builtin_ia32_pandn
      {Intrinsic::x86_sse2_pause, 182034}, // __builtin_ia32_pause
      {Intrinsic::x86_mmx_pavg_b, 178585}, // __builtin_ia32_pavgb
      {Intrinsic::x86_sse2_pavg_b, 182055}, // __builtin_ia32_pavgb128
      {Intrinsic::x86_avx2_pavg_b, 157252}, // __builtin_ia32_pavgb256
      {Intrinsic::x86_avx512_pavg_b_512, 168160}, // __builtin_ia32_pavgb512
      {Intrinsic::x86_3dnow_pavgusb, 154022}, // __builtin_ia32_pavgusb
      {Intrinsic::x86_mmx_pavg_w, 178606}, // __builtin_ia32_pavgw
      {Intrinsic::x86_sse2_pavg_w, 182079}, // __builtin_ia32_pavgw128
      {Intrinsic::x86_avx2_pavg_w, 157276}, // __builtin_ia32_pavgw256
      {Intrinsic::x86_avx512_pavg_w_512, 168184}, // __builtin_ia32_pavgw512
      {Intrinsic::x86_sse41_pblendvb, 183144}, // __builtin_ia32_pblendvb128
      {Intrinsic::x86_avx2_pblendvb, 157300}, // __builtin_ia32_pblendvb256
      {Intrinsic::x86_pclmulqdq, 179846}, // __builtin_ia32_pclmulqdq128
      {Intrinsic::x86_pclmulqdq_256, 179874}, // __builtin_ia32_pclmulqdq256
      {Intrinsic::x86_pclmulqdq_512, 179902}, // __builtin_ia32_pclmulqdq512
      {Intrinsic::x86_mmx_pcmpeq_b, 178627}, // __builtin_ia32_pcmpeqb
      {Intrinsic::x86_mmx_pcmpeq_d, 178650}, // __builtin_ia32_pcmpeqd
      {Intrinsic::x86_mmx_pcmpeq_w, 178673}, // __builtin_ia32_pcmpeqw
      {Intrinsic::x86_sse42_pcmpestri128, 183461}, // __builtin_ia32_pcmpestri128
      {Intrinsic::x86_sse42_pcmpestria128, 183489}, // __builtin_ia32_pcmpestria128
      {Intrinsic::x86_sse42_pcmpestric128, 183518}, // __builtin_ia32_pcmpestric128
      {Intrinsic::x86_sse42_pcmpestrio128, 183547}, // __builtin_ia32_pcmpestrio128
      {Intrinsic::x86_sse42_pcmpestris128, 183576}, // __builtin_ia32_pcmpestris128
      {Intrinsic::x86_sse42_pcmpestriz128, 183605}, // __builtin_ia32_pcmpestriz128
      {Intrinsic::x86_sse42_pcmpestrm128, 183634}, // __builtin_ia32_pcmpestrm128
      {Intrinsic::x86_mmx_pcmpgt_b, 178696}, // __builtin_ia32_pcmpgtb
      {Intrinsic::x86_mmx_pcmpgt_d, 178719}, // __builtin_ia32_pcmpgtd
      {Intrinsic::x86_mmx_pcmpgt_w, 178742}, // __builtin_ia32_pcmpgtw
      {Intrinsic::x86_sse42_pcmpistri128, 183662}, // __builtin_ia32_pcmpistri128
      {Intrinsic::x86_sse42_pcmpistria128, 183690}, // __builtin_ia32_pcmpistria128
      {Intrinsic::x86_sse42_pcmpistric128, 183719}, // __builtin_ia32_pcmpistric128
      {Intrinsic::x86_sse42_pcmpistrio128, 183748}, // __builtin_ia32_pcmpistrio128
      {Intrinsic::x86_sse42_pcmpistris128, 183777}, // __builtin_ia32_pcmpistris128
      {Intrinsic::x86_sse42_pcmpistriz128, 183806}, // __builtin_ia32_pcmpistriz128
      {Intrinsic::x86_sse42_pcmpistrm128, 183835}, // __builtin_ia32_pcmpistrm128
      {Intrinsic::x86_bmi_pdep_64, 177209}, // __builtin_ia32_pdep_di
      {Intrinsic::x86_bmi_pdep_32, 177186}, // __builtin_ia32_pdep_si
      {Intrinsic::x86_avx512_permvar_df_256, 168208}, // __builtin_ia32_permvardf256
      {Intrinsic::x86_avx512_permvar_df_512, 168236}, // __builtin_ia32_permvardf512
      {Intrinsic::x86_avx512_permvar_di_256, 168264}, // __builtin_ia32_permvardi256
      {Intrinsic::x86_avx512_permvar_di_512, 168292}, // __builtin_ia32_permvardi512
      {Intrinsic::x86_avx512_permvar_hi_128, 168320}, // __builtin_ia32_permvarhi128
      {Intrinsic::x86_avx512_permvar_hi_256, 168348}, // __builtin_ia32_permvarhi256
      {Intrinsic::x86_avx512_permvar_hi_512, 168376}, // __builtin_ia32_permvarhi512
      {Intrinsic::x86_avx512_permvar_qi_128, 168404}, // __builtin_ia32_permvarqi128
      {Intrinsic::x86_avx512_permvar_qi_256, 168432}, // __builtin_ia32_permvarqi256
      {Intrinsic::x86_avx512_permvar_qi_512, 168460}, // __builtin_ia32_permvarqi512
      {Intrinsic::x86_avx2_permps, 157355}, // __builtin_ia32_permvarsf256
      {Intrinsic::x86_avx512_permvar_sf_512, 168488}, // __builtin_ia32_permvarsf512
      {Intrinsic::x86_avx2_permd, 157327}, // __builtin_ia32_permvarsi256
      {Intrinsic::x86_avx512_permvar_si_512, 168516}, // __builtin_ia32_permvarsi512
      {Intrinsic::x86_bmi_pext_64, 177255}, // __builtin_ia32_pext_di
      {Intrinsic::x86_bmi_pext_32, 177232}, // __builtin_ia32_pext_si
      {Intrinsic::x86_3dnow_pf2id, 154045}, // __builtin_ia32_pf2id
      {Intrinsic::x86_3dnowa_pf2iw, 154443}, // __builtin_ia32_pf2iw
      {Intrinsic::x86_3dnow_pfacc, 154066}, // __builtin_ia32_pfacc
      {Intrinsic::x86_3dnow_pfadd, 154087}, // __builtin_ia32_pfadd
      {Intrinsic::x86_3dnow_pfcmpeq, 154108}, // __builtin_ia32_pfcmpeq
      {Intrinsic::x86_3dnow_pfcmpge, 154131}, // __builtin_ia32_pfcmpge
      {Intrinsic::x86_3dnow_pfcmpgt, 154154}, // __builtin_ia32_pfcmpgt
      {Intrinsic::x86_3dnow_pfmax, 154177}, // __builtin_ia32_pfmax
      {Intrinsic::x86_3dnow_pfmin, 154198}, // __builtin_ia32_pfmin
      {Intrinsic::x86_3dnow_pfmul, 154219}, // __builtin_ia32_pfmul
      {Intrinsic::x86_3dnowa_pfnacc, 154464}, // __builtin_ia32_pfnacc
      {Intrinsic::x86_3dnowa_pfpnacc, 154486}, // __builtin_ia32_pfpnacc
      {Intrinsic::x86_3dnow_pfrcp, 154240}, // __builtin_ia32_pfrcp
      {Intrinsic::x86_3dnow_pfrcpit1, 154261}, // __builtin_ia32_pfrcpit1
      {Intrinsic::x86_3dnow_pfrcpit2, 154285}, // __builtin_ia32_pfrcpit2
      {Intrinsic::x86_3dnow_pfrsqit1, 154309}, // __builtin_ia32_pfrsqit1
      {Intrinsic::x86_3dnow_pfrsqrt, 154333}, // __builtin_ia32_pfrsqrt
      {Intrinsic::x86_3dnow_pfsub, 154356}, // __builtin_ia32_pfsub
      {Intrinsic::x86_3dnow_pfsubr, 154377}, // __builtin_ia32_pfsubr
      {Intrinsic::x86_ssse3_phadd_d, 184016}, // __builtin_ia32_phaddd
      {Intrinsic::x86_ssse3_phadd_d_128, 184038}, // __builtin_ia32_phaddd128
      {Intrinsic::x86_avx2_phadd_d, 157383}, // __builtin_ia32_phaddd256
      {Intrinsic::x86_ssse3_phadd_sw, 184063}, // __builtin_ia32_phaddsw
      {Intrinsic::x86_ssse3_phadd_sw_128, 184086}, // __builtin_ia32_phaddsw128
      {Intrinsic::x86_avx2_phadd_sw, 157408}, // __builtin_ia32_phaddsw256
      {Intrinsic::x86_ssse3_phadd_w, 184112}, // __builtin_ia32_phaddw
      {Intrinsic::x86_ssse3_phadd_w_128, 184134}, // __builtin_ia32_phaddw128
      {Intrinsic::x86_avx2_phadd_w, 157434}, // __builtin_ia32_phaddw256
      {Intrinsic::x86_sse41_phminposuw, 183171}, // __builtin_ia32_phminposuw128
      {Intrinsic::x86_ssse3_phsub_d, 184159}, // __builtin_ia32_phsubd
      {Intrinsic::x86_ssse3_phsub_d_128, 184181}, // __builtin_ia32_phsubd128
      {Intrinsic::x86_avx2_phsub_d, 157459}, // __builtin_ia32_phsubd256
      {Intrinsic::x86_ssse3_phsub_sw, 184206}, // __builtin_ia32_phsubsw
      {Intrinsic::x86_ssse3_phsub_sw_128, 184229}, // __builtin_ia32_phsubsw128
      {Intrinsic::x86_avx2_phsub_sw, 157484}, // __builtin_ia32_phsubsw256
      {Intrinsic::x86_ssse3_phsub_w, 184255}, // __builtin_ia32_phsubw
      {Intrinsic::x86_ssse3_phsub_w_128, 184277}, // __builtin_ia32_phsubw128
      {Intrinsic::x86_avx2_phsub_w, 157510}, // __builtin_ia32_phsubw256
      {Intrinsic::x86_3dnow_pi2fd, 154399}, // __builtin_ia32_pi2fd
      {Intrinsic::x86_3dnowa_pi2fw, 154509}, // __builtin_ia32_pi2fw
      {Intrinsic::x86_ssse3_pmadd_ub_sw, 184302}, // __builtin_ia32_pmaddubsw
      {Intrinsic::x86_ssse3_pmadd_ub_sw_128, 184327}, // __builtin_ia32_pmaddubsw128
      {Intrinsic::x86_avx2_pmadd_ub_sw, 157535}, // __builtin_ia32_pmaddubsw256
      {Intrinsic::x86_avx512_pmaddubs_w_512, 168544}, // __builtin_ia32_pmaddubsw512
      {Intrinsic::x86_mmx_pmadd_wd, 178821}, // __builtin_ia32_pmaddwd
      {Intrinsic::x86_sse2_pmadd_wd, 182103}, // __builtin_ia32_pmaddwd128
      {Intrinsic::x86_avx2_pmadd_wd, 157563}, // __builtin_ia32_pmaddwd256
      {Intrinsic::x86_avx512_pmaddw_d_512, 168572}, // __builtin_ia32_pmaddwd512
      {Intrinsic::x86_mmx_pmaxs_w, 178844}, // __builtin_ia32_pmaxsw
      {Intrinsic::x86_mmx_pmaxu_b, 178866}, // __builtin_ia32_pmaxub
      {Intrinsic::x86_mmx_pmins_w, 178888}, // __builtin_ia32_pminsw
      {Intrinsic::x86_mmx_pminu_b, 178910}, // __builtin_ia32_pminub
      {Intrinsic::x86_avx512_mask_pmov_db_128, 163126}, // __builtin_ia32_pmovdb128_mask
      {Intrinsic::x86_avx512_mask_pmov_db_mem_128, 163186}, // __builtin_ia32_pmovdb128mem_mask
      {Intrinsic::x86_avx512_mask_pmov_db_256, 163156}, // __builtin_ia32_pmovdb256_mask
      {Intrinsic::x86_avx512_mask_pmov_db_mem_256, 163219}, // __builtin_ia32_pmovdb256mem_mask
      {Intrinsic::x86_avx512_mask_pmov_db_mem_512, 163252}, // __builtin_ia32_pmovdb512mem_mask
      {Intrinsic::x86_avx512_mask_pmov_dw_128, 163285}, // __builtin_ia32_pmovdw128_mask
      {Intrinsic::x86_avx512_mask_pmov_dw_mem_128, 163345}, // __builtin_ia32_pmovdw128mem_mask
      {Intrinsic::x86_avx512_mask_pmov_dw_256, 163315}, // __builtin_ia32_pmovdw256_mask
      {Intrinsic::x86_avx512_mask_pmov_dw_mem_256, 163378}, // __builtin_ia32_pmovdw256mem_mask
      {Intrinsic::x86_avx512_mask_pmov_dw_mem_512, 163411}, // __builtin_ia32_pmovdw512mem_mask
      {Intrinsic::x86_mmx_pmovmskb, 178932}, // __builtin_ia32_pmovmskb
      {Intrinsic::x86_sse2_pmovmskb_128, 182129}, // __builtin_ia32_pmovmskb128
      {Intrinsic::x86_avx2_pmovmskb, 157589}, // __builtin_ia32_pmovmskb256
      {Intrinsic::x86_avx512_mask_pmov_qb_128, 163444}, // __builtin_ia32_pmovqb128_mask
      {Intrinsic::x86_avx512_mask_pmov_qb_mem_128, 163534}, // __builtin_ia32_pmovqb128mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qb_256, 163474}, // __builtin_ia32_pmovqb256_mask
      {Intrinsic::x86_avx512_mask_pmov_qb_mem_256, 163567}, // __builtin_ia32_pmovqb256mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qb_512, 163504}, // __builtin_ia32_pmovqb512_mask
      {Intrinsic::x86_avx512_mask_pmov_qb_mem_512, 163600}, // __builtin_ia32_pmovqb512mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qd_128, 163633}, // __builtin_ia32_pmovqd128_mask
      {Intrinsic::x86_avx512_mask_pmov_qd_mem_128, 163663}, // __builtin_ia32_pmovqd128mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qd_mem_256, 163696}, // __builtin_ia32_pmovqd256mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qd_mem_512, 163729}, // __builtin_ia32_pmovqd512mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qw_128, 163762}, // __builtin_ia32_pmovqw128_mask
      {Intrinsic::x86_avx512_mask_pmov_qw_mem_128, 163822}, // __builtin_ia32_pmovqw128mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qw_256, 163792}, // __builtin_ia32_pmovqw256_mask
      {Intrinsic::x86_avx512_mask_pmov_qw_mem_256, 163855}, // __builtin_ia32_pmovqw256mem_mask
      {Intrinsic::x86_avx512_mask_pmov_qw_mem_512, 163888}, // __builtin_ia32_pmovqw512mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_db_128, 164050}, // __builtin_ia32_pmovsdb128_mask
      {Intrinsic::x86_avx512_mask_pmovs_db_mem_128, 164143}, // __builtin_ia32_pmovsdb128mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_db_256, 164081}, // __builtin_ia32_pmovsdb256_mask
      {Intrinsic::x86_avx512_mask_pmovs_db_mem_256, 164177}, // __builtin_ia32_pmovsdb256mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_db_512, 164112}, // __builtin_ia32_pmovsdb512_mask
      {Intrinsic::x86_avx512_mask_pmovs_db_mem_512, 164211}, // __builtin_ia32_pmovsdb512mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_dw_128, 164245}, // __builtin_ia32_pmovsdw128_mask
      {Intrinsic::x86_avx512_mask_pmovs_dw_mem_128, 164338}, // __builtin_ia32_pmovsdw128mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_dw_256, 164276}, // __builtin_ia32_pmovsdw256_mask
      {Intrinsic::x86_avx512_mask_pmovs_dw_mem_256, 164372}, // __builtin_ia32_pmovsdw256mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_dw_512, 164307}, // __builtin_ia32_pmovsdw512_mask
      {Intrinsic::x86_avx512_mask_pmovs_dw_mem_512, 164406}, // __builtin_ia32_pmovsdw512mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qb_128, 164440}, // __builtin_ia32_pmovsqb128_mask
      {Intrinsic::x86_avx512_mask_pmovs_qb_mem_128, 164533}, // __builtin_ia32_pmovsqb128mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qb_256, 164471}, // __builtin_ia32_pmovsqb256_mask
      {Intrinsic::x86_avx512_mask_pmovs_qb_mem_256, 164567}, // __builtin_ia32_pmovsqb256mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qb_512, 164502}, // __builtin_ia32_pmovsqb512_mask
      {Intrinsic::x86_avx512_mask_pmovs_qb_mem_512, 164601}, // __builtin_ia32_pmovsqb512mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qd_128, 164635}, // __builtin_ia32_pmovsqd128_mask
      {Intrinsic::x86_avx512_mask_pmovs_qd_mem_128, 164728}, // __builtin_ia32_pmovsqd128mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qd_256, 164666}, // __builtin_ia32_pmovsqd256_mask
      {Intrinsic::x86_avx512_mask_pmovs_qd_mem_256, 164762}, // __builtin_ia32_pmovsqd256mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qd_512, 164697}, // __builtin_ia32_pmovsqd512_mask
      {Intrinsic::x86_avx512_mask_pmovs_qd_mem_512, 164796}, // __builtin_ia32_pmovsqd512mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qw_128, 164830}, // __builtin_ia32_pmovsqw128_mask
      {Intrinsic::x86_avx512_mask_pmovs_qw_mem_128, 164923}, // __builtin_ia32_pmovsqw128mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qw_256, 164861}, // __builtin_ia32_pmovsqw256_mask
      {Intrinsic::x86_avx512_mask_pmovs_qw_mem_256, 164957}, // __builtin_ia32_pmovsqw256mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_qw_512, 164892}, // __builtin_ia32_pmovsqw512_mask
      {Intrinsic::x86_avx512_mask_pmovs_qw_mem_512, 164991}, // __builtin_ia32_pmovsqw512mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_wb_128, 165025}, // __builtin_ia32_pmovswb128_mask
      {Intrinsic::x86_avx512_mask_pmovs_wb_mem_128, 165118}, // __builtin_ia32_pmovswb128mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_wb_256, 165056}, // __builtin_ia32_pmovswb256_mask
      {Intrinsic::x86_avx512_mask_pmovs_wb_mem_256, 165152}, // __builtin_ia32_pmovswb256mem_mask
      {Intrinsic::x86_avx512_mask_pmovs_wb_512, 165087}, // __builtin_ia32_pmovswb512_mask
      {Intrinsic::x86_avx512_mask_pmovs_wb_mem_512, 165186}, // __builtin_ia32_pmovswb512mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_db_128, 165220}, // __builtin_ia32_pmovusdb128_mask
      {Intrinsic::x86_avx512_mask_pmovus_db_mem_128, 165316}, // __builtin_ia32_pmovusdb128mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_db_256, 165252}, // __builtin_ia32_pmovusdb256_mask
      {Intrinsic::x86_avx512_mask_pmovus_db_mem_256, 165351}, // __builtin_ia32_pmovusdb256mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_db_512, 165284}, // __builtin_ia32_pmovusdb512_mask
      {Intrinsic::x86_avx512_mask_pmovus_db_mem_512, 165386}, // __builtin_ia32_pmovusdb512mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_dw_128, 165421}, // __builtin_ia32_pmovusdw128_mask
      {Intrinsic::x86_avx512_mask_pmovus_dw_mem_128, 165517}, // __builtin_ia32_pmovusdw128mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_dw_256, 165453}, // __builtin_ia32_pmovusdw256_mask
      {Intrinsic::x86_avx512_mask_pmovus_dw_mem_256, 165552}, // __builtin_ia32_pmovusdw256mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_dw_512, 165485}, // __builtin_ia32_pmovusdw512_mask
      {Intrinsic::x86_avx512_mask_pmovus_dw_mem_512, 165587}, // __builtin_ia32_pmovusdw512mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qb_128, 165622}, // __builtin_ia32_pmovusqb128_mask
      {Intrinsic::x86_avx512_mask_pmovus_qb_mem_128, 165718}, // __builtin_ia32_pmovusqb128mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qb_256, 165654}, // __builtin_ia32_pmovusqb256_mask
      {Intrinsic::x86_avx512_mask_pmovus_qb_mem_256, 165753}, // __builtin_ia32_pmovusqb256mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qb_512, 165686}, // __builtin_ia32_pmovusqb512_mask
      {Intrinsic::x86_avx512_mask_pmovus_qb_mem_512, 165788}, // __builtin_ia32_pmovusqb512mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qd_128, 165823}, // __builtin_ia32_pmovusqd128_mask
      {Intrinsic::x86_avx512_mask_pmovus_qd_mem_128, 165919}, // __builtin_ia32_pmovusqd128mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qd_256, 165855}, // __builtin_ia32_pmovusqd256_mask
      {Intrinsic::x86_avx512_mask_pmovus_qd_mem_256, 165954}, // __builtin_ia32_pmovusqd256mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qd_512, 165887}, // __builtin_ia32_pmovusqd512_mask
      {Intrinsic::x86_avx512_mask_pmovus_qd_mem_512, 165989}, // __builtin_ia32_pmovusqd512mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qw_128, 166024}, // __builtin_ia32_pmovusqw128_mask
      {Intrinsic::x86_avx512_mask_pmovus_qw_mem_128, 166120}, // __builtin_ia32_pmovusqw128mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qw_256, 166056}, // __builtin_ia32_pmovusqw256_mask
      {Intrinsic::x86_avx512_mask_pmovus_qw_mem_256, 166155}, // __builtin_ia32_pmovusqw256mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_qw_512, 166088}, // __builtin_ia32_pmovusqw512_mask
      {Intrinsic::x86_avx512_mask_pmovus_qw_mem_512, 166190}, // __builtin_ia32_pmovusqw512mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_wb_128, 166225}, // __builtin_ia32_pmovuswb128_mask
      {Intrinsic::x86_avx512_mask_pmovus_wb_mem_128, 166321}, // __builtin_ia32_pmovuswb128mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_wb_256, 166257}, // __builtin_ia32_pmovuswb256_mask
      {Intrinsic::x86_avx512_mask_pmovus_wb_mem_256, 166356}, // __builtin_ia32_pmovuswb256mem_mask
      {Intrinsic::x86_avx512_mask_pmovus_wb_512, 166289}, // __builtin_ia32_pmovuswb512_mask
      {Intrinsic::x86_avx512_mask_pmovus_wb_mem_512, 166391}, // __builtin_ia32_pmovuswb512mem_mask
      {Intrinsic::x86_avx512_mask_pmov_wb_128, 163921}, // __builtin_ia32_pmovwb128_mask
      {Intrinsic::x86_avx512_mask_pmov_wb_mem_128, 163951}, // __builtin_ia32_pmovwb128mem_mask
      {Intrinsic::x86_avx512_mask_pmov_wb_mem_256, 163984}, // __builtin_ia32_pmovwb256mem_mask
      {Intrinsic::x86_avx512_mask_pmov_wb_mem_512, 164017}, // __builtin_ia32_pmovwb512mem_mask
      {Intrinsic::x86_ssse3_pmul_hr_sw, 184355}, // __builtin_ia32_pmulhrsw
      {Intrinsic::x86_ssse3_pmul_hr_sw_128, 184379}, // __builtin_ia32_pmulhrsw128
      {Intrinsic::x86_avx2_pmul_hr_sw, 157616}, // __builtin_ia32_pmulhrsw256
      {Intrinsic::x86_avx512_pmul_hr_sw_512, 168598}, // __builtin_ia32_pmulhrsw512
      {Intrinsic::x86_3dnow_pmulhrw, 154420}, // __builtin_ia32_pmulhrw
      {Intrinsic::x86_mmx_pmulhu_w, 178978}, // __builtin_ia32_pmulhuw
      {Intrinsic::x86_sse2_pmulhu_w, 182181}, // __builtin_ia32_pmulhuw128
      {Intrinsic::x86_avx2_pmulhu_w, 157668}, // __builtin_ia32_pmulhuw256
      {Intrinsic::x86_avx512_pmulhu_w_512, 168650}, // __builtin_ia32_pmulhuw512
      {Intrinsic::x86_mmx_pmulh_w, 178956}, // __builtin_ia32_pmulhw
      {Intrinsic::x86_sse2_pmulh_w, 182156}, // __builtin_ia32_pmulhw128
      {Intrinsic::x86_avx2_pmulh_w, 157643}, // __builtin_ia32_pmulhw256
      {Intrinsic::x86_avx512_pmulh_w_512, 168625}, // __builtin_ia32_pmulhw512
      {Intrinsic::x86_mmx_pmull_w, 179001}, // __builtin_ia32_pmullw
      {Intrinsic::x86_mmx_pmulu_dq, 179023}, // __builtin_ia32_pmuludq
      {Intrinsic::x86_mmx_por, 179046}, // __builtin_ia32_por
      {Intrinsic::x86_mmx_psad_bw, 179065}, // __builtin_ia32_psadbw
      {Intrinsic::x86_sse2_psad_bw, 182207}, // __builtin_ia32_psadbw128
      {Intrinsic::x86_avx2_psad_bw, 157694}, // __builtin_ia32_psadbw256
      {Intrinsic::x86_avx512_psad_bw_512, 168775}, // __builtin_ia32_psadbw512
      {Intrinsic::x86_ssse3_pshuf_b, 184406}, // __builtin_ia32_pshufb
      {Intrinsic::x86_ssse3_pshuf_b_128, 184428}, // __builtin_ia32_pshufb128
      {Intrinsic::x86_avx2_pshuf_b, 157719}, // __builtin_ia32_pshufb256
      {Intrinsic::x86_avx512_pshuf_b_512, 168800}, // __builtin_ia32_pshufb512
      {Intrinsic::x86_sse_pshuf_w, 181067}, // __builtin_ia32_pshufw
      {Intrinsic::x86_ssse3_psign_b, 184453}, // __builtin_ia32_psignb
      {Intrinsic::x86_ssse3_psign_b_128, 184475}, // __builtin_ia32_psignb128
      {Intrinsic::x86_avx2_psign_b, 157744}, // __builtin_ia32_psignb256
      {Intrinsic::x86_ssse3_psign_d, 184500}, // __builtin_ia32_psignd
      {Intrinsic::x86_ssse3_psign_d_128, 184522}, // __builtin_ia32_psignd128
      {Intrinsic::x86_avx2_psign_d, 157769}, // __builtin_ia32_psignd256
      {Intrinsic::x86_ssse3_psign_w, 184547}, // __builtin_ia32_psignw
      {Intrinsic::x86_ssse3_psign_w_128, 184569}, // __builtin_ia32_psignw128
      {Intrinsic::x86_avx2_psign_w, 157794}, // __builtin_ia32_psignw256
      {Intrinsic::x86_mmx_psll_d, 179087}, // __builtin_ia32_pslld
      {Intrinsic::x86_sse2_psll_d, 182232}, // __builtin_ia32_pslld128
      {Intrinsic::x86_avx2_psll_d, 157819}, // __builtin_ia32_pslld256
      {Intrinsic::x86_avx512_psll_d_512, 168825}, // __builtin_ia32_pslld512
      {Intrinsic::x86_mmx_pslli_d, 179150}, // __builtin_ia32_pslldi
      {Intrinsic::x86_sse2_pslli_d, 182304}, // __builtin_ia32_pslldi128
      {Intrinsic::x86_avx2_pslli_d, 157891}, // __builtin_ia32_pslldi256
      {Intrinsic::x86_avx512_pslli_d_512, 168897}, // __builtin_ia32_pslldi512
      {Intrinsic::x86_mmx_psll_q, 179108}, // __builtin_ia32_psllq
      {Intrinsic::x86_sse2_psll_q, 182256}, // __builtin_ia32_psllq128
      {Intrinsic::x86_avx2_psll_q, 157843}, // __builtin_ia32_psllq256
      {Intrinsic::x86_avx512_psll_q_512, 168849}, // __builtin_ia32_psllq512
      {Intrinsic::x86_mmx_pslli_q, 179172}, // __builtin_ia32_psllqi
      {Intrinsic::x86_sse2_pslli_q, 182329}, // __builtin_ia32_psllqi128
      {Intrinsic::x86_avx2_pslli_q, 157916}, // __builtin_ia32_psllqi256
      {Intrinsic::x86_avx512_pslli_q_512, 168922}, // __builtin_ia32_psllqi512
      {Intrinsic::x86_avx512_psllv_w_256, 169045}, // __builtin_ia32_psllv16hi
      {Intrinsic::x86_avx512_psllv_d_512, 168972}, // __builtin_ia32_psllv16si
      {Intrinsic::x86_avx2_psllv_q, 158014}, // __builtin_ia32_psllv2di
      {Intrinsic::x86_avx512_psllv_w_512, 169070}, // __builtin_ia32_psllv32hi
      {Intrinsic::x86_avx2_psllv_q_256, 158038}, // __builtin_ia32_psllv4di
      {Intrinsic::x86_avx2_psllv_d, 157966}, // __builtin_ia32_psllv4si
      {Intrinsic::x86_avx512_psllv_q_512, 168997}, // __builtin_ia32_psllv8di
      {Intrinsic::x86_avx512_psllv_w_128, 169021}, // __builtin_ia32_psllv8hi
      {Intrinsic::x86_avx2_psllv_d_256, 157990}, // __builtin_ia32_psllv8si
      {Intrinsic::x86_mmx_psll_w, 179129}, // __builtin_ia32_psllw
      {Intrinsic::x86_sse2_psll_w, 182280}, // __builtin_ia32_psllw128
      {Intrinsic::x86_avx2_psll_w, 157867}, // __builtin_ia32_psllw256
      {Intrinsic::x86_avx512_psll_w_512, 168873}, // __builtin_ia32_psllw512
      {Intrinsic::x86_mmx_pslli_w, 179194}, // __builtin_ia32_psllwi
      {Intrinsic::x86_sse2_pslli_w, 182354}, // __builtin_ia32_psllwi128
      {Intrinsic::x86_avx2_pslli_w, 157941}, // __builtin_ia32_psllwi256
      {Intrinsic::x86_avx512_pslli_w_512, 168947}, // __builtin_ia32_psllwi512
      {Intrinsic::x86_mmx_psra_d, 179216}, // __builtin_ia32_psrad
      {Intrinsic::x86_sse2_psra_d, 182379}, // __builtin_ia32_psrad128
      {Intrinsic::x86_avx2_psra_d, 158062}, // __builtin_ia32_psrad256
      {Intrinsic::x86_avx512_psra_d_512, 169095}, // __builtin_ia32_psrad512
      {Intrinsic::x86_mmx_psrai_d, 179258}, // __builtin_ia32_psradi
      {Intrinsic::x86_sse2_psrai_d, 182427}, // __builtin_ia32_psradi128
      {Intrinsic::x86_avx2_psrai_d, 158110}, // __builtin_ia32_psradi256
      {Intrinsic::x86_avx512_psrai_d_512, 169215}, // __builtin_ia32_psradi512
      {Intrinsic::x86_avx512_psra_q_128, 169119}, // __builtin_ia32_psraq128
      {Intrinsic::x86_avx512_psra_q_256, 169143}, // __builtin_ia32_psraq256
      {Intrinsic::x86_avx512_psra_q_512, 169167}, // __builtin_ia32_psraq512
      {Intrinsic::x86_avx512_psrai_q_128, 169240}, // __builtin_ia32_psraqi128
      {Intrinsic::x86_avx512_psrai_q_256, 169265}, // __builtin_ia32_psraqi256
      {Intrinsic::x86_avx512_psrai_q_512, 169290}, // __builtin_ia32_psraqi512
      {Intrinsic::x86_avx512_psrav_w_256, 169463}, // __builtin_ia32_psrav16hi
      {Intrinsic::x86_avx512_psrav_d_512, 169340}, // __builtin_ia32_psrav16si
      {Intrinsic::x86_avx512_psrav_w_512, 169488}, // __builtin_ia32_psrav32hi
      {Intrinsic::x86_avx2_psrav_d, 158160}, // __builtin_ia32_psrav4si
      {Intrinsic::x86_avx512_psrav_q_512, 169415}, // __builtin_ia32_psrav8di
      {Intrinsic::x86_avx512_psrav_w_128, 169439}, // __builtin_ia32_psrav8hi
      {Intrinsic::x86_avx2_psrav_d_256, 158184}, // __builtin_ia32_psrav8si
      {Intrinsic::x86_avx512_psrav_q_128, 169365}, // __builtin_ia32_psravq128
      {Intrinsic::x86_avx512_psrav_q_256, 169390}, // __builtin_ia32_psravq256
      {Intrinsic::x86_mmx_psra_w, 179237}, // __builtin_ia32_psraw
      {Intrinsic::x86_sse2_psra_w, 182403}, // __builtin_ia32_psraw128
      {Intrinsic::x86_avx2_psra_w, 158086}, // __builtin_ia32_psraw256
      {Intrinsic::x86_avx512_psra_w_512, 169191}, // __builtin_ia32_psraw512
      {Intrinsic::x86_mmx_psrai_w, 179280}, // __builtin_ia32_psrawi
      {Intrinsic::x86_sse2_psrai_w, 182452}, // __builtin_ia32_psrawi128
      {Intrinsic::x86_avx2_psrai_w, 158135}, // __builtin_ia32_psrawi256
      {Intrinsic::x86_avx512_psrai_w_512, 169315}, // __builtin_ia32_psrawi512
      {Intrinsic::x86_mmx_psrl_d, 179302}, // __builtin_ia32_psrld
      {Intrinsic::x86_sse2_psrl_d, 182477}, // __builtin_ia32_psrld128
      {Intrinsic::x86_avx2_psrl_d, 158208}, // __builtin_ia32_psrld256
      {Intrinsic::x86_avx512_psrl_d_512, 169513}, // __builtin_ia32_psrld512
      {Intrinsic::x86_mmx_psrli_d, 179365}, // __builtin_ia32_psrldi
      {Intrinsic::x86_sse2_psrli_d, 182549}, // __builtin_ia32_psrldi128
      {Intrinsic::x86_avx2_psrli_d, 158280}, // __builtin_ia32_psrldi256
      {Intrinsic::x86_avx512_psrli_d_512, 169585}, // __builtin_ia32_psrldi512
      {Intrinsic::x86_mmx_psrl_q, 179323}, // __builtin_ia32_psrlq
      {Intrinsic::x86_sse2_psrl_q, 182501}, // __builtin_ia32_psrlq128
      {Intrinsic::x86_avx2_psrl_q, 158232}, // __builtin_ia32_psrlq256
      {Intrinsic::x86_avx512_psrl_q_512, 169537}, // __builtin_ia32_psrlq512
      {Intrinsic::x86_mmx_psrli_q, 179387}, // __builtin_ia32_psrlqi
      {Intrinsic::x86_sse2_psrli_q, 182574}, // __builtin_ia32_psrlqi128
      {Intrinsic::x86_avx2_psrli_q, 158305}, // __builtin_ia32_psrlqi256
      {Intrinsic::x86_avx512_psrli_q_512, 169610}, // __builtin_ia32_psrlqi512
      {Intrinsic::x86_avx512_psrlv_w_256, 169733}, // __builtin_ia32_psrlv16hi
      {Intrinsic::x86_avx512_psrlv_d_512, 169660}, // __builtin_ia32_psrlv16si
      {Intrinsic::x86_avx2_psrlv_q, 158403}, // __builtin_ia32_psrlv2di
      {Intrinsic::x86_avx512_psrlv_w_512, 169758}, // __builtin_ia32_psrlv32hi
      {Intrinsic::x86_avx2_psrlv_q_256, 158427}, // __builtin_ia32_psrlv4di
      {Intrinsic::x86_avx2_psrlv_d, 158355}, // __builtin_ia32_psrlv4si
      {Intrinsic::x86_avx512_psrlv_q_512, 169685}, // __builtin_ia32_psrlv8di
      {Intrinsic::x86_avx512_psrlv_w_128, 169709}, // __builtin_ia32_psrlv8hi
      {Intrinsic::x86_avx2_psrlv_d_256, 158379}, // __builtin_ia32_psrlv8si
      {Intrinsic::x86_mmx_psrl_w, 179344}, // __builtin_ia32_psrlw
      {Intrinsic::x86_sse2_psrl_w, 182525}, // __builtin_ia32_psrlw128
      {Intrinsic::x86_avx2_psrl_w, 158256}, // __builtin_ia32_psrlw256
      {Intrinsic::x86_avx512_psrl_w_512, 169561}, // __builtin_ia32_psrlw512
      {Intrinsic::x86_mmx_psrli_w, 179409}, // __builtin_ia32_psrlwi
      {Intrinsic::x86_sse2_psrli_w, 182599}, // __builtin_ia32_psrlwi128
      {Intrinsic::x86_avx2_psrli_w, 158330}, // __builtin_ia32_psrlwi256
      {Intrinsic::x86_avx512_psrli_w_512, 169635}, // __builtin_ia32_psrlwi512
      {Intrinsic::x86_mmx_psub_b, 179431}, // __builtin_ia32_psubb
      {Intrinsic::x86_mmx_psub_d, 179452}, // __builtin_ia32_psubd
      {Intrinsic::x86_mmx_psub_q, 179473}, // __builtin_ia32_psubq
      {Intrinsic::x86_mmx_psubs_b, 179515}, // __builtin_ia32_psubsb
      {Intrinsic::x86_mmx_psubs_w, 179537}, // __builtin_ia32_psubsw
      {Intrinsic::x86_mmx_psubus_b, 179559}, // __builtin_ia32_psubusb
      {Intrinsic::x86_mmx_psubus_w, 179582}, // __builtin_ia32_psubusw
      {Intrinsic::x86_mmx_psub_w, 179494}, // __builtin_ia32_psubw
      {Intrinsic::x86_avx512_pternlog_d_128, 169783}, // __builtin_ia32_pternlogd128
      {Intrinsic::x86_avx512_pternlog_d_256, 169811}, // __builtin_ia32_pternlogd256
      {Intrinsic::x86_avx512_pternlog_d_512, 169839}, // __builtin_ia32_pternlogd512
      {Intrinsic::x86_avx512_pternlog_q_128, 169867}, // __builtin_ia32_pternlogq128
      {Intrinsic::x86_avx512_pternlog_q_256, 169895}, // __builtin_ia32_pternlogq256
      {Intrinsic::x86_avx512_pternlog_q_512, 169923}, // __builtin_ia32_pternlogq512
      {Intrinsic::x86_sse41_ptestc, 183200}, // __builtin_ia32_ptestc128
      {Intrinsic::x86_avx_ptestc_256, 155809}, // __builtin_ia32_ptestc256
      {Intrinsic::x86_sse41_ptestnzc, 183225}, // __builtin_ia32_ptestnzc128
      {Intrinsic::x86_avx_ptestnzc_256, 155834}, // __builtin_ia32_ptestnzc256
      {Intrinsic::x86_sse41_ptestz, 183252}, // __builtin_ia32_ptestz128
      {Intrinsic::x86_avx_ptestz_256, 155861}, // __builtin_ia32_ptestz256
      {Intrinsic::x86_ptwrite32, 179930}, // __builtin_ia32_ptwrite32
      {Intrinsic::x86_ptwrite64, 179955}, // __builtin_ia32_ptwrite64
      {Intrinsic::x86_mmx_punpckhbw, 179605}, // __builtin_ia32_punpckhbw
      {Intrinsic::x86_mmx_punpckhdq, 179630}, // __builtin_ia32_punpckhdq
      {Intrinsic::x86_mmx_punpckhwd, 179655}, // __builtin_ia32_punpckhwd
      {Intrinsic::x86_mmx_punpcklbw, 179680}, // __builtin_ia32_punpcklbw
      {Intrinsic::x86_mmx_punpckldq, 179705}, // __builtin_ia32_punpckldq
      {Intrinsic::x86_mmx_punpcklwd, 179730}, // __builtin_ia32_punpcklwd
      {Intrinsic::x86_mmx_pxor, 179755}, // __builtin_ia32_pxor
      {Intrinsic::x86_avx512_mask_range_pd_128, 166426}, // __builtin_ia32_rangepd128_mask
      {Intrinsic::x86_avx512_mask_range_pd_256, 166457}, // __builtin_ia32_rangepd256_mask
      {Intrinsic::x86_avx512_mask_range_pd_512, 166488}, // __builtin_ia32_rangepd512_mask
      {Intrinsic::x86_avx512_mask_range_ps_128, 166519}, // __builtin_ia32_rangeps128_mask
      {Intrinsic::x86_avx512_mask_range_ps_256, 166550}, // __builtin_ia32_rangeps256_mask
      {Intrinsic::x86_avx512_mask_range_ps_512, 166581}, // __builtin_ia32_rangeps512_mask
      {Intrinsic::x86_avx512_mask_range_sd, 166612}, // __builtin_ia32_rangesd128_round_mask
      {Intrinsic::x86_avx512_mask_range_ss, 166649}, // __builtin_ia32_rangess128_round_mask
      {Intrinsic::x86_avx512_rcp14_pd_128, 169951}, // __builtin_ia32_rcp14pd128_mask
      {Intrinsic::x86_avx512_rcp14_pd_256, 169982}, // __builtin_ia32_rcp14pd256_mask
      {Intrinsic::x86_avx512_rcp14_pd_512, 170013}, // __builtin_ia32_rcp14pd512_mask
      {Intrinsic::x86_avx512_rcp14_ps_128, 170044}, // __builtin_ia32_rcp14ps128_mask
      {Intrinsic::x86_avx512_rcp14_ps_256, 170075}, // __builtin_ia32_rcp14ps256_mask
      {Intrinsic::x86_avx512_rcp14_ps_512, 170106}, // __builtin_ia32_rcp14ps512_mask
      {Intrinsic::x86_avx512_rcp14_sd, 170137}, // __builtin_ia32_rcp14sd_mask
      {Intrinsic::x86_avx512_rcp14_ss, 170165}, // __builtin_ia32_rcp14ss_mask
      {Intrinsic::x86_avx512_rcp28_pd, 170193}, // __builtin_ia32_rcp28pd_mask
      {Intrinsic::x86_avx512_rcp28_ps, 170221}, // __builtin_ia32_rcp28ps_mask
      {Intrinsic::x86_avx512_rcp28_sd, 170249}, // __builtin_ia32_rcp28sd_round_mask
      {Intrinsic::x86_avx512_rcp28_ss, 170283}, // __builtin_ia32_rcp28ss_round_mask
      {Intrinsic::x86_avx512fp16_mask_rcp_ph_128, 173037}, // __builtin_ia32_rcpph128_mask
      {Intrinsic::x86_avx512fp16_mask_rcp_ph_256, 173066}, // __builtin_ia32_rcpph256_mask
      {Intrinsic::x86_avx512fp16_mask_rcp_ph_512, 173095}, // __builtin_ia32_rcpph512_mask
      {Intrinsic::x86_sse_rcp_ps, 181089}, // __builtin_ia32_rcpps
      {Intrinsic::x86_avx_rcp_ps_256, 155886}, // __builtin_ia32_rcpps256
      {Intrinsic::x86_avx512fp16_mask_rcp_sh, 173124}, // __builtin_ia32_rcpsh_mask
      {Intrinsic::x86_sse_rcp_ss, 181110}, // __builtin_ia32_rcpss
      {Intrinsic::x86_rdfsbase_32, 179980}, // __builtin_ia32_rdfsbase32
      {Intrinsic::x86_rdfsbase_64, 180006}, // __builtin_ia32_rdfsbase64
      {Intrinsic::x86_rdgsbase_32, 180032}, // __builtin_ia32_rdgsbase32
      {Intrinsic::x86_rdgsbase_64, 180058}, // __builtin_ia32_rdgsbase64
      {Intrinsic::x86_rdpid, 180084}, // __builtin_ia32_rdpid
      {Intrinsic::x86_rdpkru, 180105}, // __builtin_ia32_rdpkru
      {Intrinsic::x86_rdpmc, 180127}, // __builtin_ia32_rdpmc
      {Intrinsic::x86_rdpru, 180148}, // __builtin_ia32_rdpru
      {Intrinsic::x86_rdsspd, 180169}, // __builtin_ia32_rdsspd
      {Intrinsic::x86_rdsspq, 180191}, // __builtin_ia32_rdsspq
      {Intrinsic::x86_rdtsc, 180213}, // __builtin_ia32_rdtsc
      {Intrinsic::x86_flags_read_u32, 177575}, // __builtin_ia32_readeflags_u32
      {Intrinsic::x86_flags_read_u64, 177605}, // __builtin_ia32_readeflags_u64
      {Intrinsic::x86_avx512_mask_reduce_pd_128, 166686}, // __builtin_ia32_reducepd128_mask
      {Intrinsic::x86_avx512_mask_reduce_pd_256, 166718}, // __builtin_ia32_reducepd256_mask
      {Intrinsic::x86_avx512_mask_reduce_pd_512, 166750}, // __builtin_ia32_reducepd512_mask
      {Intrinsic::x86_avx512fp16_mask_reduce_ph_128, 173150}, // __builtin_ia32_reduceph128_mask
      {Intrinsic::x86_avx512fp16_mask_reduce_ph_256, 173182}, // __builtin_ia32_reduceph256_mask
      {Intrinsic::x86_avx512fp16_mask_reduce_ph_512, 173214}, // __builtin_ia32_reduceph512_mask
      {Intrinsic::x86_avx512_mask_reduce_ps_128, 166782}, // __builtin_ia32_reduceps128_mask
      {Intrinsic::x86_avx512_mask_reduce_ps_256, 166814}, // __builtin_ia32_reduceps256_mask
      {Intrinsic::x86_avx512_mask_reduce_ps_512, 166846}, // __builtin_ia32_reduceps512_mask
      {Intrinsic::x86_avx512_mask_reduce_sd, 166878}, // __builtin_ia32_reducesd_mask
      {Intrinsic::x86_avx512fp16_mask_reduce_sh, 173246}, // __builtin_ia32_reducesh_mask
      {Intrinsic::x86_avx512_mask_reduce_ss, 166907}, // __builtin_ia32_reducess_mask
      {Intrinsic::x86_avx512_mask_rndscale_pd_128, 166936}, // __builtin_ia32_rndscalepd_128_mask
      {Intrinsic::x86_avx512_mask_rndscale_pd_256, 166971}, // __builtin_ia32_rndscalepd_256_mask
      {Intrinsic::x86_avx512_mask_rndscale_pd_512, 167006}, // __builtin_ia32_rndscalepd_mask
      {Intrinsic::x86_avx512fp16_mask_rndscale_ph_128, 173275}, // __builtin_ia32_rndscaleph_128_mask
      {Intrinsic::x86_avx512fp16_mask_rndscale_ph_256, 173310}, // __builtin_ia32_rndscaleph_256_mask
      {Intrinsic::x86_avx512fp16_mask_rndscale_ph_512, 173345}, // __builtin_ia32_rndscaleph_mask
      {Intrinsic::x86_avx512_mask_rndscale_ps_128, 167037}, // __builtin_ia32_rndscaleps_128_mask
      {Intrinsic::x86_avx512_mask_rndscale_ps_256, 167072}, // __builtin_ia32_rndscaleps_256_mask
      {Intrinsic::x86_avx512_mask_rndscale_ps_512, 167107}, // __builtin_ia32_rndscaleps_mask
      {Intrinsic::x86_avx512_mask_rndscale_sd, 167138}, // __builtin_ia32_rndscalesd_round_mask
      {Intrinsic::x86_avx512fp16_mask_rndscale_sh, 173376}, // __builtin_ia32_rndscalesh_round_mask
      {Intrinsic::x86_avx512_mask_rndscale_ss, 167175}, // __builtin_ia32_rndscaless_round_mask
      {Intrinsic::x86_sse41_round_pd, 183277}, // __builtin_ia32_roundpd
      {Intrinsic::x86_avx_round_pd_256, 155910}, // __builtin_ia32_roundpd256
      {Intrinsic::x86_sse41_round_ps, 183300}, // __builtin_ia32_roundps
      {Intrinsic::x86_avx_round_ps_256, 155936}, // __builtin_ia32_roundps256
      {Intrinsic::x86_sse41_round_sd, 183323}, // __builtin_ia32_roundsd
      {Intrinsic::x86_sse41_round_ss, 183346}, // __builtin_ia32_roundss
      {Intrinsic::x86_avx512_rsqrt14_pd_128, 170317}, // __builtin_ia32_rsqrt14pd128_mask
      {Intrinsic::x86_avx512_rsqrt14_pd_256, 170350}, // __builtin_ia32_rsqrt14pd256_mask
      {Intrinsic::x86_avx512_rsqrt14_pd_512, 170383}, // __builtin_ia32_rsqrt14pd512_mask
      {Intrinsic::x86_avx512_rsqrt14_ps_128, 170416}, // __builtin_ia32_rsqrt14ps128_mask
      {Intrinsic::x86_avx512_rsqrt14_ps_256, 170449}, // __builtin_ia32_rsqrt14ps256_mask
      {Intrinsic::x86_avx512_rsqrt14_ps_512, 170482}, // __builtin_ia32_rsqrt14ps512_mask
      {Intrinsic::x86_avx512_rsqrt14_sd, 170515}, // __builtin_ia32_rsqrt14sd_mask
      {Intrinsic::x86_avx512_rsqrt14_ss, 170545}, // __builtin_ia32_rsqrt14ss_mask
      {Intrinsic::x86_avx512_rsqrt28_pd, 170575}, // __builtin_ia32_rsqrt28pd_mask
      {Intrinsic::x86_avx512_rsqrt28_ps, 170605}, // __builtin_ia32_rsqrt28ps_mask
      {Intrinsic::x86_avx512_rsqrt28_sd, 170635}, // __builtin_ia32_rsqrt28sd_round_mask
      {Intrinsic::x86_avx512_rsqrt28_ss, 170671}, // __builtin_ia32_rsqrt28ss_round_mask
      {Intrinsic::x86_avx512fp16_mask_rsqrt_ph_128, 173413}, // __builtin_ia32_rsqrtph128_mask
      {Intrinsic::x86_avx512fp16_mask_rsqrt_ph_256, 173444}, // __builtin_ia32_rsqrtph256_mask
      {Intrinsic::x86_avx512fp16_mask_rsqrt_ph_512, 173475}, // __builtin_ia32_rsqrtph512_mask
      {Intrinsic::x86_sse_rsqrt_ps, 181131}, // __builtin_ia32_rsqrtps
      {Intrinsic::x86_avx_rsqrt_ps_256, 155962}, // __builtin_ia32_rsqrtps256
      {Intrinsic::x86_avx512fp16_mask_rsqrt_sh, 173506}, // __builtin_ia32_rsqrtsh_mask
      {Intrinsic::x86_sse_rsqrt_ss, 181154}, // __builtin_ia32_rsqrtss
      {Intrinsic::x86_rstorssp, 180234}, // __builtin_ia32_rstorssp
      {Intrinsic::x86_saveprevssp, 180258}, // __builtin_ia32_saveprevssp
      {Intrinsic::x86_avx512_mask_scalef_pd_128, 167212}, // __builtin_ia32_scalefpd128_mask
      {Intrinsic::x86_avx512_mask_scalef_pd_256, 167244}, // __builtin_ia32_scalefpd256_mask
      {Intrinsic::x86_avx512_mask_scalef_pd_512, 167276}, // __builtin_ia32_scalefpd512_mask
      {Intrinsic::x86_avx512fp16_mask_scalef_ph_128, 173534}, // __builtin_ia32_scalefph128_mask
      {Intrinsic::x86_avx512fp16_mask_scalef_ph_256, 173566}, // __builtin_ia32_scalefph256_mask
      {Intrinsic::x86_avx512fp16_mask_scalef_ph_512, 173598}, // __builtin_ia32_scalefph512_mask
      {Intrinsic::x86_avx512_mask_scalef_ps_128, 167308}, // __builtin_ia32_scalefps128_mask
      {Intrinsic::x86_avx512_mask_scalef_ps_256, 167340}, // __builtin_ia32_scalefps256_mask
      {Intrinsic::x86_avx512_mask_scalef_ps_512, 167372}, // __builtin_ia32_scalefps512_mask
      {Intrinsic::x86_avx512_mask_scalef_sd, 167404}, // __builtin_ia32_scalefsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_scalef_sh, 173630}, // __builtin_ia32_scalefsh_round_mask
      {Intrinsic::x86_avx512_mask_scalef_ss, 167439}, // __builtin_ia32_scalefss_round_mask
      {Intrinsic::x86_avx512_scatterpf_dpd_512, 170707}, // __builtin_ia32_scatterpfdpd
      {Intrinsic::x86_avx512_scatterpf_dps_512, 170735}, // __builtin_ia32_scatterpfdps
      {Intrinsic::x86_avx512_scatterpf_qpd_512, 170763}, // __builtin_ia32_scatterpfqpd
      {Intrinsic::x86_avx512_scatterpf_qps_512, 170791}, // __builtin_ia32_scatterpfqps
      {Intrinsic::x86_senduipi, 180285}, // __builtin_ia32_senduipi
      {Intrinsic::x86_serialize, 180309}, // __builtin_ia32_serialize
      {Intrinsic::x86_setssbsy, 180334}, // __builtin_ia32_setssbsy
      {Intrinsic::x86_sse_sfence, 181177}, // __builtin_ia32_sfence
      {Intrinsic::x86_sha1msg1, 180358}, // __builtin_ia32_sha1msg1
      {Intrinsic::x86_sha1msg2, 180382}, // __builtin_ia32_sha1msg2
      {Intrinsic::x86_sha1nexte, 180406}, // __builtin_ia32_sha1nexte
      {Intrinsic::x86_sha1rnds4, 180431}, // __builtin_ia32_sha1rnds4
      {Intrinsic::x86_sha256msg1, 180456}, // __builtin_ia32_sha256msg1
      {Intrinsic::x86_sha256msg2, 180482}, // __builtin_ia32_sha256msg2
      {Intrinsic::x86_sha256rnds2, 180508}, // __builtin_ia32_sha256rnds2
      {Intrinsic::x86_slwpcb, 180535}, // __builtin_ia32_slwpcb
      {Intrinsic::x86_stui, 184626}, // __builtin_ia32_stui
      {Intrinsic::x86_avx512_sub_pd_512, 170819}, // __builtin_ia32_subpd512
      {Intrinsic::x86_avx512fp16_sub_ph_512, 176612}, // __builtin_ia32_subph512
      {Intrinsic::x86_avx512_sub_ps_512, 170843}, // __builtin_ia32_subps512
      {Intrinsic::x86_avx512_mask_sub_sd_round, 167474}, // __builtin_ia32_subsd_round_mask
      {Intrinsic::x86_avx512fp16_mask_sub_sh_round, 173665}, // __builtin_ia32_subsh_round_mask
      {Intrinsic::x86_avx512_mask_sub_ss_round, 167506}, // __builtin_ia32_subss_round_mask
      {Intrinsic::x86_tcmmimfp16ps, 184698}, // __builtin_ia32_tcmmimfp16ps
      {Intrinsic::x86_tcmmimfp16ps_internal, 184726}, // __builtin_ia32_tcmmimfp16ps_internal
      {Intrinsic::x86_tcmmrlfp16ps, 184763}, // __builtin_ia32_tcmmrlfp16ps
      {Intrinsic::x86_tcmmrlfp16ps_internal, 184791}, // __builtin_ia32_tcmmrlfp16ps_internal
      {Intrinsic::x86_tdpbf16ps, 184828}, // __builtin_ia32_tdpbf16ps
      {Intrinsic::x86_tdpbf16ps_internal, 184853}, // __builtin_ia32_tdpbf16ps_internal
      {Intrinsic::x86_tdpbssd, 184887}, // __builtin_ia32_tdpbssd
      {Intrinsic::x86_tdpbssd_internal, 184910}, // __builtin_ia32_tdpbssd_internal
      {Intrinsic::x86_tdpbsud, 184942}, // __builtin_ia32_tdpbsud
      {Intrinsic::x86_tdpbsud_internal, 184965}, // __builtin_ia32_tdpbsud_internal
      {Intrinsic::x86_tdpbusd, 184997}, // __builtin_ia32_tdpbusd
      {Intrinsic::x86_tdpbusd_internal, 185020}, // __builtin_ia32_tdpbusd_internal
      {Intrinsic::x86_tdpbuud, 185052}, // __builtin_ia32_tdpbuud
      {Intrinsic::x86_tdpbuud_internal, 185075}, // __builtin_ia32_tdpbuud_internal
      {Intrinsic::x86_tdpfp16ps, 185107}, // __builtin_ia32_tdpfp16ps
      {Intrinsic::x86_tdpfp16ps_internal, 185132}, // __builtin_ia32_tdpfp16ps_internal
      {Intrinsic::x86_testui, 185166}, // __builtin_ia32_testui
      {Intrinsic::x86_ldtilecfg, 177974}, // __builtin_ia32_tile_loadconfig
      {Intrinsic::x86_ldtilecfg_internal, 178005}, // __builtin_ia32_tile_loadconfig_internal
      {Intrinsic::x86_sttilecfg, 184594}, // __builtin_ia32_tile_storeconfig
      {Intrinsic::x86_tileloadd64, 185188}, // __builtin_ia32_tileloadd64
      {Intrinsic::x86_tileloadd64_internal, 185215}, // __builtin_ia32_tileloadd64_internal
      {Intrinsic::x86_tileloaddt164, 185251}, // __builtin_ia32_tileloaddt164
      {Intrinsic::x86_tileloaddt164_internal, 185280}, // __builtin_ia32_tileloaddt164_internal
      {Intrinsic::x86_tilerelease, 185318}, // __builtin_ia32_tilerelease
      {Intrinsic::x86_tilestored64, 185345}, // __builtin_ia32_tilestored64
      {Intrinsic::x86_tilestored64_internal, 185373}, // __builtin_ia32_tilestored64_internal
      {Intrinsic::x86_tilezero, 185410}, // __builtin_ia32_tilezero
      {Intrinsic::x86_tilezero_internal, 185434}, // __builtin_ia32_tilezero_internal
      {Intrinsic::x86_tpause, 185467}, // __builtin_ia32_tpause
      {Intrinsic::x86_sse_ucomieq_ss, 181199}, // __builtin_ia32_ucomieq
      {Intrinsic::x86_sse_ucomige_ss, 181222}, // __builtin_ia32_ucomige
      {Intrinsic::x86_sse_ucomigt_ss, 181245}, // __builtin_ia32_ucomigt
      {Intrinsic::x86_sse_ucomile_ss, 181268}, // __builtin_ia32_ucomile
      {Intrinsic::x86_sse_ucomilt_ss, 181291}, // __builtin_ia32_ucomilt
      {Intrinsic::x86_sse_ucomineq_ss, 181314}, // __builtin_ia32_ucomineq
      {Intrinsic::x86_sse2_ucomieq_sd, 182624}, // __builtin_ia32_ucomisdeq
      {Intrinsic::x86_sse2_ucomige_sd, 182649}, // __builtin_ia32_ucomisdge
      {Intrinsic::x86_sse2_ucomigt_sd, 182674}, // __builtin_ia32_ucomisdgt
      {Intrinsic::x86_sse2_ucomile_sd, 182699}, // __builtin_ia32_ucomisdle
      {Intrinsic::x86_sse2_ucomilt_sd, 182724}, // __builtin_ia32_ucomisdlt
      {Intrinsic::x86_sse2_ucomineq_sd, 182749}, // __builtin_ia32_ucomisdneq
      {Intrinsic::x86_umonitor, 185489}, // __builtin_ia32_umonitor
      {Intrinsic::x86_umwait, 185513}, // __builtin_ia32_umwait
      {Intrinsic::x86_vbcstnebf162ps128, 185535}, // __builtin_ia32_vbcstnebf162ps128
      {Intrinsic::x86_vbcstnebf162ps256, 185568}, // __builtin_ia32_vbcstnebf162ps256
      {Intrinsic::x86_vbcstnesh2ps128, 185601}, // __builtin_ia32_vbcstnesh2ps128
      {Intrinsic::x86_vbcstnesh2ps256, 185632}, // __builtin_ia32_vbcstnesh2ps256
      {Intrinsic::x86_avx512_vcomi_sd, 170867}, // __builtin_ia32_vcomisd
      {Intrinsic::x86_avx512fp16_vcomi_sh, 176636}, // __builtin_ia32_vcomish
      {Intrinsic::x86_avx512_vcomi_ss, 170890}, // __builtin_ia32_vcomiss
      {Intrinsic::x86_avx512fp16_mask_vcvtdq2ph_128, 173697}, // __builtin_ia32_vcvtdq2ph128_mask
      {Intrinsic::x86_vcvtneebf162ps128, 185663}, // __builtin_ia32_vcvtneebf162ps128
      {Intrinsic::x86_vcvtneebf162ps256, 185696}, // __builtin_ia32_vcvtneebf162ps256
      {Intrinsic::x86_vcvtneeph2ps128, 185729}, // __builtin_ia32_vcvtneeph2ps128
      {Intrinsic::x86_vcvtneeph2ps256, 185760}, // __builtin_ia32_vcvtneeph2ps256
      {Intrinsic::x86_vcvtneobf162ps128, 185791}, // __builtin_ia32_vcvtneobf162ps128
      {Intrinsic::x86_vcvtneobf162ps256, 185824}, // __builtin_ia32_vcvtneobf162ps256
      {Intrinsic::x86_vcvtneoph2ps128, 185857}, // __builtin_ia32_vcvtneoph2ps128
      {Intrinsic::x86_vcvtneoph2ps256, 185888}, // __builtin_ia32_vcvtneoph2ps256
      {Intrinsic::x86_vcvtneps2bf16128, 185919}, // __builtin_ia32_vcvtneps2bf16128
      {Intrinsic::x86_vcvtneps2bf16256, 185951}, // __builtin_ia32_vcvtneps2bf16256
      {Intrinsic::x86_avx512fp16_mask_vcvtpd2ph_128, 173730}, // __builtin_ia32_vcvtpd2ph128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtpd2ph_256, 173763}, // __builtin_ia32_vcvtpd2ph256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtpd2ph_512, 173796}, // __builtin_ia32_vcvtpd2ph512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2dq_128, 173829}, // __builtin_ia32_vcvtph2dq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2dq_256, 173862}, // __builtin_ia32_vcvtph2dq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2dq_512, 173895}, // __builtin_ia32_vcvtph2dq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2pd_128, 173928}, // __builtin_ia32_vcvtph2pd128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2pd_256, 173961}, // __builtin_ia32_vcvtph2pd256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2pd_512, 173994}, // __builtin_ia32_vcvtph2pd512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2psx_128, 174027}, // __builtin_ia32_vcvtph2psx128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2psx_256, 174061}, // __builtin_ia32_vcvtph2psx256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2psx_512, 174095}, // __builtin_ia32_vcvtph2psx512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2qq_128, 174129}, // __builtin_ia32_vcvtph2qq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2qq_256, 174162}, // __builtin_ia32_vcvtph2qq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2qq_512, 174195}, // __builtin_ia32_vcvtph2qq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2udq_128, 174228}, // __builtin_ia32_vcvtph2udq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2udq_256, 174262}, // __builtin_ia32_vcvtph2udq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2udq_512, 174296}, // __builtin_ia32_vcvtph2udq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2uqq_128, 174330}, // __builtin_ia32_vcvtph2uqq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2uqq_256, 174364}, // __builtin_ia32_vcvtph2uqq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2uqq_512, 174398}, // __builtin_ia32_vcvtph2uqq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2uw_128, 174432}, // __builtin_ia32_vcvtph2uw128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2uw_256, 174465}, // __builtin_ia32_vcvtph2uw256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2uw_512, 174498}, // __builtin_ia32_vcvtph2uw512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2w_128, 174531}, // __builtin_ia32_vcvtph2w128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2w_256, 174563}, // __builtin_ia32_vcvtph2w256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtph2w_512, 174595}, // __builtin_ia32_vcvtph2w512_mask
      {Intrinsic::x86_vcvtps2ph_128, 185983}, // __builtin_ia32_vcvtps2ph
      {Intrinsic::x86_vcvtps2ph_256, 186008}, // __builtin_ia32_vcvtps2ph256
      {Intrinsic::x86_avx512_mask_vcvtps2ph_256, 167568}, // __builtin_ia32_vcvtps2ph256_mask
      {Intrinsic::x86_avx512_mask_vcvtps2ph_512, 167601}, // __builtin_ia32_vcvtps2ph512_mask
      {Intrinsic::x86_avx512_mask_vcvtps2ph_128, 167538}, // __builtin_ia32_vcvtps2ph_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtps2phx_128, 174627}, // __builtin_ia32_vcvtps2phx128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtps2phx_256, 174661}, // __builtin_ia32_vcvtps2phx256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtps2phx_512, 174695}, // __builtin_ia32_vcvtps2phx512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtqq2ph_128, 174729}, // __builtin_ia32_vcvtqq2ph128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtqq2ph_256, 174762}, // __builtin_ia32_vcvtqq2ph256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtsd2sh_round, 174795}, // __builtin_ia32_vcvtsd2sh_round_mask
      {Intrinsic::x86_avx512_vcvtsd2si32, 170913}, // __builtin_ia32_vcvtsd2si32
      {Intrinsic::x86_avx512_vcvtsd2si64, 170940}, // __builtin_ia32_vcvtsd2si64
      {Intrinsic::x86_avx512_vcvtsd2usi32, 170967}, // __builtin_ia32_vcvtsd2usi32
      {Intrinsic::x86_avx512_vcvtsd2usi64, 170995}, // __builtin_ia32_vcvtsd2usi64
      {Intrinsic::x86_avx512fp16_mask_vcvtsh2sd_round, 174831}, // __builtin_ia32_vcvtsh2sd_round_mask
      {Intrinsic::x86_avx512fp16_vcvtsh2si32, 176659}, // __builtin_ia32_vcvtsh2si32
      {Intrinsic::x86_avx512fp16_vcvtsh2si64, 176686}, // __builtin_ia32_vcvtsh2si64
      {Intrinsic::x86_avx512fp16_mask_vcvtsh2ss_round, 174867}, // __builtin_ia32_vcvtsh2ss_round_mask
      {Intrinsic::x86_avx512fp16_vcvtsh2usi32, 176713}, // __builtin_ia32_vcvtsh2usi32
      {Intrinsic::x86_avx512fp16_vcvtsh2usi64, 176741}, // __builtin_ia32_vcvtsh2usi64
      {Intrinsic::x86_avx512fp16_vcvtsi2sh, 176769}, // __builtin_ia32_vcvtsi2sh
      {Intrinsic::x86_avx512fp16_vcvtsi642sh, 176794}, // __builtin_ia32_vcvtsi642sh
      {Intrinsic::x86_avx512fp16_mask_vcvtss2sh_round, 174903}, // __builtin_ia32_vcvtss2sh_round_mask
      {Intrinsic::x86_avx512_vcvtss2si32, 171023}, // __builtin_ia32_vcvtss2si32
      {Intrinsic::x86_avx512_vcvtss2si64, 171050}, // __builtin_ia32_vcvtss2si64
      {Intrinsic::x86_avx512_vcvtss2usi32, 171077}, // __builtin_ia32_vcvtss2usi32
      {Intrinsic::x86_avx512_vcvtss2usi64, 171105}, // __builtin_ia32_vcvtss2usi64
      {Intrinsic::x86_avx512fp16_mask_vcvttph2dq_128, 174939}, // __builtin_ia32_vcvttph2dq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2dq_256, 174973}, // __builtin_ia32_vcvttph2dq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2dq_512, 175007}, // __builtin_ia32_vcvttph2dq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2qq_128, 175041}, // __builtin_ia32_vcvttph2qq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2qq_256, 175075}, // __builtin_ia32_vcvttph2qq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2qq_512, 175109}, // __builtin_ia32_vcvttph2qq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2udq_128, 175143}, // __builtin_ia32_vcvttph2udq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2udq_256, 175178}, // __builtin_ia32_vcvttph2udq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2udq_512, 175213}, // __builtin_ia32_vcvttph2udq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2uqq_128, 175248}, // __builtin_ia32_vcvttph2uqq128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2uqq_256, 175283}, // __builtin_ia32_vcvttph2uqq256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2uqq_512, 175318}, // __builtin_ia32_vcvttph2uqq512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2uw_128, 175353}, // __builtin_ia32_vcvttph2uw128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2uw_256, 175387}, // __builtin_ia32_vcvttph2uw256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2uw_512, 175421}, // __builtin_ia32_vcvttph2uw512_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2w_128, 175455}, // __builtin_ia32_vcvttph2w128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2w_256, 175488}, // __builtin_ia32_vcvttph2w256_mask
      {Intrinsic::x86_avx512fp16_mask_vcvttph2w_512, 175521}, // __builtin_ia32_vcvttph2w512_mask
      {Intrinsic::x86_avx512_cvttsd2si, 159609}, // __builtin_ia32_vcvttsd2si32
      {Intrinsic::x86_avx512_cvttsd2si64, 159637}, // __builtin_ia32_vcvttsd2si64
      {Intrinsic::x86_avx512_cvttsd2usi, 159665}, // __builtin_ia32_vcvttsd2usi32
      {Intrinsic::x86_avx512_cvttsd2usi64, 159694}, // __builtin_ia32_vcvttsd2usi64
      {Intrinsic::x86_avx512fp16_vcvttsh2si32, 176821}, // __builtin_ia32_vcvttsh2si32
      {Intrinsic::x86_avx512fp16_vcvttsh2si64, 176849}, // __builtin_ia32_vcvttsh2si64
      {Intrinsic::x86_avx512fp16_vcvttsh2usi32, 176877}, // __builtin_ia32_vcvttsh2usi32
      {Intrinsic::x86_avx512fp16_vcvttsh2usi64, 176906}, // __builtin_ia32_vcvttsh2usi64
      {Intrinsic::x86_avx512_cvttss2si, 159723}, // __builtin_ia32_vcvttss2si32
      {Intrinsic::x86_avx512_cvttss2si64, 159751}, // __builtin_ia32_vcvttss2si64
      {Intrinsic::x86_avx512_cvttss2usi, 159779}, // __builtin_ia32_vcvttss2usi32
      {Intrinsic::x86_avx512_cvttss2usi64, 159808}, // __builtin_ia32_vcvttss2usi64
      {Intrinsic::x86_avx512fp16_mask_vcvtudq2ph_128, 175554}, // __builtin_ia32_vcvtudq2ph128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtuqq2ph_128, 175588}, // __builtin_ia32_vcvtuqq2ph128_mask
      {Intrinsic::x86_avx512fp16_mask_vcvtuqq2ph_256, 175622}, // __builtin_ia32_vcvtuqq2ph256_mask
      {Intrinsic::x86_avx512fp16_vcvtusi2sh, 176935}, // __builtin_ia32_vcvtusi2sh
      {Intrinsic::x86_avx512fp16_vcvtusi642sh, 176961}, // __builtin_ia32_vcvtusi642sh
      {Intrinsic::x86_mmx_pextr_w, 178765}, // __builtin_ia32_vec_ext_v4hi
      {Intrinsic::x86_mmx_pinsr_w, 178793}, // __builtin_ia32_vec_set_v4hi
      {Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_128, 175656}, // __builtin_ia32_vfcmaddcph128_mask
      {Intrinsic::x86_avx512fp16_maskz_vfcmadd_cph_128, 176174}, // __builtin_ia32_vfcmaddcph128_maskz
      {Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_256, 175690}, // __builtin_ia32_vfcmaddcph256_mask
      {Intrinsic::x86_avx512fp16_maskz_vfcmadd_cph_256, 176209}, // __builtin_ia32_vfcmaddcph256_maskz
      {Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512, 175724}, // __builtin_ia32_vfcmaddcph512_mask3
      {Intrinsic::x86_avx512fp16_maskz_vfcmadd_cph_512, 176244}, // __builtin_ia32_vfcmaddcph512_maskz
      {Intrinsic::x86_avx512fp16_mask_vfcmadd_csh, 175759}, // __builtin_ia32_vfcmaddcsh_mask
      {Intrinsic::x86_avx512fp16_maskz_vfcmadd_csh, 176279}, // __builtin_ia32_vfcmaddcsh_maskz
      {Intrinsic::x86_avx512fp16_mask_vfcmul_cph_128, 175790}, // __builtin_ia32_vfcmulcph128_mask
      {Intrinsic::x86_avx512fp16_mask_vfcmul_cph_256, 175823}, // __builtin_ia32_vfcmulcph256_mask
      {Intrinsic::x86_avx512fp16_mask_vfcmul_cph_512, 175856}, // __builtin_ia32_vfcmulcph512_mask
      {Intrinsic::x86_avx512fp16_mask_vfcmul_csh, 175889}, // __builtin_ia32_vfcmulcsh_mask
      {Intrinsic::x86_avx512fp16_mask_vfmadd_cph_128, 175919}, // __builtin_ia32_vfmaddcph128_mask
      {Intrinsic::x86_avx512fp16_maskz_vfmadd_cph_128, 176311}, // __builtin_ia32_vfmaddcph128_maskz
      {Intrinsic::x86_avx512fp16_mask_vfmadd_cph_256, 175952}, // __builtin_ia32_vfmaddcph256_mask
      {Intrinsic::x86_avx512fp16_maskz_vfmadd_cph_256, 176345}, // __builtin_ia32_vfmaddcph256_maskz
      {Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512, 175985}, // __builtin_ia32_vfmaddcph512_mask3
      {Intrinsic::x86_avx512fp16_maskz_vfmadd_cph_512, 176379}, // __builtin_ia32_vfmaddcph512_maskz
      {Intrinsic::x86_avx512fp16_mask_vfmadd_csh, 176019}, // __builtin_ia32_vfmaddcsh_mask
      {Intrinsic::x86_avx512fp16_maskz_vfmadd_csh, 176413}, // __builtin_ia32_vfmaddcsh_maskz
      {Intrinsic::x86_fma_vfmaddsub_pd, 177697}, // __builtin_ia32_vfmaddsubpd
      {Intrinsic::x86_fma_vfmaddsub_pd_256, 177724}, // __builtin_ia32_vfmaddsubpd256
      {Intrinsic::x86_avx512fp16_vfmaddsub_ph_128, 176989}, // __builtin_ia32_vfmaddsubph
      {Intrinsic::x86_avx512fp16_vfmaddsub_ph_256, 177016}, // __builtin_ia32_vfmaddsubph256
      {Intrinsic::x86_fma_vfmaddsub_ps, 177754}, // __builtin_ia32_vfmaddsubps
      {Intrinsic::x86_fma_vfmaddsub_ps_256, 177781}, // __builtin_ia32_vfmaddsubps256
      {Intrinsic::x86_avx512fp16_mask_vfmul_cph_128, 176049}, // __builtin_ia32_vfmulcph128_mask
      {Intrinsic::x86_avx512fp16_mask_vfmul_cph_256, 176081}, // __builtin_ia32_vfmulcph256_mask
      {Intrinsic::x86_avx512fp16_mask_vfmul_cph_512, 176113}, // __builtin_ia32_vfmulcph512_mask
      {Intrinsic::x86_avx512fp16_mask_vfmul_csh, 176145}, // __builtin_ia32_vfmulcsh_mask
      {Intrinsic::x86_xop_vfrcz_pd, 186944}, // __builtin_ia32_vfrczpd
      {Intrinsic::x86_xop_vfrcz_pd_256, 186967}, // __builtin_ia32_vfrczpd256
      {Intrinsic::x86_xop_vfrcz_ps, 186993}, // __builtin_ia32_vfrczps
      {Intrinsic::x86_xop_vfrcz_ps_256, 187016}, // __builtin_ia32_vfrczps256
      {Intrinsic::x86_xop_vfrcz_sd, 187042}, // __builtin_ia32_vfrczsd
      {Intrinsic::x86_xop_vfrcz_ss, 187065}, // __builtin_ia32_vfrczss
      {Intrinsic::x86_vgf2p8affineinvqb_128, 186036}, // __builtin_ia32_vgf2p8affineinvqb_v16qi
      {Intrinsic::x86_vgf2p8affineinvqb_256, 186075}, // __builtin_ia32_vgf2p8affineinvqb_v32qi
      {Intrinsic::x86_vgf2p8affineinvqb_512, 186114}, // __builtin_ia32_vgf2p8affineinvqb_v64qi
      {Intrinsic::x86_vgf2p8affineqb_128, 186153}, // __builtin_ia32_vgf2p8affineqb_v16qi
      {Intrinsic::x86_vgf2p8affineqb_256, 186189}, // __builtin_ia32_vgf2p8affineqb_v32qi
      {Intrinsic::x86_vgf2p8affineqb_512, 186225}, // __builtin_ia32_vgf2p8affineqb_v64qi
      {Intrinsic::x86_vgf2p8mulb_128, 186261}, // __builtin_ia32_vgf2p8mulb_v16qi
      {Intrinsic::x86_vgf2p8mulb_256, 186293}, // __builtin_ia32_vgf2p8mulb_v32qi
      {Intrinsic::x86_vgf2p8mulb_512, 186325}, // __builtin_ia32_vgf2p8mulb_v64qi
      {Intrinsic::x86_avx512_conflict_q_128, 159435}, // __builtin_ia32_vpconflictdi_128
      {Intrinsic::x86_avx512_conflict_q_256, 159467}, // __builtin_ia32_vpconflictdi_256
      {Intrinsic::x86_avx512_conflict_q_512, 159499}, // __builtin_ia32_vpconflictdi_512
      {Intrinsic::x86_avx512_conflict_d_128, 159339}, // __builtin_ia32_vpconflictsi_128
      {Intrinsic::x86_avx512_conflict_d_256, 159371}, // __builtin_ia32_vpconflictsi_256
      {Intrinsic::x86_avx512_conflict_d_512, 159403}, // __builtin_ia32_vpconflictsi_512
      {Intrinsic::x86_avx2_vpdpbssd_128, 158451}, // __builtin_ia32_vpdpbssd128
      {Intrinsic::x86_avx2_vpdpbssd_256, 158478}, // __builtin_ia32_vpdpbssd256
      {Intrinsic::x86_avx2_vpdpbssds_128, 158505}, // __builtin_ia32_vpdpbssds128
      {Intrinsic::x86_avx2_vpdpbssds_256, 158533}, // __builtin_ia32_vpdpbssds256
      {Intrinsic::x86_avx2_vpdpbsud_128, 158561}, // __builtin_ia32_vpdpbsud128
      {Intrinsic::x86_avx2_vpdpbsud_256, 158588}, // __builtin_ia32_vpdpbsud256
      {Intrinsic::x86_avx2_vpdpbsuds_128, 158615}, // __builtin_ia32_vpdpbsuds128
      {Intrinsic::x86_avx2_vpdpbsuds_256, 158643}, // __builtin_ia32_vpdpbsuds256
      {Intrinsic::x86_avx512_vpdpbusd_128, 171133}, // __builtin_ia32_vpdpbusd128
      {Intrinsic::x86_avx512_vpdpbusd_256, 171160}, // __builtin_ia32_vpdpbusd256
      {Intrinsic::x86_avx512_vpdpbusd_512, 171187}, // __builtin_ia32_vpdpbusd512
      {Intrinsic::x86_avx512_vpdpbusds_128, 171214}, // __builtin_ia32_vpdpbusds128
      {Intrinsic::x86_avx512_vpdpbusds_256, 171242}, // __builtin_ia32_vpdpbusds256
      {Intrinsic::x86_avx512_vpdpbusds_512, 171270}, // __builtin_ia32_vpdpbusds512
      {Intrinsic::x86_avx2_vpdpbuud_128, 158671}, // __builtin_ia32_vpdpbuud128
      {Intrinsic::x86_avx2_vpdpbuud_256, 158698}, // __builtin_ia32_vpdpbuud256
      {Intrinsic::x86_avx2_vpdpbuuds_128, 158725}, // __builtin_ia32_vpdpbuuds128
      {Intrinsic::x86_avx2_vpdpbuuds_256, 158753}, // __builtin_ia32_vpdpbuuds256
      {Intrinsic::x86_avx512_vpdpwssd_128, 171298}, // __builtin_ia32_vpdpwssd128
      {Intrinsic::x86_avx512_vpdpwssd_256, 171325}, // __builtin_ia32_vpdpwssd256
      {Intrinsic::x86_avx512_vpdpwssd_512, 171352}, // __builtin_ia32_vpdpwssd512
      {Intrinsic::x86_avx512_vpdpwssds_128, 171379}, // __builtin_ia32_vpdpwssds128
      {Intrinsic::x86_avx512_vpdpwssds_256, 171407}, // __builtin_ia32_vpdpwssds256
      {Intrinsic::x86_avx512_vpdpwssds_512, 171435}, // __builtin_ia32_vpdpwssds512
      {Intrinsic::x86_avx2_vpdpwsud_128, 158781}, // __builtin_ia32_vpdpwsud128
      {Intrinsic::x86_avx2_vpdpwsud_256, 158808}, // __builtin_ia32_vpdpwsud256
      {Intrinsic::x86_avx2_vpdpwsuds_128, 158835}, // __builtin_ia32_vpdpwsuds128
      {Intrinsic::x86_avx2_vpdpwsuds_256, 158863}, // __builtin_ia32_vpdpwsuds256
      {Intrinsic::x86_avx2_vpdpwusd_128, 158891}, // __builtin_ia32_vpdpwusd128
      {Intrinsic::x86_avx2_vpdpwusd_256, 158918}, // __builtin_ia32_vpdpwusd256
      {Intrinsic::x86_avx2_vpdpwusds_128, 158945}, // __builtin_ia32_vpdpwusds128
      {Intrinsic::x86_avx2_vpdpwusds_256, 158973}, // __builtin_ia32_vpdpwusds256
      {Intrinsic::x86_avx2_vpdpwuud_128, 159001}, // __builtin_ia32_vpdpwuud128
      {Intrinsic::x86_avx2_vpdpwuud_256, 159028}, // __builtin_ia32_vpdpwuud256
      {Intrinsic::x86_avx2_vpdpwuuds_128, 159055}, // __builtin_ia32_vpdpwuuds128
      {Intrinsic::x86_avx2_vpdpwuuds_256, 159083}, // __builtin_ia32_vpdpwuuds256
      {Intrinsic::x86_avx512_vpermi2var_d_128, 171463}, // __builtin_ia32_vpermi2vard128
      {Intrinsic::x86_avx512_vpermi2var_d_256, 171493}, // __builtin_ia32_vpermi2vard256
      {Intrinsic::x86_avx512_vpermi2var_d_512, 171523}, // __builtin_ia32_vpermi2vard512
      {Intrinsic::x86_avx512_vpermi2var_hi_128, 171553}, // __builtin_ia32_vpermi2varhi128
      {Intrinsic::x86_avx512_vpermi2var_hi_256, 171584}, // __builtin_ia32_vpermi2varhi256
      {Intrinsic::x86_avx512_vpermi2var_hi_512, 171615}, // __builtin_ia32_vpermi2varhi512
      {Intrinsic::x86_avx512_vpermi2var_pd_128, 171646}, // __builtin_ia32_vpermi2varpd128
      {Intrinsic::x86_avx512_vpermi2var_pd_256, 171677}, // __builtin_ia32_vpermi2varpd256
      {Intrinsic::x86_avx512_vpermi2var_pd_512, 171708}, // __builtin_ia32_vpermi2varpd512
      {Intrinsic::x86_avx512_vpermi2var_ps_128, 171739}, // __builtin_ia32_vpermi2varps128
      {Intrinsic::x86_avx512_vpermi2var_ps_256, 171770}, // __builtin_ia32_vpermi2varps256
      {Intrinsic::x86_avx512_vpermi2var_ps_512, 171801}, // __builtin_ia32_vpermi2varps512
      {Intrinsic::x86_avx512_vpermi2var_q_128, 171832}, // __builtin_ia32_vpermi2varq128
      {Intrinsic::x86_avx512_vpermi2var_q_256, 171862}, // __builtin_ia32_vpermi2varq256
      {Intrinsic::x86_avx512_vpermi2var_q_512, 171892}, // __builtin_ia32_vpermi2varq512
      {Intrinsic::x86_avx512_vpermi2var_qi_128, 171922}, // __builtin_ia32_vpermi2varqi128
      {Intrinsic::x86_avx512_vpermi2var_qi_256, 171953}, // __builtin_ia32_vpermi2varqi256
      {Intrinsic::x86_avx512_vpermi2var_qi_512, 171984}, // __builtin_ia32_vpermi2varqi512
      {Intrinsic::x86_xop_vpermil2pd, 187088}, // __builtin_ia32_vpermil2pd
      {Intrinsic::x86_xop_vpermil2pd_256, 187114}, // __builtin_ia32_vpermil2pd256
      {Intrinsic::x86_xop_vpermil2ps, 187143}, // __builtin_ia32_vpermil2ps
      {Intrinsic::x86_xop_vpermil2ps_256, 187169}, // __builtin_ia32_vpermil2ps256
      {Intrinsic::x86_avx_vpermilvar_pd, 155988}, // __builtin_ia32_vpermilvarpd
      {Intrinsic::x86_avx_vpermilvar_pd_256, 156016}, // __builtin_ia32_vpermilvarpd256
      {Intrinsic::x86_avx512_vpermilvar_pd_512, 172015}, // __builtin_ia32_vpermilvarpd512
      {Intrinsic::x86_avx_vpermilvar_ps, 156047}, // __builtin_ia32_vpermilvarps
      {Intrinsic::x86_avx_vpermilvar_ps_256, 156075}, // __builtin_ia32_vpermilvarps256
      {Intrinsic::x86_avx512_vpermilvar_ps_512, 172046}, // __builtin_ia32_vpermilvarps512
      {Intrinsic::x86_xop_vphaddbd, 187198}, // __builtin_ia32_vphaddbd
      {Intrinsic::x86_xop_vphaddbq, 187222}, // __builtin_ia32_vphaddbq
      {Intrinsic::x86_xop_vphaddbw, 187246}, // __builtin_ia32_vphaddbw
      {Intrinsic::x86_xop_vphadddq, 187270}, // __builtin_ia32_vphadddq
      {Intrinsic::x86_xop_vphaddubd, 187294}, // __builtin_ia32_vphaddubd
      {Intrinsic::x86_xop_vphaddubq, 187319}, // __builtin_ia32_vphaddubq
      {Intrinsic::x86_xop_vphaddubw, 187344}, // __builtin_ia32_vphaddubw
      {Intrinsic::x86_xop_vphaddudq, 187369}, // __builtin_ia32_vphaddudq
      {Intrinsic::x86_xop_vphadduwd, 187394}, // __builtin_ia32_vphadduwd
      {Intrinsic::x86_xop_vphadduwq, 187419}, // __builtin_ia32_vphadduwq
      {Intrinsic::x86_xop_vphaddwd, 187444}, // __builtin_ia32_vphaddwd
      {Intrinsic::x86_xop_vphaddwq, 187468}, // __builtin_ia32_vphaddwq
      {Intrinsic::x86_xop_vphsubbw, 187492}, // __builtin_ia32_vphsubbw
      {Intrinsic::x86_xop_vphsubdq, 187516}, // __builtin_ia32_vphsubdq
      {Intrinsic::x86_xop_vphsubwd, 187540}, // __builtin_ia32_vphsubwd
      {Intrinsic::x86_xop_vpmacsdd, 187564}, // __builtin_ia32_vpmacsdd
      {Intrinsic::x86_xop_vpmacsdqh, 187588}, // __builtin_ia32_vpmacsdqh
      {Intrinsic::x86_xop_vpmacsdql, 187613}, // __builtin_ia32_vpmacsdql
      {Intrinsic::x86_xop_vpmacssdd, 187638}, // __builtin_ia32_vpmacssdd
      {Intrinsic::x86_xop_vpmacssdqh, 187663}, // __builtin_ia32_vpmacssdqh
      {Intrinsic::x86_xop_vpmacssdql, 187689}, // __builtin_ia32_vpmacssdql
      {Intrinsic::x86_xop_vpmacsswd, 187715}, // __builtin_ia32_vpmacsswd
      {Intrinsic::x86_xop_vpmacssww, 187740}, // __builtin_ia32_vpmacssww
      {Intrinsic::x86_xop_vpmacswd, 187765}, // __builtin_ia32_vpmacswd
      {Intrinsic::x86_xop_vpmacsww, 187789}, // __builtin_ia32_vpmacsww
      {Intrinsic::x86_xop_vpmadcsswd, 187813}, // __builtin_ia32_vpmadcsswd
      {Intrinsic::x86_xop_vpmadcswd, 187839}, // __builtin_ia32_vpmadcswd
      {Intrinsic::x86_avx512_vpmadd52h_uq_128, 172077}, // __builtin_ia32_vpmadd52huq128
      {Intrinsic::x86_avx512_vpmadd52h_uq_256, 172107}, // __builtin_ia32_vpmadd52huq256
      {Intrinsic::x86_avx512_vpmadd52h_uq_512, 172137}, // __builtin_ia32_vpmadd52huq512
      {Intrinsic::x86_avx512_vpmadd52l_uq_128, 172167}, // __builtin_ia32_vpmadd52luq128
      {Intrinsic::x86_avx512_vpmadd52l_uq_256, 172197}, // __builtin_ia32_vpmadd52luq256
      {Intrinsic::x86_avx512_vpmadd52l_uq_512, 172227}, // __builtin_ia32_vpmadd52luq512
      {Intrinsic::x86_avx512_pmultishift_qb_128, 168676}, // __builtin_ia32_vpmultishiftqb128
      {Intrinsic::x86_avx512_pmultishift_qb_256, 168709}, // __builtin_ia32_vpmultishiftqb256
      {Intrinsic::x86_avx512_pmultishift_qb_512, 168742}, // __builtin_ia32_vpmultishiftqb512
      {Intrinsic::x86_xop_vpperm, 187864}, // __builtin_ia32_vpperm
      {Intrinsic::x86_xop_vpshab, 187886}, // __builtin_ia32_vpshab
      {Intrinsic::x86_xop_vpshad, 187908}, // __builtin_ia32_vpshad
      {Intrinsic::x86_xop_vpshaq, 187930}, // __builtin_ia32_vpshaq
      {Intrinsic::x86_xop_vpshaw, 187952}, // __builtin_ia32_vpshaw
      {Intrinsic::x86_xop_vpshlb, 187974}, // __builtin_ia32_vpshlb
      {Intrinsic::x86_xop_vpshld, 187996}, // __builtin_ia32_vpshld
      {Intrinsic::x86_xop_vpshlq, 188018}, // __builtin_ia32_vpshlq
      {Intrinsic::x86_xop_vpshlw, 188040}, // __builtin_ia32_vpshlw
      {Intrinsic::x86_vsha512msg1, 186357}, // __builtin_ia32_vsha512msg1
      {Intrinsic::x86_vsha512msg2, 186384}, // __builtin_ia32_vsha512msg2
      {Intrinsic::x86_vsha512rnds2, 186411}, // __builtin_ia32_vsha512rnds2
      {Intrinsic::x86_vsm3msg1, 186439}, // __builtin_ia32_vsm3msg1
      {Intrinsic::x86_vsm3msg2, 186463}, // __builtin_ia32_vsm3msg2
      {Intrinsic::x86_vsm3rnds2, 186487}, // __builtin_ia32_vsm3rnds2
      {Intrinsic::x86_vsm4key4128, 186512}, // __builtin_ia32_vsm4key4128
      {Intrinsic::x86_vsm4key4256, 186539}, // __builtin_ia32_vsm4key4256
      {Intrinsic::x86_vsm4rnds4128, 186566}, // __builtin_ia32_vsm4rnds4128
      {Intrinsic::x86_vsm4rnds4256, 186594}, // __builtin_ia32_vsm4rnds4256
      {Intrinsic::x86_avx_vtestc_pd, 156106}, // __builtin_ia32_vtestcpd
      {Intrinsic::x86_avx_vtestc_pd_256, 156130}, // __builtin_ia32_vtestcpd256
      {Intrinsic::x86_avx_vtestc_ps, 156157}, // __builtin_ia32_vtestcps
      {Intrinsic::x86_avx_vtestc_ps_256, 156181}, // __builtin_ia32_vtestcps256
      {Intrinsic::x86_avx_vtestnzc_pd, 156208}, // __builtin_ia32_vtestnzcpd
      {Intrinsic::x86_avx_vtestnzc_pd_256, 156234}, // __builtin_ia32_vtestnzcpd256
      {Intrinsic::x86_avx_vtestnzc_ps, 156263}, // __builtin_ia32_vtestnzcps
      {Intrinsic::x86_avx_vtestnzc_ps_256, 156289}, // __builtin_ia32_vtestnzcps256
      {Intrinsic::x86_avx_vtestz_pd, 156318}, // __builtin_ia32_vtestzpd
      {Intrinsic::x86_avx_vtestz_pd_256, 156342}, // __builtin_ia32_vtestzpd256
      {Intrinsic::x86_avx_vtestz_ps, 156369}, // __builtin_ia32_vtestzps
      {Intrinsic::x86_avx_vtestz_ps_256, 156393}, // __builtin_ia32_vtestzps256
      {Intrinsic::x86_avx_vzeroall, 156420}, // __builtin_ia32_vzeroall
      {Intrinsic::x86_avx_vzeroupper, 156444}, // __builtin_ia32_vzeroupper
      {Intrinsic::x86_wbinvd, 186622}, // __builtin_ia32_wbinvd
      {Intrinsic::x86_wbnoinvd, 186644}, // __builtin_ia32_wbnoinvd
      {Intrinsic::x86_wrfsbase_32, 186668}, // __builtin_ia32_wrfsbase32
      {Intrinsic::x86_wrfsbase_64, 186694}, // __builtin_ia32_wrfsbase64
      {Intrinsic::x86_wrgsbase_32, 186720}, // __builtin_ia32_wrgsbase32
      {Intrinsic::x86_wrgsbase_64, 186746}, // __builtin_ia32_wrgsbase64
      {Intrinsic::x86_flags_write_u32, 177635}, // __builtin_ia32_writeeflags_u32
      {Intrinsic::x86_flags_write_u64, 177666}, // __builtin_ia32_writeeflags_u64
      {Intrinsic::x86_wrpkru, 186772}, // __builtin_ia32_wrpkru
      {Intrinsic::x86_wrssd, 186794}, // __builtin_ia32_wrssd
      {Intrinsic::x86_wrssq, 186815}, // __builtin_ia32_wrssq
      {Intrinsic::x86_wrussd, 186836}, // __builtin_ia32_wrussd
      {Intrinsic::x86_wrussq, 186858}, // __builtin_ia32_wrussq
      {Intrinsic::x86_xabort, 186880}, // __builtin_ia32_xabort
      {Intrinsic::x86_xbegin, 186902}, // __builtin_ia32_xbegin
      {Intrinsic::x86_xend, 186924}, // __builtin_ia32_xend
      {Intrinsic::x86_xresldtrk, 188062}, // __builtin_ia32_xresldtrk
      {Intrinsic::x86_xsusldtrk, 188087}, // __builtin_ia32_xsusldtrk
      {Intrinsic::x86_xtest, 188112}, // __builtin_ia32_xtest
    };
    auto I = std::lower_bound(std::begin(x86Names),
                              std::end(x86Names),
                              BuiltinNameStr);
    if (I != std::end(x86Names) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  if (TargetPrefix == "xcore") {
    static const BuiltinEntry xcoreNames[] = {
      {Intrinsic::xcore_bitrev, 188133}, // __builtin_bitrev
      {Intrinsic::xcore_getid, 188150}, // __builtin_getid
      {Intrinsic::xcore_getps, 188166}, // __builtin_getps
      {Intrinsic::xcore_setps, 188182}, // __builtin_setps
    };
    auto I = std::lower_bound(std::begin(xcoreNames),
                              std::end(xcoreNames),
                              BuiltinNameStr);
    if (I != std::end(xcoreNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  return Intrinsic::not_intrinsic;
}
#endif

// Get the LLVM intrinsic that corresponds to a builtin.
// This is used by the C front-end.  The builtin name is passed
// in as BuiltinNameStr, and a target prefix (e.g. 'arm') is passed
// in as TargetPrefixStr.  The result is the matching Intrinsic::ID,
// or Intrinsic::not_intrinsic if no intrinsic corresponds to the builtin.
#ifdef GET_LLVM_INTRINSIC_FOR_MS_BUILTIN
Intrinsic::ID Intrinsic::getIntrinsicForMSBuiltin(const char *TargetPrefixStr, StringRef BuiltinNameStr) {
  static const char BuiltinNames[] = {
  '_', '_', 'd', 'm', 'b', '\000', '_', '_', 'd', 's', 'b', '\000', '_', '_', 'i',
  's', 'b', '\000', '_', 'M', 'o', 'v', 'e', 'F', 'r', 'o', 'm', 'C', 'o', 'p',
  'r', 'o', 'c', 'e', 's', 's', 'o', 'r', '\000', '_', 'M', 'o', 'v', 'e', 'F',
  'r', 'o', 'm', 'C', 'o', 'p', 'r', 'o', 'c', 'e', 's', 's', 'o', 'r', '2',
  '\000',
  };

  struct BuiltinEntry {
    Intrinsic::ID IntrinID;
    unsigned StrTabOffset;
    const char *getName() const {
      return &BuiltinNames[StrTabOffset];
    }
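    // Note: the comparison below looks at only the first RHS.size()
    // characters, so an entry whose name merely starts with RHS compares
    // "equal"; the lookup code therefore re-checks the lower_bound result
    // with a full string comparison before returning it.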
    bool operator<(StringRef RHS) const {
      return strncmp(getName(), RHS.data(), RHS.size()) < 0;
    }
  };
  StringRef TargetPrefix(TargetPrefixStr);

  if (TargetPrefix == "aarch64") {
    static const BuiltinEntry aarch64Names[] = {
      {Intrinsic::aarch64_dmb, 0}, // __dmb
      {Intrinsic::aarch64_dsb, 6}, // __dsb
      {Intrinsic::aarch64_isb, 12}, // __isb
    };
    auto I = std::lower_bound(std::begin(aarch64Names),
                              std::end(aarch64Names),
                              BuiltinNameStr);
    if (I != std::end(aarch64Names) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  if (TargetPrefix == "arm") {
    static const BuiltinEntry armNames[] = {
      {Intrinsic::arm_mrc, 18}, // _MoveFromCoprocessor
      {Intrinsic::arm_mrc2, 39}, // _MoveFromCoprocessor2
      {Intrinsic::arm_dmb, 0}, // __dmb
      {Intrinsic::arm_dsb, 6}, // __dsb
      {Intrinsic::arm_isb, 12}, // __isb
    };
    auto I = std::lower_bound(std::begin(armNames),
                              std::end(armNames),
                              BuiltinNameStr);
    if (I != std::end(armNames) &&
        I->getName() == BuiltinNameStr)
      return I->IntrinID;
  }
  return Intrinsic::not_intrinsic;
}
#endif
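
// A minimal usage sketch (hypothetical frontend code; assumes this .inc file
// is included somewhere with GET_LLVM_INTRINSIC_FOR_MS_BUILTIN defined):
//
//   Intrinsic::ID ID = Intrinsic::getIntrinsicForMSBuiltin("arm", "__dmb");
//   if (ID != Intrinsic::not_intrinsic)
//     ...; // lower the MS builtin to a call to the matched intrinsic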

//===- llvm/IR/DiagnosticInfo.h - Diagnostic Declaration --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the different classes involved in low level diagnostics.
//
// Diagnostics reporting is still done as part of the LLVMContext.
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DIAGNOSTICINFO_H
#define LLVM_IR_DIAGNOSTICINFO_H

#include "llvm-c/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TypeSize.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <string>

namespace llvm {

// Forward declarations.
class DiagnosticPrinter;
class DIFile;
class DISubprogram;
class CallInst;
class Function;
class Instruction;
class InstructionCost;
class Module;
class Type;
class Value;

/// Defines the different supported severities of a diagnostic.
enum DiagnosticSeverity : char {
  DS_Error,
  DS_Warning,
  DS_Remark,
  // A note attaches additional information to one of the previous diagnostic
  // types.
  DS_Note
};

/// Defines the different supported kinds of a diagnostic.
/// This enum should be extended with a new ID for each added concrete subclass.
enum DiagnosticKind {
  DK_InlineAsm,
  DK_ResourceLimit,
  DK_StackSize,
  DK_Linker,
  DK_Lowering,
  DK_DebugMetadataVersion,
  DK_DebugMetadataInvalid,
  DK_ISelFallback,
  DK_SampleProfile,
  DK_OptimizationRemark,
  DK_OptimizationRemarkMissed,
  DK_OptimizationRemarkAnalysis,
  DK_OptimizationRemarkAnalysisFPCommute,
  DK_OptimizationRemarkAnalysisAliasing,
  DK_OptimizationFailure,
  DK_FirstRemark = DK_OptimizationRemark,
  DK_LastRemark = DK_OptimizationFailure,
  DK_MachineOptimizationRemark,
  DK_MachineOptimizationRemarkMissed,
  DK_MachineOptimizationRemarkAnalysis,
  DK_FirstMachineRemark = DK_MachineOptimizationRemark,
  DK_LastMachineRemark = DK_MachineOptimizationRemarkAnalysis,
  DK_MIRParser,
  DK_PGOProfile,
  DK_Unsupported,
  DK_SrcMgr,
  DK_DontCall,
  DK_MisExpect,
  DK_FirstPluginKind // Must be last value to work with
                     // getNextAvailablePluginDiagnosticKind
};

/// Get the next available kind ID for a plugin diagnostic.
/// Each time this function is called, it returns a different number.
/// Therefore, a plugin that wants to "identify" its own classes
/// with a dynamic identifier need only call this method to get a new ID
/// and assign it to each of its classes.
/// The returned ID will be greater than or equal to DK_FirstPluginKind.
/// Thus, the plugin identifiers will not conflict with the
/// DiagnosticKind values.
int getNextAvailablePluginDiagnosticKind();
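
// A minimal sketch of how a plugin can define its own diagnostic class with a
// dynamically allocated kind (the class name and members here are
// hypothetical, not part of LLVM):
//
//   class MyPluginDiagnostic : public DiagnosticInfo {
//     static const int KindID;
//
//   public:
//     explicit MyPluginDiagnostic(DiagnosticSeverity Severity = DS_Warning)
//         : DiagnosticInfo(KindID, Severity) {}
//     void print(DiagnosticPrinter &DP) const override; // defined elsewhere
//     static bool classof(const DiagnosticInfo *DI) {
//       return DI->getKind() == KindID;
//     }
//   };
//   const int MyPluginDiagnostic::KindID =
//       getNextAvailablePluginDiagnosticKind();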

/// This is the base abstract class for diagnostic reporting in
/// the backend.
/// The print method must be overridden by subclasses to print a
/// user-friendly message in the client of the backend (let us call it a
/// frontend).
class DiagnosticInfo {
private:
  /// Kind defines the kind of report this is about.
  const /* DiagnosticKind */ int Kind;
  /// Severity gives the severity of the diagnostic.
  const DiagnosticSeverity Severity;

  virtual void anchor();
public:
  DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
      : Kind(Kind), Severity(Severity) {}

  virtual ~DiagnosticInfo() = default;

  /* DiagnosticKind */ int getKind() const { return Kind; }
  DiagnosticSeverity getSeverity() const { return Severity; }

  /// Print using the given \p DP a user-friendly message.
  /// This is the default message that will be printed to the user.
  /// It is used when the frontend does not directly take advantage
  /// of the information contained in fields of the subclasses.
  /// The printed message must not end with '.' nor start with a severity
  /// keyword.
  virtual void print(DiagnosticPrinter &DP) const = 0;
};

using DiagnosticHandlerFunction = std::function<void(const DiagnosticInfo &)>;
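
// A minimal sketch of a handler matching this signature (how it gets
// registered depends on the client; the LTO APIs, for example, accept a
// DiagnosticHandlerFunction directly):
//
//   DiagnosticHandlerFunction Handler = [](const DiagnosticInfo &DI) {
//     DiagnosticPrinterRawOStream DP(errs()); // from DiagnosticPrinter.h
//     DI.print(DP);
//     errs() << "\n";
//   };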

/// Diagnostic information for inline asm reporting.
/// This is basically a message and an optional location.
class DiagnosticInfoInlineAsm : public DiagnosticInfo {
private:
  /// Optional line information. 0 if not set.
  uint64_t LocCookie = 0;
  /// Message to be reported.
  const Twine &MsgStr;
  /// Optional origin of the problem.
  const Instruction *Instr = nullptr;

public:
  /// \p MsgStr is the message to be reported to the frontend.
  /// This class does not copy \p MsgStr, therefore the reference must be valid
  /// for the whole lifetime of the Diagnostic.
  DiagnosticInfoInlineAsm(const Twine &MsgStr,
                          DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfo(DK_InlineAsm, Severity), MsgStr(MsgStr) {}

  /// \p LocCookie if non-zero gives the line number for this report.
  /// \p MsgStr gives the message.
  /// This class does not copy \p MsgStr, therefore the reference must be valid
  /// for the whole lifetime of the Diagnostic.
  DiagnosticInfoInlineAsm(uint64_t LocCookie, const Twine &MsgStr,
                          DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(LocCookie),
        MsgStr(MsgStr) {}

  /// \p Instr gives the original instruction that triggered the diagnostic.
  /// \p MsgStr gives the message.
  /// This class does not copy \p MsgStr, therefore the reference must be valid
  /// for the whole lifetime of the Diagnostic.
  /// Same for \p I.
  DiagnosticInfoInlineAsm(const Instruction &I, const Twine &MsgStr,
                          DiagnosticSeverity Severity = DS_Error);

  uint64_t getLocCookie() const { return LocCookie; }
  const Twine &getMsgStr() const { return MsgStr; }
  const Instruction *getInstruction() const { return Instr; }

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_InlineAsm;
  }
};
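
// A typical emission sketch (Ctx is an LLVMContext and the message is
// hypothetical); LLVMContext::diagnose routes the report to the installed
// diagnostic handler. The Twine temporary outlives the synchronous call, so
// the lifetime requirement documented above is satisfied:
//
//   Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, "invalid asm operand"));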

/// Diagnostic information for debug metadata version reporting.
/// This is basically a module and a version.
class DiagnosticInfoDebugMetadataVersion : public DiagnosticInfo {
private:
  /// The module that is concerned by this debug metadata version diagnostic.
  const Module &M;
  /// The actual metadata version.
  unsigned MetadataVersion;

public:
  /// \p M is the module concerned by this debug metadata version diagnostic.
  /// \p MetadataVersion is the actual metadata version.
  DiagnosticInfoDebugMetadataVersion(const Module &M, unsigned MetadataVersion,
                                     DiagnosticSeverity Severity = DS_Warning)
      : DiagnosticInfo(DK_DebugMetadataVersion, Severity), M(M),
        MetadataVersion(MetadataVersion) {}

  const Module &getModule() const { return M; }
  unsigned getMetadataVersion() const { return MetadataVersion; }

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_DebugMetadataVersion;
  }
};

/// Diagnostic information for stripping invalid debug metadata.
class DiagnosticInfoIgnoringInvalidDebugMetadata : public DiagnosticInfo {
private:
  /// The module concerned by this invalid debug metadata diagnostic.
  const Module &M;

public:
  /// \p M is the module concerned by this invalid debug metadata diagnostic.
  DiagnosticInfoIgnoringInvalidDebugMetadata(
      const Module &M, DiagnosticSeverity Severity = DS_Warning)
      : DiagnosticInfo(DK_DebugMetadataInvalid, Severity), M(M) {}

  const Module &getModule() const { return M; }

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_DebugMetadataInvalid;
  }
};

/// Diagnostic information for the sample profiler.
class DiagnosticInfoSampleProfile : public DiagnosticInfo {
public:
  DiagnosticInfoSampleProfile(StringRef FileName, unsigned LineNum,
                              const Twine &Msg,
                              DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
        LineNum(LineNum), Msg(Msg) {}
  DiagnosticInfoSampleProfile(StringRef FileName, const Twine &Msg,
                              DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
        Msg(Msg) {}
  DiagnosticInfoSampleProfile(const Twine &Msg,
                              DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfo(DK_SampleProfile, Severity), Msg(Msg) {}

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_SampleProfile;
  }

  StringRef getFileName() const { return FileName; }
  unsigned getLineNum() const { return LineNum; }
  const Twine &getMsg() const { return Msg; }

private:
  /// Name of the input file associated with this diagnostic.
  StringRef FileName;

  /// Line number where the diagnostic occurred. If 0, no line number will
  /// be emitted in the message.
  unsigned LineNum = 0;

  /// Message to report.
  const Twine &Msg;
};

/// Diagnostic information for the PGO profiler.
class DiagnosticInfoPGOProfile : public DiagnosticInfo {
public:
  DiagnosticInfoPGOProfile(const char *FileName, const Twine &Msg,
                           DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfo(DK_PGOProfile, Severity), FileName(FileName), Msg(Msg) {}

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_PGOProfile;
  }

  const char *getFileName() const { return FileName; }
  const Twine &getMsg() const { return Msg; }

private:
  /// Name of the input file associated with this diagnostic.
  const char *FileName;

  /// Message to report.
  const Twine &Msg;
};

class DiagnosticLocation {
  DIFile *File = nullptr;
  unsigned Line = 0;
  unsigned Column = 0;

public:
  DiagnosticLocation() = default;
  DiagnosticLocation(const DebugLoc &DL);
  DiagnosticLocation(const DISubprogram *SP);

  bool isValid() const { return File; }
  /// Return the full path to the file.
  std::string getAbsolutePath() const;
  /// Return the file name relative to the compilation directory.
  StringRef getRelativePath() const;
  unsigned getLine() const { return Line; }
  unsigned getColumn() const { return Column; }
};

/// Common features for diagnostics with an associated location.
class DiagnosticInfoWithLocationBase : public DiagnosticInfo {
  void anchor() override;
public:
  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
  /// the location information to use in the diagnostic.
  DiagnosticInfoWithLocationBase(enum DiagnosticKind Kind,
                                 enum DiagnosticSeverity Severity,
                                 const Function &Fn,
                                 const DiagnosticLocation &Loc)
      : DiagnosticInfo(Kind, Severity), Fn(Fn), Loc(Loc) {}

  /// Return true if location information is available for this diagnostic.
  bool isLocationAvailable() const { return Loc.isValid(); }

  /// Return a string with the location information for this diagnostic
  /// in the format "file:line:col". If location information is not available,
  /// it returns "<unknown>:0:0".
  std::string getLocationStr() const;

  /// Return location information for this diagnostic in three parts:
  /// the relative source file path, line number and column.
  void getLocation(StringRef &RelativePath, unsigned &Line,
                   unsigned &Column) const;

  /// Return the absolute path to the file.
  std::string getAbsolutePath() const;

  const Function &getFunction() const { return Fn; }
  DiagnosticLocation getLocation() const { return Loc; }

private:
  /// Function where this diagnostic is triggered.
  const Function &Fn;

  /// Debug location where this diagnostic is triggered.
  DiagnosticLocation Loc;
};

/// Diagnostic information for resource limit (e.g. stack size) reporting.
/// This is basically a function and a size.
class DiagnosticInfoResourceLimit : public DiagnosticInfoWithLocationBase {
private:
  /// The function that is concerned by this resource limit diagnostic.
  const Function &Fn;

  /// Description of the resource type (e.g. stack size).
  const char *ResourceName;

  /// The computed resource usage.
  uint64_t ResourceSize;

  /// The threshold that was exceeded.
  uint64_t ResourceLimit;

public:
  /// \p Fn is the function concerned by this resource limit diagnostic.
  /// \p ResourceSize is the computed usage; \p ResourceLimit is the threshold
  /// it was checked against.
  DiagnosticInfoResourceLimit(const Function &Fn, const char *ResourceName,
                              uint64_t ResourceSize, uint64_t ResourceLimit,
                              DiagnosticSeverity Severity = DS_Warning,
                              DiagnosticKind Kind = DK_ResourceLimit);

  const Function &getFunction() const { return Fn; }
  const char *getResourceName() const { return ResourceName; }
  uint64_t getResourceSize() const { return ResourceSize; }
  uint64_t getResourceLimit() const { return ResourceLimit; }

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_ResourceLimit || DI->getKind() == DK_StackSize;
  }
};

class DiagnosticInfoStackSize : public DiagnosticInfoResourceLimit {
  void anchor() override;

public:
  DiagnosticInfoStackSize(const Function &Fn, uint64_t StackSize,
                          uint64_t StackLimit,
                          DiagnosticSeverity Severity = DS_Warning)
      : DiagnosticInfoResourceLimit(Fn, "stack frame size", StackSize,
                                    StackLimit, Severity, DK_StackSize) {}

  uint64_t getStackSize() const { return getResourceSize(); }
  uint64_t getStackLimit() const { return getResourceLimit(); }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_StackSize;
  }
};
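
// Emission sketch (the numbers are hypothetical): a backend that finds a
// function F whose frame exceeds a user-provided limit might report
//
//   Ctx.diagnose(DiagnosticInfoStackSize(F, /*StackSize=*/8192,
//                                        /*StackLimit=*/4096));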

/// Common features for diagnostics dealing with optimization remarks
/// that are used by both IR and MIR passes.
class DiagnosticInfoOptimizationBase : public DiagnosticInfoWithLocationBase {
public:
  /// Used to set IsVerbose via the stream interface.
  struct setIsVerbose {};

  /// When an instance of this is inserted into the stream, the arguments
  /// following will not appear in the remark printed in the compiler output
  /// (-Rpass) but only in the optimization record file
  /// (-fsave-optimization-record).
  struct setExtraArgs {};

  /// Used in the streaming interface as the general argument type.  It
  /// internally converts everything into a key-value pair.
  struct Argument {
    std::string Key;
    std::string Val;
    // If set, the debug location corresponding to the value.
    DiagnosticLocation Loc;

    explicit Argument(StringRef Str = "") : Key("String"), Val(Str) {}
    Argument(StringRef Key, const Value *V);
    Argument(StringRef Key, const Type *T);
    Argument(StringRef Key, StringRef S);
    Argument(StringRef Key, const char *S) : Argument(Key, StringRef(S)) {}
    Argument(StringRef Key, int N);
    Argument(StringRef Key, float N);
    Argument(StringRef Key, long N);
    Argument(StringRef Key, long long N);
    Argument(StringRef Key, unsigned N);
    Argument(StringRef Key, unsigned long N);
    Argument(StringRef Key, unsigned long long N);
    Argument(StringRef Key, ElementCount EC);
    Argument(StringRef Key, bool B) : Key(Key), Val(B ? "true" : "false") {}
    Argument(StringRef Key, DebugLoc dl);
    Argument(StringRef Key, InstructionCost C);
  };

  /// \p PassName is the name of the pass emitting this diagnostic. \p
  /// RemarkName is a textual identifier for the remark (single-word,
  /// camel-case). \p Fn is the function where the diagnostic is being emitted.
  /// \p Loc is the location information to use in the diagnostic. If line table
  /// information is available, the diagnostic will include the source code
  /// location.
  DiagnosticInfoOptimizationBase(enum DiagnosticKind Kind,
                                 enum DiagnosticSeverity Severity,
                                 const char *PassName, StringRef RemarkName,
                                 const Function &Fn,
                                 const DiagnosticLocation &Loc)
      : DiagnosticInfoWithLocationBase(Kind, Severity, Fn, Loc),
        PassName(PassName), RemarkName(RemarkName) {}

  void insert(StringRef S);
  void insert(Argument A);
  void insert(setIsVerbose V);
  void insert(setExtraArgs EA);

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  /// Return true if this optimization remark is enabled by one of
  /// the LLVM command line flags (-pass-remarks, -pass-remarks-missed,
  /// or -pass-remarks-analysis). Note that this only handles the LLVM
  /// flags. We cannot access Clang flags from here (they are handled
  /// in BackendConsumer::OptimizationRemarkHandler).
  virtual bool isEnabled() const = 0;

  StringRef getPassName() const { return PassName; }
  StringRef getRemarkName() const { return RemarkName; }
  std::string getMsg() const;
  std::optional<uint64_t> getHotness() const { return Hotness; }
  void setHotness(std::optional<uint64_t> H) { Hotness = H; }

  bool isVerbose() const { return IsVerbose; }

  ArrayRef<Argument> getArgs() const { return Args; }

  static bool classof(const DiagnosticInfo *DI) {
    return (DI->getKind() >= DK_FirstRemark &&
            DI->getKind() <= DK_LastRemark) ||
           (DI->getKind() >= DK_FirstMachineRemark &&
            DI->getKind() <= DK_LastMachineRemark);
  }

  bool isPassed() const {
    return (getKind() == DK_OptimizationRemark ||
            getKind() == DK_MachineOptimizationRemark);
  }

  bool isMissed() const {
    return (getKind() == DK_OptimizationRemarkMissed ||
            getKind() == DK_MachineOptimizationRemarkMissed);
  }

  bool isAnalysis() const {
    return (getKind() == DK_OptimizationRemarkAnalysis ||
            getKind() == DK_MachineOptimizationRemarkAnalysis);
  }

protected:
  /// Name of the pass that triggers this report. If this matches the
  /// regular expression given in -Rpass=regexp, then the remark will
  /// be emitted.
  const char *PassName;

  /// Textual identifier for the remark (single-word, camel-case). Can be used
  /// by external tools reading the output file for optimization remarks to
  /// identify the remark.
  StringRef RemarkName;

  /// If profile information is available, this is the number of times the
  /// corresponding code was executed in a profile instrumentation run.
  std::optional<uint64_t> Hotness;

  /// Arguments collected via the streaming interface.
  SmallVector<Argument, 4> Args;

  /// The remark is expected to be noisy.
  bool IsVerbose = false;

  /// If non-negative, the index of the first argument that appears only in
  /// the optimization records and not in the remark printed in the compiler
  /// output.
  int FirstExtraArgIndex = -1;
};

/// Allow the insertion operator to return the actual remark type rather than a
/// common base class.  This allows returning the result of the insertion
/// directly by value, e.g. return OptimizationRemarkAnalysis(...) << "blah".
template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               StringRef>
               S) {
  R.insert(S);
  return R;
}

/// Also allow r-value for the remark to allow insertion into a
/// temporarily-constructed remark.
template <class RemarkT>
RemarkT &
operator<<(RemarkT &&R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               StringRef>
               S) {
  R.insert(S);
  return R;
}

template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               DiagnosticInfoOptimizationBase::Argument>
               A) {
  R.insert(A);
  return R;
}

template <class RemarkT>
RemarkT &
operator<<(RemarkT &&R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               DiagnosticInfoOptimizationBase::Argument>
               A) {
  R.insert(A);
  return R;
}

template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               DiagnosticInfoOptimizationBase::setIsVerbose>
               V) {
  R.insert(V);
  return R;
}

template <class RemarkT>
RemarkT &
operator<<(RemarkT &&R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               DiagnosticInfoOptimizationBase::setIsVerbose>
               V) {
  R.insert(V);
  return R;
}

template <class RemarkT>
RemarkT &
operator<<(RemarkT &R,
           std::enable_if_t<
               std::is_base_of<DiagnosticInfoOptimizationBase, RemarkT>::value,
               DiagnosticInfoOptimizationBase::setExtraArgs>
               EA) {
  R.insert(EA);
  return R;
}
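
// Streaming sketch (pass name, remark name, and values are hypothetical).
// Everything inserted after setExtraArgs() is recorded only in the serialized
// optimization record, not in the -Rpass console output:
//
//   OptimizationRemarkMissed R("my-pass", "NotVectorized", &Inst);
//   R << "loop not vectorized: "
//     << DiagnosticInfoOptimizationBase::Argument("Reason", "unsafe call")
//     << DiagnosticInfoOptimizationBase::setExtraArgs()
//     << DiagnosticInfoOptimizationBase::Argument("Callee", CalleeName);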

/// Common features for diagnostics dealing with optimization remarks
/// that are used by IR passes.
class DiagnosticInfoIROptimization : public DiagnosticInfoOptimizationBase {
  void anchor() override;
public:
  /// \p PassName is the name of the pass emitting this diagnostic. \p
  /// RemarkName is a textual identifier for the remark (single-word,
  /// camel-case). \p Fn is the function where the diagnostic is being emitted.
  /// \p Loc is the location information to use in the diagnostic. If line table
  /// information is available, the diagnostic will include the source code
  /// location. \p CodeRegion is the IR value (currently a basic block) that
  /// the optimization operates on. This is currently used to provide run-time
  /// hotness information with PGO.
  DiagnosticInfoIROptimization(enum DiagnosticKind Kind,
                               enum DiagnosticSeverity Severity,
                               const char *PassName, StringRef RemarkName,
                               const Function &Fn,
                               const DiagnosticLocation &Loc,
                               const Value *CodeRegion = nullptr)
      : DiagnosticInfoOptimizationBase(Kind, Severity, PassName, RemarkName, Fn,
                                       Loc),
        CodeRegion(CodeRegion) {}

  /// This ctor variant allows a pass to build an optimization remark
  /// from an existing remark.
  ///
  /// This is useful when a transformation pass (e.g. LV) wants to emit a remark
  /// (\p Orig) generated by one of its analyses (e.g. LAA) as its own analysis
  /// remark.  The string \p Prepend will be emitted before the original
  /// message.
  DiagnosticInfoIROptimization(const char *PassName, StringRef Prepend,
                               const DiagnosticInfoIROptimization &Orig)
      : DiagnosticInfoOptimizationBase(
            (DiagnosticKind)Orig.getKind(), Orig.getSeverity(), PassName,
            Orig.RemarkName, Orig.getFunction(), Orig.getLocation()),
        CodeRegion(Orig.getCodeRegion()) {
    *this << Prepend;
    std::copy(Orig.Args.begin(), Orig.Args.end(), std::back_inserter(Args));
  }

  /// Legacy interface.
  /// \p PassName is the name of the pass emitting this diagnostic.
  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
  /// the location information to use in the diagnostic. If line table
  /// information is available, the diagnostic will include the source code
  /// location. \p Msg is the message to show. Note that this class does not
  /// copy this message, so this reference must be valid for the whole lifetime
  /// of the diagnostic.
  DiagnosticInfoIROptimization(enum DiagnosticKind Kind,
                               enum DiagnosticSeverity Severity,
                               const char *PassName, const Function &Fn,
                               const DiagnosticLocation &Loc, const Twine &Msg)
      : DiagnosticInfoOptimizationBase(Kind, Severity, PassName, "", Fn, Loc) {
    *this << Msg.str();
  }

  const Value *getCodeRegion() const { return CodeRegion; }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() >= DK_FirstRemark && DI->getKind() <= DK_LastRemark;
  }

private:
  /// The IR value (currently basic block) that the optimization operates on.
  /// This is currently used to provide run-time hotness information with PGO.
  const Value *CodeRegion = nullptr;
};

/// Diagnostic information for applied optimization remarks.
class OptimizationRemark : public DiagnosticInfoIROptimization {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass=, then the diagnostic will
  /// be emitted. \p RemarkName is a textual identifier for the remark (single-
  /// word, camel-case). \p Loc is the debug location and \p CodeRegion is the
  /// region that the optimization operates on (currently only block is
  /// supported).
  OptimizationRemark(const char *PassName, StringRef RemarkName,
                     const DiagnosticLocation &Loc, const Value *CodeRegion);

  /// Same as above, but the debug location and code region are derived from \p
  /// Instr.
  OptimizationRemark(const char *PassName, StringRef RemarkName,
                     const Instruction *Inst);

  /// Same as above, but the debug location and code region are derived from \p
  /// Func.
  OptimizationRemark(const char *PassName, StringRef RemarkName,
                     const Function *Func);

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemark;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;

private:
  /// This is deprecated now and only used by the function API below.
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass=, then the
  /// diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p Loc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic
  /// will include the source code location. \p Msg is the message to show.
  /// Note that this class does not copy this message, so this reference
  /// must be valid for the whole lifetime of the diagnostic.
  OptimizationRemark(const char *PassName, const Function &Fn,
                     const DiagnosticLocation &Loc, const Twine &Msg)
      : DiagnosticInfoIROptimization(DK_OptimizationRemark, DS_Remark, PassName,
                                     Fn, Loc, Msg) {}
};
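
// Emission sketch using OptimizationRemarkEmitter (from
// llvm/Analysis/OptimizationRemarkEmitter.h); the pass and remark names are
// hypothetical. The lambda keeps remark construction cheap when remarks are
// disabled:
//
//   ORE.emit([&]() {
//     return OptimizationRemark("my-pass", "Inlined", Call)
//            << "inlined callee into caller";
//   });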

/// Diagnostic information for missed-optimization remarks.
class OptimizationRemarkMissed : public DiagnosticInfoIROptimization {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass-missed=, then the
  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
  /// remark (single-word, camel-case). \p Loc is the debug location and \p
  /// CodeRegion is the region that the optimization operates on (currently only
  /// block is supported).
  OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
                           const DiagnosticLocation &Loc,
                           const Value *CodeRegion);

  /// Same as above but \p Inst is used to derive code region and debug
  /// location.
  OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
                           const Instruction *Inst);

  /// Same as above but \p F is used to derive code region and debug
  /// location.
  OptimizationRemarkMissed(const char *PassName, StringRef RemarkName,
                           const Function *F);

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemarkMissed;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;

private:
  /// This is deprecated now and only used by the function API below.
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass-missed=, then the
  /// diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p Loc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic
  /// will include the source code location. \p Msg is the message to show.
  /// Note that this class does not copy this message, so this reference
  /// must be valid for the whole lifetime of the diagnostic.
  OptimizationRemarkMissed(const char *PassName, const Function &Fn,
                           const DiagnosticLocation &Loc, const Twine &Msg)
      : DiagnosticInfoIROptimization(DK_OptimizationRemarkMissed, DS_Remark,
                                     PassName, Fn, Loc, Msg) {}
};

/// Diagnostic information for optimization analysis remarks.
class OptimizationRemarkAnalysis : public DiagnosticInfoIROptimization {
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass-analysis=, then the
  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
  /// remark (single-word, camel-case). \p Loc is the debug location and \p
  /// CodeRegion is the region that the optimization operates on (currently only
  /// block is supported).
  OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
                             const DiagnosticLocation &Loc,
                             const Value *CodeRegion);

  /// This ctor variant allows a pass to build an optimization remark
  /// from an existing remark.
  ///
  /// This is useful when a transformation pass (e.g. LV) wants to emit a remark
  /// (\p Orig) generated by one of its analyses (e.g. LAA) as its own analysis
  /// remark.  The string \p Prepend will be emitted before the original
  /// message.
  OptimizationRemarkAnalysis(const char *PassName, StringRef Prepend,
                             const OptimizationRemarkAnalysis &Orig)
      : DiagnosticInfoIROptimization(PassName, Prepend, Orig) {}

  /// Same as above but \p Inst is used to derive code region and debug
  /// location.
  OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
                             const Instruction *Inst);

  /// Same as above but \p F is used to derive code region and debug
  /// location.
  OptimizationRemarkAnalysis(const char *PassName, StringRef RemarkName,
                             const Function *F);

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemarkAnalysis;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;

  static const char *AlwaysPrint;

  bool shouldAlwaysPrint() const { return getPassName() == AlwaysPrint; }

protected:
  OptimizationRemarkAnalysis(enum DiagnosticKind Kind, const char *PassName,
                             const Function &Fn, const DiagnosticLocation &Loc,
                             const Twine &Msg)
      : DiagnosticInfoIROptimization(Kind, DS_Remark, PassName, Fn, Loc, Msg) {}

  OptimizationRemarkAnalysis(enum DiagnosticKind Kind, const char *PassName,
                             StringRef RemarkName,
                             const DiagnosticLocation &Loc,
                             const Value *CodeRegion);

private:
  /// This is deprecated now and only used by the function API below.
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass-analysis=, then
  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p Loc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic will
  /// include the source code location. \p Msg is the message to show. Note that
  /// this class does not copy this message, so this reference must be valid for
  /// the whole lifetime of the diagnostic.
  OptimizationRemarkAnalysis(const char *PassName, const Function &Fn,
                             const DiagnosticLocation &Loc, const Twine &Msg)
      : DiagnosticInfoIROptimization(DK_OptimizationRemarkAnalysis, DS_Remark,
                                     PassName, Fn, Loc, Msg) {}
};
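
// Example (illustrative sketch): re-emitting a remark produced by a
// subsidiary analysis under this pass's own name. `LAARemark` is a
// hypothetical OptimizationRemarkAnalysis received from LAA:
//
//   OptimizationRemarkAnalysis VectRemark("loop-vectorize",
//                                         "loop not vectorized: ", LAARemark);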

/// Diagnostic information for optimization analysis remarks related to
/// floating-point non-commutativity.
class OptimizationRemarkAnalysisFPCommute : public OptimizationRemarkAnalysis {
  void anchor() override;
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass-analysis=, then the
  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
  /// remark (single-word, camel-case). \p Loc is the debug location and \p
  /// CodeRegion is the region that the optimization operates on (currently
  /// only a basic block is supported). The front-end will append its own
  /// message related to options that address floating-point
  /// non-commutativity.
  OptimizationRemarkAnalysisFPCommute(const char *PassName,
                                      StringRef RemarkName,
                                      const DiagnosticLocation &Loc,
                                      const Value *CodeRegion)
      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisFPCommute,
                                   PassName, RemarkName, Loc, CodeRegion) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemarkAnalysisFPCommute;
  }

private:
  /// This is deprecated now and only used by the function API below.
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass-analysis=, then
  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p Loc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic will
  /// include the source code location. \p Msg is the message to show. The
  /// front-end will append its own message related to options that address
  /// floating-point non-commutativity. Note that this class does not copy this
  /// message, so this reference must be valid for the whole lifetime of the
  /// diagnostic.
  OptimizationRemarkAnalysisFPCommute(const char *PassName, const Function &Fn,
                                      const DiagnosticLocation &Loc,
                                      const Twine &Msg)
      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisFPCommute,
                                   PassName, Fn, Loc, Msg) {}
};

/// Diagnostic information for optimization analysis remarks related to
/// pointer aliasing.
class OptimizationRemarkAnalysisAliasing : public OptimizationRemarkAnalysis {
  void anchor() override;
public:
  /// \p PassName is the name of the pass emitting this diagnostic. If this name
  /// matches the regular expression given in -Rpass-analysis=, then the
  /// diagnostic will be emitted. \p RemarkName is a textual identifier for the
  /// remark (single-word, camel-case). \p Loc is the debug location and \p
  /// CodeRegion is the region that the optimization operates on (currently
  /// only a basic block is supported). The front-end will append its own
  /// message related to options that address pointer aliasing legality.
  OptimizationRemarkAnalysisAliasing(const char *PassName, StringRef RemarkName,
                                     const DiagnosticLocation &Loc,
                                     const Value *CodeRegion)
      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisAliasing,
                                   PassName, RemarkName, Loc, CodeRegion) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationRemarkAnalysisAliasing;
  }

private:
  /// This is deprecated now and only used by the function API below.
  /// \p PassName is the name of the pass emitting this diagnostic. If
  /// this name matches the regular expression given in -Rpass-analysis=, then
  /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
  /// is being emitted. \p Loc is the location information to use in the
  /// diagnostic. If line table information is available, the diagnostic will
  /// include the source code location. \p Msg is the message to show. The
  /// front-end will append its own message related to options that address
  /// pointer aliasing legality. Note that this class does not copy this
  /// message, so this reference must be valid for the whole lifetime of the
  /// diagnostic.
  OptimizationRemarkAnalysisAliasing(const char *PassName, const Function &Fn,
                                     const DiagnosticLocation &Loc,
                                     const Twine &Msg)
      : OptimizationRemarkAnalysis(DK_OptimizationRemarkAnalysisAliasing,
                                   PassName, Fn, Loc, Msg) {}
};

/// Diagnostic information for machine IR parser.
// FIXME: Remove this, use DiagnosticInfoSrcMgr instead.
class DiagnosticInfoMIRParser : public DiagnosticInfo {
  const SMDiagnostic &Diagnostic;

public:
  DiagnosticInfoMIRParser(DiagnosticSeverity Severity,
                          const SMDiagnostic &Diagnostic)
      : DiagnosticInfo(DK_MIRParser, Severity), Diagnostic(Diagnostic) {}

  const SMDiagnostic &getDiagnostic() const { return Diagnostic; }

  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_MIRParser;
  }
};

/// Diagnostic information for ISel fallback path.
class DiagnosticInfoISelFallback : public DiagnosticInfo {
  /// The function that is concerned by this diagnostic.
  const Function &Fn;

public:
  DiagnosticInfoISelFallback(const Function &Fn,
                             DiagnosticSeverity Severity = DS_Warning)
      : DiagnosticInfo(DK_ISelFallback, Severity), Fn(Fn) {}

  const Function &getFunction() const { return Fn; }

  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_ISelFallback;
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DiagnosticInfo, LLVMDiagnosticInfoRef)

/// Diagnostic information for optimization failures.
class DiagnosticInfoOptimizationFailure : public DiagnosticInfoIROptimization {
public:
  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
  /// the location information to use in the diagnostic. If line table
  /// information is available, the diagnostic will include the source code
  /// location. \p Msg is the message to show. Note that this class does not
  /// copy this message, so this reference must be valid for the whole lifetime
  /// of the diagnostic.
  DiagnosticInfoOptimizationFailure(const Function &Fn,
                                    const DiagnosticLocation &Loc,
                                    const Twine &Msg)
      : DiagnosticInfoIROptimization(DK_OptimizationFailure, DS_Warning,
                                     nullptr, Fn, Loc, Msg) {}

  /// \p PassName is the name of the pass emitting this diagnostic.  \p
  /// RemarkName is a textual identifier for the remark (single-word,
  /// camel-case).  \p Loc is the debug location and \p CodeRegion is the
  /// region that the optimization operates on (currently only a basic block
  /// is supported).
  DiagnosticInfoOptimizationFailure(const char *PassName, StringRef RemarkName,
                                    const DiagnosticLocation &Loc,
                                    const Value *CodeRegion);

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_OptimizationFailure;
  }

  /// \see DiagnosticInfoOptimizationBase::isEnabled.
  bool isEnabled() const override;
};

/// Diagnostic information for unsupported feature in backend.
class DiagnosticInfoUnsupported : public DiagnosticInfoWithLocationBase {
private:
  Twine Msg;

public:
  /// \p Fn is the function where the diagnostic is being emitted. \p Loc is
  /// the location information to use in the diagnostic. If line table
  /// information is available, the diagnostic will include the source code
  /// location. \p Msg is the message to show. Note that this class does not
  /// copy this message, so this reference must be valid for the whole lifetime
  /// of the diagnostic.
  DiagnosticInfoUnsupported(
      const Function &Fn, const Twine &Msg,
      const DiagnosticLocation &Loc = DiagnosticLocation(),
      DiagnosticSeverity Severity = DS_Error)
      : DiagnosticInfoWithLocationBase(DK_Unsupported, Severity, Fn, Loc),
        Msg(Msg) {}

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_Unsupported;
  }

  const Twine &getMessage() const { return Msg; }

  void print(DiagnosticPrinter &DP) const override;
};
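
// Example (illustrative sketch): a backend reporting an unsupported construct
// for a function `F`; the message text is hypothetical. The diagnostic is
// consumed within the diagnose() call, so the temporary Twine stays valid:
//
//   F.getContext().diagnose(
//       DiagnosticInfoUnsupported(F, "dynamic stack allocation unsupported"));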

/// Diagnostic information for MisExpect analysis.
class DiagnosticInfoMisExpect : public DiagnosticInfoWithLocationBase {
public:
  DiagnosticInfoMisExpect(const Instruction *Inst, Twine &Msg);

  /// \see DiagnosticInfo::print.
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_MisExpect;
  }

  const Twine &getMsg() const { return Msg; }

private:
  /// Message to report.
  const Twine &Msg;
};

static DiagnosticSeverity getDiagnosticSeverity(SourceMgr::DiagKind DK) {
  switch (DK) {
  case llvm::SourceMgr::DK_Error:
    return DS_Error;
  case llvm::SourceMgr::DK_Warning:
    return DS_Warning;
  case llvm::SourceMgr::DK_Note:
    return DS_Note;
  case llvm::SourceMgr::DK_Remark:
    return DS_Remark;
  }
  llvm_unreachable("unknown SourceMgr::DiagKind");
}

/// Diagnostic information for SMDiagnostic reporting.
class DiagnosticInfoSrcMgr : public DiagnosticInfo {
  const SMDiagnostic &Diagnostic;
  StringRef ModName;

  // For inlineasm !srcloc translation.
  bool InlineAsmDiag;
  unsigned LocCookie;

public:
  DiagnosticInfoSrcMgr(const SMDiagnostic &Diagnostic, StringRef ModName,
                       bool InlineAsmDiag = true, unsigned LocCookie = 0)
      : DiagnosticInfo(DK_SrcMgr, getDiagnosticSeverity(Diagnostic.getKind())),
        Diagnostic(Diagnostic), ModName(ModName), InlineAsmDiag(InlineAsmDiag),
        LocCookie(LocCookie) {}

  StringRef getModuleName() const { return ModName; }
  bool isInlineAsmDiag() const { return InlineAsmDiag; }
  const SMDiagnostic &getSMDiag() const { return Diagnostic; }
  unsigned getLocCookie() const { return LocCookie; }
  void print(DiagnosticPrinter &DP) const override;

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_SrcMgr;
  }
};

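/// Emit a DiagnosticInfoDontCall diagnostic for \p CI if its callee carries a
/// "dontcall-error" or "dontcall-warn" attribute.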
void diagnoseDontCall(const CallInst &CI);

class DiagnosticInfoDontCall : public DiagnosticInfo {
  StringRef CalleeName;
  StringRef Note;
  unsigned LocCookie;

public:
  DiagnosticInfoDontCall(StringRef CalleeName, StringRef Note,
                         DiagnosticSeverity DS, unsigned LocCookie)
      : DiagnosticInfo(DK_DontCall, DS), CalleeName(CalleeName), Note(Note),
        LocCookie(LocCookie) {}
  StringRef getFunctionName() const { return CalleeName; }
  StringRef getNote() const { return Note; }
  unsigned getLocCookie() const { return LocCookie; }
  void print(DiagnosticPrinter &DP) const override;
  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == DK_DontCall;
  }
};

} // end namespace llvm

#endif // LLVM_IR_DIAGNOSTICINFO_H
//===- InstIterator.h - Classes for inst iteration --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions of two iterators for iterating over the
// instructions in a function.  This is effectively a wrapper around a two level
// iterator that can probably be genericized later.
//
// Note that this iterator gets invalidated any time that basic blocks or
// instructions are moved around.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTITERATOR_H
#define LLVM_IR_INSTITERATOR_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include <iterator>

namespace llvm {

// This class implements inst_begin() & inst_end() for
// inst_iterator and const_inst_iterator.
//
template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
  using BBty = BB_t;
  using BBIty = BB_i_t;
  using BIty = BI_t;
  using IIty = II_t;
  BB_t *BBs; // BasicBlocksType
  BB_i_t BB; // BasicBlocksType::iterator
  BI_t BI;   // BasicBlock::iterator

public:
  using iterator_category = std::bidirectional_iterator_tag;
  using value_type = IIty;
  using difference_type = signed;
  using pointer = IIty *;
  using reference = IIty &;

  // Default constructor
  InstIterator() = default;

  // Copy constructor...
  template<typename A, typename B, typename C, typename D>
  InstIterator(const InstIterator<A,B,C,D> &II)
    : BBs(II.BBs), BB(II.BB), BI(II.BI) {}

  template<typename A, typename B, typename C, typename D>
  InstIterator(InstIterator<A,B,C,D> &II)
    : BBs(II.BBs), BB(II.BB), BI(II.BI) {}

  template<class M> InstIterator(M &m)
    : BBs(&m.getBasicBlockList()), BB(BBs->begin()) {    // begin ctor
    if (BB != BBs->end()) {
      BI = BB->begin();
      advanceToNextBB();
    }
  }

  template<class M> InstIterator(M &m, bool)
    : BBs(&m.getBasicBlockList()), BB(BBs->end()) {    // end ctor
  }

  // Accessors to get at the underlying iterators...
  inline BBIty &getBasicBlockIterator()  { return BB; }
  inline BIty  &getInstructionIterator() { return BI; }

  inline reference operator*()  const { return *BI; }
  inline pointer operator->() const { return &operator*(); }

  inline bool operator==(const InstIterator &y) const {
    return BB == y.BB && (BB == BBs->end() || BI == y.BI);
  }
  inline bool operator!=(const InstIterator& y) const {
    return !operator==(y);
  }

  InstIterator& operator++() {
    ++BI;
    advanceToNextBB();
    return *this;
  }
  inline InstIterator operator++(int) {
    InstIterator tmp = *this; ++*this; return tmp;
  }

  InstIterator& operator--() {
    while (BB == BBs->end() || BI == BB->begin()) {
      --BB;
      BI = BB->end();
    }
    --BI;
    return *this;
  }
  inline InstIterator operator--(int) {
    InstIterator tmp = *this; --*this; return tmp;
  }

  inline bool atEnd() const { return BB == BBs->end(); }

private:
  inline void advanceToNextBB() {
    // The only way that the II could be broken is if it is now pointing to
    // the end() of the current BasicBlock and there are successor BBs.
    while (BI == BB->end()) {
      ++BB;
      if (BB == BBs->end()) break;
      BI = BB->begin();
    }
  }
};

using inst_iterator =
    InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
                 BasicBlock::iterator, Instruction>;
using const_inst_iterator =
    InstIterator<const SymbolTableList<BasicBlock>,
                 Function::const_iterator, BasicBlock::const_iterator,
                 const Instruction>;
using inst_range = iterator_range<inst_iterator>;
using const_inst_range = iterator_range<const_inst_iterator>;

inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
inline inst_iterator inst_end(Function *F)   { return inst_iterator(*F, true); }
inline inst_range instructions(Function *F) {
  return inst_range(inst_begin(F), inst_end(F));
}
inline const_inst_iterator inst_begin(const Function *F) {
  return const_inst_iterator(*F);
}
inline const_inst_iterator inst_end(const Function *F) {
  return const_inst_iterator(*F, true);
}
inline const_inst_range instructions(const Function *F) {
  return const_inst_range(inst_begin(F), inst_end(F));
}
inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); }
inline inst_iterator inst_end(Function &F)   { return inst_iterator(F, true); }
inline inst_range instructions(Function &F) {
  return inst_range(inst_begin(F), inst_end(F));
}
inline const_inst_iterator inst_begin(const Function &F) {
  return const_inst_iterator(F);
}
inline const_inst_iterator inst_end(const Function &F) {
  return const_inst_iterator(F, true);
}
inline const_inst_range instructions(const Function &F) {
  return const_inst_range(inst_begin(F), inst_end(F));
}
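
// Example (illustrative sketch): visiting every instruction in a function `F`
// without writing the nested block/instruction loops by hand. `NumCalls` is a
// hypothetical counter:
//
//   for (Instruction &I : instructions(F))
//     if (isa<CallInst>(&I))
//       ++NumCalls;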

} // end namespace llvm

#endif // LLVM_IR_INSTITERATOR_H
//===-- llvm/Constants.h - Constant class subclass definitions --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations for the subclasses of Constant,
/// which represent the different flavors of constant values that live in LLVM.
/// Note that Constants are immutable (once created they never change) and are
/// fully shared by structural equivalence.  This means that two structurally
/// equivalent constants will always have the same address.  Constants are
/// created on demand as needed and never deleted: thus clients don't have to
/// worry about the lifetime of the objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CONSTANTS_H
#define LLVM_IR_CONSTANTS_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>

namespace llvm {

template <class ConstantClass> struct ConstantAggrKeyType;

/// Base class for constants with no operands.
///
/// These constants have no operands; they represent their data directly.
/// Since they can be in use by unrelated modules (and are never based on
/// GlobalValues), it never makes sense to RAUW them.
class ConstantData : public Constant {
  friend class Constant;

  Value *handleOperandChangeImpl(Value *From, Value *To) {
    llvm_unreachable("Constant data does not have operands!");
  }

protected:
  explicit ConstantData(Type *Ty, ValueTy VT) : Constant(Ty, VT, nullptr, 0) {}

  void *operator new(size_t S) { return User::operator new(S, 0); }

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  ConstantData(const ConstantData &) = delete;

  /// Methods to support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    return V->getValueID() >= ConstantDataFirstVal &&
           V->getValueID() <= ConstantDataLastVal;
  }
};

//===----------------------------------------------------------------------===//
/// This is the shared class of boolean and integer constants. This class
/// represents both boolean and integral constants.
/// Class for constant integers.
class ConstantInt final : public ConstantData {
  friend class Constant;

  APInt Val;

  ConstantInt(IntegerType *Ty, const APInt &V);

  void destroyConstantImpl();

public:
  ConstantInt(const ConstantInt &) = delete;

  static ConstantInt *getTrue(LLVMContext &Context);
  static ConstantInt *getFalse(LLVMContext &Context);
  static ConstantInt *getBool(LLVMContext &Context, bool V);
  static Constant *getTrue(Type *Ty);
  static Constant *getFalse(Type *Ty);
  static Constant *getBool(Type *Ty, bool V);

  /// If Ty is a vector type, return a Constant with a splat of the given
  /// value. Otherwise return a ConstantInt for the given value.
  static Constant *get(Type *Ty, uint64_t V, bool IsSigned = false);

  /// Return a ConstantInt with the specified integer value for the specified
  /// type. If the type is wider than 64 bits, the value will be zero-extended
  /// to fit the type, unless IsSigned is true, in which case the value will
  /// be interpreted as a 64-bit signed integer and sign-extended to fit
  /// the type.
  /// Get a ConstantInt for a specific value.
  static ConstantInt *get(IntegerType *Ty, uint64_t V, bool IsSigned = false);

  /// Return a ConstantInt with the specified value for the specified type. The
  /// value V will be canonicalized to an unsigned APInt. Accessing it with
  /// either getSExtValue() or getZExtValue() will yield a correctly sized and
  /// signed value for the type Ty.
  /// Get a ConstantInt for a specific signed value.
  static ConstantInt *getSigned(IntegerType *Ty, int64_t V) {
    return get(Ty, V, true);
  }
  static Constant *getSigned(Type *Ty, int64_t V) {
    return get(Ty, V, true);
  }

  /// Return a ConstantInt with the specified value and an implied Type. The
  /// type is the integer type that corresponds to the bit width of the value.
  static ConstantInt *get(LLVMContext &Context, const APInt &V);

  /// Return a ConstantInt constructed from the string \p Str with the given
  /// radix.
  static ConstantInt *get(IntegerType *Ty, StringRef Str, uint8_t Radix);

  /// If Ty is a vector type, return a Constant with a splat of the given
  /// value. Otherwise return a ConstantInt for the given value.
  static Constant *get(Type *Ty, const APInt &V);

  /// Return the constant as an APInt value reference. This allows clients to
  /// obtain a full-precision copy of the value.
  /// Return the constant's value.
  inline const APInt &getValue() const { return Val; }

  /// getBitWidth - Return the bitwidth of this constant.
  unsigned getBitWidth() const { return Val.getBitWidth(); }

  /// Return the constant as a 64-bit unsigned integer value after it
  /// has been zero extended as appropriate for the type of this constant. Note
  /// that this method can assert if the value does not fit in 64 bits.
  /// Return the zero extended value.
  inline uint64_t getZExtValue() const { return Val.getZExtValue(); }

  /// Return the constant as a 64-bit integer value after it has been sign
  /// extended as appropriate for the type of this constant. Note that
  /// this method can assert if the value does not fit in 64 bits.
  /// Return the sign extended value.
  inline int64_t getSExtValue() const { return Val.getSExtValue(); }

  /// Return the constant as an llvm::MaybeAlign.
  /// Note that this method can assert if the value does not fit in 64 bits or
  /// is not a power of two.
  inline MaybeAlign getMaybeAlignValue() const {
    return MaybeAlign(getZExtValue());
  }

  /// Return the constant as an llvm::Align, interpreting `0` as `Align(1)`.
  /// Note that this method can assert if the value does not fit in 64 bits or
  /// is not a power of two.
  inline Align getAlignValue() const {
    return getMaybeAlignValue().valueOrOne();
  }

  /// A helper method that can be used to determine if the constant contained
  /// within is equal to the given integer value.  This only works for very
  /// small values, because this is all that can be represented with all types.
  /// Determine if this constant's value is the same as an unsigned char.
  bool equalsInt(uint64_t V) const { return Val == V; }

  /// getType - Specialize the getType() method to always return an IntegerType,
  /// which reduces the amount of casting needed in parts of the compiler.
  ///
  inline IntegerType *getType() const {
    return cast<IntegerType>(Value::getType());
  }

  /// This static method returns true if the type Ty is big enough to
  /// represent the value V. This can be used to avoid having the get method
  /// assert when V is larger than Ty can represent. Note that there are two
  /// versions of this method, one for unsigned and one for signed integers.
  /// Although ConstantInt canonicalizes everything to an unsigned integer,
  /// the signed version avoids callers having to convert a signed quantity
  /// to the appropriate unsigned type before calling the method.
  /// @returns true if V is a valid value for type Ty
  /// Determine if the value is in range for the given type.
  static bool isValueValidForType(Type *Ty, uint64_t V);
  static bool isValueValidForType(Type *Ty, int64_t V);

  bool isNegative() const { return Val.isNegative(); }

  /// This is just a convenience method to make client code smaller for a
  /// common code. It also correctly performs the comparison without the
  /// potential for an assertion from getZExtValue().
  bool isZero() const { return Val.isZero(); }

  /// This is just a convenience method to make client code smaller for a
  /// common case. It also correctly performs the comparison without the
  /// potential for an assertion from getZExtValue().
  /// Determine if the value is one.
  bool isOne() const { return Val.isOne(); }

  /// This function will return true iff every bit in this constant is set
  /// to true.
  /// @returns true iff this constant's bits are all set to true.
  /// Determine if the value is all ones.
  bool isMinusOne() const { return Val.isAllOnes(); }

  /// This function will return true iff this constant represents the largest
  /// value that may be represented by the constant's type.
  /// @returns true iff this is the largest value that may be represented
  /// by this type.
  /// Determine if the value is maximal.
  bool isMaxValue(bool IsSigned) const {
    if (IsSigned)
      return Val.isMaxSignedValue();
    else
      return Val.isMaxValue();
  }

  /// This function will return true iff this constant represents the smallest
  /// value that may be represented by this constant's type.
  /// @returns true if this is the smallest value that may be represented by
  /// this type.
  /// Determine if the value is minimal.
  bool isMinValue(bool IsSigned) const {
    if (IsSigned)
      return Val.isMinSignedValue();
    else
      return Val.isMinValue();
  }

  /// This function will return true iff this constant represents a value with
  /// active bits bigger than 64 bits or a value greater than the given uint64_t
  /// value.
  /// @returns true iff this constant is greater or equal to the given number.
  /// Determine if the value is greater or equal to the given number.
  bool uge(uint64_t Num) const { return Val.uge(Num); }

  /// getLimitedValue - If the value is smaller than the specified limit,
  /// return it, otherwise return the limit value.  This causes the value
  /// to saturate to the limit.
  /// @returns the min of the value of the constant and the specified value
  /// Get the constant's value with a saturation limit
  uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const {
    return Val.getLimitedValue(Limit);
  }

  /// Methods to support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantIntVal;
  }
};
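
// Example (illustrative sketch): materializing integer constants in a
// hypothetical LLVMContext `Ctx`:
//
//   ConstantInt *C42 = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
//   ConstantInt *CM7 = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -7);
//   assert(CM7->getSExtValue() == -7);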

//===----------------------------------------------------------------------===//
/// ConstantFP - Floating Point Values [float, double]
///
class ConstantFP final : public ConstantData {
  friend class Constant;

  APFloat Val;

  ConstantFP(Type *Ty, const APFloat &V);

  void destroyConstantImpl();

public:
  ConstantFP(const ConstantFP &) = delete;

  /// This returns a ConstantFP, or a vector containing a splat of a ConstantFP,
  /// for the specified value in the specified type. This should only be used
  /// for simple constant values like 2.0/1.0 etc, that are known-valid both as
  /// host double and as the target format.
  static Constant *get(Type *Ty, double V);

  /// If Ty is a vector type, return a Constant with a splat of the given
  /// value. Otherwise return a ConstantFP for the given value.
  static Constant *get(Type *Ty, const APFloat &V);

  static Constant *get(Type *Ty, StringRef Str);
  static ConstantFP *get(LLVMContext &Context, const APFloat &V);
  static Constant *getNaN(Type *Ty, bool Negative = false,
                          uint64_t Payload = 0);
  static Constant *getQNaN(Type *Ty, bool Negative = false,
                           APInt *Payload = nullptr);
  static Constant *getSNaN(Type *Ty, bool Negative = false,
                           APInt *Payload = nullptr);
  static Constant *getZero(Type *Ty, bool Negative = false);
  static Constant *getNegativeZero(Type *Ty) { return getZero(Ty, true); }
  static Constant *getInfinity(Type *Ty, bool Negative = false);

  /// Return true if Ty is big enough to represent V.
  static bool isValueValidForType(Type *Ty, const APFloat &V);
  inline const APFloat &getValueAPF() const { return Val; }
  inline const APFloat &getValue() const { return Val; }

  /// Return true if the value is positive or negative zero.
  bool isZero() const { return Val.isZero(); }

  /// Return true if the sign bit is set.
  bool isNegative() const { return Val.isNegative(); }

  /// Return true if the value is infinity
  bool isInfinity() const { return Val.isInfinity(); }

  /// Return true if the value is a NaN.
  bool isNaN() const { return Val.isNaN(); }

  /// We don't rely on operator== working on double values, as it returns true
  /// for things that are clearly not equal, like -0.0 and 0.0.
  /// As such, this method can be used to do an exact bit-for-bit comparison of
  /// two floating point values.  The version with a double operand is retained
  /// because it's so convenient to write isExactlyValue(2.0), but please use
  /// it only for simple constants.
  bool isExactlyValue(const APFloat &V) const;

  bool isExactlyValue(double V) const {
    bool ignored;
    APFloat FV(V);
    FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
    return isExactlyValue(FV);
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantFPVal;
  }
};
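
// Example (illustrative sketch): materializing floating-point constants in a
// hypothetical LLVMContext `Ctx`:
//
//   Constant *Two = ConstantFP::get(Type::getDoubleTy(Ctx), 2.0);
//   Constant *NegInf =
//       ConstantFP::getInfinity(Type::getFloatTy(Ctx), /*Negative=*/true);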

//===----------------------------------------------------------------------===//
/// All zero aggregate value
///
class ConstantAggregateZero final : public ConstantData {
  friend class Constant;

  explicit ConstantAggregateZero(Type *Ty)
      : ConstantData(Ty, ConstantAggregateZeroVal) {}

  void destroyConstantImpl();

public:
  ConstantAggregateZero(const ConstantAggregateZero &) = delete;

  static ConstantAggregateZero *get(Type *Ty);

  /// If this CAZ has array or vector type, return a zero with the right element
  /// type.
  Constant *getSequentialElement() const;

  /// If this CAZ has struct type, return a zero with the right element type for
  /// the specified element.
  Constant *getStructElement(unsigned Elt) const;

  /// Return a zero of the right value for the specified GEP index if we can,
  /// otherwise return null (e.g. if C is a ConstantExpr).
  Constant *getElementValue(Constant *C) const;

  /// Return a zero of the right value for the specified GEP index.
  Constant *getElementValue(unsigned Idx) const;

  /// Return the number of elements in the array, vector, or struct.
  ElementCount getElementCount() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  ///
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantAggregateZeroVal;
  }
};

/// Base class for aggregate constants (with operands).
///
/// These constants are aggregates of other constants, which are stored as
/// operands.
///
/// Subclasses are \a ConstantStruct, \a ConstantArray, and \a
/// ConstantVector.
///
/// \note Some subclasses of \a ConstantData are semantically aggregates --
/// such as \a ConstantDataArray -- but are not subclasses of this because they
/// do not use operands.
class ConstantAggregate : public Constant {
protected:
  ConstantAggregate(Type *T, ValueTy VT, ArrayRef<Constant *> V);

public:
  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() >= ConstantAggregateFirstVal &&
           V->getValueID() <= ConstantAggregateLastVal;
  }
};

template <>
struct OperandTraits<ConstantAggregate>
    : public VariadicOperandTraits<ConstantAggregate> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantAggregate, Constant)

//===----------------------------------------------------------------------===//
/// ConstantArray - Constant Array Declarations
///
class ConstantArray final : public ConstantAggregate {
  friend struct ConstantAggrKeyType<ConstantArray>;
  friend class Constant;

  ConstantArray(ArrayType *T, ArrayRef<Constant *> Val);

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

public:
  // ConstantArray accessors
  static Constant *get(ArrayType *T, ArrayRef<Constant *> V);

private:
  static Constant *getImpl(ArrayType *T, ArrayRef<Constant *> V);

public:
  /// Specialize the getType() method to always return an ArrayType,
  /// which reduces the amount of casting needed in parts of the compiler.
  inline ArrayType *getType() const {
    return cast<ArrayType>(Value::getType());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantArrayVal;
  }
};

//===----------------------------------------------------------------------===//
// Constant Struct Declarations
//
class ConstantStruct final : public ConstantAggregate {
  friend struct ConstantAggrKeyType<ConstantStruct>;
  friend class Constant;

  ConstantStruct(StructType *T, ArrayRef<Constant *> Val);

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

public:
  // ConstantStruct accessors
  static Constant *get(StructType *T, ArrayRef<Constant *> V);

  template <typename... Csts>
  static std::enable_if_t<are_base_of<Constant, Csts...>::value, Constant *>
  get(StructType *T, Csts *...Vs) {
    return get(T, ArrayRef<Constant *>({Vs...}));
  }

  /// Return an anonymous struct that has the specified elements.
  /// If the struct is possibly empty, then you must specify a context.
  static Constant *getAnon(ArrayRef<Constant *> V, bool Packed = false) {
    return get(getTypeForElements(V, Packed), V);
  }
  static Constant *getAnon(LLVMContext &Ctx, ArrayRef<Constant *> V,
                           bool Packed = false) {
    return get(getTypeForElements(Ctx, V, Packed), V);
  }

  /// Return an anonymous struct type to use for a constant with the specified
  /// set of elements. The list must not be empty.
  static StructType *getTypeForElements(ArrayRef<Constant *> V,
                                        bool Packed = false);
  /// This version of the method allows an empty list.
  static StructType *getTypeForElements(LLVMContext &Ctx,
                                        ArrayRef<Constant *> V,
                                        bool Packed = false);

  /// Specialization - reduce amount of casting.
  inline StructType *getType() const {
    return cast<StructType>(Value::getType());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantStructVal;
  }
};
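
// Example (illustrative sketch): building the anonymous struct constant
// { i32 1, i32 2 } in a hypothetical LLVMContext `Ctx`:
//
//   IntegerType *I32 = Type::getInt32Ty(Ctx);
//   Constant *Fields[] = {ConstantInt::get(I32, 1), ConstantInt::get(I32, 2)};
//   Constant *S = ConstantStruct::getAnon(Fields);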

//===----------------------------------------------------------------------===//
/// Constant Vector Declarations
///
class ConstantVector final : public ConstantAggregate {
  friend struct ConstantAggrKeyType<ConstantVector>;
  friend class Constant;

  ConstantVector(VectorType *T, ArrayRef<Constant *> Val);

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

public:
  // ConstantVector accessors
  static Constant *get(ArrayRef<Constant *> V);

private:
  static Constant *getImpl(ArrayRef<Constant *> V);

public:
  /// Return a ConstantVector with the specified constant in each element.
  /// Note that this might not return an instance of ConstantVector.
  static Constant *getSplat(ElementCount EC, Constant *Elt);

  /// Specialize the getType() method to always return a FixedVectorType,
  /// which reduces the amount of casting needed in parts of the compiler.
  inline FixedVectorType *getType() const {
    return cast<FixedVectorType>(Value::getType());
  }

  /// If all elements of the vector constant have the same value, return that
  /// value. Otherwise, return nullptr. Ignore undefined elements by setting
  /// AllowUndefs to true.
  Constant *getSplatValue(bool AllowUndefs = false) const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantVectorVal;
  }
};

//===----------------------------------------------------------------------===//
/// A constant pointer value that points to null
///
class ConstantPointerNull final : public ConstantData {
  friend class Constant;

  explicit ConstantPointerNull(PointerType *T)
      : ConstantData(T, Value::ConstantPointerNullVal) {}

  void destroyConstantImpl();

public:
  ConstantPointerNull(const ConstantPointerNull &) = delete;

  /// Static factory methods - Return objects of the specified value
  static ConstantPointerNull *get(PointerType *T);

  /// Specialize the getType() method to always return a PointerType,
  /// which reduces the amount of casting needed in parts of the compiler.
  inline PointerType *getType() const {
    return cast<PointerType>(Value::getType());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantPointerNullVal;
  }
};

//===----------------------------------------------------------------------===//
/// ConstantDataSequential - A vector or array constant whose element type is a
/// simple 1/2/4/8-byte integer or half/bfloat/float/double, and whose elements
/// are just simple data values (i.e. ConstantInt/ConstantFP).  This Constant
/// node has no operands because it stores all of the elements of the constant
/// as densely packed data, instead of as Value*'s.
///
/// This is the common base class of ConstantDataArray and ConstantDataVector.
///
class ConstantDataSequential : public ConstantData {
  friend class LLVMContextImpl;
  friend class Constant;

  /// A pointer to the bytes underlying this constant (which is owned by the
  /// uniquing StringMap).
  const char *DataElements;

  /// This forms a linked list of ConstantDataSequential nodes that have
  /// the same value but different type.  For example, 0,0,0,1 could be a 4
  /// element array of i8, or a 1-element array of i32.  They'll both end up in
  /// the same StringMap bucket, linked up.
  std::unique_ptr<ConstantDataSequential> Next;

  void destroyConstantImpl();

protected:
  explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
      : ConstantData(ty, VT), DataElements(Data) {}

  static Constant *getImpl(StringRef Bytes, Type *Ty);

public:
  ConstantDataSequential(const ConstantDataSequential &) = delete;

  /// Return true if a ConstantDataSequential can be formed with a vector or
  /// array of the specified element type.
  /// ConstantDataArray only works with normal float and int types that are
  /// stored densely in memory, not with things like i42 or x86_f80.
  static bool isElementTypeCompatible(Type *Ty);

  /// If this is a sequential container of integers (of any size), return the
  /// specified element in the low bits of a uint64_t.
  uint64_t getElementAsInteger(unsigned i) const;

  /// If this is a sequential container of integers (of any size), return the
  /// specified element as an APInt.
  APInt getElementAsAPInt(unsigned i) const;

  /// If this is a sequential container of floating point type, return the
  /// specified element as an APFloat.
  APFloat getElementAsAPFloat(unsigned i) const;

  /// If this is a sequential container of floats, return the specified element
  /// as a float.
  float getElementAsFloat(unsigned i) const;

  /// If this is a sequential container of doubles, return the specified
  /// element as a double.
  double getElementAsDouble(unsigned i) const;

  /// Return a Constant for a specified index's element.
  /// Note that this has to compute a new constant to return, so it isn't as
  /// efficient as getElementAsInteger/Float/Double.
  Constant *getElementAsConstant(unsigned i) const;

  /// Return the element type of the array/vector.
  Type *getElementType() const;

  /// Return the number of elements in the array or vector.
  unsigned getNumElements() const;

  /// Return the size (in bytes) of each element in the array/vector.
  /// The size of the elements is known to be a multiple of one byte.
  uint64_t getElementByteSize() const;

  /// This method returns true if this is an array of \p CharSize integers.
  bool isString(unsigned CharSize = 8) const;

  /// This method returns true if the array "isString", ends with a null byte,
  /// and does not contain any other null bytes.
  bool isCString() const;

  /// If this array is isString(), then this method returns the array as a
  /// StringRef. Otherwise, it asserts out.
  StringRef getAsString() const {
    assert(isString() && "Not a string");
    return getRawDataValues();
  }

  /// If this array is isCString(), then this method returns the array (without
  /// the trailing null byte) as a StringRef. Otherwise, it asserts out.
  StringRef getAsCString() const {
    assert(isCString() && "Isn't a C string");
    StringRef Str = getAsString();
    return Str.substr(0, Str.size() - 1);
  }

  /// Return the raw, underlying, bytes of this data. Note that this is an
  /// extremely tricky thing to work with, as it exposes the host endianness of
  /// the data elements.
  StringRef getRawDataValues() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantDataArrayVal ||
           V->getValueID() == ConstantDataVectorVal;
  }

private:
  const char *getElementPointer(unsigned Elt) const;
};

//===----------------------------------------------------------------------===//
/// An array constant whose element type is a simple 1/2/4/8-byte integer or
/// float/double, and whose elements are just simple data values
/// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it
/// stores all of the elements of the constant as densely packed data, instead
/// of as Value*'s.
class ConstantDataArray final : public ConstantDataSequential {
  friend class ConstantDataSequential;

  explicit ConstantDataArray(Type *ty, const char *Data)
      : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}

public:
  ConstantDataArray(const ConstantDataArray &) = delete;

  /// get() constructor - Return a constant with array type with an element
  /// count and element type matching the ArrayRef passed in.  Note that this
  /// can return a ConstantAggregateZero object.
  template <typename ElementTy>
  static Constant *get(LLVMContext &Context, ArrayRef<ElementTy> Elts) {
    const char *Data = reinterpret_cast<const char *>(Elts.data());
    return getRaw(StringRef(Data, Elts.size() * sizeof(ElementTy)), Elts.size(),
                  Type::getScalarTy<ElementTy>(Context));
  }

  /// get() constructor - ArrayTy needs to be compatible with
  /// ArrayRef<ElementTy>. Calls get(LLVMContext, ArrayRef<ElementTy>).
  template <typename ArrayTy>
  static Constant *get(LLVMContext &Context, ArrayTy &Elts) {
    return ConstantDataArray::get(Context, ArrayRef(Elts));
  }

  /// getRaw() constructor - Return a constant with array type with an element
  /// count and element type matching the NumElements and ElementTy parameters
  /// passed in. Note that this can return a ConstantAggregateZero object.
  /// ElementTy must be one of i8/i16/i32/i64/half/bfloat/float/double. Data is
  /// the buffer containing the elements. Be careful to make sure Data uses the
  /// right endianness; the buffer will be used as-is.
  static Constant *getRaw(StringRef Data, uint64_t NumElements,
                          Type *ElementTy) {
    Type *Ty = ArrayType::get(ElementTy, NumElements);
    return getImpl(Data, Ty);
  }

  /// getFP() constructors - Return a constant of array type with a float
  /// element type taken from argument `ElementType', and count taken from
  /// argument `Elts'.  The bit width of `ElementType' must match the bit
  /// width of the integer type contained in the passed-in ArrayRef (i.e. half
  /// or bfloat for 16 bits, float for 32 bits, double for 64 bits).  Note
  /// that this can return a ConstantAggregateZero object.
  static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts);
  static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts);
  static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts);

  /// This method constructs a CDS and initializes it with a text string.
  /// The default behavior (AddNull==true) causes a null terminator to
  /// be placed at the end of the array (increasing the length of the string by
  /// one more than the StringRef would normally indicate).  Pass AddNull=false
  /// to disable this behavior.
  static Constant *getString(LLVMContext &Context, StringRef Initializer,
                             bool AddNull = true);

  /// Specialize the getType() method to always return an ArrayType,
  /// which reduces the amount of casting needed in parts of the compiler.
  inline ArrayType *getType() const {
    return cast<ArrayType>(Value::getType());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantDataArrayVal;
  }
};
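
// Example (illustrative sketch): a NUL-terminated [6 x i8] string constant
// and a [4 x i32] data array in a hypothetical LLVMContext `Ctx`:
//
//   Constant *Str = ConstantDataArray::getString(Ctx, "hello");
//   uint32_t Vals[] = {1, 2, 3, 4};
//   Constant *Arr = ConstantDataArray::get(Ctx, Vals);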

//===----------------------------------------------------------------------===//
/// A vector constant whose element type is a simple 1/2/4/8-byte integer or
/// float/double, and whose elements are just simple data values
/// (i.e. ConstantInt/ConstantFP). This Constant node has no operands because it
/// stores all of the elements of the constant as densely packed data, instead
/// of as Value*'s.
class ConstantDataVector final : public ConstantDataSequential {
  friend class ConstantDataSequential;

  explicit ConstantDataVector(Type *ty, const char *Data)
      : ConstantDataSequential(ty, ConstantDataVectorVal, Data),
        IsSplatSet(false) {}
  // Cache whether or not the constant is a splat.
  mutable bool IsSplatSet : 1;
  mutable bool IsSplat : 1;
  bool isSplatData() const;

public:
  ConstantDataVector(const ConstantDataVector &) = delete;

  /// get() constructors - Return a constant with vector type with an element
  /// count and element type matching the ArrayRef passed in.  Note that this
  /// can return a ConstantAggregateZero object.
  static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts);
  static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts);
  static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts);
  static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts);
  static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
  static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);

  /// getRaw() constructor - Return a constant with vector type with an element
  /// count and element type matching the NumElements and ElementTy parameters
  /// passed in. Note that this can return a ConstantAggregateZero object.
  /// ElementTy must be one of i8/i16/i32/i64/half/bfloat/float/double. Data is
  /// the buffer containing the elements. Be careful to make sure Data uses the
  /// right endianness; the buffer will be used as-is.
  static Constant *getRaw(StringRef Data, uint64_t NumElements,
                          Type *ElementTy) {
    Type *Ty = VectorType::get(ElementTy, ElementCount::getFixed(NumElements));
    return getImpl(Data, Ty);
  }

  /// getFP() constructors - Return a constant of vector type with a float
  /// element type taken from argument `ElementType', and count taken from
  /// argument `Elts'.  The bit width of `ElementType' must match the bit
  /// width of the integer type contained in the passed-in ArrayRef (i.e. half
  /// or bfloat for 16 bits, float for 32 bits, double for 64 bits).  Note
  /// that this can return a ConstantAggregateZero object.
  static Constant *getFP(Type *ElementType, ArrayRef<uint16_t> Elts);
  static Constant *getFP(Type *ElementType, ArrayRef<uint32_t> Elts);
  static Constant *getFP(Type *ElementType, ArrayRef<uint64_t> Elts);

  /// Return a ConstantVector with the specified constant in each element.
  /// The specified constant has to be of a compatible type (i8/i16/
  /// i32/i64/half/bfloat/float/double) and must be a ConstantFP or ConstantInt.
  static Constant *getSplat(unsigned NumElts, Constant *Elt);

  /// Returns true if this is a splat constant, meaning that all elements have
  /// the same value.
  bool isSplat() const;

  /// If this is a splat constant, meaning that all of the elements have the
  /// same value, return that value. Otherwise return NULL.
  Constant *getSplatValue() const;

  /// Specialize the getType() method to always return a FixedVectorType,
  /// which reduces the amount of casting needed in parts of the compiler.
  inline FixedVectorType *getType() const {
    return cast<FixedVectorType>(Value::getType());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantDataVectorVal;
  }
};
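
// Example (illustrative sketch): a <4 x i32> splat of 1 in a hypothetical
// LLVMContext `Ctx`:
//
//   Constant *Splat = ConstantDataVector::getSplat(
//       4, ConstantInt::get(Type::getInt32Ty(Ctx), 1));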

//===----------------------------------------------------------------------===//
/// A constant token which is empty
///
class ConstantTokenNone final : public ConstantData {
  friend class Constant;

  explicit ConstantTokenNone(LLVMContext &Context)
      : ConstantData(Type::getTokenTy(Context), ConstantTokenNoneVal) {}

  void destroyConstantImpl();

public:
  ConstantTokenNone(const ConstantTokenNone &) = delete;

  /// Return the ConstantTokenNone.
  static ConstantTokenNone *get(LLVMContext &Context);

  /// Methods to support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantTokenNoneVal;
  }
};

/// A constant target extension type default initializer
class ConstantTargetNone final : public ConstantData {
  friend class Constant;

  explicit ConstantTargetNone(TargetExtType *T)
      : ConstantData(T, Value::ConstantTargetNoneVal) {}

  void destroyConstantImpl();

public:
  ConstantTargetNone(const ConstantTargetNone &) = delete;

  /// Static factory methods - Return objects of the specified value.
  static ConstantTargetNone *get(TargetExtType *T);

  /// Specialize the getType() method to always return a TargetExtType,
  /// which reduces the amount of casting needed in parts of the compiler.
  inline TargetExtType *getType() const {
    return cast<TargetExtType>(Value::getType());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantTargetNoneVal;
  }
};

/// The address of a basic block.
///
class BlockAddress final : public Constant {
  friend class Constant;

  BlockAddress(Function *F, BasicBlock *BB);

  void *operator new(size_t S) { return User::operator new(S, 2); }

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return a BlockAddress for the specified function and basic block.
  static BlockAddress *get(Function *F, BasicBlock *BB);

  /// Return a BlockAddress for the specified basic block.  The basic
  /// block must be embedded into a function.
  static BlockAddress *get(BasicBlock *BB);

  /// Lookup an existing \c BlockAddress constant for the given BasicBlock.
  ///
  /// \returns 0 if \c !BB->hasAddressTaken(), otherwise the \c BlockAddress.
  static BlockAddress *lookup(const BasicBlock *BB);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Function *getFunction() const { return (Function *)Op<0>().get(); }
  BasicBlock *getBasicBlock() const { return (BasicBlock *)Op<1>().get(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == BlockAddressVal;
  }
};
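
// Example (illustrative sketch): taking the address of a basic block `BB`
// that is already embedded in a function, e.g. to feed an indirectbr:
//
//   BlockAddress *BA = BlockAddress::get(&BB);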

template <>
struct OperandTraits<BlockAddress>
    : public FixedNumOperandTraits<BlockAddress, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)

/// Wrapper for a function that represents a value which is functionally
/// equivalent to the original function. This can be a function, a global
/// alias to a function, or an ifunc.
class DSOLocalEquivalent final : public Constant {
  friend class Constant;

  DSOLocalEquivalent(GlobalValue *GV);

  void *operator new(size_t S) { return User::operator new(S, 1); }

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return a DSOLocalEquivalent for the specified global value.
  static DSOLocalEquivalent *get(GlobalValue *GV);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  GlobalValue *getGlobalValue() const {
    return cast<GlobalValue>(Op<0>().get());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == DSOLocalEquivalentVal;
  }
};

template <>
struct OperandTraits<DSOLocalEquivalent>
    : public FixedNumOperandTraits<DSOLocalEquivalent, 1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(DSOLocalEquivalent, Value)

/// Wrapper for a value that won't be replaced with a CFI jump table
/// pointer in LowerTypeTestsModule.
class NoCFIValue final : public Constant {
  friend class Constant;

  NoCFIValue(GlobalValue *GV);

  void *operator new(size_t S) { return User::operator new(S, 1); }

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

public:
  /// Return a NoCFIValue for the specified global value.
  static NoCFIValue *get(GlobalValue *GV);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  GlobalValue *getGlobalValue() const {
    return cast<GlobalValue>(Op<0>().get());
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == NoCFIValueVal;
  }
};

template <>
struct OperandTraits<NoCFIValue> : public FixedNumOperandTraits<NoCFIValue, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(NoCFIValue, Value)
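
// Example (illustrative sketch; F is assumed to be an existing Function whose
// address is needed without CFI indirection):
//
//   NoCFIValue *NC = NoCFIValue::get(F);
//   assert(NC->getGlobalValue() == F);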

//===----------------------------------------------------------------------===//
/// A constant value that is initialized with an expression using
/// other constant values.
///
/// This class uses the standard Instruction opcodes to define the various
/// constant expressions.  The Opcode field for the ConstantExpr class is
/// maintained in the Value::SubclassData field.
class ConstantExpr : public Constant {
  friend struct ConstantExprKeyType;
  friend class Constant;

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

protected:
  ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
      : Constant(ty, ConstantExprVal, Ops, NumOps) {
    // Operation type (an Instruction opcode) is stored as the SubclassData.
    setValueSubclassData(Opcode);
  }

  ~ConstantExpr() = default;

public:
  // Static methods to construct a ConstantExpr of different kinds.  Note that
  // these methods may return an object that is not an instance of the
  // ConstantExpr class, because they will attempt to fold the constant
  // expression into something simpler if possible.

  /// getAlignOf constant expr - computes the alignment of a type in a target
  /// independent way (Note: the return type is an i64).
  static Constant *getAlignOf(Type *Ty);

  /// getSizeOf constant expr - computes the (alloc) size of a type (in
  /// address-units, not bits) in a target independent way (Note: the return
  /// type is an i64).
  ///
  static Constant *getSizeOf(Type *Ty);

  static Constant *getNeg(Constant *C, bool HasNUW = false,
                          bool HasNSW = false);
  static Constant *getNot(Constant *C);
  static Constant *getAdd(Constant *C1, Constant *C2, bool HasNUW = false,
                          bool HasNSW = false);
  static Constant *getSub(Constant *C1, Constant *C2, bool HasNUW = false,
                          bool HasNSW = false);
  static Constant *getMul(Constant *C1, Constant *C2, bool HasNUW = false,
                          bool HasNSW = false);
  static Constant *getAnd(Constant *C1, Constant *C2);
  static Constant *getOr(Constant *C1, Constant *C2);
  static Constant *getXor(Constant *C1, Constant *C2);
  static Constant *getShl(Constant *C1, Constant *C2, bool HasNUW = false,
                          bool HasNSW = false);
  static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false);
  static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false);
  static Constant *getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getSExt(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getZExt(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getFPTrunc(Constant *C, Type *Ty,
                              bool OnlyIfReduced = false);
  static Constant *getFPExtend(Constant *C, Type *Ty,
                               bool OnlyIfReduced = false);
  static Constant *getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced = false);
  static Constant *getPtrToInt(Constant *C, Type *Ty,
                               bool OnlyIfReduced = false);
  static Constant *getIntToPtr(Constant *C, Type *Ty,
                               bool OnlyIfReduced = false);
  static Constant *getBitCast(Constant *C, Type *Ty,
                              bool OnlyIfReduced = false);
  static Constant *getAddrSpaceCast(Constant *C, Type *Ty,
                                    bool OnlyIfReduced = false);

  static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
  static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }

  static Constant *getNSWAdd(Constant *C1, Constant *C2) {
    return getAdd(C1, C2, false, true);
  }

  static Constant *getNUWAdd(Constant *C1, Constant *C2) {
    return getAdd(C1, C2, true, false);
  }

  static Constant *getNSWSub(Constant *C1, Constant *C2) {
    return getSub(C1, C2, false, true);
  }

  static Constant *getNUWSub(Constant *C1, Constant *C2) {
    return getSub(C1, C2, true, false);
  }

  static Constant *getNSWMul(Constant *C1, Constant *C2) {
    return getMul(C1, C2, false, true);
  }

  static Constant *getNUWMul(Constant *C1, Constant *C2) {
    return getMul(C1, C2, true, false);
  }

  static Constant *getNSWShl(Constant *C1, Constant *C2) {
    return getShl(C1, C2, false, true);
  }

  static Constant *getNUWShl(Constant *C1, Constant *C2) {
    return getShl(C1, C2, true, false);
  }

  static Constant *getExactAShr(Constant *C1, Constant *C2) {
    return getAShr(C1, C2, true);
  }

  static Constant *getExactLShr(Constant *C1, Constant *C2) {
    return getLShr(C1, C2, true);
  }

  /// If C is a scalar/fixed width vector of known powers of 2, then this
  /// function returns a new scalar/fixed width vector obtained from logBase2
  /// of C. Undef vector elements are set to zero.
  /// Return a null pointer otherwise.
  static Constant *getExactLogBase2(Constant *C);

  /// Return the identity constant for a binary opcode.
  /// The identity constant C is defined as X op C = X and C op X = X for every
  /// X when the binary operation is commutative. If the binop is not
  /// commutative, callers can acquire the operand 1 identity constant by
  /// setting AllowRHSConstant to true. For example, any shift has a zero
  /// identity constant for operand 1: X shift 0 = X.
  /// If this is a fadd/fsub operation and we don't care about signed zeros,
  /// then setting NSZ to true returns the identity +0.0 instead of -0.0.
  /// Return nullptr if the operator does not have an identity constant.
  static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty,
                                    bool AllowRHSConstant = false,
                                    bool NSZ = false);

  /// Return the absorbing element for the given binary
  /// operation, i.e. a constant C such that X op C = C and C op X = C for
  /// every X.  For example, this returns zero for integer multiplication.
  /// It returns null if the operator doesn't have an absorbing element.
  static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty);
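
  // Example (illustrative sketch; Int32Ty is assumed to be an i32 Type*):
  // the identity for add is 0 (X + 0 == X); the absorber for mul is 0
  // (X * 0 == 0).
  //
  //   Constant *AddId =
  //       ConstantExpr::getBinOpIdentity(Instruction::Add, Int32Ty);
  //   Constant *MulAbsorber =
  //       ConstantExpr::getBinOpAbsorber(Instruction::Mul, Int32Ty);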

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);

  /// Convenience function for getting a Cast operation.
  ///
  /// \param ops The opcode for the conversion
  /// \param C  The constant to be converted
  /// \param Ty The type to which the constant is converted
  /// \param OnlyIfReduced see \a getWithOperands() docs.
  static Constant *getCast(unsigned ops, Constant *C, Type *Ty,
                           bool OnlyIfReduced = false);

  // Create a ZExt or BitCast cast constant expression
  static Constant *
  getZExtOrBitCast(Constant *C, ///< The constant to zext or bitcast
                   Type *Ty     ///< The type to zext or bitcast C to
  );

  // Create a SExt or BitCast cast constant expression
  static Constant *
  getSExtOrBitCast(Constant *C, ///< The constant to sext or bitcast
                   Type *Ty     ///< The type to sext or bitcast C to
  );

  // Create a Trunc or BitCast cast constant expression
  static Constant *
  getTruncOrBitCast(Constant *C, ///< The constant to trunc or bitcast
                    Type *Ty     ///< The type to trunc or bitcast C to
  );

  /// Create either an sext, trunc or nothing, depending on whether Ty is
  /// wider, narrower or the same as C->getType(). This only works with
  /// integer or vector of integer types.
  static Constant *getSExtOrTrunc(Constant *C, Type *Ty);

  /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant
  /// expression.
  static Constant *
  getPointerCast(Constant *C, ///< The pointer value to be casted (operand 0)
                 Type *Ty     ///< The type to which cast should be made
  );

  /// Create a BitCast or AddrSpaceCast for a pointer type depending on
  /// the address space.
  static Constant *getPointerBitCastOrAddrSpaceCast(
      Constant *C, ///< The constant to addrspacecast or bitcast
      Type *Ty     ///< The type to bitcast or addrspacecast C to
  );

  /// Create a ZExt, Bitcast or Trunc for integer -> integer casts
  static Constant *
  getIntegerCast(Constant *C,  ///< The integer constant to be casted
                 Type *Ty,     ///< The integer type to cast to
                 bool IsSigned ///< Whether C should be treated as signed or not
  );

  /// Create a FPExt, Bitcast or FPTrunc for fp -> fp casts
  static Constant *getFPCast(Constant *C, ///< The floating-point constant to be casted
                             Type *Ty     ///< The floating-point type to cast to
  );

  /// Return true if this is a convert constant expression
  bool isCast() const;

  /// Return true if this is a compare constant expression
  bool isCompare() const;

  /// get - Return a binary or shift operator constant expression,
  /// folding if possible.
  ///
  /// \param OnlyIfReducedTy see \a getWithOperands() docs.
  static Constant *get(unsigned Opcode, Constant *C1, Constant *C2,
                       unsigned Flags = 0, Type *OnlyIfReducedTy = nullptr);

  /// Return an ICmp or FCmp comparison operator constant expression.
  ///
  /// \param OnlyIfReduced see \a getWithOperands() docs.
  static Constant *getCompare(unsigned short pred, Constant *C1, Constant *C2,
                              bool OnlyIfReduced = false);

  /// get* - Return some common constants without having to
  /// specify the full Instruction::OPCODE identifier.
  ///
  static Constant *getICmp(unsigned short pred, Constant *LHS, Constant *RHS,
                           bool OnlyIfReduced = false);
  static Constant *getFCmp(unsigned short pred, Constant *LHS, Constant *RHS,
                           bool OnlyIfReduced = false);

  /// Getelementptr form.  Value* is only accepted for convenience;
  /// all elements must be Constants.
  ///
  /// \param InRangeIndex the inrange index if present or std::nullopt.
  /// \param OnlyIfReducedTy see \a getWithOperands() docs.
  static Constant *
  getGetElementPtr(Type *Ty, Constant *C, ArrayRef<Constant *> IdxList,
                   bool InBounds = false,
                   std::optional<unsigned> InRangeIndex = std::nullopt,
                   Type *OnlyIfReducedTy = nullptr) {
    return getGetElementPtr(
        Ty, C, ArrayRef((Value *const *)IdxList.data(), IdxList.size()),
        InBounds, InRangeIndex, OnlyIfReducedTy);
  }
  static Constant *
  getGetElementPtr(Type *Ty, Constant *C, Constant *Idx, bool InBounds = false,
                   std::optional<unsigned> InRangeIndex = std::nullopt,
                   Type *OnlyIfReducedTy = nullptr) {
    // This form of the function only exists to avoid ambiguous overload
    // warnings about whether to convert Idx to ArrayRef<Constant *> or
    // ArrayRef<Value *>.
    return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, InRangeIndex,
                            OnlyIfReducedTy);
  }
  static Constant *
  getGetElementPtr(Type *Ty, Constant *C, ArrayRef<Value *> IdxList,
                   bool InBounds = false,
                   std::optional<unsigned> InRangeIndex = std::nullopt,
                   Type *OnlyIfReducedTy = nullptr);

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
                                            ArrayRef<Constant *> IdxList) {
    return getGetElementPtr(Ty, C, IdxList, true);
  }
  static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
                                            Constant *Idx) {
    // This form of the function only exists to avoid ambiguous overload
    // warnings about whether to convert Idx to ArrayRef<Constant *> or
    // ArrayRef<Value *>.
    return getGetElementPtr(Ty, C, Idx, true);
  }
  static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
                                            ArrayRef<Value *> IdxList) {
    return getGetElementPtr(Ty, C, IdxList, true);
  }
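
  // Example (illustrative sketch; GV is assumed to be a Constant* pointing at
  // a global of array type ArrTy, and Int64Ty to be an i64 Type*): build the
  // folded constant address &GV[0][1].
  //
  //   Constant *Idxs[] = {ConstantInt::get(Int64Ty, 0),
  //                       ConstantInt::get(Int64Ty, 1)};
  //   Constant *Addr = ConstantExpr::getInBoundsGetElementPtr(ArrTy, GV, Idxs);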

  static Constant *getExtractElement(Constant *Vec, Constant *Idx,
                                     Type *OnlyIfReducedTy = nullptr);
  static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx,
                                    Type *OnlyIfReducedTy = nullptr);
  static Constant *getShuffleVector(Constant *V1, Constant *V2,
                                    ArrayRef<int> Mask,
                                    Type *OnlyIfReducedTy = nullptr);

  /// Return the opcode at the root of this constant expression
  unsigned getOpcode() const { return getSubclassDataFromValue(); }

  /// Return the ICMP or FCMP predicate value. Assert if this is not an ICMP or
  /// FCMP constant expression.
  unsigned getPredicate() const;

  /// Assert that this is a shufflevector and return the mask. See class
  /// ShuffleVectorInst for a description of the mask representation.
  ArrayRef<int> getShuffleMask() const;

  /// Assert that this is a shufflevector and return the mask.
  ///
  /// TODO: This is a temporary hack until we update the bitcode format for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const;

  /// Return a string representation for an opcode.
  const char *getOpcodeName() const;

  /// This returns the current constant expression with the operands replaced
  /// with the specified values. The specified array must have the same number
  /// of operands as our current one.
  Constant *getWithOperands(ArrayRef<Constant *> Ops) const {
    return getWithOperands(Ops, getType());
  }

  /// Get the current expression with the operands replaced.
  ///
  /// Return the current constant expression with the operands replaced with \c
  /// Ops and the type with \c Ty.  The new operands must have the same number
  /// as the current ones.
  ///
  /// If \c OnlyIfReduced is \c true, nullptr will be returned unless something
  /// gets constant-folded, the type changes, or the expression is otherwise
  /// canonicalized.  This parameter should almost always be \c false.
  Constant *getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
                            bool OnlyIfReduced = false,
                            Type *SrcTy = nullptr) const;

  /// Returns an Instruction which implements the same operation as this
  /// ConstantExpr. If \p InsertBefore is not null, the new instruction is
  /// inserted before it, otherwise it is not inserted into any basic block.
  ///
  /// A better approach to this could be to have a constructor for Instruction
  /// which would take a ConstantExpr parameter, but that would have spread
  /// implementation details of ConstantExpr outside of Constants.cpp, which
  /// would make it harder to remove ConstantExprs altogether.
  Instruction *getAsInstruction(Instruction *InsertBefore = nullptr) const;

  /// Whether creating a constant expression for this binary operator is
  /// desirable.
  static bool isDesirableBinOp(unsigned Opcode);

  /// Whether creating a constant expression for this binary operator is
  /// supported.
  static bool isSupportedBinOp(unsigned Opcode);

  /// Whether creating a constant expression for this getelementptr type is
  /// supported.
  static bool isSupportedGetElementPtr(const Type *SrcElemTy) {
    return !SrcElemTy->isScalableTy();
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == ConstantExprVal;
  }

private:
  // Shadow Value::setValueSubclassData with a private forwarding method so that
  // subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }
};

template <>
struct OperandTraits<ConstantExpr>
    : public VariadicOperandTraits<ConstantExpr, 1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
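
// Example (illustrative sketch; Ctx is assumed to be an existing
// LLVMContext): the factory methods fold whenever possible, so the result
// need not be a ConstantExpr at all.
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   Constant *Sum = ConstantExpr::getAdd(ConstantInt::get(I32, 2),
//                                        ConstantInt::get(I32, 3));
//   assert(isa<ConstantInt>(Sum)); // folded to i32 5, not a ConstantExpr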

//===----------------------------------------------------------------------===//
/// 'undef' values are things that do not have specified contents.
/// These are used for a variety of purposes, including global variable
/// initializers and operands to instructions.  'undef' values can occur with
/// any first-class type.
///
/// Undef values aren't exactly constants; if they have multiple uses, they
/// can appear to have different bit patterns at each use. See
/// LangRef.html#undefvalues for details.
///
class UndefValue : public ConstantData {
  friend class Constant;

  explicit UndefValue(Type *T) : ConstantData(T, UndefValueVal) {}

  void destroyConstantImpl();

protected:
  explicit UndefValue(Type *T, ValueTy vty) : ConstantData(T, vty) {}

public:
  UndefValue(const UndefValue &) = delete;

  /// Static factory methods - Return an 'undef' object of the specified type.
  static UndefValue *get(Type *T);

  /// If this Undef has array or vector type, return an undef with the right
  /// element type.
  UndefValue *getSequentialElement() const;

  /// If this undef has struct type, return an undef with the right element type
  /// for the specified element.
  UndefValue *getStructElement(unsigned Elt) const;

  /// Return an undef of the right value for the specified GEP index if we can,
  /// otherwise return null (e.g. if C is a ConstantExpr).
  UndefValue *getElementValue(Constant *C) const;

  /// Return an undef of the right value for the specified GEP index.
  UndefValue *getElementValue(unsigned Idx) const;

  /// Return the number of elements in the array, vector, or struct.
  unsigned getNumElements() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == UndefValueVal ||
           V->getValueID() == PoisonValueVal;
  }
};
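
// Example (illustrative sketch; Ctx is assumed to be an existing
// LLVMContext):
//
//   UndefValue *UV = UndefValue::get(Type::getInt32Ty(Ctx));
//   UndefValue *Elt = UndefValue::get(Type::getInt32Ty(Ctx)); // uniqued: Elt == UV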

//===----------------------------------------------------------------------===//
/// In order to facilitate speculative execution, many instructions do not
/// invoke immediate undefined behavior when provided with illegal operands,
/// and return a poison value instead.
///
/// see LangRef.html#poisonvalues for details.
///
class PoisonValue final : public UndefValue {
  friend class Constant;

  explicit PoisonValue(Type *T) : UndefValue(T, PoisonValueVal) {}

  void destroyConstantImpl();

public:
  PoisonValue(const PoisonValue &) = delete;

  /// Static factory methods - Return a 'poison' object of the specified type.
  static PoisonValue *get(Type *T);

  /// If this poison has array or vector type, return a poison with the right
  /// element type.
  PoisonValue *getSequentialElement() const;

  /// If this poison has struct type, return a poison with the right element
  /// type for the specified element.
  PoisonValue *getStructElement(unsigned Elt) const;

  /// Return a poison of the right value for the specified GEP index if we can,
  /// otherwise return null (e.g. if C is a ConstantExpr).
  PoisonValue *getElementValue(Constant *C) const;

  /// Return a poison of the right value for the specified GEP index.
  PoisonValue *getElementValue(unsigned Idx) const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == PoisonValueVal;
  }
};
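
// Example (illustrative sketch; Ctx is assumed to be an existing
// LLVMContext): poison is the stronger form, and every PoisonValue is also an
// UndefValue.
//
//   PoisonValue *PV = PoisonValue::get(Type::getInt32Ty(Ctx));
//   assert(isa<UndefValue>(PV) && isa<PoisonValue>(PV));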

} // end namespace llvm

#endif // LLVM_IR_CONSTANTS_H
//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the Value class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VALUE_H
#define LLVM_IR_VALUE_H

#include "llvm-c/Types.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Use.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <iterator>
#include <memory>

namespace llvm {

class APInt;
class Argument;
class BasicBlock;
class Constant;
class ConstantData;
class ConstantAggregate;
class DataLayout;
class Function;
class GlobalAlias;
class GlobalIFunc;
class GlobalObject;
class GlobalValue;
class GlobalVariable;
class InlineAsm;
class Instruction;
class LLVMContext;
class MDNode;
class Module;
class ModuleSlotTracker;
class raw_ostream;
template<typename ValueTy> class StringMapEntry;
class Twine;
class Type;
class User;

using ValueName = StringMapEntry<Value *>;

//===----------------------------------------------------------------------===//
//                                 Value Class
//===----------------------------------------------------------------------===//

/// LLVM Value Representation
///
/// This is a very important LLVM class. It is the base class of all values
/// computed by a program that may be used as operands to other values. Value is
/// the super class of other important classes such as Instruction and Function.
/// All Values have a Type. Type is not a subclass of Value. Some values can
/// have a name and they belong to some Module.  Setting the name on the Value
/// automatically updates the module's symbol table.
///
/// Every value has a "use list" that keeps track of which other Values are
/// using this Value.  A Value can also have an arbitrary number of ValueHandle
/// objects that watch it and listen to RAUW and Destroy events.  See
/// llvm/IR/ValueHandle.h for details.
class Value {
  Type *VTy;
  Use *UseList;

  friend class ValueAsMetadata; // Allow access to IsUsedByMD.
  friend class ValueHandleBase;

  const unsigned char SubclassID;   // Subclass identifier (for isa/dyn_cast)
  unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?

protected:
  /// Hold subclass data that can be dropped.
  ///
  /// This member is similar to SubclassData, however it is for holding
  /// information which may be used to aid optimization, but which may be
  /// cleared to zero without affecting conservative interpretation.
  unsigned char SubclassOptionalData : 7;

private:
  /// Hold arbitrary subclass data.
  ///
  /// This member is defined by this class, but is not used for anything.
  /// Subclasses can use it to hold whatever state they find useful.  This
  /// field is initialized to zero by the ctor.
  unsigned short SubclassData;

protected:
  /// The number of operands in the subclass.
  ///
  /// This member is defined by this class, but not used for anything.
  /// Subclasses can use it to store their number of operands, if they have
  /// any.
  ///
  /// This is stored here to save space in User on 64-bit hosts.  Since most
  /// instances of Value have operands, 32-bit hosts aren't significantly
  /// affected.
  ///
  /// Note, this should *NOT* be used directly by any class other than User.
  /// User uses this value to find the Use list.
  enum : unsigned { NumUserOperandsBits = 27 };
  unsigned NumUserOperands : NumUserOperandsBits;

  // Use the same type as the bitfield above so that MSVC will pack them.
  unsigned IsUsedByMD : 1;
  unsigned HasName : 1;
  unsigned HasMetadata : 1; // Has metadata attached to this?
  unsigned HasHungOffUses : 1;
  unsigned HasDescriptor : 1;

private:
  template <typename UseT> // UseT == 'Use' or 'const Use'
  class use_iterator_impl {
    friend class Value;

    UseT *U;

    explicit use_iterator_impl(UseT *u) : U(u) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = UseT *;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    use_iterator_impl() : U() {}

    bool operator==(const use_iterator_impl &x) const { return U == x.U; }
    bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }

    use_iterator_impl &operator++() { // Preincrement
      assert(U && "Cannot increment end iterator!");
      U = U->getNext();
      return *this;
    }

    use_iterator_impl operator++(int) { // Postincrement
      auto tmp = *this;
      ++*this;
      return tmp;
    }

    UseT &operator*() const {
      assert(U && "Cannot dereference end iterator!");
      return *U;
    }

    UseT *operator->() const { return &operator*(); }

    operator use_iterator_impl<const UseT>() const {
      return use_iterator_impl<const UseT>(U);
    }
  };

  template <typename UserTy> // UserTy == 'User' or 'const User'
  class user_iterator_impl {
    use_iterator_impl<Use> UI;
    explicit user_iterator_impl(Use *U) : UI(U) {}
    friend class Value;

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = UserTy *;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    user_iterator_impl() = default;

    bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
    bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }

    /// Returns true if this iterator is equal to user_end() on the value.
    bool atEnd() const { return *this == user_iterator_impl(); }

    user_iterator_impl &operator++() { // Preincrement
      ++UI;
      return *this;
    }

    user_iterator_impl operator++(int) { // Postincrement
      auto tmp = *this;
      ++*this;
      return tmp;
    }

    // Retrieve a pointer to the current User.
    UserTy *operator*() const {
      return UI->getUser();
    }

    UserTy *operator->() const { return operator*(); }

    operator user_iterator_impl<const UserTy>() const {
      return user_iterator_impl<const UserTy>(*UI);
    }

    Use &getUse() const { return *UI; }
  };

protected:
  Value(Type *Ty, unsigned scid);

  /// Value's destructor should be virtual by design, but that would require
  /// that Value and all of its subclasses have a vtable that effectively
  /// duplicates the information in the value ID. As a size optimization, the
  /// destructor has been protected, and the caller should manually call
  /// deleteValue.
  ~Value(); // Use deleteValue() to delete a generic Value.

public:
  Value(const Value &) = delete;
  Value &operator=(const Value &) = delete;

  /// Delete a pointer to a generic Value.
  void deleteValue();

  /// Support for debugging, callable in GDB: V->dump()
  void dump() const;

  /// Implement operator<< on Value.
  /// @{
  void print(raw_ostream &O, bool IsForDebug = false) const;
  void print(raw_ostream &O, ModuleSlotTracker &MST,
             bool IsForDebug = false) const;
  /// @}

  /// Print the name of this Value out to the specified raw_ostream.
  ///
  /// This is useful when you just want to print 'int %reg126', not the
  /// instruction that generated it. If you specify a Module for context, then
  /// even constants get pretty-printed; for example, the type of a null
  /// pointer is printed symbolically.
  /// @{
  void printAsOperand(raw_ostream &O, bool PrintType = true,
                      const Module *M = nullptr) const;
  void printAsOperand(raw_ostream &O, bool PrintType,
                      ModuleSlotTracker &MST) const;
  /// @}

  /// All values are typed, get the type of this value.
  Type *getType() const { return VTy; }

  /// All values hold a context through their type.
  LLVMContext &getContext() const;

  // All values can potentially be named.
  bool hasName() const { return HasName; }
  ValueName *getValueName() const;
  void setValueName(ValueName *VN);

private:
  void destroyValueName();
  enum class ReplaceMetadataUses { No, Yes };
  void doRAUW(Value *New, ReplaceMetadataUses);
  void setNameImpl(const Twine &Name);

public:
  /// Return a constant reference to the value's name.
  ///
  /// This is guaranteed to return the same reference as long as the value is not
  /// modified.  If the value has a name, this does a hashtable lookup, so it's
  /// not free.
  StringRef getName() const;

  /// Change the name of the value.
  ///
  /// Choose a new unique name if the provided name is taken.
  ///
  /// \param Name The new name; or "" if the value's name should be removed.
  void setName(const Twine &Name);

  /// Transfer the name from V to this value.
  ///
  /// After taking V's name, sets V's name to empty.
  ///
  /// \note It is an error to call V->takeName(V).
  void takeName(Value *V);

#ifndef NDEBUG
  std::string getNameOrAsOperand() const;
#endif

  /// Change all uses of this to point to a new Value.
  ///
  /// Go through the uses list for this definition and make each use point to
  /// "V" instead of "this".  After this completes, 'this's use list is
  /// guaranteed to be empty.
  void replaceAllUsesWith(Value *V);
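
  // Example (illustrative sketch; OldV and NewV are assumed to be existing
  // values of the same type):
  //
  //   OldV->replaceAllUsesWith(NewV);
  //   assert(OldV->use_empty());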

  /// Change non-metadata uses of this to point to a new Value.
  ///
  /// Go through the uses list for this definition and make each use point to
  /// "V" instead of "this". This function skips metadata entries in the list.
  void replaceNonMetadataUsesWith(Value *V);

  /// Go through the uses list for this definition and make each use point
  /// to "V" if the callback ShouldReplace returns true for the given Use.
  /// Unlike replaceAllUsesWith() this function does not support basic block
  /// values.
  void replaceUsesWithIf(Value *New,
                         llvm::function_ref<bool(Use &U)> ShouldReplace);

  /// replaceUsesOutsideBlock - Go through the uses list for this definition and
  /// make each use point to "V" instead of "this" when the use is outside the
  /// block. This value's use list is expected to have at least one element.
  /// Unlike replaceAllUsesWith() this function does not support basic block
  /// values.
  void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);

  //----------------------------------------------------------------------
  // Methods for handling the chain of uses of this Value.
  //
  // Materializing a function can introduce new uses, so these methods come in
  // two variants:
  // The methods that start with materialized_ check the uses that are
  // currently known given which functions are materialized. Be very careful
  // when using them since you might not get all uses.
  // The methods that don't start with materialized_ assert that the module is
  // fully materialized.
  void assertModuleIsMaterializedImpl() const;
  // This indirection exists so we can keep assertModuleIsMaterializedImpl()
  // around in release builds of Value.cpp to be linked with other code built
  // in debug mode. But this avoids calling it in any of the release built code.
  void assertModuleIsMaterialized() const {
#ifndef NDEBUG
    assertModuleIsMaterializedImpl();
#endif
  }

  bool use_empty() const {
    assertModuleIsMaterialized();
    return UseList == nullptr;
  }

  bool materialized_use_empty() const {
    return UseList == nullptr;
  }

  using use_iterator = use_iterator_impl<Use>;
  using const_use_iterator = use_iterator_impl<const Use>;

  use_iterator materialized_use_begin() { return use_iterator(UseList); }
  const_use_iterator materialized_use_begin() const {
    return const_use_iterator(UseList);
  }
  use_iterator use_begin() {
    assertModuleIsMaterialized();
    return materialized_use_begin();
  }
  const_use_iterator use_begin() const {
    assertModuleIsMaterialized();
    return materialized_use_begin();
  }
  use_iterator use_end() { return use_iterator(); }
  const_use_iterator use_end() const { return const_use_iterator(); }
  iterator_range<use_iterator> materialized_uses() {
    return make_range(materialized_use_begin(), use_end());
  }
  iterator_range<const_use_iterator> materialized_uses() const {
    return make_range(materialized_use_begin(), use_end());
  }
  iterator_range<use_iterator> uses() {
    assertModuleIsMaterialized();
    return materialized_uses();
  }
  iterator_range<const_use_iterator> uses() const {
    assertModuleIsMaterialized();
    return materialized_uses();
  }

  bool user_empty() const {
    assertModuleIsMaterialized();
    return UseList == nullptr;
  }

  using user_iterator = user_iterator_impl<User>;
  using const_user_iterator = user_iterator_impl<const User>;

  user_iterator materialized_user_begin() { return user_iterator(UseList); }
  const_user_iterator materialized_user_begin() const {
    return const_user_iterator(UseList);
  }
  user_iterator user_begin() {
    assertModuleIsMaterialized();
    return materialized_user_begin();
  }
  const_user_iterator user_begin() const {
    assertModuleIsMaterialized();
    return materialized_user_begin();
  }
  user_iterator user_end() { return user_iterator(); }
  const_user_iterator user_end() const { return const_user_iterator(); }
  User *user_back() {
    assertModuleIsMaterialized();
    return *materialized_user_begin();
  }
  const User *user_back() const {
    assertModuleIsMaterialized();
    return *materialized_user_begin();
  }
  iterator_range<user_iterator> materialized_users() {
    return make_range(materialized_user_begin(), user_end());
  }
  iterator_range<const_user_iterator> materialized_users() const {
    return make_range(materialized_user_begin(), user_end());
  }
  iterator_range<user_iterator> users() {
    assertModuleIsMaterialized();
    return materialized_users();
  }
  iterator_range<const_user_iterator> users() const {
    assertModuleIsMaterialized();
    return materialized_users();
  }
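
  // Example (illustrative sketch; V is assumed to be an existing Value*):
  // visit every instruction that uses V.
  //
  //   for (User *U : V->users())
  //     if (auto *I = dyn_cast<Instruction>(U))
  //       visit(I); // 'visit' is a hypothetical callback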

  /// Return true if there is exactly one use of this value.
  ///
  /// This is specialized because it is a common request and does not require
  /// traversing the whole use list.
  bool hasOneUse() const { return hasSingleElement(uses()); }

  /// Return true if this Value has exactly N uses.
  bool hasNUses(unsigned N) const;

  /// Return true if this value has N uses or more.
  ///
  /// This is logically equivalent to getNumUses() >= N.
  bool hasNUsesOrMore(unsigned N) const;

  /// Return true if there is exactly one user of this value.
  ///
  /// Note that this is not the same as "has one use". If a value has one use,
  /// then there certainly is a single user. But if a value has several uses,
  /// those uses may all belong to a single user, or be spread across several.
  ///
  /// This check is potentially costly, since it requires traversing,
  /// in the worst case, the whole use list of a value.
  bool hasOneUser() const;

  /// Return the single use of this value that cannot be dropped, or nullptr
  /// if there is not exactly one such use.
  Use *getSingleUndroppableUse();
  const Use *getSingleUndroppableUse() const {
    return const_cast<Value *>(this)->getSingleUndroppableUse();
  }

  /// Return the unique user of this value that cannot be dropped, or nullptr
  /// if there is not exactly one such user (that user can have multiple uses
  /// of this value).
  User *getUniqueUndroppableUser();
  const User *getUniqueUndroppableUser() const {
    return const_cast<Value *>(this)->getUniqueUndroppableUser();
  }

  /// Return true if this value has exactly N uses that cannot be dropped.
  ///
  /// This is specialized because it is a common request and does not require
  /// traversing the whole use list.
  bool hasNUndroppableUses(unsigned N) const;

  /// Return true if this value has N undroppable uses or more.
  ///
  /// This is logically equivalent to the number of undroppable uses being
  /// greater than or equal to N.
  bool hasNUndroppableUsesOrMore(unsigned N) const;

  /// Remove every use that can safely be removed.
  ///
  /// This will remove, for example, uses in llvm.assume.
  /// This should be used when one wants to perform a transformation but some
  /// droppable uses prevent it.
  /// This function optionally takes a filter to only remove some droppable
  /// uses.
  void dropDroppableUses(llvm::function_ref<bool(const Use *)> ShouldDrop =
                             [](const Use *) { return true; });

  /// Remove every use of this value in \p User that can safely be removed.
  void dropDroppableUsesIn(User &Usr);

  /// Remove the droppable use \p U.
  static void dropDroppableUse(Use &U);

  /// Check if this value is used in the specified basic block.
  bool isUsedInBasicBlock(const BasicBlock *BB) const;

  /// This method computes the number of uses of this Value.
  ///
  /// This is a linear time operation.  Use hasOneUse, hasNUses, or
  /// hasNUsesOrMore to check for specific values.
  unsigned getNumUses() const;

  /// This method should only be used by the Use class.
  void addUse(Use &U) { U.addToList(&UseList); }

  /// Concrete subclass of this.
  ///
  /// An enumeration for keeping track of the concrete subclass of Value that
  /// is actually instantiated. Values of this enumeration are kept in the
  /// Value classes SubclassID field. They are used for concrete type
  /// identification.
  enum ValueTy {
#define HANDLE_VALUE(Name) Name##Val,
#include "llvm/IR/Value.def"

    // Markers:
#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
#include "llvm/IR/Value.def"
  };

  /// Return an ID for the concrete type of this object.
  ///
  /// This is used to implement the classof checks.  This should not be used
  /// for any other purpose, as the values may change as LLVM evolves.  Also,
  /// note that for instructions, the Instruction's opcode is added to
  /// InstructionVal. So this means three things:
  /// # there is no value with code InstructionVal (no opcode==0).
  /// # there are more possible values for the value type than in ValueTy enum.
  /// # the InstructionVal enumerator must be the highest valued enumerator in
  ///   the ValueTy enum.
  unsigned getValueID() const {
    return SubclassID;
  }

  /// Return the raw optional flags value contained in this value.
  ///
  /// This should only be used when testing two Values for equivalence.
  unsigned getRawSubclassOptionalData() const {
    return SubclassOptionalData;
  }

  /// Clear the optional flags contained in this value.
  void clearSubclassOptionalData() {
    SubclassOptionalData = 0;
  }

  /// Check the optional flags for equality.
  bool hasSameSubclassOptionalData(const Value *V) const {
    return SubclassOptionalData == V->SubclassOptionalData;
  }

  /// Return true if there is a value handle associated with this value.
  bool hasValueHandle() const { return HasValueHandle; }

  /// Return true if there is metadata referencing this value.
  bool isUsedByMetadata() const { return IsUsedByMD; }

protected:
  /// Get the current metadata attachments for the given kind, if any.
  ///
  /// These functions require that the value have at most a single attachment
  /// of the given kind, and return \c nullptr if such an attachment is missing.
  /// @{
  MDNode *getMetadata(unsigned KindID) const;
  MDNode *getMetadata(StringRef Kind) const;
  /// @}

  /// Appends all attachments with the given ID to \c MDs in insertion order.
  /// If the Value has no attachments with the given ID, or if ID is invalid,
  /// leaves MDs unchanged.
  /// @{
  void getMetadata(unsigned KindID, SmallVectorImpl<MDNode *> &MDs) const;
  void getMetadata(StringRef Kind, SmallVectorImpl<MDNode *> &MDs) const;
  /// @}

  /// Appends all metadata attached to this value to \c MDs, sorting by
  /// KindID. The first element of each pair returned is the KindID, the second
  /// element is the metadata value. Attachments with the same ID appear in
  /// insertion order.
  void
  getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;

  /// Return true if this value has any metadata attached to it.
  bool hasMetadata() const { return (bool)HasMetadata; }

  /// Return true if this value has the given type of metadata attached.
  /// @{
  bool hasMetadata(unsigned KindID) const {
    return getMetadata(KindID) != nullptr;
  }
  bool hasMetadata(StringRef Kind) const {
    return getMetadata(Kind) != nullptr;
  }
  /// @}

  /// Set a particular kind of metadata attachment.
  ///
  /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
  /// replacing it if it already exists.
  /// @{
  void setMetadata(unsigned KindID, MDNode *Node);
  void setMetadata(StringRef Kind, MDNode *Node);
  /// @}

  /// Add a metadata attachment.
  /// @{
  void addMetadata(unsigned KindID, MDNode &MD);
  void addMetadata(StringRef Kind, MDNode &MD);
  /// @}

  /// Erase all metadata attachments with the given kind.
  ///
  /// \returns true if any metadata was removed.
  bool eraseMetadata(unsigned KindID);

  /// Erase all metadata attached to this Value.
  void clearMetadata();

public:
  /// Return true if this value is a swifterror value.
  ///
  /// swifterror values can be either a function argument or an alloca with a
  /// swifterror attribute.
  bool isSwiftError() const;

  /// Strip off pointer casts, all-zero GEPs and address space casts.
  ///
  /// Returns the original uncasted value.  If this is called on a non-pointer
  /// value, it returns 'this'.
  const Value *stripPointerCasts() const;
  Value *stripPointerCasts() {
    return const_cast<Value *>(
        static_cast<const Value *>(this)->stripPointerCasts());
  }

  /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
  ///
  /// Returns the original uncasted value.  If this is called on a non-pointer
  /// value, it returns 'this'.
  const Value *stripPointerCastsAndAliases() const;
  Value *stripPointerCastsAndAliases() {
    return const_cast<Value *>(
        static_cast<const Value *>(this)->stripPointerCastsAndAliases());
  }

  /// Strip off pointer casts, all-zero GEPs and address space casts
  /// but ensures the representation of the result stays the same.
  ///
  /// Returns the original uncasted value with the same representation. If this
  /// is called on a non-pointer value, it returns 'this'.
  const Value *stripPointerCastsSameRepresentation() const;
  Value *stripPointerCastsSameRepresentation() {
    return const_cast<Value *>(static_cast<const Value *>(this)
                                   ->stripPointerCastsSameRepresentation());
  }

  /// Strip off pointer casts, all-zero GEPs, single-argument phi nodes and
  /// invariant group info.
  ///
  /// Returns the original uncasted value.  If this is called on a non-pointer
  /// value, it returns 'this'. This function should be used only in
  /// Alias analysis.
  const Value *stripPointerCastsForAliasAnalysis() const;
  Value *stripPointerCastsForAliasAnalysis() {
    return const_cast<Value *>(static_cast<const Value *>(this)
                                   ->stripPointerCastsForAliasAnalysis());
  }

  /// Strip off pointer casts and all-constant inbounds GEPs.
  ///
  /// Returns the original pointer value.  If this is called on a non-pointer
  /// value, it returns 'this'.
  const Value *stripInBoundsConstantOffsets() const;
  Value *stripInBoundsConstantOffsets() {
    return const_cast<Value *>(
              static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
  }

  /// Accumulate the constant offset this value has compared to a base pointer.
  /// Only offsets from 'getelementptr' instructions (GEPs) are accumulated,
  /// but other instructions, e.g., casts, are stripped away as well.
  /// The accumulated constant offset is added to \p Offset and the base
  /// pointer is returned.
  ///
  /// The APInt \p Offset has to have a bit-width equal to the IntPtr type for
  /// the address space of 'this' pointer value, e.g., use
  /// DataLayout::getIndexTypeSizeInBits(Ty).
  ///
  /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
  /// accumulated even if the GEP is not "inbounds".
  ///
  /// If \p AllowInvariantGroup is true then this method also looks through
  /// strip.invariant.group and launder.invariant.group intrinsics.
  ///
  /// If \p ExternalAnalysis is provided it will be used to calculate an offset
  /// when an operand of the GEP is not constant.
  /// For example, for a value \p ExternalAnalysis might try to calculate a
  /// lower bound. If \p ExternalAnalysis is successful, it should return true.
  ///
  /// If this is called on a non-pointer value, it returns 'this' and the
  /// \p Offset is not modified.
  ///
  /// Note that this function will never return a nullptr. It will also never
  /// manipulate the \p Offset in a way that would not match the difference
  /// between the underlying value and the returned one. Thus, if no constant
  /// offset was found, the returned value is the underlying one and \p Offset
  /// is unchanged.
  const Value *stripAndAccumulateConstantOffsets(
      const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
      bool AllowInvariantGroup = false,
      function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
          nullptr) const;
  Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
                                           bool AllowNonInbounds,
                                           bool AllowInvariantGroup = false) {
    return const_cast<Value *>(
        static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
            DL, Offset, AllowNonInbounds, AllowInvariantGroup));
  }
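
  // Example (illustrative sketch; DL is assumed to be the module's DataLayout
  // and Ptr an existing pointer-typed Value*):
  //
  //   APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  //   Value *Base = Ptr->stripAndAccumulateConstantOffsets(
  //       DL, Offset, /*AllowNonInbounds=*/true);
  //   // Base plus Offset (in bytes) is equivalent to Ptr.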

  /// This is a wrapper around stripAndAccumulateConstantOffsets with the
  /// in-bounds requirement set to false.
  const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
                                                         APInt &Offset) const {
    return stripAndAccumulateConstantOffsets(DL, Offset,
                                             /* AllowNonInbounds */ false);
  }
  Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
                                                   APInt &Offset) {
    return stripAndAccumulateConstantOffsets(DL, Offset,
                                             /* AllowNonInbounds */ false);
  }

  /// Strip off pointer casts and inbounds GEPs.
  ///
  /// Returns the original pointer value.  If this is called on a non-pointer
  /// value, it returns 'this'.
  const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
                                        [](const Value *) {}) const;
  inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
                                  [](const Value *) {}) {
    return const_cast<Value *>(
        static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
  }

  /// If this ptr is provably equal to \p Other plus a constant offset, return
  /// that offset in bytes. Essentially `ptr this` subtract `ptr Other`.
  std::optional<int64_t> getPointerOffsetFrom(const Value *Other,
                                              const DataLayout &DL) const;

  /// Return true if the memory object referred to by V can by freed in the
  /// scope for which the SSA value defining the allocation is statically
  /// defined.  E.g.  deallocation after the static scope of a value does not
  /// count, but a deallocation before that does.
  bool canBeFreed() const;

  /// Returns the number of bytes known to be dereferenceable for the
  /// pointer value.
  ///
  /// If CanBeNull is set by this function the pointer can either be null or be
  /// dereferenceable up to the returned number of bytes.
  ///
  /// If CanBeFreed is true, the pointer is known to be dereferenceable at
  /// point of definition only.  Caller must prove that allocation is not
  /// deallocated between point of definition and use.
  uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
                                          bool &CanBeNull,
                                          bool &CanBeFreed) const;

  /// Returns an alignment of the pointer value.
  ///
  /// Returns an alignment which is either specified explicitly, e.g. via
  /// align attribute of a function argument, or guaranteed by DataLayout.
  Align getPointerAlignment(const DataLayout &DL) const;

  /// Translate PHI node to its predecessor from the given basic block.
  ///
  /// If this value is a PHI node with CurBB as its parent, return the value in
  /// the PHI node corresponding to PredBB.  If not, return ourself.  This is
  /// useful if you want to know the value something has in a predecessor
  /// block.
  const Value *DoPHITranslation(const BasicBlock *CurBB,
                                const BasicBlock *PredBB) const;
  Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
    return const_cast<Value *>(
             static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
  }

  /// The maximum alignment for instructions.
  ///
  /// This is the greatest alignment value supported by load, store, and alloca
  /// instructions, and global values.
  static constexpr unsigned MaxAlignmentExponent = 32;
  static constexpr uint64_t MaximumAlignment = 1ULL << MaxAlignmentExponent;

  /// Mutate the type of this Value to be of the specified type.
  ///
  /// Note that this is an extremely dangerous operation which can create
  /// completely invalid IR very easily.  It is strongly recommended that you
  /// recreate IR objects with the right types instead of mutating them in
  /// place.
  void mutateType(Type *Ty) {
    VTy = Ty;
  }

  /// Sort the use-list.
  ///
  /// Sorts the Value's use-list by Cmp using a stable mergesort.  Cmp is
  /// expected to compare two \a Use references.
  template <class Compare> void sortUseList(Compare Cmp);

  /// Reverse the use-list.
  void reverseUseList();

private:
  /// Merge two lists together.
  ///
  /// Merges \c L and \c R using \c Cmp.  To enable stable sorts, always pushes
  /// "equal" items from L before items from R.
  ///
  /// \return the first element in the list.
  ///
  /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
  template <class Compare>
  static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
    Use *Merged;
    Use **Next = &Merged;

    while (true) {
      if (!L) {
        *Next = R;
        break;
      }
      if (!R) {
        *Next = L;
        break;
      }
      if (Cmp(*R, *L)) {
        *Next = R;
        Next = &R->Next;
        R = R->Next;
      } else {
        *Next = L;
        Next = &L->Next;
        L = L->Next;
      }
    }

    return Merged;
  }

protected:
  unsigned short getSubclassDataFromValue() const { return SubclassData; }
  void setValueSubclassData(unsigned short D) { SubclassData = D; }
};

struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };

/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
/// Those don't work because Value and Instruction's destructors are protected,
/// aren't virtual, and won't destroy the complete object.
using unique_value = std::unique_ptr<Value, ValueDeleter>;
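
// Example (illustrative sketch; LHS and RHS are assumed to be existing values
// of the same integer type):
//
//   unique_value Tmp(BinaryOperator::CreateAdd(LHS, RHS));
//   // deleteValue() runs automatically when Tmp goes out of scope.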

inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
  V.print(OS);
  return OS;
}

void Use::set(Value *V) {
  if (Val) removeFromList();
  Val = V;
  if (V) V->addUse(*this);
}

Value *Use::operator=(Value *RHS) {
  set(RHS);
  return RHS;
}

const Use &Use::operator=(const Use &RHS) {
  set(RHS.Val);
  return *this;
}

template <class Compare> void Value::sortUseList(Compare Cmp) {
  if (!UseList || !UseList->Next)
    // No need to sort 0 or 1 uses.
    return;

  // Note: this function completely ignores Prev pointers until the end when
  // they're fixed en masse.

  // Create a binomial vector of sorted lists, visiting uses one at a time and
  // merging lists as necessary.
  const unsigned MaxSlots = 32;
  Use *Slots[MaxSlots];

  // Collect the first use, turning it into a single-item list.
  Use *Next = UseList->Next;
  UseList->Next = nullptr;
  unsigned NumSlots = 1;
  Slots[0] = UseList;

  // Collect all but the last use.
  while (Next->Next) {
    Use *Current = Next;
    Next = Current->Next;

    // Turn Current into a single-item list.
    Current->Next = nullptr;

    // Save Current in the first available slot, merging on collisions.
    unsigned I;
    for (I = 0; I < NumSlots; ++I) {
      if (!Slots[I])
        break;

      // Merge two lists, doubling the size of Current and emptying slot I.
      //
      // Since the uses in Slots[I] originally preceded those in Current, send
      // Slots[I] in as the left parameter to maintain a stable sort.
      Current = mergeUseLists(Slots[I], Current, Cmp);
      Slots[I] = nullptr;
    }
    // Check if this is a new slot.
    if (I == NumSlots) {
      ++NumSlots;
      assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
    }

    // Found an open slot.
    Slots[I] = Current;
  }

  // Merge all the lists together.
  assert(Next && "Expected one more Use");
  assert(!Next->Next && "Expected only one Use");
  UseList = Next;
  for (unsigned I = 0; I < NumSlots; ++I)
    if (Slots[I])
      // Since the uses in Slots[I] originally preceded those in UseList, send
      // Slots[I] in as the left parameter to maintain a stable sort.
      UseList = mergeUseLists(Slots[I], UseList, Cmp);

  // Fix the Prev pointers.
  for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
    I->Prev = Prev;
    Prev = &I->Next;
  }
}
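
// Example (illustrative sketch; V is assumed to be an existing Value*): group
// V's use-list by user identity. Cmp receives two Use references.
//
//   V->sortUseList([](const Use &L, const Use &R) {
//     return L.getUser() < R.getUser();
//   });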

// isa - Provide some specializations of isa so that we don't have to include
// the subtype header files to test whether the value is a subclass...
//
template <> struct isa_impl<Constant, Value> {
  static inline bool doit(const Value &Val) {
    static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
    return Val.getValueID() <= Value::ConstantLastVal;
  }
};

template <> struct isa_impl<ConstantData, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() >= Value::ConstantDataFirstVal &&
           Val.getValueID() <= Value::ConstantDataLastVal;
  }
};

template <> struct isa_impl<ConstantAggregate, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
           Val.getValueID() <= Value::ConstantAggregateLastVal;
  }
};

template <> struct isa_impl<Argument, Value> {
  static inline bool doit (const Value &Val) {
    return Val.getValueID() == Value::ArgumentVal;
  }
};

template <> struct isa_impl<InlineAsm, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::InlineAsmVal;
  }
};

template <> struct isa_impl<Instruction, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() >= Value::InstructionVal;
  }
};

template <> struct isa_impl<BasicBlock, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::BasicBlockVal;
  }
};

template <> struct isa_impl<Function, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::FunctionVal;
  }
};

template <> struct isa_impl<GlobalVariable, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::GlobalVariableVal;
  }
};

template <> struct isa_impl<GlobalAlias, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::GlobalAliasVal;
  }
};

template <> struct isa_impl<GlobalIFunc, Value> {
  static inline bool doit(const Value &Val) {
    return Val.getValueID() == Value::GlobalIFuncVal;
  }
};

template <> struct isa_impl<GlobalValue, Value> {
  static inline bool doit(const Value &Val) {
    return isa<GlobalObject>(Val) || isa<GlobalAlias>(Val);
  }
};

template <> struct isa_impl<GlobalObject, Value> {
  static inline bool doit(const Value &Val) {
    return isa<GlobalVariable>(Val) || isa<Function>(Val) ||
           isa<GlobalIFunc>(Val);
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)

// Specialized opaque value conversions.
inline Value **unwrap(LLVMValueRef *Vals) {
  return reinterpret_cast<Value**>(Vals);
}

template<typename T>
inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
#ifndef NDEBUG
  for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
    unwrap<T>(*I); // For side effect of calling assert on invalid usage.
#endif
  (void)Length;
  return reinterpret_cast<T**>(Vals);
}

inline LLVMValueRef *wrap(const Value **Vals) {
  return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
}

} // end namespace llvm

#endif // LLVM_IR_VALUE_H
//===- llvm/IR/Comdat.h - Comdat definitions --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declaration of the Comdat class, which represents a
/// single COMDAT in LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_COMDAT_H
#define LLVM_IR_COMDAT_H

#include "llvm-c/Types.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CBindingWrapping.h"

namespace llvm {

class GlobalObject;
class raw_ostream;
class StringRef;
template <typename ValueTy> class StringMapEntry;

// This is a Name X SelectionKind pair. The reason for having this be an
// independent object instead of just adding the name and the SelectionKind
// to a GlobalObject is that it is invalid to have two Comdats with the same
// name but different SelectionKind. This structure makes that unrepresentable.
class Comdat {
public:
  enum SelectionKind {
    Any,           ///< The linker may choose any COMDAT.
    ExactMatch,    ///< The data referenced by the COMDAT must be the same.
    Largest,       ///< The linker will choose the largest COMDAT.
    NoDeduplicate, ///< No deduplication is performed.
    SameSize,      ///< The data referenced by the COMDAT must be the same size.
  };

  Comdat(const Comdat &) = delete;
  Comdat(Comdat &&C);

  SelectionKind getSelectionKind() const { return SK; }
  void setSelectionKind(SelectionKind Val) { SK = Val; }
  StringRef getName() const;
  void print(raw_ostream &OS, bool IsForDebug = false) const;
  void dump() const;
  const SmallPtrSetImpl<GlobalObject *> &getUsers() const { return Users; }

private:
  friend class Module;
  friend class GlobalObject;

  Comdat();
  void addUser(GlobalObject *GO);
  void removeUser(GlobalObject *GO);

  // Points to the map in Module.
  StringMapEntry<Comdat> *Name = nullptr;
  SelectionKind SK = Any;
  // Globals using this comdat.
  SmallPtrSet<GlobalObject *, 2> Users;
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Comdat, LLVMComdatRef)

inline raw_ostream &operator<<(raw_ostream &OS, const Comdat &C) {
  C.print(OS);
  return OS;
}
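
// A minimal sketch of typical Comdat use (illustrative only; assumes a Module
// `M` and a GlobalVariable *GV obtained elsewhere; the Comdat is owned by the
// Module and keyed by name via Module::getOrInsertComdat()):
//
//   Comdat *C = M.getOrInsertComdat("foo");
//   C->setSelectionKind(Comdat::Any);
//   GV->setComdat(C); // GV now appears in C->getUsers()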

} // end namespace llvm

#endif // LLVM_IR_COMDAT_H
//===- llvm/BasicBlock.h - Represent a basic block in the VM ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the BasicBlock class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_BASICBLOCK_H
#define LLVM_IR_BASICBLOCK_H

#include "llvm-c/Types.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <optional>

namespace llvm {

class AssemblyAnnotationWriter;
class CallInst;
class Function;
class LandingPadInst;
class LLVMContext;
class Module;
class PHINode;
class ValueSymbolTable;

/// LLVM Basic Block Representation
///
/// This represents a single basic block in LLVM. A basic block is simply a
/// container of instructions that execute sequentially. Basic blocks are Values
/// because they are referenced by instructions such as branches and switch
/// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block
/// represents a label to which a branch can jump.
///
/// A well formed basic block is formed of a list of non-terminating
/// instructions followed by a single terminator instruction. Terminator
/// instructions may not occur in the middle of basic blocks, and must terminate
/// the blocks. The BasicBlock class allows malformed basic blocks to occur
/// because it may be useful in the intermediate stage of constructing or
/// modifying a program. However, the verifier will ensure that basic blocks are
/// "well formed".
class BasicBlock final : public Value, // Basic blocks are data objects also
                         public ilist_node_with_parent<BasicBlock, Function> {
public:
  using InstListType = SymbolTableList<Instruction>;

private:
  friend class BlockAddress;
  friend class SymbolTableListTraits<BasicBlock>;

  InstListType InstList;
  Function *Parent;

  void setParent(Function *parent);

  /// Constructor.
  ///
  /// If the function parameter is specified, the basic block is automatically
  /// inserted at either the end of the function (if InsertBefore is null), or
  /// before the specified basic block.
  explicit BasicBlock(LLVMContext &C, const Twine &Name = "",
                      Function *Parent = nullptr,
                      BasicBlock *InsertBefore = nullptr);

public:
  BasicBlock(const BasicBlock &) = delete;
  BasicBlock &operator=(const BasicBlock &) = delete;
  ~BasicBlock();

  /// Get the context in which this basic block lives.
  LLVMContext &getContext() const;

  /// Instruction iterators...
  using iterator = InstListType::iterator;
  using const_iterator = InstListType::const_iterator;
  using reverse_iterator = InstListType::reverse_iterator;
  using const_reverse_iterator = InstListType::const_reverse_iterator;

  // These functions and classes need access to the instruction list.
  friend void Instruction::removeFromParent();
  friend iplist<Instruction>::iterator Instruction::eraseFromParent();
  friend BasicBlock::iterator Instruction::insertInto(BasicBlock *BB,
                                                      BasicBlock::iterator It);
  friend class llvm::SymbolTableListTraits<llvm::Instruction>;
  friend class llvm::ilist_node_with_parent<llvm::Instruction, llvm::BasicBlock>;

  /// Creates a new BasicBlock.
  ///
  /// If the Parent parameter is specified, the basic block is automatically
  /// inserted at either the end of the function (if InsertBefore is null), or
  /// before the specified basic block.
  static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
                            Function *Parent = nullptr,
                            BasicBlock *InsertBefore = nullptr) {
    return new BasicBlock(Context, Name, Parent, InsertBefore);
  }
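
  // A minimal sketch of Create() in use (illustrative only; `F` is a
  // hypothetical Function* and `Ctx` its LLVMContext):
  //
  //   BasicBlock *Exit = BasicBlock::Create(Ctx, "exit", F);  // appended last
  //   BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F, Exit); // before Exit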

  /// Return the enclosing function, or null if none.
  const Function *getParent() const { return Parent; }
        Function *getParent()       { return Parent; }

  /// Return the module owning the function this basic block belongs to, or
  /// nullptr if the function does not have a module.
  ///
  /// Note: this is undefined behavior if the block does not have a parent.
  const Module *getModule() const;
  Module *getModule() {
    return const_cast<Module *>(
                            static_cast<const BasicBlock *>(this)->getModule());
  }

  /// Returns the terminator instruction if the block is well formed or null
  /// if the block is not well formed.
  const Instruction *getTerminator() const LLVM_READONLY {
    if (InstList.empty() || !InstList.back().isTerminator())
      return nullptr;
    return &InstList.back();
  }
  Instruction *getTerminator() {
    return const_cast<Instruction *>(
        static_cast<const BasicBlock *>(this)->getTerminator());
  }

  /// Returns the call instruction calling \@llvm.experimental.deoptimize
  /// prior to the terminating return instruction of this basic block, if such
  /// a call is present.  Otherwise, returns null.
  const CallInst *getTerminatingDeoptimizeCall() const;
  CallInst *getTerminatingDeoptimizeCall() {
    return const_cast<CallInst *>(
         static_cast<const BasicBlock *>(this)->getTerminatingDeoptimizeCall());
  }

  /// Returns the call instruction calling \@llvm.experimental.deoptimize
  /// that is present either in the current basic block or in a block that is
  /// the unique successor of the current block, if such a call is present.
  /// Otherwise, returns null.
  const CallInst *getPostdominatingDeoptimizeCall() const;
  CallInst *getPostdominatingDeoptimizeCall() {
    return const_cast<CallInst *>(
         static_cast<const BasicBlock *>(this)->getPostdominatingDeoptimizeCall());
  }

  /// Returns the call instruction marked 'musttail' prior to the terminating
  /// return instruction of this basic block, if such a call is present.
  /// Otherwise, returns null.
  const CallInst *getTerminatingMustTailCall() const;
  CallInst *getTerminatingMustTailCall() {
    return const_cast<CallInst *>(
           static_cast<const BasicBlock *>(this)->getTerminatingMustTailCall());
  }

  /// Returns a pointer to the first instruction in this block that is not a
  /// PHINode instruction.
  ///
  /// When adding instructions to the beginning of the basic block, they should
  /// be added before the returned value, not before the first instruction,
  /// which might be a PHI. Returns null if there's no non-PHI instruction.
  const Instruction* getFirstNonPHI() const;
  Instruction* getFirstNonPHI() {
    return const_cast<Instruction *>(
                       static_cast<const BasicBlock *>(this)->getFirstNonPHI());
  }

  /// Returns a pointer to the first instruction in this block that is not a
  /// PHINode or a debug intrinsic, or any pseudo operation if \c SkipPseudoOp
  /// is true.
  const Instruction *getFirstNonPHIOrDbg(bool SkipPseudoOp = true) const;
  Instruction *getFirstNonPHIOrDbg(bool SkipPseudoOp = true) {
    return const_cast<Instruction *>(
        static_cast<const BasicBlock *>(this)->getFirstNonPHIOrDbg(
            SkipPseudoOp));
  }

  /// Returns a pointer to the first instruction in this block that is not a
  /// PHINode, a debug intrinsic, or a lifetime intrinsic, or any pseudo
  /// operation if \c SkipPseudoOp is true.
  const Instruction *
  getFirstNonPHIOrDbgOrLifetime(bool SkipPseudoOp = true) const;
  Instruction *getFirstNonPHIOrDbgOrLifetime(bool SkipPseudoOp = true) {
    return const_cast<Instruction *>(
        static_cast<const BasicBlock *>(this)->getFirstNonPHIOrDbgOrLifetime(
            SkipPseudoOp));
  }

  /// Returns an iterator to the first instruction in this block that is
  /// suitable for inserting a non-PHI instruction.
  ///
  /// In particular, it skips all PHIs and LandingPad instructions.
  const_iterator getFirstInsertionPt() const;
  iterator getFirstInsertionPt() {
    return static_cast<const BasicBlock *>(this)
                                          ->getFirstInsertionPt().getNonConst();
  }
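
  // A common positioning pattern built on getFirstInsertionPt() (a sketch;
  // assumes an IRBuilder<> `Builder` from llvm/IR/IRBuilder.h and a
  // BasicBlock *BB; not part of this header's API):
  //
  //   Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
  //   // New non-PHI instructions now land after any PHIs and landingpad.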

  /// Returns an iterator to the first instruction in this block that is
  /// not a PHINode, a debug intrinsic, a static alloca or any pseudo operation.
  const_iterator getFirstNonPHIOrDbgOrAlloca() const;
  iterator getFirstNonPHIOrDbgOrAlloca() {
    return static_cast<const BasicBlock *>(this)
        ->getFirstNonPHIOrDbgOrAlloca()
        .getNonConst();
  }

  /// Returns the first potential AsynchEH faulting instruction. Currently it
  /// checks for loads/stores (which may dereference a null pointer) and
  /// calls/invokes (which may propagate exceptions).
  const Instruction *getFirstMayFaultInst() const;
  Instruction *getFirstMayFaultInst() {
    return const_cast<Instruction *>(
        static_cast<const BasicBlock *>(this)->getFirstMayFaultInst());
  }

  /// Return a const iterator range over the instructions in the block, skipping
  /// any debug instructions. Skip any pseudo operations as well if \c
  /// SkipPseudoOp is true.
  iterator_range<filter_iterator<BasicBlock::const_iterator,
                                 std::function<bool(const Instruction &)>>>
  instructionsWithoutDebug(bool SkipPseudoOp = true) const;

  /// Return an iterator range over the instructions in the block, skipping any
  /// debug instructions. Skip any pseudo operations as well if \c
  /// SkipPseudoOp is true.
  iterator_range<
      filter_iterator<BasicBlock::iterator, std::function<bool(Instruction &)>>>
  instructionsWithoutDebug(bool SkipPseudoOp = true);

  /// Return the size of the basic block ignoring debug instructions
  filter_iterator<BasicBlock::const_iterator,
                  std::function<bool(const Instruction &)>>::difference_type
  sizeWithoutDebug() const;

  /// Unlink 'this' from the containing function, but do not delete it.
  void removeFromParent();

  /// Unlink 'this' from the containing function and delete it.
  ///
  /// \returns an iterator pointing to the element after the erased one.
  SymbolTableList<BasicBlock>::iterator eraseFromParent();

  /// Unlink this basic block from its current function and insert it into
  /// the function that \p MovePos lives in, right before \p MovePos.
  inline void moveBefore(BasicBlock *MovePos) {
    moveBefore(MovePos->getIterator());
  }
  void moveBefore(SymbolTableList<BasicBlock>::iterator MovePos);

  /// Unlink this basic block from its current function and insert it
  /// right after \p MovePos in the function \p MovePos lives in.
  void moveAfter(BasicBlock *MovePos);

  /// Insert unlinked basic block into a function.
  ///
  /// Inserts an unlinked basic block into \c Parent.  If \c InsertBefore is
  /// provided, inserts before that basic block, otherwise inserts at the end.
  ///
  /// \pre \a getParent() is \c nullptr.
  void insertInto(Function *Parent, BasicBlock *InsertBefore = nullptr);

  /// Return the predecessor of this block if it has a single predecessor
  /// block. Otherwise return a null pointer.
  const BasicBlock *getSinglePredecessor() const;
  BasicBlock *getSinglePredecessor() {
    return const_cast<BasicBlock *>(
                 static_cast<const BasicBlock *>(this)->getSinglePredecessor());
  }

  /// Return the predecessor of this block if it has a unique predecessor
  /// block. Otherwise return a null pointer.
  ///
  /// Note that unique predecessor doesn't mean single edge, there can be
  /// multiple edges from the unique predecessor to this block (for example a
  /// switch statement with multiple cases having the same destination).
  const BasicBlock *getUniquePredecessor() const;
  BasicBlock *getUniquePredecessor() {
    return const_cast<BasicBlock *>(
                 static_cast<const BasicBlock *>(this)->getUniquePredecessor());
  }

  /// Return true if this block has exactly N predecessors.
  bool hasNPredecessors(unsigned N) const;

  /// Return true if this block has N predecessors or more.
  bool hasNPredecessorsOrMore(unsigned N) const;

  /// Return the successor of this block if it has a single successor.
  /// Otherwise return a null pointer.
  ///
  /// This method is analogous to getSinglePredecessor above.
  const BasicBlock *getSingleSuccessor() const;
  BasicBlock *getSingleSuccessor() {
    return const_cast<BasicBlock *>(
                   static_cast<const BasicBlock *>(this)->getSingleSuccessor());
  }

  /// Return the successor of this block if it has a unique successor.
  /// Otherwise return a null pointer.
  ///
  /// This method is analogous to getUniquePredecessor above.
  const BasicBlock *getUniqueSuccessor() const;
  BasicBlock *getUniqueSuccessor() {
    return const_cast<BasicBlock *>(
                   static_cast<const BasicBlock *>(this)->getUniqueSuccessor());
  }

  /// Print the basic block to an output stream with an optional
  /// AssemblyAnnotationWriter.
  void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr,
             bool ShouldPreserveUseListOrder = false,
             bool IsForDebug = false) const;

  //===--------------------------------------------------------------------===//
  /// Instruction iterator methods
  ///
  inline iterator                begin()       { return InstList.begin(); }
  inline const_iterator          begin() const { return InstList.begin(); }
  inline iterator                end  ()       { return InstList.end();   }
  inline const_iterator          end  () const { return InstList.end();   }

  inline reverse_iterator        rbegin()       { return InstList.rbegin(); }
  inline const_reverse_iterator  rbegin() const { return InstList.rbegin(); }
  inline reverse_iterator        rend  ()       { return InstList.rend();   }
  inline const_reverse_iterator  rend  () const { return InstList.rend();   }

  inline size_t                   size() const { return InstList.size();  }
  inline bool                    empty() const { return InstList.empty(); }
  inline const Instruction      &front() const { return InstList.front(); }
  inline       Instruction      &front()       { return InstList.front(); }
  inline const Instruction       &back() const { return InstList.back();  }
  inline       Instruction       &back()       { return InstList.back();  }

  /// Iterator to walk just the phi nodes in the basic block.
  template <typename PHINodeT = PHINode, typename BBIteratorT = iterator>
  class phi_iterator_impl
      : public iterator_facade_base<phi_iterator_impl<PHINodeT, BBIteratorT>,
                                    std::forward_iterator_tag, PHINodeT> {
    friend BasicBlock;

    PHINodeT *PN;

    phi_iterator_impl(PHINodeT *PN) : PN(PN) {}

  public:
    // Allow default construction to build variables, but this doesn't build
    // a useful iterator.
    phi_iterator_impl() = default;

    // Allow conversion between instantiations where valid.
    template <typename PHINodeU, typename BBIteratorU,
              typename = std::enable_if_t<
                  std::is_convertible<PHINodeU *, PHINodeT *>::value>>
    phi_iterator_impl(const phi_iterator_impl<PHINodeU, BBIteratorU> &Arg)
        : PN(Arg.PN) {}

    bool operator==(const phi_iterator_impl &Arg) const { return PN == Arg.PN; }

    PHINodeT &operator*() const { return *PN; }

    using phi_iterator_impl::iterator_facade_base::operator++;
    phi_iterator_impl &operator++() {
      assert(PN && "Cannot increment the end iterator!");
      PN = dyn_cast<PHINodeT>(std::next(BBIteratorT(PN)));
      return *this;
    }
  };
  using phi_iterator = phi_iterator_impl<>;
  using const_phi_iterator =
      phi_iterator_impl<const PHINode, BasicBlock::const_iterator>;

  /// Returns a range that iterates over the phis in the basic block.
  ///
  /// Note that this cannot be used with basic blocks that have no terminator.
  iterator_range<const_phi_iterator> phis() const {
    return const_cast<BasicBlock *>(this)->phis();
  }
  iterator_range<phi_iterator> phis();
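
  // A minimal sketch of phis() in use (illustrative only; `BB` is a
  // hypothetical, well-formed BasicBlock*; note the terminator requirement
  // documented above):
  //
  //   for (PHINode &PN : BB->phis())
  //     if (PN.getNumIncomingValues() == 1)
  //       PN.replaceAllUsesWith(PN.getIncomingValue(0));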

private:
  /// Return the underlying instruction list container.
  /// This is deliberately private because we have implemented an adequate set
  /// of functions to modify the list, including BasicBlock::splice(),
  /// BasicBlock::erase(), Instruction::insertInto() etc.
  const InstListType &getInstList() const { return InstList; }
  InstListType &getInstList() { return InstList; }

  /// Returns a pointer to a member of the instruction list.
  /// This is private on purpose, just like `getInstList()`.
  static InstListType BasicBlock::*getSublistAccess(Instruction *) {
    return &BasicBlock::InstList;
  }

public:
  /// Returns a pointer to the symbol table if one exists.
  ValueSymbolTable *getValueSymbolTable();

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    return V->getValueID() == Value::BasicBlockVal;
  }

  /// Cause all subinstructions to "let go" of all the references that said
  /// subinstructions are maintaining.
  ///
  /// This allows one to 'delete' a whole class at a time, even though there may
  /// be circular references... first all references are dropped, and all use
  /// counts go to zero.  Then everything is delete'd for real.  Note that no
  /// operations are valid on an object that has "dropped all references",
  /// except operator delete.
  void dropAllReferences();

  /// Update PHI nodes in this BasicBlock before removal of predecessor \p Pred.
  /// Note that this function does not actually remove the predecessor.
  ///
  /// If \p KeepOneInputPHIs is true then don't remove PHIs that are left with
  /// zero or one incoming values, and don't simplify PHIs with all incoming
  /// values the same.
  void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs = false);

  bool canSplitPredecessors() const;

  /// Split the basic block into two basic blocks at the specified instruction.
  ///
  /// If \p Before is true, splitBasicBlockBefore handles the
  /// block splitting. Otherwise, execution proceeds as described below.
  ///
  /// Note that all instructions BEFORE the specified iterator
  /// stay as part of the original basic block, an unconditional branch is added
  /// to the original BB, and the rest of the instructions in the BB are moved
  /// to the new BB, including the old terminator.  The newly formed basic block
  /// is returned. This function invalidates the specified iterator.
  ///
  /// Note that this only works on well formed basic blocks (must have a
  /// terminator), and \p 'I' must not be the end of instruction list (which
  /// would cause a degenerate basic block to be formed, having a terminator
  /// inside of the basic block).
  ///
  /// Also note that this doesn't preserve any passes. To split blocks while
  /// keeping loop information consistent, use the SplitBlock utility function.
  BasicBlock *splitBasicBlock(iterator I, const Twine &BBName = "",
                              bool Before = false);
  BasicBlock *splitBasicBlock(Instruction *I, const Twine &BBName = "",
                              bool Before = false) {
    return splitBasicBlock(I->getIterator(), BBName, Before);
  }
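
  // A minimal sketch (illustrative only; `BB` and `I` are hypothetical):
  // everything before I stays in BB, BB receives an unconditional branch to
  // the returned block, and I through the old terminator move into it:
  //
  //   BasicBlock *Tail = BB->splitBasicBlock(I, "tail");
  //   // BB->getTerminator() is now `br label %tail`.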

  /// Split the basic block into two basic blocks at the specified instruction
  /// and insert the new basic blocks as the predecessor of the current block.
  ///
  /// This function ensures all instructions AFTER and including the specified
  /// iterator \p I are part of the original basic block. All Instructions
  /// BEFORE the iterator \p I are moved to the new BB and an unconditional
  /// branch is added to the new BB. The new basic block is returned.
  ///
  /// Note that this only works on well formed basic blocks (must have a
  /// terminator), and \p 'I' must not be the end of instruction list (which
  /// would cause a degenerate basic block to be formed, having a terminator
  /// inside of the basic block).  \p 'I' cannot be an iterator for a PHINode
  /// with multiple incoming blocks.
  ///
  /// Also note that this doesn't preserve any passes. To split blocks while
  /// keeping loop information consistent, use the SplitBlockBefore utility
  /// function.
  BasicBlock *splitBasicBlockBefore(iterator I, const Twine &BBName = "");
  BasicBlock *splitBasicBlockBefore(Instruction *I, const Twine &BBName = "") {
    return splitBasicBlockBefore(I->getIterator(), BBName);
  }

  /// Transfer all instructions from \p FromBB to this basic block at \p ToIt.
  void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB) {
    splice(ToIt, FromBB, FromBB->begin(), FromBB->end());
  }

  /// Transfer one instruction from \p FromBB at \p FromIt to this basic block
  /// at \p ToIt.
  void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB,
              BasicBlock::iterator FromIt) {
    auto FromItNext = std::next(FromIt);
    // Single-element splice is a noop if destination == source.
    if (ToIt == FromIt || ToIt == FromItNext)
      return;
    splice(ToIt, FromBB, FromIt, FromItNext);
  }

  /// Transfer a range of instructions that belong to \p FromBB from \p
  /// FromBeginIt to \p FromEndIt, to this basic block at \p ToIt.
  void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB,
              BasicBlock::iterator FromBeginIt,
              BasicBlock::iterator FromEndIt);
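
  // A minimal sketch of the three splice() overloads (illustrative only;
  // `A` and `B` are hypothetical BasicBlock*):
  //
  //   A->splice(A->end(), B);                       // move all of B into A
  //   A->splice(A->begin(), B, B->begin());         // move B's first instruction
  //   A->splice(A->end(), B, B->begin(), B->end()); // move an explicit range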

  /// Erases a range of instructions from \p FromIt to (not including) \p ToIt.
  /// \returns \p ToIt.
  BasicBlock::iterator erase(BasicBlock::iterator FromIt,
                             BasicBlock::iterator ToIt);

  /// Returns true if there are any uses of this basic block other than
  /// direct branches, switches, etc. to it.
  bool hasAddressTaken() const {
    return getBasicBlockBits().BlockAddressRefCount != 0;
  }

  /// Update all phi nodes in this basic block to refer to basic block \p New
  /// instead of basic block \p Old.
  void replacePhiUsesWith(BasicBlock *Old, BasicBlock *New);

  /// Update all phi nodes in this basic block's successors to refer to basic
  /// block \p New instead of basic block \p Old.
  void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New);

  /// Update all phi nodes in this basic block's successors to refer to basic
  /// block \p New instead of to it.
  void replaceSuccessorsPhiUsesWith(BasicBlock *New);

  /// Return true if this basic block is an exception handling block.
  bool isEHPad() const { return getFirstNonPHI()->isEHPad(); }

  /// Return true if this basic block is a landing pad.
  ///
  /// Being a ``landing pad'' means that the basic block is the destination of
  /// the 'unwind' edge of an invoke instruction.
  bool isLandingPad() const;

  /// Return the landingpad instruction associated with the landing pad.
  const LandingPadInst *getLandingPadInst() const;
  LandingPadInst *getLandingPadInst() {
    return const_cast<LandingPadInst *>(
                    static_cast<const BasicBlock *>(this)->getLandingPadInst());
  }

  /// Return true if it is legal to hoist instructions into this block.
  bool isLegalToHoistInto() const;

  /// Return true if this is the entry block of the containing function.
  /// This method can only be used on blocks that have a parent function.
  bool isEntryBlock() const;

  std::optional<uint64_t> getIrrLoopHeaderWeight() const;

  /// Returns true if the Order field of child Instructions is valid.
  bool isInstrOrderValid() const {
    return getBasicBlockBits().InstrOrderValid;
  }

  /// Mark instruction ordering invalid. Done on every instruction insert.
  void invalidateOrders() {
    validateInstrOrdering();
    BasicBlockBits Bits = getBasicBlockBits();
    Bits.InstrOrderValid = false;
    setBasicBlockBits(Bits);
  }

  /// Renumber instructions and mark the ordering as valid.
  void renumberInstructions();

  /// Asserts that instruction order numbers are marked invalid, or that they
  /// are in ascending order. This is constant time if the ordering is invalid,
  /// and linear in the number of instructions if the ordering is valid. Callers
  /// should be careful not to call this in ways that make common operations
  /// O(n^2). For example, it takes O(n) time to assign order numbers to
  /// instructions, so the order should be validated no more than once after
  /// each ordering to ensure that transforms have the same algorithmic
  /// complexity when asserts are enabled as when they are disabled.
  void validateInstrOrdering() const;

private:
#if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__))
// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
// and give the `pack` pragma push semantics.
#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
#else
#define BEGIN_TWO_BYTE_PACK()
#define END_TWO_BYTE_PACK()
#endif

  BEGIN_TWO_BYTE_PACK()
  /// Bitfield to help interpret the bits in Value::SubclassData.
  struct BasicBlockBits {
    unsigned short BlockAddressRefCount : 15;
    unsigned short InstrOrderValid : 1;
  };
  END_TWO_BYTE_PACK()

#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK

  /// Safely reinterpret the subclass data bits to a more useful form.
  BasicBlockBits getBasicBlockBits() const {
    static_assert(sizeof(BasicBlockBits) == sizeof(unsigned short),
                  "too many bits for Value::SubclassData");
    unsigned short ValueData = getSubclassDataFromValue();
    BasicBlockBits AsBits;
    memcpy(&AsBits, &ValueData, sizeof(AsBits));
    return AsBits;
  }

  /// Reinterpret our subclass bits and store them back into Value.
  void setBasicBlockBits(BasicBlockBits AsBits) {
    unsigned short D;
    memcpy(&D, &AsBits, sizeof(D));
    Value::setValueSubclassData(D);
  }

  /// Increment the internal refcount of the number of BlockAddresses
  /// referencing this BasicBlock by \p Amt.
  ///
  /// This is almost always 0, occasionally 1, rarely 2, and essentially
  /// never 3 or more.
  void AdjustBlockAddressRefCount(int Amt) {
    BasicBlockBits Bits = getBasicBlockBits();
    Bits.BlockAddressRefCount += Amt;
    setBasicBlockBits(Bits);
    assert(Bits.BlockAddressRefCount < 255 && "Refcount wrap-around");
  }

  /// Shadow Value::setValueSubclassData with a private forwarding method so
  /// that any future subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)

/// Advance \p It while it points to a debug instruction and return the result.
/// This assumes that \p It is not at the end of a block.
BasicBlock::iterator skipDebugIntrinsics(BasicBlock::iterator It);

#ifdef NDEBUG
/// In release builds, this is a no-op. For !NDEBUG builds, the checks are
/// implemented in the .cpp file to avoid circular header deps.
inline void BasicBlock::validateInstrOrdering() const {}
#endif

} // end namespace llvm

#endif // LLVM_IR_BASICBLOCK_H
//===- IntrinsicsRISCV.td - Defines RISCV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the RISCV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Atomics

// Atomic Intrinsics have multiple versions for different access widths, which
// all follow one of the following signatures (depending on how many arguments
// they require). We carefully instantiate only specific versions of these for
// specific integer widths, rather than using `llvm_anyint_ty`.
//
// In fact, as these intrinsics take `llvm_anyptr_ty`, the given names are the
// canonical names, and the intrinsics used in the code will have a name
// suffixed with the pointer type they are specialised for (denoted `<p>` in the
// names below), in order to avoid type conflicts.

let TargetPrefix = "riscv" in {

  // T @llvm.<name>.T.<p>(any*, T, T, T imm);
  class MaskedAtomicRMWFourArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;
  // T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
  class MaskedAtomicRMWFiveArg<LLVMType itype>
      : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;

  // We define 32-bit and 64-bit variants of the above, where T stands for i32
  // or i64 respectively:
  multiclass MaskedAtomicRMWFourArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFourArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFourArg<llvm_i64_ty>;
  }

  multiclass MaskedAtomicRMWFiveArgIntrinsics {
    // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32, i32 imm);
    def _i32 : MaskedAtomicRMWFiveArg<llvm_i32_ty>;
    // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
    def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
  }

  // These intrinsics are intended only for internal compiler use (i.e. as
  // part of the AtomicExpand pass via the emitMaskedAtomic*Intrinsic hooks).
  // Their names and semantics could change in the future.

  // @llvm.riscv.masked.atomicrmw.*.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_xchg : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_add : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_sub : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_nand : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umax : MaskedAtomicRMWFourArgIntrinsics;
  defm int_riscv_masked_atomicrmw_umin : MaskedAtomicRMWFourArgIntrinsics;
  // Signed min and max need an extra operand to do sign extension with.
  // @llvm.riscv.masked.atomicrmw.{max,min}.{i32,i64}.<p>(
  //   ptr addr, ixlen oparg, ixlen mask, ixlen shamt, ixlenimm ordering)
  defm int_riscv_masked_atomicrmw_max : MaskedAtomicRMWFiveArgIntrinsics;
  defm int_riscv_masked_atomicrmw_min : MaskedAtomicRMWFiveArgIntrinsics;

  // @llvm.riscv.masked.cmpxchg.{i32,i64}.<p>(
  //   ptr addr, ixlen cmpval, ixlen newval, ixlen mask, ixlenimm ordering)
  defm int_riscv_masked_cmpxchg : MaskedAtomicRMWFiveArgIntrinsics;

} // TargetPrefix = "riscv"
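
// As a concrete illustration of the naming scheme above (a sketch; the exact
// mangled suffix depends on the pointer type the intrinsic is specialised
// for), the 32-bit xchg variant over a default-address-space pointer would be
// declared in IR as:
//
//   declare i32 @llvm.riscv.masked.atomicrmw.xchg.i32.p0(
//       ptr, i32, i32, i32 immarg)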

//===----------------------------------------------------------------------===//
// Bitmanip (Bit Manipulation) Extension

let TargetPrefix = "riscv" in {

  class BitManipGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;
  class BitManipGPRGPRIntrinsics
      : DefaultAttrsIntrinsic<[llvm_any_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>],
                              [IntrNoMem, IntrSpeculatable]>;

  // Zbb
  def int_riscv_orc_b : BitManipGPRIntrinsics;

  // Zbc or Zbkc
  def int_riscv_clmul  : BitManipGPRGPRIntrinsics;
  def int_riscv_clmulh : BitManipGPRGPRIntrinsics;

  // Zbc
  def int_riscv_clmulr : BitManipGPRGPRIntrinsics;

  // Zbkb
  def int_riscv_brev8 : BitManipGPRIntrinsics;
  def int_riscv_zip   : BitManipGPRIntrinsics;
  def int_riscv_unzip : BitManipGPRIntrinsics;

  // Zbkx
  def int_riscv_xperm4  : BitManipGPRGPRIntrinsics;
  def int_riscv_xperm8  : BitManipGPRGPRIntrinsics;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vectors

// The intrinsic does not have any operand that must be extended.
defvar NoScalarOperand = 0xF;

// The intrinsic does not have a VL operand.
// (e.g., riscv_vmv_x_s and riscv_vfmv_f_s)
defvar NoVLOperand = 0x1F;

class RISCVVIntrinsic {
  // These intrinsics may accept illegal integer values in their llvm_any_ty
  // operand, so they have to be extended.
  Intrinsic IntrinsicID = !cast<Intrinsic>(NAME);
  bits<4> ScalarOperand = NoScalarOperand;
  bits<5> VLOperand = NoVLOperand;
}

let TargetPrefix = "riscv" in {
  // We use anyint here but we only support XLen.
  def int_riscv_vsetvli   : Intrinsic<[llvm_anyint_ty],
                           /* AVL */  [LLVMMatchType<0>,
                           /* VSEW */  LLVMMatchType<0>,
                           /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<1>>,
                                       ImmArg<ArgIndex<2>>]>;
  def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                            /* VSEW */ [LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
                                      [IntrNoMem,
                                       ImmArg<ArgIndex<0>>,
                                       ImmArg<ArgIndex<1>>]>;

  // For unit stride mask load
  // Input: (pointer, vl)
  class RISCVUSMLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unit stride load
  // Input: (passthru, pointer, vl)
  class RISCVUSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride fault-only-first load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFF
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>]>,
                    RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unit stride fault-only-first load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
  class RISCVUSLoadFFMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with passthru operand
  // Input: (passthru, pointer, stride, vl)
  class RISCVSLoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided load with mask
  // Input: (maskedoff, pointer, stride, mask, vl, policy)
  class RISCVSLoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                     LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed load with passthru operand
  // Input: (passthru, pointer, index, vl)
  class RISCVILoad
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_ptr_ty,
                     llvm_anyvector_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVILoadMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
                    [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For unit stride store
  // Input: (vector_in, pointer, vl)
  class RISCVUSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For unit stride store with mask
  // Input: (vector_in, pointer, mask, vl)
  class RISCVUSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store
  // Input: (vector_in, pointer, stride, vl)
  class RISCVSStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For strided store with mask
  // Input: (vector_in, pointer, stride, mask, vl)
  class RISCVSStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For indexed store
  // Input: (vector_in, pointer, index, vl)
  class RISCVIStore
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty,
                     llvm_anyint_ty, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For indexed store with mask
  // Input: (vector_in, pointer, index, mask, vl)
  class RISCVIStoreMasked
        : DefaultAttrsIntrinsic<[],
                    [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (vector_in, vector_in, mask, vl, policy)
  class RISCVUnaryAAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the source vector.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVUnaryAAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (vector_in, vector_in, mask, frm, vl, policy)
  class RISCVUnaryAAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, vector_in, mask, vl)
  class RISCVCompress
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, vl)
  class RISCVBinaryAAAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (passthru, vector_in, int_vector_in, vl)
  class RISCVRGatherVVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMVectorOfBitcastsToInt<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int_vector_in, vl, policy)
  class RISCVRGatherVVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMVectorOfBitcastsToInt<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // Input: (passthru, vector_in, int16_vector_in, vl)
  class RISCVRGatherEI16VVUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first and second source
  // vectors.
  // Input: (vector_in, vector_in, int16_vector_in, vl, policy)
  class RISCVRGatherEI16VVMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i16_ty>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector, and
  // the second operand is XLen.
  // Input: (passthru, vector_in, xlen_in, vl)
  class RISCVGatherVXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Second operand is XLen.
  // Input: (maskedoff, vector_in, xlen_in, mask, vl, policy)
  class RISCVGatherVXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                    LLVMMatchType<1>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAXMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryAAXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryAAXMaskedRoundingMode
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>, LLVMMatchType<2>],
                   [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // The destination vector type is the same as the first source vector. The
  // second source operand must match the destination type or be an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is the same as the first source vector (with
  // mask). The second source operand must match the destination type or be an
  // XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryAAShiftMasked
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                    LLVMMatchType<2>],
                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, frm, vl)
  class RISCVBinaryABXUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask).
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
  class RISCVBinaryABXMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
  // The destination vector type is NOT the same as the first source vector.
  // The second source operand must match the destination type or be an XLen
  // scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // The destination vector type is NOT the same as the first source vector
  // (with mask). The second source operand must match the destination type or
  // be an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For binary operations with V0 as input.
  // Input: (passthru, vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryWithV0
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For binary operations with mask type output and V0 as input.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, V0, vl)
  class RISCVBinaryMOutWithV0
        :DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                   [llvm_anyvector_ty, llvm_any_ty,
                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                    llvm_anyint_ty],
                   [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // For binary operations with mask type output.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVBinaryMOut
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output without mask.
  // Output: (mask type output)
  // Input: (vector_in, vector_in/scalar_in, vl)
  class RISCVCompareUnMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 2;
  }
  // For binary operations with mask type output with mask.
  // Output: (mask type output)
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
  class RISCVCompareMasked
        : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                    [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For FP classify operations.
  // Output: (bit mask type output)
  // Input: (passthru, vector_in, vl)
  class RISCVClassifyUnMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                      llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For FP classify operations with mask.
  // Output: (bit mask type output)
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVClassifyMasked
        : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                    [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, 
                     llvm_anyint_ty, LLVMMatchType<1>],
                    [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAXUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 3;
  }
  // For Saturating binary operations with rounding-mode operand
  // The destination vector type is the same as first source vector.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAXUnMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAXMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask and rounding-mode operand
  // The destination vector type is the same as first source vector.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAXMaskedRoundingMode
        : Intrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 2;
    let VLOperand = 5;
  }
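  // The vxrm immediate on the RoundingMode variants uses the fixed-point
  // rounding-mode encoding from the RVV specification:
  //   0 = rnu (round-to-nearest-up),  1 = rne (round-to-nearest-even),
  //   2 = rdn (round-down/truncate),  3 = rod (round-to-odd/jam).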
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryAAShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryAAShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryAAShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vl)
  class RISCVSaturatingBinaryABShiftUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty],
                    [IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
  class RISCVSaturatingBinaryABShiftMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem, IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations.
  // The destination vector type is NOT the same as first source vector.
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (passthru, vector_in, vector_in/scalar_in, vxrm, vl)
  class RISCVSaturatingBinaryABShiftUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     llvm_anyint_ty, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem, IntrHasSideEffects]>,
                    RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Saturating binary operations with mask.
  // The destination vector type is NOT the same as first source vector (with mask).
  // The second source operand matches the destination type or is an XLen scalar.
  // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
  class RISCVSaturatingBinaryABShiftMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<3>, LLVMMatchType<3>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem,
                     IntrHasSideEffects]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
  // Input: (vector_in, vector_in, scalar_in, vl, policy)
  class RVVSlideUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Input: (vector_in, vector_in, scalar_in, mask, vl, policy)
  class RVVSlideMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     LLVMMatchType<1>, LLVMMatchType<1>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
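  // E.g. (illustrative types), vslideup built from RVVSlideUnMasked maps to
  // an IR declaration roughly like:
  //   declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(
  //       <vscale x 2 x i32> %dest, <vscale x 2 x i32> %src, i64 %offset,
  //       i64 %vl, i64 %policy)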
  // Unmasked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryAAXAUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryAAXAMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryAAXAUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryAAXAMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                    RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
  // Unmasked Widening Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
  class RISCVTernaryWideUnMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 3;
  }
  // Masked Widening Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
  class RISCVTernaryWideMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Unmasked Widening Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, frm, vl, policy)
  class RISCVTernaryWideUnMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem] >,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }
  // Masked Widening Vector Multiply-Add operations; the first operand cannot be undef.
  // Input: (vector_in, vector_in/scalar, vector_in, mask, frm, vl, policy)
  class RISCVTernaryWideMaskedRoundingMode
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<6>>, IntrNoMem]>,
                     RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 5;
  }
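  // The frm immediate on the floating-point RoundingMode variants uses the
  // standard RISC-V rounding-mode encoding: 0 = rne, 1 = rtz, 2 = rdn,
  // 3 = rup, 4 = rmm, 7 = dyn (use the current frm CSR value).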
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source vector.
  // Input: (vector_in, vector_in, vector_in, vl)
  class RISCVReductionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source vector.
  // The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, mask, vl)
  class RISCVReductionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations.
  // The destination vector type is the same as the first and third source vector.
  // Input: (vector_in, vector_in, vector_in, frm, vl)
  class RISCVReductionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }
  // For Reduction ternary operations with mask.
  // The destination vector type is the same as the first and third source vector.
  // The mask type comes from the second source vector.
  // Input: (maskedoff, vector_in, vector_in, mask, frm, vl)
  class RISCVReductionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 5;
  }
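  // E.g. (illustrative types), vredsum built from RISCVReductionUnMasked
  // corresponds to an IR declaration roughly like:
  //   declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(
  //       <vscale x 2 x i32> %passthru, <vscale x 8 x i32> %vec,
  //       <vscale x 2 x i32> %scalar, i64 %vl)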
  // For unary operations with scalar type output without mask
  // Output: (scalar type)
  // Input: (vector_in, vl)
  class RISCVMaskedUnarySOutUnMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For unary operations with scalar type output with mask
  // Output: (scalar type)
  // Input: (vector_in, mask, vl)
  class RISCVMaskedUnarySOutMasked
        : DefaultAttrsIntrinsic<[LLVMMatchType<1>],
                    [llvm_anyvector_ty, LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector.
  // Input: (passthru, vector_in, vl)
  class RISCVUnaryABUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // The destination vector type is NOT the same as the source vector (with mask).
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVUnaryABMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                     llvm_anyint_ty, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For unary operations with the same vector type in/out without mask
  // Output: (vector)
  // Input: (vector_in, vl)
  class RISCVUnaryUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For mask unary operations with mask type in/out with mask
  // Output: (mask type output)
  // Input: (mask type maskedoff, mask type vector_in, mask, vl)
  class RISCVMaskedUnaryMOutMasked
        : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                    [LLVMMatchType<0>, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (vl)
  class RISCVNullaryIntrinsic
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 0;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  class RISCVID
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 1;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, vl)
  class RISCVConversionUnMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
                    [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, vl, policy)
  class RISCVConversionMasked
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations.
  // Input: (passthru, vector_in, frm, vl)
  class RISCVConversionUnMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty,
                     LLVMMatchType<2>],
                    [ImmArg<ArgIndex<2>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // For Conversion unary operations with mask.
  // Input: (maskedoff, vector_in, mask, frm, vl, policy)
  class RISCVConversionMaskedRoundingMode
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                    [LLVMMatchType<0>, llvm_anyvector_ty,
                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
                     LLVMMatchType<2>, LLVMMatchType<2>],
                    [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 4;
  }

  // For unit stride segment load
  // Input: (passthru, pointer, vl)
  class RISCVUSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  class RISCVUSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
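  // E.g. (illustrative, nf = 2, i64 XLen), the unit-stride segment load
  // returns one value per field and takes one passthru per field:
  //   declare { <vscale x 2 x i32>, <vscale x 2 x i32> }
  //       @llvm.riscv.vlseg2.nxv2i32.i64(<vscale x 2 x i32> %pt0,
  //           <vscale x 2 x i32> %pt1, ptr %base, i64 %vl)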

  // For unit stride fault-only-first segment load
  // Input: (passthru, pointer, vl)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem and IntrHasSideEffects does not work.
  class RISCVUSSegLoadFF<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride fault-only-first segment load with mask
  // Input: (maskedoff, pointer, mask, vl, policy)
  // Output: (data, vl)
  // NOTE: We model this with default memory properties since we model writing
  // VL as a side effect. Combining IntrReadMem and IntrHasSideEffects does not work.
  class RISCVUSSegLoadFFMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1)), [llvm_anyint_ty]),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                     [llvm_ptr_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 3)>>, NoCapture<ArgIndex<nf>>]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For stride segment load
  // Input: (passthru, pointer, offset, vl)
  class RISCVSSegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyint_ty, LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment load with mask
  // Input: (maskedoff, pointer, offset, mask, vl, policy)
  class RISCVSSegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>, LLVMMatchType<1>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment load
  // Input: (passthru, pointer, index, vl)
  class RISCVISegLoad<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                    [llvm_ptr_ty, llvm_anyvector_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment load with mask
  // Input: (maskedoff, pointer, index, mask, vl, policy)
  class RISCVISegLoadMasked<int nf>
        : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
                                !add(nf, -1))),
                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
                                [llvm_ptr_ty,
                                 llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<2>]),
                    [ImmArg<ArgIndex<!add(nf, 4)>>, NoCapture<ArgIndex<nf>>, IntrReadMem]>,
                    RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For unit stride segment store
  // Input: (value, pointer, vl)
  class RISCVUSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 1);
  }
  // For unit stride segment store with mask
  // Input: (value, pointer, mask, vl)
  class RISCVUSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }

  // For stride segment store
  // Input: (value, pointer, offset, vl)
  class RISCVSSegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For stride segment store with mask
  // Input: (value, pointer, offset, mask, vl)
  class RISCVSSegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMMatchType<1>]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  // For indexed segment store
  // Input: (value, pointer, index, vl)
  class RISCVISegStore<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 2);
  }
  // For indexed segment store with mask
  // Input: (value, pointer, index, mask, vl)
  class RISCVISegStoreMasked<int nf>
        : DefaultAttrsIntrinsic<[],
                    !listconcat([llvm_anyvector_ty],
                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
                                [llvm_ptr_ty, llvm_anyvector_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic {
    let VLOperand = !add(nf, 3);
  }

  multiclass RISCVUSLoad {
    def "int_riscv_" # NAME : RISCVUSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMasked;
  }
  multiclass RISCVUSLoadFF {
    def "int_riscv_" # NAME : RISCVUSLoadFF;
    def "int_riscv_" # NAME # "_mask" : RISCVUSLoadFFMasked;
  }
  multiclass RISCVSLoad {
    def "int_riscv_" # NAME : RISCVSLoad;
    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMasked;
  }
  multiclass RISCVILoad {
    def "int_riscv_" # NAME : RISCVILoad;
    def "int_riscv_" # NAME # "_mask" : RISCVILoadMasked;
  }
  multiclass RISCVUSStore {
    def "int_riscv_" # NAME : RISCVUSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMasked;
  }
  multiclass RISCVSStore {
    def "int_riscv_" # NAME : RISCVSStore;
    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMasked;
  }

  multiclass RISCVIStore {
    def "int_riscv_" # NAME : RISCVIStore;
    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMasked;
  }
  multiclass RISCVUnaryAA {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMasked;
  }
  multiclass RISCVUnaryAARoundingMode {
    def "int_riscv_" # NAME : RISCVUnaryAAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryAAMaskedRoundingMode;
  }
  multiclass RISCVUnaryAB {
    def "int_riscv_" # NAME : RISCVUnaryABUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVUnaryABMasked;
  }
  // AAX means the destination type (A) is the same as the first source
  // type (A). X means any type for the second source operand.
  multiclass RISCVBinaryAAX {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMasked;
  }
  multiclass RISCVBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
  }
  // Like RISCVBinaryAAX, but the second operand is used as a shift amount,
  // so it must be a vector or an XLen scalar.
  multiclass RISCVBinaryAAShift {
    def "int_riscv_" # NAME : RISCVBinaryAAShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAShiftMasked;
  }
  multiclass RISCVRGatherVV {
    def "int_riscv_" # NAME : RISCVRGatherVVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherVVMasked;
  }
  multiclass RISCVRGatherVX {
    def "int_riscv_" # NAME : RISCVGatherVXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVGatherVXMasked;
  }
  multiclass RISCVRGatherEI16VV {
    def "int_riscv_" # NAME : RISCVRGatherEI16VVUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVRGatherEI16VVMasked;
  }
  // ABX means the destination type (A) is different from the first source
  // type (B). X means any type for the second source operand.
  multiclass RISCVBinaryABX {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMasked;
  }
  multiclass RISCVBinaryABXRoundingMode {
    def "int_riscv_" # NAME : RISCVBinaryABXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMaskedRoundingMode;
  }
  // Like RISCVBinaryABX, but the second operand is used as a shift amount,
  // so it must be a vector or an XLen scalar.
  multiclass RISCVBinaryABShift {
    def "int_riscv_" # NAME : RISCVBinaryABShiftUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVBinaryABShiftMasked;
  }
  multiclass RISCVBinaryWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryWithV0;
  }
  multiclass RISCVBinaryMaskOutWithV0 {
    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
  }
  multiclass RISCVBinaryMaskOut {
    def "int_riscv_" # NAME : RISCVBinaryMOut;
  }
  multiclass RISCVSaturatingBinaryAAX {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMasked;
  }
  multiclass RISCVSaturatingBinaryAAXRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAXUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAXMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryAAShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryAAShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryAAShiftMaskedRoundingMode;
  }
  multiclass RISCVSaturatingBinaryABShiftRoundingMode {
    def "int_riscv_" # NAME : RISCVSaturatingBinaryABShiftUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVSaturatingBinaryABShiftMaskedRoundingMode;
  }
  multiclass RVVSlide {
    def "int_riscv_" # NAME : RVVSlideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RVVSlideMasked;
  }
  multiclass RISCVTernaryAAXA {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMasked;
  }
  multiclass RISCVTernaryAAXARoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryAAXAUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMaskedRoundingMode;
  }
  multiclass RISCVCompare {
    def "int_riscv_" # NAME : RISCVCompareUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVCompareMasked;
  }
  multiclass RISCVClassify {
    def "int_riscv_" # NAME : RISCVClassifyUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVClassifyMasked;
  }
  multiclass RISCVTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMasked;
  }
  multiclass RISCVTernaryWideRoundingMode {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMaskedRoundingMode;
  }
  multiclass RISCVReduction {
    def "int_riscv_" # NAME : RISCVReductionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMasked;
  }
  multiclass RISCVReductionRoundingMode {
    def "int_riscv_" # NAME : RISCVReductionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVReductionMaskedRoundingMode;
  }
  multiclass RISCVMaskedUnarySOut {
    def "int_riscv_" # NAME : RISCVMaskedUnarySOutUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnarySOutMasked;
  }
  multiclass RISCVMaskedUnaryMOut {
    def "int_riscv_" # NAME : RISCVUnaryUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVMaskedUnaryMOutMasked;
  }
  multiclass RISCVConversion {
    def "int_riscv_" #NAME :RISCVConversionUnMasked;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMasked;
  }
  multiclass RISCVConversionRoundingMode {
    def "int_riscv_" #NAME :RISCVConversionUnMaskedRoundingMode;
    def "int_riscv_" # NAME # "_mask" : RISCVConversionMaskedRoundingMode;
  }
  multiclass RISCVUSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMasked<nf>;
  }
  multiclass RISCVUSSegLoadFF<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegLoadFF<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadFFMasked<nf>;
  }
  multiclass RISCVSSegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMasked<nf>;
  }
  multiclass RISCVISegLoad<int nf> {
    def "int_riscv_" # NAME : RISCVISegLoad<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegLoadMasked<nf>;
  }
  multiclass RISCVUSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMasked<nf>;
  }
  multiclass RISCVSSegStore<int nf> {
    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMasked<nf>;
  }
  multiclass RISCVISegStore<int nf> {
    def "int_riscv_" # NAME : RISCVISegStore<nf>;
    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMasked<nf>;
  }

  defm vle : RISCVUSLoad;
  defm vleff : RISCVUSLoadFF;
  defm vse : RISCVUSStore;
  defm vlse : RISCVSLoad;
  defm vsse : RISCVSStore;
  defm vluxei : RISCVILoad;
  defm vloxei : RISCVILoad;
  defm vsoxei : RISCVIStore;
  defm vsuxei : RISCVIStore;

  def int_riscv_vlm : RISCVUSMLoad;
  def int_riscv_vsm : RISCVUSStore;

  defm vadd : RISCVBinaryAAX;
  defm vsub : RISCVBinaryAAX;
  defm vrsub : RISCVBinaryAAX;
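  // For reference, the unmasked/masked pairs created by these multiclasses
  // correspond (illustratively, for nxv2i32 data and i64 XLen) to:
  //   declare <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(
  //       <vscale x 2 x i32> %passthru, <vscale x 2 x i32> %a,
  //       <vscale x 2 x i32> %b, i64 %vl)
  //   declare <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.nxv2i32.i64(
  //       <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> %a,
  //       <vscale x 2 x i32> %b, <vscale x 2 x i1> %mask, i64 %vl,
  //       i64 %policy)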

  defm vwaddu : RISCVBinaryABX;
  defm vwadd : RISCVBinaryABX;
  defm vwaddu_w : RISCVBinaryAAX;
  defm vwadd_w : RISCVBinaryAAX;
  defm vwsubu : RISCVBinaryABX;
  defm vwsub : RISCVBinaryABX;
  defm vwsubu_w : RISCVBinaryAAX;
  defm vwsub_w : RISCVBinaryAAX;

  defm vzext : RISCVUnaryAB;
  defm vsext : RISCVUnaryAB;

  defm vadc : RISCVBinaryWithV0;
  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
  defm vmadc : RISCVBinaryMaskOut;

  defm vsbc : RISCVBinaryWithV0;
  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
  defm vmsbc : RISCVBinaryMaskOut;

  defm vand : RISCVBinaryAAX;
  defm vor : RISCVBinaryAAX;
  defm vxor : RISCVBinaryAAX;

  defm vsll : RISCVBinaryAAShift;
  defm vsrl : RISCVBinaryAAShift;
  defm vsra : RISCVBinaryAAShift;

  defm vnsrl : RISCVBinaryABShift;
  defm vnsra : RISCVBinaryABShift;

  defm vmseq : RISCVCompare;
  defm vmsne : RISCVCompare;
  defm vmsltu : RISCVCompare;
  defm vmslt : RISCVCompare;
  defm vmsleu : RISCVCompare;
  defm vmsle : RISCVCompare;
  defm vmsgtu : RISCVCompare;
  defm vmsgt : RISCVCompare;
  defm vmsgeu : RISCVCompare;
  defm vmsge : RISCVCompare;

  defm vminu : RISCVBinaryAAX;
  defm vmin : RISCVBinaryAAX;
  defm vmaxu : RISCVBinaryAAX;
  defm vmax : RISCVBinaryAAX;

  defm vmul : RISCVBinaryAAX;
  defm vmulh : RISCVBinaryAAX;
  defm vmulhu : RISCVBinaryAAX;
  defm vmulhsu : RISCVBinaryAAX;

  defm vdivu : RISCVBinaryAAX;
  defm vdiv : RISCVBinaryAAX;
  defm vremu : RISCVBinaryAAX;
  defm vrem : RISCVBinaryAAX;

  defm vwmul : RISCVBinaryABX;
  defm vwmulu : RISCVBinaryABX;
  defm vwmulsu : RISCVBinaryABX;

  defm vmacc : RISCVTernaryAAXA;
  defm vnmsac : RISCVTernaryAAXA;
  defm vmadd : RISCVTernaryAAXA;
  defm vnmsub : RISCVTernaryAAXA;

  defm vwmaccu  : RISCVTernaryWide;
  defm vwmacc   : RISCVTernaryWide;
  defm vwmaccus : RISCVTernaryWide;
  defm vwmaccsu : RISCVTernaryWide;

  defm vfadd : RISCVBinaryAAXRoundingMode;
  defm vfsub : RISCVBinaryAAXRoundingMode;
  defm vfrsub : RISCVBinaryAAXRoundingMode;

  defm vfwadd : RISCVBinaryABXRoundingMode;
  defm vfwsub : RISCVBinaryABXRoundingMode;
  defm vfwadd_w : RISCVBinaryAAXRoundingMode;
  defm vfwsub_w : RISCVBinaryAAXRoundingMode;

  defm vsaddu : RISCVSaturatingBinaryAAX;
  defm vsadd : RISCVSaturatingBinaryAAX;
  defm vssubu : RISCVSaturatingBinaryAAX;
  defm vssub : RISCVSaturatingBinaryAAX;

  defm vmerge : RISCVBinaryWithV0;

  // Output: (vector)
  // Input: (passthru, vector_in, vl)
  def int_riscv_vmv_v_v : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMMatchType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vmv_v_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (passthru, scalar, vl)
  def int_riscv_vfmv_v_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vmv_x_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                [llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vmv_s_x : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                                [LLVMMatchType<0>,
                                                 LLVMVectorElementType<0>,
                                                 llvm_anyint_ty],
                                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  def int_riscv_vfmv_f_s : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                                 [llvm_anyfloat_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic;
  def int_riscv_vfmv_s_f : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                                                 [LLVMMatchType<0>,
                                                  LLVMVectorElementType<0>,
                                                  llvm_anyint_ty],
                                                 [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  defm vfmul : RISCVBinaryAAXRoundingMode;
  defm vfdiv : RISCVBinaryAAXRoundingMode;
  defm vfrdiv : RISCVBinaryAAXRoundingMode;

  defm vfwmul : RISCVBinaryABXRoundingMode;

  defm vfmacc : RISCVTernaryAAXARoundingMode;
  defm vfnmacc : RISCVTernaryAAXARoundingMode;
  defm vfmsac : RISCVTernaryAAXARoundingMode;
  defm vfnmsac : RISCVTernaryAAXARoundingMode;
  defm vfmadd : RISCVTernaryAAXARoundingMode;
  defm vfnmadd : RISCVTernaryAAXARoundingMode;
  defm vfmsub : RISCVTernaryAAXARoundingMode;
  defm vfnmsub : RISCVTernaryAAXARoundingMode;

  defm vfwmacc : RISCVTernaryWideRoundingMode;
  defm vfwnmacc : RISCVTernaryWideRoundingMode;
  defm vfwmsac : RISCVTernaryWideRoundingMode;
  defm vfwnmsac : RISCVTernaryWideRoundingMode;

  defm vfsqrt : RISCVUnaryAARoundingMode;
  defm vfrsqrt7 : RISCVUnaryAA;
  defm vfrec7 : RISCVUnaryAARoundingMode;

  defm vfmin : RISCVBinaryAAX;
  defm vfmax : RISCVBinaryAAX;

  defm vfsgnj : RISCVBinaryAAX;
  defm vfsgnjn : RISCVBinaryAAX;
  defm vfsgnjx : RISCVBinaryAAX;

  defm vfclass : RISCVClassify;

  defm vfmerge : RISCVBinaryWithV0;

  defm vslideup : RVVSlide;
  defm vslidedown : RVVSlide;

  defm vslide1up : RISCVBinaryAAX;
  defm vslide1down : RISCVBinaryAAX;
  defm vfslide1up : RISCVBinaryAAX;
  defm vfslide1down : RISCVBinaryAAX;

  defm vrgather_vv : RISCVRGatherVV;
  defm vrgather_vx : RISCVRGatherVX;
  defm vrgatherei16_vv : RISCVRGatherEI16VV;

  def "int_riscv_vcompress" : RISCVCompress;

  defm vaaddu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vaadd : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasubu : RISCVSaturatingBinaryAAXRoundingMode;
  defm vasub : RISCVSaturatingBinaryAAXRoundingMode;

  defm vsmul : RISCVSaturatingBinaryAAXRoundingMode;

  defm vssrl : RISCVSaturatingBinaryAAShiftRoundingMode;
  defm vssra : RISCVSaturatingBinaryAAShiftRoundingMode;

  defm vnclipu : RISCVSaturatingBinaryABShiftRoundingMode;
  defm vnclip : RISCVSaturatingBinaryABShiftRoundingMode;

  defm vmfeq : RISCVCompare;
  defm vmfne : RISCVCompare;
  defm vmflt : RISCVCompare;
  defm vmfle : RISCVCompare;
  defm vmfgt : RISCVCompare;
  defm vmfge : RISCVCompare;

  defm vredsum : RISCVReduction;
  defm vredand : RISCVReduction;
  defm vredor : RISCVReduction;
  defm vredxor : RISCVReduction;
  defm vredminu : RISCVReduction;
  defm vredmin : RISCVReduction;
  defm vredmaxu : RISCVReduction;
  defm vredmax : RISCVReduction;

  defm vwredsumu : RISCVReduction;
  defm vwredsum : RISCVReduction;

  defm vfredosum : RISCVReductionRoundingMode;
  defm vfredusum : RISCVReductionRoundingMode;
  defm vfredmin : RISCVReduction;
  defm vfredmax : RISCVReduction;

  defm vfwredusum : RISCVReductionRoundingMode;
  defm vfwredosum : RISCVReductionRoundingMode;

  def int_riscv_vmand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnand : RISCVBinaryAAAUnMasked;
  def int_riscv_vmandn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmorn : RISCVBinaryAAAUnMasked;
  def int_riscv_vmxnor : RISCVBinaryAAAUnMasked;
  def int_riscv_vmclr : RISCVNullaryIntrinsic;
  def int_riscv_vmset : RISCVNullaryIntrinsic;

  defm vcpop : RISCVMaskedUnarySOut;
  defm vfirst : RISCVMaskedUnarySOut;
  defm vmsbf : RISCVMaskedUnaryMOut;
  defm vmsof : RISCVMaskedUnaryMOut;
  defm vmsif : RISCVMaskedUnaryMOut;

  defm vfcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfcvt_rtz_xu_f_v : RISCVConversion;
  defm vfcvt_rtz_x_f_v : RISCVConversion;
  defm vfcvt_f_xu_v : RISCVConversionRoundingMode;
  defm vfcvt_f_x_v : RISCVConversionRoundingMode;

  defm vfwcvt_f_xu_v : RISCVConversion;
  defm vfwcvt_f_x_v : RISCVConversion;
  defm vfwcvt_xu_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_x_f_v : RISCVConversionRoundingMode;
  defm vfwcvt_rtz_xu_f_v : RISCVConversion;
  defm vfwcvt_rtz_x_f_v : RISCVConversion;
  defm vfwcvt_f_f_v : RISCVConversion;

  defm vfncvt_f_xu_w : RISCVConversionRoundingMode;
  defm vfncvt_f_x_w : RISCVConversionRoundingMode;
  defm vfncvt_xu_f_w : RISCVConversionRoundingMode;
  defm vfncvt_x_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rtz_xu_f_w : RISCVConversion;
  defm vfncvt_rtz_x_f_w : RISCVConversion;
  defm vfncvt_f_f_w : RISCVConversionRoundingMode;
  defm vfncvt_rod_f_f_w : RISCVConversion;

  // Output: (vector)
  // Input: (passthru, mask type input, vl)
  def int_riscv_viota
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty],
                                [IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }
  // Output: (vector)
  // Input: (maskedoff, mask type vector_in, mask, vl, policy)
  def int_riscv_viota_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 3;
  }
  // Output: (vector)
  // Input: (passthru, vl)
  def int_riscv_vid : RISCVID;

  // Output: (vector)
  // Input: (maskedoff, mask, vl, policy)
  def int_riscv_vid_mask
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                 llvm_anyint_ty, LLVMMatchType<1>],
                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
    let VLOperand = 2;
  }

  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    defm vlseg # nf : RISCVUSSegLoad<nf>;
    defm vlseg # nf # ff : RISCVUSSegLoadFF<nf>;
    defm vlsseg # nf : RISCVSSegLoad<nf>;
    defm vloxseg # nf : RISCVISegLoad<nf>;
    defm vluxseg # nf : RISCVISegLoad<nf>;
    defm vsseg # nf : RISCVUSSegStore<nf>;
    defm vssseg # nf : RISCVSSegStore<nf>;
    defm vsoxseg # nf : RISCVISegStore<nf>;
    defm vsuxseg # nf : RISCVISegStore<nf>;
  }

  // Strided loads/stores for fixed vectors.
  def int_riscv_masked_strided_load
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                [LLVMMatchType<0>, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrReadMem]>;
  def int_riscv_masked_strided_store
        : DefaultAttrsIntrinsic<[],
                                [llvm_anyvector_ty, llvm_anyptr_ty,
                                 llvm_anyint_ty,
                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                [NoCapture<ArgIndex<1>>, IntrWriteMem]>;

  // Segment loads/stores for fixed vectors.
  foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
    def int_riscv_seg # nf # _load
          : DefaultAttrsIntrinsic<!listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                              !add(nf, -1))),
                                  [llvm_anyptr_ty, llvm_anyint_ty],
                                  [NoCapture<ArgIndex<0>>, IntrReadMem]>;
    def int_riscv_seg # nf # _store
          : DefaultAttrsIntrinsic<[],
                                  !listconcat([llvm_anyvector_ty],
                                              !listsplat(LLVMMatchType<0>,
                                                          !add(nf, -1)),
                                              [llvm_anyptr_ty, llvm_anyint_ty]),
                                  [NoCapture<ArgIndex<nf>>, IntrWriteMem]>;
  }
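  // E.g. (illustrative), the nf = 2 fixed-vector segment load would be
  // declared roughly as:
  //   declare { <4 x i32>, <4 x i32> }
  //       @llvm.riscv.seg2.load.v4i32.p0.i64(ptr %base, i64 %vl)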

} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Scalar Cryptography
//
// These intrinsics will lower directly into the corresponding instructions
// added by the scalar cryptography extension, if the extension is present.

let TargetPrefix = "riscv" in {

class ScalarCryptoByteSelect32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable,
                             ImmArg<ArgIndex<2>>]>;

class ScalarCryptoGprGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic32
    : DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable]>;

class ScalarCryptoGprIntrinsic64
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                            [llvm_i64_ty],
                            [IntrNoMem, IntrSpeculatable]>;

// Zknd
def int_riscv_aes32dsi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsi">;
def int_riscv_aes32dsmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32dsmi">;

def int_riscv_aes64ds   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ds">;
def int_riscv_aes64dsm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64dsm">;

def int_riscv_aes64im   : ScalarCryptoGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64im">;

// Zkne
def int_riscv_aes32esi  : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esi">;
def int_riscv_aes32esmi : ScalarCryptoByteSelect32,
                          ClangBuiltin<"__builtin_riscv_aes32esmi">;

def int_riscv_aes64es   : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64es">;
def int_riscv_aes64esm  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64esm">;

// Zknd & Zkne
def int_riscv_aes64ks2  : ScalarCryptoGprGprIntrinsic64,
                          ClangBuiltin<"__builtin_riscv_aes64ks2">;
def int_riscv_aes64ks1i : DefaultAttrsIntrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty],
                                                [IntrNoMem, IntrSpeculatable,
                                                 ImmArg<ArgIndex<1>>]>,
                          ClangBuiltin<"__builtin_riscv_aes64ks1i">;

// Zknh
def int_riscv_sha256sig0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sig1 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum0 : ScalarCryptoGprIntrinsic32;
def int_riscv_sha256sum1 : ScalarCryptoGprIntrinsic32;

def int_riscv_sha512sig0l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0l">;
def int_riscv_sha512sig0h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig0h">;
def int_riscv_sha512sig1l : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1l">;
def int_riscv_sha512sig1h : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sig1h">;
def int_riscv_sha512sum0r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum0r">;
def int_riscv_sha512sum1r : ScalarCryptoGprGprIntrinsic32,
                            ClangBuiltin<"__builtin_riscv_sha512sum1r">;

def int_riscv_sha512sig0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig0">;
def int_riscv_sha512sig1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sig1">;
def int_riscv_sha512sum0 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum0">;
def int_riscv_sha512sum1 : ScalarCryptoGprIntrinsic64,
                           ClangBuiltin<"__builtin_riscv_sha512sum1">;

// Zksed
def int_riscv_sm4ks      : ScalarCryptoByteSelect32;
def int_riscv_sm4ed      : ScalarCryptoByteSelect32;

// Zksh
def int_riscv_sm3p0      : ScalarCryptoGprIntrinsic32;
def int_riscv_sm3p1      : ScalarCryptoGprIntrinsic32;
} // TargetPrefix = "riscv"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//
include "llvm/IR/IntrinsicsRISCVXTHead.td"
include "llvm/IR/IntrinsicsRISCVXsf.td"
//===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines properties of all LLVM intrinsics.
//
//===----------------------------------------------------------------------===//

include "llvm/CodeGen/ValueTypes.td"
include "llvm/CodeGen/SDNodeProperties.td"

//===----------------------------------------------------------------------===//
//  Properties we keep track of for intrinsics.
//===----------------------------------------------------------------------===//

class IntrinsicProperty<bit is_default = false> {
  bit IsDefault = is_default;
}

// Intr*Mem - Memory properties.  If no property is set, the worst case
// is assumed (it may read and write any memory it can get access to and it may
// have other side effects).

// IntrNoMem - The intrinsic does not access memory or have any other side
// effects.  It may be CSE'd, deleted if dead, etc.
def IntrNoMem : IntrinsicProperty;

// IntrReadMem - This intrinsic only reads from memory. It does not write to
// memory and has no other side effects. Therefore, it cannot be moved across
// potentially aliasing stores. However, it can be reordered otherwise and can
// be deleted if dead.
def IntrReadMem : IntrinsicProperty;

// IntrWriteMem - This intrinsic only writes to memory, but does not read from
// memory, and has no other side effects. This means dead stores before calls
// to this intrinsic may be removed.
def IntrWriteMem : IntrinsicProperty;

// IntrArgMemOnly - This intrinsic only accesses memory that its pointer-typed
// argument(s) points to, but may access an unspecified amount. Other than
// reads from and (possibly volatile) writes to memory, it has no side effects.
def IntrArgMemOnly : IntrinsicProperty;

// IntrInaccessibleMemOnly -- This intrinsic only accesses memory that is not
// accessible by the module being compiled. This is a weaker form of IntrNoMem.
def IntrInaccessibleMemOnly : IntrinsicProperty;

// IntrInaccessibleMemOrArgMemOnly -- This intrinsic only accesses memory that
// its pointer-typed arguments point to or memory that is not accessible
// by the module being compiled. This is a weaker form of IntrArgMemOnly.
def IntrInaccessibleMemOrArgMemOnly : IntrinsicProperty;

// Commutative - This intrinsic is commutative: X op Y == Y op X.
def Commutative : IntrinsicProperty;

// Throws - This intrinsic can throw.
def Throws : IntrinsicProperty;

// Attribute index needs to match `AttrIndex` defined in `Attributes.h`.
class AttrIndex<int idx> {
  int Value = idx;
}
def FuncIndex : AttrIndex<-1>;
def RetIndex : AttrIndex<0>;
class ArgIndex<int argNo> : AttrIndex<!add(argNo, 1)>;
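
// For example, FuncIndex selects the function itself, RetIndex the return
// value, and ArgIndex<0> (an AttrIndex value of 1) the first argument.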

// NoCapture - The specified argument pointer is not captured by the intrinsic.
class NoCapture<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// NoAlias - The specified argument pointer does not alias other "noalias"
// pointer arguments of the intrinsic with respect to the intrinsic's scope.
class NoAlias<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// NoUndef - The specified argument is neither undef nor poison.
class NoUndef<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// NonNull - The specified argument is not null.
class NonNull<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

class Align<AttrIndex idx, int align> : IntrinsicProperty {
  int ArgNo = idx.Value;
  int Align = align;
}

class Dereferenceable<AttrIndex idx, int bytes> : IntrinsicProperty {
  int ArgNo = idx.Value;
  int Bytes = bytes;
}

// Returned - The specified argument is always the return value of the
// intrinsic.
class Returned<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// ImmArg - The specified argument must be an immediate.
class ImmArg<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}
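
// For example, int_memcpy further below uses ImmArg<ArgIndex<3>> to require
// that its i1 "volatile" operand is an immediate constant.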

// ReadOnly - The specified argument pointer is not written to through the
// pointer by the intrinsic.
class ReadOnly<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// WriteOnly - The intrinsic does not read memory through the specified
// argument pointer.
class WriteOnly<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// ReadNone - The specified argument pointer is not dereferenced by the
// intrinsic.
class ReadNone<AttrIndex idx> : IntrinsicProperty {
  int ArgNo = idx.Value;
}

// IntrNoReturn - The intrinsic does not return (parallels the noreturn
// attribute).
def IntrNoReturn : IntrinsicProperty;

// IntrNoCallback - The intrinsic does not call back into the caller's
// module (parallels the nocallback attribute). Applied by default.
def IntrNoCallback : IntrinsicProperty<1>;

// IntrNoSync - Threads executing the intrinsic will not synchronize using
// memory or other means. Applied by default.
def IntrNoSync : IntrinsicProperty<1>;

// IntrNoFree - The intrinsic does not free memory (parallels the nofree
// attribute). Applied by default.
def IntrNoFree : IntrinsicProperty<1>;

// IntrWillReturn - The intrinsic will eventually return (parallels the
// willreturn attribute). Applied by default.
def IntrWillReturn : IntrinsicProperty<1>;

// IntrCold - Calls to this intrinsic are cold.
// Parallels the cold attribute on LLVM IR functions.
def IntrCold : IntrinsicProperty;

// IntrNoDuplicate - Calls to this intrinsic cannot be duplicated.
// Parallels the noduplicate attribute on LLVM IR functions.
def IntrNoDuplicate : IntrinsicProperty;

// IntrNoMerge - Calls to this intrinsic cannot be merged.
// Parallels the nomerge attribute on LLVM IR functions.
def IntrNoMerge : IntrinsicProperty;

// IntrConvergent - Calls to this intrinsic are convergent and may not be made
// control-dependent on any additional values.
// Parallels the convergent attribute on LLVM IR functions.
def IntrConvergent : IntrinsicProperty;

// This property indicates that the intrinsic is safe to speculate.
def IntrSpeculatable : IntrinsicProperty;

// This property can be used to override the 'has no other side effects'
// language of the IntrNoMem, IntrReadMem, IntrWriteMem, and IntrArgMemOnly
// intrinsic properties.  By default, intrinsics are assumed to have side
// effects, so this property is only necessary if you have defined one of
// the memory properties listed above.
// For this property, 'side effects' has the same meaning as 'side effects'
// defined by the hasSideEffects property of the TableGen Instruction class.
def IntrHasSideEffects : IntrinsicProperty;

//===----------------------------------------------------------------------===//
// IIT constants and utils
//===----------------------------------------------------------------------===//

// llvm::Intrinsic::IITDescriptor::ArgKind::AK_%
def ArgKind {
  int Any        = 0;
  int AnyInteger = 1;
  int AnyFloat   = 2;
  int AnyVector  = 3;
  int AnyPointer = 4;

  int MatchType  = 7;
}

// Encoded placeholders.
// Bits [15:8] hold an ID that selects how the ArgCode in the low byte is
// resolved.

// (ACIdx << 3) | ArgCode
class EncAnyType<int ArgCode=0> {
  int ID = 0x100;
  int ret = !or(ID, ArgCode);
}
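
// A worked example: EncAnyType<ArgKind.AnyInteger>.ret is 0x100 | 1 = 0x101;
// ResolveArgCode below recognizes the 0x100 ID in the high byte and rewrites
// the low byte into a concrete arg code.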

// (Mapping[Num] << 3) | AK.MatchType
class EncMatchType<int Num=0> {
  int ID = 0x200;
  int ret = !or(ID, Num);
}

// (Mapping[Num] << 3) | ArgCodes[Mapping[Num]]
class EncSameWidth<int Num=0> {
  int ID = 0x300;
  int ret = !or(ID, Num);
}

// ACIdx
class EncNextArgA<int dummy=0> {
  int ID = 0x400;
  int ret = !or(ID, dummy);
}

// Mapping[Num]
class EncNextArgN<int Num=0> {
  int ID = 0x500;
  int ret = !or(ID, Num);
}

class ResolveArgCode<
    list<int> Mapping,
    list<int> ArgCodes,
    int ACIdx,
    int ax> {
  int ah = !and(ax, 0xFF00);
  int al = !and(ax, 0x00FF);
  int num = Mapping[al];
  int ret = !cond(
    !eq(ah, EncAnyType<>.ID)   : !or(!shl(ACIdx, 3), al),
    !eq(ah, EncMatchType<>.ID) : !or(!shl(num, 3), ArgKind.MatchType),
    !eq(ah, EncSameWidth<>.ID) : !or(!shl(num, 3), ArgCodes[num]),
    !eq(ah, EncNextArgA<>.ID)  : ACIdx,
    !eq(ah, EncNextArgN<>.ID)  : num,
    true : al);
}
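
// A worked example (with hypothetical inputs): resolving ax =
// EncMatchType<0>.ret (0x200) against Mapping = [0] takes the EncMatchType
// branch and yields !or(!shl(0, 3), ArgKind.MatchType) = 7, i.e. a MatchType
// arg code referring to overloaded-type slot 0.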

//===----------------------------------------------------------------------===//
// IIT_Info
//===----------------------------------------------------------------------===//

class IIT_Base<int num> {
  int Number = num;
  list<ValueType> VTs = ?;
}

class IIT_VT<ValueType vt, int num> : IIT_Base<num> {
  let VTs = [vt];
}

class IIT_Int<int size, int num> : IIT_Base<num> {
  let VTs = !filter(vti, ValueTypes,
    !and(vti.isInteger, !eq(vti.Size, size)));
}

class IIT_Vec<int nelem, int num> : IIT_Base<num> {
  let VTs = !filter(vti, ValueTypes,
    !and(vti.isVector, !eq(vti.nElem, nelem)));
}

defset list<IIT_Base> IIT_all = {
def IIT_Done : IIT_Base<    0>;
def IIT_I1   : IIT_Int<1,   1>;
def IIT_I8   : IIT_Int<8,   2>;
def IIT_I16  : IIT_Int<16,  3>;
def IIT_I32  : IIT_Int<32,  4>;
def IIT_I64  : IIT_Int<64,  5>;
def IIT_F16  : IIT_VT<f16,  6>;
def IIT_F32  : IIT_VT<f32,  7>;
def IIT_F64  : IIT_VT<f64,  8>;
def IIT_V2   : IIT_Vec<2,   9>;
def IIT_V4   : IIT_Vec<4,  10>;
def IIT_V8   : IIT_Vec<8,  11>;
def IIT_V16  : IIT_Vec<16, 12>;
def IIT_V32  : IIT_Vec<32, 13>;
def IIT_PTR  : IIT_Base<   14>;
def IIT_ARG  : IIT_Base<   15>;

def IIT_V64 : IIT_Vec<64, 16>;
def IIT_MMX : IIT_VT<x86mmx, 17>;
def IIT_TOKEN : IIT_VT<token, 18>;
def IIT_METADATA : IIT_VT<MetadataVT, 19>;
def IIT_EMPTYSTRUCT : IIT_VT<OtherVT, 20>;
def IIT_STRUCT2 : IIT_Base<21>;
def IIT_STRUCT3 : IIT_Base<22>;
def IIT_STRUCT4 : IIT_Base<23>;
def IIT_STRUCT5 : IIT_Base<24>;
def IIT_EXTEND_ARG : IIT_Base<25>;
def IIT_TRUNC_ARG : IIT_Base<26>;
def IIT_ANYPTR : IIT_Base<27>;
def IIT_V1 : IIT_Vec<1, 28>;
def IIT_VARARG : IIT_VT<isVoid, 29>;
def IIT_HALF_VEC_ARG : IIT_Base<30>;
def IIT_SAME_VEC_WIDTH_ARG : IIT_Base<31>;
def IIT_VEC_OF_ANYPTRS_TO_ELT : IIT_Base<34>;
def IIT_I128 : IIT_Int<128, 35>;
def IIT_V512 : IIT_Vec<512, 36>;
def IIT_V1024 : IIT_Vec<1024, 37>;
def IIT_STRUCT6 : IIT_Base<38>;
def IIT_STRUCT7 : IIT_Base<39>;
def IIT_STRUCT8 : IIT_Base<40>;
def IIT_F128 : IIT_VT<f128, 41>;
def IIT_VEC_ELEMENT : IIT_Base<42>;
def IIT_SCALABLE_VEC : IIT_Base<43>;
def IIT_SUBDIVIDE2_ARG : IIT_Base<44>;
def IIT_SUBDIVIDE4_ARG : IIT_Base<45>;
def IIT_VEC_OF_BITCASTS_TO_INT : IIT_Base<46>;
def IIT_V128 : IIT_Vec<128, 47>;
def IIT_BF16 : IIT_VT<bf16, 48>;
def IIT_STRUCT9 : IIT_Base<49>;
def IIT_V256 : IIT_Vec<256, 50>;
def IIT_AMX : IIT_VT<x86amx, 51>;
def IIT_PPCF128 : IIT_VT<ppcf128, 52>;
def IIT_V3 : IIT_Vec<3, 53>;
def IIT_EXTERNREF : IIT_VT<externref, 54>;
def IIT_FUNCREF : IIT_VT<funcref, 55>;
def IIT_I2 : IIT_Int<2, 57>;
def IIT_I4 : IIT_Int<4, 58>;
def IIT_AARCH64_SVCOUNT : IIT_VT<aarch64svcount, 59>;
}

defvar IIT_all_FixedTypes = !filter(iit, IIT_all,
  !or(!isa<IIT_VT>(iit), !isa<IIT_Int>(iit)));

defvar IIT_all_VectorTypes = !filter(iit, IIT_all,
  !isa<IIT_Vec>(iit));

defvar IIT_RetNumbers = [
  [IIT_Done.Number],
  []<int>,
  [IIT_STRUCT2.Number],
  [IIT_STRUCT3.Number],
  [IIT_STRUCT4.Number],
  [IIT_STRUCT5.Number],
  [IIT_STRUCT6.Number],
  [IIT_STRUCT7.Number],
  [IIT_STRUCT8.Number],
  [IIT_STRUCT9.Number],
];

//===----------------------------------------------------------------------===//
// Types used by intrinsics.
//===----------------------------------------------------------------------===//

class LLVMType<ValueType vt> {
  ValueType VT = vt;
  int isAny = vt.isOverloaded;

  int ArgCode = ?;
  int Number = ?;

  list<IIT_Base> IITs = !filter(iit, IIT_all_FixedTypes,
    !not(!empty(!filter(iit_vt, iit.VTs,
      !eq(iit_vt, !if(vt.isVector, vt.ElementType, vt))))));
  assert !le(!size(IITs), 1), "Duplicate type";

  list<IIT_Base> IIT_Vecs = !if(vt.isVector,
    !filter(iit, IIT_all_VectorTypes,
      !not(!empty(!filter(iit_vt, iit.VTs, !and(
        !eq(iit_vt.ElementType, vt.ElementType),
        !eq(iit_vt.nElem, vt.nElem)))))),
    []);
  assert !le(!size(IIT_Vecs), 1), "Duplicate type";

  list<int> Sig = !listconcat(
    !if(vt.isScalable, [IIT_SCALABLE_VEC.Number], []),
    !foreach(iit, IIT_Vecs, iit.Number),
    !foreach(iit, IITs,     iit.Number));
}

class LLVMAnyType<ValueType vt> : LLVMType<vt> {
  let ArgCode = !cond(
    !eq(vt, Any)     : ArgKind.Any,
    !eq(vt, iAny)    : ArgKind.AnyInteger,
    !eq(vt, fAny)    : ArgKind.AnyFloat,
    !eq(vt, vAny)    : ArgKind.AnyVector,
    !eq(vt, iPTRAny) : ArgKind.AnyPointer,
  );
  let Sig = [
    IIT_ARG.Number,
    EncAnyType<ArgCode>.ret,
  ];

  assert isAny, "LLVMAnyType.VT should have isOverloaded";
}

class LLVMQualPointerType<int addrspace>
  : LLVMType<iPTR> {
  assert !and(!le(0, addrspace), !le(addrspace, 255)),
    "Address space exceeds 255";

  let Sig =
    !if(addrspace, [
      IIT_ANYPTR.Number,
      addrspace,
    ], [
      IIT_PTR.Number,
    ]);
}

class LLVMAnyPointerType : LLVMAnyType<iPTRAny> {
  assert isAny, "iPTRAny should have isOverloaded";
}

// Match the type of another intrinsic parameter.  Number is an index into the
// list of overloaded types for the intrinsic, excluding all the fixed types.
// The Number value must refer to a previously listed type.  For example:
//   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyfloat_ty, LLVMMatchType<0>]>
// has two overloaded types, the 2nd and 3rd arguments.  LLVMMatchType<0>
// refers to the first overloaded type, which is the 2nd argument.
class LLVMMatchType<int num, IIT_Base IIT_Info = IIT_ARG>
  : LLVMType<OtherVT>{
  let Number = num;
  let Sig = [
    IIT_Info.Number,
    EncMatchType<num>.ret,
  ];
}

class LLVMMatchTypeNextArg<int num, IIT_Base IIT_Info>
  : LLVMMatchType<num, IIT_Info> {
  let Sig = [
    IIT_Info.Number,
    EncNextArgA<>.ret,
    EncNextArgN<num>.ret,
  ];
}

// Match the type of another intrinsic parameter that is expected to be based on
// an integral type (i.e. either iN or <N x iM>), but change the scalar size to
// be twice as wide or half as wide as the other type.  This is only useful when
// the intrinsic is overloaded, so the matched type should be declared as iAny.
class LLVMExtendedType<int num> : LLVMMatchType<num, IIT_EXTEND_ARG>;
class LLVMTruncatedType<int num> : LLVMMatchType<num, IIT_TRUNC_ARG>;
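
// For example, a hypothetical narrowing operation could be declared as
//   Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>]>
// so that its operand is twice as wide as its overloaded result type.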

// Match the scalar/vector of another intrinsic parameter but with a different
// element type. Either both are scalars or both are vectors with the same
// number of elements.
class LLVMScalarOrSameVectorWidth<int idx, LLVMType elty>
  : LLVMMatchType<idx, IIT_SAME_VEC_WIDTH_ARG> {
  let Sig = !listconcat([
    IIT_SAME_VEC_WIDTH_ARG.Number,
    EncSameWidth<idx>.ret,
  ], elty.Sig);
}
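
// For example, int_is_fpclass further below uses
// LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> to return an i1 for scalar
// inputs and a same-length vector of i1 for vector inputs.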

class LLVMVectorOfAnyPointersToElt<int num>
  : LLVMMatchTypeNextArg<num, IIT_VEC_OF_ANYPTRS_TO_ELT>;
class LLVMVectorElementType<int num> : LLVMMatchType<num, IIT_VEC_ELEMENT>;

// Match the type of another intrinsic parameter that is expected to be a
// vector type, but change the element count to be half as many.
class LLVMHalfElementsVectorType<int num>
  : LLVMMatchType<num, IIT_HALF_VEC_ARG>;

// Match the type of another intrinsic parameter that is expected to be a
// vector type (i.e. <N x iM>) but with each element subdivided to form a
// vector with more, narrower elements than the original.
class LLVMSubdivide2VectorType<int num>
  : LLVMMatchType<num, IIT_SUBDIVIDE2_ARG>;
class LLVMSubdivide4VectorType<int num>
  : LLVMMatchType<num, IIT_SUBDIVIDE4_ARG>;

// Match the element count and bit width of another intrinsic parameter, but
// change the element type to an integer.
class LLVMVectorOfBitcastsToInt<int num>
  : LLVMMatchType<num, IIT_VEC_OF_BITCASTS_TO_INT>;

def llvm_void_ty       : LLVMType<isVoid>;

def llvm_any_ty        : LLVMAnyType<Any>;
def llvm_anyint_ty     : LLVMAnyType<iAny>;
def llvm_anyfloat_ty   : LLVMAnyType<fAny>;
def llvm_anyvector_ty  : LLVMAnyType<vAny>;

def llvm_i1_ty         : LLVMType<i1>;
def llvm_i8_ty         : LLVMType<i8>;
def llvm_i16_ty        : LLVMType<i16>;
def llvm_i32_ty        : LLVMType<i32>;
def llvm_i64_ty        : LLVMType<i64>;
def llvm_i128_ty       : LLVMType<i128>;
def llvm_half_ty       : LLVMType<f16>;
def llvm_bfloat_ty     : LLVMType<bf16>;
def llvm_float_ty      : LLVMType<f32>;
def llvm_double_ty     : LLVMType<f64>;
def llvm_f80_ty        : LLVMType<f80>;
def llvm_f128_ty       : LLVMType<f128>;
def llvm_ppcf128_ty    : LLVMType<ppcf128>;
def llvm_ptr_ty        : LLVMQualPointerType<0>; // ptr
def llvm_anyptr_ty     : LLVMAnyPointerType;     // ptr addrspace(N)
def llvm_empty_ty      : LLVMType<OtherVT>;      // { }
def llvm_metadata_ty   : LLVMType<MetadataVT>;   // !{...}
def llvm_token_ty      : LLVMType<token>;        // token

def llvm_x86mmx_ty     : LLVMType<x86mmx>;

def llvm_aarch64_svcount_ty : LLVMType<aarch64svcount>;

def llvm_x86amx_ty     : LLVMType<x86amx>;

def llvm_v2i1_ty       : LLVMType<v2i1>;     //   2 x i1
def llvm_v4i1_ty       : LLVMType<v4i1>;     //   4 x i1
def llvm_v8i1_ty       : LLVMType<v8i1>;     //   8 x i1
def llvm_v16i1_ty      : LLVMType<v16i1>;    //  16 x i1
def llvm_v32i1_ty      : LLVMType<v32i1>;    //  32 x i1
def llvm_v64i1_ty      : LLVMType<v64i1>;    //  64 x i1
def llvm_v128i1_ty     : LLVMType<v128i1>;   // 128 x i1
def llvm_v256i1_ty     : LLVMType<v256i1>;   // 256 x i1
def llvm_v512i1_ty     : LLVMType<v512i1>;   // 512 x i1
def llvm_v1024i1_ty    : LLVMType<v1024i1>;  //1024 x i1
def llvm_v2048i1_ty    : LLVMType<v2048i1>;  //2048 x i1

def llvm_v1i8_ty       : LLVMType<v1i8>;     //  1 x i8
def llvm_v2i8_ty       : LLVMType<v2i8>;     //  2 x i8
def llvm_v4i8_ty       : LLVMType<v4i8>;     //  4 x i8
def llvm_v8i8_ty       : LLVMType<v8i8>;     //  8 x i8
def llvm_v16i8_ty      : LLVMType<v16i8>;    // 16 x i8
def llvm_v32i8_ty      : LLVMType<v32i8>;    // 32 x i8
def llvm_v64i8_ty      : LLVMType<v64i8>;    // 64 x i8
def llvm_v128i8_ty     : LLVMType<v128i8>;   //128 x i8
def llvm_v256i8_ty     : LLVMType<v256i8>;   //256 x i8

def llvm_v1i16_ty      : LLVMType<v1i16>;    //  1 x i16
def llvm_v2i16_ty      : LLVMType<v2i16>;    //  2 x i16
def llvm_v4i16_ty      : LLVMType<v4i16>;    //  4 x i16
def llvm_v8i16_ty      : LLVMType<v8i16>;    //  8 x i16
def llvm_v16i16_ty     : LLVMType<v16i16>;   // 16 x i16
def llvm_v32i16_ty     : LLVMType<v32i16>;   // 32 x i16
def llvm_v64i16_ty     : LLVMType<v64i16>;   // 64 x i16
def llvm_v128i16_ty    : LLVMType<v128i16>;  //128 x i16

def llvm_v1i32_ty      : LLVMType<v1i32>;    //  1 x i32
def llvm_v2i32_ty      : LLVMType<v2i32>;    //  2 x i32
def llvm_v4i32_ty      : LLVMType<v4i32>;    //  4 x i32
def llvm_v8i32_ty      : LLVMType<v8i32>;    //  8 x i32
def llvm_v16i32_ty     : LLVMType<v16i32>;   // 16 x i32
def llvm_v32i32_ty     : LLVMType<v32i32>;   // 32 x i32
def llvm_v64i32_ty     : LLVMType<v64i32>;   // 64 x i32
def llvm_v256i32_ty    : LLVMType<v256i32>;  //256 x i32

def llvm_v1i64_ty      : LLVMType<v1i64>;    //  1 x i64
def llvm_v2i64_ty      : LLVMType<v2i64>;    //  2 x i64
def llvm_v4i64_ty      : LLVMType<v4i64>;    //  4 x i64
def llvm_v8i64_ty      : LLVMType<v8i64>;    //  8 x i64
def llvm_v16i64_ty     : LLVMType<v16i64>;   // 16 x i64
def llvm_v32i64_ty     : LLVMType<v32i64>;   // 32 x i64

def llvm_v1i128_ty     : LLVMType<v1i128>;   //  1 x i128

def llvm_v2f16_ty      : LLVMType<v2f16>;    //  2 x half (__fp16)
def llvm_v4f16_ty      : LLVMType<v4f16>;    //  4 x half (__fp16)
def llvm_v8f16_ty      : LLVMType<v8f16>;    //  8 x half (__fp16)
def llvm_v16f16_ty     : LLVMType<v16f16>;   // 16 x half (__fp16)
def llvm_v32f16_ty     : LLVMType<v32f16>;   // 32 x half (__fp16)
def llvm_v2bf16_ty     : LLVMType<v2bf16>;   //  2 x bfloat (__bf16)
def llvm_v4bf16_ty     : LLVMType<v4bf16>;   //  4 x bfloat (__bf16)
def llvm_v8bf16_ty     : LLVMType<v8bf16>;   //  8 x bfloat (__bf16)
def llvm_v16bf16_ty    : LLVMType<v16bf16>;  // 16 x bfloat (__bf16)
def llvm_v32bf16_ty    : LLVMType<v32bf16>;  // 32 x bfloat (__bf16)
def llvm_v1f32_ty      : LLVMType<v1f32>;    //  1 x float
def llvm_v2f32_ty      : LLVMType<v2f32>;    //  2 x float
def llvm_v3f32_ty      : LLVMType<v3f32>;    //  3 x float
def llvm_v4f32_ty      : LLVMType<v4f32>;    //  4 x float
def llvm_v8f32_ty      : LLVMType<v8f32>;    //  8 x float
def llvm_v16f32_ty     : LLVMType<v16f32>;   // 16 x float
def llvm_v32f32_ty     : LLVMType<v32f32>;   // 32 x float
def llvm_v1f64_ty      : LLVMType<v1f64>;    //  1 x double
def llvm_v2f64_ty      : LLVMType<v2f64>;    //  2 x double
def llvm_v4f64_ty      : LLVMType<v4f64>;    //  4 x double
def llvm_v8f64_ty      : LLVMType<v8f64>;    //  8 x double
def llvm_v16f64_ty     : LLVMType<v16f64>;   // 16 x double

def llvm_vararg_ty     : LLVMType<isVoid>;   // this means vararg here

def llvm_externref_ty  : LLVMType<externref>;
def llvm_funcref_ty    : LLVMType<funcref>;

//===----------------------------------------------------------------------===//

class MakeIdx<list<int> Set> {
  list<int> IdxsR = !foreach(i, !range(Set),
    !if(Set[i],
      !foldl(0, !range(0, i), m, j, !add(m, Set[j])),
      -1));

  list<int> RIdxsR = !foreach(i, !range(Set),
    !foldl(-1, !range(Set), m, j,
      !if(!and(Set[j], !eq(IdxsR[j], i)), j, m)));

  list<int> Idxs  = !foreach(a, IdxsR,  !if(!ge(a, 0), a, ?));
  list<int> RIdxs = !foreach(a, RIdxsR, !if(!ge(a, 0), a, ?));
}
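
// A worked example: MakeIdx<[1, 0, 1]> yields Idxs = [0, ?, 1] and
// RIdxs = [0, 2, ?]: set members get compacted indices, and RIdxs maps each
// compacted index back to its original position in the set.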

class TypeInfoGen<
    list<LLVMType> RetTypes,
    list<LLVMType> ParamTypes> {
  list<LLVMType> AllTypes = !listconcat(RetTypes, ParamTypes);

  // ArgCodes for NextArg -- isAny or MatchTypeNextArg
  list<int> ACIdxs = MakeIdx<
    !foreach(ty, AllTypes,
      !or(ty.isAny, !isa<LLVMMatchTypeNextArg>(ty)))>.Idxs;

  // ArgCodes (only for isAny or MatchTypeNextArg)
  list<LLVMType> ACTys = !filter(ty, AllTypes,
    !or(ty.isAny, !isa<LLVMMatchTypeNextArg>(ty)));

  list<int> ArgCodes = !foreach(ty, ACTys, ty.ArgCode);

  // Mappings MatchTypeIdx to ACTys
  list<int> MappingRIdxs = MakeIdx<
    !foreach(ty, ACTys, ty.isAny)>.RIdxs;

  // D63507: Exclude LLVMPointerType<llvm_any_ty>
  bit isOverloaded = !not(!empty(!filter(ty, AllTypes,
    !isa<LLVMAnyType>(ty))));

  list<LLVMType> Types = !foreach(ty, AllTypes,
    !if(!isa<LLVMMatchType>(ty), ACTys[MappingRIdxs[ty.Number]], ty));

  list<list<int>> TypeSig = !listconcat(
    [IIT_RetNumbers[!size(RetTypes)]],
    !foreach(i, !range(AllTypes),
      !foreach(a, AllTypes[i].Sig,
        ResolveArgCode<
          MappingRIdxs,
          ArgCodes,
          ACIdxs[i],
          a>.ret)));
}

//===----------------------------------------------------------------------===//
// Intrinsic Definitions.
//===----------------------------------------------------------------------===//

// Intrinsic class - This is used to define one LLVM intrinsic.  The name of the
// intrinsic definition should start with "int_", then match the LLVM intrinsic
// name with the "llvm." prefix removed, and all "."s turned into "_"s.  For
// example, llvm.bswap.i16 -> int_bswap_i16.
//
//  * RetTypes is a list containing the return types expected for the
//    intrinsic.
//  * ParamTypes is a list containing the parameter types expected for the
//    intrinsic.
//  * Properties can be set to describe the behavior of the intrinsic.
//
class Intrinsic<list<LLVMType> ret_types,
                list<LLVMType> param_types = [],
                list<IntrinsicProperty> intr_properties = [],
                string name = "",
                list<SDNodeProperty> sd_properties = [],
                bit disable_default_attributes = true> : SDPatternOperator {
  string LLVMName = name;
  string TargetPrefix = "";   // Set to a prefix for target-specific intrinsics.
  list<LLVMType> RetTypes = ret_types;
  list<LLVMType> ParamTypes = param_types;
  list<IntrinsicProperty> IntrProperties = intr_properties;
  let Properties = sd_properties;

  // Disable applying IntrinsicProperties that are marked default with
  // IntrinsicProperty<1>
  bit DisableDefaultAttributes = disable_default_attributes;

  bit isTarget = false;

  TypeInfoGen TypeInfo = TypeInfoGen<RetTypes, ParamTypes>;
  bit isOverloaded = TypeInfo.isOverloaded;
  list<LLVMType> Types = TypeInfo.Types;
  list<list<int>> TypeSig = TypeInfo.TypeSig;
}
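
// For illustration only (int_example is hypothetical, not a real intrinsic),
// a minimal definition following the naming convention above would be:
//   def int_example : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
// which declares a non-overloaded intrinsic named llvm.example.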

// Intrinsic with default attributes (disable_default_attributes = false).
class DefaultAttrsIntrinsic<list<LLVMType> ret_types,
                list<LLVMType> param_types = [],
                list<IntrinsicProperty> intr_properties = [],
                string name = "",
                list<SDNodeProperty> sd_properties = []>
                : Intrinsic<ret_types, param_types,
                            intr_properties, name,
                            sd_properties, /*disable_default_attributes*/ 0> {}

/// ClangBuiltin - If this intrinsic exactly corresponds to a Clang builtin, this
/// specifies the name of the builtin.  This provides automatic CBE and CFE
/// support.
class ClangBuiltin<string name> {
  string ClangBuiltinName = name;
}
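
// For example, int_stacksave further below is mapped to Clang's
// __builtin_stack_save via ClangBuiltin<"__builtin_stack_save">.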

class MSBuiltin<string name> {
  string MSBuiltinName = name;
}

#ifndef TEST_INTRINSICS_SUPPRESS_DEFS

//===--------------- Variable Argument Handling Intrinsics ----------------===//
//

def int_vastart : DefaultAttrsIntrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
def int_vacopy  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
                            "llvm.va_copy">;
def int_vaend   : DefaultAttrsIntrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;

//===------------------- Garbage Collection Intrinsics --------------------===//
//
def int_gcroot  : Intrinsic<[],
                            [llvm_ptr_ty, llvm_ptr_ty]>;
def int_gcread  : Intrinsic<[llvm_ptr_ty],
                            [llvm_ptr_ty, llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_gcwrite : Intrinsic<[],
                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
                            [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
                             NoCapture<ArgIndex<2>>]>;

//===------------------- ObjC ARC runtime Intrinsics --------------------===//
//
// Note these are to support the Objective-C ARC optimizer which wants to
// eliminate retain and releases where possible.

def int_objc_autorelease                    : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_autoreleasePoolPop             : Intrinsic<[], [llvm_ptr_ty]>;
def int_objc_autoreleasePoolPush            : Intrinsic<[llvm_ptr_ty], []>;
def int_objc_autoreleaseReturnValue         : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_copyWeak                       : Intrinsic<[],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_destroyWeak                    : Intrinsic<[], [llvm_ptr_ty]>;
def int_objc_initWeak                       : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_loadWeak                       : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_loadWeakRetained               : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_moveWeak                       : Intrinsic<[],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_release                        : Intrinsic<[], [llvm_ptr_ty]>;
def int_objc_retain                         : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_retainAutorelease              : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_retainAutoreleaseReturnValue   : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_retainAutoreleasedReturnValue  : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_retainBlock                    : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_storeStrong                    : Intrinsic<[],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_storeWeak                      : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_clang_arc_use                  : Intrinsic<[],
                                                        [llvm_vararg_ty]>;
def int_objc_clang_arc_noop_use : DefaultAttrsIntrinsic<[],
                                                        [llvm_vararg_ty],
                                                        [IntrInaccessibleMemOnly]>;
def int_objc_unsafeClaimAutoreleasedReturnValue : Intrinsic<[llvm_ptr_ty],
                                                            [llvm_ptr_ty]>;
def int_objc_retainedObject                 : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_unretainedObject               : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_unretainedPointer              : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_retain_autorelease             : Intrinsic<[llvm_ptr_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_sync_enter                     : Intrinsic<[llvm_i32_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_sync_exit                      : Intrinsic<[llvm_i32_ty],
                                                        [llvm_ptr_ty]>;
def int_objc_arc_annotation_topdown_bbstart : Intrinsic<[],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_arc_annotation_topdown_bbend   : Intrinsic<[],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
def int_objc_arc_annotation_bottomup_bbstart  : Intrinsic<[],
                                                          [llvm_ptr_ty,
                                                           llvm_ptr_ty]>;
def int_objc_arc_annotation_bottomup_bbend  : Intrinsic<[],
                                                        [llvm_ptr_ty,
                                                         llvm_ptr_ty]>;
//===--------------- Swift asynchronous context intrinsics ----------------===//

// Returns the location of the Swift asynchronous context (usually stored just
// before the frame pointer), and triggers the creation of a null context if it
// would otherwise be unneeded.
def int_swift_async_context_addr : Intrinsic<[llvm_ptr_ty], [], []>;

//===--------------------- Code Generator Intrinsics ----------------------===//
//
def int_returnaddress : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_i32_ty],
                                  [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_addressofreturnaddress : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [], [IntrNoMem]>;
def int_frameaddress : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_sponentry  : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [], [IntrNoMem]>;
def int_read_register  : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
                                   [IntrReadMem], "llvm.read_register">;
def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
                                   [IntrNoCallback], "llvm.write_register">;
def int_read_volatile_register  : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
                                            [IntrHasSideEffects],
                                             "llvm.read_volatile_register">;

// Gets the address of the local variable area. This is typically a copy of the
// stack, frame, or base pointer depending on the type of prologue.
def int_localaddress : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;

// Escapes local variables to allow access from other functions.
def int_localescape : DefaultAttrsIntrinsic<[], [llvm_vararg_ty]>;

// Given a function and the localaddress of a parent frame, returns a pointer
// to an escaped allocation indicated by the index.
def int_localrecover : DefaultAttrsIntrinsic<[llvm_ptr_ty],
                                 [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;

// Given the frame pointer passed into an SEH filter function, returns a
// pointer to the local variable area suitable for use with llvm.localrecover.
def int_eh_recoverfp : DefaultAttrsIntrinsic<[llvm_ptr_ty],
                                 [llvm_ptr_ty, llvm_ptr_ty],
                                 [IntrNoMem]>;

// To mark the beginning/end of a try-scope for Windows SEH -EHa:
// calls/invokes to these intrinsics are placed to model control flow caused
// by HW exceptions under option -EHa, and are discarded during a codegen
// pass after EH tables are generated.
def int_seh_try_begin : Intrinsic<[], [], [IntrWriteMem, IntrWillReturn]>;
def int_seh_try_end : Intrinsic<[], [], [IntrWriteMem, IntrWillReturn]>;
def int_seh_scope_begin : Intrinsic<[], [], [IntrNoMem]>;
def int_seh_scope_end : Intrinsic<[], [], [IntrNoMem]>;

// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
// model their dependencies on allocas.
def int_stacksave     : DefaultAttrsIntrinsic<[llvm_ptr_ty]>,
                        ClangBuiltin<"__builtin_stack_save">;
def int_stackrestore  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty]>,
                        ClangBuiltin<"__builtin_stack_restore">;

def int_get_dynamic_area_offset : DefaultAttrsIntrinsic<[llvm_anyint_ty]>;

def int_thread_pointer : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
                         ClangBuiltin<"__builtin_thread_pointer">;

// IntrInaccessibleMemOrArgMemOnly is a little more pessimistic than strictly
// necessary for prefetch, but it conveniently prevents the prefetch from
// being reordered too freely with respect to nearby accesses to the same
// memory, while not impeding optimization.
def int_prefetch
    : DefaultAttrsIntrinsic<[], [ llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ],
                [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn,
                 ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
                 ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
def int_pcmarker      : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;

def int_readcyclecounter : DefaultAttrsIntrinsic<[llvm_i64_ty]>;

// The assume intrinsic is marked InaccessibleMemOnly so that proper control
// dependencies will be maintained.
def int_assume : DefaultAttrsIntrinsic<
    [], [llvm_i1_ty], [IntrInaccessibleMemOnly, NoUndef<ArgIndex<0>>]>;

// 'llvm.experimental.noalias.scope.decl' intrinsic: Inserted at the location of
// noalias scope declaration. Makes it possible to identify that a noalias scope
// is only valid inside the body of a loop.
//
// Purpose of the different arguments:
// - arg0: id.scope: metadata representing the scope declaration.
def int_experimental_noalias_scope_decl
    : DefaultAttrsIntrinsic<[], [llvm_metadata_ty],
        [IntrInaccessibleMemOnly]>; // blocks LICM and some more

// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
// guard to the correct place on the stack frame.
def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
def int_stackguard : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], []>;

// A cover for instrumentation based profiling.
def int_instrprof_cover : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty,
                                         llvm_i32_ty, llvm_i32_ty]>;

// A counter increment for instrumentation based profiling.
def int_instrprof_increment : Intrinsic<[],
                                        [llvm_ptr_ty, llvm_i64_ty,
                                         llvm_i32_ty, llvm_i32_ty]>;

// A counter increment with step for instrumentation based profiling.
def int_instrprof_increment_step : Intrinsic<[],
                                        [llvm_ptr_ty, llvm_i64_ty,
                                         llvm_i32_ty, llvm_i32_ty, llvm_i64_ty]>;

// A timestamp for instrumentation based profiling.
def int_instrprof_timestamp : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty,
                                             llvm_i32_ty, llvm_i32_ty]>;

// A call to profile runtime for value profiling of target expressions
// through instrumentation based profiling.
def int_instrprof_value_profile : Intrinsic<[],
                                            [llvm_ptr_ty, llvm_i64_ty,
                                             llvm_i64_ty, llvm_i32_ty,
                                             llvm_i32_ty]>;

def int_call_preallocated_setup : DefaultAttrsIntrinsic<[llvm_token_ty], [llvm_i32_ty]>;
def int_call_preallocated_arg : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_i32_ty]>;
def int_call_preallocated_teardown : DefaultAttrsIntrinsic<[], [llvm_token_ty]>;

// This intrinsic is intentionally undocumented and users shouldn't call it;
// it's produced then quickly consumed during codegen.
def int_callbr_landingpad : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
                                      [IntrNoMerge]>;

//===------------------- Standard C Library Intrinsics --------------------===//
//

def int_memcpy  : Intrinsic<[],
                            [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
                             llvm_i1_ty],
                            [IntrArgMemOnly, IntrWillReturn, IntrNoFree,
                             IntrNoCallback,
                             NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
                             NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
                             WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
                             ImmArg<ArgIndex<3>>]>;

// Memcpy semantic that is guaranteed to be inlined.
// In particular this means that the generated code is not allowed to call any
// external function.
// The third argument (specifying the size) must be a constant.
def int_memcpy_inline
    : Intrinsic<[],
      [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i1_ty],
      [IntrArgMemOnly, IntrWillReturn, IntrNoFree, IntrNoCallback,
       NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
       NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
       WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
       ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_memmove : Intrinsic<[],
                            [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
                             llvm_i1_ty],
                            [IntrArgMemOnly, IntrWillReturn, IntrNoFree,
                             IntrNoCallback,
                             NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
                             WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
                             ImmArg<ArgIndex<3>>]>;
def int_memset  : Intrinsic<[],
                            [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
                             llvm_i1_ty],
                            [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
                             IntrNoFree, IntrNoCallback,
                             NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
                             ImmArg<ArgIndex<3>>]>;

// Memset version that is guaranteed to be inlined.
// In particular this means that the generated code is not allowed to call any
// external function.
// The third argument (specifying the size) must be a constant.
def int_memset_inline
    : Intrinsic<[],
      [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i1_ty],
      [IntrWriteMem, IntrArgMemOnly, IntrWillReturn, IntrNoFree, IntrNoCallback,
       NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
       ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

// FIXME: Add version of these floating point intrinsics which allow non-default
// rounding modes and FP exception handling.

let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
  def int_fma  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                           [LLVMMatchType<0>, LLVMMatchType<0>,
                            LLVMMatchType<0>]>;
  def int_fmuladd : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>,
                               LLVMMatchType<0>]>;

  // These functions do not read memory, but are sensitive to the
  // rounding mode. LLVM purposely does not model changes to the FP
  // environment so they can be treated as readnone.
  def int_sqrt : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_powi : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_anyint_ty]>;
  def int_sin  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_cos  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_pow  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                           [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_log  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_log10: DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_log2 : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_exp  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_exp2 : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_fabs : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_copysign : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                               [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_floor : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_ceil  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_trunc : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_rint  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_nearbyint : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_round : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_roundeven    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;

  // Truncate a floating point number with a specific rounding mode
  def int_fptrunc_round : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                [ llvm_anyfloat_ty, llvm_metadata_ty ]>;

  def int_canonicalize : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
                                   [IntrNoMem]>;
  // Arithmetic fence intrinsic.
  def int_arithmetic_fence : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
                                                   [IntrNoMem]>;

  def int_lround : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>;
  def int_llround : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>;
  def int_lrint : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>;
  def int_llrint : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>;

  // TODO: int operand should be constrained to same number of elements as the result.
  def int_ldexp : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>,
                                                             llvm_anyint_ty]>;

  // TODO: Should constrain all element counts to match
  def int_frexp : DefaultAttrsIntrinsic<[llvm_anyfloat_ty, llvm_anyint_ty], [LLVMMatchType<0>]>;
}

def int_minnum : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
def int_maxnum : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
def int_minimum : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;
def int_maximum : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]
>;

// Internal interface for object size checking
def int_objectsize : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                               [llvm_anyptr_ty, llvm_i1_ty,
                                llvm_i1_ty, llvm_i1_ty],
                               [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                                ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
                                ImmArg<ArgIndex<3>>]>,
                               ClangBuiltin<"__builtin_object_size">;

//===--------------- Access to Floating Point Environment -----------------===//
//

let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
  def int_get_rounding  : DefaultAttrsIntrinsic<[llvm_i32_ty], []>;
  def int_set_rounding  : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;
  def int_get_fpenv     : DefaultAttrsIntrinsic<[llvm_anyint_ty], []>;
  def int_set_fpenv     : DefaultAttrsIntrinsic<[], [llvm_anyint_ty]>;
  def int_reset_fpenv   : DefaultAttrsIntrinsic<[], []>;
}

//===--------------- Floating Point Properties ----------------------------===//
//

def int_is_fpclass
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [llvm_anyfloat_ty, llvm_i32_ty],
                            [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;

//===--------------- Constrained Floating Point Intrinsics ----------------===//
//

/// IntrStrictFP - The intrinsic is allowed to be used in an alternate
/// floating point environment.
def IntrStrictFP : IntrinsicProperty;

let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in {
  def int_experimental_constrained_fadd : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_fsub : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_fmul : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_fdiv : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_frem : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;

  def int_experimental_constrained_fma : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;

  def int_experimental_constrained_fmuladd : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                       [ LLVMMatchType<0>,
                                                         LLVMMatchType<0>,
                                                         LLVMMatchType<0>,
                                                         llvm_metadata_ty,
                                                         llvm_metadata_ty ]>;

  def int_experimental_constrained_fptosi : DefaultAttrsIntrinsic<[ llvm_anyint_ty ],
                                                    [ llvm_anyfloat_ty,
                                                      llvm_metadata_ty ]>;

  def int_experimental_constrained_fptoui : DefaultAttrsIntrinsic<[ llvm_anyint_ty ],
                                                    [ llvm_anyfloat_ty,
                                                      llvm_metadata_ty ]>;

  def int_experimental_constrained_sitofp : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                       [ llvm_anyint_ty,
                                                         llvm_metadata_ty,
                                                         llvm_metadata_ty ]>;

  def int_experimental_constrained_uitofp : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                       [ llvm_anyint_ty,
                                                         llvm_metadata_ty,
                                                         llvm_metadata_ty ]>;

  def int_experimental_constrained_fptrunc : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                       [ llvm_anyfloat_ty,
                                                         llvm_metadata_ty,
                                                         llvm_metadata_ty ]>;

  def int_experimental_constrained_fpext : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ llvm_anyfloat_ty,
                                                       llvm_metadata_ty ]>;

  // These intrinsics are sensitive to the rounding mode so we need constrained
  // versions of each of them.  When strict rounding and exception control are
  // not required the non-constrained versions of these intrinsics should be
  // used.
  def int_experimental_constrained_sqrt : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_powi : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_i32_ty,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_ldexp : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_anyint_ty,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_sin  : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_cos  : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_pow  : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_log  : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_log10: DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_log2 : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_exp  : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_exp2 : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_rint  : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ LLVMMatchType<0>,
                                                       llvm_metadata_ty,
                                                       llvm_metadata_ty ]>;
  def int_experimental_constrained_nearbyint : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                         [ LLVMMatchType<0>,
                                                           llvm_metadata_ty,
                                                           llvm_metadata_ty ]>;
  def int_experimental_constrained_lrint : DefaultAttrsIntrinsic<[ llvm_anyint_ty ],
                                                     [ llvm_anyfloat_ty,
                                                       llvm_metadata_ty,
                                                       llvm_metadata_ty ]>;
  def int_experimental_constrained_llrint : DefaultAttrsIntrinsic<[ llvm_anyint_ty ],
                                                      [ llvm_anyfloat_ty,
                                                        llvm_metadata_ty,
                                                        llvm_metadata_ty ]>;
  def int_experimental_constrained_maxnum : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                      [ LLVMMatchType<0>,
                                                        LLVMMatchType<0>,
                                                        llvm_metadata_ty ]>;
  def int_experimental_constrained_minnum : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                      [ LLVMMatchType<0>,
                                                        LLVMMatchType<0>,
                                                        llvm_metadata_ty ]>;
  def int_experimental_constrained_maximum : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                       [ LLVMMatchType<0>,
                                                         LLVMMatchType<0>,
                                                         llvm_metadata_ty ]>;
  def int_experimental_constrained_minimum : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                       [ LLVMMatchType<0>,
                                                         LLVMMatchType<0>,
                                                         llvm_metadata_ty ]>;
  def int_experimental_constrained_ceil : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_floor : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ LLVMMatchType<0>,
                                                       llvm_metadata_ty ]>;
  def int_experimental_constrained_lround : DefaultAttrsIntrinsic<[ llvm_anyint_ty ],
                                                      [ llvm_anyfloat_ty,
                                                        llvm_metadata_ty ]>;
  def int_experimental_constrained_llround : DefaultAttrsIntrinsic<[ llvm_anyint_ty ],
                                                       [ llvm_anyfloat_ty,
                                                         llvm_metadata_ty ]>;
  def int_experimental_constrained_round : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ LLVMMatchType<0>,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_roundeven : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                         [ LLVMMatchType<0>,
                                                           llvm_metadata_ty ]>;
  def int_experimental_constrained_trunc : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
                                                     [ LLVMMatchType<0>,
                                                       llvm_metadata_ty ]>;

  // Constrained floating-point comparison (quiet and signaling variants).
  // Third operand is the predicate represented as a metadata string.
  def int_experimental_constrained_fcmp
      : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
                  [ llvm_anyfloat_ty, LLVMMatchType<0>,
                    llvm_metadata_ty, llvm_metadata_ty ]>;
  def int_experimental_constrained_fcmps
      : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
                  [ llvm_anyfloat_ty, LLVMMatchType<0>,
                    llvm_metadata_ty, llvm_metadata_ty ]>;
}
// FIXME: Consider adding intrinsics for sitofp, uitofp.


//===------------------------- Expect Intrinsics --------------------------===//
//
def int_expect : DefaultAttrsIntrinsic<[llvm_anyint_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, IntrWillReturn]>;

def int_expect_with_probability : DefaultAttrsIntrinsic<[llvm_anyint_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_double_ty],
  [IntrNoMem, IntrWillReturn, ImmArg<ArgIndex<2>>]>;
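
// An illustrative IR-level use (operand values are hypothetical): hint that
// %cond is almost always 1 so block layout favours that path:
//   %hint = call i64 @llvm.expect.i64(i64 %cond, i64 1)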

//===-------------------- Bit Manipulation Intrinsics ---------------------===//
//

// None of these intrinsics accesses memory at all.
let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
  def int_bswap: DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
  def int_ctpop: DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
  def int_bitreverse : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
  def int_fshl : DefaultAttrsIntrinsic<[llvm_anyint_ty],
      [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_fshr : DefaultAttrsIntrinsic<[llvm_anyint_ty],
      [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>;
}
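
// Illustrative IR-level use: a funnel shift whose two inputs are the same
// value is a rotate, e.g. rotating %x left by 8 bits:
//   %rot = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 8)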

let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                      ImmArg<ArgIndex<1>>] in {
  def int_ctlz : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
  def int_cttz : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
}
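
// Illustrative IR-level use: with the i1 immediate set to false, a zero
// input yields the bit width rather than poison:
//   %lz = call i32 @llvm.ctlz.i32(i32 %x, i1 false)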

//===------------------------ Debugger Intrinsics -------------------------===//
//

// None of these intrinsics accesses memory at all...but that doesn't
// mean the optimizers may transform them aggressively.  Special handling
// is needed in a few places. These synthetic intrinsics have no
// side effects and just mark information about their operands.
let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
  def int_dbg_declare      : DefaultAttrsIntrinsic<[],
                                       [llvm_metadata_ty,
                                        llvm_metadata_ty,
                                        llvm_metadata_ty]>;
  def int_dbg_value        : DefaultAttrsIntrinsic<[],
                                       [llvm_metadata_ty,
                                        llvm_metadata_ty,
                                        llvm_metadata_ty]>;
  def int_dbg_assign        : DefaultAttrsIntrinsic<[],
                                       [llvm_metadata_ty,
                                        llvm_metadata_ty,
                                        llvm_metadata_ty,
                                        llvm_metadata_ty,
                                        llvm_metadata_ty,
                                        llvm_metadata_ty]>;
  def int_dbg_label        : DefaultAttrsIntrinsic<[],
                                       [llvm_metadata_ty]>;
}

//===------------------ Exception Handling Intrinsics ---------------------===//
//

// The result of eh.typeid.for depends on the enclosing function, but inside a
// given function it is 'const' and may be CSE'd etc.
def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;

def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;

// eh.exceptionpointer returns the pointer to the exception caught by
// the given `catchpad`.
def int_eh_exceptionpointer : Intrinsic<[llvm_anyptr_ty], [llvm_token_ty],
                                        [IntrNoMem]>;

// Gets the exception code from a catchpad token. Only used on some platforms.
def int_eh_exceptioncode : Intrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrNoMem]>;

// __builtin_unwind_init is an undocumented GCC intrinsic that causes all
// callee-saved registers to be saved and restored (regardless of whether they
// are used) in the calling function. It is used by libgcc_eh.
def int_eh_unwind_init: Intrinsic<[]>,
                        ClangBuiltin<"__builtin_unwind_init">;

def int_eh_dwarf_cfa  : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>;

def int_eh_sjlj_lsda             : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_eh_sjlj_callsite         : Intrinsic<[], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
def int_eh_sjlj_setjmp          : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_eh_sjlj_longjmp         : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
def int_eh_sjlj_setup_dispatch  : Intrinsic<[], []>;

//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
def int_var_annotation : DefaultAttrsIntrinsic<
    [], [llvm_anyptr_ty, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
    [IntrInaccessibleMemOnly], "llvm.var.annotation">;

def int_ptr_annotation : DefaultAttrsIntrinsic<
    [llvm_anyptr_ty],
    [LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
    [IntrInaccessibleMemOnly], "llvm.ptr.annotation">;

def int_annotation : DefaultAttrsIntrinsic<
    [llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty],
    [IntrInaccessibleMemOnly], "llvm.annotation">;

// Annotates the current program point with metadata strings which are emitted
// as CodeView debug info records. This is expensive, as it disables inlining
// and is modelled as having side effects.
def int_codeview_annotation : DefaultAttrsIntrinsic<[], [llvm_metadata_ty],
                                        [IntrInaccessibleMemOnly, IntrNoDuplicate, IntrWillReturn],
                                        "llvm.codeview.annotation">;

//===------------------------ Trampoline Intrinsics -----------------------===//
//
def int_init_trampoline : DefaultAttrsIntrinsic<
    [], [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
     ReadNone<ArgIndex<1>>, ReadNone<ArgIndex<2>>]>,
    ClangBuiltin<"__builtin_init_trampoline">;

def int_adjust_trampoline : DefaultAttrsIntrinsic<
    [llvm_ptr_ty], [llvm_ptr_ty], [IntrReadMem, IntrArgMemOnly]>,
    ClangBuiltin<"__builtin_adjust_trampoline">;

//===------------------------ Overflow Intrinsics -------------------------===//
//

// Expose the overflow (carry) flag from add, subtract and multiply
// operations on two integers.
let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
  def int_sadd_with_overflow : DefaultAttrsIntrinsic<[llvm_anyint_ty,
                                          LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                         [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_uadd_with_overflow : DefaultAttrsIntrinsic<[llvm_anyint_ty,
                                          LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                         [LLVMMatchType<0>, LLVMMatchType<0>]>;

  def int_ssub_with_overflow : DefaultAttrsIntrinsic<[llvm_anyint_ty,
                                          LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                         [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_usub_with_overflow : DefaultAttrsIntrinsic<[llvm_anyint_ty,
                                          LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                         [LLVMMatchType<0>, LLVMMatchType<0>]>;

  def int_smul_with_overflow : DefaultAttrsIntrinsic<[llvm_anyint_ty,
                                          LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                         [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_umul_with_overflow : DefaultAttrsIntrinsic<[llvm_anyint_ty,
                                          LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                                         [LLVMMatchType<0>, LLVMMatchType<0>]>;
}
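
// An illustrative IR-level use (hypothetical names): the returned struct
// packs the arithmetic result together with the overflow bit:
//   %res  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum  = extractvalue { i32, i1 } %res, 0
//   %obit = extractvalue { i32, i1 } %res, 1
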
//===------------------------- Saturation Arithmetic Intrinsics ---------------------===//
//
def int_sadd_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]>;
def int_uadd_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn, Commutative]>;
def int_ssub_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_usub_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_sshl_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_ushl_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
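
// Illustrative IR-level use: the result clamps to the type's range instead
// of wrapping, e.g. an i8 sum saturates to [-128, 127]:
//   %s = call i8 @llvm.sadd.sat.i8(i8 %a, i8 %b)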

//===------------------------- Fixed Point Arithmetic Intrinsics ---------------------===//
//
def int_smul_fix : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                              Commutative, ImmArg<ArgIndex<2>>]>;

def int_umul_fix : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                             [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                              Commutative, ImmArg<ArgIndex<2>>]>;

def int_sdiv_fix : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                             [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_udiv_fix : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                             [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                             [IntrNoMem, ImmArg<ArgIndex<2>>]>;
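
// Illustrative IR-level use: multiply two Q16.16 fixed-point values; the
// immediate scale operand is the number of fractional bits:
//   %p = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 16)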

//===------------------- Fixed Point Saturation Arithmetic Intrinsics ----------------===//
//
def int_smul_fix_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                                 [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                                  Commutative, ImmArg<ArgIndex<2>>]>;
def int_umul_fix_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                                 [IntrNoMem, IntrSpeculatable, IntrWillReturn,
                                  Commutative, ImmArg<ArgIndex<2>>]>;

def int_sdiv_fix_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_udiv_fix_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                                 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;

//===------------------ Integer Min/Max/Abs Intrinsics --------------------===//
//
def int_abs : DefaultAttrsIntrinsic<
    [llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

def int_smax : DefaultAttrsIntrinsic<
    [llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_smin : DefaultAttrsIntrinsic<
    [llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_umax : DefaultAttrsIntrinsic<
    [llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
def int_umin : DefaultAttrsIntrinsic<
    [llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
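
// Illustrative IR-level uses: the i1 immediate of llvm.abs selects whether
// an INT_MIN input is poison:
//   %a = call i32 @llvm.abs.i32(i32 %x, i1 false)
//   %m = call i32 @llvm.smax.i32(i32 %x, i32 %y)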

//===------------------------- Memory Use Markers -------------------------===//
//
def int_lifetime_start  : DefaultAttrsIntrinsic<[],
                                    [llvm_i64_ty, llvm_anyptr_ty],
                                    [IntrArgMemOnly, IntrWillReturn,
                                     NoCapture<ArgIndex<1>>,
                                     ImmArg<ArgIndex<0>>]>;
def int_lifetime_end    : DefaultAttrsIntrinsic<[],
                                    [llvm_i64_ty, llvm_anyptr_ty],
                                    [IntrArgMemOnly, IntrWillReturn,
                                     NoCapture<ArgIndex<1>>,
                                     ImmArg<ArgIndex<0>>]>;
def int_invariant_start : DefaultAttrsIntrinsic<[llvm_ptr_ty],
                                    [llvm_i64_ty, llvm_anyptr_ty],
                                    [IntrArgMemOnly, IntrWillReturn,
                                     NoCapture<ArgIndex<1>>,
                                     ImmArg<ArgIndex<0>>]>;
def int_invariant_end   : DefaultAttrsIntrinsic<[],
                                    [llvm_ptr_ty, llvm_i64_ty,
                                     llvm_anyptr_ty],
                                    [IntrArgMemOnly, IntrWillReturn,
                                     NoCapture<ArgIndex<2>>,
                                     ImmArg<ArgIndex<1>>]>;
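
// Illustrative IR-level use of the lifetime markers: bracket the live range
// of a stack slot so later passes may reuse its memory outside the region:
//   %buf = alloca [16 x i8]
//   call void @llvm.lifetime.start.p0(i64 16, ptr %buf)
//   ; ... use %buf ...
//   call void @llvm.lifetime.end.p0(i64 16, ptr %buf)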

// launder.invariant.group can't be marked 'readnone' (IntrNoMem), because
// that would allow CSE of two barriers with the same argument.
// Inaccessiblememonly says that the barrier doesn't read the argument, but
// it does change state that is not accessible to this module. This way we
// can DSE through the barrier, because it doesn't read the value after the
// store. Although the barrier doesn't modify any memory, it can't be marked
// as readonly either, because then it would be possible to CSE two barriers
// with a store in between.
// The argument also can't be marked with the 'returned' attribute, because
// that would remove the barrier.
// Note that this intrinsic is still experimental, which means that its
// semantics might change in the future.
def int_launder_invariant_group : DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                                            [LLVMMatchType<0>],
                                            [IntrInaccessibleMemOnly, IntrSpeculatable, IntrWillReturn]>;


def int_strip_invariant_group : DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                                          [LLVMMatchType<0>],
                                          [IntrSpeculatable, IntrNoMem, IntrWillReturn]>;

//===------------------------ Stackmap Intrinsics -------------------------===//
//
def int_experimental_stackmap : DefaultAttrsIntrinsic<[],
                                  [llvm_i64_ty, llvm_i32_ty, llvm_vararg_ty],
                                  [Throws]>;
def int_experimental_patchpoint_void : Intrinsic<[],
                                                 [llvm_i64_ty, llvm_i32_ty,
                                                  llvm_ptr_ty, llvm_i32_ty,
                                                  llvm_vararg_ty],
                                                  [Throws]>;
def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty,
                                                 llvm_ptr_ty, llvm_i32_ty,
                                                 llvm_vararg_ty],
                                                 [Throws]>;


//===------------------------ Garbage Collection Intrinsics ---------------===//
// These are documented in docs/Statepoint.rst

def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty],
                               [llvm_i64_ty, llvm_i32_ty,
                                llvm_anyptr_ty, llvm_i32_ty,
                                llvm_i32_ty, llvm_vararg_ty],
                               [Throws, ImmArg<ArgIndex<0>>,
                                ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<3>>,
                                ImmArg<ArgIndex<4>>]>;

def int_experimental_gc_result : DefaultAttrsIntrinsic<
    [llvm_any_ty], [llvm_token_ty], [IntrNoMem]>;

def int_experimental_gc_relocate : DefaultAttrsIntrinsic<
    [llvm_any_ty], [llvm_token_ty, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_experimental_gc_get_pointer_base : DefaultAttrsIntrinsic<
    [llvm_anyptr_ty], [llvm_anyptr_ty],
    [IntrNoMem, IntrWillReturn, ReadNone<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;

def int_experimental_gc_get_pointer_offset : DefaultAttrsIntrinsic<
    [llvm_i64_ty], [llvm_anyptr_ty],
    [IntrNoMem, IntrWillReturn, ReadNone<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;

//===------------------------ Coroutine Intrinsics ---------------===//
// These are documented in docs/Coroutines.rst

// Coroutine Structure Intrinsics.

def int_coro_id : DefaultAttrsIntrinsic<[llvm_token_ty],
    [llvm_i32_ty, llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
    [IntrArgMemOnly, IntrReadMem, ReadNone<ArgIndex<1>>, ReadOnly<ArgIndex<2>>,
     NoCapture<ArgIndex<2>>]>;
def int_coro_id_retcon : Intrinsic<[llvm_token_ty],
    [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty,
     llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
    []>;
def int_coro_id_retcon_once : Intrinsic<[llvm_token_ty],
    [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty,
     llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
    []>;
def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
def int_coro_id_async : Intrinsic<[llvm_token_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
  []>;
def int_coro_async_context_alloc : Intrinsic<[llvm_ptr_ty],
    [llvm_ptr_ty, llvm_ptr_ty],
    []>;
def int_coro_async_context_dealloc : Intrinsic<[],
    [llvm_ptr_ty],
    []>;
def int_coro_async_resume : Intrinsic<[llvm_ptr_ty],
    [],
    [IntrNoMerge]>;
def int_coro_async_size_replace : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
def int_coro_suspend_async
    : Intrinsic<[llvm_any_ty],
                [llvm_i32_ty, llvm_ptr_ty, llvm_ptr_ty, llvm_vararg_ty],
                [IntrNoMerge]>;
def int_coro_prepare_async : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
                                       [IntrNoMem]>;
def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
                               [WriteOnly<ArgIndex<1>>]>;

def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
                              [IntrReadMem, IntrArgMemOnly,
                               ReadOnly<ArgIndex<1>>,
                               NoCapture<ArgIndex<1>>]>;
def int_coro_end : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty], []>;
def int_coro_end_async
    : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty, llvm_vararg_ty], []>;

def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_coro_noop : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
def int_coro_align : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;

def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], [IntrNoMerge]>;
def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
def int_coro_suspend_retcon : Intrinsic<[llvm_any_ty], [llvm_vararg_ty], []>;
def int_coro_prepare_retcon : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
                                        [IntrNoMem]>;
def int_coro_alloca_alloc : Intrinsic<[llvm_token_ty],
                                      [llvm_anyint_ty, llvm_i32_ty], []>;
def int_coro_alloca_get : Intrinsic<[llvm_ptr_ty], [llvm_token_ty], []>;
def int_coro_alloca_free : Intrinsic<[], [llvm_token_ty], []>;

// Coroutine Manipulation Intrinsics.

def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
def int_coro_destroy : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
def int_coro_done : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
                              [IntrArgMemOnly, ReadOnly<ArgIndex<0>>,
                               NoCapture<ArgIndex<0>>]>;
def int_coro_promise : Intrinsic<[llvm_ptr_ty],
                                 [llvm_ptr_ty, llvm_i32_ty, llvm_i1_ty],
                                 [IntrNoMem, NoCapture<ArgIndex<0>>]>;

// Coroutine Lowering Intrinsics. Used internally by coroutine passes.

def int_coro_subfn_addr : DefaultAttrsIntrinsic<
    [llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
    [IntrReadMem, IntrArgMemOnly, ReadOnly<ArgIndex<0>>,
     NoCapture<ArgIndex<0>>]>;

//===-------------------------- Other Intrinsics --------------------------===//
//
def int_trap : Intrinsic<[], [], [IntrNoReturn, IntrCold]>,
               ClangBuiltin<"__builtin_trap">;
def int_debugtrap : Intrinsic<[]>,
                    ClangBuiltin<"__builtin_debugtrap">;
def int_ubsantrap : Intrinsic<[], [llvm_i8_ty],
                              [IntrNoReturn, IntrCold, ImmArg<ArgIndex<0>>]>;

// Support for dynamic deoptimization (or de-specialization)
def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty],
                                            [Throws]>;

// Support for speculative runtime guards
def int_experimental_guard : DefaultAttrsIntrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
                                       [Throws]>;

// Supports widenable conditions for guards represented as explicit branches.
def int_experimental_widenable_condition : DefaultAttrsIntrinsic<[llvm_i1_ty], [],
        [IntrInaccessibleMemOnly, IntrWillReturn, IntrSpeculatable, NoUndef<RetIndex>]>;

// NOP: calls/invokes to this intrinsic are removed by codegen
def int_donothing : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrWillReturn]>;

// This intrinsic has no actual effect, though the optimizer treats it as
// having opaque side effects. It may be inserted into loops to ensure that
// they are not removed even if they turn out to be empty, for languages
// which specify that infinite loops must be preserved.
def int_sideeffect : DefaultAttrsIntrinsic<[], [], [IntrInaccessibleMemOnly, IntrWillReturn]>;

// The pseudoprobe intrinsic works as a placeholder for the block it probes.
// Like the sideeffect intrinsic defined above, this intrinsic is treated by
// the optimizer as having opaque side effects so that it won't be removed
// from, or moved out of, the block it probes.
def int_pseudoprobe : DefaultAttrsIntrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
                                    [IntrInaccessibleMemOnly, IntrWillReturn]>;

// Intrinsics to support half precision floating point format
let IntrProperties = [IntrNoMem, IntrWillReturn] in {
def int_convert_to_fp16   : DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
def int_convert_from_fp16 : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
}
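
// Illustrative IR-level use: convert a float to IEEE half, returning the raw
// bits in an i16:
//   %h = call i16 @llvm.convert.to.fp16.f32(float %f)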

// Saturating floating point to integer intrinsics
let IntrProperties = [IntrNoMem, IntrSpeculatable, IntrWillReturn] in {
def int_fptoui_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>;
def int_fptosi_sat : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>;
}
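
// Illustrative IR-level use: the conversion clamps to the integer range and
// maps NaN to 0, rather than producing poison on overflow:
//   %i = call i32 @llvm.fptosi.sat.i32.f32(float %f)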

// Clear-cache intrinsic; defaults to being ignored (i.e. emits nothing).
// Maps to void __clear_cache() on supporting platforms.
def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
                                [], "llvm.clear_cache">;

// Intrinsic to detect whether its argument is a constant.
def int_is_constant : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_any_ty],
                                [IntrNoMem, IntrWillReturn, IntrConvergent],
                                "llvm.is.constant">;

// Intrinsic to mask out bits of a pointer.
def int_ptrmask: DefaultAttrsIntrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_anyint_ty],
                           [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
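
// Illustrative IR-level use: align a pointer down to a 16-byte boundary by
// clearing its low bits:
//   %aligned = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16)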

// Intrinsic to wrap a thread-local variable.
def int_threadlocal_address : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>],
                                                    [NonNull<RetIndex>, NonNull<ArgIndex<0>>,
                                                     IntrNoMem, IntrSpeculatable, IntrWillReturn]>;

def int_experimental_stepvector : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                        [], [IntrNoMem]>;

//===---------------- Vector Predication Intrinsics --------------===//
// Memory Intrinsics
def int_vp_store : DefaultAttrsIntrinsic<[],
                             [ llvm_anyvector_ty,
                               llvm_anyptr_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty],
                             [ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;

def int_vp_load  : DefaultAttrsIntrinsic<[ llvm_anyvector_ty],
                             [ llvm_anyptr_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty],
                             [ NoCapture<ArgIndex<0>>, IntrNoSync, IntrReadMem, IntrWillReturn, IntrArgMemOnly ]>;
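
// Illustrative IR-level use: a predicated load; lanes with a false mask bit
// or at index >= the explicit vector length %evl are not accessed:
//   %v = call <4 x i32> @llvm.vp.load.v4i32.p0(ptr %p, <4 x i1> %m, i32 %evl)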

def int_vp_gather: DefaultAttrsIntrinsic<[ llvm_anyvector_ty],
                             [ LLVMVectorOfAnyPointersToElt<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty],
                             [ IntrReadMem, IntrNoSync, IntrWillReturn]>;

def int_vp_scatter: DefaultAttrsIntrinsic<[],
                             [ llvm_anyvector_ty,
                               LLVMVectorOfAnyPointersToElt<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty],
                             [ IntrNoSync, IntrWillReturn ]>; // TODO allow IntrNoCapture for vectors of pointers

// Experimental strided memory accesses
def int_experimental_vp_strided_store : DefaultAttrsIntrinsic<[],
                             [ llvm_anyvector_ty,
                               llvm_anyptr_ty,
                               llvm_anyint_ty, // Stride in bytes
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty],
                             [ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;

def int_experimental_vp_strided_load  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                             [ llvm_anyptr_ty,
                               llvm_anyint_ty, // Stride in bytes
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty],
                             [ NoCapture<ArgIndex<0>>, IntrNoSync, IntrReadMem, IntrWillReturn, IntrArgMemOnly ]>;

// Operators
let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in {
  // Integer arithmetic
  def int_vp_add : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_sub : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_mul  : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_ashr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_lshr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_shl : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_or : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_and : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_xor : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_sdiv : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_udiv : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_srem : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_urem : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_abs : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               llvm_i1_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_smin : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_smax : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_umin : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_umax : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_bswap : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_bitreverse : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_ctpop : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fshl : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fshr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;

  // Floating-point arithmetic
  def int_vp_fadd : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fsub : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fmul  : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fdiv : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_frem : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fneg : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fabs : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_sqrt : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fma : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fmuladd : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_minnum : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_maxnum : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_copysign : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_ceil : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_floor : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_round : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_roundeven : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_roundtozero : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_rint : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_nearbyint : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;

  // Casts
  def int_vp_trunc : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_zext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_sext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fptrunc : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fpext : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fptoui : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_fptosi : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_uitofp : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_sitofp : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_ptrtoint : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_inttoptr : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  // Shuffles
  def int_vp_select : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               llvm_i32_ty]>;
  def int_vp_merge : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               LLVMMatchType<0>,
                               LLVMMatchType<0>,
                               llvm_i32_ty]>;

  // Comparisons
  def int_vp_fcmp : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
                             [ llvm_anyvector_ty,
                               LLVMMatchType<0>,
                               llvm_metadata_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_icmp : DefaultAttrsIntrinsic<[ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty> ],
                             [ llvm_anyvector_ty,
                               LLVMMatchType<0>,
                               llvm_metadata_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;

  // Reductions
  def int_vp_reduce_fadd : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_fmul : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_add  : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_mul : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_and : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_or : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_xor : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_smax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_smin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_umax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_umin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_fmax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_reduce_fmin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                             [ LLVMVectorElementType<0>,
                               llvm_anyvector_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
}
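
// An illustrative IR-level use of a VP operator (hypothetical operands);
// lanes that are masked off or at index >= %evl do not produce a useful
// result:
//   %r = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a, <4 x i32> %b,
//                                          <4 x i1> %m, i32 %evl)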

let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn, ImmArg<ArgIndex<1>>] in {
  def int_vp_ctlz : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               llvm_i1_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
  def int_vp_cttz : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                             [ LLVMMatchType<0>,
                               llvm_i1_ty,
                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                               llvm_i32_ty]>;
}

def int_get_active_lane_mask:
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
            [llvm_anyint_ty, LLVMMatchType<1>],
            [IntrNoMem, IntrNoSync, IntrWillReturn]>;
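
// Illustrative IR-level use: lane i of the result is true iff %base + i is
// unsigned-less-than %n, which makes a convenient tail-folding predicate:
//   %mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 %base, i64 %n)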

def int_experimental_get_vector_length:
  DefaultAttrsIntrinsic<[llvm_i32_ty],
                        [llvm_anyint_ty, llvm_i32_ty, llvm_i1_ty],
                        [IntrNoMem, IntrNoSync, IntrWillReturn,
                         ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_experimental_vp_splice:
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
            [LLVMMatchType<0>,
             LLVMMatchType<0>,
             llvm_i32_ty,
             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
             llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

//===-------------------------- Masked Intrinsics -------------------------===//
//
def int_masked_load:
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
            [llvm_anyptr_ty, llvm_i32_ty,
             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
            [IntrReadMem, IntrArgMemOnly, IntrWillReturn, ImmArg<ArgIndex<1>>,
             NoCapture<ArgIndex<0>>]>;
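
// Illustrative IR-level use: the i32 immediate is the alignment; masked-off
// lanes take their value from the passthru operand:
//   %v = call <8 x float> @llvm.masked.load.v8f32.p0(ptr %p, i32 4,
//                                                    <8 x i1> %m, <8 x float> %pt)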

def int_masked_store:
  DefaultAttrsIntrinsic<[],
            [llvm_anyvector_ty, llvm_anyptr_ty,
             llvm_i32_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
            [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
             ImmArg<ArgIndex<2>>, NoCapture<ArgIndex<1>>]>;

def int_masked_gather:
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
            [LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
            [IntrReadMem, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

def int_masked_scatter:
  DefaultAttrsIntrinsic<[],
            [llvm_anyvector_ty, LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
            [IntrWriteMem, IntrWillReturn, ImmArg<ArgIndex<2>>]>;

def int_masked_expandload:
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
            [llvm_ptr_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
             LLVMMatchType<0>],
            [IntrReadMem, IntrWillReturn, NoCapture<ArgIndex<0>>]>;

def int_masked_compressstore:
  DefaultAttrsIntrinsic<[],
            [llvm_anyvector_ty, llvm_ptr_ty,
             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
            [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
             NoCapture<ArgIndex<1>>]>;

// Test whether a pointer is associated with a type metadata identifier.
def int_type_test : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
                              [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;
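
// An illustrative IR-level use (the class name is hypothetical): a CFI-style
// check that a vtable pointer belongs to the hierarchy named by the metadata:
//   %ok = call i1 @llvm.type.test(ptr %vtable, metadata !"_ZTS1A")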

// Safely loads a function pointer from a virtual table pointer using type metadata.
def int_type_checked_load : DefaultAttrsIntrinsic<[llvm_ptr_ty, llvm_i1_ty],
                                      [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
                                      [IntrNoMem, IntrWillReturn]>;

// Safely loads a relative function pointer from a virtual table pointer using type metadata.
def int_type_checked_load_relative : DefaultAttrsIntrinsic<[llvm_ptr_ty, llvm_i1_ty],
                                      [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
                                      [IntrNoMem, IntrWillReturn]>;

// Test whether a pointer is associated with a type metadata identifier. Used
// for public visibility classes that may later be refined to private
// visibility.
def int_public_type_test : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
                              [IntrNoMem, IntrWillReturn, IntrSpeculatable]>;

// Create a branch funnel that implements an indirect call to a limited set of
// callees. This needs to be a musttail call.
def int_icall_branch_funnel : DefaultAttrsIntrinsic<[], [llvm_vararg_ty], []>;

def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                 [IntrReadMem, IntrArgMemOnly]>;

def int_asan_check_memaccess :
  Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;

def int_hwasan_check_memaccess :
  Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
            [ImmArg<ArgIndex<2>>]>;
def int_hwasan_check_memaccess_shortgranules :
  Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
            [ImmArg<ArgIndex<2>>]>;

// Xray intrinsics
//===----------------------------------------------------------------------===//
// Custom event logging for x-ray.
// Takes a pointer to a string and the length of the string.
def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
                                     [IntrWriteMem, NoCapture<ArgIndex<0>>,
                                      ReadOnly<ArgIndex<0>>]>;
// Typed event logging for x-ray.
// Takes a numeric type tag, a pointer to a string and the length of the string.
def int_xray_typedevent : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty, llvm_i64_ty],
                                        [IntrWriteMem, NoCapture<ArgIndex<1>>,
                                         ReadOnly<ArgIndex<1>>]>;
//===----------------------------------------------------------------------===//

//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//

// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memcpy_element_unordered_atomic
    : Intrinsic<[],
                [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty],
                [IntrArgMemOnly, IntrWillReturn, IntrNoSync,
                 NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
                 WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
                 ImmArg<ArgIndex<3>>]>;

// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memmove_element_unordered_atomic
    : Intrinsic<[],
                [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty],
                [IntrArgMemOnly, IntrWillReturn, IntrNoSync,
                 NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
                 WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>,
                 ImmArg<ArgIndex<3>>]>;

// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
def int_memset_element_unordered_atomic
    : Intrinsic<[], [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty],
                [IntrWriteMem, IntrArgMemOnly, IntrWillReturn, IntrNoSync,
                 NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>,
                 ImmArg<ArgIndex<3>>]>;
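
// Illustrative IR-level use: both pointers must be aligned to at least the
// element size, and the length must be a multiple of it:
//   call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(
//       ptr align 4 %dst, ptr align 4 %src, i32 %len, i32 4)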

//===------------------------ Reduction Intrinsics ------------------------===//
//
let IntrProperties = [IntrNoMem, IntrSpeculatable] in {

  def int_vector_reduce_fadd : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [LLVMVectorElementType<0>,
                                          llvm_anyvector_ty]>;
  def int_vector_reduce_fmul : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [LLVMVectorElementType<0>,
                                          llvm_anyvector_ty]>;
  def int_vector_reduce_add : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                        [llvm_anyvector_ty]>;
  def int_vector_reduce_mul : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                        [llvm_anyvector_ty]>;
  def int_vector_reduce_and : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                        [llvm_anyvector_ty]>;
  def int_vector_reduce_or : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                       [llvm_anyvector_ty]>;
  def int_vector_reduce_xor : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                        [llvm_anyvector_ty]>;
  def int_vector_reduce_smax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_smin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_umax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_umin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_fmax : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_fmin : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_fminimum: DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
  def int_vector_reduce_fmaximum: DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                                         [llvm_anyvector_ty]>;
}
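
// Illustrative IR-level uses: integer reductions take only the vector, while
// fadd/fmul also take a start value and need 'reassoc' to reduce unordered:
//   %s = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
//   %f = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.0, <4 x float> %v)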

//===----- Matrix intrinsics ---------------------------------------------===//

def int_matrix_transpose
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
              [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
              [ IntrNoSync, IntrWillReturn, IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>,
               ImmArg<ArgIndex<2>>]>;

def int_matrix_multiply
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
              [llvm_anyvector_ty, llvm_anyvector_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty],
              [IntrNoSync, IntrWillReturn, IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>,
               ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

def int_matrix_column_major_load
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
              [llvm_ptr_ty, llvm_anyint_ty, llvm_i1_ty,
               llvm_i32_ty, llvm_i32_ty],
              [IntrNoSync, IntrWillReturn, IntrArgMemOnly, IntrReadMem,
               NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
               ImmArg<ArgIndex<4>>]>;

def int_matrix_column_major_store
  : DefaultAttrsIntrinsic<[],
              [llvm_anyvector_ty, llvm_ptr_ty,
               llvm_anyint_ty, llvm_i1_ty, llvm_i32_ty, llvm_i32_ty],
              [IntrNoSync, IntrWillReturn, IntrArgMemOnly, IntrWriteMem,
               WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
               ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
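// Illustrative: multiplying two 2x2 matrices flattened column-major into
// <4 x float> values (the immediates are rows(A), cols(A) == rows(B), cols(B)):
//   %c = call <4 x float> @llvm.matrix.multiply.v4f32.v4f32.v4f32(
//            <4 x float> %a, <4 x float> %b, i32 2, i32 2, i32 2)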

//===---------- Intrinsics to control hardware supported loops ----------===//

// Specify that the value given is the number of iterations that the next loop
// will execute.
def int_set_loop_iterations :
  DefaultAttrsIntrinsic<[], [llvm_anyint_ty], [IntrNoDuplicate]>;

// Same as the above, but produces a value (the same as the input operand) to
// be fed into the loop.
def int_start_loop_iterations :
  DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoDuplicate]>;

// Specify that the value given is the number of iterations that the next loop
// will execute. Also test that the given count is not zero, allowing it to
// control entry to a 'while' loop.
def int_test_set_loop_iterations :
  DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_anyint_ty], [IntrNoDuplicate]>;
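// Illustrative guard for a hardware 'while' loop:
//   %go = call i1 @llvm.test.set.loop.iterations.i32(i32 %n)
//   br i1 %go, label %loop.preheader, label %exit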

// Same as the above, but produces an extra value (the same as the input
// operand) to be fed into the loop.
def int_test_start_loop_iterations :
  DefaultAttrsIntrinsic<[llvm_anyint_ty, llvm_i1_ty], [LLVMMatchType<0>],
                        [IntrNoDuplicate]>;

// Decrement loop counter by the given argument. Return false if the loop
// should exit.
def int_loop_decrement :
  DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_anyint_ty], [IntrNoDuplicate]>;

// Decrement the first operand (the loop counter) by the second operand (the
// maximum number of elements processed in an iteration). Return the remaining
// number of iterations still to be executed. This is effectively a sub which
// can be used with a phi, icmp and br to control the number of iterations
// executed, as usual. Any optimisations are allowed to treat it as a sub, and
// it is SCEV-able, so it is the backend's responsibility to handle cases where
// it may be optimised.
def int_loop_decrement_reg :
  DefaultAttrsIntrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoDuplicate]>;
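// Illustrative loop shape, processing up to 4 elements per iteration:
//   loop:
//     %count = phi i32 [ %n, %entry ], [ %rem, %loop ]
//     ...
//     %rem = call i32 @llvm.loop.decrement.reg.i32(i32 %count, i32 4)
//     %cmp = icmp sgt i32 %rem, 0
//     br i1 %cmp, label %loop, label %exit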

//===----- Intrinsics that are used to provide predicate information -----===//

def int_ssa_copy : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>],
                             [IntrNoMem, Returned<ArgIndex<0>>]>;
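// Illustrative: PredicateInfo inserts such copies so that uses dominated by a
// predicate (e.g. a branch on %x) can be renamed separately:
//   %x.0 = call i32 @llvm.ssa.copy.i32(i32 %x)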

//===------- Intrinsics that are used to preserve debug information -------===//

def int_preserve_array_access_index : DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                                                [llvm_anyptr_ty, llvm_i32_ty,
                                                 llvm_i32_ty],
                                                [IntrNoMem,
                                                 ImmArg<ArgIndex<1>>,
                                                 ImmArg<ArgIndex<2>>]>;
def int_preserve_union_access_index : DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                                                [llvm_anyptr_ty, llvm_i32_ty],
                                                [IntrNoMem,
                                                 ImmArg<ArgIndex<1>>]>;
def int_preserve_struct_access_index : DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                                                 [llvm_anyptr_ty, llvm_i32_ty,
                                                  llvm_i32_ty],
                                                 [IntrNoMem,
                                                  ImmArg<ArgIndex<1>>,
                                                  ImmArg<ArgIndex<2>>]>;
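// Illustrative use (the exact mangling and attributes vary across LLVM
// versions): clang's __builtin_preserve_access_index lowers a member access
// to something like
//   %p = call ptr @llvm.preserve.struct.access.index.p0.p0(ptr %base, i32 1, i32 1)
// where the immediates are the GEP index and the DebugInfo field index,
// allowing BPF CO-RE to emit field relocations.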

//===------------ Intrinsics to perform common vector shuffles ------------===//

def int_experimental_vector_reverse : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                            [LLVMMatchType<0>],
                                                            [IntrNoMem]>;

def int_experimental_vector_splice : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                           [LLVMMatchType<0>,
                                                            LLVMMatchType<0>,
                                                            llvm_i32_ty],
                                                           [IntrNoMem, ImmArg<ArgIndex<2>>]>;
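// Illustrative: reverse flips the lane order; splice concatenates its two
// operands and extracts a vector-length window starting at the signed
// immediate, e.g.
//   %r = call <4 x i32> @llvm.experimental.vector.splice.v4i32(<4 x i32> %a, <4 x i32> %b, i32 1)
// yields <a[1], a[2], a[3], b[0]>.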

//===---------- Intrinsics to query properties of scalable vectors --------===//
def int_vscale : DefaultAttrsIntrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
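// Illustrative: '%n = call i64 @llvm.vscale.i64()' returns the runtime value
// of vscale, so a <vscale x 4 x i32> vector holds 4 * %n i32 elements.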

//===---------- Intrinsics to perform subvector insertion/extraction ------===//
def int_vector_insert : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                              [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty],
                                              [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]>;

def int_vector_extract : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                               [llvm_anyvector_ty, llvm_i64_ty],
                                               [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<1>>]>;
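// Illustrative (the index must be a constant multiple of the known minimum
// length of the subvector):
//   %w = call <8 x i32> @llvm.vector.insert.v8i32.v4i32(<8 x i32> %dst, <4 x i32> %sub, i64 0)
//   %s = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %w, i64 4)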


def int_experimental_vector_interleave2   : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                                                                  [LLVMHalfElementsVectorType<0>,
                                                                   LLVMHalfElementsVectorType<0>],
                                                                  [IntrNoMem]>;

def int_experimental_vector_deinterleave2 : DefaultAttrsIntrinsic<[LLVMHalfElementsVectorType<0>,
                                                                   LLVMHalfElementsVectorType<0>],
                                                                  [llvm_anyvector_ty],
                                                                  [IntrNoMem]>;
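// Illustrative: deinterleave2 splits even and odd lanes, e.g. applied to
// <4 x i32> <a, b, c, d> it returns {<a, c>, <b, d>}; interleave2 is the
// inverse:
//   %res = call {<2 x i32>, <2 x i32>} @llvm.experimental.vector.deinterleave2.v4i32(<4 x i32> %v)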

//===----------------- Pointer Authentication Intrinsics ------------------===//
//

// Sign an unauthenticated pointer using the specified key and discriminator,
// passed in that order.
// Returns the first argument, with some known bits replaced with a signature.
def int_ptrauth_sign :
  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
                        [IntrNoMem, ImmArg<ArgIndex<1>>]>;
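// Illustrative call (key numbering is target-specific; 0 is a placeholder):
//   %signed = call i64 @llvm.ptrauth.sign(i64 %ptr, i32 0, i64 %disc)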

// Authenticate a signed pointer, using the specified key and discriminator.
// Returns the first argument, with the signature bits removed.
// The signature must be valid.
def int_ptrauth_auth : Intrinsic<[llvm_i64_ty],
                                 [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<1>>]>;

// Authenticate a signed pointer and resign it.
// The second (key) and third (discriminator) arguments specify the signing
// schema used for authenticating.
// The fourth and fifth arguments specify the schema used for signing.
// The signature must be valid.
// This is a combined form of @llvm.ptrauth.sign and @llvm.ptrauth.auth, with
// an additional integrity guarantee on the intermediate value.
def int_ptrauth_resign : Intrinsic<[llvm_i64_ty],
                                   [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty,
                                    llvm_i32_ty, llvm_i64_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<1>>,
                                    ImmArg<ArgIndex<3>>]>;
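// Illustratively (keys 0 and 1 are placeholders), this behaves like
//   %tmp = call i64 @llvm.ptrauth.auth(i64 %v, i32 0, i64 %d1)
//   %r   = call i64 @llvm.ptrauth.sign(i64 %tmp, i32 1, i64 %d2)
// except that the intermediate raw pointer is never observable.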

// Strip the embedded signature out of a signed pointer.
// The second argument specifies the key.
// This behaves like @llvm.ptrauth.auth, but doesn't require the signature to
// be valid.
def int_ptrauth_strip :
  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
                        [IntrNoMem, ImmArg<ArgIndex<1>>]>;

// Blend a small integer discriminator with an address discriminator, producing
// a new discriminator value.
def int_ptrauth_blend :
  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;

// Compute the signature of a value, using a given discriminator.
// This differs from @llvm.ptrauth.sign in that it doesn't embed the computed
// signature in the pointer, but instead returns the signature as a value.
// That allows it to be used to sign non-pointer data: in that sense, it is
// generic.  There is no generic @llvm.ptrauth.auth: instead, the signature
// can be computed using @llvm.ptrauth.sign_generic, and compared with icmp.
def int_ptrauth_sign_generic :
  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
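// Illustrative data-signing check:
//   %sig = call i64 @llvm.ptrauth.sign.generic(i64 %data, i64 %disc)
//   %ok  = icmp eq i64 %sig, %stored_sig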

//===----------------------------------------------------------------------===//
//===------- Convergence Intrinsics ---------------------------------------===//

def int_experimental_convergence_entry
  : DefaultAttrsIntrinsic<[llvm_token_ty], [], [IntrNoMem, IntrConvergent]>;
def int_experimental_convergence_anchor
  : DefaultAttrsIntrinsic<[llvm_token_ty], [], [IntrNoMem, IntrConvergent]>;
def int_experimental_convergence_loop
  : DefaultAttrsIntrinsic<[llvm_token_ty], [], [IntrNoMem, IntrConvergent]>;
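// Illustrative: the returned token is passed to convergent calls via a
// "convergencectrl" operand bundle:
//   %tok = call token @llvm.experimental.convergence.anchor()
//   call void @convergent_fn() [ "convergencectrl"(token %tok) ]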

//===----------------------------------------------------------------------===//
// Target-specific intrinsics
//===----------------------------------------------------------------------===//

include "llvm/IR/IntrinsicsPowerPC.td"
include "llvm/IR/IntrinsicsX86.td"
include "llvm/IR/IntrinsicsARM.td"
include "llvm/IR/IntrinsicsAArch64.td"
include "llvm/IR/IntrinsicsXCore.td"
include "llvm/IR/IntrinsicsHexagon.td"
include "llvm/IR/IntrinsicsNVVM.td"
include "llvm/IR/IntrinsicsMips.td"
include "llvm/IR/IntrinsicsAMDGPU.td"
include "llvm/IR/IntrinsicsBPF.td"
include "llvm/IR/IntrinsicsSystemZ.td"
include "llvm/IR/IntrinsicsWebAssembly.td"
include "llvm/IR/IntrinsicsRISCV.td"
include "llvm/IR/IntrinsicsSPIRV.td"
include "llvm/IR/IntrinsicsVE.td"
include "llvm/IR/IntrinsicsDirectX.td"
include "llvm/IR/IntrinsicsLoongArch.td"

#endif // TEST_INTRINSICS_SUPPRESS_DEFS
//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Type class.  For more "Type"
// stuff, look in DerivedTypes.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_TYPE_H
#define LLVM_IR_TYPE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
#include <iterator>

namespace llvm {

class IntegerType;
struct fltSemantics;
class LLVMContext;
class PointerType;
class raw_ostream;
class StringRef;
template <typename PtrType> class SmallPtrSetImpl;

/// The instances of the Type class are immutable: once they are created,
/// they are never changed.  Also note that only one instance of a particular
/// type is ever created.  Thus seeing if two types are equal is a matter of
/// doing a trivial pointer comparison. To enforce that no two equal instances
/// are created, Type instances can only be created via static factory methods
/// in class Type and in derived classes.  Once allocated, Types are never
/// freed.
///
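/// A minimal illustration of the uniquing guarantee (assumes an LLVMContext
/// named Ctx is in scope):
/// \code
///   Type *A = Type::getInt32Ty(Ctx);
///   Type *B = Type::getInt32Ty(Ctx);
///   assert(A == B && "equal types are pointer-identical");
/// \endcode
///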
class Type {
public:
  //===--------------------------------------------------------------------===//
  /// Definitions of all of the base types for the Type system.  Based on this
  /// value, you can cast to a class defined in DerivedTypes.h.
  /// Note: If you add an element to this, you need to add an element to the
  /// Type::getPrimitiveType function, or else things will break!
  /// Also update LLVMTypeKind and LLVMGetTypeKind() in the C binding.
  ///
  enum TypeID {
    // PrimitiveTypes
    HalfTyID = 0,  ///< 16-bit floating point type
    BFloatTyID,    ///< 16-bit floating point type (7-bit significand)
    FloatTyID,     ///< 32-bit floating point type
    DoubleTyID,    ///< 64-bit floating point type
    X86_FP80TyID,  ///< 80-bit floating point type (X87)
    FP128TyID,     ///< 128-bit floating point type (112-bit significand)
    PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
    VoidTyID,      ///< type with no size
    LabelTyID,     ///< Labels
    MetadataTyID,  ///< Metadata
    X86_MMXTyID,   ///< MMX vectors (64 bits, X86 specific)
    X86_AMXTyID,   ///< AMX vectors (8192 bits, X86 specific)
    TokenTyID,     ///< Tokens

    // Derived types... see DerivedTypes.h file.
    IntegerTyID,        ///< Arbitrary bit width integers
    FunctionTyID,       ///< Functions
    PointerTyID,        ///< Pointers
    StructTyID,         ///< Structures
    ArrayTyID,          ///< Arrays
    FixedVectorTyID,    ///< Fixed width SIMD vector type
    ScalableVectorTyID, ///< Scalable SIMD vector type
    TypedPointerTyID,   ///< Typed pointer used by some GPU targets
    TargetExtTyID,      ///< Target extension type
  };

private:
  /// This refers to the LLVMContext in which this type was uniqued.
  LLVMContext &Context;

  TypeID   ID : 8;            // The current base type of this type.
  unsigned SubclassData : 24; // Space for subclasses to store data.
                              // Note that this should be synchronized with
                              // MAX_INT_BITS value in IntegerType class.

protected:
  friend class LLVMContextImpl;

  explicit Type(LLVMContext &C, TypeID tid)
    : Context(C), ID(tid), SubclassData(0) {}
  ~Type() = default;

  unsigned getSubclassData() const { return SubclassData; }

  void setSubclassData(unsigned val) {
    SubclassData = val;
    // Ensure we don't have any accidental truncation.
    assert(getSubclassData() == val && "Subclass data too large for field");
  }

  /// Keeps track of how many Type*'s there are in the ContainedTys list.
  unsigned NumContainedTys = 0;

  /// A pointer to the array of Types contained by this Type. For example, this
  /// includes the arguments of a function type, the elements of a structure,
  /// the pointee of a pointer, the element type of an array, etc. This pointer
  /// may be 0 for types that don't contain other types (Integer, Double,
  /// Float).
  Type * const *ContainedTys = nullptr;

public:
  /// Print the current type.
  /// Omit the type details if \p NoDetails == true.
  /// E.g., let %st = type { i32, i16 }
  /// When \p NoDetails is true, we only print %st.
  /// Put differently, \p NoDetails prints the type as if
  /// inlined with the operands when printing an instruction.
  void print(raw_ostream &O, bool IsForDebug = false,
             bool NoDetails = false) const;

  void dump() const;

  /// Return the LLVMContext in which this type was uniqued.
  LLVMContext &getContext() const { return Context; }

  //===--------------------------------------------------------------------===//
  // Accessors for working with types.
  //

  /// Return the type id for the type. This will return one of the TypeID enum
  /// elements defined above.
  TypeID getTypeID() const { return ID; }

  /// Return true if this is 'void'.
  bool isVoidTy() const { return getTypeID() == VoidTyID; }

  /// Return true if this is 'half', a 16-bit IEEE fp type.
  bool isHalfTy() const { return getTypeID() == HalfTyID; }

  /// Return true if this is 'bfloat', a 16-bit bfloat type.
  bool isBFloatTy() const { return getTypeID() == BFloatTyID; }

  /// Return true if this is a 16-bit float type.
  bool is16bitFPTy() const {
    return getTypeID() == BFloatTyID || getTypeID() == HalfTyID;
  }

  /// Return true if this is 'float', a 32-bit IEEE fp type.
  bool isFloatTy() const { return getTypeID() == FloatTyID; }

  /// Return true if this is 'double', a 64-bit IEEE fp type.
  bool isDoubleTy() const { return getTypeID() == DoubleTyID; }

  /// Return true if this is x86 long double.
  bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }

  /// Return true if this is 'fp128'.
  bool isFP128Ty() const { return getTypeID() == FP128TyID; }

  /// Return true if this is powerpc long double.
  bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }

  /// Return true if this is a well-behaved IEEE-like type: one that has an
  /// IEEE-compatible layout, as defined by isIEEE(), and does not have
  /// unnormal values.
  bool isIEEELikeFPTy() const {
    switch (getTypeID()) {
    case DoubleTyID:
    case FloatTyID:
    case HalfTyID:
    case BFloatTyID:
    case FP128TyID:
      return true;
    default:
      return false;
    }
  }

  /// Return true if this is one of the floating-point types
  bool isFloatingPointTy() const {
    return isIEEELikeFPTy() || getTypeID() == X86_FP80TyID ||
           getTypeID() == PPC_FP128TyID;
  }

  /// Returns true if this is a floating-point type that is an unevaluated sum
  /// of multiple floating-point units.
  /// An example of such a type is ppc_fp128, also known as double-double, which
  /// consists of two IEEE 754 doubles.
  bool isMultiUnitFPType() const {
    return getTypeID() == PPC_FP128TyID;
  }

  const fltSemantics &getFltSemantics() const;

  /// Return true if this is X86 MMX.
  bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }

  /// Return true if this is X86 AMX.
  bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; }

  /// Return true if this is a target extension type.
  bool isTargetExtTy() const { return getTypeID() == TargetExtTyID; }

  /// Return true if this is a target extension type with a scalable layout.
  bool isScalableTargetExtTy() const;

  /// Return true if this is a scalable vector type or a target extension type
  /// with a scalable layout.
  bool isScalableTy() const;

  /// Return true if this is a FP type or a vector of FP.
  bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }

  /// Return true if this is 'label'.
  bool isLabelTy() const { return getTypeID() == LabelTyID; }

  /// Return true if this is 'metadata'.
  bool isMetadataTy() const { return getTypeID() == MetadataTyID; }

  /// Return true if this is 'token'.
  bool isTokenTy() const { return getTypeID() == TokenTyID; }

  /// True if this is an instance of IntegerType.
  bool isIntegerTy() const { return getTypeID() == IntegerTyID; }

  /// Return true if this is an IntegerType of the given width.
  bool isIntegerTy(unsigned Bitwidth) const;

  /// Return true if this is an integer type or a vector of integer types.
  bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }

  /// Return true if this is an integer type or a vector of integer types of
  /// the given width.
  bool isIntOrIntVectorTy(unsigned BitWidth) const {
    return getScalarType()->isIntegerTy(BitWidth);
  }

  /// Return true if this is an integer type or a pointer type.
  bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); }

  /// True if this is an instance of FunctionType.
  bool isFunctionTy() const { return getTypeID() == FunctionTyID; }

  /// True if this is an instance of StructType.
  bool isStructTy() const { return getTypeID() == StructTyID; }

  /// True if this is an instance of ArrayType.
  bool isArrayTy() const { return getTypeID() == ArrayTyID; }

  /// True if this is an instance of PointerType.
  bool isPointerTy() const { return getTypeID() == PointerTyID; }

  /// True if this is an instance of an opaque PointerType.
  LLVM_DEPRECATED("Use isPointerTy() instead", "isPointerTy")
  bool isOpaquePointerTy() const { return isPointerTy(); }

  /// Return true if this is a pointer type or a vector of pointer types.
  bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }

  /// True if this is an instance of VectorType.
  inline bool isVectorTy() const {
    return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
  }

  /// Return true if this type could be converted with a lossless BitCast to
  /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the
  /// same size only where no re-interpretation of the bits is done.
  /// Determine if this type could be losslessly bitcast to Ty
  bool canLosslesslyBitCastTo(Type *Ty) const;

  /// Return true if this type is empty, that is, it has no elements or all of
  /// its elements are empty.
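  /// For instance, an empty struct ({}), a zero-length array ([0 x i32]), and
  /// a struct or array containing only such types are all empty.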
  bool isEmptyTy() const;

  /// Return true if the type is "first class", meaning it is a valid type for a
  /// Value.
  bool isFirstClassType() const {
    return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
  }

  /// Return true if the type is a valid type for a register in codegen. This
  /// includes all first-class types except struct and array types.
  bool isSingleValueType() const {
    return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
           isPointerTy() || isVectorTy() || isX86_AMXTy() || isTargetExtTy();
  }

  /// Return true if the type is an aggregate type. This means it is valid as
  /// the first operand of an insertvalue or extractvalue instruction. This
  /// includes struct and array types, but does not include vector types.
  bool isAggregateType() const {
    return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
  }

  /// Return true if it makes sense to take the size of this type. To get the
  /// actual size for a particular target, it is reasonable to use the
  /// DataLayout subsystem to do this.
  bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
    // If it's a primitive, it is always sized.
    if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
        getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID ||
        getTypeID() == X86_AMXTyID)
      return true;
    // If it is not something that can have a size (e.g. a function or label),
    // it doesn't have a size.
    if (getTypeID() != StructTyID && getTypeID() != ArrayTyID &&
        !isVectorTy() && getTypeID() != TargetExtTyID)
      return false;
    // Otherwise we have to try harder to decide.
    return isSizedDerivedType(Visited);
  }

  /// Return the basic size of this type if it is a primitive type. These are
  /// fixed by LLVM and are not target-dependent.
  /// This will return zero if the type does not have a size or is not a
  /// primitive type.
  ///
  /// If this is a scalable vector type, the scalable property will be set and
  /// the runtime size will be a positive integer multiple of the base size.
  ///
  /// Note that this may not reflect the size of memory allocated for an
  /// instance of the type or the number of bytes that are written when an
  /// instance of the type is stored to memory. The DataLayout class provides
  /// additional query functions to provide this information.
  ///
  TypeSize getPrimitiveSizeInBits() const LLVM_READONLY;

  /// If this is a vector type, return the getPrimitiveSizeInBits value for the
  /// element type. Otherwise return the getPrimitiveSizeInBits value for this
  /// type.
  unsigned getScalarSizeInBits() const LLVM_READONLY;

  /// Return the width of the mantissa of this type. This is only valid on
  /// floating-point types. If the FP type does not have a stable mantissa (e.g.
  /// ppc long double), this method returns -1.
  int getFPMantissaWidth() const;

  /// Return whether the type is IEEE compatible, as defined by the eponymous
  /// method in APFloat.
  bool isIEEE() const;

  /// If this is a vector type, return the element type, otherwise return
  /// 'this'.
  inline Type *getScalarType() const {
    if (isVectorTy())
      return getContainedType(0);
    return const_cast<Type *>(this);
  }

  //===--------------------------------------------------------------------===//
  // Type Iteration support.
  //
  using subtype_iterator = Type * const *;

  subtype_iterator subtype_begin() const { return ContainedTys; }
  subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
  ArrayRef<Type*> subtypes() const {
    return ArrayRef(subtype_begin(), subtype_end());
  }

  using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;

  subtype_reverse_iterator subtype_rbegin() const {
    return subtype_reverse_iterator(subtype_end());
  }
  subtype_reverse_iterator subtype_rend() const {
    return subtype_reverse_iterator(subtype_begin());
  }

  /// This method is used to implement the type iterator (defined at the end of
  /// the file). For derived types, this returns the types 'contained' in the
  /// derived type.
  Type *getContainedType(unsigned i) const {
    assert(i < NumContainedTys && "Index out of range!");
    return ContainedTys[i];
  }

  /// Return the number of types in the derived type.
  unsigned getNumContainedTypes() const { return NumContainedTys; }

  //===--------------------------------------------------------------------===//
  // Helper methods corresponding to subclass methods.  This forces a cast to
  // the specified subclass and calls its accessor.  "getArrayNumElements" (for
  // example) is shorthand for cast<ArrayType>(Ty)->getNumElements().  This is
  // only intended to cover the core methods that are frequently used, helper
  // methods should not be added here.

  inline unsigned getIntegerBitWidth() const;

  inline Type *getFunctionParamType(unsigned i) const;
  inline unsigned getFunctionNumParams() const;
  inline bool isFunctionVarArg() const;

  inline StringRef getStructName() const;
  inline unsigned getStructNumElements() const;
  inline Type *getStructElementType(unsigned N) const;

  inline uint64_t getArrayNumElements() const;

  Type *getArrayElementType() const {
    assert(getTypeID() == ArrayTyID);
    return ContainedTys[0];
  }

  inline StringRef getTargetExtName() const;

  /// Only use this method in code that is not reachable with opaque pointers,
  /// or part of deprecated methods that will be removed as part of the opaque
  /// pointers transition.
  [[deprecated("Pointers no longer have element types")]]
  Type *getNonOpaquePointerElementType() const {
    llvm_unreachable("Pointers no longer have element types");
  }

  /// Given a vector type, change the element type,
  /// whilst keeping the old number of elements.
  /// For non-vectors, simply returns \p EltTy.
  inline Type *getWithNewType(Type *EltTy) const;

  /// Given an integer or vector type, change the lane bitwidth to NewBitwidth,
  /// whilst keeping the old number of lanes.
  inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;

  /// Given a scalar or vector integer type, returns a type with elements
  /// twice as wide as in the original type. For vectors, preserves the
  /// element count.
  inline Type *getExtendedType() const;

  /// Get the address space of this pointer or pointer vector type.
  inline unsigned getPointerAddressSpace() const;

  //===--------------------------------------------------------------------===//
  // Static members exported by the Type class itself.  Useful for getting
  // instances of Type.
  //

  /// Return a type based on an identifier.
  static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);

  //===--------------------------------------------------------------------===//
  // These are the builtin types that are always available.
  //
  static Type *getVoidTy(LLVMContext &C);
  static Type *getLabelTy(LLVMContext &C);
  static Type *getHalfTy(LLVMContext &C);
  static Type *getBFloatTy(LLVMContext &C);
  static Type *getFloatTy(LLVMContext &C);
  static Type *getDoubleTy(LLVMContext &C);
  static Type *getMetadataTy(LLVMContext &C);
  static Type *getX86_FP80Ty(LLVMContext &C);
  static Type *getFP128Ty(LLVMContext &C);
  static Type *getPPC_FP128Ty(LLVMContext &C);
  static Type *getX86_MMXTy(LLVMContext &C);
  static Type *getX86_AMXTy(LLVMContext &C);
  static Type *getTokenTy(LLVMContext &C);
  static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
  static IntegerType *getInt1Ty(LLVMContext &C);
  static IntegerType *getInt8Ty(LLVMContext &C);
  static IntegerType *getInt16Ty(LLVMContext &C);
  static IntegerType *getInt32Ty(LLVMContext &C);
  static IntegerType *getInt64Ty(LLVMContext &C);
  static IntegerType *getInt128Ty(LLVMContext &C);
  template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
    int noOfBits = sizeof(ScalarTy) * CHAR_BIT;
    if (std::is_integral<ScalarTy>::value) {
      return (Type*) Type::getIntNTy(C, noOfBits);
    } else if (std::is_floating_point<ScalarTy>::value) {
      switch (noOfBits) {
      case 32:
        return Type::getFloatTy(C);
      case 64:
        return Type::getDoubleTy(C);
      }
    }
    llvm_unreachable("Unsupported type in Type::getScalarTy");
  }
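  // Illustrative uses (Ctx is an LLVMContext assumed to be in scope):
  //   Type *I32 = Type::getScalarTy<int32_t>(Ctx); // IntegerType of 32 bits
  //   Type *F64 = Type::getScalarTy<double>(Ctx);  // 'double'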
  static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S);

  //===--------------------------------------------------------------------===//
  // Convenience methods for getting pointer types with one of the above builtin
  // types as pointee.
  //
  static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
  static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
  static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
  static Type *getWasm_ExternrefTy(LLVMContext &C);
  static Type *getWasm_FuncrefTy(LLVMContext &C);

  /// Return a pointer to the current type. This is equivalent to
  /// PointerType::get(Foo, AddrSpace).
  /// TODO: Remove this after opaque pointer transition is complete.
  PointerType *getPointerTo(unsigned AddrSpace = 0) const;

private:
  /// Derived types like structures and arrays are sized iff all of the members
  /// of the type are sized as well. Since asking for their size is relatively
  /// uncommon, move this operation out-of-line.
  bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
};

// Printing of types.
inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
  T.print(OS);
  return OS;
}

// allow isa<PointerType>(x) to work without DerivedTypes.h included.
template <> struct isa_impl<PointerType, Type> {
  static inline bool doit(const Type &Ty) {
    return Ty.getTypeID() == Type::PointerTyID;
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)

/* Specialized opaque type conversions.
 */
inline Type **unwrap(LLVMTypeRef* Tys) {
  return reinterpret_cast<Type**>(Tys);
}

inline LLVMTypeRef *wrap(Type **Tys) {
  return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
}

} // end namespace llvm

#endif // LLVM_IR_TYPE_H
//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines various meta classes of instructions that exist in the VM
// representation.  Specific concrete subclasses of these may be found in the
// i*.h files...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRTYPES_H
#define LLVM_IR_INSTRTYPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/User.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <vector>

namespace llvm {

class StringRef;
class Type;
class Value;

namespace Intrinsic {
typedef unsigned ID;
}

//===----------------------------------------------------------------------===//
//                          UnaryInstruction Class
//===----------------------------------------------------------------------===//

class UnaryInstruction : public Instruction {
protected:
  UnaryInstruction(Type *Ty, unsigned iType, Value *V,
                   Instruction *IB = nullptr)
    : Instruction(Ty, iType, &Op<0>(), 1, IB) {
    Op<0>() = V;
  }
  UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
    : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
    Op<0>() = V;
  }

public:
  // allocate space for exactly one operand
  void *operator new(size_t S) { return User::operator new(S, 1); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->isUnaryOp() ||
           I->getOpcode() == Instruction::Alloca ||
           I->getOpcode() == Instruction::Load ||
           I->getOpcode() == Instruction::VAArg ||
           I->getOpcode() == Instruction::ExtractValue ||
           (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<UnaryInstruction> :
  public FixedNumOperandTraits<UnaryInstruction, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)

//===----------------------------------------------------------------------===//
//                                UnaryOperator Class
//===----------------------------------------------------------------------===//

class UnaryOperator : public UnaryInstruction {
  void AssertOK();

protected:
  UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
                const Twine &Name, Instruction *InsertBefore);
  UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
                const Twine &Name, BasicBlock *InsertAtEnd);

  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnaryOperator *cloneImpl() const;

public:

  /// Construct a unary instruction, given the opcode and an operand.
  /// Optionally (if InstBefore is specified) insert the instruction
  /// into a BasicBlock right before the specified instruction.  The specified
  /// Instruction is allowed to be a dereferenced end iterator.
  ///
  static UnaryOperator *Create(UnaryOps Op, Value *S,
                               const Twine &Name = Twine(),
                               Instruction *InsertBefore = nullptr);

  /// Construct a unary instruction, given the opcode and an operand.
  /// Also automatically insert this instruction to the end of the
  /// BasicBlock specified.
  ///
  static UnaryOperator *Create(UnaryOps Op, Value *S,
                               const Twine &Name,
                               BasicBlock *InsertAtEnd);

  /// These methods just forward to Create, and are useful when you
  /// statically know what type of instruction you're going to create.  These
  /// helpers just save some typing.
#define HANDLE_UNARY_INST(N, OPC, CLASS) \
  static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
    return Create(Instruction::OPC, V, Name);\
  }
#include "llvm/IR/Instruction.def"
#define HANDLE_UNARY_INST(N, OPC, CLASS) \
  static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
                                    BasicBlock *BB) {\
    return Create(Instruction::OPC, V, Name, BB);\
  }
#include "llvm/IR/Instruction.def"
#define HANDLE_UNARY_INST(N, OPC, CLASS) \
  static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
                                    Instruction *I) {\
    return Create(Instruction::OPC, V, Name, I);\
  }
#include "llvm/IR/Instruction.def"

  static UnaryOperator *
  CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
                        const Twine &Name = "",
                        Instruction *InsertBefore = nullptr) {
    UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
    UO->copyIRFlags(CopyO);
    return UO;
  }

  static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
                                      const Twine &Name = "",
                                      Instruction *InsertBefore = nullptr) {
    return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
                                 InsertBefore);
  }

  UnaryOps getOpcode() const {
    return static_cast<UnaryOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->isUnaryOp();
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                           BinaryOperator Class
//===----------------------------------------------------------------------===//

class BinaryOperator : public Instruction {
  void AssertOK();

protected:
  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                 const Twine &Name, Instruction *InsertBefore);
  BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
                 const Twine &Name, BasicBlock *InsertAtEnd);

  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BinaryOperator *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Construct a binary instruction, given the opcode and the two
  /// operands.  Optionally (if InstBefore is specified) insert the instruction
  /// into a BasicBlock right before the specified instruction.  The specified
  /// Instruction is allowed to be a dereferenced end iterator.
  ///
  static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
                                const Twine &Name = Twine(),
                                Instruction *InsertBefore = nullptr);

  /// Construct a binary instruction, given the opcode and the two
  /// operands.  Also automatically insert this instruction to the end of the
  /// BasicBlock specified.
  ///
  static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
                                const Twine &Name, BasicBlock *InsertAtEnd);

  /// These methods just forward to Create, and are useful when you
  /// statically know what type of instruction you're going to create.  These
  /// helpers just save some typing.
#define HANDLE_BINARY_INST(N, OPC, CLASS) \
  static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
                                     const Twine &Name = "") {\
    return Create(Instruction::OPC, V1, V2, Name);\
  }
#include "llvm/IR/Instruction.def"
#define HANDLE_BINARY_INST(N, OPC, CLASS) \
  static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
                                     const Twine &Name, BasicBlock *BB) {\
    return Create(Instruction::OPC, V1, V2, Name, BB);\
  }
#include "llvm/IR/Instruction.def"
#define HANDLE_BINARY_INST(N, OPC, CLASS) \
  static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
                                     const Twine &Name, Instruction *I) {\
    return Create(Instruction::OPC, V1, V2, Name, I);\
  }
#include "llvm/IR/Instruction.def"

  static BinaryOperator *
  CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Value *CopyO,
                        const Twine &Name = "",
                        Instruction *InsertBefore = nullptr) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, InsertBefore);
    BO->copyIRFlags(CopyO);
    return BO;
  }

  static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
                                       Instruction *FMFSource,
                                       const Twine &Name = "") {
    return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
  }
  static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
                                       Instruction *FMFSource,
                                       const Twine &Name = "") {
    return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
  }
  static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
                                       Instruction *FMFSource,
                                       const Twine &Name = "") {
    return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
  }
  static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
                                       Instruction *FMFSource,
                                       const Twine &Name = "") {
    return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
  }
  static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
                                       Instruction *FMFSource,
                                       const Twine &Name = "") {
    return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
  }

  static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
                                   const Twine &Name = "") {
    BinaryOperator *BO = Create(Opc, V1, V2, Name);
    BO->setHasNoSignedWrap(true);
    return BO;
  }
  static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
                                   const Twine &Name, BasicBlock *BB) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
    BO->setHasNoSignedWrap(true);
    return BO;
  }
  static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
                                   const Twine &Name, Instruction *I) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
    BO->setHasNoSignedWrap(true);
    return BO;
  }

  static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
                                   const Twine &Name = "") {
    BinaryOperator *BO = Create(Opc, V1, V2, Name);
    BO->setHasNoUnsignedWrap(true);
    return BO;
  }
  static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
                                   const Twine &Name, BasicBlock *BB) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
    BO->setHasNoUnsignedWrap(true);
    return BO;
  }
  static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
                                   const Twine &Name, Instruction *I) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
    BO->setHasNoUnsignedWrap(true);
    return BO;
  }

  static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
                                     const Twine &Name = "") {
    BinaryOperator *BO = Create(Opc, V1, V2, Name);
    BO->setIsExact(true);
    return BO;
  }
  static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
                                     const Twine &Name, BasicBlock *BB) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
    BO->setIsExact(true);
    return BO;
  }
  static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
                                     const Twine &Name, Instruction *I) {
    BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
    BO->setIsExact(true);
    return BO;
  }

#define DEFINE_HELPERS(OPC, NUWNSWEXACT)                                       \
  static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2,        \
                                                  const Twine &Name = "") {    \
    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name);                \
  }                                                                            \
  static BinaryOperator *Create##NUWNSWEXACT##OPC(                             \
      Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) {               \
    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB);            \
  }                                                                            \
  static BinaryOperator *Create##NUWNSWEXACT##OPC(                             \
      Value *V1, Value *V2, const Twine &Name, Instruction *I) {               \
    return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I);             \
  }

  DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
  DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
  DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
  DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
  DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
  DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
  DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
  DEFINE_HELPERS(Shl, NUW) // CreateNUWShl

  DEFINE_HELPERS(SDiv, Exact)  // CreateExactSDiv
  DEFINE_HELPERS(UDiv, Exact)  // CreateExactUDiv
  DEFINE_HELPERS(AShr, Exact)  // CreateExactAShr
  DEFINE_HELPERS(LShr, Exact)  // CreateExactLShr

#undef DEFINE_HELPERS

  /// Helper functions to construct and inspect unary operations (NEG and NOT)
  /// via binary operators SUB and XOR:
  ///
  /// Create the NEG and NOT instructions out of SUB and XOR instructions.
  ///
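  /// For instance (illustrative IR), CreateNeg(%x) materialises 'sub 0, %x'
  /// and CreateNot(%x) materialises 'xor %x, -1'.
  ///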
  static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
                                   Instruction *InsertBefore = nullptr);
  static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
                                   BasicBlock *InsertAtEnd);
  static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
                                      Instruction *InsertBefore = nullptr);
  static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
                                      BasicBlock *InsertAtEnd);
  static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
                                      Instruction *InsertBefore = nullptr);
  static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
                                      BasicBlock *InsertAtEnd);
  static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
                                   Instruction *InsertBefore = nullptr);
  static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
                                   BasicBlock *InsertAtEnd);

  BinaryOps getOpcode() const {
    return static_cast<BinaryOps>(Instruction::getOpcode());
  }

  /// Exchange the two operands to this instruction.
  /// This method is safe to use on any binary instruction and
  /// does not modify the semantics of the instruction.  If the instruction
  /// cannot be reversed (i.e., it's a Div), then return true.
  ///
  bool swapOperands();

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->isBinaryOp();
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BinaryOperator> :
  public FixedNumOperandTraits<BinaryOperator, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)

//===----------------------------------------------------------------------===//
//                               CastInst Class
//===----------------------------------------------------------------------===//

/// This is the base class for all instructions that perform data
/// casts. It is simply provided so that instruction category testing
/// can be performed with code like:
///
/// if (isa<CastInst>(Instr)) { ... }
/// Base class of casting instructions.
class CastInst : public UnaryInstruction {
protected:
  /// Constructor with insert-before-instruction semantics for subclasses
  CastInst(Type *Ty, unsigned iType, Value *S,
           const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
    setName(NameStr);
  }
  /// Constructor with insert-at-end-of-block semantics for subclasses
  CastInst(Type *Ty, unsigned iType, Value *S,
           const Twine &NameStr, BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
    setName(NameStr);
  }

public:
  /// Provides a way to construct any of the CastInst subclasses using an
  /// opcode instead of the subclass's constructor. The opcode must be in the
  /// CastOps category (Instruction::isCast(opcode) returns true). This
  /// constructor has insert-before-instruction semantics to automatically
  /// insert the new CastInst before InsertBefore (if it is non-null).
  /// Construct any of the CastInst subclasses
  static CastInst *Create(
    Instruction::CastOps,    ///< The opcode of the cast instruction
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );
  /// Provides a way to construct any of the CastInst subclasses using an
  /// opcode instead of the subclass's constructor. The opcode must be in the
  /// CastOps category. This constructor has insert-at-end-of-block semantics
  /// to automatically insert the new CastInst at the end of InsertAtEnd (if
  /// it is non-null).
  /// Construct any of the CastInst subclasses
  static CastInst *Create(
    Instruction::CastOps,    ///< The opcode for the cast instruction
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which operand is casted
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create a ZExt or BitCast cast instruction
  static CastInst *CreateZExtOrBitCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a ZExt or BitCast cast instruction
  static CastInst *CreateZExtOrBitCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which operand is casted
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create a SExt or BitCast cast instruction
  static CastInst *CreateSExtOrBitCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a SExt or BitCast cast instruction
  static CastInst *CreateSExtOrBitCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which operand is casted
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
  static CastInst *CreatePointerCast(
    Value *S,                ///< The pointer value to be casted (operand 0)
    Type *Ty,          ///< The type to which operand is casted
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
  static CastInst *CreatePointerCast(
    Value *S,                ///< The pointer value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a BitCast or an AddrSpaceCast cast instruction.
  static CastInst *CreatePointerBitCastOrAddrSpaceCast(
    Value *S,                ///< The pointer value to be casted (operand 0)
    Type *Ty,          ///< The type to which operand is casted
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create a BitCast or an AddrSpaceCast cast instruction.
  static CastInst *CreatePointerBitCastOrAddrSpaceCast(
    Value *S,                ///< The pointer value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
  ///
  /// If the value is a pointer type and the destination an integer type,
  /// creates a PtrToInt cast. If the value is an integer type and the
  /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
  /// a bitcast.
  static CastInst *CreateBitOrPointerCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a ZExt, BitCast, or Trunc for int -> int casts.
  static CastInst *CreateIntegerCast(
    Value *S,                ///< The integer value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    bool isSigned,           ///< Whether to regard S as signed or not
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a ZExt, BitCast, or Trunc for int -> int casts.
  static CastInst *CreateIntegerCast(
    Value *S,                ///< The integer value to be casted (operand 0)
    Type *Ty,          ///< The integer type to which operand is casted
    bool isSigned,           ///< Whether to regard S as signed or not
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
  static CastInst *CreateFPCast(
    Value *S,                ///< The floating point value to be casted
    Type *Ty,          ///< The floating point type to cast to
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
  static CastInst *CreateFPCast(
    Value *S,                ///< The floating point value to be casted
    Type *Ty,          ///< The floating point type to cast to
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Create a Trunc or BitCast cast instruction
  static CastInst *CreateTruncOrBitCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which cast should be made
    const Twine &Name = "", ///< Name for the instruction
    Instruction *InsertBefore = nullptr ///< Place to insert the instruction
  );

  /// Create a Trunc or BitCast cast instruction
  static CastInst *CreateTruncOrBitCast(
    Value *S,                ///< The value to be casted (operand 0)
    Type *Ty,          ///< The type to which operand is casted
    const Twine &Name, ///< The name for the instruction
    BasicBlock *InsertAtEnd  ///< The block to insert the instruction into
  );

  /// Check whether a bitcast between these types is valid
  static bool isBitCastable(
    Type *SrcTy, ///< The Type from which the value should be cast.
    Type *DestTy ///< The Type to which the value should be cast.
  );

  /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
  /// types is valid and a no-op.
  ///
  /// This ensures that any pointer<->integer cast has enough bits in the
  /// integer and any other cast is a bitcast.
  static bool isBitOrNoopPointerCastable(
      Type *SrcTy,  ///< The Type from which the value should be cast.
      Type *DestTy, ///< The Type to which the value should be cast.
      const DataLayout &DL);

  /// Returns the opcode necessary to cast Val into Ty using usual casting
  /// rules.
  /// Infer the opcode for cast operand and type
  static Instruction::CastOps getCastOpcode(
    const Value *Val, ///< The value to cast
    bool SrcIsSigned, ///< Whether to treat the source as signed
    Type *Ty,   ///< The Type to which the value should be casted
    bool DstIsSigned  ///< Whether to treat the destination as signed
  );
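
  // A minimal usage sketch pairing getCastOpcode with CastInst::Create,
  // assuming an existing Value *V, Type *DestTy, and Instruction *InsertPt
  // (and that casting V to DestTy is valid):
  //
  //   Instruction::CastOps Op = CastInst::getCastOpcode(
  //       V, /*SrcIsSigned=*/false, DestTy, /*DstIsSigned=*/false);
  //   CastInst *C = CastInst::Create(Op, V, DestTy, "conv", InsertPt);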

  /// There are several places where we need to know if a cast instruction
  /// only deals with integer source and destination types. To simplify that
  /// logic, this method is provided.
  /// @returns true iff the cast has integer-typed operand and destination types.
  /// Determine if this is an integer-only cast.
  bool isIntegerCast() const;

  /// A no-op cast is one that can be effected without changing any bits.
  /// It implies that the source and destination types are the same size. The
  /// DataLayout argument is to determine the pointer size when examining casts
  /// involving Integer and Pointer types. They are no-op casts if the integer
  /// is the same size as the pointer. However, pointer size varies with
  /// platform.  Note that a precondition of this method is that the cast is
  /// legal - i.e. the instruction formed with these operands would verify.
  static bool isNoopCast(
    Instruction::CastOps Opcode, ///< Opcode of cast
    Type *SrcTy,         ///< SrcTy of cast
    Type *DstTy,         ///< DstTy of cast
    const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
  );

  /// Determine if this cast is a no-op cast.
  ///
  /// \param DL is the DataLayout to determine pointer size.
  bool isNoopCast(const DataLayout &DL) const;
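
  // A minimal usage sketch, assuming an existing LLVMContext &Ctx, a
  // DataLayout &DL for a target with 64-bit pointers, and a default
  // address-space pointer type Type *PtrTy:
  //
  //   // PtrToInt to an integer as wide as the pointer changes no bits.
  //   bool IsNoop = CastInst::isNoopCast(Instruction::PtrToInt, PtrTy,
  //                                      Type::getInt64Ty(Ctx), DL);
  //   // IsNoop is expected to be true here.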

  /// Determine how a pair of casts can be eliminated, if they can be at all.
  /// This is a helper function for both CastInst and ConstantExpr.
  /// @returns 0 if the CastInst pair can't be eliminated, otherwise
  /// returns Instruction::CastOps value for a cast that can replace
  /// the pair, casting SrcTy to DstTy.
  /// Determine if a cast pair is eliminable
  static unsigned isEliminableCastPair(
    Instruction::CastOps firstOpcode,  ///< Opcode of first cast
    Instruction::CastOps secondOpcode, ///< Opcode of second cast
    Type *SrcTy, ///< SrcTy of 1st cast
    Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
    Type *DstTy, ///< DstTy of 2nd cast
    Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
    Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
    Type *DstIntPtrTy  ///< Integer type corresponding to Ptr DstTy, or null
  );
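
  // A minimal usage sketch, assuming i8/i16/i32 types I8, I16, I32 obtained
  // from an LLVMContext; no pointer types are involved, so the IntPtr
  // arguments are null:
  //
  //   // zext i8 -> i16 followed by zext i16 -> i32 folds to one ZExt.
  //   unsigned Folded = CastInst::isEliminableCastPair(
  //       Instruction::ZExt, Instruction::ZExt, I8, I16, I32,
  //       nullptr, nullptr, nullptr);
  //   // Folded is expected to equal Instruction::ZExt (nonzero).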

  /// Return the opcode of this CastInst
  Instruction::CastOps getOpcode() const {
    return Instruction::CastOps(Instruction::getOpcode());
  }

  /// Return the source type, as a convenience
  Type* getSrcTy() const { return getOperand(0)->getType(); }
  /// Return the destination type, as a convenience
  Type* getDestTy() const { return getType(); }

  /// This method can be used to determine if a cast from SrcTy to DstTy using
  /// Opcode op is valid or not.
  /// @returns true iff the proposed cast is valid.
  /// Determine if a cast is valid without creating one.
  static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
  static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
    return castIsValid(op, S->getType(), DstTy);
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->isCast();
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               CmpInst Class
//===----------------------------------------------------------------------===//

/// This class is the abstract base class for the comparison instructions,
/// ICmpInst and FCmpInst.
class CmpInst : public Instruction {
public:
  /// This enumeration lists the possible predicates for CmpInst subclasses.
  /// Values in the range 0-31 are reserved for FCmpInst, while values in the
  /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
  /// predicate values are not overlapping between the classes.
  ///
  /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
  /// FCMP_* values. Changing the bit patterns requires a potential change to
  /// those passes.
  enum Predicate : unsigned {
    // Opcode            U L G E    Intuitive operation
    FCMP_FALSE = 0, ///< 0 0 0 0    Always false (always folded)
    FCMP_OEQ = 1,   ///< 0 0 0 1    True if ordered and equal
    FCMP_OGT = 2,   ///< 0 0 1 0    True if ordered and greater than
    FCMP_OGE = 3,   ///< 0 0 1 1    True if ordered and greater than or equal
    FCMP_OLT = 4,   ///< 0 1 0 0    True if ordered and less than
    FCMP_OLE = 5,   ///< 0 1 0 1    True if ordered and less than or equal
    FCMP_ONE = 6,   ///< 0 1 1 0    True if ordered and operands are unequal
    FCMP_ORD = 7,   ///< 0 1 1 1    True if ordered (no nans)
    FCMP_UNO = 8,   ///< 1 0 0 0    True if unordered: isnan(X) | isnan(Y)
    FCMP_UEQ = 9,   ///< 1 0 0 1    True if unordered or equal
    FCMP_UGT = 10,  ///< 1 0 1 0    True if unordered or greater than
    FCMP_UGE = 11,  ///< 1 0 1 1    True if unordered, greater than, or equal
    FCMP_ULT = 12,  ///< 1 1 0 0    True if unordered or less than
    FCMP_ULE = 13,  ///< 1 1 0 1    True if unordered, less than, or equal
    FCMP_UNE = 14,  ///< 1 1 1 0    True if unordered or not equal
    FCMP_TRUE = 15, ///< 1 1 1 1    Always true (always folded)
    FIRST_FCMP_PREDICATE = FCMP_FALSE,
    LAST_FCMP_PREDICATE = FCMP_TRUE,
    BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
    ICMP_EQ = 32,  ///< equal
    ICMP_NE = 33,  ///< not equal
    ICMP_UGT = 34, ///< unsigned greater than
    ICMP_UGE = 35, ///< unsigned greater or equal
    ICMP_ULT = 36, ///< unsigned less than
    ICMP_ULE = 37, ///< unsigned less or equal
    ICMP_SGT = 38, ///< signed greater than
    ICMP_SGE = 39, ///< signed greater or equal
    ICMP_SLT = 40, ///< signed less than
    ICMP_SLE = 41, ///< signed less or equal
    FIRST_ICMP_PREDICATE = ICMP_EQ,
    LAST_ICMP_PREDICATE = ICMP_SLE,
    BAD_ICMP_PREDICATE = ICMP_SLE + 1
  };
  using PredicateField =
      Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;

  /// Returns the sequence of all FCmp predicates.
  static auto FCmpPredicates() {
    return enum_seq_inclusive(Predicate::FIRST_FCMP_PREDICATE,
                              Predicate::LAST_FCMP_PREDICATE,
                              force_iteration_on_noniterable_enum);
  }

  /// Returns the sequence of all ICmp predicates.
  static auto ICmpPredicates() {
    return enum_seq_inclusive(Predicate::FIRST_ICMP_PREDICATE,
                              Predicate::LAST_ICMP_PREDICATE,
                              force_iteration_on_noniterable_enum);
  }
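
  // A minimal usage sketch, assuming a raw_ostream &OS is available:
  //
  //   for (CmpInst::Predicate P : CmpInst::ICmpPredicates())
  //     OS << CmpInst::getPredicateName(P) << "\n"; // eq, ne, ugt, uge, ...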

protected:
  CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
          Value *LHS, Value *RHS, const Twine &Name = "",
          Instruction *InsertBefore = nullptr,
          Instruction *FlagsSource = nullptr);

  CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
          Value *LHS, Value *RHS, const Twine &Name,
          BasicBlock *InsertAtEnd);

public:
  // Allocate space for exactly two operands.
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Construct a compare instruction, given the opcode, the predicate and
  /// the two operands.  Optionally (if InstBefore is specified) insert the
  /// instruction into a BasicBlock right before the specified instruction.
  /// The specified Instruction is allowed to be a dereferenced end iterator.
  /// Create a CmpInst
  static CmpInst *Create(OtherOps Op,
                         Predicate predicate, Value *S1,
                         Value *S2, const Twine &Name = "",
                         Instruction *InsertBefore = nullptr);

  /// Construct a compare instruction, given the opcode, the predicate and the
  /// two operands.  Also automatically insert this instruction to the end of
  /// the BasicBlock specified.
  /// Create a CmpInst
  static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
                         Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);

  /// Get the opcode casted to the right type
  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  /// Return the predicate for this instruction.
  Predicate getPredicate() const { return getSubclassData<PredicateField>(); }

  /// Set the predicate for this instruction to the specified value.
  void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }

  static bool isFPPredicate(Predicate P) {
    static_assert(FIRST_FCMP_PREDICATE == 0,
                  "FIRST_FCMP_PREDICATE is required to be 0");
    return P <= LAST_FCMP_PREDICATE;
  }

  static bool isIntPredicate(Predicate P) {
    return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
  }

  static StringRef getPredicateName(Predicate P);

  bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
  bool isIntPredicate() const { return isIntPredicate(getPredicate()); }

  /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
  ///              OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
  /// @returns the inverse predicate for the instruction's current predicate.
  /// Return the inverse of the instruction's predicate.
  Predicate getInversePredicate() const {
    return getInversePredicate(getPredicate());
  }

  /// Returns the ordered variant of a floating point compare.
  ///
  /// For example, UEQ -> OEQ, ULT -> OLT, OEQ -> OEQ
  static Predicate getOrderedPredicate(Predicate Pred) {
    return static_cast<Predicate>(Pred & FCMP_ORD);
  }

  Predicate getOrderedPredicate() const {
    return getOrderedPredicate(getPredicate());
  }

  /// Returns the unordered variant of a floating point compare.
  ///
  /// For example, OEQ -> UEQ, OLT -> ULT, OEQ -> UEQ
  static Predicate getUnorderedPredicate(Predicate Pred) {
    return static_cast<Predicate>(Pred | FCMP_UNO);
  }

  Predicate getUnorderedPredicate() const {
    return getUnorderedPredicate(getPredicate());
  }
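
  // A minimal sketch of the bit-level relationship used above:
  //
  //   // Clearing the U bit orders a predicate; setting it unorders it.
  //   CmpInst::getOrderedPredicate(CmpInst::FCMP_UEQ);   // == FCMP_OEQ
  //   CmpInst::getUnorderedPredicate(CmpInst::FCMP_OLT); // == FCMP_ULT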

  /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
  ///              OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
  /// @returns the inverse predicate for predicate provided in \p pred.
  /// Return the inverse of a given predicate
  static Predicate getInversePredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
  ///              OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
  /// @returns the predicate that would be the result of exchanging the two
  /// operands of the CmpInst instruction without changing the result
  /// produced.
  /// Return the predicate as if the operands were swapped
  Predicate getSwappedPredicate() const {
    return getSwappedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction
  /// available.
  /// Return the predicate as if the operands were swapped.
  static Predicate getSwappedPredicate(Predicate pred);

  /// This is a static version that you can use without an instruction
  /// available.
  /// @returns true if the comparison predicate is strict, false otherwise.
  static bool isStrictPredicate(Predicate predicate);

  /// @returns true if the comparison predicate is strict, false otherwise.
  /// Determine if this instruction is using a strict comparison predicate.
  bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }

  /// This is a static version that you can use without an instruction
  /// available.
  /// @returns true if the comparison predicate is non-strict, false otherwise.
  static bool isNonStrictPredicate(Predicate predicate);

  /// @returns true if the comparison predicate is non-strict, false otherwise.
  /// Determine if this instruction is using a non-strict comparison predicate.
  bool isNonStrictPredicate() const {
    return isNonStrictPredicate(getPredicate());
  }

  /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
  /// Returns the strict version of non-strict comparisons.
  Predicate getStrictPredicate() const {
    return getStrictPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction
  /// available.
  /// @returns the strict version of the comparison provided in \p pred.
  /// If \p pred is not a non-strict comparison predicate, returns \p pred
  /// unchanged.
  /// Returns the strict version of non-strict comparisons.
  static Predicate getStrictPredicate(Predicate pred);

  /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
  /// Returns the non-strict version of strict comparisons.
  Predicate getNonStrictPredicate() const {
    return getNonStrictPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction
  /// available.
  /// @returns the non-strict version of comparison provided in \p pred.
  /// If \p pred is not a strict comparison predicate, returns \p pred.
  /// Returns the non-strict version of strict comparisons.
  static Predicate getNonStrictPredicate(Predicate pred);

  /// This is a static version that you can use without an instruction
  /// available.
  /// Return the flipped strictness of predicate
  static Predicate getFlippedStrictnessPredicate(Predicate pred);

  /// For a non-strict predicate such as SGE, returns the corresponding strict
  /// predicate (SGT); for a strict predicate such as SGT, returns the
  /// corresponding non-strict predicate (SGE). Other kinds of predicates are
  /// not supported.
  /// @returns the predicate with its strictness flipped.
  /// Return the flipped strictness of predicate
  Predicate getFlippedStrictnessPredicate() const {
    return getFlippedStrictnessPredicate(getPredicate());
  }

  /// Provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// This is just a convenience that dispatches to the subclasses.
  /// Swap the operands and adjust predicate accordingly to retain
  /// the same comparison.
  void swapOperands();

  /// This is just a convenience that dispatches to the subclasses.
  /// Determine if this CmpInst is commutative.
  bool isCommutative() const;

  /// Determine if this is an equals/not equals predicate.
  /// This is a static version that you can use without an instruction
  /// available.
  static bool isEquality(Predicate pred);

  /// Determine if this is an equals/not equals predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// Return true if the predicate is relational (not EQ or NE).
  static bool isRelational(Predicate P) { return !isEquality(P); }

  /// Return true if the predicate is relational (not EQ or NE).
  bool isRelational() const { return !isEquality(); }

  /// @returns true if the comparison is signed, false otherwise.
  /// Determine if this instruction is using a signed comparison.
  bool isSigned() const {
    return isSigned(getPredicate());
  }

  /// @returns true if the comparison is unsigned, false otherwise.
  /// Determine if this instruction is using an unsigned comparison.
  bool isUnsigned() const {
    return isUnsigned(getPredicate());
  }

  /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
  /// @returns the signed version of the unsigned predicate pred.
  /// Return the signed version of a predicate
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
  /// @returns the signed version of the predicate for this instruction (which
  /// has to be an unsigned predicate).
  /// Return the signed version of a predicate
  Predicate getSignedPredicate() {
    return getSignedPredicate(getPredicate());
  }

  /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
  /// @returns the unsigned version of the signed predicate pred.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
  /// @returns the unsigned version of the predicate for this instruction (which
  /// has to be a signed predicate).
  /// Return the unsigned version of a predicate
  Predicate getUnsignedPredicate() {
    return getUnsignedPredicate(getPredicate());
  }

  /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
  /// @returns the unsigned version of the signed predicate pred or
  ///          the signed version of the unsigned predicate pred.
  static Predicate getFlippedSignednessPredicate(Predicate pred);

  /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
  /// @returns the unsigned version of the signed predicate pred or
  ///          the signed version of the unsigned predicate pred.
  Predicate getFlippedSignednessPredicate() {
    return getFlippedSignednessPredicate(getPredicate());
  }

  /// This is just a convenience.
  /// Determine if this is true when both operands are the same.
  bool isTrueWhenEqual() const {
    return isTrueWhenEqual(getPredicate());
  }

  /// This is just a convenience.
  /// Determine if this is false when both operands are the same.
  bool isFalseWhenEqual() const {
    return isFalseWhenEqual(getPredicate());
  }

  /// @returns true if the predicate is unsigned, false otherwise.
  /// Determine if the predicate is an unsigned operation.
  static bool isUnsigned(Predicate predicate);

  /// @returns true if the predicate is signed, false otherwise.
  /// Determine if the predicate is a signed operation.
  static bool isSigned(Predicate predicate);

  /// Determine if the predicate is an ordered operation.
  static bool isOrdered(Predicate predicate);

  /// Determine if the predicate is an unordered operation.
  static bool isUnordered(Predicate predicate);

  /// Determine if the predicate is true when comparing a value with itself.
  static bool isTrueWhenEqual(Predicate predicate);

  /// Determine if the predicate is false when comparing a value with itself.
  static bool isFalseWhenEqual(Predicate predicate);

  /// Determine if Pred1 implies Pred2 is true when two compares have matching
  /// operands.
  static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);

  /// Determine if Pred1 implies Pred2 is false when two compares have matching
  /// operands.
  static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp ||
           I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Create a result type for fcmp/icmp
  static Type* makeCmpResultType(Type* opnd_type) {
    if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
      return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
                             vt->getElementCount());
    }
    return Type::getInt1Ty(opnd_type->getContext());
  }
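
  // A minimal usage sketch, assuming an existing LLVMContext &Ctx:
  //
  //   // A compare of <4 x i32> operands produces a <4 x i1> result.
  //   Type *V4I32 = FixedVectorType::get(Type::getInt32Ty(Ctx), 4);
  //   Type *ResTy = CmpInst::makeCmpResultType(V4I32); // <4 x i1>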

private:
  // Shadow Value::setValueSubclassData with a private forwarding method so that
  // subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }
};

// FIXME: these are redundant if CmpInst < BinaryOperator
template <>
struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)

raw_ostream &operator<<(raw_ostream &OS, CmpInst::Predicate Pred);

/// A lightweight accessor for an operand bundle meant to be passed
/// around by value.
struct OperandBundleUse {
  ArrayRef<Use> Inputs;

  OperandBundleUse() = default;
  explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
      : Inputs(Inputs), Tag(Tag) {}

  /// Return true if the operand at index \p Idx in this operand bundle
  /// has the attribute A.
  bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
    if (isDeoptOperandBundle())
      if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
        return Inputs[Idx]->getType()->isPointerTy();

    // Conservative answer:  no operands have any attributes.
    return false;
  }

  /// Return the tag of this operand bundle as a string.
  StringRef getTagName() const {
    return Tag->getKey();
  }

  /// Return the tag of this operand bundle as an integer.
  ///
  /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
  /// and this function returns the unique integer that getOrInsertBundleTag
  /// associated with this operand bundle's tag.
  uint32_t getTagID() const {
    return Tag->getValue();
  }

  /// Return true if this is a "deopt" operand bundle.
  bool isDeoptOperandBundle() const {
    return getTagID() == LLVMContext::OB_deopt;
  }

  /// Return true if this is a "funclet" operand bundle.
  bool isFuncletOperandBundle() const {
    return getTagID() == LLVMContext::OB_funclet;
  }

  /// Return true if this is a "cfguardtarget" operand bundle.
  bool isCFGuardTargetOperandBundle() const {
    return getTagID() == LLVMContext::OB_cfguardtarget;
  }

private:
  /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
  StringMapEntry<uint32_t> *Tag;
};

/// A container for an operand bundle being viewed as a set of values
/// rather than a set of uses.
///
/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
/// so it is possible to create and pass around "self-contained" instances of
/// OperandBundleDef and ConstOperandBundleDef.
template <typename InputTy> class OperandBundleDefT {
  std::string Tag;
  std::vector<InputTy> Inputs;

public:
  explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
      : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
  explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
      : Tag(std::move(Tag)), Inputs(Inputs) {}

  explicit OperandBundleDefT(const OperandBundleUse &OBU) {
    Tag = std::string(OBU.getTagName());
    llvm::append_range(Inputs, OBU.Inputs);
  }

  ArrayRef<InputTy> inputs() const { return Inputs; }

  using input_iterator = typename std::vector<InputTy>::const_iterator;

  size_t input_size() const { return Inputs.size(); }
  input_iterator input_begin() const { return Inputs.begin(); }
  input_iterator input_end() const { return Inputs.end(); }

  StringRef getTag() const { return Tag; }
};

using OperandBundleDef = OperandBundleDefT<Value *>;
using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
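
// A minimal usage sketch, assuming two existing Value *X and *Y to carry as
// deopt state:
//
//   SmallVector<Value *, 2> DeoptState = {X, Y};
//   OperandBundleDef DeoptBundle("deopt", DeoptState);
//   // DeoptBundle now owns its tag string and input list, so it can be
//   // passed around independently of any instruction.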

//===----------------------------------------------------------------------===//
//                               CallBase Class
//===----------------------------------------------------------------------===//

/// Base class for all callable instructions (InvokeInst and CallInst)
/// Holds everything related to calling a function.
///
/// All call-like instructions are required to use a common operand layout:
/// - Zero or more arguments to the call,
/// - Zero or more operand bundles, each with zero or more operand inputs,
/// - Zero or more subclass-controlled operands,
/// - The called function.
///
/// This allows this base class to easily access the called function and the
/// start of the arguments without knowing how many other operands a particular
/// subclass requires. Note that accessing the end of the argument list isn't
/// as cheap as most other operations on the base class.
class CallBase : public Instruction {
protected:
  // The first two bits are reserved by CallInst for fast retrieval.
  using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
  using CallingConvField =
      Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
                        CallingConv::MaxID>;
  static_assert(
      Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
      "Bitfields must be contiguous");

  /// The last operand is the called operand.
  static constexpr int CalledOperandOpEndIdx = -1;

  AttributeList Attrs; ///< parameter attributes for callable
  FunctionType *FTy;

  template <class... ArgsTy>
  CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
      : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}

  using Instruction::Instruction;

  bool hasDescriptor() const { return Value::HasDescriptor; }

  unsigned getNumSubclassExtraOperands() const {
    switch (getOpcode()) {
    case Instruction::Call:
      return 0;
    case Instruction::Invoke:
      return 2;
    case Instruction::CallBr:
      return getNumSubclassExtraOperandsDynamic();
    }
    llvm_unreachable("Invalid opcode!");
  }

  /// Get the number of extra operands for instructions that don't have a fixed
  /// number of extra operands.
  unsigned getNumSubclassExtraOperandsDynamic() const;

public:
  using Instruction::getContext;

  /// Create a clone of \p CB with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical to \p CB in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Create a clone of \p CB whose operand bundle with a tag matching
  /// \p Bundle's tag is replaced with \p Bundle, and insert it before
  /// \p InsertPt.
  ///
  /// The returned call instruction is identical to \p CB in every way except that
  /// the specified operand bundle has been replaced.
  static CallBase *Create(CallBase *CB,
                          OperandBundleDef Bundle,
                          Instruction *InsertPt = nullptr);

  /// Create a clone of \p CB with operand bundle \p OB added.
  static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
                                    OperandBundleDef OB,
                                    Instruction *InsertPt = nullptr);

  /// Create a clone of \p CB with operand bundle \p ID removed.
  static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
                                       Instruction *InsertPt = nullptr);
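
  // A minimal usage sketch, assuming an existing CallBase *CB and a Value
  // *State to attach; the clone is inserted right before CB, which the
  // caller would then typically RAUW and erase:
  //
  //   OperandBundleDef OB("deopt", std::vector<Value *>{State});
  //   CallBase *NewCB =
  //       CallBase::addOperandBundle(CB, LLVMContext::OB_deopt, OB, CB);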

  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call ||
           I->getOpcode() == Instruction::Invoke ||
           I->getOpcode() == Instruction::CallBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  FunctionType *getFunctionType() const { return FTy; }

  void mutateFunctionType(FunctionType *FTy) {
    Value::mutateType(FTy->getReturnType());
    this->FTy = FTy;
  }

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// data_operands_begin/data_operands_end - Return iterators iterating over
  /// the call / invoke argument list and bundle operands.  For invokes, this is
  /// the set of instruction operands except the invoke target and the two
  /// successor blocks; and for calls this is the set of instruction operands
  /// except the call target.
  User::op_iterator data_operands_begin() { return op_begin(); }
  User::const_op_iterator data_operands_begin() const {
    return const_cast<CallBase *>(this)->data_operands_begin();
  }
  User::op_iterator data_operands_end() {
    // Walk from the end of the operands over the called operand and any
    // subclass operands.
    return op_end() - getNumSubclassExtraOperands() - 1;
  }
  User::const_op_iterator data_operands_end() const {
    return const_cast<CallBase *>(this)->data_operands_end();
  }
  iterator_range<User::op_iterator> data_ops() {
    return make_range(data_operands_begin(), data_operands_end());
  }
  iterator_range<User::const_op_iterator> data_ops() const {
    return make_range(data_operands_begin(), data_operands_end());
  }
  bool data_operands_empty() const {
    return data_operands_end() == data_operands_begin();
  }
  unsigned data_operands_size() const {
    return std::distance(data_operands_begin(), data_operands_end());
  }

  bool isDataOperand(const Use *U) const {
    assert(this == U->getUser() &&
           "Only valid to query with a use of this instruction!");
    return data_operands_begin() <= U && U < data_operands_end();
  }
  bool isDataOperand(Value::const_user_iterator UI) const {
    return isDataOperand(&UI.getUse());
  }

  /// Given a value use iterator, return the data operand corresponding to it.
  /// Iterator must actually correspond to a data operand.
  unsigned getDataOperandNo(Value::const_user_iterator UI) const {
    return getDataOperandNo(&UI.getUse());
  }

  /// Given a use for a data operand, get the data operand number that
  /// corresponds to it.
  unsigned getDataOperandNo(const Use *U) const {
    assert(isDataOperand(U) && "Data operand # out of range!");
    return U - data_operands_begin();
  }

  /// Return the iterator pointing to the beginning of the argument list.
  User::op_iterator arg_begin() { return op_begin(); }
  User::const_op_iterator arg_begin() const {
    return const_cast<CallBase *>(this)->arg_begin();
  }

  /// Return the iterator pointing to the end of the argument list.
  User::op_iterator arg_end() {
    // From the end of the data operands, walk backwards past the bundle
    // operands.
    return data_operands_end() - getNumTotalBundleOperands();
  }
  User::const_op_iterator arg_end() const {
    return const_cast<CallBase *>(this)->arg_end();
  }

  /// Iteration adapter for range-for loops.
  iterator_range<User::op_iterator> args() {
    return make_range(arg_begin(), arg_end());
  }
  iterator_range<User::const_op_iterator> args() const {
    return make_range(arg_begin(), arg_end());
  }
  bool arg_empty() const { return arg_end() == arg_begin(); }
  unsigned arg_size() const { return arg_end() - arg_begin(); }
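
  // A minimal usage sketch, assuming an existing CallBase &CB:
  //
  //   // Count the pointer-typed arguments of the call site.
  //   unsigned NumPtrArgs = 0;
  //   for (Value *Arg : CB.args())
  //     if (Arg->getType()->isPointerTy())
  //       ++NumPtrArgs;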

  Value *getArgOperand(unsigned i) const {
    assert(i < arg_size() && "Out of bounds!");
    return getOperand(i);
  }

  void setArgOperand(unsigned i, Value *v) {
    assert(i < arg_size() && "Out of bounds!");
    setOperand(i, v);
  }

  /// Wrappers for getting the \c Use of a call argument.
  const Use &getArgOperandUse(unsigned i) const {
    assert(i < arg_size() && "Out of bounds!");
    return User::getOperandUse(i);
  }
  Use &getArgOperandUse(unsigned i) {
    assert(i < arg_size() && "Out of bounds!");
    return User::getOperandUse(i);
  }

  bool isArgOperand(const Use *U) const {
    assert(this == U->getUser() &&
           "Only valid to query with a use of this instruction!");
    return arg_begin() <= U && U < arg_end();
  }
  bool isArgOperand(Value::const_user_iterator UI) const {
    return isArgOperand(&UI.getUse());
  }

  /// Given a use for an arg operand, get the arg operand number that
  /// corresponds to it.
  unsigned getArgOperandNo(const Use *U) const {
    assert(isArgOperand(U) && "Arg operand # out of range!");
    return U - arg_begin();
  }

  /// Given a value use iterator, return the arg operand number corresponding to
  /// it. The iterator must actually correspond to an argument operand.
  unsigned getArgOperandNo(Value::const_user_iterator UI) const {
    return getArgOperandNo(&UI.getUse());
  }

  /// Returns true if this call site passes the given Value* as an argument to
  /// the called function.
  bool hasArgument(const Value *V) const {
    return llvm::is_contained(args(), V);
  }

  Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }

  const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
  Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }

  /// Returns the function called, or null if this is an indirect function
  /// invocation or the function signature does not match the call signature.
  Function *getCalledFunction() const {
    if (auto *F = dyn_cast_or_null<Function>(getCalledOperand()))
      if (F->getValueType() == getFunctionType())
        return F;
    return nullptr;
  }

  /// Return true if the callsite is an indirect call.
  bool isIndirectCall() const;

  /// Determine whether the passed iterator points to the callee operand's Use.
  bool isCallee(Value::const_user_iterator UI) const {
    return isCallee(&UI.getUse());
  }

  /// Determine whether this Use is the callee operand's Use.
  bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }

  /// Helper to get the caller (the parent function).
  Function *getCaller();
  const Function *getCaller() const {
    return const_cast<CallBase *>(this)->getCaller();
  }

  /// Tests if this call site must be tail call optimized. Only a CallInst can
  /// be tail call optimized.
  bool isMustTailCall() const;

  /// Tests if this call site is marked as a tail call.
  bool isTailCall() const;

  /// Returns the intrinsic ID of the intrinsic called or
  /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
  /// this is an indirect call.
  Intrinsic::ID getIntrinsicID() const;

  void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }

  /// Sets the function called, including updating the function type.
  void setCalledFunction(Function *Fn) {
    setCalledFunction(Fn->getFunctionType(), Fn);
  }

  /// Sets the function called, including updating the function type.
  void setCalledFunction(FunctionCallee Fn) {
    setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
  }

  /// Sets the function called, including updating to the specified function
  /// type.
  void setCalledFunction(FunctionType *FTy, Value *Fn) {
    this->FTy = FTy;
    // This function does not mutate the return type, only the function type;
    // the assert below guards against introducing a mismatch between the
    // call's type and the new function type's return type.
    assert(getType() == FTy->getReturnType());
    setCalledOperand(Fn);
  }

  CallingConv::ID getCallingConv() const {
    return getSubclassData<CallingConvField>();
  }

  void setCallingConv(CallingConv::ID CC) {
    setSubclassData<CallingConvField>(CC);
  }

  /// Check if this call is an inline asm statement.
  bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }

  /// \name Attribute API
  ///
  /// These methods access and modify attributes on this call (including
  /// looking through to the attributes on the called function when necessary).
  ///@{

  /// Return the parameter attributes for this call.
  ///
  AttributeList getAttributes() const { return Attrs; }

  /// Set the parameter attributes for this call.
  ///
  void setAttributes(AttributeList A) { Attrs = A; }

  /// Determine whether this call has the given attribute. If it does not
  /// then determine if the called function has the attribute, but only if
  /// the attribute is allowed for the call.
  bool hasFnAttr(Attribute::AttrKind Kind) const {
    assert(Kind != Attribute::NoBuiltin &&
           "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin");
    return hasFnAttrImpl(Kind);
  }

  /// Determine whether this call has the given attribute. If it does not
  /// then determine if the called function has the attribute, but only if
  /// the attribute is allowed for the call.
  bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }

  // TODO: remove non-AtIndex versions of these methods.
  /// Adds the attribute to the list of attributes.
  void addAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
    Attrs = Attrs.addAttributeAtIndex(getContext(), i, Kind);
  }

  /// Adds the attribute to the list of attributes.
  void addAttributeAtIndex(unsigned i, Attribute Attr) {
    Attrs = Attrs.addAttributeAtIndex(getContext(), i, Attr);
  }

  /// Adds the attribute to the function.
  void addFnAttr(Attribute::AttrKind Kind) {
    Attrs = Attrs.addFnAttribute(getContext(), Kind);
  }

  /// Adds the attribute to the function.
  void addFnAttr(Attribute Attr) {
    Attrs = Attrs.addFnAttribute(getContext(), Attr);
  }

  /// Adds the attribute to the return value.
  void addRetAttr(Attribute::AttrKind Kind) {
    Attrs = Attrs.addRetAttribute(getContext(), Kind);
  }

  /// Adds the attribute to the return value.
  void addRetAttr(Attribute Attr) {
    Attrs = Attrs.addRetAttribute(getContext(), Attr);
  }

  /// Adds the attribute to the indicated argument
  void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
    assert(ArgNo < arg_size() && "Out of bounds");
    Attrs = Attrs.addParamAttribute(getContext(), ArgNo, Kind);
  }

  /// Adds the attribute to the indicated argument
  void addParamAttr(unsigned ArgNo, Attribute Attr) {
    assert(ArgNo < arg_size() && "Out of bounds");
    Attrs = Attrs.addParamAttribute(getContext(), ArgNo, Attr);
  }
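
  // A minimal usage sketch of the add* helpers, assuming an existing
  // CallBase &CB whose first argument is a pointer:
  //
  //   CB.addParamAttr(0, Attribute::NonNull); // argument 0 is non-null
  //   CB.addRetAttr(Attribute::NoUndef);      // return value is not undef
  //   CB.addFnAttr(Attribute::NoUnwind);      // the call does not unwind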

  /// Removes the attribute from the list of attributes.
  void removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
    Attrs = Attrs.removeAttributeAtIndex(getContext(), i, Kind);
  }

  /// Removes the attribute from the list of attributes.
  void removeAttributeAtIndex(unsigned i, StringRef Kind) {
    Attrs = Attrs.removeAttributeAtIndex(getContext(), i, Kind);
  }

  /// Removes the attributes from the function
  void removeFnAttrs(const AttributeMask &AttrsToRemove) {
    Attrs = Attrs.removeFnAttributes(getContext(), AttrsToRemove);
  }

  /// Removes the attribute from the function
  void removeFnAttr(Attribute::AttrKind Kind) {
    Attrs = Attrs.removeFnAttribute(getContext(), Kind);
  }

  /// Removes the attribute from the function
  void removeFnAttr(StringRef Kind) {
    Attrs = Attrs.removeFnAttribute(getContext(), Kind);
  }

  /// Removes the attribute from the return value
  void removeRetAttr(Attribute::AttrKind Kind) {
    Attrs = Attrs.removeRetAttribute(getContext(), Kind);
  }

  /// Removes the attributes from the return value
  void removeRetAttrs(const AttributeMask &AttrsToRemove) {
    Attrs = Attrs.removeRetAttributes(getContext(), AttrsToRemove);
  }

  /// Removes the attribute from the given argument
  void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
    assert(ArgNo < arg_size() && "Out of bounds");
    Attrs = Attrs.removeParamAttribute(getContext(), ArgNo, Kind);
  }

  /// Removes the attribute from the given argument
  void removeParamAttr(unsigned ArgNo, StringRef Kind) {
    assert(ArgNo < arg_size() && "Out of bounds");
    Attrs = Attrs.removeParamAttribute(getContext(), ArgNo, Kind);
  }

  /// Removes the attributes from the given argument
  void removeParamAttrs(unsigned ArgNo, const AttributeMask &AttrsToRemove) {
    Attrs = Attrs.removeParamAttributes(getContext(), ArgNo, AttrsToRemove);
  }

  /// Adds the dereferenceable attribute to the list of attributes.
  void addDereferenceableParamAttr(unsigned i, uint64_t Bytes) {
    Attrs = Attrs.addDereferenceableParamAttr(getContext(), i, Bytes);
  }

  /// Adds the dereferenceable attribute to the list of attributes.
  void addDereferenceableRetAttr(uint64_t Bytes) {
    Attrs = Attrs.addDereferenceableRetAttr(getContext(), Bytes);
  }

  /// Determine whether the return value has the given attribute.
  bool hasRetAttr(Attribute::AttrKind Kind) const {
    return hasRetAttrImpl(Kind);
  }
  /// Determine whether the return value has the given attribute.
  bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }

  /// Determine whether the argument or parameter has the given attribute.
  bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;

  /// Get the attribute of a given kind at a position.
  Attribute getAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) const {
    return getAttributes().getAttributeAtIndex(i, Kind);
  }

  /// Get the attribute of a given kind at a position.
  Attribute getAttributeAtIndex(unsigned i, StringRef Kind) const {
    return getAttributes().getAttributeAtIndex(i, Kind);
  }

  /// Get the attribute of a given kind for the function.
  Attribute getFnAttr(StringRef Kind) const {
    Attribute Attr = getAttributes().getFnAttr(Kind);
    if (Attr.isValid())
      return Attr;
    return getFnAttrOnCalledFunction(Kind);
  }

  /// Get the attribute of a given kind for the function.
  Attribute getFnAttr(Attribute::AttrKind Kind) const {
    Attribute A = getAttributes().getFnAttr(Kind);
    if (A.isValid())
      return A;
    return getFnAttrOnCalledFunction(Kind);
  }

  /// Get the attribute of a given kind from a given arg
  Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
    assert(ArgNo < arg_size() && "Out of bounds");
    return getAttributes().getParamAttr(ArgNo, Kind);
  }

  /// Get the attribute of a given kind from a given arg
  Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
    assert(ArgNo < arg_size() && "Out of bounds");
    return getAttributes().getParamAttr(ArgNo, Kind);
  }

  /// Return true if the data operand at index \p i has the attribute
  /// \p Kind.
  ///
  /// Data operands include call arguments and values used in operand bundles,
  /// but does not include the callee operand.
  ///
  /// The index \p i is interpreted as
  ///
  ///  \p i in [0, arg_size)  -> argument number (\p i)
  ///  \p i in [arg_size, data_operand_size) -> bundle operand at index
  ///     (\p i) in the operand list.
  bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
    // The index is zero-based: arguments first, then bundle operands.
    assert(i < arg_size() + getNumTotalBundleOperands() &&
           "Data operand index out of bounds!");

    // The attribute Kind can either be directly specified, if the operand in
    // question is a call argument; or be indirectly implied by the kind of its
    // containing operand bundle, if the operand is a bundle operand.

    if (i < arg_size())
      return paramHasAttr(i, Kind);

    assert(hasOperandBundles() && i >= getBundleOperandsStartIndex() &&
           "Must be either a call argument or an operand bundle!");
    return bundleOperandHasAttr(i, Kind);
  }

  /// Determine whether this data operand is not captured.
  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
  // better indicate that this may return a conservative answer.
  bool doesNotCapture(unsigned OpNo) const {
    return dataOperandHasImpliedAttr(OpNo, Attribute::NoCapture);
  }

  /// Determine whether this argument is passed by value.
  bool isByValArgument(unsigned ArgNo) const {
    return paramHasAttr(ArgNo, Attribute::ByVal);
  }

  /// Determine whether this argument is passed in an alloca.
  bool isInAllocaArgument(unsigned ArgNo) const {
    return paramHasAttr(ArgNo, Attribute::InAlloca);
  }

  /// Determine whether this argument is passed by value, in an alloca, or is
  /// preallocated.
  bool isPassPointeeByValueArgument(unsigned ArgNo) const {
    return paramHasAttr(ArgNo, Attribute::ByVal) ||
           paramHasAttr(ArgNo, Attribute::InAlloca) ||
           paramHasAttr(ArgNo, Attribute::Preallocated);
  }

  /// Determine whether passing undef to this argument is undefined behavior.
  /// If passing undef to this argument is UB, passing poison is UB as well
  /// because poison is more undefined than undef.
  bool isPassingUndefUB(unsigned ArgNo) const {
    return paramHasAttr(ArgNo, Attribute::NoUndef) ||
           // dereferenceable implies noundef.
           paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
           // dereferenceable implies noundef, and null is a well-defined value.
           paramHasAttr(ArgNo, Attribute::DereferenceableOrNull);
  }

  /// Determine if there is an inalloca argument. Only the last argument can
  /// have the inalloca attribute.
  bool hasInAllocaArgument() const {
    return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
  }

  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
  // better indicate that this may return a conservative answer.
  bool doesNotAccessMemory(unsigned OpNo) const {
    return dataOperandHasImpliedAttr(OpNo, Attribute::ReadNone);
  }

  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
  // better indicate that this may return a conservative answer.
  bool onlyReadsMemory(unsigned OpNo) const {
    return dataOperandHasImpliedAttr(OpNo, Attribute::ReadOnly) ||
           dataOperandHasImpliedAttr(OpNo, Attribute::ReadNone);
  }

  // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
  // better indicate that this may return a conservative answer.
  bool onlyWritesMemory(unsigned OpNo) const {
    return dataOperandHasImpliedAttr(OpNo, Attribute::WriteOnly) ||
           dataOperandHasImpliedAttr(OpNo, Attribute::ReadNone);
  }

  /// Extract the alignment of the return value.
  MaybeAlign getRetAlign() const {
    if (auto Align = Attrs.getRetAlignment())
      return Align;
    if (const Function *F = getCalledFunction())
      return F->getAttributes().getRetAlignment();
    return std::nullopt;
  }

  /// Extract the alignment for a call or parameter (0=unknown).
  MaybeAlign getParamAlign(unsigned ArgNo) const {
    return Attrs.getParamAlignment(ArgNo);
  }

  MaybeAlign getParamStackAlign(unsigned ArgNo) const {
    return Attrs.getParamStackAlignment(ArgNo);
  }

  /// Extract the byval type for a call or parameter.
  Type *getParamByValType(unsigned ArgNo) const {
    if (auto *Ty = Attrs.getParamByValType(ArgNo))
      return Ty;
    if (const Function *F = getCalledFunction())
      return F->getAttributes().getParamByValType(ArgNo);
    return nullptr;
  }

  /// Extract the preallocated type for a call or parameter.
  Type *getParamPreallocatedType(unsigned ArgNo) const {
    if (auto *Ty = Attrs.getParamPreallocatedType(ArgNo))
      return Ty;
    if (const Function *F = getCalledFunction())
      return F->getAttributes().getParamPreallocatedType(ArgNo);
    return nullptr;
  }

  /// Extract the inalloca type for a call or parameter.
  Type *getParamInAllocaType(unsigned ArgNo) const {
    if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
      return Ty;
    if (const Function *F = getCalledFunction())
      return F->getAttributes().getParamInAllocaType(ArgNo);
    return nullptr;
  }

  /// Extract the sret type for a call or parameter.
  Type *getParamStructRetType(unsigned ArgNo) const {
    if (auto *Ty = Attrs.getParamStructRetType(ArgNo))
      return Ty;
    if (const Function *F = getCalledFunction())
      return F->getAttributes().getParamStructRetType(ArgNo);
    return nullptr;
  }

  /// Extract the elementtype type for a parameter.
  /// Note that elementtype() can only be applied to call arguments, not
  /// function declaration parameters.
  Type *getParamElementType(unsigned ArgNo) const {
    return Attrs.getParamElementType(ArgNo);
  }

  /// Extract the number of dereferenceable bytes for a call or
  /// parameter (0=unknown).
  uint64_t getRetDereferenceableBytes() const {
    uint64_t Bytes = Attrs.getRetDereferenceableBytes();
    if (const Function *F = getCalledFunction())
      Bytes = std::max(Bytes, F->getAttributes().getRetDereferenceableBytes());
    return Bytes;
  }

  /// Extract the number of dereferenceable bytes for a call or
  /// parameter (0=unknown).
  uint64_t getParamDereferenceableBytes(unsigned i) const {
    return Attrs.getParamDereferenceableBytes(i);
  }

  /// Extract the number of dereferenceable_or_null bytes for a call
  /// (0=unknown).
  uint64_t getRetDereferenceableOrNullBytes() const {
    uint64_t Bytes = Attrs.getRetDereferenceableOrNullBytes();
    if (const Function *F = getCalledFunction()) {
      Bytes = std::max(Bytes,
                       F->getAttributes().getRetDereferenceableOrNullBytes());
    }

    return Bytes;
  }

  /// Extract the number of dereferenceable_or_null bytes for a
  /// parameter (0=unknown).
  uint64_t getParamDereferenceableOrNullBytes(unsigned i) const {
    return Attrs.getParamDereferenceableOrNullBytes(i);
  }

  /// Extract a test mask for disallowed floating-point value classes for the
  /// return value.
  FPClassTest getRetNoFPClass() const;

  /// Extract a test mask for disallowed floating-point value classes for the
  /// parameter.
  FPClassTest getParamNoFPClass(unsigned i) const;

  /// Return true if the return value is known to be not null.
  /// This may be because it has the nonnull attribute, or because at least
  /// one byte is dereferenceable and the pointer is in addrspace(0).
  bool isReturnNonNull() const;

  /// Determine if the return value is marked with NoAlias attribute.
  bool returnDoesNotAlias() const {
    return Attrs.hasRetAttr(Attribute::NoAlias);
  }

  /// If one of the arguments has the 'returned' attribute, returns its
  /// operand value. Otherwise, return nullptr.
  Value *getReturnedArgOperand() const {
    return getArgOperandWithAttribute(Attribute::Returned);
  }

  /// If one of the arguments has the specified attribute, returns its
  /// operand value. Otherwise, return nullptr.
  Value *getArgOperandWithAttribute(Attribute::AttrKind Kind) const;

  /// Return true if the call should not be treated as a call to a
  /// builtin.
  bool isNoBuiltin() const {
    return hasFnAttrImpl(Attribute::NoBuiltin) &&
           !hasFnAttrImpl(Attribute::Builtin);
  }

  /// Determine if the call requires strict floating point semantics.
  bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }

  /// Return true if the call should not be inlined.
  bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
  void setIsNoInline() { addFnAttr(Attribute::NoInline); }

  MemoryEffects getMemoryEffects() const;
  void setMemoryEffects(MemoryEffects ME);

  /// Determine if the call does not access memory.
  bool doesNotAccessMemory() const;
  void setDoesNotAccessMemory();

  /// Determine if the call does not access or only reads memory.
  bool onlyReadsMemory() const;
  void setOnlyReadsMemory();

  /// Determine if the call does not access or only writes memory.
  bool onlyWritesMemory() const;
  void setOnlyWritesMemory();

  /// Determine if the call can access memory only using pointers based
  /// on its arguments.
  bool onlyAccessesArgMemory() const;
  void setOnlyAccessesArgMemory();

  /// Determine if the function may only access memory that is
  /// inaccessible from the IR.
  bool onlyAccessesInaccessibleMemory() const;
  void setOnlyAccessesInaccessibleMemory();

  /// Determine if the function may only access memory that is
  /// either inaccessible from the IR or pointed to by its arguments.
  bool onlyAccessesInaccessibleMemOrArgMem() const;
  void setOnlyAccessesInaccessibleMemOrArgMem();

  /// Determine if the call cannot return.
  bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
  void setDoesNotReturn() { addFnAttr(Attribute::NoReturn); }

  /// Determine if the call should not perform indirect branch tracking.
  bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }

  /// Determine if the call cannot unwind.
  bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
  void setDoesNotThrow() { addFnAttr(Attribute::NoUnwind); }

  /// Determine if the invoke cannot be duplicated.
  bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
  void setCannotDuplicate() { addFnAttr(Attribute::NoDuplicate); }

  /// Determine if the call cannot be tail merged.
  bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
  void setCannotMerge() { addFnAttr(Attribute::NoMerge); }

  /// Determine if the invoke is convergent
  bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
  void setConvergent() { addFnAttr(Attribute::Convergent); }
  void setNotConvergent() { removeFnAttr(Attribute::Convergent); }

  /// Determine if the call returns a structure through first
  /// pointer argument.
  bool hasStructRetAttr() const {
    if (arg_empty())
      return false;

    // Be friendly and also check the callee.
    return paramHasAttr(0, Attribute::StructRet);
  }

  /// Determine if any call argument is an aggregate passed by value.
  bool hasByValArgument() const {
    return Attrs.hasAttrSomewhere(Attribute::ByVal);
  }

  ///@}
  // End of attribute API.

  /// \name Operand Bundle API
  ///
  /// This group of methods provides the API to access and manipulate operand
  /// bundles on this call.
  /// @{

  /// Return the number of operand bundles associated with this User.
  unsigned getNumOperandBundles() const {
    return std::distance(bundle_op_info_begin(), bundle_op_info_end());
  }

  /// Return true if this User has any operand bundles.
  bool hasOperandBundles() const { return getNumOperandBundles() != 0; }

  /// Return the index of the first bundle operand in the Use array.
  unsigned getBundleOperandsStartIndex() const {
    assert(hasOperandBundles() && "Don't call otherwise!");
    return bundle_op_info_begin()->Begin;
  }

  /// Return the index of the last bundle operand in the Use array.
  unsigned getBundleOperandsEndIndex() const {
    assert(hasOperandBundles() && "Don't call otherwise!");
    return bundle_op_info_end()[-1].End;
  }

  /// Return true if the operand at index \p Idx is a bundle operand.
  bool isBundleOperand(unsigned Idx) const {
    return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
           Idx < getBundleOperandsEndIndex();
  }

  /// Return true if the operand at index \p Idx is a bundle operand that has
  /// tag ID \p ID.
  bool isOperandBundleOfType(uint32_t ID, unsigned Idx) const {
    return isBundleOperand(Idx) &&
           getOperandBundleForOperand(Idx).getTagID() == ID;
  }

  /// Returns true if the use is a bundle operand.
  bool isBundleOperand(const Use *U) const {
    assert(this == U->getUser() &&
           "Only valid to query with a use of this instruction!");
    return hasOperandBundles() && isBundleOperand(U - op_begin());
  }
  bool isBundleOperand(Value::const_user_iterator UI) const {
    return isBundleOperand(&UI.getUse());
  }

  /// Return the total number of operands (not operand bundles) used by
  /// every operand bundle in this OperandBundleUser.
  unsigned getNumTotalBundleOperands() const {
    if (!hasOperandBundles())
      return 0;

    unsigned Begin = getBundleOperandsStartIndex();
    unsigned End = getBundleOperandsEndIndex();

    assert(Begin <= End && "Should be!");
    return End - Begin;
  }

  /// Return the operand bundle at a specific index.
  OperandBundleUse getOperandBundleAt(unsigned Index) const {
    assert(Index < getNumOperandBundles() && "Index out of bounds!");
    return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
  }

  /// Return the number of operand bundles with the tag Name attached to
  /// this instruction.
  unsigned countOperandBundlesOfType(StringRef Name) const {
    unsigned Count = 0;
    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
      if (getOperandBundleAt(i).getTagName() == Name)
        Count++;

    return Count;
  }

  /// Return the number of operand bundles with the tag ID attached to
  /// this instruction.
  unsigned countOperandBundlesOfType(uint32_t ID) const {
    unsigned Count = 0;
    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
      if (getOperandBundleAt(i).getTagID() == ID)
        Count++;

    return Count;
  }

  /// Return an operand bundle by name, if present.
  ///
  /// It is an error to call this for operand bundle types that may have
  /// multiple instances on the same instruction.
  std::optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
    assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");

    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
      OperandBundleUse U = getOperandBundleAt(i);
      if (U.getTagName() == Name)
        return U;
    }

    return std::nullopt;
  }

  /// Return an operand bundle by tag ID, if present.
  ///
  /// It is an error to call this for operand bundle types that may have
  /// multiple instances on the same instruction.
  std::optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
    assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");

    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
      OperandBundleUse U = getOperandBundleAt(i);
      if (U.getTagID() == ID)
        return U;
    }

    return std::nullopt;
  }
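
  // Example (sketch): querying the well-known "deopt" bundle by its tag ID.
  // LLVMContext::OB_deopt is the interned ID for the "deopt" tag; CB is
  // assumed to be a `const CallBase *`:
  //
  //   if (std::optional<OperandBundleUse> Deopt =
  //           CB->getOperandBundle(LLVMContext::OB_deopt))
  //     errs() << "deopt state has " << Deopt->Inputs.size() << " values\n";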

  /// Return the list of operand bundles attached to this instruction as
  /// a vector of OperandBundleDefs.
  ///
  /// This function copies the OperandBundleUse instances associated with this
  /// OperandBundleUser to a vector of OperandBundleDefs.  Note:
  /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
  /// representations of operand bundles (see documentation above).
  void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;

  /// Return the operand bundle for the operand at index OpIdx.
  ///
  /// It is an error to call this with an OpIdx that does not correspond to a
  /// bundle operand.
  OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
    return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
  }

  /// Return true if this operand bundle user has operand bundles that
  /// may read from the heap.
  bool hasReadingOperandBundles() const;

  /// Return true if this operand bundle user has operand bundles that
  /// may write to the heap.
  bool hasClobberingOperandBundles() const;

  /// Return true if the bundle operand at index \p OpIdx has the
  /// attribute \p A.
  bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
    auto &BOI = getBundleOpInfoForOperand(OpIdx);
    auto OBU = operandBundleFromBundleOpInfo(BOI);
    return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
  }

  /// Return true if \p Other has the same sequence of operand bundle
  /// tags, with the same number of operands for each bundle, as this
  /// OperandBundleUser.
  bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
    if (getNumOperandBundles() != Other.getNumOperandBundles())
      return false;

    return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
                      Other.bundle_op_info_begin());
  }

  /// Return true if this operand bundle user contains operand bundles
  /// with tags other than those specified in \p IDs.
  bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
    for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
      uint32_t ID = getOperandBundleAt(i).getTagID();
      if (!is_contained(IDs, ID))
        return true;
    }
    return false;
  }

  /// Used to keep track of an operand bundle.  See the main comment on
  /// OperandBundleUser above.
  struct BundleOpInfo {
    /// The operand bundle tag, interned by
    /// LLVMContextImpl::getOrInsertBundleTag.
    StringMapEntry<uint32_t> *Tag;

    /// The index in the Use& vector where operands for this operand
    /// bundle start.
    uint32_t Begin;

    /// The index in the Use& vector one past the last operand of this
    /// operand bundle.
    uint32_t End;

    bool operator==(const BundleOpInfo &Other) const {
      return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
    }
  };

  /// Simple helper function to map a BundleOpInfo to an
  /// OperandBundleUse.
  OperandBundleUse
  operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
    const auto *begin = op_begin();
    ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
    return OperandBundleUse(BOI.Tag, Inputs);
  }

  using bundle_op_iterator = BundleOpInfo *;
  using const_bundle_op_iterator = const BundleOpInfo *;

  /// Return the start of the list of BundleOpInfo instances associated
  /// with this OperandBundleUser.
  ///
  /// OperandBundleUser uses the descriptor area co-allocated with the host User
  /// to store some meta information about which operands are "normal" operands,
  /// and which ones belong to some operand bundle.
  ///
  /// The layout of an operand bundle user is
  ///
  ///          +-----------uint32_t End-------------------------------------+
  ///          |                                                            |
  ///          |  +--------uint32_t Begin--------------------+              |
  ///          |  |                                          |              |
  ///          ^  ^                                          v              v
  ///  |------|------|----|----|----|----|----|---------|----|---------|----|-----
  ///  | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
  ///  |------|------|----|----|----|----|----|---------|----|---------|----|-----
  ///   v  v                                  ^              ^
  ///   |  |                                  |              |
  ///   |  +--------uint32_t Begin------------+              |
  ///   |                                                    |
  ///   +-----------uint32_t End-----------------------------+
  ///
  ///
  /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
  /// list. These descriptions are installed and managed by this class, and
  /// they're all instances of \p BundleOpInfo.
  ///
  /// DU is an additional descriptor installed by User's 'operator new' to keep
  /// track of the 'BOI0 ... BOIN' co-allocation.  OperandBundleUser does not
  /// access or modify DU in any way, it's an implementation detail private to
  /// User.
  ///
  /// The regular Use& vector for the User starts at U0.  The operand bundle
  /// uses are part of the Use& vector, just like normal uses.  In the diagram
  /// above, the operand bundle uses start at BOI0_U0.  Each instance of
  /// BundleOpInfo has information about a contiguous set of uses constituting
  /// an operand bundle, and the total set of operand bundle uses themselves
  /// form a contiguous set of uses (i.e. there are no gaps between uses
  /// corresponding to individual operand bundles).
  ///
  /// This class does not know the location of the set of operand bundle uses
  /// within the use list -- that is decided by the User using this class via
  /// the BeginIdx argument in populateBundleOperandInfos.
  ///
  /// Currently operand bundle users with hung-off operands are not supported.
  bundle_op_iterator bundle_op_info_begin() {
    if (!hasDescriptor())
      return nullptr;

    uint8_t *BytesBegin = getDescriptor().begin();
    return reinterpret_cast<bundle_op_iterator>(BytesBegin);
  }
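
  // Worked example (illustrative): for `call void @f(i32 %a) [
  // "deopt"(i32 %x, i32 %y) ]`, the Use array is laid out as
  //
  //   index 0: %a   (normal argument)
  //   index 1: %x   (first "deopt" input)
  //   index 2: %y   (second "deopt" input)
  //   index 3: @f   (the called operand, always last for a CallInst)
  //
  // and the single BundleOpInfo has Begin == 1 and End == 3, so
  // getBundleOperandsStartIndex() == 1 and getBundleOperandsEndIndex() == 3.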

  /// Return the start of the list of BundleOpInfo instances associated
  /// with this OperandBundleUser.
  const_bundle_op_iterator bundle_op_info_begin() const {
    auto *NonConstThis = const_cast<CallBase *>(this);
    return NonConstThis->bundle_op_info_begin();
  }

  /// Return the end of the list of BundleOpInfo instances associated
  /// with this OperandBundleUser.
  bundle_op_iterator bundle_op_info_end() {
    if (!hasDescriptor())
      return nullptr;

    uint8_t *BytesEnd = getDescriptor().end();
    return reinterpret_cast<bundle_op_iterator>(BytesEnd);
  }

  /// Return the end of the list of BundleOpInfo instances associated
  /// with this OperandBundleUser.
  const_bundle_op_iterator bundle_op_info_end() const {
    auto *NonConstThis = const_cast<CallBase *>(this);
    return NonConstThis->bundle_op_info_end();
  }

  /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
  iterator_range<bundle_op_iterator> bundle_op_infos() {
    return make_range(bundle_op_info_begin(), bundle_op_info_end());
  }

  /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
  iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
    return make_range(bundle_op_info_begin(), bundle_op_info_end());
  }

  /// Populate the BundleOpInfo instances and the Use& vector from \p
  /// Bundles.  Return the op_iterator pointing to the Use& one past the
  /// last bundle operand use.
  ///
  /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo
  /// instance allocated in this User's descriptor.
  op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
                                         const unsigned BeginIndex);

public:
  /// Return the BundleOpInfo for the operand at index OpIdx.
  ///
  /// It is an error to call this with an OpIdx that does not correspond to a
  /// bundle operand.
  BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
  const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
    return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
  }

protected:
  /// Return the total number of values used in \p Bundles.
  static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
    unsigned Total = 0;
    for (const auto &B : Bundles)
      Total += B.input_size();
    return Total;
  }

  /// @}
  // End of operand bundle API.

private:
  bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
  bool hasFnAttrOnCalledFunction(StringRef Kind) const;

  template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
    if (Attrs.hasFnAttr(Kind))
      return true;

    return hasFnAttrOnCalledFunction(Kind);
  }
  template <typename AK> Attribute getFnAttrOnCalledFunction(AK Kind) const;

  /// Determine whether the return value has the given attribute. Supports
  /// Attribute::AttrKind and StringRef as \p AttrKind types.
  template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
    if (Attrs.hasRetAttr(Kind))
      return true;

    // Look at the callee, if available.
    if (const Function *F = getCalledFunction())
      return F->getAttributes().hasRetAttr(Kind);
    return false;
  }
};

template <>
struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)

//===----------------------------------------------------------------------===//
//                           FuncletPadInst Class
//===----------------------------------------------------------------------===//
class FuncletPadInst : public Instruction {
private:
  FuncletPadInst(const FuncletPadInst &CPI);

  explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                          ArrayRef<Value *> Args, unsigned Values,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                          ArrayRef<Value *> Args, unsigned Values,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;
  friend class CatchPadInst;
  friend class CleanupPadInst;

  FuncletPadInst *cloneImpl() const;

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// arg_size - Return the number of funcletpad arguments.
  ///
  unsigned arg_size() const { return getNumOperands() - 1; }

  /// Convenience accessors

  /// Return the outer EH-pad this funclet is nested within.
  ///
  /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
  /// is a CatchPadInst.
  Value *getParentPad() const { return Op<-1>(); }
  void setParentPad(Value *ParentPad) {
    assert(ParentPad);
    Op<-1>() = ParentPad;
  }

  /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
  ///
  Value *getArgOperand(unsigned i) const { return getOperand(i); }
  void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }

  /// arg_operands - iteration adapter for range-for loops.
  op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }

  /// arg_operands - iteration adapter for range-for loops.
  const_op_range arg_operands() const {
    return const_op_range(op_begin(), op_end() - 1);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) { return I->isFuncletPad(); }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
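
// Example (a minimal sketch, assuming FPI is a `FuncletPadInst *`, e.g. a
// CatchPadInst or CleanupPadInst found while walking a function's EH pads):
//
//   errs() << "parent pad: " << *FPI->getParentPad() << "\n";
//   for (const Use &Arg : FPI->arg_operands())
//     errs() << "  arg: " << *Arg.get() << "\n";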

template <>
struct OperandTraits<FuncletPadInst>
    : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)

} // end namespace llvm

#endif // LLVM_IR_INSTRTYPES_H
// File: IR/IntrinsicsHexagonDep.td
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Automatically generated file, do not edit!
//===----------------------------------------------------------------------===//

// tag : A2_abs
class Hexagon_i32_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i32_ty],
       intr_properties>;

// tag : A2_absp
class Hexagon_i64_i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty],
       intr_properties>;

// tag : A2_add
class Hexagon_i32_i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : A2_addp
class Hexagon_i64_i64i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
       intr_properties>;

// tag : A2_addsp
class Hexagon_i64_i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : A2_combineii
class Hexagon_i64_i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : A2_roundsat
class Hexagon_i32_i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i64_ty],
       intr_properties>;

// tag : A2_sxtw
class Hexagon_i64_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i32_ty],
       intr_properties>;

// tag : A2_vcmpbeq
class Hexagon_i32_i64i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i64_ty,llvm_i64_ty],
       intr_properties>;

// tag : A2_vraddub_acc
class Hexagon_i64_i64i64i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i64_ty],
       intr_properties>;

// tag : A4_boundscheck
class Hexagon_i32_i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : A4_tlbmatch
class Hexagon_i32_i64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i64_ty,llvm_i32_ty],
       intr_properties>;

// tag : A4_vrmaxh
class Hexagon_i64_i64i64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
       intr_properties>;

// tag : A7_croundd_ri
class Hexagon_i64_i64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
       intr_properties>;

// tag : C2_mux
class Hexagon_i32_i32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : C2_vmux
class Hexagon_i64_i32i64i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i32_ty,llvm_i64_ty,llvm_i64_ty],
       intr_properties>;

// tag : F2_conv_d2df
class Hexagon_double_i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_double_ty], [llvm_i64_ty],
       intr_properties>;

// tag : F2_conv_d2sf
class Hexagon_float_i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_i64_ty],
       intr_properties>;

// tag : F2_conv_df2d
class Hexagon_i64_double_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_double_ty],
       intr_properties>;

// tag : F2_conv_df2sf
class Hexagon_float_double_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_double_ty],
       intr_properties>;

// tag : F2_conv_df2uw
class Hexagon_i32_double_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_double_ty],
       intr_properties>;

// tag : F2_conv_sf2d
class Hexagon_i64_float_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_float_ty],
       intr_properties>;

// tag : F2_conv_sf2df
class Hexagon_double_float_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_double_ty], [llvm_float_ty],
       intr_properties>;

// tag : F2_conv_sf2uw
class Hexagon_i32_float_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_float_ty],
       intr_properties>;

// tag : F2_conv_uw2df
class Hexagon_double_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_double_ty], [llvm_i32_ty],
       intr_properties>;

// tag : F2_conv_uw2sf
class Hexagon_float_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_i32_ty],
       intr_properties>;

// tag : F2_dfadd
class Hexagon_double_doubledouble_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_double_ty], [llvm_double_ty,llvm_double_ty],
       intr_properties>;

// tag : F2_dfclass
class Hexagon_i32_doublei32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_double_ty,llvm_i32_ty],
       intr_properties>;

// tag : F2_dfcmpeq
class Hexagon_i32_doubledouble_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_double_ty,llvm_double_ty],
       intr_properties>;

// tag : F2_dfmpyhh
class Hexagon_double_doubledoubledouble_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_double_ty], [llvm_double_ty,llvm_double_ty,llvm_double_ty],
       intr_properties>;

// tag : F2_sfadd
class Hexagon_float_floatfloat_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_float_ty,llvm_float_ty],
       intr_properties>;

// tag : F2_sfclass
class Hexagon_i32_floati32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_float_ty,llvm_i32_ty],
       intr_properties>;

// tag : F2_sfcmpeq
class Hexagon_i32_floatfloat_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_float_ty,llvm_float_ty],
       intr_properties>;

// tag : F2_sffixupr
class Hexagon_float_float_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_float_ty],
       intr_properties>;

// tag : F2_sffma
class Hexagon_float_floatfloatfloat_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty],
       intr_properties>;

// tag : F2_sffma_sc
class Hexagon_float_floatfloatfloati32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_float_ty], [llvm_float_ty,llvm_float_ty,llvm_float_ty,llvm_i32_ty],
       intr_properties>;

// tag : M2_cmaci_s0
class Hexagon_i64_i64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : S2_insert
class Hexagon_i32_i32i32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : S2_insert_rp
class Hexagon_i32_i32i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : S2_insertp
class Hexagon_i64_i64i64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_extractw
class Hexagon_i32_v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_extractw
class Hexagon_i32_v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_hi
class Hexagon_v16i32_v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v32i32_ty],
       intr_properties>;

// tag : V6_hi
class Hexagon_v32i32_v64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v64i32_ty],
       intr_properties>;

// tag : V6_lvsplatb
class Hexagon_v16i32_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_i32_ty],
       intr_properties>;

// tag : V6_lvsplatb
class Hexagon_v32i32_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_i32_ty],
       intr_properties>;

// tag : V6_pred_and
class Hexagon_v64i1_v64i1v64i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v64i1_ty],
       intr_properties>;

// tag : V6_pred_and
class Hexagon_v128i1_v128i1v128i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v128i1_ty],
       intr_properties>;

// tag : V6_pred_not
class Hexagon_v64i1_v64i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_v64i1_ty],
       intr_properties>;

// tag : V6_pred_not
class Hexagon_v128i1_v128i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_v128i1_ty],
       intr_properties>;

// tag : V6_pred_scalar2
class Hexagon_v64i1_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_i32_ty],
       intr_properties>;

// tag : V6_pred_scalar2
class Hexagon_v128i1_i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_i32_ty],
       intr_properties>;

// tag : V6_v6mpyhubs10
class Hexagon_v32i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_v6mpyhubs10
class Hexagon_v64i32_v64i32v64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_v6mpyhubs10_vxx
class Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_v6mpyhubs10_vxx
class Hexagon_v64i32_v64i32v64i32v64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vS32b_nqpred_ai
class Hexagon__v64i1ptrv16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_v64i1_ty,llvm_ptr_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vS32b_nqpred_ai
class Hexagon__v128i1ptrv32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_v128i1_ty,llvm_ptr_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vabs_hf
class Hexagon_v16i32_v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vabs_hf
class Hexagon_v32i32_v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vabsdiffh
class Hexagon_v16i32_v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vabsdiffh
class Hexagon_v32i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vadd_sf_bf
class Hexagon_v32i32_v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vadd_sf_bf
class Hexagon_v64i32_v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vaddb_dv
class Hexagon_v64i32_v64i32v64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
       intr_properties>;

// tag : V6_vaddbnq
class Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vaddbnq
class Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vaddcarry
class Hexagon_custom_v16i32v64i1_v16i32v16i32v64i1_Intrinsic<
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_NonGCC_Intrinsic<
       [llvm_v16i32_ty,llvm_v64i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v64i1_ty],
       intr_properties>;

// tag : V6_vaddcarry
class Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B<
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_NonGCC_Intrinsic<
       [llvm_v32i32_ty,llvm_v128i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v128i1_ty],
       intr_properties>;

// tag : V6_vaddcarryo
class Hexagon_custom_v16i32v64i1_v16i32v16i32_Intrinsic<
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_NonGCC_Intrinsic<
       [llvm_v16i32_ty,llvm_v64i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vaddcarryo
class Hexagon_custom_v32i32v128i1_v32i32v32i32_Intrinsic_128B<
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_NonGCC_Intrinsic<
       [llvm_v32i32_ty,llvm_v128i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vaddcarrysat
class Hexagon_v16i32_v16i32v16i32v64i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v64i1_ty],
       intr_properties>;

// tag : V6_vaddcarrysat
class Hexagon_v32i32_v32i32v32i32v128i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v128i1_ty],
       intr_properties>;

// tag : V6_vaddhw_acc
class Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vaddhw_acc
class Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_valignb
class Hexagon_v16i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandnqrt
class Hexagon_v16i32_v64i1i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v64i1_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandnqrt
class Hexagon_v32i32_v128i1i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v128i1_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandnqrt_acc
class Hexagon_v16i32_v16i32v64i1i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v64i1_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandnqrt_acc
class Hexagon_v32i32_v32i32v128i1i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v128i1_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandvnqv
class Hexagon_v16i32_v64i1v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vandvnqv
class Hexagon_v32i32_v128i1v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vandvrt
class Hexagon_v64i1_v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandvrt
class Hexagon_v128i1_v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandvrt_acc
class Hexagon_v64i1_v64i1v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vandvrt_acc
class Hexagon_v128i1_v128i1v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vaslh
class Hexagon_v16i32_v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vaslh
class Hexagon_v32i32_v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vasrvuhubrndsat
class Hexagon_v16i32_v32i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vasrvuhubrndsat
class Hexagon_v32i32_v64i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vassignp
class Hexagon_v64i32_v64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty],
       intr_properties>;

// tag : V6_vcvt_hf_b
class Hexagon_v32i32_v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vcvt_hf_b
class Hexagon_v64i32_v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vd0
class Hexagon_v16i32__Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [],
       intr_properties>;

// tag : V6_vd0
class Hexagon_v32i32__Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [],
       intr_properties>;

// tag : V6_vdd0
class Hexagon_v64i32__Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [],
       intr_properties>;

// tag : V6_vdealvdd
class Hexagon_v32i32_v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vdealvdd
class Hexagon_v64i32_v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vdmpy_sf_hf_acc
class Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vdmpy_sf_hf_acc
class Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vdmpybus_dv
class Hexagon_v64i32_v64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vdmpyhisat
class Hexagon_v16i32_v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vdmpyhisat
class Hexagon_v32i32_v64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vdmpyhisat_acc
class Hexagon_v16i32_v16i32v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vdmpyhisat_acc
class Hexagon_v32i32_v32i32v64i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_veqb
class Hexagon_v64i1_v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_veqb
class Hexagon_v128i1_v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_veqb_and
class Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i1_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_veqb_and
class Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v128i1_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vgathermh
class Hexagon__ptri32i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vgathermh
class Hexagon__ptri32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vgathermhq
class Hexagon__ptrv64i1i32i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vgathermhq
class Hexagon__ptrv128i1i32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vgathermhw
class Hexagon__ptri32i32v64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty],
       intr_properties>;

// tag : V6_vgathermhwq
class Hexagon__ptrv64i1i32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vgathermhwq
class Hexagon__ptrv128i1i32i32v64i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty],
       intr_properties>;

// tag : V6_vlut4
class Hexagon_v16i32_v16i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : V6_vlut4
class Hexagon_v32i32_v32i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : V6_vlutvvb_oracc
class Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vlutvwh_oracc
class Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vlutvwh_oracc
class Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vmpahhsat
class Hexagon_v16i32_v16i32v16i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : V6_vmpahhsat
class Hexagon_v32i32_v32i32v32i32i64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty],
       intr_properties>;

// tag : V6_vmpybus
class Hexagon_v32i32_v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vmpybus
class Hexagon_v64i32_v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vmpybus_acc
class Hexagon_v32i32_v32i32v16i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vmpybus_acc
class Hexagon_v64i32_v64i32v32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vprefixqb
class Hexagon_v16i32_v64i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v16i32_ty], [llvm_v64i1_ty],
       intr_properties>;

// tag : V6_vprefixqb
class Hexagon_v32i32_v128i1_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v128i1_ty],
       intr_properties>;

// tag : V6_vrmpybusi
class Hexagon_v32i32_v32i32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vrmpybusi
class Hexagon_v64i32_v64i32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vrmpybusi_acc
class Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vrmpybusi_acc
class Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
       intr_properties>;

// tag : V6_vscattermh
class Hexagon__i32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vscattermh
class Hexagon__i32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vscattermhq
class Hexagon__v64i1i32i32v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vscattermhq
class Hexagon__v128i1i32i32v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vscattermhw
class Hexagon__i32i32v32i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vscattermhw
class Hexagon__i32i32v64i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vscattermhwq
class Hexagon__v64i1i32i32v32i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_v64i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v32i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vscattermhwq
class Hexagon__v128i1i32i32v64i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_v128i1_ty,llvm_i32_ty,llvm_i32_ty,llvm_v64i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vswap
class Hexagon_v32i32_v64i1v16i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v64i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vswap
class Hexagon_v64i32_v128i1v32i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v128i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : V6_vunpackob
class Hexagon_v32i32_v32i32v16i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
       intr_properties>;

// tag : V6_vunpackob
class Hexagon_v64i32_v64i32v32i32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
       intr_properties>;

// tag : Y2_dccleana
class Hexagon__ptr_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty],
       intr_properties>;

// tag : Y4_l2fetch
class Hexagon__ptri32_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_i32_ty],
       intr_properties>;

// tag : Y5_l2fetch
class Hexagon__ptri64_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_i64_ty],
       intr_properties>;

// tag : Y6_dmlink
class Hexagon__ptrptr_Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [], [llvm_ptr_ty,llvm_ptr_ty],
       intr_properties>;

// tag : Y6_dmpause
class Hexagon_i32__Intrinsic<string GCCIntSuffix,
      list<IntrinsicProperty> intr_properties = [IntrNoMem]>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_i32_ty], [],
       intr_properties>;

// V5 Scalar Instructions.

def int_hexagon_A2_abs :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abs">;
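
// Example (sketch, C++): each def above yields an Intrinsic::* enum value
// that can be materialized and called through the usual IRBuilder APIs:
//
//   Function *Abs =
//       Intrinsic::getDeclaration(M, Intrinsic::hexagon_A2_abs);
//   Value *R = Builder.CreateCall(Abs, {X}); // X and R are i32 values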

def int_hexagon_A2_absp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_absp">;

def int_hexagon_A2_abssat :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_abssat">;

def int_hexagon_A2_add :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_add">;

def int_hexagon_A2_addh_h16_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hh">;

def int_hexagon_A2_addh_h16_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_hl">;

def int_hexagon_A2_addh_h16_lh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_lh">;

def int_hexagon_A2_addh_h16_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_ll">;

def int_hexagon_A2_addh_h16_sat_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;

def int_hexagon_A2_addh_h16_sat_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;

def int_hexagon_A2_addh_h16_sat_lh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;

def int_hexagon_A2_addh_h16_sat_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;

def int_hexagon_A2_addh_l16_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_hl">;

def int_hexagon_A2_addh_l16_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_ll">;

def int_hexagon_A2_addh_l16_sat_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;

def int_hexagon_A2_addh_l16_sat_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;

def int_hexagon_A2_addi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;
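
// Note (sketch, C++): ImmArg<ArgIndex<1>> requires the second operand to be
// an immediate, so from C++ it must be passed as a ConstantInt, e.g.:
//
//   Function *AddI =
//       Intrinsic::getDeclaration(M, Intrinsic::hexagon_A2_addi);
//   Value *R = Builder.CreateCall(AddI, {X, Builder.getInt32(7)});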

def int_hexagon_A2_addp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addp">;

def int_hexagon_A2_addpsat :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_addpsat">;

def int_hexagon_A2_addsat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_addsat">;

def int_hexagon_A2_addsp :
Hexagon_i64_i32i64_Intrinsic<"HEXAGON_A2_addsp">;

def int_hexagon_A2_and :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_and">;

def int_hexagon_A2_andir :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_andir", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A2_andp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_andp">;

def int_hexagon_A2_aslh :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_aslh">;

def int_hexagon_A2_asrh :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_asrh">;

def int_hexagon_A2_combine_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hh">;

def int_hexagon_A2_combine_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_hl">;

def int_hexagon_A2_combine_lh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_lh">;

def int_hexagon_A2_combine_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_combine_ll">;

def int_hexagon_A2_combineii :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combineii", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A2_combinew :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A2_combinew">;

def int_hexagon_A2_max :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_max">;

def int_hexagon_A2_maxp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxp">;

def int_hexagon_A2_maxu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_maxu">;

def int_hexagon_A2_maxup :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_maxup">;

def int_hexagon_A2_min :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_min">;

def int_hexagon_A2_minp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minp">;

def int_hexagon_A2_minu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_minu">;

def int_hexagon_A2_minup :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_minup">;

def int_hexagon_A2_neg :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_neg">;

def int_hexagon_A2_negp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_negp">;

def int_hexagon_A2_negsat :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_negsat">;

def int_hexagon_A2_not :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_not">;

def int_hexagon_A2_notp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_notp">;

def int_hexagon_A2_or :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_or">;

def int_hexagon_A2_orir :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_orir", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A2_orp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_orp">;

def int_hexagon_A2_roundsat :
Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_roundsat">;

def int_hexagon_A2_sat :
Hexagon_i32_i64_Intrinsic<"HEXAGON_A2_sat">;

def int_hexagon_A2_satb :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satb">;

def int_hexagon_A2_sath :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sath">;

def int_hexagon_A2_satub :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satub">;

def int_hexagon_A2_satuh :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_satuh">;

def int_hexagon_A2_sub :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_sub">;

def int_hexagon_A2_subh_h16_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hh">;

def int_hexagon_A2_subh_h16_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_hl">;

def int_hexagon_A2_subh_h16_lh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_lh">;

def int_hexagon_A2_subh_h16_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_ll">;

def int_hexagon_A2_subh_h16_sat_hh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;

def int_hexagon_A2_subh_h16_sat_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;

def int_hexagon_A2_subh_h16_sat_lh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;

def int_hexagon_A2_subh_h16_sat_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;

def int_hexagon_A2_subh_l16_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_hl">;

def int_hexagon_A2_subh_l16_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_ll">;

def int_hexagon_A2_subh_l16_sat_hl :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;

def int_hexagon_A2_subh_l16_sat_ll :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;

def int_hexagon_A2_subp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_subp">;

def int_hexagon_A2_subri :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subri", [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_hexagon_A2_subsat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_subsat">;

def int_hexagon_A2_svaddh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddh">;

def int_hexagon_A2_svaddhs :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svaddhs">;

def int_hexagon_A2_svadduhs :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svadduhs">;

def int_hexagon_A2_svavgh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavgh">;

def int_hexagon_A2_svavghs :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svavghs">;

def int_hexagon_A2_svnavgh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svnavgh">;

def int_hexagon_A2_svsubh :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubh">;

def int_hexagon_A2_svsubhs :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubhs">;

def int_hexagon_A2_svsubuhs :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_svsubuhs">;

def int_hexagon_A2_swiz :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_swiz">;

def int_hexagon_A2_sxtb :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxtb">;

def int_hexagon_A2_sxth :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_sxth">;

def int_hexagon_A2_sxtw :
Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_sxtw">;

def int_hexagon_A2_tfr :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfr">;

def int_hexagon_A2_tfrih :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfrih", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A2_tfril :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_tfril", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A2_tfrp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_tfrp">;

def int_hexagon_A2_tfrpi :
Hexagon_i64_i32_Intrinsic<"HEXAGON_A2_tfrpi", [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_hexagon_A2_tfrsi :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_tfrsi", [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_hexagon_A2_vabsh :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsh">;

def int_hexagon_A2_vabshsat :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabshsat">;

def int_hexagon_A2_vabsw :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabsw">;

def int_hexagon_A2_vabswsat :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vabswsat">;

def int_hexagon_A2_vaddb_map :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddb_map">;

def int_hexagon_A2_vaddh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddh">;

def int_hexagon_A2_vaddhs :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddhs">;

def int_hexagon_A2_vaddub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddub">;

def int_hexagon_A2_vaddubs :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddubs">;

def int_hexagon_A2_vadduhs :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vadduhs">;

def int_hexagon_A2_vaddw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddw">;

def int_hexagon_A2_vaddws :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vaddws">;

def int_hexagon_A2_vavgh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgh">;

def int_hexagon_A2_vavghcr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghcr">;

def int_hexagon_A2_vavghr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavghr">;

def int_hexagon_A2_vavgub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgub">;

def int_hexagon_A2_vavgubr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgubr">;

def int_hexagon_A2_vavguh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguh">;

def int_hexagon_A2_vavguhr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguhr">;

def int_hexagon_A2_vavguw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguw">;

def int_hexagon_A2_vavguwr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavguwr">;

def int_hexagon_A2_vavgw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgw">;

def int_hexagon_A2_vavgwcr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwcr">;

def int_hexagon_A2_vavgwr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vavgwr">;

def int_hexagon_A2_vcmpbeq :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbeq">;

def int_hexagon_A2_vcmpbgtu :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpbgtu">;

def int_hexagon_A2_vcmpheq :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpheq">;

def int_hexagon_A2_vcmphgt :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgt">;

def int_hexagon_A2_vcmphgtu :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmphgtu">;

def int_hexagon_A2_vcmpweq :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpweq">;

def int_hexagon_A2_vcmpwgt :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgt">;

def int_hexagon_A2_vcmpwgtu :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A2_vcmpwgtu">;

def int_hexagon_A2_vconj :
Hexagon_i64_i64_Intrinsic<"HEXAGON_A2_vconj">;

def int_hexagon_A2_vmaxb :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxb">;

def int_hexagon_A2_vmaxh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxh">;

def int_hexagon_A2_vmaxub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxub">;

def int_hexagon_A2_vmaxuh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuh">;

def int_hexagon_A2_vmaxuw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxuw">;

def int_hexagon_A2_vmaxw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vmaxw">;

def int_hexagon_A2_vminb :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminb">;

def int_hexagon_A2_vminh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminh">;

def int_hexagon_A2_vminub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminub">;

def int_hexagon_A2_vminuh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuh">;

def int_hexagon_A2_vminuw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminuw">;

def int_hexagon_A2_vminw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vminw">;

def int_hexagon_A2_vnavgh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgh">;

def int_hexagon_A2_vnavghcr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghcr">;

def int_hexagon_A2_vnavghr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavghr">;

def int_hexagon_A2_vnavgw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgw">;

def int_hexagon_A2_vnavgwcr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwcr">;

def int_hexagon_A2_vnavgwr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vnavgwr">;

def int_hexagon_A2_vraddub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vraddub">;

def int_hexagon_A2_vraddub_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vraddub_acc">;

def int_hexagon_A2_vrsadub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vrsadub">;

def int_hexagon_A2_vrsadub_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_A2_vrsadub_acc">;

def int_hexagon_A2_vsubb_map :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubb_map">;

def int_hexagon_A2_vsubh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubh">;

def int_hexagon_A2_vsubhs :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubhs">;

def int_hexagon_A2_vsubub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubub">;

def int_hexagon_A2_vsububs :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsububs">;

def int_hexagon_A2_vsubuhs :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubuhs">;

def int_hexagon_A2_vsubw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubw">;

def int_hexagon_A2_vsubws :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_vsubws">;

def int_hexagon_A2_xor :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A2_xor">;

def int_hexagon_A2_xorp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A2_xorp">;

def int_hexagon_A2_zxtb :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxtb">;

def int_hexagon_A2_zxth :
Hexagon_i32_i32_Intrinsic<"HEXAGON_A2_zxth">;

def int_hexagon_A4_andn :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_andn">;

def int_hexagon_A4_andnp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_andnp">;

def int_hexagon_A4_bitsplit :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitsplit">;

def int_hexagon_A4_bitspliti :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_bitspliti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_boundscheck :
Hexagon_i32_i32i64_Intrinsic<"HEXAGON_A4_boundscheck">;

def int_hexagon_A4_cmpbeq :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeq">;

def int_hexagon_A4_cmpbeqi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cmpbgt :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgt">;

def int_hexagon_A4_cmpbgti :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cmpbgtu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtu">;

def int_hexagon_A4_cmpbgtui :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpbgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cmpheq :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheq">;

def int_hexagon_A4_cmpheqi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmpheqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cmphgt :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgt">;

def int_hexagon_A4_cmphgti :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cmphgtu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtu">;

def int_hexagon_A4_cmphgtui :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cmphgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_combineir :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineir", [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_hexagon_A4_combineri :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_A4_combineri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cround_ri :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_ri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_cround_rr :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_cround_rr">;

def int_hexagon_A4_modwrapu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_modwrapu">;

def int_hexagon_A4_orn :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_orn">;

def int_hexagon_A4_ornp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_A4_ornp">;

def int_hexagon_A4_rcmpeq :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeq">;

def int_hexagon_A4_rcmpeqi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_rcmpneq :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneq">;

def int_hexagon_A4_rcmpneqi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_rcmpneqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_round_ri :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_round_ri_sat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_ri_sat", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_round_rr :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr">;

def int_hexagon_A4_round_rr_sat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A4_round_rr_sat">;

def int_hexagon_A4_tlbmatch :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_tlbmatch">;

def int_hexagon_A4_vcmpbeq_any :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;

def int_hexagon_A4_vcmpbeqi :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmpbgt :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A4_vcmpbgt">;

def int_hexagon_A4_vcmpbgti :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmpbgtui :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpbgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmpheqi :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpheqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmphgti :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmphgtui :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmphgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmpweqi :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpweqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmpwgti :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vcmpwgtui :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_A4_vcmpwgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A4_vrmaxh :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxh">;

def int_hexagon_A4_vrmaxuh :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuh">;

def int_hexagon_A4_vrmaxuw :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxuw">;

def int_hexagon_A4_vrmaxw :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrmaxw">;

def int_hexagon_A4_vrminh :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminh">;

def int_hexagon_A4_vrminuh :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuh">;

def int_hexagon_A4_vrminuw :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminuw">;

def int_hexagon_A4_vrminw :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_A4_vrminw">;

def int_hexagon_A5_vaddhubs :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A5_vaddhubs">;
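
// The C2_* defs below are predicate-register operations.  In these
// signatures the predicates appear to be modeled as plain i32 values;
// C2_tfrpr and C2_tfrrp (further down) read as the transfers between
// predicate and general registers (a hedged reading of the names, not
// normative).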

def int_hexagon_C2_all8 :
Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_all8">;

def int_hexagon_C2_and :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_and">;

def int_hexagon_C2_andn :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_andn">;

def int_hexagon_C2_any8 :
Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_any8">;

def int_hexagon_C2_bitsclr :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclr">;

def int_hexagon_C2_bitsclri :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsclri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_bitsset :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_bitsset">;

def int_hexagon_C2_cmpeq :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeq">;

def int_hexagon_C2_cmpeqi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpeqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_cmpeqp :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpeqp">;

def int_hexagon_C2_cmpgei :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgei", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_cmpgeui :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgeui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_cmpgt :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgt">;

def int_hexagon_C2_cmpgti :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgti", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_cmpgtp :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtp">;

def int_hexagon_C2_cmpgtu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtu">;

def int_hexagon_C2_cmpgtui :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpgtui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_cmpgtup :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_C2_cmpgtup">;

def int_hexagon_C2_cmplt :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmplt">;

def int_hexagon_C2_cmpltu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_cmpltu">;

def int_hexagon_C2_mask :
Hexagon_i64_i32_Intrinsic<"HEXAGON_C2_mask">;

def int_hexagon_C2_mux :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_mux">;

def int_hexagon_C2_muxii :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxii", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_C2_muxir :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxir", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_C2_muxri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C2_muxri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C2_not :
Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_not">;

def int_hexagon_C2_or :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_or">;

def int_hexagon_C2_orn :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_orn">;

def int_hexagon_C2_pxfer_map :
Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_pxfer_map">;

def int_hexagon_C2_tfrpr :
Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrpr">;

def int_hexagon_C2_tfrrp :
Hexagon_i32_i32_Intrinsic<"HEXAGON_C2_tfrrp">;

def int_hexagon_C2_vitpack :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_vitpack">;

def int_hexagon_C2_vmux :
Hexagon_i64_i32i64i64_Intrinsic<"HEXAGON_C2_vmux">;

def int_hexagon_C2_xor :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C2_xor">;

def int_hexagon_C4_and_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_and">;

def int_hexagon_C4_and_andn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_andn">;

def int_hexagon_C4_and_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_or">;

def int_hexagon_C4_and_orn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_and_orn">;

def int_hexagon_C4_cmplte :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplte">;

def int_hexagon_C4_cmpltei :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpltei", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C4_cmplteu :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteu">;

def int_hexagon_C4_cmplteui :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmplteui", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C4_cmpneq :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneq">;

def int_hexagon_C4_cmpneqi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_cmpneqi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C4_fastcorner9 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9">;

def int_hexagon_C4_fastcorner9_not :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_fastcorner9_not">;

def int_hexagon_C4_nbitsclr :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclr">;

def int_hexagon_C4_nbitsclri :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsclri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_C4_nbitsset :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_C4_nbitsset">;

def int_hexagon_C4_or_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_and">;

def int_hexagon_C4_or_andn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_andn">;

def int_hexagon_C4_or_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_or">;

def int_hexagon_C4_or_orn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_C4_or_orn">;

def int_hexagon_F2_conv_d2df :
Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_d2df">;

def int_hexagon_F2_conv_d2sf :
Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_d2sf">;

def int_hexagon_F2_conv_df2d :
Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d">;

def int_hexagon_F2_conv_df2d_chop :
Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;

def int_hexagon_F2_conv_df2sf :
Hexagon_float_double_Intrinsic<"HEXAGON_F2_conv_df2sf">;

def int_hexagon_F2_conv_df2ud :
Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud">;

def int_hexagon_F2_conv_df2ud_chop :
Hexagon_i64_double_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;

def int_hexagon_F2_conv_df2uw :
Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw">;

def int_hexagon_F2_conv_df2uw_chop :
Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;

def int_hexagon_F2_conv_df2w :
Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w">;

def int_hexagon_F2_conv_df2w_chop :
Hexagon_i32_double_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;

def int_hexagon_F2_conv_sf2d :
Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d">;

def int_hexagon_F2_conv_sf2d_chop :
Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;

def int_hexagon_F2_conv_sf2df :
Hexagon_double_float_Intrinsic<"HEXAGON_F2_conv_sf2df">;

def int_hexagon_F2_conv_sf2ud :
Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud">;

def int_hexagon_F2_conv_sf2ud_chop :
Hexagon_i64_float_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;

def int_hexagon_F2_conv_sf2uw :
Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw">;

def int_hexagon_F2_conv_sf2uw_chop :
Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;

def int_hexagon_F2_conv_sf2w :
Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w">;

def int_hexagon_F2_conv_sf2w_chop :
Hexagon_i32_float_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;

def int_hexagon_F2_conv_ud2df :
Hexagon_double_i64_Intrinsic<"HEXAGON_F2_conv_ud2df">;

def int_hexagon_F2_conv_ud2sf :
Hexagon_float_i64_Intrinsic<"HEXAGON_F2_conv_ud2sf">;

def int_hexagon_F2_conv_uw2df :
Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_uw2df">;

def int_hexagon_F2_conv_uw2sf :
Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_uw2sf">;

def int_hexagon_F2_conv_w2df :
Hexagon_double_i32_Intrinsic<"HEXAGON_F2_conv_w2df">;

def int_hexagon_F2_conv_w2sf :
Hexagon_float_i32_Intrinsic<"HEXAGON_F2_conv_w2sf">;

def int_hexagon_F2_dfclass :
Hexagon_i32_doublei32_Intrinsic<"HEXAGON_F2_dfclass", [IntrNoMem, Throws, ImmArg<ArgIndex<1>>]>;
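
// Note the extra Throws property on the F2 classify/compare/arithmetic defs
// from here on: unlike the F2_conv_* defs above, these intrinsics are not
// implied nounwind, presumably so the optimizer does not assume they cannot
// raise IEEE floating-point exceptions (a hedged interpretation; the
// property lists themselves are the authority).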

def int_hexagon_F2_dfcmpeq :
Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpeq", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfcmpge :
Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpge", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfcmpgt :
Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpgt", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfcmpuo :
Hexagon_i32_doubledouble_Intrinsic<"HEXAGON_F2_dfcmpuo", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfimm_n :
Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_n", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;

def int_hexagon_F2_dfimm_p :
Hexagon_double_i32_Intrinsic<"HEXAGON_F2_dfimm_p", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;

def int_hexagon_F2_sfadd :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfadd", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfclass :
Hexagon_i32_floati32_Intrinsic<"HEXAGON_F2_sfclass", [IntrNoMem, Throws, ImmArg<ArgIndex<1>>]>;

def int_hexagon_F2_sfcmpeq :
Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpeq", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfcmpge :
Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpge", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfcmpgt :
Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpgt", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfcmpuo :
Hexagon_i32_floatfloat_Intrinsic<"HEXAGON_F2_sfcmpuo", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffixupd :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupd", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffixupn :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sffixupn", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffixupr :
Hexagon_float_float_Intrinsic<"HEXAGON_F2_sffixupr", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffma :
Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffma_lib :
Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffma_lib", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffma_sc :
Hexagon_float_floatfloatfloati32_Intrinsic<"HEXAGON_F2_sffma_sc", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffms :
Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms", [IntrNoMem, Throws]>;

def int_hexagon_F2_sffms_lib :
Hexagon_float_floatfloatfloat_Intrinsic<"HEXAGON_F2_sffms_lib", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfimm_n :
Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_n", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;

def int_hexagon_F2_sfimm_p :
Hexagon_float_i32_Intrinsic<"HEXAGON_F2_sfimm_p", [IntrNoMem, Throws, ImmArg<ArgIndex<0>>]>;

def int_hexagon_F2_sfmax :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmax", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfmin :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmin", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfmpy :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfmpy", [IntrNoMem, Throws]>;

def int_hexagon_F2_sfsub :
Hexagon_float_floatfloat_Intrinsic<"HEXAGON_F2_sfsub", [IntrNoMem, Throws]>;
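
// The M2_* multiply defs that follow use a compact naming scheme (a hedged
// reading, consistent with the Hexagon PRM conventions): hh/hl/lh/ll select
// the high or low halfword of each source operand; s0/s1 is the
// post-multiply left shift by 0 or 1; acc adds the product to the
// destination and nac subtracts it; sat saturates and rnd rounds the result.
// For example, M2_mpy_acc_ll_s0 would be roughly
// acc += (low16 of a) * (low16 of b) with no shift.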

def int_hexagon_M2_acci :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_acci">;

def int_hexagon_M2_accii :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_accii", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_M2_cmaci_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmaci_s0">;

def int_hexagon_M2_cmacr_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacr_s0">;

def int_hexagon_M2_cmacs_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s0">;

def int_hexagon_M2_cmacs_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacs_s1">;

def int_hexagon_M2_cmacsc_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s0">;

def int_hexagon_M2_cmacsc_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cmacsc_s1">;

def int_hexagon_M2_cmpyi_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyi_s0">;

def int_hexagon_M2_cmpyr_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpyr_s0">;

def int_hexagon_M2_cmpyrs_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;

def int_hexagon_M2_cmpyrs_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;

def int_hexagon_M2_cmpyrsc_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;

def int_hexagon_M2_cmpyrsc_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;

def int_hexagon_M2_cmpys_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s0">;

def int_hexagon_M2_cmpys_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpys_s1">;

def int_hexagon_M2_cmpysc_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s0">;

def int_hexagon_M2_cmpysc_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_cmpysc_s1">;

def int_hexagon_M2_cnacs_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s0">;

def int_hexagon_M2_cnacs_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacs_s1">;

def int_hexagon_M2_cnacsc_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s0">;

def int_hexagon_M2_cnacsc_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_cnacsc_s1">;

def int_hexagon_M2_dpmpyss_acc_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;

def int_hexagon_M2_dpmpyss_nac_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;

def int_hexagon_M2_dpmpyss_rnd_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;

def int_hexagon_M2_dpmpyss_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;

def int_hexagon_M2_dpmpyuu_acc_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;

def int_hexagon_M2_dpmpyuu_nac_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;

def int_hexagon_M2_dpmpyuu_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;

def int_hexagon_M2_hmmpyh_rs1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;

def int_hexagon_M2_hmmpyh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;

def int_hexagon_M2_hmmpyl_rs1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;

def int_hexagon_M2_hmmpyl_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;

def int_hexagon_M2_maci :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_maci">;

def int_hexagon_M2_macsin :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsin", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_M2_macsip :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_macsip", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_M2_mmachs_rs0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs0">;

def int_hexagon_M2_mmachs_rs1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_rs1">;

def int_hexagon_M2_mmachs_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s0">;

def int_hexagon_M2_mmachs_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmachs_s1">;

def int_hexagon_M2_mmacls_rs0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs0">;

def int_hexagon_M2_mmacls_rs1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_rs1">;

def int_hexagon_M2_mmacls_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s0">;

def int_hexagon_M2_mmacls_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacls_s1">;

def int_hexagon_M2_mmacuhs_rs0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;

def int_hexagon_M2_mmacuhs_rs1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;

def int_hexagon_M2_mmacuhs_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;

def int_hexagon_M2_mmacuhs_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;

def int_hexagon_M2_mmaculs_rs0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;

def int_hexagon_M2_mmaculs_rs1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;

def int_hexagon_M2_mmaculs_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s0">;

def int_hexagon_M2_mmaculs_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_mmaculs_s1">;

def int_hexagon_M2_mmpyh_rs0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;

def int_hexagon_M2_mmpyh_rs1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;

def int_hexagon_M2_mmpyh_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s0">;

def int_hexagon_M2_mmpyh_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyh_s1">;

def int_hexagon_M2_mmpyl_rs0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;

def int_hexagon_M2_mmpyl_rs1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;

def int_hexagon_M2_mmpyl_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s0">;

def int_hexagon_M2_mmpyl_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyl_s1">;

def int_hexagon_M2_mmpyuh_rs0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;

def int_hexagon_M2_mmpyuh_rs1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;

def int_hexagon_M2_mmpyuh_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;

def int_hexagon_M2_mmpyuh_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;

def int_hexagon_M2_mmpyul_rs0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;

def int_hexagon_M2_mmpyul_rs1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;

def int_hexagon_M2_mmpyul_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s0">;

def int_hexagon_M2_mmpyul_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_mmpyul_s1">;

def int_hexagon_M2_mpy_acc_hh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;

def int_hexagon_M2_mpy_acc_hh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;

def int_hexagon_M2_mpy_acc_hl_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;

def int_hexagon_M2_mpy_acc_hl_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;

def int_hexagon_M2_mpy_acc_lh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;

def int_hexagon_M2_mpy_acc_lh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;

def int_hexagon_M2_mpy_acc_ll_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;

def int_hexagon_M2_mpy_acc_ll_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;

def int_hexagon_M2_mpy_acc_sat_hh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;

def int_hexagon_M2_mpy_acc_sat_hh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;

def int_hexagon_M2_mpy_acc_sat_hl_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;

def int_hexagon_M2_mpy_acc_sat_hl_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;

def int_hexagon_M2_mpy_acc_sat_lh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;

def int_hexagon_M2_mpy_acc_sat_lh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;

def int_hexagon_M2_mpy_acc_sat_ll_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;

def int_hexagon_M2_mpy_acc_sat_ll_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;

def int_hexagon_M2_mpy_hh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;

def int_hexagon_M2_mpy_hh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;

def int_hexagon_M2_mpy_hl_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;

def int_hexagon_M2_mpy_hl_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;

def int_hexagon_M2_mpy_lh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;

def int_hexagon_M2_mpy_lh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;

def int_hexagon_M2_mpy_ll_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;

def int_hexagon_M2_mpy_ll_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;

def int_hexagon_M2_mpy_nac_hh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;

def int_hexagon_M2_mpy_nac_hh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;

def int_hexagon_M2_mpy_nac_hl_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;

def int_hexagon_M2_mpy_nac_hl_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;

def int_hexagon_M2_mpy_nac_lh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;

def int_hexagon_M2_mpy_nac_lh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;

def int_hexagon_M2_mpy_nac_ll_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;

def int_hexagon_M2_mpy_nac_ll_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;

def int_hexagon_M2_mpy_nac_sat_hh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;

def int_hexagon_M2_mpy_nac_sat_hh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;

def int_hexagon_M2_mpy_nac_sat_hl_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;

def int_hexagon_M2_mpy_nac_sat_hl_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;

def int_hexagon_M2_mpy_nac_sat_lh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;

def int_hexagon_M2_mpy_nac_sat_lh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;

def int_hexagon_M2_mpy_nac_sat_ll_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;

def int_hexagon_M2_mpy_nac_sat_ll_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;

def int_hexagon_M2_mpy_rnd_hh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;

def int_hexagon_M2_mpy_rnd_hh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;

def int_hexagon_M2_mpy_rnd_hl_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;

def int_hexagon_M2_mpy_rnd_hl_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;

def int_hexagon_M2_mpy_rnd_lh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;

def int_hexagon_M2_mpy_rnd_lh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;

def int_hexagon_M2_mpy_rnd_ll_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;

def int_hexagon_M2_mpy_rnd_ll_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;

def int_hexagon_M2_mpy_sat_hh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;

def int_hexagon_M2_mpy_sat_hh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;

def int_hexagon_M2_mpy_sat_hl_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;

def int_hexagon_M2_mpy_sat_hl_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;

def int_hexagon_M2_mpy_sat_lh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;

def int_hexagon_M2_mpy_sat_lh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;

def int_hexagon_M2_mpy_sat_ll_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;

def int_hexagon_M2_mpy_sat_ll_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;

def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;

def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;

def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;

def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;

def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;

def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;

def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;

def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;

def int_hexagon_M2_mpy_up :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up">;

def int_hexagon_M2_mpy_up_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1">;

def int_hexagon_M2_mpy_up_s1_sat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;

def int_hexagon_M2_mpyd_acc_hh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;

def int_hexagon_M2_mpyd_acc_hh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;

def int_hexagon_M2_mpyd_acc_hl_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;

def int_hexagon_M2_mpyd_acc_hl_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;

def int_hexagon_M2_mpyd_acc_lh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;

def int_hexagon_M2_mpyd_acc_lh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;

def int_hexagon_M2_mpyd_acc_ll_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;

def int_hexagon_M2_mpyd_acc_ll_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;

def int_hexagon_M2_mpyd_hh_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;

def int_hexagon_M2_mpyd_hh_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;

def int_hexagon_M2_mpyd_hl_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;

def int_hexagon_M2_mpyd_hl_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;

def int_hexagon_M2_mpyd_lh_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;

def int_hexagon_M2_mpyd_lh_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;

def int_hexagon_M2_mpyd_ll_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;

def int_hexagon_M2_mpyd_ll_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;

def int_hexagon_M2_mpyd_nac_hh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;

def int_hexagon_M2_mpyd_nac_hh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;

def int_hexagon_M2_mpyd_nac_hl_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;

def int_hexagon_M2_mpyd_nac_hl_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;

def int_hexagon_M2_mpyd_nac_lh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;

def int_hexagon_M2_mpyd_nac_lh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;

def int_hexagon_M2_mpyd_nac_ll_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;

def int_hexagon_M2_mpyd_nac_ll_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;

def int_hexagon_M2_mpyd_rnd_hh_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;

def int_hexagon_M2_mpyd_rnd_hh_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;

def int_hexagon_M2_mpyd_rnd_hl_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;

def int_hexagon_M2_mpyd_rnd_hl_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;

def int_hexagon_M2_mpyd_rnd_lh_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;

def int_hexagon_M2_mpyd_rnd_lh_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;

def int_hexagon_M2_mpyd_rnd_ll_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;

def int_hexagon_M2_mpyd_rnd_ll_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;

def int_hexagon_M2_mpyi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyi">;

def int_hexagon_M2_mpysmi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysmi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_M2_mpysu_up :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpysu_up">;

def int_hexagon_M2_mpyu_acc_hh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;

def int_hexagon_M2_mpyu_acc_hh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;

def int_hexagon_M2_mpyu_acc_hl_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;

def int_hexagon_M2_mpyu_acc_hl_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;

def int_hexagon_M2_mpyu_acc_lh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;

def int_hexagon_M2_mpyu_acc_lh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;

def int_hexagon_M2_mpyu_acc_ll_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;

def int_hexagon_M2_mpyu_acc_ll_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;

def int_hexagon_M2_mpyu_hh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;

def int_hexagon_M2_mpyu_hh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;

def int_hexagon_M2_mpyu_hl_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;

def int_hexagon_M2_mpyu_hl_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;

def int_hexagon_M2_mpyu_lh_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;

def int_hexagon_M2_mpyu_lh_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;

def int_hexagon_M2_mpyu_ll_s0 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;

def int_hexagon_M2_mpyu_ll_s1 :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;

def int_hexagon_M2_mpyu_nac_hh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;

def int_hexagon_M2_mpyu_nac_hh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;

def int_hexagon_M2_mpyu_nac_hl_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;

def int_hexagon_M2_mpyu_nac_hl_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;

def int_hexagon_M2_mpyu_nac_lh_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;

def int_hexagon_M2_mpyu_nac_lh_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;

def int_hexagon_M2_mpyu_nac_ll_s0 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;

def int_hexagon_M2_mpyu_nac_ll_s1 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;

def int_hexagon_M2_mpyu_up :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyu_up">;

def int_hexagon_M2_mpyud_acc_hh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;

def int_hexagon_M2_mpyud_acc_hh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;

def int_hexagon_M2_mpyud_acc_hl_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;

def int_hexagon_M2_mpyud_acc_hl_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;

def int_hexagon_M2_mpyud_acc_lh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;

def int_hexagon_M2_mpyud_acc_lh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;

def int_hexagon_M2_mpyud_acc_ll_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;

def int_hexagon_M2_mpyud_acc_ll_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;

def int_hexagon_M2_mpyud_hh_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;

def int_hexagon_M2_mpyud_hh_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;

def int_hexagon_M2_mpyud_hl_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;

def int_hexagon_M2_mpyud_hl_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;

def int_hexagon_M2_mpyud_lh_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;

def int_hexagon_M2_mpyud_lh_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;

def int_hexagon_M2_mpyud_ll_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;

def int_hexagon_M2_mpyud_ll_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;

def int_hexagon_M2_mpyud_nac_hh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;

def int_hexagon_M2_mpyud_nac_hh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;

def int_hexagon_M2_mpyud_nac_hl_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;

def int_hexagon_M2_mpyud_nac_hl_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;

def int_hexagon_M2_mpyud_nac_lh_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;

def int_hexagon_M2_mpyud_nac_lh_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;

def int_hexagon_M2_mpyud_nac_ll_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;

def int_hexagon_M2_mpyud_nac_ll_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;

def int_hexagon_M2_mpyui :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_mpyui">;

def int_hexagon_M2_nacci :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_nacci">;

def int_hexagon_M2_naccii :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_naccii", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_M2_subacc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_subacc">;

def int_hexagon_M2_vabsdiffh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffh">;

def int_hexagon_M2_vabsdiffw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vabsdiffw">;

def int_hexagon_M2_vcmac_s0_sat_i :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;

def int_hexagon_M2_vcmac_s0_sat_r :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;

def int_hexagon_M2_vcmpy_s0_sat_i :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;

def int_hexagon_M2_vcmpy_s0_sat_r :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;

def int_hexagon_M2_vcmpy_s1_sat_i :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;

def int_hexagon_M2_vcmpy_s1_sat_r :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;

def int_hexagon_M2_vdmacs_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s0">;

def int_hexagon_M2_vdmacs_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vdmacs_s1">;

def int_hexagon_M2_vdmpyrs_s0 :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;

def int_hexagon_M2_vdmpyrs_s1 :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;

def int_hexagon_M2_vdmpys_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s0">;

def int_hexagon_M2_vdmpys_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vdmpys_s1">;

def int_hexagon_M2_vmac2 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2">;

def int_hexagon_M2_vmac2es :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es">;

def int_hexagon_M2_vmac2es_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s0">;

def int_hexagon_M2_vmac2es_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vmac2es_s1">;

def int_hexagon_M2_vmac2s_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s0">;

def int_hexagon_M2_vmac2s_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2s_s1">;

def int_hexagon_M2_vmac2su_s0 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s0">;

def int_hexagon_M2_vmac2su_s1 :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M2_vmac2su_s1">;

def int_hexagon_M2_vmpy2es_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;

def int_hexagon_M2_vmpy2es_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;

def int_hexagon_M2_vmpy2s_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;

def int_hexagon_M2_vmpy2s_s0pack :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;

def int_hexagon_M2_vmpy2s_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;

def int_hexagon_M2_vmpy2s_s1pack :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;

def int_hexagon_M2_vmpy2su_s0 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;

def int_hexagon_M2_vmpy2su_s1 :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;

def int_hexagon_M2_vraddh :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vraddh">;

def int_hexagon_M2_vradduh :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M2_vradduh">;

def int_hexagon_M2_vrcmaci_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;

def int_hexagon_M2_vrcmaci_s0c :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;

def int_hexagon_M2_vrcmacr_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;

def int_hexagon_M2_vrcmacr_s0c :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;

def int_hexagon_M2_vrcmpyi_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;

def int_hexagon_M2_vrcmpyi_s0c :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;

def int_hexagon_M2_vrcmpyr_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;

def int_hexagon_M2_vrcmpyr_s0c :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;

def int_hexagon_M2_vrcmpys_acc_s1 :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;

def int_hexagon_M2_vrcmpys_s1 :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;

def int_hexagon_M2_vrcmpys_s1rp :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;

def int_hexagon_M2_vrmac_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M2_vrmac_s0">;

def int_hexagon_M2_vrmpy_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M2_vrmpy_s0">;

def int_hexagon_M2_xor_xacc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_xor_xacc">;

def int_hexagon_M4_and_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_and">;

def int_hexagon_M4_and_andn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_andn">;

def int_hexagon_M4_and_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_or">;

def int_hexagon_M4_and_xor :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_and_xor">;

def int_hexagon_M4_cmpyi_wh :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_wh">;

def int_hexagon_M4_cmpyi_whc :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyi_whc">;

def int_hexagon_M4_cmpyr_wh :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_wh">;

def int_hexagon_M4_cmpyr_whc :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_M4_cmpyr_whc">;

def int_hexagon_M4_mac_up_s1_sat :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;

def int_hexagon_M4_mpyri_addi :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addi", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_M4_mpyri_addr :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_M4_mpyri_addr_u2 :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyri_addr_u2", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_M4_mpyrr_addi :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addi", [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_hexagon_M4_mpyrr_addr :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_mpyrr_addr">;

def int_hexagon_M4_nac_up_s1_sat :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;

def int_hexagon_M4_or_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_and">;

def int_hexagon_M4_or_andn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_andn">;

def int_hexagon_M4_or_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_or">;

def int_hexagon_M4_or_xor :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_or_xor">;

def int_hexagon_M4_pmpyw :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_pmpyw">;

def int_hexagon_M4_pmpyw_acc :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_pmpyw_acc">;

def int_hexagon_M4_vpmpyh :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M4_vpmpyh">;

def int_hexagon_M4_vpmpyh_acc :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;

def int_hexagon_M4_vrmpyeh_acc_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;

def int_hexagon_M4_vrmpyeh_acc_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;

def int_hexagon_M4_vrmpyeh_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;

def int_hexagon_M4_vrmpyeh_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;

def int_hexagon_M4_vrmpyoh_acc_s0 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;

def int_hexagon_M4_vrmpyoh_acc_s1 :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;

def int_hexagon_M4_vrmpyoh_s0 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;

def int_hexagon_M4_vrmpyoh_s1 :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;

def int_hexagon_M4_xor_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_and">;

def int_hexagon_M4_xor_andn :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_andn">;

def int_hexagon_M4_xor_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M4_xor_or">;

def int_hexagon_M4_xor_xacc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M4_xor_xacc">;

def int_hexagon_M5_vdmacbsu :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vdmacbsu">;

def int_hexagon_M5_vdmpybsu :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vdmpybsu">;

def int_hexagon_M5_vmacbsu :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbsu">;

def int_hexagon_M5_vmacbuu :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_M5_vmacbuu">;

def int_hexagon_M5_vmpybsu :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybsu">;

def int_hexagon_M5_vmpybuu :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_M5_vmpybuu">;

def int_hexagon_M5_vrmacbsu :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbsu">;

def int_hexagon_M5_vrmacbuu :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M5_vrmacbuu">;

def int_hexagon_M5_vrmpybsu :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybsu">;

def int_hexagon_M5_vrmpybuu :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M5_vrmpybuu">;
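
// The S2_* shift defs below follow a similar pattern (hedged reading):
// asl/asr are arithmetic shift left/right; _i_ variants take an immediate
// shift amount (hence the ImmArg property) while _r_ variants take it in a
// register; a trailing _p operates on a 64-bit register pair, _r on a 32-bit
// word; and acc/nac/and/or/xacc/xor fold the shifted value into the
// destination.  A minimal C sketch of the immediate form, assuming the
// __builtin_HEXAGON_S2_asl_i_r builtin exists:
//
//   unsigned shl4(unsigned x) {
//     return __builtin_HEXAGON_S2_asl_i_r(x, 4);  /* shift amount is ImmArg */
//   }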

def int_hexagon_S2_addasl_rrri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_addasl_rrri", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asl_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_p_xacc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asl_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_r_sat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_sat", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asl_i_r_xacc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_i_r_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asl_i_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vh", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asl_i_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_i_vw", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asl_r_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_p">;

def int_hexagon_S2_asl_r_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;

def int_hexagon_S2_asl_r_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_and">;

def int_hexagon_S2_asl_r_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;

def int_hexagon_S2_asl_r_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_or">;

def int_hexagon_S2_asl_r_p_xor :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;

def int_hexagon_S2_asl_r_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r">;

def int_hexagon_S2_asl_r_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;

def int_hexagon_S2_asl_r_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_and">;

def int_hexagon_S2_asl_r_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;

def int_hexagon_S2_asl_r_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_or">;

def int_hexagon_S2_asl_r_r_sat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;

def int_hexagon_S2_asl_r_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vh">;

def int_hexagon_S2_asl_r_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asl_r_vw">;

def int_hexagon_S2_asr_i_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_p_rnd :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_asr_i_r_rnd :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_svw_trun :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_i_svw_trun", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vh", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_i_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_i_vw", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_asr_r_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_p">;

def int_hexagon_S2_asr_r_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;

def int_hexagon_S2_asr_r_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_and">;

def int_hexagon_S2_asr_r_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;

def int_hexagon_S2_asr_r_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_or">;

def int_hexagon_S2_asr_r_p_xor :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;

def int_hexagon_S2_asr_r_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r">;

def int_hexagon_S2_asr_r_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;

def int_hexagon_S2_asr_r_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_and">;

def int_hexagon_S2_asr_r_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;

def int_hexagon_S2_asr_r_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_or">;

def int_hexagon_S2_asr_r_r_sat :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;

def int_hexagon_S2_asr_r_svw_trun :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;

def int_hexagon_S2_asr_r_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vh">;

def int_hexagon_S2_asr_r_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_asr_r_vw">;

def int_hexagon_S2_brev :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_brev">;

def int_hexagon_S2_brevp :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_brevp">;

def int_hexagon_S2_cl0 :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl0">;

def int_hexagon_S2_cl0p :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl0p">;

def int_hexagon_S2_cl1 :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_cl1">;

def int_hexagon_S2_cl1p :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_cl1p">;

def int_hexagon_S2_clb :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clb">;

def int_hexagon_S2_clbnorm :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_clbnorm">;

def int_hexagon_S2_clbp :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_clbp">;

def int_hexagon_S2_clrbit_i :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_clrbit_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_clrbit_r">;

def int_hexagon_S2_ct0 :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct0">;

def int_hexagon_S2_ct0p :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct0p">;

def int_hexagon_S2_ct1 :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_ct1">;

def int_hexagon_S2_ct1p :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_ct1p">;

def int_hexagon_S2_deinterleave :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_deinterleave">;

def int_hexagon_S2_extractu :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_extractu", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_extractu_rp :
Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S2_extractu_rp">;

def int_hexagon_S2_extractup :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S2_extractup", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_extractup_rp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_extractup_rp">;

def int_hexagon_S2_insert :
Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_insert", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S2_insert_rp :
Hexagon_i32_i32i32i64_Intrinsic<"HEXAGON_S2_insert_rp">;

def int_hexagon_S2_insertp :
Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S2_insertp", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S2_insertp_rp :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_S2_insertp_rp">;

def int_hexagon_S2_interleave :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_interleave">;

def int_hexagon_S2_lfsp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_lfsp">;

def int_hexagon_S2_lsl_r_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p">;

def int_hexagon_S2_lsl_r_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;

def int_hexagon_S2_lsl_r_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;

def int_hexagon_S2_lsl_r_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;

def int_hexagon_S2_lsl_r_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;

def int_hexagon_S2_lsl_r_p_xor :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;

def int_hexagon_S2_lsl_r_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r">;

def int_hexagon_S2_lsl_r_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;

def int_hexagon_S2_lsl_r_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;

def int_hexagon_S2_lsl_r_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;

def int_hexagon_S2_lsl_r_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;

def int_hexagon_S2_lsl_r_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vh">;

def int_hexagon_S2_lsl_r_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsl_r_vw">;

def int_hexagon_S2_lsr_i_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_lsr_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_p_xacc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_lsr_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_r_xacc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_lsr_i_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vh", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_lsr_i_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_i_vw", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_lsr_r_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p">;

def int_hexagon_S2_lsr_r_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;

def int_hexagon_S2_lsr_r_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;

def int_hexagon_S2_lsr_r_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;

def int_hexagon_S2_lsr_r_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;

def int_hexagon_S2_lsr_r_p_xor :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;

def int_hexagon_S2_lsr_r_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r">;

def int_hexagon_S2_lsr_r_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;

def int_hexagon_S2_lsr_r_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;

def int_hexagon_S2_lsr_r_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;

def int_hexagon_S2_lsr_r_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;

def int_hexagon_S2_lsr_r_vh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vh">;

def int_hexagon_S2_lsr_r_vw :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_lsr_r_vw">;

def int_hexagon_S2_packhl :
Hexagon_i64_i32i32_Intrinsic<"HEXAGON_S2_packhl">;

def int_hexagon_S2_parityp :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_S2_parityp">;

def int_hexagon_S2_setbit_i :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_setbit_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_setbit_r">;

def int_hexagon_S2_shuffeb :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeb">;

def int_hexagon_S2_shuffeh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffeh">;

def int_hexagon_S2_shuffob :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffob">;

def int_hexagon_S2_shuffoh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_shuffoh">;

def int_hexagon_S2_svsathb :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathb">;

def int_hexagon_S2_svsathub :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_svsathub">;

def int_hexagon_S2_tableidxb_goodsyntax :
Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S2_tableidxd_goodsyntax :
Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S2_tableidxh_goodsyntax :
Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S2_tableidxw_goodsyntax :
Hexagon_i32_i32i32i32i32_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S2_togglebit_i :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_togglebit_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_togglebit_r">;

def int_hexagon_S2_tstbit_i :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S2_tstbit_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_tstbit_r">;

def int_hexagon_S2_valignib :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignib", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_valignrb :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_valignrb">;

def int_hexagon_S2_vcnegh :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcnegh">;

def int_hexagon_S2_vcrotate :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S2_vcrotate">;

def int_hexagon_S2_vrcnegh :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vrcnegh">;

def int_hexagon_S2_vrndpackwh :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwh">;

def int_hexagon_S2_vrndpackwhs :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vrndpackwhs">;

def int_hexagon_S2_vsathb :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathb">;

def int_hexagon_S2_vsathb_nopack :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathb_nopack">;

def int_hexagon_S2_vsathub :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsathub">;

def int_hexagon_S2_vsathub_nopack :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsathub_nopack">;

def int_hexagon_S2_vsatwh :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwh">;

def int_hexagon_S2_vsatwh_nopack :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;

def int_hexagon_S2_vsatwuh :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vsatwuh">;

def int_hexagon_S2_vsatwuh_nopack :
Hexagon_i64_i64_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;

def int_hexagon_S2_vsplatrb :
Hexagon_i32_i32_Intrinsic<"HEXAGON_S2_vsplatrb">;

def int_hexagon_S2_vsplatrh :
Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsplatrh">;

def int_hexagon_S2_vspliceib :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vspliceib", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S2_vsplicerb :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S2_vsplicerb">;

def int_hexagon_S2_vsxtbh :
Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxtbh">;

def int_hexagon_S2_vsxthw :
Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vsxthw">;

def int_hexagon_S2_vtrunehb :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunehb">;

def int_hexagon_S2_vtrunewh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunewh">;

def int_hexagon_S2_vtrunohb :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S2_vtrunohb">;

def int_hexagon_S2_vtrunowh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S2_vtrunowh">;

def int_hexagon_S2_vzxtbh :
Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxtbh">;

def int_hexagon_S2_vzxthw :
Hexagon_i64_i32_Intrinsic<"HEXAGON_S2_vzxthw">;

def int_hexagon_S4_addaddi :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addaddi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_addi_asl_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_addi_lsr_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_addi_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_andi_asl_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_andi_lsr_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_andi_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_clbaddi :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_clbaddi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S4_clbpaddi :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S4_clbpaddi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S4_clbpnorm :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S4_clbpnorm">;

def int_hexagon_S4_extract :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_extract", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_extract_rp :
Hexagon_i32_i32i64_Intrinsic<"HEXAGON_S4_extract_rp">;

def int_hexagon_S4_extractp :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_extractp", [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_extractp_rp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_extractp_rp">;

def int_hexagon_S4_lsli :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_lsli", [IntrNoMem, ImmArg<ArgIndex<0>>]>;

def int_hexagon_S4_ntstbit_i :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_i", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S4_ntstbit_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_ntstbit_r">;

def int_hexagon_S4_or_andi :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_or_andix :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_andix", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_or_ori :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_or_ori", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_ori_asl_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_ori_lsr_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_ori_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_parity :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S4_parity">;

def int_hexagon_S4_subaddi :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subaddi", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S4_subi_asl_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_asl_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_subi_lsr_ri :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S4_subi_lsr_ri", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_vrcrotate :
Hexagon_i64_i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S4_vrcrotate_acc :
Hexagon_i64_i64i64i32i32_Intrinsic<"HEXAGON_S4_vrcrotate_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_S4_vxaddsubh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubh">;

def int_hexagon_S4_vxaddsubhr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubhr">;

def int_hexagon_S4_vxaddsubw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxaddsubw">;

def int_hexagon_S4_vxsubaddh :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddh">;

def int_hexagon_S4_vxsubaddhr :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddhr">;

def int_hexagon_S4_vxsubaddw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S4_vxsubaddw">;

def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S5_asrhub_sat :
Hexagon_i32_i64i32_Intrinsic<"HEXAGON_S5_asrhub_sat", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S5_popcountp :
Hexagon_i32_i64_Intrinsic<"HEXAGON_S5_popcountp">;

def int_hexagon_S5_vasrhrnd_goodsyntax :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_Y2_dccleana :
Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dccleana", []>;

def int_hexagon_Y2_dccleaninva :
Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dccleaninva", []>;

def int_hexagon_Y2_dcfetch :
Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dcfetch", []>;

def int_hexagon_Y2_dcinva :
Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dcinva", []>;

def int_hexagon_Y2_dczeroa :
Hexagon__ptr_Intrinsic<"HEXAGON_Y2_dczeroa", []>;

def int_hexagon_Y4_l2fetch :
Hexagon__ptri32_Intrinsic<"HEXAGON_Y4_l2fetch", []>;

def int_hexagon_Y5_l2fetch :
Hexagon__ptri64_Intrinsic<"HEXAGON_Y5_l2fetch", []>;
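
// The Y2 operations above are data-cache maintenance primitives (clean,
// clean-and-invalidate, fetch, invalidate, and zero the line containing the
// given address), and Y4/Y5 l2fetch issue an L2 prefetch described by the
// extra scalar operand. All of them access memory, so they pass an explicit
// empty property list rather than the default IntrNoMem.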

// V60 Scalar Instructions.
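// The S6_rol_i_* records are the rotate-left-by-immediate family, mirroring
// the shift records above, including the same accumulating suffix forms and
// ImmArg placement.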

def int_hexagon_S6_rol_i_p :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_S6_rol_i_p", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S6_rol_i_p_acc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_p_and :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_p_nac :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_p_or :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_p_xacc :
Hexagon_i64_i64i64i32_Intrinsic<"HEXAGON_S6_rol_i_p_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_r :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S6_rol_i_r", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_S6_rol_i_r_acc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_acc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_r_and :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_and", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_r_nac :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_nac", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_r_or :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_or", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_S6_rol_i_r_xacc :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_S6_rol_i_r_xacc", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

// V62 Scalar Instructions.

def int_hexagon_M6_vabsdiffb :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffb">;

def int_hexagon_M6_vabsdiffub :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M6_vabsdiffub">;

def int_hexagon_S6_vsplatrbp :
Hexagon_i64_i32_Intrinsic<"HEXAGON_S6_vsplatrbp">;

def int_hexagon_S6_vtrunehb_ppp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;

def int_hexagon_S6_vtrunohb_ppp :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;

// V65 Scalar Instructions.

def int_hexagon_A6_vcmpbeq_notany :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_A6_vcmpbeq_notany">;

// V66 Scalar Instructions.

def int_hexagon_F2_dfadd :
Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfadd", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfsub :
Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfsub", [IntrNoMem, Throws]>;

def int_hexagon_M2_mnaci :
Hexagon_i32_i32i32i32_Intrinsic<"HEXAGON_M2_mnaci">;

def int_hexagon_S2_mask :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_S2_mask", [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;

// V67 Scalar Instructions.

def int_hexagon_A7_clip :
Hexagon_i32_i32i32_Intrinsic<"HEXAGON_A7_clip", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A7_croundd_ri :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_A7_croundd_ri", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_A7_croundd_rr :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_A7_croundd_rr">;

def int_hexagon_A7_vclip :
Hexagon_i64_i64i32_Intrinsic<"HEXAGON_A7_vclip", [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_hexagon_F2_dfmax :
Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmax", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfmin :
Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmin", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfmpyfix :
Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmpyfix", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfmpyhh :
Hexagon_double_doubledoubledouble_Intrinsic<"HEXAGON_F2_dfmpyhh", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfmpylh :
Hexagon_double_doubledoubledouble_Intrinsic<"HEXAGON_F2_dfmpylh", [IntrNoMem, Throws]>;

def int_hexagon_F2_dfmpyll :
Hexagon_double_doubledouble_Intrinsic<"HEXAGON_F2_dfmpyll", [IntrNoMem, Throws]>;
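
// The V67 dfmpy* records expose double-precision multiplication as a
// multi-step sequence: dfmpyll forms the low partial product, dfmpylh and
// dfmpyhh accumulate the cross and high partial products into it (hence
// their extra leading double operand), and dfmpyfix pre-conditions an
// operand for the sequence. Like dfadd/dfsub/dfmax/dfmin above, they carry
// the Throws property in addition to IntrNoMem.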

def int_hexagon_M7_dcmpyiw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyiw">;

def int_hexagon_M7_dcmpyiw_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyiw_acc">;

def int_hexagon_M7_dcmpyiwc :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyiwc">;

def int_hexagon_M7_dcmpyiwc_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyiwc_acc">;

def int_hexagon_M7_dcmpyrw :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyrw">;

def int_hexagon_M7_dcmpyrw_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyrw_acc">;

def int_hexagon_M7_dcmpyrwc :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_dcmpyrwc">;

def int_hexagon_M7_dcmpyrwc_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_dcmpyrwc_acc">;

def int_hexagon_M7_vdmpy :
Hexagon_i64_i64i64_Intrinsic<"HEXAGON_M7_vdmpy">;

def int_hexagon_M7_vdmpy_acc :
Hexagon_i64_i64i64i64_Intrinsic<"HEXAGON_M7_vdmpy_acc">;

def int_hexagon_M7_wcmpyiw :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiw">;

def int_hexagon_M7_wcmpyiw_rnd :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiw_rnd">;

def int_hexagon_M7_wcmpyiwc :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiwc">;

def int_hexagon_M7_wcmpyiwc_rnd :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyiwc_rnd">;

def int_hexagon_M7_wcmpyrw :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrw">;

def int_hexagon_M7_wcmpyrw_rnd :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrw_rnd">;

def int_hexagon_M7_wcmpyrwc :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrwc">;

def int_hexagon_M7_wcmpyrwc_rnd :
Hexagon_i32_i64i64_Intrinsic<"HEXAGON_M7_wcmpyrwc_rnd">;

// V68 Scalar Instructions.
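// The Y6_dm* records model the V68 user-mode DMA engine commands: dmlink
// links a new descriptor onto a chain, dmstart/dmresume begin or continue a
// transfer, dmpause suspends it, and dmpoll/dmwait return engine status.
// They read and write memory through descriptors, hence IntrArgMemOnly
// together with IntrHasSideEffects.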

def int_hexagon_Y6_dmlink :
Hexagon__ptrptr_Intrinsic<"HEXAGON_Y6_dmlink", [IntrArgMemOnly, IntrHasSideEffects]>;

def int_hexagon_Y6_dmpause :
Hexagon_i32__Intrinsic<"HEXAGON_Y6_dmpause", [IntrArgMemOnly, IntrHasSideEffects]>;

def int_hexagon_Y6_dmpoll :
Hexagon_i32__Intrinsic<"HEXAGON_Y6_dmpoll", [IntrArgMemOnly, IntrHasSideEffects]>;

def int_hexagon_Y6_dmresume :
Hexagon__ptr_Intrinsic<"HEXAGON_Y6_dmresume", [IntrArgMemOnly, IntrHasSideEffects]>;

def int_hexagon_Y6_dmstart :
Hexagon__ptr_Intrinsic<"HEXAGON_Y6_dmstart", [IntrArgMemOnly, IntrHasSideEffects]>;

def int_hexagon_Y6_dmwait :
Hexagon_i32__Intrinsic<"HEXAGON_Y6_dmwait", [IntrArgMemOnly, IntrHasSideEffects]>;

// V60 HVX Instructions.
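// HVX records come in pairs: the base name is the 64-byte vector-length
// mode (vectors modeled as v16i32, vector predicates as v64i1) and the
// _128B variant is the 128-byte mode (v32i32 / v128i1); vector pairs use
// v32i32 and v64i32 respectively. Each maps to a Clang builtin as above,
// e.g. __builtin_HEXAGON_V6_vaddw_128B for the 128-byte vector add.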

def int_hexagon_V6_extractw :
Hexagon_i32_v16i32i32_Intrinsic<"HEXAGON_V6_extractw">;

def int_hexagon_V6_extractw_128B :
Hexagon_i32_v32i32i32_Intrinsic<"HEXAGON_V6_extractw_128B">;

def int_hexagon_V6_hi :
Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_hi">;

def int_hexagon_V6_hi_128B :
Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_hi_128B">;

def int_hexagon_V6_lo :
Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_lo">;

def int_hexagon_V6_lo_128B :
Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_lo_128B">;

def int_hexagon_V6_lvsplatw :
Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw">;

def int_hexagon_V6_lvsplatw_128B :
Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;

def int_hexagon_V6_pred_and :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_and">;

def int_hexagon_V6_pred_and_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_and_128B">;

def int_hexagon_V6_pred_and_n :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_and_n">;

def int_hexagon_V6_pred_and_n_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;

def int_hexagon_V6_pred_not :
Hexagon_v64i1_v64i1_Intrinsic<"HEXAGON_V6_pred_not">;

def int_hexagon_V6_pred_not_128B :
Hexagon_v128i1_v128i1_Intrinsic<"HEXAGON_V6_pred_not_128B">;

def int_hexagon_V6_pred_or :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_or">;

def int_hexagon_V6_pred_or_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_or_128B">;

def int_hexagon_V6_pred_or_n :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_or_n">;

def int_hexagon_V6_pred_or_n_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;

def int_hexagon_V6_pred_scalar2 :
Hexagon_v64i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2">;

def int_hexagon_V6_pred_scalar2_128B :
Hexagon_v128i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;

def int_hexagon_V6_pred_xor :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_pred_xor">;

def int_hexagon_V6_pred_xor_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
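
// The vS32b_* records below are predicated vector stores: void result
// (hence the double underscore after "Hexagon" in the class name), with the
// qpred forms storing the lanes selected by the predicate, the nqpred forms
// the complement, and the _nt variants using the non-temporal encoding. All
// are marked IntrWriteMem.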

def int_hexagon_V6_vS32b_nqpred_ai :
Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_nqpred_ai_128B :
Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_nqpred_ai_128B", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_nt_nqpred_ai :
Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_nt_nqpred_ai_128B :
Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_nt_nqpred_ai_128B", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_nt_qpred_ai :
Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_nt_qpred_ai_128B :
Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_nt_qpred_ai_128B", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_qpred_ai :
Hexagon__v64i1ptrv16i32_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai", [IntrWriteMem]>;

def int_hexagon_V6_vS32b_qpred_ai_128B :
Hexagon__v128i1ptrv32i32_Intrinsic<"HEXAGON_V6_vS32b_qpred_ai_128B", [IntrWriteMem]>;

def int_hexagon_V6_vabsdiffh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffh">;

def int_hexagon_V6_vabsdiffh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;

def int_hexagon_V6_vabsdiffub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffub">;

def int_hexagon_V6_vabsdiffub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;

def int_hexagon_V6_vabsdiffuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffuh">;

def int_hexagon_V6_vabsdiffuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;

def int_hexagon_V6_vabsdiffw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vabsdiffw">;

def int_hexagon_V6_vabsdiffw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;

def int_hexagon_V6_vabsh :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh">;

def int_hexagon_V6_vabsh_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_128B">;

def int_hexagon_V6_vabsh_sat :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsh_sat">;

def int_hexagon_V6_vabsh_sat_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;

def int_hexagon_V6_vabsw :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw">;

def int_hexagon_V6_vabsw_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_128B">;

def int_hexagon_V6_vabsw_sat :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsw_sat">;

def int_hexagon_V6_vabsw_sat_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;

def int_hexagon_V6_vaddb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddb">;

def int_hexagon_V6_vaddb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_128B">;

def int_hexagon_V6_vaddb_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddb_dv">;

def int_hexagon_V6_vaddb_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
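
// The _dv suffix marks double-vector (vector-pair) forms, so their types
// double again on top of the base/_128B split: v32i32 pairs in 64-byte mode
// and v64i32 pairs in 128-byte mode.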

def int_hexagon_V6_vaddbnq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbnq">;

def int_hexagon_V6_vaddbnq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;

def int_hexagon_V6_vaddbq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbq">;

def int_hexagon_V6_vaddbq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
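
// A trailing q/nq marks a conditionally executed form: lanes are updated
// where the vector predicate is true (q) or false (nq), so these take a
// v64i1 or v128i1 predicate as their first operand.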

def int_hexagon_V6_vaddh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddh">;

def int_hexagon_V6_vaddh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_128B">;

def int_hexagon_V6_vaddh_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddh_dv">;

def int_hexagon_V6_vaddh_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;

def int_hexagon_V6_vaddhnq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhnq">;

def int_hexagon_V6_vaddhnq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;

def int_hexagon_V6_vaddhq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhq">;

def int_hexagon_V6_vaddhq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhq_128B">;

def int_hexagon_V6_vaddhsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhsat">;

def int_hexagon_V6_vaddhsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;

def int_hexagon_V6_vaddhsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;

def int_hexagon_V6_vaddhsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;

def int_hexagon_V6_vaddhw :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw">;

def int_hexagon_V6_vaddhw_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_128B">;

def int_hexagon_V6_vaddubh :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh">;

def int_hexagon_V6_vaddubh_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_128B">;

def int_hexagon_V6_vaddubsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubsat">;

def int_hexagon_V6_vaddubsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;

def int_hexagon_V6_vaddubsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;

def int_hexagon_V6_vaddubsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;

def int_hexagon_V6_vadduhsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhsat">;

def int_hexagon_V6_vadduhsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;

def int_hexagon_V6_vadduhsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;

def int_hexagon_V6_vadduhsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;

def int_hexagon_V6_vadduhw :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw">;

def int_hexagon_V6_vadduhw_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_128B">;

def int_hexagon_V6_vaddw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddw">;

def int_hexagon_V6_vaddw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_128B">;

def int_hexagon_V6_vaddw_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddw_dv">;

def int_hexagon_V6_vaddw_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;

def int_hexagon_V6_vaddwnq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwnq">;

def int_hexagon_V6_vaddwnq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;

def int_hexagon_V6_vaddwq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwq">;

def int_hexagon_V6_vaddwq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwq_128B">;

def int_hexagon_V6_vaddwsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddwsat">;

def int_hexagon_V6_vaddwsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;

def int_hexagon_V6_vaddwsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;

def int_hexagon_V6_vaddwsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;

def int_hexagon_V6_valignb :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignb">;

def int_hexagon_V6_valignb_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignb_128B">;

def int_hexagon_V6_valignbi :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_valignbi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_valignbi_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_valignbi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vand :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vand">;

def int_hexagon_V6_vand_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vand_128B">;

def int_hexagon_V6_vandqrt :
Hexagon_v16i32_v64i1i32_Intrinsic<"HEXAGON_V6_vandqrt">;

def int_hexagon_V6_vandqrt_128B :
Hexagon_v32i32_v128i1i32_Intrinsic<"HEXAGON_V6_vandqrt_128B">;

def int_hexagon_V6_vandqrt_acc :
Hexagon_v16i32_v16i32v64i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc">;

def int_hexagon_V6_vandqrt_acc_128B :
Hexagon_v32i32_v32i32v128i1i32_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;

def int_hexagon_V6_vandvrt :
Hexagon_v64i1_v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt">;

def int_hexagon_V6_vandvrt_128B :
Hexagon_v128i1_v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_128B">;

def int_hexagon_V6_vandvrt_acc :
Hexagon_v64i1_v64i1v16i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc">;

def int_hexagon_V6_vandvrt_acc_128B :
Hexagon_v128i1_v128i1v32i32i32_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
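
// vandqrt and vandvrt model the vand transfers between HVX vector-predicate
// (Q) registers and vector registers: vandqrt expands a predicate into a
// vector under a scalar mask, vandvrt derives a predicate from a vector,
// and the _acc forms fold the result into an existing destination.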

def int_hexagon_V6_vaslh :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslh">;

def int_hexagon_V6_vaslh_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_128B">;

def int_hexagon_V6_vaslhv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslhv">;

def int_hexagon_V6_vaslhv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslhv_128B">;

def int_hexagon_V6_vaslw :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vaslw">;

def int_hexagon_V6_vaslw_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_128B">;

def int_hexagon_V6_vaslw_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc">;

def int_hexagon_V6_vaslw_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;

def int_hexagon_V6_vaslwv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaslwv">;

def int_hexagon_V6_vaslwv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaslwv_128B">;

def int_hexagon_V6_vasrh :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrh">;

def int_hexagon_V6_vasrh_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_128B">;

def int_hexagon_V6_vasrhbrndsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;

def int_hexagon_V6_vasrhbrndsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;

def int_hexagon_V6_vasrhubrndsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;

def int_hexagon_V6_vasrhubrndsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;

def int_hexagon_V6_vasrhubsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat">;

def int_hexagon_V6_vasrhubsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;

def int_hexagon_V6_vasrhv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrhv">;

def int_hexagon_V6_vasrhv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrhv_128B">;

def int_hexagon_V6_vasrw :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vasrw">;

def int_hexagon_V6_vasrw_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_128B">;

def int_hexagon_V6_vasrw_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc">;

def int_hexagon_V6_vasrw_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;

def int_hexagon_V6_vasrwh :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwh">;

def int_hexagon_V6_vasrwh_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwh_128B">;

def int_hexagon_V6_vasrwhrndsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;

def int_hexagon_V6_vasrwhrndsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;

def int_hexagon_V6_vasrwhsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat">;

def int_hexagon_V6_vasrwhsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;

def int_hexagon_V6_vasrwuhsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat">;

def int_hexagon_V6_vasrwuhsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;

def int_hexagon_V6_vasrwv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vasrwv">;

def int_hexagon_V6_vasrwv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vasrwv_128B">;

def int_hexagon_V6_vassign :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vassign">;

def int_hexagon_V6_vassign_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassign_128B">;

def int_hexagon_V6_vassignp :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassignp">;

def int_hexagon_V6_vassignp_128B :
Hexagon_v64i32_v64i32_Intrinsic<"HEXAGON_V6_vassignp_128B">;

def int_hexagon_V6_vavgh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgh">;

def int_hexagon_V6_vavgh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgh_128B">;

def int_hexagon_V6_vavghrnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavghrnd">;

def int_hexagon_V6_vavghrnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;

def int_hexagon_V6_vavgub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgub">;

def int_hexagon_V6_vavgub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgub_128B">;

def int_hexagon_V6_vavgubrnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgubrnd">;

def int_hexagon_V6_vavgubrnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;

def int_hexagon_V6_vavguh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguh">;

def int_hexagon_V6_vavguh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguh_128B">;

def int_hexagon_V6_vavguhrnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguhrnd">;

def int_hexagon_V6_vavguhrnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;

def int_hexagon_V6_vavgw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgw">;

def int_hexagon_V6_vavgw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgw_128B">;

def int_hexagon_V6_vavgwrnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgwrnd">;

def int_hexagon_V6_vavgwrnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;

def int_hexagon_V6_vcl0h :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0h">;

def int_hexagon_V6_vcl0h_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0h_128B">;

def int_hexagon_V6_vcl0w :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcl0w">;

def int_hexagon_V6_vcl0w_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcl0w_128B">;

def int_hexagon_V6_vcombine :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcombine">;

def int_hexagon_V6_vcombine_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcombine_128B">;

def int_hexagon_V6_vd0 :
Hexagon_v16i32__Intrinsic<"HEXAGON_V6_vd0">;

def int_hexagon_V6_vd0_128B :
Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vd0_128B">;

def int_hexagon_V6_vdealb :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealb">;

def int_hexagon_V6_vdealb_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealb_128B">;

def int_hexagon_V6_vdealb4w :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdealb4w">;

def int_hexagon_V6_vdealb4w_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;

def int_hexagon_V6_vdealh :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vdealh">;

def int_hexagon_V6_vdealh_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vdealh_128B">;

def int_hexagon_V6_vdealvdd :
Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdealvdd">;

def int_hexagon_V6_vdealvdd_128B :
Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;

def int_hexagon_V6_vdelta :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdelta">;

def int_hexagon_V6_vdelta_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdelta_128B">;

def int_hexagon_V6_vdmpybus :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus">;

def int_hexagon_V6_vdmpybus_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;

def int_hexagon_V6_vdmpybus_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;

def int_hexagon_V6_vdmpybus_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;

def int_hexagon_V6_vdmpybus_dv :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;

def int_hexagon_V6_vdmpybus_dv_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;

def int_hexagon_V6_vdmpybus_dv_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;

def int_hexagon_V6_vdmpybus_dv_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;

def int_hexagon_V6_vdmpyhb :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb">;

def int_hexagon_V6_vdmpyhb_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;

def int_hexagon_V6_vdmpyhb_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;

def int_hexagon_V6_vdmpyhb_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;

def int_hexagon_V6_vdmpyhb_dv :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;

def int_hexagon_V6_vdmpyhb_dv_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;

def int_hexagon_V6_vdmpyhb_dv_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;

def int_hexagon_V6_vdmpyhb_dv_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;

def int_hexagon_V6_vdmpyhisat :
Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat">;

def int_hexagon_V6_vdmpyhisat_128B :
Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;

def int_hexagon_V6_vdmpyhisat_acc :
Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;

def int_hexagon_V6_vdmpyhisat_acc_128B :
Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;

def int_hexagon_V6_vdmpyhsat :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat">;

def int_hexagon_V6_vdmpyhsat_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;

def int_hexagon_V6_vdmpyhsat_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;

def int_hexagon_V6_vdmpyhsat_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;

def int_hexagon_V6_vdmpyhsuisat :
Hexagon_v16i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;

def int_hexagon_V6_vdmpyhsuisat_128B :
Hexagon_v32i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;

def int_hexagon_V6_vdmpyhsuisat_acc :
Hexagon_v16i32_v16i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;

def int_hexagon_V6_vdmpyhsuisat_acc_128B :
Hexagon_v32i32_v32i32v64i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;

def int_hexagon_V6_vdmpyhsusat :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;

def int_hexagon_V6_vdmpyhsusat_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;

def int_hexagon_V6_vdmpyhsusat_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;

def int_hexagon_V6_vdmpyhsusat_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;

def int_hexagon_V6_vdmpyhvsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;

def int_hexagon_V6_vdmpyhvsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;

def int_hexagon_V6_vdmpyhvsat_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;

def int_hexagon_V6_vdmpyhvsat_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;

def int_hexagon_V6_vdsaduh :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh">;

def int_hexagon_V6_vdsaduh_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;

def int_hexagon_V6_vdsaduh_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;

def int_hexagon_V6_vdsaduh_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
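
// The comparisons below (veq*, vgt*) produce a vector predicate (v64i1, or
// v128i1 for the _128B forms) rather than a data vector, and their
// _and/_or/_xor variants fold the comparison result into an incoming
// predicate operand.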

def int_hexagon_V6_veqb :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb">;

def int_hexagon_V6_veqb_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_128B">;

def int_hexagon_V6_veqb_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_and">;

def int_hexagon_V6_veqb_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_and_128B">;

def int_hexagon_V6_veqb_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_or">;

def int_hexagon_V6_veqb_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_or_128B">;

def int_hexagon_V6_veqb_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqb_xor">;

def int_hexagon_V6_veqb_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;

def int_hexagon_V6_veqh :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh">;

def int_hexagon_V6_veqh_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_128B">;

def int_hexagon_V6_veqh_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_and">;

def int_hexagon_V6_veqh_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_and_128B">;

def int_hexagon_V6_veqh_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_or">;

def int_hexagon_V6_veqh_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_or_128B">;

def int_hexagon_V6_veqh_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqh_xor">;

def int_hexagon_V6_veqh_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;

def int_hexagon_V6_veqw :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw">;

def int_hexagon_V6_veqw_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_128B">;

def int_hexagon_V6_veqw_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_and">;

def int_hexagon_V6_veqw_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_and_128B">;

def int_hexagon_V6_veqw_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_or">;

def int_hexagon_V6_veqw_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_or_128B">;

def int_hexagon_V6_veqw_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_veqw_xor">;

def int_hexagon_V6_veqw_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;

def int_hexagon_V6_vgtb :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb">;

def int_hexagon_V6_vgtb_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_128B">;

def int_hexagon_V6_vgtb_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_and">;

def int_hexagon_V6_vgtb_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;

def int_hexagon_V6_vgtb_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_or">;

def int_hexagon_V6_vgtb_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;

def int_hexagon_V6_vgtb_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtb_xor">;

def int_hexagon_V6_vgtb_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;

def int_hexagon_V6_vgth :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth">;

def int_hexagon_V6_vgth_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_128B">;

def int_hexagon_V6_vgth_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_and">;

def int_hexagon_V6_vgth_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_and_128B">;

def int_hexagon_V6_vgth_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_or">;

def int_hexagon_V6_vgth_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_or_128B">;

def int_hexagon_V6_vgth_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgth_xor">;

def int_hexagon_V6_vgth_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;

def int_hexagon_V6_vgtub :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub">;

def int_hexagon_V6_vgtub_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_128B">;

def int_hexagon_V6_vgtub_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_and">;

def int_hexagon_V6_vgtub_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;

def int_hexagon_V6_vgtub_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_or">;

def int_hexagon_V6_vgtub_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;

def int_hexagon_V6_vgtub_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtub_xor">;

def int_hexagon_V6_vgtub_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;

def int_hexagon_V6_vgtuh :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh">;

def int_hexagon_V6_vgtuh_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_128B">;

def int_hexagon_V6_vgtuh_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_and">;

def int_hexagon_V6_vgtuh_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;

def int_hexagon_V6_vgtuh_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_or">;

def int_hexagon_V6_vgtuh_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;

def int_hexagon_V6_vgtuh_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuh_xor">;

def int_hexagon_V6_vgtuh_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;

def int_hexagon_V6_vgtuw :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw">;

def int_hexagon_V6_vgtuw_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_128B">;

def int_hexagon_V6_vgtuw_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_and">;

def int_hexagon_V6_vgtuw_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;

def int_hexagon_V6_vgtuw_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_or">;

def int_hexagon_V6_vgtuw_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;

def int_hexagon_V6_vgtuw_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtuw_xor">;

def int_hexagon_V6_vgtuw_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;

def int_hexagon_V6_vgtw :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw">;

def int_hexagon_V6_vgtw_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_128B">;

def int_hexagon_V6_vgtw_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_and">;

def int_hexagon_V6_vgtw_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;

def int_hexagon_V6_vgtw_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_or">;

def int_hexagon_V6_vgtw_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;

def int_hexagon_V6_vgtw_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtw_xor">;

def int_hexagon_V6_vgtw_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;

def int_hexagon_V6_vinsertwr :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vinsertwr">;

def int_hexagon_V6_vinsertwr_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;

def int_hexagon_V6_vlalignb :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignb">;

def int_hexagon_V6_vlalignb_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignb_128B">;

def int_hexagon_V6_vlalignbi :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlalignbi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vlalignbi_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlalignbi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vlsrh :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrh">;

def int_hexagon_V6_vlsrh_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrh_128B">;

def int_hexagon_V6_vlsrhv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrhv">;

def int_hexagon_V6_vlsrhv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;

def int_hexagon_V6_vlsrw :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrw">;

def int_hexagon_V6_vlsrw_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrw_128B">;

def int_hexagon_V6_vlsrwv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vlsrwv">;

def int_hexagon_V6_vlsrwv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
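
// The vlut records below are vector table lookups (vlutvvb with byte
// results, vlutvwh producing a halfword vector pair); the _oracc forms OR
// the looked-up values into an existing destination, hence their extra
// leading vector operand.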

def int_hexagon_V6_vlutvvb :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb">;

def int_hexagon_V6_vlutvvb_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;

def int_hexagon_V6_vlutvvb_oracc :
Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;

def int_hexagon_V6_vlutvvb_oracc_128B :
Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;

def int_hexagon_V6_vlutvwh :
Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh">;

def int_hexagon_V6_vlutvwh_128B :
Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;

def int_hexagon_V6_vlutvwh_oracc :
Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;

def int_hexagon_V6_vlutvwh_oracc_128B :
Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;

def int_hexagon_V6_vmaxh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxh">;

def int_hexagon_V6_vmaxh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxh_128B">;

def int_hexagon_V6_vmaxub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxub">;

def int_hexagon_V6_vmaxub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxub_128B">;

def int_hexagon_V6_vmaxuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxuh">;

def int_hexagon_V6_vmaxuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;

def int_hexagon_V6_vmaxw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxw">;

def int_hexagon_V6_vmaxw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxw_128B">;

def int_hexagon_V6_vminh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminh">;

def int_hexagon_V6_vminh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminh_128B">;

def int_hexagon_V6_vminub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminub">;

def int_hexagon_V6_vminub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminub_128B">;

def int_hexagon_V6_vminuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminuh">;

def int_hexagon_V6_vminuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminuh_128B">;

def int_hexagon_V6_vminw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminw">;

def int_hexagon_V6_vminw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminw_128B">;

def int_hexagon_V6_vmpabus :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus">;

def int_hexagon_V6_vmpabus_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_128B">;

def int_hexagon_V6_vmpabus_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc">;

def int_hexagon_V6_vmpabus_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;

def int_hexagon_V6_vmpabusv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabusv">;

def int_hexagon_V6_vmpabusv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;

def int_hexagon_V6_vmpabuuv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpabuuv">;

def int_hexagon_V6_vmpabuuv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;

def int_hexagon_V6_vmpahb :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb">;

def int_hexagon_V6_vmpahb_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_128B">;

def int_hexagon_V6_vmpahb_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc">;

def int_hexagon_V6_vmpahb_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;

def int_hexagon_V6_vmpybus :
Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus">;

def int_hexagon_V6_vmpybus_128B :
Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_128B">;

def int_hexagon_V6_vmpybus_acc :
Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc">;

def int_hexagon_V6_vmpybus_acc_128B :
Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;

def int_hexagon_V6_vmpybusv :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv">;

def int_hexagon_V6_vmpybusv_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;

def int_hexagon_V6_vmpybusv_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;

def int_hexagon_V6_vmpybusv_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;

def int_hexagon_V6_vmpybv :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv">;

def int_hexagon_V6_vmpybv_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_128B">;

def int_hexagon_V6_vmpybv_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpybv_acc">;

def int_hexagon_V6_vmpybv_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;

def int_hexagon_V6_vmpyewuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh">;

def int_hexagon_V6_vmpyewuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;

def int_hexagon_V6_vmpyh :
Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh">;

def int_hexagon_V6_vmpyh_128B :
Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_128B">;

def int_hexagon_V6_vmpyhsat_acc :
Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;

def int_hexagon_V6_vmpyhsat_acc_128B :
Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;

def int_hexagon_V6_vmpyhsrs :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs">;

def int_hexagon_V6_vmpyhsrs_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;

def int_hexagon_V6_vmpyhss :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyhss">;

def int_hexagon_V6_vmpyhss_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;

def int_hexagon_V6_vmpyhus :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus">;

def int_hexagon_V6_vmpyhus_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;

def int_hexagon_V6_vmpyhus_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;

def int_hexagon_V6_vmpyhus_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;

def int_hexagon_V6_vmpyhv :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv">;

def int_hexagon_V6_vmpyhv_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;

def int_hexagon_V6_vmpyhv_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;

def int_hexagon_V6_vmpyhv_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;

def int_hexagon_V6_vmpyhvsrs :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;

def int_hexagon_V6_vmpyhvsrs_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;

def int_hexagon_V6_vmpyieoh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyieoh">;

def int_hexagon_V6_vmpyieoh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;

def int_hexagon_V6_vmpyiewh_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;

def int_hexagon_V6_vmpyiewh_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;

def int_hexagon_V6_vmpyiewuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh">;

def int_hexagon_V6_vmpyiewuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;

def int_hexagon_V6_vmpyiewuh_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;

def int_hexagon_V6_vmpyiewuh_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;

def int_hexagon_V6_vmpyih :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih">;

def int_hexagon_V6_vmpyih_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_128B">;

def int_hexagon_V6_vmpyih_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyih_acc">;

def int_hexagon_V6_vmpyih_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;

def int_hexagon_V6_vmpyihb :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb">;

def int_hexagon_V6_vmpyihb_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;

def int_hexagon_V6_vmpyihb_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;

def int_hexagon_V6_vmpyihb_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;

def int_hexagon_V6_vmpyiowh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyiowh">;

def int_hexagon_V6_vmpyiowh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;

def int_hexagon_V6_vmpyiwb :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb">;

def int_hexagon_V6_vmpyiwb_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;

def int_hexagon_V6_vmpyiwb_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;

def int_hexagon_V6_vmpyiwb_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;

def int_hexagon_V6_vmpyiwh :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh">;

def int_hexagon_V6_vmpyiwh_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;

def int_hexagon_V6_vmpyiwh_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;

def int_hexagon_V6_vmpyiwh_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;

def int_hexagon_V6_vmpyowh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh">;

def int_hexagon_V6_vmpyowh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;

def int_hexagon_V6_vmpyowh_rnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;

def int_hexagon_V6_vmpyowh_rnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;

def int_hexagon_V6_vmpyowh_rnd_sacc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;

def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;

def int_hexagon_V6_vmpyowh_sacc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;

def int_hexagon_V6_vmpyowh_sacc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;

def int_hexagon_V6_vmpyub :
Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub">;

def int_hexagon_V6_vmpyub_128B :
Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_128B">;

def int_hexagon_V6_vmpyub_acc :
Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc">;

def int_hexagon_V6_vmpyub_acc_128B :
Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;

def int_hexagon_V6_vmpyubv :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv">;

def int_hexagon_V6_vmpyubv_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;

def int_hexagon_V6_vmpyubv_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;

def int_hexagon_V6_vmpyubv_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;

def int_hexagon_V6_vmpyuh :
Hexagon_v32i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh">;

def int_hexagon_V6_vmpyuh_128B :
Hexagon_v64i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;

def int_hexagon_V6_vmpyuh_acc :
Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;

def int_hexagon_V6_vmpyuh_acc_128B :
Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;

def int_hexagon_V6_vmpyuhv :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv">;

def int_hexagon_V6_vmpyuhv_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;

def int_hexagon_V6_vmpyuhv_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;

def int_hexagon_V6_vmpyuhv_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;

def int_hexagon_V6_vmux :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vmux">;

def int_hexagon_V6_vmux_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vmux_128B">;
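
// vmux is a per-lane select: where the predicate is true the result lane
// comes from the first vector operand, otherwise from the second.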

def int_hexagon_V6_vnavgh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgh">;

def int_hexagon_V6_vnavgh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgh_128B">;

def int_hexagon_V6_vnavgub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgub">;

def int_hexagon_V6_vnavgub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgub_128B">;

def int_hexagon_V6_vnavgw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgw">;

def int_hexagon_V6_vnavgw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgw_128B">;

def int_hexagon_V6_vnormamth :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamth">;

def int_hexagon_V6_vnormamth_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamth_128B">;

def int_hexagon_V6_vnormamtw :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnormamtw">;

def int_hexagon_V6_vnormamtw_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;

def int_hexagon_V6_vnot :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vnot">;

def int_hexagon_V6_vnot_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vnot_128B">;

def int_hexagon_V6_vor :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vor">;

def int_hexagon_V6_vor_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vor_128B">;

def int_hexagon_V6_vpackeb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeb">;

def int_hexagon_V6_vpackeb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeb_128B">;

def int_hexagon_V6_vpackeh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackeh">;

def int_hexagon_V6_vpackeh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackeh_128B">;

def int_hexagon_V6_vpackhb_sat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhb_sat">;

def int_hexagon_V6_vpackhb_sat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;

def int_hexagon_V6_vpackhub_sat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackhub_sat">;

def int_hexagon_V6_vpackhub_sat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;

def int_hexagon_V6_vpackob :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackob">;

def int_hexagon_V6_vpackob_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackob_128B">;

def int_hexagon_V6_vpackoh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackoh">;

def int_hexagon_V6_vpackoh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackoh_128B">;

def int_hexagon_V6_vpackwh_sat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwh_sat">;

def int_hexagon_V6_vpackwh_sat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;

def int_hexagon_V6_vpackwuh_sat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;

def int_hexagon_V6_vpackwuh_sat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;

def int_hexagon_V6_vpopcounth :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vpopcounth">;

def int_hexagon_V6_vpopcounth_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;

def int_hexagon_V6_vrdelta :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrdelta">;

def int_hexagon_V6_vrdelta_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrdelta_128B">;

def int_hexagon_V6_vrmpybus :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus">;

def int_hexagon_V6_vrmpybus_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;

def int_hexagon_V6_vrmpybus_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;

def int_hexagon_V6_vrmpybus_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;

def int_hexagon_V6_vrmpybusi :
Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vrmpybusi_128B :
Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vrmpybusi_acc :
Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vrmpybusi_acc_128B :
Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vrmpybusv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv">;

def int_hexagon_V6_vrmpybusv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;

def int_hexagon_V6_vrmpybusv_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;

def int_hexagon_V6_vrmpybusv_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;

def int_hexagon_V6_vrmpybv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv">;

def int_hexagon_V6_vrmpybv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;

def int_hexagon_V6_vrmpybv_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;

def int_hexagon_V6_vrmpybv_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;

def int_hexagon_V6_vrmpyub :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub">;

def int_hexagon_V6_vrmpyub_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;

def int_hexagon_V6_vrmpyub_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;

def int_hexagon_V6_vrmpyub_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;

def int_hexagon_V6_vrmpyubi :
Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vrmpyubi_128B :
Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vrmpyubi_acc :
Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vrmpyubi_acc_128B :
Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vrmpyubv :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv">;

def int_hexagon_V6_vrmpyubv_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;

def int_hexagon_V6_vrmpyubv_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;

def int_hexagon_V6_vrmpyubv_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;

def int_hexagon_V6_vror :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vror">;

def int_hexagon_V6_vror_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vror_128B">;

def int_hexagon_V6_vroundhb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhb">;

def int_hexagon_V6_vroundhb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhb_128B">;

def int_hexagon_V6_vroundhub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundhub">;

def int_hexagon_V6_vroundhub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundhub_128B">;

def int_hexagon_V6_vroundwh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwh">;

def int_hexagon_V6_vroundwh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwh_128B">;

def int_hexagon_V6_vroundwuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vroundwuh">;

def int_hexagon_V6_vroundwuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;

def int_hexagon_V6_vrsadubi :
Hexagon_v32i32_v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vrsadubi_128B :
Hexagon_v64i32_v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vrsadubi_acc :
Hexagon_v32i32_v32i32v32i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vrsadubi_acc_128B :
Hexagon_v64i32_v64i32v64i32i32i32_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vsathub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsathub">;

def int_hexagon_V6_vsathub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsathub_128B">;

def int_hexagon_V6_vsatwh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatwh">;

def int_hexagon_V6_vsatwh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatwh_128B">;

def int_hexagon_V6_vsb :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsb">;

def int_hexagon_V6_vsb_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsb_128B">;

def int_hexagon_V6_vsh :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vsh">;

def int_hexagon_V6_vsh_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vsh_128B">;

def int_hexagon_V6_vshufeh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufeh">;

def int_hexagon_V6_vshufeh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufeh_128B">;

def int_hexagon_V6_vshuffb :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffb">;

def int_hexagon_V6_vshuffb_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffb_128B">;

def int_hexagon_V6_vshuffeb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffeb">;

def int_hexagon_V6_vshuffeb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;

def int_hexagon_V6_vshuffh :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vshuffh">;

def int_hexagon_V6_vshuffh_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vshuffh_128B">;

def int_hexagon_V6_vshuffob :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshuffob">;

def int_hexagon_V6_vshuffob_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshuffob_128B">;

def int_hexagon_V6_vshuffvdd :
Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd">;

def int_hexagon_V6_vshuffvdd_128B :
Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;

def int_hexagon_V6_vshufoeb :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeb">;

def int_hexagon_V6_vshufoeb_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;

def int_hexagon_V6_vshufoeh :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoeh">;

def int_hexagon_V6_vshufoeh_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;

def int_hexagon_V6_vshufoh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vshufoh">;

def int_hexagon_V6_vshufoh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vshufoh_128B">;

def int_hexagon_V6_vsubb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubb">;

def int_hexagon_V6_vsubb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_128B">;

def int_hexagon_V6_vsubb_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubb_dv">;

def int_hexagon_V6_vsubb_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;

def int_hexagon_V6_vsubbnq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbnq">;

def int_hexagon_V6_vsubbnq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;

def int_hexagon_V6_vsubbq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbq">;

def int_hexagon_V6_vsubbq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbq_128B">;

def int_hexagon_V6_vsubh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubh">;

def int_hexagon_V6_vsubh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_128B">;

def int_hexagon_V6_vsubh_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubh_dv">;

def int_hexagon_V6_vsubh_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;

def int_hexagon_V6_vsubhnq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhnq">;

def int_hexagon_V6_vsubhnq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;

def int_hexagon_V6_vsubhq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhq">;

def int_hexagon_V6_vsubhq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhq_128B">;

def int_hexagon_V6_vsubhsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhsat">;

def int_hexagon_V6_vsubhsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;

def int_hexagon_V6_vsubhsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;

def int_hexagon_V6_vsubhsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;

def int_hexagon_V6_vsubhw :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubhw">;

def int_hexagon_V6_vsubhw_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubhw_128B">;

def int_hexagon_V6_vsububh :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububh">;

def int_hexagon_V6_vsububh_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububh_128B">;

def int_hexagon_V6_vsububsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsububsat">;

def int_hexagon_V6_vsububsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_128B">;

def int_hexagon_V6_vsububsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsububsat_dv">;

def int_hexagon_V6_vsububsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;

def int_hexagon_V6_vsubuhsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhsat">;

def int_hexagon_V6_vsubuhsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;

def int_hexagon_V6_vsubuhsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;

def int_hexagon_V6_vsubuhsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;

def int_hexagon_V6_vsubuhw :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuhw">;

def int_hexagon_V6_vsubuhw_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;

def int_hexagon_V6_vsubw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubw">;

def int_hexagon_V6_vsubw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_128B">;

def int_hexagon_V6_vsubw_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubw_dv">;

def int_hexagon_V6_vsubw_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;

def int_hexagon_V6_vsubwnq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwnq">;

def int_hexagon_V6_vsubwnq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;

def int_hexagon_V6_vsubwq :
Hexagon_v16i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwq">;

def int_hexagon_V6_vsubwq_128B :
Hexagon_v32i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwq_128B">;

def int_hexagon_V6_vsubwsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubwsat">;

def int_hexagon_V6_vsubwsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;

def int_hexagon_V6_vsubwsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;

def int_hexagon_V6_vsubwsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;

def int_hexagon_V6_vswap :
Hexagon_v32i32_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vswap">;

def int_hexagon_V6_vswap_128B :
Hexagon_v64i32_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vswap_128B">;
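
// vswap returns a vector pair: per the underlying instruction, lanes of
// the two input vectors are exchanged where the predicate is set, and both
// resulting vectors are returned.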

def int_hexagon_V6_vtmpyb :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb">;

def int_hexagon_V6_vtmpyb_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;

def int_hexagon_V6_vtmpyb_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;

def int_hexagon_V6_vtmpyb_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;

def int_hexagon_V6_vtmpybus :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus">;

def int_hexagon_V6_vtmpybus_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;

def int_hexagon_V6_vtmpybus_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;

def int_hexagon_V6_vtmpybus_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;

def int_hexagon_V6_vtmpyhb :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb">;

def int_hexagon_V6_vtmpyhb_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;

def int_hexagon_V6_vtmpyhb_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;

def int_hexagon_V6_vtmpyhb_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;

def int_hexagon_V6_vunpackb :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackb">;

def int_hexagon_V6_vunpackb_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackb_128B">;

def int_hexagon_V6_vunpackh :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackh">;

def int_hexagon_V6_vunpackh_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackh_128B">;

def int_hexagon_V6_vunpackob :
Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackob">;

def int_hexagon_V6_vunpackob_128B :
Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackob_128B">;

def int_hexagon_V6_vunpackoh :
Hexagon_v32i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vunpackoh">;

def int_hexagon_V6_vunpackoh_128B :
Hexagon_v64i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;

def int_hexagon_V6_vunpackub :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackub">;

def int_hexagon_V6_vunpackub_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackub_128B">;

def int_hexagon_V6_vunpackuh :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vunpackuh">;

def int_hexagon_V6_vunpackuh_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;

def int_hexagon_V6_vxor :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vxor">;

def int_hexagon_V6_vxor_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vxor_128B">;

def int_hexagon_V6_vzb :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzb">;

def int_hexagon_V6_vzb_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzb_128B">;

def int_hexagon_V6_vzh :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vzh">;

def int_hexagon_V6_vzh_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vzh_128B">;

// V62 HVX Instructions.
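//
// A note on the conventions used throughout (inferred from the type
// strings, not normative): every intrinsic comes in two flavors, one for
// the 64-byte HVX mode (vectors modeled as v16i32, predicates as v64i1)
// and a _128B twin for the 128-byte mode (v32i32 / v128i1). The class
// names encode <result types>_<operand types>; _acc variants accumulate
// into their first operand and _dv variants operate on double
// (register-pair) vectors.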

def int_hexagon_V6_lvsplatb :
Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb">;

def int_hexagon_V6_lvsplatb_128B :
Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplatb_128B">;

def int_hexagon_V6_lvsplath :
Hexagon_v16i32_i32_Intrinsic<"HEXAGON_V6_lvsplath">;

def int_hexagon_V6_lvsplath_128B :
Hexagon_v32i32_i32_Intrinsic<"HEXAGON_V6_lvsplath_128B">;

def int_hexagon_V6_pred_scalar2v2 :
Hexagon_v64i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2">;

def int_hexagon_V6_pred_scalar2v2_128B :
Hexagon_v128i1_i32_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">;

def int_hexagon_V6_shuffeqh :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_shuffeqh">;

def int_hexagon_V6_shuffeqh_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_shuffeqh_128B">;

def int_hexagon_V6_shuffeqw :
Hexagon_v64i1_v64i1v64i1_Intrinsic<"HEXAGON_V6_shuffeqw">;

def int_hexagon_V6_shuffeqw_128B :
Hexagon_v128i1_v128i1v128i1_Intrinsic<"HEXAGON_V6_shuffeqw_128B">;

def int_hexagon_V6_vaddbsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddbsat">;

def int_hexagon_V6_vaddbsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_128B">;

def int_hexagon_V6_vaddbsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv">;

def int_hexagon_V6_vaddbsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">;

def int_hexagon_V6_vaddcarry :
Hexagon_custom_v16i32v64i1_v16i32v16i32v64i1_Intrinsic;

def int_hexagon_V6_vaddcarry_128B :
Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B;
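
// The carry intrinsics use the Hexagon_custom_* classes because they
// return two values, a result vector plus a predicate carry-out
// ({v16i32, v64i1} in 64-byte mode), and consume a predicate carry-in as
// their last operand.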

def int_hexagon_V6_vaddclbh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbh">;

def int_hexagon_V6_vaddclbh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbh_128B">;

def int_hexagon_V6_vaddclbw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddclbw">;

def int_hexagon_V6_vaddclbw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddclbw_128B">;

def int_hexagon_V6_vaddhw_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddhw_acc">;

def int_hexagon_V6_vaddhw_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">;

def int_hexagon_V6_vaddubh_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddubh_acc">;

def int_hexagon_V6_vaddubh_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">;

def int_hexagon_V6_vaddububb_sat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vaddububb_sat">;

def int_hexagon_V6_vaddububb_sat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">;

def int_hexagon_V6_vadduhw_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduhw_acc">;

def int_hexagon_V6_vadduhw_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">;

def int_hexagon_V6_vadduwsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadduwsat">;

def int_hexagon_V6_vadduwsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_128B">;

def int_hexagon_V6_vadduwsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv">;

def int_hexagon_V6_vadduwsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">;

def int_hexagon_V6_vandnqrt :
Hexagon_v16i32_v64i1i32_Intrinsic<"HEXAGON_V6_vandnqrt">;

def int_hexagon_V6_vandnqrt_128B :
Hexagon_v32i32_v128i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_128B">;

def int_hexagon_V6_vandnqrt_acc :
Hexagon_v16i32_v16i32v64i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc">;

def int_hexagon_V6_vandnqrt_acc_128B :
Hexagon_v32i32_v32i32v128i1i32_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">;

def int_hexagon_V6_vandvnqv :
Hexagon_v16i32_v64i1v16i32_Intrinsic<"HEXAGON_V6_vandvnqv">;

def int_hexagon_V6_vandvnqv_128B :
Hexagon_v32i32_v128i1v32i32_Intrinsic<"HEXAGON_V6_vandvnqv_128B">;

def int_hexagon_V6_vandvqv :
Hexagon_v16i32_v64i1v16i32_Intrinsic<"HEXAGON_V6_vandvqv">;

def int_hexagon_V6_vandvqv_128B :
Hexagon_v32i32_v128i1v32i32_Intrinsic<"HEXAGON_V6_vandvqv_128B">;

def int_hexagon_V6_vasrhbsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat">;

def int_hexagon_V6_vasrhbsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">;

def int_hexagon_V6_vasruwuhrndsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">;

def int_hexagon_V6_vasruwuhrndsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">;

def int_hexagon_V6_vasrwuhrndsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">;

def int_hexagon_V6_vasrwuhrndsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">;

def int_hexagon_V6_vlsrb :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vlsrb">;

def int_hexagon_V6_vlsrb_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vlsrb_128B">;

def int_hexagon_V6_vlutvvb_nm :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm">;

def int_hexagon_V6_vlutvvb_nm_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">;

def int_hexagon_V6_vlutvvb_oracci :
Hexagon_v16i32_v16i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vlutvvb_oracci_128B :
Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vlutvvbi :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vlutvvbi_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvvbi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vlutvwh_nm :
Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm">;

def int_hexagon_V6_vlutvwh_nm_128B :
Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">;

def int_hexagon_V6_vlutvwh_oracci :
Hexagon_v32i32_v32i32v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vlutvwh_oracci_128B :
Hexagon_v64i32_v64i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vlutvwhi :
Hexagon_v32i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vlutvwhi_128B :
Hexagon_v64i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vlutvwhi_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_vmaxb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmaxb">;

def int_hexagon_V6_vmaxb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmaxb_128B">;

def int_hexagon_V6_vminb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vminb">;

def int_hexagon_V6_vminb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vminb_128B">;

def int_hexagon_V6_vmpauhb :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb">;

def int_hexagon_V6_vmpauhb_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_128B">;

def int_hexagon_V6_vmpauhb_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc">;

def int_hexagon_V6_vmpauhb_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">;

def int_hexagon_V6_vmpyewuh_64 :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64">;

def int_hexagon_V6_vmpyewuh_64_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">;

def int_hexagon_V6_vmpyiwub :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub">;

def int_hexagon_V6_vmpyiwub_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">;

def int_hexagon_V6_vmpyiwub_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">;

def int_hexagon_V6_vmpyiwub_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">;

def int_hexagon_V6_vmpyowh_64_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">;

def int_hexagon_V6_vmpyowh_64_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">;

def int_hexagon_V6_vrounduhub :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduhub">;

def int_hexagon_V6_vrounduhub_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduhub_128B">;

def int_hexagon_V6_vrounduwuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrounduwuh">;

def int_hexagon_V6_vrounduwuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">;

def int_hexagon_V6_vsatuwuh :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatuwuh">;

def int_hexagon_V6_vsatuwuh_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">;

def int_hexagon_V6_vsubbsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubbsat">;

def int_hexagon_V6_vsubbsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_128B">;

def int_hexagon_V6_vsubbsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv">;

def int_hexagon_V6_vsubbsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">;

def int_hexagon_V6_vsubcarry :
Hexagon_custom_v16i32v64i1_v16i32v16i32v64i1_Intrinsic;

def int_hexagon_V6_vsubcarry_128B :
Hexagon_custom_v32i32v128i1_v32i32v32i32v128i1_Intrinsic_128B;

def int_hexagon_V6_vsubububb_sat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubububb_sat">;

def int_hexagon_V6_vsubububb_sat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">;

def int_hexagon_V6_vsubuwsat :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsubuwsat">;

def int_hexagon_V6_vsubuwsat_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">;

def int_hexagon_V6_vsubuwsat_dv :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">;

def int_hexagon_V6_vsubuwsat_dv_128B :
Hexagon_v64i32_v64i32v64i32_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">;

// V65 HVX Instructions.

def int_hexagon_V6_vabsb :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb">;

def int_hexagon_V6_vabsb_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_128B">;

def int_hexagon_V6_vabsb_sat :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabsb_sat">;

def int_hexagon_V6_vabsb_sat_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabsb_sat_128B">;

def int_hexagon_V6_vaslh_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc">;

def int_hexagon_V6_vaslh_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vaslh_acc_128B">;

def int_hexagon_V6_vasrh_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc">;

def int_hexagon_V6_vasrh_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasrh_acc_128B">;

def int_hexagon_V6_vasruhubrndsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat">;

def int_hexagon_V6_vasruhubrndsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubrndsat_128B">;

def int_hexagon_V6_vasruhubsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat">;

def int_hexagon_V6_vasruhubsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruhubsat_128B">;

def int_hexagon_V6_vasruwuhsat :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat">;

def int_hexagon_V6_vasruwuhsat_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vasruwuhsat_128B">;

def int_hexagon_V6_vavgb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgb">;

def int_hexagon_V6_vavgb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgb_128B">;

def int_hexagon_V6_vavgbrnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavgbrnd">;

def int_hexagon_V6_vavgbrnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavgbrnd_128B">;

def int_hexagon_V6_vavguw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguw">;

def int_hexagon_V6_vavguw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguw_128B">;

def int_hexagon_V6_vavguwrnd :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vavguwrnd">;

def int_hexagon_V6_vavguwrnd_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vavguwrnd_128B">;

def int_hexagon_V6_vdd0 :
Hexagon_v32i32__Intrinsic<"HEXAGON_V6_vdd0">;

def int_hexagon_V6_vdd0_128B :
Hexagon_v64i32__Intrinsic<"HEXAGON_V6_vdd0_128B">;
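
// vdd0 takes no operands (note the empty operand slot in the class name);
// per the instruction it maps to, it yields a zero-initialized vector
// register pair.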

def int_hexagon_V6_vgathermh :
Hexagon__ptri32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermh", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermh_128B :
Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermh_128B", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermhq :
Hexagon__ptrv64i1i32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermhq", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermhq_128B :
Hexagon__ptrv128i1i32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhq_128B", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermhw :
Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhw", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermhw_128B :
Hexagon__ptri32i32v64i32_Intrinsic<"HEXAGON_V6_vgathermhw_128B", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermhwq :
Hexagon__ptrv64i1i32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermhwq", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermhwq_128B :
Hexagon__ptrv128i1i32i32v64i32_Intrinsic<"HEXAGON_V6_vgathermhwq_128B", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermw :
Hexagon__ptri32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermw", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermw_128B :
Hexagon__ptri32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermw_128B", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermwq :
Hexagon__ptrv64i1i32i32v16i32_Intrinsic<"HEXAGON_V6_vgathermwq", [IntrArgMemOnly]>;

def int_hexagon_V6_vgathermwq_128B :
Hexagon__ptrv128i1i32i32v32i32_Intrinsic<"HEXAGON_V6_vgathermwq_128B", [IntrArgMemOnly]>;
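
// The gather intrinsics return void and deliver their result through the
// leading pointer operand, hence IntrArgMemOnly. The remaining operands
// appear to be a base address, a region size/modifier, and a vector of
// element offsets; the *q forms take an additional predicate that masks
// lanes.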

def int_hexagon_V6_vlut4 :
Hexagon_v16i32_v16i32i64_Intrinsic<"HEXAGON_V6_vlut4">;

def int_hexagon_V6_vlut4_128B :
Hexagon_v32i32_v32i32i64_Intrinsic<"HEXAGON_V6_vlut4_128B">;
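
// In vlut4 the i64 scalar operand packs the lookup table itself: per the
// V65 ISA description, four 16-bit entries carried in a register pair.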

def int_hexagon_V6_vmpabuu :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu">;

def int_hexagon_V6_vmpabuu_128B :
Hexagon_v64i32_v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_128B">;

def int_hexagon_V6_vmpabuu_acc :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc">;

def int_hexagon_V6_vmpabuu_acc_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_vmpabuu_acc_128B">;

def int_hexagon_V6_vmpahhsat :
Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat">;

def int_hexagon_V6_vmpahhsat_128B :
Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpahhsat_128B">;

def int_hexagon_V6_vmpauhuhsat :
Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat">;

def int_hexagon_V6_vmpauhuhsat_128B :
Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpauhuhsat_128B">;

def int_hexagon_V6_vmpsuhuhsat :
Hexagon_v16i32_v16i32v16i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat">;

def int_hexagon_V6_vmpsuhuhsat_128B :
Hexagon_v32i32_v32i32v32i32i64_Intrinsic<"HEXAGON_V6_vmpsuhuhsat_128B">;

def int_hexagon_V6_vmpyh_acc :
Hexagon_v32i32_v32i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc">;

def int_hexagon_V6_vmpyh_acc_128B :
Hexagon_v64i32_v64i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyh_acc_128B">;

def int_hexagon_V6_vmpyuhe :
Hexagon_v16i32_v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe">;

def int_hexagon_V6_vmpyuhe_128B :
Hexagon_v32i32_v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_128B">;

def int_hexagon_V6_vmpyuhe_acc :
Hexagon_v16i32_v16i32v16i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc">;

def int_hexagon_V6_vmpyuhe_acc_128B :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_vmpyuhe_acc_128B">;

def int_hexagon_V6_vnavgb :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vnavgb">;

def int_hexagon_V6_vnavgb_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vnavgb_128B">;

def int_hexagon_V6_vprefixqb :
Hexagon_v16i32_v64i1_Intrinsic<"HEXAGON_V6_vprefixqb">;

def int_hexagon_V6_vprefixqb_128B :
Hexagon_v32i32_v128i1_Intrinsic<"HEXAGON_V6_vprefixqb_128B">;

def int_hexagon_V6_vprefixqh :
Hexagon_v16i32_v64i1_Intrinsic<"HEXAGON_V6_vprefixqh">;

def int_hexagon_V6_vprefixqh_128B :
Hexagon_v32i32_v128i1_Intrinsic<"HEXAGON_V6_vprefixqh_128B">;

def int_hexagon_V6_vprefixqw :
Hexagon_v16i32_v64i1_Intrinsic<"HEXAGON_V6_vprefixqw">;

def int_hexagon_V6_vprefixqw_128B :
Hexagon_v32i32_v128i1_Intrinsic<"HEXAGON_V6_vprefixqw_128B">;

def int_hexagon_V6_vscattermh :
Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermh", [IntrWriteMem]>;

def int_hexagon_V6_vscattermh_128B :
Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermh_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermh_add :
Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermh_add", [IntrWriteMem]>;

def int_hexagon_V6_vscattermh_add_128B :
Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermh_add_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhq :
Hexagon__v64i1i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhq", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhq_128B :
Hexagon__v128i1i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhq_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhw :
Hexagon__i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhw", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhw_128B :
Hexagon__i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhw_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhw_add :
Hexagon__i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhw_add", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhw_add_128B :
Hexagon__i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhw_add_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhwq :
Hexagon__v64i1i32i32v32i32v16i32_Intrinsic<"HEXAGON_V6_vscattermhwq", [IntrWriteMem]>;

def int_hexagon_V6_vscattermhwq_128B :
Hexagon__v128i1i32i32v64i32v32i32_Intrinsic<"HEXAGON_V6_vscattermhwq_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermw :
Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermw", [IntrWriteMem]>;

def int_hexagon_V6_vscattermw_128B :
Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermw_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermw_add :
Hexagon__i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermw_add", [IntrWriteMem]>;

def int_hexagon_V6_vscattermw_add_128B :
Hexagon__i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermw_add_128B", [IntrWriteMem]>;

def int_hexagon_V6_vscattermwq :
Hexagon__v64i1i32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vscattermwq", [IntrWriteMem]>;

def int_hexagon_V6_vscattermwq_128B :
Hexagon__v128i1i32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vscattermwq_128B", [IntrWriteMem]>;
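
// The scatter intrinsics are also void, but write arbitrary memory and so
// carry IntrWriteMem instead of taking a destination pointer. They store
// vector elements at base-plus-offset addresses; the _add forms accumulate
// into memory and the *q forms mask lanes with a predicate.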

// V66 HVX Instructions.

def int_hexagon_V6_vaddcarryo :
Hexagon_custom_v16i32v64i1_v16i32v16i32_Intrinsic;

def int_hexagon_V6_vaddcarryo_128B :
Hexagon_custom_v32i32v128i1_v32i32v32i32_Intrinsic_128B;
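
// Unlike V62's vaddcarry/vsubcarry, the V66 "carryo" variants take no
// carry-in predicate (compare the operand lists of the custom classes);
// they only produce the {vector, carry-out} pair.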

def int_hexagon_V6_vaddcarrysat :
Hexagon_v16i32_v16i32v16i32v64i1_Intrinsic<"HEXAGON_V6_vaddcarrysat">;

def int_hexagon_V6_vaddcarrysat_128B :
Hexagon_v32i32_v32i32v32i32v128i1_Intrinsic<"HEXAGON_V6_vaddcarrysat_128B">;

def int_hexagon_V6_vasr_into :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vasr_into">;

def int_hexagon_V6_vasr_into_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vasr_into_128B">;

def int_hexagon_V6_vrotr :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vrotr">;

def int_hexagon_V6_vrotr_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vrotr_128B">;

def int_hexagon_V6_vsatdw :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsatdw">;

def int_hexagon_V6_vsatdw_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsatdw_128B">;

def int_hexagon_V6_vsubcarryo :
Hexagon_custom_v16i32v64i1_v16i32v16i32_Intrinsic;

def int_hexagon_V6_vsubcarryo_128B :
Hexagon_custom_v32i32v128i1_v32i32v32i32_Intrinsic_128B;

// V68 HVX Instructions.
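//
// V68 introduces HVX floating point. In the suffixes below, hf is IEEE
// binary16 and sf is IEEE binary32, while qf16/qf32 are, to our
// understanding, Qualcomm's non-IEEE "qfloat" accumulator formats; the
// vconv_* intrinsics convert qfloat values back to the IEEE types.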

def int_hexagon_V6_v6mpyhubs10 :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_v6mpyhubs10", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_v6mpyhubs10_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_v6mpyhubs10_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_v6mpyhubs10_vxx :
Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_v6mpyhubs10_vxx", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_v6mpyhubs10_vxx_128B :
Hexagon_v64i32_v64i32v64i32v64i32i32_Intrinsic<"HEXAGON_V6_v6mpyhubs10_vxx_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_v6mpyvubs10 :
Hexagon_v32i32_v32i32v32i32i32_Intrinsic<"HEXAGON_V6_v6mpyvubs10", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_v6mpyvubs10_128B :
Hexagon_v64i32_v64i32v64i32i32_Intrinsic<"HEXAGON_V6_v6mpyvubs10_128B", [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_hexagon_V6_v6mpyvubs10_vxx :
Hexagon_v32i32_v32i32v32i32v32i32i32_Intrinsic<"HEXAGON_V6_v6mpyvubs10_vxx", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_v6mpyvubs10_vxx_128B :
Hexagon_v64i32_v64i32v64i32v64i32i32_Intrinsic<"HEXAGON_V6_v6mpyvubs10_vxx_128B", [IntrNoMem, ImmArg<ArgIndex<3>>]>;

def int_hexagon_V6_vabs_hf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabs_hf">;

def int_hexagon_V6_vabs_hf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabs_hf_128B">;

def int_hexagon_V6_vabs_sf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vabs_sf">;

def int_hexagon_V6_vabs_sf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vabs_sf_128B">;

def int_hexagon_V6_vadd_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_hf">;

def int_hexagon_V6_vadd_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_hf_128B">;

def int_hexagon_V6_vadd_hf_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_hf_hf">;

def int_hexagon_V6_vadd_hf_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_hf_hf_128B">;

def int_hexagon_V6_vadd_qf16 :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf16">;

def int_hexagon_V6_vadd_qf16_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf16_128B">;

def int_hexagon_V6_vadd_qf16_mix :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf16_mix">;

def int_hexagon_V6_vadd_qf16_mix_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf16_mix_128B">;

def int_hexagon_V6_vadd_qf32 :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf32">;

def int_hexagon_V6_vadd_qf32_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf32_128B">;

def int_hexagon_V6_vadd_qf32_mix :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_qf32_mix">;

def int_hexagon_V6_vadd_qf32_mix_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_qf32_mix_128B">;

def int_hexagon_V6_vadd_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf">;

def int_hexagon_V6_vadd_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_128B">;

def int_hexagon_V6_vadd_sf_hf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf_hf">;

def int_hexagon_V6_vadd_sf_hf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_hf_128B">;

def int_hexagon_V6_vadd_sf_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf_sf">;

def int_hexagon_V6_vadd_sf_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_sf_128B">;

def int_hexagon_V6_vassign_fp :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vassign_fp">;

def int_hexagon_V6_vassign_fp_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vassign_fp_128B">;

def int_hexagon_V6_vconv_hf_qf16 :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf16">;

def int_hexagon_V6_vconv_hf_qf16_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf16_128B">;

def int_hexagon_V6_vconv_hf_qf32 :
Hexagon_v16i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf32">;

def int_hexagon_V6_vconv_hf_qf32_128B :
Hexagon_v32i32_v64i32_Intrinsic<"HEXAGON_V6_vconv_hf_qf32_128B">;

def int_hexagon_V6_vconv_sf_qf32 :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_sf_qf32">;

def int_hexagon_V6_vconv_sf_qf32_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_sf_qf32_128B">;

def int_hexagon_V6_vcvt_b_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_b_hf">;

def int_hexagon_V6_vcvt_b_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_b_hf_128B">;

def int_hexagon_V6_vcvt_h_hf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_h_hf">;

def int_hexagon_V6_vcvt_h_hf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_h_hf_128B">;

def int_hexagon_V6_vcvt_hf_b :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_b">;

def int_hexagon_V6_vcvt_hf_b_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_b_128B">;

def int_hexagon_V6_vcvt_hf_h :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_h">;

def int_hexagon_V6_vcvt_hf_h_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_h_128B">;

def int_hexagon_V6_vcvt_hf_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_sf">;

def int_hexagon_V6_vcvt_hf_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_sf_128B">;

def int_hexagon_V6_vcvt_hf_ub :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_ub">;

def int_hexagon_V6_vcvt_hf_ub_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_ub_128B">;

def int_hexagon_V6_vcvt_hf_uh :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_hf_uh">;

def int_hexagon_V6_vcvt_hf_uh_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_hf_uh_128B">;

def int_hexagon_V6_vcvt_sf_hf :
Hexagon_v32i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_sf_hf">;

def int_hexagon_V6_vcvt_sf_hf_128B :
Hexagon_v64i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_sf_hf_128B">;

def int_hexagon_V6_vcvt_ub_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_ub_hf">;

def int_hexagon_V6_vcvt_ub_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_ub_hf_128B">;

def int_hexagon_V6_vcvt_uh_hf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vcvt_uh_hf">;

def int_hexagon_V6_vcvt_uh_hf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vcvt_uh_hf_128B">;

def int_hexagon_V6_vdmpy_sf_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf">;

def int_hexagon_V6_vdmpy_sf_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf_128B">;

def int_hexagon_V6_vdmpy_sf_hf_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf_acc">;

def int_hexagon_V6_vdmpy_sf_hf_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vdmpy_sf_hf_acc_128B">;

def int_hexagon_V6_vfmax_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmax_hf">;

def int_hexagon_V6_vfmax_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmax_hf_128B">;

def int_hexagon_V6_vfmax_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmax_sf">;

def int_hexagon_V6_vfmax_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmax_sf_128B">;

def int_hexagon_V6_vfmin_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmin_hf">;

def int_hexagon_V6_vfmin_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmin_hf_128B">;

def int_hexagon_V6_vfmin_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vfmin_sf">;

def int_hexagon_V6_vfmin_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vfmin_sf_128B">;

def int_hexagon_V6_vfneg_hf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vfneg_hf">;

def int_hexagon_V6_vfneg_hf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vfneg_hf_128B">;

def int_hexagon_V6_vfneg_sf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vfneg_sf">;

def int_hexagon_V6_vfneg_sf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vfneg_sf_128B">;

def int_hexagon_V6_vgthf :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf">;

def int_hexagon_V6_vgthf_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_128B">;

def int_hexagon_V6_vgthf_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf_and">;

def int_hexagon_V6_vgthf_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_and_128B">;

def int_hexagon_V6_vgthf_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf_or">;

def int_hexagon_V6_vgthf_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_or_128B">;

def int_hexagon_V6_vgthf_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgthf_xor">;

def int_hexagon_V6_vgthf_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgthf_xor_128B">;

def int_hexagon_V6_vgtsf :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf">;

def int_hexagon_V6_vgtsf_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_128B">;

def int_hexagon_V6_vgtsf_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf_and">;

def int_hexagon_V6_vgtsf_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_and_128B">;

def int_hexagon_V6_vgtsf_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf_or">;

def int_hexagon_V6_vgtsf_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_or_128B">;

def int_hexagon_V6_vgtsf_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtsf_xor">;

def int_hexagon_V6_vgtsf_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtsf_xor_128B">;

def int_hexagon_V6_vmax_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmax_hf">;

def int_hexagon_V6_vmax_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmax_hf_128B">;

def int_hexagon_V6_vmax_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmax_sf">;

def int_hexagon_V6_vmax_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmax_sf_128B">;

def int_hexagon_V6_vmin_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmin_hf">;

def int_hexagon_V6_vmin_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmin_hf_128B">;

def int_hexagon_V6_vmin_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmin_sf">;

def int_hexagon_V6_vmin_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmin_sf_128B">;

def int_hexagon_V6_vmpy_hf_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf">;

def int_hexagon_V6_vmpy_hf_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf_128B">;

def int_hexagon_V6_vmpy_hf_hf_acc :
Hexagon_v16i32_v16i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf_acc">;

def int_hexagon_V6_vmpy_hf_hf_acc_128B :
Hexagon_v32i32_v32i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_hf_hf_acc_128B">;

def int_hexagon_V6_vmpy_qf16 :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf16">;

def int_hexagon_V6_vmpy_qf16_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_128B">;

def int_hexagon_V6_vmpy_qf16_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_hf">;

def int_hexagon_V6_vmpy_qf16_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_hf_128B">;

def int_hexagon_V6_vmpy_qf16_mix_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_mix_hf">;

def int_hexagon_V6_vmpy_qf16_mix_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf16_mix_hf_128B">;

def int_hexagon_V6_vmpy_qf32 :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32">;

def int_hexagon_V6_vmpy_qf32_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_128B">;

def int_hexagon_V6_vmpy_qf32_hf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_hf">;

def int_hexagon_V6_vmpy_qf32_hf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_hf_128B">;

def int_hexagon_V6_vmpy_qf32_mix_hf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_mix_hf">;

def int_hexagon_V6_vmpy_qf32_mix_hf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_mix_hf_128B">;

def int_hexagon_V6_vmpy_qf32_qf16 :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_qf16">;

def int_hexagon_V6_vmpy_qf32_qf16_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_qf16_128B">;

def int_hexagon_V6_vmpy_qf32_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_sf">;

def int_hexagon_V6_vmpy_qf32_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_qf32_sf_128B">;

def int_hexagon_V6_vmpy_sf_hf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf">;

def int_hexagon_V6_vmpy_sf_hf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf_128B">;

def int_hexagon_V6_vmpy_sf_hf_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf_acc">;

def int_hexagon_V6_vmpy_sf_hf_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_hf_acc_128B">;

def int_hexagon_V6_vmpy_sf_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_sf">;

def int_hexagon_V6_vmpy_sf_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_sf_128B">;

def int_hexagon_V6_vsub_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_hf">;

def int_hexagon_V6_vsub_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_hf_128B">;

def int_hexagon_V6_vsub_hf_hf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_hf_hf">;

def int_hexagon_V6_vsub_hf_hf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_hf_hf_128B">;

def int_hexagon_V6_vsub_qf16 :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf16">;

def int_hexagon_V6_vsub_qf16_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf16_128B">;

def int_hexagon_V6_vsub_qf16_mix :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf16_mix">;

def int_hexagon_V6_vsub_qf16_mix_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf16_mix_128B">;

def int_hexagon_V6_vsub_qf32 :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf32">;

def int_hexagon_V6_vsub_qf32_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf32_128B">;

def int_hexagon_V6_vsub_qf32_mix :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_qf32_mix">;

def int_hexagon_V6_vsub_qf32_mix_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_qf32_mix_128B">;

def int_hexagon_V6_vsub_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf">;

def int_hexagon_V6_vsub_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_128B">;

def int_hexagon_V6_vsub_sf_hf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf_hf">;

def int_hexagon_V6_vsub_sf_hf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_hf_128B">;

def int_hexagon_V6_vsub_sf_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf_sf">;

def int_hexagon_V6_vsub_sf_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_sf_128B">;

// V69 HVX Instructions.
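// As elsewhere in this file, each HVX intrinsic comes in a 64-byte-vector
// base form (v16i32-based operands) and a 128-byte-vector `_128B` form
// (v32i32-based operands).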

def int_hexagon_V6_vasrvuhubrndsat :
Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvuhubrndsat">;

def int_hexagon_V6_vasrvuhubrndsat_128B :
Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvuhubrndsat_128B">;

def int_hexagon_V6_vasrvuhubsat :
Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvuhubsat">;

def int_hexagon_V6_vasrvuhubsat_128B :
Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvuhubsat_128B">;

def int_hexagon_V6_vasrvwuhrndsat :
Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvwuhrndsat">;

def int_hexagon_V6_vasrvwuhrndsat_128B :
Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvwuhrndsat_128B">;

def int_hexagon_V6_vasrvwuhsat :
Hexagon_v16i32_v32i32v16i32_Intrinsic<"HEXAGON_V6_vasrvwuhsat">;

def int_hexagon_V6_vasrvwuhsat_128B :
Hexagon_v32i32_v64i32v32i32_Intrinsic<"HEXAGON_V6_vasrvwuhsat_128B">;

def int_hexagon_V6_vmpyuhvs :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpyuhvs">;

def int_hexagon_V6_vmpyuhvs_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpyuhvs_128B">;

// V73 HVX Instructions.

def int_hexagon_V6_vadd_sf_bf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vadd_sf_bf">;

def int_hexagon_V6_vadd_sf_bf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vadd_sf_bf_128B">;

def int_hexagon_V6_vconv_h_hf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_h_hf">;

def int_hexagon_V6_vconv_h_hf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_h_hf_128B">;

def int_hexagon_V6_vconv_hf_h :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_hf_h">;

def int_hexagon_V6_vconv_hf_h_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_hf_h_128B">;

def int_hexagon_V6_vconv_sf_w :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_sf_w">;

def int_hexagon_V6_vconv_sf_w_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_sf_w_128B">;

def int_hexagon_V6_vconv_w_sf :
Hexagon_v16i32_v16i32_Intrinsic<"HEXAGON_V6_vconv_w_sf">;

def int_hexagon_V6_vconv_w_sf_128B :
Hexagon_v32i32_v32i32_Intrinsic<"HEXAGON_V6_vconv_w_sf_128B">;

def int_hexagon_V6_vcvt_bf_sf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vcvt_bf_sf">;

def int_hexagon_V6_vcvt_bf_sf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vcvt_bf_sf_128B">;

def int_hexagon_V6_vgtbf :
Hexagon_v64i1_v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtbf">;

def int_hexagon_V6_vgtbf_128B :
Hexagon_v128i1_v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtbf_128B">;

def int_hexagon_V6_vgtbf_and :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtbf_and">;

def int_hexagon_V6_vgtbf_and_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtbf_and_128B">;

def int_hexagon_V6_vgtbf_or :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtbf_or">;

def int_hexagon_V6_vgtbf_or_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtbf_or_128B">;

def int_hexagon_V6_vgtbf_xor :
Hexagon_v64i1_v64i1v16i32v16i32_Intrinsic<"HEXAGON_V6_vgtbf_xor">;

def int_hexagon_V6_vgtbf_xor_128B :
Hexagon_v128i1_v128i1v32i32v32i32_Intrinsic<"HEXAGON_V6_vgtbf_xor_128B">;

def int_hexagon_V6_vmax_bf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmax_bf">;

def int_hexagon_V6_vmax_bf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmax_bf_128B">;

def int_hexagon_V6_vmin_bf :
Hexagon_v16i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmin_bf">;

def int_hexagon_V6_vmin_bf_128B :
Hexagon_v32i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmin_bf_128B">;

def int_hexagon_V6_vmpy_sf_bf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_bf">;

def int_hexagon_V6_vmpy_sf_bf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_bf_128B">;

def int_hexagon_V6_vmpy_sf_bf_acc :
Hexagon_v32i32_v32i32v16i32v16i32_Intrinsic<"HEXAGON_V6_vmpy_sf_bf_acc">;

def int_hexagon_V6_vmpy_sf_bf_acc_128B :
Hexagon_v64i32_v64i32v32i32v32i32_Intrinsic<"HEXAGON_V6_vmpy_sf_bf_acc_128B">;

def int_hexagon_V6_vsub_sf_bf :
Hexagon_v32i32_v16i32v16i32_Intrinsic<"HEXAGON_V6_vsub_sf_bf">;

def int_hexagon_V6_vsub_sf_bf_128B :
Hexagon_v64i32_v32i32v32i32_Intrinsic<"HEXAGON_V6_vsub_sf_bf_128B">;

//===- IntrinsicsRISCVXTHead.td - T-Head intrinsics --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the T-Head vendor intrinsics for RISC-V.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "riscv" in {

  class TH_VdotTernaryWideMasked
        : DefaultAttrsIntrinsic< [llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<2, llvm_i1_ty>,
                      llvm_anyint_ty, LLVMMatchType<3>],
                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
    let ScalarOperand = 1;
    let VLOperand = 4;
  }

  multiclass TH_VdotTernaryWide {
    def "int_riscv_" # NAME : RISCVTernaryWideUnMasked;
    def "int_riscv_" # NAME # "_mask" : TH_VdotTernaryWideMasked;
  }
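
  // For example, `defm th_vmaqa : TH_VdotTernaryWide;` below expands to both
  // `int_riscv_th_vmaqa` (unmasked) and `int_riscv_th_vmaqa_mask` (masked).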

  defm th_vmaqa    : TH_VdotTernaryWide;
  defm th_vmaqau   : TH_VdotTernaryWide;
  defm th_vmaqasu  : TH_VdotTernaryWide;
  defm th_vmaqaus  : TH_VdotTernaryWide;
}
// IR/IntrinsicsVE.td
// Define intrinsics written by hand

// VEL Intrinsic instructions.
let TargetPrefix = "ve" in {
  def int_ve_vl_pack_f32p : ClangBuiltin<"__builtin_ve_vl_pack_f32p">,
                            Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
                                      [IntrReadMem]>;
  def int_ve_vl_pack_f32a : ClangBuiltin<"__builtin_ve_vl_pack_f32a">,
                            Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                      [IntrReadMem]>;

  def int_ve_vl_extract_vm512u :
      ClangBuiltin<"__builtin_ve_vl_extract_vm512u">,
      Intrinsic<[LLVMType<v256i1>], [LLVMType<v512i1>], [IntrNoMem]>;

  def int_ve_vl_extract_vm512l :
      ClangBuiltin<"__builtin_ve_vl_extract_vm512l">,
      Intrinsic<[LLVMType<v256i1>], [LLVMType<v512i1>], [IntrNoMem]>;

  def int_ve_vl_insert_vm512u :
      ClangBuiltin<"__builtin_ve_vl_insert_vm512u">,
      Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v256i1>],
                [IntrNoMem]>;

  def int_ve_vl_insert_vm512l :
      ClangBuiltin<"__builtin_ve_vl_insert_vm512l">,
      Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v256i1>],
                [IntrNoMem]>;
}

// Define intrinsics automatically generated
include "llvm/IR/IntrinsicsVEVL.gen.td"
//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various LLVM instructions.  This is
// used as a central place for enumerating the different instructions and
// should eventually be the place to put comments about the instructions.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
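// For example (a hedged sketch of the usual X-macro idiom, not part of the
// original file), a client can stamp out one switch case per opcode by
// defining HANDLE_INST before inclusion:
//
//   #define HANDLE_INST(num, opcode, Class) case num: return #opcode;
//   #include "llvm/IR/Instruction.def"
//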
#ifndef FIRST_TERM_INST
#define FIRST_TERM_INST(num)
#endif
#ifndef HANDLE_TERM_INST
#ifndef HANDLE_INST
#define HANDLE_TERM_INST(num, opcode, Class)
#else
#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_TERM_INST
#define LAST_TERM_INST(num)
#endif

#ifndef FIRST_UNARY_INST
#define FIRST_UNARY_INST(num)
#endif
#ifndef HANDLE_UNARY_INST
#ifndef HANDLE_INST
#define HANDLE_UNARY_INST(num, opcode, instclass)
#else
#define HANDLE_UNARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_UNARY_INST
#define LAST_UNARY_INST(num)
#endif

#ifndef FIRST_BINARY_INST
#define FIRST_BINARY_INST(num)
#endif
#ifndef HANDLE_BINARY_INST
#ifndef HANDLE_INST
#define HANDLE_BINARY_INST(num, opcode, instclass)
#else
#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_BINARY_INST
#define LAST_BINARY_INST(num)
#endif

#ifndef FIRST_MEMORY_INST
#define FIRST_MEMORY_INST(num)
#endif
#ifndef HANDLE_MEMORY_INST
#ifndef HANDLE_INST
#define HANDLE_MEMORY_INST(num, opcode, Class)
#else
#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_MEMORY_INST
#define LAST_MEMORY_INST(num)
#endif

#ifndef FIRST_CAST_INST
#define FIRST_CAST_INST(num)
#endif
#ifndef HANDLE_CAST_INST
#ifndef HANDLE_INST
#define HANDLE_CAST_INST(num, opcode, Class)
#else
#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_CAST_INST
#define LAST_CAST_INST(num)
#endif

#ifndef FIRST_FUNCLETPAD_INST
#define FIRST_FUNCLETPAD_INST(num)
#endif
#ifndef HANDLE_FUNCLETPAD_INST
#ifndef HANDLE_INST
#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
#else
#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_FUNCLETPAD_INST
#define LAST_FUNCLETPAD_INST(num)
#endif

#ifndef FIRST_OTHER_INST
#define FIRST_OTHER_INST(num)
#endif
#ifndef HANDLE_OTHER_INST
#ifndef HANDLE_INST
#define HANDLE_OTHER_INST(num, opcode, Class)
#else
#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_OTHER_INST
#define LAST_OTHER_INST(num)
#endif

#ifndef HANDLE_USER_INST
#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
#endif

// Terminator Instructions - These instructions are used to terminate a basic
// block of the program.   Every basic block must end with one of these
// instructions for it to be a well-formed basic block.
//
 FIRST_TERM_INST  ( 1)
HANDLE_TERM_INST  ( 1, Ret           , ReturnInst)
HANDLE_TERM_INST  ( 2, Br            , BranchInst)
HANDLE_TERM_INST  ( 3, Switch        , SwitchInst)
HANDLE_TERM_INST  ( 4, IndirectBr    , IndirectBrInst)
HANDLE_TERM_INST  ( 5, Invoke        , InvokeInst)
HANDLE_TERM_INST  ( 6, Resume        , ResumeInst)
HANDLE_TERM_INST  ( 7, Unreachable   , UnreachableInst)
HANDLE_TERM_INST  ( 8, CleanupRet    , CleanupReturnInst)
HANDLE_TERM_INST  ( 9, CatchRet      , CatchReturnInst)
HANDLE_TERM_INST  (10, CatchSwitch   , CatchSwitchInst)
HANDLE_TERM_INST  (11, CallBr        , CallBrInst) // A call-site terminator
  LAST_TERM_INST  (11)

// Standard unary operators...
 FIRST_UNARY_INST(12)
HANDLE_UNARY_INST(12, FNeg  , UnaryOperator)
  LAST_UNARY_INST(12)

// Standard binary operators...
 FIRST_BINARY_INST(13)
HANDLE_BINARY_INST(13, Add  , BinaryOperator)
HANDLE_BINARY_INST(14, FAdd , BinaryOperator)
HANDLE_BINARY_INST(15, Sub  , BinaryOperator)
HANDLE_BINARY_INST(16, FSub , BinaryOperator)
HANDLE_BINARY_INST(17, Mul  , BinaryOperator)
HANDLE_BINARY_INST(18, FMul , BinaryOperator)
HANDLE_BINARY_INST(19, UDiv , BinaryOperator)
HANDLE_BINARY_INST(20, SDiv , BinaryOperator)
HANDLE_BINARY_INST(21, FDiv , BinaryOperator)
HANDLE_BINARY_INST(22, URem , BinaryOperator)
HANDLE_BINARY_INST(23, SRem , BinaryOperator)
HANDLE_BINARY_INST(24, FRem , BinaryOperator)

// Logical operators (integer operands)
HANDLE_BINARY_INST(25, Shl  , BinaryOperator) // Shift left  (logical)
HANDLE_BINARY_INST(26, LShr , BinaryOperator) // Shift right (logical)
HANDLE_BINARY_INST(27, AShr , BinaryOperator) // Shift right (arithmetic)
HANDLE_BINARY_INST(28, And  , BinaryOperator)
HANDLE_BINARY_INST(29, Or   , BinaryOperator)
HANDLE_BINARY_INST(30, Xor  , BinaryOperator)
  LAST_BINARY_INST(30)

// Memory operators...
 FIRST_MEMORY_INST(31)
HANDLE_MEMORY_INST(31, Alloca, AllocaInst)  // Stack management
HANDLE_MEMORY_INST(32, Load  , LoadInst  )  // Memory manipulation instrs
HANDLE_MEMORY_INST(33, Store , StoreInst )
HANDLE_MEMORY_INST(34, GetElementPtr, GetElementPtrInst)
HANDLE_MEMORY_INST(35, Fence , FenceInst )
HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst )
HANDLE_MEMORY_INST(37, AtomicRMW , AtomicRMWInst )
  LAST_MEMORY_INST(37)

// Cast operators ...
// NOTE: The order matters here because CastInst::isEliminableCastPair
// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
 FIRST_CAST_INST(38)
HANDLE_CAST_INST(38, Trunc   , TruncInst   )  // Truncate integers
HANDLE_CAST_INST(39, ZExt    , ZExtInst    )  // Zero extend integers
HANDLE_CAST_INST(40, SExt    , SExtInst    )  // Sign extend integers
HANDLE_CAST_INST(41, FPToUI  , FPToUIInst  )  // floating point -> UInt
HANDLE_CAST_INST(42, FPToSI  , FPToSIInst  )  // floating point -> SInt
HANDLE_CAST_INST(43, UIToFP  , UIToFPInst  )  // UInt -> floating point
HANDLE_CAST_INST(44, SIToFP  , SIToFPInst  )  // SInt -> floating point
HANDLE_CAST_INST(45, FPTrunc , FPTruncInst )  // Truncate floating point
HANDLE_CAST_INST(46, FPExt   , FPExtInst   )  // Extend floating point
HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst)  // Pointer -> Integer
HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst)  // Integer -> Pointer
HANDLE_CAST_INST(49, BitCast , BitCastInst )  // Type cast
HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst)  // addrspace cast
  LAST_CAST_INST(50)

 FIRST_FUNCLETPAD_INST(51)
HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst)
HANDLE_FUNCLETPAD_INST(52, CatchPad  , CatchPadInst)
  LAST_FUNCLETPAD_INST(52)

// Other operators...
 FIRST_OTHER_INST(53)
HANDLE_OTHER_INST(53, ICmp   , ICmpInst   )  // Integer comparison instruction
HANDLE_OTHER_INST(54, FCmp   , FCmpInst   )  // Floating point comparison instr.
HANDLE_OTHER_INST(55, PHI    , PHINode    )  // PHI node instruction
HANDLE_OTHER_INST(56, Call   , CallInst   )  // Call a function
HANDLE_OTHER_INST(57, Select , SelectInst )  // select instruction
HANDLE_USER_INST (58, UserOp1, Instruction)  // May be used internally in a pass
HANDLE_USER_INST (59, UserOp2, Instruction)  // Internal to passes only
HANDLE_OTHER_INST(60, VAArg  , VAArgInst  )  // vaarg instruction
HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector
HANDLE_OTHER_INST(62, InsertElement, InsertElementInst)  // insert into vector
HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst)  // shuffle two vectors.
HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate
HANDLE_OTHER_INST(65, InsertValue, InsertValueInst)  // insert into aggregate
HANDLE_OTHER_INST(66, LandingPad, LandingPadInst)  // Landing pad instruction.
HANDLE_OTHER_INST(67, Freeze, FreezeInst) // Freeze instruction.
  LAST_OTHER_INST(67)

#undef  FIRST_TERM_INST
#undef HANDLE_TERM_INST
#undef   LAST_TERM_INST

#undef  FIRST_UNARY_INST
#undef HANDLE_UNARY_INST
#undef   LAST_UNARY_INST

#undef  FIRST_BINARY_INST
#undef HANDLE_BINARY_INST
#undef   LAST_BINARY_INST

#undef  FIRST_MEMORY_INST
#undef HANDLE_MEMORY_INST
#undef   LAST_MEMORY_INST

#undef  FIRST_CAST_INST
#undef HANDLE_CAST_INST
#undef   LAST_CAST_INST

#undef  FIRST_FUNCLETPAD_INST
#undef HANDLE_FUNCLETPAD_INST
#undef   LAST_FUNCLETPAD_INST

#undef  FIRST_OTHER_INST
#undef HANDLE_OTHER_INST
#undef   LAST_OTHER_INST

#undef HANDLE_USER_INST

#ifdef HANDLE_INST
#undef HANDLE_INST
#endif
//===-- llvm/FMF.h - Fast math flags subclass -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the fast math flags.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_FMF_H
#define LLVM_IR_FMF_H

namespace llvm {
class raw_ostream;

/// Convenience struct for specifying and reasoning about fast-math flags.
class FastMathFlags {
private:
  friend class FPMathOperator;

  unsigned Flags = 0;

  FastMathFlags(unsigned F) {
    // If all 7 bits are set, turn this into -1. If the number of bits grows,
    // this must be updated. This is intended to provide some forward binary
    // compatibility insurance for the meaning of 'fast' in case bits are added.
    if (F == 0x7F) Flags = ~0U;
    else Flags = F;
  }

public:
  // This is how the bits are used in Value::SubclassOptionalData so they
  // should fit there too.
  // WARNING: We're out of space. SubclassOptionalData only has 7 bits. New
  // functionality will require a change in how this information is stored.
  enum {
    AllowReassoc    = (1 << 0),
    NoNaNs          = (1 << 1),
    NoInfs          = (1 << 2),
    NoSignedZeros   = (1 << 3),
    AllowReciprocal = (1 << 4),
    AllowContract   = (1 << 5),
    ApproxFunc      = (1 << 6)
  };

  FastMathFlags() = default;

  static FastMathFlags getFast() {
    FastMathFlags FMF;
    FMF.setFast();
    return FMF;
  }

  bool any() const { return Flags != 0; }
  bool none() const { return Flags == 0; }
  bool all() const { return Flags == ~0U; }

  void clear() { Flags = 0; }
  void set()   { Flags = ~0U; }

  /// Flag queries
  bool allowReassoc() const    { return 0 != (Flags & AllowReassoc); }
  bool noNaNs() const          { return 0 != (Flags & NoNaNs); }
  bool noInfs() const          { return 0 != (Flags & NoInfs); }
  bool noSignedZeros() const   { return 0 != (Flags & NoSignedZeros); }
  bool allowReciprocal() const { return 0 != (Flags & AllowReciprocal); }
  bool allowContract() const   { return 0 != (Flags & AllowContract); }
  bool approxFunc() const      { return 0 != (Flags & ApproxFunc); }
  /// 'Fast' means all bits are set.
  bool isFast() const          { return all(); }

  /// Flag setters
  void setAllowReassoc(bool B = true) {
    Flags = (Flags & ~AllowReassoc) | B * AllowReassoc;
  }
  void setNoNaNs(bool B = true) {
    Flags = (Flags & ~NoNaNs) | B * NoNaNs;
  }
  void setNoInfs(bool B = true) {
    Flags = (Flags & ~NoInfs) | B * NoInfs;
  }
  void setNoSignedZeros(bool B = true) {
    Flags = (Flags & ~NoSignedZeros) | B * NoSignedZeros;
  }
  void setAllowReciprocal(bool B = true) {
    Flags = (Flags & ~AllowReciprocal) | B * AllowReciprocal;
  }
  void setAllowContract(bool B = true) {
    Flags = (Flags & ~AllowContract) | B * AllowContract;
  }
  void setApproxFunc(bool B = true) {
    Flags = (Flags & ~ApproxFunc) | B * ApproxFunc;
  }
  void setFast(bool B = true) { B ? set() : clear(); }
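
  // Example (a hedged usage sketch, not part of this header):
  //   FastMathFlags FMF;
  //   FMF.setAllowReassoc();
  //   FMF.setAllowContract();
  //   assert(FMF.allowReassoc() && !FMF.isFast()); // some, but not all, bits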

  void operator&=(const FastMathFlags &OtherFlags) {
    Flags &= OtherFlags.Flags;
  }
  void operator|=(const FastMathFlags &OtherFlags) {
    Flags |= OtherFlags.Flags;
  }
  bool operator!=(const FastMathFlags &OtherFlags) const {
    return Flags != OtherFlags.Flags;
  }

  /// Print fast-math flags to \p O.
  void print(raw_ostream &O) const;
};

inline raw_ostream &operator<<(raw_ostream &O, FastMathFlags FMF) {
  FMF.print(O);
  return O;
}

} // end namespace llvm

#endif // LLVM_IR_FMF_H
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_XCORE_ENUMS_H
#define LLVM_IR_INTRINSIC_XCORE_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum XCOREIntrinsics : unsigned {
// Enum values for intrinsics
    xcore_bitrev = 11873,                              // llvm.xcore.bitrev
    xcore_checkevent,                          // llvm.xcore.checkevent
    xcore_chkct,                               // llvm.xcore.chkct
    xcore_clre,                                // llvm.xcore.clre
    xcore_clrpt,                               // llvm.xcore.clrpt
    xcore_clrsr,                               // llvm.xcore.clrsr
    xcore_crc32,                               // llvm.xcore.crc32
    xcore_crc8,                                // llvm.xcore.crc8
    xcore_edu,                                 // llvm.xcore.edu
    xcore_eeu,                                 // llvm.xcore.eeu
    xcore_endin,                               // llvm.xcore.endin
    xcore_freer,                               // llvm.xcore.freer
    xcore_geted,                               // llvm.xcore.geted
    xcore_getet,                               // llvm.xcore.getet
    xcore_getid,                               // llvm.xcore.getid
    xcore_getps,                               // llvm.xcore.getps
    xcore_getr,                                // llvm.xcore.getr
    xcore_getst,                               // llvm.xcore.getst
    xcore_getts,                               // llvm.xcore.getts
    xcore_in,                                  // llvm.xcore.in
    xcore_inct,                                // llvm.xcore.inct
    xcore_initcp,                              // llvm.xcore.initcp
    xcore_initdp,                              // llvm.xcore.initdp
    xcore_initlr,                              // llvm.xcore.initlr
    xcore_initpc,                              // llvm.xcore.initpc
    xcore_initsp,                              // llvm.xcore.initsp
    xcore_inshr,                               // llvm.xcore.inshr
    xcore_int,                                 // llvm.xcore.int
    xcore_mjoin,                               // llvm.xcore.mjoin
    xcore_msync,                               // llvm.xcore.msync
    xcore_out,                                 // llvm.xcore.out
    xcore_outct,                               // llvm.xcore.outct
    xcore_outshr,                              // llvm.xcore.outshr
    xcore_outt,                                // llvm.xcore.outt
    xcore_peek,                                // llvm.xcore.peek
    xcore_setc,                                // llvm.xcore.setc
    xcore_setclk,                              // llvm.xcore.setclk
    xcore_setd,                                // llvm.xcore.setd
    xcore_setev,                               // llvm.xcore.setev
    xcore_setps,                               // llvm.xcore.setps
    xcore_setpsc,                              // llvm.xcore.setpsc
    xcore_setpt,                               // llvm.xcore.setpt
    xcore_setrdy,                              // llvm.xcore.setrdy
    xcore_setsr,                               // llvm.xcore.setsr
    xcore_settw,                               // llvm.xcore.settw
    xcore_setv,                                // llvm.xcore.setv
    xcore_sext,                                // llvm.xcore.sext
    xcore_ssync,                               // llvm.xcore.ssync
    xcore_syncr,                               // llvm.xcore.syncr
    xcore_testct,                              // llvm.xcore.testct
    xcore_testwct,                             // llvm.xcore.testwct
    xcore_waitevent,                           // llvm.xcore.waitevent
    xcore_zext,                                // llvm.xcore.zext
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AARCH64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;

def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty],
                                 [IntrNoFree, IntrWillReturn]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                               [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
                               [IntrNoFree, IntrWillReturn]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
                                  [IntrNoFree, IntrWillReturn]>;

def int_aarch64_clrex : Intrinsic<[]>;

def int_aarch64_sdiv : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_fjcvtzs : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;

def int_aarch64_cls: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_cls64: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;

def int_aarch64_frint32z
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint64z
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint32x
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;
def int_aarch64_frint64x
    : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ], [ LLVMMatchType<0> ],
                            [ IntrNoMem ]>;

//===----------------------------------------------------------------------===//
// HINT

def int_aarch64_hint : DefaultAttrsIntrinsic<[], [llvm_i32_ty]>;

def int_aarch64_break : Intrinsic<[], [llvm_i32_ty],
    [IntrNoMem, IntrHasSideEffects, IntrNoReturn, IntrCold, ImmArg<ArgIndex<0>>]>;


def int_aarch64_prefetch : Intrinsic<[],
    [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
    [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn, ReadOnly<ArgIndex<0>>,
     ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>
     ]>,
    ClangBuiltin<"__builtin_arm_prefetch">;

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

def int_aarch64_dmb : ClangBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
def int_aarch64_dsb : ClangBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;
def int_aarch64_isb : ClangBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">,
                      Intrinsic<[], [llvm_i32_ty], [IntrNoFree, IntrWillReturn]>;

// A space-consuming intrinsic primarily for testing block and jump table
// placements. The first argument is the number of bytes this "instruction"
// takes up; the second argument and the return value are essentially chains,
// used to force ordering during ISel.
def int_aarch64_space : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], []>;
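// A hedged IR-level sketch (value names illustrative): reserving 1024 bytes
// of code space while threading the ordering chain %c0 -> %c1:
//   %c1 = call i64 @llvm.aarch64.space(i32 1024, i64 %c0)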

}

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Scalar_Float_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMExtendedType<0>, LLVMExtendedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                [LLVMExtendedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;

  class AdvSIMD_3IntArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFxToFP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                [IntrNoMem]>;

  class AdvSIMD_1Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;

  class AdvSIMD_Dot_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_FP16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_MatMul_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_BF16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                [IntrNoMem]>;
}

// Arithmetic ops

let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_sqdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
  def int_aarch64_neon_sqdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_sqrdmulh_lane : AdvSIMD_2VectorArg_Lane_Intrinsic;
  def int_aarch64_neon_sqrdmulh_laneq : AdvSIMD_2VectorArg_Lane_Intrinsic;

  def int_aarch64_neon_sqrdmlah : AdvSIMD_3IntArg_Intrinsic;
  def int_aarch64_neon_sqrdmlsh : AdvSIMD_3IntArg_Intrinsic;

  // Vector Polynomial Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
  def int_aarch64_neon_pmull64 :
        DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
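  // e.g. (illustrative IR, i128 result faked as <16 x i8>):
  //   %r = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b)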

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let IntrProperties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_faddp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1Arg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : DefaultAttrsIntrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;

  // v8.5-A Vector FP Rounding
  def int_aarch64_neon_frint32x : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint32z : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint64x : AdvSIMD_1FloatArg_Intrinsic;
  def int_aarch64_neon_frint64z : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty],
                                        [IntrNoMem]>;

  // v8.2-A Dot Product
  def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;

  // v8.6-A Matrix Multiply Intrinsics
  def int_aarch64_neon_ummla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_smmla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_usmmla : AdvSIMD_MatMul_Intrinsic;
  def int_aarch64_neon_usdot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_bfdot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_bfmmla
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                [IntrNoMem]>;
  def int_aarch64_neon_bfmlalb : AdvSIMD_BF16FML_Intrinsic;
  def int_aarch64_neon_bfmlalt : AdvSIMD_BF16FML_Intrinsic;


  // v8.6-A Bfloat Intrinsics
  def int_aarch64_neon_bfcvt
    : DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_aarch64_neon_bfcvtn
    : DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_bfcvtn2
    : DefaultAttrsIntrinsic<[llvm_v8bf16_ty],
                [llvm_v8bf16_ty, llvm_v4f32_ty],
                [IntrNoMem]>;

  // v8.2-A FP16 Fused Multiply-Add Long
  def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlal2 : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl2 : AdvSIMD_FP16FML_Intrinsic;

  // v8.3-A Floating-point complex add
  def int_aarch64_neon_vcadd_rot90  : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_vcadd_rot270 : AdvSIMD_2VectorArg_Intrinsic;

  def int_aarch64_neon_vcmla_rot0   : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot90  : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot180 : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_neon_vcmla_rot270 : AdvSIMD_3VectorArg_Intrinsic;
}

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Vector2Index_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                [IntrNoMem]>;
}

// Vector element to element moves
def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_1Vec_Load_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyptr_ty],
                  [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
                [llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
                [llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty],
                [llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;
}

// Memory ops

def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2  : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3  : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4  : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane  : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane  : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane  : AdvSIMD_4Vec_Store_Lane_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_Tbl1_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Tbx1_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
}
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;

let TargetPrefix = "aarch64" in {
  class FPCR_Get_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;
  class FPCR_Set_Intrinsic
    : DefaultAttrsIntrinsic<[], [llvm_i64_ty], [IntrNoMem, IntrHasSideEffects]>;
  class RNDR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty, llvm_i1_ty], [], [IntrNoMem, IntrHasSideEffects]>;
}

// FPCR
def int_aarch64_get_fpcr : FPCR_Get_Intrinsic;
def int_aarch64_set_fpcr : FPCR_Set_Intrinsic;

// Armv8.5-A Random number generation intrinsics
def int_aarch64_rndr : RNDR_Intrinsic;
def int_aarch64_rndrrs : RNDR_Intrinsic;
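
// Both intrinsics return a pair; a sketch of the expected IR shape (SSA
// names are illustrative), assuming the i1 flag reports whether a valid
// random value was produced:
//   %res = call { i64, i1 } @llvm.aarch64.rndr()
//   %val = extractvalue { i64, i1 } %res, 0
//   %ok  = extractvalue { i64, i1 } %res, 1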

let TargetPrefix = "aarch64" in {
  class Crypto_AES_DataKey_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  class Crypto_AES_Data_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 1 word of the hash (i32).
  class Crypto_SHA_1Hash_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule
  class Crypto_SHA_8Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule
  class Crypto_SHA_12Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA512 intrinsic taking 2 arguments
  class Crypto_SHA512_2Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

  // SHA512 intrinsic taking 3 arguments
  class Crypto_SHA512_3Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [IntrNoMem]>;

  // SHA3 intrinsics taking 3 arguments
  class Crypto_SHA3_3Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
               [IntrNoMem]>;

  // SHA3 intrinsic taking 2 arguments
  class Crypto_SHA3_2Arg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
               [IntrNoMem]>;

  // SHA3 intrinsic taking 2 vector arguments and 1 immediate
  class Crypto_SHA3_2ArgImm_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i64_ty],
               [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class Crypto_SM3_3Vector_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  class Crypto_SM3_3VectorIndexed_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i64_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class Crypto_SM4_2Vector_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
}

// AES
def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;
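
// A sketch of one AES round step in IR (SSA names illustrative; the
// operands are the data/state vector and the round key):
//   %r = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %data, <16 x i8> %key)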

// SHA1
def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;

def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;

// SHA256
def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;

// SHA3
def int_aarch64_crypto_eor3s : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_eor3u : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_bcaxs : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_bcaxu : Crypto_SHA3_3Arg_Intrinsic;
def int_aarch64_crypto_rax1 : Crypto_SHA3_2Arg_Intrinsic;
def int_aarch64_crypto_xar : Crypto_SHA3_2ArgImm_Intrinsic;

// SHA512
def int_aarch64_crypto_sha512h : Crypto_SHA512_3Arg_Intrinsic;
def int_aarch64_crypto_sha512h2 : Crypto_SHA512_3Arg_Intrinsic;
def int_aarch64_crypto_sha512su0 : Crypto_SHA512_2Arg_Intrinsic;
def int_aarch64_crypto_sha512su1 : Crypto_SHA512_3Arg_Intrinsic;

// SM3 & SM4
def int_aarch64_crypto_sm3partw1 : Crypto_SM3_3Vector_Intrinsic;
def int_aarch64_crypto_sm3partw2 : Crypto_SM3_3Vector_Intrinsic;
def int_aarch64_crypto_sm3ss1    : Crypto_SM3_3Vector_Intrinsic;
def int_aarch64_crypto_sm3tt1a   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm3tt1b   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm3tt2a   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm3tt2b   : Crypto_SM3_3VectorIndexed_Intrinsic;
def int_aarch64_crypto_sm4e      : Crypto_SM4_2Vector_Intrinsic;
def int_aarch64_crypto_sm4ekey   : Crypto_SM4_2Vector_Intrinsic;

//===----------------------------------------------------------------------===//
// CRC32

let TargetPrefix = "aarch64" in {

def int_aarch64_crc32b  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cb : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32h  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32ch : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32w  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cw : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32x  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cx : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
}
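
// A sketch of intended use (SSA names illustrative): the b/h/w/x suffix
// selects the width of the second (data) operand, and the "c" variants use
// the CRC-32C (Castagnoli) polynomial.
//   %crc = call i32 @llvm.aarch64.crc32b(i32 %acc, i32 %data)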

//===----------------------------------------------------------------------===//
// Memory Tagging Extensions (MTE) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_irg   : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;
def int_aarch64_addg  : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_gmi   : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_ldg   : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrReadMem]>;
def int_aarch64_stg   : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrWriteMem]>;
def int_aarch64_subp :  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrNoMem]>;

// The following are codegen-only intrinsics for stack instrumentation.

// Generate a randomly tagged stack base pointer.
def int_aarch64_irg_sp   : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;

// Transfer pointer tag with offset.
// ptr1 = tagp(ptr0, baseptr, tag_offset) returns a pointer where
// * address is the address in ptr0
// * tag is a function of (tag in baseptr, tag_offset).
// ** Beware, this is not the same function as implemented by the ADDG instruction!
//    Backend optimizations may change tag_offset; the only guarantee is that calls
//    to tagp with the same pair of (baseptr, tag_offset) will produce pointers
//    with the same tag value, assuming the set of excluded tags has not changed.
// Address bits in baseptr and tag bits in ptr0 are ignored.
// When the offset between ptr0 and baseptr is a compile-time constant, this can be emitted as
//   ADDG ptr1, baseptr, (ptr0 - baseptr), tag_offset
// It is intended that ptr0 is an alloca address, and baseptr is the direct output of llvm.aarch64.irg.sp.
def int_aarch64_tagp : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, ImmArg<ArgIndex<2>>]>;
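
// Illustrative IR for the intended stack-tagging pattern (SSA names are
// hypothetical; the .p0 overload suffix follows the usual mangling scheme):
//   %base = call ptr @llvm.aarch64.irg.sp(i64 0)
//   %a    = alloca i128, align 16
//   %ta   = call ptr @llvm.aarch64.tagp.p0(ptr %a, ptr %base, i64 1)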

// Update allocation tags for the memory range to match the tag in the pointer argument.
def int_aarch64_settag  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

// Update allocation tags for the memory range to match the tag in the pointer argument,
// and set memory contents to zero.
def int_aarch64_settag_zero  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

// Update allocation tags for a 16-byte-aligned, 16-byte memory region, and store a pair of 8-byte values.
def int_aarch64_stgp  : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
}

//===----------------------------------------------------------------------===//
// Memory Operations (MOPS) Intrinsics
let TargetPrefix = "aarch64" in {
  // Sizes are chosen to correspond to the llvm.memset intrinsic: ptr, i8, i64
  def int_aarch64_mops_memset_tag : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty, llvm_i64_ty],
      [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
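
  // A sketch of the call shape (SSA names illustrative); the operands mirror
  // llvm.memset and the result is a pointer:
  //   %p = call ptr @llvm.aarch64.mops.memset.tag(ptr %dst, i8 %val, i64 %n)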
}

// Transactional Memory Extension (TME) Intrinsics
let TargetPrefix = "aarch64" in {
def int_aarch64_tstart  : ClangBuiltin<"__builtin_arm_tstart">,
                         Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>;

def int_aarch64_tcommit : ClangBuiltin<"__builtin_arm_tcommit">, Intrinsic<[], [], [IntrWillReturn]>;

def int_aarch64_tcancel : ClangBuiltin<"__builtin_arm_tcancel">,
                          Intrinsic<[], [llvm_i64_ty], [IntrWillReturn, ImmArg<ArgIndex<0>>]>;

def int_aarch64_ttest   : ClangBuiltin<"__builtin_arm_ttest">,
                          Intrinsic<[llvm_i64_ty], [],
                                    [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>;

// Armv8.7-A load/store 64-byte intrinsics
defvar data512 = !listsplat(llvm_i64_ty, 8);
def int_aarch64_ld64b: Intrinsic<data512, [llvm_ptr_ty]>;
def int_aarch64_st64b: Intrinsic<[], !listconcat([llvm_ptr_ty], data512)>;
def int_aarch64_st64bv: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], data512)>;
def int_aarch64_st64bv0: Intrinsic<[llvm_i64_ty], !listconcat([llvm_ptr_ty], data512)>;
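
// Sketch of the ld64b call shape (SSA names illustrative); the eight i64
// results model the single 64-byte transfer:
//   %v = call { i64, i64, i64, i64, i64, i64, i64, i64 } @llvm.aarch64.ld64b(ptr %p)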

}

def llvm_nxv1i1_ty  : LLVMType<nxv1i1>;
def llvm_nxv2i1_ty  : LLVMType<nxv2i1>;
def llvm_nxv4i1_ty  : LLVMType<nxv4i1>;
def llvm_nxv8i1_ty  : LLVMType<nxv8i1>;
def llvm_nxv16i1_ty : LLVMType<nxv16i1>;
def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
def llvm_nxv8bf16_ty : LLVMType<nxv8bf16>;
def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
def llvm_nxv2f64_ty : LLVMType<nxv2f64>;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".

  class AdvSIMD_1Vec_PredLoad_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;

  class AdvSIMD_2Vec_PredLoad_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;

  class AdvSIMD_3Vec_PredLoad_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;

  class AdvSIMD_4Vec_PredLoad_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;

  class AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                [IntrInaccessibleMemOrArgMemOnly]>;

  class AdvSIMD_1Vec_PredStore_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;

  class AdvSIMD_2Vec_PredStore_Intrinsic
      : DefaultAttrsIntrinsic<[],
                  [llvm_anyvector_ty, LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                  [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;

  class AdvSIMD_3Vec_PredStore_Intrinsic
      : DefaultAttrsIntrinsic<[],
                  [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                  [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;

  class AdvSIMD_4Vec_PredStore_Intrinsic
      : DefaultAttrsIntrinsic<[],
                  [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>,
                   LLVMMatchType<0>,
                   LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_ptr_ty],
                  [IntrArgMemOnly, NoCapture<ArgIndex<5>>]>;

  class AdvSIMD_SVE_Index_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMVectorElementType<0>,
                 LLVMVectorElementType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Merged1VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_2VectorArgIndexed_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class AdvSIMD_3VectorArgIndexed_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class AdvSIMD_Pred1VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Pred2VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Pred3VectorArg_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_Compare_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_CompareWide_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty,
                 llvm_nxv2i64_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_Saturating_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_SaturatingWithPattern_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 llvm_i32_ty,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

  class AdvSIMD_SVE_Saturating_N_Intrinsic<LLVMType T>
    : DefaultAttrsIntrinsic<[T],
                [T, llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<LLVMType T>
    : DefaultAttrsIntrinsic<[T],
                [T, llvm_i32_ty, llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

  class AdvSIMD_SVE_CNT_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>],
                [LLVMVectorOfBitcastsToInt<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_ReduceWithInit_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMVectorElementType<0>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_ShiftByImm_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class AdvSIMD_SVE_ShiftWide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 llvm_nxv2i64_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_Unpack_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
               [LLVMSubdivide2VectorType<0>],
               [IntrNoMem]>;

  class AdvSIMD_SVE_CADD_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class AdvSIMD_SVE_CMLA_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<4>>]>;

  class AdvSIMD_SVE_CMLA_LANE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

  class AdvSIMD_SVE_DUP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMVectorElementType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_DUP_Unpred_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMVectorElementType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_DUPQ_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 llvm_i64_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_EXPA_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_FCVT_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_FCVTZS_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMVectorOfBitcastsToInt<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_INSR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMVectorElementType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_PTRUE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<0>>]>;

  class AdvSIMD_SVE_PUNPKHI_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMHalfElementsVectorType<0>],
                [llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_SCALE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_SCVTF_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_SVE_TSMUL_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_CNTB_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                [llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<0>>]>;

  class AdvSIMD_SVE_CNTP_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty],
                [llvm_anyvector_ty, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_DOT_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide4VectorType<0>,
                 LLVMSubdivide4VectorType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_DOT_Indexed_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide4VectorType<0>,
                 LLVMSubdivide4VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class AdvSIMD_SVE_PTEST_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i1_ty],
                [llvm_anyvector_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE_TBL_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  class AdvSIMD_SVE2_TBX_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  class SVE2_1VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMSubdivide2VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  class SVE2_2VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>],
                [IntrNoMem]>;

  class SVE2_2VectorArgIndexed_Long_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
              [LLVMSubdivide2VectorType<0>,
               LLVMSubdivide2VectorType<0>,
               llvm_i32_ty],
              [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class SVE2_2VectorArg_Wide_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>],
                [IntrNoMem]>;

  class SVE2_2VectorArg_Pred_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>],
                [IntrNoMem]>;

  class SVE2_3VectorArg_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>],
                [IntrNoMem]>;

  class SVE2_3VectorArgIndexed_Long_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class SVE2_1VectorArg_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                [llvm_anyvector_ty],
                [IntrNoMem]>;

  class SVE2_Merged1VectorArg_Narrowing_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                [LLVMSubdivide2VectorType<0>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;
  class SVE2_2VectorArg_Narrowing_Intrinsic
      : DefaultAttrsIntrinsic<
            [LLVMSubdivide2VectorType<0>],
            [llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

  class SVE2_Merged2VectorArg_Narrowing_Intrinsic
      : DefaultAttrsIntrinsic<
            [LLVMSubdivide2VectorType<0>],
            [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

  class SVE2_1VectorArg_Imm_Narrowing_Intrinsic
      : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                  [llvm_anyvector_ty, llvm_i32_ty],
                  [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  class SVE2_2VectorArg_Imm_Narrowing_Intrinsic
      : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                  [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty,
                   llvm_i32_ty],
                  [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class SVE2_CONFLICT_DETECT_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_anyptr_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class SVE2_3VectorArg_Indexed_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  class AdvSIMD_SVE_CDOT_LANE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide4VectorType<0>,
                 LLVMSubdivide4VectorType<0>,
                 llvm_i32_ty,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

  // NOTE: There is no relationship between these intrinsics beyond an attempt
  // to reuse currently identical class definitions.
  class AdvSIMD_SVE_LOGB_Intrinsic  : AdvSIMD_SVE_CNT_Intrinsic;
  class AdvSIMD_SVE2_CADD_Intrinsic : AdvSIMD_2VectorArgIndexed_Intrinsic;
  class AdvSIMD_SVE2_CMLA_Intrinsic : AdvSIMD_3VectorArgIndexed_Intrinsic;

  // This class of intrinsics is not intended to be useful within LLVM IR but
  // is instead here to support some of the more rigid parts of the ACLE.
  class Builtin_SVCVT<LLVMType OUT, LLVMType PRED, LLVMType IN>
      : DefaultAttrsIntrinsic<[OUT], [OUT, PRED, IN], [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// SVE

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".

class AdvSIMD_SVE_2SVBoolArg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                          [llvm_nxv16i1_ty],
                          [IntrNoMem]>;

class AdvSIMD_SVE_3SVBoolArg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                          [llvm_nxv16i1_ty, llvm_nxv16i1_ty],
                          [IntrNoMem]>;

class AdvSIMD_SVE_Reduce_Intrinsic
  : DefaultAttrsIntrinsic<[LLVMVectorElementType<0>],
              [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
               llvm_anyvector_ty],
              [IntrNoMem]>;

class AdvSIMD_SVE_SADDV_Reduce_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_i64_ty],
              [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
               llvm_anyvector_ty],
              [IntrNoMem]>;

class AdvSIMD_SVE_WHILE_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [llvm_anyint_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

class AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_ptr_ty,
                  LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
                ],
                [IntrReadMem, IntrArgMemOnly]>;

class AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_ptr_ty,
                  LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
                ],
                [IntrInaccessibleMemOrArgMemOnly]>;

class AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_ptr_ty,
                  LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
                ],
                [IntrReadMem, IntrArgMemOnly]>;

class AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_ptr_ty,
                  LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
                ],
                [IntrInaccessibleMemOrArgMemOnly]>;

class AdvSIMD_GatherLoad_VS_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_anyvector_ty,
                  llvm_i64_ty
                ],
                [IntrReadMem]>;

class AdvSIMD_GatherLoad_VS_WriteFFR_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_anyvector_ty,
                  llvm_i64_ty
                ],
                [IntrInaccessibleMemOrArgMemOnly]>;

class AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[],
               [
                 llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_ptr_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
               ],
               [IntrWriteMem, IntrArgMemOnly]>;

class AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic
    : DefaultAttrsIntrinsic<[],
               [
                 llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_ptr_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
               ],
               [IntrWriteMem, IntrArgMemOnly]>;

class AdvSIMD_ScatterStore_VS_Intrinsic
    : DefaultAttrsIntrinsic<[],
               [
                 llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty, llvm_i64_ty
               ],
               [IntrWriteMem]>;


class SVE_gather_prf_SV
    : DefaultAttrsIntrinsic<[],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
                  llvm_ptr_ty, // Base address
                  llvm_anyvector_ty, // Offsets
                  llvm_i32_ty // Prfop
                ],
                [IntrInaccessibleMemOrArgMemOnly, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<3>>]>;

class SVE_gather_prf_VS
    : DefaultAttrsIntrinsic<[],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, // Predicate
                  llvm_anyvector_ty, // Base addresses
                  llvm_i64_ty, // Scalar offset
                  llvm_i32_ty // Prfop
                ],
                [IntrInaccessibleMemOrArgMemOnly, ImmArg<ArgIndex<3>>]>;

class SVE_MatMul_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMSubdivide4VectorType<0>, LLVMSubdivide4VectorType<0>],
                [IntrNoMem]>;

class SVE_4Vec_BF16
    : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
                [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty],
                [IntrNoMem]>;

class SVE_4Vec_BF16_Indexed
    : DefaultAttrsIntrinsic<[llvm_nxv4f32_ty],
                [llvm_nxv4f32_ty, llvm_nxv8bf16_ty, llvm_nxv8bf16_ty, llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<3>>]>;

//
// Loads
//

def int_aarch64_sve_ld1   : AdvSIMD_1Vec_PredLoad_Intrinsic;

def int_aarch64_sve_ld2_sret : AdvSIMD_2Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld3_sret : AdvSIMD_3Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld4_sret : AdvSIMD_4Vec_PredLoad_Intrinsic;
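
// The structured loads return their vectors as a literal struct; a sketch
// for ld2 at nxv4i32 (SSA names illustrative):
//   %s = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
//            @llvm.aarch64.sve.ld2.sret.nxv4i32(<vscale x 4 x i1> %pg, ptr %p)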

def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;
def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_WriteFFR_Intrinsic;

def int_aarch64_sve_ld1rq : AdvSIMD_1Vec_PredLoad_Intrinsic;
def int_aarch64_sve_ld1ro : AdvSIMD_1Vec_PredLoad_Intrinsic;

//
// Stores
//

def int_aarch64_sve_st1  : AdvSIMD_1Vec_PredStore_Intrinsic;
def int_aarch64_sve_st2  : AdvSIMD_2Vec_PredStore_Intrinsic;
def int_aarch64_sve_st3  : AdvSIMD_3Vec_PredStore_Intrinsic;
def int_aarch64_sve_st4  : AdvSIMD_4Vec_PredStore_Intrinsic;

def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;

//
// Prefetches
//

def int_aarch64_sve_prf
  : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_ptr_ty, llvm_i32_ty],
                  [IntrArgMemOnly, ImmArg<ArgIndex<2>>]>;
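
// A sketch of a contiguous prefetch (SSA names illustrative); the final
// immediate encodes the prefetch operation, 0 being PLDL1KEEP:
//   call void @llvm.aarch64.sve.prf.nxv16i1(<vscale x 16 x i1> %pg, ptr %base, i32 0)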

// Scalar + 32-bit scaled offset vector, zero extend, packed and
// unpacked.
def int_aarch64_sve_prfb_gather_uxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfh_gather_uxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfw_gather_uxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfd_gather_uxtw_index : SVE_gather_prf_SV;

// Scalar + 32-bit scaled offset vector, sign extend, packed and
// unpacked.
def int_aarch64_sve_prfb_gather_sxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfw_gather_sxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfh_gather_sxtw_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfd_gather_sxtw_index : SVE_gather_prf_SV;

// Scalar + 64-bit scaled offset vector.
def int_aarch64_sve_prfb_gather_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfh_gather_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfw_gather_index : SVE_gather_prf_SV;
def int_aarch64_sve_prfd_gather_index : SVE_gather_prf_SV;

// Vector + scalar.
def int_aarch64_sve_prfb_gather_scalar_offset : SVE_gather_prf_VS;
def int_aarch64_sve_prfh_gather_scalar_offset : SVE_gather_prf_VS;
def int_aarch64_sve_prfw_gather_scalar_offset : SVE_gather_prf_VS;
def int_aarch64_sve_prfd_gather_scalar_offset : SVE_gather_prf_VS;

//
// Scalar to vector operations
//

def int_aarch64_sve_dup : AdvSIMD_SVE_DUP_Intrinsic;
def int_aarch64_sve_dup_x : AdvSIMD_SVE_DUP_Unpred_Intrinsic;

def int_aarch64_sve_index : AdvSIMD_SVE_Index_Intrinsic;

//
// Address calculation
//

def int_aarch64_sve_adrb : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_adrh : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_adrw : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_adrd : AdvSIMD_2VectorArg_Intrinsic;

//
// Integer arithmetic
//

def int_aarch64_sve_add   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_add_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sub   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sub_u : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_subr  : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_pmul       : AdvSIMD_2VectorArg_Intrinsic;

def int_aarch64_sve_mul        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mul_u      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_mul_lane   : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_smulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smulh_u    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umulh_u    : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_sdiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sdiv_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_udiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_udiv_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sdivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_udivr      : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_smax       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smax_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umax       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umax_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smin       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smin_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umin       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umin_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sabd_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uabd_u     : AdvSIMD_Pred2VectorArg_Intrinsic;

def int_aarch64_sve_mad        : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_msb        : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla        : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla_u      : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mla_lane   : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_mls        : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mls_u      : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_mls_lane   : AdvSIMD_3VectorArgIndexed_Intrinsic;

def int_aarch64_sve_saddv      : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
def int_aarch64_sve_uaddv      : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;

def int_aarch64_sve_smaxv      : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_umaxv      : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_sminv      : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_uminv      : AdvSIMD_SVE_Reduce_Intrinsic;

def int_aarch64_sve_orv        : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_eorv       : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_andv       : AdvSIMD_SVE_Reduce_Intrinsic;

def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;

def int_aarch64_sve_sdot      : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_sdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;

def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
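
// The dot products accumulate 4-way products of quarter-width elements; a
// sketch at nxv4i32 (SSA names illustrative):
//   %d = call <vscale x 4 x i32> @llvm.aarch64.sve.sdot.nxv4i32(
//            <vscale x 4 x i32> %acc, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)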

def int_aarch64_sve_sqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_sqsub_x   : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uqadd_x   : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uqsub_x   : AdvSIMD_2VectorArg_Intrinsic;

// Shifts

def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_asr_u    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_asr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
def int_aarch64_sve_asrd     : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_insr     : AdvSIMD_SVE_INSR_Intrinsic;
def int_aarch64_sve_lsl      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsl_u    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsl_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
def int_aarch64_sve_lsr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsr_u    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;

//
// Integer comparisons
//

def int_aarch64_sve_cmpeq : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmpge : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmpgt : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmphi : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmphs : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_cmpne : AdvSIMD_SVE_Compare_Intrinsic;

def int_aarch64_sve_cmpeq_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpge_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpgt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmphi_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmphs_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmple_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmplo_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpls_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmplt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
def int_aarch64_sve_cmpne_wide : AdvSIMD_SVE_CompareWide_Intrinsic;

//
// Counting bits
//

def int_aarch64_sve_cls : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_clz : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;

//
// Counting elements
//

def int_aarch64_sve_cntb : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cnth : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cntw : AdvSIMD_SVE_CNTB_Intrinsic;
def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic;

def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic;

//
// FFR manipulation
//

def int_aarch64_sve_rdffr   : ClangBuiltin<"__builtin_sve_svrdffr">,   DefaultAttrsIntrinsic<[llvm_nxv16i1_ty], [], [IntrReadMem, IntrInaccessibleMemOnly]>;
def int_aarch64_sve_rdffr_z : ClangBuiltin<"__builtin_sve_svrdffr_z">, DefaultAttrsIntrinsic<[llvm_nxv16i1_ty], [llvm_nxv16i1_ty], [IntrReadMem, IntrInaccessibleMemOnly]>;
def int_aarch64_sve_setffr  : ClangBuiltin<"__builtin_sve_svsetffr">,  DefaultAttrsIntrinsic<[], [], [IntrWriteMem, IntrInaccessibleMemOnly]>;
def int_aarch64_sve_wrffr   : ClangBuiltin<"__builtin_sve_svwrffr">,   DefaultAttrsIntrinsic<[], [llvm_nxv16i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly]>;
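
// The FFR is written implicitly by the first-fault/no-fault loads above
// (ldff1/ldnf1); rdffr reads it back so software can tell which lanes
// completed before a fault.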

//
// Saturating scalar arithmetic
//

def int_aarch64_sve_sqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqdecp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_sqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

def int_aarch64_sve_sqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_sqincp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_sqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_sqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_sqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

def int_aarch64_sve_uqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqdecp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_uqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

def int_aarch64_sve_uqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
def int_aarch64_sve_uqincp : AdvSIMD_SVE_Saturating_Intrinsic;

def int_aarch64_sve_uqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
def int_aarch64_sve_uqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
def int_aarch64_sve_uqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;

//
// Reversal
//

def int_aarch64_sve_rbit : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_revb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_revh : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_revw : AdvSIMD_Merged1VectorArg_Intrinsic;

//
// Permutations and selection
//

def int_aarch64_sve_clasta    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_clasta_n  : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_clastb    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_clastb_n  : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_compact   : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_dupq_lane : AdvSIMD_SVE_DUPQ_Intrinsic;
def int_aarch64_sve_ext       : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sel       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lasta     : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_lastb     : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_rev       : AdvSIMD_1VectorArg_Intrinsic;
def int_aarch64_sve_rev_b16   : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_rev_b32   : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_rev_b64   : AdvSIMD_SVE_2SVBoolArg_Intrinsic;
def int_aarch64_sve_splice    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sunpkhi   : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_sunpklo   : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_tbl       : AdvSIMD_SVE_TBL_Intrinsic;
def int_aarch64_sve_trn1      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn1_b16  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1_b32  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1_b64  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn2_b16  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2_b32  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn2_b64  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_trn1q     : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_trn2q     : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uunpkhi   : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uunpklo   : AdvSIMD_SVE_Unpack_Intrinsic;
def int_aarch64_sve_uzp1      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp1_b16  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1_b32  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1_b64  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp2_b16  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2_b32  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp2_b64  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_uzp1q     : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_uzp2q     : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip1      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip1_b16  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1_b32  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1_b64  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip2_b16  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2_b32  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip2_b64  : AdvSIMD_SVE_3SVBoolArg_Intrinsic;
def int_aarch64_sve_zip1q     : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_zip2q     : AdvSIMD_2VectorArg_Intrinsic;

//
// Logical operations
//

def int_aarch64_sve_and  : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_and_u: AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic  : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic_u: AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_cnot : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_eor  : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eor_u: AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_not  : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_orr  : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orr_u: AdvSIMD_Pred2VectorArg_Intrinsic;

//
// Conversion
//

def int_aarch64_sve_sxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sxth : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sxtw : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_uxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_uxth : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_uxtw : AdvSIMD_Merged1VectorArg_Intrinsic;

//
// While comparisons
//

def int_aarch64_sve_whilele : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilelo : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilels : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilelt : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilege : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilegt : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilehs : AdvSIMD_SVE_WHILE_Intrinsic;
def int_aarch64_sve_whilehi : AdvSIMD_SVE_WHILE_Intrinsic;
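// For example (hypothetical induction variable %i and trip count %n):
//   %p = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %i, i64 %n)
// yields a predicate whose lane l is active iff %i + l < %n.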

//
// Floating-point arithmetic
//

def int_aarch64_sve_fabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fabd_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fabs       : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_fadd       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fadd_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fcadd      : AdvSIMD_SVE_CADD_Intrinsic;
def int_aarch64_sve_fcmla      : AdvSIMD_SVE_CMLA_Intrinsic;
def int_aarch64_sve_fcmla_lane : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
def int_aarch64_sve_fdiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fdiv_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fdivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fexpa_x    : AdvSIMD_SVE_EXPA_Intrinsic;
def int_aarch64_sve_fmad       : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmax       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmax_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxnm     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxnm_u   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmin       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmin_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminnm     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminnm_u   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmla       : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmla_lane  : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_fmla_u     : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmls       : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmls_lane  : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_fmls_u     : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmsb       : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fmul       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmul_lane  : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_fmul_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmulx      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmulx_u    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fneg       : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_fnmad      : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmla      : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmla_u    : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmls      : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmls_u    : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_fnmsb      : AdvSIMD_Pred3VectorArg_Intrinsic;
def int_aarch64_sve_frecpe_x   : AdvSIMD_1VectorArg_Intrinsic;
def int_aarch64_sve_frecps_x   : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_frecpx     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frinta     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frinti     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintm     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintn     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintp     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintx     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frintz     : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_frsqrte_x  : AdvSIMD_1VectorArg_Intrinsic;
def int_aarch64_sve_frsqrts_x  : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_fscale     : AdvSIMD_SVE_SCALE_Intrinsic;
def int_aarch64_sve_fsqrt      : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_fsub       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fsub_u     : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fsubr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_ftmad_x    : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_ftsmul_x   : AdvSIMD_SVE_TSMUL_Intrinsic;
def int_aarch64_sve_ftssel_x   : AdvSIMD_SVE_TSMUL_Intrinsic;
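// Example IR for the predicated fused multiply-add (hypothetical values;
// the accumulator comes first, followed by the two multiplicands):
//   %r = call <vscale x 4 x float> @llvm.aarch64.sve.fmla.nxv4f32(
//            <vscale x 4 x i1> %pg, <vscale x 4 x float> %zda,
//            <vscale x 4 x float> %zn, <vscale x 4 x float> %zm)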

//
// Floating-point reductions
//

def int_aarch64_sve_fadda   : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
def int_aarch64_sve_faddv   : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fmaxv   : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fminv   : AdvSIMD_SVE_Reduce_Intrinsic;
def int_aarch64_sve_fminnmv : AdvSIMD_SVE_Reduce_Intrinsic;
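// Reductions return a scalar of the element type, e.g. (hypothetical values):
//   %sum = call float @llvm.aarch64.sve.faddv.nxv4f32(
//              <vscale x 4 x i1> %pg, <vscale x 4 x float> %v)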

//
// Floating-point conversions
//

def int_aarch64_sve_fcvt   : AdvSIMD_SVE_FCVT_Intrinsic;
def int_aarch64_sve_fcvtzs : AdvSIMD_SVE_FCVTZS_Intrinsic;
def int_aarch64_sve_fcvtzu : AdvSIMD_SVE_FCVTZS_Intrinsic;
def int_aarch64_sve_scvtf  : AdvSIMD_SVE_SCVTF_Intrinsic;
def int_aarch64_sve_ucvtf  : AdvSIMD_SVE_SCVTF_Intrinsic;

//
// Floating-point comparisons
//

def int_aarch64_sve_facge : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_facgt : AdvSIMD_SVE_Compare_Intrinsic;

def int_aarch64_sve_fcmpeq : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpge : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpgt : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpne : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_fcmpuo : AdvSIMD_SVE_Compare_Intrinsic;

def int_aarch64_sve_fcvtzs_i32f16   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzs_i32f64   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvtzs_i64f16   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzs_i64f32   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvt_bf16f32    : Builtin_SVCVT<llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvtnt_bf16f32  : Builtin_SVCVT<llvm_nxv8bf16_ty, llvm_nxv8i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvtzu_i32f16   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzu_i32f64   : Builtin_SVCVT<llvm_nxv4i32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvtzu_i64f16   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtzu_i64f32   : Builtin_SVCVT<llvm_nxv2i64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvt_f16f32     : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvt_f16f64     : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvt_f32f64     : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;

def int_aarch64_sve_fcvt_f32f16     : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvt_f64f16     : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvt_f64f32     : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;

def int_aarch64_sve_fcvtlt_f32f16   : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv4i1_ty, llvm_nxv8f16_ty>;
def int_aarch64_sve_fcvtlt_f64f32   : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvtnt_f16f32   : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4f32_ty>;
def int_aarch64_sve_fcvtnt_f32f64   : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;

def int_aarch64_sve_fcvtx_f32f64    : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;
def int_aarch64_sve_fcvtxnt_f32f64  : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2f64_ty>;

def int_aarch64_sve_scvtf_f16i32    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
def int_aarch64_sve_scvtf_f16i64    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_scvtf_f32i64    : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_scvtf_f64i32    : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;

def int_aarch64_sve_ucvtf_f16i32    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv4i1_ty, llvm_nxv4i32_ty>;
def int_aarch64_sve_ucvtf_f16i64    : Builtin_SVCVT<llvm_nxv8f16_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_ucvtf_f32i64    : Builtin_SVCVT<llvm_nxv4f32_ty, llvm_nxv2i1_ty, llvm_nxv2i64_ty>;
def int_aarch64_sve_ucvtf_f64i32    : Builtin_SVCVT<llvm_nxv2f64_ty, llvm_nxv2i1_ty, llvm_nxv4i32_ty>;
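// These conversion intrinsics carry fixed types, so their IR names take no
// overload suffix; a sketch (hypothetical values) of the f32 -> f16 narrowing
// convert, with the merge value first:
//   %h = call <vscale x 8 x half> @llvm.aarch64.sve.fcvt.f16f32(
//            <vscale x 8 x half> %inactive, <vscale x 4 x i1> %pg,
//            <vscale x 4 x float> %s)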

//
// Predicate creation
//

def int_aarch64_sve_ptrue : AdvSIMD_SVE_PTRUE_Intrinsic;
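// Example (the i32 operand is an SVE predicate pattern; 31 selects SV_ALL):
//   %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)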

//
// Predicate operations
//

def int_aarch64_sve_and_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_bic_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_brka    : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_brka_z  : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_brkb    : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_brkb_z  : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_brkn_z  : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_brkpa_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_brkpb_z : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_eor_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nand_z  : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_nor_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orn_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_orr_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_pfirst  : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_pnext   : AdvSIMD_Pred1VectorArg_Intrinsic;
def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;
def int_aarch64_sve_punpklo : AdvSIMD_SVE_PUNPKHI_Intrinsic;
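// e.g. a zeroing break-after, governed by %pg (hypothetical values):
//   %r = call <vscale x 16 x i1> @llvm.aarch64.sve.brka.z.nxv16i1(
//            <vscale x 16 x i1> %pg, <vscale x 16 x i1> %op)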

//
// Testing predicates
//

def int_aarch64_sve_ptest_any   : AdvSIMD_SVE_PTEST_Intrinsic;
def int_aarch64_sve_ptest_first : AdvSIMD_SVE_PTEST_Intrinsic;
def int_aarch64_sve_ptest_last  : AdvSIMD_SVE_PTEST_Intrinsic;

//
// Reinterpreting data
//

def int_aarch64_sve_convert_from_svbool : DefaultAttrsIntrinsic<[llvm_any_ty],
                                                    [llvm_nxv16i1_ty],
                                                    [IntrNoMem]>;

def int_aarch64_sve_convert_to_svbool : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                                                  [llvm_any_ty],
                                                  [IntrNoMem]>;
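// Predicates for wider elements are derived from the canonical nxv16i1 form
// through these casts, e.g. (hypothetical %pg):
//   %pg4 = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(
//              <vscale x 16 x i1> %pg)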

//
// Gather loads: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 64 bit scaled offsets
def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;

// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;
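// A 64-bit scaled-offset gather in IR (hypothetical values; the indices are
// scaled by the element size):
//   %v = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(
//            <vscale x 2 x i1> %pg, ptr %base, <vscale x 2 x i64> %indices)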

//
// Gather loads: vector base + scalar offset
//

def int_aarch64_sve_ld1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_Intrinsic;


//
// First-faulting gather loads: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_ldff1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic;

// 64 bit scaled offsets
def int_aarch64_sve_ldff1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_WriteFFR_Intrinsic;

// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_ldff1_gather_sxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;
def int_aarch64_sve_ldff1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;

// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_ldff1_gather_sxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;
def int_aarch64_sve_ldff1_gather_uxtw_index : AdvSIMD_GatherLoad_SV_32b_Offsets_WriteFFR_Intrinsic;

//
// First-faulting gather loads: vector base + scalar offset
//

def int_aarch64_sve_ldff1_gather_scalar_offset : AdvSIMD_GatherLoad_VS_WriteFFR_Intrinsic;


//
// Non-temporal gather loads: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_ldnt1_gather : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 64 bit indices
def int_aarch64_sve_ldnt1_gather_index : AdvSIMD_GatherLoad_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
def int_aarch64_sve_ldnt1_gather_uxtw : AdvSIMD_GatherLoad_SV_32b_Offsets_Intrinsic;

//
// Non-temporal gather loads: vector base + scalar offset
//

def int_aarch64_sve_ldnt1_gather_scalar_offset  : AdvSIMD_GatherLoad_VS_Intrinsic;

//
// Scatter stores: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 64 bit scaled offsets
def int_aarch64_sve_st1_scatter_index
    : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

def int_aarch64_sve_st1_scatter_uxtw
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

// 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended to 64 bits
def int_aarch64_sve_st1_scatter_sxtw_index
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

def int_aarch64_sve_st1_scatter_uxtw_index
    : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;
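// e.g. a 32-bit zero-extended unscaled scatter (hypothetical values; the data
// to store comes first):
//   call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32(
//       <vscale x 4 x i32> %data, <vscale x 4 x i1> %pg,
//       ptr %base, <vscale x 4 x i32> %offsets)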

//
// Scatter stores: vector base + scalar offset
//

def int_aarch64_sve_st1_scatter_scalar_offset : AdvSIMD_ScatterStore_VS_Intrinsic;

//
// Non-temporal scatter stores: scalar base + vector offsets
//

// 64 bit unscaled offsets
def int_aarch64_sve_stnt1_scatter : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 64 bit indices
def int_aarch64_sve_stnt1_scatter_index
    : AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic;

// 32 bit unscaled offsets, zero (uxtw) extended to 64 bits
def int_aarch64_sve_stnt1_scatter_uxtw : AdvSIMD_ScatterStore_SV_32b_Offsets_Intrinsic;

//
// Non-temporal scatter stores: vector base + scalar offset
//

def int_aarch64_sve_stnt1_scatter_scalar_offset  : AdvSIMD_ScatterStore_VS_Intrinsic;

//
// SVE2 - Uniform DSP operations
//

def int_aarch64_sve_saba          : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_shadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_shsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_shsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sli           : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqabs         : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sqadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqdmulh       : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_sqdmulh_lane  : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqneg         : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmlah      : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmlah_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqrdmlsh      : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmlsh_lane : AdvSIMD_3VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqrdmulh      : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_sqrdmulh_lane : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_sqrshl        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqshlu        : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_sqsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqsub_u       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sqsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_srhadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sri           : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_srshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_srshr         : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_srsra         : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_ssra          : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_suqadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uaba          : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_uhadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uhsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uhsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqadd         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqrshl        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqsub         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqsub_u       : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uqsubr        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_urecpe        : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_urhadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_urshl         : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_urshr         : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_ursqrte       : AdvSIMD_Merged1VectorArg_Intrinsic;
def int_aarch64_sve_ursra         : AdvSIMD_2VectorArgIndexed_Intrinsic;
def int_aarch64_sve_usqadd        : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_usra          : AdvSIMD_2VectorArgIndexed_Intrinsic;

//
// SVE2 - Widening DSP operations
//

def int_aarch64_sve_sabalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sabalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sabdlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_sabdlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_saddlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_saddlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_saddwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_saddwt : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_sshllb : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_sshllt : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssublb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssublt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssubwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_ssubwt : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_uabalb : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_uabalt : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_uabdlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uabdlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uaddlb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uaddlt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_uaddwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_uaddwt : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_ushllb : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_ushllt : SVE2_1VectorArg_Long_Intrinsic;
def int_aarch64_sve_usublb : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_usublt : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_usubwb : SVE2_2VectorArg_Wide_Intrinsic;
def int_aarch64_sve_usubwt : SVE2_2VectorArg_Wide_Intrinsic;
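// The "b"/"t" suffixes operate on the bottom (even-numbered) and top
// (odd-numbered) elements respectively; e.g. (hypothetical values):
//   %r = call <vscale x 8 x i16> @llvm.aarch64.sve.saddlb.nxv8i16(
//            <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)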

//
// SVE2 - Non-widening pairwise arithmetic
//

def int_aarch64_sve_addp    : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_faddp   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fmaxnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminp   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_fminnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_smaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_sminp   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_umaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_uminp   : AdvSIMD_Pred2VectorArg_Intrinsic;

//
// SVE2 - Widening pairwise arithmetic
//

def int_aarch64_sve_sadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;
def int_aarch64_sve_uadalp : SVE2_2VectorArg_Pred_Long_Intrinsic;

//
// SVE2 - Uniform complex integer arithmetic
//

def int_aarch64_sve_cadd_x           : AdvSIMD_SVE2_CADD_Intrinsic;
def int_aarch64_sve_sqcadd_x         : AdvSIMD_SVE2_CADD_Intrinsic;
def int_aarch64_sve_cmla_x           : AdvSIMD_SVE2_CMLA_Intrinsic;
def int_aarch64_sve_cmla_lane_x      : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
def int_aarch64_sve_sqrdcmlah_x      : AdvSIMD_SVE2_CMLA_Intrinsic;
def int_aarch64_sve_sqrdcmlah_lane_x : AdvSIMD_SVE_CMLA_LANE_Intrinsic;

//
// SVE2 - Widening complex integer arithmetic
//

def int_aarch64_sve_saddlbt   : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssublbt   : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_ssubltb   : SVE2_2VectorArg_Long_Intrinsic;

//
// SVE2 - Widening complex integer dot product
//

def int_aarch64_sve_cdot      : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
def int_aarch64_sve_cdot_lane : AdvSIMD_SVE_CDOT_LANE_Intrinsic;

//
// SVE2 - Floating-point widening multiply-accumulate
//

def int_aarch64_sve_fmlalb        : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlalb_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_fmlalt        : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlalt_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_fmlslb        : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlslb_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_fmlslt        : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_fmlslt_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;

//
// SVE2 - Floating-point integer binary logarithm
//

def int_aarch64_sve_flogb : AdvSIMD_SVE_LOGB_Intrinsic;

//
// SVE2 - Vector histogram count
//

def int_aarch64_sve_histcnt : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_histseg : AdvSIMD_2VectorArg_Intrinsic;

//
// SVE2 - Character match
//

def int_aarch64_sve_match   : AdvSIMD_SVE_Compare_Intrinsic;
def int_aarch64_sve_nmatch  : AdvSIMD_SVE_Compare_Intrinsic;

//
// SVE2 - Unary narrowing operations
//

def int_aarch64_sve_sqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtunb : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_sqxtunt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_uqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_uqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;

//
// SVE2 - Binary narrowing DSP operations
//
def int_aarch64_sve_addhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_addhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_raddhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_raddhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_subhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_subhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

def int_aarch64_sve_rsubhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
def int_aarch64_sve_rsubhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;

// Narrowing shift right
def int_aarch64_sve_shrnb     : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_shrnt     : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_rshrnb    : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_rshrnt    : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

// Saturating shift right - signed input/output
def int_aarch64_sve_sqshrnb   : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqshrnt   : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_sqrshrnb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrnt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

// Saturating shift right - unsigned input/output
def int_aarch64_sve_uqshrnb   : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_uqshrnt   : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_uqrshrnb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_uqrshrnt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

// Saturating shift right - signed input, unsigned output
def int_aarch64_sve_sqshrunb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqshrunt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;

def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
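// A sketch of a narrowing shift, assuming the usual overload mangling on the
// wide source type (hypothetical %a; immediate shift of 16):
//   %n = call <vscale x 8 x i16> @llvm.aarch64.sve.shrnb.nxv4i32(
//            <vscale x 4 x i32> %a, i32 16)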

// SVE2 MLA LANE.
def int_aarch64_sve_smlalb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlalt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlalb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlalt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlslb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smlslt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlslb_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_umlslt_lane   : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_smullb_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_smullt_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_umullb_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_umullt_lane   : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_sqdmlalb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlalt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlslb_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmlslt_lane : SVE2_3VectorArg_Indexed_Intrinsic;
def int_aarch64_sve_sqdmullb_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;
def int_aarch64_sve_sqdmullt_lane : SVE2_2VectorArgIndexed_Long_Intrinsic;

// SVE2 MLA Unpredicated.
def int_aarch64_sve_smlalb      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smlalt      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlalb      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlalt      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smlslb      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smlslt      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlslb      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_umlslt      : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_smullb      : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_smullt      : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_umullb      : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_umullt      : SVE2_2VectorArg_Long_Intrinsic;

def int_aarch64_sve_sqdmlalb    : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlalt    : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlslb    : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlslt    : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmullb    : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmullt    : SVE2_2VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlalbt   : SVE2_3VectorArg_Long_Intrinsic;
def int_aarch64_sve_sqdmlslbt   : SVE2_3VectorArg_Long_Intrinsic;

// SVE2 ADDSUB Long Unpredicated.
def int_aarch64_sve_adclb       : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_adclt       : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sbclb       : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_sbclt       : AdvSIMD_3VectorArg_Intrinsic;

//
// SVE2 - Polynomial arithmetic
//
def int_aarch64_sve_eorbt       : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_eortb       : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_pmullb_pair : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_pmullt_pair : AdvSIMD_2VectorArg_Intrinsic;

//
// SVE2 bitwise ternary operations.
//
def int_aarch64_sve_eor3   : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bcax   : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bsl    : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bsl1n  : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_bsl2n  : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_nbsl   : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_sve_xar    : AdvSIMD_2VectorArgIndexed_Intrinsic;
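// All three operands share one type; e.g. a three-way XOR (hypothetical
// values):
//   %r = call <vscale x 16 x i8> @llvm.aarch64.sve.eor3.nxv16i8(
//            <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)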

//
// SVE2 - Optional AES, SHA-3 and SM4
//

def int_aarch64_sve_aesd    : ClangBuiltin<"__builtin_sve_svaesd_u8">,
                              DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                        [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                                        [IntrNoMem]>;
def int_aarch64_sve_aesimc  : ClangBuiltin<"__builtin_sve_svaesimc_u8">,
                              DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                        [llvm_nxv16i8_ty],
                                        [IntrNoMem]>;
def int_aarch64_sve_aese    : ClangBuiltin<"__builtin_sve_svaese_u8">,
                              DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                        [llvm_nxv16i8_ty, llvm_nxv16i8_ty],
                                        [IntrNoMem]>;
def int_aarch64_sve_aesmc   : ClangBuiltin<"__builtin_sve_svaesmc_u8">,
                              DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
                                        [llvm_nxv16i8_ty],
                                        [IntrNoMem]>;
def int_aarch64_sve_rax1    : ClangBuiltin<"__builtin_sve_svrax1_u64">,
                              DefaultAttrsIntrinsic<[llvm_nxv2i64_ty],
                                        [llvm_nxv2i64_ty, llvm_nxv2i64_ty],
                                        [IntrNoMem]>;
def int_aarch64_sve_sm4e    : ClangBuiltin<"__builtin_sve_svsm4e_u32">,
                              DefaultAttrsIntrinsic<[llvm_nxv4i32_ty],
                                        [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
                                        [IntrNoMem]>;
def int_aarch64_sve_sm4ekey : ClangBuiltin<"__builtin_sve_svsm4ekey_u32">,
                              DefaultAttrsIntrinsic<[llvm_nxv4i32_ty],
                                        [llvm_nxv4i32_ty, llvm_nxv4i32_ty],
                                        [IntrNoMem]>;
//
// SVE2 - Extended table lookup/permute
//

def int_aarch64_sve_tbl2 : AdvSIMD_SVE2_TBX_Intrinsic;
def int_aarch64_sve_tbx  : AdvSIMD_SVE2_TBX_Intrinsic;

//
// SVE2 - Optional bit permutation
//

def int_aarch64_sve_bdep_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_bext_x : AdvSIMD_2VectorArg_Intrinsic;
def int_aarch64_sve_bgrp_x : AdvSIMD_2VectorArg_Intrinsic;


//
// SVE ACLE: 7.3. INT8 matrix multiply extensions
//
def int_aarch64_sve_ummla : SVE_MatMul_Intrinsic;
def int_aarch64_sve_smmla : SVE_MatMul_Intrinsic;
def int_aarch64_sve_usmmla : SVE_MatMul_Intrinsic;

def int_aarch64_sve_usdot : AdvSIMD_SVE_DOT_Intrinsic;
def int_aarch64_sve_usdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
def int_aarch64_sve_sudot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;

//
// SVE ACLE: 7.4/5. FP64/FP32 matrix multiply extensions
//
def int_aarch64_sve_fmmla : AdvSIMD_3VectorArg_Intrinsic;

//
// SVE ACLE: 7.2. BFloat16 extensions
//

def int_aarch64_sve_bfdot   : SVE_4Vec_BF16;
def int_aarch64_sve_bfmlalb : SVE_4Vec_BF16;
def int_aarch64_sve_bfmlalt : SVE_4Vec_BF16;

def int_aarch64_sve_bfmmla  : SVE_4Vec_BF16;

def int_aarch64_sve_bfdot_lane_v2   : SVE_4Vec_BF16_Indexed;
def int_aarch64_sve_bfmlalb_lane_v2 : SVE_4Vec_BF16_Indexed;
def int_aarch64_sve_bfmlalt_lane_v2 : SVE_4Vec_BF16_Indexed;

//
// SVE2.1 - Contiguous loads to multiple consecutive vectors
//

  class SVE2p1_Load_PN_X2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [llvm_aarch64_svcount_ty, llvm_ptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;

  class SVE2p1_Load_PN_X4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                [llvm_aarch64_svcount_ty, llvm_ptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;

def int_aarch64_sve_ld1_pn_x2 : SVE2p1_Load_PN_X2_Intrinsic;
def int_aarch64_sve_ld1_pn_x4 : SVE2p1_Load_PN_X4_Intrinsic;
def int_aarch64_sve_ldnt1_pn_x2 : SVE2p1_Load_PN_X2_Intrinsic;
def int_aarch64_sve_ldnt1_pn_x4 : SVE2p1_Load_PN_X4_Intrinsic;

//
// SVE2.1 - Contiguous stores to multiple consecutive vectors
//

  class SVE2p1_Store_PN_X2_Intrinsic
    : DefaultAttrsIntrinsic<[], [ llvm_anyvector_ty, LLVMMatchType<0>,
                                  llvm_aarch64_svcount_ty, llvm_ptr_ty ],
                [IntrWriteMem, IntrArgMemOnly]>;

  class SVE2p1_Store_PN_X4_Intrinsic
    : DefaultAttrsIntrinsic<[], [ llvm_anyvector_ty, LLVMMatchType<0>,
                                  LLVMMatchType<0>, LLVMMatchType<0>,
                                  llvm_aarch64_svcount_ty, llvm_ptr_ty],
                [IntrWriteMem, IntrArgMemOnly]>;

def int_aarch64_sve_st1_pn_x2 : SVE2p1_Store_PN_X2_Intrinsic;
def int_aarch64_sve_st1_pn_x4 : SVE2p1_Store_PN_X4_Intrinsic;
def int_aarch64_sve_stnt1_pn_x2 : SVE2p1_Store_PN_X2_Intrinsic;
def int_aarch64_sve_stnt1_pn_x4 : SVE2p1_Store_PN_X4_Intrinsic;
}

//
// SVE2 - Contiguous conflict detection
//

def int_aarch64_sve_whilerw_b : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilerw_h : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilerw_s : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilerw_d : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_b : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_h : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_s : SVE2_CONFLICT_DETECT_Intrinsic;
def int_aarch64_sve_whilewr_d : SVE2_CONFLICT_DETECT_Intrinsic;

// Scalable Matrix Extension (SME) Intrinsics
let TargetPrefix = "aarch64" in {
  class SME_Load_Store_Intrinsic<LLVMType pred_ty>
    : DefaultAttrsIntrinsic<[],
        [pred_ty, llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;

  // Loads
  def int_aarch64_sme_ld1b_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
  def int_aarch64_sme_ld1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
  def int_aarch64_sme_ld1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
  def int_aarch64_sme_ld1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
  def int_aarch64_sme_ld1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
  def int_aarch64_sme_ld1b_vert  : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
  def int_aarch64_sme_ld1h_vert  : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
  def int_aarch64_sme_ld1w_vert  : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
  def int_aarch64_sme_ld1d_vert  : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
  def int_aarch64_sme_ld1q_vert  : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;

  // Stores
  def int_aarch64_sme_st1b_horiz : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
  def int_aarch64_sme_st1h_horiz : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
  def int_aarch64_sme_st1w_horiz : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
  def int_aarch64_sme_st1d_horiz : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
  def int_aarch64_sme_st1q_horiz : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
  def int_aarch64_sme_st1b_vert  : SME_Load_Store_Intrinsic<llvm_nxv16i1_ty>;
  def int_aarch64_sme_st1h_vert  : SME_Load_Store_Intrinsic<llvm_nxv8i1_ty>;
  def int_aarch64_sme_st1w_vert  : SME_Load_Store_Intrinsic<llvm_nxv4i1_ty>;
  def int_aarch64_sme_st1d_vert  : SME_Load_Store_Intrinsic<llvm_nxv2i1_ty>;
  def int_aarch64_sme_st1q_vert  : SME_Load_Store_Intrinsic<llvm_nxv1i1_ty>;
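  // Illustrative use (hypothetical values): load a horizontal slice of tile 0,
  // where operand 2 is the immediate tile number and operand 3 the slice index:
  //   call void @llvm.aarch64.sme.ld1w.horiz(<vscale x 4 x i1> %pg, ptr %p,
  //                                          i32 0, i32 %slice)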

  // Spill + fill
  def int_aarch64_sme_ldr : DefaultAttrsIntrinsic<
    [], [llvm_i32_ty, llvm_ptr_ty]>;
  def int_aarch64_sme_str : DefaultAttrsIntrinsic<
    [], [llvm_i32_ty, llvm_ptr_ty]>;

  class SME_TileToVector_Intrinsic
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
          [LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
           llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;
  class SME_VectorToTile_Intrinsic
      : DefaultAttrsIntrinsic<[],
          [llvm_i32_ty, llvm_i32_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
           llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>;

  def int_aarch64_sme_read_horiz  : SME_TileToVector_Intrinsic;
  def int_aarch64_sme_read_vert   : SME_TileToVector_Intrinsic;
  def int_aarch64_sme_write_horiz : SME_VectorToTile_Intrinsic;
  def int_aarch64_sme_write_vert  : SME_VectorToTile_Intrinsic;

  def int_aarch64_sme_readq_horiz  : SME_TileToVector_Intrinsic;
  def int_aarch64_sme_readq_vert   : SME_TileToVector_Intrinsic;
  def int_aarch64_sme_writeq_horiz : SME_VectorToTile_Intrinsic;
  def int_aarch64_sme_writeq_vert  : SME_VectorToTile_Intrinsic;

  def int_aarch64_sme_zero : DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;

  class SME_OuterProduct_Intrinsic
      : DefaultAttrsIntrinsic<[],
          [llvm_i32_ty,
           LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
           LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
           LLVMMatchType<0>,
           llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>;

  def int_aarch64_sme_mopa : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_mops : SME_OuterProduct_Intrinsic;

  def int_aarch64_sme_mopa_wide : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_mops_wide : SME_OuterProduct_Intrinsic;

  def int_aarch64_sme_smopa_wide  : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_smops_wide  : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_umopa_wide  : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_umops_wide  : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_sumopa_wide : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_sumops_wide : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_usmopa_wide : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_usmops_wide : SME_OuterProduct_Intrinsic;
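  // e.g. a 32-bit floating-point outer product accumulating into tile 0
  // (hypothetical values; operand 0 is the immediate tile number):
  //   call void @llvm.aarch64.sme.mopa.nxv4f32(i32 0, <vscale x 4 x i1> %pn,
  //       <vscale x 4 x i1> %pm, <vscale x 4 x float> %zn, <vscale x 4 x float> %zm)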

  class SME_AddVectorToTile_Intrinsic
      : DefaultAttrsIntrinsic<[],
          [llvm_i32_ty,
           LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
           LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
           llvm_anyvector_ty], [ImmArg<ArgIndex<0>>]>;

  def int_aarch64_sme_addha : SME_AddVectorToTile_Intrinsic;
  def int_aarch64_sme_addva : SME_AddVectorToTile_Intrinsic;

  //
  // Counting elements
  //

  class AdvSIMD_SME_CNTSB_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem]>;

  def int_aarch64_sme_cntsb : AdvSIMD_SME_CNTSB_Intrinsic;
  def int_aarch64_sme_cntsh : AdvSIMD_SME_CNTSB_Intrinsic;
  def int_aarch64_sme_cntsw : AdvSIMD_SME_CNTSB_Intrinsic;
  def int_aarch64_sme_cntsd : AdvSIMD_SME_CNTSB_Intrinsic;
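  // These return the streaming vector length in bytes, halfwords, words or
  // doublewords, e.g.:
  //   %svl_b = call i64 @llvm.aarch64.sme.cntsb()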

  //
  // PSTATE Functions
  //

  def int_aarch64_sme_get_tpidr2
      : DefaultAttrsIntrinsic<[llvm_i64_ty], [],
                              [IntrNoMem, IntrHasSideEffects]>;
  def int_aarch64_sme_set_tpidr2
      : DefaultAttrsIntrinsic<[], [llvm_i64_ty],
                              [IntrNoMem, IntrHasSideEffects]>;

  def int_aarch64_sme_za_enable
      : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
  def int_aarch64_sme_za_disable
      : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

  //
  // Clamp
  //

  def int_aarch64_sve_sclamp : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_sve_uclamp : AdvSIMD_3VectorArg_Intrinsic;
  def int_aarch64_sve_fclamp : AdvSIMD_3VectorArg_Intrinsic;


  //
  // Reversal
  //

  def int_aarch64_sve_revd : AdvSIMD_Merged1VectorArg_Intrinsic;

  //
  // Predicate selection
  //

  def int_aarch64_sve_psel
      : DefaultAttrsIntrinsic<[llvm_nxv16i1_ty],
                              [llvm_nxv16i1_ty,
                               llvm_anyvector_ty, llvm_i32_ty],
                              [IntrNoMem]>;

  //
  // Predicate-pair intrinsics
  //
  foreach cmp = ["ge", "gt", "hi", "hs", "le", "lo", "ls", "lt"] in {
    def int_aarch64_sve_while # cmp # _x2
        : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                                [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
  }
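  // Each expansion returns a pair of predicate vectors, e.g. (hypothetical
  // values, assuming the standard overload mangling):
  //   %pp = call { <vscale x 4 x i1>, <vscale x 4 x i1> }
  //             @llvm.aarch64.sve.whilelt.x2.nxv4i1(i64 %i, i64 %n)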

  //
  // Predicate-as-counter intrinsics
  //

  def int_aarch64_sve_pext
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [llvm_aarch64_svcount_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_aarch64_sve_pext_x2
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                              [llvm_aarch64_svcount_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_aarch64_sve_ptrue_c8
      : DefaultAttrsIntrinsic<[llvm_aarch64_svcount_ty], [], [IntrNoMem]>;
  def int_aarch64_sve_ptrue_c16
      : DefaultAttrsIntrinsic<[llvm_aarch64_svcount_ty], [], [IntrNoMem]>;
  def int_aarch64_sve_ptrue_c32
      : DefaultAttrsIntrinsic<[llvm_aarch64_svcount_ty], [], [IntrNoMem]>;
  def int_aarch64_sve_ptrue_c64
      : DefaultAttrsIntrinsic<[llvm_aarch64_svcount_ty], [], [IntrNoMem]>;

  def int_aarch64_sve_cntp_c8
      : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_aarch64_svcount_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_aarch64_sve_cntp_c16
      : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_aarch64_svcount_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_aarch64_sve_cntp_c32
      : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_aarch64_svcount_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_aarch64_sve_cntp_c64
      : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_aarch64_svcount_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  // While (predicate-as-counter) intrinsics
  foreach cmp = ["ge", "gt", "hi", "hs", "le", "lo", "ls", "lt"] in {
    foreach ty = ["c8", "c16", "c32", "c64"] in {
      def int_aarch64_sve_while # cmp # _ # ty
          : DefaultAttrsIntrinsic<[llvm_aarch64_svcount_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
                                  [IntrNoMem, ImmArg<ArgIndex<2>>]>;
    }
  }
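  // e.g. forming a predicate-as-counter for 32-bit elements, where the
  // trailing immediate selects the vector group (hypothetical values):
  //   %pn = call target("aarch64.svcount")
  //             @llvm.aarch64.sve.whilelt.c32(i64 %i, i64 %n, i32 2)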

  //
  // SME2 Intrinsics
  //

  class SME2_Matrix_ArrayVector_Single_Single_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>],
                []>;

  class SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>],
                []>;

  class SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>],
                []>;

  class SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                []>;

  class SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                []>;

  class SME2_Matrix_ArrayVector_Single_Index_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                llvm_anyvector_ty,
                LLVMMatchType<0>, llvm_i32_ty],
                [ImmArg<ArgIndex<3>>]>;

  class SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_i32_ty],
                [ImmArg<ArgIndex<4>>]>;

  class SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic
    : DefaultAttrsIntrinsic<[],
                [llvm_i32_ty,
                 llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_i32_ty],
                [ImmArg<ArgIndex<6>>]>;

  class SME2_VG2_Multi_Imm_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                [llvm_anyvector_ty, LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  class SME2_VG4_Multi_Imm_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide4VectorType<0>],
                [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<ArgIndex<4>>]>;

  class SME2_ZA_Write_VG2_Intrinsic
   : DefaultAttrsIntrinsic<[],
               [llvm_i32_ty,
                llvm_anyvector_ty, LLVMMatchType<0>],
               []>;

  class SME2_ZA_Write_VG4_Intrinsic
   : DefaultAttrsIntrinsic<[],
               [llvm_i32_ty,
                llvm_anyvector_ty, LLVMMatchType<0>,
                LLVMMatchType<0>,  LLVMMatchType<0>],
               []>;

  class SME2_VG2_Multi_Single_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class SME2_VG4_Multi_Single_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>,  LLVMMatchType<0>],
                            [LLVMMatchType<0>,  LLVMMatchType<0>,
                             LLVMMatchType<0>,  LLVMMatchType<0>,
                             LLVMMatchType<0>],
                            [IntrNoMem]>;

  class SME2_VG2_Multi_Multi_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class SME2_VG4_Multi_Multi_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>,  LLVMMatchType<0>],
                            [LLVMMatchType<0>,  LLVMMatchType<0>,
                             LLVMMatchType<0>,  LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

  class SVE2_VG2_Sel_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [llvm_aarch64_svcount_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>], [IntrNoMem]>;

  class SVE2_VG4_Sel_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                [llvm_aarch64_svcount_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>], [IntrNoMem]>;

  class SME2_CVT_VG2_SINGLE_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide2VectorType<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;

  class SME2_CVT_VG2_SINGLE_BF16_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_nxv8bf16_ty],
                            [llvm_nxv4f32_ty, llvm_nxv4f32_ty],
                            [IntrNoMem]>;

  class SME2_CVT_VG4_SINGLE_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMSubdivide4VectorType<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

  class SME2_CVT_FtoI_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                            [LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

  class SME2_CVT_ItoF_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>],
                            [IntrNoMem]>;

  class SME2_CVT_FtoI_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>,
                             LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [IntrNoMem]>;

  class SME2_CVT_ItoF_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>,
                             LLVMVectorOfBitcastsToInt<0>, LLVMVectorOfBitcastsToInt<0>],
                            [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                            [IntrNoMem]>;

  class SME2_ZA_ArrayVector_Read_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [llvm_i32_ty],
                []>;

  class SME2_ZA_ArrayVector_Read_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>,  LLVMMatchType<0>],
                [llvm_i32_ty],
                []>;

  class SME2_Matrix_TileVector_Read_VG2_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [llvm_i32_ty, llvm_i32_ty],
                []>;

  class SME2_Matrix_TileVector_Read_VG4_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>,  LLVMMatchType<0>],
                [llvm_i32_ty, llvm_i32_ty],
                []>;

  class SME2_ZA_ArrayVector_Write_VG2_Intrinsic
   : DefaultAttrsIntrinsic<[],
               [llvm_i32_ty,
                llvm_anyvector_ty, LLVMMatchType<0>],
               []>;

  class SME2_ZA_ArrayVector_Write_VG4_Intrinsic
   : DefaultAttrsIntrinsic<[],
               [llvm_i32_ty,
                llvm_anyvector_ty, LLVMMatchType<0>,
                LLVMMatchType<0>,  LLVMMatchType<0>],
               []>;

  class SME2_Matrix_TileVector_Write_VG2_Intrinsic
   : DefaultAttrsIntrinsic<[],
               [llvm_i32_ty, llvm_i32_ty,
                llvm_anyvector_ty, LLVMMatchType<0>],
               [ImmArg<ArgIndex<0>>]>;

  class SME2_Matrix_TileVector_Write_VG4_Intrinsic
   : DefaultAttrsIntrinsic<[],
               [llvm_i32_ty, llvm_i32_ty,
                llvm_anyvector_ty, LLVMMatchType<0>,
                LLVMMatchType<0>,  LLVMMatchType<0>],
               [ImmArg<ArgIndex<0>>]>;

  class SME2_VG2_Multi_Single_Single_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class SME2_VG4_Multi_Single_Single_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class SVE2_VG2_ZipUzp_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;

  class SVE2_VG4_ZipUzp_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;

  class SME2_VG2_Unpk_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMSubdivide2VectorType<0>], [IntrNoMem]>;

  class SME2_VG4_Unpk_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                             LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMSubdivide2VectorType<0>, LLVMSubdivide2VectorType<0>],
                [IntrNoMem]>;

  //
  // Multi-vector fused multiply-add/subtract
  //

  def int_aarch64_sme_fmla_single_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_fmls_single_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_fmla_single_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
  def int_aarch64_sme_fmls_single_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

  def int_aarch64_sme_fmla_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_fmls_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_fmla_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
  def int_aarch64_sme_fmls_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

  def int_aarch64_sme_fmla_lane_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
  def int_aarch64_sme_fmls_lane_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
  def int_aarch64_sme_fmla_lane_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
  def int_aarch64_sme_fmls_lane_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

  //
  // Outer product and accumulate/subtract intrinsics
  //

  def int_aarch64_sme_smopa_za32 : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_umopa_za32 : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_smops_za32 : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_umops_za32 : SME_OuterProduct_Intrinsic;

  def int_aarch64_sme_bmopa_za32 : SME_OuterProduct_Intrinsic;
  def int_aarch64_sme_bmops_za32 : SME_OuterProduct_Intrinsic;

  //
  // Multi-vector rounding shift left intrinsics
  //

  def int_aarch64_sve_srshl_single_x2 : SME2_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sve_urshl_single_x2 : SME2_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sve_srshl_single_x4 : SME2_VG4_Multi_Single_Intrinsic;
  def int_aarch64_sve_urshl_single_x4 : SME2_VG4_Multi_Single_Intrinsic;

  def int_aarch64_sve_srshl_x2 : SME2_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sve_urshl_x2 : SME2_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sve_srshl_x4 : SME2_VG4_Multi_Multi_Intrinsic;
  def int_aarch64_sve_urshl_x4 : SME2_VG4_Multi_Multi_Intrinsic;

  // Multi-vector saturating rounding shift right intrinsics

  def int_aarch64_sve_sqrshr_x2 : SME2_VG2_Multi_Imm_Intrinsic;
  def int_aarch64_sve_uqrshr_x2 : SME2_VG2_Multi_Imm_Intrinsic;
  def int_aarch64_sve_sqrshr_x4 : SME2_VG4_Multi_Imm_Intrinsic;
  def int_aarch64_sve_uqrshr_x4 : SME2_VG4_Multi_Imm_Intrinsic;

  def int_aarch64_sve_sqrshrn_x2 : SME2_VG2_Multi_Imm_Intrinsic;
  def int_aarch64_sve_uqrshrn_x2 : SME2_VG2_Multi_Imm_Intrinsic;
  def int_aarch64_sve_sqrshrn_x4 : SME2_VG4_Multi_Imm_Intrinsic;
  def int_aarch64_sve_uqrshrn_x4 : SME2_VG4_Multi_Imm_Intrinsic;

  def int_aarch64_sve_sqrshru_x2 : SME2_VG2_Multi_Imm_Intrinsic;
  def int_aarch64_sve_sqrshru_x4 : SME2_VG4_Multi_Imm_Intrinsic;

  def int_aarch64_sve_sqrshrun_x2 : SME2_VG2_Multi_Imm_Intrinsic;
  def int_aarch64_sve_sqrshrun_x4 : SME2_VG4_Multi_Imm_Intrinsic;

  //
  // Multi-vector multiply-add/subtract long
  //

  foreach ty = ["f", "s", "u"] in {
    foreach instr = ["mlal", "mlsl"] in {
      def int_aarch64_sme_ # ty # instr # _single_vg2x1  : SME2_Matrix_ArrayVector_Single_Single_Intrinsic;
      def int_aarch64_sme_ # ty # instr # _single_vg2x2  : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
      def int_aarch64_sme_ # ty # instr # _single_vg2x4  : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

      def int_aarch64_sme_ # ty # instr # _vg2x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
      def int_aarch64_sme_ # ty # instr # _vg2x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

      def int_aarch64_sme_ # ty # instr # _lane_vg2x1  : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
      def int_aarch64_sme_ # ty # instr # _lane_vg2x2  : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
      def int_aarch64_sme_ # ty # instr # _lane_vg2x4  : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
    }
  }
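
  // For instance, the TableGen paste operator '#' above concatenates the loop
  // variables into the intrinsic name: ty = "f", instr = "mlal" yields
  // int_aarch64_sme_fmlal_single_vg2x1, int_aarch64_sme_fmlal_lane_vg2x4, etc.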

  //
  // Multi-vector multiply-add long long
  //

  foreach ty = ["s", "u"] in {
    foreach instr = ["mla", "mls"] in {
      foreach za = ["za32", "za64"] in {
        def int_aarch64_sme_ # ty # instr # _ # za # _single_vg4x1 : SME2_Matrix_ArrayVector_Single_Single_Intrinsic;
        def int_aarch64_sme_ # ty # instr # _ # za # _single_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
        def int_aarch64_sme_ # ty # instr # _ # za # _single_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

        def int_aarch64_sme_ # ty # instr # _ # za # _vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
        def int_aarch64_sme_ # ty # instr # _ # za # _vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

        def int_aarch64_sme_ # ty # instr # _ # za # _lane_vg4x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
        def int_aarch64_sme_ # ty # instr # _ # za # _lane_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
        def int_aarch64_sme_ # ty # instr # _ # za # _lane_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
      }
    }
  }

  def int_aarch64_sme_sumla_za32_single_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_sumla_za32_single_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

  def int_aarch64_sme_sumla_za32_lane_vg4x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
  def int_aarch64_sme_sumla_za32_lane_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
  def int_aarch64_sme_sumla_za32_lane_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

  def int_aarch64_sme_usmla_za32_single_vg4x1 : SME2_Matrix_ArrayVector_Single_Single_Intrinsic;
  def int_aarch64_sme_usmla_za32_single_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_usmla_za32_single_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

  def int_aarch64_sme_usmla_za32_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_usmla_za32_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

  def int_aarch64_sme_usmla_za32_lane_vg4x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
  def int_aarch64_sme_usmla_za32_lane_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
  def int_aarch64_sme_usmla_za32_lane_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

  // Multi-vector signed saturating doubling multiply high

  def int_aarch64_sve_sqdmulh_single_vgx2 : SME2_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sve_sqdmulh_single_vgx4 : SME2_VG4_Multi_Single_Intrinsic;

  def int_aarch64_sve_sqdmulh_vgx2 : SME2_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sve_sqdmulh_vgx4 : SME2_VG4_Multi_Multi_Intrinsic;

  // Multi-vector floating-point round to integral value

  foreach inst = ["a", "m", "n", "p"] in {
    def int_aarch64_sve_frint # inst # _x2 : SVE2_VG2_ZipUzp_Intrinsic;
    def int_aarch64_sve_frint # inst # _x4 : SVE2_VG4_ZipUzp_Intrinsic;
  }

  //
  // Multi-vector min/max
  //

  foreach ty = ["f", "s", "u"] in {
    foreach instr = ["max", "min"] in {
      def int_aarch64_sve_ # ty # instr # _single_x2 : SME2_VG2_Multi_Single_Intrinsic;
      def int_aarch64_sve_ # ty # instr # _single_x4 : SME2_VG4_Multi_Single_Intrinsic;

      def int_aarch64_sve_ # ty # instr # _x2 : SME2_VG2_Multi_Multi_Intrinsic;
      def int_aarch64_sve_ # ty # instr # _x4 : SME2_VG4_Multi_Multi_Intrinsic;
    }
  }

  //
  // Multi-vector floating point min/max number
  //

  foreach instr = ["fmaxnm", "fminnm"] in {
    def int_aarch64_sve_ # instr # _single_x2 : SME2_VG2_Multi_Single_Intrinsic;
    def int_aarch64_sve_ # instr # _single_x4 : SME2_VG4_Multi_Single_Intrinsic;

    def int_aarch64_sve_ # instr # _x2 : SME2_VG2_Multi_Multi_Intrinsic;
    def int_aarch64_sve_ # instr # _x4 : SME2_VG4_Multi_Multi_Intrinsic;
  }

  //
  // Multi-vector vertical dot-products
  //

  def int_aarch64_sme_fvdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;

  foreach ty = ["s", "u"] in {
    def int_aarch64_sme_ # ty # vdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
    def int_aarch64_sme_ # ty # vdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
    def int_aarch64_sme_ # ty # vdot_lane_za64_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
  }

  def int_aarch64_sme_suvdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
  def int_aarch64_sme_usvdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

  //
  // Multi-vector floating-point CVT from single-precision to interleaved half-precision/BFloat16
  //
  def int_aarch64_sve_fcvtn_x2  : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_bfcvtn_x2 : SME2_CVT_VG2_SINGLE_BF16_Intrinsic;

  //
  // Multi-vector convert to/from floating-point.
  //
  def int_aarch64_sve_fcvt_x2  : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_bfcvt_x2 : SME2_CVT_VG2_SINGLE_BF16_Intrinsic;
  def int_aarch64_sve_fcvts_x2 : SME2_CVT_FtoI_VG2_Intrinsic;
  def int_aarch64_sve_fcvtu_x2 : SME2_CVT_FtoI_VG2_Intrinsic;
  def int_aarch64_sve_scvtf_x2 : SME2_CVT_ItoF_VG2_Intrinsic;
  def int_aarch64_sve_ucvtf_x2 : SME2_CVT_ItoF_VG2_Intrinsic;
  def int_aarch64_sve_fcvts_x4 : SME2_CVT_FtoI_VG4_Intrinsic;
  def int_aarch64_sve_fcvtu_x4 : SME2_CVT_FtoI_VG4_Intrinsic;
  def int_aarch64_sve_scvtf_x4 : SME2_CVT_ItoF_VG4_Intrinsic;
  def int_aarch64_sve_ucvtf_x4 : SME2_CVT_ItoF_VG4_Intrinsic;

  //
  // Multi-vector saturating extract narrow
  //
  def int_aarch64_sve_sqcvt_x2  : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_uqcvt_x2  : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_sqcvtu_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_sqcvt_x4  : SME2_CVT_VG4_SINGLE_Intrinsic;
  def int_aarch64_sve_uqcvt_x4  : SME2_CVT_VG4_SINGLE_Intrinsic;
  def int_aarch64_sve_sqcvtu_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;

  //
  // Multi-vector saturating extract narrow and interleave
  //
  def int_aarch64_sve_sqcvtn_x2  : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_uqcvtn_x2  : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_sqcvtun_x2 : SME2_CVT_VG2_SINGLE_Intrinsic;
  def int_aarch64_sve_sqcvtn_x4  : SME2_CVT_VG4_SINGLE_Intrinsic;
  def int_aarch64_sve_uqcvtn_x4  : SME2_CVT_VG4_SINGLE_Intrinsic;
  def int_aarch64_sve_sqcvtun_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;

  //
  // Multi-Single add/sub
  //
  def int_aarch64_sme_add_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_sub_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_add_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
  def int_aarch64_sme_sub_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

  //
  // Multi-Multi add/sub
  //
  def int_aarch64_sme_add_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_sub_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_add_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
  def int_aarch64_sme_sub_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

  // Multi-vector clamps
  def int_aarch64_sve_sclamp_single_x2 : SME2_VG2_Multi_Single_Single_Intrinsic;
  def int_aarch64_sve_uclamp_single_x2 : SME2_VG2_Multi_Single_Single_Intrinsic;
  def int_aarch64_sve_fclamp_single_x2 : SME2_VG2_Multi_Single_Single_Intrinsic;

  def int_aarch64_sve_sclamp_single_x4 : SME2_VG4_Multi_Single_Single_Intrinsic;
  def int_aarch64_sve_uclamp_single_x4 : SME2_VG4_Multi_Single_Single_Intrinsic;
  def int_aarch64_sve_fclamp_single_x4 : SME2_VG4_Multi_Single_Single_Intrinsic;

  //
  // Multi-vector add/sub and accumulate into ZA
  //
  foreach intr = ["add", "sub"] in {
    foreach za = ["za32", "za64"] in {
      def int_aarch64_sme_ # intr # _ # za # _vg1x2 : SME2_ZA_Write_VG2_Intrinsic;
      def int_aarch64_sme_ # intr # _ # za # _vg1x4 : SME2_ZA_Write_VG4_Intrinsic;
    }
  }

  //
  // Move multi-vectors to/from ZA
  //

  def int_aarch64_sme_read_hor_vg2   : SME2_Matrix_TileVector_Read_VG2_Intrinsic;
  def int_aarch64_sme_read_hor_vg4   : SME2_Matrix_TileVector_Read_VG4_Intrinsic;

  def int_aarch64_sme_read_ver_vg2   : SME2_Matrix_TileVector_Read_VG2_Intrinsic;
  def int_aarch64_sme_read_ver_vg4   : SME2_Matrix_TileVector_Read_VG4_Intrinsic;

  def int_aarch64_sme_read_vg1x2 : SME2_ZA_ArrayVector_Read_VG2_Intrinsic;
  def int_aarch64_sme_read_vg1x4 : SME2_ZA_ArrayVector_Read_VG4_Intrinsic;

  def int_aarch64_sme_write_hor_vg2 : SME2_Matrix_TileVector_Write_VG2_Intrinsic;
  def int_aarch64_sme_write_hor_vg4 : SME2_Matrix_TileVector_Write_VG4_Intrinsic;

  def int_aarch64_sme_write_ver_vg2 : SME2_Matrix_TileVector_Write_VG2_Intrinsic;
  def int_aarch64_sme_write_ver_vg4 : SME2_Matrix_TileVector_Write_VG4_Intrinsic;

  def int_aarch64_sme_write_vg1x2 : SME2_ZA_ArrayVector_Write_VG2_Intrinsic;
  def int_aarch64_sme_write_vg1x4 : SME2_ZA_ArrayVector_Write_VG4_Intrinsic;

  //
  // Multi-Single Vector add
  //
  def int_aarch64_sve_add_single_x2 : SME2_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sve_add_single_x4 : SME2_VG4_Multi_Single_Intrinsic;

  // 2-way and 4-way multi-vector signed/unsigned integer dot-product
  foreach ty = ["s", "u"] in {
    foreach sz = ["za32", "za64"] in {
      def int_aarch64_sme_ # ty # dot_single_ # sz # _vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
      def int_aarch64_sme_ # ty # dot_single_ # sz # _vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

      def int_aarch64_sme_ # ty # dot_ # sz # _vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
      def int_aarch64_sme_ # ty # dot_ # sz # _vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

      def int_aarch64_sme_ # ty # dot_lane_ # sz # _vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
      def int_aarch64_sme_ # ty # dot_lane_ # sz # _vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
    }
  }

  foreach ty = ["su", "us"] in {
    def int_aarch64_sme_ # ty # dot_single_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
    def int_aarch64_sme_ # ty # dot_single_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

    def int_aarch64_sme_ # ty # dot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
    def int_aarch64_sme_ # ty # dot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
  }

  def int_aarch64_sme_usdot_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_usdot_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

  // Multi-vector half-precision or bfloat floating-point dot-product
  def int_aarch64_sme_fdot_single_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
  def int_aarch64_sme_fdot_single_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;

  def int_aarch64_sme_fdot_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
  def int_aarch64_sme_fdot_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;

  def int_aarch64_sme_fdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
  def int_aarch64_sme_fdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;

  // Multi-vector zips and unzips
  def int_aarch64_sve_zip_x2  : SVE2_VG2_ZipUzp_Intrinsic;
  def int_aarch64_sve_zipq_x2 : SVE2_VG2_ZipUzp_Intrinsic;
  def int_aarch64_sve_zip_x4  : SVE2_VG4_ZipUzp_Intrinsic;
  def int_aarch64_sve_zipq_x4 : SVE2_VG4_ZipUzp_Intrinsic;
  def int_aarch64_sve_uzp_x2  : SVE2_VG2_ZipUzp_Intrinsic;
  def int_aarch64_sve_uzpq_x2 : SVE2_VG2_ZipUzp_Intrinsic;
  def int_aarch64_sve_uzp_x4  : SVE2_VG4_ZipUzp_Intrinsic;
  def int_aarch64_sve_uzpq_x4 : SVE2_VG4_ZipUzp_Intrinsic;

  // Vector dot-products (2-way)
  def int_aarch64_sve_sdot_x2 : SVE2_3VectorArg_Long_Intrinsic;
  def int_aarch64_sve_udot_x2 : SVE2_3VectorArg_Long_Intrinsic;
  def int_aarch64_sve_fdot_x2 : SVE2_3VectorArg_Long_Intrinsic;
  def int_aarch64_sve_sdot_lane_x2 : SVE2_3VectorArgIndexed_Long_Intrinsic;
  def int_aarch64_sve_udot_lane_x2 : SVE2_3VectorArgIndexed_Long_Intrinsic;
  def int_aarch64_sve_fdot_lane_x2 : SVE2_3VectorArgIndexed_Long_Intrinsic;

  //
  // Signed/unsigned multi-vector unpacks
  //
  def int_aarch64_sve_sunpk_x2 : SME2_VG2_Unpk_Intrinsic;
  def int_aarch64_sve_uunpk_x2 : SME2_VG2_Unpk_Intrinsic;
  def int_aarch64_sve_sunpk_x4 : SME2_VG4_Unpk_Intrinsic;
  def int_aarch64_sve_uunpk_x4 : SME2_VG4_Unpk_Intrinsic;

  // 2-way and 4-way vector selects
  def int_aarch64_sve_sel_x2  : SVE2_VG2_Sel_Intrinsic;
  def int_aarch64_sve_sel_x4  : SVE2_VG4_Sel_Intrinsic;

}

//===- llvm/IR/ProfDataUtils.h - Profiling Metadata Utilities ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations for profiling metadata utility
/// functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PROFDATAUTILS_H
#define LLVM_IR_PROFDATAUTILS_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Metadata.h"

namespace llvm {

/// Checks if an Instruction has MD_prof Metadata
bool hasProfMD(const Instruction &I);

/// Checks if an MDNode contains Branch Weight Metadata
bool isBranchWeightMD(const MDNode *ProfileData);

/// Checks if an instruction has Branch Weight Metadata
///
/// \param I The instruction to check
/// \returns True if I has an MD_prof node containing Branch Weights. False
/// otherwise.
bool hasBranchWeightMD(const Instruction &I);

/// Checks if an instruction has valid Branch Weight Metadata
///
/// \param I The instruction to check
/// \returns True if I has an MD_prof node containing valid Branch Weights,
/// i.e., one weight for each successor. False otherwise.
bool hasValidBranchWeightMD(const Instruction &I);

/// Get the branch weights metadata node
///
/// \param I The Instruction to get the weights from.
/// \returns A pointer to I's branch weights metadata node, if it exists.
/// Nullptr otherwise.
MDNode *getBranchWeightMDNode(const Instruction &I);

/// Get the valid branch weights metadata node
///
/// \param I The Instruction to get the weights from.
/// \returns A pointer to I's valid branch weights metadata node, if it exists.
/// Nullptr otherwise.
MDNode *getValidBranchWeightMDNode(const Instruction &I);

/// Extract branch weights from MD_prof metadata
///
/// \param ProfileData A pointer to an MDNode.
/// \param [out] Weights An output vector to fill with branch weights
/// \returns True if weights were extracted, False otherwise. When false,
/// Weights will be cleared.
bool extractBranchWeights(const MDNode *ProfileData,
                          SmallVectorImpl<uint32_t> &Weights);

/// Extract branch weights attached to an Instruction
///
/// \param I The Instruction to extract weights from.
/// \param [out] Weights An output vector to fill with branch weights
/// \returns True if weights were extracted, False otherwise. When false,
/// Weights will be cleared.
bool extractBranchWeights(const Instruction &I,
                          SmallVectorImpl<uint32_t> &Weights);

/// Extract branch weights from a conditional branch or select Instruction.
///
/// \param I The instruction to extract branch weights from.
/// \param [out] TrueVal will contain the branch weight for the True branch
/// \param [out] FalseVal will contain the branch weight for the False branch
/// \returns True on success with profile weights filled in. False if no
/// metadata or invalid metadata was found.
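///
/// A minimal usage sketch (illustrative only; assumes \c Br is a conditional
/// \c BranchInst carrying branch_weights metadata):
/// \code
///   uint64_t TrueWeight = 0, FalseWeight = 0;
///   if (extractBranchWeights(*Br, TrueWeight, FalseWeight))
///     errs() << "taken/not-taken: " << TrueWeight << "/" << FalseWeight
///            << "\n";
/// \endcode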
bool extractBranchWeights(const Instruction &I, uint64_t &TrueVal,
                          uint64_t &FalseVal);

/// Retrieve the total of all weights from MD_prof data.
///
/// \param ProfileData The profile data to extract the total weight from
/// \param [out] TotalWeights output variable to fill with total weights
/// \returns True on success with profile total weights filled in. False if no
/// metadata was found.
bool extractProfTotalWeight(const MDNode *ProfileData, uint64_t &TotalWeights);

/// Retrieve the total of all weights from an instruction.
///
/// \param I The instruction to extract the total weight from
/// \param [out] TotalWeights output variable to fill with total weights
/// \returns True on success with profile total weights filled in. False if no
/// metadata was found.
bool extractProfTotalWeight(const Instruction &I, uint64_t &TotalWeights);

} // namespace llvm
#endif

//===- llvm/IR/PassInstrumentation.h ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines the Pass Instrumentation classes that provide
/// instrumentation points into the pass execution by PassManager.
///
/// There are two main classes:
///   - PassInstrumentation provides a set of instrumentation points for
///     pass managers to call on.
///
///   - PassInstrumentationCallbacks registers callbacks and provides access
///     to them for PassInstrumentation.
///
/// A PassInstrumentation object is used as the result of
/// PassInstrumentationAnalysis (so it is intended to be easily copyable).
///
/// Intended scheme of use for Pass Instrumentation is as follows:
///    - register instrumentation callbacks in a PassInstrumentationCallbacks
///      instance. PassBuilder provides a helper for that.
///
///    - register PassInstrumentationAnalysis with all the PassManagers.
///      PassBuilder handles that automatically when registering analyses.
///
///    - Pass Manager requests PassInstrumentationAnalysis from the analysis
///      manager and gets PassInstrumentation as its result.
///
///    - Pass Manager invokes PassInstrumentation entry points appropriately,
///      passing StringRef identification ("name") of the pass currently being
///      executed and the IRUnit it works on. There may be different schemes
///      of providing names in the future; currently it is just the name() of
///      the pass.
///
///    - PassInstrumentation wraps the address of the IRUnit into llvm::Any and
///      passes control to all the registered callbacks. Note that we
///      specifically wrap 'const IRUnitT*' so as to avoid any accidental
///      changes to the IR in instrumenting callbacks.
///
///    - Some instrumentation points (BeforePass) allow controlling the
///      execution of a pass. For those callbacks, returning false means the
///      pass will not be executed.
///
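/// A minimal sketch of wiring this up (illustrative only; the names PIC and
/// PI are assumptions, not part of this header):
/// \code
///   PassInstrumentationCallbacks PIC;
///   PIC.registerBeforeNonSkippedPassCallback([](StringRef PassID, Any IR) {
///     errs() << "Running pass: " << PassID << "\n";
///   });
///   PassInstrumentation PI(&PIC);
///   // Pass managers then invoke PI.runBeforePass(...) and friends.
/// \endcode
///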
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PASSINSTRUMENTATION_H
#define LLVM_IR_PASSINSTRUMENTATION_H

#include "llvm/ADT/Any.h"
#include "llvm/ADT/FunctionExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include <type_traits>
#include <vector>

namespace llvm {

class PreservedAnalyses;
class StringRef;

/// This class manages callbacks registration, as well as provides a way for
/// PassInstrumentation to pass control to the registered callbacks.
class PassInstrumentationCallbacks {
public:
  // Before/After callbacks accept IRUnits whenever appropriate, so they need
  // to take them as constant pointers, wrapped with llvm::Any.
  // For the case when IRUnit has been invalidated there is a different
  // callback to use - AfterPassInvalidated.
  // We call all BeforePassFuncs to determine if a pass should run or not.
  // BeforeNonSkippedPassFuncs are called only if the pass should run.
  // TODO: currently AfterPassInvalidated does not accept IRUnit, since passing
  // an already invalidated IRUnit is unsafe. There are ways to handle
  // invalidated IRUnits safely, and we might pursue that as soon as there is
  // useful instrumentation that needs it.
  using BeforePassFunc = bool(StringRef, Any);
  using BeforeSkippedPassFunc = void(StringRef, Any);
  using BeforeNonSkippedPassFunc = void(StringRef, Any);
  using AfterPassFunc = void(StringRef, Any, const PreservedAnalyses &);
  using AfterPassInvalidatedFunc = void(StringRef, const PreservedAnalyses &);
  using BeforeAnalysisFunc = void(StringRef, Any);
  using AfterAnalysisFunc = void(StringRef, Any);
  using AnalysisInvalidatedFunc = void(StringRef, Any);
  using AnalysesClearedFunc = void(StringRef);

public:
  PassInstrumentationCallbacks() = default;

  /// Copying PassInstrumentationCallbacks is not intended.
  PassInstrumentationCallbacks(const PassInstrumentationCallbacks &) = delete;
  void operator=(const PassInstrumentationCallbacks &) = delete;

  template <typename CallableT>
  void registerShouldRunOptionalPassCallback(CallableT C) {
    ShouldRunOptionalPassCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerBeforeSkippedPassCallback(CallableT C) {
    BeforeSkippedPassCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerBeforeNonSkippedPassCallback(CallableT C) {
    BeforeNonSkippedPassCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerAfterPassCallback(CallableT C, bool ToFront = false) {
    if (ToFront)
      AfterPassCallbacks.insert(AfterPassCallbacks.begin(), std::move(C));
    else
      AfterPassCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerAfterPassInvalidatedCallback(CallableT C, bool ToFront = false) {
    if (ToFront)
      AfterPassInvalidatedCallbacks.insert(
          AfterPassInvalidatedCallbacks.begin(), std::move(C));
    else
      AfterPassInvalidatedCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerBeforeAnalysisCallback(CallableT C) {
    BeforeAnalysisCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerAfterAnalysisCallback(CallableT C, bool ToFront = false) {
    if (ToFront)
      AfterAnalysisCallbacks.insert(AfterAnalysisCallbacks.begin(),
                                    std::move(C));
    else
      AfterAnalysisCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerAnalysisInvalidatedCallback(CallableT C) {
    AnalysisInvalidatedCallbacks.emplace_back(std::move(C));
  }

  template <typename CallableT>
  void registerAnalysesClearedCallback(CallableT C) {
    AnalysesClearedCallbacks.emplace_back(std::move(C));
  }

  /// Add a class name to pass name mapping for use by pass instrumentation.
  void addClassToPassName(StringRef ClassName, StringRef PassName);
  /// Get the pass name for a given pass class name.
  StringRef getPassNameForClassName(StringRef ClassName);

private:
  friend class PassInstrumentation;

  /// These are only run on passes that are not required. They return false when
  /// an optional pass should be skipped.
  SmallVector<llvm::unique_function<BeforePassFunc>, 4>
      ShouldRunOptionalPassCallbacks;
  /// These are run on passes that are skipped.
  SmallVector<llvm::unique_function<BeforeSkippedPassFunc>, 4>
      BeforeSkippedPassCallbacks;
  /// These are run on passes that are about to be run.
  SmallVector<llvm::unique_function<BeforeNonSkippedPassFunc>, 4>
      BeforeNonSkippedPassCallbacks;
  /// These are run on passes that have just run.
  SmallVector<llvm::unique_function<AfterPassFunc>, 4> AfterPassCallbacks;
  /// These are run on passes that have just run and invalidated the IR.
  SmallVector<llvm::unique_function<AfterPassInvalidatedFunc>, 4>
      AfterPassInvalidatedCallbacks;
  /// These are run on analyses that are about to be run.
  SmallVector<llvm::unique_function<BeforeAnalysisFunc>, 4>
      BeforeAnalysisCallbacks;
  /// These are run on analyses that have been run.
  SmallVector<llvm::unique_function<AfterAnalysisFunc>, 4>
      AfterAnalysisCallbacks;
  /// These are run on analyses that have been invalidated.
  SmallVector<llvm::unique_function<AnalysisInvalidatedFunc>, 4>
      AnalysisInvalidatedCallbacks;
  /// These are run on analyses that have been cleared.
  SmallVector<llvm::unique_function<AnalysesClearedFunc>, 4>
      AnalysesClearedCallbacks;

  StringMap<std::string> ClassToPassName;
};

/// This class provides instrumentation entry points for the Pass Manager,
/// doing calls to callbacks registered in PassInstrumentationCallbacks.
class PassInstrumentation {
  PassInstrumentationCallbacks *Callbacks;

  // The template argument PassT of PassInstrumentation::runBeforePass can be
  // of two kinds: (1) a regular pass inherited from PassInfoMixin (this
  // happens when creating an adaptor pass for a regular pass); (2) a
  // type-erased PassConcept created from (1). Here we want to make case (1)
  // skippable unconditionally since those are regular passes. We call
  // PassConcept::isRequired to decide for case (2).
  template <typename PassT>
  using has_required_t = decltype(std::declval<PassT &>().isRequired());

  template <typename PassT>
  static std::enable_if_t<is_detected<has_required_t, PassT>::value, bool>
  isRequired(const PassT &Pass) {
    return Pass.isRequired();
  }
  template <typename PassT>
  static std::enable_if_t<!is_detected<has_required_t, PassT>::value, bool>
  isRequired(const PassT &Pass) {
    return false;
  }

public:
  /// The Callbacks object is not owned by PassInstrumentation; its lifetime
  /// should at least match the lifetime of the corresponding
  /// PassInstrumentationAnalysis (which usually lasts until the end of the
  /// current compilation).
  PassInstrumentation(PassInstrumentationCallbacks *CB = nullptr)
      : Callbacks(CB) {}

  /// BeforePass instrumentation point - takes the \p Pass instance to be
  /// executed and a constant reference to the IR it operates on. \returns true
  /// if the pass is allowed to be executed. These callbacks are only run on
  /// optional passes, since required passes must always be run. This allows
  /// the callbacks to print info when they want to skip a pass.
  template <typename IRUnitT, typename PassT>
  bool runBeforePass(const PassT &Pass, const IRUnitT &IR) const {
    if (!Callbacks)
      return true;

    bool ShouldRun = true;
    if (!isRequired(Pass)) {
      for (auto &C : Callbacks->ShouldRunOptionalPassCallbacks)
        ShouldRun &= C(Pass.name(), llvm::Any(&IR));
    }

    if (ShouldRun) {
      for (auto &C : Callbacks->BeforeNonSkippedPassCallbacks)
        C(Pass.name(), llvm::Any(&IR));
    } else {
      for (auto &C : Callbacks->BeforeSkippedPassCallbacks)
        C(Pass.name(), llvm::Any(&IR));
    }

    return ShouldRun;
  }

  /// AfterPass instrumentation point - takes \p Pass instance that has
  /// just been executed and constant reference to \p IR it operates on.
  /// \p IR is guaranteed to be valid at this point.
  template <typename IRUnitT, typename PassT>
  void runAfterPass(const PassT &Pass, const IRUnitT &IR,
                    const PreservedAnalyses &PA) const {
    if (Callbacks)
      for (auto &C : Callbacks->AfterPassCallbacks)
        C(Pass.name(), llvm::Any(&IR), PA);
  }

  /// AfterPassInvalidated instrumentation point - takes \p Pass instance
  /// that has just been executed. For use when IR has been invalidated
  /// by \p Pass execution.
  template <typename IRUnitT, typename PassT>
  void runAfterPassInvalidated(const PassT &Pass,
                               const PreservedAnalyses &PA) const {
    if (Callbacks)
      for (auto &C : Callbacks->AfterPassInvalidatedCallbacks)
        C(Pass.name(), PA);
  }

  /// BeforeAnalysis instrumentation point - takes \p Analysis instance
  /// to be executed and constant reference to IR it operates on.
  template <typename IRUnitT, typename PassT>
  void runBeforeAnalysis(const PassT &Analysis, const IRUnitT &IR) const {
    if (Callbacks)
      for (auto &C : Callbacks->BeforeAnalysisCallbacks)
        C(Analysis.name(), llvm::Any(&IR));
  }

  /// AfterAnalysis instrumentation point - takes \p Analysis instance
  /// that has just been executed and constant reference to IR it operated on.
  template <typename IRUnitT, typename PassT>
  void runAfterAnalysis(const PassT &Analysis, const IRUnitT &IR) const {
    if (Callbacks)
      for (auto &C : Callbacks->AfterAnalysisCallbacks)
        C(Analysis.name(), llvm::Any(&IR));
  }

  /// AnalysisInvalidated instrumentation point - takes \p Analysis instance
  /// that has just been invalidated and constant reference to IR it operated
  /// on.
  template <typename IRUnitT, typename PassT>
  void runAnalysisInvalidated(const PassT &Analysis, const IRUnitT &IR) const {
    if (Callbacks)
      for (auto &C : Callbacks->AnalysisInvalidatedCallbacks)
        C(Analysis.name(), llvm::Any(&IR));
  }

  /// AnalysesCleared instrumentation point - takes name of IR that analyses
  /// operated on.
  void runAnalysesCleared(StringRef Name) const {
    if (Callbacks)
      for (auto &C : Callbacks->AnalysesClearedCallbacks)
        C(Name);
  }

  /// Handle invalidation from the pass manager when PassInstrumentation
  /// is used as the result of PassInstrumentationAnalysis.
  ///
  /// On attempt to invalidate just return false. There is nothing to become
  /// invalid here.
  template <typename IRUnitT, typename... ExtraArgsT>
  bool invalidate(IRUnitT &, const class llvm::PreservedAnalyses &,
                  ExtraArgsT...) {
    return false;
  }

  template <typename CallableT>
  void pushBeforeNonSkippedPassCallback(CallableT C) {
    if (Callbacks)
      Callbacks->BeforeNonSkippedPassCallbacks.emplace_back(std::move(C));
  }
  void popBeforeNonSkippedPassCallback() {
    if (Callbacks)
      Callbacks->BeforeNonSkippedPassCallbacks.pop_back();
  }
};

bool isSpecialPass(StringRef PassID, const std::vector<StringRef> &Specials);

} // namespace llvm

#endif

//===- IntrinsicsWebAssembly.td - Defines wasm intrinsics --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines all of the WebAssembly-specific intrinsics.
///
//===----------------------------------------------------------------------===//

// Type definition for a table in an intrinsic
def llvm_table_ty : LLVMQualPointerType<1>;

let TargetPrefix = "wasm" in {  // All intrinsics start with "llvm.wasm.".

// Query the current memory size, and increase the current memory size.
// Note that memory.size is not IntrNoMem because it must be sequenced with
// respect to memory.grow calls.
def int_wasm_memory_size :
  DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_i32_ty], [IntrReadMem]>;
def int_wasm_memory_grow :
  DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_i32_ty, LLVMMatchType<0>], []>;
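
// A hedged C++ sketch of emitting these via IRBuilder (illustrative only;
// M and Builder are assumed to be a Module* and an IRBuilder<>):
//   Function *SizeFn = Intrinsic::getDeclaration(
//       M, Intrinsic::wasm_memory_size, {Builder.getInt32Ty()});
//   Value *Pages = Builder.CreateCall(SizeFn, {Builder.getInt32(0)});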

//===----------------------------------------------------------------------===//
// ref.null intrinsics
//===----------------------------------------------------------------------===//
def int_wasm_ref_null_extern :
  DefaultAttrsIntrinsic<[llvm_externref_ty], [], [IntrNoMem]>;
def int_wasm_ref_null_func :
  DefaultAttrsIntrinsic<[llvm_funcref_ty], [], [IntrNoMem]>;
def int_wasm_ref_is_null_extern :
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_externref_ty], [IntrNoMem],
                        "llvm.wasm.ref.is_null.extern">;
def int_wasm_ref_is_null_func :
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_funcref_ty],
                        [IntrNoMem], "llvm.wasm.ref.is_null.func">;

//===----------------------------------------------------------------------===//
// Table intrinsics
//===----------------------------------------------------------------------===//
def int_wasm_table_set_externref :
  DefaultAttrsIntrinsic<[], [llvm_table_ty, llvm_i32_ty, llvm_externref_ty],
                        [IntrWriteMem]>;
def int_wasm_table_set_funcref :
  DefaultAttrsIntrinsic<[], [llvm_table_ty, llvm_i32_ty, llvm_funcref_ty],
                        [IntrWriteMem]>;

def int_wasm_table_get_externref :
  DefaultAttrsIntrinsic<[llvm_externref_ty], [llvm_table_ty, llvm_i32_ty],
                        [IntrReadMem]>;
def int_wasm_table_get_funcref :
  DefaultAttrsIntrinsic<[llvm_funcref_ty], [llvm_table_ty, llvm_i32_ty],
                        [IntrReadMem]>;

// Query the current table size, and increase the current table size.
def int_wasm_table_size :
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_table_ty], [IntrReadMem]>;
def int_wasm_table_copy :
  DefaultAttrsIntrinsic<[],
                        [llvm_table_ty, llvm_table_ty, llvm_i32_ty, llvm_i32_ty,
                         llvm_i32_ty], []>;
def int_wasm_table_grow_externref :
  DefaultAttrsIntrinsic<[llvm_i32_ty],
                        [llvm_table_ty, llvm_externref_ty, llvm_i32_ty], []>;
def int_wasm_table_grow_funcref :
  DefaultAttrsIntrinsic<[llvm_i32_ty],
                        [llvm_table_ty, llvm_funcref_ty, llvm_i32_ty], []>;
def int_wasm_table_fill_externref :
  DefaultAttrsIntrinsic<[],
                        [llvm_table_ty, llvm_i32_ty, llvm_externref_ty,
                         llvm_i32_ty], []>;
def int_wasm_table_fill_funcref :
  DefaultAttrsIntrinsic<[],
                        [llvm_table_ty, llvm_i32_ty, llvm_funcref_ty,
                         llvm_i32_ty], []>;

//===----------------------------------------------------------------------===//
// Trapping float-to-int conversions
//===----------------------------------------------------------------------===//

// These don't use default attributes, because they are not willreturn.
def int_wasm_trunc_signed : Intrinsic<[llvm_anyint_ty],
                                      [llvm_anyfloat_ty],
                                      [IntrNoMem]>;
def int_wasm_trunc_unsigned : Intrinsic<[llvm_anyint_ty],
                                        [llvm_anyfloat_ty],
                                        [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Saturating float-to-int conversions
//===----------------------------------------------------------------------===//

def int_wasm_trunc_saturate_signed :
  DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_trunc_saturate_unsigned :
  DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty],
                        [IntrNoMem, IntrSpeculatable]>;

//===----------------------------------------------------------------------===//
// Exception handling intrinsics
//===----------------------------------------------------------------------===//

// throw / rethrow
// The first immediate argument is an index to a tag, which is 0 for C++
// exceptions. The second argument is the thrown exception pointer.
def int_wasm_throw : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty],
                               [Throws, IntrNoReturn, ImmArg<ArgIndex<0>>]>;
def int_wasm_rethrow : Intrinsic<[], [], [Throws, IntrNoReturn]>;
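
// A hedged C++ sketch of emitting llvm.wasm.throw (illustrative only; ExnPtr
// is assumed to be a pointer-typed Value*):
//   Function *ThrowFn = Intrinsic::getDeclaration(M, Intrinsic::wasm_throw);
//   Builder.CreateCall(ThrowFn, {Builder.getInt32(0), ExnPtr});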

// Since wasm does not use landingpad instructions, these intrinsics return
// the exception pointer and selector values until we lower them in
// WasmEHPrepare.
def int_wasm_get_exception :
  DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_token_ty], [IntrHasSideEffects]>;
def int_wasm_get_ehselector :
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrHasSideEffects]>;

// wasm.catch returns the pointer to the exception object caught by the wasm
// 'catch' instruction. This returns a single pointer, which is the case for
// C++ exceptions. The immediate argument is an index for a tag, which is 0
// for C++ exceptions.
def int_wasm_catch :
  DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_i32_ty],
                        [IntrHasSideEffects, ImmArg<ArgIndex<0>>]>;

// WebAssembly EH must maintain the landingpads in the order assigned to them
// by the WasmEHPrepare pass to generate the landingpad table in EHStreamer.
// This intrinsic is used to give them their indices in WasmEHPrepare.
def int_wasm_landingpad_index :
  DefaultAttrsIntrinsic<[], [llvm_token_ty, llvm_i32_ty],
                        [IntrNoMem, ImmArg<ArgIndex<1>>]>;

// Returns LSDA address of the current function.
def int_wasm_lsda : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Atomic intrinsics
//===----------------------------------------------------------------------===//

// wait / notify
// These don't use default attributes, because they are not nosync.
def int_wasm_memory_atomic_wait32 :
  Intrinsic<[llvm_i32_ty],
            [llvm_ptr_ty, llvm_i32_ty, llvm_i64_ty],
            [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
             NoCapture<ArgIndex<0>>, IntrHasSideEffects],
            "", [SDNPMemOperand]>;
def int_wasm_memory_atomic_wait64 :
  Intrinsic<[llvm_i32_ty],
            [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
            [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
             NoCapture<ArgIndex<0>>, IntrHasSideEffects],
            "", [SDNPMemOperand]>;
def int_wasm_memory_atomic_notify:
  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
            [IntrInaccessibleMemOnly, NoCapture<ArgIndex<0>>,
             IntrHasSideEffects],
            "", [SDNPMemOperand]>;

//===----------------------------------------------------------------------===//
// SIMD intrinsics
//===----------------------------------------------------------------------===//

def int_wasm_swizzle :
  DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                        [llvm_v16i8_ty, llvm_v16i8_ty],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_shuffle :
  DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                         llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                         llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                         llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                         llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                        [IntrNoMem, IntrSpeculatable,
                         ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
                         ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>,
                         ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>,
                         ImmArg<ArgIndex<8>>, ImmArg<ArgIndex<9>>,
                         ImmArg<ArgIndex<10>>, ImmArg<ArgIndex<11>>,
                         ImmArg<ArgIndex<12>>, ImmArg<ArgIndex<13>>,
                         ImmArg<ArgIndex<14>>, ImmArg<ArgIndex<15>>,
                         ImmArg<ArgIndex<16>>, ImmArg<ArgIndex<17>>]>;
def int_wasm_sub_sat_signed :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_sat_unsigned :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_avgr_unsigned :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_bitselect :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_anytrue :
  DefaultAttrsIntrinsic<[llvm_i32_ty],
                        [llvm_anyvector_ty],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_alltrue :
  DefaultAttrsIntrinsic<[llvm_i32_ty],
                        [llvm_anyvector_ty],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_bitmask :
  DefaultAttrsIntrinsic<[llvm_i32_ty],
                        [llvm_anyvector_ty],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_dot :
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                        [llvm_v8i16_ty, llvm_v8i16_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_narrow_signed :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [llvm_anyvector_ty, LLVMMatchType<1>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_narrow_unsigned :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [llvm_anyvector_ty, LLVMMatchType<1>],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_q15mulr_sat_signed :
  DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                        [llvm_v8i16_ty, llvm_v8i16_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_pmin :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_pmax :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_extadd_pairwise_signed :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMSubdivide2VectorType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_extadd_pairwise_unsigned :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMSubdivide2VectorType<0>],
                        [IntrNoMem, IntrSpeculatable]>;

//===----------------------------------------------------------------------===//
// Relaxed SIMD intrinsics (experimental)
//===----------------------------------------------------------------------===//

def int_wasm_relaxed_madd :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_nmadd :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_laneselect :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_swizzle :
  DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                        [llvm_v16i8_ty, llvm_v16i8_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_min :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_max :
  DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                        [LLVMMatchType<0>, LLVMMatchType<0>],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_trunc_signed:
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                        [llvm_v4f32_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_trunc_unsigned:
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                        [llvm_v4f32_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_trunc_signed_zero:
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                        [llvm_v2f64_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_trunc_unsigned_zero:
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                        [llvm_v2f64_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_q15mulr_signed:
  DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                        [llvm_v8i16_ty, llvm_v8i16_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_dot_i8x16_i7x16_signed:
  DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                        [llvm_v16i8_ty, llvm_v16i8_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_dot_i8x16_i7x16_add_signed:
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v4i32_ty],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_relaxed_dot_bf16x8_add_f32:
  DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                        [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4f32_ty],
                        [IntrNoMem, IntrSpeculatable]>;


//===----------------------------------------------------------------------===//
// Thread-local storage intrinsics
//===----------------------------------------------------------------------===//

def int_wasm_tls_size :
  DefaultAttrsIntrinsic<[llvm_anyint_ty],
                        [],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_tls_align :
  DefaultAttrsIntrinsic<[llvm_anyint_ty],
                        [],
                        [IntrNoMem, IntrSpeculatable]>;

def int_wasm_tls_base :
  DefaultAttrsIntrinsic<[llvm_ptr_ty],
                        [],
                        [IntrReadMem]>;

} // TargetPrefix = "wasm"

//===- llvm/IR/LLVMRemarkStreamer.h - Streamer for LLVM remarks--*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the conversion between IR Diagnostics and
// serializable remarks::Remark objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_LLVMREMARKSTREAMER_H
#define LLVM_IR_LLVMREMARKSTREAMER_H

#include "llvm/Remarks/Remark.h"
#include "llvm/Support/Error.h"
#include <memory>
#include <optional>
#include <string>

namespace llvm {

class DiagnosticInfoOptimizationBase;
class LLVMContext;
class ToolOutputFile;
namespace remarks {
class RemarkStreamer;
}

/// Streamer for LLVM remarks which has logic for dealing with DiagnosticInfo
/// objects.
class LLVMRemarkStreamer {
  remarks::RemarkStreamer &RS;
  /// Convert diagnostics into remark objects.
  /// The lifetime of the members of the result is bound to the lifetime of
  /// the LLVM diagnostics.
  remarks::Remark toRemark(const DiagnosticInfoOptimizationBase &Diag) const;

public:
  LLVMRemarkStreamer(remarks::RemarkStreamer &RS) : RS(RS) {}
  /// Emit a diagnostic through the streamer.
  void emit(const DiagnosticInfoOptimizationBase &Diag);
};

template <typename ThisError>
struct LLVMRemarkSetupErrorInfo : public ErrorInfo<ThisError> {
  std::string Msg;
  std::error_code EC;

  LLVMRemarkSetupErrorInfo(Error E) {
    handleAllErrors(std::move(E), [&](const ErrorInfoBase &EIB) {
      Msg = EIB.message();
      EC = EIB.convertToErrorCode();
    });
  }

  void log(raw_ostream &OS) const override { OS << Msg; }
  std::error_code convertToErrorCode() const override { return EC; }
};

struct LLVMRemarkSetupFileError
    : LLVMRemarkSetupErrorInfo<LLVMRemarkSetupFileError> {
  static char ID;
  using LLVMRemarkSetupErrorInfo<
      LLVMRemarkSetupFileError>::LLVMRemarkSetupErrorInfo;
};

struct LLVMRemarkSetupPatternError
    : LLVMRemarkSetupErrorInfo<LLVMRemarkSetupPatternError> {
  static char ID;
  using LLVMRemarkSetupErrorInfo<
      LLVMRemarkSetupPatternError>::LLVMRemarkSetupErrorInfo;
};

struct LLVMRemarkSetupFormatError
    : LLVMRemarkSetupErrorInfo<LLVMRemarkSetupFormatError> {
  static char ID;
  using LLVMRemarkSetupErrorInfo<
      LLVMRemarkSetupFormatError>::LLVMRemarkSetupErrorInfo;
};

/// Setup optimization remarks that output to a file.
Expected<std::unique_ptr<ToolOutputFile>>
setupLLVMOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
                             StringRef RemarksPasses, StringRef RemarksFormat,
                             bool RemarksWithHotness,
                             std::optional<uint64_t> RemarksHotnessThreshold = 0);
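
/// A minimal setup sketch (illustrative only; the file name and format shown
/// are assumptions):
/// \code
///   auto FileOrErr = setupLLVMOptimizationRemarks(
///       Ctx, "remarks.yaml", /*RemarksPasses=*/"", /*RemarksFormat=*/"yaml",
///       /*RemarksWithHotness=*/false);
///   if (FileOrErr)
///     (*FileOrErr)->keep();
/// \endcode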

/// Setup optimization remarks that output directly to a raw_ostream.
/// \p OS is managed by the caller and should be open for writing as long as \p
/// Context is streaming remarks to it.
Error setupLLVMOptimizationRemarks(
    LLVMContext &Context, raw_ostream &OS, StringRef RemarksPasses,
    StringRef RemarksFormat, bool RemarksWithHotness,
    std::optional<uint64_t> RemarksHotnessThreshold = 0);

} // end namespace llvm

#endif // LLVM_IR_LLVMREMARKSTREAMER_H

//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Instruction class, which is the
// base class for all of the LLVM instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRUCTION_H
#define LLVM_IR_INSTRUCTION_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include <cstdint>
#include <utility>

namespace llvm {

class BasicBlock;
class FastMathFlags;
class MDNode;
class Module;
struct AAMDNodes;

template <> struct ilist_alloc_traits<Instruction> {
  static inline void deleteNode(Instruction *V);
};

class Instruction : public User,
                    public ilist_node_with_parent<Instruction, BasicBlock> {
  BasicBlock *Parent;
  DebugLoc DbgLoc;                         // 'dbg' Metadata cache.

  /// Relative order of this instruction in its parent basic block. Used for
  /// O(1) local dominance checks between instructions.
  mutable unsigned Order = 0;

protected:
  // The first 15 bits of `Value::SubclassData` are available for subclasses of
  // `Instruction` to use.
  using OpaqueField = Bitfield::Element<uint16_t, 0, 15>;

  // Template alias so that all Instruction subclasses storing an alignment use
  // the same definition.
  // Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent =
  // 2^32. We store them as Log2(Alignment), so we need 6 bits to encode the 33
  // possible values.
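  // For example, Align(16) is stored as Log2(16) == 4, and the maximum
  // alignment of 2^32 is stored as 32 (giving the 33 values 0 through 32).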
  template <unsigned Offset>
  using AlignmentBitfieldElementT =
      typename Bitfield::Element<unsigned, Offset, 6,
                                 Value::MaxAlignmentExponent>;

  template <unsigned Offset>
  using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>;

  template <unsigned Offset>
  using AtomicOrderingBitfieldElementT =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

private:
  // The last bit is used to store whether the instruction has metadata attached
  // or not.
  using HasMetadataField = Bitfield::Element<bool, 15, 1>;

protected:
  ~Instruction(); // Use deleteValue() to delete a generic Instruction.

public:
  Instruction(const Instruction &) = delete;
  Instruction &operator=(const Instruction &) = delete;

  /// Specialize the methods defined in Value, as we know that an instruction
  /// can only be used by other instructions.
  Instruction       *user_back()       { return cast<Instruction>(*user_begin());}
  const Instruction *user_back() const { return cast<Instruction>(*user_begin());}

  inline const BasicBlock *getParent() const { return Parent; }
  inline       BasicBlock *getParent()       { return Parent; }

  /// Return the module owning the function this instruction belongs to
  /// or nullptr if the function does not have a module.
  ///
  /// Note: this is undefined behavior if the instruction does not have a
  /// parent, or the parent basic block does not have a parent function.
  const Module *getModule() const;
  Module *getModule() {
    return const_cast<Module *>(
                           static_cast<const Instruction *>(this)->getModule());
  }

  /// Return the function this instruction belongs to.
  ///
  /// Note: it is undefined behavior to call this on an instruction not
  /// currently inserted into a function.
  const Function *getFunction() const;
  Function *getFunction() {
    return const_cast<Function *>(
                         static_cast<const Instruction *>(this)->getFunction());
  }

  /// This method unlinks 'this' from the containing basic block, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing basic block and deletes it.
  ///
  /// \returns an iterator pointing to the element after the erased one
  SymbolTableList<Instruction>::iterator eraseFromParent();

  /// Insert an unlinked instruction into a basic block immediately before
  /// the specified instruction.
  void insertBefore(Instruction *InsertPos);

  /// Insert an unlinked instruction into a basic block immediately after the
  /// specified instruction.
  void insertAfter(Instruction *InsertPos);

  /// Inserts an unlinked instruction into \p ParentBB at position \p It and
  /// returns the iterator of the inserted instruction.
  SymbolTableList<Instruction>::iterator
  insertInto(BasicBlock *ParentBB, SymbolTableList<Instruction>::iterator It);

  /// Unlink this instruction from its current basic block and insert it into
  /// the basic block that MovePos lives in, right before MovePos.
  void moveBefore(Instruction *MovePos);

  /// Unlink this instruction and insert into BB before I.
  ///
  /// \pre I is a valid iterator into BB.
  void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);

  /// Unlink this instruction from its current basic block and insert it into
  /// the basic block that MovePos lives in, right after MovePos.
  void moveAfter(Instruction *MovePos);

  /// Given an instruction Other in the same basic block as this instruction,
  /// return true if this instruction comes before Other. In the worst case,
  /// this takes linear time in the number of instructions in the block. The
  /// results are cached, so in common cases when the block remains unmodified,
  /// it takes constant time.
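  ///
  /// A small illustrative sketch (assumes \c A and \c B are Instruction
  /// pointers in the same basic block):
  /// \code
  ///   if (A->comesBefore(B)) {
  ///     // A precedes B within the block.
  ///   }
  /// \endcode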
  bool comesBefore(const Instruction *Other) const;

  /// Get the first insertion point at which the result of this instruction
  /// is defined. This is *not* the directly following instruction in a number
  /// of cases, e.g. phi nodes or terminators that return values. This function
  /// may return null if the insertion after the definition is not possible,
  /// e.g. due to a catchswitch terminator.
  Instruction *getInsertionPointAfterDef();

  //===--------------------------------------------------------------------===//
  // Subclass classification.
  //===--------------------------------------------------------------------===//

  /// Returns a member of one of the enums like Instruction::Add.
  unsigned getOpcode() const { return getValueID() - InstructionVal; }

  const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
  bool isTerminator() const { return isTerminator(getOpcode()); }
  bool isUnaryOp() const { return isUnaryOp(getOpcode()); }
  bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
  bool isIntDivRem() const { return isIntDivRem(getOpcode()); }
  bool isShift() const { return isShift(getOpcode()); }
  bool isCast() const { return isCast(getOpcode()); }
  bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
  bool isExceptionalTerminator() const {
    return isExceptionalTerminator(getOpcode());
  }

  /// Checks if this instruction is the only user of at least one of
  /// its operands.
  bool isOnlyUserOfAnyOperand();

  static const char *getOpcodeName(unsigned Opcode);

  static inline bool isTerminator(unsigned Opcode) {
    return Opcode >= TermOpsBegin && Opcode < TermOpsEnd;
  }

  static inline bool isUnaryOp(unsigned Opcode) {
    return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd;
  }
  static inline bool isBinaryOp(unsigned Opcode) {
    return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
  }

  static inline bool isIntDivRem(unsigned Opcode) {
    return Opcode == UDiv || Opcode == SDiv || Opcode == URem || Opcode == SRem;
  }

  /// Determine if the Opcode is one of the shift instructions.
  static inline bool isShift(unsigned Opcode) {
    return Opcode >= Shl && Opcode <= AShr;
  }

  /// Return true if this is a logical shift left or a logical shift right.
  inline bool isLogicalShift() const {
    return getOpcode() == Shl || getOpcode() == LShr;
  }

  /// Return true if this is an arithmetic shift right.
  inline bool isArithmeticShift() const {
    return getOpcode() == AShr;
  }

  /// Determine if the Opcode is and/or/xor.
  static inline bool isBitwiseLogicOp(unsigned Opcode) {
    return Opcode == And || Opcode == Or || Opcode == Xor;
  }

  /// Return true if this is and/or/xor.
  inline bool isBitwiseLogicOp() const {
    return isBitwiseLogicOp(getOpcode());
  }

  /// Determine if the Opcode is one of the CastInst instructions.
  static inline bool isCast(unsigned Opcode) {
    return Opcode >= CastOpsBegin && Opcode < CastOpsEnd;
  }

  /// Determine if the Opcode is one of the FuncletPadInst instructions.
  static inline bool isFuncletPad(unsigned Opcode) {
    return Opcode >= FuncletPadOpsBegin && Opcode < FuncletPadOpsEnd;
  }

  /// Returns true if the Opcode is a terminator related to exception handling.
  static inline bool isExceptionalTerminator(unsigned Opcode) {
    switch (Opcode) {
    case Instruction::CatchSwitch:
    case Instruction::CatchRet:
    case Instruction::CleanupRet:
    case Instruction::Invoke:
    case Instruction::Resume:
      return true;
    default:
      return false;
    }
  }

  //===--------------------------------------------------------------------===//
  // Metadata manipulation.
  //===--------------------------------------------------------------------===//

  /// Return true if this instruction has any metadata attached to it.
  bool hasMetadata() const { return DbgLoc || Value::hasMetadata(); }

  /// Return true if this instruction has metadata attached to it other than a
  /// debug location.
  bool hasMetadataOtherThanDebugLoc() const { return Value::hasMetadata(); }

  /// Return true if this instruction has the given type of metadata attached.
  bool hasMetadata(unsigned KindID) const {
    return getMetadata(KindID) != nullptr;
  }

  /// Return true if this instruction has the given type of metadata attached.
  bool hasMetadata(StringRef Kind) const {
    return getMetadata(Kind) != nullptr;
  }

  /// Get the metadata of given kind attached to this Instruction.
  /// If the metadata is not found then return null.
  MDNode *getMetadata(unsigned KindID) const {
    if (!hasMetadata()) return nullptr;
    return getMetadataImpl(KindID);
  }

  /// Get the metadata of given kind attached to this Instruction.
  /// If the metadata is not found then return null.
  MDNode *getMetadata(StringRef Kind) const {
    if (!hasMetadata()) return nullptr;
    return getMetadataImpl(Kind);
  }

  /// Get all metadata attached to this Instruction. The first element of each
  /// pair returned is the KindID, the second element is the metadata value.
  /// This list is returned sorted by the KindID.
  void
  getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
    if (hasMetadata())
      getAllMetadataImpl(MDs);
  }

  /// This does the same thing as getAllMetadata, except that it filters out the
  /// debug location.
  void getAllMetadataOtherThanDebugLoc(
      SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
    Value::getAllMetadata(MDs);
  }

  /// Set the metadata of the specified kind to the specified node. This updates
  /// or replaces metadata if already present, or removes it if Node is null.
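  ///
  /// A minimal usage sketch (illustrative; I is any instruction and Ctx is
  /// its LLVMContext):
  /// \code
  ///   unsigned KindID = Ctx.getMDKindID("my.custom.kind");
  ///   I->setMetadata(KindID, MDNode::get(Ctx, {})); // attach
  ///   I->setMetadata(KindID, nullptr);              // remove
  /// \endcode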
  void setMetadata(unsigned KindID, MDNode *Node);
  void setMetadata(StringRef Kind, MDNode *Node);

  /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
  /// specifies the list of metadata kinds that need to be copied. If \p WL is
  /// empty, all metadata will be copied.
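  ///
  /// A minimal usage sketch (illustrative):
  /// \code
  ///   // Copy only the !dbg and !tbaa attachments from OldI to NewI.
  ///   NewI->copyMetadata(*OldI, {LLVMContext::MD_dbg, LLVMContext::MD_tbaa});
  /// \endcode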
  void copyMetadata(const Instruction &SrcInst,
                    ArrayRef<unsigned> WL = ArrayRef<unsigned>());

  /// If the instruction has "branch_weights" MD_prof metadata and the MDNode
  /// has three operands (including name string), swap the order of the
  /// metadata.
  void swapProfMetadata();

  /// Drop all unknown metadata except for debug locations.
  /// @{
  /// Passes are required to drop metadata they don't understand. This is a
  /// convenience method for passes to do so; see the sketch after this group.
  /// dropUBImplyingAttrsAndUnknownMetadata should be used instead of
  /// this API if the Instruction being modified is a call.
  void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
  void dropUnknownNonDebugMetadata() {
    return dropUnknownNonDebugMetadata(std::nullopt);
  }
  void dropUnknownNonDebugMetadata(unsigned ID1) {
    return dropUnknownNonDebugMetadata(ArrayRef(ID1));
  }
  void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
    unsigned IDs[] = {ID1, ID2};
    return dropUnknownNonDebugMetadata(IDs);
  }
  /// @}
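  // A minimal sketch of the overloads above (illustrative; I is any
  // instruction):
  //   I->dropUnknownNonDebugMetadata(LLVMContext::MD_tbaa); // keep !tbaa only
  //   I->dropUnknownNonDebugMetadata(); // drop everything but debug locations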

  /// Adds an !annotation metadata node with \p Annotation to this instruction.
  /// If this instruction already has !annotation metadata, append \p Annotation
  /// to the existing node.
  void addAnnotationMetadata(StringRef Annotation);
  /// Adds an !annotation metadata node with an array of \p Annotations
  /// as a tuple to this instruction. If this instruction already has
  /// !annotation metadata, append the tuple to
  /// the existing node.
  void addAnnotationMetadata(SmallVector<StringRef> Annotations);
  /// Returns the AA metadata for this instruction.
  AAMDNodes getAAMetadata() const;

  /// Sets the AA metadata on this instruction from the AAMDNodes structure.
  void setAAMetadata(const AAMDNodes &N);

  /// Sets the nosanitize metadata on this instruction.
  void setNoSanitizeMetadata();

  /// Retrieve total raw weight values of a branch.
  /// Returns true on success with profile total weights filled in.
  /// Returns false if no metadata was found.
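  ///
  /// A minimal usage sketch (illustrative; Br is assumed to carry
  /// branch_weights profile metadata):
  /// \code
  ///   uint64_t Total;
  ///   if (Br->extractProfTotalWeight(Total))
  ///     ; // Total now holds the sum of the raw branch weights.
  /// \endcode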
  bool extractProfTotalWeight(uint64_t &TotalVal) const;

  /// Set the debug location information for this instruction.
  void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }

  /// Return the debug location for this node as a DebugLoc.
  const DebugLoc &getDebugLoc() const { return DbgLoc; }

  /// Set or clear the nuw flag on this instruction, which must be an operator
  /// which supports this flag. See LangRef.html for the meaning of this flag.
  void setHasNoUnsignedWrap(bool b = true);

  /// Set or clear the nsw flag on this instruction, which must be an operator
  /// which supports this flag. See LangRef.html for the meaning of this flag.
  void setHasNoSignedWrap(bool b = true);

  /// Set or clear the exact flag on this instruction, which must be an operator
  /// which supports this flag. See LangRef.html for the meaning of this flag.
  void setIsExact(bool b = true);

  /// Determine whether the no unsigned wrap flag is set.
  bool hasNoUnsignedWrap() const LLVM_READONLY;

  /// Determine whether the no signed wrap flag is set.
  bool hasNoSignedWrap() const LLVM_READONLY;

  /// Return true if this operator has flags which may cause this instruction
  /// to evaluate to poison despite having non-poison inputs.
  bool hasPoisonGeneratingFlags() const LLVM_READONLY;

  /// Drops flags that may cause this instruction to evaluate to poison despite
  /// having non-poison inputs.
  void dropPoisonGeneratingFlags();

  /// Return true if this instruction has poison-generating metadata.
  bool hasPoisonGeneratingMetadata() const LLVM_READONLY;

  /// Drops metadata that may generate poison.
  void dropPoisonGeneratingMetadata();

  /// Return true if this instruction has poison-generating flags or metadata.
  bool hasPoisonGeneratingFlagsOrMetadata() const {
    return hasPoisonGeneratingFlags() || hasPoisonGeneratingMetadata();
  }

  /// Drops flags and metadata that may generate poison.
  void dropPoisonGeneratingFlagsAndMetadata() {
    dropPoisonGeneratingFlags();
    dropPoisonGeneratingMetadata();
  }

  /// This function drops non-debug unknown metadata (through
  /// dropUnknownNonDebugMetadata). For calls, it also drops parameter and
  /// return attributes that can cause undefined behaviour. Both of these should
  /// be done by passes which move instructions in IR.
  void dropUBImplyingAttrsAndUnknownMetadata(ArrayRef<unsigned> KnownIDs = {});

  /// Drop any attributes or metadata that can cause immediate undefined
  /// behavior. Retain other attributes/metadata on a best-effort basis.
  /// This should be used when speculating instructions.
  void dropUBImplyingAttrsAndMetadata();

  /// Determine whether the exact flag is set.
  bool isExact() const LLVM_READONLY;

  /// Set or clear all fast-math-flags on this instruction, which must be an
  /// operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setFast(bool B);

  /// Set or clear the reassociation flag on this instruction, which must be
  /// an operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasAllowReassoc(bool B);

  /// Set or clear the no-nans flag on this instruction, which must be an
  /// operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasNoNaNs(bool B);

  /// Set or clear the no-infs flag on this instruction, which must be an
  /// operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasNoInfs(bool B);

  /// Set or clear the no-signed-zeros flag on this instruction, which must be
  /// an operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasNoSignedZeros(bool B);

  /// Set or clear the allow-reciprocal flag on this instruction, which must be
  /// an operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasAllowReciprocal(bool B);

  /// Set or clear the allow-contract flag on this instruction, which must be
  /// an operator which supports this flag. See LangRef.html for the meaning of
  /// this flag.
  void setHasAllowContract(bool B);

  /// Set or clear the approximate-math-functions flag on this instruction,
  /// which must be an operator which supports this flag. See LangRef.html for
  /// the meaning of this flag.
  void setHasApproxFunc(bool B);

  /// Convenience function for setting multiple fast-math flags on this
  /// instruction, which must be an operator which supports these flags. See
  /// LangRef.html for the meaning of these flags.
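  ///
  /// A minimal usage sketch (illustrative; FAddInst is a floating-point
  /// operator that supports fast-math flags):
  /// \code
  ///   FastMathFlags FMF;
  ///   FMF.setNoNaNs(true);
  ///   FMF.setAllowContract(true);
  ///   FAddInst->setFastMathFlags(FMF);
  /// \endcode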
  void setFastMathFlags(FastMathFlags FMF);

  /// Convenience function for transferring all fast-math flag values to this
  /// instruction, which must be an operator which supports these flags. See
  /// LangRef.html for the meaning of these flags.
  void copyFastMathFlags(FastMathFlags FMF);

  /// Determine whether all fast-math-flags are set.
  bool isFast() const LLVM_READONLY;

  /// Determine whether the allow-reassociation flag is set.
  bool hasAllowReassoc() const LLVM_READONLY;

  /// Determine whether the no-NaNs flag is set.
  bool hasNoNaNs() const LLVM_READONLY;

  /// Determine whether the no-infs flag is set.
  bool hasNoInfs() const LLVM_READONLY;

  /// Determine whether the no-signed-zeros flag is set.
  bool hasNoSignedZeros() const LLVM_READONLY;

  /// Determine whether the allow-reciprocal flag is set.
  bool hasAllowReciprocal() const LLVM_READONLY;

  /// Determine whether the allow-contract flag is set.
  bool hasAllowContract() const LLVM_READONLY;

  /// Determine whether the approximate-math-functions flag is set.
  bool hasApproxFunc() const LLVM_READONLY;

  /// Convenience function for getting all the fast-math flags, which must be an
  /// operator which supports these flags. See LangRef.html for the meaning of
  /// these flags.
  FastMathFlags getFastMathFlags() const LLVM_READONLY;

  /// Copy I's fast-math flags
  void copyFastMathFlags(const Instruction *I);

  /// Convenience method to copy supported exact, fast-math, and (optionally)
  /// wrapping flags from V to this instruction.
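  ///
  /// A minimal usage sketch (illustrative; NewI is a replacement for OldI):
  /// \code
  ///   NewI->copyIRFlags(OldI); // adopt nuw/nsw, exact, and fast-math flags
  /// \endcode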
  void copyIRFlags(const Value *V, bool IncludeWrapFlags = true);

  /// Logical 'and' of any supported wrapping, exact, and fast-math flags of
  /// V and this instruction.
  void andIRFlags(const Value *V);

  /// Merge 2 debug locations and apply the result to this Instruction. If the
  /// instruction is a CallInst, we need to traverse the inline chain to find
  /// the common scope. This is not efficient for N-way merging, as each time
  /// you merge 2 locations, you need to rebuild the hashmap to find the
  /// common scope. However, we still choose this API because:
  ///  1) Simplicity: it takes 2 locations instead of a list of locations.
  ///  2) In the worst case, it increases the complexity from O(N*I) to
  ///     O(2*N*I), where N is the number of Instructions to merge, and I is
  ///     the maximum depth of the inline stack. So it is still linear.
  ///  3) Merging of call instructions should be extremely rare in real
  ///     applications, so the N-way merge should not be on a hot code path.
  /// The DebugLoc attached to this instruction will be overwritten by the
  /// merged DebugLoc.
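  ///
  /// A minimal usage sketch (illustrative; A and B are being combined and the
  /// result keeps A's position):
  /// \code
  ///   A->applyMergedLocation(A->getDebugLoc(), B->getDebugLoc());
  /// \endcode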
  void applyMergedLocation(DILocation *LocA, DILocation *LocB);

  /// Updates the debug location given that the instruction has been hoisted
  /// from a block to a predecessor of that block.
  /// Note: it is undefined behavior to call this on an instruction not
  /// currently inserted into a function.
  void updateLocationAfterHoist();

  /// Drop the instruction's debug location. This does not guarantee removal
  /// of the !dbg source location attachment, as it must set a line 0 location
  /// with scope information attached on call instructions. To guarantee
  /// removal of the !dbg attachment, use the \ref setDebugLoc() API.
  /// Note: it is undefined behavior to call this on an instruction not
  /// currently inserted into a function.
  void dropLocation();

  /// Merge the DIAssignID metadata from this instruction and those attached to
  /// instructions in \p SourceInstructions. This process performs a RAUW on
  /// the MetadataAsValue uses of the merged DIAssignID nodes. Not every
  /// instruction in \p SourceInstructions needs to have DIAssignID
  /// metadata. If none of them do then nothing happens. If this instruction
  /// does not have a DIAssignID attachment but at least one in \p
  /// SourceInstructions does then the merged one will be attached to
  /// it. However, instructions without attachments in \p SourceInstructions
  /// are not modified.
  void mergeDIAssignID(ArrayRef<const Instruction *> SourceInstructions);

private:
  // These are all implemented in Metadata.cpp.
  MDNode *getMetadataImpl(unsigned KindID) const;
  MDNode *getMetadataImpl(StringRef Kind) const;
  void
  getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;

  /// Update the LLVMContext ID-to-Instruction(s) mapping. If \p ID is nullptr
  /// then clear the mapping for this instruction.
  void updateDIAssignIDMapping(DIAssignID *ID);

public:
  //===--------------------------------------------------------------------===//
  // Predicates and helper methods.
  //===--------------------------------------------------------------------===//

  /// Return true if the instruction is associative:
  ///
  ///   Associative operators satisfy:  x op (y op z) === (x op y) op z
  ///
  /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
  ///
  bool isAssociative() const LLVM_READONLY;
  static bool isAssociative(unsigned Opcode) {
    return Opcode == And || Opcode == Or || Opcode == Xor ||
           Opcode == Add || Opcode == Mul;
  }

  /// Return true if the instruction is commutative:
  ///
  ///   Commutative operators satisfy: (x op y) === (y op x)
  ///
  /// In LLVM, these are the commutative operators, plus SetEQ and SetNE, when
  /// applied to any type.
  ///
  bool isCommutative() const LLVM_READONLY;
  static bool isCommutative(unsigned Opcode) {
    switch (Opcode) {
    case Add: case FAdd:
    case Mul: case FMul:
    case And: case Or: case Xor:
      return true;
    default:
      return false;
    }
  }

  /// Return true if the instruction is idempotent:
  ///
  ///   Idempotent operators satisfy:  x op x === x
  ///
  /// In LLVM, the And and Or operators are idempotent.
  ///
  bool isIdempotent() const { return isIdempotent(getOpcode()); }
  static bool isIdempotent(unsigned Opcode) {
    return Opcode == And || Opcode == Or;
  }

  /// Return true if the instruction is nilpotent:
  ///
  ///   Nilpotent operators satisfy:  x op x === Id,
  ///
  ///   where Id is the identity for the operator, i.e. a constant such that
  ///     x op Id === x and Id op x === x for all x.
  ///
  /// In LLVM, the Xor operator is nilpotent.
  ///
  bool isNilpotent() const { return isNilpotent(getOpcode()); }
  static bool isNilpotent(unsigned Opcode) {
    return Opcode == Xor;
  }

  /// Return true if this instruction may modify memory.
  bool mayWriteToMemory() const LLVM_READONLY;

  /// Return true if this instruction may read memory.
  bool mayReadFromMemory() const LLVM_READONLY;

  /// Return true if this instruction may read or write memory.
  bool mayReadOrWriteMemory() const {
    return mayReadFromMemory() || mayWriteToMemory();
  }

  /// Return true if this instruction has an AtomicOrdering of unordered or
  /// higher.
  bool isAtomic() const LLVM_READONLY;

  /// Return true if this atomic instruction loads from memory.
  bool hasAtomicLoad() const LLVM_READONLY;

  /// Return true if this atomic instruction stores to memory.
  bool hasAtomicStore() const LLVM_READONLY;

  /// Return true if this instruction has a volatile memory access.
  bool isVolatile() const LLVM_READONLY;

  /// Return the type this instruction accesses in memory, if any.
  Type *getAccessType() const LLVM_READONLY;

  /// Return true if this instruction may throw an exception.
  ///
  /// If IncludePhaseOneUnwind is set, this will also include cases where
  /// phase one unwinding may unwind past this frame due to skipping of
  /// cleanup landingpads.
  bool mayThrow(bool IncludePhaseOneUnwind = false) const LLVM_READONLY;

  /// Return true if this instruction behaves like a memory fence: it can load
  /// or store to a memory location without being given a memory location.
  bool isFenceLike() const {
    switch (getOpcode()) {
    default:
      return false;
    // This list should be kept in sync with the list in mayWriteToMemory for
    // all opcodes which don't have a memory location.
    case Instruction::Fence:
    case Instruction::CatchPad:
    case Instruction::CatchRet:
    case Instruction::Call:
    case Instruction::Invoke:
      return true;
    }
  }

  /// Return true if the instruction may have side effects.
  ///
  /// Side effects are:
  ///  * Writing to memory.
  ///  * Unwinding.
  ///  * Not returning (e.g. an infinite loop).
  ///
  /// Note that this does not consider malloc and alloca to have side
  /// effects because the newly allocated memory is completely invisible to
  /// instructions which don't use the returned value.  For cases where this
  /// matters, isSafeToSpeculativelyExecute may be more appropriate.
  bool mayHaveSideEffects() const LLVM_READONLY;

  /// Return true if the instruction can be removed if the result is unused.
  ///
  /// When constant folding, some instructions cannot be removed even if their
  /// results are unused. Specifically terminator instructions and calls that
  /// may have side effects cannot be removed without semantically changing the
  /// generated program.
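  ///
  /// A minimal dead-code-elimination sketch (illustrative):
  /// \code
  ///   if (I->use_empty() && I->isSafeToRemove())
  ///     I->eraseFromParent();
  /// \endcode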
  bool isSafeToRemove() const LLVM_READONLY;

  /// Return true if the instruction will return (unwinding is considered as
  /// a form of returning control flow here).
  bool willReturn() const LLVM_READONLY;

  /// Return true if the instruction is one of the EH pad instructions.
  bool isEHPad() const {
    switch (getOpcode()) {
    case Instruction::CatchSwitch:
    case Instruction::CatchPad:
    case Instruction::CleanupPad:
    case Instruction::LandingPad:
      return true;
    default:
      return false;
    }
  }

  /// Return true if the instruction is a llvm.lifetime.start or
  /// llvm.lifetime.end marker.
  bool isLifetimeStartOrEnd() const LLVM_READONLY;

  /// Return true if the instruction is a llvm.launder.invariant.group or
  /// llvm.strip.invariant.group.
  bool isLaunderOrStripInvariantGroup() const LLVM_READONLY;

  /// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
  bool isDebugOrPseudoInst() const LLVM_READONLY;

  /// Return a pointer to the next non-debug instruction in the same basic
  /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
  /// operations if \c SkipPseudoOp is true.
  const Instruction *
  getNextNonDebugInstruction(bool SkipPseudoOp = false) const;
  Instruction *getNextNonDebugInstruction(bool SkipPseudoOp = false) {
    return const_cast<Instruction *>(
        static_cast<const Instruction *>(this)->getNextNonDebugInstruction(
            SkipPseudoOp));
  }

  /// Return a pointer to the previous non-debug instruction in the same basic
  /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
  /// operations if \c SkipPseudoOp is true.
  const Instruction *
  getPrevNonDebugInstruction(bool SkipPseudoOp = false) const;
  Instruction *getPrevNonDebugInstruction(bool SkipPseudoOp = false) {
    return const_cast<Instruction *>(
        static_cast<const Instruction *>(this)->getPrevNonDebugInstruction(
            SkipPseudoOp));
  }

  /// Create a copy of 'this' instruction that is identical in all ways except
  /// the following:
  ///   * The instruction has no parent
  ///   * The instruction has no name
  ///
  Instruction *clone() const;

  /// Return true if the specified instruction is exactly identical to the
  /// current one. This means that all operands match and any extra information
  /// (e.g. load is volatile) agree.
  bool isIdenticalTo(const Instruction *I) const LLVM_READONLY;

  /// This is like isIdenticalTo, except that it ignores the
  /// SubclassOptionalData flags, which may specify conditions under which the
  /// instruction's result is undefined.
  bool isIdenticalToWhenDefined(const Instruction *I) const LLVM_READONLY;

  /// When checking for operation equivalence (using isSameOperationAs) it is
  /// sometimes useful to ignore certain attributes.
  enum OperationEquivalenceFlags {
    /// Check for equivalence ignoring load/store alignment.
    CompareIgnoringAlignment = 1<<0,
    /// Check for equivalence treating a type and a vector of that type
    /// as equivalent.
    CompareUsingScalarTypes = 1<<1
  };

  /// This function determines if the specified instruction executes the same
  /// operation as the current one. This means that the opcodes, types, operand
  /// types and any other factors affecting the operation must be the same. This
  /// is similar to isIdenticalTo except the operands themselves don't have to
  /// be identical.
  /// @returns true if the specified instruction is the same operation as
  /// the current one.
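  ///
  /// A minimal usage sketch (illustrative; L1 and L2 are load instructions):
  /// \code
  ///   if (L1->isSameOperationAs(L2, Instruction::CompareIgnoringAlignment))
  ///     ; // Same load operation, ignoring any alignment difference.
  /// \endcode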
  bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const LLVM_READONLY;

  /// This function determines if the specified instruction has the same
  /// "special" characteristics as the current one. This means that opcode
  /// specific details are the same. As a common example, if we are comparing
  /// loads, then hasSameSpecialState would compare the alignments (among
  /// other things).
  /// @returns true if the specified instruction has the same opcode-specific
  /// characteristics as the current one.
  bool hasSameSpecialState(const Instruction *I2,
                           bool IgnoreAlignment = false) const LLVM_READONLY;

  /// Return true if there are any uses of this instruction in blocks other than
  /// the specified block. Note that PHI nodes are considered to evaluate their
  /// operands in the corresponding predecessor block.
  bool isUsedOutsideOfBlock(const BasicBlock *BB) const LLVM_READONLY;

  /// Return the number of successors that this instruction has. The instruction
  /// must be a terminator.
  unsigned getNumSuccessors() const LLVM_READONLY;

  /// Return the specified successor. This instruction must be a terminator.
  BasicBlock *getSuccessor(unsigned Idx) const LLVM_READONLY;

  /// Update the specified successor to point at the provided block. This
  /// instruction must be a terminator.
  void setSuccessor(unsigned Idx, BasicBlock *BB);
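
  // A minimal successor-rewiring sketch (illustrative; Term is a terminator):
  //   for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i)
  //     if (Term->getSuccessor(i) == OldBB)
  //       Term->setSuccessor(i, NewBB);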

  /// Replace the specified successor OldBB with the provided block.
  /// This instruction must be a terminator.
  void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() >= Value::InstructionVal;
  }

  //----------------------------------------------------------------------
  // Exported enumerations.
  //
  enum TermOps {       // These terminate basic blocks
#define  FIRST_TERM_INST(N)             TermOpsBegin = N,
#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
#define   LAST_TERM_INST(N)             TermOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum UnaryOps {
#define  FIRST_UNARY_INST(N)             UnaryOpsBegin = N,
#define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N,
#define   LAST_UNARY_INST(N)             UnaryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum BinaryOps {
#define  FIRST_BINARY_INST(N)             BinaryOpsBegin = N,
#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
#define   LAST_BINARY_INST(N)             BinaryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum MemoryOps {
#define  FIRST_MEMORY_INST(N)             MemoryOpsBegin = N,
#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
#define   LAST_MEMORY_INST(N)             MemoryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum CastOps {
#define  FIRST_CAST_INST(N)             CastOpsBegin = N,
#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
#define   LAST_CAST_INST(N)             CastOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum FuncletPadOps {
#define  FIRST_FUNCLETPAD_INST(N)             FuncletPadOpsBegin = N,
#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
#define   LAST_FUNCLETPAD_INST(N)             FuncletPadOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

  enum OtherOps {
#define  FIRST_OTHER_INST(N)             OtherOpsBegin = N,
#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
#define   LAST_OTHER_INST(N)             OtherOpsEnd = N+1
#include "llvm/IR/Instruction.def"
  };

private:
  friend class SymbolTableListTraits<Instruction>;
  friend class BasicBlock; // For renumbering.

  // Shadow Value::setValueSubclassData with a private forwarding method so that
  // subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }

  unsigned short getSubclassDataFromValue() const {
    return Value::getSubclassDataFromValue();
  }

  void setParent(BasicBlock *P);

protected:
  // Instruction subclasses can stick up to 15 bits of stuff into the
  // SubclassData field of the instruction with these members.

  template <typename BitfieldElement>
  typename BitfieldElement::Type getSubclassData() const {
    static_assert(
        std::is_same<BitfieldElement, HasMetadataField>::value ||
            !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
        "Must not overlap with the metadata bit");
    return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
  }

  template <typename BitfieldElement>
  void setSubclassData(typename BitfieldElement::Type Value) {
    static_assert(
        std::is_same<BitfieldElement, HasMetadataField>::value ||
            !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
        "Must not overlap with the metadata bit");
    auto Storage = getSubclassDataFromValue();
    Bitfield::set<BitfieldElement>(Storage, Value);
    setValueSubclassData(Storage);
  }

  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
              Instruction *InsertBefore = nullptr);
  Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
              BasicBlock *InsertAtEnd);

private:
  /// Create a copy of this instruction.
  Instruction *cloneImpl() const;
};

inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
  V->deleteValue();
}

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTION_H
//===- SafepointIRVerifier.h - Checks for GC relocation problems *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a verifier which is useful for enforcing the relocation
// properties required by a relocating GC.  Specifically, it looks for uses of
// the unrelocated value of pointer SSA values after a possible safepoint. It
// attempts to report no false negatives, but may end up reporting false
// positives in rare cases (see the note at the top of the corresponding cpp
// file).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_SAFEPOINTIRVERIFIER_H
#define LLVM_IR_SAFEPOINTIRVERIFIER_H

#include "llvm/IR/PassManager.h"

namespace llvm {

class Function;
class FunctionPass;

/// Run the safepoint verifier over a single function.  Crashes on failure.
void verifySafepointIR(Function &F);

/// Create an instance of the safepoint verifier pass which can be added to
/// a pass pipeline to check for relocation bugs.
FunctionPass *createSafepointIRVerifierPass();

/// Create an instance of the safepoint verifier pass which can be added to
/// a pass pipeline to check for relocation bugs.
class SafepointIRVerifierPass : public PassInfoMixin<SafepointIRVerifierPass> {

public:
  explicit SafepointIRVerifierPass() = default;

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
} // end namespace llvm

#endif // LLVM_IR_SAFEPOINTIRVERIFIER_H
//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AMDGPU-specific intrinsics.
//
//===----------------------------------------------------------------------===//

class AMDGPUReadPreloadRegisterIntrinsic
  : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
  : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, ClangBuiltin<name>;

// Used to tag image and resource intrinsics with information used to generate
// mem operands.
class AMDGPURsrcIntrinsic<int rsrcarg, bit isimage = false> {
  int RsrcArg = rsrcarg;
  bit IsImage = isimage;
}

let TargetPrefix = "r600" in {

multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
  def _x : AMDGPUReadPreloadRegisterIntrinsic;
  def _y : AMDGPUReadPreloadRegisterIntrinsic;
  def _z : AMDGPUReadPreloadRegisterIntrinsic;
}

multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
  def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
  def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
  def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
}

defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                                 <"__builtin_r600_read_global_size">;
defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                             <"__builtin_r600_read_ngroups">;
defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                          <"__builtin_r600_read_tgid">;

defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;

def int_r600_group_barrier : ClangBuiltin<"__builtin_r600_group_barrier">,
  Intrinsic<[], [], [IntrConvergent, IntrWillReturn]>;

// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
  ClangBuiltin<"__builtin_r600_implicitarg_ptr">,
  DefaultAttrsIntrinsic<[LLVMQualPointerType<7>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_r600_rat_store_typed :
  // 1st parameter: Data
  // 2nd parameter: Index
  // 3rd parameter: Constant RAT ID
  DefaultAttrsIntrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
  ClangBuiltin<"__builtin_r600_rat_store_typed">;

def int_r600_recipsqrt_ieee :  DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_recipsqrt_clamped : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_cube : DefaultAttrsIntrinsic<
  [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_store_stream_output : DefaultAttrsIntrinsic<
  [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []
>;

class TextureIntrinsicFloatInput : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [
  llvm_v4f32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem]
>;

class TextureIntrinsicInt32Input : DefaultAttrsIntrinsic<[llvm_v4i32_ty], [
    llvm_v4i32_ty, // Coord
    llvm_i32_ty,   // offset_x
    llvm_i32_ty,   // offset_y,
    llvm_i32_ty,   // offset_z,
    llvm_i32_ty,   // resource_id
    llvm_i32_ty,   // samplerid
    llvm_i32_ty,   // coord_type_x
    llvm_i32_ty,   // coord_type_y
    llvm_i32_ty,   // coord_type_z
    llvm_i32_ty],  // coord_type_w
    [IntrNoMem]
>;

def int_r600_store_swizzle :
  Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_r600_tex : TextureIntrinsicFloatInput;
def int_r600_texc : TextureIntrinsicFloatInput;
def int_r600_txl : TextureIntrinsicFloatInput;
def int_r600_txlc : TextureIntrinsicFloatInput;
def int_r600_txb : TextureIntrinsicFloatInput;
def int_r600_txbc : TextureIntrinsicFloatInput;
def int_r600_txf : TextureIntrinsicInt32Input;
def int_r600_txq : TextureIntrinsicInt32Input;
def int_r600_ddx : TextureIntrinsicFloatInput;
def int_r600_ddy : TextureIntrinsicFloatInput;

def int_r600_dot4 : DefaultAttrsIntrinsic<[llvm_float_ty],
  [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_kill : DefaultAttrsIntrinsic<[], [llvm_float_ty], []>;

} // End TargetPrefix = "r600"

let TargetPrefix = "amdgcn" in {

//===----------------------------------------------------------------------===//
// ABI Special Intrinsics
//===----------------------------------------------------------------------===//

defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                               <"__builtin_amdgcn_workgroup_id">;

def int_amdgcn_dispatch_ptr :
  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_queue_ptr :
  ClangBuiltin<"__builtin_amdgcn_queue_ptr">,
  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_kernarg_segment_ptr :
  ClangBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_implicitarg_ptr :
  ClangBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_groupstaticsize :
  ClangBuiltin<"__builtin_amdgcn_groupstaticsize">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_dispatch_id :
  ClangBuiltin<"__builtin_amdgcn_dispatch_id">,
  DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;

// For internal use. Coordinates LDS lowering between IR transform and backend.
def int_amdgcn_lds_kernel_id :
  DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_implicit_buffer_ptr :
  ClangBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
  [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;

// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
// FIXME: Should be mangled for wave size.
def int_amdgcn_init_exec : Intrinsic<[],
  [llvm_i64_ty],      // 64-bit literal constant
  [IntrConvergent, IntrNoMem, IntrHasSideEffects, IntrNoCallback,
   IntrNoFree, IntrWillReturn, ImmArg<ArgIndex<0>>]>;

// Set EXEC according to a thread count packed in an SGPR input:
//    thread_count = (input >> bitoffset) & 0x7f;
// This is always moved to the beginning of the basic block.
// Note: only inreg arguments to the parent function are valid as
// inputs to this intrinsic, computed values cannot be used.
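// For example, with input = 0x00001F00 and bitoffset = 8:
//    thread_count = (0x00001F00 >> 8) & 0x7f = 31 active threads.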
def int_amdgcn_init_exec_from_input : Intrinsic<[],
  [llvm_i32_ty,       // 32-bit SGPR input
   llvm_i32_ty],      // bit offset of the thread count
  [IntrConvergent, IntrHasSideEffects, IntrNoMem, IntrNoCallback,
   IntrNoFree, IntrWillReturn, ImmArg<ArgIndex<1>>]>;

def int_amdgcn_wavefrontsize :
  ClangBuiltin<"__builtin_amdgcn_wavefrontsize">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;


//===----------------------------------------------------------------------===//
// Instruction Intrinsics
//===----------------------------------------------------------------------===//

// The first parameter is s_sendmsg immediate (i16),
// the second one is copied to m0
def int_amdgcn_s_sendmsg : ClangBuiltin<"__builtin_amdgcn_s_sendmsg">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_sendmsghalt : ClangBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;


// gfx11 intrinsic
// The first parameter is s_sendmsg immediate (i16). Return type is i32 or i64.
def int_amdgcn_s_sendmsg_rtn : Intrinsic <[llvm_anyint_ty], [llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_s_barrier : ClangBuiltin<"__builtin_amdgcn_s_barrier">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_wave_barrier : ClangBuiltin<"__builtin_amdgcn_wave_barrier">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// The 1st parameter is a mask for the types of instructions that may be allowed
// to cross the SCHED_BARRIER during scheduling.
//     MASK = 0x0000 0000: No instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0001: ALL, non-memory, non-side-effect producing instructions may be
//                         scheduled across SCHED_BARRIER, i.e. allow ALU instructions to pass.
//     MASK = 0x0000 0002: VALU instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0004: SALU instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0008: MFMA/WMMA instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0010: ALL VMEM instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0020: VMEM read instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0040: VMEM write instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0080: ALL DS instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0100: ALL DS read instructions may be scheduled across SCHED_BARRIER.
//     MASK = 0x0000 0200: ALL DS write instructions may be scheduled across SCHED_BARRIER.
def int_amdgcn_sched_barrier : ClangBuiltin<"__builtin_amdgcn_sched_barrier">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
                                IntrWillReturn, IntrNoCallback, IntrNoFree]>;
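
// A minimal C++ sketch of emitting a sched_barrier (illustrative; assumes an
// llvm::IRBuilder<> named B):
//   // MASK = 0: no instructions may be scheduled across the barrier.
//   B.CreateIntrinsic(Intrinsic::amdgcn_sched_barrier, {}, {B.getInt32(0)});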

// The first parameter is a mask that determines the types of instructions that
// you would like to synchronize around and add to a scheduling group. The
// values of the mask are defined above for sched_barrier. These instructions
// will be selected from the bottom up starting from the sched_group_barrier's
// location during instruction scheduling. The second parameter is the number of
// matching instructions that will be associated with this sched_group_barrier.
// The third parameter is an identifier which is used to describe what other
// sched_group_barriers should be synchronized with.
def int_amdgcn_sched_group_barrier : ClangBuiltin<"__builtin_amdgcn_sched_group_barrier">,
  Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, IntrNoMem, IntrHasSideEffects,
   IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// Scheduler optimization hint.
//     MASK = 0: Small gemm opt
def int_amdgcn_iglp_opt : ClangBuiltin<"__builtin_amdgcn_iglp_opt">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrConvergent,
                                IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_s_waitcnt : ClangBuiltin<"__builtin_amdgcn_s_waitcnt">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_div_scale : DefaultAttrsIntrinsic<
  // 1st parameter: Numerator
  // 2nd parameter: Denominator
  // 3rd parameter: Select quotient. Must equal Numerator or Denominator.
  //                (0 = Denominator, 1 = Numerator).
  [llvm_anyfloat_ty, llvm_i1_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<2>>]
>;

def int_amdgcn_div_fmas : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_div_fixup : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

// Look up 2.0 / pi (src0) with segment select src1[4:0].
def int_amdgcn_trig_preop : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sin : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cos : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

// v_log_{f16|f32}, performs log2. f32 version does not handle
// denormals. There is no reason to use this for f16 as it does
// support denormals, and the generic log2 intrinsic should be
// preferred.
def int_amdgcn_log : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

// v_exp_{f16|f32} (int_amdgcn_exp was taken by export
// already). Performs exp2. f32 version does not handle
// denormals. There is no reason to use this for f16 as it does
// support denormals, and the generic exp2 intrinsic should be
// preferred.
def int_amdgcn_exp2 : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_log_clamp : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmul_legacy : ClangBuiltin<"__builtin_amdgcn_fmul_legacy">,
  DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, Commutative]
>;

// Fused single-precision multiply-add with legacy behaviour for the multiply,
// which is that +/- 0.0 * anything (even NaN or infinity) is +0.0. This is
// intended for use on subtargets that have the v_fma_legacy_f32 and/or
// v_fmac_legacy_f32 instructions. (Note that v_fma_legacy_f16 is unrelated and
// has a completely different kind of legacy behaviour.)
def int_amdgcn_fma_legacy :
  DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable, Commutative]
>;

def int_amdgcn_rcp : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp_legacy : ClangBuiltin<"__builtin_amdgcn_rcp_legacy">,
  DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sqrt :  DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq :  DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_legacy :  ClangBuiltin<"__builtin_amdgcn_rsq_legacy">,
  DefaultAttrsIntrinsic<
  [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>;

// out = 1.0 / sqrt(a) result clamped to +/- max_float.
def int_amdgcn_rsq_clamp : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;

// For int_amdgcn_ldexp_f16, only the low 16 bits of the i32 src1 operand will be used.
def int_amdgcn_ldexp : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_mant : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_exp : DefaultAttrsIntrinsic<
  [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
>;

// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : DefaultAttrsIntrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pkrtz : ClangBuiltin<"__builtin_amdgcn_cvt_pkrtz">,
  DefaultAttrsIntrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pknorm_i16 :
  ClangBuiltin<"__builtin_amdgcn_cvt_pknorm_i16">,
  DefaultAttrsIntrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pknorm_u16 :
  ClangBuiltin<"__builtin_amdgcn_cvt_pknorm_u16">,
  DefaultAttrsIntrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_i16 :
    ClangBuiltin<"__builtin_amdgcn_cvt_pk_i16">,
    DefaultAttrsIntrinsic<
  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_u16 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_u16">,
  DefaultAttrsIntrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_class : DefaultAttrsIntrinsic<
  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmed3 :
  DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubeid : ClangBuiltin<"__builtin_amdgcn_cubeid">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubema : ClangBuiltin<"__builtin_amdgcn_cubema">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
  [llvm_float_ty, llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubesc : ClangBuiltin<"__builtin_amdgcn_cubesc">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubetc : ClangBuiltin<"__builtin_amdgcn_cubetc">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
  DefaultAttrsIntrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

// v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
def int_amdgcn_fmad_ftz :
  DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
            [IntrNoMem, IntrSpeculatable]
>;

class AMDGPULDSIntrin :
  Intrinsic<[llvm_any_ty],
    [LLVMQualPointerType<3>,
    LLVMMatchType<0>,
    llvm_i32_ty, // ordering
    llvm_i32_ty, // scope
    llvm_i1_ty], // isVolatile
    [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>,
     ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree]
>;

// FIXME: The m0 argument should be moved after the normal arguments
class AMDGPUDSOrderedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
  // the bit packing can be optimized at the IR level.
  [LLVMQualPointerType<2>, // IntToPtr(M0)
   llvm_i32_ty, // value to add or swap
   llvm_i32_ty, // ordering
   llvm_i32_ty, // scope
   llvm_i1_ty,  // isVolatile
   llvm_i32_ty, // ordered count index (OA index), also added to the address
                // gfx10: bits 24-27 indicate the number of active threads/dwords
   llvm_i1_ty,  // wave release, usually set to 1
   llvm_i1_ty], // wave done, set to 1 for the last ordered instruction
  [IntrWillReturn, NoCapture<ArgIndex<0>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>,
   ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree
  ]
>;

class AMDGPUDSAppendConsumedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  [llvm_anyptr_ty, // LDS or GDS ptr
   llvm_i1_ty], // isVolatile
   [IntrConvergent, IntrWillReturn, IntrArgMemOnly,
    NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>, IntrNoCallback, IntrNoFree],
   "",
   [SDNPMemOperand]
>;

def int_amdgcn_ds_ordered_add : AMDGPUDSOrderedIntrinsic;
def int_amdgcn_ds_ordered_swap : AMDGPUDSOrderedIntrinsic;

// The pointer argument is assumed to be dynamically uniform if a VGPR.
def int_amdgcn_ds_append : AMDGPUDSAppendConsumedIntrinsic;
def int_amdgcn_ds_consume : AMDGPUDSAppendConsumedIntrinsic;

def int_amdgcn_ds_fadd : AMDGPULDSIntrin;
def int_amdgcn_ds_fmin : AMDGPULDSIntrin;
def int_amdgcn_ds_fmax : AMDGPULDSIntrin;

} // TargetPrefix = "amdgcn"

// New-style image intrinsics

//////////////////////////////////////////////////////////////////////////
// Dimension-aware image intrinsics framework
//////////////////////////////////////////////////////////////////////////

// Helper class to represent (type, name) combinations of arguments. The
// argument names are explanatory and used as DAG operand names for codegen
// pattern matching.
class AMDGPUArg<LLVMType ty, string name> {
  LLVMType Type = ty;
  string Name = name;
}

// Return [AMDGPUArg<basety, names[0]>, AMDGPUArg<LLVMMatchType<0>, names[1]>, ...]
class makeArgList<list<string> names, LLVMType basety> {
  list<AMDGPUArg> ret =
    !listconcat([AMDGPUArg<basety, names[0]>],
                !foreach(name, !tail(names), AMDGPUArg<LLVMMatchType<0>, name>));
}

// Return arglist, with LLVMMatchType's references shifted by 'shift'.
class arglistmatchshift<list<AMDGPUArg> arglist, int shift> {
  list<AMDGPUArg> ret =
    !foreach(arg, arglist,
             !if(!isa<LLVMMatchType>(arg.Type),
                 AMDGPUArg<LLVMMatchType<!add(!cast<LLVMMatchType>(arg.Type).Number, shift)>,
                           arg.Name>,
                 arg));
}

// Return the concatenation of the given arglists. LLVMMatchType's are adjusted
// accordingly, and shifted by an additional 'shift'.
class arglistconcat<list<list<AMDGPUArg>> arglists, int shift = 0> {
  list<AMDGPUArg> ret =
    !foldl([]<AMDGPUArg>, arglists, lhs, rhs,
           !listconcat(
             lhs,
             arglistmatchshift<rhs,
                               !add(shift, !foldl(0, lhs, a, b,
                                                  !add(a, b.Type.isAny)))>.ret));
}

// Represent texture/image types / dimensionality.
class AMDGPUDimProps<bits<3> enc, string name, string asmsuffix,
                     list<string> coord_names, list<string> slice_names,
                     bit msaa = 0> {
  AMDGPUDimProps Dim = !cast<AMDGPUDimProps>(NAME);
  string Name = name; // e.g. "2darraymsaa"
  string AsmSuffix = asmsuffix; // e.g. 2D_MSAA_ARRAY (used in assembly strings)
  bits<3> Encoding = enc;
  bit DA = 0; // DA bit in MIMG encoding
  bit MSAA = msaa;

  list<AMDGPUArg> CoordSliceArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyfloat_ty>.ret;
  list<AMDGPUArg> CoordSliceIntArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyint_ty>.ret;
  list<AMDGPUArg> GradientArgs =
    makeArgList<!listconcat(!foreach(name, coord_names, "d" # name # "dh"),
                            !foreach(name, coord_names, "d" # name # "dv")),
                llvm_anyfloat_ty>.ret;

  bits<8> NumCoords = !size(CoordSliceArgs);
  bits<8> NumGradients = !size(GradientArgs);
}

def AMDGPUDim1D : AMDGPUDimProps<0x0, "1d", "1D", ["s"], []>;
def AMDGPUDim2D : AMDGPUDimProps<0x1, "2d", "2D", ["s", "t"], []>;
def AMDGPUDim3D : AMDGPUDimProps<0x2, "3d", "3D", ["s", "t", "r"], []>;
let DA = 1 in {
  def AMDGPUDimCube : AMDGPUDimProps<0x3, "cube", "CUBE", ["s", "t"], ["face"]>;
  def AMDGPUDim1DArray : AMDGPUDimProps<0x4, "1darray", "1D_ARRAY", ["s"], ["slice"]>;
  def AMDGPUDim2DArray : AMDGPUDimProps<0x5, "2darray", "2D_ARRAY", ["s", "t"], ["slice"]>;
}
def AMDGPUDim2DMsaa : AMDGPUDimProps<0x6, "2dmsaa", "2D_MSAA", ["s", "t"], ["fragid"], 1>;
let DA = 1 in {
  def AMDGPUDim2DArrayMsaa : AMDGPUDimProps<0x7, "2darraymsaa", "2D_MSAA_ARRAY", ["s", "t"], ["slice", "fragid"], 1>;
}

def AMDGPUDims {
  list<AMDGPUDimProps> NoMsaa = [AMDGPUDim1D, AMDGPUDim2D, AMDGPUDim3D,
                                 AMDGPUDimCube, AMDGPUDim1DArray,
                                 AMDGPUDim2DArray];
  list<AMDGPUDimProps> Msaa = [AMDGPUDim2DMsaa, AMDGPUDim2DArrayMsaa];
  list<AMDGPUDimProps> All = !listconcat(NoMsaa, Msaa);
}

// Represent sample variants, i.e. _C, _O, _B, ... and combinations thereof.
class AMDGPUSampleVariant<string ucmod, string lcmod, list<AMDGPUArg> extra_addr> {
  string UpperCaseMod = ucmod;
  string LowerCaseMod = lcmod;

  // {offset} {bias} {z-compare}
  list<AMDGPUArg> ExtraAddrArgs = extra_addr;
  bit Offset = false;
  bit Bias = false;
  bit ZCompare = false;
  bit Gradients = false;

  // Name of the {lod} or {clamp} argument that is appended to the coordinates,
  // if any.
  string LodOrClamp = "";
}

// AMDGPUSampleVariants: all variants supported by IMAGE_SAMPLE
// AMDGPUSampleVariantsNoGradients: variants supported by IMAGE_GATHER4
defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
  multiclass AMDGPUSampleHelper_Offset<string ucmod, string lcmod,
                                       list<AMDGPUArg> extra_addr> {
    def NAME#lcmod : AMDGPUSampleVariant<ucmod, lcmod, extra_addr>;
    let Offset = true in
    def NAME#lcmod#_o : AMDGPUSampleVariant<
        ucmod#"_O", lcmod#"_o", !listconcat([AMDGPUArg<llvm_i32_ty, "offset">], extra_addr)>;
  }

  multiclass AMDGPUSampleHelper_Compare<string ucmod, string lcmod,
                                        list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Offset<ucmod, lcmod, extra_addr>;
    let ZCompare = true in
    defm NAME : AMDGPUSampleHelper_Offset<
        "_C"#ucmod, "_c"#lcmod, !listconcat(extra_addr, [AMDGPUArg<llvm_float_ty, "zcompare">])>;
  }

  multiclass AMDGPUSampleHelper_Clamp<string ucmod, string lcmod,
                                      list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Compare<ucmod, lcmod, extra_addr>;
    let LodOrClamp = "clamp" in
    defm NAME : AMDGPUSampleHelper_Compare<ucmod#"_CL", lcmod#"_cl", extra_addr>;
  }

  defset list<AMDGPUSampleVariant> AMDGPUSampleVariantsNoGradients = {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"", "", []>;
    let Bias = true in
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<
        "_B", "_b", [AMDGPUArg<llvm_anyfloat_ty, "bias">]>;
    let LodOrClamp = "lod" in
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_L", "_l", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_LZ", "_lz", []>;
  }

  let Gradients = true in {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_D", "_d", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_CD", "_cd", []>;
  }
}
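
// Expansion example (derivable from the helpers above): instantiating
// AMDGPUSampleHelper_Clamp with "_B"/"_b" yields the eight variants
//   _b, _b_o, _c_b, _c_b_o, _b_cl, _b_cl_o, _c_b_cl, _c_b_cl_o
// i.e. every combination of offset (_o), z-compare (_c) and clamp (_cl)
// around the bias modifier, which combine with a dimension below into names
// such as llvm.amdgcn.image.sample.c.b.cl.o.2d.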

// Helper class to capture the profile of a dimension-aware image intrinsic.
// This information is used to generate the intrinsic's type and to inform
// codegen pattern matching.
class AMDGPUDimProfile<string opmod,
                       AMDGPUDimProps dim> {
  AMDGPUDimProps Dim = dim;
  string OpMod = opmod; // the corresponding instruction is named IMAGE_OpMod

  // These are intended to be overridden by subclasses.
  bit IsSample = false;
  bit IsAtomic = false;
  list<LLVMType> RetTypes = [];
  list<AMDGPUArg> DataArgs = [];
  list<AMDGPUArg> ExtraAddrArgs = [];
  bit Offset = false;
  bit Bias = false;
  bit ZCompare = false;
  bit Gradients = false;
  string LodClampMip = "";

  int NumRetAndDataAnyTypes =
    !foldl(0, !listconcat(RetTypes, !foreach(arg, DataArgs, arg.Type)), a, b,
           !add(a, b.isAny));

  list<AMDGPUArg> AddrArgs =
    arglistconcat<[ExtraAddrArgs,
                   !if(Gradients, dim.GradientArgs, []),
                   !listconcat(!if(IsSample, dim.CoordSliceArgs, dim.CoordSliceIntArgs),
                               !if(!empty(LodClampMip),
                                   []<AMDGPUArg>,
                                   [AMDGPUArg<LLVMMatchType<0>, LodClampMip>]))],
                  NumRetAndDataAnyTypes>.ret;
  list<LLVMType> AddrTypes = !foreach(arg, AddrArgs, arg.Type);
  list<AMDGPUArg> AddrDefaultArgs =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_float_ty, llvm_i32_ty), arg.Type),
                       arg.Name>);
  list<AMDGPUArg> AddrA16Args =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_half_ty, llvm_i16_ty), arg.Type),
                       arg.Name>);
}
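
// Address ordering example (a sketch derived from the AddrArgs construction
// above): for the _d_cl gradient-sample variant on 2d (Gradients = 1,
// LodClampMip = "clamp", no extra address args), the address vector is
//   [dsdh, dtdh, dsdv, dtdv, s, t, clamp]
// i.e. gradients first, then the (float) coordinates, with the trailing
// lod/clamp/mip value appended last.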

class AMDGPUDimProfileCopy<AMDGPUDimProfile base> : AMDGPUDimProfile<base.OpMod, base.Dim> {
  let IsSample = base.IsSample;
  let IsAtomic = base.IsAtomic;
  let RetTypes = base.RetTypes;
  let DataArgs = base.DataArgs;
  let ExtraAddrArgs = base.ExtraAddrArgs;
  let Offset = base.Offset;
  let Bias = base.Bias;
  let ZCompare = base.ZCompare;
  let Gradients = base.Gradients;
  let LodClampMip = base.LodClampMip;
}

class AMDGPUDimSampleProfile<string opmod,
                             AMDGPUDimProps dim,
                             AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
  let IsSample = true;
  let RetTypes = [llvm_any_ty];
  let ExtraAddrArgs = sample.ExtraAddrArgs;
  let Offset = sample.Offset;
  let Bias = sample.Bias;
  let ZCompare = sample.ZCompare;
  let Gradients = sample.Gradients;
  let LodClampMip = sample.LodOrClamp;
}

class AMDGPUDimNoSampleProfile<string opmod,
                               AMDGPUDimProps dim,
                               list<LLVMType> retty,
                               list<AMDGPUArg> dataargs,
                               bit Mip = false> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = retty;
  let DataArgs = dataargs;
  let LodClampMip = !if(Mip, "mip", "");
}

class AMDGPUDimAtomicProfile<string opmod,
                             AMDGPUDimProps dim,
                             list<AMDGPUArg> dataargs> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = [llvm_anyint_ty];
  let DataArgs = dataargs;
  let IsAtomic = true;
}

class AMDGPUDimAtomicFloatProfile<string opmod, AMDGPUDimProps dim,
                                  list<AMDGPUArg> dataargs>
    : AMDGPUDimAtomicProfile<opmod, dim, dataargs> {
  let RetTypes = [llvm_anyfloat_ty];
}

class AMDGPUDimGetResInfoProfile<AMDGPUDimProps dim>
    : AMDGPUDimProfile<"GET_RESINFO", dim> {
  let RetTypes = [llvm_anyfloat_ty];
  let DataArgs = [];
  let AddrArgs = [AMDGPUArg<llvm_anyint_ty, "mip">];
  let LodClampMip = "mip";
}

// Helper class for figuring out image intrinsic argument indexes.
class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
  int NumDataArgs = !size(P_.DataArgs);
  int NumDmaskArgs = !not(P_.IsAtomic);
  int NumOffsetArgs = !if(P_.Offset, 1, 0);
  int NumBiasArgs = !if(P_.Bias, 1, 0);
  int NumZCompareArgs = !if(P_.ZCompare, 1, 0);
  int NumExtraAddrArgs = !add(NumOffsetArgs, NumBiasArgs, NumZCompareArgs);
  int NumVAddrArgs = !size(P_.AddrArgs);
  int NumGradientArgs = !if(P_.Gradients, !size(P_.Dim.GradientArgs), 0);
  int NumCoordArgs = !if(P_.IsSample, !size(P_.Dim.CoordSliceArgs), !size(P_.Dim.CoordSliceIntArgs));
  int NumRSrcArgs = 1;
  int NumSampArgs = !if(P_.IsSample, 2, 0);
  int DmaskArgIndex = NumDataArgs;
  int VAddrArgIndex = !add(DmaskArgIndex, NumDmaskArgs);
  int OffsetArgIndex = VAddrArgIndex;
  int BiasArgIndex = !add(VAddrArgIndex, NumOffsetArgs);
  int ZCompareArgIndex = !add(BiasArgIndex, NumBiasArgs);
  int GradientArgIndex = !add(VAddrArgIndex, NumExtraAddrArgs);
  int CoordArgIndex = !add(GradientArgIndex, NumGradientArgs);
  int LodArgIndex = !add(VAddrArgIndex, NumVAddrArgs, -1);
  int MipArgIndex = LodArgIndex;
  int RsrcArgIndex = !add(VAddrArgIndex, NumVAddrArgs);
  int SampArgIndex = !add(RsrcArgIndex, NumRSrcArgs);
  int UnormArgIndex = !add(SampArgIndex, 1);
  int TexFailCtrlArgIndex = !add(SampArgIndex, NumSampArgs);
  int CachePolicyArgIndex = !add(TexFailCtrlArgIndex, 1);
}
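
// Worked example (derivable from the fields above): for a plain 2D sample
// (IsSample = 1, no data/offset/bias/zcompare args, coords s and t) the
// evaluated indices are DmaskArgIndex = 0, VAddrArgIndex = 1 (s = 1, t = 2),
// RsrcArgIndex = 3, SampArgIndex = 4, UnormArgIndex = 5,
// TexFailCtrlArgIndex = 6 and CachePolicyArgIndex = 7.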

// All dimension-aware intrinsics are derived from this class.
class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
                              list<IntrinsicProperty> props,
                              list<SDNodeProperty> sdnodeprops> : DefaultAttrsIntrinsic<
    P_.RetTypes,        // vdata(VGPR) -- for load/atomic-with-return
    !listconcat(
      !foreach(arg, P_.DataArgs, arg.Type),      // vdata(VGPR) -- for store/atomic
      !if(P_.IsAtomic, [], [llvm_i32_ty]),       // dmask(imm)
      P_.AddrTypes,                              // vaddr(VGPR)
      [llvm_v8i32_ty],                           // rsrc(SGPR)
      !if(P_.IsSample, [llvm_v4i32_ty,           // samp(SGPR)
                        llvm_i1_ty], []),        // unorm(imm)
      [llvm_i32_ty,                              // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
       llvm_i32_ty]),                            // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc)

     !listconcat(props,
          !if(P_.IsAtomic, [], [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>>]),
          !if(P_.IsSample, [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>>], []),
          [ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>>,
           ImmArg<ArgIndex<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>>]),


      "", sdnodeprops>,
  AMDGPURsrcIntrinsic<!add(!size(P_.DataArgs), !size(P_.AddrTypes),
                           !if(P_.IsAtomic, 0, 1)), 1> {
  AMDGPUDimProfile P = P_;

  AMDGPUImageDimIntrinsic Intr = !cast<AMDGPUImageDimIntrinsic>(NAME);

  let TargetPrefix = "amdgcn";
}
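
// Illustrative result (assuming the usual overload mangling on the return
// and coordinate types): the 2d instance of the LOAD profile defined below
// comes out as
//   declare <4 x float>
//       @llvm.amdgcn.image.load.2d.v4f32.i32(i32 %dmask, i32 %s, i32 %t,
//                                            <8 x i32> %rsrc,
//                                            i32 %texfailctrl, i32 %cachepolicy)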

// Marker class for intrinsics with a DMask that determines the returned
// channels.
class AMDGPUImageDMaskIntrinsic;

defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {

  //////////////////////////////////////////////////////////////////////////
  // Load and store intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimIntrinsicsNoMsaa<string opmod,
                                            list<LLVMType> retty,
                                            list<AMDGPUArg> dataargs,
                                            list<IntrinsicProperty> props,
                                            list<SDNodeProperty> sdnodeprops,
                                            bit Mip = false> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  multiclass AMDGPUImageDimIntrinsicsAll<string opmod,
                                         list<LLVMType> retty,
                                         list<AMDGPUArg> dataargs,
                                         list<IntrinsicProperty> props,
                                         list<SDNodeProperty> sdnodeprops,
                                         bit Mip = false> {
    foreach dim = AMDGPUDims.All in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  defm int_amdgcn_image_load
    : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
                                  [SDNPMemOperand]>,
      AMDGPUImageDMaskIntrinsic;
  defm int_amdgcn_image_load_mip
    : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
                                     [IntrReadMem, IntrWillReturn], [SDNPMemOperand], 1>,
      AMDGPUImageDMaskIntrinsic;

  defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
              "STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>,
              AMDGPUImageDMaskIntrinsic;
  defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
              "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>,
              AMDGPUImageDMaskIntrinsic;

  //////////////////////////////////////////////////////////////////////////
  // MSAA intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach dim = AMDGPUDims.Msaa in {
    def int_amdgcn_image_msaa_load_x # _ # dim.Name:
        AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<"MSAA_LOAD_X", dim, [llvm_any_ty], []>,
            [IntrReadMem], [SDNPMemOperand]>;
  }

  foreach dim = AMDGPUDims.Msaa in {
    def int_amdgcn_image_msaa_load # _ # dim.Name:
        AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<"MSAA_LOAD", dim, [llvm_any_ty], []>,
            [IntrReadMem], [SDNPMemOperand]>;
  }

  //////////////////////////////////////////////////////////////////////////
  // sample and getlod intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimSampleDims<string opmod,
                                      AMDGPUSampleVariant sample,
                                      bit NoMem = false> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name) : AMDGPUImageDimIntrinsic<
          AMDGPUDimSampleProfile<opmod, dim, sample>,
          !if(NoMem, [IntrNoMem], [IntrReadMem]),
          !if(NoMem, [], [SDNPMemOperand])>;
    }
  }

  foreach sample = AMDGPUSampleVariants in {
    defm int_amdgcn_image_sample # sample.LowerCaseMod
      : AMDGPUImageDimSampleDims<"SAMPLE" # sample.UpperCaseMod, sample>,
        AMDGPUImageDMaskIntrinsic;
  }
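
// Illustrative IR for one instance (an assumed but typical overload): the 2d
// form of the basic sample variant is
//   %v = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(
//            i32 15, float %s, float %t, <8 x i32> %rsrc, <4 x i32> %samp,
//            i1 false, i32 0, i32 0)
// with the argument positions matching AMDGPUImageDimIntrinsicEval above.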

  defm int_amdgcn_image_getlod
    : AMDGPUImageDimSampleDims<"GET_LOD", AMDGPUSample, 1>,
      AMDGPUImageDMaskIntrinsic;

  //////////////////////////////////////////////////////////////////////////
  // getresinfo intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach dim = AMDGPUDims.All in {
    def !strconcat("int_amdgcn_image_getresinfo_", dim.Name)
      : AMDGPUImageDimIntrinsic<AMDGPUDimGetResInfoProfile<dim>, [IntrNoMem], []>,
        AMDGPUImageDMaskIntrinsic;
  }

  //////////////////////////////////////////////////////////////////////////
  // gather4 intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach sample = AMDGPUSampleVariantsNoGradients in {
    foreach dim = [AMDGPUDim2D, AMDGPUDimCube, AMDGPUDim2DArray] in {
      def int_amdgcn_image_gather4 # sample.LowerCaseMod # _ # dim.Name:
          AMDGPUImageDimIntrinsic<
              AMDGPUDimSampleProfile<"GATHER4" # sample.UpperCaseMod, dim, sample>,
              [IntrReadMem], [SDNPMemOperand]>;
    }
  }
}

//////////////////////////////////////////////////////////////////////////
// atomic intrinsics
//////////////////////////////////////////////////////////////////////////
defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = {
  multiclass AMDGPUImageDimAtomicX<string opmod, list<AMDGPUArg> dataargs,
                                   int isFloat = 0> {
        foreach dim = AMDGPUDims.All in {
          def !strconcat(NAME, "_", dim.Name): AMDGPUImageDimIntrinsic<
              !if (isFloat, AMDGPUDimAtomicFloatProfile<opmod, dim, dataargs>,
                   AMDGPUDimAtomicProfile<opmod, dim, dataargs>),
              [], [SDNPMemOperand]>;
        }
  }

  multiclass AMDGPUImageDimAtomic<string opmod, int isFloat = 0> {
    defm ""
        : AMDGPUImageDimAtomicX<opmod, [AMDGPUArg<LLVMMatchType<0>, "vdata">],
                                isFloat>;
  }

  multiclass AMDGPUImageDimFloatAtomic<string opmod> {
    defm "" : AMDGPUImageDimAtomic<opmod, 1 /*isFloat*/>;
  }

  defm int_amdgcn_image_atomic_swap : AMDGPUImageDimAtomic<"ATOMIC_SWAP">;
  defm int_amdgcn_image_atomic_add : AMDGPUImageDimAtomic<"ATOMIC_ADD">;
  defm int_amdgcn_image_atomic_sub : AMDGPUImageDimAtomic<"ATOMIC_SUB">;
  defm int_amdgcn_image_atomic_smin : AMDGPUImageDimAtomic<"ATOMIC_SMIN">;
  defm int_amdgcn_image_atomic_umin : AMDGPUImageDimAtomic<"ATOMIC_UMIN">;
  defm int_amdgcn_image_atomic_fmin : AMDGPUImageDimFloatAtomic<"ATOMIC_FMIN">;
  defm int_amdgcn_image_atomic_smax : AMDGPUImageDimAtomic<"ATOMIC_SMAX">;
  defm int_amdgcn_image_atomic_umax : AMDGPUImageDimAtomic<"ATOMIC_UMAX">;
  defm int_amdgcn_image_atomic_fmax : AMDGPUImageDimFloatAtomic<"ATOMIC_FMAX">;
  defm int_amdgcn_image_atomic_and : AMDGPUImageDimAtomic<"ATOMIC_AND">;
  defm int_amdgcn_image_atomic_or : AMDGPUImageDimAtomic<"ATOMIC_OR">;
  defm int_amdgcn_image_atomic_xor : AMDGPUImageDimAtomic<"ATOMIC_XOR">;
  defm int_amdgcn_image_atomic_inc : AMDGPUImageDimAtomic<"ATOMIC_INC">;
  defm int_amdgcn_image_atomic_dec : AMDGPUImageDimAtomic<"ATOMIC_DEC">;

  defm int_amdgcn_image_atomic_cmpswap :
      AMDGPUImageDimAtomicX<"ATOMIC_CMPSWAP", [AMDGPUArg<LLVMMatchType<0>, "src">,
                                               AMDGPUArg<LLVMMatchType<0>, "cmp">]>;
}
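
// Illustrative IR (assumed i32 data overload): the 2d add variant expands to
//   %old = call i32 @llvm.amdgcn.image.atomic.add.2d.i32.i32(
//              i32 %vdata, i32 %s, i32 %t, <8 x i32> %rsrc,
//              i32 0, i32 0)  ; texfailctrl, cachepolicy
// Note there is no dmask operand: NumDmaskArgs is 0 for atomics.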

//////////////////////////////////////////////////////////////////////////
// Buffer intrinsics
//////////////////////////////////////////////////////////////////////////

// Data type for buffer resources (V#). Maybe, in the future, we can create a
// similar one for textures (T#).
def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>;

let TargetPrefix = "amdgcn" in {

def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic <
  [AMDGPUBufferRsrcTy],
  [llvm_anyptr_ty, // base
   llvm_i16_ty,    // stride (and swizzle control)
   llvm_i32_ty,    // NumRecords / extent
   llvm_i32_ty],   // flags
  // Attributes lifted from ptrmask + some extra argument attributes.
  [IntrNoMem, NoCapture<ArgIndex<0>>, ReadNone<ArgIndex<0>>,
   IntrSpeculatable, IntrWillReturn]>;
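
// Illustrative IR (assumed flat base pointer, hence the .p0 suffix):
//   %rsrc = call ptr addrspace(8)
//       @llvm.amdgcn.make.buffer.rsrc.p0(ptr %base, i16 0 /*stride*/,
//                                        i32 %numrecords, i32 0 /*flags*/)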

defset list<AMDGPURsrcIntrinsic> AMDGPUBufferIntrinsics = {

class AMDGPUBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrReadMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;

// Generate a buffer_load instruction that may be optimized to s_buffer_load if
// the offset argument is uniform.
def int_amdgcn_s_buffer_load : DefaultAttrsIntrinsic <
  [llvm_any_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // byte offset
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 2 = dlc)
  [IntrNoMem, ImmArg<ArgIndex<2>>]>,
  AMDGPURsrcIntrinsic<0>;
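
// Illustrative IR (assumed f32 overload):
//   %v = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %rsrc,
//                                                  i32 %byteoff, i32 0)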

class AMDGPUBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [],
  [data_ty,          // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrWriteMem, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_buffer_store_format : AMDGPUBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_buffer_store : AMDGPUBufferStore;

// New buffer intrinsics with separate raw and struct variants.  The raw
// variant never has an index. The struct variant always has an index, even if
// it is const 0. A struct intrinsic with a constant 0 index is different from
// the corresponding raw intrinsic on gfx9+ because the behavior of bounds
// checking and swizzling changes depending on whether idxen is set in the
// instruction. These new intrinsics also keep the offset and soffset arguments
// separate as they behave differently in bounds checking and swizzling.

// The versions of these intrinsics that take <4 x i32> arguments are deprecated
// in favor of their .ptr.buffer variants that take ptr addrspace(8) arguments,
// which allow for improved reasoning about memory accesses.
class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
  [IntrReadMem, ImmArg<ArgIndex<3>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
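
// Illustrative IR (assumed f32 overload):
//   %v = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc,
//            i32 %voffset, i32 %soffset, i32 0 /*aux*/)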

class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [data_ty],
  [AMDGPUBufferRsrcTy,         // rsrc(SGPR)
   llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],                // auxiliary data (imm, cachepolicy (bit 0 = glc,
                                //                                   bit 1 = slc,
                                //                                   bit 2 = dlc on gfx10+),
                                //                      swizzled buffer (bit 3 = swz))
  [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
  ImmArg<ArgIndex<3>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_ptr_buffer_load_format : AMDGPURawPtrBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_load : AMDGPURawPtrBufferLoad;
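
// Illustrative IR for the preferred pointer form (assumed f32 overload):
//   %v = call float @llvm.amdgcn.raw.ptr.buffer.load.f32(
//            ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)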

class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
  [IntrReadMem, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;

class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [data_ty],
  [AMDGPUBufferRsrcTy,          // rsrc(SGPR)
   llvm_i32_ty,                 // vindex(VGPR)
   llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],                // auxiliary data (imm, cachepolicy (bit 0 = glc,
                                //                                   bit 1 = slc,
                                //                                   bit 2 = dlc on gfx10+),
                                //                      swizzled buffer (bit 3 = swz))
  [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
   ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_ptr_buffer_load_format : AMDGPUStructPtrBufferLoad;
def int_amdgcn_struct_ptr_buffer_load : AMDGPUStructPtrBufferLoad;

class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
  [IntrWriteMem, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;

class AMDGPURawPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [],
  [data_ty,                     // vdata(VGPR)
   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
   llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],                // auxiliary data (imm, cachepolicy (bit 0 = glc,
                                //                                   bit 1 = slc,
                                //                                   bit 2 = dlc on gfx10+),
                                //                      swizzled buffer (bit 3 = swz))
  [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
  ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_raw_ptr_buffer_store_format : AMDGPURawPtrBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_store : AMDGPURawPtrBufferStore;

class AMDGPUStructBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
  [IntrWriteMem, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;

class AMDGPUStructPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
  [],
  [data_ty,                     // vdata(VGPR)
   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
   llvm_i32_ty,                 // vindex(VGPR)
   llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],                // auxiliary data (imm, cachepolicy (bit 0 = glc,
                                //                                   bit 1 = slc,
                                //                                   bit 2 = dlc on gfx10+),
                                //                      swizzled buffer (bit 3 = swz))
  [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_struct_ptr_buffer_store_format : AMDGPUStructPtrBufferStore;
def int_amdgcn_struct_ptr_buffer_store : AMDGPUStructPtrBufferStore;

class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
  !if(NoRtn, [], [data_ty]),
  [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_sub : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_smin : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_umin : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_fmin : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_atomic_smax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_fmax : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;
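
// Illustrative IR (assumed i32 overload):
//   %old = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(
//              i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)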

class AMDGPURawPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
  !if(NoRtn, [], [data_ty]),
  [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
   llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],                // cachepolicy(imm; bit 1 = slc)
  [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;

def int_amdgcn_raw_ptr_buffer_atomic_swap : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_add : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_sub : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_smin : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_umin : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_fmin : AMDGPURawPtrBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_atomic_smax : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_umax : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_fmax : AMDGPURawPtrBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_atomic_and : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_or : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_xor : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_inc : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_dec : AMDGPURawPtrBufferAtomic;
def int_amdgcn_raw_ptr_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   AMDGPUBufferRsrcTy, // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [IntrArgMemOnly, NoCapture<ArgIndex<2>>,
   ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

// gfx908 intrinsics
def int_amdgcn_raw_buffer_atomic_fadd : AMDGPURawBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_raw_ptr_buffer_atomic_fadd : AMDGPURawPtrBufferAtomic<llvm_anyfloat_ty>;

class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
  !if(NoRtn, [], [data_ty]),
  [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_sub : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_smin : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_umin : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_smax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<ArgIndex<6>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

class AMDGPUStructPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
  !if(NoRtn, [], [data_ty]),
  [!if(NoRtn, data_ty, LLVMMatchType<0>),  // vdata(VGPR)
   AMDGPUBufferRsrcTy,          // rsrc(SGPR)
   llvm_i32_ty,                 // vindex(VGPR)
   llvm_i32_ty,                 // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,                 // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],                // cachepolicy(imm; bit 1 = slc)
  [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_struct_ptr_buffer_atomic_swap : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_add : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_sub : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_smin : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_umin : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_smax : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_umax : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_and : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_or : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_xor : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_inc : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_dec : AMDGPUStructPtrBufferAtomic;
def int_amdgcn_struct_ptr_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   AMDGPUBufferRsrcTy, // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [IntrArgMemOnly, NoCapture<ArgIndex<2>>,
   ImmArg<ArgIndex<6>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

// gfx908 intrinsics
def int_amdgcn_struct_buffer_atomic_fadd : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_struct_ptr_buffer_atomic_fadd : AMDGPUStructPtrBufferAtomic<llvm_anyfloat_ty>;

// gfx90a intrinsics
def int_amdgcn_struct_buffer_atomic_fmin : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_struct_buffer_atomic_fmax : AMDGPUStructBufferAtomic<llvm_anyfloat_ty>;

def int_amdgcn_struct_ptr_buffer_atomic_fmin : AMDGPUStructPtrBufferAtomic<llvm_anyfloat_ty>;
def int_amdgcn_struct_ptr_buffer_atomic_fmax : AMDGPUStructPtrBufferAtomic<llvm_anyfloat_ty>;

// Obsolescent tbuffer intrinsics.
def int_amdgcn_tbuffer_load : DefaultAttrsIntrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // voffset(VGPR)
     llvm_i32_ty,     // soffset(SGPR)
     llvm_i32_ty,     // offset(imm)
     llvm_i32_ty,     // dfmt(imm)
     llvm_i32_ty,     // nfmt(imm)
     llvm_i1_ty,      // glc(imm)
     llvm_i1_ty],     // slc(imm)
    [IntrReadMem,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>,
     ImmArg<ArgIndex<7>>, ImmArg<ArgIndex<8>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_tbuffer_store : DefaultAttrsIntrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,  // rsrc(SGPR)
     llvm_i32_ty,    // vindex(VGPR)
     llvm_i32_ty,    // voffset(VGPR)
     llvm_i32_ty,    // soffset(SGPR)
     llvm_i32_ty,    // offset(imm)
     llvm_i32_ty,    // dfmt(imm)
     llvm_i32_ty,    // nfmt(imm)
     llvm_i1_ty,     // glc(imm)
     llvm_i1_ty],    // slc(imm)
    [IntrWriteMem, ImmArg<ArgIndex<5>>,
     ImmArg<ArgIndex<6>>, ImmArg<ArgIndex<7>>,
     ImmArg<ArgIndex<8>>, ImmArg<ArgIndex<9>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

// New tbuffer intrinsics, with:
// - raw and struct variants
// - joint format field
// - joint cachepolicy field
def int_amdgcn_raw_tbuffer_load : DefaultAttrsIntrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
    [IntrReadMem,
     ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
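
// Illustrative IR (assumed f32 overload; 78 is an arbitrary example packed
// dfmt/nfmt value):
//   %v = call float @llvm.amdgcn.raw.tbuffer.load.f32(<4 x i32> %rsrc,
//            i32 %voffset, i32 %soffset, i32 78 /*format*/, i32 0 /*aux*/)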

def int_amdgcn_raw_ptr_tbuffer_load : DefaultAttrsIntrinsic <
    [llvm_any_ty],      // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [AMDGPUBufferRsrcTy, // rsrc(SGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
    [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
     ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_raw_tbuffer_store : DefaultAttrsIntrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,  // rsrc(SGPR)
     llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],   // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                     //                                       bit 1 = slc,
                     //                                       bit 2 = dlc on gfx10+),
                     //                      swizzled buffer (bit 3 = swz))
    [IntrWriteMem,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

def int_amdgcn_raw_ptr_tbuffer_store : DefaultAttrsIntrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     AMDGPUBufferRsrcTy, // rsrc(SGPR)
     llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],   // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                     //                                       bit 1 = slc,
                     //                                       bit 2 = dlc on gfx10+),
                     //                      swizzled buffer (bit 3 = swz))
    [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

def int_amdgcn_struct_tbuffer_load : DefaultAttrsIntrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
    [IntrReadMem,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_struct_ptr_tbuffer_load : DefaultAttrsIntrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [AMDGPUBufferRsrcTy, // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                      //                                       bit 1 = slc,
                      //                                       bit 2 = dlc on gfx10+),
                      //                      swizzled buffer (bit 3 = swz))
    [IntrArgMemOnly, IntrReadMem, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
     ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_struct_ptr_tbuffer_store : DefaultAttrsIntrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     AMDGPUBufferRsrcTy, // rsrc(SGPR)
     llvm_i32_ty,    // vindex(VGPR)
     llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],   // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                     //                                       bit 1 = slc,
                     //                                       bit 2 = dlc on gfx10+),
                     //                      swizzled buffer (bit 3 = swz))
    [IntrArgMemOnly, IntrWriteMem, WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
     ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

def int_amdgcn_struct_tbuffer_store : DefaultAttrsIntrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,  // rsrc(SGPR)
     llvm_i32_ty,    // vindex(VGPR)
     llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],   // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                     //                                       bit 1 = slc,
                     //                                       bit 2 = dlc on gfx10+),
                     //                      swizzled buffer (bit 3 = swz))
    [IntrWriteMem,
     ImmArg<ArgIndex<5>>, ImmArg<ArgIndex<6>>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

class AMDGPUBufferAtomic : Intrinsic <
  [llvm_anyint_ty],
  [LLVMMatchType<0>,       // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  [ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
  [llvm_i32_ty],
  [llvm_i32_ty,       // src(VGPR)
   llvm_i32_ty,       // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty],       // slc(imm)
  [ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

def int_amdgcn_buffer_atomic_csub : AMDGPUBufferAtomic;

class AMDGPUBufferAtomicFP : Intrinsic <
  [llvm_anyfloat_ty],
  [LLVMMatchType<0>, // vdata(VGPR)
   llvm_v4i32_ty,    // rsrc(SGPR)
   llvm_i32_ty,      // vindex(VGPR)
   llvm_i32_ty,      // offset(SGPR/VGPR/imm)
   llvm_i1_ty],      // slc(imm)
  [ImmArg<ArgIndex<4>>, IntrWillReturn, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;

// Legacy form of the intrinsic. The raw and struct forms should be preferred.
def int_amdgcn_buffer_atomic_fadd : AMDGPUBufferAtomicFP;

class AMDGPURawBufferLoadLDS : Intrinsic <
  [],
  [llvm_v4i32_ty,                      // rsrc(SGPR)
   LLVMQualPointerType<3>,             // LDS base offset
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,                        // imm offset(imm, included in bounds checking and swizzling)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                                       //                                       bit 1 = slc,
                                       //                                       bit 2 = dlc on gfx10+))
                                       //                      swizzled buffer (bit 3 = swz))
  [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>,
   ImmArg<ArgIndex<6>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_lds : AMDGPURawBufferLoadLDS;
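
// Illustrative IR (this intrinsic is not overloaded; it returns void and
// writes the loaded data into LDS):
//   call void @llvm.amdgcn.raw.buffer.load.lds(<4 x i32> %rsrc,
//        ptr addrspace(3) %lds, i32 4 /*bytes*/, i32 %voffset, i32 %soffset,
//        i32 0 /*imm offset*/, i32 0 /*aux*/)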

class AMDGPURawPtrBufferLoadLDS : Intrinsic <
  [],
  [AMDGPUBufferRsrcTy,                 // rsrc(SGPR)
   LLVMQualPointerType<3>,             // LDS base offset
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,                        // imm offset(imm, included in bounds checking and swizzling)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                                       //                                       bit 1 = slc,
                                       //                                       bit 2 = dlc on gfx10+))
                                       //                      swizzled buffer (bit 3 = swz))
  [IntrWillReturn, IntrArgMemOnly,
   ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
   WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>,
   ImmArg<ArgIndex<6>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_ptr_buffer_load_lds : AMDGPURawPtrBufferLoadLDS;

class AMDGPUStructBufferLoadLDS : Intrinsic <
  [],
  [llvm_v4i32_ty,                      // rsrc(SGPR)
   LLVMQualPointerType<3>,             // LDS base offset
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // vindex(VGPR)
   llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,                        // imm offset(imm, included in bounds checking and swizzling)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                                       //                                       bit 1 = slc,
                                       //                                       bit 2 = dlc on gfx10+))
                                       //                      swizzled buffer (bit 3 = swz))
  [IntrWillReturn, NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>,
   ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_buffer_load_lds : AMDGPUStructBufferLoadLDS;

class AMDGPUStructPtrBufferLoadLDS : Intrinsic <
  [],
  [AMDGPUBufferRsrcTy,                 // rsrc(SGPR)
   LLVMQualPointerType<3>,             // LDS base offset
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // vindex(VGPR)
   llvm_i32_ty,                        // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,                        // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,                        // imm offset(imm, included in bounds checking and swizzling)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy     (bit 0 = glc,
                                       //                                       bit 1 = slc,
                                       //                                       bit 2 = dlc on gfx10+))
                                       //                      swizzled buffer (bit 3 = swz))
  [IntrWillReturn, IntrArgMemOnly,
   ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>,
   WriteOnly<ArgIndex<1>>, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<6>>,
   ImmArg<ArgIndex<7>>, IntrNoCallback, IntrNoFree], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_ptr_buffer_load_lds : AMDGPUStructPtrBufferLoadLDS;

} // defset AMDGPUBufferIntrinsics

// Uses that do not set the done bit should set IntrWriteMem on the
// call site.
def int_amdgcn_exp : DefaultAttrsIntrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm (ignored on GFX11+)
  ],
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
   ImmArg<ArgIndex<7>>, IntrWriteMem, IntrInaccessibleMemOnly]
>;
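
// Illustrative IR (assumed f32 overload; tgt 0 = MRT0, en 15 = all channels):
//   call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r, float %g,
//                                  float %b, float %a, i1 true /*done*/,
//                                  i1 false /*vm*/)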

// exp with row_en bit set. Only supported on GFX11+.
def int_amdgcn_exp_row : DefaultAttrsIntrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i32_ty],      // row number
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<6>>,
   IntrWriteMem, IntrInaccessibleMemOnly]
>;

// exp with compr bit set. Not supported on GFX11+.
def int_amdgcn_exp_compr : DefaultAttrsIntrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_anyvector_ty, // src0 (v2f16 or v2i16)
  LLVMMatchType<0>,  // src1
  llvm_i1_ty,        // done
  llvm_i1_ty],       // vm
  [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>,
   ImmArg<ArgIndex<5>>, IntrWriteMem, IntrInaccessibleMemOnly]
>;

def int_amdgcn_buffer_wbinvl1_sc :
  ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
  DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_buffer_wbinvl1 :
  ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
  DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_s_dcache_inv :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_inv">,
  DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_s_memtime :
  ClangBuiltin<"__builtin_amdgcn_s_memtime">,
  DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_s_sleep :
  ClangBuiltin<"__builtin_amdgcn_s_sleep">,
  DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects]>;

def int_amdgcn_s_incperflevel :
  ClangBuiltin<"__builtin_amdgcn_s_incperflevel">,
  DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects]>;

def int_amdgcn_s_decperflevel :
  ClangBuiltin<"__builtin_amdgcn_s_decperflevel">,
  DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects]>;

def int_amdgcn_s_sethalt :
  DefaultAttrsIntrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects]>;

def int_amdgcn_s_setprio :
  ClangBuiltin<"__builtin_amdgcn_s_setprio">,
  DefaultAttrsIntrinsic<[], [llvm_i16_ty], [ImmArg<ArgIndex<0>>, IntrNoMem,
                                IntrHasSideEffects]>;

// This is IntrHasSideEffects so it can be used to read cycle counters.
def int_amdgcn_s_getreg :
  ClangBuiltin<"__builtin_amdgcn_s_getreg">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty],
  [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>]
>;

// Note this can be used to set FP environment properties that are
// unsafe to change in non-strictfp functions. The register properties
// available (and value required to access them) may differ per
// subtarget. llvm.amdgcn.s.setreg(hwmode, value)
def int_amdgcn_s_setreg :
  ClangBuiltin<"__builtin_amdgcn_s_setreg">,
  DefaultAttrsIntrinsic<[], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<0>>]
>;

// int_amdgcn_s_getpc is provided to allow a specific style of position
// independent code to determine the high part of its address when it is
// known (through convention) that the code and any data of interest do
// not cross a 4 GiB address boundary. Using it for any other purpose may
// not produce the desired results, as optimizations may cause code
// movement, especially as we explicitly use IntrNoMem to allow optimizations.
def int_amdgcn_s_getpc :
  ClangBuiltin<"__builtin_amdgcn_s_getpc">,
  DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable,
                                IntrWillReturn]>;

// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
def int_amdgcn_interp_mov :
  ClangBuiltin<"__builtin_amdgcn_interp_mov">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable,
              ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
// This intrinsic reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_interp_p1 :
  ClangBuiltin<"__builtin_amdgcn_interp_p1">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
  ClangBuiltin<"__builtin_amdgcn_interp_p2">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
// See int_amdgcn_interp_p1 for why this is IntrNoMem.
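
// Illustrative IR for the usual two-stage interpolation (attr_chan 0,
// attr 0; %i/%j are the barycentrics and %m0 holds the parameter base):
//   %p1 = call float @llvm.amdgcn.interp.p1(float %i, i32 0, i32 0, i32 %m0)
//   %v  = call float @llvm.amdgcn.interp.p2(float %p1, float %j, i32 0,
//                                           i32 0, i32 %m0)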

// __builtin_amdgcn_interp_p1_f16 <i>, <attr_chan>, <attr>, <high>, <m0>
// high selects whether high or low 16-bits are loaded from LDS
def int_amdgcn_interp_p1_f16 :
  ClangBuiltin<"__builtin_amdgcn_interp_p1_f16">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;

// __builtin_amdgcn_interp_p2_f16 <p1>, <j>, <attr_chan>, <attr>, <high>, <m0>
// high selects whether high or low 16-bits are loaded from LDS
def int_amdgcn_interp_p2_f16 :
  ClangBuiltin<"__builtin_amdgcn_interp_p2_f16">,
  DefaultAttrsIntrinsic<[llvm_half_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

// llvm.amdgcn.lds.direct.load <m0>
// The input argument is m0, which contains a packed combination of address
// offset and flags describing the data type.
def int_amdgcn_lds_direct_load :
  DefaultAttrsIntrinsic<[llvm_any_ty], // overloaded for types u8, u16, i32/f32, i8, i16
            [llvm_i32_ty],
            [IntrReadMem, IntrSpeculatable]>;

// llvm.amdgcn.lds.param.load <attr_chan>, <attr>, <m0>
// Like interp intrinsics, this reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_lds_param_load :
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.interp.inreg.p10 <p>, <i>, <p0>
def int_amdgcn_interp_inreg_p10 :
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable]>;

// llvm.amdgcn.interp.inreg.p2 <p>, <j>, <tmp>
def int_amdgcn_interp_inreg_p2 :
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty],
            [IntrNoMem, IntrSpeculatable]>;

// llvm.amdgcn.interp.inreg.p10.f16 <p>, <i>, <p0>, <high>
// high selects whether high or low 16-bits are used for p and p0 operands
def int_amdgcn_interp_inreg_p10_f16:
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.interp.inreg.p2.f16 <p>, <j>, <tmp>, <high>
// high selects whether high or low 16-bits are used for p operand
def int_amdgcn_interp_inreg_p2_f16 :
  DefaultAttrsIntrinsic<[llvm_half_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i1_ty],
            [IntrNoMem, IntrSpeculatable,
             ImmArg<ArgIndex<3>>]>;

// Deprecated: use llvm.amdgcn.live.mask instead.
def int_amdgcn_ps_live : DefaultAttrsIntrinsic <
  [llvm_i1_ty],
  [],
  [IntrNoMem]>;

// Query currently live lanes.
// Returns true if the lane is live (and not a helper lane).
def int_amdgcn_live_mask : DefaultAttrsIntrinsic <[llvm_i1_ty],
  [], [IntrReadMem, IntrInaccessibleMemOnly]
>;

def int_amdgcn_mbcnt_lo :
  ClangBuiltin<"__builtin_amdgcn_mbcnt_lo">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
   [IntrNoMem]>;

def int_amdgcn_mbcnt_hi :
  ClangBuiltin<"__builtin_amdgcn_mbcnt_hi">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem]>;
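// A common idiom (not mandated here) combines the two to compute a
// wave-relative lane index:
//   %lo   = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
//   %lane = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)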

// llvm.amdgcn.ds.swizzle src offset
def int_amdgcn_ds_swizzle :
  ClangBuiltin<"__builtin_amdgcn_ds_swizzle">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree,
             ImmArg<ArgIndex<1>>]>;

def int_amdgcn_ubfe : DefaultAttrsIntrinsic<[llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sbfe : DefaultAttrsIntrinsic<[llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_lerp :
  ClangBuiltin<"__builtin_amdgcn_lerp">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u8 :
  ClangBuiltin<"__builtin_amdgcn_sad_u8">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_msad_u8 :
  ClangBuiltin<"__builtin_amdgcn_msad_u8">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_hi_u8 :
  ClangBuiltin<"__builtin_amdgcn_sad_hi_u8">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u16 :
  ClangBuiltin<"__builtin_amdgcn_sad_u16">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_qsad_pk_u16_u8 :
  ClangBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_pk_u16_u8 :
  ClangBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
  DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_u32_u8 :
  ClangBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
  DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_u8_f32 :
  ClangBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_icmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent,
             ImmArg<ArgIndex<2>>, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_fcmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent,
             ImmArg<ArgIndex<2>>, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_ballot :
  Intrinsic<[llvm_anyint_ty], [llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
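// Example IR usage (a sketch assuming a wave64 target, hence the i64
// overload):
//   %mask = call i64 @llvm.amdgcn.ballot.i64(i1 %cond)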

def int_amdgcn_inverse_ballot :
  Intrinsic<[llvm_i1_ty], [llvm_anyint_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

class AMDGPUWaveReduce<LLVMType data_ty = llvm_anyint_ty> : Intrinsic<
    [data_ty], 
    [
      LLVMMatchType<0>,   // llvm value to reduce (SGPR/VGPR)
      llvm_i32_ty         // Reduction strategy switch for lowering (0: default,
                          //                                         1: iterative,
                          //                                         2: DPP)
    ],
    [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree, ImmArg<ArgIndex<1>>]>;

def int_amdgcn_wave_reduce_umin : AMDGPUWaveReduce;
def int_amdgcn_wave_reduce_umax : AMDGPUWaveReduce;

def int_amdgcn_readfirstlane :
  ClangBuiltin<"__builtin_amdgcn_readfirstlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
  ClangBuiltin<"__builtin_amdgcn_readlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
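// Example IR usage (a sketch; %lane must be wave-uniform, as noted above):
//   %v = call i32 @llvm.amdgcn.readlane(i32 %data, i32 %lane)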

// The value to write and lane select arguments must be uniform across the
// currently active threads of the current wave. Otherwise, the result is
// undefined.
def int_amdgcn_writelane :
  ClangBuiltin<"__builtin_amdgcn_writelane">,
  Intrinsic<[llvm_i32_ty], [
    llvm_i32_ty,    // uniform value to write: returned by the selected lane
    llvm_i32_ty,    // uniform lane select
    llvm_i32_ty     // returned by all lanes other than the selected one
  ],
  [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_amdgcn_alignbyte : ClangBuiltin<"__builtin_amdgcn_alignbyte">,
  DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mul_i24 : DefaultAttrsIntrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mul_u24 : DefaultAttrsIntrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mulhi_i24 : DefaultAttrsIntrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mulhi_u24 : DefaultAttrsIntrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

// llvm.amdgcn.ds.gws.init(i32 bar_val, i32 resource_id)
//
// bar_val is the total number of waves that will wait on this
// barrier, minus 1.
def int_amdgcn_ds_gws_init :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_init">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrWriteMem,
   IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]
>;
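// Example IR usage (a sketch): a barrier shared by four waves is initialized
// with bar_val = 3 (wave count minus one):
//   call void @llvm.amdgcn.ds.gws.init(i32 3, i32 %resource_id)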

// llvm.amdgcn.ds.gws.barrier(i32 vsrc0, i32 resource_id)
// vsrc0 is the total number of waves that will wait on this
// barrier, minus 1.
def int_amdgcn_ds_gws_barrier :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_barrier">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.v(i32 resource_id)
def int_amdgcn_ds_gws_sema_v :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_v">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.br(i32 vsrc, i32 resource_id)
def int_amdgcn_ds_gws_sema_br :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_br">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.p(i32 resource_id)
def int_amdgcn_ds_gws_sema_p :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_p">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.release.all(i32 resource_id)
def int_amdgcn_ds_gws_sema_release_all :
  ClangBuiltin<"__builtin_amdgcn_ds_gws_sema_release_all">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]
>;


// Copies the source value to the destination value, with the guarantee that
// the source value is computed as if the entire program were executed in WQM.
def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

// Copies the source value to the destination value, such that the source
// is computed as if the entire program were executed in WQM if any other
// program code executes in WQM.
def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

// Return true if at least one thread within the pixel quad passes true into
// the function.
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
  [llvm_i1_ty], [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

// If false, set EXEC=0 for the current thread until the end of program.
// FIXME: Should this be IntrNoMem, IntrHasSideEffects, or IntrWillReturn?
def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], [IntrNoCallback, IntrNoFree]>;

def int_amdgcn_endpgm : ClangBuiltin<"__builtin_amdgcn_endpgm">,
  Intrinsic<[], [], [IntrNoReturn, IntrCold, IntrNoMem, IntrHasSideEffects, IntrNoCallback, IntrNoFree]
>;

// If false, mark all active lanes as helper lanes until the end of program.
def int_amdgcn_wqm_demote : Intrinsic<[],
  [llvm_i1_ty], [IntrWriteMem, IntrInaccessibleMemOnly, IntrNoCallback, IntrNoFree]
>;

// Copies the active channels of the source value to the destination value,
// with the guarantee that the source value is computed as if the entire
// program were executed in Whole Wavefront Mode, i.e. with all channels
// enabled, with a few exceptions; for example, phi nodes that require WWM
// return an undefined value.
def int_amdgcn_strict_wwm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
                       IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;
// Deprecated. Use int_amdgcn_strict_wwm instead.
def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
                       IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;
def int_amdgcn_strict_wqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable,
                       IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

// Given a value, copies it while setting all the inactive lanes to a given
// value. Note that OpenGL helper lanes are considered active, so if the
// program ever uses WQM, then the instruction and the first source will be
// computed in WQM.
def int_amdgcn_set_inactive :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, // value to be copied
             LLVMMatchType<0>], // value for the inactive lanes to take
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
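// Example IR usage (a sketch; the .i32 suffix reflects the overloaded type):
//   %v = call i32 @llvm.amdgcn.set.inactive.i32(i32 %src, i32 0)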

// Return true if the given flat pointer points to a local memory address.
def int_amdgcn_is_shared : ClangBuiltin<"__builtin_amdgcn_is_shared">,
  DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
  [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>]
>;

// Return true if the given flat pointer points to a private memory address.
def int_amdgcn_is_private : ClangBuiltin<"__builtin_amdgcn_is_private">,
  DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
  [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>]
>;
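// Example IR usage (a sketch; the operand is a flat (address space 0)
// pointer):
//   %flat = addrspacecast ptr addrspace(5) %p to ptr
//   %priv = call i1 @llvm.amdgcn.is.private(ptr %flat)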

// A uniform tail call to a function with the `amdgpu_cs_chain` or
// `amdgpu_cs_chain_preserve` calling convention. It will populate the SGPRs
// starting at s0 and the VGPRs starting at v8, set EXEC and perform a jump to
// the given function.
// Can only be used in functions with the `amdgpu_cs`, `amdgpu_cs_chain` or
// `amdgpu_cs_chain_preserve` calling conventions, and only in uniform control
// flow.
def int_amdgcn_cs_chain:
  Intrinsic<[],
            [llvm_anyptr_ty, // The function to jump to.
             llvm_anyint_ty, // Value to put in EXEC (should be i32 or i64).
             llvm_any_ty, // Arguments that will be copied into SGPRs (s0+).
                          // Must be uniform.
             llvm_any_ty, // Arguments that will be copied into VGPRs (v8+).
                          // Need not be uniform.
             llvm_i32_ty, // Flags.
             llvm_vararg_ty // Additional arguments. Only present if Flags is
                            // non-zero.
            ],
            [IntrConvergent, IntrNoReturn, ImmArg<ArgIndex<4>>]>;


//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_s_dcache_inv_vol :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
  DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_buffer_wbinvl1_vol :
  ClangBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
  DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;

//===----------------------------------------------------------------------===//
// VI Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_mov_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty],
             [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>,
             ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree]>;

// llvm.amdgcn.update.dpp.i32 <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
// Should be equivalent to:
// v_mov_b32 <dest> <old>
// v_mov_b32 <dest> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_update_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty,
            llvm_i32_ty, llvm_i32_ty, llvm_i1_ty],
             [IntrNoMem, IntrConvergent, IntrWillReturn,
              ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>,
              ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_s_dcache_wb :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_wb">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_s_dcache_wb_vol :
  ClangBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_s_memrealtime :
  ClangBuiltin<"__builtin_amdgcn_s_memrealtime">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
  ClangBuiltin<"__builtin_amdgcn_ds_permute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
  ClangBuiltin<"__builtin_amdgcn_ds_bpermute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
     [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// llvm.amdgcn.perm <src0> <src1> <selector>
def int_amdgcn_perm :
  ClangBuiltin<"__builtin_amdgcn_perm">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
     [IntrNoMem, IntrSpeculatable, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

//===----------------------------------------------------------------------===//
// GFX9 Intrinsics
//===----------------------------------------------------------------------===//

class AMDGPUGlobalLoadLDS : Intrinsic <
  [],
  [LLVMQualPointerType<1>,             // Base global pointer to load from
   LLVMQualPointerType<3>,             // LDS base pointer to store to
   llvm_i32_ty,                        // Data byte size: 1/2/4
   llvm_i32_ty,                        // imm offset (applied to both global and LDS address)
   llvm_i32_ty],                       // auxiliary data (imm, cachepolicy (bit 0 = glc/sc0,
                                       //                                   bit 1 = slc/sc1,
                                       //                                   bit 2 = dlc on gfx10+,
                                       //                                   bit 4 = scc/nt on gfx90a+))
  [IntrWillReturn, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>,
   ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, IntrNoCallback, IntrNoFree],
  "", [SDNPMemOperand]>;
def int_amdgcn_global_load_lds : AMDGPUGlobalLoadLDS;

//===----------------------------------------------------------------------===//
// GFX10 Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.permlane16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlane16 : ClangBuiltin<"__builtin_amdgcn_permlane16">,
  Intrinsic<[llvm_i32_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, IntrNoCallback, IntrNoFree]>;

// llvm.amdgcn.permlanex16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlanex16 : ClangBuiltin<"__builtin_amdgcn_permlanex16">,
  Intrinsic<[llvm_i32_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>, IntrNoCallback, IntrNoFree]>;

// llvm.amdgcn.mov.dpp8.i32 <src> <sel>
// <sel> is a 32-bit constant whose high 8 bits must be zero; it selects
// the lanes to read from.
def int_amdgcn_mov_dpp8 :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn,
             ImmArg<ArgIndex<1>>, IntrNoCallback, IntrNoFree]>;
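// Example IR usage (a sketch): <sel> = 0 encodes "every lane reads lane 0 of
// its group of 8", since all eight 3-bit selector fields are zero:
//   %b = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %src, i32 0)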

def int_amdgcn_s_get_waveid_in_workgroup :
  ClangBuiltin<"__builtin_amdgcn_s_get_waveid_in_workgroup">,
  Intrinsic<[llvm_i32_ty], [],
    [IntrNoMem, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

class AMDGPUGlobalAtomicRtn<LLVMType vt> : Intrinsic <
  [vt],
  [llvm_anyptr_ty,    // vaddr
   vt],               // vdata(VGPR)
  [IntrArgMemOnly, IntrWillReturn, NoCapture<ArgIndex<0>>, IntrNoCallback, IntrNoFree], "",
  [SDNPMemOperand]>;

def int_amdgcn_global_atomic_csub : AMDGPUGlobalAtomicRtn<llvm_i32_ty>;

// uint4 llvm.amdgcn.image.bvh.intersect.ray <node_ptr>, <ray_extent>, <ray_origin>,
//                                           <ray_dir>, <ray_inv_dir>, <texture_descr>
// <node_ptr> is i32 or i64.
// <ray_dir> and <ray_inv_dir> are both v3f16 or both v3f32.
def int_amdgcn_image_bvh_intersect_ray :
  DefaultAttrsIntrinsic<[llvm_v4i32_ty],
            [llvm_anyint_ty, llvm_float_ty, llvm_v3f32_ty, llvm_anyvector_ty,
             LLVMMatchType<1>, llvm_v4i32_ty],
            [IntrReadMem, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

//===----------------------------------------------------------------------===//
// GFX11 Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.permlane64 <src0>
def int_amdgcn_permlane64 :
  ClangBuiltin<"__builtin_amdgcn_permlane64">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
            [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

def int_amdgcn_ds_add_gs_reg_rtn :
  ClangBuiltin<"__builtin_amdgcn_ds_add_gs_reg_rtn">,
  Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty],
            [ImmArg<ArgIndex<1>>, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree],
            "", [SDNPMemOperand]>;

def int_amdgcn_ds_sub_gs_reg_rtn :
  ClangBuiltin<"__builtin_amdgcn_ds_sub_gs_reg_rtn">,
  Intrinsic<[llvm_anyint_ty], [llvm_i32_ty, llvm_i32_ty],
            [ImmArg<ArgIndex<1>>, IntrHasSideEffects, IntrWillReturn, IntrNoCallback, IntrNoFree],
            "", [SDNPMemOperand]>;

def int_amdgcn_ds_bvh_stack_rtn :
  Intrinsic<
    [llvm_i32_ty, llvm_i32_ty], // %vdst, %addr
    [
      llvm_i32_ty,   // %addr
      llvm_i32_ty,   // %data0
      llvm_v4i32_ty, // %data1
      llvm_i32_ty,   // %offset
    ],
    [ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree]
  >;

// WMMA (Wave Matrix Multiply-Accumulate) intrinsics
//
// These operations perform a matrix multiplication and accumulation of
// the form: D = A * B + C.

class AMDGPUWmmaIntrinsic<LLVMType AB, LLVMType CD> :
  Intrinsic<
    [CD],               // %D
    [
      AB,               // %A
      AB,               // %B
      LLVMMatchType<0>, // %C
    ],
    [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

class AMDGPUWmmaIntrinsicOPSEL<LLVMType AB, LLVMType CD> :
  Intrinsic<
    [CD],               // %D
    [
      AB,               // %A
      AB,               // %B
      LLVMMatchType<0>, // %C
      llvm_i1_ty,       // %high
    ],
    [IntrNoMem, IntrConvergent, ImmArg<ArgIndex<3>>, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

class AMDGPUWmmaIntrinsicIU<LLVMType AB, LLVMType CD> :
  Intrinsic<
    [CD],               // %D
    [
      llvm_i1_ty,       // %A_sign
      AB,               // %A
      llvm_i1_ty,       // %B_sign
      AB,               // %B
      LLVMMatchType<0>, // %C
      llvm_i1_ty,       // %clamp
    ],
    [IntrNoMem, IntrConvergent, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_amdgcn_wmma_f32_16x16x16_f16   : AMDGPUWmmaIntrinsic<llvm_v16f16_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_f32_16x16x16_bf16  : AMDGPUWmmaIntrinsic<llvm_v16i16_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_f16_16x16x16_f16   : AMDGPUWmmaIntrinsicOPSEL<llvm_v16f16_ty, llvm_anyfloat_ty>;
def int_amdgcn_wmma_bf16_16x16x16_bf16 : AMDGPUWmmaIntrinsicOPSEL<llvm_v16i16_ty, llvm_anyint_ty>;
def int_amdgcn_wmma_i32_16x16x16_iu8   : AMDGPUWmmaIntrinsicIU<llvm_v4i32_ty, llvm_anyint_ty>;
def int_amdgcn_wmma_i32_16x16x16_iu4   : AMDGPUWmmaIntrinsicIU<llvm_v2i32_ty, llvm_anyint_ty>;
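// Example IR usage (a sketch assuming a wave32 target; the trailing overload
// suffix reflects the accumulator type):
//   %D = call <8 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v8f32(
//            <16 x half> %A, <16 x half> %B, <8 x float> %C)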

def int_amdgcn_s_wait_event_export_ready :
  ClangBuiltin<"__builtin_amdgcn_s_wait_event_export_ready">,
  Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]
>;

//===----------------------------------------------------------------------===//
// Deep learning intrinsics.
//===----------------------------------------------------------------------===//

// f32 %r = llvm.amdgcn.fdot2(v2f16 %a, v2f16 %b, f32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2 :
  ClangBuiltin<"__builtin_amdgcn_fdot2">,
  DefaultAttrsIntrinsic<
    [llvm_float_ty], // %r
    [
      llvm_v2f16_ty, // %a
      llvm_v2f16_ty, // %b
      llvm_float_ty, // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;
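// Example IR usage (a sketch, with clamping disabled):
//   %r = call float @llvm.amdgcn.fdot2(<2 x half> %a, <2 x half> %b,
//                                      float %c, i1 false)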

// f16 %r = llvm.amdgcn.fdot2.f16.f16(v2f16 %a, v2f16 %b, f16 %c)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2_f16_f16 :
  ClangBuiltin<"__builtin_amdgcn_fdot2_f16_f16">,
  DefaultAttrsIntrinsic<
    [llvm_half_ty],  // %r
    [
      llvm_v2f16_ty, // %a
      llvm_v2f16_ty, // %b
      llvm_half_ty   // %c
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// bf16 %r = llvm.amdgcn.fdot2.bf16.bf16(v2bf16 %a, v2bf16 %b, bf16 %c)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2_bf16_bf16 :
  ClangBuiltin<"__builtin_amdgcn_fdot2_bf16_bf16">,
  DefaultAttrsIntrinsic<
    [llvm_i16_ty],   // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i16_ty    // %c
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// f32 %r = llvm.amdgcn.fdot2.f32.bf16(v2bf16 %a, v2bf16 %b, f32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2_f32_bf16 :
  ClangBuiltin<"__builtin_amdgcn_fdot2_f32_bf16">,
  DefaultAttrsIntrinsic<
    [llvm_float_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_float_ty, // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_sdot2 :
  ClangBuiltin<"__builtin_amdgcn_sdot2">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_udot2 :
  ClangBuiltin<"__builtin_amdgcn_udot2">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_sdot4 :
  ClangBuiltin<"__builtin_amdgcn_sdot4">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_udot4 :
  ClangBuiltin<"__builtin_amdgcn_udot4">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sudot4(i1 %a_sign, v4i8 (as i32) %a, i1 %b_sign, v4i8 (as i32) %b, i32 %c, i1 %clamp)
// Treat input as signed (_sign = 1) or unsigned (_sign = 0).
// a[i in 0..3] = (%a_sign ? a.i8[i] : promoteToSigned(a.u8[i]));
// b[i in 0..3] = (%b_sign ? b.i8[i] : promoteToSigned(b.u8[i]));
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_sudot4 :
  ClangBuiltin<"__builtin_amdgcn_sudot4">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i1_ty,  // %a_sign
      llvm_i32_ty, // %a
      llvm_i1_ty,  // %b_sign
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable,
     ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
  >;

// i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_sdot8 :
  ClangBuiltin<"__builtin_amdgcn_sdot8">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_udot8 :
  ClangBuiltin<"__builtin_amdgcn_udot8">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<ArgIndex<3>>]
  >;

// i32 %r = llvm.amdgcn.sudot8(i1 %a_sign, v8i4 (as i32) %a, i1 %b_sign, v8i4 (as i32) %b, i32 %c, i1 %clamp)
// Treat input as signed (_sign = 1) or unsigned (_sign = 0).
// a[i in 0..7] = (%a_sign ? a.i4[i] : promoteToSigned(a.u4[i]));
// b[i in 0..7] = (%b_sign ? b.i4[i] : promoteToSigned(b.u4[i]));
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_sudot8 :
  ClangBuiltin<"__builtin_amdgcn_sudot8">,
  DefaultAttrsIntrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i1_ty,  // %a_sign
      llvm_i32_ty, // %a
      llvm_i1_ty,  // %b_sign
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable,
     ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]
  >;

//===----------------------------------------------------------------------===//
// gfx908 intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_global_atomic_fadd : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;

// llvm.amdgcn.mfma.*.* vdst, srcA, srcB, srcC, cbsz, abid, blgp
class AMDGPUMfmaIntrinsic<LLVMType DestTy, LLVMType SrcABTy> :
  ClangBuiltin<!subst("int", "__builtin", NAME)>,
  DefaultAttrsIntrinsic<[DestTy],
            [SrcABTy, SrcABTy, DestTy,
             llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrNoMem,
             ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

def int_amdgcn_mfma_f32_32x32x1f32  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_16x16x1f32  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_4x4x1f32    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_float_ty>;
def int_amdgcn_mfma_f32_32x32x2f32  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_float_ty>;
def int_amdgcn_mfma_f32_16x16x4f32  : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_float_ty>;
def int_amdgcn_mfma_f32_32x32x4f16  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_16x16x4f16  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_4x4x4f16    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_32x32x8f16  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty>;
def int_amdgcn_mfma_f32_16x16x16f16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty>;
def int_amdgcn_mfma_i32_32x32x4i8   : AMDGPUMfmaIntrinsic<llvm_v32i32_ty, llvm_i32_ty>;
def int_amdgcn_mfma_i32_16x16x4i8   : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i32_ty>;
def int_amdgcn_mfma_i32_4x4x4i8     : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i32_ty>;
def int_amdgcn_mfma_i32_32x32x8i8   : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i32_ty>;
def int_amdgcn_mfma_i32_16x16x16i8  : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i32_ty>;
def int_amdgcn_mfma_f32_32x32x2bf16 : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_16x16x2bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_4x4x2bf16   : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_32x32x4bf16 : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2i16_ty>;
def int_amdgcn_mfma_f32_16x16x8bf16 : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2i16_ty>;
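// Example IR usage (a sketch, with cbsz/abid/blgp all zero):
//   %d = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(
//            float %a, float %b, <32 x float> %c, i32 0, i32 0, i32 0)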

//===----------------------------------------------------------------------===//
// gfx90a intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_global_atomic_fmin : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_global_atomic_fmax : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fadd   : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fmin   : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;
def int_amdgcn_flat_atomic_fmax   : AMDGPUGlobalAtomicRtn<llvm_anyfloat_ty>;

def int_amdgcn_mfma_f32_32x32x4bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v32f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_16x16x4bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_4x4x4bf16_1k    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_32x32x8bf16_1k  : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty>;
def int_amdgcn_mfma_f32_16x16x16bf16_1k : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty>;

// Note: in gfx940, the BLGP argument is replaced by a NEG bitfield in the
//       DGEMM MFMA: three bits corresponding to the neg modifier applied to
//       the respective source operands.
def int_amdgcn_mfma_f64_16x16x4f64      : AMDGPUMfmaIntrinsic<llvm_v4f64_ty,  llvm_double_ty>;
def int_amdgcn_mfma_f64_4x4x4f64        : AMDGPUMfmaIntrinsic<llvm_double_ty, llvm_double_ty>;

//===----------------------------------------------------------------------===//
// gfx940 intrinsics
//===----------------------------------------------------------------------===//

// bf16 atomics use a v2i16 argument since there is no bf16 data type in LLVM.
def int_amdgcn_global_atomic_fadd_v2bf16 : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
def int_amdgcn_flat_atomic_fadd_v2bf16   : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
def int_amdgcn_ds_fadd_v2bf16 : DefaultAttrsIntrinsic<
    [llvm_v2i16_ty],
    [LLVMQualPointerType<3>, llvm_v2i16_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
    ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;

def int_amdgcn_mfma_i32_16x16x32_i8     : AMDGPUMfmaIntrinsic<llvm_v4i32_ty,  llvm_i64_ty>;
def int_amdgcn_mfma_i32_32x32x16_i8     : AMDGPUMfmaIntrinsic<llvm_v16i32_ty, llvm_i64_ty>;
def int_amdgcn_mfma_f32_16x16x8_xf32    : AMDGPUMfmaIntrinsic<llvm_v4f32_ty,  llvm_v2f32_ty>;
def int_amdgcn_mfma_f32_32x32x4_xf32    : AMDGPUMfmaIntrinsic<llvm_v16f32_ty, llvm_v2f32_ty>;

class AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> :
  AMDGPUMfmaIntrinsic<DestTy, llvm_i64_ty>;

multiclass AMDGPUMFp8MfmaIntrinsic<LLVMType DestTy> {
  foreach kind = ["bf8_bf8", "bf8_fp8", "fp8_bf8", "fp8_fp8"] in
    def NAME#"_"#kind : AMDGPUMFp8MfmaIntrinsic<DestTy>;
}

defm int_amdgcn_mfma_f32_16x16x32 : AMDGPUMFp8MfmaIntrinsic<llvm_v4f32_ty>;
defm int_amdgcn_mfma_f32_32x32x16 : AMDGPUMFp8MfmaIntrinsic<llvm_v16f32_ty>;

// llvm.amdgcn.smfmac.?32.* vdst, srcA, srcB, srcC, index, cbsz, abid
class AMDGPUMSmfmacIntrinsic<LLVMType DestTy, LLVMType SrcA, LLVMType SrcB> :
  ClangBuiltin<!subst("int", "__builtin", NAME)>,
  DefaultAttrsIntrinsic<[DestTy],
            [SrcA, SrcB, DestTy, llvm_i32_ty,
             llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrNoMem,
             ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

def int_amdgcn_smfmac_f32_16x16x32_f16  : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty,  llvm_v4f16_ty, llvm_v8f16_ty>;
def int_amdgcn_smfmac_f32_32x32x16_f16  : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4f16_ty, llvm_v8f16_ty>;
def int_amdgcn_smfmac_f32_16x16x32_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v4f32_ty,  llvm_v4i16_ty, llvm_v8i16_ty>;
def int_amdgcn_smfmac_f32_32x32x16_bf16 : AMDGPUMSmfmacIntrinsic<llvm_v16f32_ty, llvm_v4i16_ty, llvm_v8i16_ty>;
def int_amdgcn_smfmac_i32_16x16x64_i8   : AMDGPUMSmfmacIntrinsic<llvm_v4i32_ty,  llvm_v2i32_ty, llvm_v4i32_ty>;
def int_amdgcn_smfmac_i32_32x32x32_i8   : AMDGPUMSmfmacIntrinsic<llvm_v16i32_ty, llvm_v2i32_ty, llvm_v4i32_ty>;

class AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> :
  AMDGPUMSmfmacIntrinsic<DestTy, llvm_v2i32_ty, llvm_v4i32_ty>;

multiclass AMDGPUMFp8SmfmacIntrinsic<LLVMType DestTy> {
  foreach kind = ["bf8_bf8", "bf8_fp8", "fp8_bf8", "fp8_fp8"] in
    def NAME#"_"#kind : AMDGPUMFp8SmfmacIntrinsic<DestTy>;
}

defm int_amdgcn_smfmac_f32_16x16x64 : AMDGPUMFp8SmfmacIntrinsic<llvm_v4f32_ty>;
defm int_amdgcn_smfmac_f32_32x32x32 : AMDGPUMFp8SmfmacIntrinsic<llvm_v16f32_ty>;

// llvm.amdgcn.cvt.f32.bf8 float vdst, int srcA, imm byte_sel [0..3]
// byte_sel selects byte from srcA.
def int_amdgcn_cvt_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_bf8">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
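// Example IR usage (a sketch; byte_sel = 0 converts the lowest byte):
//   %f = call float @llvm.amdgcn.cvt.f32.bf8(i32 %packed, i32 0)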

// llvm.amdgcn.cvt.f32.fp8 float vdst, int srcA, imm byte_sel [0..3]
def int_amdgcn_cvt_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_fp8">,
  DefaultAttrsIntrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.pk.f32.bf8 float2 vdst, int srcA, imm word_sel
// word_sel = 1 selects 2 high bytes, 0 selects 2 low bytes.
def int_amdgcn_cvt_pk_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_bf8">,
  DefaultAttrsIntrinsic<[llvm_v2f32_ty],
            [llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.pk.f32.fp8 float2 vdst, int srcA, imm word_sel.
def int_amdgcn_cvt_pk_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_fp8">,
  DefaultAttrsIntrinsic<[llvm_v2f32_ty],
            [llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

// llvm.amdgcn.cvt.pk.bf8.f32 int vdst, float srcA, float srcB, int old, imm word_sel
// word_sel = 1 selects 2 high bytes in the vdst, 0 selects 2 low bytes.
def int_amdgcn_cvt_pk_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_bf8_f32">,
  DefaultAttrsIntrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.cvt.pk.fp8.f32 int vdst, float srcA, float srcB, int old, imm word_sel
def int_amdgcn_cvt_pk_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_fp8_f32">,
  DefaultAttrsIntrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i1_ty],
            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.cvt.sr.bf8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
// byte_sel selects byte to write into vdst.
def int_amdgcn_cvt_sr_bf8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_bf8_f32">,
  DefaultAttrsIntrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

// llvm.amdgcn.cvt.sr.fp8.f32 int vdst, float srcA, int srcB, int old, imm byte_sel [0..3]
def int_amdgcn_cvt_sr_fp8_f32 : ClangBuiltin<"__builtin_amdgcn_cvt_sr_fp8_f32">,
  DefaultAttrsIntrinsic<[llvm_i32_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

// Represent a relocation constant.
def int_amdgcn_reloc_constant : DefaultAttrsIntrinsic<
  [llvm_i32_ty], [llvm_metadata_ty],
  [IntrNoMem, IntrSpeculatable]
>;

//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
//===----------------------------------------------------------------------===//
def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
  [llvm_i1_ty], [IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
  [llvm_anyint_ty], [IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty],
  [llvm_i1_ty, LLVMMatchType<0>],
  [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
  [llvm_anyint_ty], [IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]
>;

def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty],
  [IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;

// Represent unreachable in a divergent region.
def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent, IntrNoCallback, IntrNoFree]>;

// Emit a 2.5 ulp, no-denormal division. Should only be inserted by a pass
// based on !fpmath metadata.
def int_amdgcn_fdiv_fast : DefaultAttrsIntrinsic<
  [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;
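// Example IR usage (a sketch of what such a pass might emit):
//   %q = call float @llvm.amdgcn.fdiv.fast(float %x, float %y)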
}
//===- llvm/IR/Metadata.def - Metadata definitions --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through all types of metadata.
//
//===----------------------------------------------------------------------===//

#if !(defined HANDLE_METADATA || defined HANDLE_METADATA_LEAF ||               \
      defined HANDLE_METADATA_BRANCH || defined HANDLE_MDNODE_LEAF ||          \
      defined HANDLE_MDNODE_LEAF_UNIQUABLE || defined HANDLE_MDNODE_BRANCH ||  \
      defined HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE ||                      \
      defined HANDLE_SPECIALIZED_MDNODE_LEAF ||                                \
      defined HANDLE_SPECIALIZED_MDNODE_BRANCH)
#error "Missing macro definition of HANDLE_METADATA*"
#endif

// Handler for all types of metadata.
#ifndef HANDLE_METADATA
#define HANDLE_METADATA(CLASS)
#endif

// Handler for leaf nodes in the class hierarchy.
#ifndef HANDLE_METADATA_LEAF
#define HANDLE_METADATA_LEAF(CLASS) HANDLE_METADATA(CLASS)
#endif

// Handler for non-leaf nodes in the class hierarchy.
#ifndef HANDLE_METADATA_BRANCH
#define HANDLE_METADATA_BRANCH(CLASS) HANDLE_METADATA(CLASS)
#endif

// Handler for specialized and uniquable leaf nodes under MDNode.  Defers to
// HANDLE_MDNODE_LEAF_UNIQUABLE if it's defined, otherwise to
// HANDLE_SPECIALIZED_MDNODE_LEAF.
#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
#ifdef HANDLE_MDNODE_LEAF_UNIQUABLE
#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS)                        \
  HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS)
#else
#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS)                        \
  HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)
#endif
#endif

// Handler for leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_LEAF_UNIQUABLE
#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#endif

// Handler for leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_LEAF
#define HANDLE_MDNODE_LEAF(CLASS) HANDLE_METADATA_LEAF(CLASS)
#endif

// Handler for non-leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_BRANCH
#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_METADATA_BRANCH(CLASS)
#endif

// Handler for specialized leaf nodes under MDNode.
#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#endif

// Handler for specialized non-leaf nodes under MDNode.
#ifndef HANDLE_SPECIALIZED_MDNODE_BRANCH
#define HANDLE_SPECIALIZED_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_BRANCH(CLASS)
#endif
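// Example (hypothetical) client: stamp out one case label per uniquable leaf
// class by defining a handler before including this file:
//   #define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) case Metadata::CLASS##Kind:
//   #include "llvm/IR/Metadata.def"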

HANDLE_METADATA_LEAF(MDString)
HANDLE_METADATA_BRANCH(ValueAsMetadata)
HANDLE_METADATA_LEAF(ConstantAsMetadata)
HANDLE_METADATA_LEAF(LocalAsMetadata)
HANDLE_METADATA_LEAF(DistinctMDOperandPlaceholder)
HANDLE_MDNODE_BRANCH(MDNode)
HANDLE_MDNODE_LEAF_UNIQUABLE(MDTuple)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocation)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIExpression)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariableExpression)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DINode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(GenericDINode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubrange)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIEnumerator)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIScope)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIBasicType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIDerivedType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DICompositeType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubroutineType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIFile)
HANDLE_SPECIALIZED_MDNODE_LEAF(DICompileUnit)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DILocalScope)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubprogram)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DILexicalBlockBase)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlock)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlockFile)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DINamespace)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIModule)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DITemplateParameter)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateTypeParameter)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateValueParameter)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocalVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILabel)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIObjCProperty)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIImportedEntity)
HANDLE_SPECIALIZED_MDNODE_LEAF(DIAssignID)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIMacroNode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacro)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacroFile)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DICommonBlock)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIArgList)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIStringType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGenericSubrange)

#undef HANDLE_METADATA
#undef HANDLE_METADATA_LEAF
#undef HANDLE_METADATA_BRANCH
#undef HANDLE_MDNODE_LEAF
#undef HANDLE_MDNODE_LEAF_UNIQUABLE
#undef HANDLE_MDNODE_BRANCH
#undef HANDLE_SPECIALIZED_MDNODE_LEAF
#undef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
#undef HANDLE_SPECIALIZED_MDNODE_BRANCH
//===- ConstantFolder.h - Constant folding helper ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ConstantFolder class, a helper for IRBuilder.
// It provides IRBuilder with a set of methods for creating constants
// with minimal folding.  For general constant creation and folding,
// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CONSTANTFOLDER_H
#define LLVM_IR_CONSTANTFOLDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/ConstantFold.h"
#include "llvm/IR/IRBuilderFolder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

namespace llvm {

/// ConstantFolder - Create constants with minimal, target-independent folding.
class ConstantFolder final : public IRBuilderFolder {
  virtual void anchor();

public:
  explicit ConstantFolder() = default;

  //===--------------------------------------------------------------------===//
  // Value-based folders.
  //
  // Return an existing value or a constant if the operation can be simplified.
  // Otherwise return nullptr.
  //===--------------------------------------------------------------------===//
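  // Example usage (a sketch): IRBuilder uses ConstantFolder by default, so a
  // binary op on two constants folds away instead of emitting an instruction:
  //   IRBuilder<> B(Ctx);
  //   Value *V = B.CreateAdd(B.getInt32(1), B.getInt32(2)); // i32 3, no add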

  Value *FoldBinOp(Instruction::BinaryOps Opc, Value *LHS,
                   Value *RHS) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC) {
      if (ConstantExpr::isDesirableBinOp(Opc))
        return ConstantExpr::get(Opc, LC, RC);
      return ConstantFoldBinaryInstruction(Opc, LC, RC);
    }
    return nullptr;
  }

  Value *FoldExactBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                        bool IsExact) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC) {
      if (ConstantExpr::isDesirableBinOp(Opc))
        return ConstantExpr::get(Opc, LC, RC,
                                 IsExact ? PossiblyExactOperator::IsExact : 0);
      return ConstantFoldBinaryInstruction(Opc, LC, RC);
    }
    return nullptr;
  }

  Value *FoldNoWrapBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                         bool HasNUW, bool HasNSW) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC) {
      if (ConstantExpr::isDesirableBinOp(Opc)) {
        unsigned Flags = 0;
        if (HasNUW)
          Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
        if (HasNSW)
          Flags |= OverflowingBinaryOperator::NoSignedWrap;
        return ConstantExpr::get(Opc, LC, RC, Flags);
      }
      return ConstantFoldBinaryInstruction(Opc, LC, RC);
    }
    return nullptr;
  }

  Value *FoldBinOpFMF(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                      FastMathFlags FMF) const override {
    return FoldBinOp(Opc, LHS, RHS);
  }

  Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
                      FastMathFlags FMF) const override {
    if (Constant *C = dyn_cast<Constant>(V))
      return ConstantFoldUnaryInstruction(Opc, C);
    return nullptr;
  }

  Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
    auto *LC = dyn_cast<Constant>(LHS);
    auto *RC = dyn_cast<Constant>(RHS);
    if (LC && RC)
      return ConstantExpr::getCompare(P, LC, RC);
    return nullptr;
  }

  Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                 bool IsInBounds = false) const override {
    if (!ConstantExpr::isSupportedGetElementPtr(Ty))
      return nullptr;

    if (auto *PC = dyn_cast<Constant>(Ptr)) {
      // Every index must be constant.
      if (any_of(IdxList, [](Value *V) { return !isa<Constant>(V); }))
        return nullptr;

      if (IsInBounds)
        return ConstantExpr::getInBoundsGetElementPtr(Ty, PC, IdxList);
      else
        return ConstantExpr::getGetElementPtr(Ty, PC, IdxList);
    }
    return nullptr;
  }

  Value *FoldSelect(Value *C, Value *True, Value *False) const override {
    auto *CC = dyn_cast<Constant>(C);
    auto *TC = dyn_cast<Constant>(True);
    auto *FC = dyn_cast<Constant>(False);
    if (CC && TC && FC)
      return ConstantFoldSelectInstruction(CC, TC, FC);
    return nullptr;
  }

  Value *FoldExtractValue(Value *Agg,
                          ArrayRef<unsigned> IdxList) const override {
    if (auto *CAgg = dyn_cast<Constant>(Agg))
      return ConstantFoldExtractValueInstruction(CAgg, IdxList);
    return nullptr;
  }

  Value *FoldInsertValue(Value *Agg, Value *Val,
                         ArrayRef<unsigned> IdxList) const override {
    auto *CAgg = dyn_cast<Constant>(Agg);
    auto *CVal = dyn_cast<Constant>(Val);
    if (CAgg && CVal)
      return ConstantFoldInsertValueInstruction(CAgg, CVal, IdxList);
    return nullptr;
  }

  Value *FoldExtractElement(Value *Vec, Value *Idx) const override {
    auto *CVec = dyn_cast<Constant>(Vec);
    auto *CIdx = dyn_cast<Constant>(Idx);
    if (CVec && CIdx)
      return ConstantExpr::getExtractElement(CVec, CIdx);
    return nullptr;
  }

  Value *FoldInsertElement(Value *Vec, Value *NewElt,
                           Value *Idx) const override {
    auto *CVec = dyn_cast<Constant>(Vec);
    auto *CNewElt = dyn_cast<Constant>(NewElt);
    auto *CIdx = dyn_cast<Constant>(Idx);
    if (CVec && CNewElt && CIdx)
      return ConstantExpr::getInsertElement(CVec, CNewElt, CIdx);
    return nullptr;
  }

  Value *FoldShuffleVector(Value *V1, Value *V2,
                           ArrayRef<int> Mask) const override {
    auto *C1 = dyn_cast<Constant>(V1);
    auto *C2 = dyn_cast<Constant>(V2);
    if (C1 && C2)
      return ConstantExpr::getShuffleVector(C1, C2, Mask);
    return nullptr;
  }

  //===--------------------------------------------------------------------===//
  // Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  Constant *CreateCast(Instruction::CastOps Op, Constant *C,
                       Type *DestTy) const override {
    return ConstantExpr::getCast(Op, C, DestTy);
  }

  Constant *CreatePointerCast(Constant *C, Type *DestTy) const override {
    return ConstantExpr::getPointerCast(C, DestTy);
  }

  Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
                                                Type *DestTy) const override {
    return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
  }

  Constant *CreateIntCast(Constant *C, Type *DestTy,
                          bool isSigned) const override {
    return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
  }

  Constant *CreateFPCast(Constant *C, Type *DestTy) const override {
    return ConstantExpr::getFPCast(C, DestTy);
  }

  Constant *CreateBitCast(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::BitCast, C, DestTy);
  }

  Constant *CreateIntToPtr(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::IntToPtr, C, DestTy);
  }

  Constant *CreatePtrToInt(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::PtrToInt, C, DestTy);
  }

  Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
    return ConstantExpr::getZExtOrBitCast(C, DestTy);
  }

  Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
    return ConstantExpr::getSExtOrBitCast(C, DestTy);
  }

  Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
    return ConstantExpr::getTruncOrBitCast(C, DestTy);
  }

  //===--------------------------------------------------------------------===//
  // Compare Instructions
  //===--------------------------------------------------------------------===//

  Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
                       Constant *RHS) const override {
    return ConstantExpr::getCompare(P, LHS, RHS);
  }
};

} // end namespace llvm

#endif // LLVM_IR_CONSTANTFOLDER_H
#ifdef GET_ATTR_NAMES
#undef GET_ATTR_NAMES
#ifndef ATTRIBUTE_ALL
#define ATTRIBUTE_ALL(FIRST, SECOND)
#endif

#ifndef ATTRIBUTE_ENUM
#define ATTRIBUTE_ENUM(FIRST, SECOND) ATTRIBUTE_ALL(FIRST, SECOND)
#endif
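// Example (hypothetical) client: collect the IR name of every attribute.
// The includer defines GET_ATTR_NAMES and a handler before including:
//   #define GET_ATTR_NAMES
//   #define ATTRIBUTE_ALL(ENUM, DISPLAY) #DISPLAY,
//   static const char *const AttrNames[] = {
//   #include "llvm/IR/Attributes.inc"
//   };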

ATTRIBUTE_ENUM(AllocAlign,allocalign)
ATTRIBUTE_ENUM(AllocatedPointer,allocptr)
ATTRIBUTE_ENUM(AlwaysInline,alwaysinline)
ATTRIBUTE_ENUM(Builtin,builtin)
ATTRIBUTE_ENUM(Cold,cold)
ATTRIBUTE_ENUM(Convergent,convergent)
ATTRIBUTE_ENUM(DisableSanitizerInstrumentation,disable_sanitizer_instrumentation)
ATTRIBUTE_ENUM(FnRetThunkExtern,fn_ret_thunk_extern)
ATTRIBUTE_ENUM(Hot,hot)
ATTRIBUTE_ENUM(ImmArg,immarg)
ATTRIBUTE_ENUM(InReg,inreg)
ATTRIBUTE_ENUM(InlineHint,inlinehint)
ATTRIBUTE_ENUM(JumpTable,jumptable)
ATTRIBUTE_ENUM(MinSize,minsize)
ATTRIBUTE_ENUM(MustProgress,mustprogress)
ATTRIBUTE_ENUM(Naked,naked)
ATTRIBUTE_ENUM(Nest,nest)
ATTRIBUTE_ENUM(NoAlias,noalias)
ATTRIBUTE_ENUM(NoBuiltin,nobuiltin)
ATTRIBUTE_ENUM(NoCallback,nocallback)
ATTRIBUTE_ENUM(NoCapture,nocapture)
ATTRIBUTE_ENUM(NoCfCheck,nocf_check)
ATTRIBUTE_ENUM(NoDuplicate,noduplicate)
ATTRIBUTE_ENUM(NoFree,nofree)
ATTRIBUTE_ENUM(NoImplicitFloat,noimplicitfloat)
ATTRIBUTE_ENUM(NoInline,noinline)
ATTRIBUTE_ENUM(NoMerge,nomerge)
ATTRIBUTE_ENUM(NoProfile,noprofile)
ATTRIBUTE_ENUM(NoRecurse,norecurse)
ATTRIBUTE_ENUM(NoRedZone,noredzone)
ATTRIBUTE_ENUM(NoReturn,noreturn)
ATTRIBUTE_ENUM(NoSanitizeBounds,nosanitize_bounds)
ATTRIBUTE_ENUM(NoSanitizeCoverage,nosanitize_coverage)
ATTRIBUTE_ENUM(NoSync,nosync)
ATTRIBUTE_ENUM(NoUndef,noundef)
ATTRIBUTE_ENUM(NoUnwind,nounwind)
ATTRIBUTE_ENUM(NonLazyBind,nonlazybind)
ATTRIBUTE_ENUM(NonNull,nonnull)
ATTRIBUTE_ENUM(NullPointerIsValid,null_pointer_is_valid)
ATTRIBUTE_ENUM(OptForFuzzing,optforfuzzing)
ATTRIBUTE_ENUM(OptimizeForSize,optsize)
ATTRIBUTE_ENUM(OptimizeNone,optnone)
ATTRIBUTE_ENUM(PresplitCoroutine,presplitcoroutine)
ATTRIBUTE_ENUM(ReadNone,readnone)
ATTRIBUTE_ENUM(ReadOnly,readonly)
ATTRIBUTE_ENUM(Returned,returned)
ATTRIBUTE_ENUM(ReturnsTwice,returns_twice)
ATTRIBUTE_ENUM(SExt,signext)
ATTRIBUTE_ENUM(SafeStack,safestack)
ATTRIBUTE_ENUM(SanitizeAddress,sanitize_address)
ATTRIBUTE_ENUM(SanitizeHWAddress,sanitize_hwaddress)
ATTRIBUTE_ENUM(SanitizeMemTag,sanitize_memtag)
ATTRIBUTE_ENUM(SanitizeMemory,sanitize_memory)
ATTRIBUTE_ENUM(SanitizeThread,sanitize_thread)
ATTRIBUTE_ENUM(ShadowCallStack,shadowcallstack)
ATTRIBUTE_ENUM(SkipProfile,skipprofile)
ATTRIBUTE_ENUM(Speculatable,speculatable)
ATTRIBUTE_ENUM(SpeculativeLoadHardening,speculative_load_hardening)
ATTRIBUTE_ENUM(StackProtect,ssp)
ATTRIBUTE_ENUM(StackProtectReq,sspreq)
ATTRIBUTE_ENUM(StackProtectStrong,sspstrong)
ATTRIBUTE_ENUM(StrictFP,strictfp)
ATTRIBUTE_ENUM(SwiftAsync,swiftasync)
ATTRIBUTE_ENUM(SwiftError,swifterror)
ATTRIBUTE_ENUM(SwiftSelf,swiftself)
ATTRIBUTE_ENUM(WillReturn,willreturn)
ATTRIBUTE_ENUM(WriteOnly,writeonly)
ATTRIBUTE_ENUM(ZExt,zeroext)
ATTRIBUTE_ENUM(ByRef,byref)
ATTRIBUTE_ENUM(ByVal,byval)
ATTRIBUTE_ENUM(ElementType,elementtype)
ATTRIBUTE_ENUM(InAlloca,inalloca)
ATTRIBUTE_ENUM(Preallocated,preallocated)
ATTRIBUTE_ENUM(StructRet,sret)
ATTRIBUTE_ENUM(Alignment,align)
ATTRIBUTE_ENUM(AllocKind,allockind)
ATTRIBUTE_ENUM(AllocSize,allocsize)
ATTRIBUTE_ENUM(Dereferenceable,dereferenceable)
ATTRIBUTE_ENUM(DereferenceableOrNull,dereferenceable_or_null)
ATTRIBUTE_ENUM(Memory,memory)
ATTRIBUTE_ENUM(NoFPClass,nofpclass)
ATTRIBUTE_ENUM(StackAlignment,alignstack)
ATTRIBUTE_ENUM(UWTable,uwtable)
ATTRIBUTE_ENUM(VScaleRange,vscale_range)
#undef ATTRIBUTE_ENUM

#ifndef ATTRIBUTE_STRBOOL
#define ATTRIBUTE_STRBOOL(FIRST, SECOND) ATTRIBUTE_ALL(FIRST, SECOND)
#endif

ATTRIBUTE_STRBOOL(ApproxFuncFPMath,approx-func-fp-math)
ATTRIBUTE_STRBOOL(LessPreciseFPMAD,less-precise-fpmad)
ATTRIBUTE_STRBOOL(NoInfsFPMath,no-infs-fp-math)
ATTRIBUTE_STRBOOL(NoInlineLineTables,no-inline-line-tables)
ATTRIBUTE_STRBOOL(NoJumpTables,no-jump-tables)
ATTRIBUTE_STRBOOL(NoNansFPMath,no-nans-fp-math)
ATTRIBUTE_STRBOOL(NoSignedZerosFPMath,no-signed-zeros-fp-math)
ATTRIBUTE_STRBOOL(ProfileSampleAccurate,profile-sample-accurate)
ATTRIBUTE_STRBOOL(UnsafeFPMath,unsafe-fp-math)
ATTRIBUTE_STRBOOL(UseSampleProfile,use-sample-profile)
#undef ATTRIBUTE_STRBOOL

#ifndef ATTRIBUTE_COMPLEXSTR
#define ATTRIBUTE_COMPLEXSTR(FIRST, SECOND) ATTRIBUTE_ALL(FIRST, SECOND)
#endif

ATTRIBUTE_COMPLEXSTR(DenormalFPMath,denormal-fp-math)
ATTRIBUTE_COMPLEXSTR(DenormalFPMathF32,denormal-fp-math-f32)
#undef ATTRIBUTE_COMPLEXSTR

#undef ATTRIBUTE_ALL
#endif
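
// Illustrative consumer of the X-macro table above (a sketch, not generated
// content): a translation unit defines ATTRIBUTE_ALL before including this
// file with GET_ATTR_NAMES set, and every ATTRIBUTE_ENUM / ATTRIBUTE_STRBOOL /
// ATTRIBUTE_COMPLEXSTR entry is stamped out through it:
//
//   #define GET_ATTR_NAMES
//   #define ATTRIBUTE_ALL(ENUM_NAME, DISPLAY_NAME) {#ENUM_NAME, #DISPLAY_NAME},
//   static const char *AttrNames[][2] = {
//   #include "llvm/IR/Attributes.inc"
//   }; // expands to {"AllocAlign", "allocalign"}, {"AllocatedPointer", ...}, ...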

#ifdef GET_ATTR_ENUM
#undef GET_ATTR_ENUM
FirstEnumAttr = 1,
AllocAlign = 1,
AllocatedPointer = 2,
AlwaysInline = 3,
Builtin = 4,
Cold = 5,
Convergent = 6,
DisableSanitizerInstrumentation = 7,
FnRetThunkExtern = 8,
Hot = 9,
ImmArg = 10,
InReg = 11,
InlineHint = 12,
JumpTable = 13,
MinSize = 14,
MustProgress = 15,
Naked = 16,
Nest = 17,
NoAlias = 18,
NoBuiltin = 19,
NoCallback = 20,
NoCapture = 21,
NoCfCheck = 22,
NoDuplicate = 23,
NoFree = 24,
NoImplicitFloat = 25,
NoInline = 26,
NoMerge = 27,
NoProfile = 28,
NoRecurse = 29,
NoRedZone = 30,
NoReturn = 31,
NoSanitizeBounds = 32,
NoSanitizeCoverage = 33,
NoSync = 34,
NoUndef = 35,
NoUnwind = 36,
NonLazyBind = 37,
NonNull = 38,
NullPointerIsValid = 39,
OptForFuzzing = 40,
OptimizeForSize = 41,
OptimizeNone = 42,
PresplitCoroutine = 43,
ReadNone = 44,
ReadOnly = 45,
Returned = 46,
ReturnsTwice = 47,
SExt = 48,
SafeStack = 49,
SanitizeAddress = 50,
SanitizeHWAddress = 51,
SanitizeMemTag = 52,
SanitizeMemory = 53,
SanitizeThread = 54,
ShadowCallStack = 55,
SkipProfile = 56,
Speculatable = 57,
SpeculativeLoadHardening = 58,
StackProtect = 59,
StackProtectReq = 60,
StackProtectStrong = 61,
StrictFP = 62,
SwiftAsync = 63,
SwiftError = 64,
SwiftSelf = 65,
WillReturn = 66,
WriteOnly = 67,
ZExt = 68,
LastEnumAttr = 68,
FirstTypeAttr = 69,
ByRef = 69,
ByVal = 70,
ElementType = 71,
InAlloca = 72,
Preallocated = 73,
StructRet = 74,
LastTypeAttr = 74,
FirstIntAttr = 75,
Alignment = 75,
AllocKind = 76,
AllocSize = 77,
Dereferenceable = 78,
DereferenceableOrNull = 79,
Memory = 80,
NoFPClass = 81,
StackAlignment = 82,
UWTable = 83,
VScaleRange = 84,
LastIntAttr = 84,
#endif

#ifdef GET_ATTR_COMPAT_FUNC
#undef GET_ATTR_COMPAT_FUNC
static inline bool hasCompatibleFnAttrs(const Function &Caller,
                                        const Function &Callee) {
  bool Ret = true;

  Ret &= isEqual<SanitizeAddressAttr>(Caller, Callee);
  Ret &= isEqual<SanitizeThreadAttr>(Caller, Callee);
  Ret &= isEqual<SanitizeMemoryAttr>(Caller, Callee);
  Ret &= isEqual<SanitizeHWAddressAttr>(Caller, Callee);
  Ret &= isEqual<SanitizeMemTagAttr>(Caller, Callee);
  Ret &= isEqual<SafeStackAttr>(Caller, Callee);
  Ret &= isEqual<ShadowCallStackAttr>(Caller, Callee);
  Ret &= isEqual<UseSampleProfileAttr>(Caller, Callee);
  Ret &= isEqual<NoProfileAttr>(Caller, Callee);
  Ret &= checkDenormMode(Caller, Callee);

  return Ret;
}

static inline void mergeFnAttrs(Function &Caller,
                                const Function &Callee) {
  setAND<LessPreciseFPMADAttr>(Caller, Callee);
  setAND<NoInfsFPMathAttr>(Caller, Callee);
  setAND<NoNansFPMathAttr>(Caller, Callee);
  setAND<ApproxFuncFPMathAttr>(Caller, Callee);
  setAND<NoSignedZerosFPMathAttr>(Caller, Callee);
  setAND<UnsafeFPMathAttr>(Caller, Callee);
  setOR<NoImplicitFloatAttr>(Caller, Callee);
  setOR<NoJumpTablesAttr>(Caller, Callee);
  setOR<ProfileSampleAccurateAttr>(Caller, Callee);
  setOR<SpeculativeLoadHardeningAttr>(Caller, Callee);
  adjustCallerSSPLevel(Caller, Callee);
  adjustCallerStackProbes(Caller, Callee);
  adjustCallerStackProbeSize(Caller, Callee);
  adjustMinLegalVectorWidth(Caller, Callee);
  adjustNullPointerValidAttr(Caller, Callee);
  setAND<MustProgressAttr>(Caller, Callee);
}
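
// Editorial note: isEqual, setAND, setOR, and the adjust* helpers are expected
// to be defined by the file that sets GET_ATTR_COMPAT_FUNC before including
// this table (in LLVM proper, lib/IR/Attributes.cpp). Conventionally, setAND
// clears a bool string attribute on the caller unless both functions carry it,
// while setOR propagates it to the caller if either does; these are the
// conservative merge directions when inlining.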

#endif
#ifdef GET_ATTR_PROP_TABLE
#undef GET_ATTR_PROP_TABLE
static const uint8_t AttrPropTable[] = {
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::ParamAttr | AttributeProperty::RetAttr,
0 | AttributeProperty::FnAttr | AttributeProperty::ParamAttr,
0 | AttributeProperty::FnAttr,
0 | AttributeProperty::FnAttr,
};
#endif
//===- llvm/MatrixBuilder.h - Builder to lower matrix ops -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MatrixBuilder class, which is used as a convenient way
// to lower matrix operations to LLVM IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MATRIXBUILDER_H
#define LLVM_IR_MATRIXBUILDER_H

#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"

namespace llvm {

class Function;
class Twine;
class Module;

class MatrixBuilder {
  IRBuilderBase &B;
  Module *getModule() { return B.GetInsertBlock()->getParent()->getParent(); }

  std::pair<Value *, Value *> splatScalarOperandIfNeeded(Value *LHS,
                                                         Value *RHS) {
    assert((LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy()) &&
           "One of the operands must be a matrix (embedded in a vector)");
    if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
      assert(!isa<ScalableVectorType>(LHS->getType()) &&
             "LHS Assumed to be fixed width");
      RHS = B.CreateVectorSplat(
          cast<VectorType>(LHS->getType())->getElementCount(), RHS,
          "scalar.splat");
    } else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
      assert(!isa<ScalableVectorType>(RHS->getType()) &&
             "RHS Assumed to be fixed width");
      LHS = B.CreateVectorSplat(
          cast<VectorType>(RHS->getType())->getElementCount(), LHS,
          "scalar.splat");
    }
    return {LHS, RHS};
  }

public:
  MatrixBuilder(IRBuilderBase &Builder) : B(Builder) {}

  /// Create a column major, strided matrix load.
  /// \p EltTy   - Matrix element type
  /// \p DataPtr - Start address of the matrix read
  /// \p Rows    - Number of rows in matrix (must be a constant)
  /// \p Columns - Number of columns in matrix (must be a constant)
  /// \p Stride  - Space between columns
  CallInst *CreateColumnMajorLoad(Type *EltTy, Value *DataPtr, Align Alignment,
                                  Value *Stride, bool IsVolatile, unsigned Rows,
                                  unsigned Columns, const Twine &Name = "") {
    auto *RetType = FixedVectorType::get(EltTy, Rows * Columns);

    Value *Ops[] = {DataPtr, Stride, B.getInt1(IsVolatile), B.getInt32(Rows),
                    B.getInt32(Columns)};
    Type *OverloadedTypes[] = {RetType, Stride->getType()};

    Function *TheFn = Intrinsic::getDeclaration(
        getModule(), Intrinsic::matrix_column_major_load, OverloadedTypes);

    CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
    Attribute AlignAttr =
        Attribute::getWithAlignment(Call->getContext(), Alignment);
    Call->addParamAttr(0, AlignAttr);
    return Call;
  }
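
  // Example sketch (assumes an IRBuilder<> B with a valid insertion point and
  // a pointer value Ptr to column-major data): load a 4x4 double matrix whose
  // columns are 4 elements apart.
  //
  //   MatrixBuilder MB(B);
  //   Value *M = MB.CreateColumnMajorLoad(B.getDoubleTy(), Ptr, Align(8),
  //                                       B.getInt64(4), /*IsVolatile=*/false,
  //                                       /*Rows=*/4, /*Columns=*/4);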

  /// Create a column major, strided matrix store.
  /// \p Matrix  - Matrix to store
  /// \p Ptr     - Pointer to write back to
  /// \p Stride  - Space between columns
  CallInst *CreateColumnMajorStore(Value *Matrix, Value *Ptr, Align Alignment,
                                   Value *Stride, bool IsVolatile,
                                   unsigned Rows, unsigned Columns,
                                   const Twine &Name = "") {
    Value *Ops[] = {Matrix,           Ptr,
                    Stride,           B.getInt1(IsVolatile),
                    B.getInt32(Rows), B.getInt32(Columns)};
    Type *OverloadedTypes[] = {Matrix->getType(), Stride->getType()};

    Function *TheFn = Intrinsic::getDeclaration(
        getModule(), Intrinsic::matrix_column_major_store, OverloadedTypes);

    CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
    Attribute AlignAttr =
        Attribute::getWithAlignment(Call->getContext(), Alignment);
    Call->addParamAttr(1, AlignAttr);
    return Call;
  }

  /// Create a llvm.matrix.transpose call, transposing \p Matrix with \p Rows
  /// rows and \p Columns columns.
  CallInst *CreateMatrixTranspose(Value *Matrix, unsigned Rows,
                                  unsigned Columns, const Twine &Name = "") {
    auto *OpType = cast<VectorType>(Matrix->getType());
    auto *ReturnType =
        FixedVectorType::get(OpType->getElementType(), Rows * Columns);

    Type *OverloadedTypes[] = {ReturnType};
    Value *Ops[] = {Matrix, B.getInt32(Rows), B.getInt32(Columns)};
    Function *TheFn = Intrinsic::getDeclaration(
        getModule(), Intrinsic::matrix_transpose, OverloadedTypes);

    return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
  }

  /// Create a llvm.matrix.multiply call, multiplying matrices \p LHS and \p
  /// RHS.
  CallInst *CreateMatrixMultiply(Value *LHS, Value *RHS, unsigned LHSRows,
                                 unsigned LHSColumns, unsigned RHSColumns,
                                 const Twine &Name = "") {
    auto *LHSType = cast<VectorType>(LHS->getType());
    auto *RHSType = cast<VectorType>(RHS->getType());

    auto *ReturnType =
        FixedVectorType::get(LHSType->getElementType(), LHSRows * RHSColumns);

    Value *Ops[] = {LHS, RHS, B.getInt32(LHSRows), B.getInt32(LHSColumns),
                    B.getInt32(RHSColumns)};
    Type *OverloadedTypes[] = {ReturnType, LHSType, RHSType};

    Function *TheFn = Intrinsic::getDeclaration(
        getModule(), Intrinsic::matrix_multiply, OverloadedTypes);
    return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
  }
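
  // Example sketch (assumes a MatrixBuilder MB and flat vector values LHS and
  // RHS): multiplying a 2x3 matrix by a 3x4 matrix takes vectors of 6 and 12
  // elements and yields a 2 * 4 = 8 element result.
  //
  //   Value *Prod = MB.CreateMatrixMultiply(LHS, RHS, /*LHSRows=*/2,
  //                                         /*LHSColumns=*/3, /*RHSColumns=*/4);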

  /// Insert a single element \p NewVal into \p Matrix at indices (\p RowIdx, \p
  /// ColumnIdx).
  Value *CreateMatrixInsert(Value *Matrix, Value *NewVal, Value *RowIdx,
                            Value *ColumnIdx, unsigned NumRows) {
    return B.CreateInsertElement(
        Matrix, NewVal,
        B.CreateAdd(B.CreateMul(ColumnIdx, ConstantInt::get(
                                               ColumnIdx->getType(), NumRows)),
                    RowIdx));
  }

  /// Add matrices \p LHS and \p RHS. Supports both integer and floating point
  /// matrices.
  Value *CreateAdd(Value *LHS, Value *RHS) {
    assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
    if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
      assert(!isa<ScalableVectorType>(LHS->getType()) &&
             "LHS Assumed to be fixed width");
      RHS = B.CreateVectorSplat(
          cast<VectorType>(LHS->getType())->getElementCount(), RHS,
          "scalar.splat");
    } else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
      assert(!isa<ScalableVectorType>(RHS->getType()) &&
             "RHS Assumed to be fixed width");
      LHS = B.CreateVectorSplat(
          cast<VectorType>(RHS->getType())->getElementCount(), LHS,
          "scalar.splat");
    }

    return cast<VectorType>(LHS->getType())
                   ->getElementType()
                   ->isFloatingPointTy()
               ? B.CreateFAdd(LHS, RHS)
               : B.CreateAdd(LHS, RHS);
  }

  /// Subtract matrices \p LHS and \p RHS. Supports both integer and floating
  /// point matrices.
  Value *CreateSub(Value *LHS, Value *RHS) {
    assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
    if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
      assert(!isa<ScalableVectorType>(LHS->getType()) &&
             "LHS Assumed to be fixed width");
      RHS = B.CreateVectorSplat(
          cast<VectorType>(LHS->getType())->getElementCount(), RHS,
          "scalar.splat");
    } else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
      assert(!isa<ScalableVectorType>(RHS->getType()) &&
             "RHS Assumed to be fixed width");
      LHS = B.CreateVectorSplat(
          cast<VectorType>(RHS->getType())->getElementCount(), LHS,
          "scalar.splat");
    }

    return cast<VectorType>(LHS->getType())
                   ->getElementType()
                   ->isFloatingPointTy()
               ? B.CreateFSub(LHS, RHS)
               : B.CreateSub(LHS, RHS);
  }

  /// Multiply matrix \p LHS with scalar \p RHS or scalar \p LHS with matrix \p
  /// RHS.
  Value *CreateScalarMultiply(Value *LHS, Value *RHS) {
    std::tie(LHS, RHS) = splatScalarOperandIfNeeded(LHS, RHS);
    if (LHS->getType()->getScalarType()->isFloatingPointTy())
      return B.CreateFMul(LHS, RHS);
    return B.CreateMul(LHS, RHS);
  }

  /// Divide matrix \p LHS by scalar \p RHS. If the operands are integers, \p
  /// IsUnsigned indicates whether UDiv or SDiv should be used.
  Value *CreateScalarDiv(Value *LHS, Value *RHS, bool IsUnsigned) {
    assert(LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy());
    assert(!isa<ScalableVectorType>(LHS->getType()) &&
           "LHS Assumed to be fixed width");
    RHS =
        B.CreateVectorSplat(cast<VectorType>(LHS->getType())->getElementCount(),
                            RHS, "scalar.splat");
    return cast<VectorType>(LHS->getType())
                   ->getElementType()
                   ->isFloatingPointTy()
               ? B.CreateFDiv(LHS, RHS)
               : (IsUnsigned ? B.CreateUDiv(LHS, RHS) : B.CreateSDiv(LHS, RHS));
  }

  /// Create an assumption that \p Idx is less than \p NumElements.
  void CreateIndexAssumption(Value *Idx, unsigned NumElements,
                             Twine const &Name = "") {
    Value *NumElts =
        B.getIntN(Idx->getType()->getScalarSizeInBits(), NumElements);
    auto *Cmp = B.CreateICmpULT(Idx, NumElts);
    if (isa<ConstantInt>(Cmp))
      assert(cast<ConstantInt>(Cmp)->isOne() && "Index must be valid!");
    else
      B.CreateAssumption(Cmp);
  }

  /// Compute the index to access the element at (\p RowIdx, \p ColumnIdx) from
  /// a matrix with \p NumRows embedded in a vector.
  Value *CreateIndex(Value *RowIdx, Value *ColumnIdx, unsigned NumRows,
                     Twine const &Name = "") {
    unsigned MaxWidth = std::max(RowIdx->getType()->getScalarSizeInBits(),
                                 ColumnIdx->getType()->getScalarSizeInBits());
    Type *IntTy = IntegerType::get(RowIdx->getType()->getContext(), MaxWidth);
    RowIdx = B.CreateZExt(RowIdx, IntTy);
    ColumnIdx = B.CreateZExt(ColumnIdx, IntTy);
    Value *NumRowsV = B.getIntN(MaxWidth, NumRows);
    return B.CreateAdd(B.CreateMul(ColumnIdx, NumRowsV), RowIdx);
  }
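
  // Worked example: in a column-major matrix with NumRows = 4, the element at
  // (RowIdx = 2, ColumnIdx = 1) lives at flat vector index 1 * 4 + 2 = 6.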
};

} // end namespace llvm

#endif // LLVM_IR_MATRIXBUILDER_H
//==-- ConstantFold.h - DL-independent Constant Folding Interface -*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DataLayout-independent constant folding interface.
// When possible, the DataLayout-aware constant folding interface in
// Analysis/ConstantFolding.h should be preferred.
//
// These interfaces are used by the ConstantExpr::get* methods to automatically
// fold constants when possible.
//
// These operators may return a null object if they don't know how to perform
// the specified operation on the specified constant types.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CONSTANTFOLD_H
#define LLVM_IR_CONSTANTFOLD_H

#include "llvm/IR/InstrTypes.h"
#include <optional>

namespace llvm {
  template <typename T> class ArrayRef;
  class Value;
  class Constant;
  class Type;

  // Constant fold various types of instructions...
  Constant *ConstantFoldCastInstruction(
    unsigned opcode,     ///< The opcode of the cast
    Constant *V,         ///< The source constant
    Type *DestTy   ///< The destination type
  );
  Constant *ConstantFoldSelectInstruction(Constant *Cond,
                                          Constant *V1, Constant *V2);
  Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
  Constant *ConstantFoldInsertElementInstruction(Constant *Val, Constant *Elt,
                                                 Constant *Idx);
  Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
                                                 ArrayRef<int> Mask);
  Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
                                                ArrayRef<unsigned> Idxs);
  Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
                                               ArrayRef<unsigned> Idxs);
  Constant *ConstantFoldUnaryInstruction(unsigned Opcode, Constant *V);
  Constant *ConstantFoldBinaryInstruction(unsigned Opcode, Constant *V1,
                                          Constant *V2);
  Constant *ConstantFoldCompareInstruction(CmpInst::Predicate Predicate,
                                           Constant *C1, Constant *C2);
  Constant *ConstantFoldGetElementPtr(Type *Ty, Constant *C, bool InBounds,
                                      std::optional<unsigned> InRangeIndex,
                                      ArrayRef<Value *> Idxs);
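
  // Example sketch: each helper returns nullptr when it cannot fold. Folding
  // an integer add of two constants yields a ConstantInt (assumes an
  // LLVMContext Ctx):
  //
  //   Type *I32 = Type::getInt32Ty(Ctx);
  //   Constant *C = ConstantFoldBinaryInstruction(
  //       Instruction::Add, ConstantInt::get(I32, 2), ConstantInt::get(I32, 3));
  //   // C is i32 5; a null result would mean the fold is unsupported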
} // End llvm namespace

#endif
//===- llvm/IR/TypedPointerType.h - Typed Pointer Type --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains typed pointer type information. It is kept in a separate
// file to make accidental use of this type less likely.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_TYPEDPOINTERTYPE_H
#define LLVM_IR_TYPEDPOINTERTYPE_H

#include "llvm/IR/Type.h"

namespace llvm {

/// A few GPU targets, such as DXIL and SPIR-V, have typed pointers. This
/// pointer type abstraction is used for tracking the types of these pointers.
/// It is not legal to use this type, or derived types containing this type, in
/// LLVM IR.
class TypedPointerType : public Type {
  explicit TypedPointerType(Type *ElType, unsigned AddrSpace);

  Type *PointeeTy;

public:
  TypedPointerType(const TypedPointerType &) = delete;
  TypedPointerType &operator=(const TypedPointerType &) = delete;

  /// This constructs a pointer to an object of the specified type in a numbered
  /// address space.
  static TypedPointerType *get(Type *ElementType, unsigned AddressSpace);

  /// Return true if the specified type is valid as an element type.
  static bool isValidElementType(Type *ElemTy);

  /// Return the address space of the Pointer type.
  unsigned getAddressSpace() const { return getSubclassData(); }

  Type *getElementType() const { return PointeeTy; }

  /// Implement support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == TypedPointerTyID;
  }
};
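
// Example sketch (assumes an LLVMContext Ctx): model "i32 addrspace(1)*" for a
// target with typed pointers, such as SPIR-V.
//
//   TypedPointerType *TPT =
//       TypedPointerType::get(Type::getInt32Ty(Ctx), /*AddressSpace=*/1);
//   assert(TPT->getElementType()->isIntegerTy(32));
//   assert(TPT->getAddressSpace() == 1);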

} // namespace llvm

#endif // LLVM_IR_TYPEDPOINTERTYPE_H
//===- AbstractCallSite.h - Abstract call sites -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AbstractCallSite class, which is a wrapper that
// allows treating direct, indirect, and callback calls the same.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ABSTRACTCALLSITE_H
#define LLVM_IR_ABSTRACTCALLSITE_H

#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
#include <cassert>

namespace llvm {

class Argument;
class Use;

/// AbstractCallSite
///
/// An abstract call site is a wrapper that allows treating direct,
/// indirect, and callback calls the same. If an abstract call site
/// represents a direct or indirect call site it behaves like a stripped
/// down version of a normal call site object. The abstract call site can
/// also represent a callback call, i.e., the case where the initially
/// called function (=broker) may invoke a third one (=callback callee).
/// In this case, the abstract call site hides the middle man, i.e., the
/// broker function. The result is a representation of the callback call,
/// inside the broker, but in the context of the original call to the broker.
///
/// There are up to three functions involved when we talk about callback call
/// sites: the caller (1), which invokes the broker function; the broker
/// function (2), which will invoke the callee zero or more times; and finally
/// the callee (3), which is the target of the callback call.
///
/// The abstract call site will handle the mapping from parameters to arguments
/// depending on the semantics of the broker function. However, it is important
/// to note that the mapping is often partial. Thus, some arguments of the
/// call/invoke instruction are mapped to parameters of the callee while others
/// are not.
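///
/// An illustrative sketch (adapted from the LangRef `callback` metadata
/// example; not something this header defines): a broker such as
/// pthread_create carries metadata naming which operand is the callback
/// callee and which operands are forwarded to it:
///
/// \code
///   declare !callback !1 i32 @pthread_create(ptr, ptr, ptr, ptr)
///   !1 = !{!2}
///   !2 = !{i64 2, i64 3, i1 false} ; operand 2 is the callee,
///                                  ; operand 3 becomes its argument
/// \endcode
///
/// An abstract call site built from such a use maps the callback callee's
/// single parameter to call-site operand 3.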
class AbstractCallSite {
public:

  /// The encoding of a callback with regard to the underlying instruction.
  struct CallbackInfo {

    /// For direct/indirect calls the parameter encoding is empty. If it is not,
    /// the abstract call site represents a callback. In that case, the first
    /// element of the encoding vector represents which argument of the call
    /// site CB is the callback callee. The remaining elements map parameters
    /// (identified by their position) to the arguments that will be passed
    /// through (also identified by position but in the call site instruction).
    ///
    /// NOTE that we use LLVM argument numbers (starting at 0) and not
    /// clang/source argument numbers (starting at 1). The -1 entries represent
    /// unknown values that are passed to the callee.
    using ParameterEncodingTy = SmallVector<int, 0>;
    ParameterEncodingTy ParameterEncoding;

  };

private:

  /// The underlying call site:
  ///   caller -> callee,             if this is a direct or indirect call site
  ///   caller -> broker function,    if this is a callback call site
  CallBase *CB;

  /// The encoding of a callback with regard to the underlying instruction.
  CallbackInfo CI;

public:
  /// Sole constructor for abstract call sites (ACS).
  ///
  /// An abstract call site can only be constructed through an llvm::Use because
  /// each operand (=use) of an instruction could potentially be a different
  /// abstract call site. Furthermore, even if the value of the llvm::Use is the
  /// same, and the user is as well, the abstract call sites might not be.
  ///
  /// If a use is not associated with an abstract call site the constructed ACS
  /// will evaluate to false if converted to a boolean.
  ///
  /// If the use is the callee use of a call or invoke instruction, the
  /// constructed abstract call site will behave as a llvm::CallSite would.
  ///
  /// If the use is not a callee use of a call or invoke instruction, the
  /// callback metadata is used to determine the argument <-> parameter mapping
  /// as well as the callee of the abstract call site.
  AbstractCallSite(const Use *U);

  /// Add operand uses of \p CB that represent callback uses into
  /// \p CallbackUses.
  ///
  /// All uses added to \p CallbackUses can be used to create abstract call
  /// sites for which AbstractCallSite::isCallbackCall() will return true.
  static void getCallbackUses(const CallBase &CB,
                              SmallVectorImpl<const Use *> &CallbackUses);

  /// Conversion operator to conveniently check for a valid/initialized ACS.
  explicit operator bool() const { return CB != nullptr; }

  /// Return the underlying instruction.
  CallBase *getInstruction() const { return CB; }

  /// Return true if this ACS represents a direct call.
  bool isDirectCall() const {
    return !isCallbackCall() && !CB->isIndirectCall();
  }

  /// Return true if this ACS represents an indirect call.
  bool isIndirectCall() const {
    return !isCallbackCall() && CB->isIndirectCall();
  }

  /// Return true if this ACS represents a callback call.
  bool isCallbackCall() const {
    // For a callback call site the callee is ALWAYS stored first in the
    // transitive values vector. Thus, a non-empty vector indicates a callback.
    return !CI.ParameterEncoding.empty();
  }

  /// Return true if @p UI is the use that defines the callee of this ACS.
  bool isCallee(Value::const_user_iterator UI) const {
    return isCallee(&UI.getUse());
  }

  /// Return true if @p U is the use that defines the callee of this ACS.
  bool isCallee(const Use *U) const {
    if (isDirectCall())
      return CB->isCallee(U);

    assert(!CI.ParameterEncoding.empty() &&
           "Callback without parameter encoding!");

    // If the use is actually in a constant cast expression which itself
    // has only one use, we look through the constant cast expression.
    if (auto *CE = dyn_cast<ConstantExpr>(U->getUser()))
      if (CE->hasOneUse() && CE->isCast())
        U = &*CE->use_begin();

    return (int)CB->getArgOperandNo(U) == CI.ParameterEncoding[0];
  }

  /// Return the number of parameters of the callee.
  unsigned getNumArgOperands() const {
    if (isDirectCall())
      return CB->arg_size();
    // Subtract 1 for the callee encoding.
    return CI.ParameterEncoding.size() - 1;
  }

  /// Return the operand index of the underlying instruction associated with @p
  /// Arg.
  int getCallArgOperandNo(Argument &Arg) const {
    return getCallArgOperandNo(Arg.getArgNo());
  }

  /// Return the operand index of the underlying instruction associated with
  /// the function parameter number @p ArgNo or -1 if there is none.
  int getCallArgOperandNo(unsigned ArgNo) const {
    if (isDirectCall())
      return ArgNo;
    // Add 1 for the callee encoding.
    return CI.ParameterEncoding[ArgNo + 1];
  }

  /// Return the operand of the underlying instruction associated with @p Arg.
  Value *getCallArgOperand(Argument &Arg) const {
    return getCallArgOperand(Arg.getArgNo());
  }

  /// Return the operand of the underlying instruction associated with the
  /// function parameter number @p ArgNo or nullptr if there is none.
  Value *getCallArgOperand(unsigned ArgNo) const {
    if (isDirectCall())
      return CB->getArgOperand(ArgNo);
    // Add 1 for the callee encoding.
    return CI.ParameterEncoding[ArgNo + 1] >= 0
               ? CB->getArgOperand(CI.ParameterEncoding[ArgNo + 1])
               : nullptr;
  }

  /// Return the operand index of the underlying instruction associated with the
  /// callee of this ACS. Only valid for callback calls!
  int getCallArgOperandNoForCallee() const {
    assert(isCallbackCall());
    assert(CI.ParameterEncoding.size() && CI.ParameterEncoding[0] >= 0);
    return CI.ParameterEncoding[0];
  }

  /// Return the use of the callee value in the underlying instruction. Only
  /// valid for callback calls!
  const Use &getCalleeUseForCallback() const {
    int CalleeArgIdx = getCallArgOperandNoForCallee();
    assert(CalleeArgIdx >= 0 &&
           unsigned(CalleeArgIdx) < getInstruction()->getNumOperands());
    return getInstruction()->getOperandUse(CalleeArgIdx);
  }

  /// Return the pointer to function that is being called.
  Value *getCalledOperand() const {
    if (isDirectCall())
      return CB->getCalledOperand();
    return CB->getArgOperand(getCallArgOperandNoForCallee());
  }

  /// Return the function being called if this is a direct call, otherwise
  /// return null (if it's an indirect call).
  Function *getCalledFunction() const {
    Value *V = getCalledOperand();
    return V ? dyn_cast<Function>(V->stripPointerCasts()) : nullptr;
  }
};

/// Apply function Func to each CB's callback call site.
template <typename UnaryFunction>
void forEachCallbackCallSite(const CallBase &CB, UnaryFunction Func) {
  SmallVector<const Use *, 4u> CallbackUses;
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall() && "must be a callback call");
    Func(ACS);
  }
}

/// Apply function Func to each CB's callback function.
template <typename UnaryFunction>
void forEachCallbackFunction(const CallBase &CB, UnaryFunction Func) {
  forEachCallbackCallSite(CB, [&Func](AbstractCallSite &ACS) {
    if (Function *Callback = ACS.getCalledFunction())
      Func(Callback);
  });
}
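
// Example sketch (assumes a CallBase CB whose callee carries !callback
// metadata): collect every callback callee reachable from CB.
//
//   SmallVector<Function *, 4> Callbacks;
//   forEachCallbackFunction(CB, [&](Function *F) { Callbacks.push_back(F); });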

} // end namespace llvm

#endif // LLVM_IR_ABSTRACTCALLSITE_H
//===- llvm/Attributes.h - Container for Attributes -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file contains the simple types necessary to represent the
/// attributes associated with functions and their calls.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ATTRIBUTES_H
#define LLVM_IR_ATTRIBUTES_H

#include "llvm-c/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>

namespace llvm {

class AttrBuilder;
class AttributeMask;
class AttributeImpl;
class AttributeListImpl;
class AttributeSetNode;
class FoldingSetNodeID;
class Function;
class LLVMContext;
class Type;
class raw_ostream;
enum FPClassTest : unsigned;

enum class AllocFnKind : uint64_t {
  Unknown = 0,
  Alloc = 1 << 0,         // Allocator function returns a new allocation
  Realloc = 1 << 1,       // Allocator function resizes the `allocptr` argument
  Free = 1 << 2,          // Allocator function frees the `allocptr` argument
  Uninitialized = 1 << 3, // Allocator function returns uninitialized memory
  Zeroed = 1 << 4,        // Allocator function returns zeroed memory
  Aligned = 1 << 5,       // Allocator function aligns allocations per the
                          // `allocalign` argument
  LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Aligned)
};
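
// Example (illustrative): LLVM_MARK_AS_BITMASK_ENUM enables the usual bitwise
// operators, so allocation kinds compose and can be tested against Unknown:
//
//   AllocFnKind K = AllocFnKind::Alloc | AllocFnKind::Zeroed; // e.g. calloc
//   bool Zeroed = (K & AllocFnKind::Zeroed) != AllocFnKind::Unknown;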

//===----------------------------------------------------------------------===//
/// \class
/// Functions, function parameters, and return types can have attributes
/// to indicate how they should be treated by optimizations and code
/// generation. This class represents one of those attributes. It is lightweight
/// and should be passed around by-value.
class Attribute {
public:
  /// This enumeration lists the attributes that can be associated with
  /// parameters, function results, or the function itself.
  ///
  /// Note: The `uwtable' attribute is about the ABI or the user mandating an
  /// entry in the unwind table. The `nounwind' attribute is about an exception
  /// passing by the function.
  ///
  /// In a theoretical system that uses tables for profiling and SjLj for
  /// exceptions, they would be fully independent. In a normal system that uses
  /// tables for both, the semantics are:
  ///
  /// nil                = Needs an entry because an exception might pass by.
  /// nounwind           = No need for an entry
  /// uwtable            = Needs an entry because the ABI says so and because
  ///                      an exception might pass by.
  /// uwtable + nounwind = Needs an entry because the ABI says so.

  enum AttrKind {
    // IR-Level Attributes
    None,                  ///< No attributes have been set
    #define GET_ATTR_ENUM
    #include "llvm/IR/Attributes.inc"
    EndAttrKinds,          ///< Sentinel value useful for loops
    EmptyKey,              ///< Use as Empty key for DenseMap of AttrKind
    TombstoneKey,          ///< Use as Tombstone key for DenseMap of AttrKind
  };

  static const unsigned NumIntAttrKinds = LastIntAttr - FirstIntAttr + 1;
  static const unsigned NumTypeAttrKinds = LastTypeAttr - FirstTypeAttr + 1;

  static bool isEnumAttrKind(AttrKind Kind) {
    return Kind >= FirstEnumAttr && Kind <= LastEnumAttr;
  }
  static bool isIntAttrKind(AttrKind Kind) {
    return Kind >= FirstIntAttr && Kind <= LastIntAttr;
  }
  static bool isTypeAttrKind(AttrKind Kind) {
    return Kind >= FirstTypeAttr && Kind <= LastTypeAttr;
  }

  static bool canUseAsFnAttr(AttrKind Kind);
  static bool canUseAsParamAttr(AttrKind Kind);
  static bool canUseAsRetAttr(AttrKind Kind);

private:
  AttributeImpl *pImpl = nullptr;

  Attribute(AttributeImpl *A) : pImpl(A) {}

public:
  Attribute() = default;

  //===--------------------------------------------------------------------===//
  // Attribute Construction
  //===--------------------------------------------------------------------===//

  /// Return a uniquified Attribute object.
  static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val = 0);
  static Attribute get(LLVMContext &Context, StringRef Kind,
                       StringRef Val = StringRef());
  static Attribute get(LLVMContext &Context, AttrKind Kind, Type *Ty);

  /// Return a uniquified Attribute object that has the specific
  /// alignment set.
  static Attribute getWithAlignment(LLVMContext &Context, Align Alignment);
  static Attribute getWithStackAlignment(LLVMContext &Context, Align Alignment);
  static Attribute getWithDereferenceableBytes(LLVMContext &Context,
                                               uint64_t Bytes);
  static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context,
                                                     uint64_t Bytes);
  static Attribute getWithAllocSizeArgs(
      LLVMContext &Context, unsigned ElemSizeArg,
      const std::optional<unsigned> &NumElemsArg);
  static Attribute getWithVScaleRangeArgs(LLVMContext &Context,
                                          unsigned MinValue, unsigned MaxValue);
  static Attribute getWithByValType(LLVMContext &Context, Type *Ty);
  static Attribute getWithStructRetType(LLVMContext &Context, Type *Ty);
  static Attribute getWithByRefType(LLVMContext &Context, Type *Ty);
  static Attribute getWithPreallocatedType(LLVMContext &Context, Type *Ty);
  static Attribute getWithInAllocaType(LLVMContext &Context, Type *Ty);
  static Attribute getWithUWTableKind(LLVMContext &Context, UWTableKind Kind);
  static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME);
  static Attribute getWithNoFPClass(LLVMContext &Context, FPClassTest Mask);

  /// For a typed attribute, return the equivalent attribute with the type
  /// changed to \p ReplacementTy.
  Attribute getWithNewType(LLVMContext &Context, Type *ReplacementTy) {
    assert(isTypeAttribute() && "this requires a typed attribute");
    return get(Context, getKindAsEnum(), ReplacementTy);
  }

  static Attribute::AttrKind getAttrKindFromName(StringRef AttrName);

  static StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind);

  /// Return true if the provided string matches the IR name of an attribute.
  /// Example: "noalias" returns true but "NoAlias" does not.
  static bool isExistingAttribute(StringRef Name);
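
  // Example sketch (assumes an LLVMContext Ctx): attributes are uniqued per
  // context, so equivalent constructions compare equal.
  //
  //   Attribute A = Attribute::get(Ctx, Attribute::Alignment, 16);
  //   Attribute B = Attribute::getWithAlignment(Ctx, Align(16));
  //   assert(A.isIntAttribute() && A.getValueAsInt() == 16 && A == B);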

  //===--------------------------------------------------------------------===//
  // Attribute Accessors
  //===--------------------------------------------------------------------===//

  /// Return true if the attribute is an Attribute::AttrKind type.
  bool isEnumAttribute() const;

  /// Return true if the attribute is an integer attribute.
  bool isIntAttribute() const;

  /// Return true if the attribute is a string (target-dependent)
  /// attribute.
  bool isStringAttribute() const;

  /// Return true if the attribute is a type attribute.
  bool isTypeAttribute() const;

  /// Return true if the attribute is any kind of attribute.
  bool isValid() const { return pImpl; }

  /// Return true if the attribute is present.
  bool hasAttribute(AttrKind Val) const;

  /// Return true if the target-dependent attribute is present.
  bool hasAttribute(StringRef Val) const;

  /// Return the attribute's kind as an enum (Attribute::AttrKind). This
  /// requires the attribute to be an enum, integer, or type attribute.
  Attribute::AttrKind getKindAsEnum() const;

  /// Return the attribute's value as an integer. This requires that the
  /// attribute be an integer attribute.
  uint64_t getValueAsInt() const;

  /// Return the attribute's value as a boolean. This requires that the
  /// attribute be a string attribute.
  bool getValueAsBool() const;

  /// Return the attribute's kind as a string. This requires the
  /// attribute to be a string attribute.
  StringRef getKindAsString() const;

  /// Return the attribute's value as a string. This requires the
  /// attribute to be a string attribute.
  StringRef getValueAsString() const;

  /// Return the attribute's value as a Type. This requires the attribute to be
  /// a type attribute.
  Type *getValueAsType() const;

  /// Returns the alignment field of an attribute as a byte alignment
  /// value.
  MaybeAlign getAlignment() const;

  /// Returns the stack alignment field of an attribute as a byte
  /// alignment value.
  MaybeAlign getStackAlignment() const;

  /// Returns the number of dereferenceable bytes from the
  /// dereferenceable attribute.
  uint64_t getDereferenceableBytes() const;

  /// Returns the number of dereferenceable_or_null bytes from the
  /// dereferenceable_or_null attribute.
  uint64_t getDereferenceableOrNullBytes() const;

  /// Returns the argument numbers for the allocsize attribute.
  std::pair<unsigned, std::optional<unsigned>> getAllocSizeArgs() const;

  /// Returns the minimum value for the vscale_range attribute.
  unsigned getVScaleRangeMin() const;

  /// Returns the maximum value for the vscale_range attribute or std::nullopt
  /// when unknown.
  std::optional<unsigned> getVScaleRangeMax() const;

  /// Returns the unwind table kind.
  UWTableKind getUWTableKind() const;

  /// Returns the allocator function kind.
  AllocFnKind getAllocKind() const;

  /// Returns memory effects.
  MemoryEffects getMemoryEffects() const;

  /// Return the FPClassTest for nofpclass
  FPClassTest getNoFPClass() const;

  /// Convert the Attribute to its equivalent mnemonic string, e.g. for use by
  /// the assembly writer.
  std::string getAsString(bool InAttrGrp = false) const;

  /// Return true if this attribute belongs to the LLVMContext.
  bool hasParentContext(LLVMContext &C) const;

  /// Equality and non-equality operators.
  bool operator==(Attribute A) const { return pImpl == A.pImpl; }
  bool operator!=(Attribute A) const { return pImpl != A.pImpl; }

  /// Less-than operator. Useful for sorting the attributes list.
  bool operator<(Attribute A) const;

  void Profile(FoldingSetNodeID &ID) const;

  /// Return a raw pointer that uniquely identifies this attribute.
  void *getRawPointer() const {
    return pImpl;
  }

  /// Get an attribute from a raw pointer created by getRawPointer.
  static Attribute fromRawPointer(void *RawPtr) {
    return Attribute(reinterpret_cast<AttributeImpl*>(RawPtr));
  }
};

// Specialized opaque value conversions.
inline LLVMAttributeRef wrap(Attribute Attr) {
  return reinterpret_cast<LLVMAttributeRef>(Attr.getRawPointer());
}

// Specialized opaque value conversions.
inline Attribute unwrap(LLVMAttributeRef Attr) {
  return Attribute::fromRawPointer(Attr);
}

//===----------------------------------------------------------------------===//
/// \class
/// This class holds the attributes for a particular argument, parameter,
/// function, or return value. It is an immutable value type that is cheap to
/// copy. Adding and removing enum attributes is intended to be fast, but adding
/// and removing string or integer attributes involves a FoldingSet lookup.
class AttributeSet {
  friend AttributeListImpl;
  template <typename Ty, typename Enable> friend struct DenseMapInfo;

  // TODO: Extract AvailableAttrs from AttributeSetNode and store them here.
  // This will allow an efficient implementation of addAttribute and
  // removeAttribute for enum attrs.

  /// Private implementation pointer.
  AttributeSetNode *SetNode = nullptr;

private:
  explicit AttributeSet(AttributeSetNode *ASN) : SetNode(ASN) {}

public:
  /// AttributeSet is a trivially copyable value type.
  AttributeSet() = default;
  AttributeSet(const AttributeSet &) = default;
  ~AttributeSet() = default;

  static AttributeSet get(LLVMContext &C, const AttrBuilder &B);
  static AttributeSet get(LLVMContext &C, ArrayRef<Attribute> Attrs);

  bool operator==(const AttributeSet &O) const { return SetNode == O.SetNode; }
  bool operator!=(const AttributeSet &O) const { return !(*this == O); }

  /// Add an argument attribute. Returns a new set because attribute sets are
  /// immutable.
  [[nodiscard]] AttributeSet addAttribute(LLVMContext &C,
                                          Attribute::AttrKind Kind) const;

  /// Add a target-dependent attribute. Returns a new set because attribute sets
  /// are immutable.
  [[nodiscard]] AttributeSet addAttribute(LLVMContext &C, StringRef Kind,
                                          StringRef Value = StringRef()) const;

  /// Add attributes to the attribute set. Returns a new set because attribute
  /// sets are immutable.
  [[nodiscard]] AttributeSet addAttributes(LLVMContext &C,
                                           AttributeSet AS) const;

  /// Remove the specified attribute from this set. Returns a new set because
  /// attribute sets are immutable.
  [[nodiscard]] AttributeSet removeAttribute(LLVMContext &C,
                                             Attribute::AttrKind Kind) const;

  /// Remove the specified attribute from this set. Returns a new set because
  /// attribute sets are immutable.
  [[nodiscard]] AttributeSet removeAttribute(LLVMContext &C,
                                             StringRef Kind) const;

  /// Remove the specified attributes from this set. Returns a new set because
  /// attribute sets are immutable.
  [[nodiscard]] AttributeSet
  removeAttributes(LLVMContext &C, const AttributeMask &AttrsToRemove) const;

  /// Return the number of attributes in this set.
  unsigned getNumAttributes() const;

  /// Return true if attributes exist in this set.
  bool hasAttributes() const { return SetNode != nullptr; }

  /// Return true if the attribute exists in this set.
  bool hasAttribute(Attribute::AttrKind Kind) const;

  /// Return true if the attribute exists in this set.
  bool hasAttribute(StringRef Kind) const;

  /// Return the attribute object.
  Attribute getAttribute(Attribute::AttrKind Kind) const;

  /// Return the target-dependent attribute object.
  Attribute getAttribute(StringRef Kind) const;

  MaybeAlign getAlignment() const;
  MaybeAlign getStackAlignment() const;
  uint64_t getDereferenceableBytes() const;
  uint64_t getDereferenceableOrNullBytes() const;
  Type *getByValType() const;
  Type *getStructRetType() const;
  Type *getByRefType() const;
  Type *getPreallocatedType() const;
  Type *getInAllocaType() const;
  Type *getElementType() const;
  std::optional<std::pair<unsigned, std::optional<unsigned>>> getAllocSizeArgs()
      const;
  unsigned getVScaleRangeMin() const;
  std::optional<unsigned> getVScaleRangeMax() const;
  UWTableKind getUWTableKind() const;
  AllocFnKind getAllocKind() const;
  MemoryEffects getMemoryEffects() const;
  FPClassTest getNoFPClass() const;
  std::string getAsString(bool InAttrGrp = false) const;

  /// Return true if this attribute set belongs to the LLVMContext.
  bool hasParentContext(LLVMContext &C) const;

  using iterator = const Attribute *;

  iterator begin() const;
  iterator end() const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const;
#endif
};
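
// Example sketch (assumes an LLVMContext Ctx): attribute sets are immutable,
// so each mutator returns a fresh set and leaves the original untouched.
//
//   AttrBuilder B(Ctx);
//   B.addAttribute(Attribute::NoUnwind);
//   AttributeSet AS = AttributeSet::get(Ctx, B);
//   AttributeSet AS2 = AS.addAttribute(Ctx, Attribute::ReadOnly);
//   assert(!AS.hasAttribute(Attribute::ReadOnly));
//   assert(AS2.hasAttribute(Attribute::ReadOnly));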

//===----------------------------------------------------------------------===//
/// \class
/// Provide DenseMapInfo for AttributeSet.
template <> struct DenseMapInfo<AttributeSet, void> {
  static AttributeSet getEmptyKey() {
    auto Val = static_cast<uintptr_t>(-1);
    Val <<= PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
    return AttributeSet(reinterpret_cast<AttributeSetNode *>(Val));
  }

  static AttributeSet getTombstoneKey() {
    auto Val = static_cast<uintptr_t>(-2);
    Val <<= PointerLikeTypeTraits<void *>::NumLowBitsAvailable;
    return AttributeSet(reinterpret_cast<AttributeSetNode *>(Val));
  }

  static unsigned getHashValue(AttributeSet AS) {
    return (unsigned((uintptr_t)AS.SetNode) >> 4) ^
           (unsigned((uintptr_t)AS.SetNode) >> 9);
  }

  static bool isEqual(AttributeSet LHS, AttributeSet RHS) { return LHS == RHS; }
};

//===----------------------------------------------------------------------===//
/// \class
/// This class holds the attributes for a function, its return value, and
/// its parameters. You access the attributes for each of them via an index into
/// the AttributeList object. The function attributes are at index
/// `AttributeList::FunctionIndex', the return value is at index
/// `AttributeList::ReturnIndex', and the attributes for the parameters start at
/// index `AttributeList::FirstArgIndex'.
class AttributeList {
public:
  enum AttrIndex : unsigned {
    ReturnIndex = 0U,
    FunctionIndex = ~0U,
    FirstArgIndex = 1,
  };
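
  // Worked example (illustrative): function attributes live at FunctionIndex,
  // return-value attributes at ReturnIndex, and parameter I at
  // FirstArgIndex + I:
  //
  //   AL = AL.addAttributeAtIndex(C, AttributeList::FunctionIndex,
  //                               Attribute::NoUnwind);  // the function
  //   AL = AL.addAttributeAtIndex(C, AttributeList::ReturnIndex,
  //                               Attribute::NonNull);   // the return value
  //   AL = AL.addParamAttribute(C, 0, Attribute::NoCapture); // index 1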

private:
  friend class AttrBuilder;
  friend class AttributeListImpl;
  friend class AttributeSet;
  friend class AttributeSetNode;
  template <typename Ty, typename Enable> friend struct DenseMapInfo;

  /// The attributes that we are managing. This can be null to represent
  /// the empty attributes list.
  AttributeListImpl *pImpl = nullptr;

public:
  /// Create an AttributeList with the specified parameters in it.
  static AttributeList get(LLVMContext &C,
                           ArrayRef<std::pair<unsigned, Attribute>> Attrs);
  static AttributeList get(LLVMContext &C,
                           ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);

  /// Create an AttributeList from attribute sets for a function, its
  /// return value, and all of its arguments.
  static AttributeList get(LLVMContext &C, AttributeSet FnAttrs,
                           AttributeSet RetAttrs,
                           ArrayRef<AttributeSet> ArgAttrs);

private:
  explicit AttributeList(AttributeListImpl *LI) : pImpl(LI) {}

  static AttributeList getImpl(LLVMContext &C, ArrayRef<AttributeSet> AttrSets);

  AttributeList setAttributesAtIndex(LLVMContext &C, unsigned Index,
                                     AttributeSet Attrs) const;

public:
  AttributeList() = default;

  //===--------------------------------------------------------------------===//
  // AttributeList Construction and Mutation
  //===--------------------------------------------------------------------===//

  /// Return an AttributeList with the specified parameters in it.
  static AttributeList get(LLVMContext &C, ArrayRef<AttributeList> Attrs);
  static AttributeList get(LLVMContext &C, unsigned Index,
                           ArrayRef<Attribute::AttrKind> Kinds);
  static AttributeList get(LLVMContext &C, unsigned Index,
                           ArrayRef<Attribute::AttrKind> Kinds,
                           ArrayRef<uint64_t> Values);
  static AttributeList get(LLVMContext &C, unsigned Index,
                           ArrayRef<StringRef> Kind);
  static AttributeList get(LLVMContext &C, unsigned Index,
                           AttributeSet Attrs);
  static AttributeList get(LLVMContext &C, unsigned Index,
                           const AttrBuilder &B);

  // TODO: remove non-AtIndex versions of these methods.
  /// Add an attribute to the attribute set at the given index.
  /// Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  addAttributeAtIndex(LLVMContext &C, unsigned Index,
                      Attribute::AttrKind Kind) const;

  /// Add an attribute to the attribute set at the given index.
  /// Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  addAttributeAtIndex(LLVMContext &C, unsigned Index, StringRef Kind,
                      StringRef Value = StringRef()) const;

  /// Add an attribute to the attribute set at the given index.
  /// Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  addAttributeAtIndex(LLVMContext &C, unsigned Index, Attribute A) const;

  /// Add attributes to the attribute set at the given index.
  /// Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList addAttributesAtIndex(LLVMContext &C,
                                                   unsigned Index,
                                                   const AttrBuilder &B) const;

  /// Add a function attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addFnAttribute(LLVMContext &C,
                                             Attribute::AttrKind Kind) const {
    return addAttributeAtIndex(C, FunctionIndex, Kind);
  }

  /// Add a function attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addFnAttribute(LLVMContext &C,
                                             Attribute Attr) const {
    return addAttributeAtIndex(C, FunctionIndex, Attr);
  }

  /// Add a function attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList
  addFnAttribute(LLVMContext &C, StringRef Kind,
                 StringRef Value = StringRef()) const {
    return addAttributeAtIndex(C, FunctionIndex, Kind, Value);
  }

  /// Add function attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addFnAttributes(LLVMContext &C,
                                              const AttrBuilder &B) const {
    return addAttributesAtIndex(C, FunctionIndex, B);
  }

  /// Add a return value attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addRetAttribute(LLVMContext &C,
                                              Attribute::AttrKind Kind) const {
    return addAttributeAtIndex(C, ReturnIndex, Kind);
  }

  /// Add a return value attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addRetAttribute(LLVMContext &C,
                                              Attribute Attr) const {
    return addAttributeAtIndex(C, ReturnIndex, Attr);
  }

  /// Add a return value attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addRetAttributes(LLVMContext &C,
                                               const AttrBuilder &B) const {
    return addAttributesAtIndex(C, ReturnIndex, B);
  }

  /// Add an argument attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList
  addParamAttribute(LLVMContext &C, unsigned ArgNo,
                    Attribute::AttrKind Kind) const {
    return addAttributeAtIndex(C, ArgNo + FirstArgIndex, Kind);
  }

  /// Add an argument attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList
  addParamAttribute(LLVMContext &C, unsigned ArgNo, StringRef Kind,
                    StringRef Value = StringRef()) const {
    return addAttributeAtIndex(C, ArgNo + FirstArgIndex, Kind, Value);
  }

  /// Add an attribute to the attribute list at the given arg indices. Returns a
  /// new list because attribute lists are immutable.
  [[nodiscard]] AttributeList addParamAttribute(LLVMContext &C,
                                                ArrayRef<unsigned> ArgNos,
                                                Attribute A) const;

  /// Add an argument attribute to the list. Returns a new list because
  /// attribute lists are immutable.
  [[nodiscard]] AttributeList addParamAttributes(LLVMContext &C, unsigned ArgNo,
                                                 const AttrBuilder &B) const {
    return addAttributesAtIndex(C, ArgNo + FirstArgIndex, B);
  }
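
  // A minimal usage sketch (Ctx and the attribute kinds are assumed to be in
  // scope): because AttributeList is immutable, each add/remove call returns
  // a fresh list that must be captured, typically by reassignment.
  //
  // \code
  //   AttributeList AL;
  //   AL = AL.addFnAttribute(Ctx, Attribute::NoUnwind);
  //   AL = AL.addParamAttribute(Ctx, /*ArgNo=*/0, Attribute::NonNull);
  // \endcode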

  /// Remove the specified attribute at the specified index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeAttributeAtIndex(LLVMContext &C, unsigned Index,
                         Attribute::AttrKind Kind) const;

  /// Remove the specified attribute at the specified index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeAttributeAtIndex(LLVMContext &C, unsigned Index, StringRef Kind) const;
  [[nodiscard]] AttributeList removeAttribute(LLVMContext &C, unsigned Index,
                                              StringRef Kind) const {
    return removeAttributeAtIndex(C, Index, Kind);
  }

  /// Remove the specified attributes at the specified index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeAttributesAtIndex(LLVMContext &C, unsigned Index,
                          const AttributeMask &AttrsToRemove) const;

  /// Remove all attributes at the specified index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList removeAttributesAtIndex(LLVMContext &C,
                                                      unsigned Index) const;

  /// Remove the specified attribute at the function index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeFnAttribute(LLVMContext &C, Attribute::AttrKind Kind) const {
    return removeAttributeAtIndex(C, FunctionIndex, Kind);
  }

  /// Remove the specified attribute at the function index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList removeFnAttribute(LLVMContext &C,
                                                StringRef Kind) const {
    return removeAttributeAtIndex(C, FunctionIndex, Kind);
  }

  /// Remove the specified attribute at the function index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeFnAttributes(LLVMContext &C, const AttributeMask &AttrsToRemove) const {
    return removeAttributesAtIndex(C, FunctionIndex, AttrsToRemove);
  }

  /// Remove the attributes at the function index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList removeFnAttributes(LLVMContext &C) const {
    return removeAttributesAtIndex(C, FunctionIndex);
  }

  /// Remove the specified attribute at the return value index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeRetAttribute(LLVMContext &C, Attribute::AttrKind Kind) const {
    return removeAttributeAtIndex(C, ReturnIndex, Kind);
  }

  /// Remove the specified attribute at the return value index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList removeRetAttribute(LLVMContext &C,
                                                 StringRef Kind) const {
    return removeAttributeAtIndex(C, ReturnIndex, Kind);
  }

  /// Remove the specified attribute at the return value index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeRetAttributes(LLVMContext &C,
                      const AttributeMask &AttrsToRemove) const {
    return removeAttributesAtIndex(C, ReturnIndex, AttrsToRemove);
  }

  /// Remove the specified attribute at the specified arg index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeParamAttribute(LLVMContext &C, unsigned ArgNo,
                       Attribute::AttrKind Kind) const {
    return removeAttributeAtIndex(C, ArgNo + FirstArgIndex, Kind);
  }

  /// Remove the specified attribute at the specified arg index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeParamAttribute(LLVMContext &C, unsigned ArgNo, StringRef Kind) const {
    return removeAttributeAtIndex(C, ArgNo + FirstArgIndex, Kind);
  }

  /// Remove the specified attribute at the specified arg index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  removeParamAttributes(LLVMContext &C, unsigned ArgNo,
                        const AttributeMask &AttrsToRemove) const {
    return removeAttributesAtIndex(C, ArgNo + FirstArgIndex, AttrsToRemove);
  }

  /// Remove all attributes at the specified arg index from this
  /// attribute list. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList removeParamAttributes(LLVMContext &C,
                                                    unsigned ArgNo) const {
    return removeAttributesAtIndex(C, ArgNo + FirstArgIndex);
  }

  /// Replace the type contained by attribute \p Kind at index \p ArgNo with
  /// \p ReplacementTy, preserving all other attributes.
  [[nodiscard]] AttributeList
  replaceAttributeTypeAtIndex(LLVMContext &C, unsigned ArgNo,
                              Attribute::AttrKind Kind,
                              Type *ReplacementTy) const {
    Attribute Attr = getAttributeAtIndex(ArgNo, Kind);
    auto Attrs = removeAttributeAtIndex(C, ArgNo, Kind);
    return Attrs.addAttributeAtIndex(C, ArgNo,
                                     Attr.getWithNewType(C, ReplacementTy));
  }
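
  // A sketch of the replacement helper above, assuming AL carries a byval
  // attribute on the first argument and NewTy is the desired pointee type:
  //
  // \code
  //   AL = AL.replaceAttributeTypeAtIndex(
  //       Ctx, AttributeList::FirstArgIndex + 0, Attribute::ByVal, NewTy);
  // \endcode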

  /// Add the dereferenceable attribute to the attribute set at the given
  /// index. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList addDereferenceableRetAttr(LLVMContext &C,
                                                        uint64_t Bytes) const;

  /// Add the dereferenceable attribute to the attribute set at the given
  /// arg index. Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList addDereferenceableParamAttr(LLVMContext &C,
                                                          unsigned ArgNo,
                                                          uint64_t Bytes) const;

  /// Add the dereferenceable_or_null attribute to the attribute set at
  /// the given arg index. Returns a new list because attribute lists are
  /// immutable.
  [[nodiscard]] AttributeList
  addDereferenceableOrNullParamAttr(LLVMContext &C, unsigned ArgNo,
                                    uint64_t Bytes) const;

  /// Add the allocsize attribute to the attribute set at the given arg index.
  /// Returns a new list because attribute lists are immutable.
  [[nodiscard]] AttributeList
  addAllocSizeParamAttr(LLVMContext &C, unsigned ArgNo, unsigned ElemSizeArg,
                        const std::optional<unsigned> &NumElemsArg);

  //===--------------------------------------------------------------------===//
  // AttributeList Accessors
  //===--------------------------------------------------------------------===//

  /// The attributes for the specified index are returned.
  AttributeSet getAttributes(unsigned Index) const;

  /// The attributes for the argument or parameter at the given index are
  /// returned.
  AttributeSet getParamAttrs(unsigned ArgNo) const;

  /// The attributes for the ret value are returned.
  AttributeSet getRetAttrs() const;

  /// The function attributes are returned.
  AttributeSet getFnAttrs() const;

  /// Return true if the attribute exists at the given index.
  bool hasAttributeAtIndex(unsigned Index, Attribute::AttrKind Kind) const;

  /// Return true if the attribute exists at the given index.
  bool hasAttributeAtIndex(unsigned Index, StringRef Kind) const;

  /// Return true if any attribute exists at the given index.
  bool hasAttributesAtIndex(unsigned Index) const;

  /// Return true if the attribute exists for the given argument.
  bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
    return hasAttributeAtIndex(ArgNo + FirstArgIndex, Kind);
  }

  /// Return true if the attribute exists for the given argument.
  bool hasParamAttr(unsigned ArgNo, StringRef Kind) const {
    return hasAttributeAtIndex(ArgNo + FirstArgIndex, Kind);
  }

  /// Return true if any attributes exist for the given argument.
  bool hasParamAttrs(unsigned ArgNo) const {
    return hasAttributesAtIndex(ArgNo + FirstArgIndex);
  }

  /// Return true if the attribute exists for the return value.
  bool hasRetAttr(Attribute::AttrKind Kind) const {
    return hasAttributeAtIndex(ReturnIndex, Kind);
  }

  /// Return true if the attribute exists for the return value.
  bool hasRetAttr(StringRef Kind) const {
    return hasAttributeAtIndex(ReturnIndex, Kind);
  }

  /// Return true if attributes exist for the return value.
  bool hasRetAttrs() const { return hasAttributesAtIndex(ReturnIndex); }

  /// Return true if the attribute exists for the function.
  bool hasFnAttr(Attribute::AttrKind Kind) const;

  /// Return true if the attribute exists for the function.
  bool hasFnAttr(StringRef Kind) const;

  /// Return true if attributes exist for the function.
  bool hasFnAttrs() const { return hasAttributesAtIndex(FunctionIndex); }

  /// Return true if the specified attribute is set for at least one
  /// parameter or for the return value. If Index is not nullptr, the index
  /// of a parameter with the specified attribute is provided.
  bool hasAttrSomewhere(Attribute::AttrKind Kind,
                        unsigned *Index = nullptr) const;

  /// Return the attribute object that exists at the given index.
  Attribute getAttributeAtIndex(unsigned Index, Attribute::AttrKind Kind) const;

  /// Return the attribute object that exists at the given index.
  Attribute getAttributeAtIndex(unsigned Index, StringRef Kind) const;

  /// Return the attribute object that exists at the arg index.
  Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
    return getAttributeAtIndex(ArgNo + FirstArgIndex, Kind);
  }

  /// Return the attribute object that exists at the given arg index.
  Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
    return getAttributeAtIndex(ArgNo + FirstArgIndex, Kind);
  }

  /// Return the attribute object that exists for the function.
  Attribute getFnAttr(Attribute::AttrKind Kind) const {
    return getAttributeAtIndex(FunctionIndex, Kind);
  }

  /// Return the attribute object that exists for the function.
  Attribute getFnAttr(StringRef Kind) const {
    return getAttributeAtIndex(FunctionIndex, Kind);
  }
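
  // A query sketch (AL is an assumed AttributeList; "frame-pointer" is just
  // an illustrative string attribute): enum-keyed and string-keyed attributes
  // share the same accessors, and a failed lookup yields an invalid Attribute.
  //
  // \code
  //   bool NoInline = AL.hasFnAttr(Attribute::NoInline);
  //   Attribute A = AL.getFnAttr("frame-pointer");
  //   if (A.isValid())
  //     StringRef V = A.getValueAsString();
  // \endcode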

  /// Return the alignment of the return value.
  MaybeAlign getRetAlignment() const;

  /// Return the alignment for the specified function parameter.
  MaybeAlign getParamAlignment(unsigned ArgNo) const;

  /// Return the stack alignment for the specified function parameter.
  MaybeAlign getParamStackAlignment(unsigned ArgNo) const;

  /// Return the byval type for the specified function parameter.
  Type *getParamByValType(unsigned ArgNo) const;

  /// Return the sret type for the specified function parameter.
  Type *getParamStructRetType(unsigned ArgNo) const;

  /// Return the byref type for the specified function parameter.
  Type *getParamByRefType(unsigned ArgNo) const;

  /// Return the preallocated type for the specified function parameter.
  Type *getParamPreallocatedType(unsigned ArgNo) const;

  /// Return the inalloca type for the specified function parameter.
  Type *getParamInAllocaType(unsigned ArgNo) const;

  /// Return the elementtype type for the specified function parameter.
  Type *getParamElementType(unsigned ArgNo) const;

  /// Get the stack alignment of the function.
  MaybeAlign getFnStackAlignment() const;

  /// Get the stack alignment of the return value.
  MaybeAlign getRetStackAlignment() const;

  /// Get the number of dereferenceable bytes (or zero if unknown) of the return
  /// value.
  uint64_t getRetDereferenceableBytes() const;

  /// Get the number of dereferenceable bytes (or zero if unknown) of an arg.
  uint64_t getParamDereferenceableBytes(unsigned Index) const;

  /// Get the number of dereferenceable_or_null bytes (or zero if unknown) of
  /// the return value.
  uint64_t getRetDereferenceableOrNullBytes() const;

  /// Get the number of dereferenceable_or_null bytes (or zero if unknown) of an
  /// arg.
  uint64_t getParamDereferenceableOrNullBytes(unsigned ArgNo) const;

  /// Get the disallowed floating-point classes of the return value.
  FPClassTest getRetNoFPClass() const;

  /// Get the disallowed floating-point classes of the argument value.
  FPClassTest getParamNoFPClass(unsigned ArgNo) const;

  /// Get the unwind table kind requested for the function.
  UWTableKind getUWTableKind() const;

  AllocFnKind getAllocKind() const;

  /// Returns memory effects of the function.
  MemoryEffects getMemoryEffects() const;

  /// Return the attributes at the index as a string.
  std::string getAsString(unsigned Index, bool InAttrGrp = false) const;

  /// Return true if this attribute list belongs to the LLVMContext.
  bool hasParentContext(LLVMContext &C) const;

  //===--------------------------------------------------------------------===//
  // AttributeList Introspection
  //===--------------------------------------------------------------------===//

  using iterator = const AttributeSet *;

  iterator begin() const;
  iterator end() const;

  unsigned getNumAttrSets() const;

  // Implementation of indexes(). Produces iterators that wrap an index. Mostly
  // to hide the awkwardness of unsigned wrapping when iterating over valid
  // indexes.
  struct index_iterator {
    unsigned NumAttrSets;
    index_iterator(int NumAttrSets) : NumAttrSets(NumAttrSets) {}
    struct int_wrapper {
      int_wrapper(unsigned i) : i(i) {}
      unsigned i;
      unsigned operator*() { return i; }
      bool operator!=(const int_wrapper &Other) { return i != Other.i; }
      int_wrapper &operator++() {
        // This is expected to undergo unsigned wrapping since FunctionIndex is
        // ~0 and that's where we start.
        ++i;
        return *this;
      }
    };

    int_wrapper begin() { return int_wrapper(AttributeList::FunctionIndex); }

    int_wrapper end() { return int_wrapper(NumAttrSets - 1); }
  };

  /// Use this to iterate over the valid attribute indexes.
  index_iterator indexes() const { return index_iterator(getNumAttrSets()); }
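
  // An iteration sketch (AL is an assumed AttributeList): indexes() starts at
  // FunctionIndex (~0U), wraps to ReturnIndex (0), and then walks the
  // argument indexes.
  //
  // \code
  //   for (unsigned Index : AL.indexes()) {
  //     AttributeSet AS = AL.getAttributes(Index);
  //     // ... inspect AS ...
  //   }
  // \endcode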

  /// operator==/!= - Provide equality predicates.
  bool operator==(const AttributeList &RHS) const { return pImpl == RHS.pImpl; }
  bool operator!=(const AttributeList &RHS) const { return pImpl != RHS.pImpl; }

  /// Return a raw pointer that uniquely identifies this attribute list.
  void *getRawPointer() const {
    return pImpl;
  }

  /// Return true if there are no attributes.
  bool isEmpty() const { return pImpl == nullptr; }

  void print(raw_ostream &O) const;

  void dump() const;
};

//===----------------------------------------------------------------------===//
/// \class
/// Provide DenseMapInfo for AttributeList.
template <> struct DenseMapInfo<AttributeList, void> {
  static AttributeList getEmptyKey() {
    auto Val = static_cast<uintptr_t>(-1);
    Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable;
    return AttributeList(reinterpret_cast<AttributeListImpl *>(Val));
  }

  static AttributeList getTombstoneKey() {
    auto Val = static_cast<uintptr_t>(-2);
    Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable;
    return AttributeList(reinterpret_cast<AttributeListImpl *>(Val));
  }

  static unsigned getHashValue(AttributeList AS) {
    return (unsigned((uintptr_t)AS.pImpl) >> 4) ^
           (unsigned((uintptr_t)AS.pImpl) >> 9);
  }

  static bool isEqual(AttributeList LHS, AttributeList RHS) {
    return LHS == RHS;
  }
};

//===----------------------------------------------------------------------===//
/// \class
/// This class is used in conjunction with the Attribute::get method to
/// create an Attribute object. The object itself is uniquified. The Builder's
/// value, however, is not. So this can be used as a quick way to test for
/// equality, presence of attributes, etc.
class AttrBuilder {
  LLVMContext &Ctx;
  SmallVector<Attribute, 8> Attrs;

public:
  AttrBuilder(LLVMContext &Ctx) : Ctx(Ctx) {}
  AttrBuilder(const AttrBuilder &) = delete;
  AttrBuilder(AttrBuilder &&) = default;

  AttrBuilder(LLVMContext &Ctx, const Attribute &A) : Ctx(Ctx) {
    addAttribute(A);
  }

  AttrBuilder(LLVMContext &Ctx, AttributeSet AS);

  void clear();

  /// Add an attribute to the builder.
  AttrBuilder &addAttribute(Attribute::AttrKind Val);

  /// Add the Attribute object to the builder.
  AttrBuilder &addAttribute(Attribute A);

  /// Add the target-dependent attribute to the builder.
  AttrBuilder &addAttribute(StringRef A, StringRef V = StringRef());

  /// Remove an attribute from the builder.
  AttrBuilder &removeAttribute(Attribute::AttrKind Val);

  /// Remove the target-dependent attribute from the builder.
  AttrBuilder &removeAttribute(StringRef A);

  /// Remove the target-dependent attribute from the builder.
  AttrBuilder &removeAttribute(Attribute A) {
    if (A.isStringAttribute())
      return removeAttribute(A.getKindAsString());
    else
      return removeAttribute(A.getKindAsEnum());
  }

  /// Add the attributes from the builder. Attributes in the passed builder
  /// overwrite attributes in this builder if they have the same key.
  AttrBuilder &merge(const AttrBuilder &B);

  /// Remove the attributes from the builder.
  AttrBuilder &remove(const AttributeMask &AM);

  /// Return true if the builder has any attribute that's in the
  /// specified mask.
  bool overlaps(const AttributeMask &AM) const;

  /// Return true if the builder has the specified attribute.
  bool contains(Attribute::AttrKind A) const;

  /// Return true if the builder has the specified target-dependent
  /// attribute.
  bool contains(StringRef A) const;

  /// Return true if the builder has IR-level attributes.
  bool hasAttributes() const { return !Attrs.empty(); }

  /// Return Attribute with the given Kind. The returned attribute will be
  /// invalid if the Kind is not present in the builder.
  Attribute getAttribute(Attribute::AttrKind Kind) const;

  /// Return Attribute with the given Kind. The returned attribute will be
  /// invalid if the Kind is not present in the builder.
  Attribute getAttribute(StringRef Kind) const;

  /// Return raw (possibly packed/encoded) value of integer attribute or
  /// std::nullopt if not set.
  std::optional<uint64_t> getRawIntAttr(Attribute::AttrKind Kind) const;

  /// Retrieve the alignment attribute, if it exists.
  MaybeAlign getAlignment() const {
    return MaybeAlign(getRawIntAttr(Attribute::Alignment).value_or(0));
  }

  /// Retrieve the stack alignment attribute, if it exists.
  MaybeAlign getStackAlignment() const {
    return MaybeAlign(getRawIntAttr(Attribute::StackAlignment).value_or(0));
  }

  /// Retrieve the number of dereferenceable bytes, if the
  /// dereferenceable attribute exists (zero is returned otherwise).
  uint64_t getDereferenceableBytes() const {
    return getRawIntAttr(Attribute::Dereferenceable).value_or(0);
  }

  /// Retrieve the number of dereferenceable_or_null bytes, if the
  /// dereferenceable_or_null attribute exists (zero is returned otherwise).
  uint64_t getDereferenceableOrNullBytes() const {
    return getRawIntAttr(Attribute::DereferenceableOrNull).value_or(0);
  }

  /// Retrieve type for the given type attribute.
  Type *getTypeAttr(Attribute::AttrKind Kind) const;

  /// Retrieve the byval type.
  Type *getByValType() const { return getTypeAttr(Attribute::ByVal); }

  /// Retrieve the sret type.
  Type *getStructRetType() const { return getTypeAttr(Attribute::StructRet); }

  /// Retrieve the byref type.
  Type *getByRefType() const { return getTypeAttr(Attribute::ByRef); }

  /// Retrieve the preallocated type.
  Type *getPreallocatedType() const {
    return getTypeAttr(Attribute::Preallocated);
  }

  /// Retrieve the inalloca type.
  Type *getInAllocaType() const { return getTypeAttr(Attribute::InAlloca); }

  /// Retrieve the allocsize args, or std::nullopt if the attribute does not
  /// exist.
  std::optional<std::pair<unsigned, std::optional<unsigned>>> getAllocSizeArgs()
      const;

  /// Add integer attribute with raw value (packed/encoded if necessary).
  AttrBuilder &addRawIntAttr(Attribute::AttrKind Kind, uint64_t Value);

  /// This turns an alignment into the form used internally in Attribute.
  /// This call has no effect if Align is not set.
  AttrBuilder &addAlignmentAttr(MaybeAlign Align);

  /// This turns an int alignment (which must be a power of 2) into the
  /// form used internally in Attribute.
  /// This call has no effect if Align is 0.
  /// Deprecated, use the version using a MaybeAlign.
  inline AttrBuilder &addAlignmentAttr(unsigned Align) {
    return addAlignmentAttr(MaybeAlign(Align));
  }

  /// This turns a stack alignment into the form used internally in Attribute.
  /// This call has no effect if Align is not set.
  AttrBuilder &addStackAlignmentAttr(MaybeAlign Align);

  /// This turns an int stack alignment (which must be a power of 2) into
  /// the form used internally in Attribute.
  /// This call has no effect if Align is 0.
  /// Deprecated, use the version using a MaybeAlign.
  inline AttrBuilder &addStackAlignmentAttr(unsigned Align) {
    return addStackAlignmentAttr(MaybeAlign(Align));
  }

  /// This turns the number of dereferenceable bytes into the form used
  /// internally in Attribute.
  AttrBuilder &addDereferenceableAttr(uint64_t Bytes);

  /// This turns the number of dereferenceable_or_null bytes into the
  /// form used internally in Attribute.
  AttrBuilder &addDereferenceableOrNullAttr(uint64_t Bytes);

  /// This turns one (or two) ints into the form used internally in Attribute.
  AttrBuilder &addAllocSizeAttr(unsigned ElemSizeArg,
                                const std::optional<unsigned> &NumElemsArg);

  /// This turns two ints into the form used internally in Attribute.
  AttrBuilder &addVScaleRangeAttr(unsigned MinValue,
                                  std::optional<unsigned> MaxValue);

  /// Add a type attribute with the given type.
  AttrBuilder &addTypeAttr(Attribute::AttrKind Kind, Type *Ty);

  /// This turns a byval type into the form used internally in Attribute.
  AttrBuilder &addByValAttr(Type *Ty);

  /// This turns a sret type into the form used internally in Attribute.
  AttrBuilder &addStructRetAttr(Type *Ty);

  /// This turns a byref type into the form used internally in Attribute.
  AttrBuilder &addByRefAttr(Type *Ty);

  /// This turns a preallocated type into the form used internally in Attribute.
  AttrBuilder &addPreallocatedAttr(Type *Ty);

  /// This turns an inalloca type into the form used internally in Attribute.
  AttrBuilder &addInAllocaAttr(Type *Ty);

  /// Add an allocsize attribute, using the representation returned by
  /// Attribute.getIntValue().
  AttrBuilder &addAllocSizeAttrFromRawRepr(uint64_t RawAllocSizeRepr);

  /// Add a vscale_range attribute, using the representation returned by
  /// Attribute.getIntValue().
  AttrBuilder &addVScaleRangeAttrFromRawRepr(uint64_t RawVScaleRangeRepr);

  /// This turns the unwind table kind into the form used internally in
  /// Attribute.
  AttrBuilder &addUWTableAttr(UWTableKind Kind);

  /// This turns the allocator kind into the form used internally in Attribute.
  AttrBuilder &addAllocKindAttr(AllocFnKind Kind);

  /// Add memory effect attribute.
  AttrBuilder &addMemoryAttr(MemoryEffects ME);

  /// Add the nofpclass attribute.
  AttrBuilder &addNoFPClassAttr(FPClassTest NoFPClassMask);

  ArrayRef<Attribute> attrs() const { return Attrs; }

  bool operator==(const AttrBuilder &B) const;
  bool operator!=(const AttrBuilder &B) const { return !(*this == B); }
};
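
// A minimal sketch, assuming an existing LLVMContext Ctx: accumulate
// attributes in an AttrBuilder, then intern them as the attribute set of the
// first parameter.
//
// \code
//   AttrBuilder B(Ctx);
//   B.addAttribute(Attribute::NoAlias)
//       .addAlignmentAttr(MaybeAlign(16))
//       .addDereferenceableAttr(32);
//   AttributeList AL =
//       AttributeList::get(Ctx, AttributeList::FirstArgIndex, B);
// \endcode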

namespace AttributeFuncs {

enum AttributeSafetyKind : uint8_t {
  ASK_SAFE_TO_DROP = 1,
  ASK_UNSAFE_TO_DROP = 2,
  ASK_ALL = ASK_SAFE_TO_DROP | ASK_UNSAFE_TO_DROP,
};

/// Returns true if this is a type legal for the 'nofpclass' attribute. This
/// follows the same type rules as FPMathOperator.
bool isNoFPClassCompatibleType(Type *Ty);

/// Which attributes cannot be applied to a type. The argument \p ASK indicates
/// whether only attributes that are known to be safely droppable are contained
/// in the mask; only attributes that might be unsafe to drop (e.g., ABI-related
/// attributes) are in the mask; or both.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK = ASK_ALL);
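
// A usage sketch (Ty, Ctx, AL, and ArgNo are assumed): compute the attributes
// a value of type Ty cannot carry, then strip the safely droppable ones from
// a parameter.
//
// \code
//   AttributeMask AM =
//       AttributeFuncs::typeIncompatible(Ty, AttributeFuncs::ASK_SAFE_TO_DROP);
//   AL = AL.removeParamAttributes(Ctx, ArgNo, AM);
// \endcode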

/// Get param/return attributes which imply immediate undefined behavior if an
/// invalid value is passed. For example, this includes noundef (where undef
/// implies UB), but not nonnull (where null implies poison). It also does not
/// include attributes like nocapture, which constrain the function
/// implementation rather than the passed value.
AttributeMask getUBImplyingAttributes();

/// \returns True if the two functions have compatible target-independent
/// attributes for inlining purposes.
bool areInlineCompatible(const Function &Caller, const Function &Callee);

/// Checks if there are any incompatible function attributes between
/// \p A and \p B.
///
/// \param [in] A - The first function to be compared with.
/// \param [in] B - The second function to be compared with.
/// \returns true if the functions have compatible attributes.
bool areOutlineCompatible(const Function &A, const Function &B);

/// Merge caller's and callee's attributes.
void mergeAttributesForInlining(Function &Caller, const Function &Callee);

/// Merges the functions attributes from \p ToMerge into function \p Base.
///
/// \param [in,out] Base - The function being merged into.
/// \param [in] ToMerge - The function to merge attributes from.
void mergeAttributesForOutlining(Function &Base, const Function &ToMerge);

/// Update min-legal-vector-width if it is in Attribute and less than Width.
void updateMinLegalVectorWidthAttr(Function &Fn, uint64_t Width);

} // end namespace AttributeFuncs

} // end namespace llvm

#endif // LLVM_IR_ATTRIBUTES_H
//===- llvm/Function.h - Class to represent a single function ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Function class, which represents a
// single function/procedure in LLVM.
//
// A function basically consists of a list of basic blocks, a list of arguments,
// and a symbol table.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_FUNCTION_H
#define LLVM_IR_FUNCTION_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>

namespace llvm {

namespace Intrinsic {
typedef unsigned ID;
}

class AssemblyAnnotationWriter;
class Constant;
struct DenormalMode;
class DISubprogram;
class LLVMContext;
class Module;
class raw_ostream;
class Type;
class User;
class BranchProbabilityInfo;
class BlockFrequencyInfo;

class LLVM_EXTERNAL_VISIBILITY Function : public GlobalObject,
                                          public ilist_node<Function> {
public:
  using BasicBlockListType = SymbolTableList<BasicBlock>;

  // BasicBlock iterators...
  using iterator = BasicBlockListType::iterator;
  using const_iterator = BasicBlockListType::const_iterator;

  using arg_iterator = Argument *;
  using const_arg_iterator = const Argument *;

private:
  // Important things that make up a function!
  BasicBlockListType BasicBlocks;         ///< The basic blocks
  mutable Argument *Arguments = nullptr;  ///< The formal arguments
  size_t NumArgs;
  std::unique_ptr<ValueSymbolTable>
      SymTab;                             ///< Symbol table of args/instructions
  AttributeList AttributeSets;            ///< Parameter attributes

  /*
   * Value::SubclassData
   *
   * bit 0      : HasLazyArguments
   * bit 1      : HasPrefixData
   * bit 2      : HasPrologueData
   * bit 3      : HasPersonalityFn
   * bits 4-13  : CallingConvention
   * bit 14     : HasGC
   * bit 15     : [reserved]
   */

  /// Bits from GlobalObject::GlobalObjectSubclassData.
  enum {
    /// Whether this function is materializable.
    IsMaterializableBit = 0,
  };

  friend class SymbolTableListTraits<Function>;

  /// hasLazyArguments/CheckLazyArguments - The argument list of a function is
  /// built on demand, so that the list isn't allocated until the first client
  /// needs it.  The hasLazyArguments predicate returns true if the arg list
  /// hasn't been set up yet.
public:
  bool hasLazyArguments() const {
    return getSubclassDataFromValue() & (1<<0);
  }

private:
  void CheckLazyArguments() const {
    if (hasLazyArguments())
      BuildLazyArguments();
  }

  void BuildLazyArguments() const;

  void clearArguments();

  /// Function ctor - If the (optional) Module argument is specified, the
  /// function is automatically inserted into the end of the function list for
  /// the module.
  ///
  Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
           const Twine &N = "", Module *M = nullptr);

public:
  Function(const Function&) = delete;
  void operator=(const Function&) = delete;
  ~Function();

  // This is here to help easily convert from FunctionT * (Function * or
  // MachineFunction *) in BlockFrequencyInfoImpl to Function * by calling
  // FunctionT->getFunction().
  const Function &getFunction() const { return *this; }

  static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
                          unsigned AddrSpace, const Twine &N = "",
                          Module *M = nullptr) {
    return new Function(Ty, Linkage, AddrSpace, N, M);
  }

  // TODO: remove this once all users have been updated to pass an AddrSpace
  static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
                          const Twine &N = "", Module *M = nullptr) {
    return new Function(Ty, Linkage, static_cast<unsigned>(-1), N, M);
  }

  /// Creates a new function and attaches it to a module.
  ///
  /// Places the function in the program address space as specified
  /// by the module's data layout.
  static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
                          const Twine &N, Module &M);

  /// Creates a function with some attributes recorded in llvm.module.flags
  /// applied.
  ///
  /// Use this when synthesizing new functions that need attributes that would
  /// have been set by command line options.
  static Function *createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage,
                                         unsigned AddrSpace,
                                         const Twine &N = "",
                                         Module *M = nullptr);
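
  // A creation sketch, assuming an existing Module M and LLVMContext Ctx (the
  // name "my_func" is hypothetical):
  //
  // \code
  //   FunctionType *FTy =
  //       FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
  //   Function *F = Function::Create(FTy, GlobalValue::ExternalLinkage,
  //                                  "my_func", M);
  // \endcode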

  // Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the number of non-debug IR instructions in this function.
  /// This is equivalent to the sum of the sizes of each basic block contained
  /// within this function.
  unsigned getInstructionCount() const;

  /// Returns the FunctionType of this function.
  FunctionType *getFunctionType() const {
    return cast<FunctionType>(getValueType());
  }

  /// Returns the type of the ret val.
  Type *getReturnType() const { return getFunctionType()->getReturnType(); }

  /// getContext - Return a reference to the LLVMContext associated with this
  /// function.
  LLVMContext &getContext() const;

  /// isVarArg - Return true if this function takes a variable number of
  /// arguments.
  bool isVarArg() const { return getFunctionType()->isVarArg(); }

  bool isMaterializable() const {
    return getGlobalObjectSubClassData() & (1 << IsMaterializableBit);
  }
  void setIsMaterializable(bool V) {
    unsigned Mask = 1 << IsMaterializableBit;
    setGlobalObjectSubClassData((~Mask & getGlobalObjectSubClassData()) |
                                (V ? Mask : 0u));
  }

  /// getIntrinsicID - This method returns the ID number of the specified
  /// function, or Intrinsic::not_intrinsic if the function is not an
  /// intrinsic, or if the pointer is null.  Intrinsic::not_intrinsic is always
  /// defined to be zero, to allow easy checking for whether a function is
  /// intrinsic or not.  The particular intrinsic functions which correspond to
  /// this value are defined in llvm/Intrinsics.h.
  Intrinsic::ID getIntrinsicID() const LLVM_READONLY { return IntID; }

  /// isIntrinsic - Returns true if the function's name starts with "llvm.".
  /// It's possible for this function to return true while getIntrinsicID()
  /// returns Intrinsic::not_intrinsic!
  bool isIntrinsic() const { return HasLLVMReservedName; }

  /// isTargetIntrinsic - Returns true if IID is an intrinsic specific to a
  /// certain target. If it is a generic intrinsic, false is returned.
  static bool isTargetIntrinsic(Intrinsic::ID IID);

  /// isTargetIntrinsic - Returns true if this function is an intrinsic and the
  /// intrinsic is specific to a certain target. If this is not an intrinsic
  /// or a generic intrinsic, false is returned.
  bool isTargetIntrinsic() const;

  /// Returns true if the function is one of the "Constrained Floating-Point
  /// Intrinsics". Returns false if not, and returns false when
  /// getIntrinsicID() returns Intrinsic::not_intrinsic.
  bool isConstrainedFPIntrinsic() const;

  static Intrinsic::ID lookupIntrinsicID(StringRef Name);

  /// Recalculate the ID for this function if it is an Intrinsic defined
  /// in llvm/Intrinsics.h.  Sets the intrinsic ID to Intrinsic::not_intrinsic
  /// if the name of this function does not match an intrinsic in that header.
  /// Note, this method does not need to be called directly, as it is called
  /// from Value::setName() whenever the name of this function changes.
  void recalculateIntrinsicID();

  /// getCallingConv()/setCallingConv(CC) - These method get and set the
  /// calling convention of this function.  The enum values for the known
  /// calling conventions are defined in CallingConv.h.
  CallingConv::ID getCallingConv() const {
    return static_cast<CallingConv::ID>((getSubclassDataFromValue() >> 4) &
                                        CallingConv::MaxID);
  }
  void setCallingConv(CallingConv::ID CC) {
    auto ID = static_cast<unsigned>(CC);
    assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
    setValueSubclassData((getSubclassDataFromValue() & 0xc00f) | (ID << 4));
  }

  enum ProfileCountType { PCT_Real, PCT_Synthetic };

  /// Class to represent profile counts.
  ///
  /// This class represents both real and synthetic profile counts.
  class ProfileCount {
  private:
    uint64_t Count = 0;
    ProfileCountType PCT = PCT_Real;

  public:
    ProfileCount(uint64_t Count, ProfileCountType PCT)
        : Count(Count), PCT(PCT) {}
    uint64_t getCount() const { return Count; }
    ProfileCountType getType() const { return PCT; }
    bool isSynthetic() const { return PCT == PCT_Synthetic; }
  };

  /// Set the entry count for this function.
  ///
  /// Entry count is the number of times this function was executed based on
  /// PGO data. \p Imports points to a set of GUIDs that need to
  /// be imported by the function for sample PGO, to enable the same inlines as
  /// the profiled optimized binary.
  void setEntryCount(ProfileCount Count,
                     const DenseSet<GlobalValue::GUID> *Imports = nullptr);

  /// A convenience wrapper for setting the entry count.
  void setEntryCount(uint64_t Count, ProfileCountType Type = PCT_Real,
                     const DenseSet<GlobalValue::GUID> *Imports = nullptr);

  /// Get the entry count for this function.
  ///
  /// Entry count is the number of times the function was executed.
  /// When AllowSynthetic is false, only real PGO data will be returned.
  std::optional<ProfileCount> getEntryCount(bool AllowSynthetic = false) const;

  /// Return true if the function is annotated with profile data.
  ///
  /// Presence of entry counts from a profile run implies the function has
  /// profile annotations. If IncludeSynthetic is false, only return true
  /// when the profile data is real.
  bool hasProfileData(bool IncludeSynthetic = false) const {
    return getEntryCount(IncludeSynthetic).has_value();
  }
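
  // An entry-count sketch (the count of 1000 is hypothetical): record a real
  // profile count and read it back.
  //
  // \code
  //   F.setEntryCount(1000, Function::PCT_Real);
  //   if (std::optional<Function::ProfileCount> PC = F.getEntryCount())
  //     uint64_t Count = PC->getCount();
  // \endcode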

  /// Returns the set of GUIDs that need to be imported to the function for
  /// sample PGO, to enable the same inlines as the profiled optimized binary.
  DenseSet<GlobalValue::GUID> getImportGUIDs() const;

  /// Set the section prefix for this function.
  void setSectionPrefix(StringRef Prefix);

  /// Get the section prefix for this function.
  std::optional<StringRef> getSectionPrefix() const;

  /// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
  ///                             to use during code generation.
  bool hasGC() const {
    return getSubclassDataFromValue() & (1<<14);
  }
  const std::string &getGC() const;
  void setGC(std::string Str);
  void clearGC();

  /// Return the attribute list for this Function.
  AttributeList getAttributes() const { return AttributeSets; }

  /// Set the attribute list for this Function.
  void setAttributes(AttributeList Attrs) { AttributeSets = Attrs; }

  // TODO: remove non-AtIndex versions of these methods.
  /// adds the attribute to the list of attributes.
  void addAttributeAtIndex(unsigned i, Attribute Attr);

  /// Add function attributes to this function.
  void addFnAttr(Attribute::AttrKind Kind);

  /// Add function attributes to this function.
  void addFnAttr(StringRef Kind, StringRef Val = StringRef());

  /// Add function attributes to this function.
  void addFnAttr(Attribute Attr);

  /// Add function attributes to this function.
  void addFnAttrs(const AttrBuilder &Attrs);

  /// Add return value attributes to this function.
  void addRetAttr(Attribute::AttrKind Kind);

  /// Add return value attributes to this function.
  void addRetAttr(Attribute Attr);

  /// Add return value attributes to this function.
  void addRetAttrs(const AttrBuilder &Attrs);

  /// adds the attribute to the list of attributes for the given arg.
  void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);

  /// adds the attribute to the list of attributes for the given arg.
  void addParamAttr(unsigned ArgNo, Attribute Attr);

  /// adds the attributes to the list of attributes for the given arg.
  void addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs);

  /// removes the attribute from the list of attributes.
  void removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind);

  /// removes the attribute from the list of attributes.
  void removeAttributeAtIndex(unsigned i, StringRef Kind);

  /// Remove function attributes from this function.
  void removeFnAttr(Attribute::AttrKind Kind);

  /// Remove function attribute from this function.
  void removeFnAttr(StringRef Kind);

  void removeFnAttrs(const AttributeMask &Attrs);

  /// removes the attribute from the return value list of attributes.
  void removeRetAttr(Attribute::AttrKind Kind);

  /// removes the attribute from the return value list of attributes.
  void removeRetAttr(StringRef Kind);

  /// removes the attributes from the return value list of attributes.
  void removeRetAttrs(const AttributeMask &Attrs);

  /// removes the attribute from the list of attributes.
  void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);

  /// removes the attribute from the list of attributes.
  void removeParamAttr(unsigned ArgNo, StringRef Kind);

  /// removes the attribute from the list of attributes.
  void removeParamAttrs(unsigned ArgNo, const AttributeMask &Attrs);
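
  // A mutation sketch: unlike the AttributeList helpers, these members update
  // this function's attribute list in place.
  //
  // \code
  //   F.addFnAttr(Attribute::NoUnwind);
  //   F.addParamAttr(/*ArgNo=*/0, Attribute::NoUndef);
  //   F.removeParamAttr(/*ArgNo=*/0, Attribute::NoUndef);
  // \endcode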

  /// Return true if the function has the attribute.
  bool hasFnAttribute(Attribute::AttrKind Kind) const;

  /// Return true if the function has the attribute.
  bool hasFnAttribute(StringRef Kind) const;

  /// check if an attribute is in the list of attributes for the return value.
  bool hasRetAttribute(Attribute::AttrKind Kind) const;

  /// check if an attribute is in the list of attributes for the given arg.
  bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const;

  /// gets the attribute from the list of attributes.
  Attribute getAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) const;

  /// gets the attribute from the list of attributes.
  Attribute getAttributeAtIndex(unsigned i, StringRef Kind) const;

  /// Return the attribute for the given attribute kind.
  Attribute getFnAttribute(Attribute::AttrKind Kind) const;

  /// Return the attribute for the given attribute kind.
  Attribute getFnAttribute(StringRef Kind) const;

  /// For a string attribute \p Kind, parse attribute as an integer.
  ///
  /// \returns \p Default if attribute is not present.
  ///
  /// \returns \p Default if there is an error parsing the attribute integer,
  /// in which case an error is emitted to the LLVMContext.
  uint64_t getFnAttributeAsParsedInteger(StringRef Kind,
                                         uint64_t Default = 0) const;

  /// gets the specified attribute from the list of attributes.
  Attribute getParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const;

  /// Return the stack alignment for the function.
  MaybeAlign getFnStackAlign() const {
    return AttributeSets.getFnStackAlignment();
  }

  /// Returns true if the function has ssp, sspstrong, or sspreq fn attrs.
  bool hasStackProtectorFnAttr() const;

  /// adds the dereferenceable attribute to the list of attributes for
  /// the given arg.
  void addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes);

  /// adds the dereferenceable_or_null attribute to the list of
  /// attributes for the given arg.
  void addDereferenceableOrNullParamAttr(unsigned ArgNo, uint64_t Bytes);

  MaybeAlign getParamAlign(unsigned ArgNo) const {
    return AttributeSets.getParamAlignment(ArgNo);
  }

  MaybeAlign getParamStackAlign(unsigned ArgNo) const {
    return AttributeSets.getParamStackAlignment(ArgNo);
  }

  /// Extract the byval type for a parameter.
  Type *getParamByValType(unsigned ArgNo) const {
    return AttributeSets.getParamByValType(ArgNo);
  }

  /// Extract the sret type for a parameter.
  Type *getParamStructRetType(unsigned ArgNo) const {
    return AttributeSets.getParamStructRetType(ArgNo);
  }

  /// Extract the inalloca type for a parameter.
  Type *getParamInAllocaType(unsigned ArgNo) const {
    return AttributeSets.getParamInAllocaType(ArgNo);
  }

  /// Extract the byref type for a parameter.
  Type *getParamByRefType(unsigned ArgNo) const {
    return AttributeSets.getParamByRefType(ArgNo);
  }

  /// Extract the preallocated type for a parameter.
  Type *getParamPreallocatedType(unsigned ArgNo) const {
    return AttributeSets.getParamPreallocatedType(ArgNo);
  }

  /// Extract the number of dereferenceable bytes for a parameter.
  /// @param ArgNo Index of an argument, with 0 being the first function arg.
  uint64_t getParamDereferenceableBytes(unsigned ArgNo) const {
    return AttributeSets.getParamDereferenceableBytes(ArgNo);
  }

  /// Extract the number of dereferenceable_or_null bytes for a
  /// parameter.
  /// @param ArgNo AttributeList ArgNo, referring to an argument.
  uint64_t getParamDereferenceableOrNullBytes(unsigned ArgNo) const {
    return AttributeSets.getParamDereferenceableOrNullBytes(ArgNo);
  }

  /// Extract the nofpclass attribute for a parameter.
  FPClassTest getParamNoFPClass(unsigned ArgNo) const {
    return AttributeSets.getParamNoFPClass(ArgNo);
  }

  /// Determine if the function is presplit coroutine.
  bool isPresplitCoroutine() const {
    return hasFnAttribute(Attribute::PresplitCoroutine);
  }
  void setPresplitCoroutine() { addFnAttr(Attribute::PresplitCoroutine); }
  void setSplittedCoroutine() { removeFnAttr(Attribute::PresplitCoroutine); }

  MemoryEffects getMemoryEffects() const;
  void setMemoryEffects(MemoryEffects ME);

  /// Determine if the function does not access memory.
  bool doesNotAccessMemory() const;
  void setDoesNotAccessMemory();

  /// Determine if the function does not access or only reads memory.
  bool onlyReadsMemory() const;
  void setOnlyReadsMemory();

  /// Determine if the function does not access or only writes memory.
  bool onlyWritesMemory() const;
  void setOnlyWritesMemory();

  /// Determine if the call can access memory only using pointers based
  /// on its arguments.
  bool onlyAccessesArgMemory() const;
  void setOnlyAccessesArgMemory();

  /// Determine if the function may only access memory that is
  ///  inaccessible from the IR.
  bool onlyAccessesInaccessibleMemory() const;
  void setOnlyAccessesInaccessibleMemory();

  /// Determine if the function may only access memory that is
  ///  either inaccessible from the IR or pointed to by its arguments.
  bool onlyAccessesInaccessibleMemOrArgMem() const;
  void setOnlyAccessesInaccessibleMemOrArgMem();
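
  // A memory-effects sketch: mark a function as read-only, then check the
  // derived predicate.
  //
  // \code
  //   F.setOnlyReadsMemory();
  //   assert(F.onlyReadsMemory());
  // \endcode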

  /// Determine if the function cannot return.
  bool doesNotReturn() const {
    return hasFnAttribute(Attribute::NoReturn);
  }
  void setDoesNotReturn() {
    addFnAttr(Attribute::NoReturn);
  }

  /// Determine if the function should not perform indirect branch tracking.
  bool doesNoCfCheck() const { return hasFnAttribute(Attribute::NoCfCheck); }

  /// Determine if the function cannot unwind.
  bool doesNotThrow() const {
    return hasFnAttribute(Attribute::NoUnwind);
  }
  void setDoesNotThrow() {
    addFnAttr(Attribute::NoUnwind);
  }

  /// Determine if the call cannot be duplicated.
  bool cannotDuplicate() const {
    return hasFnAttribute(Attribute::NoDuplicate);
  }
  void setCannotDuplicate() {
    addFnAttr(Attribute::NoDuplicate);
  }

  /// Determine if the call is convergent.
  bool isConvergent() const {
    return hasFnAttribute(Attribute::Convergent);
  }
  void setConvergent() {
    addFnAttr(Attribute::Convergent);
  }
  void setNotConvergent() {
    removeFnAttr(Attribute::Convergent);
  }

  /// Determine if the call is speculatable, i.e. it has no effects besides
  /// computing its result and no undefined behavior.
  bool isSpeculatable() const {
    return hasFnAttribute(Attribute::Speculatable);
  }
  void setSpeculatable() {
    addFnAttr(Attribute::Speculatable);
  }

  /// Determine if the call is known not to deallocate memory.
  bool doesNotFreeMemory() const {
    return onlyReadsMemory() || hasFnAttribute(Attribute::NoFree);
  }
  void setDoesNotFreeMemory() {
    addFnAttr(Attribute::NoFree);
  }

  /// Determine if the call cannot synchronize with other threads (nosync).
  bool hasNoSync() const {
    return hasFnAttribute(Attribute::NoSync);
  }
  void setNoSync() {
    addFnAttr(Attribute::NoSync);
  }

  /// Determine if the function is known not to recurse, directly or
  /// indirectly.
  bool doesNotRecurse() const {
    return hasFnAttribute(Attribute::NoRecurse);
  }
  void setDoesNotRecurse() {
    addFnAttr(Attribute::NoRecurse);
  }

  /// Determine if the function is required to make forward progress.
  bool mustProgress() const {
    return hasFnAttribute(Attribute::MustProgress) ||
           hasFnAttribute(Attribute::WillReturn);
  }
  void setMustProgress() { addFnAttr(Attribute::MustProgress); }

  /// Determine if the function will return.
  bool willReturn() const { return hasFnAttribute(Attribute::WillReturn); }
  void setWillReturn() { addFnAttr(Attribute::WillReturn); }

  /// Get what kind of unwind table entry to generate for this function.
  UWTableKind getUWTableKind() const {
    return AttributeSets.getUWTableKind();
  }

  /// True if the ABI mandates (or the user requested) that this
  /// function be in an unwind table.
  bool hasUWTable() const {
    return getUWTableKind() != UWTableKind::None;
  }
  void setUWTableKind(UWTableKind K) {
    addFnAttr(Attribute::getWithUWTableKind(getContext(), K));
  }
  /// True if this function needs an unwind table.
  bool needsUnwindTableEntry() const {
    return hasUWTable() || !doesNotThrow() || hasPersonalityFn();
  }

  /// Determine if the function returns a structure through its first
  /// or second pointer argument.
  bool hasStructRetAttr() const {
    return AttributeSets.hasParamAttr(0, Attribute::StructRet) ||
           AttributeSets.hasParamAttr(1, Attribute::StructRet);
  }

  /// Determine if the return value is marked with the NoAlias
  /// attribute.
  bool returnDoesNotAlias() const {
    return AttributeSets.hasRetAttr(Attribute::NoAlias);
  }
  void setReturnDoesNotAlias() { addRetAttr(Attribute::NoAlias); }

  /// Do not optimize this function (-O0).
  bool hasOptNone() const { return hasFnAttribute(Attribute::OptimizeNone); }

  /// Optimize this function for minimum size (-Oz).
  bool hasMinSize() const { return hasFnAttribute(Attribute::MinSize); }

  /// Optimize this function for size (-Os) or minimum size (-Oz).
  bool hasOptSize() const {
    return hasFnAttribute(Attribute::OptimizeForSize) || hasMinSize();
  }

  /// Returns the denormal handling type for the default rounding mode of the
  /// function.
  DenormalMode getDenormalMode(const fltSemantics &FPType) const;

  /// Return the representational value of "denormal-fp-math". Code interested
  /// in the semantics of the function should use getDenormalMode instead.
  DenormalMode getDenormalModeRaw() const;

  /// Return the representational value of "denormal-fp-math-f32". Code
  /// interested in the semantics of the function should use getDenormalMode
  /// instead.
  DenormalMode getDenormalModeF32Raw() const;

  /// copyAttributesFrom - copy all additional attributes (those not needed to
  /// create a Function) from the Function Src to this one.
  void copyAttributesFrom(const Function *Src);

  /// deleteBody - This method deletes the body of the function, and converts
  /// the linkage to external.
  ///
  void deleteBody() {
    dropAllReferences();
    setLinkage(ExternalLinkage);
  }

  /// removeFromParent - This method unlinks 'this' from the containing module,
  /// but does not delete it.
  ///
  void removeFromParent();

  /// eraseFromParent - This method unlinks 'this' from the containing module
  /// and deletes it.
  ///
  void eraseFromParent();

  /// Steal arguments from another function.
  ///
  /// Drop this function's arguments and splice in the ones from \c Src.
  /// Requires that this has no function body.
  void stealArgumentListFrom(Function &Src);

  /// Insert \p BB in the basic block list at \p Position. \returns an iterator
  /// to the newly inserted BB.
  Function::iterator insert(Function::iterator Position, BasicBlock *BB) {
    return BasicBlocks.insert(Position, BB);
  }

  /// Transfer all blocks from \p FromF to this function at \p ToIt.
  void splice(Function::iterator ToIt, Function *FromF) {
    splice(ToIt, FromF, FromF->begin(), FromF->end());
  }

  /// Transfer one BasicBlock from \p FromF at \p FromIt to this function
  /// at \p ToIt.
  void splice(Function::iterator ToIt, Function *FromF,
              Function::iterator FromIt) {
    auto FromItNext = std::next(FromIt);
    // Single-element splice is a noop if destination == source.
    if (ToIt == FromIt || ToIt == FromItNext)
      return;
    splice(ToIt, FromF, FromIt, FromItNext);
  }

  /// Transfer a range of basic blocks that belong to \p FromF from \p
  /// FromBeginIt to \p FromEndIt, to this function at \p ToIt.
  void splice(Function::iterator ToIt, Function *FromF,
              Function::iterator FromBeginIt,
              Function::iterator FromEndIt);

  /// Erases a range of BasicBlocks from \p FromIt to (not including) \p ToIt.
  /// \returns \p ToIt.
  Function::iterator erase(Function::iterator FromIt, Function::iterator ToIt);
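
  // A splice sketch (A and B are assumed Functions): move every basic block
  // of B to the end of A, leaving B's body empty.
  //
  // \code
  //   A.splice(A.end(), &B);
  // \endcode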

private:
  // These need access to the underlying BB list.
  friend void BasicBlock::removeFromParent();
  friend iplist<BasicBlock>::iterator BasicBlock::eraseFromParent();
  template <class BB_t, class BB_i_t, class BI_t, class II_t>
  friend class InstIterator;
  friend class llvm::SymbolTableListTraits<llvm::BasicBlock>;
  friend class llvm::ilist_node_with_parent<llvm::BasicBlock, llvm::Function>;

  /// Get the underlying elements of the Function... the basic block list is
  /// empty for external functions.
  ///
  /// This is deliberately private because we have implemented an adequate set
  /// of functions to modify the list, including Function::splice(),
  /// Function::erase(), Function::insert() etc.
  const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; }
        BasicBlockListType &getBasicBlockList()       { return BasicBlocks; }

  static BasicBlockListType Function::*getSublistAccess(BasicBlock*) {
    return &Function::BasicBlocks;
  }

public:
  const BasicBlock       &getEntryBlock() const   { return front(); }
        BasicBlock       &getEntryBlock()         { return front(); }

  //===--------------------------------------------------------------------===//
  // Symbol Table Accessing functions...

  /// getSymbolTable() - Return the symbol table if any, otherwise nullptr.
  ///
  inline ValueSymbolTable *getValueSymbolTable() { return SymTab.get(); }
  inline const ValueSymbolTable *getValueSymbolTable() const {
    return SymTab.get();
  }

  //===--------------------------------------------------------------------===//
  // BasicBlock iterator forwarding functions
  //
  iterator                begin()       { return BasicBlocks.begin(); }
  const_iterator          begin() const { return BasicBlocks.begin(); }
  iterator                end  ()       { return BasicBlocks.end();   }
  const_iterator          end  () const { return BasicBlocks.end();   }

  size_t                   size() const { return BasicBlocks.size();  }
  bool                    empty() const { return BasicBlocks.empty(); }
  const BasicBlock       &front() const { return BasicBlocks.front(); }
        BasicBlock       &front()       { return BasicBlocks.front(); }
  const BasicBlock        &back() const { return BasicBlocks.back();  }
        BasicBlock        &back()       { return BasicBlocks.back();  }

/// @name Function Argument Iteration
/// @{

  arg_iterator arg_begin() {
    CheckLazyArguments();
    return Arguments;
  }
  const_arg_iterator arg_begin() const {
    CheckLazyArguments();
    return Arguments;
  }

  arg_iterator arg_end() {
    CheckLazyArguments();
    return Arguments + NumArgs;
  }
  const_arg_iterator arg_end() const {
    CheckLazyArguments();
    return Arguments + NumArgs;
  }

  Argument* getArg(unsigned i) const {
    assert(i < NumArgs && "getArg() out of range!");
    CheckLazyArguments();
    return Arguments + i;
  }

  iterator_range<arg_iterator> args() {
    return make_range(arg_begin(), arg_end());
  }
  iterator_range<const_arg_iterator> args() const {
    return make_range(arg_begin(), arg_end());
  }

/// @}

  size_t arg_size() const { return NumArgs; }
  bool arg_empty() const { return arg_size() == 0; }
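
  /// Sketch of argument iteration via the accessors above (illustrative;
  /// \c F is a hypothetical non-null \c Function*, and \c NumUnused is a
  /// hypothetical counter):
  ///
  /// \code
  ///   for (const Argument &A : F->args())
  ///     if (A.use_empty())
  ///       ++NumUnused; // Count arguments with no uses.
  /// \endcode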

  /// Check whether this function has a personality function.
  bool hasPersonalityFn() const {
    return getSubclassDataFromValue() & (1<<3);
  }

  /// Get the personality function associated with this function.
  Constant *getPersonalityFn() const;
  void setPersonalityFn(Constant *Fn);

  /// Check whether this function has prefix data.
  bool hasPrefixData() const {
    return getSubclassDataFromValue() & (1<<1);
  }

  /// Get the prefix data associated with this function.
  Constant *getPrefixData() const;
  void setPrefixData(Constant *PrefixData);

  /// Check whether this function has prologue data.
  bool hasPrologueData() const {
    return getSubclassDataFromValue() & (1<<2);
  }

  /// Get the prologue data associated with this function.
  Constant *getPrologueData() const;
  void setPrologueData(Constant *PrologueData);

  /// Print the function to an output stream with an optional
  /// AssemblyAnnotationWriter.
  void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr,
             bool ShouldPreserveUseListOrder = false,
             bool IsForDebug = false) const;

  /// viewCFG - This function is meant for use from the debugger.  You can just
  /// say 'call F->viewCFG()' and a ghostview window should pop up from the
  /// program, displaying the CFG of the current function with the code for each
  /// basic block inside.  This depends on there being a 'dot' and 'gv' program
  /// in your path.
  ///
  void viewCFG() const;

  /// Extended form to print edge weights.
  void viewCFG(bool ViewCFGOnly, const BlockFrequencyInfo *BFI,
               const BranchProbabilityInfo *BPI) const;

  /// viewCFGOnly - This function is meant for use from the debugger.  It works
  /// just like viewCFG, but it does not include the contents of basic blocks
  /// into the nodes, just the label.  If you are only interested in the CFG
  /// this can make the graph smaller.
  ///
  void viewCFGOnly() const;

  /// Extended form to print edge weights.
  void viewCFGOnly(const BlockFrequencyInfo *BFI,
                   const BranchProbabilityInfo *BPI) const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::FunctionVal;
  }

  /// dropAllReferences() - This method causes all the subinstructions to "let
  /// go" of all references that they are maintaining.  This allows one to
  /// 'delete' a whole module at a time, even though there may be circular
  /// references... first all references are dropped, and all use counts go to
  /// zero.  Then everything is deleted for real.  Note that no operations are
  /// valid on an object that has "dropped all references", except operator
  /// delete.
  ///
  /// Since no other object in the module can have references into the body of a
  /// function, dropping all references deletes the entire body of the function,
  /// including any contained basic blocks.
  ///
  void dropAllReferences();

  /// hasAddressTaken - returns true if there are any uses of this function
  /// other than direct calls or invokes to it, or blockaddress expressions.
  /// Optionally passes back an offending user for diagnostic purposes, and can
  /// be told to ignore callback uses, assume-like pointer annotation calls,
  /// references in llvm.used and llvm.compiler.used variables, and the
  /// "clang.arc.attachedcall" operand bundle.
  bool hasAddressTaken(const User ** = nullptr,
                       bool IgnoreCallbackUses = false,
                       bool IgnoreAssumeLikeCalls = true,
                       bool IgnoreLLVMUsed = false,
                       bool IgnoreARCAttachedCall = false) const;

  /// isDefTriviallyDead - Return true if it is trivially safe to remove
  /// this function definition from the module (because it isn't externally
  /// visible, does not have its address taken, and has no callers).  To make
  /// this more accurate, call removeDeadConstantUsers first.
  bool isDefTriviallyDead() const;

  /// callsFunctionThatReturnsTwice - Return true if the function has a call to
  /// setjmp or other function that gcc recognizes as "returning twice".
  bool callsFunctionThatReturnsTwice() const;

  /// Set the attached subprogram.
  ///
  /// Calls \a setMetadata() with \a LLVMContext::MD_dbg.
  void setSubprogram(DISubprogram *SP);

  /// Get the attached subprogram.
  ///
  /// Calls \a getMetadata() with \a LLVMContext::MD_dbg and casts the result
  /// to \a DISubprogram.
  DISubprogram *getSubprogram() const;

  /// Returns true if we should emit debug info for profiling.
  bool shouldEmitDebugInfoForProfiling() const;

  /// Check if null pointer dereferencing is considered undefined behavior for
  /// the function.
  /// Return value: false => null pointer dereference is undefined.
  /// Return value: true =>  null pointer dereference is not undefined.
  bool nullPointerIsDefined() const;

private:
  void allocHungoffUselist();
  template<int Idx> void setHungoffOperand(Constant *C);

  /// Shadow Value::setValueSubclassData with a private forwarding method so
  /// that subclasses cannot accidentally use it.
  void setValueSubclassData(unsigned short D) {
    Value::setValueSubclassData(D);
  }
  void setValueSubclassDataBit(unsigned Bit, bool On);
};

/// Check whether null pointer dereferencing is considered undefined behavior
/// for a given function or an address space.
/// Null pointer access in non-zero address space is not considered undefined.
/// Return value: false => null pointer dereference is undefined.
/// Return value: true =>  null pointer dereference is not undefined.
bool NullPointerIsDefined(const Function *F, unsigned AS = 0);

template <>
struct OperandTraits<Function> : public HungoffOperandTraits<3> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(Function, Value)

} // end namespace llvm

#endif // LLVM_IR_FUNCTION_H

IR/DebugLoc.h
//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a number of lightweight data structures used
// to describe and track debug location information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DEBUGLOC_H
#define LLVM_IR_DEBUGLOC_H

#include "llvm/IR/TrackingMDRef.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {

  class LLVMContext;
  class raw_ostream;
  class DILocation;

  /// A debug info location.
  ///
  /// This class is a wrapper around a tracking reference to a \a DILocation
  /// pointer.
  ///
  /// To avoid extra includes, \a DebugLoc mirrors the \a DILocation API with
  /// one based on relatively opaque \a MDNode pointers.
  class DebugLoc {
    TrackingMDNodeRef Loc;

  public:
    DebugLoc() = default;

    /// Construct from a \a DILocation.
    DebugLoc(const DILocation *L);

    /// Construct from an \a MDNode.
    ///
    /// Note: if \c N is not a \a DILocation, a verifier check will fail, and
    /// accessors will crash.  However, construction from other nodes is
    /// supported in order to handle forward references when reading textual
    /// IR.
    explicit DebugLoc(const MDNode *N);

    /// Get the underlying \a DILocation.
    ///
    /// \pre !*this or \c isa<DILocation>(getAsMDNode()).
    /// @{
    DILocation *get() const;
    operator DILocation *() const { return get(); }
    DILocation *operator->() const { return get(); }
    DILocation &operator*() const { return *get(); }
    /// @}

    /// Check for null.
    ///
    /// Check for null in a way that is safe with broken debug info.  Unlike
    /// the conversion to \c DILocation, this doesn't require that \c Loc is of
    /// the right type.  Important for cases like \a llvm::StripDebugInfo() and
    /// \a Instruction::hasMetadata().
    explicit operator bool() const { return Loc; }

    /// Check whether this has a trivial destructor.
    bool hasTrivialDestructor() const { return Loc.hasTrivialDestructor(); }
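
    /// For example, the null-safe check above allows (sketch; \c I is a
    /// hypothetical non-null \c Instruction*):
    ///
    /// \code
    ///   if (DebugLoc DL = I->getDebugLoc())
    ///     errs() << "line " << DL.getLine() << "\n";
    /// \endcode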

    enum { ReplaceLastInlinedAt = true };
    /// Rebuild the entire inlined-at chain of \p DL so that the top of the
    /// chain is now inlined-at the new call site.
    /// \param   InlinedAt    The new outermost inlined-at in the chain.
    static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt,
                                    LLVMContext &Ctx,
                                    DenseMap<const MDNode *, MDNode *> &Cache);

    unsigned getLine() const;
    unsigned getCol() const;
    MDNode *getScope() const;
    DILocation *getInlinedAt() const;

    /// Get the fully inlined-at scope for a DebugLoc.
    ///
    /// Walks the inlined-at chain and returns the scope of the outermost
    /// inlined-at location.

    /// Rebuild the entire inlined-at chain by replacing the subprogram at the
    /// end of the chain with NewSP.
    static DebugLoc
    replaceInlinedAtSubprogram(const DebugLoc &DL, DISubprogram &NewSP,
                               LLVMContext &Ctx,
                               DenseMap<const MDNode *, MDNode *> &Cache);

    /// Find the debug info location for the start of the function.
    ///
    /// Walk up the scope chain of the given debug loc and find line number info
    /// for the function.
    ///
    /// FIXME: Remove this.  Users should use DILocation/DILocalScope API to
    /// find the subprogram, and then DILocation::get().
    DebugLoc getFnDebugLoc() const;

    /// Return \c this as a bare \a MDNode.
    MDNode *getAsMDNode() const { return Loc; }

    /// Check if the DebugLoc corresponds to implicit code.
    bool isImplicitCode() const;
    void setImplicitCode(bool ImplicitCode);

    bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; }
    bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; }

    void dump() const;

    /// Prints the source location as /path/to/file.exe:line:col @[inlined at].
    void print(raw_ostream &OS) const;
  };

} // end namespace llvm

#endif // LLVM_IR_DEBUGLOC_H

IR/ConstantRange.h
//===- ConstantRange.h - Represent a range ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Represent a range of possible values that may occur when the program is run
// for an integral value.  This keeps track of a lower and upper bound for the
// constant, which MAY wrap around the end of the numeric range.  To do this, it
// keeps track of a [lower, upper) bound, which specifies an interval just like
// STL iterators.  When used with boolean values, the following are important
// ranges:
//
//  [F, F) = {}     = Empty set
//  [T, F) = {T}
//  [F, T) = {F}
//  [T, T) = {F, T} = Full set
//
// The other integral ranges use min/max values for special range values. For
// example, for 8-bit types, it uses:
// [0, 0)     = {}       = Empty set
// [255, 255) = {0..255} = Full Set
//
// Note that ConstantRange can be used to represent either signed or
// unsigned ranges.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CONSTANTRANGE_H
#define LLVM_IR_CONSTANTRANGE_H

#include "llvm/ADT/APInt.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Compiler.h"
#include <cstdint>

namespace llvm {

class MDNode;
class raw_ostream;
struct KnownBits;

/// This class represents a range of values.
class [[nodiscard]] ConstantRange {
  APInt Lower, Upper;

  /// Create empty constant range with same bitwidth.
  ConstantRange getEmpty() const {
    return ConstantRange(getBitWidth(), false);
  }

  /// Create full constant range with same bitwidth.
  ConstantRange getFull() const {
    return ConstantRange(getBitWidth(), true);
  }

public:
  /// Initialize a full or empty set for the specified bit width.
  explicit ConstantRange(uint32_t BitWidth, bool isFullSet);

  /// Initialize a range to hold the single specified value.
  ConstantRange(APInt Value);

  /// Initialize a range of values explicitly. This will assert if Lower==Upper
  /// and they are not the minimum or maximum value for the type (i.e. neither
  /// an empty nor a full set). It will also assert if the two APInts do not
  /// have the same bit width.
  ConstantRange(APInt Lower, APInt Upper);

  /// Create empty constant range with the given bit width.
  static ConstantRange getEmpty(uint32_t BitWidth) {
    return ConstantRange(BitWidth, false);
  }

  /// Create full constant range with the given bit width.
  static ConstantRange getFull(uint32_t BitWidth) {
    return ConstantRange(BitWidth, true);
  }

  /// Create non-empty constant range with the given bounds. If Lower and
  /// Upper are the same, a full range is returned.
  static ConstantRange getNonEmpty(APInt Lower, APInt Upper) {
    if (Lower == Upper)
      return getFull(Lower.getBitWidth());
    return ConstantRange(std::move(Lower), std::move(Upper));
  }
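
  /// Construction sketch for the factories above (illustrative; the 8-bit
  /// width is an arbitrary choice):
  ///
  /// \code
  ///   ConstantRange Empty = ConstantRange::getEmpty(8); // {}
  ///   ConstantRange Full = ConstantRange::getFull(8);   // [0, 255]
  ///   ConstantRange R(APInt(8, 2), APInt(8, 5));        // {2, 3, 4}
  /// \endcode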

  /// Initialize a range based on a known bits constraint. The IsSigned flag
  /// indicates whether the constant range should be non-wrapping in the
  /// signed or the unsigned domain.
  static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned);

  /// Produce the smallest range such that all values that may satisfy the given
  /// predicate with any value contained within Other are contained in the
  /// returned range.  Formally, this returns a superset of
  /// 'union over all y in Other . { x : icmp op x y is true }'.  If the exact
  /// answer is not representable as a ConstantRange, the return value will be a
  /// proper superset of the above.
  ///
  /// Example: Pred = ult and Other = i8 [2, 5) returns Result = [0, 4)
  static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred,
                                             const ConstantRange &Other);

  /// Produce the largest range such that all values in the returned range
  /// satisfy the given predicate with all values contained within Other.
  /// Formally, this returns a subset of
  /// 'intersection over all y in Other . { x : icmp op x y is true }'.  If the
  /// exact answer is not representable as a ConstantRange, the return value
  /// will be a proper subset of the above.
  ///
  /// Example: Pred = ult and Other = i8 [2, 5) returns [0, 2)
  static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred,
                                                const ConstantRange &Other);

  /// Produce the exact range such that all values in the returned range satisfy
  /// the given predicate with any value contained within Other. Formally, this
  /// returns the exact answer when the allowed region (a superset) and the
  /// satisfying region (a subset) coincide, i.e. when
  /// 'union over all y in Other . { x : icmp op x y is true }' equals
  /// 'intersection over all y in Other . { x : icmp op x y is true }'.
  ///
  /// Example: Pred = ult and Other = i8 3 returns [0, 3)
  static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred,
                                           const APInt &Other);

  /// Does the predicate \p Pred hold between ranges this and \p Other?
  /// NOTE: false does not mean that inverse predicate holds!
  bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const;
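
  /// Worked example (sketch; the i8 width is an arbitrary choice):
  ///
  /// \code
  ///   ConstantRange A(APInt(8, 0), APInt(8, 4));  // [0, 4)
  ///   ConstantRange B(APInt(8, 4), APInt(8, 8));  // [4, 8)
  ///   bool AllULT = A.icmp(CmpInst::ICMP_ULT, B); // true: every A < every B
  /// \endcode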

  /// Return true iff CR1 ult CR2 is equivalent to CR1 slt CR2.
  /// Does not depend on strictness/direction of the predicate.
  static bool
  areInsensitiveToSignednessOfICmpPredicate(const ConstantRange &CR1,
                                            const ConstantRange &CR2);

  /// Return true iff CR1 ult CR2 is equivalent to CR1 sge CR2.
  /// Does not depend on strictness/direction of the predicate.
  static bool
  areInsensitiveToSignednessOfInvertedICmpPredicate(const ConstantRange &CR1,
                                                    const ConstantRange &CR2);

  /// If the comparison between the constant ranges \p CR1 and \p CR2
  /// is insensitive to the signedness of the comparison predicate,
  /// return a predicate equivalent to \p Pred, with flipped signedness
  /// (i.e. unsigned instead of signed or vice versa), and maybe inverted,
  /// otherwise returns CmpInst::Predicate::BAD_ICMP_PREDICATE.
  static CmpInst::Predicate
  getEquivalentPredWithFlippedSignedness(CmpInst::Predicate Pred,
                                         const ConstantRange &CR1,
                                         const ConstantRange &CR2);

  /// Produce the largest range containing all X such that "X BinOp Y" is
  /// guaranteed not to wrap (overflow) for *all* Y in Other. However, there may
  /// be *some* Y in Other for which additional X not contained in the result
  /// also do not overflow.
  ///
  /// NoWrapKind must be one of OBO::NoUnsignedWrap or OBO::NoSignedWrap.
  ///
  /// Examples:
  ///  typedef OverflowingBinaryOperator OBO;
  ///  #define MGNR makeGuaranteedNoWrapRegion
  ///  MGNR(Add, [i8 1, 2), OBO::NoSignedWrap) == [-128, 127)
  ///  MGNR(Add, [i8 1, 2), OBO::NoUnsignedWrap) == [0, -1)
  ///  MGNR(Add, [i8 0, 1), OBO::NoUnsignedWrap) == Full Set
  ///  MGNR(Add, [i8 -1, 6), OBO::NoSignedWrap) == [INT_MIN+1, INT_MAX-4)
  ///  MGNR(Sub, [i8 1, 2), OBO::NoSignedWrap) == [-127, 128)
  ///  MGNR(Sub, [i8 1, 2), OBO::NoUnsignedWrap) == [1, 0)
  static ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
                                                  const ConstantRange &Other,
                                                  unsigned NoWrapKind);

  /// Produce the range that contains X if and only if "X BinOp Other" does
  /// not wrap.
  static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp,
                                             const APInt &Other,
                                             unsigned NoWrapKind);

  /// Returns true if ConstantRange calculations are supported for intrinsic
  /// with \p IntrinsicID.
  static bool isIntrinsicSupported(Intrinsic::ID IntrinsicID);

  /// Compute range of intrinsic result for the given operand ranges.
  static ConstantRange intrinsic(Intrinsic::ID IntrinsicID,
                                 ArrayRef<ConstantRange> Ops);

  /// Set up \p Pred and \p RHS such that
  /// ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.  Return true if
  /// successful.
  bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const;

  /// Set up \p Pred, \p RHS and \p Offset such that (V + Offset) Pred RHS
  /// is true iff V is in the range. Prefers using Offset == 0 if possible.
  void
  getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS, APInt &Offset) const;

  /// Return the lower value for this range.
  const APInt &getLower() const { return Lower; }

  /// Return the upper value for this range.
  const APInt &getUpper() const { return Upper; }

  /// Get the bit width of this ConstantRange.
  uint32_t getBitWidth() const { return Lower.getBitWidth(); }

  /// Return true if this set contains all of the elements possible
  /// for this data-type.
  bool isFullSet() const;

  /// Return true if this set contains no members.
  bool isEmptySet() const;

  /// Return true if this set wraps around the unsigned domain. Special cases:
  ///  * Empty set: Not wrapped.
  ///  * Full set: Not wrapped.
  ///  * [X, 0) == [X, Max]: Not wrapped.
  bool isWrappedSet() const;

  /// Return true if the exclusive upper bound wraps around the unsigned
  /// domain. Special cases:
  ///  * Empty set: Not wrapped.
  ///  * Full set: Not wrapped.
  ///  * [X, 0): Wrapped.
  bool isUpperWrapped() const;

  /// Return true if this set wraps around the signed domain. Special cases:
  ///  * Empty set: Not wrapped.
  ///  * Full set: Not wrapped.
  ///  * [X, SignedMin) == [X, SignedMax]: Not wrapped.
  bool isSignWrappedSet() const;

  /// Return true if the (exclusive) upper bound wraps around the signed
  /// domain. Special cases:
  ///  * Empty set: Not wrapped.
  ///  * Full set: Not wrapped.
  ///  * [X, SignedMin): Wrapped.
  bool isUpperSignWrapped() const;

  /// Return true if the specified value is in the set.
  bool contains(const APInt &Val) const;

  /// Return true if the other range is a subset of this one.
  bool contains(const ConstantRange &CR) const;

  /// If this set contains a single element, return it, otherwise return null.
  const APInt *getSingleElement() const {
    if (Upper == Lower + 1)
      return &Lower;
    return nullptr;
  }

  /// If this set contains all but a single element, return it, otherwise return
  /// null.
  const APInt *getSingleMissingElement() const {
    if (Lower == Upper + 1)
      return &Upper;
    return nullptr;
  }

  /// Return true if this set contains exactly one member.
  bool isSingleElement() const { return getSingleElement() != nullptr; }

  /// Compare set size of this range with the range CR.
  bool isSizeStrictlySmallerThan(const ConstantRange &CR) const;

  /// Compare set size of this range with \p MaxSize.
  bool isSizeLargerThan(uint64_t MaxSize) const;

  /// Return true if all values in this range are negative.
  bool isAllNegative() const;

  /// Return true if all values in this range are non-negative.
  bool isAllNonNegative() const;

  /// Return the largest unsigned value contained in the ConstantRange.
  APInt getUnsignedMax() const;

  /// Return the smallest unsigned value contained in the ConstantRange.
  APInt getUnsignedMin() const;

  /// Return the largest signed value contained in the ConstantRange.
  APInt getSignedMax() const;

  /// Return the smallest signed value contained in the ConstantRange.
  APInt getSignedMin() const;

  /// Return true if this range is equal to another range.
  bool operator==(const ConstantRange &CR) const {
    return Lower == CR.Lower && Upper == CR.Upper;
  }
  bool operator!=(const ConstantRange &CR) const {
    return !operator==(CR);
  }

  /// Compute the maximal number of active bits needed to represent every value
  /// in this range.
  unsigned getActiveBits() const;

  /// Compute the maximal number of bits needed to represent every value
  /// in this signed range.
  unsigned getMinSignedBits() const;

  /// Subtract the specified constant from the endpoints of this constant range.
  ConstantRange subtract(const APInt &CI) const;

  /// Subtract the specified range from this range (aka relative complement of
  /// the sets).
  ConstantRange difference(const ConstantRange &CR) const;

  /// If represented precisely, the result of some range operations may consist
  /// of multiple disjoint ranges. As only a single range may be returned, any
  /// range covering these disjoint ranges constitutes a valid result, but some
  /// may be more useful than others depending on context. The preferred range
  /// type specifies whether a range that is non-wrapping in the unsigned or
  /// signed domain, or has the smallest size, is preferred. If a signedness is
  /// preferred but all ranges are non-wrapping or all wrapping, then the
  /// smallest set size is preferred. If there are multiple smallest sets, any
  /// one of them may be returned.
  enum PreferredRangeType { Smallest, Unsigned, Signed };

  /// Return the range that results from the intersection of this range with
  /// another range. If the intersection is disjoint, such that two results
  /// are possible, the preferred range is determined by the PreferredRangeType.
  ConstantRange intersectWith(const ConstantRange &CR,
                              PreferredRangeType Type = Smallest) const;

  /// Return the range that results from the union of this range
  /// with another range.  The resultant range is guaranteed to include the
  /// elements of both sets, but may contain more.  For example, [3, 9) union
  /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included
  /// in either set before.
  ConstantRange unionWith(const ConstantRange &CR,
                          PreferredRangeType Type = Smallest) const;

  /// Intersect the two ranges and return the result if it can be represented
  /// exactly, otherwise return std::nullopt.
  std::optional<ConstantRange>
  exactIntersectWith(const ConstantRange &CR) const;

  /// Union the two ranges and return the result if it can be represented
  /// exactly, otherwise return std::nullopt.
  std::optional<ConstantRange> exactUnionWith(const ConstantRange &CR) const;
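
  /// Sketch mirroring the unionWith example above (i8 width assumed):
  ///
  /// \code
  ///   ConstantRange A(APInt(8, 3), APInt(8, 9));   // [3, 9)
  ///   ConstantRange B(APInt(8, 12), APInt(8, 15)); // [12, 15)
  ///   ConstantRange U = A.unionWith(B);            // [3, 15)
  ///   // exactUnionWith(B) would return std::nullopt here, since the true
  ///   // union {3..8} u {12..14} is not a single contiguous range.
  /// \endcode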

  /// Return a new range representing the possible values resulting
  /// from an application of the specified cast operator to this range. \p
  /// BitWidth is the target bitwidth of the cast.  For casts which don't
  /// change bitwidth, it must be the same as the source bitwidth.  For casts
  /// which do change bitwidth, the bitwidth must be consistent with the
  /// requested cast and source bitwidth.
  ConstantRange castOp(Instruction::CastOps CastOp,
                       uint32_t BitWidth) const;

  /// Return a new range in the specified integer type, which must
  /// be strictly larger than the current type.  The returned range will
  /// correspond to the possible range of values if the source range had been
  /// zero extended to BitWidth.
  ConstantRange zeroExtend(uint32_t BitWidth) const;

  /// Return a new range in the specified integer type, which must
  /// be strictly larger than the current type.  The returned range will
  /// correspond to the possible range of values if the source range had been
  /// sign extended to BitWidth.
  ConstantRange signExtend(uint32_t BitWidth) const;

  /// Return a new range in the specified integer type, which must be
  /// strictly smaller than the current type.  The returned range will
  /// correspond to the possible range of values if the source range had been
  /// truncated to the specified type.
  ConstantRange truncate(uint32_t BitWidth) const;

  /// Make this range have the bit width given by \p BitWidth. The
  /// value is zero extended, truncated, or left alone to make it that width.
  ConstantRange zextOrTrunc(uint32_t BitWidth) const;

  /// Make this range have the bit width given by \p BitWidth. The
  /// value is sign extended, truncated, or left alone to make it that width.
  ConstantRange sextOrTrunc(uint32_t BitWidth) const;

  /// Return a new range representing the possible values resulting
  /// from an application of the specified binary operator to a left hand side
  /// of this range and a right hand side of \p Other.
  ConstantRange binaryOp(Instruction::BinaryOps BinOp,
                         const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from an application of the specified overflowing binary operator to a
  /// left hand side of this range and a right hand side of \p Other given
  /// the provided knowledge about lack of wrapping \p NoWrapKind.
  ConstantRange overflowingBinaryOp(Instruction::BinaryOps BinOp,
                                    const ConstantRange &Other,
                                    unsigned NoWrapKind) const;

  /// Return a new range representing the possible values resulting
  /// from an addition of a value in this range and a value in \p Other.
  ConstantRange add(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from an addition with wrap type \p NoWrapKind of a value in this
  /// range and a value in \p Other.
  /// If the result range is disjoint, the preferred range is determined by the
  /// \p PreferredRangeType.
  ConstantRange addWithNoWrap(const ConstantRange &Other, unsigned NoWrapKind,
                              PreferredRangeType RangeType = Smallest) const;

  /// Return a new range representing the possible values resulting
  /// from a subtraction of a value in this range and a value in \p Other.
  ConstantRange sub(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a subtraction with wrap type \p NoWrapKind of a value in this
  /// range and a value in \p Other.
  /// If the result range is disjoint, the preferred range is determined by the
  /// \p PreferredRangeType.
  ConstantRange subWithNoWrap(const ConstantRange &Other, unsigned NoWrapKind,
                              PreferredRangeType RangeType = Smallest) const;

  /// Return a new range representing the possible values resulting
  /// from a multiplication of a value in this range and a value in \p Other,
  /// treating both this and \p Other as unsigned ranges.
  ConstantRange multiply(const ConstantRange &Other) const;

  /// Return range of possible values for a signed multiplication of this and
  /// \p Other. However, if overflow is possible always return a full range
  /// rather than trying to determine a more precise result.
  ConstantRange smul_fast(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a signed maximum of a value in this range and a value in \p Other.
  ConstantRange smax(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from an unsigned maximum of a value in this range and a value in \p Other.
  ConstantRange umax(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a signed minimum of a value in this range and a value in \p Other.
  ConstantRange smin(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from an unsigned minimum of a value in this range and a value in \p Other.
  ConstantRange umin(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from an unsigned division of a value in this range and a value in
  /// \p Other.
  ConstantRange udiv(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a signed division of a value in this range and a value in
  /// \p Other. Division by zero and division of SignedMin by -1 are considered
  /// undefined behavior, in line with IR, and do not contribute towards the
  /// result.
  ConstantRange sdiv(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from an unsigned remainder operation of a value in this range and a
  /// value in \p Other.
  ConstantRange urem(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a signed remainder operation of a value in this range and a
  /// value in \p Other.
  ConstantRange srem(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting from
  /// a binary-xor of a value in this range by an all-one value,
  /// aka bitwise complement operation.
  ConstantRange binaryNot() const;

  /// Return a new range representing the possible values resulting
  /// from a binary-and of a value in this range by a value in \p Other.
  ConstantRange binaryAnd(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a binary-or of a value in this range by a value in \p Other.
  ConstantRange binaryOr(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a binary-xor of a value in this range by a value in \p Other.
  ConstantRange binaryXor(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting
  /// from a left shift of a value in this range by a value in \p Other.
  /// TODO: This isn't fully implemented yet.
  ConstantRange shl(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting from a
  /// logical right shift of a value in this range and a value in \p Other.
  ConstantRange lshr(const ConstantRange &Other) const;

  /// Return a new range representing the possible values resulting from an
  /// arithmetic right shift of a value in this range and a value in \p Other.
  ConstantRange ashr(const ConstantRange &Other) const;

  /// Perform an unsigned saturating addition of two constant ranges.
  ConstantRange uadd_sat(const ConstantRange &Other) const;

  /// Perform a signed saturating addition of two constant ranges.
  ConstantRange sadd_sat(const ConstantRange &Other) const;

  /// Perform an unsigned saturating subtraction of two constant ranges.
  ConstantRange usub_sat(const ConstantRange &Other) const;

  /// Perform a signed saturating subtraction of two constant ranges.
  ConstantRange ssub_sat(const ConstantRange &Other) const;

  /// Perform an unsigned saturating multiplication of two constant ranges.
  ConstantRange umul_sat(const ConstantRange &Other) const;

  /// Perform a signed saturating multiplication of two constant ranges.
  ConstantRange smul_sat(const ConstantRange &Other) const;

  /// Perform an unsigned saturating left shift of this constant range by a
  /// value in \p Other.
  ConstantRange ushl_sat(const ConstantRange &Other) const;

  /// Perform a signed saturating left shift of this constant range by a
  /// value in \p Other.
  ConstantRange sshl_sat(const ConstantRange &Other) const;

  /// Return a new range that is the complement of the current set.
  ConstantRange inverse() const;

  /// Calculate absolute value range. If the original range contains signed
  /// min, then the resulting range will contain signed min if and only if
  /// \p IntMinIsPoison is false.
  ConstantRange abs(bool IntMinIsPoison = false) const;

  /// Calculate ctlz range. If \p ZeroIsPoison is set, the range is computed
  /// ignoring a possible zero value contained in the input range.
  ConstantRange ctlz(bool ZeroIsPoison = false) const;

  /// Represents whether an operation on the given constant range is known to
  /// always or never overflow.
  enum class OverflowResult {
    /// Always overflows in the direction of signed/unsigned min value.
    AlwaysOverflowsLow,
    /// Always overflows in the direction of signed/unsigned max value.
    AlwaysOverflowsHigh,
    /// May or may not overflow.
    MayOverflow,
    /// Never overflows.
    NeverOverflows,
  };

  /// Return whether unsigned add of the two ranges always/never overflows.
  OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const;

  /// Return whether signed add of the two ranges always/never overflows.
  OverflowResult signedAddMayOverflow(const ConstantRange &Other) const;

  /// Return whether unsigned sub of the two ranges always/never overflows.
  OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const;

  /// Return whether signed sub of the two ranges always/never overflows.
  OverflowResult signedSubMayOverflow(const ConstantRange &Other) const;

  /// Return whether unsigned mul of the two ranges always/never overflows.
  OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const;

  /// Return known bits for values in this range.
  KnownBits toKnownBits() const;

  /// Print out the bounds to a stream.
  void print(raw_ostream &OS) const;

  /// Allow printing from a debugger easily.
  void dump() const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) {
  CR.print(OS);
  return OS;
}

/// Parse out a conservative ConstantRange from !range metadata.
///
/// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD);

} // end namespace llvm

#endif // LLVM_IR_CONSTANTRANGE_H

IR/Mangler.h
//===-- llvm/IR/Mangler.h - Self-contained name mangler ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Unified name mangler for various backends.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MANGLER_H
#define LLVM_IR_MANGLER_H

#include "llvm/ADT/DenseMap.h"

namespace llvm {

class DataLayout;
class GlobalValue;
template <typename T> class SmallVectorImpl;
class Triple;
class Twine;
class raw_ostream;

class Mangler {
  /// We need to give global values the same name every time they are mangled.
  /// This keeps track of the number we give to anonymous ones.
  mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;

public:
  /// Print the appropriate prefix and the specified global variable's name.
  /// If the global variable doesn't have a name, this fills in a unique name
  /// for the global.
  void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
                         bool CannotUsePrivateLabel) const;
  void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
                         bool CannotUsePrivateLabel) const;

  /// Print the appropriate prefix and the specified name as the global variable
  /// name. GVName must not be empty.
  static void getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
                                const DataLayout &DL);
  static void getNameWithPrefix(SmallVectorImpl<char> &OutName,
                                const Twine &GVName, const DataLayout &DL);
};
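
/// Usage sketch (illustrative; \c GV is a hypothetical non-null
/// \c GlobalValue*):
///
/// \code
///   Mangler M;
///   SmallString<64> Name;
///   M.getNameWithPrefix(Name, GV, /*CannotUsePrivateLabel=*/false);
/// \endcode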

void emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
                                  const Triple &TT, Mangler &Mangler);

void emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
                                const Triple &T, Mangler &M);

} // End llvm namespace

#endif

IR/DerivedTypes.h
//===- llvm/DerivedTypes.h - Classes for handling data types ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of classes that represent "derived
// types".  These are things like "arrays of x" or "structure of x, y, z" or
// "function returning x taking (y,z) as parameters", etc...
//
// The implementations of these classes live in the Type.cpp file.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DERIVEDTYPES_H
#define LLVM_IR_DERIVEDTYPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class Value;
class APInt;
class LLVMContext;

/// Class to represent integer types. Note that this class is also used to
/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
/// Int64Ty.
class IntegerType : public Type {
  friend class LLVMContextImpl;

protected:
  explicit IntegerType(LLVMContext &C, unsigned NumBits) : Type(C, IntegerTyID){
    setSubclassData(NumBits);
  }

public:
  /// This enum is just used to hold constants we need for IntegerType.
  enum {
    MIN_INT_BITS = 1,        ///< Minimum number of bits that can be specified
    MAX_INT_BITS = (1<<23)   ///< Maximum number of bits that can be specified
      ///< Note that bit width is stored in the Type classes SubclassData field
      ///< which has 24 bits. SelectionDAG type legalization can require a
      ///< power of 2 IntegerType, so limit to the largest representable power
      ///< of 2, 8388608.
  };

  /// This static method is the primary way of constructing an IntegerType.
  /// If an IntegerType with the same NumBits value was previously instantiated,
  /// that instance will be returned. Otherwise a new one will be created. Only
  /// one instance with a given NumBits value is ever created.
  static IntegerType *get(LLVMContext &C, unsigned NumBits);

  /// Returns a type twice as wide as the input type.
  IntegerType *getExtendedType() const {
    return Type::getIntNTy(getContext(), 2 * getScalarSizeInBits());
  }

  /// Get the number of bits in this IntegerType
  unsigned getBitWidth() const { return getSubclassData(); }

  /// Return a bitmask with ones set for all of the bits that can be set by an
  /// unsigned version of this type. This is 0xFF for i8, 0xFFFF for i16, etc.
  uint64_t getBitMask() const {
    return ~uint64_t(0UL) >> (64-getBitWidth());
  }

  /// Return a uint64_t with just the most significant bit set (the sign bit, if
  /// the value is treated as a signed number).
  uint64_t getSignBit() const {
    return 1ULL << (getBitWidth()-1);
  }
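
  /// Sketch of the two masks above (the values follow from the formulas;
  /// \c Ctx is a hypothetical LLVMContext):
  ///
  /// \code
  ///   IntegerType *I16 = IntegerType::get(Ctx, 16);
  ///   uint64_t Mask = I16->getBitMask(); // 0xFFFF
  ///   uint64_t Sign = I16->getSignBit(); // 0x8000
  /// \endcode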

  /// Get a bit mask for this type.
  /// @returns a bit mask with ones set for all the bits of this type.
  /// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
  APInt getMask() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == IntegerTyID;
  }
};

unsigned Type::getIntegerBitWidth() const {
  return cast<IntegerType>(this)->getBitWidth();
}

/// Class to represent function types
///
class FunctionType : public Type {
  FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);

public:
  FunctionType(const FunctionType &) = delete;
  FunctionType &operator=(const FunctionType &) = delete;

  /// This static method is the primary way of constructing a FunctionType.
  static FunctionType *get(Type *Result,
                           ArrayRef<Type*> Params, bool isVarArg);

  /// Create a FunctionType taking no parameters.
  static FunctionType *get(Type *Result, bool isVarArg);

  /// Return true if the specified type is valid as a return type.
  static bool isValidReturnType(Type *RetTy);

  /// Return true if the specified type is valid as an argument type.
  static bool isValidArgumentType(Type *ArgTy);
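
  /// Construction sketch (illustrative; \c Ctx is a hypothetical
  /// LLVMContext):
  ///
  /// \code
  ///   // i32 (i8, i8, ...): two fixed i8 parameters plus varargs.
  ///   FunctionType *FTy = FunctionType::get(
  ///       Type::getInt32Ty(Ctx),
  ///       {Type::getInt8Ty(Ctx), Type::getInt8Ty(Ctx)},
  ///       /*isVarArg=*/true);
  /// \endcode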

  bool isVarArg() const { return getSubclassData()!=0; }
  Type *getReturnType() const { return ContainedTys[0]; }

  using param_iterator = Type::subtype_iterator;

  param_iterator param_begin() const { return ContainedTys + 1; }
  param_iterator param_end() const { return &ContainedTys[NumContainedTys]; }
  ArrayRef<Type *> params() const {
    return ArrayRef(param_begin(), param_end());
  }

  /// Parameter type accessors.
  Type *getParamType(unsigned i) const { return ContainedTys[i+1]; }

  /// Return the number of fixed parameters this function type requires.
  /// This does not consider varargs.
  unsigned getNumParams() const { return NumContainedTys - 1; }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == FunctionTyID;
  }
};
static_assert(alignof(FunctionType) >= alignof(Type *),
              "Alignment sufficient for objects appended to FunctionType");

bool Type::isFunctionVarArg() const {
  return cast<FunctionType>(this)->isVarArg();
}

Type *Type::getFunctionParamType(unsigned i) const {
  return cast<FunctionType>(this)->getParamType(i);
}

unsigned Type::getFunctionNumParams() const {
  return cast<FunctionType>(this)->getNumParams();
}

/// A handy container for a FunctionType+Callee-pointer pair, which can be
/// passed around as a single entity. This assists in replacing the use of
/// PointerType::getElementType() to access the function's type, since that's
/// slated for removal as part of the [opaque pointer types] project.
class FunctionCallee {
public:
  // Allow implicit conversion from types which have a getFunctionType member
  // (e.g. Function and InlineAsm).
  template <typename T, typename U = decltype(&T::getFunctionType)>
  FunctionCallee(T *Fn)
      : FnTy(Fn ? Fn->getFunctionType() : nullptr), Callee(Fn) {}

  FunctionCallee(FunctionType *FnTy, Value *Callee)
      : FnTy(FnTy), Callee(Callee) {
    assert((FnTy == nullptr) == (Callee == nullptr));
  }

  FunctionCallee(std::nullptr_t) {}

  FunctionCallee() = default;

  FunctionType *getFunctionType() { return FnTy; }

  Value *getCallee() { return Callee; }

  explicit operator bool() { return Callee; }

private:
  FunctionType *FnTy = nullptr;
  Value *Callee = nullptr;
};

/// Class to represent struct types. There are two different kinds of struct
/// types: Literal structs and Identified structs.
///
/// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
/// always have a body when created.  You can get one of these by using one of
/// the StructType::get() forms.
///
/// Identified structs (e.g. %foo or %42) may optionally have a name and are not
/// uniqued.  The names for identified structs are managed at the LLVMContext
/// level, so there can only be a single identified struct with a given name in
/// a particular LLVMContext.  Identified structs may also optionally be opaque
/// (have no body specified).  You get one of these by using one of the
/// StructType::create() forms.
///
/// Independent of what kind of struct you have, its elements are laid out in
/// memory consecutively, either directly one after the other (if the struct is
/// packed) or (if not packed) with padding between the elements as defined by
/// DataLayout (which is required to match what the code generator for a
/// target expects).
///
class StructType : public Type {
  StructType(LLVMContext &C) : Type(C, StructTyID) {}

  enum {
    /// This is the contents of the SubClassData field.
    SCDB_HasBody = 1,
    SCDB_Packed = 2,
    SCDB_IsLiteral = 4,
    SCDB_IsSized = 8,
    SCDB_ContainsScalableVector = 16,
    SCDB_NotContainsScalableVector = 32
  };

  /// For an identified struct that actually has a name, this is a pointer to
  /// the symbol table entry (maintained by LLVMContext) for the struct.
  /// This is null if the type is a literal struct or if it is an identified
  /// type that has an empty name.
  void *SymbolTableEntry = nullptr;

public:
  StructType(const StructType &) = delete;
  StructType &operator=(const StructType &) = delete;

  /// This creates an identified struct.
  static StructType *create(LLVMContext &Context, StringRef Name);
  static StructType *create(LLVMContext &Context);

  static StructType *create(ArrayRef<Type *> Elements, StringRef Name,
                            bool isPacked = false);
  static StructType *create(ArrayRef<Type *> Elements);
  static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements,
                            StringRef Name, bool isPacked = false);
  static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements);
  template <class... Tys>
  static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
  create(StringRef Name, Type *elt1, Tys *... elts) {
    assert(elt1 && "Cannot create a struct type with no elements with this");
    return create(ArrayRef<Type *>({elt1, elts...}), Name);
  }

  /// This static method is the primary way to create a literal StructType.
  static StructType *get(LLVMContext &Context, ArrayRef<Type*> Elements,
                         bool isPacked = false);

  /// Create an empty structure type.
  static StructType *get(LLVMContext &Context, bool isPacked = false);

  /// This static method is a convenience method for creating structure types by
  /// specifying the elements as arguments. Note that this method always returns
  /// a non-packed struct, and requires at least one element type.
  template <class... Tys>
  static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
  get(Type *elt1, Tys *... elts) {
    assert(elt1 && "Cannot create a struct type with no elements with this");
    LLVMContext &Ctx = elt1->getContext();
    return StructType::get(Ctx, ArrayRef<Type *>({elt1, elts...}));
  }

  /// Return the type with the specified name, or null if there is none by that
  /// name.
  static StructType *getTypeByName(LLVMContext &C, StringRef Name);
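
  /// Sketch contrasting the two kinds of struct (illustrative; \c Ctx is a
  /// hypothetical LLVMContext and "my.pair" an arbitrary name):
  ///
  /// \code
  ///   // Literal: uniqued structurally, body fixed at creation.
  ///   StructType *Lit = StructType::get(Ctx, {Type::getInt32Ty(Ctx)});
  ///   // Identified: named, created opaque, body attached later.
  ///   StructType *Id = StructType::create(Ctx, "my.pair");
  ///   Id->setBody(Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx));
  /// \endcode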

  bool isPacked() const { return (getSubclassData() & SCDB_Packed) != 0; }

  /// Return true if this type is uniqued by structural equivalence, false if it
  /// is a struct definition.
  bool isLiteral() const { return (getSubclassData() & SCDB_IsLiteral) != 0; }

  /// Return true if this is a type with an identity that has no body specified
  /// yet. Such types print as 'opaque' in .ll files.
  bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }

  /// isSized - Return true if this is a sized type.
  bool isSized(SmallPtrSetImpl<Type *> *Visited = nullptr) const;

  /// Returns true if this struct contains a scalable vector.
  bool
  containsScalableVectorType(SmallPtrSetImpl<Type *> *Visited = nullptr) const;

  /// Returns true if this struct contains homogeneous scalable vector types.
  /// Note that the definition of homogeneous scalable vector type is not
  /// recursive here. That means the following structure will return false
  /// when calling this function.
  /// {{<vscale x 2 x i32>, <vscale x 4 x i64>},
  ///  {<vscale x 2 x i32>, <vscale x 4 x i64>}}
  bool containsHomogeneousScalableVectorTypes() const;

  /// Return true if this is a named struct that has a non-empty name.
  bool hasName() const { return SymbolTableEntry != nullptr; }

  /// Return the name for this struct type if it has an identity.
  /// This may return an empty string for an unnamed struct type.  Do not call
  /// this on a literal type.
  StringRef getName() const;

  /// Change the name of this type to the specified name, or to a name with a
  /// suffix if there is a collision. Do not call this on a literal type.
  void setName(StringRef Name);

  /// Specify a body for an opaque identified type.
  void setBody(ArrayRef<Type*> Elements, bool isPacked = false);

  template <typename... Tys>
  std::enable_if_t<are_base_of<Type, Tys...>::value, void>
  setBody(Type *elt1, Tys *... elts) {
    assert(elt1 && "Cannot create a struct type with no elements with this");
    setBody(ArrayRef<Type *>({elt1, elts...}));
  }

  /// Return true if the specified type is valid as an element type.
  static bool isValidElementType(Type *ElemTy);

  // Iterator access to the elements.
  using element_iterator = Type::subtype_iterator;

  element_iterator element_begin() const { return ContainedTys; }
  element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
  ArrayRef<Type *> elements() const {
    return ArrayRef(element_begin(), element_end());
  }

  /// Return true if this is layout identical to the specified struct.
  bool isLayoutIdentical(StructType *Other) const;

  /// Random access to the elements
  unsigned getNumElements() const { return NumContainedTys; }
  Type *getElementType(unsigned N) const {
    assert(N < NumContainedTys && "Element number out of range!");
    return ContainedTys[N];
  }
  /// Given an index value into the type, return the type of the element.
  Type *getTypeAtIndex(const Value *V) const;
  Type *getTypeAtIndex(unsigned N) const { return getElementType(N); }
  bool indexValid(const Value *V) const;
  bool indexValid(unsigned Idx) const { return Idx < getNumElements(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == StructTyID;
  }
};

StringRef Type::getStructName() const {
  return cast<StructType>(this)->getName();
}

unsigned Type::getStructNumElements() const {
  return cast<StructType>(this)->getNumElements();
}

Type *Type::getStructElementType(unsigned N) const {
  return cast<StructType>(this)->getElementType(N);
}

/// Class to represent array types.
class ArrayType : public Type {
  /// The element type of the array.
  Type *ContainedType;
  /// Number of elements in the array.
  uint64_t NumElements;

  ArrayType(Type *ElType, uint64_t NumEl);

public:
  ArrayType(const ArrayType &) = delete;
  ArrayType &operator=(const ArrayType &) = delete;

  uint64_t getNumElements() const { return NumElements; }
  Type *getElementType() const { return ContainedType; }

  /// This static method is the primary way to construct an ArrayType.
  static ArrayType *get(Type *ElementType, uint64_t NumElements);

  /// Return true if the specified type is valid as an element type.
  static bool isValidElementType(Type *ElemTy);

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == ArrayTyID;
  }
};

uint64_t Type::getArrayNumElements() const {
  return cast<ArrayType>(this)->getNumElements();
}

/// Base class of all SIMD vector types
class VectorType : public Type {
  /// A fully specified VectorType is of the form <vscale x n x Ty>. 'n' is the
  /// minimum number of elements of type Ty contained within the vector, and
  /// 'vscale x' indicates that the total element count is an integer multiple
  /// of 'n', where the multiple is either guaranteed to be one, or is
  /// statically unknown at compile time.
  ///
  /// If the multiple is known to be 1, then the extra term is discarded in
  /// textual IR:
  ///
  /// <4 x i32>          - a vector containing 4 i32s
  /// <vscale x 4 x i32> - a vector containing an unknown integer multiple
  ///                      of 4 i32s

  /// The element type of the vector.
  Type *ContainedType;

protected:
  /// The element quantity of this vector. The meaning of this value depends
  /// on the type of vector:
  /// - For FixedVectorType = <ElementQuantity x ty>, there are
  ///   exactly ElementQuantity elements in this vector.
  /// - For ScalableVectorType = <vscale x ElementQuantity x ty>,
  ///   there are vscale * ElementQuantity elements in this vector, where
  ///   vscale is a runtime-constant integer greater than 0.
  const unsigned ElementQuantity;

  VectorType(Type *ElType, unsigned EQ, Type::TypeID TID);

public:
  VectorType(const VectorType &) = delete;
  VectorType &operator=(const VectorType &) = delete;

  Type *getElementType() const { return ContainedType; }

  /// This static method is the primary way to construct a VectorType.
  static VectorType *get(Type *ElementType, ElementCount EC);

  static VectorType *get(Type *ElementType, unsigned NumElements,
                         bool Scalable) {
    return VectorType::get(ElementType,
                           ElementCount::get(NumElements, Scalable));
  }

  static VectorType *get(Type *ElementType, const VectorType *Other) {
    return VectorType::get(ElementType, Other->getElementCount());
  }
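
  /// Sketch of fixed vs. scalable construction (illustrative; \c Ctx is a
  /// hypothetical LLVMContext):
  ///
  /// \code
  ///   Type *F32 = Type::getFloatTy(Ctx);
  ///   auto *V4 = VectorType::get(F32, 4, /*Scalable=*/false);  // <4 x float>
  ///   auto *NxV4 = VectorType::get(F32, 4, /*Scalable=*/true); // <vscale x 4 x float>
  /// \endcode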

  /// This static method gets a VectorType with the same number of elements as
  /// the input type, and the element type is an integer type of the same width
  /// as the input element type.
  static VectorType *getInteger(VectorType *VTy) {
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    assert(EltBits && "Element size must be of a non-zero size");
    Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
    return VectorType::get(EltTy, VTy->getElementCount());
  }

  /// This static method is like getInteger except that the element types are
  /// twice as wide as the elements in the input type.
  static VectorType *getExtendedElementVectorType(VectorType *VTy) {
    assert(VTy->isIntOrIntVectorTy() && "VTy expected to be a vector of ints.");
    auto *EltTy = cast<IntegerType>(VTy->getElementType());
    return VectorType::get(EltTy->getExtendedType(), VTy->getElementCount());
  }

  /// This static method gets a VectorType with the same number of elements as
  /// the input type, and the element type is an integer or float type which
  /// is half as wide as the elements in the input type.
  static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
    Type *EltTy;
    if (VTy->getElementType()->isFloatingPointTy()) {
      switch(VTy->getElementType()->getTypeID()) {
      case DoubleTyID:
        EltTy = Type::getFloatTy(VTy->getContext());
        break;
      case FloatTyID:
        EltTy = Type::getHalfTy(VTy->getContext());
        break;
      default:
        llvm_unreachable("Cannot create narrower fp vector element type");
      }
    } else {
      unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
      assert((EltBits & 1) == 0 &&
             "Cannot truncate vector element with odd bit-width");
      EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
    }
    return VectorType::get(EltTy, VTy->getElementCount());
  }

  /// This static method returns a VectorType with a smaller number of elements
  /// of a larger type than the input element type. For example, a <16 x i8>
  /// subdivided twice would return <4 x i32>.
  static VectorType *getSubdividedVectorType(VectorType *VTy, int NumSubdivs) {
    for (int i = 0; i < NumSubdivs; ++i) {
      VTy = VectorType::getDoubleElementsVectorType(VTy);
      VTy = VectorType::getTruncatedElementVectorType(VTy);
    }
    return VTy;
  }

  /// This static method returns a VectorType with half as many elements as the
  /// input type and the same element type.
  static VectorType *getHalfElementsVectorType(VectorType *VTy) {
    auto EltCnt = VTy->getElementCount();
    assert(EltCnt.isKnownEven() &&
           "Cannot halve vector with odd number of elements.");
    return VectorType::get(VTy->getElementType(),
                           EltCnt.divideCoefficientBy(2));
  }

  /// This static method returns a VectorType with twice as many elements as the
  /// input type and the same element type.
  static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
    auto EltCnt = VTy->getElementCount();
    assert((EltCnt.getKnownMinValue() * 2ull) <= UINT_MAX &&
           "Too many elements in vector");
    return VectorType::get(VTy->getElementType(), EltCnt * 2);
  }

  /// Return true if the specified type is valid as an element type.
  static bool isValidElementType(Type *ElemTy);

  /// Return an ElementCount instance to represent the (possibly scalable)
  /// number of elements in the vector.
  inline ElementCount getElementCount() const;

  /// Methods to support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == FixedVectorTyID ||
           T->getTypeID() == ScalableVectorTyID;
  }
};

/// Class to represent fixed width SIMD vectors
class FixedVectorType : public VectorType {
protected:
  FixedVectorType(Type *ElTy, unsigned NumElts)
      : VectorType(ElTy, NumElts, FixedVectorTyID) {}

public:
  static FixedVectorType *get(Type *ElementType, unsigned NumElts);

  static FixedVectorType *get(Type *ElementType, const FixedVectorType *FVTy) {
    return get(ElementType, FVTy->getNumElements());
  }

  static FixedVectorType *getInteger(FixedVectorType *VTy) {
    return cast<FixedVectorType>(VectorType::getInteger(VTy));
  }

  static FixedVectorType *getExtendedElementVectorType(FixedVectorType *VTy) {
    return cast<FixedVectorType>(VectorType::getExtendedElementVectorType(VTy));
  }

  static FixedVectorType *getTruncatedElementVectorType(FixedVectorType *VTy) {
    return cast<FixedVectorType>(
        VectorType::getTruncatedElementVectorType(VTy));
  }

  static FixedVectorType *getSubdividedVectorType(FixedVectorType *VTy,
                                                  int NumSubdivs) {
    return cast<FixedVectorType>(
        VectorType::getSubdividedVectorType(VTy, NumSubdivs));
  }

  static FixedVectorType *getHalfElementsVectorType(FixedVectorType *VTy) {
    return cast<FixedVectorType>(VectorType::getHalfElementsVectorType(VTy));
  }

  static FixedVectorType *getDoubleElementsVectorType(FixedVectorType *VTy) {
    return cast<FixedVectorType>(VectorType::getDoubleElementsVectorType(VTy));
  }

  static bool classof(const Type *T) {
    return T->getTypeID() == FixedVectorTyID;
  }

  unsigned getNumElements() const { return ElementQuantity; }
};

/// Class to represent scalable SIMD vectors
class ScalableVectorType : public VectorType {
protected:
  ScalableVectorType(Type *ElTy, unsigned MinNumElts)
      : VectorType(ElTy, MinNumElts, ScalableVectorTyID) {}

public:
  static ScalableVectorType *get(Type *ElementType, unsigned MinNumElts);

  static ScalableVectorType *get(Type *ElementType,
                                 const ScalableVectorType *SVTy) {
    return get(ElementType, SVTy->getMinNumElements());
  }

  static ScalableVectorType *getInteger(ScalableVectorType *VTy) {
    return cast<ScalableVectorType>(VectorType::getInteger(VTy));
  }

  static ScalableVectorType *
  getExtendedElementVectorType(ScalableVectorType *VTy) {
    return cast<ScalableVectorType>(
        VectorType::getExtendedElementVectorType(VTy));
  }

  static ScalableVectorType *
  getTruncatedElementVectorType(ScalableVectorType *VTy) {
    return cast<ScalableVectorType>(
        VectorType::getTruncatedElementVectorType(VTy));
  }

  static ScalableVectorType *getSubdividedVectorType(ScalableVectorType *VTy,
                                                     int NumSubdivs) {
    return cast<ScalableVectorType>(
        VectorType::getSubdividedVectorType(VTy, NumSubdivs));
  }

  static ScalableVectorType *
  getHalfElementsVectorType(ScalableVectorType *VTy) {
    return cast<ScalableVectorType>(VectorType::getHalfElementsVectorType(VTy));
  }

  static ScalableVectorType *
  getDoubleElementsVectorType(ScalableVectorType *VTy) {
    return cast<ScalableVectorType>(
        VectorType::getDoubleElementsVectorType(VTy));
  }

  /// Get the minimum number of elements in this vector. The actual number of
  /// elements in the vector is an integer multiple of this value.
  uint64_t getMinNumElements() const { return ElementQuantity; }

  static bool classof(const Type *T) {
    return T->getTypeID() == ScalableVectorTyID;
  }
};

inline ElementCount VectorType::getElementCount() const {
  return ElementCount::get(ElementQuantity, isa<ScalableVectorType>(this));
}
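
// Illustrative sketch (not part of this header): fixed and scalable vectors
// share the VectorType interface, and ElementCount carries both the minimum
// element count and the scalable flag.
//
//   LLVMContext Ctx;
//   Type *F32 = Type::getFloatTy(Ctx);
//   auto *V4 = FixedVectorType::get(F32, 4);       // <4 x float>
//   auto *NxV4 = ScalableVectorType::get(F32, 4);  // <vscale x 4 x float>
//   ElementCount EC = NxV4->getElementCount();     // minimum 4, scalable
//   // Subdividing <16 x i8> twice yields <4 x i32>, per the comment above:
//   auto *V16I8 = FixedVectorType::get(Type::getInt8Ty(Ctx), 16);
//   auto *V4I32 = VectorType::getSubdividedVectorType(V16I8, 2);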

/// Class to represent pointers.
class PointerType : public Type {
  explicit PointerType(LLVMContext &C, unsigned AddrSpace);

public:
  PointerType(const PointerType &) = delete;
  PointerType &operator=(const PointerType &) = delete;

  /// This constructs a pointer to an object of the specified type in a numbered
  /// address space.
  static PointerType *get(Type *ElementType, unsigned AddressSpace);
  /// This constructs an opaque pointer to an object in a numbered address
  /// space.
  static PointerType *get(LLVMContext &C, unsigned AddressSpace);

  /// This constructs a pointer to an object of the specified type in the
  /// default address space (address space zero).
  static PointerType *getUnqual(Type *ElementType) {
    return PointerType::get(ElementType, 0);
  }

  /// This constructs an opaque pointer to an object in the
  /// default address space (address space zero).
  static PointerType *getUnqual(LLVMContext &C) {
    return PointerType::get(C, 0);
  }

  /// This constructs a pointer type with the same pointee type as input
  /// PointerType (or opaque pointer if the input PointerType is opaque) and the
  /// given address space. This is only useful during the opaque pointer
  /// transition.
  /// TODO: remove after opaque pointer transition is complete.
  [[deprecated("Use PointerType::get() with LLVMContext argument instead")]]
  static PointerType *getWithSamePointeeType(PointerType *PT,
                                             unsigned AddressSpace) {
    return get(PT->getContext(), AddressSpace);
  }

  [[deprecated("Always returns true")]]
  bool isOpaque() const { return true; }

  /// Return true if the specified type is valid as an element type.
  static bool isValidElementType(Type *ElemTy);

  /// Return true if we can load from or store through a pointer to this type.
  static bool isLoadableOrStorableType(Type *ElemTy);

  /// Return the address space of the Pointer type.
  inline unsigned getAddressSpace() const { return getSubclassData(); }

  /// Return true if either this is an opaque pointer type or if the pointee
  /// type matches Ty. Primarily used for checking if an instruction's pointer
  /// operands are valid types. Will be useless after non-opaque pointers are
  /// removed.
  [[deprecated("Always returns true")]]
  bool isOpaqueOrPointeeTypeMatches(Type *) {
    return true;
  }

  /// Return true if both pointer types have the same element type. Two opaque
  /// pointers are considered to have the same element type, while an opaque
  /// and a non-opaque pointer have different element types.
  /// TODO: Remove after opaque pointer transition is complete.
  [[deprecated("Always returns true")]]
  bool hasSameElementTypeAs(PointerType *Other) {
    return true;
  }

  /// Implement support for type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) {
    return T->getTypeID() == PointerTyID;
  }
};
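
// Illustrative sketch (not part of this header): with opaque pointers the
// pointee type no longer participates, so both get() overloads produce the
// same uniqued type for a given address space.
//
//   LLVMContext Ctx;
//   PointerType *P0 = PointerType::getUnqual(Ctx);  // ptr
//   PointerType *P1 = PointerType::get(Ctx, 1);     // ptr addrspace(1)
//   unsigned AS = P1->getAddressSpace();            // 1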

Type *Type::getExtendedType() const {
  assert(
      isIntOrIntVectorTy() &&
      "Original type expected to be a vector of integers or a scalar integer.");
  if (auto *VTy = dyn_cast<VectorType>(this))
    return VectorType::getExtendedElementVectorType(
        const_cast<VectorType *>(VTy));
  return cast<IntegerType>(this)->getExtendedType();
}

Type *Type::getWithNewType(Type *EltTy) const {
  if (auto *VTy = dyn_cast<VectorType>(this))
    return VectorType::get(EltTy, VTy->getElementCount());
  return EltTy;
}

Type *Type::getWithNewBitWidth(unsigned NewBitWidth) const {
  assert(
      isIntOrIntVectorTy() &&
      "Original type expected to be a vector of integers or a scalar integer.");
  return getWithNewType(getIntNTy(getContext(), NewBitWidth));
}

unsigned Type::getPointerAddressSpace() const {
  return cast<PointerType>(getScalarType())->getAddressSpace();
}
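
// Illustrative sketch (not part of this header; Ty is assumed to be an
// integer or integer-vector type already in scope): getWithNewBitWidth keeps
// the shape of the original type and only swaps the integer width.
//
//   // i32               -> i64
//   // <4 x i32>         -> <4 x i64>
//   // <vscale x 2 x i8> -> <vscale x 2 x i64>
//   Type *Wide = Ty->getWithNewBitWidth(64);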

/// Class to represent target extension types, which are generally
/// unintrospectable from target-independent optimizations.
///
/// Target extension types have a string name, and optionally have type and/or
/// integer parameters. The exact meaning of any parameters is dependent on the
/// target.
class TargetExtType : public Type {
  TargetExtType(LLVMContext &C, StringRef Name, ArrayRef<Type *> Types,
                ArrayRef<unsigned> Ints);

  // These strings are ultimately owned by the context.
  StringRef Name;
  unsigned *IntParams;

public:
  TargetExtType(const TargetExtType &) = delete;
  TargetExtType &operator=(const TargetExtType &) = delete;

  /// Return a target extension type having the specified name and optional
  /// type and integer parameters.
  static TargetExtType *get(LLVMContext &Context, StringRef Name,
                            ArrayRef<Type *> Types = std::nullopt,
                            ArrayRef<unsigned> Ints = std::nullopt);

  /// Return the name for this target extension type. Two distinct target
  /// extension types may have the same name if their type or integer parameters
  /// differ.
  StringRef getName() const { return Name; }

  /// Return the type parameters for this particular target extension type. If
  /// there are no parameters, an empty array is returned.
  ArrayRef<Type *> type_params() const {
    return ArrayRef(type_param_begin(), type_param_end());
  }

  using type_param_iterator = Type::subtype_iterator;
  type_param_iterator type_param_begin() const { return ContainedTys; }
  type_param_iterator type_param_end() const {
    return &ContainedTys[NumContainedTys];
  }

  Type *getTypeParameter(unsigned i) const { return getContainedType(i); }
  unsigned getNumTypeParameters() const { return getNumContainedTypes(); }

  /// Return the integer parameters for this particular target extension type.
  /// If there are no parameters, an empty array is returned.
  ArrayRef<unsigned> int_params() const {
    return ArrayRef(IntParams, getNumIntParameters());
  }

  unsigned getIntParameter(unsigned i) const { return IntParams[i]; }
  unsigned getNumIntParameters() const { return getSubclassData(); }

  enum Property {
    /// zeroinitializer is valid for this target extension type.
    HasZeroInit = 1U << 0,
    /// This type may be used as the value type of a global variable.
    CanBeGlobal = 1U << 1,
  };

  /// Returns true if the target extension type contains the given property.
  bool hasProperty(Property Prop) const;

  /// Returns an underlying layout type for the target extension type. This
  /// type can be used to query size and alignment information, if it is
  /// appropriate (although note that the layout type may also be void). It is
  /// not legal to bitcast between this type and the layout type, however.
  Type *getLayoutType() const;

  /// Methods to support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Type *T) { return T->getTypeID() == TargetExtTyID; }
};

StringRef Type::getTargetExtName() const {
  return cast<TargetExtType>(this)->getName();
}
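
// Illustrative sketch (not part of this header; the type name
// "target.example" is hypothetical): a target extension type is identified
// by its name plus its type and integer parameters.
//
//   LLVMContext Ctx;
//   TargetExtType *TT = TargetExtType::get(Ctx, "target.example",
//                                          {Type::getInt32Ty(Ctx)}, {7u});
//   StringRef Name = TT->getName();          // "target.example"
//   Type *P0 = TT->getTypeParameter(0);      // i32
//   unsigned I0 = TT->getIntParameter(0);    // 7
//   bool Z = TT->hasProperty(TargetExtType::HasZeroInit);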

} // end namespace llvm

#endif // LLVM_IR_DERIVEDTYPES_H
//===- EHPersonalities.h - Compute EH-related information -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_EHPERSONALITIES_H
#define LLVM_IR_EHPERSONALITIES_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TinyPtrVector.h"

namespace llvm {
class BasicBlock;
class Function;
class Triple;
class Value;

enum class EHPersonality {
  Unknown,
  GNU_Ada,
  GNU_C,
  GNU_C_SjLj,
  GNU_CXX,
  GNU_CXX_SjLj,
  GNU_ObjC,
  MSVC_X86SEH,
  MSVC_TableSEH,
  MSVC_CXX,
  CoreCLR,
  Rust,
  Wasm_CXX,
  XL_CXX
};

/// See if the given exception handling personality function is one
/// that we understand.  If so, return a description of it; otherwise return
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);

StringRef getEHPersonalityName(EHPersonality Pers);

EHPersonality getDefaultEHPersonality(const Triple &T);

/// Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
  // The two SEH personality functions can catch asynchronous exceptions. We
  // assume unknown personalities don't catch asynchronous exceptions.
  switch (Pers) {
  case EHPersonality::MSVC_X86SEH:
  case EHPersonality::MSVC_TableSEH:
    return true;
  default:
    return false;
  }
  llvm_unreachable("invalid enum");
}

/// Returns true if this is a personality function that invokes
/// handler funclets (which must return to it).
inline bool isFuncletEHPersonality(EHPersonality Pers) {
  switch (Pers) {
  case EHPersonality::MSVC_CXX:
  case EHPersonality::MSVC_X86SEH:
  case EHPersonality::MSVC_TableSEH:
  case EHPersonality::CoreCLR:
    return true;
  default:
    return false;
  }
  llvm_unreachable("invalid enum");
}

/// Returns true if this personality uses scope-style EH IR instructions:
/// catchswitch, catchpad/ret, and cleanuppad/ret.
inline bool isScopedEHPersonality(EHPersonality Pers) {
  switch (Pers) {
  case EHPersonality::MSVC_CXX:
  case EHPersonality::MSVC_X86SEH:
  case EHPersonality::MSVC_TableSEH:
  case EHPersonality::CoreCLR:
  case EHPersonality::Wasm_CXX:
    return true;
  default:
    return false;
  }
  llvm_unreachable("invalid enum");
}

/// Return true if this personality may be safely removed if there
/// are no invoke instructions remaining in the current function.
inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
  switch (Pers) {
  case EHPersonality::Unknown:
    return false;
  // All known personalities currently have this behavior
  default:
    return true;
  }
  llvm_unreachable("invalid enum");
}

bool canSimplifyInvokeNoUnwind(const Function *F);

typedef TinyPtrVector<BasicBlock *> ColorVector;

/// If an EH funclet personality is in use (see isFuncletEHPersonality),
/// this will recompute which blocks are in which funclet. It is possible that
/// some blocks are in multiple funclets. Consider this analysis to be
/// expensive.
DenseMap<BasicBlock *, ColorVector> colorEHFunclets(Function &F);
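
// Illustrative sketch (not part of this header; F is assumed to be a
// Function already in scope): a typical client classifies the personality
// first and colors blocks only for funclet-based EH, since the analysis is
// expensive.
//
//   if (F.hasPersonalityFn()) {
//     EHPersonality Pers = classifyEHPersonality(F.getPersonalityFn());
//     if (isFuncletEHPersonality(Pers)) {
//       DenseMap<BasicBlock *, ColorVector> Colors = colorEHFunclets(F);
//       // ... map each block to the funclet(s) containing it ...
//     }
//   }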

} // end namespace llvm

#endif // LLVM_IR_EHPERSONALITIES_H
//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Clients of the assembly writer can use this interface to add their own
// special-purpose annotations to LLVM assembly language printouts.  Note that
// the assembly parser generally won't be able to parse these annotations, so
// implementations are advised to emit them as LLVM comments.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
#define LLVM_IR_ASSEMBLYANNOTATIONWRITER_H

namespace llvm {

class Function;
class BasicBlock;
class Instruction;
class Value;
class formatted_raw_ostream;

class AssemblyAnnotationWriter {
public:
  virtual ~AssemblyAnnotationWriter();

  /// emitFunctionAnnot - This may be implemented to emit a string right before
  /// the start of a function.
  virtual void emitFunctionAnnot(const Function *,
                                 formatted_raw_ostream &) {}

  /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
  /// after the basic block label, but before the first instruction in the
  /// block.
  virtual void emitBasicBlockStartAnnot(const BasicBlock *,
                                        formatted_raw_ostream &) {
  }

  /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
  /// after the basic block.
  virtual void emitBasicBlockEndAnnot(const BasicBlock *,
                                      formatted_raw_ostream &) {
  }

  /// emitInstructionAnnot - This may be implemented to emit a string right
  /// before an instruction is emitted.
  virtual void emitInstructionAnnot(const Instruction *,
                                    formatted_raw_ostream &) {}

  /// printInfoComment - This may be implemented to emit a comment to the
  /// right of an instruction or global value.
  virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
};
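
// Illustrative sketch (not part of this header; CountAnnotator is a
// hypothetical client class): annotations are emitted as LLVM comments so
// the printed module still parses.
//
//   struct CountAnnotator : AssemblyAnnotationWriter {
//     unsigned Count = 0;
//     void emitInstructionAnnot(const Instruction *,
//                               formatted_raw_ostream &OS) override {
//       OS << "; instruction #" << Count++ << "\n";
//     }
//   };
//   CountAnnotator Annot;
//   M.print(OS, &Annot);  // Module::print accepts an optional annotator.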

} // End llvm namespace

#endif
//===- IntrinsicsARM.td - Defines ARM intrinsics -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the ARM-specific intrinsics.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// TLS

let TargetPrefix = "arm" in {  // All intrinsics start with "llvm.arm.".

// A space-consuming intrinsic primarily for testing ARMConstantIslands. The
// first argument is the number of bytes this "instruction" takes up; the
// second argument and the return value are essentially chains, used to force
// ordering during ISel.
def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;

// 16-bit multiplications
def int_arm_smulbb : ClangBuiltin<"__builtin_arm_smulbb">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smulbt : ClangBuiltin<"__builtin_arm_smulbt">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smultb : ClangBuiltin<"__builtin_arm_smultb">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smultt : ClangBuiltin<"__builtin_arm_smultt">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smulwb : ClangBuiltin<"__builtin_arm_smulwb">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smulwt : ClangBuiltin<"__builtin_arm_smulwt">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Saturating Arithmetic

def int_arm_qadd : ClangBuiltin<"__builtin_arm_qadd">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [Commutative, IntrNoMem]>;
def int_arm_qsub : ClangBuiltin<"__builtin_arm_qsub">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_ssat : ClangBuiltin<"__builtin_arm_ssat">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_usat : ClangBuiltin<"__builtin_arm_usat">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;

// Accumulating multiplications
def int_arm_smlabb : ClangBuiltin<"__builtin_arm_smlabb">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlabt : ClangBuiltin<"__builtin_arm_smlabt">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlatb : ClangBuiltin<"__builtin_arm_smlatb">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlatt : ClangBuiltin<"__builtin_arm_smlatt">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlawb : ClangBuiltin<"__builtin_arm_smlawb">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlawt : ClangBuiltin<"__builtin_arm_smlawt">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;

// Parallel 16-bit saturation
def int_arm_ssat16 : ClangBuiltin<"__builtin_arm_ssat16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_usat16 : ClangBuiltin<"__builtin_arm_usat16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;

// Packing and unpacking
def int_arm_sxtab16 : ClangBuiltin<"__builtin_arm_sxtab16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_sxtb16 : ClangBuiltin<"__builtin_arm_sxtb16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_uxtab16 : ClangBuiltin<"__builtin_arm_uxtab16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uxtb16 : ClangBuiltin<"__builtin_arm_uxtb16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

// Parallel selection, reads the GE flags.
def int_arm_sel : ClangBuiltin<"__builtin_arm_sel">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrReadMem]>;

// Parallel 8-bit addition and subtraction
def int_arm_qadd8  : ClangBuiltin<"__builtin_arm_qadd8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_qsub8  : ClangBuiltin<"__builtin_arm_qsub8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_sadd8  : ClangBuiltin<"__builtin_arm_sadd8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_shadd8  : ClangBuiltin<"__builtin_arm_shadd8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_shsub8  : ClangBuiltin<"__builtin_arm_shsub8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_ssub8  : ClangBuiltin<"__builtin_arm_ssub8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_uadd8  : ClangBuiltin<"__builtin_arm_uadd8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_uhadd8  : ClangBuiltin<"__builtin_arm_uhadd8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uhsub8  : ClangBuiltin<"__builtin_arm_uhsub8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uqadd8  : ClangBuiltin<"__builtin_arm_uqadd8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uqsub8  : ClangBuiltin<"__builtin_arm_uqsub8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_usub8  : ClangBuiltin<"__builtin_arm_usub8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;

// Sum of 8-bit absolute differences
def int_arm_usad8  : ClangBuiltin<"__builtin_arm_usad8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_usada8  : ClangBuiltin<"__builtin_arm_usada8">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;

// Parallel 16-bit addition and subtraction
def int_arm_qadd16  : ClangBuiltin<"__builtin_arm_qadd16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_qasx  : ClangBuiltin<"__builtin_arm_qasx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_qsax  : ClangBuiltin<"__builtin_arm_qsax">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_qsub16  : ClangBuiltin<"__builtin_arm_qsub16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_sadd16  : ClangBuiltin<"__builtin_arm_sadd16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_sasx  : ClangBuiltin<"__builtin_arm_sasx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_shadd16  : ClangBuiltin<"__builtin_arm_shadd16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_shasx  : ClangBuiltin<"__builtin_arm_shasx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_shsax  : ClangBuiltin<"__builtin_arm_shsax">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_shsub16  : ClangBuiltin<"__builtin_arm_shsub16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_ssax  : ClangBuiltin<"__builtin_arm_ssax">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_ssub16  : ClangBuiltin<"__builtin_arm_ssub16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_uadd16  : ClangBuiltin<"__builtin_arm_uadd16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_uasx  : ClangBuiltin<"__builtin_arm_uasx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_uhadd16  : ClangBuiltin<"__builtin_arm_uhadd16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uhasx  : ClangBuiltin<"__builtin_arm_uhasx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uhsax  : ClangBuiltin<"__builtin_arm_uhsax">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uhsub16  : ClangBuiltin<"__builtin_arm_uhsub16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uqadd16  : ClangBuiltin<"__builtin_arm_uqadd16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uqasx  : ClangBuiltin<"__builtin_arm_uqasx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uqsax  : ClangBuiltin<"__builtin_arm_uqsax">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_uqsub16  : ClangBuiltin<"__builtin_arm_uqsub16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_usax  : ClangBuiltin<"__builtin_arm_usax">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_usub16  : ClangBuiltin<"__builtin_arm_usub16">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;

// Parallel 16-bit multiplication
def int_arm_smlad : ClangBuiltin<"__builtin_arm_smlad">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smladx : ClangBuiltin<"__builtin_arm_smladx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlald : ClangBuiltin<"__builtin_arm_smlald">,
    DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i64_ty],
                          [IntrNoMem]>;
def int_arm_smlaldx : ClangBuiltin<"__builtin_arm_smlaldx">,
    DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i64_ty],
                          [IntrNoMem]>;
def int_arm_smlsd : ClangBuiltin<"__builtin_arm_smlsd">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlsdx : ClangBuiltin<"__builtin_arm_smlsdx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smlsld : ClangBuiltin<"__builtin_arm_smlsld">,
    DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i64_ty],
                          [IntrNoMem]>;
def int_arm_smlsldx : ClangBuiltin<"__builtin_arm_smlsldx">,
    DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
                           llvm_i64_ty],
                          [IntrNoMem]>;
def int_arm_smuad : ClangBuiltin<"__builtin_arm_smuad">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smuadx : ClangBuiltin<"__builtin_arm_smuadx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smusd : ClangBuiltin<"__builtin_arm_smusd">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;
def int_arm_smusdx : ClangBuiltin<"__builtin_arm_smusdx">,
    DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                          [IntrNoMem]>;


//===----------------------------------------------------------------------===//
// Load, Store and Clear exclusive

// TODO: Add applicable default attributes.
def int_arm_ldrex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
def int_arm_strex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;

def int_arm_ldaex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
def int_arm_stlex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;

def int_arm_clrex : Intrinsic<[]>;

def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
    llvm_ptr_ty]>;
def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;

def int_arm_stlexd : Intrinsic<[llvm_i32_ty],
                               [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty]>;
def int_arm_ldaexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;

//===----------------------------------------------------------------------===//
// Data barrier instructions

// TODO: Add applicable default attributes.
def int_arm_dmb : ClangBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">,
                  Intrinsic<[], [llvm_i32_ty]>;
def int_arm_dsb : ClangBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">,
                  Intrinsic<[], [llvm_i32_ty]>;
def int_arm_isb : ClangBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">,
                  Intrinsic<[], [llvm_i32_ty]>;

//===----------------------------------------------------------------------===//
// VFP

def int_arm_get_fpscr : ClangBuiltin<"__builtin_arm_get_fpscr">,
                       DefaultAttrsIntrinsic<[llvm_i32_ty], [], []>;
def int_arm_set_fpscr : ClangBuiltin<"__builtin_arm_set_fpscr">,
                       DefaultAttrsIntrinsic<[], [llvm_i32_ty], []>;
def int_arm_vcvtr : DefaultAttrsIntrinsic<[llvm_float_ty],
                                          [llvm_anyfloat_ty], [IntrNoMem]>;
def int_arm_vcvtru : DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
                                           [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Coprocessor

// TODO: Add applicable default attributes.
def int_arm_ldc : ClangBuiltin<"__builtin_arm_ldc">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_ldcl : ClangBuiltin<"__builtin_arm_ldcl">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_ldc2 : ClangBuiltin<"__builtin_arm_ldc2">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_ldc2l : ClangBuiltin<"__builtin_arm_ldc2l">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;

def int_arm_stc : ClangBuiltin<"__builtin_arm_stc">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stcl : ClangBuiltin<"__builtin_arm_stcl">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stc2 : ClangBuiltin<"__builtin_arm_stc2">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
def int_arm_stc2l : ClangBuiltin<"__builtin_arm_stc2l">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;

// Move to coprocessor
def int_arm_mcr : ClangBuiltin<"__builtin_arm_mcr">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_arm_mcr2 : ClangBuiltin<"__builtin_arm_mcr2">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

// Move from coprocessor
def int_arm_mrc : ClangBuiltin<"__builtin_arm_mrc">,
                  MSBuiltin<"_MoveFromCoprocessor">,
   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                             llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
def int_arm_mrc2 : ClangBuiltin<"__builtin_arm_mrc2">,
                   MSBuiltin<"_MoveFromCoprocessor2">,
   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                             llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

// Coprocessor data processing
def int_arm_cdp : ClangBuiltin<"__builtin_arm_cdp">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;
def int_arm_cdp2 : ClangBuiltin<"__builtin_arm_cdp2">,
   Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                  llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>, ImmArg<ArgIndex<5>>]>;

// Move from two registers to coprocessor
def int_arm_mcrr : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                                  llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;
def int_arm_mcrr2 : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                                   llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;

def int_arm_mrrc : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty,
                              llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
def int_arm_mrrc2 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty,
                               llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

//===----------------------------------------------------------------------===//
// CRC32

def int_arm_crc32b : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_crc32cb : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_crc32h  : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_crc32ch : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_crc32w  : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_crc32cw : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// CMSE

// TODO: Add applicable default attributes.
def int_arm_cmse_tt : ClangBuiltin<"__builtin_arm_cmse_TT">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
def int_arm_cmse_ttt : ClangBuiltin<"__builtin_arm_cmse_TTT">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
def int_arm_cmse_tta : ClangBuiltin<"__builtin_arm_cmse_TTA">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
def int_arm_cmse_ttat : ClangBuiltin<"__builtin_arm_cmse_TTAT">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// HINT

// TODO: Add applicable default attributes.
def int_arm_hint : Intrinsic<[], [llvm_i32_ty]>;
def int_arm_dbg : Intrinsic<[], [llvm_i32_ty]>;

//===----------------------------------------------------------------------===//
// UND (reserved undefined sequence)

// TODO: Add applicable default attributes.
def int_arm_undefined : Intrinsic<[], [llvm_i32_ty]>;

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

// The following classes do not correspond directly to GCC builtins.
class Neon_1Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
class Neon_1Arg_Narrow_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>],
                          [IntrNoMem]>;
class Neon_2Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
class Neon_2Arg_Narrow_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMExtendedType<0>, LLVMExtendedType<0>],
                          [IntrNoMem]>;
class Neon_2Arg_Long_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                          [IntrNoMem]>;
class Neon_3Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMMatchType<0>, LLVMMatchType<0>,
                           LLVMMatchType<0>],
                          [IntrNoMem]>;
class Neon_3Arg_Long_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMMatchType<0>, LLVMTruncatedType<0>,
                           LLVMTruncatedType<0>],
                          [IntrNoMem]>;

class Neon_1FloatArg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;

class Neon_CvtFxToFP_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                          [IntrNoMem]>;
class Neon_CvtFPToFx_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                          [IntrNoMem]>;
class Neon_CvtFPtoInt_1Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
                          [IntrNoMem]>;

class Neon_Compare_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [llvm_anyvector_ty, LLVMMatchType<1>], [IntrNoMem]>;

// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
// Besides the table, VTBL has one other v8i8 argument and VTBX has two.
// Overall, the classes range from 2 to 6 v8i8 arguments.
class Neon_Tbl2Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_v8i8_ty],
                          [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
class Neon_Tbl3Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_v8i8_ty],
                          [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
                          [IntrNoMem]>;
class Neon_Tbl4Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_v8i8_ty],
                          [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
                           llvm_v8i8_ty],
                          [IntrNoMem]>;
class Neon_Tbl5Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_v8i8_ty],
                          [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
                           llvm_v8i8_ty, llvm_v8i8_ty],
                          [IntrNoMem]>;
class Neon_Tbl6Arg_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_v8i8_ty],
                          [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
                           llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
                          [IntrNoMem]>;

// Arithmetic ops

let IntrProperties = [IntrNoMem, Commutative] in {

  // Vector Add.
  def int_arm_neon_vhadds : Neon_2Arg_Intrinsic;
  def int_arm_neon_vhaddu : Neon_2Arg_Intrinsic;
  def int_arm_neon_vrhadds : Neon_2Arg_Intrinsic;
  def int_arm_neon_vrhaddu : Neon_2Arg_Intrinsic;
  def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic;

  // Vector Multiply.
  def int_arm_neon_vmulp : Neon_2Arg_Intrinsic;
  def int_arm_neon_vqdmulh : Neon_2Arg_Intrinsic;
  def int_arm_neon_vqrdmulh : Neon_2Arg_Intrinsic;
  def int_arm_neon_vmulls : Neon_2Arg_Long_Intrinsic;
  def int_arm_neon_vmullu : Neon_2Arg_Long_Intrinsic;
  def int_arm_neon_vmullp : Neon_2Arg_Long_Intrinsic;
  def int_arm_neon_vqdmull : Neon_2Arg_Long_Intrinsic;

  // Vector Maximum.
  def int_arm_neon_vmaxs : Neon_2Arg_Intrinsic;
  def int_arm_neon_vmaxu : Neon_2Arg_Intrinsic;
  def int_arm_neon_vmaxnm : Neon_2Arg_Intrinsic;

  // Vector Minimum.
  def int_arm_neon_vmins : Neon_2Arg_Intrinsic;
  def int_arm_neon_vminu : Neon_2Arg_Intrinsic;
  def int_arm_neon_vminnm : Neon_2Arg_Intrinsic;

  // Vector Reciprocal Step.
  def int_arm_neon_vrecps : Neon_2Arg_Intrinsic;

  // Vector Reciprocal Square Root Step.
  def int_arm_neon_vrsqrts : Neon_2Arg_Intrinsic;
}

// Vector Subtract.
def int_arm_neon_vhsubs : Neon_2Arg_Intrinsic;
def int_arm_neon_vhsubu : Neon_2Arg_Intrinsic;
def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;

// Vector Absolute Compare.
def int_arm_neon_vacge : Neon_Compare_Intrinsic;
def int_arm_neon_vacgt : Neon_Compare_Intrinsic;

// Vector Absolute Differences.
def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
def int_arm_neon_vabdu : Neon_2Arg_Intrinsic;

// Vector Pairwise Add.
def int_arm_neon_vpadd : Neon_2Arg_Intrinsic;

// Vector Pairwise Add Long.
// Note: This is different from the other "long" NEON intrinsics because
// the result vector has half as many elements as the source vector.
// The source and destination vector types must be specified separately.
def int_arm_neon_vpaddls : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_neon_vpaddlu : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
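
// Illustrative example (the concrete mangling is an assumption):
// llvm.arm.neon.vpaddls.v4i16.v8i8 pairwise-adds an <8 x i8> into a
// <4 x i16>, halving the element count while widening each element.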

// Vector Pairwise Add and Accumulate Long.
// Note: This is similar to vpaddl but the destination vector also appears
// as the first argument.
def int_arm_neon_vpadals : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_neon_vpadalu : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty], [IntrNoMem]>;

// Vector Pairwise Maximum and Minimum.
def int_arm_neon_vpmaxs : Neon_2Arg_Intrinsic;
def int_arm_neon_vpmaxu : Neon_2Arg_Intrinsic;
def int_arm_neon_vpmins : Neon_2Arg_Intrinsic;
def int_arm_neon_vpminu : Neon_2Arg_Intrinsic;

// Vector Shifts:
//
// The various saturating and rounding vector shift operations need to be
// represented by intrinsics in LLVM, and even the basic VSHL variable shift
// operation cannot be safely translated to LLVM's shift operators.  VSHL can
// be used for both left and right shifts, or even combinations of the two,
// depending on the signs of the shift amounts.  It also has well-defined
// behavior for shift amounts that LLVM leaves undefined.  Only basic shifts
// by constants can be represented with LLVM's shift operators.
//
// The shift counts for these intrinsics are always vectors, even for constant
// shifts, where the constant is replicated.  For consistency with VSHL (and
// other variable shift instructions), left shifts have positive shift counts
// and right shifts have negative shift counts.  This convention is also used
// for constant right shift intrinsics, and to help preserve sanity, the
// intrinsic names use "shift" instead of either "shl" or "shr".  Where
// applicable, signed and unsigned versions of the intrinsics are
// distinguished with "s" and "u" suffixes.  A few NEON shift instructions,
// such as VQSHLU, take signed operands but produce unsigned results; these
// use a "su" suffix.
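
// Illustrative C++ (IRBuilder) sketch, not part of this file; Ctx, M,
// Builder, V8I8Ty and Vec are assumed to be in scope. Because right shifts
// use negative counts, an arithmetic "shift right by 3" of an <8 x i8> value
// is built as vshifts with a splat of -3:
//
//   Constant *MinusThree = ConstantInt::getSigned(Type::getInt8Ty(Ctx), -3);
//   Value *Cnt =
//       ConstantVector::getSplat(ElementCount::getFixed(8), MinusThree);
//   Function *VShiftS =
//       Intrinsic::getDeclaration(M, Intrinsic::arm_neon_vshifts, {V8I8Ty});
//   Value *Shr = Builder.CreateCall(VShiftS, {Vec, Cnt}); // Vec >> 3 (signed)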

// Vector Shift.
def int_arm_neon_vshifts : Neon_2Arg_Intrinsic;
def int_arm_neon_vshiftu : Neon_2Arg_Intrinsic;

// Vector Rounding Shift.
def int_arm_neon_vrshifts : Neon_2Arg_Intrinsic;
def int_arm_neon_vrshiftu : Neon_2Arg_Intrinsic;
def int_arm_neon_vrshiftn : Neon_2Arg_Narrow_Intrinsic;

// Vector Saturating Shift.
def int_arm_neon_vqshifts : Neon_2Arg_Intrinsic;
def int_arm_neon_vqshiftu : Neon_2Arg_Intrinsic;
def int_arm_neon_vqshiftsu : Neon_2Arg_Intrinsic;
def int_arm_neon_vqshiftns : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vqshiftnu : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vqshiftnsu : Neon_2Arg_Narrow_Intrinsic;

// Vector Saturating Rounding Shift.
def int_arm_neon_vqrshifts : Neon_2Arg_Intrinsic;
def int_arm_neon_vqrshiftu : Neon_2Arg_Intrinsic;
def int_arm_neon_vqrshiftns : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vqrshiftnu : Neon_2Arg_Narrow_Intrinsic;
def int_arm_neon_vqrshiftnsu : Neon_2Arg_Narrow_Intrinsic;

// Vector Shift and Insert.
def int_arm_neon_vshiftins : Neon_3Arg_Intrinsic;

// Vector Absolute Value and Saturating Absolute Value.
def int_arm_neon_vabs : Neon_1Arg_Intrinsic;
def int_arm_neon_vqabs : Neon_1Arg_Intrinsic;

// Vector Saturating Negate.
def int_arm_neon_vqneg : Neon_1Arg_Intrinsic;

// Vector Count Leading Sign/Zero Bits.
def int_arm_neon_vcls : Neon_1Arg_Intrinsic;

// Vector Reciprocal Estimate.
def int_arm_neon_vrecpe : Neon_1Arg_Intrinsic;

// Vector Reciprocal Square Root Estimate.
def int_arm_neon_vrsqrte : Neon_1Arg_Intrinsic;

// Vector Conversions Between Floating-point and Integer
def int_arm_neon_vcvtau : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtas : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtnu : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtns : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtpu : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtps : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtmu : Neon_CvtFPtoInt_1Arg_Intrinsic;
def int_arm_neon_vcvtms : Neon_CvtFPtoInt_1Arg_Intrinsic;

// Vector Conversions Between Floating-point and Fixed-point.
def int_arm_neon_vcvtfp2fxs : Neon_CvtFPToFx_Intrinsic;
def int_arm_neon_vcvtfp2fxu : Neon_CvtFPToFx_Intrinsic;
def int_arm_neon_vcvtfxs2fp : Neon_CvtFxToFP_Intrinsic;
def int_arm_neon_vcvtfxu2fp : Neon_CvtFxToFP_Intrinsic;

// Vector Conversions Between Half-Precision and Single-Precision.
def int_arm_neon_vcvtfp2hf
    : DefaultAttrsIntrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_arm_neon_vcvthf2fp
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

// Narrowing Saturating Vector Moves.
def int_arm_neon_vqmovns : Neon_1Arg_Narrow_Intrinsic;
def int_arm_neon_vqmovnu : Neon_1Arg_Narrow_Intrinsic;
def int_arm_neon_vqmovnsu : Neon_1Arg_Narrow_Intrinsic;

// Vector Table Lookup.
// The first 1-4 arguments are the table.
def int_arm_neon_vtbl1 : Neon_Tbl2Arg_Intrinsic;
def int_arm_neon_vtbl2 : Neon_Tbl3Arg_Intrinsic;
def int_arm_neon_vtbl3 : Neon_Tbl4Arg_Intrinsic;
def int_arm_neon_vtbl4 : Neon_Tbl5Arg_Intrinsic;

// Vector Table Extension.
// Some elements of the destination vector may not be updated, so the original
// value of that vector is passed as the first argument.  The next 1-4
// arguments after that are the table.
def int_arm_neon_vtbx1 : Neon_Tbl3Arg_Intrinsic;
def int_arm_neon_vtbx2 : Neon_Tbl4Arg_Intrinsic;
def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic;
def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;

// Vector and Scalar Rounding.
def int_arm_neon_vrintn : Neon_1FloatArg_Intrinsic;
def int_arm_neon_vrintx : Neon_1Arg_Intrinsic;
def int_arm_neon_vrinta : Neon_1Arg_Intrinsic;
def int_arm_neon_vrintz : Neon_1Arg_Intrinsic;
def int_arm_neon_vrintm : Neon_1Arg_Intrinsic;
def int_arm_neon_vrintp : Neon_1Arg_Intrinsic;

// De-interleaving vector loads from N-element structures.
// Source operands are the address and alignment.
def int_arm_neon_vld1 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty], [llvm_anyptr_ty, llvm_i32_ty],
    [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld2 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty, llvm_i32_ty],
    [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld3 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld4 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;

def int_arm_neon_vld1x2 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>],
    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld1x3 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld1x4 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;

// Vector load N-element structure to one lane.
// Source operands are: the address, the N input vectors (since only one
// lane is assigned), the lane number, and the alignment.
def int_arm_neon_vld2lane : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>],
    [llvm_anyptr_ty, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty,
     llvm_i32_ty],
    [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld3lane : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
     llvm_i32_ty, llvm_i32_ty],
    [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld4lane : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
     LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrReadMem, IntrArgMemOnly]>;

// Vector load N-element structure to all lanes.
// Source operands are the address and alignment.
def int_arm_neon_vld2dup : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty, llvm_i32_ty],
    [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld3dup : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_arm_neon_vld4dup : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;

// Interleaving vector stores from N-element structures.
// Source operands are: the address, the N vectors, and the alignment.
def int_arm_neon_vst1 : DefaultAttrsIntrinsic<
    [], [llvm_anyptr_ty, llvm_anyvector_ty, llvm_i32_ty], [IntrArgMemOnly]>;
def int_arm_neon_vst2 : DefaultAttrsIntrinsic<
    [], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty],
    [IntrArgMemOnly]>;
def int_arm_neon_vst3 : DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
     llvm_i32_ty],
    [IntrArgMemOnly]>;
def int_arm_neon_vst4 : DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
     LLVMMatchType<1>, llvm_i32_ty],
    [IntrArgMemOnly]>;

def int_arm_neon_vst1x2 : DefaultAttrsIntrinsic<
    [], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_arm_neon_vst1x3 : DefaultAttrsIntrinsic<
    [], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_arm_neon_vst1x4 : DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
     LLVMMatchType<1>],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;

// Vector store N-element structure from one lane.
// Source operands are: the address, the N vectors, the lane number, and
// the alignment.
def int_arm_neon_vst2lane : DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty,
     llvm_i32_ty],
    [IntrArgMemOnly]>;
def int_arm_neon_vst3lane : DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
     llvm_i32_ty, llvm_i32_ty],
    [IntrArgMemOnly]>;
def int_arm_neon_vst4lane : DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
     LLVMMatchType<1>, llvm_i32_ty, llvm_i32_ty],
    [IntrArgMemOnly]>;

// Vector bitwise select.
def int_arm_neon_vbsl : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem]>;


// Crypto instructions
class AES_1Arg_Intrinsic : DefaultAttrsIntrinsic<
    [llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
class AES_2Arg_Intrinsic : DefaultAttrsIntrinsic<
    [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

class SHA_1Arg_Intrinsic : DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
class SHA_2Arg_Intrinsic : DefaultAttrsIntrinsic<
    [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
class SHA_3Arg_i32_Intrinsic : DefaultAttrsIntrinsic<
    [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
class SHA_3Arg_v4i32_Intrinsic : DefaultAttrsIntrinsic<
    [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
    [IntrNoMem]>;

def int_arm_neon_aesd : AES_2Arg_Intrinsic;
def int_arm_neon_aese : AES_2Arg_Intrinsic;
def int_arm_neon_aesimc : AES_1Arg_Intrinsic;
def int_arm_neon_aesmc : AES_1Arg_Intrinsic;
def int_arm_neon_sha1h : SHA_1Arg_Intrinsic;
def int_arm_neon_sha1su1 : SHA_2Arg_Intrinsic;
def int_arm_neon_sha256su0 : SHA_2Arg_Intrinsic;
def int_arm_neon_sha1c : SHA_3Arg_i32_Intrinsic;
def int_arm_neon_sha1m : SHA_3Arg_i32_Intrinsic;
def int_arm_neon_sha1p : SHA_3Arg_i32_Intrinsic;
def int_arm_neon_sha1su0 : SHA_3Arg_v4i32_Intrinsic;
def int_arm_neon_sha256h : SHA_3Arg_v4i32_Intrinsic;
def int_arm_neon_sha256h2 : SHA_3Arg_v4i32_Intrinsic;
def int_arm_neon_sha256su1 : SHA_3Arg_v4i32_Intrinsic;

def int_arm_neon_vqrdmlah : Neon_3Arg_Intrinsic;
def int_arm_neon_vqrdmlsh : Neon_3Arg_Intrinsic;

// Armv8.2-A dot product instructions
class Neon_Dot_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMMatchType<0>, llvm_anyvector_ty,
                           LLVMMatchType<1>],
                          [IntrNoMem]>;
def int_arm_neon_udot : Neon_Dot_Intrinsic;
def int_arm_neon_sdot : Neon_Dot_Intrinsic;

// v8.6-A Matrix Multiply Intrinsics
class Neon_MatMul_Intrinsic
  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                          [LLVMMatchType<0>, llvm_anyvector_ty,
                           LLVMMatchType<1>],
                          [IntrNoMem]>;
def int_arm_neon_ummla  : Neon_MatMul_Intrinsic;
def int_arm_neon_smmla  : Neon_MatMul_Intrinsic;
def int_arm_neon_usmmla : Neon_MatMul_Intrinsic;
def int_arm_neon_usdot  : Neon_Dot_Intrinsic;

// v8.6-A Bfloat Intrinsics
def int_arm_neon_vcvtfp2bf
    : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_arm_neon_vcvtbfp2bf
    : DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem]>;

def int_arm_neon_bfdot : Neon_Dot_Intrinsic;
def int_arm_neon_bfmmla
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                            [IntrNoMem]>;

class Neon_BF16FML_Intrinsic
    : DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                            [IntrNoMem]>;
def int_arm_neon_bfmlalb : Neon_BF16FML_Intrinsic;
def int_arm_neon_bfmlalt : Neon_BF16FML_Intrinsic;

def int_arm_cls: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty],
                                       [IntrNoMem]>;
def int_arm_cls64: DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i64_ty],
                                         [IntrNoMem]>;

def int_arm_mve_vctp8  : DefaultAttrsIntrinsic<[llvm_v16i1_ty], [llvm_i32_ty],
                                               [IntrNoMem]>;
def int_arm_mve_vctp16 : DefaultAttrsIntrinsic<[llvm_v8i1_ty], [llvm_i32_ty],
                                               [IntrNoMem]>;
def int_arm_mve_vctp32 : DefaultAttrsIntrinsic<[llvm_v4i1_ty], [llvm_i32_ty],
                                               [IntrNoMem]>;
def int_arm_mve_vctp64 : DefaultAttrsIntrinsic<[llvm_v2i1_ty], [llvm_i32_ty],
                                               [IntrNoMem]>;

// v8.3-A Floating-point complex add
def int_arm_neon_vcadd_rot90  : Neon_2Arg_Intrinsic;
def int_arm_neon_vcadd_rot270 : Neon_2Arg_Intrinsic;

// GNU eabi mcount
// TODO: Add applicable default attributes.
def int_arm_gnu_eabi_mcount : Intrinsic<[], [], []>;

def int_arm_mve_pred_i2v : DefaultAttrsIntrinsic<
  [llvm_anyvector_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_pred_v2i : DefaultAttrsIntrinsic<
  [llvm_i32_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_mve_vreinterpretq : DefaultAttrsIntrinsic<
  [llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;

def int_arm_mve_min_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_max_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_abd_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_add_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_and_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_bic_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_eor_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_orn_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_orr_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_sub_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_mul_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_mulh_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_qdmulh_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_rmulh_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_qrdmulh_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_mull_int_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty /* unsigned */,
    llvm_i32_ty /* top */, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_mull_poly_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty,
    LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_qadd_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_hadd_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_rhadd_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_qsub_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_hsub_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_vmina_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
    [IntrNoMem]>;
def int_arm_mve_vmaxa_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
    [IntrNoMem]>;
def int_arm_mve_vminnma_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
    [IntrNoMem]>;
def int_arm_mve_vmaxnma_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
    [IntrNoMem]>;

multiclass MVEPredicated<list<LLVMType> rets, list<LLVMType> params,
                         LLVMType pred = llvm_anyvector_ty,
                         list<IntrinsicProperty> props = [IntrNoMem],
                         list<SDNodeProperty> sdprops = []> {
  def "": DefaultAttrsIntrinsic<rets, params, props, "", sdprops>;
  def _predicated: DefaultAttrsIntrinsic<rets, params # [pred], props, "",
                                         sdprops>;
}
multiclass MVEPredicatedM<list<LLVMType> rets, list<LLVMType> params,
                          LLVMType pred = llvm_anyvector_ty,
                          list<IntrinsicProperty> props = [IntrNoMem]> {
  def "": DefaultAttrsIntrinsic<rets, params, props>;
  def _predicated: DefaultAttrsIntrinsic<rets, params # [pred,
      !if(!eq(rets[0], llvm_anyvector_ty),
          LLVMMatchType<0>, rets[0])], props>;
}
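
// Expansion note (an illustrative sketch using a hypothetical name): a line
// such as
//   defm int_arm_mve_foo: MVEPredicated<[llvm_i32_ty], [llvm_anyvector_ty]>;
// defines two intrinsics, int_arm_mve_foo and int_arm_mve_foo_predicated,
// where the predicated record appends the predicate type (by default
// llvm_anyvector_ty) to the parameter list.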

multiclass MVE_minmaxv {
  defm v: MVEPredicated<[llvm_i32_ty],
     [llvm_i32_ty, llvm_anyvector_ty, llvm_i32_ty /* unsigned */]>;
  defm av: MVEPredicated<[llvm_i32_ty],
     [llvm_i32_ty, llvm_anyvector_ty]>;
  defm nmv: MVEPredicated<[llvm_anyfloat_ty],
     [LLVMMatchType<0>, llvm_anyvector_ty]>;
  defm nmav: MVEPredicated<[llvm_anyfloat_ty],
     [LLVMMatchType<0>, llvm_anyvector_ty]>;
}
defm int_arm_mve_min: MVE_minmaxv;
defm int_arm_mve_max: MVE_minmaxv;
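
// For reference (names derived from the multiclass expansion, not spelled
// out explicitly above): these two defms yield int_arm_mve_minv,
// int_arm_mve_minav, int_arm_mve_minnmv and int_arm_mve_minnmav, plus their
// int_arm_mve_max* counterparts, each in a plain and a _predicated form.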

defm int_arm_mve_addv: MVEPredicated<[llvm_i32_ty],
   [llvm_anyvector_ty, llvm_i32_ty /* unsigned */]>;
defm int_arm_mve_addlv: MVEPredicated<[llvm_i64_ty],
   [llvm_anyvector_ty, llvm_i32_ty /* unsigned */]>;

// Intrinsic with a predicated and a non-predicated case. The predicated case
// has two additional parameters: inactive (the value for inactive lanes, can
// be undef) and predicate.
multiclass MVEMXPredicated<list<LLVMType> rets, list<LLVMType> flags,
                           list<LLVMType> params, LLVMType inactive,
                           LLVMType predicate,
                           list<IntrinsicProperty> props = [IntrNoMem]> {
  def "":          DefaultAttrsIntrinsic<rets, flags # params, props>;
  def _predicated: DefaultAttrsIntrinsic<
      rets, flags # [inactive] # params # [predicate], props>;
}
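
// Expansion sketch (illustrative): with the defm of int_arm_mve_vcvt_widen
// below, the plain record takes (flags..., params...) while
// int_arm_mve_vcvt_widen_predicated takes (flags..., inactive, params...,
// predicate), matching the description above.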

defm int_arm_mve_vcvt_narrow: MVEPredicated<[llvm_v8f16_ty],
   [llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty], llvm_v4i1_ty>;
defm int_arm_mve_vcvt_widen: MVEMXPredicated<[llvm_v4f32_ty], [],
   [llvm_v8f16_ty, llvm_i32_ty], llvm_v4f32_ty, llvm_v4i1_ty>;

defm int_arm_mve_vldr_gather_base: MVEPredicated<
   [llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty],
   llvm_anyvector_ty, [IntrReadMem], [SDNPMemOperand]>;
defm int_arm_mve_vldr_gather_base_wb: MVEPredicated<
   [llvm_anyvector_ty, llvm_anyvector_ty],
   [LLVMMatchType<1>, llvm_i32_ty], llvm_anyvector_ty, [IntrReadMem],
   [SDNPMemOperand]>;
defm int_arm_mve_vstr_scatter_base: MVEPredicated<
   [], [llvm_anyvector_ty, llvm_i32_ty, llvm_anyvector_ty],
   llvm_anyvector_ty, [IntrWriteMem], [SDNPMemOperand]>;
defm int_arm_mve_vstr_scatter_base_wb: MVEPredicated<
   [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty, llvm_anyvector_ty],
   llvm_anyvector_ty, [IntrWriteMem], [SDNPMemOperand]>;

// gather_offset takes three i32 parameters. The first is the size of the
// memory element loaded, in bits. The second is a left bit shift to
// apply to each offset in the vector parameter (it must be either 0, or
// correspond to the element size of the destination vector type). The
// last is 1 to indicate zero extension (if the load is widening), or
// 0 for sign extension.
//
// scatter_offset has the first two of those parameters, but since it
// narrows rather than widens, it doesn't have the last one.
defm int_arm_mve_vldr_gather_offset: MVEPredicated<
   [llvm_anyvector_ty], [llvm_anyptr_ty, llvm_anyvector_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], llvm_anyvector_ty, [IntrReadMem],
   [SDNPMemOperand]>;
defm int_arm_mve_vstr_scatter_offset: MVEPredicated<
   [], [llvm_anyptr_ty, llvm_anyvector_ty, llvm_anyvector_ty,
   llvm_i32_ty, llvm_i32_ty], llvm_anyvector_ty, [IntrWriteMem],
   [SDNPMemOperand]>;
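
// Worked example (hypothetical, for illustration): a widening gather that
// loads 16-bit memory elements into 32-bit lanes with zero extension, using
// halfword-scaled offsets, would pass (16 /*element size in bits*/,
// 1 /*left shift*/, 1 /*zero extend*/) as the three i32 parameters.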

def int_arm_mve_shl_imm_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;
def int_arm_mve_shr_imm_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, // extra i32 is unsigned flag
    llvm_anyvector_ty, LLVMMatchType<0>],
   [IntrNoMem]>;

defm int_arm_mve_vqshl_imm: MVEPredicatedM<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*unsigned*/]>;
defm int_arm_mve_vrshr_imm: MVEPredicatedM<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*unsigned*/]>;
defm int_arm_mve_vqshlu_imm: MVEPredicatedM<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/]>;
defm int_arm_mve_vshll_imm: MVEPredicatedM<[llvm_anyvector_ty],
   [llvm_anyvector_ty, llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*unsigned*/,
                       llvm_i32_ty /*top-half*/]>;

defm int_arm_mve_vsli: MVEPredicated<
   [llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty]>;
defm int_arm_mve_vsri: MVEPredicated<
   [llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty]>;

defm int_arm_mve_vshrn: MVEPredicated<
   [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty,
    llvm_i32_ty /*shiftcount*/, llvm_i32_ty /*saturate*/, llvm_i32_ty /*round*/,
    llvm_i32_ty /*unsigned-out*/, llvm_i32_ty /*unsigned-in*/,
    llvm_i32_ty /*top-half*/]>;

defm int_arm_mve_vshl_scalar: MVEPredicated<
   [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty /*shiftcount*/,
    llvm_i32_ty /*saturate*/, llvm_i32_ty /*round*/, llvm_i32_ty /*unsigned*/]>;
defm int_arm_mve_vshl_vector: MVEPredicatedM<
   [llvm_anyvector_ty], [LLVMMatchType<0>, llvm_anyvector_ty /*shiftcounts*/,
    llvm_i32_ty /*saturate*/, llvm_i32_ty /*round*/, llvm_i32_ty /*unsigned*/]>;

// MVE scalar shifts.
class ARM_MVE_qrshift_single<list<LLVMType> value,
                             list<LLVMType> saturate = []> :
  DefaultAttrsIntrinsic<value, value # [llvm_i32_ty] # saturate, [IntrNoMem]>;
multiclass ARM_MVE_qrshift<list<LLVMType> saturate = []> {
  // Most of these shifts come in 32- and 64-bit versions. But only
  // the 64-bit ones have the extra saturation argument (if any).
  def "": ARM_MVE_qrshift_single<[llvm_i32_ty]>;
  def l:  ARM_MVE_qrshift_single<[llvm_i32_ty, llvm_i32_ty], saturate>;
}
defm int_arm_mve_urshr: ARM_MVE_qrshift;
defm int_arm_mve_uqshl: ARM_MVE_qrshift;
defm int_arm_mve_srshr: ARM_MVE_qrshift;
defm int_arm_mve_sqshl: ARM_MVE_qrshift;
defm int_arm_mve_uqrshl: ARM_MVE_qrshift<[llvm_i32_ty]>;
defm int_arm_mve_sqrshr: ARM_MVE_qrshift<[llvm_i32_ty]>;
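// Expansion sketch (illustrative): defm int_arm_mve_uqrshl above defines
// int_arm_mve_uqrshl, a 32-bit shift taking an i32 value and an i32 shift
// count, and int_arm_mve_uqrshll, which operates on an i32 lo/hi pair and
// carries the extra i32 saturation argument.
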
// LSLL and ASRL only have 64-bit versions, not 32.
def int_arm_mve_lsll: ARM_MVE_qrshift_single<[llvm_i32_ty, llvm_i32_ty]>;
def int_arm_mve_asrl: ARM_MVE_qrshift_single<[llvm_i32_ty, llvm_i32_ty]>;

def int_arm_mve_vabd: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_vadc: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty, llvm_i32_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vsbc: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty, llvm_i32_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
def int_arm_mve_vadc_predicated: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty, llvm_i32_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
    llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_mve_vsbc_predicated: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty, llvm_i32_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
    llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_mve_vshlc: DefaultAttrsIntrinsic<
   [llvm_i32_ty /* bits shifted out */, llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty /* bits shifted in */,
    llvm_i32_ty /* shift count */], [IntrNoMem]>;
def int_arm_mve_vshlc_predicated: DefaultAttrsIntrinsic<
   [llvm_i32_ty /* bits shifted out */, llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty /* bits shifted in */,
    llvm_i32_ty /* shift count */, llvm_anyvector_ty], [IntrNoMem]>;
def int_arm_mve_vmulh: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_vqdmulh: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_vhadd: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_vrhadd: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_vhsub: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_vrmulh: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty /* unsigned */],
   [IntrNoMem]>;
def int_arm_mve_vqrdmulh: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_vmull: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty /* unsigned */,
    llvm_i32_ty /* top */], [IntrNoMem]>;
def int_arm_mve_vmull_poly: DefaultAttrsIntrinsic<
   [llvm_anyvector_ty],
   [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrNoMem]>;

// The first two parameters are compile-time constants:
// * Halving: 0 means halving (vhcaddq), 1 means non-halving (vcaddq)
//            instruction. Note: the flag is inverted to match the corresponding
//            bit in the instruction encoding.
// * Rotation angle: 0 means 90 deg, 1 means 180 deg.
defm int_arm_mve_vcaddq : MVEMXPredicated<
  [llvm_anyvector_ty],
  [llvm_i32_ty, llvm_i32_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
   LLVMMatchType<0>, llvm_anyvector_ty>;

// The first operand of the following two intrinsics is the rotation angle
// (must be a compile-time constant):
// 0 - 0 deg
// 1 - 90 deg
// 2 - 180 deg
// 3 - 270 deg
defm int_arm_mve_vcmulq : MVEMXPredicated<
  [llvm_anyvector_ty],
  [llvm_i32_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
   LLVMMatchType<0>, llvm_anyvector_ty>;

defm int_arm_mve_vcmlaq : MVEPredicated<
  [llvm_anyvector_ty],
  [llvm_i32_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
   llvm_anyvector_ty>;

def int_arm_mve_vld2q: DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty],
    [IntrReadMem, IntrArgMemOnly]>;
def int_arm_mve_vld4q: DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;

def int_arm_mve_vst2q: DefaultAttrsIntrinsic<
    [], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty],
    [IntrWriteMem, IntrArgMemOnly], "", [SDNPMemOperand]>;
def int_arm_mve_vst4q: DefaultAttrsIntrinsic<
    [],
    [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
     LLVMMatchType<1>, llvm_i32_ty],
    [IntrWriteMem, IntrArgMemOnly], "", [SDNPMemOperand]>;

// MVE vector absolute difference and accumulate across vector
// The first operand is an 'unsigned' flag. The remaining operands are:
// * accumulator
// * first vector operand
// * second vector operand
// * mask (only in predicated versions)
defm int_arm_mve_vabav: MVEPredicated<
  [llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>], llvm_anyvector_ty>;

// The following 3 intrinsics are MVE vector reductions with two vector
// operands.
// The first 3 operands are boolean flags (must be compile-time constants):
// * unsigned - the instruction operates on vectors of unsigned values and
//              unsigned scalars
// * subtract - the instruction performs subtraction after multiplication of
//              lane pairs (e.g., vmlsdav vs vmladav)
// * exchange - the instruction exchanges successive even and odd lanes of
//              the first operands before multiplication of lane pairs
//              (e.g., vmladavx vs vmladav)
// The remaining operands are:
// * accumulator
// * first vector operand
// * second vector operand
// * mask (only in predicated versions)

// Version with 32-bit result, vml{a,s}dav[a][x]
defm int_arm_mve_vmldava: MVEPredicated<
  [llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
   llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
  llvm_anyvector_ty>;

// Version with 64-bit result, vml{a,s}ldav[a][x]
defm int_arm_mve_vmlldava: MVEPredicated<
  [llvm_i32_ty, llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
  llvm_anyvector_ty>;

// Version with 72-bit rounded result, vrml{a,s}ldavh[a][x]
defm int_arm_mve_vrmlldavha: MVEPredicated<
  [llvm_i32_ty, llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_anyvector_ty, LLVMMatchType<0>],
  llvm_anyvector_ty>;
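
// For illustration (hypothetical operands): a signed vmladavax operation
// maps to int_arm_mve_vmldava with flags (unsigned=0, subtract=0,
// exchange=1), followed by the accumulator and the two vector operands.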

defm int_arm_mve_vidup: MVEMXPredicated<
   [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
   [llvm_i32_ty /* base */, llvm_i32_ty /* step */],
   LLVMMatchType<0>, llvm_anyvector_ty>;
defm int_arm_mve_vddup: MVEMXPredicated<
   [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
   [llvm_i32_ty /* base */, llvm_i32_ty /* step */],
   LLVMMatchType<0>, llvm_anyvector_ty>;
defm int_arm_mve_viwdup: MVEMXPredicated<
   [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
   [llvm_i32_ty /* base */, llvm_i32_ty /* limit */, llvm_i32_ty /* step */],
   LLVMMatchType<0>, llvm_anyvector_ty>;
defm int_arm_mve_vdwdup: MVEMXPredicated<
   [llvm_anyvector_ty /* output */, llvm_i32_ty /* written-back base */], [],
   [llvm_i32_ty /* base */, llvm_i32_ty /* limit */, llvm_i32_ty /* step */],
   LLVMMatchType<0>, llvm_anyvector_ty>;

// Flags:
// * unsigned
defm int_arm_mve_vcvt_fix: MVEMXPredicated<
  [llvm_anyvector_ty /* output */], [llvm_i32_ty],
  [llvm_anyvector_ty /* input vector */, llvm_i32_ty /* scale */],
  LLVMMatchType<0>, llvm_anyvector_ty>;

def int_arm_mve_vcvt_fp_int_predicated: DefaultAttrsIntrinsic<
  [llvm_anyvector_ty], [llvm_anyvector_ty, llvm_i32_ty /* unsigned */,
   llvm_anyvector_ty /* predicate */, LLVMMatchType<0> /* inactive */],
  [IntrNoMem]>;

foreach suffix = ["a","n","p","m"] in {
  defm "int_arm_mve_vcvt"#suffix: MVEMXPredicated<
    [llvm_anyvector_ty /* output */], [llvm_i32_ty /* unsigned */],
    [llvm_anyvector_ty /* input */], LLVMMatchType<0>, llvm_anyvector_ty>;
}

def int_arm_mve_vrintn: DefaultAttrsIntrinsic<
  [llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_vcls: DefaultAttrsIntrinsic<
  [llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;

defm int_arm_mve_vbrsr: MVEMXPredicated<
  [llvm_anyvector_ty], [],
  [LLVMMatchType<0>, llvm_i32_ty], LLVMMatchType<0>, llvm_anyvector_ty>;

def int_arm_mve_vqdmull: DefaultAttrsIntrinsic<
  [llvm_anyvector_ty],
  [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty],
  [IntrNoMem]>;
def int_arm_mve_vqdmull_predicated: DefaultAttrsIntrinsic<
  [llvm_anyvector_ty],
  [llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty,
   LLVMMatchType<0>],
  [IntrNoMem]>;

class MVESimpleUnaryPredicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;

def int_arm_mve_mvn_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_abs_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_neg_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_qabs_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_qneg_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_clz_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_cls_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_vrintz_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_vrintm_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_vrintp_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_vrinta_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_vrintx_predicated: MVESimpleUnaryPredicated;
def int_arm_mve_vrintn_predicated: MVESimpleUnaryPredicated;

def int_arm_mve_vrev_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_i32_ty /* size to reverse */,
    llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;

def int_arm_mve_vmovl_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [llvm_anyvector_ty, llvm_i32_ty /* unsigned */, llvm_i32_ty /* top half */,
    llvm_anyvector_ty /* predicate */, LLVMMatchType<0>], [IntrNoMem]>;
def int_arm_mve_vmovn_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i32_ty /* top half */,
    llvm_anyvector_ty /* predicate */], [IntrNoMem]>;

def int_arm_mve_vqmovn: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_anyvector_ty,
    llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
    llvm_i32_ty /* top half */], [IntrNoMem]>;
def int_arm_mve_vqmovn_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0>, llvm_anyvector_ty,
    llvm_i32_ty /* unsigned output */, llvm_i32_ty /* unsigned input */,
    llvm_i32_ty /* top half */, llvm_anyvector_ty /* pred */], [IntrNoMem]>;

def int_arm_mve_fma_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
    LLVMMatchType<0> /* addend */, llvm_anyvector_ty /* pred */], [IntrNoMem]>;
def int_arm_mve_vmla_n_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* addend */,
    llvm_i32_ty /* mult op #2 (scalar) */, llvm_anyvector_ty /* pred */],
   [IntrNoMem]>;
def int_arm_mve_vmlas_n_predicated: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
   [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
    llvm_i32_ty /* addend (scalar) */, llvm_anyvector_ty /* pred */],
   [IntrNoMem]>;

defm int_arm_mve_vqdmlah: MVEPredicated<[llvm_anyvector_ty],
  [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* addend */,
   llvm_i32_ty /* mult op #2 (scalar) */]>;
defm int_arm_mve_vqrdmlah: MVEPredicated<[llvm_anyvector_ty],
  [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* addend */,
   llvm_i32_ty /* mult op #2 (scalar) */]>;
defm int_arm_mve_vqdmlash: MVEPredicated<[llvm_anyvector_ty],
  [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
   llvm_i32_ty /* addend (scalar) */]>;
defm int_arm_mve_vqrdmlash: MVEPredicated<[llvm_anyvector_ty],
  [LLVMMatchType<0> /* mult op #1 */, LLVMMatchType<0> /* mult op #2 */,
   llvm_i32_ty /* addend (scalar) */]>;

defm int_arm_mve_vqdmlad: MVEPredicated<[llvm_anyvector_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
   llvm_i32_ty /* exchange */, llvm_i32_ty /* round */,
   llvm_i32_ty /* subtract */]>;

// CDE (Custom Datapath Extension)

multiclass CDEGPRIntrinsics<list<LLVMType> args> {
  def "" : DefaultAttrsIntrinsic<
    [llvm_i32_ty],
    !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
  def a : DefaultAttrsIntrinsic<
    [llvm_i32_ty],
    !listconcat([llvm_i32_ty /* coproc */, llvm_i32_ty /* acc */], args,
                [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;

  def d: DefaultAttrsIntrinsic<
    [llvm_i32_ty /* lo */, llvm_i32_ty /* hi */],
    !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
  def da: DefaultAttrsIntrinsic<
    [llvm_i32_ty /* lo */, llvm_i32_ty /* hi */],
    !listconcat([llvm_i32_ty /* coproc */, llvm_i32_ty /* acc_lo */,
                 llvm_i32_ty /* acc_hi */], args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 3)>>]>;
}

defm int_arm_cde_cx1: CDEGPRIntrinsics<[]>;
defm int_arm_cde_cx2: CDEGPRIntrinsics<[llvm_i32_ty]>;
defm int_arm_cde_cx3: CDEGPRIntrinsics<[llvm_i32_ty, llvm_i32_ty]>;
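
// Expansion sketch (names follow from the multiclass above): for example,
// defm int_arm_cde_cx2 defines int_arm_cde_cx2, int_arm_cde_cx2a,
// int_arm_cde_cx2d and int_arm_cde_cx2da, i.e. the plain, accumulating,
// dual-register and dual-register accumulating forms.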

multiclass CDEVCXIntrinsics<list<LLVMType> args> {
  def "" : DefaultAttrsIntrinsic<
    [llvm_anyfloat_ty],
    !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
  def a : DefaultAttrsIntrinsic<
    [llvm_anyfloat_ty],
    !listconcat([llvm_i32_ty /* coproc */,  LLVMMatchType<0> /* acc */],
                args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
}

defm int_arm_cde_vcx1 : CDEVCXIntrinsics<[]>;
defm int_arm_cde_vcx2 : CDEVCXIntrinsics<[LLVMMatchType<0>]>;
defm int_arm_cde_vcx3 : CDEVCXIntrinsics<[LLVMMatchType<0>, LLVMMatchType<0>]>;

multiclass CDEVCXVecIntrinsics<list<LLVMType> args> {
  def "" : DefaultAttrsIntrinsic<
    [llvm_v16i8_ty],
    !listconcat([llvm_i32_ty /* coproc */], args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 1)>>]>;
  def a : DefaultAttrsIntrinsic<
    [llvm_v16i8_ty],
    !listconcat([llvm_i32_ty /* coproc */, llvm_v16i8_ty /* acc */],
                args, [llvm_i32_ty /* imm */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;

  def _predicated : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty],
    !listconcat([llvm_i32_ty /* coproc */, LLVMMatchType<0> /* inactive */],
                args, [llvm_i32_ty /* imm */, llvm_anyvector_ty /* mask */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
  def a_predicated : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty],
    !listconcat([llvm_i32_ty /* coproc */, LLVMMatchType<0> /* acc */],
                args, [llvm_i32_ty /* imm */, llvm_anyvector_ty /* mask */]),
    [IntrNoMem, ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<!add(!size(args), 2)>>]>;
}

defm int_arm_cde_vcx1q : CDEVCXVecIntrinsics<[]>;
defm int_arm_cde_vcx2q : CDEVCXVecIntrinsics<[llvm_v16i8_ty]>;
defm int_arm_cde_vcx3q : CDEVCXVecIntrinsics<[llvm_v16i8_ty, llvm_v16i8_ty]>;

} // end TargetPrefix
//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the GlobalVariable class, which
// represents a single global variable (or constant) in the VM.
//
// Global variables are constant pointers that refer to hunks of space that are
// allocated by either the VM, or by the linker in a static compiler.  A global
// variable may have an initial value, which is copied into the executable's
// .data area.  Global Constants are required to have initializers.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALVARIABLE_H
#define LLVM_IR_GLOBALVARIABLE_H

#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Value.h"
#include <cassert>
#include <cstddef>

namespace llvm {

class Constant;
class Module;

template <typename ValueSubClass> class SymbolTableListTraits;
class DIGlobalVariableExpression;

class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
  friend class SymbolTableListTraits<GlobalVariable>;

  AttributeSet Attrs;
  bool isConstantGlobal : 1;                   // Is this a global constant?
  bool isExternallyInitializedConstant : 1;    // Is this a global whose value
                                               // can change from its initial
                                               // value before global
                                               // initializers are run?

public:
  /// GlobalVariable ctor - If a parent module is specified, the global is
  /// automatically inserted into the end of the specified modules global list.
  GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
                 Constant *Initializer = nullptr, const Twine &Name = "",
                 ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
                 bool isExternallyInitialized = false);
  /// GlobalVariable ctor - This creates a global and inserts it before the
  /// specified other global.
  GlobalVariable(Module &M, Type *Ty, bool isConstant, LinkageTypes Linkage,
                 Constant *Initializer, const Twine &Name = "",
                 GlobalVariable *InsertBefore = nullptr,
                 ThreadLocalMode = NotThreadLocal,
                 std::optional<unsigned> AddressSpace = std::nullopt,
                 bool isExternallyInitialized = false);
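  /// Usage sketch (illustrative only; Int32Ty and Init are assumed to have
  /// been created elsewhere):
  ///   auto *GV = new GlobalVariable(M, Int32Ty, /*isConstant=*/true,
  ///                                 GlobalValue::PrivateLinkage, Init, "g");
  /// With a parent module given, the new global is linked into M's global
  /// list by the constructor.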
  GlobalVariable(const GlobalVariable &) = delete;
  GlobalVariable &operator=(const GlobalVariable &) = delete;

  ~GlobalVariable() {
    dropAllReferences();
  }

  // allocate space for exactly one operand
  void *operator new(size_t s) {
    return User::operator new(s, 1);
  }

  // delete space for exactly one operand as created in the corresponding new operator
  void operator delete(void *ptr){
    assert(ptr != nullptr && "must not be nullptr");
    User *Obj = static_cast<User *>(ptr);
    // The number of operands can be set to 0 after construction and
    // initialization. Make sure the number of operands is reset to 1, as this
    // is needed by User::operator delete.
    Obj->setGlobalVariableNumOperands(1);
    User::operator delete(Obj);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Definitions have initializers, declarations don't.
  ///
  inline bool hasInitializer() const { return !isDeclaration(); }

  /// hasDefinitiveInitializer - Whether the global variable has an initializer,
  /// and any other instances of the global (this can happen due to weak
  /// linkage) are guaranteed to have the same initializer.
  ///
  /// Note that if you want to transform a global, you must use
  /// hasUniqueInitializer() instead, because of the *_odr linkage type.
  ///
  /// Example:
  ///
  /// @a = global SomeType* null - Initializer is both definitive and unique.
  ///
  /// @b = global weak SomeType* null - Initializer is neither definitive nor
  /// unique.
  ///
  /// @c = global weak_odr SomeType* null - Initializer is definitive, but not
  /// unique.
  inline bool hasDefinitiveInitializer() const {
    return hasInitializer() &&
      // The initializer of a global variable may change to something arbitrary
      // at link time.
      !isInterposable() &&
      // The initializer of a global variable with the externally_initialized
      // marker may change at runtime before C++ initializers are evaluated.
      !isExternallyInitialized();
  }

  /// hasUniqueInitializer - Whether the global variable has an initializer, and
  /// any changes made to the initializer will turn up in the final executable.
  inline bool hasUniqueInitializer() const {
    return
        // We need to be sure this is the definition that will actually be used
        isStrongDefinitionForLinker() &&
        // It is not safe to modify initializers of global variables with the
        // external_initializer marker since the value may be changed at runtime
        // before C++ initializers are evaluated.
        !isExternallyInitialized();
  }

  /// getInitializer - Return the initializer for this global variable.  It is
  /// illegal to call this method if the global is external, because we cannot
  /// tell what the value is initialized to!
  ///
  inline const Constant *getInitializer() const {
    assert(hasInitializer() && "GV doesn't have initializer!");
    return static_cast<Constant*>(Op<0>().get());
  }
  inline Constant *getInitializer() {
    assert(hasInitializer() && "GV doesn't have initializer!");
    return static_cast<Constant*>(Op<0>().get());
  }
  /// setInitializer - Sets the initializer for this global variable, removing
  /// any existing initializer if InitVal==NULL.  If this GV has type T*, the
  /// initializer must have type T.
  void setInitializer(Constant *InitVal);

  /// If the value is a global constant, its value is immutable throughout the
  /// runtime execution of the program.  Assigning a value into the constant
  /// leads to undefined behavior.
  ///
  bool isConstant() const { return isConstantGlobal; }
  void setConstant(bool Val) { isConstantGlobal = Val; }

  bool isExternallyInitialized() const {
    return isExternallyInitializedConstant;
  }
  void setExternallyInitialized(bool Val) {
    isExternallyInitializedConstant = Val;
  }

  /// copyAttributesFrom - copy all additional attributes (those not needed to
  /// create a GlobalVariable) from the GlobalVariable Src to this one.
  void copyAttributesFrom(const GlobalVariable *Src);

  /// removeFromParent - This method unlinks 'this' from the containing module,
  /// but does not delete it.
  ///
  void removeFromParent();

  /// eraseFromParent - This method unlinks 'this' from the containing module
  /// and deletes it.
  ///
  void eraseFromParent();

  /// Drop all references in preparation to destroy the GlobalVariable. This
  /// drops not only the reference to the initializer but also to any metadata.
  void dropAllReferences();

  /// Attach a DIGlobalVariableExpression.
  void addDebugInfo(DIGlobalVariableExpression *GV);

  /// Fill the vector with all debug info attachments.
  void getDebugInfo(SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const;

  /// Add attribute to this global.
  void addAttribute(Attribute::AttrKind Kind) {
    Attrs = Attrs.addAttribute(getContext(), Kind);
  }

  /// Add attribute to this global.
  void addAttribute(StringRef Kind, StringRef Val = StringRef()) {
    Attrs = Attrs.addAttribute(getContext(), Kind, Val);
  }

  /// Return true if the attribute exists.
  bool hasAttribute(Attribute::AttrKind Kind) const {
    return Attrs.hasAttribute(Kind);
  }

  /// Return true if the attribute exists.
  bool hasAttribute(StringRef Kind) const {
    return Attrs.hasAttribute(Kind);
  }

  /// Return true if any attributes exist.
  bool hasAttributes() const {
    return Attrs.hasAttributes();
  }

  /// Return the attribute object.
  Attribute getAttribute(Attribute::AttrKind Kind) const {
    return Attrs.getAttribute(Kind);
  }

  /// Return the attribute object.
  Attribute getAttribute(StringRef Kind) const {
    return Attrs.getAttribute(Kind);
  }

  /// Return the attribute set for this global
  AttributeSet getAttributes() const {
    return Attrs;
  }

  /// Return attribute set as list with index.
  /// FIXME: This may not be required once ValueEnumerators
  /// in bitcode-writer can enumerate attribute-set.
  AttributeList getAttributesAsList(unsigned index) const {
    if (!hasAttributes())
      return AttributeList();
    std::pair<unsigned, AttributeSet> AS[1] = {{index, Attrs}};
    return AttributeList::get(getContext(), AS);
  }

  /// Set attribute list for this global
  void setAttributes(AttributeSet A) {
    Attrs = A;
  }

  /// Check if section name is present
  bool hasImplicitSection() const {
    return getAttributes().hasAttribute("bss-section") ||
           getAttributes().hasAttribute("data-section") ||
           getAttributes().hasAttribute("relro-section") ||
           getAttributes().hasAttribute("rodata-section");
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::GlobalVariableVal;
  }
};

template <>
struct OperandTraits<GlobalVariable> :
  public OptionalOperandTraits<GlobalVariable> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)

} // end namespace llvm

#endif // LLVM_IR_GLOBALVARIABLE_H
//===- llvm/AttributeMask.h - Mask for Attributes ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
// This file declares the AttributeMask class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ATTRIBUTEMASK_H
#define LLVM_IR_ATTRIBUTEMASK_H

#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Attributes.h"
#include <bitset>
#include <cassert>
#include <set>

namespace llvm {

//===----------------------------------------------------------------------===//
/// \class
/// This class stores enough information to efficiently remove some attributes
/// from an existing AttrBuilder, AttributeSet or AttributeList.
class AttributeMask {
  std::bitset<Attribute::EndAttrKinds> Attrs;
  std::set<SmallString<32>, std::less<>> TargetDepAttrs;

public:
  AttributeMask() = default;
  AttributeMask(const AttributeMask &) = delete;
  AttributeMask(AttributeMask &&) = default;

  AttributeMask(AttributeSet AS) {
    for (Attribute A : AS)
      addAttribute(A);
  }

  /// Add an attribute to the mask.
  AttributeMask &addAttribute(Attribute::AttrKind Val) {
    assert((unsigned)Val < Attribute::EndAttrKinds &&
           "Attribute out of range!");
    Attrs[Val] = true;
    return *this;
  }

  /// Add the Attribute object to the builder.
  AttributeMask &addAttribute(Attribute A) {
    if (A.isStringAttribute())
      addAttribute(A.getKindAsString());
    else
      addAttribute(A.getKindAsEnum());
    return *this;
  }

  /// Add the target-dependent attribute to the builder.
  AttributeMask &addAttribute(StringRef A) {
    TargetDepAttrs.insert(A);
    return *this;
  }

  /// Return true if the builder has the specified attribute.
  bool contains(Attribute::AttrKind A) const {
    assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
    return Attrs[A];
  }

  /// Return true if the builder has the specified target-dependent
  /// attribute.
  bool contains(StringRef A) const { return TargetDepAttrs.count(A); }

  /// Return true if the mask contains the specified attribute.
  bool contains(Attribute A) const {
    if (A.isStringAttribute())
      return contains(A.getKindAsString());
    return contains(A.getKindAsEnum());
  }
};
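
// Usage sketch (illustrative only): build a mask and query it.
//   AttributeMask Mask;
//   Mask.addAttribute(Attribute::NoUnwind).addAttribute("my-string-attr");
//   bool HasIt = Mask.contains(Attribute::NoUnwind); // true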

} // end namespace llvm

#endif // LLVM_IR_ATTRIBUTEMASK_H
//==- IntrinsicsXCore.td - XCore intrinsics                 -*- tablegen -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the XCore-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "xcore" in {  // All intrinsics start with "llvm.xcore.".
  // Miscellaneous instructions.
  def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>,
                         ClangBuiltin<"__builtin_bitrev">;
  def int_xcore_crc8 : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
                                 [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
                                 [IntrNoMem]>;
  def int_xcore_crc32 : Intrinsic<[llvm_i32_ty],
                                  [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
                                  [IntrNoMem]>;
  def int_xcore_sext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                                 [IntrNoMem]>;
  def int_xcore_zext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                                 [IntrNoMem]>;
  def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>,
                        ClangBuiltin<"__builtin_getid">;
  def int_xcore_getps : Intrinsic<[llvm_i32_ty],[llvm_i32_ty]>,
                        ClangBuiltin<"__builtin_getps">;
  def int_xcore_setps : Intrinsic<[],[llvm_i32_ty, llvm_i32_ty]>,
                        ClangBuiltin<"__builtin_setps">;
  def int_xcore_geted : Intrinsic<[llvm_i32_ty],[]>;
  def int_xcore_getet : Intrinsic<[llvm_i32_ty],[]>;
  def int_xcore_setsr : Intrinsic<[],[llvm_i32_ty]>;
  def int_xcore_clrsr : Intrinsic<[],[llvm_i32_ty]>;

  // Resource instructions.
  def int_xcore_getr : Intrinsic<[llvm_anyptr_ty],[llvm_i32_ty]>;
  def int_xcore_freer : Intrinsic<[],[llvm_anyptr_ty],
                                   [NoCapture<ArgIndex<0>>]>;
  def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<ArgIndex<0>>]>;
  def int_xcore_int : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                [NoCapture<ArgIndex<0>>]>;
  def int_xcore_inct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                 [NoCapture<ArgIndex<0>>]>;
  def int_xcore_out : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                [NoCapture<ArgIndex<0>>]>;
  def int_xcore_outt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                 [NoCapture<ArgIndex<0>>]>;
  def int_xcore_outct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_chkct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_testct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                   [NoCapture<ArgIndex<0>>]>;
  def int_xcore_testwct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                    [NoCapture<ArgIndex<0>>]>;
  def int_xcore_setd : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_setc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_inshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_outshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_setpt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_clrpt : Intrinsic<[],[llvm_anyptr_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_getts : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_syncr : Intrinsic<[],[llvm_anyptr_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_settw : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_setv : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
                                 [NoCapture<ArgIndex<0>>]>;
  def int_xcore_setev : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
                                  [NoCapture<ArgIndex<0>>]>;
  def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
  def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
  def int_xcore_setclk : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
                                   [NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>]>;
  def int_xcore_setrdy : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
                                   [NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>]>;
  def int_xcore_setpsc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
                                   [NoCapture<ArgIndex<0>>]>;
  def int_xcore_peek : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                 [NoCapture<ArgIndex<0>>]>;
  def int_xcore_endin : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
                                 [NoCapture<ArgIndex<0>>]>;

  // Intrinsics for events.
  def int_xcore_waitevent : Intrinsic<[llvm_ptr_ty],[], [IntrReadMem]>;

  // If any of the resources owned by the thread are ready, this returns the
  // vector of one of the ready resources. If no resources owned by the thread
  // are ready, then the operand passed to the intrinsic is returned.
  def int_xcore_checkevent : Intrinsic<[llvm_ptr_ty],[llvm_ptr_ty]>;

  def int_xcore_clre : Intrinsic<[],[],[]>;

  // Intrinsics for threads.
  def int_xcore_getst : Intrinsic <[llvm_anyptr_ty],[llvm_anyptr_ty],
                                   [NoCapture<ArgIndex<0>>]>;
  def int_xcore_msync : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
  def int_xcore_ssync : Intrinsic <[],[]>;
  def int_xcore_mjoin : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
  def int_xcore_initsp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
                                    [NoCapture<ArgIndex<0>>]>;
  def int_xcore_initpc : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
                                    [NoCapture<ArgIndex<0>>]>;
  def int_xcore_initlr : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
                                    [NoCapture<ArgIndex<0>>]>;
  def int_xcore_initcp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
                                    [NoCapture<ArgIndex<0>>]>;
  def int_xcore_initdp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
                                    [NoCapture<ArgIndex<0>>]>;
}
//===- AutoUpgrade.h - AutoUpgrade Helpers ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  These functions are implemented by lib/IR/AutoUpgrade.cpp.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_AUTOUPGRADE_H
#define LLVM_IR_AUTOUPGRADE_H

#include "llvm/ADT/StringRef.h"
#include <vector>

namespace llvm {
  class AttrBuilder;
  class CallBase;
  class Constant;
  class Function;
  class Instruction;
  class GlobalVariable;
  class MDNode;
  class Module;
  class StringRef;
  class Type;
  class Value;

  template <typename T> class OperandBundleDefT;
  using OperandBundleDef = OperandBundleDefT<Value *>;

  /// This is a more granular function that simply checks an intrinsic function
  /// for upgrading, and returns true if it requires upgrading. It may return
  /// null in NewFn if all calls to the original intrinsic function
  /// should be transformed to non-function-call instructions.
  bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn);

  /// This is the complement to the above, replacing a specific call to an
  /// intrinsic function with a call to the specified new function.
  void UpgradeIntrinsicCall(CallBase *CB, Function *NewFn);
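
  /// A typical driver loop (an illustrative sketch, not part of this header;
  /// it assumes llvm::make_early_inc_range from STLExtras):
  ///   Function *NewFn;
  ///   if (UpgradeIntrinsicFunction(F, NewFn))
  ///     for (User *U : make_early_inc_range(F->users()))
  ///       if (auto *CB = dyn_cast<CallBase>(U))
  ///         UpgradeIntrinsicCall(CB, NewFn);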

  // This upgrades the comment for objc retain release markers in inline asm
  // calls
  void UpgradeInlineAsmString(std::string *AsmStr);

  /// This is an auto-upgrade hook for any old intrinsic function syntaxes
  /// which need to have both the function updated as well as all calls updated
  /// to the new function. This should only be run in a post-processing fashion
  /// so that it can update all calls to the old function.
  void UpgradeCallsToIntrinsic(Function* F);

  /// This checks for global variables which should be upgraded. If it requires
  /// upgrading, this returns a pointer to the upgraded variable.
  GlobalVariable *UpgradeGlobalVariable(GlobalVariable *GV);

  /// This checks for module flags which should be upgraded. It returns true if
  /// module is modified.
  bool UpgradeModuleFlags(Module &M);

  /// Convert calls to ARC runtime functions to intrinsic calls and upgrade the
  /// old retain release marker to new module flag format.
  void UpgradeARCRuntime(Module &M);

  void UpgradeSectionAttributes(Module &M);

  /// Correct any IR that is relying on old function attribute behavior.
  void UpgradeFunctionAttributes(Function &F);

  /// If the given TBAA tag uses the scalar TBAA format, create a new node
  /// corresponding to the upgrade to the struct-path aware TBAA format.
  /// Otherwise return the \p TBAANode itself.
  MDNode *UpgradeTBAANode(MDNode &TBAANode);

  /// This is an auto-upgrade for bitcast between pointers with different
  /// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
  Instruction *UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                  Instruction *&Temp);

  /// This is an auto-upgrade for bitcast constant expression between pointers
  /// with different address spaces: the instruction is replaced by a pair
  /// ptrtoint+inttoptr.
  Constant *UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy);

  /// Check the debug info version number, if it is out-dated, drop the debug
  /// info. Return true if module is modified.
  bool UpgradeDebugInfo(Module &M);

  /// Check whether a string looks like an old loop attachment tag.
  inline bool mayBeOldLoopAttachmentTag(StringRef Name) {
    return Name.startswith("llvm.vectorizer.");
  }

  /// Upgrade the loop attachment metadata node.
  MDNode *upgradeInstructionLoopAttachment(MDNode &N);

  /// Upgrade the datalayout string by adding a section for address space
  /// pointers.
  std::string UpgradeDataLayoutString(StringRef DL, StringRef Triple);

  /// Upgrade attributes that changed format or kind.
  void UpgradeAttributes(AttrBuilder &B);

  /// Upgrade operand bundles (without knowing about their user instruction).
  void UpgradeOperandBundles(std::vector<OperandBundleDef> &OperandBundles);

} // End llvm namespace

#endif
//===- llvm/IR/ConstrainedOps.def - Constrained intrinsics ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines properties of constrained intrinsics, in particular corresponding
// floating point operations and DAG nodes.
//
//===----------------------------------------------------------------------===//

// DAG_FUNCTION defers to DAG_INSTRUCTION if it is defined, otherwise to FUNCTION.
#ifndef DAG_FUNCTION
#ifdef DAG_INSTRUCTION
#define DAG_FUNCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
#else
#define DAG_FUNCTION(N,A,R,I,D) FUNCTION(N,A,R,I)
#endif
#endif

#ifndef INSTRUCTION
#define INSTRUCTION(N,A,R,I)
#endif

// DAG_INSTRUCTION is treated like an INSTRUCTION if the DAG node isn't used.
#ifndef DAG_INSTRUCTION
#define DAG_INSTRUCTION(N,A,R,I,D) INSTRUCTION(N,A,R,I)
#endif

// In most cases an intrinsic function is handled like an instruction.
#ifndef FUNCTION
#define FUNCTION(N,A,R,I) INSTRUCTION(N,A,R,I)
#endif

// Compare instructions have a DAG node, so they are treated like DAG_INSTRUCTION.
#ifndef CMP_INSTRUCTION
#define CMP_INSTRUCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
#endif

// Arguments of the entries are:
// - Instruction or intrinsic function name.
// - Number of original instruction/intrinsic arguments.
// - 1 if the corresponding constrained intrinsic has a rounding mode argument.
// - Name of the constrained intrinsic to represent this instruction/function.
// - DAG node corresponding to the constrained intrinsic, without the STRICT_
//   prefix.
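//
// A consumer that defines only INSTRUCTION visits every entry, because the
// other macros default to it through the chain above. A minimal sketch
// (handleConstrainedOp is a hypothetical callback, not an LLVM API):
//
//   #define INSTRUCTION(N, A, R, I) handleConstrainedOp(#N, A, R, #I);
//   #include "llvm/IR/ConstrainedOps.def"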

// These are definitions for instructions that are converted into constrained
// intrinsics.
//
DAG_INSTRUCTION(FAdd,         2, 1, experimental_constrained_fadd,       FADD)
DAG_INSTRUCTION(FSub,         2, 1, experimental_constrained_fsub,       FSUB)
DAG_INSTRUCTION(FMul,         2, 1, experimental_constrained_fmul,       FMUL)
DAG_INSTRUCTION(FDiv,         2, 1, experimental_constrained_fdiv,       FDIV)
DAG_INSTRUCTION(FRem,         2, 1, experimental_constrained_frem,       FREM)
DAG_INSTRUCTION(FPExt,        1, 0, experimental_constrained_fpext,      FP_EXTEND)
DAG_INSTRUCTION(SIToFP,       1, 1, experimental_constrained_sitofp,     SINT_TO_FP)
DAG_INSTRUCTION(UIToFP,       1, 1, experimental_constrained_uitofp,     UINT_TO_FP)
DAG_INSTRUCTION(FPToSI,       1, 0, experimental_constrained_fptosi,     FP_TO_SINT)
DAG_INSTRUCTION(FPToUI,       1, 0, experimental_constrained_fptoui,     FP_TO_UINT)
DAG_INSTRUCTION(FPTrunc,      1, 1, experimental_constrained_fptrunc,    FP_ROUND)

// These are definitions for compare instructions (signaling and quiet version).
// Both of these match to FCmp / SETCC.
CMP_INSTRUCTION(FCmp,         2, 0, experimental_constrained_fcmp,       FSETCC)
CMP_INSTRUCTION(FCmp,         2, 0, experimental_constrained_fcmps,      FSETCCS)

// These are definitions for intrinsic functions that are converted into
// constrained intrinsics.
//
DAG_FUNCTION(ceil,            1, 0, experimental_constrained_ceil,       FCEIL)
DAG_FUNCTION(cos,             1, 1, experimental_constrained_cos,        FCOS)
DAG_FUNCTION(exp,             1, 1, experimental_constrained_exp,        FEXP)
DAG_FUNCTION(exp2,            1, 1, experimental_constrained_exp2,       FEXP2)
DAG_FUNCTION(floor,           1, 0, experimental_constrained_floor,      FFLOOR)
DAG_FUNCTION(fma,             3, 1, experimental_constrained_fma,        FMA)
DAG_FUNCTION(log,             1, 1, experimental_constrained_log,        FLOG)
DAG_FUNCTION(log10,           1, 1, experimental_constrained_log10,      FLOG10)
DAG_FUNCTION(log2,            1, 1, experimental_constrained_log2,       FLOG2)
DAG_FUNCTION(lrint,           1, 1, experimental_constrained_lrint,      LRINT)
DAG_FUNCTION(llrint,          1, 1, experimental_constrained_llrint,     LLRINT)
DAG_FUNCTION(lround,          1, 0, experimental_constrained_lround,     LROUND)
DAG_FUNCTION(llround,         1, 0, experimental_constrained_llround,    LLROUND)
DAG_FUNCTION(maxnum,          2, 0, experimental_constrained_maxnum,     FMAXNUM)
DAG_FUNCTION(minnum,          2, 0, experimental_constrained_minnum,     FMINNUM)
DAG_FUNCTION(maximum,         2, 0, experimental_constrained_maximum,    FMAXIMUM)
DAG_FUNCTION(minimum,         2, 0, experimental_constrained_minimum,    FMINIMUM)
DAG_FUNCTION(nearbyint,       1, 1, experimental_constrained_nearbyint,  FNEARBYINT)
DAG_FUNCTION(pow,             2, 1, experimental_constrained_pow,        FPOW)
DAG_FUNCTION(powi,            2, 1, experimental_constrained_powi,       FPOWI)
DAG_FUNCTION(ldexp,           2, 1, experimental_constrained_ldexp,      FLDEXP)
DAG_FUNCTION(rint,            1, 1, experimental_constrained_rint,       FRINT)
DAG_FUNCTION(round,           1, 0, experimental_constrained_round,      FROUND)
DAG_FUNCTION(roundeven,       1, 0, experimental_constrained_roundeven,  FROUNDEVEN)
DAG_FUNCTION(sin,             1, 1, experimental_constrained_sin,        FSIN)
DAG_FUNCTION(sqrt,            1, 1, experimental_constrained_sqrt,       FSQRT)
DAG_FUNCTION(trunc,           1, 0, experimental_constrained_trunc,      FTRUNC)

// This is the definition for the fmuladd intrinsic function, which is
// converted into either a constrained FMA intrinsic or constrained
// FMUL + FADD intrinsics.
FUNCTION(fmuladd,         3, 1, experimental_constrained_fmuladd)

#undef INSTRUCTION
#undef FUNCTION
#undef CMP_INSTRUCTION
#undef DAG_INSTRUCTION
#undef DAG_FUNCTION

//===- llvm/FixedPointBuilder.h - Builder for fixed-point ops ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the FixedPointBuilder class, which is used as a convenient
// way to lower fixed-point arithmetic operations to LLVM IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_FIXEDPOINTBUILDER_H
#define LLVM_IR_FIXEDPOINTBUILDER_H

#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

#include <cmath>

namespace llvm {

template <class IRBuilderTy> class FixedPointBuilder {
  IRBuilderTy &B;

  Value *Convert(Value *Src, const FixedPointSemantics &SrcSema,
                 const FixedPointSemantics &DstSema, bool DstIsInteger) {
    unsigned SrcWidth = SrcSema.getWidth();
    unsigned DstWidth = DstSema.getWidth();
    unsigned SrcScale = SrcSema.getScale();
    unsigned DstScale = DstSema.getScale();
    bool SrcIsSigned = SrcSema.isSigned();
    bool DstIsSigned = DstSema.isSigned();

    Type *DstIntTy = B.getIntNTy(DstWidth);

    Value *Result = Src;
    unsigned ResultWidth = SrcWidth;

    // Downscale.
    if (DstScale < SrcScale) {
      // When converting to integers, we round towards zero. For negative
      // numbers, right shifting rounds towards negative infinity. In this case,
      // we can just round up before shifting.
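      // For example, with SrcScale == 1 the raw value -3 (representing -1.5)
      // becomes (-3 + 1) >> 1 == -1, matching round-toward-zero.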
      if (DstIsInteger && SrcIsSigned) {
        Value *Zero = Constant::getNullValue(Result->getType());
        Value *IsNegative = B.CreateICmpSLT(Result, Zero);
        Value *LowBits = ConstantInt::get(
            B.getContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
        Value *Rounded = B.CreateAdd(Result, LowBits);
        Result = B.CreateSelect(IsNegative, Rounded, Result);
      }

      Result = SrcIsSigned
                   ? B.CreateAShr(Result, SrcScale - DstScale, "downscale")
                   : B.CreateLShr(Result, SrcScale - DstScale, "downscale");
    }

    if (!DstSema.isSaturated()) {
      // Resize.
      Result = B.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");

      // Upscale.
      if (DstScale > SrcScale)
        Result = B.CreateShl(Result, DstScale - SrcScale, "upscale");
    } else {
      // Adjust the number of fractional bits.
      if (DstScale > SrcScale) {
        // Compare to DstWidth to prevent resizing twice.
        ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
        Type *UpscaledTy = B.getIntNTy(ResultWidth);
        Result = B.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
        Result = B.CreateShl(Result, DstScale - SrcScale, "upscale");
      }

      // Handle saturation.
      bool LessIntBits = DstSema.getIntegralBits() < SrcSema.getIntegralBits();
      if (LessIntBits) {
        Value *Max = ConstantInt::get(
            B.getContext(),
            APFixedPoint::getMax(DstSema).getValue().extOrTrunc(ResultWidth));
        Value *TooHigh = SrcIsSigned ? B.CreateICmpSGT(Result, Max)
                                     : B.CreateICmpUGT(Result, Max);
        Result = B.CreateSelect(TooHigh, Max, Result, "satmax");
      }
      // The result cannot overflow below the destination minimum if the source
      // is unsigned, since every fixed-point type can represent the unsigned
      // minimum of 0.
      if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
        Value *Min = ConstantInt::get(
            B.getContext(),
            APFixedPoint::getMin(DstSema).getValue().extOrTrunc(ResultWidth));
        Value *TooLow = B.CreateICmpSLT(Result, Min);
        Result = B.CreateSelect(TooLow, Min, Result, "satmin");
      }

      // Resize the integer part to get the final destination size.
      if (ResultWidth != DstWidth)
        Result = B.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
    }
    return Result;
  }

  /// Get the common semantic for two semantics, with the added imposition that
  /// saturated padded types retain the padding bit.
  FixedPointSemantics
  getCommonBinopSemantic(const FixedPointSemantics &LHSSema,
                         const FixedPointSemantics &RHSSema) {
    auto C = LHSSema.getCommonSemantics(RHSSema);
    bool BothPadded =
        LHSSema.hasUnsignedPadding() && RHSSema.hasUnsignedPadding();
    return FixedPointSemantics(
        C.getWidth() + (unsigned)(BothPadded && C.isSaturated()), C.getScale(),
        C.isSigned(), C.isSaturated(), BothPadded);
  }

  /// Given a floating point type and a fixed-point semantic, return a floating
  /// point type which can accommodate the fixed-point semantic. This is either
  /// \p Ty, or a floating point type with a larger exponent than Ty.
  Type *getAccommodatingFloatType(Type *Ty, const FixedPointSemantics &Sema) {
    const fltSemantics *FloatSema = &Ty->getFltSemantics();
    while (!Sema.fitsInFloatSemantics(*FloatSema))
      FloatSema = APFixedPoint::promoteFloatSemantics(FloatSema);
    return Type::getFloatingPointTy(Ty->getContext(), *FloatSema);
  }

public:
  FixedPointBuilder(IRBuilderTy &Builder) : B(Builder) {}

  /// Convert an integer value representing a fixed-point number from one
  /// fixed-point semantic to another fixed-point semantic.
  /// \p Src     - The source value
  /// \p SrcSema - The fixed-point semantic of the source value
  /// \p DstSema - The resulting fixed-point semantic
  Value *CreateFixedToFixed(Value *Src, const FixedPointSemantics &SrcSema,
                            const FixedPointSemantics &DstSema) {
    return Convert(Src, SrcSema, DstSema, false);
  }

  /// Convert an integer value representing a fixed-point number to an integer
  /// with the given bit width and signedness.
  /// \p Src         - The source value
  /// \p SrcSema     - The fixed-point semantic of the source value
  /// \p DstWidth    - The bit width of the result value
  /// \p DstIsSigned - The signedness of the result value
  Value *CreateFixedToInteger(Value *Src, const FixedPointSemantics &SrcSema,
                              unsigned DstWidth, bool DstIsSigned) {
    return Convert(
        Src, SrcSema,
        FixedPointSemantics::GetIntegerSemantics(DstWidth, DstIsSigned), true);
  }

  /// Convert an integer value with the given signedness to an integer value
  /// representing the given fixed-point semantic.
  /// \p Src         - The source value
  /// \p SrcIsSigned - The signedness of the source value
  /// \p DstSema     - The resulting fixed-point semantic
  Value *CreateIntegerToFixed(Value *Src, unsigned SrcIsSigned,
                              const FixedPointSemantics &DstSema) {
    return Convert(Src,
                   FixedPointSemantics::GetIntegerSemantics(
                       Src->getType()->getScalarSizeInBits(), SrcIsSigned),
                   DstSema, false);
  }

  Value *CreateFixedToFloating(Value *Src, const FixedPointSemantics &SrcSema,
                               Type *DstTy) {
    Value *Result;
    Type *OpTy = getAccommodatingFloatType(DstTy, SrcSema);
    // Convert the raw fixed-point value directly to floating point. If the
    // value is too large to fit, it will be rounded, not truncated.
    Result = SrcSema.isSigned() ? B.CreateSIToFP(Src, OpTy)
                                : B.CreateUIToFP(Src, OpTy);
    // Rescale the integral-in-floating point by the scaling factor. This is
    // lossless, except for overflow to infinity which is unlikely.
    Result = B.CreateFMul(Result,
        ConstantFP::get(OpTy, std::pow(2, -(int)SrcSema.getScale())));
    if (OpTy != DstTy)
      Result = B.CreateFPTrunc(Result, DstTy);
    return Result;
  }

  Value *CreateFloatingToFixed(Value *Src, const FixedPointSemantics &DstSema) {
    bool UseSigned = DstSema.isSigned() || DstSema.hasUnsignedPadding();
    Value *Result = Src;
    Type *OpTy = getAccommodatingFloatType(Src->getType(), DstSema);
    if (OpTy != Src->getType())
      Result = B.CreateFPExt(Result, OpTy);
    // Rescale the floating point value so that its significant bits (for the
    // purposes of the conversion) are in the integral range.
    Result = B.CreateFMul(Result,
        ConstantFP::get(OpTy, std::pow(2, DstSema.getScale())));

    Type *ResultTy = B.getIntNTy(DstSema.getWidth());
    if (DstSema.isSaturated()) {
      Intrinsic::ID IID =
          UseSigned ? Intrinsic::fptosi_sat : Intrinsic::fptoui_sat;
      Result = B.CreateIntrinsic(IID, {ResultTy, OpTy}, {Result});
    } else {
      Result = UseSigned ? B.CreateFPToSI(Result, ResultTy)
                         : B.CreateFPToUI(Result, ResultTy);
    }

    // When saturating unsigned-with-padding using signed operations, we may
    // get negative values. Emit an extra clamp to zero.
    if (DstSema.isSaturated() && DstSema.hasUnsignedPadding()) {
      Constant *Zero = Constant::getNullValue(Result->getType());
      Result =
          B.CreateSelect(B.CreateICmpSLT(Result, Zero), Zero, Result, "satmin");
    }

    return Result;
  }

  /// Add two fixed-point values and return the result in their common semantic.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateAdd(Value *LHS, const FixedPointSemantics &LHSSema,
                   Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
    bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    Value *Result;
    if (CommonSema.isSaturated()) {
      Intrinsic::ID IID = UseSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
      Result = B.CreateBinaryIntrinsic(IID, WideLHS, WideRHS);
    } else {
      Result = B.CreateAdd(WideLHS, WideRHS);
    }

    return CreateFixedToFixed(Result, CommonSema,
                              LHSSema.getCommonSemantics(RHSSema));
  }

  /// Subtract two fixed-point values and return the result in their common
  /// semantic.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateSub(Value *LHS, const FixedPointSemantics &LHSSema,
                   Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
    bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    Value *Result;
    if (CommonSema.isSaturated()) {
      Intrinsic::ID IID = UseSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
      Result = B.CreateBinaryIntrinsic(IID, WideLHS, WideRHS);
    } else {
      Result = B.CreateSub(WideLHS, WideRHS);
    }

    // Subtraction can end up below 0 for padded unsigned operations, so emit
    // an extra clamp in that case.
    if (CommonSema.isSaturated() && CommonSema.hasUnsignedPadding()) {
      Constant *Zero = Constant::getNullValue(Result->getType());
      Result =
          B.CreateSelect(B.CreateICmpSLT(Result, Zero), Zero, Result, "satmin");
    }

    return CreateFixedToFixed(Result, CommonSema,
                              LHSSema.getCommonSemantics(RHSSema));
  }

  /// Multiply two fixed-point values and return the result in their common
  /// semantic.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateMul(Value *LHS, const FixedPointSemantics &LHSSema,
                   Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
    bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    Intrinsic::ID IID;
    if (CommonSema.isSaturated()) {
      IID = UseSigned ? Intrinsic::smul_fix_sat : Intrinsic::umul_fix_sat;
    } else {
      IID = UseSigned ? Intrinsic::smul_fix : Intrinsic::umul_fix;
    }
    Value *Result = B.CreateIntrinsic(
        IID, {WideLHS->getType()},
        {WideLHS, WideRHS, B.getInt32(CommonSema.getScale())});

    return CreateFixedToFixed(Result, CommonSema,
                              LHSSema.getCommonSemantics(RHSSema));
  }

  /// Divide two fixed-point values and return the result in their common
  /// semantic.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateDiv(Value *LHS, const FixedPointSemantics &LHSSema,
                   Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
    bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    Intrinsic::ID IID;
    if (CommonSema.isSaturated()) {
      IID = UseSigned ? Intrinsic::sdiv_fix_sat : Intrinsic::udiv_fix_sat;
    } else {
      IID = UseSigned ? Intrinsic::sdiv_fix : Intrinsic::udiv_fix;
    }
    Value *Result = B.CreateIntrinsic(
        IID, {WideLHS->getType()},
        {WideLHS, WideRHS, B.getInt32(CommonSema.getScale())});

    return CreateFixedToFixed(Result, CommonSema,
                              LHSSema.getCommonSemantics(RHSSema));
  }

  /// Left shift a fixed-point value by an unsigned integer value. The integer
  /// value can be any bit width.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  Value *CreateShl(Value *LHS, const FixedPointSemantics &LHSSema, Value *RHS) {
    bool UseSigned = LHSSema.isSigned() || LHSSema.hasUnsignedPadding();

    RHS = B.CreateIntCast(RHS, LHS->getType(), /*IsSigned=*/false);

    Value *Result;
    if (LHSSema.isSaturated()) {
      Intrinsic::ID IID = UseSigned ? Intrinsic::sshl_sat : Intrinsic::ushl_sat;
      Result = B.CreateBinaryIntrinsic(IID, LHS, RHS);
    } else {
      Result = B.CreateShl(LHS, RHS);
    }

    return Result;
  }

  /// Right shift a fixed-point value by an unsigned integer value. The integer
  /// value can be any bit width.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  Value *CreateShr(Value *LHS, const FixedPointSemantics &LHSSema, Value *RHS) {
    RHS = B.CreateIntCast(RHS, LHS->getType(), false);

    return LHSSema.isSigned() ? B.CreateAShr(LHS, RHS) : B.CreateLShr(LHS, RHS);
  }

  /// Compare two fixed-point values for equality.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateEQ(Value *LHS, const FixedPointSemantics &LHSSema,
                  Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    return B.CreateICmpEQ(WideLHS, WideRHS);
  }

  /// Compare two fixed-point values for inequality.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateNE(Value *LHS, const FixedPointSemantics &LHSSema,
                  Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    return B.CreateICmpNE(WideLHS, WideRHS);
  }

  /// Compare two fixed-point values as LHS < RHS.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateLT(Value *LHS, const FixedPointSemantics &LHSSema,
                  Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    return CommonSema.isSigned() ? B.CreateICmpSLT(WideLHS, WideRHS)
                                 : B.CreateICmpULT(WideLHS, WideRHS);
  }

  /// Compare two fixed-point values as LHS <= RHS.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateLE(Value *LHS, const FixedPointSemantics &LHSSema,
                  Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    return CommonSema.isSigned() ? B.CreateICmpSLE(WideLHS, WideRHS)
                                 : B.CreateICmpULE(WideLHS, WideRHS);
  }

  /// Compare two fixed-point values as LHS > RHS.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateGT(Value *LHS, const FixedPointSemantics &LHSSema,
                  Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    return CommonSema.isSigned() ? B.CreateICmpSGT(WideLHS, WideRHS)
                                 : B.CreateICmpUGT(WideLHS, WideRHS);
  }

  /// Compare two fixed-point values as LHS >= RHS.
  /// \p LHS     - The left hand side
  /// \p LHSSema - The semantic of the left hand side
  /// \p RHS     - The right hand side
  /// \p RHSSema - The semantic of the right hand side
  Value *CreateGE(Value *LHS, const FixedPointSemantics &LHSSema,
                  Value *RHS, const FixedPointSemantics &RHSSema) {
    auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);

    Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
    Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);

    return CommonSema.isSigned() ? B.CreateICmpSGE(WideLHS, WideRHS)
                                 : B.CreateICmpUGE(WideLHS, WideRHS);
  }
};
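
// Example usage (a minimal sketch; `InsertPt` and `Src` are assumed to be in
// scope, and the chosen semantics are illustrative assumptions):
//
//   IRBuilder<> Builder(InsertPt);
//   FixedPointBuilder<IRBuilder<>> FPB(Builder);
//   // Signed source with width 32 and scale 15; saturating unsigned
//   // destination with width 16 and scale 8.
//   FixedPointSemantics SrcSema(/*Width=*/32, /*Scale=*/15, /*IsSigned=*/true,
//                               /*IsSaturated=*/false,
//                               /*HasUnsignedPadding=*/false);
//   FixedPointSemantics DstSema(/*Width=*/16, /*Scale=*/8, /*IsSigned=*/false,
//                               /*IsSaturated=*/true,
//                               /*HasUnsignedPadding=*/false);
//   Value *Dst = FPB.CreateFixedToFixed(Src, SrcSema, DstSema);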

} // end namespace llvm

#endif // LLVM_IR_FIXEDPOINTBUILDER_H

//===- llvm/IR/Statepoint.h - gc.statepoint utilities -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains utility functions and a wrapper class analogous to
// CallBase for accessing the fields of gc.statepoint, gc.relocate,
// gc.result intrinsics; and some general utilities helpful when dealing with
// gc.statepoint.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_STATEPOINT_H
#define LLVM_IR_STATEPOINT_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

namespace llvm {

/// The statepoint intrinsic accepts a set of flags as its third argument.
/// Valid values come out of this set.
enum class StatepointFlags {
  None = 0,
  GCTransition = 1, ///< Indicates that this statepoint is a transition from
                    ///< GC-aware code to code that is not GC-aware.
  /// Mark the deopt arguments associated with the statepoint as only being
  /// "live-in". By default, deopt arguments are "live-through".  "live-through"
  /// requires that the value be live on entry, on exit, and at any point
  /// during the call.  "live-in" only requires the value be available at the
  /// start of the call.  In particular, "live-in" values can be placed in
  /// unused argument registers or other non-callee saved registers.
  DeoptLiveIn = 2,

  MaskAll = 3 ///< A bitmask that includes all valid flags.
};

// These two are defined in IntrinsicInst since they're part of the
// IntrinsicInst class hierarchy.
class GCRelocateInst;

/// Represents a gc.statepoint intrinsic call.  This extends directly from
/// CallBase, as IntrinsicInst only supports calls and gc.statepoint is
/// invokable.
class GCStatepointInst : public CallBase {
public:
  GCStatepointInst() = delete;
  GCStatepointInst(const GCStatepointInst &) = delete;
  GCStatepointInst &operator=(const GCStatepointInst &) = delete;

  static bool classof(const CallBase *I) {
    if (const Function *CF = I->getCalledFunction())
      return CF->getIntrinsicID() == Intrinsic::experimental_gc_statepoint;
    return false;
  }

  static bool classof(const Value *V) {
    return isa<CallBase>(V) && classof(cast<CallBase>(V));
  }

  enum {
    IDPos = 0,
    NumPatchBytesPos = 1,
    CalledFunctionPos = 2,
    NumCallArgsPos = 3,
    FlagsPos = 4,
    CallArgsBeginPos = 5,
  };
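
  // A sketch of the corresponding operand layout in textual IR (types and
  // names are illustrative; trailing operand bundles are omitted):
  //   %tok = call token @llvm.experimental.gc.statepoint(
  //       i64 ID, i32 NumPatchBytes, ptr elementtype(FnTy) @callee,
  //       i32 NumCallArgs, i32 Flags, <call args>...)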

  /// Return the ID associated with this statepoint.
  uint64_t getID() const {
    return cast<ConstantInt>(getArgOperand(IDPos))->getZExtValue();
  }

  /// Return the number of patchable bytes associated with this statepoint.
  uint32_t getNumPatchBytes() const {
    const Value *NumPatchBytesVal = getArgOperand(NumPatchBytesPos);
    uint64_t NumPatchBytes =
      cast<ConstantInt>(NumPatchBytesVal)->getZExtValue();
    assert(isInt<32>(NumPatchBytes) && "should fit in 32 bits!");
    return NumPatchBytes;
  }

  /// Number of arguments to be passed to the actual callee.
  int getNumCallArgs() const {
    return cast<ConstantInt>(getArgOperand(NumCallArgsPos))->getZExtValue();
  }

  uint64_t getFlags() const {
    return cast<ConstantInt>(getArgOperand(FlagsPos))->getZExtValue();
  }

  /// Return the value actually being called or invoked.
  Value *getActualCalledOperand() const {
    return getArgOperand(CalledFunctionPos);
  }

  /// Returns the function called if this is wrapping a direct call, and null
  /// otherwise.
  Function *getActualCalledFunction() const {
    return dyn_cast_or_null<Function>(getActualCalledOperand());
  }

  /// Return the type of the value returned by the call underlying the
  /// statepoint.
  Type *getActualReturnType() const {
    auto *FT = cast<FunctionType>(getParamElementType(CalledFunctionPos));
    return FT->getReturnType();
  }

  /// Return the number of arguments to the underlying call.
  size_t actual_arg_size() const { return getNumCallArgs(); }
  /// Return an iterator to the beginning of the arguments to the underlying call
  const_op_iterator actual_arg_begin() const {
    assert(CallArgsBeginPos <= (int)arg_size());
    return arg_begin() + CallArgsBeginPos;
  }
  /// Return an end iterator of the arguments to the underlying call
  const_op_iterator actual_arg_end() const {
    auto I = actual_arg_begin() + actual_arg_size();
    assert((arg_end() - I) == 2);
    return I;
  }
  /// range adapter for actual call arguments
  iterator_range<const_op_iterator> actual_args() const {
    return make_range(actual_arg_begin(), actual_arg_end());
  }

  const_op_iterator gc_transition_args_begin() const {
    if (auto Opt = getOperandBundle(LLVMContext::OB_gc_transition))
      return Opt->Inputs.begin();
    return arg_end();
  }
  const_op_iterator gc_transition_args_end() const {
    if (auto Opt = getOperandBundle(LLVMContext::OB_gc_transition))
      return Opt->Inputs.end();
    return arg_end();
  }

  /// range adapter for GC transition arguments
  iterator_range<const_op_iterator> gc_transition_args() const {
    return make_range(gc_transition_args_begin(), gc_transition_args_end());
  }

  const_op_iterator deopt_begin() const {
    if (auto Opt = getOperandBundle(LLVMContext::OB_deopt))
      return Opt->Inputs.begin();
    return arg_end();
  }
  const_op_iterator deopt_end() const {
    if (auto Opt = getOperandBundle(LLVMContext::OB_deopt))
      return Opt->Inputs.end();
    return arg_end();
  }

  /// range adapter for vm state arguments
  iterator_range<const_op_iterator> deopt_operands() const {
    return make_range(deopt_begin(), deopt_end());
  }

  /// Returns an iterator to the beginning of the argument range describing gc
  /// values for the statepoint.
  const_op_iterator gc_args_begin() const {
    if (auto Opt = getOperandBundle(LLVMContext::OB_gc_live))
      return Opt->Inputs.begin();
    return arg_end();
  }

  /// Return an end iterator for the gc argument range
  const_op_iterator gc_args_end() const {
    if (auto Opt = getOperandBundle(LLVMContext::OB_gc_live))
      return Opt->Inputs.end();
    return arg_end();
  }

  /// range adapter for gc arguments
  iterator_range<const_op_iterator> gc_args() const {
    return make_range(gc_args_begin(), gc_args_end());
  }

  /// Get a list of all gc.relocates linked to this statepoint.
  /// It may contain several relocations for the same base/derived pair;
  /// for example, this can happen due to relocations on the unwinding
  /// path of an invoke.
  inline std::vector<const GCRelocateInst *> getGCRelocates() const;
};

std::vector<const GCRelocateInst *> GCStatepointInst::getGCRelocates() const {
  std::vector<const GCRelocateInst *> Result;

  // Search for relocated pointers.  Note that working backwards from the
  // gc_relocates ensures that we only get pairs which are actually relocated
  // and used after the statepoint.
  for (const User *U : users())
    if (auto *Relocate = dyn_cast<GCRelocateInst>(U))
      Result.push_back(Relocate);

  auto *StatepointInvoke = dyn_cast<InvokeInst>(this);
  if (!StatepointInvoke)
    return Result;

  // We need to scan through exceptional relocations if this is an invoke
  // statepoint.
  LandingPadInst *LandingPad = StatepointInvoke->getLandingPadInst();

  // Search for gc relocates that are attached to this landingpad.
  for (const User *LandingPadUser : LandingPad->users()) {
    if (auto *Relocate = dyn_cast<GCRelocateInst>(LandingPadUser))
      Result.push_back(Relocate);
  }
  return Result;
}

/// Call sites that get wrapped by a gc.statepoint (currently only in
/// RewriteStatepointsForGC and potentially in other passes in the future) can
/// have attributes that describe properties of the gc.statepoint call they
/// will eventually be wrapped in. This struct is used to represent such
/// directives.
struct StatepointDirectives {
  std::optional<uint32_t> NumPatchBytes;
  std::optional<uint64_t> StatepointID;

  static const uint64_t DefaultStatepointID = 0xABCDEF00;
  static const uint64_t DeoptBundleStatepointID = 0xABCDEF0F;
};
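
/// For example, a call site carrying the attributes
///   "statepoint-id"="42" "statepoint-num-patch-bytes"="8"
/// yields StatepointID == 42 and NumPatchBytes == 8 when parsed by
/// parseStatepointDirectivesFromAttrs below.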

/// Parse out statepoint directives from the function attributes present in \p
/// AS.
StatepointDirectives parseStatepointDirectivesFromAttrs(AttributeList AS);

/// Return \c true if the \p Attr is an attribute that is a statepoint
/// directive.
bool isStatepointDirectiveAttr(Attribute Attr);

} // end namespace llvm

#endif // LLVM_IR_STATEPOINT_H

//===- IntrinsicsMips.td - Defines Mips intrinsics ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the MIPS-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MIPS DSP data types
def mips_v2q15_ty: LLVMType<v2i16>;
def mips_v4q7_ty: LLVMType<v4i8>;
def mips_q31_ty: LLVMType<i32>;
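// (q7, q15, and q31 are signed fixed-point formats with 7, 15, and 31
// fractional bits respectively; they are modeled here with integer types of
// the matching width.)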

let TargetPrefix = "mips" in {  // All intrinsics start with "llvm.mips.".

//===----------------------------------------------------------------------===//
// MIPS DSP Rev 1

//===----------------------------------------------------------------------===//
// Addition/subtraction

def int_mips_addu_qb : ClangBuiltin<"__builtin_mips_addu_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
            [Commutative, IntrNoMem]>;
def int_mips_addu_s_qb : ClangBuiltin<"__builtin_mips_addu_s_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
            [Commutative, IntrNoMem]>;
def int_mips_subu_qb : ClangBuiltin<"__builtin_mips_subu_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_subu_s_qb : ClangBuiltin<"__builtin_mips_subu_s_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;

def int_mips_addq_ph : ClangBuiltin<"__builtin_mips_addq_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
            [Commutative, IntrNoMem]>;
def int_mips_addq_s_ph : ClangBuiltin<"__builtin_mips_addq_s_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
            [Commutative, IntrNoMem]>;
def int_mips_subq_ph : ClangBuiltin<"__builtin_mips_subq_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
def int_mips_subq_s_ph : ClangBuiltin<"__builtin_mips_subq_s_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;

def int_mips_madd: ClangBuiltin<"__builtin_mips_madd">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, Commutative]>;
def int_mips_maddu: ClangBuiltin<"__builtin_mips_maddu">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, Commutative]>;

def int_mips_msub: ClangBuiltin<"__builtin_mips_msub">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem]>;
def int_mips_msubu: ClangBuiltin<"__builtin_mips_msubu">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem]>;

def int_mips_addq_s_w: ClangBuiltin<"__builtin_mips_addq_s_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
def int_mips_subq_s_w: ClangBuiltin<"__builtin_mips_subq_s_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], []>;

def int_mips_addsc: ClangBuiltin<"__builtin_mips_addsc">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
def int_mips_addwc: ClangBuiltin<"__builtin_mips_addwc">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;

def int_mips_modsub: ClangBuiltin<"__builtin_mips_modsub">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

def int_mips_raddu_w_qb: ClangBuiltin<"__builtin_mips_raddu_w_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Absolute value

def int_mips_absq_s_ph: ClangBuiltin<"__builtin_mips_absq_s_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty], []>;
def int_mips_absq_s_w: ClangBuiltin<"__builtin_mips_absq_s_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty], []>;

//===----------------------------------------------------------------------===//
// Precision reduce/expand

def int_mips_precrq_qb_ph: ClangBuiltin<"__builtin_mips_precrq_qb_ph">,
  Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
def int_mips_precrqu_s_qb_ph: ClangBuiltin<"__builtin_mips_precrqu_s_qb_ph">,
  Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_precrq_ph_w: ClangBuiltin<"__builtin_mips_precrq_ph_w">,
  Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
def int_mips_precrq_rs_ph_w: ClangBuiltin<"__builtin_mips_precrq_rs_ph_w">,
  Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], []>;
def int_mips_preceq_w_phl: ClangBuiltin<"__builtin_mips_preceq_w_phl">,
  Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
def int_mips_preceq_w_phr: ClangBuiltin<"__builtin_mips_preceq_w_phr">,
  Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
def int_mips_precequ_ph_qbl: ClangBuiltin<"__builtin_mips_precequ_ph_qbl">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_precequ_ph_qbr: ClangBuiltin<"__builtin_mips_precequ_ph_qbr">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_precequ_ph_qbla: ClangBuiltin<"__builtin_mips_precequ_ph_qbla">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_precequ_ph_qbra: ClangBuiltin<"__builtin_mips_precequ_ph_qbra">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_preceu_ph_qbl: ClangBuiltin<"__builtin_mips_preceu_ph_qbl">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_preceu_ph_qbr: ClangBuiltin<"__builtin_mips_preceu_ph_qbr">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_preceu_ph_qbla: ClangBuiltin<"__builtin_mips_preceu_ph_qbla">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_preceu_ph_qbra: ClangBuiltin<"__builtin_mips_preceu_ph_qbra">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Shift

def int_mips_shll_qb: ClangBuiltin<"__builtin_mips_shll_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], []>;
def int_mips_shrl_qb: ClangBuiltin<"__builtin_mips_shrl_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_shll_ph: ClangBuiltin<"__builtin_mips_shll_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
def int_mips_shll_s_ph: ClangBuiltin<"__builtin_mips_shll_s_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
def int_mips_shra_ph: ClangBuiltin<"__builtin_mips_shra_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_shra_r_ph: ClangBuiltin<"__builtin_mips_shra_r_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_shll_s_w: ClangBuiltin<"__builtin_mips_shll_s_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], []>;
def int_mips_shra_r_w: ClangBuiltin<"__builtin_mips_shra_r_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_shilo: ClangBuiltin<"__builtin_mips_shilo">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// Multiplication

def int_mips_muleu_s_ph_qbl: ClangBuiltin<"__builtin_mips_muleu_s_ph_qbl">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
def int_mips_muleu_s_ph_qbr: ClangBuiltin<"__builtin_mips_muleu_s_ph_qbr">,
  Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
def int_mips_mulq_rs_ph: ClangBuiltin<"__builtin_mips_mulq_rs_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
def int_mips_muleq_s_w_phl: ClangBuiltin<"__builtin_mips_muleq_s_w_phl">,
  Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
def int_mips_muleq_s_w_phr: ClangBuiltin<"__builtin_mips_muleq_s_w_phr">,
  Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
def int_mips_mulsaq_s_w_ph: ClangBuiltin<"__builtin_mips_mulsaq_s_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_maq_s_w_phl: ClangBuiltin<"__builtin_mips_maq_s_w_phl">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_maq_s_w_phr: ClangBuiltin<"__builtin_mips_maq_s_w_phr">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_maq_sa_w_phl: ClangBuiltin<"__builtin_mips_maq_sa_w_phl">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_maq_sa_w_phr: ClangBuiltin<"__builtin_mips_maq_sa_w_phr">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_mult: ClangBuiltin<"__builtin_mips_mult">,
  Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, Commutative]>;
def int_mips_multu: ClangBuiltin<"__builtin_mips_multu">,
  Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, Commutative]>;

//===----------------------------------------------------------------------===//
// Dot product with accumulate/subtract

def int_mips_dpau_h_qbl: ClangBuiltin<"__builtin_mips_dpau_h_qbl">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
            [IntrNoMem]>;
def int_mips_dpau_h_qbr: ClangBuiltin<"__builtin_mips_dpau_h_qbr">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
            [IntrNoMem]>;
def int_mips_dpsu_h_qbl: ClangBuiltin<"__builtin_mips_dpsu_h_qbl">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
            [IntrNoMem]>;
def int_mips_dpsu_h_qbr: ClangBuiltin<"__builtin_mips_dpsu_h_qbr">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
            [IntrNoMem]>;
def int_mips_dpaq_s_w_ph: ClangBuiltin<"__builtin_mips_dpaq_s_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_dpsq_s_w_ph: ClangBuiltin<"__builtin_mips_dpsq_s_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_dpaq_sa_l_w: ClangBuiltin<"__builtin_mips_dpaq_sa_l_w">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
def int_mips_dpsq_sa_l_w: ClangBuiltin<"__builtin_mips_dpsq_sa_l_w">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;

//===----------------------------------------------------------------------===//
// Comparison

def int_mips_cmpu_eq_qb: ClangBuiltin<"__builtin_mips_cmpu_eq_qb">,
  Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
def int_mips_cmpu_lt_qb: ClangBuiltin<"__builtin_mips_cmpu_lt_qb">,
  Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
def int_mips_cmpu_le_qb: ClangBuiltin<"__builtin_mips_cmpu_le_qb">,
  Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
def int_mips_cmpgu_eq_qb: ClangBuiltin<"__builtin_mips_cmpgu_eq_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
def int_mips_cmpgu_lt_qb: ClangBuiltin<"__builtin_mips_cmpgu_lt_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
def int_mips_cmpgu_le_qb: ClangBuiltin<"__builtin_mips_cmpgu_le_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
def int_mips_cmp_eq_ph: ClangBuiltin<"__builtin_mips_cmp_eq_ph">,
  Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
def int_mips_cmp_lt_ph: ClangBuiltin<"__builtin_mips_cmp_lt_ph">,
  Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_cmp_le_ph: ClangBuiltin<"__builtin_mips_cmp_le_ph">,
  Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], []>;

//===----------------------------------------------------------------------===//
// Extracting

def int_mips_extr_s_h: ClangBuiltin<"__builtin_mips_extr_s_h">,
  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
def int_mips_extr_w: ClangBuiltin<"__builtin_mips_extr_w">,
  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
def int_mips_extr_rs_w: ClangBuiltin<"__builtin_mips_extr_rs_w">,
  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
def int_mips_extr_r_w: ClangBuiltin<"__builtin_mips_extr_r_w">,
  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
def int_mips_extp: ClangBuiltin<"__builtin_mips_extp">,
  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
def int_mips_extpdp: ClangBuiltin<"__builtin_mips_extpdp">,
  Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;

//===----------------------------------------------------------------------===//
// Misc

def int_mips_wrdsp: ClangBuiltin<"__builtin_mips_wrdsp">,
  Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
def int_mips_rddsp: ClangBuiltin<"__builtin_mips_rddsp">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem, ImmArg<ArgIndex<0>>]>;

def int_mips_insv: ClangBuiltin<"__builtin_mips_insv">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
def int_mips_bitrev: ClangBuiltin<"__builtin_mips_bitrev">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

def int_mips_packrl_ph: ClangBuiltin<"__builtin_mips_packrl_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;

def int_mips_repl_qb: ClangBuiltin<"__builtin_mips_repl_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_mips_repl_ph: ClangBuiltin<"__builtin_mips_repl_ph">,
  Intrinsic<[mips_v2q15_ty], [llvm_i32_ty], [IntrNoMem]>;

def int_mips_pick_qb: ClangBuiltin<"__builtin_mips_pick_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrReadMem]>;
def int_mips_pick_ph: ClangBuiltin<"__builtin_mips_pick_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrReadMem]>;

def int_mips_mthlip: ClangBuiltin<"__builtin_mips_mthlip">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], []>;

def int_mips_bposge32: ClangBuiltin<"__builtin_mips_bposge32">,
  Intrinsic<[llvm_i32_ty], [], [IntrReadMem]>;

def int_mips_lbux: ClangBuiltin<"__builtin_mips_lbux">,
  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_mips_lhx: ClangBuiltin<"__builtin_mips_lhx">,
  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_mips_lwx: ClangBuiltin<"__builtin_mips_lwx">,
  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;

//===----------------------------------------------------------------------===//
// MIPS DSP Rev 2

def int_mips_absq_s_qb: ClangBuiltin<"__builtin_mips_absq_s_qb">,
  Intrinsic<[mips_v4q7_ty], [mips_v4q7_ty], []>;

def int_mips_addqh_ph: ClangBuiltin<"__builtin_mips_addqh_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
            [IntrNoMem, Commutative]>;
def int_mips_addqh_r_ph: ClangBuiltin<"__builtin_mips_addqh_r_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
            [IntrNoMem, Commutative]>;
def int_mips_addqh_w: ClangBuiltin<"__builtin_mips_addqh_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
            [IntrNoMem, Commutative]>;
def int_mips_addqh_r_w: ClangBuiltin<"__builtin_mips_addqh_r_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
            [IntrNoMem, Commutative]>;

def int_mips_addu_ph: ClangBuiltin<"__builtin_mips_addu_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
def int_mips_addu_s_ph: ClangBuiltin<"__builtin_mips_addu_s_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;

def int_mips_adduh_qb: ClangBuiltin<"__builtin_mips_adduh_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
            [IntrNoMem, Commutative]>;
def int_mips_adduh_r_qb: ClangBuiltin<"__builtin_mips_adduh_r_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
            [IntrNoMem, Commutative]>;

def int_mips_append: ClangBuiltin<"__builtin_mips_append">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_balign: ClangBuiltin<"__builtin_mips_balign">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_cmpgdu_eq_qb: ClangBuiltin<"__builtin_mips_cmpgdu_eq_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
def int_mips_cmpgdu_lt_qb: ClangBuiltin<"__builtin_mips_cmpgdu_lt_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
def int_mips_cmpgdu_le_qb: ClangBuiltin<"__builtin_mips_cmpgdu_le_qb">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;

def int_mips_dpa_w_ph: ClangBuiltin<"__builtin_mips_dpa_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
            [IntrNoMem]>;
def int_mips_dps_w_ph: ClangBuiltin<"__builtin_mips_dps_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
            [IntrNoMem]>;

def int_mips_dpaqx_s_w_ph: ClangBuiltin<"__builtin_mips_dpaqx_s_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_dpaqx_sa_w_ph: ClangBuiltin<"__builtin_mips_dpaqx_sa_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_dpax_w_ph: ClangBuiltin<"__builtin_mips_dpax_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
            [IntrNoMem]>;
def int_mips_dpsx_w_ph: ClangBuiltin<"__builtin_mips_dpsx_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
            [IntrNoMem]>;
def int_mips_dpsqx_s_w_ph: ClangBuiltin<"__builtin_mips_dpsqx_s_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
def int_mips_dpsqx_sa_w_ph: ClangBuiltin<"__builtin_mips_dpsqx_sa_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;

def int_mips_mul_ph: ClangBuiltin<"__builtin_mips_mul_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
def int_mips_mul_s_ph: ClangBuiltin<"__builtin_mips_mul_s_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;

def int_mips_mulq_rs_w: ClangBuiltin<"__builtin_mips_mulq_rs_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
def int_mips_mulq_s_ph: ClangBuiltin<"__builtin_mips_mulq_s_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
def int_mips_mulq_s_w: ClangBuiltin<"__builtin_mips_mulq_s_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
def int_mips_mulsa_w_ph: ClangBuiltin<"__builtin_mips_mulsa_w_ph">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
            [IntrNoMem]>;

def int_mips_precr_qb_ph: ClangBuiltin<"__builtin_mips_precr_qb_ph">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
def int_mips_precr_sra_ph_w: ClangBuiltin<"__builtin_mips_precr_sra_ph_w">,
  Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_precr_sra_r_ph_w: ClangBuiltin<"__builtin_mips_precr_sra_r_ph_w">,
  Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_prepend: ClangBuiltin<"__builtin_mips_prepend">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_shra_qb: ClangBuiltin<"__builtin_mips_shra_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_shra_r_qb: ClangBuiltin<"__builtin_mips_shra_r_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_shrl_ph: ClangBuiltin<"__builtin_mips_shrl_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_i32_ty], [IntrNoMem]>;

def int_mips_subqh_ph: ClangBuiltin<"__builtin_mips_subqh_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
def int_mips_subqh_r_ph: ClangBuiltin<"__builtin_mips_subqh_r_ph">,
  Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
def int_mips_subqh_w: ClangBuiltin<"__builtin_mips_subqh_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
def int_mips_subqh_r_w: ClangBuiltin<"__builtin_mips_subqh_r_w">,
  Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;

def int_mips_subu_ph: ClangBuiltin<"__builtin_mips_subu_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
def int_mips_subu_s_ph: ClangBuiltin<"__builtin_mips_subu_s_ph">,
  Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;

def int_mips_subuh_qb: ClangBuiltin<"__builtin_mips_subuh_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
def int_mips_subuh_r_qb: ClangBuiltin<"__builtin_mips_subuh_r_qb">,
  Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// MIPS MSA

//===----------------------------------------------------------------------===//
// Addition/subtraction

def int_mips_add_a_b : ClangBuiltin<"__builtin_msa_add_a_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_add_a_h : ClangBuiltin<"__builtin_msa_add_a_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_add_a_w : ClangBuiltin<"__builtin_msa_add_a_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_add_a_d : ClangBuiltin<"__builtin_msa_add_a_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_adds_a_b : ClangBuiltin<"__builtin_msa_adds_a_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_a_h : ClangBuiltin<"__builtin_msa_adds_a_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_a_w : ClangBuiltin<"__builtin_msa_adds_a_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_a_d : ClangBuiltin<"__builtin_msa_adds_a_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_adds_s_b : ClangBuiltin<"__builtin_msa_adds_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_s_h : ClangBuiltin<"__builtin_msa_adds_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_s_w : ClangBuiltin<"__builtin_msa_adds_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_s_d : ClangBuiltin<"__builtin_msa_adds_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_adds_u_b : ClangBuiltin<"__builtin_msa_adds_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_u_h : ClangBuiltin<"__builtin_msa_adds_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_u_w : ClangBuiltin<"__builtin_msa_adds_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_adds_u_d : ClangBuiltin<"__builtin_msa_adds_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_addv_b : ClangBuiltin<"__builtin_msa_addv_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_addv_h : ClangBuiltin<"__builtin_msa_addv_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_addv_w : ClangBuiltin<"__builtin_msa_addv_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_addv_d : ClangBuiltin<"__builtin_msa_addv_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_addvi_b : ClangBuiltin<"__builtin_msa_addvi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
  [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_addvi_h : ClangBuiltin<"__builtin_msa_addvi_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
  [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_addvi_w : ClangBuiltin<"__builtin_msa_addvi_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
  [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_addvi_d : ClangBuiltin<"__builtin_msa_addvi_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
  [Commutative, IntrNoMem, ImmArg<ArgIndex<1>>]>;
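
// ImmArg<ArgIndex<1>> requires the immediate operand (the second argument
// here) to be a compile-time constant at every call site. A minimal IR-level
// sketch (%v is illustrative; the trailing i32 must be a literal):
//   %r = call <16 x i8> @llvm.mips.addvi.b(<16 x i8> %v, i32 3)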

def int_mips_and_v : ClangBuiltin<"__builtin_msa_and_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

def int_mips_andi_b : ClangBuiltin<"__builtin_msa_andi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_asub_s_b : ClangBuiltin<"__builtin_msa_asub_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_asub_s_h : ClangBuiltin<"__builtin_msa_asub_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_asub_s_w : ClangBuiltin<"__builtin_msa_asub_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_asub_s_d : ClangBuiltin<"__builtin_msa_asub_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_asub_u_b : ClangBuiltin<"__builtin_msa_asub_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_asub_u_h : ClangBuiltin<"__builtin_msa_asub_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_asub_u_w : ClangBuiltin<"__builtin_msa_asub_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_asub_u_d : ClangBuiltin<"__builtin_msa_asub_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ave_s_b : ClangBuiltin<"__builtin_msa_ave_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_ave_s_h : ClangBuiltin<"__builtin_msa_ave_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_ave_s_w : ClangBuiltin<"__builtin_msa_ave_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_ave_s_d : ClangBuiltin<"__builtin_msa_ave_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_ave_u_b : ClangBuiltin<"__builtin_msa_ave_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_ave_u_h : ClangBuiltin<"__builtin_msa_ave_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_ave_u_w : ClangBuiltin<"__builtin_msa_ave_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_ave_u_d : ClangBuiltin<"__builtin_msa_ave_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_aver_s_b : ClangBuiltin<"__builtin_msa_aver_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_aver_s_h : ClangBuiltin<"__builtin_msa_aver_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_aver_s_w : ClangBuiltin<"__builtin_msa_aver_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_aver_s_d : ClangBuiltin<"__builtin_msa_aver_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_aver_u_b : ClangBuiltin<"__builtin_msa_aver_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
  [Commutative, IntrNoMem]>;
def int_mips_aver_u_h : ClangBuiltin<"__builtin_msa_aver_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
  [Commutative, IntrNoMem]>;
def int_mips_aver_u_w : ClangBuiltin<"__builtin_msa_aver_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
  [Commutative, IntrNoMem]>;
def int_mips_aver_u_d : ClangBuiltin<"__builtin_msa_aver_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
  [Commutative, IntrNoMem]>;

def int_mips_bclr_b : ClangBuiltin<"__builtin_msa_bclr_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_bclr_h : ClangBuiltin<"__builtin_msa_bclr_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_bclr_w : ClangBuiltin<"__builtin_msa_bclr_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_bclr_d : ClangBuiltin<"__builtin_msa_bclr_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_bclri_b : ClangBuiltin<"__builtin_msa_bclri_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bclri_h : ClangBuiltin<"__builtin_msa_bclri_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bclri_w : ClangBuiltin<"__builtin_msa_bclri_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bclri_d : ClangBuiltin<"__builtin_msa_bclri_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_binsl_b : ClangBuiltin<"__builtin_msa_binsl_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
            [IntrNoMem]>;
def int_mips_binsl_h : ClangBuiltin<"__builtin_msa_binsl_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
            [IntrNoMem]>;
def int_mips_binsl_w : ClangBuiltin<"__builtin_msa_binsl_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
            [IntrNoMem]>;
def int_mips_binsl_d : ClangBuiltin<"__builtin_msa_binsl_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
            [IntrNoMem]>;

def int_mips_binsli_b : ClangBuiltin<"__builtin_msa_binsli_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsli_h : ClangBuiltin<"__builtin_msa_binsli_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsli_w : ClangBuiltin<"__builtin_msa_binsli_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsli_d : ClangBuiltin<"__builtin_msa_binsli_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_binsr_b : ClangBuiltin<"__builtin_msa_binsr_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
            [IntrNoMem]>;
def int_mips_binsr_h : ClangBuiltin<"__builtin_msa_binsr_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
            [IntrNoMem]>;
def int_mips_binsr_w : ClangBuiltin<"__builtin_msa_binsr_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
            [IntrNoMem]>;
def int_mips_binsr_d : ClangBuiltin<"__builtin_msa_binsr_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
            [IntrNoMem]>;

def int_mips_binsri_b : ClangBuiltin<"__builtin_msa_binsri_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsri_h : ClangBuiltin<"__builtin_msa_binsri_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsri_w : ClangBuiltin<"__builtin_msa_binsri_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_binsri_d : ClangBuiltin<"__builtin_msa_binsri_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_bmnz_v : ClangBuiltin<"__builtin_msa_bmnz_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
            [IntrNoMem]>;

def int_mips_bmnzi_b : ClangBuiltin<"__builtin_msa_bmnzi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_bmz_v : ClangBuiltin<"__builtin_msa_bmz_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
            [IntrNoMem]>;

def int_mips_bmzi_b : ClangBuiltin<"__builtin_msa_bmzi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_bneg_b : ClangBuiltin<"__builtin_msa_bneg_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_bneg_h : ClangBuiltin<"__builtin_msa_bneg_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_bneg_w : ClangBuiltin<"__builtin_msa_bneg_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_bneg_d : ClangBuiltin<"__builtin_msa_bneg_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_bnegi_b : ClangBuiltin<"__builtin_msa_bnegi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnegi_h : ClangBuiltin<"__builtin_msa_bnegi_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnegi_w : ClangBuiltin<"__builtin_msa_bnegi_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bnegi_d : ClangBuiltin<"__builtin_msa_bnegi_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_bnz_b : ClangBuiltin<"__builtin_msa_bnz_b">,
  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_bnz_h : ClangBuiltin<"__builtin_msa_bnz_h">,
  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_bnz_w : ClangBuiltin<"__builtin_msa_bnz_w">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_bnz_d : ClangBuiltin<"__builtin_msa_bnz_d">,
  Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_bnz_v : ClangBuiltin<"__builtin_msa_bnz_v">,
  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;

def int_mips_bsel_v : ClangBuiltin<"__builtin_msa_bsel_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
            [IntrNoMem]>;

def int_mips_bseli_b : ClangBuiltin<"__builtin_msa_bseli_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_bset_b : ClangBuiltin<"__builtin_msa_bset_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_bset_h : ClangBuiltin<"__builtin_msa_bset_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_bset_w : ClangBuiltin<"__builtin_msa_bset_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_bset_d : ClangBuiltin<"__builtin_msa_bset_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_bseti_b : ClangBuiltin<"__builtin_msa_bseti_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bseti_h : ClangBuiltin<"__builtin_msa_bseti_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bseti_w : ClangBuiltin<"__builtin_msa_bseti_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_bseti_d : ClangBuiltin<"__builtin_msa_bseti_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_bz_b : ClangBuiltin<"__builtin_msa_bz_b">,
  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_bz_h : ClangBuiltin<"__builtin_msa_bz_h">,
  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_bz_w : ClangBuiltin<"__builtin_msa_bz_w">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_bz_d : ClangBuiltin<"__builtin_msa_bz_d">,
  Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_bz_v : ClangBuiltin<"__builtin_msa_bz_v">,
  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
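
// The bnz/bz families above return an i32 truth value rather than branching
// directly; per the MSA spec, bnz_<df> tests that every element is nonzero,
// bnz_v that any bit is set, and the bz forms are their complements. The
// backend typically folds the result into a conditional branch.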

def int_mips_ceq_b : ClangBuiltin<"__builtin_msa_ceq_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_ceq_h : ClangBuiltin<"__builtin_msa_ceq_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ceq_w : ClangBuiltin<"__builtin_msa_ceq_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ceq_d : ClangBuiltin<"__builtin_msa_ceq_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ceqi_b : ClangBuiltin<"__builtin_msa_ceqi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ceqi_h : ClangBuiltin<"__builtin_msa_ceqi_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ceqi_w : ClangBuiltin<"__builtin_msa_ceqi_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_ceqi_d : ClangBuiltin<"__builtin_msa_ceqi_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_cfcmsa : ClangBuiltin<"__builtin_msa_cfcmsa">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
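
// Note that cfcmsa here and ctcmsa below omit IntrNoMem: moves to and from
// the MSA control registers are conservatively modeled as having side
// effects, so the optimizer cannot freely reorder or delete them. The
// control-register index (ArgIndex<0>) must be an immediate.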

def int_mips_cle_s_b : ClangBuiltin<"__builtin_msa_cle_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_cle_s_h : ClangBuiltin<"__builtin_msa_cle_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_cle_s_w : ClangBuiltin<"__builtin_msa_cle_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_cle_s_d : ClangBuiltin<"__builtin_msa_cle_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_cle_u_b : ClangBuiltin<"__builtin_msa_cle_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_cle_u_h : ClangBuiltin<"__builtin_msa_cle_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_cle_u_w : ClangBuiltin<"__builtin_msa_cle_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_cle_u_d : ClangBuiltin<"__builtin_msa_cle_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_clei_s_b : ClangBuiltin<"__builtin_msa_clei_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_s_h : ClangBuiltin<"__builtin_msa_clei_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_s_w : ClangBuiltin<"__builtin_msa_clei_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_s_d : ClangBuiltin<"__builtin_msa_clei_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_clei_u_b : ClangBuiltin<"__builtin_msa_clei_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_h : ClangBuiltin<"__builtin_msa_clei_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_w : ClangBuiltin<"__builtin_msa_clei_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clei_u_d : ClangBuiltin<"__builtin_msa_clei_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_clt_s_b : ClangBuiltin<"__builtin_msa_clt_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_clt_s_h : ClangBuiltin<"__builtin_msa_clt_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_clt_s_w : ClangBuiltin<"__builtin_msa_clt_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_clt_s_d : ClangBuiltin<"__builtin_msa_clt_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_clt_u_b : ClangBuiltin<"__builtin_msa_clt_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_clt_u_h : ClangBuiltin<"__builtin_msa_clt_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_clt_u_w : ClangBuiltin<"__builtin_msa_clt_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_clt_u_d : ClangBuiltin<"__builtin_msa_clt_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_clti_s_b : ClangBuiltin<"__builtin_msa_clti_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_s_h : ClangBuiltin<"__builtin_msa_clti_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_s_w : ClangBuiltin<"__builtin_msa_clti_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_s_d : ClangBuiltin<"__builtin_msa_clti_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_clti_u_b : ClangBuiltin<"__builtin_msa_clti_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_h : ClangBuiltin<"__builtin_msa_clti_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_w : ClangBuiltin<"__builtin_msa_clti_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_clti_u_d : ClangBuiltin<"__builtin_msa_clti_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_copy_s_b : ClangBuiltin<"__builtin_msa_copy_s_b">,
  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_s_h : ClangBuiltin<"__builtin_msa_copy_s_h">,
  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_s_w : ClangBuiltin<"__builtin_msa_copy_s_w">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_s_d : ClangBuiltin<"__builtin_msa_copy_s_d">,
  Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;

def int_mips_copy_u_b : ClangBuiltin<"__builtin_msa_copy_u_b">,
  Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_u_h : ClangBuiltin<"__builtin_msa_copy_u_h">,
  Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_u_w : ClangBuiltin<"__builtin_msa_copy_u_w">,
  Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_copy_u_d : ClangBuiltin<"__builtin_msa_copy_u_d">,
  Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
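
// copy_s sign-extends the selected element into a GPR and copy_u
// zero-extends it. A hedged IR-level sketch (%v and %idx are illustrative):
//   %e = call i32 @llvm.mips.copy.s.b(<16 x i8> %v, i32 %idx)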

def int_mips_ctcmsa : ClangBuiltin<"__builtin_msa_ctcmsa">,
  Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;

def int_mips_div_s_b : ClangBuiltin<"__builtin_msa_div_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_div_s_h : ClangBuiltin<"__builtin_msa_div_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_div_s_w : ClangBuiltin<"__builtin_msa_div_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_div_s_d : ClangBuiltin<"__builtin_msa_div_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_div_u_b : ClangBuiltin<"__builtin_msa_div_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_div_u_h : ClangBuiltin<"__builtin_msa_div_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_div_u_w : ClangBuiltin<"__builtin_msa_div_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_div_u_d : ClangBuiltin<"__builtin_msa_div_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

// This instruction is part of the MSA spec, but it does not share the
// __builtin_msa prefix because it operates on the general-purpose registers.
def int_mips_dlsa : ClangBuiltin<"__builtin_mips_dlsa">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
            [IntrNoMem]>;
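
// The record above maps to @llvm.mips.dlsa. A minimal IR-level sketch,
// assuming a MIPS64 target (%a and %b are illustrative; see the ISA's DLSA
// definition for the exact shift encoding):
//   %r = call i64 @llvm.mips.dlsa(i64 %a, i64 %b, i32 2)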

def int_mips_dotp_s_h : ClangBuiltin<"__builtin_msa_dotp_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_dotp_s_w : ClangBuiltin<"__builtin_msa_dotp_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_dotp_s_d : ClangBuiltin<"__builtin_msa_dotp_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_dotp_u_h : ClangBuiltin<"__builtin_msa_dotp_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_dotp_u_w : ClangBuiltin<"__builtin_msa_dotp_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_dotp_u_d : ClangBuiltin<"__builtin_msa_dotp_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_dpadd_s_h : ClangBuiltin<"__builtin_msa_dpadd_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
  [IntrNoMem]>;
def int_mips_dpadd_s_w : ClangBuiltin<"__builtin_msa_dpadd_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_dpadd_s_d : ClangBuiltin<"__builtin_msa_dpadd_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_dpadd_u_h : ClangBuiltin<"__builtin_msa_dpadd_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
  [IntrNoMem]>;
def int_mips_dpadd_u_w : ClangBuiltin<"__builtin_msa_dpadd_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_dpadd_u_d : ClangBuiltin<"__builtin_msa_dpadd_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_dpsub_s_h : ClangBuiltin<"__builtin_msa_dpsub_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
  [IntrNoMem]>;
def int_mips_dpsub_s_w : ClangBuiltin<"__builtin_msa_dpsub_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_dpsub_s_d : ClangBuiltin<"__builtin_msa_dpsub_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_dpsub_u_h : ClangBuiltin<"__builtin_msa_dpsub_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
  [IntrNoMem]>;
def int_mips_dpsub_u_w : ClangBuiltin<"__builtin_msa_dpsub_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_dpsub_u_d : ClangBuiltin<"__builtin_msa_dpsub_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;
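
// The dotp/dpadd/dpsub signatures above encode widening dot products: the
// result (and, for dpadd/dpsub, the first operand, which acts as the
// accumulator) uses the wide element type, while the multiplied operands use
// the narrow type with twice as many lanes. A hedged IR-level sketch (names
// are illustrative):
//   %acc2 = call <8 x i16> @llvm.mips.dpadd.s.h(<8 x i16> %acc,
//                                               <16 x i8> %a, <16 x i8> %b)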

def int_mips_fadd_w : ClangBuiltin<"__builtin_msa_fadd_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fadd_d : ClangBuiltin<"__builtin_msa_fadd_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcaf_w : ClangBuiltin<"__builtin_msa_fcaf_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcaf_d : ClangBuiltin<"__builtin_msa_fcaf_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fceq_w : ClangBuiltin<"__builtin_msa_fceq_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fceq_d : ClangBuiltin<"__builtin_msa_fceq_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcle_w : ClangBuiltin<"__builtin_msa_fcle_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcle_d : ClangBuiltin<"__builtin_msa_fcle_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fclt_w : ClangBuiltin<"__builtin_msa_fclt_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fclt_d : ClangBuiltin<"__builtin_msa_fclt_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fclass_w : ClangBuiltin<"__builtin_msa_fclass_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fclass_d : ClangBuiltin<"__builtin_msa_fclass_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcne_w : ClangBuiltin<"__builtin_msa_fcne_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcne_d : ClangBuiltin<"__builtin_msa_fcne_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcor_w : ClangBuiltin<"__builtin_msa_fcor_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcor_d : ClangBuiltin<"__builtin_msa_fcor_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcueq_w : ClangBuiltin<"__builtin_msa_fcueq_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcueq_d : ClangBuiltin<"__builtin_msa_fcueq_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcule_w : ClangBuiltin<"__builtin_msa_fcule_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcule_d : ClangBuiltin<"__builtin_msa_fcule_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcult_w : ClangBuiltin<"__builtin_msa_fcult_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcult_d : ClangBuiltin<"__builtin_msa_fcult_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcun_w : ClangBuiltin<"__builtin_msa_fcun_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcun_d : ClangBuiltin<"__builtin_msa_fcun_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fcune_w : ClangBuiltin<"__builtin_msa_fcune_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fcune_d : ClangBuiltin<"__builtin_msa_fcune_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fdiv_w : ClangBuiltin<"__builtin_msa_fdiv_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fdiv_d : ClangBuiltin<"__builtin_msa_fdiv_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fexdo_h : ClangBuiltin<"__builtin_msa_fexdo_h">,
  Intrinsic<[llvm_v8f16_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fexdo_w : ClangBuiltin<"__builtin_msa_fexdo_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fexp2_w : ClangBuiltin<"__builtin_msa_fexp2_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_fexp2_d : ClangBuiltin<"__builtin_msa_fexp2_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_fexupl_w : ClangBuiltin<"__builtin_msa_fexupl_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v8f16_ty], [IntrNoMem]>;
def int_mips_fexupl_d : ClangBuiltin<"__builtin_msa_fexupl_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;

def int_mips_fexupr_w : ClangBuiltin<"__builtin_msa_fexupr_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v8f16_ty], [IntrNoMem]>;
def int_mips_fexupr_d : ClangBuiltin<"__builtin_msa_fexupr_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;

def int_mips_ffint_s_w : ClangBuiltin<"__builtin_msa_ffint_s_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ffint_s_d : ClangBuiltin<"__builtin_msa_ffint_s_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ffint_u_w : ClangBuiltin<"__builtin_msa_ffint_u_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ffint_u_d : ClangBuiltin<"__builtin_msa_ffint_u_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ffql_w : ClangBuiltin<"__builtin_msa_ffql_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ffql_d : ClangBuiltin<"__builtin_msa_ffql_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_ffqr_w : ClangBuiltin<"__builtin_msa_ffqr_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ffqr_d : ClangBuiltin<"__builtin_msa_ffqr_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_fill_b : ClangBuiltin<"__builtin_msa_fill_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_mips_fill_h : ClangBuiltin<"__builtin_msa_fill_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_mips_fill_w : ClangBuiltin<"__builtin_msa_fill_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_mips_fill_d : ClangBuiltin<"__builtin_msa_fill_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_i64_ty], [IntrNoMem]>;
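
// The fill.* intrinsics replicate one scalar across every lane of the
// result. A hedged IR-level sketch (%x is illustrative):
//   %v = call <4 x i32> @llvm.mips.fill.w(i32 %x)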

def int_mips_flog2_w : ClangBuiltin<"__builtin_msa_flog2_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_flog2_d : ClangBuiltin<"__builtin_msa_flog2_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fmadd_w : ClangBuiltin<"__builtin_msa_fmadd_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
            [IntrNoMem]>;
def int_mips_fmadd_d : ClangBuiltin<"__builtin_msa_fmadd_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
            [IntrNoMem]>;

def int_mips_fmax_w : ClangBuiltin<"__builtin_msa_fmax_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fmax_d : ClangBuiltin<"__builtin_msa_fmax_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fmax_a_w : ClangBuiltin<"__builtin_msa_fmax_a_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fmax_a_d : ClangBuiltin<"__builtin_msa_fmax_a_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fmin_w : ClangBuiltin<"__builtin_msa_fmin_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fmin_d : ClangBuiltin<"__builtin_msa_fmin_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fmin_a_w : ClangBuiltin<"__builtin_msa_fmin_a_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fmin_a_d : ClangBuiltin<"__builtin_msa_fmin_a_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fmsub_w : ClangBuiltin<"__builtin_msa_fmsub_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
            [IntrNoMem]>;
def int_mips_fmsub_d : ClangBuiltin<"__builtin_msa_fmsub_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
            [IntrNoMem]>;

def int_mips_fmul_w : ClangBuiltin<"__builtin_msa_fmul_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fmul_d : ClangBuiltin<"__builtin_msa_fmul_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_frint_w : ClangBuiltin<"__builtin_msa_frint_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_frint_d : ClangBuiltin<"__builtin_msa_frint_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_frcp_w : ClangBuiltin<"__builtin_msa_frcp_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_frcp_d : ClangBuiltin<"__builtin_msa_frcp_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_frsqrt_w : ClangBuiltin<"__builtin_msa_frsqrt_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_frsqrt_d : ClangBuiltin<"__builtin_msa_frsqrt_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsaf_w : ClangBuiltin<"__builtin_msa_fsaf_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsaf_d : ClangBuiltin<"__builtin_msa_fsaf_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fseq_w : ClangBuiltin<"__builtin_msa_fseq_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fseq_d : ClangBuiltin<"__builtin_msa_fseq_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsle_w : ClangBuiltin<"__builtin_msa_fsle_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsle_d : ClangBuiltin<"__builtin_msa_fsle_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fslt_w : ClangBuiltin<"__builtin_msa_fslt_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fslt_d : ClangBuiltin<"__builtin_msa_fslt_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsne_w : ClangBuiltin<"__builtin_msa_fsne_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsne_d : ClangBuiltin<"__builtin_msa_fsne_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsor_w : ClangBuiltin<"__builtin_msa_fsor_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsor_d : ClangBuiltin<"__builtin_msa_fsor_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsqrt_w : ClangBuiltin<"__builtin_msa_fsqrt_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsqrt_d : ClangBuiltin<"__builtin_msa_fsqrt_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsub_w : ClangBuiltin<"__builtin_msa_fsub_w">,
  Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsub_d : ClangBuiltin<"__builtin_msa_fsub_d">,
  Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsueq_w : ClangBuiltin<"__builtin_msa_fsueq_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsueq_d : ClangBuiltin<"__builtin_msa_fsueq_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsule_w : ClangBuiltin<"__builtin_msa_fsule_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsule_d : ClangBuiltin<"__builtin_msa_fsule_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsult_w : ClangBuiltin<"__builtin_msa_fsult_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsult_d : ClangBuiltin<"__builtin_msa_fsult_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsun_w : ClangBuiltin<"__builtin_msa_fsun_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsun_d : ClangBuiltin<"__builtin_msa_fsun_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_fsune_w : ClangBuiltin<"__builtin_msa_fsune_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_fsune_d : ClangBuiltin<"__builtin_msa_fsune_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_ftint_s_w : ClangBuiltin<"__builtin_msa_ftint_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_ftint_s_d : ClangBuiltin<"__builtin_msa_ftint_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_ftint_u_w : ClangBuiltin<"__builtin_msa_ftint_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_ftint_u_d : ClangBuiltin<"__builtin_msa_ftint_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_ftq_h : ClangBuiltin<"__builtin_msa_ftq_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_ftq_w : ClangBuiltin<"__builtin_msa_ftq_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_ftrunc_s_w : ClangBuiltin<"__builtin_msa_ftrunc_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_ftrunc_s_d : ClangBuiltin<"__builtin_msa_ftrunc_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_ftrunc_u_w : ClangBuiltin<"__builtin_msa_ftrunc_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_mips_ftrunc_u_d : ClangBuiltin<"__builtin_msa_ftrunc_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

def int_mips_hadd_s_h : ClangBuiltin<"__builtin_msa_hadd_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_hadd_s_w : ClangBuiltin<"__builtin_msa_hadd_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_hadd_s_d : ClangBuiltin<"__builtin_msa_hadd_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_hadd_u_h : ClangBuiltin<"__builtin_msa_hadd_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_hadd_u_w : ClangBuiltin<"__builtin_msa_hadd_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_hadd_u_d : ClangBuiltin<"__builtin_msa_hadd_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_hsub_s_h : ClangBuiltin<"__builtin_msa_hsub_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_hsub_s_w : ClangBuiltin<"__builtin_msa_hsub_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_hsub_s_d : ClangBuiltin<"__builtin_msa_hsub_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_hsub_u_h : ClangBuiltin<"__builtin_msa_hsub_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_hsub_u_w : ClangBuiltin<"__builtin_msa_hsub_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_hsub_u_d : ClangBuiltin<"__builtin_msa_hsub_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_ilvev_b : ClangBuiltin<"__builtin_msa_ilvev_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_ilvev_h : ClangBuiltin<"__builtin_msa_ilvev_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ilvev_w : ClangBuiltin<"__builtin_msa_ilvev_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ilvev_d : ClangBuiltin<"__builtin_msa_ilvev_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ilvl_b : ClangBuiltin<"__builtin_msa_ilvl_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_ilvl_h : ClangBuiltin<"__builtin_msa_ilvl_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ilvl_w : ClangBuiltin<"__builtin_msa_ilvl_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ilvl_d : ClangBuiltin<"__builtin_msa_ilvl_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ilvod_b : ClangBuiltin<"__builtin_msa_ilvod_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_ilvod_h : ClangBuiltin<"__builtin_msa_ilvod_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ilvod_w : ClangBuiltin<"__builtin_msa_ilvod_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ilvod_d : ClangBuiltin<"__builtin_msa_ilvod_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_ilvr_b : ClangBuiltin<"__builtin_msa_ilvr_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_ilvr_h : ClangBuiltin<"__builtin_msa_ilvr_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_ilvr_w : ClangBuiltin<"__builtin_msa_ilvr_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_ilvr_d : ClangBuiltin<"__builtin_msa_ilvr_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_insert_b : ClangBuiltin<"__builtin_msa_insert_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem]>;
def int_mips_insert_h : ClangBuiltin<"__builtin_msa_insert_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem]>;
def int_mips_insert_w : ClangBuiltin<"__builtin_msa_insert_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem]>;
def int_mips_insert_d : ClangBuiltin<"__builtin_msa_insert_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem]>;

def int_mips_insve_b : ClangBuiltin<"__builtin_msa_insve_b">,
  Intrinsic<[llvm_v16i8_ty],
            [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_insve_h : ClangBuiltin<"__builtin_msa_insve_h">,
  Intrinsic<[llvm_v8i16_ty],
            [llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_insve_w : ClangBuiltin<"__builtin_msa_insve_w">,
  Intrinsic<[llvm_v4i32_ty],
            [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_insve_d : ClangBuiltin<"__builtin_msa_insve_d">,
  Intrinsic<[llvm_v2i64_ty],
            [llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
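
// Unlike insert_*, which takes the new element as a scalar, insve_* takes it
// from element 0 of a third vector operand (per the MSA INSVE definition);
// the lane index (ArgIndex<1>) must be an immediate.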

def int_mips_ld_b : ClangBuiltin<"__builtin_msa_ld_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ld_h : ClangBuiltin<"__builtin_msa_ld_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ld_w : ClangBuiltin<"__builtin_msa_ld_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ld_d : ClangBuiltin<"__builtin_msa_ld_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly]>;
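
// IntrReadMem plus IntrArgMemOnly tell the optimizer that these loads (and
// ldr_d/ldr_w below) only read memory reachable through the pointer
// argument. A minimal IR-level sketch (%p and the byte offset are
// illustrative):
//   %v = call <16 x i8> @llvm.mips.ld.b(ptr %p, i32 16)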

def int_mips_ldr_d : ClangBuiltin<"__builtin_msa_ldr_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly]>;
def int_mips_ldr_w : ClangBuiltin<"__builtin_msa_ldr_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly]>;

def int_mips_ldi_b : ClangBuiltin<"__builtin_msa_ldi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_mips_ldi_h : ClangBuiltin<"__builtin_msa_ldi_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_mips_ldi_w : ClangBuiltin<"__builtin_msa_ldi_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
def int_mips_ldi_d : ClangBuiltin<"__builtin_msa_ldi_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;

// This instruction is part of the MSA spec, but it does not share the
// __builtin_msa prefix because it operates on the general-purpose registers.
def int_mips_lsa : ClangBuiltin<"__builtin_mips_lsa">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem]>;
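
// As with dlsa above, a minimal IR-level sketch (illustrative names; the
// ISA's LSA definition gives the exact shift encoding):
//   %r = call i32 @llvm.mips.lsa(i32 %a, i32 %b, i32 2)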

def int_mips_madd_q_h : ClangBuiltin<"__builtin_msa_madd_q_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_madd_q_w : ClangBuiltin<"__builtin_msa_madd_q_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_maddr_q_h : ClangBuiltin<"__builtin_msa_maddr_q_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_maddr_q_w : ClangBuiltin<"__builtin_msa_maddr_q_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_maddv_b : ClangBuiltin<"__builtin_msa_maddv_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
  [IntrNoMem]>;
def int_mips_maddv_h : ClangBuiltin<"__builtin_msa_maddv_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_maddv_w : ClangBuiltin<"__builtin_msa_maddv_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;
def int_mips_maddv_d : ClangBuiltin<"__builtin_msa_maddv_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
  [IntrNoMem]>;

def int_mips_max_a_b : ClangBuiltin<"__builtin_msa_max_a_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_max_a_h : ClangBuiltin<"__builtin_msa_max_a_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_max_a_w : ClangBuiltin<"__builtin_msa_max_a_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_max_a_d : ClangBuiltin<"__builtin_msa_max_a_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_max_s_b : ClangBuiltin<"__builtin_msa_max_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_max_s_h : ClangBuiltin<"__builtin_msa_max_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_max_s_w : ClangBuiltin<"__builtin_msa_max_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_max_s_d : ClangBuiltin<"__builtin_msa_max_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_max_u_b : ClangBuiltin<"__builtin_msa_max_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_max_u_h : ClangBuiltin<"__builtin_msa_max_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_max_u_w : ClangBuiltin<"__builtin_msa_max_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_max_u_d : ClangBuiltin<"__builtin_msa_max_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_maxi_s_b : ClangBuiltin<"__builtin_msa_maxi_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_s_h : ClangBuiltin<"__builtin_msa_maxi_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_s_w : ClangBuiltin<"__builtin_msa_maxi_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_s_d : ClangBuiltin<"__builtin_msa_maxi_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_maxi_u_b : ClangBuiltin<"__builtin_msa_maxi_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_h : ClangBuiltin<"__builtin_msa_maxi_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_w : ClangBuiltin<"__builtin_msa_maxi_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_maxi_u_d : ClangBuiltin<"__builtin_msa_maxi_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_min_a_b : ClangBuiltin<"__builtin_msa_min_a_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_min_a_h : ClangBuiltin<"__builtin_msa_min_a_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_min_a_w : ClangBuiltin<"__builtin_msa_min_a_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_min_a_d : ClangBuiltin<"__builtin_msa_min_a_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_min_s_b : ClangBuiltin<"__builtin_msa_min_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_min_s_h : ClangBuiltin<"__builtin_msa_min_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_min_s_w : ClangBuiltin<"__builtin_msa_min_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_min_s_d : ClangBuiltin<"__builtin_msa_min_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_min_u_b : ClangBuiltin<"__builtin_msa_min_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_min_u_h : ClangBuiltin<"__builtin_msa_min_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_min_u_w : ClangBuiltin<"__builtin_msa_min_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_min_u_d : ClangBuiltin<"__builtin_msa_min_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_mini_s_b : ClangBuiltin<"__builtin_msa_mini_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_s_h : ClangBuiltin<"__builtin_msa_mini_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_s_w : ClangBuiltin<"__builtin_msa_mini_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_s_d : ClangBuiltin<"__builtin_msa_mini_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_mini_u_b : ClangBuiltin<"__builtin_msa_mini_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_h : ClangBuiltin<"__builtin_msa_mini_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_w : ClangBuiltin<"__builtin_msa_mini_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_mini_u_d : ClangBuiltin<"__builtin_msa_mini_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_mod_s_b : ClangBuiltin<"__builtin_msa_mod_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_mod_s_h : ClangBuiltin<"__builtin_msa_mod_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_mod_s_w : ClangBuiltin<"__builtin_msa_mod_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_mod_s_d : ClangBuiltin<"__builtin_msa_mod_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_mod_u_b : ClangBuiltin<"__builtin_msa_mod_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_mod_u_h : ClangBuiltin<"__builtin_msa_mod_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_mod_u_w : ClangBuiltin<"__builtin_msa_mod_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_mod_u_d : ClangBuiltin<"__builtin_msa_mod_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_move_v : ClangBuiltin<"__builtin_msa_move_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

def int_mips_msub_q_h : ClangBuiltin<"__builtin_msa_msub_q_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_msub_q_w : ClangBuiltin<"__builtin_msa_msub_q_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_msubr_q_h : ClangBuiltin<"__builtin_msa_msubr_q_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_msubr_q_w : ClangBuiltin<"__builtin_msa_msubr_q_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;

def int_mips_msubv_b : ClangBuiltin<"__builtin_msa_msubv_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
  [IntrNoMem]>;
def int_mips_msubv_h : ClangBuiltin<"__builtin_msa_msubv_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
  [IntrNoMem]>;
def int_mips_msubv_w : ClangBuiltin<"__builtin_msa_msubv_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
  [IntrNoMem]>;
def int_mips_msubv_d : ClangBuiltin<"__builtin_msa_msubv_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
  [IntrNoMem]>;

def int_mips_mul_q_h : ClangBuiltin<"__builtin_msa_mul_q_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_mul_q_w : ClangBuiltin<"__builtin_msa_mul_q_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_mulr_q_h : ClangBuiltin<"__builtin_msa_mulr_q_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_mulr_q_w : ClangBuiltin<"__builtin_msa_mulr_q_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

def int_mips_mulv_b : ClangBuiltin<"__builtin_msa_mulv_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_mulv_h : ClangBuiltin<"__builtin_msa_mulv_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_mulv_w : ClangBuiltin<"__builtin_msa_mulv_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_mulv_d : ClangBuiltin<"__builtin_msa_mulv_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_nloc_b : ClangBuiltin<"__builtin_msa_nloc_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_nloc_h : ClangBuiltin<"__builtin_msa_nloc_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_nloc_w : ClangBuiltin<"__builtin_msa_nloc_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_nloc_d : ClangBuiltin<"__builtin_msa_nloc_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_nlzc_b : ClangBuiltin<"__builtin_msa_nlzc_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_nlzc_h : ClangBuiltin<"__builtin_msa_nlzc_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_nlzc_w : ClangBuiltin<"__builtin_msa_nlzc_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_nlzc_d : ClangBuiltin<"__builtin_msa_nlzc_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_nor_v : ClangBuiltin<"__builtin_msa_nor_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

def int_mips_nori_b : ClangBuiltin<"__builtin_msa_nori_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_or_v : ClangBuiltin<"__builtin_msa_or_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

def int_mips_ori_b : ClangBuiltin<"__builtin_msa_ori_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_pckev_b : ClangBuiltin<"__builtin_msa_pckev_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_pckev_h : ClangBuiltin<"__builtin_msa_pckev_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_pckev_w : ClangBuiltin<"__builtin_msa_pckev_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_pckev_d : ClangBuiltin<"__builtin_msa_pckev_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_pckod_b : ClangBuiltin<"__builtin_msa_pckod_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_pckod_h : ClangBuiltin<"__builtin_msa_pckod_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_pckod_w : ClangBuiltin<"__builtin_msa_pckod_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_pckod_d : ClangBuiltin<"__builtin_msa_pckod_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_pcnt_b : ClangBuiltin<"__builtin_msa_pcnt_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_pcnt_h : ClangBuiltin<"__builtin_msa_pcnt_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_pcnt_w : ClangBuiltin<"__builtin_msa_pcnt_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_pcnt_d : ClangBuiltin<"__builtin_msa_pcnt_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_sat_s_b : ClangBuiltin<"__builtin_msa_sat_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_s_h : ClangBuiltin<"__builtin_msa_sat_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_s_w : ClangBuiltin<"__builtin_msa_sat_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_s_d : ClangBuiltin<"__builtin_msa_sat_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_sat_u_b : ClangBuiltin<"__builtin_msa_sat_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_h : ClangBuiltin<"__builtin_msa_sat_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_w : ClangBuiltin<"__builtin_msa_sat_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_sat_u_d : ClangBuiltin<"__builtin_msa_sat_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_shf_b : ClangBuiltin<"__builtin_msa_shf_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_shf_h : ClangBuiltin<"__builtin_msa_shf_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_shf_w : ClangBuiltin<"__builtin_msa_shf_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_sld_b : ClangBuiltin<"__builtin_msa_sld_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_h : ClangBuiltin<"__builtin_msa_sld_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_w : ClangBuiltin<"__builtin_msa_sld_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_sld_d : ClangBuiltin<"__builtin_msa_sld_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;

def int_mips_sldi_b : ClangBuiltin<"__builtin_msa_sldi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sldi_h : ClangBuiltin<"__builtin_msa_sldi_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sldi_w : ClangBuiltin<"__builtin_msa_sldi_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_mips_sldi_d : ClangBuiltin<"__builtin_msa_sldi_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

def int_mips_sll_b : ClangBuiltin<"__builtin_msa_sll_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_sll_h : ClangBuiltin<"__builtin_msa_sll_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_sll_w : ClangBuiltin<"__builtin_msa_sll_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_sll_d : ClangBuiltin<"__builtin_msa_sll_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_slli_b : ClangBuiltin<"__builtin_msa_slli_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_slli_h : ClangBuiltin<"__builtin_msa_slli_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_slli_w : ClangBuiltin<"__builtin_msa_slli_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_slli_d : ClangBuiltin<"__builtin_msa_slli_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_splat_b : ClangBuiltin<"__builtin_msa_splat_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_splat_h : ClangBuiltin<"__builtin_msa_splat_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_splat_w : ClangBuiltin<"__builtin_msa_splat_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_mips_splat_d : ClangBuiltin<"__builtin_msa_splat_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;

def int_mips_splati_b : ClangBuiltin<"__builtin_msa_splati_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splati_h : ClangBuiltin<"__builtin_msa_splati_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splati_w : ClangBuiltin<"__builtin_msa_splati_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_splati_d : ClangBuiltin<"__builtin_msa_splati_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_sra_b : ClangBuiltin<"__builtin_msa_sra_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_sra_h : ClangBuiltin<"__builtin_msa_sra_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_sra_w : ClangBuiltin<"__builtin_msa_sra_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_sra_d : ClangBuiltin<"__builtin_msa_sra_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_srai_b : ClangBuiltin<"__builtin_msa_srai_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srai_h : ClangBuiltin<"__builtin_msa_srai_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srai_w : ClangBuiltin<"__builtin_msa_srai_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srai_d : ClangBuiltin<"__builtin_msa_srai_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_srar_b : ClangBuiltin<"__builtin_msa_srar_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_srar_h : ClangBuiltin<"__builtin_msa_srar_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_srar_w : ClangBuiltin<"__builtin_msa_srar_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_srar_d : ClangBuiltin<"__builtin_msa_srar_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_srari_b : ClangBuiltin<"__builtin_msa_srari_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srari_h : ClangBuiltin<"__builtin_msa_srari_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srari_w : ClangBuiltin<"__builtin_msa_srari_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srari_d : ClangBuiltin<"__builtin_msa_srari_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_srl_b : ClangBuiltin<"__builtin_msa_srl_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_srl_h : ClangBuiltin<"__builtin_msa_srl_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_srl_w : ClangBuiltin<"__builtin_msa_srl_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_srl_d : ClangBuiltin<"__builtin_msa_srl_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_srli_b : ClangBuiltin<"__builtin_msa_srli_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srli_h : ClangBuiltin<"__builtin_msa_srli_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srli_w : ClangBuiltin<"__builtin_msa_srli_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srli_d : ClangBuiltin<"__builtin_msa_srli_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_srlr_b : ClangBuiltin<"__builtin_msa_srlr_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_srlr_h : ClangBuiltin<"__builtin_msa_srlr_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_srlr_w : ClangBuiltin<"__builtin_msa_srlr_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_srlr_d : ClangBuiltin<"__builtin_msa_srlr_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_srlri_b : ClangBuiltin<"__builtin_msa_srlri_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlri_h : ClangBuiltin<"__builtin_msa_srlri_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlri_w : ClangBuiltin<"__builtin_msa_srlri_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_srlri_d : ClangBuiltin<"__builtin_msa_srlri_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_st_b : ClangBuiltin<"__builtin_msa_st_b">,
  Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty, llvm_i32_ty],
  [IntrArgMemOnly]>;
def int_mips_st_h : ClangBuiltin<"__builtin_msa_st_h">,
  Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty, llvm_i32_ty],
  [IntrArgMemOnly]>;
def int_mips_st_w : ClangBuiltin<"__builtin_msa_st_w">,
  Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty],
  [IntrArgMemOnly]>;
def int_mips_st_d : ClangBuiltin<"__builtin_msa_st_d">,
  Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
  [IntrArgMemOnly]>;

def int_mips_str_d : ClangBuiltin<"__builtin_msa_str_d">,
  Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
  [IntrArgMemOnly]>;
def int_mips_str_w : ClangBuiltin<"__builtin_msa_str_w">,
  Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty],
  [IntrArgMemOnly]>;

def int_mips_subs_s_b : ClangBuiltin<"__builtin_msa_subs_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_subs_s_h : ClangBuiltin<"__builtin_msa_subs_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_subs_s_w : ClangBuiltin<"__builtin_msa_subs_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_subs_s_d : ClangBuiltin<"__builtin_msa_subs_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_subs_u_b : ClangBuiltin<"__builtin_msa_subs_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_subs_u_h : ClangBuiltin<"__builtin_msa_subs_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_subs_u_w : ClangBuiltin<"__builtin_msa_subs_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_subs_u_d : ClangBuiltin<"__builtin_msa_subs_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_subsus_u_b : ClangBuiltin<"__builtin_msa_subsus_u_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_subsus_u_h : ClangBuiltin<"__builtin_msa_subsus_u_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_subsus_u_w : ClangBuiltin<"__builtin_msa_subsus_u_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_subsus_u_d : ClangBuiltin<"__builtin_msa_subsus_u_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_subsuu_s_b : ClangBuiltin<"__builtin_msa_subsuu_s_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_subsuu_s_h : ClangBuiltin<"__builtin_msa_subsuu_s_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_subsuu_s_w : ClangBuiltin<"__builtin_msa_subsuu_s_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_subsuu_s_d : ClangBuiltin<"__builtin_msa_subsuu_s_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_subv_b : ClangBuiltin<"__builtin_msa_subv_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_mips_subv_h : ClangBuiltin<"__builtin_msa_subv_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
def int_mips_subv_w : ClangBuiltin<"__builtin_msa_subv_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_mips_subv_d : ClangBuiltin<"__builtin_msa_subv_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;

def int_mips_subvi_b : ClangBuiltin<"__builtin_msa_subvi_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_subvi_h : ClangBuiltin<"__builtin_msa_subvi_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_subvi_w : ClangBuiltin<"__builtin_msa_subvi_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_mips_subvi_d : ClangBuiltin<"__builtin_msa_subvi_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;

def int_mips_vshf_b : ClangBuiltin<"__builtin_msa_vshf_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
            [IntrNoMem]>;
def int_mips_vshf_h : ClangBuiltin<"__builtin_msa_vshf_h">,
  Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
            [IntrNoMem]>;
def int_mips_vshf_w : ClangBuiltin<"__builtin_msa_vshf_w">,
  Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
            [IntrNoMem]>;
def int_mips_vshf_d : ClangBuiltin<"__builtin_msa_vshf_d">,
  Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
            [IntrNoMem]>;

def int_mips_xor_v : ClangBuiltin<"__builtin_msa_xor_v">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

def int_mips_xori_b : ClangBuiltin<"__builtin_msa_xori_b">,
  Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
//===- llvm/IR/OptBisect.h - LLVM Bisect support ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares the interface for bisecting optimizations.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_OPTBISECT_H
#define LLVM_IR_OPTBISECT_H

#include "llvm/ADT/StringRef.h"
#include <limits>

namespace llvm {

/// Extensions to this class implement mechanisms to disable passes and
/// individual optimizations at compile time.
class OptPassGate {
public:
  virtual ~OptPassGate() = default;

  /// IRDescription is a textual description of the IR unit the pass is running
  /// over.
  virtual bool shouldRunPass(const StringRef PassName,
                             StringRef IRDescription) {
    return true;
  }

  /// Returns true if this pass gate is active; shouldRunPass() should only be
  /// consulted when isEnabled() returns true.
  virtual bool isEnabled() const { return false; }
};
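
// Illustrative sketch (not part of the LLVM API surface): an extension could
// gate passes by name.  The subclass and pass name below are hypothetical.
//
//   struct SkipByName : public OptPassGate {
//     bool shouldRunPass(const StringRef PassName,
//                        StringRef IRDescription) override {
//       return PassName != "HypotheticalPass";
//     }
//     bool isEnabled() const override { return true; }
//   };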

/// This class implements a mechanism to disable passes and individual
/// optimizations at compile time based on a command line option
/// (-opt-bisect-limit) in order to perform a bisecting search for
/// optimization-related problems.
class OptBisect : public OptPassGate {
public:
  /// Default constructor. Initializes the state to "disabled". The bisection
  /// will be enabled by the cl::opt callback when the command-line option
  /// is processed.
  /// Clients should not instantiate this class directly.  All access should go
  /// through LLVMContext.
  OptBisect() = default;

  virtual ~OptBisect() = default;

  /// Checks the bisect limit to determine if the specified pass should run.
  ///
  /// This forwards to checkPass().
  bool shouldRunPass(const StringRef PassName,
                     StringRef IRDescription) override;

  /// Returns true when a bisect limit is in effect; shouldRunPass() should
  /// only be consulted when isEnabled() returns true.
  bool isEnabled() const override { return BisectLimit != Disabled; }

  /// Set the new optimization limit and reset the counter. Passing
  /// OptBisect::Disabled disables the limiting.
  void setLimit(int Limit) {
    BisectLimit = Limit;
    LastBisectNum = 0;
  }

  /// Checks the bisect limit to determine if the specified pass should run.
  ///
  /// If the bisect limit is set to -1, the function prints a message describing
  /// the pass and the bisect number assigned to it and returns true.  Otherwise,
  /// the function prints a message with the bisect number assigned to the
  /// pass, indicating whether or not the pass will be run, and returns true if
  /// the bisect limit has not yet been exceeded or false if it has.
  ///
  /// Most passes should not call this routine directly. Instead, they are
  /// called through helper routines provided by the pass base classes.  For
  /// instance, function passes should call FunctionPass::skipFunction().
  bool checkPass(const StringRef PassName, const StringRef TargetDesc);

  static const int Disabled = std::numeric_limits<int>::max();

private:
  int BisectLimit = Disabled;
  int LastBisectNum = 0;
};
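
// Usage sketch (illustrative only; as noted above, real clients obtain the
// instance through LLVMContext rather than constructing it):
//
//   OptBisect &Bisect = ...;   // obtained via the owning LLVMContext
//   Bisect.setLimit(42);       // run only the first 42 gated passes
//   if (Bisect.isEnabled() && !Bisect.shouldRunPass(PassName, IRDesc))
//     /* skip this pass */;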

/// Return the singleton OptBisect instance, so that multiple pass managers
/// don't need to coordinate their uses of OptBisect.
OptPassGate &getGlobalPassGate();

} // end namespace llvm

#endif // LLVM_IR_OPTBISECT_H
//===- ProfileSummary.h - Profile summary data structure. -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the profile summary data structure.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PROFILESUMMARY_H
#define LLVM_IR_PROFILESUMMARY_H

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

namespace llvm {

class LLVMContext;
class Metadata;
class raw_ostream;

// The profile summary is one or more (Cutoff, MinCount, NumCounts) triplets.
// The semantics of counts depend on the type of profile: for an
// instrumentation profile the counts are block counts, and for a sample
// profile they are per-line samples. Given a target percentile of counts, we
// compute the minimum number of counts needed to reach this target and the
// minimum among these counts.
struct ProfileSummaryEntry {
  const uint32_t Cutoff;    ///< The required percentile of counts.
  const uint64_t MinCount;  ///< The minimum count for this percentile.
  const uint64_t NumCounts; ///< Number of counts >= the minimum count.

  ProfileSummaryEntry(uint32_t TheCutoff, uint64_t TheMinCount,
                      uint64_t TheNumCounts)
      : Cutoff(TheCutoff), MinCount(TheMinCount), NumCounts(TheNumCounts) {}
};
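
// Worked example (numbers are illustrative): the entry (Cutoff = 990000,
// MinCount = 100, NumCounts = 350) says that, with cutoffs scaled by
// ProfileSummary::Scale (1000000), the hottest 350 counts cover 99% of the
// total count, and the coldest of those 350 counts is 100.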

using SummaryEntryVector = std::vector<ProfileSummaryEntry>;

class ProfileSummary {
public:
  enum Kind { PSK_Instr, PSK_CSInstr, PSK_Sample };

private:
  const Kind PSK;
  const SummaryEntryVector DetailedSummary;
  const uint64_t TotalCount, MaxCount, MaxInternalCount, MaxFunctionCount;
  const uint32_t NumCounts, NumFunctions;
  /// If 'Partial' is false, it means the profile being used to optimize
  /// a target is collected from the same target.
  /// If 'Partial' is true, it means the profile is for common/shared
  /// code. The common profile is usually merged from profiles collected
  /// from running other targets.
  bool Partial = false;
  /// This approximately represents the ratio of the number of profile counters
  /// of the program being built to the number of profile counters in the
  /// partial sample profile. When 'Partial' is false, it is undefined. This is
  /// currently only available under thin LTO mode.
  double PartialProfileRatio = 0.0;
  /// Return detailed summary as metadata.
  Metadata *getDetailedSummaryMD(LLVMContext &Context);

public:
  static const int Scale = 1000000;

  ProfileSummary(Kind K, SummaryEntryVector DetailedSummary,
                 uint64_t TotalCount, uint64_t MaxCount,
                 uint64_t MaxInternalCount, uint64_t MaxFunctionCount,
                 uint32_t NumCounts, uint32_t NumFunctions,
                 bool Partial = false, double PartialProfileRatio = 0)
      : PSK(K), DetailedSummary(std::move(DetailedSummary)),
        TotalCount(TotalCount), MaxCount(MaxCount),
        MaxInternalCount(MaxInternalCount), MaxFunctionCount(MaxFunctionCount),
        NumCounts(NumCounts), NumFunctions(NumFunctions), Partial(Partial),
        PartialProfileRatio(PartialProfileRatio) {}

  Kind getKind() const { return PSK; }
  /// Return summary information as metadata.
  Metadata *getMD(LLVMContext &Context, bool AddPartialField = true,
                  bool AddPartialProfileRatioField = true);
  /// Construct profile summary from metadata.
  static ProfileSummary *getFromMD(Metadata *MD);
  const SummaryEntryVector &getDetailedSummary() { return DetailedSummary; }
  uint32_t getNumFunctions() const { return NumFunctions; }
  uint64_t getMaxFunctionCount() const { return MaxFunctionCount; }
  uint32_t getNumCounts() const { return NumCounts; }
  uint64_t getTotalCount() const { return TotalCount; }
  uint64_t getMaxCount() const { return MaxCount; }
  uint64_t getMaxInternalCount() const { return MaxInternalCount; }
  void setPartialProfile(bool PP) { Partial = PP; }
  bool isPartialProfile() const { return Partial; }
  double getPartialProfileRatio() const { return PartialProfileRatio; }
  void setPartialProfileRatio(double R) {
    assert(isPartialProfile() && "Unexpected when not partial profile");
    PartialProfileRatio = R;
  }
  void printSummary(raw_ostream &OS) const;
  void printDetailedSummary(raw_ostream &OS) const;
};
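
// Construction sketch (all values are made up for illustration; Ctx is an
// assumed LLVMContext):
//
//   SummaryEntryVector Entries;
//   Entries.emplace_back(/*Cutoff=*/990000, /*MinCount=*/100,
//                        /*NumCounts=*/350);
//   ProfileSummary PS(ProfileSummary::PSK_Instr, Entries,
//                     /*TotalCount=*/100000, /*MaxCount=*/5000,
//                     /*MaxInternalCount=*/4000, /*MaxFunctionCount=*/5000,
//                     /*NumCounts=*/400, /*NumFunctions=*/10);
//   Metadata *MD = PS.getMD(Ctx);  // e.g. attach as module profile summary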

} // end namespace llvm

#endif // LLVM_IR_PROFILESUMMARY_H
//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a simple and efficient mechanism for performing general
// tree-based pattern matches on the LLVM IR. The power of these routines is
// that it allows you to write concise patterns that are expressive and easy to
// understand. The other major advantage of this is that it allows you to
// trivially capture/bind elements in the pattern to variables. For example,
// you can do something like this:
//
//  Value *Exp = ...
//  Value *X, *Y;  ConstantInt *C1, *C2;      // (X & C1) | (Y & C2)
//  if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
//                      m_And(m_Value(Y), m_ConstantInt(C2))))) {
//    ... Pattern is matched and variables are bound ...
//  }
//
// This is primarily useful for passes like the instruction combiner, but can
// also be useful for static analysis tools or code generators.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PATTERNMATCH_H
#define LLVM_IR_PATTERNMATCH_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cstdint>

namespace llvm {
namespace PatternMatch {

template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
  return const_cast<Pattern &>(P).match(V);
}

template <typename Pattern> bool match(ArrayRef<int> Mask, const Pattern &P) {
  return const_cast<Pattern &>(P).match(Mask);
}

template <typename SubPattern_t> struct OneUse_match {
  SubPattern_t SubPattern;

  OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    return V->hasOneUse() && SubPattern.match(V);
  }
};

template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
  return SubPattern;
}
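
// Example (sketch; V is an assumed Value*): match a multiply whose result has
// exactly one use, e.g. so a fold does not duplicate the multiply for other
// users (m_Mul is defined later in this file):
//
//   Value *X, *Y;
//   if (match(V, m_OneUse(m_Mul(m_Value(X), m_Value(Y)))))
//     ...;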

template <typename Class> struct class_match {
  template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
};

/// Match an arbitrary value and ignore it.
inline class_match<Value> m_Value() { return class_match<Value>(); }

/// Match an arbitrary unary operation and ignore it.
inline class_match<UnaryOperator> m_UnOp() {
  return class_match<UnaryOperator>();
}

/// Match an arbitrary binary operation and ignore it.
inline class_match<BinaryOperator> m_BinOp() {
  return class_match<BinaryOperator>();
}

/// Match any compare instruction and ignore it.
inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }

struct undef_match {
  static bool check(const Value *V) {
    if (isa<UndefValue>(V))
      return true;

    const auto *CA = dyn_cast<ConstantAggregate>(V);
    if (!CA)
      return false;

    SmallPtrSet<const ConstantAggregate *, 8> Seen;
    SmallVector<const ConstantAggregate *, 8> Worklist;

    // Either UndefValue, PoisonValue, or an aggregate that only contains
    // these is accepted by the matcher.
    // CheckValue returns false if CA cannot satisfy this constraint.
    auto CheckValue = [&](const ConstantAggregate *CA) {
      for (const Value *Op : CA->operand_values()) {
        if (isa<UndefValue>(Op))
          continue;

        const auto *CA = dyn_cast<ConstantAggregate>(Op);
        if (!CA)
          return false;
        if (Seen.insert(CA).second)
          Worklist.emplace_back(CA);
      }

      return true;
    };

    if (!CheckValue(CA))
      return false;

    while (!Worklist.empty()) {
      if (!CheckValue(Worklist.pop_back_val()))
        return false;
    }
    return true;
  }
  template <typename ITy> bool match(ITy *V) { return check(V); }
};

/// Match an arbitrary undef constant. This matches poison as well.
/// If this is an aggregate and contains a non-aggregate element that is
/// neither undef nor poison, the aggregate is not matched.
inline auto m_Undef() { return undef_match(); }

/// Match an arbitrary poison constant.
inline class_match<PoisonValue> m_Poison() {
  return class_match<PoisonValue>();
}

/// Match an arbitrary Constant and ignore it.
inline class_match<Constant> m_Constant() { return class_match<Constant>(); }

/// Match an arbitrary ConstantInt and ignore it.
inline class_match<ConstantInt> m_ConstantInt() {
  return class_match<ConstantInt>();
}

/// Match an arbitrary ConstantFP and ignore it.
inline class_match<ConstantFP> m_ConstantFP() {
  return class_match<ConstantFP>();
}

struct constantexpr_match {
  template <typename ITy> bool match(ITy *V) {
    auto *C = dyn_cast<Constant>(V);
    return C && (isa<ConstantExpr>(C) || C->containsConstantExpression());
  }
};

/// Match a constant expression or a constant that contains a constant
/// expression.
inline constantexpr_match m_ConstantExpr() { return constantexpr_match(); }

/// Match an arbitrary basic block value and ignore it.
inline class_match<BasicBlock> m_BasicBlock() {
  return class_match<BasicBlock>();
}

/// Inverting matcher
template <typename Ty> struct match_unless {
  Ty M;

  match_unless(const Ty &Matcher) : M(Matcher) {}

  template <typename ITy> bool match(ITy *V) { return !M.match(V); }
};

/// Match if the inner matcher does *NOT* match.
template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) {
  return match_unless<Ty>(M);
}

/// Matching combinators
template <typename LTy, typename RTy> struct match_combine_or {
  LTy L;
  RTy R;

  match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    if (L.match(V))
      return true;
    if (R.match(V))
      return true;
    return false;
  }
};

template <typename LTy, typename RTy> struct match_combine_and {
  LTy L;
  RTy R;

  match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}

  template <typename ITy> bool match(ITy *V) {
    if (L.match(V))
      if (R.match(V))
        return true;
    return false;
  }
};

/// Combine two pattern matchers matching L || R
template <typename LTy, typename RTy>
inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
  return match_combine_or<LTy, RTy>(L, R);
}

/// Combine two pattern matchers matching L && R
template <typename LTy, typename RTy>
inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
  return match_combine_and<LTy, RTy>(L, R);
}
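
// Example (sketch; V is an assumed Value*): combinators compose other
// matchers.  This matches a constant that is either all-ones or zero
// (m_AllOnes and m_Zero are defined later in this file):
//
//   if (match(V, m_CombineOr(m_AllOnes(), m_Zero())))
//     ...;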

struct apint_match {
  const APInt *&Res;
  bool AllowUndef;

  apint_match(const APInt *&Res, bool AllowUndef)
      : Res(Res), AllowUndef(AllowUndef) {}

  template <typename ITy> bool match(ITy *V) {
    if (auto *CI = dyn_cast<ConstantInt>(V)) {
      Res = &CI->getValue();
      return true;
    }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI =
                dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndef))) {
          Res = &CI->getValue();
          return true;
        }
    return false;
  }
};
// Either "if constexpr" or renaming ConstantFP::getValueAPF to
// ConstantFP::getValue would be needed to handle both APInt and APFloat
// with a single template function.
struct apfloat_match {
  const APFloat *&Res;
  bool AllowUndef;

  apfloat_match(const APFloat *&Res, bool AllowUndef)
      : Res(Res), AllowUndef(AllowUndef) {}

  template <typename ITy> bool match(ITy *V) {
    if (auto *CI = dyn_cast<ConstantFP>(V)) {
      Res = &CI->getValueAPF();
      return true;
    }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI =
                dyn_cast_or_null<ConstantFP>(C->getSplatValue(AllowUndef))) {
          Res = &CI->getValueAPF();
          return true;
        }
    return false;
  }
};

/// Match a ConstantInt or splatted ConstantVector, binding the
/// specified pointer to the contained APInt.
inline apint_match m_APInt(const APInt *&Res) {
  // Forbid undefs by default to maintain previous behavior.
  return apint_match(Res, /* AllowUndef */ false);
}

/// Match APInt while allowing undefs in splat vector constants.
inline apint_match m_APIntAllowUndef(const APInt *&Res) {
  return apint_match(Res, /* AllowUndef */ true);
}

/// Match APInt while forbidding undefs in splat vector constants.
inline apint_match m_APIntForbidUndef(const APInt *&Res) {
  return apint_match(Res, /* AllowUndef */ false);
}

/// Match a ConstantFP or splatted ConstantVector, binding the
/// specified pointer to the contained APFloat.
inline apfloat_match m_APFloat(const APFloat *&Res) {
  // Forbid undefs by default to maintain previous behavior.
  return apfloat_match(Res, /* AllowUndef */ false);
}

/// Match APFloat while allowing undefs in splat vector constants.
inline apfloat_match m_APFloatAllowUndef(const APFloat *&Res) {
  return apfloat_match(Res, /* AllowUndef */ true);
}

/// Match APFloat while forbidding undefs in splat vector constants.
inline apfloat_match m_APFloatForbidUndef(const APFloat *&Res) {
  return apfloat_match(Res, /* AllowUndef */ false);
}
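
// Example (sketch; V is an assumed Value*): bind the constant (scalar or
// splat-vector) operand of an add to an APInt and inspect it (m_Add is
// defined later in this file):
//
//   Value *X;
//   const APInt *C;
//   if (match(V, m_Add(m_Value(X), m_APInt(C))) && C->isNegative())
//     ...;  // V is X + C with a negative constant C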

template <int64_t Val> struct constantint_match {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      const APInt &CIV = CI->getValue();
      if (Val >= 0)
        return CIV == static_cast<uint64_t>(Val);
      // If Val is negative, and CI is shorter than it, truncate to the right
      // number of bits.  If it is larger, then we have to sign extend.  Just
      // compare their negated values.
      return -CIV == -Val;
    }
    return false;
  }
};

/// Match a ConstantInt with a specific value.
template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
  return constantint_match<Val>();
}

/// This helper class is used to match constant scalars, vector splats,
/// and fixed width vectors that satisfy a specified predicate.
/// For fixed width vector constants, undefined elements are ignored.
template <typename Predicate, typename ConstantVal>
struct cstval_pred_ty : public Predicate {
  template <typename ITy> bool match(ITy *V) {
    if (const auto *CV = dyn_cast<ConstantVal>(V))
      return this->isValue(CV->getValue());
    if (const auto *VTy = dyn_cast<VectorType>(V->getType())) {
      if (const auto *C = dyn_cast<Constant>(V)) {
        if (const auto *CV = dyn_cast_or_null<ConstantVal>(C->getSplatValue()))
          return this->isValue(CV->getValue());

        // The number of elements of a scalable vector is unknown at compile
        // time, so a per-element check is not possible.
        auto *FVTy = dyn_cast<FixedVectorType>(VTy);
        if (!FVTy)
          return false;

        // Non-splat vector constant: check each element for a match.
        unsigned NumElts = FVTy->getNumElements();
        assert(NumElts != 0 && "Constant vector with no elements?");
        bool HasNonUndefElements = false;
        for (unsigned i = 0; i != NumElts; ++i) {
          Constant *Elt = C->getAggregateElement(i);
          if (!Elt)
            return false;
          if (isa<UndefValue>(Elt))
            continue;
          auto *CV = dyn_cast<ConstantVal>(Elt);
          if (!CV || !this->isValue(CV->getValue()))
            return false;
          HasNonUndefElements = true;
        }
        return HasNonUndefElements;
      }
    }
    return false;
  }
};

/// Specialization of cstval_pred_ty for ConstantInt.
template <typename Predicate>
using cst_pred_ty = cstval_pred_ty<Predicate, ConstantInt>;

/// Specialization of cstval_pred_ty for ConstantFP.
template <typename Predicate>
using cstfp_pred_ty = cstval_pred_ty<Predicate, ConstantFP>;

/// This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate, and bind them to an APInt.
template <typename Predicate> struct api_pred_ty : public Predicate {
  const APInt *&Res;

  api_pred_ty(const APInt *&R) : Res(R) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantInt>(V))
      if (this->isValue(CI->getValue())) {
        Res = &CI->getValue();
        return true;
      }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
          if (this->isValue(CI->getValue())) {
            Res = &CI->getValue();
            return true;
          }

    return false;
  }
};

/// This helper class is used to match scalar and vector constants that
/// satisfy a specified predicate, and bind them to an APFloat.
/// Undefs are allowed in splat vector constants.
template <typename Predicate> struct apf_pred_ty : public Predicate {
  const APFloat *&Res;

  apf_pred_ty(const APFloat *&R) : Res(R) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CI = dyn_cast<ConstantFP>(V))
      if (this->isValue(CI->getValue())) {
        Res = &CI->getValue();
        return true;
      }
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CI = dyn_cast_or_null<ConstantFP>(
                C->getSplatValue(/* AllowUndef */ true)))
          if (this->isValue(CI->getValue())) {
            Res = &CI->getValue();
            return true;
          }

    return false;
  }
};

///////////////////////////////////////////////////////////////////////////////
//
// Encapsulate constant value queries for use in templated predicate matchers.
// This allows checking if constants match using compound predicates and works
// with vector constants, possibly with relaxed constraints. For example, ignore
// undef values.
//
///////////////////////////////////////////////////////////////////////////////

struct is_any_apint {
  bool isValue(const APInt &C) { return true; }
};
/// Match an integer or vector with any integral constant.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() {
  return cst_pred_ty<is_any_apint>();
}

struct is_shifted_mask {
  bool isValue(const APInt &C) { return C.isShiftedMask(); }
};

inline cst_pred_ty<is_shifted_mask> m_ShiftedMask() {
  return cst_pred_ty<is_shifted_mask>();
}

struct is_all_ones {
  bool isValue(const APInt &C) { return C.isAllOnes(); }
};
/// Match an integer or vector with all bits set.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_all_ones> m_AllOnes() {
  return cst_pred_ty<is_all_ones>();
}

struct is_maxsignedvalue {
  bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
};
/// Match an integer or vector with values having all bits except for the high
/// bit set (0x7f...).
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
  return cst_pred_ty<is_maxsignedvalue>();
}
inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
  return V;
}

struct is_negative {
  bool isValue(const APInt &C) { return C.isNegative(); }
};
/// Match an integer or vector of negative values.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_negative> m_Negative() {
  return cst_pred_ty<is_negative>();
}
inline api_pred_ty<is_negative> m_Negative(const APInt *&V) { return V; }

struct is_nonnegative {
  bool isValue(const APInt &C) { return C.isNonNegative(); }
};
/// Match an integer or vector of non-negative values.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_nonnegative> m_NonNegative() {
  return cst_pred_ty<is_nonnegative>();
}
inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) { return V; }

struct is_strictlypositive {
  bool isValue(const APInt &C) { return C.isStrictlyPositive(); }
};
/// Match an integer or vector of strictly positive values.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_strictlypositive> m_StrictlyPositive() {
  return cst_pred_ty<is_strictlypositive>();
}
inline api_pred_ty<is_strictlypositive> m_StrictlyPositive(const APInt *&V) {
  return V;
}

struct is_nonpositive {
  bool isValue(const APInt &C) { return C.isNonPositive(); }
};
/// Match an integer or vector of non-positive values.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_nonpositive> m_NonPositive() {
  return cst_pred_ty<is_nonpositive>();
}
inline api_pred_ty<is_nonpositive> m_NonPositive(const APInt *&V) { return V; }

struct is_one {
  bool isValue(const APInt &C) { return C.isOne(); }
};
/// Match an integer 1 or a vector with all elements equal to 1.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }

struct is_zero_int {
  bool isValue(const APInt &C) { return C.isZero(); }
};
/// Match an integer 0 or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_zero_int> m_ZeroInt() {
  return cst_pred_ty<is_zero_int>();
}

struct is_zero {
  template <typename ITy> bool match(ITy *V) {
    auto *C = dyn_cast<Constant>(V);
    // FIXME: this should be able to do something for scalable vectors
    return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C));
  }
};
/// Match any null constant or a vector with all elements equal to 0.
/// For vectors, this includes constants with undefined elements.
inline is_zero m_Zero() { return is_zero(); }

struct is_power2 {
  bool isValue(const APInt &C) { return C.isPowerOf2(); }
};
/// Match an integer or vector power-of-2.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }

struct is_negated_power2 {
  bool isValue(const APInt &C) { return C.isNegatedPowerOf2(); }
};
/// Match an integer or vector negated power-of-2.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_negated_power2> m_NegatedPower2() {
  return cst_pred_ty<is_negated_power2>();
}
inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) {
  return V;
}

struct is_power2_or_zero {
  bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
};
/// Match an integer or vector of 0 or power-of-2 values.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
  return cst_pred_ty<is_power2_or_zero>();
}
inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
  return V;
}

struct is_sign_mask {
  bool isValue(const APInt &C) { return C.isSignMask(); }
};
/// Match an integer or vector with only the sign bit(s) set.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_sign_mask> m_SignMask() {
  return cst_pred_ty<is_sign_mask>();
}

struct is_lowbit_mask {
  bool isValue(const APInt &C) { return C.isMask(); }
};
/// Match an integer or vector with only the low bit(s) set.
/// For vectors, this includes constants with undefined elements.
inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
  return cst_pred_ty<is_lowbit_mask>();
}
inline api_pred_ty<is_lowbit_mask> m_LowBitMask(const APInt *&V) { return V; }

struct icmp_pred_with_threshold {
  ICmpInst::Predicate Pred;
  const APInt *Thr;
  bool isValue(const APInt &C) { return ICmpInst::compare(C, *Thr, Pred); }
};
/// Match an integer or vector where every element compares 'Pred' (eq/ne/...)
/// to Threshold. For vectors, this includes constants with undefined elements.
inline cst_pred_ty<icmp_pred_with_threshold>
m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold) {
  cst_pred_ty<icmp_pred_with_threshold> P;
  P.Pred = Predicate;
  P.Thr = &Threshold;
  return P;
}
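
// Example (sketch; V is an assumed Value*): match a constant (or splat) that
// compares ult against 8, i.e. every element is in [0, 8):
//
//   if (match(V, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(32, 8))))
//     ...;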

struct is_nan {
  bool isValue(const APFloat &C) { return C.isNaN(); }
};
/// Match an arbitrary NaN constant. This includes quiet and signaling NaNs.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_nan> m_NaN() { return cstfp_pred_ty<is_nan>(); }

struct is_nonnan {
  bool isValue(const APFloat &C) { return !C.isNaN(); }
};
/// Match a non-NaN FP constant.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_nonnan> m_NonNaN() {
  return cstfp_pred_ty<is_nonnan>();
}

struct is_inf {
  bool isValue(const APFloat &C) { return C.isInfinity(); }
};
/// Match a positive or negative infinity FP constant.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_inf> m_Inf() { return cstfp_pred_ty<is_inf>(); }

struct is_noninf {
  bool isValue(const APFloat &C) { return !C.isInfinity(); }
};
/// Match a non-infinity FP constant, i.e. finite or NaN.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_noninf> m_NonInf() {
  return cstfp_pred_ty<is_noninf>();
}

struct is_finite {
  bool isValue(const APFloat &C) { return C.isFinite(); }
};
/// Match a finite FP constant, i.e. not infinity or NaN.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_finite> m_Finite() {
  return cstfp_pred_ty<is_finite>();
}
inline apf_pred_ty<is_finite> m_Finite(const APFloat *&V) { return V; }

struct is_finitenonzero {
  bool isValue(const APFloat &C) { return C.isFiniteNonZero(); }
};
/// Match a finite non-zero FP constant.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_finitenonzero> m_FiniteNonZero() {
  return cstfp_pred_ty<is_finitenonzero>();
}
inline apf_pred_ty<is_finitenonzero> m_FiniteNonZero(const APFloat *&V) {
  return V;
}

struct is_any_zero_fp {
  bool isValue(const APFloat &C) { return C.isZero(); }
};
/// Match a floating-point negative zero or positive zero.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() {
  return cstfp_pred_ty<is_any_zero_fp>();
}

struct is_pos_zero_fp {
  bool isValue(const APFloat &C) { return C.isPosZero(); }
};
/// Match a floating-point positive zero.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() {
  return cstfp_pred_ty<is_pos_zero_fp>();
}

struct is_neg_zero_fp {
  bool isValue(const APFloat &C) { return C.isNegZero(); }
};
/// Match a floating-point negative zero.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() {
  return cstfp_pred_ty<is_neg_zero_fp>();
}

struct is_non_zero_fp {
  bool isValue(const APFloat &C) { return C.isNonZero(); }
};
/// Match a floating-point non-zero.
/// For vectors, this includes constants with undefined elements.
inline cstfp_pred_ty<is_non_zero_fp> m_NonZeroFP() {
  return cstfp_pred_ty<is_non_zero_fp>();
}

///////////////////////////////////////////////////////////////////////////////

template <typename Class> struct bind_ty {
  Class *&VR;

  bind_ty(Class *&V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (auto *CV = dyn_cast<Class>(V)) {
      VR = CV;
      return true;
    }
    return false;
  }
};

/// Match a value, capturing it if we match.
inline bind_ty<Value> m_Value(Value *&V) { return V; }
inline bind_ty<const Value> m_Value(const Value *&V) { return V; }

/// Match an instruction, capturing it if we match.
inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
/// Match a unary operator, capturing it if we match.
inline bind_ty<UnaryOperator> m_UnOp(UnaryOperator *&I) { return I; }
/// Match a binary operator, capturing it if we match.
inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
/// Match a with-overflow intrinsic, capturing it if we match.
inline bind_ty<WithOverflowInst> m_WithOverflowInst(WithOverflowInst *&I) {
  return I;
}
inline bind_ty<const WithOverflowInst>
m_WithOverflowInst(const WithOverflowInst *&I) {
  return I;
}

/// Match a Constant, capturing the value if we match.
inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }

/// Match a ConstantInt, capturing the value if we match.
inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }

/// Match a ConstantFP, capturing the value if we match.
inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }

/// Match a ConstantExpr, capturing the value if we match.
inline bind_ty<ConstantExpr> m_ConstantExpr(ConstantExpr *&C) { return C; }

/// Match a basic block value, capturing it if we match.
inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; }
inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) {
  return V;
}

/// Match an arbitrary immediate Constant and ignore it.
inline match_combine_and<class_match<Constant>,
                         match_unless<constantexpr_match>>
m_ImmConstant() {
  return m_CombineAnd(m_Constant(), m_Unless(m_ConstantExpr()));
}

/// Match an immediate Constant, capturing the value if we match.
inline match_combine_and<bind_ty<Constant>,
                         match_unless<constantexpr_match>>
m_ImmConstant(Constant *&C) {
  return m_CombineAnd(m_Constant(C), m_Unless(m_ConstantExpr()));
}

/// Match a specified Value*.
struct specificval_ty {
  const Value *Val;

  specificval_ty(const Value *V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) { return V == Val; }
};

/// Match the specified value exactly.
inline specificval_ty m_Specific(const Value *V) { return V; }

/// Stores a reference to the Value *, not the Value * itself, so it can be
/// used in commutative matchers.
template <typename Class> struct deferredval_ty {
  Class *const &Val;

  deferredval_ty(Class *const &V) : Val(V) {}

  template <typename ITy> bool match(ITy *const V) { return V == Val; }
};

/// Like m_Specific(), but works if the specific value to match is determined
/// as part of the same match() expression. For example:
/// m_Add(m_Value(X), m_Specific(X)) is incorrect, because m_Specific() will
/// bind X before the pattern match starts.
/// m_Add(m_Value(X), m_Deferred(X)) is correct, and will check against
/// whichever value m_Value(X) populated.
inline deferredval_ty<Value> m_Deferred(Value *const &V) { return V; }
inline deferredval_ty<const Value> m_Deferred(const Value *const &V) {
  return V;
}
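
// Example (sketch; V is an assumed Value*): match an add of a value to
// itself, where both operands must resolve to the same Value* (m_Add is
// defined later in this file):
//
//   Value *X;
//   if (match(V, m_Add(m_Value(X), m_Deferred(X))))
//     ...;  // V is X + X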

/// Match a specified floating point value, or a vector whose elements all
/// equal that value.
struct specific_fpval {
  double Val;

  specific_fpval(double V) : Val(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CFP = dyn_cast<ConstantFP>(V))
      return CFP->isExactlyValue(Val);
    if (V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
          return CFP->isExactlyValue(Val);
    return false;
  }
};

/// Match a specific floating point value or vector with all elements
/// equal to the value.
inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }

/// Match a float 1.0 or vector with all elements equal to 1.0.
inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }

struct bind_const_intval_ty {
  uint64_t &VR;

  bind_const_intval_ty(uint64_t &V) : VR(V) {}

  template <typename ITy> bool match(ITy *V) {
    if (const auto *CV = dyn_cast<ConstantInt>(V))
      if (CV->getValue().ule(UINT64_MAX)) {
        VR = CV->getZExtValue();
        return true;
      }
    return false;
  }
};

/// Match a specified integer value, or a vector whose elements all equal
/// that value.
template <bool AllowUndefs> struct specific_intval {
  APInt Val;

  specific_intval(APInt V) : Val(std::move(V)) {}

  template <typename ITy> bool match(ITy *V) {
    const auto *CI = dyn_cast<ConstantInt>(V);
    if (!CI && V->getType()->isVectorTy())
      if (const auto *C = dyn_cast<Constant>(V))
        CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndefs));

    return CI && APInt::isSameValue(CI->getValue(), Val);
  }
};

/// Match a specific integer value or vector with all elements equal to
/// the value.
inline specific_intval<false> m_SpecificInt(APInt V) {
  return specific_intval<false>(std::move(V));
}

inline specific_intval<false> m_SpecificInt(uint64_t V) {
  return m_SpecificInt(APInt(64, V));
}

inline specific_intval<true> m_SpecificIntAllowUndef(APInt V) {
  return specific_intval<true>(std::move(V));
}

inline specific_intval<true> m_SpecificIntAllowUndef(uint64_t V) {
  return m_SpecificIntAllowUndef(APInt(64, V));
}

/// Match a ConstantInt and bind to its value.  This does not match
/// ConstantInts wider than 64-bits.
inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }

/// Match a specified basic block value.
struct specific_bbval {
  BasicBlock *Val;

  specific_bbval(BasicBlock *Val) : Val(Val) {}

  template <typename ITy> bool match(ITy *V) {
    const auto *BB = dyn_cast<BasicBlock>(V);
    return BB && BB == Val;
  }
};

/// Match a specific basic block value.
inline specific_bbval m_SpecificBB(BasicBlock *BB) {
  return specific_bbval(BB);
}

/// A commutative-friendly version of m_Specific().
inline deferredval_ty<BasicBlock> m_Deferred(BasicBlock *const &BB) {
  return BB;
}
inline deferredval_ty<const BasicBlock>
m_Deferred(const BasicBlock *const &BB) {
  return BB;
}

//===----------------------------------------------------------------------===//
// Matcher for any binary operator.
//
template <typename LHS_t, typename RHS_t, bool Commutable = false>
struct AnyBinaryOp_match {
  LHS_t L;
  RHS_t R;

  // The evaluation order is always stable, regardless of Commutability.
  // The LHS is always matched first.
  AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<BinaryOperator>(V))
      return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
             (Commutable && L.match(I->getOperand(1)) &&
              R.match(I->getOperand(0)));
    return false;
  }
};

template <typename LHS, typename RHS>
inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
  return AnyBinaryOp_match<LHS, RHS>(L, R);
}

//===----------------------------------------------------------------------===//
// Matcher for any unary operator.
// TODO: Fuse the unary and binary matchers into an n-ary matcher.
//
template <typename OP_t> struct AnyUnaryOp_match {
  OP_t X;

  AnyUnaryOp_match(const OP_t &X) : X(X) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<UnaryOperator>(V))
      return X.match(I->getOperand(0));
    return false;
  }
};

template <typename OP_t> inline AnyUnaryOp_match<OP_t> m_UnOp(const OP_t &X) {
  return AnyUnaryOp_match<OP_t>(X);
}

//===----------------------------------------------------------------------===//
// Matchers for specific binary operators.
//

template <typename LHS_t, typename RHS_t, unsigned Opcode,
          bool Commutable = false>
struct BinaryOp_match {
  LHS_t L;
  RHS_t R;

  // The evaluation order is always stable, regardless of Commutability.
  // The LHS is always matched first.
  BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> inline bool match(unsigned Opc, OpTy *V) {
    if (V->getValueID() == Value::InstructionVal + Opc) {
      auto *I = cast<BinaryOperator>(V);
      return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
             (Commutable && L.match(I->getOperand(1)) &&
              R.match(I->getOperand(0)));
    }
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      return CE->getOpcode() == Opc &&
             ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
              (Commutable && L.match(CE->getOperand(1)) &&
               R.match(CE->getOperand(0))));
    return false;
  }

  template <typename OpTy> bool match(OpTy *V) { return match(Opcode, V); }
};

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
}

template <typename Op_t> struct FNeg_match {
  Op_t X;

  FNeg_match(const Op_t &Op) : X(Op) {}
  template <typename OpTy> bool match(OpTy *V) {
    auto *FPMO = dyn_cast<FPMathOperator>(V);
    if (!FPMO)
      return false;

    if (FPMO->getOpcode() == Instruction::FNeg)
      return X.match(FPMO->getOperand(0));

    if (FPMO->getOpcode() == Instruction::FSub) {
      if (FPMO->hasNoSignedZeros()) {
        // With 'nsz', any zero goes.
        if (!cstfp_pred_ty<is_any_zero_fp>().match(FPMO->getOperand(0)))
          return false;
      } else {
        // Without 'nsz', we need fsub -0.0, X exactly.
        if (!cstfp_pred_ty<is_neg_zero_fp>().match(FPMO->getOperand(0)))
          return false;
      }

      return X.match(FPMO->getOperand(1));
    }

    return false;
  }
};

/// Match 'fneg X' as 'fsub -0.0, X'.
template <typename OpTy> inline FNeg_match<OpTy> m_FNeg(const OpTy &X) {
  return FNeg_match<OpTy>(X);
}

/// Match 'fneg X' as 'fsub +-0.0, X'.
template <typename RHS>
inline BinaryOp_match<cstfp_pred_ty<is_any_zero_fp>, RHS, Instruction::FSub>
m_FNegNSZ(const RHS &X) {
  return m_FSub(m_AnyZeroFP(), X);
}
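
// Example (illustrative; 'V' is an assumed 'Value *'): m_FNeg accepts both
// the dedicated fneg instruction and the legacy 'fsub -0.0, X' form (any
// zero when the fsub carries 'nsz'); m_FNegNSZ only accepts the fsub form:
//
//   Value *X;
//   if (match(V, m_FNeg(m_Value(X))))
//     ...; // V computes the negation of X.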

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
                                                      const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
                                                        const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
}

template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
                                                          const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
}

template <typename LHS_t, typename RHS_t, unsigned Opcode,
          unsigned WrapFlags = 0>
struct OverflowingBinaryOp_match {
  LHS_t L;
  RHS_t R;

  OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
      : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
      if (Op->getOpcode() != Opcode)
        return false;
      if ((WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap) &&
          !Op->hasNoUnsignedWrap())
        return false;
      if ((WrapFlags & OverflowingBinaryOperator::NoSignedWrap) &&
          !Op->hasNoSignedWrap())
        return false;
      return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
    }
    return false;
  }
};

template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoSignedWrap>(L,
                                                                            R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoSignedWrap>(L,
                                                                            R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoSignedWrap>(L,
                                                                            R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoSignedWrap>(L,
                                                                            R);
}

template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWAdd(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWSub(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWMul(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
template <typename LHS, typename RHS>
inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                 OverflowingBinaryOperator::NoUnsignedWrap>
m_NUWShl(const LHS &L, const RHS &R) {
  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
                                   OverflowingBinaryOperator::NoUnsignedWrap>(
      L, R);
}
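
// Example (illustrative; 'V' is an assumed 'Value *'): the NSW/NUW variants
// match only when the corresponding wrap flag is set on the instruction:
//
//   Value *X;
//   if (match(V, m_NSWAdd(m_Value(X), m_One())))
//     ...; // V is 'add nsw X, 1'.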

template <typename LHS_t, typename RHS_t, bool Commutable = false>
struct SpecificBinaryOp_match
    : public BinaryOp_match<LHS_t, RHS_t, 0, Commutable> {
  unsigned Opcode;

  SpecificBinaryOp_match(unsigned Opcode, const LHS_t &LHS, const RHS_t &RHS)
      : BinaryOp_match<LHS_t, RHS_t, 0, Commutable>(LHS, RHS), Opcode(Opcode) {}

  template <typename OpTy> bool match(OpTy *V) {
    return BinaryOp_match<LHS_t, RHS_t, 0, Commutable>::match(Opcode, V);
  }
};

/// Matches a specific opcode.
template <typename LHS, typename RHS>
inline SpecificBinaryOp_match<LHS, RHS> m_BinOp(unsigned Opcode, const LHS &L,
                                                const RHS &R) {
  return SpecificBinaryOp_match<LHS, RHS>(Opcode, L, R);
}

//===----------------------------------------------------------------------===//
// Class that matches a group of binary opcodes.
//
template <typename LHS_t, typename RHS_t, typename Predicate>
struct BinOpPred_match : Predicate {
  LHS_t L;
  RHS_t R;

  BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<Instruction>(V))
      return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
             R.match(I->getOperand(1));
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
             R.match(CE->getOperand(1));
    return false;
  }
};

struct is_shift_op {
  bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
};

struct is_right_shift_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
  }
};

struct is_logical_shift_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
  }
};

struct is_bitwiselogic_op {
  bool isOpType(unsigned Opcode) {
    return Instruction::isBitwiseLogicOp(Opcode);
  }
};

struct is_idiv_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
  }
};

struct is_irem_op {
  bool isOpType(unsigned Opcode) {
    return Opcode == Instruction::SRem || Opcode == Instruction::URem;
  }
};

/// Matches shift operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
                                                      const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
}

/// Matches right shift operations (lshr or ashr).
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
                                                          const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
}

/// Matches logical shift operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
m_LogicalShift(const LHS &L, const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
}

/// Matches bitwise logic operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
m_BitwiseLogic(const LHS &L, const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
}

/// Matches integer division operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
                                                    const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
}

/// Matches integer remainder operations.
template <typename LHS, typename RHS>
inline BinOpPred_match<LHS, RHS, is_irem_op> m_IRem(const LHS &L,
                                                    const RHS &R) {
  return BinOpPred_match<LHS, RHS, is_irem_op>(L, R);
}
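
// Example (illustrative; 'V' is an assumed 'Value *'): the predicate-based
// matchers accept any opcode in the group, so one pattern covers several
// related instructions:
//
//   Value *X, *Y;
//   if (match(V, m_Shr(m_Value(X), m_Value(Y))))
//     ...; // V is 'lshr X, Y' or 'ashr X, Y'.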

//===----------------------------------------------------------------------===//
// Class that matches exact binary ops.
//
template <typename SubPattern_t> struct Exact_match {
  SubPattern_t SubPattern;

  Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
      return PEO->isExact() && SubPattern.match(V);
    return false;
  }
};

template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
  return SubPattern;
}
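
// Example (illustrative; 'V' is an assumed 'Value *'): m_Exact wraps another
// pattern and additionally requires the 'exact' flag on the same value:
//
//   Value *X;
//   if (match(V, m_Exact(m_SDiv(m_Value(X), m_SpecificInt(8)))))
//     ...; // V is 'sdiv exact X, 8'.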

//===----------------------------------------------------------------------===//
// Matchers for CmpInst classes
//

template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
          bool Commutable = false>
struct CmpClass_match {
  PredicateTy &Predicate;
  LHS_t L;
  RHS_t R;

  // The evaluation order is always stable, regardless of Commutability.
  // The LHS is always matched first.
  CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
      : Predicate(Pred), L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<Class>(V)) {
      if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
        Predicate = I->getPredicate();
        return true;
      } else if (Commutable && L.match(I->getOperand(1)) &&
                 R.match(I->getOperand(0))) {
        Predicate = I->getSwappedPredicate();
        return true;
      }
    }
    return false;
  }
};

template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
}

template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
}

template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
}
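
// Example (illustrative; 'V' is an assumed 'Value *'): the compare matchers
// bind the predicate by reference in addition to matching both operands:
//
//   ICmpInst::Predicate Pred;
//   Value *X, *Y;
//   if (match(V, m_ICmp(Pred, m_Value(X), m_Value(Y))))
//     ...; // V is 'icmp Pred X, Y'.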

//===----------------------------------------------------------------------===//
// Matchers for instructions with a given opcode and number of operands.
//

/// Matches instructions with Opcode and one operand.
template <typename T0, unsigned Opcode> struct OneOps_match {
  T0 Op1;

  OneOps_match(const T0 &Op1) : Op1(Op1) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (V->getValueID() == Value::InstructionVal + Opcode) {
      auto *I = cast<Instruction>(V);
      return Op1.match(I->getOperand(0));
    }
    return false;
  }
};

/// Matches instructions with Opcode and two operands.
template <typename T0, typename T1, unsigned Opcode> struct TwoOps_match {
  T0 Op1;
  T1 Op2;

  TwoOps_match(const T0 &Op1, const T1 &Op2) : Op1(Op1), Op2(Op2) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (V->getValueID() == Value::InstructionVal + Opcode) {
      auto *I = cast<Instruction>(V);
      return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1));
    }
    return false;
  }
};

/// Matches instructions with Opcode and three operands.
template <typename T0, typename T1, typename T2, unsigned Opcode>
struct ThreeOps_match {
  T0 Op1;
  T1 Op2;
  T2 Op3;

  ThreeOps_match(const T0 &Op1, const T1 &Op2, const T2 &Op3)
      : Op1(Op1), Op2(Op2), Op3(Op3) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (V->getValueID() == Value::InstructionVal + Opcode) {
      auto *I = cast<Instruction>(V);
      return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
             Op3.match(I->getOperand(2));
    }
    return false;
  }
};

/// Matches SelectInst.
template <typename Cond, typename LHS, typename RHS>
inline ThreeOps_match<Cond, LHS, RHS, Instruction::Select>
m_Select(const Cond &C, const LHS &L, const RHS &R) {
  return ThreeOps_match<Cond, LHS, RHS, Instruction::Select>(C, L, R);
}

/// This matches a select of two constants, e.g.:
/// m_SelectCst<-1, 0>(m_Value(V))
template <int64_t L, int64_t R, typename Cond>
inline ThreeOps_match<Cond, constantint_match<L>, constantint_match<R>,
                      Instruction::Select>
m_SelectCst(const Cond &C) {
  return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
}

/// Matches FreezeInst.
template <typename OpTy>
inline OneOps_match<OpTy, Instruction::Freeze> m_Freeze(const OpTy &Op) {
  return OneOps_match<OpTy, Instruction::Freeze>(Op);
}

/// Matches InsertElementInst.
template <typename Val_t, typename Elt_t, typename Idx_t>
inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
  return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
      Val, Elt, Idx);
}

/// Matches ExtractElementInst.
template <typename Val_t, typename Idx_t>
inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
m_ExtractElt(const Val_t &Val, const Idx_t &Idx) {
  return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
}

/// Matches shuffle.
template <typename T0, typename T1, typename T2> struct Shuffle_match {
  T0 Op1;
  T1 Op2;
  T2 Mask;

  Shuffle_match(const T0 &Op1, const T1 &Op2, const T2 &Mask)
      : Op1(Op1), Op2(Op2), Mask(Mask) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<ShuffleVectorInst>(V)) {
      return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
             Mask.match(I->getShuffleMask());
    }
    return false;
  }
};

struct m_Mask {
  ArrayRef<int> &MaskRef;
  m_Mask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
  bool match(ArrayRef<int> Mask) {
    MaskRef = Mask;
    return true;
  }
};

struct m_ZeroMask {
  bool match(ArrayRef<int> Mask) {
    return all_of(Mask, [](int Elem) { return Elem == 0 || Elem == -1; });
  }
};

struct m_SpecificMask {
  ArrayRef<int> &MaskRef;
  m_SpecificMask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
  bool match(ArrayRef<int> Mask) { return MaskRef == Mask; }
};

struct m_SplatOrUndefMask {
  int &SplatIndex;
  m_SplatOrUndefMask(int &SplatIndex) : SplatIndex(SplatIndex) {}
  bool match(ArrayRef<int> Mask) {
    const auto *First = find_if(Mask, [](int Elem) { return Elem != -1; });
    if (First == Mask.end())
      return false;
    SplatIndex = *First;
    return all_of(Mask,
                  [First](int Elem) { return Elem == *First || Elem == -1; });
  }
};

/// Matches ShuffleVectorInst independently of mask value.
template <typename V1_t, typename V2_t>
inline TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>
m_Shuffle(const V1_t &v1, const V2_t &v2) {
  return TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>(v1, v2);
}

template <typename V1_t, typename V2_t, typename Mask_t>
inline Shuffle_match<V1_t, V2_t, Mask_t>
m_Shuffle(const V1_t &v1, const V2_t &v2, const Mask_t &mask) {
  return Shuffle_match<V1_t, V2_t, Mask_t>(v1, v2, mask);
}
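
// Example (illustrative; 'V' is an assumed 'Value *'): the mask helpers
// above plug into the three-argument m_Shuffle overload:
//
//   Value *V1, *V2;
//   ArrayRef<int> Mask;
//   if (match(V, m_Shuffle(m_Value(V1), m_Value(V2), m_Mask(Mask))))
//     ...; // Mask now refers to V's shuffle mask.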

/// Matches LoadInst.
template <typename OpTy>
inline OneOps_match<OpTy, Instruction::Load> m_Load(const OpTy &Op) {
  return OneOps_match<OpTy, Instruction::Load>(Op);
}

/// Matches StoreInst.
template <typename ValueOpTy, typename PointerOpTy>
inline TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>
m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) {
  return TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>(ValueOp,
                                                                  PointerOp);
}

//===----------------------------------------------------------------------===//
// Matchers for CastInst classes
//

template <typename Op_t, unsigned Opcode> struct CastClass_match {
  Op_t Op;

  CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
    return false;
  }
};

template <typename Op_t> struct PtrToIntSameSize_match {
  const DataLayout &DL;
  Op_t Op;

  PtrToIntSameSize_match(const DataLayout &DL, const Op_t &OpMatch)
      : DL(DL), Op(OpMatch) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *O = dyn_cast<Operator>(V))
      return O->getOpcode() == Instruction::PtrToInt &&
             DL.getTypeSizeInBits(O->getType()) ==
                 DL.getTypeSizeInBits(O->getOperand(0)->getType()) &&
             Op.match(O->getOperand(0));
    return false;
  }
};

/// Matches BitCast.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::BitCast>(Op);
}

/// Matches PtrToInt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
}

template <typename OpTy>
inline PtrToIntSameSize_match<OpTy> m_PtrToIntSameSize(const DataLayout &DL,
                                                       const OpTy &Op) {
  return PtrToIntSameSize_match<OpTy>(DL, Op);
}

/// Matches IntToPtr.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::IntToPtr> m_IntToPtr(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::IntToPtr>(Op);
}

/// Matches Trunc.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::Trunc>(Op);
}

template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::Trunc>, OpTy>
m_TruncOrSelf(const OpTy &Op) {
  return m_CombineOr(m_Trunc(Op), Op);
}

/// Matches SExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SExt>(Op);
}

/// Matches ZExt.
template <typename OpTy>
inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::ZExt>(Op);
}

template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, OpTy>
m_ZExtOrSelf(const OpTy &Op) {
  return m_CombineOr(m_ZExt(Op), Op);
}

template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::SExt>, OpTy>
m_SExtOrSelf(const OpTy &Op) {
  return m_CombineOr(m_SExt(Op), Op);
}

template <typename OpTy>
inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
                        CastClass_match<OpTy, Instruction::SExt>>
m_ZExtOrSExt(const OpTy &Op) {
  return m_CombineOr(m_ZExt(Op), m_SExt(Op));
}

template <typename OpTy>
inline match_combine_or<
    match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
                     CastClass_match<OpTy, Instruction::SExt>>,
    OpTy>
m_ZExtOrSExtOrSelf(const OpTy &Op) {
  return m_CombineOr(m_ZExtOrSExt(Op), Op);
}
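
// Example (illustrative; 'V' is an assumed 'Value *'): the *OrSelf
// combinators let one pattern accept either an extended value or the bare
// value itself:
//
//   Value *X;
//   if (match(V, m_ZExtOrSelf(m_Value(X))))
//     ...; // X is V itself, or the operand of the zext that produces V.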

template <typename OpTy>
inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::UIToFP>(Op);
}

template <typename OpTy>
inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::SIToFP>(Op);
}

template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPToUI> m_FPToUI(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPToUI>(Op);
}

template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPToSI> m_FPToSI(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPToSI>(Op);
}

template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
}

template <typename OpTy>
inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
  return CastClass_match<OpTy, Instruction::FPExt>(Op);
}

//===----------------------------------------------------------------------===//
// Matchers for control flow.
//

struct br_match {
  BasicBlock *&Succ;

  br_match(BasicBlock *&Succ) : Succ(Succ) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isUnconditional()) {
        Succ = BI->getSuccessor(0);
        return true;
      }
    return false;
  }
};

inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }

template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
struct brc_match {
  Cond_t Cond;
  TrueBlock_t T;
  FalseBlock_t F;

  brc_match(const Cond_t &C, const TrueBlock_t &t, const FalseBlock_t &f)
      : Cond(C), T(t), F(f) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *BI = dyn_cast<BranchInst>(V))
      if (BI->isConditional() && Cond.match(BI->getCondition()))
        return T.match(BI->getSuccessor(0)) && F.match(BI->getSuccessor(1));
    return false;
  }
};

template <typename Cond_t>
inline brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>
m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
  return brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>(
      C, m_BasicBlock(T), m_BasicBlock(F));
}

template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
inline brc_match<Cond_t, TrueBlock_t, FalseBlock_t>
m_Br(const Cond_t &C, const TrueBlock_t &T, const FalseBlock_t &F) {
  return brc_match<Cond_t, TrueBlock_t, FalseBlock_t>(C, T, F);
}
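
// Example (illustrative; assumes 'Term' is a block terminator as a
// 'Value *'): bind the condition and both successors of a conditional
// branch:
//
//   Value *Cond;
//   BasicBlock *TrueBB, *FalseBB;
//   if (match(Term, m_Br(m_Value(Cond), TrueBB, FalseBB)))
//     ...; // Term is 'br i1 Cond, label TrueBB, label FalseBB'.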

//===----------------------------------------------------------------------===//
// Matchers for max/min idioms, e.g. "select (sgt x, y), x, y" -> smax(x, y).
//

template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
          bool Commutable = false>
struct MaxMin_match {
  using PredType = Pred_t;
  LHS_t L;
  RHS_t R;

  // The evaluation order is always stable, regardless of Commutability.
  // The LHS is always matched first.
  MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *II = dyn_cast<IntrinsicInst>(V)) {
      Intrinsic::ID IID = II->getIntrinsicID();
      if ((IID == Intrinsic::smax && Pred_t::match(ICmpInst::ICMP_SGT)) ||
          (IID == Intrinsic::smin && Pred_t::match(ICmpInst::ICMP_SLT)) ||
          (IID == Intrinsic::umax && Pred_t::match(ICmpInst::ICMP_UGT)) ||
          (IID == Intrinsic::umin && Pred_t::match(ICmpInst::ICMP_ULT))) {
        Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
        return (L.match(LHS) && R.match(RHS)) ||
               (Commutable && L.match(RHS) && R.match(LHS));
      }
    }
    // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
    auto *SI = dyn_cast<SelectInst>(V);
    if (!SI)
      return false;
    auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
    if (!Cmp)
      return false;
    // At this point we have a select conditioned on a comparison.  Check that
    // it is the values returned by the select that are being compared.
    auto *TrueVal = SI->getTrueValue();
    auto *FalseVal = SI->getFalseValue();
    auto *LHS = Cmp->getOperand(0);
    auto *RHS = Cmp->getOperand(1);
    if ((TrueVal != LHS || FalseVal != RHS) &&
        (TrueVal != RHS || FalseVal != LHS))
      return false;
    typename CmpInst_t::Predicate Pred =
        LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
    // Does "(x pred y) ? x : y" represent the desired max/min operation?
    if (!Pred_t::match(Pred))
      return false;
    // It does!  Bind the operands.
    return (L.match(LHS) && R.match(RHS)) ||
           (Commutable && L.match(RHS) && R.match(LHS));
  }
};

/// Helper class for identifying signed max predicates.
struct smax_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
  }
};

/// Helper class for identifying signed min predicates.
struct smin_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
  }
};

/// Helper class for identifying unsigned max predicates.
struct umax_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
  }
};

/// Helper class for identifying unsigned min predicates.
struct umin_pred_ty {
  static bool match(ICmpInst::Predicate Pred) {
    return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
  }
};

/// Helper class for identifying ordered max predicates.
struct ofmax_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
  }
};

/// Helper class for identifying ordered min predicates.
struct ofmin_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
  }
};

/// Helper class for identifying unordered max predicates.
struct ufmax_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
  }
};

/// Helper class for identifying unordered min predicates.
struct ufmin_pred_ty {
  static bool match(FCmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
  }
};

template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
}

template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
}

template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
}

template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
                                                             const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
}

template <typename LHS, typename RHS>
inline match_combine_or<
    match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>,
                     MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>>,
    match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>,
                     MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>>>
m_MaxOrMin(const LHS &L, const RHS &R) {
  return m_CombineOr(m_CombineOr(m_SMax(L, R), m_SMin(L, R)),
                     m_CombineOr(m_UMax(L, R), m_UMin(L, R)));
}
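
// Example (illustrative; 'V' is an assumed 'Value *'): the max/min matchers
// recognize both the intrinsic form and the select-of-compare idiom:
//
//   Value *X, *Y;
//   if (match(V, m_SMax(m_Value(X), m_Value(Y))))
//     ...; // V is 'llvm.smax(X, Y)' or 'select (icmp sgt X, Y), X, Y'.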

/// Match an 'ordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-fp-math' flag) a combination of an fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ogt/oge, L, R), L, R) semantics matched by this predicate.
///
///                         max(L, R)  iff L and R are not NaN
///  m_OrdFMax(L, R) =      R          iff L or R is NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
}

/// Match an 'ordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-fp-math' flag) a combination of an fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(olt/ole, L, R), L, R) semantics matched by this predicate.
///
///                         min(L, R)  iff L and R are not NaN
///  m_OrdFMin(L, R) =      R          iff L or R is NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
                                                                 const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
}

/// Match an 'unordered' floating point maximum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-fp-math' flag) a combination of an fcmp and select has 'maximum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ugt/uge, L, R), L, R) semantics matched by this predicate.
///
///                         max(L, R)  iff L and R are not NaN
///  m_UnordFMax(L, R) =    L          iff L or R is NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
m_UnordFMax(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
}

/// Match an 'unordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-fp-math' flag) a combination of an fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ult/ule, L, R), L, R) semantics matched by this predicate.
///
///                          min(L, R)  iff L and R are not NaN
///  m_UnordFMin(L, R) =     L          iff L or R is NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
m_UnordFMin(const LHS &L, const RHS &R) {
  return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
}

//===----------------------------------------------------------------------===//
// Matchers for overflow check patterns: e.g. (a + b) u< a, (a ^ -1) u< b.
// Note that S might be matched to instructions other than an add.
//

template <typename LHS_t, typename RHS_t, typename Sum_t>
struct UAddWithOverflow_match {
  LHS_t L;
  RHS_t R;
  Sum_t S;

  UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
      : L(L), R(R), S(S) {}

  template <typename OpTy> bool match(OpTy *V) {
    Value *ICmpLHS, *ICmpRHS;
    ICmpInst::Predicate Pred;
    if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
      return false;

    Value *AddLHS, *AddRHS;
    auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));

    // (a + b) u< a, (a + b) u< b
    if (Pred == ICmpInst::ICMP_ULT)
      if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);

    // a >u (a + b), b >u (a + b)
    if (Pred == ICmpInst::ICMP_UGT)
      if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);

    Value *Op1;
    auto XorExpr = m_OneUse(m_Xor(m_Value(Op1), m_AllOnes()));
    // (a ^ -1) <u b
    if (Pred == ICmpInst::ICMP_ULT) {
      if (XorExpr.match(ICmpLHS))
        return L.match(Op1) && R.match(ICmpRHS) && S.match(ICmpLHS);
    }
    // b >u (a ^ -1)
    if (Pred == ICmpInst::ICMP_UGT) {
      if (XorExpr.match(ICmpRHS))
        return L.match(Op1) && R.match(ICmpLHS) && S.match(ICmpRHS);
    }

    // Match special-case for increment-by-1.
    if (Pred == ICmpInst::ICMP_EQ) {
      // (a + 1) == 0
      // (1 + a) == 0
      if (AddExpr.match(ICmpLHS) && m_ZeroInt().match(ICmpRHS) &&
          (m_One().match(AddLHS) || m_One().match(AddRHS)))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
      // 0 == (a + 1)
      // 0 == (1 + a)
      if (m_ZeroInt().match(ICmpLHS) && AddExpr.match(ICmpRHS) &&
          (m_One().match(AddLHS) || m_One().match(AddRHS)))
        return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
    }

    return false;
  }
};

/// Match an icmp instruction checking for unsigned overflow on addition.
///
/// S is matched to the addition whose result is being checked for overflow, and
/// L and R are matched to the LHS and RHS of S.
template <typename LHS_t, typename RHS_t, typename Sum_t>
UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
  return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
}
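
// Example (illustrative; 'V' is an assumed 'Value *'): recognize an unsigned
// overflow check such as '(a + b) u< a' and recover all three pieces:
//
//   Value *A, *B, *Sum;
//   if (match(V, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum))))
//     ...; // V tests whether the addition 'Sum' wrapped.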

template <typename Opnd_t> struct Argument_match {
  unsigned OpI;
  Opnd_t Val;

  Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    // FIXME: Should likely be switched to use `CallBase`.
    if (const auto *CI = dyn_cast<CallInst>(V))
      return Val.match(CI->getArgOperand(OpI));
    return false;
  }
};

/// Match an argument.
template <unsigned OpI, typename Opnd_t>
inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
  return Argument_match<Opnd_t>(OpI, Op);
}

/// Intrinsic matchers.
struct IntrinsicID_match {
  unsigned ID;

  IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (const auto *CI = dyn_cast<CallInst>(V))
      if (const auto *F = CI->getCalledFunction())
        return F->getIntrinsicID() == ID;
    return false;
  }
};

/// Intrinsic matchers are combinations of an ID matcher and argument
/// matchers. Higher-arity matchers are defined recursively by and-ing them
/// with lower-arity matchers. Here are some convenient typedefs for up to
/// several arguments; more can be added as needed.
template <typename T0 = void, typename T1 = void, typename T2 = void,
          typename T3 = void, typename T4 = void, typename T5 = void,
          typename T6 = void, typename T7 = void, typename T8 = void,
          typename T9 = void, typename T10 = void>
struct m_Intrinsic_Ty;
template <typename T0> struct m_Intrinsic_Ty<T0> {
  using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
};
template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
  using Ty =
      match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
};
template <typename T0, typename T1, typename T2>
struct m_Intrinsic_Ty<T0, T1, T2> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
                               Argument_match<T2>>;
};
template <typename T0, typename T1, typename T2, typename T3>
struct m_Intrinsic_Ty<T0, T1, T2, T3> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
                               Argument_match<T3>>;
};

template <typename T0, typename T1, typename T2, typename T3, typename T4>
struct m_Intrinsic_Ty<T0, T1, T2, T3, T4> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty,
                               Argument_match<T4>>;
};

template <typename T0, typename T1, typename T2, typename T3, typename T4,
          typename T5>
struct m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5> {
  using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty,
                               Argument_match<T5>>;
};
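
// For two arguments, for example, the recursion above expands to
//   match_combine_and<match_combine_and<IntrinsicID_match,
//                                       Argument_match<T0>>,
//                     Argument_match<T1>>
// i.e. the intrinsic ID is checked first, then each argument in turn.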

/// Match intrinsic calls like this:
/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
  return IntrinsicID_match(IntrID);
}

/// Matches MaskedLoad Intrinsic.
template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty
m_MaskedLoad(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2,
             const Opnd3 &Op3) {
  return m_Intrinsic<Intrinsic::masked_load>(Op0, Op1, Op2, Op3);
}

/// Matches MaskedGather Intrinsic.
template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty
m_MaskedGather(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2,
               const Opnd3 &Op3) {
  return m_Intrinsic<Intrinsic::masked_gather>(Op0, Op1, Op2, Op3);
}

template <Intrinsic::ID IntrID, typename T0>
inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
  return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
}

template <Intrinsic::ID IntrID, typename T0, typename T1>
inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
                                                       const T1 &Op1) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3, typename T4>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
            const T4 &Op4) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3),
                      m_Argument<4>(Op4));
}

template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
          typename T3, typename T4, typename T5>
inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5>::Ty
m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
            const T4 &Op4, const T5 &Op5) {
  return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3, Op4),
                      m_Argument<5>(Op5));
}

// Helper intrinsic matching specializations.
template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::bitreverse>(Op0);
}

template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::bswap>(Op0);
}

template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FAbs(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::fabs>(Op0);
}

template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FCanonicalize(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::canonicalize>(Op0);
}

template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
                                                        const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
}

template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
                                                        const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
}

template <typename Opnd0, typename Opnd1, typename Opnd2>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2>::Ty
m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2) {
  return m_Intrinsic<Intrinsic::fshl>(Op0, Op1, Op2);
}

template <typename Opnd0, typename Opnd1, typename Opnd2>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2>::Ty
m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2) {
  return m_Intrinsic<Intrinsic::fshr>(Op0, Op1, Op2);
}

template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_Sqrt(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::sqrt>(Op0);
}

template <typename Opnd0, typename Opnd1>
inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_CopySign(const Opnd0 &Op0,
                                                            const Opnd1 &Op1) {
  return m_Intrinsic<Intrinsic::copysign>(Op0, Op1);
}

template <typename Opnd0>
inline typename m_Intrinsic_Ty<Opnd0>::Ty m_VecReverse(const Opnd0 &Op0) {
  return m_Intrinsic<Intrinsic::experimental_vector_reverse>(Op0);
}

//===----------------------------------------------------------------------===//
// Matchers for two-operands operators with the operators in either order
//

/// Matches a BinaryOperator with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
  return AnyBinaryOp_match<LHS, RHS, true>(L, R);
}

/// Matches an ICmp with a predicate over LHS and RHS in either order.
/// Swaps the predicate if operands are commuted.
template <typename LHS, typename RHS>
inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
                                                                       R);
}

/// Matches a specific opcode with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline SpecificBinaryOp_match<LHS, RHS, true>
m_c_BinOp(unsigned Opcode, const LHS &L, const RHS &R) {
  return SpecificBinaryOp_match<LHS, RHS, true>(Opcode, L, R);
}

/// Matches an Add with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
}

/// Matches a Mul with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
}

/// Matches an And with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
}

/// Matches an Or with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
                                                              const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
}

/// Matches an Xor with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
                                                                const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
}
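
// Example (illustrative; 'V' and 'A' are assumed 'Value *'s): the m_c_*
// variants try both operand orders, so one pattern suffices for a
// commutative operation:
//
//   Value *B;
//   if (match(V, m_c_And(m_Specific(A), m_Value(B))))
//     ...; // V is 'and A, B' or 'and B, A'.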

/// Matches a 'Neg' as 'sub 0, V'.
template <typename ValTy>
inline BinaryOp_match<cst_pred_ty<is_zero_int>, ValTy, Instruction::Sub>
m_Neg(const ValTy &V) {
  return m_Sub(m_ZeroInt(), V);
}

/// Matches a 'Neg' as 'sub nsw 0, V'.
template <typename ValTy>
inline OverflowingBinaryOp_match<cst_pred_ty<is_zero_int>, ValTy,
                                 Instruction::Sub,
                                 OverflowingBinaryOperator::NoSignedWrap>
m_NSWNeg(const ValTy &V) {
  return m_NSWSub(m_ZeroInt(), V);
}

/// Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
/// NOTE: we first match the 'Not' (by matching '-1'), and only then apply
/// the inner matcher!
template <typename ValTy>
inline BinaryOp_match<cst_pred_ty<is_all_ones>, ValTy, Instruction::Xor, true>
m_Not(const ValTy &V) {
  return m_c_Xor(m_AllOnes(), V);
}
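
// Example (illustrative; 'V' is an assumed 'Value *'): because the all-ones
// constant is matched first, the inner matcher binds the other operand
// regardless of operand order:
//
//   Value *X;
//   if (match(V, m_Not(m_Value(X))))
//     ...; // V is 'xor X, -1' or 'xor -1, X'.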

template <typename ValTy> struct NotForbidUndef_match {
  ValTy Val;
  NotForbidUndef_match(const ValTy &V) : Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    // We do not use m_c_Xor because that could match an arbitrary APInt that is
    // not -1 as C and then fail to match the other operand if it is -1.
    // This code should still work even when both operands are constants.
    Value *X;
    const APInt *C;
    if (m_Xor(m_Value(X), m_APIntForbidUndef(C)).match(V) && C->isAllOnes())
      return Val.match(X);
    if (m_Xor(m_APIntForbidUndef(C), m_Value(X)).match(V) && C->isAllOnes())
      return Val.match(X);
    return false;
  }
};

/// Matches a bitwise 'not' as 'xor V, -1' or 'xor -1, V'. For vectors, the
/// constant value must be composed of only -1 scalar elements.
template <typename ValTy>
inline NotForbidUndef_match<ValTy> m_NotForbidUndef(const ValTy &V) {
  return NotForbidUndef_match<ValTy>(V);
}

/// Matches an SMin with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
m_c_SMin(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
}
/// Matches an SMax with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
m_c_SMax(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
}
/// Matches a UMin with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
m_c_UMin(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
}
/// Matches a UMax with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
m_c_UMax(const LHS &L, const RHS &R) {
  return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
}

template <typename LHS, typename RHS>
inline match_combine_or<
    match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>,
                     MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>>,
    match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>,
                     MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>>>
m_c_MaxOrMin(const LHS &L, const RHS &R) {
  return m_CombineOr(m_CombineOr(m_c_SMax(L, R), m_c_SMin(L, R)),
                     m_CombineOr(m_c_UMax(L, R), m_c_UMin(L, R)));
}

template <Intrinsic::ID IntrID, typename T0, typename T1>
inline match_combine_or<typename m_Intrinsic_Ty<T0, T1>::Ty,
                        typename m_Intrinsic_Ty<T1, T0>::Ty>
m_c_Intrinsic(const T0 &Op0, const T1 &Op1) {
  return m_CombineOr(m_Intrinsic<IntrID>(Op0, Op1),
                     m_Intrinsic<IntrID>(Op1, Op0));
}

/// Matches FAdd with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true>
m_c_FAdd(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R);
}

/// Matches FMul with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline BinaryOp_match<LHS, RHS, Instruction::FMul, true>
m_c_FMul(const LHS &L, const RHS &R) {
  return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R);
}

template <typename Opnd_t> struct Signum_match {
  Opnd_t Val;
  Signum_match(const Opnd_t &V) : Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    unsigned TypeSize = V->getType()->getScalarSizeInBits();
    if (TypeSize == 0)
      return false;

    unsigned ShiftWidth = TypeSize - 1;
    Value *OpL = nullptr, *OpR = nullptr;

    // This is the representation of signum we match:
    //
    //  signum(x) == (x >> 63) | (-x >>u 63)
    //
    // An i1 value is its own signum, so it's correct to match
    //
    //  signum(x) == (x >> 0)  | (-x >>u 0)
    //
    // for i1 values.

    auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
    auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
    auto Signum = m_Or(LHS, RHS);

    return Signum.match(V) && OpL == OpR && Val.match(OpL);
  }
};

/// Matches a signum pattern.
///
/// signum(x) =
///      x >  0  ->  1
///      x == 0  ->  0
///      x <  0  -> -1
template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
  return Signum_match<Val_t>(V);
}
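
// Example (illustrative; 'V' is an assumed 'Value *' and X some i32 value):
// m_Signum(m_Specific(X)) matches the canonical expansion
//   or (ashr X, 31), (lshr (sub 0, X), 31)
// which evaluates to -1, 0, or 1 according to the sign of X.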

template <int Ind, typename Opnd_t> struct ExtractValue_match {
  Opnd_t Val;
  ExtractValue_match(const Opnd_t &V) : Val(V) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<ExtractValueInst>(V)) {
      // If Ind is -1, don't inspect indices
      if (Ind != -1 &&
          !(I->getNumIndices() == 1 && I->getIndices()[0] == (unsigned)Ind))
        return false;
      return Val.match(I->getAggregateOperand());
    }
    return false;
  }
};

/// Match a single index ExtractValue instruction.
/// For example m_ExtractValue<1>(...)
template <int Ind, typename Val_t>
inline ExtractValue_match<Ind, Val_t> m_ExtractValue(const Val_t &V) {
  return ExtractValue_match<Ind, Val_t>(V);
}

/// Match an ExtractValue instruction with any index.
/// For example m_ExtractValue(...)
template <typename Val_t>
inline ExtractValue_match<-1, Val_t> m_ExtractValue(const Val_t &V) {
  return ExtractValue_match<-1, Val_t>(V);
}
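
// Example (editor's sketch): matching the value half of an overflow
// intrinsic's {result, overflow} aggregate:
//
//   Value *Agg;
//   if (match(V, m_ExtractValue<0>(m_Value(Agg)))) {
//     // V is `extractvalue Agg, 0`.
//   }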

/// Matcher for a single index InsertValue instruction.
template <int Ind, typename T0, typename T1> struct InsertValue_match {
  T0 Op0;
  T1 Op1;

  InsertValue_match(const T0 &Op0, const T1 &Op1) : Op0(Op0), Op1(Op1) {}

  template <typename OpTy> bool match(OpTy *V) {
    if (auto *I = dyn_cast<InsertValueInst>(V)) {
      return Op0.match(I->getOperand(0)) && Op1.match(I->getOperand(1)) &&
             I->getNumIndices() == 1 && Ind == I->getIndices()[0];
    }
    return false;
  }
};

/// Matches a single index InsertValue instruction.
template <int Ind, typename Val_t, typename Elt_t>
inline InsertValue_match<Ind, Val_t, Elt_t> m_InsertValue(const Val_t &Val,
                                                          const Elt_t &Elt) {
  return InsertValue_match<Ind, Val_t, Elt_t>(Val, Elt);
}
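
// Example (editor's sketch): matching an insertvalue that writes element 1
// of an aggregate:
//
//   Value *Agg, *Elt;
//   if (match(V, m_InsertValue<1>(m_Value(Agg), m_Value(Elt)))) {
//     // V is `insertvalue Agg, Elt, 1`.
//   }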

/// Matches patterns for `vscale`. This can either be a call to `llvm.vscale` or
/// the constant expression
///  `ptrtoint(gep <vscale x 1 x i8>, <vscale x 1 x i8>* null, i32 1)`
/// under the right conditions determined by DataLayout.
struct VScaleVal_match {
  template <typename ITy> bool match(ITy *V) {
    if (m_Intrinsic<Intrinsic::vscale>().match(V))
      return true;

    Value *Ptr;
    if (m_PtrToInt(m_Value(Ptr)).match(V)) {
      if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
        auto *DerefTy =
            dyn_cast<ScalableVectorType>(GEP->getSourceElementType());
        if (GEP->getNumIndices() == 1 && DerefTy &&
            DerefTy->getElementType()->isIntegerTy(8) &&
            m_Zero().match(GEP->getPointerOperand()) &&
            m_SpecificInt(1).match(GEP->idx_begin()->get()))
          return true;
      }
    }

    return false;
  }
};

inline VScaleVal_match m_VScale() {
  return VScaleVal_match();
}
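
// Example (editor's sketch): testing whether a value evaluates to vscale in
// either accepted form (intrinsic call or the null-GEP constant expression):
//
//   if (match(V, m_VScale())) {
//     // V is vscale.
//   }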

template <typename LHS, typename RHS, unsigned Opcode, bool Commutable = false>
struct LogicalOp_match {
  LHS L;
  RHS R;

  LogicalOp_match(const LHS &L, const RHS &R) : L(L), R(R) {}

  template <typename T> bool match(T *V) {
    auto *I = dyn_cast<Instruction>(V);
    if (!I || !I->getType()->isIntOrIntVectorTy(1))
      return false;

    if (I->getOpcode() == Opcode) {
      auto *Op0 = I->getOperand(0);
      auto *Op1 = I->getOperand(1);
      return (L.match(Op0) && R.match(Op1)) ||
             (Commutable && L.match(Op1) && R.match(Op0));
    }

    if (auto *Select = dyn_cast<SelectInst>(I)) {
      auto *Cond = Select->getCondition();
      auto *TVal = Select->getTrueValue();
      auto *FVal = Select->getFalseValue();

      // Don't match a scalar select of bool vectors.
      // Transforms expect a single type for operands if this matches.
      if (Cond->getType() != Select->getType())
        return false;

      if (Opcode == Instruction::And) {
        auto *C = dyn_cast<Constant>(FVal);
        if (C && C->isNullValue())
          return (L.match(Cond) && R.match(TVal)) ||
                 (Commutable && L.match(TVal) && R.match(Cond));
      } else {
        assert(Opcode == Instruction::Or);
        auto *C = dyn_cast<Constant>(TVal);
        if (C && C->isOneValue())
          return (L.match(Cond) && R.match(FVal)) ||
                 (Commutable && L.match(FVal) && R.match(Cond));
      }
    }

    return false;
  }
};

/// Matches L && R either in the form of L & R or L ? R : false.
/// Note that the latter form is poison-blocking.
template <typename LHS, typename RHS>
inline LogicalOp_match<LHS, RHS, Instruction::And> m_LogicalAnd(const LHS &L,
                                                                const RHS &R) {
  return LogicalOp_match<LHS, RHS, Instruction::And>(L, R);
}

/// Matches L && R where L and R are arbitrary values.
inline auto m_LogicalAnd() { return m_LogicalAnd(m_Value(), m_Value()); }

/// Matches L && R with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline LogicalOp_match<LHS, RHS, Instruction::And, true>
m_c_LogicalAnd(const LHS &L, const RHS &R) {
  return LogicalOp_match<LHS, RHS, Instruction::And, true>(L, R);
}

/// Matches L || R either in the form of L | R or L ? true : R.
/// Note that the latter form is poison-blocking.
template <typename LHS, typename RHS>
inline LogicalOp_match<LHS, RHS, Instruction::Or> m_LogicalOr(const LHS &L,
                                                              const RHS &R) {
  return LogicalOp_match<LHS, RHS, Instruction::Or>(L, R);
}

/// Matches L || R where L and R are arbitrary values.
inline auto m_LogicalOr() { return m_LogicalOr(m_Value(), m_Value()); }

/// Matches L || R with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline LogicalOp_match<LHS, RHS, Instruction::Or, true>
m_c_LogicalOr(const LHS &L, const RHS &R) {
  return LogicalOp_match<LHS, RHS, Instruction::Or, true>(L, R);
}

/// Matches either L && R or L || R,
/// each in either binary or logical form.
/// Note that the logical form is poison-blocking.
template <typename LHS, typename RHS, bool Commutable = false>
inline auto m_LogicalOp(const LHS &L, const RHS &R) {
  return m_CombineOr(
      LogicalOp_match<LHS, RHS, Instruction::And, Commutable>(L, R),
      LogicalOp_match<LHS, RHS, Instruction::Or, Commutable>(L, R));
}

/// Matches either L && R or L || R where L and R are arbitrary values.
inline auto m_LogicalOp() { return m_LogicalOp(m_Value(), m_Value()); }

/// Matches either L && R or L || R with LHS and RHS in either order.
template <typename LHS, typename RHS>
inline auto m_c_LogicalOp(const LHS &L, const RHS &R) {
  return m_LogicalOp<LHS, RHS, /*Commutable=*/true>(L, R);
}
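
// Example (editor's sketch): treating `A & B` and the poison-blocking
// `select A, B, false` uniformly when searching for a known condition Cond
// (Cond is a hypothetical Value* in scope):
//
//   Value *Other;
//   if (match(V, m_c_LogicalAnd(m_Specific(Cond), m_Value(Other)))) {
//     // V is Cond && Other, in binary or select form, either operand order.
//   }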

} // end namespace PatternMatch
} // end namespace llvm

#endif // LLVM_IR_PATTERNMATCH_H
//===- CFG.h ----------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides various utilities for inspecting and working with the
/// control flow graph in LLVM IR. This includes generic facilities for
/// iterating successors and predecessors of basic blocks, the successors of
/// specific terminator instructions, etc. It also defines specializations of
/// GraphTraits that allow Function and BasicBlock graphs to be treated as
/// proper graphs for generic algorithms.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CFG_H
#define LLVM_IR_CFG_H

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
#include <cassert>
#include <cstddef>
#include <iterator>

namespace llvm {

class Instruction;
class Use;

//===----------------------------------------------------------------------===//
// BasicBlock pred_iterator definition
//===----------------------------------------------------------------------===//

template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator {
public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = Ptr;
  using difference_type = std::ptrdiff_t;
  using pointer = Ptr *;
  using reference = Ptr *;

protected:
  using Self = PredIterator<Ptr, USE_iterator>;
  USE_iterator It;

  inline void advancePastNonTerminators() {
    // Loop to ignore non-terminator uses (for example BlockAddresses).
    while (!It.atEnd()) {
      if (auto *Inst = dyn_cast<Instruction>(*It))
        if (Inst->isTerminator())
          break;

      ++It;
    }
  }

public:
  PredIterator() = default;
  explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
    advancePastNonTerminators();
  }
  inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}

  inline bool operator==(const Self& x) const { return It == x.It; }
  inline bool operator!=(const Self& x) const { return !operator==(x); }

  inline reference operator*() const {
    assert(!It.atEnd() && "pred_iterator out of range!");
    return cast<Instruction>(*It)->getParent();
  }
  inline pointer *operator->() const { return &operator*(); }

  inline Self& operator++() {   // Preincrement
    assert(!It.atEnd() && "pred_iterator out of range!");
    ++It; advancePastNonTerminators();
    return *this;
  }

  inline Self operator++(int) { // Postincrement
    Self tmp = *this; ++*this; return tmp;
  }

  /// getOperandNo - Return the operand number in the predecessor's
  /// terminator of the successor.
  unsigned getOperandNo() const {
    return It.getOperandNo();
  }

  /// getUse - Return the operand Use in the predecessor's terminator
  /// of the successor.
  Use &getUse() const {
    return It.getUse();
  }
};

using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
using const_pred_iterator =
    PredIterator<const BasicBlock, Value::const_user_iterator>;
using pred_range = iterator_range<pred_iterator>;
using const_pred_range = iterator_range<const_pred_iterator>;

inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
  return const_pred_iterator(BB);
}
inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
inline const_pred_iterator pred_end(const BasicBlock *BB) {
  return const_pred_iterator(BB, true);
}
inline bool pred_empty(const BasicBlock *BB) {
  return pred_begin(BB) == pred_end(BB);
}
/// Get the number of predecessors of \p BB. This is a linear time operation.
/// Use \ref BasicBlock::hasNPredecessors() or
/// \ref BasicBlock::hasNPredecessorsOrMore() if able.
inline unsigned pred_size(const BasicBlock *BB) {
  return std::distance(pred_begin(BB), pred_end(BB));
}
inline pred_range predecessors(BasicBlock *BB) {
  return pred_range(pred_begin(BB), pred_end(BB));
}
inline const_pred_range predecessors(const BasicBlock *BB) {
  return const_pred_range(pred_begin(BB), pred_end(BB));
}
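
// Example (editor's sketch): the range helpers support the idiomatic loop
//
//   for (BasicBlock *Pred : predecessors(BB))
//     visit(Pred); // visit() is a hypothetical callback
//
// in place of an explicit pred_begin()/pred_end() iterator pair.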

//===----------------------------------------------------------------------===//
// Instruction and BasicBlock succ_iterator helpers
//===----------------------------------------------------------------------===//

template <class InstructionT, class BlockT>
class SuccIterator
    : public iterator_facade_base<SuccIterator<InstructionT, BlockT>,
                                  std::random_access_iterator_tag, BlockT, int,
                                  BlockT *, BlockT *> {
public:
  using difference_type = int;
  using pointer = BlockT *;
  using reference = BlockT *;

private:
  InstructionT *Inst;
  int Idx;
  using Self = SuccIterator<InstructionT, BlockT>;

  inline bool index_is_valid(int Idx) {
    // Note that we specially support the index of zero being valid even in the
    // face of a null instruction.
    return Idx >= 0 && (Idx == 0 || Idx <= (int)Inst->getNumSuccessors());
  }

  /// Proxy object to allow write access in operator[]
  class SuccessorProxy {
    Self It;

  public:
    explicit SuccessorProxy(const Self &It) : It(It) {}

    SuccessorProxy(const SuccessorProxy &) = default;

    SuccessorProxy &operator=(SuccessorProxy RHS) {
      *this = reference(RHS);
      return *this;
    }

    SuccessorProxy &operator=(reference RHS) {
      It.Inst->setSuccessor(It.Idx, RHS);
      return *this;
    }

    operator reference() const { return *It; }
  };

public:
  // begin iterator
  explicit inline SuccIterator(InstructionT *Inst) : Inst(Inst), Idx(0) {}
  // end iterator
  inline SuccIterator(InstructionT *Inst, bool) : Inst(Inst) {
    if (Inst)
      Idx = Inst->getNumSuccessors();
    else
      // Inst == NULL happens if a basic block is not fully constructed and
      // consequently getTerminator() returns NULL. In this case we construct
      // a SuccIterator which describes a basic block that has zero
      // successors.
      // Defining SuccIterator for incomplete and malformed CFGs is especially
      // useful for debugging.
      Idx = 0;
  }

  /// This is used by code that wants to operate on terminator instructions
  /// directly.
  int getSuccessorIndex() const { return Idx; }

  inline bool operator==(const Self &x) const { return Idx == x.Idx; }

  inline BlockT *operator*() const { return Inst->getSuccessor(Idx); }

  // We use the basic block pointer directly for operator->.
  inline BlockT *operator->() const { return operator*(); }

  inline bool operator<(const Self &RHS) const {
    assert(Inst == RHS.Inst && "Cannot compare iterators of different blocks!");
    return Idx < RHS.Idx;
  }

  int operator-(const Self &RHS) const {
    assert(Inst == RHS.Inst && "Cannot compare iterators of different blocks!");
    return Idx - RHS.Idx;
  }

  inline Self &operator+=(int RHS) {
    int NewIdx = Idx + RHS;
    assert(index_is_valid(NewIdx) && "Iterator index out of bound");
    Idx = NewIdx;
    return *this;
  }

  inline Self &operator-=(int RHS) { return operator+=(-RHS); }

  // Specially implement the [] operation using a proxy object to support
  // assignment.
  inline SuccessorProxy operator[](int Offset) {
    Self TmpIt = *this;
    TmpIt += Offset;
    return SuccessorProxy(TmpIt);
  }

  /// Get the source BlockT of this iterator.
  inline BlockT *getSource() {
    assert(Inst && "Source not available if basic block was malformed");
    return Inst->getParent();
  }
};
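
// Example (editor's sketch): SuccessorProxy makes operator[] assignable, so
// a successor can be retargeted through the iterator, assuming BB's
// terminator has at least one successor (NewDest is hypothetical):
//
//   succ_iterator SI = succ_begin(BB);
//   SI[0] = NewDest; // forwards to the terminator's setSuccessor(0, NewDest)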

using succ_iterator = SuccIterator<Instruction, BasicBlock>;
using const_succ_iterator = SuccIterator<const Instruction, const BasicBlock>;
using succ_range = iterator_range<succ_iterator>;
using const_succ_range = iterator_range<const_succ_iterator>;

inline succ_iterator succ_begin(Instruction *I) { return succ_iterator(I); }
inline const_succ_iterator succ_begin(const Instruction *I) {
  return const_succ_iterator(I);
}
inline succ_iterator succ_end(Instruction *I) { return succ_iterator(I, true); }
inline const_succ_iterator succ_end(const Instruction *I) {
  return const_succ_iterator(I, true);
}
inline bool succ_empty(const Instruction *I) {
  return succ_begin(I) == succ_end(I);
}
inline unsigned succ_size(const Instruction *I) {
  return std::distance(succ_begin(I), succ_end(I));
}
inline succ_range successors(Instruction *I) {
  return succ_range(succ_begin(I), succ_end(I));
}
inline const_succ_range successors(const Instruction *I) {
  return const_succ_range(succ_begin(I), succ_end(I));
}

inline succ_iterator succ_begin(BasicBlock *BB) {
  return succ_iterator(BB->getTerminator());
}
inline const_succ_iterator succ_begin(const BasicBlock *BB) {
  return const_succ_iterator(BB->getTerminator());
}
inline succ_iterator succ_end(BasicBlock *BB) {
  return succ_iterator(BB->getTerminator(), true);
}
inline const_succ_iterator succ_end(const BasicBlock *BB) {
  return const_succ_iterator(BB->getTerminator(), true);
}
inline bool succ_empty(const BasicBlock *BB) {
  return succ_begin(BB) == succ_end(BB);
}
inline unsigned succ_size(const BasicBlock *BB) {
  return std::distance(succ_begin(BB), succ_end(BB));
}
inline succ_range successors(BasicBlock *BB) {
  return succ_range(succ_begin(BB), succ_end(BB));
}
inline const_succ_range successors(const BasicBlock *BB) {
  return const_succ_range(succ_begin(BB), succ_end(BB));
}
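
// Example (editor's sketch): as with predecessors(), the successor range
// helpers support range-based for loops:
//
//   for (BasicBlock *Succ : successors(BB))
//     Worklist.push_back(Succ); // Worklist is a hypothetical container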

//===--------------------------------------------------------------------===//
// GraphTraits specializations for basic block graphs (CFGs)
//===--------------------------------------------------------------------===//

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks...

template <> struct GraphTraits<BasicBlock*> {
  using NodeRef = BasicBlock *;
  using ChildIteratorType = succ_iterator;

  static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
  static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
  static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};

template <> struct GraphTraits<const BasicBlock*> {
  using NodeRef = const BasicBlock *;
  using ChildIteratorType = const_succ_iterator;

  static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }

  static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
  static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order.  Inverse order for
// a function means traversing the predecessor edges of a BB instead of the
// successor edges.
//
template <> struct GraphTraits<Inverse<BasicBlock*>> {
  using NodeRef = BasicBlock *;
  using ChildIteratorType = pred_iterator;

  static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
  static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
  static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};

template <> struct GraphTraits<Inverse<const BasicBlock*>> {
  using NodeRef = const BasicBlock *;
  using ChildIteratorType = const_pred_iterator;

  static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
  static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
  static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};

//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... these are the same as the basic block iterators,
// except that the root node is implicitly the first node of the function.
//
template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
  static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<Function::iterator>;

  static nodes_iterator nodes_begin(Function *F) {
    return nodes_iterator(F->begin());
  }

  static nodes_iterator nodes_end(Function *F) {
    return nodes_iterator(F->end());
  }

  static size_t size(Function *F) { return F->size(); }
};
template <> struct GraphTraits<const Function*> :
  public GraphTraits<const BasicBlock*> {
  static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<Function::const_iterator>;

  static nodes_iterator nodes_begin(const Function *F) {
    return nodes_iterator(F->begin());
  }

  static nodes_iterator nodes_end(const Function *F) {
    return nodes_iterator(F->end());
  }

  static size_t size(const Function *F) { return F->size(); }
};
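
// Example (editor's sketch): with these specializations, generic graph
// algorithms work directly on a Function; e.g. a depth-first CFG walk using
// depth_first() from llvm/ADT/DepthFirstIterator.h:
//
//   for (BasicBlock *BB : depth_first(&F))
//     process(BB); // process() is a hypothetical callback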

// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order.  Inverse order for
// a function means traversing the predecessor edges of a BB instead of the
// successor edges.
//
template <> struct GraphTraits<Inverse<Function*>> :
  public GraphTraits<Inverse<BasicBlock*>> {
  static NodeRef getEntryNode(Inverse<Function *> G) {
    return &G.Graph->getEntryBlock();
  }
};
template <> struct GraphTraits<Inverse<const Function*>> :
  public GraphTraits<Inverse<const BasicBlock*>> {
  static NodeRef getEntryNode(Inverse<const Function *> G) {
    return &G.Graph->getEntryBlock();
  }
};

} // end namespace llvm

#endif // LLVM_IR_CFG_H
//===- PredIteratorCache.h - pred_iterator Cache ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PredIteratorCache class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PREDITERATORCACHE_H
#define LLVM_IR_PREDITERATORCACHE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/CFG.h"
#include "llvm/Support/Allocator.h"

namespace llvm {

/// PredIteratorCache - This class is an extremely trivial cache for
/// predecessor iterator queries.  This is useful for code that repeatedly
/// wants the predecessor list for the same blocks.
class PredIteratorCache {
  /// BlockToPredsMap - Pointer to null-terminated list.
  mutable DenseMap<BasicBlock *, BasicBlock **> BlockToPredsMap;
  mutable DenseMap<BasicBlock *, unsigned> BlockToPredCountMap;

  /// Memory - This is the space that holds cached preds.
  BumpPtrAllocator Memory;

private:
  /// GetPreds - Get a cached list for the null-terminated predecessor list of
  /// the specified block.  This can be used in a loop like this:
  ///   for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI)
  ///      use(*PI);
  /// instead of:
  /// for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
  BasicBlock **GetPreds(BasicBlock *BB) {
    BasicBlock **&Entry = BlockToPredsMap[BB];
    if (Entry)
      return Entry;

    SmallVector<BasicBlock *, 32> PredCache(predecessors(BB));
    PredCache.push_back(nullptr); // null terminator.

    BlockToPredCountMap[BB] = PredCache.size() - 1;

    Entry = Memory.Allocate<BasicBlock *>(PredCache.size());
    std::copy(PredCache.begin(), PredCache.end(), Entry);
    return Entry;
  }

  unsigned GetNumPreds(BasicBlock *BB) const {
    auto Result = BlockToPredCountMap.find(BB);
    if (Result != BlockToPredCountMap.end())
      return Result->second;
    return BlockToPredCountMap[BB] = pred_size(BB);
  }

public:
  size_t size(BasicBlock *BB) const { return GetNumPreds(BB); }
  ArrayRef<BasicBlock *> get(BasicBlock *BB) {
    return ArrayRef(GetPreds(BB), GetNumPreds(BB));
  }

  /// clear - Remove all information.
  void clear() {
    BlockToPredsMap.clear();
    BlockToPredCountMap.clear();
    Memory.Reset();
  }
};
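
// Example (editor's sketch): a typical use is repeated predecessor queries
// over the same blocks, paying the pred_iterator walk only once per block:
//
//   PredIteratorCache PredCache;
//   for (BasicBlock *Pred : PredCache.get(BB))
//     visit(Pred); // visit() is a hypothetical callback
//   PredCache.clear(); // drop the cache whenever the CFG changes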

} // end namespace llvm

#endif
//===- llvm/InlineAsm.h - Class to represent inline asm strings -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class represents the inline asm strings, which are Value*'s that are
// used as the callee operand of call instructions.  InlineAsm's are uniqued
// like constants, and created via InlineAsm::get(...).
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INLINEASM_H
#define LLVM_IR_INLINEASM_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <string>
#include <vector>

namespace llvm {

class Error;
class FunctionType;
class PointerType;
template <class ConstantClass> class ConstantUniqueMap;

class InlineAsm final : public Value {
public:
  enum AsmDialect {
    AD_ATT,
    AD_Intel
  };

private:
  friend struct InlineAsmKeyType;
  friend class ConstantUniqueMap<InlineAsm>;

  std::string AsmString, Constraints;
  FunctionType *FTy;
  bool HasSideEffects;
  bool IsAlignStack;
  AsmDialect Dialect;
  bool CanThrow;

  InlineAsm(FunctionType *Ty, const std::string &AsmString,
            const std::string &Constraints, bool hasSideEffects,
            bool isAlignStack, AsmDialect asmDialect, bool canThrow);

  /// When the ConstantUniqueMap merges two types and makes two InlineAsms
  /// identical, it destroys one of them with this method.
  void destroyConstant();

public:
  InlineAsm(const InlineAsm &) = delete;
  InlineAsm &operator=(const InlineAsm &) = delete;

  /// InlineAsm::get - Return the specified uniqued inline asm string.
  ///
  static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
                        StringRef Constraints, bool hasSideEffects,
                        bool isAlignStack = false,
                        AsmDialect asmDialect = AD_ATT, bool canThrow = false);
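
  // Example (editor's sketch): creating a uniqued inline-asm callee, assuming
  // FunctionType *FTy is a `void ()` function type:
  //
  //   InlineAsm *IA =
  //       InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true);
  //   // IA is then usable as the callee operand of a call instruction.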

  bool hasSideEffects() const { return HasSideEffects; }
  bool isAlignStack() const { return IsAlignStack; }
  AsmDialect getDialect() const { return Dialect; }
  bool canThrow() const { return CanThrow; }

  /// getType - InlineAsm's are always pointers.
  ///
  PointerType *getType() const {
    return reinterpret_cast<PointerType*>(Value::getType());
  }

  /// getFunctionType - InlineAsm's are always pointers to functions.
  ///
  FunctionType *getFunctionType() const;

  const std::string &getAsmString() const { return AsmString; }
  const std::string &getConstraintString() const { return Constraints; }
  void collectAsmStrs(SmallVectorImpl<StringRef> &AsmStrs) const;

  /// This static method can be used by the parser to check to see if the
  /// specified constraint string is legal for the type.
  static Error verify(FunctionType *Ty, StringRef Constraints);

  // Constraint String Parsing
  enum ConstraintPrefix {
    isInput,            // 'x'
    isOutput,           // '=x'
    isClobber,          // '~x'
    isLabel,            // '!x'
  };

  using ConstraintCodeVector = std::vector<std::string>;

  struct SubConstraintInfo {
    /// MatchingInput - If this is not -1, this is an output constraint where an
    /// input constraint is required to match it (e.g. "0").  The value is the
    /// constraint number that matches this one (for example, if this is
    /// constraint #0 and constraint #4 has the value "0", this will be 4).
    int MatchingInput = -1;

    /// Code - The constraint code, either the register name (in braces) or the
    /// constraint letter/number.
    ConstraintCodeVector Codes;

    /// Default constructor.
    SubConstraintInfo() = default;
  };

  using SubConstraintInfoVector = std::vector<SubConstraintInfo>;
  struct ConstraintInfo;
  using ConstraintInfoVector = std::vector<ConstraintInfo>;

  struct ConstraintInfo {
    /// Type - The basic type of the constraint: input/output/clobber/label
    ///
    ConstraintPrefix Type = isInput;

    /// isEarlyClobber - "&": output operand writes result before inputs are all
    /// read.  This is only ever set for an output operand.
    bool isEarlyClobber = false;

    /// MatchingInput - If this is not -1, this is an output constraint where an
    /// input constraint is required to match it (e.g. "0").  The value is the
    /// constraint number that matches this one (for example, if this is
    /// constraint #0 and constraint #4 has the value "0", this will be 4).
    int MatchingInput = -1;

    /// hasMatchingInput - Return true if this is an output constraint that has
    /// a matching input constraint.
    bool hasMatchingInput() const { return MatchingInput != -1; }

    /// isCommutative - This is set to true for a constraint that is commutative
    /// with the next operand.
    bool isCommutative = false;

    /// isIndirect - True if this operand is an indirect operand.  This means
    /// that the address of the source or destination is present in the call
    /// instruction, instead of it being returned or passed in explicitly.  This
    /// is represented with a '*' in the asm string.
    bool isIndirect = false;

    /// Code - The constraint code, either the register name (in braces) or the
    /// constraint letter/number.
    ConstraintCodeVector Codes;

    /// isMultipleAlternative - '|': has multiple-alternative constraints.
    bool isMultipleAlternative = false;

    /// multipleAlternatives - If there are multiple alternative constraints,
    /// this array will contain them.  Otherwise it will be empty.
    SubConstraintInfoVector multipleAlternatives;

    /// The currently selected alternative constraint index.
    unsigned currentAlternativeIndex = 0;

    /// Default constructor.
    ConstraintInfo() = default;

    /// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
    /// fields in this structure.  If the constraint string is not understood,
    /// return true, otherwise return false.
    bool Parse(StringRef Str, ConstraintInfoVector &ConstraintsSoFar);

    /// selectAlternative - Point this constraint to the alternative constraint
    /// indicated by the index.
    void selectAlternative(unsigned index);

    /// Whether this constraint corresponds to an argument.
    bool hasArg() const {
      return Type == isInput || (Type == isOutput && isIndirect);
    }
  };

  /// ParseConstraints - Split up the constraint string into the specific
  /// constraints and their prefixes.  If this returns an empty vector and the
  /// constraint string itself isn't empty, there was an error parsing.
  static ConstraintInfoVector ParseConstraints(StringRef ConstraintString);

  /// ParseConstraints - Parse the constraints of this InlineAsm object,
  /// returning them the same way that ParseConstraints(str) does.
  ConstraintInfoVector ParseConstraints() const {
    return ParseConstraints(Constraints);
  }
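
  // Example (editor's sketch): walking the parsed constraints of an InlineAsm
  // value IA and inspecting each operand:
  //
  //   for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
  //     if (CI.Type == InlineAsm::isOutput && CI.hasMatchingInput())
  //       handleTied(CI.MatchingInput); // handleTied() is hypothetical
  //   }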

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::InlineAsmVal;
  }

  // These are helper methods for dealing with flags in the INLINEASM SDNode
  // in the backend.
  //
  // The encoding of the flag word is currently:
  //   Bits 2-0 - A Kind_* value indicating the kind of the operand.
  //   Bits 15-3 - The number of SDNode operands associated with this inline
  //               assembly operand.
  //   If bit 31 is set:
  //     Bit 30-16 - The operand number that this operand must match.
  //                 When bits 2-0 are Kind_Mem, the Constraint_* value must be
  //                 obtained from the flags for this operand number.
  //   Else if bits 2-0 are Kind_Mem:
  //     Bit 30-16 - A Constraint_* value indicating the original constraint
  //                 code.
  //   Else:
  //     Bit 30-16 - The register class ID to use for the operand.

  enum : uint32_t {
    // Fixed operands on an INLINEASM SDNode.
    Op_InputChain = 0,
    Op_AsmString = 1,
    Op_MDNode = 2,
    Op_ExtraInfo = 3,    // HasSideEffects, IsAlignStack, AsmDialect.
    Op_FirstOperand = 4,

    // Fixed operands on an INLINEASM MachineInstr.
    MIOp_AsmString = 0,
    MIOp_ExtraInfo = 1,    // HasSideEffects, IsAlignStack, AsmDialect.
    MIOp_FirstOperand = 2,

    // Interpretation of the MIOp_ExtraInfo bit field.
    Extra_HasSideEffects = 1,
    Extra_IsAlignStack = 2,
    Extra_AsmDialect = 4,
    Extra_MayLoad = 8,
    Extra_MayStore = 16,
    Extra_IsConvergent = 32,

    // Inline asm operands map to multiple SDNode / MachineInstr operands.
    // The first operand is an immediate describing the asm operand, the low
    // bits is the kind:
    Kind_RegUse = 1,             // Input register, "r".
    Kind_RegDef = 2,             // Output register, "=r".
    Kind_RegDefEarlyClobber = 3, // Early-clobber output register, "=&r".
    Kind_Clobber = 4,            // Clobbered register, "~r".
    Kind_Imm = 5,                // Immediate.
    Kind_Mem = 6,                // Memory operand, "m", or an address, "p".
    Kind_Func = 7,               // Address operand of function call

    // Memory constraint codes.
    // These could be tablegenerated but there's little need to do that since
    // there's plenty of space in the encoding to support the union of all
    // constraint codes for all targets.
    // Addresses are included here as they need to be treated the same by the
    // backend; the only difference is that they are not used to actually
    // access memory by the instruction.
    Constraint_Unknown = 0,
    Constraint_es,
    Constraint_i,
    Constraint_k,
    Constraint_m,
    Constraint_o,
    Constraint_v,
    Constraint_A,
    Constraint_Q,
    Constraint_R,
    Constraint_S,
    Constraint_T,
    Constraint_Um,
    Constraint_Un,
    Constraint_Uq,
    Constraint_Us,
    Constraint_Ut,
    Constraint_Uv,
    Constraint_Uy,
    Constraint_X,
    Constraint_Z,
    Constraint_ZB,
    Constraint_ZC,
    Constraint_Zy,

    // Address constraints
    Constraint_p,
    Constraint_ZQ,
    Constraint_ZR,
    Constraint_ZS,
    Constraint_ZT,

    Constraints_Max = Constraint_ZT,
    Constraints_ShiftAmount = 16,

    Flag_MatchingOperand = 0x80000000
  };

  static unsigned getFlagWord(unsigned Kind, unsigned NumOps) {
    assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!");
    assert(Kind >= Kind_RegUse && Kind <= Kind_Func && "Invalid Kind");
    return Kind | (NumOps << 3);
  }

  static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
  static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
  static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
  static bool isFuncKind(unsigned Flag) { return getKind(Flag) == Kind_Func; }
  static bool isRegDefEarlyClobberKind(unsigned Flag) {
    return getKind(Flag) == Kind_RegDefEarlyClobber;
  }
  static bool isClobberKind(unsigned Flag) {
    return getKind(Flag) == Kind_Clobber;
  }

  /// getFlagWordForMatchingOp - Augment an existing flag word returned by
  /// getFlagWord with information indicating that this input operand is tied
  /// to a previous output operand.
  static unsigned getFlagWordForMatchingOp(unsigned InputFlag,
                                           unsigned MatchedOperandNo) {
    assert(MatchedOperandNo <= 0x7fff && "Too big matched operand");
    assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
    return InputFlag | Flag_MatchingOperand | (MatchedOperandNo << 16);
  }

  /// getFlagWordForRegClass - Augment an existing flag word returned by
  /// getFlagWord with the required register class for the following register
  /// operands.
  /// A tied use operand cannot have a register class, use the register class
  /// from the def operand instead.
  static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) {
    // Store RC + 1, reserve the value 0 to mean 'no register class'.
    ++RC;
    assert(!isImmKind(InputFlag) && "Immediates cannot have a register class");
    assert(!isMemKind(InputFlag) && "Memory operand cannot have a register class");
    assert(RC <= 0x7fff && "Too large register class ID");
    assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
    return InputFlag | (RC << 16);
  }

  /// Augment an existing flag word returned by getFlagWord with the constraint
  /// code for a memory constraint.
  static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
    assert((isMemKind(InputFlag) || isFuncKind(InputFlag)) &&
           "InputFlag is not a memory (include function) constraint!");
    assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
    assert(Constraint <= Constraints_Max && "Unknown constraint ID");
    assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
    return InputFlag | (Constraint << Constraints_ShiftAmount);
  }

  static unsigned convertMemFlagWordToMatchingFlagWord(unsigned InputFlag) {
    assert(isMemKind(InputFlag));
    return InputFlag & ~(0x7fff << Constraints_ShiftAmount);
  }

  static unsigned getKind(unsigned Flags) {
    return Flags & 7;
  }

  static unsigned getMemoryConstraintID(unsigned Flag) {
    assert((isMemKind(Flag) || isFuncKind(Flag)) &&
           "Not expected mem or function flang!");
    return (Flag >> Constraints_ShiftAmount) & 0x7fff;
  }

  /// getNumOperandRegisters - Extract the number of registers field from the
  /// inline asm operand flag.
  static unsigned getNumOperandRegisters(unsigned Flag) {
    return (Flag & 0xffff) >> 3;
  }

  /// isUseOperandTiedToDef - Return true if the flag of the inline asm
  /// operand indicates it is a use operand that's matched to a def operand.
  static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) {
    if ((Flag & Flag_MatchingOperand) == 0)
      return false;
    Idx = (Flag & ~Flag_MatchingOperand) >> 16;
    return true;
  }

  /// hasRegClassConstraint - Returns true if the flag contains a register
  /// class constraint.  Sets RC to the register class ID.
  static bool hasRegClassConstraint(unsigned Flag, unsigned &RC) {
    if (Flag & Flag_MatchingOperand)
      return false;
    unsigned High = Flag >> 16;
    // getFlagWordForRegClass() uses 0 to mean no register class, and otherwise
    // stores RC + 1.
    if (!High)
      return false;
    RC = High - 1;
    return true;
  }
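
  // Example (editor's sketch): round-tripping a flag word for an input
  // register operand that covers two registers in register class RC:
  //
  //   unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 2);
  //   Flag = InlineAsm::getFlagWordForRegClass(Flag, RC);
  //   assert(InlineAsm::getNumOperandRegisters(Flag) == 2);
  //   unsigned DecodedRC;
  //   if (InlineAsm::hasRegClassConstraint(Flag, DecodedRC))
  //     assert(DecodedRC == RC);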

  static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
    std::vector<StringRef> Result;
    if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
      Result.push_back("sideeffect");
    if (ExtraInfo & InlineAsm::Extra_MayLoad)
      Result.push_back("mayload");
    if (ExtraInfo & InlineAsm::Extra_MayStore)
      Result.push_back("maystore");
    if (ExtraInfo & InlineAsm::Extra_IsConvergent)
      Result.push_back("isconvergent");
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      Result.push_back("alignstack");

    AsmDialect Dialect =
        InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect));

    if (Dialect == InlineAsm::AD_ATT)
      Result.push_back("attdialect");
    if (Dialect == InlineAsm::AD_Intel)
      Result.push_back("inteldialect");

    return Result;
  }

  static StringRef getKindName(unsigned Kind) {
    switch (Kind) {
    case InlineAsm::Kind_RegUse:
      return "reguse";
    case InlineAsm::Kind_RegDef:
      return "regdef";
    case InlineAsm::Kind_RegDefEarlyClobber:
      return "regdef-ec";
    case InlineAsm::Kind_Clobber:
      return "clobber";
    case InlineAsm::Kind_Imm:
      return "imm";
    case InlineAsm::Kind_Mem:
    case InlineAsm::Kind_Func:
      return "mem";
    default:
      llvm_unreachable("Unknown operand kind");
    }
  }

  static StringRef getMemConstraintName(unsigned Constraint) {
    switch (Constraint) {
    case InlineAsm::Constraint_es:
      return "es";
    case InlineAsm::Constraint_i:
      return "i";
    case InlineAsm::Constraint_k:
      return "k";
    case InlineAsm::Constraint_m:
      return "m";
    case InlineAsm::Constraint_o:
      return "o";
    case InlineAsm::Constraint_v:
      return "v";
    case InlineAsm::Constraint_Q:
      return "Q";
    case InlineAsm::Constraint_R:
      return "R";
    case InlineAsm::Constraint_S:
      return "S";
    case InlineAsm::Constraint_T:
      return "T";
    case InlineAsm::Constraint_Um:
      return "Um";
    case InlineAsm::Constraint_Un:
      return "Un";
    case InlineAsm::Constraint_Uq:
      return "Uq";
    case InlineAsm::Constraint_Us:
      return "Us";
    case InlineAsm::Constraint_Ut:
      return "Ut";
    case InlineAsm::Constraint_Uv:
      return "Uv";
    case InlineAsm::Constraint_Uy:
      return "Uy";
    case InlineAsm::Constraint_X:
      return "X";
    case InlineAsm::Constraint_Z:
      return "Z";
    case InlineAsm::Constraint_ZB:
      return "ZB";
    case InlineAsm::Constraint_ZC:
      return "ZC";
    case InlineAsm::Constraint_Zy:
      return "Zy";
    case InlineAsm::Constraint_p:
      return "p";
    case InlineAsm::Constraint_ZQ:
      return "ZQ";
    case InlineAsm::Constraint_ZR:
      return "ZR";
    case InlineAsm::Constraint_ZS:
      return "ZS";
    case InlineAsm::Constraint_ZT:
      return "ZT";
    default:
      llvm_unreachable("Unknown memory constraint");
    }
  }
};

} // end namespace llvm

#endif // LLVM_IR_INLINEASM_H
//===-- llvm/RuntimeLibcalls.def - File that describes libcalls -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the runtime library calls the backend can emit.
// The various long double types cannot be merged, because 80-bit library
// functions use "xf" and 128-bit use "tf".
//
// When adding PPCF128 functions here, note that their names generally need
// to be overridden for Darwin with the xxx$LDBL128 form.  See
// PPCISelLowering.cpp.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

// Provide definitions of macros so that users of this file do not have to
// define everything to use it...

// Declare the enumerator for each libcall, along with its default name. Some
// libcalls have different names on particular OSes or architectures. These
// are set in InitLibcalls() in TargetLoweringBase.cpp and/or by targets
// using TargetLoweringBase::setLibcallName().
#ifndef HANDLE_LIBCALL
#error "HANDLE_LIBCALL must be defined"
#endif
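
// Example (editor's sketch): this file follows the usual X-macro pattern; a
// consumer defines HANDLE_LIBCALL, includes the .def, then undefines it:
//
//   #define HANDLE_LIBCALL(code, name) code,
//   enum Libcall {
//   #include "llvm/IR/RuntimeLibcalls.def"
//     UNKNOWN_LIBCALL
//   };
//   #undef HANDLE_LIBCALL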

// Integer
HANDLE_LIBCALL(SHL_I16, "__ashlhi3")
HANDLE_LIBCALL(SHL_I32, "__ashlsi3")
HANDLE_LIBCALL(SHL_I64, "__ashldi3")
HANDLE_LIBCALL(SHL_I128, "__ashlti3")
HANDLE_LIBCALL(SRL_I16, "__lshrhi3")
HANDLE_LIBCALL(SRL_I32, "__lshrsi3")
HANDLE_LIBCALL(SRL_I64, "__lshrdi3")
HANDLE_LIBCALL(SRL_I128, "__lshrti3")
HANDLE_LIBCALL(SRA_I16, "__ashrhi3")
HANDLE_LIBCALL(SRA_I32, "__ashrsi3")
HANDLE_LIBCALL(SRA_I64, "__ashrdi3")
HANDLE_LIBCALL(SRA_I128, "__ashrti3")
HANDLE_LIBCALL(MUL_I8, "__mulqi3")
HANDLE_LIBCALL(MUL_I16, "__mulhi3")
HANDLE_LIBCALL(MUL_I32, "__mulsi3")
HANDLE_LIBCALL(MUL_I64, "__muldi3")
HANDLE_LIBCALL(MUL_I128, "__multi3")
HANDLE_LIBCALL(MULO_I32, "__mulosi4")
HANDLE_LIBCALL(MULO_I64, "__mulodi4")
HANDLE_LIBCALL(MULO_I128, "__muloti4")
HANDLE_LIBCALL(SDIV_I8, "__divqi3")
HANDLE_LIBCALL(SDIV_I16, "__divhi3")
HANDLE_LIBCALL(SDIV_I32, "__divsi3")
HANDLE_LIBCALL(SDIV_I64, "__divdi3")
HANDLE_LIBCALL(SDIV_I128, "__divti3")
HANDLE_LIBCALL(UDIV_I8, "__udivqi3")
HANDLE_LIBCALL(UDIV_I16, "__udivhi3")
HANDLE_LIBCALL(UDIV_I32, "__udivsi3")
HANDLE_LIBCALL(UDIV_I64, "__udivdi3")
HANDLE_LIBCALL(UDIV_I128, "__udivti3")
HANDLE_LIBCALL(SREM_I8, "__modqi3")
HANDLE_LIBCALL(SREM_I16, "__modhi3")
HANDLE_LIBCALL(SREM_I32, "__modsi3")
HANDLE_LIBCALL(SREM_I64, "__moddi3")
HANDLE_LIBCALL(SREM_I128, "__modti3")
HANDLE_LIBCALL(UREM_I8, "__umodqi3")
HANDLE_LIBCALL(UREM_I16, "__umodhi3")
HANDLE_LIBCALL(UREM_I32, "__umodsi3")
HANDLE_LIBCALL(UREM_I64, "__umoddi3")
HANDLE_LIBCALL(UREM_I128, "__umodti3")
HANDLE_LIBCALL(SDIVREM_I8, nullptr)
HANDLE_LIBCALL(SDIVREM_I16, nullptr)
HANDLE_LIBCALL(SDIVREM_I32, nullptr)
HANDLE_LIBCALL(SDIVREM_I64, nullptr)
HANDLE_LIBCALL(SDIVREM_I128, nullptr)
HANDLE_LIBCALL(UDIVREM_I8, nullptr)
HANDLE_LIBCALL(UDIVREM_I16, nullptr)
HANDLE_LIBCALL(UDIVREM_I32, nullptr)
HANDLE_LIBCALL(UDIVREM_I64, nullptr)
HANDLE_LIBCALL(UDIVREM_I128, nullptr)
HANDLE_LIBCALL(NEG_I32, "__negsi2")
HANDLE_LIBCALL(NEG_I64, "__negdi2")
HANDLE_LIBCALL(CTLZ_I32, "__clzsi2")
HANDLE_LIBCALL(CTLZ_I64, "__clzdi2")
HANDLE_LIBCALL(CTLZ_I128, "__clzti2")

// Floating-point
HANDLE_LIBCALL(ADD_F32, "__addsf3")
HANDLE_LIBCALL(ADD_F64, "__adddf3")
HANDLE_LIBCALL(ADD_F80, "__addxf3")
HANDLE_LIBCALL(ADD_F128, "__addtf3")
HANDLE_LIBCALL(ADD_PPCF128, "__gcc_qadd")
HANDLE_LIBCALL(SUB_F32, "__subsf3")
HANDLE_LIBCALL(SUB_F64, "__subdf3")
HANDLE_LIBCALL(SUB_F80, "__subxf3")
HANDLE_LIBCALL(SUB_F128, "__subtf3")
HANDLE_LIBCALL(SUB_PPCF128, "__gcc_qsub")
HANDLE_LIBCALL(MUL_F32, "__mulsf3")
HANDLE_LIBCALL(MUL_F64, "__muldf3")
HANDLE_LIBCALL(MUL_F80, "__mulxf3")
HANDLE_LIBCALL(MUL_F128, "__multf3")
HANDLE_LIBCALL(MUL_PPCF128, "__gcc_qmul")
HANDLE_LIBCALL(DIV_F32, "__divsf3")
HANDLE_LIBCALL(DIV_F64, "__divdf3")
HANDLE_LIBCALL(DIV_F80, "__divxf3")
HANDLE_LIBCALL(DIV_F128, "__divtf3")
HANDLE_LIBCALL(DIV_PPCF128, "__gcc_qdiv")
HANDLE_LIBCALL(REM_F32, "fmodf")
HANDLE_LIBCALL(REM_F64, "fmod")
HANDLE_LIBCALL(REM_F80, "fmodl")
HANDLE_LIBCALL(REM_F128, "fmodl")
HANDLE_LIBCALL(REM_PPCF128, "fmodl")
HANDLE_LIBCALL(FMA_F32, "fmaf")
HANDLE_LIBCALL(FMA_F64, "fma")
HANDLE_LIBCALL(FMA_F80, "fmal")
HANDLE_LIBCALL(FMA_F128, "fmal")
HANDLE_LIBCALL(FMA_PPCF128, "fmal")
HANDLE_LIBCALL(POWI_F32, "__powisf2")
HANDLE_LIBCALL(POWI_F64, "__powidf2")
HANDLE_LIBCALL(POWI_F80, "__powixf2")
HANDLE_LIBCALL(POWI_F128, "__powitf2")
HANDLE_LIBCALL(POWI_PPCF128, "__powitf2")
HANDLE_LIBCALL(SQRT_F32, "sqrtf")
HANDLE_LIBCALL(SQRT_F64, "sqrt")
HANDLE_LIBCALL(SQRT_F80, "sqrtl")
HANDLE_LIBCALL(SQRT_F128, "sqrtl")
HANDLE_LIBCALL(SQRT_PPCF128, "sqrtl")
HANDLE_LIBCALL(CBRT_F32, "cbrtf")
HANDLE_LIBCALL(CBRT_F64, "cbrt")
HANDLE_LIBCALL(CBRT_F80, "cbrtl")
HANDLE_LIBCALL(CBRT_F128, "cbrtl")
HANDLE_LIBCALL(CBRT_PPCF128, "cbrtl")
HANDLE_LIBCALL(LOG_F32, "logf")
HANDLE_LIBCALL(LOG_F64, "log")
HANDLE_LIBCALL(LOG_F80, "logl")
HANDLE_LIBCALL(LOG_F128, "logl")
HANDLE_LIBCALL(LOG_PPCF128, "logl")
HANDLE_LIBCALL(LOG_FINITE_F32, "__logf_finite")
HANDLE_LIBCALL(LOG_FINITE_F64, "__log_finite")
HANDLE_LIBCALL(LOG_FINITE_F80, "__logl_finite")
HANDLE_LIBCALL(LOG_FINITE_F128, "__logl_finite")
HANDLE_LIBCALL(LOG_FINITE_PPCF128, "__logl_finite")
HANDLE_LIBCALL(LOG2_F32, "log2f")
HANDLE_LIBCALL(LOG2_F64, "log2")
HANDLE_LIBCALL(LOG2_F80, "log2l")
HANDLE_LIBCALL(LOG2_F128, "log2l")
HANDLE_LIBCALL(LOG2_PPCF128, "log2l")
HANDLE_LIBCALL(LOG2_FINITE_F32, "__log2f_finite")
HANDLE_LIBCALL(LOG2_FINITE_F64, "__log2_finite")
HANDLE_LIBCALL(LOG2_FINITE_F80, "__log2l_finite")
HANDLE_LIBCALL(LOG2_FINITE_F128, "__log2l_finite")
HANDLE_LIBCALL(LOG2_FINITE_PPCF128, "__log2l_finite")
HANDLE_LIBCALL(LOG10_F32, "log10f")
HANDLE_LIBCALL(LOG10_F64, "log10")
HANDLE_LIBCALL(LOG10_F80, "log10l")
HANDLE_LIBCALL(LOG10_F128, "log10l")
HANDLE_LIBCALL(LOG10_PPCF128, "log10l")
HANDLE_LIBCALL(LOG10_FINITE_F32, "__log10f_finite")
HANDLE_LIBCALL(LOG10_FINITE_F64, "__log10_finite")
HANDLE_LIBCALL(LOG10_FINITE_F80, "__log10l_finite")
HANDLE_LIBCALL(LOG10_FINITE_F128, "__log10l_finite")
HANDLE_LIBCALL(LOG10_FINITE_PPCF128, "__log10l_finite")
HANDLE_LIBCALL(EXP_F32, "expf")
HANDLE_LIBCALL(EXP_F64, "exp")
HANDLE_LIBCALL(EXP_F80, "expl")
HANDLE_LIBCALL(EXP_F128, "expl")
HANDLE_LIBCALL(EXP_PPCF128, "expl")
HANDLE_LIBCALL(EXP_FINITE_F32, "__expf_finite")
HANDLE_LIBCALL(EXP_FINITE_F64, "__exp_finite")
HANDLE_LIBCALL(EXP_FINITE_F80, "__expl_finite")
HANDLE_LIBCALL(EXP_FINITE_F128, "__expl_finite")
HANDLE_LIBCALL(EXP_FINITE_PPCF128, "__expl_finite")
HANDLE_LIBCALL(EXP2_F32, "exp2f")
HANDLE_LIBCALL(EXP2_F64, "exp2")
HANDLE_LIBCALL(EXP2_F80, "exp2l")
HANDLE_LIBCALL(EXP2_F128, "exp2l")
HANDLE_LIBCALL(EXP2_PPCF128, "exp2l")
HANDLE_LIBCALL(EXP2_FINITE_F32, "__exp2f_finite")
HANDLE_LIBCALL(EXP2_FINITE_F64, "__exp2_finite")
HANDLE_LIBCALL(EXP2_FINITE_F80, "__exp2l_finite")
HANDLE_LIBCALL(EXP2_FINITE_F128, "__exp2l_finite")
HANDLE_LIBCALL(EXP2_FINITE_PPCF128, "__exp2l_finite")
HANDLE_LIBCALL(SIN_F32, "sinf")
HANDLE_LIBCALL(SIN_F64, "sin")
HANDLE_LIBCALL(SIN_F80, "sinl")
HANDLE_LIBCALL(SIN_F128, "sinl")
HANDLE_LIBCALL(SIN_PPCF128, "sinl")
HANDLE_LIBCALL(COS_F32, "cosf")
HANDLE_LIBCALL(COS_F64, "cos")
HANDLE_LIBCALL(COS_F80, "cosl")
HANDLE_LIBCALL(COS_F128, "cosl")
HANDLE_LIBCALL(COS_PPCF128, "cosl")
HANDLE_LIBCALL(SINCOS_F32, nullptr)
HANDLE_LIBCALL(SINCOS_F64, nullptr)
HANDLE_LIBCALL(SINCOS_F80, nullptr)
HANDLE_LIBCALL(SINCOS_F128, nullptr)
HANDLE_LIBCALL(SINCOS_PPCF128, nullptr)
HANDLE_LIBCALL(SINCOS_STRET_F32, nullptr)
HANDLE_LIBCALL(SINCOS_STRET_F64, nullptr)
HANDLE_LIBCALL(POW_F32, "powf")
HANDLE_LIBCALL(POW_F64, "pow")
HANDLE_LIBCALL(POW_F80, "powl")
HANDLE_LIBCALL(POW_F128, "powl")
HANDLE_LIBCALL(POW_PPCF128, "powl")
HANDLE_LIBCALL(POW_FINITE_F32, "__powf_finite")
HANDLE_LIBCALL(POW_FINITE_F64, "__pow_finite")
HANDLE_LIBCALL(POW_FINITE_F80, "__powl_finite")
HANDLE_LIBCALL(POW_FINITE_F128, "__powl_finite")
HANDLE_LIBCALL(POW_FINITE_PPCF128, "__powl_finite")
HANDLE_LIBCALL(CEIL_F32, "ceilf")
HANDLE_LIBCALL(CEIL_F64, "ceil")
HANDLE_LIBCALL(CEIL_F80, "ceill")
HANDLE_LIBCALL(CEIL_F128, "ceill")
HANDLE_LIBCALL(CEIL_PPCF128, "ceill")
HANDLE_LIBCALL(TRUNC_F32, "truncf")
HANDLE_LIBCALL(TRUNC_F64, "trunc")
HANDLE_LIBCALL(TRUNC_F80, "truncl")
HANDLE_LIBCALL(TRUNC_F128, "truncl")
HANDLE_LIBCALL(TRUNC_PPCF128, "truncl")
HANDLE_LIBCALL(RINT_F32, "rintf")
HANDLE_LIBCALL(RINT_F64, "rint")
HANDLE_LIBCALL(RINT_F80, "rintl")
HANDLE_LIBCALL(RINT_F128, "rintl")
HANDLE_LIBCALL(RINT_PPCF128, "rintl")
HANDLE_LIBCALL(NEARBYINT_F32, "nearbyintf")
HANDLE_LIBCALL(NEARBYINT_F64, "nearbyint")
HANDLE_LIBCALL(NEARBYINT_F80, "nearbyintl")
HANDLE_LIBCALL(NEARBYINT_F128, "nearbyintl")
HANDLE_LIBCALL(NEARBYINT_PPCF128, "nearbyintl")
HANDLE_LIBCALL(ROUND_F32, "roundf")
HANDLE_LIBCALL(ROUND_F64, "round")
HANDLE_LIBCALL(ROUND_F80, "roundl")
HANDLE_LIBCALL(ROUND_F128, "roundl")
HANDLE_LIBCALL(ROUND_PPCF128, "roundl")
HANDLE_LIBCALL(ROUNDEVEN_F32, "roundevenf")
HANDLE_LIBCALL(ROUNDEVEN_F64, "roundeven")
HANDLE_LIBCALL(ROUNDEVEN_F80, "roundevenl")
HANDLE_LIBCALL(ROUNDEVEN_F128, "roundevenl")
HANDLE_LIBCALL(ROUNDEVEN_PPCF128, "roundevenl")
HANDLE_LIBCALL(FLOOR_F32, "floorf")
HANDLE_LIBCALL(FLOOR_F64, "floor")
HANDLE_LIBCALL(FLOOR_F80, "floorl")
HANDLE_LIBCALL(FLOOR_F128, "floorl")
HANDLE_LIBCALL(FLOOR_PPCF128, "floorl")
HANDLE_LIBCALL(COPYSIGN_F32, "copysignf")
HANDLE_LIBCALL(COPYSIGN_F64, "copysign")
HANDLE_LIBCALL(COPYSIGN_F80, "copysignl")
HANDLE_LIBCALL(COPYSIGN_F128, "copysignl")
HANDLE_LIBCALL(COPYSIGN_PPCF128, "copysignl")
HANDLE_LIBCALL(FMIN_F32, "fminf")
HANDLE_LIBCALL(FMIN_F64, "fmin")
HANDLE_LIBCALL(FMIN_F80, "fminl")
HANDLE_LIBCALL(FMIN_F128, "fminl")
HANDLE_LIBCALL(FMIN_PPCF128, "fminl")
HANDLE_LIBCALL(FMAX_F32, "fmaxf")
HANDLE_LIBCALL(FMAX_F64, "fmax")
HANDLE_LIBCALL(FMAX_F80, "fmaxl")
HANDLE_LIBCALL(FMAX_F128, "fmaxl")
HANDLE_LIBCALL(FMAX_PPCF128, "fmaxl")
HANDLE_LIBCALL(LROUND_F32, "lroundf")
HANDLE_LIBCALL(LROUND_F64, "lround")
HANDLE_LIBCALL(LROUND_F80, "lroundl")
HANDLE_LIBCALL(LROUND_F128, "lroundl")
HANDLE_LIBCALL(LROUND_PPCF128, "lroundl")
HANDLE_LIBCALL(LLROUND_F32, "llroundf")
HANDLE_LIBCALL(LLROUND_F64, "llround")
HANDLE_LIBCALL(LLROUND_F80, "llroundl")
HANDLE_LIBCALL(LLROUND_F128, "llroundl")
HANDLE_LIBCALL(LLROUND_PPCF128, "llroundl")
HANDLE_LIBCALL(LRINT_F32, "lrintf")
HANDLE_LIBCALL(LRINT_F64, "lrint")
HANDLE_LIBCALL(LRINT_F80, "lrintl")
HANDLE_LIBCALL(LRINT_F128, "lrintl")
HANDLE_LIBCALL(LRINT_PPCF128, "lrintl")
HANDLE_LIBCALL(LLRINT_F32, "llrintf")
HANDLE_LIBCALL(LLRINT_F64, "llrint")
HANDLE_LIBCALL(LLRINT_F80, "llrintl")
HANDLE_LIBCALL(LLRINT_F128, "llrintl")
HANDLE_LIBCALL(LLRINT_PPCF128, "llrintl")
HANDLE_LIBCALL(LDEXP_F32, "ldexpf")
HANDLE_LIBCALL(LDEXP_F64, "ldexp")
HANDLE_LIBCALL(LDEXP_F80, "ldexpl")
HANDLE_LIBCALL(LDEXP_F128, "ldexpl")
HANDLE_LIBCALL(LDEXP_PPCF128, "ldexpl")
HANDLE_LIBCALL(FREXP_F32, "frexpf")
HANDLE_LIBCALL(FREXP_F64, "frexp")
HANDLE_LIBCALL(FREXP_F80, "frexpl")
HANDLE_LIBCALL(FREXP_F128, "frexpl")
HANDLE_LIBCALL(FREXP_PPCF128, "frexpl")

// Floating point environment
HANDLE_LIBCALL(FEGETENV, "fegetenv")
HANDLE_LIBCALL(FESETENV, "fesetenv")

// Conversion
HANDLE_LIBCALL(FPEXT_F32_PPCF128, "__gcc_stoq")
HANDLE_LIBCALL(FPEXT_F64_PPCF128, "__gcc_dtoq")
HANDLE_LIBCALL(FPEXT_F80_F128, "__extendxftf2")
HANDLE_LIBCALL(FPEXT_F64_F128, "__extenddftf2")
HANDLE_LIBCALL(FPEXT_F32_F128, "__extendsftf2")
HANDLE_LIBCALL(FPEXT_F16_F128, "__extendhftf2")
HANDLE_LIBCALL(FPEXT_F16_F80, "__extendhfxf2")
HANDLE_LIBCALL(FPEXT_F32_F64, "__extendsfdf2")
HANDLE_LIBCALL(FPEXT_F16_F64, "__extendhfdf2")
HANDLE_LIBCALL(FPEXT_F16_F32, "__gnu_h2f_ieee")
HANDLE_LIBCALL(FPROUND_F32_F16, "__gnu_f2h_ieee")
HANDLE_LIBCALL(FPROUND_F64_F16, "__truncdfhf2")
HANDLE_LIBCALL(FPROUND_F80_F16, "__truncxfhf2")
HANDLE_LIBCALL(FPROUND_F128_F16, "__trunctfhf2")
HANDLE_LIBCALL(FPROUND_PPCF128_F16, "__trunctfhf2")
HANDLE_LIBCALL(FPROUND_F32_BF16, "__truncsfbf2")
HANDLE_LIBCALL(FPROUND_F64_BF16, "__truncdfbf2")
HANDLE_LIBCALL(FPROUND_F64_F32, "__truncdfsf2")
HANDLE_LIBCALL(FPROUND_F80_F32, "__truncxfsf2")
HANDLE_LIBCALL(FPROUND_F128_F32, "__trunctfsf2")
HANDLE_LIBCALL(FPROUND_PPCF128_F32, "__gcc_qtos")
HANDLE_LIBCALL(FPROUND_F80_F64, "__truncxfdf2")
HANDLE_LIBCALL(FPROUND_F128_F64, "__trunctfdf2")
HANDLE_LIBCALL(FPROUND_PPCF128_F64, "__gcc_qtod")
HANDLE_LIBCALL(FPROUND_F128_F80, "__trunctfxf2")
HANDLE_LIBCALL(FPTOSINT_F16_I32, "__fixhfsi")
HANDLE_LIBCALL(FPTOSINT_F16_I64, "__fixhfdi")
HANDLE_LIBCALL(FPTOSINT_F16_I128, "__fixhfti")
HANDLE_LIBCALL(FPTOSINT_F32_I32, "__fixsfsi")
HANDLE_LIBCALL(FPTOSINT_F32_I64, "__fixsfdi")
HANDLE_LIBCALL(FPTOSINT_F32_I128, "__fixsfti")
HANDLE_LIBCALL(FPTOSINT_F64_I32, "__fixdfsi")
HANDLE_LIBCALL(FPTOSINT_F64_I64, "__fixdfdi")
HANDLE_LIBCALL(FPTOSINT_F64_I128, "__fixdfti")
HANDLE_LIBCALL(FPTOSINT_F80_I32, "__fixxfsi")
HANDLE_LIBCALL(FPTOSINT_F80_I64, "__fixxfdi")
HANDLE_LIBCALL(FPTOSINT_F80_I128, "__fixxfti")
HANDLE_LIBCALL(FPTOSINT_F128_I32, "__fixtfsi")
HANDLE_LIBCALL(FPTOSINT_F128_I64, "__fixtfdi")
HANDLE_LIBCALL(FPTOSINT_F128_I128, "__fixtfti")
HANDLE_LIBCALL(FPTOSINT_PPCF128_I32, "__gcc_qtou")
HANDLE_LIBCALL(FPTOSINT_PPCF128_I64, "__fixtfdi")
HANDLE_LIBCALL(FPTOSINT_PPCF128_I128, "__fixtfti")
HANDLE_LIBCALL(FPTOUINT_F16_I32, "__fixunshfsi")
HANDLE_LIBCALL(FPTOUINT_F16_I64, "__fixunshfdi")
HANDLE_LIBCALL(FPTOUINT_F16_I128, "__fixunshfti")
HANDLE_LIBCALL(FPTOUINT_F32_I32, "__fixunssfsi")
HANDLE_LIBCALL(FPTOUINT_F32_I64, "__fixunssfdi")
HANDLE_LIBCALL(FPTOUINT_F32_I128, "__fixunssfti")
HANDLE_LIBCALL(FPTOUINT_F64_I32, "__fixunsdfsi")
HANDLE_LIBCALL(FPTOUINT_F64_I64, "__fixunsdfdi")
HANDLE_LIBCALL(FPTOUINT_F64_I128, "__fixunsdfti")
HANDLE_LIBCALL(FPTOUINT_F80_I32, "__fixunsxfsi")
HANDLE_LIBCALL(FPTOUINT_F80_I64, "__fixunsxfdi")
HANDLE_LIBCALL(FPTOUINT_F80_I128, "__fixunsxfti")
HANDLE_LIBCALL(FPTOUINT_F128_I32, "__fixunstfsi")
HANDLE_LIBCALL(FPTOUINT_F128_I64, "__fixunstfdi")
HANDLE_LIBCALL(FPTOUINT_F128_I128, "__fixunstfti")
HANDLE_LIBCALL(FPTOUINT_PPCF128_I32, "__fixunstfsi")
HANDLE_LIBCALL(FPTOUINT_PPCF128_I64, "__fixunstfdi")
HANDLE_LIBCALL(FPTOUINT_PPCF128_I128, "__fixunstfti")
HANDLE_LIBCALL(SINTTOFP_I32_F16, "__floatsihf")
HANDLE_LIBCALL(SINTTOFP_I32_F32, "__floatsisf")
HANDLE_LIBCALL(SINTTOFP_I32_F64, "__floatsidf")
HANDLE_LIBCALL(SINTTOFP_I32_F80, "__floatsixf")
HANDLE_LIBCALL(SINTTOFP_I32_F128, "__floatsitf")
HANDLE_LIBCALL(SINTTOFP_I32_PPCF128, "__gcc_itoq")
HANDLE_LIBCALL(SINTTOFP_I64_F16, "__floatdihf")
HANDLE_LIBCALL(SINTTOFP_I64_F32, "__floatdisf")
HANDLE_LIBCALL(SINTTOFP_I64_F64, "__floatdidf")
HANDLE_LIBCALL(SINTTOFP_I64_F80, "__floatdixf")
HANDLE_LIBCALL(SINTTOFP_I64_F128, "__floatditf")
HANDLE_LIBCALL(SINTTOFP_I64_PPCF128, "__floatditf")
HANDLE_LIBCALL(SINTTOFP_I128_F16, "__floattihf")
HANDLE_LIBCALL(SINTTOFP_I128_F32, "__floattisf")
HANDLE_LIBCALL(SINTTOFP_I128_F64, "__floattidf")
HANDLE_LIBCALL(SINTTOFP_I128_F80, "__floattixf")
HANDLE_LIBCALL(SINTTOFP_I128_F128, "__floattitf")
HANDLE_LIBCALL(SINTTOFP_I128_PPCF128, "__floattitf")
HANDLE_LIBCALL(UINTTOFP_I32_F16, "__floatunsihf")
HANDLE_LIBCALL(UINTTOFP_I32_F32, "__floatunsisf")
HANDLE_LIBCALL(UINTTOFP_I32_F64, "__floatunsidf")
HANDLE_LIBCALL(UINTTOFP_I32_F80, "__floatunsixf")
HANDLE_LIBCALL(UINTTOFP_I32_F128, "__floatunsitf")
HANDLE_LIBCALL(UINTTOFP_I32_PPCF128, "__gcc_utoq")
HANDLE_LIBCALL(UINTTOFP_I64_F16, "__floatundihf")
HANDLE_LIBCALL(UINTTOFP_I64_F32, "__floatundisf")
HANDLE_LIBCALL(UINTTOFP_I64_F64, "__floatundidf")
HANDLE_LIBCALL(UINTTOFP_I64_F80, "__floatundixf")
HANDLE_LIBCALL(UINTTOFP_I64_F128, "__floatunditf")
HANDLE_LIBCALL(UINTTOFP_I64_PPCF128, "__floatunditf")
HANDLE_LIBCALL(UINTTOFP_I128_F16, "__floatuntihf")
HANDLE_LIBCALL(UINTTOFP_I128_F32, "__floatuntisf")
HANDLE_LIBCALL(UINTTOFP_I128_F64, "__floatuntidf")
HANDLE_LIBCALL(UINTTOFP_I128_F80, "__floatuntixf")
HANDLE_LIBCALL(UINTTOFP_I128_F128, "__floatuntitf")
HANDLE_LIBCALL(UINTTOFP_I128_PPCF128, "__floatuntitf")
HANDLE_LIBCALL(CONVERT_F128_PPCF128, "__extendkftf2")
HANDLE_LIBCALL(CONVERT_PPCF128_F128, "__trunctfkf2")

// Comparison
HANDLE_LIBCALL(OEQ_F32, "__eqsf2")
HANDLE_LIBCALL(OEQ_F64, "__eqdf2")
HANDLE_LIBCALL(OEQ_F128, "__eqtf2")
HANDLE_LIBCALL(OEQ_PPCF128, "__gcc_qeq")
HANDLE_LIBCALL(UNE_F32, "__nesf2")
HANDLE_LIBCALL(UNE_F64, "__nedf2")
HANDLE_LIBCALL(UNE_F128, "__netf2")
HANDLE_LIBCALL(UNE_PPCF128, "__gcc_qne")
HANDLE_LIBCALL(OGE_F32, "__gesf2")
HANDLE_LIBCALL(OGE_F64, "__gedf2")
HANDLE_LIBCALL(OGE_F128, "__getf2")
HANDLE_LIBCALL(OGE_PPCF128, "__gcc_qge")
HANDLE_LIBCALL(OLT_F32, "__ltsf2")
HANDLE_LIBCALL(OLT_F64, "__ltdf2")
HANDLE_LIBCALL(OLT_F128, "__lttf2")
HANDLE_LIBCALL(OLT_PPCF128, "__gcc_qlt")
HANDLE_LIBCALL(OLE_F32, "__lesf2")
HANDLE_LIBCALL(OLE_F64, "__ledf2")
HANDLE_LIBCALL(OLE_F128, "__letf2")
HANDLE_LIBCALL(OLE_PPCF128, "__gcc_qle")
HANDLE_LIBCALL(OGT_F32, "__gtsf2")
HANDLE_LIBCALL(OGT_F64, "__gtdf2")
HANDLE_LIBCALL(OGT_F128, "__gttf2")
HANDLE_LIBCALL(OGT_PPCF128, "__gcc_qgt")
HANDLE_LIBCALL(UO_F32, "__unordsf2")
HANDLE_LIBCALL(UO_F64, "__unorddf2")
HANDLE_LIBCALL(UO_F128, "__unordtf2")
HANDLE_LIBCALL(UO_PPCF128, "__gcc_qunord")

// Memory
HANDLE_LIBCALL(MEMCPY, "memcpy")
HANDLE_LIBCALL(MEMMOVE, "memmove")
HANDLE_LIBCALL(MEMSET, "memset")
HANDLE_LIBCALL(BZERO, nullptr)
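// BZERO defaults to no name; targets that provide a bzero-style routine
// (e.g. some Darwin configurations) are expected to install one themselves.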

// Element-wise unordered-atomic memory libcalls of different element sizes
HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memcpy_element_unordered_atomic_1")
HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memcpy_element_unordered_atomic_2")
HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memcpy_element_unordered_atomic_4")
HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memcpy_element_unordered_atomic_8")
HANDLE_LIBCALL(MEMCPY_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memcpy_element_unordered_atomic_16")
HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memmove_element_unordered_atomic_1")
HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memmove_element_unordered_atomic_2")
HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memmove_element_unordered_atomic_4")
HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memmove_element_unordered_atomic_8")
HANDLE_LIBCALL(MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memmove_element_unordered_atomic_16")
HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_1, "__llvm_memset_element_unordered_atomic_1")
HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_2, "__llvm_memset_element_unordered_atomic_2")
HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_4, "__llvm_memset_element_unordered_atomic_4")
HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_8, "__llvm_memset_element_unordered_atomic_8")
HANDLE_LIBCALL(MEMSET_ELEMENT_UNORDERED_ATOMIC_16, "__llvm_memset_element_unordered_atomic_16")

// Exception handling
HANDLE_LIBCALL(UNWIND_RESUME, "_Unwind_Resume")
HANDLE_LIBCALL(CXA_END_CLEANUP, "__cxa_end_cleanup")

// Note: there are two sets of atomics libcalls; see
// <https://llvm.org/docs/Atomics.html> for more info on the
// difference between them.

// Atomic '__sync_*' libcalls.
HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_1, "__sync_val_compare_and_swap_1")
HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_2, "__sync_val_compare_and_swap_2")
HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_4, "__sync_val_compare_and_swap_4")
HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_8, "__sync_val_compare_and_swap_8")
HANDLE_LIBCALL(SYNC_VAL_COMPARE_AND_SWAP_16, "__sync_val_compare_and_swap_16")
HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_1, "__sync_lock_test_and_set_1")
HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_2, "__sync_lock_test_and_set_2")
HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_4, "__sync_lock_test_and_set_4")
HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_8, "__sync_lock_test_and_set_8")
HANDLE_LIBCALL(SYNC_LOCK_TEST_AND_SET_16, "__sync_lock_test_and_set_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_1, "__sync_fetch_and_add_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_2, "__sync_fetch_and_add_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_4, "__sync_fetch_and_add_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_8, "__sync_fetch_and_add_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_ADD_16, "__sync_fetch_and_add_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_1, "__sync_fetch_and_sub_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_2, "__sync_fetch_and_sub_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_4, "__sync_fetch_and_sub_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_8, "__sync_fetch_and_sub_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_SUB_16, "__sync_fetch_and_sub_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_AND_1, "__sync_fetch_and_and_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_AND_2, "__sync_fetch_and_and_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_AND_4, "__sync_fetch_and_and_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_AND_8, "__sync_fetch_and_and_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_AND_16, "__sync_fetch_and_and_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_OR_1, "__sync_fetch_and_or_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_OR_2, "__sync_fetch_and_or_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_OR_4, "__sync_fetch_and_or_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_OR_8, "__sync_fetch_and_or_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_OR_16, "__sync_fetch_and_or_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_1, "__sync_fetch_and_xor_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_2, "__sync_fetch_and_xor_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_4, "__sync_fetch_and_xor_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_8, "__sync_fetch_and_xor_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_XOR_16, "__sync_fetch_and_xor_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_1, "__sync_fetch_and_nand_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_2, "__sync_fetch_and_nand_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_4, "__sync_fetch_and_nand_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_8, "__sync_fetch_and_nand_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_NAND_16, "__sync_fetch_and_nand_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_1, "__sync_fetch_and_max_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_2, "__sync_fetch_and_max_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_4, "__sync_fetch_and_max_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_8, "__sync_fetch_and_max_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_MAX_16, "__sync_fetch_and_max_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_1, "__sync_fetch_and_umax_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_2, "__sync_fetch_and_umax_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_4, "__sync_fetch_and_umax_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_8, "__sync_fetch_and_umax_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMAX_16, "__sync_fetch_and_umax_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_1, "__sync_fetch_and_min_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_2, "__sync_fetch_and_min_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_4, "__sync_fetch_and_min_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_8, "__sync_fetch_and_min_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_MIN_16, "__sync_fetch_and_min_16")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_1, "__sync_fetch_and_umin_1")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_2, "__sync_fetch_and_umin_2")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_4, "__sync_fetch_and_umin_4")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_8, "__sync_fetch_and_umin_8")
HANDLE_LIBCALL(SYNC_FETCH_AND_UMIN_16, "__sync_fetch_and_umin_16")

// Atomic '__atomic_*' libcalls.
HANDLE_LIBCALL(ATOMIC_LOAD, "__atomic_load")
HANDLE_LIBCALL(ATOMIC_LOAD_1, "__atomic_load_1")
HANDLE_LIBCALL(ATOMIC_LOAD_2, "__atomic_load_2")
HANDLE_LIBCALL(ATOMIC_LOAD_4, "__atomic_load_4")
HANDLE_LIBCALL(ATOMIC_LOAD_8, "__atomic_load_8")
HANDLE_LIBCALL(ATOMIC_LOAD_16, "__atomic_load_16")

HANDLE_LIBCALL(ATOMIC_STORE, "__atomic_store")
HANDLE_LIBCALL(ATOMIC_STORE_1, "__atomic_store_1")
HANDLE_LIBCALL(ATOMIC_STORE_2, "__atomic_store_2")
HANDLE_LIBCALL(ATOMIC_STORE_4, "__atomic_store_4")
HANDLE_LIBCALL(ATOMIC_STORE_8, "__atomic_store_8")
HANDLE_LIBCALL(ATOMIC_STORE_16, "__atomic_store_16")

HANDLE_LIBCALL(ATOMIC_EXCHANGE, "__atomic_exchange")
HANDLE_LIBCALL(ATOMIC_EXCHANGE_1, "__atomic_exchange_1")
HANDLE_LIBCALL(ATOMIC_EXCHANGE_2, "__atomic_exchange_2")
HANDLE_LIBCALL(ATOMIC_EXCHANGE_4, "__atomic_exchange_4")
HANDLE_LIBCALL(ATOMIC_EXCHANGE_8, "__atomic_exchange_8")
HANDLE_LIBCALL(ATOMIC_EXCHANGE_16, "__atomic_exchange_16")

HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE, "__atomic_compare_exchange")
HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_1, "__atomic_compare_exchange_1")
HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_2, "__atomic_compare_exchange_2")
HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_4, "__atomic_compare_exchange_4")
HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_8, "__atomic_compare_exchange_8")
HANDLE_LIBCALL(ATOMIC_COMPARE_EXCHANGE_16, "__atomic_compare_exchange_16")

HANDLE_LIBCALL(ATOMIC_FETCH_ADD_1, "__atomic_fetch_add_1")
HANDLE_LIBCALL(ATOMIC_FETCH_ADD_2, "__atomic_fetch_add_2")
HANDLE_LIBCALL(ATOMIC_FETCH_ADD_4, "__atomic_fetch_add_4")
HANDLE_LIBCALL(ATOMIC_FETCH_ADD_8, "__atomic_fetch_add_8")
HANDLE_LIBCALL(ATOMIC_FETCH_ADD_16, "__atomic_fetch_add_16")
HANDLE_LIBCALL(ATOMIC_FETCH_SUB_1, "__atomic_fetch_sub_1")
HANDLE_LIBCALL(ATOMIC_FETCH_SUB_2, "__atomic_fetch_sub_2")
HANDLE_LIBCALL(ATOMIC_FETCH_SUB_4, "__atomic_fetch_sub_4")
HANDLE_LIBCALL(ATOMIC_FETCH_SUB_8, "__atomic_fetch_sub_8")
HANDLE_LIBCALL(ATOMIC_FETCH_SUB_16, "__atomic_fetch_sub_16")
HANDLE_LIBCALL(ATOMIC_FETCH_AND_1, "__atomic_fetch_and_1")
HANDLE_LIBCALL(ATOMIC_FETCH_AND_2, "__atomic_fetch_and_2")
HANDLE_LIBCALL(ATOMIC_FETCH_AND_4, "__atomic_fetch_and_4")
HANDLE_LIBCALL(ATOMIC_FETCH_AND_8, "__atomic_fetch_and_8")
HANDLE_LIBCALL(ATOMIC_FETCH_AND_16, "__atomic_fetch_and_16")
HANDLE_LIBCALL(ATOMIC_FETCH_OR_1, "__atomic_fetch_or_1")
HANDLE_LIBCALL(ATOMIC_FETCH_OR_2, "__atomic_fetch_or_2")
HANDLE_LIBCALL(ATOMIC_FETCH_OR_4, "__atomic_fetch_or_4")
HANDLE_LIBCALL(ATOMIC_FETCH_OR_8, "__atomic_fetch_or_8")
HANDLE_LIBCALL(ATOMIC_FETCH_OR_16, "__atomic_fetch_or_16")
HANDLE_LIBCALL(ATOMIC_FETCH_XOR_1, "__atomic_fetch_xor_1")
HANDLE_LIBCALL(ATOMIC_FETCH_XOR_2, "__atomic_fetch_xor_2")
HANDLE_LIBCALL(ATOMIC_FETCH_XOR_4, "__atomic_fetch_xor_4")
HANDLE_LIBCALL(ATOMIC_FETCH_XOR_8, "__atomic_fetch_xor_8")
HANDLE_LIBCALL(ATOMIC_FETCH_XOR_16, "__atomic_fetch_xor_16")
HANDLE_LIBCALL(ATOMIC_FETCH_NAND_1, "__atomic_fetch_nand_1")
HANDLE_LIBCALL(ATOMIC_FETCH_NAND_2, "__atomic_fetch_nand_2")
HANDLE_LIBCALL(ATOMIC_FETCH_NAND_4, "__atomic_fetch_nand_4")
HANDLE_LIBCALL(ATOMIC_FETCH_NAND_8, "__atomic_fetch_nand_8")
HANDLE_LIBCALL(ATOMIC_FETCH_NAND_16, "__atomic_fetch_nand_16")

// Out-of-line atomics libcalls
#define HLCALLS(A, N)                                                          \
  HANDLE_LIBCALL(A##N##_RELAX, nullptr)                                        \
  HANDLE_LIBCALL(A##N##_ACQ, nullptr)                                          \
  HANDLE_LIBCALL(A##N##_REL, nullptr)                                          \
  HANDLE_LIBCALL(A##N##_ACQ_REL, nullptr)
#define HLCALL5(A)                                                             \
  HLCALLS(A, 1) HLCALLS(A, 2) HLCALLS(A, 4) HLCALLS(A, 8) HLCALLS(A, 16)
HLCALL5(OUTLINE_ATOMIC_CAS)
HLCALL5(OUTLINE_ATOMIC_SWP)
HLCALL5(OUTLINE_ATOMIC_LDADD)
HLCALL5(OUTLINE_ATOMIC_LDSET)
HLCALL5(OUTLINE_ATOMIC_LDCLR)
HLCALL5(OUTLINE_ATOMIC_LDEOR)
#undef HLCALLS
#undef HLCALL5
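
// For illustration, each HLCALL5(...) above expands to twenty entries: e.g.
// HLCALL5(OUTLINE_ATOMIC_CAS) yields HANDLE_LIBCALL(OUTLINE_ATOMIC_CAS1_RELAX,
// nullptr) through HANDLE_LIBCALL(OUTLINE_ATOMIC_CAS16_ACQ_REL, nullptr), one
// entry per size (1, 2, 4, 8, 16) and ordering (RELAX, ACQ, REL, ACQ_REL).
// The names default to nullptr; targets that support out-of-line atomics
// (e.g. AArch64) install the actual routine names themselves.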

// Stack Protector Fail
HANDLE_LIBCALL(STACKPROTECTOR_CHECK_FAIL, "__stack_chk_fail")

// Deoptimization
HANDLE_LIBCALL(DEOPTIMIZE, "__llvm_deoptimize")

// Return address
HANDLE_LIBCALL(RETURN_ADDRESS, nullptr)

HANDLE_LIBCALL(UNKNOWN_LIBCALL, nullptr)

//===- DiagnosticHandler.h - DiagnosticHandler class for LLVM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Base DiagnosticHandler class declaration. Derive from this class to provide
// custom diagnostic reporting.
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DIAGNOSTICHANDLER_H
#define LLVM_IR_DIAGNOSTICHANDLER_H

#include "llvm/ADT/StringRef.h"

namespace llvm {
class DiagnosticInfo;

/// This is the base class for diagnostic handling in LLVM.
/// The handleDiagnostics method must be overridden by subclasses to handle
/// diagnostics. The *RemarkEnabled methods can be overridden to control
/// which remarks are enabled.
struct DiagnosticHandler {
  void *DiagnosticContext = nullptr;
  DiagnosticHandler(void *DiagContext = nullptr)
      : DiagnosticContext(DiagContext) {}
  virtual ~DiagnosticHandler() = default;

  using DiagnosticHandlerTy = void (*)(const DiagnosticInfo &DI, void *Context);

  /// DiagHandlerCallback is settable from the C API, and the base
  /// implementation of DiagnosticHandler calls it from handleDiagnostics().
  /// Derived classes of DiagnosticHandler should not use the callback but
  /// should instead override handleDiagnostics().
  DiagnosticHandlerTy DiagHandlerCallback = nullptr;

  /// Override handleDiagnostics to provide a custom implementation.
  /// Return true if the diagnostic was handled; otherwise return false so
  /// that LLVMContext::diagnose() prints the message with a prefix based on
  /// the severity.
  virtual bool handleDiagnostics(const DiagnosticInfo &DI) {
    if (DiagHandlerCallback) {
      DiagHandlerCallback(DI, DiagnosticContext);
      return true;
    }
    return false;
  }

  /// Return true if analysis remarks are enabled; override to provide a
  /// different implementation.
  virtual bool isAnalysisRemarkEnabled(StringRef PassName) const;

  /// Return true if missed optimization remarks are enabled; override to
  /// provide a different implementation.
  virtual bool isMissedOptRemarkEnabled(StringRef PassName) const;

  /// Return true if passed optimization remarks are enabled; override to
  /// provide a different implementation.
  virtual bool isPassedOptRemarkEnabled(StringRef PassName) const;

  /// Return true if any type of remarks are enabled for this pass.
  bool isAnyRemarkEnabled(StringRef PassName) const {
    return (isMissedOptRemarkEnabled(PassName) ||
            isPassedOptRemarkEnabled(PassName) ||
            isAnalysisRemarkEnabled(PassName));
  }

  /// Return true if any type of remarks are enabled for any pass.
  virtual bool isAnyRemarkEnabled() const;
};
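
/// Illustrative sketch (not part of this header): a minimal handler that
/// counts diagnostics and falls back to the default printing. The name
/// CountingHandler is hypothetical.
/// \code
///   struct CountingHandler : public DiagnosticHandler {
///     unsigned NumDiags = 0;
///     bool handleDiagnostics(const DiagnosticInfo &DI) override {
///       ++NumDiags;
///       return false; // Let LLVMContext::diagnose() print the message.
///     }
///   };
///   // Install on a context:
///   //   Ctx.setDiagnosticHandler(std::make_unique<CountingHandler>());
/// \endcode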
} // namespace llvm

#endif // LLVM_IR_DIAGNOSTICHANDLER_H
//===- llvm/ModuleSummaryIndex.h - Module Summary Index ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// ModuleSummaryIndex.h This file contains the declarations of the classes
/// that hold the module index and summary for function importing.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MODULESUMMARYINDEX_H
#define LLVM_IR_MODULESUMMARYINDEX_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScaledNumber.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

template <class GraphType> struct GraphTraits;

namespace yaml {

template <typename T> struct MappingTraits;

} // end namespace yaml

/// Class to accumulate and hold information about a callee.
struct CalleeInfo {
  enum class HotnessType : uint8_t {
    Unknown = 0,
    Cold = 1,
    None = 2,
    Hot = 3,
    Critical = 4
  };

  // The size of the bit-field might need to be adjusted if more values are
  // added to HotnessType enum.
  uint32_t Hotness : 3;

  /// The value stored in RelBlockFreq has to be interpreted as the digits of
  /// a scaled number with a scale of \p -ScaleShift.
  uint32_t RelBlockFreq : 29;
  static constexpr int32_t ScaleShift = 8;
  static constexpr uint64_t MaxRelBlockFreq = (1 << 29) - 1;

  CalleeInfo()
      : Hotness(static_cast<uint32_t>(HotnessType::Unknown)), RelBlockFreq(0) {}
  explicit CalleeInfo(HotnessType Hotness, uint64_t RelBF)
      : Hotness(static_cast<uint32_t>(Hotness)), RelBlockFreq(RelBF) {}

  void updateHotness(const HotnessType OtherHotness) {
    Hotness = std::max(Hotness, static_cast<uint32_t>(OtherHotness));
  }

  HotnessType getHotness() const { return HotnessType(Hotness); }

  /// Update \p RelBlockFreq from \p BlockFreq and \p EntryFreq
  ///
  /// BlockFreq is divided by EntryFreq and added to RelBlockFreq. To represent
  /// fractional values, the result is represented as a fixed point number with
  /// scale of -ScaleShift.
  void updateRelBlockFreq(uint64_t BlockFreq, uint64_t EntryFreq) {
    if (EntryFreq == 0)
      return;
    using Scaled64 = ScaledNumber<uint64_t>;
    Scaled64 Temp(BlockFreq, ScaleShift);
    Temp /= Scaled64::get(EntryFreq);

    uint64_t Sum =
        SaturatingAdd<uint64_t>(Temp.toInt<uint64_t>(), RelBlockFreq);
    Sum = std::min(Sum, uint64_t(MaxRelBlockFreq));
    RelBlockFreq = static_cast<uint32_t>(Sum);
  }
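
  // Worked example: with BlockFreq = 3 and EntryFreq = 2, Temp represents
  // 3 * 2^8 / 2 = 384 in the scale -ScaleShift fixed-point encoding, i.e.
  // 384 / 2^8 = 1.5, and RelBlockFreq accumulates that value (saturating
  // at MaxRelBlockFreq).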
};

inline const char *getHotnessName(CalleeInfo::HotnessType HT) {
  switch (HT) {
  case CalleeInfo::HotnessType::Unknown:
    return "unknown";
  case CalleeInfo::HotnessType::Cold:
    return "cold";
  case CalleeInfo::HotnessType::None:
    return "none";
  case CalleeInfo::HotnessType::Hot:
    return "hot";
  case CalleeInfo::HotnessType::Critical:
    return "critical";
  }
  llvm_unreachable("invalid hotness");
}

class GlobalValueSummary;

using GlobalValueSummaryList = std::vector<std::unique_ptr<GlobalValueSummary>>;

struct alignas(8) GlobalValueSummaryInfo {
  union NameOrGV {
    NameOrGV(bool HaveGVs) {
      if (HaveGVs)
        GV = nullptr;
      else
        Name = "";
    }

    /// The GlobalValue corresponding to this summary. This is only used in
    /// per-module summaries and when the IR is available. E.g. when module
    /// analysis is being run, or when parsing both the IR and the summary
    /// from assembly.
    const GlobalValue *GV;

    /// Summary string representation. This StringRef points to the BC module
    /// string table and is valid as long as the module data is kept in
    /// memory, which is guaranteed until the runThinLTOBackend function is
    /// called, so it is safe to use this field during the thin link. This
    /// field is only valid if the summary index was loaded from a BC file.
    StringRef Name;
  } U;

  inline GlobalValueSummaryInfo(bool HaveGVs);

  /// List of global value summary structures for a particular value held
  /// in the GlobalValueMap. Requires a vector in the case of multiple
  /// COMDAT values of the same name.
  GlobalValueSummaryList SummaryList;
};

/// Map from global value GUID to corresponding summary structures. Use a
/// std::map rather than a DenseMap so that pointers to the map's value_type
/// (which are used by ValueInfo) are not invalidated by insertion. A std::map
/// is also likely to incur less overhead here: the value type is not very
/// small and the final size of the map is unknown, so a DenseMap would suffer
/// from repeated insertions and resizing.
using GlobalValueSummaryMapTy =
    std::map<GlobalValue::GUID, GlobalValueSummaryInfo>;

/// Struct that holds a reference to a particular GUID in a global value
/// summary.
struct ValueInfo {
  enum Flags { HaveGV = 1, ReadOnly = 2, WriteOnly = 4 };
  PointerIntPair<const GlobalValueSummaryMapTy::value_type *, 3, int>
      RefAndFlags;

  ValueInfo() = default;
  ValueInfo(bool HaveGVs, const GlobalValueSummaryMapTy::value_type *R) {
    RefAndFlags.setPointer(R);
    RefAndFlags.setInt(HaveGVs);
  }

  explicit operator bool() const { return getRef(); }

  GlobalValue::GUID getGUID() const { return getRef()->first; }
  const GlobalValue *getValue() const {
    assert(haveGVs());
    return getRef()->second.U.GV;
  }

  ArrayRef<std::unique_ptr<GlobalValueSummary>> getSummaryList() const {
    return getRef()->second.SummaryList;
  }

  StringRef name() const {
    return haveGVs() ? getRef()->second.U.GV->getName()
                     : getRef()->second.U.Name;
  }

  bool haveGVs() const { return RefAndFlags.getInt() & HaveGV; }
  bool isReadOnly() const {
    assert(isValidAccessSpecifier());
    return RefAndFlags.getInt() & ReadOnly;
  }
  bool isWriteOnly() const {
    assert(isValidAccessSpecifier());
    return RefAndFlags.getInt() & WriteOnly;
  }
  unsigned getAccessSpecifier() const {
    assert(isValidAccessSpecifier());
    return RefAndFlags.getInt() & (ReadOnly | WriteOnly);
  }
  bool isValidAccessSpecifier() const {
    unsigned BadAccessMask = ReadOnly | WriteOnly;
    return (RefAndFlags.getInt() & BadAccessMask) != BadAccessMask;
  }
  void setReadOnly() {
    // We expect the ro/wo attribute to be set only once during the
    // ValueInfo lifetime.
    assert(getAccessSpecifier() == 0);
    RefAndFlags.setInt(RefAndFlags.getInt() | ReadOnly);
  }
  void setWriteOnly() {
    assert(getAccessSpecifier() == 0);
    RefAndFlags.setInt(RefAndFlags.getInt() | WriteOnly);
  }

  const GlobalValueSummaryMapTy::value_type *getRef() const {
    return RefAndFlags.getPointer();
  }

  /// Returns the most constraining visibility among summaries. The
  /// visibilities, ordered from least to most constraining, are: default,
  /// protected and hidden.
  GlobalValue::VisibilityTypes getELFVisibility() const;

  /// Checks if all summaries are DSO local (have the flag set). When DSOLocal
  /// propagation has been done, set the parameter to enable fast check.
  bool isDSOLocal(bool WithDSOLocalPropagation = false) const;

  /// Checks if all copies are eligible for auto-hiding (have flag set).
  bool canAutoHide() const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const ValueInfo &VI) {
  OS << VI.getGUID();
  if (!VI.name().empty())
    OS << " (" << VI.name() << ")";
  return OS;
}
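
// For example, a ValueInfo with GUID 42 named "foo" prints as "42 (foo)".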

inline bool operator==(const ValueInfo &A, const ValueInfo &B) {
  assert(A.getRef() && B.getRef() &&
         "Need ValueInfo with non-null Ref for comparison");
  return A.getRef() == B.getRef();
}

inline bool operator!=(const ValueInfo &A, const ValueInfo &B) {
  assert(A.getRef() && B.getRef() &&
         "Need ValueInfo with non-null Ref for comparison");
  return A.getRef() != B.getRef();
}

inline bool operator<(const ValueInfo &A, const ValueInfo &B) {
  assert(A.getRef() && B.getRef() &&
         "Need ValueInfo with non-null Ref to compare GUIDs");
  return A.getGUID() < B.getGUID();
}

template <> struct DenseMapInfo<ValueInfo> {
  static inline ValueInfo getEmptyKey() {
    return ValueInfo(false, (GlobalValueSummaryMapTy::value_type *)-8);
  }

  static inline ValueInfo getTombstoneKey() {
    return ValueInfo(false, (GlobalValueSummaryMapTy::value_type *)-16);
  }

  static inline bool isSpecialKey(ValueInfo V) {
    return V == getTombstoneKey() || V == getEmptyKey();
  }

  static bool isEqual(ValueInfo L, ValueInfo R) {
    // We are not supposed to mix ValueInfo(s) with different HaveGVs flag
    // in a same container.
    assert(isSpecialKey(L) || isSpecialKey(R) || (L.haveGVs() == R.haveGVs()));
    return L.getRef() == R.getRef();
  }
  static unsigned getHashValue(ValueInfo I) { return (uintptr_t)I.getRef(); }
};
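
// Note that the -8/-16 sentinels above are multiples of 8, so they satisfy
// the pointer alignment that ValueInfo's PointerIntPair needs for its three
// low flag bits (hence the alignas(8) on GlobalValueSummaryInfo).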

/// Summary of memprof callsite metadata.
struct CallsiteInfo {
  // Actual callee function.
  ValueInfo Callee;

  // Used to record whole program analysis cloning decisions.
  // The ThinLTO backend will need to create as many clones as there are entries
  // in the vector (it is expected and should be confirmed that all such
  // summaries in the same FunctionSummary have the same number of entries).
  // Each index records version info for the corresponding clone of this
  // function. The value is the callee clone it calls (becomes the appended
  // suffix id). Index 0 is the original version, and a value of 0 calls the
  // original callee.
  SmallVector<unsigned> Clones{0};

  // Represents stack ids in this context, recorded as indices into the
  // StackIds vector in the summary index, which in turn holds the full 64-bit
  // stack ids. This reduces memory as there are in practice far fewer unique
  // stack ids than stack id references.
  SmallVector<unsigned> StackIdIndices;

  CallsiteInfo(ValueInfo Callee, SmallVector<unsigned> StackIdIndices)
      : Callee(Callee), StackIdIndices(std::move(StackIdIndices)) {}
  CallsiteInfo(ValueInfo Callee, SmallVector<unsigned> Clones,
               SmallVector<unsigned> StackIdIndices)
      : Callee(Callee), Clones(std::move(Clones)),
        StackIdIndices(std::move(StackIdIndices)) {}
};

inline raw_ostream &operator<<(raw_ostream &OS, const CallsiteInfo &SNI) {
  OS << "Callee: " << SNI.Callee;
  bool First = true;
  OS << " Clones: ";
  for (auto V : SNI.Clones) {
    if (!First)
      OS << ", ";
    First = false;
    OS << V;
  }
  First = true;
  OS << " StackIds: ";
  for (auto Id : SNI.StackIdIndices) {
    if (!First)
      OS << ", ";
    First = false;
    OS << Id;
  }
  return OS;
}

// Allocation type assigned to an allocation reached by a given context.
// More can be added; for now these are cold, notcold and hot.
// Values should be powers of two so that they can be ORed, in particular to
// track allocations that have different behavior with different calling
// contexts.
enum class AllocationType : uint8_t {
  None = 0,
  NotCold = 1,
  Cold = 2,
  Hot = 4,
  All = 7 // This should always be set to the OR of all values.
};
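
// For example, an allocation reached via contexts that were sometimes cold
// and sometimes not records (uint8_t)AllocationType::Cold |
// (uint8_t)AllocationType::NotCold == 3, which is why the enumerator values
// are powers of two.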

/// Summary of a single MIB in a memprof metadata on allocations.
struct MIBInfo {
  // The allocation type for this profiled context.
  AllocationType AllocType;

  // Represents stack ids in this context, recorded as indices into the
  // StackIds vector in the summary index, which in turn holds the full 64-bit
  // stack ids. This reduces memory as there are in practice far fewer unique
  // stack ids than stack id references.
  SmallVector<unsigned> StackIdIndices;

  MIBInfo(AllocationType AllocType, SmallVector<unsigned> StackIdIndices)
      : AllocType(AllocType), StackIdIndices(std::move(StackIdIndices)) {}
};

inline raw_ostream &operator<<(raw_ostream &OS, const MIBInfo &MIB) {
  OS << "AllocType " << (unsigned)MIB.AllocType;
  bool First = true;
  OS << " StackIds: ";
  for (auto Id : MIB.StackIdIndices) {
    if (!First)
      OS << ", ";
    First = false;
    OS << Id;
  }
  return OS;
}

/// Summary of memprof metadata on allocations.
struct AllocInfo {
  // Used to record whole program analysis cloning decisions.
  // The ThinLTO backend will need to create as many clones as there are entries
  // in the vector (it is expected and should be confirmed that all such
  // summaries in the same FunctionSummary have the same number of entries).
  // Each index records version info for the corresponding clone of this
  // function. The value is the allocation type of the corresponding allocation.
  // Index 0 is the original version. Before cloning, index 0 may have more than
  // one allocation type.
  SmallVector<uint8_t> Versions;

  // Vector of MIBs in this memprof metadata.
  std::vector<MIBInfo> MIBs;

  AllocInfo(std::vector<MIBInfo> MIBs) : MIBs(std::move(MIBs)) {
    Versions.push_back(0);
  }
  AllocInfo(SmallVector<uint8_t> Versions, std::vector<MIBInfo> MIBs)
      : Versions(std::move(Versions)), MIBs(std::move(MIBs)) {}
};

inline raw_ostream &operator<<(raw_ostream &OS, const AllocInfo &AE) {
  bool First = true;
  OS << "Versions: ";
  for (auto V : AE.Versions) {
    if (!First)
      OS << ", ";
    First = false;
    OS << (unsigned)V;
  }
  OS << " MIB:\n";
  for (auto &M : AE.MIBs) {
    OS << "\t\t" << M << "\n";
  }
  return OS;
}

/// Function and variable summary information to aid decisions and
/// implementation of importing.
class GlobalValueSummary {
public:
  /// Subclass discriminator (for dyn_cast<> et al.)
  enum SummaryKind : unsigned { AliasKind, FunctionKind, GlobalVarKind };

  /// Group flags (Linkage, NotEligibleToImport, etc.) as a bitfield.
  struct GVFlags {
    /// The linkage type of the associated global value.
    ///
    /// One use is to flag values that have local linkage types and need to
    /// have module identifier appended before placing into the combined
    /// index, to disambiguate from other values with the same name.
    /// In the future this will be used to update and optimize linkage
    /// types based on global summary-based analysis.
    unsigned Linkage : 4;

    /// Indicates the visibility.
    unsigned Visibility : 2;

    /// Indicate if the global value cannot be imported (e.g. it cannot
    /// be renamed or references something that can't be renamed).
    unsigned NotEligibleToImport : 1;

    /// In per-module summary, indicate that the global value must be considered
    /// a live root for index-based liveness analysis. Used for special LLVM
    /// values such as llvm.global_ctors that the linker does not know about.
    ///
    /// In combined summary, indicate that the global value is live.
    unsigned Live : 1;

    /// Indicates that the linker resolved the symbol to a definition from
    /// within the same linkage unit.
    unsigned DSOLocal : 1;

    /// In the per-module summary, indicates that the global value is
    /// linkonce_odr and global unnamed addr (so eligible for auto-hiding
    /// via hidden visibility). In the combined summary, indicates that the
    /// prevailing linkonce_odr copy can be auto-hidden via hidden visibility
    /// when it is upgraded to weak_odr in the backend. This is legal when
    /// all copies are eligible for auto-hiding (i.e. all copies were
    /// linkonce_odr global unnamed addr). If any copy is not (e.g. it was
    /// originally weak_odr), we cannot auto-hide the prevailing copy as it
    /// means the symbol was externally visible.
    unsigned CanAutoHide : 1;

    /// Convenience Constructors
    explicit GVFlags(GlobalValue::LinkageTypes Linkage,
                     GlobalValue::VisibilityTypes Visibility,
                     bool NotEligibleToImport, bool Live, bool IsLocal,
                     bool CanAutoHide)
        : Linkage(Linkage), Visibility(Visibility),
          NotEligibleToImport(NotEligibleToImport), Live(Live),
          DSOLocal(IsLocal), CanAutoHide(CanAutoHide) {}
  };

private:
  /// Kind of summary for use in dyn_cast<> et al.
  SummaryKind Kind;

  GVFlags Flags;

  /// This is the hash of the name of the symbol in the original file. It is
  /// identical to the GUID for global symbols, but differs for local symbols,
  /// since the GUID includes the module-level id in the hash.
  GlobalValue::GUID OriginalName = 0;

  /// Path of module IR containing value's definition, used to locate
  /// module during importing.
  ///
  /// This is only used during parsing of the combined index, or when
  /// parsing the per-module index for creation of the combined summary index,
  /// not during writing of the per-module index which doesn't contain a
  /// module path string table.
  StringRef ModulePath;

  /// List of values referenced by this global value's definition
  /// (either by the initializer of a global variable, or referenced
  /// from within a function). This does not include functions called, which
  /// are listed in the derived FunctionSummary object.
  std::vector<ValueInfo> RefEdgeList;

protected:
  GlobalValueSummary(SummaryKind K, GVFlags Flags, std::vector<ValueInfo> Refs)
      : Kind(K), Flags(Flags), RefEdgeList(std::move(Refs)) {
    assert((K != AliasKind || Refs.empty()) &&
           "Expect no references for AliasSummary");
  }

public:
  virtual ~GlobalValueSummary() = default;

  /// Returns the hash of the original name; it is identical to the GUID for
  /// externally visible symbols, but not for local ones.
  GlobalValue::GUID getOriginalName() const { return OriginalName; }

  /// Initialize the original name hash in this summary.
  void setOriginalName(GlobalValue::GUID Name) { OriginalName = Name; }

  /// Which kind of summary subclass this is.
  SummaryKind getSummaryKind() const { return Kind; }

  /// Set the path to the module containing this function, for use in
  /// the combined index.
  void setModulePath(StringRef ModPath) { ModulePath = ModPath; }

  /// Get the path to the module containing this function.
  StringRef modulePath() const { return ModulePath; }

  /// Get the flags for this GlobalValue (see \p struct GVFlags).
  GVFlags flags() const { return Flags; }

  /// Return linkage type recorded for this global value.
  GlobalValue::LinkageTypes linkage() const {
    return static_cast<GlobalValue::LinkageTypes>(Flags.Linkage);
  }

  /// Sets the linkage to the value determined by global summary-based
  /// optimization. Will be applied in the ThinLTO backends.
  void setLinkage(GlobalValue::LinkageTypes Linkage) {
    Flags.Linkage = Linkage;
  }

  /// Return true if this global value can't be imported.
  bool notEligibleToImport() const { return Flags.NotEligibleToImport; }

  bool isLive() const { return Flags.Live; }

  void setLive(bool Live) { Flags.Live = Live; }

  void setDSOLocal(bool Local) { Flags.DSOLocal = Local; }

  bool isDSOLocal() const { return Flags.DSOLocal; }

  void setCanAutoHide(bool CanAutoHide) { Flags.CanAutoHide = CanAutoHide; }

  bool canAutoHide() const { return Flags.CanAutoHide; }

  GlobalValue::VisibilityTypes getVisibility() const {
    return (GlobalValue::VisibilityTypes)Flags.Visibility;
  }
  void setVisibility(GlobalValue::VisibilityTypes Vis) {
    Flags.Visibility = (unsigned)Vis;
  }

  /// Flag that this global value cannot be imported.
  void setNotEligibleToImport() { Flags.NotEligibleToImport = true; }

  /// Return the list of values referenced by this global value definition.
  ArrayRef<ValueInfo> refs() const { return RefEdgeList; }

  /// If this is an alias summary, returns the summary of the aliased object (a
  /// global variable or function), otherwise returns itself.
  GlobalValueSummary *getBaseObject();
  const GlobalValueSummary *getBaseObject() const;

  friend class ModuleSummaryIndex;
};

GlobalValueSummaryInfo::GlobalValueSummaryInfo(bool HaveGVs) : U(HaveGVs) {}

/// Alias summary information.
class AliasSummary : public GlobalValueSummary {
  ValueInfo AliaseeValueInfo;

  /// This is the summary of the aliasee in the same module as the alias (it
  /// could be obtained from the ValueInfo; keeping it here trades memory for
  /// time). Note that this pointer may be null (and the value info empty)
  /// when we have a distributed index where the alias is being imported (as
  /// a copy of the aliasee), but the aliasee is not.
  GlobalValueSummary *AliaseeSummary;

public:
  AliasSummary(GVFlags Flags)
      : GlobalValueSummary(AliasKind, Flags, ArrayRef<ValueInfo>{}),
        AliaseeSummary(nullptr) {}

  /// Check if this is an alias summary.
  static bool classof(const GlobalValueSummary *GVS) {
    return GVS->getSummaryKind() == AliasKind;
  }

  void setAliasee(ValueInfo &AliaseeVI, GlobalValueSummary *Aliasee) {
    AliaseeValueInfo = AliaseeVI;
    AliaseeSummary = Aliasee;
  }

  bool hasAliasee() const {
    assert(!!AliaseeSummary == (AliaseeValueInfo &&
                                !AliaseeValueInfo.getSummaryList().empty()) &&
           "Expect to have both aliasee summary and summary list or neither");
    return !!AliaseeSummary;
  }

  const GlobalValueSummary &getAliasee() const {
    assert(AliaseeSummary && "Unexpected missing aliasee summary");
    return *AliaseeSummary;
  }

  GlobalValueSummary &getAliasee() {
    return const_cast<GlobalValueSummary &>(
                         static_cast<const AliasSummary *>(this)->getAliasee());
  }
  ValueInfo getAliaseeVI() const {
    assert(AliaseeValueInfo && "Unexpected missing aliasee");
    return AliaseeValueInfo;
  }
  GlobalValue::GUID getAliaseeGUID() const {
    assert(AliaseeValueInfo && "Unexpected missing aliasee");
    return AliaseeValueInfo.getGUID();
  }
};

const inline GlobalValueSummary *GlobalValueSummary::getBaseObject() const {
  if (auto *AS = dyn_cast<AliasSummary>(this))
    return &AS->getAliasee();
  return this;
}

inline GlobalValueSummary *GlobalValueSummary::getBaseObject() {
  if (auto *AS = dyn_cast<AliasSummary>(this))
    return &AS->getAliasee();
  return this;
}

/// Function summary information to aid decisions and implementation of
/// importing.
class FunctionSummary : public GlobalValueSummary {
public:
  /// <CalleeValueInfo, CalleeInfo> call edge pair.
  using EdgeTy = std::pair<ValueInfo, CalleeInfo>;

  /// Types for -force-summary-edges-cold debugging option.
  enum ForceSummaryHotnessType : unsigned {
    FSHT_None,
    FSHT_AllNonCritical,
    FSHT_All
  };

  /// An "identifier" for a virtual function. This contains the type identifier
  /// represented as a GUID and the offset from the address point to the virtual
  /// function pointer, where "address point" is as defined in the Itanium ABI:
  /// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#vtable-general
  struct VFuncId {
    GlobalValue::GUID GUID;
    uint64_t Offset;
  };

  /// A specification for a virtual function call with all constant integer
  /// arguments. This is used to perform virtual constant propagation on the
  /// summary.
  struct ConstVCall {
    VFuncId VFunc;
    std::vector<uint64_t> Args;
  };

  /// All type identifier related information. Because these fields are
  /// relatively uncommon we only allocate space for them if necessary.
  struct TypeIdInfo {
    /// List of type identifiers used by this function in llvm.type.test
    /// intrinsics referenced by something other than an llvm.assume intrinsic,
    /// represented as GUIDs.
    std::vector<GlobalValue::GUID> TypeTests;

    /// List of virtual calls made by this function using (respectively)
    /// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics that do
    /// not have all constant integer arguments.
    std::vector<VFuncId> TypeTestAssumeVCalls, TypeCheckedLoadVCalls;

    /// List of virtual calls made by this function using (respectively)
    /// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics with
    /// all constant integer arguments.
    std::vector<ConstVCall> TypeTestAssumeConstVCalls,
        TypeCheckedLoadConstVCalls;
  };

  /// Flags specific to function summaries.
  struct FFlags {
    // Function attribute flags. Used to track if a function accesses memory,
    // recurses or aliases.
    unsigned ReadNone : 1;
    unsigned ReadOnly : 1;
    unsigned NoRecurse : 1;
    unsigned ReturnDoesNotAlias : 1;

    // Indicate if the global value cannot be inlined.
    unsigned NoInline : 1;
    // Indicate if function should be always inlined.
    unsigned AlwaysInline : 1;
    // Indicate if the function never raises an exception. Can be modified
    // during thinlink function attribute propagation.
    unsigned NoUnwind : 1;
    // Indicate if the function contains instructions that mayThrow.
    unsigned MayThrow : 1;

    // If there are calls to unknown targets (e.g. indirect)
    unsigned HasUnknownCall : 1;

    // Indicate if a function must be an unreachable function.
    //
    // This bit is sufficient but not necessary;
    // if this bit is on, the function must be regarded as unreachable;
    // if this bit is off, the function might be reachable or unreachable.
    unsigned MustBeUnreachable : 1;

    FFlags &operator&=(const FFlags &RHS) {
      this->ReadNone &= RHS.ReadNone;
      this->ReadOnly &= RHS.ReadOnly;
      this->NoRecurse &= RHS.NoRecurse;
      this->ReturnDoesNotAlias &= RHS.ReturnDoesNotAlias;
      this->NoInline &= RHS.NoInline;
      this->AlwaysInline &= RHS.AlwaysInline;
      this->NoUnwind &= RHS.NoUnwind;
      this->MayThrow &= RHS.MayThrow;
      this->HasUnknownCall &= RHS.HasUnknownCall;
      this->MustBeUnreachable &= RHS.MustBeUnreachable;
      return *this;
    }

    bool anyFlagSet() {
      return this->ReadNone | this->ReadOnly | this->NoRecurse |
             this->ReturnDoesNotAlias | this->NoInline | this->AlwaysInline |
             this->NoUnwind | this->MayThrow | this->HasUnknownCall |
             this->MustBeUnreachable;
    }

    operator std::string() {
      std::string Output;
      raw_string_ostream OS(Output);
      OS << "funcFlags: (";
      OS << "readNone: " << this->ReadNone;
      OS << ", readOnly: " << this->ReadOnly;
      OS << ", noRecurse: " << this->NoRecurse;
      OS << ", returnDoesNotAlias: " << this->ReturnDoesNotAlias;
      OS << ", noInline: " << this->NoInline;
      OS << ", alwaysInline: " << this->AlwaysInline;
      OS << ", noUnwind: " << this->NoUnwind;
      OS << ", mayThrow: " << this->MayThrow;
      OS << ", hasUnknownCall: " << this->HasUnknownCall;
      OS << ", mustBeUnreachable: " << this->MustBeUnreachable;
      OS << ")";
      return OS.str();
    }
  };

  /// Describes the uses of a parameter by the function.
  struct ParamAccess {
    static constexpr uint32_t RangeWidth = 64;

    /// Describes the use of a value in a call instruction, specifying the
    /// call's target, the value's parameter number, and the possible range of
    /// offsets from the beginning of the value that are passed.
    struct Call {
      uint64_t ParamNo = 0;
      ValueInfo Callee;
      ConstantRange Offsets{/*BitWidth=*/RangeWidth, /*isFullSet=*/true};

      Call() = default;
      Call(uint64_t ParamNo, ValueInfo Callee, const ConstantRange &Offsets)
          : ParamNo(ParamNo), Callee(Callee), Offsets(Offsets) {}
    };

    uint64_t ParamNo = 0;
    /// The range contains byte offsets from the parameter pointer which are
    /// accessed by the function. In the per-module summary, it only includes
    /// accesses made by the function instructions. In the combined summary, it
    /// also includes accesses by nested function calls.
    ConstantRange Use{/*BitWidth=*/RangeWidth, /*isFullSet=*/true};
    /// In the per-module summary, it summarizes the byte offset applied to each
    /// pointer parameter before passing to each corresponding callee.
    /// In the combined summary, it's empty and information is propagated by
    /// inter-procedural analysis and applied to the Use field.
    std::vector<Call> Calls;

    ParamAccess() = default;
    ParamAccess(uint64_t ParamNo, const ConstantRange &Use)
        : ParamNo(ParamNo), Use(Use) {}
  };
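
  // Illustrative example with hypothetical values: a function that reads
  // bytes [0, 8) of its second pointer parameter and passes that pointer at
  // offset 4 as the first argument of callee F would record
  //   ParamAccess{/*ParamNo=*/1, /*Use=*/[0, 8)}
  // with Calls = { Call{/*ParamNo=*/0, /*Callee=*/F, /*Offsets=*/[4, 5)} }.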

  /// Create an empty FunctionSummary (with specified call edges).
  /// Used to represent external nodes and the dummy root node.
  static FunctionSummary
  makeDummyFunctionSummary(std::vector<FunctionSummary::EdgeTy> Edges) {
    return FunctionSummary(
        FunctionSummary::GVFlags(
            GlobalValue::LinkageTypes::AvailableExternallyLinkage,
            GlobalValue::DefaultVisibility,
            /*NotEligibleToImport=*/true, /*Live=*/true, /*IsLocal=*/false,
            /*CanAutoHide=*/false),
        /*NumInsts=*/0, FunctionSummary::FFlags{}, /*EntryCount=*/0,
        std::vector<ValueInfo>(), std::move(Edges),
        std::vector<GlobalValue::GUID>(),
        std::vector<FunctionSummary::VFuncId>(),
        std::vector<FunctionSummary::VFuncId>(),
        std::vector<FunctionSummary::ConstVCall>(),
        std::vector<FunctionSummary::ConstVCall>(),
        std::vector<FunctionSummary::ParamAccess>(),
        std::vector<CallsiteInfo>(), std::vector<AllocInfo>());
  }

  /// A dummy node to reference external functions that aren't in the index
  static FunctionSummary ExternalNode;

private:
  /// Number of instructions (ignoring debug instructions and the like)
  /// computed during the initial compile step when the summary index is
  /// first built.
  unsigned InstCount;

  /// Function summary specific flags.
  FFlags FunFlags;

  /// The synthesized entry count of the function.
  /// This is only populated during the ThinLink phase and remains unused
  /// while generating per-module summaries.
  uint64_t EntryCount = 0;

  /// List of <CalleeValueInfo, CalleeInfo> call edge pairs from this function.
  std::vector<EdgeTy> CallGraphEdgeList;

  std::unique_ptr<TypeIdInfo> TIdInfo;

  /// Uses for every parameter to this function.
  using ParamAccessesTy = std::vector<ParamAccess>;
  std::unique_ptr<ParamAccessesTy> ParamAccesses;

  /// Optional list of memprof callsite metadata summaries. The correspondence
  /// between the callsite summary and the callsites in the function is implied
  /// by the order in the vector (and can be validated by comparing the stack
  /// ids in the CallsiteInfo to those in the instruction callsite metadata).
  /// As a memory savings optimization, we only create these for the prevailing
  /// copy of a symbol when creating the combined index during LTO.
  using CallsitesTy = std::vector<CallsiteInfo>;
  std::unique_ptr<CallsitesTy> Callsites;

  /// Optional list of allocation memprof metadata summaries. The correspondence
  /// between the alloc memprof summary and the allocation callsites in the
  /// function is implied by the order in the vector (and can be validated by
  /// comparing the stack ids in the AllocInfo to those in the instruction
  /// memprof metadata).
  /// As a memory savings optimization, we only create these for the prevailing
  /// copy of a symbol when creating the combined index during LTO.
  using AllocsTy = std::vector<AllocInfo>;
  std::unique_ptr<AllocsTy> Allocs;

public:
  FunctionSummary(GVFlags Flags, unsigned NumInsts, FFlags FunFlags,
                  uint64_t EntryCount, std::vector<ValueInfo> Refs,
                  std::vector<EdgeTy> CGEdges,
                  std::vector<GlobalValue::GUID> TypeTests,
                  std::vector<VFuncId> TypeTestAssumeVCalls,
                  std::vector<VFuncId> TypeCheckedLoadVCalls,
                  std::vector<ConstVCall> TypeTestAssumeConstVCalls,
                  std::vector<ConstVCall> TypeCheckedLoadConstVCalls,
                  std::vector<ParamAccess> Params, CallsitesTy CallsiteList,
                  AllocsTy AllocList)
      : GlobalValueSummary(FunctionKind, Flags, std::move(Refs)),
        InstCount(NumInsts), FunFlags(FunFlags), EntryCount(EntryCount),
        CallGraphEdgeList(std::move(CGEdges)) {
    if (!TypeTests.empty() || !TypeTestAssumeVCalls.empty() ||
        !TypeCheckedLoadVCalls.empty() || !TypeTestAssumeConstVCalls.empty() ||
        !TypeCheckedLoadConstVCalls.empty())
      TIdInfo = std::make_unique<TypeIdInfo>(
          TypeIdInfo{std::move(TypeTests), std::move(TypeTestAssumeVCalls),
                     std::move(TypeCheckedLoadVCalls),
                     std::move(TypeTestAssumeConstVCalls),
                     std::move(TypeCheckedLoadConstVCalls)});
    if (!Params.empty())
      ParamAccesses = std::make_unique<ParamAccessesTy>(std::move(Params));
    if (!CallsiteList.empty())
      Callsites = std::make_unique<CallsitesTy>(std::move(CallsiteList));
    if (!AllocList.empty())
      Allocs = std::make_unique<AllocsTy>(std::move(AllocList));
  }
  // Gets the number of readonly and writeonly refs in RefEdgeList
  std::pair<unsigned, unsigned> specialRefCounts() const;

  /// Check if this is a function summary.
  static bool classof(const GlobalValueSummary *GVS) {
    return GVS->getSummaryKind() == FunctionKind;
  }

  /// Get function summary flags.
  FFlags fflags() const { return FunFlags; }

  void setNoRecurse() { FunFlags.NoRecurse = true; }

  void setNoUnwind() { FunFlags.NoUnwind = true; }

  /// Get the instruction count recorded for this function.
  unsigned instCount() const { return InstCount; }

  /// Get the synthetic entry count for this function.
  uint64_t entryCount() const { return EntryCount; }

  /// Set the synthetic entry count for this function.
  void setEntryCount(uint64_t EC) { EntryCount = EC; }

  /// Return the list of <CalleeValueInfo, CalleeInfo> pairs.
  ArrayRef<EdgeTy> calls() const { return CallGraphEdgeList; }

  std::vector<EdgeTy> &mutableCalls() { return CallGraphEdgeList; }

  void addCall(EdgeTy E) { CallGraphEdgeList.push_back(E); }

  /// Returns the list of type identifiers used by this function in
  /// llvm.type.test intrinsics other than by an llvm.assume intrinsic,
  /// represented as GUIDs.
  ArrayRef<GlobalValue::GUID> type_tests() const {
    if (TIdInfo)
      return TIdInfo->TypeTests;
    return {};
  }

  /// Returns the list of virtual calls made by this function using
  /// llvm.assume(llvm.type.test) intrinsics that do not have all constant
  /// integer arguments.
  ArrayRef<VFuncId> type_test_assume_vcalls() const {
    if (TIdInfo)
      return TIdInfo->TypeTestAssumeVCalls;
    return {};
  }

  /// Returns the list of virtual calls made by this function using
  /// llvm.type.checked.load intrinsics that do not have all constant integer
  /// arguments.
  ArrayRef<VFuncId> type_checked_load_vcalls() const {
    if (TIdInfo)
      return TIdInfo->TypeCheckedLoadVCalls;
    return {};
  }

  /// Returns the list of virtual calls made by this function using
  /// llvm.assume(llvm.type.test) intrinsics with all constant integer
  /// arguments.
  ArrayRef<ConstVCall> type_test_assume_const_vcalls() const {
    if (TIdInfo)
      return TIdInfo->TypeTestAssumeConstVCalls;
    return {};
  }

  /// Returns the list of virtual calls made by this function using
  /// llvm.type.checked.load intrinsics with all constant integer arguments.
  ArrayRef<ConstVCall> type_checked_load_const_vcalls() const {
    if (TIdInfo)
      return TIdInfo->TypeCheckedLoadConstVCalls;
    return {};
  }

  /// Returns the list of known uses of pointer parameters.
  ArrayRef<ParamAccess> paramAccesses() const {
    if (ParamAccesses)
      return *ParamAccesses;
    return {};
  }

  /// Sets the list of known uses of pointer parameters.
  void setParamAccesses(std::vector<ParamAccess> NewParams) {
    if (NewParams.empty())
      ParamAccesses.reset();
    else if (ParamAccesses)
      *ParamAccesses = std::move(NewParams);
    else
      ParamAccesses = std::make_unique<ParamAccessesTy>(std::move(NewParams));
  }

  /// Add a type test to the summary. This is used by WholeProgramDevirt if we
  /// were unable to devirtualize a checked call.
  void addTypeTest(GlobalValue::GUID Guid) {
    if (!TIdInfo)
      TIdInfo = std::make_unique<TypeIdInfo>();
    TIdInfo->TypeTests.push_back(Guid);
  }

  const TypeIdInfo *getTypeIdInfo() const { return TIdInfo.get(); }

  ArrayRef<CallsiteInfo> callsites() const {
    if (Callsites)
      return *Callsites;
    return {};
  }

  CallsitesTy &mutableCallsites() {
    assert(Callsites);
    return *Callsites;
  }

  ArrayRef<AllocInfo> allocs() const {
    if (Allocs)
      return *Allocs;
    return {};
  }

  AllocsTy &mutableAllocs() {
    assert(Allocs);
    return *Allocs;
  }

  friend struct GraphTraits<ValueInfo>;
};

template <> struct DenseMapInfo<FunctionSummary::VFuncId> {
  static FunctionSummary::VFuncId getEmptyKey() { return {0, uint64_t(-1)}; }

  static FunctionSummary::VFuncId getTombstoneKey() {
    return {0, uint64_t(-2)};
  }

  static bool isEqual(FunctionSummary::VFuncId L, FunctionSummary::VFuncId R) {
    return L.GUID == R.GUID && L.Offset == R.Offset;
  }

  static unsigned getHashValue(FunctionSummary::VFuncId I) { return I.GUID; }
};

template <> struct DenseMapInfo<FunctionSummary::ConstVCall> {
  static FunctionSummary::ConstVCall getEmptyKey() {
    return {{0, uint64_t(-1)}, {}};
  }

  static FunctionSummary::ConstVCall getTombstoneKey() {
    return {{0, uint64_t(-2)}, {}};
  }

  static bool isEqual(FunctionSummary::ConstVCall L,
                      FunctionSummary::ConstVCall R) {
    return DenseMapInfo<FunctionSummary::VFuncId>::isEqual(L.VFunc, R.VFunc) &&
           L.Args == R.Args;
  }

  static unsigned getHashValue(FunctionSummary::ConstVCall I) {
    return I.VFunc.GUID;
  }
};

/// The ValueInfo and offset for a function within a vtable definition
/// initializer array.
struct VirtFuncOffset {
  VirtFuncOffset(ValueInfo VI, uint64_t Offset)
      : FuncVI(VI), VTableOffset(Offset) {}

  ValueInfo FuncVI;
  uint64_t VTableOffset;
};
/// List of functions referenced by a particular vtable definition.
using VTableFuncList = std::vector<VirtFuncOffset>;

/// Global variable summary information to aid decisions and
/// implementation of importing.
///
/// The global variable summary has two extra flags, telling if the variable
/// is readonly or writeonly. Both readonly and writeonly variables can be
/// optimized in the backend: readonly variables can be const-folded, while
/// writeonly vars can be completely eliminated together with their
/// corresponding stores. We let both things happen by internalizing such
/// variables after ThinLTO import.
class GlobalVarSummary : public GlobalValueSummary {
private:
  /// For vtable definitions this holds the list of functions and
  /// their corresponding offsets within the initializer array.
  std::unique_ptr<VTableFuncList> VTableFuncs;

public:
  struct GVarFlags {
    GVarFlags(bool ReadOnly, bool WriteOnly, bool Constant,
              GlobalObject::VCallVisibility Vis)
        : MaybeReadOnly(ReadOnly), MaybeWriteOnly(WriteOnly),
          Constant(Constant), VCallVisibility(Vis) {}

    // If true, indicates that this global variable might be accessed
    // purely by non-volatile load instructions. This in turn means
    // it can be internalized in source and destination modules during
    // thin LTO import, because it is neither modified nor is its
    // address taken.
    unsigned MaybeReadOnly : 1;
    // If true, indicates that the variable is possibly only written to, so
    // its value isn't loaded and its address isn't taken anywhere.
    // False when the 'Constant' attribute is set.
    unsigned MaybeWriteOnly : 1;
    // Indicates that the value is a compile-time constant. A global variable
    // can be 'Constant' while not being 'ReadOnly' on several occasions:
    // - it is volatile (e.g. a mapped device address);
    // - its address is taken, meaning that unlike 'ReadOnly' vars we can't
    //   internalize it.
    // Constant variables are always imported, thus giving the compiler an
    // opportunity to make some extra optimizations. Readonly constants
    // are also internalized.
    unsigned Constant : 1;
    // Set from metadata on vtable definitions during the module summary
    // analysis.
    unsigned VCallVisibility : 2;
  } VarFlags;

  GlobalVarSummary(GVFlags Flags, GVarFlags VarFlags,
                   std::vector<ValueInfo> Refs)
      : GlobalValueSummary(GlobalVarKind, Flags, std::move(Refs)),
        VarFlags(VarFlags) {}

  /// Check if this is a global variable summary.
  static bool classof(const GlobalValueSummary *GVS) {
    return GVS->getSummaryKind() == GlobalVarKind;
  }

  GVarFlags varflags() const { return VarFlags; }
  void setReadOnly(bool RO) { VarFlags.MaybeReadOnly = RO; }
  void setWriteOnly(bool WO) { VarFlags.MaybeWriteOnly = WO; }
  bool maybeReadOnly() const { return VarFlags.MaybeReadOnly; }
  bool maybeWriteOnly() const { return VarFlags.MaybeWriteOnly; }
  bool isConstant() const { return VarFlags.Constant; }
  void setVCallVisibility(GlobalObject::VCallVisibility Vis) {
    VarFlags.VCallVisibility = Vis;
  }
  GlobalObject::VCallVisibility getVCallVisibility() const {
    return (GlobalObject::VCallVisibility)VarFlags.VCallVisibility;
  }

  void setVTableFuncs(VTableFuncList Funcs) {
    assert(!VTableFuncs);
    VTableFuncs = std::make_unique<VTableFuncList>(std::move(Funcs));
  }

  ArrayRef<VirtFuncOffset> vTableFuncs() const {
    if (VTableFuncs)
      return *VTableFuncs;
    return {};
  }
};

struct TypeTestResolution {
  /// Specifies which kind of type check we should emit for this byte array.
  /// See http://clang.llvm.org/docs/ControlFlowIntegrityDesign.html for full
  /// details on each kind of check; the enumerators are described with
  /// reference to that document.
  enum Kind {
    Unsat,     ///< Unsatisfiable type (i.e. no global has this type metadata)
    ByteArray, ///< Test a byte array (first example)
    Inline,    ///< Inlined bit vector ("Short Inline Bit Vectors")
    Single,    ///< Single element (last example in "Short Inline Bit Vectors")
    AllOnes,   ///< All-ones bit vector ("Eliminating Bit Vector Checks for
               ///  All-Ones Bit Vectors")
    Unknown,   ///< Unknown (analysis not performed, don't lower)
  } TheKind = Unknown;

  /// Range of size-1 expressed as a bit width. For example, if the size is in
  /// range [1,256], this number will be 8. This helps generate the most compact
  /// instruction sequences.
  unsigned SizeM1BitWidth = 0;

  // The following fields are only used if the target does not support the use
  // of absolute symbols to store constants. Their meanings are the same as the
  // corresponding fields in LowerTypeTestsModule::TypeIdLowering in
  // LowerTypeTests.cpp.

  uint64_t AlignLog2 = 0;
  uint64_t SizeM1 = 0;
  uint8_t BitMask = 0;
  uint64_t InlineBits = 0;
};

struct WholeProgramDevirtResolution {
  enum Kind {
    Indir,        ///< Just do a regular virtual call
    SingleImpl,   ///< Single implementation devirtualization
    BranchFunnel, ///< When retpoline mitigation is enabled, use a branch funnel
                  ///< that is defined in the merged module. Otherwise same as
                  ///< Indir.
  } TheKind = Indir;

  std::string SingleImplName;

  struct ByArg {
    enum Kind {
      Indir,            ///< Just do a regular virtual call
      UniformRetVal,    ///< Uniform return value optimization
      UniqueRetVal,     ///< Unique return value optimization
      VirtualConstProp, ///< Virtual constant propagation
    } TheKind = Indir;

    /// Additional information for the resolution:
    /// - UniformRetVal: the uniform return value.
    /// - UniqueRetVal: the return value associated with the unique vtable (0 or
    ///   1).
    uint64_t Info = 0;

    // The following fields are only used if the target does not support the use
    // of absolute symbols to store constants.

    uint32_t Byte = 0;
    uint32_t Bit = 0;
  };

  /// Resolutions for calls with all constant integer arguments (excluding the
  /// first argument, "this"), where the key is the argument vector.
  std::map<std::vector<uint64_t>, ByArg> ResByArg;
};
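// A minimal population sketch (illustrative; the mangled name and argument
// values are made up): a single-implementation resolution, plus one
// constant-argument specialization stating that calls passing the argument
// vector {42} uniformly return 1.
//
//   WholeProgramDevirtResolution Res;
//   Res.TheKind = WholeProgramDevirtResolution::SingleImpl;
//   Res.SingleImplName = "_ZN1A1fEv";
//   WholeProgramDevirtResolution::ByArg &BA = Res.ResByArg[{42}];
//   BA.TheKind = WholeProgramDevirtResolution::ByArg::UniformRetVal;
//   BA.Info = 1;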

struct TypeIdSummary {
  TypeTestResolution TTRes;

  /// Mapping from byte offset to whole-program devirt resolution for that
  /// (typeid, byte offset) pair.
  std::map<uint64_t, WholeProgramDevirtResolution> WPDRes;
};

/// 160-bit SHA1 hash.
using ModuleHash = std::array<uint32_t, 5>;

/// Type used for iterating through the global value summary map.
using const_gvsummary_iterator = GlobalValueSummaryMapTy::const_iterator;
using gvsummary_iterator = GlobalValueSummaryMapTy::iterator;

/// String table to hold/own module path strings, which additionally holds the
/// module ID assigned to each module during the plugin step, as well as a hash
/// of the module. The StringMap makes a copy of and owns inserted strings.
using ModulePathStringTableTy = StringMap<std::pair<uint64_t, ModuleHash>>;

/// Map of global value GUID to its summary, used to identify values defined in
/// a particular module, and provide efficient access to their summary.
using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;

/// Map of a type GUID to type id string and summary (multimap used
/// in case of GUID conflicts).
using TypeIdSummaryMapTy =
    std::multimap<GlobalValue::GUID, std::pair<std::string, TypeIdSummary>>;

/// The following data structures summarize type metadata information.
/// For type metadata overview see https://llvm.org/docs/TypeMetadata.html.
/// Each type metadata includes both the type identifier and the offset of
/// the address point of the type (the address held by objects of that type
/// which may not be the beginning of the virtual table). Vtable definitions
/// are decorated with type metadata for the types they are compatible with.
///
/// Holds information about a vtable definition decorated with type metadata:
/// the vtable definition value and its address point offset within the type
/// identifier metadata it is decorated with (i.e., is compatible with).
struct TypeIdOffsetVtableInfo {
  TypeIdOffsetVtableInfo(uint64_t Offset, ValueInfo VI)
      : AddressPointOffset(Offset), VTableVI(VI) {}

  uint64_t AddressPointOffset;
  ValueInfo VTableVI;
};
/// List of vtable definitions decorated by a particular type identifier,
/// and their corresponding offsets in that type identifier's metadata.
/// Note that each type identifier may be compatible with multiple vtables, due
/// to inheritance, which is why this is a vector.
using TypeIdCompatibleVtableInfo = std::vector<TypeIdOffsetVtableInfo>;

/// Class to hold module path string table and global value map,
/// and encapsulate methods for operating on them.
class ModuleSummaryIndex {
private:
  /// Map from value name to list of summary instances for values of that
  /// name (there may be duplicates, e.g. in the COMDAT case).
  GlobalValueSummaryMapTy GlobalValueMap;

  /// Holds strings for combined index, mapping to the corresponding module ID.
  ModulePathStringTableTy ModulePathStringTable;

  /// Mapping from type identifier GUIDs to type identifier and its summary
  /// information. Produced by thin link.
  TypeIdSummaryMapTy TypeIdMap;

  /// Mapping from type identifier to information about vtables decorated
  /// with that type identifier's metadata. Produced by per module summary
  /// analysis and consumed by thin link. For more information, see description
  /// above where TypeIdCompatibleVtableInfo is defined.
  std::map<std::string, TypeIdCompatibleVtableInfo, std::less<>>
      TypeIdCompatibleVtableMap;

  /// Mapping from original ID to GUID. If original ID can map to multiple
  /// GUIDs, it will be mapped to 0.
  std::map<GlobalValue::GUID, GlobalValue::GUID> OidGuidMap;

  /// Indicates that summary-based GlobalValue GC has run, and values with
  /// GVFlags::Live==false are really dead. Otherwise, all values must be
  /// considered live.
  bool WithGlobalValueDeadStripping = false;

  /// Indicates that summary-based attribute propagation has run and
  /// GVarFlags::MaybeReadOnly / GVarFlags::MaybeWriteOnly are really
  /// read/write only.
  bool WithAttributePropagation = false;

  /// Indicates that summary-based DSOLocal propagation has run and the flag in
  /// every summary of a GV is synchronized.
  bool WithDSOLocalPropagation = false;

  /// Indicates that we have whole program visibility.
  bool WithWholeProgramVisibility = false;

  /// Indicates that summary-based synthetic entry count propagation has run.
  bool HasSyntheticEntryCounts = false;

  /// Indicates that we linked with an allocator supporting hot/cold operator new.
  bool WithSupportsHotColdNew = false;

  /// Indicates that the distributed backend should skip compilation of the
  /// module. This flag is supposed to be set by distributed ThinLTO indexing
  /// when it detects that the module is not needed during the final
  /// linking. As a result, the distributed backend should just output a
  /// minimal valid object file.
  bool SkipModuleByDistributedBackend = false;

  /// If true, we're performing analysis of an IR module, or parsing it along
  /// with the IR from assembly. The value 'false' means we're reading the
  /// summary from a BC or YAML source. Affects the type of value stored in
  /// the NameOrGV union.
  bool HaveGVs;

  // True if the index was created for a module compiled with -fsplit-lto-unit.
  bool EnableSplitLTOUnit;

  // True if the index was created for a module compiled with -funified-lto.
  bool UnifiedLTO;

  // True if some of the modules were compiled with -fsplit-lto-unit and
  // some were not. Set when the combined index is created during the thin link.
  bool PartiallySplitLTOUnits = false;

  /// True if any FunctionSummary contains a ParamAccess.
  bool HasParamAccess = false;

  std::set<std::string> CfiFunctionDefs;
  std::set<std::string> CfiFunctionDecls;

  // Used in cases where we want to record the name of a global, but
  // don't have the string owned elsewhere (e.g. the Strtab on a module).
  BumpPtrAllocator Alloc;
  StringSaver Saver;

  // The total number of basic blocks in the module in the per-module summary or
  // the total number of basic blocks in the LTO unit in the combined index.
  // FIXME: Putting this in the distributed ThinLTO index files breaks LTO
  // backend caching on any BB change to any linked file. It is currently not
  // used except in the case of a SamplePGO partial profile, and should be
  // reevaluated/redesigned to allow more effective incremental builds in that
  // case.
  uint64_t BlockCount;

  // List of unique stack ids (hashes). The stack id lists on the alloc and
  // callsite summaries store a 4-byte index into this list rather than the
  // id itself, for memory savings, since the number of unique ids is in
  // practice much smaller than the number of stack id references in the
  // summaries.
  std::vector<uint64_t> StackIds;

  // Temporary map used while building the StackIds list. Cleared when the
  // index is completely built, via releaseTemporaryMemory.
  std::map<uint64_t, unsigned> StackIdToIndex;

  // YAML I/O support.
  friend yaml::MappingTraits<ModuleSummaryIndex>;

  GlobalValueSummaryMapTy::value_type *
  getOrInsertValuePtr(GlobalValue::GUID GUID) {
    return &*GlobalValueMap.emplace(GUID, GlobalValueSummaryInfo(HaveGVs))
                 .first;
  }

public:
  // See HaveGVs variable comment.
  ModuleSummaryIndex(bool HaveGVs, bool EnableSplitLTOUnit = false,
                     bool UnifiedLTO = false)
      : HaveGVs(HaveGVs), EnableSplitLTOUnit(EnableSplitLTOUnit),
        UnifiedLTO(UnifiedLTO), Saver(Alloc), BlockCount(0) {}
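  // Construction sketch (illustrative): a per-module index built while the IR
  // is available, versus a combined index populated from bitcode summaries.
  //
  //   ModuleSummaryIndex PerModule(/*HaveGVs=*/true);
  //   ModuleSummaryIndex Combined(/*HaveGVs=*/false);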

  // Current version for the module summary in bitcode files.
  // The BitcodeSummaryVersion should be bumped whenever we introduce changes
  // in the way some records are interpreted, like flags for instance.
  // Note that incrementing this may require changes in both BitcodeReader.cpp
  // and BitcodeWriter.cpp.
  static constexpr uint64_t BitcodeSummaryVersion = 9;

  // Regular LTO module name for ASM writer
  static constexpr const char *getRegularLTOModuleName() {
    return "[Regular LTO]";
  }

  bool haveGVs() const { return HaveGVs; }

  uint64_t getFlags() const;
  void setFlags(uint64_t Flags);

  uint64_t getBlockCount() const { return BlockCount; }
  void addBlockCount(uint64_t C) { BlockCount += C; }
  void setBlockCount(uint64_t C) { BlockCount = C; }

  gvsummary_iterator begin() { return GlobalValueMap.begin(); }
  const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
  gvsummary_iterator end() { return GlobalValueMap.end(); }
  const_gvsummary_iterator end() const { return GlobalValueMap.end(); }
  size_t size() const { return GlobalValueMap.size(); }

  const std::vector<uint64_t> &stackIds() const { return StackIds; }

  unsigned addOrGetStackIdIndex(uint64_t StackId) {
    auto Inserted = StackIdToIndex.insert({StackId, StackIds.size()});
    if (Inserted.second)
      StackIds.push_back(StackId);
    return Inserted.first->second;
  }
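  // Usage sketch (illustrative; `Index` is a hypothetical ModuleSummaryIndex):
  // repeated ids map to the same 4-byte index, which is what the
  // alloc/callsite summaries store instead of the raw 8-byte hashes.
  //
  //   unsigned I0 = Index.addOrGetStackIdIndex(0xdeadbeef);
  //   unsigned I1 = Index.addOrGetStackIdIndex(0xdeadbeef);
  //   assert(I0 == I1 && Index.getStackIdAtIndex(I0) == 0xdeadbeef);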

  uint64_t getStackIdAtIndex(unsigned Index) const {
    assert(StackIds.size() > Index);
    return StackIds[Index];
  }

  // Facility to release memory from data structures only needed during index
  // construction (including while building combined index). Currently this only
  // releases the temporary map used while constructing a correspondence between
  // stack ids and their index in the StackIds vector. Mostly impactful when
  // building a large combined index.
  void releaseTemporaryMemory() {
    assert(StackIdToIndex.size() == StackIds.size());
    StackIdToIndex.clear();
    StackIds.shrink_to_fit();
  }

  /// Convenience function for doing a DFS on a ValueInfo. Marks the function in
  /// the FunctionHasParent map.
  static void discoverNodes(ValueInfo V,
                            std::map<ValueInfo, bool> &FunctionHasParent) {
    if (!V.getSummaryList().size())
      return; // skip external functions that don't have summaries

    // Mark discovered if we haven't yet
    auto S = FunctionHasParent.emplace(V, false);

    // Stop if we've already discovered this node
    if (!S.second)
      return;

    FunctionSummary *F =
        dyn_cast<FunctionSummary>(V.getSummaryList().front().get());
    assert(F != nullptr && "Expected FunctionSummary node");

    for (const auto &C : F->calls()) {
      // Insert node if necessary
      auto S = FunctionHasParent.emplace(C.first, true);

      // Skip nodes that we're sure have parents
      if (!S.second && S.first->second)
        continue;

      if (S.second)
        discoverNodes(C.first, FunctionHasParent);
      else
        S.first->second = true;
    }
  }

  // Calculate the callgraph root
  FunctionSummary calculateCallGraphRoot() {
    // Functions that have a parent will be marked true in the
    // FunctionHasParent map. Once we've marked all functions, the entries in
    // the map that are false have no parent (so they're the roots).
    std::map<ValueInfo, bool> FunctionHasParent;

    for (auto &S : *this) {
      // Skip external functions
      if (!S.second.SummaryList.size() ||
          !isa<FunctionSummary>(S.second.SummaryList.front().get()))
        continue;
      discoverNodes(ValueInfo(HaveGVs, &S), FunctionHasParent);
    }

    std::vector<FunctionSummary::EdgeTy> Edges;
    // create edges to all roots in the Index
    for (auto &P : FunctionHasParent) {
      if (P.second)
        continue; // skip over non-root nodes
      Edges.push_back(std::make_pair(P.first, CalleeInfo{}));
    }
    if (Edges.empty()) {
      // Failed to find root - return an empty node
      return FunctionSummary::makeDummyFunctionSummary({});
    }
    auto CallGraphRoot = FunctionSummary::makeDummyFunctionSummary(Edges);
    return CallGraphRoot;
  }

  bool withGlobalValueDeadStripping() const {
    return WithGlobalValueDeadStripping;
  }
  void setWithGlobalValueDeadStripping() {
    WithGlobalValueDeadStripping = true;
  }

  bool withAttributePropagation() const { return WithAttributePropagation; }
  void setWithAttributePropagation() {
    WithAttributePropagation = true;
  }

  bool withDSOLocalPropagation() const { return WithDSOLocalPropagation; }
  void setWithDSOLocalPropagation() { WithDSOLocalPropagation = true; }

  bool withWholeProgramVisibility() const { return WithWholeProgramVisibility; }
  void setWithWholeProgramVisibility() { WithWholeProgramVisibility = true; }

  bool isReadOnly(const GlobalVarSummary *GVS) const {
    return WithAttributePropagation && GVS->maybeReadOnly();
  }
  bool isWriteOnly(const GlobalVarSummary *GVS) const {
    return WithAttributePropagation && GVS->maybeWriteOnly();
  }
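  // Note / sketch (illustrative; `Index` and `S` are hypothetical): these
  // accessors deliberately gate on WithAttributePropagation, so the Maybe*
  // flags are not trusted until propagateAttributes() has run.
  //
  //   if (const auto *GVS = dyn_cast<GlobalVarSummary>(S)) {
  //     if (Index.isReadOnly(GVS))  // always false unless propagation ran
  //       ; // safe to treat the variable as read-only
  //   }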

  bool hasSyntheticEntryCounts() const { return HasSyntheticEntryCounts; }
  void setHasSyntheticEntryCounts() { HasSyntheticEntryCounts = true; }

  bool withSupportsHotColdNew() const { return WithSupportsHotColdNew; }
  void setWithSupportsHotColdNew() { WithSupportsHotColdNew = true; }

  bool skipModuleByDistributedBackend() const {
    return SkipModuleByDistributedBackend;
  }
  void setSkipModuleByDistributedBackend() {
    SkipModuleByDistributedBackend = true;
  }

  bool enableSplitLTOUnit() const { return EnableSplitLTOUnit; }
  void setEnableSplitLTOUnit() { EnableSplitLTOUnit = true; }

  bool hasUnifiedLTO() const { return UnifiedLTO; }
  void setUnifiedLTO() { UnifiedLTO = true; }

  bool partiallySplitLTOUnits() const { return PartiallySplitLTOUnits; }
  void setPartiallySplitLTOUnits() { PartiallySplitLTOUnits = true; }

  bool hasParamAccess() const { return HasParamAccess; }

  bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
    return !WithGlobalValueDeadStripping || GVS->isLive();
  }
  bool isGUIDLive(GlobalValue::GUID GUID) const;

  /// Return a ValueInfo for the index value_type (convenient when iterating
  /// over the index).
  ValueInfo getValueInfo(const GlobalValueSummaryMapTy::value_type &R) const {
    return ValueInfo(HaveGVs, &R);
  }

  /// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
  ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
    auto I = GlobalValueMap.find(GUID);
    return ValueInfo(HaveGVs, I == GlobalValueMap.end() ? nullptr : &*I);
  }
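  // Lookup sketch (illustrative; `Index` is a hypothetical instance):
  // ValueInfo converts to bool, so a missing GUID can be detected directly.
  //
  //   if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID("foo")))
  //     for (const auto &S : VI.getSummaryList())
  //       (void)S; // inspect each summary for "foo"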

  /// Return a ValueInfo for \p GUID.
  ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID) {
    return ValueInfo(HaveGVs, getOrInsertValuePtr(GUID));
  }

  // Save a string in the Index. Use before passing Name to
  // getOrInsertValueInfo when the string isn't owned elsewhere (e.g. on the
  // module's Strtab).
  StringRef saveString(StringRef String) { return Saver.save(String); }

  /// Return a ValueInfo for \p GUID, setting its name to \p Name.
  ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID, StringRef Name) {
    assert(!HaveGVs);
    auto VP = getOrInsertValuePtr(GUID);
    VP->second.U.Name = Name;
    return ValueInfo(HaveGVs, VP);
  }
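  // Usage sketch (illustrative; `Index` and `ParsedName` are hypothetical):
  // when parsing without IR (HaveGVs == false) and the name is not owned
  // elsewhere, save it into the index first.
  //
  //   StringRef Owned = Index.saveString(ParsedName);
  //   ValueInfo VI =
  //       Index.getOrInsertValueInfo(GlobalValue::getGUID(Owned), Owned);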

  /// Return a ValueInfo for \p GV and mark it as belonging to GV.
  ValueInfo getOrInsertValueInfo(const GlobalValue *GV) {
    assert(HaveGVs);
    auto VP = getOrInsertValuePtr(GV->getGUID());
    VP->second.U.GV = GV;
    return ValueInfo(HaveGVs, VP);
  }

  /// Return the GUID for \p OriginalID in the OidGuidMap.
  GlobalValue::GUID getGUIDFromOriginalID(GlobalValue::GUID OriginalID) const {
    const auto I = OidGuidMap.find(OriginalID);
    return I == OidGuidMap.end() ? 0 : I->second;
  }

  std::set<std::string> &cfiFunctionDefs() { return CfiFunctionDefs; }
  const std::set<std::string> &cfiFunctionDefs() const { return CfiFunctionDefs; }

  std::set<std::string> &cfiFunctionDecls() { return CfiFunctionDecls; }
  const std::set<std::string> &cfiFunctionDecls() const { return CfiFunctionDecls; }

  /// Add a global value summary for a value.
  void addGlobalValueSummary(const GlobalValue &GV,
                             std::unique_ptr<GlobalValueSummary> Summary) {
    addGlobalValueSummary(getOrInsertValueInfo(&GV), std::move(Summary));
  }

  /// Add a global value summary for a value of the given name.
  void addGlobalValueSummary(StringRef ValueName,
                             std::unique_ptr<GlobalValueSummary> Summary) {
    addGlobalValueSummary(getOrInsertValueInfo(GlobalValue::getGUID(ValueName)),
                          std::move(Summary));
  }

  /// Add a global value summary for the given ValueInfo.
  void addGlobalValueSummary(ValueInfo VI,
                             std::unique_ptr<GlobalValueSummary> Summary) {
    if (const FunctionSummary *FS = dyn_cast<FunctionSummary>(Summary.get()))
      HasParamAccess |= !FS->paramAccesses().empty();
    addOriginalName(VI.getGUID(), Summary->getOriginalName());
    // Here we have a notionally const VI, but the value it points to is owned
    // by the non-const *this.
    const_cast<GlobalValueSummaryMapTy::value_type *>(VI.getRef())
        ->second.SummaryList.push_back(std::move(Summary));
  }

  /// Add an original name for the value of the given GUID.
  void addOriginalName(GlobalValue::GUID ValueGUID,
                       GlobalValue::GUID OrigGUID) {
    if (OrigGUID == 0 || ValueGUID == OrigGUID)
      return;
    if (OidGuidMap.count(OrigGUID) && OidGuidMap[OrigGUID] != ValueGUID)
      OidGuidMap[OrigGUID] = 0;
    else
      OidGuidMap[OrigGUID] = ValueGUID;
  }

  /// Find the summary for ValueInfo \p VI in module \p ModuleId, or nullptr if
  /// not found.
  GlobalValueSummary *findSummaryInModule(ValueInfo VI, StringRef ModuleId) const {
    auto SummaryList = VI.getSummaryList();
    auto Summary =
        llvm::find_if(SummaryList,
                      [&](const std::unique_ptr<GlobalValueSummary> &Summary) {
                        return Summary->modulePath() == ModuleId;
                      });
    if (Summary == SummaryList.end())
      return nullptr;
    return Summary->get();
  }

  /// Find the summary for global \p GUID in module \p ModuleId, or nullptr if
  /// not found.
  GlobalValueSummary *findSummaryInModule(GlobalValue::GUID ValueGUID,
                                          StringRef ModuleId) const {
    auto CalleeInfo = getValueInfo(ValueGUID);
    if (!CalleeInfo)
      return nullptr; // This function does not have a summary
    return findSummaryInModule(CalleeInfo, ModuleId);
  }

  /// Returns the first GlobalValueSummary for \p GV, asserting that there
  /// is only one if \p PerModuleIndex.
  GlobalValueSummary *getGlobalValueSummary(const GlobalValue &GV,
                                            bool PerModuleIndex = true) const {
    assert(GV.hasName() && "Can't get GlobalValueSummary for GV with no name");
    return getGlobalValueSummary(GV.getGUID(), PerModuleIndex);
  }

  /// Returns the first GlobalValueSummary for \p ValueGUID, asserting that
  /// there is only one if \p PerModuleIndex.
  GlobalValueSummary *getGlobalValueSummary(GlobalValue::GUID ValueGUID,
                                            bool PerModuleIndex = true) const;

  /// Table of modules, containing module hash and id.
  const StringMap<std::pair<uint64_t, ModuleHash>> &modulePaths() const {
    return ModulePathStringTable;
  }

  /// Table of modules, containing hash and id.
  StringMap<std::pair<uint64_t, ModuleHash>> &modulePaths() {
    return ModulePathStringTable;
  }

  /// Get the module ID recorded for the given module path.
  uint64_t getModuleId(const StringRef ModPath) const {
    return ModulePathStringTable.lookup(ModPath).first;
  }

  /// Get the module SHA1 hash recorded for the given module path.
  const ModuleHash &getModuleHash(const StringRef ModPath) const {
    auto It = ModulePathStringTable.find(ModPath);
    assert(It != ModulePathStringTable.end() && "Module not registered");
    return It->second.second;
  }

  /// Convenience method for creating a promoted global name
  /// for the given value name of a local, and its original module's ID.
  static std::string getGlobalNameForLocal(StringRef Name, ModuleHash ModHash) {
    std::string Suffix = utostr((uint64_t(ModHash[0]) << 32) |
                                ModHash[1]); // Take the first 64 bits
    return getGlobalNameForLocal(Name, Suffix);
  }

  static std::string getGlobalNameForLocal(StringRef Name, StringRef Suffix) {
    SmallString<256> NewName(Name);
    NewName += ".llvm.";
    NewName += Suffix;
    return std::string(NewName.str());
  }
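  // Worked example (illustrative; the hash values are made up): the suffix is
  // the first 64 bits of the module hash, rendered in decimal.
  //
  //   ModuleHash H{{0x00000001, 0x00000002, 0, 0, 0}};
  //   // Suffix == (uint64_t(1) << 32) | 2 == 4294967298
  //   std::string N = ModuleSummaryIndex::getGlobalNameForLocal("foo", H);
  //   // N == "foo.llvm.4294967298"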

  /// Helper to obtain the unpromoted name for a global value (or the original
  /// name if not promoted). Splits off only the rightmost ".llvm.${hash}"
  /// suffix, because certain clients (not clang at the moment) can perform
  /// two rounds of ThinLTO optimization, and therefore promotion, leaving a
  /// name with more than one suffix.
  static StringRef getOriginalNameBeforePromote(StringRef Name) {
    std::pair<StringRef, StringRef> Pair = Name.rsplit(".llvm.");
    return Pair.first;
  }
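  // Behavior sketch (illustrative): only the rightmost suffix is removed, so
  // a doubly promoted name keeps its inner suffix.
  //
  //   getOriginalNameBeforePromote("foo.llvm.123");          // "foo"
  //   getOriginalNameBeforePromote("foo.llvm.123.llvm.456"); // "foo.llvm.123"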

  using ModuleInfo = ModulePathStringTableTy::value_type;

  /// Add a new module with the given \p Hash, mapped to the given \p ModId,
  /// and return a pointer to the module entry.
  ModuleInfo *addModule(StringRef ModPath, uint64_t ModId,
                        ModuleHash Hash = ModuleHash{{0}}) {
    return &*ModulePathStringTable.insert({ModPath, {ModId, Hash}}).first;
  }
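  // Registration sketch (illustrative; `Index`, the path and the id are made
  // up):
  //
  //   ModuleSummaryIndex::ModuleInfo *MI =
  //       Index.addModule("a.o", /*ModId=*/0);
  //   uint64_t Id = Index.getModuleId("a.o");   // == 0
  //   (void)MI; (void)Id;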

  /// Return module entry for module with the given \p ModPath.
  ModuleInfo *getModule(StringRef ModPath) {
    auto It = ModulePathStringTable.find(ModPath);
    assert(It != ModulePathStringTable.end() && "Module not registered");
    return &*It;
  }

  /// Return module entry for module with the given \p ModPath.
  const ModuleInfo *getModule(StringRef ModPath) const {
    auto It = ModulePathStringTable.find(ModPath);
    assert(It != ModulePathStringTable.end() && "Module not registered");
    return &*It;
  }

  /// Check if the given Module has any functions available for exporting
  /// in the index. We consider any module present in the ModulePathStringTable
  /// to have exported functions.
  bool hasExportedFunctions(const Module &M) const {
    return ModulePathStringTable.count(M.getModuleIdentifier());
  }

  const TypeIdSummaryMapTy &typeIds() const { return TypeIdMap; }

  /// Return an existing or new TypeIdSummary entry for \p TypeId.
  /// This accessor can mutate the map and therefore should not be used in
  /// the ThinLTO backends.
  TypeIdSummary &getOrInsertTypeIdSummary(StringRef TypeId) {
    auto TidIter = TypeIdMap.equal_range(GlobalValue::getGUID(TypeId));
    for (auto It = TidIter.first; It != TidIter.second; ++It)
      if (It->second.first == TypeId)
        return It->second.second;
    auto It = TypeIdMap.insert(
        {GlobalValue::getGUID(TypeId), {std::string(TypeId), TypeIdSummary()}});
    return It->second.second;
  }
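  // Mutation sketch (illustrative; `Index` and the type id string are made
  // up). Note the multimap: distinct type id strings may hash to the same
  // GUID, so this accessor matches on the string, not just the GUID.
  //
  //   TypeIdSummary &TIS = Index.getOrInsertTypeIdSummary("_ZTS1A");
  //   TIS.TTRes.TheKind = TypeTestResolution::Single;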

  /// This returns either a pointer to the type id summary (if present in the
  /// summary map) or null (if not present). This may be used when importing.
  const TypeIdSummary *getTypeIdSummary(StringRef TypeId) const {
    auto TidIter = TypeIdMap.equal_range(GlobalValue::getGUID(TypeId));
    for (auto It = TidIter.first; It != TidIter.second; ++It)
      if (It->second.first == TypeId)
        return &It->second.second;
    return nullptr;
  }

  TypeIdSummary *getTypeIdSummary(StringRef TypeId) {
    return const_cast<TypeIdSummary *>(
        static_cast<const ModuleSummaryIndex *>(this)->getTypeIdSummary(
            TypeId));
  }

  const auto &typeIdCompatibleVtableMap() const {
    return TypeIdCompatibleVtableMap;
  }

  /// Return an existing or new TypeIdCompatibleVtableMap entry for \p TypeId.
  /// This accessor can mutate the map and therefore should not be used in
  /// the ThinLTO backends.
  TypeIdCompatibleVtableInfo &
  getOrInsertTypeIdCompatibleVtableSummary(StringRef TypeId) {
    return TypeIdCompatibleVtableMap[std::string(TypeId)];
  }

  /// For the given \p TypeId, this returns the TypeIdCompatibleVtableMap
  /// entry if present in the summary map. This may be used when importing.
  std::optional<TypeIdCompatibleVtableInfo>
  getTypeIdCompatibleVtableSummary(StringRef TypeId) const {
    auto I = TypeIdCompatibleVtableMap.find(TypeId);
    if (I == TypeIdCompatibleVtableMap.end())
      return std::nullopt;
    return I->second;
  }

  /// Collect for the given module the list of functions it defines
  /// (GUID -> Summary).
  void collectDefinedFunctionsForModule(StringRef ModulePath,
                                        GVSummaryMapTy &GVSummaryMap) const;

  /// Collect for each module the list of Summaries it defines (GUID ->
  /// Summary).
  template <class Map>
  void
  collectDefinedGVSummariesPerModule(Map &ModuleToDefinedGVSummaries) const {
    for (const auto &GlobalList : *this) {
      auto GUID = GlobalList.first;
      for (const auto &Summary : GlobalList.second.SummaryList) {
        ModuleToDefinedGVSummaries[Summary->modulePath()][GUID] = Summary.get();
      }
    }
  }
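  // Usage sketch (illustrative; `Index` is hypothetical): any map-of-maps
  // with the right key types works; a StringMap keyed by module path is the
  // typical shape.
  //
  //   StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
  //   Index.collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);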

  /// Print to an output stream.
  void print(raw_ostream &OS, bool IsForDebug = false) const;

  /// Dump to stderr (for debugging).
  void dump() const;

  /// Export summary to dot file for GraphViz.
  void
  exportToDot(raw_ostream &OS,
              const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) const;

  /// Print out strongly connected components for debugging.
  void dumpSCCs(raw_ostream &OS);

  /// Do the access attribute and DSOLocal propagation in the combined index.
  void propagateAttributes(const DenseSet<GlobalValue::GUID> &PreservedSymbols);

  /// Checks if we can import a global variable from another module.
  bool canImportGlobalVar(const GlobalValueSummary *S, bool AnalyzeRefs) const;
};

/// GraphTraits definition to build SCCs for the index.
template <> struct GraphTraits<ValueInfo> {
  using NodeRef = ValueInfo;
  using EdgeRef = FunctionSummary::EdgeTy &;

  static NodeRef valueInfoFromEdge(FunctionSummary::EdgeTy &P) {
    return P.first;
  }
  using ChildIteratorType =
      mapped_iterator<std::vector<FunctionSummary::EdgeTy>::iterator,
                      decltype(&valueInfoFromEdge)>;

  using ChildEdgeIteratorType = std::vector<FunctionSummary::EdgeTy>::iterator;

  static NodeRef getEntryNode(ValueInfo V) { return V; }

  static ChildIteratorType child_begin(NodeRef N) {
    if (!N.getSummaryList().size()) // handle external function
      return ChildIteratorType(
          FunctionSummary::ExternalNode.CallGraphEdgeList.begin(),
          &valueInfoFromEdge);
    FunctionSummary *F =
        cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
    return ChildIteratorType(F->CallGraphEdgeList.begin(), &valueInfoFromEdge);
  }

  static ChildIteratorType child_end(NodeRef N) {
    if (!N.getSummaryList().size()) // handle external function
      return ChildIteratorType(
          FunctionSummary::ExternalNode.CallGraphEdgeList.end(),
          &valueInfoFromEdge);
    FunctionSummary *F =
        cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
    return ChildIteratorType(F->CallGraphEdgeList.end(), &valueInfoFromEdge);
  }

  static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
    if (!N.getSummaryList().size()) // handle external function
      return FunctionSummary::ExternalNode.CallGraphEdgeList.begin();

    FunctionSummary *F =
        cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
    return F->CallGraphEdgeList.begin();
  }

  static ChildEdgeIteratorType child_edge_end(NodeRef N) {
    if (!N.getSummaryList().size()) // handle external function
      return FunctionSummary::ExternalNode.CallGraphEdgeList.end();

    FunctionSummary *F =
        cast<FunctionSummary>(N.getSummaryList().front()->getBaseObject());
    return F->CallGraphEdgeList.end();
  }

  static NodeRef edge_dest(EdgeRef E) { return E.first; }
};

template <>
struct GraphTraits<ModuleSummaryIndex *> : public GraphTraits<ValueInfo> {
  static NodeRef getEntryNode(ModuleSummaryIndex *I) {
    std::unique_ptr<GlobalValueSummary> Root =
        std::make_unique<FunctionSummary>(I->calculateCallGraphRoot());
    GlobalValueSummaryInfo G(I->haveGVs());
    G.SummaryList.push_back(std::move(Root));
    static auto P =
        GlobalValueSummaryMapTy::value_type(GlobalValue::GUID(0), std::move(G));
    return ValueInfo(I->haveGVs(), &P);
  }
};
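// Traversal sketch (illustrative; assumes llvm/ADT/SCCIterator.h and a
// hypothetical `Index`): with the specializations above, the summary-based
// call graph can be walked SCC by SCC, starting from the synthetic root.
//
//   for (auto I = scc_begin(&Index); !I.isAtEnd(); ++I)
//     for (const ValueInfo &V : *I)
//       (void)V; // process each member of the strongly connected component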
} // end namespace llvm

#endif // LLVM_IR_MODULESUMMARYINDEX_H
IR/IntrinsicsMips.h
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_MIPS_ENUMS_H
#define LLVM_IR_INTRINSIC_MIPS_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum MIPSIntrinsics : unsigned {
// Enum values for intrinsics
    mips_absq_s_ph = 5228,                            // llvm.mips.absq.s.ph
    mips_absq_s_qb,                            // llvm.mips.absq.s.qb
    mips_absq_s_w,                             // llvm.mips.absq.s.w
    mips_add_a_b,                              // llvm.mips.add.a.b
    mips_add_a_d,                              // llvm.mips.add.a.d
    mips_add_a_h,                              // llvm.mips.add.a.h
    mips_add_a_w,                              // llvm.mips.add.a.w
    mips_addq_ph,                              // llvm.mips.addq.ph
    mips_addq_s_ph,                            // llvm.mips.addq.s.ph
    mips_addq_s_w,                             // llvm.mips.addq.s.w
    mips_addqh_ph,                             // llvm.mips.addqh.ph
    mips_addqh_r_ph,                           // llvm.mips.addqh.r.ph
    mips_addqh_r_w,                            // llvm.mips.addqh.r.w
    mips_addqh_w,                              // llvm.mips.addqh.w
    mips_adds_a_b,                             // llvm.mips.adds.a.b
    mips_adds_a_d,                             // llvm.mips.adds.a.d
    mips_adds_a_h,                             // llvm.mips.adds.a.h
    mips_adds_a_w,                             // llvm.mips.adds.a.w
    mips_adds_s_b,                             // llvm.mips.adds.s.b
    mips_adds_s_d,                             // llvm.mips.adds.s.d
    mips_adds_s_h,                             // llvm.mips.adds.s.h
    mips_adds_s_w,                             // llvm.mips.adds.s.w
    mips_adds_u_b,                             // llvm.mips.adds.u.b
    mips_adds_u_d,                             // llvm.mips.adds.u.d
    mips_adds_u_h,                             // llvm.mips.adds.u.h
    mips_adds_u_w,                             // llvm.mips.adds.u.w
    mips_addsc,                                // llvm.mips.addsc
    mips_addu_ph,                              // llvm.mips.addu.ph
    mips_addu_qb,                              // llvm.mips.addu.qb
    mips_addu_s_ph,                            // llvm.mips.addu.s.ph
    mips_addu_s_qb,                            // llvm.mips.addu.s.qb
    mips_adduh_qb,                             // llvm.mips.adduh.qb
    mips_adduh_r_qb,                           // llvm.mips.adduh.r.qb
    mips_addv_b,                               // llvm.mips.addv.b
    mips_addv_d,                               // llvm.mips.addv.d
    mips_addv_h,                               // llvm.mips.addv.h
    mips_addv_w,                               // llvm.mips.addv.w
    mips_addvi_b,                              // llvm.mips.addvi.b
    mips_addvi_d,                              // llvm.mips.addvi.d
    mips_addvi_h,                              // llvm.mips.addvi.h
    mips_addvi_w,                              // llvm.mips.addvi.w
    mips_addwc,                                // llvm.mips.addwc
    mips_and_v,                                // llvm.mips.and.v
    mips_andi_b,                               // llvm.mips.andi.b
    mips_append,                               // llvm.mips.append
    mips_asub_s_b,                             // llvm.mips.asub.s.b
    mips_asub_s_d,                             // llvm.mips.asub.s.d
    mips_asub_s_h,                             // llvm.mips.asub.s.h
    mips_asub_s_w,                             // llvm.mips.asub.s.w
    mips_asub_u_b,                             // llvm.mips.asub.u.b
    mips_asub_u_d,                             // llvm.mips.asub.u.d
    mips_asub_u_h,                             // llvm.mips.asub.u.h
    mips_asub_u_w,                             // llvm.mips.asub.u.w
    mips_ave_s_b,                              // llvm.mips.ave.s.b
    mips_ave_s_d,                              // llvm.mips.ave.s.d
    mips_ave_s_h,                              // llvm.mips.ave.s.h
    mips_ave_s_w,                              // llvm.mips.ave.s.w
    mips_ave_u_b,                              // llvm.mips.ave.u.b
    mips_ave_u_d,                              // llvm.mips.ave.u.d
    mips_ave_u_h,                              // llvm.mips.ave.u.h
    mips_ave_u_w,                              // llvm.mips.ave.u.w
    mips_aver_s_b,                             // llvm.mips.aver.s.b
    mips_aver_s_d,                             // llvm.mips.aver.s.d
    mips_aver_s_h,                             // llvm.mips.aver.s.h
    mips_aver_s_w,                             // llvm.mips.aver.s.w
    mips_aver_u_b,                             // llvm.mips.aver.u.b
    mips_aver_u_d,                             // llvm.mips.aver.u.d
    mips_aver_u_h,                             // llvm.mips.aver.u.h
    mips_aver_u_w,                             // llvm.mips.aver.u.w
    mips_balign,                               // llvm.mips.balign
    mips_bclr_b,                               // llvm.mips.bclr.b
    mips_bclr_d,                               // llvm.mips.bclr.d
    mips_bclr_h,                               // llvm.mips.bclr.h
    mips_bclr_w,                               // llvm.mips.bclr.w
    mips_bclri_b,                              // llvm.mips.bclri.b
    mips_bclri_d,                              // llvm.mips.bclri.d
    mips_bclri_h,                              // llvm.mips.bclri.h
    mips_bclri_w,                              // llvm.mips.bclri.w
    mips_binsl_b,                              // llvm.mips.binsl.b
    mips_binsl_d,                              // llvm.mips.binsl.d
    mips_binsl_h,                              // llvm.mips.binsl.h
    mips_binsl_w,                              // llvm.mips.binsl.w
    mips_binsli_b,                             // llvm.mips.binsli.b
    mips_binsli_d,                             // llvm.mips.binsli.d
    mips_binsli_h,                             // llvm.mips.binsli.h
    mips_binsli_w,                             // llvm.mips.binsli.w
    mips_binsr_b,                              // llvm.mips.binsr.b
    mips_binsr_d,                              // llvm.mips.binsr.d
    mips_binsr_h,                              // llvm.mips.binsr.h
    mips_binsr_w,                              // llvm.mips.binsr.w
    mips_binsri_b,                             // llvm.mips.binsri.b
    mips_binsri_d,                             // llvm.mips.binsri.d
    mips_binsri_h,                             // llvm.mips.binsri.h
    mips_binsri_w,                             // llvm.mips.binsri.w
    mips_bitrev,                               // llvm.mips.bitrev
    mips_bmnz_v,                               // llvm.mips.bmnz.v
    mips_bmnzi_b,                              // llvm.mips.bmnzi.b
    mips_bmz_v,                                // llvm.mips.bmz.v
    mips_bmzi_b,                               // llvm.mips.bmzi.b
    mips_bneg_b,                               // llvm.mips.bneg.b
    mips_bneg_d,                               // llvm.mips.bneg.d
    mips_bneg_h,                               // llvm.mips.bneg.h
    mips_bneg_w,                               // llvm.mips.bneg.w
    mips_bnegi_b,                              // llvm.mips.bnegi.b
    mips_bnegi_d,                              // llvm.mips.bnegi.d
    mips_bnegi_h,                              // llvm.mips.bnegi.h
    mips_bnegi_w,                              // llvm.mips.bnegi.w
    mips_bnz_b,                                // llvm.mips.bnz.b
    mips_bnz_d,                                // llvm.mips.bnz.d
    mips_bnz_h,                                // llvm.mips.bnz.h
    mips_bnz_v,                                // llvm.mips.bnz.v
    mips_bnz_w,                                // llvm.mips.bnz.w
    mips_bposge32,                             // llvm.mips.bposge32
    mips_bsel_v,                               // llvm.mips.bsel.v
    mips_bseli_b,                              // llvm.mips.bseli.b
    mips_bset_b,                               // llvm.mips.bset.b
    mips_bset_d,                               // llvm.mips.bset.d
    mips_bset_h,                               // llvm.mips.bset.h
    mips_bset_w,                               // llvm.mips.bset.w
    mips_bseti_b,                              // llvm.mips.bseti.b
    mips_bseti_d,                              // llvm.mips.bseti.d
    mips_bseti_h,                              // llvm.mips.bseti.h
    mips_bseti_w,                              // llvm.mips.bseti.w
    mips_bz_b,                                 // llvm.mips.bz.b
    mips_bz_d,                                 // llvm.mips.bz.d
    mips_bz_h,                                 // llvm.mips.bz.h
    mips_bz_v,                                 // llvm.mips.bz.v
    mips_bz_w,                                 // llvm.mips.bz.w
    mips_ceq_b,                                // llvm.mips.ceq.b
    mips_ceq_d,                                // llvm.mips.ceq.d
    mips_ceq_h,                                // llvm.mips.ceq.h
    mips_ceq_w,                                // llvm.mips.ceq.w
    mips_ceqi_b,                               // llvm.mips.ceqi.b
    mips_ceqi_d,                               // llvm.mips.ceqi.d
    mips_ceqi_h,                               // llvm.mips.ceqi.h
    mips_ceqi_w,                               // llvm.mips.ceqi.w
    mips_cfcmsa,                               // llvm.mips.cfcmsa
    mips_cle_s_b,                              // llvm.mips.cle.s.b
    mips_cle_s_d,                              // llvm.mips.cle.s.d
    mips_cle_s_h,                              // llvm.mips.cle.s.h
    mips_cle_s_w,                              // llvm.mips.cle.s.w
    mips_cle_u_b,                              // llvm.mips.cle.u.b
    mips_cle_u_d,                              // llvm.mips.cle.u.d
    mips_cle_u_h,                              // llvm.mips.cle.u.h
    mips_cle_u_w,                              // llvm.mips.cle.u.w
    mips_clei_s_b,                             // llvm.mips.clei.s.b
    mips_clei_s_d,                             // llvm.mips.clei.s.d
    mips_clei_s_h,                             // llvm.mips.clei.s.h
    mips_clei_s_w,                             // llvm.mips.clei.s.w
    mips_clei_u_b,                             // llvm.mips.clei.u.b
    mips_clei_u_d,                             // llvm.mips.clei.u.d
    mips_clei_u_h,                             // llvm.mips.clei.u.h
    mips_clei_u_w,                             // llvm.mips.clei.u.w
    mips_clt_s_b,                              // llvm.mips.clt.s.b
    mips_clt_s_d,                              // llvm.mips.clt.s.d
    mips_clt_s_h,                              // llvm.mips.clt.s.h
    mips_clt_s_w,                              // llvm.mips.clt.s.w
    mips_clt_u_b,                              // llvm.mips.clt.u.b
    mips_clt_u_d,                              // llvm.mips.clt.u.d
    mips_clt_u_h,                              // llvm.mips.clt.u.h
    mips_clt_u_w,                              // llvm.mips.clt.u.w
    mips_clti_s_b,                             // llvm.mips.clti.s.b
    mips_clti_s_d,                             // llvm.mips.clti.s.d
    mips_clti_s_h,                             // llvm.mips.clti.s.h
    mips_clti_s_w,                             // llvm.mips.clti.s.w
    mips_clti_u_b,                             // llvm.mips.clti.u.b
    mips_clti_u_d,                             // llvm.mips.clti.u.d
    mips_clti_u_h,                             // llvm.mips.clti.u.h
    mips_clti_u_w,                             // llvm.mips.clti.u.w
    mips_cmp_eq_ph,                            // llvm.mips.cmp.eq.ph
    mips_cmp_le_ph,                            // llvm.mips.cmp.le.ph
    mips_cmp_lt_ph,                            // llvm.mips.cmp.lt.ph
    mips_cmpgdu_eq_qb,                         // llvm.mips.cmpgdu.eq.qb
    mips_cmpgdu_le_qb,                         // llvm.mips.cmpgdu.le.qb
    mips_cmpgdu_lt_qb,                         // llvm.mips.cmpgdu.lt.qb
    mips_cmpgu_eq_qb,                          // llvm.mips.cmpgu.eq.qb
    mips_cmpgu_le_qb,                          // llvm.mips.cmpgu.le.qb
    mips_cmpgu_lt_qb,                          // llvm.mips.cmpgu.lt.qb
    mips_cmpu_eq_qb,                           // llvm.mips.cmpu.eq.qb
    mips_cmpu_le_qb,                           // llvm.mips.cmpu.le.qb
    mips_cmpu_lt_qb,                           // llvm.mips.cmpu.lt.qb
    mips_copy_s_b,                             // llvm.mips.copy.s.b
    mips_copy_s_d,                             // llvm.mips.copy.s.d
    mips_copy_s_h,                             // llvm.mips.copy.s.h
    mips_copy_s_w,                             // llvm.mips.copy.s.w
    mips_copy_u_b,                             // llvm.mips.copy.u.b
    mips_copy_u_d,                             // llvm.mips.copy.u.d
    mips_copy_u_h,                             // llvm.mips.copy.u.h
    mips_copy_u_w,                             // llvm.mips.copy.u.w
    mips_ctcmsa,                               // llvm.mips.ctcmsa
    mips_div_s_b,                              // llvm.mips.div.s.b
    mips_div_s_d,                              // llvm.mips.div.s.d
    mips_div_s_h,                              // llvm.mips.div.s.h
    mips_div_s_w,                              // llvm.mips.div.s.w
    mips_div_u_b,                              // llvm.mips.div.u.b
    mips_div_u_d,                              // llvm.mips.div.u.d
    mips_div_u_h,                              // llvm.mips.div.u.h
    mips_div_u_w,                              // llvm.mips.div.u.w
    mips_dlsa,                                 // llvm.mips.dlsa
    mips_dotp_s_d,                             // llvm.mips.dotp.s.d
    mips_dotp_s_h,                             // llvm.mips.dotp.s.h
    mips_dotp_s_w,                             // llvm.mips.dotp.s.w
    mips_dotp_u_d,                             // llvm.mips.dotp.u.d
    mips_dotp_u_h,                             // llvm.mips.dotp.u.h
    mips_dotp_u_w,                             // llvm.mips.dotp.u.w
    mips_dpa_w_ph,                             // llvm.mips.dpa.w.ph
    mips_dpadd_s_d,                            // llvm.mips.dpadd.s.d
    mips_dpadd_s_h,                            // llvm.mips.dpadd.s.h
    mips_dpadd_s_w,                            // llvm.mips.dpadd.s.w
    mips_dpadd_u_d,                            // llvm.mips.dpadd.u.d
    mips_dpadd_u_h,                            // llvm.mips.dpadd.u.h
    mips_dpadd_u_w,                            // llvm.mips.dpadd.u.w
    mips_dpaq_s_w_ph,                          // llvm.mips.dpaq.s.w.ph
    mips_dpaq_sa_l_w,                          // llvm.mips.dpaq.sa.l.w
    mips_dpaqx_s_w_ph,                         // llvm.mips.dpaqx.s.w.ph
    mips_dpaqx_sa_w_ph,                        // llvm.mips.dpaqx.sa.w.ph
    mips_dpau_h_qbl,                           // llvm.mips.dpau.h.qbl
    mips_dpau_h_qbr,                           // llvm.mips.dpau.h.qbr
    mips_dpax_w_ph,                            // llvm.mips.dpax.w.ph
    mips_dps_w_ph,                             // llvm.mips.dps.w.ph
    mips_dpsq_s_w_ph,                          // llvm.mips.dpsq.s.w.ph
    mips_dpsq_sa_l_w,                          // llvm.mips.dpsq.sa.l.w
    mips_dpsqx_s_w_ph,                         // llvm.mips.dpsqx.s.w.ph
    mips_dpsqx_sa_w_ph,                        // llvm.mips.dpsqx.sa.w.ph
    mips_dpsu_h_qbl,                           // llvm.mips.dpsu.h.qbl
    mips_dpsu_h_qbr,                           // llvm.mips.dpsu.h.qbr
    mips_dpsub_s_d,                            // llvm.mips.dpsub.s.d
    mips_dpsub_s_h,                            // llvm.mips.dpsub.s.h
    mips_dpsub_s_w,                            // llvm.mips.dpsub.s.w
    mips_dpsub_u_d,                            // llvm.mips.dpsub.u.d
    mips_dpsub_u_h,                            // llvm.mips.dpsub.u.h
    mips_dpsub_u_w,                            // llvm.mips.dpsub.u.w
    mips_dpsx_w_ph,                            // llvm.mips.dpsx.w.ph
    mips_extp,                                 // llvm.mips.extp
    mips_extpdp,                               // llvm.mips.extpdp
    mips_extr_r_w,                             // llvm.mips.extr.r.w
    mips_extr_rs_w,                            // llvm.mips.extr.rs.w
    mips_extr_s_h,                             // llvm.mips.extr.s.h
    mips_extr_w,                               // llvm.mips.extr.w
    mips_fadd_d,                               // llvm.mips.fadd.d
    mips_fadd_w,                               // llvm.mips.fadd.w
    mips_fcaf_d,                               // llvm.mips.fcaf.d
    mips_fcaf_w,                               // llvm.mips.fcaf.w
    mips_fceq_d,                               // llvm.mips.fceq.d
    mips_fceq_w,                               // llvm.mips.fceq.w
    mips_fclass_d,                             // llvm.mips.fclass.d
    mips_fclass_w,                             // llvm.mips.fclass.w
    mips_fcle_d,                               // llvm.mips.fcle.d
    mips_fcle_w,                               // llvm.mips.fcle.w
    mips_fclt_d,                               // llvm.mips.fclt.d
    mips_fclt_w,                               // llvm.mips.fclt.w
    mips_fcne_d,                               // llvm.mips.fcne.d
    mips_fcne_w,                               // llvm.mips.fcne.w
    mips_fcor_d,                               // llvm.mips.fcor.d
    mips_fcor_w,                               // llvm.mips.fcor.w
    mips_fcueq_d,                              // llvm.mips.fcueq.d
    mips_fcueq_w,                              // llvm.mips.fcueq.w
    mips_fcule_d,                              // llvm.mips.fcule.d
    mips_fcule_w,                              // llvm.mips.fcule.w
    mips_fcult_d,                              // llvm.mips.fcult.d
    mips_fcult_w,                              // llvm.mips.fcult.w
    mips_fcun_d,                               // llvm.mips.fcun.d
    mips_fcun_w,                               // llvm.mips.fcun.w
    mips_fcune_d,                              // llvm.mips.fcune.d
    mips_fcune_w,                              // llvm.mips.fcune.w
    mips_fdiv_d,                               // llvm.mips.fdiv.d
    mips_fdiv_w,                               // llvm.mips.fdiv.w
    mips_fexdo_h,                              // llvm.mips.fexdo.h
    mips_fexdo_w,                              // llvm.mips.fexdo.w
    mips_fexp2_d,                              // llvm.mips.fexp2.d
    mips_fexp2_w,                              // llvm.mips.fexp2.w
    mips_fexupl_d,                             // llvm.mips.fexupl.d
    mips_fexupl_w,                             // llvm.mips.fexupl.w
    mips_fexupr_d,                             // llvm.mips.fexupr.d
    mips_fexupr_w,                             // llvm.mips.fexupr.w
    mips_ffint_s_d,                            // llvm.mips.ffint.s.d
    mips_ffint_s_w,                            // llvm.mips.ffint.s.w
    mips_ffint_u_d,                            // llvm.mips.ffint.u.d
    mips_ffint_u_w,                            // llvm.mips.ffint.u.w
    mips_ffql_d,                               // llvm.mips.ffql.d
    mips_ffql_w,                               // llvm.mips.ffql.w
    mips_ffqr_d,                               // llvm.mips.ffqr.d
    mips_ffqr_w,                               // llvm.mips.ffqr.w
    mips_fill_b,                               // llvm.mips.fill.b
    mips_fill_d,                               // llvm.mips.fill.d
    mips_fill_h,                               // llvm.mips.fill.h
    mips_fill_w,                               // llvm.mips.fill.w
    mips_flog2_d,                              // llvm.mips.flog2.d
    mips_flog2_w,                              // llvm.mips.flog2.w
    mips_fmadd_d,                              // llvm.mips.fmadd.d
    mips_fmadd_w,                              // llvm.mips.fmadd.w
    mips_fmax_a_d,                             // llvm.mips.fmax.a.d
    mips_fmax_a_w,                             // llvm.mips.fmax.a.w
    mips_fmax_d,                               // llvm.mips.fmax.d
    mips_fmax_w,                               // llvm.mips.fmax.w
    mips_fmin_a_d,                             // llvm.mips.fmin.a.d
    mips_fmin_a_w,                             // llvm.mips.fmin.a.w
    mips_fmin_d,                               // llvm.mips.fmin.d
    mips_fmin_w,                               // llvm.mips.fmin.w
    mips_fmsub_d,                              // llvm.mips.fmsub.d
    mips_fmsub_w,                              // llvm.mips.fmsub.w
    mips_fmul_d,                               // llvm.mips.fmul.d
    mips_fmul_w,                               // llvm.mips.fmul.w
    mips_frcp_d,                               // llvm.mips.frcp.d
    mips_frcp_w,                               // llvm.mips.frcp.w
    mips_frint_d,                              // llvm.mips.frint.d
    mips_frint_w,                              // llvm.mips.frint.w
    mips_frsqrt_d,                             // llvm.mips.frsqrt.d
    mips_frsqrt_w,                             // llvm.mips.frsqrt.w
    mips_fsaf_d,                               // llvm.mips.fsaf.d
    mips_fsaf_w,                               // llvm.mips.fsaf.w
    mips_fseq_d,                               // llvm.mips.fseq.d
    mips_fseq_w,                               // llvm.mips.fseq.w
    mips_fsle_d,                               // llvm.mips.fsle.d
    mips_fsle_w,                               // llvm.mips.fsle.w
    mips_fslt_d,                               // llvm.mips.fslt.d
    mips_fslt_w,                               // llvm.mips.fslt.w
    mips_fsne_d,                               // llvm.mips.fsne.d
    mips_fsne_w,                               // llvm.mips.fsne.w
    mips_fsor_d,                               // llvm.mips.fsor.d
    mips_fsor_w,                               // llvm.mips.fsor.w
    mips_fsqrt_d,                              // llvm.mips.fsqrt.d
    mips_fsqrt_w,                              // llvm.mips.fsqrt.w
    mips_fsub_d,                               // llvm.mips.fsub.d
    mips_fsub_w,                               // llvm.mips.fsub.w
    mips_fsueq_d,                              // llvm.mips.fsueq.d
    mips_fsueq_w,                              // llvm.mips.fsueq.w
    mips_fsule_d,                              // llvm.mips.fsule.d
    mips_fsule_w,                              // llvm.mips.fsule.w
    mips_fsult_d,                              // llvm.mips.fsult.d
    mips_fsult_w,                              // llvm.mips.fsult.w
    mips_fsun_d,                               // llvm.mips.fsun.d
    mips_fsun_w,                               // llvm.mips.fsun.w
    mips_fsune_d,                              // llvm.mips.fsune.d
    mips_fsune_w,                              // llvm.mips.fsune.w
    mips_ftint_s_d,                            // llvm.mips.ftint.s.d
    mips_ftint_s_w,                            // llvm.mips.ftint.s.w
    mips_ftint_u_d,                            // llvm.mips.ftint.u.d
    mips_ftint_u_w,                            // llvm.mips.ftint.u.w
    mips_ftq_h,                                // llvm.mips.ftq.h
    mips_ftq_w,                                // llvm.mips.ftq.w
    mips_ftrunc_s_d,                           // llvm.mips.ftrunc.s.d
    mips_ftrunc_s_w,                           // llvm.mips.ftrunc.s.w
    mips_ftrunc_u_d,                           // llvm.mips.ftrunc.u.d
    mips_ftrunc_u_w,                           // llvm.mips.ftrunc.u.w
    mips_hadd_s_d,                             // llvm.mips.hadd.s.d
    mips_hadd_s_h,                             // llvm.mips.hadd.s.h
    mips_hadd_s_w,                             // llvm.mips.hadd.s.w
    mips_hadd_u_d,                             // llvm.mips.hadd.u.d
    mips_hadd_u_h,                             // llvm.mips.hadd.u.h
    mips_hadd_u_w,                             // llvm.mips.hadd.u.w
    mips_hsub_s_d,                             // llvm.mips.hsub.s.d
    mips_hsub_s_h,                             // llvm.mips.hsub.s.h
    mips_hsub_s_w,                             // llvm.mips.hsub.s.w
    mips_hsub_u_d,                             // llvm.mips.hsub.u.d
    mips_hsub_u_h,                             // llvm.mips.hsub.u.h
    mips_hsub_u_w,                             // llvm.mips.hsub.u.w
    mips_ilvev_b,                              // llvm.mips.ilvev.b
    mips_ilvev_d,                              // llvm.mips.ilvev.d
    mips_ilvev_h,                              // llvm.mips.ilvev.h
    mips_ilvev_w,                              // llvm.mips.ilvev.w
    mips_ilvl_b,                               // llvm.mips.ilvl.b
    mips_ilvl_d,                               // llvm.mips.ilvl.d
    mips_ilvl_h,                               // llvm.mips.ilvl.h
    mips_ilvl_w,                               // llvm.mips.ilvl.w
    mips_ilvod_b,                              // llvm.mips.ilvod.b
    mips_ilvod_d,                              // llvm.mips.ilvod.d
    mips_ilvod_h,                              // llvm.mips.ilvod.h
    mips_ilvod_w,                              // llvm.mips.ilvod.w
    mips_ilvr_b,                               // llvm.mips.ilvr.b
    mips_ilvr_d,                               // llvm.mips.ilvr.d
    mips_ilvr_h,                               // llvm.mips.ilvr.h
    mips_ilvr_w,                               // llvm.mips.ilvr.w
    mips_insert_b,                             // llvm.mips.insert.b
    mips_insert_d,                             // llvm.mips.insert.d
    mips_insert_h,                             // llvm.mips.insert.h
    mips_insert_w,                             // llvm.mips.insert.w
    mips_insv,                                 // llvm.mips.insv
    mips_insve_b,                              // llvm.mips.insve.b
    mips_insve_d,                              // llvm.mips.insve.d
    mips_insve_h,                              // llvm.mips.insve.h
    mips_insve_w,                              // llvm.mips.insve.w
    mips_lbux,                                 // llvm.mips.lbux
    mips_ld_b,                                 // llvm.mips.ld.b
    mips_ld_d,                                 // llvm.mips.ld.d
    mips_ld_h,                                 // llvm.mips.ld.h
    mips_ld_w,                                 // llvm.mips.ld.w
    mips_ldi_b,                                // llvm.mips.ldi.b
    mips_ldi_d,                                // llvm.mips.ldi.d
    mips_ldi_h,                                // llvm.mips.ldi.h
    mips_ldi_w,                                // llvm.mips.ldi.w
    mips_ldr_d,                                // llvm.mips.ldr.d
    mips_ldr_w,                                // llvm.mips.ldr.w
    mips_lhx,                                  // llvm.mips.lhx
    mips_lsa,                                  // llvm.mips.lsa
    mips_lwx,                                  // llvm.mips.lwx
    mips_madd,                                 // llvm.mips.madd
    mips_madd_q_h,                             // llvm.mips.madd.q.h
    mips_madd_q_w,                             // llvm.mips.madd.q.w
    mips_maddr_q_h,                            // llvm.mips.maddr.q.h
    mips_maddr_q_w,                            // llvm.mips.maddr.q.w
    mips_maddu,                                // llvm.mips.maddu
    mips_maddv_b,                              // llvm.mips.maddv.b
    mips_maddv_d,                              // llvm.mips.maddv.d
    mips_maddv_h,                              // llvm.mips.maddv.h
    mips_maddv_w,                              // llvm.mips.maddv.w
    mips_maq_s_w_phl,                          // llvm.mips.maq.s.w.phl
    mips_maq_s_w_phr,                          // llvm.mips.maq.s.w.phr
    mips_maq_sa_w_phl,                         // llvm.mips.maq.sa.w.phl
    mips_maq_sa_w_phr,                         // llvm.mips.maq.sa.w.phr
    mips_max_a_b,                              // llvm.mips.max.a.b
    mips_max_a_d,                              // llvm.mips.max.a.d
    mips_max_a_h,                              // llvm.mips.max.a.h
    mips_max_a_w,                              // llvm.mips.max.a.w
    mips_max_s_b,                              // llvm.mips.max.s.b
    mips_max_s_d,                              // llvm.mips.max.s.d
    mips_max_s_h,                              // llvm.mips.max.s.h
    mips_max_s_w,                              // llvm.mips.max.s.w
    mips_max_u_b,                              // llvm.mips.max.u.b
    mips_max_u_d,                              // llvm.mips.max.u.d
    mips_max_u_h,                              // llvm.mips.max.u.h
    mips_max_u_w,                              // llvm.mips.max.u.w
    mips_maxi_s_b,                             // llvm.mips.maxi.s.b
    mips_maxi_s_d,                             // llvm.mips.maxi.s.d
    mips_maxi_s_h,                             // llvm.mips.maxi.s.h
    mips_maxi_s_w,                             // llvm.mips.maxi.s.w
    mips_maxi_u_b,                             // llvm.mips.maxi.u.b
    mips_maxi_u_d,                             // llvm.mips.maxi.u.d
    mips_maxi_u_h,                             // llvm.mips.maxi.u.h
    mips_maxi_u_w,                             // llvm.mips.maxi.u.w
    mips_min_a_b,                              // llvm.mips.min.a.b
    mips_min_a_d,                              // llvm.mips.min.a.d
    mips_min_a_h,                              // llvm.mips.min.a.h
    mips_min_a_w,                              // llvm.mips.min.a.w
    mips_min_s_b,                              // llvm.mips.min.s.b
    mips_min_s_d,                              // llvm.mips.min.s.d
    mips_min_s_h,                              // llvm.mips.min.s.h
    mips_min_s_w,                              // llvm.mips.min.s.w
    mips_min_u_b,                              // llvm.mips.min.u.b
    mips_min_u_d,                              // llvm.mips.min.u.d
    mips_min_u_h,                              // llvm.mips.min.u.h
    mips_min_u_w,                              // llvm.mips.min.u.w
    mips_mini_s_b,                             // llvm.mips.mini.s.b
    mips_mini_s_d,                             // llvm.mips.mini.s.d
    mips_mini_s_h,                             // llvm.mips.mini.s.h
    mips_mini_s_w,                             // llvm.mips.mini.s.w
    mips_mini_u_b,                             // llvm.mips.mini.u.b
    mips_mini_u_d,                             // llvm.mips.mini.u.d
    mips_mini_u_h,                             // llvm.mips.mini.u.h
    mips_mini_u_w,                             // llvm.mips.mini.u.w
    mips_mod_s_b,                              // llvm.mips.mod.s.b
    mips_mod_s_d,                              // llvm.mips.mod.s.d
    mips_mod_s_h,                              // llvm.mips.mod.s.h
    mips_mod_s_w,                              // llvm.mips.mod.s.w
    mips_mod_u_b,                              // llvm.mips.mod.u.b
    mips_mod_u_d,                              // llvm.mips.mod.u.d
    mips_mod_u_h,                              // llvm.mips.mod.u.h
    mips_mod_u_w,                              // llvm.mips.mod.u.w
    mips_modsub,                               // llvm.mips.modsub
    mips_move_v,                               // llvm.mips.move.v
    mips_msub,                                 // llvm.mips.msub
    mips_msub_q_h,                             // llvm.mips.msub.q.h
    mips_msub_q_w,                             // llvm.mips.msub.q.w
    mips_msubr_q_h,                            // llvm.mips.msubr.q.h
    mips_msubr_q_w,                            // llvm.mips.msubr.q.w
    mips_msubu,                                // llvm.mips.msubu
    mips_msubv_b,                              // llvm.mips.msubv.b
    mips_msubv_d,                              // llvm.mips.msubv.d
    mips_msubv_h,                              // llvm.mips.msubv.h
    mips_msubv_w,                              // llvm.mips.msubv.w
    mips_mthlip,                               // llvm.mips.mthlip
    mips_mul_ph,                               // llvm.mips.mul.ph
    mips_mul_q_h,                              // llvm.mips.mul.q.h
    mips_mul_q_w,                              // llvm.mips.mul.q.w
    mips_mul_s_ph,                             // llvm.mips.mul.s.ph
    mips_muleq_s_w_phl,                        // llvm.mips.muleq.s.w.phl
    mips_muleq_s_w_phr,                        // llvm.mips.muleq.s.w.phr
    mips_muleu_s_ph_qbl,                       // llvm.mips.muleu.s.ph.qbl
    mips_muleu_s_ph_qbr,                       // llvm.mips.muleu.s.ph.qbr
    mips_mulq_rs_ph,                           // llvm.mips.mulq.rs.ph
    mips_mulq_rs_w,                            // llvm.mips.mulq.rs.w
    mips_mulq_s_ph,                            // llvm.mips.mulq.s.ph
    mips_mulq_s_w,                             // llvm.mips.mulq.s.w
    mips_mulr_q_h,                             // llvm.mips.mulr.q.h
    mips_mulr_q_w,                             // llvm.mips.mulr.q.w
    mips_mulsa_w_ph,                           // llvm.mips.mulsa.w.ph
    mips_mulsaq_s_w_ph,                        // llvm.mips.mulsaq.s.w.ph
    mips_mult,                                 // llvm.mips.mult
    mips_multu,                                // llvm.mips.multu
    mips_mulv_b,                               // llvm.mips.mulv.b
    mips_mulv_d,                               // llvm.mips.mulv.d
    mips_mulv_h,                               // llvm.mips.mulv.h
    mips_mulv_w,                               // llvm.mips.mulv.w
    mips_nloc_b,                               // llvm.mips.nloc.b
    mips_nloc_d,                               // llvm.mips.nloc.d
    mips_nloc_h,                               // llvm.mips.nloc.h
    mips_nloc_w,                               // llvm.mips.nloc.w
    mips_nlzc_b,                               // llvm.mips.nlzc.b
    mips_nlzc_d,                               // llvm.mips.nlzc.d
    mips_nlzc_h,                               // llvm.mips.nlzc.h
    mips_nlzc_w,                               // llvm.mips.nlzc.w
    mips_nor_v,                                // llvm.mips.nor.v
    mips_nori_b,                               // llvm.mips.nori.b
    mips_or_v,                                 // llvm.mips.or.v
    mips_ori_b,                                // llvm.mips.ori.b
    mips_packrl_ph,                            // llvm.mips.packrl.ph
    mips_pckev_b,                              // llvm.mips.pckev.b
    mips_pckev_d,                              // llvm.mips.pckev.d
    mips_pckev_h,                              // llvm.mips.pckev.h
    mips_pckev_w,                              // llvm.mips.pckev.w
    mips_pckod_b,                              // llvm.mips.pckod.b
    mips_pckod_d,                              // llvm.mips.pckod.d
    mips_pckod_h,                              // llvm.mips.pckod.h
    mips_pckod_w,                              // llvm.mips.pckod.w
    mips_pcnt_b,                               // llvm.mips.pcnt.b
    mips_pcnt_d,                               // llvm.mips.pcnt.d
    mips_pcnt_h,                               // llvm.mips.pcnt.h
    mips_pcnt_w,                               // llvm.mips.pcnt.w
    mips_pick_ph,                              // llvm.mips.pick.ph
    mips_pick_qb,                              // llvm.mips.pick.qb
    mips_preceq_w_phl,                         // llvm.mips.preceq.w.phl
    mips_preceq_w_phr,                         // llvm.mips.preceq.w.phr
    mips_precequ_ph_qbl,                       // llvm.mips.precequ.ph.qbl
    mips_precequ_ph_qbla,                      // llvm.mips.precequ.ph.qbla
    mips_precequ_ph_qbr,                       // llvm.mips.precequ.ph.qbr
    mips_precequ_ph_qbra,                      // llvm.mips.precequ.ph.qbra
    mips_preceu_ph_qbl,                        // llvm.mips.preceu.ph.qbl
    mips_preceu_ph_qbla,                       // llvm.mips.preceu.ph.qbla
    mips_preceu_ph_qbr,                        // llvm.mips.preceu.ph.qbr
    mips_preceu_ph_qbra,                       // llvm.mips.preceu.ph.qbra
    mips_precr_qb_ph,                          // llvm.mips.precr.qb.ph
    mips_precr_sra_ph_w,                       // llvm.mips.precr.sra.ph.w
    mips_precr_sra_r_ph_w,                     // llvm.mips.precr.sra.r.ph.w
    mips_precrq_ph_w,                          // llvm.mips.precrq.ph.w
    mips_precrq_qb_ph,                         // llvm.mips.precrq.qb.ph
    mips_precrq_rs_ph_w,                       // llvm.mips.precrq.rs.ph.w
    mips_precrqu_s_qb_ph,                      // llvm.mips.precrqu.s.qb.ph
    mips_prepend,                              // llvm.mips.prepend
    mips_raddu_w_qb,                           // llvm.mips.raddu.w.qb
    mips_rddsp,                                // llvm.mips.rddsp
    mips_repl_ph,                              // llvm.mips.repl.ph
    mips_repl_qb,                              // llvm.mips.repl.qb
    mips_sat_s_b,                              // llvm.mips.sat.s.b
    mips_sat_s_d,                              // llvm.mips.sat.s.d
    mips_sat_s_h,                              // llvm.mips.sat.s.h
    mips_sat_s_w,                              // llvm.mips.sat.s.w
    mips_sat_u_b,                              // llvm.mips.sat.u.b
    mips_sat_u_d,                              // llvm.mips.sat.u.d
    mips_sat_u_h,                              // llvm.mips.sat.u.h
    mips_sat_u_w,                              // llvm.mips.sat.u.w
    mips_shf_b,                                // llvm.mips.shf.b
    mips_shf_h,                                // llvm.mips.shf.h
    mips_shf_w,                                // llvm.mips.shf.w
    mips_shilo,                                // llvm.mips.shilo
    mips_shll_ph,                              // llvm.mips.shll.ph
    mips_shll_qb,                              // llvm.mips.shll.qb
    mips_shll_s_ph,                            // llvm.mips.shll.s.ph
    mips_shll_s_w,                             // llvm.mips.shll.s.w
    mips_shra_ph,                              // llvm.mips.shra.ph
    mips_shra_qb,                              // llvm.mips.shra.qb
    mips_shra_r_ph,                            // llvm.mips.shra.r.ph
    mips_shra_r_qb,                            // llvm.mips.shra.r.qb
    mips_shra_r_w,                             // llvm.mips.shra.r.w
    mips_shrl_ph,                              // llvm.mips.shrl.ph
    mips_shrl_qb,                              // llvm.mips.shrl.qb
    mips_sld_b,                                // llvm.mips.sld.b
    mips_sld_d,                                // llvm.mips.sld.d
    mips_sld_h,                                // llvm.mips.sld.h
    mips_sld_w,                                // llvm.mips.sld.w
    mips_sldi_b,                               // llvm.mips.sldi.b
    mips_sldi_d,                               // llvm.mips.sldi.d
    mips_sldi_h,                               // llvm.mips.sldi.h
    mips_sldi_w,                               // llvm.mips.sldi.w
    mips_sll_b,                                // llvm.mips.sll.b
    mips_sll_d,                                // llvm.mips.sll.d
    mips_sll_h,                                // llvm.mips.sll.h
    mips_sll_w,                                // llvm.mips.sll.w
    mips_slli_b,                               // llvm.mips.slli.b
    mips_slli_d,                               // llvm.mips.slli.d
    mips_slli_h,                               // llvm.mips.slli.h
    mips_slli_w,                               // llvm.mips.slli.w
    mips_splat_b,                              // llvm.mips.splat.b
    mips_splat_d,                              // llvm.mips.splat.d
    mips_splat_h,                              // llvm.mips.splat.h
    mips_splat_w,                              // llvm.mips.splat.w
    mips_splati_b,                             // llvm.mips.splati.b
    mips_splati_d,                             // llvm.mips.splati.d
    mips_splati_h,                             // llvm.mips.splati.h
    mips_splati_w,                             // llvm.mips.splati.w
    mips_sra_b,                                // llvm.mips.sra.b
    mips_sra_d,                                // llvm.mips.sra.d
    mips_sra_h,                                // llvm.mips.sra.h
    mips_sra_w,                                // llvm.mips.sra.w
    mips_srai_b,                               // llvm.mips.srai.b
    mips_srai_d,                               // llvm.mips.srai.d
    mips_srai_h,                               // llvm.mips.srai.h
    mips_srai_w,                               // llvm.mips.srai.w
    mips_srar_b,                               // llvm.mips.srar.b
    mips_srar_d,                               // llvm.mips.srar.d
    mips_srar_h,                               // llvm.mips.srar.h
    mips_srar_w,                               // llvm.mips.srar.w
    mips_srari_b,                              // llvm.mips.srari.b
    mips_srari_d,                              // llvm.mips.srari.d
    mips_srari_h,                              // llvm.mips.srari.h
    mips_srari_w,                              // llvm.mips.srari.w
    mips_srl_b,                                // llvm.mips.srl.b
    mips_srl_d,                                // llvm.mips.srl.d
    mips_srl_h,                                // llvm.mips.srl.h
    mips_srl_w,                                // llvm.mips.srl.w
    mips_srli_b,                               // llvm.mips.srli.b
    mips_srli_d,                               // llvm.mips.srli.d
    mips_srli_h,                               // llvm.mips.srli.h
    mips_srli_w,                               // llvm.mips.srli.w
    mips_srlr_b,                               // llvm.mips.srlr.b
    mips_srlr_d,                               // llvm.mips.srlr.d
    mips_srlr_h,                               // llvm.mips.srlr.h
    mips_srlr_w,                               // llvm.mips.srlr.w
    mips_srlri_b,                              // llvm.mips.srlri.b
    mips_srlri_d,                              // llvm.mips.srlri.d
    mips_srlri_h,                              // llvm.mips.srlri.h
    mips_srlri_w,                              // llvm.mips.srlri.w
    mips_st_b,                                 // llvm.mips.st.b
    mips_st_d,                                 // llvm.mips.st.d
    mips_st_h,                                 // llvm.mips.st.h
    mips_st_w,                                 // llvm.mips.st.w
    mips_str_d,                                // llvm.mips.str.d
    mips_str_w,                                // llvm.mips.str.w
    mips_subq_ph,                              // llvm.mips.subq.ph
    mips_subq_s_ph,                            // llvm.mips.subq.s.ph
    mips_subq_s_w,                             // llvm.mips.subq.s.w
    mips_subqh_ph,                             // llvm.mips.subqh.ph
    mips_subqh_r_ph,                           // llvm.mips.subqh.r.ph
    mips_subqh_r_w,                            // llvm.mips.subqh.r.w
    mips_subqh_w,                              // llvm.mips.subqh.w
    mips_subs_s_b,                             // llvm.mips.subs.s.b
    mips_subs_s_d,                             // llvm.mips.subs.s.d
    mips_subs_s_h,                             // llvm.mips.subs.s.h
    mips_subs_s_w,                             // llvm.mips.subs.s.w
    mips_subs_u_b,                             // llvm.mips.subs.u.b
    mips_subs_u_d,                             // llvm.mips.subs.u.d
    mips_subs_u_h,                             // llvm.mips.subs.u.h
    mips_subs_u_w,                             // llvm.mips.subs.u.w
    mips_subsus_u_b,                           // llvm.mips.subsus.u.b
    mips_subsus_u_d,                           // llvm.mips.subsus.u.d
    mips_subsus_u_h,                           // llvm.mips.subsus.u.h
    mips_subsus_u_w,                           // llvm.mips.subsus.u.w
    mips_subsuu_s_b,                           // llvm.mips.subsuu.s.b
    mips_subsuu_s_d,                           // llvm.mips.subsuu.s.d
    mips_subsuu_s_h,                           // llvm.mips.subsuu.s.h
    mips_subsuu_s_w,                           // llvm.mips.subsuu.s.w
    mips_subu_ph,                              // llvm.mips.subu.ph
    mips_subu_qb,                              // llvm.mips.subu.qb
    mips_subu_s_ph,                            // llvm.mips.subu.s.ph
    mips_subu_s_qb,                            // llvm.mips.subu.s.qb
    mips_subuh_qb,                             // llvm.mips.subuh.qb
    mips_subuh_r_qb,                           // llvm.mips.subuh.r.qb
    mips_subv_b,                               // llvm.mips.subv.b
    mips_subv_d,                               // llvm.mips.subv.d
    mips_subv_h,                               // llvm.mips.subv.h
    mips_subv_w,                               // llvm.mips.subv.w
    mips_subvi_b,                              // llvm.mips.subvi.b
    mips_subvi_d,                              // llvm.mips.subvi.d
    mips_subvi_h,                              // llvm.mips.subvi.h
    mips_subvi_w,                              // llvm.mips.subvi.w
    mips_vshf_b,                               // llvm.mips.vshf.b
    mips_vshf_d,                               // llvm.mips.vshf.d
    mips_vshf_h,                               // llvm.mips.vshf.h
    mips_vshf_w,                               // llvm.mips.vshf.w
    mips_wrdsp,                                // llvm.mips.wrdsp
    mips_xor_v,                                // llvm.mips.xor.v
    mips_xori_b,                               // llvm.mips.xori.b
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
//===- ValueHandle.h - Value Smart Pointer classes --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ValueHandle class and its sub-classes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VALUEHANDLE_H
#define LLVM_IR_VALUEHANDLE_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>

namespace llvm {

/// This is the common base class of value handles.
///
/// ValueHandle's are smart pointers to Value's that have special behavior when
/// the value is deleted or ReplaceAllUsesWith'd.  See the specific handles
/// below for details.
class ValueHandleBase {
  friend class Value;

protected:
  /// This indicates what sub class the handle actually is.
  ///
  /// This is to avoid having a vtable for the light-weight handle pointers. The
  /// fully general Callback version does have a vtable.
  enum HandleBaseKind { Assert, Callback, Weak, WeakTracking };

  ValueHandleBase(const ValueHandleBase &RHS)
      : ValueHandleBase(RHS.PrevPair.getInt(), RHS) {}

  ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
      : PrevPair(nullptr, Kind), Val(RHS.getValPtr()) {
    if (isValid(getValPtr()))
      AddToExistingUseList(RHS.getPrevPtr());
  }

private:
  PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair;
  ValueHandleBase *Next = nullptr;
  Value *Val = nullptr;

  void setValPtr(Value *V) { Val = V; }

public:
  explicit ValueHandleBase(HandleBaseKind Kind)
      : PrevPair(nullptr, Kind) {}
  ValueHandleBase(HandleBaseKind Kind, Value *V)
      : PrevPair(nullptr, Kind), Val(V) {
    if (isValid(getValPtr()))
      AddToUseList();
  }

  ~ValueHandleBase() {
    if (isValid(getValPtr()))
      RemoveFromUseList();
  }

  Value *operator=(Value *RHS) {
    if (getValPtr() == RHS)
      return RHS;
    if (isValid(getValPtr()))
      RemoveFromUseList();
    setValPtr(RHS);
    if (isValid(getValPtr()))
      AddToUseList();
    return RHS;
  }

  Value *operator=(const ValueHandleBase &RHS) {
    if (getValPtr() == RHS.getValPtr())
      return RHS.getValPtr();
    if (isValid(getValPtr()))
      RemoveFromUseList();
    setValPtr(RHS.getValPtr());
    if (isValid(getValPtr()))
      AddToExistingUseList(RHS.getPrevPtr());
    return getValPtr();
  }

  Value *operator->() const { return getValPtr(); }
  Value &operator*() const {
    Value *V = getValPtr();
    assert(V && "Dereferencing deleted ValueHandle");
    return *V;
  }

protected:
  Value *getValPtr() const { return Val; }

  static bool isValid(Value *V) {
    return V &&
           V != DenseMapInfo<Value *>::getEmptyKey() &&
           V != DenseMapInfo<Value *>::getTombstoneKey();
  }

  /// Remove this ValueHandle from its current use list.
  void RemoveFromUseList();

  /// Clear the underlying pointer without clearing the use list.
  ///
  /// This should only be used if a derived class has manually removed the
  /// handle from the use list.
  void clearValPtr() { setValPtr(nullptr); }

public:
  // Callbacks made from Value.
  static void ValueIsDeleted(Value *V);
  static void ValueIsRAUWd(Value *Old, Value *New);

private:
  // Internal implementation details.
  ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); }
  HandleBaseKind getKind() const { return PrevPair.getInt(); }
  void setPrevPtr(ValueHandleBase **Ptr) { PrevPair.setPointer(Ptr); }

  /// Add this ValueHandle to the use list for V.
  ///
  /// List is the address of either the head of the list or a Next node within
  /// the existing use list.
  void AddToExistingUseList(ValueHandleBase **List);

  /// Add this ValueHandle to the use list after Node.
  void AddToExistingUseListAfter(ValueHandleBase *Node);

  /// Add this ValueHandle to the use list for V.
  void AddToUseList();
};

/// A nullable Value handle.
///
/// This is a value handle that points to a value, and nulls itself
/// out if that value is deleted.
class WeakVH : public ValueHandleBase {
public:
  WeakVH() : ValueHandleBase(Weak) {}
  WeakVH(Value *P) : ValueHandleBase(Weak, P) {}
  WeakVH(const WeakVH &RHS)
      : ValueHandleBase(Weak, RHS) {}

  WeakVH &operator=(const WeakVH &RHS) = default;

  Value *operator=(Value *RHS) {
    return ValueHandleBase::operator=(RHS);
  }
  Value *operator=(const ValueHandleBase &RHS) {
    return ValueHandleBase::operator=(RHS);
  }

  operator Value*() const {
    return getValPtr();
  }
};
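
// Example (a minimal sketch; assumes an existing Instruction *I with a
// parent block). A WeakVH nulls itself when the value it points to dies:
//
//   WeakVH VH(I);
//   I->eraseFromParent(); // destroys the instruction
//   assert(VH == nullptr && "handle nulled itself on deletion");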

// Specialize simplify_type to allow WeakVH to participate in
// dyn_cast, isa, etc.
template <> struct simplify_type<WeakVH> {
  using SimpleType = Value *;

  static SimpleType getSimplifiedValue(WeakVH &WVH) { return WVH; }
};
template <> struct simplify_type<const WeakVH> {
  using SimpleType = Value *;

  static SimpleType getSimplifiedValue(const WeakVH &WVH) { return WVH; }
};

// Specialize DenseMapInfo to allow WeakVH to participate in DenseMap.
template <> struct DenseMapInfo<WeakVH> {
  static inline WeakVH getEmptyKey() {
    return WeakVH(DenseMapInfo<Value *>::getEmptyKey());
  }

  static inline WeakVH getTombstoneKey() {
    return WeakVH(DenseMapInfo<Value *>::getTombstoneKey());
  }

  static unsigned getHashValue(const WeakVH &Val) {
    return DenseMapInfo<Value *>::getHashValue(Val);
  }

  static bool isEqual(const WeakVH &LHS, const WeakVH &RHS) {
    return DenseMapInfo<Value *>::isEqual(LHS, RHS);
  }
};

/// Value handle that is nullable, but tries to track the Value.
///
/// This is a value handle that tries hard to point to a Value, even across
/// RAUW operations, but will null itself out if the value is destroyed.  This
/// is useful for advisory sorts of information, but should not be used as the
/// key of a map (since the map would have to rearrange itself when the pointer
/// changes).
class WeakTrackingVH : public ValueHandleBase {
public:
  WeakTrackingVH() : ValueHandleBase(WeakTracking) {}
  WeakTrackingVH(Value *P) : ValueHandleBase(WeakTracking, P) {}
  WeakTrackingVH(const WeakTrackingVH &RHS)
      : ValueHandleBase(WeakTracking, RHS) {}

  WeakTrackingVH &operator=(const WeakTrackingVH &RHS) = default;

  Value *operator=(Value *RHS) {
    return ValueHandleBase::operator=(RHS);
  }
  Value *operator=(const ValueHandleBase &RHS) {
    return ValueHandleBase::operator=(RHS);
  }

  operator Value*() const {
    return getValPtr();
  }

  bool pointsToAliveValue() const {
    return ValueHandleBase::isValid(getValPtr());
  }
};
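
// Example (a minimal sketch; assumes Values Old and New where New is a
// valid replacement for Old). The handle follows RAUW instead of nulling:
//
//   WeakTrackingVH VH(Old);
//   Old->replaceAllUsesWith(New); // handle now points to New
//   assert(VH == New && VH.pointsToAliveValue());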

// Specialize simplify_type to allow WeakTrackingVH to participate in
// dyn_cast, isa, etc.
template <> struct simplify_type<WeakTrackingVH> {
  using SimpleType = Value *;

  static SimpleType getSimplifiedValue(WeakTrackingVH &WVH) { return WVH; }
};
template <> struct simplify_type<const WeakTrackingVH> {
  using SimpleType = Value *;

  static SimpleType getSimplifiedValue(const WeakTrackingVH &WVH) {
    return WVH;
  }
};

/// Value handle that asserts if the Value is deleted.
///
/// This is a Value Handle that points to a value and asserts out if the value
/// is destroyed while the handle is still live.  This is very useful for
/// catching dangling pointer bugs and other things which can be non-obvious.
/// One particularly useful place to use this is as the Key of a map.  Dangling
/// pointer bugs often lead to really subtle bugs that only occur if another
/// object happens to get allocated to the same address as the old one.  Using
/// an AssertingVH ensures that an assert is triggered as soon as the bad
/// delete occurs.
///
/// Note that an AssertingVH handle does *not* follow values across RAUW
/// operations.  This means that RAUWs need to explicitly update the
/// AssertingVHs as the value moves.  This is required because in non-assert
/// mode this
/// class turns into a trivial wrapper around a pointer.
template <typename ValueTy>
class AssertingVH
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    : public ValueHandleBase
#endif
{
  friend struct DenseMapInfo<AssertingVH<ValueTy>>;

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  Value *getRawValPtr() const { return ValueHandleBase::getValPtr(); }
  void setRawValPtr(Value *P) { ValueHandleBase::operator=(P); }
#else
  Value *ThePtr;
  Value *getRawValPtr() const { return ThePtr; }
  void setRawValPtr(Value *P) { ThePtr = P; }
#endif
  // Convert a ValueTy*, which may be const, to the raw Value*.
  static Value *GetAsValue(Value *V) { return V; }
  static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }

  ValueTy *getValPtr() const { return static_cast<ValueTy *>(getRawValPtr()); }
  void setValPtr(ValueTy *P) { setRawValPtr(GetAsValue(P)); }

public:
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  AssertingVH() : ValueHandleBase(Assert) {}
  AssertingVH(ValueTy *P) : ValueHandleBase(Assert, GetAsValue(P)) {}
  AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {}
#else
  AssertingVH() : ThePtr(nullptr) {}
  AssertingVH(ValueTy *P) : ThePtr(GetAsValue(P)) {}
  AssertingVH(const AssertingVH &) = default;
#endif

  operator ValueTy*() const {
    return getValPtr();
  }

  ValueTy *operator=(ValueTy *RHS) {
    setValPtr(RHS);
    return getValPtr();
  }
  ValueTy *operator=(const AssertingVH<ValueTy> &RHS) {
    setValPtr(RHS.getValPtr());
    return getValPtr();
  }

  ValueTy *operator->() const { return getValPtr(); }
  ValueTy &operator*() const { return *getValPtr(); }
};

// Treat AssertingVH<T> like T* inside maps. This also allows using find_as()
// to look up a value without constructing a value handle.
template<typename T>
struct DenseMapInfo<AssertingVH<T>> : DenseMapInfo<T *> {};
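
// Example (a minimal sketch; assumes an Instruction *I and an #include of
// llvm/ADT/DenseMap.h). With LLVM_ENABLE_ABI_BREAKING_CHECKS enabled,
// destroying I while a handle to it is still live asserts; without the
// checks the handle is a zero-cost pointer wrapper:
//
//   DenseMap<AssertingVH<Instruction>, unsigned> VisitCounts;
//   ++VisitCounts[I];
//   // I->eraseFromParent(); // would assert: a live handle still exists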

/// Value handle that tracks a Value across RAUW.
///
/// TrackingVH is designed for situations where a client needs to hold a handle
/// to a Value (or subclass) across some operations which may move that value,
/// but should never destroy it or replace it with some unacceptable type.
///
/// It is an error to attempt to replace a value with one of a type which is
/// incompatible with any of its outstanding TrackingVHs.
///
/// It is an error to read from a TrackingVH that does not point to a valid
/// value.  A TrackingVH does not point to a valid value if it has not yet
/// been assigned a value or if the value it was tracking has since been
/// deleted.
///
/// Assigning a value to a TrackingVH is always allowed, even if said TrackingVH
/// no longer points to a valid value.
template <typename ValueTy> class TrackingVH {
  WeakTrackingVH InnerHandle;

public:
  ValueTy *getValPtr() const {
    assert(InnerHandle.pointsToAliveValue() &&
           "TrackingVH must be non-null and valid on dereference!");

    // Check that the value is a member of the correct subclass. We would like
    // to check this property on assignment for better debugging, but we don't
    // want to require a virtual interface on this VH. Instead we allow RAUW to
    // replace this value with a value of an invalid type, and check it here.
    assert(isa<ValueTy>(InnerHandle) &&
           "Tracked Value was replaced by one with an invalid type!");
    return cast<ValueTy>(InnerHandle);
  }

  void setValPtr(ValueTy *P) {
    // Assigning to a non-valid TrackingVH is fine, so we just unconditionally
    // assign here.
    InnerHandle = GetAsValue(P);
  }

  // Convert a ValueTy*, which may be const, to the type the base
  // class expects.
  static Value *GetAsValue(Value *V) { return V; }
  static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }

public:
  TrackingVH() = default;
  TrackingVH(ValueTy *P) { setValPtr(P); }

  operator ValueTy*() const {
    return getValPtr();
  }

  ValueTy *operator=(ValueTy *RHS) {
    setValPtr(RHS);
    return getValPtr();
  }

  ValueTy *operator->() const { return getValPtr(); }
  ValueTy &operator*() const { return *getValPtr(); }
};
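
// Example (a minimal sketch; assumes a BasicBlock *BB that some transform
// later RAUWs to another BasicBlock). The handle follows the replacement
// and asserts if the replacement is not of the tracked type:
//
//   TrackingVH<BasicBlock> VH(BB);
//   // ... transform calls BB->replaceAllUsesWith(NewBB) ...
//   BasicBlock *Current = VH; // yields NewBB; asserts on a type mismatch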

/// Value handle with callbacks on RAUW and destruction.
///
/// This is a value handle that allows subclasses to define callbacks that run
/// when the underlying Value has RAUW called on it or is destroyed.  This
/// class can be used as the key of a map, as long as the user takes it out of
/// the map before calling setValPtr() (since the map has to rearrange itself
/// when the pointer changes).  Unlike ValueHandleBase, this class has a vtable.
class CallbackVH : public ValueHandleBase {
  virtual void anchor();
protected:
  ~CallbackVH() = default;
  CallbackVH(const CallbackVH &) = default;
  CallbackVH &operator=(const CallbackVH &) = default;

  void setValPtr(Value *P) {
    ValueHandleBase::operator=(P);
  }

public:
  CallbackVH() : ValueHandleBase(Callback) {}
  CallbackVH(Value *P) : ValueHandleBase(Callback, P) {}
  CallbackVH(const Value *P) : CallbackVH(const_cast<Value *>(P)) {}

  operator Value*() const {
    return getValPtr();
  }

  /// Callback for Value destruction.
  ///
  /// Called when this->getValPtr() is destroyed, inside ~Value(), so you
  /// may call any non-virtual Value method on getValPtr(), but no subclass
  /// methods.  If WeakTrackingVH were implemented as a CallbackVH, it would
  /// use this method to call setValPtr(NULL).  AssertingVH would use this
  /// method to cause an assertion failure.
  ///
  /// All implementations must remove the reference from this object to the
  /// Value that's being destroyed.
  virtual void deleted() { setValPtr(nullptr); }

  /// Callback for Value RAUW.
  ///
  /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
  /// _before_ any of the uses have actually been replaced.  If WeakTrackingVH
  /// were implemented as a CallbackVH, it would use this method to call
  /// setValPtr(new_value).  AssertingVH would do nothing in this method.
  virtual void allUsesReplacedWith(Value *) {}
};
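
// Example (a minimal sketch of a hypothetical subclass). deleted() must
// drop the reference to the dying Value; allUsesReplacedWith() may retarget
// the handle, here mirroring what a tracking handle would do:
//
//   struct RetargetingVH final : CallbackVH {
//     RetargetingVH(Value *V) : CallbackVH(V) {}
//     void deleted() override { setValPtr(nullptr); }
//     void allUsesReplacedWith(Value *New) override { setValPtr(New); }
//   };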

/// Value handle that poisons itself if the Value is deleted.
///
/// This is a Value Handle that points to a value and poisons itself if the
/// value is destroyed while the handle is still live.  This is very useful for
/// catching dangling pointer bugs where an \c AssertingVH cannot be used
/// because the dangling handle needs to outlive the value without ever being
/// used.
///
/// One particularly useful place to use this is as the Key of a map. Dangling
/// pointer bugs often lead to really subtle bugs that only occur if another
/// object happens to get allocated to the same address as the old one. Using
/// a PoisoningVH ensures that an assert is triggered if looking up a new value
/// in the map finds a handle from the old value.
///
/// Note that a PoisoningVH handle does *not* follow values across RAUW
/// operations. This means that RAUWs need to explicitly update the
/// PoisoningVHs as the value moves. This is required because in non-assert
/// mode this
/// class turns into a trivial wrapper around a pointer.
template <typename ValueTy>
class PoisoningVH final
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    : public CallbackVH
#endif
{
  friend struct DenseMapInfo<PoisoningVH<ValueTy>>;

  // Convert a ValueTy*, which may be const, to the raw Value*.
  static Value *GetAsValue(Value *V) { return V; }
  static Value *GetAsValue(const Value *V) { return const_cast<Value *>(V); }

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  /// A flag tracking whether this value has been poisoned.
  ///
  /// On delete and RAUW, we leave the value pointer alone so that as a raw
  /// pointer it produces the same value (and we fit into the same key of
  /// a hash table, etc), but we poison the handle so that any top-level usage
  /// will fail.
  bool Poisoned = false;

  Value *getRawValPtr() const { return ValueHandleBase::getValPtr(); }
  void setRawValPtr(Value *P) { ValueHandleBase::operator=(P); }

  /// Handle deletion by poisoning the handle.
  void deleted() override {
    assert(!Poisoned && "Tried to delete an already poisoned handle!");
    Poisoned = true;
    RemoveFromUseList();
  }

  /// Handle RAUW by poisoning the handle.
  void allUsesReplacedWith(Value *) override {
    assert(!Poisoned && "Tried to RAUW an already poisoned handle!");
    Poisoned = true;
    RemoveFromUseList();
  }
#else // LLVM_ENABLE_ABI_BREAKING_CHECKS
  Value *ThePtr = nullptr;

  Value *getRawValPtr() const { return ThePtr; }
  void setRawValPtr(Value *P) { ThePtr = P; }
#endif

  ValueTy *getValPtr() const {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
    assert(!Poisoned && "Accessed a poisoned value handle!");
#endif
    return static_cast<ValueTy *>(getRawValPtr());
  }
  void setValPtr(ValueTy *P) { setRawValPtr(GetAsValue(P)); }

public:
  PoisoningVH() = default;
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
  PoisoningVH(ValueTy *P) : CallbackVH(GetAsValue(P)) {}
  PoisoningVH(const PoisoningVH &RHS)
      : CallbackVH(RHS), Poisoned(RHS.Poisoned) {}

  ~PoisoningVH() {
    if (Poisoned)
      clearValPtr();
  }

  PoisoningVH &operator=(const PoisoningVH &RHS) {
    if (Poisoned)
      clearValPtr();
    CallbackVH::operator=(RHS);
    Poisoned = RHS.Poisoned;
    return *this;
  }
#else
  PoisoningVH(ValueTy *P) : ThePtr(GetAsValue(P)) {}
#endif

  operator ValueTy *() const { return getValPtr(); }

  ValueTy *operator->() const { return getValPtr(); }
  ValueTy &operator*() const { return *getValPtr(); }
};
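
// Example (a minimal sketch; assumes an Instruction *I). Unlike an
// AssertingVH, the handle may outlive the value, as long as it is never
// dereferenced after the value dies:
//
//   PoisoningVH<Instruction> VH(I);
//   I->eraseFromParent();   // poisons VH under ABI-breaking checks
//   // Instruction *P = VH; // would assert: handle is poisoned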

// Specialize DenseMapInfo to allow PoisoningVH to participate in DenseMap.
template <typename T> struct DenseMapInfo<PoisoningVH<T>> {
  static inline PoisoningVH<T> getEmptyKey() {
    PoisoningVH<T> Res;
    Res.setRawValPtr(DenseMapInfo<Value *>::getEmptyKey());
    return Res;
  }

  static inline PoisoningVH<T> getTombstoneKey() {
    PoisoningVH<T> Res;
    Res.setRawValPtr(DenseMapInfo<Value *>::getTombstoneKey());
    return Res;
  }

  static unsigned getHashValue(const PoisoningVH<T> &Val) {
    return DenseMapInfo<Value *>::getHashValue(Val.getRawValPtr());
  }

  static bool isEqual(const PoisoningVH<T> &LHS, const PoisoningVH<T> &RHS) {
    return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
                                          RHS.getRawValPtr());
  }

  // Allow lookup by T* via find_as(), without constructing a temporary
  // value handle.

  static unsigned getHashValue(const T *Val) {
    return DenseMapInfo<Value *>::getHashValue(Val);
  }

  static bool isEqual(const T *LHS, const PoisoningVH<T> &RHS) {
    return DenseMapInfo<Value *>::isEqual(LHS, RHS.getRawValPtr());
  }
};

} // end namespace llvm

#endif // LLVM_IR_VALUEHANDLE_H
//===- IRBuilderFolder.h - Const folder interface for IRBuilder -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the constant folding interface used by IRBuilder.
// It is implemented by ConstantFolder (the default), TargetFolder, and
// NoFolder.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_IRBUILDERFOLDER_H
#define LLVM_IR_IRBUILDERFOLDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

/// IRBuilderFolder - Interface for constant folding in IRBuilder.
class IRBuilderFolder {
public:
  virtual ~IRBuilderFolder();

  //===--------------------------------------------------------------------===//
  // Value-based folders.
  //
  // Return an existing value or a constant if the operation can be simplified.
  // Otherwise return nullptr.
  //===--------------------------------------------------------------------===//

  virtual Value *FoldBinOp(Instruction::BinaryOps Opc, Value *LHS,
                           Value *RHS) const = 0;

  virtual Value *FoldExactBinOp(Instruction::BinaryOps Opc, Value *LHS,
                                Value *RHS, bool IsExact) const = 0;

  virtual Value *FoldNoWrapBinOp(Instruction::BinaryOps Opc, Value *LHS,
                                 Value *RHS, bool HasNUW,
                                 bool HasNSW) const = 0;

  virtual Value *FoldBinOpFMF(Instruction::BinaryOps Opc, Value *LHS,
                              Value *RHS, FastMathFlags FMF) const = 0;

  virtual Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
                             FastMathFlags FMF) const = 0;

  virtual Value *FoldICmp(CmpInst::Predicate P, Value *LHS,
                          Value *RHS) const = 0;

  virtual Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                         bool IsInBounds = false) const = 0;

  virtual Value *FoldSelect(Value *C, Value *True, Value *False) const = 0;

  virtual Value *FoldExtractValue(Value *Agg,
                                  ArrayRef<unsigned> IdxList) const = 0;

  virtual Value *FoldInsertValue(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> IdxList) const = 0;

  virtual Value *FoldExtractElement(Value *Vec, Value *Idx) const = 0;

  virtual Value *FoldInsertElement(Value *Vec, Value *NewElt,
                                   Value *Idx) const = 0;

  virtual Value *FoldShuffleVector(Value *V1, Value *V2,
                                   ArrayRef<int> Mask) const = 0;

  //===--------------------------------------------------------------------===//
  // Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  virtual Value *CreateCast(Instruction::CastOps Op, Constant *C,
                            Type *DestTy) const = 0;
  virtual Value *CreatePointerCast(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
                                                     Type *DestTy) const = 0;
  virtual Value *CreateIntCast(Constant *C, Type *DestTy,
                               bool isSigned) const = 0;
  virtual Value *CreateFPCast(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreateBitCast(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreateIntToPtr(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreatePtrToInt(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreateZExtOrBitCast(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreateSExtOrBitCast(Constant *C, Type *DestTy) const = 0;
  virtual Value *CreateTruncOrBitCast(Constant *C, Type *DestTy) const = 0;

  //===--------------------------------------------------------------------===//
  // Compare Instructions
  //===--------------------------------------------------------------------===//

  virtual Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
                            Constant *RHS) const = 0;
};
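
// Usage sketch (illustrative only; buildAdd is a hypothetical helper, not
// part of this interface). A builder asks the folder first and only
// materializes an instruction when folding fails:
//
//   Value *buildAdd(const IRBuilderFolder &Folder, Value *L, Value *R,
//                   Instruction *InsertPt) {
//     if (Value *V = Folder.FoldBinOp(Instruction::Add, L, R))
//       return V; // folded to an existing value or a constant
//     return BinaryOperator::Create(Instruction::Add, L, R, "add", InsertPt);
//   }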

} // end namespace llvm

#endif // LLVM_IR_IRBUILDERFOLDER_H
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_VE_ENUMS_H
#define LLVM_IR_INTRINSIC_VE_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum VEIntrinsics : unsigned {
// Enum values for intrinsics
    ve_vl_andm_MMM = 9102,                            // llvm.ve.vl.andm.MMM
    ve_vl_andm_mmm,                            // llvm.ve.vl.andm.mmm
    ve_vl_eqvm_MMM,                            // llvm.ve.vl.eqvm.MMM
    ve_vl_eqvm_mmm,                            // llvm.ve.vl.eqvm.mmm
    ve_vl_extract_vm512l,                      // llvm.ve.vl.extract.vm512l
    ve_vl_extract_vm512u,                      // llvm.ve.vl.extract.vm512u
    ve_vl_fencec_s,                            // llvm.ve.vl.fencec.s
    ve_vl_fencei,                              // llvm.ve.vl.fencei
    ve_vl_fencem_s,                            // llvm.ve.vl.fencem.s
    ve_vl_fidcr_sss,                           // llvm.ve.vl.fidcr.sss
    ve_vl_insert_vm512l,                       // llvm.ve.vl.insert.vm512l
    ve_vl_insert_vm512u,                       // llvm.ve.vl.insert.vm512u
    ve_vl_lcr_sss,                             // llvm.ve.vl.lcr.sss
    ve_vl_lsv_vvss,                            // llvm.ve.vl.lsv.vvss
    ve_vl_lvm_MMss,                            // llvm.ve.vl.lvm.MMss
    ve_vl_lvm_mmss,                            // llvm.ve.vl.lvm.mmss
    ve_vl_lvsd_svs,                            // llvm.ve.vl.lvsd.svs
    ve_vl_lvsl_svs,                            // llvm.ve.vl.lvsl.svs
    ve_vl_lvss_svs,                            // llvm.ve.vl.lvss.svs
    ve_vl_lzvm_sml,                            // llvm.ve.vl.lzvm.sml
    ve_vl_negm_MM,                             // llvm.ve.vl.negm.MM
    ve_vl_negm_mm,                             // llvm.ve.vl.negm.mm
    ve_vl_nndm_MMM,                            // llvm.ve.vl.nndm.MMM
    ve_vl_nndm_mmm,                            // llvm.ve.vl.nndm.mmm
    ve_vl_orm_MMM,                             // llvm.ve.vl.orm.MMM
    ve_vl_orm_mmm,                             // llvm.ve.vl.orm.mmm
    ve_vl_pack_f32a,                           // llvm.ve.vl.pack.f32a
    ve_vl_pack_f32p,                           // llvm.ve.vl.pack.f32p
    ve_vl_pcvm_sml,                            // llvm.ve.vl.pcvm.sml
    ve_vl_pfchv_ssl,                           // llvm.ve.vl.pfchv.ssl
    ve_vl_pfchvnc_ssl,                         // llvm.ve.vl.pfchvnc.ssl
    ve_vl_pvadds_vsvMvl,                       // llvm.ve.vl.pvadds.vsvMvl
    ve_vl_pvadds_vsvl,                         // llvm.ve.vl.pvadds.vsvl
    ve_vl_pvadds_vsvvl,                        // llvm.ve.vl.pvadds.vsvvl
    ve_vl_pvadds_vvvMvl,                       // llvm.ve.vl.pvadds.vvvMvl
    ve_vl_pvadds_vvvl,                         // llvm.ve.vl.pvadds.vvvl
    ve_vl_pvadds_vvvvl,                        // llvm.ve.vl.pvadds.vvvvl
    ve_vl_pvaddu_vsvMvl,                       // llvm.ve.vl.pvaddu.vsvMvl
    ve_vl_pvaddu_vsvl,                         // llvm.ve.vl.pvaddu.vsvl
    ve_vl_pvaddu_vsvvl,                        // llvm.ve.vl.pvaddu.vsvvl
    ve_vl_pvaddu_vvvMvl,                       // llvm.ve.vl.pvaddu.vvvMvl
    ve_vl_pvaddu_vvvl,                         // llvm.ve.vl.pvaddu.vvvl
    ve_vl_pvaddu_vvvvl,                        // llvm.ve.vl.pvaddu.vvvvl
    ve_vl_pvand_vsvMvl,                        // llvm.ve.vl.pvand.vsvMvl
    ve_vl_pvand_vsvl,                          // llvm.ve.vl.pvand.vsvl
    ve_vl_pvand_vsvvl,                         // llvm.ve.vl.pvand.vsvvl
    ve_vl_pvand_vvvMvl,                        // llvm.ve.vl.pvand.vvvMvl
    ve_vl_pvand_vvvl,                          // llvm.ve.vl.pvand.vvvl
    ve_vl_pvand_vvvvl,                         // llvm.ve.vl.pvand.vvvvl
    ve_vl_pvbrd_vsMvl,                         // llvm.ve.vl.pvbrd.vsMvl
    ve_vl_pvbrd_vsl,                           // llvm.ve.vl.pvbrd.vsl
    ve_vl_pvbrd_vsvl,                          // llvm.ve.vl.pvbrd.vsvl
    ve_vl_pvbrv_vvMvl,                         // llvm.ve.vl.pvbrv.vvMvl
    ve_vl_pvbrv_vvl,                           // llvm.ve.vl.pvbrv.vvl
    ve_vl_pvbrv_vvvl,                          // llvm.ve.vl.pvbrv.vvvl
    ve_vl_pvbrvlo_vvl,                         // llvm.ve.vl.pvbrvlo.vvl
    ve_vl_pvbrvlo_vvmvl,                       // llvm.ve.vl.pvbrvlo.vvmvl
    ve_vl_pvbrvlo_vvvl,                        // llvm.ve.vl.pvbrvlo.vvvl
    ve_vl_pvbrvup_vvl,                         // llvm.ve.vl.pvbrvup.vvl
    ve_vl_pvbrvup_vvmvl,                       // llvm.ve.vl.pvbrvup.vvmvl
    ve_vl_pvbrvup_vvvl,                        // llvm.ve.vl.pvbrvup.vvvl
    ve_vl_pvcmps_vsvMvl,                       // llvm.ve.vl.pvcmps.vsvMvl
    ve_vl_pvcmps_vsvl,                         // llvm.ve.vl.pvcmps.vsvl
    ve_vl_pvcmps_vsvvl,                        // llvm.ve.vl.pvcmps.vsvvl
    ve_vl_pvcmps_vvvMvl,                       // llvm.ve.vl.pvcmps.vvvMvl
    ve_vl_pvcmps_vvvl,                         // llvm.ve.vl.pvcmps.vvvl
    ve_vl_pvcmps_vvvvl,                        // llvm.ve.vl.pvcmps.vvvvl
    ve_vl_pvcmpu_vsvMvl,                       // llvm.ve.vl.pvcmpu.vsvMvl
    ve_vl_pvcmpu_vsvl,                         // llvm.ve.vl.pvcmpu.vsvl
    ve_vl_pvcmpu_vsvvl,                        // llvm.ve.vl.pvcmpu.vsvvl
    ve_vl_pvcmpu_vvvMvl,                       // llvm.ve.vl.pvcmpu.vvvMvl
    ve_vl_pvcmpu_vvvl,                         // llvm.ve.vl.pvcmpu.vvvl
    ve_vl_pvcmpu_vvvvl,                        // llvm.ve.vl.pvcmpu.vvvvl
    ve_vl_pvcvtsw_vvl,                         // llvm.ve.vl.pvcvtsw.vvl
    ve_vl_pvcvtsw_vvvl,                        // llvm.ve.vl.pvcvtsw.vvvl
    ve_vl_pvcvtws_vvMvl,                       // llvm.ve.vl.pvcvtws.vvMvl
    ve_vl_pvcvtws_vvl,                         // llvm.ve.vl.pvcvtws.vvl
    ve_vl_pvcvtws_vvvl,                        // llvm.ve.vl.pvcvtws.vvvl
    ve_vl_pvcvtwsrz_vvMvl,                     // llvm.ve.vl.pvcvtwsrz.vvMvl
    ve_vl_pvcvtwsrz_vvl,                       // llvm.ve.vl.pvcvtwsrz.vvl
    ve_vl_pvcvtwsrz_vvvl,                      // llvm.ve.vl.pvcvtwsrz.vvvl
    ve_vl_pveqv_vsvMvl,                        // llvm.ve.vl.pveqv.vsvMvl
    ve_vl_pveqv_vsvl,                          // llvm.ve.vl.pveqv.vsvl
    ve_vl_pveqv_vsvvl,                         // llvm.ve.vl.pveqv.vsvvl
    ve_vl_pveqv_vvvMvl,                        // llvm.ve.vl.pveqv.vvvMvl
    ve_vl_pveqv_vvvl,                          // llvm.ve.vl.pveqv.vvvl
    ve_vl_pveqv_vvvvl,                         // llvm.ve.vl.pveqv.vvvvl
    ve_vl_pvfadd_vsvMvl,                       // llvm.ve.vl.pvfadd.vsvMvl
    ve_vl_pvfadd_vsvl,                         // llvm.ve.vl.pvfadd.vsvl
    ve_vl_pvfadd_vsvvl,                        // llvm.ve.vl.pvfadd.vsvvl
    ve_vl_pvfadd_vvvMvl,                       // llvm.ve.vl.pvfadd.vvvMvl
    ve_vl_pvfadd_vvvl,                         // llvm.ve.vl.pvfadd.vvvl
    ve_vl_pvfadd_vvvvl,                        // llvm.ve.vl.pvfadd.vvvvl
    ve_vl_pvfcmp_vsvMvl,                       // llvm.ve.vl.pvfcmp.vsvMvl
    ve_vl_pvfcmp_vsvl,                         // llvm.ve.vl.pvfcmp.vsvl
    ve_vl_pvfcmp_vsvvl,                        // llvm.ve.vl.pvfcmp.vsvvl
    ve_vl_pvfcmp_vvvMvl,                       // llvm.ve.vl.pvfcmp.vvvMvl
    ve_vl_pvfcmp_vvvl,                         // llvm.ve.vl.pvfcmp.vvvl
    ve_vl_pvfcmp_vvvvl,                        // llvm.ve.vl.pvfcmp.vvvvl
    ve_vl_pvfmad_vsvvMvl,                      // llvm.ve.vl.pvfmad.vsvvMvl
    ve_vl_pvfmad_vsvvl,                        // llvm.ve.vl.pvfmad.vsvvl
    ve_vl_pvfmad_vsvvvl,                       // llvm.ve.vl.pvfmad.vsvvvl
    ve_vl_pvfmad_vvsvMvl,                      // llvm.ve.vl.pvfmad.vvsvMvl
    ve_vl_pvfmad_vvsvl,                        // llvm.ve.vl.pvfmad.vvsvl
    ve_vl_pvfmad_vvsvvl,                       // llvm.ve.vl.pvfmad.vvsvvl
    ve_vl_pvfmad_vvvvMvl,                      // llvm.ve.vl.pvfmad.vvvvMvl
    ve_vl_pvfmad_vvvvl,                        // llvm.ve.vl.pvfmad.vvvvl
    ve_vl_pvfmad_vvvvvl,                       // llvm.ve.vl.pvfmad.vvvvvl
    ve_vl_pvfmax_vsvMvl,                       // llvm.ve.vl.pvfmax.vsvMvl
    ve_vl_pvfmax_vsvl,                         // llvm.ve.vl.pvfmax.vsvl
    ve_vl_pvfmax_vsvvl,                        // llvm.ve.vl.pvfmax.vsvvl
    ve_vl_pvfmax_vvvMvl,                       // llvm.ve.vl.pvfmax.vvvMvl
    ve_vl_pvfmax_vvvl,                         // llvm.ve.vl.pvfmax.vvvl
    ve_vl_pvfmax_vvvvl,                        // llvm.ve.vl.pvfmax.vvvvl
    ve_vl_pvfmin_vsvMvl,                       // llvm.ve.vl.pvfmin.vsvMvl
    ve_vl_pvfmin_vsvl,                         // llvm.ve.vl.pvfmin.vsvl
    ve_vl_pvfmin_vsvvl,                        // llvm.ve.vl.pvfmin.vsvvl
    ve_vl_pvfmin_vvvMvl,                       // llvm.ve.vl.pvfmin.vvvMvl
    ve_vl_pvfmin_vvvl,                         // llvm.ve.vl.pvfmin.vvvl
    ve_vl_pvfmin_vvvvl,                        // llvm.ve.vl.pvfmin.vvvvl
    ve_vl_pvfmkaf_Ml,                          // llvm.ve.vl.pvfmkaf.Ml
    ve_vl_pvfmkat_Ml,                          // llvm.ve.vl.pvfmkat.Ml
    ve_vl_pvfmkseq_MvMl,                       // llvm.ve.vl.pvfmkseq.MvMl
    ve_vl_pvfmkseq_Mvl,                        // llvm.ve.vl.pvfmkseq.Mvl
    ve_vl_pvfmkseqnan_MvMl,                    // llvm.ve.vl.pvfmkseqnan.MvMl
    ve_vl_pvfmkseqnan_Mvl,                     // llvm.ve.vl.pvfmkseqnan.Mvl
    ve_vl_pvfmksge_MvMl,                       // llvm.ve.vl.pvfmksge.MvMl
    ve_vl_pvfmksge_Mvl,                        // llvm.ve.vl.pvfmksge.Mvl
    ve_vl_pvfmksgenan_MvMl,                    // llvm.ve.vl.pvfmksgenan.MvMl
    ve_vl_pvfmksgenan_Mvl,                     // llvm.ve.vl.pvfmksgenan.Mvl
    ve_vl_pvfmksgt_MvMl,                       // llvm.ve.vl.pvfmksgt.MvMl
    ve_vl_pvfmksgt_Mvl,                        // llvm.ve.vl.pvfmksgt.Mvl
    ve_vl_pvfmksgtnan_MvMl,                    // llvm.ve.vl.pvfmksgtnan.MvMl
    ve_vl_pvfmksgtnan_Mvl,                     // llvm.ve.vl.pvfmksgtnan.Mvl
    ve_vl_pvfmksle_MvMl,                       // llvm.ve.vl.pvfmksle.MvMl
    ve_vl_pvfmksle_Mvl,                        // llvm.ve.vl.pvfmksle.Mvl
    ve_vl_pvfmkslenan_MvMl,                    // llvm.ve.vl.pvfmkslenan.MvMl
    ve_vl_pvfmkslenan_Mvl,                     // llvm.ve.vl.pvfmkslenan.Mvl
    ve_vl_pvfmksloeq_mvl,                      // llvm.ve.vl.pvfmksloeq.mvl
    ve_vl_pvfmksloeq_mvml,                     // llvm.ve.vl.pvfmksloeq.mvml
    ve_vl_pvfmksloeqnan_mvl,                   // llvm.ve.vl.pvfmksloeqnan.mvl
    ve_vl_pvfmksloeqnan_mvml,                  // llvm.ve.vl.pvfmksloeqnan.mvml
    ve_vl_pvfmksloge_mvl,                      // llvm.ve.vl.pvfmksloge.mvl
    ve_vl_pvfmksloge_mvml,                     // llvm.ve.vl.pvfmksloge.mvml
    ve_vl_pvfmkslogenan_mvl,                   // llvm.ve.vl.pvfmkslogenan.mvl
    ve_vl_pvfmkslogenan_mvml,                  // llvm.ve.vl.pvfmkslogenan.mvml
    ve_vl_pvfmkslogt_mvl,                      // llvm.ve.vl.pvfmkslogt.mvl
    ve_vl_pvfmkslogt_mvml,                     // llvm.ve.vl.pvfmkslogt.mvml
    ve_vl_pvfmkslogtnan_mvl,                   // llvm.ve.vl.pvfmkslogtnan.mvl
    ve_vl_pvfmkslogtnan_mvml,                  // llvm.ve.vl.pvfmkslogtnan.mvml
    ve_vl_pvfmkslole_mvl,                      // llvm.ve.vl.pvfmkslole.mvl
    ve_vl_pvfmkslole_mvml,                     // llvm.ve.vl.pvfmkslole.mvml
    ve_vl_pvfmkslolenan_mvl,                   // llvm.ve.vl.pvfmkslolenan.mvl
    ve_vl_pvfmkslolenan_mvml,                  // llvm.ve.vl.pvfmkslolenan.mvml
    ve_vl_pvfmkslolt_mvl,                      // llvm.ve.vl.pvfmkslolt.mvl
    ve_vl_pvfmkslolt_mvml,                     // llvm.ve.vl.pvfmkslolt.mvml
    ve_vl_pvfmksloltnan_mvl,                   // llvm.ve.vl.pvfmksloltnan.mvl
    ve_vl_pvfmksloltnan_mvml,                  // llvm.ve.vl.pvfmksloltnan.mvml
    ve_vl_pvfmkslonan_mvl,                     // llvm.ve.vl.pvfmkslonan.mvl
    ve_vl_pvfmkslonan_mvml,                    // llvm.ve.vl.pvfmkslonan.mvml
    ve_vl_pvfmkslone_mvl,                      // llvm.ve.vl.pvfmkslone.mvl
    ve_vl_pvfmkslone_mvml,                     // llvm.ve.vl.pvfmkslone.mvml
    ve_vl_pvfmkslonenan_mvl,                   // llvm.ve.vl.pvfmkslonenan.mvl
    ve_vl_pvfmkslonenan_mvml,                  // llvm.ve.vl.pvfmkslonenan.mvml
    ve_vl_pvfmkslonum_mvl,                     // llvm.ve.vl.pvfmkslonum.mvl
    ve_vl_pvfmkslonum_mvml,                    // llvm.ve.vl.pvfmkslonum.mvml
    ve_vl_pvfmkslt_MvMl,                       // llvm.ve.vl.pvfmkslt.MvMl
    ve_vl_pvfmkslt_Mvl,                        // llvm.ve.vl.pvfmkslt.Mvl
    ve_vl_pvfmksltnan_MvMl,                    // llvm.ve.vl.pvfmksltnan.MvMl
    ve_vl_pvfmksltnan_Mvl,                     // llvm.ve.vl.pvfmksltnan.Mvl
    ve_vl_pvfmksnan_MvMl,                      // llvm.ve.vl.pvfmksnan.MvMl
    ve_vl_pvfmksnan_Mvl,                       // llvm.ve.vl.pvfmksnan.Mvl
    ve_vl_pvfmksne_MvMl,                       // llvm.ve.vl.pvfmksne.MvMl
    ve_vl_pvfmksne_Mvl,                        // llvm.ve.vl.pvfmksne.Mvl
    ve_vl_pvfmksnenan_MvMl,                    // llvm.ve.vl.pvfmksnenan.MvMl
    ve_vl_pvfmksnenan_Mvl,                     // llvm.ve.vl.pvfmksnenan.Mvl
    ve_vl_pvfmksnum_MvMl,                      // llvm.ve.vl.pvfmksnum.MvMl
    ve_vl_pvfmksnum_Mvl,                       // llvm.ve.vl.pvfmksnum.Mvl
    ve_vl_pvfmksupeq_mvl,                      // llvm.ve.vl.pvfmksupeq.mvl
    ve_vl_pvfmksupeq_mvml,                     // llvm.ve.vl.pvfmksupeq.mvml
    ve_vl_pvfmksupeqnan_mvl,                   // llvm.ve.vl.pvfmksupeqnan.mvl
    ve_vl_pvfmksupeqnan_mvml,                  // llvm.ve.vl.pvfmksupeqnan.mvml
    ve_vl_pvfmksupge_mvl,                      // llvm.ve.vl.pvfmksupge.mvl
    ve_vl_pvfmksupge_mvml,                     // llvm.ve.vl.pvfmksupge.mvml
    ve_vl_pvfmksupgenan_mvl,                   // llvm.ve.vl.pvfmksupgenan.mvl
    ve_vl_pvfmksupgenan_mvml,                  // llvm.ve.vl.pvfmksupgenan.mvml
    ve_vl_pvfmksupgt_mvl,                      // llvm.ve.vl.pvfmksupgt.mvl
    ve_vl_pvfmksupgt_mvml,                     // llvm.ve.vl.pvfmksupgt.mvml
    ve_vl_pvfmksupgtnan_mvl,                   // llvm.ve.vl.pvfmksupgtnan.mvl
    ve_vl_pvfmksupgtnan_mvml,                  // llvm.ve.vl.pvfmksupgtnan.mvml
    ve_vl_pvfmksuple_mvl,                      // llvm.ve.vl.pvfmksuple.mvl
    ve_vl_pvfmksuple_mvml,                     // llvm.ve.vl.pvfmksuple.mvml
    ve_vl_pvfmksuplenan_mvl,                   // llvm.ve.vl.pvfmksuplenan.mvl
    ve_vl_pvfmksuplenan_mvml,                  // llvm.ve.vl.pvfmksuplenan.mvml
    ve_vl_pvfmksuplt_mvl,                      // llvm.ve.vl.pvfmksuplt.mvl
    ve_vl_pvfmksuplt_mvml,                     // llvm.ve.vl.pvfmksuplt.mvml
    ve_vl_pvfmksupltnan_mvl,                   // llvm.ve.vl.pvfmksupltnan.mvl
    ve_vl_pvfmksupltnan_mvml,                  // llvm.ve.vl.pvfmksupltnan.mvml
    ve_vl_pvfmksupnan_mvl,                     // llvm.ve.vl.pvfmksupnan.mvl
    ve_vl_pvfmksupnan_mvml,                    // llvm.ve.vl.pvfmksupnan.mvml
    ve_vl_pvfmksupne_mvl,                      // llvm.ve.vl.pvfmksupne.mvl
    ve_vl_pvfmksupne_mvml,                     // llvm.ve.vl.pvfmksupne.mvml
    ve_vl_pvfmksupnenan_mvl,                   // llvm.ve.vl.pvfmksupnenan.mvl
    ve_vl_pvfmksupnenan_mvml,                  // llvm.ve.vl.pvfmksupnenan.mvml
    ve_vl_pvfmksupnum_mvl,                     // llvm.ve.vl.pvfmksupnum.mvl
    ve_vl_pvfmksupnum_mvml,                    // llvm.ve.vl.pvfmksupnum.mvml
    ve_vl_pvfmkweq_MvMl,                       // llvm.ve.vl.pvfmkweq.MvMl
    ve_vl_pvfmkweq_Mvl,                        // llvm.ve.vl.pvfmkweq.Mvl
    ve_vl_pvfmkweqnan_MvMl,                    // llvm.ve.vl.pvfmkweqnan.MvMl
    ve_vl_pvfmkweqnan_Mvl,                     // llvm.ve.vl.pvfmkweqnan.Mvl
    ve_vl_pvfmkwge_MvMl,                       // llvm.ve.vl.pvfmkwge.MvMl
    ve_vl_pvfmkwge_Mvl,                        // llvm.ve.vl.pvfmkwge.Mvl
    ve_vl_pvfmkwgenan_MvMl,                    // llvm.ve.vl.pvfmkwgenan.MvMl
    ve_vl_pvfmkwgenan_Mvl,                     // llvm.ve.vl.pvfmkwgenan.Mvl
    ve_vl_pvfmkwgt_MvMl,                       // llvm.ve.vl.pvfmkwgt.MvMl
    ve_vl_pvfmkwgt_Mvl,                        // llvm.ve.vl.pvfmkwgt.Mvl
    ve_vl_pvfmkwgtnan_MvMl,                    // llvm.ve.vl.pvfmkwgtnan.MvMl
    ve_vl_pvfmkwgtnan_Mvl,                     // llvm.ve.vl.pvfmkwgtnan.Mvl
    ve_vl_pvfmkwle_MvMl,                       // llvm.ve.vl.pvfmkwle.MvMl
    ve_vl_pvfmkwle_Mvl,                        // llvm.ve.vl.pvfmkwle.Mvl
    ve_vl_pvfmkwlenan_MvMl,                    // llvm.ve.vl.pvfmkwlenan.MvMl
    ve_vl_pvfmkwlenan_Mvl,                     // llvm.ve.vl.pvfmkwlenan.Mvl
    ve_vl_pvfmkwloeq_mvl,                      // llvm.ve.vl.pvfmkwloeq.mvl
    ve_vl_pvfmkwloeq_mvml,                     // llvm.ve.vl.pvfmkwloeq.mvml
    ve_vl_pvfmkwloeqnan_mvl,                   // llvm.ve.vl.pvfmkwloeqnan.mvl
    ve_vl_pvfmkwloeqnan_mvml,                  // llvm.ve.vl.pvfmkwloeqnan.mvml
    ve_vl_pvfmkwloge_mvl,                      // llvm.ve.vl.pvfmkwloge.mvl
    ve_vl_pvfmkwloge_mvml,                     // llvm.ve.vl.pvfmkwloge.mvml
    ve_vl_pvfmkwlogenan_mvl,                   // llvm.ve.vl.pvfmkwlogenan.mvl
    ve_vl_pvfmkwlogenan_mvml,                  // llvm.ve.vl.pvfmkwlogenan.mvml
    ve_vl_pvfmkwlogt_mvl,                      // llvm.ve.vl.pvfmkwlogt.mvl
    ve_vl_pvfmkwlogt_mvml,                     // llvm.ve.vl.pvfmkwlogt.mvml
    ve_vl_pvfmkwlogtnan_mvl,                   // llvm.ve.vl.pvfmkwlogtnan.mvl
    ve_vl_pvfmkwlogtnan_mvml,                  // llvm.ve.vl.pvfmkwlogtnan.mvml
    ve_vl_pvfmkwlole_mvl,                      // llvm.ve.vl.pvfmkwlole.mvl
    ve_vl_pvfmkwlole_mvml,                     // llvm.ve.vl.pvfmkwlole.mvml
    ve_vl_pvfmkwlolenan_mvl,                   // llvm.ve.vl.pvfmkwlolenan.mvl
    ve_vl_pvfmkwlolenan_mvml,                  // llvm.ve.vl.pvfmkwlolenan.mvml
    ve_vl_pvfmkwlolt_mvl,                      // llvm.ve.vl.pvfmkwlolt.mvl
    ve_vl_pvfmkwlolt_mvml,                     // llvm.ve.vl.pvfmkwlolt.mvml
    ve_vl_pvfmkwloltnan_mvl,                   // llvm.ve.vl.pvfmkwloltnan.mvl
    ve_vl_pvfmkwloltnan_mvml,                  // llvm.ve.vl.pvfmkwloltnan.mvml
    ve_vl_pvfmkwlonan_mvl,                     // llvm.ve.vl.pvfmkwlonan.mvl
    ve_vl_pvfmkwlonan_mvml,                    // llvm.ve.vl.pvfmkwlonan.mvml
    ve_vl_pvfmkwlone_mvl,                      // llvm.ve.vl.pvfmkwlone.mvl
    ve_vl_pvfmkwlone_mvml,                     // llvm.ve.vl.pvfmkwlone.mvml
    ve_vl_pvfmkwlonenan_mvl,                   // llvm.ve.vl.pvfmkwlonenan.mvl
    ve_vl_pvfmkwlonenan_mvml,                  // llvm.ve.vl.pvfmkwlonenan.mvml
    ve_vl_pvfmkwlonum_mvl,                     // llvm.ve.vl.pvfmkwlonum.mvl
    ve_vl_pvfmkwlonum_mvml,                    // llvm.ve.vl.pvfmkwlonum.mvml
    ve_vl_pvfmkwlt_MvMl,                       // llvm.ve.vl.pvfmkwlt.MvMl
    ve_vl_pvfmkwlt_Mvl,                        // llvm.ve.vl.pvfmkwlt.Mvl
    ve_vl_pvfmkwltnan_MvMl,                    // llvm.ve.vl.pvfmkwltnan.MvMl
    ve_vl_pvfmkwltnan_Mvl,                     // llvm.ve.vl.pvfmkwltnan.Mvl
    ve_vl_pvfmkwnan_MvMl,                      // llvm.ve.vl.pvfmkwnan.MvMl
    ve_vl_pvfmkwnan_Mvl,                       // llvm.ve.vl.pvfmkwnan.Mvl
    ve_vl_pvfmkwne_MvMl,                       // llvm.ve.vl.pvfmkwne.MvMl
    ve_vl_pvfmkwne_Mvl,                        // llvm.ve.vl.pvfmkwne.Mvl
    ve_vl_pvfmkwnenan_MvMl,                    // llvm.ve.vl.pvfmkwnenan.MvMl
    ve_vl_pvfmkwnenan_Mvl,                     // llvm.ve.vl.pvfmkwnenan.Mvl
    ve_vl_pvfmkwnum_MvMl,                      // llvm.ve.vl.pvfmkwnum.MvMl
    ve_vl_pvfmkwnum_Mvl,                       // llvm.ve.vl.pvfmkwnum.Mvl
    ve_vl_pvfmkwupeq_mvl,                      // llvm.ve.vl.pvfmkwupeq.mvl
    ve_vl_pvfmkwupeq_mvml,                     // llvm.ve.vl.pvfmkwupeq.mvml
    ve_vl_pvfmkwupeqnan_mvl,                   // llvm.ve.vl.pvfmkwupeqnan.mvl
    ve_vl_pvfmkwupeqnan_mvml,                  // llvm.ve.vl.pvfmkwupeqnan.mvml
    ve_vl_pvfmkwupge_mvl,                      // llvm.ve.vl.pvfmkwupge.mvl
    ve_vl_pvfmkwupge_mvml,                     // llvm.ve.vl.pvfmkwupge.mvml
    ve_vl_pvfmkwupgenan_mvl,                   // llvm.ve.vl.pvfmkwupgenan.mvl
    ve_vl_pvfmkwupgenan_mvml,                  // llvm.ve.vl.pvfmkwupgenan.mvml
    ve_vl_pvfmkwupgt_mvl,                      // llvm.ve.vl.pvfmkwupgt.mvl
    ve_vl_pvfmkwupgt_mvml,                     // llvm.ve.vl.pvfmkwupgt.mvml
    ve_vl_pvfmkwupgtnan_mvl,                   // llvm.ve.vl.pvfmkwupgtnan.mvl
    ve_vl_pvfmkwupgtnan_mvml,                  // llvm.ve.vl.pvfmkwupgtnan.mvml
    ve_vl_pvfmkwuple_mvl,                      // llvm.ve.vl.pvfmkwuple.mvl
    ve_vl_pvfmkwuple_mvml,                     // llvm.ve.vl.pvfmkwuple.mvml
    ve_vl_pvfmkwuplenan_mvl,                   // llvm.ve.vl.pvfmkwuplenan.mvl
    ve_vl_pvfmkwuplenan_mvml,                  // llvm.ve.vl.pvfmkwuplenan.mvml
    ve_vl_pvfmkwuplt_mvl,                      // llvm.ve.vl.pvfmkwuplt.mvl
    ve_vl_pvfmkwuplt_mvml,                     // llvm.ve.vl.pvfmkwuplt.mvml
    ve_vl_pvfmkwupltnan_mvl,                   // llvm.ve.vl.pvfmkwupltnan.mvl
    ve_vl_pvfmkwupltnan_mvml,                  // llvm.ve.vl.pvfmkwupltnan.mvml
    ve_vl_pvfmkwupnan_mvl,                     // llvm.ve.vl.pvfmkwupnan.mvl
    ve_vl_pvfmkwupnan_mvml,                    // llvm.ve.vl.pvfmkwupnan.mvml
    ve_vl_pvfmkwupne_mvl,                      // llvm.ve.vl.pvfmkwupne.mvl
    ve_vl_pvfmkwupne_mvml,                     // llvm.ve.vl.pvfmkwupne.mvml
    ve_vl_pvfmkwupnenan_mvl,                   // llvm.ve.vl.pvfmkwupnenan.mvl
    ve_vl_pvfmkwupnenan_mvml,                  // llvm.ve.vl.pvfmkwupnenan.mvml
    ve_vl_pvfmkwupnum_mvl,                     // llvm.ve.vl.pvfmkwupnum.mvl
    ve_vl_pvfmkwupnum_mvml,                    // llvm.ve.vl.pvfmkwupnum.mvml
    ve_vl_pvfmsb_vsvvMvl,                      // llvm.ve.vl.pvfmsb.vsvvMvl
    ve_vl_pvfmsb_vsvvl,                        // llvm.ve.vl.pvfmsb.vsvvl
    ve_vl_pvfmsb_vsvvvl,                       // llvm.ve.vl.pvfmsb.vsvvvl
    ve_vl_pvfmsb_vvsvMvl,                      // llvm.ve.vl.pvfmsb.vvsvMvl
    ve_vl_pvfmsb_vvsvl,                        // llvm.ve.vl.pvfmsb.vvsvl
    ve_vl_pvfmsb_vvsvvl,                       // llvm.ve.vl.pvfmsb.vvsvvl
    ve_vl_pvfmsb_vvvvMvl,                      // llvm.ve.vl.pvfmsb.vvvvMvl
    ve_vl_pvfmsb_vvvvl,                        // llvm.ve.vl.pvfmsb.vvvvl
    ve_vl_pvfmsb_vvvvvl,                       // llvm.ve.vl.pvfmsb.vvvvvl
    ve_vl_pvfmul_vsvMvl,                       // llvm.ve.vl.pvfmul.vsvMvl
    ve_vl_pvfmul_vsvl,                         // llvm.ve.vl.pvfmul.vsvl
    ve_vl_pvfmul_vsvvl,                        // llvm.ve.vl.pvfmul.vsvvl
    ve_vl_pvfmul_vvvMvl,                       // llvm.ve.vl.pvfmul.vvvMvl
    ve_vl_pvfmul_vvvl,                         // llvm.ve.vl.pvfmul.vvvl
    ve_vl_pvfmul_vvvvl,                        // llvm.ve.vl.pvfmul.vvvvl
    ve_vl_pvfnmad_vsvvMvl,                     // llvm.ve.vl.pvfnmad.vsvvMvl
    ve_vl_pvfnmad_vsvvl,                       // llvm.ve.vl.pvfnmad.vsvvl
    ve_vl_pvfnmad_vsvvvl,                      // llvm.ve.vl.pvfnmad.vsvvvl
    ve_vl_pvfnmad_vvsvMvl,                     // llvm.ve.vl.pvfnmad.vvsvMvl
    ve_vl_pvfnmad_vvsvl,                       // llvm.ve.vl.pvfnmad.vvsvl
    ve_vl_pvfnmad_vvsvvl,                      // llvm.ve.vl.pvfnmad.vvsvvl
    ve_vl_pvfnmad_vvvvMvl,                     // llvm.ve.vl.pvfnmad.vvvvMvl
    ve_vl_pvfnmad_vvvvl,                       // llvm.ve.vl.pvfnmad.vvvvl
    ve_vl_pvfnmad_vvvvvl,                      // llvm.ve.vl.pvfnmad.vvvvvl
    ve_vl_pvfnmsb_vsvvMvl,                     // llvm.ve.vl.pvfnmsb.vsvvMvl
    ve_vl_pvfnmsb_vsvvl,                       // llvm.ve.vl.pvfnmsb.vsvvl
    ve_vl_pvfnmsb_vsvvvl,                      // llvm.ve.vl.pvfnmsb.vsvvvl
    ve_vl_pvfnmsb_vvsvMvl,                     // llvm.ve.vl.pvfnmsb.vvsvMvl
    ve_vl_pvfnmsb_vvsvl,                       // llvm.ve.vl.pvfnmsb.vvsvl
    ve_vl_pvfnmsb_vvsvvl,                      // llvm.ve.vl.pvfnmsb.vvsvvl
    ve_vl_pvfnmsb_vvvvMvl,                     // llvm.ve.vl.pvfnmsb.vvvvMvl
    ve_vl_pvfnmsb_vvvvl,                       // llvm.ve.vl.pvfnmsb.vvvvl
    ve_vl_pvfnmsb_vvvvvl,                      // llvm.ve.vl.pvfnmsb.vvvvvl
    ve_vl_pvfsub_vsvMvl,                       // llvm.ve.vl.pvfsub.vsvMvl
    ve_vl_pvfsub_vsvl,                         // llvm.ve.vl.pvfsub.vsvl
    ve_vl_pvfsub_vsvvl,                        // llvm.ve.vl.pvfsub.vsvvl
    ve_vl_pvfsub_vvvMvl,                       // llvm.ve.vl.pvfsub.vvvMvl
    ve_vl_pvfsub_vvvl,                         // llvm.ve.vl.pvfsub.vvvl
    ve_vl_pvfsub_vvvvl,                        // llvm.ve.vl.pvfsub.vvvvl
    ve_vl_pvldz_vvMvl,                         // llvm.ve.vl.pvldz.vvMvl
    ve_vl_pvldz_vvl,                           // llvm.ve.vl.pvldz.vvl
    ve_vl_pvldz_vvvl,                          // llvm.ve.vl.pvldz.vvvl
    ve_vl_pvldzlo_vvl,                         // llvm.ve.vl.pvldzlo.vvl
    ve_vl_pvldzlo_vvmvl,                       // llvm.ve.vl.pvldzlo.vvmvl
    ve_vl_pvldzlo_vvvl,                        // llvm.ve.vl.pvldzlo.vvvl
    ve_vl_pvldzup_vvl,                         // llvm.ve.vl.pvldzup.vvl
    ve_vl_pvldzup_vvmvl,                       // llvm.ve.vl.pvldzup.vvmvl
    ve_vl_pvldzup_vvvl,                        // llvm.ve.vl.pvldzup.vvvl
    ve_vl_pvmaxs_vsvMvl,                       // llvm.ve.vl.pvmaxs.vsvMvl
    ve_vl_pvmaxs_vsvl,                         // llvm.ve.vl.pvmaxs.vsvl
    ve_vl_pvmaxs_vsvvl,                        // llvm.ve.vl.pvmaxs.vsvvl
    ve_vl_pvmaxs_vvvMvl,                       // llvm.ve.vl.pvmaxs.vvvMvl
    ve_vl_pvmaxs_vvvl,                         // llvm.ve.vl.pvmaxs.vvvl
    ve_vl_pvmaxs_vvvvl,                        // llvm.ve.vl.pvmaxs.vvvvl
    ve_vl_pvmins_vsvMvl,                       // llvm.ve.vl.pvmins.vsvMvl
    ve_vl_pvmins_vsvl,                         // llvm.ve.vl.pvmins.vsvl
    ve_vl_pvmins_vsvvl,                        // llvm.ve.vl.pvmins.vsvvl
    ve_vl_pvmins_vvvMvl,                       // llvm.ve.vl.pvmins.vvvMvl
    ve_vl_pvmins_vvvl,                         // llvm.ve.vl.pvmins.vvvl
    ve_vl_pvmins_vvvvl,                        // llvm.ve.vl.pvmins.vvvvl
    ve_vl_pvor_vsvMvl,                         // llvm.ve.vl.pvor.vsvMvl
    ve_vl_pvor_vsvl,                           // llvm.ve.vl.pvor.vsvl
    ve_vl_pvor_vsvvl,                          // llvm.ve.vl.pvor.vsvvl
    ve_vl_pvor_vvvMvl,                         // llvm.ve.vl.pvor.vvvMvl
    ve_vl_pvor_vvvl,                           // llvm.ve.vl.pvor.vvvl
    ve_vl_pvor_vvvvl,                          // llvm.ve.vl.pvor.vvvvl
    ve_vl_pvpcnt_vvMvl,                        // llvm.ve.vl.pvpcnt.vvMvl
    ve_vl_pvpcnt_vvl,                          // llvm.ve.vl.pvpcnt.vvl
    ve_vl_pvpcnt_vvvl,                         // llvm.ve.vl.pvpcnt.vvvl
    ve_vl_pvpcntlo_vvl,                        // llvm.ve.vl.pvpcntlo.vvl
    ve_vl_pvpcntlo_vvmvl,                      // llvm.ve.vl.pvpcntlo.vvmvl
    ve_vl_pvpcntlo_vvvl,                       // llvm.ve.vl.pvpcntlo.vvvl
    ve_vl_pvpcntup_vvl,                        // llvm.ve.vl.pvpcntup.vvl
    ve_vl_pvpcntup_vvmvl,                      // llvm.ve.vl.pvpcntup.vvmvl
    ve_vl_pvpcntup_vvvl,                       // llvm.ve.vl.pvpcntup.vvvl
    ve_vl_pvrcp_vvl,                           // llvm.ve.vl.pvrcp.vvl
    ve_vl_pvrcp_vvvl,                          // llvm.ve.vl.pvrcp.vvvl
    ve_vl_pvrsqrt_vvl,                         // llvm.ve.vl.pvrsqrt.vvl
    ve_vl_pvrsqrt_vvvl,                        // llvm.ve.vl.pvrsqrt.vvvl
    ve_vl_pvrsqrtnex_vvl,                      // llvm.ve.vl.pvrsqrtnex.vvl
    ve_vl_pvrsqrtnex_vvvl,                     // llvm.ve.vl.pvrsqrtnex.vvvl
    ve_vl_pvseq_vl,                            // llvm.ve.vl.pvseq.vl
    ve_vl_pvseq_vvl,                           // llvm.ve.vl.pvseq.vvl
    ve_vl_pvseqlo_vl,                          // llvm.ve.vl.pvseqlo.vl
    ve_vl_pvseqlo_vvl,                         // llvm.ve.vl.pvseqlo.vvl
    ve_vl_pvsequp_vl,                          // llvm.ve.vl.pvsequp.vl
    ve_vl_pvsequp_vvl,                         // llvm.ve.vl.pvsequp.vvl
    ve_vl_pvsla_vvsMvl,                        // llvm.ve.vl.pvsla.vvsMvl
    ve_vl_pvsla_vvsl,                          // llvm.ve.vl.pvsla.vvsl
    ve_vl_pvsla_vvsvl,                         // llvm.ve.vl.pvsla.vvsvl
    ve_vl_pvsla_vvvMvl,                        // llvm.ve.vl.pvsla.vvvMvl
    ve_vl_pvsla_vvvl,                          // llvm.ve.vl.pvsla.vvvl
    ve_vl_pvsla_vvvvl,                         // llvm.ve.vl.pvsla.vvvvl
    ve_vl_pvsll_vvsMvl,                        // llvm.ve.vl.pvsll.vvsMvl
    ve_vl_pvsll_vvsl,                          // llvm.ve.vl.pvsll.vvsl
    ve_vl_pvsll_vvsvl,                         // llvm.ve.vl.pvsll.vvsvl
    ve_vl_pvsll_vvvMvl,                        // llvm.ve.vl.pvsll.vvvMvl
    ve_vl_pvsll_vvvl,                          // llvm.ve.vl.pvsll.vvvl
    ve_vl_pvsll_vvvvl,                         // llvm.ve.vl.pvsll.vvvvl
    ve_vl_pvsra_vvsMvl,                        // llvm.ve.vl.pvsra.vvsMvl
    ve_vl_pvsra_vvsl,                          // llvm.ve.vl.pvsra.vvsl
    ve_vl_pvsra_vvsvl,                         // llvm.ve.vl.pvsra.vvsvl
    ve_vl_pvsra_vvvMvl,                        // llvm.ve.vl.pvsra.vvvMvl
    ve_vl_pvsra_vvvl,                          // llvm.ve.vl.pvsra.vvvl
    ve_vl_pvsra_vvvvl,                         // llvm.ve.vl.pvsra.vvvvl
    ve_vl_pvsrl_vvsMvl,                        // llvm.ve.vl.pvsrl.vvsMvl
    ve_vl_pvsrl_vvsl,                          // llvm.ve.vl.pvsrl.vvsl
    ve_vl_pvsrl_vvsvl,                         // llvm.ve.vl.pvsrl.vvsvl
    ve_vl_pvsrl_vvvMvl,                        // llvm.ve.vl.pvsrl.vvvMvl
    ve_vl_pvsrl_vvvl,                          // llvm.ve.vl.pvsrl.vvvl
    ve_vl_pvsrl_vvvvl,                         // llvm.ve.vl.pvsrl.vvvvl
    ve_vl_pvsubs_vsvMvl,                       // llvm.ve.vl.pvsubs.vsvMvl
    ve_vl_pvsubs_vsvl,                         // llvm.ve.vl.pvsubs.vsvl
    ve_vl_pvsubs_vsvvl,                        // llvm.ve.vl.pvsubs.vsvvl
    ve_vl_pvsubs_vvvMvl,                       // llvm.ve.vl.pvsubs.vvvMvl
    ve_vl_pvsubs_vvvl,                         // llvm.ve.vl.pvsubs.vvvl
    ve_vl_pvsubs_vvvvl,                        // llvm.ve.vl.pvsubs.vvvvl
    ve_vl_pvsubu_vsvMvl,                       // llvm.ve.vl.pvsubu.vsvMvl
    ve_vl_pvsubu_vsvl,                         // llvm.ve.vl.pvsubu.vsvl
    ve_vl_pvsubu_vsvvl,                        // llvm.ve.vl.pvsubu.vsvvl
    ve_vl_pvsubu_vvvMvl,                       // llvm.ve.vl.pvsubu.vvvMvl
    ve_vl_pvsubu_vvvl,                         // llvm.ve.vl.pvsubu.vvvl
    ve_vl_pvsubu_vvvvl,                        // llvm.ve.vl.pvsubu.vvvvl
    ve_vl_pvxor_vsvMvl,                        // llvm.ve.vl.pvxor.vsvMvl
    ve_vl_pvxor_vsvl,                          // llvm.ve.vl.pvxor.vsvl
    ve_vl_pvxor_vsvvl,                         // llvm.ve.vl.pvxor.vsvvl
    ve_vl_pvxor_vvvMvl,                        // llvm.ve.vl.pvxor.vvvMvl
    ve_vl_pvxor_vvvl,                          // llvm.ve.vl.pvxor.vvvl
    ve_vl_pvxor_vvvvl,                         // llvm.ve.vl.pvxor.vvvvl
    ve_vl_scr_sss,                             // llvm.ve.vl.scr.sss
    ve_vl_svm_sMs,                             // llvm.ve.vl.svm.sMs
    ve_vl_svm_sms,                             // llvm.ve.vl.svm.sms
    ve_vl_svob,                                // llvm.ve.vl.svob
    ve_vl_tovm_sml,                            // llvm.ve.vl.tovm.sml
    ve_vl_tscr_ssss,                           // llvm.ve.vl.tscr.ssss
    ve_vl_vaddsl_vsvl,                         // llvm.ve.vl.vaddsl.vsvl
    ve_vl_vaddsl_vsvmvl,                       // llvm.ve.vl.vaddsl.vsvmvl
    ve_vl_vaddsl_vsvvl,                        // llvm.ve.vl.vaddsl.vsvvl
    ve_vl_vaddsl_vvvl,                         // llvm.ve.vl.vaddsl.vvvl
    ve_vl_vaddsl_vvvmvl,                       // llvm.ve.vl.vaddsl.vvvmvl
    ve_vl_vaddsl_vvvvl,                        // llvm.ve.vl.vaddsl.vvvvl
    ve_vl_vaddswsx_vsvl,                       // llvm.ve.vl.vaddswsx.vsvl
    ve_vl_vaddswsx_vsvmvl,                     // llvm.ve.vl.vaddswsx.vsvmvl
    ve_vl_vaddswsx_vsvvl,                      // llvm.ve.vl.vaddswsx.vsvvl
    ve_vl_vaddswsx_vvvl,                       // llvm.ve.vl.vaddswsx.vvvl
    ve_vl_vaddswsx_vvvmvl,                     // llvm.ve.vl.vaddswsx.vvvmvl
    ve_vl_vaddswsx_vvvvl,                      // llvm.ve.vl.vaddswsx.vvvvl
    ve_vl_vaddswzx_vsvl,                       // llvm.ve.vl.vaddswzx.vsvl
    ve_vl_vaddswzx_vsvmvl,                     // llvm.ve.vl.vaddswzx.vsvmvl
    ve_vl_vaddswzx_vsvvl,                      // llvm.ve.vl.vaddswzx.vsvvl
    ve_vl_vaddswzx_vvvl,                       // llvm.ve.vl.vaddswzx.vvvl
    ve_vl_vaddswzx_vvvmvl,                     // llvm.ve.vl.vaddswzx.vvvmvl
    ve_vl_vaddswzx_vvvvl,                      // llvm.ve.vl.vaddswzx.vvvvl
    ve_vl_vaddul_vsvl,                         // llvm.ve.vl.vaddul.vsvl
    ve_vl_vaddul_vsvmvl,                       // llvm.ve.vl.vaddul.vsvmvl
    ve_vl_vaddul_vsvvl,                        // llvm.ve.vl.vaddul.vsvvl
    ve_vl_vaddul_vvvl,                         // llvm.ve.vl.vaddul.vvvl
    ve_vl_vaddul_vvvmvl,                       // llvm.ve.vl.vaddul.vvvmvl
    ve_vl_vaddul_vvvvl,                        // llvm.ve.vl.vaddul.vvvvl
    ve_vl_vadduw_vsvl,                         // llvm.ve.vl.vadduw.vsvl
    ve_vl_vadduw_vsvmvl,                       // llvm.ve.vl.vadduw.vsvmvl
    ve_vl_vadduw_vsvvl,                        // llvm.ve.vl.vadduw.vsvvl
    ve_vl_vadduw_vvvl,                         // llvm.ve.vl.vadduw.vvvl
    ve_vl_vadduw_vvvmvl,                       // llvm.ve.vl.vadduw.vvvmvl
    ve_vl_vadduw_vvvvl,                        // llvm.ve.vl.vadduw.vvvvl
    ve_vl_vand_vsvl,                           // llvm.ve.vl.vand.vsvl
    ve_vl_vand_vsvmvl,                         // llvm.ve.vl.vand.vsvmvl
    ve_vl_vand_vsvvl,                          // llvm.ve.vl.vand.vsvvl
    ve_vl_vand_vvvl,                           // llvm.ve.vl.vand.vvvl
    ve_vl_vand_vvvmvl,                         // llvm.ve.vl.vand.vvvmvl
    ve_vl_vand_vvvvl,                          // llvm.ve.vl.vand.vvvvl
    ve_vl_vbrdd_vsl,                           // llvm.ve.vl.vbrdd.vsl
    ve_vl_vbrdd_vsmvl,                         // llvm.ve.vl.vbrdd.vsmvl
    ve_vl_vbrdd_vsvl,                          // llvm.ve.vl.vbrdd.vsvl
    ve_vl_vbrdl_vsl,                           // llvm.ve.vl.vbrdl.vsl
    ve_vl_vbrdl_vsmvl,                         // llvm.ve.vl.vbrdl.vsmvl
    ve_vl_vbrdl_vsvl,                          // llvm.ve.vl.vbrdl.vsvl
    ve_vl_vbrds_vsl,                           // llvm.ve.vl.vbrds.vsl
    ve_vl_vbrds_vsmvl,                         // llvm.ve.vl.vbrds.vsmvl
    ve_vl_vbrds_vsvl,                          // llvm.ve.vl.vbrds.vsvl
    ve_vl_vbrdw_vsl,                           // llvm.ve.vl.vbrdw.vsl
    ve_vl_vbrdw_vsmvl,                         // llvm.ve.vl.vbrdw.vsmvl
    ve_vl_vbrdw_vsvl,                          // llvm.ve.vl.vbrdw.vsvl
    ve_vl_vbrv_vvl,                            // llvm.ve.vl.vbrv.vvl
    ve_vl_vbrv_vvmvl,                          // llvm.ve.vl.vbrv.vvmvl
    ve_vl_vbrv_vvvl,                           // llvm.ve.vl.vbrv.vvvl
    ve_vl_vcmpsl_vsvl,                         // llvm.ve.vl.vcmpsl.vsvl
    ve_vl_vcmpsl_vsvmvl,                       // llvm.ve.vl.vcmpsl.vsvmvl
    ve_vl_vcmpsl_vsvvl,                        // llvm.ve.vl.vcmpsl.vsvvl
    ve_vl_vcmpsl_vvvl,                         // llvm.ve.vl.vcmpsl.vvvl
    ve_vl_vcmpsl_vvvmvl,                       // llvm.ve.vl.vcmpsl.vvvmvl
    ve_vl_vcmpsl_vvvvl,                        // llvm.ve.vl.vcmpsl.vvvvl
    ve_vl_vcmpswsx_vsvl,                       // llvm.ve.vl.vcmpswsx.vsvl
    ve_vl_vcmpswsx_vsvmvl,                     // llvm.ve.vl.vcmpswsx.vsvmvl
    ve_vl_vcmpswsx_vsvvl,                      // llvm.ve.vl.vcmpswsx.vsvvl
    ve_vl_vcmpswsx_vvvl,                       // llvm.ve.vl.vcmpswsx.vvvl
    ve_vl_vcmpswsx_vvvmvl,                     // llvm.ve.vl.vcmpswsx.vvvmvl
    ve_vl_vcmpswsx_vvvvl,                      // llvm.ve.vl.vcmpswsx.vvvvl
    ve_vl_vcmpswzx_vsvl,                       // llvm.ve.vl.vcmpswzx.vsvl
    ve_vl_vcmpswzx_vsvmvl,                     // llvm.ve.vl.vcmpswzx.vsvmvl
    ve_vl_vcmpswzx_vsvvl,                      // llvm.ve.vl.vcmpswzx.vsvvl
    ve_vl_vcmpswzx_vvvl,                       // llvm.ve.vl.vcmpswzx.vvvl
    ve_vl_vcmpswzx_vvvmvl,                     // llvm.ve.vl.vcmpswzx.vvvmvl
    ve_vl_vcmpswzx_vvvvl,                      // llvm.ve.vl.vcmpswzx.vvvvl
    ve_vl_vcmpul_vsvl,                         // llvm.ve.vl.vcmpul.vsvl
    ve_vl_vcmpul_vsvmvl,                       // llvm.ve.vl.vcmpul.vsvmvl
    ve_vl_vcmpul_vsvvl,                        // llvm.ve.vl.vcmpul.vsvvl
    ve_vl_vcmpul_vvvl,                         // llvm.ve.vl.vcmpul.vvvl
    ve_vl_vcmpul_vvvmvl,                       // llvm.ve.vl.vcmpul.vvvmvl
    ve_vl_vcmpul_vvvvl,                        // llvm.ve.vl.vcmpul.vvvvl
    ve_vl_vcmpuw_vsvl,                         // llvm.ve.vl.vcmpuw.vsvl
    ve_vl_vcmpuw_vsvmvl,                       // llvm.ve.vl.vcmpuw.vsvmvl
    ve_vl_vcmpuw_vsvvl,                        // llvm.ve.vl.vcmpuw.vsvvl
    ve_vl_vcmpuw_vvvl,                         // llvm.ve.vl.vcmpuw.vvvl
    ve_vl_vcmpuw_vvvmvl,                       // llvm.ve.vl.vcmpuw.vvvmvl
    ve_vl_vcmpuw_vvvvl,                        // llvm.ve.vl.vcmpuw.vvvvl
    ve_vl_vcp_vvmvl,                           // llvm.ve.vl.vcp.vvmvl
    ve_vl_vcvtdl_vvl,                          // llvm.ve.vl.vcvtdl.vvl
    ve_vl_vcvtdl_vvvl,                         // llvm.ve.vl.vcvtdl.vvvl
    ve_vl_vcvtds_vvl,                          // llvm.ve.vl.vcvtds.vvl
    ve_vl_vcvtds_vvvl,                         // llvm.ve.vl.vcvtds.vvvl
    ve_vl_vcvtdw_vvl,                          // llvm.ve.vl.vcvtdw.vvl
    ve_vl_vcvtdw_vvvl,                         // llvm.ve.vl.vcvtdw.vvvl
    ve_vl_vcvtld_vvl,                          // llvm.ve.vl.vcvtld.vvl
    ve_vl_vcvtld_vvmvl,                        // llvm.ve.vl.vcvtld.vvmvl
    ve_vl_vcvtld_vvvl,                         // llvm.ve.vl.vcvtld.vvvl
    ve_vl_vcvtldrz_vvl,                        // llvm.ve.vl.vcvtldrz.vvl
    ve_vl_vcvtldrz_vvmvl,                      // llvm.ve.vl.vcvtldrz.vvmvl
    ve_vl_vcvtldrz_vvvl,                       // llvm.ve.vl.vcvtldrz.vvvl
    ve_vl_vcvtsd_vvl,                          // llvm.ve.vl.vcvtsd.vvl
    ve_vl_vcvtsd_vvvl,                         // llvm.ve.vl.vcvtsd.vvvl
    ve_vl_vcvtsw_vvl,                          // llvm.ve.vl.vcvtsw.vvl
    ve_vl_vcvtsw_vvvl,                         // llvm.ve.vl.vcvtsw.vvvl
    ve_vl_vcvtwdsx_vvl,                        // llvm.ve.vl.vcvtwdsx.vvl
    ve_vl_vcvtwdsx_vvmvl,                      // llvm.ve.vl.vcvtwdsx.vvmvl
    ve_vl_vcvtwdsx_vvvl,                       // llvm.ve.vl.vcvtwdsx.vvvl
    ve_vl_vcvtwdsxrz_vvl,                      // llvm.ve.vl.vcvtwdsxrz.vvl
    ve_vl_vcvtwdsxrz_vvmvl,                    // llvm.ve.vl.vcvtwdsxrz.vvmvl
    ve_vl_vcvtwdsxrz_vvvl,                     // llvm.ve.vl.vcvtwdsxrz.vvvl
    ve_vl_vcvtwdzx_vvl,                        // llvm.ve.vl.vcvtwdzx.vvl
    ve_vl_vcvtwdzx_vvmvl,                      // llvm.ve.vl.vcvtwdzx.vvmvl
    ve_vl_vcvtwdzx_vvvl,                       // llvm.ve.vl.vcvtwdzx.vvvl
    ve_vl_vcvtwdzxrz_vvl,                      // llvm.ve.vl.vcvtwdzxrz.vvl
    ve_vl_vcvtwdzxrz_vvmvl,                    // llvm.ve.vl.vcvtwdzxrz.vvmvl
    ve_vl_vcvtwdzxrz_vvvl,                     // llvm.ve.vl.vcvtwdzxrz.vvvl
    ve_vl_vcvtwssx_vvl,                        // llvm.ve.vl.vcvtwssx.vvl
    ve_vl_vcvtwssx_vvmvl,                      // llvm.ve.vl.vcvtwssx.vvmvl
    ve_vl_vcvtwssx_vvvl,                       // llvm.ve.vl.vcvtwssx.vvvl
    ve_vl_vcvtwssxrz_vvl,                      // llvm.ve.vl.vcvtwssxrz.vvl
    ve_vl_vcvtwssxrz_vvmvl,                    // llvm.ve.vl.vcvtwssxrz.vvmvl
    ve_vl_vcvtwssxrz_vvvl,                     // llvm.ve.vl.vcvtwssxrz.vvvl
    ve_vl_vcvtwszx_vvl,                        // llvm.ve.vl.vcvtwszx.vvl
    ve_vl_vcvtwszx_vvmvl,                      // llvm.ve.vl.vcvtwszx.vvmvl
    ve_vl_vcvtwszx_vvvl,                       // llvm.ve.vl.vcvtwszx.vvvl
    ve_vl_vcvtwszxrz_vvl,                      // llvm.ve.vl.vcvtwszxrz.vvl
    ve_vl_vcvtwszxrz_vvmvl,                    // llvm.ve.vl.vcvtwszxrz.vvmvl
    ve_vl_vcvtwszxrz_vvvl,                     // llvm.ve.vl.vcvtwszxrz.vvvl
    ve_vl_vdivsl_vsvl,                         // llvm.ve.vl.vdivsl.vsvl
    ve_vl_vdivsl_vsvmvl,                       // llvm.ve.vl.vdivsl.vsvmvl
    ve_vl_vdivsl_vsvvl,                        // llvm.ve.vl.vdivsl.vsvvl
    ve_vl_vdivsl_vvsl,                         // llvm.ve.vl.vdivsl.vvsl
    ve_vl_vdivsl_vvsmvl,                       // llvm.ve.vl.vdivsl.vvsmvl
    ve_vl_vdivsl_vvsvl,                        // llvm.ve.vl.vdivsl.vvsvl
    ve_vl_vdivsl_vvvl,                         // llvm.ve.vl.vdivsl.vvvl
    ve_vl_vdivsl_vvvmvl,                       // llvm.ve.vl.vdivsl.vvvmvl
    ve_vl_vdivsl_vvvvl,                        // llvm.ve.vl.vdivsl.vvvvl
    ve_vl_vdivswsx_vsvl,                       // llvm.ve.vl.vdivswsx.vsvl
    ve_vl_vdivswsx_vsvmvl,                     // llvm.ve.vl.vdivswsx.vsvmvl
    ve_vl_vdivswsx_vsvvl,                      // llvm.ve.vl.vdivswsx.vsvvl
    ve_vl_vdivswsx_vvsl,                       // llvm.ve.vl.vdivswsx.vvsl
    ve_vl_vdivswsx_vvsmvl,                     // llvm.ve.vl.vdivswsx.vvsmvl
    ve_vl_vdivswsx_vvsvl,                      // llvm.ve.vl.vdivswsx.vvsvl
    ve_vl_vdivswsx_vvvl,                       // llvm.ve.vl.vdivswsx.vvvl
    ve_vl_vdivswsx_vvvmvl,                     // llvm.ve.vl.vdivswsx.vvvmvl
    ve_vl_vdivswsx_vvvvl,                      // llvm.ve.vl.vdivswsx.vvvvl
    ve_vl_vdivswzx_vsvl,                       // llvm.ve.vl.vdivswzx.vsvl
    ve_vl_vdivswzx_vsvmvl,                     // llvm.ve.vl.vdivswzx.vsvmvl
    ve_vl_vdivswzx_vsvvl,                      // llvm.ve.vl.vdivswzx.vsvvl
    ve_vl_vdivswzx_vvsl,                       // llvm.ve.vl.vdivswzx.vvsl
    ve_vl_vdivswzx_vvsmvl,                     // llvm.ve.vl.vdivswzx.vvsmvl
    ve_vl_vdivswzx_vvsvl,                      // llvm.ve.vl.vdivswzx.vvsvl
    ve_vl_vdivswzx_vvvl,                       // llvm.ve.vl.vdivswzx.vvvl
    ve_vl_vdivswzx_vvvmvl,                     // llvm.ve.vl.vdivswzx.vvvmvl
    ve_vl_vdivswzx_vvvvl,                      // llvm.ve.vl.vdivswzx.vvvvl
    ve_vl_vdivul_vsvl,                         // llvm.ve.vl.vdivul.vsvl
    ve_vl_vdivul_vsvmvl,                       // llvm.ve.vl.vdivul.vsvmvl
    ve_vl_vdivul_vsvvl,                        // llvm.ve.vl.vdivul.vsvvl
    ve_vl_vdivul_vvsl,                         // llvm.ve.vl.vdivul.vvsl
    ve_vl_vdivul_vvsmvl,                       // llvm.ve.vl.vdivul.vvsmvl
    ve_vl_vdivul_vvsvl,                        // llvm.ve.vl.vdivul.vvsvl
    ve_vl_vdivul_vvvl,                         // llvm.ve.vl.vdivul.vvvl
    ve_vl_vdivul_vvvmvl,                       // llvm.ve.vl.vdivul.vvvmvl
    ve_vl_vdivul_vvvvl,                        // llvm.ve.vl.vdivul.vvvvl
    ve_vl_vdivuw_vsvl,                         // llvm.ve.vl.vdivuw.vsvl
    ve_vl_vdivuw_vsvmvl,                       // llvm.ve.vl.vdivuw.vsvmvl
    ve_vl_vdivuw_vsvvl,                        // llvm.ve.vl.vdivuw.vsvvl
    ve_vl_vdivuw_vvsl,                         // llvm.ve.vl.vdivuw.vvsl
    ve_vl_vdivuw_vvsmvl,                       // llvm.ve.vl.vdivuw.vvsmvl
    ve_vl_vdivuw_vvsvl,                        // llvm.ve.vl.vdivuw.vvsvl
    ve_vl_vdivuw_vvvl,                         // llvm.ve.vl.vdivuw.vvvl
    ve_vl_vdivuw_vvvmvl,                       // llvm.ve.vl.vdivuw.vvvmvl
    ve_vl_vdivuw_vvvvl,                        // llvm.ve.vl.vdivuw.vvvvl
    ve_vl_veqv_vsvl,                           // llvm.ve.vl.veqv.vsvl
    ve_vl_veqv_vsvmvl,                         // llvm.ve.vl.veqv.vsvmvl
    ve_vl_veqv_vsvvl,                          // llvm.ve.vl.veqv.vsvvl
    ve_vl_veqv_vvvl,                           // llvm.ve.vl.veqv.vvvl
    ve_vl_veqv_vvvmvl,                         // llvm.ve.vl.veqv.vvvmvl
    ve_vl_veqv_vvvvl,                          // llvm.ve.vl.veqv.vvvvl
    ve_vl_vex_vvmvl,                           // llvm.ve.vl.vex.vvmvl
    ve_vl_vfaddd_vsvl,                         // llvm.ve.vl.vfaddd.vsvl
    ve_vl_vfaddd_vsvmvl,                       // llvm.ve.vl.vfaddd.vsvmvl
    ve_vl_vfaddd_vsvvl,                        // llvm.ve.vl.vfaddd.vsvvl
    ve_vl_vfaddd_vvvl,                         // llvm.ve.vl.vfaddd.vvvl
    ve_vl_vfaddd_vvvmvl,                       // llvm.ve.vl.vfaddd.vvvmvl
    ve_vl_vfaddd_vvvvl,                        // llvm.ve.vl.vfaddd.vvvvl
    ve_vl_vfadds_vsvl,                         // llvm.ve.vl.vfadds.vsvl
    ve_vl_vfadds_vsvmvl,                       // llvm.ve.vl.vfadds.vsvmvl
    ve_vl_vfadds_vsvvl,                        // llvm.ve.vl.vfadds.vsvvl
    ve_vl_vfadds_vvvl,                         // llvm.ve.vl.vfadds.vvvl
    ve_vl_vfadds_vvvmvl,                       // llvm.ve.vl.vfadds.vvvmvl
    ve_vl_vfadds_vvvvl,                        // llvm.ve.vl.vfadds.vvvvl
    ve_vl_vfcmpd_vsvl,                         // llvm.ve.vl.vfcmpd.vsvl
    ve_vl_vfcmpd_vsvmvl,                       // llvm.ve.vl.vfcmpd.vsvmvl
    ve_vl_vfcmpd_vsvvl,                        // llvm.ve.vl.vfcmpd.vsvvl
    ve_vl_vfcmpd_vvvl,                         // llvm.ve.vl.vfcmpd.vvvl
    ve_vl_vfcmpd_vvvmvl,                       // llvm.ve.vl.vfcmpd.vvvmvl
    ve_vl_vfcmpd_vvvvl,                        // llvm.ve.vl.vfcmpd.vvvvl
    ve_vl_vfcmps_vsvl,                         // llvm.ve.vl.vfcmps.vsvl
    ve_vl_vfcmps_vsvmvl,                       // llvm.ve.vl.vfcmps.vsvmvl
    ve_vl_vfcmps_vsvvl,                        // llvm.ve.vl.vfcmps.vsvvl
    ve_vl_vfcmps_vvvl,                         // llvm.ve.vl.vfcmps.vvvl
    ve_vl_vfcmps_vvvmvl,                       // llvm.ve.vl.vfcmps.vvvmvl
    ve_vl_vfcmps_vvvvl,                        // llvm.ve.vl.vfcmps.vvvvl
    ve_vl_vfdivd_vsvl,                         // llvm.ve.vl.vfdivd.vsvl
    ve_vl_vfdivd_vsvmvl,                       // llvm.ve.vl.vfdivd.vsvmvl
    ve_vl_vfdivd_vsvvl,                        // llvm.ve.vl.vfdivd.vsvvl
    ve_vl_vfdivd_vvvl,                         // llvm.ve.vl.vfdivd.vvvl
    ve_vl_vfdivd_vvvmvl,                       // llvm.ve.vl.vfdivd.vvvmvl
    ve_vl_vfdivd_vvvvl,                        // llvm.ve.vl.vfdivd.vvvvl
    ve_vl_vfdivs_vsvl,                         // llvm.ve.vl.vfdivs.vsvl
    ve_vl_vfdivs_vsvmvl,                       // llvm.ve.vl.vfdivs.vsvmvl
    ve_vl_vfdivs_vsvvl,                        // llvm.ve.vl.vfdivs.vsvvl
    ve_vl_vfdivs_vvvl,                         // llvm.ve.vl.vfdivs.vvvl
    ve_vl_vfdivs_vvvmvl,                       // llvm.ve.vl.vfdivs.vvvmvl
    ve_vl_vfdivs_vvvvl,                        // llvm.ve.vl.vfdivs.vvvvl
    ve_vl_vfmadd_vsvvl,                        // llvm.ve.vl.vfmadd.vsvvl
    ve_vl_vfmadd_vsvvmvl,                      // llvm.ve.vl.vfmadd.vsvvmvl
    ve_vl_vfmadd_vsvvvl,                       // llvm.ve.vl.vfmadd.vsvvvl
    ve_vl_vfmadd_vvsvl,                        // llvm.ve.vl.vfmadd.vvsvl
    ve_vl_vfmadd_vvsvmvl,                      // llvm.ve.vl.vfmadd.vvsvmvl
    ve_vl_vfmadd_vvsvvl,                       // llvm.ve.vl.vfmadd.vvsvvl
    ve_vl_vfmadd_vvvvl,                        // llvm.ve.vl.vfmadd.vvvvl
    ve_vl_vfmadd_vvvvmvl,                      // llvm.ve.vl.vfmadd.vvvvmvl
    ve_vl_vfmadd_vvvvvl,                       // llvm.ve.vl.vfmadd.vvvvvl
    ve_vl_vfmads_vsvvl,                        // llvm.ve.vl.vfmads.vsvvl
    ve_vl_vfmads_vsvvmvl,                      // llvm.ve.vl.vfmads.vsvvmvl
    ve_vl_vfmads_vsvvvl,                       // llvm.ve.vl.vfmads.vsvvvl
    ve_vl_vfmads_vvsvl,                        // llvm.ve.vl.vfmads.vvsvl
    ve_vl_vfmads_vvsvmvl,                      // llvm.ve.vl.vfmads.vvsvmvl
    ve_vl_vfmads_vvsvvl,                       // llvm.ve.vl.vfmads.vvsvvl
    ve_vl_vfmads_vvvvl,                        // llvm.ve.vl.vfmads.vvvvl
    ve_vl_vfmads_vvvvmvl,                      // llvm.ve.vl.vfmads.vvvvmvl
    ve_vl_vfmads_vvvvvl,                       // llvm.ve.vl.vfmads.vvvvvl
    ve_vl_vfmaxd_vsvl,                         // llvm.ve.vl.vfmaxd.vsvl
    ve_vl_vfmaxd_vsvmvl,                       // llvm.ve.vl.vfmaxd.vsvmvl
    ve_vl_vfmaxd_vsvvl,                        // llvm.ve.vl.vfmaxd.vsvvl
    ve_vl_vfmaxd_vvvl,                         // llvm.ve.vl.vfmaxd.vvvl
    ve_vl_vfmaxd_vvvmvl,                       // llvm.ve.vl.vfmaxd.vvvmvl
    ve_vl_vfmaxd_vvvvl,                        // llvm.ve.vl.vfmaxd.vvvvl
    ve_vl_vfmaxs_vsvl,                         // llvm.ve.vl.vfmaxs.vsvl
    ve_vl_vfmaxs_vsvmvl,                       // llvm.ve.vl.vfmaxs.vsvmvl
    ve_vl_vfmaxs_vsvvl,                        // llvm.ve.vl.vfmaxs.vsvvl
    ve_vl_vfmaxs_vvvl,                         // llvm.ve.vl.vfmaxs.vvvl
    ve_vl_vfmaxs_vvvmvl,                       // llvm.ve.vl.vfmaxs.vvvmvl
    ve_vl_vfmaxs_vvvvl,                        // llvm.ve.vl.vfmaxs.vvvvl
    ve_vl_vfmind_vsvl,                         // llvm.ve.vl.vfmind.vsvl
    ve_vl_vfmind_vsvmvl,                       // llvm.ve.vl.vfmind.vsvmvl
    ve_vl_vfmind_vsvvl,                        // llvm.ve.vl.vfmind.vsvvl
    ve_vl_vfmind_vvvl,                         // llvm.ve.vl.vfmind.vvvl
    ve_vl_vfmind_vvvmvl,                       // llvm.ve.vl.vfmind.vvvmvl
    ve_vl_vfmind_vvvvl,                        // llvm.ve.vl.vfmind.vvvvl
    ve_vl_vfmins_vsvl,                         // llvm.ve.vl.vfmins.vsvl
    ve_vl_vfmins_vsvmvl,                       // llvm.ve.vl.vfmins.vsvmvl
    ve_vl_vfmins_vsvvl,                        // llvm.ve.vl.vfmins.vsvvl
    ve_vl_vfmins_vvvl,                         // llvm.ve.vl.vfmins.vvvl
    ve_vl_vfmins_vvvmvl,                       // llvm.ve.vl.vfmins.vvvmvl
    ve_vl_vfmins_vvvvl,                        // llvm.ve.vl.vfmins.vvvvl
    ve_vl_vfmkdeq_mvl,                         // llvm.ve.vl.vfmkdeq.mvl
    ve_vl_vfmkdeq_mvml,                        // llvm.ve.vl.vfmkdeq.mvml
    ve_vl_vfmkdeqnan_mvl,                      // llvm.ve.vl.vfmkdeqnan.mvl
    ve_vl_vfmkdeqnan_mvml,                     // llvm.ve.vl.vfmkdeqnan.mvml
    ve_vl_vfmkdge_mvl,                         // llvm.ve.vl.vfmkdge.mvl
    ve_vl_vfmkdge_mvml,                        // llvm.ve.vl.vfmkdge.mvml
    ve_vl_vfmkdgenan_mvl,                      // llvm.ve.vl.vfmkdgenan.mvl
    ve_vl_vfmkdgenan_mvml,                     // llvm.ve.vl.vfmkdgenan.mvml
    ve_vl_vfmkdgt_mvl,                         // llvm.ve.vl.vfmkdgt.mvl
    ve_vl_vfmkdgt_mvml,                        // llvm.ve.vl.vfmkdgt.mvml
    ve_vl_vfmkdgtnan_mvl,                      // llvm.ve.vl.vfmkdgtnan.mvl
    ve_vl_vfmkdgtnan_mvml,                     // llvm.ve.vl.vfmkdgtnan.mvml
    ve_vl_vfmkdle_mvl,                         // llvm.ve.vl.vfmkdle.mvl
    ve_vl_vfmkdle_mvml,                        // llvm.ve.vl.vfmkdle.mvml
    ve_vl_vfmkdlenan_mvl,                      // llvm.ve.vl.vfmkdlenan.mvl
    ve_vl_vfmkdlenan_mvml,                     // llvm.ve.vl.vfmkdlenan.mvml
    ve_vl_vfmkdlt_mvl,                         // llvm.ve.vl.vfmkdlt.mvl
    ve_vl_vfmkdlt_mvml,                        // llvm.ve.vl.vfmkdlt.mvml
    ve_vl_vfmkdltnan_mvl,                      // llvm.ve.vl.vfmkdltnan.mvl
    ve_vl_vfmkdltnan_mvml,                     // llvm.ve.vl.vfmkdltnan.mvml
    ve_vl_vfmkdnan_mvl,                        // llvm.ve.vl.vfmkdnan.mvl
    ve_vl_vfmkdnan_mvml,                       // llvm.ve.vl.vfmkdnan.mvml
    ve_vl_vfmkdne_mvl,                         // llvm.ve.vl.vfmkdne.mvl
    ve_vl_vfmkdne_mvml,                        // llvm.ve.vl.vfmkdne.mvml
    ve_vl_vfmkdnenan_mvl,                      // llvm.ve.vl.vfmkdnenan.mvl
    ve_vl_vfmkdnenan_mvml,                     // llvm.ve.vl.vfmkdnenan.mvml
    ve_vl_vfmkdnum_mvl,                        // llvm.ve.vl.vfmkdnum.mvl
    ve_vl_vfmkdnum_mvml,                       // llvm.ve.vl.vfmkdnum.mvml
    ve_vl_vfmklaf_ml,                          // llvm.ve.vl.vfmklaf.ml
    ve_vl_vfmklat_ml,                          // llvm.ve.vl.vfmklat.ml
    ve_vl_vfmkleq_mvl,                         // llvm.ve.vl.vfmkleq.mvl
    ve_vl_vfmkleq_mvml,                        // llvm.ve.vl.vfmkleq.mvml
    ve_vl_vfmkleqnan_mvl,                      // llvm.ve.vl.vfmkleqnan.mvl
    ve_vl_vfmkleqnan_mvml,                     // llvm.ve.vl.vfmkleqnan.mvml
    ve_vl_vfmklge_mvl,                         // llvm.ve.vl.vfmklge.mvl
    ve_vl_vfmklge_mvml,                        // llvm.ve.vl.vfmklge.mvml
    ve_vl_vfmklgenan_mvl,                      // llvm.ve.vl.vfmklgenan.mvl
    ve_vl_vfmklgenan_mvml,                     // llvm.ve.vl.vfmklgenan.mvml
    ve_vl_vfmklgt_mvl,                         // llvm.ve.vl.vfmklgt.mvl
    ve_vl_vfmklgt_mvml,                        // llvm.ve.vl.vfmklgt.mvml
    ve_vl_vfmklgtnan_mvl,                      // llvm.ve.vl.vfmklgtnan.mvl
    ve_vl_vfmklgtnan_mvml,                     // llvm.ve.vl.vfmklgtnan.mvml
    ve_vl_vfmklle_mvl,                         // llvm.ve.vl.vfmklle.mvl
    ve_vl_vfmklle_mvml,                        // llvm.ve.vl.vfmklle.mvml
    ve_vl_vfmkllenan_mvl,                      // llvm.ve.vl.vfmkllenan.mvl
    ve_vl_vfmkllenan_mvml,                     // llvm.ve.vl.vfmkllenan.mvml
    ve_vl_vfmkllt_mvl,                         // llvm.ve.vl.vfmkllt.mvl
    ve_vl_vfmkllt_mvml,                        // llvm.ve.vl.vfmkllt.mvml
    ve_vl_vfmklltnan_mvl,                      // llvm.ve.vl.vfmklltnan.mvl
    ve_vl_vfmklltnan_mvml,                     // llvm.ve.vl.vfmklltnan.mvml
    ve_vl_vfmklnan_mvl,                        // llvm.ve.vl.vfmklnan.mvl
    ve_vl_vfmklnan_mvml,                       // llvm.ve.vl.vfmklnan.mvml
    ve_vl_vfmklne_mvl,                         // llvm.ve.vl.vfmklne.mvl
    ve_vl_vfmklne_mvml,                        // llvm.ve.vl.vfmklne.mvml
    ve_vl_vfmklnenan_mvl,                      // llvm.ve.vl.vfmklnenan.mvl
    ve_vl_vfmklnenan_mvml,                     // llvm.ve.vl.vfmklnenan.mvml
    ve_vl_vfmklnum_mvl,                        // llvm.ve.vl.vfmklnum.mvl
    ve_vl_vfmklnum_mvml,                       // llvm.ve.vl.vfmklnum.mvml
    ve_vl_vfmkseq_mvl,                         // llvm.ve.vl.vfmkseq.mvl
    ve_vl_vfmkseq_mvml,                        // llvm.ve.vl.vfmkseq.mvml
    ve_vl_vfmkseqnan_mvl,                      // llvm.ve.vl.vfmkseqnan.mvl
    ve_vl_vfmkseqnan_mvml,                     // llvm.ve.vl.vfmkseqnan.mvml
    ve_vl_vfmksge_mvl,                         // llvm.ve.vl.vfmksge.mvl
    ve_vl_vfmksge_mvml,                        // llvm.ve.vl.vfmksge.mvml
    ve_vl_vfmksgenan_mvl,                      // llvm.ve.vl.vfmksgenan.mvl
    ve_vl_vfmksgenan_mvml,                     // llvm.ve.vl.vfmksgenan.mvml
    ve_vl_vfmksgt_mvl,                         // llvm.ve.vl.vfmksgt.mvl
    ve_vl_vfmksgt_mvml,                        // llvm.ve.vl.vfmksgt.mvml
    ve_vl_vfmksgtnan_mvl,                      // llvm.ve.vl.vfmksgtnan.mvl
    ve_vl_vfmksgtnan_mvml,                     // llvm.ve.vl.vfmksgtnan.mvml
    ve_vl_vfmksle_mvl,                         // llvm.ve.vl.vfmksle.mvl
    ve_vl_vfmksle_mvml,                        // llvm.ve.vl.vfmksle.mvml
    ve_vl_vfmkslenan_mvl,                      // llvm.ve.vl.vfmkslenan.mvl
    ve_vl_vfmkslenan_mvml,                     // llvm.ve.vl.vfmkslenan.mvml
    ve_vl_vfmkslt_mvl,                         // llvm.ve.vl.vfmkslt.mvl
    ve_vl_vfmkslt_mvml,                        // llvm.ve.vl.vfmkslt.mvml
    ve_vl_vfmksltnan_mvl,                      // llvm.ve.vl.vfmksltnan.mvl
    ve_vl_vfmksltnan_mvml,                     // llvm.ve.vl.vfmksltnan.mvml
    ve_vl_vfmksnan_mvl,                        // llvm.ve.vl.vfmksnan.mvl
    ve_vl_vfmksnan_mvml,                       // llvm.ve.vl.vfmksnan.mvml
    ve_vl_vfmksne_mvl,                         // llvm.ve.vl.vfmksne.mvl
    ve_vl_vfmksne_mvml,                        // llvm.ve.vl.vfmksne.mvml
    ve_vl_vfmksnenan_mvl,                      // llvm.ve.vl.vfmksnenan.mvl
    ve_vl_vfmksnenan_mvml,                     // llvm.ve.vl.vfmksnenan.mvml
    ve_vl_vfmksnum_mvl,                        // llvm.ve.vl.vfmksnum.mvl
    ve_vl_vfmksnum_mvml,                       // llvm.ve.vl.vfmksnum.mvml
    ve_vl_vfmkweq_mvl,                         // llvm.ve.vl.vfmkweq.mvl
    ve_vl_vfmkweq_mvml,                        // llvm.ve.vl.vfmkweq.mvml
    ve_vl_vfmkweqnan_mvl,                      // llvm.ve.vl.vfmkweqnan.mvl
    ve_vl_vfmkweqnan_mvml,                     // llvm.ve.vl.vfmkweqnan.mvml
    ve_vl_vfmkwge_mvl,                         // llvm.ve.vl.vfmkwge.mvl
    ve_vl_vfmkwge_mvml,                        // llvm.ve.vl.vfmkwge.mvml
    ve_vl_vfmkwgenan_mvl,                      // llvm.ve.vl.vfmkwgenan.mvl
    ve_vl_vfmkwgenan_mvml,                     // llvm.ve.vl.vfmkwgenan.mvml
    ve_vl_vfmkwgt_mvl,                         // llvm.ve.vl.vfmkwgt.mvl
    ve_vl_vfmkwgt_mvml,                        // llvm.ve.vl.vfmkwgt.mvml
    ve_vl_vfmkwgtnan_mvl,                      // llvm.ve.vl.vfmkwgtnan.mvl
    ve_vl_vfmkwgtnan_mvml,                     // llvm.ve.vl.vfmkwgtnan.mvml
    ve_vl_vfmkwle_mvl,                         // llvm.ve.vl.vfmkwle.mvl
    ve_vl_vfmkwle_mvml,                        // llvm.ve.vl.vfmkwle.mvml
    ve_vl_vfmkwlenan_mvl,                      // llvm.ve.vl.vfmkwlenan.mvl
    ve_vl_vfmkwlenan_mvml,                     // llvm.ve.vl.vfmkwlenan.mvml
    ve_vl_vfmkwlt_mvl,                         // llvm.ve.vl.vfmkwlt.mvl
    ve_vl_vfmkwlt_mvml,                        // llvm.ve.vl.vfmkwlt.mvml
    ve_vl_vfmkwltnan_mvl,                      // llvm.ve.vl.vfmkwltnan.mvl
    ve_vl_vfmkwltnan_mvml,                     // llvm.ve.vl.vfmkwltnan.mvml
    ve_vl_vfmkwnan_mvl,                        // llvm.ve.vl.vfmkwnan.mvl
    ve_vl_vfmkwnan_mvml,                       // llvm.ve.vl.vfmkwnan.mvml
    ve_vl_vfmkwne_mvl,                         // llvm.ve.vl.vfmkwne.mvl
    ve_vl_vfmkwne_mvml,                        // llvm.ve.vl.vfmkwne.mvml
    ve_vl_vfmkwnenan_mvl,                      // llvm.ve.vl.vfmkwnenan.mvl
    ve_vl_vfmkwnenan_mvml,                     // llvm.ve.vl.vfmkwnenan.mvml
    ve_vl_vfmkwnum_mvl,                        // llvm.ve.vl.vfmkwnum.mvl
    ve_vl_vfmkwnum_mvml,                       // llvm.ve.vl.vfmkwnum.mvml
    ve_vl_vfmsbd_vsvvl,                        // llvm.ve.vl.vfmsbd.vsvvl
    ve_vl_vfmsbd_vsvvmvl,                      // llvm.ve.vl.vfmsbd.vsvvmvl
    ve_vl_vfmsbd_vsvvvl,                       // llvm.ve.vl.vfmsbd.vsvvvl
    ve_vl_vfmsbd_vvsvl,                        // llvm.ve.vl.vfmsbd.vvsvl
    ve_vl_vfmsbd_vvsvmvl,                      // llvm.ve.vl.vfmsbd.vvsvmvl
    ve_vl_vfmsbd_vvsvvl,                       // llvm.ve.vl.vfmsbd.vvsvvl
    ve_vl_vfmsbd_vvvvl,                        // llvm.ve.vl.vfmsbd.vvvvl
    ve_vl_vfmsbd_vvvvmvl,                      // llvm.ve.vl.vfmsbd.vvvvmvl
    ve_vl_vfmsbd_vvvvvl,                       // llvm.ve.vl.vfmsbd.vvvvvl
    ve_vl_vfmsbs_vsvvl,                        // llvm.ve.vl.vfmsbs.vsvvl
    ve_vl_vfmsbs_vsvvmvl,                      // llvm.ve.vl.vfmsbs.vsvvmvl
    ve_vl_vfmsbs_vsvvvl,                       // llvm.ve.vl.vfmsbs.vsvvvl
    ve_vl_vfmsbs_vvsvl,                        // llvm.ve.vl.vfmsbs.vvsvl
    ve_vl_vfmsbs_vvsvmvl,                      // llvm.ve.vl.vfmsbs.vvsvmvl
    ve_vl_vfmsbs_vvsvvl,                       // llvm.ve.vl.vfmsbs.vvsvvl
    ve_vl_vfmsbs_vvvvl,                        // llvm.ve.vl.vfmsbs.vvvvl
    ve_vl_vfmsbs_vvvvmvl,                      // llvm.ve.vl.vfmsbs.vvvvmvl
    ve_vl_vfmsbs_vvvvvl,                       // llvm.ve.vl.vfmsbs.vvvvvl
    ve_vl_vfmuld_vsvl,                         // llvm.ve.vl.vfmuld.vsvl
    ve_vl_vfmuld_vsvmvl,                       // llvm.ve.vl.vfmuld.vsvmvl
    ve_vl_vfmuld_vsvvl,                        // llvm.ve.vl.vfmuld.vsvvl
    ve_vl_vfmuld_vvvl,                         // llvm.ve.vl.vfmuld.vvvl
    ve_vl_vfmuld_vvvmvl,                       // llvm.ve.vl.vfmuld.vvvmvl
    ve_vl_vfmuld_vvvvl,                        // llvm.ve.vl.vfmuld.vvvvl
    ve_vl_vfmuls_vsvl,                         // llvm.ve.vl.vfmuls.vsvl
    ve_vl_vfmuls_vsvmvl,                       // llvm.ve.vl.vfmuls.vsvmvl
    ve_vl_vfmuls_vsvvl,                        // llvm.ve.vl.vfmuls.vsvvl
    ve_vl_vfmuls_vvvl,                         // llvm.ve.vl.vfmuls.vvvl
    ve_vl_vfmuls_vvvmvl,                       // llvm.ve.vl.vfmuls.vvvmvl
    ve_vl_vfmuls_vvvvl,                        // llvm.ve.vl.vfmuls.vvvvl
    ve_vl_vfnmadd_vsvvl,                       // llvm.ve.vl.vfnmadd.vsvvl
    ve_vl_vfnmadd_vsvvmvl,                     // llvm.ve.vl.vfnmadd.vsvvmvl
    ve_vl_vfnmadd_vsvvvl,                      // llvm.ve.vl.vfnmadd.vsvvvl
    ve_vl_vfnmadd_vvsvl,                       // llvm.ve.vl.vfnmadd.vvsvl
    ve_vl_vfnmadd_vvsvmvl,                     // llvm.ve.vl.vfnmadd.vvsvmvl
    ve_vl_vfnmadd_vvsvvl,                      // llvm.ve.vl.vfnmadd.vvsvvl
    ve_vl_vfnmadd_vvvvl,                       // llvm.ve.vl.vfnmadd.vvvvl
    ve_vl_vfnmadd_vvvvmvl,                     // llvm.ve.vl.vfnmadd.vvvvmvl
    ve_vl_vfnmadd_vvvvvl,                      // llvm.ve.vl.vfnmadd.vvvvvl
    ve_vl_vfnmads_vsvvl,                       // llvm.ve.vl.vfnmads.vsvvl
    ve_vl_vfnmads_vsvvmvl,                     // llvm.ve.vl.vfnmads.vsvvmvl
    ve_vl_vfnmads_vsvvvl,                      // llvm.ve.vl.vfnmads.vsvvvl
    ve_vl_vfnmads_vvsvl,                       // llvm.ve.vl.vfnmads.vvsvl
    ve_vl_vfnmads_vvsvmvl,                     // llvm.ve.vl.vfnmads.vvsvmvl
    ve_vl_vfnmads_vvsvvl,                      // llvm.ve.vl.vfnmads.vvsvvl
    ve_vl_vfnmads_vvvvl,                       // llvm.ve.vl.vfnmads.vvvvl
    ve_vl_vfnmads_vvvvmvl,                     // llvm.ve.vl.vfnmads.vvvvmvl
    ve_vl_vfnmads_vvvvvl,                      // llvm.ve.vl.vfnmads.vvvvvl
    ve_vl_vfnmsbd_vsvvl,                       // llvm.ve.vl.vfnmsbd.vsvvl
    ve_vl_vfnmsbd_vsvvmvl,                     // llvm.ve.vl.vfnmsbd.vsvvmvl
    ve_vl_vfnmsbd_vsvvvl,                      // llvm.ve.vl.vfnmsbd.vsvvvl
    ve_vl_vfnmsbd_vvsvl,                       // llvm.ve.vl.vfnmsbd.vvsvl
    ve_vl_vfnmsbd_vvsvmvl,                     // llvm.ve.vl.vfnmsbd.vvsvmvl
    ve_vl_vfnmsbd_vvsvvl,                      // llvm.ve.vl.vfnmsbd.vvsvvl
    ve_vl_vfnmsbd_vvvvl,                       // llvm.ve.vl.vfnmsbd.vvvvl
    ve_vl_vfnmsbd_vvvvmvl,                     // llvm.ve.vl.vfnmsbd.vvvvmvl
    ve_vl_vfnmsbd_vvvvvl,                      // llvm.ve.vl.vfnmsbd.vvvvvl
    ve_vl_vfnmsbs_vsvvl,                       // llvm.ve.vl.vfnmsbs.vsvvl
    ve_vl_vfnmsbs_vsvvmvl,                     // llvm.ve.vl.vfnmsbs.vsvvmvl
    ve_vl_vfnmsbs_vsvvvl,                      // llvm.ve.vl.vfnmsbs.vsvvvl
    ve_vl_vfnmsbs_vvsvl,                       // llvm.ve.vl.vfnmsbs.vvsvl
    ve_vl_vfnmsbs_vvsvmvl,                     // llvm.ve.vl.vfnmsbs.vvsvmvl
    ve_vl_vfnmsbs_vvsvvl,                      // llvm.ve.vl.vfnmsbs.vvsvvl
    ve_vl_vfnmsbs_vvvvl,                       // llvm.ve.vl.vfnmsbs.vvvvl
    ve_vl_vfnmsbs_vvvvmvl,                     // llvm.ve.vl.vfnmsbs.vvvvmvl
    ve_vl_vfnmsbs_vvvvvl,                      // llvm.ve.vl.vfnmsbs.vvvvvl
    ve_vl_vfrmaxdfst_vvl,                      // llvm.ve.vl.vfrmaxdfst.vvl
    ve_vl_vfrmaxdfst_vvvl,                     // llvm.ve.vl.vfrmaxdfst.vvvl
    ve_vl_vfrmaxdlst_vvl,                      // llvm.ve.vl.vfrmaxdlst.vvl
    ve_vl_vfrmaxdlst_vvvl,                     // llvm.ve.vl.vfrmaxdlst.vvvl
    ve_vl_vfrmaxsfst_vvl,                      // llvm.ve.vl.vfrmaxsfst.vvl
    ve_vl_vfrmaxsfst_vvvl,                     // llvm.ve.vl.vfrmaxsfst.vvvl
    ve_vl_vfrmaxslst_vvl,                      // llvm.ve.vl.vfrmaxslst.vvl
    ve_vl_vfrmaxslst_vvvl,                     // llvm.ve.vl.vfrmaxslst.vvvl
    ve_vl_vfrmindfst_vvl,                      // llvm.ve.vl.vfrmindfst.vvl
    ve_vl_vfrmindfst_vvvl,                     // llvm.ve.vl.vfrmindfst.vvvl
    ve_vl_vfrmindlst_vvl,                      // llvm.ve.vl.vfrmindlst.vvl
    ve_vl_vfrmindlst_vvvl,                     // llvm.ve.vl.vfrmindlst.vvvl
    ve_vl_vfrminsfst_vvl,                      // llvm.ve.vl.vfrminsfst.vvl
    ve_vl_vfrminsfst_vvvl,                     // llvm.ve.vl.vfrminsfst.vvvl
    ve_vl_vfrminslst_vvl,                      // llvm.ve.vl.vfrminslst.vvl
    ve_vl_vfrminslst_vvvl,                     // llvm.ve.vl.vfrminslst.vvvl
    ve_vl_vfsqrtd_vvl,                         // llvm.ve.vl.vfsqrtd.vvl
    ve_vl_vfsqrtd_vvvl,                        // llvm.ve.vl.vfsqrtd.vvvl
    ve_vl_vfsqrts_vvl,                         // llvm.ve.vl.vfsqrts.vvl
    ve_vl_vfsqrts_vvvl,                        // llvm.ve.vl.vfsqrts.vvvl
    ve_vl_vfsubd_vsvl,                         // llvm.ve.vl.vfsubd.vsvl
    ve_vl_vfsubd_vsvmvl,                       // llvm.ve.vl.vfsubd.vsvmvl
    ve_vl_vfsubd_vsvvl,                        // llvm.ve.vl.vfsubd.vsvvl
    ve_vl_vfsubd_vvvl,                         // llvm.ve.vl.vfsubd.vvvl
    ve_vl_vfsubd_vvvmvl,                       // llvm.ve.vl.vfsubd.vvvmvl
    ve_vl_vfsubd_vvvvl,                        // llvm.ve.vl.vfsubd.vvvvl
    ve_vl_vfsubs_vsvl,                         // llvm.ve.vl.vfsubs.vsvl
    ve_vl_vfsubs_vsvmvl,                       // llvm.ve.vl.vfsubs.vsvmvl
    ve_vl_vfsubs_vsvvl,                        // llvm.ve.vl.vfsubs.vsvvl
    ve_vl_vfsubs_vvvl,                         // llvm.ve.vl.vfsubs.vvvl
    ve_vl_vfsubs_vvvmvl,                       // llvm.ve.vl.vfsubs.vvvmvl
    ve_vl_vfsubs_vvvvl,                        // llvm.ve.vl.vfsubs.vvvvl
    ve_vl_vfsumd_vvl,                          // llvm.ve.vl.vfsumd.vvl
    ve_vl_vfsumd_vvml,                         // llvm.ve.vl.vfsumd.vvml
    ve_vl_vfsums_vvl,                          // llvm.ve.vl.vfsums.vvl
    ve_vl_vfsums_vvml,                         // llvm.ve.vl.vfsums.vvml
    ve_vl_vgt_vvssl,                           // llvm.ve.vl.vgt.vvssl
    ve_vl_vgt_vvssml,                          // llvm.ve.vl.vgt.vvssml
    ve_vl_vgt_vvssmvl,                         // llvm.ve.vl.vgt.vvssmvl
    ve_vl_vgt_vvssvl,                          // llvm.ve.vl.vgt.vvssvl
    ve_vl_vgtlsx_vvssl,                        // llvm.ve.vl.vgtlsx.vvssl
    ve_vl_vgtlsx_vvssml,                       // llvm.ve.vl.vgtlsx.vvssml
    ve_vl_vgtlsx_vvssmvl,                      // llvm.ve.vl.vgtlsx.vvssmvl
    ve_vl_vgtlsx_vvssvl,                       // llvm.ve.vl.vgtlsx.vvssvl
    ve_vl_vgtlsxnc_vvssl,                      // llvm.ve.vl.vgtlsxnc.vvssl
    ve_vl_vgtlsxnc_vvssml,                     // llvm.ve.vl.vgtlsxnc.vvssml
    ve_vl_vgtlsxnc_vvssmvl,                    // llvm.ve.vl.vgtlsxnc.vvssmvl
    ve_vl_vgtlsxnc_vvssvl,                     // llvm.ve.vl.vgtlsxnc.vvssvl
    ve_vl_vgtlzx_vvssl,                        // llvm.ve.vl.vgtlzx.vvssl
    ve_vl_vgtlzx_vvssml,                       // llvm.ve.vl.vgtlzx.vvssml
    ve_vl_vgtlzx_vvssmvl,                      // llvm.ve.vl.vgtlzx.vvssmvl
    ve_vl_vgtlzx_vvssvl,                       // llvm.ve.vl.vgtlzx.vvssvl
    ve_vl_vgtlzxnc_vvssl,                      // llvm.ve.vl.vgtlzxnc.vvssl
    ve_vl_vgtlzxnc_vvssml,                     // llvm.ve.vl.vgtlzxnc.vvssml
    ve_vl_vgtlzxnc_vvssmvl,                    // llvm.ve.vl.vgtlzxnc.vvssmvl
    ve_vl_vgtlzxnc_vvssvl,                     // llvm.ve.vl.vgtlzxnc.vvssvl
    ve_vl_vgtnc_vvssl,                         // llvm.ve.vl.vgtnc.vvssl
    ve_vl_vgtnc_vvssml,                        // llvm.ve.vl.vgtnc.vvssml
    ve_vl_vgtnc_vvssmvl,                       // llvm.ve.vl.vgtnc.vvssmvl
    ve_vl_vgtnc_vvssvl,                        // llvm.ve.vl.vgtnc.vvssvl
    ve_vl_vgtu_vvssl,                          // llvm.ve.vl.vgtu.vvssl
    ve_vl_vgtu_vvssml,                         // llvm.ve.vl.vgtu.vvssml
    ve_vl_vgtu_vvssmvl,                        // llvm.ve.vl.vgtu.vvssmvl
    ve_vl_vgtu_vvssvl,                         // llvm.ve.vl.vgtu.vvssvl
    ve_vl_vgtunc_vvssl,                        // llvm.ve.vl.vgtunc.vvssl
    ve_vl_vgtunc_vvssml,                       // llvm.ve.vl.vgtunc.vvssml
    ve_vl_vgtunc_vvssmvl,                      // llvm.ve.vl.vgtunc.vvssmvl
    ve_vl_vgtunc_vvssvl,                       // llvm.ve.vl.vgtunc.vvssvl
    ve_vl_vld_vssl,                            // llvm.ve.vl.vld.vssl
    ve_vl_vld_vssvl,                           // llvm.ve.vl.vld.vssvl
    ve_vl_vld2d_vssl,                          // llvm.ve.vl.vld2d.vssl
    ve_vl_vld2d_vssvl,                         // llvm.ve.vl.vld2d.vssvl
    ve_vl_vld2dnc_vssl,                        // llvm.ve.vl.vld2dnc.vssl
    ve_vl_vld2dnc_vssvl,                       // llvm.ve.vl.vld2dnc.vssvl
    ve_vl_vldl2dsx_vssl,                       // llvm.ve.vl.vldl2dsx.vssl
    ve_vl_vldl2dsx_vssvl,                      // llvm.ve.vl.vldl2dsx.vssvl
    ve_vl_vldl2dsxnc_vssl,                     // llvm.ve.vl.vldl2dsxnc.vssl
    ve_vl_vldl2dsxnc_vssvl,                    // llvm.ve.vl.vldl2dsxnc.vssvl
    ve_vl_vldl2dzx_vssl,                       // llvm.ve.vl.vldl2dzx.vssl
    ve_vl_vldl2dzx_vssvl,                      // llvm.ve.vl.vldl2dzx.vssvl
    ve_vl_vldl2dzxnc_vssl,                     // llvm.ve.vl.vldl2dzxnc.vssl
    ve_vl_vldl2dzxnc_vssvl,                    // llvm.ve.vl.vldl2dzxnc.vssvl
    ve_vl_vldlsx_vssl,                         // llvm.ve.vl.vldlsx.vssl
    ve_vl_vldlsx_vssvl,                        // llvm.ve.vl.vldlsx.vssvl
    ve_vl_vldlsxnc_vssl,                       // llvm.ve.vl.vldlsxnc.vssl
    ve_vl_vldlsxnc_vssvl,                      // llvm.ve.vl.vldlsxnc.vssvl
    ve_vl_vldlzx_vssl,                         // llvm.ve.vl.vldlzx.vssl
    ve_vl_vldlzx_vssvl,                        // llvm.ve.vl.vldlzx.vssvl
    ve_vl_vldlzxnc_vssl,                       // llvm.ve.vl.vldlzxnc.vssl
    ve_vl_vldlzxnc_vssvl,                      // llvm.ve.vl.vldlzxnc.vssvl
    ve_vl_vldnc_vssl,                          // llvm.ve.vl.vldnc.vssl
    ve_vl_vldnc_vssvl,                         // llvm.ve.vl.vldnc.vssvl
    ve_vl_vldu_vssl,                           // llvm.ve.vl.vldu.vssl
    ve_vl_vldu_vssvl,                          // llvm.ve.vl.vldu.vssvl
    ve_vl_vldu2d_vssl,                         // llvm.ve.vl.vldu2d.vssl
    ve_vl_vldu2d_vssvl,                        // llvm.ve.vl.vldu2d.vssvl
    ve_vl_vldu2dnc_vssl,                       // llvm.ve.vl.vldu2dnc.vssl
    ve_vl_vldu2dnc_vssvl,                      // llvm.ve.vl.vldu2dnc.vssvl
    ve_vl_vldunc_vssl,                         // llvm.ve.vl.vldunc.vssl
    ve_vl_vldunc_vssvl,                        // llvm.ve.vl.vldunc.vssvl
    ve_vl_vldz_vvl,                            // llvm.ve.vl.vldz.vvl
    ve_vl_vldz_vvmvl,                          // llvm.ve.vl.vldz.vvmvl
    ve_vl_vldz_vvvl,                           // llvm.ve.vl.vldz.vvvl
    ve_vl_vmaxsl_vsvl,                         // llvm.ve.vl.vmaxsl.vsvl
    ve_vl_vmaxsl_vsvmvl,                       // llvm.ve.vl.vmaxsl.vsvmvl
    ve_vl_vmaxsl_vsvvl,                        // llvm.ve.vl.vmaxsl.vsvvl
    ve_vl_vmaxsl_vvvl,                         // llvm.ve.vl.vmaxsl.vvvl
    ve_vl_vmaxsl_vvvmvl,                       // llvm.ve.vl.vmaxsl.vvvmvl
    ve_vl_vmaxsl_vvvvl,                        // llvm.ve.vl.vmaxsl.vvvvl
    ve_vl_vmaxswsx_vsvl,                       // llvm.ve.vl.vmaxswsx.vsvl
    ve_vl_vmaxswsx_vsvmvl,                     // llvm.ve.vl.vmaxswsx.vsvmvl
    ve_vl_vmaxswsx_vsvvl,                      // llvm.ve.vl.vmaxswsx.vsvvl
    ve_vl_vmaxswsx_vvvl,                       // llvm.ve.vl.vmaxswsx.vvvl
    ve_vl_vmaxswsx_vvvmvl,                     // llvm.ve.vl.vmaxswsx.vvvmvl
    ve_vl_vmaxswsx_vvvvl,                      // llvm.ve.vl.vmaxswsx.vvvvl
    ve_vl_vmaxswzx_vsvl,                       // llvm.ve.vl.vmaxswzx.vsvl
    ve_vl_vmaxswzx_vsvmvl,                     // llvm.ve.vl.vmaxswzx.vsvmvl
    ve_vl_vmaxswzx_vsvvl,                      // llvm.ve.vl.vmaxswzx.vsvvl
    ve_vl_vmaxswzx_vvvl,                       // llvm.ve.vl.vmaxswzx.vvvl
    ve_vl_vmaxswzx_vvvmvl,                     // llvm.ve.vl.vmaxswzx.vvvmvl
    ve_vl_vmaxswzx_vvvvl,                      // llvm.ve.vl.vmaxswzx.vvvvl
    ve_vl_vminsl_vsvl,                         // llvm.ve.vl.vminsl.vsvl
    ve_vl_vminsl_vsvmvl,                       // llvm.ve.vl.vminsl.vsvmvl
    ve_vl_vminsl_vsvvl,                        // llvm.ve.vl.vminsl.vsvvl
    ve_vl_vminsl_vvvl,                         // llvm.ve.vl.vminsl.vvvl
    ve_vl_vminsl_vvvmvl,                       // llvm.ve.vl.vminsl.vvvmvl
    ve_vl_vminsl_vvvvl,                        // llvm.ve.vl.vminsl.vvvvl
    ve_vl_vminswsx_vsvl,                       // llvm.ve.vl.vminswsx.vsvl
    ve_vl_vminswsx_vsvmvl,                     // llvm.ve.vl.vminswsx.vsvmvl
    ve_vl_vminswsx_vsvvl,                      // llvm.ve.vl.vminswsx.vsvvl
    ve_vl_vminswsx_vvvl,                       // llvm.ve.vl.vminswsx.vvvl
    ve_vl_vminswsx_vvvmvl,                     // llvm.ve.vl.vminswsx.vvvmvl
    ve_vl_vminswsx_vvvvl,                      // llvm.ve.vl.vminswsx.vvvvl
    ve_vl_vminswzx_vsvl,                       // llvm.ve.vl.vminswzx.vsvl
    ve_vl_vminswzx_vsvmvl,                     // llvm.ve.vl.vminswzx.vsvmvl
    ve_vl_vminswzx_vsvvl,                      // llvm.ve.vl.vminswzx.vsvvl
    ve_vl_vminswzx_vvvl,                       // llvm.ve.vl.vminswzx.vvvl
    ve_vl_vminswzx_vvvmvl,                     // llvm.ve.vl.vminswzx.vvvmvl
    ve_vl_vminswzx_vvvvl,                      // llvm.ve.vl.vminswzx.vvvvl
    ve_vl_vmrg_vsvml,                          // llvm.ve.vl.vmrg.vsvml
    ve_vl_vmrg_vsvmvl,                         // llvm.ve.vl.vmrg.vsvmvl
    ve_vl_vmrg_vvvml,                          // llvm.ve.vl.vmrg.vvvml
    ve_vl_vmrg_vvvmvl,                         // llvm.ve.vl.vmrg.vvvmvl
    ve_vl_vmrgw_vsvMl,                         // llvm.ve.vl.vmrgw.vsvMl
    ve_vl_vmrgw_vsvMvl,                        // llvm.ve.vl.vmrgw.vsvMvl
    ve_vl_vmrgw_vvvMl,                         // llvm.ve.vl.vmrgw.vvvMl
    ve_vl_vmrgw_vvvMvl,                        // llvm.ve.vl.vmrgw.vvvMvl
    ve_vl_vmulsl_vsvl,                         // llvm.ve.vl.vmulsl.vsvl
    ve_vl_vmulsl_vsvmvl,                       // llvm.ve.vl.vmulsl.vsvmvl
    ve_vl_vmulsl_vsvvl,                        // llvm.ve.vl.vmulsl.vsvvl
    ve_vl_vmulsl_vvvl,                         // llvm.ve.vl.vmulsl.vvvl
    ve_vl_vmulsl_vvvmvl,                       // llvm.ve.vl.vmulsl.vvvmvl
    ve_vl_vmulsl_vvvvl,                        // llvm.ve.vl.vmulsl.vvvvl
    ve_vl_vmulslw_vsvl,                        // llvm.ve.vl.vmulslw.vsvl
    ve_vl_vmulslw_vsvvl,                       // llvm.ve.vl.vmulslw.vsvvl
    ve_vl_vmulslw_vvvl,                        // llvm.ve.vl.vmulslw.vvvl
    ve_vl_vmulslw_vvvvl,                       // llvm.ve.vl.vmulslw.vvvvl
    ve_vl_vmulswsx_vsvl,                       // llvm.ve.vl.vmulswsx.vsvl
    ve_vl_vmulswsx_vsvmvl,                     // llvm.ve.vl.vmulswsx.vsvmvl
    ve_vl_vmulswsx_vsvvl,                      // llvm.ve.vl.vmulswsx.vsvvl
    ve_vl_vmulswsx_vvvl,                       // llvm.ve.vl.vmulswsx.vvvl
    ve_vl_vmulswsx_vvvmvl,                     // llvm.ve.vl.vmulswsx.vvvmvl
    ve_vl_vmulswsx_vvvvl,                      // llvm.ve.vl.vmulswsx.vvvvl
    ve_vl_vmulswzx_vsvl,                       // llvm.ve.vl.vmulswzx.vsvl
    ve_vl_vmulswzx_vsvmvl,                     // llvm.ve.vl.vmulswzx.vsvmvl
    ve_vl_vmulswzx_vsvvl,                      // llvm.ve.vl.vmulswzx.vsvvl
    ve_vl_vmulswzx_vvvl,                       // llvm.ve.vl.vmulswzx.vvvl
    ve_vl_vmulswzx_vvvmvl,                     // llvm.ve.vl.vmulswzx.vvvmvl
    ve_vl_vmulswzx_vvvvl,                      // llvm.ve.vl.vmulswzx.vvvvl
    ve_vl_vmulul_vsvl,                         // llvm.ve.vl.vmulul.vsvl
    ve_vl_vmulul_vsvmvl,                       // llvm.ve.vl.vmulul.vsvmvl
    ve_vl_vmulul_vsvvl,                        // llvm.ve.vl.vmulul.vsvvl
    ve_vl_vmulul_vvvl,                         // llvm.ve.vl.vmulul.vvvl
    ve_vl_vmulul_vvvmvl,                       // llvm.ve.vl.vmulul.vvvmvl
    ve_vl_vmulul_vvvvl,                        // llvm.ve.vl.vmulul.vvvvl
    ve_vl_vmuluw_vsvl,                         // llvm.ve.vl.vmuluw.vsvl
    ve_vl_vmuluw_vsvmvl,                       // llvm.ve.vl.vmuluw.vsvmvl
    ve_vl_vmuluw_vsvvl,                        // llvm.ve.vl.vmuluw.vsvvl
    ve_vl_vmuluw_vvvl,                         // llvm.ve.vl.vmuluw.vvvl
    ve_vl_vmuluw_vvvmvl,                       // llvm.ve.vl.vmuluw.vvvmvl
    ve_vl_vmuluw_vvvvl,                        // llvm.ve.vl.vmuluw.vvvvl
    ve_vl_vmv_vsvl,                            // llvm.ve.vl.vmv.vsvl
    ve_vl_vmv_vsvmvl,                          // llvm.ve.vl.vmv.vsvmvl
    ve_vl_vmv_vsvvl,                           // llvm.ve.vl.vmv.vsvvl
    ve_vl_vor_vsvl,                            // llvm.ve.vl.vor.vsvl
    ve_vl_vor_vsvmvl,                          // llvm.ve.vl.vor.vsvmvl
    ve_vl_vor_vsvvl,                           // llvm.ve.vl.vor.vsvvl
    ve_vl_vor_vvvl,                            // llvm.ve.vl.vor.vvvl
    ve_vl_vor_vvvmvl,                          // llvm.ve.vl.vor.vvvmvl
    ve_vl_vor_vvvvl,                           // llvm.ve.vl.vor.vvvvl
    ve_vl_vpcnt_vvl,                           // llvm.ve.vl.vpcnt.vvl
    ve_vl_vpcnt_vvmvl,                         // llvm.ve.vl.vpcnt.vvmvl
    ve_vl_vpcnt_vvvl,                          // llvm.ve.vl.vpcnt.vvvl
    ve_vl_vrand_vvl,                           // llvm.ve.vl.vrand.vvl
    ve_vl_vrand_vvml,                          // llvm.ve.vl.vrand.vvml
    ve_vl_vrcpd_vvl,                           // llvm.ve.vl.vrcpd.vvl
    ve_vl_vrcpd_vvvl,                          // llvm.ve.vl.vrcpd.vvvl
    ve_vl_vrcps_vvl,                           // llvm.ve.vl.vrcps.vvl
    ve_vl_vrcps_vvvl,                          // llvm.ve.vl.vrcps.vvvl
    ve_vl_vrmaxslfst_vvl,                      // llvm.ve.vl.vrmaxslfst.vvl
    ve_vl_vrmaxslfst_vvvl,                     // llvm.ve.vl.vrmaxslfst.vvvl
    ve_vl_vrmaxsllst_vvl,                      // llvm.ve.vl.vrmaxsllst.vvl
    ve_vl_vrmaxsllst_vvvl,                     // llvm.ve.vl.vrmaxsllst.vvvl
    ve_vl_vrmaxswfstsx_vvl,                    // llvm.ve.vl.vrmaxswfstsx.vvl
    ve_vl_vrmaxswfstsx_vvvl,                   // llvm.ve.vl.vrmaxswfstsx.vvvl
    ve_vl_vrmaxswfstzx_vvl,                    // llvm.ve.vl.vrmaxswfstzx.vvl
    ve_vl_vrmaxswfstzx_vvvl,                   // llvm.ve.vl.vrmaxswfstzx.vvvl
    ve_vl_vrmaxswlstsx_vvl,                    // llvm.ve.vl.vrmaxswlstsx.vvl
    ve_vl_vrmaxswlstsx_vvvl,                   // llvm.ve.vl.vrmaxswlstsx.vvvl
    ve_vl_vrmaxswlstzx_vvl,                    // llvm.ve.vl.vrmaxswlstzx.vvl
    ve_vl_vrmaxswlstzx_vvvl,                   // llvm.ve.vl.vrmaxswlstzx.vvvl
    ve_vl_vrminslfst_vvl,                      // llvm.ve.vl.vrminslfst.vvl
    ve_vl_vrminslfst_vvvl,                     // llvm.ve.vl.vrminslfst.vvvl
    ve_vl_vrminsllst_vvl,                      // llvm.ve.vl.vrminsllst.vvl
    ve_vl_vrminsllst_vvvl,                     // llvm.ve.vl.vrminsllst.vvvl
    ve_vl_vrminswfstsx_vvl,                    // llvm.ve.vl.vrminswfstsx.vvl
    ve_vl_vrminswfstsx_vvvl,                   // llvm.ve.vl.vrminswfstsx.vvvl
    ve_vl_vrminswfstzx_vvl,                    // llvm.ve.vl.vrminswfstzx.vvl
    ve_vl_vrminswfstzx_vvvl,                   // llvm.ve.vl.vrminswfstzx.vvvl
    ve_vl_vrminswlstsx_vvl,                    // llvm.ve.vl.vrminswlstsx.vvl
    ve_vl_vrminswlstsx_vvvl,                   // llvm.ve.vl.vrminswlstsx.vvvl
    ve_vl_vrminswlstzx_vvl,                    // llvm.ve.vl.vrminswlstzx.vvl
    ve_vl_vrminswlstzx_vvvl,                   // llvm.ve.vl.vrminswlstzx.vvvl
    ve_vl_vror_vvl,                            // llvm.ve.vl.vror.vvl
    ve_vl_vror_vvml,                           // llvm.ve.vl.vror.vvml
    ve_vl_vrsqrtd_vvl,                         // llvm.ve.vl.vrsqrtd.vvl
    ve_vl_vrsqrtd_vvvl,                        // llvm.ve.vl.vrsqrtd.vvvl
    ve_vl_vrsqrtdnex_vvl,                      // llvm.ve.vl.vrsqrtdnex.vvl
    ve_vl_vrsqrtdnex_vvvl,                     // llvm.ve.vl.vrsqrtdnex.vvvl
    ve_vl_vrsqrts_vvl,                         // llvm.ve.vl.vrsqrts.vvl
    ve_vl_vrsqrts_vvvl,                        // llvm.ve.vl.vrsqrts.vvvl
    ve_vl_vrsqrtsnex_vvl,                      // llvm.ve.vl.vrsqrtsnex.vvl
    ve_vl_vrsqrtsnex_vvvl,                     // llvm.ve.vl.vrsqrtsnex.vvvl
    ve_vl_vrxor_vvl,                           // llvm.ve.vl.vrxor.vvl
    ve_vl_vrxor_vvml,                          // llvm.ve.vl.vrxor.vvml
    ve_vl_vsc_vvssl,                           // llvm.ve.vl.vsc.vvssl
    ve_vl_vsc_vvssml,                          // llvm.ve.vl.vsc.vvssml
    ve_vl_vscl_vvssl,                          // llvm.ve.vl.vscl.vvssl
    ve_vl_vscl_vvssml,                         // llvm.ve.vl.vscl.vvssml
    ve_vl_vsclnc_vvssl,                        // llvm.ve.vl.vsclnc.vvssl
    ve_vl_vsclnc_vvssml,                       // llvm.ve.vl.vsclnc.vvssml
    ve_vl_vsclncot_vvssl,                      // llvm.ve.vl.vsclncot.vvssl
    ve_vl_vsclncot_vvssml,                     // llvm.ve.vl.vsclncot.vvssml
    ve_vl_vsclot_vvssl,                        // llvm.ve.vl.vsclot.vvssl
    ve_vl_vsclot_vvssml,                       // llvm.ve.vl.vsclot.vvssml
    ve_vl_vscnc_vvssl,                         // llvm.ve.vl.vscnc.vvssl
    ve_vl_vscnc_vvssml,                        // llvm.ve.vl.vscnc.vvssml
    ve_vl_vscncot_vvssl,                       // llvm.ve.vl.vscncot.vvssl
    ve_vl_vscncot_vvssml,                      // llvm.ve.vl.vscncot.vvssml
    ve_vl_vscot_vvssl,                         // llvm.ve.vl.vscot.vvssl
    ve_vl_vscot_vvssml,                        // llvm.ve.vl.vscot.vvssml
    ve_vl_vscu_vvssl,                          // llvm.ve.vl.vscu.vvssl
    ve_vl_vscu_vvssml,                         // llvm.ve.vl.vscu.vvssml
    ve_vl_vscunc_vvssl,                        // llvm.ve.vl.vscunc.vvssl
    ve_vl_vscunc_vvssml,                       // llvm.ve.vl.vscunc.vvssml
    ve_vl_vscuncot_vvssl,                      // llvm.ve.vl.vscuncot.vvssl
    ve_vl_vscuncot_vvssml,                     // llvm.ve.vl.vscuncot.vvssml
    ve_vl_vscuot_vvssl,                        // llvm.ve.vl.vscuot.vvssl
    ve_vl_vscuot_vvssml,                       // llvm.ve.vl.vscuot.vvssml
    ve_vl_vseq_vl,                             // llvm.ve.vl.vseq.vl
    ve_vl_vseq_vvl,                            // llvm.ve.vl.vseq.vvl
    ve_vl_vsfa_vvssl,                          // llvm.ve.vl.vsfa.vvssl
    ve_vl_vsfa_vvssmvl,                        // llvm.ve.vl.vsfa.vvssmvl
    ve_vl_vsfa_vvssvl,                         // llvm.ve.vl.vsfa.vvssvl
    ve_vl_vshf_vvvsl,                          // llvm.ve.vl.vshf.vvvsl
    ve_vl_vshf_vvvsvl,                         // llvm.ve.vl.vshf.vvvsvl
    ve_vl_vslal_vvsl,                          // llvm.ve.vl.vslal.vvsl
    ve_vl_vslal_vvsmvl,                        // llvm.ve.vl.vslal.vvsmvl
    ve_vl_vslal_vvsvl,                         // llvm.ve.vl.vslal.vvsvl
    ve_vl_vslal_vvvl,                          // llvm.ve.vl.vslal.vvvl
    ve_vl_vslal_vvvmvl,                        // llvm.ve.vl.vslal.vvvmvl
    ve_vl_vslal_vvvvl,                         // llvm.ve.vl.vslal.vvvvl
    ve_vl_vslawsx_vvsl,                        // llvm.ve.vl.vslawsx.vvsl
    ve_vl_vslawsx_vvsmvl,                      // llvm.ve.vl.vslawsx.vvsmvl
    ve_vl_vslawsx_vvsvl,                       // llvm.ve.vl.vslawsx.vvsvl
    ve_vl_vslawsx_vvvl,                        // llvm.ve.vl.vslawsx.vvvl
    ve_vl_vslawsx_vvvmvl,                      // llvm.ve.vl.vslawsx.vvvmvl
    ve_vl_vslawsx_vvvvl,                       // llvm.ve.vl.vslawsx.vvvvl
    ve_vl_vslawzx_vvsl,                        // llvm.ve.vl.vslawzx.vvsl
    ve_vl_vslawzx_vvsmvl,                      // llvm.ve.vl.vslawzx.vvsmvl
    ve_vl_vslawzx_vvsvl,                       // llvm.ve.vl.vslawzx.vvsvl
    ve_vl_vslawzx_vvvl,                        // llvm.ve.vl.vslawzx.vvvl
    ve_vl_vslawzx_vvvmvl,                      // llvm.ve.vl.vslawzx.vvvmvl
    ve_vl_vslawzx_vvvvl,                       // llvm.ve.vl.vslawzx.vvvvl
    ve_vl_vsll_vvsl,                           // llvm.ve.vl.vsll.vvsl
    ve_vl_vsll_vvsmvl,                         // llvm.ve.vl.vsll.vvsmvl
    ve_vl_vsll_vvsvl,                          // llvm.ve.vl.vsll.vvsvl
    ve_vl_vsll_vvvl,                           // llvm.ve.vl.vsll.vvvl
    ve_vl_vsll_vvvmvl,                         // llvm.ve.vl.vsll.vvvmvl
    ve_vl_vsll_vvvvl,                          // llvm.ve.vl.vsll.vvvvl
    ve_vl_vsral_vvsl,                          // llvm.ve.vl.vsral.vvsl
    ve_vl_vsral_vvsmvl,                        // llvm.ve.vl.vsral.vvsmvl
    ve_vl_vsral_vvsvl,                         // llvm.ve.vl.vsral.vvsvl
    ve_vl_vsral_vvvl,                          // llvm.ve.vl.vsral.vvvl
    ve_vl_vsral_vvvmvl,                        // llvm.ve.vl.vsral.vvvmvl
    ve_vl_vsral_vvvvl,                         // llvm.ve.vl.vsral.vvvvl
    ve_vl_vsrawsx_vvsl,                        // llvm.ve.vl.vsrawsx.vvsl
    ve_vl_vsrawsx_vvsmvl,                      // llvm.ve.vl.vsrawsx.vvsmvl
    ve_vl_vsrawsx_vvsvl,                       // llvm.ve.vl.vsrawsx.vvsvl
    ve_vl_vsrawsx_vvvl,                        // llvm.ve.vl.vsrawsx.vvvl
    ve_vl_vsrawsx_vvvmvl,                      // llvm.ve.vl.vsrawsx.vvvmvl
    ve_vl_vsrawsx_vvvvl,                       // llvm.ve.vl.vsrawsx.vvvvl
    ve_vl_vsrawzx_vvsl,                        // llvm.ve.vl.vsrawzx.vvsl
    ve_vl_vsrawzx_vvsmvl,                      // llvm.ve.vl.vsrawzx.vvsmvl
    ve_vl_vsrawzx_vvsvl,                       // llvm.ve.vl.vsrawzx.vvsvl
    ve_vl_vsrawzx_vvvl,                        // llvm.ve.vl.vsrawzx.vvvl
    ve_vl_vsrawzx_vvvmvl,                      // llvm.ve.vl.vsrawzx.vvvmvl
    ve_vl_vsrawzx_vvvvl,                       // llvm.ve.vl.vsrawzx.vvvvl
    ve_vl_vsrl_vvsl,                           // llvm.ve.vl.vsrl.vvsl
    ve_vl_vsrl_vvsmvl,                         // llvm.ve.vl.vsrl.vvsmvl
    ve_vl_vsrl_vvsvl,                          // llvm.ve.vl.vsrl.vvsvl
    ve_vl_vsrl_vvvl,                           // llvm.ve.vl.vsrl.vvvl
    ve_vl_vsrl_vvvmvl,                         // llvm.ve.vl.vsrl.vvvmvl
    ve_vl_vsrl_vvvvl,                          // llvm.ve.vl.vsrl.vvvvl
    ve_vl_vst_vssl,                            // llvm.ve.vl.vst.vssl
    ve_vl_vst_vssml,                           // llvm.ve.vl.vst.vssml
    ve_vl_vst2d_vssl,                          // llvm.ve.vl.vst2d.vssl
    ve_vl_vst2d_vssml,                         // llvm.ve.vl.vst2d.vssml
    ve_vl_vst2dnc_vssl,                        // llvm.ve.vl.vst2dnc.vssl
    ve_vl_vst2dnc_vssml,                       // llvm.ve.vl.vst2dnc.vssml
    ve_vl_vst2dncot_vssl,                      // llvm.ve.vl.vst2dncot.vssl
    ve_vl_vst2dncot_vssml,                     // llvm.ve.vl.vst2dncot.vssml
    ve_vl_vst2dot_vssl,                        // llvm.ve.vl.vst2dot.vssl
    ve_vl_vst2dot_vssml,                       // llvm.ve.vl.vst2dot.vssml
    ve_vl_vstl_vssl,                           // llvm.ve.vl.vstl.vssl
    ve_vl_vstl_vssml,                          // llvm.ve.vl.vstl.vssml
    ve_vl_vstl2d_vssl,                         // llvm.ve.vl.vstl2d.vssl
    ve_vl_vstl2d_vssml,                        // llvm.ve.vl.vstl2d.vssml
    ve_vl_vstl2dnc_vssl,                       // llvm.ve.vl.vstl2dnc.vssl
    ve_vl_vstl2dnc_vssml,                      // llvm.ve.vl.vstl2dnc.vssml
    ve_vl_vstl2dncot_vssl,                     // llvm.ve.vl.vstl2dncot.vssl
    ve_vl_vstl2dncot_vssml,                    // llvm.ve.vl.vstl2dncot.vssml
    ve_vl_vstl2dot_vssl,                       // llvm.ve.vl.vstl2dot.vssl
    ve_vl_vstl2dot_vssml,                      // llvm.ve.vl.vstl2dot.vssml
    ve_vl_vstlnc_vssl,                         // llvm.ve.vl.vstlnc.vssl
    ve_vl_vstlnc_vssml,                        // llvm.ve.vl.vstlnc.vssml
    ve_vl_vstlncot_vssl,                       // llvm.ve.vl.vstlncot.vssl
    ve_vl_vstlncot_vssml,                      // llvm.ve.vl.vstlncot.vssml
    ve_vl_vstlot_vssl,                         // llvm.ve.vl.vstlot.vssl
    ve_vl_vstlot_vssml,                        // llvm.ve.vl.vstlot.vssml
    ve_vl_vstnc_vssl,                          // llvm.ve.vl.vstnc.vssl
    ve_vl_vstnc_vssml,                         // llvm.ve.vl.vstnc.vssml
    ve_vl_vstncot_vssl,                        // llvm.ve.vl.vstncot.vssl
    ve_vl_vstncot_vssml,                       // llvm.ve.vl.vstncot.vssml
    ve_vl_vstot_vssl,                          // llvm.ve.vl.vstot.vssl
    ve_vl_vstot_vssml,                         // llvm.ve.vl.vstot.vssml
    ve_vl_vstu_vssl,                           // llvm.ve.vl.vstu.vssl
    ve_vl_vstu_vssml,                          // llvm.ve.vl.vstu.vssml
    ve_vl_vstu2d_vssl,                         // llvm.ve.vl.vstu2d.vssl
    ve_vl_vstu2d_vssml,                        // llvm.ve.vl.vstu2d.vssml
    ve_vl_vstu2dnc_vssl,                       // llvm.ve.vl.vstu2dnc.vssl
    ve_vl_vstu2dnc_vssml,                      // llvm.ve.vl.vstu2dnc.vssml
    ve_vl_vstu2dncot_vssl,                     // llvm.ve.vl.vstu2dncot.vssl
    ve_vl_vstu2dncot_vssml,                    // llvm.ve.vl.vstu2dncot.vssml
    ve_vl_vstu2dot_vssl,                       // llvm.ve.vl.vstu2dot.vssl
    ve_vl_vstu2dot_vssml,                      // llvm.ve.vl.vstu2dot.vssml
    ve_vl_vstunc_vssl,                         // llvm.ve.vl.vstunc.vssl
    ve_vl_vstunc_vssml,                        // llvm.ve.vl.vstunc.vssml
    ve_vl_vstuncot_vssl,                       // llvm.ve.vl.vstuncot.vssl
    ve_vl_vstuncot_vssml,                      // llvm.ve.vl.vstuncot.vssml
    ve_vl_vstuot_vssl,                         // llvm.ve.vl.vstuot.vssl
    ve_vl_vstuot_vssml,                        // llvm.ve.vl.vstuot.vssml
    ve_vl_vsubsl_vsvl,                         // llvm.ve.vl.vsubsl.vsvl
    ve_vl_vsubsl_vsvmvl,                       // llvm.ve.vl.vsubsl.vsvmvl
    ve_vl_vsubsl_vsvvl,                        // llvm.ve.vl.vsubsl.vsvvl
    ve_vl_vsubsl_vvvl,                         // llvm.ve.vl.vsubsl.vvvl
    ve_vl_vsubsl_vvvmvl,                       // llvm.ve.vl.vsubsl.vvvmvl
    ve_vl_vsubsl_vvvvl,                        // llvm.ve.vl.vsubsl.vvvvl
    ve_vl_vsubswsx_vsvl,                       // llvm.ve.vl.vsubswsx.vsvl
    ve_vl_vsubswsx_vsvmvl,                     // llvm.ve.vl.vsubswsx.vsvmvl
    ve_vl_vsubswsx_vsvvl,                      // llvm.ve.vl.vsubswsx.vsvvl
    ve_vl_vsubswsx_vvvl,                       // llvm.ve.vl.vsubswsx.vvvl
    ve_vl_vsubswsx_vvvmvl,                     // llvm.ve.vl.vsubswsx.vvvmvl
    ve_vl_vsubswsx_vvvvl,                      // llvm.ve.vl.vsubswsx.vvvvl
    ve_vl_vsubswzx_vsvl,                       // llvm.ve.vl.vsubswzx.vsvl
    ve_vl_vsubswzx_vsvmvl,                     // llvm.ve.vl.vsubswzx.vsvmvl
    ve_vl_vsubswzx_vsvvl,                      // llvm.ve.vl.vsubswzx.vsvvl
    ve_vl_vsubswzx_vvvl,                       // llvm.ve.vl.vsubswzx.vvvl
    ve_vl_vsubswzx_vvvmvl,                     // llvm.ve.vl.vsubswzx.vvvmvl
    ve_vl_vsubswzx_vvvvl,                      // llvm.ve.vl.vsubswzx.vvvvl
    ve_vl_vsubul_vsvl,                         // llvm.ve.vl.vsubul.vsvl
    ve_vl_vsubul_vsvmvl,                       // llvm.ve.vl.vsubul.vsvmvl
    ve_vl_vsubul_vsvvl,                        // llvm.ve.vl.vsubul.vsvvl
    ve_vl_vsubul_vvvl,                         // llvm.ve.vl.vsubul.vvvl
    ve_vl_vsubul_vvvmvl,                       // llvm.ve.vl.vsubul.vvvmvl
    ve_vl_vsubul_vvvvl,                        // llvm.ve.vl.vsubul.vvvvl
    ve_vl_vsubuw_vsvl,                         // llvm.ve.vl.vsubuw.vsvl
    ve_vl_vsubuw_vsvmvl,                       // llvm.ve.vl.vsubuw.vsvmvl
    ve_vl_vsubuw_vsvvl,                        // llvm.ve.vl.vsubuw.vsvvl
    ve_vl_vsubuw_vvvl,                         // llvm.ve.vl.vsubuw.vvvl
    ve_vl_vsubuw_vvvmvl,                       // llvm.ve.vl.vsubuw.vvvmvl
    ve_vl_vsubuw_vvvvl,                        // llvm.ve.vl.vsubuw.vvvvl
    ve_vl_vsuml_vvl,                           // llvm.ve.vl.vsuml.vvl
    ve_vl_vsuml_vvml,                          // llvm.ve.vl.vsuml.vvml
    ve_vl_vsumwsx_vvl,                         // llvm.ve.vl.vsumwsx.vvl
    ve_vl_vsumwsx_vvml,                        // llvm.ve.vl.vsumwsx.vvml
    ve_vl_vsumwzx_vvl,                         // llvm.ve.vl.vsumwzx.vvl
    ve_vl_vsumwzx_vvml,                        // llvm.ve.vl.vsumwzx.vvml
    ve_vl_vxor_vsvl,                           // llvm.ve.vl.vxor.vsvl
    ve_vl_vxor_vsvmvl,                         // llvm.ve.vl.vxor.vsvmvl
    ve_vl_vxor_vsvvl,                          // llvm.ve.vl.vxor.vsvvl
    ve_vl_vxor_vvvl,                           // llvm.ve.vl.vxor.vvvl
    ve_vl_vxor_vvvmvl,                         // llvm.ve.vl.vxor.vvvmvl
    ve_vl_vxor_vvvvl,                          // llvm.ve.vl.vxor.vvvvl
    ve_vl_xorm_MMM,                            // llvm.ve.vl.xorm.MMM
    ve_vl_xorm_mmm,                            // llvm.ve.vl.xorm.mmm
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
//===- llvm/Use.h - Definition of the Use class -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This defines the Use class.  The Use class represents the operand of an
/// instruction or some other User instance which refers to a Value.  The Use
/// class keeps the "use list" of the referenced value up to date.
///
/// Pointer tagging is used to efficiently find the User corresponding to a Use
/// without having to store a User pointer in every Use. A User is preceded in
/// memory by all the Uses corresponding to its operands, and the low bits of
/// one of the fields (Prev) of the Use class are used to encode offsets to be
/// able to find that User given a pointer to any Use. For details, see:
///
///   http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_USE_H
#define LLVM_IR_USE_H

#include "llvm-c/Types.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Compiler.h"

namespace llvm {

template <typename> struct simplify_type;
class User;
class Value;

/// A Use represents the edge between a Value definition and its users.
///
/// This is notionally a two-dimensional linked list. It supports traversing
/// all of the uses for a particular value definition. It also supports jumping
/// directly to the used value when we arrive from the User's operands, and
/// jumping directly to the User when we arrive from the Value's uses.
class Use {
public:
  Use(const Use &U) = delete;

  /// Provide a fast substitute for std::swap<Use> that also works with
  /// less standard-compliant compilers.
  void swap(Use &RHS);

private:
  /// Destructor - Only for zap()
  ~Use() {
    if (Val)
      removeFromList();
  }

  /// Constructor
  Use(User *Parent) : Parent(Parent) {}

public:
  friend class Value;
  friend class User;

  operator Value *() const { return Val; }
  Value *get() const { return Val; }

  /// Returns the User that contains this Use.
  ///
  /// For an instruction operand, for example, this will return the
  /// instruction.
  User *getUser() const { return Parent; }

  inline void set(Value *Val);

  inline Value *operator=(Value *RHS);
  inline const Use &operator=(const Use &RHS);

  Value *operator->() { return Val; }
  const Value *operator->() const { return Val; }

  Use *getNext() const { return Next; }

  /// Return the operand # of this use in its User.
  unsigned getOperandNo() const;

  /// Destroys Use operands when the number of operands of
  /// a User changes.
  static void zap(Use *Start, const Use *Stop, bool del = false);

private:

  Value *Val = nullptr;
  Use *Next = nullptr;
  Use **Prev = nullptr;
  User *Parent = nullptr;

  void addToList(Use **List) {
    // Splice this Use in at the head of the value's use list.
    Next = *List;
    if (Next)
      Next->Prev = &Next;
    Prev = List;
    *Prev = this;
  }

  void removeFromList() {
    // Unlink this Use from the doubly linked use list.
    *Prev = Next;
    if (Next)
      Next->Prev = Prev;
  }
};
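// Illustrative sketch (not part of this header; assumes llvm/IR/Value.h):
// clients normally reach Use edges through the Value and User interfaces
// rather than touching Val/Next/Prev directly. Given a Value *V:
//
//   for (Use &U : V->uses()) {
//     User *Consumer = U.getUser();     // the using instruction/User
//     unsigned OpNo = U.getOperandNo(); // operand slot within Consumer
//   }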

/// Allow clients to treat uses just like values when using
/// casting operators.
template <> struct simplify_type<Use> {
  using SimpleType = Value *;

  static SimpleType getSimplifiedValue(Use &Val) { return Val.get(); }
};
template <> struct simplify_type<const Use> {
  using SimpleType = /*const*/ Value *;

  static SimpleType getSimplifiedValue(const Use &Val) { return Val.get(); }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef)

} // end namespace llvm

#endif // LLVM_IR_USE_H
//===- LegacyPassManager.h - Legacy Container for Passes --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the legacy PassManager class.  This class is used to hold,
// maintain, and optimize execution of Passes.  The PassManager class ensures
// that analysis results are available before a pass runs, and that Passes are
// destroyed when the PassManager is destroyed.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_LEGACYPASSMANAGER_H
#define LLVM_IR_LEGACYPASSMANAGER_H

#include "llvm/Support/CBindingWrapping.h"

namespace llvm {

class Function;
class Pass;
class Module;

namespace legacy {

// Whether or not -debug-pass has been specified. Used to check whether it
// was specified alongside the new PM.
bool debugPassSpecified();

class PassManagerImpl;
class FunctionPassManagerImpl;

/// PassManagerBase - An abstract interface to allow code to add passes to
/// a pass manager without having to hard-code what kind of pass manager
/// it is.
class PassManagerBase {
public:
  virtual ~PassManagerBase();

  /// Add a pass to the queue of passes to run.  This passes ownership of
  /// the Pass to the PassManager.  When the PassManager is destroyed, the pass
  /// will be destroyed as well, so there is no need to delete the pass.  This
  /// may even destroy the pass right away if it is found to be redundant. This
  /// implies that all passes MUST be allocated with 'new'.
  virtual void add(Pass *P) = 0;
};

/// PassManager manages ModulePassManagers
class PassManager : public PassManagerBase {
public:

  PassManager();
  ~PassManager() override;

  void add(Pass *P) override;

  /// run - Execute all of the passes scheduled for execution.  Keep track of
  /// whether any of the passes modifies the module, and if so, return true.
  bool run(Module &M);

private:
  /// PassManagerImpl is the actual class. PassManager is just the
  /// wrapper that publishes the simple pass manager interface.
  PassManagerImpl *PM;
};
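// Illustrative usage sketch (createMyModulePass() is a hypothetical factory;
// passes must be allocated with 'new'):
//
//   legacy::PassManager PM;
//   PM.add(createMyModulePass()); // PM takes ownership of the pass
//   bool Changed = PM.run(M);     // true if any pass modified the Module M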

/// FunctionPassManager manages FunctionPasses.
class FunctionPassManager : public PassManagerBase {
public:
  /// FunctionPassManager ctor - This initializes the pass manager.  It needs,
  /// but does not take ownership of, the specified Module.
  explicit FunctionPassManager(Module *M);
  ~FunctionPassManager() override;

  void add(Pass *P) override;

  /// run - Execute all of the passes scheduled for execution.  Keep
  /// track of whether any of the passes modifies the function, and if
  /// so, return true.
  ///
  bool run(Function &F);

  /// doInitialization - Run all of the initializers for the function passes.
  ///
  bool doInitialization();

  /// doFinalization - Run all of the finalizers for the function passes.
  ///
  bool doFinalization();

private:
  FunctionPassManagerImpl *FPM;
  Module *M;
};
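// Illustrative usage sketch (createMyFunctionPass() is a hypothetical
// factory; M is an existing Module that outlives the manager):
//
//   legacy::FunctionPassManager FPM(&M); // does not take ownership of M
//   FPM.add(createMyFunctionPass());     // FPM takes ownership of the pass
//   FPM.doInitialization();
//   for (Function &F : M)
//     FPM.run(F);
//   FPM.doFinalization();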

} // End legacy namespace

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_STDCXX_CONVERSION_FUNCTIONS(legacy::PassManagerBase, LLVMPassManagerRef)

} // End llvm namespace

#endif
//===-- llvm/ModuleSummaryIndexYAML.h - YAML I/O for summary ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MODULESUMMARYINDEXYAML_H
#define LLVM_IR_MODULESUMMARYINDEXYAML_H

#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/YAMLTraits.h"

namespace llvm {
namespace yaml {

template <> struct ScalarEnumerationTraits<TypeTestResolution::Kind> {
  static void enumeration(IO &io, TypeTestResolution::Kind &value) {
    io.enumCase(value, "Unknown", TypeTestResolution::Unknown);
    io.enumCase(value, "Unsat", TypeTestResolution::Unsat);
    io.enumCase(value, "ByteArray", TypeTestResolution::ByteArray);
    io.enumCase(value, "Inline", TypeTestResolution::Inline);
    io.enumCase(value, "Single", TypeTestResolution::Single);
    io.enumCase(value, "AllOnes", TypeTestResolution::AllOnes);
  }
};

template <> struct MappingTraits<TypeTestResolution> {
  static void mapping(IO &io, TypeTestResolution &res) {
    io.mapOptional("Kind", res.TheKind);
    io.mapOptional("SizeM1BitWidth", res.SizeM1BitWidth);
    io.mapOptional("AlignLog2", res.AlignLog2);
    io.mapOptional("SizeM1", res.SizeM1);
    io.mapOptional("BitMask", res.BitMask);
    io.mapOptional("InlineBits", res.InlineBits);
  }
};

template <>
struct ScalarEnumerationTraits<WholeProgramDevirtResolution::ByArg::Kind> {
  static void enumeration(IO &io,
                          WholeProgramDevirtResolution::ByArg::Kind &value) {
    io.enumCase(value, "Indir", WholeProgramDevirtResolution::ByArg::Indir);
    io.enumCase(value, "UniformRetVal",
                WholeProgramDevirtResolution::ByArg::UniformRetVal);
    io.enumCase(value, "UniqueRetVal",
                WholeProgramDevirtResolution::ByArg::UniqueRetVal);
    io.enumCase(value, "VirtualConstProp",
                WholeProgramDevirtResolution::ByArg::VirtualConstProp);
  }
};

template <> struct MappingTraits<WholeProgramDevirtResolution::ByArg> {
  static void mapping(IO &io, WholeProgramDevirtResolution::ByArg &res) {
    io.mapOptional("Kind", res.TheKind);
    io.mapOptional("Info", res.Info);
    io.mapOptional("Byte", res.Byte);
    io.mapOptional("Bit", res.Bit);
  }
};

template <>
struct CustomMappingTraits<
    std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg>> {
  static void inputOne(
      IO &io, StringRef Key,
      std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg> &V) {
    std::vector<uint64_t> Args;
    std::pair<StringRef, StringRef> P = {"", Key};
    while (!P.second.empty()) {
      P = P.second.split(',');
      uint64_t Arg;
      if (P.first.getAsInteger(0, Arg)) {
        io.setError("key not an integer");
        return;
      }
      Args.push_back(Arg);
    }
    io.mapRequired(Key.str().c_str(), V[Args]);
  }
  static void output(
      IO &io,
      std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg> &V) {
    for (auto &P : V) {
      std::string Key;
      for (uint64_t Arg : P.first) {
        if (!Key.empty())
          Key += ',';
        Key += llvm::utostr(Arg);
      }
      io.mapRequired(Key.c_str(), P.second);
    }
  }
};
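// Sketch of the key encoding handled above: the map key is the
// comma-separated list of constant argument values, so a YAML entry such as
//
//   "1,2": {Kind: UniformRetVal, Info: 42}
//
// round-trips with the vector {1, 2} as the map key (illustrative values).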

template <> struct ScalarEnumerationTraits<WholeProgramDevirtResolution::Kind> {
  static void enumeration(IO &io, WholeProgramDevirtResolution::Kind &value) {
    io.enumCase(value, "Indir", WholeProgramDevirtResolution::Indir);
    io.enumCase(value, "SingleImpl", WholeProgramDevirtResolution::SingleImpl);
    io.enumCase(value, "BranchFunnel",
                WholeProgramDevirtResolution::BranchFunnel);
  }
};

template <> struct MappingTraits<WholeProgramDevirtResolution> {
  static void mapping(IO &io, WholeProgramDevirtResolution &res) {
    io.mapOptional("Kind", res.TheKind);
    io.mapOptional("SingleImplName", res.SingleImplName);
    io.mapOptional("ResByArg", res.ResByArg);
  }
};

template <>
struct CustomMappingTraits<std::map<uint64_t, WholeProgramDevirtResolution>> {
  static void inputOne(IO &io, StringRef Key,
                       std::map<uint64_t, WholeProgramDevirtResolution> &V) {
    uint64_t KeyInt;
    if (Key.getAsInteger(0, KeyInt)) {
      io.setError("key not an integer");
      return;
    }
    io.mapRequired(Key.str().c_str(), V[KeyInt]);
  }
  static void output(IO &io, std::map<uint64_t, WholeProgramDevirtResolution> &V) {
    for (auto &P : V)
      io.mapRequired(llvm::utostr(P.first).c_str(), P.second);
  }
};

template <> struct MappingTraits<TypeIdSummary> {
  static void mapping(IO &io, TypeIdSummary& summary) {
    io.mapOptional("TTRes", summary.TTRes);
    io.mapOptional("WPDRes", summary.WPDRes);
  }
};

struct FunctionSummaryYaml {
  unsigned Linkage, Visibility;
  bool NotEligibleToImport, Live, IsLocal, CanAutoHide;
  std::vector<uint64_t> Refs;
  std::vector<uint64_t> TypeTests;
  std::vector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
      TypeCheckedLoadVCalls;
  std::vector<FunctionSummary::ConstVCall> TypeTestAssumeConstVCalls,
      TypeCheckedLoadConstVCalls;
};

} // End yaml namespace
} // End llvm namespace

namespace llvm {
namespace yaml {

template <> struct MappingTraits<FunctionSummary::VFuncId> {
  static void mapping(IO &io, FunctionSummary::VFuncId& id) {
    io.mapOptional("GUID", id.GUID);
    io.mapOptional("Offset", id.Offset);
  }
};

template <> struct MappingTraits<FunctionSummary::ConstVCall> {
  static void mapping(IO &io, FunctionSummary::ConstVCall& id) {
    io.mapOptional("VFunc", id.VFunc);
    io.mapOptional("Args", id.Args);
  }
};

} // End yaml namespace
} // End llvm namespace

LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummary::VFuncId)
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummary::ConstVCall)

namespace llvm {
namespace yaml {

template <> struct MappingTraits<FunctionSummaryYaml> {
  static void mapping(IO &io, FunctionSummaryYaml& summary) {
    io.mapOptional("Linkage", summary.Linkage);
    io.mapOptional("Visibility", summary.Visibility);
    io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport);
    io.mapOptional("Live", summary.Live);
    io.mapOptional("Local", summary.IsLocal);
    io.mapOptional("CanAutoHide", summary.CanAutoHide);
    io.mapOptional("Refs", summary.Refs);
    io.mapOptional("TypeTests", summary.TypeTests);
    io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls);
    io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls);
    io.mapOptional("TypeTestAssumeConstVCalls",
                   summary.TypeTestAssumeConstVCalls);
    io.mapOptional("TypeCheckedLoadConstVCalls",
                   summary.TypeCheckedLoadConstVCalls);
  }
};

} // End yaml namespace
} // End llvm namespace

LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummaryYaml)

namespace llvm {
namespace yaml {

// FIXME: Add YAML mappings for the rest of the module summary.
template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
  static void inputOne(IO &io, StringRef Key, GlobalValueSummaryMapTy &V) {
    std::vector<FunctionSummaryYaml> FSums;
    io.mapRequired(Key.str().c_str(), FSums);
    uint64_t KeyInt;
    if (Key.getAsInteger(0, KeyInt)) {
      io.setError("key not an integer");
      return;
    }
    if (!V.count(KeyInt))
      V.emplace(KeyInt, /*IsAnalysis=*/false);
    auto &Elem = V.find(KeyInt)->second;
    for (auto &FSum : FSums) {
      std::vector<ValueInfo> Refs;
      for (auto &RefGUID : FSum.Refs) {
        if (!V.count(RefGUID))
          V.emplace(RefGUID, /*IsAnalysis=*/false);
        Refs.push_back(ValueInfo(/*IsAnalysis=*/false, &*V.find(RefGUID)));
      }
      Elem.SummaryList.push_back(std::make_unique<FunctionSummary>(
          GlobalValueSummary::GVFlags(
              static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
              static_cast<GlobalValue::VisibilityTypes>(FSum.Visibility),
              FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal,
              FSum.CanAutoHide),
          /*NumInsts=*/0, FunctionSummary::FFlags{}, /*EntryCount=*/0, Refs,
          ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
          std::move(FSum.TypeTestAssumeVCalls),
          std::move(FSum.TypeCheckedLoadVCalls),
          std::move(FSum.TypeTestAssumeConstVCalls),
          std::move(FSum.TypeCheckedLoadConstVCalls),
          ArrayRef<FunctionSummary::ParamAccess>{}, ArrayRef<CallsiteInfo>{},
          ArrayRef<AllocInfo>{}));
    }
  }
  static void output(IO &io, GlobalValueSummaryMapTy &V) {
    for (auto &P : V) {
      std::vector<FunctionSummaryYaml> FSums;
      for (auto &Sum : P.second.SummaryList) {
        if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get())) {
          std::vector<uint64_t> Refs;
          for (auto &VI : FSum->refs())
            Refs.push_back(VI.getGUID());
          FSums.push_back(FunctionSummaryYaml{
              FSum->flags().Linkage, FSum->flags().Visibility,
              static_cast<bool>(FSum->flags().NotEligibleToImport),
              static_cast<bool>(FSum->flags().Live),
              static_cast<bool>(FSum->flags().DSOLocal),
              static_cast<bool>(FSum->flags().CanAutoHide), Refs,
              FSum->type_tests(), FSum->type_test_assume_vcalls(),
              FSum->type_checked_load_vcalls(),
              FSum->type_test_assume_const_vcalls(),
              FSum->type_checked_load_const_vcalls()});
        }
      }
      if (!FSums.empty())
        io.mapRequired(llvm::utostr(P.first).c_str(), FSums);
    }
  }
};

template <> struct CustomMappingTraits<TypeIdSummaryMapTy> {
  static void inputOne(IO &io, StringRef Key, TypeIdSummaryMapTy &V) {
    TypeIdSummary TId;
    io.mapRequired(Key.str().c_str(), TId);
    V.insert({GlobalValue::getGUID(Key), {std::string(Key), TId}});
  }
  static void output(IO &io, TypeIdSummaryMapTy &V) {
    for (auto &TidIter : V)
      io.mapRequired(TidIter.second.first.c_str(), TidIter.second.second);
  }
};

template <> struct MappingTraits<ModuleSummaryIndex> {
  static void mapping(IO &io, ModuleSummaryIndex& index) {
    io.mapOptional("GlobalValueMap", index.GlobalValueMap);
    io.mapOptional("TypeIdMap", index.TypeIdMap);
    io.mapOptional("WithGlobalValueDeadStripping",
                   index.WithGlobalValueDeadStripping);

    if (io.outputting()) {
      std::vector<std::string> CfiFunctionDefs(index.CfiFunctionDefs.begin(),
                                               index.CfiFunctionDefs.end());
      io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
      std::vector<std::string> CfiFunctionDecls(index.CfiFunctionDecls.begin(),
                                                index.CfiFunctionDecls.end());
      io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
    } else {
      std::vector<std::string> CfiFunctionDefs;
      io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
      index.CfiFunctionDefs = {CfiFunctionDefs.begin(), CfiFunctionDefs.end()};
      std::vector<std::string> CfiFunctionDecls;
      io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
      index.CfiFunctionDecls = {CfiFunctionDecls.begin(),
                                CfiFunctionDecls.end()};
    }
  }
};
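// A minimal document accepted by the mapping above might look like this
// (illustrative sketch; GUID keys abbreviated):
//
//   ---
//   GlobalValueMap:
//     42:
//       - Linkage: 0
//         Live: true
//   TypeIdMap:
//     _ZTS1A:
//       TTRes: {Kind: Single, SizeM1BitWidth: 0}
//   ...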

} // End yaml namespace
} // End llvm namespace

#endif
//===- IntrinsicsNVVM.td - Defines NVVM intrinsics ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the NVVM-specific intrinsics for use with NVPTX.
//
//===----------------------------------------------------------------------===//

// The following intrinsics were once defined here, but are now auto-upgraded
// to target-generic LLVM intrinsics.
//
//   * llvm.nvvm.brev32  --> llvm.bitreverse.i32
//   * llvm.nvvm.brev64  --> llvm.bitreverse.i64
//   * llvm.nvvm.clz.i   --> llvm.ctlz.i32
//   * llvm.nvvm.clz.ll  --> trunc i64 llvm.ctlz.i64(x) to i32
//   * llvm.nvvm.popc.i  --> llvm.ctpop.i32
//   * llvm.nvvm.popc.ll --> trunc i64 llvm.ctpop.i64 to i32
//   * llvm.nvvm.abs.i   --> select(x >= -x, x, -x)
//   * llvm.nvvm.abs.ll  --> ibid.
//   * llvm.nvvm.max.i   --> select(x sge y, x, y)
//   * llvm.nvvm.max.ll  --> ibid.
//   * llvm.nvvm.max.ui  --> select(x uge y, x, y)
//   * llvm.nvvm.max.ull --> ibid.
//   * llvm.nvvm.min.i   --> select(x sle y, x, y)
//   * llvm.nvvm.min.ll  --> ibid.
//   * llvm.nvvm.min.ui  --> select(x ule y, x, y)
//   * llvm.nvvm.min.ull --> ibid.
//   * llvm.nvvm.h2f     --> llvm.convert.to.fp16.f32

def llvm_global_ptr_ty  : LLVMQualPointerType<1>;  // (global)ptr
def llvm_shared_ptr_ty  : LLVMQualPointerType<3>;  // (shared)ptr

//
// MISC
//

// Helper class that represents a 'fragment' of an NVPTX *MMA instruction.
// Geom: m<M>n<N>k<K>. E.g. m8n32k16
// Frag: [a|b|c|d] ([x1|x2|x4] for ldmatrix)
// PtxEltType: PTX type for the element.
class WMMA_REGS<string Geom, string Frag, string PtxEltType> {
  string geom = Geom;
  string frag = Frag;
  string ptx_elt_type = PtxEltType;
  string gft = Geom#":"#Frag#":"#ptx_elt_type;
  string ft = frag#":"#ptx_elt_type;
  list<LLVMType> regs = !cond(
    // mma fp ops use smaller fragments than wmma fp ops
    !eq(gft,"m8n8k4:a:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m8n8k4:b:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k8:a:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k8:b:f16") : [llvm_v2f16_ty],
    !eq(gft,"m16n8k8:c:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k8:d:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k8:c:f32") : !listsplat(llvm_float_ty, 4),
    !eq(gft,"m16n8k8:d:f32") : !listsplat(llvm_float_ty, 4),
    !eq(gft,"m16n8k16:a:f16") : !listsplat(llvm_v2f16_ty, 4),
    !eq(gft,"m16n8k16:b:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k16:c:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k16:d:f16") : !listsplat(llvm_v2f16_ty, 2),
    !eq(gft,"m16n8k16:c:f32") : !listsplat(llvm_float_ty, 4),
    !eq(gft,"m16n8k16:d:f32") : !listsplat(llvm_float_ty, 4),
    !eq(gft,"m16n8k4:c:f32") : !listsplat(llvm_float_ty, 4),
    !eq(gft,"m16n8k4:d:f32") : !listsplat(llvm_float_ty, 4),

    // wmma fp16 -> fp16/fp32 @  m16n16k16/m8n32k16/m32n8k16
    // All other supported geometries use the same fragment format for f32 and
    // f16, so we only need to consider {fragment, type}.
    !eq(ft,"a:f16") : !listsplat(llvm_v2f16_ty, 8),
    !eq(ft,"b:f16") : !listsplat(llvm_v2f16_ty, 8),
    !eq(ft,"c:f16") : !listsplat(llvm_v2f16_ty, 4),
    !eq(ft,"d:f16") : !listsplat(llvm_v2f16_ty, 4),
    !eq(ft,"c:f32") : !listsplat(llvm_float_ty, 8),
    !eq(ft,"d:f32") : !listsplat(llvm_float_ty, 8),

    // wmma tf32 -> s32 @ m16n16k8
    !eq(gft,"m16n16k8:a:tf32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n16k8:b:tf32") : !listsplat(llvm_i32_ty, 4),

    // mma tf32 -> s32 @ m16n8k4/m16n8k8
    !eq(gft,"m16n8k4:a:tf32") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k4:b:tf32") : [llvm_i32_ty],
    !eq(gft,"m16n8k8:a:tf32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k8:b:tf32") : !listsplat(llvm_i32_ty, 2),

    !eq(gft,"m8n8k4:a:f64") : [llvm_double_ty],
    !eq(gft,"m8n8k4:b:f64") : [llvm_double_ty],
    !eq(gft,"m8n8k4:c:f64") : !listsplat(llvm_double_ty, 2),
    !eq(gft,"m8n8k4:d:f64") : !listsplat(llvm_double_ty, 2),

    // wmma bf16 -> s32 @ m16n16k16/m8n32k16/m32n8k16
    !eq(gft,"m16n16k16:a:bf16") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n16k16:b:bf16") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m8n32k16:a:bf16") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m8n32k16:b:bf16") : !listsplat(llvm_i32_ty, 8),
    !eq(gft,"m32n8k16:a:bf16") : !listsplat(llvm_i32_ty, 8),
    !eq(gft,"m32n8k16:b:bf16") : !listsplat(llvm_i32_ty, 2),

    // mma bf16 -> s32 @ m16n8k16/m16n8k8
    !eq(gft,"m16n8k16:a:bf16") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k16:b:bf16") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k8:a:bf16") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k8:b:bf16") : [llvm_i32_ty],

    // wmma u8/s8 -> s32 @ m16n16k16/m8n32k16/m32n8k16
    !eq(gft,"m16n16k16:a:u8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n16k16:a:s8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n16k16:b:u8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n16k16:b:s8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n16k16:c:s32") : !listsplat(llvm_i32_ty, 8),
    !eq(gft,"m16n16k16:d:s32") : !listsplat(llvm_i32_ty, 8),

    !eq(gft,"m8n32k16:a:u8") : [llvm_i32_ty],
    !eq(gft,"m8n32k16:a:s8") : [llvm_i32_ty],
    !eq(gft,"m8n32k16:b:u8") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m8n32k16:b:s8") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m8n32k16:c:s32") : !listsplat(llvm_i32_ty, 8),
    !eq(gft,"m8n32k16:d:s32") : !listsplat(llvm_i32_ty, 8),

    !eq(gft,"m32n8k16:a:u8") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m32n8k16:a:s8") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m32n8k16:b:u8") : [llvm_i32_ty],
    !eq(gft,"m32n8k16:b:s8") : [llvm_i32_ty],
    !eq(gft,"m32n8k16:c:s32") : !listsplat(llvm_i32_ty, 8),
    !eq(gft,"m32n8k16:d:s32") : !listsplat(llvm_i32_ty, 8),

    // mma u8/s8 -> s32 @ m8n8k16/m16n8k16/m16n8k32
    !eq(gft,"m8n8k16:a:u8") : [llvm_i32_ty],
    !eq(gft,"m8n8k16:a:s8") : [llvm_i32_ty],
    !eq(gft,"m8n8k16:b:u8") : [llvm_i32_ty],
    !eq(gft,"m8n8k16:b:s8") : [llvm_i32_ty],
    !eq(gft,"m8n8k16:c:s32") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m8n8k16:d:s32") : !listsplat(llvm_i32_ty, 2),

    !eq(gft,"m16n8k16:a:u8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k16:a:s8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k16:b:u8") : [llvm_i32_ty],
    !eq(gft,"m16n8k16:b:s8") : [llvm_i32_ty],
    !eq(gft,"m16n8k16:c:s32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k16:d:s32") : !listsplat(llvm_i32_ty, 4),

    !eq(gft,"m16n8k32:a:u8") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k32:a:s8") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k32:b:u8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k32:b:s8") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k32:c:s32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k32:d:s32") : !listsplat(llvm_i32_ty, 4),

    // wmma/mma u4/s4 -> s32 @ m8n8k32 (u4/s4)
    !eq(gft,"m8n8k32:a:u4") : [llvm_i32_ty],
    !eq(gft,"m8n8k32:a:s4") : [llvm_i32_ty],
    !eq(gft,"m8n8k32:b:u4") : [llvm_i32_ty],
    !eq(gft,"m8n8k32:b:s4") : [llvm_i32_ty],
    !eq(gft,"m8n8k32:c:s32") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m8n8k32:d:s32") : !listsplat(llvm_i32_ty, 2),

    !eq(gft,"m16n8k32:a:u4") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k32:a:s4") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k32:b:u4") : [llvm_i32_ty],
    !eq(gft,"m16n8k32:b:s4") : [llvm_i32_ty],
    !eq(gft,"m16n8k32:c:s32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k32:d:s32") : !listsplat(llvm_i32_ty, 4),

    !eq(gft,"m16n8k64:a:u4") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k64:a:s4") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k64:b:u4") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k64:b:s4") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k64:c:s32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k64:d:s32") : !listsplat(llvm_i32_ty, 4),

    // wmma/mma b1 -> s32 @ m8n8k128(b1)
    !eq(gft,"m8n8k128:a:b1") : [llvm_i32_ty],
    !eq(gft,"m8n8k128:b:b1") : [llvm_i32_ty],
    !eq(gft,"m8n8k128:c:s32") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m8n8k128:d:s32") : !listsplat(llvm_i32_ty, 2),

    !eq(gft,"m16n8k128:a:b1") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k128:b:b1") : [llvm_i32_ty],
    !eq(gft,"m16n8k128:c:s32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k128:d:s32") : !listsplat(llvm_i32_ty, 4),

    !eq(gft,"m16n8k256:a:b1") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k256:b:b1") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m16n8k256:c:s32") : !listsplat(llvm_i32_ty, 4),
    !eq(gft,"m16n8k256:d:s32") : !listsplat(llvm_i32_ty, 4),

    // ldmatrix b16 -> s32 @ m8n8
    !eq(gft,"m8n8:x1:b16") : !listsplat(llvm_i32_ty, 1),
    !eq(gft,"m8n8:x2:b16") : !listsplat(llvm_i32_ty, 2),
    !eq(gft,"m8n8:x4:b16") : !listsplat(llvm_i32_ty, 4),
  );
}
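// Example (illustrative): WMMA_REGS<"m16n16k16", "a", "f16"> describes the
// 'a' fragment of an m16n16k16 op with f16 elements; its gft string is
// "m16n16k16:a:f16" and, per the !cond above, regs is eight <2 x half>
// values (!listsplat(llvm_v2f16_ty, 8)).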

class WMMA_NAME_LDST<string Op, WMMA_REGS Frag, string Layout, int WithStride> {
  string intr = "llvm.nvvm.wmma."
                # Frag.geom
                # "." # Op
                # "." # Frag.frag
                # "." # Layout
                # !if(WithStride, ".stride", "")
                # "." # Frag.ptx_elt_type
                ;
  // TODO(tra): record name should ideally use the same field order as the intrinsic.
  // E.g. string record = !subst("llvm", "int",
  //                      !subst(".", "_", llvm));
  string record = "int_nvvm_wmma_"
                # Frag.geom
                # "_" # Op
                # "_" # Frag.frag
                # "_" # Frag.ptx_elt_type
                # "_" # Layout
                # !if(WithStride, "_stride", "");
}
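// Example (illustrative): for Op = "load", the m16n16k16 'a' f16 fragment,
// Layout = "row", WithStride = 1, the class above yields
//   intr   = "llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16"
//   record = "int_nvvm_wmma_m16n16k16_load_a_f16_row_stride"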

class MMA_SIGNATURE<WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
  list<WMMA_REGS> id_frags = !cond(
     // FP16 ops are identified by accumulator & result type.
     !eq(A.ptx_elt_type, "f16") : [D, C],
     // other ops are identified by input types.
     !ne(A.ptx_elt_type, B.ptx_elt_type): [A, B],
     true: [A]
     );
   string ret = !foldl("", id_frags, a, b, !strconcat(a, ".", b.ptx_elt_type));
}

class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, string b1op,
                WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
  string signature = MMA_SIGNATURE<A, B, C, D>.ret;
  string llvm = "llvm.nvvm.wmma."
                # A.geom
                # ".mma"
                # b1op
                # "." # ALayout
                # "." # BLayout
                # !if(!ne(Rnd, ""), !strconcat(".", Rnd), "")
                # signature
                # !if(Satfinite, ".satfinite", "");

  string record = !subst(".", "_",
                  !subst("llvm.", "int_", llvm));
}

class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op,
               WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
  string signature = MMA_SIGNATURE<A, B, C, D>.ret;
  string llvm = "llvm.nvvm.mma"
                # b1op
                # "." # A.geom
                # "." # ALayout
                # "." # BLayout
                # !if(Satfinite, ".satfinite", "")
                # signature;
  string record = !subst(".", "_",
                  !subst("llvm.", "int_", llvm));
}

class LDMATRIX_NAME<WMMA_REGS Frag, int Trans> {
  string intr = "llvm.nvvm.ldmatrix.sync.aligned"
                # "." # Frag.geom
                # "." # Frag.frag
                # !if(Trans, ".trans", "")
                # "." # Frag.ptx_elt_type
                ;
  string record = !subst(".", "_",
                  !subst("llvm.", "int_", intr));
}
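// Example (illustrative): LDMATRIX_NAME<WMMA_REGS<"m8n8", "x2", "b16">, 1>
// yields intr = "llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16" and
// record = "int_nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16".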

// Generates list of 4-tuples of WMMA_REGS representing a valid MMA op.
//   Geom: list of supported geometries.
//   TypeN: PTX type of the corresponding fragment's element.
//   TypeB and TypeD may be left empty when they must match TypeA and TypeC,
//   respectively.
class MMA_OPS<list<string> Geom, list<string> TypeA, list<string> TypeB,
            list<string> TypeC, list<string> TypeD> {
  list<list<WMMA_REGS>> ret =
     !foldl([]<list<WMMA_REGS>>, Geom, t1, geom, !listconcat(t1,
     !foldl([]<list<WMMA_REGS>>, TypeA, t2, type_a, !listconcat(t2,
     !foldl([]<list<WMMA_REGS>>, !if(!size(TypeB), TypeB, [type_a]), t3, type_b, !listconcat(t3,
     !foldl([]<list<WMMA_REGS>>, TypeC, t4, type_c, !listconcat(t4,
     !foldl([]<list<WMMA_REGS>>, !if(!size(TypeD), TypeD, [type_c]), t5, type_d, !listconcat(t5,
            [[WMMA_REGS<geom, "a", type_a>,
              WMMA_REGS<geom, "b", type_b>,
              WMMA_REGS<geom, "c", type_c>,
              WMMA_REGS<geom, "d", type_d>]]))))))))));
   // Debugging aid for readable representation of the list above.
   list<list<string>> ops = !foreach(x, ret, [x[0].gft, x[1].gft, x[2].gft, x[3].gft]);
}
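// The nested !foldl above forms a cartesian product. For instance (sketch),
// MMA_OPS<["m8n8k4"], ["f16"], [], ["f16", "f32"], []>.ret expands to two
// 4-tuples of WMMA_REGS, {a:f16, b:f16, c:f16, d:f16} and
// {a:f16, b:f16, c:f32, d:f32}, since empty TypeB/TypeD default to
// TypeA/TypeC respectively.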

class MMA_LDST_OPS<list<string> Geom, list<string> Frags, list<string> Types> {
  list<WMMA_REGS> ret =
     !foldl([]<WMMA_REGS>, Geom, t1, geom, !listconcat(t1,
     !foldl([]<WMMA_REGS>, Frags, t2, frag, !listconcat(t2,
     !foldl([]<WMMA_REGS>, Types, t3, type, !listconcat(t3,
            [WMMA_REGS<geom, frag, type>]))))));
   // Debugging aid for readable representation of the list above.
   list<string> ops = !foreach(x, ret, x.gft);
}

class LDMATRIX_OPS<list<string> Geom, list<string> Frags, list<string> Types> {
  list<WMMA_REGS> ret =
     !foldl([]<WMMA_REGS>, Geom, t1, geom, !listconcat(t1,
     !foldl([]<WMMA_REGS>, Frags, t2, frag, !listconcat(t2,
     !foldl([]<WMMA_REGS>, Types, t3, type, !listconcat(t3,
            [WMMA_REGS<geom, frag, type>]))))));
   // Debugging aid for readable representation of the list above.
   list<string> ops = !foreach(x, ret, x.gft);
}

// Creates list of valid combinations of fragments. This is the main list that
// drives generation of corresponding intrinsics and instructions.
class NVVM_MMA_OPS {
  list<list<WMMA_REGS>> tf32_wmma_ops = MMA_OPS<
            ["m16n16k8"],
            ["tf32"], [], ["f32"], []>.ret;
  list<list<WMMA_REGS>> bf16_wmma_ops = MMA_OPS<
            ["m16n16k16", "m32n8k16", "m8n32k16"],
            ["bf16"], [], ["f32"], []>.ret;
  list<list<WMMA_REGS>> f64_wmma_ops = MMA_OPS<
            ["m8n8k4"],
            ["f64"], [], ["f64"], []>.ret;
  list<list<WMMA_REGS>> fp_wmma_ops = MMA_OPS<
            ["m16n16k16", "m32n8k16", "m8n32k16"],
            ["f16"], [], ["f16", "f32"], ["f16", "f32"]>.ret;
  list<list<WMMA_REGS>> int_wmma_ops = MMA_OPS<
            ["m16n16k16", "m32n8k16", "m8n32k16"],
            ["s8", "u8"], [], ["s32"], []>.ret;
  list<list<WMMA_REGS>> subint_wmma_ops = MMA_OPS<
            ["m8n8k32"],
            ["s4", "u4"], [], ["s32"], []>.ret;
  list<list<WMMA_REGS>> bit_wmma_ops = MMA_OPS<
            ["m8n8k128"],
            ["b1"], [], ["s32"], []>.ret;
  list<list<WMMA_REGS>> all_wmma_ops = !listconcat(
            tf32_wmma_ops, bf16_wmma_ops, f64_wmma_ops,
            fp_wmma_ops, int_wmma_ops, subint_wmma_ops, bit_wmma_ops);

  list<list<WMMA_REGS>> tf32_mma_ops = MMA_OPS<
            ["m16n8k4", "m16n8k8"],
            ["tf32"], [], ["f32"], []>.ret;
  list<list<WMMA_REGS>> bf16_mma_ops = MMA_OPS<
            ["m16n8k16", "m16n8k8"],
            ["bf16"], [], ["f32"], []>.ret;
  list<list<WMMA_REGS>> f64_mma_ops = MMA_OPS<
            ["m8n8k4"],
            ["f64"], [], ["f64"], []>.ret;
  list<list<WMMA_REGS>> fp_mma_ops = MMA_OPS<
            ["m8n8k4", "m16n8k8", "m16n8k16"],
            ["f16"], [], ["f16", "f32"], ["f16", "f32"]>.ret;
  list<list<WMMA_REGS>> int_mma_ops = MMA_OPS<
            ["m8n8k16", "m16n8k16", "m16n8k32"],
            ["s8", "u8"], ["s8", "u8"], ["s32"], []>.ret;
  list<list<WMMA_REGS>> subint_mma_ops = MMA_OPS<
            ["m8n8k32", "m16n8k32", "m16n8k64"],
            ["s4", "u4"], ["s4", "u4"], ["s32"], []>.ret;
  list<list<WMMA_REGS>> bit_mma_ops = MMA_OPS<
            ["m8n8k128", "m16n8k128", "m16n8k256"],
            ["b1"], [], ["s32"], []>.ret;
  list<list<WMMA_REGS>> all_mma_ops = !listconcat(
            tf32_mma_ops, bf16_mma_ops, f64_mma_ops,
            fp_mma_ops, int_mma_ops, subint_mma_ops, bit_mma_ops);

  list<WMMA_REGS> ldst_ab_ops = MMA_LDST_OPS<
            ["m16n16k16", "m32n8k16", "m8n32k16"],
            ["a", "b"], ["f16", "u8", "s8", "bf16"]>.ret;
  list<WMMA_REGS> ldst_cd_ops = MMA_LDST_OPS<
            ["m16n16k16", "m32n8k16", "m8n32k16"],
            ["c", "d"], ["f16", "f32", "s32"]>.ret;
  list<WMMA_REGS> ldst_tf32_ab_ops = MMA_LDST_OPS<
            ["m16n16k8"],
            ["a", "b"], ["tf32"]>.ret;
  list<WMMA_REGS> ldst_tf32_cd_ops = MMA_LDST_OPS<
            ["m16n16k8"],
            ["c", "d"], ["f32"]>.ret;
  list<WMMA_REGS> ldst_f64_abcd_ops = MMA_LDST_OPS<
            ["m8n8k4"],
            ["a", "b", "c", "d"], ["f64"]>.ret;
  list<WMMA_REGS> ldst_subint_ab_ops = MMA_LDST_OPS<
            ["m8n8k32"], ["a", "b"], ["s4","u4"]>.ret;
  list<WMMA_REGS> ldst_bit_ab_ops = MMA_LDST_OPS<
            ["m8n8k128"], ["a", "b"], ["b1"]>.ret;
  list<WMMA_REGS> ldst_subint_cd_ops = MMA_LDST_OPS<
            ["m8n8k32", "m8n8k128"],  ["c", "d"], ["s32"]>.ret;
  list<WMMA_REGS> all_ldst_ops = !listconcat(ldst_ab_ops, ldst_cd_ops,
                                             ldst_tf32_ab_ops,
                                             ldst_tf32_cd_ops,
                                             ldst_f64_abcd_ops,
                                             ldst_subint_ab_ops,
                                             ldst_bit_ab_ops,
                                             ldst_subint_cd_ops);
  // Separate A/B/C fragments (loads) from D (stores).
  list<WMMA_REGS> all_ld_ops = !filter(op, all_ldst_ops, !ne(op.frag, "d"));
  list<WMMA_REGS> all_st_ops = !filter(op, all_ldst_ops, !eq(op.frag, "d"));

  list<WMMA_REGS> ldmatrix_b16_ops = LDMATRIX_OPS<
    ["m8n8"], ["x1", "x2", "x4"], ["b16"]>.ret;
  list<WMMA_REGS> all_ldmatrix_ops = ldmatrix_b16_ops;
}

def NVVM_MMA_OPS : NVVM_MMA_OPS;

// Returns true if this combination of fragment and layout for WMMA load/store
// ops is supported; false otherwise.
// E.g.
// if NVVM_WMMA_LDST_SUPPORTED<...>.ret then
//   def : FOO<>; // The record will only be defined for supported ops.
//
class NVVM_WMMA_LDST_SUPPORTED<WMMA_REGS frag, string layout> {
  string f = frag.frag;
  string t = frag.ptx_elt_type;

  bit ret = !cond(
    // Sub-int loads and stores require the A fragment to use row layout and
    // the B fragment to use column layout.
    !and(!or(!eq(t, "b1"),
             !eq(t, "u4"),
             !eq(t, "s4")),
         !or(!and(!eq(f, "a"),
                  !ne(layout, "row")),
             !and(!eq(f, "b"),
                  !ne(layout, "col")))) : false,
    true: true
  );
}
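
// For example, NVVM_WMMA_LDST_SUPPORTED<WMMA_REGS<"m8n8k32", "a", "s4">,
// "col">.ret is false (a sub-int A fragment with a non-row layout), while
// the same fragment with the "row" layout yields true.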

// Returns true if this combination of layout/satf/rnd for WMMA ops is
// supported; false otherwise.
// E.g.
// if NVVM_WMMA_SUPPORTED<...>.ret then
//   def : FOO<>; // The record will only be defined for supported ops.
//
class NVVM_WMMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b, int satf, string rnd> {
  // WMMA ops check both layouts.
  string layout = layout_a # ":" # layout_b;
  string t = frags[0].ptx_elt_type;

  bit ret = !cond(
    // Only f64 WMMA functions support rnd options; any non-f64 type that
    // supplies an rnd value is invalid.
    !and(!ne(t, "f64"), !ne(rnd, "")) : false,

    // satf is only valid for select types
    !and(!eq(satf, 1),
         !ne(t, "s8"),
         !ne(t, "u8"),
         !ne(t, "s4"),
         !ne(t, "u4"),
         !ne(t, "f16")): false,

    // Sub-int WMMA requires the row:col layout combination.
    !and(!or(!eq(t, "s4"),
             !eq(t, "u4"),
             !eq(t, "b1")),
         !ne(layout, "row:col")) : false,
    true: true
  );
}
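
// For example, f16 fragments with rnd = "rn" yield false (rnd is f64-only),
// while the same fragments with rnd = "" and satf = 1 yield true, since satf
// is permitted for f16.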

class NVVM_MMA_B1OPS<list<WMMA_REGS> frags> {
  list<string> ret = !cond(
    !eq(frags[0].ptx_elt_type, "b1") : [".xor.popc", ".and.popc"],
    true: [""]
  );
}

// Returns true if this combination of layout/satf for MMA ops is supported;
// false otherwise.
// E.g.
// if NVVM_MMA_SUPPORTED<...>.ret then
//   def : FOO<>; // The record will only be defined for supported ops.
//
class NVVM_MMA_SUPPORTED<list<WMMA_REGS> frags, string layout_a, string layout_b, int satf> {
  // MMA ops check both layouts.
  string layout = layout_a # ":" # layout_b;
  string a_type = frags[0].ptx_elt_type;
  string b_type = frags[1].ptx_elt_type;
  string c_type = frags[2].ptx_elt_type;
  string d_type = frags[3].ptx_elt_type;
  string geom = frags[0].geom;

  // gcd is a shortcut used to identify instructions that depend on
  // geom+frag_c+frag_d.
  string gcd = geom # ":" # c_type # d_type;
  bit ret = !cond(

    // Limit satf to valid types
    !and(!eq(satf, 1),
         !ne(a_type, "s8"),
         !ne(a_type, "u8"),
         !ne(a_type, "s4"),
         !ne(a_type, "u4")): false,

    // m8n8k4 has no C=f32 D=f16 variant.
    !eq(gcd, "m8n8k4:f32f16"): false,

    // Only m8n8k4 with f16 does not require the row:col layout.
    !and(!ne(layout, "row:col"),
         !or(!ne(geom, "m8n8k4"),
             !ne(a_type, "f16"))) : false,

    // m16n8k8 requires A and B to be the same type and C and D to be the same
    // type.
    !and(!eq(geom, "m16n8k8"),
         !or(!ne(a_type, b_type),
             !ne(c_type, d_type))): false,

    // All others are OK.
    true: true
  );
}
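
// For example, m8n8k4 fragments with a_type = "f16", c_type = "f32" and
// d_type = "f16" produce gcd = "m8n8k4:f32f16" and therefore ret = false,
// while the same geometry with c_type = d_type = "f16" (and satf = 0) is
// accepted in any layout.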

// Returns true if the fragment is supported for ldmatrix ops;
// false otherwise.
// E.g.
// if NVVM_LDMATRIX_SUPPORTED<...>.ret then
//   def : FOO<>; // The record will only be defined for supported ops.
//
class NVVM_LDMATRIX_SUPPORTED<WMMA_REGS frag> {
  string g = frag.geom;
  string t = frag.ptx_elt_type;

  bit ret = !cond(
    // Currently only the m8n8 geometry with b16 element type is supported.
    !and(!eq(g, "m8n8"), !eq(t, "b16")): true,
    true: false
  );
}

class SHFL_INFO<bit sync, string mode, string type, bit return_pred> {
  string Suffix = !if(sync, "sync_", "")
                  # mode # "_"
                  # type
                  # !if(return_pred, "p", "");

  string Name = "int_nvvm_shfl_" # Suffix;
  string Builtin = "__nvvm_shfl_" # Suffix;
  string IntrName = "llvm.nvvm.shfl." # !subst("_",".", Suffix);
  bit withGccBuiltin = !not(return_pred);
  bit withoutGccBuiltin = return_pred;
  LLVMType OpType = !cond(
    !eq(type,"i32"): llvm_i32_ty,
    !eq(type,"f32"): llvm_float_ty);
  list<LLVMType> RetTy = !if(return_pred, [OpType, llvm_i1_ty], [OpType]);
  list<LLVMType> ArgsTy = !if(sync,
    [llvm_i32_ty, OpType, llvm_i32_ty, llvm_i32_ty],
    [OpType, llvm_i32_ty, llvm_i32_ty]);
}
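
// For illustration: SHFL_INFO<1, "down", "i32", 0> evaluates to
//   Suffix   = "sync_down_i32"
//   Name     = "int_nvvm_shfl_sync_down_i32"
//   IntrName = "llvm.nvvm.shfl.sync.down.i32"
//   RetTy    = [llvm_i32_ty]
//   ArgsTy   = [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]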

let TargetPrefix = "nvvm" in {
  def int_nvvm_prmt : ClangBuiltin<"__nvvm_prmt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, IntrSpeculatable]>;

//
// Min Max
//

  foreach operation = ["min", "max"] in {
    def int_nvvm_f # operation # _d :
      ClangBuiltin<!strconcat("__nvvm_f", operation, "_d")>,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

    foreach variant = ["_f", "_ftz_f", "_nan_f", "_ftz_nan_f",
      "_xorsign_abs_f", "_ftz_xorsign_abs_f", "_nan_xorsign_abs_f",
      "_ftz_nan_xorsign_abs_f"] in {
      def int_nvvm_f # operation # variant :
        ClangBuiltin<!strconcat("__nvvm_f", operation, variant)>,
        DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
          [IntrNoMem, IntrSpeculatable, Commutative]>;
    }

    foreach variant = ["_f16", "_ftz_f16", "_nan_f16", "_ftz_nan_f16",
      "_xorsign_abs_f16", "_ftz_xorsign_abs_f16", "_nan_xorsign_abs_f16",
      "_ftz_nan_xorsign_abs_f16"] in {
      def int_nvvm_f # operation # variant :
        DefaultAttrsIntrinsic<[llvm_half_ty], [llvm_half_ty, llvm_half_ty],
          [IntrNoMem, IntrSpeculatable, Commutative]>;
    }

    foreach variant = ["_f16x2", "_ftz_f16x2", "_nan_f16x2",
      "_ftz_nan_f16x2", "_xorsign_abs_f16x2", "_ftz_xorsign_abs_f16x2",
      "_nan_xorsign_abs_f16x2", "_ftz_nan_xorsign_abs_f16x2"] in {
      def int_nvvm_f # operation # variant :
        DefaultAttrsIntrinsic<[llvm_v2f16_ty], [llvm_v2f16_ty, llvm_v2f16_ty],
          [IntrNoMem, IntrSpeculatable, Commutative]>;
    }

    foreach variant = ["_bf16", "_ftz_bf16", "_nan_bf16", "_ftz_nan_bf16",
      "_xorsign_abs_bf16", "_ftz_xorsign_abs_bf16", "_nan_xorsign_abs_bf16",
      "_ftz_nan_xorsign_abs_bf16"] in {
      def int_nvvm_f # operation # variant :
        ClangBuiltin<!strconcat("__nvvm_f", operation, variant)>,
        DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_bfloat_ty, llvm_bfloat_ty],
          [IntrNoMem, IntrSpeculatable, Commutative]>;
    }

    foreach variant = ["_bf16x2", "_ftz_bf16x2", "_nan_bf16x2",
      "_ftz_nan_bf16x2", "_xorsign_abs_bf16x2", "_ftz_xorsign_abs_bf16x2",
      "_nan_xorsign_abs_bf16x2", "_ftz_nan_xorsign_abs_bf16x2"]  in {
      def int_nvvm_f # operation # variant :
        ClangBuiltin<!strconcat("__nvvm_f", operation, variant)>,
        DefaultAttrsIntrinsic<[llvm_v2bf16_ty], [llvm_v2bf16_ty, llvm_v2bf16_ty],
          [IntrNoMem, IntrSpeculatable, Commutative]>;
    }
  }
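
  // The loops above expand to definitions such as int_nvvm_fmin_d,
  // int_nvvm_fmax_ftz_f, int_nvvm_fmin_nan_f16 and
  // int_nvvm_fmax_ftz_nan_xorsign_abs_bf16x2. Note that only the f64, f32,
  // bf16 and bf16x2 variants carry a ClangBuiltin mapping.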

//
// Multiplication
//

  def int_nvvm_mulhi_i : ClangBuiltin<"__nvvm_mulhi_i">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mulhi_ui : ClangBuiltin<"__nvvm_mulhi_ui">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

  def int_nvvm_mulhi_ll : ClangBuiltin<"__nvvm_mulhi_ll">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mulhi_ull : ClangBuiltin<"__nvvm_mulhi_ull">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

  def int_nvvm_mul_rn_ftz_f : ClangBuiltin<"__nvvm_mul_rn_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rn_f : ClangBuiltin<"__nvvm_mul_rn_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rz_ftz_f : ClangBuiltin<"__nvvm_mul_rz_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rz_f : ClangBuiltin<"__nvvm_mul_rz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rm_ftz_f : ClangBuiltin<"__nvvm_mul_rm_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rm_f : ClangBuiltin<"__nvvm_mul_rm_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rp_ftz_f : ClangBuiltin<"__nvvm_mul_rp_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rp_f : ClangBuiltin<"__nvvm_mul_rp_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

  def int_nvvm_mul_rn_d : ClangBuiltin<"__nvvm_mul_rn_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rz_d : ClangBuiltin<"__nvvm_mul_rz_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rm_d : ClangBuiltin<"__nvvm_mul_rm_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul_rp_d : ClangBuiltin<"__nvvm_mul_rp_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

  def int_nvvm_mul24_i : ClangBuiltin<"__nvvm_mul24_i">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_mul24_ui : ClangBuiltin<"__nvvm_mul24_ui">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

//
// Div
//

  def int_nvvm_div_approx_ftz_f : ClangBuiltin<"__nvvm_div_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;
  def int_nvvm_div_approx_f : ClangBuiltin<"__nvvm_div_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;

  def int_nvvm_div_rn_ftz_f : ClangBuiltin<"__nvvm_div_rn_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rn_f : ClangBuiltin<"__nvvm_div_rn_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;

  def int_nvvm_div_rz_ftz_f : ClangBuiltin<"__nvvm_div_rz_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rz_f : ClangBuiltin<"__nvvm_div_rz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;

  def int_nvvm_div_rm_ftz_f : ClangBuiltin<"__nvvm_div_rm_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rm_f : ClangBuiltin<"__nvvm_div_rm_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;

  def int_nvvm_div_rp_ftz_f : ClangBuiltin<"__nvvm_div_rp_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rp_f : ClangBuiltin<"__nvvm_div_rp_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem]>;

  def int_nvvm_div_rn_d : ClangBuiltin<"__nvvm_div_rn_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rz_d : ClangBuiltin<"__nvvm_div_rz_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rm_d : ClangBuiltin<"__nvvm_div_rm_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem]>;
  def int_nvvm_div_rp_d : ClangBuiltin<"__nvvm_div_rp_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem]>;

//
// Sad
//

  def int_nvvm_sad_i : ClangBuiltin<"__nvvm_sad_i">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, Commutative]>;
  def int_nvvm_sad_ui : ClangBuiltin<"__nvvm_sad_ui">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, Commutative]>;

//
// Floor  Ceil
//

  def int_nvvm_floor_ftz_f : ClangBuiltin<"__nvvm_floor_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_floor_f : ClangBuiltin<"__nvvm_floor_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_floor_d : ClangBuiltin<"__nvvm_floor_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_ceil_ftz_f : ClangBuiltin<"__nvvm_ceil_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ceil_f : ClangBuiltin<"__nvvm_ceil_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ceil_d : ClangBuiltin<"__nvvm_ceil_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

//
// Abs
//

  def int_nvvm_fabs_ftz_f : ClangBuiltin<"__nvvm_fabs_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_fabs_f : ClangBuiltin<"__nvvm_fabs_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_fabs_d : ClangBuiltin<"__nvvm_fabs_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

//
// Abs, Neg bf16, bf16x2
//

  foreach unary = ["abs", "neg"] in {
    def int_nvvm_ # unary # _bf16 :
      ClangBuiltin<!strconcat("__nvvm_", unary, "_bf16")>,
      DefaultAttrsIntrinsic<[llvm_bfloat_ty], [llvm_bfloat_ty], [IntrNoMem]>;
    def int_nvvm_ # unary # _bf16x2 :
      ClangBuiltin<!strconcat("__nvvm_", unary, "_bf16x2")>,
      DefaultAttrsIntrinsic<[llvm_v2bf16_ty], [llvm_v2bf16_ty], [IntrNoMem]>;
  }
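
  // The loop above defines int_nvvm_abs_bf16, int_nvvm_abs_bf16x2,
  // int_nvvm_neg_bf16 and int_nvvm_neg_bf16x2, each mapped to the
  // identically named __nvvm_* builtin.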

//
// Round
//

  def int_nvvm_round_ftz_f : ClangBuiltin<"__nvvm_round_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_round_f : ClangBuiltin<"__nvvm_round_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_round_d : ClangBuiltin<"__nvvm_round_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

//
// Trunc
//

  def int_nvvm_trunc_ftz_f : ClangBuiltin<"__nvvm_trunc_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_trunc_f : ClangBuiltin<"__nvvm_trunc_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_trunc_d : ClangBuiltin<"__nvvm_trunc_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

//
// Saturate
//

  def int_nvvm_saturate_ftz_f : ClangBuiltin<"__nvvm_saturate_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_saturate_f : ClangBuiltin<"__nvvm_saturate_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_saturate_d : ClangBuiltin<"__nvvm_saturate_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

//
// Exp2  Log2
//

  def int_nvvm_ex2_approx_ftz_f : ClangBuiltin<"__nvvm_ex2_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_ex2_approx_f : ClangBuiltin<"__nvvm_ex2_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_ex2_approx_d : ClangBuiltin<"__nvvm_ex2_approx_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_ex2_approx_f16 :
      DefaultAttrsIntrinsic<[llvm_half_ty], [llvm_half_ty], [IntrNoMem]>;
  def int_nvvm_ex2_approx_f16x2 :
      DefaultAttrsIntrinsic<[llvm_v2f16_ty], [llvm_v2f16_ty], [IntrNoMem]>;

  def int_nvvm_lg2_approx_ftz_f : ClangBuiltin<"__nvvm_lg2_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_lg2_approx_f : ClangBuiltin<"__nvvm_lg2_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_lg2_approx_d : ClangBuiltin<"__nvvm_lg2_approx_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;

//
// Sin  Cos
//

  def int_nvvm_sin_approx_ftz_f : ClangBuiltin<"__nvvm_sin_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sin_approx_f : ClangBuiltin<"__nvvm_sin_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;

  def int_nvvm_cos_approx_ftz_f : ClangBuiltin<"__nvvm_cos_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_cos_approx_f : ClangBuiltin<"__nvvm_cos_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;

//
// Fma
//

  foreach variant = ["_rn_f16", "_rn_ftz_f16", "_rn_sat_f16",
    "_rn_ftz_sat_f16", "_rn_relu_f16", "_rn_ftz_relu_f16"] in {
    def int_nvvm_fma # variant : DefaultAttrsIntrinsic<[llvm_half_ty],
      [llvm_half_ty, llvm_half_ty, llvm_half_ty],
      [IntrNoMem, IntrSpeculatable]>;
  }

  foreach variant = ["_rn_f16x2", "_rn_ftz_f16x2", "_rn_sat_f16x2",
    "_rn_ftz_sat_f16x2", "_rn_relu_f16x2", "_rn_ftz_relu_f16x2"] in {
    def int_nvvm_fma # variant : DefaultAttrsIntrinsic<[llvm_v2f16_ty],
      [llvm_v2f16_ty, llvm_v2f16_ty, llvm_v2f16_ty],
      [IntrNoMem, IntrSpeculatable]>;
  }

  foreach variant = ["_rn_bf16", "_rn_ftz_bf16", "_rn_sat_bf16",
    "_rn_ftz_sat_bf16", "_rn_relu_bf16", "_rn_ftz_relu_bf16"] in {
    def int_nvvm_fma # variant : ClangBuiltin<!strconcat("__nvvm_fma", variant)>,
      DefaultAttrsIntrinsic<[llvm_bfloat_ty],
        [llvm_bfloat_ty, llvm_bfloat_ty, llvm_bfloat_ty],
        [IntrNoMem, IntrSpeculatable]>;
  }

  foreach variant = ["_rn_bf16x2", "_rn_ftz_bf16x2", "_rn_sat_bf16x2",
    "_rn_ftz_sat_bf16x2", "_rn_relu_bf16x2", "_rn_ftz_relu_bf16x2"] in {
    def int_nvvm_fma # variant : ClangBuiltin<!strconcat("__nvvm_fma", variant)>,
      DefaultAttrsIntrinsic<[llvm_v2bf16_ty],
        [llvm_v2bf16_ty, llvm_v2bf16_ty, llvm_v2bf16_ty],
        [IntrNoMem, IntrSpeculatable]>;
  }

  foreach variant = ["_rn_ftz_f", "_rn_f", "_rz_ftz_f", "_rz_f", "_rm_ftz_f",
    "_rm_f", "_rp_ftz_f", "_rp_f"] in {
    def int_nvvm_fma # variant : ClangBuiltin<!strconcat("__nvvm_fma", variant)>,
      DefaultAttrsIntrinsic<[llvm_float_ty],
        [llvm_float_ty, llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable]>;
  }

  foreach variant = ["_rn_d", "_rz_d", "_rm_d", "_rp_d"] in {
    def int_nvvm_fma # variant : ClangBuiltin<!strconcat("__nvvm_fma", variant)>,
      DefaultAttrsIntrinsic<[llvm_double_ty],
        [llvm_double_ty, llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable]>;
  }
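
  // The loops above expand to definitions such as int_nvvm_fma_rn_f16,
  // int_nvvm_fma_rn_ftz_relu_f16x2, int_nvvm_fma_rn_bf16, int_nvvm_fma_rz_f
  // and int_nvvm_fma_rp_d. As with min/max, the f16 and f16x2 variants have
  // no ClangBuiltin mapping.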

//
// Rcp
//

  def int_nvvm_rcp_rn_ftz_f : ClangBuiltin<"__nvvm_rcp_rn_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rn_f : ClangBuiltin<"__nvvm_rcp_rn_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rz_ftz_f : ClangBuiltin<"__nvvm_rcp_rz_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rz_f : ClangBuiltin<"__nvvm_rcp_rz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rm_ftz_f : ClangBuiltin<"__nvvm_rcp_rm_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rm_f : ClangBuiltin<"__nvvm_rcp_rm_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rp_ftz_f : ClangBuiltin<"__nvvm_rcp_rp_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rp_f : ClangBuiltin<"__nvvm_rcp_rp_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;

  def int_nvvm_rcp_rn_d : ClangBuiltin<"__nvvm_rcp_rn_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rz_d : ClangBuiltin<"__nvvm_rcp_rz_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rm_d : ClangBuiltin<"__nvvm_rcp_rm_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_rcp_rp_d : ClangBuiltin<"__nvvm_rcp_rp_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;

  def int_nvvm_rcp_approx_ftz_f : ClangBuiltin<"__nvvm_rcp_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rcp_approx_ftz_d : ClangBuiltin<"__nvvm_rcp_approx_ftz_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;

//
// Sqrt
//

  def int_nvvm_sqrt_f : ClangBuiltin<"__nvvm_sqrt_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rn_ftz_f : ClangBuiltin<"__nvvm_sqrt_rn_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rn_f : ClangBuiltin<"__nvvm_sqrt_rn_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rz_ftz_f : ClangBuiltin<"__nvvm_sqrt_rz_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rz_f : ClangBuiltin<"__nvvm_sqrt_rz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rm_ftz_f : ClangBuiltin<"__nvvm_sqrt_rm_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rm_f : ClangBuiltin<"__nvvm_sqrt_rm_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rp_ftz_f : ClangBuiltin<"__nvvm_sqrt_rp_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rp_f : ClangBuiltin<"__nvvm_sqrt_rp_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_approx_ftz_f : ClangBuiltin<"__nvvm_sqrt_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_approx_f : ClangBuiltin<"__nvvm_sqrt_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;

  def int_nvvm_sqrt_rn_d : ClangBuiltin<"__nvvm_sqrt_rn_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rz_d : ClangBuiltin<"__nvvm_sqrt_rz_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rm_d : ClangBuiltin<"__nvvm_sqrt_rm_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_nvvm_sqrt_rp_d : ClangBuiltin<"__nvvm_sqrt_rp_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;

//
// Rsqrt
//

  def int_nvvm_rsqrt_approx_ftz_f : ClangBuiltin<"__nvvm_rsqrt_approx_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rsqrt_approx_f : ClangBuiltin<"__nvvm_rsqrt_approx_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_nvvm_rsqrt_approx_d : ClangBuiltin<"__nvvm_rsqrt_approx_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;

//
// Add
//

  def int_nvvm_add_rn_ftz_f : ClangBuiltin<"__nvvm_add_rn_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rn_f : ClangBuiltin<"__nvvm_add_rn_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rz_ftz_f : ClangBuiltin<"__nvvm_add_rz_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rz_f : ClangBuiltin<"__nvvm_add_rz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rm_ftz_f : ClangBuiltin<"__nvvm_add_rm_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rm_f : ClangBuiltin<"__nvvm_add_rm_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rp_ftz_f : ClangBuiltin<"__nvvm_add_rp_ftz_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rp_f : ClangBuiltin<"__nvvm_add_rp_f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

  def int_nvvm_add_rn_d : ClangBuiltin<"__nvvm_add_rn_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rz_d : ClangBuiltin<"__nvvm_add_rz_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rm_d : ClangBuiltin<"__nvvm_add_rm_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;
  def int_nvvm_add_rp_d : ClangBuiltin<"__nvvm_add_rp_d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

//
// Convert
//

  def int_nvvm_d2f_rn_ftz : ClangBuiltin<"__nvvm_d2f_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rn : ClangBuiltin<"__nvvm_d2f_rn">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rz_ftz : ClangBuiltin<"__nvvm_d2f_rz_ftz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rz : ClangBuiltin<"__nvvm_d2f_rz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rm_ftz : ClangBuiltin<"__nvvm_d2f_rm_ftz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rm : ClangBuiltin<"__nvvm_d2f_rm">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rp_ftz : ClangBuiltin<"__nvvm_d2f_rp_ftz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2f_rp : ClangBuiltin<"__nvvm_d2f_rp">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_d2i_rn : ClangBuiltin<"__nvvm_d2i_rn">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2i_rz : ClangBuiltin<"__nvvm_d2i_rz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2i_rm : ClangBuiltin<"__nvvm_d2i_rm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2i_rp : ClangBuiltin<"__nvvm_d2i_rp">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_d2ui_rn : ClangBuiltin<"__nvvm_d2ui_rn">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ui_rz : ClangBuiltin<"__nvvm_d2ui_rz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ui_rm : ClangBuiltin<"__nvvm_d2ui_rm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ui_rp : ClangBuiltin<"__nvvm_d2ui_rp">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_i2d_rn : ClangBuiltin<"__nvvm_i2d_rn">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_i2d_rz : ClangBuiltin<"__nvvm_i2d_rz">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_i2d_rm : ClangBuiltin<"__nvvm_i2d_rm">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_i2d_rp : ClangBuiltin<"__nvvm_i2d_rp">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_ui2d_rn : ClangBuiltin<"__nvvm_ui2d_rn">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ui2d_rz : ClangBuiltin<"__nvvm_ui2d_rz">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ui2d_rm : ClangBuiltin<"__nvvm_ui2d_rm">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ui2d_rp : ClangBuiltin<"__nvvm_ui2d_rp">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_f2i_rn_ftz : ClangBuiltin<"__nvvm_f2i_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rn : ClangBuiltin<"__nvvm_f2i_rn">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rz_ftz : ClangBuiltin<"__nvvm_f2i_rz_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rz : ClangBuiltin<"__nvvm_f2i_rz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rm_ftz : ClangBuiltin<"__nvvm_f2i_rm_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rm : ClangBuiltin<"__nvvm_f2i_rm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rp_ftz : ClangBuiltin<"__nvvm_f2i_rp_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2i_rp : ClangBuiltin<"__nvvm_f2i_rp">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_f2ui_rn_ftz : ClangBuiltin<"__nvvm_f2ui_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rn : ClangBuiltin<"__nvvm_f2ui_rn">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rz_ftz : ClangBuiltin<"__nvvm_f2ui_rz_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rz : ClangBuiltin<"__nvvm_f2ui_rz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rm_ftz : ClangBuiltin<"__nvvm_f2ui_rm_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rm : ClangBuiltin<"__nvvm_f2ui_rm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rp_ftz : ClangBuiltin<"__nvvm_f2ui_rp_ftz">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ui_rp : ClangBuiltin<"__nvvm_f2ui_rp">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_i2f_rn : ClangBuiltin<"__nvvm_i2f_rn">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_i2f_rz : ClangBuiltin<"__nvvm_i2f_rz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_i2f_rm : ClangBuiltin<"__nvvm_i2f_rm">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_i2f_rp : ClangBuiltin<"__nvvm_i2f_rp">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_ui2f_rn : ClangBuiltin<"__nvvm_ui2f_rn">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ui2f_rz : ClangBuiltin<"__nvvm_ui2f_rz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ui2f_rm : ClangBuiltin<"__nvvm_ui2f_rm">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ui2f_rp : ClangBuiltin<"__nvvm_ui2f_rp">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_lohi_i2d : ClangBuiltin<"__nvvm_lohi_i2d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty, llvm_i32_ty],
        [IntrNoMem, IntrSpeculatable, Commutative]>;

  def int_nvvm_d2i_lo : ClangBuiltin<"__nvvm_d2i_lo">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2i_hi : ClangBuiltin<"__nvvm_d2i_hi">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_f2ll_rn_ftz : ClangBuiltin<"__nvvm_f2ll_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rn : ClangBuiltin<"__nvvm_f2ll_rn">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rz_ftz : ClangBuiltin<"__nvvm_f2ll_rz_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rz : ClangBuiltin<"__nvvm_f2ll_rz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rm_ftz : ClangBuiltin<"__nvvm_f2ll_rm_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rm : ClangBuiltin<"__nvvm_f2ll_rm">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rp_ftz : ClangBuiltin<"__nvvm_f2ll_rp_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ll_rp : ClangBuiltin<"__nvvm_f2ll_rp">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_f2ull_rn_ftz : ClangBuiltin<"__nvvm_f2ull_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rn : ClangBuiltin<"__nvvm_f2ull_rn">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rz_ftz : ClangBuiltin<"__nvvm_f2ull_rz_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rz : ClangBuiltin<"__nvvm_f2ull_rz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rm_ftz : ClangBuiltin<"__nvvm_f2ull_rm_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rm : ClangBuiltin<"__nvvm_f2ull_rm">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rp_ftz : ClangBuiltin<"__nvvm_f2ull_rp_ftz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2ull_rp : ClangBuiltin<"__nvvm_f2ull_rp">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_d2ll_rn : ClangBuiltin<"__nvvm_d2ll_rn">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ll_rz : ClangBuiltin<"__nvvm_d2ll_rz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ll_rm : ClangBuiltin<"__nvvm_d2ll_rm">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ll_rp : ClangBuiltin<"__nvvm_d2ll_rp">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_d2ull_rn : ClangBuiltin<"__nvvm_d2ull_rn">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ull_rz : ClangBuiltin<"__nvvm_d2ull_rz">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ull_rm : ClangBuiltin<"__nvvm_d2ull_rm">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_d2ull_rp : ClangBuiltin<"__nvvm_d2ull_rp">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_ll2f_rn : ClangBuiltin<"__nvvm_ll2f_rn">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ll2f_rz : ClangBuiltin<"__nvvm_ll2f_rz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ll2f_rm : ClangBuiltin<"__nvvm_ll2f_rm">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ll2f_rp : ClangBuiltin<"__nvvm_ll2f_rp">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2f_rn : ClangBuiltin<"__nvvm_ull2f_rn">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2f_rz : ClangBuiltin<"__nvvm_ull2f_rz">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2f_rm : ClangBuiltin<"__nvvm_ull2f_rm">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2f_rp : ClangBuiltin<"__nvvm_ull2f_rp">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_ll2d_rn : ClangBuiltin<"__nvvm_ll2d_rn">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ll2d_rz : ClangBuiltin<"__nvvm_ll2d_rz">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ll2d_rm : ClangBuiltin<"__nvvm_ll2d_rm">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ll2d_rp : ClangBuiltin<"__nvvm_ll2d_rp">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2d_rn : ClangBuiltin<"__nvvm_ull2d_rn">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2d_rz : ClangBuiltin<"__nvvm_ull2d_rz">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2d_rm : ClangBuiltin<"__nvvm_ull2d_rm">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_ull2d_rp : ClangBuiltin<"__nvvm_ull2d_rp">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_f2h_rn_ftz : ClangBuiltin<"__nvvm_f2h_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_f2h_rn : ClangBuiltin<"__nvvm_f2h_rn">,
      DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_bf2h_rn_ftz : ClangBuiltin<"__nvvm_bf2h_rn_ftz">,
      DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_bfloat_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_bf2h_rn : ClangBuiltin<"__nvvm_bf2h_rn">,
      DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_bfloat_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_ff2bf16x2_rn : ClangBuiltin<"__nvvm_ff2bf16x2_rn">,
      Intrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_ff2bf16x2_rn_relu : ClangBuiltin<"__nvvm_ff2bf16x2_rn_relu">,
      Intrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_ff2bf16x2_rz : ClangBuiltin<"__nvvm_ff2bf16x2_rz">,
      Intrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_ff2bf16x2_rz_relu : ClangBuiltin<"__nvvm_ff2bf16x2_rz_relu">,
      Intrinsic<[llvm_v2bf16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;

  def int_nvvm_ff2f16x2_rn : ClangBuiltin<"__nvvm_ff2f16x2_rn">,
      Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_ff2f16x2_rn_relu : ClangBuiltin<"__nvvm_ff2f16x2_rn_relu">,
      Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_ff2f16x2_rz : ClangBuiltin<"__nvvm_ff2f16x2_rz">,
      Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_ff2f16x2_rz_relu : ClangBuiltin<"__nvvm_ff2f16x2_rz_relu">,
      Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem, IntrNoCallback]>;

  def int_nvvm_f2bf16_rn : ClangBuiltin<"__nvvm_f2bf16_rn">,
      Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_f2bf16_rn_relu : ClangBuiltin<"__nvvm_f2bf16_rn_relu">,
      Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_f2bf16_rz : ClangBuiltin<"__nvvm_f2bf16_rz">,
      Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem, IntrNoCallback]>;
  def int_nvvm_f2bf16_rz_relu : ClangBuiltin<"__nvvm_f2bf16_rz_relu">,
      Intrinsic<[llvm_bfloat_ty], [llvm_float_ty], [IntrNoMem, IntrNoCallback]>;

  def int_nvvm_f2tf32_rna : ClangBuiltin<"__nvvm_f2tf32_rna">,
      Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrNoCallback]>;

//
// Bitcast
//

  def int_nvvm_bitcast_f2i : ClangBuiltin<"__nvvm_bitcast_f2i">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_bitcast_i2f : ClangBuiltin<"__nvvm_bitcast_i2f">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem, IntrSpeculatable]>;

  def int_nvvm_bitcast_ll2d : ClangBuiltin<"__nvvm_bitcast_ll2d">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem, IntrSpeculatable]>;
  def int_nvvm_bitcast_d2ll : ClangBuiltin<"__nvvm_bitcast_d2ll">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>;

// FNS

  def int_nvvm_fns : ClangBuiltin<"__nvvm_fns">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                [IntrNoMem]>;

// Atomics not available as llvm intrinsics.
  def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
      [llvm_anyptr_ty, llvm_i32_ty],
      [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
  def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
      [llvm_anyptr_ty, llvm_i32_ty],
      [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;

  class SCOPED_ATOMIC2_impl<LLVMType elty>
        : Intrinsic<[elty],
          [llvm_anyptr_ty, LLVMMatchType<0>],
          [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
  class SCOPED_ATOMIC3_impl<LLVMType elty>
        : Intrinsic<[elty],
          [llvm_anyptr_ty, LLVMMatchType<0>,
           LLVMMatchType<0>],
          [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;

  multiclass PTXAtomicWithScope2<LLVMType elty> {
    def _cta : SCOPED_ATOMIC2_impl<elty>;
    def _sys : SCOPED_ATOMIC2_impl<elty>;
  }
  multiclass PTXAtomicWithScope3<LLVMType elty> {
    def _cta : SCOPED_ATOMIC3_impl<elty>;
    def _sys : SCOPED_ATOMIC3_impl<elty>;
  }
  multiclass PTXAtomicWithScope2_fi {
    defm _f: PTXAtomicWithScope2<llvm_anyfloat_ty>;
    defm _i: PTXAtomicWithScope2<llvm_anyint_ty>;
  }
  defm int_nvvm_atomic_add_gen   : PTXAtomicWithScope2_fi;
  defm int_nvvm_atomic_inc_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_dec_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_exch_gen_i: PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_xor_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_max_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_min_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_or_gen_i  : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_and_gen_i : PTXAtomicWithScope2<llvm_anyint_ty>;
  defm int_nvvm_atomic_cas_gen_i : PTXAtomicWithScope3<llvm_anyint_ty>;

// Bar.Sync

  // The builtin for "bar.sync 0" is called __syncthreads.  Unlike most of the
  // intrinsics in this file, this one is a user-facing API.
  def int_nvvm_barrier0 : ClangBuiltin<"__syncthreads">,
      Intrinsic<[], [], [IntrConvergent, IntrNoCallback]>;
  // Synchronize all threads in the CTA at barrier 'n'.
  def int_nvvm_barrier_n : ClangBuiltin<"__nvvm_bar_n">,
      Intrinsic<[], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>;
  // Synchronize 'm' threads (arg 2, a multiple of the warp size) in the CTA
  // at barrier 'n' (arg 1).
  def int_nvvm_barrier : ClangBuiltin<"__nvvm_bar">,
      Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [IntrConvergent, IntrNoCallback]>;
  def int_nvvm_barrier0_popc : ClangBuiltin<"__nvvm_bar0_popc">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>;
  def int_nvvm_barrier0_and : ClangBuiltin<"__nvvm_bar0_and">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>;
  def int_nvvm_barrier0_or : ClangBuiltin<"__nvvm_bar0_or">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>;

  def int_nvvm_bar_sync :
      Intrinsic<[], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>,
      ClangBuiltin<"__nvvm_bar_sync">;
  def int_nvvm_bar_warp_sync :
      Intrinsic<[], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>,
      ClangBuiltin<"__nvvm_bar_warp_sync">;

  // barrier.sync id[, cnt]
  def int_nvvm_barrier_sync :
      Intrinsic<[], [llvm_i32_ty], [IntrConvergent, IntrNoCallback]>,
      ClangBuiltin<"__nvvm_barrier_sync">;
  def int_nvvm_barrier_sync_cnt :
      Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], [IntrConvergent, IntrNoCallback]>,
      ClangBuiltin<"__nvvm_barrier_sync_cnt">;

  // barrier.cluster.[wait, arrive, arrive.relaxed]
  def int_nvvm_barrier_cluster_arrive :
      Intrinsic<[], [], [IntrConvergent, IntrNoCallback]>;
  def int_nvvm_barrier_cluster_arrive_relaxed :
      Intrinsic<[], [], [IntrConvergent, IntrNoCallback]>;
  def int_nvvm_barrier_cluster_wait :
      Intrinsic<[], [], [IntrConvergent, IntrNoCallback]>;

  // Membar
  def int_nvvm_membar_cta : ClangBuiltin<"__nvvm_membar_cta">,
      Intrinsic<[], [], [IntrNoCallback]>;
  def int_nvvm_membar_gl : ClangBuiltin<"__nvvm_membar_gl">,
      Intrinsic<[], [], [IntrNoCallback]>;
  def int_nvvm_membar_sys : ClangBuiltin<"__nvvm_membar_sys">,
      Intrinsic<[], [], [IntrNoCallback]>;
  def int_nvvm_fence_sc_cluster:
      Intrinsic<[], [], [IntrNoCallback]>;

// Async Copy
def int_nvvm_cp_async_mbarrier_arrive :
    ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive">,
    Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_cp_async_mbarrier_arrive_shared :
    ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_shared">,
    Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_cp_async_mbarrier_arrive_noinc :
    ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc">,
    Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_cp_async_mbarrier_arrive_noinc_shared :
    ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc_shared">,
    Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;

multiclass CP_ASYNC_SHARED_GLOBAL<string n, string cc> {
  def NAME: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty],
        [IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
        WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
        "llvm.nvvm.cp.async." # cc # ".shared.global." # n>;
  def _s: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty, llvm_i32_ty],
        [IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
        WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
        "llvm.nvvm.cp.async." # cc # ".shared.global." # n # ".s">;
}
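
// For example, the first defm below produces int_nvvm_cp_async_ca_shared_global_4
// (intrinsic name "llvm.nvvm.cp.async.ca.shared.global.4") plus the
// int_nvvm_cp_async_ca_shared_global_4_s variant, which takes an extra i32
// src-size operand and maps to "llvm.nvvm.cp.async.ca.shared.global.4.s".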

defm int_nvvm_cp_async_ca_shared_global_4 : CP_ASYNC_SHARED_GLOBAL<"4", "ca">;
defm int_nvvm_cp_async_ca_shared_global_8 : CP_ASYNC_SHARED_GLOBAL<"8", "ca">;
defm int_nvvm_cp_async_ca_shared_global_16 : CP_ASYNC_SHARED_GLOBAL<"16", "ca">;
defm int_nvvm_cp_async_cg_shared_global_16 : CP_ASYNC_SHARED_GLOBAL<"16", "cg">;

def int_nvvm_cp_async_commit_group :
    ClangBuiltin<"__nvvm_cp_async_commit_group">,
    Intrinsic<[],[],[]>;

def int_nvvm_cp_async_wait_group :
    ClangBuiltin<"__nvvm_cp_async_wait_group">,
    Intrinsic<[],[llvm_i32_ty],[ImmArg<ArgIndex<0>>]>;

def int_nvvm_cp_async_wait_all :
    ClangBuiltin<"__nvvm_cp_async_wait_all">,
    Intrinsic<[],[],[]>;

// mbarrier
def int_nvvm_mbarrier_init : ClangBuiltin<"__nvvm_mbarrier_init">,
    Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_init_shared :
    ClangBuiltin<"__nvvm_mbarrier_init_shared">,
    Intrinsic<[],[llvm_shared_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;

def int_nvvm_mbarrier_inval : ClangBuiltin<"__nvvm_mbarrier_inval">,
    Intrinsic<[],[llvm_ptr_ty],
    [IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
    WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
def int_nvvm_mbarrier_inval_shared :
    ClangBuiltin<"__nvvm_mbarrier_inval_shared">,
    Intrinsic<[],[llvm_shared_ptr_ty],
    [IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
    WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;

def int_nvvm_mbarrier_arrive : ClangBuiltin<"__nvvm_mbarrier_arrive">,
    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_shared :
    ClangBuiltin<"__nvvm_mbarrier_arrive_shared">,
    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_noComplete :
    ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete">,
    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_noComplete_shared :
    ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete_shared">,
    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
    llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;

def int_nvvm_mbarrier_arrive_drop :
    ClangBuiltin<"__nvvm_mbarrier_arrive_drop">,
    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop_shared :
    ClangBuiltin<"__nvvm_mbarrier_arrive_drop_shared">,
    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop_noComplete :
    ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete">,
    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_arrive_drop_noComplete_shared :
    ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete_shared">,
    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
    llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;

def int_nvvm_mbarrier_test_wait :
    ClangBuiltin<"__nvvm_mbarrier_test_wait">,
    Intrinsic<[llvm_i1_ty],[llvm_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
def int_nvvm_mbarrier_test_wait_shared :
    ClangBuiltin<"__nvvm_mbarrier_test_wait_shared">,
    Intrinsic<[llvm_i1_ty],[llvm_shared_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;

def int_nvvm_mbarrier_pending_count :
    ClangBuiltin<"__nvvm_mbarrier_pending_count">,
    Intrinsic<[llvm_i32_ty],[llvm_i64_ty],[IntrNoMem, IntrConvergent, IntrNoCallback]>;
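
// A minimal usage sketch of the mbarrier builtins above (hypothetical CUDA
// kernel fragment; assumes sm_80+; the opaque barrier state occupies 8 bytes
// of shared memory, and the exact pointer types expected by the builtins are
// an assumption here):
//
//   __shared__ long long bar;                       // mbarrier storage
//   if (threadIdx.x == 0)
//     __nvvm_mbarrier_init(&bar, blockDim.x);       // expect blockDim.x arrivals
//   __syncthreads();
//   long long state = __nvvm_mbarrier_arrive(&bar); // arrive, capture phase token
//   while (!__nvvm_mbarrier_test_wait(&bar, state))
//     ;                                             // spin until the phase completes
//   if (threadIdx.x == 0)
//     __nvvm_mbarrier_inval(&bar);                  // invalidate before reuse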

// Generated within nvvm. Used for ldu on sm_20 or later. The second argument
// is the pointer's alignment, in bytes.
def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldu.global.i">;
def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldu.global.f">;
def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldu.global.p">;

// Generated within nvvm. Used for ldg on sm_35 or later. The second argument
// is the pointer's alignment, in bytes.
def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldg.global.i">;
def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldg.global.f">;
def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldg.global.p">;

// Generic-pointer conversions
// - These intrinsics convert pointers between address spaces.
// - The input and output pointers must have the same type except for the
//   address space. (This restriction is not enforced here, as there is
//   currently no way to express it.)
// - This complements the LLVM bitcast, which casts between pointer types
//   while leaving the address space unchanged.
def int_nvvm_ptr_local_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.local.to.gen">;
def int_nvvm_ptr_shared_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.shared.to.gen">;
def int_nvvm_ptr_global_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.global.to.gen">;
def int_nvvm_ptr_constant_to_gen: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.constant.to.gen">;

def int_nvvm_ptr_gen_to_global: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.gen.to.global">;
def int_nvvm_ptr_gen_to_shared: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.gen.to.shared">;
def int_nvvm_ptr_gen_to_local: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.gen.to.local">;
def int_nvvm_ptr_gen_to_constant: DefaultAttrsIntrinsic<[llvm_anyptr_ty],
                 [llvm_anyptr_ty], [IntrNoMem, IntrSpeculatable],
                 "llvm.nvvm.ptr.gen.to.constant">;

// Used internally by nvvm to help address-space optimization and PTX code
// generation. This is for parameters that are passed to kernel functions
// by value via a pointer.
def int_nvvm_ptr_gen_to_param: Intrinsic<[llvm_anyptr_ty],
                                     [llvm_anyptr_ty],
                                   [IntrNoMem, IntrSpeculatable, IntrNoCallback],
                                   "llvm.nvvm.ptr.gen.to.param">;

// Move intrinsics, used internally by nvvm.

def int_nvvm_move_i16 : Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem],
  "llvm.nvvm.move.i16">;
def int_nvvm_move_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem],
  "llvm.nvvm.move.i32">;
def int_nvvm_move_i64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem],
  "llvm.nvvm.move.i64">;
def int_nvvm_move_float : Intrinsic<[llvm_float_ty], [llvm_float_ty],
  [IntrNoMem], "llvm.nvvm.move.float">;
def int_nvvm_move_double : Intrinsic<[llvm_double_ty], [llvm_double_ty],
  [IntrNoMem], "llvm.nvvm.move.double">;
def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
  [IntrNoMem, NoCapture<ArgIndex<0>>], "llvm.nvvm.move.ptr">;


// For getting the handle from a texture or surface variable
def int_nvvm_texsurf_handle
  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyptr_ty],
              [IntrNoMem], "llvm.nvvm.texsurf.handle">;
def int_nvvm_texsurf_handle_internal
  : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
              [IntrNoMem], "llvm.nvvm.texsurf.handle.internal">;

/// Error / Warn
def int_nvvm_compiler_error :
    Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.error">;
def int_nvvm_compiler_warn :
    Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.warn">;

def int_nvvm_reflect :
  Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty], [IntrNoMem], "llvm.nvvm.reflect">;
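
// __nvvm_reflect calls are folded to compile-time constants by the
// NVVMReflect pass; libdevice uses this to select between code paths. A
// sketch of the conventional usage (the extern declaration is the caller's
// responsibility, not part of this file):
//
//   extern "C" __device__ int __nvvm_reflect(const char *);
//
//   __device__ float div(float a, float b) {
//     if (__nvvm_reflect("__CUDA_FTZ"))  // becomes 0 or 1 at compile time
//       return __fdividef(a, b);         // fast, flush-to-zero path
//     return a / b;
//   }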

// isspacep.{const, global, local, shared, shared::cluster}
def int_nvvm_isspacep_const
  : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.isspacep.const">,
    ClangBuiltin<"__nvvm_isspacep_const">;
def int_nvvm_isspacep_global
  : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.isspacep.global">,
    ClangBuiltin<"__nvvm_isspacep_global">;
def int_nvvm_isspacep_local
  : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.isspacep.local">,
    ClangBuiltin<"__nvvm_isspacep_local">;
def int_nvvm_isspacep_shared
  : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.isspacep.shared">,
    ClangBuiltin<"__nvvm_isspacep_shared">;
def int_nvvm_isspacep_shared_cluster
  : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.isspacep.shared.cluster">;

// Environment register read
foreach i = 0...31 in
  def int_nvvm_read_ptx_sreg_envreg # i
    : DefaultAttrsIntrinsic<[llvm_i32_ty], [],
                [IntrNoMem, IntrSpeculatable, NoUndef<RetIndex>],
                "llvm.nvvm.read.ptx.sreg.envreg" # i>,
      ClangBuiltin<"__nvvm_read_ptx_sreg_envreg" # i>;


// Texture Fetch
// texmode_independent
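// In independent mode each fetch takes a texture handle and a separate
// sampler handle (the two leading i64 operands), followed by the
// coordinates; the four scalar results are the components of the fetched
// texel.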
def int_nvvm_tex_1d_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.1d.v4f32.s32">;
def int_nvvm_tex_1d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.v4f32.f32">;
def int_nvvm_tex_1d_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.level.v4f32.f32">;
def int_nvvm_tex_1d_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.1d.grad.v4f32.f32">;
def int_nvvm_tex_1d_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.1d.v4s32.s32">;
def int_nvvm_tex_1d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.v4s32.f32">;
def int_nvvm_tex_1d_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.level.v4s32.f32">;
def int_nvvm_tex_1d_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.1d.grad.v4s32.f32">;
def int_nvvm_tex_1d_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.1d.v4u32.s32">;
def int_nvvm_tex_1d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.v4u32.f32">;
def int_nvvm_tex_1d_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.level.v4u32.f32">;
def int_nvvm_tex_1d_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.1d.grad.v4u32.f32">;

def int_nvvm_tex_1d_array_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.1d.array.v4f32.s32">;
def int_nvvm_tex_1d_array_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.v4f32.f32">;
def int_nvvm_tex_1d_array_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.level.v4f32.f32">;
def int_nvvm_tex_1d_array_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.grad.v4f32.f32">;
def int_nvvm_tex_1d_array_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.1d.array.v4s32.s32">;
def int_nvvm_tex_1d_array_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.v4s32.f32">;
def int_nvvm_tex_1d_array_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.level.v4s32.f32">;
def int_nvvm_tex_1d_array_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.grad.v4s32.f32">;
def int_nvvm_tex_1d_array_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.1d.array.v4u32.s32">;
def int_nvvm_tex_1d_array_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.v4u32.f32">;
def int_nvvm_tex_1d_array_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.level.v4u32.f32">;
def int_nvvm_tex_1d_array_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.1d.array.grad.v4u32.f32">;

def int_nvvm_tex_2d_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.2d.v4f32.s32">;
def int_nvvm_tex_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.v4f32.f32">;
def int_nvvm_tex_2d_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.level.v4f32.f32">;
def int_nvvm_tex_2d_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.grad.v4f32.f32">;
def int_nvvm_tex_2d_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.2d.v4s32.s32">;
def int_nvvm_tex_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.v4s32.f32">;
def int_nvvm_tex_2d_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.level.v4s32.f32">;
def int_nvvm_tex_2d_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.grad.v4s32.f32">;
def int_nvvm_tex_2d_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.2d.v4u32.s32">;
def int_nvvm_tex_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.v4u32.f32">;
def int_nvvm_tex_2d_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.level.v4u32.f32">;
def int_nvvm_tex_2d_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.grad.v4u32.f32">;

def int_nvvm_tex_2d_array_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty], [],
              "llvm.nvvm.tex.2d.array.v4f32.s32">;
def int_nvvm_tex_2d_array_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.v4f32.f32">;
def int_nvvm_tex_2d_array_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.level.v4f32.f32">;
def int_nvvm_tex_2d_array_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.grad.v4f32.f32">;
def int_nvvm_tex_2d_array_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty], [],
              "llvm.nvvm.tex.2d.array.v4s32.s32">;
def int_nvvm_tex_2d_array_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.v4s32.f32">;
def int_nvvm_tex_2d_array_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.level.v4s32.f32">;
def int_nvvm_tex_2d_array_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.grad.v4s32.f32">;
def int_nvvm_tex_2d_array_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty], [],
              "llvm.nvvm.tex.2d.array.v4u32.s32">;
def int_nvvm_tex_2d_array_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.v4u32.f32">;
def int_nvvm_tex_2d_array_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.level.v4u32.f32">;
def int_nvvm_tex_2d_array_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.2d.array.grad.v4u32.f32">;

def int_nvvm_tex_3d_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [], "llvm.nvvm.tex.3d.v4f32.s32">;
def int_nvvm_tex_3d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.3d.v4f32.f32">;
def int_nvvm_tex_3d_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.3d.level.v4f32.f32">;
def int_nvvm_tex_3d_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.3d.grad.v4f32.f32">;
def int_nvvm_tex_3d_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [], "llvm.nvvm.tex.3d.v4s32.s32">;
def int_nvvm_tex_3d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.3d.v4s32.f32">;
def int_nvvm_tex_3d_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.3d.level.v4s32.f32">;
def int_nvvm_tex_3d_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.3d.grad.v4s32.f32">;
def int_nvvm_tex_3d_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [], "llvm.nvvm.tex.3d.v4u32.s32">;
def int_nvvm_tex_3d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.3d.v4u32.f32">;
def int_nvvm_tex_3d_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.3d.level.v4u32.f32">;
def int_nvvm_tex_3d_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.3d.grad.v4u32.f32">;

def int_nvvm_tex_cube_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.v4f32.f32">;
def int_nvvm_tex_cube_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.level.v4f32.f32">;
def int_nvvm_tex_cube_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.v4s32.f32">;
def int_nvvm_tex_cube_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.level.v4s32.f32">;
def int_nvvm_tex_cube_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.v4u32.f32">;
def int_nvvm_tex_cube_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.level.v4u32.f32">;

def int_nvvm_tex_cube_array_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.array.v4f32.f32">;
def int_nvvm_tex_cube_array_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.array.level.v4f32.f32">;
def int_nvvm_tex_cube_array_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.array.v4s32.f32">;
def int_nvvm_tex_cube_array_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.array.level.v4s32.f32">;
def int_nvvm_tex_cube_array_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.array.v4u32.f32">;
def int_nvvm_tex_cube_array_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.cube.array.level.v4u32.f32">;

def int_nvvm_tld4_r_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.r.2d.v4f32.f32">;
def int_nvvm_tld4_g_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.g.2d.v4f32.f32">;
def int_nvvm_tld4_b_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.b.2d.v4f32.f32">;
def int_nvvm_tld4_a_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.a.2d.v4f32.f32">;
def int_nvvm_tld4_r_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.r.2d.v4s32.f32">;
def int_nvvm_tld4_g_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.g.2d.v4s32.f32">;
def int_nvvm_tld4_b_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.b.2d.v4s32.f32">;
def int_nvvm_tld4_a_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.a.2d.v4s32.f32">;
def int_nvvm_tld4_r_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.r.2d.v4u32.f32">;
def int_nvvm_tld4_g_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.g.2d.v4u32.f32">;
def int_nvvm_tld4_b_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.b.2d.v4u32.f32">;
def int_nvvm_tld4_a_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.a.2d.v4u32.f32">;


// texmode_unified
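// In unified mode the sampler state is bound to the texture itself, so each
// fetch takes a single i64 handle followed by the coordinates -- one operand
// fewer than the texmode_independent variants above.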
def int_nvvm_tex_unified_1d_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.1d.v4f32.s32">;
def int_nvvm_tex_unified_1d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.v4f32.f32">;
def int_nvvm_tex_unified_1d_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.level.v4f32.f32">;
def int_nvvm_tex_unified_1d_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.grad.v4f32.f32">;
def int_nvvm_tex_unified_1d_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.1d.v4s32.s32">;
def int_nvvm_tex_unified_1d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.v4s32.f32">;
def int_nvvm_tex_unified_1d_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.level.v4s32.f32">;
def int_nvvm_tex_unified_1d_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.grad.v4s32.f32">;
def int_nvvm_tex_unified_1d_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.1d.v4u32.s32">;
def int_nvvm_tex_unified_1d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.v4u32.f32">;
def int_nvvm_tex_unified_1d_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.level.v4u32.f32">;
def int_nvvm_tex_unified_1d_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.grad.v4u32.f32">;

def int_nvvm_tex_unified_1d_array_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.1d.array.v4f32.s32">;
def int_nvvm_tex_unified_1d_array_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.v4f32.f32">;
def int_nvvm_tex_unified_1d_array_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.level.v4f32.f32">;
def int_nvvm_tex_unified_1d_array_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32">;
def int_nvvm_tex_unified_1d_array_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.1d.array.v4s32.s32">;
def int_nvvm_tex_unified_1d_array_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.v4s32.f32">;
def int_nvvm_tex_unified_1d_array_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.level.v4s32.f32">;
def int_nvvm_tex_unified_1d_array_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32">;
def int_nvvm_tex_unified_1d_array_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.1d.array.v4u32.s32">;
def int_nvvm_tex_unified_1d_array_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.v4u32.f32">;
def int_nvvm_tex_unified_1d_array_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.level.v4u32.f32">;
def int_nvvm_tex_unified_1d_array_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32">;

def int_nvvm_tex_unified_2d_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.2d.v4f32.s32">;
def int_nvvm_tex_unified_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.v4f32.f32">;
def int_nvvm_tex_unified_2d_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.level.v4f32.f32">;
def int_nvvm_tex_unified_2d_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.grad.v4f32.f32">;
def int_nvvm_tex_unified_2d_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.2d.v4s32.s32">;
def int_nvvm_tex_unified_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.v4s32.f32">;
def int_nvvm_tex_unified_2d_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.level.v4s32.f32">;
def int_nvvm_tex_unified_2d_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.grad.v4s32.f32">;
def int_nvvm_tex_unified_2d_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.2d.v4u32.s32">;
def int_nvvm_tex_unified_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.v4u32.f32">;
def int_nvvm_tex_unified_2d_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.level.v4u32.f32">;
def int_nvvm_tex_unified_2d_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.grad.v4u32.f32">;

def int_nvvm_tex_unified_2d_array_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.2d.array.v4f32.s32">;
def int_nvvm_tex_unified_2d_array_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.v4f32.f32">;
def int_nvvm_tex_unified_2d_array_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.level.v4f32.f32">;
def int_nvvm_tex_unified_2d_array_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32">;
def int_nvvm_tex_unified_2d_array_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.2d.array.v4s32.s32">;
def int_nvvm_tex_unified_2d_array_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.v4s32.f32">;
def int_nvvm_tex_unified_2d_array_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.level.v4s32.f32">;
def int_nvvm_tex_unified_2d_array_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32">;
def int_nvvm_tex_unified_2d_array_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
               llvm_i32_ty], [],
              "llvm.nvvm.tex.unified.2d.array.v4u32.s32">;
def int_nvvm_tex_unified_2d_array_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.v4u32.f32">;
def int_nvvm_tex_unified_2d_array_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.level.v4u32.f32">;
def int_nvvm_tex_unified_2d_array_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32">;

def int_nvvm_tex_unified_3d_v4f32_s32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [], "llvm.nvvm.tex.unified.3d.v4f32.s32">;
def int_nvvm_tex_unified_3d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.v4f32.f32">;
def int_nvvm_tex_unified_3d_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.level.v4f32.f32">;
def int_nvvm_tex_unified_3d_grad_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.grad.v4f32.f32">;
def int_nvvm_tex_unified_3d_v4s32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [], "llvm.nvvm.tex.unified.3d.v4s32.s32">;
def int_nvvm_tex_unified_3d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.v4s32.f32">;
def int_nvvm_tex_unified_3d_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.level.v4s32.f32">;
def int_nvvm_tex_unified_3d_grad_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.grad.v4s32.f32">;
def int_nvvm_tex_unified_3d_v4u32_s32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [], "llvm.nvvm.tex.unified.3d.v4u32.s32">;
def int_nvvm_tex_unified_3d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.v4u32.f32">;
def int_nvvm_tex_unified_3d_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.level.v4u32.f32">;
def int_nvvm_tex_unified_3d_grad_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.3d.grad.v4u32.f32">;

def int_nvvm_tex_unified_cube_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.v4f32.f32">;
def int_nvvm_tex_unified_cube_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.level.v4f32.f32">;
def int_nvvm_tex_unified_cube_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.v4s32.f32">;
def int_nvvm_tex_unified_cube_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.level.v4s32.f32">;
def int_nvvm_tex_unified_cube_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.v4u32.f32">;
def int_nvvm_tex_unified_cube_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.level.v4u32.f32">;

def int_nvvm_tex_unified_cube_array_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.array.v4f32.f32">;
def int_nvvm_tex_unified_cube_array_level_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.array.level.v4f32.f32">;
def int_nvvm_tex_unified_cube_array_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.array.v4s32.f32">;
def int_nvvm_tex_unified_cube_array_level_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.array.level.v4s32.f32">;
def int_nvvm_tex_unified_cube_array_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.array.v4u32.f32">;
def int_nvvm_tex_unified_cube_array_level_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty,
               llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tex.unified.cube.array.level.v4u32.f32">;

def int_nvvm_tld4_unified_r_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.r.2d.v4f32.f32">;
def int_nvvm_tld4_unified_g_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.g.2d.v4f32.f32">;
def int_nvvm_tld4_unified_b_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.b.2d.v4f32.f32">;
def int_nvvm_tld4_unified_a_2d_v4f32_f32
  : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.a.2d.v4f32.f32">;
def int_nvvm_tld4_unified_r_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.r.2d.v4s32.f32">;
def int_nvvm_tld4_unified_g_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.g.2d.v4s32.f32">;
def int_nvvm_tld4_unified_b_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.b.2d.v4s32.f32">;
def int_nvvm_tld4_unified_a_2d_v4s32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.a.2d.v4s32.f32">;
def int_nvvm_tld4_unified_r_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.r.2d.v4u32.f32">;
def int_nvvm_tld4_unified_g_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.g.2d.v4u32.f32">;
def int_nvvm_tld4_unified_b_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.b.2d.v4u32.f32">;
def int_nvvm_tld4_unified_a_2d_v4u32_f32
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
              "llvm.nvvm.tld4.unified.a.2d.v4u32.f32">;


//===- Surface Load -------------------------------------------------------===//
// .clamp variants: out-of-range accesses are clamped to the surface bounds
// (per PTX).
def int_nvvm_suld_1d_i8_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i8.clamp">;
def int_nvvm_suld_1d_i16_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i16.clamp">;
def int_nvvm_suld_1d_i32_clamp
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i32.clamp">;
def int_nvvm_suld_1d_i64_clamp
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i64.clamp">;
def int_nvvm_suld_1d_v2i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i8.clamp">;
def int_nvvm_suld_1d_v2i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i16.clamp">;
def int_nvvm_suld_1d_v2i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i32.clamp">;
def int_nvvm_suld_1d_v2i64_clamp
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i64.clamp">;
def int_nvvm_suld_1d_v4i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i8.clamp">;
def int_nvvm_suld_1d_v4i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i16.clamp">;
def int_nvvm_suld_1d_v4i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i32.clamp">;

def int_nvvm_suld_1d_array_i8_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i8.clamp">;
def int_nvvm_suld_1d_array_i16_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i16.clamp">;
def int_nvvm_suld_1d_array_i32_clamp
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i32.clamp">;
def int_nvvm_suld_1d_array_i64_clamp
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i64.clamp">;
def int_nvvm_suld_1d_array_v2i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i8.clamp">;
def int_nvvm_suld_1d_array_v2i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i16.clamp">;
def int_nvvm_suld_1d_array_v2i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i32.clamp">;
def int_nvvm_suld_1d_array_v2i64_clamp
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i64.clamp">;
def int_nvvm_suld_1d_array_v4i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i8.clamp">;
def int_nvvm_suld_1d_array_v4i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i16.clamp">;
def int_nvvm_suld_1d_array_v4i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i32.clamp">;

def int_nvvm_suld_2d_i8_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i8.clamp">;
def int_nvvm_suld_2d_i16_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i16.clamp">;
def int_nvvm_suld_2d_i32_clamp
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i32.clamp">;
def int_nvvm_suld_2d_i64_clamp
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i64.clamp">;
def int_nvvm_suld_2d_v2i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i8.clamp">;
def int_nvvm_suld_2d_v2i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i16.clamp">;
def int_nvvm_suld_2d_v2i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i32.clamp">;
def int_nvvm_suld_2d_v2i64_clamp
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i64.clamp">;
def int_nvvm_suld_2d_v4i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i8.clamp">;
def int_nvvm_suld_2d_v4i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i16.clamp">;
def int_nvvm_suld_2d_v4i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i32.clamp">;

def int_nvvm_suld_2d_array_i8_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i8.clamp">;
def int_nvvm_suld_2d_array_i16_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i16.clamp">;
def int_nvvm_suld_2d_array_i32_clamp
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i32.clamp">;
def int_nvvm_suld_2d_array_i64_clamp
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i64.clamp">;
def int_nvvm_suld_2d_array_v2i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i8.clamp">;
def int_nvvm_suld_2d_array_v2i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i16.clamp">;
def int_nvvm_suld_2d_array_v2i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i32.clamp">;
def int_nvvm_suld_2d_array_v2i64_clamp
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i64.clamp">;
def int_nvvm_suld_2d_array_v4i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i8.clamp">;
def int_nvvm_suld_2d_array_v4i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i16.clamp">;
def int_nvvm_suld_2d_array_v4i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i32.clamp">;

def int_nvvm_suld_3d_i8_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i8.clamp">;
def int_nvvm_suld_3d_i16_clamp
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i16.clamp">;
def int_nvvm_suld_3d_i32_clamp
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i32.clamp">;
def int_nvvm_suld_3d_i64_clamp
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i64.clamp">;
def int_nvvm_suld_3d_v2i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i8.clamp">;
def int_nvvm_suld_3d_v2i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i16.clamp">;
def int_nvvm_suld_3d_v2i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i32.clamp">;
def int_nvvm_suld_3d_v2i64_clamp
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i64.clamp">;
def int_nvvm_suld_3d_v4i8_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i8.clamp">;
def int_nvvm_suld_3d_v4i16_clamp
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i16.clamp">;
def int_nvvm_suld_3d_v4i32_clamp
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i32.clamp">;

// .trap variants: out-of-range accesses cause an execution trap (per PTX).
def int_nvvm_suld_1d_i8_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i8.trap">;
def int_nvvm_suld_1d_i16_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i16.trap">;
def int_nvvm_suld_1d_i32_trap
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i32.trap">;
def int_nvvm_suld_1d_i64_trap
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i64.trap">;
def int_nvvm_suld_1d_v2i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i8.trap">;
def int_nvvm_suld_1d_v2i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i16.trap">;
def int_nvvm_suld_1d_v2i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i32.trap">;
def int_nvvm_suld_1d_v2i64_trap
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i64.trap">;
def int_nvvm_suld_1d_v4i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i8.trap">;
def int_nvvm_suld_1d_v4i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i16.trap">;
def int_nvvm_suld_1d_v4i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i32.trap">;

def int_nvvm_suld_1d_array_i8_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i8.trap">;
def int_nvvm_suld_1d_array_i16_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i16.trap">;
def int_nvvm_suld_1d_array_i32_trap
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i32.trap">;
def int_nvvm_suld_1d_array_i64_trap
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i64.trap">;
def int_nvvm_suld_1d_array_v2i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i8.trap">;
def int_nvvm_suld_1d_array_v2i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i16.trap">;
def int_nvvm_suld_1d_array_v2i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i32.trap">;
def int_nvvm_suld_1d_array_v2i64_trap
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i64.trap">;
def int_nvvm_suld_1d_array_v4i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i8.trap">;
def int_nvvm_suld_1d_array_v4i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i16.trap">;
def int_nvvm_suld_1d_array_v4i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i32.trap">;

def int_nvvm_suld_2d_i8_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i8.trap">;
def int_nvvm_suld_2d_i16_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i16.trap">;
def int_nvvm_suld_2d_i32_trap
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i32.trap">;
def int_nvvm_suld_2d_i64_trap
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i64.trap">;
def int_nvvm_suld_2d_v2i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i8.trap">;
def int_nvvm_suld_2d_v2i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i16.trap">;
def int_nvvm_suld_2d_v2i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i32.trap">;
def int_nvvm_suld_2d_v2i64_trap
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i64.trap">;
def int_nvvm_suld_2d_v4i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i8.trap">;
def int_nvvm_suld_2d_v4i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i16.trap">;
def int_nvvm_suld_2d_v4i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i32.trap">;

def int_nvvm_suld_2d_array_i8_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i8.trap">;
def int_nvvm_suld_2d_array_i16_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i16.trap">;
def int_nvvm_suld_2d_array_i32_trap
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i32.trap">;
def int_nvvm_suld_2d_array_i64_trap
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i64.trap">;
def int_nvvm_suld_2d_array_v2i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i8.trap">;
def int_nvvm_suld_2d_array_v2i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i16.trap">;
def int_nvvm_suld_2d_array_v2i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i32.trap">;
def int_nvvm_suld_2d_array_v2i64_trap
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i64.trap">;
def int_nvvm_suld_2d_array_v4i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i8.trap">;
def int_nvvm_suld_2d_array_v4i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i16.trap">;
def int_nvvm_suld_2d_array_v4i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i32.trap">;

def int_nvvm_suld_3d_i8_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i8.trap">;
def int_nvvm_suld_3d_i16_trap
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i16.trap">;
def int_nvvm_suld_3d_i32_trap
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i32.trap">;
def int_nvvm_suld_3d_i64_trap
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i64.trap">;
def int_nvvm_suld_3d_v2i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i8.trap">;
def int_nvvm_suld_3d_v2i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i16.trap">;
def int_nvvm_suld_3d_v2i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i32.trap">;
def int_nvvm_suld_3d_v2i64_trap
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i64.trap">;
def int_nvvm_suld_3d_v4i8_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i8.trap">;
def int_nvvm_suld_3d_v4i16_trap
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i16.trap">;
def int_nvvm_suld_3d_v4i32_trap
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i32.trap">;

// .zero variants: out-of-range loads return zero (per PTX).
def int_nvvm_suld_1d_i8_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i8.zero">;
def int_nvvm_suld_1d_i16_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i16.zero">;
def int_nvvm_suld_1d_i32_zero
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i32.zero">;
def int_nvvm_suld_1d_i64_zero
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.i64.zero">;
def int_nvvm_suld_1d_v2i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i8.zero">;
def int_nvvm_suld_1d_v2i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i16.zero">;
def int_nvvm_suld_1d_v2i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i32.zero">;
def int_nvvm_suld_1d_v2i64_zero
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v2i64.zero">;
def int_nvvm_suld_1d_v4i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i8.zero">;
def int_nvvm_suld_1d_v4i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i16.zero">;
def int_nvvm_suld_1d_v4i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.v4i32.zero">;

def int_nvvm_suld_1d_array_i8_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i8.zero">;
def int_nvvm_suld_1d_array_i16_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i16.zero">;
def int_nvvm_suld_1d_array_i32_zero
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i32.zero">;
def int_nvvm_suld_1d_array_i64_zero
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.i64.zero">;
def int_nvvm_suld_1d_array_v2i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i8.zero">;
def int_nvvm_suld_1d_array_v2i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i16.zero">;
def int_nvvm_suld_1d_array_v2i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i32.zero">;
def int_nvvm_suld_1d_array_v2i64_zero
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v2i64.zero">;
def int_nvvm_suld_1d_array_v4i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i8.zero">;
def int_nvvm_suld_1d_array_v4i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i16.zero">;
def int_nvvm_suld_1d_array_v4i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.1d.array.v4i32.zero">;

def int_nvvm_suld_2d_i8_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i8.zero">;
def int_nvvm_suld_2d_i16_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i16.zero">;
def int_nvvm_suld_2d_i32_zero
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i32.zero">;
def int_nvvm_suld_2d_i64_zero
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.i64.zero">;
def int_nvvm_suld_2d_v2i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i8.zero">;
def int_nvvm_suld_2d_v2i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i16.zero">;
def int_nvvm_suld_2d_v2i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i32.zero">;
def int_nvvm_suld_2d_v2i64_zero
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v2i64.zero">;
def int_nvvm_suld_2d_v4i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i8.zero">;
def int_nvvm_suld_2d_v4i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i16.zero">;
def int_nvvm_suld_2d_v4i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.v4i32.zero">;

def int_nvvm_suld_2d_array_i8_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i8.zero">;
def int_nvvm_suld_2d_array_i16_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i16.zero">;
def int_nvvm_suld_2d_array_i32_zero
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i32.zero">;
def int_nvvm_suld_2d_array_i64_zero
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.i64.zero">;
def int_nvvm_suld_2d_array_v2i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i8.zero">;
def int_nvvm_suld_2d_array_v2i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i16.zero">;
def int_nvvm_suld_2d_array_v2i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i32.zero">;
def int_nvvm_suld_2d_array_v2i64_zero
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v2i64.zero">;
def int_nvvm_suld_2d_array_v4i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i8.zero">;
def int_nvvm_suld_2d_array_v4i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i16.zero">;
def int_nvvm_suld_2d_array_v4i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.2d.array.v4i32.zero">;

def int_nvvm_suld_3d_i8_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i8.zero">;
def int_nvvm_suld_3d_i16_zero
  : Intrinsic<[llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i16.zero">;
def int_nvvm_suld_3d_i32_zero
  : Intrinsic<[llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i32.zero">;
def int_nvvm_suld_3d_i64_zero
  : Intrinsic<[llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.i64.zero">;
def int_nvvm_suld_3d_v2i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i8.zero">;
def int_nvvm_suld_3d_v2i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i16.zero">;
def int_nvvm_suld_3d_v2i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i32.zero">;
def int_nvvm_suld_3d_v2i64_zero
  : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v2i64.zero">;
def int_nvvm_suld_3d_v4i8_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i8.zero">;
def int_nvvm_suld_3d_v4i16_zero
  : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i16.zero">;
def int_nvvm_suld_3d_v4i32_zero
  : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
              [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.suld.3d.v4i32.zero">;

//===- Texture Query ------------------------------------------------------===//

def int_nvvm_txq_channel_order
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.channel.order">,
    ClangBuiltin<"__nvvm_txq_channel_order">;
def int_nvvm_txq_channel_data_type
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.channel.data.type">,
    ClangBuiltin<"__nvvm_txq_channel_data_type">;
def int_nvvm_txq_width
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.width">,
    ClangBuiltin<"__nvvm_txq_width">;
def int_nvvm_txq_height
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.height">,
    ClangBuiltin<"__nvvm_txq_height">;
def int_nvvm_txq_depth
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.depth">,
    ClangBuiltin<"__nvvm_txq_depth">;
def int_nvvm_txq_array_size
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.array.size">,
    ClangBuiltin<"__nvvm_txq_array_size">;
def int_nvvm_txq_num_samples
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.num.samples">,
    ClangBuiltin<"__nvvm_txq_num_samples">;
def int_nvvm_txq_num_mipmap_levels
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.txq.num.mipmap.levels">,
    ClangBuiltin<"__nvvm_txq_num_mipmap_levels">;

//===- Surface Query ------------------------------------------------------===//

def int_nvvm_suq_channel_order
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.suq.channel.order">,
    ClangBuiltin<"__nvvm_suq_channel_order">;
def int_nvvm_suq_channel_data_type
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.suq.channel.data.type">,
    ClangBuiltin<"__nvvm_suq_channel_data_type">;
def int_nvvm_suq_width
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.suq.width">,
    ClangBuiltin<"__nvvm_suq_width">;
def int_nvvm_suq_height
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.suq.height">,
    ClangBuiltin<"__nvvm_suq_height">;
def int_nvvm_suq_depth
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.suq.depth">,
    ClangBuiltin<"__nvvm_suq_depth">;
def int_nvvm_suq_array_size
  : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.suq.array.size">,
    ClangBuiltin<"__nvvm_suq_array_size">;


//===- Handle Query -------------------------------------------------------===//

def int_nvvm_istypep_sampler
  : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.istypep.sampler">,
    ClangBuiltin<"__nvvm_istypep_sampler">;
def int_nvvm_istypep_surface
  : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.istypep.surface">,
    ClangBuiltin<"__nvvm_istypep_surface">;
def int_nvvm_istypep_texture
  : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
              "llvm.nvvm.istypep.texture">,
    ClangBuiltin<"__nvvm_istypep_texture">;



//===- Surface Stores -----------------------------------------------------===//

// Unformatted (sust.b): data is stored as raw bytes, with no format conversion.
// .clamp variant: out-of-range stores go to the nearest in-bounds location
// (per PTX).
def int_nvvm_sust_b_1d_i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_i8_clamp">;
def int_nvvm_sust_b_1d_i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_i16_clamp">;
def int_nvvm_sust_b_1d_i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_i32_clamp">;
def int_nvvm_sust_b_1d_i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_i64_clamp">;
def int_nvvm_sust_b_1d_v2i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v2i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i8_clamp">;
def int_nvvm_sust_b_1d_v2i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v2i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i16_clamp">;
def int_nvvm_sust_b_1d_v2i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.v2i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i32_clamp">;
def int_nvvm_sust_b_1d_v2i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.v2i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i64_clamp">;
def int_nvvm_sust_b_1d_v4i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v4i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i8_clamp">;
def int_nvvm_sust_b_1d_v4i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v4i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i16_clamp">;
def int_nvvm_sust_b_1d_v4i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.v4i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i32_clamp">;


def int_nvvm_sust_b_1d_array_i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i8_clamp">;
def int_nvvm_sust_b_1d_array_i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i16_clamp">;
def int_nvvm_sust_b_1d_array_i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i32_clamp">;
def int_nvvm_sust_b_1d_array_i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.array.i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i64_clamp">;
def int_nvvm_sust_b_1d_array_v2i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i8_clamp">;
def int_nvvm_sust_b_1d_array_v2i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i16_clamp">;
def int_nvvm_sust_b_1d_array_v2i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i32_clamp">;
def int_nvvm_sust_b_1d_array_v2i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i64_clamp">;
def int_nvvm_sust_b_1d_array_v4i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i8_clamp">;
def int_nvvm_sust_b_1d_array_v4i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i16_clamp">;
def int_nvvm_sust_b_1d_array_v4i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i32_clamp">;


def int_nvvm_sust_b_2d_i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_i8_clamp">;
def int_nvvm_sust_b_2d_i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_i16_clamp">;
def int_nvvm_sust_b_2d_i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_i32_clamp">;
def int_nvvm_sust_b_2d_i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_i64_clamp">;
def int_nvvm_sust_b_2d_v2i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v2i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i8_clamp">;
def int_nvvm_sust_b_2d_v2i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v2i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i16_clamp">;
def int_nvvm_sust_b_2d_v2i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.v2i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i32_clamp">;
def int_nvvm_sust_b_2d_v2i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.v2i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i64_clamp">;
def int_nvvm_sust_b_2d_v4i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v4i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i8_clamp">;
def int_nvvm_sust_b_2d_v4i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v4i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i16_clamp">;
def int_nvvm_sust_b_2d_v4i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.v4i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i32_clamp">;


def int_nvvm_sust_b_2d_array_i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i8_clamp">;
def int_nvvm_sust_b_2d_array_i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i16_clamp">;
def int_nvvm_sust_b_2d_array_i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i32_clamp">;
def int_nvvm_sust_b_2d_array_i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.array.i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i64_clamp">;
def int_nvvm_sust_b_2d_array_v2i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i8_clamp">;
def int_nvvm_sust_b_2d_array_v2i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i16_clamp">;
def int_nvvm_sust_b_2d_array_v2i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i32_clamp">;
def int_nvvm_sust_b_2d_array_v2i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i64_clamp">;
def int_nvvm_sust_b_2d_array_v4i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i8_clamp">;
def int_nvvm_sust_b_2d_array_v4i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i16_clamp">;
def int_nvvm_sust_b_2d_array_v4i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i32_clamp">;


def int_nvvm_sust_b_3d_i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_i8_clamp">;
def int_nvvm_sust_b_3d_i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_i16_clamp">;
def int_nvvm_sust_b_3d_i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_i32_clamp">;
def int_nvvm_sust_b_3d_i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.3d.i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_i64_clamp">;
def int_nvvm_sust_b_3d_v2i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v2i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i8_clamp">;
def int_nvvm_sust_b_3d_v2i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v2i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i16_clamp">;
def int_nvvm_sust_b_3d_v2i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.v2i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i32_clamp">;
def int_nvvm_sust_b_3d_v2i64_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.3d.v2i64.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i64_clamp">;
def int_nvvm_sust_b_3d_v4i8_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v4i8.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i8_clamp">;
def int_nvvm_sust_b_3d_v4i16_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v4i16.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i16_clamp">;
def int_nvvm_sust_b_3d_v4i32_clamp
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.v4i32.clamp">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i32_clamp">;


// .trap variant
def int_nvvm_sust_b_1d_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_i8_trap">;
def int_nvvm_sust_b_1d_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_i16_trap">;
def int_nvvm_sust_b_1d_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_i32_trap">;
def int_nvvm_sust_b_1d_i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_i64_trap">;
def int_nvvm_sust_b_1d_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i8_trap">;
def int_nvvm_sust_b_1d_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i16_trap">;
def int_nvvm_sust_b_1d_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i32_trap">;
def int_nvvm_sust_b_1d_v2i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.v2i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i64_trap">;
def int_nvvm_sust_b_1d_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i8_trap">;
def int_nvvm_sust_b_1d_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i16_trap">;
def int_nvvm_sust_b_1d_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i32_trap">;


def int_nvvm_sust_b_1d_array_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i8_trap">;
def int_nvvm_sust_b_1d_array_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i16_trap">;
def int_nvvm_sust_b_1d_array_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i32_trap">;
def int_nvvm_sust_b_1d_array_i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.array.i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i64_trap">;
def int_nvvm_sust_b_1d_array_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i8_trap">;
def int_nvvm_sust_b_1d_array_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i16_trap">;
def int_nvvm_sust_b_1d_array_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i32_trap">;
def int_nvvm_sust_b_1d_array_v2i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i64_trap">;
def int_nvvm_sust_b_1d_array_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i8_trap">;
def int_nvvm_sust_b_1d_array_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i16_trap">;
def int_nvvm_sust_b_1d_array_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i32_trap">;


def int_nvvm_sust_b_2d_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_i8_trap">;
def int_nvvm_sust_b_2d_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_i16_trap">;
def int_nvvm_sust_b_2d_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_i32_trap">;
def int_nvvm_sust_b_2d_i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_i64_trap">;
def int_nvvm_sust_b_2d_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i8_trap">;
def int_nvvm_sust_b_2d_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i16_trap">;
def int_nvvm_sust_b_2d_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i32_trap">;
def int_nvvm_sust_b_2d_v2i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.v2i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i64_trap">;
def int_nvvm_sust_b_2d_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i8_trap">;
def int_nvvm_sust_b_2d_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i16_trap">;
def int_nvvm_sust_b_2d_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i32_trap">;


def int_nvvm_sust_b_2d_array_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i8_trap">;
def int_nvvm_sust_b_2d_array_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i16_trap">;
def int_nvvm_sust_b_2d_array_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i32_trap">;
def int_nvvm_sust_b_2d_array_i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.array.i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i64_trap">;
def int_nvvm_sust_b_2d_array_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i8_trap">;
def int_nvvm_sust_b_2d_array_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i16_trap">;
def int_nvvm_sust_b_2d_array_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i32_trap">;
def int_nvvm_sust_b_2d_array_v2i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i64_trap">;
def int_nvvm_sust_b_2d_array_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i8_trap">;
def int_nvvm_sust_b_2d_array_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i16_trap">;
def int_nvvm_sust_b_2d_array_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i32_trap">;


def int_nvvm_sust_b_3d_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_i8_trap">;
def int_nvvm_sust_b_3d_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_i16_trap">;
def int_nvvm_sust_b_3d_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_i32_trap">;
def int_nvvm_sust_b_3d_i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.3d.i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_i64_trap">;
def int_nvvm_sust_b_3d_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i8_trap">;
def int_nvvm_sust_b_3d_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i16_trap">;
def int_nvvm_sust_b_3d_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i32_trap">;
def int_nvvm_sust_b_3d_v2i64_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.3d.v2i64.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i64_trap">;
def int_nvvm_sust_b_3d_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i8_trap">;
def int_nvvm_sust_b_3d_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i16_trap">;
def int_nvvm_sust_b_3d_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i32_trap">;


// .zero variant
def int_nvvm_sust_b_1d_i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_i8_zero">;
def int_nvvm_sust_b_1d_i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_i16_zero">;
def int_nvvm_sust_b_1d_i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_i32_zero">;
def int_nvvm_sust_b_1d_i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_i64_zero">;
def int_nvvm_sust_b_1d_v2i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v2i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i8_zero">;
def int_nvvm_sust_b_1d_v2i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v2i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i16_zero">;
def int_nvvm_sust_b_1d_v2i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.v2i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i32_zero">;
def int_nvvm_sust_b_1d_v2i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.v2i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v2i64_zero">;
def int_nvvm_sust_b_1d_v4i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v4i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i8_zero">;
def int_nvvm_sust_b_1d_v4i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.v4i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i16_zero">;
def int_nvvm_sust_b_1d_v4i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.v4i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_v4i32_zero">;


def int_nvvm_sust_b_1d_array_i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i8_zero">;
def int_nvvm_sust_b_1d_array_i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i16_zero">;
def int_nvvm_sust_b_1d_array_i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i32_zero">;
def int_nvvm_sust_b_1d_array_i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.array.i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_i64_zero">;
def int_nvvm_sust_b_1d_array_v2i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i8_zero">;
def int_nvvm_sust_b_1d_array_v2i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i16_zero">;
def int_nvvm_sust_b_1d_array_v2i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i32_zero">;
def int_nvvm_sust_b_1d_array_v2i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.1d.array.v2i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v2i64_zero">;
def int_nvvm_sust_b_1d_array_v4i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i8_zero">;
def int_nvvm_sust_b_1d_array_v4i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i16_zero">;
def int_nvvm_sust_b_1d_array_v4i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.1d.array.v4i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_1d_array_v4i32_zero">;


def int_nvvm_sust_b_2d_i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_i8_zero">;
def int_nvvm_sust_b_2d_i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_i16_zero">;
def int_nvvm_sust_b_2d_i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_i32_zero">;
def int_nvvm_sust_b_2d_i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_i64_zero">;
def int_nvvm_sust_b_2d_v2i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v2i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i8_zero">;
def int_nvvm_sust_b_2d_v2i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v2i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i16_zero">;
def int_nvvm_sust_b_2d_v2i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.v2i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i32_zero">;
def int_nvvm_sust_b_2d_v2i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.v2i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v2i64_zero">;
def int_nvvm_sust_b_2d_v4i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v4i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i8_zero">;
def int_nvvm_sust_b_2d_v4i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.v4i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i16_zero">;
def int_nvvm_sust_b_2d_v4i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.v4i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_v4i32_zero">;


def int_nvvm_sust_b_2d_array_i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i8_zero">;
def int_nvvm_sust_b_2d_array_i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i16_zero">;
def int_nvvm_sust_b_2d_array_i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i32_zero">;
def int_nvvm_sust_b_2d_array_i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.array.i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_i64_zero">;
def int_nvvm_sust_b_2d_array_v2i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i8_zero">;
def int_nvvm_sust_b_2d_array_v2i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i16_zero">;
def int_nvvm_sust_b_2d_array_v2i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i32_zero">;
def int_nvvm_sust_b_2d_array_v2i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.2d.array.v2i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v2i64_zero">;
def int_nvvm_sust_b_2d_array_v4i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i8_zero">;
def int_nvvm_sust_b_2d_array_v4i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i16_zero">;
def int_nvvm_sust_b_2d_array_v4i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.2d.array.v4i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_2d_array_v4i32_zero">;


def int_nvvm_sust_b_3d_i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_i8_zero">;
def int_nvvm_sust_b_3d_i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_i16_zero">;
def int_nvvm_sust_b_3d_i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_i32_zero">;
def int_nvvm_sust_b_3d_i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.3d.i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_i64_zero">;
def int_nvvm_sust_b_3d_v2i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v2i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i8_zero">;
def int_nvvm_sust_b_3d_v2i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v2i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i16_zero">;
def int_nvvm_sust_b_3d_v2i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.v2i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i32_zero">;
def int_nvvm_sust_b_3d_v2i64_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i64_ty, llvm_i64_ty], [],
              "llvm.nvvm.sust.b.3d.v2i64.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v2i64_zero">;
def int_nvvm_sust_b_3d_v4i8_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v4i8.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i8_zero">;
def int_nvvm_sust_b_3d_v4i16_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.b.3d.v4i16.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i16_zero">;
def int_nvvm_sust_b_3d_v4i32_zero
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.b.3d.v4i32.zero">,
    ClangBuiltin<"__nvvm_sust_b_3d_v4i32_zero">;



// Formatted surface stores (sust.p)

def int_nvvm_sust_p_1d_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_i8_trap">;
def int_nvvm_sust_p_1d_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_i16_trap">;
def int_nvvm_sust_p_1d_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.1d.i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_i32_trap">;
def int_nvvm_sust_p_1d_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_v2i8_trap">;
def int_nvvm_sust_p_1d_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_v2i16_trap">;
def int_nvvm_sust_p_1d_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.1d.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_v2i32_trap">;
def int_nvvm_sust_p_1d_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_v4i8_trap">;
def int_nvvm_sust_p_1d_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_v4i16_trap">;
def int_nvvm_sust_p_1d_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.1d.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_v4i32_trap">;


def int_nvvm_sust_p_1d_array_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.array.i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_i8_trap">;
def int_nvvm_sust_p_1d_array_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.array.i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_i16_trap">;
def int_nvvm_sust_p_1d_array_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.1d.array.i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_i32_trap">;
def int_nvvm_sust_p_1d_array_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.array.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_v2i8_trap">;
def int_nvvm_sust_p_1d_array_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.array.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_v2i16_trap">;
def int_nvvm_sust_p_1d_array_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.1d.array.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_v2i32_trap">;
def int_nvvm_sust_p_1d_array_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.array.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_v4i8_trap">;
def int_nvvm_sust_p_1d_array_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.1d.array.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_v4i16_trap">;
def int_nvvm_sust_p_1d_array_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.1d.array.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_1d_array_v4i32_trap">;


def int_nvvm_sust_p_2d_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_i8_trap">;
def int_nvvm_sust_p_2d_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_i16_trap">;
def int_nvvm_sust_p_2d_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.2d.i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_i32_trap">;
def int_nvvm_sust_p_2d_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_v2i8_trap">;
def int_nvvm_sust_p_2d_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_v2i16_trap">;
def int_nvvm_sust_p_2d_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.2d.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_v2i32_trap">;
def int_nvvm_sust_p_2d_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_v4i8_trap">;
def int_nvvm_sust_p_2d_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_v4i16_trap">;
def int_nvvm_sust_p_2d_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.2d.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_v4i32_trap">;


def int_nvvm_sust_p_2d_array_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.array.i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_i8_trap">;
def int_nvvm_sust_p_2d_array_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.array.i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_i16_trap">;
def int_nvvm_sust_p_2d_array_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.2d.array.i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_i32_trap">;
def int_nvvm_sust_p_2d_array_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.array.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_v2i8_trap">;
def int_nvvm_sust_p_2d_array_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.array.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_v2i16_trap">;
def int_nvvm_sust_p_2d_array_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.2d.array.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_v2i32_trap">;
def int_nvvm_sust_p_2d_array_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.array.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_v4i8_trap">;
def int_nvvm_sust_p_2d_array_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.2d.array.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_v4i16_trap">;
def int_nvvm_sust_p_2d_array_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.2d.array.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_2d_array_v4i32_trap">;


def int_nvvm_sust_p_3d_i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.3d.i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_i8_trap">;
def int_nvvm_sust_p_3d_i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.3d.i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_i16_trap">;
def int_nvvm_sust_p_3d_i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.3d.i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_i32_trap">;
def int_nvvm_sust_p_3d_v2i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.3d.v2i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_v2i8_trap">;
def int_nvvm_sust_p_3d_v2i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.3d.v2i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_v2i16_trap">;
def int_nvvm_sust_p_3d_v2i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.3d.v2i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_v2i32_trap">;
def int_nvvm_sust_p_3d_v4i8_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.3d.v4i8.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_v4i8_trap">;
def int_nvvm_sust_p_3d_v4i16_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
              "llvm.nvvm.sust.p.3d.v4i16.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_v4i16_trap">;
def int_nvvm_sust_p_3d_v4i32_trap
  : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
                   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
              "llvm.nvvm.sust.p.3d.v4i32.trap">,
    ClangBuiltin<"__nvvm_sust_p_3d_v4i32_trap">;


def int_nvvm_rotate_b32
  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
              [IntrNoMem, IntrSpeculatable], "llvm.nvvm.rotate.b32">,
              ClangBuiltin<"__nvvm_rotate_b32">;

def int_nvvm_rotate_b64
  : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
             [IntrNoMem, IntrSpeculatable], "llvm.nvvm.rotate.b64">,
             ClangBuiltin<"__nvvm_rotate_b64">;

def int_nvvm_rotate_right_b64
  : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
              [IntrNoMem, IntrSpeculatable], "llvm.nvvm.rotate.right.b64">,
              ClangBuiltin<"__nvvm_rotate_right_b64">;

def int_nvvm_swap_lo_hi_b64
  : DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty],
              [IntrNoMem, IntrSpeculatable], "llvm.nvvm.swap.lo.hi.b64">,
              ClangBuiltin<"__nvvm_swap_lo_hi_b64">;


// Accessing special registers.

class PTXReadSRegIntrinsicNB_r32
  : DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, NoUndef<RetIndex>]>;
class PTXReadSRegIntrinsic_r32<string name>
  : PTXReadSRegIntrinsicNB_r32, ClangBuiltin<"__nvvm_read_ptx_sreg_" # name>;

multiclass PTXReadSRegIntrinsic_v4i32<string regname> {
// FIXME: Do we need the 128-bit integer type version?
//    def _r64   : Intrinsic<[llvm_i128_ty],   [], [IntrNoMem, IntrSpeculatable]>;

// FIXME: Enable this once v4i32 support is enabled in the back-end.
//    def _v4i32 : Intrinsic<[llvm_v4i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
  foreach suffix = ["_x", "_y", "_z", "_w"] in
    def suffix : PTXReadSRegIntrinsic_r32<regname # suffix>;
}

// Same, but without automatic clang builtins. These are used for registers
// that require a particular GPU or PTX version.
multiclass PTXReadSRegIntrinsicNB_v4i32 {
  foreach suffix = ["_x", "_y", "_z", "_w"] in
    def suffix : PTXReadSRegIntrinsicNB_r32;
}

class PTXReadSRegIntrinsic_r64<string name>
  : DefaultAttrsIntrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable, NoUndef<RetIndex>]>,
    ClangBuiltin<"__nvvm_read_ptx_sreg_" # name>;

// Intrinsics to read registers with non-constant values, e.g. values that
// change over the kernel's lifetime. Such reads should not be CSE'd.
class PTXReadNCSRegIntrinsic_r32<string name>
  : Intrinsic<[llvm_i32_ty], [], [IntrInaccessibleMemOnly, IntrNoCallback, NoUndef<RetIndex>]>,
    ClangBuiltin<"__nvvm_read_ptx_sreg_" # name>;
class PTXReadNCSRegIntrinsic_r64<string name>
  : Intrinsic<[llvm_i64_ty], [], [IntrInaccessibleMemOnly, IntrNoCallback, NoUndef<RetIndex>]>,
    ClangBuiltin<"__nvvm_read_ptx_sreg_" # name>;

defm int_nvvm_read_ptx_sreg_tid : PTXReadSRegIntrinsic_v4i32<"tid">;
defm int_nvvm_read_ptx_sreg_ntid : PTXReadSRegIntrinsic_v4i32<"ntid">;
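// Usage sketch: each defm above expands to four intrinsics, one per component,
// e.g. int_nvvm_read_ptx_sreg_tid_x with builtin __nvvm_read_ptx_sreg_tid_x;
// these read the registers that CUDA's threadIdx/blockDim/blockIdx map to.
// For example, a flattened global x index in device code:
//
//   __device__ int global_x() {
//     return __nvvm_read_ptx_sreg_ctaid_x() * __nvvm_read_ptx_sreg_ntid_x() +
//            __nvvm_read_ptx_sreg_tid_x();
//   }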

def int_nvvm_read_ptx_sreg_laneid : PTXReadSRegIntrinsic_r32<"laneid">;
def int_nvvm_read_ptx_sreg_warpid : PTXReadSRegIntrinsic_r32<"warpid">;
def int_nvvm_read_ptx_sreg_nwarpid : PTXReadSRegIntrinsic_r32<"nwarpid">;

defm int_nvvm_read_ptx_sreg_ctaid : PTXReadSRegIntrinsic_v4i32<"ctaid">;
defm int_nvvm_read_ptx_sreg_nctaid : PTXReadSRegIntrinsic_v4i32<"nctaid">;

def int_nvvm_read_ptx_sreg_smid : PTXReadSRegIntrinsic_r32<"smid">;
def int_nvvm_read_ptx_sreg_nsmid : PTXReadSRegIntrinsic_r32<"nsmid">;
def int_nvvm_read_ptx_sreg_gridid : PTXReadSRegIntrinsic_r32<"gridid">;

def int_nvvm_read_ptx_sreg_lanemask_eq :
    PTXReadSRegIntrinsic_r32<"lanemask_eq">;
def int_nvvm_read_ptx_sreg_lanemask_le :
    PTXReadSRegIntrinsic_r32<"lanemask_le">;
def int_nvvm_read_ptx_sreg_lanemask_lt :
    PTXReadSRegIntrinsic_r32<"lanemask_lt">;
def int_nvvm_read_ptx_sreg_lanemask_ge :
    PTXReadSRegIntrinsic_r32<"lanemask_ge">;
def int_nvvm_read_ptx_sreg_lanemask_gt :
    PTXReadSRegIntrinsic_r32<"lanemask_gt">;

def int_nvvm_read_ptx_sreg_clock : PTXReadNCSRegIntrinsic_r32<"clock">;
def int_nvvm_read_ptx_sreg_clock64 : PTXReadNCSRegIntrinsic_r64<"clock64">;

def int_nvvm_read_ptx_sreg_pm0 : PTXReadNCSRegIntrinsic_r32<"pm0">;
def int_nvvm_read_ptx_sreg_pm1 : PTXReadNCSRegIntrinsic_r32<"pm1">;
def int_nvvm_read_ptx_sreg_pm2 : PTXReadNCSRegIntrinsic_r32<"pm2">;
def int_nvvm_read_ptx_sreg_pm3 : PTXReadNCSRegIntrinsic_r32<"pm3">;

def int_nvvm_read_ptx_sreg_warpsize : PTXReadSRegIntrinsic_r32<"warpsize">;

// sm90+, PTX7.8+
defm int_nvvm_read_ptx_sreg_clusterid : PTXReadSRegIntrinsicNB_v4i32;
defm int_nvvm_read_ptx_sreg_nclusterid : PTXReadSRegIntrinsicNB_v4i32;
defm int_nvvm_read_ptx_sreg_cluster_ctaid : PTXReadSRegIntrinsicNB_v4i32;
defm int_nvvm_read_ptx_sreg_cluster_nctaid : PTXReadSRegIntrinsicNB_v4i32;

def int_nvvm_read_ptx_sreg_cluster_ctarank : PTXReadSRegIntrinsicNB_r32;
def int_nvvm_read_ptx_sreg_cluster_nctarank : PTXReadSRegIntrinsicNB_r32;

//
// SHUFFLE
//
// Generate intrinsics for all variants of the shfl instruction.
foreach sync = [false, true] in {
  foreach mode = ["up", "down", "bfly", "idx"] in {
    foreach type = ["i32", "f32"] in {
      foreach return_pred = [false, true] in {
        foreach i = [SHFL_INFO<sync, mode, type, return_pred>] in {
          if i.withGccBuiltin then {
            def i.Name : ClangBuiltin<i.Builtin>,
                         Intrinsic<i.RetTy, i.ArgsTy,
                                   [IntrInaccessibleMemOnly, IntrConvergent,
                                   IntrNoCallback],
                                   i.IntrName>;
          }
          if i.withoutGccBuiltin then {
            def i.Name : Intrinsic<i.RetTy, i.ArgsTy,
                         [IntrInaccessibleMemOnly, IntrConvergent,
                         IntrNoCallback], i.IntrName>;
          }
        }
      }
    }
  }
}
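// Usage sketch (hedged): among others, the loops above instantiate
// llvm.nvvm.shfl.sync.down.i32 with builtin __nvvm_shfl_sync_down_i32. A
// typical warp sum, assuming the last operand packs the clamp/segment mask
// the way CUDA's __shfl_down_sync lowers it:
//
//   __device__ int warp_sum(int v) {
//     for (int delta = 16; delta > 0; delta /= 2)
//       v += __nvvm_shfl_sync_down_i32(0xffffffff, v, delta, 0x1f);
//     return v;
//   }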

//
// VOTE
//

// vote.all pred
def int_nvvm_vote_all :
  Intrinsic<[llvm_i1_ty], [llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.all">,
  ClangBuiltin<"__nvvm_vote_all">;
// vote.any pred
def int_nvvm_vote_any :
  Intrinsic<[llvm_i1_ty], [llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.any">,
  ClangBuiltin<"__nvvm_vote_any">;
// vote.uni pred
def int_nvvm_vote_uni :
  Intrinsic<[llvm_i1_ty], [llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.uni">,
  ClangBuiltin<"__nvvm_vote_uni">;
// vote.ballot pred
def int_nvvm_vote_ballot :
  Intrinsic<[llvm_i32_ty], [llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.ballot">,
  ClangBuiltin<"__nvvm_vote_ballot">;

//
// VOTE.SYNC
//

// vote.sync.all mask, pred
def int_nvvm_vote_all_sync :
  Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.all.sync">,
  ClangBuiltin<"__nvvm_vote_all_sync">;
// vote.sync.any mask, pred
def int_nvvm_vote_any_sync :
  Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.any.sync">,
  ClangBuiltin<"__nvvm_vote_any_sync">;
// vote.sync.uni mask, pred
def int_nvvm_vote_uni_sync :
  Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.uni.sync">,
  ClangBuiltin<"__nvvm_vote_uni_sync">;
// vote.sync.ballot mask, pred
def int_nvvm_vote_ballot_sync :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i1_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.vote.ballot.sync">,
  ClangBuiltin<"__nvvm_vote_ballot_sync">;

//
// MATCH.SYNC
//
// match.any.sync.b32 mask, value
def int_nvvm_match_any_sync_i32 :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.match.any.sync.i32">,
  ClangBuiltin<"__nvvm_match_any_sync_i32">;
// match.any.sync.b64 mask, value
def int_nvvm_match_any_sync_i64 :
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.match.any.sync.i64">,
  ClangBuiltin<"__nvvm_match_any_sync_i64">;

// The match.all instruction has two variants -- one returns a single value,
// the other returns a pair {value, predicate}. We currently implement only the
// latter, as that is the variant exposed by the CUDA API.

// match.all.sync.b32p mask, value
def int_nvvm_match_all_sync_i32p :
  Intrinsic<[llvm_i32_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.match.all.sync.i32p">;
// match.all.sync.b64p mask, value
def int_nvvm_match_all_sync_i64p :
  Intrinsic<[llvm_i32_ty, llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
            [IntrInaccessibleMemOnly, IntrConvergent, IntrNoCallback], "llvm.nvvm.match.all.sync.i64p">;
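// Note (illustrative, derived from the defs above): at the IR level the
// pair-returning form is
//   %r = call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32 %mask, i32 %val)
// where the i32 result is the mask of matching lanes and the i1 result is
// true iff all participating lanes supplied the same value.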

//
// REDUX.SYNC
//
// redux.sync.min.u32 dst, src, membermask;
def int_nvvm_redux_sync_umin : ClangBuiltin<"__nvvm_redux_sync_umin">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.max.u32 dst, src, membermask;
def int_nvvm_redux_sync_umax : ClangBuiltin<"__nvvm_redux_sync_umax">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.add.s32 dst, src, membermask;
def int_nvvm_redux_sync_add : ClangBuiltin<"__nvvm_redux_sync_add">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.min.s32 dst, src, membermask;
def int_nvvm_redux_sync_min : ClangBuiltin<"__nvvm_redux_sync_min">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.max.s32 dst, src, membermask;
def int_nvvm_redux_sync_max : ClangBuiltin<"__nvvm_redux_sync_max">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.and.b32 dst, src, membermask;
def int_nvvm_redux_sync_and : ClangBuiltin<"__nvvm_redux_sync_and">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.xor.b32 dst, src, membermask;
def int_nvvm_redux_sync_xor : ClangBuiltin<"__nvvm_redux_sync_xor">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;

// redux.sync.or.b32 dst, src, membermask;
def int_nvvm_redux_sync_or : ClangBuiltin<"__nvvm_redux_sync_or">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrConvergent, IntrInaccessibleMemOnly, IntrNoCallback]>;
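// Usage sketch (assuming the (src, membermask) operand order shown in the PTX
// comments above): redux.sync reduces a value across the lanes named by
// membermask and returns the result to each participating lane:
//
//   __device__ int warp_add(int v) {
//     return __nvvm_redux_sync_add(v, 0xffffffff); // sum across a full warp
//   }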

//
// WMMA instructions
//
// WMMA.LOAD
class NVVM_WMMA_LD<WMMA_REGS Frag, string Layout, int WithStride>
  : Intrinsic<Frag.regs,
              !if(WithStride, [llvm_anyptr_ty, llvm_i32_ty], [llvm_anyptr_ty]),
              [IntrReadMem, IntrArgMemOnly, IntrNoCallback, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
              WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.intr>;

// WMMA.STORE.D
class NVVM_WMMA_ST<WMMA_REGS Frag, string Layout, int WithStride>
  : Intrinsic<[],
              !listconcat(
                [llvm_anyptr_ty],
                Frag.regs,
                !if(WithStride, [llvm_i32_ty], [])),
              [IntrWriteMem, IntrArgMemOnly, IntrNoCallback, WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
              WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.intr>;

// Create all load/store variants
foreach layout = ["row", "col"] in {
  foreach stride = [0, 1] in {
    foreach frag = NVVM_MMA_OPS.all_ld_ops in
      if NVVM_WMMA_LDST_SUPPORTED<frag, layout>.ret then
        def WMMA_NAME_LDST<"load", frag, layout, stride>.record
             : NVVM_WMMA_LD<frag, layout, stride>;
    foreach frag = NVVM_MMA_OPS.all_st_ops in
      if NVVM_WMMA_LDST_SUPPORTED<frag, layout>.ret then
        def WMMA_NAME_LDST<"store", frag, layout, stride>.record
             : NVVM_WMMA_ST<frag, layout, stride>;
  }
}
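// Note (one representative instance; the exact set of names is produced by
// WMMA_NAME_LDST): the loops above yield intrinsics such as
//   llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
// i.e. a strided, row-major load of the A fragment of a 16x16x16 f16 tile.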

// WMMA.MMA
class NVVM_WMMA_MMA<string ALayout, string BLayout, int Satfinite, string rnd, string b1op,
                    WMMA_REGS A, WMMA_REGS B,
                    WMMA_REGS C, WMMA_REGS D>
  : Intrinsic<D.regs,
              !listconcat(A.regs, B.regs, C.regs),
              [IntrNoMem, IntrNoCallback],
              WMMA_NAME<ALayout, BLayout, Satfinite, rnd, b1op, A, B, C, D>.llvm>;

foreach layout_a = ["row", "col"] in {
  foreach layout_b = ["row", "col"] in {
    foreach satf = [0, 1] in {
      foreach rnd = ["", "rn", "rz", "rm", "rp"] in {
        foreach op = NVVM_MMA_OPS.all_wmma_ops in {
          foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
            if NVVM_WMMA_SUPPORTED<op, layout_a, layout_b, satf, rnd>.ret then {
              def WMMA_NAME<layout_a, layout_b, satf, rnd, b1op,
                                op[0], op[1], op[2], op[3]>.record
                : NVVM_WMMA_MMA<layout_a, layout_b, satf, rnd, b1op,
                                op[0], op[1], op[2], op[3]>;
            }
          } // b1op
        } // op
      } // rnd
    } // satf
  } // layout_b
} // layout_a

// MMA
class NVVM_MMA<string ALayout, string BLayout, int Satfinite, string b1op,
               WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D>
  : Intrinsic<D.regs,
              !listconcat(A.regs, B.regs, C.regs),
              [IntrNoMem, IntrNoCallback],
              MMA_NAME<ALayout, BLayout, Satfinite, b1op, A, B, C, D>.llvm>;

foreach layout_a = ["row", "col"] in {
  foreach layout_b = ["row", "col"] in {
    foreach satf = [0, 1] in {
      foreach op = NVVM_MMA_OPS.all_mma_ops in {
        foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
          if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, satf>.ret then {
            def MMA_NAME<layout_a, layout_b, satf, b1op, op[0], op[1], op[2], op[3]>.record
              : NVVM_MMA<layout_a, layout_b, satf, b1op, op[0], op[1], op[2], op[3]>;
          }
        } // b1op
      } // op
    } // satf
  } // layout_b
} // layout_a

// LDMATRIX
class NVVM_LDMATRIX<WMMA_REGS Frag, int Transposed>
  : Intrinsic<Frag.regs, [llvm_anyptr_ty],
              [IntrReadMem, IntrArgMemOnly, IntrNoCallback, ReadOnly<ArgIndex<0>>,
               NoCapture<ArgIndex<0>>],
              LDMATRIX_NAME<Frag, Transposed>.intr>;

foreach transposed = [0, 1] in {
  foreach frag = NVVM_MMA_OPS.all_ldmatrix_ops in {
    if NVVM_LDMATRIX_SUPPORTED<frag>.ret then {
      def LDMATRIX_NAME<frag, transposed>.record
        : NVVM_LDMATRIX<frag, transposed>;
    }
  }
}

def int_nvvm_mapa
  : DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.mapa">;
def int_nvvm_mapa_shared_cluster
  : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.mapa.shared.cluster">;
def int_nvvm_getctarank
  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.getctarank">;
def int_nvvm_getctarank_shared_cluster
  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_ptr_ty],
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.getctarank.shared.cluster">;
def int_nvvm_is_explicit_cluster
  : DefaultAttrsIntrinsic<[llvm_i1_ty], [],
              [IntrNoMem, IntrSpeculatable, NoUndef<RetIndex>],
              "llvm.nvvm.is_explicit_cluster">;

} // let TargetPrefix = "nvvm"
//===- DebugInfo.h - Debug Information Helpers ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a bunch of datatypes that are useful for creating and
// walking debug info in LLVM IR form. They essentially provide wrappers around
// the information in the global variables that's needed when constructing the
// DWARF information.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DEBUGINFO_H
#define LLVM_IR_DEBUGINFO_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PassManager.h"
#include <optional>

namespace llvm {

class DbgDeclareInst;
class DbgValueInst;
class DbgVariableIntrinsic;
class Instruction;
class Module;

/// Finds dbg.declare intrinsics declaring local variables as living in the
/// memory that 'V' points to.
TinyPtrVector<DbgDeclareInst *> FindDbgDeclareUses(Value *V);

/// Finds the llvm.dbg.value intrinsics describing a value.
void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);

/// Finds the debug info intrinsics describing a value.
void findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgInsts, Value *V);
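// Example (a minimal sketch): collecting and printing the dbg.value intrinsics
// that describe some llvm::Value *V.
//
//   SmallVector<DbgValueInst *, 4> DbgValues;
//   findDbgValues(DbgValues, V);
//   for (DbgValueInst *DVI : DbgValues)
//     DVI->print(errs());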

/// Find subprogram that is enclosing this scope.
DISubprogram *getDISubprogram(const MDNode *Scope);

/// Produce a DebugLoc to use for each dbg.declare that is promoted to a
/// dbg.value.
DebugLoc getDebugValueLoc(DbgVariableIntrinsic *DII);

/// Strip debug info in the module if it exists.
///
/// To do this, we remove all calls to the debugger intrinsics and any named
/// metadata for debugging. We also remove debug locations for instructions.
/// Return true if module is modified.
bool StripDebugInfo(Module &M);
bool stripDebugInfo(Function &F);

/// Downgrade the debug info in a module to contain only line table information.
///
/// In order to convert debug info to what -gline-tables-only would have
/// created, this does the following:
///   1) Delete all debug intrinsics.
///   2) Delete all non-CU named metadata debug info nodes.
///   3) Create new DebugLocs for each instruction.
///   4) Create a new CU debug info, and similarly for every metadata node
///      that's reachable from the CU debug info.
///   All debug type metadata nodes are unreachable and garbage collected.
bool stripNonLineTableDebugInfo(Module &M);

/// Update the debug locations contained within the MD_loop metadata attached
/// to the instruction \p I, if one exists. \p Updater is applied to each
/// Metadata operand in the MD_loop metadata: the returned value is included
/// in the updated loop metadata node if it is non-null.
void updateLoopMetadataDebugLocations(
    Instruction &I, function_ref<Metadata *(Metadata *)> Updater);

/// Return Debug Info Metadata Version by checking module flags.
unsigned getDebugMetadataVersionFromModule(const Module &M);

/// Utility to find all debug info in a module.
///
/// DebugInfoFinder tries to list all debug info MDNodes used in a module. To
/// list debug info MDNodes used by an instruction, DebugInfoFinder uses
/// processDeclare, processValue and processLocation to handle DbgDeclareInst,
/// DbgValueInst and DbgLoc attached to instructions. processModule will go
/// through all DICompileUnits in llvm.dbg.cu and list debug info MDNodes
/// used by the CUs.
class DebugInfoFinder {
public:
  /// Process entire module and collect debug info anchors.
  void processModule(const Module &M);
  /// Process a single instruction and collect debug info anchors.
  void processInstruction(const Module &M, const Instruction &I);

  /// Process DbgVariableIntrinsic.
  void processVariable(const Module &M, const DbgVariableIntrinsic &DVI);
  /// Process debug info location.
  void processLocation(const Module &M, const DILocation *Loc);

  /// Process subprogram.
  void processSubprogram(DISubprogram *SP);

  /// Clear all lists.
  void reset();

private:
  void processCompileUnit(DICompileUnit *CU);
  void processScope(DIScope *Scope);
  void processType(DIType *DT);
  bool addCompileUnit(DICompileUnit *CU);
  bool addGlobalVariable(DIGlobalVariableExpression *DIG);
  bool addScope(DIScope *Scope);
  bool addSubprogram(DISubprogram *SP);
  bool addType(DIType *DT);

public:
  using compile_unit_iterator =
      SmallVectorImpl<DICompileUnit *>::const_iterator;
  using subprogram_iterator = SmallVectorImpl<DISubprogram *>::const_iterator;
  using global_variable_expression_iterator =
      SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator;
  using type_iterator = SmallVectorImpl<DIType *>::const_iterator;
  using scope_iterator = SmallVectorImpl<DIScope *>::const_iterator;

  iterator_range<compile_unit_iterator> compile_units() const {
    return make_range(CUs.begin(), CUs.end());
  }

  iterator_range<subprogram_iterator> subprograms() const {
    return make_range(SPs.begin(), SPs.end());
  }

  iterator_range<global_variable_expression_iterator> global_variables() const {
    return make_range(GVs.begin(), GVs.end());
  }

  iterator_range<type_iterator> types() const {
    return make_range(TYs.begin(), TYs.end());
  }

  iterator_range<scope_iterator> scopes() const {
    return make_range(Scopes.begin(), Scopes.end());
  }

  unsigned compile_unit_count() const { return CUs.size(); }
  unsigned global_variable_count() const { return GVs.size(); }
  unsigned subprogram_count() const { return SPs.size(); }
  unsigned type_count() const { return TYs.size(); }
  unsigned scope_count() const { return Scopes.size(); }

private:
  SmallVector<DICompileUnit *, 8> CUs;
  SmallVector<DISubprogram *, 8> SPs;
  SmallVector<DIGlobalVariableExpression *, 8> GVs;
  SmallVector<DIType *, 8> TYs;
  SmallVector<DIScope *, 8> Scopes;
  SmallPtrSet<const MDNode *, 32> NodesSeen;
};
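// Example (a minimal sketch): enumerating the subprograms reachable from a
// module M's compile units.
//
//   DebugInfoFinder Finder;
//   Finder.processModule(M);
//   for (DISubprogram *SP : Finder.subprograms())
//     SP->print(errs());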

/// Assignment Tracking (at).
namespace at {
//
// Utilities for enumerating storing instructions from an assignment ID.
//
/// A range of instructions.
using AssignmentInstRange =
    iterator_range<SmallVectorImpl<Instruction *>::iterator>;
/// Return a range of instructions (typically just one) that have \p ID
/// as an attachment.
/// Iterators invalidated by adding or removing DIAssignID metadata to/from any
/// instruction (including by deleting or cloning instructions).
AssignmentInstRange getAssignmentInsts(DIAssignID *ID);
/// Return a range of instructions (typically just one) that perform the
/// assignment that \p DAI encodes.
/// Iterators invalidated by adding or removing DIAssignID metadata to/from any
/// instruction (including by deleting or cloning instructions).
inline AssignmentInstRange getAssignmentInsts(const DbgAssignIntrinsic *DAI) {
  return getAssignmentInsts(DAI->getAssignID());
}

//
// Utilities for enumerating llvm.dbg.assign intrinsic from an assignment ID.
//
/// High level: this is an iterator for llvm.dbg.assign intrinsics.
/// Implementation details: this is a wrapper around Value's User iterator that
/// dereferences to a DbgAssignIntrinsic ptr rather than a User ptr.
class DbgAssignIt
    : public iterator_adaptor_base<DbgAssignIt, Value::user_iterator,
                                   typename std::iterator_traits<
                                       Value::user_iterator>::iterator_category,
                                   DbgAssignIntrinsic *, std::ptrdiff_t,
                                   DbgAssignIntrinsic **,
                                   DbgAssignIntrinsic *&> {
public:
  DbgAssignIt(Value::user_iterator It) : iterator_adaptor_base(It) {}
  DbgAssignIntrinsic *operator*() const { return cast<DbgAssignIntrinsic>(*I); }
};
/// A range of llvm.dbg.assign intrinsics.
using AssignmentMarkerRange = iterator_range<DbgAssignIt>;
/// Return a range of dbg.assign intrinsics which use \p ID as an operand.
/// Iterators invalidated by deleting an intrinsic contained in this range.
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID);
/// Return a range of dbg.assign intrinsics for which \p Inst performs the
/// assignment they encode.
/// Iterators invalidated by deleting an intrinsic contained in this range.
inline AssignmentMarkerRange getAssignmentMarkers(const Instruction *Inst) {
  if (auto *ID = Inst->getMetadata(LLVMContext::MD_DIAssignID))
    return getAssignmentMarkers(cast<DIAssignID>(ID));
  else
    return make_range(Value::user_iterator(), Value::user_iterator());
}
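// Example (a minimal sketch): visiting the dbg.assign intrinsics linked to an
// instruction *Inst (e.g. a store).
//
//   for (DbgAssignIntrinsic *DAI : getAssignmentMarkers(Inst))
//     DAI->print(errs());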

/// Delete the llvm.dbg.assign intrinsics linked to \p Inst.
void deleteAssignmentMarkers(const Instruction *Inst);

/// Replace all uses (and attachments) of \p Old with \p New.
void RAUW(DIAssignID *Old, DIAssignID *New);

/// Remove all Assignment Tracking related intrinsics and metadata from \p F.
void deleteAll(Function *F);

/// Calculate the fragment of the variable in \p DAI covered
/// from (Dest + SliceOffsetInBits)
/// to (Dest + SliceOffsetInBits + SliceSizeInBits).
///
/// Return false if it can't be calculated for any reason.
/// Result is set to nullopt if the intersect equals the variable fragment (or
/// variable size) in DAI.
///
/// Result contains a zero-sized fragment if there's no intersect.
bool calculateFragmentIntersect(
    const DataLayout &DL, const Value *Dest, uint64_t SliceOffsetInBits,
    uint64_t SliceSizeInBits, const DbgAssignIntrinsic *DAI,
    std::optional<DIExpression::FragmentInfo> &Result);

/// Helper struct for trackAssignments, below. We don't use the similar
/// DebugVariable class because trackAssignments doesn't (yet?) understand
/// partial variables (fragment info) as input, and we want to make that clear
/// and explicit using types. In addition, eventually we will want to understand
/// expressions that modify the base address too, which a DebugVariable doesn't
/// capture.
struct VarRecord {
  DILocalVariable *Var;
  DILocation *DL;

  VarRecord(DbgVariableIntrinsic *DVI)
      : Var(DVI->getVariable()), DL(getDebugValueLoc(DVI)) {}
  VarRecord(DILocalVariable *Var, DILocation *DL) : Var(Var), DL(DL) {}
  friend bool operator<(const VarRecord &LHS, const VarRecord &RHS) {
    return std::tie(LHS.Var, LHS.DL) < std::tie(RHS.Var, RHS.DL);
  }
  friend bool operator==(const VarRecord &LHS, const VarRecord &RHS) {
    return std::tie(LHS.Var, LHS.DL) == std::tie(RHS.Var, RHS.DL);
  }
};

/// Map of backing storage to a set of variables that are stored to it.
/// TODO: Backing storage shouldn't be limited to allocas only. Some local
/// variables have their storage allocated by the calling function (addresses
/// passed in with sret & byval parameters).
using StorageToVarsMap = DenseMap<const AllocaInst *, SmallSet<VarRecord, 2>>;

/// Track assignments to \p Vars between \p Start and \p End.
void trackAssignments(Function::iterator Start, Function::iterator End,
                      const StorageToVarsMap &Vars, const DataLayout &DL,
                      bool DebugPrints = false);
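
// A hedged sketch of client usage (names `F`, `AI`, and `DVI` are assumed,
// not defined in this header): map an alloca to the variable it backs, then
// track stores to it across the whole function.
//
// \code
//     at::StorageToVarsMap Vars;
//     Vars[AI].insert(at::VarRecord(DVI)); // DVI: the variable's dbg.declare.
//     at::trackAssignments(F.begin(), F.end(), Vars,
//                          F.getParent()->getDataLayout());
// \endcode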

/// Describes properties of a store that has a static size and offset into
/// some base storage. Used by the getAssignmentInfo functions.
struct AssignmentInfo {
  AllocaInst const *Base;  ///< Base storage.
  uint64_t OffsetInBits;   ///< Offset into Base.
  uint64_t SizeInBits;     ///< Number of bits stored.
  bool StoreToWholeAlloca; ///< SizeInBits equals the size of the base storage.

  AssignmentInfo(const DataLayout &DL, AllocaInst const *Base,
                 uint64_t OffsetInBits, uint64_t SizeInBits)
      : Base(Base), OffsetInBits(OffsetInBits), SizeInBits(SizeInBits),
        StoreToWholeAlloca(
            OffsetInBits == 0 &&
            SizeInBits == DL.getTypeSizeInBits(Base->getAllocatedType())) {}
};

std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
                                                const MemIntrinsic *I);
std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
                                                const StoreInst *SI);
std::optional<AssignmentInfo> getAssignmentInfo(const DataLayout &DL,
                                                const AllocaInst *AI);
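
// A minimal sketch (assumed client code; `DL` and `SI` are placeholders):
// classify a store against its backing alloca.
//
// \code
//     if (std::optional<at::AssignmentInfo> Info =
//             at::getAssignmentInfo(DL, SI))
//       if (Info->StoreToWholeAlloca)
//         ; // The store overwrites the entire backing storage.
// \endcode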

} // end namespace at

/// Convert @llvm.dbg.declare intrinsics into sets of @llvm.dbg.assign
/// intrinsics by treating stores to the dbg.declare'd address as assignments
/// to the variable. Not all kinds of variables are supported yet; those will
/// be left with their dbg.declare intrinsics.
/// The pass sets the debug-info-assignment-tracking module flag to true to
/// indicate assignment tracking has been enabled.
class AssignmentTrackingPass : public PassInfoMixin<AssignmentTrackingPass> {
  /// Note: this method does not set the debug-info-assignment-tracking module
  /// flag.
  bool runOnFunction(Function &F);

public:
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};

/// Return true if assignment tracking is enabled for module \p M.
bool isAssignmentTrackingEnabled(const Module &M);
} // end namespace llvm

#endif // LLVM_IR_DEBUGINFO_H
//===- llvm/IR/Metadata.h - Metadata definitions ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations for metadata subclasses.
/// They represent the different flavors of metadata that live in LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_METADATA_H
#define LLVM_IR_METADATA_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>

namespace llvm {

class Module;
class ModuleSlotTracker;
class raw_ostream;
template <typename T> class StringMapEntry;
template <typename ValueTy> class StringMapEntryStorage;
class Type;

enum LLVMConstants : uint32_t {
  DEBUG_METADATA_VERSION = 3 // Current debug info version number.
};

/// Magic number in the value profile metadata showing a target has been
/// promoted for the instruction and shouldn't be promoted again.
const uint64_t NOMORE_ICP_MAGICNUM = -1;

/// Root of the metadata hierarchy.
///
/// This is a root class for typeless data in the IR.
class Metadata {
  friend class ReplaceableMetadataImpl;

  /// RTTI.
  const unsigned char SubclassID;

protected:
  /// Active type of storage.
  enum StorageType { Uniqued, Distinct, Temporary };

  /// Storage flag for non-uniqued, otherwise unowned, metadata.
  unsigned char Storage : 7;

  unsigned char SubclassData1 : 1;
  unsigned short SubclassData16 = 0;
  unsigned SubclassData32 = 0;

public:
  enum MetadataKind {
#define HANDLE_METADATA_LEAF(CLASS) CLASS##Kind,
#include "llvm/IR/Metadata.def"
  };

protected:
  Metadata(unsigned ID, StorageType Storage)
      : SubclassID(ID), Storage(Storage), SubclassData1(false) {
    static_assert(sizeof(*this) == 8, "Metadata fields poorly packed");
  }

  ~Metadata() = default;

  /// Default handling of a changed operand, which asserts.
  ///
  /// If subclasses pass themselves in as owners to a tracking node reference,
  /// they must provide an implementation of this method.
  void handleChangedOperand(void *, Metadata *) {
    llvm_unreachable("Unimplemented in Metadata subclass");
  }

public:
  unsigned getMetadataID() const { return SubclassID; }

  /// User-friendly dump.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  ///
  /// Note: this uses an explicit overload instead of default arguments so that
  /// the nullptr version is easy to call from a debugger.
  ///
  /// @{
  void dump() const;
  void dump(const Module *M) const;
  /// @}

  /// Print.
  ///
  /// Prints definition of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void print(raw_ostream &OS, const Module *M = nullptr,
             bool IsForDebug = false) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST, const Module *M = nullptr,
             bool IsForDebug = false) const;
  /// @}

  /// Print as operand.
  ///
  /// Prints reference of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void printAsOperand(raw_ostream &OS, const Module *M = nullptr) const;
  void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
                      const Module *M = nullptr) const;
  /// @}
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef)

// Specialized opaque metadata conversions.
inline Metadata **unwrap(LLVMMetadataRef *MDs) {
  return reinterpret_cast<Metadata**>(MDs);
}

#define HANDLE_METADATA(CLASS) class CLASS;
#include "llvm/IR/Metadata.def"

// Provide specializations of isa so that we don't need definitions of
// subclasses to see if the metadata is a subclass.
#define HANDLE_METADATA_LEAF(CLASS)                                            \
  template <> struct isa_impl<CLASS, Metadata> {                               \
    static inline bool doit(const Metadata &MD) {                              \
      return MD.getMetadataID() == Metadata::CLASS##Kind;                      \
    }                                                                          \
  };
#include "llvm/IR/Metadata.def"

inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
  MD.print(OS);
  return OS;
}

/// Metadata wrapper in the Value hierarchy.
///
/// A member of the \a Value hierarchy to represent a reference to metadata.
/// This allows, e.g., intrinsics to have metadata as operands.
///
/// Notably, this is the only thing in either hierarchy that is allowed to
/// reference \a LocalAsMetadata.
class MetadataAsValue : public Value {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  Metadata *MD;

  MetadataAsValue(Type *Ty, Metadata *MD);

  /// Drop use of metadata (during teardown).
  void dropUse() { MD = nullptr; }

public:
  ~MetadataAsValue();

  static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
  static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);

  Metadata *getMetadata() const { return MD; }

  static bool classof(const Value *V) {
    return V->getValueID() == MetadataAsValueVal;
  }

private:
  void handleChangedMetadata(Metadata *MD);
  void track();
  void untrack();
};

/// API for tracking metadata references through RAUW and deletion.
///
/// Shared API for updating \a Metadata pointers in subclasses that support
/// RAUW.
///
/// This API is not meant to be used directly.  See \a TrackingMDRef for a
/// user-friendly tracking reference.
class MetadataTracking {
public:
  /// Track the reference to metadata.
  ///
  /// Register \c MD with \c *MD, if the subclass supports tracking.  If \c *MD
  /// gets RAUW'ed, \c MD will be updated to the new address.  If \c *MD gets
  /// deleted, \c MD will be set to \c nullptr.
  ///
  /// If tracking isn't supported, \c *MD will not change.
  ///
  /// \return true iff tracking is supported by \c MD.
  static bool track(Metadata *&MD) {
    return track(&MD, *MD, static_cast<Metadata *>(nullptr));
  }

  /// Track the reference to metadata for \a Metadata.
  ///
  /// As \a track(Metadata*&), but with support for calling back to \c Owner to
  /// tell it that its operand changed.  This could trigger \c Owner being
  /// re-uniqued.
  static bool track(void *Ref, Metadata &MD, Metadata &Owner) {
    return track(Ref, MD, &Owner);
  }

  /// Track the reference to metadata for \a MetadataAsValue.
  ///
  /// As \a track(Metadata*&), but with support for calling back to \c Owner to
  /// tell it that its operand changed.  This could trigger \c Owner being
  /// re-uniqued.
  static bool track(void *Ref, Metadata &MD, MetadataAsValue &Owner) {
    return track(Ref, MD, &Owner);
  }

  /// Stop tracking a reference to metadata.
  ///
  /// Stops \c *MD from tracking \c MD.
  static void untrack(Metadata *&MD) { untrack(&MD, *MD); }
  static void untrack(void *Ref, Metadata &MD);

  /// Move tracking from one reference to another.
  ///
  /// Semantically equivalent to \c untrack(MD) followed by \c track(New),
  /// except that ownership callbacks are maintained.
  ///
  /// Note: it is an error if \c *MD does not equal \c New.
  ///
  /// \return true iff tracking is supported by \c MD.
  static bool retrack(Metadata *&MD, Metadata *&New) {
    return retrack(&MD, *MD, &New);
  }
  static bool retrack(void *Ref, Metadata &MD, void *New);

  /// Check whether metadata is replaceable.
  static bool isReplaceable(const Metadata &MD);

  using OwnerTy = PointerUnion<MetadataAsValue *, Metadata *>;

private:
  /// Track a reference to metadata for an owner.
  ///
  /// Generalized version of tracking.
  static bool track(void *Ref, Metadata &MD, OwnerTy Owner);
};
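
// A hedged usage sketch (not from the original header): keep a Metadata
// pointer up to date across RAUW. `Temp` is an assumed temporary node
// (e.g. a TempMDTuple), which supports tracking.
//
// \code
//     Metadata *MD = Temp.get();
//     if (MetadataTracking::track(MD)) {
//       // If *Temp is RAUW'ed, MD now points at the replacement; if it is
//       // deleted, MD is set to nullptr.
//       MetadataTracking::untrack(MD);
//     }
// \endcode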

/// Shared implementation of use-lists for replaceable metadata.
///
/// Most metadata cannot be RAUW'ed.  This is a shared implementation of
/// use-lists and associated API for the two that support it (\a ValueAsMetadata
/// and \a TempMDNode).
class ReplaceableMetadataImpl {
  friend class MetadataTracking;

public:
  using OwnerTy = MetadataTracking::OwnerTy;

private:
  LLVMContext &Context;
  uint64_t NextIndex = 0;
  SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;

public:
  ReplaceableMetadataImpl(LLVMContext &Context) : Context(Context) {}

  ~ReplaceableMetadataImpl() {
    assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
  }

  LLVMContext &getContext() const { return Context; }

  /// Replace all uses of this with MD.
  ///
  /// Replace all uses of this with \c MD, which is allowed to be null.
  void replaceAllUsesWith(Metadata *MD);
  /// Replace all uses of the constant with Undef in debug info metadata.
  static void SalvageDebugInfo(const Constant &C);
  /// Returns the list of all DIArgList users of this.
  SmallVector<Metadata *> getAllArgListUsers();

  /// Resolve all uses of this.
  ///
  /// Resolve all uses of this, turning off RAUW permanently.  If \c
  /// ResolveUsers, call \a MDNode::resolve() on any users whose last operand
  /// is resolved.
  void resolveAllUses(bool ResolveUsers = true);

private:
  void addRef(void *Ref, OwnerTy Owner);
  void dropRef(void *Ref);
  void moveRef(void *Ref, void *New, const Metadata &MD);

  /// Lazily construct RAUW support on MD.
  ///
  /// If this is an unresolved MDNode, RAUW support will be created on-demand.
  /// ValueAsMetadata always has RAUW support.
  static ReplaceableMetadataImpl *getOrCreate(Metadata &MD);

  /// Get RAUW support on MD, if it exists.
  static ReplaceableMetadataImpl *getIfExists(Metadata &MD);

  /// Check whether this node will support RAUW.
  ///
  /// Returns \c true unless getOrCreate() would return null.
  static bool isReplaceable(const Metadata &MD);
};

/// Value wrapper in the Metadata hierarchy.
///
/// This is a custom value handle that allows other metadata to refer to
/// classes in the Value hierarchy.
///
/// Because of full uniquing support, each value is only wrapped by a single \a
/// ValueAsMetadata object, so the lookup maps are far more efficient than
/// those using ValueHandleBase.
class ValueAsMetadata : public Metadata, ReplaceableMetadataImpl {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  Value *V;

  /// Drop users without RAUW (during teardown).
  void dropUsers() {
    ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ false);
  }

protected:
  ValueAsMetadata(unsigned ID, Value *V)
      : Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) {
    assert(V && "Expected valid value");
  }

  ~ValueAsMetadata() = default;

public:
  static ValueAsMetadata *get(Value *V);

  static ConstantAsMetadata *getConstant(Value *C) {
    return cast<ConstantAsMetadata>(get(C));
  }

  static LocalAsMetadata *getLocal(Value *Local) {
    return cast<LocalAsMetadata>(get(Local));
  }

  static ValueAsMetadata *getIfExists(Value *V);

  static ConstantAsMetadata *getConstantIfExists(Value *C) {
    return cast_or_null<ConstantAsMetadata>(getIfExists(C));
  }

  static LocalAsMetadata *getLocalIfExists(Value *Local) {
    return cast_or_null<LocalAsMetadata>(getIfExists(Local));
  }

  Value *getValue() const { return V; }
  Type *getType() const { return V->getType(); }
  LLVMContext &getContext() const { return V->getContext(); }

  SmallVector<Metadata *> getAllArgListUsers() {
    return ReplaceableMetadataImpl::getAllArgListUsers();
  }

  static void handleDeletion(Value *V);
  static void handleRAUW(Value *From, Value *To);

protected:
  /// Handle collisions after \a Value::replaceAllUsesWith().
  ///
  /// RAUW isn't supported directly for \a ValueAsMetadata, but if the wrapped
  /// \a Value gets RAUW'ed and the target already exists, this is used to
  /// merge the two metadata nodes.
  void replaceAllUsesWith(Metadata *MD) {
    ReplaceableMetadataImpl::replaceAllUsesWith(MD);
  }

public:
  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind ||
           MD->getMetadataID() == ConstantAsMetadataKind;
  }
};

class ConstantAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  ConstantAsMetadata(Constant *C)
      : ValueAsMetadata(ConstantAsMetadataKind, C) {}

public:
  static ConstantAsMetadata *get(Constant *C) {
    return ValueAsMetadata::getConstant(C);
  }

  static ConstantAsMetadata *getIfExists(Constant *C) {
    return ValueAsMetadata::getConstantIfExists(C);
  }

  Constant *getValue() const {
    return cast<Constant>(ValueAsMetadata::getValue());
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == ConstantAsMetadataKind;
  }
};

class LocalAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  LocalAsMetadata(Value *Local)
      : ValueAsMetadata(LocalAsMetadataKind, Local) {
    assert(!isa<Constant>(Local) && "Expected local value");
  }

public:
  static LocalAsMetadata *get(Value *Local) {
    return ValueAsMetadata::getLocal(Local);
  }

  static LocalAsMetadata *getIfExists(Value *Local) {
    return ValueAsMetadata::getLocalIfExists(Local);
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind;
  }
};

/// Transitional API for extracting constants from Metadata.
///
/// This namespace contains transitional functions for metadata that points to
/// \a Constants.
///
/// In prehistory -- when metadata was a subclass of \a Value -- \a MDNode
/// operands could refer to any \a Value.  There was a lot of code like this:
///
/// \code
///     MDNode *N = ...;
///     auto *CI = dyn_cast<ConstantInt>(N->getOperand(2));
/// \endcode
///
/// Now that \a Value and \a Metadata are in separate hierarchies, maintaining
/// the semantics for \a isa(), \a cast(), \a dyn_cast() (etc.) requires three
/// steps: cast in the \a Metadata hierarchy, extraction of the \a Value, and
/// cast in the \a Value hierarchy.  Besides creating boiler-plate, this
/// requires subtle control flow changes.
///
/// The end-goal is to create a new type of metadata, called (e.g.) \a MDInt,
/// so that metadata can refer to numbers without traversing a bridge to the \a
/// Value hierarchy.  In this final state, the code above would look like this:
///
/// \code
///     MDNode *N = ...;
///     auto *MI = dyn_cast<MDInt>(N->getOperand(2));
/// \endcode
///
/// The API in this namespace supports the transition.  \a MDInt doesn't exist
/// yet, and even once it does, changing each metadata schema to use it is its
/// own mini-project.  In the meantime this API prevents us from introducing
/// complex and bug-prone control flow that will disappear in the end.  In
/// particular, the above code looks like this:
///
/// \code
///     MDNode *N = ...;
///     auto *CI = mdconst::dyn_extract<ConstantInt>(N->getOperand(2));
/// \endcode
///
/// The full set of provided functions includes:
///
///   mdconst::hasa                <=> isa
///   mdconst::extract             <=> cast
///   mdconst::extract_or_null     <=> cast_or_null
///   mdconst::dyn_extract         <=> dyn_cast
///   mdconst::dyn_extract_or_null <=> dyn_cast_or_null
///
/// The target of the cast must be a subclass of \a Constant.
namespace mdconst {

namespace detail {

template <class T> T &make();
template <class T, class Result> struct HasDereference {
  using Yes = char[1];
  using No = char[2];
  template <size_t N> struct SFINAE {};

  template <class U, class V>
  static Yes &hasDereference(SFINAE<sizeof(static_cast<V>(*make<U>()))> * = 0);
  template <class U, class V> static No &hasDereference(...);

  static const bool value =
      sizeof(hasDereference<T, Result>(nullptr)) == sizeof(Yes);
};
template <class V, class M> struct IsValidPointer {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            HasDereference<M, const Metadata &>::value;
};
template <class V, class M> struct IsValidReference {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            std::is_convertible<M, const Metadata &>::value;
};

} // end namespace detail

/// Check whether Metadata has a Value.
///
/// As an analogue to \a isa(), check whether \c MD wraps a \a Value of
/// type \c X.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, bool>
hasa(Y &&MD) {
  assert(MD && "Null pointer sent into hasa");
  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
    return isa<X>(V->getValue());
  return false;
}
template <class X, class Y>
inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, bool>
hasa(Y &MD) {
  return hasa(&MD);
}

/// Extract a Value from Metadata.
///
/// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
extract(Y &&MD) {
  return cast<X>(cast<ConstantAsMetadata>(MD)->getValue());
}
template <class X, class Y>
inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, X *>
extract(Y &MD) {
  return extract(&MD);
}

/// Extract a Value from Metadata, allowing null.
///
/// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, allowing \c MD to be null.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
extract_or_null(Y &&MD) {
  if (auto *V = cast_or_null<ConstantAsMetadata>(MD))
    return cast<X>(V->getValue());
  return nullptr;
}

/// Extract a Value from Metadata, if any.
///
/// As an analogue to \a dyn_cast(), extract the \a Value subclass \c X from
/// \c MD, returning null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
dyn_extract(Y &&MD) {
  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
    return dyn_cast<X>(V->getValue());
  return nullptr;
}

/// Extract a Value from Metadata, if any, allowing null.
///
/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, returning null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass, allowing \c MD to be null.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
dyn_extract_or_null(Y &&MD) {
  if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD))
    return dyn_cast<X>(V->getValue());
  return nullptr;
}

} // end namespace mdconst

//===----------------------------------------------------------------------===//
/// A single uniqued string.
///
/// These are used to efficiently contain a byte sequence for metadata.
/// MDString is always unnamed.
class MDString : public Metadata {
  friend class StringMapEntryStorage<MDString>;

  StringMapEntry<MDString> *Entry = nullptr;

  MDString() : Metadata(MDStringKind, Uniqued) {}

public:
  MDString(const MDString &) = delete;
  MDString &operator=(MDString &&) = delete;
  MDString &operator=(const MDString &) = delete;

  static MDString *get(LLVMContext &Context, StringRef Str);
  static MDString *get(LLVMContext &Context, const char *Str) {
    return get(Context, Str ? StringRef(Str) : StringRef());
  }

  StringRef getString() const;

  unsigned getLength() const { return (unsigned)getString().size(); }

  using iterator = StringRef::iterator;

  /// Pointer to the first byte of the string.
  iterator begin() const { return getString().begin(); }

  /// Pointer to one byte past the end of the string.
  iterator end() const { return getString().end(); }

  const unsigned char *bytes_begin() const { return getString().bytes_begin(); }
  const unsigned char *bytes_end() const { return getString().bytes_end(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == MDStringKind;
  }
};
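
// A minimal sketch of MDString uniquing (client code, assuming an existing
// LLVMContext `Ctx`):
//
// \code
//     MDString *A = MDString::get(Ctx, "deopt");
//     MDString *B = MDString::get(Ctx, "deopt");
//     assert(A == B && "Equal strings are uniqued to one node");
// \endcode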

/// A collection of metadata nodes that might be associated with a
/// memory access used by the alias-analysis infrastructure.
struct AAMDNodes {
  explicit AAMDNodes() = default;
  explicit AAMDNodes(MDNode *T, MDNode *TS, MDNode *S, MDNode *N)
      : TBAA(T), TBAAStruct(TS), Scope(S), NoAlias(N) {}

  bool operator==(const AAMDNodes &A) const {
    return TBAA == A.TBAA && TBAAStruct == A.TBAAStruct && Scope == A.Scope &&
           NoAlias == A.NoAlias;
  }

  bool operator!=(const AAMDNodes &A) const { return !(*this == A); }

  explicit operator bool() const {
    return TBAA || TBAAStruct || Scope || NoAlias;
  }

  /// The tag for type-based alias analysis.
  MDNode *TBAA = nullptr;

  /// The tag for type-based alias analysis (tbaa struct).
  MDNode *TBAAStruct = nullptr;

  /// The tag for alias scope specification (used with noalias).
  MDNode *Scope = nullptr;

  /// The tag specifying the noalias scope.
  MDNode *NoAlias = nullptr;

  // Shift the tbaa Metadata node to start 'off' bytes later.
  static MDNode *shiftTBAA(MDNode *M, size_t off);

  // Shift the tbaa.struct Metadata node to start 'off' bytes later.
  static MDNode *shiftTBAAStruct(MDNode *M, size_t off);

  // Extend tbaa Metadata node to apply to a series of bytes of length len.
  // A size of -1 denotes an unknown size.
  static MDNode *extendToTBAA(MDNode *TBAA, ssize_t len);

  /// Given two sets of AAMDNodes that apply to the same pointer,
  /// give the best AAMDNodes that are compatible with both (i.e. a set of
  /// nodes whose allowable aliasing conclusions are a subset of those
  /// allowable by both of the inputs). However, for efficiency
  /// reasons, do not create any new MDNodes.
  AAMDNodes intersect(const AAMDNodes &Other) const {
    AAMDNodes Result;
    Result.TBAA = Other.TBAA == TBAA ? TBAA : nullptr;
    Result.TBAAStruct = Other.TBAAStruct == TBAAStruct ? TBAAStruct : nullptr;
    Result.Scope = Other.Scope == Scope ? Scope : nullptr;
    Result.NoAlias = Other.NoAlias == NoAlias ? NoAlias : nullptr;
    return Result;
  }

  /// Create a new AAMDNode that describes this AAMDNode after applying a
  /// constant offset to the start of the pointer.
  AAMDNodes shift(size_t Offset) const {
    AAMDNodes Result;
    Result.TBAA = TBAA ? shiftTBAA(TBAA, Offset) : nullptr;
    Result.TBAAStruct =
        TBAAStruct ? shiftTBAAStruct(TBAAStruct, Offset) : nullptr;
    Result.Scope = Scope;
    Result.NoAlias = NoAlias;
    return Result;
  }

  /// Create a new AAMDNode that describes this AAMDNode after extending it to
  /// apply to a series of bytes of length Len. A size of -1 denotes an unknown
  /// size.
  AAMDNodes extendTo(ssize_t Len) const {
    AAMDNodes Result;
    Result.TBAA = TBAA ? extendToTBAA(TBAA, Len) : nullptr;
    // tbaa.struct contains (offset, size, type) triples. Extending the length
    // of the tbaa.struct doesn't require changing this (though more information
    // could be provided by adding more triples at subsequent lengths).
    Result.TBAAStruct = TBAAStruct;
    Result.Scope = Scope;
    Result.NoAlias = NoAlias;
    return Result;
  }

  /// Given two sets of AAMDNodes applying to potentially different locations,
  /// determine the best AAMDNodes that apply to both.
  AAMDNodes merge(const AAMDNodes &Other) const;

  /// Determine the best AAMDNodes after concatenating two different locations
  /// together. Different from `merge`, where different locations should
  /// overlap each other, `concat` puts non-overlapping locations together.
  AAMDNodes concat(const AAMDNodes &Other) const;
};
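
// A hedged sketch of combining AAMDNodes (assumes `A` and `B` describe the
// same pointer, e.g. taken from two loads being merged):
//
// \code
//     AAMDNodes Common = A.intersect(B); // Keep only tags equal in both.
//     AAMDNodes AtOff4 = A.shift(4);     // Same tags, 4 bytes further in.
// \endcode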

// Specialize DenseMapInfo for AAMDNodes.
template<>
struct DenseMapInfo<AAMDNodes> {
  static inline AAMDNodes getEmptyKey() {
    return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(),
                     nullptr, nullptr, nullptr);
  }

  static inline AAMDNodes getTombstoneKey() {
    return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(),
                     nullptr, nullptr, nullptr);
  }

  static unsigned getHashValue(const AAMDNodes &Val) {
    return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^
           DenseMapInfo<MDNode *>::getHashValue(Val.TBAAStruct) ^
           DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^
           DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias);
  }

  static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) {
    return LHS == RHS;
  }
};

/// Tracking metadata reference owned by Metadata.
///
/// Similar to \a TrackingMDRef, but it's expected to be owned by an instance
/// of \a Metadata, which has the option of registering itself for callbacks to
/// re-unique itself.
///
/// In particular, this is used by \a MDNode.
class MDOperand {
  Metadata *MD = nullptr;

public:
  MDOperand() = default;
  MDOperand(const MDOperand &) = delete;
  MDOperand(MDOperand &&Op) {
    MD = Op.MD;
    if (MD)
      (void)MetadataTracking::retrack(Op.MD, MD);
    Op.MD = nullptr;
  }
  MDOperand &operator=(const MDOperand &) = delete;
  MDOperand &operator=(MDOperand &&Op) {
    MD = Op.MD;
    if (MD)
      (void)MetadataTracking::retrack(Op.MD, MD);
    Op.MD = nullptr;
    return *this;
  }

  // Check if MDOperand is of type MDString and equals `Str`.
  bool equalsStr(StringRef Str) const {
    return isa<MDString>(this->get()) &&
           cast<MDString>(this->get())->getString() == Str;
  }

  ~MDOperand() { untrack(); }

  Metadata *get() const { return MD; }
  operator Metadata *() const { return get(); }
  Metadata *operator->() const { return get(); }
  Metadata &operator*() const { return *get(); }

  void reset() {
    untrack();
    MD = nullptr;
  }
  void reset(Metadata *MD, Metadata *Owner) {
    untrack();
    this->MD = MD;
    track(Owner);
  }

private:
  void track(Metadata *Owner) {
    if (MD) {
      if (Owner)
        MetadataTracking::track(this, *MD, *Owner);
      else
        MetadataTracking::track(MD);
    }
  }

  void untrack() {
    assert(static_cast<void *>(this) == &MD && "Expected same address");
    if (MD)
      MetadataTracking::untrack(MD);
  }
};

template <> struct simplify_type<MDOperand> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(MDOperand &MD) { return MD.get(); }
};

template <> struct simplify_type<const MDOperand> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(const MDOperand &MD) { return MD.get(); }
};

/// Pointer to the context, with optional RAUW support.
///
/// Either a raw (non-null) pointer to the \a LLVMContext, or an owned pointer
/// to \a ReplaceableMetadataImpl (which has a reference to \a LLVMContext).
class ContextAndReplaceableUses {
  PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr;

public:
  ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {}
  ContextAndReplaceableUses(
      std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses)
      : Ptr(ReplaceableUses.release()) {
    assert(getReplaceableUses() && "Expected non-null replaceable uses");
  }
  ContextAndReplaceableUses() = delete;
  ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete;
  ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete;
  ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete;
  ContextAndReplaceableUses &
  operator=(const ContextAndReplaceableUses &) = delete;
  ~ContextAndReplaceableUses() { delete getReplaceableUses(); }

  operator LLVMContext &() { return getContext(); }

  /// Whether this contains RAUW support.
  bool hasReplaceableUses() const {
    return isa<ReplaceableMetadataImpl *>(Ptr);
  }

  LLVMContext &getContext() const {
    if (hasReplaceableUses())
      return getReplaceableUses()->getContext();
    return *cast<LLVMContext *>(Ptr);
  }

  ReplaceableMetadataImpl *getReplaceableUses() const {
    if (hasReplaceableUses())
      return cast<ReplaceableMetadataImpl *>(Ptr);
    return nullptr;
  }

  /// Ensure that this has RAUW support, and then return it.
  ReplaceableMetadataImpl *getOrCreateReplaceableUses() {
    if (!hasReplaceableUses())
      makeReplaceable(std::make_unique<ReplaceableMetadataImpl>(getContext()));
    return getReplaceableUses();
  }

  /// Assign RAUW support to this.
  ///
  /// Make this replaceable, taking ownership of \c ReplaceableUses (which must
  /// not be null).
  void
  makeReplaceable(std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) {
    assert(ReplaceableUses && "Expected non-null replaceable uses");
    assert(&ReplaceableUses->getContext() == &getContext() &&
           "Expected same context");
    delete getReplaceableUses();
    Ptr = ReplaceableUses.release();
  }

  /// Drop RAUW support.
  ///
  /// Cede ownership of RAUW support, returning it.
  std::unique_ptr<ReplaceableMetadataImpl> takeReplaceableUses() {
    assert(hasReplaceableUses() && "Expected to own replaceable uses");
    std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses(
        getReplaceableUses());
    Ptr = &ReplaceableUses->getContext();
    return ReplaceableUses;
  }
};

struct TempMDNodeDeleter {
  inline void operator()(MDNode *Node) const;
};

#define HANDLE_MDNODE_LEAF(CLASS)                                              \
  using Temp##CLASS = std::unique_ptr<CLASS, TempMDNodeDeleter>;
#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#include "llvm/IR/Metadata.def"

/// Metadata node.
///
/// Metadata nodes can be uniqued, like constants, or distinct.  Temporary
/// metadata nodes (with full support for RAUW) can be used to delay uniquing
/// until forward references are known.  The basic metadata node is an \a
/// MDTuple.
///
/// There is limited support for RAUW at construction time.  At construction
/// time, if any operand is a temporary node (or an unresolved uniqued node,
/// which indicates a transitive temporary operand), the node itself will be
/// unresolved.  As soon as all operands become resolved, it will drop RAUW
/// support permanently.
///
/// If an unresolved node is part of a cycle, \a resolveCycles() needs
/// to be called on some member of the cycle once all temporary nodes have been
/// replaced.
///
/// MDNodes can be large or small, as well as resizable or non-resizable.
/// Large MDNodes' operands are allocated in a separate storage vector,
/// whereas small MDNodes' operands are co-allocated. Distinct and temporary
/// MDNodes are resizable, but only MDTuples support this capability.
///
/// Clients can add operands to resizable MDNodes using push_back().
class MDNode : public Metadata {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;
  friend class DIArgList;

  /// The header that is co-allocated with an MDNode along with its "small"
  /// operands. It is located immediately before the main body of the node.
  /// The operands are in turn located immediately before the header.
  /// For resizable MDNodes, the space for the storage vector is also allocated
  /// immediately before the header, overlapping with the operands.
  /// Explicitly set alignment because bitfields by default have an
  /// alignment of 1 on z/OS.
  struct alignas(alignof(size_t)) Header {
    bool IsResizable : 1;
    bool IsLarge : 1;
    size_t SmallSize : 4;
    size_t SmallNumOps : 4;
    size_t : sizeof(size_t) * CHAR_BIT - 10;

    unsigned NumUnresolved = 0;
    using LargeStorageVector = SmallVector<MDOperand, 0>;

    static constexpr size_t NumOpsFitInVector =
        sizeof(LargeStorageVector) / sizeof(MDOperand);
    static_assert(
        NumOpsFitInVector * sizeof(MDOperand) == sizeof(LargeStorageVector),
        "sizeof(LargeStorageVector) must be a multiple of sizeof(MDOperand)");

    static constexpr size_t MaxSmallSize = 15;

    static constexpr size_t getOpSize(unsigned NumOps) {
      return sizeof(MDOperand) * NumOps;
    }
    /// Returns the number of operands the node has space for based on its
    /// allocation characteristics.
    static size_t getSmallSize(size_t NumOps, bool IsResizable, bool IsLarge) {
      return IsLarge ? NumOpsFitInVector
                     : std::max(NumOps, NumOpsFitInVector * IsResizable);
    }
    /// Returns the number of bytes allocated for operands and header.
    static size_t getAllocSize(StorageType Storage, size_t NumOps) {
      return getOpSize(
                 getSmallSize(NumOps, isResizable(Storage), isLarge(NumOps))) +
             sizeof(Header);
    }

    /// Only temporary and distinct nodes are resizable.
    static bool isResizable(StorageType Storage) { return Storage != Uniqued; }
    static bool isLarge(size_t NumOps) { return NumOps > MaxSmallSize; }

    size_t getAllocSize() const {
      return getOpSize(SmallSize) + sizeof(Header);
    }
    void *getAllocation() {
      return reinterpret_cast<char *>(this + 1) -
             alignTo(getAllocSize(), alignof(uint64_t));
    }

    void *getLargePtr() const {
      static_assert(alignof(LargeStorageVector) <= alignof(Header),
                    "LargeStorageVector too strongly aligned");
      return reinterpret_cast<char *>(const_cast<Header *>(this)) -
             sizeof(LargeStorageVector);
    }

    void *getSmallPtr();

    LargeStorageVector &getLarge() {
      assert(IsLarge);
      return *reinterpret_cast<LargeStorageVector *>(getLargePtr());
    }

    const LargeStorageVector &getLarge() const {
      assert(IsLarge);
      return *reinterpret_cast<const LargeStorageVector *>(getLargePtr());
    }

    void resizeSmall(size_t NumOps);
    void resizeSmallToLarge(size_t NumOps);
    void resize(size_t NumOps);

    explicit Header(size_t NumOps, StorageType Storage);
    ~Header();

    MutableArrayRef<MDOperand> operands() {
      if (IsLarge)
        return getLarge();
      return MutableArrayRef(
          reinterpret_cast<MDOperand *>(this) - SmallSize, SmallNumOps);
    }

    ArrayRef<MDOperand> operands() const {
      if (IsLarge)
        return getLarge();
      return ArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize,
                      SmallNumOps);
    }

    unsigned getNumOperands() const {
      if (!IsLarge)
        return SmallNumOps;
      return getLarge().size();
    }
  };

  Header &getHeader() { return *(reinterpret_cast<Header *>(this) - 1); }

  const Header &getHeader() const {
    return *(reinterpret_cast<const Header *>(this) - 1);
  }

  ContextAndReplaceableUses Context;

protected:
  MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
         ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = std::nullopt);
  ~MDNode() = default;

  void *operator new(size_t Size, size_t NumOps, StorageType Storage);
  void operator delete(void *Mem);

  /// Required by std, but never called.
  void operator delete(void *, unsigned) {
    llvm_unreachable("Constructor throws?");
  }

  /// Required by std, but never called.
  void operator delete(void *, unsigned, bool) {
    llvm_unreachable("Constructor throws?");
  }

  void dropAllReferences();

  MDOperand *mutable_begin() { return getHeader().operands().begin(); }
  MDOperand *mutable_end() { return getHeader().operands().end(); }

  using mutable_op_range = iterator_range<MDOperand *>;

  mutable_op_range mutable_operands() {
    return mutable_op_range(mutable_begin(), mutable_end());
  }

public:
  MDNode(const MDNode &) = delete;
  void operator=(const MDNode &) = delete;
  void *operator new(size_t) = delete;

  static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs);
  static inline MDTuple *getIfExists(LLVMContext &Context,
                                     ArrayRef<Metadata *> MDs);
  static inline MDTuple *getDistinct(LLVMContext &Context,
                                     ArrayRef<Metadata *> MDs);
  static inline TempMDTuple getTemporary(LLVMContext &Context,
                                         ArrayRef<Metadata *> MDs);

  /// Create a (temporary) clone of this.
  TempMDNode clone() const;

  /// Deallocate a node created by getTemporary.
  ///
  /// Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining
  /// references will be reset.
  static void deleteTemporary(MDNode *N);

  LLVMContext &getContext() const { return Context.getContext(); }

  /// Replace a specific operand.
  void replaceOperandWith(unsigned I, Metadata *New);

  /// Check if node is fully resolved.
  ///
  /// If \a isTemporary(), this always returns \c false; if \a isDistinct(),
  /// this always returns \c true.
  ///
  /// If \a isUniqued(), returns \c true if this has already dropped RAUW
  /// support (because all operands are resolved).
  ///
  /// As forward declarations are resolved, their containers should get
  /// resolved automatically.  However, if this (or one of its operands) is
  /// involved in a cycle, \a resolveCycles() needs to be called explicitly.
  bool isResolved() const { return !isTemporary() && !getNumUnresolved(); }

  bool isUniqued() const { return Storage == Uniqued; }
  bool isDistinct() const { return Storage == Distinct; }
  bool isTemporary() const { return Storage == Temporary; }

  /// RAUW a temporary.
  ///
  /// \pre \a isTemporary() must be \c true.
  void replaceAllUsesWith(Metadata *MD) {
    assert(isTemporary() && "Expected temporary node");
    if (Context.hasReplaceableUses())
      Context.getReplaceableUses()->replaceAllUsesWith(MD);
  }

  /// Resolve cycles.
  ///
  /// Once all forward declarations have been resolved, force cycles to be
  /// resolved.
  ///
  /// \pre No operands (or operands' operands, etc.) have \a isTemporary().
  void resolveCycles();

  /// Resolve a unique, unresolved node.
  void resolve();

  /// Replace a temporary node with a permanent one.
  ///
  /// Try to create a uniqued version of \c N -- in place, if possible -- and
  /// return it.  If \c N cannot be uniqued, return a distinct node instead.
  template <class T>
  static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
  replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithPermanentImpl());
  }

  /// Replace a temporary node with a uniqued one.
  ///
  /// Create a uniqued version of \c N -- in place, if possible -- and return
  /// it.  Takes ownership of the temporary node.
  ///
  /// \pre N does not self-reference.
  template <class T>
  static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
  replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithUniquedImpl());
  }

  /// Replace a temporary node with a distinct one.
  ///
  /// Create a distinct version of \c N -- in place, if possible -- and return
  /// it.  Takes ownership of the temporary node.
  template <class T>
  static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
  replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithDistinctImpl());
  }

  /// Print in tree shape.
  ///
  /// Prints definition of \c this in tree shape.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void printTree(raw_ostream &OS, const Module *M = nullptr) const;
  void printTree(raw_ostream &OS, ModuleSlotTracker &MST,
                 const Module *M = nullptr) const;
  /// @}

  /// User-friendly dump in tree shape.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  ///
  /// Note: this uses an explicit overload instead of default arguments so that
  /// the nullptr version is easy to call from a debugger.
  ///
  /// @{
  void dumpTree() const;
  void dumpTree(const Module *M) const;
  /// @}

private:
  MDNode *replaceWithPermanentImpl();
  MDNode *replaceWithUniquedImpl();
  MDNode *replaceWithDistinctImpl();

protected:
  /// Set an operand.
  ///
  /// Sets the operand directly, without worrying about uniquing.
  void setOperand(unsigned I, Metadata *New);

  unsigned getNumUnresolved() const { return getHeader().NumUnresolved; }

  void setNumUnresolved(unsigned N) { getHeader().NumUnresolved = N; }
  void storeDistinctInContext();
  template <class T, class StoreT>
  static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
  template <class T> static T *storeImpl(T *N, StorageType Storage);

  /// Resize the node to hold \a NumOps operands.
  ///
  /// \pre \a isTemporary() or \a isDistinct()
  /// \pre MetadataID == MDTupleKind
  void resize(size_t NumOps) {
    assert(!isUniqued() && "Resizing is not supported for uniqued nodes");
    assert(getMetadataID() == MDTupleKind &&
           "Resizing is not supported for this node kind");
    getHeader().resize(NumOps);
  }

private:
  void handleChangedOperand(void *Ref, Metadata *New);

  /// Drop RAUW support, if any.
  void dropReplaceableUses();

  void resolveAfterOperandChange(Metadata *Old, Metadata *New);
  void decrementUnresolvedOperandCount();
  void countUnresolvedOperands();

  /// Mutate this to be "uniqued".
  ///
  /// Mutate this so that \a isUniqued().
  /// \pre \a isTemporary().
  /// \pre already added to uniquing set.
  void makeUniqued();

  /// Mutate this to be "distinct".
  ///
  /// Mutate this so that \a isDistinct().
  /// \pre \a isTemporary().
  void makeDistinct();

  void deleteAsSubclass();
  MDNode *uniquify();
  void eraseFromStore();

  template <class NodeTy> struct HasCachedHash;
  template <class NodeTy>
  static void dispatchRecalculateHash(NodeTy *N, std::true_type) {
    N->recalculateHash();
  }
  template <class NodeTy>
  static void dispatchRecalculateHash(NodeTy *, std::false_type) {}
  template <class NodeTy>
  static void dispatchResetHash(NodeTy *N, std::true_type) {
    N->setHash(0);
  }
  template <class NodeTy>
  static void dispatchResetHash(NodeTy *, std::false_type) {}

  /// Merge branch weights from two direct callsites.
  static MDNode *mergeDirectCallProfMetadata(MDNode *A, MDNode *B,
                                             const Instruction *AInstr,
                                             const Instruction *BInstr);

public:
  using op_iterator = const MDOperand *;
  using op_range = iterator_range<op_iterator>;

  op_iterator op_begin() const {
    return const_cast<MDNode *>(this)->mutable_begin();
  }

  op_iterator op_end() const {
    return const_cast<MDNode *>(this)->mutable_end();
  }

  ArrayRef<MDOperand> operands() const { return getHeader().operands(); }

  const MDOperand &getOperand(unsigned I) const {
    assert(I < getNumOperands() && "Out of range");
    return getHeader().operands()[I];
  }

  /// Return number of MDNode operands.
  unsigned getNumOperands() const { return getHeader().getNumOperands(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
#define HANDLE_MDNODE_LEAF(CLASS)                                              \
  case CLASS##Kind:                                                            \
    return true;
#include "llvm/IR/Metadata.def"
    }
  }

  /// Check whether MDNode is a vtable access.
  bool isTBAAVtableAccess() const;

  /// Methods for metadata merging.
  static MDNode *concatenate(MDNode *A, MDNode *B);
  static MDNode *intersect(MDNode *A, MDNode *B);
  static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B);
  static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
  static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
  static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B);
  static MDNode *getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B);
  /// Merge !prof metadata from two instructions.
  /// Currently only implemented with direct callsites with branch weights.
  static MDNode *getMergedProfMetadata(MDNode *A, MDNode *B,
                                       const Instruction *AInstr,
                                       const Instruction *BInstr);
};

/// Tuple of metadata.
///
/// This is the simple \a MDNode arbitrary tuple.  Nodes are uniqued by
/// default based on their operands.
class MDTuple : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  MDTuple(LLVMContext &C, StorageType Storage, unsigned Hash,
          ArrayRef<Metadata *> Vals)
      : MDNode(C, MDTupleKind, Storage, Vals) {
    setHash(Hash);
  }

  ~MDTuple() { dropAllReferences(); }

  void setHash(unsigned Hash) { SubclassData32 = Hash; }
  void recalculateHash();

  static MDTuple *getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
                          StorageType Storage, bool ShouldCreate = true);

  TempMDTuple cloneImpl() const {
    ArrayRef<MDOperand> Operands = operands();
    return getTemporary(getContext(), SmallVector<Metadata *, 4>(
                                          Operands.begin(), Operands.end()));
  }

public:
  /// Get the hash, if any.
  unsigned getHash() const { return SubclassData32; }

  static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Uniqued);
  }

  static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false);
  }

  /// Return a distinct node.
  ///
  /// Return a distinct node -- i.e., a node that is not uniqued.
  static MDTuple *getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Distinct);
  }

  /// Return a temporary node.
  ///
  /// For use in constructing cyclic MDNode structures. A temporary MDNode is
  /// not uniqued, may be RAUW'd, and must be manually deleted with
  /// deleteTemporary.
  static TempMDTuple getTemporary(LLVMContext &Context,
                                  ArrayRef<Metadata *> MDs) {
    return TempMDTuple(getImpl(Context, MDs, Temporary));
  }

  /// Return a (temporary) clone of this.
  TempMDTuple clone() const { return cloneImpl(); }

  /// Append an element to the tuple. This will resize the node.
  void push_back(Metadata *MD) {
    size_t NumOps = getNumOperands();
    resize(NumOps + 1);
    setOperand(NumOps, MD);
  }

  /// Shrink the operands by 1.
  void pop_back() { resize(getNumOperands() - 1); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == MDTupleKind;
  }
};
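
// A hedged sketch of the uniqued/temporary workflow described above (client
// code; `Ctx` is an assumed LLVMContext):
//
// \code
//     // Uniqued: identical operands yield the identical node.
//     MDTuple *A = MDTuple::get(Ctx, {MDString::get(Ctx, "x")});
//     assert(A == MDTuple::get(Ctx, {MDString::get(Ctx, "x")}));
//
//     // Temporary: stands in for a forward reference, then gets RAUW'ed.
//     TempMDTuple Temp = MDTuple::getTemporary(Ctx, std::nullopt);
//     MDTuple *User = MDTuple::get(Ctx, {Temp.get()});
//     Temp->replaceAllUsesWith(A); // User now references A and is re-uniqued.
// \endcode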

MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::get(Context, MDs);
}

MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getIfExists(Context, MDs);
}

MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getDistinct(Context, MDs);
}

TempMDTuple MDNode::getTemporary(LLVMContext &Context,
                                 ArrayRef<Metadata *> MDs) {
  return MDTuple::getTemporary(Context, MDs);
}

void TempMDNodeDeleter::operator()(MDNode *Node) const {
  MDNode::deleteTemporary(Node);
}

/// This is a simple wrapper around an MDNode which provides a higher-level
/// interface by hiding the details of how alias analysis information is encoded
/// in its operands.
class AliasScopeNode {
  const MDNode *Node = nullptr;

public:
  AliasScopeNode() = default;
  explicit AliasScopeNode(const MDNode *N) : Node(N) {}

  /// Get the MDNode for this AliasScopeNode.
  const MDNode *getNode() const { return Node; }

  /// Get the MDNode for this AliasScopeNode's domain.
  const MDNode *getDomain() const {
    if (Node->getNumOperands() < 2)
      return nullptr;
    return dyn_cast_or_null<MDNode>(Node->getOperand(1));
  }
  StringRef getName() const {
    if (Node->getNumOperands() > 2)
      if (MDString *N = dyn_cast_or_null<MDString>(Node->getOperand(2)))
        return N->getString();
    return StringRef();
  }
};

/// Typed iterator through MDNode operands.
///
/// An iterator that transforms an \a MDNode::iterator into an iterator over a
/// particular Metadata subclass.
template <class T> class TypedMDOperandIterator {
  MDNode::op_iterator I = nullptr;

public:
  using iterator_category = std::input_iterator_tag;
  using value_type = T *;
  using difference_type = std::ptrdiff_t;
  using pointer = void;
  using reference = T *;

  TypedMDOperandIterator() = default;
  explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {}

  T *operator*() const { return cast_or_null<T>(*I); }

  TypedMDOperandIterator &operator++() {
    ++I;
    return *this;
  }

  TypedMDOperandIterator operator++(int) {
    TypedMDOperandIterator Temp(*this);
    ++I;
    return Temp;
  }

  bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; }
  bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; }
};

/// Typed, array-like tuple of metadata.
///
/// This is a wrapper for \a MDTuple that makes it act like an array holding a
/// particular type of metadata.
template <class T> class MDTupleTypedArrayWrapper {
  const MDTuple *N = nullptr;

public:
  MDTupleTypedArrayWrapper() = default;
  MDTupleTypedArrayWrapper(const MDTuple *N) : N(N) {}

  template <class U>
  MDTupleTypedArrayWrapper(
      const MDTupleTypedArrayWrapper<U> &Other,
      std::enable_if_t<std::is_convertible<U *, T *>::value> * = nullptr)
      : N(Other.get()) {}

  template <class U>
  explicit MDTupleTypedArrayWrapper(
      const MDTupleTypedArrayWrapper<U> &Other,
      std::enable_if_t<!std::is_convertible<U *, T *>::value> * = nullptr)
      : N(Other.get()) {}

  explicit operator bool() const { return get(); }
  explicit operator MDTuple *() const { return get(); }

  MDTuple *get() const { return const_cast<MDTuple *>(N); }
  MDTuple *operator->() const { return get(); }
  MDTuple &operator*() const { return *get(); }

  // FIXME: Fix callers and remove condition on N.
  unsigned size() const { return N ? N->getNumOperands() : 0u; }
  bool empty() const { return N ? N->getNumOperands() == 0 : true; }
  T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }

  // FIXME: Fix callers and remove condition on N.
  using iterator = TypedMDOperandIterator<T>;

  iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
  iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
};

#define HANDLE_METADATA(CLASS)                                                 \
  using CLASS##Array = MDTupleTypedArrayWrapper<CLASS>;
#include "llvm/IR/Metadata.def"
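
// A minimal sketch using a generated wrapper (assumed client code; `Ctx` is
// an existing LLVMContext): view a tuple of MDStrings as a typed array.
//
// \code
//     MDTuple *Names = MDTuple::get(Ctx, {MDString::get(Ctx, "a"),
//                                         MDString::get(Ctx, "b")});
//     MDStringArray Arr(Names);
//     for (MDString *S : Arr)
//       (void)S->getString(); // "a", then "b".
// \endcode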

/// Placeholder metadata for operands of distinct MDNodes.
///
/// This is a lightweight placeholder for an operand of a distinct node.  Its
/// purpose is to help track forward references when creating a distinct node.
/// This allows distinct nodes involved in a cycle to be constructed before
/// their operands without requiring a heavyweight temporary node with
/// full-blown RAUW support.
///
/// Each placeholder supports only a single MDNode user.  Clients should pass
/// an ID, retrieved via \a getID(), to indicate the "real" operand that this
/// should be replaced with.
///
/// While it would be possible to implement move operators, they would be
/// fairly expensive.  Leave them unimplemented to discourage their use
/// (clients can use std::deque, std::list, BumpPtrAllocator, etc.).
class DistinctMDOperandPlaceholder : public Metadata {
  friend class MetadataTracking;

  Metadata **Use = nullptr;

public:
  explicit DistinctMDOperandPlaceholder(unsigned ID)
      : Metadata(DistinctMDOperandPlaceholderKind, Distinct) {
    SubclassData32 = ID;
  }

  DistinctMDOperandPlaceholder() = delete;
  DistinctMDOperandPlaceholder(DistinctMDOperandPlaceholder &&) = delete;
  DistinctMDOperandPlaceholder(const DistinctMDOperandPlaceholder &) = delete;

  ~DistinctMDOperandPlaceholder() {
    if (Use)
      *Use = nullptr;
  }

  unsigned getID() const { return SubclassData32; }

  /// Replace the use of this with MD.
  void replaceUseWith(Metadata *MD) {
    if (!Use)
      return;
    *Use = MD;

    if (*Use)
      MetadataTracking::track(*Use);

    Metadata *T = cast<Metadata>(this);
    MetadataTracking::untrack(T);
    assert(!Use && "Use is still being tracked despite being untracked!");
  }
};

//===----------------------------------------------------------------------===//
/// A tuple of MDNodes.
///
/// Despite its name, a NamedMDNode isn't itself an MDNode.
///
/// NamedMDNodes are named module-level entities that contain lists of MDNodes.
///
/// It is illegal for a NamedMDNode to appear as an operand of an MDNode.
class NamedMDNode : public ilist_node<NamedMDNode> {
  friend class LLVMContextImpl;
  friend class Module;

  std::string Name;
  Module *Parent = nullptr;
  void *Operands; // SmallVector<TrackingMDRef, 4>

  void setParent(Module *M) { Parent = M; }

  explicit NamedMDNode(const Twine &N);

  template <class T1, class T2> class op_iterator_impl {
    friend class NamedMDNode;

    const NamedMDNode *Node = nullptr;
    unsigned Idx = 0;

    op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) {}

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = T2;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    op_iterator_impl() = default;

    bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
    bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }

    op_iterator_impl &operator++() {
      ++Idx;
      return *this;
    }

    op_iterator_impl operator++(int) {
      op_iterator_impl tmp(*this);
      operator++();
      return tmp;
    }

    op_iterator_impl &operator--() {
      --Idx;
      return *this;
    }

    op_iterator_impl operator--(int) {
      op_iterator_impl tmp(*this);
      operator--();
      return tmp;
    }

    T1 operator*() const { return Node->getOperand(Idx); }
  };

public:
  NamedMDNode(const NamedMDNode &) = delete;
  ~NamedMDNode();

  /// Drop all references and remove the node from parent module.
  void eraseFromParent();

  /// Remove all uses and clear node vector.
  void dropAllReferences() { clearOperands(); }
  /// Drop all references to this node's operands.
  void clearOperands();

  /// Get the module that holds this named metadata collection.
  inline Module *getParent() { return Parent; }
  inline const Module *getParent() const { return Parent; }

  MDNode *getOperand(unsigned i) const;
  unsigned getNumOperands() const;
  void addOperand(MDNode *M);
  void setOperand(unsigned I, MDNode *New);
  StringRef getName() const;
  void print(raw_ostream &ROS, bool IsForDebug = false) const;
  void print(raw_ostream &ROS, ModuleSlotTracker &MST,
             bool IsForDebug = false) const;
  void dump() const;

  // ---------------------------------------------------------------------------
  // Operand Iterator interface...
  //
  using op_iterator = op_iterator_impl<MDNode *, MDNode>;

  op_iterator op_begin() { return op_iterator(this, 0); }
  op_iterator op_end()   { return op_iterator(this, getNumOperands()); }

  using const_op_iterator = op_iterator_impl<const MDNode *, MDNode>;

  const_op_iterator op_begin() const { return const_op_iterator(this, 0); }
  const_op_iterator op_end()   const { return const_op_iterator(this, getNumOperands()); }

  inline iterator_range<op_iterator>  operands() {
    return make_range(op_begin(), op_end());
  }
  inline iterator_range<const_op_iterator> operands() const {
    return make_range(op_begin(), op_end());
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(NamedMDNode, LLVMNamedMDNodeRef)

} // end namespace llvm

#endif // LLVM_IR_METADATA_H
// File: IR/IntrinsicsBPF.td
//===- IntrinsicsBPF.td - Defines BPF intrinsics -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the BPF-specific intrinsics.
//
//===----------------------------------------------------------------------===//

// Specialized loads from packet
let TargetPrefix = "bpf" in {  // All intrinsics start with "llvm.bpf."
  def int_bpf_load_byte : ClangBuiltin<"__builtin_bpf_load_byte">,
              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
  def int_bpf_load_half : ClangBuiltin<"__builtin_bpf_load_half">,
              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
  def int_bpf_load_word : ClangBuiltin<"__builtin_bpf_load_word">,
              Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
  def int_bpf_pseudo : ClangBuiltin<"__builtin_bpf_pseudo">,
              Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]>;
  def int_bpf_preserve_field_info : ClangBuiltin<"__builtin_bpf_preserve_field_info">,
              Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i64_ty],
              [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_bpf_btf_type_id : ClangBuiltin<"__builtin_bpf_btf_type_id">,
              Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
              [IntrNoMem]>;
  def int_bpf_preserve_type_info : ClangBuiltin<"__builtin_bpf_preserve_type_info">,
              Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
              [IntrNoMem]>;
  def int_bpf_preserve_enum_value : ClangBuiltin<"__builtin_bpf_preserve_enum_value">,
              Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_ptr_ty, llvm_i64_ty],
              [IntrNoMem]>;
  def int_bpf_passthrough : ClangBuiltin<"__builtin_bpf_passthrough">,
              Intrinsic<[llvm_any_ty], [llvm_i32_ty, llvm_any_ty], [IntrNoMem]>;
  def int_bpf_compare : ClangBuiltin<"__builtin_bpf_compare">,
              Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_anyint_ty, llvm_anyint_ty],
              [IntrNoMem]>;
}
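
// For reference, a hedged sketch (not part of this file) of how the first
// definition above surfaces at the IR level; the value names are illustrative:
//
//   %b = call i64 @llvm.bpf.load.byte(ptr %pkt, i64 %offset)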
// File: IR/IntrinsicsVEVL.gen.td
let TargetPrefix = "ve" in def int_ve_vl_vld_vssl : ClangBuiltin<"__builtin_ve_vl_vld_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
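// Note on naming (an inference from the definitions in this generated file,
// not an authoritative key): the suffix after the last underscore appears to
// spell the result and operand kinds in order -- v: vector, s: scalar,
// m: 256-bit mask, M: 512-bit mask, l: vector length -- so "_vssl" above is a
// vector result from a scalar stride, a scalar (pointer) base, and a vector
// length.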
let TargetPrefix = "ve" in def int_ve_vl_vld_vssvl : ClangBuiltin<"__builtin_ve_vl_vld_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldnc_vssl : ClangBuiltin<"__builtin_ve_vl_vldnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldu_vssl : ClangBuiltin<"__builtin_ve_vl_vldu_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldu_vssvl : ClangBuiltin<"__builtin_ve_vl_vldu_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldunc_vssl : ClangBuiltin<"__builtin_ve_vl_vldunc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldunc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldunc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlsx_vssl : ClangBuiltin<"__builtin_ve_vl_vldlsx_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlsx_vssvl : ClangBuiltin<"__builtin_ve_vl_vldlsx_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlsxnc_vssl : ClangBuiltin<"__builtin_ve_vl_vldlsxnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlsxnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldlsxnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlzx_vssl : ClangBuiltin<"__builtin_ve_vl_vldlzx_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlzx_vssvl : ClangBuiltin<"__builtin_ve_vl_vldlzx_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlzxnc_vssl : ClangBuiltin<"__builtin_ve_vl_vldlzxnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldlzxnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldlzxnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vld2d_vssl : ClangBuiltin<"__builtin_ve_vl_vld2d_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vld2d_vssvl : ClangBuiltin<"__builtin_ve_vl_vld2d_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vld2dnc_vssl : ClangBuiltin<"__builtin_ve_vl_vld2dnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vld2dnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vld2dnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldu2d_vssl : ClangBuiltin<"__builtin_ve_vl_vldu2d_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldu2d_vssvl : ClangBuiltin<"__builtin_ve_vl_vldu2d_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldu2dnc_vssl : ClangBuiltin<"__builtin_ve_vl_vldu2dnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldu2dnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldu2dnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dsx_vssl : ClangBuiltin<"__builtin_ve_vl_vldl2dsx_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dsx_vssvl : ClangBuiltin<"__builtin_ve_vl_vldl2dsx_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dsxnc_vssl : ClangBuiltin<"__builtin_ve_vl_vldl2dsxnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dsxnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldl2dsxnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dzx_vssl : ClangBuiltin<"__builtin_ve_vl_vldl2dzx_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dzx_vssvl : ClangBuiltin<"__builtin_ve_vl_vldl2dzx_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dzxnc_vssl : ClangBuiltin<"__builtin_ve_vl_vldl2dzxnc_vssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldl2dzxnc_vssvl : ClangBuiltin<"__builtin_ve_vl_vldl2dzxnc_vssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, llvm_ptr_ty, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst_vssl : ClangBuiltin<"__builtin_ve_vl_vst_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst_vssml : ClangBuiltin<"__builtin_ve_vl_vst_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstnc_vssl : ClangBuiltin<"__builtin_ve_vl_vstnc_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstnc_vssml : ClangBuiltin<"__builtin_ve_vl_vstnc_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstot_vssl : ClangBuiltin<"__builtin_ve_vl_vstot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstot_vssml : ClangBuiltin<"__builtin_ve_vl_vstot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstncot_vssl : ClangBuiltin<"__builtin_ve_vl_vstncot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstncot_vssml : ClangBuiltin<"__builtin_ve_vl_vstncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu_vssl : ClangBuiltin<"__builtin_ve_vl_vstu_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu_vssml : ClangBuiltin<"__builtin_ve_vl_vstu_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstunc_vssl : ClangBuiltin<"__builtin_ve_vl_vstunc_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstunc_vssml : ClangBuiltin<"__builtin_ve_vl_vstunc_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstuot_vssl : ClangBuiltin<"__builtin_ve_vl_vstuot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstuot_vssml : ClangBuiltin<"__builtin_ve_vl_vstuot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstuncot_vssl : ClangBuiltin<"__builtin_ve_vl_vstuncot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstuncot_vssml : ClangBuiltin<"__builtin_ve_vl_vstuncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl_vssl : ClangBuiltin<"__builtin_ve_vl_vstl_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl_vssml : ClangBuiltin<"__builtin_ve_vl_vstl_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstlnc_vssl : ClangBuiltin<"__builtin_ve_vl_vstlnc_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstlnc_vssml : ClangBuiltin<"__builtin_ve_vl_vstlnc_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstlot_vssl : ClangBuiltin<"__builtin_ve_vl_vstlot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstlot_vssml : ClangBuiltin<"__builtin_ve_vl_vstlot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstlncot_vssl : ClangBuiltin<"__builtin_ve_vl_vstlncot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstlncot_vssml : ClangBuiltin<"__builtin_ve_vl_vstlncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2d_vssl : ClangBuiltin<"__builtin_ve_vl_vst2d_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2d_vssml : ClangBuiltin<"__builtin_ve_vl_vst2d_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2dnc_vssl : ClangBuiltin<"__builtin_ve_vl_vst2dnc_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2dnc_vssml : ClangBuiltin<"__builtin_ve_vl_vst2dnc_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2dot_vssl : ClangBuiltin<"__builtin_ve_vl_vst2dot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2dot_vssml : ClangBuiltin<"__builtin_ve_vl_vst2dot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2dncot_vssl : ClangBuiltin<"__builtin_ve_vl_vst2dncot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vst2dncot_vssml : ClangBuiltin<"__builtin_ve_vl_vst2dncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2d_vssl : ClangBuiltin<"__builtin_ve_vl_vstu2d_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2d_vssml : ClangBuiltin<"__builtin_ve_vl_vstu2d_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2dnc_vssl : ClangBuiltin<"__builtin_ve_vl_vstu2dnc_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2dnc_vssml : ClangBuiltin<"__builtin_ve_vl_vstu2dnc_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2dot_vssl : ClangBuiltin<"__builtin_ve_vl_vstu2dot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2dot_vssml : ClangBuiltin<"__builtin_ve_vl_vstu2dot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2dncot_vssl : ClangBuiltin<"__builtin_ve_vl_vstu2dncot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstu2dncot_vssml : ClangBuiltin<"__builtin_ve_vl_vstu2dncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2d_vssl : ClangBuiltin<"__builtin_ve_vl_vstl2d_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2d_vssml : ClangBuiltin<"__builtin_ve_vl_vstl2d_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2dnc_vssl : ClangBuiltin<"__builtin_ve_vl_vstl2dnc_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2dnc_vssml : ClangBuiltin<"__builtin_ve_vl_vstl2dnc_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2dot_vssl : ClangBuiltin<"__builtin_ve_vl_vstl2dot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2dot_vssml : ClangBuiltin<"__builtin_ve_vl_vstl2dot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2dncot_vssl : ClangBuiltin<"__builtin_ve_vl_vstl2dncot_vssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vstl2dncot_vssml : ClangBuiltin<"__builtin_ve_vl_vstl2dncot_vssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<i64>, llvm_ptr_ty, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pfchv_ssl : ClangBuiltin<"__builtin_ve_vl_pfchv_ssl">, Intrinsic<[], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrInaccessibleMemOrArgMemOnly]>;
let TargetPrefix = "ve" in def int_ve_vl_pfchvnc_ssl : ClangBuiltin<"__builtin_ve_vl_pfchvnc_ssl">, Intrinsic<[], [LLVMType<i64>, llvm_ptr_ty, LLVMType<i32>], [IntrInaccessibleMemOrArgMemOnly]>;
let TargetPrefix = "ve" in def int_ve_vl_lsv_vvss : ClangBuiltin<"__builtin_ve_vl_lsv_vvss">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i64>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lvsl_svs : ClangBuiltin<"__builtin_ve_vl_lvsl_svs">, Intrinsic<[LLVMType<i64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lvsd_svs : ClangBuiltin<"__builtin_ve_vl_lvsd_svs">, Intrinsic<[LLVMType<f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lvss_svs : ClangBuiltin<"__builtin_ve_vl_lvss_svs">, Intrinsic<[LLVMType<f32>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lvm_mmss : ClangBuiltin<"__builtin_ve_vl_lvm_mmss">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<i64>, LLVMType<i64>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lvm_MMss : ClangBuiltin<"__builtin_ve_vl_lvm_MMss">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<i64>, LLVMType<i64>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_svm_sms : ClangBuiltin<"__builtin_ve_vl_svm_sms">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i64>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_svm_sMs : ClangBuiltin<"__builtin_ve_vl_svm_sMs">, Intrinsic<[LLVMType<i64>], [LLVMType<v512i1>, LLVMType<i64>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdd_vsl : ClangBuiltin<"__builtin_ve_vl_vbrdd_vsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdd_vsvl : ClangBuiltin<"__builtin_ve_vl_vbrdd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdd_vsmvl : ClangBuiltin<"__builtin_ve_vl_vbrdd_vsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdl_vsl : ClangBuiltin<"__builtin_ve_vl_vbrdl_vsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdl_vsvl : ClangBuiltin<"__builtin_ve_vl_vbrdl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdl_vsmvl : ClangBuiltin<"__builtin_ve_vl_vbrdl_vsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrds_vsl : ClangBuiltin<"__builtin_ve_vl_vbrds_vsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrds_vsvl : ClangBuiltin<"__builtin_ve_vl_vbrds_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrds_vsmvl : ClangBuiltin<"__builtin_ve_vl_vbrds_vsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdw_vsl : ClangBuiltin<"__builtin_ve_vl_vbrdw_vsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdw_vsvl : ClangBuiltin<"__builtin_ve_vl_vbrdw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrdw_vsmvl : ClangBuiltin<"__builtin_ve_vl_vbrdw_vsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrd_vsl : ClangBuiltin<"__builtin_ve_vl_pvbrd_vsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrd_vsvl : ClangBuiltin<"__builtin_ve_vl_pvbrd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrd_vsMvl : ClangBuiltin<"__builtin_ve_vl_pvbrd_vsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmv_vsvl : ClangBuiltin<"__builtin_ve_vl_vmv_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmv_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmv_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmv_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmv_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddul_vvvl : ClangBuiltin<"__builtin_ve_vl_vaddul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddul_vvvvl : ClangBuiltin<"__builtin_ve_vl_vaddul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddul_vsvl : ClangBuiltin<"__builtin_ve_vl_vaddul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddul_vsvvl : ClangBuiltin<"__builtin_ve_vl_vaddul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddul_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vaddul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddul_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vaddul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vadduw_vvvl : ClangBuiltin<"__builtin_ve_vl_vadduw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vadduw_vvvvl : ClangBuiltin<"__builtin_ve_vl_vadduw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vadduw_vsvl : ClangBuiltin<"__builtin_ve_vl_vadduw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vadduw_vsvvl : ClangBuiltin<"__builtin_ve_vl_vadduw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vadduw_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vadduw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vadduw_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vadduw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vvvl : ClangBuiltin<"__builtin_ve_vl_pvaddu_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvaddu_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vsvl : ClangBuiltin<"__builtin_ve_vl_pvaddu_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvaddu_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvaddu_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvaddu_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvaddu_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vaddswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vaddswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vaddswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vaddswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vaddswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vaddswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vaddswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vaddswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vaddswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vaddswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vaddswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vaddswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvadds_vvvl : ClangBuiltin<"__builtin_ve_vl_pvadds_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvadds_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvadds_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvadds_vsvl : ClangBuiltin<"__builtin_ve_vl_pvadds_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvadds_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvadds_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvadds_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvadds_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvadds_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvadds_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vaddsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vaddsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vaddsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vaddsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vaddsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vaddsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vaddsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubul_vvvl : ClangBuiltin<"__builtin_ve_vl_vsubul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubul_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsubul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubul_vsvl : ClangBuiltin<"__builtin_ve_vl_vsubul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubul_vsvvl : ClangBuiltin<"__builtin_ve_vl_vsubul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubul_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsubul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubul_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vsubul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vvvl : ClangBuiltin<"__builtin_ve_vl_vsubuw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsubuw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vsvl : ClangBuiltin<"__builtin_ve_vl_vsubuw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vsvvl : ClangBuiltin<"__builtin_ve_vl_vsubuw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsubuw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubuw_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vsubuw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vvvl : ClangBuiltin<"__builtin_ve_vl_pvsubu_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvsubu_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vsvl : ClangBuiltin<"__builtin_ve_vl_pvsubu_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvsubu_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvsubu_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubu_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvsubu_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vsubswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsubswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vsubswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vsubswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsubswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vsubswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vsubswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsubswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vsubswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vsubswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsubswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vsubswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vvvl : ClangBuiltin<"__builtin_ve_vl_pvsubs_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvsubs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vsvl : ClangBuiltin<"__builtin_ve_vl_pvsubs_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvsubs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvsubs_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsubs_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvsubs_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vsubsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsubsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vsubsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vsubsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsubsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsubsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vsubsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvl : ClangBuiltin<"__builtin_ve_vl_vmulul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmulul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvl : ClangBuiltin<"__builtin_ve_vl_vmulul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmulul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmulul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulul_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmulul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvl : ClangBuiltin<"__builtin_ve_vl_vmuluw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmuluw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvl : ClangBuiltin<"__builtin_ve_vl_vmuluw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmuluw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmuluw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmuluw_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmuluw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vmulswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmulswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vmulswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmulswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmulswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmulswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vmulswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmulswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vmulswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmulswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmulswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmulswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vmulsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmulsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vmulsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmulsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmulsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmulsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vvvl : ClangBuiltin<"__builtin_ve_vl_vmulslw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmulslw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vsvl : ClangBuiltin<"__builtin_ve_vl_vmulslw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmulslw_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmulslw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsl : ClangBuiltin<"__builtin_ve_vl_vdivul_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivul_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vdivul_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivuw_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vdivuw_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswsx_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vdivswsx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivswzx_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vdivswzx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vdivsl_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vdivsl_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
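// Unsigned integer compare: vcmpul on 64-bit elements, vcmpuw on 32-bit
// words, pvcmpu on packed pairs of 32-bit words (hence the v512i1 mask).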
let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvl : ClangBuiltin<"__builtin_ve_vl_vcmpul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvvl : ClangBuiltin<"__builtin_ve_vl_vcmpul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvl : ClangBuiltin<"__builtin_ve_vl_vcmpul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvvl : ClangBuiltin<"__builtin_ve_vl_vcmpul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpul_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpul_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpul_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvl : ClangBuiltin<"__builtin_ve_vl_vcmpuw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvvl : ClangBuiltin<"__builtin_ve_vl_vcmpuw_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvl : ClangBuiltin<"__builtin_ve_vl_vcmpuw_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvvl : ClangBuiltin<"__builtin_ve_vl_vcmpuw_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpuw_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpuw_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpuw_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvl : ClangBuiltin<"__builtin_ve_vl_pvcmpu_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvcmpu_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvl : ClangBuiltin<"__builtin_ve_vl_pvcmpu_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvcmpu_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvcmpu_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmpu_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvcmpu_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
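// Signed integer compare: vcmpsw.sx/.zx on 32-bit words, pvcmps packed,
// vcmpsl on 64-bit elements.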
let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vcmpswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vcmpswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vcmpswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vcmpswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vcmpswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vcmpswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vcmpswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vcmpswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvl : ClangBuiltin<"__builtin_ve_vl_pvcmps_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvcmps_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvl : ClangBuiltin<"__builtin_ve_vl_pvcmps_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvcmps_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvcmps_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcmps_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvcmps_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vcmpsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vcmpsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vcmpsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vcmpsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcmpsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vcmpsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
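// Signed maximum and minimum: vmaxsw.sx/.zx and vminsw.sx/.zx on 32-bit
// words, pvmaxs/pvmins packed, vmaxsl/vminsl on 64-bit elements.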
let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vmaxswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmaxswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vmaxswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmaxswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmaxswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmaxswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vmaxswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmaxswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vmaxswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmaxswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmaxswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmaxswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvl : ClangBuiltin<"__builtin_ve_vl_pvmaxs_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvmaxs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvl : ClangBuiltin<"__builtin_ve_vl_pvmaxs_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvmaxs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvmaxs_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmaxs_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvmaxs_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vminswsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vminswsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvl : ClangBuiltin<"__builtin_ve_vl_vminswsx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vminswsx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vminswsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswsx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vminswsx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vminswzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vminswzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvl : ClangBuiltin<"__builtin_ve_vl_vminswzx_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvvl : ClangBuiltin<"__builtin_ve_vl_vminswzx_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vminswzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminswzx_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vminswzx_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvl : ClangBuiltin<"__builtin_ve_vl_pvmins_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvmins_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvl : ClangBuiltin<"__builtin_ve_vl_pvmins_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvmins_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmins_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvmins_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvmins_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvmins_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vmaxsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vmaxsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vmaxsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vmaxsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmaxsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmaxsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmaxsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvl : ClangBuiltin<"__builtin_ve_vl_vminsl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vminsl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvl : ClangBuiltin<"__builtin_ve_vl_vminsl_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvvl : ClangBuiltin<"__builtin_ve_vl_vminsl_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminsl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vminsl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vminsl_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vminsl_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
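// Bitwise logical operations: AND, OR, XOR and EQV (exclusive NOR), each in
// a plain 64-bit form and a packed (pv*) form taking a v512i1 mask.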
let TargetPrefix = "ve" in def int_ve_vl_vand_vvvl : ClangBuiltin<"__builtin_ve_vl_vand_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vand_vvvvl : ClangBuiltin<"__builtin_ve_vl_vand_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vand_vsvl : ClangBuiltin<"__builtin_ve_vl_vand_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vand_vsvvl : ClangBuiltin<"__builtin_ve_vl_vand_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vand_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vand_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vand_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vand_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvl : ClangBuiltin<"__builtin_ve_vl_pvand_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvand_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvl : ClangBuiltin<"__builtin_ve_vl_pvand_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvand_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvand_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvand_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvand_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvand_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vor_vvvl : ClangBuiltin<"__builtin_ve_vl_vor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vor_vvvvl : ClangBuiltin<"__builtin_ve_vl_vor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vor_vsvl : ClangBuiltin<"__builtin_ve_vl_vor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vor_vsvvl : ClangBuiltin<"__builtin_ve_vl_vor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vor_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vor_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vor_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vor_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvl : ClangBuiltin<"__builtin_ve_vl_pvor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvl : ClangBuiltin<"__builtin_ve_vl_pvor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvor_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvor_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvor_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvor_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvl : ClangBuiltin<"__builtin_ve_vl_vxor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvvl : ClangBuiltin<"__builtin_ve_vl_vxor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvl : ClangBuiltin<"__builtin_ve_vl_vxor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvvl : ClangBuiltin<"__builtin_ve_vl_vxor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vxor_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vxor_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vxor_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vxor_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvl : ClangBuiltin<"__builtin_ve_vl_pvxor_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvxor_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvl : ClangBuiltin<"__builtin_ve_vl_pvxor_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvxor_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvxor_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvxor_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvxor_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvxor_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_veqv_vvvl : ClangBuiltin<"__builtin_ve_vl_veqv_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_veqv_vvvvl : ClangBuiltin<"__builtin_ve_vl_veqv_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_veqv_vsvl : ClangBuiltin<"__builtin_ve_vl_veqv_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_veqv_vsvvl : ClangBuiltin<"__builtin_ve_vl_veqv_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_veqv_vvvmvl : ClangBuiltin<"__builtin_ve_vl_veqv_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_veqv_vsvmvl : ClangBuiltin<"__builtin_ve_vl_veqv_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pveqv_vvvl : ClangBuiltin<"__builtin_ve_vl_pveqv_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pveqv_vvvvl : ClangBuiltin<"__builtin_ve_vl_pveqv_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pveqv_vsvl : ClangBuiltin<"__builtin_ve_vl_pveqv_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pveqv_vsvvl : ClangBuiltin<"__builtin_ve_vl_pveqv_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pveqv_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pveqv_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pveqv_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pveqv_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
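// Leading-zero count per element: vldz on 64-bit elements; pvldzlo/pvldzup
// count within the low resp. upper 32-bit half, pvldz within both halves.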
let TargetPrefix = "ve" in def int_ve_vl_vldz_vvl : ClangBuiltin<"__builtin_ve_vl_vldz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldz_vvvl : ClangBuiltin<"__builtin_ve_vl_vldz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vldz_vvmvl : ClangBuiltin<"__builtin_ve_vl_vldz_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldzlo_vvl : ClangBuiltin<"__builtin_ve_vl_pvldzlo_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldzlo_vvvl : ClangBuiltin<"__builtin_ve_vl_pvldzlo_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldzlo_vvmvl : ClangBuiltin<"__builtin_ve_vl_pvldzlo_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldzup_vvl : ClangBuiltin<"__builtin_ve_vl_pvldzup_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldzup_vvvl : ClangBuiltin<"__builtin_ve_vl_pvldzup_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldzup_vvmvl : ClangBuiltin<"__builtin_ve_vl_pvldzup_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldz_vvl : ClangBuiltin<"__builtin_ve_vl_pvldz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldz_vvvl : ClangBuiltin<"__builtin_ve_vl_pvldz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvldz_vvMvl : ClangBuiltin<"__builtin_ve_vl_pvldz_vvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
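// Population count per element: vpcnt on 64-bit elements, with the same
// lo/up/packed split as vldz above.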
let TargetPrefix = "ve" in def int_ve_vl_vpcnt_vvl : ClangBuiltin<"__builtin_ve_vl_vpcnt_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vpcnt_vvvl : ClangBuiltin<"__builtin_ve_vl_vpcnt_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vpcnt_vvmvl : ClangBuiltin<"__builtin_ve_vl_vpcnt_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcntlo_vvl : ClangBuiltin<"__builtin_ve_vl_pvpcntlo_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcntlo_vvvl : ClangBuiltin<"__builtin_ve_vl_pvpcntlo_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcntlo_vvmvl : ClangBuiltin<"__builtin_ve_vl_pvpcntlo_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcntup_vvl : ClangBuiltin<"__builtin_ve_vl_pvpcntup_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcntup_vvvl : ClangBuiltin<"__builtin_ve_vl_pvpcntup_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcntup_vvmvl : ClangBuiltin<"__builtin_ve_vl_pvpcntup_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcnt_vvl : ClangBuiltin<"__builtin_ve_vl_pvpcnt_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcnt_vvvl : ClangBuiltin<"__builtin_ve_vl_pvpcnt_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvpcnt_vvMvl : ClangBuiltin<"__builtin_ve_vl_pvpcnt_vvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
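// Bit reversal within each element: vbrv, plus the packed lo/up variants.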
let TargetPrefix = "ve" in def int_ve_vl_vbrv_vvl : ClangBuiltin<"__builtin_ve_vl_vbrv_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrv_vvvl : ClangBuiltin<"__builtin_ve_vl_vbrv_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vbrv_vvmvl : ClangBuiltin<"__builtin_ve_vl_vbrv_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrvlo_vvl : ClangBuiltin<"__builtin_ve_vl_pvbrvlo_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrvlo_vvvl : ClangBuiltin<"__builtin_ve_vl_pvbrvlo_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrvlo_vvmvl : ClangBuiltin<"__builtin_ve_vl_pvbrvlo_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrvup_vvl : ClangBuiltin<"__builtin_ve_vl_pvbrvup_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrvup_vvvl : ClangBuiltin<"__builtin_ve_vl_pvbrvup_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrvup_vvmvl : ClangBuiltin<"__builtin_ve_vl_pvbrvup_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrv_vvl : ClangBuiltin<"__builtin_ve_vl_pvbrv_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrv_vvvl : ClangBuiltin<"__builtin_ve_vl_pvbrv_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvbrv_vvMvl : ClangBuiltin<"__builtin_ve_vl_pvbrv_vvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
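// Sequence generation: element i of the result receives the value i; the
// packed pvseqlo/pvsequp/pvseq variants presumably number the 32-bit halves.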
let TargetPrefix = "ve" in def int_ve_vl_vseq_vl : ClangBuiltin<"__builtin_ve_vl_vseq_vl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vseq_vvl : ClangBuiltin<"__builtin_ve_vl_vseq_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvseqlo_vl : ClangBuiltin<"__builtin_ve_vl_pvseqlo_vl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvseqlo_vvl : ClangBuiltin<"__builtin_ve_vl_pvseqlo_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsequp_vl : ClangBuiltin<"__builtin_ve_vl_pvsequp_vl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsequp_vvl : ClangBuiltin<"__builtin_ve_vl_pvsequp_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvseq_vl : ClangBuiltin<"__builtin_ve_vl_pvseq_vl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvseq_vvl : ClangBuiltin<"__builtin_ve_vl_pvseq_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
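// Shift left logical: per-element vector or broadcast scalar shift amounts,
// with masked forms.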
let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvl : ClangBuiltin<"__builtin_ve_vl_vsll_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsll_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsl : ClangBuiltin<"__builtin_ve_vl_vsll_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsvl : ClangBuiltin<"__builtin_ve_vl_vsll_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsll_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsll_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsll_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vsll_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvl : ClangBuiltin<"__builtin_ve_vl_pvsll_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvsll_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsl : ClangBuiltin<"__builtin_ve_vl_pvsll_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvsll_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvsll_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsll_vvsMvl : ClangBuiltin<"__builtin_ve_vl_pvsll_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
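// Shift right logical, mirroring the vsll forms above.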
let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvl : ClangBuiltin<"__builtin_ve_vl_vsrl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsrl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsl : ClangBuiltin<"__builtin_ve_vl_vsrl_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsvl : ClangBuiltin<"__builtin_ve_vl_vsrl_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsrl_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrl_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vsrl_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvl : ClangBuiltin<"__builtin_ve_vl_pvsrl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvsrl_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsl : ClangBuiltin<"__builtin_ve_vl_pvsrl_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvsrl_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvsrl_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsrl_vvsMvl : ClangBuiltin<"__builtin_ve_vl_pvsrl_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
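// Shift left arithmetic: vslaw.sx/.zx on 32-bit words (note the i32 scalar
// shift amount), pvsla packed, vslal on 64-bit elements.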
let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vslawsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vslawsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvsl : ClangBuiltin<"__builtin_ve_vl_vslawsx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvsvl : ClangBuiltin<"__builtin_ve_vl_vslawsx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vslawsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawsx_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vslawsx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vslawzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vslawzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvsl : ClangBuiltin<"__builtin_ve_vl_vslawzx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvsvl : ClangBuiltin<"__builtin_ve_vl_vslawzx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vslawzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslawzx_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vslawzx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvl : ClangBuiltin<"__builtin_ve_vl_pvsla_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvsla_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsl : ClangBuiltin<"__builtin_ve_vl_pvsla_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvsla_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvsla_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsla_vvsMvl : ClangBuiltin<"__builtin_ve_vl_pvsla_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvl : ClangBuiltin<"__builtin_ve_vl_vslal_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvvl : ClangBuiltin<"__builtin_ve_vl_vslal_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsl : ClangBuiltin<"__builtin_ve_vl_vslal_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsvl : ClangBuiltin<"__builtin_ve_vl_vslal_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslal_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vslal_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vslal_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vslal_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
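// Shift right arithmetic, mirroring the vsla forms above.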
let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vsrawsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsrawsx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvsl : ClangBuiltin<"__builtin_ve_vl_vsrawsx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvsvl : ClangBuiltin<"__builtin_ve_vl_vsrawsx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsrawsx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawsx_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vsrawsx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vsrawzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsrawzx_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvsl : ClangBuiltin<"__builtin_ve_vl_vsrawzx_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvsvl : ClangBuiltin<"__builtin_ve_vl_vsrawzx_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsrawzx_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsrawzx_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vsrawzx_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvl : ClangBuiltin<"__builtin_ve_vl_pvsra_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvsra_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsl : ClangBuiltin<"__builtin_ve_vl_pvsra_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvsra_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvsra_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvsra_vvsMvl : ClangBuiltin<"__builtin_ve_vl_pvsra_vvsMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvl : ClangBuiltin<"__builtin_ve_vl_vsral_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvvl : ClangBuiltin<"__builtin_ve_vl_vsral_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsl : ClangBuiltin<"__builtin_ve_vl_vsral_vvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsvl : ClangBuiltin<"__builtin_ve_vl_vsral_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsral_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vsral_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsral_vvsmvl : ClangBuiltin<"__builtin_ve_vl_vsral_vvsmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssl : ClangBuiltin<"__builtin_ve_vl_vsfa_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssvl : ClangBuiltin<"__builtin_ve_vl_vsfa_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsfa_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vsfa_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vvvl : ClangBuiltin<"__builtin_ve_vl_vfaddd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfaddd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vsvl : ClangBuiltin<"__builtin_ve_vl_vfaddd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfaddd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfaddd_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfaddd_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfaddd_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfadds_vvvl : ClangBuiltin<"__builtin_ve_vl_vfadds_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfadds_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfadds_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfadds_vsvl : ClangBuiltin<"__builtin_ve_vl_vfadds_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfadds_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfadds_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfadds_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfadds_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfadds_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfadds_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vvvl : ClangBuiltin<"__builtin_ve_vl_pvfadd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfadd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vsvl : ClangBuiltin<"__builtin_ve_vl_pvfadd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfadd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfadd_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfadd_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfadd_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vvvl : ClangBuiltin<"__builtin_ve_vl_vfsubd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfsubd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vsvl : ClangBuiltin<"__builtin_ve_vl_vfsubd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfsubd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfsubd_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubd_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfsubd_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vvvl : ClangBuiltin<"__builtin_ve_vl_vfsubs_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfsubs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vsvl : ClangBuiltin<"__builtin_ve_vl_vfsubs_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfsubs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfsubs_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsubs_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfsubs_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vvvl : ClangBuiltin<"__builtin_ve_vl_pvfsub_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfsub_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vsvl : ClangBuiltin<"__builtin_ve_vl_pvfsub_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfsub_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfsub_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfsub_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfsub_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vvvl : ClangBuiltin<"__builtin_ve_vl_vfmuld_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmuld_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vsvl : ClangBuiltin<"__builtin_ve_vl_vfmuld_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmuld_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmuld_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuld_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmuld_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vvvl : ClangBuiltin<"__builtin_ve_vl_vfmuls_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmuls_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vsvl : ClangBuiltin<"__builtin_ve_vl_vfmuls_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmuls_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmuls_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmuls_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmuls_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vvvl : ClangBuiltin<"__builtin_ve_vl_pvfmul_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmul_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vsvl : ClangBuiltin<"__builtin_ve_vl_pvfmul_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmul_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmul_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmul_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmul_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vvvl : ClangBuiltin<"__builtin_ve_vl_vfdivd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfdivd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vsvl : ClangBuiltin<"__builtin_ve_vl_vfdivd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfdivd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfdivd_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivd_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfdivd_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vvvl : ClangBuiltin<"__builtin_ve_vl_vfdivs_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfdivs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vsvl : ClangBuiltin<"__builtin_ve_vl_vfdivs_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfdivs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfdivs_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfdivs_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfdivs_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsqrtd_vvl : ClangBuiltin<"__builtin_ve_vl_vfsqrtd_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsqrtd_vvvl : ClangBuiltin<"__builtin_ve_vl_vfsqrtd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsqrts_vvl : ClangBuiltin<"__builtin_ve_vl_vfsqrts_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsqrts_vvvl : ClangBuiltin<"__builtin_ve_vl_vfsqrts_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vvvl : ClangBuiltin<"__builtin_ve_vl_vfcmpd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfcmpd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vsvl : ClangBuiltin<"__builtin_ve_vl_vfcmpd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfcmpd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfcmpd_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmpd_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfcmpd_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vvvl : ClangBuiltin<"__builtin_ve_vl_vfcmps_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfcmps_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vsvl : ClangBuiltin<"__builtin_ve_vl_vfcmps_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfcmps_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfcmps_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfcmps_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfcmps_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vvvl : ClangBuiltin<"__builtin_ve_vl_pvfcmp_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfcmp_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vsvl : ClangBuiltin<"__builtin_ve_vl_pvfcmp_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfcmp_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfcmp_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfcmp_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfcmp_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vvvl : ClangBuiltin<"__builtin_ve_vl_vfmaxd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmaxd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vsvl : ClangBuiltin<"__builtin_ve_vl_vfmaxd_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmaxd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmaxd_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxd_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmaxd_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vvvl : ClangBuiltin<"__builtin_ve_vl_vfmaxs_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmaxs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vsvl : ClangBuiltin<"__builtin_ve_vl_vfmaxs_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmaxs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmaxs_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmaxs_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmaxs_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vvvl : ClangBuiltin<"__builtin_ve_vl_pvfmax_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmax_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vsvl : ClangBuiltin<"__builtin_ve_vl_pvfmax_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmax_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmax_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmax_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmax_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmind_vvvl : ClangBuiltin<"__builtin_ve_vl_vfmind_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmind_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmind_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmind_vsvl : ClangBuiltin<"__builtin_ve_vl_vfmind_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmind_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmind_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmind_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmind_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmind_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmind_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmins_vvvl : ClangBuiltin<"__builtin_ve_vl_vfmins_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmins_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmins_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmins_vsvl : ClangBuiltin<"__builtin_ve_vl_vfmins_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmins_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmins_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmins_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmins_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmins_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmins_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vvvl : ClangBuiltin<"__builtin_ve_vl_pvfmin_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmin_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vsvl : ClangBuiltin<"__builtin_ve_vl_pvfmin_vsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmin_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmin_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmin_vsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmin_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmadd_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmadd_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmads_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmads_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vsvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vvvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vsvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vsvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmad_vvsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmad_vvsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbd_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmsbd_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmsbs_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfmsbs_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vsvvvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvsvvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vvvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vsvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vsvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmsb_vvsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfmsb_vvsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmadd_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmadd_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmads_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmads_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvvvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vsvvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvsvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vvvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vsvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vsvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmad_vvsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfnmad_vvsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbd_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbd_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vsvvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvsvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvsvvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vvvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vsvvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vsvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfnmsbs_vvsvmvl : ClangBuiltin<"__builtin_ve_vl_vfnmsbs_vvsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<f32>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvvvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vvvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vsvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vsvvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vsvvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvsvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvsvvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vvsvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vvvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vsvvMvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vsvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfnmsb_vvsvMvl : ClangBuiltin<"__builtin_ve_vl_pvfnmsb_vvsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrcpd_vvl : ClangBuiltin<"__builtin_ve_vl_vrcpd_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrcpd_vvvl : ClangBuiltin<"__builtin_ve_vl_vrcpd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrcps_vvl : ClangBuiltin<"__builtin_ve_vl_vrcps_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrcps_vvvl : ClangBuiltin<"__builtin_ve_vl_vrcps_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvrcp_vvl : ClangBuiltin<"__builtin_ve_vl_pvrcp_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvrcp_vvvl : ClangBuiltin<"__builtin_ve_vl_pvrcp_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrtd_vvl : ClangBuiltin<"__builtin_ve_vl_vrsqrtd_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrtd_vvvl : ClangBuiltin<"__builtin_ve_vl_vrsqrtd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrts_vvl : ClangBuiltin<"__builtin_ve_vl_vrsqrts_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrts_vvvl : ClangBuiltin<"__builtin_ve_vl_vrsqrts_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvrsqrt_vvl : ClangBuiltin<"__builtin_ve_vl_pvrsqrt_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvrsqrt_vvvl : ClangBuiltin<"__builtin_ve_vl_pvrsqrt_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrtdnex_vvl : ClangBuiltin<"__builtin_ve_vl_vrsqrtdnex_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrtdnex_vvvl : ClangBuiltin<"__builtin_ve_vl_vrsqrtdnex_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrtsnex_vvl : ClangBuiltin<"__builtin_ve_vl_vrsqrtsnex_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrsqrtsnex_vvvl : ClangBuiltin<"__builtin_ve_vl_vrsqrtsnex_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvrsqrtnex_vvl : ClangBuiltin<"__builtin_ve_vl_pvrsqrtnex_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvrsqrtnex_vvvl : ClangBuiltin<"__builtin_ve_vl_pvrsqrtnex_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsx_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdsx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsx_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdsx_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsxrz_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdsxrz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsxrz_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdsxrz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdsxrz_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdsxrz_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzx_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdzx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzx_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdzx_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzxrz_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdzxrz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzxrz_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdzxrz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwdzxrz_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwdzxrz_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwssx_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwssx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwssx_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwssx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwssx_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwssx_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwssxrz_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwssxrz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwssxrz_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwssxrz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwssxrz_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwssxrz_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwszx_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwszx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwszx_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwszx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwszx_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwszx_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwszxrz_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtwszxrz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwszxrz_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtwszxrz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtwszxrz_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtwszxrz_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtws_vvl : ClangBuiltin<"__builtin_ve_vl_pvcvtws_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtws_vvvl : ClangBuiltin<"__builtin_ve_vl_pvcvtws_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtws_vvMvl : ClangBuiltin<"__builtin_ve_vl_pvcvtws_vvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtwsrz_vvl : ClangBuiltin<"__builtin_ve_vl_pvcvtwsrz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtwsrz_vvvl : ClangBuiltin<"__builtin_ve_vl_pvcvtwsrz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtwsrz_vvMvl : ClangBuiltin<"__builtin_ve_vl_pvcvtwsrz_vvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtld_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtld_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtld_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtld_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtld_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtld_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtldrz_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtldrz_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtldrz_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtldrz_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtldrz_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcvtldrz_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtdw_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtdw_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtdw_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtdw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtsw_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtsw_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtsw_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtsw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtsw_vvl : ClangBuiltin<"__builtin_ve_vl_pvcvtsw_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvcvtsw_vvvl : ClangBuiltin<"__builtin_ve_vl_pvcvtsw_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtdl_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtdl_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtdl_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtdl_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtds_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtds_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtds_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtds_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtsd_vvl : ClangBuiltin<"__builtin_ve_vl_vcvtsd_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcvtsd_vvvl : ClangBuiltin<"__builtin_ve_vl_vcvtsd_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrg_vvvml : ClangBuiltin<"__builtin_ve_vl_vmrg_vvvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrg_vvvmvl : ClangBuiltin<"__builtin_ve_vl_vmrg_vvvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrg_vsvml : ClangBuiltin<"__builtin_ve_vl_vmrg_vsvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrg_vsvmvl : ClangBuiltin<"__builtin_ve_vl_vmrg_vsvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i64>, LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vvvMl : ClangBuiltin<"__builtin_ve_vl_vmrgw_vvvMl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vvvMvl : ClangBuiltin<"__builtin_ve_vl_vmrgw_vvvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vsvMl : ClangBuiltin<"__builtin_ve_vl_vmrgw_vsvMl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vmrgw_vsvMvl : ClangBuiltin<"__builtin_ve_vl_vmrgw_vsvMvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<i32>, LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vshf_vvvsl : ClangBuiltin<"__builtin_ve_vl_vshf_vvvsl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vshf_vvvsvl : ClangBuiltin<"__builtin_ve_vl_vshf_vvvsvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vcp_vvmvl : ClangBuiltin<"__builtin_ve_vl_vcp_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vex_vvmvl : ClangBuiltin<"__builtin_ve_vl_vex_vvmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklat_ml : ClangBuiltin<"__builtin_ve_vl_vfmklat_ml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklaf_ml : ClangBuiltin<"__builtin_ve_vl_vfmklaf_ml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkat_Ml : ClangBuiltin<"__builtin_ve_vl_pvfmkat_Ml">, Intrinsic<[LLVMType<v512i1>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkaf_Ml : ClangBuiltin<"__builtin_ve_vl_pvfmkaf_Ml">, Intrinsic<[LLVMType<v512i1>], [LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklgt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklgt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklgt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklgt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkllt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkllt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkllt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkllt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklne_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklne_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklne_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklne_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkleq_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkleq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkleq_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkleq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklge_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklge_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklle_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklle_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklle_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklle_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklnum_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklnum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklnum_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklnum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklgtnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklgtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklgtnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklgtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklltnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklltnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklnenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklnenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklnenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklnenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkleqnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkleqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkleqnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkleqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklgenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmklgenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmklgenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmklgenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkllenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkllenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkllenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkllenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwgt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwgt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwgt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwgt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwlt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwlt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwlt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwlt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwne_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwne_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwne_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwne_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkweq_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkweq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkweq_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkweq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwge_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwge_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwle_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwle_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwle_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwle_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwnum_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwnum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwnum_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwnum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwgtnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwgtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwgtnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwgtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwltnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwltnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwnenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwnenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwnenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwnenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkweqnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkweqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkweqnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkweqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwgenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwgenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwgenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwgenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwlenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkwlenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkwlenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkwlenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlogt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupgt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlogt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupgt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlolt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwuplt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlolt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwuplt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlone_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlone_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupne_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupne_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlone_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlone_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupne_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupne_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeq_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwloeq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeq_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupeq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeq_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwloeq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeq_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupeq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloge_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwloge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupge_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloge_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwloge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupge_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlole_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlole_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuple_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwuple_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlole_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlole_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuple_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwuple_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonum_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlonum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnum_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupnum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonum_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlonum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnum_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupnum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlonan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlonan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogtnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlogtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgtnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupgtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogtnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlogtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgtnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupgtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloltnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwloltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupltnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloltnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwloltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupltnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlonenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupnenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlonenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlonenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupnenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupnenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeqnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwloeqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeqnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupeqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwloeqnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwloeqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupeqnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupeqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlogenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwupgenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlogenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlogenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwupgenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwupgenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlolenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwuplenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlolenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwlolenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwuplenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkwuplenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgt_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwgt_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgt_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwgt_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlt_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlt_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlt_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlt_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwne_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwne_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwne_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwne_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkweq_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkweq_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkweq_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkweq_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwge_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwge_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwge_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwge_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwle_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwle_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwle_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwle_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnum_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwnum_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnum_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwnum_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgtnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwgtnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgtnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwgtnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwltnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwltnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwltnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwltnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnenan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwnenan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwnenan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwnenan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkweqnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkweqnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkweqnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkweqnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgenan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwgenan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwgenan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwgenan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlenan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlenan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkwlenan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkwlenan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdgt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdgt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdgt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdgt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdlt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdlt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdlt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdlt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdne_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdne_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdne_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdne_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdeq_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdeq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdeq_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdeq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdge_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdge_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdle_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdle_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdle_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdle_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdnum_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdnum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdnum_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdnum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdgtnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdgtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdgtnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdgtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdltnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdltnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdnenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdnenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdnenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdnenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdeqnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdeqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdeqnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdeqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdgenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdgenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdgenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdgenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdlenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkdlenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkdlenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkdlenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksgt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksgt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksgt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksgt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkslt_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkslt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkslt_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkslt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksne_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksne_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksne_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksne_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkseq_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkseq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkseq_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkseq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksge_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksge_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksle_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksle_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksle_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksle_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksnum_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksnum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksnum_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksnum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksgtnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksgtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksgtnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksgtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksltnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksltnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksnenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksnenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksnenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksnenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkseqnan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkseqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkseqnan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkseqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksgenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmksgenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmksgenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmksgenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkslenan_mvl : ClangBuiltin<"__builtin_ve_vl_vfmkslenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfmkslenan_mvml : ClangBuiltin<"__builtin_ve_vl_vfmkslenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslogt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupgt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslogt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupgt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslolt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplt_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksuplt_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslolt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplt_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksuplt_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslone_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslone_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupne_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupne_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslone_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslone_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupne_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupne_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeq_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksloeq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeq_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupeq_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeq_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksloeq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeq_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupeq_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloge_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksloge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupge_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupge_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloge_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksloge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupge_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupge_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslole_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslole_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksuple_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksuple_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslole_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslole_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksuple_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksuple_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonum_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslonum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnum_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupnum_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonum_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslonum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnum_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupnum_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslonan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslonan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogtnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslogtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgtnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupgtnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogtnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslogtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgtnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupgtnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloltnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksloltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupltnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupltnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloltnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksloltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupltnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupltnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslonenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupnenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslonenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslonenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupnenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupnenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeqnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksloeqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeqnan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupeqnan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksloeqnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksloeqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupeqnan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupeqnan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslogenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksupgenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslogenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslogenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksupgenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksupgenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslolenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplenan_mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksuplenan_mvl">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslolenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmkslolenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksuplenan_mvml : ClangBuiltin<"__builtin_ve_vl_pvfmksuplenan_mvml">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksgt_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksgt_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksgt_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksgt_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslt_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslt_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslt_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkslt_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksne_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksne_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksne_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksne_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkseq_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkseq_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkseq_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkseq_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksge_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksge_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksge_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksge_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksle_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksle_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksle_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksle_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksnum_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksnum_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksnum_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksnum_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksgtnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksgtnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksgtnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksgtnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksltnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksltnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksltnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksltnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksnenan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksnenan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksnenan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksnenan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkseqnan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkseqnan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkseqnan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkseqnan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksgenan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmksgenan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmksgenan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmksgenan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslenan_Mvl : ClangBuiltin<"__builtin_ve_vl_pvfmkslenan_Mvl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pvfmkslenan_MvMl : ClangBuiltin<"__builtin_ve_vl_pvfmkslenan_MvMl">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v256f64>, LLVMType<v512i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsumwsx_vvl : ClangBuiltin<"__builtin_ve_vl_vsumwsx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsumwsx_vvml : ClangBuiltin<"__builtin_ve_vl_vsumwsx_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsumwzx_vvl : ClangBuiltin<"__builtin_ve_vl_vsumwzx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsumwzx_vvml : ClangBuiltin<"__builtin_ve_vl_vsumwzx_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsuml_vvl : ClangBuiltin<"__builtin_ve_vl_vsuml_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsuml_vvml : ClangBuiltin<"__builtin_ve_vl_vsuml_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsumd_vvl : ClangBuiltin<"__builtin_ve_vl_vfsumd_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsumd_vvml : ClangBuiltin<"__builtin_ve_vl_vfsumd_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsums_vvl : ClangBuiltin<"__builtin_ve_vl_vfsums_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfsums_vvml : ClangBuiltin<"__builtin_ve_vl_vfsums_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstsx_vvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswfstsx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswfstsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstsx_vvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswlstsx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswlstsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstzx_vvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswfstzx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswfstzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswfstzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstzx_vvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswlstzx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxswlstzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrmaxswlstzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswfstsx_vvl : ClangBuiltin<"__builtin_ve_vl_vrminswfstsx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswfstsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrminswfstsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswlstsx_vvl : ClangBuiltin<"__builtin_ve_vl_vrminswlstsx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswlstsx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrminswlstsx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswfstzx_vvl : ClangBuiltin<"__builtin_ve_vl_vrminswfstzx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswfstzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrminswfstzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswlstzx_vvl : ClangBuiltin<"__builtin_ve_vl_vrminswlstzx_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminswlstzx_vvvl : ClangBuiltin<"__builtin_ve_vl_vrminswlstzx_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxslfst_vvl : ClangBuiltin<"__builtin_ve_vl_vrmaxslfst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxslfst_vvvl : ClangBuiltin<"__builtin_ve_vl_vrmaxslfst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxsllst_vvl : ClangBuiltin<"__builtin_ve_vl_vrmaxsllst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrmaxsllst_vvvl : ClangBuiltin<"__builtin_ve_vl_vrmaxsllst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminslfst_vvl : ClangBuiltin<"__builtin_ve_vl_vrminslfst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminslfst_vvvl : ClangBuiltin<"__builtin_ve_vl_vrminslfst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminsllst_vvl : ClangBuiltin<"__builtin_ve_vl_vrminsllst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrminsllst_vvvl : ClangBuiltin<"__builtin_ve_vl_vrminsllst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdfst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxdfst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdfst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxdfst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdlst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxdlst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxdlst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxdlst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxsfst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxsfst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxsfst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxsfst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxslst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxslst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmaxslst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrmaxslst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmindfst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrmindfst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmindfst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrmindfst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmindlst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrmindlst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrmindlst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrmindlst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrminsfst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrminsfst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrminsfst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrminsfst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrminslst_vvl : ClangBuiltin<"__builtin_ve_vl_vfrminslst_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vfrminslst_vvvl : ClangBuiltin<"__builtin_ve_vl_vfrminslst_vvvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrand_vvl : ClangBuiltin<"__builtin_ve_vl_vrand_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrand_vvml : ClangBuiltin<"__builtin_ve_vl_vrand_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vror_vvl : ClangBuiltin<"__builtin_ve_vl_vror_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vror_vvml : ClangBuiltin<"__builtin_ve_vl_vror_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrxor_vvl : ClangBuiltin<"__builtin_ve_vl_vrxor_vvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vrxor_vvml : ClangBuiltin<"__builtin_ve_vl_vrxor_vvml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssl : ClangBuiltin<"__builtin_ve_vl_vgt_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgt_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssml : ClangBuiltin<"__builtin_ve_vl_vgt_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgt_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgt_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtnc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtnc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtnc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtnc_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtnc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtu_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtu_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtu_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtu_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtu_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtunc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtunc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtunc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtunc_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtunc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtlsx_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtlsx_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtlsx_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsx_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtlsx_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlsxnc_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtlsxnc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtlzx_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtlzx_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtlzx_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzx_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtlzx_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssl : ClangBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssvl : ClangBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssml : ClangBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssml">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vgtlzxnc_vvssmvl : ClangBuiltin<"__builtin_ve_vl_vgtlzxnc_vvssmvl">, Intrinsic<[LLVMType<v256f64>], [LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<v256f64>, LLVMType<i32>], [IntrReadMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsc_vvssl : ClangBuiltin<"__builtin_ve_vl_vsc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsc_vvssml : ClangBuiltin<"__builtin_ve_vl_vsc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscnc_vvssl : ClangBuiltin<"__builtin_ve_vl_vscnc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscnc_vvssml : ClangBuiltin<"__builtin_ve_vl_vscnc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscot_vvssl : ClangBuiltin<"__builtin_ve_vl_vscot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscot_vvssml : ClangBuiltin<"__builtin_ve_vl_vscot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscncot_vvssl : ClangBuiltin<"__builtin_ve_vl_vscncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscncot_vvssml : ClangBuiltin<"__builtin_ve_vl_vscncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscu_vvssl : ClangBuiltin<"__builtin_ve_vl_vscu_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscu_vvssml : ClangBuiltin<"__builtin_ve_vl_vscu_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscunc_vvssl : ClangBuiltin<"__builtin_ve_vl_vscunc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscunc_vvssml : ClangBuiltin<"__builtin_ve_vl_vscunc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscuot_vvssl : ClangBuiltin<"__builtin_ve_vl_vscuot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscuot_vvssml : ClangBuiltin<"__builtin_ve_vl_vscuot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscuncot_vvssl : ClangBuiltin<"__builtin_ve_vl_vscuncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscuncot_vvssml : ClangBuiltin<"__builtin_ve_vl_vscuncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscl_vvssl : ClangBuiltin<"__builtin_ve_vl_vscl_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vscl_vvssml : ClangBuiltin<"__builtin_ve_vl_vscl_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsclnc_vvssl : ClangBuiltin<"__builtin_ve_vl_vsclnc_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsclnc_vvssml : ClangBuiltin<"__builtin_ve_vl_vsclnc_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssl : ClangBuiltin<"__builtin_ve_vl_vsclot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsclot_vvssml : ClangBuiltin<"__builtin_ve_vl_vsclot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssl : ClangBuiltin<"__builtin_ve_vl_vsclncot_vvssl">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_vsclncot_vvssml : ClangBuiltin<"__builtin_ve_vl_vsclncot_vvssml">, Intrinsic<[], [LLVMType<v256f64>, LLVMType<v256f64>, LLVMType<i64>, LLVMType<i64>, LLVMType<v256i1>, LLVMType<i32>], [IntrWriteMem]>;
let TargetPrefix = "ve" in def int_ve_vl_andm_mmm : ClangBuiltin<"__builtin_ve_vl_andm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_andm_MMM : ClangBuiltin<"__builtin_ve_vl_andm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_orm_mmm : ClangBuiltin<"__builtin_ve_vl_orm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_orm_MMM : ClangBuiltin<"__builtin_ve_vl_orm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_xorm_mmm : ClangBuiltin<"__builtin_ve_vl_xorm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_xorm_MMM : ClangBuiltin<"__builtin_ve_vl_xorm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_eqvm_mmm : ClangBuiltin<"__builtin_ve_vl_eqvm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_eqvm_MMM : ClangBuiltin<"__builtin_ve_vl_eqvm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_nndm_mmm : ClangBuiltin<"__builtin_ve_vl_nndm_mmm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>, LLVMType<v256i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_nndm_MMM : ClangBuiltin<"__builtin_ve_vl_nndm_MMM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v512i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_negm_mm : ClangBuiltin<"__builtin_ve_vl_negm_mm">, Intrinsic<[LLVMType<v256i1>], [LLVMType<v256i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_negm_MM : ClangBuiltin<"__builtin_ve_vl_negm_MM">, Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_pcvm_sml : ClangBuiltin<"__builtin_ve_vl_pcvm_sml">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lzvm_sml : ClangBuiltin<"__builtin_ve_vl_lzvm_sml">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_tovm_sml : ClangBuiltin<"__builtin_ve_vl_tovm_sml">, Intrinsic<[LLVMType<i64>], [LLVMType<v256i1>, LLVMType<i32>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_lcr_sss : ClangBuiltin<"__builtin_ve_vl_lcr_sss">, Intrinsic<[LLVMType<i64>], [LLVMType<i64>, LLVMType<i64>], [IntrNoMem]>;
let TargetPrefix = "ve" in def int_ve_vl_scr_sss : ClangBuiltin<"__builtin_ve_vl_scr_sss">, Intrinsic<[], [LLVMType<i64>, LLVMType<i64>, LLVMType<i64>], [IntrNoMem, IntrHasSideEffects]>;
let TargetPrefix = "ve" in def int_ve_vl_tscr_ssss : ClangBuiltin<"__builtin_ve_vl_tscr_ssss">, Intrinsic<[LLVMType<i64>], [LLVMType<i64>, LLVMType<i64>, LLVMType<i64>], [IntrNoMem, IntrHasSideEffects]>;
let TargetPrefix = "ve" in def int_ve_vl_fidcr_sss : ClangBuiltin<"__builtin_ve_vl_fidcr_sss">, Intrinsic<[LLVMType<i64>], [LLVMType<i64>, LLVMType<i32>], [IntrNoMem, IntrHasSideEffects]>;
let TargetPrefix = "ve" in def int_ve_vl_fencei : ClangBuiltin<"__builtin_ve_vl_fencei">, Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
let TargetPrefix = "ve" in def int_ve_vl_fencem_s : ClangBuiltin<"__builtin_ve_vl_fencem_s">, Intrinsic<[], [LLVMType<i32>], [IntrNoMem, IntrHasSideEffects]>;
let TargetPrefix = "ve" in def int_ve_vl_fencec_s : ClangBuiltin<"__builtin_ve_vl_fencec_s">, Intrinsic<[], [LLVMType<i32>], [IntrNoMem, IntrHasSideEffects]>;
let TargetPrefix = "ve" in def int_ve_vl_svob : ClangBuiltin<"__builtin_ve_vl_svob">, Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
IR/PassManager.h

//===- PassManager.h - Pass management infrastructure -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header defines various interfaces for pass management in LLVM. There
/// is no "pass" interface in LLVM per se. Instead, an instance of any class
/// which supports a method to 'run' it over a unit of IR can be used as
/// a pass. A pass manager is generally a tool to collect a sequence of passes
/// which run over a particular IR construct, and run each of them in sequence
/// over each such construct in the containing IR construct. As there is no
/// containing IR construct for a Module, a manager for passes over modules
/// forms the base case which runs its managed passes in sequence over the
/// single module provided.
///
/// The core IR library provides managers for running passes over
/// modules and functions.
///
/// * FunctionPassManager can be run over a Module; it runs each pass over
///   each Function.
/// * ModulePassManager must be run directly; it runs each pass over the Module.
///
/// Note that the implementations of the pass managers use concept-based
/// polymorphism as outlined in the "Value Semantics and Concept-based
/// Polymorphism" talk (or its abbreviated sibling "Inheritance Is The Base
/// Class of Evil") by Sean Parent:
/// * http://github.com/sean-parent/sean-parent.github.com/wiki/Papers-and-Presentations
/// * http://www.youtube.com/watch?v=_BpMYeUFXv8
/// * http://channel9.msdn.com/Events/GoingNative/2013/Inheritance-Is-The-Base-Class-of-Evil
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PASSMANAGER_H
#define LLVM_IR_PASSMANAGER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassInstrumentation.h"
#include "llvm/IR/PassManagerInternal.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/TypeName.h"
#include <cassert>
#include <cstring>
#include <iterator>
#include <list>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

namespace llvm {

/// A special type used by analysis passes to provide an address that
/// identifies that particular analysis pass type.
///
/// Analysis passes should have a static data member of this type and derive
/// from the \c AnalysisInfoMixin to get a static ID method used to identify
/// the analysis in the pass management infrastructure.
struct alignas(8) AnalysisKey {};

/// A special type used to provide an address that identifies a set of related
/// analyses.  These sets are primarily used below to mark sets of analyses as
/// preserved.
///
/// For example, a transformation can indicate that it preserves the CFG of a
/// function by preserving the appropriate AnalysisSetKey.  An analysis that
/// depends only on the CFG can then check if that AnalysisSetKey is preserved;
/// if it is, the analysis knows that it itself is preserved.
struct alignas(8) AnalysisSetKey {};

/// This templated class represents "all analyses that operate over \<a
/// particular IR unit\>" (e.g. a Function or a Module) in instances of
/// PreservedAnalysis.
///
/// This lets a transformation say e.g. "I preserved all function analyses".
///
/// Note that you must provide an explicit instantiation declaration and
/// definition for this template in order to get the correct behavior on
/// Windows. Otherwise, the address of SetKey will not be stable.
template <typename IRUnitT> class AllAnalysesOn {
public:
  static AnalysisSetKey *ID() { return &SetKey; }

private:
  static AnalysisSetKey SetKey;
};

template <typename IRUnitT> AnalysisSetKey AllAnalysesOn<IRUnitT>::SetKey;

extern template class AllAnalysesOn<Module>;
extern template class AllAnalysesOn<Function>;
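
// A quick usage sketch (here `PA` is assumed to be an instance of the
// PreservedAnalyses class defined below): a transformation that leaves every
// function body untouched can report
//
//   PA.preserveSet<AllAnalysesOn<Function>>();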

/// Represents analyses that only rely on functions' control flow.
///
/// This can be used with \c PreservedAnalyses to mark the CFG as preserved and
/// to query whether it has been preserved.
///
/// The CFG of a function is defined as the set of basic blocks and the edges
/// between them. Changing the set of basic blocks in a function is enough to
/// mutate the CFG. Mutating the condition of a branch or argument of an
/// invoked function does not mutate the CFG, but changing the successor labels
/// of those instructions does.
class CFGAnalyses {
public:
  static AnalysisSetKey *ID() { return &SetKey; }

private:
  static AnalysisSetKey SetKey;
};

/// A set of analyses that are preserved following a run of a transformation
/// pass.
///
/// Transformation passes build and return these objects to communicate which
/// analyses are still valid after the transformation. For most passes this is
/// fairly simple: if they don't change anything all analyses are preserved,
/// otherwise only a short list of analyses that have been explicitly updated
/// are preserved.
///
/// This class also lets transformation passes mark abstract *sets* of analyses
/// as preserved. A transformation that (say) does not alter the CFG can
/// indicate such by marking a particular AnalysisSetKey as preserved, and
/// then analyses can query whether that AnalysisSetKey is preserved.
///
/// Finally, this class can represent an "abandoned" analysis, which is
/// not preserved even if it would be covered by some abstract set of analyses.
///
/// Given a `PreservedAnalyses` object, an analysis will typically want to
/// figure out whether it is preserved. In the example below, MyAnalysisType is
/// preserved if it's not abandoned, and (a) it's explicitly marked as
/// preserved, (b) the set AllAnalysesOn<MyIRUnit> is preserved, or (c) both
/// AnalysisSetA and AnalysisSetB are preserved.
///
/// ```
///   auto PAC = PA.getChecker<MyAnalysisType>();
///   if (PAC.preserved() || PAC.preservedSet<AllAnalysesOn<MyIRUnit>>() ||
///       (PAC.preservedSet<AnalysisSetA>() &&
///        PAC.preservedSet<AnalysisSetB>())) {
///     // The analysis has been successfully preserved ...
///   }
/// ```
class PreservedAnalyses {
public:
  /// Convenience factory function for the empty preserved set.
  static PreservedAnalyses none() { return PreservedAnalyses(); }

  /// Construct a special preserved set that preserves all passes.
  static PreservedAnalyses all() {
    PreservedAnalyses PA;
    PA.PreservedIDs.insert(&AllAnalysesKey);
    return PA;
  }

  /// Construct a preserved analyses object with a single preserved set.
  template <typename AnalysisSetT>
  static PreservedAnalyses allInSet() {
    PreservedAnalyses PA;
    PA.preserveSet<AnalysisSetT>();
    return PA;
  }

  /// Mark an analysis as preserved.
  template <typename AnalysisT> void preserve() { preserve(AnalysisT::ID()); }

  /// Given an analysis's ID, mark the analysis as preserved, adding it
  /// to the set.
  void preserve(AnalysisKey *ID) {
    // Clear this ID from the explicit not-preserved set if present.
    NotPreservedAnalysisIDs.erase(ID);

    // Unless we're already preserving all analyses (other than those in
    // NotPreservedAnalysisIDs), record this ID explicitly.
    if (!areAllPreserved())
      PreservedIDs.insert(ID);
  }

  /// Mark an analysis set as preserved.
  template <typename AnalysisSetT> void preserveSet() {
    preserveSet(AnalysisSetT::ID());
  }

  /// Mark an analysis set as preserved using its ID.
  void preserveSet(AnalysisSetKey *ID) {
    // If we're not already in the saturated 'all' state, add this set.
    if (!areAllPreserved())
      PreservedIDs.insert(ID);
  }

  /// Mark an analysis as abandoned.
  ///
  /// An abandoned analysis is not preserved, even if it is nominally covered
  /// by some other set or was previously explicitly marked as preserved.
  ///
  /// Note that you can only abandon a specific analysis, not a *set* of
  /// analyses.
  template <typename AnalysisT> void abandon() { abandon(AnalysisT::ID()); }

  /// Mark an analysis as abandoned using its ID.
  ///
  /// An abandoned analysis is not preserved, even if it is nominally covered
  /// by some other set or was previously explicitly marked as preserved.
  ///
  /// Note that you can only abandon a specific analysis, not a *set* of
  /// analyses.
  void abandon(AnalysisKey *ID) {
    PreservedIDs.erase(ID);
    NotPreservedAnalysisIDs.insert(ID);
  }

  /// Intersect this set with another in place.
  ///
  /// This is a mutating operation on this preserved set, removing all
  /// preserved passes which are not also preserved in the argument.
  void intersect(const PreservedAnalyses &Arg) {
    if (Arg.areAllPreserved())
      return;
    if (areAllPreserved()) {
      *this = Arg;
      return;
    }
    // The intersection requires the *union* of the explicitly not-preserved
    // IDs and the *intersection* of the preserved IDs.
    for (auto *ID : Arg.NotPreservedAnalysisIDs) {
      PreservedIDs.erase(ID);
      NotPreservedAnalysisIDs.insert(ID);
    }
    for (auto *ID : PreservedIDs)
      if (!Arg.PreservedIDs.count(ID))
        PreservedIDs.erase(ID);
  }

  /// Intersect this set with a temporary other set in place.
  ///
  /// This is a mutating operation on this preserved set, removing all
  /// preserved passes which are not also preserved in the argument.
  void intersect(PreservedAnalyses &&Arg) {
    if (Arg.areAllPreserved())
      return;
    if (areAllPreserved()) {
      *this = std::move(Arg);
      return;
    }
    // The intersection requires the *union* of the explicitly not-preserved
    // IDs and the *intersection* of the preserved IDs.
    for (auto *ID : Arg.NotPreservedAnalysisIDs) {
      PreservedIDs.erase(ID);
      NotPreservedAnalysisIDs.insert(ID);
    }
    for (auto *ID : PreservedIDs)
      if (!Arg.PreservedIDs.count(ID))
        PreservedIDs.erase(ID);
  }

  /// A checker object that makes it easy to query for whether an analysis or
  /// some set covering it is preserved.
  class PreservedAnalysisChecker {
    friend class PreservedAnalyses;

    const PreservedAnalyses &PA;
    AnalysisKey *const ID;
    const bool IsAbandoned;

    /// A PreservedAnalysisChecker is tied to a particular Analysis because
    /// `preserved()` and `preservedSet()` both return false if the Analysis
    /// was abandoned.
    PreservedAnalysisChecker(const PreservedAnalyses &PA, AnalysisKey *ID)
        : PA(PA), ID(ID), IsAbandoned(PA.NotPreservedAnalysisIDs.count(ID)) {}

  public:
    /// Returns true if the checker's analysis was not abandoned and either
    ///  - the analysis is explicitly preserved or
    ///  - all analyses are preserved.
    bool preserved() {
      return !IsAbandoned && (PA.PreservedIDs.count(&AllAnalysesKey) ||
                              PA.PreservedIDs.count(ID));
    }

    /// Return true if the checker's analysis was not abandoned, i.e. it was not
    /// explicitly invalidated. Even if the analysis is not explicitly
    /// preserved, a known-stateless analysis is still treated as preserved.
    bool preservedWhenStateless() {
      return !IsAbandoned;
    }

    /// Returns true if the checker's analysis was not abandoned and either
    ///  - \p AnalysisSetT is explicitly preserved or
    ///  - all analyses are preserved.
    template <typename AnalysisSetT> bool preservedSet() {
      AnalysisSetKey *SetID = AnalysisSetT::ID();
      return !IsAbandoned && (PA.PreservedIDs.count(&AllAnalysesKey) ||
                              PA.PreservedIDs.count(SetID));
    }
  };

  /// Build a checker for this `PreservedAnalyses` and the specified analysis
  /// type.
  ///
  /// You can use the returned object to query whether an analysis was
  /// preserved. See the example in the comment on `PreservedAnalysis`.
  template <typename AnalysisT> PreservedAnalysisChecker getChecker() const {
    return PreservedAnalysisChecker(*this, AnalysisT::ID());
  }

  /// Build a checker for this `PreservedAnalyses` and the specified analysis
  /// ID.
  ///
  /// You can use the returned object to query whether an analysis was
  /// preserved. See the example in the comment on `PreservedAnalysis`.
  PreservedAnalysisChecker getChecker(AnalysisKey *ID) const {
    return PreservedAnalysisChecker(*this, ID);
  }

  /// Test whether all analyses are preserved (and none are abandoned).
  ///
  /// This is used primarily to optimize for the common case of a transformation
  /// which makes no changes to the IR.
  bool areAllPreserved() const {
    return NotPreservedAnalysisIDs.empty() &&
           PreservedIDs.count(&AllAnalysesKey);
  }

  /// Directly test whether a set of analyses is preserved.
  ///
  /// This is only true when no analyses have been explicitly abandoned.
  template <typename AnalysisSetT> bool allAnalysesInSetPreserved() const {
    return allAnalysesInSetPreserved(AnalysisSetT::ID());
  }

  /// Directly test whether a set of analyses is preserved.
  ///
  /// This is only true when no analyses have been explicitly abandoned.
  bool allAnalysesInSetPreserved(AnalysisSetKey *SetID) const {
    return NotPreservedAnalysisIDs.empty() &&
           (PreservedIDs.count(&AllAnalysesKey) || PreservedIDs.count(SetID));
  }

private:
  /// A special key used to indicate all analyses.
  static AnalysisSetKey AllAnalysesKey;

  /// The IDs of analyses and analysis sets that are preserved.
  SmallPtrSet<void *, 2> PreservedIDs;

  /// The IDs of explicitly not-preserved analyses.
  ///
  /// If an analysis in this set is covered by a set in `PreservedIDs`, we
  /// consider it not-preserved. That is, `NotPreservedAnalysisIDs` always
  /// "wins" over analysis sets in `PreservedIDs`.
  ///
  /// Also, a given ID should never occur both here and in `PreservedIDs`.
  SmallPtrSet<AnalysisKey *, 2> NotPreservedAnalysisIDs;
};
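
// A usage sketch for a transformation pass. The pass body and the
// `simplifyBody` helper are hypothetical; the PreservedAnalyses calls are the
// API declared above:
//
//   PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
//     if (!simplifyBody(F))              // made no changes at all
//       return PreservedAnalyses::all(); // every analysis stays valid
//     PreservedAnalyses PA;
//     PA.preserveSet<CFGAnalyses>();     // changed code, but not the CFG
//     return PA;
//   }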

// Forward declare the analysis manager template.
template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;

/// A CRTP mix-in to automatically provide informational APIs needed for
/// passes.
///
/// This provides some boilerplate for types that are passes.
template <typename DerivedT> struct PassInfoMixin {
  /// Gets the name of the pass we are mixed into.
  static StringRef name() {
    static_assert(std::is_base_of<PassInfoMixin, DerivedT>::value,
                  "Must pass the derived type as the template argument!");
    StringRef Name = getTypeName<DerivedT>();
    Name.consume_front("llvm::");
    return Name;
  }

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    StringRef ClassName = DerivedT::name();
    auto PassName = MapClassName2PassName(ClassName);
    OS << PassName;
  }
};
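
// Illustrative sketch: a pass opts into these APIs by deriving from the mixin
// with itself as the template argument (CRTP). `HelloPass` is a hypothetical
// name:
//
//   struct HelloPass : PassInfoMixin<HelloPass> {
//     PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
//       return PreservedAnalyses::all(); // inspect F, change nothing
//     }
//   };
//
// name() then yields "HelloPass", recovered from the type name.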

/// A CRTP mix-in that provides informational APIs needed for analysis passes.
///
/// This provides some boilerplate for types that are analysis passes. It
/// automatically mixes in \c PassInfoMixin.
template <typename DerivedT>
struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
  /// Returns an opaque, unique ID for this analysis type.
  ///
  /// This ID is a pointer type that is guaranteed to be 8-byte aligned and thus
  /// suitable for use in sets, maps, and other data structures that use the low
  /// bits of pointers.
  ///
  /// Note that this requires the derived type provide a static \c AnalysisKey
  /// member called \c Key.
  ///
  /// FIXME: The only reason the mixin type itself can't declare the Key value
  /// is that some compilers cannot correctly unique a templated static variable
  /// so that it has the same address in each instantiation. The only currently
  /// known platform with this limitation is Windows DLL builds, specifically
  /// building each part of LLVM as a DLL. If we ever remove that build
  /// configuration, this mixin can provide the static key as well.
  static AnalysisKey *ID() {
    static_assert(std::is_base_of<AnalysisInfoMixin, DerivedT>::value,
                  "Must pass the derived type as the template argument!");
    return &DerivedT::Key;
  }
};
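
// Example of the intended usage (an illustrative sketch; MyAnalysis is
// hypothetical and not part of this header):
//
//   class MyAnalysis : public AnalysisInfoMixin<MyAnalysis> {
//     friend AnalysisInfoMixin<MyAnalysis>;
//     static AnalysisKey Key; // Defined exactly once in a .cpp file.
//
//   public:
//     struct Result { /* computed information */ };
//     Result run(Function &F, FunctionAnalysisManager &AM);
//   };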

namespace detail {

/// Actual unpacker of extra arguments in getAnalysisResult; passes only those
/// tuple arguments that are mentioned in the index_sequence.
template <typename PassT, typename IRUnitT, typename AnalysisManagerT,
          typename... ArgTs, size_t... Ns>
typename PassT::Result
getAnalysisResultUnpackTuple(AnalysisManagerT &AM, IRUnitT &IR,
                             std::tuple<ArgTs...> Args,
                             std::index_sequence<Ns...>) {
  (void)Args;
  return AM.template getResult<PassT>(IR, std::get<Ns>(Args)...);
}

/// Helper for *partial* unpacking of extra arguments in getAnalysisResult.
///
/// The arguments passed in the tuple come from the PassManager, so they may
/// contain extra arguments beyond the AnalysisManager's ExtraArgTs; only that
/// prefix is forwarded to getResult.
template <typename PassT, typename IRUnitT, typename... AnalysisArgTs,
          typename... MainArgTs>
typename PassT::Result
getAnalysisResult(AnalysisManager<IRUnitT, AnalysisArgTs...> &AM, IRUnitT &IR,
                  std::tuple<MainArgTs...> Args) {
  return (getAnalysisResultUnpackTuple<
          PassT, IRUnitT>)(AM, IR, Args,
                           std::index_sequence_for<AnalysisArgTs...>{});
}

} // namespace detail

// Forward declare the pass instrumentation analysis explicitly queried in
// generic PassManager code.
// FIXME: figure out a way to move PassInstrumentationAnalysis into its own
// header.
class PassInstrumentationAnalysis;

/// Manages a sequence of passes over a particular unit of IR.
///
/// A pass manager contains a sequence of passes to run over a particular unit
/// of IR (e.g. Functions, Modules). It is itself a valid pass over that unit of
/// IR, and when run over some given IR will run each of its contained passes in
/// sequence. Pass managers are the primary and most basic building block of a
/// pass pipeline.
///
/// When you run a pass manager, you provide an \c AnalysisManager<IRUnitT>
/// argument. The pass manager will propagate that analysis manager to each
/// pass it runs, and will call the analysis manager's invalidation routine with
/// the PreservedAnalyses of each pass it runs.
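///
/// Example of building and running one (an illustrative sketch; MyPass is
/// hypothetical and FAM is an already-populated FunctionAnalysisManager):
/// \code
///   FunctionPassManager FPM;
///   FPM.addPass(MyPass());
///   PreservedAnalyses PA = FPM.run(F, FAM);
/// \endcode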
template <typename IRUnitT,
          typename AnalysisManagerT = AnalysisManager<IRUnitT>,
          typename... ExtraArgTs>
class PassManager : public PassInfoMixin<
                        PassManager<IRUnitT, AnalysisManagerT, ExtraArgTs...>> {
public:
  /// Construct a pass manager.
  explicit PassManager() = default;

  // FIXME: These are equivalent to the default move constructor/move
  // assignment. However, using = default triggers linker errors due to the
  // explicit instantiations below. Find a way to use the default and remove the
  // duplicated code here.
  PassManager(PassManager &&Arg) : Passes(std::move(Arg.Passes)) {}

  PassManager &operator=(PassManager &&RHS) {
    Passes = std::move(RHS.Passes);
    return *this;
  }

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
      auto *P = Passes[Idx].get();
      P->printPipeline(OS, MapClassName2PassName);
      if (Idx + 1 < Size)
        OS << ',';
    }
  }

  /// Run all of the passes in this manager over the given unit of IR.
  /// ExtraArgs are passed to each pass.
  PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
                        ExtraArgTs... ExtraArgs) {
    PreservedAnalyses PA = PreservedAnalyses::all();

    // Request PassInstrumentation from the analysis manager; we will use it to
    // run the instrumenting callbacks for the passes later. Here we use a
    // std::tuple wrapper over getResult, which helps extract the
    // AnalysisManager's arguments out of the whole ExtraArgs set.
    PassInstrumentation PI =
        detail::getAnalysisResult<PassInstrumentationAnalysis>(
            AM, IR, std::tuple<ExtraArgTs...>(ExtraArgs...));

    for (auto &Pass : Passes) {
      // Check the PassInstrumentation's BeforePass callbacks before running the
      // pass, skip its execution completely if asked to (callback returns
      // false).
      if (!PI.runBeforePass<IRUnitT>(*Pass, IR))
        continue;

      PreservedAnalyses PassPA = Pass->run(IR, AM, ExtraArgs...);

      // Update the analysis manager as each pass runs and potentially
      // invalidates analyses.
      AM.invalidate(IR, PassPA);

      // Call onto PassInstrumentation's AfterPass callbacks immediately after
      // running the pass.
      PI.runAfterPass<IRUnitT>(*Pass, IR, PassPA);

      // Finally, intersect the preserved analyses to compute the aggregate
      // preserved set for this pass manager.
      PA.intersect(std::move(PassPA));
    }

    // Invalidation was handled after each pass in the above loop for the
    // current unit of IR. Therefore, the remaining analysis results in the
    // AnalysisManager are preserved. We mark this with a set so that we don't
    // need to inspect each one individually.
    PA.preserveSet<AllAnalysesOn<IRUnitT>>();

    return PA;
  }

  template <typename PassT>
  LLVM_ATTRIBUTE_MINSIZE
      std::enable_if_t<!std::is_same<PassT, PassManager>::value>
      addPass(PassT &&Pass) {
    using PassModelT =
        detail::PassModel<IRUnitT, PassT, PreservedAnalyses, AnalysisManagerT,
                          ExtraArgTs...>;
    // Do not use make_unique or emplace_back, they cause too many template
    // instantiations, causing terrible compile times.
    Passes.push_back(std::unique_ptr<PassConceptT>(
        new PassModelT(std::forward<PassT>(Pass))));
  }

  /// When adding a pass manager pass that has the same type as this pass
  /// manager, simply move the passes over. This is because we don't have use
  /// cases that rely on executing nested pass managers. Doing this could reduce
  /// implementation complexity and avoid potential invalidation issues that may
  /// happen with nested pass managers of the same type.
  template <typename PassT>
  LLVM_ATTRIBUTE_MINSIZE
      std::enable_if_t<std::is_same<PassT, PassManager>::value>
      addPass(PassT &&Pass) {
    for (auto &P : Pass.Passes)
      Passes.push_back(std::move(P));
  }

  /// Returns true if the pass manager contains no passes.
  bool isEmpty() const { return Passes.empty(); }

  static bool isRequired() { return true; }

protected:
  using PassConceptT =
      detail::PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...>;

  std::vector<std::unique_ptr<PassConceptT>> Passes;
};

extern template class PassManager<Module>;

/// Convenience typedef for a pass manager over modules.
using ModulePassManager = PassManager<Module>;

extern template class PassManager<Function>;

/// Convenience typedef for a pass manager over functions.
using FunctionPassManager = PassManager<Function>;

/// Pseudo-analysis pass that exposes the \c PassInstrumentation to pass
/// managers. It goes before the AnalysisManager definition to provide its
/// internals (e.g. PassInstrumentationAnalysis::ID) for use there if needed.
/// FIXME: figure out a way to move PassInstrumentationAnalysis into its own
/// header.
class PassInstrumentationAnalysis
    : public AnalysisInfoMixin<PassInstrumentationAnalysis> {
  friend AnalysisInfoMixin<PassInstrumentationAnalysis>;
  static AnalysisKey Key;

  PassInstrumentationCallbacks *Callbacks;

public:
  /// PassInstrumentationCallbacks object is shared, owned by something else,
  /// not this analysis.
  PassInstrumentationAnalysis(PassInstrumentationCallbacks *Callbacks = nullptr)
      : Callbacks(Callbacks) {}

  using Result = PassInstrumentation;

  template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
  Result run(IRUnitT &, AnalysisManagerT &, ExtraArgTs &&...) {
    return PassInstrumentation(Callbacks);
  }
};

/// A container for analyses that lazily runs them and caches their
/// results.
///
/// This class can manage analyses for any IR unit where the address of the IR
/// unit suffices as its identity.
template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager {
public:
  class Invalidator;

private:
  // Now that we've defined our invalidator, we can define the concept types.
  using ResultConceptT =
      detail::AnalysisResultConcept<IRUnitT, PreservedAnalyses, Invalidator>;
  using PassConceptT =
      detail::AnalysisPassConcept<IRUnitT, PreservedAnalyses, Invalidator,
                                  ExtraArgTs...>;

  /// List of analysis pass IDs and associated concept pointers.
  ///
  /// Requires iterators to be valid across appending new entries and arbitrary
  /// erases. Provides the analysis ID to enable finding iterators to a given
  /// entry in maps below, and provides the storage for the actual result
  /// concept.
  using AnalysisResultListT =
      std::list<std::pair<AnalysisKey *, std::unique_ptr<ResultConceptT>>>;

  /// Map type from IRUnitT pointer to our custom list type.
  using AnalysisResultListMapT = DenseMap<IRUnitT *, AnalysisResultListT>;

  /// Map type from a pair of analysis ID and IRUnitT pointer to an
  /// iterator into a particular result list (which is where the actual analysis
  /// result is stored).
  using AnalysisResultMapT =
      DenseMap<std::pair<AnalysisKey *, IRUnitT *>,
               typename AnalysisResultListT::iterator>;

public:
  /// API to communicate dependencies between analyses during invalidation.
  ///
  /// When an analysis result embeds handles to other analysis results, it
  /// needs to be invalidated both when its own information isn't preserved and
  /// when any of its embedded analysis results end up invalidated. We pass an
  /// \c Invalidator object as an argument to \c invalidate() in order to let
  /// the analysis results themselves define the dependency graph on the fly.
  /// This lets us avoid building an explicit representation of the
  /// dependencies between analysis results.
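  ///
  /// For example, a result that embeds a DominatorTree might implement its
  /// invalidate method like this (an illustrative sketch; MyResult is
  /// hypothetical):
  /// \code
  ///   bool MyResult::invalidate(Function &F, const PreservedAnalyses &PA,
  ///                             FunctionAnalysisManager::Invalidator &Inv) {
  ///     // We are stale if our dependency was invalidated.
  ///     return Inv.invalidate<DominatorTreeAnalysis>(F, PA);
  ///   }
  /// \endcode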
  class Invalidator {
  public:
    /// Trigger the invalidation of some other analysis pass if not already
    /// handled and return whether it was in fact invalidated.
    ///
    /// This is expected to be called from within a given analysis result's \c
    /// invalidate method to trigger a depth-first walk of all inter-analysis
    /// dependencies. The same \p IR unit and \p PA passed to that result's \c
    /// invalidate method should in turn be provided to this routine.
    ///
    /// The first time this is called for a given analysis pass, it will call
    /// the corresponding result's \c invalidate method.  Subsequent calls will
    /// use a cache of the results of that initial call.  It is an error to form
    /// cyclic dependencies between analysis results.
    ///
    /// This returns true if the given analysis's result is invalid. Any
    /// dependencies on it will become invalid as a result.
    template <typename PassT>
    bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) {
      using ResultModelT =
          detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
                                      PreservedAnalyses, Invalidator>;

      return invalidateImpl<ResultModelT>(PassT::ID(), IR, PA);
    }

    /// A type-erased variant of the above invalidate method with the same core
    /// API other than passing an analysis ID rather than an analysis type
    /// parameter.
    ///
    /// This is sadly less efficient than the above routine, which leverages
    /// the type parameter to avoid the type erasure overhead.
    bool invalidate(AnalysisKey *ID, IRUnitT &IR, const PreservedAnalyses &PA) {
      return invalidateImpl<>(ID, IR, PA);
    }

  private:
    friend class AnalysisManager;

    template <typename ResultT = ResultConceptT>
    bool invalidateImpl(AnalysisKey *ID, IRUnitT &IR,
                        const PreservedAnalyses &PA) {
      // If we've already visited this pass, return true if it was invalidated
      // and false otherwise.
      auto IMapI = IsResultInvalidated.find(ID);
      if (IMapI != IsResultInvalidated.end())
        return IMapI->second;

      // Otherwise look up the result object.
      auto RI = Results.find({ID, &IR});
      assert(RI != Results.end() &&
             "Trying to invalidate a dependent result that isn't in the "
             "manager's cache is always an error, likely due to a stale result "
             "handle!");

      auto &Result = static_cast<ResultT &>(*RI->second->second);

      // Insert into the map whether the result should be invalidated and return
      // that. Note that we cannot reuse IMapI and must do a fresh insert here,
      // as calling invalidate could (recursively) insert things into the map,
      // making any iterator or reference invalid.
      bool Inserted;
      std::tie(IMapI, Inserted) =
          IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, *this)});
      (void)Inserted;
      assert(Inserted && "Should not have already inserted this ID, likely "
                         "indicates a dependency cycle!");
      return IMapI->second;
    }

    Invalidator(SmallDenseMap<AnalysisKey *, bool, 8> &IsResultInvalidated,
                const AnalysisResultMapT &Results)
        : IsResultInvalidated(IsResultInvalidated), Results(Results) {}

    SmallDenseMap<AnalysisKey *, bool, 8> &IsResultInvalidated;
    const AnalysisResultMapT &Results;
  };

  /// Construct an empty analysis manager.
  AnalysisManager();
  AnalysisManager(AnalysisManager &&);
  AnalysisManager &operator=(AnalysisManager &&);

  /// Returns true if the analysis manager has an empty results cache.
  bool empty() const {
    assert(AnalysisResults.empty() == AnalysisResultLists.empty() &&
           "The storage and index of analysis results disagree on how many "
           "there are!");
    return AnalysisResults.empty();
  }

  /// Clear any cached analysis results for a single unit of IR.
  ///
  /// This doesn't invalidate, but instead simply deletes, the relevant results.
  /// It is useful when the IR is being removed and we want to clear out all the
  /// memory pinned for it.
  void clear(IRUnitT &IR, llvm::StringRef Name);

  /// Clear all analysis results cached by this AnalysisManager.
  ///
  /// Like \c clear(IRUnitT&), this doesn't invalidate the results; it simply
  /// deletes them.  This lets you clean up the AnalysisManager when the set of
  /// IR units itself has potentially changed, and thus we can't even look up
  /// a result and invalidate/clear it directly.
  void clear() {
    AnalysisResults.clear();
    AnalysisResultLists.clear();
  }

  /// Get the result of an analysis pass for a given IR unit.
  ///
  /// Runs the analysis if a cached result is not available.
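  ///
  /// For example (illustrative; the analysis must have been registered with
  /// this manager beforehand):
  /// \code
  ///   auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  /// \endcode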
  template <typename PassT>
  typename PassT::Result &getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs) {
    assert(AnalysisPasses.count(PassT::ID()) &&
           "This analysis pass was not registered prior to being queried");
    ResultConceptT &ResultConcept =
        getResultImpl(PassT::ID(), IR, ExtraArgs...);

    using ResultModelT =
        detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
                                    PreservedAnalyses, Invalidator>;

    return static_cast<ResultModelT &>(ResultConcept).Result;
  }

  /// Get the cached result of an analysis pass for a given IR unit.
  ///
  /// This method never runs the analysis.
  ///
  /// \returns null if there is no cached result.
  template <typename PassT>
  typename PassT::Result *getCachedResult(IRUnitT &IR) const {
    assert(AnalysisPasses.count(PassT::ID()) &&
           "This analysis pass was not registered prior to being queried");

    ResultConceptT *ResultConcept = getCachedResultImpl(PassT::ID(), IR);
    if (!ResultConcept)
      return nullptr;

    using ResultModelT =
        detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
                                    PreservedAnalyses, Invalidator>;

    return &static_cast<ResultModelT *>(ResultConcept)->Result;
  }

  /// Verify that the given Result cannot be invalidated, assert otherwise.
  template <typename PassT>
  void verifyNotInvalidated(IRUnitT &IR, typename PassT::Result *Result) const {
    PreservedAnalyses PA = PreservedAnalyses::none();
    SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
    Invalidator Inv(IsResultInvalidated, AnalysisResults);
    assert(!Result->invalidate(IR, PA, Inv) &&
           "Cached result cannot be invalidated");
  }

  /// Register an analysis pass with the manager.
  ///
  /// The parameter is a callable whose result is an analysis pass. This allows
  /// passing in a lambda to construct the analysis.
  ///
  /// The analysis type to register is the type returned by calling the \c
  /// PassBuilder argument. If that type has already been registered, then the
  /// argument will not be called and this function will return false.
  /// Otherwise, we register the analysis returned by calling \c PassBuilder(),
  /// and this function returns true.
  ///
  /// (Note: Although the return value of this function indicates whether or not
  /// an analysis was previously registered, there intentionally isn't a way to
  /// query this directly.  Instead, you should just register all the analyses
  /// you might want and let this class run them lazily.  This idiom lets us
  /// minimize the number of times we have to look up analyses in our
  /// hashtable.)
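  ///
  /// For example (an illustrative sketch; MyAnalysis is hypothetical):
  /// \code
  ///   FunctionAnalysisManager FAM;
  ///   FAM.registerPass([&] { return MyAnalysis(); });
  /// \endcode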
  template <typename PassBuilderT>
  bool registerPass(PassBuilderT &&PassBuilder) {
    using PassT = decltype(PassBuilder());
    using PassModelT =
        detail::AnalysisPassModel<IRUnitT, PassT, PreservedAnalyses,
                                  Invalidator, ExtraArgTs...>;

    auto &PassPtr = AnalysisPasses[PassT::ID()];
    if (PassPtr)
      // Already registered this pass type!
      return false;

    // Construct a new model around the instance returned by the builder.
    PassPtr.reset(new PassModelT(PassBuilder()));
    return true;
  }

  /// Invalidate cached analyses for an IR unit.
  ///
  /// Walk through all of the analyses pertaining to this unit of IR and
  /// invalidate them, unless they are preserved by the PreservedAnalyses set.
  void invalidate(IRUnitT &IR, const PreservedAnalyses &PA);

private:
  /// Look up a registered analysis pass.
  PassConceptT &lookUpPass(AnalysisKey *ID) {
    typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(ID);
    assert(PI != AnalysisPasses.end() &&
           "Analysis passes must be registered prior to being queried!");
    return *PI->second;
  }

  /// Look up a registered analysis pass.
  const PassConceptT &lookUpPass(AnalysisKey *ID) const {
    typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(ID);
    assert(PI != AnalysisPasses.end() &&
           "Analysis passes must be registered prior to being queried!");
    return *PI->second;
  }

  /// Get an analysis result, running the pass if necessary.
  ResultConceptT &getResultImpl(AnalysisKey *ID, IRUnitT &IR,
                                ExtraArgTs... ExtraArgs);

  /// Get a cached analysis result or return null.
  ResultConceptT *getCachedResultImpl(AnalysisKey *ID, IRUnitT &IR) const {
    typename AnalysisResultMapT::const_iterator RI =
        AnalysisResults.find({ID, &IR});
    return RI == AnalysisResults.end() ? nullptr : &*RI->second->second;
  }

  /// Map type from analysis pass ID to pass concept pointer.
  using AnalysisPassMapT =
      DenseMap<AnalysisKey *, std::unique_ptr<PassConceptT>>;

  /// Collection of analysis passes, indexed by ID.
  AnalysisPassMapT AnalysisPasses;

  /// Map from IR unit to a list of analysis results.
  ///
  /// Provides linear time removal of all analysis results for an IR unit and
  /// the ultimate storage for a particular cached analysis result.
  AnalysisResultListMapT AnalysisResultLists;

  /// Map from an analysis ID and IR unit to a particular cached
  /// analysis result.
  AnalysisResultMapT AnalysisResults;
};

extern template class AnalysisManager<Module>;

/// Convenience typedef for the Module analysis manager.
using ModuleAnalysisManager = AnalysisManager<Module>;

extern template class AnalysisManager<Function>;

/// Convenience typedef for the Function analysis manager.
using FunctionAnalysisManager = AnalysisManager<Function>;

/// An analysis over an "outer" IR unit that provides access to an
/// analysis manager over an "inner" IR unit.  The inner unit must be contained
/// in the outer unit.
///
/// For example, InnerAnalysisManagerProxy<FunctionAnalysisManager, Module> is
/// an analysis over Modules (the "outer" unit) that provides access to a
/// Function analysis manager.  The FunctionAnalysisManager is the "inner"
/// manager being proxied, and Functions are the "inner" unit.  The inner/outer
/// relationship is valid because each Function is contained in one Module.
///
/// If you're (transitively) within a pass manager for an IR unit U that
/// contains IR unit V, you should never use an analysis manager over V, except
/// via one of these proxies.
///
/// Note that the proxy's result is a move-only RAII object.  The validity of
/// the analyses in the inner analysis manager is tied to its lifetime.
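///
/// For example, a module pass reaches the function analysis manager like this
/// (an illustrative sketch):
/// \code
///   FunctionAnalysisManager &FAM =
///       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
/// \endcode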
template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
class InnerAnalysisManagerProxy
    : public AnalysisInfoMixin<
          InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>> {
public:
  class Result {
  public:
    explicit Result(AnalysisManagerT &InnerAM) : InnerAM(&InnerAM) {}

    Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)) {
      // We have to null out the analysis manager in the moved-from state
      // because we are taking ownership of the responsibility to clear the
      // analysis state.
      Arg.InnerAM = nullptr;
    }

    ~Result() {
      // InnerAM is null in a moved-from state, in which case there is
      // nothing to do.
      if (!InnerAM)
        return;

      // Clear out the analysis manager if we're being destroyed -- it means we
      // didn't even see an invalidate call when we got invalidated.
      InnerAM->clear();
    }

    Result &operator=(Result &&RHS) {
      InnerAM = RHS.InnerAM;
      // We have to null out the analysis manager in the moved-from state
      // because we are taking ownership of the responsibility to clear the
      // analysis state.
      RHS.InnerAM = nullptr;
      return *this;
    }

    /// Accessor for the analysis manager.
    AnalysisManagerT &getManager() { return *InnerAM; }

    /// Handler for invalidation of the outer IR unit, \c IRUnitT.
    ///
    /// If the proxy analysis itself is not preserved, we assume that the set of
    /// inner IR objects contained in IRUnit may have changed.  In this case,
    /// we have to call \c clear() on the inner analysis manager, as it may now
    /// have stale pointers to its inner IR objects.
    ///
    /// Regardless of whether the proxy analysis is marked as preserved, all of
    /// the analyses in the inner analysis manager are potentially invalidated
    /// based on the set of preserved analyses.
    bool invalidate(
        IRUnitT &IR, const PreservedAnalyses &PA,
        typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv);

  private:
    AnalysisManagerT *InnerAM;
  };

  explicit InnerAnalysisManagerProxy(AnalysisManagerT &InnerAM)
      : InnerAM(&InnerAM) {}

  /// Run the analysis pass and create our proxy result object.
  ///
  /// This doesn't do any interesting work; it is primarily used to insert our
  /// proxy result object into the outer analysis cache so that we can proxy
  /// invalidation to the inner analysis manager.
  Result run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
             ExtraArgTs...) {
    return Result(*InnerAM);
  }

private:
  friend AnalysisInfoMixin<
      InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT>>;

  static AnalysisKey Key;

  AnalysisManagerT *InnerAM;
};

template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
AnalysisKey
    InnerAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>::Key;

/// Provide the \c FunctionAnalysisManager to \c Module proxy.
using FunctionAnalysisManagerModuleProxy =
    InnerAnalysisManagerProxy<FunctionAnalysisManager, Module>;

/// Specialization of the invalidate method for the \c
/// FunctionAnalysisManagerModuleProxy's result.
template <>
bool FunctionAnalysisManagerModuleProxy::Result::invalidate(
    Module &M, const PreservedAnalyses &PA,
    ModuleAnalysisManager::Invalidator &Inv);

// Ensure the \c FunctionAnalysisManagerModuleProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
                                                Module>;

/// An analysis over an "inner" IR unit that provides access to an
/// analysis manager over an "outer" IR unit.  The inner unit must be contained
/// in the outer unit.
///
/// For example OuterAnalysisManagerProxy<ModuleAnalysisManager, Function> is an
/// analysis over Functions (the "inner" unit) which provides access to a Module
/// analysis manager.  The ModuleAnalysisManager is the "outer" manager being
/// proxied, and Modules are the "outer" IR unit.  The inner/outer relationship
/// is valid because each Function is contained in one Module.
///
/// This proxy only exposes the const interface of the outer analysis manager,
/// to indicate that you cannot cause an outer analysis to run from within an
/// inner pass.  Instead, you must rely on the \c getCachedResult API.  This is
/// due to keeping potential future concurrency in mind. To give an example,
/// running a module analysis before any function passes may give a different
/// result than running it in a function pass. Both may be valid, but it would
/// produce non-deterministic results. GlobalsAA is a good analysis example,
/// because the cached information has the mod/ref info for all memory for each
/// function at the time the analysis was computed. The information is still
/// valid after a function transformation, but it may be *different* if
/// recomputed after that transform. GlobalsAA is never invalidated.
///
/// This proxy doesn't manage invalidation in any way -- that is handled by the
/// recursive return path of each layer of the pass manager.  A consequence of
/// this is the outer analyses may be stale.  We invalidate the outer analyses
/// only when we're done running passes over the inner IR units.
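///
/// For example, a function pass consults a cached module analysis like this
/// (an illustrative sketch; SomeModuleAnalysis is hypothetical):
/// \code
///   const auto &MAMProxy =
///       AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
///   if (auto *R =
///           MAMProxy.getCachedResult<SomeModuleAnalysis>(*F.getParent()))
///     use(*R);
/// \endcode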
template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
class OuterAnalysisManagerProxy
    : public AnalysisInfoMixin<
          OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>> {
public:
  /// Result proxy object for \c OuterAnalysisManagerProxy.
  class Result {
  public:
    explicit Result(const AnalysisManagerT &OuterAM) : OuterAM(&OuterAM) {}

    /// Get a cached analysis. If the analysis can be invalidated, this will
    /// assert.
    template <typename PassT, typename IRUnitTParam>
    typename PassT::Result *getCachedResult(IRUnitTParam &IR) const {
      typename PassT::Result *Res =
          OuterAM->template getCachedResult<PassT>(IR);
      if (Res)
        OuterAM->template verifyNotInvalidated<PassT>(IR, Res);
      return Res;
    }

    /// Method provided for unit testing, not intended for general use.
    template <typename PassT, typename IRUnitTParam>
    bool cachedResultExists(IRUnitTParam &IR) const {
      typename PassT::Result *Res =
          OuterAM->template getCachedResult<PassT>(IR);
      return Res != nullptr;
    }

    /// When invalidation occurs, remove any registered invalidation events.
    bool invalidate(
        IRUnitT &IRUnit, const PreservedAnalyses &PA,
        typename AnalysisManager<IRUnitT, ExtraArgTs...>::Invalidator &Inv) {
      // Loop over the set of registered outer invalidation mappings and if any
      // of them map to an analysis that is now invalid, clear it out.
      SmallVector<AnalysisKey *, 4> DeadKeys;
      for (auto &KeyValuePair : OuterAnalysisInvalidationMap) {
        AnalysisKey *OuterID = KeyValuePair.first;
        auto &InnerIDs = KeyValuePair.second;
        llvm::erase_if(InnerIDs, [&](AnalysisKey *InnerID) {
          return Inv.invalidate(InnerID, IRUnit, PA);
        });
        if (InnerIDs.empty())
          DeadKeys.push_back(OuterID);
      }

      for (auto *OuterID : DeadKeys)
        OuterAnalysisInvalidationMap.erase(OuterID);

      // The proxy itself remains valid regardless of anything else.
      return false;
    }

    /// Register a deferred invalidation event for when the outer analysis
    /// manager processes its invalidations.
    template <typename OuterAnalysisT, typename InvalidatedAnalysisT>
    void registerOuterAnalysisInvalidation() {
      AnalysisKey *OuterID = OuterAnalysisT::ID();
      AnalysisKey *InvalidatedID = InvalidatedAnalysisT::ID();

      auto &InvalidatedIDList = OuterAnalysisInvalidationMap[OuterID];
      // Note, this is a linear scan. If we end up with large numbers of
      // analyses that all trigger invalidation on the same outer analysis,
      // this entire system should be changed to some other deterministic
      // data structure such as a `SetVector` of a pair of pointers.
      if (!llvm::is_contained(InvalidatedIDList, InvalidatedID))
        InvalidatedIDList.push_back(InvalidatedID);
    }

    /// Access the map from outer analyses to deferred invalidation requiring
    /// analyses.
    const SmallDenseMap<AnalysisKey *, TinyPtrVector<AnalysisKey *>, 2> &
    getOuterInvalidations() const {
      return OuterAnalysisInvalidationMap;
    }

  private:
    const AnalysisManagerT *OuterAM;

    /// A map from an outer analysis ID to the set of this IR-unit's analyses
    /// which need to be invalidated.
    SmallDenseMap<AnalysisKey *, TinyPtrVector<AnalysisKey *>, 2>
        OuterAnalysisInvalidationMap;
  };

  OuterAnalysisManagerProxy(const AnalysisManagerT &OuterAM)
      : OuterAM(&OuterAM) {}

  /// Run the analysis pass and create our proxy result object.
  /// Nothing to see here, it just forwards the \c OuterAM reference into the
  /// result.
  Result run(IRUnitT &, AnalysisManager<IRUnitT, ExtraArgTs...> &,
             ExtraArgTs...) {
    return Result(*OuterAM);
  }

private:
  friend AnalysisInfoMixin<
      OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>>;

  static AnalysisKey Key;

  const AnalysisManagerT *OuterAM;
};

template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
AnalysisKey
    OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>::Key;

extern template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                                Function>;
/// Provide the \c ModuleAnalysisManager to \c Function proxy.
using ModuleAnalysisManagerFunctionProxy =
    OuterAnalysisManagerProxy<ModuleAnalysisManager, Function>;

/// Trivial adaptor that maps from a module to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a ModulePassManager, by running the FunctionPass(Manager) over every
/// function in the module.
///
/// Function passes run within this adaptor can rely on having exclusive access
/// to the function they are run over. They should not read or modify any other
/// functions! Other threads or systems may be manipulating other functions in
/// the module, and so their state should never be relied on.
/// FIXME: Make the above true for all of LLVM's actual passes, some still
/// violate this principle.
///
/// Function passes can also read the module containing the function, but they
/// should not modify that module outside of the use lists of various globals.
/// For example, a function pass is not permitted to add functions to the
/// module.
/// FIXME: Make the above true for all of LLVM's actual passes, some still
/// violate this principle.
///
/// Note that although function passes can access module analyses, module
/// analyses are not invalidated while the function passes are running, so they
/// may be stale.  Function analyses will not be stale.
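///
/// Typical usage (an illustrative sketch; MyFunctionPass is hypothetical):
/// \code
///   ModulePassManager MPM;
///   MPM.addPass(createModuleToFunctionPassAdaptor(MyFunctionPass()));
/// \endcode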
class ModuleToFunctionPassAdaptor
    : public PassInfoMixin<ModuleToFunctionPassAdaptor> {
public:
  using PassConceptT = detail::PassConcept<Function, FunctionAnalysisManager>;

  explicit ModuleToFunctionPassAdaptor(std::unique_ptr<PassConceptT> Pass,
                                       bool EagerlyInvalidate)
      : Pass(std::move(Pass)), EagerlyInvalidate(EagerlyInvalidate) {}

  /// Runs the function pass across every function in the module.
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName);

  static bool isRequired() { return true; }

private:
  std::unique_ptr<PassConceptT> Pass;
  bool EagerlyInvalidate;
};

/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
ModuleToFunctionPassAdaptor
createModuleToFunctionPassAdaptor(FunctionPassT &&Pass,
                                  bool EagerlyInvalidate = false) {
  using PassModelT =
      detail::PassModel<Function, FunctionPassT, PreservedAnalyses,
                        FunctionAnalysisManager>;
  // Do not use make_unique, it causes too many template instantiations,
  // causing terrible compile times.
  return ModuleToFunctionPassAdaptor(
      std::unique_ptr<ModuleToFunctionPassAdaptor::PassConceptT>(
          new PassModelT(std::forward<FunctionPassT>(Pass))),
      EagerlyInvalidate);
}

/// A utility pass template to force an analysis result to be available.
///
/// If there are extra arguments at the pass's run level there may also be
/// extra arguments to the analysis manager's \c getResult routine. We can't
/// guess how to effectively map the arguments from one to the other, and so
/// this specialization just ignores them.
///
/// Specific patterns of run-method extra arguments and analysis manager extra
/// arguments will have to be defined as appropriate specializations.
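///
/// For example, to force a module analysis to be computed at a given point in
/// a pipeline (illustrative; MyModuleAnalysis is hypothetical):
/// \code
///   MPM.addPass(RequireAnalysisPass<MyModuleAnalysis, Module>());
/// \endcode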
template <typename AnalysisT, typename IRUnitT,
          typename AnalysisManagerT = AnalysisManager<IRUnitT>,
          typename... ExtraArgTs>
struct RequireAnalysisPass
    : PassInfoMixin<RequireAnalysisPass<AnalysisT, IRUnitT, AnalysisManagerT,
                                        ExtraArgTs...>> {
  /// Run this pass over some unit of IR.
  ///
  /// This pass can be run over any unit of IR and use any analysis manager
  /// provided they satisfy the basic API requirements. When this pass is
  /// created, these methods can be instantiated to satisfy whatever the
  /// context requires.
  PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM,
                        ExtraArgTs &&... Args) {
    (void)AM.template getResult<AnalysisT>(Arg,
                                           std::forward<ExtraArgTs>(Args)...);

    return PreservedAnalyses::all();
  }
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    auto ClassName = AnalysisT::name();
    auto PassName = MapClassName2PassName(ClassName);
    OS << "require<" << PassName << '>';
  }
  static bool isRequired() { return true; }
};

/// A no-op pass template which simply forces a specific analysis result
/// to be invalidated.
template <typename AnalysisT>
struct InvalidateAnalysisPass
    : PassInfoMixin<InvalidateAnalysisPass<AnalysisT>> {
  /// Run this pass over some unit of IR.
  ///
  /// This pass can be run over any unit of IR and use any analysis manager,
  /// provided they satisfy the basic API requirements. When this pass is
  /// created, these methods can be instantiated to satisfy whatever the
  /// context requires.
  template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
  PreservedAnalyses run(IRUnitT &Arg, AnalysisManagerT &AM, ExtraArgTs &&...) {
    auto PA = PreservedAnalyses::all();
    PA.abandon<AnalysisT>();
    return PA;
  }
  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    auto ClassName = AnalysisT::name();
    auto PassName = MapClassName2PassName(ClassName);
    OS << "invalidate<" << PassName << '>';
  }
};

/// A utility pass that does nothing, but preserves no analyses.
///
/// Because this preserves no analyses, any analysis passes queried after this
/// pass runs will recompute fresh results.
struct InvalidateAllAnalysesPass : PassInfoMixin<InvalidateAllAnalysesPass> {
  /// Run this pass over some unit of IR.
  template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
  PreservedAnalyses run(IRUnitT &, AnalysisManagerT &, ExtraArgTs &&...) {
    return PreservedAnalyses::none();
  }
};

/// A utility pass template that simply runs another pass multiple times.
///
/// This can be useful when debugging or testing passes. It also serves as an
/// example of how to extend the pass manager in ways beyond composition.
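///
/// For example (an illustrative sketch; MyPass is hypothetical):
/// \code
///   FPM.addPass(createRepeatedPass(3, MyPass()));
/// \endcode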
template <typename PassT>
class RepeatedPass : public PassInfoMixin<RepeatedPass<PassT>> {
public:
  RepeatedPass(int Count, PassT &&P)
      : Count(Count), P(std::forward<PassT>(P)) {}

  template <typename IRUnitT, typename AnalysisManagerT, typename... Ts>
  PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM, Ts &&... Args) {

    // Request PassInstrumentation from the analysis manager; we will use it to
    // run the instrumenting callbacks for the passes later. Here we use a
    // std::tuple wrapper over getResult, which helps extract the
    // AnalysisManager's arguments out of the whole Args set.
    PassInstrumentation PI =
        detail::getAnalysisResult<PassInstrumentationAnalysis>(
            AM, IR, std::tuple<Ts...>(Args...));

    auto PA = PreservedAnalyses::all();
    for (int i = 0; i < Count; ++i) {
      // Check the PassInstrumentation's BeforePass callbacks before running the
      // pass, skip its execution completely if asked to (callback returns
      // false).
      if (!PI.runBeforePass<IRUnitT>(P, IR))
        continue;
      PreservedAnalyses IterPA = P.run(IR, AM, std::forward<Ts>(Args)...);
      PA.intersect(IterPA);
      PI.runAfterPass(P, IR, IterPA);
    }
    return PA;
  }

  void printPipeline(raw_ostream &OS,
                     function_ref<StringRef(StringRef)> MapClassName2PassName) {
    OS << "repeat<" << Count << ">(";
    P.printPipeline(OS, MapClassName2PassName);
    OS << ')';
  }

private:
  int Count;
  PassT P;
};

template <typename PassT>
RepeatedPass<PassT> createRepeatedPass(int Count, PassT &&P) {
  return RepeatedPass<PassT>(Count, std::forward<PassT>(P));
}

} // end namespace llvm

#endif // LLVM_IR_PASSMANAGER_H
PKjwFZ�ݬ99IR/IntrinsicsSPIRV.tdnu�[���//===- IntrinsicsSPIRV.td - Defines SPIRV intrinsics -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the SPIRV-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "spv" in {
  def int_spv_assign_type : Intrinsic<[], [llvm_any_ty, llvm_metadata_ty]>;
  def int_spv_assign_name : Intrinsic<[], [llvm_any_ty, llvm_vararg_ty]>;

  def int_spv_track_constant : Intrinsic<[llvm_any_ty], [llvm_any_ty, llvm_metadata_ty]>;
  def int_spv_init_global : Intrinsic<[], [llvm_any_ty, llvm_any_ty]>;
  def int_spv_unref_global : Intrinsic<[], [llvm_any_ty]>;

  def int_spv_gep : Intrinsic<[llvm_anyptr_ty], [llvm_i1_ty, llvm_any_ty, llvm_vararg_ty], [ImmArg<ArgIndex<0>>]>;
  def int_spv_load : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i16_ty, llvm_i8_ty], [ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
  def int_spv_store : Intrinsic<[], [llvm_any_ty, llvm_anyptr_ty, llvm_i16_ty, llvm_i8_ty], [ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<3>>]>;
  def int_spv_extractv : Intrinsic<[llvm_any_ty], [llvm_i32_ty, llvm_vararg_ty]>;
  def int_spv_insertv : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_any_ty, llvm_vararg_ty]>;
  def int_spv_extractelt : Intrinsic<[llvm_any_ty], [llvm_any_ty, llvm_anyint_ty]>;
  def int_spv_insertelt : Intrinsic<[llvm_any_ty], [llvm_any_ty, llvm_any_ty, llvm_anyint_ty]>;
  def int_spv_const_composite : Intrinsic<[llvm_i32_ty], [llvm_vararg_ty]>;
  def int_spv_bitcast : Intrinsic<[llvm_any_ty], [llvm_any_ty]>;
  def int_spv_switch : Intrinsic<[], [llvm_any_ty, llvm_vararg_ty]>;
  def int_spv_cmpxchg : Intrinsic<[llvm_i32_ty], [llvm_any_ty, llvm_vararg_ty]>;
  def int_spv_unreachable : Intrinsic<[], []>;
  def int_spv_alloca : Intrinsic<[llvm_any_ty], []>;
  def int_spv_undef : Intrinsic<[llvm_i32_ty], []>;
}
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_SPV_ENUMS_H
#define LLVM_IR_INTRINSIC_SPV_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum SPVIntrinsics : unsigned {
// Enum values for intrinsics
    spv_alloca = 9083,                                // llvm.spv.alloca
    spv_assign_name,                           // llvm.spv.assign.name
    spv_assign_type,                           // llvm.spv.assign.type
    spv_bitcast,                               // llvm.spv.bitcast
    spv_cmpxchg,                               // llvm.spv.cmpxchg
    spv_const_composite,                       // llvm.spv.const.composite
    spv_extractelt,                            // llvm.spv.extractelt
    spv_extractv,                              // llvm.spv.extractv
    spv_gep,                                   // llvm.spv.gep
    spv_init_global,                           // llvm.spv.init.global
    spv_insertelt,                             // llvm.spv.insertelt
    spv_insertv,                               // llvm.spv.insertv
    spv_load,                                  // llvm.spv.load
    spv_store,                                 // llvm.spv.store
    spv_switch,                                // llvm.spv.switch
    spv_track_constant,                        // llvm.spv.track.constant
    spv_undef,                                 // llvm.spv.undef
    spv_unreachable,                           // llvm.spv.unreachable
    spv_unref_global,                          // llvm.spv.unref.global
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
//===- IntrinsicsLoongArch.td - Defines LoongArch intrinsics *- tablegen -*===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the LoongArch-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "loongarch" in {

//===----------------------------------------------------------------------===//
// Atomics

// T @llvm.<name>.T.<p>(any*, T, T, T imm);
class MaskedAtomicRMW<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<3>>]>;

// We define 32-bit and 64-bit variants of the above, where T stands for i32
// or i64 respectively:
multiclass MaskedAtomicRMWIntrinsics {
  // i32 @llvm.<name>.i32.<p>(any*, i32, i32, i32 imm);
  def _i32 : MaskedAtomicRMW<llvm_i32_ty>;
  // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64 imm);
  def _i64 : MaskedAtomicRMW<llvm_i64_ty>;
}
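
// Five-operand variant (a sketch, assumed to parallel MaskedAtomicRMW above
// with one extra data operand):
// T @llvm.<name>.T.<p>(any*, T, T, T, T imm);
class MaskedAtomicRMWFiveArg<LLVMType itype>
    : Intrinsic<[itype], [llvm_anyptr_ty, itype, itype, itype, itype],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<4>>]>;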

multiclass MaskedAtomicRMWFiveOpIntrinsics {
  // TODO: Support cmpxchg on LA32.
  // i64 @llvm.<name>.i64.<p>(any*, i64, i64, i64, i64 imm);
  def _i64 : MaskedAtomicRMWFiveArg<llvm_i64_ty>;
}

defm int_loongarch_masked_atomicrmw_xchg : MaskedAtomicRMWIntrinsics;
defm int_loongarch_masked_atomicrmw_add : MaskedAtomicRMWIntrinsics;
defm int_loongarch_masked_atomicrmw_sub : MaskedAtomicRMWIntrinsics;
defm int_loongarch_masked_atomicrmw_nand : MaskedAtomicRMWIntrinsics;
defm int_loongarch_masked_atomicrmw_umax : MaskedAtomicRMWIntrinsics;
defm int_loongarch_masked_atomicrmw_umin : MaskedAtomicRMWIntrinsics;
defm int_loongarch_masked_atomicrmw_max : MaskedAtomicRMWFiveOpIntrinsics;
defm int_loongarch_masked_atomicrmw_min : MaskedAtomicRMWFiveOpIntrinsics;

// @llvm.loongarch.masked.cmpxchg.i64.<p>(
//   ptr addr, grlen cmpval, grlen newval, grlen mask, grlenimm ordering)
defm int_loongarch_masked_cmpxchg : MaskedAtomicRMWFiveOpIntrinsics;

//===----------------------------------------------------------------------===//
// LoongArch BASE

def int_loongarch_break : Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_loongarch_cacop_d : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
    [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
def int_loongarch_cacop_w : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
    [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>]>;
def int_loongarch_dbar : Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_loongarch_ibar : Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_loongarch_movfcsr2gr : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
                               [ImmArg<ArgIndex<0>>]>;
def int_loongarch_movgr2fcsr : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty],
                               [ImmArg<ArgIndex<0>>]>;
def int_loongarch_syscall : Intrinsic<[], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;

def int_loongarch_crc_w_b_w : Intrinsic<[llvm_i32_ty],
                                        [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_crc_w_h_w : Intrinsic<[llvm_i32_ty],
                                        [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_crc_w_w_w : Intrinsic<[llvm_i32_ty],
                                        [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_crc_w_d_w : Intrinsic<[llvm_i32_ty],
                                        [llvm_i64_ty, llvm_i32_ty]>;

def int_loongarch_crcc_w_b_w : Intrinsic<[llvm_i32_ty],
                                         [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_crcc_w_h_w : Intrinsic<[llvm_i32_ty],
                                         [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_crcc_w_w_w : Intrinsic<[llvm_i32_ty],
                                         [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_crcc_w_d_w : Intrinsic<[llvm_i32_ty],
                                         [llvm_i64_ty, llvm_i32_ty]>;

def int_loongarch_csrrd_w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
                                      [ImmArg<ArgIndex<0>>]>;
def int_loongarch_csrrd_d : Intrinsic<[llvm_i64_ty], [llvm_i32_ty],
                                      [ImmArg<ArgIndex<0>>]>;
def int_loongarch_csrwr_w : Intrinsic<[llvm_i32_ty],
                                      [llvm_i32_ty, llvm_i32_ty],
                                      [ImmArg<ArgIndex<1>>]>;
def int_loongarch_csrwr_d : Intrinsic<[llvm_i64_ty],
                                      [llvm_i64_ty, llvm_i32_ty],
                                      [ImmArg<ArgIndex<1>>]>;
def int_loongarch_csrxchg_w : Intrinsic<[llvm_i32_ty],
                                        [llvm_i32_ty, llvm_i32_ty,
                                         llvm_i32_ty],
                                        [ImmArg<ArgIndex<2>>]>;
def int_loongarch_csrxchg_d : Intrinsic<[llvm_i64_ty],
                                        [llvm_i64_ty, llvm_i64_ty,
                                         llvm_i32_ty],
                                        [ImmArg<ArgIndex<2>>]>;

def int_loongarch_iocsrrd_b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
def int_loongarch_iocsrrd_h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
def int_loongarch_iocsrrd_w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
def int_loongarch_iocsrrd_d : Intrinsic<[llvm_i64_ty], [llvm_i32_ty]>;

def int_loongarch_iocsrwr_b : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_iocsrwr_h : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_iocsrwr_w : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty]>;
def int_loongarch_iocsrwr_d : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty]>;

def int_loongarch_cpucfg : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;

def int_loongarch_asrtle_d : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty]>;
def int_loongarch_asrtgt_d : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty]>;

def int_loongarch_lddir_d : Intrinsic<[llvm_i64_ty],
                                      [llvm_i64_ty, llvm_i64_ty],
                                      [ImmArg<ArgIndex<1>>]>;
def int_loongarch_ldpte_d : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty],
                                          [ImmArg<ArgIndex<1>>]>;
} // TargetPrefix = "loongarch"
//===- llvm/CallingConv.h - LLVM Calling Conventions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines LLVM's set of calling conventions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CALLINGCONV_H
#define LLVM_IR_CALLINGCONV_H

namespace llvm {

/// CallingConv Namespace - This namespace contains an enum with a value for
/// the well-known calling conventions.
///
namespace CallingConv {

  /// LLVM IR allows the use of arbitrary numbers as calling convention
  /// identifiers.
  using ID = unsigned;

  /// A set of enums which specify the assigned numeric values for known llvm
  /// calling conventions.
  /// LLVM Calling Convention Representation
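  ///
  /// For example, a frontend can tag a function with one of these values
  /// (an illustrative sketch):
  /// \code
  ///   F->setCallingConv(CallingConv::Fast);
  /// \endcode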
  enum {
    /// The default llvm calling convention, compatible with C. This convention
    /// is the only one that supports varargs calls. As with typical C calling
    /// conventions, the callee/caller have to tolerate certain amounts of
    /// prototype mismatch.
    C = 0,

    // Generic LLVM calling conventions. None of these support varargs calls,
    // and all assume that the caller and callee prototype exactly match.

    /// Attempts to make calls as fast as possible (e.g. by passing things in
    /// registers).
    Fast = 8,

    /// Attempts to make code in the caller as efficient as possible under the
    /// assumption that the call is not commonly executed. As such, these calls
    /// often preserve all registers so that the call does not break any live
    /// ranges in the caller side.
    Cold = 9,

    /// Used by the Glasgow Haskell Compiler (GHC).
    GHC = 10,

    /// Used by the High-Performance Erlang Compiler (HiPE).
    HiPE = 11,

    /// Used for stack based JavaScript calls
    WebKit_JS = 12,

    /// Used for dynamic register based calls (e.g. stackmap and patchpoint
    /// intrinsics).
    AnyReg = 13,

    /// Used for runtime calls that preserves most registers.
    PreserveMost = 14,

    /// Used for runtime calls that preserves (almost) all registers.
    PreserveAll = 15,

    /// Calling convention for Swift.
    Swift = 16,

    /// Used for access functions.
    CXX_FAST_TLS = 17,

    /// Attempts to make calls as fast as possible while guaranteeing that tail
    /// call optimization can always be performed.
    Tail = 18,

    /// Special calling convention on Windows for calling the Control Guard
    /// Check ICall function. The function takes exactly one argument (address of
    /// the target function) passed in the first argument register, and has no
    /// return value. All register values are preserved.
    CFGuard_Check = 19,

    /// This follows the Swift calling convention in how arguments are passed
    /// but guarantees tail calls will be made by making the callee clean up
    /// their stack.
    SwiftTail = 20,

    /// This is the start of the target-specific calling conventions, e.g.
    /// fastcall and thiscall on X86.
    FirstTargetCC = 64,

    /// stdcall is mostly used by the Win32 API. It is basically the same as the
    /// C convention with the difference in that the callee is responsible for
    /// popping the arguments from the stack.
    X86_StdCall = 64,

    /// 'fast' analog of X86_StdCall. Passes the first two arguments in ECX:EDX
    /// registers, the others via the stack. The callee is responsible for stack
    /// cleaning.
    X86_FastCall = 65,

    /// ARM Procedure Calling Standard (obsolete, but still used on some
    /// targets).
    ARM_APCS = 66,

    /// ARM Architecture Procedure Calling Standard calling convention (aka
    /// EABI). Soft float variant.
    ARM_AAPCS = 67,

    /// Same as ARM_AAPCS, but uses hard floating point ABI.
    ARM_AAPCS_VFP = 68,

    /// Used for MSP430 interrupt routines.
    MSP430_INTR = 69,

    /// Similar to X86_StdCall. Passes first argument in ECX, others via stack.
    /// Callee is responsible for stack cleaning. MSVC uses this by default for
    /// methods in its ABI.
    X86_ThisCall = 70,

    /// Call to a PTX kernel. Passes all arguments in parameter space.
    PTX_Kernel = 71,

    /// Call to a PTX device function. Passes all arguments in register or
    /// parameter space.
    PTX_Device = 72,

    /// Used for SPIR non-kernel device functions. No lowering or expansion of
    /// arguments. Structures are passed as a pointer to a struct with the
    /// byval attribute. Functions can only call SPIR_FUNC and SPIR_KERNEL
    /// functions. Functions can only have zero or one return value. Variable
    /// arguments are not allowed, except for printf. How arguments and return
    /// values are lowered is not specified. Functions are only visible to the
    /// devices.
    SPIR_FUNC = 75,

    /// Used for SPIR kernel functions. Inherits the restrictions of SPIR_FUNC,
    /// except that it cannot have non-void return values or variable
    /// arguments; additionally, it can be called by the host and is externally
    /// visible.
    SPIR_KERNEL = 76,

    /// Used for Intel OpenCL built-ins.
    Intel_OCL_BI = 77,

    /// The C convention as specified in the x86-64 supplement to the System V
    /// ABI, used on most non-Windows systems.
    X86_64_SysV = 78,

    /// The C convention as implemented on Windows/x86-64 and AArch64. It
    /// differs from the more common \c X86_64_SysV convention in a number of
    /// ways, most notably in that XMM registers used to pass arguments are
    /// shadowed by GPRs, and vice versa. On AArch64, this is identical to the
    /// normal C (AAPCS) calling convention for normal functions, but floats are
    /// passed in integer registers to variadic functions.
    Win64 = 79,

    /// MSVC calling convention that passes vectors and vector aggregates in SSE
    /// registers.
    X86_VectorCall = 80,

    /// Placeholders for HHVM calling conventions (deprecated, removed).
    DUMMY_HHVM = 81,
    DUMMY_HHVM_C = 82,

    /// x86 hardware interrupt context. Callee may take one or two parameters,
    /// where the 1st represents a pointer to the hardware context frame and
    /// the 2nd represents the hardware error code; the presence of the latter
    /// depends on the interrupt vector taken. Valid for both 32- and 64-bit
    /// subtargets.
    X86_INTR = 83,

    /// Used for AVR interrupt routines.
    AVR_INTR = 84,

    /// Used for AVR signal routines.
    AVR_SIGNAL = 85,

    /// Used for special AVR rtlib functions which have an "optimized"
    /// convention to preserve registers.
    AVR_BUILTIN = 86,

    /// Used for Mesa vertex shaders, or AMDPAL last shader stage before
    /// rasterization (vertex shader if tessellation and geometry are not in
    /// use, or otherwise copy shader if one is needed).
    AMDGPU_VS = 87,

    /// Used for Mesa/AMDPAL geometry shaders.
    AMDGPU_GS = 88,

    /// Used for Mesa/AMDPAL pixel shaders.
    AMDGPU_PS = 89,

    /// Used for Mesa/AMDPAL compute shaders.
    AMDGPU_CS = 90,

    /// Used for AMDGPU code object kernels.
    AMDGPU_KERNEL = 91,

    /// Register calling convention used to optimize parameter transfer.
    X86_RegCall = 92,

    /// Used for Mesa/AMDPAL hull shaders (= tessellation control shaders).
    AMDGPU_HS = 93,

    /// Used for special MSP430 rtlib functions which have an "optimized"
    /// convention using additional registers.
    MSP430_BUILTIN = 94,

    /// Used for AMDPAL vertex shader if tessellation is in use.
    AMDGPU_LS = 95,

    /// Used for the AMDPAL shader stage before the geometry shader if geometry
    /// is in use: the domain (= tessellation evaluation) shader if
    /// tessellation is in use, or otherwise the vertex shader.
    AMDGPU_ES = 96,

    /// Used between AArch64 Advanced SIMD functions.
    AArch64_VectorCall = 97,

    /// Used between AArch64 SVE functions.
    AArch64_SVE_VectorCall = 98,

    /// For emscripten __invoke_* functions. The first argument is required to
    /// be the function ptr being indirectly called. The remainder matches the
    /// regular calling convention.
    WASM_EmscriptenInvoke = 99,

    /// Used for AMD graphics targets.
    AMDGPU_Gfx = 100,

    /// Used for M68k interrupt routines.
    M68k_INTR = 101,

    /// Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
    AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 = 102,

    /// Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
    AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 = 103,

    /// Used on AMDGPUs to give the middle-end more control over argument
    /// placement.
    AMDGPU_CS_Chain = 104,

    /// Used on AMDGPUs to give the middle-end more control over argument
    /// placement. Preserves active lane values for input VGPRs.
    AMDGPU_CS_ChainPreserve = 105,

    /// The highest possible ID. Must be some 2^k - 1.
    MaxID = 1023
  };

} // end namespace CallingConv

} // end namespace llvm

#endif // LLVM_IR_CALLINGCONV_H
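
// --- Editorial sketch (not part of the original headers) --------------------
// How the enumerators above are consumed in practice: both a function and its
// call sites carry a CallingConv::ID, and LLVM treats a mismatch between the
// two as undefined behavior. A minimal sketch assuming the usual LLVM API
// headers; markX86FastCall is a hypothetical helper, not an LLVM API.
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"

static void markX86FastCall(llvm::Function &F) {
  F.setCallingConv(llvm::CallingConv::X86_FastCall);
  // Call sites must agree with the callee's convention, so update them too.
  for (llvm::User *U : F.users())
    if (auto *CB = llvm::dyn_cast<llvm::CallBase>(U))
      if (CB->getCalledFunction() == &F)
        CB->setCallingConv(llvm::CallingConv::X86_FastCall);
}
// -----------------------------------------------------------------------------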
IR/IntrinsicsPowerPC.h
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_PPC_ENUMS_H
#define LLVM_IR_INTRINSIC_PPC_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum PPCIntrinsics : unsigned {
// Enum values for intrinsics
    ppc_addex = 7547,                                 // llvm.ppc.addex
    ppc_addf128_round_to_odd,                  // llvm.ppc.addf128.round.to.odd
    ppc_altivec_crypto_vcipher,                // llvm.ppc.altivec.crypto.vcipher
    ppc_altivec_crypto_vcipherlast,            // llvm.ppc.altivec.crypto.vcipherlast
    ppc_altivec_crypto_vncipher,               // llvm.ppc.altivec.crypto.vncipher
    ppc_altivec_crypto_vncipherlast,           // llvm.ppc.altivec.crypto.vncipherlast
    ppc_altivec_crypto_vpermxor,               // llvm.ppc.altivec.crypto.vpermxor
    ppc_altivec_crypto_vpermxor_be,            // llvm.ppc.altivec.crypto.vpermxor.be
    ppc_altivec_crypto_vpmsumb,                // llvm.ppc.altivec.crypto.vpmsumb
    ppc_altivec_crypto_vpmsumd,                // llvm.ppc.altivec.crypto.vpmsumd
    ppc_altivec_crypto_vpmsumh,                // llvm.ppc.altivec.crypto.vpmsumh
    ppc_altivec_crypto_vpmsumw,                // llvm.ppc.altivec.crypto.vpmsumw
    ppc_altivec_crypto_vsbox,                  // llvm.ppc.altivec.crypto.vsbox
    ppc_altivec_crypto_vshasigmad,             // llvm.ppc.altivec.crypto.vshasigmad
    ppc_altivec_crypto_vshasigmaw,             // llvm.ppc.altivec.crypto.vshasigmaw
    ppc_altivec_dss,                           // llvm.ppc.altivec.dss
    ppc_altivec_dssall,                        // llvm.ppc.altivec.dssall
    ppc_altivec_dst,                           // llvm.ppc.altivec.dst
    ppc_altivec_dstst,                         // llvm.ppc.altivec.dstst
    ppc_altivec_dststt,                        // llvm.ppc.altivec.dststt
    ppc_altivec_dstt,                          // llvm.ppc.altivec.dstt
    ppc_altivec_lvebx,                         // llvm.ppc.altivec.lvebx
    ppc_altivec_lvehx,                         // llvm.ppc.altivec.lvehx
    ppc_altivec_lvewx,                         // llvm.ppc.altivec.lvewx
    ppc_altivec_lvsl,                          // llvm.ppc.altivec.lvsl
    ppc_altivec_lvsr,                          // llvm.ppc.altivec.lvsr
    ppc_altivec_lvx,                           // llvm.ppc.altivec.lvx
    ppc_altivec_lvxl,                          // llvm.ppc.altivec.lvxl
    ppc_altivec_mfvscr,                        // llvm.ppc.altivec.mfvscr
    ppc_altivec_mtvscr,                        // llvm.ppc.altivec.mtvscr
    ppc_altivec_mtvsrbm,                       // llvm.ppc.altivec.mtvsrbm
    ppc_altivec_mtvsrdm,                       // llvm.ppc.altivec.mtvsrdm
    ppc_altivec_mtvsrhm,                       // llvm.ppc.altivec.mtvsrhm
    ppc_altivec_mtvsrqm,                       // llvm.ppc.altivec.mtvsrqm
    ppc_altivec_mtvsrwm,                       // llvm.ppc.altivec.mtvsrwm
    ppc_altivec_stvebx,                        // llvm.ppc.altivec.stvebx
    ppc_altivec_stvehx,                        // llvm.ppc.altivec.stvehx
    ppc_altivec_stvewx,                        // llvm.ppc.altivec.stvewx
    ppc_altivec_stvx,                          // llvm.ppc.altivec.stvx
    ppc_altivec_stvxl,                         // llvm.ppc.altivec.stvxl
    ppc_altivec_vabsdub,                       // llvm.ppc.altivec.vabsdub
    ppc_altivec_vabsduh,                       // llvm.ppc.altivec.vabsduh
    ppc_altivec_vabsduw,                       // llvm.ppc.altivec.vabsduw
    ppc_altivec_vaddcuq,                       // llvm.ppc.altivec.vaddcuq
    ppc_altivec_vaddcuw,                       // llvm.ppc.altivec.vaddcuw
    ppc_altivec_vaddecuq,                      // llvm.ppc.altivec.vaddecuq
    ppc_altivec_vaddeuqm,                      // llvm.ppc.altivec.vaddeuqm
    ppc_altivec_vaddsbs,                       // llvm.ppc.altivec.vaddsbs
    ppc_altivec_vaddshs,                       // llvm.ppc.altivec.vaddshs
    ppc_altivec_vaddsws,                       // llvm.ppc.altivec.vaddsws
    ppc_altivec_vaddubs,                       // llvm.ppc.altivec.vaddubs
    ppc_altivec_vadduhs,                       // llvm.ppc.altivec.vadduhs
    ppc_altivec_vadduws,                       // llvm.ppc.altivec.vadduws
    ppc_altivec_vavgsb,                        // llvm.ppc.altivec.vavgsb
    ppc_altivec_vavgsh,                        // llvm.ppc.altivec.vavgsh
    ppc_altivec_vavgsw,                        // llvm.ppc.altivec.vavgsw
    ppc_altivec_vavgub,                        // llvm.ppc.altivec.vavgub
    ppc_altivec_vavguh,                        // llvm.ppc.altivec.vavguh
    ppc_altivec_vavguw,                        // llvm.ppc.altivec.vavguw
    ppc_altivec_vbpermd,                       // llvm.ppc.altivec.vbpermd
    ppc_altivec_vbpermq,                       // llvm.ppc.altivec.vbpermq
    ppc_altivec_vcfsx,                         // llvm.ppc.altivec.vcfsx
    ppc_altivec_vcfuged,                       // llvm.ppc.altivec.vcfuged
    ppc_altivec_vcfux,                         // llvm.ppc.altivec.vcfux
    ppc_altivec_vclrlb,                        // llvm.ppc.altivec.vclrlb
    ppc_altivec_vclrrb,                        // llvm.ppc.altivec.vclrrb
    ppc_altivec_vclzdm,                        // llvm.ppc.altivec.vclzdm
    ppc_altivec_vclzlsbb,                      // llvm.ppc.altivec.vclzlsbb
    ppc_altivec_vcmpbfp,                       // llvm.ppc.altivec.vcmpbfp
    ppc_altivec_vcmpbfp_p,                     // llvm.ppc.altivec.vcmpbfp.p
    ppc_altivec_vcmpeqfp,                      // llvm.ppc.altivec.vcmpeqfp
    ppc_altivec_vcmpeqfp_p,                    // llvm.ppc.altivec.vcmpeqfp.p
    ppc_altivec_vcmpequb,                      // llvm.ppc.altivec.vcmpequb
    ppc_altivec_vcmpequb_p,                    // llvm.ppc.altivec.vcmpequb.p
    ppc_altivec_vcmpequd,                      // llvm.ppc.altivec.vcmpequd
    ppc_altivec_vcmpequd_p,                    // llvm.ppc.altivec.vcmpequd.p
    ppc_altivec_vcmpequh,                      // llvm.ppc.altivec.vcmpequh
    ppc_altivec_vcmpequh_p,                    // llvm.ppc.altivec.vcmpequh.p
    ppc_altivec_vcmpequq,                      // llvm.ppc.altivec.vcmpequq
    ppc_altivec_vcmpequq_p,                    // llvm.ppc.altivec.vcmpequq.p
    ppc_altivec_vcmpequw,                      // llvm.ppc.altivec.vcmpequw
    ppc_altivec_vcmpequw_p,                    // llvm.ppc.altivec.vcmpequw.p
    ppc_altivec_vcmpgefp,                      // llvm.ppc.altivec.vcmpgefp
    ppc_altivec_vcmpgefp_p,                    // llvm.ppc.altivec.vcmpgefp.p
    ppc_altivec_vcmpgtfp,                      // llvm.ppc.altivec.vcmpgtfp
    ppc_altivec_vcmpgtfp_p,                    // llvm.ppc.altivec.vcmpgtfp.p
    ppc_altivec_vcmpgtsb,                      // llvm.ppc.altivec.vcmpgtsb
    ppc_altivec_vcmpgtsb_p,                    // llvm.ppc.altivec.vcmpgtsb.p
    ppc_altivec_vcmpgtsd,                      // llvm.ppc.altivec.vcmpgtsd
    ppc_altivec_vcmpgtsd_p,                    // llvm.ppc.altivec.vcmpgtsd.p
    ppc_altivec_vcmpgtsh,                      // llvm.ppc.altivec.vcmpgtsh
    ppc_altivec_vcmpgtsh_p,                    // llvm.ppc.altivec.vcmpgtsh.p
    ppc_altivec_vcmpgtsq,                      // llvm.ppc.altivec.vcmpgtsq
    ppc_altivec_vcmpgtsq_p,                    // llvm.ppc.altivec.vcmpgtsq.p
    ppc_altivec_vcmpgtsw,                      // llvm.ppc.altivec.vcmpgtsw
    ppc_altivec_vcmpgtsw_p,                    // llvm.ppc.altivec.vcmpgtsw.p
    ppc_altivec_vcmpgtub,                      // llvm.ppc.altivec.vcmpgtub
    ppc_altivec_vcmpgtub_p,                    // llvm.ppc.altivec.vcmpgtub.p
    ppc_altivec_vcmpgtud,                      // llvm.ppc.altivec.vcmpgtud
    ppc_altivec_vcmpgtud_p,                    // llvm.ppc.altivec.vcmpgtud.p
    ppc_altivec_vcmpgtuh,                      // llvm.ppc.altivec.vcmpgtuh
    ppc_altivec_vcmpgtuh_p,                    // llvm.ppc.altivec.vcmpgtuh.p
    ppc_altivec_vcmpgtuq,                      // llvm.ppc.altivec.vcmpgtuq
    ppc_altivec_vcmpgtuq_p,                    // llvm.ppc.altivec.vcmpgtuq.p
    ppc_altivec_vcmpgtuw,                      // llvm.ppc.altivec.vcmpgtuw
    ppc_altivec_vcmpgtuw_p,                    // llvm.ppc.altivec.vcmpgtuw.p
    ppc_altivec_vcmpneb,                       // llvm.ppc.altivec.vcmpneb
    ppc_altivec_vcmpneb_p,                     // llvm.ppc.altivec.vcmpneb.p
    ppc_altivec_vcmpneh,                       // llvm.ppc.altivec.vcmpneh
    ppc_altivec_vcmpneh_p,                     // llvm.ppc.altivec.vcmpneh.p
    ppc_altivec_vcmpnew,                       // llvm.ppc.altivec.vcmpnew
    ppc_altivec_vcmpnew_p,                     // llvm.ppc.altivec.vcmpnew.p
    ppc_altivec_vcmpnezb,                      // llvm.ppc.altivec.vcmpnezb
    ppc_altivec_vcmpnezb_p,                    // llvm.ppc.altivec.vcmpnezb.p
    ppc_altivec_vcmpnezh,                      // llvm.ppc.altivec.vcmpnezh
    ppc_altivec_vcmpnezh_p,                    // llvm.ppc.altivec.vcmpnezh.p
    ppc_altivec_vcmpnezw,                      // llvm.ppc.altivec.vcmpnezw
    ppc_altivec_vcmpnezw_p,                    // llvm.ppc.altivec.vcmpnezw.p
    ppc_altivec_vcntmbb,                       // llvm.ppc.altivec.vcntmbb
    ppc_altivec_vcntmbd,                       // llvm.ppc.altivec.vcntmbd
    ppc_altivec_vcntmbh,                       // llvm.ppc.altivec.vcntmbh
    ppc_altivec_vcntmbw,                       // llvm.ppc.altivec.vcntmbw
    ppc_altivec_vctsxs,                        // llvm.ppc.altivec.vctsxs
    ppc_altivec_vctuxs,                        // llvm.ppc.altivec.vctuxs
    ppc_altivec_vctzdm,                        // llvm.ppc.altivec.vctzdm
    ppc_altivec_vctzlsbb,                      // llvm.ppc.altivec.vctzlsbb
    ppc_altivec_vdivesd,                       // llvm.ppc.altivec.vdivesd
    ppc_altivec_vdivesq,                       // llvm.ppc.altivec.vdivesq
    ppc_altivec_vdivesw,                       // llvm.ppc.altivec.vdivesw
    ppc_altivec_vdiveud,                       // llvm.ppc.altivec.vdiveud
    ppc_altivec_vdiveuq,                       // llvm.ppc.altivec.vdiveuq
    ppc_altivec_vdiveuw,                       // llvm.ppc.altivec.vdiveuw
    ppc_altivec_vexpandbm,                     // llvm.ppc.altivec.vexpandbm
    ppc_altivec_vexpanddm,                     // llvm.ppc.altivec.vexpanddm
    ppc_altivec_vexpandhm,                     // llvm.ppc.altivec.vexpandhm
    ppc_altivec_vexpandqm,                     // llvm.ppc.altivec.vexpandqm
    ppc_altivec_vexpandwm,                     // llvm.ppc.altivec.vexpandwm
    ppc_altivec_vexptefp,                      // llvm.ppc.altivec.vexptefp
    ppc_altivec_vextddvlx,                     // llvm.ppc.altivec.vextddvlx
    ppc_altivec_vextddvrx,                     // llvm.ppc.altivec.vextddvrx
    ppc_altivec_vextdubvlx,                    // llvm.ppc.altivec.vextdubvlx
    ppc_altivec_vextdubvrx,                    // llvm.ppc.altivec.vextdubvrx
    ppc_altivec_vextduhvlx,                    // llvm.ppc.altivec.vextduhvlx
    ppc_altivec_vextduhvrx,                    // llvm.ppc.altivec.vextduhvrx
    ppc_altivec_vextduwvlx,                    // llvm.ppc.altivec.vextduwvlx
    ppc_altivec_vextduwvrx,                    // llvm.ppc.altivec.vextduwvrx
    ppc_altivec_vextractbm,                    // llvm.ppc.altivec.vextractbm
    ppc_altivec_vextractdm,                    // llvm.ppc.altivec.vextractdm
    ppc_altivec_vextracthm,                    // llvm.ppc.altivec.vextracthm
    ppc_altivec_vextractqm,                    // llvm.ppc.altivec.vextractqm
    ppc_altivec_vextractwm,                    // llvm.ppc.altivec.vextractwm
    ppc_altivec_vextsb2d,                      // llvm.ppc.altivec.vextsb2d
    ppc_altivec_vextsb2w,                      // llvm.ppc.altivec.vextsb2w
    ppc_altivec_vextsd2q,                      // llvm.ppc.altivec.vextsd2q
    ppc_altivec_vextsh2d,                      // llvm.ppc.altivec.vextsh2d
    ppc_altivec_vextsh2w,                      // llvm.ppc.altivec.vextsh2w
    ppc_altivec_vextsw2d,                      // llvm.ppc.altivec.vextsw2d
    ppc_altivec_vgbbd,                         // llvm.ppc.altivec.vgbbd
    ppc_altivec_vgnb,                          // llvm.ppc.altivec.vgnb
    ppc_altivec_vinsblx,                       // llvm.ppc.altivec.vinsblx
    ppc_altivec_vinsbrx,                       // llvm.ppc.altivec.vinsbrx
    ppc_altivec_vinsbvlx,                      // llvm.ppc.altivec.vinsbvlx
    ppc_altivec_vinsbvrx,                      // llvm.ppc.altivec.vinsbvrx
    ppc_altivec_vinsd,                         // llvm.ppc.altivec.vinsd
    ppc_altivec_vinsdlx,                       // llvm.ppc.altivec.vinsdlx
    ppc_altivec_vinsdrx,                       // llvm.ppc.altivec.vinsdrx
    ppc_altivec_vinshlx,                       // llvm.ppc.altivec.vinshlx
    ppc_altivec_vinshrx,                       // llvm.ppc.altivec.vinshrx
    ppc_altivec_vinshvlx,                      // llvm.ppc.altivec.vinshvlx
    ppc_altivec_vinshvrx,                      // llvm.ppc.altivec.vinshvrx
    ppc_altivec_vinsw,                         // llvm.ppc.altivec.vinsw
    ppc_altivec_vinswlx,                       // llvm.ppc.altivec.vinswlx
    ppc_altivec_vinswrx,                       // llvm.ppc.altivec.vinswrx
    ppc_altivec_vinswvlx,                      // llvm.ppc.altivec.vinswvlx
    ppc_altivec_vinswvrx,                      // llvm.ppc.altivec.vinswvrx
    ppc_altivec_vlogefp,                       // llvm.ppc.altivec.vlogefp
    ppc_altivec_vmaddfp,                       // llvm.ppc.altivec.vmaddfp
    ppc_altivec_vmaxfp,                        // llvm.ppc.altivec.vmaxfp
    ppc_altivec_vmaxsb,                        // llvm.ppc.altivec.vmaxsb
    ppc_altivec_vmaxsd,                        // llvm.ppc.altivec.vmaxsd
    ppc_altivec_vmaxsh,                        // llvm.ppc.altivec.vmaxsh
    ppc_altivec_vmaxsw,                        // llvm.ppc.altivec.vmaxsw
    ppc_altivec_vmaxub,                        // llvm.ppc.altivec.vmaxub
    ppc_altivec_vmaxud,                        // llvm.ppc.altivec.vmaxud
    ppc_altivec_vmaxuh,                        // llvm.ppc.altivec.vmaxuh
    ppc_altivec_vmaxuw,                        // llvm.ppc.altivec.vmaxuw
    ppc_altivec_vmhaddshs,                     // llvm.ppc.altivec.vmhaddshs
    ppc_altivec_vmhraddshs,                    // llvm.ppc.altivec.vmhraddshs
    ppc_altivec_vminfp,                        // llvm.ppc.altivec.vminfp
    ppc_altivec_vminsb,                        // llvm.ppc.altivec.vminsb
    ppc_altivec_vminsd,                        // llvm.ppc.altivec.vminsd
    ppc_altivec_vminsh,                        // llvm.ppc.altivec.vminsh
    ppc_altivec_vminsw,                        // llvm.ppc.altivec.vminsw
    ppc_altivec_vminub,                        // llvm.ppc.altivec.vminub
    ppc_altivec_vminud,                        // llvm.ppc.altivec.vminud
    ppc_altivec_vminuh,                        // llvm.ppc.altivec.vminuh
    ppc_altivec_vminuw,                        // llvm.ppc.altivec.vminuw
    ppc_altivec_vmladduhm,                     // llvm.ppc.altivec.vmladduhm
    ppc_altivec_vmsumcud,                      // llvm.ppc.altivec.vmsumcud
    ppc_altivec_vmsummbm,                      // llvm.ppc.altivec.vmsummbm
    ppc_altivec_vmsumshm,                      // llvm.ppc.altivec.vmsumshm
    ppc_altivec_vmsumshs,                      // llvm.ppc.altivec.vmsumshs
    ppc_altivec_vmsumubm,                      // llvm.ppc.altivec.vmsumubm
    ppc_altivec_vmsumudm,                      // llvm.ppc.altivec.vmsumudm
    ppc_altivec_vmsumuhm,                      // llvm.ppc.altivec.vmsumuhm
    ppc_altivec_vmsumuhs,                      // llvm.ppc.altivec.vmsumuhs
    ppc_altivec_vmulesb,                       // llvm.ppc.altivec.vmulesb
    ppc_altivec_vmulesd,                       // llvm.ppc.altivec.vmulesd
    ppc_altivec_vmulesh,                       // llvm.ppc.altivec.vmulesh
    ppc_altivec_vmulesw,                       // llvm.ppc.altivec.vmulesw
    ppc_altivec_vmuleub,                       // llvm.ppc.altivec.vmuleub
    ppc_altivec_vmuleud,                       // llvm.ppc.altivec.vmuleud
    ppc_altivec_vmuleuh,                       // llvm.ppc.altivec.vmuleuh
    ppc_altivec_vmuleuw,                       // llvm.ppc.altivec.vmuleuw
    ppc_altivec_vmulhsd,                       // llvm.ppc.altivec.vmulhsd
    ppc_altivec_vmulhsw,                       // llvm.ppc.altivec.vmulhsw
    ppc_altivec_vmulhud,                       // llvm.ppc.altivec.vmulhud
    ppc_altivec_vmulhuw,                       // llvm.ppc.altivec.vmulhuw
    ppc_altivec_vmulosb,                       // llvm.ppc.altivec.vmulosb
    ppc_altivec_vmulosd,                       // llvm.ppc.altivec.vmulosd
    ppc_altivec_vmulosh,                       // llvm.ppc.altivec.vmulosh
    ppc_altivec_vmulosw,                       // llvm.ppc.altivec.vmulosw
    ppc_altivec_vmuloub,                       // llvm.ppc.altivec.vmuloub
    ppc_altivec_vmuloud,                       // llvm.ppc.altivec.vmuloud
    ppc_altivec_vmulouh,                       // llvm.ppc.altivec.vmulouh
    ppc_altivec_vmulouw,                       // llvm.ppc.altivec.vmulouw
    ppc_altivec_vnmsubfp,                      // llvm.ppc.altivec.vnmsubfp
    ppc_altivec_vpdepd,                        // llvm.ppc.altivec.vpdepd
    ppc_altivec_vperm,                         // llvm.ppc.altivec.vperm
    ppc_altivec_vpextd,                        // llvm.ppc.altivec.vpextd
    ppc_altivec_vpkpx,                         // llvm.ppc.altivec.vpkpx
    ppc_altivec_vpksdss,                       // llvm.ppc.altivec.vpksdss
    ppc_altivec_vpksdus,                       // llvm.ppc.altivec.vpksdus
    ppc_altivec_vpkshss,                       // llvm.ppc.altivec.vpkshss
    ppc_altivec_vpkshus,                       // llvm.ppc.altivec.vpkshus
    ppc_altivec_vpkswss,                       // llvm.ppc.altivec.vpkswss
    ppc_altivec_vpkswus,                       // llvm.ppc.altivec.vpkswus
    ppc_altivec_vpkudus,                       // llvm.ppc.altivec.vpkudus
    ppc_altivec_vpkuhus,                       // llvm.ppc.altivec.vpkuhus
    ppc_altivec_vpkuwus,                       // llvm.ppc.altivec.vpkuwus
    ppc_altivec_vprtybd,                       // llvm.ppc.altivec.vprtybd
    ppc_altivec_vprtybq,                       // llvm.ppc.altivec.vprtybq
    ppc_altivec_vprtybw,                       // llvm.ppc.altivec.vprtybw
    ppc_altivec_vrefp,                         // llvm.ppc.altivec.vrefp
    ppc_altivec_vrfim,                         // llvm.ppc.altivec.vrfim
    ppc_altivec_vrfin,                         // llvm.ppc.altivec.vrfin
    ppc_altivec_vrfip,                         // llvm.ppc.altivec.vrfip
    ppc_altivec_vrfiz,                         // llvm.ppc.altivec.vrfiz
    ppc_altivec_vrlb,                          // llvm.ppc.altivec.vrlb
    ppc_altivec_vrld,                          // llvm.ppc.altivec.vrld
    ppc_altivec_vrldmi,                        // llvm.ppc.altivec.vrldmi
    ppc_altivec_vrldnm,                        // llvm.ppc.altivec.vrldnm
    ppc_altivec_vrlh,                          // llvm.ppc.altivec.vrlh
    ppc_altivec_vrlqmi,                        // llvm.ppc.altivec.vrlqmi
    ppc_altivec_vrlqnm,                        // llvm.ppc.altivec.vrlqnm
    ppc_altivec_vrlw,                          // llvm.ppc.altivec.vrlw
    ppc_altivec_vrlwmi,                        // llvm.ppc.altivec.vrlwmi
    ppc_altivec_vrlwnm,                        // llvm.ppc.altivec.vrlwnm
    ppc_altivec_vrsqrtefp,                     // llvm.ppc.altivec.vrsqrtefp
    ppc_altivec_vsel,                          // llvm.ppc.altivec.vsel
    ppc_altivec_vsl,                           // llvm.ppc.altivec.vsl
    ppc_altivec_vslb,                          // llvm.ppc.altivec.vslb
    ppc_altivec_vsldbi,                        // llvm.ppc.altivec.vsldbi
    ppc_altivec_vslh,                          // llvm.ppc.altivec.vslh
    ppc_altivec_vslo,                          // llvm.ppc.altivec.vslo
    ppc_altivec_vslv,                          // llvm.ppc.altivec.vslv
    ppc_altivec_vslw,                          // llvm.ppc.altivec.vslw
    ppc_altivec_vsr,                           // llvm.ppc.altivec.vsr
    ppc_altivec_vsrab,                         // llvm.ppc.altivec.vsrab
    ppc_altivec_vsrah,                         // llvm.ppc.altivec.vsrah
    ppc_altivec_vsraw,                         // llvm.ppc.altivec.vsraw
    ppc_altivec_vsrb,                          // llvm.ppc.altivec.vsrb
    ppc_altivec_vsrdbi,                        // llvm.ppc.altivec.vsrdbi
    ppc_altivec_vsrh,                          // llvm.ppc.altivec.vsrh
    ppc_altivec_vsro,                          // llvm.ppc.altivec.vsro
    ppc_altivec_vsrv,                          // llvm.ppc.altivec.vsrv
    ppc_altivec_vsrw,                          // llvm.ppc.altivec.vsrw
    ppc_altivec_vstribl,                       // llvm.ppc.altivec.vstribl
    ppc_altivec_vstribl_p,                     // llvm.ppc.altivec.vstribl.p
    ppc_altivec_vstribr,                       // llvm.ppc.altivec.vstribr
    ppc_altivec_vstribr_p,                     // llvm.ppc.altivec.vstribr.p
    ppc_altivec_vstrihl,                       // llvm.ppc.altivec.vstrihl
    ppc_altivec_vstrihl_p,                     // llvm.ppc.altivec.vstrihl.p
    ppc_altivec_vstrihr,                       // llvm.ppc.altivec.vstrihr
    ppc_altivec_vstrihr_p,                     // llvm.ppc.altivec.vstrihr.p
    ppc_altivec_vsubcuq,                       // llvm.ppc.altivec.vsubcuq
    ppc_altivec_vsubcuw,                       // llvm.ppc.altivec.vsubcuw
    ppc_altivec_vsubecuq,                      // llvm.ppc.altivec.vsubecuq
    ppc_altivec_vsubeuqm,                      // llvm.ppc.altivec.vsubeuqm
    ppc_altivec_vsubsbs,                       // llvm.ppc.altivec.vsubsbs
    ppc_altivec_vsubshs,                       // llvm.ppc.altivec.vsubshs
    ppc_altivec_vsubsws,                       // llvm.ppc.altivec.vsubsws
    ppc_altivec_vsububs,                       // llvm.ppc.altivec.vsububs
    ppc_altivec_vsubuhs,                       // llvm.ppc.altivec.vsubuhs
    ppc_altivec_vsubuws,                       // llvm.ppc.altivec.vsubuws
    ppc_altivec_vsum2sws,                      // llvm.ppc.altivec.vsum2sws
    ppc_altivec_vsum4sbs,                      // llvm.ppc.altivec.vsum4sbs
    ppc_altivec_vsum4shs,                      // llvm.ppc.altivec.vsum4shs
    ppc_altivec_vsum4ubs,                      // llvm.ppc.altivec.vsum4ubs
    ppc_altivec_vsumsws,                       // llvm.ppc.altivec.vsumsws
    ppc_altivec_vupkhpx,                       // llvm.ppc.altivec.vupkhpx
    ppc_altivec_vupkhsb,                       // llvm.ppc.altivec.vupkhsb
    ppc_altivec_vupkhsh,                       // llvm.ppc.altivec.vupkhsh
    ppc_altivec_vupkhsw,                       // llvm.ppc.altivec.vupkhsw
    ppc_altivec_vupklpx,                       // llvm.ppc.altivec.vupklpx
    ppc_altivec_vupklsb,                       // llvm.ppc.altivec.vupklsb
    ppc_altivec_vupklsh,                       // llvm.ppc.altivec.vupklsh
    ppc_altivec_vupklsw,                       // llvm.ppc.altivec.vupklsw
    ppc_atomic_load_i128,                      // llvm.ppc.atomic.load.i128
    ppc_atomic_store_i128,                     // llvm.ppc.atomic.store.i128
    ppc_atomicrmw_add_i128,                    // llvm.ppc.atomicrmw.add.i128
    ppc_atomicrmw_and_i128,                    // llvm.ppc.atomicrmw.and.i128
    ppc_atomicrmw_nand_i128,                   // llvm.ppc.atomicrmw.nand.i128
    ppc_atomicrmw_or_i128,                     // llvm.ppc.atomicrmw.or.i128
    ppc_atomicrmw_sub_i128,                    // llvm.ppc.atomicrmw.sub.i128
    ppc_atomicrmw_xchg_i128,                   // llvm.ppc.atomicrmw.xchg.i128
    ppc_atomicrmw_xor_i128,                    // llvm.ppc.atomicrmw.xor.i128
    ppc_bcdadd,                                // llvm.ppc.bcdadd
    ppc_bcdadd_p,                              // llvm.ppc.bcdadd.p
    ppc_bcdsub,                                // llvm.ppc.bcdsub
    ppc_bcdsub_p,                              // llvm.ppc.bcdsub.p
    ppc_bpermd,                                // llvm.ppc.bpermd
    ppc_cfence,                                // llvm.ppc.cfence
    ppc_cfuged,                                // llvm.ppc.cfuged
    ppc_cmpb,                                  // llvm.ppc.cmpb
    ppc_cmpeqb,                                // llvm.ppc.cmpeqb
    ppc_cmprb,                                 // llvm.ppc.cmprb
    ppc_cmpxchg_i128,                          // llvm.ppc.cmpxchg.i128
    ppc_cntlzdm,                               // llvm.ppc.cntlzdm
    ppc_cnttzdm,                               // llvm.ppc.cnttzdm
    ppc_compare_exp_eq,                        // llvm.ppc.compare.exp.eq
    ppc_compare_exp_gt,                        // llvm.ppc.compare.exp.gt
    ppc_compare_exp_lt,                        // llvm.ppc.compare.exp.lt
    ppc_compare_exp_uo,                        // llvm.ppc.compare.exp.uo
    ppc_convert_f128_to_ppcf128,               // llvm.ppc.convert.f128.to.ppcf128
    ppc_convert_ppcf128_to_f128,               // llvm.ppc.convert.ppcf128.to.f128
    ppc_darn,                                  // llvm.ppc.darn
    ppc_darn32,                                // llvm.ppc.darn32
    ppc_darnraw,                               // llvm.ppc.darnraw
    ppc_dcba,                                  // llvm.ppc.dcba
    ppc_dcbf,                                  // llvm.ppc.dcbf
    ppc_dcbfl,                                 // llvm.ppc.dcbfl
    ppc_dcbflp,                                // llvm.ppc.dcbflp
    ppc_dcbfps,                                // llvm.ppc.dcbfps
    ppc_dcbi,                                  // llvm.ppc.dcbi
    ppc_dcbst,                                 // llvm.ppc.dcbst
    ppc_dcbstps,                               // llvm.ppc.dcbstps
    ppc_dcbt,                                  // llvm.ppc.dcbt
    ppc_dcbt_with_hint,                        // llvm.ppc.dcbt.with.hint
    ppc_dcbtst,                                // llvm.ppc.dcbtst
    ppc_dcbtst_with_hint,                      // llvm.ppc.dcbtst.with.hint
    ppc_dcbtstt,                               // llvm.ppc.dcbtstt
    ppc_dcbtt,                                 // llvm.ppc.dcbtt
    ppc_dcbz,                                  // llvm.ppc.dcbz
    ppc_dcbzl,                                 // llvm.ppc.dcbzl
    ppc_divde,                                 // llvm.ppc.divde
    ppc_divdeu,                                // llvm.ppc.divdeu
    ppc_divf128_round_to_odd,                  // llvm.ppc.divf128.round.to.odd
    ppc_divwe,                                 // llvm.ppc.divwe
    ppc_divweu,                                // llvm.ppc.divweu
    ppc_eieio,                                 // llvm.ppc.eieio
    ppc_extract_exp,                           // llvm.ppc.extract.exp
    ppc_extract_sig,                           // llvm.ppc.extract.sig
    ppc_fcfid,                                 // llvm.ppc.fcfid
    ppc_fcfud,                                 // llvm.ppc.fcfud
    ppc_fctid,                                 // llvm.ppc.fctid
    ppc_fctidz,                                // llvm.ppc.fctidz
    ppc_fctiw,                                 // llvm.ppc.fctiw
    ppc_fctiwz,                                // llvm.ppc.fctiwz
    ppc_fctudz,                                // llvm.ppc.fctudz
    ppc_fctuwz,                                // llvm.ppc.fctuwz
    ppc_fmaf128_round_to_odd,                  // llvm.ppc.fmaf128.round.to.odd
    ppc_fmsub,                                 // llvm.ppc.fmsub
    ppc_fmsubs,                                // llvm.ppc.fmsubs
    ppc_fnabs,                                 // llvm.ppc.fnabs
    ppc_fnabss,                                // llvm.ppc.fnabss
    ppc_fnmadd,                                // llvm.ppc.fnmadd
    ppc_fnmadds,                               // llvm.ppc.fnmadds
    ppc_fnmsub,                                // llvm.ppc.fnmsub
    ppc_fre,                                   // llvm.ppc.fre
    ppc_fres,                                  // llvm.ppc.fres
    ppc_frsqrte,                               // llvm.ppc.frsqrte
    ppc_frsqrtes,                              // llvm.ppc.frsqrtes
    ppc_fsel,                                  // llvm.ppc.fsel
    ppc_fsels,                                 // llvm.ppc.fsels
    ppc_get_texasr,                            // llvm.ppc.get.texasr
    ppc_get_texasru,                           // llvm.ppc.get.texasru
    ppc_get_tfhar,                             // llvm.ppc.get.tfhar
    ppc_get_tfiar,                             // llvm.ppc.get.tfiar
    ppc_icbt,                                  // llvm.ppc.icbt
    ppc_insert_exp,                            // llvm.ppc.insert.exp
    ppc_iospace_eieio,                         // llvm.ppc.iospace.eieio
    ppc_iospace_lwsync,                        // llvm.ppc.iospace.lwsync
    ppc_iospace_sync,                          // llvm.ppc.iospace.sync
    ppc_isync,                                 // llvm.ppc.isync
    ppc_load2r,                                // llvm.ppc.load2r
    ppc_load4r,                                // llvm.ppc.load4r
    ppc_load8r,                                // llvm.ppc.load8r
    ppc_lwsync,                                // llvm.ppc.lwsync
    ppc_maddhd,                                // llvm.ppc.maddhd
    ppc_maddhdu,                               // llvm.ppc.maddhdu
    ppc_maddld,                                // llvm.ppc.maddld
    ppc_maxfe,                                 // llvm.ppc.maxfe
    ppc_maxfl,                                 // llvm.ppc.maxfl
    ppc_maxfs,                                 // llvm.ppc.maxfs
    ppc_mfmsr,                                 // llvm.ppc.mfmsr
    ppc_mfspr,                                 // llvm.ppc.mfspr
    ppc_mftbu,                                 // llvm.ppc.mftbu
    ppc_minfe,                                 // llvm.ppc.minfe
    ppc_minfl,                                 // llvm.ppc.minfl
    ppc_minfs,                                 // llvm.ppc.minfs
    ppc_mma_assemble_acc,                      // llvm.ppc.mma.assemble.acc
    ppc_mma_disassemble_acc,                   // llvm.ppc.mma.disassemble.acc
    ppc_mma_pmxvbf16ger2,                      // llvm.ppc.mma.pmxvbf16ger2
    ppc_mma_pmxvbf16ger2nn,                    // llvm.ppc.mma.pmxvbf16ger2nn
    ppc_mma_pmxvbf16ger2np,                    // llvm.ppc.mma.pmxvbf16ger2np
    ppc_mma_pmxvbf16ger2pn,                    // llvm.ppc.mma.pmxvbf16ger2pn
    ppc_mma_pmxvbf16ger2pp,                    // llvm.ppc.mma.pmxvbf16ger2pp
    ppc_mma_pmxvf16ger2,                       // llvm.ppc.mma.pmxvf16ger2
    ppc_mma_pmxvf16ger2nn,                     // llvm.ppc.mma.pmxvf16ger2nn
    ppc_mma_pmxvf16ger2np,                     // llvm.ppc.mma.pmxvf16ger2np
    ppc_mma_pmxvf16ger2pn,                     // llvm.ppc.mma.pmxvf16ger2pn
    ppc_mma_pmxvf16ger2pp,                     // llvm.ppc.mma.pmxvf16ger2pp
    ppc_mma_pmxvf32ger,                        // llvm.ppc.mma.pmxvf32ger
    ppc_mma_pmxvf32gernn,                      // llvm.ppc.mma.pmxvf32gernn
    ppc_mma_pmxvf32gernp,                      // llvm.ppc.mma.pmxvf32gernp
    ppc_mma_pmxvf32gerpn,                      // llvm.ppc.mma.pmxvf32gerpn
    ppc_mma_pmxvf32gerpp,                      // llvm.ppc.mma.pmxvf32gerpp
    ppc_mma_pmxvf64ger,                        // llvm.ppc.mma.pmxvf64ger
    ppc_mma_pmxvf64gernn,                      // llvm.ppc.mma.pmxvf64gernn
    ppc_mma_pmxvf64gernp,                      // llvm.ppc.mma.pmxvf64gernp
    ppc_mma_pmxvf64gerpn,                      // llvm.ppc.mma.pmxvf64gerpn
    ppc_mma_pmxvf64gerpp,                      // llvm.ppc.mma.pmxvf64gerpp
    ppc_mma_pmxvi16ger2,                       // llvm.ppc.mma.pmxvi16ger2
    ppc_mma_pmxvi16ger2pp,                     // llvm.ppc.mma.pmxvi16ger2pp
    ppc_mma_pmxvi16ger2s,                      // llvm.ppc.mma.pmxvi16ger2s
    ppc_mma_pmxvi16ger2spp,                    // llvm.ppc.mma.pmxvi16ger2spp
    ppc_mma_pmxvi4ger8,                        // llvm.ppc.mma.pmxvi4ger8
    ppc_mma_pmxvi4ger8pp,                      // llvm.ppc.mma.pmxvi4ger8pp
    ppc_mma_pmxvi8ger4,                        // llvm.ppc.mma.pmxvi8ger4
    ppc_mma_pmxvi8ger4pp,                      // llvm.ppc.mma.pmxvi8ger4pp
    ppc_mma_pmxvi8ger4spp,                     // llvm.ppc.mma.pmxvi8ger4spp
    ppc_mma_xvbf16ger2,                        // llvm.ppc.mma.xvbf16ger2
    ppc_mma_xvbf16ger2nn,                      // llvm.ppc.mma.xvbf16ger2nn
    ppc_mma_xvbf16ger2np,                      // llvm.ppc.mma.xvbf16ger2np
    ppc_mma_xvbf16ger2pn,                      // llvm.ppc.mma.xvbf16ger2pn
    ppc_mma_xvbf16ger2pp,                      // llvm.ppc.mma.xvbf16ger2pp
    ppc_mma_xvf16ger2,                         // llvm.ppc.mma.xvf16ger2
    ppc_mma_xvf16ger2nn,                       // llvm.ppc.mma.xvf16ger2nn
    ppc_mma_xvf16ger2np,                       // llvm.ppc.mma.xvf16ger2np
    ppc_mma_xvf16ger2pn,                       // llvm.ppc.mma.xvf16ger2pn
    ppc_mma_xvf16ger2pp,                       // llvm.ppc.mma.xvf16ger2pp
    ppc_mma_xvf32ger,                          // llvm.ppc.mma.xvf32ger
    ppc_mma_xvf32gernn,                        // llvm.ppc.mma.xvf32gernn
    ppc_mma_xvf32gernp,                        // llvm.ppc.mma.xvf32gernp
    ppc_mma_xvf32gerpn,                        // llvm.ppc.mma.xvf32gerpn
    ppc_mma_xvf32gerpp,                        // llvm.ppc.mma.xvf32gerpp
    ppc_mma_xvf64ger,                          // llvm.ppc.mma.xvf64ger
    ppc_mma_xvf64gernn,                        // llvm.ppc.mma.xvf64gernn
    ppc_mma_xvf64gernp,                        // llvm.ppc.mma.xvf64gernp
    ppc_mma_xvf64gerpn,                        // llvm.ppc.mma.xvf64gerpn
    ppc_mma_xvf64gerpp,                        // llvm.ppc.mma.xvf64gerpp
    ppc_mma_xvi16ger2,                         // llvm.ppc.mma.xvi16ger2
    ppc_mma_xvi16ger2pp,                       // llvm.ppc.mma.xvi16ger2pp
    ppc_mma_xvi16ger2s,                        // llvm.ppc.mma.xvi16ger2s
    ppc_mma_xvi16ger2spp,                      // llvm.ppc.mma.xvi16ger2spp
    ppc_mma_xvi4ger8,                          // llvm.ppc.mma.xvi4ger8
    ppc_mma_xvi4ger8pp,                        // llvm.ppc.mma.xvi4ger8pp
    ppc_mma_xvi8ger4,                          // llvm.ppc.mma.xvi8ger4
    ppc_mma_xvi8ger4pp,                        // llvm.ppc.mma.xvi8ger4pp
    ppc_mma_xvi8ger4spp,                       // llvm.ppc.mma.xvi8ger4spp
    ppc_mma_xxmfacc,                           // llvm.ppc.mma.xxmfacc
    ppc_mma_xxmtacc,                           // llvm.ppc.mma.xxmtacc
    ppc_mma_xxsetaccz,                         // llvm.ppc.mma.xxsetaccz
    ppc_mtfsb0,                                // llvm.ppc.mtfsb0
    ppc_mtfsb1,                                // llvm.ppc.mtfsb1
    ppc_mtfsf,                                 // llvm.ppc.mtfsf
    ppc_mtfsfi,                                // llvm.ppc.mtfsfi
    ppc_mtmsr,                                 // llvm.ppc.mtmsr
    ppc_mtspr,                                 // llvm.ppc.mtspr
    ppc_mulf128_round_to_odd,                  // llvm.ppc.mulf128.round.to.odd
    ppc_mulhd,                                 // llvm.ppc.mulhd
    ppc_mulhdu,                                // llvm.ppc.mulhdu
    ppc_mulhw,                                 // llvm.ppc.mulhw
    ppc_mulhwu,                                // llvm.ppc.mulhwu
    ppc_pack_longdouble,                       // llvm.ppc.pack.longdouble
    ppc_pdepd,                                 // llvm.ppc.pdepd
    ppc_pextd,                                 // llvm.ppc.pextd
    ppc_popcntb,                               // llvm.ppc.popcntb
    ppc_readflm,                               // llvm.ppc.readflm
    ppc_scalar_extract_expq,                   // llvm.ppc.scalar.extract.expq
    ppc_scalar_insert_exp_qp,                  // llvm.ppc.scalar.insert.exp.qp
    ppc_set_texasr,                            // llvm.ppc.set.texasr
    ppc_set_texasru,                           // llvm.ppc.set.texasru
    ppc_set_tfhar,                             // llvm.ppc.set.tfhar
    ppc_set_tfiar,                             // llvm.ppc.set.tfiar
    ppc_setb,                                  // llvm.ppc.setb
    ppc_setflm,                                // llvm.ppc.setflm
    ppc_setrnd,                                // llvm.ppc.setrnd
    ppc_sqrtf128_round_to_odd,                 // llvm.ppc.sqrtf128.round.to.odd
    ppc_stbcx,                                 // llvm.ppc.stbcx
    ppc_stdcx,                                 // llvm.ppc.stdcx
    ppc_stfiw,                                 // llvm.ppc.stfiw
    ppc_sthcx,                                 // llvm.ppc.sthcx
    ppc_store2r,                               // llvm.ppc.store2r
    ppc_store4r,                               // llvm.ppc.store4r
    ppc_store8r,                               // llvm.ppc.store8r
    ppc_stwcx,                                 // llvm.ppc.stwcx
    ppc_subf128_round_to_odd,                  // llvm.ppc.subf128.round.to.odd
    ppc_sync,                                  // llvm.ppc.sync
    ppc_tabort,                                // llvm.ppc.tabort
    ppc_tabortdc,                              // llvm.ppc.tabortdc
    ppc_tabortdci,                             // llvm.ppc.tabortdci
    ppc_tabortwc,                              // llvm.ppc.tabortwc
    ppc_tabortwci,                             // llvm.ppc.tabortwci
    ppc_tbegin,                                // llvm.ppc.tbegin
    ppc_tcheck,                                // llvm.ppc.tcheck
    ppc_tdw,                                   // llvm.ppc.tdw
    ppc_tend,                                  // llvm.ppc.tend
    ppc_tendall,                               // llvm.ppc.tendall
    ppc_test_data_class,                       // llvm.ppc.test.data.class
    ppc_trap,                                  // llvm.ppc.trap
    ppc_trapd,                                 // llvm.ppc.trapd
    ppc_trechkpt,                              // llvm.ppc.trechkpt
    ppc_treclaim,                              // llvm.ppc.treclaim
    ppc_tresume,                               // llvm.ppc.tresume
    ppc_truncf128_round_to_odd,                // llvm.ppc.truncf128.round.to.odd
    ppc_tsr,                                   // llvm.ppc.tsr
    ppc_tsuspend,                              // llvm.ppc.tsuspend
    ppc_ttest,                                 // llvm.ppc.ttest
    ppc_tw,                                    // llvm.ppc.tw
    ppc_unpack_longdouble,                     // llvm.ppc.unpack.longdouble
    ppc_vsx_assemble_pair,                     // llvm.ppc.vsx.assemble.pair
    ppc_vsx_disassemble_pair,                  // llvm.ppc.vsx.disassemble.pair
    ppc_vsx_lxvd2x,                            // llvm.ppc.vsx.lxvd2x
    ppc_vsx_lxvd2x_be,                         // llvm.ppc.vsx.lxvd2x.be
    ppc_vsx_lxvl,                              // llvm.ppc.vsx.lxvl
    ppc_vsx_lxvll,                             // llvm.ppc.vsx.lxvll
    ppc_vsx_lxvp,                              // llvm.ppc.vsx.lxvp
    ppc_vsx_lxvw4x,                            // llvm.ppc.vsx.lxvw4x
    ppc_vsx_lxvw4x_be,                         // llvm.ppc.vsx.lxvw4x.be
    ppc_vsx_stxvd2x,                           // llvm.ppc.vsx.stxvd2x
    ppc_vsx_stxvd2x_be,                        // llvm.ppc.vsx.stxvd2x.be
    ppc_vsx_stxvl,                             // llvm.ppc.vsx.stxvl
    ppc_vsx_stxvll,                            // llvm.ppc.vsx.stxvll
    ppc_vsx_stxvp,                             // llvm.ppc.vsx.stxvp
    ppc_vsx_stxvw4x,                           // llvm.ppc.vsx.stxvw4x
    ppc_vsx_stxvw4x_be,                        // llvm.ppc.vsx.stxvw4x.be
    ppc_vsx_xsmaxdp,                           // llvm.ppc.vsx.xsmaxdp
    ppc_vsx_xsmindp,                           // llvm.ppc.vsx.xsmindp
    ppc_vsx_xvcmpeqdp,                         // llvm.ppc.vsx.xvcmpeqdp
    ppc_vsx_xvcmpeqdp_p,                       // llvm.ppc.vsx.xvcmpeqdp.p
    ppc_vsx_xvcmpeqsp,                         // llvm.ppc.vsx.xvcmpeqsp
    ppc_vsx_xvcmpeqsp_p,                       // llvm.ppc.vsx.xvcmpeqsp.p
    ppc_vsx_xvcmpgedp,                         // llvm.ppc.vsx.xvcmpgedp
    ppc_vsx_xvcmpgedp_p,                       // llvm.ppc.vsx.xvcmpgedp.p
    ppc_vsx_xvcmpgesp,                         // llvm.ppc.vsx.xvcmpgesp
    ppc_vsx_xvcmpgesp_p,                       // llvm.ppc.vsx.xvcmpgesp.p
    ppc_vsx_xvcmpgtdp,                         // llvm.ppc.vsx.xvcmpgtdp
    ppc_vsx_xvcmpgtdp_p,                       // llvm.ppc.vsx.xvcmpgtdp.p
    ppc_vsx_xvcmpgtsp,                         // llvm.ppc.vsx.xvcmpgtsp
    ppc_vsx_xvcmpgtsp_p,                       // llvm.ppc.vsx.xvcmpgtsp.p
    ppc_vsx_xvcvbf16spn,                       // llvm.ppc.vsx.xvcvbf16spn
    ppc_vsx_xvcvdpsp,                          // llvm.ppc.vsx.xvcvdpsp
    ppc_vsx_xvcvdpsxws,                        // llvm.ppc.vsx.xvcvdpsxws
    ppc_vsx_xvcvdpuxws,                        // llvm.ppc.vsx.xvcvdpuxws
    ppc_vsx_xvcvhpsp,                          // llvm.ppc.vsx.xvcvhpsp
    ppc_vsx_xvcvspbf16,                        // llvm.ppc.vsx.xvcvspbf16
    ppc_vsx_xvcvspdp,                          // llvm.ppc.vsx.xvcvspdp
    ppc_vsx_xvcvsphp,                          // llvm.ppc.vsx.xvcvsphp
    ppc_vsx_xvcvspsxds,                        // llvm.ppc.vsx.xvcvspsxds
    ppc_vsx_xvcvspuxds,                        // llvm.ppc.vsx.xvcvspuxds
    ppc_vsx_xvcvsxdsp,                         // llvm.ppc.vsx.xvcvsxdsp
    ppc_vsx_xvcvsxwdp,                         // llvm.ppc.vsx.xvcvsxwdp
    ppc_vsx_xvcvuxdsp,                         // llvm.ppc.vsx.xvcvuxdsp
    ppc_vsx_xvcvuxwdp,                         // llvm.ppc.vsx.xvcvuxwdp
    ppc_vsx_xvdivdp,                           // llvm.ppc.vsx.xvdivdp
    ppc_vsx_xvdivsp,                           // llvm.ppc.vsx.xvdivsp
    ppc_vsx_xviexpdp,                          // llvm.ppc.vsx.xviexpdp
    ppc_vsx_xviexpsp,                          // llvm.ppc.vsx.xviexpsp
    ppc_vsx_xvmaxdp,                           // llvm.ppc.vsx.xvmaxdp
    ppc_vsx_xvmaxsp,                           // llvm.ppc.vsx.xvmaxsp
    ppc_vsx_xvmindp,                           // llvm.ppc.vsx.xvmindp
    ppc_vsx_xvminsp,                           // llvm.ppc.vsx.xvminsp
    ppc_vsx_xvrdpip,                           // llvm.ppc.vsx.xvrdpip
    ppc_vsx_xvredp,                            // llvm.ppc.vsx.xvredp
    ppc_vsx_xvresp,                            // llvm.ppc.vsx.xvresp
    ppc_vsx_xvrspip,                           // llvm.ppc.vsx.xvrspip
    ppc_vsx_xvrsqrtedp,                        // llvm.ppc.vsx.xvrsqrtedp
    ppc_vsx_xvrsqrtesp,                        // llvm.ppc.vsx.xvrsqrtesp
    ppc_vsx_xvtdivdp,                          // llvm.ppc.vsx.xvtdivdp
    ppc_vsx_xvtdivsp,                          // llvm.ppc.vsx.xvtdivsp
    ppc_vsx_xvtlsbb,                           // llvm.ppc.vsx.xvtlsbb
    ppc_vsx_xvtsqrtdp,                         // llvm.ppc.vsx.xvtsqrtdp
    ppc_vsx_xvtsqrtsp,                         // llvm.ppc.vsx.xvtsqrtsp
    ppc_vsx_xvtstdcdp,                         // llvm.ppc.vsx.xvtstdcdp
    ppc_vsx_xvtstdcsp,                         // llvm.ppc.vsx.xvtstdcsp
    ppc_vsx_xvxexpdp,                          // llvm.ppc.vsx.xvxexpdp
    ppc_vsx_xvxexpsp,                          // llvm.ppc.vsx.xvxexpsp
    ppc_vsx_xvxsigdp,                          // llvm.ppc.vsx.xvxsigdp
    ppc_vsx_xvxsigsp,                          // llvm.ppc.vsx.xvxsigsp
    ppc_vsx_xxblendvb,                         // llvm.ppc.vsx.xxblendvb
    ppc_vsx_xxblendvd,                         // llvm.ppc.vsx.xxblendvd
    ppc_vsx_xxblendvh,                         // llvm.ppc.vsx.xxblendvh
    ppc_vsx_xxblendvw,                         // llvm.ppc.vsx.xxblendvw
    ppc_vsx_xxeval,                            // llvm.ppc.vsx.xxeval
    ppc_vsx_xxextractuw,                       // llvm.ppc.vsx.xxextractuw
    ppc_vsx_xxgenpcvbm,                        // llvm.ppc.vsx.xxgenpcvbm
    ppc_vsx_xxgenpcvdm,                        // llvm.ppc.vsx.xxgenpcvdm
    ppc_vsx_xxgenpcvhm,                        // llvm.ppc.vsx.xxgenpcvhm
    ppc_vsx_xxgenpcvwm,                        // llvm.ppc.vsx.xxgenpcvwm
    ppc_vsx_xxinsertw,                         // llvm.ppc.vsx.xxinsertw
    ppc_vsx_xxleqv,                            // llvm.ppc.vsx.xxleqv
    ppc_vsx_xxpermx,                           // llvm.ppc.vsx.xxpermx
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
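
// --- Editorial sketch (not part of the original headers) --------------------
// How these enumerator values are typically consumed: Intrinsic::getDeclaration
// materializes the matching declaration in a Module, and the result is called
// like any other function. llvm.ppc.darn takes no arguments and is not
// overloaded, so no type list is needed. A minimal sketch assuming the usual
// LLVM headers; emitDarn is a hypothetical helper, not an LLVM API.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"

static llvm::Value *emitDarn(llvm::Module &M, llvm::IRBuilder<> &Builder) {
  llvm::Function *Darn =
      llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::ppc_darn);
  return Builder.CreateCall(Darn); // the "deliver a random number" result
}
// -----------------------------------------------------------------------------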
IR/PseudoProbe.h
//===- PseudoProbe.h - Pseudo Probe IR Helpers ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pseudo probe IR intrinsic and dwarf discriminator manipulation routines.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PSEUDOPROBE_H
#define LLVM_IR_PSEUDOPROBE_H

#include <cassert>
#include <cstdint>
#include <limits>
#include <optional>

namespace llvm {

class Instruction;

constexpr const char *PseudoProbeDescMetadataName = "llvm.pseudo_probe_desc";

enum class PseudoProbeReservedId { Invalid = 0, Last = Invalid };

enum class PseudoProbeType { Block = 0, IndirectCall, DirectCall };

enum class PseudoProbeAttributes {
  Reserved = 0x1,
  Sentinel = 0x2,         // A placeholder for the split function entry address.
  HasDiscriminator = 0x4, // for probes with a discriminator
};

// The saturated distribution factor representing 100% for block probes.
constexpr static uint64_t PseudoProbeFullDistributionFactor =
    std::numeric_limits<uint64_t>::max();

struct PseudoProbeDwarfDiscriminator {
public:
  // The following APIs encode/decode per-probe information to/from a
  // 32-bit integer, which is organized as follows:
  //  [2:0] - 0x7, this is reserved for regular discriminator,
  //          see DWARF discriminator encoding rule
  //  [18:3] - probe id
  //  [25:19] - probe distribution factor
  //  [28:26] - probe type, see PseudoProbeType
  //  [31:29] - reserved for probe attributes
  static uint32_t packProbeData(uint32_t Index, uint32_t Type, uint32_t Flags,
                                uint32_t Factor) {
    assert(Index <= 0xFFFF && "Probe index too big to encode, exceeding 2^16");
    assert(Type <= 0x7 && "Probe type too big to encode, exceeding 7");
    assert(Flags <= 0x7);
    assert(Factor <= 100 &&
           "Probe distribution factor too big to encode, exceeding 100");
    return (Index << 3) | (Factor << 19) | (Type << 26) | 0x7;
  }

  static uint32_t extractProbeIndex(uint32_t Value) {
    return (Value >> 3) & 0xFFFF;
  }

  static uint32_t extractProbeType(uint32_t Value) {
    return (Value >> 26) & 0x7;
  }

  static uint32_t extractProbeAttributes(uint32_t Value) {
    return (Value >> 29) & 0x7;
  }

  static uint32_t extractProbeFactor(uint32_t Value) {
    return (Value >> 19) & 0x7F;
  }

  // The saturated distribution factor representing 100% for callsites.
  constexpr static uint8_t FullDistributionFactor = 100;
};
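
// Editorial sketch (not part of the original header): a round-trip check of
// the packing scheme above. The values are illustrative; Type 1 corresponds
// to PseudoProbeType::IndirectCall. Note that packProbeData only validates
// Flags without encoding it, so the attribute bits [31:29] stay zero here.
static inline void checkProbePackingRoundTrip() {
  uint32_t Packed = PseudoProbeDwarfDiscriminator::packProbeData(
      /*Index=*/5, /*Type=*/1, /*Flags=*/0, /*Factor=*/100);
  assert(Packed == 0x0720002Fu);
  assert(PseudoProbeDwarfDiscriminator::extractProbeIndex(Packed) == 5);
  assert(PseudoProbeDwarfDiscriminator::extractProbeType(Packed) == 1);
  assert(PseudoProbeDwarfDiscriminator::extractProbeFactor(Packed) == 100);
  assert(PseudoProbeDwarfDiscriminator::extractProbeAttributes(Packed) == 0);
  (void)Packed; // keep -Wunused-variable quiet when NDEBUG strips the asserts
}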

class PseudoProbeDescriptor {
  uint64_t FunctionGUID;
  uint64_t FunctionHash;

public:
  PseudoProbeDescriptor(uint64_t GUID, uint64_t Hash)
      : FunctionGUID(GUID), FunctionHash(Hash) {}
  uint64_t getFunctionGUID() const { return FunctionGUID; }
  uint64_t getFunctionHash() const { return FunctionHash; }
};

struct PseudoProbe {
  uint32_t Id;
  uint32_t Type;
  uint32_t Attr;
  uint32_t Discriminator;
  // Distribution factor that estimates the portion of the real execution count.
  // A saturated distribution factor stands for 1.0 or 100%. A pseudo probe has
  // a factor whose value ranges from 0.0 to 1.0.
  float Factor;
};

static inline bool isSentinelProbe(uint32_t Flags) {
  return Flags & (uint32_t)PseudoProbeAttributes::Sentinel;
}

static inline bool hasDiscriminator(uint32_t Flags) {
  return Flags & (uint32_t)PseudoProbeAttributes::HasDiscriminator;
}

std::optional<PseudoProbe> extractProbe(const Instruction &Inst);

void setProbeDistributionFactor(Instruction &Inst, float Factor);
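
// Editorial sketch (not part of the original header): the intended decode
// pattern for the declarations above. extractProbe returns std::nullopt for
// instructions that carry no pseudo probe, so clients test the optional before
// touching any field. isSentinelProbeInst is a hypothetical helper.
static inline bool isSentinelProbeInst(const Instruction &Inst) {
  std::optional<PseudoProbe> Probe = extractProbe(Inst);
  return Probe && isSentinelProbe(Probe->Attr);
}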
} // end namespace llvm

#endif // LLVM_IR_PSEUDOPROBE_H
IR/IntrinsicsAMDGPU.h
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_AMDGCN_ENUMS_H
#define LLVM_IR_INTRINSIC_AMDGCN_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum AMDGCNIntrinsics : unsigned {
// Enum values for intrinsics
    amdgcn_alignbyte = 1812,                          // llvm.amdgcn.alignbyte
    amdgcn_ballot,                             // llvm.amdgcn.ballot
    amdgcn_buffer_atomic_add,                  // llvm.amdgcn.buffer.atomic.add
    amdgcn_buffer_atomic_and,                  // llvm.amdgcn.buffer.atomic.and
    amdgcn_buffer_atomic_cmpswap,              // llvm.amdgcn.buffer.atomic.cmpswap
    amdgcn_buffer_atomic_csub,                 // llvm.amdgcn.buffer.atomic.csub
    amdgcn_buffer_atomic_fadd,                 // llvm.amdgcn.buffer.atomic.fadd
    amdgcn_buffer_atomic_or,                   // llvm.amdgcn.buffer.atomic.or
    amdgcn_buffer_atomic_smax,                 // llvm.amdgcn.buffer.atomic.smax
    amdgcn_buffer_atomic_smin,                 // llvm.amdgcn.buffer.atomic.smin
    amdgcn_buffer_atomic_sub,                  // llvm.amdgcn.buffer.atomic.sub
    amdgcn_buffer_atomic_swap,                 // llvm.amdgcn.buffer.atomic.swap
    amdgcn_buffer_atomic_umax,                 // llvm.amdgcn.buffer.atomic.umax
    amdgcn_buffer_atomic_umin,                 // llvm.amdgcn.buffer.atomic.umin
    amdgcn_buffer_atomic_xor,                  // llvm.amdgcn.buffer.atomic.xor
    amdgcn_buffer_load,                        // llvm.amdgcn.buffer.load
    amdgcn_buffer_load_format,                 // llvm.amdgcn.buffer.load.format
    amdgcn_buffer_store,                       // llvm.amdgcn.buffer.store
    amdgcn_buffer_store_format,                // llvm.amdgcn.buffer.store.format
    amdgcn_buffer_wbinvl1,                     // llvm.amdgcn.buffer.wbinvl1
    amdgcn_buffer_wbinvl1_sc,                  // llvm.amdgcn.buffer.wbinvl1.sc
    amdgcn_buffer_wbinvl1_vol,                 // llvm.amdgcn.buffer.wbinvl1.vol
    amdgcn_class,                              // llvm.amdgcn.class
    amdgcn_cos,                                // llvm.amdgcn.cos
    amdgcn_cs_chain,                           // llvm.amdgcn.cs.chain
    amdgcn_cubeid,                             // llvm.amdgcn.cubeid
    amdgcn_cubema,                             // llvm.amdgcn.cubema
    amdgcn_cubesc,                             // llvm.amdgcn.cubesc
    amdgcn_cubetc,                             // llvm.amdgcn.cubetc
    amdgcn_cvt_f32_bf8,                        // llvm.amdgcn.cvt.f32.bf8
    amdgcn_cvt_f32_fp8,                        // llvm.amdgcn.cvt.f32.fp8
    amdgcn_cvt_pk_bf8_f32,                     // llvm.amdgcn.cvt.pk.bf8.f32
    amdgcn_cvt_pk_f32_bf8,                     // llvm.amdgcn.cvt.pk.f32.bf8
    amdgcn_cvt_pk_f32_fp8,                     // llvm.amdgcn.cvt.pk.f32.fp8
    amdgcn_cvt_pk_fp8_f32,                     // llvm.amdgcn.cvt.pk.fp8.f32
    amdgcn_cvt_pk_i16,                         // llvm.amdgcn.cvt.pk.i16
    amdgcn_cvt_pk_u16,                         // llvm.amdgcn.cvt.pk.u16
    amdgcn_cvt_pk_u8_f32,                      // llvm.amdgcn.cvt.pk.u8.f32
    amdgcn_cvt_pknorm_i16,                     // llvm.amdgcn.cvt.pknorm.i16
    amdgcn_cvt_pknorm_u16,                     // llvm.amdgcn.cvt.pknorm.u16
    amdgcn_cvt_pkrtz,                          // llvm.amdgcn.cvt.pkrtz
    amdgcn_cvt_sr_bf8_f32,                     // llvm.amdgcn.cvt.sr.bf8.f32
    amdgcn_cvt_sr_fp8_f32,                     // llvm.amdgcn.cvt.sr.fp8.f32
    amdgcn_dispatch_id,                        // llvm.amdgcn.dispatch.id
    amdgcn_dispatch_ptr,                       // llvm.amdgcn.dispatch.ptr
    amdgcn_div_fixup,                          // llvm.amdgcn.div.fixup
    amdgcn_div_fmas,                           // llvm.amdgcn.div.fmas
    amdgcn_div_scale,                          // llvm.amdgcn.div.scale
    amdgcn_ds_add_gs_reg_rtn,                  // llvm.amdgcn.ds.add.gs.reg.rtn
    amdgcn_ds_append,                          // llvm.amdgcn.ds.append
    amdgcn_ds_bpermute,                        // llvm.amdgcn.ds.bpermute
    amdgcn_ds_bvh_stack_rtn,                   // llvm.amdgcn.ds.bvh.stack.rtn
    amdgcn_ds_consume,                         // llvm.amdgcn.ds.consume
    amdgcn_ds_fadd,                            // llvm.amdgcn.ds.fadd
    amdgcn_ds_fadd_v2bf16,                     // llvm.amdgcn.ds.fadd.v2bf16
    amdgcn_ds_fmax,                            // llvm.amdgcn.ds.fmax
    amdgcn_ds_fmin,                            // llvm.amdgcn.ds.fmin
    amdgcn_ds_gws_barrier,                     // llvm.amdgcn.ds.gws.barrier
    amdgcn_ds_gws_init,                        // llvm.amdgcn.ds.gws.init
    amdgcn_ds_gws_sema_br,                     // llvm.amdgcn.ds.gws.sema.br
    amdgcn_ds_gws_sema_p,                      // llvm.amdgcn.ds.gws.sema.p
    amdgcn_ds_gws_sema_release_all,            // llvm.amdgcn.ds.gws.sema.release.all
    amdgcn_ds_gws_sema_v,                      // llvm.amdgcn.ds.gws.sema.v
    amdgcn_ds_ordered_add,                     // llvm.amdgcn.ds.ordered.add
    amdgcn_ds_ordered_swap,                    // llvm.amdgcn.ds.ordered.swap
    amdgcn_ds_permute,                         // llvm.amdgcn.ds.permute
    amdgcn_ds_sub_gs_reg_rtn,                  // llvm.amdgcn.ds.sub.gs.reg.rtn
    amdgcn_ds_swizzle,                         // llvm.amdgcn.ds.swizzle
    amdgcn_else,                               // llvm.amdgcn.else
    amdgcn_end_cf,                             // llvm.amdgcn.end.cf
    amdgcn_endpgm,                             // llvm.amdgcn.endpgm
    amdgcn_exp,                                // llvm.amdgcn.exp
    amdgcn_exp_compr,                          // llvm.amdgcn.exp.compr
    amdgcn_exp_row,                            // llvm.amdgcn.exp.row
    amdgcn_exp2,                               // llvm.amdgcn.exp2
    amdgcn_fcmp,                               // llvm.amdgcn.fcmp
    amdgcn_fdiv_fast,                          // llvm.amdgcn.fdiv.fast
    amdgcn_fdot2,                              // llvm.amdgcn.fdot2
    amdgcn_fdot2_bf16_bf16,                    // llvm.amdgcn.fdot2.bf16.bf16
    amdgcn_fdot2_f16_f16,                      // llvm.amdgcn.fdot2.f16.f16
    amdgcn_fdot2_f32_bf16,                     // llvm.amdgcn.fdot2.f32.bf16
    amdgcn_flat_atomic_fadd,                   // llvm.amdgcn.flat.atomic.fadd
    amdgcn_flat_atomic_fadd_v2bf16,            // llvm.amdgcn.flat.atomic.fadd.v2bf16
    amdgcn_flat_atomic_fmax,                   // llvm.amdgcn.flat.atomic.fmax
    amdgcn_flat_atomic_fmin,                   // llvm.amdgcn.flat.atomic.fmin
    amdgcn_fma_legacy,                         // llvm.amdgcn.fma.legacy
    amdgcn_fmad_ftz,                           // llvm.amdgcn.fmad.ftz
    amdgcn_fmed3,                              // llvm.amdgcn.fmed3
    amdgcn_fmul_legacy,                        // llvm.amdgcn.fmul.legacy
    amdgcn_fract,                              // llvm.amdgcn.fract
    amdgcn_frexp_exp,                          // llvm.amdgcn.frexp.exp
    amdgcn_frexp_mant,                         // llvm.amdgcn.frexp.mant
    amdgcn_global_atomic_csub,                 // llvm.amdgcn.global.atomic.csub
    amdgcn_global_atomic_fadd,                 // llvm.amdgcn.global.atomic.fadd
    amdgcn_global_atomic_fadd_v2bf16,          // llvm.amdgcn.global.atomic.fadd.v2bf16
    amdgcn_global_atomic_fmax,                 // llvm.amdgcn.global.atomic.fmax
    amdgcn_global_atomic_fmin,                 // llvm.amdgcn.global.atomic.fmin
    amdgcn_global_load_lds,                    // llvm.amdgcn.global.load.lds
    amdgcn_groupstaticsize,                    // llvm.amdgcn.groupstaticsize
    amdgcn_icmp,                               // llvm.amdgcn.icmp
    amdgcn_if,                                 // llvm.amdgcn.if
    amdgcn_if_break,                           // llvm.amdgcn.if.break
    amdgcn_iglp_opt,                           // llvm.amdgcn.iglp.opt
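    // llvm.amdgcn.image.atomic.<op>.<dim>: image atomic read-modify-write
    // intrinsics, one enumerator per operation (add, and, cmpswap, ...) and
    // per addressing dimension (1d, 2d, 3d, cube, array, and MSAA forms).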
    amdgcn_image_atomic_add_1d,                // llvm.amdgcn.image.atomic.add.1d
    amdgcn_image_atomic_add_1darray,           // llvm.amdgcn.image.atomic.add.1darray
    amdgcn_image_atomic_add_2d,                // llvm.amdgcn.image.atomic.add.2d
    amdgcn_image_atomic_add_2darray,           // llvm.amdgcn.image.atomic.add.2darray
    amdgcn_image_atomic_add_2darraymsaa,       // llvm.amdgcn.image.atomic.add.2darraymsaa
    amdgcn_image_atomic_add_2dmsaa,            // llvm.amdgcn.image.atomic.add.2dmsaa
    amdgcn_image_atomic_add_3d,                // llvm.amdgcn.image.atomic.add.3d
    amdgcn_image_atomic_add_cube,              // llvm.amdgcn.image.atomic.add.cube
    amdgcn_image_atomic_and_1d,                // llvm.amdgcn.image.atomic.and.1d
    amdgcn_image_atomic_and_1darray,           // llvm.amdgcn.image.atomic.and.1darray
    amdgcn_image_atomic_and_2d,                // llvm.amdgcn.image.atomic.and.2d
    amdgcn_image_atomic_and_2darray,           // llvm.amdgcn.image.atomic.and.2darray
    amdgcn_image_atomic_and_2darraymsaa,       // llvm.amdgcn.image.atomic.and.2darraymsaa
    amdgcn_image_atomic_and_2dmsaa,            // llvm.amdgcn.image.atomic.and.2dmsaa
    amdgcn_image_atomic_and_3d,                // llvm.amdgcn.image.atomic.and.3d
    amdgcn_image_atomic_and_cube,              // llvm.amdgcn.image.atomic.and.cube
    amdgcn_image_atomic_cmpswap_1d,            // llvm.amdgcn.image.atomic.cmpswap.1d
    amdgcn_image_atomic_cmpswap_1darray,       // llvm.amdgcn.image.atomic.cmpswap.1darray
    amdgcn_image_atomic_cmpswap_2d,            // llvm.amdgcn.image.atomic.cmpswap.2d
    amdgcn_image_atomic_cmpswap_2darray,       // llvm.amdgcn.image.atomic.cmpswap.2darray
    amdgcn_image_atomic_cmpswap_2darraymsaa,   // llvm.amdgcn.image.atomic.cmpswap.2darraymsaa
    amdgcn_image_atomic_cmpswap_2dmsaa,        // llvm.amdgcn.image.atomic.cmpswap.2dmsaa
    amdgcn_image_atomic_cmpswap_3d,            // llvm.amdgcn.image.atomic.cmpswap.3d
    amdgcn_image_atomic_cmpswap_cube,          // llvm.amdgcn.image.atomic.cmpswap.cube
    amdgcn_image_atomic_dec_1d,                // llvm.amdgcn.image.atomic.dec.1d
    amdgcn_image_atomic_dec_1darray,           // llvm.amdgcn.image.atomic.dec.1darray
    amdgcn_image_atomic_dec_2d,                // llvm.amdgcn.image.atomic.dec.2d
    amdgcn_image_atomic_dec_2darray,           // llvm.amdgcn.image.atomic.dec.2darray
    amdgcn_image_atomic_dec_2darraymsaa,       // llvm.amdgcn.image.atomic.dec.2darraymsaa
    amdgcn_image_atomic_dec_2dmsaa,            // llvm.amdgcn.image.atomic.dec.2dmsaa
    amdgcn_image_atomic_dec_3d,                // llvm.amdgcn.image.atomic.dec.3d
    amdgcn_image_atomic_dec_cube,              // llvm.amdgcn.image.atomic.dec.cube
    amdgcn_image_atomic_fmax_1d,               // llvm.amdgcn.image.atomic.fmax.1d
    amdgcn_image_atomic_fmax_1darray,          // llvm.amdgcn.image.atomic.fmax.1darray
    amdgcn_image_atomic_fmax_2d,               // llvm.amdgcn.image.atomic.fmax.2d
    amdgcn_image_atomic_fmax_2darray,          // llvm.amdgcn.image.atomic.fmax.2darray
    amdgcn_image_atomic_fmax_2darraymsaa,      // llvm.amdgcn.image.atomic.fmax.2darraymsaa
    amdgcn_image_atomic_fmax_2dmsaa,           // llvm.amdgcn.image.atomic.fmax.2dmsaa
    amdgcn_image_atomic_fmax_3d,               // llvm.amdgcn.image.atomic.fmax.3d
    amdgcn_image_atomic_fmax_cube,             // llvm.amdgcn.image.atomic.fmax.cube
    amdgcn_image_atomic_fmin_1d,               // llvm.amdgcn.image.atomic.fmin.1d
    amdgcn_image_atomic_fmin_1darray,          // llvm.amdgcn.image.atomic.fmin.1darray
    amdgcn_image_atomic_fmin_2d,               // llvm.amdgcn.image.atomic.fmin.2d
    amdgcn_image_atomic_fmin_2darray,          // llvm.amdgcn.image.atomic.fmin.2darray
    amdgcn_image_atomic_fmin_2darraymsaa,      // llvm.amdgcn.image.atomic.fmin.2darraymsaa
    amdgcn_image_atomic_fmin_2dmsaa,           // llvm.amdgcn.image.atomic.fmin.2dmsaa
    amdgcn_image_atomic_fmin_3d,               // llvm.amdgcn.image.atomic.fmin.3d
    amdgcn_image_atomic_fmin_cube,             // llvm.amdgcn.image.atomic.fmin.cube
    amdgcn_image_atomic_inc_1d,                // llvm.amdgcn.image.atomic.inc.1d
    amdgcn_image_atomic_inc_1darray,           // llvm.amdgcn.image.atomic.inc.1darray
    amdgcn_image_atomic_inc_2d,                // llvm.amdgcn.image.atomic.inc.2d
    amdgcn_image_atomic_inc_2darray,           // llvm.amdgcn.image.atomic.inc.2darray
    amdgcn_image_atomic_inc_2darraymsaa,       // llvm.amdgcn.image.atomic.inc.2darraymsaa
    amdgcn_image_atomic_inc_2dmsaa,            // llvm.amdgcn.image.atomic.inc.2dmsaa
    amdgcn_image_atomic_inc_3d,                // llvm.amdgcn.image.atomic.inc.3d
    amdgcn_image_atomic_inc_cube,              // llvm.amdgcn.image.atomic.inc.cube
    amdgcn_image_atomic_or_1d,                 // llvm.amdgcn.image.atomic.or.1d
    amdgcn_image_atomic_or_1darray,            // llvm.amdgcn.image.atomic.or.1darray
    amdgcn_image_atomic_or_2d,                 // llvm.amdgcn.image.atomic.or.2d
    amdgcn_image_atomic_or_2darray,            // llvm.amdgcn.image.atomic.or.2darray
    amdgcn_image_atomic_or_2darraymsaa,        // llvm.amdgcn.image.atomic.or.2darraymsaa
    amdgcn_image_atomic_or_2dmsaa,             // llvm.amdgcn.image.atomic.or.2dmsaa
    amdgcn_image_atomic_or_3d,                 // llvm.amdgcn.image.atomic.or.3d
    amdgcn_image_atomic_or_cube,               // llvm.amdgcn.image.atomic.or.cube
    amdgcn_image_atomic_smax_1d,               // llvm.amdgcn.image.atomic.smax.1d
    amdgcn_image_atomic_smax_1darray,          // llvm.amdgcn.image.atomic.smax.1darray
    amdgcn_image_atomic_smax_2d,               // llvm.amdgcn.image.atomic.smax.2d
    amdgcn_image_atomic_smax_2darray,          // llvm.amdgcn.image.atomic.smax.2darray
    amdgcn_image_atomic_smax_2darraymsaa,      // llvm.amdgcn.image.atomic.smax.2darraymsaa
    amdgcn_image_atomic_smax_2dmsaa,           // llvm.amdgcn.image.atomic.smax.2dmsaa
    amdgcn_image_atomic_smax_3d,               // llvm.amdgcn.image.atomic.smax.3d
    amdgcn_image_atomic_smax_cube,             // llvm.amdgcn.image.atomic.smax.cube
    amdgcn_image_atomic_smin_1d,               // llvm.amdgcn.image.atomic.smin.1d
    amdgcn_image_atomic_smin_1darray,          // llvm.amdgcn.image.atomic.smin.1darray
    amdgcn_image_atomic_smin_2d,               // llvm.amdgcn.image.atomic.smin.2d
    amdgcn_image_atomic_smin_2darray,          // llvm.amdgcn.image.atomic.smin.2darray
    amdgcn_image_atomic_smin_2darraymsaa,      // llvm.amdgcn.image.atomic.smin.2darraymsaa
    amdgcn_image_atomic_smin_2dmsaa,           // llvm.amdgcn.image.atomic.smin.2dmsaa
    amdgcn_image_atomic_smin_3d,               // llvm.amdgcn.image.atomic.smin.3d
    amdgcn_image_atomic_smin_cube,             // llvm.amdgcn.image.atomic.smin.cube
    amdgcn_image_atomic_sub_1d,                // llvm.amdgcn.image.atomic.sub.1d
    amdgcn_image_atomic_sub_1darray,           // llvm.amdgcn.image.atomic.sub.1darray
    amdgcn_image_atomic_sub_2d,                // llvm.amdgcn.image.atomic.sub.2d
    amdgcn_image_atomic_sub_2darray,           // llvm.amdgcn.image.atomic.sub.2darray
    amdgcn_image_atomic_sub_2darraymsaa,       // llvm.amdgcn.image.atomic.sub.2darraymsaa
    amdgcn_image_atomic_sub_2dmsaa,            // llvm.amdgcn.image.atomic.sub.2dmsaa
    amdgcn_image_atomic_sub_3d,                // llvm.amdgcn.image.atomic.sub.3d
    amdgcn_image_atomic_sub_cube,              // llvm.amdgcn.image.atomic.sub.cube
    amdgcn_image_atomic_swap_1d,               // llvm.amdgcn.image.atomic.swap.1d
    amdgcn_image_atomic_swap_1darray,          // llvm.amdgcn.image.atomic.swap.1darray
    amdgcn_image_atomic_swap_2d,               // llvm.amdgcn.image.atomic.swap.2d
    amdgcn_image_atomic_swap_2darray,          // llvm.amdgcn.image.atomic.swap.2darray
    amdgcn_image_atomic_swap_2darraymsaa,      // llvm.amdgcn.image.atomic.swap.2darraymsaa
    amdgcn_image_atomic_swap_2dmsaa,           // llvm.amdgcn.image.atomic.swap.2dmsaa
    amdgcn_image_atomic_swap_3d,               // llvm.amdgcn.image.atomic.swap.3d
    amdgcn_image_atomic_swap_cube,             // llvm.amdgcn.image.atomic.swap.cube
    amdgcn_image_atomic_umax_1d,               // llvm.amdgcn.image.atomic.umax.1d
    amdgcn_image_atomic_umax_1darray,          // llvm.amdgcn.image.atomic.umax.1darray
    amdgcn_image_atomic_umax_2d,               // llvm.amdgcn.image.atomic.umax.2d
    amdgcn_image_atomic_umax_2darray,          // llvm.amdgcn.image.atomic.umax.2darray
    amdgcn_image_atomic_umax_2darraymsaa,      // llvm.amdgcn.image.atomic.umax.2darraymsaa
    amdgcn_image_atomic_umax_2dmsaa,           // llvm.amdgcn.image.atomic.umax.2dmsaa
    amdgcn_image_atomic_umax_3d,               // llvm.amdgcn.image.atomic.umax.3d
    amdgcn_image_atomic_umax_cube,             // llvm.amdgcn.image.atomic.umax.cube
    amdgcn_image_atomic_umin_1d,               // llvm.amdgcn.image.atomic.umin.1d
    amdgcn_image_atomic_umin_1darray,          // llvm.amdgcn.image.atomic.umin.1darray
    amdgcn_image_atomic_umin_2d,               // llvm.amdgcn.image.atomic.umin.2d
    amdgcn_image_atomic_umin_2darray,          // llvm.amdgcn.image.atomic.umin.2darray
    amdgcn_image_atomic_umin_2darraymsaa,      // llvm.amdgcn.image.atomic.umin.2darraymsaa
    amdgcn_image_atomic_umin_2dmsaa,           // llvm.amdgcn.image.atomic.umin.2dmsaa
    amdgcn_image_atomic_umin_3d,               // llvm.amdgcn.image.atomic.umin.3d
    amdgcn_image_atomic_umin_cube,             // llvm.amdgcn.image.atomic.umin.cube
    amdgcn_image_atomic_xor_1d,                // llvm.amdgcn.image.atomic.xor.1d
    amdgcn_image_atomic_xor_1darray,           // llvm.amdgcn.image.atomic.xor.1darray
    amdgcn_image_atomic_xor_2d,                // llvm.amdgcn.image.atomic.xor.2d
    amdgcn_image_atomic_xor_2darray,           // llvm.amdgcn.image.atomic.xor.2darray
    amdgcn_image_atomic_xor_2darraymsaa,       // llvm.amdgcn.image.atomic.xor.2darraymsaa
    amdgcn_image_atomic_xor_2dmsaa,            // llvm.amdgcn.image.atomic.xor.2dmsaa
    amdgcn_image_atomic_xor_3d,                // llvm.amdgcn.image.atomic.xor.3d
    amdgcn_image_atomic_xor_cube,              // llvm.amdgcn.image.atomic.xor.cube
    amdgcn_image_bvh_intersect_ray,            // llvm.amdgcn.image.bvh.intersect.ray
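    // llvm.amdgcn.image.gather4.*: four-texel gather variants; the suffixes
    // compose MIMG modifiers (b = LOD bias, c = depth compare, cl = LOD
    // clamp, l = explicit LOD, lz = LOD zero, o = texel offsets).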
    amdgcn_image_gather4_2d,                   // llvm.amdgcn.image.gather4.2d
    amdgcn_image_gather4_2darray,              // llvm.amdgcn.image.gather4.2darray
    amdgcn_image_gather4_b_2d,                 // llvm.amdgcn.image.gather4.b.2d
    amdgcn_image_gather4_b_2darray,            // llvm.amdgcn.image.gather4.b.2darray
    amdgcn_image_gather4_b_cl_2d,              // llvm.amdgcn.image.gather4.b.cl.2d
    amdgcn_image_gather4_b_cl_2darray,         // llvm.amdgcn.image.gather4.b.cl.2darray
    amdgcn_image_gather4_b_cl_cube,            // llvm.amdgcn.image.gather4.b.cl.cube
    amdgcn_image_gather4_b_cl_o_2d,            // llvm.amdgcn.image.gather4.b.cl.o.2d
    amdgcn_image_gather4_b_cl_o_2darray,       // llvm.amdgcn.image.gather4.b.cl.o.2darray
    amdgcn_image_gather4_b_cl_o_cube,          // llvm.amdgcn.image.gather4.b.cl.o.cube
    amdgcn_image_gather4_b_cube,               // llvm.amdgcn.image.gather4.b.cube
    amdgcn_image_gather4_b_o_2d,               // llvm.amdgcn.image.gather4.b.o.2d
    amdgcn_image_gather4_b_o_2darray,          // llvm.amdgcn.image.gather4.b.o.2darray
    amdgcn_image_gather4_b_o_cube,             // llvm.amdgcn.image.gather4.b.o.cube
    amdgcn_image_gather4_c_2d,                 // llvm.amdgcn.image.gather4.c.2d
    amdgcn_image_gather4_c_2darray,            // llvm.amdgcn.image.gather4.c.2darray
    amdgcn_image_gather4_c_b_2d,               // llvm.amdgcn.image.gather4.c.b.2d
    amdgcn_image_gather4_c_b_2darray,          // llvm.amdgcn.image.gather4.c.b.2darray
    amdgcn_image_gather4_c_b_cl_2d,            // llvm.amdgcn.image.gather4.c.b.cl.2d
    amdgcn_image_gather4_c_b_cl_2darray,       // llvm.amdgcn.image.gather4.c.b.cl.2darray
    amdgcn_image_gather4_c_b_cl_cube,          // llvm.amdgcn.image.gather4.c.b.cl.cube
    amdgcn_image_gather4_c_b_cl_o_2d,          // llvm.amdgcn.image.gather4.c.b.cl.o.2d
    amdgcn_image_gather4_c_b_cl_o_2darray,     // llvm.amdgcn.image.gather4.c.b.cl.o.2darray
    amdgcn_image_gather4_c_b_cl_o_cube,        // llvm.amdgcn.image.gather4.c.b.cl.o.cube
    amdgcn_image_gather4_c_b_cube,             // llvm.amdgcn.image.gather4.c.b.cube
    amdgcn_image_gather4_c_b_o_2d,             // llvm.amdgcn.image.gather4.c.b.o.2d
    amdgcn_image_gather4_c_b_o_2darray,        // llvm.amdgcn.image.gather4.c.b.o.2darray
    amdgcn_image_gather4_c_b_o_cube,           // llvm.amdgcn.image.gather4.c.b.o.cube
    amdgcn_image_gather4_c_cl_2d,              // llvm.amdgcn.image.gather4.c.cl.2d
    amdgcn_image_gather4_c_cl_2darray,         // llvm.amdgcn.image.gather4.c.cl.2darray
    amdgcn_image_gather4_c_cl_cube,            // llvm.amdgcn.image.gather4.c.cl.cube
    amdgcn_image_gather4_c_cl_o_2d,            // llvm.amdgcn.image.gather4.c.cl.o.2d
    amdgcn_image_gather4_c_cl_o_2darray,       // llvm.amdgcn.image.gather4.c.cl.o.2darray
    amdgcn_image_gather4_c_cl_o_cube,          // llvm.amdgcn.image.gather4.c.cl.o.cube
    amdgcn_image_gather4_c_cube,               // llvm.amdgcn.image.gather4.c.cube
    amdgcn_image_gather4_c_l_2d,               // llvm.amdgcn.image.gather4.c.l.2d
    amdgcn_image_gather4_c_l_2darray,          // llvm.amdgcn.image.gather4.c.l.2darray
    amdgcn_image_gather4_c_l_cube,             // llvm.amdgcn.image.gather4.c.l.cube
    amdgcn_image_gather4_c_l_o_2d,             // llvm.amdgcn.image.gather4.c.l.o.2d
    amdgcn_image_gather4_c_l_o_2darray,        // llvm.amdgcn.image.gather4.c.l.o.2darray
    amdgcn_image_gather4_c_l_o_cube,           // llvm.amdgcn.image.gather4.c.l.o.cube
    amdgcn_image_gather4_c_lz_2d,              // llvm.amdgcn.image.gather4.c.lz.2d
    amdgcn_image_gather4_c_lz_2darray,         // llvm.amdgcn.image.gather4.c.lz.2darray
    amdgcn_image_gather4_c_lz_cube,            // llvm.amdgcn.image.gather4.c.lz.cube
    amdgcn_image_gather4_c_lz_o_2d,            // llvm.amdgcn.image.gather4.c.lz.o.2d
    amdgcn_image_gather4_c_lz_o_2darray,       // llvm.amdgcn.image.gather4.c.lz.o.2darray
    amdgcn_image_gather4_c_lz_o_cube,          // llvm.amdgcn.image.gather4.c.lz.o.cube
    amdgcn_image_gather4_c_o_2d,               // llvm.amdgcn.image.gather4.c.o.2d
    amdgcn_image_gather4_c_o_2darray,          // llvm.amdgcn.image.gather4.c.o.2darray
    amdgcn_image_gather4_c_o_cube,             // llvm.amdgcn.image.gather4.c.o.cube
    amdgcn_image_gather4_cl_2d,                // llvm.amdgcn.image.gather4.cl.2d
    amdgcn_image_gather4_cl_2darray,           // llvm.amdgcn.image.gather4.cl.2darray
    amdgcn_image_gather4_cl_cube,              // llvm.amdgcn.image.gather4.cl.cube
    amdgcn_image_gather4_cl_o_2d,              // llvm.amdgcn.image.gather4.cl.o.2d
    amdgcn_image_gather4_cl_o_2darray,         // llvm.amdgcn.image.gather4.cl.o.2darray
    amdgcn_image_gather4_cl_o_cube,            // llvm.amdgcn.image.gather4.cl.o.cube
    amdgcn_image_gather4_cube,                 // llvm.amdgcn.image.gather4.cube
    amdgcn_image_gather4_l_2d,                 // llvm.amdgcn.image.gather4.l.2d
    amdgcn_image_gather4_l_2darray,            // llvm.amdgcn.image.gather4.l.2darray
    amdgcn_image_gather4_l_cube,               // llvm.amdgcn.image.gather4.l.cube
    amdgcn_image_gather4_l_o_2d,               // llvm.amdgcn.image.gather4.l.o.2d
    amdgcn_image_gather4_l_o_2darray,          // llvm.amdgcn.image.gather4.l.o.2darray
    amdgcn_image_gather4_l_o_cube,             // llvm.amdgcn.image.gather4.l.o.cube
    amdgcn_image_gather4_lz_2d,                // llvm.amdgcn.image.gather4.lz.2d
    amdgcn_image_gather4_lz_2darray,           // llvm.amdgcn.image.gather4.lz.2darray
    amdgcn_image_gather4_lz_cube,              // llvm.amdgcn.image.gather4.lz.cube
    amdgcn_image_gather4_lz_o_2d,              // llvm.amdgcn.image.gather4.lz.o.2d
    amdgcn_image_gather4_lz_o_2darray,         // llvm.amdgcn.image.gather4.lz.o.2darray
    amdgcn_image_gather4_lz_o_cube,            // llvm.amdgcn.image.gather4.lz.o.cube
    amdgcn_image_gather4_o_2d,                 // llvm.amdgcn.image.gather4.o.2d
    amdgcn_image_gather4_o_2darray,            // llvm.amdgcn.image.gather4.o.2darray
    amdgcn_image_gather4_o_cube,               // llvm.amdgcn.image.gather4.o.cube
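    // llvm.amdgcn.image.getlod/getresinfo and image.load[.mip]/msaa.load:
    // image query and load operations over the same dimension set.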
    amdgcn_image_getlod_1d,                    // llvm.amdgcn.image.getlod.1d
    amdgcn_image_getlod_1darray,               // llvm.amdgcn.image.getlod.1darray
    amdgcn_image_getlod_2d,                    // llvm.amdgcn.image.getlod.2d
    amdgcn_image_getlod_2darray,               // llvm.amdgcn.image.getlod.2darray
    amdgcn_image_getlod_3d,                    // llvm.amdgcn.image.getlod.3d
    amdgcn_image_getlod_cube,                  // llvm.amdgcn.image.getlod.cube
    amdgcn_image_getresinfo_1d,                // llvm.amdgcn.image.getresinfo.1d
    amdgcn_image_getresinfo_1darray,           // llvm.amdgcn.image.getresinfo.1darray
    amdgcn_image_getresinfo_2d,                // llvm.amdgcn.image.getresinfo.2d
    amdgcn_image_getresinfo_2darray,           // llvm.amdgcn.image.getresinfo.2darray
    amdgcn_image_getresinfo_2darraymsaa,       // llvm.amdgcn.image.getresinfo.2darraymsaa
    amdgcn_image_getresinfo_2dmsaa,            // llvm.amdgcn.image.getresinfo.2dmsaa
    amdgcn_image_getresinfo_3d,                // llvm.amdgcn.image.getresinfo.3d
    amdgcn_image_getresinfo_cube,              // llvm.amdgcn.image.getresinfo.cube
    amdgcn_image_load_1d,                      // llvm.amdgcn.image.load.1d
    amdgcn_image_load_1darray,                 // llvm.amdgcn.image.load.1darray
    amdgcn_image_load_2d,                      // llvm.amdgcn.image.load.2d
    amdgcn_image_load_2darray,                 // llvm.amdgcn.image.load.2darray
    amdgcn_image_load_2darraymsaa,             // llvm.amdgcn.image.load.2darraymsaa
    amdgcn_image_load_2dmsaa,                  // llvm.amdgcn.image.load.2dmsaa
    amdgcn_image_load_3d,                      // llvm.amdgcn.image.load.3d
    amdgcn_image_load_cube,                    // llvm.amdgcn.image.load.cube
    amdgcn_image_load_mip_1d,                  // llvm.amdgcn.image.load.mip.1d
    amdgcn_image_load_mip_1darray,             // llvm.amdgcn.image.load.mip.1darray
    amdgcn_image_load_mip_2d,                  // llvm.amdgcn.image.load.mip.2d
    amdgcn_image_load_mip_2darray,             // llvm.amdgcn.image.load.mip.2darray
    amdgcn_image_load_mip_3d,                  // llvm.amdgcn.image.load.mip.3d
    amdgcn_image_load_mip_cube,                // llvm.amdgcn.image.load.mip.cube
    amdgcn_image_msaa_load_2darraymsaa,        // llvm.amdgcn.image.msaa.load.2darraymsaa
    amdgcn_image_msaa_load_2dmsaa,             // llvm.amdgcn.image.msaa.load.2dmsaa
    amdgcn_image_msaa_load_x_2darraymsaa,      // llvm.amdgcn.image.msaa.load.x.2darraymsaa
    amdgcn_image_msaa_load_x_2dmsaa,           // llvm.amdgcn.image.msaa.load.x.2dmsaa
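    // llvm.amdgcn.image.sample.*: the full sampler matrix; in addition to
    // the gather4 modifiers above, d = explicit derivatives and cd = coarse
    // derivatives.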
    amdgcn_image_sample_1d,                    // llvm.amdgcn.image.sample.1d
    amdgcn_image_sample_1darray,               // llvm.amdgcn.image.sample.1darray
    amdgcn_image_sample_2d,                    // llvm.amdgcn.image.sample.2d
    amdgcn_image_sample_2darray,               // llvm.amdgcn.image.sample.2darray
    amdgcn_image_sample_3d,                    // llvm.amdgcn.image.sample.3d
    amdgcn_image_sample_b_1d,                  // llvm.amdgcn.image.sample.b.1d
    amdgcn_image_sample_b_1darray,             // llvm.amdgcn.image.sample.b.1darray
    amdgcn_image_sample_b_2d,                  // llvm.amdgcn.image.sample.b.2d
    amdgcn_image_sample_b_2darray,             // llvm.amdgcn.image.sample.b.2darray
    amdgcn_image_sample_b_3d,                  // llvm.amdgcn.image.sample.b.3d
    amdgcn_image_sample_b_cl_1d,               // llvm.amdgcn.image.sample.b.cl.1d
    amdgcn_image_sample_b_cl_1darray,          // llvm.amdgcn.image.sample.b.cl.1darray
    amdgcn_image_sample_b_cl_2d,               // llvm.amdgcn.image.sample.b.cl.2d
    amdgcn_image_sample_b_cl_2darray,          // llvm.amdgcn.image.sample.b.cl.2darray
    amdgcn_image_sample_b_cl_3d,               // llvm.amdgcn.image.sample.b.cl.3d
    amdgcn_image_sample_b_cl_cube,             // llvm.amdgcn.image.sample.b.cl.cube
    amdgcn_image_sample_b_cl_o_1d,             // llvm.amdgcn.image.sample.b.cl.o.1d
    amdgcn_image_sample_b_cl_o_1darray,        // llvm.amdgcn.image.sample.b.cl.o.1darray
    amdgcn_image_sample_b_cl_o_2d,             // llvm.amdgcn.image.sample.b.cl.o.2d
    amdgcn_image_sample_b_cl_o_2darray,        // llvm.amdgcn.image.sample.b.cl.o.2darray
    amdgcn_image_sample_b_cl_o_3d,             // llvm.amdgcn.image.sample.b.cl.o.3d
    amdgcn_image_sample_b_cl_o_cube,           // llvm.amdgcn.image.sample.b.cl.o.cube
    amdgcn_image_sample_b_cube,                // llvm.amdgcn.image.sample.b.cube
    amdgcn_image_sample_b_o_1d,                // llvm.amdgcn.image.sample.b.o.1d
    amdgcn_image_sample_b_o_1darray,           // llvm.amdgcn.image.sample.b.o.1darray
    amdgcn_image_sample_b_o_2d,                // llvm.amdgcn.image.sample.b.o.2d
    amdgcn_image_sample_b_o_2darray,           // llvm.amdgcn.image.sample.b.o.2darray
    amdgcn_image_sample_b_o_3d,                // llvm.amdgcn.image.sample.b.o.3d
    amdgcn_image_sample_b_o_cube,              // llvm.amdgcn.image.sample.b.o.cube
    amdgcn_image_sample_c_1d,                  // llvm.amdgcn.image.sample.c.1d
    amdgcn_image_sample_c_1darray,             // llvm.amdgcn.image.sample.c.1darray
    amdgcn_image_sample_c_2d,                  // llvm.amdgcn.image.sample.c.2d
    amdgcn_image_sample_c_2darray,             // llvm.amdgcn.image.sample.c.2darray
    amdgcn_image_sample_c_3d,                  // llvm.amdgcn.image.sample.c.3d
    amdgcn_image_sample_c_b_1d,                // llvm.amdgcn.image.sample.c.b.1d
    amdgcn_image_sample_c_b_1darray,           // llvm.amdgcn.image.sample.c.b.1darray
    amdgcn_image_sample_c_b_2d,                // llvm.amdgcn.image.sample.c.b.2d
    amdgcn_image_sample_c_b_2darray,           // llvm.amdgcn.image.sample.c.b.2darray
    amdgcn_image_sample_c_b_3d,                // llvm.amdgcn.image.sample.c.b.3d
    amdgcn_image_sample_c_b_cl_1d,             // llvm.amdgcn.image.sample.c.b.cl.1d
    amdgcn_image_sample_c_b_cl_1darray,        // llvm.amdgcn.image.sample.c.b.cl.1darray
    amdgcn_image_sample_c_b_cl_2d,             // llvm.amdgcn.image.sample.c.b.cl.2d
    amdgcn_image_sample_c_b_cl_2darray,        // llvm.amdgcn.image.sample.c.b.cl.2darray
    amdgcn_image_sample_c_b_cl_3d,             // llvm.amdgcn.image.sample.c.b.cl.3d
    amdgcn_image_sample_c_b_cl_cube,           // llvm.amdgcn.image.sample.c.b.cl.cube
    amdgcn_image_sample_c_b_cl_o_1d,           // llvm.amdgcn.image.sample.c.b.cl.o.1d
    amdgcn_image_sample_c_b_cl_o_1darray,      // llvm.amdgcn.image.sample.c.b.cl.o.1darray
    amdgcn_image_sample_c_b_cl_o_2d,           // llvm.amdgcn.image.sample.c.b.cl.o.2d
    amdgcn_image_sample_c_b_cl_o_2darray,      // llvm.amdgcn.image.sample.c.b.cl.o.2darray
    amdgcn_image_sample_c_b_cl_o_3d,           // llvm.amdgcn.image.sample.c.b.cl.o.3d
    amdgcn_image_sample_c_b_cl_o_cube,         // llvm.amdgcn.image.sample.c.b.cl.o.cube
    amdgcn_image_sample_c_b_cube,              // llvm.amdgcn.image.sample.c.b.cube
    amdgcn_image_sample_c_b_o_1d,              // llvm.amdgcn.image.sample.c.b.o.1d
    amdgcn_image_sample_c_b_o_1darray,         // llvm.amdgcn.image.sample.c.b.o.1darray
    amdgcn_image_sample_c_b_o_2d,              // llvm.amdgcn.image.sample.c.b.o.2d
    amdgcn_image_sample_c_b_o_2darray,         // llvm.amdgcn.image.sample.c.b.o.2darray
    amdgcn_image_sample_c_b_o_3d,              // llvm.amdgcn.image.sample.c.b.o.3d
    amdgcn_image_sample_c_b_o_cube,            // llvm.amdgcn.image.sample.c.b.o.cube
    amdgcn_image_sample_c_cd_1d,               // llvm.amdgcn.image.sample.c.cd.1d
    amdgcn_image_sample_c_cd_1darray,          // llvm.amdgcn.image.sample.c.cd.1darray
    amdgcn_image_sample_c_cd_2d,               // llvm.amdgcn.image.sample.c.cd.2d
    amdgcn_image_sample_c_cd_2darray,          // llvm.amdgcn.image.sample.c.cd.2darray
    amdgcn_image_sample_c_cd_3d,               // llvm.amdgcn.image.sample.c.cd.3d
    amdgcn_image_sample_c_cd_cl_1d,            // llvm.amdgcn.image.sample.c.cd.cl.1d
    amdgcn_image_sample_c_cd_cl_1darray,       // llvm.amdgcn.image.sample.c.cd.cl.1darray
    amdgcn_image_sample_c_cd_cl_2d,            // llvm.amdgcn.image.sample.c.cd.cl.2d
    amdgcn_image_sample_c_cd_cl_2darray,       // llvm.amdgcn.image.sample.c.cd.cl.2darray
    amdgcn_image_sample_c_cd_cl_3d,            // llvm.amdgcn.image.sample.c.cd.cl.3d
    amdgcn_image_sample_c_cd_cl_cube,          // llvm.amdgcn.image.sample.c.cd.cl.cube
    amdgcn_image_sample_c_cd_cl_o_1d,          // llvm.amdgcn.image.sample.c.cd.cl.o.1d
    amdgcn_image_sample_c_cd_cl_o_1darray,     // llvm.amdgcn.image.sample.c.cd.cl.o.1darray
    amdgcn_image_sample_c_cd_cl_o_2d,          // llvm.amdgcn.image.sample.c.cd.cl.o.2d
    amdgcn_image_sample_c_cd_cl_o_2darray,     // llvm.amdgcn.image.sample.c.cd.cl.o.2darray
    amdgcn_image_sample_c_cd_cl_o_3d,          // llvm.amdgcn.image.sample.c.cd.cl.o.3d
    amdgcn_image_sample_c_cd_cl_o_cube,        // llvm.amdgcn.image.sample.c.cd.cl.o.cube
    amdgcn_image_sample_c_cd_cube,             // llvm.amdgcn.image.sample.c.cd.cube
    amdgcn_image_sample_c_cd_o_1d,             // llvm.amdgcn.image.sample.c.cd.o.1d
    amdgcn_image_sample_c_cd_o_1darray,        // llvm.amdgcn.image.sample.c.cd.o.1darray
    amdgcn_image_sample_c_cd_o_2d,             // llvm.amdgcn.image.sample.c.cd.o.2d
    amdgcn_image_sample_c_cd_o_2darray,        // llvm.amdgcn.image.sample.c.cd.o.2darray
    amdgcn_image_sample_c_cd_o_3d,             // llvm.amdgcn.image.sample.c.cd.o.3d
    amdgcn_image_sample_c_cd_o_cube,           // llvm.amdgcn.image.sample.c.cd.o.cube
    amdgcn_image_sample_c_cl_1d,               // llvm.amdgcn.image.sample.c.cl.1d
    amdgcn_image_sample_c_cl_1darray,          // llvm.amdgcn.image.sample.c.cl.1darray
    amdgcn_image_sample_c_cl_2d,               // llvm.amdgcn.image.sample.c.cl.2d
    amdgcn_image_sample_c_cl_2darray,          // llvm.amdgcn.image.sample.c.cl.2darray
    amdgcn_image_sample_c_cl_3d,               // llvm.amdgcn.image.sample.c.cl.3d
    amdgcn_image_sample_c_cl_cube,             // llvm.amdgcn.image.sample.c.cl.cube
    amdgcn_image_sample_c_cl_o_1d,             // llvm.amdgcn.image.sample.c.cl.o.1d
    amdgcn_image_sample_c_cl_o_1darray,        // llvm.amdgcn.image.sample.c.cl.o.1darray
    amdgcn_image_sample_c_cl_o_2d,             // llvm.amdgcn.image.sample.c.cl.o.2d
    amdgcn_image_sample_c_cl_o_2darray,        // llvm.amdgcn.image.sample.c.cl.o.2darray
    amdgcn_image_sample_c_cl_o_3d,             // llvm.amdgcn.image.sample.c.cl.o.3d
    amdgcn_image_sample_c_cl_o_cube,           // llvm.amdgcn.image.sample.c.cl.o.cube
    amdgcn_image_sample_c_cube,                // llvm.amdgcn.image.sample.c.cube
    amdgcn_image_sample_c_d_1d,                // llvm.amdgcn.image.sample.c.d.1d
    amdgcn_image_sample_c_d_1darray,           // llvm.amdgcn.image.sample.c.d.1darray
    amdgcn_image_sample_c_d_2d,                // llvm.amdgcn.image.sample.c.d.2d
    amdgcn_image_sample_c_d_2darray,           // llvm.amdgcn.image.sample.c.d.2darray
    amdgcn_image_sample_c_d_3d,                // llvm.amdgcn.image.sample.c.d.3d
    amdgcn_image_sample_c_d_cl_1d,             // llvm.amdgcn.image.sample.c.d.cl.1d
    amdgcn_image_sample_c_d_cl_1darray,        // llvm.amdgcn.image.sample.c.d.cl.1darray
    amdgcn_image_sample_c_d_cl_2d,             // llvm.amdgcn.image.sample.c.d.cl.2d
    amdgcn_image_sample_c_d_cl_2darray,        // llvm.amdgcn.image.sample.c.d.cl.2darray
    amdgcn_image_sample_c_d_cl_3d,             // llvm.amdgcn.image.sample.c.d.cl.3d
    amdgcn_image_sample_c_d_cl_cube,           // llvm.amdgcn.image.sample.c.d.cl.cube
    amdgcn_image_sample_c_d_cl_o_1d,           // llvm.amdgcn.image.sample.c.d.cl.o.1d
    amdgcn_image_sample_c_d_cl_o_1darray,      // llvm.amdgcn.image.sample.c.d.cl.o.1darray
    amdgcn_image_sample_c_d_cl_o_2d,           // llvm.amdgcn.image.sample.c.d.cl.o.2d
    amdgcn_image_sample_c_d_cl_o_2darray,      // llvm.amdgcn.image.sample.c.d.cl.o.2darray
    amdgcn_image_sample_c_d_cl_o_3d,           // llvm.amdgcn.image.sample.c.d.cl.o.3d
    amdgcn_image_sample_c_d_cl_o_cube,         // llvm.amdgcn.image.sample.c.d.cl.o.cube
    amdgcn_image_sample_c_d_cube,              // llvm.amdgcn.image.sample.c.d.cube
    amdgcn_image_sample_c_d_o_1d,              // llvm.amdgcn.image.sample.c.d.o.1d
    amdgcn_image_sample_c_d_o_1darray,         // llvm.amdgcn.image.sample.c.d.o.1darray
    amdgcn_image_sample_c_d_o_2d,              // llvm.amdgcn.image.sample.c.d.o.2d
    amdgcn_image_sample_c_d_o_2darray,         // llvm.amdgcn.image.sample.c.d.o.2darray
    amdgcn_image_sample_c_d_o_3d,              // llvm.amdgcn.image.sample.c.d.o.3d
    amdgcn_image_sample_c_d_o_cube,            // llvm.amdgcn.image.sample.c.d.o.cube
    amdgcn_image_sample_c_l_1d,                // llvm.amdgcn.image.sample.c.l.1d
    amdgcn_image_sample_c_l_1darray,           // llvm.amdgcn.image.sample.c.l.1darray
    amdgcn_image_sample_c_l_2d,                // llvm.amdgcn.image.sample.c.l.2d
    amdgcn_image_sample_c_l_2darray,           // llvm.amdgcn.image.sample.c.l.2darray
    amdgcn_image_sample_c_l_3d,                // llvm.amdgcn.image.sample.c.l.3d
    amdgcn_image_sample_c_l_cube,              // llvm.amdgcn.image.sample.c.l.cube
    amdgcn_image_sample_c_l_o_1d,              // llvm.amdgcn.image.sample.c.l.o.1d
    amdgcn_image_sample_c_l_o_1darray,         // llvm.amdgcn.image.sample.c.l.o.1darray
    amdgcn_image_sample_c_l_o_2d,              // llvm.amdgcn.image.sample.c.l.o.2d
    amdgcn_image_sample_c_l_o_2darray,         // llvm.amdgcn.image.sample.c.l.o.2darray
    amdgcn_image_sample_c_l_o_3d,              // llvm.amdgcn.image.sample.c.l.o.3d
    amdgcn_image_sample_c_l_o_cube,            // llvm.amdgcn.image.sample.c.l.o.cube
    amdgcn_image_sample_c_lz_1d,               // llvm.amdgcn.image.sample.c.lz.1d
    amdgcn_image_sample_c_lz_1darray,          // llvm.amdgcn.image.sample.c.lz.1darray
    amdgcn_image_sample_c_lz_2d,               // llvm.amdgcn.image.sample.c.lz.2d
    amdgcn_image_sample_c_lz_2darray,          // llvm.amdgcn.image.sample.c.lz.2darray
    amdgcn_image_sample_c_lz_3d,               // llvm.amdgcn.image.sample.c.lz.3d
    amdgcn_image_sample_c_lz_cube,             // llvm.amdgcn.image.sample.c.lz.cube
    amdgcn_image_sample_c_lz_o_1d,             // llvm.amdgcn.image.sample.c.lz.o.1d
    amdgcn_image_sample_c_lz_o_1darray,        // llvm.amdgcn.image.sample.c.lz.o.1darray
    amdgcn_image_sample_c_lz_o_2d,             // llvm.amdgcn.image.sample.c.lz.o.2d
    amdgcn_image_sample_c_lz_o_2darray,        // llvm.amdgcn.image.sample.c.lz.o.2darray
    amdgcn_image_sample_c_lz_o_3d,             // llvm.amdgcn.image.sample.c.lz.o.3d
    amdgcn_image_sample_c_lz_o_cube,           // llvm.amdgcn.image.sample.c.lz.o.cube
    amdgcn_image_sample_c_o_1d,                // llvm.amdgcn.image.sample.c.o.1d
    amdgcn_image_sample_c_o_1darray,           // llvm.amdgcn.image.sample.c.o.1darray
    amdgcn_image_sample_c_o_2d,                // llvm.amdgcn.image.sample.c.o.2d
    amdgcn_image_sample_c_o_2darray,           // llvm.amdgcn.image.sample.c.o.2darray
    amdgcn_image_sample_c_o_3d,                // llvm.amdgcn.image.sample.c.o.3d
    amdgcn_image_sample_c_o_cube,              // llvm.amdgcn.image.sample.c.o.cube
    amdgcn_image_sample_cd_1d,                 // llvm.amdgcn.image.sample.cd.1d
    amdgcn_image_sample_cd_1darray,            // llvm.amdgcn.image.sample.cd.1darray
    amdgcn_image_sample_cd_2d,                 // llvm.amdgcn.image.sample.cd.2d
    amdgcn_image_sample_cd_2darray,            // llvm.amdgcn.image.sample.cd.2darray
    amdgcn_image_sample_cd_3d,                 // llvm.amdgcn.image.sample.cd.3d
    amdgcn_image_sample_cd_cl_1d,              // llvm.amdgcn.image.sample.cd.cl.1d
    amdgcn_image_sample_cd_cl_1darray,         // llvm.amdgcn.image.sample.cd.cl.1darray
    amdgcn_image_sample_cd_cl_2d,              // llvm.amdgcn.image.sample.cd.cl.2d
    amdgcn_image_sample_cd_cl_2darray,         // llvm.amdgcn.image.sample.cd.cl.2darray
    amdgcn_image_sample_cd_cl_3d,              // llvm.amdgcn.image.sample.cd.cl.3d
    amdgcn_image_sample_cd_cl_cube,            // llvm.amdgcn.image.sample.cd.cl.cube
    amdgcn_image_sample_cd_cl_o_1d,            // llvm.amdgcn.image.sample.cd.cl.o.1d
    amdgcn_image_sample_cd_cl_o_1darray,       // llvm.amdgcn.image.sample.cd.cl.o.1darray
    amdgcn_image_sample_cd_cl_o_2d,            // llvm.amdgcn.image.sample.cd.cl.o.2d
    amdgcn_image_sample_cd_cl_o_2darray,       // llvm.amdgcn.image.sample.cd.cl.o.2darray
    amdgcn_image_sample_cd_cl_o_3d,            // llvm.amdgcn.image.sample.cd.cl.o.3d
    amdgcn_image_sample_cd_cl_o_cube,          // llvm.amdgcn.image.sample.cd.cl.o.cube
    amdgcn_image_sample_cd_cube,               // llvm.amdgcn.image.sample.cd.cube
    amdgcn_image_sample_cd_o_1d,               // llvm.amdgcn.image.sample.cd.o.1d
    amdgcn_image_sample_cd_o_1darray,          // llvm.amdgcn.image.sample.cd.o.1darray
    amdgcn_image_sample_cd_o_2d,               // llvm.amdgcn.image.sample.cd.o.2d
    amdgcn_image_sample_cd_o_2darray,          // llvm.amdgcn.image.sample.cd.o.2darray
    amdgcn_image_sample_cd_o_3d,               // llvm.amdgcn.image.sample.cd.o.3d
    amdgcn_image_sample_cd_o_cube,             // llvm.amdgcn.image.sample.cd.o.cube
    amdgcn_image_sample_cl_1d,                 // llvm.amdgcn.image.sample.cl.1d
    amdgcn_image_sample_cl_1darray,            // llvm.amdgcn.image.sample.cl.1darray
    amdgcn_image_sample_cl_2d,                 // llvm.amdgcn.image.sample.cl.2d
    amdgcn_image_sample_cl_2darray,            // llvm.amdgcn.image.sample.cl.2darray
    amdgcn_image_sample_cl_3d,                 // llvm.amdgcn.image.sample.cl.3d
    amdgcn_image_sample_cl_cube,               // llvm.amdgcn.image.sample.cl.cube
    amdgcn_image_sample_cl_o_1d,               // llvm.amdgcn.image.sample.cl.o.1d
    amdgcn_image_sample_cl_o_1darray,          // llvm.amdgcn.image.sample.cl.o.1darray
    amdgcn_image_sample_cl_o_2d,               // llvm.amdgcn.image.sample.cl.o.2d
    amdgcn_image_sample_cl_o_2darray,          // llvm.amdgcn.image.sample.cl.o.2darray
    amdgcn_image_sample_cl_o_3d,               // llvm.amdgcn.image.sample.cl.o.3d
    amdgcn_image_sample_cl_o_cube,             // llvm.amdgcn.image.sample.cl.o.cube
    amdgcn_image_sample_cube,                  // llvm.amdgcn.image.sample.cube
    amdgcn_image_sample_d_1d,                  // llvm.amdgcn.image.sample.d.1d
    amdgcn_image_sample_d_1darray,             // llvm.amdgcn.image.sample.d.1darray
    amdgcn_image_sample_d_2d,                  // llvm.amdgcn.image.sample.d.2d
    amdgcn_image_sample_d_2darray,             // llvm.amdgcn.image.sample.d.2darray
    amdgcn_image_sample_d_3d,                  // llvm.amdgcn.image.sample.d.3d
    amdgcn_image_sample_d_cl_1d,               // llvm.amdgcn.image.sample.d.cl.1d
    amdgcn_image_sample_d_cl_1darray,          // llvm.amdgcn.image.sample.d.cl.1darray
    amdgcn_image_sample_d_cl_2d,               // llvm.amdgcn.image.sample.d.cl.2d
    amdgcn_image_sample_d_cl_2darray,          // llvm.amdgcn.image.sample.d.cl.2darray
    amdgcn_image_sample_d_cl_3d,               // llvm.amdgcn.image.sample.d.cl.3d
    amdgcn_image_sample_d_cl_cube,             // llvm.amdgcn.image.sample.d.cl.cube
    amdgcn_image_sample_d_cl_o_1d,             // llvm.amdgcn.image.sample.d.cl.o.1d
    amdgcn_image_sample_d_cl_o_1darray,        // llvm.amdgcn.image.sample.d.cl.o.1darray
    amdgcn_image_sample_d_cl_o_2d,             // llvm.amdgcn.image.sample.d.cl.o.2d
    amdgcn_image_sample_d_cl_o_2darray,        // llvm.amdgcn.image.sample.d.cl.o.2darray
    amdgcn_image_sample_d_cl_o_3d,             // llvm.amdgcn.image.sample.d.cl.o.3d
    amdgcn_image_sample_d_cl_o_cube,           // llvm.amdgcn.image.sample.d.cl.o.cube
    amdgcn_image_sample_d_cube,                // llvm.amdgcn.image.sample.d.cube
    amdgcn_image_sample_d_o_1d,                // llvm.amdgcn.image.sample.d.o.1d
    amdgcn_image_sample_d_o_1darray,           // llvm.amdgcn.image.sample.d.o.1darray
    amdgcn_image_sample_d_o_2d,                // llvm.amdgcn.image.sample.d.o.2d
    amdgcn_image_sample_d_o_2darray,           // llvm.amdgcn.image.sample.d.o.2darray
    amdgcn_image_sample_d_o_3d,                // llvm.amdgcn.image.sample.d.o.3d
    amdgcn_image_sample_d_o_cube,              // llvm.amdgcn.image.sample.d.o.cube
    amdgcn_image_sample_l_1d,                  // llvm.amdgcn.image.sample.l.1d
    amdgcn_image_sample_l_1darray,             // llvm.amdgcn.image.sample.l.1darray
    amdgcn_image_sample_l_2d,                  // llvm.amdgcn.image.sample.l.2d
    amdgcn_image_sample_l_2darray,             // llvm.amdgcn.image.sample.l.2darray
    amdgcn_image_sample_l_3d,                  // llvm.amdgcn.image.sample.l.3d
    amdgcn_image_sample_l_cube,                // llvm.amdgcn.image.sample.l.cube
    amdgcn_image_sample_l_o_1d,                // llvm.amdgcn.image.sample.l.o.1d
    amdgcn_image_sample_l_o_1darray,           // llvm.amdgcn.image.sample.l.o.1darray
    amdgcn_image_sample_l_o_2d,                // llvm.amdgcn.image.sample.l.o.2d
    amdgcn_image_sample_l_o_2darray,           // llvm.amdgcn.image.sample.l.o.2darray
    amdgcn_image_sample_l_o_3d,                // llvm.amdgcn.image.sample.l.o.3d
    amdgcn_image_sample_l_o_cube,              // llvm.amdgcn.image.sample.l.o.cube
    amdgcn_image_sample_lz_1d,                 // llvm.amdgcn.image.sample.lz.1d
    amdgcn_image_sample_lz_1darray,            // llvm.amdgcn.image.sample.lz.1darray
    amdgcn_image_sample_lz_2d,                 // llvm.amdgcn.image.sample.lz.2d
    amdgcn_image_sample_lz_2darray,            // llvm.amdgcn.image.sample.lz.2darray
    amdgcn_image_sample_lz_3d,                 // llvm.amdgcn.image.sample.lz.3d
    amdgcn_image_sample_lz_cube,               // llvm.amdgcn.image.sample.lz.cube
    amdgcn_image_sample_lz_o_1d,               // llvm.amdgcn.image.sample.lz.o.1d
    amdgcn_image_sample_lz_o_1darray,          // llvm.amdgcn.image.sample.lz.o.1darray
    amdgcn_image_sample_lz_o_2d,               // llvm.amdgcn.image.sample.lz.o.2d
    amdgcn_image_sample_lz_o_2darray,          // llvm.amdgcn.image.sample.lz.o.2darray
    amdgcn_image_sample_lz_o_3d,               // llvm.amdgcn.image.sample.lz.o.3d
    amdgcn_image_sample_lz_o_cube,             // llvm.amdgcn.image.sample.lz.o.cube
    amdgcn_image_sample_o_1d,                  // llvm.amdgcn.image.sample.o.1d
    amdgcn_image_sample_o_1darray,             // llvm.amdgcn.image.sample.o.1darray
    amdgcn_image_sample_o_2d,                  // llvm.amdgcn.image.sample.o.2d
    amdgcn_image_sample_o_2darray,             // llvm.amdgcn.image.sample.o.2darray
    amdgcn_image_sample_o_3d,                  // llvm.amdgcn.image.sample.o.3d
    amdgcn_image_sample_o_cube,                // llvm.amdgcn.image.sample.o.cube
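    // llvm.amdgcn.image.store.* and image.store.mip.*: image stores,
    // mirroring the image.load dimension variants.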
    amdgcn_image_store_1d,                     // llvm.amdgcn.image.store.1d
    amdgcn_image_store_1darray,                // llvm.amdgcn.image.store.1darray
    amdgcn_image_store_2d,                     // llvm.amdgcn.image.store.2d
    amdgcn_image_store_2darray,                // llvm.amdgcn.image.store.2darray
    amdgcn_image_store_2darraymsaa,            // llvm.amdgcn.image.store.2darraymsaa
    amdgcn_image_store_2dmsaa,                 // llvm.amdgcn.image.store.2dmsaa
    amdgcn_image_store_3d,                     // llvm.amdgcn.image.store.3d
    amdgcn_image_store_cube,                   // llvm.amdgcn.image.store.cube
    amdgcn_image_store_mip_1d,                 // llvm.amdgcn.image.store.mip.1d
    amdgcn_image_store_mip_1darray,            // llvm.amdgcn.image.store.mip.1darray
    amdgcn_image_store_mip_2d,                 // llvm.amdgcn.image.store.mip.2d
    amdgcn_image_store_mip_2darray,            // llvm.amdgcn.image.store.mip.2darray
    amdgcn_image_store_mip_3d,                 // llvm.amdgcn.image.store.mip.3d
    amdgcn_image_store_mip_cube,               // llvm.amdgcn.image.store.mip.cube
    amdgcn_implicit_buffer_ptr,                // llvm.amdgcn.implicit.buffer.ptr
    amdgcn_implicitarg_ptr,                    // llvm.amdgcn.implicitarg.ptr
    amdgcn_init_exec,                          // llvm.amdgcn.init.exec
    amdgcn_init_exec_from_input,               // llvm.amdgcn.init.exec.from.input
    amdgcn_interp_inreg_p10,                   // llvm.amdgcn.interp.inreg.p10
    amdgcn_interp_inreg_p10_f16,               // llvm.amdgcn.interp.inreg.p10.f16
    amdgcn_interp_inreg_p2,                    // llvm.amdgcn.interp.inreg.p2
    amdgcn_interp_inreg_p2_f16,                // llvm.amdgcn.interp.inreg.p2.f16
    amdgcn_interp_mov,                         // llvm.amdgcn.interp.mov
    amdgcn_interp_p1,                          // llvm.amdgcn.interp.p1
    amdgcn_interp_p1_f16,                      // llvm.amdgcn.interp.p1.f16
    amdgcn_interp_p2,                          // llvm.amdgcn.interp.p2
    amdgcn_interp_p2_f16,                      // llvm.amdgcn.interp.p2.f16
    amdgcn_inverse_ballot,                     // llvm.amdgcn.inverse.ballot
    amdgcn_is_private,                         // llvm.amdgcn.is.private
    amdgcn_is_shared,                          // llvm.amdgcn.is.shared
    amdgcn_kernarg_segment_ptr,                // llvm.amdgcn.kernarg.segment.ptr
    amdgcn_kill,                               // llvm.amdgcn.kill
    amdgcn_ldexp,                              // llvm.amdgcn.ldexp
    amdgcn_lds_direct_load,                    // llvm.amdgcn.lds.direct.load
    amdgcn_lds_kernel_id,                      // llvm.amdgcn.lds.kernel.id
    amdgcn_lds_param_load,                     // llvm.amdgcn.lds.param.load
    amdgcn_lerp,                               // llvm.amdgcn.lerp
    amdgcn_live_mask,                          // llvm.amdgcn.live.mask
    amdgcn_log,                                // llvm.amdgcn.log
    amdgcn_log_clamp,                          // llvm.amdgcn.log.clamp
    amdgcn_loop,                               // llvm.amdgcn.loop
    amdgcn_make_buffer_rsrc,                   // llvm.amdgcn.make.buffer.rsrc
    amdgcn_mbcnt_hi,                           // llvm.amdgcn.mbcnt.hi
    amdgcn_mbcnt_lo,                           // llvm.amdgcn.mbcnt.lo
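    // llvm.amdgcn.mfma.*: matrix-FMA intrinsics, named by accumulator type
    // and MxNxK tile shape (e.g. f32.16x16x16f16), including xf32 and
    // fp8/bf8 source variants.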
    amdgcn_mfma_f32_16x16x16bf16_1k,           // llvm.amdgcn.mfma.f32.16x16x16bf16.1k
    amdgcn_mfma_f32_16x16x16f16,               // llvm.amdgcn.mfma.f32.16x16x16f16
    amdgcn_mfma_f32_16x16x1f32,                // llvm.amdgcn.mfma.f32.16x16x1f32
    amdgcn_mfma_f32_16x16x2bf16,               // llvm.amdgcn.mfma.f32.16x16x2bf16
    amdgcn_mfma_f32_16x16x32_bf8_bf8,          // llvm.amdgcn.mfma.f32.16x16x32.bf8.bf8
    amdgcn_mfma_f32_16x16x32_bf8_fp8,          // llvm.amdgcn.mfma.f32.16x16x32.bf8.fp8
    amdgcn_mfma_f32_16x16x32_fp8_bf8,          // llvm.amdgcn.mfma.f32.16x16x32.fp8.bf8
    amdgcn_mfma_f32_16x16x32_fp8_fp8,          // llvm.amdgcn.mfma.f32.16x16x32.fp8.fp8
    amdgcn_mfma_f32_16x16x4bf16_1k,            // llvm.amdgcn.mfma.f32.16x16x4bf16.1k
    amdgcn_mfma_f32_16x16x4f16,                // llvm.amdgcn.mfma.f32.16x16x4f16
    amdgcn_mfma_f32_16x16x4f32,                // llvm.amdgcn.mfma.f32.16x16x4f32
    amdgcn_mfma_f32_16x16x8_xf32,              // llvm.amdgcn.mfma.f32.16x16x8.xf32
    amdgcn_mfma_f32_16x16x8bf16,               // llvm.amdgcn.mfma.f32.16x16x8bf16
    amdgcn_mfma_f32_32x32x16_bf8_bf8,          // llvm.amdgcn.mfma.f32.32x32x16.bf8.bf8
    amdgcn_mfma_f32_32x32x16_bf8_fp8,          // llvm.amdgcn.mfma.f32.32x32x16.bf8.fp8
    amdgcn_mfma_f32_32x32x16_fp8_bf8,          // llvm.amdgcn.mfma.f32.32x32x16.fp8.bf8
    amdgcn_mfma_f32_32x32x16_fp8_fp8,          // llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8
    amdgcn_mfma_f32_32x32x1f32,                // llvm.amdgcn.mfma.f32.32x32x1f32
    amdgcn_mfma_f32_32x32x2bf16,               // llvm.amdgcn.mfma.f32.32x32x2bf16
    amdgcn_mfma_f32_32x32x2f32,                // llvm.amdgcn.mfma.f32.32x32x2f32
    amdgcn_mfma_f32_32x32x4_xf32,              // llvm.amdgcn.mfma.f32.32x32x4.xf32
    amdgcn_mfma_f32_32x32x4bf16,               // llvm.amdgcn.mfma.f32.32x32x4bf16
    amdgcn_mfma_f32_32x32x4bf16_1k,            // llvm.amdgcn.mfma.f32.32x32x4bf16.1k
    amdgcn_mfma_f32_32x32x4f16,                // llvm.amdgcn.mfma.f32.32x32x4f16
    amdgcn_mfma_f32_32x32x8bf16_1k,            // llvm.amdgcn.mfma.f32.32x32x8bf16.1k
    amdgcn_mfma_f32_32x32x8f16,                // llvm.amdgcn.mfma.f32.32x32x8f16
    amdgcn_mfma_f32_4x4x1f32,                  // llvm.amdgcn.mfma.f32.4x4x1f32
    amdgcn_mfma_f32_4x4x2bf16,                 // llvm.amdgcn.mfma.f32.4x4x2bf16
    amdgcn_mfma_f32_4x4x4bf16_1k,              // llvm.amdgcn.mfma.f32.4x4x4bf16.1k
    amdgcn_mfma_f32_4x4x4f16,                  // llvm.amdgcn.mfma.f32.4x4x4f16
    amdgcn_mfma_f64_16x16x4f64,                // llvm.amdgcn.mfma.f64.16x16x4f64
    amdgcn_mfma_f64_4x4x4f64,                  // llvm.amdgcn.mfma.f64.4x4x4f64
    amdgcn_mfma_i32_16x16x16i8,                // llvm.amdgcn.mfma.i32.16x16x16i8
    amdgcn_mfma_i32_16x16x32_i8,               // llvm.amdgcn.mfma.i32.16x16x32.i8
    amdgcn_mfma_i32_16x16x4i8,                 // llvm.amdgcn.mfma.i32.16x16x4i8
    amdgcn_mfma_i32_32x32x16_i8,               // llvm.amdgcn.mfma.i32.32x32x16.i8
    amdgcn_mfma_i32_32x32x4i8,                 // llvm.amdgcn.mfma.i32.32x32x4i8
    amdgcn_mfma_i32_32x32x8i8,                 // llvm.amdgcn.mfma.i32.32x32x8i8
    amdgcn_mfma_i32_4x4x4i8,                   // llvm.amdgcn.mfma.i32.4x4x4i8
    amdgcn_mov_dpp,                            // llvm.amdgcn.mov.dpp
    amdgcn_mov_dpp8,                           // llvm.amdgcn.mov.dpp8
    amdgcn_mqsad_pk_u16_u8,                    // llvm.amdgcn.mqsad.pk.u16.u8
    amdgcn_mqsad_u32_u8,                       // llvm.amdgcn.mqsad.u32.u8
    amdgcn_msad_u8,                            // llvm.amdgcn.msad.u8
    amdgcn_mul_i24,                            // llvm.amdgcn.mul.i24
    amdgcn_mul_u24,                            // llvm.amdgcn.mul.u24
    amdgcn_mulhi_i24,                          // llvm.amdgcn.mulhi.i24
    amdgcn_mulhi_u24,                          // llvm.amdgcn.mulhi.u24
    amdgcn_perm,                               // llvm.amdgcn.perm
    amdgcn_permlane16,                         // llvm.amdgcn.permlane16
    amdgcn_permlane64,                         // llvm.amdgcn.permlane64
    amdgcn_permlanex16,                        // llvm.amdgcn.permlanex16
    amdgcn_ps_live,                            // llvm.amdgcn.ps.live
    amdgcn_qsad_pk_u16_u8,                     // llvm.amdgcn.qsad.pk.u16.u8
    amdgcn_queue_ptr,                          // llvm.amdgcn.queue.ptr
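    // llvm.amdgcn.raw.buffer.*: raw (byte-offset addressed) buffer atomics,
    // loads, and stores.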
    amdgcn_raw_buffer_atomic_add,              // llvm.amdgcn.raw.buffer.atomic.add
    amdgcn_raw_buffer_atomic_and,              // llvm.amdgcn.raw.buffer.atomic.and
    amdgcn_raw_buffer_atomic_cmpswap,          // llvm.amdgcn.raw.buffer.atomic.cmpswap
    amdgcn_raw_buffer_atomic_dec,              // llvm.amdgcn.raw.buffer.atomic.dec
    amdgcn_raw_buffer_atomic_fadd,             // llvm.amdgcn.raw.buffer.atomic.fadd
    amdgcn_raw_buffer_atomic_fmax,             // llvm.amdgcn.raw.buffer.atomic.fmax
    amdgcn_raw_buffer_atomic_fmin,             // llvm.amdgcn.raw.buffer.atomic.fmin
    amdgcn_raw_buffer_atomic_inc,              // llvm.amdgcn.raw.buffer.atomic.inc
    amdgcn_raw_buffer_atomic_or,               // llvm.amdgcn.raw.buffer.atomic.or
    amdgcn_raw_buffer_atomic_smax,             // llvm.amdgcn.raw.buffer.atomic.smax
    amdgcn_raw_buffer_atomic_smin,             // llvm.amdgcn.raw.buffer.atomic.smin
    amdgcn_raw_buffer_atomic_sub,              // llvm.amdgcn.raw.buffer.atomic.sub
    amdgcn_raw_buffer_atomic_swap,             // llvm.amdgcn.raw.buffer.atomic.swap
    amdgcn_raw_buffer_atomic_umax,             // llvm.amdgcn.raw.buffer.atomic.umax
    amdgcn_raw_buffer_atomic_umin,             // llvm.amdgcn.raw.buffer.atomic.umin
    amdgcn_raw_buffer_atomic_xor,              // llvm.amdgcn.raw.buffer.atomic.xor
    amdgcn_raw_buffer_load,                    // llvm.amdgcn.raw.buffer.load
    amdgcn_raw_buffer_load_format,             // llvm.amdgcn.raw.buffer.load.format
    amdgcn_raw_buffer_load_lds,                // llvm.amdgcn.raw.buffer.load.lds
    amdgcn_raw_buffer_store,                   // llvm.amdgcn.raw.buffer.store
    amdgcn_raw_buffer_store_format,            // llvm.amdgcn.raw.buffer.store.format
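    // llvm.amdgcn.raw.ptr.*: the same raw-buffer operations, but taking the
    // resource as a buffer pointer rather than a <4 x i32> descriptor; the
    // tbuffer entries are the typed-buffer (format-converting) forms.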
    amdgcn_raw_ptr_buffer_atomic_add,          // llvm.amdgcn.raw.ptr.buffer.atomic.add
    amdgcn_raw_ptr_buffer_atomic_and,          // llvm.amdgcn.raw.ptr.buffer.atomic.and
    amdgcn_raw_ptr_buffer_atomic_cmpswap,      // llvm.amdgcn.raw.ptr.buffer.atomic.cmpswap
    amdgcn_raw_ptr_buffer_atomic_dec,          // llvm.amdgcn.raw.ptr.buffer.atomic.dec
    amdgcn_raw_ptr_buffer_atomic_fadd,         // llvm.amdgcn.raw.ptr.buffer.atomic.fadd
    amdgcn_raw_ptr_buffer_atomic_fmax,         // llvm.amdgcn.raw.ptr.buffer.atomic.fmax
    amdgcn_raw_ptr_buffer_atomic_fmin,         // llvm.amdgcn.raw.ptr.buffer.atomic.fmin
    amdgcn_raw_ptr_buffer_atomic_inc,          // llvm.amdgcn.raw.ptr.buffer.atomic.inc
    amdgcn_raw_ptr_buffer_atomic_or,           // llvm.amdgcn.raw.ptr.buffer.atomic.or
    amdgcn_raw_ptr_buffer_atomic_smax,         // llvm.amdgcn.raw.ptr.buffer.atomic.smax
    amdgcn_raw_ptr_buffer_atomic_smin,         // llvm.amdgcn.raw.ptr.buffer.atomic.smin
    amdgcn_raw_ptr_buffer_atomic_sub,          // llvm.amdgcn.raw.ptr.buffer.atomic.sub
    amdgcn_raw_ptr_buffer_atomic_swap,         // llvm.amdgcn.raw.ptr.buffer.atomic.swap
    amdgcn_raw_ptr_buffer_atomic_umax,         // llvm.amdgcn.raw.ptr.buffer.atomic.umax
    amdgcn_raw_ptr_buffer_atomic_umin,         // llvm.amdgcn.raw.ptr.buffer.atomic.umin
    amdgcn_raw_ptr_buffer_atomic_xor,          // llvm.amdgcn.raw.ptr.buffer.atomic.xor
    amdgcn_raw_ptr_buffer_load,                // llvm.amdgcn.raw.ptr.buffer.load
    amdgcn_raw_ptr_buffer_load_format,         // llvm.amdgcn.raw.ptr.buffer.load.format
    amdgcn_raw_ptr_buffer_load_lds,            // llvm.amdgcn.raw.ptr.buffer.load.lds
    amdgcn_raw_ptr_buffer_store,               // llvm.amdgcn.raw.ptr.buffer.store
    amdgcn_raw_ptr_buffer_store_format,        // llvm.amdgcn.raw.ptr.buffer.store.format
    amdgcn_raw_ptr_tbuffer_load,               // llvm.amdgcn.raw.ptr.tbuffer.load
    amdgcn_raw_ptr_tbuffer_store,              // llvm.amdgcn.raw.ptr.tbuffer.store
    amdgcn_raw_tbuffer_load,                   // llvm.amdgcn.raw.tbuffer.load
    amdgcn_raw_tbuffer_store,                  // llvm.amdgcn.raw.tbuffer.store
    amdgcn_rcp,                                // llvm.amdgcn.rcp
    amdgcn_rcp_legacy,                         // llvm.amdgcn.rcp.legacy
    amdgcn_readfirstlane,                      // llvm.amdgcn.readfirstlane
    amdgcn_readlane,                           // llvm.amdgcn.readlane
    amdgcn_reloc_constant,                     // llvm.amdgcn.reloc.constant
    amdgcn_rsq,                                // llvm.amdgcn.rsq
    amdgcn_rsq_clamp,                          // llvm.amdgcn.rsq.clamp
    amdgcn_rsq_legacy,                         // llvm.amdgcn.rsq.legacy
    amdgcn_s_barrier,                          // llvm.amdgcn.s.barrier
    amdgcn_s_buffer_load,                      // llvm.amdgcn.s.buffer.load
    amdgcn_s_dcache_inv,                       // llvm.amdgcn.s.dcache.inv
    amdgcn_s_dcache_inv_vol,                   // llvm.amdgcn.s.dcache.inv.vol
    amdgcn_s_dcache_wb,                        // llvm.amdgcn.s.dcache.wb
    amdgcn_s_dcache_wb_vol,                    // llvm.amdgcn.s.dcache.wb.vol
    amdgcn_s_decperflevel,                     // llvm.amdgcn.s.decperflevel
    amdgcn_s_get_waveid_in_workgroup,          // llvm.amdgcn.s.get.waveid.in.workgroup
    amdgcn_s_getpc,                            // llvm.amdgcn.s.getpc
    amdgcn_s_getreg,                           // llvm.amdgcn.s.getreg
    amdgcn_s_incperflevel,                     // llvm.amdgcn.s.incperflevel
    amdgcn_s_memrealtime,                      // llvm.amdgcn.s.memrealtime
    amdgcn_s_memtime,                          // llvm.amdgcn.s.memtime
    amdgcn_s_sendmsg,                          // llvm.amdgcn.s.sendmsg
    amdgcn_s_sendmsg_rtn,                      // llvm.amdgcn.s.sendmsg.rtn
    amdgcn_s_sendmsghalt,                      // llvm.amdgcn.s.sendmsghalt
    amdgcn_s_sethalt,                          // llvm.amdgcn.s.sethalt
    amdgcn_s_setprio,                          // llvm.amdgcn.s.setprio
    amdgcn_s_setreg,                           // llvm.amdgcn.s.setreg
    amdgcn_s_sleep,                            // llvm.amdgcn.s.sleep
    amdgcn_s_wait_event_export_ready,          // llvm.amdgcn.s.wait.event.export.ready
    amdgcn_s_waitcnt,                          // llvm.amdgcn.s.waitcnt
    amdgcn_sad_hi_u8,                          // llvm.amdgcn.sad.hi.u8
    amdgcn_sad_u16,                            // llvm.amdgcn.sad.u16
    amdgcn_sad_u8,                             // llvm.amdgcn.sad.u8
    amdgcn_sbfe,                               // llvm.amdgcn.sbfe
    amdgcn_sched_barrier,                      // llvm.amdgcn.sched.barrier
    amdgcn_sched_group_barrier,                // llvm.amdgcn.sched.group.barrier
    amdgcn_sdot2,                              // llvm.amdgcn.sdot2
    amdgcn_sdot4,                              // llvm.amdgcn.sdot4
    amdgcn_sdot8,                              // llvm.amdgcn.sdot8
    amdgcn_set_inactive,                       // llvm.amdgcn.set.inactive
    amdgcn_sffbh,                              // llvm.amdgcn.sffbh
    amdgcn_sin,                                // llvm.amdgcn.sin
    amdgcn_smfmac_f32_16x16x32_bf16,           // llvm.amdgcn.smfmac.f32.16x16x32.bf16
    amdgcn_smfmac_f32_16x16x32_f16,            // llvm.amdgcn.smfmac.f32.16x16x32.f16
    amdgcn_smfmac_f32_16x16x64_bf8_bf8,        // llvm.amdgcn.smfmac.f32.16x16x64.bf8.bf8
    amdgcn_smfmac_f32_16x16x64_bf8_fp8,        // llvm.amdgcn.smfmac.f32.16x16x64.bf8.fp8
    amdgcn_smfmac_f32_16x16x64_fp8_bf8,        // llvm.amdgcn.smfmac.f32.16x16x64.fp8.bf8
    amdgcn_smfmac_f32_16x16x64_fp8_fp8,        // llvm.amdgcn.smfmac.f32.16x16x64.fp8.fp8
    amdgcn_smfmac_f32_32x32x16_bf16,           // llvm.amdgcn.smfmac.f32.32x32x16.bf16
    amdgcn_smfmac_f32_32x32x16_f16,            // llvm.amdgcn.smfmac.f32.32x32x16.f16
    amdgcn_smfmac_f32_32x32x32_bf8_bf8,        // llvm.amdgcn.smfmac.f32.32x32x32.bf8.bf8
    amdgcn_smfmac_f32_32x32x32_bf8_fp8,        // llvm.amdgcn.smfmac.f32.32x32x32.bf8.fp8
    amdgcn_smfmac_f32_32x32x32_fp8_bf8,        // llvm.amdgcn.smfmac.f32.32x32x32.fp8.bf8
    amdgcn_smfmac_f32_32x32x32_fp8_fp8,        // llvm.amdgcn.smfmac.f32.32x32x32.fp8.fp8
    amdgcn_smfmac_i32_16x16x64_i8,             // llvm.amdgcn.smfmac.i32.16x16x64.i8
    amdgcn_smfmac_i32_32x32x32_i8,             // llvm.amdgcn.smfmac.i32.32x32x32.i8
    amdgcn_softwqm,                            // llvm.amdgcn.softwqm
    amdgcn_sqrt,                               // llvm.amdgcn.sqrt
    amdgcn_strict_wqm,                         // llvm.amdgcn.strict.wqm
    amdgcn_strict_wwm,                         // llvm.amdgcn.strict.wwm
    amdgcn_struct_buffer_atomic_add,           // llvm.amdgcn.struct.buffer.atomic.add
    amdgcn_struct_buffer_atomic_and,           // llvm.amdgcn.struct.buffer.atomic.and
    amdgcn_struct_buffer_atomic_cmpswap,       // llvm.amdgcn.struct.buffer.atomic.cmpswap
    amdgcn_struct_buffer_atomic_dec,           // llvm.amdgcn.struct.buffer.atomic.dec
    amdgcn_struct_buffer_atomic_fadd,          // llvm.amdgcn.struct.buffer.atomic.fadd
    amdgcn_struct_buffer_atomic_fmax,          // llvm.amdgcn.struct.buffer.atomic.fmax
    amdgcn_struct_buffer_atomic_fmin,          // llvm.amdgcn.struct.buffer.atomic.fmin
    amdgcn_struct_buffer_atomic_inc,           // llvm.amdgcn.struct.buffer.atomic.inc
    amdgcn_struct_buffer_atomic_or,            // llvm.amdgcn.struct.buffer.atomic.or
    amdgcn_struct_buffer_atomic_smax,          // llvm.amdgcn.struct.buffer.atomic.smax
    amdgcn_struct_buffer_atomic_smin,          // llvm.amdgcn.struct.buffer.atomic.smin
    amdgcn_struct_buffer_atomic_sub,           // llvm.amdgcn.struct.buffer.atomic.sub
    amdgcn_struct_buffer_atomic_swap,          // llvm.amdgcn.struct.buffer.atomic.swap
    amdgcn_struct_buffer_atomic_umax,          // llvm.amdgcn.struct.buffer.atomic.umax
    amdgcn_struct_buffer_atomic_umin,          // llvm.amdgcn.struct.buffer.atomic.umin
    amdgcn_struct_buffer_atomic_xor,           // llvm.amdgcn.struct.buffer.atomic.xor
    amdgcn_struct_buffer_load,                 // llvm.amdgcn.struct.buffer.load
    amdgcn_struct_buffer_load_format,          // llvm.amdgcn.struct.buffer.load.format
    amdgcn_struct_buffer_load_lds,             // llvm.amdgcn.struct.buffer.load.lds
    amdgcn_struct_buffer_store,                // llvm.amdgcn.struct.buffer.store
    amdgcn_struct_buffer_store_format,         // llvm.amdgcn.struct.buffer.store.format
    amdgcn_struct_ptr_buffer_atomic_add,       // llvm.amdgcn.struct.ptr.buffer.atomic.add
    amdgcn_struct_ptr_buffer_atomic_and,       // llvm.amdgcn.struct.ptr.buffer.atomic.and
    amdgcn_struct_ptr_buffer_atomic_cmpswap,   // llvm.amdgcn.struct.ptr.buffer.atomic.cmpswap
    amdgcn_struct_ptr_buffer_atomic_dec,       // llvm.amdgcn.struct.ptr.buffer.atomic.dec
    amdgcn_struct_ptr_buffer_atomic_fadd,      // llvm.amdgcn.struct.ptr.buffer.atomic.fadd
    amdgcn_struct_ptr_buffer_atomic_fmax,      // llvm.amdgcn.struct.ptr.buffer.atomic.fmax
    amdgcn_struct_ptr_buffer_atomic_fmin,      // llvm.amdgcn.struct.ptr.buffer.atomic.fmin
    amdgcn_struct_ptr_buffer_atomic_inc,       // llvm.amdgcn.struct.ptr.buffer.atomic.inc
    amdgcn_struct_ptr_buffer_atomic_or,        // llvm.amdgcn.struct.ptr.buffer.atomic.or
    amdgcn_struct_ptr_buffer_atomic_smax,      // llvm.amdgcn.struct.ptr.buffer.atomic.smax
    amdgcn_struct_ptr_buffer_atomic_smin,      // llvm.amdgcn.struct.ptr.buffer.atomic.smin
    amdgcn_struct_ptr_buffer_atomic_sub,       // llvm.amdgcn.struct.ptr.buffer.atomic.sub
    amdgcn_struct_ptr_buffer_atomic_swap,      // llvm.amdgcn.struct.ptr.buffer.atomic.swap
    amdgcn_struct_ptr_buffer_atomic_umax,      // llvm.amdgcn.struct.ptr.buffer.atomic.umax
    amdgcn_struct_ptr_buffer_atomic_umin,      // llvm.amdgcn.struct.ptr.buffer.atomic.umin
    amdgcn_struct_ptr_buffer_atomic_xor,       // llvm.amdgcn.struct.ptr.buffer.atomic.xor
    amdgcn_struct_ptr_buffer_load,             // llvm.amdgcn.struct.ptr.buffer.load
    amdgcn_struct_ptr_buffer_load_format,      // llvm.amdgcn.struct.ptr.buffer.load.format
    amdgcn_struct_ptr_buffer_load_lds,         // llvm.amdgcn.struct.ptr.buffer.load.lds
    amdgcn_struct_ptr_buffer_store,            // llvm.amdgcn.struct.ptr.buffer.store
    amdgcn_struct_ptr_buffer_store_format,     // llvm.amdgcn.struct.ptr.buffer.store.format
    amdgcn_struct_ptr_tbuffer_load,            // llvm.amdgcn.struct.ptr.tbuffer.load
    amdgcn_struct_ptr_tbuffer_store,           // llvm.amdgcn.struct.ptr.tbuffer.store
    amdgcn_struct_tbuffer_load,                // llvm.amdgcn.struct.tbuffer.load
    amdgcn_struct_tbuffer_store,               // llvm.amdgcn.struct.tbuffer.store
    amdgcn_sudot4,                             // llvm.amdgcn.sudot4
    amdgcn_sudot8,                             // llvm.amdgcn.sudot8
    amdgcn_tbuffer_load,                       // llvm.amdgcn.tbuffer.load
    amdgcn_tbuffer_store,                      // llvm.amdgcn.tbuffer.store
    amdgcn_trig_preop,                         // llvm.amdgcn.trig.preop
    amdgcn_ubfe,                               // llvm.amdgcn.ubfe
    amdgcn_udot2,                              // llvm.amdgcn.udot2
    amdgcn_udot4,                              // llvm.amdgcn.udot4
    amdgcn_udot8,                              // llvm.amdgcn.udot8
    amdgcn_unreachable,                        // llvm.amdgcn.unreachable
    amdgcn_update_dpp,                         // llvm.amdgcn.update.dpp
    amdgcn_wave_barrier,                       // llvm.amdgcn.wave.barrier
    amdgcn_wave_reduce_umax,                   // llvm.amdgcn.wave.reduce.umax
    amdgcn_wave_reduce_umin,                   // llvm.amdgcn.wave.reduce.umin
    amdgcn_wavefrontsize,                      // llvm.amdgcn.wavefrontsize
    amdgcn_wmma_bf16_16x16x16_bf16,            // llvm.amdgcn.wmma.bf16.16x16x16.bf16
    amdgcn_wmma_f16_16x16x16_f16,              // llvm.amdgcn.wmma.f16.16x16x16.f16
    amdgcn_wmma_f32_16x16x16_bf16,             // llvm.amdgcn.wmma.f32.16x16x16.bf16
    amdgcn_wmma_f32_16x16x16_f16,              // llvm.amdgcn.wmma.f32.16x16x16.f16
    amdgcn_wmma_i32_16x16x16_iu4,              // llvm.amdgcn.wmma.i32.16x16x16.iu4
    amdgcn_wmma_i32_16x16x16_iu8,              // llvm.amdgcn.wmma.i32.16x16x16.iu8
    amdgcn_workgroup_id_x,                     // llvm.amdgcn.workgroup.id.x
    amdgcn_workgroup_id_y,                     // llvm.amdgcn.workgroup.id.y
    amdgcn_workgroup_id_z,                     // llvm.amdgcn.workgroup.id.z
    amdgcn_workitem_id_x,                      // llvm.amdgcn.workitem.id.x
    amdgcn_workitem_id_y,                      // llvm.amdgcn.workitem.id.y
    amdgcn_workitem_id_z,                      // llvm.amdgcn.workitem.id.z
    amdgcn_wqm,                                // llvm.amdgcn.wqm
    amdgcn_wqm_demote,                         // llvm.amdgcn.wqm.demote
    amdgcn_wqm_vote,                           // llvm.amdgcn.wqm.vote
    amdgcn_writelane,                          // llvm.amdgcn.writelane
    amdgcn_wwm,                                // llvm.amdgcn.wwm
}; // enum
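
// Example (a sketch, not part of the generated table): values in this enum
// name intrinsics when requesting their declarations, e.g.:
//
//   Function *WorkItemIdX =
//       Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_workitem_id_x);
//
// Overloaded intrinsics additionally pass their concrete types through the
// ArrayRef<Type *> parameter of getDeclaration.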
} // namespace Intrinsic
} // namespace llvm

#endif

//===- PassManagerImpl.h - Pass management infrastructure -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Provides implementations for PassManager and AnalysisManager template
/// methods. These classes should be explicitly instantiated for any IR unit,
/// and files doing the explicit instantiation should include this header.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PASSMANAGERIMPL_H
#define LLVM_IR_PASSMANAGERIMPL_H

#include "llvm/IR/PassManager.h"

namespace llvm {

template <typename IRUnitT, typename... ExtraArgTs>
inline AnalysisManager<IRUnitT, ExtraArgTs...>::AnalysisManager() = default;

template <typename IRUnitT, typename... ExtraArgTs>
inline AnalysisManager<IRUnitT, ExtraArgTs...>::AnalysisManager(
    AnalysisManager &&) = default;

template <typename IRUnitT, typename... ExtraArgTs>
inline AnalysisManager<IRUnitT, ExtraArgTs...> &
AnalysisManager<IRUnitT, ExtraArgTs...>::operator=(AnalysisManager &&) =
    default;

template <typename IRUnitT, typename... ExtraArgTs>
inline void
AnalysisManager<IRUnitT, ExtraArgTs...>::clear(IRUnitT &IR,
                                               llvm::StringRef Name) {
  if (auto *PI = getCachedResult<PassInstrumentationAnalysis>(IR))
    PI->runAnalysesCleared(Name);

  auto ResultsListI = AnalysisResultLists.find(&IR);
  if (ResultsListI == AnalysisResultLists.end())
    return;
  // Delete the map entries that point into the results list.
  for (auto &IDAndResult : ResultsListI->second)
    AnalysisResults.erase({IDAndResult.first, &IR});

  // And actually destroy and erase the results associated with this IR.
  AnalysisResultLists.erase(ResultsListI);
}

template <typename IRUnitT, typename... ExtraArgTs>
inline typename AnalysisManager<IRUnitT, ExtraArgTs...>::ResultConceptT &
AnalysisManager<IRUnitT, ExtraArgTs...>::getResultImpl(
    AnalysisKey *ID, IRUnitT &IR, ExtraArgTs... ExtraArgs) {
  typename AnalysisResultMapT::iterator RI;
  bool Inserted;
  std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
      std::make_pair(ID, &IR), typename AnalysisResultListT::iterator()));

  // If we don't have a cached result for this IR unit, look up the pass and
  // run it to produce a result, which we then add to the cache.
  if (Inserted) {
    auto &P = this->lookUpPass(ID);

    PassInstrumentation PI;
    if (ID != PassInstrumentationAnalysis::ID()) {
      PI = getResult<PassInstrumentationAnalysis>(IR, ExtraArgs...);
      PI.runBeforeAnalysis(P, IR);
    }

    AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
    ResultList.emplace_back(ID, P.run(IR, *this, ExtraArgs...));

    PI.runAfterAnalysis(P, IR);

    // P.run may have inserted elements into AnalysisResults and invalidated
    // RI.
    RI = AnalysisResults.find({ID, &IR});
    assert(RI != AnalysisResults.end() && "we just inserted it!");

    RI->second = std::prev(ResultList.end());
  }

  return *RI->second->second;
}
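
// Usage sketch: passes never call getResultImpl directly; they go through the
// typed AnalysisManager::getResult wrapper, e.g. (MyPass is hypothetical):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
//     (void)DT; // cached on later queries until invalidated
//     return PreservedAnalyses::all();
//   }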

template <typename IRUnitT, typename... ExtraArgTs>
inline void AnalysisManager<IRUnitT, ExtraArgTs...>::invalidate(
    IRUnitT &IR, const PreservedAnalyses &PA) {
  // We're done if all analyses on this IR unit are preserved.
  if (PA.allAnalysesInSetPreserved<AllAnalysesOn<IRUnitT>>())
    return;

  // Track whether each analysis's result is invalidated in
  // IsResultInvalidated.
  SmallDenseMap<AnalysisKey *, bool, 8> IsResultInvalidated;
  Invalidator Inv(IsResultInvalidated, AnalysisResults);
  AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
  for (auto &AnalysisResultPair : ResultsList) {
    // This is basically the same thing as Invalidator::invalidate, but we
    // can't call it here because we're operating on the type-erased result.
    // Moreover, if we instead called invalidate() directly, it would do an
    // unnecessary lookup in ResultsList.
    AnalysisKey *ID = AnalysisResultPair.first;
    auto &Result = *AnalysisResultPair.second;

    auto IMapI = IsResultInvalidated.find(ID);
    if (IMapI != IsResultInvalidated.end())
      // This result was already handled via the Invalidator.
      continue;

    // Try to invalidate the result, giving it the Invalidator so it can
    // recursively query for any dependencies it has and record the result.
    // Note that we cannot reuse 'IMapI' here or pre-insert the ID, as
    // Result.invalidate may insert things into the map, invalidating our
    // iterator.
    bool Inserted =
        IsResultInvalidated.insert({ID, Result.invalidate(IR, PA, Inv)}).second;
    (void)Inserted;
    assert(Inserted && "Should never have already inserted this ID, likely "
                       "indicates a cycle!");
  }

  // Now erase the results that were marked above as invalidated.
  if (!IsResultInvalidated.empty()) {
    for (auto I = ResultsList.begin(), E = ResultsList.end(); I != E;) {
      AnalysisKey *ID = I->first;
      if (!IsResultInvalidated.lookup(ID)) {
        ++I;
        continue;
      }

      if (auto *PI = getCachedResult<PassInstrumentationAnalysis>(IR))
        PI->runAnalysisInvalidated(this->lookUpPass(ID), IR);

      I = ResultsList.erase(I);
      AnalysisResults.erase({ID, &IR});
    }
  }

  if (ResultsList.empty())
    AnalysisResultLists.erase(&IR);
}
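
// Invalidation sketch: the PreservedAnalyses set consumed above is what a
// transformation pass returns from its run method, e.g.:
//
//   PreservedAnalyses PA;
//   PA.preserve<DominatorTreeAnalysis>();
//   return PA; // every other cached result for this IR unit is invalidated
//
// Returning PreservedAnalyses::all() instead short-circuits through the
// allAnalysesInSetPreserved check at the top of invalidate().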
} // end namespace llvm

#endif // LLVM_IR_PASSMANAGERIMPL_H

//===- llvm/IR/TypeFinder.h - Class to find used struct types ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the TypeFinder class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_TYPEFINDER_H
#define LLVM_IR_TYPEFINDER_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Attributes.h"
#include <cstddef>
#include <vector>

namespace llvm {

class MDNode;
class Module;
class StructType;
class Type;
class Value;

/// TypeFinder - Walk over a module, identifying all of the types that are
/// used by the module.
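///
/// A minimal usage sketch:
/// \code
///   TypeFinder Finder;
///   Finder.run(M, /*onlyNamed=*/true);
///   for (StructType *ST : Finder)
///     ST->print(llvm::errs());
/// \endcode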
class TypeFinder {
  // To avoid walking constant expressions and other IR objects multiple
  // times, we keep several helper sets.
  DenseSet<const Value*> VisitedConstants;
  DenseSet<const MDNode *> VisitedMetadata;
  DenseSet<AttributeList> VisitedAttributes;
  DenseSet<Type*> VisitedTypes;

  std::vector<StructType*> StructTypes;
  bool OnlyNamed = false;

public:
  TypeFinder() = default;

  void run(const Module &M, bool onlyNamed);
  void clear();

  using iterator = std::vector<StructType*>::iterator;
  using const_iterator = std::vector<StructType*>::const_iterator;

  iterator begin() { return StructTypes.begin(); }
  iterator end() { return StructTypes.end(); }

  const_iterator begin() const { return StructTypes.begin(); }
  const_iterator end() const { return StructTypes.end(); }

  bool empty() const { return StructTypes.empty(); }
  size_t size() const { return StructTypes.size(); }
  iterator erase(iterator I, iterator E) { return StructTypes.erase(I, E); }

  StructType *&operator[](unsigned Idx) { return StructTypes[Idx]; }

  DenseSet<const MDNode *> &getVisitedMetadata() { return VisitedMetadata; }

private:
  /// incorporateType - This method adds the type to the list of used
  /// structures if it's not in there already.
  void incorporateType(Type *Ty);

  /// incorporateValue - This method is used to walk operand lists finding types
  /// hiding in constant expressions and other operands that won't be walked in
  /// other ways.  GlobalValues, basic blocks, instructions, and inst operands
  /// are all explicitly enumerated.
  void incorporateValue(const Value *V);

  /// incorporateMDNode - This method is used to walk the operands of an MDNode
  /// to find types hiding within.
  void incorporateMDNode(const MDNode *V);

  /// Incorporate types referenced by attributes.
  void incorporateAttributes(AttributeList AL);
};

} // end namespace llvm

#endif // LLVM_IR_TYPEFINDER_H

//===- llvm/IR/DebugInfoFlags.def - Debug info flag definitions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through debug info flags.
//
//===----------------------------------------------------------------------===//

#if !(defined HANDLE_DI_FLAG || defined HANDLE_DISP_FLAG)
#error "Missing macro definition of HANDLE_DI*"
#endif

#ifndef HANDLE_DI_FLAG
#define HANDLE_DI_FLAG(ID, NAME)
#endif

#ifndef HANDLE_DISP_FLAG
#define HANDLE_DISP_FLAG(ID, NAME)
#endif
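
// Usage sketch: a client stamps out this flag list by defining one of the
// HANDLE_* macros before including this file. For example, building an enum
// (the Flag##NAME spelling mirrors how DINode consumes this file):
//
//   #define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
//   enum DIFlags : uint32_t {
//   #include "llvm/IR/DebugInfoFlags.def"
//   };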

// General flags kept in DINode.

HANDLE_DI_FLAG(0, Zero) // Use it as a zero value.
                        // For example: void foo(DIFlags Flags = FlagZero).
HANDLE_DI_FLAG(1, Private)
HANDLE_DI_FLAG(2, Protected)
HANDLE_DI_FLAG(3, Public)
HANDLE_DI_FLAG((1 << 2), FwdDecl)
HANDLE_DI_FLAG((1 << 3), AppleBlock)
// Used to be BlockByRef, can be reused for anything except DICompositeType.
HANDLE_DI_FLAG((1 << 4), ReservedBit4)
HANDLE_DI_FLAG((1 << 5), Virtual)
HANDLE_DI_FLAG((1 << 6), Artificial)
HANDLE_DI_FLAG((1 << 7), Explicit)
HANDLE_DI_FLAG((1 << 8), Prototyped)
HANDLE_DI_FLAG((1 << 9), ObjcClassComplete)
HANDLE_DI_FLAG((1 << 10), ObjectPointer)
HANDLE_DI_FLAG((1 << 11), Vector)
HANDLE_DI_FLAG((1 << 12), StaticMember)
HANDLE_DI_FLAG((1 << 13), LValueReference)
HANDLE_DI_FLAG((1 << 14), RValueReference)
HANDLE_DI_FLAG((1 << 15), ExportSymbols)
HANDLE_DI_FLAG((1 << 16), SingleInheritance)
HANDLE_DI_FLAG((2 << 16), MultipleInheritance)
HANDLE_DI_FLAG((3 << 16), VirtualInheritance)
HANDLE_DI_FLAG((1 << 18), IntroducedVirtual)
HANDLE_DI_FLAG((1 << 19), BitField)
HANDLE_DI_FLAG((1 << 20), NoReturn)
HANDLE_DI_FLAG((1 << 22), TypePassByValue)
HANDLE_DI_FLAG((1 << 23), TypePassByReference)
HANDLE_DI_FLAG((1 << 24), EnumClass)
HANDLE_DI_FLAG((1 << 25), Thunk)
HANDLE_DI_FLAG((1 << 26), NonTrivial)
HANDLE_DI_FLAG((1 << 27), BigEndian)
HANDLE_DI_FLAG((1 << 28), LittleEndian)
HANDLE_DI_FLAG((1 << 29), AllCallsDescribed)

// To avoid needing a dedicated value for IndirectVirtualBase, we use
// the bitwise or of Virtual and FwdDecl, which does not otherwise
// make sense for inheritance.
HANDLE_DI_FLAG((1 << 2) | (1 << 5), IndirectVirtualBase)

#ifdef DI_FLAG_LARGEST_NEEDED
// Intended to be used with ADT/BitmaskEnum.h.
// NOTE: Must always equal the largest flag; check this when adding new flags.
HANDLE_DI_FLAG((1 << 29), Largest)
#undef DI_FLAG_LARGEST_NEEDED
#endif

// Subprogram-specific flags kept in DISubprogram.

// Use this as a zero/initialization value.
// For example: void foo(DISPFlags Flags = SPFlagZero).
HANDLE_DISP_FLAG(0, Zero)
// Virtuality is a two-bit enum field in the LSB of the word.
// Values should match DW_VIRTUALITY_*.
HANDLE_DISP_FLAG(1u, Virtual)
HANDLE_DISP_FLAG(2u, PureVirtual)
HANDLE_DISP_FLAG((1u << 2), LocalToUnit)
HANDLE_DISP_FLAG((1u << 3), Definition)
HANDLE_DISP_FLAG((1u << 4), Optimized)
HANDLE_DISP_FLAG((1u << 5), Pure)
HANDLE_DISP_FLAG((1u << 6), Elemental)
HANDLE_DISP_FLAG((1u << 7), Recursive)
HANDLE_DISP_FLAG((1u << 8), MainSubprogram)
// This flag may also be used in the future, when adding support
// for defaulted functions.
HANDLE_DISP_FLAG((1u << 9), Deleted)
HANDLE_DISP_FLAG((1u << 11), ObjCDirect)

#ifdef DISP_FLAG_LARGEST_NEEDED
// Intended to be used with ADT/BitmaskEnum.h.
// NOTE: Must always equal the largest flag; check this when adding new flags.
HANDLE_DISP_FLAG((1u << 11), Largest)
#undef DISP_FLAG_LARGEST_NEEDED
#endif

#undef HANDLE_DI_FLAG
#undef HANDLE_DISP_FLAG

//===- PassTimingInfo.h - pass execution timing -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header defines classes/functions to handle pass execution timing
/// information with interfaces for both pass managers.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PASSTIMINGINFO_H
#define LLVM_IR_PASSTIMINGINFO_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Timer.h"
#include <memory>
#include <utility>

namespace llvm {

class Pass;
class PassInstrumentationCallbacks;
class raw_ostream;

/// If -time-passes has been specified, report the timings immediately and then
/// reset the timers to zero. By default it uses the stream created by
/// CreateInfoOutputFile().
void reportAndResetTimings(raw_ostream *OutStream = nullptr);

/// Request the timer for this legacy-pass-manager's pass instance.
Timer *getPassTimer(Pass *);

/// This class implements -time-passes functionality for the new pass manager.
/// It provides the pass-instrumentation callbacks that measure the pass
/// execution time. They collect timing info into individual timers as
/// passes are being run. At the end of its lifetime it prints the resulting
/// timing report.
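///
/// A wiring sketch (assuming a typical new-pass-manager setup):
/// \code
///   PassInstrumentationCallbacks PIC;
///   TimePassesHandler Timings(/*Enabled=*/true);
///   Timings.registerCallbacks(PIC);
///   // ... construct the analysis/pass managers with &PIC and run ...
///   Timings.print();
/// \endcode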
class TimePassesHandler {
  /// A value of this type uniquely identifies a pass invocation.
  /// It is a pair of a string pass identifier (which for now is common
  /// to all instances of a given pass) and a sequential invocation counter.
  using PassInvocationID = std::pair<StringRef, unsigned>;

  /// Groups of timers for passes and analyses.
  TimerGroup PassTG;
  TimerGroup AnalysisTG;

  using TimerVector = llvm::SmallVector<std::unique_ptr<Timer>, 4>;
  /// Map of timers for pass invocations.
  StringMap<TimerVector> TimingData;

  /// Currently active pass timer.
  Timer *ActivePassTimer = nullptr;
  /// Stack of currently active analysis timers. Analyses can request other
  /// analyses.
  SmallVector<Timer *, 8> AnalysisActiveTimerStack;

  /// Custom output stream to print timing information into.
  /// By default (== nullptr) we emit the time report into the stream created by
  /// CreateInfoOutputFile().
  raw_ostream *OutStream = nullptr;

  bool Enabled;
  bool PerRun;

public:
  TimePassesHandler();
  TimePassesHandler(bool Enabled, bool PerRun = false);

  /// Destructor handles the print action if it has not been handled before.
  ~TimePassesHandler() { print(); }

  /// Prints out timing information and then resets the timers.
  void print();

  // We intend this to be unique per-compilation, thus no copies.
  TimePassesHandler(const TimePassesHandler &) = delete;
  void operator=(const TimePassesHandler &) = delete;

  void registerCallbacks(PassInstrumentationCallbacks &PIC);

  /// Set a custom output stream for subsequent reporting.
  void setOutStream(raw_ostream &OutStream);

private:
  /// Dumps information for running/triggered timers, useful for debugging.
  LLVM_DUMP_METHOD void dump() const;

  /// Returns a new timer for each new run of the pass.
  Timer &getPassTimer(StringRef PassID, bool IsPass);

  void startAnalysisTimer(StringRef PassID);
  void stopAnalysisTimer(StringRef PassID);
  void startPassTimer(StringRef PassID);
  void stopPassTimer(StringRef PassID);
};

} // namespace llvm

#endif

/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_RISCV_ENUMS_H
#define LLVM_IR_INTRINSIC_RISCV_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum RISCVIntrinsics : unsigned {
// Enum values for intrinsics
    riscv_aes32dsi = 8196,                            // llvm.riscv.aes32dsi
    riscv_aes32dsmi,                           // llvm.riscv.aes32dsmi
    riscv_aes32esi,                            // llvm.riscv.aes32esi
    riscv_aes32esmi,                           // llvm.riscv.aes32esmi
    riscv_aes64ds,                             // llvm.riscv.aes64ds
    riscv_aes64dsm,                            // llvm.riscv.aes64dsm
    riscv_aes64es,                             // llvm.riscv.aes64es
    riscv_aes64esm,                            // llvm.riscv.aes64esm
    riscv_aes64im,                             // llvm.riscv.aes64im
    riscv_aes64ks1i,                           // llvm.riscv.aes64ks1i
    riscv_aes64ks2,                            // llvm.riscv.aes64ks2
    riscv_brev8,                               // llvm.riscv.brev8
    riscv_clmul,                               // llvm.riscv.clmul
    riscv_clmulh,                              // llvm.riscv.clmulh
    riscv_clmulr,                              // llvm.riscv.clmulr
    riscv_masked_atomicrmw_add_i32,            // llvm.riscv.masked.atomicrmw.add.i32
    riscv_masked_atomicrmw_add_i64,            // llvm.riscv.masked.atomicrmw.add.i64
    riscv_masked_atomicrmw_max_i32,            // llvm.riscv.masked.atomicrmw.max.i32
    riscv_masked_atomicrmw_max_i64,            // llvm.riscv.masked.atomicrmw.max.i64
    riscv_masked_atomicrmw_min_i32,            // llvm.riscv.masked.atomicrmw.min.i32
    riscv_masked_atomicrmw_min_i64,            // llvm.riscv.masked.atomicrmw.min.i64
    riscv_masked_atomicrmw_nand_i32,           // llvm.riscv.masked.atomicrmw.nand.i32
    riscv_masked_atomicrmw_nand_i64,           // llvm.riscv.masked.atomicrmw.nand.i64
    riscv_masked_atomicrmw_sub_i32,            // llvm.riscv.masked.atomicrmw.sub.i32
    riscv_masked_atomicrmw_sub_i64,            // llvm.riscv.masked.atomicrmw.sub.i64
    riscv_masked_atomicrmw_umax_i32,           // llvm.riscv.masked.atomicrmw.umax.i32
    riscv_masked_atomicrmw_umax_i64,           // llvm.riscv.masked.atomicrmw.umax.i64
    riscv_masked_atomicrmw_umin_i32,           // llvm.riscv.masked.atomicrmw.umin.i32
    riscv_masked_atomicrmw_umin_i64,           // llvm.riscv.masked.atomicrmw.umin.i64
    riscv_masked_atomicrmw_xchg_i32,           // llvm.riscv.masked.atomicrmw.xchg.i32
    riscv_masked_atomicrmw_xchg_i64,           // llvm.riscv.masked.atomicrmw.xchg.i64
    riscv_masked_cmpxchg_i32,                  // llvm.riscv.masked.cmpxchg.i32
    riscv_masked_cmpxchg_i64,                  // llvm.riscv.masked.cmpxchg.i64
    riscv_masked_strided_load,                 // llvm.riscv.masked.strided.load
    riscv_masked_strided_store,                // llvm.riscv.masked.strided.store
    riscv_orc_b,                               // llvm.riscv.orc.b
    riscv_seg2_load,                           // llvm.riscv.seg2.load
    riscv_seg2_store,                          // llvm.riscv.seg2.store
    riscv_seg3_load,                           // llvm.riscv.seg3.load
    riscv_seg3_store,                          // llvm.riscv.seg3.store
    riscv_seg4_load,                           // llvm.riscv.seg4.load
    riscv_seg4_store,                          // llvm.riscv.seg4.store
    riscv_seg5_load,                           // llvm.riscv.seg5.load
    riscv_seg5_store,                          // llvm.riscv.seg5.store
    riscv_seg6_load,                           // llvm.riscv.seg6.load
    riscv_seg6_store,                          // llvm.riscv.seg6.store
    riscv_seg7_load,                           // llvm.riscv.seg7.load
    riscv_seg7_store,                          // llvm.riscv.seg7.store
    riscv_seg8_load,                           // llvm.riscv.seg8.load
    riscv_seg8_store,                          // llvm.riscv.seg8.store
    riscv_sf_vc_fv_se,                         // llvm.riscv.sf.vc.fv.se
    riscv_sf_vc_fvv_se,                        // llvm.riscv.sf.vc.fvv.se
    riscv_sf_vc_fvw_se,                        // llvm.riscv.sf.vc.fvw.se
    riscv_sf_vc_i_se_e16m1,                    // llvm.riscv.sf.vc.i.se.e16m1
    riscv_sf_vc_i_se_e16m2,                    // llvm.riscv.sf.vc.i.se.e16m2
    riscv_sf_vc_i_se_e16m4,                    // llvm.riscv.sf.vc.i.se.e16m4
    riscv_sf_vc_i_se_e16m8,                    // llvm.riscv.sf.vc.i.se.e16m8
    riscv_sf_vc_i_se_e16mf2,                   // llvm.riscv.sf.vc.i.se.e16mf2
    riscv_sf_vc_i_se_e16mf4,                   // llvm.riscv.sf.vc.i.se.e16mf4
    riscv_sf_vc_i_se_e32m1,                    // llvm.riscv.sf.vc.i.se.e32m1
    riscv_sf_vc_i_se_e32m2,                    // llvm.riscv.sf.vc.i.se.e32m2
    riscv_sf_vc_i_se_e32m4,                    // llvm.riscv.sf.vc.i.se.e32m4
    riscv_sf_vc_i_se_e32m8,                    // llvm.riscv.sf.vc.i.se.e32m8
    riscv_sf_vc_i_se_e32mf2,                   // llvm.riscv.sf.vc.i.se.e32mf2
    riscv_sf_vc_i_se_e64m1,                    // llvm.riscv.sf.vc.i.se.e64m1
    riscv_sf_vc_i_se_e64m2,                    // llvm.riscv.sf.vc.i.se.e64m2
    riscv_sf_vc_i_se_e64m4,                    // llvm.riscv.sf.vc.i.se.e64m4
    riscv_sf_vc_i_se_e64m8,                    // llvm.riscv.sf.vc.i.se.e64m8
    riscv_sf_vc_i_se_e8m1,                     // llvm.riscv.sf.vc.i.se.e8m1
    riscv_sf_vc_i_se_e8m2,                     // llvm.riscv.sf.vc.i.se.e8m2
    riscv_sf_vc_i_se_e8m4,                     // llvm.riscv.sf.vc.i.se.e8m4
    riscv_sf_vc_i_se_e8m8,                     // llvm.riscv.sf.vc.i.se.e8m8
    riscv_sf_vc_i_se_e8mf2,                    // llvm.riscv.sf.vc.i.se.e8mf2
    riscv_sf_vc_i_se_e8mf4,                    // llvm.riscv.sf.vc.i.se.e8mf4
    riscv_sf_vc_i_se_e8mf8,                    // llvm.riscv.sf.vc.i.se.e8mf8
    riscv_sf_vc_iv_se,                         // llvm.riscv.sf.vc.iv.se
    riscv_sf_vc_ivv_se,                        // llvm.riscv.sf.vc.ivv.se
    riscv_sf_vc_ivw_se,                        // llvm.riscv.sf.vc.ivw.se
    riscv_sf_vc_v_fv,                          // llvm.riscv.sf.vc.v.fv
    riscv_sf_vc_v_fv_se,                       // llvm.riscv.sf.vc.v.fv.se
    riscv_sf_vc_v_fvv,                         // llvm.riscv.sf.vc.v.fvv
    riscv_sf_vc_v_fvv_se,                      // llvm.riscv.sf.vc.v.fvv.se
    riscv_sf_vc_v_fvw,                         // llvm.riscv.sf.vc.v.fvw
    riscv_sf_vc_v_fvw_se,                      // llvm.riscv.sf.vc.v.fvw.se
    riscv_sf_vc_v_i,                           // llvm.riscv.sf.vc.v.i
    riscv_sf_vc_v_i_se,                        // llvm.riscv.sf.vc.v.i.se
    riscv_sf_vc_v_iv,                          // llvm.riscv.sf.vc.v.iv
    riscv_sf_vc_v_iv_se,                       // llvm.riscv.sf.vc.v.iv.se
    riscv_sf_vc_v_ivv,                         // llvm.riscv.sf.vc.v.ivv
    riscv_sf_vc_v_ivv_se,                      // llvm.riscv.sf.vc.v.ivv.se
    riscv_sf_vc_v_ivw,                         // llvm.riscv.sf.vc.v.ivw
    riscv_sf_vc_v_ivw_se,                      // llvm.riscv.sf.vc.v.ivw.se
    riscv_sf_vc_v_vv,                          // llvm.riscv.sf.vc.v.vv
    riscv_sf_vc_v_vv_se,                       // llvm.riscv.sf.vc.v.vv.se
    riscv_sf_vc_v_vvv,                         // llvm.riscv.sf.vc.v.vvv
    riscv_sf_vc_v_vvv_se,                      // llvm.riscv.sf.vc.v.vvv.se
    riscv_sf_vc_v_vvw,                         // llvm.riscv.sf.vc.v.vvw
    riscv_sf_vc_v_vvw_se,                      // llvm.riscv.sf.vc.v.vvw.se
    riscv_sf_vc_v_x,                           // llvm.riscv.sf.vc.v.x
    riscv_sf_vc_v_x_se,                        // llvm.riscv.sf.vc.v.x.se
    riscv_sf_vc_v_xv,                          // llvm.riscv.sf.vc.v.xv
    riscv_sf_vc_v_xv_se,                       // llvm.riscv.sf.vc.v.xv.se
    riscv_sf_vc_v_xvv,                         // llvm.riscv.sf.vc.v.xvv
    riscv_sf_vc_v_xvv_se,                      // llvm.riscv.sf.vc.v.xvv.se
    riscv_sf_vc_v_xvw,                         // llvm.riscv.sf.vc.v.xvw
    riscv_sf_vc_v_xvw_se,                      // llvm.riscv.sf.vc.v.xvw.se
    riscv_sf_vc_vv_se,                         // llvm.riscv.sf.vc.vv.se
    riscv_sf_vc_vvv_se,                        // llvm.riscv.sf.vc.vvv.se
    riscv_sf_vc_vvw_se,                        // llvm.riscv.sf.vc.vvw.se
    riscv_sf_vc_x_se_e16m1,                    // llvm.riscv.sf.vc.x.se.e16m1
    riscv_sf_vc_x_se_e16m2,                    // llvm.riscv.sf.vc.x.se.e16m2
    riscv_sf_vc_x_se_e16m4,                    // llvm.riscv.sf.vc.x.se.e16m4
    riscv_sf_vc_x_se_e16m8,                    // llvm.riscv.sf.vc.x.se.e16m8
    riscv_sf_vc_x_se_e16mf2,                   // llvm.riscv.sf.vc.x.se.e16mf2
    riscv_sf_vc_x_se_e16mf4,                   // llvm.riscv.sf.vc.x.se.e16mf4
    riscv_sf_vc_x_se_e32m1,                    // llvm.riscv.sf.vc.x.se.e32m1
    riscv_sf_vc_x_se_e32m2,                    // llvm.riscv.sf.vc.x.se.e32m2
    riscv_sf_vc_x_se_e32m4,                    // llvm.riscv.sf.vc.x.se.e32m4
    riscv_sf_vc_x_se_e32m8,                    // llvm.riscv.sf.vc.x.se.e32m8
    riscv_sf_vc_x_se_e32mf2,                   // llvm.riscv.sf.vc.x.se.e32mf2
    riscv_sf_vc_x_se_e64m1,                    // llvm.riscv.sf.vc.x.se.e64m1
    riscv_sf_vc_x_se_e64m2,                    // llvm.riscv.sf.vc.x.se.e64m2
    riscv_sf_vc_x_se_e64m4,                    // llvm.riscv.sf.vc.x.se.e64m4
    riscv_sf_vc_x_se_e64m8,                    // llvm.riscv.sf.vc.x.se.e64m8
    riscv_sf_vc_x_se_e8m1,                     // llvm.riscv.sf.vc.x.se.e8m1
    riscv_sf_vc_x_se_e8m2,                     // llvm.riscv.sf.vc.x.se.e8m2
    riscv_sf_vc_x_se_e8m4,                     // llvm.riscv.sf.vc.x.se.e8m4
    riscv_sf_vc_x_se_e8m8,                     // llvm.riscv.sf.vc.x.se.e8m8
    riscv_sf_vc_x_se_e8mf2,                    // llvm.riscv.sf.vc.x.se.e8mf2
    riscv_sf_vc_x_se_e8mf4,                    // llvm.riscv.sf.vc.x.se.e8mf4
    riscv_sf_vc_x_se_e8mf8,                    // llvm.riscv.sf.vc.x.se.e8mf8
    riscv_sf_vc_xv_se,                         // llvm.riscv.sf.vc.xv.se
    riscv_sf_vc_xvv_se,                        // llvm.riscv.sf.vc.xvv.se
    riscv_sf_vc_xvw_se,                        // llvm.riscv.sf.vc.xvw.se
    riscv_sha256sig0,                          // llvm.riscv.sha256sig0
    riscv_sha256sig1,                          // llvm.riscv.sha256sig1
    riscv_sha256sum0,                          // llvm.riscv.sha256sum0
    riscv_sha256sum1,                          // llvm.riscv.sha256sum1
    riscv_sha512sig0,                          // llvm.riscv.sha512sig0
    riscv_sha512sig0h,                         // llvm.riscv.sha512sig0h
    riscv_sha512sig0l,                         // llvm.riscv.sha512sig0l
    riscv_sha512sig1,                          // llvm.riscv.sha512sig1
    riscv_sha512sig1h,                         // llvm.riscv.sha512sig1h
    riscv_sha512sig1l,                         // llvm.riscv.sha512sig1l
    riscv_sha512sum0,                          // llvm.riscv.sha512sum0
    riscv_sha512sum0r,                         // llvm.riscv.sha512sum0r
    riscv_sha512sum1,                          // llvm.riscv.sha512sum1
    riscv_sha512sum1r,                         // llvm.riscv.sha512sum1r
    riscv_sm3p0,                               // llvm.riscv.sm3p0
    riscv_sm3p1,                               // llvm.riscv.sm3p1
    riscv_sm4ed,                               // llvm.riscv.sm4ed
    riscv_sm4ks,                               // llvm.riscv.sm4ks
    riscv_th_vmaqa,                            // llvm.riscv.th.vmaqa
    riscv_th_vmaqa_mask,                       // llvm.riscv.th.vmaqa.mask
    riscv_th_vmaqasu,                          // llvm.riscv.th.vmaqasu
    riscv_th_vmaqasu_mask,                     // llvm.riscv.th.vmaqasu.mask
    riscv_th_vmaqau,                           // llvm.riscv.th.vmaqau
    riscv_th_vmaqau_mask,                      // llvm.riscv.th.vmaqau.mask
    riscv_th_vmaqaus,                          // llvm.riscv.th.vmaqaus
    riscv_th_vmaqaus_mask,                     // llvm.riscv.th.vmaqaus.mask
    riscv_unzip,                               // llvm.riscv.unzip
    riscv_vaadd,                               // llvm.riscv.vaadd
    riscv_vaadd_mask,                          // llvm.riscv.vaadd.mask
    riscv_vaaddu,                              // llvm.riscv.vaaddu
    riscv_vaaddu_mask,                         // llvm.riscv.vaaddu.mask
    riscv_vadc,                                // llvm.riscv.vadc
    riscv_vadd,                                // llvm.riscv.vadd
    riscv_vadd_mask,                           // llvm.riscv.vadd.mask
    riscv_vand,                                // llvm.riscv.vand
    riscv_vand_mask,                           // llvm.riscv.vand.mask
    riscv_vasub,                               // llvm.riscv.vasub
    riscv_vasub_mask,                          // llvm.riscv.vasub.mask
    riscv_vasubu,                              // llvm.riscv.vasubu
    riscv_vasubu_mask,                         // llvm.riscv.vasubu.mask
    riscv_vcompress,                           // llvm.riscv.vcompress
    riscv_vcpop,                               // llvm.riscv.vcpop
    riscv_vcpop_mask,                          // llvm.riscv.vcpop.mask
    riscv_vdiv,                                // llvm.riscv.vdiv
    riscv_vdiv_mask,                           // llvm.riscv.vdiv.mask
    riscv_vdivu,                               // llvm.riscv.vdivu
    riscv_vdivu_mask,                          // llvm.riscv.vdivu.mask
    riscv_vfadd,                               // llvm.riscv.vfadd
    riscv_vfadd_mask,                          // llvm.riscv.vfadd.mask
    riscv_vfclass,                             // llvm.riscv.vfclass
    riscv_vfclass_mask,                        // llvm.riscv.vfclass.mask
    riscv_vfcvt_f_x_v,                         // llvm.riscv.vfcvt.f.x.v
    riscv_vfcvt_f_x_v_mask,                    // llvm.riscv.vfcvt.f.x.v.mask
    riscv_vfcvt_f_xu_v,                        // llvm.riscv.vfcvt.f.xu.v
    riscv_vfcvt_f_xu_v_mask,                   // llvm.riscv.vfcvt.f.xu.v.mask
    riscv_vfcvt_rtz_x_f_v,                     // llvm.riscv.vfcvt.rtz.x.f.v
    riscv_vfcvt_rtz_x_f_v_mask,                // llvm.riscv.vfcvt.rtz.x.f.v.mask
    riscv_vfcvt_rtz_xu_f_v,                    // llvm.riscv.vfcvt.rtz.xu.f.v
    riscv_vfcvt_rtz_xu_f_v_mask,               // llvm.riscv.vfcvt.rtz.xu.f.v.mask
    riscv_vfcvt_x_f_v,                         // llvm.riscv.vfcvt.x.f.v
    riscv_vfcvt_x_f_v_mask,                    // llvm.riscv.vfcvt.x.f.v.mask
    riscv_vfcvt_xu_f_v,                        // llvm.riscv.vfcvt.xu.f.v
    riscv_vfcvt_xu_f_v_mask,                   // llvm.riscv.vfcvt.xu.f.v.mask
    riscv_vfdiv,                               // llvm.riscv.vfdiv
    riscv_vfdiv_mask,                          // llvm.riscv.vfdiv.mask
    riscv_vfirst,                              // llvm.riscv.vfirst
    riscv_vfirst_mask,                         // llvm.riscv.vfirst.mask
    riscv_vfmacc,                              // llvm.riscv.vfmacc
    riscv_vfmacc_mask,                         // llvm.riscv.vfmacc.mask
    riscv_vfmadd,                              // llvm.riscv.vfmadd
    riscv_vfmadd_mask,                         // llvm.riscv.vfmadd.mask
    riscv_vfmax,                               // llvm.riscv.vfmax
    riscv_vfmax_mask,                          // llvm.riscv.vfmax.mask
    riscv_vfmerge,                             // llvm.riscv.vfmerge
    riscv_vfmin,                               // llvm.riscv.vfmin
    riscv_vfmin_mask,                          // llvm.riscv.vfmin.mask
    riscv_vfmsac,                              // llvm.riscv.vfmsac
    riscv_vfmsac_mask,                         // llvm.riscv.vfmsac.mask
    riscv_vfmsub,                              // llvm.riscv.vfmsub
    riscv_vfmsub_mask,                         // llvm.riscv.vfmsub.mask
    riscv_vfmul,                               // llvm.riscv.vfmul
    riscv_vfmul_mask,                          // llvm.riscv.vfmul.mask
    riscv_vfmv_f_s,                            // llvm.riscv.vfmv.f.s
    riscv_vfmv_s_f,                            // llvm.riscv.vfmv.s.f
    riscv_vfmv_v_f,                            // llvm.riscv.vfmv.v.f
    riscv_vfncvt_f_f_w,                        // llvm.riscv.vfncvt.f.f.w
    riscv_vfncvt_f_f_w_mask,                   // llvm.riscv.vfncvt.f.f.w.mask
    riscv_vfncvt_f_x_w,                        // llvm.riscv.vfncvt.f.x.w
    riscv_vfncvt_f_x_w_mask,                   // llvm.riscv.vfncvt.f.x.w.mask
    riscv_vfncvt_f_xu_w,                       // llvm.riscv.vfncvt.f.xu.w
    riscv_vfncvt_f_xu_w_mask,                  // llvm.riscv.vfncvt.f.xu.w.mask
    riscv_vfncvt_rod_f_f_w,                    // llvm.riscv.vfncvt.rod.f.f.w
    riscv_vfncvt_rod_f_f_w_mask,               // llvm.riscv.vfncvt.rod.f.f.w.mask
    riscv_vfncvt_rtz_x_f_w,                    // llvm.riscv.vfncvt.rtz.x.f.w
    riscv_vfncvt_rtz_x_f_w_mask,               // llvm.riscv.vfncvt.rtz.x.f.w.mask
    riscv_vfncvt_rtz_xu_f_w,                   // llvm.riscv.vfncvt.rtz.xu.f.w
    riscv_vfncvt_rtz_xu_f_w_mask,              // llvm.riscv.vfncvt.rtz.xu.f.w.mask
    riscv_vfncvt_x_f_w,                        // llvm.riscv.vfncvt.x.f.w
    riscv_vfncvt_x_f_w_mask,                   // llvm.riscv.vfncvt.x.f.w.mask
    riscv_vfncvt_xu_f_w,                       // llvm.riscv.vfncvt.xu.f.w
    riscv_vfncvt_xu_f_w_mask,                  // llvm.riscv.vfncvt.xu.f.w.mask
    riscv_vfnmacc,                             // llvm.riscv.vfnmacc
    riscv_vfnmacc_mask,                        // llvm.riscv.vfnmacc.mask
    riscv_vfnmadd,                             // llvm.riscv.vfnmadd
    riscv_vfnmadd_mask,                        // llvm.riscv.vfnmadd.mask
    riscv_vfnmsac,                             // llvm.riscv.vfnmsac
    riscv_vfnmsac_mask,                        // llvm.riscv.vfnmsac.mask
    riscv_vfnmsub,                             // llvm.riscv.vfnmsub
    riscv_vfnmsub_mask,                        // llvm.riscv.vfnmsub.mask
    riscv_vfrdiv,                              // llvm.riscv.vfrdiv
    riscv_vfrdiv_mask,                         // llvm.riscv.vfrdiv.mask
    riscv_vfrec7,                              // llvm.riscv.vfrec7
    riscv_vfrec7_mask,                         // llvm.riscv.vfrec7.mask
    riscv_vfredmax,                            // llvm.riscv.vfredmax
    riscv_vfredmax_mask,                       // llvm.riscv.vfredmax.mask
    riscv_vfredmin,                            // llvm.riscv.vfredmin
    riscv_vfredmin_mask,                       // llvm.riscv.vfredmin.mask
    riscv_vfredosum,                           // llvm.riscv.vfredosum
    riscv_vfredosum_mask,                      // llvm.riscv.vfredosum.mask
    riscv_vfredusum,                           // llvm.riscv.vfredusum
    riscv_vfredusum_mask,                      // llvm.riscv.vfredusum.mask
    riscv_vfrsqrt7,                            // llvm.riscv.vfrsqrt7
    riscv_vfrsqrt7_mask,                       // llvm.riscv.vfrsqrt7.mask
    riscv_vfrsub,                              // llvm.riscv.vfrsub
    riscv_vfrsub_mask,                         // llvm.riscv.vfrsub.mask
    riscv_vfsgnj,                              // llvm.riscv.vfsgnj
    riscv_vfsgnj_mask,                         // llvm.riscv.vfsgnj.mask
    riscv_vfsgnjn,                             // llvm.riscv.vfsgnjn
    riscv_vfsgnjn_mask,                        // llvm.riscv.vfsgnjn.mask
    riscv_vfsgnjx,                             // llvm.riscv.vfsgnjx
    riscv_vfsgnjx_mask,                        // llvm.riscv.vfsgnjx.mask
    riscv_vfslide1down,                        // llvm.riscv.vfslide1down
    riscv_vfslide1down_mask,                   // llvm.riscv.vfslide1down.mask
    riscv_vfslide1up,                          // llvm.riscv.vfslide1up
    riscv_vfslide1up_mask,                     // llvm.riscv.vfslide1up.mask
    riscv_vfsqrt,                              // llvm.riscv.vfsqrt
    riscv_vfsqrt_mask,                         // llvm.riscv.vfsqrt.mask
    riscv_vfsub,                               // llvm.riscv.vfsub
    riscv_vfsub_mask,                          // llvm.riscv.vfsub.mask
    riscv_vfwadd,                              // llvm.riscv.vfwadd
    riscv_vfwadd_mask,                         // llvm.riscv.vfwadd.mask
    riscv_vfwadd_w,                            // llvm.riscv.vfwadd.w
    riscv_vfwadd_w_mask,                       // llvm.riscv.vfwadd.w.mask
    riscv_vfwcvt_f_f_v,                        // llvm.riscv.vfwcvt.f.f.v
    riscv_vfwcvt_f_f_v_mask,                   // llvm.riscv.vfwcvt.f.f.v.mask
    riscv_vfwcvt_f_x_v,                        // llvm.riscv.vfwcvt.f.x.v
    riscv_vfwcvt_f_x_v_mask,                   // llvm.riscv.vfwcvt.f.x.v.mask
    riscv_vfwcvt_f_xu_v,                       // llvm.riscv.vfwcvt.f.xu.v
    riscv_vfwcvt_f_xu_v_mask,                  // llvm.riscv.vfwcvt.f.xu.v.mask
    riscv_vfwcvt_rtz_x_f_v,                    // llvm.riscv.vfwcvt.rtz.x.f.v
    riscv_vfwcvt_rtz_x_f_v_mask,               // llvm.riscv.vfwcvt.rtz.x.f.v.mask
    riscv_vfwcvt_rtz_xu_f_v,                   // llvm.riscv.vfwcvt.rtz.xu.f.v
    riscv_vfwcvt_rtz_xu_f_v_mask,              // llvm.riscv.vfwcvt.rtz.xu.f.v.mask
    riscv_vfwcvt_x_f_v,                        // llvm.riscv.vfwcvt.x.f.v
    riscv_vfwcvt_x_f_v_mask,                   // llvm.riscv.vfwcvt.x.f.v.mask
    riscv_vfwcvt_xu_f_v,                       // llvm.riscv.vfwcvt.xu.f.v
    riscv_vfwcvt_xu_f_v_mask,                  // llvm.riscv.vfwcvt.xu.f.v.mask
    riscv_vfwmacc,                             // llvm.riscv.vfwmacc
    riscv_vfwmacc_mask,                        // llvm.riscv.vfwmacc.mask
    riscv_vfwmsac,                             // llvm.riscv.vfwmsac
    riscv_vfwmsac_mask,                        // llvm.riscv.vfwmsac.mask
    riscv_vfwmul,                              // llvm.riscv.vfwmul
    riscv_vfwmul_mask,                         // llvm.riscv.vfwmul.mask
    riscv_vfwnmacc,                            // llvm.riscv.vfwnmacc
    riscv_vfwnmacc_mask,                       // llvm.riscv.vfwnmacc.mask
    riscv_vfwnmsac,                            // llvm.riscv.vfwnmsac
    riscv_vfwnmsac_mask,                       // llvm.riscv.vfwnmsac.mask
    riscv_vfwredosum,                          // llvm.riscv.vfwredosum
    riscv_vfwredosum_mask,                     // llvm.riscv.vfwredosum.mask
    riscv_vfwredusum,                          // llvm.riscv.vfwredusum
    riscv_vfwredusum_mask,                     // llvm.riscv.vfwredusum.mask
    riscv_vfwsub,                              // llvm.riscv.vfwsub
    riscv_vfwsub_mask,                         // llvm.riscv.vfwsub.mask
    riscv_vfwsub_w,                            // llvm.riscv.vfwsub.w
    riscv_vfwsub_w_mask,                       // llvm.riscv.vfwsub.w.mask
    riscv_vid,                                 // llvm.riscv.vid
    riscv_vid_mask,                            // llvm.riscv.vid.mask
    riscv_viota,                               // llvm.riscv.viota
    riscv_viota_mask,                          // llvm.riscv.viota.mask
    riscv_vle,                                 // llvm.riscv.vle
    riscv_vle_mask,                            // llvm.riscv.vle.mask
    riscv_vleff,                               // llvm.riscv.vleff
    riscv_vleff_mask,                          // llvm.riscv.vleff.mask
    riscv_vlm,                                 // llvm.riscv.vlm
    riscv_vloxei,                              // llvm.riscv.vloxei
    riscv_vloxei_mask,                         // llvm.riscv.vloxei.mask
    riscv_vloxseg2,                            // llvm.riscv.vloxseg2
    riscv_vloxseg2_mask,                       // llvm.riscv.vloxseg2.mask
    riscv_vloxseg3,                            // llvm.riscv.vloxseg3
    riscv_vloxseg3_mask,                       // llvm.riscv.vloxseg3.mask
    riscv_vloxseg4,                            // llvm.riscv.vloxseg4
    riscv_vloxseg4_mask,                       // llvm.riscv.vloxseg4.mask
    riscv_vloxseg5,                            // llvm.riscv.vloxseg5
    riscv_vloxseg5_mask,                       // llvm.riscv.vloxseg5.mask
    riscv_vloxseg6,                            // llvm.riscv.vloxseg6
    riscv_vloxseg6_mask,                       // llvm.riscv.vloxseg6.mask
    riscv_vloxseg7,                            // llvm.riscv.vloxseg7
    riscv_vloxseg7_mask,                       // llvm.riscv.vloxseg7.mask
    riscv_vloxseg8,                            // llvm.riscv.vloxseg8
    riscv_vloxseg8_mask,                       // llvm.riscv.vloxseg8.mask
    riscv_vlse,                                // llvm.riscv.vlse
    riscv_vlse_mask,                           // llvm.riscv.vlse.mask
    riscv_vlseg2,                              // llvm.riscv.vlseg2
    riscv_vlseg2_mask,                         // llvm.riscv.vlseg2.mask
    riscv_vlseg2ff,                            // llvm.riscv.vlseg2ff
    riscv_vlseg2ff_mask,                       // llvm.riscv.vlseg2ff.mask
    riscv_vlseg3,                              // llvm.riscv.vlseg3
    riscv_vlseg3_mask,                         // llvm.riscv.vlseg3.mask
    riscv_vlseg3ff,                            // llvm.riscv.vlseg3ff
    riscv_vlseg3ff_mask,                       // llvm.riscv.vlseg3ff.mask
    riscv_vlseg4,                              // llvm.riscv.vlseg4
    riscv_vlseg4_mask,                         // llvm.riscv.vlseg4.mask
    riscv_vlseg4ff,                            // llvm.riscv.vlseg4ff
    riscv_vlseg4ff_mask,                       // llvm.riscv.vlseg4ff.mask
    riscv_vlseg5,                              // llvm.riscv.vlseg5
    riscv_vlseg5_mask,                         // llvm.riscv.vlseg5.mask
    riscv_vlseg5ff,                            // llvm.riscv.vlseg5ff
    riscv_vlseg5ff_mask,                       // llvm.riscv.vlseg5ff.mask
    riscv_vlseg6,                              // llvm.riscv.vlseg6
    riscv_vlseg6_mask,                         // llvm.riscv.vlseg6.mask
    riscv_vlseg6ff,                            // llvm.riscv.vlseg6ff
    riscv_vlseg6ff_mask,                       // llvm.riscv.vlseg6ff.mask
    riscv_vlseg7,                              // llvm.riscv.vlseg7
    riscv_vlseg7_mask,                         // llvm.riscv.vlseg7.mask
    riscv_vlseg7ff,                            // llvm.riscv.vlseg7ff
    riscv_vlseg7ff_mask,                       // llvm.riscv.vlseg7ff.mask
    riscv_vlseg8,                              // llvm.riscv.vlseg8
    riscv_vlseg8_mask,                         // llvm.riscv.vlseg8.mask
    riscv_vlseg8ff,                            // llvm.riscv.vlseg8ff
    riscv_vlseg8ff_mask,                       // llvm.riscv.vlseg8ff.mask
    riscv_vlsseg2,                             // llvm.riscv.vlsseg2
    riscv_vlsseg2_mask,                        // llvm.riscv.vlsseg2.mask
    riscv_vlsseg3,                             // llvm.riscv.vlsseg3
    riscv_vlsseg3_mask,                        // llvm.riscv.vlsseg3.mask
    riscv_vlsseg4,                             // llvm.riscv.vlsseg4
    riscv_vlsseg4_mask,                        // llvm.riscv.vlsseg4.mask
    riscv_vlsseg5,                             // llvm.riscv.vlsseg5
    riscv_vlsseg5_mask,                        // llvm.riscv.vlsseg5.mask
    riscv_vlsseg6,                             // llvm.riscv.vlsseg6
    riscv_vlsseg6_mask,                        // llvm.riscv.vlsseg6.mask
    riscv_vlsseg7,                             // llvm.riscv.vlsseg7
    riscv_vlsseg7_mask,                        // llvm.riscv.vlsseg7.mask
    riscv_vlsseg8,                             // llvm.riscv.vlsseg8
    riscv_vlsseg8_mask,                        // llvm.riscv.vlsseg8.mask
    riscv_vluxei,                              // llvm.riscv.vluxei
    riscv_vluxei_mask,                         // llvm.riscv.vluxei.mask
    riscv_vluxseg2,                            // llvm.riscv.vluxseg2
    riscv_vluxseg2_mask,                       // llvm.riscv.vluxseg2.mask
    riscv_vluxseg3,                            // llvm.riscv.vluxseg3
    riscv_vluxseg3_mask,                       // llvm.riscv.vluxseg3.mask
    riscv_vluxseg4,                            // llvm.riscv.vluxseg4
    riscv_vluxseg4_mask,                       // llvm.riscv.vluxseg4.mask
    riscv_vluxseg5,                            // llvm.riscv.vluxseg5
    riscv_vluxseg5_mask,                       // llvm.riscv.vluxseg5.mask
    riscv_vluxseg6,                            // llvm.riscv.vluxseg6
    riscv_vluxseg6_mask,                       // llvm.riscv.vluxseg6.mask
    riscv_vluxseg7,                            // llvm.riscv.vluxseg7
    riscv_vluxseg7_mask,                       // llvm.riscv.vluxseg7.mask
    riscv_vluxseg8,                            // llvm.riscv.vluxseg8
    riscv_vluxseg8_mask,                       // llvm.riscv.vluxseg8.mask
    riscv_vmacc,                               // llvm.riscv.vmacc
    riscv_vmacc_mask,                          // llvm.riscv.vmacc.mask
    riscv_vmadc,                               // llvm.riscv.vmadc
    riscv_vmadc_carry_in,                      // llvm.riscv.vmadc.carry.in
    riscv_vmadd,                               // llvm.riscv.vmadd
    riscv_vmadd_mask,                          // llvm.riscv.vmadd.mask
    riscv_vmand,                               // llvm.riscv.vmand
    riscv_vmandn,                              // llvm.riscv.vmandn
    riscv_vmax,                                // llvm.riscv.vmax
    riscv_vmax_mask,                           // llvm.riscv.vmax.mask
    riscv_vmaxu,                               // llvm.riscv.vmaxu
    riscv_vmaxu_mask,                          // llvm.riscv.vmaxu.mask
    riscv_vmclr,                               // llvm.riscv.vmclr
    riscv_vmerge,                              // llvm.riscv.vmerge
    riscv_vmfeq,                               // llvm.riscv.vmfeq
    riscv_vmfeq_mask,                          // llvm.riscv.vmfeq.mask
    riscv_vmfge,                               // llvm.riscv.vmfge
    riscv_vmfge_mask,                          // llvm.riscv.vmfge.mask
    riscv_vmfgt,                               // llvm.riscv.vmfgt
    riscv_vmfgt_mask,                          // llvm.riscv.vmfgt.mask
    riscv_vmfle,                               // llvm.riscv.vmfle
    riscv_vmfle_mask,                          // llvm.riscv.vmfle.mask
    riscv_vmflt,                               // llvm.riscv.vmflt
    riscv_vmflt_mask,                          // llvm.riscv.vmflt.mask
    riscv_vmfne,                               // llvm.riscv.vmfne
    riscv_vmfne_mask,                          // llvm.riscv.vmfne.mask
    riscv_vmin,                                // llvm.riscv.vmin
    riscv_vmin_mask,                           // llvm.riscv.vmin.mask
    riscv_vminu,                               // llvm.riscv.vminu
    riscv_vminu_mask,                          // llvm.riscv.vminu.mask
    riscv_vmnand,                              // llvm.riscv.vmnand
    riscv_vmnor,                               // llvm.riscv.vmnor
    riscv_vmor,                                // llvm.riscv.vmor
    riscv_vmorn,                               // llvm.riscv.vmorn
    riscv_vmsbc,                               // llvm.riscv.vmsbc
    riscv_vmsbc_borrow_in,                     // llvm.riscv.vmsbc.borrow.in
    riscv_vmsbf,                               // llvm.riscv.vmsbf
    riscv_vmsbf_mask,                          // llvm.riscv.vmsbf.mask
    riscv_vmseq,                               // llvm.riscv.vmseq
    riscv_vmseq_mask,                          // llvm.riscv.vmseq.mask
    riscv_vmset,                               // llvm.riscv.vmset
    riscv_vmsge,                               // llvm.riscv.vmsge
    riscv_vmsge_mask,                          // llvm.riscv.vmsge.mask
    riscv_vmsgeu,                              // llvm.riscv.vmsgeu
    riscv_vmsgeu_mask,                         // llvm.riscv.vmsgeu.mask
    riscv_vmsgt,                               // llvm.riscv.vmsgt
    riscv_vmsgt_mask,                          // llvm.riscv.vmsgt.mask
    riscv_vmsgtu,                              // llvm.riscv.vmsgtu
    riscv_vmsgtu_mask,                         // llvm.riscv.vmsgtu.mask
    riscv_vmsif,                               // llvm.riscv.vmsif
    riscv_vmsif_mask,                          // llvm.riscv.vmsif.mask
    riscv_vmsle,                               // llvm.riscv.vmsle
    riscv_vmsle_mask,                          // llvm.riscv.vmsle.mask
    riscv_vmsleu,                              // llvm.riscv.vmsleu
    riscv_vmsleu_mask,                         // llvm.riscv.vmsleu.mask
    riscv_vmslt,                               // llvm.riscv.vmslt
    riscv_vmslt_mask,                          // llvm.riscv.vmslt.mask
    riscv_vmsltu,                              // llvm.riscv.vmsltu
    riscv_vmsltu_mask,                         // llvm.riscv.vmsltu.mask
    riscv_vmsne,                               // llvm.riscv.vmsne
    riscv_vmsne_mask,                          // llvm.riscv.vmsne.mask
    riscv_vmsof,                               // llvm.riscv.vmsof
    riscv_vmsof_mask,                          // llvm.riscv.vmsof.mask
    riscv_vmul,                                // llvm.riscv.vmul
    riscv_vmul_mask,                           // llvm.riscv.vmul.mask
    riscv_vmulh,                               // llvm.riscv.vmulh
    riscv_vmulh_mask,                          // llvm.riscv.vmulh.mask
    riscv_vmulhsu,                             // llvm.riscv.vmulhsu
    riscv_vmulhsu_mask,                        // llvm.riscv.vmulhsu.mask
    riscv_vmulhu,                              // llvm.riscv.vmulhu
    riscv_vmulhu_mask,                         // llvm.riscv.vmulhu.mask
    riscv_vmv_s_x,                             // llvm.riscv.vmv.s.x
    riscv_vmv_v_v,                             // llvm.riscv.vmv.v.v
    riscv_vmv_v_x,                             // llvm.riscv.vmv.v.x
    riscv_vmv_x_s,                             // llvm.riscv.vmv.x.s
    riscv_vmxnor,                              // llvm.riscv.vmxnor
    riscv_vmxor,                               // llvm.riscv.vmxor
    riscv_vnclip,                              // llvm.riscv.vnclip
    riscv_vnclip_mask,                         // llvm.riscv.vnclip.mask
    riscv_vnclipu,                             // llvm.riscv.vnclipu
    riscv_vnclipu_mask,                        // llvm.riscv.vnclipu.mask
    riscv_vnmsac,                              // llvm.riscv.vnmsac
    riscv_vnmsac_mask,                         // llvm.riscv.vnmsac.mask
    riscv_vnmsub,                              // llvm.riscv.vnmsub
    riscv_vnmsub_mask,                         // llvm.riscv.vnmsub.mask
    riscv_vnsra,                               // llvm.riscv.vnsra
    riscv_vnsra_mask,                          // llvm.riscv.vnsra.mask
    riscv_vnsrl,                               // llvm.riscv.vnsrl
    riscv_vnsrl_mask,                          // llvm.riscv.vnsrl.mask
    riscv_vor,                                 // llvm.riscv.vor
    riscv_vor_mask,                            // llvm.riscv.vor.mask
    riscv_vredand,                             // llvm.riscv.vredand
    riscv_vredand_mask,                        // llvm.riscv.vredand.mask
    riscv_vredmax,                             // llvm.riscv.vredmax
    riscv_vredmax_mask,                        // llvm.riscv.vredmax.mask
    riscv_vredmaxu,                            // llvm.riscv.vredmaxu
    riscv_vredmaxu_mask,                       // llvm.riscv.vredmaxu.mask
    riscv_vredmin,                             // llvm.riscv.vredmin
    riscv_vredmin_mask,                        // llvm.riscv.vredmin.mask
    riscv_vredminu,                            // llvm.riscv.vredminu
    riscv_vredminu_mask,                       // llvm.riscv.vredminu.mask
    riscv_vredor,                              // llvm.riscv.vredor
    riscv_vredor_mask,                         // llvm.riscv.vredor.mask
    riscv_vredsum,                             // llvm.riscv.vredsum
    riscv_vredsum_mask,                        // llvm.riscv.vredsum.mask
    riscv_vredxor,                             // llvm.riscv.vredxor
    riscv_vredxor_mask,                        // llvm.riscv.vredxor.mask
    riscv_vrem,                                // llvm.riscv.vrem
    riscv_vrem_mask,                           // llvm.riscv.vrem.mask
    riscv_vremu,                               // llvm.riscv.vremu
    riscv_vremu_mask,                          // llvm.riscv.vremu.mask
    riscv_vrgather_vv,                         // llvm.riscv.vrgather.vv
    riscv_vrgather_vv_mask,                    // llvm.riscv.vrgather.vv.mask
    riscv_vrgather_vx,                         // llvm.riscv.vrgather.vx
    riscv_vrgather_vx_mask,                    // llvm.riscv.vrgather.vx.mask
    riscv_vrgatherei16_vv,                     // llvm.riscv.vrgatherei16.vv
    riscv_vrgatherei16_vv_mask,                // llvm.riscv.vrgatherei16.vv.mask
    riscv_vrsub,                               // llvm.riscv.vrsub
    riscv_vrsub_mask,                          // llvm.riscv.vrsub.mask
    riscv_vsadd,                               // llvm.riscv.vsadd
    riscv_vsadd_mask,                          // llvm.riscv.vsadd.mask
    riscv_vsaddu,                              // llvm.riscv.vsaddu
    riscv_vsaddu_mask,                         // llvm.riscv.vsaddu.mask
    riscv_vsbc,                                // llvm.riscv.vsbc
    riscv_vse,                                 // llvm.riscv.vse
    riscv_vse_mask,                            // llvm.riscv.vse.mask
    riscv_vsetvli,                             // llvm.riscv.vsetvli
    riscv_vsetvlimax,                          // llvm.riscv.vsetvlimax
    riscv_vsext,                               // llvm.riscv.vsext
    riscv_vsext_mask,                          // llvm.riscv.vsext.mask
    riscv_vslide1down,                         // llvm.riscv.vslide1down
    riscv_vslide1down_mask,                    // llvm.riscv.vslide1down.mask
    riscv_vslide1up,                           // llvm.riscv.vslide1up
    riscv_vslide1up_mask,                      // llvm.riscv.vslide1up.mask
    riscv_vslidedown,                          // llvm.riscv.vslidedown
    riscv_vslidedown_mask,                     // llvm.riscv.vslidedown.mask
    riscv_vslideup,                            // llvm.riscv.vslideup
    riscv_vslideup_mask,                       // llvm.riscv.vslideup.mask
    riscv_vsll,                                // llvm.riscv.vsll
    riscv_vsll_mask,                           // llvm.riscv.vsll.mask
    riscv_vsm,                                 // llvm.riscv.vsm
    riscv_vsmul,                               // llvm.riscv.vsmul
    riscv_vsmul_mask,                          // llvm.riscv.vsmul.mask
    riscv_vsoxei,                              // llvm.riscv.vsoxei
    riscv_vsoxei_mask,                         // llvm.riscv.vsoxei.mask
    riscv_vsoxseg2,                            // llvm.riscv.vsoxseg2
    riscv_vsoxseg2_mask,                       // llvm.riscv.vsoxseg2.mask
    riscv_vsoxseg3,                            // llvm.riscv.vsoxseg3
    riscv_vsoxseg3_mask,                       // llvm.riscv.vsoxseg3.mask
    riscv_vsoxseg4,                            // llvm.riscv.vsoxseg4
    riscv_vsoxseg4_mask,                       // llvm.riscv.vsoxseg4.mask
    riscv_vsoxseg5,                            // llvm.riscv.vsoxseg5
    riscv_vsoxseg5_mask,                       // llvm.riscv.vsoxseg5.mask
    riscv_vsoxseg6,                            // llvm.riscv.vsoxseg6
    riscv_vsoxseg6_mask,                       // llvm.riscv.vsoxseg6.mask
    riscv_vsoxseg7,                            // llvm.riscv.vsoxseg7
    riscv_vsoxseg7_mask,                       // llvm.riscv.vsoxseg7.mask
    riscv_vsoxseg8,                            // llvm.riscv.vsoxseg8
    riscv_vsoxseg8_mask,                       // llvm.riscv.vsoxseg8.mask
    riscv_vsra,                                // llvm.riscv.vsra
    riscv_vsra_mask,                           // llvm.riscv.vsra.mask
    riscv_vsrl,                                // llvm.riscv.vsrl
    riscv_vsrl_mask,                           // llvm.riscv.vsrl.mask
    riscv_vsse,                                // llvm.riscv.vsse
    riscv_vsse_mask,                           // llvm.riscv.vsse.mask
    riscv_vsseg2,                              // llvm.riscv.vsseg2
    riscv_vsseg2_mask,                         // llvm.riscv.vsseg2.mask
    riscv_vsseg3,                              // llvm.riscv.vsseg3
    riscv_vsseg3_mask,                         // llvm.riscv.vsseg3.mask
    riscv_vsseg4,                              // llvm.riscv.vsseg4
    riscv_vsseg4_mask,                         // llvm.riscv.vsseg4.mask
    riscv_vsseg5,                              // llvm.riscv.vsseg5
    riscv_vsseg5_mask,                         // llvm.riscv.vsseg5.mask
    riscv_vsseg6,                              // llvm.riscv.vsseg6
    riscv_vsseg6_mask,                         // llvm.riscv.vsseg6.mask
    riscv_vsseg7,                              // llvm.riscv.vsseg7
    riscv_vsseg7_mask,                         // llvm.riscv.vsseg7.mask
    riscv_vsseg8,                              // llvm.riscv.vsseg8
    riscv_vsseg8_mask,                         // llvm.riscv.vsseg8.mask
    riscv_vssra,                               // llvm.riscv.vssra
    riscv_vssra_mask,                          // llvm.riscv.vssra.mask
    riscv_vssrl,                               // llvm.riscv.vssrl
    riscv_vssrl_mask,                          // llvm.riscv.vssrl.mask
    riscv_vssseg2,                             // llvm.riscv.vssseg2
    riscv_vssseg2_mask,                        // llvm.riscv.vssseg2.mask
    riscv_vssseg3,                             // llvm.riscv.vssseg3
    riscv_vssseg3_mask,                        // llvm.riscv.vssseg3.mask
    riscv_vssseg4,                             // llvm.riscv.vssseg4
    riscv_vssseg4_mask,                        // llvm.riscv.vssseg4.mask
    riscv_vssseg5,                             // llvm.riscv.vssseg5
    riscv_vssseg5_mask,                        // llvm.riscv.vssseg5.mask
    riscv_vssseg6,                             // llvm.riscv.vssseg6
    riscv_vssseg6_mask,                        // llvm.riscv.vssseg6.mask
    riscv_vssseg7,                             // llvm.riscv.vssseg7
    riscv_vssseg7_mask,                        // llvm.riscv.vssseg7.mask
    riscv_vssseg8,                             // llvm.riscv.vssseg8
    riscv_vssseg8_mask,                        // llvm.riscv.vssseg8.mask
    riscv_vssub,                               // llvm.riscv.vssub
    riscv_vssub_mask,                          // llvm.riscv.vssub.mask
    riscv_vssubu,                              // llvm.riscv.vssubu
    riscv_vssubu_mask,                         // llvm.riscv.vssubu.mask
    riscv_vsub,                                // llvm.riscv.vsub
    riscv_vsub_mask,                           // llvm.riscv.vsub.mask
    riscv_vsuxei,                              // llvm.riscv.vsuxei
    riscv_vsuxei_mask,                         // llvm.riscv.vsuxei.mask
    riscv_vsuxseg2,                            // llvm.riscv.vsuxseg2
    riscv_vsuxseg2_mask,                       // llvm.riscv.vsuxseg2.mask
    riscv_vsuxseg3,                            // llvm.riscv.vsuxseg3
    riscv_vsuxseg3_mask,                       // llvm.riscv.vsuxseg3.mask
    riscv_vsuxseg4,                            // llvm.riscv.vsuxseg4
    riscv_vsuxseg4_mask,                       // llvm.riscv.vsuxseg4.mask
    riscv_vsuxseg5,                            // llvm.riscv.vsuxseg5
    riscv_vsuxseg5_mask,                       // llvm.riscv.vsuxseg5.mask
    riscv_vsuxseg6,                            // llvm.riscv.vsuxseg6
    riscv_vsuxseg6_mask,                       // llvm.riscv.vsuxseg6.mask
    riscv_vsuxseg7,                            // llvm.riscv.vsuxseg7
    riscv_vsuxseg7_mask,                       // llvm.riscv.vsuxseg7.mask
    riscv_vsuxseg8,                            // llvm.riscv.vsuxseg8
    riscv_vsuxseg8_mask,                       // llvm.riscv.vsuxseg8.mask
    riscv_vwadd,                               // llvm.riscv.vwadd
    riscv_vwadd_mask,                          // llvm.riscv.vwadd.mask
    riscv_vwadd_w,                             // llvm.riscv.vwadd.w
    riscv_vwadd_w_mask,                        // llvm.riscv.vwadd.w.mask
    riscv_vwaddu,                              // llvm.riscv.vwaddu
    riscv_vwaddu_mask,                         // llvm.riscv.vwaddu.mask
    riscv_vwaddu_w,                            // llvm.riscv.vwaddu.w
    riscv_vwaddu_w_mask,                       // llvm.riscv.vwaddu.w.mask
    riscv_vwmacc,                              // llvm.riscv.vwmacc
    riscv_vwmacc_mask,                         // llvm.riscv.vwmacc.mask
    riscv_vwmaccsu,                            // llvm.riscv.vwmaccsu
    riscv_vwmaccsu_mask,                       // llvm.riscv.vwmaccsu.mask
    riscv_vwmaccu,                             // llvm.riscv.vwmaccu
    riscv_vwmaccu_mask,                        // llvm.riscv.vwmaccu.mask
    riscv_vwmaccus,                            // llvm.riscv.vwmaccus
    riscv_vwmaccus_mask,                       // llvm.riscv.vwmaccus.mask
    riscv_vwmul,                               // llvm.riscv.vwmul
    riscv_vwmul_mask,                          // llvm.riscv.vwmul.mask
    riscv_vwmulsu,                             // llvm.riscv.vwmulsu
    riscv_vwmulsu_mask,                        // llvm.riscv.vwmulsu.mask
    riscv_vwmulu,                              // llvm.riscv.vwmulu
    riscv_vwmulu_mask,                         // llvm.riscv.vwmulu.mask
    riscv_vwredsum,                            // llvm.riscv.vwredsum
    riscv_vwredsum_mask,                       // llvm.riscv.vwredsum.mask
    riscv_vwredsumu,                           // llvm.riscv.vwredsumu
    riscv_vwredsumu_mask,                      // llvm.riscv.vwredsumu.mask
    riscv_vwsub,                               // llvm.riscv.vwsub
    riscv_vwsub_mask,                          // llvm.riscv.vwsub.mask
    riscv_vwsub_w,                             // llvm.riscv.vwsub.w
    riscv_vwsub_w_mask,                        // llvm.riscv.vwsub.w.mask
    riscv_vwsubu,                              // llvm.riscv.vwsubu
    riscv_vwsubu_mask,                         // llvm.riscv.vwsubu.mask
    riscv_vwsubu_w,                            // llvm.riscv.vwsubu.w
    riscv_vwsubu_w_mask,                       // llvm.riscv.vwsubu.w.mask
    riscv_vxor,                                // llvm.riscv.vxor
    riscv_vxor_mask,                           // llvm.riscv.vxor.mask
    riscv_vzext,                               // llvm.riscv.vzext
    riscv_vzext_mask,                          // llvm.riscv.vzext.mask
    riscv_xperm4,                              // llvm.riscv.xperm4
    riscv_xperm8,                              // llvm.riscv.xperm8
    riscv_zip,                                 // llvm.riscv.zip
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif

//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the Hexagon-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Definitions for all Hexagon intrinsics.
//
// All Hexagon intrinsics start with "llvm.hexagon.".
let TargetPrefix = "hexagon" in {
  /// Hexagon_Intrinsic - Base class for the majority of Hexagon intrinsics.
  class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
                              list<LLVMType> param_types,
                              list<IntrinsicProperty> properties>
    : ClangBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
      DefaultAttrsIntrinsic<ret_types, param_types, properties>;

  /// Hexagon_NonGCC_Intrinsic - Base class for bitcode convertible Hexagon
  /// intrinsics.
  class Hexagon_NonGCC_Intrinsic<list<LLVMType> ret_types,
                                 list<LLVMType> param_types,
                                 list<IntrinsicProperty> properties>
    : DefaultAttrsIntrinsic<ret_types, param_types, properties>;
}

class Hexagon_mem_memmemsi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
                           llvm_i32_ty],
                          [IntrArgMemOnly]>;

class Hexagon_mem_memsisi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
                           llvm_i32_ty],
                          [IntrWriteMem]>;

class Hexagon_mem_memdisi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
                           llvm_i32_ty],
                          [IntrWriteMem]>;

class Hexagon_mem_memmemsisi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
                           llvm_i32_ty, llvm_i32_ty],
                          [IntrArgMemOnly, ImmArg<ArgIndex<3>>]>;

class Hexagon_mem_memsisisi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
                           llvm_i32_ty, llvm_i32_ty],
                          [IntrWriteMem, ImmArg<ArgIndex<3>>]>;

class Hexagon_mem_memdisisi_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
                          [llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
                           llvm_i32_ty, llvm_i32_ty],
                          [IntrWriteMem, ImmArg<ArgIndex<3>>]>;

//
// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldd :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldd">;
//
// BUILTIN_INFO_NONCONST(circ_ldw,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldw :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldw">;
//
// BUILTIN_INFO_NONCONST(circ_ldh,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldh :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldh">;
//
// BUILTIN_INFO_NONCONST(circ_lduh,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_lduh :
Hexagon_mem_memmemsisi_Intrinsic<"circ_lduh">;
//
// BUILTIN_INFO_NONCONST(circ_ldb,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldb :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldb">;
//
// BUILTIN_INFO_NONCONST(circ_ldub,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldub :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldub">;

//
// BUILTIN_INFO_NONCONST(circ_std,PTR_ftype_PTRDISISI,4)
//
def int_hexagon_circ_std :
Hexagon_mem_memdisisi_Intrinsic<"circ_std">;
//
// BUILTIN_INFO_NONCONST(circ_stw,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_stw :
Hexagon_mem_memsisisi_Intrinsic<"circ_stw">;
//
// BUILTIN_INFO_NONCONST(circ_sth,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_sth :
Hexagon_mem_memsisisi_Intrinsic<"circ_sth">;
//
// BUILTIN_INFO_NONCONST(circ_sthhi,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_sthhi :
Hexagon_mem_memsisisi_Intrinsic<"circ_sthhi">;
//
// BUILTIN_INFO_NONCONST(circ_stb,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_stb :
Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;

def int_hexagon_prefetch :
Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;

// Mark locked loads as read/write to prevent any accidental reordering.
// These don't use Hexagon_Intrinsic, because they are not nosync, and as such
// cannot use default attributes.
let TargetPrefix = "hexagon" in {
  def int_hexagon_L2_loadw_locked :
  ClangBuiltin<"__builtin_HEXAGON_L2_loadw_locked">,
  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
        [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_hexagon_L4_loadd_locked :
  ClangBuiltin<"__builtin__HEXAGON_L4_loadd_locked">,
  Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
        [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;

  def int_hexagon_S2_storew_locked :
  ClangBuiltin<"__builtin_HEXAGON_S2_storew_locked">,
  Intrinsic<[llvm_i32_ty],
        [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_hexagon_S4_stored_locked :
  ClangBuiltin<"__builtin_HEXAGON_S4_stored_locked">,
  Intrinsic<[llvm_i32_ty],
        [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
}
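
// A minimal IR sketch (an illustration, not part of this file) of pairing the
// locked intrinsics into a load-locked/store-conditional retry loop; the
// return value of the store is assumed to be nonzero on success:
//
//   retry:
//     %old  = call i32 @llvm.hexagon.L2.loadw.locked(ptr %p)
//     %new  = add i32 %old, 1
//     %ok   = call i32 @llvm.hexagon.S2.storew.locked(ptr %p, i32 %new)
//     %done = icmp ne i32 %ok, 0
//     br i1 %done, label %exit, label %retry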

def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
    [], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>, WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>]>;

def int_hexagon_vmemset : Hexagon_Intrinsic<"hexagon_vmemset",
    [], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;

multiclass Hexagon_custom_circ_ld_Intrinsic<LLVMType ElTy> {
  def NAME#_pci : Hexagon_NonGCC_Intrinsic<
    [ElTy, llvm_ptr_ty],
    [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
  def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
    [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
}

defm int_hexagon_L2_loadrub : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadrb : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadruh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadrh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadri : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadrd : Hexagon_custom_circ_ld_Intrinsic<llvm_i64_ty>;

multiclass Hexagon_custom_circ_st_Intrinsic<LLVMType ElTy> {
  def NAME#_pci : Hexagon_NonGCC_Intrinsic<
    [llvm_ptr_ty],
    [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
  def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
    [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
}

defm int_hexagon_S2_storerb : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerh : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerf : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storeri : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerd : Hexagon_custom_circ_st_Intrinsic<llvm_i64_ty>;

// The front-end emits the intrinsic call with only two arguments. The third
// argument of the builtin is already consumed by the front-end, which uses it
// to write to memory by generating a store (see the IR sketch below).
class Hexagon_custom_brev_ld_Intrinsic<LLVMType ElTy>
 : Hexagon_NonGCC_Intrinsic<
    [ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
    [IntrReadMem]>;

def int_hexagon_L2_loadrub_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrb_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadruh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadri_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrd_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i64_ty>;
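
// A hedged IR sketch of a bit-reversed load: the intrinsic returns both the
// loaded value and the updated pointer; the in-memory update described in the
// note above is generated separately by the front-end:
//
//   %r   = call { i32, ptr } @llvm.hexagon.L2.loadri.pbr(ptr %base, i32 %mod)
//   %val = extractvalue { i32, ptr } %r, 0
//   %nxt = extractvalue { i32, ptr } %r, 1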

def int_hexagon_S2_storerb_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
def int_hexagon_S2_storerh_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;

// tag : V6_vrmpybub_rtt
class Hexagon_v32i32_v16i32i64_rtt_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
       [IntrNoMem]>;

// tag : V6_vrmpybub_rtt_128B
class Hexagon_v64i32_v32i32i64_rtt_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
       [IntrNoMem]>;

// tag : V6_vrmpybub_rtt_acc
class Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
       [IntrNoMem]>;

// tag : V6_vrmpybub_rtt_acc_128B
class Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<string GCCIntSuffix>
  : Hexagon_Intrinsic<GCCIntSuffix,
       [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
       [IntrNoMem]>;

def int_hexagon_V6_vrmpybub_rtt :
Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;

def int_hexagon_V6_vrmpybub_rtt_128B :
Hexagon_v64i32_v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;

def int_hexagon_V6_vrmpybub_rtt_acc :
Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;

def int_hexagon_V6_vrmpybub_rtt_acc_128B :
Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;

def int_hexagon_V6_vrmpyub_rtt :
Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;

def int_hexagon_V6_vrmpyub_rtt_128B :
Hexagon_v64i32_v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;

def int_hexagon_V6_vrmpyub_rtt_acc :
Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;

def int_hexagon_V6_vrmpyub_rtt_acc_128B :
Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;

// HVX conditional loads/stores

class Hexagon_pred_vload_imm<LLVMType ValTy>
  : Hexagon_NonGCC_Intrinsic<
      [ValTy],
      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
      [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
       ImmArg<ArgIndex<2>>]>;

class Hexagon_pred_vload_imm_64B:  Hexagon_pred_vload_imm<llvm_v16i32_ty>;
class Hexagon_pred_vload_imm_128B: Hexagon_pred_vload_imm<llvm_v32i32_ty>;

def int_hexagon_V6_vL32b_pred_ai:            Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_npred_ai:           Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_nt_pred_ai:         Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_nt_npred_ai:        Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_pred_ai_128B:      Hexagon_pred_vload_imm_128B;
def int_hexagon_V6_vL32b_npred_ai_128B:     Hexagon_pred_vload_imm_128B;
def int_hexagon_V6_vL32b_nt_pred_ai_128B:   Hexagon_pred_vload_imm_128B;
def int_hexagon_V6_vL32b_nt_npred_ai_128B:  Hexagon_pred_vload_imm_128B;
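
// A hedged IR sketch of a predicated 64B load with an immediate byte offset
// (the i32 operand is an ImmArg, so it must be a constant):
//
//   %v = call <16 x i32> @llvm.hexagon.V6.vL32b.pred.ai(i1 %p, ptr %a, i32 0)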

class Hexagon_pred_vload_upd<LLVMType ValTy, bit TakesImm>
  : Hexagon_NonGCC_Intrinsic<
      [ValTy, llvm_ptr_ty],
      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
      !if(TakesImm,
          [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
           ImmArg<ArgIndex<2>>],
          [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>])>;

class Hexagon_pred_vload_upd_64B<bit TakesImm>
  : Hexagon_pred_vload_upd<llvm_v16i32_ty, TakesImm>;
class Hexagon_pred_vload_upd_128B<bit TakesImm>
  : Hexagon_pred_vload_upd<llvm_v32i32_ty, TakesImm>;

def int_hexagon_V6_vL32b_pred_pi:            Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_npred_pi:           Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_nt_pred_pi:         Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_nt_npred_pi:        Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_pred_pi_128B:      Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_npred_pi_128B:     Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_nt_pred_pi_128B:   Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_nt_npred_pi_128B:  Hexagon_pred_vload_upd_128B<1>;

def int_hexagon_V6_vL32b_pred_ppu:           Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_npred_ppu:          Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_nt_pred_ppu:        Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_nt_npred_ppu:       Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_pred_ppu_128B:     Hexagon_pred_vload_upd_128B<0>;
def int_hexagon_V6_vL32b_npred_ppu_128B:    Hexagon_pred_vload_upd_128B<0>;
def int_hexagon_V6_vL32b_nt_pred_ppu_128B:  Hexagon_pred_vload_upd_128B<0>;
def int_hexagon_V6_vL32b_nt_npred_ppu_128B: Hexagon_pred_vload_upd_128B<0>;
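
// A hedged IR sketch of the post-increment form: it returns the loaded vector
// together with the updated pointer:
//
//   %r   = call { <16 x i32>, ptr }
//              @llvm.hexagon.V6.vL32b.pred.pi(i1 %p, ptr %a, i32 64)
//   %vec = extractvalue { <16 x i32>, ptr } %r, 0
//   %nxt = extractvalue { <16 x i32>, ptr } %r, 1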


class Hexagon_pred_vstore_imm<LLVMType ValTy>
  : Hexagon_NonGCC_Intrinsic<
      [],
      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
      [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
       ImmArg<ArgIndex<2>>]>;

class Hexagon_pred_vstore_imm_64B:  Hexagon_pred_vstore_imm<llvm_v16i32_ty>;
class Hexagon_pred_vstore_imm_128B: Hexagon_pred_vstore_imm<llvm_v32i32_ty>;

def int_hexagon_V6_vS32b_pred_ai:            Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_npred_ai:           Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32Ub_pred_ai:           Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32Ub_npred_ai:          Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_nt_pred_ai:         Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_nt_npred_ai:        Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_pred_ai_128B:      Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32b_npred_ai_128B:     Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32Ub_pred_ai_128B:     Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32Ub_npred_ai_128B:    Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32b_nt_pred_ai_128B:   Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32b_nt_npred_ai_128B:  Hexagon_pred_vstore_imm_128B;

class Hexagon_pred_vstore_upd<LLVMType ValTy, bit TakesImm>
  : Hexagon_NonGCC_Intrinsic<
      [llvm_ptr_ty],
      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
      !if(TakesImm,
          [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
           ImmArg<ArgIndex<2>>],
          [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>])>;

class Hexagon_pred_vstore_upd_64B<bit TakesImm>
  : Hexagon_pred_vstore_upd<llvm_v16i32_ty, TakesImm>;
class Hexagon_pred_vstore_upd_128B<bit TakesImm>
  : Hexagon_pred_vstore_upd<llvm_v32i32_ty, TakesImm>;

def int_hexagon_V6_vS32b_pred_pi:            Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_npred_pi:           Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32Ub_pred_pi:           Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32Ub_npred_pi:          Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_nt_pred_pi:         Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_nt_npred_pi:        Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_pred_pi_128B:      Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_npred_pi_128B:     Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32Ub_pred_pi_128B:     Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32Ub_npred_pi_128B:    Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_nt_pred_pi_128B:   Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_nt_npred_pi_128B:  Hexagon_pred_vstore_upd_128B<1>;

def int_hexagon_V6_vS32b_pred_ppu:           Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_npred_ppu:          Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32Ub_pred_ppu:          Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32Ub_npred_ppu:         Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_nt_pred_ppu:        Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_nt_npred_ppu:       Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_pred_ppu_128B:     Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32b_npred_ppu_128B:    Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32Ub_pred_ppu_128B:    Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32Ub_npred_ppu_128B:   Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32b_nt_pred_ppu_128B:  Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32b_nt_npred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
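
// A hedged IR sketch of the corresponding predicated store with pointer
// update; only the updated pointer is returned:
//
//   %nxt = call ptr @llvm.hexagon.V6.vS32b.pred.pi(i1 %p, ptr %a, i32 64,
//                                                  <16 x i32> %v)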


// HVX Vector predicate casts.
// These intrinsics do not emit (nor do they correspond to) any instructions;
// they are no-ops.

def int_hexagon_V6_pred_typecast :
Hexagon_NonGCC_Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;

def int_hexagon_V6_pred_typecast_128B :
Hexagon_NonGCC_Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
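
// A hedged IR sketch (the concrete i1 vector types depend on the HVX mode and
// element width); both the result and operand types are overloaded:
//
//   %q = call <16 x i1> @llvm.hexagon.V6.pred.typecast.v16i1.v64i1(<64 x i1> %p)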

// HVX full-precision multiplication.
// V6_vmpyss_parts(Vu,Vv) = (MulHS(Vu,Vv),  Mul(Vu,Vv))
// V6_vmpyuu_parts(Vu,Vv) = (MulHU(Vu,Vv),  Mul(Vu,Vv))
// V6_vmpyus_parts(Vu,Vv) = (MulHUS(Vu,Vv), Mul(Vu,Vv))
//
// Both the (purportedly) 64b and the _128B versions are exactly equivalent
// regardless of the HVX mode; they are both defined for consistency.
// The purpose of these intrinsics is to have a uniform way of multiplying two
// integer vectors in LLVM IR. Many HVX multiply operations interleave the
// even-odd results, except for the 32x32 multiplications. Also, different
// HVX versions provide different instructions for this, so the instruction
// choice is deferred to isel.
class Hexagon_vv_vv_pure:
  Hexagon_NonGCC_Intrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>],
    [LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem]>;

def int_hexagon_V6_vmpyss_parts:      Hexagon_vv_vv_pure;
def int_hexagon_V6_vmpyss_parts_128B: Hexagon_vv_vv_pure;
def int_hexagon_V6_vmpyuu_parts:      Hexagon_vv_vv_pure;
def int_hexagon_V6_vmpyuu_parts_128B: Hexagon_vv_vv_pure;
def int_hexagon_V6_vmpyus_parts:      Hexagon_vv_vv_pure;
def int_hexagon_V6_vmpyus_parts_128B: Hexagon_vv_vv_pure;
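
// A hedged IR sketch of a full 32x32 multiply in 64B mode: the first result
// is the high half and the second the low half, with no even/odd
// interleaving:
//
//   %r  = call { <16 x i32>, <16 x i32> }
//             @llvm.hexagon.V6.vmpyss.parts.v16i32(<16 x i32> %u, <16 x i32> %v)
//   %hi = extractvalue { <16 x i32>, <16 x i32> } %r, 0
//   %lo = extractvalue { <16 x i32>, <16 x i32> } %r, 1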


// Masked vector stores
//
// These are all deprecated; the intrinsics matching the instruction names
// (e.g. int_hexagon_V6_vS32b_qpred_ai) should be used instead.

class Hexagon_custom_vms_Intrinsic
  : Hexagon_NonGCC_Intrinsic<
       [], [llvm_v64i1_ty,llvm_ptr_ty,llvm_v16i32_ty], [IntrWriteMem]>;

class Hexagon_custom_vms_Intrinsic_128B
  : Hexagon_NonGCC_Intrinsic<
       [], [llvm_v128i1_ty,llvm_ptr_ty,llvm_v32i32_ty], [IntrWriteMem]>;

def int_hexagon_V6_vmaskedstoreq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstorenq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstorentq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstorentnq: Hexagon_custom_vms_Intrinsic;

def int_hexagon_V6_vmaskedstoreq_128B: Hexagon_custom_vms_Intrinsic_128B;
def int_hexagon_V6_vmaskedstorenq_128B: Hexagon_custom_vms_Intrinsic_128B;
def int_hexagon_V6_vmaskedstorentq_128B: Hexagon_custom_vms_Intrinsic_128B;
def int_hexagon_V6_vmaskedstorentnq_128B: Hexagon_custom_vms_Intrinsic_128B;


// Intrinsic for instrumentation-based profiling using a custom handler. The
// name of the handler is passed as the first operand to the intrinsic. The
// handler can take only one int32 input, which is passed as the second
// operand to the intrinsic.
def int_hexagon_instrprof_custom
    : Hexagon_NonGCC_Intrinsic<[],
                               [llvm_ptr_ty, llvm_i32_ty],
                               [IntrInaccessibleMemOnly]>;
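
// A hedged IR sketch (the handler name here is illustrative): the handler
// name is passed as a pointer to a global string, and the single i32 payload
// follows:
//
//   @.handler = private constant [11 x i8] c"my_handler\00"
//   ...
//   call void @llvm.hexagon.instrprof.custom(ptr @.handler, i32 %counter)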


include "llvm/IR/IntrinsicsHexagonDep.td"

//===- IntrinsicsSystemZ.td - Defines SystemZ intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the SystemZ-specific intrinsics.
//
//===----------------------------------------------------------------------===//

class SystemZUnaryConv<string name, LLVMType result, LLVMType arg>
  : ClangBuiltin<"__builtin_s390_" # name>,
    Intrinsic<[result], [arg], [IntrNoMem]>;

class SystemZUnary<string name, LLVMType type>
  : SystemZUnaryConv<name, type, type>;

class SystemZUnaryConvCC<LLVMType result, LLVMType arg>
  : Intrinsic<[result, llvm_i32_ty], [arg], [IntrNoMem]>;

class SystemZUnaryCC<LLVMType type>
  : SystemZUnaryConvCC<type, type>;

class SystemZBinaryConv<string name, LLVMType result, LLVMType arg>
  : ClangBuiltin<"__builtin_s390_" # name>,
    Intrinsic<[result], [arg, arg], [IntrNoMem]>;

class SystemZBinary<string name, LLVMType type>
  : SystemZBinaryConv<name, type, type>;

class SystemZBinaryInt<string name, LLVMType type>
  : ClangBuiltin<"__builtin_s390_" # name>,
    Intrinsic<[type], [type, llvm_i32_ty], [IntrNoMem]>;

class SystemZBinaryConvCC<LLVMType result, LLVMType arg>
  : Intrinsic<[result, llvm_i32_ty], [arg, arg], [IntrNoMem]>;

class SystemZBinaryConvIntCC<LLVMType result, LLVMType arg>
  : Intrinsic<[result, llvm_i32_ty], [arg, llvm_i32_ty],
              [IntrNoMem, ImmArg<ArgIndex<1>>]>;

class SystemZBinaryCC<LLVMType type>
  : SystemZBinaryConvCC<type, type>;

class SystemZTernaryConv<string name, LLVMType result, LLVMType arg>
  : ClangBuiltin<"__builtin_s390_" # name>,
    Intrinsic<[result], [arg, arg, result], [IntrNoMem]>;

class SystemZTernaryConvCC<LLVMType result, LLVMType arg>
  : Intrinsic<[result, llvm_i32_ty], [arg, arg, result], [IntrNoMem]>;

class SystemZTernary<string name, LLVMType type>
  : SystemZTernaryConv<name, type, type>;

class SystemZTernaryInt<string name, LLVMType type>
  : ClangBuiltin<"__builtin_s390_" # name>,
    Intrinsic<[type], [type, type, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;

class SystemZTernaryIntCC<LLVMType type>
  : Intrinsic<[type, llvm_i32_ty], [type, type, llvm_i32_ty],
              [IntrNoMem, ImmArg<ArgIndex<2>>]>;

class SystemZQuaternaryInt<string name, LLVMType type>
  : ClangBuiltin<"__builtin_s390_" # name>,
    Intrinsic<[type], [type, type, type, llvm_i32_ty],
    [IntrNoMem, ImmArg<ArgIndex<3>>]>;

class SystemZQuaternaryIntCC<LLVMType type>
  : Intrinsic<[type, llvm_i32_ty], [type, type, type, llvm_i32_ty],
              [IntrNoMem, ImmArg<ArgIndex<3>>]>;

multiclass SystemZUnaryExtBHF<string name> {
  def b : SystemZUnaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
  def h : SystemZUnaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
  def f : SystemZUnaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
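
// For example, the later `defm int_s390_vuph : SystemZUnaryExtBHF<"vuph">;`
// expands to int_s390_vuphb, int_s390_vuphh and int_s390_vuphf, each widening
// the element type of its argument (v16i8 -> v8i16, v8i16 -> v4i32,
// v4i32 -> v2i64).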

multiclass SystemZUnaryExtBHWF<string name> {
  def b  : SystemZUnaryConv<name#"b",  llvm_v8i16_ty, llvm_v16i8_ty>;
  def hw : SystemZUnaryConv<name#"hw", llvm_v4i32_ty, llvm_v8i16_ty>;
  def f  : SystemZUnaryConv<name#"f",  llvm_v2i64_ty, llvm_v4i32_ty>;
}

multiclass SystemZUnaryBHF<string name> {
  def b : SystemZUnary<name#"b", llvm_v16i8_ty>;
  def h : SystemZUnary<name#"h", llvm_v8i16_ty>;
  def f : SystemZUnary<name#"f", llvm_v4i32_ty>;
}

multiclass SystemZUnaryBHFG<string name> : SystemZUnaryBHF<name> {
  def g : SystemZUnary<name#"g", llvm_v2i64_ty>;
}

multiclass SystemZUnaryCCBHF {
  def bs : SystemZUnaryCC<llvm_v16i8_ty>;
  def hs : SystemZUnaryCC<llvm_v8i16_ty>;
  def fs : SystemZUnaryCC<llvm_v4i32_ty>;
}

multiclass SystemZBinaryTruncHFG<string name> {
  def h : SystemZBinaryConv<name#"h", llvm_v16i8_ty, llvm_v8i16_ty>;
  def f : SystemZBinaryConv<name#"f", llvm_v8i16_ty, llvm_v4i32_ty>;
  def g : SystemZBinaryConv<name#"g", llvm_v4i32_ty, llvm_v2i64_ty>;
}

multiclass SystemZBinaryTruncCCHFG {
  def hs : SystemZBinaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
  def fs : SystemZBinaryConvCC<llvm_v8i16_ty, llvm_v4i32_ty>;
  def gs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v2i64_ty>;
}

multiclass SystemZBinaryExtBHF<string name> {
  def b : SystemZBinaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
  def h : SystemZBinaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
  def f : SystemZBinaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}

multiclass SystemZBinaryExtBHFG<string name> : SystemZBinaryExtBHF<name> {
  def g : SystemZBinaryConv<name#"g", llvm_v16i8_ty, llvm_v2i64_ty>;
}

multiclass SystemZBinaryBHF<string name> {
  def b : SystemZBinary<name#"b", llvm_v16i8_ty>;
  def h : SystemZBinary<name#"h", llvm_v8i16_ty>;
  def f : SystemZBinary<name#"f", llvm_v4i32_ty>;
}

multiclass SystemZBinaryBHFG<string name> : SystemZBinaryBHF<name> {
  def g : SystemZBinary<name#"g", llvm_v2i64_ty>;
}

multiclass SystemZBinaryIntBHFG<string name> {
  def b : SystemZBinaryInt<name#"b", llvm_v16i8_ty>;
  def h : SystemZBinaryInt<name#"h", llvm_v8i16_ty>;
  def f : SystemZBinaryInt<name#"f", llvm_v4i32_ty>;
  def g : SystemZBinaryInt<name#"g", llvm_v2i64_ty>;
}

multiclass SystemZBinaryCCBHF {
  def bs : SystemZBinaryCC<llvm_v16i8_ty>;
  def hs : SystemZBinaryCC<llvm_v8i16_ty>;
  def fs : SystemZBinaryCC<llvm_v4i32_ty>;
}

multiclass SystemZCompareBHFG {
  def bs : SystemZBinaryCC<llvm_v16i8_ty>;
  def hs : SystemZBinaryCC<llvm_v8i16_ty>;
  def fs : SystemZBinaryCC<llvm_v4i32_ty>;
  def gs : SystemZBinaryCC<llvm_v2i64_ty>;
}

multiclass SystemZTernaryExtBHF<string name> {
  def b : SystemZTernaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
  def h : SystemZTernaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
  def f : SystemZTernaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}

multiclass SystemZTernaryExtBHFG<string name> : SystemZTernaryExtBHF<name> {
  def g : SystemZTernaryConv<name#"g", llvm_v16i8_ty, llvm_v2i64_ty>;
}

multiclass SystemZTernaryBHF<string name> {
  def b : SystemZTernary<name#"b", llvm_v16i8_ty>;
  def h : SystemZTernary<name#"h", llvm_v8i16_ty>;
  def f : SystemZTernary<name#"f", llvm_v4i32_ty>;
}

multiclass SystemZTernaryIntBHF<string name> {
  def b : SystemZTernaryInt<name#"b", llvm_v16i8_ty>;
  def h : SystemZTernaryInt<name#"h", llvm_v8i16_ty>;
  def f : SystemZTernaryInt<name#"f", llvm_v4i32_ty>;
}

multiclass SystemZTernaryIntCCBHF {
  def bs : SystemZTernaryIntCC<llvm_v16i8_ty>;
  def hs : SystemZTernaryIntCC<llvm_v8i16_ty>;
  def fs : SystemZTernaryIntCC<llvm_v4i32_ty>;
}

multiclass SystemZQuaternaryIntBHF<string name> {
  def b : SystemZQuaternaryInt<name#"b", llvm_v16i8_ty>;
  def h : SystemZQuaternaryInt<name#"h", llvm_v8i16_ty>;
  def f : SystemZQuaternaryInt<name#"f", llvm_v4i32_ty>;
}

multiclass SystemZQuaternaryIntBHFG<string name> :
  SystemZQuaternaryIntBHF<name> {
  def g : SystemZQuaternaryInt<name#"g", llvm_v2i64_ty>;
}

multiclass SystemZQuaternaryIntCCBHF {
  def bs : SystemZQuaternaryIntCC<llvm_v16i8_ty>;
  def hs : SystemZQuaternaryIntCC<llvm_v8i16_ty>;
  def fs : SystemZQuaternaryIntCC<llvm_v4i32_ty>;
}

//===----------------------------------------------------------------------===//
//
// Transactional-execution intrinsics
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "s390" in {
  def int_s390_tbegin : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                  [IntrNoDuplicate, IntrWriteMem]>;

  def int_s390_tbegin_nofloat : Intrinsic<[llvm_i32_ty],
                                          [llvm_ptr_ty, llvm_i32_ty],
                                          [IntrNoDuplicate, IntrWriteMem]>;

  def int_s390_tbeginc : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
                                   [IntrNoDuplicate, IntrWriteMem]>;

  def int_s390_tabort : Intrinsic<[], [llvm_i64_ty],
                                  [IntrNoReturn, Throws, IntrWriteMem]>;

  def int_s390_tend : ClangBuiltin<"__builtin_tend">,
                      Intrinsic<[llvm_i32_ty], []>;

  def int_s390_etnd : ClangBuiltin<"__builtin_tx_nesting_depth">,
                      Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;

  def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty],
                                 [IntrArgMemOnly, IntrWriteMem]>;

  def int_s390_ppa_txassist : ClangBuiltin<"__builtin_tx_assist">,
                              Intrinsic<[], [llvm_i32_ty]>;
}
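
// A hedged IR sketch: tbegin takes a pointer to a transaction diagnostic
// block (or null) and a control mask (0xff0c below is only an illustrative
// value), and returns the condition code; 0 is assumed to mean the
// transaction started:
//
//   %cc      = call i32 @llvm.s390.tbegin(ptr %tdb, i32 65292)
//   %started = icmp eq i32 %cc, 0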

//===----------------------------------------------------------------------===//
//
// Vector intrinsics
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "s390" in {
  def int_s390_lcbb : ClangBuiltin<"__builtin_s390_lcbb">,
                      Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_s390_vlbb : ClangBuiltin<"__builtin_s390_vlbb">,
                      Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
                                [IntrReadMem, IntrArgMemOnly, ImmArg<ArgIndex<1>>]>;

  def int_s390_vll : ClangBuiltin<"__builtin_s390_vll">,
                     Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
                               [IntrReadMem, IntrArgMemOnly]>;

  def int_s390_vpdi : ClangBuiltin<"__builtin_s390_vpdi">,
                      Intrinsic<[llvm_v2i64_ty],
                                [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
                                [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_s390_vperm : ClangBuiltin<"__builtin_s390_vperm">,
                       Intrinsic<[llvm_v16i8_ty],
                                 [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                                 [IntrNoMem]>;

  defm int_s390_vpks : SystemZBinaryTruncHFG<"vpks">;
  defm int_s390_vpks : SystemZBinaryTruncCCHFG;

  defm int_s390_vpkls : SystemZBinaryTruncHFG<"vpkls">;
  defm int_s390_vpkls : SystemZBinaryTruncCCHFG;

  def int_s390_vstl : ClangBuiltin<"__builtin_s390_vstl">,
                      Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
                                [IntrArgMemOnly, IntrWriteMem]>;

  defm int_s390_vupl  : SystemZUnaryExtBHWF<"vupl">;
  defm int_s390_vupll : SystemZUnaryExtBHF<"vupll">;

  defm int_s390_vuph  : SystemZUnaryExtBHF<"vuph">;
  defm int_s390_vuplh : SystemZUnaryExtBHF<"vuplh">;

  defm int_s390_vacc : SystemZBinaryBHFG<"vacc">;

  def int_s390_vaq    : SystemZBinary<"vaq",     llvm_v16i8_ty>;
  def int_s390_vacq   : SystemZTernary<"vacq",   llvm_v16i8_ty>;
  def int_s390_vaccq  : SystemZBinary<"vaccq",   llvm_v16i8_ty>;
  def int_s390_vacccq : SystemZTernary<"vacccq", llvm_v16i8_ty>;

  defm int_s390_vavg  : SystemZBinaryBHFG<"vavg">;
  defm int_s390_vavgl : SystemZBinaryBHFG<"vavgl">;

  def int_s390_vcksm : SystemZBinary<"vcksm", llvm_v4i32_ty>;

  defm int_s390_vgfm  : SystemZBinaryExtBHFG<"vgfm">;
  defm int_s390_vgfma : SystemZTernaryExtBHFG<"vgfma">;

  defm int_s390_vmah  : SystemZTernaryBHF<"vmah">;
  defm int_s390_vmalh : SystemZTernaryBHF<"vmalh">;
  defm int_s390_vmae  : SystemZTernaryExtBHF<"vmae">;
  defm int_s390_vmale : SystemZTernaryExtBHF<"vmale">;
  defm int_s390_vmao  : SystemZTernaryExtBHF<"vmao">;
  defm int_s390_vmalo : SystemZTernaryExtBHF<"vmalo">;

  defm int_s390_vmh  : SystemZBinaryBHF<"vmh">;
  defm int_s390_vmlh : SystemZBinaryBHF<"vmlh">;
  defm int_s390_vme  : SystemZBinaryExtBHF<"vme">;
  defm int_s390_vmle : SystemZBinaryExtBHF<"vmle">;
  defm int_s390_vmo  : SystemZBinaryExtBHF<"vmo">;
  defm int_s390_vmlo : SystemZBinaryExtBHF<"vmlo">;

  defm int_s390_verllv : SystemZBinaryBHFG<"verllv">;
  defm int_s390_verll  : SystemZBinaryIntBHFG<"verll">;
  defm int_s390_verim  : SystemZQuaternaryIntBHFG<"verim">;

  def int_s390_vsl   : SystemZBinary<"vsl",   llvm_v16i8_ty>;
  def int_s390_vslb  : SystemZBinary<"vslb",  llvm_v16i8_ty>;
  def int_s390_vsra  : SystemZBinary<"vsra",  llvm_v16i8_ty>;
  def int_s390_vsrab : SystemZBinary<"vsrab", llvm_v16i8_ty>;
  def int_s390_vsrl  : SystemZBinary<"vsrl",  llvm_v16i8_ty>;
  def int_s390_vsrlb : SystemZBinary<"vsrlb", llvm_v16i8_ty>;

  def int_s390_vsldb : ClangBuiltin<"__builtin_s390_vsldb">,
                       Intrinsic<[llvm_v16i8_ty],
                                 [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  defm int_s390_vscbi : SystemZBinaryBHFG<"vscbi">;

  def int_s390_vsq     : SystemZBinary<"vsq",      llvm_v16i8_ty>;
  def int_s390_vsbiq   : SystemZTernary<"vsbiq",   llvm_v16i8_ty>;
  def int_s390_vscbiq  : SystemZBinary<"vscbiq",   llvm_v16i8_ty>;
  def int_s390_vsbcbiq : SystemZTernary<"vsbcbiq", llvm_v16i8_ty>;

  def int_s390_vsumb : SystemZBinaryConv<"vsumb", llvm_v4i32_ty, llvm_v16i8_ty>;
  def int_s390_vsumh : SystemZBinaryConv<"vsumh", llvm_v4i32_ty, llvm_v8i16_ty>;

  def int_s390_vsumgh : SystemZBinaryConv<"vsumgh", llvm_v2i64_ty,
                                          llvm_v8i16_ty>;
  def int_s390_vsumgf : SystemZBinaryConv<"vsumgf", llvm_v2i64_ty,
                                          llvm_v4i32_ty>;

  def int_s390_vsumqf : SystemZBinaryConv<"vsumqf", llvm_v16i8_ty,
                                          llvm_v4i32_ty>;
  def int_s390_vsumqg : SystemZBinaryConv<"vsumqg", llvm_v16i8_ty,
                                          llvm_v2i64_ty>;

  def int_s390_vtm : SystemZBinaryConv<"vtm", llvm_i32_ty, llvm_v16i8_ty>;

  defm int_s390_vceq : SystemZCompareBHFG;
  defm int_s390_vch  : SystemZCompareBHFG;
  defm int_s390_vchl : SystemZCompareBHFG;

  defm int_s390_vfae  : SystemZTernaryIntBHF<"vfae">;
  defm int_s390_vfae  : SystemZTernaryIntCCBHF;
  defm int_s390_vfaez : SystemZTernaryIntBHF<"vfaez">;
  defm int_s390_vfaez : SystemZTernaryIntCCBHF;

  defm int_s390_vfee  : SystemZBinaryBHF<"vfee">;
  defm int_s390_vfee  : SystemZBinaryCCBHF;
  defm int_s390_vfeez : SystemZBinaryBHF<"vfeez">;
  defm int_s390_vfeez : SystemZBinaryCCBHF;

  defm int_s390_vfene  : SystemZBinaryBHF<"vfene">;
  defm int_s390_vfene  : SystemZBinaryCCBHF;
  defm int_s390_vfenez : SystemZBinaryBHF<"vfenez">;
  defm int_s390_vfenez : SystemZBinaryCCBHF;

  defm int_s390_vistr : SystemZUnaryBHF<"vistr">;
  defm int_s390_vistr : SystemZUnaryCCBHF;

  defm int_s390_vstrc  : SystemZQuaternaryIntBHF<"vstrc">;
  defm int_s390_vstrc  : SystemZQuaternaryIntCCBHF;
  defm int_s390_vstrcz : SystemZQuaternaryIntBHF<"vstrcz">;
  defm int_s390_vstrcz : SystemZQuaternaryIntCCBHF;

  def int_s390_vfcedbs  : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
  def int_s390_vfchdbs  : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
  def int_s390_vfchedbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;

  def int_s390_vftcidb : SystemZBinaryConvIntCC<llvm_v2i64_ty, llvm_v2f64_ty>;

  def int_s390_vfidb : Intrinsic<[llvm_v2f64_ty],
                                 [llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

  // Instructions from the Vector Enhancements Facility 1
  def int_s390_vbperm : SystemZBinaryConv<"vbperm", llvm_v2i64_ty,
                                          llvm_v16i8_ty>;

  def int_s390_vmslg  : ClangBuiltin<"__builtin_s390_vmslg">,
                        Intrinsic<[llvm_v16i8_ty],
                                  [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v16i8_ty,
                                   llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_s390_vfmaxdb : Intrinsic<[llvm_v2f64_ty],
                                   [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_s390_vfmindb : Intrinsic<[llvm_v2f64_ty],
                                   [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_s390_vfmaxsb : Intrinsic<[llvm_v4f32_ty],
                                   [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_s390_vfminsb : Intrinsic<[llvm_v4f32_ty],
                                   [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_s390_vfcesbs  : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
  def int_s390_vfchsbs  : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
  def int_s390_vfchesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;

  def int_s390_vftcisb : SystemZBinaryConvIntCC<llvm_v4i32_ty, llvm_v4f32_ty>;

  def int_s390_vfisb : Intrinsic<[llvm_v4f32_ty],
                                 [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
                                 [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;

  // Instructions from the Vector Packed Decimal Facility
  def int_s390_vlrl : ClangBuiltin<"__builtin_s390_vlrl">,
                      Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
                                [IntrReadMem, IntrArgMemOnly]>;

  def int_s390_vstrl : ClangBuiltin<"__builtin_s390_vstrl">,
                       Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
                                 [IntrArgMemOnly, IntrWriteMem]>;

  // Instructions from the Vector Enhancements Facility 2
  def int_s390_vsld : ClangBuiltin<"__builtin_s390_vsld">,
                      Intrinsic<[llvm_v16i8_ty],
                                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                                [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_s390_vsrd : ClangBuiltin<"__builtin_s390_vsrd">,
                      Intrinsic<[llvm_v16i8_ty],
                                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                                [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_s390_vstrsb : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v16i8_ty>;
  def int_s390_vstrsh : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
  def int_s390_vstrsf : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v4i32_ty>;
  def int_s390_vstrszb : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v16i8_ty>;
  def int_s390_vstrszh : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
  def int_s390_vstrszf : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v4i32_ty>;

  // Instructions from the NNP-assist Facility
  def int_s390_vclfnhs : ClangBuiltin<"__builtin_s390_vclfnhs">,
                         Intrinsic<[llvm_v4f32_ty],
                                   [llvm_v8i16_ty, llvm_i32_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_s390_vclfnls : ClangBuiltin<"__builtin_s390_vclfnls">,
                         Intrinsic<[llvm_v4f32_ty],
                                   [llvm_v8i16_ty, llvm_i32_ty],
                                   [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_s390_vcrnfs : ClangBuiltin<"__builtin_s390_vcrnfs">,
                        Intrinsic<[llvm_v8i16_ty],
                                  [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
                                  [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_s390_vcfn : ClangBuiltin<"__builtin_s390_vcfn">,
                      Intrinsic<[llvm_v8i16_ty],
                                [llvm_v8i16_ty, llvm_i32_ty],
                                [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_s390_vcnf : ClangBuiltin<"__builtin_s390_vcnf">,
                      Intrinsic<[llvm_v8i16_ty],
                                [llvm_v8i16_ty, llvm_i32_ty],
                                [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}

//===----------------------------------------------------------------------===//
//
// Misc intrinsics
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "s390" in {
  def int_s390_sfpc : ClangBuiltin<"__builtin_s390_sfpc">,
                      Intrinsic<[], [llvm_i32_ty], []>;
  def int_s390_efpc : ClangBuiltin<"__builtin_s390_efpc">,
                      Intrinsic<[llvm_i32_ty], [], []>;

  def int_s390_tdc : Intrinsic<[llvm_i32_ty], [llvm_anyfloat_ty, llvm_i64_ty],
                               [IntrNoMem]>;
}

/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_AARCH64_ENUMS_H
#define LLVM_IR_INTRINSIC_AARCH64_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum AARCH64Intrinsics : unsigned {
// Enum values for intrinsics
    aarch64_addg = 428,                              // llvm.aarch64.addg
    aarch64_break,                             // llvm.aarch64.break
    aarch64_clrex,                             // llvm.aarch64.clrex
    aarch64_cls,                               // llvm.aarch64.cls
    aarch64_cls64,                             // llvm.aarch64.cls64
    aarch64_crc32b,                            // llvm.aarch64.crc32b
    aarch64_crc32cb,                           // llvm.aarch64.crc32cb
    aarch64_crc32ch,                           // llvm.aarch64.crc32ch
    aarch64_crc32cw,                           // llvm.aarch64.crc32cw
    aarch64_crc32cx,                           // llvm.aarch64.crc32cx
    aarch64_crc32h,                            // llvm.aarch64.crc32h
    aarch64_crc32w,                            // llvm.aarch64.crc32w
    aarch64_crc32x,                            // llvm.aarch64.crc32x
    aarch64_crypto_aesd,                       // llvm.aarch64.crypto.aesd
    aarch64_crypto_aese,                       // llvm.aarch64.crypto.aese
    aarch64_crypto_aesimc,                     // llvm.aarch64.crypto.aesimc
    aarch64_crypto_aesmc,                      // llvm.aarch64.crypto.aesmc
    aarch64_crypto_bcaxs,                      // llvm.aarch64.crypto.bcaxs
    aarch64_crypto_bcaxu,                      // llvm.aarch64.crypto.bcaxu
    aarch64_crypto_eor3s,                      // llvm.aarch64.crypto.eor3s
    aarch64_crypto_eor3u,                      // llvm.aarch64.crypto.eor3u
    aarch64_crypto_rax1,                       // llvm.aarch64.crypto.rax1
    aarch64_crypto_sha1c,                      // llvm.aarch64.crypto.sha1c
    aarch64_crypto_sha1h,                      // llvm.aarch64.crypto.sha1h
    aarch64_crypto_sha1m,                      // llvm.aarch64.crypto.sha1m
    aarch64_crypto_sha1p,                      // llvm.aarch64.crypto.sha1p
    aarch64_crypto_sha1su0,                    // llvm.aarch64.crypto.sha1su0
    aarch64_crypto_sha1su1,                    // llvm.aarch64.crypto.sha1su1
    aarch64_crypto_sha256h,                    // llvm.aarch64.crypto.sha256h
    aarch64_crypto_sha256h2,                   // llvm.aarch64.crypto.sha256h2
    aarch64_crypto_sha256su0,                  // llvm.aarch64.crypto.sha256su0
    aarch64_crypto_sha256su1,                  // llvm.aarch64.crypto.sha256su1
    aarch64_crypto_sha512h,                    // llvm.aarch64.crypto.sha512h
    aarch64_crypto_sha512h2,                   // llvm.aarch64.crypto.sha512h2
    aarch64_crypto_sha512su0,                  // llvm.aarch64.crypto.sha512su0
    aarch64_crypto_sha512su1,                  // llvm.aarch64.crypto.sha512su1
    aarch64_crypto_sm3partw1,                  // llvm.aarch64.crypto.sm3partw1
    aarch64_crypto_sm3partw2,                  // llvm.aarch64.crypto.sm3partw2
    aarch64_crypto_sm3ss1,                     // llvm.aarch64.crypto.sm3ss1
    aarch64_crypto_sm3tt1a,                    // llvm.aarch64.crypto.sm3tt1a
    aarch64_crypto_sm3tt1b,                    // llvm.aarch64.crypto.sm3tt1b
    aarch64_crypto_sm3tt2a,                    // llvm.aarch64.crypto.sm3tt2a
    aarch64_crypto_sm3tt2b,                    // llvm.aarch64.crypto.sm3tt2b
    aarch64_crypto_sm4e,                       // llvm.aarch64.crypto.sm4e
    aarch64_crypto_sm4ekey,                    // llvm.aarch64.crypto.sm4ekey
    aarch64_crypto_xar,                        // llvm.aarch64.crypto.xar
    aarch64_dmb,                               // llvm.aarch64.dmb
    aarch64_dsb,                               // llvm.aarch64.dsb
    aarch64_fjcvtzs,                           // llvm.aarch64.fjcvtzs
    aarch64_frint32x,                          // llvm.aarch64.frint32x
    aarch64_frint32z,                          // llvm.aarch64.frint32z
    aarch64_frint64x,                          // llvm.aarch64.frint64x
    aarch64_frint64z,                          // llvm.aarch64.frint64z
    aarch64_get_fpcr,                          // llvm.aarch64.get.fpcr
    aarch64_gmi,                               // llvm.aarch64.gmi
    aarch64_hint,                              // llvm.aarch64.hint
    aarch64_irg,                               // llvm.aarch64.irg
    aarch64_irg_sp,                            // llvm.aarch64.irg.sp
    aarch64_isb,                               // llvm.aarch64.isb
    aarch64_ld64b,                             // llvm.aarch64.ld64b
    aarch64_ldaxp,                             // llvm.aarch64.ldaxp
    aarch64_ldaxr,                             // llvm.aarch64.ldaxr
    aarch64_ldg,                               // llvm.aarch64.ldg
    aarch64_ldxp,                              // llvm.aarch64.ldxp
    aarch64_ldxr,                              // llvm.aarch64.ldxr
    aarch64_mops_memset_tag,                   // llvm.aarch64.mops.memset.tag
    aarch64_neon_abs,                          // llvm.aarch64.neon.abs
    aarch64_neon_addhn,                        // llvm.aarch64.neon.addhn
    aarch64_neon_addp,                         // llvm.aarch64.neon.addp
    aarch64_neon_bfcvt,                        // llvm.aarch64.neon.bfcvt
    aarch64_neon_bfcvtn,                       // llvm.aarch64.neon.bfcvtn
    aarch64_neon_bfcvtn2,                      // llvm.aarch64.neon.bfcvtn2
    aarch64_neon_bfdot,                        // llvm.aarch64.neon.bfdot
    aarch64_neon_bfmlalb,                      // llvm.aarch64.neon.bfmlalb
    aarch64_neon_bfmlalt,                      // llvm.aarch64.neon.bfmlalt
    aarch64_neon_bfmmla,                       // llvm.aarch64.neon.bfmmla
    aarch64_neon_cls,                          // llvm.aarch64.neon.cls
    aarch64_neon_fabd,                         // llvm.aarch64.neon.fabd
    aarch64_neon_facge,                        // llvm.aarch64.neon.facge
    aarch64_neon_facgt,                        // llvm.aarch64.neon.facgt
    aarch64_neon_faddp,                        // llvm.aarch64.neon.faddp
    aarch64_neon_faddv,                        // llvm.aarch64.neon.faddv
    aarch64_neon_fcvtas,                       // llvm.aarch64.neon.fcvtas
    aarch64_neon_fcvtau,                       // llvm.aarch64.neon.fcvtau
    aarch64_neon_fcvtms,                       // llvm.aarch64.neon.fcvtms
    aarch64_neon_fcvtmu,                       // llvm.aarch64.neon.fcvtmu
    aarch64_neon_fcvtns,                       // llvm.aarch64.neon.fcvtns
    aarch64_neon_fcvtnu,                       // llvm.aarch64.neon.fcvtnu
    aarch64_neon_fcvtps,                       // llvm.aarch64.neon.fcvtps
    aarch64_neon_fcvtpu,                       // llvm.aarch64.neon.fcvtpu
    aarch64_neon_fcvtxn,                       // llvm.aarch64.neon.fcvtxn
    aarch64_neon_fcvtzs,                       // llvm.aarch64.neon.fcvtzs
    aarch64_neon_fcvtzu,                       // llvm.aarch64.neon.fcvtzu
    aarch64_neon_fmax,                         // llvm.aarch64.neon.fmax
    aarch64_neon_fmaxnm,                       // llvm.aarch64.neon.fmaxnm
    aarch64_neon_fmaxnmp,                      // llvm.aarch64.neon.fmaxnmp
    aarch64_neon_fmaxnmv,                      // llvm.aarch64.neon.fmaxnmv
    aarch64_neon_fmaxp,                        // llvm.aarch64.neon.fmaxp
    aarch64_neon_fmaxv,                        // llvm.aarch64.neon.fmaxv
    aarch64_neon_fmin,                         // llvm.aarch64.neon.fmin
    aarch64_neon_fminnm,                       // llvm.aarch64.neon.fminnm
    aarch64_neon_fminnmp,                      // llvm.aarch64.neon.fminnmp
    aarch64_neon_fminnmv,                      // llvm.aarch64.neon.fminnmv
    aarch64_neon_fminp,                        // llvm.aarch64.neon.fminp
    aarch64_neon_fminv,                        // llvm.aarch64.neon.fminv
    aarch64_neon_fmlal,                        // llvm.aarch64.neon.fmlal
    aarch64_neon_fmlal2,                       // llvm.aarch64.neon.fmlal2
    aarch64_neon_fmlsl,                        // llvm.aarch64.neon.fmlsl
    aarch64_neon_fmlsl2,                       // llvm.aarch64.neon.fmlsl2
    aarch64_neon_fmulx,                        // llvm.aarch64.neon.fmulx
    aarch64_neon_frecpe,                       // llvm.aarch64.neon.frecpe
    aarch64_neon_frecps,                       // llvm.aarch64.neon.frecps
    aarch64_neon_frecpx,                       // llvm.aarch64.neon.frecpx
    aarch64_neon_frint32x,                     // llvm.aarch64.neon.frint32x
    aarch64_neon_frint32z,                     // llvm.aarch64.neon.frint32z
    aarch64_neon_frint64x,                     // llvm.aarch64.neon.frint64x
    aarch64_neon_frint64z,                     // llvm.aarch64.neon.frint64z
    aarch64_neon_frsqrte,                      // llvm.aarch64.neon.frsqrte
    aarch64_neon_frsqrts,                      // llvm.aarch64.neon.frsqrts
    aarch64_neon_ld1x2,                        // llvm.aarch64.neon.ld1x2
    aarch64_neon_ld1x3,                        // llvm.aarch64.neon.ld1x3
    aarch64_neon_ld1x4,                        // llvm.aarch64.neon.ld1x4
    aarch64_neon_ld2,                          // llvm.aarch64.neon.ld2
    aarch64_neon_ld2lane,                      // llvm.aarch64.neon.ld2lane
    aarch64_neon_ld2r,                         // llvm.aarch64.neon.ld2r
    aarch64_neon_ld3,                          // llvm.aarch64.neon.ld3
    aarch64_neon_ld3lane,                      // llvm.aarch64.neon.ld3lane
    aarch64_neon_ld3r,                         // llvm.aarch64.neon.ld3r
    aarch64_neon_ld4,                          // llvm.aarch64.neon.ld4
    aarch64_neon_ld4lane,                      // llvm.aarch64.neon.ld4lane
    aarch64_neon_ld4r,                         // llvm.aarch64.neon.ld4r
    aarch64_neon_pmul,                         // llvm.aarch64.neon.pmul
    aarch64_neon_pmull,                        // llvm.aarch64.neon.pmull
    aarch64_neon_pmull64,                      // llvm.aarch64.neon.pmull64
    aarch64_neon_raddhn,                       // llvm.aarch64.neon.raddhn
    aarch64_neon_rshrn,                        // llvm.aarch64.neon.rshrn
    aarch64_neon_rsubhn,                       // llvm.aarch64.neon.rsubhn
    aarch64_neon_sabd,                         // llvm.aarch64.neon.sabd
    aarch64_neon_saddlp,                       // llvm.aarch64.neon.saddlp
    aarch64_neon_saddlv,                       // llvm.aarch64.neon.saddlv
    aarch64_neon_saddv,                        // llvm.aarch64.neon.saddv
    aarch64_neon_scalar_sqxtn,                 // llvm.aarch64.neon.scalar.sqxtn
    aarch64_neon_scalar_sqxtun,                // llvm.aarch64.neon.scalar.sqxtun
    aarch64_neon_scalar_uqxtn,                 // llvm.aarch64.neon.scalar.uqxtn
    aarch64_neon_sdot,                         // llvm.aarch64.neon.sdot
    aarch64_neon_shadd,                        // llvm.aarch64.neon.shadd
    aarch64_neon_shll,                         // llvm.aarch64.neon.shll
    aarch64_neon_shsub,                        // llvm.aarch64.neon.shsub
    aarch64_neon_smax,                         // llvm.aarch64.neon.smax
    aarch64_neon_smaxp,                        // llvm.aarch64.neon.smaxp
    aarch64_neon_smaxv,                        // llvm.aarch64.neon.smaxv
    aarch64_neon_smin,                         // llvm.aarch64.neon.smin
    aarch64_neon_sminp,                        // llvm.aarch64.neon.sminp
    aarch64_neon_sminv,                        // llvm.aarch64.neon.sminv
    aarch64_neon_smmla,                        // llvm.aarch64.neon.smmla
    aarch64_neon_smull,                        // llvm.aarch64.neon.smull
    aarch64_neon_sqabs,                        // llvm.aarch64.neon.sqabs
    aarch64_neon_sqadd,                        // llvm.aarch64.neon.sqadd
    aarch64_neon_sqdmulh,                      // llvm.aarch64.neon.sqdmulh
    aarch64_neon_sqdmulh_lane,                 // llvm.aarch64.neon.sqdmulh.lane
    aarch64_neon_sqdmulh_laneq,                // llvm.aarch64.neon.sqdmulh.laneq
    aarch64_neon_sqdmull,                      // llvm.aarch64.neon.sqdmull
    aarch64_neon_sqdmulls_scalar,              // llvm.aarch64.neon.sqdmulls.scalar
    aarch64_neon_sqneg,                        // llvm.aarch64.neon.sqneg
    aarch64_neon_sqrdmlah,                     // llvm.aarch64.neon.sqrdmlah
    aarch64_neon_sqrdmlsh,                     // llvm.aarch64.neon.sqrdmlsh
    aarch64_neon_sqrdmulh,                     // llvm.aarch64.neon.sqrdmulh
    aarch64_neon_sqrdmulh_lane,                // llvm.aarch64.neon.sqrdmulh.lane
    aarch64_neon_sqrdmulh_laneq,               // llvm.aarch64.neon.sqrdmulh.laneq
    aarch64_neon_sqrshl,                       // llvm.aarch64.neon.sqrshl
    aarch64_neon_sqrshrn,                      // llvm.aarch64.neon.sqrshrn
    aarch64_neon_sqrshrun,                     // llvm.aarch64.neon.sqrshrun
    aarch64_neon_sqshl,                        // llvm.aarch64.neon.sqshl
    aarch64_neon_sqshlu,                       // llvm.aarch64.neon.sqshlu
    aarch64_neon_sqshrn,                       // llvm.aarch64.neon.sqshrn
    aarch64_neon_sqshrun,                      // llvm.aarch64.neon.sqshrun
    aarch64_neon_sqsub,                        // llvm.aarch64.neon.sqsub
    aarch64_neon_sqxtn,                        // llvm.aarch64.neon.sqxtn
    aarch64_neon_sqxtun,                       // llvm.aarch64.neon.sqxtun
    aarch64_neon_srhadd,                       // llvm.aarch64.neon.srhadd
    aarch64_neon_srshl,                        // llvm.aarch64.neon.srshl
    aarch64_neon_sshl,                         // llvm.aarch64.neon.sshl
    aarch64_neon_sshll,                        // llvm.aarch64.neon.sshll
    aarch64_neon_st1x2,                        // llvm.aarch64.neon.st1x2
    aarch64_neon_st1x3,                        // llvm.aarch64.neon.st1x3
    aarch64_neon_st1x4,                        // llvm.aarch64.neon.st1x4
    aarch64_neon_st2,                          // llvm.aarch64.neon.st2
    aarch64_neon_st2lane,                      // llvm.aarch64.neon.st2lane
    aarch64_neon_st3,                          // llvm.aarch64.neon.st3
    aarch64_neon_st3lane,                      // llvm.aarch64.neon.st3lane
    aarch64_neon_st4,                          // llvm.aarch64.neon.st4
    aarch64_neon_st4lane,                      // llvm.aarch64.neon.st4lane
    aarch64_neon_subhn,                        // llvm.aarch64.neon.subhn
    aarch64_neon_suqadd,                       // llvm.aarch64.neon.suqadd
    aarch64_neon_tbl1,                         // llvm.aarch64.neon.tbl1
    aarch64_neon_tbl2,                         // llvm.aarch64.neon.tbl2
    aarch64_neon_tbl3,                         // llvm.aarch64.neon.tbl3
    aarch64_neon_tbl4,                         // llvm.aarch64.neon.tbl4
    aarch64_neon_tbx1,                         // llvm.aarch64.neon.tbx1
    aarch64_neon_tbx2,                         // llvm.aarch64.neon.tbx2
    aarch64_neon_tbx3,                         // llvm.aarch64.neon.tbx3
    aarch64_neon_tbx4,                         // llvm.aarch64.neon.tbx4
    aarch64_neon_uabd,                         // llvm.aarch64.neon.uabd
    aarch64_neon_uaddlp,                       // llvm.aarch64.neon.uaddlp
    aarch64_neon_uaddlv,                       // llvm.aarch64.neon.uaddlv
    aarch64_neon_uaddv,                        // llvm.aarch64.neon.uaddv
    aarch64_neon_udot,                         // llvm.aarch64.neon.udot
    aarch64_neon_uhadd,                        // llvm.aarch64.neon.uhadd
    aarch64_neon_uhsub,                        // llvm.aarch64.neon.uhsub
    aarch64_neon_umax,                         // llvm.aarch64.neon.umax
    aarch64_neon_umaxp,                        // llvm.aarch64.neon.umaxp
    aarch64_neon_umaxv,                        // llvm.aarch64.neon.umaxv
    aarch64_neon_umin,                         // llvm.aarch64.neon.umin
    aarch64_neon_uminp,                        // llvm.aarch64.neon.uminp
    aarch64_neon_uminv,                        // llvm.aarch64.neon.uminv
    aarch64_neon_ummla,                        // llvm.aarch64.neon.ummla
    aarch64_neon_umull,                        // llvm.aarch64.neon.umull
    aarch64_neon_uqadd,                        // llvm.aarch64.neon.uqadd
    aarch64_neon_uqrshl,                       // llvm.aarch64.neon.uqrshl
    aarch64_neon_uqrshrn,                      // llvm.aarch64.neon.uqrshrn
    aarch64_neon_uqshl,                        // llvm.aarch64.neon.uqshl
    aarch64_neon_uqshrn,                       // llvm.aarch64.neon.uqshrn
    aarch64_neon_uqsub,                        // llvm.aarch64.neon.uqsub
    aarch64_neon_uqxtn,                        // llvm.aarch64.neon.uqxtn
    aarch64_neon_urecpe,                       // llvm.aarch64.neon.urecpe
    aarch64_neon_urhadd,                       // llvm.aarch64.neon.urhadd
    aarch64_neon_urshl,                        // llvm.aarch64.neon.urshl
    aarch64_neon_ursqrte,                      // llvm.aarch64.neon.ursqrte
    aarch64_neon_usdot,                        // llvm.aarch64.neon.usdot
    aarch64_neon_ushl,                         // llvm.aarch64.neon.ushl
    aarch64_neon_ushll,                        // llvm.aarch64.neon.ushll
    aarch64_neon_usmmla,                       // llvm.aarch64.neon.usmmla
    aarch64_neon_usqadd,                       // llvm.aarch64.neon.usqadd
    aarch64_neon_vcadd_rot270,                 // llvm.aarch64.neon.vcadd.rot270
    aarch64_neon_vcadd_rot90,                  // llvm.aarch64.neon.vcadd.rot90
    aarch64_neon_vcmla_rot0,                   // llvm.aarch64.neon.vcmla.rot0
    aarch64_neon_vcmla_rot180,                 // llvm.aarch64.neon.vcmla.rot180
    aarch64_neon_vcmla_rot270,                 // llvm.aarch64.neon.vcmla.rot270
    aarch64_neon_vcmla_rot90,                  // llvm.aarch64.neon.vcmla.rot90
    aarch64_neon_vcopy_lane,                   // llvm.aarch64.neon.vcopy.lane
    aarch64_neon_vcvtfp2fxs,                   // llvm.aarch64.neon.vcvtfp2fxs
    aarch64_neon_vcvtfp2fxu,                   // llvm.aarch64.neon.vcvtfp2fxu
    aarch64_neon_vcvtfp2hf,                    // llvm.aarch64.neon.vcvtfp2hf
    aarch64_neon_vcvtfxs2fp,                   // llvm.aarch64.neon.vcvtfxs2fp
    aarch64_neon_vcvtfxu2fp,                   // llvm.aarch64.neon.vcvtfxu2fp
    aarch64_neon_vcvthf2fp,                    // llvm.aarch64.neon.vcvthf2fp
    aarch64_neon_vsli,                         // llvm.aarch64.neon.vsli
    aarch64_neon_vsri,                         // llvm.aarch64.neon.vsri
    aarch64_prefetch,                          // llvm.aarch64.prefetch
    aarch64_rndr,                              // llvm.aarch64.rndr
    aarch64_rndrrs,                            // llvm.aarch64.rndrrs
    aarch64_sdiv,                              // llvm.aarch64.sdiv
    aarch64_set_fpcr,                          // llvm.aarch64.set.fpcr
    aarch64_settag,                            // llvm.aarch64.settag
    aarch64_settag_zero,                       // llvm.aarch64.settag.zero
    aarch64_sisd_fabd,                         // llvm.aarch64.sisd.fabd
    aarch64_sisd_fcvtxn,                       // llvm.aarch64.sisd.fcvtxn
    aarch64_sme_add_write_single_za_vg1x2,     // llvm.aarch64.sme.add.write.single.za.vg1x2
    aarch64_sme_add_write_single_za_vg1x4,     // llvm.aarch64.sme.add.write.single.za.vg1x4
    aarch64_sme_add_write_za_vg1x2,            // llvm.aarch64.sme.add.write.za.vg1x2
    aarch64_sme_add_write_za_vg1x4,            // llvm.aarch64.sme.add.write.za.vg1x4
    aarch64_sme_add_za32_vg1x2,                // llvm.aarch64.sme.add.za32.vg1x2
    aarch64_sme_add_za32_vg1x4,                // llvm.aarch64.sme.add.za32.vg1x4
    aarch64_sme_add_za64_vg1x2,                // llvm.aarch64.sme.add.za64.vg1x2
    aarch64_sme_add_za64_vg1x4,                // llvm.aarch64.sme.add.za64.vg1x4
    aarch64_sme_addha,                         // llvm.aarch64.sme.addha
    aarch64_sme_addva,                         // llvm.aarch64.sme.addva
    aarch64_sme_bmopa_za32,                    // llvm.aarch64.sme.bmopa.za32
    aarch64_sme_bmops_za32,                    // llvm.aarch64.sme.bmops.za32
    aarch64_sme_cntsb,                         // llvm.aarch64.sme.cntsb
    aarch64_sme_cntsd,                         // llvm.aarch64.sme.cntsd
    aarch64_sme_cntsh,                         // llvm.aarch64.sme.cntsh
    aarch64_sme_cntsw,                         // llvm.aarch64.sme.cntsw
    aarch64_sme_fdot_lane_za32_vg1x2,          // llvm.aarch64.sme.fdot.lane.za32.vg1x2
    aarch64_sme_fdot_lane_za32_vg1x4,          // llvm.aarch64.sme.fdot.lane.za32.vg1x4
    aarch64_sme_fdot_single_za32_vg1x2,        // llvm.aarch64.sme.fdot.single.za32.vg1x2
    aarch64_sme_fdot_single_za32_vg1x4,        // llvm.aarch64.sme.fdot.single.za32.vg1x4
    aarch64_sme_fdot_za32_vg1x2,               // llvm.aarch64.sme.fdot.za32.vg1x2
    aarch64_sme_fdot_za32_vg1x4,               // llvm.aarch64.sme.fdot.za32.vg1x4
    aarch64_sme_fmla_lane_vg1x2,               // llvm.aarch64.sme.fmla.lane.vg1x2
    aarch64_sme_fmla_lane_vg1x4,               // llvm.aarch64.sme.fmla.lane.vg1x4
    aarch64_sme_fmla_single_vg1x2,             // llvm.aarch64.sme.fmla.single.vg1x2
    aarch64_sme_fmla_single_vg1x4,             // llvm.aarch64.sme.fmla.single.vg1x4
    aarch64_sme_fmla_vg1x2,                    // llvm.aarch64.sme.fmla.vg1x2
    aarch64_sme_fmla_vg1x4,                    // llvm.aarch64.sme.fmla.vg1x4
    aarch64_sme_fmlal_lane_vg2x1,              // llvm.aarch64.sme.fmlal.lane.vg2x1
    aarch64_sme_fmlal_lane_vg2x2,              // llvm.aarch64.sme.fmlal.lane.vg2x2
    aarch64_sme_fmlal_lane_vg2x4,              // llvm.aarch64.sme.fmlal.lane.vg2x4
    aarch64_sme_fmlal_single_vg2x1,            // llvm.aarch64.sme.fmlal.single.vg2x1
    aarch64_sme_fmlal_single_vg2x2,            // llvm.aarch64.sme.fmlal.single.vg2x2
    aarch64_sme_fmlal_single_vg2x4,            // llvm.aarch64.sme.fmlal.single.vg2x4
    aarch64_sme_fmlal_vg2x2,                   // llvm.aarch64.sme.fmlal.vg2x2
    aarch64_sme_fmlal_vg2x4,                   // llvm.aarch64.sme.fmlal.vg2x4
    aarch64_sme_fmls_lane_vg1x2,               // llvm.aarch64.sme.fmls.lane.vg1x2
    aarch64_sme_fmls_lane_vg1x4,               // llvm.aarch64.sme.fmls.lane.vg1x4
    aarch64_sme_fmls_single_vg1x2,             // llvm.aarch64.sme.fmls.single.vg1x2
    aarch64_sme_fmls_single_vg1x4,             // llvm.aarch64.sme.fmls.single.vg1x4
    aarch64_sme_fmls_vg1x2,                    // llvm.aarch64.sme.fmls.vg1x2
    aarch64_sme_fmls_vg1x4,                    // llvm.aarch64.sme.fmls.vg1x4
    aarch64_sme_fmlsl_lane_vg2x1,              // llvm.aarch64.sme.fmlsl.lane.vg2x1
    aarch64_sme_fmlsl_lane_vg2x2,              // llvm.aarch64.sme.fmlsl.lane.vg2x2
    aarch64_sme_fmlsl_lane_vg2x4,              // llvm.aarch64.sme.fmlsl.lane.vg2x4
    aarch64_sme_fmlsl_single_vg2x1,            // llvm.aarch64.sme.fmlsl.single.vg2x1
    aarch64_sme_fmlsl_single_vg2x2,            // llvm.aarch64.sme.fmlsl.single.vg2x2
    aarch64_sme_fmlsl_single_vg2x4,            // llvm.aarch64.sme.fmlsl.single.vg2x4
    aarch64_sme_fmlsl_vg2x2,                   // llvm.aarch64.sme.fmlsl.vg2x2
    aarch64_sme_fmlsl_vg2x4,                   // llvm.aarch64.sme.fmlsl.vg2x4
    aarch64_sme_fvdot_lane_za32_vg1x2,         // llvm.aarch64.sme.fvdot.lane.za32.vg1x2
    aarch64_sme_get_tpidr2,                    // llvm.aarch64.sme.get.tpidr2
    aarch64_sme_ld1b_horiz,                    // llvm.aarch64.sme.ld1b.horiz
    aarch64_sme_ld1b_vert,                     // llvm.aarch64.sme.ld1b.vert
    aarch64_sme_ld1d_horiz,                    // llvm.aarch64.sme.ld1d.horiz
    aarch64_sme_ld1d_vert,                     // llvm.aarch64.sme.ld1d.vert
    aarch64_sme_ld1h_horiz,                    // llvm.aarch64.sme.ld1h.horiz
    aarch64_sme_ld1h_vert,                     // llvm.aarch64.sme.ld1h.vert
    aarch64_sme_ld1q_horiz,                    // llvm.aarch64.sme.ld1q.horiz
    aarch64_sme_ld1q_vert,                     // llvm.aarch64.sme.ld1q.vert
    aarch64_sme_ld1w_horiz,                    // llvm.aarch64.sme.ld1w.horiz
    aarch64_sme_ld1w_vert,                     // llvm.aarch64.sme.ld1w.vert
    aarch64_sme_ldr,                           // llvm.aarch64.sme.ldr
    aarch64_sme_mopa,                          // llvm.aarch64.sme.mopa
    aarch64_sme_mopa_wide,                     // llvm.aarch64.sme.mopa.wide
    aarch64_sme_mops,                          // llvm.aarch64.sme.mops
    aarch64_sme_mops_wide,                     // llvm.aarch64.sme.mops.wide
    aarch64_sme_read_hor_vg2,                  // llvm.aarch64.sme.read.hor.vg2
    aarch64_sme_read_hor_vg4,                  // llvm.aarch64.sme.read.hor.vg4
    aarch64_sme_read_horiz,                    // llvm.aarch64.sme.read.horiz
    aarch64_sme_read_ver_vg2,                  // llvm.aarch64.sme.read.ver.vg2
    aarch64_sme_read_ver_vg4,                  // llvm.aarch64.sme.read.ver.vg4
    aarch64_sme_read_vert,                     // llvm.aarch64.sme.read.vert
    aarch64_sme_read_vg1x2,                    // llvm.aarch64.sme.read.vg1x2
    aarch64_sme_read_vg1x4,                    // llvm.aarch64.sme.read.vg1x4
    aarch64_sme_readq_horiz,                   // llvm.aarch64.sme.readq.horiz
    aarch64_sme_readq_vert,                    // llvm.aarch64.sme.readq.vert
    aarch64_sme_sdot_lane_za32_vg1x2,          // llvm.aarch64.sme.sdot.lane.za32.vg1x2
    aarch64_sme_sdot_lane_za32_vg1x4,          // llvm.aarch64.sme.sdot.lane.za32.vg1x4
    aarch64_sme_sdot_lane_za64_vg1x2,          // llvm.aarch64.sme.sdot.lane.za64.vg1x2
    aarch64_sme_sdot_lane_za64_vg1x4,          // llvm.aarch64.sme.sdot.lane.za64.vg1x4
    aarch64_sme_sdot_single_za32_vg1x2,        // llvm.aarch64.sme.sdot.single.za32.vg1x2
    aarch64_sme_sdot_single_za32_vg1x4,        // llvm.aarch64.sme.sdot.single.za32.vg1x4
    aarch64_sme_sdot_single_za64_vg1x2,        // llvm.aarch64.sme.sdot.single.za64.vg1x2
    aarch64_sme_sdot_single_za64_vg1x4,        // llvm.aarch64.sme.sdot.single.za64.vg1x4
    aarch64_sme_sdot_za32_vg1x2,               // llvm.aarch64.sme.sdot.za32.vg1x2
    aarch64_sme_sdot_za32_vg1x4,               // llvm.aarch64.sme.sdot.za32.vg1x4
    aarch64_sme_sdot_za64_vg1x2,               // llvm.aarch64.sme.sdot.za64.vg1x2
    aarch64_sme_sdot_za64_vg1x4,               // llvm.aarch64.sme.sdot.za64.vg1x4
    aarch64_sme_set_tpidr2,                    // llvm.aarch64.sme.set.tpidr2
    aarch64_sme_smla_za32_lane_vg4x1,          // llvm.aarch64.sme.smla.za32.lane.vg4x1
    aarch64_sme_smla_za32_lane_vg4x2,          // llvm.aarch64.sme.smla.za32.lane.vg4x2
    aarch64_sme_smla_za32_lane_vg4x4,          // llvm.aarch64.sme.smla.za32.lane.vg4x4
    aarch64_sme_smla_za32_single_vg4x1,        // llvm.aarch64.sme.smla.za32.single.vg4x1
    aarch64_sme_smla_za32_single_vg4x2,        // llvm.aarch64.sme.smla.za32.single.vg4x2
    aarch64_sme_smla_za32_single_vg4x4,        // llvm.aarch64.sme.smla.za32.single.vg4x4
    aarch64_sme_smla_za32_vg4x2,               // llvm.aarch64.sme.smla.za32.vg4x2
    aarch64_sme_smla_za32_vg4x4,               // llvm.aarch64.sme.smla.za32.vg4x4
    aarch64_sme_smla_za64_lane_vg4x1,          // llvm.aarch64.sme.smla.za64.lane.vg4x1
    aarch64_sme_smla_za64_lane_vg4x2,          // llvm.aarch64.sme.smla.za64.lane.vg4x2
    aarch64_sme_smla_za64_lane_vg4x4,          // llvm.aarch64.sme.smla.za64.lane.vg4x4
    aarch64_sme_smla_za64_single_vg4x1,        // llvm.aarch64.sme.smla.za64.single.vg4x1
    aarch64_sme_smla_za64_single_vg4x2,        // llvm.aarch64.sme.smla.za64.single.vg4x2
    aarch64_sme_smla_za64_single_vg4x4,        // llvm.aarch64.sme.smla.za64.single.vg4x4
    aarch64_sme_smla_za64_vg4x2,               // llvm.aarch64.sme.smla.za64.vg4x2
    aarch64_sme_smla_za64_vg4x4,               // llvm.aarch64.sme.smla.za64.vg4x4
    aarch64_sme_smlal_lane_vg2x1,              // llvm.aarch64.sme.smlal.lane.vg2x1
    aarch64_sme_smlal_lane_vg2x2,              // llvm.aarch64.sme.smlal.lane.vg2x2
    aarch64_sme_smlal_lane_vg2x4,              // llvm.aarch64.sme.smlal.lane.vg2x4
    aarch64_sme_smlal_single_vg2x1,            // llvm.aarch64.sme.smlal.single.vg2x1
    aarch64_sme_smlal_single_vg2x2,            // llvm.aarch64.sme.smlal.single.vg2x2
    aarch64_sme_smlal_single_vg2x4,            // llvm.aarch64.sme.smlal.single.vg2x4
    aarch64_sme_smlal_vg2x2,                   // llvm.aarch64.sme.smlal.vg2x2
    aarch64_sme_smlal_vg2x4,                   // llvm.aarch64.sme.smlal.vg2x4
    aarch64_sme_smls_za32_lane_vg4x1,          // llvm.aarch64.sme.smls.za32.lane.vg4x1
    aarch64_sme_smls_za32_lane_vg4x2,          // llvm.aarch64.sme.smls.za32.lane.vg4x2
    aarch64_sme_smls_za32_lane_vg4x4,          // llvm.aarch64.sme.smls.za32.lane.vg4x4
    aarch64_sme_smls_za32_single_vg4x1,        // llvm.aarch64.sme.smls.za32.single.vg4x1
    aarch64_sme_smls_za32_single_vg4x2,        // llvm.aarch64.sme.smls.za32.single.vg4x2
    aarch64_sme_smls_za32_single_vg4x4,        // llvm.aarch64.sme.smls.za32.single.vg4x4
    aarch64_sme_smls_za32_vg4x2,               // llvm.aarch64.sme.smls.za32.vg4x2
    aarch64_sme_smls_za32_vg4x4,               // llvm.aarch64.sme.smls.za32.vg4x4
    aarch64_sme_smls_za64_lane_vg4x1,          // llvm.aarch64.sme.smls.za64.lane.vg4x1
    aarch64_sme_smls_za64_lane_vg4x2,          // llvm.aarch64.sme.smls.za64.lane.vg4x2
    aarch64_sme_smls_za64_lane_vg4x4,          // llvm.aarch64.sme.smls.za64.lane.vg4x4
    aarch64_sme_smls_za64_single_vg4x1,        // llvm.aarch64.sme.smls.za64.single.vg4x1
    aarch64_sme_smls_za64_single_vg4x2,        // llvm.aarch64.sme.smls.za64.single.vg4x2
    aarch64_sme_smls_za64_single_vg4x4,        // llvm.aarch64.sme.smls.za64.single.vg4x4
    aarch64_sme_smls_za64_vg4x2,               // llvm.aarch64.sme.smls.za64.vg4x2
    aarch64_sme_smls_za64_vg4x4,               // llvm.aarch64.sme.smls.za64.vg4x4
    aarch64_sme_smlsl_lane_vg2x1,              // llvm.aarch64.sme.smlsl.lane.vg2x1
    aarch64_sme_smlsl_lane_vg2x2,              // llvm.aarch64.sme.smlsl.lane.vg2x2
    aarch64_sme_smlsl_lane_vg2x4,              // llvm.aarch64.sme.smlsl.lane.vg2x4
    aarch64_sme_smlsl_single_vg2x1,            // llvm.aarch64.sme.smlsl.single.vg2x1
    aarch64_sme_smlsl_single_vg2x2,            // llvm.aarch64.sme.smlsl.single.vg2x2
    aarch64_sme_smlsl_single_vg2x4,            // llvm.aarch64.sme.smlsl.single.vg2x4
    aarch64_sme_smlsl_vg2x2,                   // llvm.aarch64.sme.smlsl.vg2x2
    aarch64_sme_smlsl_vg2x4,                   // llvm.aarch64.sme.smlsl.vg2x4
    aarch64_sme_smopa_wide,                    // llvm.aarch64.sme.smopa.wide
    aarch64_sme_smopa_za32,                    // llvm.aarch64.sme.smopa.za32
    aarch64_sme_smops_wide,                    // llvm.aarch64.sme.smops.wide
    aarch64_sme_smops_za32,                    // llvm.aarch64.sme.smops.za32
    aarch64_sme_st1b_horiz,                    // llvm.aarch64.sme.st1b.horiz
    aarch64_sme_st1b_vert,                     // llvm.aarch64.sme.st1b.vert
    aarch64_sme_st1d_horiz,                    // llvm.aarch64.sme.st1d.horiz
    aarch64_sme_st1d_vert,                     // llvm.aarch64.sme.st1d.vert
    aarch64_sme_st1h_horiz,                    // llvm.aarch64.sme.st1h.horiz
    aarch64_sme_st1h_vert,                     // llvm.aarch64.sme.st1h.vert
    aarch64_sme_st1q_horiz,                    // llvm.aarch64.sme.st1q.horiz
    aarch64_sme_st1q_vert,                     // llvm.aarch64.sme.st1q.vert
    aarch64_sme_st1w_horiz,                    // llvm.aarch64.sme.st1w.horiz
    aarch64_sme_st1w_vert,                     // llvm.aarch64.sme.st1w.vert
    aarch64_sme_str,                           // llvm.aarch64.sme.str
    aarch64_sme_sub_write_single_za_vg1x2,     // llvm.aarch64.sme.sub.write.single.za.vg1x2
    aarch64_sme_sub_write_single_za_vg1x4,     // llvm.aarch64.sme.sub.write.single.za.vg1x4
    aarch64_sme_sub_write_za_vg1x2,            // llvm.aarch64.sme.sub.write.za.vg1x2
    aarch64_sme_sub_write_za_vg1x4,            // llvm.aarch64.sme.sub.write.za.vg1x4
    aarch64_sme_sub_za32_vg1x2,                // llvm.aarch64.sme.sub.za32.vg1x2
    aarch64_sme_sub_za32_vg1x4,                // llvm.aarch64.sme.sub.za32.vg1x4
    aarch64_sme_sub_za64_vg1x2,                // llvm.aarch64.sme.sub.za64.vg1x2
    aarch64_sme_sub_za64_vg1x4,                // llvm.aarch64.sme.sub.za64.vg1x4
    aarch64_sme_sudot_lane_za32_vg1x2,         // llvm.aarch64.sme.sudot.lane.za32.vg1x2
    aarch64_sme_sudot_lane_za32_vg1x4,         // llvm.aarch64.sme.sudot.lane.za32.vg1x4
    aarch64_sme_sudot_single_za32_vg1x2,       // llvm.aarch64.sme.sudot.single.za32.vg1x2
    aarch64_sme_sudot_single_za32_vg1x4,       // llvm.aarch64.sme.sudot.single.za32.vg1x4
    aarch64_sme_sumla_za32_lane_vg4x1,         // llvm.aarch64.sme.sumla.za32.lane.vg4x1
    aarch64_sme_sumla_za32_lane_vg4x2,         // llvm.aarch64.sme.sumla.za32.lane.vg4x2
    aarch64_sme_sumla_za32_lane_vg4x4,         // llvm.aarch64.sme.sumla.za32.lane.vg4x4
    aarch64_sme_sumla_za32_single_vg4x2,       // llvm.aarch64.sme.sumla.za32.single.vg4x2
    aarch64_sme_sumla_za32_single_vg4x4,       // llvm.aarch64.sme.sumla.za32.single.vg4x4
    aarch64_sme_sumopa_wide,                   // llvm.aarch64.sme.sumopa.wide
    aarch64_sme_sumops_wide,                   // llvm.aarch64.sme.sumops.wide
    aarch64_sme_suvdot_lane_za32_vg1x4,        // llvm.aarch64.sme.suvdot.lane.za32.vg1x4
    aarch64_sme_svdot_lane_za32_vg1x2,         // llvm.aarch64.sme.svdot.lane.za32.vg1x2
    aarch64_sme_svdot_lane_za32_vg1x4,         // llvm.aarch64.sme.svdot.lane.za32.vg1x4
    aarch64_sme_svdot_lane_za64_vg1x4,         // llvm.aarch64.sme.svdot.lane.za64.vg1x4
    aarch64_sme_udot_lane_za32_vg1x2,          // llvm.aarch64.sme.udot.lane.za32.vg1x2
    aarch64_sme_udot_lane_za32_vg1x4,          // llvm.aarch64.sme.udot.lane.za32.vg1x4
    aarch64_sme_udot_lane_za64_vg1x2,          // llvm.aarch64.sme.udot.lane.za64.vg1x2
    aarch64_sme_udot_lane_za64_vg1x4,          // llvm.aarch64.sme.udot.lane.za64.vg1x4
    aarch64_sme_udot_single_za32_vg1x2,        // llvm.aarch64.sme.udot.single.za32.vg1x2
    aarch64_sme_udot_single_za32_vg1x4,        // llvm.aarch64.sme.udot.single.za32.vg1x4
    aarch64_sme_udot_single_za64_vg1x2,        // llvm.aarch64.sme.udot.single.za64.vg1x2
    aarch64_sme_udot_single_za64_vg1x4,        // llvm.aarch64.sme.udot.single.za64.vg1x4
    aarch64_sme_udot_za32_vg1x2,               // llvm.aarch64.sme.udot.za32.vg1x2
    aarch64_sme_udot_za32_vg1x4,               // llvm.aarch64.sme.udot.za32.vg1x4
    aarch64_sme_udot_za64_vg1x2,               // llvm.aarch64.sme.udot.za64.vg1x2
    aarch64_sme_udot_za64_vg1x4,               // llvm.aarch64.sme.udot.za64.vg1x4
    aarch64_sme_umla_za32_lane_vg4x1,          // llvm.aarch64.sme.umla.za32.lane.vg4x1
    aarch64_sme_umla_za32_lane_vg4x2,          // llvm.aarch64.sme.umla.za32.lane.vg4x2
    aarch64_sme_umla_za32_lane_vg4x4,          // llvm.aarch64.sme.umla.za32.lane.vg4x4
    aarch64_sme_umla_za32_single_vg4x1,        // llvm.aarch64.sme.umla.za32.single.vg4x1
    aarch64_sme_umla_za32_single_vg4x2,        // llvm.aarch64.sme.umla.za32.single.vg4x2
    aarch64_sme_umla_za32_single_vg4x4,        // llvm.aarch64.sme.umla.za32.single.vg4x4
    aarch64_sme_umla_za32_vg4x2,               // llvm.aarch64.sme.umla.za32.vg4x2
    aarch64_sme_umla_za32_vg4x4,               // llvm.aarch64.sme.umla.za32.vg4x4
    aarch64_sme_umla_za64_lane_vg4x1,          // llvm.aarch64.sme.umla.za64.lane.vg4x1
    aarch64_sme_umla_za64_lane_vg4x2,          // llvm.aarch64.sme.umla.za64.lane.vg4x2
    aarch64_sme_umla_za64_lane_vg4x4,          // llvm.aarch64.sme.umla.za64.lane.vg4x4
    aarch64_sme_umla_za64_single_vg4x1,        // llvm.aarch64.sme.umla.za64.single.vg4x1
    aarch64_sme_umla_za64_single_vg4x2,        // llvm.aarch64.sme.umla.za64.single.vg4x2
    aarch64_sme_umla_za64_single_vg4x4,        // llvm.aarch64.sme.umla.za64.single.vg4x4
    aarch64_sme_umla_za64_vg4x2,               // llvm.aarch64.sme.umla.za64.vg4x2
    aarch64_sme_umla_za64_vg4x4,               // llvm.aarch64.sme.umla.za64.vg4x4
    aarch64_sme_umlal_lane_vg2x1,              // llvm.aarch64.sme.umlal.lane.vg2x1
    aarch64_sme_umlal_lane_vg2x2,              // llvm.aarch64.sme.umlal.lane.vg2x2
    aarch64_sme_umlal_lane_vg2x4,              // llvm.aarch64.sme.umlal.lane.vg2x4
    aarch64_sme_umlal_single_vg2x1,            // llvm.aarch64.sme.umlal.single.vg2x1
    aarch64_sme_umlal_single_vg2x2,            // llvm.aarch64.sme.umlal.single.vg2x2
    aarch64_sme_umlal_single_vg2x4,            // llvm.aarch64.sme.umlal.single.vg2x4
    aarch64_sme_umlal_vg2x2,                   // llvm.aarch64.sme.umlal.vg2x2
    aarch64_sme_umlal_vg2x4,                   // llvm.aarch64.sme.umlal.vg2x4
    aarch64_sme_umls_za32_lane_vg4x1,          // llvm.aarch64.sme.umls.za32.lane.vg4x1
    aarch64_sme_umls_za32_lane_vg4x2,          // llvm.aarch64.sme.umls.za32.lane.vg4x2
    aarch64_sme_umls_za32_lane_vg4x4,          // llvm.aarch64.sme.umls.za32.lane.vg4x4
    aarch64_sme_umls_za32_single_vg4x1,        // llvm.aarch64.sme.umls.za32.single.vg4x1
    aarch64_sme_umls_za32_single_vg4x2,        // llvm.aarch64.sme.umls.za32.single.vg4x2
    aarch64_sme_umls_za32_single_vg4x4,        // llvm.aarch64.sme.umls.za32.single.vg4x4
    aarch64_sme_umls_za32_vg4x2,               // llvm.aarch64.sme.umls.za32.vg4x2
    aarch64_sme_umls_za32_vg4x4,               // llvm.aarch64.sme.umls.za32.vg4x4
    aarch64_sme_umls_za64_lane_vg4x1,          // llvm.aarch64.sme.umls.za64.lane.vg4x1
    aarch64_sme_umls_za64_lane_vg4x2,          // llvm.aarch64.sme.umls.za64.lane.vg4x2
    aarch64_sme_umls_za64_lane_vg4x4,          // llvm.aarch64.sme.umls.za64.lane.vg4x4
    aarch64_sme_umls_za64_single_vg4x1,        // llvm.aarch64.sme.umls.za64.single.vg4x1
    aarch64_sme_umls_za64_single_vg4x2,        // llvm.aarch64.sme.umls.za64.single.vg4x2
    aarch64_sme_umls_za64_single_vg4x4,        // llvm.aarch64.sme.umls.za64.single.vg4x4
    aarch64_sme_umls_za64_vg4x2,               // llvm.aarch64.sme.umls.za64.vg4x2
    aarch64_sme_umls_za64_vg4x4,               // llvm.aarch64.sme.umls.za64.vg4x4
    aarch64_sme_umlsl_lane_vg2x1,              // llvm.aarch64.sme.umlsl.lane.vg2x1
    aarch64_sme_umlsl_lane_vg2x2,              // llvm.aarch64.sme.umlsl.lane.vg2x2
    aarch64_sme_umlsl_lane_vg2x4,              // llvm.aarch64.sme.umlsl.lane.vg2x4
    aarch64_sme_umlsl_single_vg2x1,            // llvm.aarch64.sme.umlsl.single.vg2x1
    aarch64_sme_umlsl_single_vg2x2,            // llvm.aarch64.sme.umlsl.single.vg2x2
    aarch64_sme_umlsl_single_vg2x4,            // llvm.aarch64.sme.umlsl.single.vg2x4
    aarch64_sme_umlsl_vg2x2,                   // llvm.aarch64.sme.umlsl.vg2x2
    aarch64_sme_umlsl_vg2x4,                   // llvm.aarch64.sme.umlsl.vg2x4
    aarch64_sme_umopa_wide,                    // llvm.aarch64.sme.umopa.wide
    aarch64_sme_umopa_za32,                    // llvm.aarch64.sme.umopa.za32
    aarch64_sme_umops_wide,                    // llvm.aarch64.sme.umops.wide
    aarch64_sme_umops_za32,                    // llvm.aarch64.sme.umops.za32
    aarch64_sme_usdot_lane_za32_vg1x2,         // llvm.aarch64.sme.usdot.lane.za32.vg1x2
    aarch64_sme_usdot_lane_za32_vg1x4,         // llvm.aarch64.sme.usdot.lane.za32.vg1x4
    aarch64_sme_usdot_single_za32_vg1x2,       // llvm.aarch64.sme.usdot.single.za32.vg1x2
    aarch64_sme_usdot_single_za32_vg1x4,       // llvm.aarch64.sme.usdot.single.za32.vg1x4
    aarch64_sme_usdot_za32_vg1x2,              // llvm.aarch64.sme.usdot.za32.vg1x2
    aarch64_sme_usdot_za32_vg1x4,              // llvm.aarch64.sme.usdot.za32.vg1x4
    aarch64_sme_usmla_za32_lane_vg4x1,         // llvm.aarch64.sme.usmla.za32.lane.vg4x1
    aarch64_sme_usmla_za32_lane_vg4x2,         // llvm.aarch64.sme.usmla.za32.lane.vg4x2
    aarch64_sme_usmla_za32_lane_vg4x4,         // llvm.aarch64.sme.usmla.za32.lane.vg4x4
    aarch64_sme_usmla_za32_single_vg4x1,       // llvm.aarch64.sme.usmla.za32.single.vg4x1
    aarch64_sme_usmla_za32_single_vg4x2,       // llvm.aarch64.sme.usmla.za32.single.vg4x2
    aarch64_sme_usmla_za32_single_vg4x4,       // llvm.aarch64.sme.usmla.za32.single.vg4x4
    aarch64_sme_usmla_za32_vg4x2,              // llvm.aarch64.sme.usmla.za32.vg4x2
    aarch64_sme_usmla_za32_vg4x4,              // llvm.aarch64.sme.usmla.za32.vg4x4
    aarch64_sme_usmopa_wide,                   // llvm.aarch64.sme.usmopa.wide
    aarch64_sme_usmops_wide,                   // llvm.aarch64.sme.usmops.wide
    aarch64_sme_usvdot_lane_za32_vg1x4,        // llvm.aarch64.sme.usvdot.lane.za32.vg1x4
    aarch64_sme_uvdot_lane_za32_vg1x2,         // llvm.aarch64.sme.uvdot.lane.za32.vg1x2
    aarch64_sme_uvdot_lane_za32_vg1x4,         // llvm.aarch64.sme.uvdot.lane.za32.vg1x4
    aarch64_sme_uvdot_lane_za64_vg1x4,         // llvm.aarch64.sme.uvdot.lane.za64.vg1x4
    aarch64_sme_write_hor_vg2,                 // llvm.aarch64.sme.write.hor.vg2
    aarch64_sme_write_hor_vg4,                 // llvm.aarch64.sme.write.hor.vg4
    aarch64_sme_write_horiz,                   // llvm.aarch64.sme.write.horiz
    aarch64_sme_write_ver_vg2,                 // llvm.aarch64.sme.write.ver.vg2
    aarch64_sme_write_ver_vg4,                 // llvm.aarch64.sme.write.ver.vg4
    aarch64_sme_write_vert,                    // llvm.aarch64.sme.write.vert
    aarch64_sme_write_vg1x2,                   // llvm.aarch64.sme.write.vg1x2
    aarch64_sme_write_vg1x4,                   // llvm.aarch64.sme.write.vg1x4
    aarch64_sme_writeq_horiz,                  // llvm.aarch64.sme.writeq.horiz
    aarch64_sme_writeq_vert,                   // llvm.aarch64.sme.writeq.vert
    aarch64_sme_za_disable,                    // llvm.aarch64.sme.za.disable
    aarch64_sme_za_enable,                     // llvm.aarch64.sme.za.enable
    aarch64_sme_zero,                          // llvm.aarch64.sme.zero
    aarch64_space,                             // llvm.aarch64.space
    aarch64_st64b,                             // llvm.aarch64.st64b
    aarch64_st64bv,                            // llvm.aarch64.st64bv
    aarch64_st64bv0,                           // llvm.aarch64.st64bv0
    aarch64_stg,                               // llvm.aarch64.stg
    aarch64_stgp,                              // llvm.aarch64.stgp
    aarch64_stlxp,                             // llvm.aarch64.stlxp
    aarch64_stlxr,                             // llvm.aarch64.stlxr
    aarch64_stxp,                              // llvm.aarch64.stxp
    aarch64_stxr,                              // llvm.aarch64.stxr
    aarch64_subp,                              // llvm.aarch64.subp
    aarch64_sve_abs,                           // llvm.aarch64.sve.abs
    aarch64_sve_adclb,                         // llvm.aarch64.sve.adclb
    aarch64_sve_adclt,                         // llvm.aarch64.sve.adclt
    aarch64_sve_add,                           // llvm.aarch64.sve.add
    aarch64_sve_add_single_x2,                 // llvm.aarch64.sve.add.single.x2
    aarch64_sve_add_single_x4,                 // llvm.aarch64.sve.add.single.x4
    aarch64_sve_add_u,                         // llvm.aarch64.sve.add.u
    aarch64_sve_addhnb,                        // llvm.aarch64.sve.addhnb
    aarch64_sve_addhnt,                        // llvm.aarch64.sve.addhnt
    aarch64_sve_addp,                          // llvm.aarch64.sve.addp
    aarch64_sve_adrb,                          // llvm.aarch64.sve.adrb
    aarch64_sve_adrd,                          // llvm.aarch64.sve.adrd
    aarch64_sve_adrh,                          // llvm.aarch64.sve.adrh
    aarch64_sve_adrw,                          // llvm.aarch64.sve.adrw
    aarch64_sve_aesd,                          // llvm.aarch64.sve.aesd
    aarch64_sve_aese,                          // llvm.aarch64.sve.aese
    aarch64_sve_aesimc,                        // llvm.aarch64.sve.aesimc
    aarch64_sve_aesmc,                         // llvm.aarch64.sve.aesmc
    aarch64_sve_and,                           // llvm.aarch64.sve.and
    aarch64_sve_and_u,                         // llvm.aarch64.sve.and.u
    aarch64_sve_and_z,                         // llvm.aarch64.sve.and.z
    aarch64_sve_andv,                          // llvm.aarch64.sve.andv
    aarch64_sve_asr,                           // llvm.aarch64.sve.asr
    aarch64_sve_asr_u,                         // llvm.aarch64.sve.asr.u
    aarch64_sve_asr_wide,                      // llvm.aarch64.sve.asr.wide
    aarch64_sve_asrd,                          // llvm.aarch64.sve.asrd
    aarch64_sve_bcax,                          // llvm.aarch64.sve.bcax
    aarch64_sve_bdep_x,                        // llvm.aarch64.sve.bdep.x
    aarch64_sve_bext_x,                        // llvm.aarch64.sve.bext.x
    aarch64_sve_bfcvt_x2,                      // llvm.aarch64.sve.bfcvt.x2
    aarch64_sve_bfcvtn_x2,                     // llvm.aarch64.sve.bfcvtn.x2
    aarch64_sve_bfdot,                         // llvm.aarch64.sve.bfdot
    aarch64_sve_bfdot_lane_v2,                 // llvm.aarch64.sve.bfdot.lane.v2
    aarch64_sve_bfmlalb,                       // llvm.aarch64.sve.bfmlalb
    aarch64_sve_bfmlalb_lane_v2,               // llvm.aarch64.sve.bfmlalb.lane.v2
    aarch64_sve_bfmlalt,                       // llvm.aarch64.sve.bfmlalt
    aarch64_sve_bfmlalt_lane_v2,               // llvm.aarch64.sve.bfmlalt.lane.v2
    aarch64_sve_bfmmla,                        // llvm.aarch64.sve.bfmmla
    aarch64_sve_bgrp_x,                        // llvm.aarch64.sve.bgrp.x
    aarch64_sve_bic,                           // llvm.aarch64.sve.bic
    aarch64_sve_bic_u,                         // llvm.aarch64.sve.bic.u
    aarch64_sve_bic_z,                         // llvm.aarch64.sve.bic.z
    aarch64_sve_brka,                          // llvm.aarch64.sve.brka
    aarch64_sve_brka_z,                        // llvm.aarch64.sve.brka.z
    aarch64_sve_brkb,                          // llvm.aarch64.sve.brkb
    aarch64_sve_brkb_z,                        // llvm.aarch64.sve.brkb.z
    aarch64_sve_brkn_z,                        // llvm.aarch64.sve.brkn.z
    aarch64_sve_brkpa_z,                       // llvm.aarch64.sve.brkpa.z
    aarch64_sve_brkpb_z,                       // llvm.aarch64.sve.brkpb.z
    aarch64_sve_bsl,                           // llvm.aarch64.sve.bsl
    aarch64_sve_bsl1n,                         // llvm.aarch64.sve.bsl1n
    aarch64_sve_bsl2n,                         // llvm.aarch64.sve.bsl2n
    aarch64_sve_cadd_x,                        // llvm.aarch64.sve.cadd.x
    aarch64_sve_cdot,                          // llvm.aarch64.sve.cdot
    aarch64_sve_cdot_lane,                     // llvm.aarch64.sve.cdot.lane
    aarch64_sve_clasta,                        // llvm.aarch64.sve.clasta
    aarch64_sve_clasta_n,                      // llvm.aarch64.sve.clasta.n
    aarch64_sve_clastb,                        // llvm.aarch64.sve.clastb
    aarch64_sve_clastb_n,                      // llvm.aarch64.sve.clastb.n
    aarch64_sve_cls,                           // llvm.aarch64.sve.cls
    aarch64_sve_clz,                           // llvm.aarch64.sve.clz
    aarch64_sve_cmla_lane_x,                   // llvm.aarch64.sve.cmla.lane.x
    aarch64_sve_cmla_x,                        // llvm.aarch64.sve.cmla.x
    aarch64_sve_cmpeq,                         // llvm.aarch64.sve.cmpeq
    aarch64_sve_cmpeq_wide,                    // llvm.aarch64.sve.cmpeq.wide
    aarch64_sve_cmpge,                         // llvm.aarch64.sve.cmpge
    aarch64_sve_cmpge_wide,                    // llvm.aarch64.sve.cmpge.wide
    aarch64_sve_cmpgt,                         // llvm.aarch64.sve.cmpgt
    aarch64_sve_cmpgt_wide,                    // llvm.aarch64.sve.cmpgt.wide
    aarch64_sve_cmphi,                         // llvm.aarch64.sve.cmphi
    aarch64_sve_cmphi_wide,                    // llvm.aarch64.sve.cmphi.wide
    aarch64_sve_cmphs,                         // llvm.aarch64.sve.cmphs
    aarch64_sve_cmphs_wide,                    // llvm.aarch64.sve.cmphs.wide
    aarch64_sve_cmple_wide,                    // llvm.aarch64.sve.cmple.wide
    aarch64_sve_cmplo_wide,                    // llvm.aarch64.sve.cmplo.wide
    aarch64_sve_cmpls_wide,                    // llvm.aarch64.sve.cmpls.wide
    aarch64_sve_cmplt_wide,                    // llvm.aarch64.sve.cmplt.wide
    aarch64_sve_cmpne,                         // llvm.aarch64.sve.cmpne
    aarch64_sve_cmpne_wide,                    // llvm.aarch64.sve.cmpne.wide
    aarch64_sve_cnot,                          // llvm.aarch64.sve.cnot
    aarch64_sve_cnt,                           // llvm.aarch64.sve.cnt
    aarch64_sve_cntb,                          // llvm.aarch64.sve.cntb
    aarch64_sve_cntd,                          // llvm.aarch64.sve.cntd
    aarch64_sve_cnth,                          // llvm.aarch64.sve.cnth
    aarch64_sve_cntp,                          // llvm.aarch64.sve.cntp
    aarch64_sve_cntp_c16,                      // llvm.aarch64.sve.cntp.c16
    aarch64_sve_cntp_c32,                      // llvm.aarch64.sve.cntp.c32
    aarch64_sve_cntp_c64,                      // llvm.aarch64.sve.cntp.c64
    aarch64_sve_cntp_c8,                       // llvm.aarch64.sve.cntp.c8
    aarch64_sve_cntw,                          // llvm.aarch64.sve.cntw
    aarch64_sve_compact,                       // llvm.aarch64.sve.compact
    aarch64_sve_convert_from_svbool,           // llvm.aarch64.sve.convert.from.svbool
    aarch64_sve_convert_to_svbool,             // llvm.aarch64.sve.convert.to.svbool
    aarch64_sve_dup,                           // llvm.aarch64.sve.dup
    aarch64_sve_dup_x,                         // llvm.aarch64.sve.dup.x
    aarch64_sve_dupq_lane,                     // llvm.aarch64.sve.dupq.lane
    aarch64_sve_eor,                           // llvm.aarch64.sve.eor
    aarch64_sve_eor_u,                         // llvm.aarch64.sve.eor.u
    aarch64_sve_eor_z,                         // llvm.aarch64.sve.eor.z
    aarch64_sve_eor3,                          // llvm.aarch64.sve.eor3
    aarch64_sve_eorbt,                         // llvm.aarch64.sve.eorbt
    aarch64_sve_eortb,                         // llvm.aarch64.sve.eortb
    aarch64_sve_eorv,                          // llvm.aarch64.sve.eorv
    aarch64_sve_ext,                           // llvm.aarch64.sve.ext
    aarch64_sve_fabd,                          // llvm.aarch64.sve.fabd
    aarch64_sve_fabd_u,                        // llvm.aarch64.sve.fabd.u
    aarch64_sve_fabs,                          // llvm.aarch64.sve.fabs
    aarch64_sve_facge,                         // llvm.aarch64.sve.facge
    aarch64_sve_facgt,                         // llvm.aarch64.sve.facgt
    aarch64_sve_fadd,                          // llvm.aarch64.sve.fadd
    aarch64_sve_fadd_u,                        // llvm.aarch64.sve.fadd.u
    aarch64_sve_fadda,                         // llvm.aarch64.sve.fadda
    aarch64_sve_faddp,                         // llvm.aarch64.sve.faddp
    aarch64_sve_faddv,                         // llvm.aarch64.sve.faddv
    aarch64_sve_fcadd,                         // llvm.aarch64.sve.fcadd
    aarch64_sve_fclamp,                        // llvm.aarch64.sve.fclamp
    aarch64_sve_fclamp_single_x2,              // llvm.aarch64.sve.fclamp.single.x2
    aarch64_sve_fclamp_single_x4,              // llvm.aarch64.sve.fclamp.single.x4
    aarch64_sve_fcmla,                         // llvm.aarch64.sve.fcmla
    aarch64_sve_fcmla_lane,                    // llvm.aarch64.sve.fcmla.lane
    aarch64_sve_fcmpeq,                        // llvm.aarch64.sve.fcmpeq
    aarch64_sve_fcmpge,                        // llvm.aarch64.sve.fcmpge
    aarch64_sve_fcmpgt,                        // llvm.aarch64.sve.fcmpgt
    aarch64_sve_fcmpne,                        // llvm.aarch64.sve.fcmpne
    aarch64_sve_fcmpuo,                        // llvm.aarch64.sve.fcmpuo
    aarch64_sve_fcvt,                          // llvm.aarch64.sve.fcvt
    aarch64_sve_fcvt_bf16f32,                  // llvm.aarch64.sve.fcvt.bf16f32
    aarch64_sve_fcvt_f16f32,                   // llvm.aarch64.sve.fcvt.f16f32
    aarch64_sve_fcvt_f16f64,                   // llvm.aarch64.sve.fcvt.f16f64
    aarch64_sve_fcvt_f32f16,                   // llvm.aarch64.sve.fcvt.f32f16
    aarch64_sve_fcvt_f32f64,                   // llvm.aarch64.sve.fcvt.f32f64
    aarch64_sve_fcvt_f64f16,                   // llvm.aarch64.sve.fcvt.f64f16
    aarch64_sve_fcvt_f64f32,                   // llvm.aarch64.sve.fcvt.f64f32
    aarch64_sve_fcvt_x2,                       // llvm.aarch64.sve.fcvt.x2
    aarch64_sve_fcvtlt_f32f16,                 // llvm.aarch64.sve.fcvtlt.f32f16
    aarch64_sve_fcvtlt_f64f32,                 // llvm.aarch64.sve.fcvtlt.f64f32
    aarch64_sve_fcvtn_x2,                      // llvm.aarch64.sve.fcvtn.x2
    aarch64_sve_fcvtnt_bf16f32,                // llvm.aarch64.sve.fcvtnt.bf16f32
    aarch64_sve_fcvtnt_f16f32,                 // llvm.aarch64.sve.fcvtnt.f16f32
    aarch64_sve_fcvtnt_f32f64,                 // llvm.aarch64.sve.fcvtnt.f32f64
    aarch64_sve_fcvts_x2,                      // llvm.aarch64.sve.fcvts.x2
    aarch64_sve_fcvts_x4,                      // llvm.aarch64.sve.fcvts.x4
    aarch64_sve_fcvtu_x2,                      // llvm.aarch64.sve.fcvtu.x2
    aarch64_sve_fcvtu_x4,                      // llvm.aarch64.sve.fcvtu.x4
    aarch64_sve_fcvtx_f32f64,                  // llvm.aarch64.sve.fcvtx.f32f64
    aarch64_sve_fcvtxnt_f32f64,                // llvm.aarch64.sve.fcvtxnt.f32f64
    aarch64_sve_fcvtzs,                        // llvm.aarch64.sve.fcvtzs
    aarch64_sve_fcvtzs_i32f16,                 // llvm.aarch64.sve.fcvtzs.i32f16
    aarch64_sve_fcvtzs_i32f64,                 // llvm.aarch64.sve.fcvtzs.i32f64
    aarch64_sve_fcvtzs_i64f16,                 // llvm.aarch64.sve.fcvtzs.i64f16
    aarch64_sve_fcvtzs_i64f32,                 // llvm.aarch64.sve.fcvtzs.i64f32
    aarch64_sve_fcvtzu,                        // llvm.aarch64.sve.fcvtzu
    aarch64_sve_fcvtzu_i32f16,                 // llvm.aarch64.sve.fcvtzu.i32f16
    aarch64_sve_fcvtzu_i32f64,                 // llvm.aarch64.sve.fcvtzu.i32f64
    aarch64_sve_fcvtzu_i64f16,                 // llvm.aarch64.sve.fcvtzu.i64f16
    aarch64_sve_fcvtzu_i64f32,                 // llvm.aarch64.sve.fcvtzu.i64f32
    aarch64_sve_fdiv,                          // llvm.aarch64.sve.fdiv
    aarch64_sve_fdiv_u,                        // llvm.aarch64.sve.fdiv.u
    aarch64_sve_fdivr,                         // llvm.aarch64.sve.fdivr
    aarch64_sve_fdot_lane_x2,                  // llvm.aarch64.sve.fdot.lane.x2
    aarch64_sve_fdot_x2,                       // llvm.aarch64.sve.fdot.x2
    aarch64_sve_fexpa_x,                       // llvm.aarch64.sve.fexpa.x
    aarch64_sve_flogb,                         // llvm.aarch64.sve.flogb
    aarch64_sve_fmad,                          // llvm.aarch64.sve.fmad
    aarch64_sve_fmax,                          // llvm.aarch64.sve.fmax
    aarch64_sve_fmax_single_x2,                // llvm.aarch64.sve.fmax.single.x2
    aarch64_sve_fmax_single_x4,                // llvm.aarch64.sve.fmax.single.x4
    aarch64_sve_fmax_u,                        // llvm.aarch64.sve.fmax.u
    aarch64_sve_fmax_x2,                       // llvm.aarch64.sve.fmax.x2
    aarch64_sve_fmax_x4,                       // llvm.aarch64.sve.fmax.x4
    aarch64_sve_fmaxnm,                        // llvm.aarch64.sve.fmaxnm
    aarch64_sve_fmaxnm_single_x2,              // llvm.aarch64.sve.fmaxnm.single.x2
    aarch64_sve_fmaxnm_single_x4,              // llvm.aarch64.sve.fmaxnm.single.x4
    aarch64_sve_fmaxnm_u,                      // llvm.aarch64.sve.fmaxnm.u
    aarch64_sve_fmaxnm_x2,                     // llvm.aarch64.sve.fmaxnm.x2
    aarch64_sve_fmaxnm_x4,                     // llvm.aarch64.sve.fmaxnm.x4
    aarch64_sve_fmaxnmp,                       // llvm.aarch64.sve.fmaxnmp
    aarch64_sve_fmaxnmv,                       // llvm.aarch64.sve.fmaxnmv
    aarch64_sve_fmaxp,                         // llvm.aarch64.sve.fmaxp
    aarch64_sve_fmaxv,                         // llvm.aarch64.sve.fmaxv
    aarch64_sve_fmin,                          // llvm.aarch64.sve.fmin
    aarch64_sve_fmin_single_x2,                // llvm.aarch64.sve.fmin.single.x2
    aarch64_sve_fmin_single_x4,                // llvm.aarch64.sve.fmin.single.x4
    aarch64_sve_fmin_u,                        // llvm.aarch64.sve.fmin.u
    aarch64_sve_fmin_x2,                       // llvm.aarch64.sve.fmin.x2
    aarch64_sve_fmin_x4,                       // llvm.aarch64.sve.fmin.x4
    aarch64_sve_fminnm,                        // llvm.aarch64.sve.fminnm
    aarch64_sve_fminnm_single_x2,              // llvm.aarch64.sve.fminnm.single.x2
    aarch64_sve_fminnm_single_x4,              // llvm.aarch64.sve.fminnm.single.x4
    aarch64_sve_fminnm_u,                      // llvm.aarch64.sve.fminnm.u
    aarch64_sve_fminnm_x2,                     // llvm.aarch64.sve.fminnm.x2
    aarch64_sve_fminnm_x4,                     // llvm.aarch64.sve.fminnm.x4
    aarch64_sve_fminnmp,                       // llvm.aarch64.sve.fminnmp
    aarch64_sve_fminnmv,                       // llvm.aarch64.sve.fminnmv
    aarch64_sve_fminp,                         // llvm.aarch64.sve.fminp
    aarch64_sve_fminv,                         // llvm.aarch64.sve.fminv
    aarch64_sve_fmla,                          // llvm.aarch64.sve.fmla
    aarch64_sve_fmla_lane,                     // llvm.aarch64.sve.fmla.lane
    aarch64_sve_fmla_u,                        // llvm.aarch64.sve.fmla.u
    aarch64_sve_fmlalb,                        // llvm.aarch64.sve.fmlalb
    aarch64_sve_fmlalb_lane,                   // llvm.aarch64.sve.fmlalb.lane
    aarch64_sve_fmlalt,                        // llvm.aarch64.sve.fmlalt
    aarch64_sve_fmlalt_lane,                   // llvm.aarch64.sve.fmlalt.lane
    aarch64_sve_fmls,                          // llvm.aarch64.sve.fmls
    aarch64_sve_fmls_lane,                     // llvm.aarch64.sve.fmls.lane
    aarch64_sve_fmls_u,                        // llvm.aarch64.sve.fmls.u
    aarch64_sve_fmlslb,                        // llvm.aarch64.sve.fmlslb
    aarch64_sve_fmlslb_lane,                   // llvm.aarch64.sve.fmlslb.lane
    aarch64_sve_fmlslt,                        // llvm.aarch64.sve.fmlslt
    aarch64_sve_fmlslt_lane,                   // llvm.aarch64.sve.fmlslt.lane
    aarch64_sve_fmmla,                         // llvm.aarch64.sve.fmmla
    aarch64_sve_fmsb,                          // llvm.aarch64.sve.fmsb
    aarch64_sve_fmul,                          // llvm.aarch64.sve.fmul
    aarch64_sve_fmul_lane,                     // llvm.aarch64.sve.fmul.lane
    aarch64_sve_fmul_u,                        // llvm.aarch64.sve.fmul.u
    aarch64_sve_fmulx,                         // llvm.aarch64.sve.fmulx
    aarch64_sve_fmulx_u,                       // llvm.aarch64.sve.fmulx.u
    aarch64_sve_fneg,                          // llvm.aarch64.sve.fneg
    aarch64_sve_fnmad,                         // llvm.aarch64.sve.fnmad
    aarch64_sve_fnmla,                         // llvm.aarch64.sve.fnmla
    aarch64_sve_fnmla_u,                       // llvm.aarch64.sve.fnmla.u
    aarch64_sve_fnmls,                         // llvm.aarch64.sve.fnmls
    aarch64_sve_fnmls_u,                       // llvm.aarch64.sve.fnmls.u
    aarch64_sve_fnmsb,                         // llvm.aarch64.sve.fnmsb
    aarch64_sve_frecpe_x,                      // llvm.aarch64.sve.frecpe.x
    aarch64_sve_frecps_x,                      // llvm.aarch64.sve.frecps.x
    aarch64_sve_frecpx,                        // llvm.aarch64.sve.frecpx
    aarch64_sve_frinta,                        // llvm.aarch64.sve.frinta
    aarch64_sve_frinta_x2,                     // llvm.aarch64.sve.frinta.x2
    aarch64_sve_frinta_x4,                     // llvm.aarch64.sve.frinta.x4
    aarch64_sve_frinti,                        // llvm.aarch64.sve.frinti
    aarch64_sve_frintm,                        // llvm.aarch64.sve.frintm
    aarch64_sve_frintm_x2,                     // llvm.aarch64.sve.frintm.x2
    aarch64_sve_frintm_x4,                     // llvm.aarch64.sve.frintm.x4
    aarch64_sve_frintn,                        // llvm.aarch64.sve.frintn
    aarch64_sve_frintn_x2,                     // llvm.aarch64.sve.frintn.x2
    aarch64_sve_frintn_x4,                     // llvm.aarch64.sve.frintn.x4
    aarch64_sve_frintp,                        // llvm.aarch64.sve.frintp
    aarch64_sve_frintp_x2,                     // llvm.aarch64.sve.frintp.x2
    aarch64_sve_frintp_x4,                     // llvm.aarch64.sve.frintp.x4
    aarch64_sve_frintx,                        // llvm.aarch64.sve.frintx
    aarch64_sve_frintz,                        // llvm.aarch64.sve.frintz
    aarch64_sve_frsqrte_x,                     // llvm.aarch64.sve.frsqrte.x
    aarch64_sve_frsqrts_x,                     // llvm.aarch64.sve.frsqrts.x
    aarch64_sve_fscale,                        // llvm.aarch64.sve.fscale
    aarch64_sve_fsqrt,                         // llvm.aarch64.sve.fsqrt
    aarch64_sve_fsub,                          // llvm.aarch64.sve.fsub
    aarch64_sve_fsub_u,                        // llvm.aarch64.sve.fsub.u
    aarch64_sve_fsubr,                         // llvm.aarch64.sve.fsubr
    aarch64_sve_ftmad_x,                       // llvm.aarch64.sve.ftmad.x
    aarch64_sve_ftsmul_x,                      // llvm.aarch64.sve.ftsmul.x
    aarch64_sve_ftssel_x,                      // llvm.aarch64.sve.ftssel.x
    aarch64_sve_histcnt,                       // llvm.aarch64.sve.histcnt
    aarch64_sve_histseg,                       // llvm.aarch64.sve.histseg
    aarch64_sve_index,                         // llvm.aarch64.sve.index
    aarch64_sve_insr,                          // llvm.aarch64.sve.insr
    aarch64_sve_lasta,                         // llvm.aarch64.sve.lasta
    aarch64_sve_lastb,                         // llvm.aarch64.sve.lastb
    aarch64_sve_ld1,                           // llvm.aarch64.sve.ld1
    aarch64_sve_ld1_gather,                    // llvm.aarch64.sve.ld1.gather
    aarch64_sve_ld1_gather_index,              // llvm.aarch64.sve.ld1.gather.index
    aarch64_sve_ld1_gather_scalar_offset,      // llvm.aarch64.sve.ld1.gather.scalar.offset
    aarch64_sve_ld1_gather_sxtw,               // llvm.aarch64.sve.ld1.gather.sxtw
    aarch64_sve_ld1_gather_sxtw_index,         // llvm.aarch64.sve.ld1.gather.sxtw.index
    aarch64_sve_ld1_gather_uxtw,               // llvm.aarch64.sve.ld1.gather.uxtw
    aarch64_sve_ld1_gather_uxtw_index,         // llvm.aarch64.sve.ld1.gather.uxtw.index
    aarch64_sve_ld1_pn_x2,                     // llvm.aarch64.sve.ld1.pn.x2
    aarch64_sve_ld1_pn_x4,                     // llvm.aarch64.sve.ld1.pn.x4
    aarch64_sve_ld1ro,                         // llvm.aarch64.sve.ld1ro
    aarch64_sve_ld1rq,                         // llvm.aarch64.sve.ld1rq
    aarch64_sve_ld2_sret,                      // llvm.aarch64.sve.ld2.sret
    aarch64_sve_ld3_sret,                      // llvm.aarch64.sve.ld3.sret
    aarch64_sve_ld4_sret,                      // llvm.aarch64.sve.ld4.sret
    aarch64_sve_ldff1,                         // llvm.aarch64.sve.ldff1
    aarch64_sve_ldff1_gather,                  // llvm.aarch64.sve.ldff1.gather
    aarch64_sve_ldff1_gather_index,            // llvm.aarch64.sve.ldff1.gather.index
    aarch64_sve_ldff1_gather_scalar_offset,    // llvm.aarch64.sve.ldff1.gather.scalar.offset
    aarch64_sve_ldff1_gather_sxtw,             // llvm.aarch64.sve.ldff1.gather.sxtw
    aarch64_sve_ldff1_gather_sxtw_index,       // llvm.aarch64.sve.ldff1.gather.sxtw.index
    aarch64_sve_ldff1_gather_uxtw,             // llvm.aarch64.sve.ldff1.gather.uxtw
    aarch64_sve_ldff1_gather_uxtw_index,       // llvm.aarch64.sve.ldff1.gather.uxtw.index
    aarch64_sve_ldnf1,                         // llvm.aarch64.sve.ldnf1
    aarch64_sve_ldnt1,                         // llvm.aarch64.sve.ldnt1
    aarch64_sve_ldnt1_gather,                  // llvm.aarch64.sve.ldnt1.gather
    aarch64_sve_ldnt1_gather_index,            // llvm.aarch64.sve.ldnt1.gather.index
    aarch64_sve_ldnt1_gather_scalar_offset,    // llvm.aarch64.sve.ldnt1.gather.scalar.offset
    aarch64_sve_ldnt1_gather_uxtw,             // llvm.aarch64.sve.ldnt1.gather.uxtw
    aarch64_sve_ldnt1_pn_x2,                   // llvm.aarch64.sve.ldnt1.pn.x2
    aarch64_sve_ldnt1_pn_x4,                   // llvm.aarch64.sve.ldnt1.pn.x4
    aarch64_sve_lsl,                           // llvm.aarch64.sve.lsl
    aarch64_sve_lsl_u,                         // llvm.aarch64.sve.lsl.u
    aarch64_sve_lsl_wide,                      // llvm.aarch64.sve.lsl.wide
    aarch64_sve_lsr,                           // llvm.aarch64.sve.lsr
    aarch64_sve_lsr_u,                         // llvm.aarch64.sve.lsr.u
    aarch64_sve_lsr_wide,                      // llvm.aarch64.sve.lsr.wide
    aarch64_sve_mad,                           // llvm.aarch64.sve.mad
    aarch64_sve_match,                         // llvm.aarch64.sve.match
    aarch64_sve_mla,                           // llvm.aarch64.sve.mla
    aarch64_sve_mla_lane,                      // llvm.aarch64.sve.mla.lane
    aarch64_sve_mla_u,                         // llvm.aarch64.sve.mla.u
    aarch64_sve_mls,                           // llvm.aarch64.sve.mls
    aarch64_sve_mls_lane,                      // llvm.aarch64.sve.mls.lane
    aarch64_sve_mls_u,                         // llvm.aarch64.sve.mls.u
    aarch64_sve_msb,                           // llvm.aarch64.sve.msb
    aarch64_sve_mul,                           // llvm.aarch64.sve.mul
    aarch64_sve_mul_lane,                      // llvm.aarch64.sve.mul.lane
    aarch64_sve_mul_u,                         // llvm.aarch64.sve.mul.u
    aarch64_sve_nand_z,                        // llvm.aarch64.sve.nand.z
    aarch64_sve_nbsl,                          // llvm.aarch64.sve.nbsl
    aarch64_sve_neg,                           // llvm.aarch64.sve.neg
    aarch64_sve_nmatch,                        // llvm.aarch64.sve.nmatch
    aarch64_sve_nor_z,                         // llvm.aarch64.sve.nor.z
    aarch64_sve_not,                           // llvm.aarch64.sve.not
    aarch64_sve_orn_z,                         // llvm.aarch64.sve.orn.z
    aarch64_sve_orr,                           // llvm.aarch64.sve.orr
    aarch64_sve_orr_u,                         // llvm.aarch64.sve.orr.u
    aarch64_sve_orr_z,                         // llvm.aarch64.sve.orr.z
    aarch64_sve_orv,                           // llvm.aarch64.sve.orv
    aarch64_sve_pext,                          // llvm.aarch64.sve.pext
    aarch64_sve_pext_x2,                       // llvm.aarch64.sve.pext.x2
    aarch64_sve_pfirst,                        // llvm.aarch64.sve.pfirst
    aarch64_sve_pmul,                          // llvm.aarch64.sve.pmul
    aarch64_sve_pmullb_pair,                   // llvm.aarch64.sve.pmullb.pair
    aarch64_sve_pmullt_pair,                   // llvm.aarch64.sve.pmullt.pair
    aarch64_sve_pnext,                         // llvm.aarch64.sve.pnext
    aarch64_sve_prf,                           // llvm.aarch64.sve.prf
    aarch64_sve_prfb_gather_index,             // llvm.aarch64.sve.prfb.gather.index
    aarch64_sve_prfb_gather_scalar_offset,     // llvm.aarch64.sve.prfb.gather.scalar.offset
    aarch64_sve_prfb_gather_sxtw_index,        // llvm.aarch64.sve.prfb.gather.sxtw.index
    aarch64_sve_prfb_gather_uxtw_index,        // llvm.aarch64.sve.prfb.gather.uxtw.index
    aarch64_sve_prfd_gather_index,             // llvm.aarch64.sve.prfd.gather.index
    aarch64_sve_prfd_gather_scalar_offset,     // llvm.aarch64.sve.prfd.gather.scalar.offset
    aarch64_sve_prfd_gather_sxtw_index,        // llvm.aarch64.sve.prfd.gather.sxtw.index
    aarch64_sve_prfd_gather_uxtw_index,        // llvm.aarch64.sve.prfd.gather.uxtw.index
    aarch64_sve_prfh_gather_index,             // llvm.aarch64.sve.prfh.gather.index
    aarch64_sve_prfh_gather_scalar_offset,     // llvm.aarch64.sve.prfh.gather.scalar.offset
    aarch64_sve_prfh_gather_sxtw_index,        // llvm.aarch64.sve.prfh.gather.sxtw.index
    aarch64_sve_prfh_gather_uxtw_index,        // llvm.aarch64.sve.prfh.gather.uxtw.index
    aarch64_sve_prfw_gather_index,             // llvm.aarch64.sve.prfw.gather.index
    aarch64_sve_prfw_gather_scalar_offset,     // llvm.aarch64.sve.prfw.gather.scalar.offset
    aarch64_sve_prfw_gather_sxtw_index,        // llvm.aarch64.sve.prfw.gather.sxtw.index
    aarch64_sve_prfw_gather_uxtw_index,        // llvm.aarch64.sve.prfw.gather.uxtw.index
    aarch64_sve_psel,                          // llvm.aarch64.sve.psel
    aarch64_sve_ptest_any,                     // llvm.aarch64.sve.ptest.any
    aarch64_sve_ptest_first,                   // llvm.aarch64.sve.ptest.first
    aarch64_sve_ptest_last,                    // llvm.aarch64.sve.ptest.last
    aarch64_sve_ptrue,                         // llvm.aarch64.sve.ptrue
    aarch64_sve_ptrue_c16,                     // llvm.aarch64.sve.ptrue.c16
    aarch64_sve_ptrue_c32,                     // llvm.aarch64.sve.ptrue.c32
    aarch64_sve_ptrue_c64,                     // llvm.aarch64.sve.ptrue.c64
    aarch64_sve_ptrue_c8,                      // llvm.aarch64.sve.ptrue.c8
    aarch64_sve_punpkhi,                       // llvm.aarch64.sve.punpkhi
    aarch64_sve_punpklo,                       // llvm.aarch64.sve.punpklo
    aarch64_sve_raddhnb,                       // llvm.aarch64.sve.raddhnb
    aarch64_sve_raddhnt,                       // llvm.aarch64.sve.raddhnt
    aarch64_sve_rax1,                          // llvm.aarch64.sve.rax1
    aarch64_sve_rbit,                          // llvm.aarch64.sve.rbit
    aarch64_sve_rdffr,                         // llvm.aarch64.sve.rdffr
    aarch64_sve_rdffr_z,                       // llvm.aarch64.sve.rdffr.z
    aarch64_sve_rev,                           // llvm.aarch64.sve.rev
    aarch64_sve_rev_b16,                       // llvm.aarch64.sve.rev.b16
    aarch64_sve_rev_b32,                       // llvm.aarch64.sve.rev.b32
    aarch64_sve_rev_b64,                       // llvm.aarch64.sve.rev.b64
    aarch64_sve_revb,                          // llvm.aarch64.sve.revb
    aarch64_sve_revd,                          // llvm.aarch64.sve.revd
    aarch64_sve_revh,                          // llvm.aarch64.sve.revh
    aarch64_sve_revw,                          // llvm.aarch64.sve.revw
    aarch64_sve_rshrnb,                        // llvm.aarch64.sve.rshrnb
    aarch64_sve_rshrnt,                        // llvm.aarch64.sve.rshrnt
    aarch64_sve_rsubhnb,                       // llvm.aarch64.sve.rsubhnb
    aarch64_sve_rsubhnt,                       // llvm.aarch64.sve.rsubhnt
    aarch64_sve_saba,                          // llvm.aarch64.sve.saba
    aarch64_sve_sabalb,                        // llvm.aarch64.sve.sabalb
    aarch64_sve_sabalt,                        // llvm.aarch64.sve.sabalt
    aarch64_sve_sabd,                          // llvm.aarch64.sve.sabd
    aarch64_sve_sabd_u,                        // llvm.aarch64.sve.sabd.u
    aarch64_sve_sabdlb,                        // llvm.aarch64.sve.sabdlb
    aarch64_sve_sabdlt,                        // llvm.aarch64.sve.sabdlt
    aarch64_sve_sadalp,                        // llvm.aarch64.sve.sadalp
    aarch64_sve_saddlb,                        // llvm.aarch64.sve.saddlb
    aarch64_sve_saddlbt,                       // llvm.aarch64.sve.saddlbt
    aarch64_sve_saddlt,                        // llvm.aarch64.sve.saddlt
    aarch64_sve_saddv,                         // llvm.aarch64.sve.saddv
    aarch64_sve_saddwb,                        // llvm.aarch64.sve.saddwb
    aarch64_sve_saddwt,                        // llvm.aarch64.sve.saddwt
    aarch64_sve_sbclb,                         // llvm.aarch64.sve.sbclb
    aarch64_sve_sbclt,                         // llvm.aarch64.sve.sbclt
    aarch64_sve_sclamp,                        // llvm.aarch64.sve.sclamp
    aarch64_sve_sclamp_single_x2,              // llvm.aarch64.sve.sclamp.single.x2
    aarch64_sve_sclamp_single_x4,              // llvm.aarch64.sve.sclamp.single.x4
    aarch64_sve_scvtf,                         // llvm.aarch64.sve.scvtf
    aarch64_sve_scvtf_f16i32,                  // llvm.aarch64.sve.scvtf.f16i32
    aarch64_sve_scvtf_f16i64,                  // llvm.aarch64.sve.scvtf.f16i64
    aarch64_sve_scvtf_f32i64,                  // llvm.aarch64.sve.scvtf.f32i64
    aarch64_sve_scvtf_f64i32,                  // llvm.aarch64.sve.scvtf.f64i32
    aarch64_sve_scvtf_x2,                      // llvm.aarch64.sve.scvtf.x2
    aarch64_sve_scvtf_x4,                      // llvm.aarch64.sve.scvtf.x4
    aarch64_sve_sdiv,                          // llvm.aarch64.sve.sdiv
    aarch64_sve_sdiv_u,                        // llvm.aarch64.sve.sdiv.u
    aarch64_sve_sdivr,                         // llvm.aarch64.sve.sdivr
    aarch64_sve_sdot,                          // llvm.aarch64.sve.sdot
    aarch64_sve_sdot_lane,                     // llvm.aarch64.sve.sdot.lane
    aarch64_sve_sdot_lane_x2,                  // llvm.aarch64.sve.sdot.lane.x2
    aarch64_sve_sdot_x2,                       // llvm.aarch64.sve.sdot.x2
    aarch64_sve_sel,                           // llvm.aarch64.sve.sel
    aarch64_sve_sel_x2,                        // llvm.aarch64.sve.sel.x2
    aarch64_sve_sel_x4,                        // llvm.aarch64.sve.sel.x4
    aarch64_sve_setffr,                        // llvm.aarch64.sve.setffr
    aarch64_sve_shadd,                         // llvm.aarch64.sve.shadd
    aarch64_sve_shrnb,                         // llvm.aarch64.sve.shrnb
    aarch64_sve_shrnt,                         // llvm.aarch64.sve.shrnt
    aarch64_sve_shsub,                         // llvm.aarch64.sve.shsub
    aarch64_sve_shsubr,                        // llvm.aarch64.sve.shsubr
    aarch64_sve_sli,                           // llvm.aarch64.sve.sli
    aarch64_sve_sm4e,                          // llvm.aarch64.sve.sm4e
    aarch64_sve_sm4ekey,                       // llvm.aarch64.sve.sm4ekey
    aarch64_sve_smax,                          // llvm.aarch64.sve.smax
    aarch64_sve_smax_single_x2,                // llvm.aarch64.sve.smax.single.x2
    aarch64_sve_smax_single_x4,                // llvm.aarch64.sve.smax.single.x4
    aarch64_sve_smax_u,                        // llvm.aarch64.sve.smax.u
    aarch64_sve_smax_x2,                       // llvm.aarch64.sve.smax.x2
    aarch64_sve_smax_x4,                       // llvm.aarch64.sve.smax.x4
    aarch64_sve_smaxp,                         // llvm.aarch64.sve.smaxp
    aarch64_sve_smaxv,                         // llvm.aarch64.sve.smaxv
    aarch64_sve_smin,                          // llvm.aarch64.sve.smin
    aarch64_sve_smin_single_x2,                // llvm.aarch64.sve.smin.single.x2
    aarch64_sve_smin_single_x4,                // llvm.aarch64.sve.smin.single.x4
    aarch64_sve_smin_u,                        // llvm.aarch64.sve.smin.u
    aarch64_sve_smin_x2,                       // llvm.aarch64.sve.smin.x2
    aarch64_sve_smin_x4,                       // llvm.aarch64.sve.smin.x4
    aarch64_sve_sminp,                         // llvm.aarch64.sve.sminp
    aarch64_sve_sminv,                         // llvm.aarch64.sve.sminv
    aarch64_sve_smlalb,                        // llvm.aarch64.sve.smlalb
    aarch64_sve_smlalb_lane,                   // llvm.aarch64.sve.smlalb.lane
    aarch64_sve_smlalt,                        // llvm.aarch64.sve.smlalt
    aarch64_sve_smlalt_lane,                   // llvm.aarch64.sve.smlalt.lane
    aarch64_sve_smlslb,                        // llvm.aarch64.sve.smlslb
    aarch64_sve_smlslb_lane,                   // llvm.aarch64.sve.smlslb.lane
    aarch64_sve_smlslt,                        // llvm.aarch64.sve.smlslt
    aarch64_sve_smlslt_lane,                   // llvm.aarch64.sve.smlslt.lane
    aarch64_sve_smmla,                         // llvm.aarch64.sve.smmla
    aarch64_sve_smulh,                         // llvm.aarch64.sve.smulh
    aarch64_sve_smulh_u,                       // llvm.aarch64.sve.smulh.u
    aarch64_sve_smullb,                        // llvm.aarch64.sve.smullb
    aarch64_sve_smullb_lane,                   // llvm.aarch64.sve.smullb.lane
    aarch64_sve_smullt,                        // llvm.aarch64.sve.smullt
    aarch64_sve_smullt_lane,                   // llvm.aarch64.sve.smullt.lane
    aarch64_sve_splice,                        // llvm.aarch64.sve.splice
    aarch64_sve_sqabs,                         // llvm.aarch64.sve.sqabs
    aarch64_sve_sqadd,                         // llvm.aarch64.sve.sqadd
    aarch64_sve_sqadd_x,                       // llvm.aarch64.sve.sqadd.x
    aarch64_sve_sqcadd_x,                      // llvm.aarch64.sve.sqcadd.x
    aarch64_sve_sqcvt_x2,                      // llvm.aarch64.sve.sqcvt.x2
    aarch64_sve_sqcvt_x4,                      // llvm.aarch64.sve.sqcvt.x4
    aarch64_sve_sqcvtn_x2,                     // llvm.aarch64.sve.sqcvtn.x2
    aarch64_sve_sqcvtn_x4,                     // llvm.aarch64.sve.sqcvtn.x4
    aarch64_sve_sqcvtu_x2,                     // llvm.aarch64.sve.sqcvtu.x2
    aarch64_sve_sqcvtu_x4,                     // llvm.aarch64.sve.sqcvtu.x4
    aarch64_sve_sqcvtun_x2,                    // llvm.aarch64.sve.sqcvtun.x2
    aarch64_sve_sqcvtun_x4,                    // llvm.aarch64.sve.sqcvtun.x4
    aarch64_sve_sqdecb_n32,                    // llvm.aarch64.sve.sqdecb.n32
    aarch64_sve_sqdecb_n64,                    // llvm.aarch64.sve.sqdecb.n64
    aarch64_sve_sqdecd,                        // llvm.aarch64.sve.sqdecd
    aarch64_sve_sqdecd_n32,                    // llvm.aarch64.sve.sqdecd.n32
    aarch64_sve_sqdecd_n64,                    // llvm.aarch64.sve.sqdecd.n64
    aarch64_sve_sqdech,                        // llvm.aarch64.sve.sqdech
    aarch64_sve_sqdech_n32,                    // llvm.aarch64.sve.sqdech.n32
    aarch64_sve_sqdech_n64,                    // llvm.aarch64.sve.sqdech.n64
    aarch64_sve_sqdecp,                        // llvm.aarch64.sve.sqdecp
    aarch64_sve_sqdecp_n32,                    // llvm.aarch64.sve.sqdecp.n32
    aarch64_sve_sqdecp_n64,                    // llvm.aarch64.sve.sqdecp.n64
    aarch64_sve_sqdecw,                        // llvm.aarch64.sve.sqdecw
    aarch64_sve_sqdecw_n32,                    // llvm.aarch64.sve.sqdecw.n32
    aarch64_sve_sqdecw_n64,                    // llvm.aarch64.sve.sqdecw.n64
    aarch64_sve_sqdmlalb,                      // llvm.aarch64.sve.sqdmlalb
    aarch64_sve_sqdmlalb_lane,                 // llvm.aarch64.sve.sqdmlalb.lane
    aarch64_sve_sqdmlalbt,                     // llvm.aarch64.sve.sqdmlalbt
    aarch64_sve_sqdmlalt,                      // llvm.aarch64.sve.sqdmlalt
    aarch64_sve_sqdmlalt_lane,                 // llvm.aarch64.sve.sqdmlalt.lane
    aarch64_sve_sqdmlslb,                      // llvm.aarch64.sve.sqdmlslb
    aarch64_sve_sqdmlslb_lane,                 // llvm.aarch64.sve.sqdmlslb.lane
    aarch64_sve_sqdmlslbt,                     // llvm.aarch64.sve.sqdmlslbt
    aarch64_sve_sqdmlslt,                      // llvm.aarch64.sve.sqdmlslt
    aarch64_sve_sqdmlslt_lane,                 // llvm.aarch64.sve.sqdmlslt.lane
    aarch64_sve_sqdmulh,                       // llvm.aarch64.sve.sqdmulh
    aarch64_sve_sqdmulh_lane,                  // llvm.aarch64.sve.sqdmulh.lane
    aarch64_sve_sqdmulh_single_vgx2,           // llvm.aarch64.sve.sqdmulh.single.vgx2
    aarch64_sve_sqdmulh_single_vgx4,           // llvm.aarch64.sve.sqdmulh.single.vgx4
    aarch64_sve_sqdmulh_vgx2,                  // llvm.aarch64.sve.sqdmulh.vgx2
    aarch64_sve_sqdmulh_vgx4,                  // llvm.aarch64.sve.sqdmulh.vgx4
    aarch64_sve_sqdmullb,                      // llvm.aarch64.sve.sqdmullb
    aarch64_sve_sqdmullb_lane,                 // llvm.aarch64.sve.sqdmullb.lane
    aarch64_sve_sqdmullt,                      // llvm.aarch64.sve.sqdmullt
    aarch64_sve_sqdmullt_lane,                 // llvm.aarch64.sve.sqdmullt.lane
    aarch64_sve_sqincb_n32,                    // llvm.aarch64.sve.sqincb.n32
    aarch64_sve_sqincb_n64,                    // llvm.aarch64.sve.sqincb.n64
    aarch64_sve_sqincd,                        // llvm.aarch64.sve.sqincd
    aarch64_sve_sqincd_n32,                    // llvm.aarch64.sve.sqincd.n32
    aarch64_sve_sqincd_n64,                    // llvm.aarch64.sve.sqincd.n64
    aarch64_sve_sqinch,                        // llvm.aarch64.sve.sqinch
    aarch64_sve_sqinch_n32,                    // llvm.aarch64.sve.sqinch.n32
    aarch64_sve_sqinch_n64,                    // llvm.aarch64.sve.sqinch.n64
    aarch64_sve_sqincp,                        // llvm.aarch64.sve.sqincp
    aarch64_sve_sqincp_n32,                    // llvm.aarch64.sve.sqincp.n32
    aarch64_sve_sqincp_n64,                    // llvm.aarch64.sve.sqincp.n64
    aarch64_sve_sqincw,                        // llvm.aarch64.sve.sqincw
    aarch64_sve_sqincw_n32,                    // llvm.aarch64.sve.sqincw.n32
    aarch64_sve_sqincw_n64,                    // llvm.aarch64.sve.sqincw.n64
    aarch64_sve_sqneg,                         // llvm.aarch64.sve.sqneg
    aarch64_sve_sqrdcmlah_lane_x,              // llvm.aarch64.sve.sqrdcmlah.lane.x
    aarch64_sve_sqrdcmlah_x,                   // llvm.aarch64.sve.sqrdcmlah.x
    aarch64_sve_sqrdmlah,                      // llvm.aarch64.sve.sqrdmlah
    aarch64_sve_sqrdmlah_lane,                 // llvm.aarch64.sve.sqrdmlah.lane
    aarch64_sve_sqrdmlsh,                      // llvm.aarch64.sve.sqrdmlsh
    aarch64_sve_sqrdmlsh_lane,                 // llvm.aarch64.sve.sqrdmlsh.lane
    aarch64_sve_sqrdmulh,                      // llvm.aarch64.sve.sqrdmulh
    aarch64_sve_sqrdmulh_lane,                 // llvm.aarch64.sve.sqrdmulh.lane
    aarch64_sve_sqrshl,                        // llvm.aarch64.sve.sqrshl
    aarch64_sve_sqrshr_x2,                     // llvm.aarch64.sve.sqrshr.x2
    aarch64_sve_sqrshr_x4,                     // llvm.aarch64.sve.sqrshr.x4
    aarch64_sve_sqrshrn_x2,                    // llvm.aarch64.sve.sqrshrn.x2
    aarch64_sve_sqrshrn_x4,                    // llvm.aarch64.sve.sqrshrn.x4
    aarch64_sve_sqrshrnb,                      // llvm.aarch64.sve.sqrshrnb
    aarch64_sve_sqrshrnt,                      // llvm.aarch64.sve.sqrshrnt
    aarch64_sve_sqrshru_x2,                    // llvm.aarch64.sve.sqrshru.x2
    aarch64_sve_sqrshru_x4,                    // llvm.aarch64.sve.sqrshru.x4
    aarch64_sve_sqrshrun_x2,                   // llvm.aarch64.sve.sqrshrun.x2
    aarch64_sve_sqrshrun_x4,                   // llvm.aarch64.sve.sqrshrun.x4
    aarch64_sve_sqrshrunb,                     // llvm.aarch64.sve.sqrshrunb
    aarch64_sve_sqrshrunt,                     // llvm.aarch64.sve.sqrshrunt
    aarch64_sve_sqshl,                         // llvm.aarch64.sve.sqshl
    aarch64_sve_sqshlu,                        // llvm.aarch64.sve.sqshlu
    aarch64_sve_sqshrnb,                       // llvm.aarch64.sve.sqshrnb
    aarch64_sve_sqshrnt,                       // llvm.aarch64.sve.sqshrnt
    aarch64_sve_sqshrunb,                      // llvm.aarch64.sve.sqshrunb
    aarch64_sve_sqshrunt,                      // llvm.aarch64.sve.sqshrunt
    aarch64_sve_sqsub,                         // llvm.aarch64.sve.sqsub
    aarch64_sve_sqsub_u,                       // llvm.aarch64.sve.sqsub.u
    aarch64_sve_sqsub_x,                       // llvm.aarch64.sve.sqsub.x
    aarch64_sve_sqsubr,                        // llvm.aarch64.sve.sqsubr
    aarch64_sve_sqxtnb,                        // llvm.aarch64.sve.sqxtnb
    aarch64_sve_sqxtnt,                        // llvm.aarch64.sve.sqxtnt
    aarch64_sve_sqxtunb,                       // llvm.aarch64.sve.sqxtunb
    aarch64_sve_sqxtunt,                       // llvm.aarch64.sve.sqxtunt
    aarch64_sve_srhadd,                        // llvm.aarch64.sve.srhadd
    aarch64_sve_sri,                           // llvm.aarch64.sve.sri
    aarch64_sve_srshl,                         // llvm.aarch64.sve.srshl
    aarch64_sve_srshl_single_x2,               // llvm.aarch64.sve.srshl.single.x2
    aarch64_sve_srshl_single_x4,               // llvm.aarch64.sve.srshl.single.x4
    aarch64_sve_srshl_x2,                      // llvm.aarch64.sve.srshl.x2
    aarch64_sve_srshl_x4,                      // llvm.aarch64.sve.srshl.x4
    aarch64_sve_srshr,                         // llvm.aarch64.sve.srshr
    aarch64_sve_srsra,                         // llvm.aarch64.sve.srsra
    aarch64_sve_sshllb,                        // llvm.aarch64.sve.sshllb
    aarch64_sve_sshllt,                        // llvm.aarch64.sve.sshllt
    aarch64_sve_ssra,                          // llvm.aarch64.sve.ssra
    aarch64_sve_ssublb,                        // llvm.aarch64.sve.ssublb
    aarch64_sve_ssublbt,                       // llvm.aarch64.sve.ssublbt
    aarch64_sve_ssublt,                        // llvm.aarch64.sve.ssublt
    aarch64_sve_ssubltb,                       // llvm.aarch64.sve.ssubltb
    aarch64_sve_ssubwb,                        // llvm.aarch64.sve.ssubwb
    aarch64_sve_ssubwt,                        // llvm.aarch64.sve.ssubwt
    aarch64_sve_st1,                           // llvm.aarch64.sve.st1
    aarch64_sve_st1_pn_x2,                     // llvm.aarch64.sve.st1.pn.x2
    aarch64_sve_st1_pn_x4,                     // llvm.aarch64.sve.st1.pn.x4
    aarch64_sve_st1_scatter,                   // llvm.aarch64.sve.st1.scatter
    aarch64_sve_st1_scatter_index,             // llvm.aarch64.sve.st1.scatter.index
    aarch64_sve_st1_scatter_scalar_offset,     // llvm.aarch64.sve.st1.scatter.scalar.offset
    aarch64_sve_st1_scatter_sxtw,              // llvm.aarch64.sve.st1.scatter.sxtw
    aarch64_sve_st1_scatter_sxtw_index,        // llvm.aarch64.sve.st1.scatter.sxtw.index
    aarch64_sve_st1_scatter_uxtw,              // llvm.aarch64.sve.st1.scatter.uxtw
    aarch64_sve_st1_scatter_uxtw_index,        // llvm.aarch64.sve.st1.scatter.uxtw.index
    aarch64_sve_st2,                           // llvm.aarch64.sve.st2
    aarch64_sve_st3,                           // llvm.aarch64.sve.st3
    aarch64_sve_st4,                           // llvm.aarch64.sve.st4
    aarch64_sve_stnt1,                         // llvm.aarch64.sve.stnt1
    aarch64_sve_stnt1_pn_x2,                   // llvm.aarch64.sve.stnt1.pn.x2
    aarch64_sve_stnt1_pn_x4,                   // llvm.aarch64.sve.stnt1.pn.x4
    aarch64_sve_stnt1_scatter,                 // llvm.aarch64.sve.stnt1.scatter
    aarch64_sve_stnt1_scatter_index,           // llvm.aarch64.sve.stnt1.scatter.index
    aarch64_sve_stnt1_scatter_scalar_offset,   // llvm.aarch64.sve.stnt1.scatter.scalar.offset
    aarch64_sve_stnt1_scatter_uxtw,            // llvm.aarch64.sve.stnt1.scatter.uxtw
    aarch64_sve_sub,                           // llvm.aarch64.sve.sub
    aarch64_sve_sub_u,                         // llvm.aarch64.sve.sub.u
    aarch64_sve_subhnb,                        // llvm.aarch64.sve.subhnb
    aarch64_sve_subhnt,                        // llvm.aarch64.sve.subhnt
    aarch64_sve_subr,                          // llvm.aarch64.sve.subr
    aarch64_sve_sudot_lane,                    // llvm.aarch64.sve.sudot.lane
    aarch64_sve_sunpk_x2,                      // llvm.aarch64.sve.sunpk.x2
    aarch64_sve_sunpk_x4,                      // llvm.aarch64.sve.sunpk.x4
    aarch64_sve_sunpkhi,                       // llvm.aarch64.sve.sunpkhi
    aarch64_sve_sunpklo,                       // llvm.aarch64.sve.sunpklo
    aarch64_sve_suqadd,                        // llvm.aarch64.sve.suqadd
    aarch64_sve_sxtb,                          // llvm.aarch64.sve.sxtb
    aarch64_sve_sxth,                          // llvm.aarch64.sve.sxth
    aarch64_sve_sxtw,                          // llvm.aarch64.sve.sxtw
    aarch64_sve_tbl,                           // llvm.aarch64.sve.tbl
    aarch64_sve_tbl2,                          // llvm.aarch64.sve.tbl2
    aarch64_sve_tbx,                           // llvm.aarch64.sve.tbx
    aarch64_sve_trn1,                          // llvm.aarch64.sve.trn1
    aarch64_sve_trn1_b16,                      // llvm.aarch64.sve.trn1.b16
    aarch64_sve_trn1_b32,                      // llvm.aarch64.sve.trn1.b32
    aarch64_sve_trn1_b64,                      // llvm.aarch64.sve.trn1.b64
    aarch64_sve_trn1q,                         // llvm.aarch64.sve.trn1q
    aarch64_sve_trn2,                          // llvm.aarch64.sve.trn2
    aarch64_sve_trn2_b16,                      // llvm.aarch64.sve.trn2.b16
    aarch64_sve_trn2_b32,                      // llvm.aarch64.sve.trn2.b32
    aarch64_sve_trn2_b64,                      // llvm.aarch64.sve.trn2.b64
    aarch64_sve_trn2q,                         // llvm.aarch64.sve.trn2q
    aarch64_sve_uaba,                          // llvm.aarch64.sve.uaba
    aarch64_sve_uabalb,                        // llvm.aarch64.sve.uabalb
    aarch64_sve_uabalt,                        // llvm.aarch64.sve.uabalt
    aarch64_sve_uabd,                          // llvm.aarch64.sve.uabd
    aarch64_sve_uabd_u,                        // llvm.aarch64.sve.uabd.u
    aarch64_sve_uabdlb,                        // llvm.aarch64.sve.uabdlb
    aarch64_sve_uabdlt,                        // llvm.aarch64.sve.uabdlt
    aarch64_sve_uadalp,                        // llvm.aarch64.sve.uadalp
    aarch64_sve_uaddlb,                        // llvm.aarch64.sve.uaddlb
    aarch64_sve_uaddlt,                        // llvm.aarch64.sve.uaddlt
    aarch64_sve_uaddv,                         // llvm.aarch64.sve.uaddv
    aarch64_sve_uaddwb,                        // llvm.aarch64.sve.uaddwb
    aarch64_sve_uaddwt,                        // llvm.aarch64.sve.uaddwt
    aarch64_sve_uclamp,                        // llvm.aarch64.sve.uclamp
    aarch64_sve_uclamp_single_x2,              // llvm.aarch64.sve.uclamp.single.x2
    aarch64_sve_uclamp_single_x4,              // llvm.aarch64.sve.uclamp.single.x4
    aarch64_sve_ucvtf,                         // llvm.aarch64.sve.ucvtf
    aarch64_sve_ucvtf_f16i32,                  // llvm.aarch64.sve.ucvtf.f16i32
    aarch64_sve_ucvtf_f16i64,                  // llvm.aarch64.sve.ucvtf.f16i64
    aarch64_sve_ucvtf_f32i64,                  // llvm.aarch64.sve.ucvtf.f32i64
    aarch64_sve_ucvtf_f64i32,                  // llvm.aarch64.sve.ucvtf.f64i32
    aarch64_sve_ucvtf_x2,                      // llvm.aarch64.sve.ucvtf.x2
    aarch64_sve_ucvtf_x4,                      // llvm.aarch64.sve.ucvtf.x4
    aarch64_sve_udiv,                          // llvm.aarch64.sve.udiv
    aarch64_sve_udiv_u,                        // llvm.aarch64.sve.udiv.u
    aarch64_sve_udivr,                         // llvm.aarch64.sve.udivr
    aarch64_sve_udot,                          // llvm.aarch64.sve.udot
    aarch64_sve_udot_lane,                     // llvm.aarch64.sve.udot.lane
    aarch64_sve_udot_lane_x2,                  // llvm.aarch64.sve.udot.lane.x2
    aarch64_sve_udot_x2,                       // llvm.aarch64.sve.udot.x2
    aarch64_sve_uhadd,                         // llvm.aarch64.sve.uhadd
    aarch64_sve_uhsub,                         // llvm.aarch64.sve.uhsub
    aarch64_sve_uhsubr,                        // llvm.aarch64.sve.uhsubr
    aarch64_sve_umax,                          // llvm.aarch64.sve.umax
    aarch64_sve_umax_single_x2,                // llvm.aarch64.sve.umax.single.x2
    aarch64_sve_umax_single_x4,                // llvm.aarch64.sve.umax.single.x4
    aarch64_sve_umax_u,                        // llvm.aarch64.sve.umax.u
    aarch64_sve_umax_x2,                       // llvm.aarch64.sve.umax.x2
    aarch64_sve_umax_x4,                       // llvm.aarch64.sve.umax.x4
    aarch64_sve_umaxp,                         // llvm.aarch64.sve.umaxp
    aarch64_sve_umaxv,                         // llvm.aarch64.sve.umaxv
    aarch64_sve_umin,                          // llvm.aarch64.sve.umin
    aarch64_sve_umin_single_x2,                // llvm.aarch64.sve.umin.single.x2
    aarch64_sve_umin_single_x4,                // llvm.aarch64.sve.umin.single.x4
    aarch64_sve_umin_u,                        // llvm.aarch64.sve.umin.u
    aarch64_sve_umin_x2,                       // llvm.aarch64.sve.umin.x2
    aarch64_sve_umin_x4,                       // llvm.aarch64.sve.umin.x4
    aarch64_sve_uminp,                         // llvm.aarch64.sve.uminp
    aarch64_sve_uminv,                         // llvm.aarch64.sve.uminv
    aarch64_sve_umlalb,                        // llvm.aarch64.sve.umlalb
    aarch64_sve_umlalb_lane,                   // llvm.aarch64.sve.umlalb.lane
    aarch64_sve_umlalt,                        // llvm.aarch64.sve.umlalt
    aarch64_sve_umlalt_lane,                   // llvm.aarch64.sve.umlalt.lane
    aarch64_sve_umlslb,                        // llvm.aarch64.sve.umlslb
    aarch64_sve_umlslb_lane,                   // llvm.aarch64.sve.umlslb.lane
    aarch64_sve_umlslt,                        // llvm.aarch64.sve.umlslt
    aarch64_sve_umlslt_lane,                   // llvm.aarch64.sve.umlslt.lane
    aarch64_sve_ummla,                         // llvm.aarch64.sve.ummla
    aarch64_sve_umulh,                         // llvm.aarch64.sve.umulh
    aarch64_sve_umulh_u,                       // llvm.aarch64.sve.umulh.u
    aarch64_sve_umullb,                        // llvm.aarch64.sve.umullb
    aarch64_sve_umullb_lane,                   // llvm.aarch64.sve.umullb.lane
    aarch64_sve_umullt,                        // llvm.aarch64.sve.umullt
    aarch64_sve_umullt_lane,                   // llvm.aarch64.sve.umullt.lane
    aarch64_sve_uqadd,                         // llvm.aarch64.sve.uqadd
    aarch64_sve_uqadd_x,                       // llvm.aarch64.sve.uqadd.x
    aarch64_sve_uqcvt_x2,                      // llvm.aarch64.sve.uqcvt.x2
    aarch64_sve_uqcvt_x4,                      // llvm.aarch64.sve.uqcvt.x4
    aarch64_sve_uqcvtn_x2,                     // llvm.aarch64.sve.uqcvtn.x2
    aarch64_sve_uqcvtn_x4,                     // llvm.aarch64.sve.uqcvtn.x4
    aarch64_sve_uqdecb_n32,                    // llvm.aarch64.sve.uqdecb.n32
    aarch64_sve_uqdecb_n64,                    // llvm.aarch64.sve.uqdecb.n64
    aarch64_sve_uqdecd,                        // llvm.aarch64.sve.uqdecd
    aarch64_sve_uqdecd_n32,                    // llvm.aarch64.sve.uqdecd.n32
    aarch64_sve_uqdecd_n64,                    // llvm.aarch64.sve.uqdecd.n64
    aarch64_sve_uqdech,                        // llvm.aarch64.sve.uqdech
    aarch64_sve_uqdech_n32,                    // llvm.aarch64.sve.uqdech.n32
    aarch64_sve_uqdech_n64,                    // llvm.aarch64.sve.uqdech.n64
    aarch64_sve_uqdecp,                        // llvm.aarch64.sve.uqdecp
    aarch64_sve_uqdecp_n32,                    // llvm.aarch64.sve.uqdecp.n32
    aarch64_sve_uqdecp_n64,                    // llvm.aarch64.sve.uqdecp.n64
    aarch64_sve_uqdecw,                        // llvm.aarch64.sve.uqdecw
    aarch64_sve_uqdecw_n32,                    // llvm.aarch64.sve.uqdecw.n32
    aarch64_sve_uqdecw_n64,                    // llvm.aarch64.sve.uqdecw.n64
    aarch64_sve_uqincb_n32,                    // llvm.aarch64.sve.uqincb.n32
    aarch64_sve_uqincb_n64,                    // llvm.aarch64.sve.uqincb.n64
    aarch64_sve_uqincd,                        // llvm.aarch64.sve.uqincd
    aarch64_sve_uqincd_n32,                    // llvm.aarch64.sve.uqincd.n32
    aarch64_sve_uqincd_n64,                    // llvm.aarch64.sve.uqincd.n64
    aarch64_sve_uqinch,                        // llvm.aarch64.sve.uqinch
    aarch64_sve_uqinch_n32,                    // llvm.aarch64.sve.uqinch.n32
    aarch64_sve_uqinch_n64,                    // llvm.aarch64.sve.uqinch.n64
    aarch64_sve_uqincp,                        // llvm.aarch64.sve.uqincp
    aarch64_sve_uqincp_n32,                    // llvm.aarch64.sve.uqincp.n32
    aarch64_sve_uqincp_n64,                    // llvm.aarch64.sve.uqincp.n64
    aarch64_sve_uqincw,                        // llvm.aarch64.sve.uqincw
    aarch64_sve_uqincw_n32,                    // llvm.aarch64.sve.uqincw.n32
    aarch64_sve_uqincw_n64,                    // llvm.aarch64.sve.uqincw.n64
    aarch64_sve_uqrshl,                        // llvm.aarch64.sve.uqrshl
    aarch64_sve_uqrshr_x2,                     // llvm.aarch64.sve.uqrshr.x2
    aarch64_sve_uqrshr_x4,                     // llvm.aarch64.sve.uqrshr.x4
    aarch64_sve_uqrshrn_x2,                    // llvm.aarch64.sve.uqrshrn.x2
    aarch64_sve_uqrshrn_x4,                    // llvm.aarch64.sve.uqrshrn.x4
    aarch64_sve_uqrshrnb,                      // llvm.aarch64.sve.uqrshrnb
    aarch64_sve_uqrshrnt,                      // llvm.aarch64.sve.uqrshrnt
    aarch64_sve_uqshl,                         // llvm.aarch64.sve.uqshl
    aarch64_sve_uqshrnb,                       // llvm.aarch64.sve.uqshrnb
    aarch64_sve_uqshrnt,                       // llvm.aarch64.sve.uqshrnt
    aarch64_sve_uqsub,                         // llvm.aarch64.sve.uqsub
    aarch64_sve_uqsub_u,                       // llvm.aarch64.sve.uqsub.u
    aarch64_sve_uqsub_x,                       // llvm.aarch64.sve.uqsub.x
    aarch64_sve_uqsubr,                        // llvm.aarch64.sve.uqsubr
    aarch64_sve_uqxtnb,                        // llvm.aarch64.sve.uqxtnb
    aarch64_sve_uqxtnt,                        // llvm.aarch64.sve.uqxtnt
    aarch64_sve_urecpe,                        // llvm.aarch64.sve.urecpe
    aarch64_sve_urhadd,                        // llvm.aarch64.sve.urhadd
    aarch64_sve_urshl,                         // llvm.aarch64.sve.urshl
    aarch64_sve_urshl_single_x2,               // llvm.aarch64.sve.urshl.single.x2
    aarch64_sve_urshl_single_x4,               // llvm.aarch64.sve.urshl.single.x4
    aarch64_sve_urshl_x2,                      // llvm.aarch64.sve.urshl.x2
    aarch64_sve_urshl_x4,                      // llvm.aarch64.sve.urshl.x4
    aarch64_sve_urshr,                         // llvm.aarch64.sve.urshr
    aarch64_sve_ursqrte,                       // llvm.aarch64.sve.ursqrte
    aarch64_sve_ursra,                         // llvm.aarch64.sve.ursra
    aarch64_sve_usdot,                         // llvm.aarch64.sve.usdot
    aarch64_sve_usdot_lane,                    // llvm.aarch64.sve.usdot.lane
    aarch64_sve_ushllb,                        // llvm.aarch64.sve.ushllb
    aarch64_sve_ushllt,                        // llvm.aarch64.sve.ushllt
    aarch64_sve_usmmla,                        // llvm.aarch64.sve.usmmla
    aarch64_sve_usqadd,                        // llvm.aarch64.sve.usqadd
    aarch64_sve_usra,                          // llvm.aarch64.sve.usra
    aarch64_sve_usublb,                        // llvm.aarch64.sve.usublb
    aarch64_sve_usublt,                        // llvm.aarch64.sve.usublt
    aarch64_sve_usubwb,                        // llvm.aarch64.sve.usubwb
    aarch64_sve_usubwt,                        // llvm.aarch64.sve.usubwt
    aarch64_sve_uunpk_x2,                      // llvm.aarch64.sve.uunpk.x2
    aarch64_sve_uunpk_x4,                      // llvm.aarch64.sve.uunpk.x4
    aarch64_sve_uunpkhi,                       // llvm.aarch64.sve.uunpkhi
    aarch64_sve_uunpklo,                       // llvm.aarch64.sve.uunpklo
    aarch64_sve_uxtb,                          // llvm.aarch64.sve.uxtb
    aarch64_sve_uxth,                          // llvm.aarch64.sve.uxth
    aarch64_sve_uxtw,                          // llvm.aarch64.sve.uxtw
    aarch64_sve_uzp_x2,                        // llvm.aarch64.sve.uzp.x2
    aarch64_sve_uzp_x4,                        // llvm.aarch64.sve.uzp.x4
    aarch64_sve_uzp1,                          // llvm.aarch64.sve.uzp1
    aarch64_sve_uzp1_b16,                      // llvm.aarch64.sve.uzp1.b16
    aarch64_sve_uzp1_b32,                      // llvm.aarch64.sve.uzp1.b32
    aarch64_sve_uzp1_b64,                      // llvm.aarch64.sve.uzp1.b64
    aarch64_sve_uzp1q,                         // llvm.aarch64.sve.uzp1q
    aarch64_sve_uzp2,                          // llvm.aarch64.sve.uzp2
    aarch64_sve_uzp2_b16,                      // llvm.aarch64.sve.uzp2.b16
    aarch64_sve_uzp2_b32,                      // llvm.aarch64.sve.uzp2.b32
    aarch64_sve_uzp2_b64,                      // llvm.aarch64.sve.uzp2.b64
    aarch64_sve_uzp2q,                         // llvm.aarch64.sve.uzp2q
    aarch64_sve_uzpq_x2,                       // llvm.aarch64.sve.uzpq.x2
    aarch64_sve_uzpq_x4,                       // llvm.aarch64.sve.uzpq.x4
    aarch64_sve_whilege,                       // llvm.aarch64.sve.whilege
    aarch64_sve_whilege_c16,                   // llvm.aarch64.sve.whilege.c16
    aarch64_sve_whilege_c32,                   // llvm.aarch64.sve.whilege.c32
    aarch64_sve_whilege_c64,                   // llvm.aarch64.sve.whilege.c64
    aarch64_sve_whilege_c8,                    // llvm.aarch64.sve.whilege.c8
    aarch64_sve_whilege_x2,                    // llvm.aarch64.sve.whilege.x2
    aarch64_sve_whilegt,                       // llvm.aarch64.sve.whilegt
    aarch64_sve_whilegt_c16,                   // llvm.aarch64.sve.whilegt.c16
    aarch64_sve_whilegt_c32,                   // llvm.aarch64.sve.whilegt.c32
    aarch64_sve_whilegt_c64,                   // llvm.aarch64.sve.whilegt.c64
    aarch64_sve_whilegt_c8,                    // llvm.aarch64.sve.whilegt.c8
    aarch64_sve_whilegt_x2,                    // llvm.aarch64.sve.whilegt.x2
    aarch64_sve_whilehi,                       // llvm.aarch64.sve.whilehi
    aarch64_sve_whilehi_c16,                   // llvm.aarch64.sve.whilehi.c16
    aarch64_sve_whilehi_c32,                   // llvm.aarch64.sve.whilehi.c32
    aarch64_sve_whilehi_c64,                   // llvm.aarch64.sve.whilehi.c64
    aarch64_sve_whilehi_c8,                    // llvm.aarch64.sve.whilehi.c8
    aarch64_sve_whilehi_x2,                    // llvm.aarch64.sve.whilehi.x2
    aarch64_sve_whilehs,                       // llvm.aarch64.sve.whilehs
    aarch64_sve_whilehs_c16,                   // llvm.aarch64.sve.whilehs.c16
    aarch64_sve_whilehs_c32,                   // llvm.aarch64.sve.whilehs.c32
    aarch64_sve_whilehs_c64,                   // llvm.aarch64.sve.whilehs.c64
    aarch64_sve_whilehs_c8,                    // llvm.aarch64.sve.whilehs.c8
    aarch64_sve_whilehs_x2,                    // llvm.aarch64.sve.whilehs.x2
    aarch64_sve_whilele,                       // llvm.aarch64.sve.whilele
    aarch64_sve_whilele_c16,                   // llvm.aarch64.sve.whilele.c16
    aarch64_sve_whilele_c32,                   // llvm.aarch64.sve.whilele.c32
    aarch64_sve_whilele_c64,                   // llvm.aarch64.sve.whilele.c64
    aarch64_sve_whilele_c8,                    // llvm.aarch64.sve.whilele.c8
    aarch64_sve_whilele_x2,                    // llvm.aarch64.sve.whilele.x2
    aarch64_sve_whilelo,                       // llvm.aarch64.sve.whilelo
    aarch64_sve_whilelo_c16,                   // llvm.aarch64.sve.whilelo.c16
    aarch64_sve_whilelo_c32,                   // llvm.aarch64.sve.whilelo.c32
    aarch64_sve_whilelo_c64,                   // llvm.aarch64.sve.whilelo.c64
    aarch64_sve_whilelo_c8,                    // llvm.aarch64.sve.whilelo.c8
    aarch64_sve_whilelo_x2,                    // llvm.aarch64.sve.whilelo.x2
    aarch64_sve_whilels,                       // llvm.aarch64.sve.whilels
    aarch64_sve_whilels_c16,                   // llvm.aarch64.sve.whilels.c16
    aarch64_sve_whilels_c32,                   // llvm.aarch64.sve.whilels.c32
    aarch64_sve_whilels_c64,                   // llvm.aarch64.sve.whilels.c64
    aarch64_sve_whilels_c8,                    // llvm.aarch64.sve.whilels.c8
    aarch64_sve_whilels_x2,                    // llvm.aarch64.sve.whilels.x2
    aarch64_sve_whilelt,                       // llvm.aarch64.sve.whilelt
    aarch64_sve_whilelt_c16,                   // llvm.aarch64.sve.whilelt.c16
    aarch64_sve_whilelt_c32,                   // llvm.aarch64.sve.whilelt.c32
    aarch64_sve_whilelt_c64,                   // llvm.aarch64.sve.whilelt.c64
    aarch64_sve_whilelt_c8,                    // llvm.aarch64.sve.whilelt.c8
    aarch64_sve_whilelt_x2,                    // llvm.aarch64.sve.whilelt.x2
    aarch64_sve_whilerw_b,                     // llvm.aarch64.sve.whilerw.b
    aarch64_sve_whilerw_d,                     // llvm.aarch64.sve.whilerw.d
    aarch64_sve_whilerw_h,                     // llvm.aarch64.sve.whilerw.h
    aarch64_sve_whilerw_s,                     // llvm.aarch64.sve.whilerw.s
    aarch64_sve_whilewr_b,                     // llvm.aarch64.sve.whilewr.b
    aarch64_sve_whilewr_d,                     // llvm.aarch64.sve.whilewr.d
    aarch64_sve_whilewr_h,                     // llvm.aarch64.sve.whilewr.h
    aarch64_sve_whilewr_s,                     // llvm.aarch64.sve.whilewr.s
    aarch64_sve_wrffr,                         // llvm.aarch64.sve.wrffr
    aarch64_sve_xar,                           // llvm.aarch64.sve.xar
    aarch64_sve_zip_x2,                        // llvm.aarch64.sve.zip.x2
    aarch64_sve_zip_x4,                        // llvm.aarch64.sve.zip.x4
    aarch64_sve_zip1,                          // llvm.aarch64.sve.zip1
    aarch64_sve_zip1_b16,                      // llvm.aarch64.sve.zip1.b16
    aarch64_sve_zip1_b32,                      // llvm.aarch64.sve.zip1.b32
    aarch64_sve_zip1_b64,                      // llvm.aarch64.sve.zip1.b64
    aarch64_sve_zip1q,                         // llvm.aarch64.sve.zip1q
    aarch64_sve_zip2,                          // llvm.aarch64.sve.zip2
    aarch64_sve_zip2_b16,                      // llvm.aarch64.sve.zip2.b16
    aarch64_sve_zip2_b32,                      // llvm.aarch64.sve.zip2.b32
    aarch64_sve_zip2_b64,                      // llvm.aarch64.sve.zip2.b64
    aarch64_sve_zip2q,                         // llvm.aarch64.sve.zip2q
    aarch64_sve_zipq_x2,                       // llvm.aarch64.sve.zipq.x2
    aarch64_sve_zipq_x4,                       // llvm.aarch64.sve.zipq.x4
    aarch64_tagp,                              // llvm.aarch64.tagp
    aarch64_tcancel,                           // llvm.aarch64.tcancel
    aarch64_tcommit,                           // llvm.aarch64.tcommit
    aarch64_tstart,                            // llvm.aarch64.tstart
    aarch64_ttest,                             // llvm.aarch64.ttest
    aarch64_udiv,                              // llvm.aarch64.udiv
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
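
// Editor's note: a minimal, hedged sketch of how the enum values above are
// consumed from C++. Each constant is an Intrinsic::ID, so it can be passed to
// IRBuilder::CreateIntrinsic to emit a call to the matching intrinsic. The ID
// Intrinsic::aarch64_sve_ptrue comes from this header; the function name, the
// predicate type, and the SV_ALL pattern immediate (31) are illustrative
// assumptions, not part of the generated file.
//
// #include "llvm/IR/DerivedTypes.h"
// #include "llvm/IR/IRBuilder.h"
// #include "llvm/IR/Intrinsics.h"
// #include "llvm/IR/IntrinsicsAArch64.h"
//
// using namespace llvm;
//
// // Emit `llvm.aarch64.sve.ptrue.nxv16i1(i32 31)`: an all-true SVE predicate.
// // The intrinsic is overloaded on its result type, so the scalable predicate
// // type is supplied explicitly as the overload type list.
// Value *emitAllTruePredicate(IRBuilder<> &Builder) {
//   Type *PredTy = ScalableVectorType::get(Builder.getInt1Ty(), 16);
//   return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredTy},
//                                  {Builder.getInt32(/*SV_ALL=*/31)});
// }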
IR/IntrinsicsLoongArch.h
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_LOONGARCH_ENUMS_H
#define LLVM_IR_INTRINSIC_LOONGARCH_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum LOONGARCHIntrinsics : unsigned {
// Enum values for intrinsics
    loongarch_asrtgt_d = 5178,                        // llvm.loongarch.asrtgt.d
    loongarch_asrtle_d,                        // llvm.loongarch.asrtle.d
    loongarch_break,                           // llvm.loongarch.break
    loongarch_cacop_d,                         // llvm.loongarch.cacop.d
    loongarch_cacop_w,                         // llvm.loongarch.cacop.w
    loongarch_cpucfg,                          // llvm.loongarch.cpucfg
    loongarch_crc_w_b_w,                       // llvm.loongarch.crc.w.b.w
    loongarch_crc_w_d_w,                       // llvm.loongarch.crc.w.d.w
    loongarch_crc_w_h_w,                       // llvm.loongarch.crc.w.h.w
    loongarch_crc_w_w_w,                       // llvm.loongarch.crc.w.w.w
    loongarch_crcc_w_b_w,                      // llvm.loongarch.crcc.w.b.w
    loongarch_crcc_w_d_w,                      // llvm.loongarch.crcc.w.d.w
    loongarch_crcc_w_h_w,                      // llvm.loongarch.crcc.w.h.w
    loongarch_crcc_w_w_w,                      // llvm.loongarch.crcc.w.w.w
    loongarch_csrrd_d,                         // llvm.loongarch.csrrd.d
    loongarch_csrrd_w,                         // llvm.loongarch.csrrd.w
    loongarch_csrwr_d,                         // llvm.loongarch.csrwr.d
    loongarch_csrwr_w,                         // llvm.loongarch.csrwr.w
    loongarch_csrxchg_d,                       // llvm.loongarch.csrxchg.d
    loongarch_csrxchg_w,                       // llvm.loongarch.csrxchg.w
    loongarch_dbar,                            // llvm.loongarch.dbar
    loongarch_ibar,                            // llvm.loongarch.ibar
    loongarch_iocsrrd_b,                       // llvm.loongarch.iocsrrd.b
    loongarch_iocsrrd_d,                       // llvm.loongarch.iocsrrd.d
    loongarch_iocsrrd_h,                       // llvm.loongarch.iocsrrd.h
    loongarch_iocsrrd_w,                       // llvm.loongarch.iocsrrd.w
    loongarch_iocsrwr_b,                       // llvm.loongarch.iocsrwr.b
    loongarch_iocsrwr_d,                       // llvm.loongarch.iocsrwr.d
    loongarch_iocsrwr_h,                       // llvm.loongarch.iocsrwr.h
    loongarch_iocsrwr_w,                       // llvm.loongarch.iocsrwr.w
    loongarch_lddir_d,                         // llvm.loongarch.lddir.d
    loongarch_ldpte_d,                         // llvm.loongarch.ldpte.d
    loongarch_masked_atomicrmw_add_i32,        // llvm.loongarch.masked.atomicrmw.add.i32
    loongarch_masked_atomicrmw_add_i64,        // llvm.loongarch.masked.atomicrmw.add.i64
    loongarch_masked_atomicrmw_max_i64,        // llvm.loongarch.masked.atomicrmw.max.i64
    loongarch_masked_atomicrmw_min_i64,        // llvm.loongarch.masked.atomicrmw.min.i64
    loongarch_masked_atomicrmw_nand_i32,       // llvm.loongarch.masked.atomicrmw.nand.i32
    loongarch_masked_atomicrmw_nand_i64,       // llvm.loongarch.masked.atomicrmw.nand.i64
    loongarch_masked_atomicrmw_sub_i32,        // llvm.loongarch.masked.atomicrmw.sub.i32
    loongarch_masked_atomicrmw_sub_i64,        // llvm.loongarch.masked.atomicrmw.sub.i64
    loongarch_masked_atomicrmw_umax_i32,       // llvm.loongarch.masked.atomicrmw.umax.i32
    loongarch_masked_atomicrmw_umax_i64,       // llvm.loongarch.masked.atomicrmw.umax.i64
    loongarch_masked_atomicrmw_umin_i32,       // llvm.loongarch.masked.atomicrmw.umin.i32
    loongarch_masked_atomicrmw_umin_i64,       // llvm.loongarch.masked.atomicrmw.umin.i64
    loongarch_masked_atomicrmw_xchg_i32,       // llvm.loongarch.masked.atomicrmw.xchg.i32
    loongarch_masked_atomicrmw_xchg_i64,       // llvm.loongarch.masked.atomicrmw.xchg.i64
    loongarch_masked_cmpxchg_i64,              // llvm.loongarch.masked.cmpxchg.i64
    loongarch_movfcsr2gr,                      // llvm.loongarch.movfcsr2gr
    loongarch_movgr2fcsr,                      // llvm.loongarch.movgr2fcsr
    loongarch_syscall,                         // llvm.loongarch.syscall
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
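
// Editor's note: a small hedged sketch of consuming the LoongArch enum above.
// llvm.loongarch.dbar is not overloaded, so Intrinsic::getDeclaration needs no
// overload type list; the i32 hint operand (0, conventionally a full barrier)
// is an assumption for illustration and is not defined by this header.
//
// #include "llvm/IR/IRBuilder.h"
// #include "llvm/IR/Intrinsics.h"
// #include "llvm/IR/IntrinsicsLoongArch.h"
//
// using namespace llvm;
//
// // Emit `call void @llvm.loongarch.dbar(i32 0)` at the builder's insertion
// // point, declaring the intrinsic in the module if it is not there yet.
// void emitDataBarrier(Module &M, IRBuilder<> &Builder) {
//   Function *Dbar = Intrinsic::getDeclaration(&M, Intrinsic::loongarch_dbar);
//   Builder.CreateCall(Dbar, {Builder.getInt32(0)});
// }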
IR/IntrinsicsWebAssembly.h
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_WASM_ENUMS_H
#define LLVM_IR_INTRINSIC_WASM_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum WASMIntrinsics : unsigned {
// Enum values for intrinsics
    wasm_alltrue = 10365,                              // llvm.wasm.alltrue
    wasm_anytrue,                              // llvm.wasm.anytrue
    wasm_avgr_unsigned,                        // llvm.wasm.avgr.unsigned
    wasm_bitmask,                              // llvm.wasm.bitmask
    wasm_bitselect,                            // llvm.wasm.bitselect
    wasm_catch,                                // llvm.wasm.catch
    wasm_dot,                                  // llvm.wasm.dot
    wasm_extadd_pairwise_signed,               // llvm.wasm.extadd.pairwise.signed
    wasm_extadd_pairwise_unsigned,             // llvm.wasm.extadd.pairwise.unsigned
    wasm_get_ehselector,                       // llvm.wasm.get.ehselector
    wasm_get_exception,                        // llvm.wasm.get.exception
    wasm_landingpad_index,                     // llvm.wasm.landingpad.index
    wasm_lsda,                                 // llvm.wasm.lsda
    wasm_memory_atomic_notify,                 // llvm.wasm.memory.atomic.notify
    wasm_memory_atomic_wait32,                 // llvm.wasm.memory.atomic.wait32
    wasm_memory_atomic_wait64,                 // llvm.wasm.memory.atomic.wait64
    wasm_memory_grow,                          // llvm.wasm.memory.grow
    wasm_memory_size,                          // llvm.wasm.memory.size
    wasm_narrow_signed,                        // llvm.wasm.narrow.signed
    wasm_narrow_unsigned,                      // llvm.wasm.narrow.unsigned
    wasm_pmax,                                 // llvm.wasm.pmax
    wasm_pmin,                                 // llvm.wasm.pmin
    wasm_q15mulr_sat_signed,                   // llvm.wasm.q15mulr.sat.signed
    wasm_ref_is_null_extern,                   // llvm.wasm.ref.is_null.extern
    wasm_ref_is_null_func,                     // llvm.wasm.ref.is_null.func
    wasm_ref_null_extern,                      // llvm.wasm.ref.null.extern
    wasm_ref_null_func,                        // llvm.wasm.ref.null.func
    wasm_relaxed_dot_bf16x8_add_f32,           // llvm.wasm.relaxed.dot.bf16x8.add.f32
    wasm_relaxed_dot_i8x16_i7x16_add_signed,   // llvm.wasm.relaxed.dot.i8x16.i7x16.add.signed
    wasm_relaxed_dot_i8x16_i7x16_signed,       // llvm.wasm.relaxed.dot.i8x16.i7x16.signed
    wasm_relaxed_laneselect,                   // llvm.wasm.relaxed.laneselect
    wasm_relaxed_madd,                         // llvm.wasm.relaxed.madd
    wasm_relaxed_max,                          // llvm.wasm.relaxed.max
    wasm_relaxed_min,                          // llvm.wasm.relaxed.min
    wasm_relaxed_nmadd,                        // llvm.wasm.relaxed.nmadd
    wasm_relaxed_q15mulr_signed,               // llvm.wasm.relaxed.q15mulr.signed
    wasm_relaxed_swizzle,                      // llvm.wasm.relaxed.swizzle
    wasm_relaxed_trunc_signed,                 // llvm.wasm.relaxed.trunc.signed
    wasm_relaxed_trunc_signed_zero,            // llvm.wasm.relaxed.trunc.signed.zero
    wasm_relaxed_trunc_unsigned,               // llvm.wasm.relaxed.trunc.unsigned
    wasm_relaxed_trunc_unsigned_zero,          // llvm.wasm.relaxed.trunc.unsigned.zero
    wasm_rethrow,                              // llvm.wasm.rethrow
    wasm_shuffle,                              // llvm.wasm.shuffle
    wasm_sub_sat_signed,                       // llvm.wasm.sub.sat.signed
    wasm_sub_sat_unsigned,                     // llvm.wasm.sub.sat.unsigned
    wasm_swizzle,                              // llvm.wasm.swizzle
    wasm_table_copy,                           // llvm.wasm.table.copy
    wasm_table_fill_externref,                 // llvm.wasm.table.fill.externref
    wasm_table_fill_funcref,                   // llvm.wasm.table.fill.funcref
    wasm_table_get_externref,                  // llvm.wasm.table.get.externref
    wasm_table_get_funcref,                    // llvm.wasm.table.get.funcref
    wasm_table_grow_externref,                 // llvm.wasm.table.grow.externref
    wasm_table_grow_funcref,                   // llvm.wasm.table.grow.funcref
    wasm_table_set_externref,                  // llvm.wasm.table.set.externref
    wasm_table_set_funcref,                    // llvm.wasm.table.set.funcref
    wasm_table_size,                           // llvm.wasm.table.size
    wasm_throw,                                // llvm.wasm.throw
    wasm_tls_align,                            // llvm.wasm.tls.align
    wasm_tls_base,                             // llvm.wasm.tls.base
    wasm_tls_size,                             // llvm.wasm.tls.size
    wasm_trunc_saturate_signed,                // llvm.wasm.trunc.saturate.signed
    wasm_trunc_saturate_unsigned,              // llvm.wasm.trunc.saturate.unsigned
    wasm_trunc_signed,                         // llvm.wasm.trunc.signed
    wasm_trunc_unsigned,                       // llvm.wasm.trunc.unsigned
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
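
These generated IDs are positional and can shift between LLVM builds, so client code should always use the symbolic names. As a minimal sketch of how such an ID is consumed (assuming the standard Intrinsic::getDeclaration API; the helper name emitWasmMemorySize is illustrative, not part of the header):

// Illustrative sketch, not part of the generated header.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/Module.h"

static llvm::Value *emitWasmMemorySize(llvm::Module &M,
                                       llvm::IRBuilder<> &Builder) {
  // llvm.wasm.memory.size is overloaded on its result type, so the desired
  // overload (i32 here) is supplied when looking up the declaration.
  llvm::Function *MemSize = llvm::Intrinsic::getDeclaration(
      &M, llvm::Intrinsic::wasm_memory_size, {Builder.getInt32Ty()});
  // The single operand selects the linear memory; 0 is the default memory.
  return Builder.CreateCall(MemSize, {Builder.getInt32(0)});
}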
name : IR/Argument.h
//===-- llvm/Argument.h - Definition of the Argument class ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the Argument class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ARGUMENT_H
#define LLVM_IR_ARGUMENT_H

#include "llvm/ADT/Twine.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Value.h"

namespace llvm {

/// This class represents an incoming formal argument to a Function. A formal
/// argument, since it is ``formal'', does not contain an actual value but
/// instead represents the type, argument number, and attributes of an argument
/// for a specific function. When used in the body of said function, the
/// argument of course represents the value of the actual argument that the
/// function was called with.
class Argument final : public Value {
  Function *Parent;
  unsigned ArgNo;

  friend class Function;
  void setParent(Function *parent);

public:
  /// Argument constructor.
  explicit Argument(Type *Ty, const Twine &Name = "", Function *F = nullptr,
                    unsigned ArgNo = 0);

  inline const Function *getParent() const { return Parent; }
  inline       Function *getParent()       { return Parent; }

  /// Return the index of this formal argument in its containing function.
  ///
  /// For example in "void foo(int a, float b)" a is 0 and b is 1.
  unsigned getArgNo() const {
    assert(Parent && "can't get number of unparented arg");
    return ArgNo;
  }

  /// Return true if this argument has the nonnull attribute. Also returns true
  /// if at least one byte is known to be dereferenceable and the pointer is in
  /// addrspace(0).
  /// If AllowUndefOrPoison is true, respect the semantics of the nonnull
  /// attribute and return true even if the argument can be undef or poison.
  bool hasNonNullAttr(bool AllowUndefOrPoison = true) const;

  /// If this argument has the dereferenceable attribute, return the number of
  /// bytes known to be dereferenceable. Otherwise, zero is returned.
  uint64_t getDereferenceableBytes() const;

  /// If this argument has the dereferenceable_or_null attribute, return the
  /// number of bytes known to be dereferenceable. Otherwise, zero is returned.
  uint64_t getDereferenceableOrNullBytes() const;

  /// If this argument has the nofpclass attribute, return the mask
  /// representing disallowed floating-point values. Otherwise, fcNone is
  /// returned.
  FPClassTest getNoFPClass() const;

  /// Return true if this argument has the byval attribute.
  bool hasByValAttr() const;

  /// Return true if this argument has the byref attribute.
  bool hasByRefAttr() const;

  /// Return true if this argument has the swiftself attribute.
  bool hasSwiftSelfAttr() const;

  /// Return true if this argument has the swifterror attribute.
  bool hasSwiftErrorAttr() const;

  /// Return true if this argument has the byval, inalloca, or preallocated
  /// attribute. These attributes represent arguments being passed by value,
  /// with an associated copy between the caller and callee.
  bool hasPassPointeeByValueCopyAttr() const;

  /// If hasPassPointeeByValueCopyAttr returns true, return the in-memory ABI
  /// size copied to the stack for the call. Otherwise, return 0.
  uint64_t getPassPointeeByValueCopySize(const DataLayout &DL) const;

  /// Return true if this argument has the byval, sret, inalloca, preallocated,
  /// or byref attribute. These attributes represent arguments being passed by
  /// value (which may or may not involve a stack copy).
  bool hasPointeeInMemoryValueAttr() const;

  /// If hasPointeeInMemoryValueAttr returns true, the in-memory ABI type is
  /// returned. Otherwise, nullptr is returned.
  Type *getPointeeInMemoryValueType() const;

  /// If this is a byval or inalloca argument, return its alignment.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getParamAlign() instead.
  LLVM_DEPRECATED("Use getParamAlign() instead", "getParamAlign")
  uint64_t getParamAlignment() const;

  /// If this is a byval or inalloca argument, return its alignment.
  MaybeAlign getParamAlign() const;

  MaybeAlign getParamStackAlign() const;

  /// If this is a byval argument, return its type.
  Type *getParamByValType() const;

  /// If this is an sret argument, return its type.
  Type *getParamStructRetType() const;

  /// If this is a byref argument, return its type.
  Type *getParamByRefType() const;

  /// If this is an inalloca argument, return its type.
  Type *getParamInAllocaType() const;

  /// Return true if this argument has the nest attribute.
  bool hasNestAttr() const;

  /// Return true if this argument has the noalias attribute.
  bool hasNoAliasAttr() const;

  /// Return true if this argument has the nocapture attribute.
  bool hasNoCaptureAttr() const;

  /// Return true if this argument has the nofree attribute.
  bool hasNoFreeAttr() const;

  /// Return true if this argument has the sret attribute.
  bool hasStructRetAttr() const;

  /// Return true if this argument has the inreg attribute.
  bool hasInRegAttr() const;

  /// Return true if this argument has the returned attribute.
  bool hasReturnedAttr() const;

  /// Return true if this argument has the readonly or readnone attribute.
  bool onlyReadsMemory() const;

  /// Return true if this argument has the inalloca attribute.
  bool hasInAllocaAttr() const;

  /// Return true if this argument has the preallocated attribute.
  bool hasPreallocatedAttr() const;

  /// Return true if this argument has the zext attribute.
  bool hasZExtAttr() const;

  /// Return true if this argument has the sext attribute.
  bool hasSExtAttr() const;

  /// Add attributes to an argument.
  void addAttrs(AttrBuilder &B);

  void addAttr(Attribute::AttrKind Kind);

  void addAttr(Attribute Attr);

  /// Remove attributes from an argument.
  void removeAttr(Attribute::AttrKind Kind);

  void removeAttrs(const AttributeMask &AM);

  /// Check if an argument has a given attribute.
  bool hasAttribute(Attribute::AttrKind Kind) const;

  Attribute getAttribute(Attribute::AttrKind Kind) const;

  /// Method for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Value *V) {
    return V->getValueID() == ArgumentVal;
  }
};

} // End llvm namespace

#endif
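
A minimal usage sketch for the attribute queries above (the helper countStackCopyBytes is illustrative, not part of the header): summing the stack bytes implied by arguments that are passed with an implicit copy.

// Illustrative sketch, not part of the header.
#include "llvm/IR/Argument.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include <cstdint>

// Sum the in-memory ABI sizes of all arguments that are passed by an
// implicit stack copy (byval, inalloca, or preallocated).
static uint64_t countStackCopyBytes(const llvm::Function &F,
                                    const llvm::DataLayout &DL) {
  uint64_t Bytes = 0;
  for (const llvm::Argument &A : F.args())
    // getPassPointeeByValueCopySize returns 0 for any other argument.
    if (A.hasPassPointeeByValueCopyAttr())
      Bytes += A.getPassPointeeByValueCopySize(DL);
  return Bytes;
}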
name : IR/InstVisitor.h
//===- InstVisitor.h - Instruction visitor templates ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//


#ifndef LLVM_IR_INSTVISITOR_H
#define LLVM_IR_INSTVISITOR_H

#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

namespace llvm {

// We operate on opaque instruction classes, so forward declare all instruction
// types now...
//
#define HANDLE_INST(NUM, OPCODE, CLASS)   class CLASS;
#include "llvm/IR/Instruction.def"

#define DELEGATE(CLASS_TO_VISIT) \
  return static_cast<SubClass*>(this)-> \
               visit##CLASS_TO_VISIT(static_cast<CLASS_TO_VISIT&>(I))


/// Base class for instruction visitors
///
/// Instruction visitors are used when you want to perform different actions
/// for different kinds of instructions without having to use lots of casts
/// and a big switch statement (in your code, that is).
///
/// To define your own visitor, inherit from this class, specifying your
/// new type for the 'SubClass' template parameter, and "override" visitXXX
/// functions in your class. I say "override" because this class is defined
/// in terms of statically resolved overloading, not virtual functions.
///
/// For example, here is a visitor that counts the number of alloca
/// instructions processed:
///
///  /// Declare the class.  Note that we derive from InstVisitor instantiated
///  /// with _our new subclass's_ type.
///  ///
///  struct CountAllocaVisitor : public InstVisitor<CountAllocaVisitor> {
///    unsigned Count;
///    CountAllocaVisitor() : Count(0) {}
///
///    void visitAllocaInst(AllocaInst &AI) { ++Count; }
///  };
///
///  And this class would be used like this:
///    CountAllocaVisitor CAV;
///    CAV.visit(function);
///    NumAllocas = CAV.Count;
///
/// The class so defined has 'visit' methods for Instruction, and also for
/// BasicBlock, Function, and Module, which recursively process all contained
/// instructions (a standalone usage sketch follows this header).
///
/// Note that if you don't implement visitXXX for some instruction type,
/// the visitXXX method for the instruction's superclass will be invoked. So
/// if instructions are added in the future, they will be automatically
/// supported, if you handle one of their superclasses.
///
/// The optional second template argument specifies the type that instruction
/// visitation functions should return. If you specify this, you *MUST* provide
/// an implementation of visitInstruction, though!
///
/// Note that this class is specifically designed as a template to avoid
/// virtual function call overhead.  Defining and using an InstVisitor is just
/// as efficient as having your own switch statement over the instruction
/// opcode.
template<typename SubClass, typename RetTy=void>
class InstVisitor {
  //===--------------------------------------------------------------------===//
  // Interface code - This is the public interface of the InstVisitor that you
  // use to visit instructions...
  //

public:
  // Generic visit method - Allow visitation to all instructions in a range
  template<class Iterator>
  void visit(Iterator Start, Iterator End) {
    while (Start != End)
      static_cast<SubClass*>(this)->visit(*Start++);
  }

  // Define visitors for functions and basic blocks...
  //
  void visit(Module &M) {
    static_cast<SubClass*>(this)->visitModule(M);
    visit(M.begin(), M.end());
  }
  void visit(Function &F) {
    static_cast<SubClass*>(this)->visitFunction(F);
    visit(F.begin(), F.end());
  }
  void visit(BasicBlock &BB) {
    static_cast<SubClass*>(this)->visitBasicBlock(BB);
    visit(BB.begin(), BB.end());
  }

  // Forwarding functions so that the user can visit with pointers AND refs.
  void visit(Module       *M)  { visit(*M); }
  void visit(Function     *F)  { visit(*F); }
  void visit(BasicBlock   *BB) { visit(*BB); }
  RetTy visit(Instruction *I)  { return visit(*I); }

  // visit - Finally, code to visit an instruction...
  //
  RetTy visit(Instruction &I) {
    static_assert(std::is_base_of<InstVisitor, SubClass>::value,
                  "Must pass the derived type to this template!");

    switch (I.getOpcode()) {
    default: llvm_unreachable("Unknown instruction type encountered!");
      // Build the switch statement using the Instruction.def file...
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return \
           static_cast<SubClass*>(this)-> \
                      visit##OPCODE(static_cast<CLASS&>(I));
#include "llvm/IR/Instruction.def"
    }
  }

  //===--------------------------------------------------------------------===//
  // Visitation functions... these functions provide default fallbacks in case
  // the user does not specify what to do for a particular instruction type.
  // The default behavior is to generalize the instruction type to its subtype
  // and try visiting the subtype.  All of this should be inlined perfectly,
  // because there are no virtual functions to get in the way.
  //

  // When visiting a module, function or basic block directly, these methods get
  // called to indicate when transitioning into a new unit.
  //
  void visitModule    (Module &M) {}
  void visitFunction  (Function &F) {}
  void visitBasicBlock(BasicBlock &BB) {}

  // Define instruction specific visitor functions that can be overridden to
  // handle SPECIFIC instructions.  For instance, these functions automatically
  // define visitMul to proxy to visitBinaryOperator in case the user does not
  // need this generality.
  //
  // These functions can also implement fan-out, when a single opcode and
  // instruction have multiple more specific Instruction subclasses. The Call
  // instruction currently supports this. We implement that by redirecting that
  // instruction to a special delegation helper.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    RetTy visit##OPCODE(CLASS &I) { \
      if (NUM == Instruction::Call) \
        return delegateCallInst(I); \
      else \
        DELEGATE(CLASS); \
    }
#include "llvm/IR/Instruction.def"

  // Specific Instruction type classes... note that all of the casts are
  // necessary because we use the instruction classes as opaque types...
  //
  RetTy visitICmpInst(ICmpInst &I)                { DELEGATE(CmpInst);}
  RetTy visitFCmpInst(FCmpInst &I)                { DELEGATE(CmpInst);}
  RetTy visitAllocaInst(AllocaInst &I)            { DELEGATE(UnaryInstruction);}
  RetTy visitLoadInst(LoadInst     &I)            { DELEGATE(UnaryInstruction);}
  RetTy visitStoreInst(StoreInst   &I)            { DELEGATE(Instruction);}
  RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
  RetTy visitAtomicRMWInst(AtomicRMWInst &I)      { DELEGATE(Instruction);}
  RetTy visitFenceInst(FenceInst   &I)            { DELEGATE(Instruction);}
  RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction);}
  RetTy visitPHINode(PHINode       &I)            { DELEGATE(Instruction);}
  RetTy visitTruncInst(TruncInst &I)              { DELEGATE(CastInst);}
  RetTy visitZExtInst(ZExtInst &I)                { DELEGATE(CastInst);}
  RetTy visitSExtInst(SExtInst &I)                { DELEGATE(CastInst);}
  RetTy visitFPTruncInst(FPTruncInst &I)          { DELEGATE(CastInst);}
  RetTy visitFPExtInst(FPExtInst &I)              { DELEGATE(CastInst);}
  RetTy visitFPToUIInst(FPToUIInst &I)            { DELEGATE(CastInst);}
  RetTy visitFPToSIInst(FPToSIInst &I)            { DELEGATE(CastInst);}
  RetTy visitUIToFPInst(UIToFPInst &I)            { DELEGATE(CastInst);}
  RetTy visitSIToFPInst(SIToFPInst &I)            { DELEGATE(CastInst);}
  RetTy visitPtrToIntInst(PtrToIntInst &I)        { DELEGATE(CastInst);}
  RetTy visitIntToPtrInst(IntToPtrInst &I)        { DELEGATE(CastInst);}
  RetTy visitBitCastInst(BitCastInst &I)          { DELEGATE(CastInst);}
  RetTy visitAddrSpaceCastInst(AddrSpaceCastInst &I) { DELEGATE(CastInst);}
  RetTy visitSelectInst(SelectInst &I)            { DELEGATE(Instruction);}
  RetTy visitVAArgInst(VAArgInst   &I)            { DELEGATE(UnaryInstruction);}
  RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);}
  RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction);}
  RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction);}
  RetTy visitExtractValueInst(ExtractValueInst &I){ DELEGATE(UnaryInstruction);}
  RetTy visitInsertValueInst(InsertValueInst &I)  { DELEGATE(Instruction); }
  RetTy visitLandingPadInst(LandingPadInst &I)    { DELEGATE(Instruction); }
  RetTy visitFuncletPadInst(FuncletPadInst &I) { DELEGATE(Instruction); }
  RetTy visitCleanupPadInst(CleanupPadInst &I) { DELEGATE(FuncletPadInst); }
  RetTy visitCatchPadInst(CatchPadInst &I)     { DELEGATE(FuncletPadInst); }
  RetTy visitFreezeInst(FreezeInst &I)         { DELEGATE(Instruction); }

  // Handle the special intrinsic instruction classes.
  RetTy visitDbgDeclareInst(DbgDeclareInst &I)    { DELEGATE(DbgVariableIntrinsic);}
  RetTy visitDbgValueInst(DbgValueInst &I)        { DELEGATE(DbgVariableIntrinsic);}
  RetTy visitDbgVariableIntrinsic(DbgVariableIntrinsic &I)
                                                  { DELEGATE(DbgInfoIntrinsic);}
  RetTy visitDbgLabelInst(DbgLabelInst &I)        { DELEGATE(DbgInfoIntrinsic);}
  RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
  RetTy visitMemSetInst(MemSetInst &I)            { DELEGATE(MemIntrinsic); }
  RetTy visitMemSetInlineInst(MemSetInlineInst &I){ DELEGATE(MemSetInst); }
  RetTy visitMemCpyInst(MemCpyInst &I)            { DELEGATE(MemTransferInst); }
  RetTy visitMemCpyInlineInst(MemCpyInlineInst &I){ DELEGATE(MemCpyInst); }
  RetTy visitMemMoveInst(MemMoveInst &I)          { DELEGATE(MemTransferInst); }
  RetTy visitMemTransferInst(MemTransferInst &I)  { DELEGATE(MemIntrinsic); }
  RetTy visitMemIntrinsic(MemIntrinsic &I)        { DELEGATE(IntrinsicInst); }
  RetTy visitVAStartInst(VAStartInst &I)          { DELEGATE(IntrinsicInst); }
  RetTy visitVAEndInst(VAEndInst &I)              { DELEGATE(IntrinsicInst); }
  RetTy visitVACopyInst(VACopyInst &I)            { DELEGATE(IntrinsicInst); }
  RetTy visitIntrinsicInst(IntrinsicInst &I)      { DELEGATE(CallInst); }
  RetTy visitCallInst(CallInst &I)                { DELEGATE(CallBase); }
  RetTy visitInvokeInst(InvokeInst &I)            { DELEGATE(CallBase); }
  RetTy visitCallBrInst(CallBrInst &I)            { DELEGATE(CallBase); }

  // While terminators don't have a distinct type modeling them, we support
  // intercepting them with a dedicated visitor callback.
  RetTy visitReturnInst(ReturnInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitBranchInst(BranchInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitSwitchInst(SwitchInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitIndirectBrInst(IndirectBrInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitResumeInst(ResumeInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitUnreachableInst(UnreachableInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitCleanupReturnInst(CleanupReturnInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitCatchReturnInst(CatchReturnInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitCatchSwitchInst(CatchSwitchInst &I) {
    return static_cast<SubClass *>(this)->visitTerminator(I);
  }
  RetTy visitTerminator(Instruction &I)    { DELEGATE(Instruction);}

  // Next level propagators: If the user does not overload a specific
  // instruction type, they can overload one of these to get the whole class
  // of instructions...
  //
  RetTy visitCastInst(CastInst &I)                { DELEGATE(UnaryInstruction);}
  RetTy visitUnaryOperator(UnaryOperator &I)      { DELEGATE(UnaryInstruction);}
  RetTy visitBinaryOperator(BinaryOperator &I)    { DELEGATE(Instruction);}
  RetTy visitCmpInst(CmpInst &I)                  { DELEGATE(Instruction);}
  RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}

  // The next level delegation for `CallBase` is slightly more complex in order
  // to support visiting cases where the call is also a terminator.
  RetTy visitCallBase(CallBase &I) {
    if (isa<InvokeInst>(I) || isa<CallBrInst>(I))
      return static_cast<SubClass *>(this)->visitTerminator(I);

    DELEGATE(Instruction);
  }

  // If the user wants a 'default' case, they can choose to override this
  // function.  If this function is not overloaded in the user's subclass, then
  // this instruction just gets ignored.
  //
  // Note that you MUST override this function if your return type is not void.
  //
  void visitInstruction(Instruction &I) {}  // Ignore unhandled instructions

private:
  // Special helper function to delegate to CallInst subclass visitors.
  RetTy delegateCallInst(CallInst &I) {
    if (const Function *F = I.getCalledFunction()) {
      switch (F->getIntrinsicID()) {
      default:                     DELEGATE(IntrinsicInst);
      case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
      case Intrinsic::dbg_value:   DELEGATE(DbgValueInst);
      case Intrinsic::dbg_label:   DELEGATE(DbgLabelInst);
      case Intrinsic::memcpy:      DELEGATE(MemCpyInst);
      case Intrinsic::memcpy_inline:
        DELEGATE(MemCpyInlineInst);
      case Intrinsic::memmove:     DELEGATE(MemMoveInst);
      case Intrinsic::memset:      DELEGATE(MemSetInst);
      case Intrinsic::memset_inline:
        DELEGATE(MemSetInlineInst);
      case Intrinsic::vastart:     DELEGATE(VAStartInst);
      case Intrinsic::vaend:       DELEGATE(VAEndInst);
      case Intrinsic::vacopy:      DELEGATE(VACopyInst);
      case Intrinsic::not_intrinsic: break;
      }
    }
    DELEGATE(CallInst);
  }

  // An overload that will never actually be called; it is used only from dead
  // code in the dispatching from opcodes to instruction subclasses.
  RetTy delegateCallInst(Instruction &I) {
    llvm_unreachable("delegateCallInst called for non-CallInst");
  }
};

#undef DELEGATE

} // End llvm namespace

#endif
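
The standalone usage sketch promised above (OpcodeCounter is illustrative, not an LLVM class): a visitor that tallies a few instruction kinds through the static dispatch this header implements.

// Illustrative sketch, not part of the header.
#include "llvm/IR/Function.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"

namespace {
// Counts loads, stores, and calls; anything else falls through to the
// default visitInstruction, which ignores the instruction.
struct OpcodeCounter : llvm::InstVisitor<OpcodeCounter> {
  unsigned Loads = 0, Stores = 0, Calls = 0;

  void visitLoadInst(llvm::LoadInst &) { ++Loads; }
  void visitStoreInst(llvm::StoreInst &) { ++Stores; }
  // Because of delegateCallInst, intrinsic calls also reach visitCallInst
  // unless a more specific visitor (e.g. visitMemCpyInst) intercepts them.
  void visitCallInst(llvm::CallInst &) { ++Calls; }
};
} // end anonymous namespace

static unsigned countMemoryTraffic(llvm::Function &F) {
  OpcodeCounter C;
  C.visit(F); // visitFunction, then every basic block and instruction.
  return C.Loads + C.Stores;
}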
name : IR/Verifier.h
//===- Verifier.h - LLVM IR Verifier ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the function verifier interface, that can be used for
// validation checking of input to the system, and for checking that
// transformations haven't done something bad.
//
// Note that this does not provide full 'Java-style' security verification;
// instead it just tries to ensure that code is well formed.
//
// To see what specifically is checked, look at the top of Verifier.cpp
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VERIFIER_H
#define LLVM_IR_VERIFIER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/PassManager.h"
#include <utility>

namespace llvm {

class APInt;
class Function;
class FunctionPass;
class Instruction;
class MDNode;
class Module;
class raw_ostream;
struct VerifierSupport;

/// Verify that TBAA metadata is valid.
class TBAAVerifier {
  VerifierSupport *Diagnostic = nullptr;

  /// Helper to diagnose a failure
  template <typename... Tys> void CheckFailed(Tys &&... Args);

  /// Cache of TBAA base nodes that have already been visited.  This cache maps
  /// a node that has been visited to a pair (IsInvalid, BitWidth) where
  ///
  ///  \c IsInvalid is true iff the node is invalid.
  ///  \c BitWidth, if non-zero, is the bitwidth of the integer used to denote
  ///    the offset of the access.  If zero, only a zero offset is allowed.
  ///
  /// \c BitWidth has no meaning if \c IsInvalid is true.
  using TBAABaseNodeSummary = std::pair<bool, unsigned>;
  DenseMap<const MDNode *, TBAABaseNodeSummary> TBAABaseNodes;

  /// Maps an alleged scalar TBAA node to true if it is a valid scalar TBAA
  /// node and to false otherwise.
  DenseMap<const MDNode *, bool> TBAAScalarNodes;

  /// \name Helper functions used by \c visitTBAAMetadata.
  /// @{
  MDNode *getFieldNodeFromTBAABaseNode(Instruction &I, const MDNode *BaseNode,
                                       APInt &Offset, bool IsNewFormat);
  TBAAVerifier::TBAABaseNodeSummary verifyTBAABaseNode(Instruction &I,
                                                       const MDNode *BaseNode,
                                                       bool IsNewFormat);
  TBAABaseNodeSummary verifyTBAABaseNodeImpl(Instruction &I,
                                             const MDNode *BaseNode,
                                             bool IsNewFormat);

  bool isValidScalarTBAANode(const MDNode *MD);
  /// @}

public:
  TBAAVerifier(VerifierSupport *Diagnostic = nullptr)
      : Diagnostic(Diagnostic) {}
  /// Visit an instruction and return true if it is valid; return false if
  /// invalid TBAA metadata is attached.
  bool visitTBAAMetadata(Instruction &I, const MDNode *MD);
};

/// Check a function for errors, useful when debugging a pass.
///
/// If there are no errors, the function returns false. If an error is found,
/// a message describing the error is written to OS (if non-null) and true is
/// returned.
bool verifyFunction(const Function &F, raw_ostream *OS = nullptr);

/// Check a module for errors.
///
/// If there are no errors, the function returns false. If an error is
/// found, a message describing the error is written to OS (if
/// non-null) and true is returned.
///
/// \return true if the module is broken. If BrokenDebugInfo is
/// supplied, DebugInfo verification failures won't be considered an
/// error and instead *BrokenDebugInfo will be set to true. Debug
/// info errors can be "recovered" from by stripping the debug info.
bool verifyModule(const Module &M, raw_ostream *OS = nullptr,
                  bool *BrokenDebugInfo = nullptr);

/// Create a verifier pass for the legacy pass manager.
///
/// Check a module or function for validity. This is essentially a pass wrapped
/// around the above verifyFunction and verifyModule routines. When the pass
/// detects a verification error, the error is printed to stderr; by default
/// such errors are fatal. You can override that by passing \c false for
/// \p FatalErrors.
///
/// Note that this has nothing to do with \c VerifierPass below, which serves
/// the new pass manager.
FunctionPass *createVerifierPass(bool FatalErrors = true);

/// Check a module for errors, and report separate error states for IR
/// and debug info errors.
class VerifierAnalysis : public AnalysisInfoMixin<VerifierAnalysis> {
  friend AnalysisInfoMixin<VerifierAnalysis>;

  static AnalysisKey Key;

public:
  struct Result {
    bool IRBroken, DebugInfoBroken;
  };

  Result run(Module &M, ModuleAnalysisManager &);
  Result run(Function &F, FunctionAnalysisManager &);
  static bool isRequired() { return true; }
};

/// Verifier pass for the new pass manager.
///
/// Check a module or function for validity, wrapping the verifyFunction and
/// verifyModule routines above. Verification errors are printed to stderr and
/// are fatal by default; pass \c false for \p FatalErrors to continue after
/// an error.
class VerifierPass : public PassInfoMixin<VerifierPass> {
  bool FatalErrors;

public:
  explicit VerifierPass(bool FatalErrors = true) : FatalErrors(FatalErrors) {}

  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
  static bool isRequired() { return true; }
};

} // end namespace llvm

#endif // LLVM_IR_VERIFIER_H
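
A minimal sketch of the recovery pattern described in the verifyModule comment (the checkModule helper is illustrative; StripDebugInfo is declared in llvm/IR/DebugInfo.h):

// Illustrative sketch, not part of the header.
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

// Returns true if the module is usable, stripping debug info when it is the
// only thing that is broken.
static bool checkModule(llvm::Module &M) {
  bool BrokenDebugInfo = false;
  // verifyModule returns true on error; with BrokenDebugInfo supplied,
  // debug-info failures set the flag instead of counting as errors.
  if (llvm::verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
    return false;
  if (BrokenDebugInfo)
    llvm::StripDebugInfo(M); // Recover by dropping the malformed debug info.
  return true;
}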
name : IR/IntrinsicsHexagon.h
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_HEXAGON_ENUMS_H
#define LLVM_IR_INTRINSIC_HEXAGON_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum HEXAGONIntrinsics : unsigned {
// Enum values for intrinsics
    hexagon_A2_abs = 3169,                            // llvm.hexagon.A2.abs
    hexagon_A2_absp,                           // llvm.hexagon.A2.absp
    hexagon_A2_abssat,                         // llvm.hexagon.A2.abssat
    hexagon_A2_add,                            // llvm.hexagon.A2.add
    hexagon_A2_addh_h16_hh,                    // llvm.hexagon.A2.addh.h16.hh
    hexagon_A2_addh_h16_hl,                    // llvm.hexagon.A2.addh.h16.hl
    hexagon_A2_addh_h16_lh,                    // llvm.hexagon.A2.addh.h16.lh
    hexagon_A2_addh_h16_ll,                    // llvm.hexagon.A2.addh.h16.ll
    hexagon_A2_addh_h16_sat_hh,                // llvm.hexagon.A2.addh.h16.sat.hh
    hexagon_A2_addh_h16_sat_hl,                // llvm.hexagon.A2.addh.h16.sat.hl
    hexagon_A2_addh_h16_sat_lh,                // llvm.hexagon.A2.addh.h16.sat.lh
    hexagon_A2_addh_h16_sat_ll,                // llvm.hexagon.A2.addh.h16.sat.ll
    hexagon_A2_addh_l16_hl,                    // llvm.hexagon.A2.addh.l16.hl
    hexagon_A2_addh_l16_ll,                    // llvm.hexagon.A2.addh.l16.ll
    hexagon_A2_addh_l16_sat_hl,                // llvm.hexagon.A2.addh.l16.sat.hl
    hexagon_A2_addh_l16_sat_ll,                // llvm.hexagon.A2.addh.l16.sat.ll
    hexagon_A2_addi,                           // llvm.hexagon.A2.addi
    hexagon_A2_addp,                           // llvm.hexagon.A2.addp
    hexagon_A2_addpsat,                        // llvm.hexagon.A2.addpsat
    hexagon_A2_addsat,                         // llvm.hexagon.A2.addsat
    hexagon_A2_addsp,                          // llvm.hexagon.A2.addsp
    hexagon_A2_and,                            // llvm.hexagon.A2.and
    hexagon_A2_andir,                          // llvm.hexagon.A2.andir
    hexagon_A2_andp,                           // llvm.hexagon.A2.andp
    hexagon_A2_aslh,                           // llvm.hexagon.A2.aslh
    hexagon_A2_asrh,                           // llvm.hexagon.A2.asrh
    hexagon_A2_combine_hh,                     // llvm.hexagon.A2.combine.hh
    hexagon_A2_combine_hl,                     // llvm.hexagon.A2.combine.hl
    hexagon_A2_combine_lh,                     // llvm.hexagon.A2.combine.lh
    hexagon_A2_combine_ll,                     // llvm.hexagon.A2.combine.ll
    hexagon_A2_combineii,                      // llvm.hexagon.A2.combineii
    hexagon_A2_combinew,                       // llvm.hexagon.A2.combinew
    hexagon_A2_max,                            // llvm.hexagon.A2.max
    hexagon_A2_maxp,                           // llvm.hexagon.A2.maxp
    hexagon_A2_maxu,                           // llvm.hexagon.A2.maxu
    hexagon_A2_maxup,                          // llvm.hexagon.A2.maxup
    hexagon_A2_min,                            // llvm.hexagon.A2.min
    hexagon_A2_minp,                           // llvm.hexagon.A2.minp
    hexagon_A2_minu,                           // llvm.hexagon.A2.minu
    hexagon_A2_minup,                          // llvm.hexagon.A2.minup
    hexagon_A2_neg,                            // llvm.hexagon.A2.neg
    hexagon_A2_negp,                           // llvm.hexagon.A2.negp
    hexagon_A2_negsat,                         // llvm.hexagon.A2.negsat
    hexagon_A2_not,                            // llvm.hexagon.A2.not
    hexagon_A2_notp,                           // llvm.hexagon.A2.notp
    hexagon_A2_or,                             // llvm.hexagon.A2.or
    hexagon_A2_orir,                           // llvm.hexagon.A2.orir
    hexagon_A2_orp,                            // llvm.hexagon.A2.orp
    hexagon_A2_roundsat,                       // llvm.hexagon.A2.roundsat
    hexagon_A2_sat,                            // llvm.hexagon.A2.sat
    hexagon_A2_satb,                           // llvm.hexagon.A2.satb
    hexagon_A2_sath,                           // llvm.hexagon.A2.sath
    hexagon_A2_satub,                          // llvm.hexagon.A2.satub
    hexagon_A2_satuh,                          // llvm.hexagon.A2.satuh
    hexagon_A2_sub,                            // llvm.hexagon.A2.sub
    hexagon_A2_subh_h16_hh,                    // llvm.hexagon.A2.subh.h16.hh
    hexagon_A2_subh_h16_hl,                    // llvm.hexagon.A2.subh.h16.hl
    hexagon_A2_subh_h16_lh,                    // llvm.hexagon.A2.subh.h16.lh
    hexagon_A2_subh_h16_ll,                    // llvm.hexagon.A2.subh.h16.ll
    hexagon_A2_subh_h16_sat_hh,                // llvm.hexagon.A2.subh.h16.sat.hh
    hexagon_A2_subh_h16_sat_hl,                // llvm.hexagon.A2.subh.h16.sat.hl
    hexagon_A2_subh_h16_sat_lh,                // llvm.hexagon.A2.subh.h16.sat.lh
    hexagon_A2_subh_h16_sat_ll,                // llvm.hexagon.A2.subh.h16.sat.ll
    hexagon_A2_subh_l16_hl,                    // llvm.hexagon.A2.subh.l16.hl
    hexagon_A2_subh_l16_ll,                    // llvm.hexagon.A2.subh.l16.ll
    hexagon_A2_subh_l16_sat_hl,                // llvm.hexagon.A2.subh.l16.sat.hl
    hexagon_A2_subh_l16_sat_ll,                // llvm.hexagon.A2.subh.l16.sat.ll
    hexagon_A2_subp,                           // llvm.hexagon.A2.subp
    hexagon_A2_subri,                          // llvm.hexagon.A2.subri
    hexagon_A2_subsat,                         // llvm.hexagon.A2.subsat
    hexagon_A2_svaddh,                         // llvm.hexagon.A2.svaddh
    hexagon_A2_svaddhs,                        // llvm.hexagon.A2.svaddhs
    hexagon_A2_svadduhs,                       // llvm.hexagon.A2.svadduhs
    hexagon_A2_svavgh,                         // llvm.hexagon.A2.svavgh
    hexagon_A2_svavghs,                        // llvm.hexagon.A2.svavghs
    hexagon_A2_svnavgh,                        // llvm.hexagon.A2.svnavgh
    hexagon_A2_svsubh,                         // llvm.hexagon.A2.svsubh
    hexagon_A2_svsubhs,                        // llvm.hexagon.A2.svsubhs
    hexagon_A2_svsubuhs,                       // llvm.hexagon.A2.svsubuhs
    hexagon_A2_swiz,                           // llvm.hexagon.A2.swiz
    hexagon_A2_sxtb,                           // llvm.hexagon.A2.sxtb
    hexagon_A2_sxth,                           // llvm.hexagon.A2.sxth
    hexagon_A2_sxtw,                           // llvm.hexagon.A2.sxtw
    hexagon_A2_tfr,                            // llvm.hexagon.A2.tfr
    hexagon_A2_tfrih,                          // llvm.hexagon.A2.tfrih
    hexagon_A2_tfril,                          // llvm.hexagon.A2.tfril
    hexagon_A2_tfrp,                           // llvm.hexagon.A2.tfrp
    hexagon_A2_tfrpi,                          // llvm.hexagon.A2.tfrpi
    hexagon_A2_tfrsi,                          // llvm.hexagon.A2.tfrsi
    hexagon_A2_vabsh,                          // llvm.hexagon.A2.vabsh
    hexagon_A2_vabshsat,                       // llvm.hexagon.A2.vabshsat
    hexagon_A2_vabsw,                          // llvm.hexagon.A2.vabsw
    hexagon_A2_vabswsat,                       // llvm.hexagon.A2.vabswsat
    hexagon_A2_vaddb_map,                      // llvm.hexagon.A2.vaddb.map
    hexagon_A2_vaddh,                          // llvm.hexagon.A2.vaddh
    hexagon_A2_vaddhs,                         // llvm.hexagon.A2.vaddhs
    hexagon_A2_vaddub,                         // llvm.hexagon.A2.vaddub
    hexagon_A2_vaddubs,                        // llvm.hexagon.A2.vaddubs
    hexagon_A2_vadduhs,                        // llvm.hexagon.A2.vadduhs
    hexagon_A2_vaddw,                          // llvm.hexagon.A2.vaddw
    hexagon_A2_vaddws,                         // llvm.hexagon.A2.vaddws
    hexagon_A2_vavgh,                          // llvm.hexagon.A2.vavgh
    hexagon_A2_vavghcr,                        // llvm.hexagon.A2.vavghcr
    hexagon_A2_vavghr,                         // llvm.hexagon.A2.vavghr
    hexagon_A2_vavgub,                         // llvm.hexagon.A2.vavgub
    hexagon_A2_vavgubr,                        // llvm.hexagon.A2.vavgubr
    hexagon_A2_vavguh,                         // llvm.hexagon.A2.vavguh
    hexagon_A2_vavguhr,                        // llvm.hexagon.A2.vavguhr
    hexagon_A2_vavguw,                         // llvm.hexagon.A2.vavguw
    hexagon_A2_vavguwr,                        // llvm.hexagon.A2.vavguwr
    hexagon_A2_vavgw,                          // llvm.hexagon.A2.vavgw
    hexagon_A2_vavgwcr,                        // llvm.hexagon.A2.vavgwcr
    hexagon_A2_vavgwr,                         // llvm.hexagon.A2.vavgwr
    hexagon_A2_vcmpbeq,                        // llvm.hexagon.A2.vcmpbeq
    hexagon_A2_vcmpbgtu,                       // llvm.hexagon.A2.vcmpbgtu
    hexagon_A2_vcmpheq,                        // llvm.hexagon.A2.vcmpheq
    hexagon_A2_vcmphgt,                        // llvm.hexagon.A2.vcmphgt
    hexagon_A2_vcmphgtu,                       // llvm.hexagon.A2.vcmphgtu
    hexagon_A2_vcmpweq,                        // llvm.hexagon.A2.vcmpweq
    hexagon_A2_vcmpwgt,                        // llvm.hexagon.A2.vcmpwgt
    hexagon_A2_vcmpwgtu,                       // llvm.hexagon.A2.vcmpwgtu
    hexagon_A2_vconj,                          // llvm.hexagon.A2.vconj
    hexagon_A2_vmaxb,                          // llvm.hexagon.A2.vmaxb
    hexagon_A2_vmaxh,                          // llvm.hexagon.A2.vmaxh
    hexagon_A2_vmaxub,                         // llvm.hexagon.A2.vmaxub
    hexagon_A2_vmaxuh,                         // llvm.hexagon.A2.vmaxuh
    hexagon_A2_vmaxuw,                         // llvm.hexagon.A2.vmaxuw
    hexagon_A2_vmaxw,                          // llvm.hexagon.A2.vmaxw
    hexagon_A2_vminb,                          // llvm.hexagon.A2.vminb
    hexagon_A2_vminh,                          // llvm.hexagon.A2.vminh
    hexagon_A2_vminub,                         // llvm.hexagon.A2.vminub
    hexagon_A2_vminuh,                         // llvm.hexagon.A2.vminuh
    hexagon_A2_vminuw,                         // llvm.hexagon.A2.vminuw
    hexagon_A2_vminw,                          // llvm.hexagon.A2.vminw
    hexagon_A2_vnavgh,                         // llvm.hexagon.A2.vnavgh
    hexagon_A2_vnavghcr,                       // llvm.hexagon.A2.vnavghcr
    hexagon_A2_vnavghr,                        // llvm.hexagon.A2.vnavghr
    hexagon_A2_vnavgw,                         // llvm.hexagon.A2.vnavgw
    hexagon_A2_vnavgwcr,                       // llvm.hexagon.A2.vnavgwcr
    hexagon_A2_vnavgwr,                        // llvm.hexagon.A2.vnavgwr
    hexagon_A2_vraddub,                        // llvm.hexagon.A2.vraddub
    hexagon_A2_vraddub_acc,                    // llvm.hexagon.A2.vraddub.acc
    hexagon_A2_vrsadub,                        // llvm.hexagon.A2.vrsadub
    hexagon_A2_vrsadub_acc,                    // llvm.hexagon.A2.vrsadub.acc
    hexagon_A2_vsubb_map,                      // llvm.hexagon.A2.vsubb.map
    hexagon_A2_vsubh,                          // llvm.hexagon.A2.vsubh
    hexagon_A2_vsubhs,                         // llvm.hexagon.A2.vsubhs
    hexagon_A2_vsubub,                         // llvm.hexagon.A2.vsubub
    hexagon_A2_vsububs,                        // llvm.hexagon.A2.vsububs
    hexagon_A2_vsubuhs,                        // llvm.hexagon.A2.vsubuhs
    hexagon_A2_vsubw,                          // llvm.hexagon.A2.vsubw
    hexagon_A2_vsubws,                         // llvm.hexagon.A2.vsubws
    hexagon_A2_xor,                            // llvm.hexagon.A2.xor
    hexagon_A2_xorp,                           // llvm.hexagon.A2.xorp
    hexagon_A2_zxtb,                           // llvm.hexagon.A2.zxtb
    hexagon_A2_zxth,                           // llvm.hexagon.A2.zxth
    hexagon_A4_andn,                           // llvm.hexagon.A4.andn
    hexagon_A4_andnp,                          // llvm.hexagon.A4.andnp
    hexagon_A4_bitsplit,                       // llvm.hexagon.A4.bitsplit
    hexagon_A4_bitspliti,                      // llvm.hexagon.A4.bitspliti
    hexagon_A4_boundscheck,                    // llvm.hexagon.A4.boundscheck
    hexagon_A4_cmpbeq,                         // llvm.hexagon.A4.cmpbeq
    hexagon_A4_cmpbeqi,                        // llvm.hexagon.A4.cmpbeqi
    hexagon_A4_cmpbgt,                         // llvm.hexagon.A4.cmpbgt
    hexagon_A4_cmpbgti,                        // llvm.hexagon.A4.cmpbgti
    hexagon_A4_cmpbgtu,                        // llvm.hexagon.A4.cmpbgtu
    hexagon_A4_cmpbgtui,                       // llvm.hexagon.A4.cmpbgtui
    hexagon_A4_cmpheq,                         // llvm.hexagon.A4.cmpheq
    hexagon_A4_cmpheqi,                        // llvm.hexagon.A4.cmpheqi
    hexagon_A4_cmphgt,                         // llvm.hexagon.A4.cmphgt
    hexagon_A4_cmphgti,                        // llvm.hexagon.A4.cmphgti
    hexagon_A4_cmphgtu,                        // llvm.hexagon.A4.cmphgtu
    hexagon_A4_cmphgtui,                       // llvm.hexagon.A4.cmphgtui
    hexagon_A4_combineir,                      // llvm.hexagon.A4.combineir
    hexagon_A4_combineri,                      // llvm.hexagon.A4.combineri
    hexagon_A4_cround_ri,                      // llvm.hexagon.A4.cround.ri
    hexagon_A4_cround_rr,                      // llvm.hexagon.A4.cround.rr
    hexagon_A4_modwrapu,                       // llvm.hexagon.A4.modwrapu
    hexagon_A4_orn,                            // llvm.hexagon.A4.orn
    hexagon_A4_ornp,                           // llvm.hexagon.A4.ornp
    hexagon_A4_rcmpeq,                         // llvm.hexagon.A4.rcmpeq
    hexagon_A4_rcmpeqi,                        // llvm.hexagon.A4.rcmpeqi
    hexagon_A4_rcmpneq,                        // llvm.hexagon.A4.rcmpneq
    hexagon_A4_rcmpneqi,                       // llvm.hexagon.A4.rcmpneqi
    hexagon_A4_round_ri,                       // llvm.hexagon.A4.round.ri
    hexagon_A4_round_ri_sat,                   // llvm.hexagon.A4.round.ri.sat
    hexagon_A4_round_rr,                       // llvm.hexagon.A4.round.rr
    hexagon_A4_round_rr_sat,                   // llvm.hexagon.A4.round.rr.sat
    hexagon_A4_tlbmatch,                       // llvm.hexagon.A4.tlbmatch
    hexagon_A4_vcmpbeq_any,                    // llvm.hexagon.A4.vcmpbeq.any
    hexagon_A4_vcmpbeqi,                       // llvm.hexagon.A4.vcmpbeqi
    hexagon_A4_vcmpbgt,                        // llvm.hexagon.A4.vcmpbgt
    hexagon_A4_vcmpbgti,                       // llvm.hexagon.A4.vcmpbgti
    hexagon_A4_vcmpbgtui,                      // llvm.hexagon.A4.vcmpbgtui
    hexagon_A4_vcmpheqi,                       // llvm.hexagon.A4.vcmpheqi
    hexagon_A4_vcmphgti,                       // llvm.hexagon.A4.vcmphgti
    hexagon_A4_vcmphgtui,                      // llvm.hexagon.A4.vcmphgtui
    hexagon_A4_vcmpweqi,                       // llvm.hexagon.A4.vcmpweqi
    hexagon_A4_vcmpwgti,                       // llvm.hexagon.A4.vcmpwgti
    hexagon_A4_vcmpwgtui,                      // llvm.hexagon.A4.vcmpwgtui
    hexagon_A4_vrmaxh,                         // llvm.hexagon.A4.vrmaxh
    hexagon_A4_vrmaxuh,                        // llvm.hexagon.A4.vrmaxuh
    hexagon_A4_vrmaxuw,                        // llvm.hexagon.A4.vrmaxuw
    hexagon_A4_vrmaxw,                         // llvm.hexagon.A4.vrmaxw
    hexagon_A4_vrminh,                         // llvm.hexagon.A4.vrminh
    hexagon_A4_vrminuh,                        // llvm.hexagon.A4.vrminuh
    hexagon_A4_vrminuw,                        // llvm.hexagon.A4.vrminuw
    hexagon_A4_vrminw,                         // llvm.hexagon.A4.vrminw
    hexagon_A5_vaddhubs,                       // llvm.hexagon.A5.vaddhubs
    hexagon_A6_vcmpbeq_notany,                 // llvm.hexagon.A6.vcmpbeq.notany
    hexagon_A7_clip,                           // llvm.hexagon.A7.clip
    hexagon_A7_croundd_ri,                     // llvm.hexagon.A7.croundd.ri
    hexagon_A7_croundd_rr,                     // llvm.hexagon.A7.croundd.rr
    hexagon_A7_vclip,                          // llvm.hexagon.A7.vclip
    hexagon_C2_all8,                           // llvm.hexagon.C2.all8
    hexagon_C2_and,                            // llvm.hexagon.C2.and
    hexagon_C2_andn,                           // llvm.hexagon.C2.andn
    hexagon_C2_any8,                           // llvm.hexagon.C2.any8
    hexagon_C2_bitsclr,                        // llvm.hexagon.C2.bitsclr
    hexagon_C2_bitsclri,                       // llvm.hexagon.C2.bitsclri
    hexagon_C2_bitsset,                        // llvm.hexagon.C2.bitsset
    hexagon_C2_cmpeq,                          // llvm.hexagon.C2.cmpeq
    hexagon_C2_cmpeqi,                         // llvm.hexagon.C2.cmpeqi
    hexagon_C2_cmpeqp,                         // llvm.hexagon.C2.cmpeqp
    hexagon_C2_cmpgei,                         // llvm.hexagon.C2.cmpgei
    hexagon_C2_cmpgeui,                        // llvm.hexagon.C2.cmpgeui
    hexagon_C2_cmpgt,                          // llvm.hexagon.C2.cmpgt
    hexagon_C2_cmpgti,                         // llvm.hexagon.C2.cmpgti
    hexagon_C2_cmpgtp,                         // llvm.hexagon.C2.cmpgtp
    hexagon_C2_cmpgtu,                         // llvm.hexagon.C2.cmpgtu
    hexagon_C2_cmpgtui,                        // llvm.hexagon.C2.cmpgtui
    hexagon_C2_cmpgtup,                        // llvm.hexagon.C2.cmpgtup
    hexagon_C2_cmplt,                          // llvm.hexagon.C2.cmplt
    hexagon_C2_cmpltu,                         // llvm.hexagon.C2.cmpltu
    hexagon_C2_mask,                           // llvm.hexagon.C2.mask
    hexagon_C2_mux,                            // llvm.hexagon.C2.mux
    hexagon_C2_muxii,                          // llvm.hexagon.C2.muxii
    hexagon_C2_muxir,                          // llvm.hexagon.C2.muxir
    hexagon_C2_muxri,                          // llvm.hexagon.C2.muxri
    hexagon_C2_not,                            // llvm.hexagon.C2.not
    hexagon_C2_or,                             // llvm.hexagon.C2.or
    hexagon_C2_orn,                            // llvm.hexagon.C2.orn
    hexagon_C2_pxfer_map,                      // llvm.hexagon.C2.pxfer.map
    hexagon_C2_tfrpr,                          // llvm.hexagon.C2.tfrpr
    hexagon_C2_tfrrp,                          // llvm.hexagon.C2.tfrrp
    hexagon_C2_vitpack,                        // llvm.hexagon.C2.vitpack
    hexagon_C2_vmux,                           // llvm.hexagon.C2.vmux
    hexagon_C2_xor,                            // llvm.hexagon.C2.xor
    hexagon_C4_and_and,                        // llvm.hexagon.C4.and.and
    hexagon_C4_and_andn,                       // llvm.hexagon.C4.and.andn
    hexagon_C4_and_or,                         // llvm.hexagon.C4.and.or
    hexagon_C4_and_orn,                        // llvm.hexagon.C4.and.orn
    hexagon_C4_cmplte,                         // llvm.hexagon.C4.cmplte
    hexagon_C4_cmpltei,                        // llvm.hexagon.C4.cmpltei
    hexagon_C4_cmplteu,                        // llvm.hexagon.C4.cmplteu
    hexagon_C4_cmplteui,                       // llvm.hexagon.C4.cmplteui
    hexagon_C4_cmpneq,                         // llvm.hexagon.C4.cmpneq
    hexagon_C4_cmpneqi,                        // llvm.hexagon.C4.cmpneqi
    hexagon_C4_fastcorner9,                    // llvm.hexagon.C4.fastcorner9
    hexagon_C4_fastcorner9_not,                // llvm.hexagon.C4.fastcorner9.not
    hexagon_C4_nbitsclr,                       // llvm.hexagon.C4.nbitsclr
    hexagon_C4_nbitsclri,                      // llvm.hexagon.C4.nbitsclri
    hexagon_C4_nbitsset,                       // llvm.hexagon.C4.nbitsset
    hexagon_C4_or_and,                         // llvm.hexagon.C4.or.and
    hexagon_C4_or_andn,                        // llvm.hexagon.C4.or.andn
    hexagon_C4_or_or,                          // llvm.hexagon.C4.or.or
    hexagon_C4_or_orn,                         // llvm.hexagon.C4.or.orn
    hexagon_F2_conv_d2df,                      // llvm.hexagon.F2.conv.d2df
    hexagon_F2_conv_d2sf,                      // llvm.hexagon.F2.conv.d2sf
    hexagon_F2_conv_df2d,                      // llvm.hexagon.F2.conv.df2d
    hexagon_F2_conv_df2d_chop,                 // llvm.hexagon.F2.conv.df2d.chop
    hexagon_F2_conv_df2sf,                     // llvm.hexagon.F2.conv.df2sf
    hexagon_F2_conv_df2ud,                     // llvm.hexagon.F2.conv.df2ud
    hexagon_F2_conv_df2ud_chop,                // llvm.hexagon.F2.conv.df2ud.chop
    hexagon_F2_conv_df2uw,                     // llvm.hexagon.F2.conv.df2uw
    hexagon_F2_conv_df2uw_chop,                // llvm.hexagon.F2.conv.df2uw.chop
    hexagon_F2_conv_df2w,                      // llvm.hexagon.F2.conv.df2w
    hexagon_F2_conv_df2w_chop,                 // llvm.hexagon.F2.conv.df2w.chop
    hexagon_F2_conv_sf2d,                      // llvm.hexagon.F2.conv.sf2d
    hexagon_F2_conv_sf2d_chop,                 // llvm.hexagon.F2.conv.sf2d.chop
    hexagon_F2_conv_sf2df,                     // llvm.hexagon.F2.conv.sf2df
    hexagon_F2_conv_sf2ud,                     // llvm.hexagon.F2.conv.sf2ud
    hexagon_F2_conv_sf2ud_chop,                // llvm.hexagon.F2.conv.sf2ud.chop
    hexagon_F2_conv_sf2uw,                     // llvm.hexagon.F2.conv.sf2uw
    hexagon_F2_conv_sf2uw_chop,                // llvm.hexagon.F2.conv.sf2uw.chop
    hexagon_F2_conv_sf2w,                      // llvm.hexagon.F2.conv.sf2w
    hexagon_F2_conv_sf2w_chop,                 // llvm.hexagon.F2.conv.sf2w.chop
    hexagon_F2_conv_ud2df,                     // llvm.hexagon.F2.conv.ud2df
    hexagon_F2_conv_ud2sf,                     // llvm.hexagon.F2.conv.ud2sf
    hexagon_F2_conv_uw2df,                     // llvm.hexagon.F2.conv.uw2df
    hexagon_F2_conv_uw2sf,                     // llvm.hexagon.F2.conv.uw2sf
    hexagon_F2_conv_w2df,                      // llvm.hexagon.F2.conv.w2df
    hexagon_F2_conv_w2sf,                      // llvm.hexagon.F2.conv.w2sf
    hexagon_F2_dfadd,                          // llvm.hexagon.F2.dfadd
    hexagon_F2_dfclass,                        // llvm.hexagon.F2.dfclass
    hexagon_F2_dfcmpeq,                        // llvm.hexagon.F2.dfcmpeq
    hexagon_F2_dfcmpge,                        // llvm.hexagon.F2.dfcmpge
    hexagon_F2_dfcmpgt,                        // llvm.hexagon.F2.dfcmpgt
    hexagon_F2_dfcmpuo,                        // llvm.hexagon.F2.dfcmpuo
    hexagon_F2_dfimm_n,                        // llvm.hexagon.F2.dfimm.n
    hexagon_F2_dfimm_p,                        // llvm.hexagon.F2.dfimm.p
    hexagon_F2_dfmax,                          // llvm.hexagon.F2.dfmax
    hexagon_F2_dfmin,                          // llvm.hexagon.F2.dfmin
    hexagon_F2_dfmpyfix,                       // llvm.hexagon.F2.dfmpyfix
    hexagon_F2_dfmpyhh,                        // llvm.hexagon.F2.dfmpyhh
    hexagon_F2_dfmpylh,                        // llvm.hexagon.F2.dfmpylh
    hexagon_F2_dfmpyll,                        // llvm.hexagon.F2.dfmpyll
    hexagon_F2_dfsub,                          // llvm.hexagon.F2.dfsub
    hexagon_F2_sfadd,                          // llvm.hexagon.F2.sfadd
    hexagon_F2_sfclass,                        // llvm.hexagon.F2.sfclass
    hexagon_F2_sfcmpeq,                        // llvm.hexagon.F2.sfcmpeq
    hexagon_F2_sfcmpge,                        // llvm.hexagon.F2.sfcmpge
    hexagon_F2_sfcmpgt,                        // llvm.hexagon.F2.sfcmpgt
    hexagon_F2_sfcmpuo,                        // llvm.hexagon.F2.sfcmpuo
    hexagon_F2_sffixupd,                       // llvm.hexagon.F2.sffixupd
    hexagon_F2_sffixupn,                       // llvm.hexagon.F2.sffixupn
    hexagon_F2_sffixupr,                       // llvm.hexagon.F2.sffixupr
    hexagon_F2_sffma,                          // llvm.hexagon.F2.sffma
    hexagon_F2_sffma_lib,                      // llvm.hexagon.F2.sffma.lib
    hexagon_F2_sffma_sc,                       // llvm.hexagon.F2.sffma.sc
    hexagon_F2_sffms,                          // llvm.hexagon.F2.sffms
    hexagon_F2_sffms_lib,                      // llvm.hexagon.F2.sffms.lib
    hexagon_F2_sfimm_n,                        // llvm.hexagon.F2.sfimm.n
    hexagon_F2_sfimm_p,                        // llvm.hexagon.F2.sfimm.p
    hexagon_F2_sfmax,                          // llvm.hexagon.F2.sfmax
    hexagon_F2_sfmin,                          // llvm.hexagon.F2.sfmin
    hexagon_F2_sfmpy,                          // llvm.hexagon.F2.sfmpy
    hexagon_F2_sfsub,                          // llvm.hexagon.F2.sfsub
    hexagon_L2_loadrb_pbr,                     // llvm.hexagon.L2.loadrb.pbr
    hexagon_L2_loadrb_pci,                     // llvm.hexagon.L2.loadrb.pci
    hexagon_L2_loadrb_pcr,                     // llvm.hexagon.L2.loadrb.pcr
    hexagon_L2_loadrd_pbr,                     // llvm.hexagon.L2.loadrd.pbr
    hexagon_L2_loadrd_pci,                     // llvm.hexagon.L2.loadrd.pci
    hexagon_L2_loadrd_pcr,                     // llvm.hexagon.L2.loadrd.pcr
    hexagon_L2_loadrh_pbr,                     // llvm.hexagon.L2.loadrh.pbr
    hexagon_L2_loadrh_pci,                     // llvm.hexagon.L2.loadrh.pci
    hexagon_L2_loadrh_pcr,                     // llvm.hexagon.L2.loadrh.pcr
    hexagon_L2_loadri_pbr,                     // llvm.hexagon.L2.loadri.pbr
    hexagon_L2_loadri_pci,                     // llvm.hexagon.L2.loadri.pci
    hexagon_L2_loadri_pcr,                     // llvm.hexagon.L2.loadri.pcr
    hexagon_L2_loadrub_pbr,                    // llvm.hexagon.L2.loadrub.pbr
    hexagon_L2_loadrub_pci,                    // llvm.hexagon.L2.loadrub.pci
    hexagon_L2_loadrub_pcr,                    // llvm.hexagon.L2.loadrub.pcr
    hexagon_L2_loadruh_pbr,                    // llvm.hexagon.L2.loadruh.pbr
    hexagon_L2_loadruh_pci,                    // llvm.hexagon.L2.loadruh.pci
    hexagon_L2_loadruh_pcr,                    // llvm.hexagon.L2.loadruh.pcr
    hexagon_L2_loadw_locked,                   // llvm.hexagon.L2.loadw.locked
    hexagon_L4_loadd_locked,                   // llvm.hexagon.L4.loadd.locked
    hexagon_M2_acci,                           // llvm.hexagon.M2.acci
    hexagon_M2_accii,                          // llvm.hexagon.M2.accii
    hexagon_M2_cmaci_s0,                       // llvm.hexagon.M2.cmaci.s0
    hexagon_M2_cmacr_s0,                       // llvm.hexagon.M2.cmacr.s0
    hexagon_M2_cmacs_s0,                       // llvm.hexagon.M2.cmacs.s0
    hexagon_M2_cmacs_s1,                       // llvm.hexagon.M2.cmacs.s1
    hexagon_M2_cmacsc_s0,                      // llvm.hexagon.M2.cmacsc.s0
    hexagon_M2_cmacsc_s1,                      // llvm.hexagon.M2.cmacsc.s1
    hexagon_M2_cmpyi_s0,                       // llvm.hexagon.M2.cmpyi.s0
    hexagon_M2_cmpyr_s0,                       // llvm.hexagon.M2.cmpyr.s0
    hexagon_M2_cmpyrs_s0,                      // llvm.hexagon.M2.cmpyrs.s0
    hexagon_M2_cmpyrs_s1,                      // llvm.hexagon.M2.cmpyrs.s1
    hexagon_M2_cmpyrsc_s0,                     // llvm.hexagon.M2.cmpyrsc.s0
    hexagon_M2_cmpyrsc_s1,                     // llvm.hexagon.M2.cmpyrsc.s1
    hexagon_M2_cmpys_s0,                       // llvm.hexagon.M2.cmpys.s0
    hexagon_M2_cmpys_s1,                       // llvm.hexagon.M2.cmpys.s1
    hexagon_M2_cmpysc_s0,                      // llvm.hexagon.M2.cmpysc.s0
    hexagon_M2_cmpysc_s1,                      // llvm.hexagon.M2.cmpysc.s1
    hexagon_M2_cnacs_s0,                       // llvm.hexagon.M2.cnacs.s0
    hexagon_M2_cnacs_s1,                       // llvm.hexagon.M2.cnacs.s1
    hexagon_M2_cnacsc_s0,                      // llvm.hexagon.M2.cnacsc.s0
    hexagon_M2_cnacsc_s1,                      // llvm.hexagon.M2.cnacsc.s1
    hexagon_M2_dpmpyss_acc_s0,                 // llvm.hexagon.M2.dpmpyss.acc.s0
    hexagon_M2_dpmpyss_nac_s0,                 // llvm.hexagon.M2.dpmpyss.nac.s0
    hexagon_M2_dpmpyss_rnd_s0,                 // llvm.hexagon.M2.dpmpyss.rnd.s0
    hexagon_M2_dpmpyss_s0,                     // llvm.hexagon.M2.dpmpyss.s0
    hexagon_M2_dpmpyuu_acc_s0,                 // llvm.hexagon.M2.dpmpyuu.acc.s0
    hexagon_M2_dpmpyuu_nac_s0,                 // llvm.hexagon.M2.dpmpyuu.nac.s0
    hexagon_M2_dpmpyuu_s0,                     // llvm.hexagon.M2.dpmpyuu.s0
    hexagon_M2_hmmpyh_rs1,                     // llvm.hexagon.M2.hmmpyh.rs1
    hexagon_M2_hmmpyh_s1,                      // llvm.hexagon.M2.hmmpyh.s1
    hexagon_M2_hmmpyl_rs1,                     // llvm.hexagon.M2.hmmpyl.rs1
    hexagon_M2_hmmpyl_s1,                      // llvm.hexagon.M2.hmmpyl.s1
    hexagon_M2_maci,                           // llvm.hexagon.M2.maci
    hexagon_M2_macsin,                         // llvm.hexagon.M2.macsin
    hexagon_M2_macsip,                         // llvm.hexagon.M2.macsip
    hexagon_M2_mmachs_rs0,                     // llvm.hexagon.M2.mmachs.rs0
    hexagon_M2_mmachs_rs1,                     // llvm.hexagon.M2.mmachs.rs1
    hexagon_M2_mmachs_s0,                      // llvm.hexagon.M2.mmachs.s0
    hexagon_M2_mmachs_s1,                      // llvm.hexagon.M2.mmachs.s1
    hexagon_M2_mmacls_rs0,                     // llvm.hexagon.M2.mmacls.rs0
    hexagon_M2_mmacls_rs1,                     // llvm.hexagon.M2.mmacls.rs1
    hexagon_M2_mmacls_s0,                      // llvm.hexagon.M2.mmacls.s0
    hexagon_M2_mmacls_s1,                      // llvm.hexagon.M2.mmacls.s1
    hexagon_M2_mmacuhs_rs0,                    // llvm.hexagon.M2.mmacuhs.rs0
    hexagon_M2_mmacuhs_rs1,                    // llvm.hexagon.M2.mmacuhs.rs1
    hexagon_M2_mmacuhs_s0,                     // llvm.hexagon.M2.mmacuhs.s0
    hexagon_M2_mmacuhs_s1,                     // llvm.hexagon.M2.mmacuhs.s1
    hexagon_M2_mmaculs_rs0,                    // llvm.hexagon.M2.mmaculs.rs0
    hexagon_M2_mmaculs_rs1,                    // llvm.hexagon.M2.mmaculs.rs1
    hexagon_M2_mmaculs_s0,                     // llvm.hexagon.M2.mmaculs.s0
    hexagon_M2_mmaculs_s1,                     // llvm.hexagon.M2.mmaculs.s1
    hexagon_M2_mmpyh_rs0,                      // llvm.hexagon.M2.mmpyh.rs0
    hexagon_M2_mmpyh_rs1,                      // llvm.hexagon.M2.mmpyh.rs1
    hexagon_M2_mmpyh_s0,                       // llvm.hexagon.M2.mmpyh.s0
    hexagon_M2_mmpyh_s1,                       // llvm.hexagon.M2.mmpyh.s1
    hexagon_M2_mmpyl_rs0,                      // llvm.hexagon.M2.mmpyl.rs0
    hexagon_M2_mmpyl_rs1,                      // llvm.hexagon.M2.mmpyl.rs1
    hexagon_M2_mmpyl_s0,                       // llvm.hexagon.M2.mmpyl.s0
    hexagon_M2_mmpyl_s1,                       // llvm.hexagon.M2.mmpyl.s1
    hexagon_M2_mmpyuh_rs0,                     // llvm.hexagon.M2.mmpyuh.rs0
    hexagon_M2_mmpyuh_rs1,                     // llvm.hexagon.M2.mmpyuh.rs1
    hexagon_M2_mmpyuh_s0,                      // llvm.hexagon.M2.mmpyuh.s0
    hexagon_M2_mmpyuh_s1,                      // llvm.hexagon.M2.mmpyuh.s1
    hexagon_M2_mmpyul_rs0,                     // llvm.hexagon.M2.mmpyul.rs0
    hexagon_M2_mmpyul_rs1,                     // llvm.hexagon.M2.mmpyul.rs1
    hexagon_M2_mmpyul_s0,                      // llvm.hexagon.M2.mmpyul.s0
    hexagon_M2_mmpyul_s1,                      // llvm.hexagon.M2.mmpyul.s1
    hexagon_M2_mnaci,                          // llvm.hexagon.M2.mnaci
    hexagon_M2_mpy_acc_hh_s0,                  // llvm.hexagon.M2.mpy.acc.hh.s0
    hexagon_M2_mpy_acc_hh_s1,                  // llvm.hexagon.M2.mpy.acc.hh.s1
    hexagon_M2_mpy_acc_hl_s0,                  // llvm.hexagon.M2.mpy.acc.hl.s0
    hexagon_M2_mpy_acc_hl_s1,                  // llvm.hexagon.M2.mpy.acc.hl.s1
    hexagon_M2_mpy_acc_lh_s0,                  // llvm.hexagon.M2.mpy.acc.lh.s0
    hexagon_M2_mpy_acc_lh_s1,                  // llvm.hexagon.M2.mpy.acc.lh.s1
    hexagon_M2_mpy_acc_ll_s0,                  // llvm.hexagon.M2.mpy.acc.ll.s0
    hexagon_M2_mpy_acc_ll_s1,                  // llvm.hexagon.M2.mpy.acc.ll.s1
    hexagon_M2_mpy_acc_sat_hh_s0,              // llvm.hexagon.M2.mpy.acc.sat.hh.s0
    hexagon_M2_mpy_acc_sat_hh_s1,              // llvm.hexagon.M2.mpy.acc.sat.hh.s1
    hexagon_M2_mpy_acc_sat_hl_s0,              // llvm.hexagon.M2.mpy.acc.sat.hl.s0
    hexagon_M2_mpy_acc_sat_hl_s1,              // llvm.hexagon.M2.mpy.acc.sat.hl.s1
    hexagon_M2_mpy_acc_sat_lh_s0,              // llvm.hexagon.M2.mpy.acc.sat.lh.s0
    hexagon_M2_mpy_acc_sat_lh_s1,              // llvm.hexagon.M2.mpy.acc.sat.lh.s1
    hexagon_M2_mpy_acc_sat_ll_s0,              // llvm.hexagon.M2.mpy.acc.sat.ll.s0
    hexagon_M2_mpy_acc_sat_ll_s1,              // llvm.hexagon.M2.mpy.acc.sat.ll.s1
    hexagon_M2_mpy_hh_s0,                      // llvm.hexagon.M2.mpy.hh.s0
    hexagon_M2_mpy_hh_s1,                      // llvm.hexagon.M2.mpy.hh.s1
    hexagon_M2_mpy_hl_s0,                      // llvm.hexagon.M2.mpy.hl.s0
    hexagon_M2_mpy_hl_s1,                      // llvm.hexagon.M2.mpy.hl.s1
    hexagon_M2_mpy_lh_s0,                      // llvm.hexagon.M2.mpy.lh.s0
    hexagon_M2_mpy_lh_s1,                      // llvm.hexagon.M2.mpy.lh.s1
    hexagon_M2_mpy_ll_s0,                      // llvm.hexagon.M2.mpy.ll.s0
    hexagon_M2_mpy_ll_s1,                      // llvm.hexagon.M2.mpy.ll.s1
    hexagon_M2_mpy_nac_hh_s0,                  // llvm.hexagon.M2.mpy.nac.hh.s0
    hexagon_M2_mpy_nac_hh_s1,                  // llvm.hexagon.M2.mpy.nac.hh.s1
    hexagon_M2_mpy_nac_hl_s0,                  // llvm.hexagon.M2.mpy.nac.hl.s0
    hexagon_M2_mpy_nac_hl_s1,                  // llvm.hexagon.M2.mpy.nac.hl.s1
    hexagon_M2_mpy_nac_lh_s0,                  // llvm.hexagon.M2.mpy.nac.lh.s0
    hexagon_M2_mpy_nac_lh_s1,                  // llvm.hexagon.M2.mpy.nac.lh.s1
    hexagon_M2_mpy_nac_ll_s0,                  // llvm.hexagon.M2.mpy.nac.ll.s0
    hexagon_M2_mpy_nac_ll_s1,                  // llvm.hexagon.M2.mpy.nac.ll.s1
    hexagon_M2_mpy_nac_sat_hh_s0,              // llvm.hexagon.M2.mpy.nac.sat.hh.s0
    hexagon_M2_mpy_nac_sat_hh_s1,              // llvm.hexagon.M2.mpy.nac.sat.hh.s1
    hexagon_M2_mpy_nac_sat_hl_s0,              // llvm.hexagon.M2.mpy.nac.sat.hl.s0
    hexagon_M2_mpy_nac_sat_hl_s1,              // llvm.hexagon.M2.mpy.nac.sat.hl.s1
    hexagon_M2_mpy_nac_sat_lh_s0,              // llvm.hexagon.M2.mpy.nac.sat.lh.s0
    hexagon_M2_mpy_nac_sat_lh_s1,              // llvm.hexagon.M2.mpy.nac.sat.lh.s1
    hexagon_M2_mpy_nac_sat_ll_s0,              // llvm.hexagon.M2.mpy.nac.sat.ll.s0
    hexagon_M2_mpy_nac_sat_ll_s1,              // llvm.hexagon.M2.mpy.nac.sat.ll.s1
    hexagon_M2_mpy_rnd_hh_s0,                  // llvm.hexagon.M2.mpy.rnd.hh.s0
    hexagon_M2_mpy_rnd_hh_s1,                  // llvm.hexagon.M2.mpy.rnd.hh.s1
    hexagon_M2_mpy_rnd_hl_s0,                  // llvm.hexagon.M2.mpy.rnd.hl.s0
    hexagon_M2_mpy_rnd_hl_s1,                  // llvm.hexagon.M2.mpy.rnd.hl.s1
    hexagon_M2_mpy_rnd_lh_s0,                  // llvm.hexagon.M2.mpy.rnd.lh.s0
    hexagon_M2_mpy_rnd_lh_s1,                  // llvm.hexagon.M2.mpy.rnd.lh.s1
    hexagon_M2_mpy_rnd_ll_s0,                  // llvm.hexagon.M2.mpy.rnd.ll.s0
    hexagon_M2_mpy_rnd_ll_s1,                  // llvm.hexagon.M2.mpy.rnd.ll.s1
    hexagon_M2_mpy_sat_hh_s0,                  // llvm.hexagon.M2.mpy.sat.hh.s0
    hexagon_M2_mpy_sat_hh_s1,                  // llvm.hexagon.M2.mpy.sat.hh.s1
    hexagon_M2_mpy_sat_hl_s0,                  // llvm.hexagon.M2.mpy.sat.hl.s0
    hexagon_M2_mpy_sat_hl_s1,                  // llvm.hexagon.M2.mpy.sat.hl.s1
    hexagon_M2_mpy_sat_lh_s0,                  // llvm.hexagon.M2.mpy.sat.lh.s0
    hexagon_M2_mpy_sat_lh_s1,                  // llvm.hexagon.M2.mpy.sat.lh.s1
    hexagon_M2_mpy_sat_ll_s0,                  // llvm.hexagon.M2.mpy.sat.ll.s0
    hexagon_M2_mpy_sat_ll_s1,                  // llvm.hexagon.M2.mpy.sat.ll.s1
    hexagon_M2_mpy_sat_rnd_hh_s0,              // llvm.hexagon.M2.mpy.sat.rnd.hh.s0
    hexagon_M2_mpy_sat_rnd_hh_s1,              // llvm.hexagon.M2.mpy.sat.rnd.hh.s1
    hexagon_M2_mpy_sat_rnd_hl_s0,              // llvm.hexagon.M2.mpy.sat.rnd.hl.s0
    hexagon_M2_mpy_sat_rnd_hl_s1,              // llvm.hexagon.M2.mpy.sat.rnd.hl.s1
    hexagon_M2_mpy_sat_rnd_lh_s0,              // llvm.hexagon.M2.mpy.sat.rnd.lh.s0
    hexagon_M2_mpy_sat_rnd_lh_s1,              // llvm.hexagon.M2.mpy.sat.rnd.lh.s1
    hexagon_M2_mpy_sat_rnd_ll_s0,              // llvm.hexagon.M2.mpy.sat.rnd.ll.s0
    hexagon_M2_mpy_sat_rnd_ll_s1,              // llvm.hexagon.M2.mpy.sat.rnd.ll.s1
    hexagon_M2_mpy_up,                         // llvm.hexagon.M2.mpy.up
    hexagon_M2_mpy_up_s1,                      // llvm.hexagon.M2.mpy.up.s1
    hexagon_M2_mpy_up_s1_sat,                  // llvm.hexagon.M2.mpy.up.s1.sat
    hexagon_M2_mpyd_acc_hh_s0,                 // llvm.hexagon.M2.mpyd.acc.hh.s0
    hexagon_M2_mpyd_acc_hh_s1,                 // llvm.hexagon.M2.mpyd.acc.hh.s1
    hexagon_M2_mpyd_acc_hl_s0,                 // llvm.hexagon.M2.mpyd.acc.hl.s0
    hexagon_M2_mpyd_acc_hl_s1,                 // llvm.hexagon.M2.mpyd.acc.hl.s1
    hexagon_M2_mpyd_acc_lh_s0,                 // llvm.hexagon.M2.mpyd.acc.lh.s0
    hexagon_M2_mpyd_acc_lh_s1,                 // llvm.hexagon.M2.mpyd.acc.lh.s1
    hexagon_M2_mpyd_acc_ll_s0,                 // llvm.hexagon.M2.mpyd.acc.ll.s0
    hexagon_M2_mpyd_acc_ll_s1,                 // llvm.hexagon.M2.mpyd.acc.ll.s1
    hexagon_M2_mpyd_hh_s0,                     // llvm.hexagon.M2.mpyd.hh.s0
    hexagon_M2_mpyd_hh_s1,                     // llvm.hexagon.M2.mpyd.hh.s1
    hexagon_M2_mpyd_hl_s0,                     // llvm.hexagon.M2.mpyd.hl.s0
    hexagon_M2_mpyd_hl_s1,                     // llvm.hexagon.M2.mpyd.hl.s1
    hexagon_M2_mpyd_lh_s0,                     // llvm.hexagon.M2.mpyd.lh.s0
    hexagon_M2_mpyd_lh_s1,                     // llvm.hexagon.M2.mpyd.lh.s1
    hexagon_M2_mpyd_ll_s0,                     // llvm.hexagon.M2.mpyd.ll.s0
    hexagon_M2_mpyd_ll_s1,                     // llvm.hexagon.M2.mpyd.ll.s1
    hexagon_M2_mpyd_nac_hh_s0,                 // llvm.hexagon.M2.mpyd.nac.hh.s0
    hexagon_M2_mpyd_nac_hh_s1,                 // llvm.hexagon.M2.mpyd.nac.hh.s1
    hexagon_M2_mpyd_nac_hl_s0,                 // llvm.hexagon.M2.mpyd.nac.hl.s0
    hexagon_M2_mpyd_nac_hl_s1,                 // llvm.hexagon.M2.mpyd.nac.hl.s1
    hexagon_M2_mpyd_nac_lh_s0,                 // llvm.hexagon.M2.mpyd.nac.lh.s0
    hexagon_M2_mpyd_nac_lh_s1,                 // llvm.hexagon.M2.mpyd.nac.lh.s1
    hexagon_M2_mpyd_nac_ll_s0,                 // llvm.hexagon.M2.mpyd.nac.ll.s0
    hexagon_M2_mpyd_nac_ll_s1,                 // llvm.hexagon.M2.mpyd.nac.ll.s1
    hexagon_M2_mpyd_rnd_hh_s0,                 // llvm.hexagon.M2.mpyd.rnd.hh.s0
    hexagon_M2_mpyd_rnd_hh_s1,                 // llvm.hexagon.M2.mpyd.rnd.hh.s1
    hexagon_M2_mpyd_rnd_hl_s0,                 // llvm.hexagon.M2.mpyd.rnd.hl.s0
    hexagon_M2_mpyd_rnd_hl_s1,                 // llvm.hexagon.M2.mpyd.rnd.hl.s1
    hexagon_M2_mpyd_rnd_lh_s0,                 // llvm.hexagon.M2.mpyd.rnd.lh.s0
    hexagon_M2_mpyd_rnd_lh_s1,                 // llvm.hexagon.M2.mpyd.rnd.lh.s1
    hexagon_M2_mpyd_rnd_ll_s0,                 // llvm.hexagon.M2.mpyd.rnd.ll.s0
    hexagon_M2_mpyd_rnd_ll_s1,                 // llvm.hexagon.M2.mpyd.rnd.ll.s1
    hexagon_M2_mpyi,                           // llvm.hexagon.M2.mpyi
    hexagon_M2_mpysmi,                         // llvm.hexagon.M2.mpysmi
    hexagon_M2_mpysu_up,                       // llvm.hexagon.M2.mpysu.up
    hexagon_M2_mpyu_acc_hh_s0,                 // llvm.hexagon.M2.mpyu.acc.hh.s0
    hexagon_M2_mpyu_acc_hh_s1,                 // llvm.hexagon.M2.mpyu.acc.hh.s1
    hexagon_M2_mpyu_acc_hl_s0,                 // llvm.hexagon.M2.mpyu.acc.hl.s0
    hexagon_M2_mpyu_acc_hl_s1,                 // llvm.hexagon.M2.mpyu.acc.hl.s1
    hexagon_M2_mpyu_acc_lh_s0,                 // llvm.hexagon.M2.mpyu.acc.lh.s0
    hexagon_M2_mpyu_acc_lh_s1,                 // llvm.hexagon.M2.mpyu.acc.lh.s1
    hexagon_M2_mpyu_acc_ll_s0,                 // llvm.hexagon.M2.mpyu.acc.ll.s0
    hexagon_M2_mpyu_acc_ll_s1,                 // llvm.hexagon.M2.mpyu.acc.ll.s1
    hexagon_M2_mpyu_hh_s0,                     // llvm.hexagon.M2.mpyu.hh.s0
    hexagon_M2_mpyu_hh_s1,                     // llvm.hexagon.M2.mpyu.hh.s1
    hexagon_M2_mpyu_hl_s0,                     // llvm.hexagon.M2.mpyu.hl.s0
    hexagon_M2_mpyu_hl_s1,                     // llvm.hexagon.M2.mpyu.hl.s1
    hexagon_M2_mpyu_lh_s0,                     // llvm.hexagon.M2.mpyu.lh.s0
    hexagon_M2_mpyu_lh_s1,                     // llvm.hexagon.M2.mpyu.lh.s1
    hexagon_M2_mpyu_ll_s0,                     // llvm.hexagon.M2.mpyu.ll.s0
    hexagon_M2_mpyu_ll_s1,                     // llvm.hexagon.M2.mpyu.ll.s1
    hexagon_M2_mpyu_nac_hh_s0,                 // llvm.hexagon.M2.mpyu.nac.hh.s0
    hexagon_M2_mpyu_nac_hh_s1,                 // llvm.hexagon.M2.mpyu.nac.hh.s1
    hexagon_M2_mpyu_nac_hl_s0,                 // llvm.hexagon.M2.mpyu.nac.hl.s0
    hexagon_M2_mpyu_nac_hl_s1,                 // llvm.hexagon.M2.mpyu.nac.hl.s1
    hexagon_M2_mpyu_nac_lh_s0,                 // llvm.hexagon.M2.mpyu.nac.lh.s0
    hexagon_M2_mpyu_nac_lh_s1,                 // llvm.hexagon.M2.mpyu.nac.lh.s1
    hexagon_M2_mpyu_nac_ll_s0,                 // llvm.hexagon.M2.mpyu.nac.ll.s0
    hexagon_M2_mpyu_nac_ll_s1,                 // llvm.hexagon.M2.mpyu.nac.ll.s1
    hexagon_M2_mpyu_up,                        // llvm.hexagon.M2.mpyu.up
    hexagon_M2_mpyud_acc_hh_s0,                // llvm.hexagon.M2.mpyud.acc.hh.s0
    hexagon_M2_mpyud_acc_hh_s1,                // llvm.hexagon.M2.mpyud.acc.hh.s1
    hexagon_M2_mpyud_acc_hl_s0,                // llvm.hexagon.M2.mpyud.acc.hl.s0
    hexagon_M2_mpyud_acc_hl_s1,                // llvm.hexagon.M2.mpyud.acc.hl.s1
    hexagon_M2_mpyud_acc_lh_s0,                // llvm.hexagon.M2.mpyud.acc.lh.s0
    hexagon_M2_mpyud_acc_lh_s1,                // llvm.hexagon.M2.mpyud.acc.lh.s1
    hexagon_M2_mpyud_acc_ll_s0,                // llvm.hexagon.M2.mpyud.acc.ll.s0
    hexagon_M2_mpyud_acc_ll_s1,                // llvm.hexagon.M2.mpyud.acc.ll.s1
    hexagon_M2_mpyud_hh_s0,                    // llvm.hexagon.M2.mpyud.hh.s0
    hexagon_M2_mpyud_hh_s1,                    // llvm.hexagon.M2.mpyud.hh.s1
    hexagon_M2_mpyud_hl_s0,                    // llvm.hexagon.M2.mpyud.hl.s0
    hexagon_M2_mpyud_hl_s1,                    // llvm.hexagon.M2.mpyud.hl.s1
    hexagon_M2_mpyud_lh_s0,                    // llvm.hexagon.M2.mpyud.lh.s0
    hexagon_M2_mpyud_lh_s1,                    // llvm.hexagon.M2.mpyud.lh.s1
    hexagon_M2_mpyud_ll_s0,                    // llvm.hexagon.M2.mpyud.ll.s0
    hexagon_M2_mpyud_ll_s1,                    // llvm.hexagon.M2.mpyud.ll.s1
    hexagon_M2_mpyud_nac_hh_s0,                // llvm.hexagon.M2.mpyud.nac.hh.s0
    hexagon_M2_mpyud_nac_hh_s1,                // llvm.hexagon.M2.mpyud.nac.hh.s1
    hexagon_M2_mpyud_nac_hl_s0,                // llvm.hexagon.M2.mpyud.nac.hl.s0
    hexagon_M2_mpyud_nac_hl_s1,                // llvm.hexagon.M2.mpyud.nac.hl.s1
    hexagon_M2_mpyud_nac_lh_s0,                // llvm.hexagon.M2.mpyud.nac.lh.s0
    hexagon_M2_mpyud_nac_lh_s1,                // llvm.hexagon.M2.mpyud.nac.lh.s1
    hexagon_M2_mpyud_nac_ll_s0,                // llvm.hexagon.M2.mpyud.nac.ll.s0
    hexagon_M2_mpyud_nac_ll_s1,                // llvm.hexagon.M2.mpyud.nac.ll.s1
    hexagon_M2_mpyui,                          // llvm.hexagon.M2.mpyui
    hexagon_M2_nacci,                          // llvm.hexagon.M2.nacci
    hexagon_M2_naccii,                         // llvm.hexagon.M2.naccii
    hexagon_M2_subacc,                         // llvm.hexagon.M2.subacc
    hexagon_M2_vabsdiffh,                      // llvm.hexagon.M2.vabsdiffh
    hexagon_M2_vabsdiffw,                      // llvm.hexagon.M2.vabsdiffw
    hexagon_M2_vcmac_s0_sat_i,                 // llvm.hexagon.M2.vcmac.s0.sat.i
    hexagon_M2_vcmac_s0_sat_r,                 // llvm.hexagon.M2.vcmac.s0.sat.r
    hexagon_M2_vcmpy_s0_sat_i,                 // llvm.hexagon.M2.vcmpy.s0.sat.i
    hexagon_M2_vcmpy_s0_sat_r,                 // llvm.hexagon.M2.vcmpy.s0.sat.r
    hexagon_M2_vcmpy_s1_sat_i,                 // llvm.hexagon.M2.vcmpy.s1.sat.i
    hexagon_M2_vcmpy_s1_sat_r,                 // llvm.hexagon.M2.vcmpy.s1.sat.r
    hexagon_M2_vdmacs_s0,                      // llvm.hexagon.M2.vdmacs.s0
    hexagon_M2_vdmacs_s1,                      // llvm.hexagon.M2.vdmacs.s1
    hexagon_M2_vdmpyrs_s0,                     // llvm.hexagon.M2.vdmpyrs.s0
    hexagon_M2_vdmpyrs_s1,                     // llvm.hexagon.M2.vdmpyrs.s1
    hexagon_M2_vdmpys_s0,                      // llvm.hexagon.M2.vdmpys.s0
    hexagon_M2_vdmpys_s1,                      // llvm.hexagon.M2.vdmpys.s1
    hexagon_M2_vmac2,                          // llvm.hexagon.M2.vmac2
    hexagon_M2_vmac2es,                        // llvm.hexagon.M2.vmac2es
    hexagon_M2_vmac2es_s0,                     // llvm.hexagon.M2.vmac2es.s0
    hexagon_M2_vmac2es_s1,                     // llvm.hexagon.M2.vmac2es.s1
    hexagon_M2_vmac2s_s0,                      // llvm.hexagon.M2.vmac2s.s0
    hexagon_M2_vmac2s_s1,                      // llvm.hexagon.M2.vmac2s.s1
    hexagon_M2_vmac2su_s0,                     // llvm.hexagon.M2.vmac2su.s0
    hexagon_M2_vmac2su_s1,                     // llvm.hexagon.M2.vmac2su.s1
    hexagon_M2_vmpy2es_s0,                     // llvm.hexagon.M2.vmpy2es.s0
    hexagon_M2_vmpy2es_s1,                     // llvm.hexagon.M2.vmpy2es.s1
    hexagon_M2_vmpy2s_s0,                      // llvm.hexagon.M2.vmpy2s.s0
    hexagon_M2_vmpy2s_s0pack,                  // llvm.hexagon.M2.vmpy2s.s0pack
    hexagon_M2_vmpy2s_s1,                      // llvm.hexagon.M2.vmpy2s.s1
    hexagon_M2_vmpy2s_s1pack,                  // llvm.hexagon.M2.vmpy2s.s1pack
    hexagon_M2_vmpy2su_s0,                     // llvm.hexagon.M2.vmpy2su.s0
    hexagon_M2_vmpy2su_s1,                     // llvm.hexagon.M2.vmpy2su.s1
    hexagon_M2_vraddh,                         // llvm.hexagon.M2.vraddh
    hexagon_M2_vradduh,                        // llvm.hexagon.M2.vradduh
    hexagon_M2_vrcmaci_s0,                     // llvm.hexagon.M2.vrcmaci.s0
    hexagon_M2_vrcmaci_s0c,                    // llvm.hexagon.M2.vrcmaci.s0c
    hexagon_M2_vrcmacr_s0,                     // llvm.hexagon.M2.vrcmacr.s0
    hexagon_M2_vrcmacr_s0c,                    // llvm.hexagon.M2.vrcmacr.s0c
    hexagon_M2_vrcmpyi_s0,                     // llvm.hexagon.M2.vrcmpyi.s0
    hexagon_M2_vrcmpyi_s0c,                    // llvm.hexagon.M2.vrcmpyi.s0c
    hexagon_M2_vrcmpyr_s0,                     // llvm.hexagon.M2.vrcmpyr.s0
    hexagon_M2_vrcmpyr_s0c,                    // llvm.hexagon.M2.vrcmpyr.s0c
    hexagon_M2_vrcmpys_acc_s1,                 // llvm.hexagon.M2.vrcmpys.acc.s1
    hexagon_M2_vrcmpys_s1,                     // llvm.hexagon.M2.vrcmpys.s1
    hexagon_M2_vrcmpys_s1rp,                   // llvm.hexagon.M2.vrcmpys.s1rp
    hexagon_M2_vrmac_s0,                       // llvm.hexagon.M2.vrmac.s0
    hexagon_M2_vrmpy_s0,                       // llvm.hexagon.M2.vrmpy.s0
    hexagon_M2_xor_xacc,                       // llvm.hexagon.M2.xor.xacc
    hexagon_M4_and_and,                        // llvm.hexagon.M4.and.and
    hexagon_M4_and_andn,                       // llvm.hexagon.M4.and.andn
    hexagon_M4_and_or,                         // llvm.hexagon.M4.and.or
    hexagon_M4_and_xor,                        // llvm.hexagon.M4.and.xor
    hexagon_M4_cmpyi_wh,                       // llvm.hexagon.M4.cmpyi.wh
    hexagon_M4_cmpyi_whc,                      // llvm.hexagon.M4.cmpyi.whc
    hexagon_M4_cmpyr_wh,                       // llvm.hexagon.M4.cmpyr.wh
    hexagon_M4_cmpyr_whc,                      // llvm.hexagon.M4.cmpyr.whc
    hexagon_M4_mac_up_s1_sat,                  // llvm.hexagon.M4.mac.up.s1.sat
    hexagon_M4_mpyri_addi,                     // llvm.hexagon.M4.mpyri.addi
    hexagon_M4_mpyri_addr,                     // llvm.hexagon.M4.mpyri.addr
    hexagon_M4_mpyri_addr_u2,                  // llvm.hexagon.M4.mpyri.addr.u2
    hexagon_M4_mpyrr_addi,                     // llvm.hexagon.M4.mpyrr.addi
    hexagon_M4_mpyrr_addr,                     // llvm.hexagon.M4.mpyrr.addr
    hexagon_M4_nac_up_s1_sat,                  // llvm.hexagon.M4.nac.up.s1.sat
    hexagon_M4_or_and,                         // llvm.hexagon.M4.or.and
    hexagon_M4_or_andn,                        // llvm.hexagon.M4.or.andn
    hexagon_M4_or_or,                          // llvm.hexagon.M4.or.or
    hexagon_M4_or_xor,                         // llvm.hexagon.M4.or.xor
    hexagon_M4_pmpyw,                          // llvm.hexagon.M4.pmpyw
    hexagon_M4_pmpyw_acc,                      // llvm.hexagon.M4.pmpyw.acc
    hexagon_M4_vpmpyh,                         // llvm.hexagon.M4.vpmpyh
    hexagon_M4_vpmpyh_acc,                     // llvm.hexagon.M4.vpmpyh.acc
    hexagon_M4_vrmpyeh_acc_s0,                 // llvm.hexagon.M4.vrmpyeh.acc.s0
    hexagon_M4_vrmpyeh_acc_s1,                 // llvm.hexagon.M4.vrmpyeh.acc.s1
    hexagon_M4_vrmpyeh_s0,                     // llvm.hexagon.M4.vrmpyeh.s0
    hexagon_M4_vrmpyeh_s1,                     // llvm.hexagon.M4.vrmpyeh.s1
    hexagon_M4_vrmpyoh_acc_s0,                 // llvm.hexagon.M4.vrmpyoh.acc.s0
    hexagon_M4_vrmpyoh_acc_s1,                 // llvm.hexagon.M4.vrmpyoh.acc.s1
    hexagon_M4_vrmpyoh_s0,                     // llvm.hexagon.M4.vrmpyoh.s0
    hexagon_M4_vrmpyoh_s1,                     // llvm.hexagon.M4.vrmpyoh.s1
    hexagon_M4_xor_and,                        // llvm.hexagon.M4.xor.and
    hexagon_M4_xor_andn,                       // llvm.hexagon.M4.xor.andn
    hexagon_M4_xor_or,                         // llvm.hexagon.M4.xor.or
    hexagon_M4_xor_xacc,                       // llvm.hexagon.M4.xor.xacc
    hexagon_M5_vdmacbsu,                       // llvm.hexagon.M5.vdmacbsu
    hexagon_M5_vdmpybsu,                       // llvm.hexagon.M5.vdmpybsu
    hexagon_M5_vmacbsu,                        // llvm.hexagon.M5.vmacbsu
    hexagon_M5_vmacbuu,                        // llvm.hexagon.M5.vmacbuu
    hexagon_M5_vmpybsu,                        // llvm.hexagon.M5.vmpybsu
    hexagon_M5_vmpybuu,                        // llvm.hexagon.M5.vmpybuu
    hexagon_M5_vrmacbsu,                       // llvm.hexagon.M5.vrmacbsu
    hexagon_M5_vrmacbuu,                       // llvm.hexagon.M5.vrmacbuu
    hexagon_M5_vrmpybsu,                       // llvm.hexagon.M5.vrmpybsu
    hexagon_M5_vrmpybuu,                       // llvm.hexagon.M5.vrmpybuu
    hexagon_M6_vabsdiffb,                      // llvm.hexagon.M6.vabsdiffb
    hexagon_M6_vabsdiffub,                     // llvm.hexagon.M6.vabsdiffub
    hexagon_M7_dcmpyiw,                        // llvm.hexagon.M7.dcmpyiw
    hexagon_M7_dcmpyiw_acc,                    // llvm.hexagon.M7.dcmpyiw.acc
    hexagon_M7_dcmpyiwc,                       // llvm.hexagon.M7.dcmpyiwc
    hexagon_M7_dcmpyiwc_acc,                   // llvm.hexagon.M7.dcmpyiwc.acc
    hexagon_M7_dcmpyrw,                        // llvm.hexagon.M7.dcmpyrw
    hexagon_M7_dcmpyrw_acc,                    // llvm.hexagon.M7.dcmpyrw.acc
    hexagon_M7_dcmpyrwc,                       // llvm.hexagon.M7.dcmpyrwc
    hexagon_M7_dcmpyrwc_acc,                   // llvm.hexagon.M7.dcmpyrwc.acc
    hexagon_M7_vdmpy,                          // llvm.hexagon.M7.vdmpy
    hexagon_M7_vdmpy_acc,                      // llvm.hexagon.M7.vdmpy.acc
    hexagon_M7_wcmpyiw,                        // llvm.hexagon.M7.wcmpyiw
    hexagon_M7_wcmpyiw_rnd,                    // llvm.hexagon.M7.wcmpyiw.rnd
    hexagon_M7_wcmpyiwc,                       // llvm.hexagon.M7.wcmpyiwc
    hexagon_M7_wcmpyiwc_rnd,                   // llvm.hexagon.M7.wcmpyiwc.rnd
    hexagon_M7_wcmpyrw,                        // llvm.hexagon.M7.wcmpyrw
    hexagon_M7_wcmpyrw_rnd,                    // llvm.hexagon.M7.wcmpyrw.rnd
    hexagon_M7_wcmpyrwc,                       // llvm.hexagon.M7.wcmpyrwc
    hexagon_M7_wcmpyrwc_rnd,                   // llvm.hexagon.M7.wcmpyrwc.rnd
    hexagon_S2_addasl_rrri,                    // llvm.hexagon.S2.addasl.rrri
    hexagon_S2_asl_i_p,                        // llvm.hexagon.S2.asl.i.p
    hexagon_S2_asl_i_p_acc,                    // llvm.hexagon.S2.asl.i.p.acc
    hexagon_S2_asl_i_p_and,                    // llvm.hexagon.S2.asl.i.p.and
    hexagon_S2_asl_i_p_nac,                    // llvm.hexagon.S2.asl.i.p.nac
    hexagon_S2_asl_i_p_or,                     // llvm.hexagon.S2.asl.i.p.or
    hexagon_S2_asl_i_p_xacc,                   // llvm.hexagon.S2.asl.i.p.xacc
    hexagon_S2_asl_i_r,                        // llvm.hexagon.S2.asl.i.r
    hexagon_S2_asl_i_r_acc,                    // llvm.hexagon.S2.asl.i.r.acc
    hexagon_S2_asl_i_r_and,                    // llvm.hexagon.S2.asl.i.r.and
    hexagon_S2_asl_i_r_nac,                    // llvm.hexagon.S2.asl.i.r.nac
    hexagon_S2_asl_i_r_or,                     // llvm.hexagon.S2.asl.i.r.or
    hexagon_S2_asl_i_r_sat,                    // llvm.hexagon.S2.asl.i.r.sat
    hexagon_S2_asl_i_r_xacc,                   // llvm.hexagon.S2.asl.i.r.xacc
    hexagon_S2_asl_i_vh,                       // llvm.hexagon.S2.asl.i.vh
    hexagon_S2_asl_i_vw,                       // llvm.hexagon.S2.asl.i.vw
    hexagon_S2_asl_r_p,                        // llvm.hexagon.S2.asl.r.p
    hexagon_S2_asl_r_p_acc,                    // llvm.hexagon.S2.asl.r.p.acc
    hexagon_S2_asl_r_p_and,                    // llvm.hexagon.S2.asl.r.p.and
    hexagon_S2_asl_r_p_nac,                    // llvm.hexagon.S2.asl.r.p.nac
    hexagon_S2_asl_r_p_or,                     // llvm.hexagon.S2.asl.r.p.or
    hexagon_S2_asl_r_p_xor,                    // llvm.hexagon.S2.asl.r.p.xor
    hexagon_S2_asl_r_r,                        // llvm.hexagon.S2.asl.r.r
    hexagon_S2_asl_r_r_acc,                    // llvm.hexagon.S2.asl.r.r.acc
    hexagon_S2_asl_r_r_and,                    // llvm.hexagon.S2.asl.r.r.and
    hexagon_S2_asl_r_r_nac,                    // llvm.hexagon.S2.asl.r.r.nac
    hexagon_S2_asl_r_r_or,                     // llvm.hexagon.S2.asl.r.r.or
    hexagon_S2_asl_r_r_sat,                    // llvm.hexagon.S2.asl.r.r.sat
    hexagon_S2_asl_r_vh,                       // llvm.hexagon.S2.asl.r.vh
    hexagon_S2_asl_r_vw,                       // llvm.hexagon.S2.asl.r.vw
    hexagon_S2_asr_i_p,                        // llvm.hexagon.S2.asr.i.p
    hexagon_S2_asr_i_p_acc,                    // llvm.hexagon.S2.asr.i.p.acc
    hexagon_S2_asr_i_p_and,                    // llvm.hexagon.S2.asr.i.p.and
    hexagon_S2_asr_i_p_nac,                    // llvm.hexagon.S2.asr.i.p.nac
    hexagon_S2_asr_i_p_or,                     // llvm.hexagon.S2.asr.i.p.or
    hexagon_S2_asr_i_p_rnd,                    // llvm.hexagon.S2.asr.i.p.rnd
    hexagon_S2_asr_i_p_rnd_goodsyntax,         // llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
    hexagon_S2_asr_i_r,                        // llvm.hexagon.S2.asr.i.r
    hexagon_S2_asr_i_r_acc,                    // llvm.hexagon.S2.asr.i.r.acc
    hexagon_S2_asr_i_r_and,                    // llvm.hexagon.S2.asr.i.r.and
    hexagon_S2_asr_i_r_nac,                    // llvm.hexagon.S2.asr.i.r.nac
    hexagon_S2_asr_i_r_or,                     // llvm.hexagon.S2.asr.i.r.or
    hexagon_S2_asr_i_r_rnd,                    // llvm.hexagon.S2.asr.i.r.rnd
    hexagon_S2_asr_i_r_rnd_goodsyntax,         // llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
    hexagon_S2_asr_i_svw_trun,                 // llvm.hexagon.S2.asr.i.svw.trun
    hexagon_S2_asr_i_vh,                       // llvm.hexagon.S2.asr.i.vh
    hexagon_S2_asr_i_vw,                       // llvm.hexagon.S2.asr.i.vw
    hexagon_S2_asr_r_p,                        // llvm.hexagon.S2.asr.r.p
    hexagon_S2_asr_r_p_acc,                    // llvm.hexagon.S2.asr.r.p.acc
    hexagon_S2_asr_r_p_and,                    // llvm.hexagon.S2.asr.r.p.and
    hexagon_S2_asr_r_p_nac,                    // llvm.hexagon.S2.asr.r.p.nac
    hexagon_S2_asr_r_p_or,                     // llvm.hexagon.S2.asr.r.p.or
    hexagon_S2_asr_r_p_xor,                    // llvm.hexagon.S2.asr.r.p.xor
    hexagon_S2_asr_r_r,                        // llvm.hexagon.S2.asr.r.r
    hexagon_S2_asr_r_r_acc,                    // llvm.hexagon.S2.asr.r.r.acc
    hexagon_S2_asr_r_r_and,                    // llvm.hexagon.S2.asr.r.r.and
    hexagon_S2_asr_r_r_nac,                    // llvm.hexagon.S2.asr.r.r.nac
    hexagon_S2_asr_r_r_or,                     // llvm.hexagon.S2.asr.r.r.or
    hexagon_S2_asr_r_r_sat,                    // llvm.hexagon.S2.asr.r.r.sat
    hexagon_S2_asr_r_svw_trun,                 // llvm.hexagon.S2.asr.r.svw.trun
    hexagon_S2_asr_r_vh,                       // llvm.hexagon.S2.asr.r.vh
    hexagon_S2_asr_r_vw,                       // llvm.hexagon.S2.asr.r.vw
    hexagon_S2_brev,                           // llvm.hexagon.S2.brev
    hexagon_S2_brevp,                          // llvm.hexagon.S2.brevp
    hexagon_S2_cl0,                            // llvm.hexagon.S2.cl0
    hexagon_S2_cl0p,                           // llvm.hexagon.S2.cl0p
    hexagon_S2_cl1,                            // llvm.hexagon.S2.cl1
    hexagon_S2_cl1p,                           // llvm.hexagon.S2.cl1p
    hexagon_S2_clb,                            // llvm.hexagon.S2.clb
    hexagon_S2_clbnorm,                        // llvm.hexagon.S2.clbnorm
    hexagon_S2_clbp,                           // llvm.hexagon.S2.clbp
    hexagon_S2_clrbit_i,                       // llvm.hexagon.S2.clrbit.i
    hexagon_S2_clrbit_r,                       // llvm.hexagon.S2.clrbit.r
    hexagon_S2_ct0,                            // llvm.hexagon.S2.ct0
    hexagon_S2_ct0p,                           // llvm.hexagon.S2.ct0p
    hexagon_S2_ct1,                            // llvm.hexagon.S2.ct1
    hexagon_S2_ct1p,                           // llvm.hexagon.S2.ct1p
    hexagon_S2_deinterleave,                   // llvm.hexagon.S2.deinterleave
    hexagon_S2_extractu,                       // llvm.hexagon.S2.extractu
    hexagon_S2_extractu_rp,                    // llvm.hexagon.S2.extractu.rp
    hexagon_S2_extractup,                      // llvm.hexagon.S2.extractup
    hexagon_S2_extractup_rp,                   // llvm.hexagon.S2.extractup.rp
    hexagon_S2_insert,                         // llvm.hexagon.S2.insert
    hexagon_S2_insert_rp,                      // llvm.hexagon.S2.insert.rp
    hexagon_S2_insertp,                        // llvm.hexagon.S2.insertp
    hexagon_S2_insertp_rp,                     // llvm.hexagon.S2.insertp.rp
    hexagon_S2_interleave,                     // llvm.hexagon.S2.interleave
    hexagon_S2_lfsp,                           // llvm.hexagon.S2.lfsp
    hexagon_S2_lsl_r_p,                        // llvm.hexagon.S2.lsl.r.p
    hexagon_S2_lsl_r_p_acc,                    // llvm.hexagon.S2.lsl.r.p.acc
    hexagon_S2_lsl_r_p_and,                    // llvm.hexagon.S2.lsl.r.p.and
    hexagon_S2_lsl_r_p_nac,                    // llvm.hexagon.S2.lsl.r.p.nac
    hexagon_S2_lsl_r_p_or,                     // llvm.hexagon.S2.lsl.r.p.or
    hexagon_S2_lsl_r_p_xor,                    // llvm.hexagon.S2.lsl.r.p.xor
    hexagon_S2_lsl_r_r,                        // llvm.hexagon.S2.lsl.r.r
    hexagon_S2_lsl_r_r_acc,                    // llvm.hexagon.S2.lsl.r.r.acc
    hexagon_S2_lsl_r_r_and,                    // llvm.hexagon.S2.lsl.r.r.and
    hexagon_S2_lsl_r_r_nac,                    // llvm.hexagon.S2.lsl.r.r.nac
    hexagon_S2_lsl_r_r_or,                     // llvm.hexagon.S2.lsl.r.r.or
    hexagon_S2_lsl_r_vh,                       // llvm.hexagon.S2.lsl.r.vh
    hexagon_S2_lsl_r_vw,                       // llvm.hexagon.S2.lsl.r.vw
    hexagon_S2_lsr_i_p,                        // llvm.hexagon.S2.lsr.i.p
    hexagon_S2_lsr_i_p_acc,                    // llvm.hexagon.S2.lsr.i.p.acc
    hexagon_S2_lsr_i_p_and,                    // llvm.hexagon.S2.lsr.i.p.and
    hexagon_S2_lsr_i_p_nac,                    // llvm.hexagon.S2.lsr.i.p.nac
    hexagon_S2_lsr_i_p_or,                     // llvm.hexagon.S2.lsr.i.p.or
    hexagon_S2_lsr_i_p_xacc,                   // llvm.hexagon.S2.lsr.i.p.xacc
    hexagon_S2_lsr_i_r,                        // llvm.hexagon.S2.lsr.i.r
    hexagon_S2_lsr_i_r_acc,                    // llvm.hexagon.S2.lsr.i.r.acc
    hexagon_S2_lsr_i_r_and,                    // llvm.hexagon.S2.lsr.i.r.and
    hexagon_S2_lsr_i_r_nac,                    // llvm.hexagon.S2.lsr.i.r.nac
    hexagon_S2_lsr_i_r_or,                     // llvm.hexagon.S2.lsr.i.r.or
    hexagon_S2_lsr_i_r_xacc,                   // llvm.hexagon.S2.lsr.i.r.xacc
    hexagon_S2_lsr_i_vh,                       // llvm.hexagon.S2.lsr.i.vh
    hexagon_S2_lsr_i_vw,                       // llvm.hexagon.S2.lsr.i.vw
    hexagon_S2_lsr_r_p,                        // llvm.hexagon.S2.lsr.r.p
    hexagon_S2_lsr_r_p_acc,                    // llvm.hexagon.S2.lsr.r.p.acc
    hexagon_S2_lsr_r_p_and,                    // llvm.hexagon.S2.lsr.r.p.and
    hexagon_S2_lsr_r_p_nac,                    // llvm.hexagon.S2.lsr.r.p.nac
    hexagon_S2_lsr_r_p_or,                     // llvm.hexagon.S2.lsr.r.p.or
    hexagon_S2_lsr_r_p_xor,                    // llvm.hexagon.S2.lsr.r.p.xor
    hexagon_S2_lsr_r_r,                        // llvm.hexagon.S2.lsr.r.r
    hexagon_S2_lsr_r_r_acc,                    // llvm.hexagon.S2.lsr.r.r.acc
    hexagon_S2_lsr_r_r_and,                    // llvm.hexagon.S2.lsr.r.r.and
    hexagon_S2_lsr_r_r_nac,                    // llvm.hexagon.S2.lsr.r.r.nac
    hexagon_S2_lsr_r_r_or,                     // llvm.hexagon.S2.lsr.r.r.or
    hexagon_S2_lsr_r_vh,                       // llvm.hexagon.S2.lsr.r.vh
    hexagon_S2_lsr_r_vw,                       // llvm.hexagon.S2.lsr.r.vw
    hexagon_S2_mask,                           // llvm.hexagon.S2.mask
    hexagon_S2_packhl,                         // llvm.hexagon.S2.packhl
    hexagon_S2_parityp,                        // llvm.hexagon.S2.parityp
    hexagon_S2_setbit_i,                       // llvm.hexagon.S2.setbit.i
    hexagon_S2_setbit_r,                       // llvm.hexagon.S2.setbit.r
    hexagon_S2_shuffeb,                        // llvm.hexagon.S2.shuffeb
    hexagon_S2_shuffeh,                        // llvm.hexagon.S2.shuffeh
    hexagon_S2_shuffob,                        // llvm.hexagon.S2.shuffob
    hexagon_S2_shuffoh,                        // llvm.hexagon.S2.shuffoh
    hexagon_S2_storerb_pbr,                    // llvm.hexagon.S2.storerb.pbr
    hexagon_S2_storerb_pci,                    // llvm.hexagon.S2.storerb.pci
    hexagon_S2_storerb_pcr,                    // llvm.hexagon.S2.storerb.pcr
    hexagon_S2_storerd_pbr,                    // llvm.hexagon.S2.storerd.pbr
    hexagon_S2_storerd_pci,                    // llvm.hexagon.S2.storerd.pci
    hexagon_S2_storerd_pcr,                    // llvm.hexagon.S2.storerd.pcr
    hexagon_S2_storerf_pbr,                    // llvm.hexagon.S2.storerf.pbr
    hexagon_S2_storerf_pci,                    // llvm.hexagon.S2.storerf.pci
    hexagon_S2_storerf_pcr,                    // llvm.hexagon.S2.storerf.pcr
    hexagon_S2_storerh_pbr,                    // llvm.hexagon.S2.storerh.pbr
    hexagon_S2_storerh_pci,                    // llvm.hexagon.S2.storerh.pci
    hexagon_S2_storerh_pcr,                    // llvm.hexagon.S2.storerh.pcr
    hexagon_S2_storeri_pbr,                    // llvm.hexagon.S2.storeri.pbr
    hexagon_S2_storeri_pci,                    // llvm.hexagon.S2.storeri.pci
    hexagon_S2_storeri_pcr,                    // llvm.hexagon.S2.storeri.pcr
    hexagon_S2_storew_locked,                  // llvm.hexagon.S2.storew.locked
    hexagon_S2_svsathb,                        // llvm.hexagon.S2.svsathb
    hexagon_S2_svsathub,                       // llvm.hexagon.S2.svsathub
    hexagon_S2_tableidxb_goodsyntax,           // llvm.hexagon.S2.tableidxb.goodsyntax
    hexagon_S2_tableidxd_goodsyntax,           // llvm.hexagon.S2.tableidxd.goodsyntax
    hexagon_S2_tableidxh_goodsyntax,           // llvm.hexagon.S2.tableidxh.goodsyntax
    hexagon_S2_tableidxw_goodsyntax,           // llvm.hexagon.S2.tableidxw.goodsyntax
    hexagon_S2_togglebit_i,                    // llvm.hexagon.S2.togglebit.i
    hexagon_S2_togglebit_r,                    // llvm.hexagon.S2.togglebit.r
    hexagon_S2_tstbit_i,                       // llvm.hexagon.S2.tstbit.i
    hexagon_S2_tstbit_r,                       // llvm.hexagon.S2.tstbit.r
    hexagon_S2_valignib,                       // llvm.hexagon.S2.valignib
    hexagon_S2_valignrb,                       // llvm.hexagon.S2.valignrb
    hexagon_S2_vcnegh,                         // llvm.hexagon.S2.vcnegh
    hexagon_S2_vcrotate,                       // llvm.hexagon.S2.vcrotate
    hexagon_S2_vrcnegh,                        // llvm.hexagon.S2.vrcnegh
    hexagon_S2_vrndpackwh,                     // llvm.hexagon.S2.vrndpackwh
    hexagon_S2_vrndpackwhs,                    // llvm.hexagon.S2.vrndpackwhs
    hexagon_S2_vsathb,                         // llvm.hexagon.S2.vsathb
    hexagon_S2_vsathb_nopack,                  // llvm.hexagon.S2.vsathb.nopack
    hexagon_S2_vsathub,                        // llvm.hexagon.S2.vsathub
    hexagon_S2_vsathub_nopack,                 // llvm.hexagon.S2.vsathub.nopack
    hexagon_S2_vsatwh,                         // llvm.hexagon.S2.vsatwh
    hexagon_S2_vsatwh_nopack,                  // llvm.hexagon.S2.vsatwh.nopack
    hexagon_S2_vsatwuh,                        // llvm.hexagon.S2.vsatwuh
    hexagon_S2_vsatwuh_nopack,                 // llvm.hexagon.S2.vsatwuh.nopack
    hexagon_S2_vsplatrb,                       // llvm.hexagon.S2.vsplatrb
    hexagon_S2_vsplatrh,                       // llvm.hexagon.S2.vsplatrh
    hexagon_S2_vspliceib,                      // llvm.hexagon.S2.vspliceib
    hexagon_S2_vsplicerb,                      // llvm.hexagon.S2.vsplicerb
    hexagon_S2_vsxtbh,                         // llvm.hexagon.S2.vsxtbh
    hexagon_S2_vsxthw,                         // llvm.hexagon.S2.vsxthw
    hexagon_S2_vtrunehb,                       // llvm.hexagon.S2.vtrunehb
    hexagon_S2_vtrunewh,                       // llvm.hexagon.S2.vtrunewh
    hexagon_S2_vtrunohb,                       // llvm.hexagon.S2.vtrunohb
    hexagon_S2_vtrunowh,                       // llvm.hexagon.S2.vtrunowh
    hexagon_S2_vzxtbh,                         // llvm.hexagon.S2.vzxtbh
    hexagon_S2_vzxthw,                         // llvm.hexagon.S2.vzxthw
    hexagon_S4_addaddi,                        // llvm.hexagon.S4.addaddi
    hexagon_S4_addi_asl_ri,                    // llvm.hexagon.S4.addi.asl.ri
    hexagon_S4_addi_lsr_ri,                    // llvm.hexagon.S4.addi.lsr.ri
    hexagon_S4_andi_asl_ri,                    // llvm.hexagon.S4.andi.asl.ri
    hexagon_S4_andi_lsr_ri,                    // llvm.hexagon.S4.andi.lsr.ri
    hexagon_S4_clbaddi,                        // llvm.hexagon.S4.clbaddi
    hexagon_S4_clbpaddi,                       // llvm.hexagon.S4.clbpaddi
    hexagon_S4_clbpnorm,                       // llvm.hexagon.S4.clbpnorm
    hexagon_S4_extract,                        // llvm.hexagon.S4.extract
    hexagon_S4_extract_rp,                     // llvm.hexagon.S4.extract.rp
    hexagon_S4_extractp,                       // llvm.hexagon.S4.extractp
    hexagon_S4_extractp_rp,                    // llvm.hexagon.S4.extractp.rp
    hexagon_S4_lsli,                           // llvm.hexagon.S4.lsli
    hexagon_S4_ntstbit_i,                      // llvm.hexagon.S4.ntstbit.i
    hexagon_S4_ntstbit_r,                      // llvm.hexagon.S4.ntstbit.r
    hexagon_S4_or_andi,                        // llvm.hexagon.S4.or.andi
    hexagon_S4_or_andix,                       // llvm.hexagon.S4.or.andix
    hexagon_S4_or_ori,                         // llvm.hexagon.S4.or.ori
    hexagon_S4_ori_asl_ri,                     // llvm.hexagon.S4.ori.asl.ri
    hexagon_S4_ori_lsr_ri,                     // llvm.hexagon.S4.ori.lsr.ri
    hexagon_S4_parity,                         // llvm.hexagon.S4.parity
    hexagon_S4_stored_locked,                  // llvm.hexagon.S4.stored.locked
    hexagon_S4_subaddi,                        // llvm.hexagon.S4.subaddi
    hexagon_S4_subi_asl_ri,                    // llvm.hexagon.S4.subi.asl.ri
    hexagon_S4_subi_lsr_ri,                    // llvm.hexagon.S4.subi.lsr.ri
    hexagon_S4_vrcrotate,                      // llvm.hexagon.S4.vrcrotate
    hexagon_S4_vrcrotate_acc,                  // llvm.hexagon.S4.vrcrotate.acc
    hexagon_S4_vxaddsubh,                      // llvm.hexagon.S4.vxaddsubh
    hexagon_S4_vxaddsubhr,                     // llvm.hexagon.S4.vxaddsubhr
    hexagon_S4_vxaddsubw,                      // llvm.hexagon.S4.vxaddsubw
    hexagon_S4_vxsubaddh,                      // llvm.hexagon.S4.vxsubaddh
    hexagon_S4_vxsubaddhr,                     // llvm.hexagon.S4.vxsubaddhr
    hexagon_S4_vxsubaddw,                      // llvm.hexagon.S4.vxsubaddw
    hexagon_S5_asrhub_rnd_sat_goodsyntax,      // llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
    hexagon_S5_asrhub_sat,                     // llvm.hexagon.S5.asrhub.sat
    hexagon_S5_popcountp,                      // llvm.hexagon.S5.popcountp
    hexagon_S5_vasrhrnd_goodsyntax,            // llvm.hexagon.S5.vasrhrnd.goodsyntax
    hexagon_S6_rol_i_p,                        // llvm.hexagon.S6.rol.i.p
    hexagon_S6_rol_i_p_acc,                    // llvm.hexagon.S6.rol.i.p.acc
    hexagon_S6_rol_i_p_and,                    // llvm.hexagon.S6.rol.i.p.and
    hexagon_S6_rol_i_p_nac,                    // llvm.hexagon.S6.rol.i.p.nac
    hexagon_S6_rol_i_p_or,                     // llvm.hexagon.S6.rol.i.p.or
    hexagon_S6_rol_i_p_xacc,                   // llvm.hexagon.S6.rol.i.p.xacc
    hexagon_S6_rol_i_r,                        // llvm.hexagon.S6.rol.i.r
    hexagon_S6_rol_i_r_acc,                    // llvm.hexagon.S6.rol.i.r.acc
    hexagon_S6_rol_i_r_and,                    // llvm.hexagon.S6.rol.i.r.and
    hexagon_S6_rol_i_r_nac,                    // llvm.hexagon.S6.rol.i.r.nac
    hexagon_S6_rol_i_r_or,                     // llvm.hexagon.S6.rol.i.r.or
    hexagon_S6_rol_i_r_xacc,                   // llvm.hexagon.S6.rol.i.r.xacc
    hexagon_S6_vsplatrbp,                      // llvm.hexagon.S6.vsplatrbp
    hexagon_S6_vtrunehb_ppp,                   // llvm.hexagon.S6.vtrunehb.ppp
    hexagon_S6_vtrunohb_ppp,                   // llvm.hexagon.S6.vtrunohb.ppp
    hexagon_V6_extractw,                       // llvm.hexagon.V6.extractw
    hexagon_V6_extractw_128B,                  // llvm.hexagon.V6.extractw.128B
    hexagon_V6_hi,                             // llvm.hexagon.V6.hi
    hexagon_V6_hi_128B,                        // llvm.hexagon.V6.hi.128B
    hexagon_V6_lo,                             // llvm.hexagon.V6.lo
    hexagon_V6_lo_128B,                        // llvm.hexagon.V6.lo.128B
    hexagon_V6_lvsplatb,                       // llvm.hexagon.V6.lvsplatb
    hexagon_V6_lvsplatb_128B,                  // llvm.hexagon.V6.lvsplatb.128B
    hexagon_V6_lvsplath,                       // llvm.hexagon.V6.lvsplath
    hexagon_V6_lvsplath_128B,                  // llvm.hexagon.V6.lvsplath.128B
    hexagon_V6_lvsplatw,                       // llvm.hexagon.V6.lvsplatw
    hexagon_V6_lvsplatw_128B,                  // llvm.hexagon.V6.lvsplatw.128B
    hexagon_V6_pred_and,                       // llvm.hexagon.V6.pred.and
    hexagon_V6_pred_and_128B,                  // llvm.hexagon.V6.pred.and.128B
    hexagon_V6_pred_and_n,                     // llvm.hexagon.V6.pred.and.n
    hexagon_V6_pred_and_n_128B,                // llvm.hexagon.V6.pred.and.n.128B
    hexagon_V6_pred_not,                       // llvm.hexagon.V6.pred.not
    hexagon_V6_pred_not_128B,                  // llvm.hexagon.V6.pred.not.128B
    hexagon_V6_pred_or,                        // llvm.hexagon.V6.pred.or
    hexagon_V6_pred_or_128B,                   // llvm.hexagon.V6.pred.or.128B
    hexagon_V6_pred_or_n,                      // llvm.hexagon.V6.pred.or.n
    hexagon_V6_pred_or_n_128B,                 // llvm.hexagon.V6.pred.or.n.128B
    hexagon_V6_pred_scalar2,                   // llvm.hexagon.V6.pred.scalar2
    hexagon_V6_pred_scalar2_128B,              // llvm.hexagon.V6.pred.scalar2.128B
    hexagon_V6_pred_scalar2v2,                 // llvm.hexagon.V6.pred.scalar2v2
    hexagon_V6_pred_scalar2v2_128B,            // llvm.hexagon.V6.pred.scalar2v2.128B
    hexagon_V6_pred_typecast,                  // llvm.hexagon.V6.pred.typecast
    hexagon_V6_pred_typecast_128B,             // llvm.hexagon.V6.pred.typecast.128B
    hexagon_V6_pred_xor,                       // llvm.hexagon.V6.pred.xor
    hexagon_V6_pred_xor_128B,                  // llvm.hexagon.V6.pred.xor.128B
    hexagon_V6_shuffeqh,                       // llvm.hexagon.V6.shuffeqh
    hexagon_V6_shuffeqh_128B,                  // llvm.hexagon.V6.shuffeqh.128B
    hexagon_V6_shuffeqw,                       // llvm.hexagon.V6.shuffeqw
    hexagon_V6_shuffeqw_128B,                  // llvm.hexagon.V6.shuffeqw.128B
    hexagon_V6_v6mpyhubs10,                    // llvm.hexagon.V6.v6mpyhubs10
    hexagon_V6_v6mpyhubs10_128B,               // llvm.hexagon.V6.v6mpyhubs10.128B
    hexagon_V6_v6mpyhubs10_vxx,                // llvm.hexagon.V6.v6mpyhubs10.vxx
    hexagon_V6_v6mpyhubs10_vxx_128B,           // llvm.hexagon.V6.v6mpyhubs10.vxx.128B
    hexagon_V6_v6mpyvubs10,                    // llvm.hexagon.V6.v6mpyvubs10
    hexagon_V6_v6mpyvubs10_128B,               // llvm.hexagon.V6.v6mpyvubs10.128B
    hexagon_V6_v6mpyvubs10_vxx,                // llvm.hexagon.V6.v6mpyvubs10.vxx
    hexagon_V6_v6mpyvubs10_vxx_128B,           // llvm.hexagon.V6.v6mpyvubs10.vxx.128B
    hexagon_V6_vL32b_npred_ai,                 // llvm.hexagon.V6.vL32b.npred.ai
    hexagon_V6_vL32b_npred_ai_128B,            // llvm.hexagon.V6.vL32b.npred.ai.128B
    hexagon_V6_vL32b_npred_pi,                 // llvm.hexagon.V6.vL32b.npred.pi
    hexagon_V6_vL32b_npred_pi_128B,            // llvm.hexagon.V6.vL32b.npred.pi.128B
    hexagon_V6_vL32b_npred_ppu,                // llvm.hexagon.V6.vL32b.npred.ppu
    hexagon_V6_vL32b_npred_ppu_128B,           // llvm.hexagon.V6.vL32b.npred.ppu.128B
    hexagon_V6_vL32b_nt_npred_ai,              // llvm.hexagon.V6.vL32b.nt.npred.ai
    hexagon_V6_vL32b_nt_npred_ai_128B,         // llvm.hexagon.V6.vL32b.nt.npred.ai.128B
    hexagon_V6_vL32b_nt_npred_pi,              // llvm.hexagon.V6.vL32b.nt.npred.pi
    hexagon_V6_vL32b_nt_npred_pi_128B,         // llvm.hexagon.V6.vL32b.nt.npred.pi.128B
    hexagon_V6_vL32b_nt_npred_ppu,             // llvm.hexagon.V6.vL32b.nt.npred.ppu
    hexagon_V6_vL32b_nt_npred_ppu_128B,        // llvm.hexagon.V6.vL32b.nt.npred.ppu.128B
    hexagon_V6_vL32b_nt_pred_ai,               // llvm.hexagon.V6.vL32b.nt.pred.ai
    hexagon_V6_vL32b_nt_pred_ai_128B,          // llvm.hexagon.V6.vL32b.nt.pred.ai.128B
    hexagon_V6_vL32b_nt_pred_pi,               // llvm.hexagon.V6.vL32b.nt.pred.pi
    hexagon_V6_vL32b_nt_pred_pi_128B,          // llvm.hexagon.V6.vL32b.nt.pred.pi.128B
    hexagon_V6_vL32b_nt_pred_ppu,              // llvm.hexagon.V6.vL32b.nt.pred.ppu
    hexagon_V6_vL32b_nt_pred_ppu_128B,         // llvm.hexagon.V6.vL32b.nt.pred.ppu.128B
    hexagon_V6_vL32b_pred_ai,                  // llvm.hexagon.V6.vL32b.pred.ai
    hexagon_V6_vL32b_pred_ai_128B,             // llvm.hexagon.V6.vL32b.pred.ai.128B
    hexagon_V6_vL32b_pred_pi,                  // llvm.hexagon.V6.vL32b.pred.pi
    hexagon_V6_vL32b_pred_pi_128B,             // llvm.hexagon.V6.vL32b.pred.pi.128B
    hexagon_V6_vL32b_pred_ppu,                 // llvm.hexagon.V6.vL32b.pred.ppu
    hexagon_V6_vL32b_pred_ppu_128B,            // llvm.hexagon.V6.vL32b.pred.ppu.128B
    hexagon_V6_vS32Ub_npred_ai,                // llvm.hexagon.V6.vS32Ub.npred.ai
    hexagon_V6_vS32Ub_npred_ai_128B,           // llvm.hexagon.V6.vS32Ub.npred.ai.128B
    hexagon_V6_vS32Ub_npred_pi,                // llvm.hexagon.V6.vS32Ub.npred.pi
    hexagon_V6_vS32Ub_npred_pi_128B,           // llvm.hexagon.V6.vS32Ub.npred.pi.128B
    hexagon_V6_vS32Ub_npred_ppu,               // llvm.hexagon.V6.vS32Ub.npred.ppu
    hexagon_V6_vS32Ub_npred_ppu_128B,          // llvm.hexagon.V6.vS32Ub.npred.ppu.128B
    hexagon_V6_vS32Ub_pred_ai,                 // llvm.hexagon.V6.vS32Ub.pred.ai
    hexagon_V6_vS32Ub_pred_ai_128B,            // llvm.hexagon.V6.vS32Ub.pred.ai.128B
    hexagon_V6_vS32Ub_pred_pi,                 // llvm.hexagon.V6.vS32Ub.pred.pi
    hexagon_V6_vS32Ub_pred_pi_128B,            // llvm.hexagon.V6.vS32Ub.pred.pi.128B
    hexagon_V6_vS32Ub_pred_ppu,                // llvm.hexagon.V6.vS32Ub.pred.ppu
    hexagon_V6_vS32Ub_pred_ppu_128B,           // llvm.hexagon.V6.vS32Ub.pred.ppu.128B
    hexagon_V6_vS32b_npred_ai,                 // llvm.hexagon.V6.vS32b.npred.ai
    hexagon_V6_vS32b_npred_ai_128B,            // llvm.hexagon.V6.vS32b.npred.ai.128B
    hexagon_V6_vS32b_npred_pi,                 // llvm.hexagon.V6.vS32b.npred.pi
    hexagon_V6_vS32b_npred_pi_128B,            // llvm.hexagon.V6.vS32b.npred.pi.128B
    hexagon_V6_vS32b_npred_ppu,                // llvm.hexagon.V6.vS32b.npred.ppu
    hexagon_V6_vS32b_npred_ppu_128B,           // llvm.hexagon.V6.vS32b.npred.ppu.128B
    hexagon_V6_vS32b_nqpred_ai,                // llvm.hexagon.V6.vS32b.nqpred.ai
    hexagon_V6_vS32b_nqpred_ai_128B,           // llvm.hexagon.V6.vS32b.nqpred.ai.128B
    hexagon_V6_vS32b_nt_npred_ai,              // llvm.hexagon.V6.vS32b.nt.npred.ai
    hexagon_V6_vS32b_nt_npred_ai_128B,         // llvm.hexagon.V6.vS32b.nt.npred.ai.128B
    hexagon_V6_vS32b_nt_npred_pi,              // llvm.hexagon.V6.vS32b.nt.npred.pi
    hexagon_V6_vS32b_nt_npred_pi_128B,         // llvm.hexagon.V6.vS32b.nt.npred.pi.128B
    hexagon_V6_vS32b_nt_npred_ppu,             // llvm.hexagon.V6.vS32b.nt.npred.ppu
    hexagon_V6_vS32b_nt_npred_ppu_128B,        // llvm.hexagon.V6.vS32b.nt.npred.ppu.128B
    hexagon_V6_vS32b_nt_nqpred_ai,             // llvm.hexagon.V6.vS32b.nt.nqpred.ai
    hexagon_V6_vS32b_nt_nqpred_ai_128B,        // llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B
    hexagon_V6_vS32b_nt_pred_ai,               // llvm.hexagon.V6.vS32b.nt.pred.ai
    hexagon_V6_vS32b_nt_pred_ai_128B,          // llvm.hexagon.V6.vS32b.nt.pred.ai.128B
    hexagon_V6_vS32b_nt_pred_pi,               // llvm.hexagon.V6.vS32b.nt.pred.pi
    hexagon_V6_vS32b_nt_pred_pi_128B,          // llvm.hexagon.V6.vS32b.nt.pred.pi.128B
    hexagon_V6_vS32b_nt_pred_ppu,              // llvm.hexagon.V6.vS32b.nt.pred.ppu
    hexagon_V6_vS32b_nt_pred_ppu_128B,         // llvm.hexagon.V6.vS32b.nt.pred.ppu.128B
    hexagon_V6_vS32b_nt_qpred_ai,              // llvm.hexagon.V6.vS32b.nt.qpred.ai
    hexagon_V6_vS32b_nt_qpred_ai_128B,         // llvm.hexagon.V6.vS32b.nt.qpred.ai.128B
    hexagon_V6_vS32b_pred_ai,                  // llvm.hexagon.V6.vS32b.pred.ai
    hexagon_V6_vS32b_pred_ai_128B,             // llvm.hexagon.V6.vS32b.pred.ai.128B
    hexagon_V6_vS32b_pred_pi,                  // llvm.hexagon.V6.vS32b.pred.pi
    hexagon_V6_vS32b_pred_pi_128B,             // llvm.hexagon.V6.vS32b.pred.pi.128B
    hexagon_V6_vS32b_pred_ppu,                 // llvm.hexagon.V6.vS32b.pred.ppu
    hexagon_V6_vS32b_pred_ppu_128B,            // llvm.hexagon.V6.vS32b.pred.ppu.128B
    hexagon_V6_vS32b_qpred_ai,                 // llvm.hexagon.V6.vS32b.qpred.ai
    hexagon_V6_vS32b_qpred_ai_128B,            // llvm.hexagon.V6.vS32b.qpred.ai.128B
    hexagon_V6_vabs_hf,                        // llvm.hexagon.V6.vabs.hf
    hexagon_V6_vabs_hf_128B,                   // llvm.hexagon.V6.vabs.hf.128B
    hexagon_V6_vabs_sf,                        // llvm.hexagon.V6.vabs.sf
    hexagon_V6_vabs_sf_128B,                   // llvm.hexagon.V6.vabs.sf.128B
    hexagon_V6_vabsb,                          // llvm.hexagon.V6.vabsb
    hexagon_V6_vabsb_128B,                     // llvm.hexagon.V6.vabsb.128B
    hexagon_V6_vabsb_sat,                      // llvm.hexagon.V6.vabsb.sat
    hexagon_V6_vabsb_sat_128B,                 // llvm.hexagon.V6.vabsb.sat.128B
    hexagon_V6_vabsdiffh,                      // llvm.hexagon.V6.vabsdiffh
    hexagon_V6_vabsdiffh_128B,                 // llvm.hexagon.V6.vabsdiffh.128B
    hexagon_V6_vabsdiffub,                     // llvm.hexagon.V6.vabsdiffub
    hexagon_V6_vabsdiffub_128B,                // llvm.hexagon.V6.vabsdiffub.128B
    hexagon_V6_vabsdiffuh,                     // llvm.hexagon.V6.vabsdiffuh
    hexagon_V6_vabsdiffuh_128B,                // llvm.hexagon.V6.vabsdiffuh.128B
    hexagon_V6_vabsdiffw,                      // llvm.hexagon.V6.vabsdiffw
    hexagon_V6_vabsdiffw_128B,                 // llvm.hexagon.V6.vabsdiffw.128B
    hexagon_V6_vabsh,                          // llvm.hexagon.V6.vabsh
    hexagon_V6_vabsh_128B,                     // llvm.hexagon.V6.vabsh.128B
    hexagon_V6_vabsh_sat,                      // llvm.hexagon.V6.vabsh.sat
    hexagon_V6_vabsh_sat_128B,                 // llvm.hexagon.V6.vabsh.sat.128B
    hexagon_V6_vabsw,                          // llvm.hexagon.V6.vabsw
    hexagon_V6_vabsw_128B,                     // llvm.hexagon.V6.vabsw.128B
    hexagon_V6_vabsw_sat,                      // llvm.hexagon.V6.vabsw.sat
    hexagon_V6_vabsw_sat_128B,                 // llvm.hexagon.V6.vabsw.sat.128B
    hexagon_V6_vadd_hf,                        // llvm.hexagon.V6.vadd.hf
    hexagon_V6_vadd_hf_128B,                   // llvm.hexagon.V6.vadd.hf.128B
    hexagon_V6_vadd_hf_hf,                     // llvm.hexagon.V6.vadd.hf.hf
    hexagon_V6_vadd_hf_hf_128B,                // llvm.hexagon.V6.vadd.hf.hf.128B
    hexagon_V6_vadd_qf16,                      // llvm.hexagon.V6.vadd.qf16
    hexagon_V6_vadd_qf16_128B,                 // llvm.hexagon.V6.vadd.qf16.128B
    hexagon_V6_vadd_qf16_mix,                  // llvm.hexagon.V6.vadd.qf16.mix
    hexagon_V6_vadd_qf16_mix_128B,             // llvm.hexagon.V6.vadd.qf16.mix.128B
    hexagon_V6_vadd_qf32,                      // llvm.hexagon.V6.vadd.qf32
    hexagon_V6_vadd_qf32_128B,                 // llvm.hexagon.V6.vadd.qf32.128B
    hexagon_V6_vadd_qf32_mix,                  // llvm.hexagon.V6.vadd.qf32.mix
    hexagon_V6_vadd_qf32_mix_128B,             // llvm.hexagon.V6.vadd.qf32.mix.128B
    hexagon_V6_vadd_sf,                        // llvm.hexagon.V6.vadd.sf
    hexagon_V6_vadd_sf_128B,                   // llvm.hexagon.V6.vadd.sf.128B
    hexagon_V6_vadd_sf_bf,                     // llvm.hexagon.V6.vadd.sf.bf
    hexagon_V6_vadd_sf_bf_128B,                // llvm.hexagon.V6.vadd.sf.bf.128B
    hexagon_V6_vadd_sf_hf,                     // llvm.hexagon.V6.vadd.sf.hf
    hexagon_V6_vadd_sf_hf_128B,                // llvm.hexagon.V6.vadd.sf.hf.128B
    hexagon_V6_vadd_sf_sf,                     // llvm.hexagon.V6.vadd.sf.sf
    hexagon_V6_vadd_sf_sf_128B,                // llvm.hexagon.V6.vadd.sf.sf.128B
    hexagon_V6_vaddb,                          // llvm.hexagon.V6.vaddb
    hexagon_V6_vaddb_128B,                     // llvm.hexagon.V6.vaddb.128B
    hexagon_V6_vaddb_dv,                       // llvm.hexagon.V6.vaddb.dv
    hexagon_V6_vaddb_dv_128B,                  // llvm.hexagon.V6.vaddb.dv.128B
    hexagon_V6_vaddbnq,                        // llvm.hexagon.V6.vaddbnq
    hexagon_V6_vaddbnq_128B,                   // llvm.hexagon.V6.vaddbnq.128B
    hexagon_V6_vaddbq,                         // llvm.hexagon.V6.vaddbq
    hexagon_V6_vaddbq_128B,                    // llvm.hexagon.V6.vaddbq.128B
    hexagon_V6_vaddbsat,                       // llvm.hexagon.V6.vaddbsat
    hexagon_V6_vaddbsat_128B,                  // llvm.hexagon.V6.vaddbsat.128B
    hexagon_V6_vaddbsat_dv,                    // llvm.hexagon.V6.vaddbsat.dv
    hexagon_V6_vaddbsat_dv_128B,               // llvm.hexagon.V6.vaddbsat.dv.128B
    hexagon_V6_vaddcarry,                      // llvm.hexagon.V6.vaddcarry
    hexagon_V6_vaddcarry_128B,                 // llvm.hexagon.V6.vaddcarry.128B
    hexagon_V6_vaddcarryo,                     // llvm.hexagon.V6.vaddcarryo
    hexagon_V6_vaddcarryo_128B,                // llvm.hexagon.V6.vaddcarryo.128B
    hexagon_V6_vaddcarrysat,                   // llvm.hexagon.V6.vaddcarrysat
    hexagon_V6_vaddcarrysat_128B,              // llvm.hexagon.V6.vaddcarrysat.128B
    hexagon_V6_vaddclbh,                       // llvm.hexagon.V6.vaddclbh
    hexagon_V6_vaddclbh_128B,                  // llvm.hexagon.V6.vaddclbh.128B
    hexagon_V6_vaddclbw,                       // llvm.hexagon.V6.vaddclbw
    hexagon_V6_vaddclbw_128B,                  // llvm.hexagon.V6.vaddclbw.128B
    hexagon_V6_vaddh,                          // llvm.hexagon.V6.vaddh
    hexagon_V6_vaddh_128B,                     // llvm.hexagon.V6.vaddh.128B
    hexagon_V6_vaddh_dv,                       // llvm.hexagon.V6.vaddh.dv
    hexagon_V6_vaddh_dv_128B,                  // llvm.hexagon.V6.vaddh.dv.128B
    hexagon_V6_vaddhnq,                        // llvm.hexagon.V6.vaddhnq
    hexagon_V6_vaddhnq_128B,                   // llvm.hexagon.V6.vaddhnq.128B
    hexagon_V6_vaddhq,                         // llvm.hexagon.V6.vaddhq
    hexagon_V6_vaddhq_128B,                    // llvm.hexagon.V6.vaddhq.128B
    hexagon_V6_vaddhsat,                       // llvm.hexagon.V6.vaddhsat
    hexagon_V6_vaddhsat_128B,                  // llvm.hexagon.V6.vaddhsat.128B
    hexagon_V6_vaddhsat_dv,                    // llvm.hexagon.V6.vaddhsat.dv
    hexagon_V6_vaddhsat_dv_128B,               // llvm.hexagon.V6.vaddhsat.dv.128B
    hexagon_V6_vaddhw,                         // llvm.hexagon.V6.vaddhw
    hexagon_V6_vaddhw_128B,                    // llvm.hexagon.V6.vaddhw.128B
    hexagon_V6_vaddhw_acc,                     // llvm.hexagon.V6.vaddhw.acc
    hexagon_V6_vaddhw_acc_128B,                // llvm.hexagon.V6.vaddhw.acc.128B
    hexagon_V6_vaddubh,                        // llvm.hexagon.V6.vaddubh
    hexagon_V6_vaddubh_128B,                   // llvm.hexagon.V6.vaddubh.128B
    hexagon_V6_vaddubh_acc,                    // llvm.hexagon.V6.vaddubh.acc
    hexagon_V6_vaddubh_acc_128B,               // llvm.hexagon.V6.vaddubh.acc.128B
    hexagon_V6_vaddubsat,                      // llvm.hexagon.V6.vaddubsat
    hexagon_V6_vaddubsat_128B,                 // llvm.hexagon.V6.vaddubsat.128B
    hexagon_V6_vaddubsat_dv,                   // llvm.hexagon.V6.vaddubsat.dv
    hexagon_V6_vaddubsat_dv_128B,              // llvm.hexagon.V6.vaddubsat.dv.128B
    hexagon_V6_vaddububb_sat,                  // llvm.hexagon.V6.vaddububb.sat
    hexagon_V6_vaddububb_sat_128B,             // llvm.hexagon.V6.vaddububb.sat.128B
    hexagon_V6_vadduhsat,                      // llvm.hexagon.V6.vadduhsat
    hexagon_V6_vadduhsat_128B,                 // llvm.hexagon.V6.vadduhsat.128B
    hexagon_V6_vadduhsat_dv,                   // llvm.hexagon.V6.vadduhsat.dv
    hexagon_V6_vadduhsat_dv_128B,              // llvm.hexagon.V6.vadduhsat.dv.128B
    hexagon_V6_vadduhw,                        // llvm.hexagon.V6.vadduhw
    hexagon_V6_vadduhw_128B,                   // llvm.hexagon.V6.vadduhw.128B
    hexagon_V6_vadduhw_acc,                    // llvm.hexagon.V6.vadduhw.acc
    hexagon_V6_vadduhw_acc_128B,               // llvm.hexagon.V6.vadduhw.acc.128B
    hexagon_V6_vadduwsat,                      // llvm.hexagon.V6.vadduwsat
    hexagon_V6_vadduwsat_128B,                 // llvm.hexagon.V6.vadduwsat.128B
    hexagon_V6_vadduwsat_dv,                   // llvm.hexagon.V6.vadduwsat.dv
    hexagon_V6_vadduwsat_dv_128B,              // llvm.hexagon.V6.vadduwsat.dv.128B
    hexagon_V6_vaddw,                          // llvm.hexagon.V6.vaddw
    hexagon_V6_vaddw_128B,                     // llvm.hexagon.V6.vaddw.128B
    hexagon_V6_vaddw_dv,                       // llvm.hexagon.V6.vaddw.dv
    hexagon_V6_vaddw_dv_128B,                  // llvm.hexagon.V6.vaddw.dv.128B
    hexagon_V6_vaddwnq,                        // llvm.hexagon.V6.vaddwnq
    hexagon_V6_vaddwnq_128B,                   // llvm.hexagon.V6.vaddwnq.128B
    hexagon_V6_vaddwq,                         // llvm.hexagon.V6.vaddwq
    hexagon_V6_vaddwq_128B,                    // llvm.hexagon.V6.vaddwq.128B
    hexagon_V6_vaddwsat,                       // llvm.hexagon.V6.vaddwsat
    hexagon_V6_vaddwsat_128B,                  // llvm.hexagon.V6.vaddwsat.128B
    hexagon_V6_vaddwsat_dv,                    // llvm.hexagon.V6.vaddwsat.dv
    hexagon_V6_vaddwsat_dv_128B,               // llvm.hexagon.V6.vaddwsat.dv.128B
    hexagon_V6_valignb,                        // llvm.hexagon.V6.valignb
    hexagon_V6_valignb_128B,                   // llvm.hexagon.V6.valignb.128B
    hexagon_V6_valignbi,                       // llvm.hexagon.V6.valignbi
    hexagon_V6_valignbi_128B,                  // llvm.hexagon.V6.valignbi.128B
    hexagon_V6_vand,                           // llvm.hexagon.V6.vand
    hexagon_V6_vand_128B,                      // llvm.hexagon.V6.vand.128B
    hexagon_V6_vandnqrt,                       // llvm.hexagon.V6.vandnqrt
    hexagon_V6_vandnqrt_128B,                  // llvm.hexagon.V6.vandnqrt.128B
    hexagon_V6_vandnqrt_acc,                   // llvm.hexagon.V6.vandnqrt.acc
    hexagon_V6_vandnqrt_acc_128B,              // llvm.hexagon.V6.vandnqrt.acc.128B
    hexagon_V6_vandqrt,                        // llvm.hexagon.V6.vandqrt
    hexagon_V6_vandqrt_128B,                   // llvm.hexagon.V6.vandqrt.128B
    hexagon_V6_vandqrt_acc,                    // llvm.hexagon.V6.vandqrt.acc
    hexagon_V6_vandqrt_acc_128B,               // llvm.hexagon.V6.vandqrt.acc.128B
    hexagon_V6_vandvnqv,                       // llvm.hexagon.V6.vandvnqv
    hexagon_V6_vandvnqv_128B,                  // llvm.hexagon.V6.vandvnqv.128B
    hexagon_V6_vandvqv,                        // llvm.hexagon.V6.vandvqv
    hexagon_V6_vandvqv_128B,                   // llvm.hexagon.V6.vandvqv.128B
    hexagon_V6_vandvrt,                        // llvm.hexagon.V6.vandvrt
    hexagon_V6_vandvrt_128B,                   // llvm.hexagon.V6.vandvrt.128B
    hexagon_V6_vandvrt_acc,                    // llvm.hexagon.V6.vandvrt.acc
    hexagon_V6_vandvrt_acc_128B,               // llvm.hexagon.V6.vandvrt.acc.128B
    hexagon_V6_vaslh,                          // llvm.hexagon.V6.vaslh
    hexagon_V6_vaslh_128B,                     // llvm.hexagon.V6.vaslh.128B
    hexagon_V6_vaslh_acc,                      // llvm.hexagon.V6.vaslh.acc
    hexagon_V6_vaslh_acc_128B,                 // llvm.hexagon.V6.vaslh.acc.128B
    hexagon_V6_vaslhv,                         // llvm.hexagon.V6.vaslhv
    hexagon_V6_vaslhv_128B,                    // llvm.hexagon.V6.vaslhv.128B
    hexagon_V6_vaslw,                          // llvm.hexagon.V6.vaslw
    hexagon_V6_vaslw_128B,                     // llvm.hexagon.V6.vaslw.128B
    hexagon_V6_vaslw_acc,                      // llvm.hexagon.V6.vaslw.acc
    hexagon_V6_vaslw_acc_128B,                 // llvm.hexagon.V6.vaslw.acc.128B
    hexagon_V6_vaslwv,                         // llvm.hexagon.V6.vaslwv
    hexagon_V6_vaslwv_128B,                    // llvm.hexagon.V6.vaslwv.128B
    hexagon_V6_vasr_into,                      // llvm.hexagon.V6.vasr.into
    hexagon_V6_vasr_into_128B,                 // llvm.hexagon.V6.vasr.into.128B
    hexagon_V6_vasrh,                          // llvm.hexagon.V6.vasrh
    hexagon_V6_vasrh_128B,                     // llvm.hexagon.V6.vasrh.128B
    hexagon_V6_vasrh_acc,                      // llvm.hexagon.V6.vasrh.acc
    hexagon_V6_vasrh_acc_128B,                 // llvm.hexagon.V6.vasrh.acc.128B
    hexagon_V6_vasrhbrndsat,                   // llvm.hexagon.V6.vasrhbrndsat
    hexagon_V6_vasrhbrndsat_128B,              // llvm.hexagon.V6.vasrhbrndsat.128B
    hexagon_V6_vasrhbsat,                      // llvm.hexagon.V6.vasrhbsat
    hexagon_V6_vasrhbsat_128B,                 // llvm.hexagon.V6.vasrhbsat.128B
    hexagon_V6_vasrhubrndsat,                  // llvm.hexagon.V6.vasrhubrndsat
    hexagon_V6_vasrhubrndsat_128B,             // llvm.hexagon.V6.vasrhubrndsat.128B
    hexagon_V6_vasrhubsat,                     // llvm.hexagon.V6.vasrhubsat
    hexagon_V6_vasrhubsat_128B,                // llvm.hexagon.V6.vasrhubsat.128B
    hexagon_V6_vasrhv,                         // llvm.hexagon.V6.vasrhv
    hexagon_V6_vasrhv_128B,                    // llvm.hexagon.V6.vasrhv.128B
    hexagon_V6_vasruhubrndsat,                 // llvm.hexagon.V6.vasruhubrndsat
    hexagon_V6_vasruhubrndsat_128B,            // llvm.hexagon.V6.vasruhubrndsat.128B
    hexagon_V6_vasruhubsat,                    // llvm.hexagon.V6.vasruhubsat
    hexagon_V6_vasruhubsat_128B,               // llvm.hexagon.V6.vasruhubsat.128B
    hexagon_V6_vasruwuhrndsat,                 // llvm.hexagon.V6.vasruwuhrndsat
    hexagon_V6_vasruwuhrndsat_128B,            // llvm.hexagon.V6.vasruwuhrndsat.128B
    hexagon_V6_vasruwuhsat,                    // llvm.hexagon.V6.vasruwuhsat
    hexagon_V6_vasruwuhsat_128B,               // llvm.hexagon.V6.vasruwuhsat.128B
    hexagon_V6_vasrvuhubrndsat,                // llvm.hexagon.V6.vasrvuhubrndsat
    hexagon_V6_vasrvuhubrndsat_128B,           // llvm.hexagon.V6.vasrvuhubrndsat.128B
    hexagon_V6_vasrvuhubsat,                   // llvm.hexagon.V6.vasrvuhubsat
    hexagon_V6_vasrvuhubsat_128B,              // llvm.hexagon.V6.vasrvuhubsat.128B
    hexagon_V6_vasrvwuhrndsat,                 // llvm.hexagon.V6.vasrvwuhrndsat
    hexagon_V6_vasrvwuhrndsat_128B,            // llvm.hexagon.V6.vasrvwuhrndsat.128B
    hexagon_V6_vasrvwuhsat,                    // llvm.hexagon.V6.vasrvwuhsat
    hexagon_V6_vasrvwuhsat_128B,               // llvm.hexagon.V6.vasrvwuhsat.128B
    hexagon_V6_vasrw,                          // llvm.hexagon.V6.vasrw
    hexagon_V6_vasrw_128B,                     // llvm.hexagon.V6.vasrw.128B
    hexagon_V6_vasrw_acc,                      // llvm.hexagon.V6.vasrw.acc
    hexagon_V6_vasrw_acc_128B,                 // llvm.hexagon.V6.vasrw.acc.128B
    hexagon_V6_vasrwh,                         // llvm.hexagon.V6.vasrwh
    hexagon_V6_vasrwh_128B,                    // llvm.hexagon.V6.vasrwh.128B
    hexagon_V6_vasrwhrndsat,                   // llvm.hexagon.V6.vasrwhrndsat
    hexagon_V6_vasrwhrndsat_128B,              // llvm.hexagon.V6.vasrwhrndsat.128B
    hexagon_V6_vasrwhsat,                      // llvm.hexagon.V6.vasrwhsat
    hexagon_V6_vasrwhsat_128B,                 // llvm.hexagon.V6.vasrwhsat.128B
    hexagon_V6_vasrwuhrndsat,                  // llvm.hexagon.V6.vasrwuhrndsat
    hexagon_V6_vasrwuhrndsat_128B,             // llvm.hexagon.V6.vasrwuhrndsat.128B
    hexagon_V6_vasrwuhsat,                     // llvm.hexagon.V6.vasrwuhsat
    hexagon_V6_vasrwuhsat_128B,                // llvm.hexagon.V6.vasrwuhsat.128B
    hexagon_V6_vasrwv,                         // llvm.hexagon.V6.vasrwv
    hexagon_V6_vasrwv_128B,                    // llvm.hexagon.V6.vasrwv.128B
    hexagon_V6_vassign,                        // llvm.hexagon.V6.vassign
    hexagon_V6_vassign_128B,                   // llvm.hexagon.V6.vassign.128B
    hexagon_V6_vassign_fp,                     // llvm.hexagon.V6.vassign.fp
    hexagon_V6_vassign_fp_128B,                // llvm.hexagon.V6.vassign.fp.128B
    hexagon_V6_vassignp,                       // llvm.hexagon.V6.vassignp
    hexagon_V6_vassignp_128B,                  // llvm.hexagon.V6.vassignp.128B
    hexagon_V6_vavgb,                          // llvm.hexagon.V6.vavgb
    hexagon_V6_vavgb_128B,                     // llvm.hexagon.V6.vavgb.128B
    hexagon_V6_vavgbrnd,                       // llvm.hexagon.V6.vavgbrnd
    hexagon_V6_vavgbrnd_128B,                  // llvm.hexagon.V6.vavgbrnd.128B
    hexagon_V6_vavgh,                          // llvm.hexagon.V6.vavgh
    hexagon_V6_vavgh_128B,                     // llvm.hexagon.V6.vavgh.128B
    hexagon_V6_vavghrnd,                       // llvm.hexagon.V6.vavghrnd
    hexagon_V6_vavghrnd_128B,                  // llvm.hexagon.V6.vavghrnd.128B
    hexagon_V6_vavgub,                         // llvm.hexagon.V6.vavgub
    hexagon_V6_vavgub_128B,                    // llvm.hexagon.V6.vavgub.128B
    hexagon_V6_vavgubrnd,                      // llvm.hexagon.V6.vavgubrnd
    hexagon_V6_vavgubrnd_128B,                 // llvm.hexagon.V6.vavgubrnd.128B
    hexagon_V6_vavguh,                         // llvm.hexagon.V6.vavguh
    hexagon_V6_vavguh_128B,                    // llvm.hexagon.V6.vavguh.128B
    hexagon_V6_vavguhrnd,                      // llvm.hexagon.V6.vavguhrnd
    hexagon_V6_vavguhrnd_128B,                 // llvm.hexagon.V6.vavguhrnd.128B
    hexagon_V6_vavguw,                         // llvm.hexagon.V6.vavguw
    hexagon_V6_vavguw_128B,                    // llvm.hexagon.V6.vavguw.128B
    hexagon_V6_vavguwrnd,                      // llvm.hexagon.V6.vavguwrnd
    hexagon_V6_vavguwrnd_128B,                 // llvm.hexagon.V6.vavguwrnd.128B
    hexagon_V6_vavgw,                          // llvm.hexagon.V6.vavgw
    hexagon_V6_vavgw_128B,                     // llvm.hexagon.V6.vavgw.128B
    hexagon_V6_vavgwrnd,                       // llvm.hexagon.V6.vavgwrnd
    hexagon_V6_vavgwrnd_128B,                  // llvm.hexagon.V6.vavgwrnd.128B
    hexagon_V6_vcl0h,                          // llvm.hexagon.V6.vcl0h
    hexagon_V6_vcl0h_128B,                     // llvm.hexagon.V6.vcl0h.128B
    hexagon_V6_vcl0w,                          // llvm.hexagon.V6.vcl0w
    hexagon_V6_vcl0w_128B,                     // llvm.hexagon.V6.vcl0w.128B
    hexagon_V6_vcombine,                       // llvm.hexagon.V6.vcombine
    hexagon_V6_vcombine_128B,                  // llvm.hexagon.V6.vcombine.128B
    hexagon_V6_vconv_h_hf,                     // llvm.hexagon.V6.vconv.h.hf
    hexagon_V6_vconv_h_hf_128B,                // llvm.hexagon.V6.vconv.h.hf.128B
    hexagon_V6_vconv_hf_h,                     // llvm.hexagon.V6.vconv.hf.h
    hexagon_V6_vconv_hf_h_128B,                // llvm.hexagon.V6.vconv.hf.h.128B
    hexagon_V6_vconv_hf_qf16,                  // llvm.hexagon.V6.vconv.hf.qf16
    hexagon_V6_vconv_hf_qf16_128B,             // llvm.hexagon.V6.vconv.hf.qf16.128B
    hexagon_V6_vconv_hf_qf32,                  // llvm.hexagon.V6.vconv.hf.qf32
    hexagon_V6_vconv_hf_qf32_128B,             // llvm.hexagon.V6.vconv.hf.qf32.128B
    hexagon_V6_vconv_sf_qf32,                  // llvm.hexagon.V6.vconv.sf.qf32
    hexagon_V6_vconv_sf_qf32_128B,             // llvm.hexagon.V6.vconv.sf.qf32.128B
    hexagon_V6_vconv_sf_w,                     // llvm.hexagon.V6.vconv.sf.w
    hexagon_V6_vconv_sf_w_128B,                // llvm.hexagon.V6.vconv.sf.w.128B
    hexagon_V6_vconv_w_sf,                     // llvm.hexagon.V6.vconv.w.sf
    hexagon_V6_vconv_w_sf_128B,                // llvm.hexagon.V6.vconv.w.sf.128B
    hexagon_V6_vcvt_b_hf,                      // llvm.hexagon.V6.vcvt.b.hf
    hexagon_V6_vcvt_b_hf_128B,                 // llvm.hexagon.V6.vcvt.b.hf.128B
    hexagon_V6_vcvt_bf_sf,                     // llvm.hexagon.V6.vcvt.bf.sf
    hexagon_V6_vcvt_bf_sf_128B,                // llvm.hexagon.V6.vcvt.bf.sf.128B
    hexagon_V6_vcvt_h_hf,                      // llvm.hexagon.V6.vcvt.h.hf
    hexagon_V6_vcvt_h_hf_128B,                 // llvm.hexagon.V6.vcvt.h.hf.128B
    hexagon_V6_vcvt_hf_b,                      // llvm.hexagon.V6.vcvt.hf.b
    hexagon_V6_vcvt_hf_b_128B,                 // llvm.hexagon.V6.vcvt.hf.b.128B
    hexagon_V6_vcvt_hf_h,                      // llvm.hexagon.V6.vcvt.hf.h
    hexagon_V6_vcvt_hf_h_128B,                 // llvm.hexagon.V6.vcvt.hf.h.128B
    hexagon_V6_vcvt_hf_sf,                     // llvm.hexagon.V6.vcvt.hf.sf
    hexagon_V6_vcvt_hf_sf_128B,                // llvm.hexagon.V6.vcvt.hf.sf.128B
    hexagon_V6_vcvt_hf_ub,                     // llvm.hexagon.V6.vcvt.hf.ub
    hexagon_V6_vcvt_hf_ub_128B,                // llvm.hexagon.V6.vcvt.hf.ub.128B
    hexagon_V6_vcvt_hf_uh,                     // llvm.hexagon.V6.vcvt.hf.uh
    hexagon_V6_vcvt_hf_uh_128B,                // llvm.hexagon.V6.vcvt.hf.uh.128B
    hexagon_V6_vcvt_sf_hf,                     // llvm.hexagon.V6.vcvt.sf.hf
    hexagon_V6_vcvt_sf_hf_128B,                // llvm.hexagon.V6.vcvt.sf.hf.128B
    hexagon_V6_vcvt_ub_hf,                     // llvm.hexagon.V6.vcvt.ub.hf
    hexagon_V6_vcvt_ub_hf_128B,                // llvm.hexagon.V6.vcvt.ub.hf.128B
    hexagon_V6_vcvt_uh_hf,                     // llvm.hexagon.V6.vcvt.uh.hf
    hexagon_V6_vcvt_uh_hf_128B,                // llvm.hexagon.V6.vcvt.uh.hf.128B
    hexagon_V6_vd0,                            // llvm.hexagon.V6.vd0
    hexagon_V6_vd0_128B,                       // llvm.hexagon.V6.vd0.128B
    hexagon_V6_vdd0,                           // llvm.hexagon.V6.vdd0
    hexagon_V6_vdd0_128B,                      // llvm.hexagon.V6.vdd0.128B
    hexagon_V6_vdealb,                         // llvm.hexagon.V6.vdealb
    hexagon_V6_vdealb_128B,                    // llvm.hexagon.V6.vdealb.128B
    hexagon_V6_vdealb4w,                       // llvm.hexagon.V6.vdealb4w
    hexagon_V6_vdealb4w_128B,                  // llvm.hexagon.V6.vdealb4w.128B
    hexagon_V6_vdealh,                         // llvm.hexagon.V6.vdealh
    hexagon_V6_vdealh_128B,                    // llvm.hexagon.V6.vdealh.128B
    hexagon_V6_vdealvdd,                       // llvm.hexagon.V6.vdealvdd
    hexagon_V6_vdealvdd_128B,                  // llvm.hexagon.V6.vdealvdd.128B
    hexagon_V6_vdelta,                         // llvm.hexagon.V6.vdelta
    hexagon_V6_vdelta_128B,                    // llvm.hexagon.V6.vdelta.128B
    hexagon_V6_vdmpy_sf_hf,                    // llvm.hexagon.V6.vdmpy.sf.hf
    hexagon_V6_vdmpy_sf_hf_128B,               // llvm.hexagon.V6.vdmpy.sf.hf.128B
    hexagon_V6_vdmpy_sf_hf_acc,                // llvm.hexagon.V6.vdmpy.sf.hf.acc
    hexagon_V6_vdmpy_sf_hf_acc_128B,           // llvm.hexagon.V6.vdmpy.sf.hf.acc.128B
    hexagon_V6_vdmpybus,                       // llvm.hexagon.V6.vdmpybus
    hexagon_V6_vdmpybus_128B,                  // llvm.hexagon.V6.vdmpybus.128B
    hexagon_V6_vdmpybus_acc,                   // llvm.hexagon.V6.vdmpybus.acc
    hexagon_V6_vdmpybus_acc_128B,              // llvm.hexagon.V6.vdmpybus.acc.128B
    hexagon_V6_vdmpybus_dv,                    // llvm.hexagon.V6.vdmpybus.dv
    hexagon_V6_vdmpybus_dv_128B,               // llvm.hexagon.V6.vdmpybus.dv.128B
    hexagon_V6_vdmpybus_dv_acc,                // llvm.hexagon.V6.vdmpybus.dv.acc
    hexagon_V6_vdmpybus_dv_acc_128B,           // llvm.hexagon.V6.vdmpybus.dv.acc.128B
    hexagon_V6_vdmpyhb,                        // llvm.hexagon.V6.vdmpyhb
    hexagon_V6_vdmpyhb_128B,                   // llvm.hexagon.V6.vdmpyhb.128B
    hexagon_V6_vdmpyhb_acc,                    // llvm.hexagon.V6.vdmpyhb.acc
    hexagon_V6_vdmpyhb_acc_128B,               // llvm.hexagon.V6.vdmpyhb.acc.128B
    hexagon_V6_vdmpyhb_dv,                     // llvm.hexagon.V6.vdmpyhb.dv
    hexagon_V6_vdmpyhb_dv_128B,                // llvm.hexagon.V6.vdmpyhb.dv.128B
    hexagon_V6_vdmpyhb_dv_acc,                 // llvm.hexagon.V6.vdmpyhb.dv.acc
    hexagon_V6_vdmpyhb_dv_acc_128B,            // llvm.hexagon.V6.vdmpyhb.dv.acc.128B
    hexagon_V6_vdmpyhisat,                     // llvm.hexagon.V6.vdmpyhisat
    hexagon_V6_vdmpyhisat_128B,                // llvm.hexagon.V6.vdmpyhisat.128B
    hexagon_V6_vdmpyhisat_acc,                 // llvm.hexagon.V6.vdmpyhisat.acc
    hexagon_V6_vdmpyhisat_acc_128B,            // llvm.hexagon.V6.vdmpyhisat.acc.128B
    hexagon_V6_vdmpyhsat,                      // llvm.hexagon.V6.vdmpyhsat
    hexagon_V6_vdmpyhsat_128B,                 // llvm.hexagon.V6.vdmpyhsat.128B
    hexagon_V6_vdmpyhsat_acc,                  // llvm.hexagon.V6.vdmpyhsat.acc
    hexagon_V6_vdmpyhsat_acc_128B,             // llvm.hexagon.V6.vdmpyhsat.acc.128B
    hexagon_V6_vdmpyhsuisat,                   // llvm.hexagon.V6.vdmpyhsuisat
    hexagon_V6_vdmpyhsuisat_128B,              // llvm.hexagon.V6.vdmpyhsuisat.128B
    hexagon_V6_vdmpyhsuisat_acc,               // llvm.hexagon.V6.vdmpyhsuisat.acc
    hexagon_V6_vdmpyhsuisat_acc_128B,          // llvm.hexagon.V6.vdmpyhsuisat.acc.128B
    hexagon_V6_vdmpyhsusat,                    // llvm.hexagon.V6.vdmpyhsusat
    hexagon_V6_vdmpyhsusat_128B,               // llvm.hexagon.V6.vdmpyhsusat.128B
    hexagon_V6_vdmpyhsusat_acc,                // llvm.hexagon.V6.vdmpyhsusat.acc
    hexagon_V6_vdmpyhsusat_acc_128B,           // llvm.hexagon.V6.vdmpyhsusat.acc.128B
    hexagon_V6_vdmpyhvsat,                     // llvm.hexagon.V6.vdmpyhvsat
    hexagon_V6_vdmpyhvsat_128B,                // llvm.hexagon.V6.vdmpyhvsat.128B
    hexagon_V6_vdmpyhvsat_acc,                 // llvm.hexagon.V6.vdmpyhvsat.acc
    hexagon_V6_vdmpyhvsat_acc_128B,            // llvm.hexagon.V6.vdmpyhvsat.acc.128B
    hexagon_V6_vdsaduh,                        // llvm.hexagon.V6.vdsaduh
    hexagon_V6_vdsaduh_128B,                   // llvm.hexagon.V6.vdsaduh.128B
    hexagon_V6_vdsaduh_acc,                    // llvm.hexagon.V6.vdsaduh.acc
    hexagon_V6_vdsaduh_acc_128B,               // llvm.hexagon.V6.vdsaduh.acc.128B
    hexagon_V6_veqb,                           // llvm.hexagon.V6.veqb
    hexagon_V6_veqb_128B,                      // llvm.hexagon.V6.veqb.128B
    hexagon_V6_veqb_and,                       // llvm.hexagon.V6.veqb.and
    hexagon_V6_veqb_and_128B,                  // llvm.hexagon.V6.veqb.and.128B
    hexagon_V6_veqb_or,                        // llvm.hexagon.V6.veqb.or
    hexagon_V6_veqb_or_128B,                   // llvm.hexagon.V6.veqb.or.128B
    hexagon_V6_veqb_xor,                       // llvm.hexagon.V6.veqb.xor
    hexagon_V6_veqb_xor_128B,                  // llvm.hexagon.V6.veqb.xor.128B
    hexagon_V6_veqh,                           // llvm.hexagon.V6.veqh
    hexagon_V6_veqh_128B,                      // llvm.hexagon.V6.veqh.128B
    hexagon_V6_veqh_and,                       // llvm.hexagon.V6.veqh.and
    hexagon_V6_veqh_and_128B,                  // llvm.hexagon.V6.veqh.and.128B
    hexagon_V6_veqh_or,                        // llvm.hexagon.V6.veqh.or
    hexagon_V6_veqh_or_128B,                   // llvm.hexagon.V6.veqh.or.128B
    hexagon_V6_veqh_xor,                       // llvm.hexagon.V6.veqh.xor
    hexagon_V6_veqh_xor_128B,                  // llvm.hexagon.V6.veqh.xor.128B
    hexagon_V6_veqw,                           // llvm.hexagon.V6.veqw
    hexagon_V6_veqw_128B,                      // llvm.hexagon.V6.veqw.128B
    hexagon_V6_veqw_and,                       // llvm.hexagon.V6.veqw.and
    hexagon_V6_veqw_and_128B,                  // llvm.hexagon.V6.veqw.and.128B
    hexagon_V6_veqw_or,                        // llvm.hexagon.V6.veqw.or
    hexagon_V6_veqw_or_128B,                   // llvm.hexagon.V6.veqw.or.128B
    hexagon_V6_veqw_xor,                       // llvm.hexagon.V6.veqw.xor
    hexagon_V6_veqw_xor_128B,                  // llvm.hexagon.V6.veqw.xor.128B
    hexagon_V6_vfmax_hf,                       // llvm.hexagon.V6.vfmax.hf
    hexagon_V6_vfmax_hf_128B,                  // llvm.hexagon.V6.vfmax.hf.128B
    hexagon_V6_vfmax_sf,                       // llvm.hexagon.V6.vfmax.sf
    hexagon_V6_vfmax_sf_128B,                  // llvm.hexagon.V6.vfmax.sf.128B
    hexagon_V6_vfmin_hf,                       // llvm.hexagon.V6.vfmin.hf
    hexagon_V6_vfmin_hf_128B,                  // llvm.hexagon.V6.vfmin.hf.128B
    hexagon_V6_vfmin_sf,                       // llvm.hexagon.V6.vfmin.sf
    hexagon_V6_vfmin_sf_128B,                  // llvm.hexagon.V6.vfmin.sf.128B
    hexagon_V6_vfneg_hf,                       // llvm.hexagon.V6.vfneg.hf
    hexagon_V6_vfneg_hf_128B,                  // llvm.hexagon.V6.vfneg.hf.128B
    hexagon_V6_vfneg_sf,                       // llvm.hexagon.V6.vfneg.sf
    hexagon_V6_vfneg_sf_128B,                  // llvm.hexagon.V6.vfneg.sf.128B
    hexagon_V6_vgathermh,                      // llvm.hexagon.V6.vgathermh
    hexagon_V6_vgathermh_128B,                 // llvm.hexagon.V6.vgathermh.128B
    hexagon_V6_vgathermhq,                     // llvm.hexagon.V6.vgathermhq
    hexagon_V6_vgathermhq_128B,                // llvm.hexagon.V6.vgathermhq.128B
    hexagon_V6_vgathermhw,                     // llvm.hexagon.V6.vgathermhw
    hexagon_V6_vgathermhw_128B,                // llvm.hexagon.V6.vgathermhw.128B
    hexagon_V6_vgathermhwq,                    // llvm.hexagon.V6.vgathermhwq
    hexagon_V6_vgathermhwq_128B,               // llvm.hexagon.V6.vgathermhwq.128B
    hexagon_V6_vgathermw,                      // llvm.hexagon.V6.vgathermw
    hexagon_V6_vgathermw_128B,                 // llvm.hexagon.V6.vgathermw.128B
    hexagon_V6_vgathermwq,                     // llvm.hexagon.V6.vgathermwq
    hexagon_V6_vgathermwq_128B,                // llvm.hexagon.V6.vgathermwq.128B
    hexagon_V6_vgtb,                           // llvm.hexagon.V6.vgtb
    hexagon_V6_vgtb_128B,                      // llvm.hexagon.V6.vgtb.128B
    hexagon_V6_vgtb_and,                       // llvm.hexagon.V6.vgtb.and
    hexagon_V6_vgtb_and_128B,                  // llvm.hexagon.V6.vgtb.and.128B
    hexagon_V6_vgtb_or,                        // llvm.hexagon.V6.vgtb.or
    hexagon_V6_vgtb_or_128B,                   // llvm.hexagon.V6.vgtb.or.128B
    hexagon_V6_vgtb_xor,                       // llvm.hexagon.V6.vgtb.xor
    hexagon_V6_vgtb_xor_128B,                  // llvm.hexagon.V6.vgtb.xor.128B
    hexagon_V6_vgtbf,                          // llvm.hexagon.V6.vgtbf
    hexagon_V6_vgtbf_128B,                     // llvm.hexagon.V6.vgtbf.128B
    hexagon_V6_vgtbf_and,                      // llvm.hexagon.V6.vgtbf.and
    hexagon_V6_vgtbf_and_128B,                 // llvm.hexagon.V6.vgtbf.and.128B
    hexagon_V6_vgtbf_or,                       // llvm.hexagon.V6.vgtbf.or
    hexagon_V6_vgtbf_or_128B,                  // llvm.hexagon.V6.vgtbf.or.128B
    hexagon_V6_vgtbf_xor,                      // llvm.hexagon.V6.vgtbf.xor
    hexagon_V6_vgtbf_xor_128B,                 // llvm.hexagon.V6.vgtbf.xor.128B
    hexagon_V6_vgth,                           // llvm.hexagon.V6.vgth
    hexagon_V6_vgth_128B,                      // llvm.hexagon.V6.vgth.128B
    hexagon_V6_vgth_and,                       // llvm.hexagon.V6.vgth.and
    hexagon_V6_vgth_and_128B,                  // llvm.hexagon.V6.vgth.and.128B
    hexagon_V6_vgth_or,                        // llvm.hexagon.V6.vgth.or
    hexagon_V6_vgth_or_128B,                   // llvm.hexagon.V6.vgth.or.128B
    hexagon_V6_vgth_xor,                       // llvm.hexagon.V6.vgth.xor
    hexagon_V6_vgth_xor_128B,                  // llvm.hexagon.V6.vgth.xor.128B
    hexagon_V6_vgthf,                          // llvm.hexagon.V6.vgthf
    hexagon_V6_vgthf_128B,                     // llvm.hexagon.V6.vgthf.128B
    hexagon_V6_vgthf_and,                      // llvm.hexagon.V6.vgthf.and
    hexagon_V6_vgthf_and_128B,                 // llvm.hexagon.V6.vgthf.and.128B
    hexagon_V6_vgthf_or,                       // llvm.hexagon.V6.vgthf.or
    hexagon_V6_vgthf_or_128B,                  // llvm.hexagon.V6.vgthf.or.128B
    hexagon_V6_vgthf_xor,                      // llvm.hexagon.V6.vgthf.xor
    hexagon_V6_vgthf_xor_128B,                 // llvm.hexagon.V6.vgthf.xor.128B
    hexagon_V6_vgtsf,                          // llvm.hexagon.V6.vgtsf
    hexagon_V6_vgtsf_128B,                     // llvm.hexagon.V6.vgtsf.128B
    hexagon_V6_vgtsf_and,                      // llvm.hexagon.V6.vgtsf.and
    hexagon_V6_vgtsf_and_128B,                 // llvm.hexagon.V6.vgtsf.and.128B
    hexagon_V6_vgtsf_or,                       // llvm.hexagon.V6.vgtsf.or
    hexagon_V6_vgtsf_or_128B,                  // llvm.hexagon.V6.vgtsf.or.128B
    hexagon_V6_vgtsf_xor,                      // llvm.hexagon.V6.vgtsf.xor
    hexagon_V6_vgtsf_xor_128B,                 // llvm.hexagon.V6.vgtsf.xor.128B
    hexagon_V6_vgtub,                          // llvm.hexagon.V6.vgtub
    hexagon_V6_vgtub_128B,                     // llvm.hexagon.V6.vgtub.128B
    hexagon_V6_vgtub_and,                      // llvm.hexagon.V6.vgtub.and
    hexagon_V6_vgtub_and_128B,                 // llvm.hexagon.V6.vgtub.and.128B
    hexagon_V6_vgtub_or,                       // llvm.hexagon.V6.vgtub.or
    hexagon_V6_vgtub_or_128B,                  // llvm.hexagon.V6.vgtub.or.128B
    hexagon_V6_vgtub_xor,                      // llvm.hexagon.V6.vgtub.xor
    hexagon_V6_vgtub_xor_128B,                 // llvm.hexagon.V6.vgtub.xor.128B
    hexagon_V6_vgtuh,                          // llvm.hexagon.V6.vgtuh
    hexagon_V6_vgtuh_128B,                     // llvm.hexagon.V6.vgtuh.128B
    hexagon_V6_vgtuh_and,                      // llvm.hexagon.V6.vgtuh.and
    hexagon_V6_vgtuh_and_128B,                 // llvm.hexagon.V6.vgtuh.and.128B
    hexagon_V6_vgtuh_or,                       // llvm.hexagon.V6.vgtuh.or
    hexagon_V6_vgtuh_or_128B,                  // llvm.hexagon.V6.vgtuh.or.128B
    hexagon_V6_vgtuh_xor,                      // llvm.hexagon.V6.vgtuh.xor
    hexagon_V6_vgtuh_xor_128B,                 // llvm.hexagon.V6.vgtuh.xor.128B
    hexagon_V6_vgtuw,                          // llvm.hexagon.V6.vgtuw
    hexagon_V6_vgtuw_128B,                     // llvm.hexagon.V6.vgtuw.128B
    hexagon_V6_vgtuw_and,                      // llvm.hexagon.V6.vgtuw.and
    hexagon_V6_vgtuw_and_128B,                 // llvm.hexagon.V6.vgtuw.and.128B
    hexagon_V6_vgtuw_or,                       // llvm.hexagon.V6.vgtuw.or
    hexagon_V6_vgtuw_or_128B,                  // llvm.hexagon.V6.vgtuw.or.128B
    hexagon_V6_vgtuw_xor,                      // llvm.hexagon.V6.vgtuw.xor
    hexagon_V6_vgtuw_xor_128B,                 // llvm.hexagon.V6.vgtuw.xor.128B
    hexagon_V6_vgtw,                           // llvm.hexagon.V6.vgtw
    hexagon_V6_vgtw_128B,                      // llvm.hexagon.V6.vgtw.128B
    hexagon_V6_vgtw_and,                       // llvm.hexagon.V6.vgtw.and
    hexagon_V6_vgtw_and_128B,                  // llvm.hexagon.V6.vgtw.and.128B
    hexagon_V6_vgtw_or,                        // llvm.hexagon.V6.vgtw.or
    hexagon_V6_vgtw_or_128B,                   // llvm.hexagon.V6.vgtw.or.128B
    hexagon_V6_vgtw_xor,                       // llvm.hexagon.V6.vgtw.xor
    hexagon_V6_vgtw_xor_128B,                  // llvm.hexagon.V6.vgtw.xor.128B
    hexagon_V6_vinsertwr,                      // llvm.hexagon.V6.vinsertwr
    hexagon_V6_vinsertwr_128B,                 // llvm.hexagon.V6.vinsertwr.128B
    hexagon_V6_vlalignb,                       // llvm.hexagon.V6.vlalignb
    hexagon_V6_vlalignb_128B,                  // llvm.hexagon.V6.vlalignb.128B
    hexagon_V6_vlalignbi,                      // llvm.hexagon.V6.vlalignbi
    hexagon_V6_vlalignbi_128B,                 // llvm.hexagon.V6.vlalignbi.128B
    hexagon_V6_vlsrb,                          // llvm.hexagon.V6.vlsrb
    hexagon_V6_vlsrb_128B,                     // llvm.hexagon.V6.vlsrb.128B
    hexagon_V6_vlsrh,                          // llvm.hexagon.V6.vlsrh
    hexagon_V6_vlsrh_128B,                     // llvm.hexagon.V6.vlsrh.128B
    hexagon_V6_vlsrhv,                         // llvm.hexagon.V6.vlsrhv
    hexagon_V6_vlsrhv_128B,                    // llvm.hexagon.V6.vlsrhv.128B
    hexagon_V6_vlsrw,                          // llvm.hexagon.V6.vlsrw
    hexagon_V6_vlsrw_128B,                     // llvm.hexagon.V6.vlsrw.128B
    hexagon_V6_vlsrwv,                         // llvm.hexagon.V6.vlsrwv
    hexagon_V6_vlsrwv_128B,                    // llvm.hexagon.V6.vlsrwv.128B
    hexagon_V6_vlut4,                          // llvm.hexagon.V6.vlut4
    hexagon_V6_vlut4_128B,                     // llvm.hexagon.V6.vlut4.128B
    hexagon_V6_vlutvvb,                        // llvm.hexagon.V6.vlutvvb
    hexagon_V6_vlutvvb_128B,                   // llvm.hexagon.V6.vlutvvb.128B
    hexagon_V6_vlutvvb_nm,                     // llvm.hexagon.V6.vlutvvb.nm
    hexagon_V6_vlutvvb_nm_128B,                // llvm.hexagon.V6.vlutvvb.nm.128B
    hexagon_V6_vlutvvb_oracc,                  // llvm.hexagon.V6.vlutvvb.oracc
    hexagon_V6_vlutvvb_oracc_128B,             // llvm.hexagon.V6.vlutvvb.oracc.128B
    hexagon_V6_vlutvvb_oracci,                 // llvm.hexagon.V6.vlutvvb.oracci
    hexagon_V6_vlutvvb_oracci_128B,            // llvm.hexagon.V6.vlutvvb.oracci.128B
    hexagon_V6_vlutvvbi,                       // llvm.hexagon.V6.vlutvvbi
    hexagon_V6_vlutvvbi_128B,                  // llvm.hexagon.V6.vlutvvbi.128B
    hexagon_V6_vlutvwh,                        // llvm.hexagon.V6.vlutvwh
    hexagon_V6_vlutvwh_128B,                   // llvm.hexagon.V6.vlutvwh.128B
    hexagon_V6_vlutvwh_nm,                     // llvm.hexagon.V6.vlutvwh.nm
    hexagon_V6_vlutvwh_nm_128B,                // llvm.hexagon.V6.vlutvwh.nm.128B
    hexagon_V6_vlutvwh_oracc,                  // llvm.hexagon.V6.vlutvwh.oracc
    hexagon_V6_vlutvwh_oracc_128B,             // llvm.hexagon.V6.vlutvwh.oracc.128B
    hexagon_V6_vlutvwh_oracci,                 // llvm.hexagon.V6.vlutvwh.oracci
    hexagon_V6_vlutvwh_oracci_128B,            // llvm.hexagon.V6.vlutvwh.oracci.128B
    hexagon_V6_vlutvwhi,                       // llvm.hexagon.V6.vlutvwhi
    hexagon_V6_vlutvwhi_128B,                  // llvm.hexagon.V6.vlutvwhi.128B
    hexagon_V6_vmaskedstorenq,                 // llvm.hexagon.V6.vmaskedstorenq
    hexagon_V6_vmaskedstorenq_128B,            // llvm.hexagon.V6.vmaskedstorenq.128B
    hexagon_V6_vmaskedstorentnq,               // llvm.hexagon.V6.vmaskedstorentnq
    hexagon_V6_vmaskedstorentnq_128B,          // llvm.hexagon.V6.vmaskedstorentnq.128B
    hexagon_V6_vmaskedstorentq,                // llvm.hexagon.V6.vmaskedstorentq
    hexagon_V6_vmaskedstorentq_128B,           // llvm.hexagon.V6.vmaskedstorentq.128B
    hexagon_V6_vmaskedstoreq,                  // llvm.hexagon.V6.vmaskedstoreq
    hexagon_V6_vmaskedstoreq_128B,             // llvm.hexagon.V6.vmaskedstoreq.128B
    hexagon_V6_vmax_bf,                        // llvm.hexagon.V6.vmax.bf
    hexagon_V6_vmax_bf_128B,                   // llvm.hexagon.V6.vmax.bf.128B
    hexagon_V6_vmax_hf,                        // llvm.hexagon.V6.vmax.hf
    hexagon_V6_vmax_hf_128B,                   // llvm.hexagon.V6.vmax.hf.128B
    hexagon_V6_vmax_sf,                        // llvm.hexagon.V6.vmax.sf
    hexagon_V6_vmax_sf_128B,                   // llvm.hexagon.V6.vmax.sf.128B
    hexagon_V6_vmaxb,                          // llvm.hexagon.V6.vmaxb
    hexagon_V6_vmaxb_128B,                     // llvm.hexagon.V6.vmaxb.128B
    hexagon_V6_vmaxh,                          // llvm.hexagon.V6.vmaxh
    hexagon_V6_vmaxh_128B,                     // llvm.hexagon.V6.vmaxh.128B
    hexagon_V6_vmaxub,                         // llvm.hexagon.V6.vmaxub
    hexagon_V6_vmaxub_128B,                    // llvm.hexagon.V6.vmaxub.128B
    hexagon_V6_vmaxuh,                         // llvm.hexagon.V6.vmaxuh
    hexagon_V6_vmaxuh_128B,                    // llvm.hexagon.V6.vmaxuh.128B
    hexagon_V6_vmaxw,                          // llvm.hexagon.V6.vmaxw
    hexagon_V6_vmaxw_128B,                     // llvm.hexagon.V6.vmaxw.128B
    hexagon_V6_vmin_bf,                        // llvm.hexagon.V6.vmin.bf
    hexagon_V6_vmin_bf_128B,                   // llvm.hexagon.V6.vmin.bf.128B
    hexagon_V6_vmin_hf,                        // llvm.hexagon.V6.vmin.hf
    hexagon_V6_vmin_hf_128B,                   // llvm.hexagon.V6.vmin.hf.128B
    hexagon_V6_vmin_sf,                        // llvm.hexagon.V6.vmin.sf
    hexagon_V6_vmin_sf_128B,                   // llvm.hexagon.V6.vmin.sf.128B
    hexagon_V6_vminb,                          // llvm.hexagon.V6.vminb
    hexagon_V6_vminb_128B,                     // llvm.hexagon.V6.vminb.128B
    hexagon_V6_vminh,                          // llvm.hexagon.V6.vminh
    hexagon_V6_vminh_128B,                     // llvm.hexagon.V6.vminh.128B
    hexagon_V6_vminub,                         // llvm.hexagon.V6.vminub
    hexagon_V6_vminub_128B,                    // llvm.hexagon.V6.vminub.128B
    hexagon_V6_vminuh,                         // llvm.hexagon.V6.vminuh
    hexagon_V6_vminuh_128B,                    // llvm.hexagon.V6.vminuh.128B
    hexagon_V6_vminw,                          // llvm.hexagon.V6.vminw
    hexagon_V6_vminw_128B,                     // llvm.hexagon.V6.vminw.128B
    hexagon_V6_vmpabus,                        // llvm.hexagon.V6.vmpabus
    hexagon_V6_vmpabus_128B,                   // llvm.hexagon.V6.vmpabus.128B
    hexagon_V6_vmpabus_acc,                    // llvm.hexagon.V6.vmpabus.acc
    hexagon_V6_vmpabus_acc_128B,               // llvm.hexagon.V6.vmpabus.acc.128B
    hexagon_V6_vmpabusv,                       // llvm.hexagon.V6.vmpabusv
    hexagon_V6_vmpabusv_128B,                  // llvm.hexagon.V6.vmpabusv.128B
    hexagon_V6_vmpabuu,                        // llvm.hexagon.V6.vmpabuu
    hexagon_V6_vmpabuu_128B,                   // llvm.hexagon.V6.vmpabuu.128B
    hexagon_V6_vmpabuu_acc,                    // llvm.hexagon.V6.vmpabuu.acc
    hexagon_V6_vmpabuu_acc_128B,               // llvm.hexagon.V6.vmpabuu.acc.128B
    hexagon_V6_vmpabuuv,                       // llvm.hexagon.V6.vmpabuuv
    hexagon_V6_vmpabuuv_128B,                  // llvm.hexagon.V6.vmpabuuv.128B
    hexagon_V6_vmpahb,                         // llvm.hexagon.V6.vmpahb
    hexagon_V6_vmpahb_128B,                    // llvm.hexagon.V6.vmpahb.128B
    hexagon_V6_vmpahb_acc,                     // llvm.hexagon.V6.vmpahb.acc
    hexagon_V6_vmpahb_acc_128B,                // llvm.hexagon.V6.vmpahb.acc.128B
    hexagon_V6_vmpahhsat,                      // llvm.hexagon.V6.vmpahhsat
    hexagon_V6_vmpahhsat_128B,                 // llvm.hexagon.V6.vmpahhsat.128B
    hexagon_V6_vmpauhb,                        // llvm.hexagon.V6.vmpauhb
    hexagon_V6_vmpauhb_128B,                   // llvm.hexagon.V6.vmpauhb.128B
    hexagon_V6_vmpauhb_acc,                    // llvm.hexagon.V6.vmpauhb.acc
    hexagon_V6_vmpauhb_acc_128B,               // llvm.hexagon.V6.vmpauhb.acc.128B
    hexagon_V6_vmpauhuhsat,                    // llvm.hexagon.V6.vmpauhuhsat
    hexagon_V6_vmpauhuhsat_128B,               // llvm.hexagon.V6.vmpauhuhsat.128B
    hexagon_V6_vmpsuhuhsat,                    // llvm.hexagon.V6.vmpsuhuhsat
    hexagon_V6_vmpsuhuhsat_128B,               // llvm.hexagon.V6.vmpsuhuhsat.128B
    hexagon_V6_vmpy_hf_hf,                     // llvm.hexagon.V6.vmpy.hf.hf
    hexagon_V6_vmpy_hf_hf_128B,                // llvm.hexagon.V6.vmpy.hf.hf.128B
    hexagon_V6_vmpy_hf_hf_acc,                 // llvm.hexagon.V6.vmpy.hf.hf.acc
    hexagon_V6_vmpy_hf_hf_acc_128B,            // llvm.hexagon.V6.vmpy.hf.hf.acc.128B
    hexagon_V6_vmpy_qf16,                      // llvm.hexagon.V6.vmpy.qf16
    hexagon_V6_vmpy_qf16_128B,                 // llvm.hexagon.V6.vmpy.qf16.128B
    hexagon_V6_vmpy_qf16_hf,                   // llvm.hexagon.V6.vmpy.qf16.hf
    hexagon_V6_vmpy_qf16_hf_128B,              // llvm.hexagon.V6.vmpy.qf16.hf.128B
    hexagon_V6_vmpy_qf16_mix_hf,               // llvm.hexagon.V6.vmpy.qf16.mix.hf
    hexagon_V6_vmpy_qf16_mix_hf_128B,          // llvm.hexagon.V6.vmpy.qf16.mix.hf.128B
    hexagon_V6_vmpy_qf32,                      // llvm.hexagon.V6.vmpy.qf32
    hexagon_V6_vmpy_qf32_128B,                 // llvm.hexagon.V6.vmpy.qf32.128B
    hexagon_V6_vmpy_qf32_hf,                   // llvm.hexagon.V6.vmpy.qf32.hf
    hexagon_V6_vmpy_qf32_hf_128B,              // llvm.hexagon.V6.vmpy.qf32.hf.128B
    hexagon_V6_vmpy_qf32_mix_hf,               // llvm.hexagon.V6.vmpy.qf32.mix.hf
    hexagon_V6_vmpy_qf32_mix_hf_128B,          // llvm.hexagon.V6.vmpy.qf32.mix.hf.128B
    hexagon_V6_vmpy_qf32_qf16,                 // llvm.hexagon.V6.vmpy.qf32.qf16
    hexagon_V6_vmpy_qf32_qf16_128B,            // llvm.hexagon.V6.vmpy.qf32.qf16.128B
    hexagon_V6_vmpy_qf32_sf,                   // llvm.hexagon.V6.vmpy.qf32.sf
    hexagon_V6_vmpy_qf32_sf_128B,              // llvm.hexagon.V6.vmpy.qf32.sf.128B
    hexagon_V6_vmpy_sf_bf,                     // llvm.hexagon.V6.vmpy.sf.bf
    hexagon_V6_vmpy_sf_bf_128B,                // llvm.hexagon.V6.vmpy.sf.bf.128B
    hexagon_V6_vmpy_sf_bf_acc,                 // llvm.hexagon.V6.vmpy.sf.bf.acc
    hexagon_V6_vmpy_sf_bf_acc_128B,            // llvm.hexagon.V6.vmpy.sf.bf.acc.128B
    hexagon_V6_vmpy_sf_hf,                     // llvm.hexagon.V6.vmpy.sf.hf
    hexagon_V6_vmpy_sf_hf_128B,                // llvm.hexagon.V6.vmpy.sf.hf.128B
    hexagon_V6_vmpy_sf_hf_acc,                 // llvm.hexagon.V6.vmpy.sf.hf.acc
    hexagon_V6_vmpy_sf_hf_acc_128B,            // llvm.hexagon.V6.vmpy.sf.hf.acc.128B
    hexagon_V6_vmpy_sf_sf,                     // llvm.hexagon.V6.vmpy.sf.sf
    hexagon_V6_vmpy_sf_sf_128B,                // llvm.hexagon.V6.vmpy.sf.sf.128B
    hexagon_V6_vmpybus,                        // llvm.hexagon.V6.vmpybus
    hexagon_V6_vmpybus_128B,                   // llvm.hexagon.V6.vmpybus.128B
    hexagon_V6_vmpybus_acc,                    // llvm.hexagon.V6.vmpybus.acc
    hexagon_V6_vmpybus_acc_128B,               // llvm.hexagon.V6.vmpybus.acc.128B
    hexagon_V6_vmpybusv,                       // llvm.hexagon.V6.vmpybusv
    hexagon_V6_vmpybusv_128B,                  // llvm.hexagon.V6.vmpybusv.128B
    hexagon_V6_vmpybusv_acc,                   // llvm.hexagon.V6.vmpybusv.acc
    hexagon_V6_vmpybusv_acc_128B,              // llvm.hexagon.V6.vmpybusv.acc.128B
    hexagon_V6_vmpybv,                         // llvm.hexagon.V6.vmpybv
    hexagon_V6_vmpybv_128B,                    // llvm.hexagon.V6.vmpybv.128B
    hexagon_V6_vmpybv_acc,                     // llvm.hexagon.V6.vmpybv.acc
    hexagon_V6_vmpybv_acc_128B,                // llvm.hexagon.V6.vmpybv.acc.128B
    hexagon_V6_vmpyewuh,                       // llvm.hexagon.V6.vmpyewuh
    hexagon_V6_vmpyewuh_128B,                  // llvm.hexagon.V6.vmpyewuh.128B
    hexagon_V6_vmpyewuh_64,                    // llvm.hexagon.V6.vmpyewuh.64
    hexagon_V6_vmpyewuh_64_128B,               // llvm.hexagon.V6.vmpyewuh.64.128B
    hexagon_V6_vmpyh,                          // llvm.hexagon.V6.vmpyh
    hexagon_V6_vmpyh_128B,                     // llvm.hexagon.V6.vmpyh.128B
    hexagon_V6_vmpyh_acc,                      // llvm.hexagon.V6.vmpyh.acc
    hexagon_V6_vmpyh_acc_128B,                 // llvm.hexagon.V6.vmpyh.acc.128B
    hexagon_V6_vmpyhsat_acc,                   // llvm.hexagon.V6.vmpyhsat.acc
    hexagon_V6_vmpyhsat_acc_128B,              // llvm.hexagon.V6.vmpyhsat.acc.128B
    hexagon_V6_vmpyhsrs,                       // llvm.hexagon.V6.vmpyhsrs
    hexagon_V6_vmpyhsrs_128B,                  // llvm.hexagon.V6.vmpyhsrs.128B
    hexagon_V6_vmpyhss,                        // llvm.hexagon.V6.vmpyhss
    hexagon_V6_vmpyhss_128B,                   // llvm.hexagon.V6.vmpyhss.128B
    hexagon_V6_vmpyhus,                        // llvm.hexagon.V6.vmpyhus
    hexagon_V6_vmpyhus_128B,                   // llvm.hexagon.V6.vmpyhus.128B
    hexagon_V6_vmpyhus_acc,                    // llvm.hexagon.V6.vmpyhus.acc
    hexagon_V6_vmpyhus_acc_128B,               // llvm.hexagon.V6.vmpyhus.acc.128B
    hexagon_V6_vmpyhv,                         // llvm.hexagon.V6.vmpyhv
    hexagon_V6_vmpyhv_128B,                    // llvm.hexagon.V6.vmpyhv.128B
    hexagon_V6_vmpyhv_acc,                     // llvm.hexagon.V6.vmpyhv.acc
    hexagon_V6_vmpyhv_acc_128B,                // llvm.hexagon.V6.vmpyhv.acc.128B
    hexagon_V6_vmpyhvsrs,                      // llvm.hexagon.V6.vmpyhvsrs
    hexagon_V6_vmpyhvsrs_128B,                 // llvm.hexagon.V6.vmpyhvsrs.128B
    hexagon_V6_vmpyieoh,                       // llvm.hexagon.V6.vmpyieoh
    hexagon_V6_vmpyieoh_128B,                  // llvm.hexagon.V6.vmpyieoh.128B
    hexagon_V6_vmpyiewh_acc,                   // llvm.hexagon.V6.vmpyiewh.acc
    hexagon_V6_vmpyiewh_acc_128B,              // llvm.hexagon.V6.vmpyiewh.acc.128B
    hexagon_V6_vmpyiewuh,                      // llvm.hexagon.V6.vmpyiewuh
    hexagon_V6_vmpyiewuh_128B,                 // llvm.hexagon.V6.vmpyiewuh.128B
    hexagon_V6_vmpyiewuh_acc,                  // llvm.hexagon.V6.vmpyiewuh.acc
    hexagon_V6_vmpyiewuh_acc_128B,             // llvm.hexagon.V6.vmpyiewuh.acc.128B
    hexagon_V6_vmpyih,                         // llvm.hexagon.V6.vmpyih
    hexagon_V6_vmpyih_128B,                    // llvm.hexagon.V6.vmpyih.128B
    hexagon_V6_vmpyih_acc,                     // llvm.hexagon.V6.vmpyih.acc
    hexagon_V6_vmpyih_acc_128B,                // llvm.hexagon.V6.vmpyih.acc.128B
    hexagon_V6_vmpyihb,                        // llvm.hexagon.V6.vmpyihb
    hexagon_V6_vmpyihb_128B,                   // llvm.hexagon.V6.vmpyihb.128B
    hexagon_V6_vmpyihb_acc,                    // llvm.hexagon.V6.vmpyihb.acc
    hexagon_V6_vmpyihb_acc_128B,               // llvm.hexagon.V6.vmpyihb.acc.128B
    hexagon_V6_vmpyiowh,                       // llvm.hexagon.V6.vmpyiowh
    hexagon_V6_vmpyiowh_128B,                  // llvm.hexagon.V6.vmpyiowh.128B
    hexagon_V6_vmpyiwb,                        // llvm.hexagon.V6.vmpyiwb
    hexagon_V6_vmpyiwb_128B,                   // llvm.hexagon.V6.vmpyiwb.128B
    hexagon_V6_vmpyiwb_acc,                    // llvm.hexagon.V6.vmpyiwb.acc
    hexagon_V6_vmpyiwb_acc_128B,               // llvm.hexagon.V6.vmpyiwb.acc.128B
    hexagon_V6_vmpyiwh,                        // llvm.hexagon.V6.vmpyiwh
    hexagon_V6_vmpyiwh_128B,                   // llvm.hexagon.V6.vmpyiwh.128B
    hexagon_V6_vmpyiwh_acc,                    // llvm.hexagon.V6.vmpyiwh.acc
    hexagon_V6_vmpyiwh_acc_128B,               // llvm.hexagon.V6.vmpyiwh.acc.128B
    hexagon_V6_vmpyiwub,                       // llvm.hexagon.V6.vmpyiwub
    hexagon_V6_vmpyiwub_128B,                  // llvm.hexagon.V6.vmpyiwub.128B
    hexagon_V6_vmpyiwub_acc,                   // llvm.hexagon.V6.vmpyiwub.acc
    hexagon_V6_vmpyiwub_acc_128B,              // llvm.hexagon.V6.vmpyiwub.acc.128B
    hexagon_V6_vmpyowh,                        // llvm.hexagon.V6.vmpyowh
    hexagon_V6_vmpyowh_128B,                   // llvm.hexagon.V6.vmpyowh.128B
    hexagon_V6_vmpyowh_64_acc,                 // llvm.hexagon.V6.vmpyowh.64.acc
    hexagon_V6_vmpyowh_64_acc_128B,            // llvm.hexagon.V6.vmpyowh.64.acc.128B
    hexagon_V6_vmpyowh_rnd,                    // llvm.hexagon.V6.vmpyowh.rnd
    hexagon_V6_vmpyowh_rnd_128B,               // llvm.hexagon.V6.vmpyowh.rnd.128B
    hexagon_V6_vmpyowh_rnd_sacc,               // llvm.hexagon.V6.vmpyowh.rnd.sacc
    hexagon_V6_vmpyowh_rnd_sacc_128B,          // llvm.hexagon.V6.vmpyowh.rnd.sacc.128B
    hexagon_V6_vmpyowh_sacc,                   // llvm.hexagon.V6.vmpyowh.sacc
    hexagon_V6_vmpyowh_sacc_128B,              // llvm.hexagon.V6.vmpyowh.sacc.128B
    hexagon_V6_vmpyss_parts,                   // llvm.hexagon.V6.vmpyss.parts
    hexagon_V6_vmpyss_parts_128B,              // llvm.hexagon.V6.vmpyss.parts.128B
    hexagon_V6_vmpyub,                         // llvm.hexagon.V6.vmpyub
    hexagon_V6_vmpyub_128B,                    // llvm.hexagon.V6.vmpyub.128B
    hexagon_V6_vmpyub_acc,                     // llvm.hexagon.V6.vmpyub.acc
    hexagon_V6_vmpyub_acc_128B,                // llvm.hexagon.V6.vmpyub.acc.128B
    hexagon_V6_vmpyubv,                        // llvm.hexagon.V6.vmpyubv
    hexagon_V6_vmpyubv_128B,                   // llvm.hexagon.V6.vmpyubv.128B
    hexagon_V6_vmpyubv_acc,                    // llvm.hexagon.V6.vmpyubv.acc
    hexagon_V6_vmpyubv_acc_128B,               // llvm.hexagon.V6.vmpyubv.acc.128B
    hexagon_V6_vmpyuh,                         // llvm.hexagon.V6.vmpyuh
    hexagon_V6_vmpyuh_128B,                    // llvm.hexagon.V6.vmpyuh.128B
    hexagon_V6_vmpyuh_acc,                     // llvm.hexagon.V6.vmpyuh.acc
    hexagon_V6_vmpyuh_acc_128B,                // llvm.hexagon.V6.vmpyuh.acc.128B
    hexagon_V6_vmpyuhe,                        // llvm.hexagon.V6.vmpyuhe
    hexagon_V6_vmpyuhe_128B,                   // llvm.hexagon.V6.vmpyuhe.128B
    hexagon_V6_vmpyuhe_acc,                    // llvm.hexagon.V6.vmpyuhe.acc
    hexagon_V6_vmpyuhe_acc_128B,               // llvm.hexagon.V6.vmpyuhe.acc.128B
    hexagon_V6_vmpyuhv,                        // llvm.hexagon.V6.vmpyuhv
    hexagon_V6_vmpyuhv_128B,                   // llvm.hexagon.V6.vmpyuhv.128B
    hexagon_V6_vmpyuhv_acc,                    // llvm.hexagon.V6.vmpyuhv.acc
    hexagon_V6_vmpyuhv_acc_128B,               // llvm.hexagon.V6.vmpyuhv.acc.128B
    hexagon_V6_vmpyuhvs,                       // llvm.hexagon.V6.vmpyuhvs
    hexagon_V6_vmpyuhvs_128B,                  // llvm.hexagon.V6.vmpyuhvs.128B
    hexagon_V6_vmpyus_parts,                   // llvm.hexagon.V6.vmpyus.parts
    hexagon_V6_vmpyus_parts_128B,              // llvm.hexagon.V6.vmpyus.parts.128B
    hexagon_V6_vmpyuu_parts,                   // llvm.hexagon.V6.vmpyuu.parts
    hexagon_V6_vmpyuu_parts_128B,              // llvm.hexagon.V6.vmpyuu.parts.128B
    hexagon_V6_vmux,                           // llvm.hexagon.V6.vmux
    hexagon_V6_vmux_128B,                      // llvm.hexagon.V6.vmux.128B
    hexagon_V6_vnavgb,                         // llvm.hexagon.V6.vnavgb
    hexagon_V6_vnavgb_128B,                    // llvm.hexagon.V6.vnavgb.128B
    hexagon_V6_vnavgh,                         // llvm.hexagon.V6.vnavgh
    hexagon_V6_vnavgh_128B,                    // llvm.hexagon.V6.vnavgh.128B
    hexagon_V6_vnavgub,                        // llvm.hexagon.V6.vnavgub
    hexagon_V6_vnavgub_128B,                   // llvm.hexagon.V6.vnavgub.128B
    hexagon_V6_vnavgw,                         // llvm.hexagon.V6.vnavgw
    hexagon_V6_vnavgw_128B,                    // llvm.hexagon.V6.vnavgw.128B
    hexagon_V6_vnormamth,                      // llvm.hexagon.V6.vnormamth
    hexagon_V6_vnormamth_128B,                 // llvm.hexagon.V6.vnormamth.128B
    hexagon_V6_vnormamtw,                      // llvm.hexagon.V6.vnormamtw
    hexagon_V6_vnormamtw_128B,                 // llvm.hexagon.V6.vnormamtw.128B
    hexagon_V6_vnot,                           // llvm.hexagon.V6.vnot
    hexagon_V6_vnot_128B,                      // llvm.hexagon.V6.vnot.128B
    hexagon_V6_vor,                            // llvm.hexagon.V6.vor
    hexagon_V6_vor_128B,                       // llvm.hexagon.V6.vor.128B
    hexagon_V6_vpackeb,                        // llvm.hexagon.V6.vpackeb
    hexagon_V6_vpackeb_128B,                   // llvm.hexagon.V6.vpackeb.128B
    hexagon_V6_vpackeh,                        // llvm.hexagon.V6.vpackeh
    hexagon_V6_vpackeh_128B,                   // llvm.hexagon.V6.vpackeh.128B
    hexagon_V6_vpackhb_sat,                    // llvm.hexagon.V6.vpackhb.sat
    hexagon_V6_vpackhb_sat_128B,               // llvm.hexagon.V6.vpackhb.sat.128B
    hexagon_V6_vpackhub_sat,                   // llvm.hexagon.V6.vpackhub.sat
    hexagon_V6_vpackhub_sat_128B,              // llvm.hexagon.V6.vpackhub.sat.128B
    hexagon_V6_vpackob,                        // llvm.hexagon.V6.vpackob
    hexagon_V6_vpackob_128B,                   // llvm.hexagon.V6.vpackob.128B
    hexagon_V6_vpackoh,                        // llvm.hexagon.V6.vpackoh
    hexagon_V6_vpackoh_128B,                   // llvm.hexagon.V6.vpackoh.128B
    hexagon_V6_vpackwh_sat,                    // llvm.hexagon.V6.vpackwh.sat
    hexagon_V6_vpackwh_sat_128B,               // llvm.hexagon.V6.vpackwh.sat.128B
    hexagon_V6_vpackwuh_sat,                   // llvm.hexagon.V6.vpackwuh.sat
    hexagon_V6_vpackwuh_sat_128B,              // llvm.hexagon.V6.vpackwuh.sat.128B
    hexagon_V6_vpopcounth,                     // llvm.hexagon.V6.vpopcounth
    hexagon_V6_vpopcounth_128B,                // llvm.hexagon.V6.vpopcounth.128B
    hexagon_V6_vprefixqb,                      // llvm.hexagon.V6.vprefixqb
    hexagon_V6_vprefixqb_128B,                 // llvm.hexagon.V6.vprefixqb.128B
    hexagon_V6_vprefixqh,                      // llvm.hexagon.V6.vprefixqh
    hexagon_V6_vprefixqh_128B,                 // llvm.hexagon.V6.vprefixqh.128B
    hexagon_V6_vprefixqw,                      // llvm.hexagon.V6.vprefixqw
    hexagon_V6_vprefixqw_128B,                 // llvm.hexagon.V6.vprefixqw.128B
    hexagon_V6_vrdelta,                        // llvm.hexagon.V6.vrdelta
    hexagon_V6_vrdelta_128B,                   // llvm.hexagon.V6.vrdelta.128B
    hexagon_V6_vrmpybub_rtt,                   // llvm.hexagon.V6.vrmpybub.rtt
    hexagon_V6_vrmpybub_rtt_128B,              // llvm.hexagon.V6.vrmpybub.rtt.128B
    hexagon_V6_vrmpybub_rtt_acc,               // llvm.hexagon.V6.vrmpybub.rtt.acc
    hexagon_V6_vrmpybub_rtt_acc_128B,          // llvm.hexagon.V6.vrmpybub.rtt.acc.128B
    hexagon_V6_vrmpybus,                       // llvm.hexagon.V6.vrmpybus
    hexagon_V6_vrmpybus_128B,                  // llvm.hexagon.V6.vrmpybus.128B
    hexagon_V6_vrmpybus_acc,                   // llvm.hexagon.V6.vrmpybus.acc
    hexagon_V6_vrmpybus_acc_128B,              // llvm.hexagon.V6.vrmpybus.acc.128B
    hexagon_V6_vrmpybusi,                      // llvm.hexagon.V6.vrmpybusi
    hexagon_V6_vrmpybusi_128B,                 // llvm.hexagon.V6.vrmpybusi.128B
    hexagon_V6_vrmpybusi_acc,                  // llvm.hexagon.V6.vrmpybusi.acc
    hexagon_V6_vrmpybusi_acc_128B,             // llvm.hexagon.V6.vrmpybusi.acc.128B
    hexagon_V6_vrmpybusv,                      // llvm.hexagon.V6.vrmpybusv
    hexagon_V6_vrmpybusv_128B,                 // llvm.hexagon.V6.vrmpybusv.128B
    hexagon_V6_vrmpybusv_acc,                  // llvm.hexagon.V6.vrmpybusv.acc
    hexagon_V6_vrmpybusv_acc_128B,             // llvm.hexagon.V6.vrmpybusv.acc.128B
    hexagon_V6_vrmpybv,                        // llvm.hexagon.V6.vrmpybv
    hexagon_V6_vrmpybv_128B,                   // llvm.hexagon.V6.vrmpybv.128B
    hexagon_V6_vrmpybv_acc,                    // llvm.hexagon.V6.vrmpybv.acc
    hexagon_V6_vrmpybv_acc_128B,               // llvm.hexagon.V6.vrmpybv.acc.128B
    hexagon_V6_vrmpyub,                        // llvm.hexagon.V6.vrmpyub
    hexagon_V6_vrmpyub_128B,                   // llvm.hexagon.V6.vrmpyub.128B
    hexagon_V6_vrmpyub_acc,                    // llvm.hexagon.V6.vrmpyub.acc
    hexagon_V6_vrmpyub_acc_128B,               // llvm.hexagon.V6.vrmpyub.acc.128B
    hexagon_V6_vrmpyub_rtt,                    // llvm.hexagon.V6.vrmpyub.rtt
    hexagon_V6_vrmpyub_rtt_128B,               // llvm.hexagon.V6.vrmpyub.rtt.128B
    hexagon_V6_vrmpyub_rtt_acc,                // llvm.hexagon.V6.vrmpyub.rtt.acc
    hexagon_V6_vrmpyub_rtt_acc_128B,           // llvm.hexagon.V6.vrmpyub.rtt.acc.128B
    hexagon_V6_vrmpyubi,                       // llvm.hexagon.V6.vrmpyubi
    hexagon_V6_vrmpyubi_128B,                  // llvm.hexagon.V6.vrmpyubi.128B
    hexagon_V6_vrmpyubi_acc,                   // llvm.hexagon.V6.vrmpyubi.acc
    hexagon_V6_vrmpyubi_acc_128B,              // llvm.hexagon.V6.vrmpyubi.acc.128B
    hexagon_V6_vrmpyubv,                       // llvm.hexagon.V6.vrmpyubv
    hexagon_V6_vrmpyubv_128B,                  // llvm.hexagon.V6.vrmpyubv.128B
    hexagon_V6_vrmpyubv_acc,                   // llvm.hexagon.V6.vrmpyubv.acc
    hexagon_V6_vrmpyubv_acc_128B,              // llvm.hexagon.V6.vrmpyubv.acc.128B
    hexagon_V6_vror,                           // llvm.hexagon.V6.vror
    hexagon_V6_vror_128B,                      // llvm.hexagon.V6.vror.128B
    hexagon_V6_vrotr,                          // llvm.hexagon.V6.vrotr
    hexagon_V6_vrotr_128B,                     // llvm.hexagon.V6.vrotr.128B
    hexagon_V6_vroundhb,                       // llvm.hexagon.V6.vroundhb
    hexagon_V6_vroundhb_128B,                  // llvm.hexagon.V6.vroundhb.128B
    hexagon_V6_vroundhub,                      // llvm.hexagon.V6.vroundhub
    hexagon_V6_vroundhub_128B,                 // llvm.hexagon.V6.vroundhub.128B
    hexagon_V6_vrounduhub,                     // llvm.hexagon.V6.vrounduhub
    hexagon_V6_vrounduhub_128B,                // llvm.hexagon.V6.vrounduhub.128B
    hexagon_V6_vrounduwuh,                     // llvm.hexagon.V6.vrounduwuh
    hexagon_V6_vrounduwuh_128B,                // llvm.hexagon.V6.vrounduwuh.128B
    hexagon_V6_vroundwh,                       // llvm.hexagon.V6.vroundwh
    hexagon_V6_vroundwh_128B,                  // llvm.hexagon.V6.vroundwh.128B
    hexagon_V6_vroundwuh,                      // llvm.hexagon.V6.vroundwuh
    hexagon_V6_vroundwuh_128B,                 // llvm.hexagon.V6.vroundwuh.128B
    hexagon_V6_vrsadubi,                       // llvm.hexagon.V6.vrsadubi
    hexagon_V6_vrsadubi_128B,                  // llvm.hexagon.V6.vrsadubi.128B
    hexagon_V6_vrsadubi_acc,                   // llvm.hexagon.V6.vrsadubi.acc
    hexagon_V6_vrsadubi_acc_128B,              // llvm.hexagon.V6.vrsadubi.acc.128B
    hexagon_V6_vsatdw,                         // llvm.hexagon.V6.vsatdw
    hexagon_V6_vsatdw_128B,                    // llvm.hexagon.V6.vsatdw.128B
    hexagon_V6_vsathub,                        // llvm.hexagon.V6.vsathub
    hexagon_V6_vsathub_128B,                   // llvm.hexagon.V6.vsathub.128B
    hexagon_V6_vsatuwuh,                       // llvm.hexagon.V6.vsatuwuh
    hexagon_V6_vsatuwuh_128B,                  // llvm.hexagon.V6.vsatuwuh.128B
    hexagon_V6_vsatwh,                         // llvm.hexagon.V6.vsatwh
    hexagon_V6_vsatwh_128B,                    // llvm.hexagon.V6.vsatwh.128B
    hexagon_V6_vsb,                            // llvm.hexagon.V6.vsb
    hexagon_V6_vsb_128B,                       // llvm.hexagon.V6.vsb.128B
    hexagon_V6_vscattermh,                     // llvm.hexagon.V6.vscattermh
    hexagon_V6_vscattermh_128B,                // llvm.hexagon.V6.vscattermh.128B
    hexagon_V6_vscattermh_add,                 // llvm.hexagon.V6.vscattermh.add
    hexagon_V6_vscattermh_add_128B,            // llvm.hexagon.V6.vscattermh.add.128B
    hexagon_V6_vscattermhq,                    // llvm.hexagon.V6.vscattermhq
    hexagon_V6_vscattermhq_128B,               // llvm.hexagon.V6.vscattermhq.128B
    hexagon_V6_vscattermhw,                    // llvm.hexagon.V6.vscattermhw
    hexagon_V6_vscattermhw_128B,               // llvm.hexagon.V6.vscattermhw.128B
    hexagon_V6_vscattermhw_add,                // llvm.hexagon.V6.vscattermhw.add
    hexagon_V6_vscattermhw_add_128B,           // llvm.hexagon.V6.vscattermhw.add.128B
    hexagon_V6_vscattermhwq,                   // llvm.hexagon.V6.vscattermhwq
    hexagon_V6_vscattermhwq_128B,              // llvm.hexagon.V6.vscattermhwq.128B
    hexagon_V6_vscattermw,                     // llvm.hexagon.V6.vscattermw
    hexagon_V6_vscattermw_128B,                // llvm.hexagon.V6.vscattermw.128B
    hexagon_V6_vscattermw_add,                 // llvm.hexagon.V6.vscattermw.add
    hexagon_V6_vscattermw_add_128B,            // llvm.hexagon.V6.vscattermw.add.128B
    hexagon_V6_vscattermwq,                    // llvm.hexagon.V6.vscattermwq
    hexagon_V6_vscattermwq_128B,               // llvm.hexagon.V6.vscattermwq.128B
    hexagon_V6_vsh,                            // llvm.hexagon.V6.vsh
    hexagon_V6_vsh_128B,                       // llvm.hexagon.V6.vsh.128B
    hexagon_V6_vshufeh,                        // llvm.hexagon.V6.vshufeh
    hexagon_V6_vshufeh_128B,                   // llvm.hexagon.V6.vshufeh.128B
    hexagon_V6_vshuffb,                        // llvm.hexagon.V6.vshuffb
    hexagon_V6_vshuffb_128B,                   // llvm.hexagon.V6.vshuffb.128B
    hexagon_V6_vshuffeb,                       // llvm.hexagon.V6.vshuffeb
    hexagon_V6_vshuffeb_128B,                  // llvm.hexagon.V6.vshuffeb.128B
    hexagon_V6_vshuffh,                        // llvm.hexagon.V6.vshuffh
    hexagon_V6_vshuffh_128B,                   // llvm.hexagon.V6.vshuffh.128B
    hexagon_V6_vshuffob,                       // llvm.hexagon.V6.vshuffob
    hexagon_V6_vshuffob_128B,                  // llvm.hexagon.V6.vshuffob.128B
    hexagon_V6_vshuffvdd,                      // llvm.hexagon.V6.vshuffvdd
    hexagon_V6_vshuffvdd_128B,                 // llvm.hexagon.V6.vshuffvdd.128B
    hexagon_V6_vshufoeb,                       // llvm.hexagon.V6.vshufoeb
    hexagon_V6_vshufoeb_128B,                  // llvm.hexagon.V6.vshufoeb.128B
    hexagon_V6_vshufoeh,                       // llvm.hexagon.V6.vshufoeh
    hexagon_V6_vshufoeh_128B,                  // llvm.hexagon.V6.vshufoeh.128B
    hexagon_V6_vshufoh,                        // llvm.hexagon.V6.vshufoh
    hexagon_V6_vshufoh_128B,                   // llvm.hexagon.V6.vshufoh.128B
    hexagon_V6_vsub_hf,                        // llvm.hexagon.V6.vsub.hf
    hexagon_V6_vsub_hf_128B,                   // llvm.hexagon.V6.vsub.hf.128B
    hexagon_V6_vsub_hf_hf,                     // llvm.hexagon.V6.vsub.hf.hf
    hexagon_V6_vsub_hf_hf_128B,                // llvm.hexagon.V6.vsub.hf.hf.128B
    hexagon_V6_vsub_qf16,                      // llvm.hexagon.V6.vsub.qf16
    hexagon_V6_vsub_qf16_128B,                 // llvm.hexagon.V6.vsub.qf16.128B
    hexagon_V6_vsub_qf16_mix,                  // llvm.hexagon.V6.vsub.qf16.mix
    hexagon_V6_vsub_qf16_mix_128B,             // llvm.hexagon.V6.vsub.qf16.mix.128B
    hexagon_V6_vsub_qf32,                      // llvm.hexagon.V6.vsub.qf32
    hexagon_V6_vsub_qf32_128B,                 // llvm.hexagon.V6.vsub.qf32.128B
    hexagon_V6_vsub_qf32_mix,                  // llvm.hexagon.V6.vsub.qf32.mix
    hexagon_V6_vsub_qf32_mix_128B,             // llvm.hexagon.V6.vsub.qf32.mix.128B
    hexagon_V6_vsub_sf,                        // llvm.hexagon.V6.vsub.sf
    hexagon_V6_vsub_sf_128B,                   // llvm.hexagon.V6.vsub.sf.128B
    hexagon_V6_vsub_sf_bf,                     // llvm.hexagon.V6.vsub.sf.bf
    hexagon_V6_vsub_sf_bf_128B,                // llvm.hexagon.V6.vsub.sf.bf.128B
    hexagon_V6_vsub_sf_hf,                     // llvm.hexagon.V6.vsub.sf.hf
    hexagon_V6_vsub_sf_hf_128B,                // llvm.hexagon.V6.vsub.sf.hf.128B
    hexagon_V6_vsub_sf_sf,                     // llvm.hexagon.V6.vsub.sf.sf
    hexagon_V6_vsub_sf_sf_128B,                // llvm.hexagon.V6.vsub.sf.sf.128B
    hexagon_V6_vsubb,                          // llvm.hexagon.V6.vsubb
    hexagon_V6_vsubb_128B,                     // llvm.hexagon.V6.vsubb.128B
    hexagon_V6_vsubb_dv,                       // llvm.hexagon.V6.vsubb.dv
    hexagon_V6_vsubb_dv_128B,                  // llvm.hexagon.V6.vsubb.dv.128B
    hexagon_V6_vsubbnq,                        // llvm.hexagon.V6.vsubbnq
    hexagon_V6_vsubbnq_128B,                   // llvm.hexagon.V6.vsubbnq.128B
    hexagon_V6_vsubbq,                         // llvm.hexagon.V6.vsubbq
    hexagon_V6_vsubbq_128B,                    // llvm.hexagon.V6.vsubbq.128B
    hexagon_V6_vsubbsat,                       // llvm.hexagon.V6.vsubbsat
    hexagon_V6_vsubbsat_128B,                  // llvm.hexagon.V6.vsubbsat.128B
    hexagon_V6_vsubbsat_dv,                    // llvm.hexagon.V6.vsubbsat.dv
    hexagon_V6_vsubbsat_dv_128B,               // llvm.hexagon.V6.vsubbsat.dv.128B
    hexagon_V6_vsubcarry,                      // llvm.hexagon.V6.vsubcarry
    hexagon_V6_vsubcarry_128B,                 // llvm.hexagon.V6.vsubcarry.128B
    hexagon_V6_vsubcarryo,                     // llvm.hexagon.V6.vsubcarryo
    hexagon_V6_vsubcarryo_128B,                // llvm.hexagon.V6.vsubcarryo.128B
    hexagon_V6_vsubh,                          // llvm.hexagon.V6.vsubh
    hexagon_V6_vsubh_128B,                     // llvm.hexagon.V6.vsubh.128B
    hexagon_V6_vsubh_dv,                       // llvm.hexagon.V6.vsubh.dv
    hexagon_V6_vsubh_dv_128B,                  // llvm.hexagon.V6.vsubh.dv.128B
    hexagon_V6_vsubhnq,                        // llvm.hexagon.V6.vsubhnq
    hexagon_V6_vsubhnq_128B,                   // llvm.hexagon.V6.vsubhnq.128B
    hexagon_V6_vsubhq,                         // llvm.hexagon.V6.vsubhq
    hexagon_V6_vsubhq_128B,                    // llvm.hexagon.V6.vsubhq.128B
    hexagon_V6_vsubhsat,                       // llvm.hexagon.V6.vsubhsat
    hexagon_V6_vsubhsat_128B,                  // llvm.hexagon.V6.vsubhsat.128B
    hexagon_V6_vsubhsat_dv,                    // llvm.hexagon.V6.vsubhsat.dv
    hexagon_V6_vsubhsat_dv_128B,               // llvm.hexagon.V6.vsubhsat.dv.128B
    hexagon_V6_vsubhw,                         // llvm.hexagon.V6.vsubhw
    hexagon_V6_vsubhw_128B,                    // llvm.hexagon.V6.vsubhw.128B
    hexagon_V6_vsububh,                        // llvm.hexagon.V6.vsububh
    hexagon_V6_vsububh_128B,                   // llvm.hexagon.V6.vsububh.128B
    hexagon_V6_vsububsat,                      // llvm.hexagon.V6.vsububsat
    hexagon_V6_vsububsat_128B,                 // llvm.hexagon.V6.vsububsat.128B
    hexagon_V6_vsububsat_dv,                   // llvm.hexagon.V6.vsububsat.dv
    hexagon_V6_vsububsat_dv_128B,              // llvm.hexagon.V6.vsububsat.dv.128B
    hexagon_V6_vsubububb_sat,                  // llvm.hexagon.V6.vsubububb.sat
    hexagon_V6_vsubububb_sat_128B,             // llvm.hexagon.V6.vsubububb.sat.128B
    hexagon_V6_vsubuhsat,                      // llvm.hexagon.V6.vsubuhsat
    hexagon_V6_vsubuhsat_128B,                 // llvm.hexagon.V6.vsubuhsat.128B
    hexagon_V6_vsubuhsat_dv,                   // llvm.hexagon.V6.vsubuhsat.dv
    hexagon_V6_vsubuhsat_dv_128B,              // llvm.hexagon.V6.vsubuhsat.dv.128B
    hexagon_V6_vsubuhw,                        // llvm.hexagon.V6.vsubuhw
    hexagon_V6_vsubuhw_128B,                   // llvm.hexagon.V6.vsubuhw.128B
    hexagon_V6_vsubuwsat,                      // llvm.hexagon.V6.vsubuwsat
    hexagon_V6_vsubuwsat_128B,                 // llvm.hexagon.V6.vsubuwsat.128B
    hexagon_V6_vsubuwsat_dv,                   // llvm.hexagon.V6.vsubuwsat.dv
    hexagon_V6_vsubuwsat_dv_128B,              // llvm.hexagon.V6.vsubuwsat.dv.128B
    hexagon_V6_vsubw,                          // llvm.hexagon.V6.vsubw
    hexagon_V6_vsubw_128B,                     // llvm.hexagon.V6.vsubw.128B
    hexagon_V6_vsubw_dv,                       // llvm.hexagon.V6.vsubw.dv
    hexagon_V6_vsubw_dv_128B,                  // llvm.hexagon.V6.vsubw.dv.128B
    hexagon_V6_vsubwnq,                        // llvm.hexagon.V6.vsubwnq
    hexagon_V6_vsubwnq_128B,                   // llvm.hexagon.V6.vsubwnq.128B
    hexagon_V6_vsubwq,                         // llvm.hexagon.V6.vsubwq
    hexagon_V6_vsubwq_128B,                    // llvm.hexagon.V6.vsubwq.128B
    hexagon_V6_vsubwsat,                       // llvm.hexagon.V6.vsubwsat
    hexagon_V6_vsubwsat_128B,                  // llvm.hexagon.V6.vsubwsat.128B
    hexagon_V6_vsubwsat_dv,                    // llvm.hexagon.V6.vsubwsat.dv
    hexagon_V6_vsubwsat_dv_128B,               // llvm.hexagon.V6.vsubwsat.dv.128B
    hexagon_V6_vswap,                          // llvm.hexagon.V6.vswap
    hexagon_V6_vswap_128B,                     // llvm.hexagon.V6.vswap.128B
    hexagon_V6_vtmpyb,                         // llvm.hexagon.V6.vtmpyb
    hexagon_V6_vtmpyb_128B,                    // llvm.hexagon.V6.vtmpyb.128B
    hexagon_V6_vtmpyb_acc,                     // llvm.hexagon.V6.vtmpyb.acc
    hexagon_V6_vtmpyb_acc_128B,                // llvm.hexagon.V6.vtmpyb.acc.128B
    hexagon_V6_vtmpybus,                       // llvm.hexagon.V6.vtmpybus
    hexagon_V6_vtmpybus_128B,                  // llvm.hexagon.V6.vtmpybus.128B
    hexagon_V6_vtmpybus_acc,                   // llvm.hexagon.V6.vtmpybus.acc
    hexagon_V6_vtmpybus_acc_128B,              // llvm.hexagon.V6.vtmpybus.acc.128B
    hexagon_V6_vtmpyhb,                        // llvm.hexagon.V6.vtmpyhb
    hexagon_V6_vtmpyhb_128B,                   // llvm.hexagon.V6.vtmpyhb.128B
    hexagon_V6_vtmpyhb_acc,                    // llvm.hexagon.V6.vtmpyhb.acc
    hexagon_V6_vtmpyhb_acc_128B,               // llvm.hexagon.V6.vtmpyhb.acc.128B
    hexagon_V6_vunpackb,                       // llvm.hexagon.V6.vunpackb
    hexagon_V6_vunpackb_128B,                  // llvm.hexagon.V6.vunpackb.128B
    hexagon_V6_vunpackh,                       // llvm.hexagon.V6.vunpackh
    hexagon_V6_vunpackh_128B,                  // llvm.hexagon.V6.vunpackh.128B
    hexagon_V6_vunpackob,                      // llvm.hexagon.V6.vunpackob
    hexagon_V6_vunpackob_128B,                 // llvm.hexagon.V6.vunpackob.128B
    hexagon_V6_vunpackoh,                      // llvm.hexagon.V6.vunpackoh
    hexagon_V6_vunpackoh_128B,                 // llvm.hexagon.V6.vunpackoh.128B
    hexagon_V6_vunpackub,                      // llvm.hexagon.V6.vunpackub
    hexagon_V6_vunpackub_128B,                 // llvm.hexagon.V6.vunpackub.128B
    hexagon_V6_vunpackuh,                      // llvm.hexagon.V6.vunpackuh
    hexagon_V6_vunpackuh_128B,                 // llvm.hexagon.V6.vunpackuh.128B
    hexagon_V6_vxor,                           // llvm.hexagon.V6.vxor
    hexagon_V6_vxor_128B,                      // llvm.hexagon.V6.vxor.128B
    hexagon_V6_vzb,                            // llvm.hexagon.V6.vzb
    hexagon_V6_vzb_128B,                       // llvm.hexagon.V6.vzb.128B
    hexagon_V6_vzh,                            // llvm.hexagon.V6.vzh
    hexagon_V6_vzh_128B,                       // llvm.hexagon.V6.vzh.128B
    hexagon_Y2_dccleana,                       // llvm.hexagon.Y2.dccleana
    hexagon_Y2_dccleaninva,                    // llvm.hexagon.Y2.dccleaninva
    hexagon_Y2_dcfetch,                        // llvm.hexagon.Y2.dcfetch
    hexagon_Y2_dcinva,                         // llvm.hexagon.Y2.dcinva
    hexagon_Y2_dczeroa,                        // llvm.hexagon.Y2.dczeroa
    hexagon_Y4_l2fetch,                        // llvm.hexagon.Y4.l2fetch
    hexagon_Y5_l2fetch,                        // llvm.hexagon.Y5.l2fetch
    hexagon_Y6_dmlink,                         // llvm.hexagon.Y6.dmlink
    hexagon_Y6_dmpause,                        // llvm.hexagon.Y6.dmpause
    hexagon_Y6_dmpoll,                         // llvm.hexagon.Y6.dmpoll
    hexagon_Y6_dmresume,                       // llvm.hexagon.Y6.dmresume
    hexagon_Y6_dmstart,                        // llvm.hexagon.Y6.dmstart
    hexagon_Y6_dmwait,                         // llvm.hexagon.Y6.dmwait
    hexagon_circ_ldb,                          // llvm.hexagon.circ.ldb
    hexagon_circ_ldd,                          // llvm.hexagon.circ.ldd
    hexagon_circ_ldh,                          // llvm.hexagon.circ.ldh
    hexagon_circ_ldub,                         // llvm.hexagon.circ.ldub
    hexagon_circ_lduh,                         // llvm.hexagon.circ.lduh
    hexagon_circ_ldw,                          // llvm.hexagon.circ.ldw
    hexagon_circ_stb,                          // llvm.hexagon.circ.stb
    hexagon_circ_std,                          // llvm.hexagon.circ.std
    hexagon_circ_sth,                          // llvm.hexagon.circ.sth
    hexagon_circ_sthhi,                        // llvm.hexagon.circ.sthhi
    hexagon_circ_stw,                          // llvm.hexagon.circ.stw
    hexagon_instrprof_custom,                  // llvm.hexagon.instrprof.custom
    hexagon_prefetch,                          // llvm.hexagon.prefetch
    hexagon_vmemcpy,                           // llvm.hexagon.vmemcpy
    hexagon_vmemset,                           // llvm.hexagon.vmemset
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
IR/Operator.hnu//===-- llvm/Operator.h - Operator utility subclass -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines various classes for working with Instructions and
// ConstantExprs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_OPERATOR_H
#define LLVM_IR_OPERATOR_H

#include "llvm/ADT/MapVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cstddef>
#include <optional>

namespace llvm {

/// This is a utility class that provides an abstraction for the common
/// functionality between Instructions and ConstantExprs.
class Operator : public User {
public:
  // The Operator class is intended to be used as a utility, and is never itself
  // instantiated.
  Operator() = delete;
  ~Operator() = delete;

  void *operator new(size_t s) = delete;

  /// Return the opcode for this Instruction or ConstantExpr.
  unsigned getOpcode() const {
    if (const Instruction *I = dyn_cast<Instruction>(this))
      return I->getOpcode();
    return cast<ConstantExpr>(this)->getOpcode();
  }

  /// If V is an Instruction or ConstantExpr, return its opcode.
  /// Otherwise return UserOp1.
  static unsigned getOpcode(const Value *V) {
    if (const Instruction *I = dyn_cast<Instruction>(V))
      return I->getOpcode();
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      return CE->getOpcode();
    return Instruction::UserOp1;
  }

  static bool classof(const Instruction *) { return true; }
  static bool classof(const ConstantExpr *) { return true; }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) || isa<ConstantExpr>(V);
  }

  /// Return true if this operator has flags which may cause this operator
  /// to evaluate to poison despite having non-poison inputs.
  bool hasPoisonGeneratingFlags() const;

  /// Return true if this operator has poison-generating flags or metadata.
  /// The latter is only possible for instructions.
  bool hasPoisonGeneratingFlagsOrMetadata() const;
};
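
// A minimal usage sketch (added for illustration; assumes V is an
// llvm::Value* obtained from the IR):
//
// \code
//   if (const auto *Op = dyn_cast<Operator>(V)) {
//     // The same call works whether V is an Instruction or a ConstantExpr.
//     if (Op->getOpcode() == Instruction::Add)
//       ; // ... handle add-like operators here
//   }
// \endcode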

/// Utility class for integer operators which may exhibit overflow - Add, Sub,
/// Mul, and Shl. It does not include SDiv, despite that operator having the
/// potential for overflow.
class OverflowingBinaryOperator : public Operator {
public:
  enum {
    AnyWrap        = 0,
    NoUnsignedWrap = (1 << 0),
    NoSignedWrap   = (1 << 1)
  };

private:
  friend class Instruction;
  friend class ConstantExpr;

  void setHasNoUnsignedWrap(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
  }
  void setHasNoSignedWrap(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
  }

public:
  /// Test whether this operation is known to never
  /// undergo unsigned overflow, aka the nuw property.
  bool hasNoUnsignedWrap() const {
    return SubclassOptionalData & NoUnsignedWrap;
  }

  /// Test whether this operation is known to never
  /// undergo signed overflow, aka the nsw property.
  bool hasNoSignedWrap() const {
    return (SubclassOptionalData & NoSignedWrap) != 0;
  }

  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Add ||
           I->getOpcode() == Instruction::Sub ||
           I->getOpcode() == Instruction::Mul ||
           I->getOpcode() == Instruction::Shl;
  }
  static bool classof(const ConstantExpr *CE) {
    return CE->getOpcode() == Instruction::Add ||
           CE->getOpcode() == Instruction::Sub ||
           CE->getOpcode() == Instruction::Mul ||
           CE->getOpcode() == Instruction::Shl;
  }
  static bool classof(const Value *V) {
    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
  }
};
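
// A hedged usage sketch (illustrative only): the wrap flags queried through
// this class are what license overflow-sensitive rewrites.
//
// \code
//   if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V))
//     if (OBO->hasNoSignedWrap() && OBO->hasNoUnsignedWrap())
//       ; // ... the operation provably never wraps in either sense
// \endcode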

/// A udiv, sdiv, lshr, or ashr instruction, which can be marked as "exact",
/// indicating that no bits are destroyed.
class PossiblyExactOperator : public Operator {
public:
  enum {
    IsExact = (1 << 0)
  };

private:
  friend class Instruction;
  friend class ConstantExpr;

  void setIsExact(bool B) {
    SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B * IsExact);
  }

public:
  /// Test whether this division is known to be exact, with zero remainder.
  bool isExact() const {
    return SubclassOptionalData & IsExact;
  }

  static bool isPossiblyExactOpcode(unsigned OpC) {
    return OpC == Instruction::SDiv ||
           OpC == Instruction::UDiv ||
           OpC == Instruction::AShr ||
           OpC == Instruction::LShr;
  }

  static bool classof(const ConstantExpr *CE) {
    return isPossiblyExactOpcode(CE->getOpcode());
  }
  static bool classof(const Instruction *I) {
    return isPossiblyExactOpcode(I->getOpcode());
  }
  static bool classof(const Value *V) {
    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
  }
};
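
// Illustrative sketch: an exact division has a zero remainder, so a
// simplifier may fold (X udiv Y) * Y back to X when isExact() holds.
//
// \code
//   if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V))
//     if (PEO->isExact())
//       ; // ... no bits were destroyed by this udiv/sdiv/lshr/ashr
// \endcode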

/// Utility class for floating point operations which can have
/// information about relaxed accuracy requirements attached to them.
class FPMathOperator : public Operator {
private:
  friend class Instruction;

  /// 'Fast' means all bits are set.
  void setFast(bool B) {
    setHasAllowReassoc(B);
    setHasNoNaNs(B);
    setHasNoInfs(B);
    setHasNoSignedZeros(B);
    setHasAllowReciprocal(B);
    setHasAllowContract(B);
    setHasApproxFunc(B);
  }

  void setHasAllowReassoc(bool B) {
    SubclassOptionalData =
    (SubclassOptionalData & ~FastMathFlags::AllowReassoc) |
    (B * FastMathFlags::AllowReassoc);
  }

  void setHasNoNaNs(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~FastMathFlags::NoNaNs) |
      (B * FastMathFlags::NoNaNs);
  }

  void setHasNoInfs(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~FastMathFlags::NoInfs) |
      (B * FastMathFlags::NoInfs);
  }

  void setHasNoSignedZeros(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~FastMathFlags::NoSignedZeros) |
      (B * FastMathFlags::NoSignedZeros);
  }

  void setHasAllowReciprocal(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~FastMathFlags::AllowReciprocal) |
      (B * FastMathFlags::AllowReciprocal);
  }

  void setHasAllowContract(bool B) {
    SubclassOptionalData =
        (SubclassOptionalData & ~FastMathFlags::AllowContract) |
        (B * FastMathFlags::AllowContract);
  }

  void setHasApproxFunc(bool B) {
    SubclassOptionalData =
        (SubclassOptionalData & ~FastMathFlags::ApproxFunc) |
        (B * FastMathFlags::ApproxFunc);
  }

  /// Convenience function for setting multiple fast-math flags.
  /// FMF is a mask of the bits to set.
  void setFastMathFlags(FastMathFlags FMF) {
    SubclassOptionalData |= FMF.Flags;
  }

  /// Convenience function for copying all fast-math flags.
  /// All values in FMF are transferred to this operator.
  void copyFastMathFlags(FastMathFlags FMF) {
    SubclassOptionalData = FMF.Flags;
  }

public:
  /// Test if this operation allows all non-strict floating-point transforms.
  bool isFast() const {
    return ((SubclassOptionalData & FastMathFlags::AllowReassoc) != 0 &&
            (SubclassOptionalData & FastMathFlags::NoNaNs) != 0 &&
            (SubclassOptionalData & FastMathFlags::NoInfs) != 0 &&
            (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0 &&
            (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0 &&
            (SubclassOptionalData & FastMathFlags::AllowContract) != 0 &&
            (SubclassOptionalData & FastMathFlags::ApproxFunc) != 0);
  }

  /// Test if this operation may be simplified with reassociative transforms.
  bool hasAllowReassoc() const {
    return (SubclassOptionalData & FastMathFlags::AllowReassoc) != 0;
  }

  /// Test if this operation's arguments and results are assumed not-NaN.
  bool hasNoNaNs() const {
    return (SubclassOptionalData & FastMathFlags::NoNaNs) != 0;
  }

  /// Test if this operation's arguments and results are assumed not-infinite.
  bool hasNoInfs() const {
    return (SubclassOptionalData & FastMathFlags::NoInfs) != 0;
  }

  /// Test if this operation can ignore the sign of zero.
  bool hasNoSignedZeros() const {
    return (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0;
  }

  /// Test if this operation can use reciprocal multiply instead of division.
  bool hasAllowReciprocal() const {
    return (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0;
  }

  /// Test if this operation can be floating-point contracted (FMA).
  bool hasAllowContract() const {
    return (SubclassOptionalData & FastMathFlags::AllowContract) != 0;
  }

  /// Test if this operation allows approximations of math library functions or
  /// intrinsics.
  bool hasApproxFunc() const {
    return (SubclassOptionalData & FastMathFlags::ApproxFunc) != 0;
  }

  /// Convenience function for getting all the fast-math flags
  FastMathFlags getFastMathFlags() const {
    return FastMathFlags(SubclassOptionalData);
  }

  /// Get the maximum error permitted by this operation in ULPs. An accuracy of
  /// 0.0 means that the operation should be performed with the default
  /// precision.
  float getFPAccuracy() const;

  static bool classof(const Value *V) {
    unsigned Opcode;
    if (auto *I = dyn_cast<Instruction>(V))
      Opcode = I->getOpcode();
    else if (auto *CE = dyn_cast<ConstantExpr>(V))
      Opcode = CE->getOpcode();
    else
      return false;

    switch (Opcode) {
    case Instruction::FNeg:
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::FRem:
    // FIXME: To clean up and correct the semantics of fast-math-flags, FCmp
    //        should not be treated as a math op, but the other opcodes should.
    //        This would make things consistent with Select/PHI (FP value type
    //        determines whether they are math ops and, therefore, capable of
    //        having fast-math-flags).
    case Instruction::FCmp:
      return true;
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::Call: {
      Type *Ty = V->getType();
      while (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty))
        Ty = ArrTy->getElementType();
      return Ty->isFPOrFPVectorTy();
    }
    default:
      return false;
    }
  }
};
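
// Illustrative sketch (assumes V comes from floating-point IR): passes
// consult these flags to decide which relaxed FP transforms are legal.
//
// \code
//   if (const auto *FPOp = dyn_cast<FPMathOperator>(V)) {
//     FastMathFlags FMF = FPOp->getFastMathFlags();
//     if (FMF.allowReassoc() && FMF.noSignedZeros())
//       ; // ... safe to reassociate this FP expression
//   }
// \endcode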

/// A helper template for defining operators for individual opcodes.
template<typename SuperClass, unsigned Opc>
class ConcreteOperator : public SuperClass {
public:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Opc;
  }
  static bool classof(const ConstantExpr *CE) {
    return CE->getOpcode() == Opc;
  }
  static bool classof(const Value *V) {
    return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
           (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
  }
};

class AddOperator
  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {
};
class SubOperator
  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {
};
class MulOperator
  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {
};
class ShlOperator
  : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
};

class SDivOperator
  : public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
};
class UDivOperator
  : public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {
};
class AShrOperator
  : public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {
};
class LShrOperator
  : public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
};

class ZExtOperator : public ConcreteOperator<Operator, Instruction::ZExt> {};

class GEPOperator
  : public ConcreteOperator<Operator, Instruction::GetElementPtr> {
  friend class GetElementPtrInst;
  friend class ConstantExpr;

  enum {
    IsInBounds = (1 << 0),
    // InRangeIndex: bits 1-6
  };

  void setIsInBounds(bool B) {
    SubclassOptionalData =
      (SubclassOptionalData & ~IsInBounds) | (B * IsInBounds);
  }

public:
  /// Test whether this is an inbounds GEP, as defined by LangRef.html.
  bool isInBounds() const {
    return SubclassOptionalData & IsInBounds;
  }

  /// Returns the offset of the index with an inrange attachment, or
  /// std::nullopt if none.
  std::optional<unsigned> getInRangeIndex() const {
    if (SubclassOptionalData >> 1 == 0)
      return std::nullopt;
    return (SubclassOptionalData >> 1) - 1;
  }

  inline op_iterator       idx_begin()       { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator       idx_end()         { return op_end(); }
  inline const_op_iterator idx_end()   const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U;                      // Index of the pointer operand.
  }

  /// Method to return the pointer operand as a PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  Type *getSourceElementType() const;
  Type *getResultElementType() const;

  /// Method to return the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  unsigned getNumIndices() const {  // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are zeros.
  /// If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const {
    for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
      if (ConstantInt *C = dyn_cast<ConstantInt>(I))
        if (C->isZero())
          continue;
      return false;
    }
    return true;
  }

  /// Return true if all of the indices of this GEP are constant integers.
  /// If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const {
    for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
      if (!isa<ConstantInt>(I))
        return false;
    }
    return true;
  }

  unsigned countNonConstantIndices() const {
    return count_if(indices(), [](const Use& use) {
        return !isa<ConstantInt>(*use);
      });
  }

  /// Compute the maximum alignment that this GEP is guaranteed to preserve.
  Align getMaxPreservedAlignment(const DataLayout &DL) const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will try to accumulate the
  /// constant offset of this GEP.
  ///
  /// If \p ExternalAnalysis is provided, it will be used to calculate an
  /// offset when an operand of the GEP is not constant. For example,
  /// \p ExternalAnalysis might try to calculate a lower bound for a value.
  /// If \p ExternalAnalysis is successful, it should return true.
  ///
  /// If \p ExternalAnalysis returns false, or the value returned by
  /// \p ExternalAnalysis results in an overflow/underflow, this routine
  /// returns false and the value of the offset APInt is undefined (it is
  /// *not* preserved!).
  ///
  /// The APInt passed into this routine must be exactly as wide as the
  /// IntPtr type for the address space of the base GEP pointer.
  bool accumulateConstantOffset(
      const DataLayout &DL, APInt &Offset,
      function_ref<bool(Value &, APInt &)> ExternalAnalysis = nullptr) const;

  static bool accumulateConstantOffset(
      Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
      APInt &Offset,
      function_ref<bool(Value &, APInt &)> ExternalAnalysis = nullptr);

  /// Collect the offset of this GEP as a map of Values to their associated
  /// APInt multipliers, as well as a total Constant Offset.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
};
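
// Illustrative sketch: computing a GEP's constant byte offset.  Note the
// APInt must be exactly as wide as the index type (see the doc above).
//
// \code
//   // Assumes GEP is a const GEPOperator* and DL is the module DataLayout.
//   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//   if (GEP->accumulateConstantOffset(DL, Offset))
//     ; // ... Offset now holds the total constant offset in bytes
// \endcode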

class PtrToIntOperator
    : public ConcreteOperator<Operator, Instruction::PtrToInt> {
  friend class PtrToIntInst;
  friend class ConstantExpr;

public:
  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  static unsigned getPointerOperandIndex() {
    return 0U;                      // Index of the pointer operand.
  }

  /// Method to return the pointer operand as a PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Method to return the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return cast<PointerType>(getPointerOperandType())->getAddressSpace();
  }
};

class BitCastOperator
    : public ConcreteOperator<Operator, Instruction::BitCast> {
  friend class BitCastInst;
  friend class ConstantExpr;

public:
  Type *getSrcTy() const {
    return getOperand(0)->getType();
  }

  Type *getDestTy() const {
    return getType();
  }
};

class AddrSpaceCastOperator
    : public ConcreteOperator<Operator, Instruction::AddrSpaceCast> {
  friend class AddrSpaceCastInst;
  friend class ConstantExpr;

public:
  Value *getPointerOperand() { return getOperand(0); }

  const Value *getPointerOperand() const { return getOperand(0); }

  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};

} // end namespace llvm

#endif // LLVM_IR_OPERATOR_H
//===- PrintPasses.h - Determining whether/when to print IR ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PRINTPASSES_H
#define LLVM_IR_PRINTPASSES_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/CommandLine.h"
#include <vector>

namespace llvm {

enum class ChangePrinter {
  None,
  Verbose,
  Quiet,
  DiffVerbose,
  DiffQuiet,
  ColourDiffVerbose,
  ColourDiffQuiet,
  DotCfgVerbose,
  DotCfgQuiet
};

extern cl::opt<ChangePrinter> PrintChanged;

// Returns true if printing before/after some pass is enabled, whether all
// passes or a specific pass.
bool shouldPrintBeforeSomePass();
bool shouldPrintAfterSomePass();

// Returns true if we should print before/after a specific pass. The argument
// should be the pass ID, e.g. "instcombine".
bool shouldPrintBeforePass(StringRef PassID);
bool shouldPrintAfterPass(StringRef PassID);

// Returns true if we should print before/after all passes.
bool shouldPrintBeforeAll();
bool shouldPrintAfterAll();

// The list of passes to print before/after, if we only want to print
// before/after specific passes.
std::vector<std::string> printBeforePasses();
std::vector<std::string> printAfterPasses();

// Returns true if we should always print the entire module.
bool forcePrintModuleIR();

// Return true if -filter-passes is empty or contains the pass name.
bool isPassInPrintList(StringRef PassName);
bool isFilterPassesEmpty();

// Returns true if we should print the function.
bool isFunctionInPrintList(StringRef FunctionName);

// Ensure temporary files exist, creating or re-using them.  \p FD contains
// file descriptors (-1 indicates that the file should be created) and
// \p SR contains the corresponding initial content.  \p FileName will have
// the filenames filled in when creating files.  Return first error code (if
// any) and stop.
std::error_code prepareTempFiles(SmallVector<int> &FD, ArrayRef<StringRef> SR,
                                 SmallVector<std::string> &FileName);

// Remove the temporary files in \p FileName.  Typically used in conjunction
// with prepareTempFiles.  Return first error code (if any) and stop.
std::error_code cleanUpTempFiles(ArrayRef<std::string> FileName);

// Perform a system-based diff between \p Before and \p After, using \p
// OldLineFormat, \p NewLineFormat, and \p UnchangedLineFormat to control the
// formatting of the output. Return an error message for any failures instead
// of the diff.
std::string doSystemDiff(StringRef Before, StringRef After,
                         StringRef OldLineFormat, StringRef NewLineFormat,
                         StringRef UnchangedLineFormat);
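
// Illustrative sketch (not part of the original header): instrumentation
// code would typically combine these predicates before printing IR.
//
// \code
//   // Assumes PassID is a pass argument such as "instcombine" and F is the
//   // current llvm::Function.
//   if (shouldPrintBeforePass(PassID) && isFunctionInPrintList(F.getName()))
//     ; // ... print F here (or the whole module if forcePrintModuleIR())
// \endcode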

} // namespace llvm

#endif // LLVM_IR_PRINTPASSES_H
//===- IntrinsicsPowerPC.td - Defines PowerPC intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the PowerPC-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Definitions for all PowerPC intrinsics.
//

// Non-altivec intrinsics.
let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
  // dcba/dcbf/dcbi/dcbst/dcbt/dcbz/dcbzl(PPC970) instructions.
  def int_ppc_dcba  : Intrinsic<[], [llvm_ptr_ty], []>;
  def int_ppc_dcbf : ClangBuiltin<"__builtin_dcbf">,
                      Intrinsic<[], [llvm_ptr_ty], [IntrArgMemOnly]>;
  def int_ppc_dcbfps : Intrinsic<[], [llvm_ptr_ty], [IntrArgMemOnly]>;
  def int_ppc_dcbstps : Intrinsic<[], [llvm_ptr_ty], [IntrArgMemOnly]>;
  def int_ppc_dcbi  : Intrinsic<[], [llvm_ptr_ty], []>;
  def int_ppc_dcbt_with_hint: Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
  def int_ppc_dcbtst_with_hint: Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>, ImmArg<ArgIndex<1>>]>;
  def int_ppc_dcbzl : Intrinsic<[], [llvm_ptr_ty], []>;

  // Get content from current FPSCR register
  def int_ppc_readflm : ClangBuiltin<"__builtin_readflm">,
                        DefaultAttrsIntrinsic<[llvm_double_ty], [],
                                              [IntrNoMerge, IntrHasSideEffects]>;
  // Set FPSCR register, and return previous content
  def int_ppc_setflm : ClangBuiltin<"__builtin_setflm">,
                       DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty],
                                             [IntrHasSideEffects]>;

  // Intrinsics for [double]word extended forms of divide instructions
  def int_ppc_divwe : ClangBuiltin<"__builtin_divwe">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_divweu : ClangBuiltin<"__builtin_divweu">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_divde : ClangBuiltin<"__builtin_divde">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_ppc_divdeu : ClangBuiltin<"__builtin_divdeu">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;

  def int_ppc_unpack_longdouble : ClangBuiltin<"__builtin_unpack_longdouble">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_ppcf128_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_pack_longdouble : ClangBuiltin<"__builtin_pack_longdouble">,
      DefaultAttrsIntrinsic<[llvm_ppcf128_ty], [llvm_double_ty, llvm_double_ty],
                            [IntrNoMem]>;

  // Generate a random number
  def int_ppc_darn : ClangBuiltin<"__builtin_darn">,
                     DefaultAttrsIntrinsic<[llvm_i64_ty], [],
                                           [IntrNoMerge, IntrHasSideEffects]>;
  def int_ppc_darnraw : ClangBuiltin<"__builtin_darn_raw">,
                     DefaultAttrsIntrinsic<[llvm_i64_ty], [],
                                           [IntrNoMerge, IntrHasSideEffects]>;
  def int_ppc_darn32 : ClangBuiltin<"__builtin_darn_32">,
                     DefaultAttrsIntrinsic<[llvm_i32_ty], [],
                                           [IntrNoMerge, IntrHasSideEffects]>;

  // Bit permute doubleword
  def int_ppc_bpermd : ClangBuiltin<"__builtin_bpermd">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;

  // Parallel Bits Deposit/Extract Doubleword Builtins.
  def int_ppc_pdepd
      : ClangBuiltin<"__builtin_pdepd">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_pextd
      : ClangBuiltin<"__builtin_pextd">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;

  // Centrifuge Doubleword Builtin.
  def int_ppc_cfuged
      : ClangBuiltin<"__builtin_cfuged">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;

  // Count Leading / Trailing Zeroes under bit Mask Builtins.
  def int_ppc_cntlzdm
      : ClangBuiltin<"__builtin_cntlzdm">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_cnttzdm
      : ClangBuiltin<"__builtin_cnttzdm">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;

  def int_ppc_truncf128_round_to_odd
      : ClangBuiltin<"__builtin_truncf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_f128_ty], [IntrNoMem]>;
  def int_ppc_sqrtf128_round_to_odd
      : ClangBuiltin<"__builtin_sqrtf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_f128_ty], [IntrNoMem]>;
  def int_ppc_addf128_round_to_odd
      : ClangBuiltin<"__builtin_addf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_f128_ty,llvm_f128_ty],
                              [IntrNoMem]>;
  def int_ppc_subf128_round_to_odd
      : ClangBuiltin<"__builtin_subf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_f128_ty,llvm_f128_ty],
                              [IntrNoMem]>;
  def int_ppc_mulf128_round_to_odd
      : ClangBuiltin<"__builtin_mulf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_f128_ty,llvm_f128_ty],
                              [IntrNoMem]>;
  def int_ppc_divf128_round_to_odd
      : ClangBuiltin<"__builtin_divf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_f128_ty,llvm_f128_ty],
                              [IntrNoMem]>;
  def int_ppc_fmaf128_round_to_odd
      : ClangBuiltin<"__builtin_fmaf128_round_to_odd">,
        DefaultAttrsIntrinsic<[llvm_f128_ty],
                              [llvm_f128_ty,llvm_f128_ty,llvm_f128_ty],
                              [IntrNoMem]>;
  def int_ppc_scalar_extract_expq
      : ClangBuiltin<"__builtin_vsx_scalar_extract_expq">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_f128_ty], [IntrNoMem]>;
  def int_ppc_scalar_insert_exp_qp
      : ClangBuiltin<"__builtin_vsx_scalar_insert_exp_qp">,
        DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_f128_ty, llvm_i64_ty],
                              [IntrNoMem]>;

  // Intrinsics defined to maintain XL compatibility
  def int_ppc_tdw
      : ClangBuiltin<"__builtin_ppc_tdw">,
        Intrinsic <[], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;
  def int_ppc_tw
      : ClangBuiltin<"__builtin_ppc_tw">,
        Intrinsic <[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<2>>]>;
  def int_ppc_trapd
      : ClangBuiltin<"__builtin_ppc_trapd">,
        Intrinsic <[], [llvm_i64_ty], []>;
  def int_ppc_trap
      : ClangBuiltin<"__builtin_ppc_trap">,
        Intrinsic <[], [llvm_i32_ty], []>;
  def int_ppc_fcfid
      : ClangBuiltin<"__builtin_ppc_fcfid">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fcfud
      : ClangBuiltin<"__builtin_ppc_fcfud">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fctid
      : ClangBuiltin<"__builtin_ppc_fctid">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fctidz
      : ClangBuiltin<"__builtin_ppc_fctidz">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fctiw
      : ClangBuiltin<"__builtin_ppc_fctiw">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fctiwz
      : ClangBuiltin<"__builtin_ppc_fctiwz">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fctudz
      : ClangBuiltin<"__builtin_ppc_fctudz">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fctuwz
      : ClangBuiltin<"__builtin_ppc_fctuwz">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;

  // XL compatible select functions
  // TODO: Add llvm_f128_ty support.
  def int_ppc_maxfe
      : DefaultAttrsIntrinsic<
            [llvm_ppcf128_ty],
            [llvm_ppcf128_ty, llvm_ppcf128_ty, llvm_ppcf128_ty, llvm_vararg_ty],
            [IntrNoMem]>;
  def int_ppc_maxfl
      : DefaultAttrsIntrinsic<
            [llvm_double_ty],
            [llvm_double_ty, llvm_double_ty, llvm_double_ty, llvm_vararg_ty],
            [IntrNoMem]>;
  def int_ppc_maxfs
      : DefaultAttrsIntrinsic<
            [llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_vararg_ty],
            [IntrNoMem]>;
  def int_ppc_minfe
      : DefaultAttrsIntrinsic<
            [llvm_ppcf128_ty],
            [llvm_ppcf128_ty, llvm_ppcf128_ty, llvm_ppcf128_ty, llvm_vararg_ty],
            [IntrNoMem]>;
  def int_ppc_minfl
      : DefaultAttrsIntrinsic<
            [llvm_double_ty],
            [llvm_double_ty, llvm_double_ty, llvm_double_ty, llvm_vararg_ty],
            [IntrNoMem]>;
  def int_ppc_minfs
      : DefaultAttrsIntrinsic<
            [llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_vararg_ty],
            [IntrNoMem]>;
}
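
// Illustrative sketch (C++, added for context; not part of this .td file):
// each def above becomes an Intrinsic ID that front ends and passes can
// instantiate, e.g. for llvm.ppc.dcbf:
//
//   // Assumes M is a Module* and B is an IRBuilder<>; Ptr is a pointer Value*.
//   Function *Dcbf = Intrinsic::getDeclaration(M, Intrinsic::ppc_dcbf);
//   B.CreateCall(Dcbf, {Ptr});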

let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
  /// PowerPC_Vec_Intrinsic - Base class for all altivec intrinsics.
  class PowerPC_Vec_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
                              list<LLVMType> param_types,
                              list<IntrinsicProperty> properties>
    : ClangBuiltin<!strconcat("__builtin_altivec_", GCCIntSuffix)>,
      DefaultAttrsIntrinsic<ret_types, param_types, properties>;

  /// PowerPC_VSX_Intrinsic - Base class for all VSX intrinsics.
  class PowerPC_VSX_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
                              list<LLVMType> param_types,
                              list<IntrinsicProperty> properties>
    : ClangBuiltin<!strconcat("__builtin_vsx_", GCCIntSuffix)>,
      DefaultAttrsIntrinsic<ret_types, param_types, properties>;
}

//===----------------------------------------------------------------------===//
// PowerPC MMA Intrinsic Multi Class Definitions.
//

multiclass PowerPC_MMA_ACC_Intrinsic<list<LLVMType> args> {
  def NAME: DefaultAttrsIntrinsic<[llvm_v512i1_ty], args, [IntrNoMem]>;
  def pp : DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                                 !listconcat([llvm_v512i1_ty], args),
                                 [IntrNoMem]>;
  def pn : DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                                 !listconcat([llvm_v512i1_ty], args),
                                 [IntrNoMem]>;
  def np : DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                                 !listconcat([llvm_v512i1_ty], args),
                                 [IntrNoMem]>;
  def nn : DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                                 !listconcat([llvm_v512i1_ty], args),
                                 [IntrNoMem]>;
}
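
// Note (added for clarity): TableGen prefixes each inner def with the
// instantiation name, so a hypothetical `defm int_ppc_mma_foo :
// PowerPC_MMA_ACC_Intrinsic<...>` yields int_ppc_mma_foo plus the
// foopp/foopn/foonp/foonn accumulating variants, each of which takes the
// v512i1 accumulator as an extra leading operand.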

multiclass PowerPC_MMA_ACC_PP_Intrinsic<list<LLVMType> args> {
  def NAME: DefaultAttrsIntrinsic<[llvm_v512i1_ty], args, [IntrNoMem]>;
  def pp : DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                                 !listconcat([llvm_v512i1_ty], args),
                                 [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// PowerPC Altivec Intrinsic Class Definitions.
//

/// PowerPC_Vec_FF_Intrinsic - A PowerPC intrinsic that takes one v4f32
/// vector and returns one.  These intrinsics have no side effects.
class PowerPC_Vec_FF_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;

/// PowerPC_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_Vec_FFF_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                          [IntrNoMem]>;

/// PowerPC_Vec_BBB_Intrinsic - A PowerPC intrinsic that takes two v16i8
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                          [IntrNoMem]>;

/// PowerPC_Vec_HHH_Intrinsic - A PowerPC intrinsic that takes two v8i16
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                          [IntrNoMem]>;

/// PowerPC_Vec_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                          [IntrNoMem]>;

/// PowerPC_Vec_DDD_Intrinsic - A PowerPC intrinsic that takes two v2i64
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_Vec_DDD_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                          [IntrNoMem]>;

/// PowerPC_Vec_QQQ_Intrinsic - A PowerPC intrinsic that takes two v1i128
/// vectors and returns one. These intrinsics have no side effects.
class PowerPC_Vec_QQQ_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                         [llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty],
                         [IntrNoMem]>;

/// PowerPC_Vec_QDD_Intrinsic - A PowerPC intrinsic that takes two v2i64
/// vectors and returns one v1i128. These intrinsics have no side effects.
class PowerPC_Vec_QDD_Intrinsic<string GCCIntSuffix>
  : PowerPC_Vec_Intrinsic<GCCIntSuffix,
                          [llvm_v1i128_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                          [IntrNoMem]>;
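
// Illustrative sketch (hedged; the instantiation is not shown in this
// excerpt): a concrete Altivec intrinsic names one of these classes, e.g.
//
//   def int_ppc_altivec_vmaxfp : PowerPC_Vec_FFF_Intrinsic<"vmaxfp">;
//
// which declares llvm.ppc.altivec.vmaxfp bound to __builtin_altivec_vmaxfp.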

//===----------------------------------------------------------------------===//
// PowerPC VSX Intrinsic Class Definitions.
//

/// PowerPC_VSX_Vec_DDD_Intrinsic - A PowerPC intrinsic that takes two v2f64
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_VSX_Vec_DDD_Intrinsic<string GCCIntSuffix>
  : PowerPC_VSX_Intrinsic<GCCIntSuffix,
                          [llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
                          [IntrNoMem]>;

/// PowerPC_VSX_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
/// vectors and returns one.  These intrinsics have no side effects.
class PowerPC_VSX_Vec_FFF_Intrinsic<string GCCIntSuffix>
  : PowerPC_VSX_Intrinsic<GCCIntSuffix,
                          [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                          [IntrNoMem]>;

/// PowerPC_VSX_Sca_DDD_Intrinsic - A PowerPC intrinsic that takes two f64
/// scalars and returns one.  These intrinsics have no side effects.
class PowerPC_VSX_Sca_DDD_Intrinsic<string GCCIntSuffix>
  : PowerPC_VSX_Intrinsic<GCCIntSuffix,
                          [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
                          [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// PowerPC Altivec Intrinsic Definitions.

let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".
  // Data Stream Control.
  def int_ppc_altivec_dss : ClangBuiltin<"__builtin_altivec_dss">,
              Intrinsic<[], [llvm_i32_ty], []>;
  def int_ppc_altivec_dssall : ClangBuiltin<"__builtin_altivec_dssall">,
              Intrinsic<[], [], []>;
  def int_ppc_altivec_dst : ClangBuiltin<"__builtin_altivec_dst">,
              Intrinsic<[],
                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
                        []>;
  def int_ppc_altivec_dstt : ClangBuiltin<"__builtin_altivec_dstt">,
              Intrinsic<[],
                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
                        []>;
  def int_ppc_altivec_dstst : ClangBuiltin<"__builtin_altivec_dstst">,
              Intrinsic<[],
                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
                        []>;
  def int_ppc_altivec_dststt : ClangBuiltin<"__builtin_altivec_dststt">,
              Intrinsic<[],
                        [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
                        []>;

  // VSCR access.
  def int_ppc_altivec_mfvscr : ClangBuiltin<"__builtin_altivec_mfvscr">,
              Intrinsic<[llvm_v8i16_ty], [], [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_mtvscr : ClangBuiltin<"__builtin_altivec_mtvscr">,
              Intrinsic<[], [llvm_v4i32_ty], [IntrNoMem, IntrHasSideEffects]>;


  // Loads.  These don't map directly to GCC builtins because they represent the
  // source address with a single pointer.
  def int_ppc_altivec_lvx :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_ppc_altivec_lvxl :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_ppc_altivec_lvebx :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_ppc_altivec_lvehx :
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_ppc_altivec_lvewx :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;

  // Stores.  These don't map directly to GCC builtins because they represent the
  // destination address with a single pointer.
  def int_ppc_altivec_stvx :
              DefaultAttrsIntrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;
  def int_ppc_altivec_stvxl :
              DefaultAttrsIntrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;
  def int_ppc_altivec_stvebx :
              DefaultAttrsIntrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;
  def int_ppc_altivec_stvehx :
              DefaultAttrsIntrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;
  def int_ppc_altivec_stvewx :
              DefaultAttrsIntrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;

  // Comparisons setting a vector.
  def int_ppc_altivec_vcmpbfp : ClangBuiltin<"__builtin_altivec_vcmpbfp">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpeqfp : ClangBuiltin<"__builtin_altivec_vcmpeqfp">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgefp : ClangBuiltin<"__builtin_altivec_vcmpgefp">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtfp : ClangBuiltin<"__builtin_altivec_vcmpgtfp">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequd : ClangBuiltin<"__builtin_altivec_vcmpequd">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsd : ClangBuiltin<"__builtin_altivec_vcmpgtsd">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtud : ClangBuiltin<"__builtin_altivec_vcmpgtud">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequw : ClangBuiltin<"__builtin_altivec_vcmpequw">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsw : ClangBuiltin<"__builtin_altivec_vcmpgtsw">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtuw : ClangBuiltin<"__builtin_altivec_vcmpgtuw">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnew : ClangBuiltin<"__builtin_altivec_vcmpnew">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnezw : ClangBuiltin<"__builtin_altivec_vcmpnezw">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequh : ClangBuiltin<"__builtin_altivec_vcmpequh">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsh : ClangBuiltin<"__builtin_altivec_vcmpgtsh">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtuh : ClangBuiltin<"__builtin_altivec_vcmpgtuh">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpneh : ClangBuiltin<"__builtin_altivec_vcmpneh">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnezh : ClangBuiltin<"__builtin_altivec_vcmpnezh">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequb : ClangBuiltin<"__builtin_altivec_vcmpequb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsb : ClangBuiltin<"__builtin_altivec_vcmpgtsb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtub : ClangBuiltin<"__builtin_altivec_vcmpgtub">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpneb : ClangBuiltin<"__builtin_altivec_vcmpneb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnezb : ClangBuiltin<"__builtin_altivec_vcmpnezb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequq : ClangBuiltin<"__builtin_altivec_vcmpequq">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsq : ClangBuiltin<"__builtin_altivec_vcmpgtsq">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtuq : ClangBuiltin<"__builtin_altivec_vcmpgtuq">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpequq_p : ClangBuiltin<"__builtin_altivec_vcmpequq_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v1i128_ty,llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsq_p : ClangBuiltin<"__builtin_altivec_vcmpgtsq_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v1i128_ty,llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtuq_p : ClangBuiltin<"__builtin_altivec_vcmpgtuq_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v1i128_ty,llvm_v1i128_ty],
                            [IntrNoMem]>;

  // Predicate Comparisons.  The first operand specifies interpretation of CR6.
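  // A constant 0-3 in that operand selects which CR6 bit, and in which sense,
  // becomes the i32 result (e.g. "all elements true" vs. "no element true");
  // the exact encoding follows clang's __CR6_* constants.  Illustrative IR,
  // with an assumed predicate value:
  //   %p = call i32 @llvm.ppc.altivec.vcmpeqfp.p(i32 2, <4 x float> %a,
  //                                              <4 x float> %b)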
  def int_ppc_altivec_vcmpbfp_p : ClangBuiltin<"__builtin_altivec_vcmpbfp_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpeqfp_p : ClangBuiltin<"__builtin_altivec_vcmpeqfp_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgefp_p : ClangBuiltin<"__builtin_altivec_vcmpgefp_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtfp_p : ClangBuiltin<"__builtin_altivec_vcmpgtfp_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequd_p : ClangBuiltin<"__builtin_altivec_vcmpequd_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsd_p : ClangBuiltin<"__builtin_altivec_vcmpgtsd_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtud_p : ClangBuiltin<"__builtin_altivec_vcmpgtud_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequw_p : ClangBuiltin<"__builtin_altivec_vcmpequw_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsw_p : ClangBuiltin<"__builtin_altivec_vcmpgtsw_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtuw_p : ClangBuiltin<"__builtin_altivec_vcmpgtuw_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnew_p : ClangBuiltin<"__builtin_altivec_vcmpnew_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnezw_p : ClangBuiltin<"__builtin_altivec_vcmpnezw_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequh_p : ClangBuiltin<"__builtin_altivec_vcmpequh_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsh_p : ClangBuiltin<"__builtin_altivec_vcmpgtsh_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtuh_p : ClangBuiltin<"__builtin_altivec_vcmpgtuh_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpneh_p : ClangBuiltin<"__builtin_altivec_vcmpneh_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnezh_p : ClangBuiltin<"__builtin_altivec_vcmpnezh_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
                            [IntrNoMem]>;

  def int_ppc_altivec_vcmpequb_p : ClangBuiltin<"__builtin_altivec_vcmpequb_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtsb_p : ClangBuiltin<"__builtin_altivec_vcmpgtsb_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpgtub_p : ClangBuiltin<"__builtin_altivec_vcmpgtub_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpneb_p : ClangBuiltin<"__builtin_altivec_vcmpneb_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vcmpnezb_p : ClangBuiltin<"__builtin_altivec_vcmpnezb_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
                            [IntrNoMem]>;
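  // Vector count leading/trailing zero least-significant-bits byte, and
  // vector byte/doubleword/quadword parity.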
  def int_ppc_altivec_vclzlsbb : ClangBuiltin<"__builtin_altivec_vclzlsbb">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],[llvm_v16i8_ty],[IntrNoMem]>;
  def int_ppc_altivec_vctzlsbb : ClangBuiltin<"__builtin_altivec_vctzlsbb">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],[llvm_v16i8_ty],[IntrNoMem]>;
  def int_ppc_altivec_vprtybw : ClangBuiltin<"__builtin_altivec_vprtybw">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],[llvm_v4i32_ty],[IntrNoMem]>;
  def int_ppc_altivec_vprtybd : ClangBuiltin<"__builtin_altivec_vprtybd">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],[llvm_v2i64_ty],[IntrNoMem]>;
  def int_ppc_altivec_vprtybq : ClangBuiltin<"__builtin_altivec_vprtybq">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty],[llvm_v1i128_ty],[IntrNoMem]>;

  // BCD intrinsics.
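  // For bcdadd/bcdsub the trailing i32 (hence the ImmArg) is the
  // instruction's PS ("preferred sign") immediate; for the _p predicate
  // forms the leading i32 presumably selects the CR6 interpretation,
  // analogous to the predicate comparisons above.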
  def int_ppc_bcdadd : ClangBuiltin<"__builtin_ppc_bcdadd">,
    DefaultAttrsIntrinsic<
    [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
    [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_ppc_bcdadd_p : ClangBuiltin<"__builtin_ppc_bcdadd_p">,
    DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_v16i8_ty, llvm_v16i8_ty],
    [IntrNoMem, ImmArg<ArgIndex<0>>]>;
  def int_ppc_bcdsub : ClangBuiltin<"__builtin_ppc_bcdsub">,
    DefaultAttrsIntrinsic<
    [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
    [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_ppc_bcdsub_p : ClangBuiltin<"__builtin_ppc_bcdsub_p">,
    DefaultAttrsIntrinsic<
    [llvm_i32_ty], [llvm_i32_ty, llvm_v16i8_ty, llvm_v16i8_ty],
    [IntrNoMem, ImmArg<ArgIndex<0>>]>;

  // P10 Vector Extract with Mask
  def int_ppc_altivec_vextractbm : ClangBuiltin<"__builtin_altivec_vextractbm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextracthm : ClangBuiltin<"__builtin_altivec_vextracthm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextractwm : ClangBuiltin<"__builtin_altivec_vextractwm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextractdm : ClangBuiltin<"__builtin_altivec_vextractdm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextractqm : ClangBuiltin<"__builtin_altivec_vextractqm">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v1i128_ty], [IntrNoMem]>;

  // P10 Vector Expand with Mask
  def int_ppc_altivec_vexpandbm : ClangBuiltin<"__builtin_altivec_vexpandbm">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vexpandhm : ClangBuiltin<"__builtin_altivec_vexpandhm">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vexpandwm : ClangBuiltin<"__builtin_altivec_vexpandwm">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vexpanddm : ClangBuiltin<"__builtin_altivec_vexpanddm">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_vexpandqm : ClangBuiltin<"__builtin_altivec_vexpandqm">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_v1i128_ty], [IntrNoMem]>;

  // P10 Vector Count with Mask intrinsics.
  def int_ppc_altivec_vcntmbb : ClangBuiltin<"__builtin_altivec_vcntmbb">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_altivec_vcntmbh : ClangBuiltin<"__builtin_altivec_vcntmbh">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v8i16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_altivec_vcntmbw : ClangBuiltin<"__builtin_altivec_vcntmbw">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_altivec_vcntmbd : ClangBuiltin<"__builtin_altivec_vcntmbd">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  // P10 Move to VSR with Mask Intrinsics.
  def int_ppc_altivec_mtvsrbm : ClangBuiltin<"__builtin_altivec_mtvsrbm">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_mtvsrhm : ClangBuiltin<"__builtin_altivec_mtvsrhm">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_mtvsrwm : ClangBuiltin<"__builtin_altivec_mtvsrwm">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_mtvsrdm : ClangBuiltin<"__builtin_altivec_mtvsrdm">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_mtvsrqm : ClangBuiltin<"__builtin_altivec_mtvsrqm">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_i64_ty], [IntrNoMem]>;

  // P10 Vector Parallel Bits Deposit/Extract Doubleword Builtins.
  def int_ppc_altivec_vpdepd : ClangBuiltin<"__builtin_altivec_vpdepd">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vpextd : ClangBuiltin<"__builtin_altivec_vpextd">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;

  // P10 Vector String Isolate Intrinsics.
  def int_ppc_altivec_vstribr : ClangBuiltin<"__builtin_altivec_vstribr">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vstribl : ClangBuiltin<"__builtin_altivec_vstribl">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vstrihr : ClangBuiltin<"__builtin_altivec_vstrihr">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vstrihl : ClangBuiltin<"__builtin_altivec_vstrihl">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  // Predicate Intrinsics: The first operand specifies interpretation of CR6.
  def int_ppc_altivec_vstribr_p : ClangBuiltin<"__builtin_altivec_vstribr_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vstribl_p : ClangBuiltin<"__builtin_altivec_vstribl_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vstrihr_p : ClangBuiltin<"__builtin_altivec_vstrihr_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vstrihl_p : ClangBuiltin<"__builtin_altivec_vstrihl_p">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;

  // P10 Vector Centrifuge Builtin.
  def int_ppc_altivec_vcfuged : ClangBuiltin<"__builtin_altivec_vcfuged">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;

  // P10 Vector Gather Every Nth Bit Builtin.
  def int_ppc_altivec_vgnb : ClangBuiltin<"__builtin_altivec_vgnb">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v1i128_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  // P10 Vector Clear Bytes
  def int_ppc_altivec_vclrlb : ClangBuiltin<"__builtin_altivec_vclrlb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vclrrb : ClangBuiltin<"__builtin_altivec_vclrrb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem]>;

  // P10 Vector Shift Double Bit Immediate.
  def int_ppc_altivec_vsldbi : ClangBuiltin<"__builtin_altivec_vsldbi">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_ppc_altivec_vsrdbi : ClangBuiltin<"__builtin_altivec_vsrdbi">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  // P10 Vector Insert.
  def int_ppc_altivec_vinsblx : ClangBuiltin<"__builtin_altivec_vinsblx">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinsbrx : ClangBuiltin<"__builtin_altivec_vinsbrx">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinshlx : ClangBuiltin<"__builtin_altivec_vinshlx">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinshrx : ClangBuiltin<"__builtin_altivec_vinshrx">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinswlx : ClangBuiltin<"__builtin_altivec_vinswlx">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinswrx : ClangBuiltin<"__builtin_altivec_vinswrx">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinsdlx : ClangBuiltin<"__builtin_altivec_vinsdlx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinsdrx : ClangBuiltin<"__builtin_altivec_vinsdrx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinsbvlx : ClangBuiltin<"__builtin_altivec_vinsbvlx">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinsbvrx : ClangBuiltin<"__builtin_altivec_vinsbvrx">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinshvlx : ClangBuiltin<"__builtin_altivec_vinshvlx">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinshvrx : ClangBuiltin<"__builtin_altivec_vinshvrx">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinswvlx : ClangBuiltin<"__builtin_altivec_vinswvlx">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vinswvrx : ClangBuiltin<"__builtin_altivec_vinswvrx">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  // P10 Vector Insert with immediate.
  def int_ppc_altivec_vinsw :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_ppc_altivec_vinsd :
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_i64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  // P10 Vector Extract.
  def int_ppc_altivec_vextdubvlx : ClangBuiltin<"__builtin_altivec_vextdubvlx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextdubvrx : ClangBuiltin<"__builtin_altivec_vextdubvrx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextduhvlx : ClangBuiltin<"__builtin_altivec_vextduhvlx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextduhvrx : ClangBuiltin<"__builtin_altivec_vextduhvrx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextduwvlx : ClangBuiltin<"__builtin_altivec_vextduwvlx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextduwvrx : ClangBuiltin<"__builtin_altivec_vextduwvrx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextddvlx : ClangBuiltin<"__builtin_altivec_vextddvlx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vextddvrx : ClangBuiltin<"__builtin_altivec_vextddvrx">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
                            [IntrNoMem]>;
}

// Vector average.
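// Each of these computes the element-wise rounded average (a + b + 1) >> 1,
// using widened intermediate arithmetic so the +1 cannot overflow.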
def int_ppc_altivec_vavgsb : PowerPC_Vec_BBB_Intrinsic<"vavgsb">;
def int_ppc_altivec_vavgsh : PowerPC_Vec_HHH_Intrinsic<"vavgsh">;
def int_ppc_altivec_vavgsw : PowerPC_Vec_WWW_Intrinsic<"vavgsw">;
def int_ppc_altivec_vavgub : PowerPC_Vec_BBB_Intrinsic<"vavgub">;
def int_ppc_altivec_vavguh : PowerPC_Vec_HHH_Intrinsic<"vavguh">;
def int_ppc_altivec_vavguw : PowerPC_Vec_WWW_Intrinsic<"vavguw">;

// Vector maximum.
def int_ppc_altivec_vmaxfp : PowerPC_Vec_FFF_Intrinsic<"vmaxfp">;
def int_ppc_altivec_vmaxsb : PowerPC_Vec_BBB_Intrinsic<"vmaxsb">;
def int_ppc_altivec_vmaxsh : PowerPC_Vec_HHH_Intrinsic<"vmaxsh">;
def int_ppc_altivec_vmaxsw : PowerPC_Vec_WWW_Intrinsic<"vmaxsw">;
def int_ppc_altivec_vmaxsd : PowerPC_Vec_DDD_Intrinsic<"vmaxsd">;
def int_ppc_altivec_vmaxub : PowerPC_Vec_BBB_Intrinsic<"vmaxub">;
def int_ppc_altivec_vmaxuh : PowerPC_Vec_HHH_Intrinsic<"vmaxuh">;
def int_ppc_altivec_vmaxuw : PowerPC_Vec_WWW_Intrinsic<"vmaxuw">;
def int_ppc_altivec_vmaxud : PowerPC_Vec_DDD_Intrinsic<"vmaxud">;

// Vector minimum.
def int_ppc_altivec_vminfp : PowerPC_Vec_FFF_Intrinsic<"vminfp">;
def int_ppc_altivec_vminsb : PowerPC_Vec_BBB_Intrinsic<"vminsb">;
def int_ppc_altivec_vminsh : PowerPC_Vec_HHH_Intrinsic<"vminsh">;
def int_ppc_altivec_vminsw : PowerPC_Vec_WWW_Intrinsic<"vminsw">;
def int_ppc_altivec_vminsd : PowerPC_Vec_DDD_Intrinsic<"vminsd">;
def int_ppc_altivec_vminub : PowerPC_Vec_BBB_Intrinsic<"vminub">;
def int_ppc_altivec_vminuh : PowerPC_Vec_HHH_Intrinsic<"vminuh">;
def int_ppc_altivec_vminuw : PowerPC_Vec_WWW_Intrinsic<"vminuw">;
def int_ppc_altivec_vminud : PowerPC_Vec_DDD_Intrinsic<"vminud">;

// Saturating adds.
def int_ppc_altivec_vaddubs : PowerPC_Vec_BBB_Intrinsic<"vaddubs">;
def int_ppc_altivec_vaddsbs : PowerPC_Vec_BBB_Intrinsic<"vaddsbs">;
def int_ppc_altivec_vadduhs : PowerPC_Vec_HHH_Intrinsic<"vadduhs">;
def int_ppc_altivec_vaddshs : PowerPC_Vec_HHH_Intrinsic<"vaddshs">;
def int_ppc_altivec_vadduws : PowerPC_Vec_WWW_Intrinsic<"vadduws">;
def int_ppc_altivec_vaddsws : PowerPC_Vec_WWW_Intrinsic<"vaddsws">;
def int_ppc_altivec_vaddcuw : PowerPC_Vec_WWW_Intrinsic<"vaddcuw">;
def int_ppc_altivec_vaddcuq : PowerPC_Vec_QQQ_Intrinsic<"vaddcuq">;

// Saturating subs.
def int_ppc_altivec_vsububs : PowerPC_Vec_BBB_Intrinsic<"vsububs">;
def int_ppc_altivec_vsubsbs : PowerPC_Vec_BBB_Intrinsic<"vsubsbs">;
def int_ppc_altivec_vsubuhs : PowerPC_Vec_HHH_Intrinsic<"vsubuhs">;
def int_ppc_altivec_vsubshs : PowerPC_Vec_HHH_Intrinsic<"vsubshs">;
def int_ppc_altivec_vsubuws : PowerPC_Vec_WWW_Intrinsic<"vsubuws">;
def int_ppc_altivec_vsubsws : PowerPC_Vec_WWW_Intrinsic<"vsubsws">;
def int_ppc_altivec_vsubcuw : PowerPC_Vec_WWW_Intrinsic<"vsubcuw">;
def int_ppc_altivec_vsubcuq : PowerPC_Vec_QQQ_Intrinsic<"vsubcuq">;

let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
  // Saturating multiply-adds.
  def int_ppc_altivec_vmhaddshs : ClangBuiltin<"__builtin_altivec_vmhaddshs">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty, llvm_v8i16_ty],
                             [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vmhraddshs : ClangBuiltin<"__builtin_altivec_vmhraddshs">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty, llvm_v8i16_ty],
                             [IntrNoMem, IntrHasSideEffects]>;

  def int_ppc_altivec_vmaddfp : ClangBuiltin<"__builtin_altivec_vmaddfp">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vnmsubfp : ClangBuiltin<"__builtin_altivec_vnmsubfp">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;

  // Vector Multiply Sum Instructions.
  def int_ppc_altivec_vmsummbm : ClangBuiltin<"__builtin_altivec_vmsummbm">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vmsumshm : ClangBuiltin<"__builtin_altivec_vmsumshm">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vmsumshs : ClangBuiltin<"__builtin_altivec_vmsumshs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
                             llvm_v4i32_ty], [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vmsumubm : ClangBuiltin<"__builtin_altivec_vmsumubm">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vmsumuhm : ClangBuiltin<"__builtin_altivec_vmsumuhm">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vmsumudm : ClangBuiltin<"__builtin_altivec_vmsumudm">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
                             llvm_v1i128_ty], [IntrNoMem]>;
  def int_ppc_altivec_vmsumuhs : ClangBuiltin<"__builtin_altivec_vmsumuhs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
                             llvm_v4i32_ty], [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vmsumcud : ClangBuiltin<"__builtin_altivec_vmsumcud">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;

  // Vector Multiply Instructions.
  def int_ppc_altivec_vmulesb : ClangBuiltin<"__builtin_altivec_vmulesb">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulesh : ClangBuiltin<"__builtin_altivec_vmulesh">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulesw : ClangBuiltin<"__builtin_altivec_vmulesw">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulesd : PowerPC_Vec_QDD_Intrinsic<"vmulesd">;
  def int_ppc_altivec_vmuleub : ClangBuiltin<"__builtin_altivec_vmuleub">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmuleuh : ClangBuiltin<"__builtin_altivec_vmuleuh">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmuleuw : ClangBuiltin<"__builtin_altivec_vmuleuw">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmuleud : PowerPC_Vec_QDD_Intrinsic<"vmuleud">;

  def int_ppc_altivec_vmulosb : ClangBuiltin<"__builtin_altivec_vmulosb">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulosh : ClangBuiltin<"__builtin_altivec_vmulosh">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulosw : ClangBuiltin<"__builtin_altivec_vmulosw">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulosd : PowerPC_Vec_QDD_Intrinsic<"vmulosd">;
  def int_ppc_altivec_vmuloub : ClangBuiltin<"__builtin_altivec_vmuloub">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulouh : ClangBuiltin<"__builtin_altivec_vmulouh">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmulouw : ClangBuiltin<"__builtin_altivec_vmulouw">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vmuloud : PowerPC_Vec_QDD_Intrinsic<"vmuloud">;

  // Vector Sum Instructions.
  def int_ppc_altivec_vsumsws : ClangBuiltin<"__builtin_altivec_vsumsws">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vsum2sws : ClangBuiltin<"__builtin_altivec_vsum2sws">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vsum4sbs : ClangBuiltin<"__builtin_altivec_vsum4sbs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vsum4shs : ClangBuiltin<"__builtin_altivec_vsum4shs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vsum4ubs : ClangBuiltin<"__builtin_altivec_vsum4ubs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;

  // Vector Sign Extension Instructions
  def int_ppc_altivec_vextsb2w : ClangBuiltin<"__builtin_altivec_vextsb2w">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextsb2d : ClangBuiltin<"__builtin_altivec_vextsb2d">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextsh2w : ClangBuiltin<"__builtin_altivec_vextsh2w">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextsh2d : ClangBuiltin<"__builtin_altivec_vextsh2d">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextsw2d : ClangBuiltin<"__builtin_altivec_vextsw2d">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vextsd2q : ClangBuiltin<"__builtin_altivec_vextsd2q">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty], [llvm_v2i64_ty], [IntrNoMem]>;

  // Other multiplies.
  def int_ppc_altivec_vmladduhm : ClangBuiltin<"__builtin_altivec_vmladduhm">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;

  // Packs.
  def int_ppc_altivec_vpkpx : ClangBuiltin<"__builtin_altivec_vpkpx">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vpkshss : ClangBuiltin<"__builtin_altivec_vpkshss">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vpkshus : ClangBuiltin<"__builtin_altivec_vpkshus">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vpkswss : ClangBuiltin<"__builtin_altivec_vpkswss">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vpkswus : ClangBuiltin<"__builtin_altivec_vpkswus">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vpksdss : ClangBuiltin<"__builtin_altivec_vpksdss">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  def int_ppc_altivec_vpksdus : ClangBuiltin<"__builtin_altivec_vpksdus">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  // vpkuhum is lowered to a shuffle.
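  // For big-endian element order the equivalent shuffle takes the odd bytes
  // of the concatenated inputs; an illustrative IR sketch, with %a8/%b8 the
  // v8i16 operands bitcast to <16 x i8>:
  //   %r = shufflevector <16 x i8> %a8, <16 x i8> %b8,
  //        <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13,
  //                    i32 15, i32 17, i32 19, i32 21, i32 23, i32 25,
  //                    i32 27, i32 29, i32 31>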
  def int_ppc_altivec_vpkuhus : ClangBuiltin<"__builtin_altivec_vpkuhus">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  // vpkuwum is lowered to a shuffle.
  def int_ppc_altivec_vpkuwus : ClangBuiltin<"__builtin_altivec_vpkuwus">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, IntrHasSideEffects]>;
  // vpkudum is lowered to a shuffle.
  def int_ppc_altivec_vpkudus : ClangBuiltin<"__builtin_altivec_vpkudus">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem, IntrHasSideEffects]>;

  // Unpacks.
  def int_ppc_altivec_vupkhpx : ClangBuiltin<"__builtin_altivec_vupkhpx">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupkhsb : ClangBuiltin<"__builtin_altivec_vupkhsb">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupkhsh : ClangBuiltin<"__builtin_altivec_vupkhsh">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupkhsw : ClangBuiltin<"__builtin_altivec_vupkhsw">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupklpx : ClangBuiltin<"__builtin_altivec_vupklpx">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupklsb : ClangBuiltin<"__builtin_altivec_vupklsb">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupklsh : ClangBuiltin<"__builtin_altivec_vupklsh">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_ppc_altivec_vupklsw : ClangBuiltin<"__builtin_altivec_vupklsw">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;

  // FP <-> integer conversion.
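  // The i32 immediate (hence the ImmArg) is a scale factor: vcfsx/vcfux
  // divide the converted value by 2^UIMM, while vctsxs/vctuxs multiply by
  // 2^UIMM before the saturating conversion.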
  def int_ppc_altivec_vcfsx : ClangBuiltin<"__builtin_altivec_vcfsx">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_altivec_vcfux : ClangBuiltin<"__builtin_altivec_vcfux">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_altivec_vctsxs : ClangBuiltin<"__builtin_altivec_vctsxs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_altivec_vctuxs : ClangBuiltin<"__builtin_altivec_vctuxs">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_ppc_altivec_vrfim : ClangBuiltin<"__builtin_altivec_vrfim">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vrfin : ClangBuiltin<"__builtin_altivec_vrfin">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vrfip : ClangBuiltin<"__builtin_altivec_vrfip">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vrfiz : ClangBuiltin<"__builtin_altivec_vrfiz">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;

  // Add Extended Quadword
  def int_ppc_altivec_vaddeuqm : ClangBuiltin<"__builtin_altivec_vaddeuqm">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty],
                            [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vaddecuq : ClangBuiltin<"__builtin_altivec_vaddecuq">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty],
                            [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;

  // Sub Extended Quadword
  def int_ppc_altivec_vsubeuqm : ClangBuiltin<"__builtin_altivec_vsubeuqm">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty],
                            [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vsubecuq : ClangBuiltin<"__builtin_altivec_vsubecuq">,
      DefaultAttrsIntrinsic<[llvm_v1i128_ty],
                            [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;

  // P10 Vector Count Leading/Trailing Zeros under Bit Mask Builtins.
  def int_ppc_altivec_vclzdm : ClangBuiltin<"__builtin_altivec_vclzdm">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vctzdm : ClangBuiltin<"__builtin_altivec_vctzdm">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
}

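// Left Shifts.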
def int_ppc_altivec_vsl   : PowerPC_Vec_WWW_Intrinsic<"vsl">;
def int_ppc_altivec_vslo  : PowerPC_Vec_WWW_Intrinsic<"vslo">;

def int_ppc_altivec_vslb  : PowerPC_Vec_BBB_Intrinsic<"vslb">;
def int_ppc_altivec_vslv  : PowerPC_Vec_BBB_Intrinsic<"vslv">;
def int_ppc_altivec_vsrv  : PowerPC_Vec_BBB_Intrinsic<"vsrv">;
def int_ppc_altivec_vslh  : PowerPC_Vec_HHH_Intrinsic<"vslh">;
def int_ppc_altivec_vslw  : PowerPC_Vec_WWW_Intrinsic<"vslw">;

// Right Shifts.
def int_ppc_altivec_vsr   : PowerPC_Vec_WWW_Intrinsic<"vsr">;
def int_ppc_altivec_vsro  : PowerPC_Vec_WWW_Intrinsic<"vsro">;

def int_ppc_altivec_vsrb  : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
def int_ppc_altivec_vsrh  : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
def int_ppc_altivec_vsrw  : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
def int_ppc_altivec_vsrab : PowerPC_Vec_BBB_Intrinsic<"vsrab">;
def int_ppc_altivec_vsrah : PowerPC_Vec_HHH_Intrinsic<"vsrah">;
def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">;

// Rotates.
def int_ppc_altivec_vrlb  : PowerPC_Vec_BBB_Intrinsic<"vrlb">;
def int_ppc_altivec_vrlh  : PowerPC_Vec_HHH_Intrinsic<"vrlh">;
def int_ppc_altivec_vrlw  : PowerPC_Vec_WWW_Intrinsic<"vrlw">;
def int_ppc_altivec_vrld  : PowerPC_Vec_DDD_Intrinsic<"vrld">;

let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
  // Miscellaneous.
  def int_ppc_altivec_lvsl :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
  def int_ppc_altivec_lvsr :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;

  def int_ppc_altivec_vperm : ClangBuiltin<"__builtin_altivec_vperm_4si">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty, llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vsel : ClangBuiltin<"__builtin_altivec_vsel_4si">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
  def int_ppc_altivec_vgbbd : ClangBuiltin<"__builtin_altivec_vgbbd">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_vbpermq : ClangBuiltin<"__builtin_altivec_vbpermq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_altivec_vbpermd : ClangBuiltin<"__builtin_altivec_vbpermd">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
}

def int_ppc_altivec_vexptefp  : PowerPC_Vec_FF_Intrinsic<"vexptefp">;
def int_ppc_altivec_vlogefp   : PowerPC_Vec_FF_Intrinsic<"vlogefp">;
def int_ppc_altivec_vrefp     : PowerPC_Vec_FF_Intrinsic<"vrefp">;
def int_ppc_altivec_vrsqrtefp : PowerPC_Vec_FF_Intrinsic<"vrsqrtefp">;

// Power8 Intrinsics
// Crypto
let TargetPrefix = "ppc" in {  // All PPC intrinsics start with "llvm.ppc.".
  def int_ppc_altivec_crypto_vsbox :
      ClangBuiltin<"__builtin_altivec_crypto_vsbox">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
  def int_ppc_altivec_crypto_vpermxor :
      ClangBuiltin<"__builtin_altivec_crypto_vpermxor">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem]>;
  def int_ppc_altivec_crypto_vpermxor_be :
      ClangBuiltin<"__builtin_altivec_crypto_vpermxor_be">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem]>;

  def int_ppc_altivec_crypto_vshasigmad :
      ClangBuiltin<"__builtin_altivec_crypto_vshasigmad">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<2>>]>;
  def int_ppc_altivec_crypto_vshasigmaw :
      ClangBuiltin<"__builtin_altivec_crypto_vshasigmaw">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<2>>]>;
}
def int_ppc_altivec_crypto_vcipher :
            PowerPC_Vec_DDD_Intrinsic<"crypto_vcipher">;
def int_ppc_altivec_crypto_vcipherlast :
            PowerPC_Vec_DDD_Intrinsic<"crypto_vcipherlast">;
def int_ppc_altivec_crypto_vncipher :
            PowerPC_Vec_DDD_Intrinsic<"crypto_vncipher">;
def int_ppc_altivec_crypto_vncipherlast :
            PowerPC_Vec_DDD_Intrinsic<"crypto_vncipherlast">;
def int_ppc_altivec_crypto_vpmsumb :
            PowerPC_Vec_BBB_Intrinsic<"crypto_vpmsumb">;
def int_ppc_altivec_crypto_vpmsumh :
            PowerPC_Vec_HHH_Intrinsic<"crypto_vpmsumh">;
def int_ppc_altivec_crypto_vpmsumw :
            PowerPC_Vec_WWW_Intrinsic<"crypto_vpmsumw">;
def int_ppc_altivec_crypto_vpmsumd :
            PowerPC_Vec_DDD_Intrinsic<"crypto_vpmsumd">;

// Absolute Difference intrinsics
def int_ppc_altivec_vabsdub : PowerPC_Vec_BBB_Intrinsic<"vabsdub">;
def int_ppc_altivec_vabsduh : PowerPC_Vec_HHH_Intrinsic<"vabsduh">;
def int_ppc_altivec_vabsduw : PowerPC_Vec_WWW_Intrinsic<"vabsduw">;

// Vector rotates
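// The *nm ("then mask") forms rotate and then AND with a generated mask;
// the *mi ("then mask insert") forms insert the rotated bits under the mask,
// which is why they take the destination vector as an extra input operand.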
def int_ppc_altivec_vrlwnm :
      PowerPC_Vec_Intrinsic<"vrlwnm", [llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vrlwmi :
      PowerPC_Vec_Intrinsic<"vrlwmi", [llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
def int_ppc_altivec_vrldnm :
      PowerPC_Vec_Intrinsic<"vrldnm", [llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
def int_ppc_altivec_vrldmi :
      PowerPC_Vec_Intrinsic<"vrldmi", [llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;

def int_ppc_altivec_vrlqnm :
      PowerPC_Vec_Intrinsic<"vrlqnm", [llvm_v1i128_ty],
                            [llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;
def int_ppc_altivec_vrlqmi :
      PowerPC_Vec_Intrinsic<"vrlqmi", [llvm_v1i128_ty],
                            [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
                            [IntrNoMem]>;

// Vector Divide Extended Intrinsics.
def int_ppc_altivec_vdivesw : PowerPC_Vec_WWW_Intrinsic<"vdivesw">;
def int_ppc_altivec_vdiveuw : PowerPC_Vec_WWW_Intrinsic<"vdiveuw">;
def int_ppc_altivec_vdivesd : PowerPC_Vec_DDD_Intrinsic<"vdivesd">;
def int_ppc_altivec_vdiveud : PowerPC_Vec_DDD_Intrinsic<"vdiveud">;
def int_ppc_altivec_vdivesq : PowerPC_Vec_QQQ_Intrinsic<"vdivesq">;
def int_ppc_altivec_vdiveuq : PowerPC_Vec_QQQ_Intrinsic<"vdiveuq">;

// Vector Multiply High Intrinsics.
def int_ppc_altivec_vmulhsw : PowerPC_Vec_WWW_Intrinsic<"vmulhsw">;
def int_ppc_altivec_vmulhuw : PowerPC_Vec_WWW_Intrinsic<"vmulhuw">;
def int_ppc_altivec_vmulhsd : PowerPC_Vec_DDD_Intrinsic<"vmulhsd">;
def int_ppc_altivec_vmulhud : PowerPC_Vec_DDD_Intrinsic<"vmulhud">;

//===----------------------------------------------------------------------===//
// PowerPC VSX Intrinsic Definitions.

let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".

// Vector load.
def int_ppc_vsx_lxvw4x :
    DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty],
                          [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvd2x :
    DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_ptr_ty],
                          [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvw4x_be :
    DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty],
                          [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvd2x_be :
    DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_ptr_ty],
                          [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvl :
    DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i64_ty],
                          [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvll :
    DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i64_ty],
                          [IntrReadMem, IntrArgMemOnly]>;
def int_ppc_vsx_lxvp :
    DefaultAttrsIntrinsic<[llvm_v256i1_ty], [llvm_ptr_ty],
                          [IntrReadMem, IntrArgMemOnly]>;

// Vector store.
def int_ppc_vsx_stxvw4x : Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;
def int_ppc_vsx_stxvd2x : Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty],
                                    [IntrWriteMem, IntrArgMemOnly]>;
def int_ppc_vsx_stxvw4x_be : Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
                                       [IntrWriteMem, IntrArgMemOnly]>;
def int_ppc_vsx_stxvd2x_be : Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty],
                                       [IntrWriteMem, IntrArgMemOnly]>;
def int_ppc_vsx_stxvl :
      Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i64_ty],
      [IntrWriteMem, IntrArgMemOnly]>;
def int_ppc_vsx_stxvll :
      Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i64_ty],
      [IntrWriteMem, IntrArgMemOnly]>;
def int_ppc_vsx_stxvp :
      Intrinsic<[], [llvm_v256i1_ty, llvm_ptr_ty], [IntrWriteMem,
      IntrArgMemOnly]>;
// Vector and scalar maximum.
def int_ppc_vsx_xvmaxdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmaxdp">;
def int_ppc_vsx_xvmaxsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvmaxsp">;
def int_ppc_vsx_xsmaxdp : PowerPC_VSX_Sca_DDD_Intrinsic<"xsmaxdp">;

// Vector and scalar minimum.
def int_ppc_vsx_xvmindp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmindp">;
def int_ppc_vsx_xvminsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvminsp">;
def int_ppc_vsx_xsmindp : PowerPC_VSX_Sca_DDD_Intrinsic<"xsmindp">;

// Vector divide.
def int_ppc_vsx_xvdivdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvdivdp">;
def int_ppc_vsx_xvdivsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvdivsp">;

// Vector round to +infinity (ceil).
def int_ppc_vsx_xvrspip :
    DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvrdpip :
    DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

// Vector reciprocal estimate
def int_ppc_vsx_xvresp : ClangBuiltin<"__builtin_vsx_xvresp">,
    DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvredp : ClangBuiltin<"__builtin_vsx_xvredp">,
    DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

// Vector reciprocal square-root estimate
def int_ppc_vsx_xvrsqrtesp : ClangBuiltin<"__builtin_vsx_xvrsqrtesp">,
    DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvrsqrtedp : ClangBuiltin<"__builtin_vsx_xvrsqrtedp">,
    DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;

// Vector compare
def int_ppc_vsx_xvcmpeqdp :
      PowerPC_VSX_Intrinsic<"xvcmpeqdp", [llvm_v2i64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcmpeqdp_p : ClangBuiltin<"__builtin_vsx_xvcmpeqdp_p">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
                          [llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xvcmpeqsp :
      PowerPC_VSX_Intrinsic<"xvcmpeqsp", [llvm_v4i32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcmpeqsp_p : ClangBuiltin<"__builtin_vsx_xvcmpeqsp_p">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
                          [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xvcmpgedp :
      PowerPC_VSX_Intrinsic<"xvcmpgedp", [llvm_v2i64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcmpgedp_p : ClangBuiltin<"__builtin_vsx_xvcmpgedp_p">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
                          [llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xvcmpgesp :
      PowerPC_VSX_Intrinsic<"xvcmpgesp", [llvm_v4i32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcmpgesp_p : ClangBuiltin<"__builtin_vsx_xvcmpgesp_p">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
                          [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xvcmpgtdp :
      PowerPC_VSX_Intrinsic<"xvcmpgtdp", [llvm_v2i64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcmpgtdp_p : ClangBuiltin<"__builtin_vsx_xvcmpgtdp_p">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
                          [llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xvcmpgtsp :
      PowerPC_VSX_Intrinsic<"xvcmpgtsp", [llvm_v4i32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcmpgtsp_p : ClangBuiltin<"__builtin_vsx_xvcmpgtsp_p">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
                          [llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xxleqv :
      PowerPC_VSX_Intrinsic<"xxleqv", [llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xviexpdp :
      PowerPC_VSX_Intrinsic<"xviexpdp",[llvm_v2f64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty],[IntrNoMem]>;
def int_ppc_vsx_xviexpsp :
      PowerPC_VSX_Intrinsic<"xviexpsp",[llvm_v4f32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty],[IntrNoMem]>;
def int_ppc_vsx_xvcvdpsxws :
      PowerPC_VSX_Intrinsic<"xvcvdpsxws", [llvm_v4i32_ty],
                            [llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvdpuxws :
      PowerPC_VSX_Intrinsic<"xvcvdpuxws", [llvm_v4i32_ty],
                            [llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvspsxds :
      PowerPC_VSX_Intrinsic<"xvcvspsxds", [llvm_v2i64_ty],
                            [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvspuxds :
      PowerPC_VSX_Intrinsic<"xvcvspuxds", [llvm_v2i64_ty],
                            [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvsxwdp :
      PowerPC_VSX_Intrinsic<"xvcvsxwdp", [llvm_v2f64_ty],
                            [llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvuxwdp :
      PowerPC_VSX_Intrinsic<"xvcvuxwdp", [llvm_v2f64_ty],
                            [llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvspdp :
      PowerPC_VSX_Intrinsic<"xvcvspdp", [llvm_v2f64_ty],
                            [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvsxdsp :
      PowerPC_VSX_Intrinsic<"xvcvsxdsp", [llvm_v4f32_ty],
                            [llvm_v2i64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvuxdsp :
      PowerPC_VSX_Intrinsic<"xvcvuxdsp", [llvm_v4f32_ty],
                            [llvm_v2i64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvdpsp :
      PowerPC_VSX_Intrinsic<"xvcvdpsp", [llvm_v4f32_ty],
                            [llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvsphp :
      PowerPC_VSX_Intrinsic<"xvcvsphp", [llvm_v4f32_ty],
                            [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvxexpdp :
      PowerPC_VSX_Intrinsic<"xvxexpdp", [llvm_v2i64_ty],
                            [llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvxexpsp :
      PowerPC_VSX_Intrinsic<"xvxexpsp", [llvm_v4i32_ty],
                            [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvxsigdp :
      PowerPC_VSX_Intrinsic<"xvxsigdp", [llvm_v2i64_ty],
                            [llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvxsigsp :
      PowerPC_VSX_Intrinsic<"xvxsigsp", [llvm_v4i32_ty],
                            [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvtstdcdp :
      PowerPC_VSX_Intrinsic<"xvtstdcdp", [llvm_v2i64_ty],
                            [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_vsx_xvtstdcsp :
      PowerPC_VSX_Intrinsic<"xvtstdcsp", [llvm_v4i32_ty],
                            [llvm_v4f32_ty,llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_ppc_vsx_xvcvhpsp :
      PowerPC_VSX_Intrinsic<"xvcvhpsp", [llvm_v4f32_ty],
                            [llvm_v8i16_ty],[IntrNoMem]>;
def int_ppc_vsx_xvcvspbf16 :
      PowerPC_VSX_Intrinsic<"xvcvspbf16", [llvm_v16i8_ty],
                            [llvm_v16i8_ty], [IntrNoMem]>;
def int_ppc_vsx_xvcvbf16spn :
      PowerPC_VSX_Intrinsic<"xvcvbf16spn", [llvm_v16i8_ty],
                            [llvm_v16i8_ty], [IntrNoMem]>;
def int_ppc_vsx_xxextractuw :
      PowerPC_VSX_Intrinsic<"xxextractuw",[llvm_v2i64_ty],
                            [llvm_v2i64_ty,llvm_i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xxinsertw :
      PowerPC_VSX_Intrinsic<"xxinsertw",[llvm_v4i32_ty],
                            [llvm_v4i32_ty,llvm_v2i64_ty,llvm_i32_ty],
                            [IntrNoMem]>;
def int_ppc_vsx_xvtlsbb :
      PowerPC_VSX_Intrinsic<"xvtlsbb", [llvm_i32_ty],
                            [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvtdivdp :
      PowerPC_VSX_Intrinsic<"xvtdivdp", [llvm_i32_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvtdivsp :
      PowerPC_VSX_Intrinsic<"xvtdivsp", [llvm_i32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xvtsqrtdp :
      PowerPC_VSX_Intrinsic<"xvtsqrtdp", [llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_ppc_vsx_xvtsqrtsp :
      PowerPC_VSX_Intrinsic<"xvtsqrtsp", [llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_ppc_vsx_xxeval :
      PowerPC_VSX_Intrinsic<"xxeval", [llvm_v2i64_ty],
                           [llvm_v2i64_ty, llvm_v2i64_ty,
                            llvm_v2i64_ty, llvm_i32_ty],
                           [IntrNoMem, ImmArg<ArgIndex<3>>]>;
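// Illustrative IR call (a sketch, assuming the usual int_ppc_vsx_* ->
// llvm.ppc.vsx.* name mapping); the final operand is the xxeval truth-table
// immediate and must be a constant, per ImmArg<ArgIndex<3>>:
//   %r = call <2 x i64> @llvm.ppc.vsx.xxeval(<2 x i64> %a, <2 x i64> %b,
//                                            <2 x i64> %c, i32 6)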
def int_ppc_vsx_xxgenpcvbm :
      PowerPC_VSX_Intrinsic<"xxgenpcvbm", [llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xxgenpcvhm :
      PowerPC_VSX_Intrinsic<"xxgenpcvhm", [llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xxgenpcvwm :
      PowerPC_VSX_Intrinsic<"xxgenpcvwm", [llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_ppc_vsx_xxgenpcvdm :
      PowerPC_VSX_Intrinsic<"xxgenpcvdm", [llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;

// P10 VSX Vector permute extended.
def int_ppc_vsx_xxpermx : 
    ClangBuiltin<"__builtin_vsx_xxpermx">,
    DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                          [llvm_v16i8_ty,llvm_v16i8_ty,llvm_v16i8_ty,
                           llvm_i32_ty],
                          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
// P10 VSX Vector Blend Variable.
def int_ppc_vsx_xxblendvb: ClangBuiltin<"__builtin_vsx_xxblendvb">,
    DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                          [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xxblendvh: ClangBuiltin<"__builtin_vsx_xxblendvh">,
    DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                          [llvm_v8i16_ty, llvm_v8i16_ty,llvm_v8i16_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xxblendvw: ClangBuiltin<"__builtin_vsx_xxblendvw">,
    DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                          [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                          [IntrNoMem]>;
def int_ppc_vsx_xxblendvd: ClangBuiltin<"__builtin_vsx_xxblendvd">,
    DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                          [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                          [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// PowerPC HTM Intrinsic Definitions.

let TargetPrefix = "ppc" in {  // All intrinsics start with "llvm.ppc.".

def int_ppc_tbegin : ClangBuiltin<"__builtin_tbegin">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
def int_ppc_tend : ClangBuiltin<"__builtin_tend">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
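
// Illustrative C usage of the transaction begin/end builtins (a sketch only;
// the precise return-value semantics are those of the HTM builtins):
//   if (__builtin_tbegin(0)) {
//     /* transactional code */
//     __builtin_tend(0);
//   }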

def int_ppc_tabort : ClangBuiltin<"__builtin_tabort">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
def int_ppc_tabortwc : ClangBuiltin<"__builtin_tabortwc">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_ppc_tabortwci : ClangBuiltin<"__builtin_tabortwci">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_ppc_tabortdc : ClangBuiltin<"__builtin_tabortdc">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_ppc_tabortdci : ClangBuiltin<"__builtin_tabortdci">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;

def int_ppc_tcheck : ClangBuiltin<"__builtin_tcheck">,
      Intrinsic<[llvm_i32_ty], [], []>;
def int_ppc_treclaim : ClangBuiltin<"__builtin_treclaim">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
def int_ppc_trechkpt : ClangBuiltin<"__builtin_trechkpt">,
      Intrinsic<[llvm_i32_ty], [], []>;
def int_ppc_tsr : ClangBuiltin<"__builtin_tsr">,
      Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;

def int_ppc_get_texasr : ClangBuiltin<"__builtin_get_texasr">,
      Intrinsic<[llvm_i64_ty], [], []>;
def int_ppc_get_texasru : ClangBuiltin<"__builtin_get_texasru">,
      Intrinsic<[llvm_i64_ty], [], []>;
def int_ppc_get_tfhar : ClangBuiltin<"__builtin_get_tfhar">,
      Intrinsic<[llvm_i64_ty], [], []>;
def int_ppc_get_tfiar : ClangBuiltin<"__builtin_get_tfiar">,
      Intrinsic<[llvm_i64_ty], [], []>;

def int_ppc_set_texasr : ClangBuiltin<"__builtin_set_texasr">,
      Intrinsic<[], [llvm_i64_ty], []>;
def int_ppc_set_texasru : ClangBuiltin<"__builtin_set_texasru">,
      Intrinsic<[], [llvm_i64_ty], []>;
def int_ppc_set_tfhar : ClangBuiltin<"__builtin_set_tfhar">,
      Intrinsic<[], [llvm_i64_ty], []>;
def int_ppc_set_tfiar : ClangBuiltin<"__builtin_set_tfiar">,
      Intrinsic<[], [llvm_i64_ty], []>;

// Extended mnemonics
def int_ppc_tendall : ClangBuiltin<"__builtin_tendall">,
      Intrinsic<[llvm_i32_ty], [], []>;
def int_ppc_tresume : ClangBuiltin<"__builtin_tresume">,
      Intrinsic<[llvm_i32_ty], [], []>;
def int_ppc_tsuspend : ClangBuiltin<"__builtin_tsuspend">,
      Intrinsic<[llvm_i32_ty], [], []>;

def int_ppc_ttest : ClangBuiltin<"__builtin_ttest">,
      Intrinsic<[llvm_i64_ty], [], []>;

// We currently use llvm.ppc.cfence in the context of atomic loads, whose
// types in LLVM IR must be integer, pointer, or floating-point types. So
// llvm_any_ty here refers to one of those types; the backend is expected to
// lower them to the appropriate MVTs.
def int_ppc_cfence : Intrinsic<[], [llvm_any_ty], []>;

// PowerPC set FPSCR Intrinsic Definitions.
def int_ppc_setrnd : ClangBuiltin<"__builtin_setrnd">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrHasSideEffects]>;
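// Illustrative C usage (a sketch; the mode encoding and the meaning of the
// returned double follow the XL __builtin_setrnd semantics, not restated
// here):
//   double Old = __builtin_setrnd(0);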
}

let TargetPrefix = "ppc" in {
  def int_ppc_vsx_assemble_pair :
      DefaultAttrsIntrinsic<[llvm_v256i1_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  def int_ppc_vsx_disassemble_pair :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty, llvm_v16i8_ty],
                            [llvm_v256i1_ty], [IntrNoMem]>;

  def int_ppc_mma_assemble_acc :
      DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem]>;

  def int_ppc_mma_disassemble_acc :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_v16i8_ty], [llvm_v512i1_ty], [IntrNoMem]>;

  def int_ppc_mma_xxmtacc :
      DefaultAttrsIntrinsic<[llvm_v512i1_ty], [llvm_v512i1_ty], [IntrNoMem]>;

  def int_ppc_mma_xxmfacc :
      DefaultAttrsIntrinsic<[llvm_v512i1_ty], [llvm_v512i1_ty], [IntrNoMem]>;

  def int_ppc_mma_xxsetaccz :
      DefaultAttrsIntrinsic<[llvm_v512i1_ty], [], [IntrNoMem]>;
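
  // Illustrative IR (a sketch, assuming the usual int_ppc_mma_* ->
  // llvm.ppc.mma.* name mapping): build a 512-bit accumulator from four VSX
  // vectors, or materialize a zeroed one:
  //   %acc  = call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %a,
  //                  <16 x i8> %b, <16 x i8> %c, <16 x i8> %d)
  //   %zero = call <512 x i1> @llvm.ppc.mma.xxsetaccz()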

  // MMA Reduced-Precision: Outer Product Intrinsic Definitions.
  defm int_ppc_mma_xvi4ger8 :
        PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvi4ger8 :
        PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                                      llvm_i32_ty, llvm_i32_ty]>;

  defm int_ppc_mma_xvi8ger4 :
       PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvi8ger4 :
       PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                                     llvm_i32_ty, llvm_i32_ty]>;

  defm int_ppc_mma_xvi16ger2s :
       PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvi16ger2s :
       PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                                     llvm_i32_ty, llvm_i32_ty]>;

  defm int_ppc_mma_xvf16ger2 :
       PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvf16ger2 :
       PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                                  llvm_i32_ty, llvm_i32_ty]>;
  defm int_ppc_mma_xvf32ger :
       PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvf32ger :
       PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                                  llvm_i32_ty]>;
  defm int_ppc_mma_xvf64ger :
       PowerPC_MMA_ACC_Intrinsic<[llvm_v256i1_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvf64ger :
       PowerPC_MMA_ACC_Intrinsic<[llvm_v256i1_ty, llvm_v16i8_ty, llvm_i32_ty,
                                  llvm_i32_ty]>;

  // MMA Reduced-Precision: bfloat16 Outer Product Intrinsic Definitions.
  defm int_ppc_mma_xvbf16ger2 :
         PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvbf16ger2 :
         PowerPC_MMA_ACC_Intrinsic<
           [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;

  // MMA Reduced-Precision: Missing Integer-based Outer Product Operations.
  defm int_ppc_mma_xvi16ger2 :
         PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
  defm int_ppc_mma_pmxvi16ger2 :
         PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
                                       llvm_i32_ty, llvm_i32_ty]>;
  def int_ppc_mma_xvi8ger4spp :
      DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                            [llvm_v512i1_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_ppc_mma_pmxvi8ger4spp :
      DefaultAttrsIntrinsic<[llvm_v512i1_ty],
                            [llvm_v512i1_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                             llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
}

// XL Compat intrinsics.
let TargetPrefix = "ppc" in {
  def int_ppc_dcbfl : ClangBuiltin<"__builtin_ppc_dcbfl">,
                      Intrinsic<[], [llvm_ptr_ty], [IntrArgMemOnly]>;
  def int_ppc_dcbflp : ClangBuiltin<"__builtin_ppc_dcbflp">,
                       Intrinsic<[], [llvm_ptr_ty], [IntrArgMemOnly]>;
  def int_ppc_dcbst : ClangBuiltin<"__builtin_ppc_dcbst">,
                      Intrinsic<[], [llvm_ptr_ty], []>;
  def int_ppc_dcbt  : ClangBuiltin<"__builtin_ppc_dcbt">,
                      Intrinsic<[], [llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_ppc_dcbtst : ClangBuiltin<"__builtin_ppc_dcbtst">,
                       Intrinsic<[], [llvm_ptr_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_ppc_dcbz  : ClangBuiltin<"__builtin_ppc_dcbz">,
                      Intrinsic<[], [llvm_ptr_ty], []>;
  def int_ppc_icbt : ClangBuiltin<"__builtin_ppc_icbt">,
                     Intrinsic<[], [llvm_ptr_ty], []>;
  
  // Population Count in each Byte.
  def int_ppc_popcntb :
      DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
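  // For example, the per-byte population count of 0x0F01 is 0x0401: byte 0x0F
  // has four bits set and byte 0x01 has one.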
  
  // sync instruction (i.e., sync 0, a.k.a. hwsync)
  def int_ppc_sync : ClangBuiltin<"__builtin_ppc_sync">,
                     Intrinsic<[], [], []>;
  def int_ppc_iospace_sync : ClangBuiltin<"__builtin_ppc_iospace_sync">,
                             Intrinsic<[], [], []>;
  // isync instruction
  def int_ppc_isync : ClangBuiltin<"__builtin_ppc_isync">,
                      Intrinsic<[], [], []>;
  // lwsync is sync 1
  def int_ppc_lwsync : ClangBuiltin<"__builtin_ppc_lwsync">,
                       Intrinsic<[], [], []>;
  def int_ppc_iospace_lwsync : ClangBuiltin<"__builtin_ppc_iospace_lwsync">,
                               Intrinsic<[], [], []>;
  // eieio instruction
  def int_ppc_eieio : ClangBuiltin<"__builtin_ppc_eieio">,
                      Intrinsic<[],[],[]>;
  def int_ppc_iospace_eieio : ClangBuiltin<"__builtin_ppc_iospace_eieio">,
                              Intrinsic<[],[],[]>;
  def int_ppc_stdcx :
    ClangBuiltin<"__builtin_ppc_stdcx">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i64_ty],
              [IntrWriteMem, IntrArgMemOnly, IntrNoDuplicate]>;
  def int_ppc_stwcx :
    ClangBuiltin<"__builtin_ppc_stwcx">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
              [IntrWriteMem, IntrArgMemOnly]>;
  def int_ppc_sthcx :
    Intrinsic<[llvm_i32_ty], [ llvm_ptr_ty, llvm_i32_ty ],
              [IntrWriteMem, IntrArgMemOnly, IntrNoDuplicate]>;
  def int_ppc_stbcx :
    ClangBuiltin<"__builtin_ppc_stbcx">,
    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
              [IntrWriteMem, IntrArgMemOnly, IntrNoDuplicate]>;
  def int_ppc_dcbtstt : ClangBuiltin<"__builtin_ppc_dcbtstt">,
                        Intrinsic<[], [llvm_ptr_ty],
                                  [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_ppc_dcbtt : ClangBuiltin<"__builtin_ppc_dcbtt">,
                      Intrinsic<[], [llvm_ptr_ty],
                                [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_ppc_mftbu : ClangBuiltin<"__builtin_ppc_mftbu">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
  def int_ppc_mfmsr : ClangBuiltin<"__builtin_ppc_mfmsr">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
  def int_ppc_mfspr
      : DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_i32_ty], [ImmArg<ArgIndex<0>>]>;
  def int_ppc_mtmsr
      : ClangBuiltin<"__builtin_ppc_mtmsr">, Intrinsic<[], [llvm_i32_ty], []>;
  def int_ppc_mtspr
      : DefaultAttrsIntrinsic<[], [llvm_i32_ty, llvm_anyint_ty], [ImmArg<ArgIndex<0>>]>;
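  // Illustrative IR (a sketch; SPR numbers are target-defined, and the value
  // 268, assumed here to be the time base, is only an illustration):
  //   %tb = call i64 @llvm.ppc.mfspr.i64(i32 268)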
  def int_ppc_stfiw : ClangBuiltin<"__builtin_ppc_stfiw">,
                      DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_double_ty],
                                            [IntrWriteMem]>;
  // compare
  def int_ppc_cmpeqb
      : ClangBuiltin<"__builtin_ppc_cmpeqb">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_cmprb
      : ClangBuiltin<"__builtin_ppc_cmprb">,
        DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
                              llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<0>>]>;
  def int_ppc_setb
      : ClangBuiltin<"__builtin_ppc_setb">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_cmpb
      : DefaultAttrsIntrinsic<[llvm_anyint_ty],
                              [llvm_anyint_ty, llvm_anyint_ty], [IntrNoMem]>;
  // multiply
  def int_ppc_mulhd
      : ClangBuiltin<"__builtin_ppc_mulhd">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_mulhdu
      : ClangBuiltin<"__builtin_ppc_mulhdu">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_mulhw
      : ClangBuiltin<"__builtin_ppc_mulhw">,
        DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                              [IntrNoMem]>;
  def int_ppc_mulhwu
      : ClangBuiltin<"__builtin_ppc_mulhwu">,
        DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                              [IntrNoMem]>;
  def int_ppc_maddhd
      : ClangBuiltin<"__builtin_ppc_maddhd">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
                               llvm_i64_ty], [IntrNoMem]>;
  def int_ppc_maddhdu
      : ClangBuiltin<"__builtin_ppc_maddhdu">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
                               llvm_i64_ty], [IntrNoMem]>;
  def int_ppc_maddld
      : ClangBuiltin<"__builtin_ppc_maddld">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
                               llvm_i64_ty], [IntrNoMem]>;
  // load
  def int_ppc_load2r
      : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_ptr_ty],
                              [IntrReadMem, IntrArgMemOnly]>;
  def int_ppc_load4r
      : ClangBuiltin<"__builtin_ppc_load4r">,
        DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_ptr_ty],
                              [IntrReadMem, IntrArgMemOnly]>;
  def int_ppc_load8r
      : ClangBuiltin<"__builtin_ppc_load8r">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                              [IntrReadMem, IntrArgMemOnly]>;
  // store
  def int_ppc_store2r
      : ClangBuiltin<"__builtin_ppc_store2r">,
        Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], [IntrWriteMem]>;
  def int_ppc_store4r
      : ClangBuiltin<"__builtin_ppc_store4r">,
        Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], [IntrWriteMem]>;
  def int_ppc_store8r
      : ClangBuiltin<"__builtin_ppc_store8r">,
        Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], [IntrWriteMem]>;
  def int_ppc_insert_exp
      : ClangBuiltin<"__builtin_ppc_insert_exp">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_i64_ty],
                              [IntrNoMem]>;
  def int_ppc_extract_exp
      : ClangBuiltin<"__builtin_ppc_extract_exp">,
        DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_extract_sig
      : ClangBuiltin<"__builtin_ppc_extract_sig">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_mtfsb0
      : ClangBuiltin<"__builtin_ppc_mtfsb0">,
        DefaultAttrsIntrinsic<[], [llvm_i32_ty],
                              [IntrNoMem, IntrHasSideEffects,
                               ImmArg<ArgIndex<0>>]>;
  def int_ppc_mtfsb1
      : ClangBuiltin<"__builtin_ppc_mtfsb1">,
        DefaultAttrsIntrinsic<[], [llvm_i32_ty],
                              [IntrNoMem, IntrHasSideEffects,
                               ImmArg<ArgIndex<0>>]>;
  def int_ppc_mtfsf :
        DefaultAttrsIntrinsic<[], [llvm_i32_ty, llvm_double_ty],
                              [IntrNoMem, IntrHasSideEffects,
                               ImmArg<ArgIndex<0>>]>;
  def int_ppc_mtfsfi
      : ClangBuiltin<"__builtin_ppc_mtfsfi">,
        DefaultAttrsIntrinsic<[], [llvm_i32_ty, llvm_i32_ty],
                              [IntrNoMem, IntrHasSideEffects,
                               ImmArg<ArgIndex<0>>,ImmArg<ArgIndex<1>>]>;
  def int_ppc_fmsub
      : ClangBuiltin<"__builtin_ppc_fmsub">,
        DefaultAttrsIntrinsic<[llvm_double_ty],
                              [llvm_double_ty, llvm_double_ty, llvm_double_ty],
                              [IntrNoMem]>;
  def int_ppc_fmsubs
      : ClangBuiltin<"__builtin_ppc_fmsubs">,
        DefaultAttrsIntrinsic<[llvm_float_ty],
                              [llvm_float_ty, llvm_float_ty, llvm_float_ty],
                              [IntrNoMem]>;
  def int_ppc_fnmadd
      : ClangBuiltin<"__builtin_ppc_fnmadd">,
        DefaultAttrsIntrinsic<[llvm_double_ty],
                              [llvm_double_ty, llvm_double_ty, llvm_double_ty],
                              [IntrNoMem]>;
  def int_ppc_fnmadds
      : ClangBuiltin<"__builtin_ppc_fnmadds">,
        DefaultAttrsIntrinsic<[llvm_float_ty],
                              [llvm_float_ty, llvm_float_ty, llvm_float_ty],
                              [IntrNoMem]>;
  def int_ppc_fnmsub
      : DefaultAttrsIntrinsic<[llvm_anyfloat_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>,
                               LLVMMatchType<0>],
                              [IntrNoMem]>;
  def int_ppc_fre
      : ClangBuiltin<"__builtin_ppc_fre">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fres
      : ClangBuiltin<"__builtin_ppc_fres">,
        DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_ppc_addex
      : ClangBuiltin<"__builtin_ppc_addex">,
        DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
                              [IntrNoMem, IntrHasSideEffects, ImmArg<ArgIndex<2>>]>;
  def int_ppc_fsel : ClangBuiltin<"__builtin_ppc_fsel">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty, 
                             llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fsels : ClangBuiltin<"__builtin_ppc_fsels">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty,
                             llvm_float_ty], [IntrNoMem]>;
  def int_ppc_frsqrte : ClangBuiltin<"__builtin_ppc_frsqrte">,
      DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_frsqrtes : ClangBuiltin<"__builtin_ppc_frsqrtes">,
      DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
  def int_ppc_compare_exp_uo : ClangBuiltin<"__builtin_ppc_compare_exp_uo">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_double_ty, llvm_double_ty],
                            [IntrNoMem]>;
  def int_ppc_compare_exp_lt : ClangBuiltin<"__builtin_ppc_compare_exp_lt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], 
                            [llvm_double_ty, llvm_double_ty],
                            [IntrNoMem]>;
  def int_ppc_compare_exp_gt : ClangBuiltin<"__builtin_ppc_compare_exp_gt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty],
                            [llvm_double_ty, llvm_double_ty],
                            [IntrNoMem]>;
  def int_ppc_compare_exp_eq : ClangBuiltin<"__builtin_ppc_compare_exp_eq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], 
                            [llvm_double_ty, llvm_double_ty],
                            [IntrNoMem]>;
  def int_ppc_test_data_class
      : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                              [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_ppc_fnabs
      : ClangBuiltin<"__builtin_ppc_fnabs">,
        DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
  def int_ppc_fnabss
      : ClangBuiltin<"__builtin_ppc_fnabss">,
        DefaultAttrsIntrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;

  def int_ppc_convert_f128_to_ppcf128
      : DefaultAttrsIntrinsic<[llvm_ppcf128_ty], [llvm_f128_ty], [IntrNoMem]>;
  def int_ppc_convert_ppcf128_to_f128
      : DefaultAttrsIntrinsic<[llvm_f128_ty], [llvm_ppcf128_ty], [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// PowerPC Atomic Intrinsic Definitions.
let TargetPrefix = "ppc" in {
  class AtomicRMW128Intrinsic
    : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
                [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
                [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_ppc_atomicrmw_xchg_i128 : AtomicRMW128Intrinsic;
  def int_ppc_atomicrmw_add_i128  : AtomicRMW128Intrinsic;
  def int_ppc_atomicrmw_sub_i128  : AtomicRMW128Intrinsic;
  def int_ppc_atomicrmw_and_i128  : AtomicRMW128Intrinsic;
  def int_ppc_atomicrmw_or_i128   : AtomicRMW128Intrinsic;
  def int_ppc_atomicrmw_xor_i128  : AtomicRMW128Intrinsic;
  def int_ppc_atomicrmw_nand_i128 : AtomicRMW128Intrinsic;
  def int_ppc_cmpxchg_i128 : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
                                       [llvm_ptr_ty,
                                        llvm_i64_ty, llvm_i64_ty,
                                        llvm_i64_ty, llvm_i64_ty],
                                       [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
  def int_ppc_atomic_load_i128 :
    Intrinsic<[llvm_i64_ty, llvm_i64_ty],
              [llvm_ptr_ty],
              [IntrArgMemOnly, IntrReadMem, NoCapture<ArgIndex<0>>]>;
  def int_ppc_atomic_store_i128 :
    Intrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty],
              [IntrArgMemOnly, IntrWriteMem, NoCapture<ArgIndex<2>>]>;
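
  // Illustrative IR (a sketch, assuming the usual int_ppc_* -> llvm.ppc.*
  // name mapping, with a 128-bit value modeled as two i64 halves):
  //   %r = call { i64, i64 } @llvm.ppc.atomicrmw.xchg.i128(ptr %p, i64 %v0,
  //                                                        i64 %v1)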
}
//===- ReplaceConstant.h - Replacing LLVM constant expressions --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the utility function for replacing LLVM constant
// expressions by instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_REPLACECONSTANT_H
#define LLVM_IR_REPLACECONSTANT_H

#include <map>
#include <vector>

namespace llvm {

template <typename T> class ArrayRef;
class Constant;

/// Replace constant expressions users of the given constants with
/// instructions. Return whether anything was changed.
bool convertUsersOfConstantsToInstructions(ArrayRef<Constant *> Consts);
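
// Example use (a sketch):
//   GlobalVariable *GV = ...;
//   bool Changed = convertUsersOfConstantsToInstructions({GV});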

} // end namespace llvm

#endif // LLVM_IR_REPLACECONSTANT_H
//===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines classes that make it really easy to deal with intrinsic
// functions with the isa/dyncast family of functions.  In particular, this
// allows you to do things like:
//
//     if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
//        ... MCI->getDest() ... MCI->getSource() ...
//
// All intrinsic function calls are instances of the call instruction, so these
// are all subclasses of the CallInst class.  Note that none of these classes
// has state or virtual methods, which is an important part of this gross/neat
// hack working.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INTRINSICINST_H
#define LLVM_IR_INTRINSICINST_H

#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>

namespace llvm {

class Metadata;

/// A wrapper class for inspecting calls to intrinsic functions.
/// This allows the standard isa/dyncast/cast functionality to work with calls
/// to intrinsic functions.
class IntrinsicInst : public CallInst {
public:
  IntrinsicInst() = delete;
  IntrinsicInst(const IntrinsicInst &) = delete;
  IntrinsicInst &operator=(const IntrinsicInst &) = delete;

  /// Return the intrinsic ID of this intrinsic.
  Intrinsic::ID getIntrinsicID() const {
    return getCalledFunction()->getIntrinsicID();
  }

  /// Return true if swapping the first two arguments to the intrinsic produces
  /// the same result.
  bool isCommutative() const {
    switch (getIntrinsicID()) {
    case Intrinsic::maxnum:
    case Intrinsic::minnum:
    case Intrinsic::maximum:
    case Intrinsic::minimum:
    case Intrinsic::smax:
    case Intrinsic::smin:
    case Intrinsic::umax:
    case Intrinsic::umin:
    case Intrinsic::sadd_sat:
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_fix:
    case Intrinsic::umul_fix:
    case Intrinsic::smul_fix_sat:
    case Intrinsic::umul_fix_sat:
    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      return true;
    default:
      return false;
    }
  }

  /// Checks whether the intrinsic is an assume-like annotation intrinsic.
  bool isAssumeLikeIntrinsic() const {
    switch (getIntrinsicID()) {
    default: break;
    case Intrinsic::assume:
    case Intrinsic::sideeffect:
    case Intrinsic::pseudoprobe:
    case Intrinsic::dbg_assign:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_label:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
      return true;
    }
    return false;
  }

  /// Check if the intrinsic might lower into a regular function call in the
  /// course of IR transformations.
  static bool mayLowerToFunctionCall(Intrinsic::ID IID);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const CallInst *I) {
    if (const Function *CF = I->getCalledFunction())
      return CF->isIntrinsic();
    return false;
  }
  static bool classof(const Value *V) {
    return isa<CallInst>(V) && classof(cast<CallInst>(V));
  }
};
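
// Example use (a sketch), in the spirit of the dyn_cast example in the file
// header comment:
//   if (auto *II = dyn_cast<IntrinsicInst>(&I))
//     if (II->getIntrinsicID() == Intrinsic::assume)
//       ; // handle the assume here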

/// Check if \p ID corresponds to a lifetime intrinsic.
static inline bool isLifetimeIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return true;
  default:
    return false;
  }
}

/// This is the common base class for lifetime intrinsics.
class LifetimeIntrinsic : public IntrinsicInst {
public:
  /// \name Casting methods
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return isLifetimeIntrinsic(I->getIntrinsicID());
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

/// Check if \p ID corresponds to a debug info intrinsic.
static inline bool isDbgInfoIntrinsic(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::dbg_declare:
  case Intrinsic::dbg_value:
  case Intrinsic::dbg_label:
  case Intrinsic::dbg_assign:
    return true;
  default:
    return false;
  }
}

/// This is the common base class for debug info intrinsics.
class DbgInfoIntrinsic : public IntrinsicInst {
public:
  /// \name Casting methods
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return isDbgInfoIntrinsic(I->getIntrinsicID());
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

// Iterator for ValueAsMetadata that internally uses direct pointer iteration
// over either a ValueAsMetadata* or a ValueAsMetadata**, dereferencing to the
// ValueAsMetadata.
class location_op_iterator
    : public iterator_facade_base<location_op_iterator,
                                  std::bidirectional_iterator_tag, Value *> {
  PointerUnion<ValueAsMetadata *, ValueAsMetadata **> I;

public:
  location_op_iterator(ValueAsMetadata *SingleIter) : I(SingleIter) {}
  location_op_iterator(ValueAsMetadata **MultiIter) : I(MultiIter) {}

  location_op_iterator(const location_op_iterator &R) : I(R.I) {}
  location_op_iterator &operator=(const location_op_iterator &R) {
    I = R.I;
    return *this;
  }
  bool operator==(const location_op_iterator &RHS) const { return I == RHS.I; }
  const Value *operator*() const {
    ValueAsMetadata *VAM = isa<ValueAsMetadata *>(I)
                               ? cast<ValueAsMetadata *>(I)
                               : *cast<ValueAsMetadata **>(I);
    return VAM->getValue();
  };
  Value *operator*() {
    ValueAsMetadata *VAM = isa<ValueAsMetadata *>(I)
                               ? cast<ValueAsMetadata *>(I)
                               : *cast<ValueAsMetadata **>(I);
    return VAM->getValue();
  }
  location_op_iterator &operator++() {
    if (isa<ValueAsMetadata *>(I))
      I = cast<ValueAsMetadata *>(I) + 1;
    else
      I = cast<ValueAsMetadata **>(I) + 1;
    return *this;
  }
  location_op_iterator &operator--() {
    if (isa<ValueAsMetadata *>(I))
      I = cast<ValueAsMetadata *>(I) - 1;
    else
      I = cast<ValueAsMetadata **>(I) - 1;
    return *this;
  }
};

/// Lightweight class that wraps the location operand metadata of a debug
/// intrinsic. The raw location may be a ValueAsMetadata, an empty MDTuple,
/// or a DIArgList.
class RawLocationWrapper {
  Metadata *RawLocation = nullptr;

public:
  RawLocationWrapper() = default;
  explicit RawLocationWrapper(Metadata *RawLocation)
      : RawLocation(RawLocation) {
    // Allow ValueAsMetadata, empty MDTuple, DIArgList.
    assert(RawLocation && "unexpected null RawLocation");
    assert(isa<ValueAsMetadata>(RawLocation) || isa<DIArgList>(RawLocation) ||
           (isa<MDNode>(RawLocation) &&
            !cast<MDNode>(RawLocation)->getNumOperands()));
  }
  Metadata *getRawLocation() const { return RawLocation; }
  /// Get the locations corresponding to the variable referenced by the debug
  /// info intrinsic.  Depending on the intrinsic, this could be the
  /// variable's value or its address.
  iterator_range<location_op_iterator> location_ops() const;
  Value *getVariableLocationOp(unsigned OpIdx) const;
  unsigned getNumVariableLocationOps() const {
    if (hasArgList())
      return cast<DIArgList>(getRawLocation())->getArgs().size();
    return 1;
  }
  bool hasArgList() const { return isa<DIArgList>(getRawLocation()); }
  bool isKillLocation(const DIExpression *Expression) const {
    // Check for "kill" sentinel values.
    // Non-variadic: empty metadata.
    if (!hasArgList() && isa<MDNode>(getRawLocation()))
      return true;
    // Variadic: empty DIArgList with empty expression.
    if (getNumVariableLocationOps() == 0 && !Expression->isComplex())
      return true;
    // Variadic and non-variadic: Interpret expressions using undef or poison
    // values as kills.
    return any_of(location_ops(), [](Value *V) { return isa<UndefValue>(V); });
  }

  friend bool operator==(const RawLocationWrapper &A,
                         const RawLocationWrapper &B) {
    return A.RawLocation == B.RawLocation;
  }
  friend bool operator!=(const RawLocationWrapper &A,
                         const RawLocationWrapper &B) {
    return !(A == B);
  }
  friend bool operator>(const RawLocationWrapper &A,
                        const RawLocationWrapper &B) {
    return A.RawLocation > B.RawLocation;
  }
  friend bool operator>=(const RawLocationWrapper &A,
                         const RawLocationWrapper &B) {
    return A.RawLocation >= B.RawLocation;
  }
  friend bool operator<(const RawLocationWrapper &A,
                        const RawLocationWrapper &B) {
    return A.RawLocation < B.RawLocation;
  }
  friend bool operator<=(const RawLocationWrapper &A,
                         const RawLocationWrapper &B) {
    return A.RawLocation <= B.RawLocation;
  }
};

/// This is the common base class for debug info intrinsics for variables.
class DbgVariableIntrinsic : public DbgInfoIntrinsic {
public:
  /// Get the locations corresponding to the variable referenced by the debug
  /// info intrinsic.  Depending on the intrinsic, this could be the
  /// variable's value or its address.
  iterator_range<location_op_iterator> location_ops() const;

  Value *getVariableLocationOp(unsigned OpIdx) const;

  void replaceVariableLocationOp(Value *OldValue, Value *NewValue);
  void replaceVariableLocationOp(unsigned OpIdx, Value *NewValue);
  /// Adding a new location operand will always result in this intrinsic using
  /// an ArgList, and must always be accompanied by a new expression that uses
  /// the new operand.
  void addVariableLocationOps(ArrayRef<Value *> NewValues,
                              DIExpression *NewExpr);

  void setVariable(DILocalVariable *NewVar) {
    setArgOperand(1, MetadataAsValue::get(NewVar->getContext(), NewVar));
  }

  void setExpression(DIExpression *NewExpr) {
    setArgOperand(2, MetadataAsValue::get(NewExpr->getContext(), NewExpr));
  }

  unsigned getNumVariableLocationOps() const {
    return getWrappedLocation().getNumVariableLocationOps();
  }

  bool hasArgList() const { return getWrappedLocation().hasArgList(); }

  /// Does this describe the address of a local variable? True for dbg.declare,
  /// but not dbg.value, which describes its value, or dbg.assign, which
  /// describes a combination of the variable's value and address.
  bool isAddressOfVariable() const {
    return getIntrinsicID() == Intrinsic::dbg_declare;
  }

  void setKillLocation() {
    // TODO: When/if we remove duplicate values from DIArgLists, we don't need
    // this set anymore.
    SmallPtrSet<Value *, 4> RemovedValues;
    for (Value *OldValue : location_ops()) {
      if (!RemovedValues.insert(OldValue).second)
        continue;
      Value *Poison = PoisonValue::get(OldValue->getType());
      replaceVariableLocationOp(OldValue, Poison);
    }
  }

  bool isKillLocation() const {
    return getWrappedLocation().isKillLocation(getExpression());
  }

  DILocalVariable *getVariable() const {
    return cast<DILocalVariable>(getRawVariable());
  }

  DIExpression *getExpression() const {
    return cast<DIExpression>(getRawExpression());
  }

  Metadata *getRawLocation() const {
    return cast<MetadataAsValue>(getArgOperand(0))->getMetadata();
  }

  RawLocationWrapper getWrappedLocation() const {
    return RawLocationWrapper(getRawLocation());
  }

  Metadata *getRawVariable() const {
    return cast<MetadataAsValue>(getArgOperand(1))->getMetadata();
  }

  Metadata *getRawExpression() const {
    return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
  }

  /// Use of this should generally be avoided; instead,
  /// replaceVariableLocationOp and addVariableLocationOps should be used where
  /// possible to avoid creating invalid state.
  void setRawLocation(Metadata *Location) {
    return setArgOperand(0, MetadataAsValue::get(getContext(), Location));
  }

  /// Get the size (in bits) of the variable, or fragment of the variable that
  /// is described.
  std::optional<uint64_t> getFragmentSizeInBits() const;

  /// Get the FragmentInfo for the variable.
  std::optional<DIExpression::FragmentInfo> getFragment() const {
    return getExpression()->getFragmentInfo();
  }

  /// Get the FragmentInfo for the variable if it exists, otherwise return a
  /// FragmentInfo that covers the entire variable if the variable size is
  /// known, otherwise return a zero-sized fragment.
  DIExpression::FragmentInfo getFragmentOrEntireVariable() const {
    DIExpression::FragmentInfo VariableSlice(0, 0);
    // Get the fragment or variable size, or zero.
    if (auto Sz = getFragmentSizeInBits())
      VariableSlice.SizeInBits = *Sz;
    if (auto Frag = getExpression()->getFragmentInfo())
      VariableSlice.OffsetInBits = Frag->OffsetInBits;
    return VariableSlice;
  }

  /// \name Casting methods
  /// @{
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::dbg_assign:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
protected:
  void setArgOperand(unsigned i, Value *v) {
    DbgInfoIntrinsic::setArgOperand(i, v);
  }
  void setOperand(unsigned i, Value *v) { DbgInfoIntrinsic::setOperand(i, v); }
};

/// This represents the llvm.dbg.declare instruction.
class DbgDeclareInst : public DbgVariableIntrinsic {
public:
  Value *getAddress() const {
    assert(getNumVariableLocationOps() == 1 &&
           "dbg.declare must have exactly 1 location operand.");
    return getVariableLocationOp(0);
  }

  /// \name Casting methods
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::dbg_declare;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

/// This represents the llvm.dbg.value instruction.
class DbgValueInst : public DbgVariableIntrinsic {
public:
  // The default argument should only be used in ISel, and the default option
  // should be removed once ISel support for multiple location ops is complete.
  Value *getValue(unsigned OpIdx = 0) const {
    return getVariableLocationOp(OpIdx);
  }
  iterator_range<location_op_iterator> getValues() const {
    return location_ops();
  }

  /// \name Casting methods
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::dbg_value ||
           I->getIntrinsicID() == Intrinsic::dbg_assign;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

/// This represents the llvm.dbg.assign instruction.
class DbgAssignIntrinsic : public DbgValueInst {
  enum Operands {
    OpValue,
    OpVar,
    OpExpr,
    OpAssignID,
    OpAddress,
    OpAddressExpr,
  };

public:
  Value *getAddress() const;
  Metadata *getRawAddress() const {
    return cast<MetadataAsValue>(getArgOperand(OpAddress))->getMetadata();
  }
  Metadata *getRawAssignID() const {
    return cast<MetadataAsValue>(getArgOperand(OpAssignID))->getMetadata();
  }
  DIAssignID *getAssignID() const { return cast<DIAssignID>(getRawAssignID()); }
  Metadata *getRawAddressExpression() const {
    return cast<MetadataAsValue>(getArgOperand(OpAddressExpr))->getMetadata();
  }
  DIExpression *getAddressExpression() const {
    return cast<DIExpression>(getRawAddressExpression());
  }
  void setAddressExpression(DIExpression *NewExpr) {
    setArgOperand(OpAddressExpr,
                  MetadataAsValue::get(NewExpr->getContext(), NewExpr));
  }
  void setAssignId(DIAssignID *New);
  void setAddress(Value *V);
  /// Kill the address component.
  void setKillAddress();
  /// Check whether this kills the address component. This doesn't take into
  /// account the position of the intrinsic; therefore a return value of false
  /// does not guarantee the address is a valid location for the variable at
  /// the intrinsic's position in IR.
  bool isKillAddress() const;
  void setValue(Value *V);
  /// \name Casting methods
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::dbg_assign;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

/// This represents the llvm.dbg.label instruction.
class DbgLabelInst : public DbgInfoIntrinsic {
public:
  DILabel *getLabel() const { return cast<DILabel>(getRawLabel()); }

  Metadata *getRawLabel() const {
    return cast<MetadataAsValue>(getArgOperand(0))->getMetadata();
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::dbg_label;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

/// This is the common base class for vector predication intrinsics.
class VPIntrinsic : public IntrinsicInst {
public:
  /// \brief Declares an llvm.vp.* intrinsic in \p M that matches the parameters
  /// \p Params. Additionally, the load and gather intrinsics require
  /// \p ReturnType to be specified.
  static Function *getDeclarationForParams(Module *M, Intrinsic::ID,
                                           Type *ReturnType,
                                           ArrayRef<Value *> Params);

  static std::optional<unsigned> getMaskParamPos(Intrinsic::ID IntrinsicID);
  static std::optional<unsigned> getVectorLengthParamPos(
      Intrinsic::ID IntrinsicID);

  /// The llvm.vp.* intrinsic for this instruction opcode.
  static Intrinsic::ID getForOpcode(unsigned OC);

  // Whether \p ID is a VP intrinsic ID.
  static bool isVPIntrinsic(Intrinsic::ID);

  /// \return The mask parameter or nullptr.
  Value *getMaskParam() const;
  void setMaskParam(Value *);

  /// \return The vector length parameter or nullptr.
  Value *getVectorLengthParam() const;
  void setVectorLengthParam(Value *);

  /// \return Whether the vector length param can be ignored.
  bool canIgnoreVectorLengthParam() const;

  /// \return The static element count (vector number of elements) the vector
  /// length parameter applies to.
  ElementCount getStaticVectorLength() const;

  /// \return The alignment of the pointer used by this load/store/gather or
  /// scatter.
  MaybeAlign getPointerAlignment() const;
  // MaybeAlign setPointerAlignment(Align NewAlign); // TODO

  /// \return The pointer operand of this load, store, gather, or scatter.
  Value *getMemoryPointerParam() const;
  static std::optional<unsigned> getMemoryPointerParamPos(Intrinsic::ID);

  /// \return The data (payload) operand of this store or scatter.
  Value *getMemoryDataParam() const;
  static std::optional<unsigned> getMemoryDataParamPos(Intrinsic::ID);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    return isVPIntrinsic(I->getIntrinsicID());
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  // Equivalent non-predicated opcode
  std::optional<unsigned> getFunctionalOpcode() const {
    return getFunctionalOpcodeForVP(getIntrinsicID());
  }

  // Equivalent non-predicated constrained ID
  std::optional<unsigned> getConstrainedIntrinsicID() const {
    return getConstrainedIntrinsicIDForVP(getIntrinsicID());
  }

  // Equivalent non-predicated opcode
  static std::optional<unsigned> getFunctionalOpcodeForVP(Intrinsic::ID ID);

  // Equivalent non-predicated constrained ID
  static std::optional<unsigned>
  getConstrainedIntrinsicIDForVP(Intrinsic::ID ID);
};
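
// Example use (a sketch): query the predication operands of a vector
// predication call:
//   if (auto *VPI = dyn_cast<VPIntrinsic>(&I)) {
//     Value *Mask = VPI->getMaskParam();         // nullptr if absent
//     Value *EVL  = VPI->getVectorLengthParam(); // nullptr if absent
//   }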

/// This represents vector predication reduction intrinsics.
class VPReductionIntrinsic : public VPIntrinsic {
public:
  static bool isVPReduction(Intrinsic::ID ID);

  unsigned getStartParamPos() const;
  unsigned getVectorParamPos() const;

  static std::optional<unsigned> getStartParamPos(Intrinsic::ID ID);
  static std::optional<unsigned> getVectorParamPos(Intrinsic::ID ID);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return VPReductionIntrinsic::isVPReduction(I->getIntrinsicID());
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

class VPCastIntrinsic : public VPIntrinsic {
public:
  static bool isVPCast(Intrinsic::ID ID);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return VPCastIntrinsic::isVPCast(I->getIntrinsicID());
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

class VPCmpIntrinsic : public VPIntrinsic {
public:
  static bool isVPCmp(Intrinsic::ID ID);

  CmpInst::Predicate getPredicate() const;

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  /// @{
  static bool classof(const IntrinsicInst *I) {
    return VPCmpIntrinsic::isVPCmp(I->getIntrinsicID());
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  /// @}
};

/// This is the common base class for constrained floating point intrinsics.
class ConstrainedFPIntrinsic : public IntrinsicInst {
public:
  bool isUnaryOp() const;
  bool isTernaryOp() const;
  std::optional<RoundingMode> getRoundingMode() const;
  std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
  bool isDefaultFPEnvironment() const;

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I);
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// Constrained floating point compare intrinsics.
class ConstrainedFPCmpIntrinsic : public ConstrainedFPIntrinsic {
public:
  FCmpInst::Predicate getPredicate() const;
  bool isSignaling() const {
    return getIntrinsicID() == Intrinsic::experimental_constrained_fcmps;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::experimental_constrained_fcmp:
    case Intrinsic::experimental_constrained_fcmps:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class represents min/max intrinsics.
class MinMaxIntrinsic : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::umin:
    case Intrinsic::umax:
    case Intrinsic::smin:
    case Intrinsic::smax:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); }
  Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); }

  /// Returns the comparison predicate underlying the intrinsic.
  static ICmpInst::Predicate getPredicate(Intrinsic::ID ID) {
    switch (ID) {
    case Intrinsic::umin:
      return ICmpInst::Predicate::ICMP_ULT;
    case Intrinsic::umax:
      return ICmpInst::Predicate::ICMP_UGT;
    case Intrinsic::smin:
      return ICmpInst::Predicate::ICMP_SLT;
    case Intrinsic::smax:
      return ICmpInst::Predicate::ICMP_SGT;
    default:
      llvm_unreachable("Invalid intrinsic");
    }
  }

  /// Returns the comparison predicate underlying the intrinsic.
  ICmpInst::Predicate getPredicate() const {
    return getPredicate(getIntrinsicID());
  }

  /// Whether the intrinsic is signed or unsigned.
  static bool isSigned(Intrinsic::ID ID) {
    return ICmpInst::isSigned(getPredicate(ID));
  }

  /// Whether the intrinsic is signed or unsigned.
  bool isSigned() const { return isSigned(getIntrinsicID()); }

  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
  /// so there is a threshold value which, once reached, the result can no
  /// longer change. Return said threshold.
  static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits) {
    switch (ID) {
    case Intrinsic::umin:
      return APInt::getMinValue(numBits);
    case Intrinsic::umax:
      return APInt::getMaxValue(numBits);
    case Intrinsic::smin:
      return APInt::getSignedMinValue(numBits);
    case Intrinsic::smax:
      return APInt::getSignedMaxValue(numBits);
    default:
      llvm_unreachable("Invalid intrinsic");
    }
  }
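
  // Worked example (a sketch): for Intrinsic::umax with numBits == 8 this is
  // APInt::getMaxValue(8) == 255; once either operand reaches 255,
  // umax(x, 255) == 255 for every x, so the result can no longer change.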

  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
  /// so there is a threshold value which, once reached, the result can no
  /// longer change. Return said threshold.
  APInt getSaturationPoint(unsigned numBits) const {
    return getSaturationPoint(getIntrinsicID(), numBits);
  }

  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
  /// so there is a threshold value which, once reached, the result can no
  /// longer change. Return said threshold.
  static Constant *getSaturationPoint(Intrinsic::ID ID, Type *Ty) {
    return Constant::getIntegerValue(
        Ty, getSaturationPoint(ID, Ty->getScalarSizeInBits()));
  }

  /// Min/max intrinsics are monotonic: they operate on fixed-bitwidth values,
  /// so there is a threshold value which, once reached, the result can no
  /// longer change. Return said threshold.
  Constant *getSaturationPoint(Type *Ty) const {
    return getSaturationPoint(getIntrinsicID(), Ty);
  }
};

/// This class represents an intrinsic that is based on a binary operation.
/// This includes op.with.overflow and saturating add/sub intrinsics.
class BinaryOpIntrinsic : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  Value *getLHS() const { return const_cast<Value *>(getArgOperand(0)); }
  Value *getRHS() const { return const_cast<Value *>(getArgOperand(1)); }

  /// Returns the binary operation underlying the intrinsic.
  Instruction::BinaryOps getBinaryOp() const;

  /// Whether the intrinsic is signed or unsigned.
  bool isSigned() const;

  /// Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
  unsigned getNoWrapKind() const;
};

/// Represents an op.with.overflow intrinsic.
class WithOverflowInst : public BinaryOpIntrinsic {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::uadd_with_overflow:
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::usub_with_overflow:
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::umul_with_overflow:
    case Intrinsic::smul_with_overflow:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// Represents a saturating add/sub intrinsic.
class SaturatingInst : public BinaryOpIntrinsic {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::uadd_sat:
    case Intrinsic::sadd_sat:
    case Intrinsic::usub_sat:
    case Intrinsic::ssub_sat:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// Common base class for all memory intrinsics. Simply provides
/// common methods.
/// Written as CRTP to avoid a common base class amongst the
/// three atomicity hierarchies.
template <typename Derived> class MemIntrinsicBase : public IntrinsicInst {
private:
  enum { ARG_DEST = 0, ARG_LENGTH = 2 };

public:
  Value *getRawDest() const {
    return const_cast<Value *>(getArgOperand(ARG_DEST));
  }
  const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
  Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }

  Value *getLength() const {
    return const_cast<Value *>(getArgOperand(ARG_LENGTH));
  }
  const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
  Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }

  /// This is just like getRawDest, but it strips off any cast
  /// instructions (including addrspacecast) that feed it, giving the
  /// original input.  The returned value is guaranteed to be a pointer.
  Value *getDest() const { return getRawDest()->stripPointerCasts(); }

  unsigned getDestAddressSpace() const {
    return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
  }

  /// FIXME: Remove this function once transition to Align is over.
  /// Use getDestAlign() instead.
  LLVM_DEPRECATED("Use getDestAlign() instead", "getDestAlign")
  unsigned getDestAlignment() const {
    if (auto MA = getParamAlign(ARG_DEST))
      return MA->value();
    return 0;
  }
  MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }

  /// Set the specified arguments of the instruction.
  void setDest(Value *Ptr) {
    assert(getRawDest()->getType() == Ptr->getType() &&
           "setDest called with pointer of wrong type!");
    setArgOperand(ARG_DEST, Ptr);
  }

  void setDestAlignment(MaybeAlign Alignment) {
    removeParamAttr(ARG_DEST, Attribute::Alignment);
    if (Alignment)
      addParamAttr(ARG_DEST,
                   Attribute::getWithAlignment(getContext(), *Alignment));
  }
  void setDestAlignment(Align Alignment) {
    removeParamAttr(ARG_DEST, Attribute::Alignment);
    addParamAttr(ARG_DEST,
                 Attribute::getWithAlignment(getContext(), Alignment));
  }

  void setLength(Value *L) {
    assert(getLength()->getType() == L->getType() &&
           "setLength called with value of wrong type!");
    setArgOperand(ARG_LENGTH, L);
  }
};
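
// Usage sketch: these accessors are inherited by every memory intrinsic
// wrapper below. `MI` is a hypothetical pointer to one of those derived
// classes (e.g. MemIntrinsic).
//
//   Value *Dst = MI->getDest();   // destination with casts stripped
//   Value *Len = MI->getLength(); // byte-count operand
//   if (MaybeAlign DA = MI->getDestAlign())
//     if (*DA >= Align(16)) { /* alignment is known to be >= 16 */ }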

/// Common base class for all memory transfer intrinsics. Simply provides
/// common methods.
template <class BaseCL> class MemTransferBase : public BaseCL {
private:
  enum { ARG_SOURCE = 1 };

public:
  /// Return the arguments to the instruction.
  Value *getRawSource() const {
    return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE));
  }
  const Use &getRawSourceUse() const {
    return BaseCL::getArgOperandUse(ARG_SOURCE);
  }
  Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); }

  /// This is just like getRawSource, but it strips off any cast
  /// instructions that feed it, giving the original input.  The returned
  /// value is guaranteed to be a pointer.
  Value *getSource() const { return getRawSource()->stripPointerCasts(); }

  unsigned getSourceAddressSpace() const {
    return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
  }

  /// FIXME: Remove this function once transition to Align is over.
  /// Use getSourceAlign() instead.
  LLVM_DEPRECATED("Use getSourceAlign() instead", "getSourceAlign")
  unsigned getSourceAlignment() const {
    if (auto MA = BaseCL::getParamAlign(ARG_SOURCE))
      return MA->value();
    return 0;
  }

  MaybeAlign getSourceAlign() const {
    return BaseCL::getParamAlign(ARG_SOURCE);
  }

  void setSource(Value *Ptr) {
    assert(getRawSource()->getType() == Ptr->getType() &&
           "setSource called with pointer of wrong type!");
    BaseCL::setArgOperand(ARG_SOURCE, Ptr);
  }

  void setSourceAlignment(MaybeAlign Alignment) {
    BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
    if (Alignment)
      BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
                                           BaseCL::getContext(), *Alignment));
  }

  void setSourceAlignment(Align Alignment) {
    BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
    BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
                                         BaseCL::getContext(), Alignment));
  }
};

/// Common base class for all memset intrinsics. Simply provides
/// common methods.
template <class BaseCL> class MemSetBase : public BaseCL {
private:
  enum { ARG_VALUE = 1 };

public:
  Value *getValue() const {
    return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE));
  }
  const Use &getValueUse() const { return BaseCL::getArgOperandUse(ARG_VALUE); }
  Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); }

  void setValue(Value *Val) {
    assert(getValue()->getType() == Val->getType() &&
           "setValue called with value of wrong type!");
    BaseCL::setArgOperand(ARG_VALUE, Val);
  }
};

// The common base class for the atomic memset/memmove/memcpy intrinsics
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
class AtomicMemIntrinsic : public MemIntrinsicBase<AtomicMemIntrinsic> {
private:
  enum { ARG_ELEMENTSIZE = 3 };

public:
  Value *getRawElementSizeInBytes() const {
    return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
  }

  ConstantInt *getElementSizeInBytesCst() const {
    return cast<ConstantInt>(getRawElementSizeInBytes());
  }

  uint32_t getElementSizeInBytes() const {
    return getElementSizeInBytesCst()->getZExtValue();
  }

  void setElementSizeInBytes(Constant *V) {
    assert(V->getType() == Type::getInt8Ty(getContext()) &&
           "setElementSizeInBytes called with value of wrong type!");
    setArgOperand(ARG_ELEMENTSIZE, V);
  }

  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};
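
// Usage sketch: the element-wise atomic intrinsics carry their access
// granularity as a constant operand. `AMI` is a hypothetical
// AtomicMemIntrinsic pointer.
//
//   uint32_t ElSz = AMI->getElementSizeInBytes(); // e.g. 1, 2, 4, 8, 16
//   // Per LangRef, the length operand must be a multiple of ElSz.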

/// This class represents the atomic memset intrinsic,
/// i.e. llvm.element.unordered.atomic.memset.
class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

// This class wraps the atomic memcpy/memmove intrinsics
// i.e. llvm.element.unordered.atomic.memcpy/memmove
class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class represents the atomic memcpy intrinsic
/// i.e. llvm.element.unordered.atomic.memcpy
class AtomicMemCpyInst : public AtomicMemTransferInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class represents the atomic memmove intrinsic
/// i.e. llvm.element.unordered.atomic.memmove
class AtomicMemMoveInst : public AtomicMemTransferInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memmove_element_unordered_atomic;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This is the common base class for memset/memcpy/memmove.
class MemIntrinsic : public MemIntrinsicBase<MemIntrinsic> {
private:
  enum { ARG_VOLATILE = 3 };

public:
  ConstantInt *getVolatileCst() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(ARG_VOLATILE)));
  }

  bool isVolatile() const { return !getVolatileCst()->isZero(); }

  void setVolatile(Constant *V) { setArgOperand(ARG_VOLATILE, V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::memcpy_inline:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class wraps the llvm.memset and llvm.memset.inline intrinsics.
class MemSetInst : public MemSetBase<MemIntrinsic> {
public:
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class wraps the llvm.memset.inline intrinsic.
class MemSetInlineInst : public MemSetInst {
public:
  ConstantInt *getLength() const {
    return cast<ConstantInt>(MemSetInst::getLength());
  }
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memset_inline;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class wraps the llvm.memcpy, llvm.memmove, and llvm.memcpy.inline
/// intrinsics.
class MemTransferInst : public MemTransferBase<MemIntrinsic> {
public:
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
    case Intrinsic::memcpy_inline:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class wraps the llvm.memcpy and llvm.memcpy.inline intrinsics.
class MemCpyInst : public MemTransferInst {
public:
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memcpy ||
           I->getIntrinsicID() == Intrinsic::memcpy_inline;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class wraps the llvm.memmove intrinsic.
class MemMoveInst : public MemTransferInst {
public:
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memmove;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class wraps the llvm.memcpy.inline intrinsic.
class MemCpyInlineInst : public MemCpyInst {
public:
  ConstantInt *getLength() const {
    return cast<ConstantInt>(MemCpyInst::getLength());
  }
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::memcpy_inline;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};
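
// Usage sketch: the *.inline variants guarantee a compile-time-constant
// length (and are never lowered to a library call), which the subclass
// encodes by narrowing getLength() to ConstantInt. `I` is a hypothetical
// Instruction pointer.
//
//   if (auto *MCI = dyn_cast<MemCpyInlineInst>(I)) {
//     uint64_t Bytes = MCI->getLength()->getZExtValue();
//     (void)Bytes; // known constant; no libcall will be emitted
//   }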

// The common base class for any memset/memmove/memcpy intrinsic,
// whether atomic or non-atomic,
// i.e. llvm.element.unordered.atomic.memset/memcpy/memmove
//  and llvm.memset/memcpy/memmove
class AnyMemIntrinsic : public MemIntrinsicBase<AnyMemIntrinsic> {
public:
  bool isVolatile() const {
    // Only the non-atomic intrinsics can be volatile
    if (auto *MI = dyn_cast<MemIntrinsic>(this))
      return MI->isVolatile();
    return false;
  }

  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class represents any memset intrinsic,
/// i.e. llvm.element.unordered.atomic.memset,
/// llvm.memset, and llvm.memset.inline.
class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memset_inline:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

// This class wraps any memcpy/memmove intrinsics,
// i.e. llvm.element.unordered.atomic.memcpy/memmove
// and  llvm.memcpy/memcpy.inline/memmove
class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memmove:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class represents any memcpy intrinsic,
/// i.e. llvm.element.unordered.atomic.memcpy,
/// llvm.memcpy, and llvm.memcpy.inline.
class AnyMemCpyInst : public AnyMemTransferInst {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This class represents any memmove intrinsic
/// i.e. llvm.element.unordered.atomic.memmove
///  and llvm.memmove
class AnyMemMoveInst : public AnyMemTransferInst {
public:
  static bool classof(const IntrinsicInst *I) {
    switch (I->getIntrinsicID()) {
    case Intrinsic::memmove:
    case Intrinsic::memmove_element_unordered_atomic:
      return true;
    default:
      return false;
    }
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};
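
// Usage sketch: the AnyMem* layer lets one transform handle the atomic and
// non-atomic families uniformly. `I` is a hypothetical Instruction pointer.
//
//   if (auto *AMT = dyn_cast<AnyMemTransferInst>(I)) {
//     Value *Src = AMT->getSource();
//     Value *Dst = AMT->getDest();
//     if (!AMT->isVolatile()) {
//       // e.g. consider forwarding the copied bytes.
//     }
//     (void)Src; (void)Dst;
//   }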

/// This represents the llvm.va_start intrinsic.
class VAStartInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::vastart;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); }
};

/// This represents the llvm.va_end intrinsic.
class VAEndInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::vaend;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  Value *getArgList() const { return const_cast<Value *>(getArgOperand(0)); }
};

/// This represents the llvm.va_copy intrinsic.
class VACopyInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::vacopy;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  Value *getDest() const { return const_cast<Value *>(getArgOperand(0)); }
  Value *getSrc() const { return const_cast<Value *>(getArgOperand(1)); }
};

/// A base class for all instrprof intrinsics.
class InstrProfInstBase : public IntrinsicInst {
public:
  // The name of the instrumented function.
  GlobalVariable *getName() const {
    return cast<GlobalVariable>(
        const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
  }
  // The hash of the CFG for the instrumented function.
  ConstantInt *getHash() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
  }
  // The number of counters for the instrumented function.
  ConstantInt *getNumCounters() const;
  // The index of the counter that this instruction acts on.
  ConstantInt *getIndex() const;
};

/// This represents the llvm.instrprof.cover intrinsic.
class InstrProfCoverInst : public InstrProfInstBase {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::instrprof_cover;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This represents the llvm.instrprof.increment intrinsic.
class InstrProfIncrementInst : public InstrProfInstBase {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::instrprof_increment ||
           I->getIntrinsicID() == Intrinsic::instrprof_increment_step;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
  Value *getStep() const;
};
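
// Usage sketch: lowering reads back the operands that instrumentation
// attached. `Inc` is a hypothetical InstrProfIncrementInst pointer.
//
//   GlobalVariable *NameVar = Inc->getName(); // holds the PGO name string
//   uint64_t FuncHash = Inc->getHash()->getZExtValue();
//   Value *Step = Inc->getStep(); // constant 1 for the non-step variant
//   (void)NameVar; (void)FuncHash; (void)Step;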

/// This represents the llvm.instrprof.increment.step intrinsic.
class InstrProfIncrementInstStep : public InstrProfIncrementInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::instrprof_increment_step;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This represents the llvm.instrprof.timestamp intrinsic.
class InstrProfTimestampInst : public InstrProfInstBase {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::instrprof_timestamp;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

/// This represents the llvm.instrprof.value.profile intrinsic.
class InstrProfValueProfileInst : public InstrProfInstBase {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::instrprof_value_profile;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  Value *getTargetValue() const {
    return cast<Value>(const_cast<Value *>(getArgOperand(2)));
  }

  ConstantInt *getValueKind() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
  }

  // Returns the value site index.
  ConstantInt *getIndex() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4)));
  }
};

class PseudoProbeInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::pseudoprobe;
  }

  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  ConstantInt *getFuncGuid() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(0)));
  }

  ConstantInt *getIndex() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
  }

  ConstantInt *getAttributes() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
  }

  ConstantInt *getFactor() const {
    return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
  }
};

class NoAliasScopeDeclInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl;
  }

  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  MDNode *getScopeList() const {
    auto *MV =
        cast<MetadataAsValue>(getOperand(Intrinsic::NoAliasScopeDeclScopeArg));
    return cast<MDNode>(MV->getMetadata());
  }

  void setScopeList(MDNode *ScopeList) {
    setOperand(Intrinsic::NoAliasScopeDeclScopeArg,
               MetadataAsValue::get(getContext(), ScopeList));
  }
};
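
// Usage sketch: inspecting the scope list carried by an
// llvm.experimental.noalias.scope.decl. `I` is a hypothetical Instruction
// pointer.
//
//   if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(I)) {
//     MDNode *Scopes = Decl->getScopeList();
//     for (const MDOperand &Op : Scopes->operands()) {
//       // Each operand names one declared alias scope.
//       (void)Op;
//     }
//   }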

/// Common base class for representing values projected from a statepoint.
/// Currently, the only projections available are gc.result and gc.relocate.
class GCProjectionInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate ||
      I->getIntrinsicID() == Intrinsic::experimental_gc_result;
  }

  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  /// Return true if this projection is tied to the invoke statepoint.
  /// This includes projections which are on the unwinding path.
  bool isTiedToInvoke() const {
    const Value *Token = getArgOperand(0);

    return isa<LandingPadInst>(Token) || isa<InvokeInst>(Token);
  }

  /// The statepoint with which this gc.relocate or gc.result is associated.
  const Value *getStatepoint() const;
};

/// Represents calls to the gc.relocate intrinsic.
class GCRelocateInst : public GCProjectionInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::experimental_gc_relocate;
  }

  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }

  /// The index into the associated statepoint's argument list
  /// which contains the base pointer of the pointer whose
  /// relocation this gc.relocate describes.
  unsigned getBasePtrIndex() const {
    return cast<ConstantInt>(getArgOperand(1))->getZExtValue();
  }

  /// The index into the associated statepoint's argument list which
  /// contains the pointer whose relocation this gc.relocate describes.
  unsigned getDerivedPtrIndex() const {
    return cast<ConstantInt>(getArgOperand(2))->getZExtValue();
  }

  Value *getBasePtr() const;
  Value *getDerivedPtr() const;
};
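
// Usage sketch: recovering the relocated pointers. `GRI` is a hypothetical
// GCRelocateInst pointer.
//
//   Value *Base    = GRI->getBasePtr();    // base of the relocated object
//   Value *Derived = GRI->getDerivedPtr(); // the pointer being described
//   // Both are resolved via indices into the associated statepoint's
//   // argument list (getBasePtrIndex()/getDerivedPtrIndex()).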

/// Represents calls to the gc.result intrinsic.
class GCResultInst : public GCProjectionInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::experimental_gc_result;
  }

  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};


/// This represents the llvm.assume intrinsic.
class AssumeInst : public IntrinsicInst {
public:
  static bool classof(const IntrinsicInst *I) {
    return I->getIntrinsicID() == Intrinsic::assume;
  }
  static bool classof(const Value *V) {
    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
};

} // end namespace llvm

#endif // LLVM_IR_INTRINSICINST_H
[zip entry: IR/IntrinsicsDirectX.h]
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_DX_ENUMS_H
#define LLVM_IR_INTRINSIC_DX_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum DXIntrinsics : unsigned {
// Enum values for intrinsics
    dx_create_handle = 3164,                          // llvm.dx.create.handle
    dx_flattened_thread_id_in_group,           // llvm.dx.flattened.thread.id.in.group
    dx_group_id,                               // llvm.dx.group.id
    dx_thread_id,                              // llvm.dx.thread.id
    dx_thread_id_in_group,                     // llvm.dx.thread.id.in.group
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
[zip entry: IR/DebugInfoMetadata.h]
//===- llvm/IR/DebugInfoMetadata.h - Debug info metadata --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declarations for metadata specific to debug info.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DEBUGINFOMETADATA_H
#define LLVM_IR_DEBUGINFOMETADATA_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Discriminator.h"
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>

// Helper macros for defining get() overrides.
#define DEFINE_MDNODE_GET_UNPACK_IMPL(...) __VA_ARGS__
#define DEFINE_MDNODE_GET_UNPACK(ARGS) DEFINE_MDNODE_GET_UNPACK_IMPL ARGS
#define DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(CLASS, FORMAL, ARGS)              \
  static CLASS *getDistinct(LLVMContext &Context,                              \
                            DEFINE_MDNODE_GET_UNPACK(FORMAL)) {                \
    return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Distinct);         \
  }                                                                            \
  static Temp##CLASS getTemporary(LLVMContext &Context,                        \
                                  DEFINE_MDNODE_GET_UNPACK(FORMAL)) {          \
    return Temp##CLASS(                                                        \
        getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Temporary));          \
  }
#define DEFINE_MDNODE_GET(CLASS, FORMAL, ARGS)                                 \
  static CLASS *get(LLVMContext &Context, DEFINE_MDNODE_GET_UNPACK(FORMAL)) {  \
    return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Uniqued);          \
  }                                                                            \
  static CLASS *getIfExists(LLVMContext &Context,                              \
                            DEFINE_MDNODE_GET_UNPACK(FORMAL)) {                \
    return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Uniqued,           \
                   /* ShouldCreate */ false);                                  \
  }                                                                            \
  DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(CLASS, FORMAL, ARGS)
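
// For illustration, a sketch of the surface these macros generate when
// applied as, say, DEFINE_MDNODE_GET(DISubrange, (int64_t Count, int64_t
// LowerBound), (Count, LowerBound)):
//
//   static DISubrange *get(LLVMContext &, int64_t, int64_t);         // uniqued
//   static DISubrange *getIfExists(LLVMContext &, int64_t, int64_t); // lookup
//   static DISubrange *getDistinct(LLVMContext &, int64_t, int64_t);
//   static TempDISubrange getTemporary(LLVMContext &, int64_t, int64_t);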

namespace llvm {

namespace dwarf {
enum Tag : uint16_t;
}

class DbgVariableIntrinsic;

extern cl::opt<bool> EnableFSDiscriminator;

class DITypeRefArray {
  const MDTuple *N = nullptr;

public:
  DITypeRefArray() = default;
  DITypeRefArray(const MDTuple *N) : N(N) {}

  explicit operator bool() const { return get(); }
  explicit operator MDTuple *() const { return get(); }

  MDTuple *get() const { return const_cast<MDTuple *>(N); }
  MDTuple *operator->() const { return get(); }
  MDTuple &operator*() const { return *get(); }

  // FIXME: Fix callers and remove condition on N.
  unsigned size() const { return N ? N->getNumOperands() : 0u; }
  DIType *operator[](unsigned I) const {
    return cast_or_null<DIType>(N->getOperand(I));
  }

  class iterator {
    MDNode::op_iterator I = nullptr;

  public:
    using iterator_category = std::input_iterator_tag;
    using value_type = DIType *;
    using difference_type = std::ptrdiff_t;
    using pointer = void;
    using reference = DIType *;

    iterator() = default;
    explicit iterator(MDNode::op_iterator I) : I(I) {}

    DIType *operator*() const { return cast_or_null<DIType>(*I); }

    iterator &operator++() {
      ++I;
      return *this;
    }

    iterator operator++(int) {
      iterator Temp(*this);
      ++I;
      return Temp;
    }

    bool operator==(const iterator &X) const { return I == X.I; }
    bool operator!=(const iterator &X) const { return I != X.I; }
  };

  // FIXME: Fix callers and remove condition on N.
  iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
  iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
};
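
// Usage sketch: DITypeRefArray iterates directly over DIType pointers.
// `Types` is a hypothetical DITypeRefArray (e.g. a subroutine's type array).
//
//   for (DIType *T : Types) {
//     // The first element of a subroutine type array is the return type
//     // and may be null (void), so check before dereferencing.
//     if (T)
//       (void)T->getName();
//   }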

/// Tagged DWARF-like metadata node.
///
/// A metadata node with a DWARF tag (i.e., a constant named \c DW_TAG_*,
/// defined in llvm/BinaryFormat/Dwarf.h).  Called \a DINode because it's
/// potentially used for non-DWARF output.
class DINode : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

protected:
  DINode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
         ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = std::nullopt)
      : MDNode(C, ID, Storage, Ops1, Ops2) {
    assert(Tag < 1u << 16);
    SubclassData16 = Tag;
  }
  ~DINode() = default;

  template <class Ty> Ty *getOperandAs(unsigned I) const {
    return cast_or_null<Ty>(getOperand(I));
  }

  StringRef getStringOperand(unsigned I) const {
    if (auto *S = getOperandAs<MDString>(I))
      return S->getString();
    return StringRef();
  }

  static MDString *getCanonicalMDString(LLVMContext &Context, StringRef S) {
    if (S.empty())
      return nullptr;
    return MDString::get(Context, S);
  }

  /// Allow subclasses to mutate the tag.
  void setTag(unsigned Tag) { SubclassData16 = Tag; }

public:
  dwarf::Tag getTag() const;

  /// Debug info flags.
  ///
  /// The three accessibility flags are mutually exclusive and rolled together
  /// in the first two bits.
  enum DIFlags : uint32_t {
#define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
#define DI_FLAG_LARGEST_NEEDED
#include "llvm/IR/DebugInfoFlags.def"
    FlagAccessibility = FlagPrivate | FlagProtected | FlagPublic,
    FlagPtrToMemberRep = FlagSingleInheritance | FlagMultipleInheritance |
                         FlagVirtualInheritance,
    LLVM_MARK_AS_BITMASK_ENUM(FlagLargest)
  };

  static DIFlags getFlag(StringRef Flag);
  static StringRef getFlagString(DIFlags Flag);

  /// Split up a flags bitfield.
  ///
  /// Split \c Flags into \c SplitFlags, a vector of its components.  Returns
  /// any remaining (unrecognized) bits.
  static DIFlags splitFlags(DIFlags Flags,
                            SmallVectorImpl<DIFlags> &SplitFlags);

  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
    case GenericDINodeKind:
    case DISubrangeKind:
    case DIEnumeratorKind:
    case DIBasicTypeKind:
    case DIStringTypeKind:
    case DIDerivedTypeKind:
    case DICompositeTypeKind:
    case DISubroutineTypeKind:
    case DIFileKind:
    case DICompileUnitKind:
    case DISubprogramKind:
    case DILexicalBlockKind:
    case DILexicalBlockFileKind:
    case DINamespaceKind:
    case DICommonBlockKind:
    case DITemplateTypeParameterKind:
    case DITemplateValueParameterKind:
    case DIGlobalVariableKind:
    case DILocalVariableKind:
    case DILabelKind:
    case DIObjCPropertyKind:
    case DIImportedEntityKind:
    case DIModuleKind:
    case DIGenericSubrangeKind:
    case DIAssignIDKind:
      return true;
    }
  }
};

/// Generic tagged DWARF-like metadata node.
///
/// An un-specialized DWARF-like metadata node.  The first operand is a
/// (possibly empty) null-separated \a MDString header that contains arbitrary
/// fields.  The remaining operands are \a dwarf_operands(), and are pointers
/// to other metadata.
class GenericDINode : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  GenericDINode(LLVMContext &C, StorageType Storage, unsigned Hash,
                unsigned Tag, ArrayRef<Metadata *> Ops1,
                ArrayRef<Metadata *> Ops2)
      : DINode(C, GenericDINodeKind, Storage, Tag, Ops1, Ops2) {
    setHash(Hash);
  }
  ~GenericDINode() { dropAllReferences(); }

  void setHash(unsigned Hash) { SubclassData32 = Hash; }
  void recalculateHash();

  static GenericDINode *getImpl(LLVMContext &Context, unsigned Tag,
                                StringRef Header, ArrayRef<Metadata *> DwarfOps,
                                StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, Tag, getCanonicalMDString(Context, Header),
                   DwarfOps, Storage, ShouldCreate);
  }

  static GenericDINode *getImpl(LLVMContext &Context, unsigned Tag,
                                MDString *Header, ArrayRef<Metadata *> DwarfOps,
                                StorageType Storage, bool ShouldCreate = true);

  TempGenericDINode cloneImpl() const {
    return getTemporary(getContext(), getTag(), getHeader(),
                        SmallVector<Metadata *, 4>(dwarf_operands()));
  }

public:
  unsigned getHash() const { return SubclassData32; }

  DEFINE_MDNODE_GET(GenericDINode,
                    (unsigned Tag, StringRef Header,
                     ArrayRef<Metadata *> DwarfOps),
                    (Tag, Header, DwarfOps))
  DEFINE_MDNODE_GET(GenericDINode,
                    (unsigned Tag, MDString *Header,
                     ArrayRef<Metadata *> DwarfOps),
                    (Tag, Header, DwarfOps))

  /// Return a (temporary) clone of this.
  TempGenericDINode clone() const { return cloneImpl(); }

  dwarf::Tag getTag() const;
  StringRef getHeader() const { return getStringOperand(0); }
  MDString *getRawHeader() const { return getOperandAs<MDString>(0); }

  op_iterator dwarf_op_begin() const { return op_begin() + 1; }
  op_iterator dwarf_op_end() const { return op_end(); }
  op_range dwarf_operands() const {
    return op_range(dwarf_op_begin(), dwarf_op_end());
  }

  unsigned getNumDwarfOperands() const { return getNumOperands() - 1; }
  const MDOperand &getDwarfOperand(unsigned I) const {
    return getOperand(I + 1);
  }
  void replaceDwarfOperandWith(unsigned I, Metadata *New) {
    replaceOperandWith(I + 1, New);
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == GenericDINodeKind;
  }
};

/// Assignment ID.
/// Used to link stores (as an attachment) and dbg.assigns (as an operand).
/// DIAssignID metadata is never uniqued as we compare instances using
/// referential equality (the instance/address is the ID).
class DIAssignID : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  DIAssignID(LLVMContext &C, StorageType Storage)
      : MDNode(C, DIAssignIDKind, Storage, std::nullopt) {}

  ~DIAssignID() { dropAllReferences(); }

  static DIAssignID *getImpl(LLVMContext &Context, StorageType Storage,
                             bool ShouldCreate = true);

  TempDIAssignID cloneImpl() const { return getTemporary(getContext()); }

public:
  // This node has no operands to replace.
  void replaceOperandWith(unsigned I, Metadata *New) = delete;

  static DIAssignID *getDistinct(LLVMContext &Context) {
    return getImpl(Context, Distinct);
  }
  static TempDIAssignID getTemporary(LLVMContext &Context) {
    return TempDIAssignID(getImpl(Context, Temporary));
  }
  // NOTE: Do not define get(LLVMContext&) - see class comment.

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIAssignIDKind;
  }
};

/// Array subrange.
///
/// TODO: Merge into node for DW_TAG_array_type, which should have a custom
/// type.
class DISubrange : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  DISubrange(LLVMContext &C, StorageType Storage, ArrayRef<Metadata *> Ops);

  ~DISubrange() = default;

  static DISubrange *getImpl(LLVMContext &Context, int64_t Count,
                             int64_t LowerBound, StorageType Storage,
                             bool ShouldCreate = true);

  static DISubrange *getImpl(LLVMContext &Context, Metadata *CountNode,
                             int64_t LowerBound, StorageType Storage,
                             bool ShouldCreate = true);

  static DISubrange *getImpl(LLVMContext &Context, Metadata *CountNode,
                             Metadata *LowerBound, Metadata *UpperBound,
                             Metadata *Stride, StorageType Storage,
                             bool ShouldCreate = true);

  TempDISubrange cloneImpl() const {
    return getTemporary(getContext(), getRawCountNode(), getRawLowerBound(),
                        getRawUpperBound(), getRawStride());
  }

public:
  DEFINE_MDNODE_GET(DISubrange, (int64_t Count, int64_t LowerBound = 0),
                    (Count, LowerBound))

  DEFINE_MDNODE_GET(DISubrange, (Metadata * CountNode, int64_t LowerBound = 0),
                    (CountNode, LowerBound))

  DEFINE_MDNODE_GET(DISubrange,
                    (Metadata * CountNode, Metadata *LowerBound,
                     Metadata *UpperBound, Metadata *Stride),
                    (CountNode, LowerBound, UpperBound, Stride))

  TempDISubrange clone() const { return cloneImpl(); }

  Metadata *getRawCountNode() const { return getOperand(0).get(); }

  Metadata *getRawLowerBound() const { return getOperand(1).get(); }

  Metadata *getRawUpperBound() const { return getOperand(2).get(); }

  Metadata *getRawStride() const { return getOperand(3).get(); }

  typedef PointerUnion<ConstantInt *, DIVariable *, DIExpression *> BoundType;

  BoundType getCount() const;

  BoundType getLowerBound() const;

  BoundType getUpperBound() const;

  BoundType getStride() const;

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DISubrangeKind;
  }
};
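
// Usage sketch: the bounds are PointerUnions because (e.g. in Fortran) a
// bound may be a constant, a variable, or an expression. `SR` is a
// hypothetical DISubrange pointer.
//
//   DISubrange::BoundType Count = SR->getCount();
//   if (auto *CI = Count.dyn_cast<ConstantInt *>()) {
//     uint64_t N = CI->getZExtValue(); // constant element count
//     (void)N;
//   }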

class DIGenericSubrange : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  DIGenericSubrange(LLVMContext &C, StorageType Storage,
                    ArrayRef<Metadata *> Ops);

  ~DIGenericSubrange() = default;

  static DIGenericSubrange *getImpl(LLVMContext &Context, Metadata *CountNode,
                                    Metadata *LowerBound, Metadata *UpperBound,
                                    Metadata *Stride, StorageType Storage,
                                    bool ShouldCreate = true);

  TempDIGenericSubrange cloneImpl() const {
    return getTemporary(getContext(), getRawCountNode(), getRawLowerBound(),
                        getRawUpperBound(), getRawStride());
  }

public:
  DEFINE_MDNODE_GET(DIGenericSubrange,
                    (Metadata * CountNode, Metadata *LowerBound,
                     Metadata *UpperBound, Metadata *Stride),
                    (CountNode, LowerBound, UpperBound, Stride))

  TempDIGenericSubrange clone() const { return cloneImpl(); }

  Metadata *getRawCountNode() const { return getOperand(0).get(); }
  Metadata *getRawLowerBound() const { return getOperand(1).get(); }
  Metadata *getRawUpperBound() const { return getOperand(2).get(); }
  Metadata *getRawStride() const { return getOperand(3).get(); }

  using BoundType = PointerUnion<DIVariable *, DIExpression *>;

  BoundType getCount() const;
  BoundType getLowerBound() const;
  BoundType getUpperBound() const;
  BoundType getStride() const;

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIGenericSubrangeKind;
  }
};

/// Enumeration value.
///
/// TODO: Add a pointer to the context (DW_TAG_enumeration_type) once that no
/// longer creates a type cycle.
class DIEnumerator : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  APInt Value;
  DIEnumerator(LLVMContext &C, StorageType Storage, const APInt &Value,
               bool IsUnsigned, ArrayRef<Metadata *> Ops);
  DIEnumerator(LLVMContext &C, StorageType Storage, int64_t Value,
               bool IsUnsigned, ArrayRef<Metadata *> Ops)
      : DIEnumerator(C, Storage, APInt(64, Value, !IsUnsigned), IsUnsigned,
                     Ops) {}
  ~DIEnumerator() = default;

  static DIEnumerator *getImpl(LLVMContext &Context, const APInt &Value,
                               bool IsUnsigned, StringRef Name,
                               StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, Value, IsUnsigned,
                   getCanonicalMDString(Context, Name), Storage, ShouldCreate);
  }
  static DIEnumerator *getImpl(LLVMContext &Context, const APInt &Value,
                               bool IsUnsigned, MDString *Name,
                               StorageType Storage, bool ShouldCreate = true);

  TempDIEnumerator cloneImpl() const {
    return getTemporary(getContext(), getValue(), isUnsigned(), getName());
  }

public:
  DEFINE_MDNODE_GET(DIEnumerator,
                    (int64_t Value, bool IsUnsigned, StringRef Name),
                    (APInt(64, Value, !IsUnsigned), IsUnsigned, Name))
  DEFINE_MDNODE_GET(DIEnumerator,
                    (int64_t Value, bool IsUnsigned, MDString *Name),
                    (APInt(64, Value, !IsUnsigned), IsUnsigned, Name))
  DEFINE_MDNODE_GET(DIEnumerator,
                    (APInt Value, bool IsUnsigned, StringRef Name),
                    (Value, IsUnsigned, Name))
  DEFINE_MDNODE_GET(DIEnumerator,
                    (APInt Value, bool IsUnsigned, MDString *Name),
                    (Value, IsUnsigned, Name))

  TempDIEnumerator clone() const { return cloneImpl(); }

  const APInt &getValue() const { return Value; }
  bool isUnsigned() const { return SubclassData32; }
  StringRef getName() const { return getStringOperand(0); }

  MDString *getRawName() const { return getOperandAs<MDString>(0); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIEnumeratorKind;
  }
};

/// Base class for scope-like contexts.
///
/// Base class for lexical scopes and types (which are also declaration
/// contexts).
///
/// TODO: Separate the concepts of declaration contexts and lexical scopes.
class DIScope : public DINode {
protected:
  DIScope(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
          ArrayRef<Metadata *> Ops)
      : DINode(C, ID, Storage, Tag, Ops) {}
  ~DIScope() = default;

public:
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }

  inline StringRef getFilename() const;
  inline StringRef getDirectory() const;
  inline std::optional<StringRef> getSource() const;

  StringRef getName() const;
  DIScope *getScope() const;

  /// Return the raw underlying file.
  ///
  /// A \a DIFile is a \a DIScope, but it doesn't point at a separate file (it
  /// \em is the file).  If \c this is an \a DIFile, we need to return \c this.
  /// Otherwise, return the first operand, which is where all other subclasses
  /// store their file pointer.
  Metadata *getRawFile() const {
    return isa<DIFile>(this) ? const_cast<DIScope *>(this)
                             : static_cast<Metadata *>(getOperand(0));
  }

  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
    case DIBasicTypeKind:
    case DIStringTypeKind:
    case DIDerivedTypeKind:
    case DICompositeTypeKind:
    case DISubroutineTypeKind:
    case DIFileKind:
    case DICompileUnitKind:
    case DISubprogramKind:
    case DILexicalBlockKind:
    case DILexicalBlockFileKind:
    case DINamespaceKind:
    case DICommonBlockKind:
    case DIModuleKind:
      return true;
    }
  }
};

/// File.
///
/// TODO: Merge with directory/file node (including users).
/// TODO: Canonicalize paths on creation.
class DIFile : public DIScope {
  friend class LLVMContextImpl;
  friend class MDNode;

public:
  /// Which algorithm (e.g. MD5) a checksum was generated with.
  ///
  /// The encoding is explicit because it is used directly in Bitcode. The
  /// value 0 is reserved to indicate the absence of a checksum in Bitcode.
  enum ChecksumKind {
    // The first variant was originally CSK_None, encoded as 0. The new
    // internal representation removes the need for this by wrapping the
    // ChecksumInfo in an Optional, but to preserve Bitcode compatibility the 0
    // encoding is reserved.
    CSK_MD5 = 1,
    CSK_SHA1 = 2,
    CSK_SHA256 = 3,
    CSK_Last = CSK_SHA256 // Should be last enumeration.
  };

  /// A single checksum, represented by a \a Kind and a \a Value (a string).
  template <typename T> struct ChecksumInfo {
    /// The kind of checksum which \a Value encodes.
    ChecksumKind Kind;
    /// The string value of the checksum.
    T Value;

    ChecksumInfo(ChecksumKind Kind, T Value) : Kind(Kind), Value(Value) {}
    ~ChecksumInfo() = default;
    bool operator==(const ChecksumInfo<T> &X) const {
      return Kind == X.Kind && Value == X.Value;
    }
    bool operator!=(const ChecksumInfo<T> &X) const { return !(*this == X); }
    StringRef getKindAsString() const { return getChecksumKindAsString(Kind); }
  };

private:
  std::optional<ChecksumInfo<MDString *>> Checksum;
  /// An optional source. A nullptr means none.
  MDString *Source;

  DIFile(LLVMContext &C, StorageType Storage,
         std::optional<ChecksumInfo<MDString *>> CS, MDString *Src,
         ArrayRef<Metadata *> Ops);
  ~DIFile() = default;

  static DIFile *getImpl(LLVMContext &Context, StringRef Filename,
                         StringRef Directory,
                         std::optional<ChecksumInfo<StringRef>> CS,
                         std::optional<StringRef> Source, StorageType Storage,
                         bool ShouldCreate = true) {
    std::optional<ChecksumInfo<MDString *>> MDChecksum;
    if (CS)
      MDChecksum.emplace(CS->Kind, getCanonicalMDString(Context, CS->Value));
    return getImpl(Context, getCanonicalMDString(Context, Filename),
                   getCanonicalMDString(Context, Directory), MDChecksum,
                   Source ? MDString::get(Context, *Source) : nullptr, Storage,
                   ShouldCreate);
  }
  static DIFile *getImpl(LLVMContext &Context, MDString *Filename,
                         MDString *Directory,
                         std::optional<ChecksumInfo<MDString *>> CS,
                         MDString *Source, StorageType Storage,
                         bool ShouldCreate = true);

  TempDIFile cloneImpl() const {
    return getTemporary(getContext(), getFilename(), getDirectory(),
                        getChecksum(), getSource());
  }

public:
  DEFINE_MDNODE_GET(DIFile,
                    (StringRef Filename, StringRef Directory,
                     std::optional<ChecksumInfo<StringRef>> CS = std::nullopt,
                     std::optional<StringRef> Source = std::nullopt),
                    (Filename, Directory, CS, Source))
  DEFINE_MDNODE_GET(DIFile,
                    (MDString * Filename, MDString *Directory,
                     std::optional<ChecksumInfo<MDString *>> CS = std::nullopt,
                     MDString *Source = nullptr),
                    (Filename, Directory, CS, Source))

  TempDIFile clone() const { return cloneImpl(); }

  StringRef getFilename() const { return getStringOperand(0); }
  StringRef getDirectory() const { return getStringOperand(1); }
  std::optional<ChecksumInfo<StringRef>> getChecksum() const {
    std::optional<ChecksumInfo<StringRef>> StringRefChecksum;
    if (Checksum)
      StringRefChecksum.emplace(Checksum->Kind, Checksum->Value->getString());
    return StringRefChecksum;
  }
  std::optional<StringRef> getSource() const {
    return Source ? std::optional<StringRef>(Source->getString())
                  : std::nullopt;
  }

  MDString *getRawFilename() const { return getOperandAs<MDString>(0); }
  MDString *getRawDirectory() const { return getOperandAs<MDString>(1); }
  std::optional<ChecksumInfo<MDString *>> getRawChecksum() const {
    return Checksum;
  }
  MDString *getRawSource() const { return Source; }

  static StringRef getChecksumKindAsString(ChecksumKind CSKind);
  static std::optional<ChecksumKind> getChecksumKind(StringRef CSKindStr);

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIFileKind;
  }
};

StringRef DIScope::getFilename() const {
  if (auto *F = getFile())
    return F->getFilename();
  return "";
}

StringRef DIScope::getDirectory() const {
  if (auto *F = getFile())
    return F->getDirectory();
  return "";
}

std::optional<StringRef> DIScope::getSource() const {
  if (auto *F = getFile())
    return F->getSource();
  return std::nullopt;
}

/// Base class for types.
///
/// TODO: Remove the hardcoded name and context, since many types don't use
/// them.
/// TODO: Split up flags.
class DIType : public DIScope {
  unsigned Line;
  DIFlags Flags;
  uint64_t SizeInBits;
  uint64_t OffsetInBits;
  uint32_t AlignInBits;

protected:
  DIType(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
         unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
         uint64_t OffsetInBits, DIFlags Flags, ArrayRef<Metadata *> Ops)
      : DIScope(C, ID, Storage, Tag, Ops) {
    init(Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
  }
  ~DIType() = default;

  void init(unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
            uint64_t OffsetInBits, DIFlags Flags) {
    this->Line = Line;
    this->Flags = Flags;
    this->SizeInBits = SizeInBits;
    this->AlignInBits = AlignInBits;
    this->OffsetInBits = OffsetInBits;
  }

  /// Change fields in place.
  void mutate(unsigned Tag, unsigned Line, uint64_t SizeInBits,
              uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags) {
    assert(isDistinct() && "Only distinct nodes can mutate");
    setTag(Tag);
    init(Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
  }

public:
  TempDIType clone() const {
    return TempDIType(cast<DIType>(MDNode::clone().release()));
  }

  unsigned getLine() const { return Line; }
  uint64_t getSizeInBits() const { return SizeInBits; }
  uint32_t getAlignInBits() const { return AlignInBits; }
  uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
  uint64_t getOffsetInBits() const { return OffsetInBits; }
  DIFlags getFlags() const { return Flags; }

  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
  StringRef getName() const { return getStringOperand(2); }

  Metadata *getRawScope() const { return getOperand(1); }
  MDString *getRawName() const { return getOperandAs<MDString>(2); }

  /// Returns a new temporary DIType with updated Flags
  TempDIType cloneWithFlags(DIFlags NewFlags) const {
    auto NewTy = clone();
    NewTy->Flags = NewFlags;
    return NewTy;
  }

  bool isPrivate() const {
    return (getFlags() & FlagAccessibility) == FlagPrivate;
  }
  bool isProtected() const {
    return (getFlags() & FlagAccessibility) == FlagProtected;
  }
  bool isPublic() const {
    return (getFlags() & FlagAccessibility) == FlagPublic;
  }
  bool isForwardDecl() const { return getFlags() & FlagFwdDecl; }
  bool isAppleBlockExtension() const { return getFlags() & FlagAppleBlock; }
  bool isVirtual() const { return getFlags() & FlagVirtual; }
  bool isArtificial() const { return getFlags() & FlagArtificial; }
  bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
  bool isObjcClassComplete() const {
    return getFlags() & FlagObjcClassComplete;
  }
  bool isVector() const { return getFlags() & FlagVector; }
  bool isBitField() const { return getFlags() & FlagBitField; }
  bool isStaticMember() const { return getFlags() & FlagStaticMember; }
  bool isLValueReference() const { return getFlags() & FlagLValueReference; }
  bool isRValueReference() const { return getFlags() & FlagRValueReference; }
  bool isTypePassByValue() const { return getFlags() & FlagTypePassByValue; }
  bool isTypePassByReference() const {
    return getFlags() & FlagTypePassByReference;
  }
  bool isBigEndian() const { return getFlags() & FlagBigEndian; }
  bool isLittleEndian() const { return getFlags() & FlagLittleEndian; }
  bool getExportSymbols() const { return getFlags() & FlagExportSymbols; }

  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
    case DIBasicTypeKind:
    case DIStringTypeKind:
    case DIDerivedTypeKind:
    case DICompositeTypeKind:
    case DISubroutineTypeKind:
      return true;
    }
  }
};

/// Basic type, like 'int' or 'float'.
///
/// TODO: Split out DW_TAG_unspecified_type.
/// TODO: Drop unused accessors.
class DIBasicType : public DIType {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Encoding;

  DIBasicType(LLVMContext &C, StorageType Storage, unsigned Tag,
              uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding,
              DIFlags Flags, ArrayRef<Metadata *> Ops)
      : DIType(C, DIBasicTypeKind, Storage, Tag, 0, SizeInBits, AlignInBits, 0,
               Flags, Ops),
        Encoding(Encoding) {}
  ~DIBasicType() = default;

  static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
                              StringRef Name, uint64_t SizeInBits,
                              uint32_t AlignInBits, unsigned Encoding,
                              DIFlags Flags, StorageType Storage,
                              bool ShouldCreate = true) {
    return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
                   SizeInBits, AlignInBits, Encoding, Flags, Storage,
                   ShouldCreate);
  }
  static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
                              MDString *Name, uint64_t SizeInBits,
                              uint32_t AlignInBits, unsigned Encoding,
                              DIFlags Flags, StorageType Storage,
                              bool ShouldCreate = true);

  TempDIBasicType cloneImpl() const {
    return getTemporary(getContext(), getTag(), getName(), getSizeInBits(),
                        getAlignInBits(), getEncoding(), getFlags());
  }

public:
  DEFINE_MDNODE_GET(DIBasicType, (unsigned Tag, StringRef Name),
                    (Tag, Name, 0, 0, 0, FlagZero))
  DEFINE_MDNODE_GET(DIBasicType,
                    (unsigned Tag, StringRef Name, uint64_t SizeInBits),
                    (Tag, Name, SizeInBits, 0, 0, FlagZero))
  DEFINE_MDNODE_GET(DIBasicType,
                    (unsigned Tag, MDString *Name, uint64_t SizeInBits),
                    (Tag, Name, SizeInBits, 0, 0, FlagZero))
  DEFINE_MDNODE_GET(DIBasicType,
                    (unsigned Tag, StringRef Name, uint64_t SizeInBits,
                     uint32_t AlignInBits, unsigned Encoding, DIFlags Flags),
                    (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags))
  DEFINE_MDNODE_GET(DIBasicType,
                    (unsigned Tag, MDString *Name, uint64_t SizeInBits,
                     uint32_t AlignInBits, unsigned Encoding, DIFlags Flags),
                    (Tag, Name, SizeInBits, AlignInBits, Encoding, Flags))

  TempDIBasicType clone() const { return cloneImpl(); }

  unsigned getEncoding() const { return Encoding; }

  enum class Signedness { Signed, Unsigned };

  /// Return the signedness of this type, or std::nullopt if this type is
  /// neither signed nor unsigned.
  std::optional<Signedness> getSignedness() const;

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIBasicTypeKind;
  }
};
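
// Usage sketch: getSignedness() only answers for integer-like encodings.
// `BT` is a hypothetical DIBasicType pointer.
//
//   if (std::optional<DIBasicType::Signedness> S = BT->getSignedness()) {
//     bool IsSigned = (*S == DIBasicType::Signedness::Signed);
//     (void)IsSigned;
//   } // std::nullopt for e.g. DW_ATE_float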

/// String type, Fortran CHARACTER(n)
class DIStringType : public DIType {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Encoding;

  DIStringType(LLVMContext &C, StorageType Storage, unsigned Tag,
               uint64_t SizeInBits, uint32_t AlignInBits, unsigned Encoding,
               ArrayRef<Metadata *> Ops)
      : DIType(C, DIStringTypeKind, Storage, Tag, 0, SizeInBits, AlignInBits, 0,
               FlagZero, Ops),
        Encoding(Encoding) {}
  ~DIStringType() = default;

  static DIStringType *getImpl(LLVMContext &Context, unsigned Tag,
                               StringRef Name, Metadata *StringLength,
                               Metadata *StrLenExp, Metadata *StrLocationExp,
                               uint64_t SizeInBits, uint32_t AlignInBits,
                               unsigned Encoding, StorageType Storage,
                               bool ShouldCreate = true) {
    return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
                   StringLength, StrLenExp, StrLocationExp, SizeInBits,
                   AlignInBits, Encoding, Storage, ShouldCreate);
  }
  static DIStringType *getImpl(LLVMContext &Context, unsigned Tag,
                               MDString *Name, Metadata *StringLength,
                               Metadata *StrLenExp, Metadata *StrLocationExp,
                               uint64_t SizeInBits, uint32_t AlignInBits,
                               unsigned Encoding, StorageType Storage,
                               bool ShouldCreate = true);

  TempDIStringType cloneImpl() const {
    return getTemporary(getContext(), getTag(), getRawName(),
                        getRawStringLength(), getRawStringLengthExp(),
                        getRawStringLocationExp(), getSizeInBits(),
                        getAlignInBits(), getEncoding());
  }

public:
  DEFINE_MDNODE_GET(DIStringType,
                    (unsigned Tag, StringRef Name, uint64_t SizeInBits,
                     uint32_t AlignInBits),
                    (Tag, Name, nullptr, nullptr, nullptr, SizeInBits,
                     AlignInBits, 0))
  DEFINE_MDNODE_GET(DIStringType,
                    (unsigned Tag, MDString *Name, Metadata *StringLength,
                     Metadata *StringLengthExp, Metadata *StringLocationExp,
                     uint64_t SizeInBits, uint32_t AlignInBits,
                     unsigned Encoding),
                    (Tag, Name, StringLength, StringLengthExp,
                     StringLocationExp, SizeInBits, AlignInBits, Encoding))
  DEFINE_MDNODE_GET(DIStringType,
                    (unsigned Tag, StringRef Name, Metadata *StringLength,
                     Metadata *StringLengthExp, Metadata *StringLocationExp,
                     uint64_t SizeInBits, uint32_t AlignInBits,
                     unsigned Encoding),
                    (Tag, Name, StringLength, StringLengthExp,
                     StringLocationExp, SizeInBits, AlignInBits, Encoding))

  TempDIStringType clone() const { return cloneImpl(); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIStringTypeKind;
  }

  DIVariable *getStringLength() const {
    return cast_or_null<DIVariable>(getRawStringLength());
  }

  DIExpression *getStringLengthExp() const {
    return cast_or_null<DIExpression>(getRawStringLengthExp());
  }

  DIExpression *getStringLocationExp() const {
    return cast_or_null<DIExpression>(getRawStringLocationExp());
  }

  unsigned getEncoding() const { return Encoding; }

  Metadata *getRawStringLength() const { return getOperand(3); }

  Metadata *getRawStringLengthExp() const { return getOperand(4); }

  Metadata *getRawStringLocationExp() const { return getOperand(5); }
};
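
// A hedged creation sketch (not normative): a node for Fortran CHARACTER(10)
// using the StringRef factory above. `Ctx` is an assumed LLVMContext; the
// DW_TAG_string_type constant comes from llvm/BinaryFormat/Dwarf.h.
//
//   DIStringType *ST = DIStringType::get(Ctx, dwarf::DW_TAG_string_type,
//                                        "character(10)", /*SizeInBits=*/80,
//                                        /*AlignInBits=*/8);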

/// Derived types.
///
/// This includes qualified types, pointers, references, friends, typedefs, and
/// class members.
///
/// TODO: Split out members (inheritance, fields, methods, etc.).
class DIDerivedType : public DIType {
  friend class LLVMContextImpl;
  friend class MDNode;

  /// The DWARF address space of the memory pointed to or referenced by a
  /// pointer or reference type respectively.
  std::optional<unsigned> DWARFAddressSpace;

  DIDerivedType(LLVMContext &C, StorageType Storage, unsigned Tag,
                unsigned Line, uint64_t SizeInBits, uint32_t AlignInBits,
                uint64_t OffsetInBits,
                std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
                ArrayRef<Metadata *> Ops)
      : DIType(C, DIDerivedTypeKind, Storage, Tag, Line, SizeInBits,
               AlignInBits, OffsetInBits, Flags, Ops),
        DWARFAddressSpace(DWARFAddressSpace) {}
  ~DIDerivedType() = default;
  static DIDerivedType *
  getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, DIFile *File,
          unsigned Line, DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
          uint32_t AlignInBits, uint64_t OffsetInBits,
          std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
          Metadata *ExtraData, DINodeArray Annotations, StorageType Storage,
          bool ShouldCreate = true) {
    return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File,
                   Line, Scope, BaseType, SizeInBits, AlignInBits, OffsetInBits,
                   DWARFAddressSpace, Flags, ExtraData, Annotations.get(),
                   Storage, ShouldCreate);
  }
  static DIDerivedType *
  getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
          unsigned Line, Metadata *Scope, Metadata *BaseType,
          uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
          std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
          Metadata *ExtraData, Metadata *Annotations, StorageType Storage,
          bool ShouldCreate = true);

  TempDIDerivedType cloneImpl() const {
    return getTemporary(
        getContext(), getTag(), getName(), getFile(), getLine(), getScope(),
        getBaseType(), getSizeInBits(), getAlignInBits(), getOffsetInBits(),
        getDWARFAddressSpace(), getFlags(), getExtraData(), getAnnotations());
  }

public:
  DEFINE_MDNODE_GET(
      DIDerivedType,
      (unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
       Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
       uint32_t AlignInBits, uint64_t OffsetInBits,
       std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
       Metadata *ExtraData = nullptr, Metadata *Annotations = nullptr),
      (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits,
       OffsetInBits, DWARFAddressSpace, Flags, ExtraData, Annotations))
  DEFINE_MDNODE_GET(DIDerivedType,
                    (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
                     DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
                     uint32_t AlignInBits, uint64_t OffsetInBits,
                     std::optional<unsigned> DWARFAddressSpace, DIFlags Flags,
                     Metadata *ExtraData = nullptr,
                     DINodeArray Annotations = nullptr),
                    (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
                     AlignInBits, OffsetInBits, DWARFAddressSpace, Flags,
                     ExtraData, Annotations))

  TempDIDerivedType clone() const { return cloneImpl(); }

  /// Get the base type this is derived from.
  DIType *getBaseType() const { return cast_or_null<DIType>(getRawBaseType()); }
  Metadata *getRawBaseType() const { return getOperand(3); }

  /// \returns The DWARF address space of the memory pointed to or referenced by
  /// a pointer or reference type respectively.
  std::optional<unsigned> getDWARFAddressSpace() const {
    return DWARFAddressSpace;
  }

  /// Get extra data associated with this derived type.
  ///
  /// Class type for pointer-to-members, Objective-C property node for ivars,
  /// global constant wrapper for static members, or virtual base pointer offset
  /// for inheritance.
  ///
  /// TODO: Separate out types that need this extra operand: pointer-to-member
  /// types and member fields (static members and ivars).
  Metadata *getExtraData() const { return getRawExtraData(); }
  Metadata *getRawExtraData() const { return getOperand(4); }

  /// Get annotations associated with this derived type.
  DINodeArray getAnnotations() const {
    return cast_or_null<MDTuple>(getRawAnnotations());
  }
  Metadata *getRawAnnotations() const { return getOperand(5); }

  /// Get casted version of extra data.
  /// @{
  DIType *getClassType() const;

  DIObjCProperty *getObjCProperty() const {
    return dyn_cast_or_null<DIObjCProperty>(getExtraData());
  }

  uint32_t getVBPtrOffset() const;

  Constant *getStorageOffsetInBits() const;

  Constant *getConstant() const;

  Constant *getDiscriminantValue() const;
  /// @}

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIDerivedTypeKind;
  }
};
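
// A hedged creation sketch: a 64-bit pointer type wrapping `IntTy` (a
// hypothetical DIBasicType*). `Ctx` is an assumed LLVMContext; the tag
// constant comes from llvm/BinaryFormat/Dwarf.h.
//
//   DIDerivedType *PtrTy = DIDerivedType::get(
//       Ctx, dwarf::DW_TAG_pointer_type, "", /*File=*/nullptr, /*Line=*/0,
//       /*Scope=*/nullptr, /*BaseType=*/IntTy, /*SizeInBits=*/64,
//       /*AlignInBits=*/64, /*OffsetInBits=*/0,
//       /*DWARFAddressSpace=*/std::nullopt, DINode::FlagZero);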

/// Composite types.
///
/// TODO: Detach from DerivedTypeBase (split out MDEnumType?).
/// TODO: Create a custom, unrelated node for DW_TAG_array_type.
class DICompositeType : public DIType {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned RuntimeLang;

  DICompositeType(LLVMContext &C, StorageType Storage, unsigned Tag,
                  unsigned Line, unsigned RuntimeLang, uint64_t SizeInBits,
                  uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
                  ArrayRef<Metadata *> Ops)
      : DIType(C, DICompositeTypeKind, Storage, Tag, Line, SizeInBits,
               AlignInBits, OffsetInBits, Flags, Ops),
        RuntimeLang(RuntimeLang) {}
  ~DICompositeType() = default;

  /// Change fields in place.
  void mutate(unsigned Tag, unsigned Line, unsigned RuntimeLang,
              uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
              DIFlags Flags) {
    assert(isDistinct() && "Only distinct nodes can mutate");
    assert(getRawIdentifier() && "Only ODR-uniqued nodes should mutate");
    this->RuntimeLang = RuntimeLang;
    DIType::mutate(Tag, Line, SizeInBits, AlignInBits, OffsetInBits, Flags);
  }

  static DICompositeType *
  getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, Metadata *File,
          unsigned Line, DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
          uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
          DINodeArray Elements, unsigned RuntimeLang, DIType *VTableHolder,
          DITemplateParameterArray TemplateParams, StringRef Identifier,
          DIDerivedType *Discriminator, Metadata *DataLocation,
          Metadata *Associated, Metadata *Allocated, Metadata *Rank,
          DINodeArray Annotations, StorageType Storage,
          bool ShouldCreate = true) {
    return getImpl(
        Context, Tag, getCanonicalMDString(Context, Name), File, Line, Scope,
        BaseType, SizeInBits, AlignInBits, OffsetInBits, Flags, Elements.get(),
        RuntimeLang, VTableHolder, TemplateParams.get(),
        getCanonicalMDString(Context, Identifier), Discriminator, DataLocation,
        Associated, Allocated, Rank, Annotations.get(), Storage, ShouldCreate);
  }
  static DICompositeType *
  getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
          unsigned Line, Metadata *Scope, Metadata *BaseType,
          uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
          DIFlags Flags, Metadata *Elements, unsigned RuntimeLang,
          Metadata *VTableHolder, Metadata *TemplateParams,
          MDString *Identifier, Metadata *Discriminator, Metadata *DataLocation,
          Metadata *Associated, Metadata *Allocated, Metadata *Rank,
          Metadata *Annotations, StorageType Storage, bool ShouldCreate = true);

  TempDICompositeType cloneImpl() const {
    return getTemporary(
        getContext(), getTag(), getName(), getFile(), getLine(), getScope(),
        getBaseType(), getSizeInBits(), getAlignInBits(), getOffsetInBits(),
        getFlags(), getElements(), getRuntimeLang(), getVTableHolder(),
        getTemplateParams(), getIdentifier(), getDiscriminator(),
        getRawDataLocation(), getRawAssociated(), getRawAllocated(),
        getRawRank(), getAnnotations());
  }

public:
  DEFINE_MDNODE_GET(
      DICompositeType,
      (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
       DIScope *Scope, DIType *BaseType, uint64_t SizeInBits,
       uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
       DINodeArray Elements, unsigned RuntimeLang, DIType *VTableHolder,
       DITemplateParameterArray TemplateParams = nullptr,
       StringRef Identifier = "", DIDerivedType *Discriminator = nullptr,
       Metadata *DataLocation = nullptr, Metadata *Associated = nullptr,
       Metadata *Allocated = nullptr, Metadata *Rank = nullptr,
       DINodeArray Annotations = nullptr),
      (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits,
       OffsetInBits, Flags, Elements, RuntimeLang, VTableHolder, TemplateParams,
       Identifier, Discriminator, DataLocation, Associated, Allocated, Rank,
       Annotations))
  DEFINE_MDNODE_GET(
      DICompositeType,
      (unsigned Tag, MDString *Name, Metadata *File, unsigned Line,
       Metadata *Scope, Metadata *BaseType, uint64_t SizeInBits,
       uint32_t AlignInBits, uint64_t OffsetInBits, DIFlags Flags,
       Metadata *Elements, unsigned RuntimeLang, Metadata *VTableHolder,
       Metadata *TemplateParams = nullptr, MDString *Identifier = nullptr,
       Metadata *Discriminator = nullptr, Metadata *DataLocation = nullptr,
       Metadata *Associated = nullptr, Metadata *Allocated = nullptr,
       Metadata *Rank = nullptr, Metadata *Annotations = nullptr),
      (Tag, Name, File, Line, Scope, BaseType, SizeInBits, AlignInBits,
       OffsetInBits, Flags, Elements, RuntimeLang, VTableHolder, TemplateParams,
       Identifier, Discriminator, DataLocation, Associated, Allocated, Rank,
       Annotations))

  TempDICompositeType clone() const { return cloneImpl(); }

  /// Get a DICompositeType with the given ODR identifier.
  ///
  /// If \a LLVMContext::isODRUniquingDebugTypes(), gets the mapped
  /// DICompositeType for the given ODR \c Identifier.  If none exists, creates
  /// a new node.
  ///
  /// Otherwise, returns \c nullptr.
  static DICompositeType *
  getODRType(LLVMContext &Context, MDString &Identifier, unsigned Tag,
             MDString *Name, Metadata *File, unsigned Line, Metadata *Scope,
             Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
             uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
             unsigned RuntimeLang, Metadata *VTableHolder,
             Metadata *TemplateParams, Metadata *Discriminator,
             Metadata *DataLocation, Metadata *Associated, Metadata *Allocated,
             Metadata *Rank, Metadata *Annotations);
  static DICompositeType *getODRTypeIfExists(LLVMContext &Context,
                                             MDString &Identifier);
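
  // A hedged lookup sketch: querying the ODR map for an already-interned
  // MDString* `Id` in context `Ctx` (both assumed). Returns nullptr when ODR
  // uniquing is disabled or nothing was registered for the identifier.
  //
  //   if (DICompositeType *CT = DICompositeType::getODRTypeIfExists(Ctx, *Id))
  //     reuse(CT); // `reuse` is a hypothetical callback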

  /// Build a DICompositeType with the given ODR identifier.
  ///
  /// Looks up the mapped DICompositeType for the given ODR \c Identifier.  If
  /// it doesn't exist, creates a new one.  If it does exist and \a
  /// isForwardDecl(), and the new arguments would be a definition, mutates the
  /// type in place.  In either case, returns the type.
  ///
  /// If not \a LLVMContext::isODRUniquingDebugTypes(), this function returns
  /// nullptr.
  static DICompositeType *
  buildODRType(LLVMContext &Context, MDString &Identifier, unsigned Tag,
               MDString *Name, Metadata *File, unsigned Line, Metadata *Scope,
               Metadata *BaseType, uint64_t SizeInBits, uint32_t AlignInBits,
               uint64_t OffsetInBits, DIFlags Flags, Metadata *Elements,
               unsigned RuntimeLang, Metadata *VTableHolder,
               Metadata *TemplateParams, Metadata *Discriminator,
               Metadata *DataLocation, Metadata *Associated,
               Metadata *Allocated, Metadata *Rank, Metadata *Annotations);

  DIType *getBaseType() const { return cast_or_null<DIType>(getRawBaseType()); }
  DINodeArray getElements() const {
    return cast_or_null<MDTuple>(getRawElements());
  }
  DIType *getVTableHolder() const {
    return cast_or_null<DIType>(getRawVTableHolder());
  }
  DITemplateParameterArray getTemplateParams() const {
    return cast_or_null<MDTuple>(getRawTemplateParams());
  }
  StringRef getIdentifier() const { return getStringOperand(7); }
  unsigned getRuntimeLang() const { return RuntimeLang; }

  Metadata *getRawBaseType() const { return getOperand(3); }
  Metadata *getRawElements() const { return getOperand(4); }
  Metadata *getRawVTableHolder() const { return getOperand(5); }
  Metadata *getRawTemplateParams() const { return getOperand(6); }
  MDString *getRawIdentifier() const { return getOperandAs<MDString>(7); }
  Metadata *getRawDiscriminator() const { return getOperand(8); }
  DIDerivedType *getDiscriminator() const {
    return getOperandAs<DIDerivedType>(8);
  }
  Metadata *getRawDataLocation() const { return getOperand(9); }
  DIVariable *getDataLocation() const {
    return dyn_cast_or_null<DIVariable>(getRawDataLocation());
  }
  DIExpression *getDataLocationExp() const {
    return dyn_cast_or_null<DIExpression>(getRawDataLocation());
  }
  Metadata *getRawAssociated() const { return getOperand(10); }
  DIVariable *getAssociated() const {
    return dyn_cast_or_null<DIVariable>(getRawAssociated());
  }
  DIExpression *getAssociatedExp() const {
    return dyn_cast_or_null<DIExpression>(getRawAssociated());
  }
  Metadata *getRawAllocated() const { return getOperand(11); }
  DIVariable *getAllocated() const {
    return dyn_cast_or_null<DIVariable>(getRawAllocated());
  }
  DIExpression *getAllocatedExp() const {
    return dyn_cast_or_null<DIExpression>(getRawAllocated());
  }
  Metadata *getRawRank() const { return getOperand(12); }
  ConstantInt *getRankConst() const {
    if (auto *MD = dyn_cast_or_null<ConstantAsMetadata>(getRawRank()))
      return dyn_cast_or_null<ConstantInt>(MD->getValue());
    return nullptr;
  }
  DIExpression *getRankExp() const {
    return dyn_cast_or_null<DIExpression>(getRawRank());
  }

  Metadata *getRawAnnotations() const { return getOperand(13); }
  DINodeArray getAnnotations() const {
    return cast_or_null<MDTuple>(getRawAnnotations());
  }

  /// Replace operands.
  ///
  /// If this \a isUniqued() and not \a isResolved(), on a uniquing collision
  /// this will be RAUW'ed and deleted.  Use a \a TrackingMDRef to keep track
  /// of its movement if necessary.
  /// @{
  void replaceElements(DINodeArray Elements) {
#ifndef NDEBUG
    for (DINode *Op : getElements())
      assert(is_contained(Elements->operands(), Op) &&
             "Lost a member during member list replacement");
#endif
    replaceOperandWith(4, Elements.get());
  }

  void replaceVTableHolder(DIType *VTableHolder) {
    replaceOperandWith(5, VTableHolder);
  }

  void replaceTemplateParams(DITemplateParameterArray TemplateParams) {
    replaceOperandWith(6, TemplateParams.get());
  }
  /// @}
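
  // A hedged sketch of surviving a uniquing RAUW while replacing operands, per
  // the note above; `CT` is a hypothetical DICompositeType* and `NewElements`
  // a DINodeArray built elsewhere (e.g. by DIBuilder). TrackingMDNodeRef is
  // declared in llvm/IR/TrackingMDRef.h.
  //
  //   TrackingMDNodeRef Ref(CT);        // tracks the node across RAUW
  //   CT->replaceElements(NewElements);
  //   CT = cast<DICompositeType>(Ref.get());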

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DICompositeTypeKind;
  }
};

/// Type array for a subprogram.
///
/// TODO: Fold the array of types in directly as operands.
class DISubroutineType : public DIType {
  friend class LLVMContextImpl;
  friend class MDNode;

  /// The calling convention used with DW_AT_calling_convention. Actually of
  /// type dwarf::CallingConvention.
  uint8_t CC;

  DISubroutineType(LLVMContext &C, StorageType Storage, DIFlags Flags,
                   uint8_t CC, ArrayRef<Metadata *> Ops);
  ~DISubroutineType() = default;

  static DISubroutineType *getImpl(LLVMContext &Context, DIFlags Flags,
                                   uint8_t CC, DITypeRefArray TypeArray,
                                   StorageType Storage,
                                   bool ShouldCreate = true) {
    return getImpl(Context, Flags, CC, TypeArray.get(), Storage, ShouldCreate);
  }
  static DISubroutineType *getImpl(LLVMContext &Context, DIFlags Flags,
                                   uint8_t CC, Metadata *TypeArray,
                                   StorageType Storage,
                                   bool ShouldCreate = true);

  TempDISubroutineType cloneImpl() const {
    return getTemporary(getContext(), getFlags(), getCC(), getTypeArray());
  }

public:
  DEFINE_MDNODE_GET(DISubroutineType,
                    (DIFlags Flags, uint8_t CC, DITypeRefArray TypeArray),
                    (Flags, CC, TypeArray))
  DEFINE_MDNODE_GET(DISubroutineType,
                    (DIFlags Flags, uint8_t CC, Metadata *TypeArray),
                    (Flags, CC, TypeArray))

  TempDISubroutineType clone() const { return cloneImpl(); }
  // Returns a new temporary DISubroutineType with an updated CC.
  TempDISubroutineType cloneWithCC(uint8_t CC) const {
    auto NewTy = clone();
    NewTy->CC = CC;
    return NewTy;
  }

  uint8_t getCC() const { return CC; }

  DITypeRefArray getTypeArray() const {
    return cast_or_null<MDTuple>(getRawTypeArray());
  }

  Metadata *getRawTypeArray() const { return getOperand(3); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DISubroutineTypeKind;
  }
};

/// Compile unit.
class DICompileUnit : public DIScope {
  friend class LLVMContextImpl;
  friend class MDNode;

public:
  enum DebugEmissionKind : unsigned {
    NoDebug = 0,
    FullDebug,
    LineTablesOnly,
    DebugDirectivesOnly,
    LastEmissionKind = DebugDirectivesOnly
  };

  enum class DebugNameTableKind : unsigned {
    Default = 0,
    GNU = 1,
    None = 2,
    Apple = 3,
    LastDebugNameTableKind = Apple
  };

  static std::optional<DebugEmissionKind> getEmissionKind(StringRef Str);
  static const char *emissionKindString(DebugEmissionKind EK);
  static std::optional<DebugNameTableKind> getNameTableKind(StringRef Str);
  static const char *nameTableKindString(DebugNameTableKind PK);

private:
  unsigned SourceLanguage;
  bool IsOptimized;
  unsigned RuntimeVersion;
  unsigned EmissionKind;
  uint64_t DWOId;
  bool SplitDebugInlining;
  bool DebugInfoForProfiling;
  unsigned NameTableKind;
  bool RangesBaseAddress;

  DICompileUnit(LLVMContext &C, StorageType Storage, unsigned SourceLanguage,
                bool IsOptimized, unsigned RuntimeVersion,
                unsigned EmissionKind, uint64_t DWOId, bool SplitDebugInlining,
                bool DebugInfoForProfiling, unsigned NameTableKind,
                bool RangesBaseAddress, ArrayRef<Metadata *> Ops);
  ~DICompileUnit() = default;

  static DICompileUnit *
  getImpl(LLVMContext &Context, unsigned SourceLanguage, DIFile *File,
          StringRef Producer, bool IsOptimized, StringRef Flags,
          unsigned RuntimeVersion, StringRef SplitDebugFilename,
          unsigned EmissionKind, DICompositeTypeArray EnumTypes,
          DIScopeArray RetainedTypes,
          DIGlobalVariableExpressionArray GlobalVariables,
          DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
          uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
          unsigned NameTableKind, bool RangesBaseAddress, StringRef SysRoot,
          StringRef SDK, StorageType Storage, bool ShouldCreate = true) {
    return getImpl(
        Context, SourceLanguage, File, getCanonicalMDString(Context, Producer),
        IsOptimized, getCanonicalMDString(Context, Flags), RuntimeVersion,
        getCanonicalMDString(Context, SplitDebugFilename), EmissionKind,
        EnumTypes.get(), RetainedTypes.get(), GlobalVariables.get(),
        ImportedEntities.get(), Macros.get(), DWOId, SplitDebugInlining,
        DebugInfoForProfiling, NameTableKind, RangesBaseAddress,
        getCanonicalMDString(Context, SysRoot),
        getCanonicalMDString(Context, SDK), Storage, ShouldCreate);
  }
  static DICompileUnit *
  getImpl(LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
          MDString *Producer, bool IsOptimized, MDString *Flags,
          unsigned RuntimeVersion, MDString *SplitDebugFilename,
          unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
          Metadata *GlobalVariables, Metadata *ImportedEntities,
          Metadata *Macros, uint64_t DWOId, bool SplitDebugInlining,
          bool DebugInfoForProfiling, unsigned NameTableKind,
          bool RangesBaseAddress, MDString *SysRoot, MDString *SDK,
          StorageType Storage, bool ShouldCreate = true);

  TempDICompileUnit cloneImpl() const {
    return getTemporary(
        getContext(), getSourceLanguage(), getFile(), getProducer(),
        isOptimized(), getFlags(), getRuntimeVersion(), getSplitDebugFilename(),
        getEmissionKind(), getEnumTypes(), getRetainedTypes(),
        getGlobalVariables(), getImportedEntities(), getMacros(), DWOId,
        getSplitDebugInlining(), getDebugInfoForProfiling(), getNameTableKind(),
        getRangesBaseAddress(), getSysRoot(), getSDK());
  }

public:
  static void get() = delete;
  static void getIfExists() = delete;

  DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
      DICompileUnit,
      (unsigned SourceLanguage, DIFile *File, StringRef Producer,
       bool IsOptimized, StringRef Flags, unsigned RuntimeVersion,
       StringRef SplitDebugFilename, DebugEmissionKind EmissionKind,
       DICompositeTypeArray EnumTypes, DIScopeArray RetainedTypes,
       DIGlobalVariableExpressionArray GlobalVariables,
       DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
       uint64_t DWOId, bool SplitDebugInlining, bool DebugInfoForProfiling,
       DebugNameTableKind NameTableKind, bool RangesBaseAddress,
       StringRef SysRoot, StringRef SDK),
      (SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
       SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
       GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
       DebugInfoForProfiling, (unsigned)NameTableKind, RangesBaseAddress,
       SysRoot, SDK))
  DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
      DICompileUnit,
      (unsigned SourceLanguage, Metadata *File, MDString *Producer,
       bool IsOptimized, MDString *Flags, unsigned RuntimeVersion,
       MDString *SplitDebugFilename, unsigned EmissionKind, Metadata *EnumTypes,
       Metadata *RetainedTypes, Metadata *GlobalVariables,
       Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
       bool SplitDebugInlining, bool DebugInfoForProfiling,
       unsigned NameTableKind, bool RangesBaseAddress, MDString *SysRoot,
       MDString *SDK),
      (SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
       SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes,
       GlobalVariables, ImportedEntities, Macros, DWOId, SplitDebugInlining,
       DebugInfoForProfiling, NameTableKind, RangesBaseAddress, SysRoot, SDK))
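
  // A hedged creation sketch: DICompileUnit is always created distinct via the
  // factory generated above (plain get()/getIfExists() are deleted). `Ctx` and
  // `File` are assumed to exist; the language constant comes from
  // llvm/BinaryFormat/Dwarf.h.
  //
  //   DICompileUnit *CU = DICompileUnit::getDistinct(
  //       Ctx, dwarf::DW_LANG_C99, File, /*Producer=*/"my-compiler",
  //       /*IsOptimized=*/false, /*Flags=*/"", /*RuntimeVersion=*/0,
  //       /*SplitDebugFilename=*/"", DICompileUnit::FullDebug,
  //       /*EnumTypes=*/nullptr, /*RetainedTypes=*/nullptr,
  //       /*GlobalVariables=*/nullptr, /*ImportedEntities=*/nullptr,
  //       /*Macros=*/nullptr, /*DWOId=*/0, /*SplitDebugInlining=*/true,
  //       /*DebugInfoForProfiling=*/false,
  //       DICompileUnit::DebugNameTableKind::Default,
  //       /*RangesBaseAddress=*/false, /*SysRoot=*/"", /*SDK=*/"");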

  TempDICompileUnit clone() const { return cloneImpl(); }

  unsigned getSourceLanguage() const { return SourceLanguage; }
  bool isOptimized() const { return IsOptimized; }
  unsigned getRuntimeVersion() const { return RuntimeVersion; }
  DebugEmissionKind getEmissionKind() const {
    return (DebugEmissionKind)EmissionKind;
  }
  bool isDebugDirectivesOnly() const {
    return EmissionKind == DebugDirectivesOnly;
  }
  bool getDebugInfoForProfiling() const { return DebugInfoForProfiling; }
  DebugNameTableKind getNameTableKind() const {
    return (DebugNameTableKind)NameTableKind;
  }
  bool getRangesBaseAddress() const { return RangesBaseAddress; }
  StringRef getProducer() const { return getStringOperand(1); }
  StringRef getFlags() const { return getStringOperand(2); }
  StringRef getSplitDebugFilename() const { return getStringOperand(3); }
  DICompositeTypeArray getEnumTypes() const {
    return cast_or_null<MDTuple>(getRawEnumTypes());
  }
  DIScopeArray getRetainedTypes() const {
    return cast_or_null<MDTuple>(getRawRetainedTypes());
  }
  DIGlobalVariableExpressionArray getGlobalVariables() const {
    return cast_or_null<MDTuple>(getRawGlobalVariables());
  }
  DIImportedEntityArray getImportedEntities() const {
    return cast_or_null<MDTuple>(getRawImportedEntities());
  }
  DIMacroNodeArray getMacros() const {
    return cast_or_null<MDTuple>(getRawMacros());
  }
  uint64_t getDWOId() const { return DWOId; }
  void setDWOId(uint64_t DwoId) { DWOId = DwoId; }
  bool getSplitDebugInlining() const { return SplitDebugInlining; }
  void setSplitDebugInlining(bool SplitDebugInlining) {
    this->SplitDebugInlining = SplitDebugInlining;
  }
  StringRef getSysRoot() const { return getStringOperand(9); }
  StringRef getSDK() const { return getStringOperand(10); }

  MDString *getRawProducer() const { return getOperandAs<MDString>(1); }
  MDString *getRawFlags() const { return getOperandAs<MDString>(2); }
  MDString *getRawSplitDebugFilename() const {
    return getOperandAs<MDString>(3);
  }
  Metadata *getRawEnumTypes() const { return getOperand(4); }
  Metadata *getRawRetainedTypes() const { return getOperand(5); }
  Metadata *getRawGlobalVariables() const { return getOperand(6); }
  Metadata *getRawImportedEntities() const { return getOperand(7); }
  Metadata *getRawMacros() const { return getOperand(8); }
  MDString *getRawSysRoot() const { return getOperandAs<MDString>(9); }
  MDString *getRawSDK() const { return getOperandAs<MDString>(10); }

  /// Replace arrays.
  ///
  /// If this \a isUniqued() and not \a isResolved(), it will be RAUW'ed and
  /// deleted on a uniquing collision.  In practice, uniquing collisions on \a
  /// DICompileUnit should be fairly rare.
  /// @{
  void replaceEnumTypes(DICompositeTypeArray N) {
    replaceOperandWith(4, N.get());
  }
  void replaceRetainedTypes(DITypeArray N) { replaceOperandWith(5, N.get()); }
  void replaceGlobalVariables(DIGlobalVariableExpressionArray N) {
    replaceOperandWith(6, N.get());
  }
  void replaceImportedEntities(DIImportedEntityArray N) {
    replaceOperandWith(7, N.get());
  }
  void replaceMacros(DIMacroNodeArray N) { replaceOperandWith(8, N.get()); }
  /// @}

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DICompileUnitKind;
  }
};

/// A scope for locals.
///
/// A legal scope for lexical blocks, local variables, and debug info
/// locations.  Subclasses are \a DISubprogram, \a DILexicalBlock, and \a
/// DILexicalBlockFile.
class DILocalScope : public DIScope {
protected:
  DILocalScope(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
               ArrayRef<Metadata *> Ops)
      : DIScope(C, ID, Storage, Tag, Ops) {}
  ~DILocalScope() = default;

public:
  /// Get the subprogram for this scope.
  ///
  /// Return this if it's a \a DISubprogram; otherwise, look up the scope
  /// chain.
  DISubprogram *getSubprogram() const;

  /// Traverses the scope chain rooted at RootScope until it hits a Subprogram,
  /// recreating the chain with "NewSP" instead.
  static DILocalScope *
  cloneScopeForSubprogram(DILocalScope &RootScope, DISubprogram &NewSP,
                          LLVMContext &Ctx,
                          DenseMap<const MDNode *, MDNode *> &Cache);

  /// Get the first non-DILexicalBlockFile scope of this scope.
  ///
  /// Return this if it's not a \a DILexicalBlockFile; otherwise, look up the
  /// scope chain.
  DILocalScope *getNonLexicalBlockFileScope() const;

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DISubprogramKind ||
           MD->getMetadataID() == DILexicalBlockKind ||
           MD->getMetadataID() == DILexicalBlockFileKind;
  }
};

/// Subprogram description.
class DISubprogram : public DILocalScope {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;
  unsigned ScopeLine;
  unsigned VirtualIndex;

  /// In the MS ABI, the implicit 'this' parameter is adjusted in the prologue
  /// of method overrides from secondary bases by this amount. It may be
  /// negative.
  int ThisAdjustment;

public:
  /// Debug info subprogram flags.
  enum DISPFlags : uint32_t {
#define HANDLE_DISP_FLAG(ID, NAME) SPFlag##NAME = ID,
#define DISP_FLAG_LARGEST_NEEDED
#include "llvm/IR/DebugInfoFlags.def"
    SPFlagNonvirtual = SPFlagZero,
    SPFlagVirtuality = SPFlagVirtual | SPFlagPureVirtual,
    LLVM_MARK_AS_BITMASK_ENUM(SPFlagLargest)
  };

  static DISPFlags getFlag(StringRef Flag);
  static StringRef getFlagString(DISPFlags Flag);

  /// Split up a flags bitfield for easier printing.
  ///
  /// Split \c Flags into \c SplitFlags, a vector of its components.  Returns
  /// any remaining (unrecognized) bits.
  static DISPFlags splitFlags(DISPFlags Flags,
                              SmallVectorImpl<DISPFlags> &SplitFlags);
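
  // A hedged printing sketch: splitting a subprogram's flags word. `SP` is a
  // hypothetical DISubprogram*; errs() is assumed to come from
  // llvm/Support/raw_ostream.h.
  //
  //   SmallVector<DISubprogram::DISPFlags, 8> Split;
  //   DISubprogram::DISPFlags Rest =
  //       DISubprogram::splitFlags(SP->getSPFlags(), Split);
  //   for (DISubprogram::DISPFlags F : Split)
  //     errs() << DISubprogram::getFlagString(F) << ' ';
  //   (void)Rest; // any unrecognized bits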

  /// Helper for converting old bitfields to the new flags word.
  static DISPFlags toSPFlags(bool IsLocalToUnit, bool IsDefinition,
                             bool IsOptimized,
                             unsigned Virtuality = SPFlagNonvirtual,
                             bool IsMainSubprogram = false);
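
  // A hedged conversion sketch for the helper above; the virtuality constant
  // comes from llvm/BinaryFormat/Dwarf.h.
  //
  //   DISubprogram::DISPFlags SPFlags = DISubprogram::toSPFlags(
  //       /*IsLocalToUnit=*/true, /*IsDefinition=*/true, /*IsOptimized=*/false,
  //       dwarf::DW_VIRTUALITY_none);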

private:
  DIFlags Flags;
  DISPFlags SPFlags;

  DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line,
               unsigned ScopeLine, unsigned VirtualIndex, int ThisAdjustment,
               DIFlags Flags, DISPFlags SPFlags, ArrayRef<Metadata *> Ops);
  ~DISubprogram() = default;

  static DISubprogram *
  getImpl(LLVMContext &Context, DIScope *Scope, StringRef Name,
          StringRef LinkageName, DIFile *File, unsigned Line,
          DISubroutineType *Type, unsigned ScopeLine, DIType *ContainingType,
          unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
          DISPFlags SPFlags, DICompileUnit *Unit,
          DITemplateParameterArray TemplateParams, DISubprogram *Declaration,
          DINodeArray RetainedNodes, DITypeArray ThrownTypes,
          DINodeArray Annotations, StringRef TargetFuncName,
          StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
                   getCanonicalMDString(Context, LinkageName), File, Line, Type,
                   ScopeLine, ContainingType, VirtualIndex, ThisAdjustment,
                   Flags, SPFlags, Unit, TemplateParams.get(), Declaration,
                   RetainedNodes.get(), ThrownTypes.get(), Annotations.get(),
                   getCanonicalMDString(Context, TargetFuncName),
                   Storage, ShouldCreate);
  }
  static DISubprogram *
  getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
          MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
          unsigned ScopeLine, Metadata *ContainingType, unsigned VirtualIndex,
          int ThisAdjustment, DIFlags Flags, DISPFlags SPFlags, Metadata *Unit,
          Metadata *TemplateParams, Metadata *Declaration,
          Metadata *RetainedNodes, Metadata *ThrownTypes, Metadata *Annotations,
          MDString *TargetFuncName, StorageType Storage,
          bool ShouldCreate = true);

  TempDISubprogram cloneImpl() const {
    return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
                        getFile(), getLine(), getType(), getScopeLine(),
                        getContainingType(), getVirtualIndex(),
                        getThisAdjustment(), getFlags(), getSPFlags(),
                        getUnit(), getTemplateParams(), getDeclaration(),
                        getRetainedNodes(), getThrownTypes(), getAnnotations(),
                        getTargetFuncName());
  }

public:
  DEFINE_MDNODE_GET(
      DISubprogram,
      (DIScope * Scope, StringRef Name, StringRef LinkageName, DIFile *File,
       unsigned Line, DISubroutineType *Type, unsigned ScopeLine,
       DIType *ContainingType, unsigned VirtualIndex, int ThisAdjustment,
       DIFlags Flags, DISPFlags SPFlags, DICompileUnit *Unit,
       DITemplateParameterArray TemplateParams = nullptr,
       DISubprogram *Declaration = nullptr, DINodeArray RetainedNodes = nullptr,
       DITypeArray ThrownTypes = nullptr, DINodeArray Annotations = nullptr,
       StringRef TargetFuncName = ""),
      (Scope, Name, LinkageName, File, Line, Type, ScopeLine, ContainingType,
       VirtualIndex, ThisAdjustment, Flags, SPFlags, Unit, TemplateParams,
       Declaration, RetainedNodes, ThrownTypes, Annotations, TargetFuncName))

  DEFINE_MDNODE_GET(
      DISubprogram,
      (Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
       unsigned Line, Metadata *Type, unsigned ScopeLine,
       Metadata *ContainingType, unsigned VirtualIndex, int ThisAdjustment,
       DIFlags Flags, DISPFlags SPFlags, Metadata *Unit,
       Metadata *TemplateParams = nullptr, Metadata *Declaration = nullptr,
       Metadata *RetainedNodes = nullptr, Metadata *ThrownTypes = nullptr,
       Metadata *Annotations = nullptr, MDString *TargetFuncName = nullptr),
      (Scope, Name, LinkageName, File, Line, Type, ScopeLine, ContainingType,
       VirtualIndex, ThisAdjustment, Flags, SPFlags, Unit, TemplateParams,
       Declaration, RetainedNodes, ThrownTypes, Annotations, TargetFuncName))

  TempDISubprogram clone() const { return cloneImpl(); }

  /// Returns a new temporary DISubprogram with updated Flags.
  TempDISubprogram cloneWithFlags(DIFlags NewFlags) const {
    auto NewSP = clone();
    NewSP->Flags = NewFlags;
    return NewSP;
  }

public:
  unsigned getLine() const { return Line; }
  unsigned getVirtuality() const { return getSPFlags() & SPFlagVirtuality; }
  unsigned getVirtualIndex() const { return VirtualIndex; }
  int getThisAdjustment() const { return ThisAdjustment; }
  unsigned getScopeLine() const { return ScopeLine; }
  void setScopeLine(unsigned L) {
    assert(isDistinct());
    ScopeLine = L;
  }
  DIFlags getFlags() const { return Flags; }
  DISPFlags getSPFlags() const { return SPFlags; }
  bool isLocalToUnit() const { return getSPFlags() & SPFlagLocalToUnit; }
  bool isDefinition() const { return getSPFlags() & SPFlagDefinition; }
  bool isOptimized() const { return getSPFlags() & SPFlagOptimized; }
  bool isMainSubprogram() const { return getSPFlags() & SPFlagMainSubprogram; }

  bool isArtificial() const { return getFlags() & FlagArtificial; }
  bool isPrivate() const {
    return (getFlags() & FlagAccessibility) == FlagPrivate;
  }
  bool isProtected() const {
    return (getFlags() & FlagAccessibility) == FlagProtected;
  }
  bool isPublic() const {
    return (getFlags() & FlagAccessibility) == FlagPublic;
  }
  bool isExplicit() const { return getFlags() & FlagExplicit; }
  bool isPrototyped() const { return getFlags() & FlagPrototyped; }
  bool areAllCallsDescribed() const {
    return getFlags() & FlagAllCallsDescribed;
  }
  bool isPure() const { return getSPFlags() & SPFlagPure; }
  bool isElemental() const { return getSPFlags() & SPFlagElemental; }
  bool isRecursive() const { return getSPFlags() & SPFlagRecursive; }
  bool isObjCDirect() const { return getSPFlags() & SPFlagObjCDirect; }

  /// Check if this is a deleted member function.
  ///
  /// Return true if this subprogram is a C++11 special
  /// member function declared deleted.
  bool isDeleted() const { return getSPFlags() & SPFlagDeleted; }

  /// Check if this is reference-qualified.
  ///
  /// Return true if this subprogram is a C++11 reference-qualified non-static
  /// member function (void foo() &).
  bool isLValueReference() const { return getFlags() & FlagLValueReference; }

  /// Check if this is rvalue-reference-qualified.
  ///
  /// Return true if this subprogram is a C++11 rvalue-reference-qualified
  /// non-static member function (void foo() &&).
  bool isRValueReference() const { return getFlags() & FlagRValueReference; }

  /// Check if this is marked as noreturn.
  ///
  /// Return true if this subprogram is C++11 noreturn or C11 _Noreturn.
  bool isNoReturn() const { return getFlags() & FlagNoReturn; }

  /// Check if this routine is a compiler-generated thunk.
  ///
  /// Returns true if this subprogram is a thunk generated by the compiler.
  bool isThunk() const { return getFlags() & FlagThunk; }

  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }

  StringRef getName() const { return getStringOperand(2); }
  StringRef getLinkageName() const { return getStringOperand(3); }
  /// Only used by clients of CloneFunction, and only right after the cloning.
  void replaceLinkageName(MDString *LN) { replaceOperandWith(3, LN); }

  DISubroutineType *getType() const {
    return cast_or_null<DISubroutineType>(getRawType());
  }
  DIType *getContainingType() const {
    return cast_or_null<DIType>(getRawContainingType());
  }
  void replaceType(DISubroutineType *Ty) {
    assert(isDistinct() && "Only distinct nodes can mutate");
    replaceOperandWith(4, Ty);
  }

  DICompileUnit *getUnit() const {
    return cast_or_null<DICompileUnit>(getRawUnit());
  }
  void replaceUnit(DICompileUnit *CU) { replaceOperandWith(5, CU); }
  DITemplateParameterArray getTemplateParams() const {
    return cast_or_null<MDTuple>(getRawTemplateParams());
  }
  DISubprogram *getDeclaration() const {
    return cast_or_null<DISubprogram>(getRawDeclaration());
  }
  DINodeArray getRetainedNodes() const {
    return cast_or_null<MDTuple>(getRawRetainedNodes());
  }
  DITypeArray getThrownTypes() const {
    return cast_or_null<MDTuple>(getRawThrownTypes());
  }
  DINodeArray getAnnotations() const {
    return cast_or_null<MDTuple>(getRawAnnotations());
  }
  StringRef getTargetFuncName() const {
    return (getRawTargetFuncName()) ? getStringOperand(12) : StringRef();
  }

  Metadata *getRawScope() const { return getOperand(1); }
  MDString *getRawName() const { return getOperandAs<MDString>(2); }
  MDString *getRawLinkageName() const { return getOperandAs<MDString>(3); }
  Metadata *getRawType() const { return getOperand(4); }
  Metadata *getRawUnit() const { return getOperand(5); }
  Metadata *getRawDeclaration() const { return getOperand(6); }
  Metadata *getRawRetainedNodes() const { return getOperand(7); }
  Metadata *getRawContainingType() const {
    return getNumOperands() > 8 ? getOperandAs<Metadata>(8) : nullptr;
  }
  Metadata *getRawTemplateParams() const {
    return getNumOperands() > 9 ? getOperandAs<Metadata>(9) : nullptr;
  }
  Metadata *getRawThrownTypes() const {
    return getNumOperands() > 10 ? getOperandAs<Metadata>(10) : nullptr;
  }
  Metadata *getRawAnnotations() const {
    return getNumOperands() > 11 ? getOperandAs<Metadata>(11) : nullptr;
  }
  MDString *getRawTargetFuncName() const {
    return getNumOperands() > 12 ? getOperandAs<MDString>(12) : nullptr;
  }

  void replaceRawLinkageName(MDString *LinkageName) {
    replaceOperandWith(3, LinkageName);
  }
  void replaceRetainedNodes(DINodeArray N) {
    replaceOperandWith(7, N.get());
  }

  /// Check if this subprogram describes the given function.
  ///
  /// FIXME: Should this be looking through bitcasts?
  bool describes(const Function *F) const;

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DISubprogramKind;
  }
};

/// Debug location.
///
/// A debug location in source code, used for debug info and otherwise.
class DILocation : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
             unsigned Column, ArrayRef<Metadata *> MDs, bool ImplicitCode);
  ~DILocation() { dropAllReferences(); }

  static DILocation *getImpl(LLVMContext &Context, unsigned Line,
                             unsigned Column, Metadata *Scope,
                             Metadata *InlinedAt, bool ImplicitCode,
                             StorageType Storage, bool ShouldCreate = true);
  static DILocation *getImpl(LLVMContext &Context, unsigned Line,
                             unsigned Column, DILocalScope *Scope,
                             DILocation *InlinedAt, bool ImplicitCode,
                             StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, Line, Column, static_cast<Metadata *>(Scope),
                   static_cast<Metadata *>(InlinedAt), ImplicitCode, Storage,
                   ShouldCreate);
  }

  TempDILocation cloneImpl() const {
    // Get the raw scope/inlinedAt since it is possible to invoke this on
    // a DILocation containing temporary metadata.
    return getTemporary(getContext(), getLine(), getColumn(), getRawScope(),
                        getRawInlinedAt(), isImplicitCode());
  }

public:
  // Disallow replacing operands.
  void replaceOperandWith(unsigned I, Metadata *New) = delete;

  DEFINE_MDNODE_GET(DILocation,
                    (unsigned Line, unsigned Column, Metadata *Scope,
                     Metadata *InlinedAt = nullptr, bool ImplicitCode = false),
                    (Line, Column, Scope, InlinedAt, ImplicitCode))
  DEFINE_MDNODE_GET(DILocation,
                    (unsigned Line, unsigned Column, DILocalScope *Scope,
                     DILocation *InlinedAt = nullptr,
                     bool ImplicitCode = false),
                    (Line, Column, Scope, InlinedAt, ImplicitCode))
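
  // A hedged creation sketch: a location at line 5, column 3 inside scope `SP`
  // (a hypothetical DISubprogram*), with no inlining; `Ctx` is an assumed
  // LLVMContext.
  //
  //   DILocation *Loc = DILocation::get(Ctx, 5, 3, SP);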

  /// Return a (temporary) clone of this.
  TempDILocation clone() const { return cloneImpl(); }

  unsigned getLine() const { return SubclassData32; }
  unsigned getColumn() const { return SubclassData16; }
  DILocalScope *getScope() const { return cast<DILocalScope>(getRawScope()); }

  /// Return the linkage name of the subprogram. If the linkage name is empty,
  /// return the subprogram name (the demangled name).
  StringRef getSubprogramLinkageName() const {
    DISubprogram *SP = getScope()->getSubprogram();
    if (!SP)
      return "";
    auto Name = SP->getLinkageName();
    if (!Name.empty())
      return Name;
    return SP->getName();
  }

  DILocation *getInlinedAt() const {
    return cast_or_null<DILocation>(getRawInlinedAt());
  }

  /// Check if the location corresponds to implicit code.
  /// When the ImplicitCode flag is true, it means that the Instruction
  /// with this DILocation has been added by the front-end but hasn't been
  /// written explicitly by the user (e.g. cleanup code in C++ emitted at a
  /// closing bracket). It's useful for code coverage to not show a counter on
  /// "empty" lines.
  bool isImplicitCode() const { return SubclassData1; }
  void setImplicitCode(bool ImplicitCode) { SubclassData1 = ImplicitCode; }

  DIFile *getFile() const { return getScope()->getFile(); }
  StringRef getFilename() const { return getScope()->getFilename(); }
  StringRef getDirectory() const { return getScope()->getDirectory(); }
  std::optional<StringRef> getSource() const { return getScope()->getSource(); }

  /// Get the scope where this is inlined.
  ///
  /// Walk through \a getInlinedAt() and return \a getScope() from the deepest
  /// location.
  DILocalScope *getInlinedAtScope() const {
    if (auto *IA = getInlinedAt())
      return IA->getInlinedAtScope();
    return getScope();
  }

  /// Get the DWARF discriminator.
  ///
  /// DWARF discriminators distinguish identical file locations between
  /// instructions that are on different basic blocks.
  ///
  /// There are 3 components stored in the discriminator, starting from the
  /// lowest bits:
  ///
  /// Base discriminator: assigned by AddDiscriminators pass to identify IRs
  ///                     that are defined by the same source line, but
  ///                     different basic blocks.
  /// Duplication factor: assigned by optimizations that will scale down
  ///                     the execution frequency of the original IR.
  /// Copy Identifier: assigned by optimizations that clone the IR.
  ///                  Each copy of the IR will be assigned an identifier.
  ///
  /// Encoding:
  ///
  /// The above 3 components are encoded into a 32-bit unsigned integer in
  /// order. If the lowest bit is 1, the current component is empty, and the
  /// next component will start in the next bit. Otherwise, the current
  /// component is non-empty, and its content starts in the next bit. The
  /// value of each component is either 5 bits or 12 bits: if the 7th bit
  /// is 0, bits 2~6 (5 bits) are used to represent the component; if the
  /// 7th bit is 1, bits 2~6 (5 bits) and bits 8~14 (7 bits) are combined to
  /// represent the component. Thus, the number of bits used for a component
  /// is either 0 (if it and all following components are empty), 1 (if it is
  /// empty), 7 (if its value is up to and including 0x1f; lsb and msb are
  /// both 0), or 14 (if its value is up to and including 0x1ff). Note that
  /// the last component is also capped at 0x1ff, even when the first two
  /// components are 0 and we'd technically have 29 bits available.
  ///
  /// For precise control over the data being encoded in the discriminator,
  /// use encodeDiscriminator/decodeDiscriminator.

  inline unsigned getDiscriminator() const;

  // In the regular discriminator encoding, a value whose lowest 3 bits are all
  // 1 (0x7) represents three empty components, and the remaining 29 bits are
  // unused (zero by default). Pseudo probes fully leverage those higher 29
  // bits. The format is:
  // [2:0]  - 0x7
  // [31:3] - pseudo probe fields, guaranteed to be non-zero as a whole
  // So if the lowest 3 bits are all set and the higher bits contain at least
  // one non-zero bit, the value is guaranteed to be a pseudo probe
  // discriminator.
  inline static bool isPseudoProbeDiscriminator(unsigned Discriminator) {
    return ((Discriminator & 0x7) == 0x7) && (Discriminator & 0xFFFFFFF8);
  }

  /// Returns a new DILocation with updated \p Discriminator.
  inline const DILocation *cloneWithDiscriminator(unsigned Discriminator) const;

  /// Returns a new DILocation with updated base discriminator \p BD. Only the
  /// base discriminator is set in the new DILocation, the other encoded values
  /// are elided.
  /// If the discriminator cannot be encoded, the function returns std::nullopt.
  inline std::optional<const DILocation *>
  cloneWithBaseDiscriminator(unsigned BD) const;

  /// Returns the duplication factor stored in the discriminator, or 1 if no
  /// duplication factor (or 0) is encoded.
  inline unsigned getDuplicationFactor() const;

  /// Returns the copy identifier stored in the discriminator.
  inline unsigned getCopyIdentifier() const;

  /// Returns the base discriminator stored in the discriminator.
  inline unsigned getBaseDiscriminator() const;

  /// Returns a new DILocation with duplication factor \p DF * current
  /// duplication factor encoded in the discriminator. The current duplication
  /// factor is as defined by getDuplicationFactor().
  /// Returns std::nullopt if encoding failed.
  inline std::optional<const DILocation *>
  cloneByMultiplyingDuplicationFactor(unsigned DF) const;

  /// When two instructions are combined into a single instruction we also
  /// need to combine the original locations into a single location.
  /// When the locations are the same we can use either location.
  /// When they differ, we need a third location which is distinct from either.
  /// If they share a common scope, use this scope and compare the line/column
  /// pair of the locations with the common scope:
  /// * if both match, keep the line and column;
  /// * if only the line number matches, keep the line and set the column to 0;
  /// * otherwise set line and column to 0.
  /// If they do not share a common scope the location is ambiguous and can't be
  /// represented in a line entry. In this case, set line and column as 0 and
  /// use the scope of any location.
  ///
  /// \p LocA \p LocB: The locations to be merged.
  static DILocation *getMergedLocation(DILocation *LocA, DILocation *LocB);
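
  // A hedged merging sketch: combining the locations of two instructions that
  // are being folded into one; `I1` and `I2` are hypothetical Instruction*.
  //
  //   DILocation *Merged = DILocation::getMergedLocation(
  //       I1->getDebugLoc(), I2->getDebugLoc());
  //   I1->setDebugLoc(Merged);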

  /// Try to combine the vector of locations passed as input in a single one.
  /// This function applies getMergedLocation() repeatedly left-to-right.
  ///
  /// \p Locs: The locations to be merged.
  static DILocation *getMergedLocations(ArrayRef<DILocation *> Locs);

  /// Return the masked discriminator value for an input discriminator value D,
  /// i.e. zero out the (B+1)-th and higher bits of D (B is 0-based).
  // Example: an input of (0x1FF, 7) returns 0xFF.
  static unsigned getMaskedDiscriminator(unsigned D, unsigned B) {
    return (D & getN1Bits(B));
  }

  /// Return the bits used for base discriminators.
  static unsigned getBaseDiscriminatorBits() { return getBaseFSBitEnd(); }

  /// Returns the base discriminator for a given encoded discriminator \p D.
  static unsigned
  getBaseDiscriminatorFromDiscriminator(unsigned D,
                                        bool IsFSDiscriminator = false) {
    if (IsFSDiscriminator)
      return getMaskedDiscriminator(D, getBaseDiscriminatorBits());
    return getUnsignedFromPrefixEncoding(D);
  }

  /// Raw encoding of the discriminator. APIs such as
  /// cloneByMultiplyingDuplicationFactor have certain special case behavior
  /// (e.g. treating an empty duplication factor as the value '1').
  /// This API, in conjunction with cloneWithDiscriminator, may be used to
  /// encode the raw values provided.
  ///
  /// \p BD: base discriminator
  /// \p DF: duplication factor
  /// \p CI: copy index
  ///
  /// The return is std::nullopt if the values cannot be encoded in 32 bits -
  /// for example, values for BD or DF larger than 12 bits. Otherwise, the
  /// return is the encoded value.
  static std::optional<unsigned> encodeDiscriminator(unsigned BD, unsigned DF,
                                                     unsigned CI);

  /// Raw decoder for values in an encoded discriminator D.
  static void decodeDiscriminator(unsigned D, unsigned &BD, unsigned &DF,
                                  unsigned &CI);
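
  // A hedged round-trip through the raw codec above; the values are chosen to
  // fit each component's small (5-bit) form, so encoding should not fail here.
  //
  //   unsigned BD = 0, DF = 0, CI = 0;
  //   if (std::optional<unsigned> Enc =
  //           DILocation::encodeDiscriminator(/*BD=*/7, /*DF=*/3, /*CI=*/2)) {
  //     DILocation::decodeDiscriminator(*Enc, BD, DF, CI);
  //     assert(BD == 7 && DF == 3 && CI == 2);
  //   }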

  /// Returns the duplication factor for a given encoded discriminator \p D, or
  /// 1 if no value or 0 is encoded.
  static unsigned getDuplicationFactorFromDiscriminator(unsigned D) {
    if (EnableFSDiscriminator)
      return 1;
    D = getNextComponentInDiscriminator(D);
    unsigned Ret = getUnsignedFromPrefixEncoding(D);
    if (Ret == 0)
      return 1;
    return Ret;
  }

  /// Returns the copy identifier for a given encoded discriminator \p D.
  static unsigned getCopyIdentifierFromDiscriminator(unsigned D) {
    return getUnsignedFromPrefixEncoding(
        getNextComponentInDiscriminator(getNextComponentInDiscriminator(D)));
  }

  Metadata *getRawScope() const { return getOperand(0); }
  Metadata *getRawInlinedAt() const {
    if (getNumOperands() == 2)
      return getOperand(1);
    return nullptr;
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILocationKind;
  }
};

class DILexicalBlockBase : public DILocalScope {
protected:
  DILexicalBlockBase(LLVMContext &C, unsigned ID, StorageType Storage,
                     ArrayRef<Metadata *> Ops);
  ~DILexicalBlockBase() = default;

public:
  DILocalScope *getScope() const { return cast<DILocalScope>(getRawScope()); }

  Metadata *getRawScope() const { return getOperand(1); }

  void replaceScope(DIScope *Scope) {
    assert(!isUniqued());
    setOperand(1, Scope);
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILexicalBlockKind ||
           MD->getMetadataID() == DILexicalBlockFileKind;
  }
};

class DILexicalBlock : public DILexicalBlockBase {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;
  uint16_t Column;

  DILexicalBlock(LLVMContext &C, StorageType Storage, unsigned Line,
                 unsigned Column, ArrayRef<Metadata *> Ops)
      : DILexicalBlockBase(C, DILexicalBlockKind, Storage, Ops), Line(Line),
        Column(Column) {
    assert(Column < (1u << 16) && "Expected 16-bit column");
  }
  ~DILexicalBlock() = default;

  static DILexicalBlock *getImpl(LLVMContext &Context, DILocalScope *Scope,
                                 DIFile *File, unsigned Line, unsigned Column,
                                 StorageType Storage,
                                 bool ShouldCreate = true) {
    return getImpl(Context, static_cast<Metadata *>(Scope),
                   static_cast<Metadata *>(File), Line, Column, Storage,
                   ShouldCreate);
  }

  static DILexicalBlock *getImpl(LLVMContext &Context, Metadata *Scope,
                                 Metadata *File, unsigned Line, unsigned Column,
                                 StorageType Storage, bool ShouldCreate = true);

  TempDILexicalBlock cloneImpl() const {
    return getTemporary(getContext(), getScope(), getFile(), getLine(),
                        getColumn());
  }

public:
  DEFINE_MDNODE_GET(DILexicalBlock,
                    (DILocalScope * Scope, DIFile *File, unsigned Line,
                     unsigned Column),
                    (Scope, File, Line, Column))
  DEFINE_MDNODE_GET(DILexicalBlock,
                    (Metadata * Scope, Metadata *File, unsigned Line,
                     unsigned Column),
                    (Scope, File, Line, Column))

  TempDILexicalBlock clone() const { return cloneImpl(); }

  unsigned getLine() const { return Line; }
  unsigned getColumn() const { return Column; }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILexicalBlockKind;
  }
};

class DILexicalBlockFile : public DILexicalBlockBase {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Discriminator;

  DILexicalBlockFile(LLVMContext &C, StorageType Storage,
                     unsigned Discriminator, ArrayRef<Metadata *> Ops)
      : DILexicalBlockBase(C, DILexicalBlockFileKind, Storage, Ops),
        Discriminator(Discriminator) {}
  ~DILexicalBlockFile() = default;

  static DILexicalBlockFile *getImpl(LLVMContext &Context, DILocalScope *Scope,
                                     DIFile *File, unsigned Discriminator,
                                     StorageType Storage,
                                     bool ShouldCreate = true) {
    return getImpl(Context, static_cast<Metadata *>(Scope),
                   static_cast<Metadata *>(File), Discriminator, Storage,
                   ShouldCreate);
  }

  static DILexicalBlockFile *getImpl(LLVMContext &Context, Metadata *Scope,
                                     Metadata *File, unsigned Discriminator,
                                     StorageType Storage,
                                     bool ShouldCreate = true);

  TempDILexicalBlockFile cloneImpl() const {
    return getTemporary(getContext(), getScope(), getFile(),
                        getDiscriminator());
  }

public:
  DEFINE_MDNODE_GET(DILexicalBlockFile,
                    (DILocalScope * Scope, DIFile *File,
                     unsigned Discriminator),
                    (Scope, File, Discriminator))
  DEFINE_MDNODE_GET(DILexicalBlockFile,
                    (Metadata * Scope, Metadata *File, unsigned Discriminator),
                    (Scope, File, Discriminator))

  TempDILexicalBlockFile clone() const { return cloneImpl(); }
  unsigned getDiscriminator() const { return Discriminator; }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILexicalBlockFileKind;
  }
};

unsigned DILocation::getDiscriminator() const {
  if (auto *F = dyn_cast<DILexicalBlockFile>(getScope()))
    return F->getDiscriminator();
  return 0;
}

const DILocation *
DILocation::cloneWithDiscriminator(unsigned Discriminator) const {
  DIScope *Scope = getScope();
  // Skip all parent DILexicalBlockFile that already have a discriminator
  // assigned. We do not want nested DILexicalBlockFiles to carry multiple
  // discriminators, because only the leaf DILexicalBlockFile's discriminator
  // will be used.
  for (auto *LBF = dyn_cast<DILexicalBlockFile>(Scope);
       LBF && LBF->getDiscriminator() != 0;
       LBF = dyn_cast<DILexicalBlockFile>(Scope))
    Scope = LBF->getScope();
  DILexicalBlockFile *NewScope =
      DILexicalBlockFile::get(getContext(), Scope, getFile(), Discriminator);
  return DILocation::get(getContext(), getLine(), getColumn(), NewScope,
                         getInlinedAt());
}
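
// Illustrative sketch (editor's addition): distinguishing one copy of a
// source location, e.g. after code duplication. The clone wraps the scope in
// a DILexicalBlockFile, which getDiscriminator() above reads back. `Loc` is
// assumed to be non-null; the helper name is hypothetical.
inline const DILocation *exampleDistinguish(const DILocation *Loc) {
  const DILocation *Cloned = Loc->cloneWithDiscriminator(/*Discriminator=*/1);
  assert(Cloned->getDiscriminator() == 1 && "read back via the scope chain");
  return Cloned;
}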

unsigned DILocation::getBaseDiscriminator() const {
  return getBaseDiscriminatorFromDiscriminator(getDiscriminator(),
                                               EnableFSDiscriminator);
}

unsigned DILocation::getDuplicationFactor() const {
  return getDuplicationFactorFromDiscriminator(getDiscriminator());
}

unsigned DILocation::getCopyIdentifier() const {
  return getCopyIdentifierFromDiscriminator(getDiscriminator());
}

std::optional<const DILocation *>
DILocation::cloneWithBaseDiscriminator(unsigned D) const {
  unsigned BD, DF, CI;

  if (EnableFSDiscriminator) {
    BD = getBaseDiscriminator();
    if (D == BD)
      return this;
    return cloneWithDiscriminator(D);
  }

  decodeDiscriminator(getDiscriminator(), BD, DF, CI);
  if (D == BD)
    return this;
  if (std::optional<unsigned> Encoded = encodeDiscriminator(D, DF, CI))
    return cloneWithDiscriminator(*Encoded);
  return std::nullopt;
}

std::optional<const DILocation *>
DILocation::cloneByMultiplyingDuplicationFactor(unsigned DF) const {
  assert(!EnableFSDiscriminator && "FSDiscriminator should not call this.");
  // Do not interfere with pseudo probes. Pseudo probes don't need duplication
  // factor support, as samples collected on cloned probes will be aggregated.
  // Also, a pseudo probe at a callsite uses the DWARF discriminator to store
  // pseudo-probe related information, such as the probe id.
  if (isPseudoProbeDiscriminator(getDiscriminator()))
    return this;

  DF *= getDuplicationFactor();
  if (DF <= 1)
    return this;

  unsigned BD = getBaseDiscriminator();
  unsigned CI = getCopyIdentifier();
  if (std::optional<unsigned> D = encodeDiscriminator(BD, DF, CI))
    return cloneWithDiscriminator(*D);
  return std::nullopt;
}
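
// Illustrative sketch (editor's addition): updating a location whose code has
// been duplicated, e.g. by unrolling a loop `Factor` times. Only valid when
// FS discriminators are disabled (see the assert above). Encoding the new
// duplication factor can overflow, in which case keeping the original
// location is a reasonable fallback; names here are hypothetical.
inline const DILocation *exampleApplyDuplication(const DILocation *Loc,
                                                 unsigned Factor) {
  if (std::optional<const DILocation *> New =
          Loc->cloneByMultiplyingDuplicationFactor(Factor))
    return *New;
  return Loc; // Discriminator encoding overflowed; keep the original.
}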

class DINamespace : public DIScope {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned ExportSymbols : 1;

  DINamespace(LLVMContext &Context, StorageType Storage, bool ExportSymbols,
              ArrayRef<Metadata *> Ops);
  ~DINamespace() = default;

  static DINamespace *getImpl(LLVMContext &Context, DIScope *Scope,
                              StringRef Name, bool ExportSymbols,
                              StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
                   ExportSymbols, Storage, ShouldCreate);
  }
  static DINamespace *getImpl(LLVMContext &Context, Metadata *Scope,
                              MDString *Name, bool ExportSymbols,
                              StorageType Storage, bool ShouldCreate = true);

  TempDINamespace cloneImpl() const {
    return getTemporary(getContext(), getScope(), getName(),
                        getExportSymbols());
  }

public:
  DEFINE_MDNODE_GET(DINamespace,
                    (DIScope * Scope, StringRef Name, bool ExportSymbols),
                    (Scope, Name, ExportSymbols))
  DEFINE_MDNODE_GET(DINamespace,
                    (Metadata * Scope, MDString *Name, bool ExportSymbols),
                    (Scope, Name, ExportSymbols))

  TempDINamespace clone() const { return cloneImpl(); }

  bool getExportSymbols() const { return ExportSymbols; }
  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
  StringRef getName() const { return getStringOperand(2); }

  Metadata *getRawScope() const { return getOperand(1); }
  MDString *getRawName() const { return getOperandAs<MDString>(2); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DINamespaceKind;
  }
};
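
// Illustrative sketch (editor's addition): modelling `namespace detail`
// nested under `Parent`. A C++ inline namespace would typically set
// ExportSymbols to true. `Ctx` and `Parent` are assumed to come from the
// caller; the function name is hypothetical.
inline DINamespace *exampleGetNamespace(LLVMContext &Ctx, DIScope *Parent) {
  return DINamespace::get(Ctx, Parent, "detail", /*ExportSymbols=*/false);
}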

/// Represents a module in the programming language, for example, a Clang
/// module or a Fortran module.
class DIModule : public DIScope {
  friend class LLVMContextImpl;
  friend class MDNode;
  unsigned LineNo;
  bool IsDecl;

  DIModule(LLVMContext &Context, StorageType Storage, unsigned LineNo,
           bool IsDecl, ArrayRef<Metadata *> Ops);
  ~DIModule() = default;

  static DIModule *getImpl(LLVMContext &Context, DIFile *File, DIScope *Scope,
                           StringRef Name, StringRef ConfigurationMacros,
                           StringRef IncludePath, StringRef APINotesFile,
                           unsigned LineNo, bool IsDecl, StorageType Storage,
                           bool ShouldCreate = true) {
    return getImpl(Context, File, Scope, getCanonicalMDString(Context, Name),
                   getCanonicalMDString(Context, ConfigurationMacros),
                   getCanonicalMDString(Context, IncludePath),
                   getCanonicalMDString(Context, APINotesFile), LineNo, IsDecl,
                   Storage, ShouldCreate);
  }
  static DIModule *getImpl(LLVMContext &Context, Metadata *File,
                           Metadata *Scope, MDString *Name,
                           MDString *ConfigurationMacros, MDString *IncludePath,
                           MDString *APINotesFile, unsigned LineNo, bool IsDecl,
                           StorageType Storage, bool ShouldCreate = true);

  TempDIModule cloneImpl() const {
    return getTemporary(getContext(), getFile(), getScope(), getName(),
                        getConfigurationMacros(), getIncludePath(),
                        getAPINotesFile(), getLineNo(), getIsDecl());
  }

public:
  DEFINE_MDNODE_GET(DIModule,
                    (DIFile * File, DIScope *Scope, StringRef Name,
                     StringRef ConfigurationMacros, StringRef IncludePath,
                     StringRef APINotesFile, unsigned LineNo,
                     bool IsDecl = false),
                    (File, Scope, Name, ConfigurationMacros, IncludePath,
                     APINotesFile, LineNo, IsDecl))
  DEFINE_MDNODE_GET(DIModule,
                    (Metadata * File, Metadata *Scope, MDString *Name,
                     MDString *ConfigurationMacros, MDString *IncludePath,
                     MDString *APINotesFile, unsigned LineNo,
                     bool IsDecl = false),
                    (File, Scope, Name, ConfigurationMacros, IncludePath,
                     APINotesFile, LineNo, IsDecl))

  TempDIModule clone() const { return cloneImpl(); }

  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
  StringRef getName() const { return getStringOperand(2); }
  StringRef getConfigurationMacros() const { return getStringOperand(3); }
  StringRef getIncludePath() const { return getStringOperand(4); }
  StringRef getAPINotesFile() const { return getStringOperand(5); }
  unsigned getLineNo() const { return LineNo; }
  bool getIsDecl() const { return IsDecl; }

  Metadata *getRawScope() const { return getOperand(1); }
  MDString *getRawName() const { return getOperandAs<MDString>(2); }
  MDString *getRawConfigurationMacros() const {
    return getOperandAs<MDString>(3);
  }
  MDString *getRawIncludePath() const { return getOperandAs<MDString>(4); }
  MDString *getRawAPINotesFile() const { return getOperandAs<MDString>(5); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIModuleKind;
  }
};

/// Base class for template parameters.
class DITemplateParameter : public DINode {
protected:
  bool IsDefault;

  DITemplateParameter(LLVMContext &Context, unsigned ID, StorageType Storage,
                      unsigned Tag, bool IsDefault, ArrayRef<Metadata *> Ops)
      : DINode(Context, ID, Storage, Tag, Ops), IsDefault(IsDefault) {}
  ~DITemplateParameter() = default;

public:
  StringRef getName() const { return getStringOperand(0); }
  DIType *getType() const { return cast_or_null<DIType>(getRawType()); }

  MDString *getRawName() const { return getOperandAs<MDString>(0); }
  Metadata *getRawType() const { return getOperand(1); }
  bool isDefault() const { return IsDefault; }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DITemplateTypeParameterKind ||
           MD->getMetadataID() == DITemplateValueParameterKind;
  }
};

class DITemplateTypeParameter : public DITemplateParameter {
  friend class LLVMContextImpl;
  friend class MDNode;

  DITemplateTypeParameter(LLVMContext &Context, StorageType Storage,
                          bool IsDefault, ArrayRef<Metadata *> Ops);
  ~DITemplateTypeParameter() = default;

  static DITemplateTypeParameter *getImpl(LLVMContext &Context, StringRef Name,
                                          DIType *Type, bool IsDefault,
                                          StorageType Storage,
                                          bool ShouldCreate = true) {
    return getImpl(Context, getCanonicalMDString(Context, Name), Type,
                   IsDefault, Storage, ShouldCreate);
  }
  static DITemplateTypeParameter *getImpl(LLVMContext &Context, MDString *Name,
                                          Metadata *Type, bool IsDefault,
                                          StorageType Storage,
                                          bool ShouldCreate = true);

  TempDITemplateTypeParameter cloneImpl() const {
    return getTemporary(getContext(), getName(), getType(), isDefault());
  }

public:
  DEFINE_MDNODE_GET(DITemplateTypeParameter,
                    (StringRef Name, DIType *Type, bool IsDefault),
                    (Name, Type, IsDefault))
  DEFINE_MDNODE_GET(DITemplateTypeParameter,
                    (MDString * Name, Metadata *Type, bool IsDefault),
                    (Name, Type, IsDefault))

  TempDITemplateTypeParameter clone() const { return cloneImpl(); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DITemplateTypeParameterKind;
  }
};

class DITemplateValueParameter : public DITemplateParameter {
  friend class LLVMContextImpl;
  friend class MDNode;

  DITemplateValueParameter(LLVMContext &Context, StorageType Storage,
                           unsigned Tag, bool IsDefault,
                           ArrayRef<Metadata *> Ops)
      : DITemplateParameter(Context, DITemplateValueParameterKind, Storage, Tag,
                            IsDefault, Ops) {}
  ~DITemplateValueParameter() = default;

  static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
                                           StringRef Name, DIType *Type,
                                           bool IsDefault, Metadata *Value,
                                           StorageType Storage,
                                           bool ShouldCreate = true) {
    return getImpl(Context, Tag, getCanonicalMDString(Context, Name), Type,
                   IsDefault, Value, Storage, ShouldCreate);
  }
  static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
                                           MDString *Name, Metadata *Type,
                                           bool IsDefault, Metadata *Value,
                                           StorageType Storage,
                                           bool ShouldCreate = true);

  TempDITemplateValueParameter cloneImpl() const {
    return getTemporary(getContext(), getTag(), getName(), getType(),
                        isDefault(), getValue());
  }

public:
  DEFINE_MDNODE_GET(DITemplateValueParameter,
                    (unsigned Tag, StringRef Name, DIType *Type, bool IsDefault,
                     Metadata *Value),
                    (Tag, Name, Type, IsDefault, Value))
  DEFINE_MDNODE_GET(DITemplateValueParameter,
                    (unsigned Tag, MDString *Name, Metadata *Type,
                     bool IsDefault, Metadata *Value),
                    (Tag, Name, Type, IsDefault, Value))

  TempDITemplateValueParameter clone() const { return cloneImpl(); }

  Metadata *getValue() const { return getOperand(2); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DITemplateValueParameterKind;
  }
};
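
// Illustrative sketch (editor's addition, not part of the header): building
// the parameter list for something like `template <typename T, int N = 4>`.
// Assumes `IntTy` is a DIBasicType for `int` and that llvm/IR/Constants.h is
// available for ConstantInt and Type; the function name is hypothetical.
inline void exampleTemplateParams(LLVMContext &Ctx, DIType *IntTy) {
  // Type parameter T, explicitly bound rather than defaulted.
  DITemplateTypeParameter *T =
      DITemplateTypeParameter::get(Ctx, "T", IntTy, /*IsDefault=*/false);
  // Value parameter N with its default value 4, wrapped as metadata.
  Metadata *Four =
      ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Ctx), 4));
  DITemplateValueParameter *N = DITemplateValueParameter::get(
      Ctx, dwarf::DW_TAG_template_value_parameter, "N", IntTy,
      /*IsDefault=*/true, Four);
  (void)T;
  (void)N;
}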

/// Base class for variables.
class DIVariable : public DINode {
  unsigned Line;
  uint32_t AlignInBits;

protected:
  DIVariable(LLVMContext &C, unsigned ID, StorageType Storage, signed Line,
             ArrayRef<Metadata *> Ops, uint32_t AlignInBits = 0);
  ~DIVariable() = default;

public:
  unsigned getLine() const { return Line; }
  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
  StringRef getName() const { return getStringOperand(1); }
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
  DIType *getType() const { return cast_or_null<DIType>(getRawType()); }
  uint32_t getAlignInBits() const { return AlignInBits; }
  uint32_t getAlignInBytes() const { return getAlignInBits() / CHAR_BIT; }
  /// Determines the size of the variable's type.
  std::optional<uint64_t> getSizeInBits() const;

  /// Return the signedness of this variable's type, or std::nullopt if this
  /// type is neither signed nor unsigned.
  std::optional<DIBasicType::Signedness> getSignedness() const {
    if (auto *BT = dyn_cast<DIBasicType>(getType()))
      return BT->getSignedness();
    return std::nullopt;
  }

  StringRef getFilename() const {
    if (auto *F = getFile())
      return F->getFilename();
    return "";
  }

  StringRef getDirectory() const {
    if (auto *F = getFile())
      return F->getDirectory();
    return "";
  }

  std::optional<StringRef> getSource() const {
    if (auto *F = getFile())
      return F->getSource();
    return std::nullopt;
  }

  Metadata *getRawScope() const { return getOperand(0); }
  MDString *getRawName() const { return getOperandAs<MDString>(1); }
  Metadata *getRawFile() const { return getOperand(2); }
  Metadata *getRawType() const { return getOperand(3); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILocalVariableKind ||
           MD->getMetadataID() == DIGlobalVariableKind;
  }
};
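
// Illustrative sketch (editor's addition): querying the shared DIVariable
// interface; works for both local and global variables. Assumes `V` is
// non-null and has a type attached; the helper name is hypothetical.
inline bool exampleIsSignedVariable(const DIVariable *V) {
  std::optional<DIBasicType::Signedness> S = V->getSignedness();
  return S && *S == DIBasicType::Signedness::Signed;
}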

/// DWARF expression.
///
/// This is (almost) a DWARF expression that modifies the location of a
/// variable, or the location of a single piece of a variable, or (when using
/// DW_OP_stack_value) is the constant variable value.
///
/// TODO: Co-allocate the expression elements.
/// TODO: Separate from MDNode, or otherwise drop Distinct and Temporary
/// storage types.
class DIExpression : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  std::vector<uint64_t> Elements;

  DIExpression(LLVMContext &C, StorageType Storage, ArrayRef<uint64_t> Elements)
      : MDNode(C, DIExpressionKind, Storage, std::nullopt),
        Elements(Elements.begin(), Elements.end()) {}
  ~DIExpression() = default;

  static DIExpression *getImpl(LLVMContext &Context,
                               ArrayRef<uint64_t> Elements, StorageType Storage,
                               bool ShouldCreate = true);

  TempDIExpression cloneImpl() const {
    return getTemporary(getContext(), getElements());
  }

public:
  DEFINE_MDNODE_GET(DIExpression, (ArrayRef<uint64_t> Elements), (Elements))

  TempDIExpression clone() const { return cloneImpl(); }

  ArrayRef<uint64_t> getElements() const { return Elements; }

  unsigned getNumElements() const { return Elements.size(); }

  uint64_t getElement(unsigned I) const {
    assert(I < Elements.size() && "Index out of range");
    return Elements[I];
  }

  enum SignedOrUnsignedConstant { SignedConstant, UnsignedConstant };
  /// Determine whether this represents a constant value; if so, return its
  /// sign information.
  std::optional<SignedOrUnsignedConstant> isConstant() const;

  /// Return the number of unique location operands referred to (via
  /// DW_OP_LLVM_arg) in this expression; this is not necessarily the number of
  /// instances of DW_OP_LLVM_arg within the expression.
  /// For example, for the expression:
  ///   (DW_OP_LLVM_arg 0, DW_OP_LLVM_arg 1, DW_OP_plus,
  ///    DW_OP_LLVM_arg 0, DW_OP_mul)
  /// This function would return 2, as there are two unique location operands
  /// (0 and 1).
  uint64_t getNumLocationOperands() const;

  using element_iterator = ArrayRef<uint64_t>::iterator;

  element_iterator elements_begin() const { return getElements().begin(); }
  element_iterator elements_end() const { return getElements().end(); }

  /// A lightweight wrapper around an expression operand.
  ///
  /// TODO: Store arguments directly and change \a DIExpression to store a
  /// range of these.
  class ExprOperand {
    const uint64_t *Op = nullptr;

  public:
    ExprOperand() = default;
    explicit ExprOperand(const uint64_t *Op) : Op(Op) {}

    const uint64_t *get() const { return Op; }

    /// Get the operand code.
    uint64_t getOp() const { return *Op; }

    /// Get an argument to the operand.
    ///
    /// Never returns the operand itself.
    uint64_t getArg(unsigned I) const { return Op[I + 1]; }

    unsigned getNumArgs() const { return getSize() - 1; }

    /// Return the size of the operand.
    ///
    /// Return the number of elements in the operand (1 + args).
    unsigned getSize() const;

    /// Append the elements of this operand to \p V.
    void appendToVector(SmallVectorImpl<uint64_t> &V) const {
      V.append(get(), get() + getSize());
    }
  };

  /// An iterator for expression operands.
  class expr_op_iterator {
    ExprOperand Op;

  public:
    using iterator_category = std::input_iterator_tag;
    using value_type = ExprOperand;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    expr_op_iterator() = default;
    explicit expr_op_iterator(element_iterator I) : Op(I) {}

    element_iterator getBase() const { return Op.get(); }
    const ExprOperand &operator*() const { return Op; }
    const ExprOperand *operator->() const { return &Op; }

    expr_op_iterator &operator++() {
      increment();
      return *this;
    }
    expr_op_iterator operator++(int) {
      expr_op_iterator T(*this);
      increment();
      return T;
    }

    /// Get the next iterator.
    ///
    /// \a std::next() doesn't work because this is technically an
    /// input_iterator, but it's a perfectly valid operation.  This is an
    /// accessor to provide the same functionality.
    expr_op_iterator getNext() const { return ++expr_op_iterator(*this); }

    bool operator==(const expr_op_iterator &X) const {
      return getBase() == X.getBase();
    }
    bool operator!=(const expr_op_iterator &X) const {
      return getBase() != X.getBase();
    }

  private:
    void increment() { Op = ExprOperand(getBase() + Op.getSize()); }
  };

  /// Visit the elements via ExprOperand wrappers.
  ///
  /// These range iterators visit elements through \a ExprOperand wrappers.
  /// This is not guaranteed to be a valid range unless \a isValid() gives \c
  /// true.
  ///
  /// \pre \a isValid() gives \c true.
  /// @{
  expr_op_iterator expr_op_begin() const {
    return expr_op_iterator(elements_begin());
  }
  expr_op_iterator expr_op_end() const {
    return expr_op_iterator(elements_end());
  }
  iterator_range<expr_op_iterator> expr_ops() const {
    return {expr_op_begin(), expr_op_end()};
  }
  /// @}

  bool isValid() const;

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIExpressionKind;
  }

  /// Return whether the first element is a DW_OP_deref.
  bool startsWithDeref() const;

  /// Return whether there is exactly one operator and it is a DW_OP_deref.
  bool isDeref() const;

  /// Holds the characteristics of one fragment of a larger variable.
  struct FragmentInfo {
    FragmentInfo() = default;
    FragmentInfo(uint64_t SizeInBits, uint64_t OffsetInBits)
        : SizeInBits(SizeInBits), OffsetInBits(OffsetInBits) {}
    uint64_t SizeInBits;
    uint64_t OffsetInBits;
    /// Return the index of the first bit of the fragment.
    uint64_t startInBits() const { return OffsetInBits; }
    /// Return the index of the bit after the end of the fragment, e.g. for
    /// fragment offset=16 and size=32 return their sum, 48.
    uint64_t endInBits() const { return OffsetInBits + SizeInBits; }

    /// Returns a zero-sized fragment if A and B don't intersect.
    static DIExpression::FragmentInfo intersect(DIExpression::FragmentInfo A,
                                                DIExpression::FragmentInfo B) {
      uint64_t StartInBits = std::max(A.OffsetInBits, B.OffsetInBits);
      uint64_t EndInBits = std::min(A.endInBits(), B.endInBits());
      if (EndInBits <= StartInBits)
        return {0, 0};
      return DIExpression::FragmentInfo(EndInBits - StartInBits, StartInBits);
    }
  };

  /// Retrieve the details of this fragment expression.
  static std::optional<FragmentInfo> getFragmentInfo(expr_op_iterator Start,
                                                     expr_op_iterator End);

  /// Retrieve the details of this fragment expression.
  std::optional<FragmentInfo> getFragmentInfo() const {
    return getFragmentInfo(expr_op_begin(), expr_op_end());
  }

  /// Return whether this is a piece of an aggregate variable.
  bool isFragment() const { return getFragmentInfo().has_value(); }

  /// Return whether this is an implicit location description.
  bool isImplicit() const;

  /// Return whether the location is computed on the expression stack, meaning
  /// it cannot be a simple register location.
  bool isComplex() const;

  /// Return whether the evaluated expression makes use of a single location at
  /// the start of the expression, i.e. if it contains only a single
  /// DW_OP_LLVM_arg op as its first operand, or if it contains none.
  bool isSingleLocationExpression() const;

  /// Removes all elements from \p Expr that do not apply to an undef debug
  /// value, which includes every operator that computes the value/location on
  /// the DWARF stack, including any DW_OP_LLVM_arg elements (making the result
  /// of this function always a single-location expression) while leaving
  /// everything that defines what the computed value applies to, i.e. the
  /// fragment information.
  static const DIExpression *convertToUndefExpression(const DIExpression *Expr);

  /// If \p Expr is a non-variadic expression (i.e. one that does not contain
  /// DW_OP_LLVM_arg), returns \p Expr converted to variadic form by adding a
  /// leading [DW_OP_LLVM_arg, 0] to the expression; otherwise returns \p Expr.
  static const DIExpression *
  convertToVariadicExpression(const DIExpression *Expr);

  /// If \p Expr is a valid single-location expression, i.e. it refers to only a
  /// single debug operand at the start of the expression, then return that
  /// expression in a non-variadic form by removing DW_OP_LLVM_arg from the
  /// expression if it is present; otherwise returns std::nullopt.
  static std::optional<const DIExpression *>
  convertToNonVariadicExpression(const DIExpression *Expr);

  /// Inserts the elements of \p Expr into \p Ops modified to a canonical form,
  /// which uses DW_OP_LLVM_arg (i.e. is a variadic expression) and folds the
  /// implied dereference from the \p IsIndirect flag into the expression. This
  /// allows us to check equivalence between expressions with differing
  /// directness or variadicness.
  static void canonicalizeExpressionOps(SmallVectorImpl<uint64_t> &Ops,
                                        const DIExpression *Expr,
                                        bool IsIndirect);

  /// Determines whether two debug values should produce equivalent DWARF
  /// expressions, using their DIExpressions and directness, ignoring the
  /// differences between otherwise identical expressions in variadic and
  /// non-variadic form and not considering the debug operands.
  /// \p FirstExpr is the DIExpression for the first debug value.
  /// \p FirstIndirect should be true if the first debug value is indirect; in
  /// IR this should be true for dbg.declare intrinsics and false for
  /// dbg.values, and in MIR this should be true only for DBG_VALUE instructions
  /// whose second operand is an immediate value.
  /// \p SecondExpr and \p SecondIndirect have the same meaning as the prior
  /// arguments, but apply to the second debug value.
  static bool isEqualExpression(const DIExpression *FirstExpr,
                                bool FirstIndirect,
                                const DIExpression *SecondExpr,
                                bool SecondIndirect);

  /// Append \p Ops with operations to apply the \p Offset.
  static void appendOffset(SmallVectorImpl<uint64_t> &Ops, int64_t Offset);

  /// If this is a constant offset, extract it. If there is no expression,
  /// return true with an offset of zero.
  bool extractIfOffset(int64_t &Offset) const;

  /// Returns true iff this DIExpression contains at least one instance of
  /// `DW_OP_LLVM_arg, n` for all n in [0, N).
  bool hasAllLocationOps(unsigned N) const;

  /// Checks if the last 4 elements of the expression are DW_OP_constu <DWARF
  /// Address Space>, DW_OP_swap, DW_OP_xderef, and extracts the <DWARF
  /// Address Space>.
  static const DIExpression *extractAddressClass(const DIExpression *Expr,
                                                 unsigned &AddrClass);

  /// Used for DIExpression::prepend.
  enum PrependOps : uint8_t {
    ApplyOffset = 0,
    DerefBefore = 1 << 0,
    DerefAfter = 1 << 1,
    StackValue = 1 << 2,
    EntryValue = 1 << 3
  };

  /// Prepend \p Expr with a deref and offset operation and optionally turn it
  /// into a stack value and/or an entry value.
  static DIExpression *prepend(const DIExpression *Expr, uint8_t Flags,
                               int64_t Offset = 0);

  /// Prepend \p Expr with the given opcodes and optionally turn it into a
  /// stack value.
  static DIExpression *prependOpcodes(const DIExpression *Expr,
                                      SmallVectorImpl<uint64_t> &Ops,
                                      bool StackValue = false,
                                      bool EntryValue = false);

  /// Append the opcodes \p Ops to \p Expr. Unlike \ref appendToStack, the
  /// returned expression is a stack value only if \p Expr is a stack value.
  /// If \p Expr describes a fragment, the returned expression will describe
  /// the same fragment.
  static DIExpression *append(const DIExpression *Expr, ArrayRef<uint64_t> Ops);

  /// Convert \p Expr into a stack value if it isn't one already by appending
  /// DW_OP_deref if needed, and appending \p Ops to the resulting expression.
  /// If \p Expr describes a fragment, the returned expression will describe
  /// the same fragment.
  static DIExpression *appendToStack(const DIExpression *Expr,
                                     ArrayRef<uint64_t> Ops);

  /// Create a copy of \p Expr by appending the given list of \p Ops to each
  /// instance of the operand `DW_OP_LLVM_arg, \p ArgNo`. This is used to
  /// modify a specific location used by \p Expr, such as when salvaging that
  /// location.
  static DIExpression *appendOpsToArg(const DIExpression *Expr,
                                      ArrayRef<uint64_t> Ops, unsigned ArgNo,
                                      bool StackValue = false);

  /// Create a copy of \p Expr with each instance of
  /// `DW_OP_LLVM_arg, \p OldArg` replaced with `DW_OP_LLVM_arg, \p NewArg`,
  /// and each instance of `DW_OP_LLVM_arg, Arg` with `DW_OP_LLVM_arg, Arg - 1`
  /// for all Arg > \p OldArg.
  /// This is used when replacing one of the operands of a debug value list
  /// with another operand in the same list and deleting the old operand.
  static DIExpression *replaceArg(const DIExpression *Expr, uint64_t OldArg,
                                  uint64_t NewArg);

  /// Create a DIExpression to describe one part of an aggregate variable that
  /// is fragmented across multiple Values. The DW_OP_LLVM_fragment operation
  /// will be appended to the elements of \c Expr. If \c Expr already contains
  /// a \c DW_OP_LLVM_fragment \c OffsetInBits is interpreted as an offset
  /// into the existing fragment.
  ///
  /// \param OffsetInBits Offset of the piece in bits.
  /// \param SizeInBits   Size of the piece in bits.
  /// \return             The new fragment expression, or std::nullopt if
  ///                     \c Expr contains arithmetic operations that would be
  ///                     truncated by the fragment.
  static std::optional<DIExpression *>
  createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits,
                           unsigned SizeInBits);

  /// Determine the relative position of the fragments passed in.
  /// Returns -1 if A is entirely before B, 0 if A and B overlap,
  /// and 1 if A is entirely after B.
  static int fragmentCmp(const FragmentInfo &A, const FragmentInfo &B) {
    uint64_t L1 = A.OffsetInBits;
    uint64_t L2 = B.OffsetInBits;
    uint64_t R1 = L1 + A.SizeInBits;
    uint64_t R2 = L2 + B.SizeInBits;
    if (R1 <= L2)
      return -1;
    if (R2 <= L1)
      return 1;
    return 0;
  }

  using ExtOps = std::array<uint64_t, 6>;

  /// Returns the ops for a zero- or sign-extension in a DIExpression.
  static ExtOps getExtOps(unsigned FromSize, unsigned ToSize, bool Signed);

  /// Append a zero- or sign-extension to \p Expr. Converts the expression to a
  /// stack value if it isn't one already.
  static DIExpression *appendExt(const DIExpression *Expr, unsigned FromSize,
                                 unsigned ToSize, bool Signed);

  /// Check if fragments overlap between a pair of FragmentInfos.
  static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B) {
    return fragmentCmp(A, B) == 0;
  }

  /// Determine the relative position of the fragments described by this
  /// DIExpression and \p Other. Both expressions must describe fragments;
  /// delegates to the static fragmentCmp implementation.
  int fragmentCmp(const DIExpression *Other) const {
    auto Fragment1 = *getFragmentInfo();
    auto Fragment2 = *Other->getFragmentInfo();
    return fragmentCmp(Fragment1, Fragment2);
  }

  /// Check if fragments overlap between this DIExpression and \p Other.
  bool fragmentsOverlap(const DIExpression *Other) const {
    if (!isFragment() || !Other->isFragment())
      return true;
    return fragmentCmp(Other) == 0;
  }

  /// Check if the expression consists of exactly one entry value operand.
  /// (This is the only configuration of entry values that is supported.)
  bool isEntryValue() const;

  /// Try to shorten an expression with an initial constant operand.
  /// Returns a new expression and constant on success, or the original
  /// expression and constant on failure.
  std::pair<DIExpression *, const ConstantInt *>
  constantFold(const ConstantInt *CI);
};
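
// Illustrative sketch (editor's addition, not part of the header): building
// and transforming expressions with the factory and static helpers above.
// `Ctx` is assumed to be a live LLVMContext; the function name is
// hypothetical.
inline void exampleBuildExpressions(LLVMContext &Ctx) {
  // The empty expression: the variable's value is the located value itself.
  DIExpression *Empty = DIExpression::get(Ctx, {});

  // "Value of the location plus 8": spelled directly with DWARF opcodes...
  DIExpression *PlusEight =
      DIExpression::get(Ctx, {dwarf::DW_OP_plus_uconst, 8});
  // ...or equivalently via the prepend helper (ApplyOffset is flag 0).
  assert(PlusEight ==
             DIExpression::prepend(Empty, DIExpression::ApplyOffset, 8) &&
         "uniqued nodes compare equal by pointer");
  (void)Empty;

  // Describe bits [32, 64) of a fragmented aggregate; this can fail if the
  // expression contains arithmetic that the fragment would truncate.
  if (std::optional<DIExpression *> Frag =
          DIExpression::createFragmentExpression(PlusEight,
                                                 /*OffsetInBits=*/32,
                                                 /*SizeInBits=*/32))
    assert((*Frag)->isFragment());
}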

inline bool operator==(const DIExpression::FragmentInfo &A,
                       const DIExpression::FragmentInfo &B) {
  return std::tie(A.SizeInBits, A.OffsetInBits) ==
         std::tie(B.SizeInBits, B.OffsetInBits);
}

inline bool operator<(const DIExpression::FragmentInfo &A,
                      const DIExpression::FragmentInfo &B) {
  return std::tie(A.SizeInBits, A.OffsetInBits) <
         std::tie(B.SizeInBits, B.OffsetInBits);
}

template <> struct DenseMapInfo<DIExpression::FragmentInfo> {
  using FragInfo = DIExpression::FragmentInfo;
  static const uint64_t MaxVal = std::numeric_limits<uint64_t>::max();

  static inline FragInfo getEmptyKey() { return {MaxVal, MaxVal}; }

  static inline FragInfo getTombstoneKey() { return {MaxVal - 1, MaxVal - 1}; }

  static unsigned getHashValue(const FragInfo &Frag) {
    return (Frag.SizeInBits & 0xffff) << 16 | (Frag.OffsetInBits & 0xffff);
  }

  static bool isEqual(const FragInfo &A, const FragInfo &B) { return A == B; }
};
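
// Illustrative sketch (editor's addition): the specialization above lets
// FragmentInfo key a DenseMap directly, e.g. for counting debug uses per
// fragment. Assumes llvm/ADT/DenseMap.h is available; names are hypothetical.
inline unsigned
exampleCountFragmentUse(DenseMap<DIExpression::FragmentInfo, unsigned> &Counts,
                        DIExpression::FragmentInfo Frag) {
  return ++Counts[Frag]; // Value-initialized to 0 on first insertion.
}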

/// Global variables.
///
/// TODO: Remove DisplayName.  It's always equal to Name.
class DIGlobalVariable : public DIVariable {
  friend class LLVMContextImpl;
  friend class MDNode;

  bool IsLocalToUnit;
  bool IsDefinition;

  DIGlobalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
                   bool IsLocalToUnit, bool IsDefinition, uint32_t AlignInBits,
                   ArrayRef<Metadata *> Ops)
      : DIVariable(C, DIGlobalVariableKind, Storage, Line, Ops, AlignInBits),
        IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition) {}
  ~DIGlobalVariable() = default;

  static DIGlobalVariable *
  getImpl(LLVMContext &Context, DIScope *Scope, StringRef Name,
          StringRef LinkageName, DIFile *File, unsigned Line, DIType *Type,
          bool IsLocalToUnit, bool IsDefinition,
          DIDerivedType *StaticDataMemberDeclaration, MDTuple *TemplateParams,
          uint32_t AlignInBits, DINodeArray Annotations, StorageType Storage,
          bool ShouldCreate = true) {
    return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
                   getCanonicalMDString(Context, LinkageName), File, Line, Type,
                   IsLocalToUnit, IsDefinition, StaticDataMemberDeclaration,
                   cast_or_null<Metadata>(TemplateParams), AlignInBits,
                   Annotations.get(), Storage, ShouldCreate);
  }
  static DIGlobalVariable *
  getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
          MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
          bool IsLocalToUnit, bool IsDefinition,
          Metadata *StaticDataMemberDeclaration, Metadata *TemplateParams,
          uint32_t AlignInBits, Metadata *Annotations, StorageType Storage,
          bool ShouldCreate = true);

  TempDIGlobalVariable cloneImpl() const {
    return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
                        getFile(), getLine(), getType(), isLocalToUnit(),
                        isDefinition(), getStaticDataMemberDeclaration(),
                        getTemplateParams(), getAlignInBits(),
                        getAnnotations());
  }

public:
  DEFINE_MDNODE_GET(
      DIGlobalVariable,
      (DIScope * Scope, StringRef Name, StringRef LinkageName, DIFile *File,
       unsigned Line, DIType *Type, bool IsLocalToUnit, bool IsDefinition,
       DIDerivedType *StaticDataMemberDeclaration, MDTuple *TemplateParams,
       uint32_t AlignInBits, DINodeArray Annotations),
      (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
       StaticDataMemberDeclaration, TemplateParams, AlignInBits, Annotations))
  DEFINE_MDNODE_GET(
      DIGlobalVariable,
      (Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
       unsigned Line, Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
       Metadata *StaticDataMemberDeclaration, Metadata *TemplateParams,
       uint32_t AlignInBits, Metadata *Annotations),
      (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
       StaticDataMemberDeclaration, TemplateParams, AlignInBits, Annotations))

  TempDIGlobalVariable clone() const { return cloneImpl(); }

  bool isLocalToUnit() const { return IsLocalToUnit; }
  bool isDefinition() const { return IsDefinition; }
  StringRef getDisplayName() const { return getStringOperand(4); }
  StringRef getLinkageName() const { return getStringOperand(5); }
  DIDerivedType *getStaticDataMemberDeclaration() const {
    return cast_or_null<DIDerivedType>(getRawStaticDataMemberDeclaration());
  }
  DINodeArray getAnnotations() const {
    return cast_or_null<MDTuple>(getRawAnnotations());
  }

  MDString *getRawLinkageName() const { return getOperandAs<MDString>(5); }
  Metadata *getRawStaticDataMemberDeclaration() const { return getOperand(6); }
  Metadata *getRawTemplateParams() const { return getOperand(7); }
  MDTuple *getTemplateParams() const { return getOperandAs<MDTuple>(7); }
  Metadata *getRawAnnotations() const { return getOperand(8); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIGlobalVariableKind;
  }
};

class DICommonBlock : public DIScope {
  unsigned LineNo;

  friend class LLVMContextImpl;
  friend class MDNode;

  DICommonBlock(LLVMContext &Context, StorageType Storage, unsigned LineNo,
                ArrayRef<Metadata *> Ops);

  static DICommonBlock *getImpl(LLVMContext &Context, DIScope *Scope,
                                DIGlobalVariable *Decl, StringRef Name,
                                DIFile *File, unsigned LineNo,
                                StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, Scope, Decl, getCanonicalMDString(Context, Name),
                   File, LineNo, Storage, ShouldCreate);
  }
  static DICommonBlock *getImpl(LLVMContext &Context, Metadata *Scope,
                                Metadata *Decl, MDString *Name, Metadata *File,
                                unsigned LineNo, StorageType Storage,
                                bool ShouldCreate = true);

  TempDICommonBlock cloneImpl() const {
    return getTemporary(getContext(), getScope(), getDecl(), getName(),
                        getFile(), getLineNo());
  }

public:
  DEFINE_MDNODE_GET(DICommonBlock,
                    (DIScope * Scope, DIGlobalVariable *Decl, StringRef Name,
                     DIFile *File, unsigned LineNo),
                    (Scope, Decl, Name, File, LineNo))
  DEFINE_MDNODE_GET(DICommonBlock,
                    (Metadata * Scope, Metadata *Decl, MDString *Name,
                     Metadata *File, unsigned LineNo),
                    (Scope, Decl, Name, File, LineNo))

  TempDICommonBlock clone() const { return cloneImpl(); }

  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
  DIGlobalVariable *getDecl() const {
    return cast_or_null<DIGlobalVariable>(getRawDecl());
  }
  StringRef getName() const { return getStringOperand(2); }
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
  unsigned getLineNo() const { return LineNo; }

  Metadata *getRawScope() const { return getOperand(0); }
  Metadata *getRawDecl() const { return getOperand(1); }
  MDString *getRawName() const { return getOperandAs<MDString>(2); }
  Metadata *getRawFile() const { return getOperand(3); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DICommonBlockKind;
  }
};

/// Local variable.
///
/// TODO: Split up flags.
class DILocalVariable : public DIVariable {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Arg : 16;
  DIFlags Flags;

  DILocalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
                  unsigned Arg, DIFlags Flags, uint32_t AlignInBits,
                  ArrayRef<Metadata *> Ops)
      : DIVariable(C, DILocalVariableKind, Storage, Line, Ops, AlignInBits),
        Arg(Arg), Flags(Flags) {
    assert(Arg < (1 << 16) && "DILocalVariable: Arg out of range");
  }
  ~DILocalVariable() = default;

  static DILocalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
                                  StringRef Name, DIFile *File, unsigned Line,
                                  DIType *Type, unsigned Arg, DIFlags Flags,
                                  uint32_t AlignInBits, DINodeArray Annotations,
                                  StorageType Storage,
                                  bool ShouldCreate = true) {
    return getImpl(Context, Scope, getCanonicalMDString(Context, Name), File,
                   Line, Type, Arg, Flags, AlignInBits, Annotations.get(),
                   Storage, ShouldCreate);
  }
  static DILocalVariable *getImpl(LLVMContext &Context, Metadata *Scope,
                                  MDString *Name, Metadata *File, unsigned Line,
                                  Metadata *Type, unsigned Arg, DIFlags Flags,
                                  uint32_t AlignInBits, Metadata *Annotations,
                                  StorageType Storage,
                                  bool ShouldCreate = true);

  TempDILocalVariable cloneImpl() const {
    return getTemporary(getContext(), getScope(), getName(), getFile(),
                        getLine(), getType(), getArg(), getFlags(),
                        getAlignInBits(), getAnnotations());
  }

public:
  DEFINE_MDNODE_GET(DILocalVariable,
                    (DILocalScope * Scope, StringRef Name, DIFile *File,
                     unsigned Line, DIType *Type, unsigned Arg, DIFlags Flags,
                     uint32_t AlignInBits, DINodeArray Annotations),
                    (Scope, Name, File, Line, Type, Arg, Flags, AlignInBits,
                     Annotations))
  DEFINE_MDNODE_GET(DILocalVariable,
                    (Metadata * Scope, MDString *Name, Metadata *File,
                     unsigned Line, Metadata *Type, unsigned Arg, DIFlags Flags,
                     uint32_t AlignInBits, Metadata *Annotations),
                    (Scope, Name, File, Line, Type, Arg, Flags, AlignInBits,
                     Annotations))

  TempDILocalVariable clone() const { return cloneImpl(); }

  /// Get the local scope for this variable.
  ///
  /// Variables must be defined in a local scope.
  DILocalScope *getScope() const {
    return cast<DILocalScope>(DIVariable::getScope());
  }

  bool isParameter() const { return Arg; }
  unsigned getArg() const { return Arg; }
  DIFlags getFlags() const { return Flags; }

  DINodeArray getAnnotations() const {
    return cast_or_null<MDTuple>(getRawAnnotations());
  }
  Metadata *getRawAnnotations() const { return getOperand(4); }

  bool isArtificial() const { return getFlags() & FlagArtificial; }
  bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }

  /// Check that a location is valid for this variable.
  ///
  /// Check that \c DL exists, is in the same subprogram, and has the same
  /// inlined-at location as \c this.  (Otherwise, it's not a valid attachment
  /// to a \a DbgInfoIntrinsic.)
  bool isValidLocationForIntrinsic(const DILocation *DL) const {
    return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram();
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILocalVariableKind;
  }
};
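
// Illustrative sketch (editor's addition): the validity check above in use.
// A transform that re-attaches a debug intrinsic to variable `Var` should
// verify the candidate location first; names here are hypothetical.
inline bool exampleCanAttach(const DILocalVariable *Var,
                             const DILocation *DL) {
  // Rejects null locations and locations whose scope belongs to a different
  // subprogram than the variable's scope.
  return Var->isValidLocationForIntrinsic(DL);
}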

/// Label.
///
class DILabel : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;

  DILabel(LLVMContext &C, StorageType Storage, unsigned Line,
          ArrayRef<Metadata *> Ops);
  ~DILabel() = default;

  static DILabel *getImpl(LLVMContext &Context, DIScope *Scope, StringRef Name,
                          DIFile *File, unsigned Line, StorageType Storage,
                          bool ShouldCreate = true) {
    return getImpl(Context, Scope, getCanonicalMDString(Context, Name), File,
                   Line, Storage, ShouldCreate);
  }
  static DILabel *getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
                          Metadata *File, unsigned Line, StorageType Storage,
                          bool ShouldCreate = true);

  TempDILabel cloneImpl() const {
    return getTemporary(getContext(), getScope(), getName(), getFile(),
                        getLine());
  }

public:
  DEFINE_MDNODE_GET(DILabel,
                    (DILocalScope * Scope, StringRef Name, DIFile *File,
                     unsigned Line),
                    (Scope, Name, File, Line))
  DEFINE_MDNODE_GET(DILabel,
                    (Metadata * Scope, MDString *Name, Metadata *File,
                     unsigned Line),
                    (Scope, Name, File, Line))

  TempDILabel clone() const { return cloneImpl(); }

  /// Get the local scope for this label.
  ///
  /// Labels must be defined in a local scope.
  DILocalScope *getScope() const {
    return cast_or_null<DILocalScope>(getRawScope());
  }
  unsigned getLine() const { return Line; }
  StringRef getName() const { return getStringOperand(1); }
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }

  Metadata *getRawScope() const { return getOperand(0); }
  MDString *getRawName() const { return getOperandAs<MDString>(1); }
  Metadata *getRawFile() const { return getOperand(2); }

  /// Check that a location is valid for this label.
  ///
  /// Check that \c DL exists, is in the same subprogram, and has the same
  /// inlined-at location as \c this.  (Otherwise, it's not a valid attachment
  /// to a \a DbgInfoIntrinsic.)
  bool isValidLocationForIntrinsic(const DILocation *DL) const {
    return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram();
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DILabelKind;
  }
};

class DIObjCProperty : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;
  unsigned Attributes;

  DIObjCProperty(LLVMContext &C, StorageType Storage, unsigned Line,
                 unsigned Attributes, ArrayRef<Metadata *> Ops);
  ~DIObjCProperty() = default;

  static DIObjCProperty *
  getImpl(LLVMContext &Context, StringRef Name, DIFile *File, unsigned Line,
          StringRef GetterName, StringRef SetterName, unsigned Attributes,
          DIType *Type, StorageType Storage, bool ShouldCreate = true) {
    return getImpl(Context, getCanonicalMDString(Context, Name), File, Line,
                   getCanonicalMDString(Context, GetterName),
                   getCanonicalMDString(Context, SetterName), Attributes, Type,
                   Storage, ShouldCreate);
  }
  static DIObjCProperty *getImpl(LLVMContext &Context, MDString *Name,
                                 Metadata *File, unsigned Line,
                                 MDString *GetterName, MDString *SetterName,
                                 unsigned Attributes, Metadata *Type,
                                 StorageType Storage, bool ShouldCreate = true);

  TempDIObjCProperty cloneImpl() const {
    return getTemporary(getContext(), getName(), getFile(), getLine(),
                        getGetterName(), getSetterName(), getAttributes(),
                        getType());
  }

public:
  DEFINE_MDNODE_GET(DIObjCProperty,
                    (StringRef Name, DIFile *File, unsigned Line,
                     StringRef GetterName, StringRef SetterName,
                     unsigned Attributes, DIType *Type),
                    (Name, File, Line, GetterName, SetterName, Attributes,
                     Type))
  DEFINE_MDNODE_GET(DIObjCProperty,
                    (MDString * Name, Metadata *File, unsigned Line,
                     MDString *GetterName, MDString *SetterName,
                     unsigned Attributes, Metadata *Type),
                    (Name, File, Line, GetterName, SetterName, Attributes,
                     Type))

  TempDIObjCProperty clone() const { return cloneImpl(); }

  unsigned getLine() const { return Line; }
  unsigned getAttributes() const { return Attributes; }
  StringRef getName() const { return getStringOperand(0); }
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
  StringRef getGetterName() const { return getStringOperand(2); }
  StringRef getSetterName() const { return getStringOperand(3); }
  DIType *getType() const { return cast_or_null<DIType>(getRawType()); }

  StringRef getFilename() const {
    if (auto *F = getFile())
      return F->getFilename();
    return "";
  }

  StringRef getDirectory() const {
    if (auto *F = getFile())
      return F->getDirectory();
    return "";
  }

  MDString *getRawName() const { return getOperandAs<MDString>(0); }
  Metadata *getRawFile() const { return getOperand(1); }
  MDString *getRawGetterName() const { return getOperandAs<MDString>(2); }
  MDString *getRawSetterName() const { return getOperandAs<MDString>(3); }
  Metadata *getRawType() const { return getOperand(4); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIObjCPropertyKind;
  }
};

/// An imported module (C++ using directive or similar).
class DIImportedEntity : public DINode {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;

  DIImportedEntity(LLVMContext &C, StorageType Storage, unsigned Tag,
                   unsigned Line, ArrayRef<Metadata *> Ops)
      : DINode(C, DIImportedEntityKind, Storage, Tag, Ops), Line(Line) {}
  ~DIImportedEntity() = default;

  static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
                                   DIScope *Scope, DINode *Entity, DIFile *File,
                                   unsigned Line, StringRef Name,
                                   DINodeArray Elements, StorageType Storage,
                                   bool ShouldCreate = true) {
    return getImpl(Context, Tag, Scope, Entity, File, Line,
                   getCanonicalMDString(Context, Name), Elements.get(), Storage,
                   ShouldCreate);
  }
  static DIImportedEntity *
  getImpl(LLVMContext &Context, unsigned Tag, Metadata *Scope, Metadata *Entity,
          Metadata *File, unsigned Line, MDString *Name, Metadata *Elements,
          StorageType Storage, bool ShouldCreate = true);

  TempDIImportedEntity cloneImpl() const {
    return getTemporary(getContext(), getTag(), getScope(), getEntity(),
                        getFile(), getLine(), getName(), getElements());
  }

public:
  DEFINE_MDNODE_GET(DIImportedEntity,
                    (unsigned Tag, DIScope *Scope, DINode *Entity, DIFile *File,
                     unsigned Line, StringRef Name = "",
                     DINodeArray Elements = nullptr),
                    (Tag, Scope, Entity, File, Line, Name, Elements))
  DEFINE_MDNODE_GET(DIImportedEntity,
                    (unsigned Tag, Metadata *Scope, Metadata *Entity,
                     Metadata *File, unsigned Line, MDString *Name,
                     Metadata *Elements = nullptr),
                    (Tag, Scope, Entity, File, Line, Name, Elements))

  TempDIImportedEntity clone() const { return cloneImpl(); }

  unsigned getLine() const { return Line; }
  DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
  DINode *getEntity() const { return cast_or_null<DINode>(getRawEntity()); }
  StringRef getName() const { return getStringOperand(2); }
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
  DINodeArray getElements() const {
    return cast_or_null<MDTuple>(getRawElements());
  }

  Metadata *getRawScope() const { return getOperand(0); }
  Metadata *getRawEntity() const { return getOperand(1); }
  MDString *getRawName() const { return getOperandAs<MDString>(2); }
  Metadata *getRawFile() const { return getOperand(3); }
  Metadata *getRawElements() const { return getOperand(4); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIImportedEntityKind;
  }
};

/// A pair of DIGlobalVariable and DIExpression.
class DIGlobalVariableExpression : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  DIGlobalVariableExpression(LLVMContext &C, StorageType Storage,
                             ArrayRef<Metadata *> Ops)
      : MDNode(C, DIGlobalVariableExpressionKind, Storage, Ops) {}
  ~DIGlobalVariableExpression() = default;

  static DIGlobalVariableExpression *
  getImpl(LLVMContext &Context, Metadata *Variable, Metadata *Expression,
          StorageType Storage, bool ShouldCreate = true);

  TempDIGlobalVariableExpression cloneImpl() const {
    return getTemporary(getContext(), getVariable(), getExpression());
  }

public:
  DEFINE_MDNODE_GET(DIGlobalVariableExpression,
                    (Metadata * Variable, Metadata *Expression),
                    (Variable, Expression))

  TempDIGlobalVariableExpression clone() const { return cloneImpl(); }

  Metadata *getRawVariable() const { return getOperand(0); }

  DIGlobalVariable *getVariable() const {
    return cast_or_null<DIGlobalVariable>(getRawVariable());
  }

  Metadata *getRawExpression() const { return getOperand(1); }

  DIExpression *getExpression() const {
    return cast<DIExpression>(getRawExpression());
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIGlobalVariableExpressionKind;
  }
};
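
// Illustrative sketch (editor's addition): pairing a global variable with the
// empty expression, the common case when the variable's value is the global
// itself. `GV` is assumed to be an existing DIGlobalVariable; the function
// name is hypothetical.
inline DIGlobalVariableExpression *exampleWrapGlobal(LLVMContext &Ctx,
                                                     DIGlobalVariable *GV) {
  return DIGlobalVariableExpression::get(Ctx, GV, DIExpression::get(Ctx, {}));
}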

/// Macro Info DWARF-like metadata node.
///
/// A metadata node with a DWARF macro info (i.e., a constant named
/// \c DW_MACINFO_*, defined in llvm/BinaryFormat/Dwarf.h).  Called \a
/// DIMacroNode because it's potentially used for non-DWARF output.
class DIMacroNode : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

protected:
  DIMacroNode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned MIType,
              ArrayRef<Metadata *> Ops1,
              ArrayRef<Metadata *> Ops2 = std::nullopt)
      : MDNode(C, ID, Storage, Ops1, Ops2) {
    assert(MIType < 1u << 16);
    SubclassData16 = MIType;
  }
  ~DIMacroNode() = default;

  template <class Ty> Ty *getOperandAs(unsigned I) const {
    return cast_or_null<Ty>(getOperand(I));
  }

  StringRef getStringOperand(unsigned I) const {
    if (auto *S = getOperandAs<MDString>(I))
      return S->getString();
    return StringRef();
  }

  static MDString *getCanonicalMDString(LLVMContext &Context, StringRef S) {
    if (S.empty())
      return nullptr;
    return MDString::get(Context, S);
  }

public:
  unsigned getMacinfoType() const { return SubclassData16; }

  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
    case DIMacroKind:
    case DIMacroFileKind:
      return true;
    }
  }
};

class DIMacro : public DIMacroNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;

  DIMacro(LLVMContext &C, StorageType Storage, unsigned MIType, unsigned Line,
          ArrayRef<Metadata *> Ops)
      : DIMacroNode(C, DIMacroKind, Storage, MIType, Ops), Line(Line) {}
  ~DIMacro() = default;

  static DIMacro *getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
                          StringRef Name, StringRef Value, StorageType Storage,
                          bool ShouldCreate = true) {
    return getImpl(Context, MIType, Line, getCanonicalMDString(Context, Name),
                   getCanonicalMDString(Context, Value), Storage, ShouldCreate);
  }
  static DIMacro *getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
                          MDString *Name, MDString *Value, StorageType Storage,
                          bool ShouldCreate = true);

  TempDIMacro cloneImpl() const {
    return getTemporary(getContext(), getMacinfoType(), getLine(), getName(),
                        getValue());
  }

public:
  DEFINE_MDNODE_GET(DIMacro,
                    (unsigned MIType, unsigned Line, StringRef Name,
                     StringRef Value = ""),
                    (MIType, Line, Name, Value))
  DEFINE_MDNODE_GET(DIMacro,
                    (unsigned MIType, unsigned Line, MDString *Name,
                     MDString *Value),
                    (MIType, Line, Name, Value))

  TempDIMacro clone() const { return cloneImpl(); }

  unsigned getLine() const { return Line; }

  StringRef getName() const { return getStringOperand(0); }
  StringRef getValue() const { return getStringOperand(1); }

  MDString *getRawName() const { return getOperandAs<MDString>(0); }
  MDString *getRawValue() const { return getOperandAs<MDString>(1); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIMacroKind;
  }
};
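
// Illustrative sketch (editor's addition): recording `#define WIDTH 640` from
// line 3 of the current file. DW_MACINFO_define comes from
// llvm/BinaryFormat/Dwarf.h; the helper name is hypothetical.
inline DIMacro *exampleDefineMacro(LLVMContext &Ctx) {
  return DIMacro::get(Ctx, dwarf::DW_MACINFO_define, /*Line=*/3, "WIDTH",
                      "640");
}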

class DIMacroFile : public DIMacroNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  unsigned Line;

  DIMacroFile(LLVMContext &C, StorageType Storage, unsigned MIType,
              unsigned Line, ArrayRef<Metadata *> Ops)
      : DIMacroNode(C, DIMacroFileKind, Storage, MIType, Ops), Line(Line) {}
  ~DIMacroFile() = default;

  static DIMacroFile *getImpl(LLVMContext &Context, unsigned MIType,
                              unsigned Line, DIFile *File,
                              DIMacroNodeArray Elements, StorageType Storage,
                              bool ShouldCreate = true) {
    return getImpl(Context, MIType, Line, static_cast<Metadata *>(File),
                   Elements.get(), Storage, ShouldCreate);
  }

  static DIMacroFile *getImpl(LLVMContext &Context, unsigned MIType,
                              unsigned Line, Metadata *File, Metadata *Elements,
                              StorageType Storage, bool ShouldCreate = true);

  TempDIMacroFile cloneImpl() const {
    return getTemporary(getContext(), getMacinfoType(), getLine(), getFile(),
                        getElements());
  }

public:
  DEFINE_MDNODE_GET(DIMacroFile,
                    (unsigned MIType, unsigned Line, DIFile *File,
                     DIMacroNodeArray Elements),
                    (MIType, Line, File, Elements))
  DEFINE_MDNODE_GET(DIMacroFile,
                    (unsigned MIType, unsigned Line, Metadata *File,
                     Metadata *Elements),
                    (MIType, Line, File, Elements))

  TempDIMacroFile clone() const { return cloneImpl(); }

  void replaceElements(DIMacroNodeArray Elements) {
#ifndef NDEBUG
    for (DIMacroNode *Op : getElements())
      assert(is_contained(Elements->operands(), Op) &&
             "Lost a macro node during macro node list replacement");
#endif
    replaceOperandWith(1, Elements.get());
  }

  unsigned getLine() const { return Line; }
  DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }

  DIMacroNodeArray getElements() const {
    return cast_or_null<MDTuple>(getRawElements());
  }

  Metadata *getRawFile() const { return getOperand(0); }
  Metadata *getRawElements() const { return getOperand(1); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIMacroFileKind;
  }
};
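
// Example (illustrative sketch): grouping the macro from the previous sketch
// under a file scope via the raw-Metadata overload.  `Ctx`, `File` (a
// DIFile *), and `M` (a DIMacro *) are assumed to exist.
//
//   Metadata *Ops[] = {M};
//   DIMacroFile *MF =
//       DIMacroFile::get(Ctx, dwarf::DW_MACINFO_start_file, /*Line=*/0,
//                        static_cast<Metadata *>(File), MDTuple::get(Ctx, Ops));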

/// List of ValueAsMetadata, to be used as an argument to a dbg.value
/// intrinsic.
class DIArgList : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;
  using iterator = SmallVectorImpl<ValueAsMetadata *>::iterator;

  SmallVector<ValueAsMetadata *, 4> Args;

  DIArgList(LLVMContext &C, StorageType Storage,
            ArrayRef<ValueAsMetadata *> Args)
      : MDNode(C, DIArgListKind, Storage, std::nullopt),
        Args(Args.begin(), Args.end()) {
    track();
  }
  ~DIArgList() { untrack(); }

  static DIArgList *getImpl(LLVMContext &Context,
                            ArrayRef<ValueAsMetadata *> Args,
                            StorageType Storage, bool ShouldCreate = true);

  TempDIArgList cloneImpl() const {
    return getTemporary(getContext(), getArgs());
  }

  void track();
  void untrack();
  void dropAllReferences();

public:
  DEFINE_MDNODE_GET(DIArgList, (ArrayRef<ValueAsMetadata *> Args), (Args))

  TempDIArgList clone() const { return cloneImpl(); }

  ArrayRef<ValueAsMetadata *> getArgs() const { return Args; }

  iterator args_begin() { return Args.begin(); }
  iterator args_end() { return Args.end(); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == DIArgListKind;
  }

  void handleChangedOperand(void *Ref, Metadata *New);
};
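
// Example (illustrative sketch): wrapping two SSA values so a single variadic
// debug intrinsic can refer to both.  `Ctx`, `V0`, and `V1` are assumed.
//
//   ValueAsMetadata *Args[] = {ValueAsMetadata::get(V0),
//                              ValueAsMetadata::get(V1)};
//   DIArgList *AL = DIArgList::get(Ctx, Args);
//   assert(AL->getArgs().size() == 2);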

/// Identifies a unique instance of a variable.
///
/// Storage for identifying a potentially inlined instance of a variable,
/// or a fragment thereof. This guarantees that exactly one variable instance
/// may be identified by this class, even when that variable is a fragment of
/// an aggregate variable and/or there is another inlined instance of the same
/// source code variable nearby.
/// This class does not necessarily uniquely identify that variable: it is
/// possible that two DebugVariables with different parameters may point to
/// the same variable instance, but not that one DebugVariable points to
/// multiple variable instances.
class DebugVariable {
  using FragmentInfo = DIExpression::FragmentInfo;

  const DILocalVariable *Variable;
  std::optional<FragmentInfo> Fragment;
  const DILocation *InlinedAt;

  /// Fragment that will overlap all other fragments. Used as default when
  /// caller demands a fragment.
  static const FragmentInfo DefaultFragment;

public:
  DebugVariable(const DbgVariableIntrinsic *DII);

  DebugVariable(const DILocalVariable *Var,
                std::optional<FragmentInfo> FragmentInfo,
                const DILocation *InlinedAt)
      : Variable(Var), Fragment(FragmentInfo), InlinedAt(InlinedAt) {}

  DebugVariable(const DILocalVariable *Var, const DIExpression *DIExpr,
                const DILocation *InlinedAt)
      : Variable(Var),
        Fragment(DIExpr ? DIExpr->getFragmentInfo() : std::nullopt),
        InlinedAt(InlinedAt) {}

  const DILocalVariable *getVariable() const { return Variable; }
  std::optional<FragmentInfo> getFragment() const { return Fragment; }
  const DILocation *getInlinedAt() const { return InlinedAt; }

  FragmentInfo getFragmentOrDefault() const {
    return Fragment.value_or(DefaultFragment);
  }

  static bool isDefaultFragment(const FragmentInfo F) {
    return F == DefaultFragment;
  }

  bool operator==(const DebugVariable &Other) const {
    return std::tie(Variable, Fragment, InlinedAt) ==
           std::tie(Other.Variable, Other.Fragment, Other.InlinedAt);
  }

  bool operator<(const DebugVariable &Other) const {
    return std::tie(Variable, Fragment, InlinedAt) <
           std::tie(Other.Variable, Other.Fragment, Other.InlinedAt);
  }
};
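
// Example (illustrative sketch): building a key from a debug intrinsic.  Two
// intrinsics describing the same variable, fragment, and inline site compare
// equal, even if the values they describe differ.  `DVI` is an assumed
// DbgVariableIntrinsic *.
//
//   DebugVariable Key(DVI->getVariable(), DVI->getExpression(),
//                     DVI->getDebugLoc().getInlinedAt());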

template <> struct DenseMapInfo<DebugVariable> {
  using FragmentInfo = DIExpression::FragmentInfo;

  /// Empty key: no key should be generated that has no DILocalVariable.
  static inline DebugVariable getEmptyKey() {
    return DebugVariable(nullptr, std::nullopt, nullptr);
  }

  /// The tombstone key differs from the empty key in that its optional
  /// fragment is engaged (meaningful).
  static inline DebugVariable getTombstoneKey() {
    return DebugVariable(nullptr, {{0, 0}}, nullptr);
  }

  static unsigned getHashValue(const DebugVariable &D) {
    unsigned HV = 0;
    const std::optional<FragmentInfo> Fragment = D.getFragment();
    if (Fragment)
      HV = DenseMapInfo<FragmentInfo>::getHashValue(*Fragment);

    return hash_combine(D.getVariable(), HV, D.getInlinedAt());
  }

  static bool isEqual(const DebugVariable &A, const DebugVariable &B) {
    return A == B;
  }
};
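
// With the specialization above, DebugVariable can key a DenseMap directly,
// e.g. to track the most recent location per variable instance (sketch,
// reusing `Key` from the previous example; `Loc` is an assumed Value *):
//
//   DenseMap<DebugVariable, Value *> LastLoc;
//   LastLoc[Key] = Loc;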

/// Identifies a unique instance of a whole variable (discards/ignores fragment
/// information).
class DebugVariableAggregate : public DebugVariable {
public:
  DebugVariableAggregate(const DbgVariableIntrinsic *DVI);
  DebugVariableAggregate(const DebugVariable &V)
      : DebugVariable(V.getVariable(), std::nullopt, V.getInlinedAt()) {}
};

template <>
struct DenseMapInfo<DebugVariableAggregate>
    : public DenseMapInfo<DebugVariable> {};
} // end namespace llvm

#undef DEFINE_MDNODE_GET_UNPACK_IMPL
#undef DEFINE_MDNODE_GET_UNPACK
#undef DEFINE_MDNODE_GET

#endif // LLVM_IR_DEBUGINFOMETADATA_H
//===-------- llvm/GlobalIFunc.h - GlobalIFunc class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the declaration of the GlobalIFunc class, which
/// represents a single indirect function in the IR. An indirect function uses
/// an ELF symbol type extension to mark that the address of a declaration
/// should be resolved at runtime by calling a resolver function.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALIFUNC_H
#define LLVM_IR_GLOBALIFUNC_H

#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Value.h"

namespace llvm {

class Twine;
class Module;

// Traits class for using GlobalIFunc in the symbol table of a Module.
template <typename ValueSubClass> class SymbolTableListTraits;

class GlobalIFunc final : public GlobalObject, public ilist_node<GlobalIFunc> {
  friend class SymbolTableListTraits<GlobalIFunc>;

  GlobalIFunc(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
              const Twine &Name, Constant *Resolver, Module *Parent);

public:
  GlobalIFunc(const GlobalIFunc &) = delete;
  GlobalIFunc &operator=(const GlobalIFunc &) = delete;

  /// If a parent module is specified, the ifunc is automatically inserted into
  /// the end of the specified module's ifunc list.
  static GlobalIFunc *create(Type *Ty, unsigned AddressSpace,
                             LinkageTypes Linkage, const Twine &Name,
                             Constant *Resolver, Module *Parent);

  // allocate space for exactly one operand
  void *operator new(size_t S) { return User::operator new(S, 1); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);

  void copyAttributesFrom(const GlobalIFunc *Src) {
    GlobalObject::copyAttributesFrom(Src);
  }

  /// This method unlinks 'this' from the containing module, but does not
  /// delete it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing module and deletes it.
  void eraseFromParent();

  /// These methods retrieve and set the ifunc resolver function.
  void setResolver(Constant *Resolver) { Op<0>().set(Resolver); }
  const Constant *getResolver() const {
    return static_cast<Constant *>(Op<0>().get());
  }
  Constant *getResolver() { return static_cast<Constant *>(Op<0>().get()); }

  // Return the resolver function after peeling off potential ConstantExpr
  // indirection.
  const Function *getResolverFunction() const;
  Function *getResolverFunction() {
    return const_cast<Function *>(
        static_cast<const GlobalIFunc *>(this)->getResolverFunction());
  }

  static FunctionType *getResolverFunctionType(Type *IFuncValTy) {
    return FunctionType::get(IFuncValTy->getPointerTo(), false);
  }

  static bool isValidLinkage(LinkageTypes L) {
    return isExternalLinkage(L) || isLocalLinkage(L) || isWeakLinkage(L) ||
           isLinkOnceLinkage(L);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::GlobalIFuncVal;
  }

  // Apply a specific operation to all resolver-related values. If the
  // resolver target is already a global object, then apply the operation to
  // it directly. If the target is a ConstantExpr or a GlobalAlias, evaluate
  // it to its base object and apply the operation to the base object and all
  // aliases along the path.
  void applyAlongResolverPath(function_ref<void(const GlobalValue &)> Op) const;
};
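
// Example (illustrative sketch): declaring an ifunc whose address is chosen at
// load time by `Resolver`.  `M` is an assumed Module, `FnTy` the ifunc's value
// type, and the symbol name is hypothetical.
//
//   GlobalIFunc *IF =
//       GlobalIFunc::create(FnTy, /*AddressSpace=*/0,
//                           GlobalValue::ExternalLinkage, "my_func", Resolver,
//                           &M);
//   assert(IF->getResolver() == Resolver);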

template <>
struct OperandTraits<GlobalIFunc>
    : public FixedNumOperandTraits<GlobalIFunc, 1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalIFunc, Constant)

} // end namespace llvm

#endif // LLVM_IR_GLOBALIFUNC_H
//===- GetElementPtrTypeIterator.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an iterator for walking through the types indexed by
// getelementptr instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

namespace llvm {

template <typename ItTy = User::const_op_iterator>
class generic_gep_type_iterator {

  ItTy OpIt;
  PointerUnion<StructType *, Type *> CurTy;

  generic_gep_type_iterator() = default;

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = Type *;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
    generic_gep_type_iterator I;
    I.CurTy = Ty;
    I.OpIt = It;
    return I;
  }

  static generic_gep_type_iterator end(ItTy It) {
    generic_gep_type_iterator I;
    I.OpIt = It;
    return I;
  }

  bool operator==(const generic_gep_type_iterator &x) const {
    return OpIt == x.OpIt;
  }

  bool operator!=(const generic_gep_type_iterator &x) const {
    return !operator==(x);
  }

  // FIXME: Make this the iterator's operator*() after the 4.0 release.
  // operator*() had a different meaning in earlier releases, so we're
  // temporarily not giving this iterator an operator*() to avoid a subtle
  // semantics break.
  Type *getIndexedType() const {
    if (auto *T = dyn_cast_if_present<Type *>(CurTy))
      return T;
    return cast<StructType *>(CurTy)->getTypeAtIndex(getOperand());
  }

  Value *getOperand() const { return const_cast<Value *>(&**OpIt); }

  generic_gep_type_iterator &operator++() { // Preincrement
    Type *Ty = getIndexedType();
    if (auto *ATy = dyn_cast<ArrayType>(Ty))
      CurTy = ATy->getElementType();
    else if (auto *VTy = dyn_cast<VectorType>(Ty))
      CurTy = VTy->getElementType();
    else
      CurTy = dyn_cast<StructType>(Ty);
    ++OpIt;
    return *this;
  }

  generic_gep_type_iterator operator++(int) { // Postincrement
    generic_gep_type_iterator tmp = *this;
    ++*this;
    return tmp;
  }

  // All of the below API is for querying properties of the "outer type", i.e.
  // the type that contains the indexed type. Most of the time this is just
  // the type that was visited immediately prior to the indexed type, but for
  // the first element this is an unbounded array of the GEP's source element
  // type, for which there is no clearly corresponding IR type (we've
  // historically used a pointer type as the outer type in this case, but
  // pointers will soon lose their element type).
  //
  // FIXME: Most current users of this class are just interested in byte
  // offsets (a few need to know whether the outer type is a struct because
  // they are trying to replace a constant with a variable, which is only
  // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
  // we should provide a more minimal API here that exposes not much more than
  // that.

  bool isStruct() const { return isa<StructType *>(CurTy); }
  bool isSequential() const { return isa<Type *>(CurTy); }

  StructType *getStructType() const { return cast<StructType *>(CurTy); }

  StructType *getStructTypeOrNull() const {
    return dyn_cast_if_present<StructType *>(CurTy);
  }
};

using gep_type_iterator = generic_gep_type_iterator<>;

inline gep_type_iterator gep_type_begin(const User *GEP) {
  auto *GEPOp = cast<GEPOperator>(GEP);
  return gep_type_iterator::begin(GEPOp->getSourceElementType(),
                                  GEP->op_begin() + 1);
}

inline gep_type_iterator gep_type_end(const User *GEP) {
  return gep_type_iterator::end(GEP->op_end());
}

inline gep_type_iterator gep_type_begin(const User &GEP) {
  auto &GEPOp = cast<GEPOperator>(GEP);
  return gep_type_iterator::begin(GEPOp.getSourceElementType(),
                                  GEP.op_begin() + 1);
}

inline gep_type_iterator gep_type_end(const User &GEP) {
  return gep_type_iterator::end(GEP.op_end());
}

template <typename T>
inline generic_gep_type_iterator<const T *> gep_type_begin(Type *Op0,
                                                           ArrayRef<T> A) {
  return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
}

template <typename T>
inline generic_gep_type_iterator<const T *> gep_type_end(Type * /*Op0*/,
                                                         ArrayRef<T> A) {
  return generic_gep_type_iterator<const T *>::end(A.end());
}
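
// Example (illustrative sketch): walking a GEP's indices the way offset
// computations do.  `GEP` is an assumed getelementptr instruction; struct
// steps carry a constant field number, sequential steps scale a (possibly
// variable) index by the element size.
//
//   for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
//        GTI != E; ++GTI) {
//     if (StructType *STy = GTI.getStructTypeOrNull()) {
//       unsigned FieldNo = cast<ConstantInt>(GTI.getOperand())->getZExtValue();
//       // ... use STy's layout to add the offset of field FieldNo ...
//     } else {
//       // ... add Index * size of GTI.getIndexedType() ...
//     }
//   }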

} // end namespace llvm

#endif // LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
//===- FPEnv.h ---- FP Environment ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations of entities that describe floating
/// point environment and related functions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_FPENV_H
#define LLVM_IR_FPENV_H

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/FMF.h"
#include <optional>

namespace llvm {
class StringRef;

namespace Intrinsic {
typedef unsigned ID;
}

class Instruction;

namespace fp {

/// Exception behavior used for floating point operations.
///
/// Each of these values corresponds to some metadata argument value of a
/// constrained floating point intrinsic. See the LLVM Language Reference
/// Manual for details.
enum ExceptionBehavior : uint8_t {
  ebIgnore,  ///< This corresponds to "fpexcept.ignore".
  ebMayTrap, ///< This corresponds to "fpexcept.maytrap".
  ebStrict   ///< This corresponds to "fpexcept.strict".
};

} // end namespace fp

/// Returns a valid RoundingMode enumerator when given a string
/// that is valid as input in constrained intrinsic rounding mode
/// metadata.
std::optional<RoundingMode> convertStrToRoundingMode(StringRef);

/// For any RoundingMode enumerator, returns a string valid as input in
/// constrained intrinsic rounding mode metadata.
std::optional<StringRef> convertRoundingModeToStr(RoundingMode);

/// Returns a valid ExceptionBehavior enumerator when given a string
/// valid as input in constrained intrinsic exception behavior metadata.
std::optional<fp::ExceptionBehavior> convertStrToExceptionBehavior(StringRef);

/// For any ExceptionBehavior enumerator, returns a string valid as
/// input in constrained intrinsic exception behavior metadata.
std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior);

/// Returns true if the exception handling behavior and rounding mode
/// match what is used in the default floating point environment.
inline bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM) {
  return EB == fp::ebIgnore && RM == RoundingMode::NearestTiesToEven;
}

/// Returns constrained intrinsic id to represent the given instruction in
/// strictfp function. If the instruction is already a constrained intrinsic or
/// does not have a constrained intrinsic counterpart, the function returns
/// zero.
Intrinsic::ID getConstrainedIntrinsicID(const Instruction &Instr);

/// Returns true if the rounding mode RM may be equal to QRM at compile time
/// or at run time.
inline bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM) {
  return RM == QRM || RM == RoundingMode::Dynamic;
}

/// Returns true if the possibility of a signaling NaN can be safely
/// ignored.
inline bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF) {
  return (EB == fp::ebIgnore || FMF.noNaNs());
}
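
// Example (illustrative sketch): round-tripping the metadata spelling used by
// constrained intrinsics and querying a dynamic rounding mode.
//
//   std::optional<RoundingMode> RM = convertStrToRoundingMode("round.dynamic");
//   assert(RM && *RM == RoundingMode::Dynamic);
//   assert(canRoundingModeBe(*RM, RoundingMode::TowardZero)); // dynamic: maybe
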
} // end namespace llvm

#endif // LLVM_IR_FPENV_H
//===- llvm/SymbolTableListTraits.h - Traits for iplist ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a generic class that is used to implement the automatic
// symbol table manipulation that occurs when you put (for example) a named
// instruction into a basic block.
//
// The way that this is implemented is by using a special traits class with the
// intrusive list that makes up the list of instructions in a basic block.  When
// a new element is added to the list of instructions, the traits class is
// notified, allowing the symbol table to be updated.
//
// This generic class implements the traits class.  It must be generic so that
// it can work for everything that uses it, which includes lists of
// instructions, basic blocks, arguments, functions, global variables, etc...
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_SYMBOLTABLELISTTRAITS_H
#define LLVM_IR_SYMBOLTABLELISTTRAITS_H

#include "llvm/ADT/ilist.h"
#include "llvm/ADT/simple_ilist.h"
#include <cstddef>

namespace llvm {

class Argument;
class BasicBlock;
class Function;
class GlobalAlias;
class GlobalIFunc;
class GlobalVariable;
class Instruction;
class Module;
class ValueSymbolTable;

/// Template metafunction to get the parent type for a symbol table list.
///
/// Implementations create a typedef called \c type so that we only need a
/// single template parameter for the list and traits.
template <typename NodeTy> struct SymbolTableListParentType {};

#define DEFINE_SYMBOL_TABLE_PARENT_TYPE(NODE, PARENT)                          \
  template <> struct SymbolTableListParentType<NODE> { using type = PARENT; };
DEFINE_SYMBOL_TABLE_PARENT_TYPE(Instruction, BasicBlock)
DEFINE_SYMBOL_TABLE_PARENT_TYPE(BasicBlock, Function)
DEFINE_SYMBOL_TABLE_PARENT_TYPE(Argument, Function)
DEFINE_SYMBOL_TABLE_PARENT_TYPE(Function, Module)
DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalVariable, Module)
DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalAlias, Module)
DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalIFunc, Module)
#undef DEFINE_SYMBOL_TABLE_PARENT_TYPE

template <typename NodeTy> class SymbolTableList;

// ValueSubClass   - The type of objects that I hold, e.g. Instruction.
// ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
//
template <typename ValueSubClass>
class SymbolTableListTraits : public ilist_alloc_traits<ValueSubClass> {
  using ListTy = SymbolTableList<ValueSubClass>;
  using iterator = typename simple_ilist<ValueSubClass>::iterator;
  using ItemParentClass =
      typename SymbolTableListParentType<ValueSubClass>::type;

public:
  SymbolTableListTraits() = default;

private:
  /// getListOwner - Return the object that owns this list.  If this is a list
  /// of instructions, it returns the BasicBlock that owns them.
  ItemParentClass *getListOwner() {
    size_t Offset = reinterpret_cast<size_t>(
        &((ItemParentClass *)nullptr->*ItemParentClass::getSublistAccess(
                                           static_cast<ValueSubClass *>(
                                               nullptr))));
    ListTy *Anchor = static_cast<ListTy *>(this);
    return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
                                              Offset);
  }

  static ListTy &getList(ItemParentClass *Par) {
    return Par->*(Par->getSublistAccess((ValueSubClass*)nullptr));
  }

  static ValueSymbolTable *getSymTab(ItemParentClass *Par) {
    return Par ? toPtr(Par->getValueSymbolTable()) : nullptr;
  }

public:
  void addNodeToList(ValueSubClass *V);
  void removeNodeFromList(ValueSubClass *V);
  void transferNodesFromList(SymbolTableListTraits &L2, iterator first,
                             iterator last);
  // private:
  template<typename TPtr>
  void setSymTabObject(TPtr *, TPtr);
  static ValueSymbolTable *toPtr(ValueSymbolTable *P) { return P; }
  static ValueSymbolTable *toPtr(ValueSymbolTable &R) { return &R; }
};

/// List that automatically updates parent links and symbol tables.
///
/// When nodes are inserted into and removed from this list, the associated
/// symbol table will be automatically updated.  Similarly, parent links get
/// updated automatically.
template <class T>
class SymbolTableList
    : public iplist_impl<simple_ilist<T>, SymbolTableListTraits<T>> {};
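
// Example (illustrative sketch): name bookkeeping happens as a side effect of
// list insertion.  `BB`, `X`, and `Y` are assumed; creating two identically
// named instructions auto-renames the second, because these traits register
// each name in the enclosing Function's ValueSymbolTable.
//
//   IRBuilder<> B(BB);
//   Value *A = B.CreateAdd(X, Y, "x"); // the symbol table now holds "x"
//   Value *C = B.CreateAdd(X, Y, "x"); // collides; auto-renamed to "x1"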

} // end namespace llvm

#endif // LLVM_IR_SYMBOLTABLELISTTRAITS_H
//===-- llvm/IR/ModuleSlotTracker.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_MODULESLOTTRACKER_H
#define LLVM_IR_MODULESLOTTRACKER_H

#include <functional>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {

class Module;
class Function;
class SlotTracker;
class Value;
class MDNode;

/// Abstract interface of slot tracker storage.
class AbstractSlotTrackerStorage {
public:
  virtual ~AbstractSlotTrackerStorage();

  virtual unsigned getNextMetadataSlot() = 0;

  virtual void createMetadataSlot(const MDNode *) = 0;
  virtual int getMetadataSlot(const MDNode *) = 0;
};

/// Manage lifetime of a slot tracker for printing IR.
///
/// Wrapper around the \a SlotTracker used internally by \a AsmWriter.  This
/// class allows callers to share the cost of incorporating the metadata in a
/// module or a function.
///
/// If the IR changes from underneath \a ModuleSlotTracker, strings like
/// "<badref>" will be printed, or, worse, the wrong slots entirely.
class ModuleSlotTracker {
  /// Storage for a slot tracker.
  std::unique_ptr<SlotTracker> MachineStorage;
  bool ShouldCreateStorage = false;
  bool ShouldInitializeAllMetadata = false;

  const Module *M = nullptr;
  const Function *F = nullptr;
  SlotTracker *Machine = nullptr;

  std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>
      ProcessModuleHookFn;
  std::function<void(AbstractSlotTrackerStorage *, const Function *, bool)>
      ProcessFunctionHookFn;

public:
  /// Wrap a preinitialized SlotTracker.
  ModuleSlotTracker(SlotTracker &Machine, const Module *M,
                    const Function *F = nullptr);

  /// Construct a slot tracker from a module.
  ///
  /// If \a M is \c nullptr, uses a null slot tracker.  Otherwise, initializes
  /// a slot tracker, and initializes all metadata slots.  \c
  /// ShouldInitializeAllMetadata defaults to true because this is expected to
  /// be shared between multiple callers, and otherwise MDNode references will
  /// not match up.
  explicit ModuleSlotTracker(const Module *M,
                             bool ShouldInitializeAllMetadata = true);

  /// Destructor to clean up storage.
  virtual ~ModuleSlotTracker();

  /// Lazily creates a slot tracker.
  SlotTracker *getMachine();

  const Module *getModule() const { return M; }
  const Function *getCurrentFunction() const { return F; }

  /// Incorporate the given function.
  ///
  /// Purge the currently incorporated function and incorporate \c F.  If \c F
  /// is currently incorporated, this is a no-op.
  void incorporateFunction(const Function &F);

  /// Return the slot number of the specified local value.
  ///
  /// A function that defines this value should be incorporated prior to calling
  /// this method.
  /// Return -1 if the value is not in the function's SlotTracker.
  int getLocalSlot(const Value *V);

  void setProcessHook(
      std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>);
  void setProcessHook(std::function<void(AbstractSlotTrackerStorage *,
                                         const Function *, bool)>);

  using MachineMDNodeListType =
      std::vector<std::pair<unsigned, const MDNode *>>;

  void collectMDNodes(MachineMDNodeListType &L, unsigned LB, unsigned UB) const;
};
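
// Example (illustrative sketch): sharing slot numbering across many prints.
// `M`, `F`, and `I` are an assumed Module, Function, and Instruction.
//
//   ModuleSlotTracker MST(&M);       // numbers module-level metadata once
//   MST.incorporateFunction(F);
//   int Slot = MST.getLocalSlot(&I); // e.g. 3 for "%3"; -1 if not tracked
//   I.print(errs(), MST);            // reuses the cached numbering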

} // end namespace llvm

#endif // LLVM_IR_MODULESLOTTRACKER_H
//===- llvm/IR/UseListOrder.h - LLVM Use List Order -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file has structures and command-line options for preserving use-list
// order.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_USELISTORDER_H
#define LLVM_IR_USELISTORDER_H

#include <cstddef>
#include <vector>

namespace llvm {

class Function;
class Value;

/// Structure to hold a use-list order.
struct UseListOrder {
  const Value *V = nullptr;
  const Function *F = nullptr;
  std::vector<unsigned> Shuffle;

  UseListOrder(const Value *V, const Function *F, size_t ShuffleSize)
      : V(V), F(F), Shuffle(ShuffleSize) {}

  UseListOrder() = default;
  UseListOrder(UseListOrder &&) = default;
  UseListOrder &operator=(UseListOrder &&) = default;
};

using UseListOrderStack = std::vector<UseListOrder>;

} // end namespace llvm

#endif // LLVM_IR_USELISTORDER_H
//===- llvm/ValueSymbolTable.h - Implement a Value Symtab -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the name/Value symbol table for LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VALUESYMBOLTABLE_H
#define LLVM_IR_VALUESYMBOLTABLE_H

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Value.h"
#include <cstdint>

namespace llvm {

class Argument;
class BasicBlock;
class Function;
class GlobalAlias;
class GlobalIFunc;
class GlobalVariable;
class Instruction;
template <unsigned InternalLen> class SmallString;
template <typename ValueSubClass> class SymbolTableListTraits;

/// This class provides a symbol table of name/value pairs. It is essentially
/// a std::map<std::string,Value*> but has a controlled interface provided by
/// LLVM as well as ensuring uniqueness of names.
///
class ValueSymbolTable {
  friend class SymbolTableListTraits<Argument>;
  friend class SymbolTableListTraits<BasicBlock>;
  friend class SymbolTableListTraits<Function>;
  friend class SymbolTableListTraits<GlobalAlias>;
  friend class SymbolTableListTraits<GlobalIFunc>;
  friend class SymbolTableListTraits<GlobalVariable>;
  friend class SymbolTableListTraits<Instruction>;
  friend class Value;

/// @name Types
/// @{
public:
  /// A mapping of names to values.
  using ValueMap = StringMap<Value*>;

  /// An iterator over a ValueMap.
  using iterator = ValueMap::iterator;

  /// A const_iterator over a ValueMap.
  using const_iterator = ValueMap::const_iterator;

/// @}
/// @name Constructors
/// @{

  ValueSymbolTable(int MaxNameSize = -1) : vmap(0), MaxNameSize(MaxNameSize) {}
  ~ValueSymbolTable();

  /// @}
  /// @name Accessors
  /// @{

  /// This method finds the value with the given \p Name in the
  /// symbol table.
  /// @returns the value associated with \p Name, or null if none exists.
  /// Lookup a named Value.
  Value *lookup(StringRef Name) const {
    if (MaxNameSize > -1 && Name.size() > (unsigned)MaxNameSize)
      Name = Name.substr(0, std::max(1u, (unsigned)MaxNameSize));

    return vmap.lookup(Name);
  }

  /// @returns true iff the symbol table is empty
  /// Determine if the symbol table is empty
  inline bool empty() const { return vmap.empty(); }

  /// The number of name/value pairs is returned.
  inline unsigned size() const { return unsigned(vmap.size()); }

  /// This function can be used from the debugger to display the
  /// content of the symbol table while debugging.
  /// Print out symbol table on stderr
  void dump() const;

/// @}
/// @name Iteration
/// @{

  /// Get an iterator that points to the beginning of the symbol table.
  inline iterator begin() { return vmap.begin(); }

  /// Get a const_iterator that points to the beginning of the symbol table.
  inline const_iterator begin() const { return vmap.begin(); }

  /// Get an iterator to the end of the symbol table.
  inline iterator end() { return vmap.end(); }

  /// Get a const_iterator to the end of the symbol table.
  inline const_iterator end() const { return vmap.end(); }

  /// @}
  /// @name Mutators
  /// @{
private:
  ValueName *makeUniqueName(Value *V, SmallString<256> &UniqueName);

  /// This method adds the provided value \p V to the symbol table.  The Value
  /// must have a name which is used to place the value in the symbol table.
  /// If the inserted name conflicts, this renames the value.
  /// Add a named value to the symbol table
  void reinsertValue(Value *V);

  /// createValueName - This method attempts to create a value name and insert
  /// it into the symbol table with the specified name.  If it conflicts, it
  /// auto-renames the name and returns that instead.
  ValueName *createValueName(StringRef Name, Value *V);

  /// This method removes a value from the symbol table.  It leaves the
  /// ValueName attached to the value, but it is no longer inserted in the
  /// symtab.
  void removeValueName(ValueName *V);

  /// @}
  /// @name Internal Data
  /// @{

  ValueMap vmap;                    ///< The map that holds the symbol table.
  int MaxNameSize; ///< The maximum size for each name. If the limit is
                   ///< exceeded, the name is capped.
  mutable uint32_t LastUnique = 0;  ///< Counter for tracking unique names

/// @}
};
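
// Example (illustrative sketch): module-level name lookup goes through this
// class.  `M` is an assumed Module.
//
//   ValueSymbolTable &VST = M.getValueSymbolTable();
//   if (Value *Main = VST.lookup("main"))
//     ; // "main" is defined or declared in M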

} // end namespace llvm

#endif // LLVM_IR_VALUESYMBOLTABLE_H
//===- PassManager internal APIs and implementation details -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides internal APIs and implementation details used by the
/// pass management interfaces exposed in PassManager.h. To understand more
/// context of why these particular interfaces are needed, see that header
/// file. None of these APIs should be used elsewhere.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_PASSMANAGERINTERNAL_H
#define LLVM_IR_PASSMANAGERINTERNAL_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <utility>

namespace llvm {

template <typename IRUnitT> class AllAnalysesOn;
template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
class PreservedAnalyses;

// Implementation details of the pass manager interfaces.
namespace detail {

/// Template for the abstract base class used to dispatch
/// polymorphically over pass objects.
template <typename IRUnitT, typename AnalysisManagerT, typename... ExtraArgTs>
struct PassConcept {
  // Boilerplate necessary for the container of derived classes.
  virtual ~PassConcept() = default;

  /// The polymorphic API which runs the pass over a given IR entity.
  ///
  /// Note that the actual pass object can omit the analysis manager argument
  /// if desired.  Also note that the analysis manager may be null if there is
  /// no analysis manager in the pass pipeline.
  virtual PreservedAnalyses run(IRUnitT &IR, AnalysisManagerT &AM,
                                ExtraArgTs... ExtraArgs) = 0;

  virtual void
  printPipeline(raw_ostream &OS,
                function_ref<StringRef(StringRef)> MapClassName2PassName) = 0;
  /// Polymorphic method to access the name of a pass.
  virtual StringRef name() const = 0;

  /// Polymorphic method to let a pass optionally be exempted from skipping by
  /// PassInstrumentation.
  /// To opt in, a pass should implement `static bool isRequired()`.  It's a
  /// no-op to have `isRequired` always return false, since that is the
  /// default.
  virtual bool isRequired() const = 0;
};

/// A template wrapper used to implement the polymorphic API.
///
/// Can be instantiated for any object which provides a \c run method accepting
/// an \c IRUnitT& and an \c AnalysisManager<IRUnit>&. It requires the pass to
/// be a copyable object.
template <typename IRUnitT, typename PassT, typename PreservedAnalysesT,
          typename AnalysisManagerT, typename... ExtraArgTs>
struct PassModel : PassConcept<IRUnitT, AnalysisManagerT, ExtraArgTs...> {
  explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
  PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}

  friend void swap(PassModel &LHS, PassModel &RHS) {
    using std::swap;
    swap(LHS.Pass, RHS.Pass);
  }

  PassModel &operator=(PassModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  PreservedAnalysesT run(IRUnitT &IR, AnalysisManagerT &AM,
                         ExtraArgTs... ExtraArgs) override {
    return Pass.run(IR, AM, ExtraArgs...);
  }

  void printPipeline(
      raw_ostream &OS,
      function_ref<StringRef(StringRef)> MapClassName2PassName) override {
    Pass.printPipeline(OS, MapClassName2PassName);
  }

  StringRef name() const override { return PassT::name(); }

  template <typename T>
  using has_required_t = decltype(std::declval<T &>().isRequired());

  template <typename T>
  static std::enable_if_t<is_detected<has_required_t, T>::value, bool>
  passIsRequiredImpl() {
    return T::isRequired();
  }
  template <typename T>
  static std::enable_if_t<!is_detected<has_required_t, T>::value, bool>
  passIsRequiredImpl() {
    return false;
  }

  bool isRequired() const override { return passIsRequiredImpl<PassT>(); }

  PassT Pass;
};
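
// Example (illustrative sketch): a pass opts out of being skipped by
// PassInstrumentation simply by providing the static hook that
// passIsRequiredImpl detects.  The pass name is hypothetical.
//
//   struct AlwaysRunPass : PassInfoMixin<AlwaysRunPass> {
//     PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
//     static bool isRequired() { return true; }
//   };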

/// Abstract concept of an analysis result.
///
/// This concept is parameterized over the IR unit that this result pertains
/// to.
template <typename IRUnitT, typename PreservedAnalysesT, typename InvalidatorT>
struct AnalysisResultConcept {
  virtual ~AnalysisResultConcept() = default;

  /// Method to try and mark a result as invalid.
  ///
  /// When the outer analysis manager detects a change in some underlying
  /// unit of the IR, it will call this method on all of the results cached.
  ///
  /// \p PA is a set of preserved analyses which can be used to avoid
  /// invalidation because the pass which changed the underlying IR took care
  /// to update or preserve the analysis result in some way.
  ///
  /// \p Inv is typically a \c AnalysisManager::Invalidator object that can be
  /// used by a particular analysis result to discover if other analysis
  /// results are also invalidated in the event that this result depends on
  /// them. See the documentation in the \c AnalysisManager for more details.
  ///
  /// \returns true if the result is indeed invalid (the default).
  virtual bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA,
                          InvalidatorT &Inv) = 0;
};

/// SFINAE metafunction for computing whether \c ResultT provides an
/// \c invalidate member function.
template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
  using EnabledType = char;
  struct DisabledType {
    char a, b;
  };

  // Purely to help out MSVC which fails to disable the below specialization,
  // explicitly enable using the result type's invalidate routine if we can
  // successfully call that routine.
  template <typename T> struct Nonce { using Type = EnabledType; };
  template <typename T>
  static typename Nonce<decltype(std::declval<T>().invalidate(
      std::declval<IRUnitT &>(), std::declval<PreservedAnalyses>()))>::Type
      check(rank<2>);

  // First we define an overload that can only be taken if there is no
  // invalidate member. We do this by taking the address of an invalidate
  // member in an adjacent base class of a derived class. This would be
  // ambiguous if there were an invalidate member in the result type.
  template <typename T, typename U> static DisabledType NonceFunction(T U::*);
  struct CheckerBase { int invalidate; };
  template <typename T> struct Checker : CheckerBase, T {};
  template <typename T>
  static decltype(NonceFunction(&Checker<T>::invalidate)) check(rank<1>);

  // Now we have the fallback that will only be reached when there is an
  // invalidate member, and enables the trait.
  template <typename T>
  static EnabledType check(rank<0>);

public:
  enum { Value = sizeof(check<ResultT>(rank<2>())) == sizeof(EnabledType) };
};

/// Wrapper to model the analysis result concept.
///
/// By default, this will implement the invalidate method with a trivial
/// implementation so that the actual analysis result doesn't need to provide
/// an invalidation handler. It is only selected when the invalidation handler
/// is not part of the ResultT's interface.
template <typename IRUnitT, typename PassT, typename ResultT,
          typename PreservedAnalysesT, typename InvalidatorT,
          bool HasInvalidateHandler =
              ResultHasInvalidateMethod<IRUnitT, ResultT>::Value>
struct AnalysisResultModel;

/// Specialization of \c AnalysisResultModel which provides the default
/// invalidate functionality.
template <typename IRUnitT, typename PassT, typename ResultT,
          typename PreservedAnalysesT, typename InvalidatorT>
struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT,
                           InvalidatorT, false>
    : AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT> {
  explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
  AnalysisResultModel(AnalysisResultModel &&Arg)
      : Result(std::move(Arg.Result)) {}

  friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
    using std::swap;
    swap(LHS.Result, RHS.Result);
  }

  AnalysisResultModel &operator=(AnalysisResultModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  /// The model bases invalidation solely on being in the preserved set.
  //
  // FIXME: We should actually use two different concepts for analysis results
  // rather than two different models, and avoid the indirect function call for
  // ones that use the trivial behavior.
  bool invalidate(IRUnitT &, const PreservedAnalysesT &PA,
                  InvalidatorT &) override {
    auto PAC = PA.template getChecker<PassT>();
    return !PAC.preserved() &&
           !PAC.template preservedSet<AllAnalysesOn<IRUnitT>>();
  }

  ResultT Result;
};

/// Specialization of \c AnalysisResultModel which delegates invalidate
/// handling to \c ResultT.
template <typename IRUnitT, typename PassT, typename ResultT,
          typename PreservedAnalysesT, typename InvalidatorT>
struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT,
                           InvalidatorT, true>
    : AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT> {
  explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
  AnalysisResultModel(AnalysisResultModel &&Arg)
      : Result(std::move(Arg.Result)) {}

  friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
    using std::swap;
    swap(LHS.Result, RHS.Result);
  }

  AnalysisResultModel &operator=(AnalysisResultModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  /// The model delegates to the \c ResultT method.
  bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA,
                  InvalidatorT &Inv) override {
    return Result.invalidate(IR, PA, Inv);
  }

  ResultT Result;
};
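
// Example (illustrative sketch): a result type that takes over invalidation,
// which selects the specialization above.  `MyAnalysis` is a hypothetical
// analysis pass.
//
//   struct MyResult {
//     bool invalidate(Function &F, const PreservedAnalyses &PA,
//                     FunctionAnalysisManager::Invalidator &Inv) {
//       auto PAC = PA.getChecker<MyAnalysis>();
//       return !PAC.preserved(); // custom policy goes here
//     }
//   };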

/// Abstract concept of an analysis pass.
///
/// This concept is parameterized over the IR unit that it can run over and
/// produce an analysis result.
template <typename IRUnitT, typename PreservedAnalysesT, typename InvalidatorT,
          typename... ExtraArgTs>
struct AnalysisPassConcept {
  virtual ~AnalysisPassConcept() = default;

  /// Method to run this analysis over a unit of IR.
  /// \returns A unique_ptr to the analysis result object to be queried by
  /// users.
  virtual std::unique_ptr<
      AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
  run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
      ExtraArgTs... ExtraArgs) = 0;

  /// Polymorphic method to access the name of a pass.
  virtual StringRef name() const = 0;
};

/// Wrapper to model the analysis pass concept.
///
/// Can wrap any type which implements a suitable \c run method. The method
/// must accept an \c IRUnitT& and an \c AnalysisManager<IRUnitT>& as arguments
/// and produce an object which can be wrapped in a \c AnalysisResultModel.
template <typename IRUnitT, typename PassT, typename PreservedAnalysesT,
          typename InvalidatorT, typename... ExtraArgTs>
struct AnalysisPassModel : AnalysisPassConcept<IRUnitT, PreservedAnalysesT,
                                               InvalidatorT, ExtraArgTs...> {
  explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
  // We have to explicitly define all the special member functions because MSVC
  // refuses to generate them.
  AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
  AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}

  friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
    using std::swap;
    swap(LHS.Pass, RHS.Pass);
  }

  AnalysisPassModel &operator=(AnalysisPassModel RHS) {
    swap(*this, RHS);
    return *this;
  }

  // FIXME: Replace PassT::Result with type traits when we use C++11.
  using ResultModelT =
      AnalysisResultModel<IRUnitT, PassT, typename PassT::Result,
                          PreservedAnalysesT, InvalidatorT>;

  /// The model delegates to the \c PassT::run method.
  ///
  /// The return is wrapped in an \c AnalysisResultModel.
  std::unique_ptr<
      AnalysisResultConcept<IRUnitT, PreservedAnalysesT, InvalidatorT>>
  run(IRUnitT &IR, AnalysisManager<IRUnitT, ExtraArgTs...> &AM,
      ExtraArgTs... ExtraArgs) override {
    return std::make_unique<ResultModelT>(
        Pass.run(IR, AM, std::forward<ExtraArgTs>(ExtraArgs)...));
  }

  /// The model delegates to a static \c PassT::name method.
  ///
  /// The returned string ref must point to constant immutable data!
  StringRef name() const override { return PassT::name(); }

  PassT Pass;
};

} // end namespace detail

} // end namespace llvm

#endif // LLVM_IR_PASSMANAGERINTERNAL_H
//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines layout properties related to datatype size/offset/alignment
// information.  It uses lazy annotations to cache information about how
// structure types are laid out and used.
//
// This structure should be created once, filled in if the defaults are not
// correct, and then passed around by const&.  None of the member functions
// require modification to the object.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DATALAYOUT_H
#define LLVM_IR_DATALAYOUT_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
#include <string>

// This needs to be outside of the namespace, to avoid conflict with llvm-c
// decl.
using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;

namespace llvm {

class GlobalVariable;
class LLVMContext;
class Module;
class StructLayout;
class Triple;
class Value;

/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
  INTEGER_ALIGN = 'i',
  VECTOR_ALIGN = 'v',
  FLOAT_ALIGN = 'f',
  AGGREGATE_ALIGN = 'a'
};

// FIXME: Currently the DataLayout string carries a "preferred alignment"
// for types. As the DataLayout is module/global, this should likely be
// sunk down to an FTTI element that is queried rather than a global
// preference.

/// Layout alignment element.
///
/// Stores the alignment data associated with a given type bit width.
///
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct LayoutAlignElem {
  uint32_t TypeBitWidth;
  Align ABIAlign;
  Align PrefAlign;

  static LayoutAlignElem get(Align ABIAlign, Align PrefAlign,
                             uint32_t BitWidth);

  bool operator==(const LayoutAlignElem &rhs) const;
};

/// Layout pointer alignment element.
///
/// Stores the alignment data associated with a given pointer and address space.
///
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct PointerAlignElem {
  Align ABIAlign;
  Align PrefAlign;
  uint32_t TypeBitWidth;
  uint32_t AddressSpace;
  uint32_t IndexBitWidth;

  /// Initializer
  static PointerAlignElem getInBits(uint32_t AddressSpace, Align ABIAlign,
                                    Align PrefAlign, uint32_t TypeBitWidth,
                                    uint32_t IndexBitWidth);

  bool operator==(const PointerAlignElem &rhs) const;
};

/// A parsed version of the target data layout string and methods for
/// querying it.
///
/// The target data layout string is specified *by the target* - a frontend
/// generating LLVM IR is required to generate the right target data for the
/// target being codegen'd to.
class DataLayout {
public:
  enum class FunctionPtrAlignType {
    /// The function pointer alignment is independent of the function alignment.
    Independent,
    /// The function pointer alignment is a multiple of the function alignment.
    MultipleOfFunctionAlign,
  };
private:
  /// Defaults to false.
  bool BigEndian;

  unsigned AllocaAddrSpace;
  MaybeAlign StackNaturalAlign;
  unsigned ProgramAddrSpace;
  unsigned DefaultGlobalsAddrSpace;

  MaybeAlign FunctionPtrAlign;
  FunctionPtrAlignType TheFunctionPtrAlignType;

  enum ManglingModeT {
    MM_None,
    MM_ELF,
    MM_MachO,
    MM_WinCOFF,
    MM_WinCOFFX86,
    MM_GOFF,
    MM_Mips,
    MM_XCOFF
  };
  ManglingModeT ManglingMode;

  SmallVector<unsigned char, 8> LegalIntWidths;

  /// Primitive type alignment data. This is sorted by type and bit
  /// width during construction.
  using AlignmentsTy = SmallVector<LayoutAlignElem, 4>;
  AlignmentsTy IntAlignments;
  AlignmentsTy FloatAlignments;
  AlignmentsTy VectorAlignments;
  LayoutAlignElem StructAlignment;

  /// The string representation used to create this DataLayout
  std::string StringRepresentation;

  using PointersTy = SmallVector<PointerAlignElem, 8>;
  PointersTy Pointers;

  const PointerAlignElem &getPointerAlignElem(uint32_t AddressSpace) const;

  // The StructType -> StructLayout map.
  mutable void *LayoutMap = nullptr;

  /// Pointers in these address spaces are non-integral, and don't have a
  /// well-defined bitwise representation.
  SmallVector<unsigned, 8> NonIntegralAddressSpaces;

  /// Attempts to set the alignment of the given type. Returns an error
  /// description on failure.
  Error setAlignment(AlignTypeEnum AlignType, Align ABIAlign, Align PrefAlign,
                     uint32_t BitWidth);

  /// Attempts to set the alignment of a pointer in the given address space.
  /// Returns an error description on failure.
  Error setPointerAlignmentInBits(uint32_t AddrSpace, Align ABIAlign,
                                  Align PrefAlign, uint32_t TypeBitWidth,
                                  uint32_t IndexBitWidth);

  /// Internal helper to get alignment for integer of given bitwidth.
  Align getIntegerAlignment(uint32_t BitWidth, bool abi_or_pref) const;

  /// Internal helper method that returns requested alignment for type.
  Align getAlignment(Type *Ty, bool abi_or_pref) const;

  /// Attempts to parse a target data specification string and reports an error
  /// if the string is malformed.
  Error parseSpecifier(StringRef Desc);

  // Free all internal data structures.
  void clear();

public:
  /// Constructs a DataLayout from a specification string. See reset().
  explicit DataLayout(StringRef LayoutDescription) {
    reset(LayoutDescription);
  }

  /// Initialize target data from properties stored in the module.
  explicit DataLayout(const Module *M);

  DataLayout(const DataLayout &DL) { *this = DL; }

  ~DataLayout(); // Not virtual, do not subclass this class

  DataLayout &operator=(const DataLayout &DL) {
    clear();
    StringRepresentation = DL.StringRepresentation;
    BigEndian = DL.isBigEndian();
    AllocaAddrSpace = DL.AllocaAddrSpace;
    StackNaturalAlign = DL.StackNaturalAlign;
    FunctionPtrAlign = DL.FunctionPtrAlign;
    TheFunctionPtrAlignType = DL.TheFunctionPtrAlignType;
    ProgramAddrSpace = DL.ProgramAddrSpace;
    DefaultGlobalsAddrSpace = DL.DefaultGlobalsAddrSpace;
    ManglingMode = DL.ManglingMode;
    LegalIntWidths = DL.LegalIntWidths;
    IntAlignments = DL.IntAlignments;
    FloatAlignments = DL.FloatAlignments;
    VectorAlignments = DL.VectorAlignments;
    StructAlignment = DL.StructAlignment;
    Pointers = DL.Pointers;
    NonIntegralAddressSpaces = DL.NonIntegralAddressSpaces;
    return *this;
  }

  bool operator==(const DataLayout &Other) const;
  bool operator!=(const DataLayout &Other) const { return !(*this == Other); }

  void init(const Module *M);

  /// Parse a data layout string (with fallback to default values).
  void reset(StringRef LayoutDescription);

  /// Parse a data layout string and return the layout. Return an error
  /// description on failure.
  static Expected<DataLayout> parse(StringRef LayoutDescription);

  /// Layout endianness...
  bool isLittleEndian() const { return !BigEndian; }
  bool isBigEndian() const { return BigEndian; }

  /// Returns the string representation of the DataLayout.
  ///
  /// This representation is in the same format accepted by the string
  /// constructor above. This should not be used to compare two DataLayouts,
  /// as different strings can represent the same layout.
  const std::string &getStringRepresentation() const {
    return StringRepresentation;
  }

  /// Test if the DataLayout was constructed from an empty string.
  bool isDefault() const { return StringRepresentation.empty(); }

  /// Returns true if the specified type is known to be a native integer
  /// type supported by the CPU.
  ///
  /// For example, i64 is not native on most 32-bit CPUs and i37 is not native
  /// on any known one. This returns false if the integer width is not legal.
  ///
  /// The width is specified in bits.
  bool isLegalInteger(uint64_t Width) const {
    return llvm::is_contained(LegalIntWidths, Width);
  }

  bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }

  /// Returns true if the given alignment exceeds the natural stack alignment.
  bool exceedsNaturalStackAlignment(Align Alignment) const {
    return StackNaturalAlign && (Alignment > *StackNaturalAlign);
  }

  Align getStackAlignment() const {
    assert(StackNaturalAlign && "StackNaturalAlign must be defined");
    return *StackNaturalAlign;
  }

  unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; }

  /// Returns the alignment of function pointers, which may or may not be
  /// related to the alignment of functions.
  /// \see getFunctionPtrAlignType
  MaybeAlign getFunctionPtrAlign() const { return FunctionPtrAlign; }

  /// Return the type of function pointer alignment.
  /// \see getFunctionPtrAlign
  FunctionPtrAlignType getFunctionPtrAlignType() const {
    return TheFunctionPtrAlignType;
  }

  unsigned getProgramAddressSpace() const { return ProgramAddrSpace; }
  unsigned getDefaultGlobalsAddressSpace() const {
    return DefaultGlobalsAddrSpace;
  }

  bool hasMicrosoftFastStdCallMangling() const {
    return ManglingMode == MM_WinCOFFX86;
  }

  /// Returns true if symbols with leading question marks should not receive IR
  /// mangling. True for Windows mangling modes.
  bool doNotMangleLeadingQuestionMark() const {
    return ManglingMode == MM_WinCOFF || ManglingMode == MM_WinCOFFX86;
  }

  bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }

  StringRef getLinkerPrivateGlobalPrefix() const {
    if (ManglingMode == MM_MachO)
      return "l";
    return "";
  }

  char getGlobalPrefix() const {
    switch (ManglingMode) {
    case MM_None:
    case MM_ELF:
    case MM_GOFF:
    case MM_Mips:
    case MM_WinCOFF:
    case MM_XCOFF:
      return '\0';
    case MM_MachO:
    case MM_WinCOFFX86:
      return '_';
    }
    llvm_unreachable("invalid mangling mode");
  }

  StringRef getPrivateGlobalPrefix() const {
    switch (ManglingMode) {
    case MM_None:
      return "";
    case MM_ELF:
    case MM_WinCOFF:
      return ".L";
    case MM_GOFF:
      return "@";
    case MM_Mips:
      return "$";
    case MM_MachO:
    case MM_WinCOFFX86:
      return "L";
    case MM_XCOFF:
      return "L..";
    }
    llvm_unreachable("invalid mangling mode");
  }

  static const char *getManglingComponent(const Triple &T);

  /// Returns true if the specified type fits in a native integer type
  /// supported by the CPU.
  ///
  /// For example, if the CPU only supports i32 as a native integer type, then
  /// i27 fits in a legal integer type but i45 does not.
  bool fitsInLegalInteger(unsigned Width) const {
    for (unsigned LegalIntWidth : LegalIntWidths)
      if (Width <= LegalIntWidth)
        return true;
    return false;
  }

  /// Layout pointer alignment
  Align getPointerABIAlignment(unsigned AS) const;

  /// Return target's alignment for stack-based pointers
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  Align getPointerPrefAlignment(unsigned AS = 0) const;

  /// Layout pointer size in bytes, rounded up to a whole
  /// number of bytes.
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  unsigned getPointerSize(unsigned AS = 0) const;

  /// Returns the maximum index size over all address spaces.
  unsigned getMaxIndexSize() const;

  /// Index size in bytes used for address calculation,
  /// rounded up to a whole number of bytes.
  unsigned getIndexSize(unsigned AS) const;

  /// Return the address spaces containing non-integral pointers.  Pointers in
  /// these address spaces don't have a well-defined bitwise representation.
  ArrayRef<unsigned> getNonIntegralAddressSpaces() const {
    return NonIntegralAddressSpaces;
  }

  bool isNonIntegralAddressSpace(unsigned AddrSpace) const {
    ArrayRef<unsigned> NonIntegralSpaces = getNonIntegralAddressSpaces();
    return is_contained(NonIntegralSpaces, AddrSpace);
  }

  bool isNonIntegralPointerType(PointerType *PT) const {
    return isNonIntegralAddressSpace(PT->getAddressSpace());
  }

  bool isNonIntegralPointerType(Type *Ty) const {
    auto *PTy = dyn_cast<PointerType>(Ty);
    return PTy && isNonIntegralPointerType(PTy);
  }

  /// Layout pointer size, in bits
  /// FIXME: The defaults need to be removed once all of
  /// the backends/clients are updated.
  unsigned getPointerSizeInBits(unsigned AS = 0) const {
    return getPointerAlignElem(AS).TypeBitWidth;
  }

  /// Returns the maximum index size over all address spaces.
  unsigned getMaxIndexSizeInBits() const {
    return getMaxIndexSize() * 8;
  }

  /// Size in bits of index used for address calculation in getelementptr.
  unsigned getIndexSizeInBits(unsigned AS) const {
    return getPointerAlignElem(AS).IndexBitWidth;
  }

  /// Layout pointer size, in bits, based on the type.  If this function is
  /// called with a pointer type, then the type size of the pointer is
  /// returned.  If this function is called with a vector of pointers, then
  /// the type size of a single pointer element is returned.  This should only
  /// be called with a pointer or vector of pointers.
  unsigned getPointerTypeSizeInBits(Type *) const;

  /// Layout size of the index used in GEP calculation.
  /// This function should only be called with a pointer or vector of pointers.
  unsigned getIndexTypeSizeInBits(Type *Ty) const;

  unsigned getPointerTypeSize(Type *Ty) const {
    return getPointerTypeSizeInBits(Ty) / 8;
  }

  /// Size examples:
  ///
  /// Type        SizeInBits  StoreSizeInBits  AllocSizeInBits[*]
  /// ----        ----------  ---------------  ---------------
  ///  i1            1           8                8
  ///  i8            8           8                8
  ///  i19          19          24               32
  ///  i32          32          32               32
  ///  i100        100         104              128
  ///  i128        128         128              128
  ///  Float        32          32               32
  ///  Double       64          64               64
  ///  X86_FP80     80          80               96
  ///
  /// [*] The alloc size depends on the alignment, and thus on the target.
  ///     These values are for x86-32 linux.
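  ///
  /// An illustrative sketch of these queries on i19 (hypothetical names: an
  /// LLVMContext Ctx and this DataLayout DL):
  /// \code
  ///   Type *I19 = Type::getIntNTy(Ctx, 19);
  ///   TypeSize Bits  = DL.getTypeSizeInBits(I19); // 19 bits
  ///   TypeSize Store = DL.getTypeStoreSize(I19);  // 3 bytes (24 bits)
  ///   TypeSize Alloc = DL.getTypeAllocSize(I19);  // 4 bytes on x86-32 linux
  /// \endcode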

  /// Returns the number of bits necessary to hold the specified type.
  ///
  /// If Ty is a scalable vector type, the scalable property will be set and
  /// the runtime size will be a positive integer multiple of the base size.
  ///
  /// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
  /// have a size (Type::isSized() must return true).
  TypeSize getTypeSizeInBits(Type *Ty) const;

  /// Returns the maximum number of bytes that may be overwritten by
  /// storing the specified type.
  ///
  /// If Ty is a scalable vector type, the scalable property will be set and
  /// the runtime size will be a positive integer multiple of the base size.
  ///
  /// For example, returns 5 for i36 and 10 for x86_fp80.
  TypeSize getTypeStoreSize(Type *Ty) const {
    TypeSize BaseSize = getTypeSizeInBits(Ty);
    return {divideCeil(BaseSize.getKnownMinValue(), 8), BaseSize.isScalable()};
  }

  /// Returns the maximum number of bits that may be overwritten by
  /// storing the specified type; always a multiple of 8.
  ///
  /// If Ty is a scalable vector type, the scalable property will be set and
  /// the runtime size will be a positive integer multiple of the base size.
  ///
  /// For example, returns 40 for i36 and 80 for x86_fp80.
  TypeSize getTypeStoreSizeInBits(Type *Ty) const {
    return 8 * getTypeStoreSize(Ty);
  }

  /// Returns true if no extra padding bits are needed when storing the
  /// specified type.
  ///
  /// For example, returns false for i19 that has a 24-bit store size.
  bool typeSizeEqualsStoreSize(Type *Ty) const {
    return getTypeSizeInBits(Ty) == getTypeStoreSizeInBits(Ty);
  }

  /// Returns the offset in bytes between successive objects of the
  /// specified type, including alignment padding.
  ///
  /// If Ty is a scalable vector type, the scalable property will be set and
  /// the runtime size will be a positive integer multiple of the base size.
  ///
  /// This is the amount that alloca reserves for this type. For example,
  /// returns 12 or 16 for x86_fp80, depending on alignment.
  TypeSize getTypeAllocSize(Type *Ty) const {
    // Round up to the next alignment boundary.
    return alignTo(getTypeStoreSize(Ty), getABITypeAlign(Ty).value());
  }

  /// Returns the offset in bits between successive objects of the
  /// specified type, including alignment padding; always a multiple of 8.
  ///
  /// If Ty is a scalable vector type, the scalable property will be set and
  /// the runtime size will be a positive integer multiple of the base size.
  ///
  /// This is the amount that alloca reserves for this type. For example,
  /// returns 96 or 128 for x86_fp80, depending on alignment.
  TypeSize getTypeAllocSizeInBits(Type *Ty) const {
    return 8 * getTypeAllocSize(Ty);
  }

  /// Returns the minimum ABI-required alignment for the specified type.
  Align getABITypeAlign(Type *Ty) const;

  /// Helper function to return `Alignment` if it is set, or the result of
  /// `getABITypeAlign(Ty)` otherwise; either way the result is a valid
  /// alignment.
  inline Align getValueOrABITypeAlignment(MaybeAlign Alignment,
                                          Type *Ty) const {
    return Alignment ? *Alignment : getABITypeAlign(Ty);
  }

  /// Returns the minimum ABI-required alignment for an integer type of
  /// the specified bitwidth.
  Align getABIIntegerTypeAlignment(unsigned BitWidth) const {
    return getIntegerAlignment(BitWidth, /* abi_or_pref */ true);
  }

  /// Returns the preferred stack/global alignment for the specified
  /// type.
  ///
  /// This is always at least as good as the ABI alignment.
  /// FIXME: Deprecate this function once migration to Align is over.
  LLVM_DEPRECATED("use getPrefTypeAlign instead", "getPrefTypeAlign")
  uint64_t getPrefTypeAlignment(Type *Ty) const;

  /// Returns the preferred stack/global alignment for the specified
  /// type.
  ///
  /// This is always at least as good as the ABI alignment.
  Align getPrefTypeAlign(Type *Ty) const;

  /// Returns an integer type with size at least as big as that of a
  /// pointer in the given address space.
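  ///
  /// Illustrative sketch (assuming 64-bit pointers in AS0 and an existing
  /// LLVMContext Ctx):
  /// \code
  ///   IntegerType *IntPtrTy = DL.getIntPtrType(Ctx); // i64
  /// \endcode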
  IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;

  /// Returns an integer (vector of integer) type with size at least as
  /// big as that of a pointer of the given pointer (vector of pointer) type.
  Type *getIntPtrType(Type *) const;

  /// Returns the smallest integer type with size at least as big as
  /// Width bits.
  Type *getSmallestLegalIntType(LLVMContext &C, unsigned Width = 0) const;

  /// Returns the largest legal integer type, or null if none are set.
  Type *getLargestLegalIntType(LLVMContext &C) const {
    unsigned LargestSize = getLargestLegalIntTypeSizeInBits();
    return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
  }

  /// Returns the size of the largest legal integer type in bits, or 0 if none
  /// are set.
  unsigned getLargestLegalIntTypeSizeInBits() const;

  /// Returns the type of a GEP index in AddressSpace.
  /// If it was not specified explicitly, it will be the integer type of the
  /// pointer width (IntPtrType).
  IntegerType *getIndexType(LLVMContext &C, unsigned AddressSpace) const;

  /// Returns the type of a GEP index.
  /// If it was not specified explicitly, it will be the integer type of the
  /// pointer width (IntPtrType).
  Type *getIndexType(Type *PtrTy) const;

  /// Returns the offset from the beginning of the type for the specified
  /// indices.
  ///
  /// Note that this takes the element type, not the pointer type.
  /// This is used to implement getelementptr.
  int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef<Value *> Indices) const;

  /// Get GEP indices to access Offset inside ElemTy. ElemTy is updated to be
  /// the result element type and Offset to be the residual offset.
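  ///
  /// A hedged sketch (illustrative names, assuming a DataLayout DL and a
  /// sized type STy):
  /// \code
  ///   Type *ElemTy = STy;                       // updated to the reached type
  ///   APInt Offset(/*numBits=*/64, /*val=*/8);  // byte offset to resolve
  ///   SmallVector<APInt> Idx = DL.getGEPIndicesForOffset(ElemTy, Offset);
  ///   // Offset now holds any residue the indices could not cover.
  /// \endcode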
  SmallVector<APInt> getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const;

  /// Get single GEP index to access Offset inside ElemTy. Returns std::nullopt
  /// if index cannot be computed, e.g. because the type is not an aggregate.
  /// ElemTy is updated to be the result element type and Offset to be the
  /// residual offset.
  std::optional<APInt> getGEPIndexForOffset(Type *&ElemTy, APInt &Offset) const;

  /// Returns a StructLayout object, indicating the alignment of the
  /// struct, its size, and the offsets of its fields.
  ///
  /// Note that this information is lazily cached.
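  ///
  /// Illustrative sketch (assuming an existing StructType *STy):
  /// \code
  ///   const StructLayout *SL = DL.getStructLayout(STy);
  ///   TypeSize Size = SL->getSizeInBytes();
  ///   TypeSize Off1 = SL->getElementOffset(1); // offset of field #1
  /// \endcode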
  const StructLayout *getStructLayout(StructType *Ty) const;

  /// Returns the preferred alignment of the specified global.
  ///
  /// This includes an explicitly requested alignment (if the global has one).
  Align getPreferredAlign(const GlobalVariable *GV) const;
};

inline DataLayout *unwrap(LLVMTargetDataRef P) {
  return reinterpret_cast<DataLayout *>(P);
}

inline LLVMTargetDataRef wrap(const DataLayout *P) {
  return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
}
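
// The two helpers above bridge DataLayout to the C API. A minimal round-trip
// sketch (assuming an existing DataLayout DL):
//
//   LLVMTargetDataRef Ref = wrap(&DL);
//   DataLayout *Same = unwrap(Ref); // points back at DL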

/// Used to lazily calculate structure layout information for a target machine,
/// based on the DataLayout structure.
class StructLayout final : public TrailingObjects<StructLayout, TypeSize> {
  TypeSize StructSize;
  Align StructAlignment;
  unsigned IsPadded : 1;
  unsigned NumElements : 31;

public:
  TypeSize getSizeInBytes() const { return StructSize; }

  TypeSize getSizeInBits() const { return 8 * StructSize; }

  Align getAlignment() const { return StructAlignment; }

  /// Returns whether the struct has padding or not between its fields.
  /// NB: Padding in nested elements is not taken into account.
  bool hasPadding() const { return IsPadded; }

  /// Given a valid byte offset into the structure, returns the structure
  /// index that contains it.
  unsigned getElementContainingOffset(uint64_t FixedOffset) const;

  MutableArrayRef<TypeSize> getMemberOffsets() {
    return llvm::MutableArrayRef(getTrailingObjects<TypeSize>(), NumElements);
  }

  ArrayRef<TypeSize> getMemberOffsets() const {
    return llvm::ArrayRef(getTrailingObjects<TypeSize>(), NumElements);
  }

  TypeSize getElementOffset(unsigned Idx) const {
    assert(Idx < NumElements && "Invalid element idx!");
    return getMemberOffsets()[Idx];
  }

  TypeSize getElementOffsetInBits(unsigned Idx) const {
    return getElementOffset(Idx) * 8;
  }

private:
  friend class DataLayout; // Only DataLayout can create this class

  StructLayout(StructType *ST, const DataLayout &DL);

  size_t numTrailingObjects(OverloadToken<TypeSize>) const {
    return NumElements;
  }
};

// The implementation of this method is provided inline as it is particularly
// well suited to constant folding when called on a specific Type subclass.
inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
  assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
  switch (Ty->getTypeID()) {
  case Type::LabelTyID:
    return TypeSize::Fixed(getPointerSizeInBits(0));
  case Type::PointerTyID:
    return TypeSize::Fixed(getPointerSizeInBits(Ty->getPointerAddressSpace()));
  case Type::ArrayTyID: {
    ArrayType *ATy = cast<ArrayType>(Ty);
    return ATy->getNumElements() *
           getTypeAllocSizeInBits(ATy->getElementType());
  }
  case Type::StructTyID:
    // Get the layout annotation... which is lazily created on demand.
    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
  case Type::IntegerTyID:
    return TypeSize::Fixed(Ty->getIntegerBitWidth());
  case Type::HalfTyID:
  case Type::BFloatTyID:
    return TypeSize::Fixed(16);
  case Type::FloatTyID:
    return TypeSize::Fixed(32);
  case Type::DoubleTyID:
  case Type::X86_MMXTyID:
    return TypeSize::Fixed(64);
  case Type::PPC_FP128TyID:
  case Type::FP128TyID:
    return TypeSize::Fixed(128);
  case Type::X86_AMXTyID:
    return TypeSize::Fixed(8192);
  // In memory objects this is always aligned to a higher boundary, but
  // only 80 bits contain information.
  case Type::X86_FP80TyID:
    return TypeSize::Fixed(80);
  case Type::FixedVectorTyID:
  case Type::ScalableVectorTyID: {
    VectorType *VTy = cast<VectorType>(Ty);
    auto EltCnt = VTy->getElementCount();
    uint64_t MinBits = EltCnt.getKnownMinValue() *
                       getTypeSizeInBits(VTy->getElementType()).getFixedValue();
    return TypeSize(MinBits, EltCnt.isScalable());
  }
  case Type::TargetExtTyID: {
    Type *LayoutTy = cast<TargetExtType>(Ty)->getLayoutType();
    return getTypeSizeInBits(LayoutTy);
  }
  default:
    llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
  }
}

} // end namespace llvm

#endif // LLVM_IR_DATALAYOUT_H

//===- llvm/IR/DiagnosticPrinter.h - Diagnostic Printer ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the main interface for printing backend diagnostics.
//
// Clients of the backend diagnostics should overload this interface based
// on their needs.
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DIAGNOSTICPRINTER_H
#define LLVM_IR_DIAGNOSTICPRINTER_H

#include <string>

namespace llvm {

// Forward declarations.
class Module;
class raw_ostream;
class SMDiagnostic;
class StringRef;
class Twine;
class Value;

/// Interface for custom diagnostic printing.
class DiagnosticPrinter {
public:
  virtual ~DiagnosticPrinter() = default;

  // Simple types.
  virtual DiagnosticPrinter &operator<<(char C) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned char C) = 0;
  virtual DiagnosticPrinter &operator<<(signed char C) = 0;
  virtual DiagnosticPrinter &operator<<(StringRef Str) = 0;
  virtual DiagnosticPrinter &operator<<(const char *Str) = 0;
  virtual DiagnosticPrinter &operator<<(const std::string &Str) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned long N) = 0;
  virtual DiagnosticPrinter &operator<<(long N) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned long long N) = 0;
  virtual DiagnosticPrinter &operator<<(long long N) = 0;
  virtual DiagnosticPrinter &operator<<(const void *P) = 0;
  virtual DiagnosticPrinter &operator<<(unsigned int N) = 0;
  virtual DiagnosticPrinter &operator<<(int N) = 0;
  virtual DiagnosticPrinter &operator<<(double N) = 0;
  virtual DiagnosticPrinter &operator<<(const Twine &Str) = 0;

  // IR related types.
  virtual DiagnosticPrinter &operator<<(const Value &V) = 0;
  virtual DiagnosticPrinter &operator<<(const Module &M) = 0;

  // Other types.
  virtual DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) = 0;
};

/// Basic diagnostic printer that uses an underlying raw_ostream.
class DiagnosticPrinterRawOStream : public DiagnosticPrinter {
protected:
  raw_ostream &Stream;

public:
  DiagnosticPrinterRawOStream(raw_ostream &Stream) : Stream(Stream) {}

  // Simple types.
  DiagnosticPrinter &operator<<(char C) override;
  DiagnosticPrinter &operator<<(unsigned char C) override;
  DiagnosticPrinter &operator<<(signed char C) override;
  DiagnosticPrinter &operator<<(StringRef Str) override;
  DiagnosticPrinter &operator<<(const char *Str) override;
  DiagnosticPrinter &operator<<(const std::string &Str) override;
  DiagnosticPrinter &operator<<(unsigned long N) override;
  DiagnosticPrinter &operator<<(long N) override;
  DiagnosticPrinter &operator<<(unsigned long long N) override;
  DiagnosticPrinter &operator<<(long long N) override;
  DiagnosticPrinter &operator<<(const void *P) override;
  DiagnosticPrinter &operator<<(unsigned int N) override;
  DiagnosticPrinter &operator<<(int N) override;
  DiagnosticPrinter &operator<<(double N) override;
  DiagnosticPrinter &operator<<(const Twine &Str) override;

  // IR related types.
  DiagnosticPrinter &operator<<(const Value &V) override;
  DiagnosticPrinter &operator<<(const Module &M) override;

  // Other types.
  DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) override;
};
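
// A minimal usage sketch (illustrative; Msg is an assumed StringRef and
// errs() is the standard error stream from raw_ostream.h):
//
//   DiagnosticPrinterRawOStream DP(errs());
//   DP << "remark: " << Msg << '\n';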

} // end namespace llvm

#endif // LLVM_IR_DIAGNOSTICPRINTER_H

//===- CycleInfo.h - Cycle Info for LLVM IR -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file declares the LLVM IR specialization of the GenericCycle
/// templates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_CYCLEINFO_H
#define LLVM_IR_CYCLEINFO_H

#include "llvm/ADT/GenericCycleInfo.h"
#include "llvm/IR/SSAContext.h"

namespace llvm {

extern template class GenericCycleInfo<SSAContext>;
extern template class GenericCycle<SSAContext>;

using CycleInfo = GenericCycleInfo<SSAContext>;
using Cycle = CycleInfo::CycleT;

} // namespace llvm

#endif // LLVM_IR_CYCLEINFO_H

//===- DerivedUser.h - Base for non-IR Users --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DERIVEDUSER_H
#define LLVM_IR_DERIVEDUSER_H

#include "llvm/IR/User.h"

namespace llvm {

class Type;
class Use;

/// Extension point for the Value hierarchy. All classes outside of lib/IR
/// that wish to inherit from User should inherit from DerivedUser instead.
/// Inheriting from this class is discouraged.
///
/// Generally speaking, Value is the base of a closed class hierarchy
/// that can't be extended by code outside of lib/IR. This class creates a
/// loophole that allows classes outside of lib/IR to extend User to leverage
/// its use/def list machinery.
class DerivedUser : public User {
protected:
  using DeleteValueTy = void (*)(DerivedUser *);

private:
  friend class Value;

  DeleteValueTy DeleteValue;

public:
  DerivedUser(Type *Ty, unsigned VK, Use *U, unsigned NumOps,
              DeleteValueTy DeleteValue)
      : User(Ty, VK, U, NumOps), DeleteValue(DeleteValue) {}
};
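
// An illustrative subclass sketch (hypothetical type; in-tree, MemorySSA's
// MemoryAccess follows this pattern):
//
//   class MyExtUser : public DerivedUser {
//     static void deleteMe(DerivedUser *Self) {
//       delete static_cast<MyExtUser *>(Self);
//     }
//
//   public:
//     MyExtUser(Type *Ty, unsigned VK, Use *U, unsigned NumOps)
//         : DerivedUser(Ty, VK, U, NumOps, deleteMe) {}
//   };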

} // end namespace llvm

#endif // LLVM_IR_DERIVEDUSER_H

//===- llvm/CodeGen/GCStrategy.h - Garbage collection -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// GCStrategy coordinates code generation algorithms and implements some itself
// in order to generate code compatible with a target code generator as
// specified in a function's 'gc' attribute. Algorithms are enabled by setting
// flags in a subclass's constructor, and some virtual methods can be
// overridden.
//
// GCStrategy is relevant for implementations using either gc.root or
// gc.statepoint based lowering strategies, but is currently focused mostly on
// options for gc.root.  This will change over time.
//
// When requested by a subclass of GCStrategy, the gc.root implementation will
// populate GCModuleInfo and GCFunctionInfo with information about each
// Function in the Module that opts in to garbage collection.  Specifically:
//
// - Safe points
//   Garbage collection is generally only possible at certain points in code.
//   GCStrategy can request that the collector insert such points:
//
//     - At and after any call to a subroutine
//     - Before returning from the current function
//     - Before backwards branches (loops)
//
// - Roots
//   When a reference to a GC-allocated object exists on the stack, it must be
//   stored in an alloca registered with llvm.gcroot.
//
// This information can be used to emit the metadata tables required by the
// target garbage collector runtime.
//
// When used with gc.statepoint, information about safepoint and roots can be
// found in the binary StackMap section after code generation.  Safepoint
// placement is currently the responsibility of the frontend, though late
// insertion support is planned.
//
// The read and write barrier support can be used with either implementation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GCSTRATEGY_H
#define LLVM_IR_GCSTRATEGY_H

#include "llvm/Support/Registry.h"
#include <optional>
#include <string>

namespace llvm {

class Type;

/// GCStrategy describes a garbage collector algorithm's code generation
/// requirements, and provides overridable hooks for those needs which cannot
/// be abstractly described.  GCStrategy objects must be looked up through
/// the Function.  The objects themselves are owned by the Context and must
/// be immutable.
class GCStrategy {
private:
  friend class GCModuleInfo;

  std::string Name;

protected:
  bool UseStatepoints = false; /// Uses gc.statepoint as opposed to gc.root;
                               /// if set, NeededSafePoints and UsesMetadata
                               /// should be left at their default values.

  bool UseRS4GC = false; /// If UseStatepoints is set, this determines whether
                         /// the RewriteStatepointsForGC pass should rewrite
                         /// this function's calls.
                         /// This should only be set if UseStatepoints is set.

  bool NeededSafePoints = false; ///< If set, calls are inferred to be safepoints.
  bool UsesMetadata = false;     ///< If set, backend must emit metadata tables.

public:
  GCStrategy();
  virtual ~GCStrategy() = default;

  /// Return the name of the GC strategy.  This is the value of the collector
  /// name string specified on functions which use this strategy.
  const std::string &getName() const { return Name; }

  /// Returns true if this strategy is expecting the use of gc.statepoints,
  /// and false otherwise.
  bool useStatepoints() const { return UseStatepoints; }

  /** @name Statepoint Specific Properties */
  ///@{

  /// If the type specified can be reliably distinguished, returns true for
  /// pointers to GC managed locations and false for pointers to non-GC
  /// managed locations.  Note a GCStrategy can always return 'std::nullopt'
  /// (i.e. an empty optional) indicating it can't reliably distinguish.
  virtual std::optional<bool> isGCManagedPointer(const Type *Ty) const {
    return std::nullopt;
  }

  /// Returns true if the RewriteStatepointsForGC pass should run on functions
  /// using this GC.
  bool useRS4GC() const {
    assert((!UseRS4GC || useStatepoints()) &&
           "GC strategy has useRS4GC but not useStatepoints set");
    return UseRS4GC;
  }

  ///@}

  /// If set, appropriate metadata tables must be emitted by the back-end
  /// (assembler, JIT, or otherwise). The default stackmap information can be
  /// found in the StackMap section as described in the documentation.
  bool usesMetadata() const { return UsesMetadata; }

  /** @name GCRoot Specific Properties
   * These properties and overrides only apply to collector strategies using
   * GCRoot.
   */
  ///@{

  /// True if safe points need to be inferred on call sites
  bool needsSafePoints() const { return NeededSafePoints; }

  ///@}
};

/// Subclasses of GCStrategy are made available for use during compilation by
/// adding them to the global GCRegistry.  This can be done either within the
/// LLVM source tree or via a loadable plugin.  An example registration
/// would be:
/// static GCRegistry::Add<CustomGC> X("custom-name",
///        "my custom super fancy gc strategy");
///
/// Note that to use a custom GCMetadataPrinter, you must also
/// register your GCMetadataPrinter subclass with the
/// GCMetadataPrinterRegistry as well.
using GCRegistry = Registry<GCStrategy>;
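
// A hedged sketch of such a subclass (illustrative flags; the protected
// members are documented in GCStrategy above):
//
//   class CustomGC : public GCStrategy {
//   public:
//     CustomGC() {
//       UseStatepoints = true; // lower via gc.statepoint
//       UseRS4GC = true;       // let RewriteStatepointsForGC rewrite calls
//     }
//   };
//   static GCRegistry::Add<CustomGC> X("custom-name", "custom gc strategy");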

/// Lookup the GCStrategy object associated with the given gc name.
std::unique_ptr<GCStrategy> getGCStrategy(const StringRef Name);

} // end namespace llvm

#endif // LLVM_IR_GCSTRATEGY_H

//===- Attributes.td - Defines all LLVM attributes ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all the LLVM attributes.
//
//===----------------------------------------------------------------------===//

/// Attribute property base class.
class AttrProperty;

/// Can be used as function attribute.
def FnAttr : AttrProperty;

/// Can be used as parameter attribute.
def ParamAttr : AttrProperty;

/// Can be used as return attribute.
def RetAttr : AttrProperty;

/// Attribute base class.
class Attr<string S, list<AttrProperty> P> {
  // String representation of this attribute in the IR.
  string AttrString = S;
  list<AttrProperty> Properties = P;
}

/// Enum attribute.
class EnumAttr<string S, list<AttrProperty> P> : Attr<S, P>;

/// Int attribute.
class IntAttr<string S, list<AttrProperty> P> : Attr<S, P>;

/// Type attribute.
class TypeAttr<string S, list<AttrProperty> P> : Attr<S, P>;

/// StringBool attribute.
class StrBoolAttr<string S> : Attr<S, []>;

/// Arbitrary string attribute.
class ComplexStrAttr<string S, list<AttrProperty> P> : Attr<S, P>;

/// Target-independent enum attributes.

/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
/// 0 means unaligned (different from align(1)).
def Alignment : IntAttr<"align", [ParamAttr, RetAttr]>;

/// Parameter of a function that tells us the alignment of an allocation, as in
/// aligned_alloc and aligned ::operator new.
def AllocAlign: EnumAttr<"allocalign", [ParamAttr]>;

/// Describes behavior of an allocator function in terms of known properties.
def AllocKind: IntAttr<"allockind", [FnAttr]>;

/// Parameter is the pointer to be manipulated by the allocator function.
def AllocatedPointer : EnumAttr<"allocptr", [ParamAttr]>;

/// The result of the function is guaranteed to point to a number of bytes that
/// we can determine if we know the value of the function's arguments.
def AllocSize : IntAttr<"allocsize", [FnAttr]>;

/// inline=always.
def AlwaysInline : EnumAttr<"alwaysinline", [FnAttr]>;

/// Callee is recognized as a builtin, despite nobuiltin attribute on its
/// declaration.
def Builtin : EnumAttr<"builtin", [FnAttr]>;

/// Pass structure by value.
def ByVal : TypeAttr<"byval", [ParamAttr]>;

/// Mark in-memory ABI type.
def ByRef : TypeAttr<"byref", [ParamAttr]>;

/// Parameter or return value may not contain uninitialized or poison bits.
def NoUndef : EnumAttr<"noundef", [ParamAttr, RetAttr]>;

/// Marks function as being in a cold path.
def Cold : EnumAttr<"cold", [FnAttr]>;

/// Can only be moved to control-equivalent blocks.
def Convergent : EnumAttr<"convergent", [FnAttr]>;

/// Marks function as being in a hot path and frequently called.
def Hot: EnumAttr<"hot", [FnAttr]>;

/// Pointer is known to be dereferenceable.
def Dereferenceable : IntAttr<"dereferenceable", [ParamAttr, RetAttr]>;

/// Pointer is either null or dereferenceable.
def DereferenceableOrNull : IntAttr<"dereferenceable_or_null",
                                    [ParamAttr, RetAttr]>;

/// Do not instrument function with sanitizers.
def DisableSanitizerInstrumentation: EnumAttr<"disable_sanitizer_instrumentation", [FnAttr]>;

/// Provide pointer element type to intrinsic.
def ElementType : TypeAttr<"elementtype", [ParamAttr]>;

/// Whether to keep return instructions, or replace with a jump to an external
/// symbol.
def FnRetThunkExtern : EnumAttr<"fn_ret_thunk_extern", [FnAttr]>;

/// Pass structure in an alloca.
def InAlloca : TypeAttr<"inalloca", [ParamAttr]>;

/// Source said inlining was desirable.
def InlineHint : EnumAttr<"inlinehint", [FnAttr]>;

/// Force argument to be passed in register.
def InReg : EnumAttr<"inreg", [ParamAttr, RetAttr]>;

/// Build jump-instruction tables and replace refs.
def JumpTable : EnumAttr<"jumptable", [FnAttr]>;

/// Memory effects of the function.
def Memory : IntAttr<"memory", [FnAttr]>;

/// Forbidden floating-point classes.
def NoFPClass : IntAttr<"nofpclass", [ParamAttr, RetAttr]>;

/// Function must be optimized for size first.
def MinSize : EnumAttr<"minsize", [FnAttr]>;

/// Naked function.
def Naked : EnumAttr<"naked", [FnAttr]>;

/// Nested function static chain.
def Nest : EnumAttr<"nest", [ParamAttr]>;

/// Considered to not alias after call.
def NoAlias : EnumAttr<"noalias", [ParamAttr, RetAttr]>;

/// Callee isn't recognized as a builtin.
def NoBuiltin : EnumAttr<"nobuiltin", [FnAttr]>;

/// Function cannot enter into caller's translation unit.
def NoCallback : EnumAttr<"nocallback", [FnAttr]>;

/// Function creates no aliases of pointer.
def NoCapture : EnumAttr<"nocapture", [ParamAttr]>;

/// Call cannot be duplicated.
def NoDuplicate : EnumAttr<"noduplicate", [FnAttr]>;

/// Function does not deallocate memory.
def NoFree : EnumAttr<"nofree", [FnAttr, ParamAttr]>;

/// Disable implicit floating point insts.
def NoImplicitFloat : EnumAttr<"noimplicitfloat", [FnAttr]>;

/// inline=never.
def NoInline : EnumAttr<"noinline", [FnAttr]>;

/// Function is called early and/or often, so lazy binding isn't worthwhile.
def NonLazyBind : EnumAttr<"nonlazybind", [FnAttr]>;

/// Disable merging for specified functions or call sites.
def NoMerge : EnumAttr<"nomerge", [FnAttr]>;

/// Pointer is known to be not null.
def NonNull : EnumAttr<"nonnull", [ParamAttr, RetAttr]>;

/// The function does not recurse.
def NoRecurse : EnumAttr<"norecurse", [FnAttr]>;

/// Disable redzone.
def NoRedZone : EnumAttr<"noredzone", [FnAttr]>;

/// Mark the function as not returning.
def NoReturn : EnumAttr<"noreturn", [FnAttr]>;

/// Function does not synchronize.
def NoSync : EnumAttr<"nosync", [FnAttr]>;

/// Disable Indirect Branch Tracking.
def NoCfCheck : EnumAttr<"nocf_check", [FnAttr]>;

/// Function should not be instrumented.
def NoProfile : EnumAttr<"noprofile", [FnAttr]>;

/// This function should not be instrumented but it is ok to inline profiled
/// functions into it.
def SkipProfile : EnumAttr<"skipprofile", [FnAttr]>;

/// Function doesn't unwind stack.
def NoUnwind : EnumAttr<"nounwind", [FnAttr]>;

/// No SanitizeBounds instrumentation.
def NoSanitizeBounds : EnumAttr<"nosanitize_bounds", [FnAttr]>;

/// No SanitizeCoverage instrumentation.
def NoSanitizeCoverage : EnumAttr<"nosanitize_coverage", [FnAttr]>;

/// Null pointer in address space zero is valid.
def NullPointerIsValid : EnumAttr<"null_pointer_is_valid", [FnAttr]>;

/// Select optimizations for best fuzzing signal.
def OptForFuzzing : EnumAttr<"optforfuzzing", [FnAttr]>;

/// opt_size.
def OptimizeForSize : EnumAttr<"optsize", [FnAttr]>;

/// Function must not be optimized.
def OptimizeNone : EnumAttr<"optnone", [FnAttr]>;

/// Similar to byval but without a copy.
def Preallocated : TypeAttr<"preallocated", [FnAttr, ParamAttr]>;

/// Function does not access memory.
def ReadNone : EnumAttr<"readnone", [ParamAttr]>;

/// Function only reads from memory.
def ReadOnly : EnumAttr<"readonly", [ParamAttr]>;

/// Return value is always equal to this argument.
def Returned : EnumAttr<"returned", [ParamAttr]>;

/// Parameter is required to be a trivial constant.
def ImmArg : EnumAttr<"immarg", [ParamAttr]>;

/// Function can return twice.
def ReturnsTwice : EnumAttr<"returns_twice", [FnAttr]>;

/// Safe Stack protection.
def SafeStack : EnumAttr<"safestack", [FnAttr]>;

/// Shadow Call Stack protection.
def ShadowCallStack : EnumAttr<"shadowcallstack", [FnAttr]>;

/// Sign extended before/after call.
def SExt : EnumAttr<"signext", [ParamAttr, RetAttr]>;

/// Alignment of stack for function (3 bits) stored as log2 of alignment with
/// +1 bias; 0 means unaligned (different from alignstack=(1)).
def StackAlignment : IntAttr<"alignstack", [FnAttr, ParamAttr]>;

/// Function can be speculated.
def Speculatable : EnumAttr<"speculatable", [FnAttr]>;

/// Stack protection.
def StackProtect : EnumAttr<"ssp", [FnAttr]>;

/// Stack protection required.
def StackProtectReq : EnumAttr<"sspreq", [FnAttr]>;

/// Strong Stack protection.
def StackProtectStrong : EnumAttr<"sspstrong", [FnAttr]>;

/// Function was called in a scope requiring strict floating point semantics.
def StrictFP : EnumAttr<"strictfp", [FnAttr]>;

/// Hidden pointer to structure to return.
def StructRet : TypeAttr<"sret", [ParamAttr]>;

/// AddressSanitizer is on.
def SanitizeAddress : EnumAttr<"sanitize_address", [FnAttr]>;

/// ThreadSanitizer is on.
def SanitizeThread : EnumAttr<"sanitize_thread", [FnAttr]>;

/// MemorySanitizer is on.
def SanitizeMemory : EnumAttr<"sanitize_memory", [FnAttr]>;

/// HWAddressSanitizer is on.
def SanitizeHWAddress : EnumAttr<"sanitize_hwaddress", [FnAttr]>;

/// MemTagSanitizer is on.
def SanitizeMemTag : EnumAttr<"sanitize_memtag", [FnAttr]>;

/// Speculative Load Hardening is enabled.
///
/// Note that this uses the default compatibility (always compatible during
/// inlining) and a conservative merge strategy where inlining an attributed
/// body will add the attribute to the caller. This ensures that code carrying
/// this attribute will always be lowered with hardening enabled.
def SpeculativeLoadHardening : EnumAttr<"speculative_load_hardening",
                                        [FnAttr]>;

/// Argument is swift error.
def SwiftError : EnumAttr<"swifterror", [ParamAttr]>;

/// Argument is swift self/context.
def SwiftSelf : EnumAttr<"swiftself", [ParamAttr]>;

/// Argument is swift async context.
def SwiftAsync : EnumAttr<"swiftasync", [ParamAttr]>;

/// Function must be in an unwind table.
def UWTable : IntAttr<"uwtable", [FnAttr]>;

/// Minimum/Maximum vscale value for function.
def VScaleRange : IntAttr<"vscale_range", [FnAttr]>;

/// Function always comes back to callsite.
def WillReturn : EnumAttr<"willreturn", [FnAttr]>;

/// Function only writes to memory.
def WriteOnly : EnumAttr<"writeonly", [ParamAttr]>;

/// Zero extended before/after call.
def ZExt : EnumAttr<"zeroext", [ParamAttr, RetAttr]>;

/// Function is required to make Forward Progress.
def MustProgress : EnumAttr<"mustprogress", [FnAttr]>;

/// Function is a presplit coroutine.
def PresplitCoroutine : EnumAttr<"presplitcoroutine", [FnAttr]>;

/// Target-independent string attributes.
def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
def ApproxFuncFPMath : StrBoolAttr<"approx-func-fp-math">;
def NoSignedZerosFPMath : StrBoolAttr<"no-signed-zeros-fp-math">;
def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
def NoJumpTables : StrBoolAttr<"no-jump-tables">;
def NoInlineLineTables : StrBoolAttr<"no-inline-line-tables">;
def ProfileSampleAccurate : StrBoolAttr<"profile-sample-accurate">;
def UseSampleProfile : StrBoolAttr<"use-sample-profile">;

def DenormalFPMath : ComplexStrAttr<"denormal-fp-math", [FnAttr]>;
def DenormalFPMathF32 : ComplexStrAttr<"denormal-fp-math-f32", [FnAttr]>;

class CompatRule<string F> {
  // The name of the function called to check the attribute of the caller and
  // callee and decide whether inlining should be allowed. The function's
  // signature must match "bool(const Function&, const Function &)", where the
  // first parameter is the reference to the caller and the second parameter is
  // the reference to the callee. It must return false if the attributes of the
  // caller and callee are incompatible, and true otherwise.
  string CompatFunc = F;
}
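
// A hedged sketch of a checker with the required signature (illustrative;
// such functions live in the C++ attribute support code, not in this file):
//
//   static bool isMyAttrEqual(const Function &Caller, const Function &Callee) {
//     return Caller.hasFnAttribute("my-attr") ==
//            Callee.hasFnAttribute("my-attr");
//   }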

def : CompatRule<"isEqual<SanitizeAddressAttr>">;
def : CompatRule<"isEqual<SanitizeThreadAttr>">;
def : CompatRule<"isEqual<SanitizeMemoryAttr>">;
def : CompatRule<"isEqual<SanitizeHWAddressAttr>">;
def : CompatRule<"isEqual<SanitizeMemTagAttr>">;
def : CompatRule<"isEqual<SafeStackAttr>">;
def : CompatRule<"isEqual<ShadowCallStackAttr>">;
def : CompatRule<"isEqual<UseSampleProfileAttr>">;
def : CompatRule<"isEqual<NoProfileAttr>">;
def : CompatRule<"checkDenormMode">;


class MergeRule<string F> {
  // The name of the function called to merge the attributes of the caller and
  // callee. The function's signature must match
  // "void(Function&, const Function &)", where the first parameter is the
  // reference to the caller and the second parameter is the reference to the
  // callee.
  string MergeFunc = F;
}
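
// A hedged sketch of a merge callback with the required signature
// (illustrative; the real callbacks live in the C++ support code):
//
//   static void mergeMyAttr(Function &Caller, const Function &Callee) {
//     if (Callee.hasFnAttribute("my-attr"))
//       Caller.addFnAttr("my-attr");
//   }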

def : MergeRule<"setAND<LessPreciseFPMADAttr>">;
def : MergeRule<"setAND<NoInfsFPMathAttr>">;
def : MergeRule<"setAND<NoNansFPMathAttr>">;
def : MergeRule<"setAND<ApproxFuncFPMathAttr>">;
def : MergeRule<"setAND<NoSignedZerosFPMathAttr>">;
def : MergeRule<"setAND<UnsafeFPMathAttr>">;
def : MergeRule<"setOR<NoImplicitFloatAttr>">;
def : MergeRule<"setOR<NoJumpTablesAttr>">;
def : MergeRule<"setOR<ProfileSampleAccurateAttr>">;
def : MergeRule<"setOR<SpeculativeLoadHardeningAttr>">;
def : MergeRule<"adjustCallerSSPLevel">;
def : MergeRule<"adjustCallerStackProbes">;
def : MergeRule<"adjustCallerStackProbeSize">;
def : MergeRule<"adjustMinLegalVectorWidth">;
def : MergeRule<"adjustNullPointerValidAttr">;
def : MergeRule<"setAND<MustProgressAttr>">;

//===- IntrinsicsX86.td - Defines X86 intrinsics -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the X86-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Interrupt traps
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_int : Intrinsic<[], [llvm_i8_ty], [ImmArg<ArgIndex<0>>]>;
}

//===----------------------------------------------------------------------===//
// SEH intrinsics for Windows
let TargetPrefix = "x86" in {
  def int_x86_seh_lsda : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>;

  // Marks the EH registration node created in LLVM IR prior to code generation.
  def int_x86_seh_ehregnode : Intrinsic<[], [llvm_ptr_ty], []>;

  // Marks the EH guard slot node created in LLVM IR prior to code generation.
  def int_x86_seh_ehguard : Intrinsic<[], [llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// FLAGS.
let TargetPrefix = "x86" in {
  def int_x86_flags_read_u32 : ClangBuiltin<"__builtin_ia32_readeflags_u32">,
        Intrinsic<[llvm_i32_ty], [], []>;
  def int_x86_flags_read_u64 : ClangBuiltin<"__builtin_ia32_readeflags_u64">,
        Intrinsic<[llvm_i64_ty], [], []>;
  def int_x86_flags_write_u32 : ClangBuiltin<"__builtin_ia32_writeeflags_u32">,
        Intrinsic<[], [llvm_i32_ty], []>;
  def int_x86_flags_write_u64 : ClangBuiltin<"__builtin_ia32_writeeflags_u64">,
        Intrinsic<[], [llvm_i64_ty], []>;
}

//===----------------------------------------------------------------------===//
// Read Time Stamp Counter.
let TargetPrefix = "x86" in {
  def int_x86_rdtsc : ClangBuiltin<"__builtin_ia32_rdtsc">,
              Intrinsic<[llvm_i64_ty], [], []>;
  def int_x86_rdtscp :
              Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
}

// Read Performance-Monitoring Counter.
let TargetPrefix = "x86" in {
  def int_x86_rdpmc : ClangBuiltin<"__builtin_ia32_rdpmc">,
              Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
}

// Read processor ID.
let TargetPrefix = "x86" in {
  def int_x86_rdpid : ClangBuiltin<"__builtin_ia32_rdpid">,
              Intrinsic<[llvm_i32_ty], [], []>;
}

// Lock bit test.
let TargetPrefix = "x86" in {
  def int_x86_atomic_bts : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i8_ty],
                                     [ImmArg<ArgIndex<1>>]>;
  def int_x86_atomic_btc : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i8_ty],
                                     [ImmArg<ArgIndex<1>>]>;
  def int_x86_atomic_btr : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i8_ty],
                                     [ImmArg<ArgIndex<1>>]>;
  def int_x86_atomic_bts_rm  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                         []>;
  def int_x86_atomic_btc_rm  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                         []>;
  def int_x86_atomic_btr_rm  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                         []>;
}

// Lock binary arith with CC.
let TargetPrefix = "x86" in {
  def int_x86_atomic_add_cc  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty],
                                         [ImmArg<ArgIndex<2>>]>;
  def int_x86_atomic_sub_cc  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty],
                                         [ImmArg<ArgIndex<2>>]>;
  def int_x86_atomic_or_cc  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty],
                                         [ImmArg<ArgIndex<2>>]>;
  def int_x86_atomic_and_cc  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty],
                                         [ImmArg<ArgIndex<2>>]>;
  def int_x86_atomic_xor_cc  : Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_anyint_ty, llvm_i32_ty],
                                         [ImmArg<ArgIndex<2>>]>;
}

// Read Processor Register.
let TargetPrefix = "x86" in {
  def int_x86_rdpru : ClangBuiltin<"__builtin_ia32_rdpru">,
              Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
}

//===----------------------------------------------------------------------===//
// CET SS
let TargetPrefix = "x86" in {
  def int_x86_incsspd : ClangBuiltin<"__builtin_ia32_incsspd">,
              Intrinsic<[], [llvm_i32_ty], []>;
  def int_x86_incsspq : ClangBuiltin<"__builtin_ia32_incsspq">,
              Intrinsic<[], [llvm_i64_ty], []>;
  def int_x86_rdsspd : ClangBuiltin<"__builtin_ia32_rdsspd">,
              Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
  def int_x86_rdsspq : ClangBuiltin<"__builtin_ia32_rdsspq">,
              Intrinsic<[llvm_i64_ty], [llvm_i64_ty], []>;
  def int_x86_saveprevssp : ClangBuiltin<"__builtin_ia32_saveprevssp">,
              Intrinsic<[], [], []>;
  def int_x86_rstorssp : ClangBuiltin<"__builtin_ia32_rstorssp">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_wrssd : ClangBuiltin<"__builtin_ia32_wrssd">,
              Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
  def int_x86_wrssq : ClangBuiltin<"__builtin_ia32_wrssq">,
              Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], []>;
  def int_x86_wrussd : ClangBuiltin<"__builtin_ia32_wrussd">,
              Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
  def int_x86_wrussq : ClangBuiltin<"__builtin_ia32_wrussq">,
              Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty], []>;
  def int_x86_setssbsy : ClangBuiltin<"__builtin_ia32_setssbsy">,
              Intrinsic<[], [], []>;
  def int_x86_clrssbsy : ClangBuiltin<"__builtin_ia32_clrssbsy">,
              Intrinsic<[], [llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// 3DNow!

let TargetPrefix = "x86" in {
  def int_x86_3dnow_pavgusb : ClangBuiltin<"__builtin_ia32_pavgusb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pf2id : ClangBuiltin<"__builtin_ia32_pf2id">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_3dnow_pfacc : ClangBuiltin<"__builtin_ia32_pfacc">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfadd : ClangBuiltin<"__builtin_ia32_pfadd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfcmpeq : ClangBuiltin<"__builtin_ia32_pfcmpeq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfcmpge : ClangBuiltin<"__builtin_ia32_pfcmpge">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfcmpgt : ClangBuiltin<"__builtin_ia32_pfcmpgt">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfmax : ClangBuiltin<"__builtin_ia32_pfmax">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfmin : ClangBuiltin<"__builtin_ia32_pfmin">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfmul : ClangBuiltin<"__builtin_ia32_pfmul">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfrcp : ClangBuiltin<"__builtin_ia32_pfrcp">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_3dnow_pfrcpit1 : ClangBuiltin<"__builtin_ia32_pfrcpit1">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfrcpit2 : ClangBuiltin<"__builtin_ia32_pfrcpit2">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfrsqrt : ClangBuiltin<"__builtin_ia32_pfrsqrt">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_3dnow_pfrsqit1 : ClangBuiltin<"__builtin_ia32_pfrsqit1">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfsub : ClangBuiltin<"__builtin_ia32_pfsub">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pfsubr : ClangBuiltin<"__builtin_ia32_pfsubr">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnow_pi2fd : ClangBuiltin<"__builtin_ia32_pi2fd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_3dnow_pmulhrw : ClangBuiltin<"__builtin_ia32_pmulhrw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// 3DNow! extensions

let TargetPrefix = "x86" in {
  def int_x86_3dnowa_pf2iw : ClangBuiltin<"__builtin_ia32_pf2iw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_3dnowa_pfnacc : ClangBuiltin<"__builtin_ia32_pfnacc">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnowa_pfpnacc : ClangBuiltin<"__builtin_ia32_pfpnacc">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_3dnowa_pi2fw : ClangBuiltin<"__builtin_ia32_pi2fw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_3dnowa_pswapd :
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// SSE1

// Arithmetic ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse_rcp_ss : ClangBuiltin<"__builtin_ia32_rcpss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_sse_rcp_ps : ClangBuiltin<"__builtin_ia32_rcpps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_sse_rsqrt_ss : ClangBuiltin<"__builtin_ia32_rsqrtss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_sse_rsqrt_ps : ClangBuiltin<"__builtin_ia32_rsqrtps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_sse_min_ss : ClangBuiltin<"__builtin_ia32_minss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_min_ps : ClangBuiltin<"__builtin_ia32_minps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_max_ss : ClangBuiltin<"__builtin_ia32_maxss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_max_ps : ClangBuiltin<"__builtin_ia32_maxps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
}

// Comparison ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse_cmp_ss : ClangBuiltin<"__builtin_ia32_cmpss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  // NOTE: This comparison intrinsic is not used by clang as long as the
  //       distinction in signaling behaviour is not implemented.
  def int_x86_sse_cmp_ps :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse_comieq_ss : ClangBuiltin<"__builtin_ia32_comieq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_comilt_ss : ClangBuiltin<"__builtin_ia32_comilt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_comile_ss : ClangBuiltin<"__builtin_ia32_comile">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_comigt_ss : ClangBuiltin<"__builtin_ia32_comigt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_comige_ss : ClangBuiltin<"__builtin_ia32_comige">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_comineq_ss : ClangBuiltin<"__builtin_ia32_comineq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_ucomieq_ss : ClangBuiltin<"__builtin_ia32_ucomieq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_ucomilt_ss : ClangBuiltin<"__builtin_ia32_ucomilt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_ucomile_ss : ClangBuiltin<"__builtin_ia32_ucomile">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_ucomigt_ss : ClangBuiltin<"__builtin_ia32_ucomigt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_ucomige_ss : ClangBuiltin<"__builtin_ia32_ucomige">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_ucomineq_ss : ClangBuiltin<"__builtin_ia32_ucomineq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
}


// Conversion ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse_cvtss2si : ClangBuiltin<"__builtin_ia32_cvtss2si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_cvtss2si64 : ClangBuiltin<"__builtin_ia32_cvtss2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_cvttss2si : ClangBuiltin<"__builtin_ia32_cvttss2si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_cvttss2si64 : ClangBuiltin<"__builtin_ia32_cvttss2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;

  def int_x86_sse_cvtps2pi : ClangBuiltin<"__builtin_ia32_cvtps2pi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_cvttps2pi : ClangBuiltin<"__builtin_ia32_cvttps2pi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse_cvtpi2ps : ClangBuiltin<"__builtin_ia32_cvtpi2ps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
}
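// Illustrative note: cvtss2si rounds according to the current MXCSR rounding
// mode, while cvttss2si ("with truncation") always rounds toward zero:
//
//   int i = __builtin_ia32_cvtss2si(v);    // rounds per MXCSR.RC
//   int t = __builtin_ia32_cvttss2si(v);   // truncates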

// Cacheability support ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse_sfence : ClangBuiltin<"__builtin_ia32_sfence">,
              Intrinsic<[], [], []>;
}

// Control register.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse_stmxcsr :
              Intrinsic<[], [llvm_ptr_ty],
                         [IntrWriteMem, IntrArgMemOnly,
                         // This prevents reordering with ldmxcsr
                         IntrHasSideEffects]>;
  def int_x86_sse_ldmxcsr :
              Intrinsic<[], [llvm_ptr_ty],
                         // FIXME: LDMXCSR does not actually write to memory,
                         // but intrinsic properties are generated incorrectly
                         // for IntrReadMem+IntrHasSideEffects.
                        [/*IntrReadMem, IntrArgMemOnly,*/ IntrHasSideEffects]>;
}
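// Usage sketch: at the IR level stmxcsr/ldmxcsr above take a pointer to the
// 32-bit MXCSR image; clang's corresponding builtins pass the value directly
// and the frontend materialises a stack slot, so source-level use looks like:
//
//   unsigned csr = __builtin_ia32_stmxcsr();   // read MXCSR
//   __builtin_ia32_ldmxcsr(csr | 0x8040);      // e.g. set FTZ and DAZ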

// Misc.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse_movmsk_ps : ClangBuiltin<"__builtin_ia32_movmskps">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// SSE2

// FP arithmetic ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse2_min_sd : ClangBuiltin<"__builtin_ia32_minsd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_min_pd : ClangBuiltin<"__builtin_ia32_minpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_max_sd : ClangBuiltin<"__builtin_ia32_maxsd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_max_pd : ClangBuiltin<"__builtin_ia32_maxpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
}

// FP comparison ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse2_cmp_sd : ClangBuiltin<"__builtin_ia32_cmpsd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  // NOTE: Clang does not use this comparison intrinsic while the distinction
  //       in signaling behaviour remains unimplemented.
  def int_x86_sse2_cmp_pd :
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse2_comieq_sd : ClangBuiltin<"__builtin_ia32_comisdeq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_comilt_sd : ClangBuiltin<"__builtin_ia32_comisdlt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_comile_sd : ClangBuiltin<"__builtin_ia32_comisdle">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_comigt_sd : ClangBuiltin<"__builtin_ia32_comisdgt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_comige_sd : ClangBuiltin<"__builtin_ia32_comisdge">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_comineq_sd : ClangBuiltin<"__builtin_ia32_comisdneq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_ucomieq_sd : ClangBuiltin<"__builtin_ia32_ucomisdeq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_ucomilt_sd : ClangBuiltin<"__builtin_ia32_ucomisdlt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_ucomile_sd : ClangBuiltin<"__builtin_ia32_ucomisdle">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_ucomigt_sd : ClangBuiltin<"__builtin_ia32_ucomisdgt">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_ucomige_sd : ClangBuiltin<"__builtin_ia32_ucomisdge">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_ucomineq_sd : ClangBuiltin<"__builtin_ia32_ucomisdneq">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
}

// Integer arithmetic ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse2_pmulhu_w : ClangBuiltin<"__builtin_ia32_pmulhuw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_sse2_pmulh_w : ClangBuiltin<"__builtin_ia32_pmulhw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_sse2_pmadd_wd : ClangBuiltin<"__builtin_ia32_pmaddwd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_sse2_pavg_b : ClangBuiltin<"__builtin_ia32_pavgb128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem, Commutative]>;
  def int_x86_sse2_pavg_w : ClangBuiltin<"__builtin_ia32_pavgw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_sse2_psad_bw : ClangBuiltin<"__builtin_ia32_psadbw128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem, Commutative]>;
}
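// Semantics sketch: pmadd_wd multiplies corresponding signed i16 lanes and
// sums adjacent products into i32 lanes, roughly per result lane:
//
//   r[i] = (int)a[2*i] * b[2*i] + (int)a[2*i+1] * b[2*i+1];
//
// psad_bw sums absolute byte differences into the low 16 bits of each 64-bit
// lane, zeroing the rest.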

// Integer shift ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse2_psll_w : ClangBuiltin<"__builtin_ia32_psllw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_sse2_psll_d : ClangBuiltin<"__builtin_ia32_pslld128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psll_q : ClangBuiltin<"__builtin_ia32_psllq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_sse2_psrl_w : ClangBuiltin<"__builtin_ia32_psrlw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_sse2_psrl_d : ClangBuiltin<"__builtin_ia32_psrld128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psrl_q : ClangBuiltin<"__builtin_ia32_psrlq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_sse2_psra_w : ClangBuiltin<"__builtin_ia32_psraw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_sse2_psra_d : ClangBuiltin<"__builtin_ia32_psrad128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;

  // Oddly these don't require an immediate due to a gcc compatibility issue.
  def int_x86_sse2_pslli_w : ClangBuiltin<"__builtin_ia32_psllwi128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_pslli_d : ClangBuiltin<"__builtin_ia32_pslldi128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_pslli_q : ClangBuiltin<"__builtin_ia32_psllqi128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psrli_w : ClangBuiltin<"__builtin_ia32_psrlwi128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psrli_d : ClangBuiltin<"__builtin_ia32_psrldi128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psrli_q : ClangBuiltin<"__builtin_ia32_psrlqi128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psrai_w : ClangBuiltin<"__builtin_ia32_psrawi128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_sse2_psrai_d : ClangBuiltin<"__builtin_ia32_psradi128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
}
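// Note: the psll/psrl/psra forms read the shift count from the low 64 bits of
// the second vector operand (counts >= the element width zero the result, or
// replicate the sign bit for psra); the *i forms take the count as an i32:
//
//   __m128i r = __builtin_ia32_psllwi128(v, 3);   // each i16 lane << 3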

// Conversion ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse2_cvtpd2dq : ClangBuiltin<"__builtin_ia32_cvtpd2dq">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvttpd2dq : ClangBuiltin<"__builtin_ia32_cvttpd2dq">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvtpd2ps : ClangBuiltin<"__builtin_ia32_cvtpd2ps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvtps2dq : ClangBuiltin<"__builtin_ia32_cvtps2dq">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse2_cvttps2dq : ClangBuiltin<"__builtin_ia32_cvttps2dq">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse2_cvtsd2si : ClangBuiltin<"__builtin_ia32_cvtsd2si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvtsd2si64 : ClangBuiltin<"__builtin_ia32_cvtsd2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvttsd2si : ClangBuiltin<"__builtin_ia32_cvttsd2si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvttsd2si64 : ClangBuiltin<"__builtin_ia32_cvttsd2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_cvtsd2ss : ClangBuiltin<"__builtin_ia32_cvtsd2ss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse_cvtpd2pi : ClangBuiltin<"__builtin_ia32_cvtpd2pi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse_cvttpd2pi : ClangBuiltin<"__builtin_ia32_cvttpd2pi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse_cvtpi2pd : ClangBuiltin<"__builtin_ia32_cvtpi2pd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
}

// Misc.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse2_packsswb_128 : ClangBuiltin<"__builtin_ia32_packsswb128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_sse2_packssdw_128 : ClangBuiltin<"__builtin_ia32_packssdw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_sse2_packuswb_128 : ClangBuiltin<"__builtin_ia32_packuswb128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_sse2_movmsk_pd : ClangBuiltin<"__builtin_ia32_movmskpd">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse2_pmovmskb_128 : ClangBuiltin<"__builtin_ia32_pmovmskb128">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_sse2_maskmov_dqu : ClangBuiltin<"__builtin_ia32_maskmovdqu">,
              Intrinsic<[], [llvm_v16i8_ty,
                         llvm_v16i8_ty, llvm_ptr_ty], []>;
  def int_x86_sse2_clflush : ClangBuiltin<"__builtin_ia32_clflush">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_sse2_lfence : ClangBuiltin<"__builtin_ia32_lfence">,
              Intrinsic<[], [], []>;
  def int_x86_sse2_mfence : ClangBuiltin<"__builtin_ia32_mfence">,
              Intrinsic<[], [], []>;
  def int_x86_sse2_pause : ClangBuiltin<"__builtin_ia32_pause">,
              Intrinsic<[], [], []>;
}
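// Usage sketch: pause is the spin-wait hint; together with the fences above
// it keeps a polling loop well-behaved on hyper-threaded cores:
//
//   while (!flag)
//     __builtin_ia32_pause();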

//===----------------------------------------------------------------------===//
// SSE3

// Addition / subtraction ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse3_addsub_ps : ClangBuiltin<"__builtin_ia32_addsubps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse3_addsub_pd : ClangBuiltin<"__builtin_ia32_addsubpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
}
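// Lane note: addsub subtracts in even-indexed lanes and adds in odd-indexed
// ones ({a0-b0, a1+b1, ...}), the building block of complex multiplication.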

// Horizontal ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse3_hadd_ps : ClangBuiltin<"__builtin_ia32_haddps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse3_hadd_pd : ClangBuiltin<"__builtin_ia32_haddpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_sse3_hsub_ps : ClangBuiltin<"__builtin_ia32_hsubps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_sse3_hsub_pd : ClangBuiltin<"__builtin_ia32_hsubpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty], [IntrNoMem]>;
}

// Specialized unaligned load.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse3_ldu_dq : ClangBuiltin<"__builtin_ia32_lddqu">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
}

// Thread synchronization ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse3_monitor : ClangBuiltin<"__builtin_ia32_monitor">,
              Intrinsic<[], [llvm_ptr_ty,
                         llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_sse3_mwait : ClangBuiltin<"__builtin_ia32_mwait">,
              Intrinsic<[], [llvm_i32_ty,
                         llvm_i32_ty], []>;
}

//===----------------------------------------------------------------------===//
// SSSE3

// Horizontal arithmetic ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_ssse3_phadd_w         : ClangBuiltin<"__builtin_ia32_phaddw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_phadd_w_128     : ClangBuiltin<"__builtin_ia32_phaddw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;

  def int_x86_ssse3_phadd_d         : ClangBuiltin<"__builtin_ia32_phaddd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_phadd_d_128     : ClangBuiltin<"__builtin_ia32_phaddd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;

  def int_x86_ssse3_phadd_sw        : ClangBuiltin<"__builtin_ia32_phaddsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_phadd_sw_128    : ClangBuiltin<"__builtin_ia32_phaddsw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;

  def int_x86_ssse3_phsub_w         : ClangBuiltin<"__builtin_ia32_phsubw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_phsub_w_128     : ClangBuiltin<"__builtin_ia32_phsubw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;

  def int_x86_ssse3_phsub_d         : ClangBuiltin<"__builtin_ia32_phsubd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_phsub_d_128     : ClangBuiltin<"__builtin_ia32_phsubd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;

  def int_x86_ssse3_phsub_sw        : ClangBuiltin<"__builtin_ia32_phsubsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_phsub_sw_128    : ClangBuiltin<"__builtin_ia32_phsubsw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;

  def int_x86_ssse3_pmadd_ub_sw     : ClangBuiltin<"__builtin_ia32_pmaddubsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_pmadd_ub_sw_128 : ClangBuiltin<"__builtin_ia32_pmaddubsw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem]>;
}
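// Semantics note: pmadd_ub_sw multiplies unsigned bytes of the first operand
// by the corresponding signed bytes of the second and adds adjacent products
// into signed i16 lanes with saturation.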

// Packed multiply high with round and scale
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_ssse3_pmul_hr_sw      : ClangBuiltin<"__builtin_ia32_pmulhrsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
  def int_x86_ssse3_pmul_hr_sw_128  : ClangBuiltin<"__builtin_ia32_pmulhrsw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem, Commutative]>;
}
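// Rounding detail: pmul_hr_sw computes ((a * b + 0x4000) >> 15) per signed
// i16 lane, i.e. a Q15 fixed-point multiply with round-to-nearest.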

// Shuffle ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_ssse3_pshuf_b         : ClangBuiltin<"__builtin_ia32_pshufb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_pshuf_b_128     : ClangBuiltin<"__builtin_ia32_pshufb128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_sse_pshuf_w           : ClangBuiltin<"__builtin_ia32_pshufw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i8_ty],
                             [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}

// Sign ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_ssse3_psign_b         : ClangBuiltin<"__builtin_ia32_psignb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_psign_b_128     : ClangBuiltin<"__builtin_ia32_psignb128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                             llvm_v16i8_ty], [IntrNoMem]>;

  def int_x86_ssse3_psign_w         : ClangBuiltin<"__builtin_ia32_psignw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_psign_w_128     : ClangBuiltin<"__builtin_ia32_psignw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;

  def int_x86_ssse3_psign_d         : ClangBuiltin<"__builtin_ia32_psignd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
                             llvm_x86mmx_ty], [IntrNoMem]>;
  def int_x86_ssse3_psign_d_128     : ClangBuiltin<"__builtin_ia32_psignd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
}

// Absolute value ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_ssse3_pabs_b     : ClangBuiltin<"__builtin_ia32_pabsb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;

  def int_x86_ssse3_pabs_w     : ClangBuiltin<"__builtin_ia32_pabsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;

  def int_x86_ssse3_pabs_d     : ClangBuiltin<"__builtin_ia32_pabsd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// SSE4.1

// FP rounding ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_round_ss        : ClangBuiltin<"__builtin_ia32_roundss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse41_round_ps        : ClangBuiltin<"__builtin_ia32_roundps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_sse41_round_sd        : ClangBuiltin<"__builtin_ia32_roundsd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse41_round_pd        : ClangBuiltin<"__builtin_ia32_roundpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
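// Immediate sketch (per the ROUNDPS encoding): bits [1:0] select the rounding
// mode (00 nearest, 01 down, 10 up, 11 truncate), bit 2 defers to MXCSR.RC,
// and bit 3 suppresses precision exceptions. For example:
//
//   __m128 f = __builtin_ia32_roundps(v, 0x01);   // _mm_floor_ps
//   __m128 c = __builtin_ia32_roundps(v, 0x02);   // _mm_ceil_ps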

// Vector min element
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_phminposuw     : ClangBuiltin<"__builtin_ia32_phminposuw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
}

// Advanced Encryption Standard (AES) Instructions
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_aesni_aesimc          : ClangBuiltin<"__builtin_ia32_aesimc128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;

  def int_x86_aesni_aesenc          : ClangBuiltin<"__builtin_ia32_aesenc128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_aesni_aesenc_256      : ClangBuiltin<"__builtin_ia32_aesenc256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_aesni_aesenc_512      : ClangBuiltin<"__builtin_ia32_aesenc512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;

  def int_x86_aesni_aesenclast : ClangBuiltin<"__builtin_ia32_aesenclast128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_aesni_aesenclast_256 :
    ClangBuiltin<"__builtin_ia32_aesenclast256">,
    DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                          [IntrNoMem]>;
  def int_x86_aesni_aesenclast_512 :
    ClangBuiltin<"__builtin_ia32_aesenclast512">,
    DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                          [IntrNoMem]>;

  def int_x86_aesni_aesdec          : ClangBuiltin<"__builtin_ia32_aesdec128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_aesni_aesdec_256      : ClangBuiltin<"__builtin_ia32_aesdec256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_aesni_aesdec_512      : ClangBuiltin<"__builtin_ia32_aesdec512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;

  def int_x86_aesni_aesdeclast : ClangBuiltin<"__builtin_ia32_aesdeclast128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_aesni_aesdeclast_256 :
    ClangBuiltin<"__builtin_ia32_aesdeclast256">,
    DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                          [IntrNoMem]>;
  def int_x86_aesni_aesdeclast_512 :
    ClangBuiltin<"__builtin_ia32_aesdeclast512">,
    DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                          [IntrNoMem]>;

  def int_x86_aesni_aeskeygenassist :
    ClangBuiltin<"__builtin_ia32_aeskeygenassist128">,
    DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
                          [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
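// Semantics note: aesenc performs one full AES round on the 128-bit state
// (ShiftRows, SubBytes, MixColumns, then XOR with the round-key operand);
// aesenclast omits MixColumns, matching the final round. The 256/512-bit
// VAES variants apply the same round to each 128-bit lane independently:
//
//   __m128i next = __builtin_ia32_aesenc128(state, roundkey);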

// PCLMUL instructions
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
  def int_x86_pclmulqdq : ClangBuiltin<"__builtin_ia32_pclmulqdq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_pclmulqdq_256 : ClangBuiltin<"__builtin_ia32_pclmulqdq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
                            [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_pclmulqdq_512 : ClangBuiltin<"__builtin_ia32_pclmulqdq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty],
                            [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
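// Immediate sketch: for pclmulqdq, imm bit 0 selects the low/high qword of
// the first source and bit 4 that of the second; the two selected 64-bit
// values are carry-less multiplied into a 128-bit product per lane:
//
//   __m128i p = __builtin_ia32_pclmulqdq128(a, b, 0x11);   // high x high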

// Vector pack
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_packusdw : ClangBuiltin<"__builtin_ia32_packusdw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
}

// Vector insert
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_insertps       : ClangBuiltin<"__builtin_ia32_insertps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}

// Vector blend
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_pblendvb       : ClangBuiltin<"__builtin_ia32_pblendvb128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_x86_sse41_blendvpd       : ClangBuiltin<"__builtin_ia32_blendvpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
                            [IntrNoMem]>;
  def int_x86_sse41_blendvps       : ClangBuiltin<"__builtin_ia32_blendvps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
}
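// Selector note: the variable blends pick per lane on the sign/top bit of the
// third operand; a set bit selects the second source, a clear bit the first.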

// Vector dot product
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_dppd            : ClangBuiltin<"__builtin_ia32_dppd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse41_dpps            : ClangBuiltin<"__builtin_ia32_dpps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
}

// Vector sum of absolute differences
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_mpsadbw         : ClangBuiltin<"__builtin_ia32_mpsadbw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}

// Test instruction with bitwise comparison.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
  def int_x86_sse41_ptestz          : ClangBuiltin<"__builtin_ia32_ptestz128">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_sse41_ptestc          : ClangBuiltin<"__builtin_ia32_ptestc128">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_sse41_ptestnzc        : ClangBuiltin<"__builtin_ia32_ptestnzc128">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
}
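// Flag mapping: ptestz returns ZF, i.e. 1 iff (a & b) == 0; ptestc returns
// CF, i.e. 1 iff (~a & b) == 0; ptestnzc returns 1 iff both flags are clear.
// These back _mm_testz_si128 and friends:
//
//   if (__builtin_ia32_ptestz128(a, b)) { /* a and b share no set bits */ }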

//===----------------------------------------------------------------------===//
// SSE4.2

// Miscellaneous
// CRC Instruction
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
  def int_x86_sse42_crc32_32_8       : ClangBuiltin<"__builtin_ia32_crc32qi">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_sse42_crc32_32_16      : ClangBuiltin<"__builtin_ia32_crc32hi">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_sse42_crc32_32_32      : ClangBuiltin<"__builtin_ia32_crc32si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_sse42_crc32_64_64      : ClangBuiltin<"__builtin_ia32_crc32di">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
}
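// Note: the crc32 intrinsics above implement CRC-32C (Castagnoli, polynomial
// 0x1EDC6F41), not the IEEE CRC-32 used by zip/zlib. The running value chains
// through the first operand; the ~0u seed and any final inversion below are
// protocol conventions (buf/n are hypothetical locals):
//
//   unsigned crc = ~0u;
//   for (size_t i = 0; i != n; ++i)
//     crc = __builtin_ia32_crc32qi(crc, buf[i]);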

// String/text processing ops.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
  def int_x86_sse42_pcmpistrm128  : ClangBuiltin<"__builtin_ia32_pcmpistrm128">,
    DefaultAttrsIntrinsic<[llvm_v16i8_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpistri128  : ClangBuiltin<"__builtin_ia32_pcmpistri128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpistria128 : ClangBuiltin<"__builtin_ia32_pcmpistria128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpistric128 : ClangBuiltin<"__builtin_ia32_pcmpistric128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpistrio128 : ClangBuiltin<"__builtin_ia32_pcmpistrio128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpistris128 : ClangBuiltin<"__builtin_ia32_pcmpistris128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpistriz128 : ClangBuiltin<"__builtin_ia32_pcmpistriz128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sse42_pcmpestrm128  : ClangBuiltin<"__builtin_ia32_pcmpestrm128">,
    DefaultAttrsIntrinsic<[llvm_v16i8_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_sse42_pcmpestri128  : ClangBuiltin<"__builtin_ia32_pcmpestri128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_sse42_pcmpestria128 : ClangBuiltin<"__builtin_ia32_pcmpestria128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_sse42_pcmpestric128 : ClangBuiltin<"__builtin_ia32_pcmpestric128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_sse42_pcmpestrio128 : ClangBuiltin<"__builtin_ia32_pcmpestrio128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_sse42_pcmpestris128 : ClangBuiltin<"__builtin_ia32_pcmpestris128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_sse42_pcmpestriz128 : ClangBuiltin<"__builtin_ia32_pcmpestriz128">,
    DefaultAttrsIntrinsic<[llvm_i32_ty],
        [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
         llvm_i8_ty],
        [IntrNoMem, ImmArg<ArgIndex<4>>]>;
}

//===----------------------------------------------------------------------===//
// SSE4A

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_sse4a_extrqi : ClangBuiltin<"__builtin_ia32_extrqi">,
    DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                          [llvm_v2i64_ty, llvm_i8_ty, llvm_i8_ty],
                          [IntrNoMem, ImmArg<ArgIndex<1>>,
                           ImmArg<ArgIndex<2>>]>;
  def int_x86_sse4a_extrq  : ClangBuiltin<"__builtin_ia32_extrq">,
    DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v16i8_ty],
                          [IntrNoMem]>;

  def int_x86_sse4a_insertqi : ClangBuiltin<"__builtin_ia32_insertqi">,
    DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
                                            llvm_i8_ty, llvm_i8_ty],
                          [IntrNoMem, ImmArg<ArgIndex<2>>,
                           ImmArg<ArgIndex<3>>]>;
  def int_x86_sse4a_insertq  : ClangBuiltin<"__builtin_ia32_insertq">,
    DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                          [IntrNoMem]>;
}
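// Immediate sketch: extrqi extracts a bit field from the low 64 bits of the
// source, with the first immediate giving the field length (0 means 64) and
// the second the starting bit; insertqi places the low bits of the second
// source at the given position:
//
//   __m128i f = __builtin_ia32_extrqi(v, 8, 16);   // bits [23:16]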

//===----------------------------------------------------------------------===//
// AVX

// Arithmetic ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_addsub_pd_256 : ClangBuiltin<"__builtin_ia32_addsubpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_addsub_ps_256 : ClangBuiltin<"__builtin_ia32_addsubps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_max_pd_256 : ClangBuiltin<"__builtin_ia32_maxpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_max_ps_256 : ClangBuiltin<"__builtin_ia32_maxps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_min_pd_256 : ClangBuiltin<"__builtin_ia32_minpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_min_ps_256 : ClangBuiltin<"__builtin_ia32_minps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;

  def int_x86_avx_rsqrt_ps_256 : ClangBuiltin<"__builtin_ia32_rsqrtps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;

  def int_x86_avx_rcp_ps_256 : ClangBuiltin<"__builtin_ia32_rcpps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;

  def int_x86_avx_round_pd_256 : ClangBuiltin<"__builtin_ia32_roundpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx_round_ps_256 : ClangBuiltin<"__builtin_ia32_roundps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}

// Horizontal ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_hadd_pd_256 : ClangBuiltin<"__builtin_ia32_haddpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_hsub_ps_256 : ClangBuiltin<"__builtin_ia32_hsubps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_hsub_pd_256 : ClangBuiltin<"__builtin_ia32_hsubpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_hadd_ps_256 : ClangBuiltin<"__builtin_ia32_haddps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
}
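// Lane note: the 256-bit horizontal ops work within each 128-bit lane, so
// hadd_pd_256 yields {a0+a1, b0+b1, a2+a3, b2+b3} rather than a full
// cross-lane reduction.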

// Vector permutation
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_vpermilvar_pd : ClangBuiltin<"__builtin_ia32_vpermilvarpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vpermilvar_ps : ClangBuiltin<"__builtin_ia32_vpermilvarps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;

  def int_x86_avx_vpermilvar_pd_256 :
        ClangBuiltin<"__builtin_ia32_vpermilvarpd256">,
        DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4i64_ty],
                              [IntrNoMem]>;
  def int_x86_avx_vpermilvar_ps_256 :
        ClangBuiltin<"__builtin_ia32_vpermilvarps256">,
        DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_d_128 :
       ClangBuiltin<"__builtin_ia32_vpermi2vard128">,
       DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                             [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                             [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_d_256 :
        ClangBuiltin<"__builtin_ia32_vpermi2vard256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_d_512 :
        ClangBuiltin<"__builtin_ia32_vpermi2vard512">,
        DefaultAttrsIntrinsic<[llvm_v16i32_ty],
                              [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_hi_128 :
        ClangBuiltin<"__builtin_ia32_vpermi2varhi128">,
        DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                              [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_hi_256 :
        ClangBuiltin<"__builtin_ia32_vpermi2varhi256">,
        DefaultAttrsIntrinsic<[llvm_v16i16_ty],
                              [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_hi_512 :
        ClangBuiltin<"__builtin_ia32_vpermi2varhi512">,
        DefaultAttrsIntrinsic<[llvm_v32i16_ty],
                              [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_pd_128 :
        ClangBuiltin<"__builtin_ia32_vpermi2varpd128">,
        DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                              [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_pd_256 :
        ClangBuiltin<"__builtin_ia32_vpermi2varpd256">,
        DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                              [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_pd_512 :
        ClangBuiltin<"__builtin_ia32_vpermi2varpd512">,
        DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                              [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_ps_128 :
        ClangBuiltin<"__builtin_ia32_vpermi2varps128">,
        DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                              [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_ps_256 :
        ClangBuiltin<"__builtin_ia32_vpermi2varps256">,
        DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                              [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_ps_512 :
        ClangBuiltin<"__builtin_ia32_vpermi2varps512">,
        DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                              [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_q_128 :
        ClangBuiltin<"__builtin_ia32_vpermi2varq128">,
        DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                              [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_q_256 :
        ClangBuiltin<"__builtin_ia32_vpermi2varq256">,
        DefaultAttrsIntrinsic<[llvm_v4i64_ty],
                              [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_q_512 :
        ClangBuiltin<"__builtin_ia32_vpermi2varq512">,
        DefaultAttrsIntrinsic<[llvm_v8i64_ty],
                              [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_qi_128 :
        ClangBuiltin<"__builtin_ia32_vpermi2varqi128">,
        DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                              [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_qi_256 :
        ClangBuiltin<"__builtin_ia32_vpermi2varqi256">,
        DefaultAttrsIntrinsic<[llvm_v32i8_ty],
                              [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermi2var_qi_512 :
        ClangBuiltin<"__builtin_ia32_vpermi2varqi512">,
        DefaultAttrsIntrinsic<[llvm_v64i8_ty],
                              [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermilvar_pd_512 :
        ClangBuiltin<"__builtin_ia32_vpermilvarpd512">,
        DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8i64_ty],
                              [IntrNoMem]>;

  def int_x86_avx512_vpermilvar_ps_512 :
        ClangBuiltin<"__builtin_ia32_vpermilvarps512">,
        DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                              [llvm_v16f32_ty, llvm_v16i32_ty], [IntrNoMem]>;

  def int_x86_avx512_pshuf_b_512 :
        ClangBuiltin<"__builtin_ia32_pshufb512">,
        DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                              [IntrNoMem]>;
}
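// Semantics sketch (vpermi2var): the second operand is the index vector; with
// N elements per vector, each result lane selects from the first (a) or third
// (c) operand, roughly:
//
//   r[i] = (idx[i] & N) ? c[idx[i] & (N - 1)] : a[idx[i] & (N - 1)];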

// GFNI Instructions
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_vgf2p8affineinvqb_128 :
         ClangBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v16qi">,
         DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                               [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_vgf2p8affineinvqb_256 :
         ClangBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v32qi">,
         DefaultAttrsIntrinsic<[llvm_v32i8_ty],
                               [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_vgf2p8affineinvqb_512 :
         ClangBuiltin<"__builtin_ia32_vgf2p8affineinvqb_v64qi">,
         DefaultAttrsIntrinsic<[llvm_v64i8_ty],
                               [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_vgf2p8affineqb_128 :
         ClangBuiltin<"__builtin_ia32_vgf2p8affineqb_v16qi">,
         DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                               [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_vgf2p8affineqb_256 :
         ClangBuiltin<"__builtin_ia32_vgf2p8affineqb_v32qi">,
         DefaultAttrsIntrinsic<[llvm_v32i8_ty],
                               [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i8_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_vgf2p8affineqb_512 :
         ClangBuiltin<"__builtin_ia32_vgf2p8affineqb_v64qi">,
         DefaultAttrsIntrinsic<[llvm_v64i8_ty],
                               [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i8_ty],
                               [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_vgf2p8mulb_128     :
         ClangBuiltin<"__builtin_ia32_vgf2p8mulb_v16qi">,
         DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                               [IntrNoMem]>;
  def int_x86_vgf2p8mulb_256     :
         ClangBuiltin<"__builtin_ia32_vgf2p8mulb_v32qi">,
         DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty],
                               [IntrNoMem]>;
  def int_x86_vgf2p8mulb_512     :
         ClangBuiltin<"__builtin_ia32_vgf2p8mulb_v64qi">,
         DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                               [IntrNoMem]>;
}
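// Field note: gf2p8mulb multiplies bytes as GF(2^8) elements reduced by the
// AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11B); the affine forms apply an
// 8x8 bit matrix packed into the second operand's qwords plus the immediate
// as an additive constant, with *invqb first inverting each byte in GF(2^8).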

// Vector blend
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_blendv_pd_256 : ClangBuiltin<"__builtin_ia32_blendvpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_blendv_ps_256 : ClangBuiltin<"__builtin_ia32_blendvps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
}

// Vector dot product
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_dp_ps_256 : ClangBuiltin<"__builtin_ia32_dpps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
                            [IntrNoMem, Commutative, ImmArg<ArgIndex<2>>]>;
}

// Vector compare
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_cmp_pd_256 :
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx_cmp_ps_256 :
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}

// Vector convert
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_cvt_pd2_ps_256 : ClangBuiltin<"__builtin_ia32_cvtpd2ps256">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
  def int_x86_avx_cvt_ps2dq_256 : ClangBuiltin<"__builtin_ia32_cvtps2dq256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
  def int_x86_avx_cvtt_pd2dq_256 : ClangBuiltin<"__builtin_ia32_cvttpd2dq256">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
  def int_x86_avx_cvt_pd2dq_256 : ClangBuiltin<"__builtin_ia32_cvtpd2dq256">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
  def int_x86_avx_cvtt_ps2dq_256 : ClangBuiltin<"__builtin_ia32_cvttps2dq256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
}

// Vector bit test
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_vtestz_pd : ClangBuiltin<"__builtin_ia32_vtestzpd">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestc_pd : ClangBuiltin<"__builtin_ia32_vtestcpd">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestnzc_pd : ClangBuiltin<"__builtin_ia32_vtestnzcpd">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestz_ps : ClangBuiltin<"__builtin_ia32_vtestzps">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestc_ps : ClangBuiltin<"__builtin_ia32_vtestcps">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestnzc_ps : ClangBuiltin<"__builtin_ia32_vtestnzcps">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestz_pd_256 : ClangBuiltin<"__builtin_ia32_vtestzpd256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestc_pd_256 : ClangBuiltin<"__builtin_ia32_vtestcpd256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestnzc_pd_256 : ClangBuiltin<"__builtin_ia32_vtestnzcpd256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestz_ps_256 : ClangBuiltin<"__builtin_ia32_vtestzps256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestc_ps_256 : ClangBuiltin<"__builtin_ia32_vtestcps256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_vtestnzc_ps_256 : ClangBuiltin<"__builtin_ia32_vtestnzcps256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx_ptestz_256 : ClangBuiltin<"__builtin_ia32_ptestz256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_ptestc_256 : ClangBuiltin<"__builtin_ia32_ptestc256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx_ptestnzc_256 : ClangBuiltin<"__builtin_ia32_ptestnzc256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_fpclass_pd_128 :
      DefaultAttrsIntrinsic<[llvm_v2i1_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_fpclass_pd_256 :
      DefaultAttrsIntrinsic<[llvm_v4i1_ty], [llvm_v4f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_fpclass_pd_512 :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty], [llvm_v8f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_fpclass_ps_128 :
      DefaultAttrsIntrinsic<[llvm_v4i1_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_fpclass_ps_256 :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty], [llvm_v8f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_fpclass_ps_512 :
      DefaultAttrsIntrinsic<[llvm_v16i1_ty], [llvm_v16f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_fpclass_sd :
      ClangBuiltin<"__builtin_ia32_fpclasssd_mask">,
      DefaultAttrsIntrinsic<[llvm_i8_ty],
                            [llvm_v2f64_ty, llvm_i32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_fpclass_ss :
      ClangBuiltin<"__builtin_ia32_fpclassss_mask">,
      DefaultAttrsIntrinsic<[llvm_i8_ty],
                            [llvm_v4f32_ty, llvm_i32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
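// Immediate sketch: the fpclass mask ORs category tests together: 0x01 QNaN,
// 0x02 +0, 0x04 -0, 0x08 +Inf, 0x10 -Inf, 0x20 denormal, 0x40 negative
// finite, 0x80 SNaN; e.g. 0x18 tests for any infinity.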

// Vector extract sign mask
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_movmsk_pd_256 : ClangBuiltin<"__builtin_ia32_movmskpd256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
  def int_x86_avx_movmsk_ps_256 : ClangBuiltin<"__builtin_ia32_movmskps256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
}

// Vector zero
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_vzeroall : ClangBuiltin<"__builtin_ia32_vzeroall">,
        Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
  def int_x86_avx_vzeroupper : ClangBuiltin<"__builtin_ia32_vzeroupper">,
        Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>;
}

// SIMD load ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_ldu_dq_256 : ClangBuiltin<"__builtin_ia32_lddqu256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
}

// Conditional load ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_maskload_pd : ClangBuiltin<"__builtin_ia32_maskloadpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_x86_avx_maskload_ps : ClangBuiltin<"__builtin_ia32_maskloadps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_x86_avx_maskload_pd_256 : ClangBuiltin<"__builtin_ia32_maskloadpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_x86_avx_maskload_ps_256 : ClangBuiltin<"__builtin_ia32_maskloadps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
}

// Conditional store ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx_maskstore_pd : ClangBuiltin<"__builtin_ia32_maskstorepd">,
        Intrinsic<[], [llvm_ptr_ty,
                  llvm_v2i64_ty, llvm_v2f64_ty], [IntrArgMemOnly]>;
  def int_x86_avx_maskstore_ps : ClangBuiltin<"__builtin_ia32_maskstoreps">,
        Intrinsic<[], [llvm_ptr_ty,
                  llvm_v4i32_ty, llvm_v4f32_ty], [IntrArgMemOnly]>;
  def int_x86_avx_maskstore_pd_256 :
        ClangBuiltin<"__builtin_ia32_maskstorepd256">,
        Intrinsic<[], [llvm_ptr_ty,
                  llvm_v4i64_ty, llvm_v4f64_ty], [IntrArgMemOnly]>;
  def int_x86_avx_maskstore_ps_256 :
        ClangBuiltin<"__builtin_ia32_maskstoreps256">,
        Intrinsic<[], [llvm_ptr_ty,
                  llvm_v8i32_ty, llvm_v8f32_ty], [IntrArgMemOnly]>;
}
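
// The maskstores mirror the loads with operand order (address, mask, data)
// and no result; only IntrArgMemOnly applies, since they write memory.
// Illustrative IR:
//   call void @llvm.x86.avx.maskstore.ps(ptr %p, <4 x i32> %m, <4 x float> %v)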

// BITALG bits shuffle
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_vpshufbitqmb_128 :
    DefaultAttrsIntrinsic<[llvm_v16i1_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                          [IntrNoMem]>;
  def int_x86_avx512_vpshufbitqmb_256 :
    DefaultAttrsIntrinsic<[llvm_v32i1_ty], [llvm_v32i8_ty, llvm_v32i8_ty],
                          [IntrNoMem]>;
  def int_x86_avx512_vpshufbitqmb_512 :
    DefaultAttrsIntrinsic<[llvm_v64i1_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                          [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// AVX2

// Integer arithmetic ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_pmulhu_w : ClangBuiltin<"__builtin_ia32_pmulhuw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_avx2_pmulh_w : ClangBuiltin<"__builtin_ia32_pmulhw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_avx2_pmadd_wd : ClangBuiltin<"__builtin_ia32_pmaddwd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_avx2_pavg_b : ClangBuiltin<"__builtin_ia32_pavgb256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
                             llvm_v32i8_ty], [IntrNoMem, Commutative]>;
  def int_x86_avx2_pavg_w : ClangBuiltin<"__builtin_ia32_pavgw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_avx2_psad_bw : ClangBuiltin<"__builtin_ia32_psadbw256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v32i8_ty,
                             llvm_v32i8_ty], [IntrNoMem, Commutative]>;
}
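
// The least obvious of these is pmadd.wd: it multiplies vertical pairs of
// signed 16-bit elements and adds adjacent products, roughly
// r[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1] per 32-bit lane. Example IR:
//   %r = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a, <16 x i16> %b)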

// Integer shift ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_psll_w : ClangBuiltin<"__builtin_ia32_psllw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_avx2_psll_d : ClangBuiltin<"__builtin_ia32_pslld256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psll_q : ClangBuiltin<"__builtin_ia32_psllq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx2_psrl_w : ClangBuiltin<"__builtin_ia32_psrlw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_avx2_psrl_d : ClangBuiltin<"__builtin_ia32_psrld256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psrl_q : ClangBuiltin<"__builtin_ia32_psrlq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx2_psra_w : ClangBuiltin<"__builtin_ia32_psraw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_avx2_psra_d : ClangBuiltin<"__builtin_ia32_psrad256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;

  // Oddly these don't require an immediate due to a gcc compatibility issue.
  def int_x86_avx2_pslli_w : ClangBuiltin<"__builtin_ia32_psllwi256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_pslli_d : ClangBuiltin<"__builtin_ia32_pslldi256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_pslli_q : ClangBuiltin<"__builtin_ia32_psllqi256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psrli_w : ClangBuiltin<"__builtin_ia32_psrlwi256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psrli_d : ClangBuiltin<"__builtin_ia32_psrldi256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psrli_q : ClangBuiltin<"__builtin_ia32_psrlqi256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psrai_w : ClangBuiltin<"__builtin_ia32_psrawi256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx2_psrai_d : ClangBuiltin<"__builtin_ia32_psradi256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;

  def int_x86_avx512_psra_q_128 : ClangBuiltin<"__builtin_ia32_psraq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx512_psra_q_256 : ClangBuiltin<"__builtin_ia32_psraq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;

  // Oddly these don't require an immediate due to a gcc compatibility issue.
  def int_x86_avx512_psrai_q_128 : ClangBuiltin<"__builtin_ia32_psraqi128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrai_q_256 : ClangBuiltin<"__builtin_ia32_psraqi256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;

  def int_x86_avx512_psll_w_512 : ClangBuiltin<"__builtin_ia32_psllw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_avx512_psll_d_512 : ClangBuiltin<"__builtin_ia32_pslld512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psll_q_512 : ClangBuiltin<"__builtin_ia32_psllq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx512_psrl_w_512 : ClangBuiltin<"__builtin_ia32_psrlw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_avx512_psrl_d_512 : ClangBuiltin<"__builtin_ia32_psrld512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrl_q_512 : ClangBuiltin<"__builtin_ia32_psrlq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx512_psra_w_512 : ClangBuiltin<"__builtin_ia32_psraw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_avx512_psra_d_512 : ClangBuiltin<"__builtin_ia32_psrad512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psra_q_512 : ClangBuiltin<"__builtin_ia32_psraq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;

  // Oddly these don't require an immediate due to a gcc compatibility issue.
  def int_x86_avx512_pslli_w_512 : ClangBuiltin<"__builtin_ia32_psllwi512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_pslli_d_512 : ClangBuiltin<"__builtin_ia32_pslldi512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_pslli_q_512 : ClangBuiltin<"__builtin_ia32_psllqi512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrli_w_512 : ClangBuiltin<"__builtin_ia32_psrlwi512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrli_d_512 : ClangBuiltin<"__builtin_ia32_psrldi512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrli_q_512 : ClangBuiltin<"__builtin_ia32_psrlqi512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrai_w_512 : ClangBuiltin<"__builtin_ia32_psrawi512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrai_d_512 : ClangBuiltin<"__builtin_ia32_psradi512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
                             llvm_i32_ty], [IntrNoMem]>;
  def int_x86_avx512_psrai_q_512 : ClangBuiltin<"__builtin_ia32_psraqi512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
                             llvm_i32_ty], [IntrNoMem]>;

  def int_x86_avx512_pmultishift_qb_128:
        ClangBuiltin<"__builtin_ia32_vpmultishiftqb128">,
        DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                              [IntrNoMem]>;
  def int_x86_avx512_pmultishift_qb_256:
        ClangBuiltin<"__builtin_ia32_vpmultishiftqb256">,
        DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty],
                              [IntrNoMem]>;
  def int_x86_avx512_pmultishift_qb_512:
        ClangBuiltin<"__builtin_ia32_vpmultishiftqb512">,
        DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                              [IntrNoMem]>;
}
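
// Two count conventions coexist above: psll/psrl/psra take the count from the
// low 64 bits of a vector operand (one count for all lanes), while the *i
// forms take an i32 count that, per the comments above, is not ImmArg purely
// for gcc builtin compatibility. Example IR:
//   %r = call <16 x i16> @llvm.x86.avx2.pslli.w(<16 x i16> %a, i32 3)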

// Pack ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_packsswb : ClangBuiltin<"__builtin_ia32_packsswb256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_packssdw : ClangBuiltin<"__builtin_ia32_packssdw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx2_packuswb : ClangBuiltin<"__builtin_ia32_packuswb256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_packusdw : ClangBuiltin<"__builtin_ia32_packusdw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
}
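
// Like most 256-bit integer ops, the packs work within each 128-bit lane
// independently, interleaving the two sources per lane with signed (packss*)
// or unsigned (packus*) saturation, rather than concatenating them.
// Example IR:
//   %r = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a, <16 x i16> %b)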

// Horizontal arithmetic ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_phadd_w : ClangBuiltin<"__builtin_ia32_phaddw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_phadd_d : ClangBuiltin<"__builtin_ia32_phaddd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx2_phadd_sw : ClangBuiltin<"__builtin_ia32_phaddsw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_phsub_w : ClangBuiltin<"__builtin_ia32_phsubw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_phsub_d : ClangBuiltin<"__builtin_ia32_phsubd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx2_phsub_sw : ClangBuiltin<"__builtin_ia32_phsubsw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_pmadd_ub_sw : ClangBuiltin<"__builtin_ia32_pmaddubsw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty,
                             llvm_v32i8_ty], [IntrNoMem]>;
}

// Sign ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_psign_b : ClangBuiltin<"__builtin_ia32_psignb256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
                             llvm_v32i8_ty], [IntrNoMem]>;
  def int_x86_avx2_psign_w : ClangBuiltin<"__builtin_ia32_psignw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem]>;
  def int_x86_avx2_psign_d : ClangBuiltin<"__builtin_ia32_psignd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
}

// Packed multiply high with round and scale
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_pmul_hr_sw : ClangBuiltin<"__builtin_ia32_pmulhrsw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                             llvm_v16i16_ty], [IntrNoMem, Commutative]>;
  def int_x86_avx512_pmul_hr_sw_512 : ClangBuiltin<"__builtin_ia32_pmulhrsw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
                             llvm_v32i16_ty], [IntrNoMem, Commutative]>;
}

// Vector blend
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_pblendvb : ClangBuiltin<"__builtin_ia32_pblendvb256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
                             llvm_v32i8_ty], [IntrNoMem]>;
}

// Vector permutation
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_permd : ClangBuiltin<"__builtin_ia32_permvarsi256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_permps : ClangBuiltin<"__builtin_ia32_permvarsf256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty],
                            [IntrNoMem]>;
}
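
// permd/permps select whole 32-bit elements across both 128-bit lanes,
// roughly r[i] = src[idx[i] & 7]. Example IR:
//   %r = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %src, <8 x i32> %idx)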

// Conditional load ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_maskload_d : ClangBuiltin<"__builtin_ia32_maskloadd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_x86_avx2_maskload_q : ClangBuiltin<"__builtin_ia32_maskloadq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_x86_avx2_maskload_d_256 : ClangBuiltin<"__builtin_ia32_maskloadd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
  def int_x86_avx2_maskload_q_256 : ClangBuiltin<"__builtin_ia32_maskloadq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
}

// Conditional store ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_maskstore_d : ClangBuiltin<"__builtin_ia32_maskstored">,
        Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                  [IntrArgMemOnly]>;
  def int_x86_avx2_maskstore_q : ClangBuiltin<"__builtin_ia32_maskstoreq">,
        Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                  [IntrArgMemOnly]>;
  def int_x86_avx2_maskstore_d_256 :
        ClangBuiltin<"__builtin_ia32_maskstored256">,
        Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                  [IntrArgMemOnly]>;
  def int_x86_avx2_maskstore_q_256 :
        ClangBuiltin<"__builtin_ia32_maskstoreq256">,
        Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty],
                  [IntrArgMemOnly]>;
}

// Variable bit shift ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_psllv_d : ClangBuiltin<"__builtin_ia32_psllv4si">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psllv_d_256 : ClangBuiltin<"__builtin_ia32_psllv8si">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psllv_q : ClangBuiltin<"__builtin_ia32_psllv2di">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psllv_q_256 : ClangBuiltin<"__builtin_ia32_psllv4di">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_psllv_d_512 : ClangBuiltin<"__builtin_ia32_psllv16si">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psllv_q_512 : ClangBuiltin<"__builtin_ia32_psllv8di">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;

  def int_x86_avx2_psrlv_d : ClangBuiltin<"__builtin_ia32_psrlv4si">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psrlv_d_256 : ClangBuiltin<"__builtin_ia32_psrlv8si">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psrlv_q : ClangBuiltin<"__builtin_ia32_psrlv2di">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psrlv_q_256 : ClangBuiltin<"__builtin_ia32_psrlv4di">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_psrlv_d_512 : ClangBuiltin<"__builtin_ia32_psrlv16si">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrlv_q_512 : ClangBuiltin<"__builtin_ia32_psrlv8di">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;

  def int_x86_avx2_psrav_d : ClangBuiltin<"__builtin_ia32_psrav4si">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx2_psrav_d_256 : ClangBuiltin<"__builtin_ia32_psrav8si">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_psrav_d_512 : ClangBuiltin<"__builtin_ia32_psrav16si">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrav_q_128 : ClangBuiltin<"__builtin_ia32_psravq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrav_q_256 : ClangBuiltin<"__builtin_ia32_psravq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrav_q_512 : ClangBuiltin<"__builtin_ia32_psrav8di">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_psllv_w_128 : ClangBuiltin<"__builtin_ia32_psllv8hi">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psllv_w_256 : ClangBuiltin<"__builtin_ia32_psllv16hi">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psllv_w_512 : ClangBuiltin<"__builtin_ia32_psllv32hi">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_psrlv_w_128 : ClangBuiltin<"__builtin_ia32_psrlv8hi">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrlv_w_256 : ClangBuiltin<"__builtin_ia32_psrlv16hi">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrlv_w_512 : ClangBuiltin<"__builtin_ia32_psrlv32hi">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_psrav_w_128 : ClangBuiltin<"__builtin_ia32_psrav8hi">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrav_w_256 : ClangBuiltin<"__builtin_ia32_psrav16hi">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_psrav_w_512 : ClangBuiltin<"__builtin_ia32_psrav32hi">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;
}
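
// Unlike the uniform-count shifts earlier, these take one count per element.
// For the logical forms a count at or above the element width yields zero;
// the arithmetic psrav forms instead fill the lane with its sign bit.
// Example IR:
//   %r = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %a, <4 x i32> %cnt)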

// Gather ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
  def int_x86_avx2_gather_d_pd : ClangBuiltin<"__builtin_ia32_gatherd_pd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
        [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_d_pd_256 : ClangBuiltin<"__builtin_ia32_gatherd_pd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
        [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_pd : ClangBuiltin<"__builtin_ia32_gatherq_pd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
        [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_pd_256 : ClangBuiltin<"__builtin_ia32_gatherq_pd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
        [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_d_ps : ClangBuiltin<"__builtin_ia32_gatherd_ps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
        [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_d_ps_256 : ClangBuiltin<"__builtin_ia32_gatherd_ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
        [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_ps : ClangBuiltin<"__builtin_ia32_gatherq_ps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
        [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_ps_256 : ClangBuiltin<"__builtin_ia32_gatherq_ps256">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
        [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx2_gather_d_q : ClangBuiltin<"__builtin_ia32_gatherd_q">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
        [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_d_q_256 : ClangBuiltin<"__builtin_ia32_gatherd_q256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
        [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_q : ClangBuiltin<"__builtin_ia32_gatherq_q">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
        [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_q_256 : ClangBuiltin<"__builtin_ia32_gatherq_q256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
        [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_d_d : ClangBuiltin<"__builtin_ia32_gatherd_d">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_d_d_256 : ClangBuiltin<"__builtin_ia32_gatherd_d256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty],
        [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_d : ClangBuiltin<"__builtin_ia32_gatherq_d">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx2_gather_q_d_256 : ClangBuiltin<"__builtin_ia32_gatherq_d256">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
        [IntrReadMem, ImmArg<ArgIndex<4>>]>;
}
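
// Operand order for the gathers is (passthrough, base, index vector, mask,
// scale). Lanes whose mask element has its sign bit set are loaded from
// base + index*scale; the others keep the passthrough value, and the scale
// must be an immediate 1, 2, 4, or 8 (ImmArg<ArgIndex<4>>). Sketch in IR:
//   %v = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> %src,
//            ptr %base, <4 x i32> %idx, <2 x double> %mask, i8 8)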

// Misc.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx2_pmovmskb : ClangBuiltin<"__builtin_ia32_pmovmskb256">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
  def int_x86_avx2_pshuf_b : ClangBuiltin<"__builtin_ia32_pshufb256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
                             llvm_v32i8_ty], [IntrNoMem]>;
  def int_x86_avx2_mpsadbw : ClangBuiltin<"__builtin_ia32_mpsadbw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
                             llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}

//===----------------------------------------------------------------------===//
// FMA3 and FMA4

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_fma_vfmaddsub_ps : ClangBuiltin<"__builtin_ia32_vfmaddsubps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_fma_vfmaddsub_pd : ClangBuiltin<"__builtin_ia32_vfmaddsubpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
                            [IntrNoMem]>;
  def int_x86_fma_vfmaddsub_ps_256 :
      ClangBuiltin<"__builtin_ia32_vfmaddsubps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_fma_vfmaddsub_pd_256 :
      ClangBuiltin<"__builtin_ia32_vfmaddsubpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_vfmadd_pd_512 :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_vfmadd_ps_512 :
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_vfmaddsub_pd_512 :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_vfmaddsub_ps_512 :
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_vfmadd_f64 :
      DefaultAttrsIntrinsic<[llvm_double_ty],
          [llvm_double_ty, llvm_double_ty, llvm_double_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_vfmadd_f32 :
      DefaultAttrsIntrinsic<[llvm_float_ty],
          [llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_vpmadd52h_uq_128 :
      ClangBuiltin<"__builtin_ia32_vpmadd52huq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx512_vpmadd52l_uq_128 :
      ClangBuiltin<"__builtin_ia32_vpmadd52luq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
                             llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx512_vpmadd52h_uq_256 :
      ClangBuiltin<"__builtin_ia32_vpmadd52huq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
                             llvm_v4i64_ty], [IntrNoMem]>;
  def int_x86_avx512_vpmadd52l_uq_256 :
      ClangBuiltin<"__builtin_ia32_vpmadd52luq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
                             llvm_v4i64_ty], [IntrNoMem]>;
  def int_x86_avx512_vpmadd52h_uq_512 :
      ClangBuiltin<"__builtin_ia32_vpmadd52huq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
                             llvm_v8i64_ty], [IntrNoMem]>;
  def int_x86_avx512_vpmadd52l_uq_512 :
      ClangBuiltin<"__builtin_ia32_vpmadd52luq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
                             llvm_v8i64_ty], [IntrNoMem]>;
}
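
// The trailing i32 on the 512-bit and scalar FMA intrinsics above is a
// rounding-mode/SAE immediate (hence ImmArg<ArgIndex<3>>), while the vpmadd52
// intrinsics accumulate 52-bit partial products into 64-bit lanes. A sketch
// in IR, assuming the usual encoding where 4 means "current direction":
//   %r = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(
//            <8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4)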

// VNNI
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_vpdpbusd_128 :
      ClangBuiltin<"__builtin_ia32_vpdpbusd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpbusd_256 :
      ClangBuiltin<"__builtin_ia32_vpdpbusd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpbusd_512 :
      ClangBuiltin<"__builtin_ia32_vpdpbusd512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
                             llvm_v16i32_ty], [IntrNoMem]>;

  def int_x86_avx512_vpdpbusds_128 :
      ClangBuiltin<"__builtin_ia32_vpdpbusds128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpbusds_256 :
      ClangBuiltin<"__builtin_ia32_vpdpbusds256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpbusds_512 :
      ClangBuiltin<"__builtin_ia32_vpdpbusds512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
                             llvm_v16i32_ty], [IntrNoMem]>;

  def int_x86_avx512_vpdpwssd_128 :
      ClangBuiltin<"__builtin_ia32_vpdpwssd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpwssd_256 :
      ClangBuiltin<"__builtin_ia32_vpdpwssd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpwssd_512 :
      ClangBuiltin<"__builtin_ia32_vpdpwssd512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
                             llvm_v16i32_ty], [IntrNoMem]>;

  def int_x86_avx512_vpdpwssds_128 :
      ClangBuiltin<"__builtin_ia32_vpdpwssds128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpwssds_256 :
      ClangBuiltin<"__builtin_ia32_vpdpwssds256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
                             llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx512_vpdpwssds_512 :
      ClangBuiltin<"__builtin_ia32_vpdpwssds512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
                             llvm_v16i32_ty], [IntrNoMem]>;
  def int_x86_avx2_vpdpbssd_128
      : ClangBuiltin<"__builtin_ia32_vpdpbssd128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbssd_256
      : ClangBuiltin<"__builtin_ia32_vpdpbssd256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbssds_128
      : ClangBuiltin<"__builtin_ia32_vpdpbssds128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbssds_256
      : ClangBuiltin<"__builtin_ia32_vpdpbssds256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbsud_128
      : ClangBuiltin<"__builtin_ia32_vpdpbsud128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbsud_256
      : ClangBuiltin<"__builtin_ia32_vpdpbsud256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbsuds_128
      : ClangBuiltin<"__builtin_ia32_vpdpbsuds128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbsuds_256
      : ClangBuiltin<"__builtin_ia32_vpdpbsuds256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbuud_128
      : ClangBuiltin<"__builtin_ia32_vpdpbuud128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbuud_256
      : ClangBuiltin<"__builtin_ia32_vpdpbuud256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbuuds_128
      : ClangBuiltin<"__builtin_ia32_vpdpbuuds128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpbuuds_256
      : ClangBuiltin<"__builtin_ia32_vpdpbuuds256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;

  def int_x86_avx2_vpdpwsud_128
      : ClangBuiltin<"__builtin_ia32_vpdpwsud128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwsud_256
      : ClangBuiltin<"__builtin_ia32_vpdpwsud256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwsuds_128
      : ClangBuiltin<"__builtin_ia32_vpdpwsuds128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwsuds_256
      : ClangBuiltin<"__builtin_ia32_vpdpwsuds256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwusd_128
      : ClangBuiltin<"__builtin_ia32_vpdpwusd128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwusd_256
      : ClangBuiltin<"__builtin_ia32_vpdpwusd256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwusds_128
      : ClangBuiltin<"__builtin_ia32_vpdpwusds128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwusds_256
      : ClangBuiltin<"__builtin_ia32_vpdpwusds256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwuud_128
      : ClangBuiltin<"__builtin_ia32_vpdpwuud128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwuud_256
      : ClangBuiltin<"__builtin_ia32_vpdpwuud256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwuuds_128
      : ClangBuiltin<"__builtin_ia32_vpdpwuuds128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                              [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                              [IntrNoMem]>;
  def int_x86_avx2_vpdpwuuds_256
      : ClangBuiltin<"__builtin_ia32_vpdpwuuds256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                              [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty],
                              [IntrNoMem]>;
}
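
// The VNNI dot products accumulate per 32-bit lane: each result element is
// acc[i] plus the sum of four byte (vpdpb*) or two word (vpdpw*) products.
// The two letters after vpdpb/vpdpw give the signedness of the two sources
// (s = signed, u = unsigned; "us" is unsigned times signed), and a trailing s
// means the accumulation saturates. Example IR:
//   %r = call <4 x i32> @llvm.x86.avx512.vpdpbusd.128(<4 x i32> %acc,
//            <4 x i32> %a, <4 x i32> %b)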

//===----------------------------------------------------------------------===//
// XOP

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_xop_vpermil2pd : ClangBuiltin<"__builtin_ia32_vpermil2pd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
                                              llvm_v2i64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_xop_vpermil2pd_256 :
      ClangBuiltin<"__builtin_ia32_vpermil2pd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
                                              llvm_v4i64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_xop_vpermil2ps : ClangBuiltin<"__builtin_ia32_vpermil2ps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
                                              llvm_v4i32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_xop_vpermil2ps_256 :
      ClangBuiltin<"__builtin_ia32_vpermil2ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
                                              llvm_v8i32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_xop_vfrcz_pd : ClangBuiltin<"__builtin_ia32_vfrczpd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_xop_vfrcz_ps : ClangBuiltin<"__builtin_ia32_vfrczps">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_xop_vfrcz_sd : ClangBuiltin<"__builtin_ia32_vfrczsd">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
  def int_x86_xop_vfrcz_ss : ClangBuiltin<"__builtin_ia32_vfrczss">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_x86_xop_vfrcz_pd_256 : ClangBuiltin<"__builtin_ia32_vfrczpd256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
  def int_x86_xop_vfrcz_ps_256 : ClangBuiltin<"__builtin_ia32_vfrczps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;

  def int_x86_xop_vphaddbd :
      ClangBuiltin<"__builtin_ia32_vphaddbd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddbq :
      ClangBuiltin<"__builtin_ia32_vphaddbq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddbw :
      ClangBuiltin<"__builtin_ia32_vphaddbw">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphadddq :
      ClangBuiltin<"__builtin_ia32_vphadddq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddubd :
      ClangBuiltin<"__builtin_ia32_vphaddubd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddubq :
      ClangBuiltin<"__builtin_ia32_vphaddubq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddubw :
      ClangBuiltin<"__builtin_ia32_vphaddubw">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddudq :
      ClangBuiltin<"__builtin_ia32_vphaddudq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_xop_vphadduwd :
      ClangBuiltin<"__builtin_ia32_vphadduwd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_xop_vphadduwq :
      ClangBuiltin<"__builtin_ia32_vphadduwq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddwd :
      ClangBuiltin<"__builtin_ia32_vphaddwd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_xop_vphaddwq :
      ClangBuiltin<"__builtin_ia32_vphaddwq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_xop_vphsubbw :
      ClangBuiltin<"__builtin_ia32_vphsubbw">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
  def int_x86_xop_vphsubdq :
      ClangBuiltin<"__builtin_ia32_vphsubdq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_xop_vphsubwd :
      ClangBuiltin<"__builtin_ia32_vphsubwd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
  def int_x86_xop_vpmacsdd :
      ClangBuiltin<"__builtin_ia32_vpmacsdd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacsdqh :
      ClangBuiltin<"__builtin_ia32_vpmacsdqh">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacsdql :
      ClangBuiltin<"__builtin_ia32_vpmacsdql">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacssdd :
      ClangBuiltin<"__builtin_ia32_vpmacssdd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacssdqh :
      ClangBuiltin<"__builtin_ia32_vpmacssdqh">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacssdql :
      ClangBuiltin<"__builtin_ia32_vpmacssdql">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacsswd :
      ClangBuiltin<"__builtin_ia32_vpmacsswd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacssww :
      ClangBuiltin<"__builtin_ia32_vpmacssww">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacswd :
      ClangBuiltin<"__builtin_ia32_vpmacswd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmacsww :
      ClangBuiltin<"__builtin_ia32_vpmacsww">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmadcsswd :
      ClangBuiltin<"__builtin_ia32_vpmadcsswd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpmadcswd :
      ClangBuiltin<"__builtin_ia32_vpmadcswd">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_xop_vpperm :
      ClangBuiltin<"__builtin_ia32_vpperm">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshab :
      ClangBuiltin<"__builtin_ia32_vpshab">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshad :
      ClangBuiltin<"__builtin_ia32_vpshad">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshaq :
      ClangBuiltin<"__builtin_ia32_vpshaq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshaw :
      ClangBuiltin<"__builtin_ia32_vpshaw">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshlb :
      ClangBuiltin<"__builtin_ia32_vpshlb">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshld :
      ClangBuiltin<"__builtin_ia32_vpshld">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshlq :
      ClangBuiltin<"__builtin_ia32_vpshlq">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_xop_vpshlw :
      ClangBuiltin<"__builtin_ia32_vpshlw">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
}
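
// Of note among the XOP shifts: vpshl*/vpsha* take a per-element signed
// count, and a negative count shifts the other way (logical right for
// vpshl*, arithmetic right for vpsha*). Illustrative IR:
//   %r = call <4 x i32> @llvm.x86.xop.vpshad(<4 x i32> %a, <4 x i32> %cnt)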

//===----------------------------------------------------------------------===//
// LWP
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_llwpcb :
              ClangBuiltin<"__builtin_ia32_llwpcb">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_slwpcb :
              ClangBuiltin<"__builtin_ia32_slwpcb">,
              Intrinsic<[llvm_ptr_ty], [], []>;
  def int_x86_lwpins32 :
              ClangBuiltin<"__builtin_ia32_lwpins32">,
              Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                        [ImmArg<ArgIndex<2>>]>;
  def int_x86_lwpins64 :
              ClangBuiltin<"__builtin_ia32_lwpins64">,
              Intrinsic<[llvm_i8_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
                        [ImmArg<ArgIndex<2>>]>;
  def int_x86_lwpval32 :
              ClangBuiltin<"__builtin_ia32_lwpval32">,
              Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                        [ImmArg<ArgIndex<2>>]>;
  def int_x86_lwpval64 :
              ClangBuiltin<"__builtin_ia32_lwpval64">,
              Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
                        [ImmArg<ArgIndex<2>>]>;
}

//===----------------------------------------------------------------------===//
// MMX

// Empty MMX state op.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_mmx_emms  : ClangBuiltin<"__builtin_ia32_emms">,
              Intrinsic<[], [], []>;
  def int_x86_mmx_femms : ClangBuiltin<"__builtin_ia32_femms">,
              Intrinsic<[], [], []>;
}

// Integer arithmetic ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  // Addition
  def int_x86_mmx_padd_b : ClangBuiltin<"__builtin_ia32_paddb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_padd_w : ClangBuiltin<"__builtin_ia32_paddw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_padd_d : ClangBuiltin<"__builtin_ia32_paddd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_padd_q : ClangBuiltin<"__builtin_ia32_paddq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  def int_x86_mmx_padds_b : ClangBuiltin<"__builtin_ia32_paddsb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_padds_w : ClangBuiltin<"__builtin_ia32_paddsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  def int_x86_mmx_paddus_b : ClangBuiltin<"__builtin_ia32_paddusb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_paddus_w : ClangBuiltin<"__builtin_ia32_paddusw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  // Subtraction
  def int_x86_mmx_psub_b : ClangBuiltin<"__builtin_ia32_psubb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psub_w : ClangBuiltin<"__builtin_ia32_psubw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psub_d : ClangBuiltin<"__builtin_ia32_psubd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psub_q : ClangBuiltin<"__builtin_ia32_psubq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;

  def int_x86_mmx_psubs_b : ClangBuiltin<"__builtin_ia32_psubsb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psubs_w : ClangBuiltin<"__builtin_ia32_psubsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;

  def int_x86_mmx_psubus_b : ClangBuiltin<"__builtin_ia32_psubusb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psubus_w : ClangBuiltin<"__builtin_ia32_psubusw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;

  // Multiplication
  def int_x86_mmx_pmulh_w : ClangBuiltin<"__builtin_ia32_pmulhw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pmull_w : ClangBuiltin<"__builtin_ia32_pmullw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pmulhu_w : ClangBuiltin<"__builtin_ia32_pmulhuw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pmulu_dq : ClangBuiltin<"__builtin_ia32_pmuludq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pmadd_wd : ClangBuiltin<"__builtin_ia32_pmaddwd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  // Bitwise operations
  def int_x86_mmx_pand : ClangBuiltin<"__builtin_ia32_pand">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pandn : ClangBuiltin<"__builtin_ia32_pandn">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_por : ClangBuiltin<"__builtin_ia32_por">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pxor : ClangBuiltin<"__builtin_ia32_pxor">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  // Averages
  def int_x86_mmx_pavg_b : ClangBuiltin<"__builtin_ia32_pavgb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pavg_w : ClangBuiltin<"__builtin_ia32_pavgw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  // Maximum
  def int_x86_mmx_pmaxu_b : ClangBuiltin<"__builtin_ia32_pmaxub">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pmaxs_w : ClangBuiltin<"__builtin_ia32_pmaxsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  // Minimum
  def int_x86_mmx_pminu_b : ClangBuiltin<"__builtin_ia32_pminub">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pmins_w : ClangBuiltin<"__builtin_ia32_pminsw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  // Packed sum of absolute differences
  def int_x86_mmx_psad_bw : ClangBuiltin<"__builtin_ia32_psadbw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
}

// Integer shift ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  // Shift left logical
  def int_x86_mmx_psll_w : ClangBuiltin<"__builtin_ia32_psllw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psll_d : ClangBuiltin<"__builtin_ia32_pslld">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psll_q : ClangBuiltin<"__builtin_ia32_psllq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;

  def int_x86_mmx_psrl_w : ClangBuiltin<"__builtin_ia32_psrlw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psrl_d : ClangBuiltin<"__builtin_ia32_psrld">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psrl_q : ClangBuiltin<"__builtin_ia32_psrlq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;

  def int_x86_mmx_psra_w : ClangBuiltin<"__builtin_ia32_psraw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psra_d : ClangBuiltin<"__builtin_ia32_psrad">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;

  // Oddly, these don't require the shift count to be an immediate, due to a
  // GCC compatibility issue.
  def int_x86_mmx_pslli_w : ClangBuiltin<"__builtin_ia32_psllwi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_pslli_d : ClangBuiltin<"__builtin_ia32_pslldi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_pslli_q : ClangBuiltin<"__builtin_ia32_psllqi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;

  def int_x86_mmx_psrli_w : ClangBuiltin<"__builtin_ia32_psrlwi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psrli_d : ClangBuiltin<"__builtin_ia32_psrldi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psrli_q : ClangBuiltin<"__builtin_ia32_psrlqi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;

  def int_x86_mmx_psrai_w : ClangBuiltin<"__builtin_ia32_psrawi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_psrai_d : ClangBuiltin<"__builtin_ia32_psradi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem]>;
}
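
// An illustrative sketch of the shift intrinsics above (%v and %cnt are
// hypothetical values); per the GCC-compatibility note, the pslli/psrli/
// psrai forms take an i32 count that need not be a constant:
//
//   %a = call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %v, x86_mmx %cnt)
//   %b = call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %v, i32 3)
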
// Permute
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_permvar_df_256 : ClangBuiltin<"__builtin_ia32_permvardf256">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_df_512 : ClangBuiltin<"__builtin_ia32_permvardf512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_di_256 : ClangBuiltin<"__builtin_ia32_permvardi256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_di_512 : ClangBuiltin<"__builtin_ia32_permvardi512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_hi_128 : ClangBuiltin<"__builtin_ia32_permvarhi128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_hi_256 : ClangBuiltin<"__builtin_ia32_permvarhi256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_hi_512 : ClangBuiltin<"__builtin_ia32_permvarhi512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_qi_128 : ClangBuiltin<"__builtin_ia32_permvarqi128">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_qi_256 : ClangBuiltin<"__builtin_ia32_permvarqi256">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_qi_512 : ClangBuiltin<"__builtin_ia32_permvarqi512">,
      DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_sf_512 : ClangBuiltin<"__builtin_ia32_permvarsf512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_permvar_si_512 : ClangBuiltin<"__builtin_ia32_permvarsi512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
}
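
// The permvar intrinsics select each result element from the first operand
// using the low bits of the corresponding index element.  A sketch with
// hypothetical values, assuming vpermps-style modulo-16 lane selection:
//
//   %r = call <16 x float> @llvm.x86.avx512.permvar.sf.512(
//            <16 x float> %src, <16 x i32> %idx)
//   ; lane i of %r is lane (%idx[i] & 15) of %src
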
// Pack ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_mmx_packsswb : ClangBuiltin<"__builtin_ia32_packsswb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_packssdw : ClangBuiltin<"__builtin_ia32_packssdw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_packuswb : ClangBuiltin<"__builtin_ia32_packuswb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
}
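
// The pack intrinsics narrow both operands with saturation and concatenate
// the results; e.g. packsswb narrows two 4 x i16 vectors into one 8 x i8
// with signed saturation.  A sketch with hypothetical values:
//
//   %r = call x86_mmx @llvm.x86.mmx.packsswb(x86_mmx %a, x86_mmx %b)
//   ; bytes 0-3 of %r come from %a, bytes 4-7 from %b, each clamped
//   ; to [-128, 127]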

// Unpacking ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_mmx_punpckhbw : ClangBuiltin<"__builtin_ia32_punpckhbw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_punpckhwd : ClangBuiltin<"__builtin_ia32_punpckhwd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_punpckhdq : ClangBuiltin<"__builtin_ia32_punpckhdq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_punpcklbw : ClangBuiltin<"__builtin_ia32_punpcklbw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_punpcklwd : ClangBuiltin<"__builtin_ia32_punpcklwd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_punpckldq : ClangBuiltin<"__builtin_ia32_punpckldq">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
}
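
// The unpack intrinsics interleave elements from the low (punpckl*) or high
// (punpckh*) halves of the two operands.  A sketch with hypothetical values:
//
//   %r = call x86_mmx @llvm.x86.mmx.punpcklbw(x86_mmx %a, x86_mmx %b)
//   ; %r = { a0, b0, a1, b1, a2, b2, a3, b3 }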

// Integer comparison ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_mmx_pcmpeq_b : ClangBuiltin<"__builtin_ia32_pcmpeqb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pcmpeq_w : ClangBuiltin<"__builtin_ia32_pcmpeqw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_mmx_pcmpeq_d : ClangBuiltin<"__builtin_ia32_pcmpeqd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem, Commutative]>;

  def int_x86_mmx_pcmpgt_b : ClangBuiltin<"__builtin_ia32_pcmpgtb">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_pcmpgt_w : ClangBuiltin<"__builtin_ia32_pcmpgtw">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
  def int_x86_mmx_pcmpgt_d : ClangBuiltin<"__builtin_ia32_pcmpgtd">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
                            [IntrNoMem]>;
}

// Misc.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_mmx_maskmovq : ClangBuiltin<"__builtin_ia32_maskmovq">,
              Intrinsic<[], [llvm_x86mmx_ty, llvm_x86mmx_ty, llvm_ptr_ty], []>;

  def int_x86_mmx_pmovmskb : ClangBuiltin<"__builtin_ia32_pmovmskb">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;

  def int_x86_mmx_movnt_dq : ClangBuiltin<"__builtin_ia32_movntq">,
              Intrinsic<[], [llvm_ptr_ty, llvm_x86mmx_ty], []>;

  def int_x86_mmx_palignr_b : ClangBuiltin<"__builtin_ia32_palignr">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty],
                            [llvm_x86mmx_ty, llvm_x86mmx_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_mmx_pextr_w : ClangBuiltin<"__builtin_ia32_vec_ext_v4hi">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_x86mmx_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_mmx_pinsr_w : ClangBuiltin<"__builtin_ia32_vec_set_v4hi">,
      DefaultAttrsIntrinsic<[llvm_x86mmx_ty],
                            [llvm_x86mmx_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
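
// Operands marked ImmArg above (the palignr_b shift amount and the
// pextr_w/pinsr_w lane index) must be integer constants at the IR call site.
// A sketch with a hypothetical %v:
//
//   %w = call i32 @llvm.x86.mmx.pextr.w(x86_mmx %v, i32 2)  ; constant index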

//===----------------------------------------------------------------------===//
// BMI

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_bmi_bextr_32 : ClangBuiltin<"__builtin_ia32_bextr_u32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_bextr_64 : ClangBuiltin<"__builtin_ia32_bextr_u64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_bzhi_32 : ClangBuiltin<"__builtin_ia32_bzhi_si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_bzhi_64 : ClangBuiltin<"__builtin_ia32_bzhi_di">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_pdep_32 : ClangBuiltin<"__builtin_ia32_pdep_si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_pdep_64 : ClangBuiltin<"__builtin_ia32_pdep_di">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_pext_32 : ClangBuiltin<"__builtin_ia32_pext_si">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_bmi_pext_64 : ClangBuiltin<"__builtin_ia32_pext_di">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
}
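
// A sketch of the BMI intrinsics above, with hypothetical values.  For bextr
// the control operand packs the field: start bit in bits [7:0], length in
// bits [15:8].  pdep scatters the low-order source bits to the set-bit
// positions of the mask; pext gathers them back:
//
//   %f = call i32 @llvm.x86.bmi.bextr.32(i32 %x, i32 2056) ; start 8, len 8
//   %d = call i32 @llvm.x86.bmi.pdep.32(i32 %bits, i32 %mask)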

//===----------------------------------------------------------------------===//
// FS/GS Base

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_rdfsbase_32 : ClangBuiltin<"__builtin_ia32_rdfsbase32">,
              Intrinsic<[llvm_i32_ty], []>;
  def int_x86_rdgsbase_32 : ClangBuiltin<"__builtin_ia32_rdgsbase32">,
              Intrinsic<[llvm_i32_ty], []>;
  def int_x86_rdfsbase_64 : ClangBuiltin<"__builtin_ia32_rdfsbase64">,
              Intrinsic<[llvm_i64_ty], []>;
  def int_x86_rdgsbase_64 : ClangBuiltin<"__builtin_ia32_rdgsbase64">,
              Intrinsic<[llvm_i64_ty], []>;
  def int_x86_wrfsbase_32 : ClangBuiltin<"__builtin_ia32_wrfsbase32">,
              Intrinsic<[], [llvm_i32_ty]>;
  def int_x86_wrgsbase_32 : ClangBuiltin<"__builtin_ia32_wrgsbase32">,
              Intrinsic<[], [llvm_i32_ty]>;
  def int_x86_wrfsbase_64 : ClangBuiltin<"__builtin_ia32_wrfsbase64">,
              Intrinsic<[], [llvm_i64_ty]>;
  def int_x86_wrgsbase_64 : ClangBuiltin<"__builtin_ia32_wrgsbase64">,
              Intrinsic<[], [llvm_i64_ty]>;
}

//===----------------------------------------------------------------------===//
// FXSR
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_fxrstor : ClangBuiltin<"__builtin_ia32_fxrstor">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_fxrstor64 : ClangBuiltin<"__builtin_ia32_fxrstor64">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_fxsave : ClangBuiltin<"__builtin_ia32_fxsave">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_fxsave64 : ClangBuiltin<"__builtin_ia32_fxsave64">,
              Intrinsic<[], [llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// XSAVE
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_xsave :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsave64 :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xrstor :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xrstor64 :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsaveopt :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsaveopt64 :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xrstors :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xrstors64 :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsavec :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsavec64 :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsaves :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xsaves64 :
              Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_xgetbv :
              Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
  def int_x86_xsetbv :
              Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
}
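
// A sketch of the XSAVE-family operands (assumption: clang passes the high
// half of the 64-bit state-component mask first, then the low half, i.e.
// EDX then EAX):
//
//   call void @llvm.x86.xsave(ptr %area, i32 %mask_hi, i32 %mask_lo)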

//===----------------------------------------------------------------------===//
// CLFLUSHOPT and CLWB
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_clflushopt : ClangBuiltin<"__builtin_ia32_clflushopt">,
              Intrinsic<[], [llvm_ptr_ty], []>;

  def int_x86_clwb : ClangBuiltin<"__builtin_ia32_clwb">,
              Intrinsic<[], [llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// Protection key (PKRU) support
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_rdpkru : ClangBuiltin<"__builtin_ia32_rdpkru">,
              Intrinsic<[llvm_i32_ty], [], []>;
  def int_x86_wrpkru : ClangBuiltin<"__builtin_ia32_wrpkru">,
              Intrinsic<[], [llvm_i32_ty], []>;
}

//===----------------------------------------------------------------------===//
// Half float conversion

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_vcvtps2ph_128 : ClangBuiltin<"__builtin_ia32_vcvtps2ph">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_vcvtps2ph_256 : ClangBuiltin<"__builtin_ia32_vcvtps2ph256">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_vcvtph2ps_512 :
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16i16_ty, llvm_v16f32_ty,
                                           llvm_i16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_mask_vcvtps2ph_512 : ClangBuiltin<"__builtin_ia32_vcvtps2ph512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty], [llvm_v16f32_ty, llvm_i32_ty,
                                               llvm_v16i16_ty, llvm_i16_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_vcvtps2ph_256 : ClangBuiltin<"__builtin_ia32_vcvtps2ph256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty,
                                           llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_vcvtps2ph_128 : ClangBuiltin<"__builtin_ia32_vcvtps2ph_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty,
                                               llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
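
// For the vcvtps2ph intrinsics the ImmArg i32 is the instruction's
// rounding-control immediate and must be a constant; in the instruction
// encoding an immediate of 0 selects round-to-nearest-even.  A sketch with
// a hypothetical %v:
//
//   %h = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %v, i32 0)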

//===----------------------------------------------------------------------===//
// TBM

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_tbm_bextri_u32 : ClangBuiltin<"__builtin_ia32_bextri_u32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_tbm_bextri_u64 : ClangBuiltin<"__builtin_ia32_bextri_u64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
}

//===----------------------------------------------------------------------===//
// RDRAND intrinsics - Return a random value and whether it is valid.
// RDSEED intrinsics - Return a NIST SP800-90B & C compliant random value and
// whether it is valid.

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  // These are declared side-effecting so they don't get eliminated by CSE or
  // LICM.
  def int_x86_rdrand_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
  def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
  def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
  def int_x86_rdseed_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
  def int_x86_rdseed_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
  def int_x86_rdseed_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
}
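
// These intrinsics return two values: the random data and an i32 validity
// flag derived from the carry flag (nonzero means valid).  A sketch:
//
//   %pair = call { i32, i32 } @llvm.x86.rdrand.32()
//   %val  = extractvalue { i32, i32 } %pair, 0
//   %ok   = extractvalue { i32, i32 } %pair, 1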

//===----------------------------------------------------------------------===//
// ADX

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_addcarry_32:
      DefaultAttrsIntrinsic<[llvm_i8_ty, llvm_i32_ty],
                            [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_addcarry_64:
      DefaultAttrsIntrinsic<[llvm_i8_ty, llvm_i64_ty],
                            [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
  def int_x86_subborrow_32:
      DefaultAttrsIntrinsic<[llvm_i8_ty, llvm_i32_ty],
                            [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_subborrow_64:
      DefaultAttrsIntrinsic<[llvm_i8_ty, llvm_i64_ty],
                            [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty],
                            [IntrNoMem]>;
}
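
// addcarry/subborrow return { i8 carry-out, result } and take an i8
// carry-in, so multi-word arithmetic chains naturally.  A sketch of a 64-bit
// add built from two 32-bit limbs (hypothetical values):
//
//   %lo = call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 %a0, i32 %b0)
//   %c  = extractvalue { i8, i32 } %lo, 0
//   %hi = call { i8, i32 } @llvm.x86.addcarry.32(i8 %c, i32 %a1, i32 %b1)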

//===----------------------------------------------------------------------===//
// RTM intrinsics. Transactional Memory support.

let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_xbegin : ClangBuiltin<"__builtin_ia32_xbegin">,
              Intrinsic<[llvm_i32_ty], [], []>;
  def int_x86_xend : ClangBuiltin<"__builtin_ia32_xend">,
              Intrinsic<[], [], []>;
  def int_x86_xabort : ClangBuiltin<"__builtin_ia32_xabort">,
              Intrinsic<[], [llvm_i8_ty], [ImmArg<ArgIndex<0>>]>;
  def int_x86_xtest : ClangBuiltin<"__builtin_ia32_xtest">,
              Intrinsic<[llvm_i32_ty], [], []>;
}
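
// llvm.x86.xbegin returns -1 (_XBEGIN_STARTED) when the transaction starts
// and an abort status code otherwise, so callers branch on the result.
// A sketch:
//
//   %s = call i32 @llvm.x86.xbegin()
//   %started = icmp eq i32 %s, -1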

//===----------------------------------------------------------------------===//
// AVX512

// Mask ops
let TargetPrefix = "x86" in {
  def int_x86_avx512_kadd_b :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty], [llvm_v8i1_ty, llvm_v8i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_kadd_w :
      DefaultAttrsIntrinsic<[llvm_v16i1_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_kadd_d :
      DefaultAttrsIntrinsic<[llvm_v32i1_ty], [llvm_v32i1_ty, llvm_v32i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_kadd_q :
      DefaultAttrsIntrinsic<[llvm_v64i1_ty], [llvm_v64i1_ty, llvm_v64i1_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_ktestc_b :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8i1_ty, llvm_v8i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_ktestc_w :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_ktestc_d :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v32i1_ty, llvm_v32i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_ktestc_q :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v64i1_ty, llvm_v64i1_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_ktestz_b :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v8i1_ty, llvm_v8i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_ktestz_w :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v16i1_ty, llvm_v16i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_ktestz_d :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v32i1_ty, llvm_v32i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_ktestz_q :
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v64i1_ty, llvm_v64i1_ty],
                            [IntrNoMem]>;
}

// Conversion ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_cvttss2si : ClangBuiltin<"__builtin_ia32_vcvttss2si32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvttss2si64 : ClangBuiltin<"__builtin_ia32_vcvttss2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvttss2usi : ClangBuiltin<"__builtin_ia32_vcvttss2usi32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvttss2usi64 : ClangBuiltin<"__builtin_ia32_vcvttss2usi64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvtusi2ss : ClangBuiltin<"__builtin_ia32_cvtusi2ss32">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_cvtusi642ss : ClangBuiltin<"__builtin_ia32_cvtusi2ss64">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_i64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_cvttsd2si : ClangBuiltin<"__builtin_ia32_vcvttsd2si32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvttsd2si64 : ClangBuiltin<"__builtin_ia32_vcvttsd2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvttsd2usi : ClangBuiltin<"__builtin_ia32_vcvttsd2usi32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvttsd2usi64 : ClangBuiltin<"__builtin_ia32_vcvttsd2usi64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvtusi642sd : ClangBuiltin<"__builtin_ia32_cvtusi2sd64">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_i64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_vcvtss2usi32 : ClangBuiltin<"__builtin_ia32_vcvtss2usi32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtss2usi64 : ClangBuiltin<"__builtin_ia32_vcvtss2usi64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtss2si32 : ClangBuiltin<"__builtin_ia32_vcvtss2si32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtss2si64 : ClangBuiltin<"__builtin_ia32_vcvtss2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtsd2usi32 : ClangBuiltin<"__builtin_ia32_vcvtsd2usi32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtsd2usi64 : ClangBuiltin<"__builtin_ia32_vcvtsd2usi64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtsd2si32 : ClangBuiltin<"__builtin_ia32_vcvtsd2si32">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_vcvtsd2si64 : ClangBuiltin<"__builtin_ia32_vcvtsd2si64">,
      DefaultAttrsIntrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_cvtsi2ss32 : ClangBuiltin<"__builtin_ia32_cvtsi2ss32">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_cvtsi2ss64 : ClangBuiltin<"__builtin_ia32_cvtsi2ss64">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_i64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_cvtsi2sd64 : ClangBuiltin<"__builtin_ia32_cvtsi2sd64">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_i64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}
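
// The trailing ImmArg i32 on the conversions above is the SAE/rounding
// operand in the _MM_FROUND_* encoding (4 = use the current rounding mode,
// 8 = suppress exceptions).  A sketch with a hypothetical %v:
//
//   %i = call i32 @llvm.x86.avx512.vcvtss2si32(<4 x float> %v, i32 4)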

// Pack ops.
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_packsswb_512 : ClangBuiltin<"__builtin_ia32_packsswb512">,
      DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_packssdw_512 : ClangBuiltin<"__builtin_ia32_packssdw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_packuswb_512 : ClangBuiltin<"__builtin_ia32_packuswb512">,
      DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_packusdw_512 : ClangBuiltin<"__builtin_ia32_packusdw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
}

// Vector convert
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_sitofp_round :
      DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_avx512_uitofp_round :
      DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_avx512_mask_cvtpd2dq_128 :
        ClangBuiltin<"__builtin_ia32_cvtpd2dq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2dq_512 :
        ClangBuiltin<"__builtin_ia32_cvtpd2dq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtpd2ps_512 :
        ClangBuiltin<"__builtin_ia32_cvtpd2ps512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8f32_ty],
          [llvm_v8f64_ty, llvm_v8f32_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtsd2ss_round :
        ClangBuiltin<"__builtin_ia32_cvtsd2ss_round_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v2f64_ty, llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_cvtss2sd_round :
        ClangBuiltin<"__builtin_ia32_cvtss2sd_round_mask">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v4f32_ty, llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_cvtpd2ps :
        ClangBuiltin<"__builtin_ia32_cvtpd2ps_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v2f64_ty, llvm_v4f32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2qq_128 :
        ClangBuiltin<"__builtin_ia32_cvtpd2qq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2qq_256 :
        ClangBuiltin<"__builtin_ia32_cvtpd2qq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2qq_512 :
        ClangBuiltin<"__builtin_ia32_cvtpd2qq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtpd2udq_128 :
        ClangBuiltin<"__builtin_ia32_cvtpd2udq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2udq_256 :
        ClangBuiltin<"__builtin_ia32_cvtpd2udq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2udq_512 :
        ClangBuiltin<"__builtin_ia32_cvtpd2udq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtpd2uqq_128 :
        ClangBuiltin<"__builtin_ia32_cvtpd2uqq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2uqq_256 :
        ClangBuiltin<"__builtin_ia32_cvtpd2uqq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtpd2uqq_512 :
        ClangBuiltin<"__builtin_ia32_cvtpd2uqq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtps2dq_128 :
        ClangBuiltin<"__builtin_ia32_cvtps2dq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2dq_256 :
        ClangBuiltin<"__builtin_ia32_cvtps2dq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2dq_512 :
        ClangBuiltin<"__builtin_ia32_cvtps2dq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v16i32_ty],
          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtps2pd_512 :
        ClangBuiltin<"__builtin_ia32_cvtps2pd512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8f64_ty],
          [llvm_v8f32_ty, llvm_v8f64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtps2qq_128 :
        ClangBuiltin<"__builtin_ia32_cvtps2qq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2qq_256 :
        ClangBuiltin<"__builtin_ia32_cvtps2qq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2qq_512 :
        ClangBuiltin<"__builtin_ia32_cvtps2qq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtps2udq_128 :
        ClangBuiltin<"__builtin_ia32_cvtps2udq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2udq_256 :
        ClangBuiltin<"__builtin_ia32_cvtps2udq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2udq_512 :
        ClangBuiltin<"__builtin_ia32_cvtps2udq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v16i32_ty],
          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtps2uqq_128 :
        ClangBuiltin<"__builtin_ia32_cvtps2uqq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2uqq_256 :
        ClangBuiltin<"__builtin_ia32_cvtps2uqq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvtps2uqq_512 :
        ClangBuiltin<"__builtin_ia32_cvtps2uqq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtqq2ps_128 :
        ClangBuiltin<"__builtin_ia32_cvtqq2ps128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v2i64_ty, llvm_v4f32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2dq_128 :
        ClangBuiltin<"__builtin_ia32_cvttpd2dq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2dq_512 :
        ClangBuiltin<"__builtin_ia32_cvttpd2dq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttpd2qq_128 :
        ClangBuiltin<"__builtin_ia32_cvttpd2qq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2qq_256 :
        ClangBuiltin<"__builtin_ia32_cvttpd2qq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2qq_512 :
        ClangBuiltin<"__builtin_ia32_cvttpd2qq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttpd2udq_128 :
        ClangBuiltin<"__builtin_ia32_cvttpd2udq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v2f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2udq_256 :
        ClangBuiltin<"__builtin_ia32_cvttpd2udq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4f64_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2udq_512 :
        ClangBuiltin<"__builtin_ia32_cvttpd2udq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f64_ty, llvm_v8i32_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttpd2uqq_128 :
        ClangBuiltin<"__builtin_ia32_cvttpd2uqq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2f64_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2uqq_256 :
        ClangBuiltin<"__builtin_ia32_cvttpd2uqq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f64_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttpd2uqq_512 :
        ClangBuiltin<"__builtin_ia32_cvttpd2uqq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f64_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttps2dq_512 :
        ClangBuiltin<"__builtin_ia32_cvttps2dq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v16i32_ty],
          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttps2qq_128 :
        ClangBuiltin<"__builtin_ia32_cvttps2qq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttps2qq_256 :
        ClangBuiltin<"__builtin_ia32_cvttps2qq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttps2qq_512 :
        ClangBuiltin<"__builtin_ia32_cvttps2qq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttps2udq_128 :
        ClangBuiltin<"__builtin_ia32_cvttps2udq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4f32_ty, llvm_v4i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttps2udq_256 :
        ClangBuiltin<"__builtin_ia32_cvttps2udq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8f32_ty, llvm_v8i32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttps2udq_512 :
        ClangBuiltin<"__builtin_ia32_cvttps2udq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v16i32_ty],
          [llvm_v16f32_ty, llvm_v16i32_ty,  llvm_i16_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvttps2uqq_128 :
        ClangBuiltin<"__builtin_ia32_cvttps2uqq128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v4f32_ty, llvm_v2i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttps2uqq_256 :
        ClangBuiltin<"__builtin_ia32_cvttps2uqq256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4f32_ty, llvm_v4i64_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_cvttps2uqq_512 :
        ClangBuiltin<"__builtin_ia32_cvttps2uqq512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty],
          [llvm_v8f32_ty, llvm_v8i64_ty,  llvm_i8_ty,  llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_cvtuqq2ps_128 :
        ClangBuiltin<"__builtin_ia32_cvtuqq2ps128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v2i64_ty, llvm_v4f32_ty,  llvm_i8_ty],
          [IntrNoMem]>;

  def int_x86_avx512_mask_rndscale_pd_128 : ClangBuiltin<"__builtin_ia32_rndscalepd_128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_rndscale_pd_256 : ClangBuiltin<"__builtin_ia32_rndscalepd_256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_rndscale_pd_512 : ClangBuiltin<"__builtin_ia32_rndscalepd_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_rndscale_ps_128 : ClangBuiltin<"__builtin_ia32_rndscaleps_128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_rndscale_ps_256 : ClangBuiltin<"__builtin_ia32_rndscaleps_256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_rndscale_ps_512 : ClangBuiltin<"__builtin_ia32_rndscaleps_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
                             llvm_i16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_reduce_pd_128 : ClangBuiltin<"__builtin_ia32_reducepd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_reduce_pd_256 : ClangBuiltin<"__builtin_ia32_reducepd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_reduce_pd_512 : ClangBuiltin<"__builtin_ia32_reducepd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_reduce_ps_128 : ClangBuiltin<"__builtin_ia32_reduceps128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_reduce_ps_256 : ClangBuiltin<"__builtin_ia32_reduceps256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty,
                             llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_reduce_ps_512 : ClangBuiltin<"__builtin_ia32_reduceps512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
                             llvm_i16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_range_pd_128 : ClangBuiltin<"__builtin_ia32_rangepd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty,
                             llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_range_pd_256 : ClangBuiltin<"__builtin_ia32_rangepd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty,
                             llvm_v4f64_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_range_pd_512 : ClangBuiltin<"__builtin_ia32_rangepd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty,
                             llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_range_ps_128 : ClangBuiltin<"__builtin_ia32_rangeps128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty,
                             llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_range_ps_256 : ClangBuiltin<"__builtin_ia32_rangeps256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty,
                             llvm_v8f32_ty, llvm_i8_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_range_ps_512 : ClangBuiltin<"__builtin_ia32_rangeps512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty,
                             llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<5>>]>;
}
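
// The mask_* conversions above share one shape: source first, then a
// pass-through vector and an iN element mask (bit i set keeps the computed
// lane, clear keeps the pass-through lane); rounding forms append the
// ImmArg rounding word.  A sketch with hypothetical values:
//
//   %r = call <8 x i32> @llvm.x86.avx512.mask.cvtpd2dq.512(
//            <8 x double> %v, <8 x i32> %passthru, i8 %k, i32 4)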

// Vector broadcast from mask
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
  def int_x86_avx512_broadcastmw_512 :
      ClangBuiltin<"__builtin_ia32_broadcastmw512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_i16_ty], [IntrNoMem]>;
  def int_x86_avx512_broadcastmw_256 :
      ClangBuiltin<"__builtin_ia32_broadcastmw256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_i16_ty], [IntrNoMem]>;
  def int_x86_avx512_broadcastmw_128 :
      ClangBuiltin<"__builtin_ia32_broadcastmw128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_i16_ty], [IntrNoMem]>;
  def int_x86_avx512_broadcastmb_512 :
      ClangBuiltin<"__builtin_ia32_broadcastmb512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_i8_ty], [IntrNoMem]>;
  def int_x86_avx512_broadcastmb_256 :
      ClangBuiltin<"__builtin_ia32_broadcastmb256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_i8_ty], [IntrNoMem]>;
  def int_x86_avx512_broadcastmb_128 :
      ClangBuiltin<"__builtin_ia32_broadcastmb128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_i8_ty], [IntrNoMem]>;
}
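
// broadcastmw/broadcastmb zero-extend a 16-bit (mw) or 8-bit (mb) mask value
// and splat it across every result element.  A sketch:
//
//   %r = call <16 x i32> @llvm.x86.avx512.broadcastmw.512(i16 %k)
//   ; every lane of %r equals zext(%k) to i32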

// Arithmetic ops
let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".

  def int_x86_avx512_add_ps_512 : ClangBuiltin<"__builtin_ia32_addps512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_add_pd_512 : ClangBuiltin<"__builtin_ia32_addpd512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_sub_ps_512 : ClangBuiltin<"__builtin_ia32_subps512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_sub_pd_512 : ClangBuiltin<"__builtin_ia32_subpd512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mul_ps_512 : ClangBuiltin<"__builtin_ia32_mulps512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mul_pd_512 : ClangBuiltin<"__builtin_ia32_mulpd512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_div_ps_512 : ClangBuiltin<"__builtin_ia32_divps512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_div_pd_512 : ClangBuiltin<"__builtin_ia32_divpd512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_avx512_max_ps_512 : ClangBuiltin<"__builtin_ia32_maxps512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_max_pd_512 : ClangBuiltin<"__builtin_ia32_maxpd512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_min_ps_512 : ClangBuiltin<"__builtin_ia32_minps512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_min_pd_512 : ClangBuiltin<"__builtin_ia32_minpd512">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_avx512_mask_add_ss_round : ClangBuiltin<"__builtin_ia32_addss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_div_ss_round : ClangBuiltin<"__builtin_ia32_divss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_mul_ss_round : ClangBuiltin<"__builtin_ia32_mulss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_sub_ss_round : ClangBuiltin<"__builtin_ia32_subss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_max_ss_round : ClangBuiltin<"__builtin_ia32_maxss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_min_ss_round : ClangBuiltin<"__builtin_ia32_minss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_add_sd_round : ClangBuiltin<"__builtin_ia32_addsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_div_sd_round : ClangBuiltin<"__builtin_ia32_divsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_mul_sd_round : ClangBuiltin<"__builtin_ia32_mulsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_sub_sd_round : ClangBuiltin<"__builtin_ia32_subsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_max_sd_round : ClangBuiltin<"__builtin_ia32_maxsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_min_sd_round : ClangBuiltin<"__builtin_ia32_minsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_rndscale_ss : ClangBuiltin<"__builtin_ia32_rndscaless_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_rndscale_sd : ClangBuiltin<"__builtin_ia32_rndscalesd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_range_ss : ClangBuiltin<"__builtin_ia32_rangess128_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_range_sd : ClangBuiltin<"__builtin_ia32_rangesd128_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_reduce_ss : ClangBuiltin<"__builtin_ia32_reducess_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_reduce_sd : ClangBuiltin<"__builtin_ia32_reducesd_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>,
                             ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_scalef_sd : ClangBuiltin<"__builtin_ia32_scalefsd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scalef_ss : ClangBuiltin<"__builtin_ia32_scalefss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scalef_pd_128 : ClangBuiltin<"__builtin_ia32_scalefpd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
  def int_x86_avx512_mask_scalef_pd_256 : ClangBuiltin<"__builtin_ia32_scalefpd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_scalef_pd_512 : ClangBuiltin<"__builtin_ia32_scalefpd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scalef_ps_128 : ClangBuiltin<"__builtin_ia32_scalefps128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
  def int_x86_avx512_mask_scalef_ps_256 : ClangBuiltin<"__builtin_ia32_scalefps256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
                             llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
  def int_x86_avx512_mask_scalef_ps_512 : ClangBuiltin<"__builtin_ia32_scalefps512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i16_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_sqrt_ss :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_sqrt_sd :
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;

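  // The packed 512-bit sqrt intrinsics below are unmasked and take only a
  // rounding-mode immediate; masking is presumably expressed with a generic
  // IR select around the call (an assumption, based on the signatures).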
  def int_x86_avx512_sqrt_pd_512 :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_sqrt_ps_512 :
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<1>>]>;
  def int_x86_avx512_mask_fixupimm_pd_128 :
         ClangBuiltin<"__builtin_ia32_fixupimmpd128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_maskz_fixupimm_pd_128 :
         ClangBuiltin<"__builtin_ia32_fixupimmpd128_maskz">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_mask_fixupimm_pd_256 :
         ClangBuiltin<"__builtin_ia32_fixupimmpd256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_maskz_fixupimm_pd_256 :
         ClangBuiltin<"__builtin_ia32_fixupimmpd256_maskz">,
          DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_mask_fixupimm_pd_512 :
         ClangBuiltin<"__builtin_ia32_fixupimmpd512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8f64_ty],
          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_i8_ty,
          llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_maskz_fixupimm_pd_512 :
         ClangBuiltin<"__builtin_ia32_fixupimmpd512_maskz">,
          DefaultAttrsIntrinsic<[llvm_v8f64_ty],
          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_i8_ty,
          llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_fixupimm_ps_128 :
         ClangBuiltin<"__builtin_ia32_fixupimmps128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_maskz_fixupimm_ps_128 :
         ClangBuiltin<"__builtin_ia32_fixupimmps128_maskz">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_mask_fixupimm_ps_256 :
         ClangBuiltin<"__builtin_ia32_fixupimmps256_mask">,
          DefaultAttrsIntrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_maskz_fixupimm_ps_256 :
         ClangBuiltin<"__builtin_ia32_fixupimmps256_maskz">,
          DefaultAttrsIntrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_mask_fixupimm_ps_512 :
         ClangBuiltin<"__builtin_ia32_fixupimmps512_mask">,
          DefaultAttrsIntrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16i32_ty, llvm_i32_ty,
          llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_maskz_fixupimm_ps_512 :
         ClangBuiltin<"__builtin_ia32_fixupimmps512_maskz">,
          DefaultAttrsIntrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16i32_ty, llvm_i32_ty,
          llvm_i16_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_fixupimm_sd :
         ClangBuiltin<"__builtin_ia32_fixupimmsd_mask">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty,
          llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_maskz_fixupimm_sd :
         ClangBuiltin<"__builtin_ia32_fixupimmsd_maskz">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_i8_ty,
          llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_fixupimm_ss :
         ClangBuiltin<"__builtin_ia32_fixupimmss_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty,
          llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_maskz_fixupimm_ss :
         ClangBuiltin<"__builtin_ia32_fixupimmss_maskz">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i8_ty,
          llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<5>>]>;
  def int_x86_avx512_mask_getexp_pd_128 : ClangBuiltin<"__builtin_ia32_getexppd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_getexp_pd_256 : ClangBuiltin<"__builtin_ia32_getexppd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_getexp_pd_512 : ClangBuiltin<"__builtin_ia32_getexppd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_mask_getexp_ps_128 : ClangBuiltin<"__builtin_ia32_getexpps128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_getexp_ps_256 : ClangBuiltin<"__builtin_ia32_getexpps256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_getexp_ps_512 : ClangBuiltin<"__builtin_ia32_getexpps512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_mask_getexp_ss : ClangBuiltin<"__builtin_ia32_getexpss128_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_getexp_sd : ClangBuiltin<"__builtin_ia32_getexpsd128_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;

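  // For the getmant family, the i32 immediate at ArgIndex<1> packs the
  // VGETMANT normalization-interval and sign-control fields.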
  def int_x86_avx512_mask_getmant_pd_128 :
         ClangBuiltin<"__builtin_ia32_getmantpd128_mask">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_avx512_mask_getmant_pd_256 :
         ClangBuiltin<"__builtin_ia32_getmantpd256_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_avx512_mask_getmant_pd_512 :
         ClangBuiltin<"__builtin_ia32_getmantpd512_mask">,
          DefaultAttrsIntrinsic<[llvm_v8f64_ty],
          [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_getmant_ps_128 :
         ClangBuiltin<"__builtin_ia32_getmantps128_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_avx512_mask_getmant_ps_256 :
         ClangBuiltin<"__builtin_ia32_getmantps256_mask">,
          DefaultAttrsIntrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
          [IntrNoMem, ImmArg<ArgIndex<1>>]>;

  def int_x86_avx512_mask_getmant_ps_512 :
         ClangBuiltin<"__builtin_ia32_getmantps512_mask">,
          DefaultAttrsIntrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
          [IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_getmant_ss :
         ClangBuiltin<"__builtin_ia32_getmantss_round_mask">,
          DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,
           llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;

  def int_x86_avx512_mask_getmant_sd :
         ClangBuiltin<"__builtin_ia32_getmantsd_round_mask">,
          DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,
           llvm_i8_ty, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<5>>]>;

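  // rsqrt14/rcp14 (AVX-512F) are approximations with a maximum relative
  // error of 2^-14, so they take no rounding/SAE operand.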
  def int_x86_avx512_rsqrt14_ss : ClangBuiltin<"__builtin_ia32_rsqrt14ss_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rsqrt14_sd : ClangBuiltin<"__builtin_ia32_rsqrt14sd_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_rsqrt14_pd_128 : ClangBuiltin<"__builtin_ia32_rsqrt14pd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rsqrt14_pd_256 : ClangBuiltin<"__builtin_ia32_rsqrt14pd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rsqrt14_pd_512 : ClangBuiltin<"__builtin_ia32_rsqrt14pd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rsqrt14_ps_128 : ClangBuiltin<"__builtin_ia32_rsqrt14ps128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rsqrt14_ps_256 : ClangBuiltin<"__builtin_ia32_rsqrt14ps256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rsqrt14_ps_512 : ClangBuiltin<"__builtin_ia32_rsqrt14ps512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_ss : ClangBuiltin<"__builtin_ia32_rcp14ss_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_sd : ClangBuiltin<"__builtin_ia32_rcp14sd_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_rcp14_pd_128 : ClangBuiltin<"__builtin_ia32_rcp14pd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_pd_256 : ClangBuiltin<"__builtin_ia32_rcp14pd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
                            [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_pd_512 : ClangBuiltin<"__builtin_ia32_rcp14pd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_ps_128 : ClangBuiltin<"__builtin_ia32_rcp14ps128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_ps_256 : ClangBuiltin<"__builtin_ia32_rcp14ps256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_rcp14_ps_512 : ClangBuiltin<"__builtin_ia32_rcp14ps512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
                            [IntrNoMem]>;

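  // rcp28/rsqrt28/exp2 (AVX-512ER) tighten the approximation to a maximum
  // relative error of 2^-28; the trailing i32 immediate is the SAE control.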
  def int_x86_avx512_rcp28_ps : ClangBuiltin<"__builtin_ia32_rcp28ps_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_rcp28_pd : ClangBuiltin<"__builtin_ia32_rcp28pd_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_exp2_ps : ClangBuiltin<"__builtin_ia32_exp2ps_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_exp2_pd : ClangBuiltin<"__builtin_ia32_exp2pd_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_rcp28_ss : ClangBuiltin<"__builtin_ia32_rcp28ss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_rcp28_sd : ClangBuiltin<"__builtin_ia32_rcp28sd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_rsqrt28_ps : ClangBuiltin<"__builtin_ia32_rsqrt28ps_mask">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_rsqrt28_pd : ClangBuiltin<"__builtin_ia32_rsqrt28pd_mask">,
      DefaultAttrsIntrinsic<[llvm_v8f64_ty],
                            [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_rsqrt28_ss : ClangBuiltin<"__builtin_ia32_rsqrt28ss_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_rsqrt28_sd : ClangBuiltin<"__builtin_ia32_rsqrt28sd_round_mask">,
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
                            [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_psad_bw_512 : ClangBuiltin<"__builtin_ia32_psadbw512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                            [IntrNoMem, Commutative]>;
}
// Integer arithmetic ops
let TargetPrefix = "x86" in {
  def int_x86_avx512_pmulhu_w_512 : ClangBuiltin<"__builtin_ia32_pmulhuw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_avx512_pmulh_w_512 : ClangBuiltin<"__builtin_ia32_pmulhw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_avx512_pavg_b_512 : ClangBuiltin<"__builtin_ia32_pavgb512">,
      DefaultAttrsIntrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_pavg_w_512 : ClangBuiltin<"__builtin_ia32_pavgw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_pmaddw_d_512 : ClangBuiltin<"__builtin_ia32_pmaddwd512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                            [IntrNoMem, Commutative]>;
  def int_x86_avx512_pmaddubs_w_512 : ClangBuiltin<"__builtin_ia32_pmaddubsw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                            [IntrNoMem]>;

  def int_x86_avx512_dbpsadbw_128 :
      ClangBuiltin<"__builtin_ia32_dbpsadbw128">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_avx512_dbpsadbw_256 :
      ClangBuiltin<"__builtin_ia32_dbpsadbw256">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty],
                            [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_avx512_dbpsadbw_512 :
      ClangBuiltin<"__builtin_ia32_dbpsadbw512">,
      DefaultAttrsIntrinsic<[llvm_v32i16_ty],
                            [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
}

// Gather and Scatter ops
let TargetPrefix = "x86" in {
  // NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
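  // Operand layout for the gathers below: (passthru, base pointer, index
  // vector, mask, scale). The scale at ArgIndex<4> must be an immediate
  // (1, 2, 4, or 8), hence ImmArg.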
  def int_x86_avx512_gather_dpd_512  :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
                             llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gather_dps_512  :
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
                             llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gather_qpd_512  :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gather_qps_512  :
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;


  def int_x86_avx512_gather_dpq_512  :
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
                             llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gather_dpi_512  :
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
                             llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gather_qpq_512  :
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gather_qpi_512  :
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div2_df :
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div2_di :
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div4_df :
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div4_di :
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div4_sf :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div4_si :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div8_sf :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3div8_si :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv2_df :
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv2_di :
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv4_df :
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv4_di :
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv4_sf :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv4_si :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv8_sf :
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_gather3siv8_si :
      DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  // Scatter
  // NOTE: These are deprecated in favor of the versions that take a vXi1 mask.
  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
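  // Operand layout for the scatters below: (base pointer, mask, index
  // vector, value vector, scale), with the scale immediate again at
  // ArgIndex<4>.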
  def int_x86_avx512_scatter_dpd_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
                        llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatter_dps_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
                       llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatter_qpd_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
                     llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatter_qps_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
                     llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;


  def int_x86_avx512_scatter_dpq_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
                         llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatter_dpi_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
                     llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatter_qpq_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i64_ty,
                         llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatter_qpi_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i32_ty,
                         llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv2_df :
        Intrinsic<[],
        [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
        [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv2_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv4_df :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv4_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv4_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv4_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv8_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scatterdiv8_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv2_df :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv2_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv4_df :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv4_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv4_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv4_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv8_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_scattersiv8_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  // gather prefetch
  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
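  // Prefetch operand layout: (mask, index vector, base pointer, scale,
  // hint). Both the scale (ArgIndex<3>) and the locality hint
  // (ArgIndex<4>, selecting the T0 or T1 variant) must be immediates.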
  def int_x86_avx512_gatherpf_dpd_512  : ClangBuiltin<"__builtin_ia32_gatherpfdpd">,
          Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gatherpf_dps_512  : ClangBuiltin<"__builtin_ia32_gatherpfdps">,
          Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gatherpf_qpd_512  : ClangBuiltin<"__builtin_ia32_gatherpfqpd">,
          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_gatherpf_qps_512  : ClangBuiltin<"__builtin_ia32_gatherpfqps">,
          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;

  // scatter prefetch
  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
  def int_x86_avx512_scatterpf_dpd_512  : ClangBuiltin<"__builtin_ia32_scatterpfdpd">,
          Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatterpf_dps_512  : ClangBuiltin<"__builtin_ia32_scatterpfdps">,
          Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatterpf_qpd_512  : ClangBuiltin<"__builtin_ia32_scatterpfqpd">,
          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_scatterpf_qps_512  : ClangBuiltin<"__builtin_ia32_scatterpfqps">,
          Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
                     llvm_i32_ty, llvm_i32_ty], [ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
}

// AVX512 gather/scatter intrinsics that use vXi1 masks.
let TargetPrefix = "x86" in {
  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
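  // As an illustrative sketch (not part of this file), a masked gather of
  // eight doubles through 32-bit indices scaled by 8 would appear in IR as:
  //   %r = call <8 x double> @llvm.x86.avx512.mask.gather.dpd.512(
  //            <8 x double> %passthru, ptr %base, <8 x i32> %idx,
  //            <8 x i1> %mask, i32 8)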
  def int_x86_avx512_mask_gather_dpd_512  :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
                             llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_gather_dps_512  :
      DefaultAttrsIntrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
                             llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_gather_qpd_512  :
      DefaultAttrsIntrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_gather_qps_512  :
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;


  def int_x86_avx512_mask_gather_dpq_512  :
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
                             llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_gather_dpi_512  :
      DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
                             llvm_v16i32_ty, llvm_v16i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_gather_qpq_512  :
      DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_gather_qpi_512  :
      DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
                             llvm_v8i64_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div2_df :
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div2_di :
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div4_df :
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div4_di :
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div4_sf :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div4_si :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div8_sf :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3div8_si :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv2_df :
      DefaultAttrsIntrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv2_di :
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
          [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv4_df :
      DefaultAttrsIntrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv4_di :
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
          [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv4_sf :
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv4_si :
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
          [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv8_sf :
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_gather3siv8_si :
      DefaultAttrsIntrinsic<[llvm_v8i32_ty],
          [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i1_ty, llvm_i32_ty],
          [IntrReadMem, ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatter_dpd_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
                        llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scatter_dps_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
                       llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scatter_qpd_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
                     llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scatter_qps_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
                     llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;


  // NOTE: These can't be ArgMemOnly because you can put the address completely
  // in the index register.
  def int_x86_avx512_mask_scatter_dpq_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty,
                         llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scatter_dpi_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v16i1_ty,
                     llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scatter_qpq_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i64_ty, llvm_v8i64_ty,
                         llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_scatter_qpi_512  :
          Intrinsic<[], [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i64_ty, llvm_v8i32_ty,
                         llvm_i32_ty],
                    [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv2_df :
        Intrinsic<[],
        [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
        [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv2_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv4_df :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv4_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv4_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv4_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv8_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scatterdiv8_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv2_df :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv2_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v2i1_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv4_df :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv4_di :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv4_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv4_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v4i1_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv8_sf :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;

  def int_x86_avx512_mask_scattersiv8_si :
          Intrinsic<[],
          [llvm_ptr_ty, llvm_v8i1_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
          [ImmArg<ArgIndex<4>>]>;
}

// AVX-512 conflict detection instructions
let TargetPrefix = "x86" in {
  def int_x86_avx512_conflict_d_128 :
          ClangBuiltin<"__builtin_ia32_vpconflictsi_128">,
          DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_avx512_conflict_d_256 :
          ClangBuiltin<"__builtin_ia32_vpconflictsi_256">,
          DefaultAttrsIntrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
  def int_x86_avx512_conflict_d_512 :
          ClangBuiltin<"__builtin_ia32_vpconflictsi_512">,
          DefaultAttrsIntrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty],
                                [IntrNoMem]>;

  def int_x86_avx512_conflict_q_128 :
          ClangBuiltin<"__builtin_ia32_vpconflictdi_128">,
          DefaultAttrsIntrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
  def int_x86_avx512_conflict_q_256 :
          ClangBuiltin<"__builtin_ia32_vpconflictdi_256">,
          DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty], [IntrNoMem]>;
  def int_x86_avx512_conflict_q_512 :
          ClangBuiltin<"__builtin_ia32_vpconflictdi_512">,
          DefaultAttrsIntrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty], [IntrNoMem]>;
}

// Compares
let TargetPrefix = "x86" in {
  // Scalar compares with an explicit comparison predicate and SAE control
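  // Operands: (a, b, predicate, sae); the i32 predicate immediate selects
  // the comparison and the trailing i32 immediate is the SAE control,
  // hence ImmArg<ArgIndex<2>> and ImmArg<ArgIndex<3>>.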
  def int_x86_avx512_vcomi_sd : ClangBuiltin<"__builtin_ia32_vcomisd">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
                             llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<3>>]>;
  def int_x86_avx512_vcomi_ss : ClangBuiltin<"__builtin_ia32_vcomiss">,
      DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
                             llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<3>>]>;
}

// Compress, Expand
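// Compress packs the mask-selected source elements contiguously into the low
// lanes of the result; expand performs the inverse, distributing the low
// source lanes to the mask positions. Unselected lanes take the passthru
// operand.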
let TargetPrefix = "x86" in {
  def int_x86_avx512_mask_compress :
      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_expand :
      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                            [LLVMMatchType<0>, LLVMMatchType<0>,
                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                            [IntrNoMem]>;
}

// Truncate
let TargetPrefix = "x86" in {
  def int_x86_avx512_mask_pmov_qb_128 :
      ClangBuiltin<"__builtin_ia32_pmovqb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qb_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovqb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qb_128 :
      ClangBuiltin<"__builtin_ia32_pmovsqb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qb_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovsqb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qb_128 :
      ClangBuiltin<"__builtin_ia32_pmovusqb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qb_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovusqb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qb_256 :
      ClangBuiltin<"__builtin_ia32_pmovqb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qb_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovqb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qb_256 :
      ClangBuiltin<"__builtin_ia32_pmovsqb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qb_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovsqb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qb_256 :
      ClangBuiltin<"__builtin_ia32_pmovusqb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qb_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovusqb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qb_512 :
      ClangBuiltin<"__builtin_ia32_pmovqb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qb_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovqb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qb_512 :
      ClangBuiltin<"__builtin_ia32_pmovsqb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qb_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovsqb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qb_512 :
      ClangBuiltin<"__builtin_ia32_pmovusqb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qb_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovusqb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qw_128 :
      ClangBuiltin<"__builtin_ia32_pmovqw128_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qw_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovqw128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qw_128 :
      ClangBuiltin<"__builtin_ia32_pmovsqw128_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qw_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovsqw128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qw_128 :
      ClangBuiltin<"__builtin_ia32_pmovusqw128_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qw_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovusqw128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qw_256 :
      ClangBuiltin<"__builtin_ia32_pmovqw256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qw_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovqw256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qw_256 :
      ClangBuiltin<"__builtin_ia32_pmovsqw256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qw_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovsqw256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qw_256 :
      ClangBuiltin<"__builtin_ia32_pmovusqw256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qw_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovusqw256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qw_512 :
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qw_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovqw512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qw_512 :
      ClangBuiltin<"__builtin_ia32_pmovsqw512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qw_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovsqw512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qw_512 :
      ClangBuiltin<"__builtin_ia32_pmovusqw512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qw_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovusqw512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qd_128 :
      ClangBuiltin<"__builtin_ia32_pmovqd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_qd_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovqd128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qd_128 :
      ClangBuiltin<"__builtin_ia32_pmovsqd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qd_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovsqd128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qd_128 :
      ClangBuiltin<"__builtin_ia32_pmovusqd128_mask">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qd_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovusqd128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qd_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovqd256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qd_256 :
      ClangBuiltin<"__builtin_ia32_pmovsqd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qd_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovsqd256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qd_256 :
      ClangBuiltin<"__builtin_ia32_pmovusqd256_mask">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qd_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovusqd256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_qd_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovqd512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_qd_512 :
      ClangBuiltin<"__builtin_ia32_pmovsqd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                            [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_qd_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovsqd512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_qd_512 :
      ClangBuiltin<"__builtin_ia32_pmovusqd512_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                            [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_qd_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovusqd512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_db_128 :
      ClangBuiltin<"__builtin_ia32_pmovdb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_db_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovdb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_db_128 :
      ClangBuiltin<"__builtin_ia32_pmovsdb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_db_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovsdb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_db_128 :
      ClangBuiltin<"__builtin_ia32_pmovusdb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_db_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovusdb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_db_256 :
      ClangBuiltin<"__builtin_ia32_pmovdb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_db_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovdb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_db_256 :
      ClangBuiltin<"__builtin_ia32_pmovsdb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_db_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovsdb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_db_256 :
      ClangBuiltin<"__builtin_ia32_pmovusdb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_db_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovusdb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_db_512 :
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_db_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovdb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_db_512 :
      ClangBuiltin<"__builtin_ia32_pmovsdb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_db_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovsdb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_db_512 :
      ClangBuiltin<"__builtin_ia32_pmovusdb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_db_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovusdb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_dw_128 :
      ClangBuiltin<"__builtin_ia32_pmovdw128_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_dw_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovdw128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_dw_128 :
      ClangBuiltin<"__builtin_ia32_pmovsdw128_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_dw_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovsdw128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_dw_128 :
      ClangBuiltin<"__builtin_ia32_pmovusdw128_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_dw_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovusdw128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_dw_256 :
      ClangBuiltin<"__builtin_ia32_pmovdw256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_dw_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovdw256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_dw_256 :
      ClangBuiltin<"__builtin_ia32_pmovsdw256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_dw_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovsdw256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_dw_256 :
      ClangBuiltin<"__builtin_ia32_pmovusdw256_mask">,
      DefaultAttrsIntrinsic<[llvm_v8i16_ty],
                            [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_dw_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovusdw256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_dw_512 :
      DefaultAttrsIntrinsic<[llvm_v16i16_ty],
                            [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_dw_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovdw512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_dw_512 :
      ClangBuiltin<"__builtin_ia32_pmovsdw512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty],
                            [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_dw_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovsdw512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_dw_512 :
      ClangBuiltin<"__builtin_ia32_pmovusdw512_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i16_ty],
                            [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_dw_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovusdw512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_wb_128 :
      ClangBuiltin<"__builtin_ia32_pmovwb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmov_wb_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovwb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_wb_128 :
      ClangBuiltin<"__builtin_ia32_pmovswb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_wb_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovswb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_wb_128 :
      ClangBuiltin<"__builtin_ia32_pmovuswb128_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_wb_mem_128 :
      ClangBuiltin<"__builtin_ia32_pmovuswb128mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_wb_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovwb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_wb_256 :
      ClangBuiltin<"__builtin_ia32_pmovswb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_wb_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovswb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_wb_256 :
      ClangBuiltin<"__builtin_ia32_pmovuswb256_mask">,
      DefaultAttrsIntrinsic<[llvm_v16i8_ty],
                            [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_wb_mem_256 :
      ClangBuiltin<"__builtin_ia32_pmovuswb256mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmov_wb_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovwb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovs_wb_512 :
      ClangBuiltin<"__builtin_ia32_pmovswb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty],
                            [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovs_wb_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovswb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
                            [IntrArgMemOnly]>;
  def int_x86_avx512_mask_pmovus_wb_512 :
      ClangBuiltin<"__builtin_ia32_pmovuswb512_mask">,
      DefaultAttrsIntrinsic<[llvm_v32i8_ty],
                            [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_mask_pmovus_wb_mem_512 :
      ClangBuiltin<"__builtin_ia32_pmovuswb512mem_mask">,
      DefaultAttrsIntrinsic<[],
                            [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
                            [IntrArgMemOnly]>;
}

// Bitwise ternary logic
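// The trailing i32 immediate is an 8-bit truth table: for every bit
// position, the result bit is imm8[(a << 2) | (b << 1) | c], where a, b and
// c are the corresponding bits of the three vector sources.
//
// Illustrative C sketch via the unmasked clang builtin declared below
// (operand and result types assumed to match the intrinsic signature):
//
//   // Per-bit select: 0xCA encodes a ? b : c.
//   __v4si r = __builtin_ia32_pternlogd128(mask, x, y, 0xCA);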
let TargetPrefix = "x86" in {
  def int_x86_avx512_pternlog_d_128 :
      ClangBuiltin<"__builtin_ia32_pternlogd128">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_pternlog_d_256 :
      ClangBuiltin<"__builtin_ia32_pternlogd256">,
      DefaultAttrsIntrinsic<[llvm_v8i32_ty],
                            [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_pternlog_d_512 :
      ClangBuiltin<"__builtin_ia32_pternlogd512">,
      DefaultAttrsIntrinsic<[llvm_v16i32_ty],
                            [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty,
                             llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_pternlog_q_128 :
      ClangBuiltin<"__builtin_ia32_pternlogq128">,
      DefaultAttrsIntrinsic<[llvm_v2i64_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_pternlog_q_256 :
      ClangBuiltin<"__builtin_ia32_pternlogq256">,
      DefaultAttrsIntrinsic<[llvm_v4i64_ty],
                            [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;

  def int_x86_avx512_pternlog_q_512 :
      ClangBuiltin<"__builtin_ia32_pternlogq512">,
      DefaultAttrsIntrinsic<[llvm_v8i64_ty],
                            [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty,
                             llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<3>>]>;
}

// vp2intersect
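// Each vp2intersect intrinsic returns a pair of masks: bit i of the first
// mask is set when element i of the first source equals any element of the
// second source, and bit j of the second mask is set when element j of the
// second source equals any element of the first. For example, with d_128
// sources {1, 2, 3, 4} and {4, 4, 0, 2}, the per-lane results are
// {0, 1, 0, 1} and {1, 1, 0, 1}.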
let TargetPrefix = "x86" in {
  def int_x86_avx512_vp2intersect_q_512 :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty, llvm_v8i1_ty],
                            [llvm_v8i64_ty, llvm_v8i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_vp2intersect_q_256 :
      DefaultAttrsIntrinsic<[llvm_v4i1_ty, llvm_v4i1_ty],
                            [llvm_v4i64_ty, llvm_v4i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_vp2intersect_q_128 :
      DefaultAttrsIntrinsic<[llvm_v2i1_ty, llvm_v2i1_ty],
                            [llvm_v2i64_ty, llvm_v2i64_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_vp2intersect_d_512 :
      DefaultAttrsIntrinsic<[llvm_v16i1_ty, llvm_v16i1_ty],
                            [llvm_v16i32_ty, llvm_v16i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_vp2intersect_d_256 :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty, llvm_v8i1_ty],
                            [llvm_v8i32_ty, llvm_v8i32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512_vp2intersect_d_128 :
      DefaultAttrsIntrinsic<[llvm_v4i1_ty, llvm_v4i1_ty],
                            [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
}

// Misc.
let TargetPrefix = "x86" in {
  // NOTE: Clang does not use these comparison intrinsics until the
  //       distinction in signaling behaviour is implemented.
  def int_x86_avx512_mask_cmp_ps_512 :
      DefaultAttrsIntrinsic<[llvm_v16i1_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                             llvm_i32_ty, llvm_v16i1_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_cmp_pd_512 :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
                             llvm_i32_ty, llvm_v8i1_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_cmp_ps_256 :
      DefaultAttrsIntrinsic<[llvm_v8i1_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
                             llvm_i32_ty, llvm_v8i1_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_cmp_pd_256 :
      DefaultAttrsIntrinsic<[llvm_v4i1_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
                             llvm_i32_ty, llvm_v4i1_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_cmp_ps_128 :
      DefaultAttrsIntrinsic<[llvm_v4i1_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i32_ty, llvm_v4i1_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_avx512_mask_cmp_pd_128 :
      DefaultAttrsIntrinsic<[llvm_v2i1_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i32_ty, llvm_v2i1_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>]>;

  def int_x86_avx512_mask_cmp_ss :
      ClangBuiltin<"__builtin_ia32_cmpss_mask">,
      DefaultAttrsIntrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
                             llvm_i32_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<4>>]>;
  def int_x86_avx512_mask_cmp_sd :
      ClangBuiltin<"__builtin_ia32_cmpsd_mask">,
      DefaultAttrsIntrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
                             llvm_i32_ty, llvm_i8_ty, llvm_i32_ty],
                            [IntrNoMem, ImmArg<ArgIndex<2>>,
                             ImmArg<ArgIndex<4>>]>;
}

//===----------------------------------------------------------------------===//
// SHA intrinsics
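// The i8 immediate to sha1rnds4 selects which of the four SHA-1 round
// groups (rounds 0-19, 20-39, 40-59, 60-79) supplies the round function
// and constant, and must be a compile-time constant (ImmArg), e.g.
//   __v4si r = __builtin_ia32_sha1rnds4(abcd, msg, 0);  // rounds 0-19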
let TargetPrefix = "x86" in {
  def int_x86_sha1rnds4 : ClangBuiltin<"__builtin_ia32_sha1rnds4">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_i8_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
  def int_x86_sha1nexte : ClangBuiltin<"__builtin_ia32_sha1nexte">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_sha1msg1 : ClangBuiltin<"__builtin_ia32_sha1msg1">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_sha1msg2 : ClangBuiltin<"__builtin_ia32_sha1msg2">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_sha256rnds2 : ClangBuiltin<"__builtin_ia32_sha256rnds2">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
                             llvm_v4i32_ty], [IntrNoMem]>;
  def int_x86_sha256msg1 : ClangBuiltin<"__builtin_ia32_sha256msg1">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
  def int_x86_sha256msg2 : ClangBuiltin<"__builtin_ia32_sha256msg2">,
      DefaultAttrsIntrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
                            [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// SHA512 intrinsics
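// vsha512msg1/vsha512msg2 perform SHA-512 message scheduling and
// vsha512rnds2 performs two SHA-512 rounds, operating on 256-bit ymm
// values.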
let TargetPrefix = "x86" in {
def int_x86_vsha512msg1 : ClangBuiltin<"__builtin_ia32_vsha512msg1">,
    DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v2i64_ty],
                          [IntrNoMem]>;
def int_x86_vsha512msg2 : ClangBuiltin<"__builtin_ia32_vsha512msg2">,
    DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
                          [IntrNoMem]>;
def int_x86_vsha512rnds2 : ClangBuiltin<"__builtin_ia32_vsha512rnds2">,
    DefaultAttrsIntrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v2i64_ty],
                          [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// Thread synchronization ops with timer.
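// monitorx arms address-range monitoring on the pointer operand; mwaitx
// then waits until a store hits the monitored range or, if enabled via its
// first operand's extension bits, until the TSC-based timeout in its third
// operand expires.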
let TargetPrefix = "x86" in {
  def int_x86_monitorx
      : ClangBuiltin<"__builtin_ia32_monitorx">,
        Intrinsic<[], [ llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty ], []>;
  def int_x86_mwaitx
      : ClangBuiltin<"__builtin_ia32_mwaitx">,
        Intrinsic<[], [ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ], []>;
}

//===----------------------------------------------------------------------===//
// Cache-line zero
let TargetPrefix = "x86" in {
  def int_x86_clzero : ClangBuiltin<"__builtin_ia32_clzero">,
      Intrinsic<[], [llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// Cache write back intrinsics

let TargetPrefix = "x86" in {
  // Write back and invalidate
  def int_x86_wbinvd : ClangBuiltin<"__builtin_ia32_wbinvd">,
      Intrinsic<[], [], []>;

  // Write back no-invalidate
  def int_x86_wbnoinvd : ClangBuiltin<"__builtin_ia32_wbnoinvd">,
      Intrinsic<[], [], []>;
}

//===----------------------------------------------------------------------===//
// Cache-line demote

let TargetPrefix = "x86" in {
  def int_x86_cldemote : ClangBuiltin<"__builtin_ia32_cldemote">,
      Intrinsic<[], [llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// Wait and pause enhancements
let TargetPrefix = "x86" in {
  def int_x86_umonitor : ClangBuiltin<"__builtin_ia32_umonitor">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_umwait : ClangBuiltin<"__builtin_ia32_umwait">,
              Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
  def int_x86_tpause : ClangBuiltin<"__builtin_ia32_tpause">,
              Intrinsic<[llvm_i8_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
}

//===----------------------------------------------------------------------===//
// Direct Move Instructions
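// directstore32/64 perform weakly-ordered direct stores of the integer
// operand through the pointer; movdir64b copies 64 bytes from the second
// pointer to the first as a single 64-byte direct store.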

let TargetPrefix = "x86" in {
  def int_x86_directstore32 : ClangBuiltin<"__builtin_ia32_directstore_u32">,
      Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], []>;
  def int_x86_directstore64 : ClangBuiltin<"__builtin_ia32_directstore_u64">,
      Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;
  def int_x86_movdir64b : ClangBuiltin<"__builtin_ia32_movdir64b">,
      Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// PTWrite - Write data to processor trace packet

let TargetPrefix = "x86" in {
  def int_x86_ptwrite32 : ClangBuiltin<"__builtin_ia32_ptwrite32">,
              Intrinsic<[], [llvm_i32_ty], []>;
  def int_x86_ptwrite64 : ClangBuiltin<"__builtin_ia32_ptwrite64">,
              Intrinsic<[], [llvm_i64_ty], []>;
}

//===----------------------------------------------------------------------===//
// INVPCID - Invalidate Process-Context Identifier

let TargetPrefix = "x86" in {
  def int_x86_invpcid : ClangBuiltin<"__builtin_ia32_invpcid">,
              Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty], []>;
}
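
//===----------------------------------------------------------------------===//
// AVX512 BF16 - bfloat16 conversions and dot products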

let TargetPrefix = "x86" in {
  def int_x86_avx512bf16_cvtne2ps2bf16_128:
      ClangBuiltin<"__builtin_ia32_cvtne2ps2bf16_128">,
      DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512bf16_cvtne2ps2bf16_256:
      ClangBuiltin<"__builtin_ia32_cvtne2ps2bf16_256">,
      DefaultAttrsIntrinsic<[llvm_v16bf16_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
                            [IntrNoMem]>;
  def int_x86_avx512bf16_cvtne2ps2bf16_512:
      ClangBuiltin<"__builtin_ia32_cvtne2ps2bf16_512">,
      DefaultAttrsIntrinsic<[llvm_v32bf16_ty], [llvm_v16f32_ty, llvm_v16f32_ty],
                            [IntrNoMem]>;
  // This intrinsic must be masked because it produces fewer than 128 bits
  // of result.
  def int_x86_avx512bf16_mask_cvtneps2bf16_128:
      DefaultAttrsIntrinsic<[llvm_v8bf16_ty],
                            [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v4i1_ty],
                            [IntrNoMem]>;
  def int_x86_avx512bf16_cvtneps2bf16_256:
      ClangBuiltin<"__builtin_ia32_cvtneps2bf16_256">,
      DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v8f32_ty], [IntrNoMem]>;
  def int_x86_avx512bf16_cvtneps2bf16_512:
      ClangBuiltin<"__builtin_ia32_cvtneps2bf16_512">,
      DefaultAttrsIntrinsic<[llvm_v16bf16_ty], [llvm_v16f32_ty], [IntrNoMem]>;
  def int_x86_avx512bf16_dpbf16ps_128:
      ClangBuiltin<"__builtin_ia32_dpbf16ps_128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty],
                            [llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512bf16_dpbf16ps_256:
      ClangBuiltin<"__builtin_ia32_dpbf16ps_256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty],
                            [llvm_v8f32_ty, llvm_v16bf16_ty, llvm_v16bf16_ty],
                            [IntrNoMem]>;
  def int_x86_avx512bf16_dpbf16ps_512:
      ClangBuiltin<"__builtin_ia32_dpbf16ps_512">,
      DefaultAttrsIntrinsic<[llvm_v16f32_ty],
                            [llvm_v16f32_ty, llvm_v32bf16_ty, llvm_v32bf16_ty],
                            [IntrNoMem]>;
}

//===----------------------------------------------------------------------===//
// ENQCMD - Enqueue Command Instructions

let TargetPrefix = "x86" in {
  def int_x86_enqcmd : ClangBuiltin<"__builtin_ia32_enqcmd">,
              Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_ptr_ty], []>;
  def int_x86_enqcmds : ClangBuiltin<"__builtin_ia32_enqcmds">,
              Intrinsic<[llvm_i8_ty], [llvm_ptr_ty, llvm_ptr_ty], []>;
}

//===----------------------------------------------------------------------===//
// SERIALIZE - Serialize instruction fetch and execution

let TargetPrefix = "x86" in {
  def int_x86_serialize : ClangBuiltin<"__builtin_ia32_serialize">,
              Intrinsic<[], [], []>;
}

//===----------------------------------------------------------------------===//
// TSXLDTRK - TSX Suspend Load Address Tracking

let TargetPrefix = "x86" in {
  def int_x86_xsusldtrk : ClangBuiltin<"__builtin_ia32_xsusldtrk">,
              Intrinsic<[], [], []>;
  def int_x86_xresldtrk : ClangBuiltin<"__builtin_ia32_xresldtrk">,
              Intrinsic<[], [], []>;
}

//===----------------------------------------------------------------------===//
// Key Locker
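// loadiwkey loads the CPU's internal wrapping key; encodekey128/256 wrap
// an AES key into an opaque handle returned across several v2i64 values.
// The aesenc*/aesdec* intrinsics read a handle through their pointer
// operand and return an i8 status flag (derived from ZF) along with the
// processed 128-bit block(s).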
let TargetPrefix = "x86" in {
  def int_x86_loadiwkey : ClangBuiltin<"__builtin_ia32_loadiwkey">,
      Intrinsic<[], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
                []>;
  def int_x86_encodekey128 :
      Intrinsic<[llvm_i32_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [llvm_i32_ty, llvm_v2i64_ty], []>;
  def int_x86_encodekey256 :
      Intrinsic<[llvm_i32_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [llvm_i32_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
  def int_x86_aesenc128kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty], [llvm_v2i64_ty, llvm_ptr_ty], []>;
  def int_x86_aesdec128kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty], [llvm_v2i64_ty, llvm_ptr_ty], []>;
  def int_x86_aesenc256kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty], [llvm_v2i64_ty, llvm_ptr_ty], []>;
  def int_x86_aesdec256kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty], [llvm_v2i64_ty, llvm_ptr_ty], []>;
  def int_x86_aesencwide128kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
  def int_x86_aesdecwide128kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
  def int_x86_aesencwide256kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
  def int_x86_aesdecwide256kl :
      Intrinsic<[llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
                [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty,
                 llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>;
}

//===----------------------------------------------------------------------===//
// AMX - Intel AMX extensions
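// AMX tiles are 2-D registers (tmm0-tmm7) of up to 16 rows x 64 bytes
// (1 KiB each). ldtilecfg/sttilecfg load and store the 64-byte tile
// configuration; the i8 immediates on the tile ops below are tile register
// numbers and must be compile-time constants (ImmArg).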

let TargetPrefix = "x86" in {
  def int_x86_ldtilecfg : ClangBuiltin<"__builtin_ia32_tile_loadconfig">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_sttilecfg : ClangBuiltin<"__builtin_ia32_tile_storeconfig">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_tilerelease : ClangBuiltin<"__builtin_ia32_tilerelease">,
              Intrinsic<[], [], []>;
  def int_x86_tilezero : ClangBuiltin<"__builtin_ia32_tilezero">,
              Intrinsic<[], [llvm_i8_ty], [ImmArg<ArgIndex<0>>]>;
  def int_x86_tileloadd64 : ClangBuiltin<"__builtin_ia32_tileloadd64">,
              Intrinsic<[], [llvm_i8_ty, llvm_ptr_ty, llvm_i64_ty],
                        [ImmArg<ArgIndex<0>>]>;
  def int_x86_tileloaddt164 : ClangBuiltin<"__builtin_ia32_tileloaddt164">,
              Intrinsic<[], [llvm_i8_ty, llvm_ptr_ty, llvm_i64_ty],
                        [ImmArg<ArgIndex<0>>]>;
  def int_x86_tilestored64 : ClangBuiltin<"__builtin_ia32_tilestored64">,
              Intrinsic<[], [llvm_i8_ty, llvm_ptr_ty, llvm_i64_ty],
                        [ImmArg<ArgIndex<0>>]>;
  def int_x86_tdpbssd : ClangBuiltin<"__builtin_ia32_tdpbssd">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  def int_x86_tdpbsud : ClangBuiltin<"__builtin_ia32_tdpbsud">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  def int_x86_tdpbusd : ClangBuiltin<"__builtin_ia32_tdpbusd">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  def int_x86_tdpbuud : ClangBuiltin<"__builtin_ia32_tdpbuud">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  def int_x86_tdpbf16ps : ClangBuiltin<"__builtin_ia32_tdpbf16ps">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  // AMX-FP16 - Intel FP16 AMX extensions
  def int_x86_tdpfp16ps : ClangBuiltin<"__builtin_ia32_tdpfp16ps">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  // AMX-COMPLEX
  def int_x86_tcmmimfp16ps : ClangBuiltin<"__builtin_ia32_tcmmimfp16ps">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;
  def int_x86_tcmmrlfp16ps : ClangBuiltin<"__builtin_ia32_tcmmrlfp16ps">,
              Intrinsic<[], [llvm_i8_ty, llvm_i8_ty, llvm_i8_ty],
                        [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<1>>,
                         ImmArg<ArgIndex<2>>]>;

  // AMX - internal intrinsics
  def int_x86_ldtilecfg_internal :
              ClangBuiltin<"__builtin_ia32_tile_loadconfig_internal">,
              Intrinsic<[], [llvm_ptr_ty], []>;
  def int_x86_tileloadd64_internal :
              ClangBuiltin<"__builtin_ia32_tileloadd64_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_ptr_ty, llvm_i64_ty],
                        []>;
  def int_x86_tileloaddt164_internal :
              ClangBuiltin<"__builtin_ia32_tileloaddt164_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_ptr_ty, llvm_i64_ty],
                        []>;
  def int_x86_tdpbssd_internal :
              ClangBuiltin<"__builtin_ia32_tdpbssd_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  def int_x86_tdpbsud_internal :
              ClangBuiltin<"__builtin_ia32_tdpbsud_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  def int_x86_tdpbusd_internal :
              ClangBuiltin<"__builtin_ia32_tdpbusd_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  def int_x86_tdpbuud_internal :
              ClangBuiltin<"__builtin_ia32_tdpbuud_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  def int_x86_tilestored64_internal :
              ClangBuiltin<"__builtin_ia32_tilestored64_internal">,
              Intrinsic<[], [llvm_i16_ty, llvm_i16_ty, llvm_ptr_ty,
                             llvm_i64_ty, llvm_x86amx_ty], []>;
  def int_x86_tilezero_internal :
              ClangBuiltin<"__builtin_ia32_tilezero_internal">,
              Intrinsic<[llvm_x86amx_ty], [llvm_i16_ty, llvm_i16_ty],
                        []>;
  def int_x86_tdpbf16ps_internal :
              ClangBuiltin<"__builtin_ia32_tdpbf16ps_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  def int_x86_tdpfp16ps_internal :
              ClangBuiltin<"__builtin_ia32_tdpfp16ps_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  // The vector size can be smaller than the AMX register size (1024 bytes).
  def int_x86_cast_vector_to_tile:
      DefaultAttrsIntrinsic<[llvm_x86amx_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  // The vector size can be smaller than the AMX register size (1024 bytes).
  def int_x86_cast_tile_to_vector:
      DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_x86amx_ty], [IntrNoMem]>;

  def int_x86_tcmmimfp16ps_internal :
              ClangBuiltin<"__builtin_ia32_tcmmimfp16ps_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
  def int_x86_tcmmrlfp16ps_internal :
              ClangBuiltin<"__builtin_ia32_tcmmrlfp16ps_internal">,
              Intrinsic<[llvm_x86amx_ty],
                        [llvm_i16_ty, llvm_i16_ty, llvm_i16_ty,
                         llvm_x86amx_ty, llvm_x86amx_ty,
                         llvm_x86amx_ty], []>;
}

//===----------------------------------------------------------------------===//
let TargetPrefix = "x86" in {
// CMPCCXADD
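// cmpccxadd atomically loads from the pointer, compares the loaded value
// with the second operand under the condition code in the trailing
// immediate, adds the third operand to memory when the condition holds,
// and returns the original memory value.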
def int_x86_cmpccxadd32
    : ClangBuiltin<"__builtin_ia32_cmpccxadd32">,
      Intrinsic<[llvm_i32_ty],
                [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
                [IntrArgMemOnly, ImmArg<ArgIndex<3>>]>;

def int_x86_cmpccxadd64
    : ClangBuiltin<"__builtin_ia32_cmpccxadd64">,
      Intrinsic<[llvm_i64_ty],
                [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
                [IntrArgMemOnly, ImmArg<ArgIndex<3>>]>;

// AVX-NE-CONVERT
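// VEX-encoded (non-AVX512) conversions from bf16/fp16 to f32: the vbcstne*
// forms broadcast one scalar from memory, while vcvtnee*/vcvtneo* convert
// the even- or odd-indexed elements of a vector loaded from memory.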
def int_x86_vbcstnebf162ps128
    : ClangBuiltin<"__builtin_ia32_vbcstnebf162ps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vbcstnebf162ps256
    : ClangBuiltin<"__builtin_ia32_vbcstnebf162ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vbcstnesh2ps128
    : ClangBuiltin<"__builtin_ia32_vbcstnesh2ps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vbcstnesh2ps256
    : ClangBuiltin<"__builtin_ia32_vbcstnesh2ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneebf162ps128
    : ClangBuiltin<"__builtin_ia32_vcvtneebf162ps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneebf162ps256
    : ClangBuiltin<"__builtin_ia32_vcvtneebf162ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneeph2ps128
    : ClangBuiltin<"__builtin_ia32_vcvtneeph2ps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneeph2ps256
    : ClangBuiltin<"__builtin_ia32_vcvtneeph2ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneobf162ps128
    : ClangBuiltin<"__builtin_ia32_vcvtneobf162ps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneobf162ps256
    : ClangBuiltin<"__builtin_ia32_vcvtneobf162ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneoph2ps128
    : ClangBuiltin<"__builtin_ia32_vcvtneoph2ps128">,
      DefaultAttrsIntrinsic<[llvm_v4f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneoph2ps256
    : ClangBuiltin<"__builtin_ia32_vcvtneoph2ps256">,
      DefaultAttrsIntrinsic<[llvm_v8f32_ty], [llvm_ptr_ty],
                            [IntrReadMem, IntrArgMemOnly]>;
def int_x86_vcvtneps2bf16128
    : ClangBuiltin<"__builtin_ia32_vcvtneps2bf16128">,
      DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
def int_x86_vcvtneps2bf16256
    : ClangBuiltin<"__builtin_ia32_vcvtneps2bf16256">,
      DefaultAttrsIntrinsic<[llvm_v8bf16_ty], [llvm_v8f32_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
// SM3 intrinsics
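// SM3 is the ShangMi hash standard; vsm3msg1/vsm3msg2 perform message
// expansion and vsm3rnds2 performs two rounds, with the round number
// supplied by the constant i32 immediate (ImmArg).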
let TargetPrefix = "x86" in {
  def int_x86_vsm3msg1
      : ClangBuiltin<"__builtin_ia32_vsm3msg1">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
        [IntrNoMem]>;
  def int_x86_vsm3msg2
      : ClangBuiltin<"__builtin_ia32_vsm3msg2">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
        [IntrNoMem]>;
  def int_x86_vsm3rnds2
      : ClangBuiltin<"__builtin_ia32_vsm3rnds2">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
        [ImmArg<ArgIndex<3>>, IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
// SM4 intrinsics
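// SM4 is the ShangMi block cipher; vsm4key4 performs four rounds of key
// expansion and vsm4rnds4 performs four encryption rounds on each 128-bit
// lane.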
let TargetPrefix = "x86" in {
  def int_x86_vsm4key4128
      : ClangBuiltin<"__builtin_ia32_vsm4key4128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_v4i32_ty],
        [IntrNoMem]>;
  def int_x86_vsm4key4256
      : ClangBuiltin<"__builtin_ia32_vsm4key4256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
        [llvm_v8i32_ty, llvm_v8i32_ty],
        [IntrNoMem]>;
  def int_x86_vsm4rnds4128
      : ClangBuiltin<"__builtin_ia32_vsm4rnds4128">,
        DefaultAttrsIntrinsic<[llvm_v4i32_ty],
        [llvm_v4i32_ty, llvm_v4i32_ty],
        [IntrNoMem]>;
  def int_x86_vsm4rnds4256
      : ClangBuiltin<"__builtin_ia32_vsm4rnds4256">,
        DefaultAttrsIntrinsic<[llvm_v8i32_ty],
        [llvm_v8i32_ty, llvm_v8i32_ty],
        [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
// RAO-INT intrinsics
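// "Remote atomic" integer ops: aadd/aand/aor/axor atomically apply the
// operation to memory without returning the old value, hence the void
// result and IntrArgMemOnly.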
let TargetPrefix = "x86" in {
  def int_x86_aadd32
      : ClangBuiltin<"__builtin_ia32_aadd32">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly]>;
  def int_x86_aadd64
      : ClangBuiltin<"__builtin_ia32_aadd64">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly]>;
  def int_x86_aand32
      : ClangBuiltin<"__builtin_ia32_aand32">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly]>;
  def int_x86_aand64
      : ClangBuiltin<"__builtin_ia32_aand64">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly]>;
  def int_x86_aor32
      : ClangBuiltin<"__builtin_ia32_aor32">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly]>;
  def int_x86_aor64
      : ClangBuiltin<"__builtin_ia32_aor64">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly]>;
  def int_x86_axor32
      : ClangBuiltin<"__builtin_ia32_axor32">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly]>;
  def int_x86_axor64
      : ClangBuiltin<"__builtin_ia32_axor64">,
        Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly]>;
}

//===----------------------------------------------------------------------===//
// UINTR - User Level Interrupt
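// clui and stui clear and set the user interrupt flag (UIF), testui reads
// it back, and senduipi sends a user inter-processor interrupt to the
// target selected by the i64 UITT index.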

let TargetPrefix = "x86" in {
  def int_x86_clui : ClangBuiltin<"__builtin_ia32_clui">,
              Intrinsic<[], [], []>;
  def int_x86_stui : ClangBuiltin<"__builtin_ia32_stui">,
              Intrinsic<[], [], []>;
  def int_x86_testui : ClangBuiltin<"__builtin_ia32_testui">,
              Intrinsic<[llvm_i8_ty], [], []>;
  def int_x86_senduipi : ClangBuiltin<"__builtin_ia32_senduipi">,
              Intrinsic<[], [llvm_i64_ty], []>;
}

//===----------------------------------------------------------------------===//
// avx512_fp16: arithmetic, comparison and conversion intrinsics. The
// trailing i32 on the 512-bit and *_round variants is a rounding-mode/SAE
// immediate and must be a compile-time constant (ImmArg).
let TargetPrefix = "x86" in {
  def int_x86_avx512fp16_add_ph_512
      : ClangBuiltin<"__builtin_ia32_addph512">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_sub_ph_512
      : ClangBuiltin<"__builtin_ia32_subph512">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_mul_ph_512
      : ClangBuiltin<"__builtin_ia32_mulph512">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_div_ph_512
      : ClangBuiltin<"__builtin_ia32_divph512">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_max_ph_128
      : ClangBuiltin<"__builtin_ia32_maxph128">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty ], [ IntrNoMem ]>;
  def int_x86_avx512fp16_max_ph_256
      : ClangBuiltin<"__builtin_ia32_maxph256">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_max_ph_512
      : ClangBuiltin<"__builtin_ia32_maxph512">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_min_ph_128
      : ClangBuiltin<"__builtin_ia32_minph128">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty ], [ IntrNoMem ]>;
  def int_x86_avx512fp16_min_ph_256
      : ClangBuiltin<"__builtin_ia32_minph256">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_min_ph_512
      : ClangBuiltin<"__builtin_ia32_minph512">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;

  def int_x86_avx512fp16_mask_cmp_ph_512
      : DefaultAttrsIntrinsic<[ llvm_v32i1_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty,
                                llvm_v32i1_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>>, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_cmp_ph_256
      : DefaultAttrsIntrinsic<[ llvm_v16i1_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty, llvm_i32_ty,
                                llvm_v16i1_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_mask_cmp_ph_128
      : DefaultAttrsIntrinsic<[ llvm_v8i1_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty,
                                llvm_v8i1_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;

  def int_x86_avx512fp16_mask_add_sh_round
      : ClangBuiltin<"__builtin_ia32_addsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_sub_sh_round
      : ClangBuiltin<"__builtin_ia32_subsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_mul_sh_round
      : ClangBuiltin<"__builtin_ia32_mulsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_div_sh_round
      : ClangBuiltin<"__builtin_ia32_divsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_min_sh_round
      : ClangBuiltin<"__builtin_ia32_minsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_max_sh_round
      : ClangBuiltin<"__builtin_ia32_maxsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_cmp_sh
      : ClangBuiltin<"__builtin_ia32_cmpsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_i8_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>>,
                                ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_vcomi_sh
      : ClangBuiltin<"__builtin_ia32_vcomish">,
        DefaultAttrsIntrinsic<[ llvm_i32_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>>,
                                ImmArg<ArgIndex<3>> ]>;

  def int_x86_avx512fp16_mask_vcvtph2psx_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2psx128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v8f16_ty, llvm_v4f32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2psx_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2psx256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f16_ty, llvm_v8f32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2psx_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2psx512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f16_ty, llvm_v16f32_ty, llvm_i16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtps2phx_128
      : ClangBuiltin<"__builtin_ia32_vcvtps2phx128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v4f32_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtps2phx_256
      : ClangBuiltin<"__builtin_ia32_vcvtps2phx256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f32_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtps2phx_512
      : ClangBuiltin<"__builtin_ia32_vcvtps2phx512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f32_ty, llvm_v16f16_ty, llvm_i16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtpd2ph_128
      : ClangBuiltin<"__builtin_ia32_vcvtpd2ph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v2f64_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtpd2ph_256
      : ClangBuiltin<"__builtin_ia32_vcvtpd2ph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v4f64_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtpd2ph_512
      : ClangBuiltin<"__builtin_ia32_vcvtpd2ph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f64_ty, llvm_v8f16_ty, llvm_i8_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtph2pd_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2pd128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v2f64_ty ],
                              [ llvm_v8f16_ty, llvm_v2f64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2pd_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2pd256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f64_ty ],
                              [ llvm_v8f16_ty, llvm_v4f64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2pd_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2pd512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f64_ty ],
                              [ llvm_v8f16_ty, llvm_v8f64_ty, llvm_i8_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtsh2ss_round
      : ClangBuiltin<"__builtin_ia32_vcvtsh2ss_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v8f16_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vcvtss2sh_round
      : ClangBuiltin<"__builtin_ia32_vcvtss2sh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v4f32_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vcvtsd2sh_round
      : ClangBuiltin<"__builtin_ia32_vcvtsd2sh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v2f64_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vcvtsh2sd_round
      : ClangBuiltin<"__builtin_ia32_vcvtsh2sd_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v2f64_ty ],
                              [ llvm_v2f64_ty, llvm_v8f16_ty, llvm_v2f64_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;

  def int_x86_avx512fp16_mask_vcvtph2w_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2w128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i16_ty ],
                              [ llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2w_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2w256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i16_ty ],
                              [ llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2w_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2w512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32i16_ty ],
                              [ llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvttph2w_128
      : ClangBuiltin<"__builtin_ia32_vcvttph2w128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i16_ty ],
                              [ llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2w_256
      : ClangBuiltin<"__builtin_ia32_vcvttph2w256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i16_ty ],
                              [ llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2w_512
      : ClangBuiltin<"__builtin_ia32_vcvttph2w512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32i16_ty ],
                              [ llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtph2uw_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2uw128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i16_ty ],
                              [ llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2uw_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2uw256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i16_ty ],
                              [ llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2uw_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2uw512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32i16_ty ],
                              [ llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvttph2uw_128
      : ClangBuiltin<"__builtin_ia32_vcvttph2uw128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i16_ty ],
                              [ llvm_v8f16_ty, llvm_v8i16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2uw_256
      : ClangBuiltin<"__builtin_ia32_vcvttph2uw256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i16_ty ],
                              [ llvm_v16f16_ty, llvm_v16i16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2uw_512
      : ClangBuiltin<"__builtin_ia32_vcvttph2uw512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32i16_ty ],
                              [ llvm_v32f16_ty, llvm_v32i16_ty, llvm_i32_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;

  def int_x86_avx512fp16_mask_vcvtph2dq_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2dq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i32_ty ],
                              [ llvm_v8f16_ty, llvm_v4i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2dq_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2dq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i32_ty ],
                              [ llvm_v8f16_ty, llvm_v8i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2dq_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2dq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i32_ty ],
                              [ llvm_v16f16_ty, llvm_v16i32_ty, llvm_i16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtph2udq_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2udq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i32_ty ],
                              [ llvm_v8f16_ty, llvm_v4i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2udq_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2udq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i32_ty ],
                              [ llvm_v8f16_ty, llvm_v8i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2udq_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2udq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i32_ty ],
                              [ llvm_v16f16_ty, llvm_v16i32_ty, llvm_i16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtdq2ph_128
      : ClangBuiltin<"__builtin_ia32_vcvtdq2ph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v4i32_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtudq2ph_128
      : ClangBuiltin<"__builtin_ia32_vcvtudq2ph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v4i32_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2dq_128
      : ClangBuiltin<"__builtin_ia32_vcvttph2dq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i32_ty ],
                              [ llvm_v8f16_ty, llvm_v4i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2dq_256
      : ClangBuiltin<"__builtin_ia32_vcvttph2dq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i32_ty ],
                              [ llvm_v8f16_ty, llvm_v8i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2dq_512
      : ClangBuiltin<"__builtin_ia32_vcvttph2dq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i32_ty ],
                              [ llvm_v16f16_ty, llvm_v16i32_ty, llvm_i16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvttph2udq_128
      : ClangBuiltin<"__builtin_ia32_vcvttph2udq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i32_ty ],
                              [ llvm_v8f16_ty, llvm_v4i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2udq_256
      : ClangBuiltin<"__builtin_ia32_vcvttph2udq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i32_ty ],
                              [ llvm_v8f16_ty, llvm_v8i32_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2udq_512
      : ClangBuiltin<"__builtin_ia32_vcvttph2udq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16i32_ty ],
                              [ llvm_v16f16_ty, llvm_v16i32_ty, llvm_i16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;

  def int_x86_avx512fp16_mask_vcvtqq2ph_128
      : ClangBuiltin<"__builtin_ia32_vcvtqq2ph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v2i64_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtqq2ph_256
      : ClangBuiltin<"__builtin_ia32_vcvtqq2ph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v4i64_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2qq_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2qq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v2i64_ty ],
                              [ llvm_v8f16_ty, llvm_v2i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2qq_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2qq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i64_ty ],
                              [ llvm_v8f16_ty, llvm_v4i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2qq_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2qq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i64_ty ],
                              [ llvm_v8f16_ty, llvm_v8i64_ty, llvm_i8_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvtuqq2ph_128
      : ClangBuiltin<"__builtin_ia32_vcvtuqq2ph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v2i64_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtuqq2ph_256
      : ClangBuiltin<"__builtin_ia32_vcvtuqq2ph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v4i64_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2uqq_128
      : ClangBuiltin<"__builtin_ia32_vcvtph2uqq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v2i64_ty ],
                              [ llvm_v8f16_ty, llvm_v2i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2uqq_256
      : ClangBuiltin<"__builtin_ia32_vcvtph2uqq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i64_ty ],
                              [ llvm_v8f16_ty, llvm_v4i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvtph2uqq_512
      : ClangBuiltin<"__builtin_ia32_vcvtph2uqq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i64_ty ],
                              [ llvm_v8f16_ty, llvm_v8i64_ty, llvm_i8_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvttph2qq_128
      : ClangBuiltin<"__builtin_ia32_vcvttph2qq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v2i64_ty ],
                              [ llvm_v8f16_ty, llvm_v2i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2qq_256
      : ClangBuiltin<"__builtin_ia32_vcvttph2qq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i64_ty ],
                              [ llvm_v8f16_ty, llvm_v4i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2qq_512
      : ClangBuiltin<"__builtin_ia32_vcvttph2qq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i64_ty ],
                              [ llvm_v8f16_ty, llvm_v8i64_ty, llvm_i8_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_vcvttph2uqq_128
      : ClangBuiltin<"__builtin_ia32_vcvttph2uqq128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v2i64_ty ],
                              [ llvm_v8f16_ty, llvm_v2i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2uqq_256
      : ClangBuiltin<"__builtin_ia32_vcvttph2uqq256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4i64_ty ],
                              [ llvm_v8f16_ty, llvm_v4i64_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vcvttph2uqq_512
      : ClangBuiltin<"__builtin_ia32_vcvttph2uqq512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8i64_ty ],
                              [ llvm_v8f16_ty, llvm_v8i64_ty, llvm_i8_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;

  def int_x86_avx512fp16_vcvtsh2si32
      : ClangBuiltin<"__builtin_ia32_vcvtsh2si32">,
        DefaultAttrsIntrinsic<[ llvm_i32_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvtsh2usi32
      : ClangBuiltin<"__builtin_ia32_vcvtsh2usi32">,
        DefaultAttrsIntrinsic<[ llvm_i32_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvtsh2si64
      : ClangBuiltin<"__builtin_ia32_vcvtsh2si64">,
        DefaultAttrsIntrinsic<[ llvm_i64_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvtsh2usi64
      : ClangBuiltin<"__builtin_ia32_vcvtsh2usi64">,
        DefaultAttrsIntrinsic<[ llvm_i64_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvtusi2sh
      : ClangBuiltin<"__builtin_ia32_vcvtusi2sh">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_vcvtusi642sh
      : ClangBuiltin<"__builtin_ia32_vcvtusi642sh">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i64_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_vcvtsi2sh
      : ClangBuiltin<"__builtin_ia32_vcvtsi2sh">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_vcvtsi642sh
      : ClangBuiltin<"__builtin_ia32_vcvtsi642sh">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i64_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>> ]>;
  def int_x86_avx512fp16_vcvttsh2si32
      : ClangBuiltin<"__builtin_ia32_vcvttsh2si32">,
        DefaultAttrsIntrinsic<[ llvm_i32_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvttsh2si64
      : ClangBuiltin<"__builtin_ia32_vcvttsh2si64">,
        DefaultAttrsIntrinsic<[ llvm_i64_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvttsh2usi32
      : ClangBuiltin<"__builtin_ia32_vcvttsh2usi32">,
        DefaultAttrsIntrinsic<[ llvm_i32_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_vcvttsh2usi64
      : ClangBuiltin<"__builtin_ia32_vcvttsh2usi64">,
        DefaultAttrsIntrinsic<[ llvm_i64_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
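  // Editorial note on the scalar conversions above: the "vcvtt" forms
  // truncate toward zero, so their trailing i32 immediate only controls
  // exception suppression (SAE), whereas the "vcvt" forms honour it as an
  // embedded rounding control.  This follows the Intel intrinsics
  // convention; the definitions themselves only constrain the operand to be
  // an immediate via ImmArg.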

  def int_x86_avx512fp16_sqrt_ph_512
      : DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_sqrt_sh
      : DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_rsqrt_ph_128
      : ClangBuiltin<"__builtin_ia32_rsqrtph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rsqrt_ph_256
      : ClangBuiltin<"__builtin_ia32_rsqrtph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rsqrt_ph_512
      : ClangBuiltin<"__builtin_ia32_rsqrtph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rsqrt_sh
      : ClangBuiltin<"__builtin_ia32_rsqrtsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rcp_ph_128
      : ClangBuiltin<"__builtin_ia32_rcpph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rcp_ph_256
      : ClangBuiltin<"__builtin_ia32_rcpph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rcp_ph_512
      : ClangBuiltin<"__builtin_ia32_rcpph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_rcp_sh
      : ClangBuiltin<"__builtin_ia32_rcpsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_reduce_ph_128
      : ClangBuiltin<"__builtin_ia32_reduceph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_reduce_ph_256
      : ClangBuiltin<"__builtin_ia32_reduceph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_i32_ty, llvm_v16f16_ty,
                                llvm_i16_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_reduce_ph_512
      : ClangBuiltin<"__builtin_ia32_reduceph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_i32_ty, llvm_v32f16_ty,
                                llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>>,
                                ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_reduce_sh
      : ClangBuiltin<"__builtin_ia32_reducesh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>>,
                                ImmArg<ArgIndex<5>> ]>;
  def int_x86_avx512fp16_fpclass_ph_128
      : DefaultAttrsIntrinsic<[ llvm_v8i1_ty ], [ llvm_v8f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_fpclass_ph_256
      : DefaultAttrsIntrinsic<[ llvm_v16i1_ty ],
                              [ llvm_v16f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_fpclass_ph_512
      : DefaultAttrsIntrinsic<[ llvm_v32i1_ty ],
                              [ llvm_v32f16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_fpclass_sh
      : ClangBuiltin<"__builtin_ia32_fpclasssh_mask">,
        DefaultAttrsIntrinsic<[ llvm_i8_ty ],
                              [ llvm_v8f16_ty, llvm_i32_ty, llvm_i8_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_getexp_ph_128
      : ClangBuiltin<"__builtin_ia32_getexpph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_getexp_ph_256
      : ClangBuiltin<"__builtin_ia32_getexpph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty, llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_getexp_ph_512
      : ClangBuiltin<"__builtin_ia32_getexpph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_mask_getexp_sh
      : ClangBuiltin<"__builtin_ia32_getexpsh128_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_getmant_ph_128
      : ClangBuiltin<"__builtin_ia32_getmantph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_getmant_ph_256
      : ClangBuiltin<"__builtin_ia32_getmantph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_i32_ty, llvm_v16f16_ty,
                                llvm_i16_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_getmant_ph_512
      : ClangBuiltin<"__builtin_ia32_getmantph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_i32_ty, llvm_v32f16_ty,
                                llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>>,
                                ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_getmant_sh
      : ClangBuiltin<"__builtin_ia32_getmantsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_i32_ty,
                                llvm_v8f16_ty, llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<2>>,
                                ImmArg<ArgIndex<5>> ]>;
  def int_x86_avx512fp16_mask_rndscale_ph_128
      : ClangBuiltin<"__builtin_ia32_rndscaleph_128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_i32_ty, llvm_v8f16_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_rndscale_ph_256
      : ClangBuiltin<"__builtin_ia32_rndscaleph_256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_i32_ty, llvm_v16f16_ty,
                                llvm_i16_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>> ]>;
  def int_x86_avx512fp16_mask_rndscale_ph_512
      : ClangBuiltin<"__builtin_ia32_rndscaleph_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_i32_ty, llvm_v32f16_ty,
                                llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<1>>,
                                ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_rndscale_sh
      : ClangBuiltin<"__builtin_ia32_rndscalesh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>>,
                                ImmArg<ArgIndex<5>> ]>;
  def int_x86_avx512fp16_mask_scalef_ph_128
      : ClangBuiltin<"__builtin_ia32_scalefph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_scalef_ph_256
      : ClangBuiltin<"__builtin_ia32_scalefph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty, llvm_v16f16_ty,
                                llvm_i16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_scalef_ph_512
      : ClangBuiltin<"__builtin_ia32_scalefph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_v32f16_ty,
                                llvm_i32_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_scalef_sh
      : ClangBuiltin<"__builtin_ia32_scalefsh_round_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;

  def int_x86_avx512fp16_vfmadd_ph_512
      : DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_v32f16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_vfmaddsub_ph_128
      : ClangBuiltin<"__builtin_ia32_vfmaddsubph">,
        DefaultAttrsIntrinsic<[ llvm_v8f16_ty ],
                              [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_vfmaddsub_ph_256
      : ClangBuiltin<"__builtin_ia32_vfmaddsubph256">,
        DefaultAttrsIntrinsic<[ llvm_v16f16_ty ],
                              [ llvm_v16f16_ty, llvm_v16f16_ty,
                                llvm_v16f16_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_vfmaddsub_ph_512
      : DefaultAttrsIntrinsic<[ llvm_v32f16_ty ],
                              [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_v32f16_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
  def int_x86_avx512fp16_vfmadd_f16
      : DefaultAttrsIntrinsic<[ llvm_half_ty ],
                              [ llvm_half_ty, llvm_half_ty, llvm_half_ty,
                                llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<3>> ]>;
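
  // Editorial note: the *_cph and *_csh definitions below implement complex
  // half-precision arithmetic.  Each complex value is a (real, imaginary)
  // pair of f16 elements, i.e. 32 bits, which is why the IR models these
  // operands with f32-width vector types (e.g. llvm_v4f32_ty carries four
  // packed complex halves).  The "c" variants additionally conjugate one of
  // the multiplicands.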

  def int_x86_avx512fp16_mask_vfcmadd_cph_128
      : ClangBuiltin<"__builtin_ia32_vfcmaddcph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_maskz_vfcmadd_cph_128
      : ClangBuiltin<"__builtin_ia32_vfcmaddcph128_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfcmadd_cph_256
      : ClangBuiltin<"__builtin_ia32_vfcmaddcph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_maskz_vfcmadd_cph_256
      : ClangBuiltin<"__builtin_ia32_vfcmaddcph256_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfcmadd_cph_512
      : ClangBuiltin<"__builtin_ia32_vfcmaddcph512_mask3">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                                llvm_i16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_maskz_vfcmadd_cph_512
      : ClangBuiltin<"__builtin_ia32_vfcmaddcph512_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                                llvm_i16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfmadd_cph_128
      : ClangBuiltin<"__builtin_ia32_vfmaddcph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_maskz_vfmadd_cph_128
      : ClangBuiltin<"__builtin_ia32_vfmaddcph128_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfmadd_cph_256
      : ClangBuiltin<"__builtin_ia32_vfmaddcph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_maskz_vfmadd_cph_256
      : ClangBuiltin<"__builtin_ia32_vfmaddcph256_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfmadd_cph_512
      : ClangBuiltin<"__builtin_ia32_vfmaddcph512_mask3">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                                llvm_i16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_maskz_vfmadd_cph_512
      : ClangBuiltin<"__builtin_ia32_vfmaddcph512_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                                llvm_i16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfmadd_csh
      : ClangBuiltin<"__builtin_ia32_vfmaddcsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_maskz_vfmadd_csh
      : ClangBuiltin<"__builtin_ia32_vfmaddcsh_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfcmadd_csh
      : ClangBuiltin<"__builtin_ia32_vfcmaddcsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_maskz_vfcmadd_csh
      : ClangBuiltin<"__builtin_ia32_vfcmaddcsh_maskz">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfmul_cph_128
      : ClangBuiltin<"__builtin_ia32_vfmulcph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfcmul_cph_128
      : ClangBuiltin<"__builtin_ia32_vfcmulcph128_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfmul_cph_256
      : ClangBuiltin<"__builtin_ia32_vfmulcph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfcmul_cph_256
      : ClangBuiltin<"__builtin_ia32_vfcmulcph256_mask">,
        DefaultAttrsIntrinsic<[ llvm_v8f32_ty ],
                              [ llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty,
                                llvm_i8_ty ],
                              [ IntrNoMem ]>;
  def int_x86_avx512fp16_mask_vfmul_cph_512
      : ClangBuiltin<"__builtin_ia32_vfmulcph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                                llvm_i16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfcmul_cph_512
      : ClangBuiltin<"__builtin_ia32_vfcmulcph512_mask">,
        DefaultAttrsIntrinsic<[ llvm_v16f32_ty ],
                              [ llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty,
                                llvm_i16_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfmul_csh
      : ClangBuiltin<"__builtin_ia32_vfmulcsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
  def int_x86_avx512fp16_mask_vfcmul_csh
      : ClangBuiltin<"__builtin_ia32_vfcmulcsh_mask">,
        DefaultAttrsIntrinsic<[ llvm_v4f32_ty ],
                              [ llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
                                llvm_i8_ty, llvm_i32_ty ],
                              [ IntrNoMem, ImmArg<ArgIndex<4>> ]>;
}

//===- Dominators.h - Dominator Info Calculation ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DominatorTree class, which provides fast and efficient
// dominance queries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_DOMINATORS_H
#define LLVM_IR_DOMINATORS_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/CFGDiff.h"
#include "llvm/Support/CFGUpdate.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include <algorithm>
#include <utility>
#include <vector>

namespace llvm {

class Function;
class Instruction;
class Module;
class Value;
class raw_ostream;
template <class GraphType> struct GraphTraits;

extern template class DomTreeNodeBase<BasicBlock>;
extern template class DominatorTreeBase<BasicBlock, false>; // DomTree
extern template class DominatorTreeBase<BasicBlock, true>; // PostDomTree

extern template class cfg::Update<BasicBlock *>;

namespace DomTreeBuilder {
using BBDomTree = DomTreeBase<BasicBlock>;
using BBPostDomTree = PostDomTreeBase<BasicBlock>;

using BBUpdates = ArrayRef<llvm::cfg::Update<BasicBlock *>>;

using BBDomTreeGraphDiff = GraphDiff<BasicBlock *, false>;
using BBPostDomTreeGraphDiff = GraphDiff<BasicBlock *, true>;

extern template void Calculate<BBDomTree>(BBDomTree &DT);
extern template void CalculateWithUpdates<BBDomTree>(BBDomTree &DT,
                                                     BBUpdates U);

extern template void Calculate<BBPostDomTree>(BBPostDomTree &DT);

extern template void InsertEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
                                           BasicBlock *To);
extern template void InsertEdge<BBPostDomTree>(BBPostDomTree &DT,
                                               BasicBlock *From,
                                               BasicBlock *To);

extern template void DeleteEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
                                           BasicBlock *To);
extern template void DeleteEdge<BBPostDomTree>(BBPostDomTree &DT,
                                               BasicBlock *From,
                                               BasicBlock *To);

extern template void ApplyUpdates<BBDomTree>(BBDomTree &DT,
                                             BBDomTreeGraphDiff &,
                                             BBDomTreeGraphDiff *);
extern template void ApplyUpdates<BBPostDomTree>(BBPostDomTree &DT,
                                                 BBPostDomTreeGraphDiff &,
                                                 BBPostDomTreeGraphDiff *);

extern template bool Verify<BBDomTree>(const BBDomTree &DT,
                                       BBDomTree::VerificationLevel VL);
extern template bool Verify<BBPostDomTree>(const BBPostDomTree &DT,
                                           BBPostDomTree::VerificationLevel VL);
}  // namespace DomTreeBuilder

using DomTreeNode = DomTreeNodeBase<BasicBlock>;

class BasicBlockEdge {
  const BasicBlock *Start;
  const BasicBlock *End;

public:
  BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
    Start(Start_), End(End_) {}

  BasicBlockEdge(const std::pair<BasicBlock *, BasicBlock *> &Pair)
      : Start(Pair.first), End(Pair.second) {}

  BasicBlockEdge(const std::pair<const BasicBlock *, const BasicBlock *> &Pair)
      : Start(Pair.first), End(Pair.second) {}

  const BasicBlock *getStart() const {
    return Start;
  }

  const BasicBlock *getEnd() const {
    return End;
  }

  /// Check if this is the only edge between Start and End.
  bool isSingleEdge() const;
};

template <> struct DenseMapInfo<BasicBlockEdge> {
  using BBInfo = DenseMapInfo<const BasicBlock *>;

  static unsigned getHashValue(const BasicBlockEdge *V);

  static inline BasicBlockEdge getEmptyKey() {
    return BasicBlockEdge(BBInfo::getEmptyKey(), BBInfo::getEmptyKey());
  }

  static inline BasicBlockEdge getTombstoneKey() {
    return BasicBlockEdge(BBInfo::getTombstoneKey(), BBInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const BasicBlockEdge &Edge) {
    return hash_combine(BBInfo::getHashValue(Edge.getStart()),
                        BBInfo::getHashValue(Edge.getEnd()));
  }

  static bool isEqual(const BasicBlockEdge &LHS, const BasicBlockEdge &RHS) {
    return BBInfo::isEqual(LHS.getStart(), RHS.getStart()) &&
           BBInfo::isEqual(LHS.getEnd(), RHS.getEnd());
  }
};

/// Concrete subclass of DominatorTreeBase that is used to compute a
/// normal dominator tree.
///
/// Definition: A block is said to be forward statically reachable if there is
/// a path from the entry of the function to the block.  A statically reachable
/// block may become statically unreachable during optimization.
///
/// A forward unreachable block may appear in the dominator tree, or it may
/// not.  If it does, dominance queries will return results as if all reachable
/// blocks dominate it.  When asking for a Node corresponding to a potentially
/// unreachable block, calling code must handle the case where the block was
/// unreachable and the result of getNode() is nullptr.
///
/// Generally, a block known to be unreachable when the dominator tree is
/// constructed will not be in the tree.  One which becomes unreachable after
/// the dominator tree is initially constructed may still exist in the tree,
/// even if the tree is properly updated. Calling code should not rely on the
/// preceding statements; this is stated only to assist human understanding.
class DominatorTree : public DominatorTreeBase<BasicBlock, false> {
 public:
  using Base = DominatorTreeBase<BasicBlock, false>;

  DominatorTree() = default;
  explicit DominatorTree(Function &F) { recalculate(F); }
  explicit DominatorTree(DominatorTree &DT, DomTreeBuilder::BBUpdates U) {
    recalculate(*DT.Parent, U);
  }

  /// Handle invalidation explicitly.
  bool invalidate(Function &F, const PreservedAnalyses &PA,
                  FunctionAnalysisManager::Invalidator &);

  // Ensure base-class overloads are visible.
  using Base::dominates;

  /// Return true if the (end of the) basic block BB dominates the use U.
  bool dominates(const BasicBlock *BB, const Use &U) const;

  /// Return true if value Def dominates use U, in the sense that Def is
  /// available at U, and could be substituted as the used value without
  /// violating the SSA dominance requirement.
  ///
  /// In particular, it is worth noting that:
  ///  * Non-instruction Defs dominate everything.
  ///  * Def does not dominate a use in Def itself (outside of degenerate cases
  ///    like unreachable code or trivial phi cycles).
  ///  * Invoke Defs only dominate uses in their default destination.
  bool dominates(const Value *Def, const Use &U) const;
  /// Return true if value Def dominates all possible uses inside instruction
  /// User. Same comments as for the Use-based API apply.
  bool dominates(const Value *Def, const Instruction *User) const;

  /// Returns true if Def would dominate a use in any instruction in BB.
  /// If Def is an instruction in BB, then Def does not dominate BB.
  ///
  /// Does not accept Value to avoid ambiguity with dominance checks between
  /// two basic blocks.
  bool dominates(const Instruction *Def, const BasicBlock *BB) const;

  /// Return true if an edge dominates a use.
  ///
  /// If BBE is not a unique edge between start and end of the edge, it can
  /// never dominate the use.
  bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
  bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
  /// Returns true if edge \p BBE1 dominates edge \p BBE2.
  bool dominates(const BasicBlockEdge &BBE1, const BasicBlockEdge &BBE2) const;

  // Ensure base class overloads are visible.
  using Base::isReachableFromEntry;

  /// Provide an overload for a Use.
  bool isReachableFromEntry(const Use &U) const;

  // Ensure base class overloads are visible.
  using Base::findNearestCommonDominator;

  /// Find the nearest instruction I that dominates both I1 and I2, in the sense
  /// that a result produced before I will be available at both I1 and I2.
  Instruction *findNearestCommonDominator(Instruction *I1,
                                          Instruction *I2) const;

  // Pop up a GraphViz/gv window with the Dominator Tree rendered using `dot`.
  void viewGraph(const Twine &Name, const Twine &Title);
  void viewGraph();
};
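
// Illustrative usage (editorial sketch, not part of the original header;
// DefValue and UseInst are placeholder names).  Per the class comment,
// getNode() may return nullptr for a block that was unreachable when the
// tree was built, so callers must check the result:
//
//   DominatorTree DT(F);
//   if (DomTreeNode *N = DT.getNode(&BB))
//     ;                                  // BB is reachable and in the tree
//   if (DT.dominates(DefValue, UseInst))
//     ;                                  // Def is available at the use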

//===-------------------------------------
// DominatorTree GraphTraits specializations so the DominatorTree can be
// iterable by generic graph iterators.

template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
  using NodeRef = Node *;
  using ChildIteratorType = ChildIterator;
  using nodes_iterator = df_iterator<Node *, df_iterator_default_set<Node*>>;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->end(); }

  static nodes_iterator nodes_begin(NodeRef N) {
    return df_begin(getEntryNode(N));
  }

  static nodes_iterator nodes_end(NodeRef N) { return df_end(getEntryNode(N)); }
};

template <>
struct GraphTraits<DomTreeNode *>
    : public DomTreeGraphTraitsBase<DomTreeNode, DomTreeNode::const_iterator> {
};

template <>
struct GraphTraits<const DomTreeNode *>
    : public DomTreeGraphTraitsBase<const DomTreeNode,
                                    DomTreeNode::const_iterator> {};

template <> struct GraphTraits<DominatorTree*>
  : public GraphTraits<DomTreeNode*> {
  static NodeRef getEntryNode(DominatorTree *DT) { return DT->getRootNode(); }

  static nodes_iterator nodes_begin(DominatorTree *N) {
    return df_begin(getEntryNode(N));
  }

  static nodes_iterator nodes_end(DominatorTree *N) {
    return df_end(getEntryNode(N));
  }
};

/// Analysis pass which computes a \c DominatorTree.
class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
  friend AnalysisInfoMixin<DominatorTreeAnalysis>;
  static AnalysisKey Key;

public:
  /// Provide the result typedef for this analysis pass.
  using Result = DominatorTree;

  /// Run the analysis pass over a function and produce a dominator tree.
  DominatorTree run(Function &F, FunctionAnalysisManager &);
};
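
// Illustrative new-pass-manager usage (editorial sketch; MyPass is a
// placeholder name):
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
//     ...
//   }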

/// Printer pass for the \c DominatorTree.
class DominatorTreePrinterPass
    : public PassInfoMixin<DominatorTreePrinterPass> {
  raw_ostream &OS;

public:
  explicit DominatorTreePrinterPass(raw_ostream &OS);

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for the \c DominatorTree.
struct DominatorTreeVerifierPass : PassInfoMixin<DominatorTreeVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Enables verification of dominator trees.
///
/// This check is expensive and is disabled by default.  `-verify-dom-info`
/// allows selectively enabling the check without needing to recompile.
extern bool VerifyDomInfo;

/// Legacy analysis pass which computes a \c DominatorTree.
class DominatorTreeWrapperPass : public FunctionPass {
  DominatorTree DT;

public:
  static char ID;

  DominatorTreeWrapperPass();

  DominatorTree &getDomTree() { return DT; }
  const DominatorTree &getDomTree() const { return DT; }

  bool runOnFunction(Function &F) override;

  void verifyAnalysis() const override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }

  void releaseMemory() override { DT.reset(); }

  void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
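
// Illustrative legacy-pass-manager usage (editorial sketch; MyLegacyPass is
// a placeholder name).  The dependency is declared through AnalysisUsage and
// the tree is fetched from the wrapper pass:
//
//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<DominatorTreeWrapperPass>();
//   }
//   bool MyLegacyPass::runOnFunction(Function &F) {
//     DominatorTree &DT =
//         getAnalysis<DominatorTreeWrapperPass>().getDomTree();
//     ...
//   }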
} // end namespace llvm

#endif // LLVM_IR_DOMINATORS_H

/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_R600_ENUMS_H
#define LLVM_IR_INTRINSIC_R600_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum R600Intrinsics : unsigned {
// Enum values for intrinsics
    r600_cube = 8161,                                 // llvm.r600.cube
    r600_ddx,                                  // llvm.r600.ddx
    r600_ddy,                                  // llvm.r600.ddy
    r600_dot4,                                 // llvm.r600.dot4
    r600_group_barrier,                        // llvm.r600.group.barrier
    r600_implicitarg_ptr,                      // llvm.r600.implicitarg.ptr
    r600_kill,                                 // llvm.r600.kill
    r600_rat_store_typed,                      // llvm.r600.rat.store.typed
    r600_read_global_size_x,                   // llvm.r600.read.global.size.x
    r600_read_global_size_y,                   // llvm.r600.read.global.size.y
    r600_read_global_size_z,                   // llvm.r600.read.global.size.z
    r600_read_local_size_x,                    // llvm.r600.read.local.size.x
    r600_read_local_size_y,                    // llvm.r600.read.local.size.y
    r600_read_local_size_z,                    // llvm.r600.read.local.size.z
    r600_read_ngroups_x,                       // llvm.r600.read.ngroups.x
    r600_read_ngroups_y,                       // llvm.r600.read.ngroups.y
    r600_read_ngroups_z,                       // llvm.r600.read.ngroups.z
    r600_read_tgid_x,                          // llvm.r600.read.tgid.x
    r600_read_tgid_y,                          // llvm.r600.read.tgid.y
    r600_read_tgid_z,                          // llvm.r600.read.tgid.z
    r600_read_tidig_x,                         // llvm.r600.read.tidig.x
    r600_read_tidig_y,                         // llvm.r600.read.tidig.y
    r600_read_tidig_z,                         // llvm.r600.read.tidig.z
    r600_recipsqrt_clamped,                    // llvm.r600.recipsqrt.clamped
    r600_recipsqrt_ieee,                       // llvm.r600.recipsqrt.ieee
    r600_store_stream_output,                  // llvm.r600.store.stream.output
    r600_store_swizzle,                        // llvm.r600.store.swizzle
    r600_tex,                                  // llvm.r600.tex
    r600_texc,                                 // llvm.r600.texc
    r600_txb,                                  // llvm.r600.txb
    r600_txbc,                                 // llvm.r600.txbc
    r600_txf,                                  // llvm.r600.txf
    r600_txl,                                  // llvm.r600.txl
    r600_txlc,                                 // llvm.r600.txlc
    r600_txq,                                  // llvm.r600.txq
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif

//===- AttributesAMDGPU.td - Defines AMDGPU attributes -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines AMDGPU specific attributes.
//
//===----------------------------------------------------------------------===//

def AMDGPUUnsafeFPAtomics : StrBoolAttr<"amdgpu-unsafe-fp-atomics">;
def : MergeRule<"setAND<AMDGPUUnsafeFPAtomicsAttr>">;
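// Editorial note: merging with setAND keeps the conservative result when two
// functions are combined (e.g. by the inliner): the merged function reports
// "amdgpu-unsafe-fp-atomics"="true" only if both inputs did.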

//===- NoFolder.h - Constant folding helper ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the NoFolder class, a helper for IRBuilder.  It provides
// IRBuilder with a set of methods for creating unfolded constants.  This is
// useful for learners trying to understand how LLVM IR works, and who don't
// want details to be hidden by the constant folder.  For general constant
// creation and folding, use ConstantExpr and the routines in
// llvm/Analysis/ConstantFolding.h.
//
// Note: since it is not actually possible to create unfolded constants, this
// class returns instructions rather than constants.
//
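// Illustrative usage (editorial sketch): pass NoFolder as IRBuilder's folder
// template parameter so every operation is materialized as an instruction:
//
//   IRBuilder<NoFolder> Builder(BB);
//   Value *Sum = Builder.CreateAdd(L, R); // always emits a real 'add'
//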
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_NOFOLDER_H
#define LLVM_IR_NOFOLDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/IRBuilderFolder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"

namespace llvm {

/// NoFolder - Create "constants" (actually, instructions) with no folding.
class NoFolder final : public IRBuilderFolder {
  virtual void anchor();

public:
  explicit NoFolder() = default;

  //===--------------------------------------------------------------------===//
  // Value-based folders.
  //
  // Return an existing value or a constant if the operation can be simplified.
  // Otherwise return nullptr.
  //===--------------------------------------------------------------------===//

  Value *FoldBinOp(Instruction::BinaryOps Opc, Value *LHS,
                   Value *RHS) const override {
    return nullptr;
  }

  Value *FoldExactBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                        bool IsExact) const override {
    return nullptr;
  }

  Value *FoldNoWrapBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                         bool HasNUW, bool HasNSW) const override {
    return nullptr;
  }

  Value *FoldBinOpFMF(Instruction::BinaryOps Opc, Value *LHS, Value *RHS,
                      FastMathFlags FMF) const override {
    return nullptr;
  }

  Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
                     FastMathFlags FMF) const override {
    return nullptr;
  }

  Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
    return nullptr;
  }

  Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                 bool IsInBounds = false) const override {
    return nullptr;
  }

  Value *FoldSelect(Value *C, Value *True, Value *False) const override {
    return nullptr;
  }

  Value *FoldExtractValue(Value *Agg,
                          ArrayRef<unsigned> IdxList) const override {
    return nullptr;
  }

  Value *FoldInsertValue(Value *Agg, Value *Val,
                         ArrayRef<unsigned> IdxList) const override {
    return nullptr;
  }

  Value *FoldExtractElement(Value *Vec, Value *Idx) const override {
    return nullptr;
  }

  Value *FoldInsertElement(Value *Vec, Value *NewElt,
                           Value *Idx) const override {
    return nullptr;
  }

  Value *FoldShuffleVector(Value *V1, Value *V2,
                           ArrayRef<int> Mask) const override {
    return nullptr;
  }

  //===--------------------------------------------------------------------===//
  // Cast/Conversion Operators
  //===--------------------------------------------------------------------===//

  Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
                          Type *DestTy) const override {
    return CastInst::Create(Op, C, DestTy);
  }

  Instruction *CreatePointerCast(Constant *C, Type *DestTy) const override {
    return CastInst::CreatePointerCast(C, DestTy);
  }

  Instruction *CreatePointerBitCastOrAddrSpaceCast(
      Constant *C, Type *DestTy) const override {
    return CastInst::CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
  }

  Instruction *CreateIntCast(Constant *C, Type *DestTy,
                             bool isSigned) const override {
    return CastInst::CreateIntegerCast(C, DestTy, isSigned);
  }

  Instruction *CreateFPCast(Constant *C, Type *DestTy) const override {
    return CastInst::CreateFPCast(C, DestTy);
  }

  Instruction *CreateBitCast(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::BitCast, C, DestTy);
  }

  Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::IntToPtr, C, DestTy);
  }

  Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const override {
    return CreateCast(Instruction::PtrToInt, C, DestTy);
  }

  Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
    return CastInst::CreateZExtOrBitCast(C, DestTy);
  }

  Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
    return CastInst::CreateSExtOrBitCast(C, DestTy);
  }

  Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
    return CastInst::CreateTruncOrBitCast(C, DestTy);
  }

  //===--------------------------------------------------------------------===//
  // Compare Instructions
  //===--------------------------------------------------------------------===//

  Instruction *CreateFCmp(CmpInst::Predicate P,
                          Constant *LHS, Constant *RHS) const override {
    return new FCmpInst(P, LHS, RHS);
  }
};

} // end namespace llvm

#endif // LLVM_IR_NOFOLDER_H

/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifdef GET_INTRINSIC_ENUM_VALUES
// Enum values for intrinsics
    abs = 1,                                       // llvm.abs
    addressofreturnaddress,                    // llvm.addressofreturnaddress
    adjust_trampoline,                         // llvm.adjust.trampoline
    annotation,                                // llvm.annotation
    arithmetic_fence,                          // llvm.arithmetic.fence
    asan_check_memaccess,                      // llvm.asan.check.memaccess
    assume,                                    // llvm.assume
    bitreverse,                                // llvm.bitreverse
    bswap,                                     // llvm.bswap
    call_preallocated_arg,                     // llvm.call.preallocated.arg
    call_preallocated_setup,                   // llvm.call.preallocated.setup
    call_preallocated_teardown,                // llvm.call.preallocated.teardown
    callbr_landingpad,                         // llvm.callbr.landingpad
    canonicalize,                              // llvm.canonicalize
    ceil,                                      // llvm.ceil
    clear_cache,                               // llvm.clear_cache
    codeview_annotation,                       // llvm.codeview.annotation
    convert_from_fp16,                         // llvm.convert.from.fp16
    convert_to_fp16,                           // llvm.convert.to.fp16
    copysign,                                  // llvm.copysign
    coro_align,                                // llvm.coro.align
    coro_alloc,                                // llvm.coro.alloc
    coro_alloca_alloc,                         // llvm.coro.alloca.alloc
    coro_alloca_free,                          // llvm.coro.alloca.free
    coro_alloca_get,                           // llvm.coro.alloca.get
    coro_async_context_alloc,                  // llvm.coro.async.context.alloc
    coro_async_context_dealloc,                // llvm.coro.async.context.dealloc
    coro_async_resume,                         // llvm.coro.async.resume
    coro_async_size_replace,                   // llvm.coro.async.size.replace
    coro_begin,                                // llvm.coro.begin
    coro_destroy,                              // llvm.coro.destroy
    coro_done,                                 // llvm.coro.done
    coro_end,                                  // llvm.coro.end
    coro_end_async,                            // llvm.coro.end.async
    coro_frame,                                // llvm.coro.frame
    coro_free,                                 // llvm.coro.free
    coro_id,                                   // llvm.coro.id
    coro_id_async,                             // llvm.coro.id.async
    coro_id_retcon,                            // llvm.coro.id.retcon
    coro_id_retcon_once,                       // llvm.coro.id.retcon.once
    coro_noop,                                 // llvm.coro.noop
    coro_prepare_async,                        // llvm.coro.prepare.async
    coro_prepare_retcon,                       // llvm.coro.prepare.retcon
    coro_promise,                              // llvm.coro.promise
    coro_resume,                               // llvm.coro.resume
    coro_save,                                 // llvm.coro.save
    coro_size,                                 // llvm.coro.size
    coro_subfn_addr,                           // llvm.coro.subfn.addr
    coro_suspend,                              // llvm.coro.suspend
    coro_suspend_async,                        // llvm.coro.suspend.async
    coro_suspend_retcon,                       // llvm.coro.suspend.retcon
    cos,                                       // llvm.cos
    ctlz,                                      // llvm.ctlz
    ctpop,                                     // llvm.ctpop
    cttz,                                      // llvm.cttz
    dbg_assign,                                // llvm.dbg.assign
    dbg_declare,                               // llvm.dbg.declare
    dbg_label,                                 // llvm.dbg.label
    dbg_value,                                 // llvm.dbg.value
    debugtrap,                                 // llvm.debugtrap
    donothing,                                 // llvm.donothing
    eh_dwarf_cfa,                              // llvm.eh.dwarf.cfa
    eh_exceptioncode,                          // llvm.eh.exceptioncode
    eh_exceptionpointer,                       // llvm.eh.exceptionpointer
    eh_recoverfp,                              // llvm.eh.recoverfp
    eh_return_i32,                             // llvm.eh.return.i32
    eh_return_i64,                             // llvm.eh.return.i64
    eh_sjlj_callsite,                          // llvm.eh.sjlj.callsite
    eh_sjlj_functioncontext,                   // llvm.eh.sjlj.functioncontext
    eh_sjlj_longjmp,                           // llvm.eh.sjlj.longjmp
    eh_sjlj_lsda,                              // llvm.eh.sjlj.lsda
    eh_sjlj_setjmp,                            // llvm.eh.sjlj.setjmp
    eh_sjlj_setup_dispatch,                    // llvm.eh.sjlj.setup.dispatch
    eh_typeid_for,                             // llvm.eh.typeid.for
    eh_unwind_init,                            // llvm.eh.unwind.init
    exp,                                       // llvm.exp
    exp2,                                      // llvm.exp2
    expect,                                    // llvm.expect
    expect_with_probability,                   // llvm.expect.with.probability
    experimental_constrained_ceil,             // llvm.experimental.constrained.ceil
    experimental_constrained_cos,              // llvm.experimental.constrained.cos
    experimental_constrained_exp,              // llvm.experimental.constrained.exp
    experimental_constrained_exp2,             // llvm.experimental.constrained.exp2
    experimental_constrained_fadd,             // llvm.experimental.constrained.fadd
    experimental_constrained_fcmp,             // llvm.experimental.constrained.fcmp
    experimental_constrained_fcmps,            // llvm.experimental.constrained.fcmps
    experimental_constrained_fdiv,             // llvm.experimental.constrained.fdiv
    experimental_constrained_floor,            // llvm.experimental.constrained.floor
    experimental_constrained_fma,              // llvm.experimental.constrained.fma
    experimental_constrained_fmul,             // llvm.experimental.constrained.fmul
    experimental_constrained_fmuladd,          // llvm.experimental.constrained.fmuladd
    experimental_constrained_fpext,            // llvm.experimental.constrained.fpext
    experimental_constrained_fptosi,           // llvm.experimental.constrained.fptosi
    experimental_constrained_fptoui,           // llvm.experimental.constrained.fptoui
    experimental_constrained_fptrunc,          // llvm.experimental.constrained.fptrunc
    experimental_constrained_frem,             // llvm.experimental.constrained.frem
    experimental_constrained_fsub,             // llvm.experimental.constrained.fsub
    experimental_constrained_ldexp,            // llvm.experimental.constrained.ldexp
    experimental_constrained_llrint,           // llvm.experimental.constrained.llrint
    experimental_constrained_llround,          // llvm.experimental.constrained.llround
    experimental_constrained_log,              // llvm.experimental.constrained.log
    experimental_constrained_log10,            // llvm.experimental.constrained.log10
    experimental_constrained_log2,             // llvm.experimental.constrained.log2
    experimental_constrained_lrint,            // llvm.experimental.constrained.lrint
    experimental_constrained_lround,           // llvm.experimental.constrained.lround
    experimental_constrained_maximum,          // llvm.experimental.constrained.maximum
    experimental_constrained_maxnum,           // llvm.experimental.constrained.maxnum
    experimental_constrained_minimum,          // llvm.experimental.constrained.minimum
    experimental_constrained_minnum,           // llvm.experimental.constrained.minnum
    experimental_constrained_nearbyint,        // llvm.experimental.constrained.nearbyint
    experimental_constrained_pow,              // llvm.experimental.constrained.pow
    experimental_constrained_powi,             // llvm.experimental.constrained.powi
    experimental_constrained_rint,             // llvm.experimental.constrained.rint
    experimental_constrained_round,            // llvm.experimental.constrained.round
    experimental_constrained_roundeven,        // llvm.experimental.constrained.roundeven
    experimental_constrained_sin,              // llvm.experimental.constrained.sin
    experimental_constrained_sitofp,           // llvm.experimental.constrained.sitofp
    experimental_constrained_sqrt,             // llvm.experimental.constrained.sqrt
    experimental_constrained_trunc,            // llvm.experimental.constrained.trunc
    experimental_constrained_uitofp,           // llvm.experimental.constrained.uitofp
    experimental_convergence_anchor,           // llvm.experimental.convergence.anchor
    experimental_convergence_entry,            // llvm.experimental.convergence.entry
    experimental_convergence_loop,             // llvm.experimental.convergence.loop
    experimental_deoptimize,                   // llvm.experimental.deoptimize
    experimental_gc_get_pointer_base,          // llvm.experimental.gc.get.pointer.base
    experimental_gc_get_pointer_offset,        // llvm.experimental.gc.get.pointer.offset
    experimental_gc_relocate,                  // llvm.experimental.gc.relocate
    experimental_gc_result,                    // llvm.experimental.gc.result
    experimental_gc_statepoint,                // llvm.experimental.gc.statepoint
    experimental_get_vector_length,            // llvm.experimental.get.vector.length
    experimental_guard,                        // llvm.experimental.guard
    experimental_noalias_scope_decl,           // llvm.experimental.noalias.scope.decl
    experimental_patchpoint_i64,               // llvm.experimental.patchpoint.i64
    experimental_patchpoint_void,              // llvm.experimental.patchpoint.void
    experimental_stackmap,                     // llvm.experimental.stackmap
    experimental_stepvector,                   // llvm.experimental.stepvector
    experimental_vector_deinterleave2,         // llvm.experimental.vector.deinterleave2
    experimental_vector_interleave2,           // llvm.experimental.vector.interleave2
    experimental_vector_reverse,               // llvm.experimental.vector.reverse
    experimental_vector_splice,                // llvm.experimental.vector.splice
    experimental_vp_splice,                    // llvm.experimental.vp.splice
    experimental_vp_strided_load,              // llvm.experimental.vp.strided.load
    experimental_vp_strided_store,             // llvm.experimental.vp.strided.store
    experimental_widenable_condition,          // llvm.experimental.widenable.condition
    fabs,                                      // llvm.fabs
    floor,                                     // llvm.floor
    fma,                                       // llvm.fma
    fmuladd,                                   // llvm.fmuladd
    fptosi_sat,                                // llvm.fptosi.sat
    fptoui_sat,                                // llvm.fptoui.sat
    fptrunc_round,                             // llvm.fptrunc.round
    frameaddress,                              // llvm.frameaddress
    frexp,                                     // llvm.frexp
    fshl,                                      // llvm.fshl
    fshr,                                      // llvm.fshr
    gcread,                                    // llvm.gcread
    gcroot,                                    // llvm.gcroot
    gcwrite,                                   // llvm.gcwrite
    get_active_lane_mask,                      // llvm.get.active.lane.mask
    get_dynamic_area_offset,                   // llvm.get.dynamic.area.offset
    get_fpenv,                                 // llvm.get.fpenv
    get_rounding,                              // llvm.get.rounding
    hwasan_check_memaccess,                    // llvm.hwasan.check.memaccess
    hwasan_check_memaccess_shortgranules,      // llvm.hwasan.check.memaccess.shortgranules
    icall_branch_funnel,                       // llvm.icall.branch.funnel
    init_trampoline,                           // llvm.init.trampoline
    instrprof_cover,                           // llvm.instrprof.cover
    instrprof_increment,                       // llvm.instrprof.increment
    instrprof_increment_step,                  // llvm.instrprof.increment.step
    instrprof_timestamp,                       // llvm.instrprof.timestamp
    instrprof_value_profile,                   // llvm.instrprof.value.profile
    invariant_end,                             // llvm.invariant.end
    invariant_start,                           // llvm.invariant.start
    is_constant,                               // llvm.is.constant
    is_fpclass,                                // llvm.is.fpclass
    launder_invariant_group,                   // llvm.launder.invariant.group
    ldexp,                                     // llvm.ldexp
    lifetime_end,                              // llvm.lifetime.end
    lifetime_start,                            // llvm.lifetime.start
    llrint,                                    // llvm.llrint
    llround,                                   // llvm.llround
    load_relative,                             // llvm.load.relative
    localaddress,                              // llvm.localaddress
    localescape,                               // llvm.localescape
    localrecover,                              // llvm.localrecover
    log,                                       // llvm.log
    log10,                                     // llvm.log10
    log2,                                      // llvm.log2
    loop_decrement,                            // llvm.loop.decrement
    loop_decrement_reg,                        // llvm.loop.decrement.reg
    lrint,                                     // llvm.lrint
    lround,                                    // llvm.lround
    masked_compressstore,                      // llvm.masked.compressstore
    masked_expandload,                         // llvm.masked.expandload
    masked_gather,                             // llvm.masked.gather
    masked_load,                               // llvm.masked.load
    masked_scatter,                            // llvm.masked.scatter
    masked_store,                              // llvm.masked.store
    matrix_column_major_load,                  // llvm.matrix.column.major.load
    matrix_column_major_store,                 // llvm.matrix.column.major.store
    matrix_multiply,                           // llvm.matrix.multiply
    matrix_transpose,                          // llvm.matrix.transpose
    maximum,                                   // llvm.maximum
    maxnum,                                    // llvm.maxnum
    memcpy,                                    // llvm.memcpy
    memcpy_element_unordered_atomic,           // llvm.memcpy.element.unordered.atomic
    memcpy_inline,                             // llvm.memcpy.inline
    memmove,                                   // llvm.memmove
    memmove_element_unordered_atomic,          // llvm.memmove.element.unordered.atomic
    memset,                                    // llvm.memset
    memset_element_unordered_atomic,           // llvm.memset.element.unordered.atomic
    memset_inline,                             // llvm.memset.inline
    minimum,                                   // llvm.minimum
    minnum,                                    // llvm.minnum
    nearbyint,                                 // llvm.nearbyint
    objc_arc_annotation_bottomup_bbend,        // llvm.objc.arc.annotation.bottomup.bbend
    objc_arc_annotation_bottomup_bbstart,      // llvm.objc.arc.annotation.bottomup.bbstart
    objc_arc_annotation_topdown_bbend,         // llvm.objc.arc.annotation.topdown.bbend
    objc_arc_annotation_topdown_bbstart,       // llvm.objc.arc.annotation.topdown.bbstart
    objc_autorelease,                          // llvm.objc.autorelease
    objc_autoreleasePoolPop,                   // llvm.objc.autoreleasePoolPop
    objc_autoreleasePoolPush,                  // llvm.objc.autoreleasePoolPush
    objc_autoreleaseReturnValue,               // llvm.objc.autoreleaseReturnValue
    objc_clang_arc_noop_use,                   // llvm.objc.clang.arc.noop.use
    objc_clang_arc_use,                        // llvm.objc.clang.arc.use
    objc_copyWeak,                             // llvm.objc.copyWeak
    objc_destroyWeak,                          // llvm.objc.destroyWeak
    objc_initWeak,                             // llvm.objc.initWeak
    objc_loadWeak,                             // llvm.objc.loadWeak
    objc_loadWeakRetained,                     // llvm.objc.loadWeakRetained
    objc_moveWeak,                             // llvm.objc.moveWeak
    objc_release,                              // llvm.objc.release
    objc_retain,                               // llvm.objc.retain
    objc_retain_autorelease,                   // llvm.objc.retain.autorelease
    objc_retainAutorelease,                    // llvm.objc.retainAutorelease
    objc_retainAutoreleaseReturnValue,         // llvm.objc.retainAutoreleaseReturnValue
    objc_retainAutoreleasedReturnValue,        // llvm.objc.retainAutoreleasedReturnValue
    objc_retainBlock,                          // llvm.objc.retainBlock
    objc_retainedObject,                       // llvm.objc.retainedObject
    objc_storeStrong,                          // llvm.objc.storeStrong
    objc_storeWeak,                            // llvm.objc.storeWeak
    objc_sync_enter,                           // llvm.objc.sync.enter
    objc_sync_exit,                            // llvm.objc.sync.exit
    objc_unretainedObject,                     // llvm.objc.unretainedObject
    objc_unretainedPointer,                    // llvm.objc.unretainedPointer
    objc_unsafeClaimAutoreleasedReturnValue,   // llvm.objc.unsafeClaimAutoreleasedReturnValue
    objectsize,                                // llvm.objectsize
    pcmarker,                                  // llvm.pcmarker
    pow,                                       // llvm.pow
    powi,                                      // llvm.powi
    prefetch,                                  // llvm.prefetch
    preserve_array_access_index,               // llvm.preserve.array.access.index
    preserve_struct_access_index,              // llvm.preserve.struct.access.index
    preserve_union_access_index,               // llvm.preserve.union.access.index
    pseudoprobe,                               // llvm.pseudoprobe
    ptr_annotation,                            // llvm.ptr.annotation
    ptrauth_auth,                              // llvm.ptrauth.auth
    ptrauth_blend,                             // llvm.ptrauth.blend
    ptrauth_resign,                            // llvm.ptrauth.resign
    ptrauth_sign,                              // llvm.ptrauth.sign
    ptrauth_sign_generic,                      // llvm.ptrauth.sign.generic
    ptrauth_strip,                             // llvm.ptrauth.strip
    ptrmask,                                   // llvm.ptrmask
    public_type_test,                          // llvm.public.type.test
    read_register,                             // llvm.read_register
    read_volatile_register,                    // llvm.read_volatile_register
    readcyclecounter,                          // llvm.readcyclecounter
    reset_fpenv,                               // llvm.reset.fpenv
    returnaddress,                             // llvm.returnaddress
    rint,                                      // llvm.rint
    round,                                     // llvm.round
    roundeven,                                 // llvm.roundeven
    sadd_sat,                                  // llvm.sadd.sat
    sadd_with_overflow,                        // llvm.sadd.with.overflow
    sdiv_fix,                                  // llvm.sdiv.fix
    sdiv_fix_sat,                              // llvm.sdiv.fix.sat
    seh_scope_begin,                           // llvm.seh.scope.begin
    seh_scope_end,                             // llvm.seh.scope.end
    seh_try_begin,                             // llvm.seh.try.begin
    seh_try_end,                               // llvm.seh.try.end
    set_fpenv,                                 // llvm.set.fpenv
    set_loop_iterations,                       // llvm.set.loop.iterations
    set_rounding,                              // llvm.set.rounding
    sideeffect,                                // llvm.sideeffect
    sin,                                       // llvm.sin
    smax,                                      // llvm.smax
    smin,                                      // llvm.smin
    smul_fix,                                  // llvm.smul.fix
    smul_fix_sat,                              // llvm.smul.fix.sat
    smul_with_overflow,                        // llvm.smul.with.overflow
    sponentry,                                 // llvm.sponentry
    sqrt,                                      // llvm.sqrt
    ssa_copy,                                  // llvm.ssa.copy
    sshl_sat,                                  // llvm.sshl.sat
    ssub_sat,                                  // llvm.ssub.sat
    ssub_with_overflow,                        // llvm.ssub.with.overflow
    stackguard,                                // llvm.stackguard
    stackprotector,                            // llvm.stackprotector
    stackrestore,                              // llvm.stackrestore
    stacksave,                                 // llvm.stacksave
    start_loop_iterations,                     // llvm.start.loop.iterations
    strip_invariant_group,                     // llvm.strip.invariant.group
    swift_async_context_addr,                  // llvm.swift.async.context.addr
    test_set_loop_iterations,                  // llvm.test.set.loop.iterations
    test_start_loop_iterations,                // llvm.test.start.loop.iterations
    thread_pointer,                            // llvm.thread.pointer
    threadlocal_address,                       // llvm.threadlocal.address
    trap,                                      // llvm.trap
    trunc,                                     // llvm.trunc
    type_checked_load,                         // llvm.type.checked.load
    type_checked_load_relative,                // llvm.type.checked.load.relative
    type_test,                                 // llvm.type.test
    uadd_sat,                                  // llvm.uadd.sat
    uadd_with_overflow,                        // llvm.uadd.with.overflow
    ubsantrap,                                 // llvm.ubsantrap
    udiv_fix,                                  // llvm.udiv.fix
    udiv_fix_sat,                              // llvm.udiv.fix.sat
    umax,                                      // llvm.umax
    umin,                                      // llvm.umin
    umul_fix,                                  // llvm.umul.fix
    umul_fix_sat,                              // llvm.umul.fix.sat
    umul_with_overflow,                        // llvm.umul.with.overflow
    ushl_sat,                                  // llvm.ushl.sat
    usub_sat,                                  // llvm.usub.sat
    usub_with_overflow,                        // llvm.usub.with.overflow
    vacopy,                                    // llvm.va_copy
    vaend,                                     // llvm.va_end
    vastart,                                   // llvm.va_start
    var_annotation,                            // llvm.var.annotation
    vector_extract,                            // llvm.vector.extract
    vector_insert,                             // llvm.vector.insert
    vector_reduce_add,                         // llvm.vector.reduce.add
    vector_reduce_and,                         // llvm.vector.reduce.and
    vector_reduce_fadd,                        // llvm.vector.reduce.fadd
    vector_reduce_fmax,                        // llvm.vector.reduce.fmax
    vector_reduce_fmaximum,                    // llvm.vector.reduce.fmaximum
    vector_reduce_fmin,                        // llvm.vector.reduce.fmin
    vector_reduce_fminimum,                    // llvm.vector.reduce.fminimum
    vector_reduce_fmul,                        // llvm.vector.reduce.fmul
    vector_reduce_mul,                         // llvm.vector.reduce.mul
    vector_reduce_or,                          // llvm.vector.reduce.or
    vector_reduce_smax,                        // llvm.vector.reduce.smax
    vector_reduce_smin,                        // llvm.vector.reduce.smin
    vector_reduce_umax,                        // llvm.vector.reduce.umax
    vector_reduce_umin,                        // llvm.vector.reduce.umin
    vector_reduce_xor,                         // llvm.vector.reduce.xor
    vp_abs,                                    // llvm.vp.abs
    vp_add,                                    // llvm.vp.add
    vp_and,                                    // llvm.vp.and
    vp_ashr,                                   // llvm.vp.ashr
    vp_bitreverse,                             // llvm.vp.bitreverse
    vp_bswap,                                  // llvm.vp.bswap
    vp_ceil,                                   // llvm.vp.ceil
    vp_copysign,                               // llvm.vp.copysign
    vp_ctlz,                                   // llvm.vp.ctlz
    vp_ctpop,                                  // llvm.vp.ctpop
    vp_cttz,                                   // llvm.vp.cttz
    vp_fabs,                                   // llvm.vp.fabs
    vp_fadd,                                   // llvm.vp.fadd
    vp_fcmp,                                   // llvm.vp.fcmp
    vp_fdiv,                                   // llvm.vp.fdiv
    vp_floor,                                  // llvm.vp.floor
    vp_fma,                                    // llvm.vp.fma
    vp_fmul,                                   // llvm.vp.fmul
    vp_fmuladd,                                // llvm.vp.fmuladd
    vp_fneg,                                   // llvm.vp.fneg
    vp_fpext,                                  // llvm.vp.fpext
    vp_fptosi,                                 // llvm.vp.fptosi
    vp_fptoui,                                 // llvm.vp.fptoui
    vp_fptrunc,                                // llvm.vp.fptrunc
    vp_frem,                                   // llvm.vp.frem
    vp_fshl,                                   // llvm.vp.fshl
    vp_fshr,                                   // llvm.vp.fshr
    vp_fsub,                                   // llvm.vp.fsub
    vp_gather,                                 // llvm.vp.gather
    vp_icmp,                                   // llvm.vp.icmp
    vp_inttoptr,                               // llvm.vp.inttoptr
    vp_load,                                   // llvm.vp.load
    vp_lshr,                                   // llvm.vp.lshr
    vp_maxnum,                                 // llvm.vp.maxnum
    vp_merge,                                  // llvm.vp.merge
    vp_minnum,                                 // llvm.vp.minnum
    vp_mul,                                    // llvm.vp.mul
    vp_nearbyint,                              // llvm.vp.nearbyint
    vp_or,                                     // llvm.vp.or
    vp_ptrtoint,                               // llvm.vp.ptrtoint
    vp_reduce_add,                             // llvm.vp.reduce.add
    vp_reduce_and,                             // llvm.vp.reduce.and
    vp_reduce_fadd,                            // llvm.vp.reduce.fadd
    vp_reduce_fmax,                            // llvm.vp.reduce.fmax
    vp_reduce_fmin,                            // llvm.vp.reduce.fmin
    vp_reduce_fmul,                            // llvm.vp.reduce.fmul
    vp_reduce_mul,                             // llvm.vp.reduce.mul
    vp_reduce_or,                              // llvm.vp.reduce.or
    vp_reduce_smax,                            // llvm.vp.reduce.smax
    vp_reduce_smin,                            // llvm.vp.reduce.smin
    vp_reduce_umax,                            // llvm.vp.reduce.umax
    vp_reduce_umin,                            // llvm.vp.reduce.umin
    vp_reduce_xor,                             // llvm.vp.reduce.xor
    vp_rint,                                   // llvm.vp.rint
    vp_round,                                  // llvm.vp.round
    vp_roundeven,                              // llvm.vp.roundeven
    vp_roundtozero,                            // llvm.vp.roundtozero
    vp_scatter,                                // llvm.vp.scatter
    vp_sdiv,                                   // llvm.vp.sdiv
    vp_select,                                 // llvm.vp.select
    vp_sext,                                   // llvm.vp.sext
    vp_shl,                                    // llvm.vp.shl
    vp_sitofp,                                 // llvm.vp.sitofp
    vp_smax,                                   // llvm.vp.smax
    vp_smin,                                   // llvm.vp.smin
    vp_sqrt,                                   // llvm.vp.sqrt
    vp_srem,                                   // llvm.vp.srem
    vp_store,                                  // llvm.vp.store
    vp_sub,                                    // llvm.vp.sub
    vp_trunc,                                  // llvm.vp.trunc
    vp_udiv,                                   // llvm.vp.udiv
    vp_uitofp,                                 // llvm.vp.uitofp
    vp_umax,                                   // llvm.vp.umax
    vp_umin,                                   // llvm.vp.umin
    vp_urem,                                   // llvm.vp.urem
    vp_xor,                                    // llvm.vp.xor
    vp_zext,                                   // llvm.vp.zext
    vscale,                                    // llvm.vscale
    write_register,                            // llvm.write_register
    xray_customevent,                          // llvm.xray.customevent
    xray_typedevent,                           // llvm.xray.typedevent
    num_intrinsics = 11926
#endif

// llvm::Intrinsic::IITDescriptor::ArgKind
#ifdef GET_INTRINSIC_ARGKIND
    AK_Any = 0,
    AK_AnyInteger = 1,
    AK_AnyFloat = 2,
    AK_AnyVector = 3,
    AK_AnyPointer = 4,
    AK_MatchType = 7,
#endif
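
/* Consumers stamp out these fragments by defining the guard macro before
 * including this file. A sketch of the standard TableGen .inc idiom (as used
 * by llvm/IR/Intrinsics.h for the enum fragment):
 *
 *   namespace llvm {
 *   namespace Intrinsic {
 *   enum IndependentIntrinsics : unsigned {
 *     not_intrinsic = 0, // must be 0; `abs = 1` above continues from here
 *   #define GET_INTRINSIC_ENUM_VALUES
 *   #include "llvm/IR/IntrinsicEnums.inc"
 *   #undef GET_INTRINSIC_ENUM_VALUES
 *   };
 *   } // namespace Intrinsic
 *   } // namespace llvm
 */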

//===- LegacyPassManagers.h - Legacy Pass Infrastructure --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the LLVM Pass Manager infrastructure.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_LEGACYPASSMANAGERS_H
#define LLVM_IR_LEGACYPASSMANAGERS_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Pass.h"
#include <vector>

//===----------------------------------------------------------------------===//
// Overview:
// The Pass Manager Infrastructure manages passes. Its responsibilities are:
//
//   o Manage optimization pass execution order
//   o Make required Analysis information available before pass P is run
//   o Release memory occupied by dead passes
//   o If Analysis information is dirtied by a pass then regenerate Analysis
//     information before it is consumed by another pass.
//
// Pass Manager Infrastructure uses multiple pass managers.  They are
// PassManager, FunctionPassManager, MPPassManager, FPPassManager, BBPassManager.
// This class hierarchy uses multiple inheritance but pass managers do not
// derive from another pass manager.
//
// PassManager and FunctionPassManager are the two top-level pass managers that
// represent the external interface of this entire pass manager infrastructure.
//
// Important classes :
//
// [o] class PMTopLevelManager;
//
// Two top level managers, PassManager and FunctionPassManager, derive from
// PMTopLevelManager. PMTopLevelManager manages information used by top level
// managers such as last user info.
//
// [o] class PMDataManager;
//
// PMDataManager manages information, e.g. list of available analysis info,
// used by a pass manager to manage execution order of passes. It also provides
// a place to implement common pass manager APIs. All pass managers derive from
// PMDataManager.
//
// [o] class FunctionPassManager;
//
// This is an external interface used to manage FunctionPasses. This
// interface relies on FunctionPassManagerImpl to do all the tasks.
//
// [o] class FunctionPassManagerImpl : public ModulePass, PMDataManager,
//                                     public PMTopLevelManager;
//
// FunctionPassManagerImpl is a top level manager. It manages FPPassManagers.
//
// [o] class FPPassManager : public ModulePass, public PMDataManager;
//
// FPPassManager manages FunctionPasses and BBPassManagers.
//
// [o] class MPPassManager : public Pass, public PMDataManager;
//
// MPPassManager manages ModulePasses and FPPassManagers.
//
// [o] class PassManager;
//
// This is an external interface used by various tools to manage passes. It
// relies on PassManagerImpl to do all the tasks.
//
// [o] class PassManagerImpl : public Pass, public PMDataManager,
//                             public PMTopLevelManager
//
// PassManagerImpl is a top level pass manager responsible for managing
// MPPassManagers.
//===----------------------------------------------------------------------===//
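
// A sketch of driving this machinery through the external interface declared
// in llvm/IR/LegacyPassManager.h (MyModulePass is a hypothetical ModulePass):
//
//   llvm::legacy::PassManager PM;
//   PM.add(new MyModulePass()); // PM takes ownership and schedules the pass
//   PM.run(M); // runs required analyses first, frees dead passes afterwards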

#include "llvm/Support/PrettyStackTrace.h"

namespace llvm {
template <typename T> class ArrayRef;
class Module;
class StringRef;
class Value;
class PMDataManager;

// enums for debugging strings
enum PassDebuggingString {
  EXECUTION_MSG, // "Executing Pass '" + PassName
  MODIFICATION_MSG, // "Made Modification '" + PassName
  FREEING_MSG, // " Freeing Pass '" + PassName
  ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n"
  ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n"
  ON_REGION_MSG, // "' on Region '" + Msg + "'...\n'"
  ON_LOOP_MSG, // "' on Loop '" + Msg + "'...\n'"
  ON_CG_MSG // "' on Call Graph Nodes '" + Msg + "'...\n'"
};

/// PassManagerPrettyStackEntry - This is used to print informative information
/// about what pass is running when/if a stack trace is generated.
class PassManagerPrettyStackEntry : public PrettyStackTraceEntry {
  Pass *P;
  Value *V;
  Module *M;

public:
  explicit PassManagerPrettyStackEntry(Pass *p)
    : P(p), V(nullptr), M(nullptr) {}  // When P is releaseMemory'd.
  PassManagerPrettyStackEntry(Pass *p, Value &v)
    : P(p), V(&v), M(nullptr) {} // When P is run on V
  PassManagerPrettyStackEntry(Pass *p, Module &m)
    : P(p), V(nullptr), M(&m) {} // When P is run on M

  /// print - Emit information about this stack frame to OS.
  void print(raw_ostream &OS) const override;
};
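
// Typical use is a scoped entry around a pass's run method, so that a crash
// inside the pass reports which pass and which IR unit were active (a
// simplified sketch of how the legacy pass managers use it):
//
//   {
//     PassManagerPrettyStackEntry X(FP, F); // pushed onto the crash stack
//     LocalChanged |= FP->runOnFunction(F);
//   } // popped when X goes out of scope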

//===----------------------------------------------------------------------===//
// PMStack
//
/// PMStack - This class implements a stack data structure of PMDataManager
/// pointers.
///
/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
/// using PMStack. Each Pass implements assignPassManager() to connect itself
/// with appropriate manager. assignPassManager() walks PMStack to find
/// suitable manager.
class PMStack {
public:
  typedef std::vector<PMDataManager *>::const_reverse_iterator iterator;
  iterator begin() const { return S.rbegin(); }
  iterator end() const { return S.rend(); }

  void pop();
  PMDataManager *top() const { return S.back(); }
  void push(PMDataManager *PM);
  bool empty() const { return S.empty(); }

  void dump() const;

private:
  std::vector<PMDataManager *> S;
};
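
// For example, FunctionPass::assignPassManager pops managers that cannot host
// a function pass before selecting or creating an FPPassManager (a simplified
// sketch of that walk):
//
//   while (!PMS.empty() &&
//          PMS.top()->getPassManagerType() > PMT_FunctionPassManager)
//     PMS.pop();
//   // PMS.top() is now a manager that can contain function passes.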

//===----------------------------------------------------------------------===//
// PMTopLevelManager
//
/// PMTopLevelManager manages LastUser info and collects common APIs used by
/// top level pass managers.
class PMTopLevelManager {
protected:
  explicit PMTopLevelManager(PMDataManager *PMDM);

  unsigned getNumContainedManagers() const {
    return (unsigned)PassManagers.size();
  }

  void initializeAllAnalysisInfo();

private:
  virtual PMDataManager *getAsPMDataManager() = 0;
  virtual PassManagerType getTopLevelPassManagerType() = 0;

public:
  /// Schedule pass P for execution. Make sure that passes required by
  /// P are run before P is run. Update analysis info maintained by
  /// the manager. Remove dead passes. This is a recursive function.
  void schedulePass(Pass *P);

  /// Set pass P as the last user of the given analysis passes.
  void setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P);

  /// Collect passes whose last user is P
  void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);

  /// Find the pass that implements Analysis AID. Search immutable
  /// passes and all pass managers. If desired pass is not found
  /// then return NULL.
  Pass *findAnalysisPass(AnalysisID AID);

  /// Retrieve the PassInfo for an analysis.
  const PassInfo *findAnalysisPassInfo(AnalysisID AID) const;

  /// Find analysis usage information for the pass P.
  AnalysisUsage *findAnalysisUsage(Pass *P);

  virtual ~PMTopLevelManager();

  /// Add immutable pass and initialize it.
  void addImmutablePass(ImmutablePass *P);

  inline SmallVectorImpl<ImmutablePass *>& getImmutablePasses() {
    return ImmutablePasses;
  }

  void addPassManager(PMDataManager *Manager) {
    PassManagers.push_back(Manager);
  }

  // Add Manager into the list of managers that are not directly
  // maintained by this top level pass manager
  inline void addIndirectPassManager(PMDataManager *Manager) {
    IndirectPassManagers.push_back(Manager);
  }

  // Print passes managed by this top level manager.
  void dumpPasses() const;
  void dumpArguments() const;

  // Active Pass Managers
  PMStack activeStack;

protected:
  /// Collection of pass managers
  SmallVector<PMDataManager *, 8> PassManagers;

private:
  /// Collection of pass managers that are not directly maintained
  /// by this pass manager
  SmallVector<PMDataManager *, 8> IndirectPassManagers;

  // Map to keep track of last user of the analysis pass.
  // LastUser->second is the last user of LastUser->first.
  // This is kept in sync with InversedLastUser.
  DenseMap<Pass *, Pass *> LastUser;

  // Map to keep track of passes that are last used by a pass.
  // This is kept in sync with LastUser.
  DenseMap<Pass *, SmallPtrSet<Pass *, 8> > InversedLastUser;

  /// Immutable passes are managed by top level manager.
  SmallVector<ImmutablePass *, 16> ImmutablePasses;

  /// Map from ID to immutable passes.
  SmallDenseMap<AnalysisID, ImmutablePass *, 8> ImmutablePassMap;


  /// A wrapper around AnalysisUsage for the purpose of uniquing.  The wrapper
  /// is used to avoid needing to make AnalysisUsage itself a folding set node.
  struct AUFoldingSetNode : public FoldingSetNode {
    AnalysisUsage AU;
    AUFoldingSetNode(const AnalysisUsage &AU) : AU(AU) {}
    void Profile(FoldingSetNodeID &ID) const {
      Profile(ID, AU);
    }
    static void Profile(FoldingSetNodeID &ID, const AnalysisUsage &AU) {
      // TODO: We could consider sorting the dependency arrays within the
      // AnalysisUsage (since they are conceptually unordered).
      ID.AddBoolean(AU.getPreservesAll());
      auto ProfileVec = [&](const SmallVectorImpl<AnalysisID>& Vec) {
        ID.AddInteger(Vec.size());
        for(AnalysisID AID : Vec)
          ID.AddPointer(AID);
      };
      ProfileVec(AU.getRequiredSet());
      ProfileVec(AU.getRequiredTransitiveSet());
      ProfileVec(AU.getPreservedSet());
      ProfileVec(AU.getUsedSet());
    }
  };

  // Contains all of the unique combinations of AnalysisUsage.  This is helpful
  // when we have multiple instances of the same pass since they'll usually
  // have the same analysis usage and can share storage.
  FoldingSet<AUFoldingSetNode> UniqueAnalysisUsages;

  // Allocator used for allocating AUFoldingSetNodes.  This handles deletion of
  // all allocated nodes in one fell swoop.
  SpecificBumpPtrAllocator<AUFoldingSetNode> AUFoldingSetNodeAllocator;

  // Maps from a pass to its associated entry in UniqueAnalysisUsages.  Does
  // not own the storage associated with either key or value.
  DenseMap<Pass *, AnalysisUsage*> AnUsageMap;

  /// Collection of PassInfo objects found via analysis IDs and in this top
  /// level manager. This is used to memoize queries to the pass registry.
  /// FIXME: This is an egregious hack because querying the pass registry is
  /// either slow or racy.
  mutable DenseMap<AnalysisID, const PassInfo *> AnalysisPassInfos;
};

//===----------------------------------------------------------------------===//
// PMDataManager

/// PMDataManager provides the common place to manage the analysis data
/// used by pass managers.
class PMDataManager {
public:
  explicit PMDataManager() { initializeAnalysisInfo(); }

  virtual ~PMDataManager();

  virtual Pass *getAsPass() = 0;

  /// Augment AvailableAnalysis by adding analysis made available by pass P.
  void recordAvailableAnalysis(Pass *P);

  /// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
  void verifyPreservedAnalysis(Pass *P);

  /// Remove Analysis that is not preserved by the pass
  void removeNotPreservedAnalysis(Pass *P);

  /// Remove dead passes used by P.
  void removeDeadPasses(Pass *P, StringRef Msg,
                        enum PassDebuggingString);

  /// Remove P.
  void freePass(Pass *P, StringRef Msg,
                enum PassDebuggingString);

  /// Add pass P into the PassVector. Update
  /// AvailableAnalysis appropriately if ProcessAnalysis is true.
  void add(Pass *P, bool ProcessAnalysis = true);

  /// Add RequiredPass into list of lower level passes required by pass P.
  /// RequiredPass is run on the fly by Pass Manager when P requests it
  /// through getAnalysis interface.
  virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);

  virtual std::tuple<Pass *, bool> getOnTheFlyPass(Pass *P, AnalysisID PI,
                                                   Function &F);

  /// Initialize available analysis information.
  void initializeAnalysisInfo() {
    AvailableAnalysis.clear();
    for (auto &IA : InheritedAnalysis)
      IA = nullptr;
  }

  // Return true if P preserves high level analysis used by other
  // passes that are managed by this manager.
  bool preserveHigherLevelAnalysis(Pass *P);

  /// Populate UsedPasses with analysis passes that are used or required by
  /// pass P and are available. Populate ReqPassNotAvailable with analysis
  /// passes that are required by pass P but are not available.
  void collectRequiredAndUsedAnalyses(
      SmallVectorImpl<Pass *> &UsedPasses,
      SmallVectorImpl<AnalysisID> &ReqPassNotAvailable, Pass *P);

  /// All Required analyses should be available to the pass as it runs!  Here
  /// we fill in the AnalysisImpls member of the pass so that it can
  /// successfully use the getAnalysis() method to retrieve the
  /// implementations it needs.
  void initializeAnalysisImpl(Pass *P);

  /// Find the pass that implements Analysis AID. If desired pass is not found
  /// then return NULL.
  Pass *findAnalysisPass(AnalysisID AID, bool Direction);

  // Access toplevel manager
  PMTopLevelManager *getTopLevelManager() { return TPM; }
  void setTopLevelManager(PMTopLevelManager *T) { TPM = T; }

  unsigned getDepth() const { return Depth; }
  void setDepth(unsigned newDepth) { Depth = newDepth; }

  // Print routines used by debug-pass
  void dumpLastUses(Pass *P, unsigned Offset) const;
  void dumpPassArguments() const;
  void dumpPassInfo(Pass *P, enum PassDebuggingString S1,
                    enum PassDebuggingString S2, StringRef Msg);
  void dumpRequiredSet(const Pass *P) const;
  void dumpPreservedSet(const Pass *P) const;
  void dumpUsedSet(const Pass *P) const;

  unsigned getNumContainedPasses() const {
    return (unsigned)PassVector.size();
  }

  virtual PassManagerType getPassManagerType() const {
    assert(0 && "Invalid use of getPassManagerType");
    return PMT_Unknown;
  }

  DenseMap<AnalysisID, Pass*> *getAvailableAnalysis() {
    return &AvailableAnalysis;
  }

  // Collect AvailableAnalysis from all the active Pass Managers.
  void populateInheritedAnalysis(PMStack &PMS) {
    unsigned Index = 0;
    for (PMDataManager *PMDM : PMS)
      InheritedAnalysis[Index++] = PMDM->getAvailableAnalysis();
  }

  /// Set the initial size of the module if the user has specified that they
  /// want remarks for size.
  /// Returns 0 if the remark was not requested.
  unsigned initSizeRemarkInfo(
      Module &M,
      StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount);

  /// Emit a remark signifying that the number of IR instructions in the module
  /// changed.
  /// \p F is optionally passed by passes which run on Functions, and thus
  /// always know whether or not a non-empty function is available.
  ///
  /// \p FunctionToInstrCount maps the name of a \p Function to a pair. The
  /// first member of the pair is the IR count of the \p Function before running
  /// \p P, and the second member is the IR count of the \p Function after
  /// running \p P.
  void emitInstrCountChangedRemark(
      Pass *P, Module &M, int64_t Delta, unsigned CountBefore,
      StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount,
      Function *F = nullptr);

protected:
  // Top level manager.
  PMTopLevelManager *TPM = nullptr;

  // Collection of pass that are managed by this manager
  SmallVector<Pass *, 16> PassVector;

  // Collection of Analysis provided by the parent pass manager and
  // used by the current pass manager. At any time there cannot be more
  // than PMT_Last active pass managers.
  DenseMap<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];

  /// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
  /// or higher is specified.
  bool isPassDebuggingExecutionsOrMore() const;

private:
  void dumpAnalysisUsage(StringRef Msg, const Pass *P,
                         const AnalysisUsage::VectorType &Set) const;

  // Set of available Analysis. This information is used while scheduling
  // passes. If a pass requires an analysis which is not available then
  // the required analysis pass is scheduled to run before the pass itself is
  // scheduled to run.
  DenseMap<AnalysisID, Pass*> AvailableAnalysis;

  // Collection of higher level analysis used by the pass managed by
  // this manager.
  SmallVector<Pass *, 16> HigherLevelAnalysis;

  unsigned Depth = 0;
};

//===----------------------------------------------------------------------===//
// FPPassManager
//
/// FPPassManager manages BBPassManagers and FunctionPasses.
/// It batches all function passes and basic block pass managers together and
/// sequences them to process one function at a time before processing the
/// next function.
class FPPassManager : public ModulePass, public PMDataManager {
public:
  static char ID;
  explicit FPPassManager() : ModulePass(ID) {}

  /// run - Execute all of the passes scheduled for execution.  Keep track of
  /// whether any of the passes modifies the module, and if so, return true.
  bool runOnFunction(Function &F);
  bool runOnModule(Module &M) override;

  /// cleanup - After running all passes, clean up pass manager cache.
  void cleanup();

  /// doInitialization - Overrides ModulePass doInitialization for global
  /// initialization tasks
  ///
  using ModulePass::doInitialization;

  /// doInitialization - Run all of the initializers for the function passes.
  ///
  bool doInitialization(Module &M) override;

  /// doFinalization - Overrides ModulePass doFinalization for global
  /// finalization tasks
  ///
  using ModulePass::doFinalization;

  /// doFinalization - Run all of the finalizers for the function passes.
  ///
  bool doFinalization(Module &M) override;

  PMDataManager *getAsPMDataManager() override { return this; }
  Pass *getAsPass() override { return this; }

  /// Pass Manager itself does not invalidate any analysis info.
  void getAnalysisUsage(AnalysisUsage &Info) const override {
    Info.setPreservesAll();
  }

  // Print passes managed by this manager
  void dumpPassStructure(unsigned Offset) override;

  StringRef getPassName() const override { return "Function Pass Manager"; }

  FunctionPass *getContainedPass(unsigned N) {
    assert(N < PassVector.size() && "Pass number out of range!");
    FunctionPass *FP = static_cast<FunctionPass *>(PassVector[N]);
    return FP;
  }

  PassManagerType getPassManagerType() const override {
    return PMT_FunctionPassManager;
  }
};
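
// The batching described above boils down to an outer loop over functions and
// an inner loop over the contained passes (a simplified sketch of the shape
// of runOnModule):
//
//   for (Function &F : M)
//     for (unsigned I = 0; I < getNumContainedPasses(); ++I)
//       Changed |= getContainedPass(I)->runOnFunction(F);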

}

#endif

//===- DIBuilder.h - Debug Information Builder ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DIBuilder that is useful for creating debugging
// information entries in LLVM IR form.
//
//===----------------------------------------------------------------------===//
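
// A minimal usage sketch (createFile/createCompileUnit/finalize are the
// DIBuilder entry points; the file name and producer string are illustrative):
//
//   llvm::DIBuilder DIB(M);
//   llvm::DIFile *File = DIB.createFile("example.c", "/src");
//   llvm::DICompileUnit *CU = DIB.createCompileUnit(
//       llvm::dwarf::DW_LANG_C99, File, /*Producer=*/"my-compiler",
//       /*isOptimized=*/false, /*Flags=*/"", /*RV=*/0);
//   // ... create types, subprograms, variables ...
//   DIB.finalize(); // resolve any deferred/temporary nodes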

#ifndef LLVM_IR_DIBUILDER_H
#define LLVM_IR_DIBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/TrackingMDRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cstdint>
#include <optional>

namespace llvm {

  class BasicBlock;
  class Constant;
  class Function;
  class Instruction;
  class LLVMContext;
  class Module;
  class Value;
  class DbgAssignIntrinsic;

  class DIBuilder {
    Module &M;
    LLVMContext &VMContext;

    DICompileUnit *CUNode;   ///< The one compile unit created by this DIBuilder.
    Function *DeclareFn;     ///< llvm.dbg.declare
    Function *ValueFn;       ///< llvm.dbg.value
    Function *LabelFn;       ///< llvm.dbg.label
    Function *AssignFn;      ///< llvm.dbg.assign

    SmallVector<TrackingMDNodeRef, 4> AllEnumTypes;
    /// Track the RetainTypes, since they can be updated later on.
    SmallVector<TrackingMDNodeRef, 4> AllRetainTypes;
    SmallVector<DISubprogram *, 4> AllSubprograms;
    SmallVector<Metadata *, 4> AllGVs;
    SmallVector<TrackingMDNodeRef, 4> ImportedModules;
    /// Map Macro parent (which can be DIMacroFile or nullptr) to a list of
    /// Metadata all of type DIMacroNode.
    /// DIMacroNodes with a nullptr parent are direct children of the DICompileUnit.
    MapVector<MDNode *, SetVector<Metadata *>> AllMacrosPerParent;

    /// Track nodes that may be unresolved.
    SmallVector<TrackingMDNodeRef, 4> UnresolvedNodes;
    bool AllowUnresolvedNodes;

    /// Each subprogram's preserved local variables, labels and imported
    /// entities.
    ///
    /// Do not use a std::vector.  Some versions of libc++ apparently copy
    /// instead of move on grow operations, and TrackingMDRef is expensive to
    /// copy.
    DenseMap<DISubprogram *, SmallVector<TrackingMDNodeRef, 4>>
        SubprogramTrackedNodes;

    SmallVectorImpl<TrackingMDNodeRef> &
    getImportTrackingVector(const DIScope *S) {
      return isa_and_nonnull<DILocalScope>(S)
                 ? getSubprogramNodesTrackingVector(S)
                 : ImportedModules;
    }
    SmallVectorImpl<TrackingMDNodeRef> &
    getSubprogramNodesTrackingVector(const DIScope *S) {
      return SubprogramTrackedNodes[cast<DILocalScope>(S)->getSubprogram()];
    }

    /// Create a temporary.
    ///
    /// Create a temporary node and track it in \a UnresolvedNodes.
    void trackIfUnresolved(MDNode *N);

    /// Internal helper for insertDeclare.
    Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
                               DIExpression *Expr, const DILocation *DL,
                               BasicBlock *InsertBB, Instruction *InsertBefore);

    /// Internal helper for insertLabel.
    Instruction *insertLabel(DILabel *LabelInfo, const DILocation *DL,
                             BasicBlock *InsertBB, Instruction *InsertBefore);

    /// Internal helper with common code used by the insertDbgValueIntrinsic
    /// overloads.
    Instruction *insertDbgIntrinsic(llvm::Function *Intrinsic, llvm::Value *Val,
                                    DILocalVariable *VarInfo,
                                    DIExpression *Expr, const DILocation *DL,
                                    BasicBlock *InsertBB,
                                    Instruction *InsertBefore);

    /// Internal helper for insertDbgValueIntrinsic.
    Instruction *
    insertDbgValueIntrinsic(llvm::Value *Val, DILocalVariable *VarInfo,
                            DIExpression *Expr, const DILocation *DL,
                            BasicBlock *InsertBB, Instruction *InsertBefore);

  public:
    /// Construct a builder for a module.
    ///
    /// If \c AllowUnresolved, collect unresolved nodes attached to the module
    /// in order to resolve cycles during \a finalize().
    ///
    /// If \p CU is given a value other than nullptr, then set \p CUNode to CU.
    explicit DIBuilder(Module &M, bool AllowUnresolved = true,
                       DICompileUnit *CU = nullptr);
    DIBuilder(const DIBuilder &) = delete;
    DIBuilder &operator=(const DIBuilder &) = delete;

    /// Construct any deferred debug info descriptors.
    void finalize();

    /// Finalize a specific subprogram. No new variables may be added to this
    /// subprogram afterwards.
    void finalizeSubprogram(DISubprogram *SP);

    /// A CompileUnit provides an anchor for all debugging
    /// information generated during this instance of compilation.
    /// \param Lang          Source programming language, e.g. dwarf::DW_LANG_C99
    /// \param File          File info.
    /// \param Producer      Identify the producer of debugging information
    ///                      and code.  Usually this is a compiler
    ///                      version string.
    /// \param isOptimized   A boolean flag which indicates whether optimization
    ///                      is enabled or not.
    /// \param Flags         This string lists command line options. This
    ///                      string is directly embedded in debug info
    ///                      output which may be used by a tool
    ///                      analyzing generated debugging information.
    /// \param RV            This indicates runtime version for languages like
    ///                      Objective-C.
    /// \param SplitName     The name of the file that we'll split debug info
    ///                      out into.
    /// \param Kind          The kind of debug information to generate.
    /// \param DWOId         The DWOId if this is a split skeleton compile unit.
    /// \param SplitDebugInlining    Whether to emit inline debug info.
    /// \param DebugInfoForProfiling Whether to emit extra debug info for
    ///                              profile collection.
    /// \param NameTableKind  Whether to emit .debug_gnu_pubnames,
    ///                      .debug_pubnames, or no pubnames at all.
    /// \param SysRoot       The clang system root (value of -isysroot).
    /// \param SDK           The SDK name. On Darwin, this is the last component
    ///                      of the sysroot.
    DICompileUnit *
    createCompileUnit(unsigned Lang, DIFile *File, StringRef Producer,
                      bool isOptimized, StringRef Flags, unsigned RV,
                      StringRef SplitName = StringRef(),
                      DICompileUnit::DebugEmissionKind Kind =
                          DICompileUnit::DebugEmissionKind::FullDebug,
                      uint64_t DWOId = 0, bool SplitDebugInlining = true,
                      bool DebugInfoForProfiling = false,
                      DICompileUnit::DebugNameTableKind NameTableKind =
                          DICompileUnit::DebugNameTableKind::Default,
                      bool RangesBaseAddress = false, StringRef SysRoot = {},
                      StringRef SDK = {});
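
    // Illustrative sketch, not part of the original header: a front end
    // typically creates one DIBuilder per Module and anchors all debug info
    // on a single compile unit.  The file name, directory, and producer
    // string below are placeholders.
    //
    //   DIBuilder DIB(M);
    //   DIFile *File = DIB.createFile("demo.c", "/tmp");
    //   DICompileUnit *CU = DIB.createCompileUnit(
    //       dwarf::DW_LANG_C99, File, "my-compiler", /*isOptimized=*/false,
    //       /*Flags=*/"", /*RV=*/0);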

    /// Create a file descriptor to hold debugging information for a file.
    /// \param Filename  File name.
    /// \param Directory Directory.
    /// \param Checksum  Optional checksum kind (e.g. CSK_MD5, CSK_SHA1, etc.)
    ///                  and value.
    /// \param Source    Optional source text.
    DIFile *createFile(
        StringRef Filename, StringRef Directory,
        std::optional<DIFile::ChecksumInfo<StringRef>> Checksum = std::nullopt,
        std::optional<StringRef> Source = std::nullopt);

    /// Create debugging information entry for a macro.
    /// \param Parent     Macro parent (could be nullptr).
    /// \param Line       Source line number where the macro is defined.
    /// \param MacroType  DW_MACINFO_define or DW_MACINFO_undef.
    /// \param Name       Macro name.
    /// \param Value      Macro value.
    DIMacro *createMacro(DIMacroFile *Parent, unsigned Line, unsigned MacroType,
                         StringRef Name, StringRef Value = StringRef());

    /// Create a temporary debugging information entry for a macro file.
    /// The list of macro node direct children will be calculated by DIBuilder
    /// using the \p Parent relationship.
    /// \param Parent     Macro file parent (could be nullptr).
    /// \param Line       Source line number where the macro file is included.
    /// \param File       File descriptor containing the name of the macro file.
    DIMacroFile *createTempMacroFile(DIMacroFile *Parent, unsigned Line,
                                     DIFile *File);

    /// Create a single enumerator value.
    DIEnumerator *createEnumerator(StringRef Name, const APSInt &Value);
    DIEnumerator *createEnumerator(StringRef Name, uint64_t Val,
                                   bool IsUnsigned = false);

    /// Create a DWARF unspecified type.
    DIBasicType *createUnspecifiedType(StringRef Name);

    /// Create C++11 nullptr type.
    DIBasicType *createNullPtrType();

    /// Create debugging information entry for a basic
    /// type.
    /// \param Name        Type name.
    /// \param SizeInBits  Size of the type.
    /// \param Encoding    DWARF encoding code, e.g., dwarf::DW_ATE_float.
    /// \param Flags       Optional DWARF attributes, e.g., DW_AT_endianity.
    DIBasicType *createBasicType(StringRef Name, uint64_t SizeInBits,
                                 unsigned Encoding,
                                 DINode::DIFlags Flags = DINode::FlagZero);
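
    // Illustrative sketch: creating a 32-bit signed integer type.  The name
    // "IntTy" is an assumption reused by later examples.
    //
    //   DIBasicType *IntTy =
    //       DIB.createBasicType("int", 32, dwarf::DW_ATE_signed);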

    /// Create debugging information entry for a string
    /// type.
    /// \param Name        Type name.
    /// \param SizeInBits  Size of the type.
    DIStringType *createStringType(StringRef Name, uint64_t SizeInBits);

    /// Create debugging information entry for Fortran
    /// assumed length string type.
    /// \param Name            Type name.
    /// \param StringLength    String length expressed as DIVariable *.
    /// \param StrLocationExp  Optional memory location of the string.
    DIStringType *createStringType(StringRef Name, DIVariable *StringLength,
                                   DIExpression *StrLocationExp = nullptr);

    /// Create debugging information entry for Fortran
    /// assumed length string type.
    /// \param Name             Type name.
    /// \param StringLengthExp  String length expressed in DIExpression form.
    /// \param StrLocationExp   Optional memory location of the string.
    DIStringType *createStringType(StringRef Name,
                                   DIExpression *StringLengthExp,
                                   DIExpression *StrLocationExp = nullptr);

    /// Create debugging information entry for a qualified
    /// type, e.g. 'const int'.
    /// \param Tag         Tag identifying the type,
    ///                    e.g. dwarf::DW_TAG_volatile_type.
    /// \param FromTy      Base Type.
    DIDerivedType *createQualifiedType(unsigned Tag, DIType *FromTy);

    /// Create debugging information entry for a pointer.
    /// \param PointeeTy         Type pointed by this pointer.
    /// \param SizeInBits        Size.
    /// \param AlignInBits       Alignment. (optional)
    /// \param DWARFAddressSpace DWARF address space. (optional)
    /// \param Name              Pointer type name. (optional)
    /// \param Annotations       Member annotations.
    DIDerivedType *
    createPointerType(DIType *PointeeTy, uint64_t SizeInBits,
                      uint32_t AlignInBits = 0,
                      std::optional<unsigned> DWARFAddressSpace = std::nullopt,
                      StringRef Name = "", DINodeArray Annotations = nullptr);

    /// Create debugging information entry for a pointer to member.
    /// \param PointeeTy Type pointed to by this pointer.
    /// \param SizeInBits  Size.
    /// \param AlignInBits Alignment. (optional)
    /// \param Class       Type whose members this pointer points to.
    DIDerivedType *
    createMemberPointerType(DIType *PointeeTy, DIType *Class,
                            uint64_t SizeInBits, uint32_t AlignInBits = 0,
                            DINode::DIFlags Flags = DINode::FlagZero);

    /// Create debugging information entry for a C++ style reference or
    /// rvalue reference type.
    DIDerivedType *createReferenceType(
        unsigned Tag, DIType *RTy, uint64_t SizeInBits = 0,
        uint32_t AlignInBits = 0,
        std::optional<unsigned> DWARFAddressSpace = std::nullopt);

    /// Create debugging information entry for a typedef.
    /// \param Ty          Original type.
    /// \param Name        Typedef name.
    /// \param File        File where this type is defined.
    /// \param LineNo      Line number.
    /// \param Context     The surrounding context for the typedef.
    /// \param AlignInBits Alignment. (optional)
    /// \param Flags       Flags to describe inheritance attribute, e.g. private
    /// \param Annotations Annotations. (optional)
    DIDerivedType *createTypedef(DIType *Ty, StringRef Name, DIFile *File,
                                 unsigned LineNo, DIScope *Context,
                                 uint32_t AlignInBits = 0,
                                 DINode::DIFlags Flags = DINode::FlagZero,
                                 DINodeArray Annotations = nullptr);

    /// Create debugging information entry for a 'friend'.
    DIDerivedType *createFriend(DIType *Ty, DIType *FriendTy);

    /// Create debugging information entry to establish
    /// inheritance relationship between two types.
    /// \param Ty           Original type.
    /// \param BaseTy       Base type. Ty inherits from BaseTy.
    /// \param BaseOffset   Base offset.
    /// \param VBPtrOffset  Virtual base pointer offset.
    /// \param Flags        Flags to describe inheritance attribute,
    ///                     e.g. private
    DIDerivedType *createInheritance(DIType *Ty, DIType *BaseTy,
                                     uint64_t BaseOffset, uint32_t VBPtrOffset,
                                     DINode::DIFlags Flags);

    /// Create debugging information entry for a member.
    /// \param Scope        Member scope.
    /// \param Name         Member name.
    /// \param File         File where this member is defined.
    /// \param LineNo       Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param OffsetInBits Member offset.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Ty           Parent type.
    /// \param Annotations  Member annotations.
    DIDerivedType *createMemberType(DIScope *Scope, StringRef Name,
                                    DIFile *File, unsigned LineNo,
                                    uint64_t SizeInBits, uint32_t AlignInBits,
                                    uint64_t OffsetInBits,
                                    DINode::DIFlags Flags, DIType *Ty,
                                    DINodeArray Annotations = nullptr);

    /// Create debugging information entry for a variant.  A variant
    /// normally should be a member of a variant part.
    /// \param Scope        Member scope.
    /// \param Name         Member name.
    /// \param File         File where this member is defined.
    /// \param LineNo       Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param OffsetInBits Member offset.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Discriminant The discriminant for this branch; null for
    ///                     the default branch
    /// \param Ty           Parent type.
    DIDerivedType *createVariantMemberType(DIScope *Scope, StringRef Name,
                                           DIFile *File, unsigned LineNo,
                                           uint64_t SizeInBits,
                                           uint32_t AlignInBits,
                                           uint64_t OffsetInBits,
                                           Constant *Discriminant,
                                           DINode::DIFlags Flags, DIType *Ty);

    /// Create debugging information entry for a bit field member.
    /// \param Scope               Member scope.
    /// \param Name                Member name.
    /// \param File                File where this member is defined.
    /// \param LineNo              Line number.
    /// \param SizeInBits          Member size.
    /// \param OffsetInBits        Member offset.
    /// \param StorageOffsetInBits Member storage offset.
    /// \param Flags               Flags to encode member attribute.
    /// \param Ty                  Parent type.
    /// \param Annotations         Member annotations.
    DIDerivedType *createBitFieldMemberType(DIScope *Scope, StringRef Name,
                                            DIFile *File, unsigned LineNo,
                                            uint64_t SizeInBits,
                                            uint64_t OffsetInBits,
                                            uint64_t StorageOffsetInBits,
                                            DINode::DIFlags Flags, DIType *Ty,
                                            DINodeArray Annotations = nullptr);

    /// Create debugging information entry for a
    /// C++ static data member.
    /// \param Scope      Member scope.
    /// \param Name       Member name.
    /// \param File       File where this member is declared.
    /// \param LineNo     Line number.
    /// \param Ty         Type of the static member.
    /// \param Flags      Flags to encode member attribute, e.g. private.
    /// \param Val        Const initializer of the member.
    /// \param AlignInBits  Member alignment.
    DIDerivedType *createStaticMemberType(DIScope *Scope, StringRef Name,
                                          DIFile *File, unsigned LineNo,
                                          DIType *Ty, DINode::DIFlags Flags,
                                          Constant *Val,
                                          uint32_t AlignInBits = 0);

    /// Create debugging information entry for Objective-C
    /// instance variable.
    /// \param Name         Member name.
    /// \param File         File where this member is defined.
    /// \param LineNo       Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param OffsetInBits Member offset.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Ty           Parent type.
    /// \param PropertyNode Property associated with this ivar.
    DIDerivedType *createObjCIVar(StringRef Name, DIFile *File, unsigned LineNo,
                                  uint64_t SizeInBits, uint32_t AlignInBits,
                                  uint64_t OffsetInBits, DINode::DIFlags Flags,
                                  DIType *Ty, MDNode *PropertyNode);

    /// Create debugging information entry for Objective-C
    /// property.
    /// \param Name         Property name.
    /// \param File         File where this property is defined.
    /// \param LineNumber   Line number.
    /// \param GetterName   Name of the Objective C property getter selector.
    /// \param SetterName   Name of the Objective C property setter selector.
    /// \param PropertyAttributes Objective C property attributes.
    /// \param Ty           Type.
    DIObjCProperty *createObjCProperty(StringRef Name, DIFile *File,
                                       unsigned LineNumber,
                                       StringRef GetterName,
                                       StringRef SetterName,
                                       unsigned PropertyAttributes, DIType *Ty);

    /// Create debugging information entry for a class.
    /// \param Scope        Scope in which this class is defined.
    /// \param Name         Class name.
    /// \param File         File where this member is defined.
    /// \param LineNumber   Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param OffsetInBits Member offset.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Elements     class members.
    /// \param VTableHolder Debug info of the base class that contains vtable
    ///                     for this type. This is used in
    ///                     DW_AT_containing_type. See DWARF documentation
    ///                     for more info.
    /// \param TemplateParms Template type parameters.
    /// \param UniqueIdentifier A unique identifier for the class.
    DICompositeType *createClassType(
        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
        uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
        DINode::DIFlags Flags, DIType *DerivedFrom, DINodeArray Elements,
        DIType *VTableHolder = nullptr, MDNode *TemplateParms = nullptr,
        StringRef UniqueIdentifier = "");

    /// Create debugging information entry for a struct.
    /// \param Scope        Scope in which this struct is defined.
    /// \param Name         Struct name.
    /// \param File         File where this member is defined.
    /// \param LineNumber   Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Elements     Struct elements.
    /// \param RunTimeLang  Optional parameter, Objective-C runtime version.
    /// \param UniqueIdentifier A unique identifier for the struct.
    DICompositeType *createStructType(
        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
        uint64_t SizeInBits, uint32_t AlignInBits, DINode::DIFlags Flags,
        DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang = 0,
        DIType *VTableHolder = nullptr, StringRef UniqueIdentifier = "");
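
    // Illustrative sketch of the usual composite-type idiom: members need the
    // struct as their scope, so a replaceable temporary is created first and
    // its element array is filled in afterwards via replaceArrays (declared
    // further down).  CU, File, and IntTy are assumed from earlier examples.
    //
    //   DICompositeType *S = DIB.createReplaceableCompositeType(
    //       dwarf::DW_TAG_structure_type, "S", CU, File, /*Line=*/1);
    //   DIDerivedType *X = DIB.createMemberType(
    //       S, "x", File, /*LineNo=*/1, /*SizeInBits=*/32, /*AlignInBits=*/32,
    //       /*OffsetInBits=*/0, DINode::FlagZero, IntTy);
    //   DIB.replaceArrays(S, DIB.getOrCreateArray({X}));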

    /// Create debugging information entry for a union.
    /// \param Scope        Scope in which this union is defined.
    /// \param Name         Union name.
    /// \param File         File where this member is defined.
    /// \param LineNumber   Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Elements     Union elements.
    /// \param RunTimeLang  Optional parameter, Objective-C runtime version.
    /// \param UniqueIdentifier A unique identifier for the union.
    DICompositeType *createUnionType(DIScope *Scope, StringRef Name,
                                     DIFile *File, unsigned LineNumber,
                                     uint64_t SizeInBits, uint32_t AlignInBits,
                                     DINode::DIFlags Flags,
                                     DINodeArray Elements,
                                     unsigned RunTimeLang = 0,
                                     StringRef UniqueIdentifier = "");

    /// Create debugging information entry for a variant part.  A
    /// variant part normally has a discriminator (though this is not
    /// required) and a number of variant children.
    /// \param Scope        Scope in which this variant part is defined.
    /// \param Name         Variant part name.
    /// \param File         File where this member is defined.
    /// \param LineNumber   Line number.
    /// \param SizeInBits   Member size.
    /// \param AlignInBits  Member alignment.
    /// \param Flags        Flags to encode member attribute, e.g. private
    /// \param Discriminator Discriminant member
    /// \param Elements     Variant elements.
    /// \param UniqueIdentifier A unique identifier for the variant part.
    DICompositeType *createVariantPart(DIScope *Scope, StringRef Name,
                                       DIFile *File, unsigned LineNumber,
                                       uint64_t SizeInBits,
                                       uint32_t AlignInBits,
                                       DINode::DIFlags Flags,
                                       DIDerivedType *Discriminator,
                                       DINodeArray Elements,
                                       StringRef UniqueIdentifier = "");

    /// Create debugging information for template
    /// type parameter.
    /// \param Scope        Scope in which this type is defined.
    /// \param Name         Type parameter name.
    /// \param Ty           Parameter type.
    /// \param IsDefault    Whether this parameter is a default parameter.
    DITemplateTypeParameter *createTemplateTypeParameter(DIScope *Scope,
                                                         StringRef Name,
                                                         DIType *Ty,
                                                         bool IsDefault);

    /// Create debugging information for template
    /// value parameter.
    /// \param Scope        Scope in which this type is defined.
    /// \param Name         Value parameter name.
    /// \param Ty           Parameter type.
    /// \param IsDefault    Whether this parameter is a default parameter.
    /// \param Val          Constant parameter value.
    DITemplateValueParameter *
    createTemplateValueParameter(DIScope *Scope, StringRef Name, DIType *Ty,
                                 bool IsDefault, Constant *Val);

    /// Create debugging information for a template template parameter.
    /// \param Scope        Scope in which this type is defined.
    /// \param Name         Value parameter name.
    /// \param Ty           Parameter type.
    /// \param Val          The fully qualified name of the template.
    /// \param IsDefault    Parameter is default or not.
    DITemplateValueParameter *
    createTemplateTemplateParameter(DIScope *Scope, StringRef Name, DIType *Ty,
                                    StringRef Val, bool IsDefault = false);

    /// Create debugging information for a template parameter pack.
    /// \param Scope        Scope in which this type is defined.
    /// \param Name         Value parameter name.
    /// \param Ty           Parameter type.
    /// \param Val          An array of types in the pack.
    DITemplateValueParameter *createTemplateParameterPack(DIScope *Scope,
                                                          StringRef Name,
                                                          DIType *Ty,
                                                          DINodeArray Val);

    /// Create debugging information entry for an array.
    /// \param Size         Array size.
    /// \param AlignInBits  Alignment.
    /// \param Ty           Element type.
    /// \param Subscripts   Subscripts.
    /// \param DataLocation The location of the raw data of a descriptor-based
    ///                     Fortran array, either a DIExpression* or
    ///                     a DIVariable*.
    /// \param Associated   The associated attribute of a descriptor-based
    ///                     Fortran array, either a DIExpression* or
    ///                     a DIVariable*.
    /// \param Allocated    The allocated attribute of a descriptor-based
    ///                     Fortran array, either a DIExpression* or
    ///                     a DIVariable*.
    /// \param Rank         The rank attribute of a descriptor-based
    ///                     Fortran array, either a DIExpression* or
    ///                     a DIVariable*.
    DICompositeType *createArrayType(
        uint64_t Size, uint32_t AlignInBits, DIType *Ty, DINodeArray Subscripts,
        PointerUnion<DIExpression *, DIVariable *> DataLocation = nullptr,
        PointerUnion<DIExpression *, DIVariable *> Associated = nullptr,
        PointerUnion<DIExpression *, DIVariable *> Allocated = nullptr,
        PointerUnion<DIExpression *, DIVariable *> Rank = nullptr);
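
    // Illustrative sketch: an array of ten ints is a subrange plus element
    // type (IntTy assumed from the createBasicType example; Size is in bits).
    //
    //   DISubrange *Range = DIB.getOrCreateSubrange(/*Lo=*/0, /*Count=*/10);
    //   DICompositeType *ArrTy = DIB.createArrayType(
    //       /*Size=*/320, /*AlignInBits=*/32, IntTy,
    //       DIB.getOrCreateArray({Range}));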

    /// Create debugging information entry for a vector type.
    /// \param Size         Array size.
    /// \param AlignInBits  Alignment.
    /// \param Ty           Element type.
    /// \param Subscripts   Subscripts.
    DICompositeType *createVectorType(uint64_t Size, uint32_t AlignInBits,
                                      DIType *Ty, DINodeArray Subscripts);

    /// Create debugging information entry for an
    /// enumeration.
    /// \param Scope          Scope in which this enumeration is defined.
    /// \param Name           Enumeration name.
    /// \param File           File where this member is defined.
    /// \param LineNumber     Line number.
    /// \param SizeInBits     Member size.
    /// \param AlignInBits    Member alignment.
    /// \param Elements       Enumeration elements.
    /// \param UnderlyingType Underlying type of a C++11/ObjC fixed enum.
    /// \param UniqueIdentifier A unique identifier for the enum.
    /// \param IsScoped       Boolean flag indicating whether this is a
    ///                       C++11/ObjC 'enum class'.
    DICompositeType *createEnumerationType(
        DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
        uint64_t SizeInBits, uint32_t AlignInBits, DINodeArray Elements,
        DIType *UnderlyingType, StringRef UniqueIdentifier = "",
        bool IsScoped = false);

    /// Create debugging information entry for a set.
    /// \param Scope          Scope in which this set is defined.
    /// \param Name           Set name.
    /// \param File           File where this set is defined.
    /// \param LineNo         Line number.
    /// \param SizeInBits     Set size.
    /// \param AlignInBits    Set alignment.
    /// \param Ty             Base type of the set.
    DIDerivedType *createSetType(DIScope *Scope, StringRef Name, DIFile *File,
                                 unsigned LineNo, uint64_t SizeInBits,
                                 uint32_t AlignInBits, DIType *Ty);

    /// Create subroutine type.
    /// \param ParameterTypes  An array of subroutine parameter types. The
    ///                        return type is at index 0.
    /// \param Flags           E.g.: LValueReference.
    ///                        These flags are used to emit DWARF attributes.
    /// \param CC              Calling convention, e.g. dwarf::DW_CC_normal
    DISubroutineType *
    createSubroutineType(DITypeRefArray ParameterTypes,
                         DINode::DIFlags Flags = DINode::FlagZero,
                         unsigned CC = 0);
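
    // Illustrative sketch: "int(int)" as a subroutine type, with the return
    // type at index 0 of the type array (IntTy assumed from earlier; use
    // nullptr at index 0 for a void return).
    //
    //   DISubroutineType *FnTy = DIB.createSubroutineType(
    //       DIB.getOrCreateTypeArray({IntTy, IntTy}));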

    /// Create a distinct clone of \p SP with FlagArtificial set.
    static DISubprogram *createArtificialSubprogram(DISubprogram *SP);

    /// Create a uniqued clone of \p Ty with FlagArtificial set.
    static DIType *createArtificialType(DIType *Ty);

    /// Create a uniqued clone of \p Ty with FlagObjectPointer and
    /// FlagArtificial set.
    static DIType *createObjectPointerType(DIType *Ty);

    /// Create a permanent forward-declared type.
    DICompositeType *createForwardDecl(unsigned Tag, StringRef Name,
                                       DIScope *Scope, DIFile *F, unsigned Line,
                                       unsigned RuntimeLang = 0,
                                       uint64_t SizeInBits = 0,
                                       uint32_t AlignInBits = 0,
                                       StringRef UniqueIdentifier = "");

    /// Create a temporary forward-declared type.
    DICompositeType *createReplaceableCompositeType(
        unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
        unsigned RuntimeLang = 0, uint64_t SizeInBits = 0,
        uint32_t AlignInBits = 0, DINode::DIFlags Flags = DINode::FlagFwdDecl,
        StringRef UniqueIdentifier = "", DINodeArray Annotations = nullptr);

    /// Retain DIScope* in a module even if it is not referenced
    /// through debug info anchors.
    void retainType(DIScope *T);

    /// Create unspecified parameter type
    /// for a subroutine type.
    DIBasicType *createUnspecifiedParameter();

    /// Get a DINodeArray, create one if required.
    DINodeArray getOrCreateArray(ArrayRef<Metadata *> Elements);

    /// Get a DIMacroNodeArray, create one if required.
    DIMacroNodeArray getOrCreateMacroArray(ArrayRef<Metadata *> Elements);

    /// Get a DITypeRefArray, create one if required.
    DITypeRefArray getOrCreateTypeArray(ArrayRef<Metadata *> Elements);

    /// Create a descriptor for a value range.  This
    /// implicitly uniques the values returned.
    DISubrange *getOrCreateSubrange(int64_t Lo, int64_t Count);
    DISubrange *getOrCreateSubrange(int64_t Lo, Metadata *CountNode);
    DISubrange *getOrCreateSubrange(Metadata *Count, Metadata *LowerBound,
                                    Metadata *UpperBound, Metadata *Stride);

    DIGenericSubrange *
    getOrCreateGenericSubrange(DIGenericSubrange::BoundType Count,
                               DIGenericSubrange::BoundType LowerBound,
                               DIGenericSubrange::BoundType UpperBound,
                               DIGenericSubrange::BoundType Stride);

    /// Create a new descriptor for the specified variable.
    /// \param Context     Variable scope.
    /// \param Name        Name of the variable.
    /// \param LinkageName Mangled name of the variable.
    /// \param File        File where this variable is defined.
    /// \param LineNo      Line number.
    /// \param Ty          Variable Type.
    /// \param IsLocalToUnit Boolean flag indicating whether this variable is
    ///                      externally visible or not.
    /// \param Expr        The location of the global relative to the attached
    ///                    GlobalVariable.
    /// \param Decl        Reference to the corresponding declaration.
    /// \param AlignInBits Variable alignment (or 0 if no alignment attribute
    ///                    was specified).
    DIGlobalVariableExpression *createGlobalVariableExpression(
        DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
        unsigned LineNo, DIType *Ty, bool IsLocalToUnit, bool isDefined = true,
        DIExpression *Expr = nullptr, MDNode *Decl = nullptr,
        MDTuple *TemplateParams = nullptr, uint32_t AlignInBits = 0,
        DINodeArray Annotations = nullptr);

    /// Identical to createGlobalVariableExpression
    /// except that the resulting DbgNode is temporary and meant to be RAUWed.
    DIGlobalVariable *createTempGlobalVariableFwdDecl(
        DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
        unsigned LineNo, DIType *Ty, bool IsLocalToUnit, MDNode *Decl = nullptr,
        MDTuple *TemplateParams = nullptr, uint32_t AlignInBits = 0);

    /// Create a new descriptor for an auto variable.  This is a local variable
    /// that is not a subprogram parameter.
    ///
    /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
    /// leads to a \a DISubprogram.
    ///
    /// If \c AlwaysPreserve, this variable will be referenced from its
    /// containing subprogram, and will survive some optimizations.
    DILocalVariable *
    createAutoVariable(DIScope *Scope, StringRef Name, DIFile *File,
                       unsigned LineNo, DIType *Ty, bool AlwaysPreserve = false,
                       DINode::DIFlags Flags = DINode::FlagZero,
                       uint32_t AlignInBits = 0);
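
    // Illustrative sketch (SP is an assumed enclosing DISubprogram; File and
    // IntTy come from earlier examples): a local "x" declared at line 2.
    //
    //   DILocalVariable *X = DIB.createAutoVariable(
    //       SP, "x", File, /*LineNo=*/2, IntTy, /*AlwaysPreserve=*/true);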

    /// Create a new descriptor for a label.
    ///
    /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
    /// leads to a \a DISubprogram.
    DILabel *
    createLabel(DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNo,
                bool AlwaysPreserve = false);

    /// Create a new descriptor for a parameter variable.
    ///
    /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
    /// leads to a \a DISubprogram.
    ///
    /// \c ArgNo is the index (starting from \c 1) of this variable in the
    /// subprogram parameters.  \c ArgNo should not conflict with other
    /// parameters of the same subprogram.
    ///
    /// If \c AlwaysPreserve, this variable will be referenced from its
    /// containing subprogram, and will survive some optimizations.
    DILocalVariable *
    createParameterVariable(DIScope *Scope, StringRef Name, unsigned ArgNo,
                            DIFile *File, unsigned LineNo, DIType *Ty,
                            bool AlwaysPreserve = false,
                            DINode::DIFlags Flags = DINode::FlagZero,
                            DINodeArray Annotations = nullptr);

    /// Create a new descriptor for the specified
    /// variable which has a complex address expression for its address.
    /// \param Addr        An array of complex address operations.
    DIExpression *createExpression(ArrayRef<uint64_t> Addr = std::nullopt);

    /// Create an expression for a variable that does not have an address, but
    /// does have a constant value.
    DIExpression *createConstantValueExpression(uint64_t Val) {
      return DIExpression::get(
          VMContext, {dwarf::DW_OP_constu, Val, dwarf::DW_OP_stack_value});
    }
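
    // Illustrative sketch: an expression locating a variable 8 bytes past its
    // storage pointer (a plain DIB.createExpression() denotes the storage
    // itself).
    //
    //   uint64_t Ops[] = {dwarf::DW_OP_plus_uconst, 8};
    //   DIExpression *OffsetExpr = DIB.createExpression(Ops);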

    /// Create a new descriptor for the specified subprogram.
    /// See comments in DISubprogram* for descriptions of these fields.
    /// \param Scope         Function scope.
    /// \param Name          Function name.
    /// \param LinkageName   Mangled function name.
    /// \param File          File where this variable is defined.
    /// \param LineNo        Line number.
    /// \param Ty            Function type.
    /// \param ScopeLine     Line number of the beginning of the scope this
    ///                      subprogram opens.
    /// \param Flags         e.g. is this function prototyped or not.
    ///                      These flags are used to emit DWARF attributes.
    /// \param SPFlags       Additional flags specific to subprograms.
    /// \param TParams       Function template parameters.
    /// \param ThrownTypes   Exception types this function may throw.
    /// \param Annotations   Attribute Annotations.
    /// \param TargetFuncName The name of the target function if this is
    ///                       a trampoline.
    DISubprogram *
    createFunction(DIScope *Scope, StringRef Name, StringRef LinkageName,
                   DIFile *File, unsigned LineNo, DISubroutineType *Ty,
                   unsigned ScopeLine, DINode::DIFlags Flags = DINode::FlagZero,
                   DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagZero,
                   DITemplateParameterArray TParams = nullptr,
                   DISubprogram *Decl = nullptr,
                   DITypeArray ThrownTypes = nullptr,
                   DINodeArray Annotations = nullptr,
                   StringRef TargetFuncName = "");
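
    // Illustrative sketch (CU, File, and FnTy are assumed from earlier
    // examples; F is the corresponding llvm::Function):
    //
    //   DISubprogram *SP = DIB.createFunction(
    //       CU, "foo", /*LinkageName=*/"foo", File, /*LineNo=*/1, FnTy,
    //       /*ScopeLine=*/1, DINode::FlagPrototyped,
    //       DISubprogram::SPFlagDefinition);
    //   F->setSubprogram(SP);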

    /// Identical to createFunction,
    /// except that the resulting DbgNode is meant to be RAUWed.
    DISubprogram *createTempFunctionFwdDecl(
        DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
        unsigned LineNo, DISubroutineType *Ty, unsigned ScopeLine,
        DINode::DIFlags Flags = DINode::FlagZero,
        DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagZero,
        DITemplateParameterArray TParams = nullptr,
        DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);

    /// Create a new descriptor for the specified C++ method.
    /// See comments in \a DISubprogram* for descriptions of these fields.
    /// \param Scope         Function scope.
    /// \param Name          Function name.
    /// \param LinkageName   Mangled function name.
    /// \param File          File where this variable is defined.
    /// \param LineNo        Line number.
    /// \param Ty            Function type.
    /// \param VTableIndex   Index of this method in the virtual table, or
    ///                      -1u if unrepresentable.
    /// \param ThisAdjustment
    ///                      MS ABI-specific adjustment of 'this' that occurs
    ///                      in the prologue.
    /// \param VTableHolder  Type that holds vtable.
    /// \param Flags         e.g. is this function prototyped or not.
    ///                      These flags are used to emit DWARF attributes.
    /// \param SPFlags       Additional flags specific to subprograms.
    /// \param TParams       Function template parameters.
    /// \param ThrownTypes   Exception types this function may throw.
    DISubprogram *
    createMethod(DIScope *Scope, StringRef Name, StringRef LinkageName,
                 DIFile *File, unsigned LineNo, DISubroutineType *Ty,
                 unsigned VTableIndex = 0, int ThisAdjustment = 0,
                 DIType *VTableHolder = nullptr,
                 DINode::DIFlags Flags = DINode::FlagZero,
                 DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagZero,
                 DITemplateParameterArray TParams = nullptr,
                 DITypeArray ThrownTypes = nullptr);

    /// Create common block entry for a Fortran common block.
    /// \param Scope       Scope of this common block.
    /// \param decl        Global variable declaration.
    /// \param Name        The name of this common block.
    /// \param File        The file in which this common block is defined.
    /// \param LineNo      Line number.
    DICommonBlock *createCommonBlock(DIScope *Scope, DIGlobalVariable *decl,
                                     StringRef Name, DIFile *File,
                                     unsigned LineNo);

    /// This creates a new descriptor for a namespace with the specified
    /// parent scope.
    /// \param Scope       Namespace scope.
    /// \param Name        Name of this namespace.
    /// \param ExportSymbols True for C++ inline namespaces.
    DINamespace *createNameSpace(DIScope *Scope, StringRef Name,
                                 bool ExportSymbols);

    /// This creates a new descriptor for a module with the specified
    /// parent scope.
    /// \param Scope       Parent scope.
    /// \param Name        Name of this module.
    /// \param ConfigurationMacros
    ///                    A space-separated shell-quoted list of -D macro
    ///                    definitions as they would appear on a command line.
    /// \param IncludePath The path to the module map file.
    /// \param APINotesFile The path to an API notes file for this module.
    /// \param File        Source file of the module.
    ///                    Used for Fortran modules.
    /// \param LineNo      Source line number of the module.
    ///                    Used for Fortran modules.
    /// \param IsDecl      Whether this is a module declaration; defaults to
    ///                    false. When set to true, only Scope and Name are
    ///                    required, as this entry is just a hint for the
    ///                    debugger to find the corresponding definition in
    ///                    the global scope.
    DIModule *createModule(DIScope *Scope, StringRef Name,
                           StringRef ConfigurationMacros, StringRef IncludePath,
                           StringRef APINotesFile = {}, DIFile *File = nullptr,
                           unsigned LineNo = 0, bool IsDecl = false);

    /// This creates a descriptor for a lexical block with a new file
    /// attached. This merely extends the existing
    /// lexical block as it crosses a file.
    /// \param Scope       Lexical block.
    /// \param File        Source file.
    /// \param Discriminator DWARF path discriminator value.
    DILexicalBlockFile *createLexicalBlockFile(DIScope *Scope, DIFile *File,
                                               unsigned Discriminator = 0);

    /// This creates a descriptor for a lexical block with the
    /// specified parent context.
    /// \param Scope         Parent lexical scope.
    /// \param File          Source file.
    /// \param Line          Line number.
    /// \param Col           Column number.
    DILexicalBlock *createLexicalBlock(DIScope *Scope, DIFile *File,
                                       unsigned Line, unsigned Col);

    /// Create a descriptor for an imported module.
    /// \param Context        The scope this module is imported into
    /// \param NS             The namespace being imported here.
    /// \param File           File where the declaration is located.
    /// \param Line           Line number of the declaration.
    /// \param Elements       Renamed elements.
    DIImportedEntity *createImportedModule(DIScope *Context, DINamespace *NS,
                                           DIFile *File, unsigned Line,
                                           DINodeArray Elements = nullptr);

    /// Create a descriptor for an imported module.
    /// \param Context The scope this module is imported into.
    /// \param NS      An aliased namespace.
    /// \param File    File where the declaration is located.
    /// \param Line    Line number of the declaration.
    /// \param Elements       Renamed elements.
    DIImportedEntity *createImportedModule(DIScope *Context,
                                           DIImportedEntity *NS, DIFile *File,
                                           unsigned Line,
                                           DINodeArray Elements = nullptr);

    /// Create a descriptor for an imported module.
    /// \param Context        The scope this module is imported into.
    /// \param M              The module being imported here
    /// \param File           File where the declaration is located.
    /// \param Line           Line number of the declaration.
    /// \param Elements       Renamed elements.
    DIImportedEntity *createImportedModule(DIScope *Context, DIModule *M,
                                           DIFile *File, unsigned Line,
                                           DINodeArray Elements = nullptr);

    /// Create a descriptor for an imported function, type, or variable.
    /// \param Context The scope this module is imported into.
    /// \param Decl    The declaration (or definition) of a function, type, or
    ///                variable.
    /// \param File    File where the declaration is located.
    /// \param Line    Line number of the declaration.
    /// \param Elements       Renamed elements.
    DIImportedEntity *createImportedDeclaration(DIScope *Context, DINode *Decl,
                                                DIFile *File, unsigned Line,
                                                StringRef Name = "",
                                                DINodeArray Elements = nullptr);

    /// Insert a new llvm.dbg.declare intrinsic call.
    /// \param Storage     llvm::Value of the variable.
    /// \param VarInfo     Variable's debug info descriptor.
    /// \param Expr        A complex location expression.
    /// \param DL          Debug info location.
    /// \param InsertAtEnd Location for the new intrinsic.
    Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
                               DIExpression *Expr, const DILocation *DL,
                               BasicBlock *InsertAtEnd);
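
    // Illustrative sketch (Alloca, Ctx, and the variable X from the
    // createAutoVariable example are assumptions): attach a dbg.declare for
    // X to the block containing its alloca.
    //
    //   DIB.insertDeclare(Alloca, X, DIB.createExpression(),
    //                     DILocation::get(Ctx, /*Line=*/2, /*Column=*/1, SP),
    //                     Alloca->getParent());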

    /// Insert a new llvm.dbg.assign intrinsic call.
    /// \param LinkedInstr   Instruction with a DIAssignID to link with the new
    ///                      intrinsic. The intrinsic will be inserted after
    ///                      this instruction.
    /// \param Val           The value component of this dbg.assign.
    /// \param SrcVar        Variable's debug info descriptor.
    /// \param ValExpr       A complex location expression to modify \p Val.
    /// \param Addr          The address component (store destination).
    /// \param AddrExpr      A complex location expression to modify \p Addr.
    ///                      NOTE: \p ValExpr carries the FragInfo for the
    ///                      variable.
    /// \param DL            Debug info location, usually: (line: 0,
    ///                      column: 0, scope: var-decl-scope). See
    ///                      getDebugValueLoc.
    DbgAssignIntrinsic *insertDbgAssign(Instruction *LinkedInstr, Value *Val,
                                        DILocalVariable *SrcVar,
                                        DIExpression *ValExpr, Value *Addr,
                                        DIExpression *AddrExpr,
                                        const DILocation *DL);

    /// Insert a new llvm.dbg.declare intrinsic call.
    /// \param Storage      llvm::Value of the variable.
    /// \param VarInfo      Variable's debug info descriptor.
    /// \param Expr         A complex location expression.
    /// \param DL           Debug info location.
    /// \param InsertBefore Location for the new intrinsic.
    Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
                               DIExpression *Expr, const DILocation *DL,
                               Instruction *InsertBefore);

    /// Insert a new llvm.dbg.label intrinsic call.
    /// \param LabelInfo    Label's debug info descriptor.
    /// \param DL           Debug info location.
    /// \param InsertBefore Location for the new intrinsic.
    Instruction *insertLabel(DILabel *LabelInfo, const DILocation *DL,
                             Instruction *InsertBefore);

    /// Insert a new llvm.dbg.label intrinsic call.
    /// \param LabelInfo    Label's debug info descriptor.
    /// \param DL           Debug info location.
    /// \param InsertAtEnd Location for the new intrinsic.
    Instruction *insertLabel(DILabel *LabelInfo, const DILocation *DL,
                             BasicBlock *InsertAtEnd);

    /// Insert a new llvm.dbg.value intrinsic call.
    /// \param Val          llvm::Value of the variable.
    /// \param VarInfo      Variable's debug info descriptor.
    /// \param Expr         A complex location expression.
    /// \param DL           Debug info location.
    /// \param InsertAtEnd Location for the new intrinsic.
    Instruction *insertDbgValueIntrinsic(llvm::Value *Val,
                                         DILocalVariable *VarInfo,
                                         DIExpression *Expr,
                                         const DILocation *DL,
                                         BasicBlock *InsertAtEnd);

    /// Insert a new llvm.dbg.value intrinsic call.
    /// \param Val          llvm::Value of the variable.
    /// \param VarInfo      Variable's debug info descriptor.
    /// \param Expr         A complex location expression.
    /// \param DL           Debug info location.
    /// \param InsertBefore Location for the new intrinsic.
    Instruction *insertDbgValueIntrinsic(llvm::Value *Val,
                                         DILocalVariable *VarInfo,
                                         DIExpression *Expr,
                                         const DILocation *DL,
                                         Instruction *InsertBefore);

    /// Replace the vtable holder in the given type.
    ///
    /// If this creates a self reference, it may orphan some unresolved cycles
    /// in the operands of \c T, so \a DIBuilder needs to track that.
    void replaceVTableHolder(DICompositeType *&T,
                             DIType *VTableHolder);

    /// Replace arrays on a composite type.
    ///
    /// If \c T is resolved, but the arrays aren't -- which can happen if \c T
    /// has a self-reference -- \a DIBuilder needs to track the array to
    /// resolve cycles.
    void replaceArrays(DICompositeType *&T, DINodeArray Elements,
                       DINodeArray TParams = DINodeArray());

    /// Replace a temporary node.
    ///
    /// Call \a MDNode::replaceAllUsesWith() on \c N, replacing it with \c
    /// Replacement.
    ///
    /// If \c Replacement is the same as \c N.get(), instead call \a
    /// MDNode::replaceWithUniqued().  In this case, the uniqued node could
    /// have a different address, so we return the final address.
    template <class NodeTy>
    NodeTy *replaceTemporary(TempMDNode &&N, NodeTy *Replacement) {
      if (N.get() == Replacement)
        return cast<NodeTy>(MDNode::replaceWithUniqued(std::move(N)));

      N->replaceAllUsesWith(Replacement);
      return Replacement;
    }
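
    // Illustrative sketch of the forward-declaration idiom (CU and File are
    // assumed from earlier examples; Def is the assumed full definition).
    // The Temp wrapper takes ownership of the temporary node before it is
    // replaced.
    //
    //   DICompositeType *Fwd = DIB.createReplaceableCompositeType(
    //       dwarf::DW_TAG_structure_type, "S", CU, File, /*Line=*/1);
    //   // ... later, once the full definition Def exists ...
    //   Def = DIB.replaceTemporary(TempDICompositeType(Fwd), Def);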
  };

  // Create wrappers for C Binding types (see CBindingWrapping.h).
  DEFINE_ISA_CONVERSION_FUNCTIONS(DIBuilder, LLVMDIBuilderRef)

} // end namespace llvm

#endif // LLVM_IR_DIBUILDER_H
//===-- IR/VPIntrinsics.def - Describes llvm.vp.* Intrinsics -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various Vector Predication intrinsics.
// This is used as a central place for enumerating the different instructions
// and should eventually be the place to put comments about the instructions.
//
//===----------------------------------------------------------------------===//

// NOTE: NO INCLUDE GUARD DESIRED!

// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
// Register a VP intrinsic and begin its property scope.
// All VP intrinsic scopes are top level, i.e., it is illegal to place a
// BEGIN_REGISTER_VP_INTRINSIC within a VP intrinsic scope.
// \p VPID     The VP intrinsic id.
// \p MASKPOS  The mask operand position.
// \p EVLPOS   The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_INTRINSIC
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)
#endif

// End the property scope of a VP intrinsic.
#ifndef END_REGISTER_VP_INTRINSIC
#define END_REGISTER_VP_INTRINSIC(VPID)
#endif

// Register a new VP SDNode and begin its property scope.
// When the SDNode scope is nested within a VP intrinsic scope, it is
// implicitly registered as the canonical SDNode for this VP intrinsic.  Where
// one VP intrinsic maps directly to one SDNode of the same name with the same
// operands, the helper macros below open the property scopes for both the VP
// intrinsic and the SDNode at once.
// \p VPSD     The SelectionDAG Node id (e.g. VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing.
//             If LEGALPOS < 0, then the return type given by
//             TheNode->getValueType(-1-LEGALPOS) is used.
// \p TDNAME   The name of the TableGen definition of this SDNode.
// \p MASKPOS  The mask operand position.
// \p EVLPOS   The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_SDNODE
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)
#endif

// End the property scope of a new VP SDNode.
#ifndef END_REGISTER_VP_SDNODE
#define END_REGISTER_VP_SDNODE(VPSD)
#endif

// Helper macro to set up the mapping from VP intrinsic to ISD opcode.
// Note: More than one VP intrinsic may map to one ISD opcode.
#ifndef HELPER_MAP_VPID_TO_VPSD
#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)
#endif

// Helper macros for the common "1:1 - Intrinsic : SDNode" case.
//
// Each use registers one VP intrinsic that maps directly to one SDNode of the
// same name.  Since the operands are also the same, we open the property
// scopes for both the VPIntrinsic and the SDNode at once.
//
// \p VPID     The canonical name (e.g. `vp_add`, which at the same time is the
//             name of the intrinsic and the TableGen def of the SDNode).
// \p MASKPOS  The mask operand position.
// \p EVLPOS   The explicit vector length operand position.
// \p VPSD     The SelectionDAG Node id (e.g. VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing
//             this SDNode. This can be `-1`, in which case the return type of
//             the SDNode is used.
#define BEGIN_REGISTER_VP(VPID, MASKPOS, EVLPOS, VPSD, LEGALPOS)               \
  BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)                           \
  BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, VPID, MASKPOS, EVLPOS)              \
  HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)

#define END_REGISTER_VP(VPID, VPSD)                                            \
  END_REGISTER_VP_INTRINSIC(VPID)                                              \
  END_REGISTER_VP_SDNODE(VPSD)
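
// Illustrative sketch (consumer side, not part of this file): a client
// defines just the macros it needs and then includes this file; every other
// macro defaults to a no-op via the definitions above.  The function name
// below is an assumption; the pattern mirrors how LLVM itself builds
// switches over all VP intrinsic ids.
//
//   static bool isVPIntrinsic(Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       return false;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)                \
//     case Intrinsic::VPID:                                                   \
//       return true;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//   }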

// The following macros attach properties to the scope they are placed in. This
// assigns the property to the VP Intrinsic and/or SDNode that belongs to the
// scope.
//
// Property Macros {

// The intrinsic and/or SDNode has the same function as this LLVM IR Opcode.
// \p OPC      The opcode of the instruction with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_OPC
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC)
#endif

// Whether the intrinsic may have a rounding mode or exception behavior operand
// bundle.
// \p HASROUND   '1' if the intrinsic can have a rounding mode operand bundle,
//               '0' otherwise.
// \p HASEXCEPT  '1' if the intrinsic can have an exception behavior operand
//               bundle, '0' otherwise.
// \p INTRINID  The constrained FP intrinsic this VP intrinsic corresponds to.
#ifndef VP_PROPERTY_CONSTRAINEDFP
#define VP_PROPERTY_CONSTRAINEDFP(HASROUND, HASEXCEPT, INTRINID)
#endif

// The intrinsic and/or SDNode has the same function as this ISD Opcode.
// \p SDOPC      The opcode of the instruction with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_SDOPC
#define VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC)
#endif

// Map this VP intrinsic to its canonical functional intrinsic.
// \p INTRIN     The non-VP intrinsics with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_INTRINSIC
#define VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)
#endif

// This VP Intrinsic is a memory operation.
// The pointer arg is at POINTERPOS and the data arg is at DATAPOS.
#ifndef VP_PROPERTY_MEMOP
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS)
#endif

// Map this VP reduction intrinsic to its reduction operand positions.
#ifndef VP_PROPERTY_REDUCTION
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS)
#endif

// A property to infer VP binary-op SDNode opcodes automatically.
#ifndef VP_PROPERTY_BINARYOP
#define VP_PROPERTY_BINARYOP
#endif

// A property to infer VP type casts automatically.
#ifndef VP_PROPERTY_CASTOP
#define VP_PROPERTY_CASTOP
#endif

// This VP Intrinsic is a comparison operation.
// The condition code arg is at CCPOS and accepts floating-point condition
// codes if ISFP is set, else it accepts integer condition codes.
#ifndef VP_PROPERTY_CMP
#define VP_PROPERTY_CMP(CCPOS, ISFP)
#endif

// } Property Macros

///// Integer Arithmetic {

// Specialized helper macro for integer binary operators (%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_INT_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_BINARY_INT_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_INT_VP(VPID, VPSD, IROPC, SDOPC)                \
  BEGIN_REGISTER_VP(VPID, 2, 3, VPSD, -1)                                      \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC)                                          \
  VP_PROPERTY_BINARYOP                                                         \
  END_REGISTER_VP(VPID, VPSD)
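
// For instance, the first use below, HELPER_REGISTER_BINARY_INT_VP(vp_add,
// VP_ADD, Add, ADD), expands to the following property scope:
//
//   BEGIN_REGISTER_VP(vp_add, 2, 3, VP_ADD, -1)
//   VP_PROPERTY_FUNCTIONAL_OPC(Add)
//   VP_PROPERTY_FUNCTIONAL_SDOPC(ADD)
//   VP_PROPERTY_BINARYOP
//   END_REGISTER_VP(vp_add, VP_ADD)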

// llvm.vp.add(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add, ADD)

// llvm.vp.and(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_and, VP_AND, And, AND)

// llvm.vp.ashr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_ashr, VP_ASHR, AShr, SRA)

// llvm.vp.lshr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_lshr, VP_LSHR, LShr, SRL)

// llvm.vp.mul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_mul, VP_MUL, Mul, MUL)

// llvm.vp.or(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_or, VP_OR, Or, OR)

// llvm.vp.sdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sdiv, VP_SDIV, SDiv, SDIV)

// llvm.vp.shl(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_shl, VP_SHL, Shl, SHL)

// llvm.vp.srem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_srem, VP_SREM, SRem, SREM)

// llvm.vp.sub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sub, VP_SUB, Sub, SUB)

// llvm.vp.udiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_udiv, VP_UDIV, UDiv, UDIV)

// llvm.vp.urem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_urem, VP_UREM, URem, UREM)

// llvm.vp.xor(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_xor, VP_XOR, Xor, XOR)

#undef HELPER_REGISTER_BINARY_INT_VP

// llvm.vp.smin(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_smin, 2, 3, VP_SMIN, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(SMIN)
END_REGISTER_VP(vp_smin, VP_SMIN)

// llvm.vp.smax(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_smax, 2, 3, VP_SMAX, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(SMAX)
END_REGISTER_VP(vp_smax, VP_SMAX)

// llvm.vp.umin(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_umin, 2, 3, VP_UMIN, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(UMIN)
END_REGISTER_VP(vp_umin, VP_UMIN)

// llvm.vp.umax(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_umax, 2, 3, VP_UMAX, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(UMAX)
END_REGISTER_VP(vp_umax, VP_UMAX)

// llvm.vp.abs(x,is_int_min_poison,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_abs, 2, 3)
BEGIN_REGISTER_VP_SDNODE(VP_ABS, -1, vp_abs, 1, 2)
HELPER_MAP_VPID_TO_VPSD(vp_abs, VP_ABS)
VP_PROPERTY_FUNCTIONAL_SDOPC(ABS)
END_REGISTER_VP(vp_abs, VP_ABS)

// llvm.vp.bswap(x,mask,vlen)
BEGIN_REGISTER_VP(vp_bswap, 1, 2, VP_BSWAP, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(BSWAP)
END_REGISTER_VP(vp_bswap, VP_BSWAP)

// llvm.vp.bitreverse(x,mask,vlen)
BEGIN_REGISTER_VP(vp_bitreverse, 1, 2, VP_BITREVERSE, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(BITREVERSE)
END_REGISTER_VP(vp_bitreverse, VP_BITREVERSE)

// llvm.vp.ctpop(x,mask,vlen)
BEGIN_REGISTER_VP(vp_ctpop, 1, 2, VP_CTPOP, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(CTPOP)
END_REGISTER_VP(vp_ctpop, VP_CTPOP)

// llvm.vp.ctlz(x,is_zero_poison,mask,vlen)
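// Note: two SDNodes are registered for this single intrinsic: VP_CTLZ for the
// general form and VP_CTLZ_ZERO_UNDEF for the is_zero_poison=true form. The
// same scheme is used for vp_cttz below.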
BEGIN_REGISTER_VP_INTRINSIC(vp_ctlz, 2, 3)
BEGIN_REGISTER_VP_SDNODE(VP_CTLZ, -1, vp_ctlz, 1, 2)
VP_PROPERTY_FUNCTIONAL_SDOPC(CTLZ)
END_REGISTER_VP_SDNODE(VP_CTLZ)
BEGIN_REGISTER_VP_SDNODE(VP_CTLZ_ZERO_UNDEF, -1, vp_ctlz_zero_undef, 1, 2)
END_REGISTER_VP_SDNODE(VP_CTLZ_ZERO_UNDEF)
END_REGISTER_VP_INTRINSIC(vp_ctlz)

// llvm.vp.cttz(x,is_zero_poison,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_cttz, 2, 3)
BEGIN_REGISTER_VP_SDNODE(VP_CTTZ, -1, vp_cttz, 1, 2)
VP_PROPERTY_FUNCTIONAL_SDOPC(CTTZ)
END_REGISTER_VP_SDNODE(VP_CTTZ)
BEGIN_REGISTER_VP_SDNODE(VP_CTTZ_ZERO_UNDEF, -1, vp_cttz_zero_undef, 1, 2)
END_REGISTER_VP_SDNODE(VP_CTTZ_ZERO_UNDEF)
END_REGISTER_VP_INTRINSIC(vp_cttz)

// llvm.vp.fshl(x,y,z,mask,vlen)
BEGIN_REGISTER_VP(vp_fshl, 3, 4, VP_FSHL, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FSHL)
END_REGISTER_VP(vp_fshl, VP_FSHL)

// llvm.vp.fshr(x,y,z,mask,vlen)
BEGIN_REGISTER_VP(vp_fshr, 3, 4, VP_FSHR, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FSHR)
END_REGISTER_VP(vp_fshr, VP_FSHR)
///// } Integer Arithmetic

///// Floating-Point Arithmetic {

// Specialized helper macro for floating-point binary operators
// <operation>(%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_FP_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_BINARY_FP_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_FP_VP(OPSUFFIX, VPSD, IROPC, SDOPC)             \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 2, 3, VPSD, -1)                             \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_##OPSUFFIX)         \
  VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC)                                          \
  VP_PROPERTY_BINARYOP                                                         \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)

// llvm.vp.fadd(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fadd, VP_FADD, FAdd, FADD)

// llvm.vp.fsub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fsub, VP_FSUB, FSub, FSUB)

// llvm.vp.fmul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fmul, VP_FMUL, FMul, FMUL)

// llvm.vp.fdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fdiv, VP_FDIV, FDiv, FDIV)

// llvm.vp.frem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(frem, VP_FREM, FRem, FREM)

#undef HELPER_REGISTER_BINARY_FP_VP

// llvm.vp.fneg(x,mask,vlen)
BEGIN_REGISTER_VP(vp_fneg, 1, 2, VP_FNEG, -1)
VP_PROPERTY_FUNCTIONAL_OPC(FNeg)
VP_PROPERTY_FUNCTIONAL_SDOPC(FNEG)
END_REGISTER_VP(vp_fneg, VP_FNEG)

// llvm.vp.fabs(x,mask,vlen)
BEGIN_REGISTER_VP(vp_fabs, 1, 2, VP_FABS, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FABS)
END_REGISTER_VP(vp_fabs, VP_FABS)

// llvm.vp.sqrt(x,mask,vlen)
BEGIN_REGISTER_VP(vp_sqrt, 1, 2, VP_SQRT, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FSQRT)
END_REGISTER_VP(vp_sqrt, VP_SQRT)

// llvm.vp.fma(x,y,z,mask,vlen)
BEGIN_REGISTER_VP(vp_fma, 3, 4, VP_FMA, -1)
VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fma)
VP_PROPERTY_FUNCTIONAL_SDOPC(FMA)
END_REGISTER_VP(vp_fma, VP_FMA)

// llvm.vp.fmuladd(x,y,z,mask,vlen)
BEGIN_REGISTER_VP(vp_fmuladd, 3, 4, VP_FMULADD, -1)
VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fmuladd)
VP_PROPERTY_FUNCTIONAL_SDOPC(FMAD)
END_REGISTER_VP(vp_fmuladd, VP_FMULADD)

// llvm.vp.copysign(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_copysign, 2, 3, VP_FCOPYSIGN, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(FCOPYSIGN)
END_REGISTER_VP(vp_copysign, VP_FCOPYSIGN)

// llvm.vp.minnum(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_minnum, 2, 3, VP_FMINNUM, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(FMINNUM)
END_REGISTER_VP(vp_minnum, VP_FMINNUM)

// llvm.vp.maxnum(x,y,mask,vlen)
BEGIN_REGISTER_VP(vp_maxnum, 2, 3, VP_FMAXNUM, -1)
VP_PROPERTY_BINARYOP
VP_PROPERTY_FUNCTIONAL_SDOPC(FMAXNUM)
END_REGISTER_VP(vp_maxnum, VP_FMAXNUM)

// llvm.vp.ceil(x,mask,vlen)
BEGIN_REGISTER_VP(vp_ceil, 1, 2, VP_FCEIL, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FCEIL)
END_REGISTER_VP(vp_ceil, VP_FCEIL)

// llvm.vp.floor(x,mask,vlen)
BEGIN_REGISTER_VP(vp_floor, 1, 2, VP_FFLOOR, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FFLOOR)
END_REGISTER_VP(vp_floor, VP_FFLOOR)

// llvm.vp.round(x,mask,vlen)
BEGIN_REGISTER_VP(vp_round, 1, 2, VP_FROUND, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FROUND)
END_REGISTER_VP(vp_round, VP_FROUND)

// llvm.vp.roundeven(x,mask,vlen)
BEGIN_REGISTER_VP(vp_roundeven, 1, 2, VP_FROUNDEVEN, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FROUNDEVEN)
END_REGISTER_VP(vp_roundeven, VP_FROUNDEVEN)

// llvm.vp.roundtozero(x,mask,vlen)
BEGIN_REGISTER_VP(vp_roundtozero, 1, 2, VP_FROUNDTOZERO, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FTRUNC)
END_REGISTER_VP(vp_roundtozero, VP_FROUNDTOZERO)

// llvm.vp.rint(x,mask,vlen)
BEGIN_REGISTER_VP(vp_rint, 1, 2, VP_FRINT, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FRINT)
END_REGISTER_VP(vp_rint, VP_FRINT)

// llvm.vp.nearbyint(x,mask,vlen)
BEGIN_REGISTER_VP(vp_nearbyint, 1, 2, VP_FNEARBYINT, -1)
VP_PROPERTY_FUNCTIONAL_SDOPC(FNEARBYINT)
END_REGISTER_VP(vp_nearbyint, VP_FNEARBYINT)

///// } Floating-Point Arithmetic

///// Type Casts {
// Specialized helper macro for type conversions.
// <operation>(%x, %mask, %evl).
#ifdef HELPER_REGISTER_FP_CAST_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_FP_CAST_VP is already defined!"
#endif
#define HELPER_REGISTER_FP_CAST_VP(OPSUFFIX, VPSD, IROPC, SDOPC, HASROUND)     \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 1, 2, VPSD, -1)                             \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC)                                          \
  VP_PROPERTY_CONSTRAINEDFP(HASROUND, 1, experimental_constrained_##OPSUFFIX)  \
  VP_PROPERTY_CASTOP                                                           \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)

// llvm.vp.fptoui(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptoui, VP_FP_TO_UINT, FPToUI, FP_TO_UINT, 0)

// llvm.vp.fptosi(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptosi, VP_FP_TO_SINT, FPToSI, FP_TO_SINT, 0)

// llvm.vp.uitofp(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(uitofp, VP_UINT_TO_FP, UIToFP, UINT_TO_FP, 1)

// llvm.vp.sitofp(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(sitofp, VP_SINT_TO_FP, SIToFP, SINT_TO_FP, 1)

// llvm.vp.fptrunc(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptrunc, VP_FP_ROUND, FPTrunc, FP_ROUND, 1)

// llvm.vp.fpext(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fpext, VP_FP_EXTEND, FPExt, FP_EXTEND, 0)

#undef HELPER_REGISTER_FP_CAST_VP

// Specialized helper macro for integer type conversions.
// <operation>(%x, %mask, %evl).
#ifdef HELPER_REGISTER_INT_CAST_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_INT_CAST_VP is already defined!"
#endif
#define HELPER_REGISTER_INT_CAST_VP(OPSUFFIX, VPSD, IROPC, SDOPC)              \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 1, 2, VPSD, -1)                             \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC)                                            \
  VP_PROPERTY_FUNCTIONAL_SDOPC(SDOPC)                                          \
  VP_PROPERTY_CASTOP                                                           \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)

// llvm.vp.trunc(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(trunc, VP_TRUNCATE, Trunc, TRUNCATE)

// llvm.vp.zext(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(zext, VP_ZERO_EXTEND, ZExt, ZERO_EXTEND)

// llvm.vp.sext(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(sext, VP_SIGN_EXTEND, SExt, SIGN_EXTEND)

// llvm.vp.ptrtoint(x,mask,vlen)
BEGIN_REGISTER_VP(vp_ptrtoint, 1, 2, VP_PTRTOINT, -1)
VP_PROPERTY_FUNCTIONAL_OPC(PtrToInt)
VP_PROPERTY_CASTOP
END_REGISTER_VP(vp_ptrtoint, VP_PTRTOINT)

// llvm.vp.inttoptr(x,mask,vlen)
BEGIN_REGISTER_VP(vp_inttoptr, 1, 2, VP_INTTOPTR, -1)
VP_PROPERTY_FUNCTIONAL_OPC(IntToPtr)
VP_PROPERTY_CASTOP
END_REGISTER_VP(vp_inttoptr, VP_INTTOPTR)

#undef HELPER_REGISTER_INT_CAST_VP

///// } Type Casts

///// Comparisons {

// VP_SETCC (ISel only)
BEGIN_REGISTER_VP_SDNODE(VP_SETCC, 0, vp_setcc, 3, 4)
END_REGISTER_VP_SDNODE(VP_SETCC)

// llvm.vp.fcmp(x,y,cc,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_fcmp, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_fcmp, VP_SETCC)
VP_PROPERTY_FUNCTIONAL_OPC(FCmp)
VP_PROPERTY_CMP(2, true)
VP_PROPERTY_CONSTRAINEDFP(0, 1, experimental_constrained_fcmp)
END_REGISTER_VP_INTRINSIC(vp_fcmp)

// llvm.vp.icmp(x,y,cc,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_icmp, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_icmp, VP_SETCC)
VP_PROPERTY_FUNCTIONAL_OPC(ICmp)
VP_PROPERTY_CMP(2, false)
END_REGISTER_VP_INTRINSIC(vp_icmp)

///// } Comparisons

///// Memory Operations {
// llvm.vp.store(val,ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_store, 2, 3)
// chain = VP_STORE chain,val,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_STORE, 1, vp_store, 4, 5)
HELPER_MAP_VPID_TO_VPSD(vp_store, VP_STORE)
VP_PROPERTY_FUNCTIONAL_OPC(Store)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_store)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(vp_store, VP_STORE)

// llvm.experimental.vp.strided.store(val,ptr,stride,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(experimental_vp_strided_store, 3, 4)
// chain = EXPERIMENTAL_VP_STRIDED_STORE chain,val,base,offset,stride,mask,evl
BEGIN_REGISTER_VP_SDNODE(EXPERIMENTAL_VP_STRIDED_STORE, 1, experimental_vp_strided_store, 5, 6)
HELPER_MAP_VPID_TO_VPSD(experimental_vp_strided_store, EXPERIMENTAL_VP_STRIDED_STORE)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(experimental_vp_strided_store, EXPERIMENTAL_VP_STRIDED_STORE)

// llvm.vp.scatter(ptr,val,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_scatter, 2, 3)
// chain = VP_SCATTER chain,val,base,indices,scale,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_SCATTER, 1, vp_scatter, 5, 6)
HELPER_MAP_VPID_TO_VPSD(vp_scatter, VP_SCATTER)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_scatter)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(vp_scatter, VP_SCATTER)

// llvm.vp.load(ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_load, 1, 2)
// val,chain = VP_LOAD chain,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_LOAD, -1, vp_load, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_load, VP_LOAD)
VP_PROPERTY_FUNCTIONAL_OPC(Load)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_load)
VP_PROPERTY_MEMOP(0, std::nullopt)
END_REGISTER_VP(vp_load, VP_LOAD)

// llvm.experimental.vp.strided.load(ptr,stride,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(experimental_vp_strided_load, 2, 3)
// chain = EXPERIMENTAL_VP_STRIDED_LOAD chain,base,offset,stride,mask,evl
BEGIN_REGISTER_VP_SDNODE(EXPERIMENTAL_VP_STRIDED_LOAD, -1, experimental_vp_strided_load, 4, 5)
HELPER_MAP_VPID_TO_VPSD(experimental_vp_strided_load, EXPERIMENTAL_VP_STRIDED_LOAD)
VP_PROPERTY_MEMOP(0, std::nullopt)
END_REGISTER_VP(experimental_vp_strided_load, EXPERIMENTAL_VP_STRIDED_LOAD)

// llvm.vp.gather(ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_gather, 1, 2)
// val,chain = VP_GATHER chain,base,indices,scale,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_GATHER, -1, vp_gather, 4, 5)
HELPER_MAP_VPID_TO_VPSD(vp_gather, VP_GATHER)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_gather)
VP_PROPERTY_MEMOP(0, std::nullopt)
END_REGISTER_VP(vp_gather, VP_GATHER)

///// } Memory Operations

///// Reductions {

// Specialized helper macro for VP reductions (%start, %x, %mask, %evl).
#ifdef HELPER_REGISTER_REDUCTION_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_REDUCTION_VP is already defined!"
#endif
#define HELPER_REGISTER_REDUCTION_VP(VPID, VPSD, INTRIN)                       \
  BEGIN_REGISTER_VP(VPID, 2, 3, VPSD, 1)                                       \
  VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)                                     \
  VP_PROPERTY_REDUCTION(0, 1)                                                  \
  END_REGISTER_VP(VPID, VPSD)

// llvm.vp.reduce.add(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_add, VP_REDUCE_ADD,
                             experimental_vector_reduce_add)

// llvm.vp.reduce.mul(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_mul, VP_REDUCE_MUL,
                             experimental_vector_reduce_mul)

// llvm.vp.reduce.and(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_and, VP_REDUCE_AND,
                             experimental_vector_reduce_and)

// llvm.vp.reduce.or(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_or, VP_REDUCE_OR,
                             experimental_vector_reduce_or)

// llvm.vp.reduce.xor(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_xor, VP_REDUCE_XOR,
                             experimental_vector_reduce_xor)

// llvm.vp.reduce.smax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_smax, VP_REDUCE_SMAX,
                             experimental_vector_reduce_smax)

// llvm.vp.reduce.smin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_smin, VP_REDUCE_SMIN,
                             experimental_vector_reduce_smin)

// llvm.vp.reduce.umax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_umax, VP_REDUCE_UMAX,
                             experimental_vector_reduce_umax)

// llvm.vp.reduce.umin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_umin, VP_REDUCE_UMIN,
                             experimental_vector_reduce_umin)

// llvm.vp.reduce.fmax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmax, VP_REDUCE_FMAX,
                             experimental_vector_reduce_fmax)

// llvm.vp.reduce.fmin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmin, VP_REDUCE_FMIN,
                             experimental_vector_reduce_fmin)

#undef HELPER_REGISTER_REDUCTION_VP

// Specialized helper macro for VP reductions as above, but with two forms:
// sequential and reassociative. The two forms manifest in the IR as the
// absence or presence of the 'reassoc' fast-math flag, and in the
// SelectionDAG as two distinct ISD opcodes.
// Note that, by default, we map the VP intrinsic to the SEQ ISD opcode, which
// can then be relaxed to the non-SEQ ISD opcode if the 'reassoc' flag is set.
#ifdef HELPER_REGISTER_REDUCTION_SEQ_VP
#error                                                                         \
    "The internal helper macro HELPER_REGISTER_REDUCTION_SEQ_VP is already defined!"
#endif
#define HELPER_REGISTER_REDUCTION_SEQ_VP(VPID, VPSD, SEQ_VPSD, INTRIN)         \
  BEGIN_REGISTER_VP_INTRINSIC(VPID, 2, 3)                                      \
  BEGIN_REGISTER_VP_SDNODE(VPSD, 1, VPID, 2, 3)                                \
  VP_PROPERTY_REDUCTION(0, 1)                                                  \
  END_REGISTER_VP_SDNODE(VPSD)                                                 \
  BEGIN_REGISTER_VP_SDNODE(SEQ_VPSD, 1, VPID, 2, 3)                            \
  HELPER_MAP_VPID_TO_VPSD(VPID, SEQ_VPSD)                                      \
  VP_PROPERTY_REDUCTION(0, 1)                                                  \
  END_REGISTER_VP_SDNODE(SEQ_VPSD)                                             \
  VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)                                     \
  END_REGISTER_VP_INTRINSIC(VPID)
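
// Sketch of the relaxation (illustrative, not the actual lowering code; FMF
// stands for the fast-math flags of the IR call):
//
//   unsigned Opc = ISD::VP_REDUCE_SEQ_FADD; // default: sequential
//   if (FMF.allowReassoc())                 // 'reassoc' set on the intrinsic
//     Opc = ISD::VP_REDUCE_FADD;            // unordered form may be used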

// llvm.vp.reduce.fadd(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fadd, VP_REDUCE_FADD,
                                 VP_REDUCE_SEQ_FADD,
                                 experimental_vector_reduce_fadd)

// llvm.vp.reduce.fmul(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fmul, VP_REDUCE_FMUL,
                                 VP_REDUCE_SEQ_FMUL,
                                 experimental_vector_reduce_fmul)

#undef HELPER_REGISTER_REDUCTION_SEQ_VP

///// } Reductions

///// Shuffles {

// The mask ('cond') operand of llvm.vp.select and llvm.vp.merge is not
// reported as a mask with the BEGIN_REGISTER_VP_* macros.  This is because,
// unlike other VP intrinsics, these two have a defined result on lanes where
// the mask is false.
//
// llvm.vp.select(cond,on_true,on_false,vlen)
BEGIN_REGISTER_VP(vp_select, std::nullopt, 3, VP_SELECT, -1)
VP_PROPERTY_FUNCTIONAL_OPC(Select)
VP_PROPERTY_FUNCTIONAL_SDOPC(VSELECT)
END_REGISTER_VP(vp_select, VP_SELECT)

// llvm.vp.merge(cond,on_true,on_false,pivot)
BEGIN_REGISTER_VP(vp_merge, std::nullopt, 3, VP_MERGE, -1)
END_REGISTER_VP(vp_merge, VP_MERGE)

BEGIN_REGISTER_VP(experimental_vp_splice, 3, 5, EXPERIMENTAL_VP_SPLICE, -1)
END_REGISTER_VP(experimental_vp_splice, EXPERIMENTAL_VP_SPLICE)

///// } Shuffles

#undef BEGIN_REGISTER_VP
#undef BEGIN_REGISTER_VP_INTRINSIC
#undef BEGIN_REGISTER_VP_SDNODE
#undef END_REGISTER_VP
#undef END_REGISTER_VP_INTRINSIC
#undef END_REGISTER_VP_SDNODE
#undef HELPER_MAP_VPID_TO_VPSD
#undef VP_PROPERTY_BINARYOP
#undef VP_PROPERTY_CASTOP
#undef VP_PROPERTY_CMP
#undef VP_PROPERTY_CONSTRAINEDFP
#undef VP_PROPERTY_FUNCTIONAL_INTRINSIC
#undef VP_PROPERTY_FUNCTIONAL_OPC
#undef VP_PROPERTY_FUNCTIONAL_SDOPC
#undef VP_PROPERTY_MEMOP
#undef VP_PROPERTY_REDUCTION
IR/Assumptions.h

//===--- Assumptions.h - Assumption handling and organization ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// String assumptions that are known to optimization passes should be placed in
// the KnownAssumptionStrings set. This can be done in various ways, e.g.,
// via a static KnownAssumptionString object.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_ASSUMPTIONS_H
#define LLVM_IR_ASSUMPTIONS_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"

namespace llvm {

class Function;
class CallBase;

/// The key we use for assumption attributes.
constexpr StringRef AssumptionAttrKey = "llvm.assume";

/// A set of known assumption strings that are accepted without warning and
/// which can be recommended as typo correction.
extern StringSet<> KnownAssumptionStrings;

/// Helper that allows inserting a new assumption string into the known
/// assumption set by creating a (static) object.
struct KnownAssumptionString {
  KnownAssumptionString(const char *AssumptionStr)
      : AssumptionStr(AssumptionStr) {
    KnownAssumptionStrings.insert(AssumptionStr);
  }
  KnownAssumptionString(StringRef AssumptionStr)
      : AssumptionStr(AssumptionStr) {
    KnownAssumptionStrings.insert(AssumptionStr);
  }
  operator StringRef() const { return AssumptionStr; }

private:
  StringRef AssumptionStr;
};
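
// Example (illustrative; the string is hypothetical): a pass can register a
// new known assumption at static initialization time:
//
//   static KnownAssumptionString MyAssumption("my-pass-assumption");
//
// Afterwards the string is accepted without warning and can be queried with
// hasAssumption() below.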

/// Return true if \p F has the assumption \p AssumptionStr attached.
bool hasAssumption(const Function &F,
                   const KnownAssumptionString &AssumptionStr);

/// Return true if \p CB or the callee has the assumption \p AssumptionStr
/// attached.
bool hasAssumption(const CallBase &CB,
                   const KnownAssumptionString &AssumptionStr);

/// Return the set of all assumptions for the function \p F.
DenseSet<StringRef> getAssumptions(const Function &F);

/// Return the set of all assumptions for the call \p CB.
DenseSet<StringRef> getAssumptions(const CallBase &CB);

/// Appends the set of assumptions \p Assumptions to \p F.
bool addAssumptions(Function &F, const DenseSet<StringRef> &Assumptions);

/// Appends the set of assumptions \p Assumptions to \p CB.
bool addAssumptions(CallBase &CB, const DenseSet<StringRef> &Assumptions);

} // namespace llvm

#endif
IR/User.h

//===- llvm/User.h - User class definition ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class defines the interface that one who uses a Value must implement.
// Each instance of the Value class keeps track of which Users have handles
// to it.
//
//  * Instructions are the largest class of Users.
//  * Constants may be users of other constants (think arrays and stuff)
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_USER_H
#define LLVM_IR_USER_H

#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>

namespace llvm {

template <typename T> class ArrayRef;
template <typename T> class MutableArrayRef;

/// Compile-time customization of User operands.
///
/// Customizes operand-related allocators and accessors.
template <class>
struct OperandTraits;

class User : public Value {
  template <unsigned>
  friend struct HungoffOperandTraits;

  LLVM_ATTRIBUTE_ALWAYS_INLINE static void *
  allocateFixedOperandUser(size_t, unsigned, unsigned);

protected:
  /// Allocate a User with an operand pointer co-allocated.
  ///
  /// This is used for subclasses which need to allocate a variable number
  /// of operands, i.e., 'hung off uses'.
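  /// (PHINode is the canonical example of such a subclass.)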
  void *operator new(size_t Size);

  /// Allocate a User with the operands co-allocated.
  ///
  /// This is used for subclasses which have a fixed number of operands.
  void *operator new(size_t Size, unsigned Us);

  /// Allocate a User with the operands co-allocated.  If DescBytes is non-zero
  /// then allocate an additional DescBytes bytes before the operands. These
  /// bytes can be accessed by calling getDescriptor.
  ///
  /// DescBytes needs to be divisible by sizeof(void *).  The allocated
  /// descriptor, if any, is aligned to sizeof(void *) bytes.
  ///
  /// This is used for subclasses which have a fixed number of operands.
  void *operator new(size_t Size, unsigned Us, unsigned DescBytes);

  User(Type *ty, unsigned vty, Use *, unsigned NumOps)
      : Value(ty, vty) {
    assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands");
    NumUserOperands = NumOps;
    // If we have hung off uses, then the operand list should initially be
    // null.
    assert((!HasHungOffUses || !getOperandList()) &&
           "Error in initializing hung off uses for User");
  }

  /// Allocate the array of Uses, followed by a pointer
  /// (with bottom bit set) to the User.
  /// \param IsPhi identifies callers which are phi nodes and which need
  /// N BasicBlock* allocated along with the N Uses.
  void allocHungoffUses(unsigned N, bool IsPhi = false);

  /// Grow the number of hung off uses.  Note that allocHungoffUses
  /// should be called instead if there are currently no uses.
  void growHungoffUses(unsigned N, bool IsPhi = false);

protected:
  ~User() = default; // Use deleteValue() to delete a generic Instruction.

public:
  User(const User &) = delete;

  /// Free memory allocated for User and Use objects.
  void operator delete(void *Usr);
  /// Placement delete - required by std, called if the ctor throws.
  void operator delete(void *Usr, unsigned) {
    // Note: If a subclass manipulates the information which is required to
    // calculate the Usr memory pointer, e.g. NumUserOperands, the operator
    // delete of that subclass has to restore the changed information to the
    // original value, since the dtor of that class is not called if the ctor
    // fails.
    User::operator delete(Usr);

#ifndef LLVM_ENABLE_EXCEPTIONS
    llvm_unreachable("Constructor throws?");
#endif
  }
  /// Placement delete - required by std, called if the ctor throws.
  void operator delete(void *Usr, unsigned, unsigned) {
    // Note: If a subclass manipulates the information which is required to
    // calculate the Usr memory pointer, e.g. NumUserOperands, the operator
    // delete of that subclass has to restore the changed information to the
    // original value, since the dtor of that class is not called if the ctor
    // fails.
    User::operator delete(Usr);

#ifndef LLVM_ENABLE_EXCEPTIONS
    llvm_unreachable("Constructor throws?");
#endif
  }

protected:
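  // A non-negative Idx counts forward from op_begin(); a negative Idx counts
  // backward from op_end(), so e.g. Op<-1>() is the last operand.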
  template <int Idx, typename U> static Use &OpFrom(const U *that) {
    return Idx < 0
      ? OperandTraits<U>::op_end(const_cast<U*>(that))[Idx]
      : OperandTraits<U>::op_begin(const_cast<U*>(that))[Idx];
  }

  template <int Idx> Use &Op() {
    return OpFrom<Idx>(this);
  }
  template <int Idx> const Use &Op() const {
    return OpFrom<Idx>(this);
  }

private:
  const Use *getHungOffOperands() const {
    return *(reinterpret_cast<const Use *const *>(this) - 1);
  }

  Use *&getHungOffOperands() { return *(reinterpret_cast<Use **>(this) - 1); }

  const Use *getIntrusiveOperands() const {
    return reinterpret_cast<const Use *>(this) - NumUserOperands;
  }

  Use *getIntrusiveOperands() {
    return reinterpret_cast<Use *>(this) - NumUserOperands;
  }

  void setOperandList(Use *NewList) {
    assert(HasHungOffUses &&
           "Setting operand list only required for hung off uses");
    getHungOffOperands() = NewList;
  }

public:
  const Use *getOperandList() const {
    return HasHungOffUses ? getHungOffOperands() : getIntrusiveOperands();
  }
  Use *getOperandList() {
    return const_cast<Use *>(static_cast<const User *>(this)->getOperandList());
  }

  Value *getOperand(unsigned i) const {
    assert(i < NumUserOperands && "getOperand() out of range!");
    return getOperandList()[i];
  }

  void setOperand(unsigned i, Value *Val) {
    assert(i < NumUserOperands && "setOperand() out of range!");
    assert((!isa<Constant>((const Value*)this) ||
            isa<GlobalValue>((const Value*)this)) &&
           "Cannot mutate a constant with setOperand!");
    getOperandList()[i] = Val;
  }

  const Use &getOperandUse(unsigned i) const {
    assert(i < NumUserOperands && "getOperandUse() out of range!");
    return getOperandList()[i];
  }
  Use &getOperandUse(unsigned i) {
    assert(i < NumUserOperands && "getOperandUse() out of range!");
    return getOperandList()[i];
  }

  unsigned getNumOperands() const { return NumUserOperands; }

  /// Returns the descriptor co-allocated with this User instance.
  ArrayRef<const uint8_t> getDescriptor() const;

  /// Returns the descriptor co-allocated with this User instance.
  MutableArrayRef<uint8_t> getDescriptor();

  /// Set the number of operands on a GlobalVariable.
  ///
  /// GlobalVariable always allocates space for a single operand, but
  /// doesn't always use it.
  ///
  /// FIXME: Since the number of operands is used to find the start of
  /// the allocated memory in operator delete, we need to always think we have
  /// 1 operand before delete.
  void setGlobalVariableNumOperands(unsigned NumOps) {
    assert(NumOps <= 1 && "GlobalVariable can only have 0 or 1 operands");
    NumUserOperands = NumOps;
  }

  /// Subclasses with hung off uses need to manage the operand count
  /// themselves.  In these instances, the operand count isn't used to find the
  /// OperandList, so there's no issue in having the operand count change.
  void setNumHungOffUseOperands(unsigned NumOps) {
    assert(HasHungOffUses && "Must have hung off uses to use this method");
    assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands");
    NumUserOperands = NumOps;
  }

  /// A droppable user is a user for which uses can be dropped without affecting
  /// correctness and should be dropped rather than preventing a transformation
  /// from happening.
  bool isDroppable() const;

  // ---------------------------------------------------------------------------
  // Operand Iterator interface...
  //
  using op_iterator = Use*;
  using const_op_iterator = const Use*;
  using op_range = iterator_range<op_iterator>;
  using const_op_range = iterator_range<const_op_iterator>;

  op_iterator       op_begin()       { return getOperandList(); }
  const_op_iterator op_begin() const { return getOperandList(); }
  op_iterator       op_end()         {
    return getOperandList() + NumUserOperands;
  }
  const_op_iterator op_end()   const {
    return getOperandList() + NumUserOperands;
  }
  op_range operands() {
    return op_range(op_begin(), op_end());
  }
  const_op_range operands() const {
    return const_op_range(op_begin(), op_end());
  }

  /// Iterator for directly iterating over the operand Values.
  struct value_op_iterator
      : iterator_adaptor_base<value_op_iterator, op_iterator,
                              std::random_access_iterator_tag, Value *,
                              ptrdiff_t, Value *, Value *> {
    explicit value_op_iterator(Use *U = nullptr) : iterator_adaptor_base(U) {}

    Value *operator*() const { return *I; }
    Value *operator->() const { return operator*(); }
  };

  value_op_iterator value_op_begin() {
    return value_op_iterator(op_begin());
  }
  value_op_iterator value_op_end() {
    return value_op_iterator(op_end());
  }
  iterator_range<value_op_iterator> operand_values() {
    return make_range(value_op_begin(), value_op_end());
  }

  struct const_value_op_iterator
      : iterator_adaptor_base<const_value_op_iterator, const_op_iterator,
                              std::random_access_iterator_tag, const Value *,
                              ptrdiff_t, const Value *, const Value *> {
    explicit const_value_op_iterator(const Use *U = nullptr) :
      iterator_adaptor_base(U) {}

    const Value *operator*() const { return *I; }
    const Value *operator->() const { return operator*(); }
  };

  const_value_op_iterator value_op_begin() const {
    return const_value_op_iterator(op_begin());
  }
  const_value_op_iterator value_op_end() const {
    return const_value_op_iterator(op_end());
  }
  iterator_range<const_value_op_iterator> operand_values() const {
    return make_range(value_op_begin(), value_op_end());
  }
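
  // Usage sketch (illustrative): both iterator families visit the same
  // operands, either as Use edges or directly as Value pointers:
  //
  //   unsigned countConstantOperands(const User &U) {
  //     unsigned N = 0;
  //     for (const Value *V : U.operand_values())
  //       N += isa<Constant>(V);
  //     return N;
  //   }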

  /// Drop all references to operands.
  ///
  /// This function is in charge of "letting go" of all objects that this User
  /// refers to.  This allows one to 'delete' a whole class at a time, even
  /// though there may be circular references...  First all references are
  /// dropped, and all use counts go to zero.  Then everything is deleted for
  /// real.  Note that no operations are valid on an object that has "dropped
  /// all references", except operator delete.
  void dropAllReferences() {
    for (Use &U : operands())
      U.set(nullptr);
  }

  /// Replace uses of one Value with another.
  ///
  /// Replaces all references to the "From" definition with references to the
  /// "To" definition. Returns whether any uses were replaced.
  bool replaceUsesOfWith(Value *From, Value *To);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return isa<Instruction>(V) || isa<Constant>(V);
  }
};

// Either Use objects, or a Use pointer can be prepended to User.
static_assert(alignof(Use) >= alignof(User),
              "Alignment is insufficient after objects prepended to User");
static_assert(alignof(Use *) >= alignof(User),
              "Alignment is insufficient after objects prepended to User");

template<> struct simplify_type<User::op_iterator> {
  using SimpleType = Value*;

  static SimpleType getSimplifiedValue(User::op_iterator &Val) {
    return Val->get();
  }
};
template<> struct simplify_type<User::const_op_iterator> {
  using SimpleType = /*const*/ Value*;

  static SimpleType getSimplifiedValue(User::const_op_iterator &Val) {
    return Val->get();
  }
};

} // end namespace llvm

#endif // LLVM_IR_USER_H
IR/OperandTraits.h

//===-- llvm/OperandTraits.h - OperandTraits class definition ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the traits classes that are handy for enforcing the correct
// layout of various User subclasses. It also provides the means for accessing
// the operands in the most efficient manner.
//

#ifndef LLVM_IR_OPERANDTRAITS_H
#define LLVM_IR_OPERANDTRAITS_H

#include "llvm/IR/User.h"

namespace llvm {

//===----------------------------------------------------------------------===//
//                          FixedNumOperand Trait Class
//===----------------------------------------------------------------------===//

/// FixedNumOperandTraits - determine the allocation regime of the Use array
/// when it is a prefix to the User object, and the number of Use objects is
/// known at compile time.

template <typename SubClass, unsigned ARITY>
struct FixedNumOperandTraits {
  static Use *op_begin(SubClass* U) {
    static_assert(
        !std::is_polymorphic<SubClass>::value,
        "adding virtual methods to subclasses of User breaks use lists");
    return reinterpret_cast<Use*>(U) - ARITY;
  }
  static Use *op_end(SubClass* U) {
    return reinterpret_cast<Use*>(U);
  }
  static unsigned operands(const User*) {
    return ARITY;
  }
};
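
// Example (sketch; MyBinaryUser is an illustrative name): a User subclass with
// exactly two co-allocated operands pairs this trait with the accessor macros
// defined at the end of this file:
//
//   template <>
//   struct OperandTraits<MyBinaryUser>
//       : public FixedNumOperandTraits<MyBinaryUser, 2> {};
//
//   DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MyBinaryUser, Value)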

//===----------------------------------------------------------------------===//
//                          OptionalOperand Trait Class
//===----------------------------------------------------------------------===//

/// OptionalOperandTraits - when the number of operands may change at runtime.
/// Naturally it may only decrease, because the allocation cannot be resized.

template <typename SubClass, unsigned ARITY = 1>
struct OptionalOperandTraits : public FixedNumOperandTraits<SubClass, ARITY> {
  static unsigned operands(const User *U) {
    return U->getNumOperands();
  }
};

//===----------------------------------------------------------------------===//
//                          VariadicOperand Trait Class
//===----------------------------------------------------------------------===//

/// VariadicOperandTraits - determine the allocation regime of the Use array
/// when it is a prefix to the User object, and the number of Use objects is
/// only known at allocation time.

template <typename SubClass, unsigned MINARITY = 0>
struct VariadicOperandTraits {
  static Use *op_begin(SubClass* U) {
    static_assert(
        !std::is_polymorphic<SubClass>::value,
        "adding virtual methods to subclasses of User breaks use lists");
    return reinterpret_cast<Use*>(U) - static_cast<User*>(U)->getNumOperands();
  }
  static Use *op_end(SubClass* U) {
    return reinterpret_cast<Use*>(U);
  }
  static unsigned operands(const User *U) {
    return U->getNumOperands();
  }
};

//===----------------------------------------------------------------------===//
//                          HungoffOperand Trait Class
//===----------------------------------------------------------------------===//

/// HungoffOperandTraits - determine the allocation regime of the Use array
/// when it is not a prefix to the User object, but allocated at an unrelated
/// heap address.
///
/// This is the traits class that is needed when the Use array must be
/// resizable.

template <unsigned MINARITY = 1>
struct HungoffOperandTraits {
  static Use *op_begin(User* U) {
    return U->getOperandList();
  }
  static Use *op_end(User* U) {
    return U->getOperandList() + U->getNumOperands();
  }
  static unsigned operands(const User *U) {
    return U->getNumOperands();
  }
};

/// Macro for generating in-class operand accessor declarations.
/// It should only be called in the public section of the interface.
///
#define DECLARE_TRANSPARENT_OPERAND_ACCESSORS(VALUECLASS) \
  public: \
  inline VALUECLASS *getOperand(unsigned) const; \
  inline void setOperand(unsigned, VALUECLASS*); \
  inline op_iterator op_begin(); \
  inline const_op_iterator op_begin() const; \
  inline op_iterator op_end(); \
  inline const_op_iterator op_end() const; \
  protected: \
  template <int> inline Use &Op(); \
  template <int> inline const Use &Op() const; \
  public: \
  inline unsigned getNumOperands() const

/// Macro for generating out-of-class operand accessor definitions.
#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS) \
CLASS::op_iterator CLASS::op_begin() { \
  return OperandTraits<CLASS>::op_begin(this); \
} \
CLASS::const_op_iterator CLASS::op_begin() const { \
  return OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this)); \
} \
CLASS::op_iterator CLASS::op_end() { \
  return OperandTraits<CLASS>::op_end(this); \
} \
CLASS::const_op_iterator CLASS::op_end() const { \
  return OperandTraits<CLASS>::op_end(const_cast<CLASS*>(this)); \
} \
VALUECLASS *CLASS::getOperand(unsigned i_nocapture) const { \
  assert(i_nocapture < OperandTraits<CLASS>::operands(this) \
         && "getOperand() out of range!"); \
  return cast_or_null<VALUECLASS>( \
    OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this))[i_nocapture].get()); \
} \
void CLASS::setOperand(unsigned i_nocapture, VALUECLASS *Val_nocapture) { \
  assert(i_nocapture < OperandTraits<CLASS>::operands(this) \
         && "setOperand() out of range!"); \
  OperandTraits<CLASS>::op_begin(this)[i_nocapture] = Val_nocapture; \
} \
unsigned CLASS::getNumOperands() const { \
  return OperandTraits<CLASS>::operands(this); \
} \
template <int Idx_nocapture> Use &CLASS::Op() { \
  return this->OpFrom<Idx_nocapture>(this); \
} \
template <int Idx_nocapture> const Use &CLASS::Op() const { \
  return this->OpFrom<Idx_nocapture>(this); \
}


} // End llvm namespace

#endif
IR/SSAContext.h

//===- SSAContext.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file declares a specialization of the GenericSSAContext<X>
/// class template for LLVM IR.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_SSACONTEXT_H
#define LLVM_IR_SSACONTEXT_H

#include "llvm/ADT/GenericSSAContext.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/Support/Printable.h"

#include <memory>

namespace llvm {
class BasicBlock;
class Function;
class Instruction;
class Value;
template <typename> class SmallVectorImpl;
template <typename, bool> class DominatorTreeBase;

inline auto instrs(const BasicBlock &BB) {
  return llvm::make_range(BB.begin(), BB.end());
}

template <> class GenericSSAContext<Function> {
  Function *F;

public:
  using BlockT = BasicBlock;
  using FunctionT = Function;
  using InstructionT = Instruction;
  using ValueRefT = Value *;
  using ConstValueRefT = const Value *;
  using UseT = Use;
  using DominatorTreeT = DominatorTreeBase<BlockT, false>;

  static constexpr Value *ValueRefNull = nullptr;

  void setFunction(Function &Fn);
  Function *getFunction() const { return F; }

  static BasicBlock *getEntryBlock(Function &F);
  static const BasicBlock *getEntryBlock(const Function &F);

  static void appendBlockDefs(SmallVectorImpl<Value *> &defs,
                              BasicBlock &block);
  static void appendBlockDefs(SmallVectorImpl<const Value *> &defs,
                              const BasicBlock &block);

  static void appendBlockTerms(SmallVectorImpl<Instruction *> &terms,
                               BasicBlock &block);
  static void appendBlockTerms(SmallVectorImpl<const Instruction *> &terms,
                               const BasicBlock &block);

  static bool comesBefore(const Instruction *lhs, const Instruction *rhs);
  static bool isConstantOrUndefValuePhi(const Instruction &Instr);
  const BasicBlock *getDefBlock(const Value *value) const;

  Printable print(const BasicBlock *Block) const;
  Printable print(const Instruction *Inst) const;
  Printable print(const Value *Value) const;
};

using SSAContext = GenericSSAContext<Function>;

} // namespace llvm

#endif // LLVM_IR_SSACONTEXT_H
IR/GVMaterializer.h

//===- GVMaterializer.h - Interface for GV materializers --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an abstract interface for loading a module from some
// place.  This interface allows incremental or random access loading of
// functions from the file.  This is useful for applications like JIT compilers
// or interprocedural optimizers that do not need the entire program in memory
// at the same time.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GVMATERIALIZER_H
#define LLVM_IR_GVMATERIALIZER_H

#include <vector>

namespace llvm {

class Error;
class GlobalValue;
class StructType;

class GVMaterializer {
protected:
  GVMaterializer() = default;

public:
  virtual ~GVMaterializer();

  /// Make sure the given GlobalValue is fully read.
  ///
  virtual Error materialize(GlobalValue *GV) = 0;

  /// Make sure the entire Module has been completely read.
  ///
  virtual Error materializeModule() = 0;

  virtual Error materializeMetadata() = 0;
  virtual void setStripDebugInfo() = 0;

  virtual std::vector<StructType *> getIdentifiedStructTypes() const = 0;
};
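
// Usage sketch (illustrative): callers materialize a global on first use and
// must propagate the Error on failure:
//
//   Error ensureRead(GVMaterializer &M, GlobalValue &GV) {
//     return M.materialize(&GV);
//   }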

} // end namespace llvm

#endif // LLVM_IR_GVMATERIALIZER_H
IR/IntrinsicsNVPTX.h

/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_NVVM_ENUMS_H
#define LLVM_IR_INTRINSIC_NVVM_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum NVVMIntrinsics : unsigned {
// Enum values for intrinsics
    nvvm_abs_bf16 = 5899,                             // llvm.nvvm.abs.bf16
    nvvm_abs_bf16x2,                           // llvm.nvvm.abs.bf16x2
    nvvm_add_rm_d,                             // llvm.nvvm.add.rm.d
    nvvm_add_rm_f,                             // llvm.nvvm.add.rm.f
    nvvm_add_rm_ftz_f,                         // llvm.nvvm.add.rm.ftz.f
    nvvm_add_rn_d,                             // llvm.nvvm.add.rn.d
    nvvm_add_rn_f,                             // llvm.nvvm.add.rn.f
    nvvm_add_rn_ftz_f,                         // llvm.nvvm.add.rn.ftz.f
    nvvm_add_rp_d,                             // llvm.nvvm.add.rp.d
    nvvm_add_rp_f,                             // llvm.nvvm.add.rp.f
    nvvm_add_rp_ftz_f,                         // llvm.nvvm.add.rp.ftz.f
    nvvm_add_rz_d,                             // llvm.nvvm.add.rz.d
    nvvm_add_rz_f,                             // llvm.nvvm.add.rz.f
    nvvm_add_rz_ftz_f,                         // llvm.nvvm.add.rz.ftz.f
    nvvm_atomic_add_gen_f_cta,                 // llvm.nvvm.atomic.add.gen.f.cta
    nvvm_atomic_add_gen_f_sys,                 // llvm.nvvm.atomic.add.gen.f.sys
    nvvm_atomic_add_gen_i_cta,                 // llvm.nvvm.atomic.add.gen.i.cta
    nvvm_atomic_add_gen_i_sys,                 // llvm.nvvm.atomic.add.gen.i.sys
    nvvm_atomic_and_gen_i_cta,                 // llvm.nvvm.atomic.and.gen.i.cta
    nvvm_atomic_and_gen_i_sys,                 // llvm.nvvm.atomic.and.gen.i.sys
    nvvm_atomic_cas_gen_i_cta,                 // llvm.nvvm.atomic.cas.gen.i.cta
    nvvm_atomic_cas_gen_i_sys,                 // llvm.nvvm.atomic.cas.gen.i.sys
    nvvm_atomic_dec_gen_i_cta,                 // llvm.nvvm.atomic.dec.gen.i.cta
    nvvm_atomic_dec_gen_i_sys,                 // llvm.nvvm.atomic.dec.gen.i.sys
    nvvm_atomic_exch_gen_i_cta,                // llvm.nvvm.atomic.exch.gen.i.cta
    nvvm_atomic_exch_gen_i_sys,                // llvm.nvvm.atomic.exch.gen.i.sys
    nvvm_atomic_inc_gen_i_cta,                 // llvm.nvvm.atomic.inc.gen.i.cta
    nvvm_atomic_inc_gen_i_sys,                 // llvm.nvvm.atomic.inc.gen.i.sys
    nvvm_atomic_load_dec_32,                   // llvm.nvvm.atomic.load.dec.32
    nvvm_atomic_load_inc_32,                   // llvm.nvvm.atomic.load.inc.32
    nvvm_atomic_max_gen_i_cta,                 // llvm.nvvm.atomic.max.gen.i.cta
    nvvm_atomic_max_gen_i_sys,                 // llvm.nvvm.atomic.max.gen.i.sys
    nvvm_atomic_min_gen_i_cta,                 // llvm.nvvm.atomic.min.gen.i.cta
    nvvm_atomic_min_gen_i_sys,                 // llvm.nvvm.atomic.min.gen.i.sys
    nvvm_atomic_or_gen_i_cta,                  // llvm.nvvm.atomic.or.gen.i.cta
    nvvm_atomic_or_gen_i_sys,                  // llvm.nvvm.atomic.or.gen.i.sys
    nvvm_atomic_xor_gen_i_cta,                 // llvm.nvvm.atomic.xor.gen.i.cta
    nvvm_atomic_xor_gen_i_sys,                 // llvm.nvvm.atomic.xor.gen.i.sys
    nvvm_bar_sync,                             // llvm.nvvm.bar.sync
    nvvm_bar_warp_sync,                        // llvm.nvvm.bar.warp.sync
    nvvm_barrier,                              // llvm.nvvm.barrier
    nvvm_barrier_cluster_arrive,               // llvm.nvvm.barrier.cluster.arrive
    nvvm_barrier_cluster_arrive_relaxed,       // llvm.nvvm.barrier.cluster.arrive.relaxed
    nvvm_barrier_cluster_wait,                 // llvm.nvvm.barrier.cluster.wait
    nvvm_barrier_n,                            // llvm.nvvm.barrier.n
    nvvm_barrier_sync,                         // llvm.nvvm.barrier.sync
    nvvm_barrier_sync_cnt,                     // llvm.nvvm.barrier.sync.cnt
    nvvm_barrier0,                             // llvm.nvvm.barrier0
    nvvm_barrier0_and,                         // llvm.nvvm.barrier0.and
    nvvm_barrier0_or,                          // llvm.nvvm.barrier0.or
    nvvm_barrier0_popc,                        // llvm.nvvm.barrier0.popc
    nvvm_bf2h_rn,                              // llvm.nvvm.bf2h.rn
    nvvm_bf2h_rn_ftz,                          // llvm.nvvm.bf2h.rn.ftz
    nvvm_bitcast_d2ll,                         // llvm.nvvm.bitcast.d2ll
    nvvm_bitcast_f2i,                          // llvm.nvvm.bitcast.f2i
    nvvm_bitcast_i2f,                          // llvm.nvvm.bitcast.i2f
    nvvm_bitcast_ll2d,                         // llvm.nvvm.bitcast.ll2d
    nvvm_ceil_d,                               // llvm.nvvm.ceil.d
    nvvm_ceil_f,                               // llvm.nvvm.ceil.f
    nvvm_ceil_ftz_f,                           // llvm.nvvm.ceil.ftz.f
    nvvm_compiler_error,                       // llvm.nvvm.compiler.error
    nvvm_compiler_warn,                        // llvm.nvvm.compiler.warn
    nvvm_cos_approx_f,                         // llvm.nvvm.cos.approx.f
    nvvm_cos_approx_ftz_f,                     // llvm.nvvm.cos.approx.ftz.f
    nvvm_cp_async_ca_shared_global_16,         // llvm.nvvm.cp.async.ca.shared.global.16
    nvvm_cp_async_ca_shared_global_16_s,       // llvm.nvvm.cp.async.ca.shared.global.16.s
    nvvm_cp_async_ca_shared_global_4,          // llvm.nvvm.cp.async.ca.shared.global.4
    nvvm_cp_async_ca_shared_global_4_s,        // llvm.nvvm.cp.async.ca.shared.global.4.s
    nvvm_cp_async_ca_shared_global_8,          // llvm.nvvm.cp.async.ca.shared.global.8
    nvvm_cp_async_ca_shared_global_8_s,        // llvm.nvvm.cp.async.ca.shared.global.8.s
    nvvm_cp_async_cg_shared_global_16,         // llvm.nvvm.cp.async.cg.shared.global.16
    nvvm_cp_async_cg_shared_global_16_s,       // llvm.nvvm.cp.async.cg.shared.global.16.s
    nvvm_cp_async_commit_group,                // llvm.nvvm.cp.async.commit.group
    nvvm_cp_async_mbarrier_arrive,             // llvm.nvvm.cp.async.mbarrier.arrive
    nvvm_cp_async_mbarrier_arrive_noinc,       // llvm.nvvm.cp.async.mbarrier.arrive.noinc
    nvvm_cp_async_mbarrier_arrive_noinc_shared,  // llvm.nvvm.cp.async.mbarrier.arrive.noinc.shared
    nvvm_cp_async_mbarrier_arrive_shared,      // llvm.nvvm.cp.async.mbarrier.arrive.shared
    nvvm_cp_async_wait_all,                    // llvm.nvvm.cp.async.wait.all
    nvvm_cp_async_wait_group,                  // llvm.nvvm.cp.async.wait.group
    nvvm_d2f_rm,                               // llvm.nvvm.d2f.rm
    nvvm_d2f_rm_ftz,                           // llvm.nvvm.d2f.rm.ftz
    nvvm_d2f_rn,                               // llvm.nvvm.d2f.rn
    nvvm_d2f_rn_ftz,                           // llvm.nvvm.d2f.rn.ftz
    nvvm_d2f_rp,                               // llvm.nvvm.d2f.rp
    nvvm_d2f_rp_ftz,                           // llvm.nvvm.d2f.rp.ftz
    nvvm_d2f_rz,                               // llvm.nvvm.d2f.rz
    nvvm_d2f_rz_ftz,                           // llvm.nvvm.d2f.rz.ftz
    nvvm_d2i_hi,                               // llvm.nvvm.d2i.hi
    nvvm_d2i_lo,                               // llvm.nvvm.d2i.lo
    nvvm_d2i_rm,                               // llvm.nvvm.d2i.rm
    nvvm_d2i_rn,                               // llvm.nvvm.d2i.rn
    nvvm_d2i_rp,                               // llvm.nvvm.d2i.rp
    nvvm_d2i_rz,                               // llvm.nvvm.d2i.rz
    nvvm_d2ll_rm,                              // llvm.nvvm.d2ll.rm
    nvvm_d2ll_rn,                              // llvm.nvvm.d2ll.rn
    nvvm_d2ll_rp,                              // llvm.nvvm.d2ll.rp
    nvvm_d2ll_rz,                              // llvm.nvvm.d2ll.rz
    nvvm_d2ui_rm,                              // llvm.nvvm.d2ui.rm
    nvvm_d2ui_rn,                              // llvm.nvvm.d2ui.rn
    nvvm_d2ui_rp,                              // llvm.nvvm.d2ui.rp
    nvvm_d2ui_rz,                              // llvm.nvvm.d2ui.rz
    nvvm_d2ull_rm,                             // llvm.nvvm.d2ull.rm
    nvvm_d2ull_rn,                             // llvm.nvvm.d2ull.rn
    nvvm_d2ull_rp,                             // llvm.nvvm.d2ull.rp
    nvvm_d2ull_rz,                             // llvm.nvvm.d2ull.rz
    nvvm_div_approx_f,                         // llvm.nvvm.div.approx.f
    nvvm_div_approx_ftz_f,                     // llvm.nvvm.div.approx.ftz.f
    nvvm_div_rm_d,                             // llvm.nvvm.div.rm.d
    nvvm_div_rm_f,                             // llvm.nvvm.div.rm.f
    nvvm_div_rm_ftz_f,                         // llvm.nvvm.div.rm.ftz.f
    nvvm_div_rn_d,                             // llvm.nvvm.div.rn.d
    nvvm_div_rn_f,                             // llvm.nvvm.div.rn.f
    nvvm_div_rn_ftz_f,                         // llvm.nvvm.div.rn.ftz.f
    nvvm_div_rp_d,                             // llvm.nvvm.div.rp.d
    nvvm_div_rp_f,                             // llvm.nvvm.div.rp.f
    nvvm_div_rp_ftz_f,                         // llvm.nvvm.div.rp.ftz.f
    nvvm_div_rz_d,                             // llvm.nvvm.div.rz.d
    nvvm_div_rz_f,                             // llvm.nvvm.div.rz.f
    nvvm_div_rz_ftz_f,                         // llvm.nvvm.div.rz.ftz.f
    nvvm_ex2_approx_d,                         // llvm.nvvm.ex2.approx.d
    nvvm_ex2_approx_f,                         // llvm.nvvm.ex2.approx.f
    nvvm_ex2_approx_f16,                       // llvm.nvvm.ex2.approx.f16
    nvvm_ex2_approx_f16x2,                     // llvm.nvvm.ex2.approx.f16x2
    nvvm_ex2_approx_ftz_f,                     // llvm.nvvm.ex2.approx.ftz.f
    nvvm_f2bf16_rn,                            // llvm.nvvm.f2bf16.rn
    nvvm_f2bf16_rn_relu,                       // llvm.nvvm.f2bf16.rn.relu
    nvvm_f2bf16_rz,                            // llvm.nvvm.f2bf16.rz
    nvvm_f2bf16_rz_relu,                       // llvm.nvvm.f2bf16.rz.relu
    nvvm_f2h_rn,                               // llvm.nvvm.f2h.rn
    nvvm_f2h_rn_ftz,                           // llvm.nvvm.f2h.rn.ftz
    nvvm_f2i_rm,                               // llvm.nvvm.f2i.rm
    nvvm_f2i_rm_ftz,                           // llvm.nvvm.f2i.rm.ftz
    nvvm_f2i_rn,                               // llvm.nvvm.f2i.rn
    nvvm_f2i_rn_ftz,                           // llvm.nvvm.f2i.rn.ftz
    nvvm_f2i_rp,                               // llvm.nvvm.f2i.rp
    nvvm_f2i_rp_ftz,                           // llvm.nvvm.f2i.rp.ftz
    nvvm_f2i_rz,                               // llvm.nvvm.f2i.rz
    nvvm_f2i_rz_ftz,                           // llvm.nvvm.f2i.rz.ftz
    nvvm_f2ll_rm,                              // llvm.nvvm.f2ll.rm
    nvvm_f2ll_rm_ftz,                          // llvm.nvvm.f2ll.rm.ftz
    nvvm_f2ll_rn,                              // llvm.nvvm.f2ll.rn
    nvvm_f2ll_rn_ftz,                          // llvm.nvvm.f2ll.rn.ftz
    nvvm_f2ll_rp,                              // llvm.nvvm.f2ll.rp
    nvvm_f2ll_rp_ftz,                          // llvm.nvvm.f2ll.rp.ftz
    nvvm_f2ll_rz,                              // llvm.nvvm.f2ll.rz
    nvvm_f2ll_rz_ftz,                          // llvm.nvvm.f2ll.rz.ftz
    nvvm_f2tf32_rna,                           // llvm.nvvm.f2tf32.rna
    nvvm_f2ui_rm,                              // llvm.nvvm.f2ui.rm
    nvvm_f2ui_rm_ftz,                          // llvm.nvvm.f2ui.rm.ftz
    nvvm_f2ui_rn,                              // llvm.nvvm.f2ui.rn
    nvvm_f2ui_rn_ftz,                          // llvm.nvvm.f2ui.rn.ftz
    nvvm_f2ui_rp,                              // llvm.nvvm.f2ui.rp
    nvvm_f2ui_rp_ftz,                          // llvm.nvvm.f2ui.rp.ftz
    nvvm_f2ui_rz,                              // llvm.nvvm.f2ui.rz
    nvvm_f2ui_rz_ftz,                          // llvm.nvvm.f2ui.rz.ftz
    nvvm_f2ull_rm,                             // llvm.nvvm.f2ull.rm
    nvvm_f2ull_rm_ftz,                         // llvm.nvvm.f2ull.rm.ftz
    nvvm_f2ull_rn,                             // llvm.nvvm.f2ull.rn
    nvvm_f2ull_rn_ftz,                         // llvm.nvvm.f2ull.rn.ftz
    nvvm_f2ull_rp,                             // llvm.nvvm.f2ull.rp
    nvvm_f2ull_rp_ftz,                         // llvm.nvvm.f2ull.rp.ftz
    nvvm_f2ull_rz,                             // llvm.nvvm.f2ull.rz
    nvvm_f2ull_rz_ftz,                         // llvm.nvvm.f2ull.rz.ftz
    nvvm_fabs_d,                               // llvm.nvvm.fabs.d
    nvvm_fabs_f,                               // llvm.nvvm.fabs.f
    nvvm_fabs_ftz_f,                           // llvm.nvvm.fabs.ftz.f
    nvvm_fence_sc_cluster,                     // llvm.nvvm.fence.sc.cluster
    nvvm_ff2bf16x2_rn,                         // llvm.nvvm.ff2bf16x2.rn
    nvvm_ff2bf16x2_rn_relu,                    // llvm.nvvm.ff2bf16x2.rn.relu
    nvvm_ff2bf16x2_rz,                         // llvm.nvvm.ff2bf16x2.rz
    nvvm_ff2bf16x2_rz_relu,                    // llvm.nvvm.ff2bf16x2.rz.relu
    nvvm_ff2f16x2_rn,                          // llvm.nvvm.ff2f16x2.rn
    nvvm_ff2f16x2_rn_relu,                     // llvm.nvvm.ff2f16x2.rn.relu
    nvvm_ff2f16x2_rz,                          // llvm.nvvm.ff2f16x2.rz
    nvvm_ff2f16x2_rz_relu,                     // llvm.nvvm.ff2f16x2.rz.relu
    nvvm_floor_d,                              // llvm.nvvm.floor.d
    nvvm_floor_f,                              // llvm.nvvm.floor.f
    nvvm_floor_ftz_f,                          // llvm.nvvm.floor.ftz.f
    nvvm_fma_rm_d,                             // llvm.nvvm.fma.rm.d
    nvvm_fma_rm_f,                             // llvm.nvvm.fma.rm.f
    nvvm_fma_rm_ftz_f,                         // llvm.nvvm.fma.rm.ftz.f
    nvvm_fma_rn_bf16,                          // llvm.nvvm.fma.rn.bf16
    nvvm_fma_rn_bf16x2,                        // llvm.nvvm.fma.rn.bf16x2
    nvvm_fma_rn_d,                             // llvm.nvvm.fma.rn.d
    nvvm_fma_rn_f,                             // llvm.nvvm.fma.rn.f
    nvvm_fma_rn_f16,                           // llvm.nvvm.fma.rn.f16
    nvvm_fma_rn_f16x2,                         // llvm.nvvm.fma.rn.f16x2
    nvvm_fma_rn_ftz_bf16,                      // llvm.nvvm.fma.rn.ftz.bf16
    nvvm_fma_rn_ftz_bf16x2,                    // llvm.nvvm.fma.rn.ftz.bf16x2
    nvvm_fma_rn_ftz_f,                         // llvm.nvvm.fma.rn.ftz.f
    nvvm_fma_rn_ftz_f16,                       // llvm.nvvm.fma.rn.ftz.f16
    nvvm_fma_rn_ftz_f16x2,                     // llvm.nvvm.fma.rn.ftz.f16x2
    nvvm_fma_rn_ftz_relu_bf16,                 // llvm.nvvm.fma.rn.ftz.relu.bf16
    nvvm_fma_rn_ftz_relu_bf16x2,               // llvm.nvvm.fma.rn.ftz.relu.bf16x2
    nvvm_fma_rn_ftz_relu_f16,                  // llvm.nvvm.fma.rn.ftz.relu.f16
    nvvm_fma_rn_ftz_relu_f16x2,                // llvm.nvvm.fma.rn.ftz.relu.f16x2
    nvvm_fma_rn_ftz_sat_bf16,                  // llvm.nvvm.fma.rn.ftz.sat.bf16
    nvvm_fma_rn_ftz_sat_bf16x2,                // llvm.nvvm.fma.rn.ftz.sat.bf16x2
    nvvm_fma_rn_ftz_sat_f16,                   // llvm.nvvm.fma.rn.ftz.sat.f16
    nvvm_fma_rn_ftz_sat_f16x2,                 // llvm.nvvm.fma.rn.ftz.sat.f16x2
    nvvm_fma_rn_relu_bf16,                     // llvm.nvvm.fma.rn.relu.bf16
    nvvm_fma_rn_relu_bf16x2,                   // llvm.nvvm.fma.rn.relu.bf16x2
    nvvm_fma_rn_relu_f16,                      // llvm.nvvm.fma.rn.relu.f16
    nvvm_fma_rn_relu_f16x2,                    // llvm.nvvm.fma.rn.relu.f16x2
    nvvm_fma_rn_sat_bf16,                      // llvm.nvvm.fma.rn.sat.bf16
    nvvm_fma_rn_sat_bf16x2,                    // llvm.nvvm.fma.rn.sat.bf16x2
    nvvm_fma_rn_sat_f16,                       // llvm.nvvm.fma.rn.sat.f16
    nvvm_fma_rn_sat_f16x2,                     // llvm.nvvm.fma.rn.sat.f16x2
    nvvm_fma_rp_d,                             // llvm.nvvm.fma.rp.d
    nvvm_fma_rp_f,                             // llvm.nvvm.fma.rp.f
    nvvm_fma_rp_ftz_f,                         // llvm.nvvm.fma.rp.ftz.f
    nvvm_fma_rz_d,                             // llvm.nvvm.fma.rz.d
    nvvm_fma_rz_f,                             // llvm.nvvm.fma.rz.f
    nvvm_fma_rz_ftz_f,                         // llvm.nvvm.fma.rz.ftz.f
    nvvm_fmax_bf16,                            // llvm.nvvm.fmax.bf16
    nvvm_fmax_bf16x2,                          // llvm.nvvm.fmax.bf16x2
    nvvm_fmax_d,                               // llvm.nvvm.fmax.d
    nvvm_fmax_f,                               // llvm.nvvm.fmax.f
    nvvm_fmax_f16,                             // llvm.nvvm.fmax.f16
    nvvm_fmax_f16x2,                           // llvm.nvvm.fmax.f16x2
    nvvm_fmax_ftz_bf16,                        // llvm.nvvm.fmax.ftz.bf16
    nvvm_fmax_ftz_bf16x2,                      // llvm.nvvm.fmax.ftz.bf16x2
    nvvm_fmax_ftz_f,                           // llvm.nvvm.fmax.ftz.f
    nvvm_fmax_ftz_f16,                         // llvm.nvvm.fmax.ftz.f16
    nvvm_fmax_ftz_f16x2,                       // llvm.nvvm.fmax.ftz.f16x2
    nvvm_fmax_ftz_nan_bf16,                    // llvm.nvvm.fmax.ftz.nan.bf16
    nvvm_fmax_ftz_nan_bf16x2,                  // llvm.nvvm.fmax.ftz.nan.bf16x2
    nvvm_fmax_ftz_nan_f,                       // llvm.nvvm.fmax.ftz.nan.f
    nvvm_fmax_ftz_nan_f16,                     // llvm.nvvm.fmax.ftz.nan.f16
    nvvm_fmax_ftz_nan_f16x2,                   // llvm.nvvm.fmax.ftz.nan.f16x2
    nvvm_fmax_ftz_nan_xorsign_abs_bf16,        // llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16
    nvvm_fmax_ftz_nan_xorsign_abs_bf16x2,      // llvm.nvvm.fmax.ftz.nan.xorsign.abs.bf16x2
    nvvm_fmax_ftz_nan_xorsign_abs_f,           // llvm.nvvm.fmax.ftz.nan.xorsign.abs.f
    nvvm_fmax_ftz_nan_xorsign_abs_f16,         // llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16
    nvvm_fmax_ftz_nan_xorsign_abs_f16x2,       // llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2
    nvvm_fmax_ftz_xorsign_abs_bf16,            // llvm.nvvm.fmax.ftz.xorsign.abs.bf16
    nvvm_fmax_ftz_xorsign_abs_bf16x2,          // llvm.nvvm.fmax.ftz.xorsign.abs.bf16x2
    nvvm_fmax_ftz_xorsign_abs_f,               // llvm.nvvm.fmax.ftz.xorsign.abs.f
    nvvm_fmax_ftz_xorsign_abs_f16,             // llvm.nvvm.fmax.ftz.xorsign.abs.f16
    nvvm_fmax_ftz_xorsign_abs_f16x2,           // llvm.nvvm.fmax.ftz.xorsign.abs.f16x2
    nvvm_fmax_nan_bf16,                        // llvm.nvvm.fmax.nan.bf16
    nvvm_fmax_nan_bf16x2,                      // llvm.nvvm.fmax.nan.bf16x2
    nvvm_fmax_nan_f,                           // llvm.nvvm.fmax.nan.f
    nvvm_fmax_nan_f16,                         // llvm.nvvm.fmax.nan.f16
    nvvm_fmax_nan_f16x2,                       // llvm.nvvm.fmax.nan.f16x2
    nvvm_fmax_nan_xorsign_abs_bf16,            // llvm.nvvm.fmax.nan.xorsign.abs.bf16
    nvvm_fmax_nan_xorsign_abs_bf16x2,          // llvm.nvvm.fmax.nan.xorsign.abs.bf16x2
    nvvm_fmax_nan_xorsign_abs_f,               // llvm.nvvm.fmax.nan.xorsign.abs.f
    nvvm_fmax_nan_xorsign_abs_f16,             // llvm.nvvm.fmax.nan.xorsign.abs.f16
    nvvm_fmax_nan_xorsign_abs_f16x2,           // llvm.nvvm.fmax.nan.xorsign.abs.f16x2
    nvvm_fmax_xorsign_abs_bf16,                // llvm.nvvm.fmax.xorsign.abs.bf16
    nvvm_fmax_xorsign_abs_bf16x2,              // llvm.nvvm.fmax.xorsign.abs.bf16x2
    nvvm_fmax_xorsign_abs_f,                   // llvm.nvvm.fmax.xorsign.abs.f
    nvvm_fmax_xorsign_abs_f16,                 // llvm.nvvm.fmax.xorsign.abs.f16
    nvvm_fmax_xorsign_abs_f16x2,               // llvm.nvvm.fmax.xorsign.abs.f16x2
    nvvm_fmin_bf16,                            // llvm.nvvm.fmin.bf16
    nvvm_fmin_bf16x2,                          // llvm.nvvm.fmin.bf16x2
    nvvm_fmin_d,                               // llvm.nvvm.fmin.d
    nvvm_fmin_f,                               // llvm.nvvm.fmin.f
    nvvm_fmin_f16,                             // llvm.nvvm.fmin.f16
    nvvm_fmin_f16x2,                           // llvm.nvvm.fmin.f16x2
    nvvm_fmin_ftz_bf16,                        // llvm.nvvm.fmin.ftz.bf16
    nvvm_fmin_ftz_bf16x2,                      // llvm.nvvm.fmin.ftz.bf16x2
    nvvm_fmin_ftz_f,                           // llvm.nvvm.fmin.ftz.f
    nvvm_fmin_ftz_f16,                         // llvm.nvvm.fmin.ftz.f16
    nvvm_fmin_ftz_f16x2,                       // llvm.nvvm.fmin.ftz.f16x2
    nvvm_fmin_ftz_nan_bf16,                    // llvm.nvvm.fmin.ftz.nan.bf16
    nvvm_fmin_ftz_nan_bf16x2,                  // llvm.nvvm.fmin.ftz.nan.bf16x2
    nvvm_fmin_ftz_nan_f,                       // llvm.nvvm.fmin.ftz.nan.f
    nvvm_fmin_ftz_nan_f16,                     // llvm.nvvm.fmin.ftz.nan.f16
    nvvm_fmin_ftz_nan_f16x2,                   // llvm.nvvm.fmin.ftz.nan.f16x2
    nvvm_fmin_ftz_nan_xorsign_abs_bf16,        // llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16
    nvvm_fmin_ftz_nan_xorsign_abs_bf16x2,      // llvm.nvvm.fmin.ftz.nan.xorsign.abs.bf16x2
    nvvm_fmin_ftz_nan_xorsign_abs_f,           // llvm.nvvm.fmin.ftz.nan.xorsign.abs.f
    nvvm_fmin_ftz_nan_xorsign_abs_f16,         // llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16
    nvvm_fmin_ftz_nan_xorsign_abs_f16x2,       // llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2
    nvvm_fmin_ftz_xorsign_abs_bf16,            // llvm.nvvm.fmin.ftz.xorsign.abs.bf16
    nvvm_fmin_ftz_xorsign_abs_bf16x2,          // llvm.nvvm.fmin.ftz.xorsign.abs.bf16x2
    nvvm_fmin_ftz_xorsign_abs_f,               // llvm.nvvm.fmin.ftz.xorsign.abs.f
    nvvm_fmin_ftz_xorsign_abs_f16,             // llvm.nvvm.fmin.ftz.xorsign.abs.f16
    nvvm_fmin_ftz_xorsign_abs_f16x2,           // llvm.nvvm.fmin.ftz.xorsign.abs.f16x2
    nvvm_fmin_nan_bf16,                        // llvm.nvvm.fmin.nan.bf16
    nvvm_fmin_nan_bf16x2,                      // llvm.nvvm.fmin.nan.bf16x2
    nvvm_fmin_nan_f,                           // llvm.nvvm.fmin.nan.f
    nvvm_fmin_nan_f16,                         // llvm.nvvm.fmin.nan.f16
    nvvm_fmin_nan_f16x2,                       // llvm.nvvm.fmin.nan.f16x2
    nvvm_fmin_nan_xorsign_abs_bf16,            // llvm.nvvm.fmin.nan.xorsign.abs.bf16
    nvvm_fmin_nan_xorsign_abs_bf16x2,          // llvm.nvvm.fmin.nan.xorsign.abs.bf16x2
    nvvm_fmin_nan_xorsign_abs_f,               // llvm.nvvm.fmin.nan.xorsign.abs.f
    nvvm_fmin_nan_xorsign_abs_f16,             // llvm.nvvm.fmin.nan.xorsign.abs.f16
    nvvm_fmin_nan_xorsign_abs_f16x2,           // llvm.nvvm.fmin.nan.xorsign.abs.f16x2
    nvvm_fmin_xorsign_abs_bf16,                // llvm.nvvm.fmin.xorsign.abs.bf16
    nvvm_fmin_xorsign_abs_bf16x2,              // llvm.nvvm.fmin.xorsign.abs.bf16x2
    nvvm_fmin_xorsign_abs_f,                   // llvm.nvvm.fmin.xorsign.abs.f
    nvvm_fmin_xorsign_abs_f16,                 // llvm.nvvm.fmin.xorsign.abs.f16
    nvvm_fmin_xorsign_abs_f16x2,               // llvm.nvvm.fmin.xorsign.abs.f16x2
    nvvm_fns,                                  // llvm.nvvm.fns
    nvvm_getctarank,                           // llvm.nvvm.getctarank
    nvvm_getctarank_shared_cluster,            // llvm.nvvm.getctarank.shared.cluster
    nvvm_i2d_rm,                               // llvm.nvvm.i2d.rm
    nvvm_i2d_rn,                               // llvm.nvvm.i2d.rn
    nvvm_i2d_rp,                               // llvm.nvvm.i2d.rp
    nvvm_i2d_rz,                               // llvm.nvvm.i2d.rz
    nvvm_i2f_rm,                               // llvm.nvvm.i2f.rm
    nvvm_i2f_rn,                               // llvm.nvvm.i2f.rn
    nvvm_i2f_rp,                               // llvm.nvvm.i2f.rp
    nvvm_i2f_rz,                               // llvm.nvvm.i2f.rz
    nvvm_is_explicit_cluster,                  // llvm.nvvm.is_explicit_cluster
    nvvm_isspacep_const,                       // llvm.nvvm.isspacep.const
    nvvm_isspacep_global,                      // llvm.nvvm.isspacep.global
    nvvm_isspacep_local,                       // llvm.nvvm.isspacep.local
    nvvm_isspacep_shared,                      // llvm.nvvm.isspacep.shared
    nvvm_isspacep_shared_cluster,              // llvm.nvvm.isspacep.shared.cluster
    nvvm_istypep_sampler,                      // llvm.nvvm.istypep.sampler
    nvvm_istypep_surface,                      // llvm.nvvm.istypep.surface
    nvvm_istypep_texture,                      // llvm.nvvm.istypep.texture
    nvvm_ldg_global_f,                         // llvm.nvvm.ldg.global.f
    nvvm_ldg_global_i,                         // llvm.nvvm.ldg.global.i
    nvvm_ldg_global_p,                         // llvm.nvvm.ldg.global.p
    nvvm_ldmatrix_sync_aligned_m8n8_x1_b16,    // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16
    nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16,  // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.trans.b16
    nvvm_ldmatrix_sync_aligned_m8n8_x2_b16,    // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.b16
    nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16,  // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x2.trans.b16
    nvvm_ldmatrix_sync_aligned_m8n8_x4_b16,    // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.b16
    nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16,  // llvm.nvvm.ldmatrix.sync.aligned.m8n8.x4.trans.b16
    nvvm_ldu_global_f,                         // llvm.nvvm.ldu.global.f
    nvvm_ldu_global_i,                         // llvm.nvvm.ldu.global.i
    nvvm_ldu_global_p,                         // llvm.nvvm.ldu.global.p
    nvvm_lg2_approx_d,                         // llvm.nvvm.lg2.approx.d
    nvvm_lg2_approx_f,                         // llvm.nvvm.lg2.approx.f
    nvvm_lg2_approx_ftz_f,                     // llvm.nvvm.lg2.approx.ftz.f
    nvvm_ll2d_rm,                              // llvm.nvvm.ll2d.rm
    nvvm_ll2d_rn,                              // llvm.nvvm.ll2d.rn
    nvvm_ll2d_rp,                              // llvm.nvvm.ll2d.rp
    nvvm_ll2d_rz,                              // llvm.nvvm.ll2d.rz
    nvvm_ll2f_rm,                              // llvm.nvvm.ll2f.rm
    nvvm_ll2f_rn,                              // llvm.nvvm.ll2f.rn
    nvvm_ll2f_rp,                              // llvm.nvvm.ll2f.rp
    nvvm_ll2f_rz,                              // llvm.nvvm.ll2f.rz
    nvvm_lohi_i2d,                             // llvm.nvvm.lohi.i2d
    nvvm_mapa,                                 // llvm.nvvm.mapa
    nvvm_mapa_shared_cluster,                  // llvm.nvvm.mapa.shared.cluster
    nvvm_match_all_sync_i32p,                  // llvm.nvvm.match.all.sync.i32p
    nvvm_match_all_sync_i64p,                  // llvm.nvvm.match.all.sync.i64p
    nvvm_match_any_sync_i32,                   // llvm.nvvm.match.any.sync.i32
    nvvm_match_any_sync_i64,                   // llvm.nvvm.match.any.sync.i64
    nvvm_mbarrier_arrive,                      // llvm.nvvm.mbarrier.arrive
    nvvm_mbarrier_arrive_drop,                 // llvm.nvvm.mbarrier.arrive.drop
    nvvm_mbarrier_arrive_drop_noComplete,      // llvm.nvvm.mbarrier.arrive.drop.noComplete
    nvvm_mbarrier_arrive_drop_noComplete_shared,  // llvm.nvvm.mbarrier.arrive.drop.noComplete.shared
    nvvm_mbarrier_arrive_drop_shared,          // llvm.nvvm.mbarrier.arrive.drop.shared
    nvvm_mbarrier_arrive_noComplete,           // llvm.nvvm.mbarrier.arrive.noComplete
    nvvm_mbarrier_arrive_noComplete_shared,    // llvm.nvvm.mbarrier.arrive.noComplete.shared
    nvvm_mbarrier_arrive_shared,               // llvm.nvvm.mbarrier.arrive.shared
    nvvm_mbarrier_init,                        // llvm.nvvm.mbarrier.init
    nvvm_mbarrier_init_shared,                 // llvm.nvvm.mbarrier.init.shared
    nvvm_mbarrier_inval,                       // llvm.nvvm.mbarrier.inval
    nvvm_mbarrier_inval_shared,                // llvm.nvvm.mbarrier.inval.shared
    nvvm_mbarrier_pending_count,               // llvm.nvvm.mbarrier.pending.count
    nvvm_mbarrier_test_wait,                   // llvm.nvvm.mbarrier.test.wait
    nvvm_mbarrier_test_wait_shared,            // llvm.nvvm.mbarrier.test.wait.shared
    nvvm_membar_cta,                           // llvm.nvvm.membar.cta
    nvvm_membar_gl,                            // llvm.nvvm.membar.gl
    nvvm_membar_sys,                           // llvm.nvvm.membar.sys
    nvvm_mma_and_popc_m16n8k128_row_col_b1,    // llvm.nvvm.mma.and.popc.m16n8k128.row.col.b1
    nvvm_mma_and_popc_m16n8k256_row_col_b1,    // llvm.nvvm.mma.and.popc.m16n8k256.row.col.b1
    nvvm_mma_and_popc_m8n8k128_row_col_b1,     // llvm.nvvm.mma.and.popc.m8n8k128.row.col.b1
    nvvm_mma_m16n8k16_row_col_bf16,            // llvm.nvvm.mma.m16n8k16.row.col.bf16
    nvvm_mma_m16n8k16_row_col_f16_f16,         // llvm.nvvm.mma.m16n8k16.row.col.f16.f16
    nvvm_mma_m16n8k16_row_col_f16_f32,         // llvm.nvvm.mma.m16n8k16.row.col.f16.f32
    nvvm_mma_m16n8k16_row_col_f32_f16,         // llvm.nvvm.mma.m16n8k16.row.col.f32.f16
    nvvm_mma_m16n8k16_row_col_f32_f32,         // llvm.nvvm.mma.m16n8k16.row.col.f32.f32
    nvvm_mma_m16n8k16_row_col_s8,              // llvm.nvvm.mma.m16n8k16.row.col.s8
    nvvm_mma_m16n8k16_row_col_s8_u8,           // llvm.nvvm.mma.m16n8k16.row.col.s8.u8
    nvvm_mma_m16n8k16_row_col_satfinite_s8,    // llvm.nvvm.mma.m16n8k16.row.col.satfinite.s8
    nvvm_mma_m16n8k16_row_col_satfinite_s8_u8,  // llvm.nvvm.mma.m16n8k16.row.col.satfinite.s8.u8
    nvvm_mma_m16n8k16_row_col_satfinite_u8,    // llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8
    nvvm_mma_m16n8k16_row_col_satfinite_u8_s8,  // llvm.nvvm.mma.m16n8k16.row.col.satfinite.u8.s8
    nvvm_mma_m16n8k16_row_col_u8,              // llvm.nvvm.mma.m16n8k16.row.col.u8
    nvvm_mma_m16n8k16_row_col_u8_s8,           // llvm.nvvm.mma.m16n8k16.row.col.u8.s8
    nvvm_mma_m16n8k32_row_col_s4,              // llvm.nvvm.mma.m16n8k32.row.col.s4
    nvvm_mma_m16n8k32_row_col_s4_u4,           // llvm.nvvm.mma.m16n8k32.row.col.s4.u4
    nvvm_mma_m16n8k32_row_col_s8,              // llvm.nvvm.mma.m16n8k32.row.col.s8
    nvvm_mma_m16n8k32_row_col_s8_u8,           // llvm.nvvm.mma.m16n8k32.row.col.s8.u8
    nvvm_mma_m16n8k32_row_col_satfinite_s4,    // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s4
    nvvm_mma_m16n8k32_row_col_satfinite_s4_u4,  // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s4.u4
    nvvm_mma_m16n8k32_row_col_satfinite_s8,    // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s8
    nvvm_mma_m16n8k32_row_col_satfinite_s8_u8,  // llvm.nvvm.mma.m16n8k32.row.col.satfinite.s8.u8
    nvvm_mma_m16n8k32_row_col_satfinite_u4,    // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u4
    nvvm_mma_m16n8k32_row_col_satfinite_u4_s4,  // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u4.s4
    nvvm_mma_m16n8k32_row_col_satfinite_u8,    // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8
    nvvm_mma_m16n8k32_row_col_satfinite_u8_s8,  // llvm.nvvm.mma.m16n8k32.row.col.satfinite.u8.s8
    nvvm_mma_m16n8k32_row_col_u4,              // llvm.nvvm.mma.m16n8k32.row.col.u4
    nvvm_mma_m16n8k32_row_col_u4_s4,           // llvm.nvvm.mma.m16n8k32.row.col.u4.s4
    nvvm_mma_m16n8k32_row_col_u8,              // llvm.nvvm.mma.m16n8k32.row.col.u8
    nvvm_mma_m16n8k32_row_col_u8_s8,           // llvm.nvvm.mma.m16n8k32.row.col.u8.s8
    nvvm_mma_m16n8k4_row_col_tf32,             // llvm.nvvm.mma.m16n8k4.row.col.tf32
    nvvm_mma_m16n8k64_row_col_s4,              // llvm.nvvm.mma.m16n8k64.row.col.s4
    nvvm_mma_m16n8k64_row_col_s4_u4,           // llvm.nvvm.mma.m16n8k64.row.col.s4.u4
    nvvm_mma_m16n8k64_row_col_satfinite_s4,    // llvm.nvvm.mma.m16n8k64.row.col.satfinite.s4
    nvvm_mma_m16n8k64_row_col_satfinite_s4_u4,  // llvm.nvvm.mma.m16n8k64.row.col.satfinite.s4.u4
    nvvm_mma_m16n8k64_row_col_satfinite_u4,    // llvm.nvvm.mma.m16n8k64.row.col.satfinite.u4
    nvvm_mma_m16n8k64_row_col_satfinite_u4_s4,  // llvm.nvvm.mma.m16n8k64.row.col.satfinite.u4.s4
    nvvm_mma_m16n8k64_row_col_u4,              // llvm.nvvm.mma.m16n8k64.row.col.u4
    nvvm_mma_m16n8k64_row_col_u4_s4,           // llvm.nvvm.mma.m16n8k64.row.col.u4.s4
    nvvm_mma_m16n8k8_row_col_bf16,             // llvm.nvvm.mma.m16n8k8.row.col.bf16
    nvvm_mma_m16n8k8_row_col_f16_f16,          // llvm.nvvm.mma.m16n8k8.row.col.f16.f16
    nvvm_mma_m16n8k8_row_col_f32_f32,          // llvm.nvvm.mma.m16n8k8.row.col.f32.f32
    nvvm_mma_m16n8k8_row_col_tf32,             // llvm.nvvm.mma.m16n8k8.row.col.tf32
    nvvm_mma_m8n8k16_row_col_s8,               // llvm.nvvm.mma.m8n8k16.row.col.s8
    nvvm_mma_m8n8k16_row_col_s8_u8,            // llvm.nvvm.mma.m8n8k16.row.col.s8.u8
    nvvm_mma_m8n8k16_row_col_satfinite_s8,     // llvm.nvvm.mma.m8n8k16.row.col.satfinite.s8
    nvvm_mma_m8n8k16_row_col_satfinite_s8_u8,  // llvm.nvvm.mma.m8n8k16.row.col.satfinite.s8.u8
    nvvm_mma_m8n8k16_row_col_satfinite_u8,     // llvm.nvvm.mma.m8n8k16.row.col.satfinite.u8
    nvvm_mma_m8n8k16_row_col_satfinite_u8_s8,  // llvm.nvvm.mma.m8n8k16.row.col.satfinite.u8.s8
    nvvm_mma_m8n8k16_row_col_u8,               // llvm.nvvm.mma.m8n8k16.row.col.u8
    nvvm_mma_m8n8k16_row_col_u8_s8,            // llvm.nvvm.mma.m8n8k16.row.col.u8.s8
    nvvm_mma_m8n8k32_row_col_s4,               // llvm.nvvm.mma.m8n8k32.row.col.s4
    nvvm_mma_m8n8k32_row_col_s4_u4,            // llvm.nvvm.mma.m8n8k32.row.col.s4.u4
    nvvm_mma_m8n8k32_row_col_satfinite_s4,     // llvm.nvvm.mma.m8n8k32.row.col.satfinite.s4
    nvvm_mma_m8n8k32_row_col_satfinite_s4_u4,  // llvm.nvvm.mma.m8n8k32.row.col.satfinite.s4.u4
    nvvm_mma_m8n8k32_row_col_satfinite_u4,     // llvm.nvvm.mma.m8n8k32.row.col.satfinite.u4
    nvvm_mma_m8n8k32_row_col_satfinite_u4_s4,  // llvm.nvvm.mma.m8n8k32.row.col.satfinite.u4.s4
    nvvm_mma_m8n8k32_row_col_u4,               // llvm.nvvm.mma.m8n8k32.row.col.u4
    nvvm_mma_m8n8k32_row_col_u4_s4,            // llvm.nvvm.mma.m8n8k32.row.col.u4.s4
    nvvm_mma_m8n8k4_col_col_f16_f16,           // llvm.nvvm.mma.m8n8k4.col.col.f16.f16
    nvvm_mma_m8n8k4_col_col_f32_f16,           // llvm.nvvm.mma.m8n8k4.col.col.f32.f16
    nvvm_mma_m8n8k4_col_col_f32_f32,           // llvm.nvvm.mma.m8n8k4.col.col.f32.f32
    nvvm_mma_m8n8k4_col_row_f16_f16,           // llvm.nvvm.mma.m8n8k4.col.row.f16.f16
    nvvm_mma_m8n8k4_col_row_f32_f16,           // llvm.nvvm.mma.m8n8k4.col.row.f32.f16
    nvvm_mma_m8n8k4_col_row_f32_f32,           // llvm.nvvm.mma.m8n8k4.col.row.f32.f32
    nvvm_mma_m8n8k4_row_col_f16_f16,           // llvm.nvvm.mma.m8n8k4.row.col.f16.f16
    nvvm_mma_m8n8k4_row_col_f32_f16,           // llvm.nvvm.mma.m8n8k4.row.col.f32.f16
    nvvm_mma_m8n8k4_row_col_f32_f32,           // llvm.nvvm.mma.m8n8k4.row.col.f32.f32
    nvvm_mma_m8n8k4_row_col_f64,               // llvm.nvvm.mma.m8n8k4.row.col.f64
    nvvm_mma_m8n8k4_row_row_f16_f16,           // llvm.nvvm.mma.m8n8k4.row.row.f16.f16
    nvvm_mma_m8n8k4_row_row_f32_f16,           // llvm.nvvm.mma.m8n8k4.row.row.f32.f16
    nvvm_mma_m8n8k4_row_row_f32_f32,           // llvm.nvvm.mma.m8n8k4.row.row.f32.f32
    nvvm_mma_xor_popc_m16n8k128_row_col_b1,    // llvm.nvvm.mma.xor.popc.m16n8k128.row.col.b1
    nvvm_mma_xor_popc_m16n8k256_row_col_b1,    // llvm.nvvm.mma.xor.popc.m16n8k256.row.col.b1
    nvvm_mma_xor_popc_m8n8k128_row_col_b1,     // llvm.nvvm.mma.xor.popc.m8n8k128.row.col.b1
    nvvm_move_double,                          // llvm.nvvm.move.double
    nvvm_move_float,                           // llvm.nvvm.move.float
    nvvm_move_i16,                             // llvm.nvvm.move.i16
    nvvm_move_i32,                             // llvm.nvvm.move.i32
    nvvm_move_i64,                             // llvm.nvvm.move.i64
    nvvm_move_ptr,                             // llvm.nvvm.move.ptr
    nvvm_mul_rm_d,                             // llvm.nvvm.mul.rm.d
    nvvm_mul_rm_f,                             // llvm.nvvm.mul.rm.f
    nvvm_mul_rm_ftz_f,                         // llvm.nvvm.mul.rm.ftz.f
    nvvm_mul_rn_d,                             // llvm.nvvm.mul.rn.d
    nvvm_mul_rn_f,                             // llvm.nvvm.mul.rn.f
    nvvm_mul_rn_ftz_f,                         // llvm.nvvm.mul.rn.ftz.f
    nvvm_mul_rp_d,                             // llvm.nvvm.mul.rp.d
    nvvm_mul_rp_f,                             // llvm.nvvm.mul.rp.f
    nvvm_mul_rp_ftz_f,                         // llvm.nvvm.mul.rp.ftz.f
    nvvm_mul_rz_d,                             // llvm.nvvm.mul.rz.d
    nvvm_mul_rz_f,                             // llvm.nvvm.mul.rz.f
    nvvm_mul_rz_ftz_f,                         // llvm.nvvm.mul.rz.ftz.f
    nvvm_mul24_i,                              // llvm.nvvm.mul24.i
    nvvm_mul24_ui,                             // llvm.nvvm.mul24.ui
    nvvm_mulhi_i,                              // llvm.nvvm.mulhi.i
    nvvm_mulhi_ll,                             // llvm.nvvm.mulhi.ll
    nvvm_mulhi_ui,                             // llvm.nvvm.mulhi.ui
    nvvm_mulhi_ull,                            // llvm.nvvm.mulhi.ull
    nvvm_neg_bf16,                             // llvm.nvvm.neg.bf16
    nvvm_neg_bf16x2,                           // llvm.nvvm.neg.bf16x2
    nvvm_prmt,                                 // llvm.nvvm.prmt
    nvvm_ptr_constant_to_gen,                  // llvm.nvvm.ptr.constant.to.gen
    nvvm_ptr_gen_to_constant,                  // llvm.nvvm.ptr.gen.to.constant
    nvvm_ptr_gen_to_global,                    // llvm.nvvm.ptr.gen.to.global
    nvvm_ptr_gen_to_local,                     // llvm.nvvm.ptr.gen.to.local
    nvvm_ptr_gen_to_param,                     // llvm.nvvm.ptr.gen.to.param
    nvvm_ptr_gen_to_shared,                    // llvm.nvvm.ptr.gen.to.shared
    nvvm_ptr_global_to_gen,                    // llvm.nvvm.ptr.global.to.gen
    nvvm_ptr_local_to_gen,                     // llvm.nvvm.ptr.local.to.gen
    nvvm_ptr_shared_to_gen,                    // llvm.nvvm.ptr.shared.to.gen
    nvvm_rcp_approx_ftz_d,                     // llvm.nvvm.rcp.approx.ftz.d
    nvvm_rcp_approx_ftz_f,                     // llvm.nvvm.rcp.approx.ftz.f
    nvvm_rcp_rm_d,                             // llvm.nvvm.rcp.rm.d
    nvvm_rcp_rm_f,                             // llvm.nvvm.rcp.rm.f
    nvvm_rcp_rm_ftz_f,                         // llvm.nvvm.rcp.rm.ftz.f
    nvvm_rcp_rn_d,                             // llvm.nvvm.rcp.rn.d
    nvvm_rcp_rn_f,                             // llvm.nvvm.rcp.rn.f
    nvvm_rcp_rn_ftz_f,                         // llvm.nvvm.rcp.rn.ftz.f
    nvvm_rcp_rp_d,                             // llvm.nvvm.rcp.rp.d
    nvvm_rcp_rp_f,                             // llvm.nvvm.rcp.rp.f
    nvvm_rcp_rp_ftz_f,                         // llvm.nvvm.rcp.rp.ftz.f
    nvvm_rcp_rz_d,                             // llvm.nvvm.rcp.rz.d
    nvvm_rcp_rz_f,                             // llvm.nvvm.rcp.rz.f
    nvvm_rcp_rz_ftz_f,                         // llvm.nvvm.rcp.rz.ftz.f
    nvvm_read_ptx_sreg_clock,                  // llvm.nvvm.read.ptx.sreg.clock
    nvvm_read_ptx_sreg_clock64,                // llvm.nvvm.read.ptx.sreg.clock64
    nvvm_read_ptx_sreg_cluster_ctaid_w,        // llvm.nvvm.read.ptx.sreg.cluster.ctaid.w
    nvvm_read_ptx_sreg_cluster_ctaid_x,        // llvm.nvvm.read.ptx.sreg.cluster.ctaid.x
    nvvm_read_ptx_sreg_cluster_ctaid_y,        // llvm.nvvm.read.ptx.sreg.cluster.ctaid.y
    nvvm_read_ptx_sreg_cluster_ctaid_z,        // llvm.nvvm.read.ptx.sreg.cluster.ctaid.z
    nvvm_read_ptx_sreg_cluster_ctarank,        // llvm.nvvm.read.ptx.sreg.cluster.ctarank
    nvvm_read_ptx_sreg_cluster_nctaid_w,       // llvm.nvvm.read.ptx.sreg.cluster.nctaid.w
    nvvm_read_ptx_sreg_cluster_nctaid_x,       // llvm.nvvm.read.ptx.sreg.cluster.nctaid.x
    nvvm_read_ptx_sreg_cluster_nctaid_y,       // llvm.nvvm.read.ptx.sreg.cluster.nctaid.y
    nvvm_read_ptx_sreg_cluster_nctaid_z,       // llvm.nvvm.read.ptx.sreg.cluster.nctaid.z
    nvvm_read_ptx_sreg_cluster_nctarank,       // llvm.nvvm.read.ptx.sreg.cluster.nctarank
    nvvm_read_ptx_sreg_clusterid_w,            // llvm.nvvm.read.ptx.sreg.clusterid.w
    nvvm_read_ptx_sreg_clusterid_x,            // llvm.nvvm.read.ptx.sreg.clusterid.x
    nvvm_read_ptx_sreg_clusterid_y,            // llvm.nvvm.read.ptx.sreg.clusterid.y
    nvvm_read_ptx_sreg_clusterid_z,            // llvm.nvvm.read.ptx.sreg.clusterid.z
    nvvm_read_ptx_sreg_ctaid_w,                // llvm.nvvm.read.ptx.sreg.ctaid.w
    nvvm_read_ptx_sreg_ctaid_x,                // llvm.nvvm.read.ptx.sreg.ctaid.x
    nvvm_read_ptx_sreg_ctaid_y,                // llvm.nvvm.read.ptx.sreg.ctaid.y
    nvvm_read_ptx_sreg_ctaid_z,                // llvm.nvvm.read.ptx.sreg.ctaid.z
    nvvm_read_ptx_sreg_envreg0,                // llvm.nvvm.read.ptx.sreg.envreg0
    nvvm_read_ptx_sreg_envreg1,                // llvm.nvvm.read.ptx.sreg.envreg1
    nvvm_read_ptx_sreg_envreg10,               // llvm.nvvm.read.ptx.sreg.envreg10
    nvvm_read_ptx_sreg_envreg11,               // llvm.nvvm.read.ptx.sreg.envreg11
    nvvm_read_ptx_sreg_envreg12,               // llvm.nvvm.read.ptx.sreg.envreg12
    nvvm_read_ptx_sreg_envreg13,               // llvm.nvvm.read.ptx.sreg.envreg13
    nvvm_read_ptx_sreg_envreg14,               // llvm.nvvm.read.ptx.sreg.envreg14
    nvvm_read_ptx_sreg_envreg15,               // llvm.nvvm.read.ptx.sreg.envreg15
    nvvm_read_ptx_sreg_envreg16,               // llvm.nvvm.read.ptx.sreg.envreg16
    nvvm_read_ptx_sreg_envreg17,               // llvm.nvvm.read.ptx.sreg.envreg17
    nvvm_read_ptx_sreg_envreg18,               // llvm.nvvm.read.ptx.sreg.envreg18
    nvvm_read_ptx_sreg_envreg19,               // llvm.nvvm.read.ptx.sreg.envreg19
    nvvm_read_ptx_sreg_envreg2,                // llvm.nvvm.read.ptx.sreg.envreg2
    nvvm_read_ptx_sreg_envreg20,               // llvm.nvvm.read.ptx.sreg.envreg20
    nvvm_read_ptx_sreg_envreg21,               // llvm.nvvm.read.ptx.sreg.envreg21
    nvvm_read_ptx_sreg_envreg22,               // llvm.nvvm.read.ptx.sreg.envreg22
    nvvm_read_ptx_sreg_envreg23,               // llvm.nvvm.read.ptx.sreg.envreg23
    nvvm_read_ptx_sreg_envreg24,               // llvm.nvvm.read.ptx.sreg.envreg24
    nvvm_read_ptx_sreg_envreg25,               // llvm.nvvm.read.ptx.sreg.envreg25
    nvvm_read_ptx_sreg_envreg26,               // llvm.nvvm.read.ptx.sreg.envreg26
    nvvm_read_ptx_sreg_envreg27,               // llvm.nvvm.read.ptx.sreg.envreg27
    nvvm_read_ptx_sreg_envreg28,               // llvm.nvvm.read.ptx.sreg.envreg28
    nvvm_read_ptx_sreg_envreg29,               // llvm.nvvm.read.ptx.sreg.envreg29
    nvvm_read_ptx_sreg_envreg3,                // llvm.nvvm.read.ptx.sreg.envreg3
    nvvm_read_ptx_sreg_envreg30,               // llvm.nvvm.read.ptx.sreg.envreg30
    nvvm_read_ptx_sreg_envreg31,               // llvm.nvvm.read.ptx.sreg.envreg31
    nvvm_read_ptx_sreg_envreg4,                // llvm.nvvm.read.ptx.sreg.envreg4
    nvvm_read_ptx_sreg_envreg5,                // llvm.nvvm.read.ptx.sreg.envreg5
    nvvm_read_ptx_sreg_envreg6,                // llvm.nvvm.read.ptx.sreg.envreg6
    nvvm_read_ptx_sreg_envreg7,                // llvm.nvvm.read.ptx.sreg.envreg7
    nvvm_read_ptx_sreg_envreg8,                // llvm.nvvm.read.ptx.sreg.envreg8
    nvvm_read_ptx_sreg_envreg9,                // llvm.nvvm.read.ptx.sreg.envreg9
    nvvm_read_ptx_sreg_gridid,                 // llvm.nvvm.read.ptx.sreg.gridid
    nvvm_read_ptx_sreg_laneid,                 // llvm.nvvm.read.ptx.sreg.laneid
    nvvm_read_ptx_sreg_lanemask_eq,            // llvm.nvvm.read.ptx.sreg.lanemask.eq
    nvvm_read_ptx_sreg_lanemask_ge,            // llvm.nvvm.read.ptx.sreg.lanemask.ge
    nvvm_read_ptx_sreg_lanemask_gt,            // llvm.nvvm.read.ptx.sreg.lanemask.gt
    nvvm_read_ptx_sreg_lanemask_le,            // llvm.nvvm.read.ptx.sreg.lanemask.le
    nvvm_read_ptx_sreg_lanemask_lt,            // llvm.nvvm.read.ptx.sreg.lanemask.lt
    nvvm_read_ptx_sreg_nclusterid_w,           // llvm.nvvm.read.ptx.sreg.nclusterid.w
    nvvm_read_ptx_sreg_nclusterid_x,           // llvm.nvvm.read.ptx.sreg.nclusterid.x
    nvvm_read_ptx_sreg_nclusterid_y,           // llvm.nvvm.read.ptx.sreg.nclusterid.y
    nvvm_read_ptx_sreg_nclusterid_z,           // llvm.nvvm.read.ptx.sreg.nclusterid.z
    nvvm_read_ptx_sreg_nctaid_w,               // llvm.nvvm.read.ptx.sreg.nctaid.w
    nvvm_read_ptx_sreg_nctaid_x,               // llvm.nvvm.read.ptx.sreg.nctaid.x
    nvvm_read_ptx_sreg_nctaid_y,               // llvm.nvvm.read.ptx.sreg.nctaid.y
    nvvm_read_ptx_sreg_nctaid_z,               // llvm.nvvm.read.ptx.sreg.nctaid.z
    nvvm_read_ptx_sreg_nsmid,                  // llvm.nvvm.read.ptx.sreg.nsmid
    nvvm_read_ptx_sreg_ntid_w,                 // llvm.nvvm.read.ptx.sreg.ntid.w
    nvvm_read_ptx_sreg_ntid_x,                 // llvm.nvvm.read.ptx.sreg.ntid.x
    nvvm_read_ptx_sreg_ntid_y,                 // llvm.nvvm.read.ptx.sreg.ntid.y
    nvvm_read_ptx_sreg_ntid_z,                 // llvm.nvvm.read.ptx.sreg.ntid.z
    nvvm_read_ptx_sreg_nwarpid,                // llvm.nvvm.read.ptx.sreg.nwarpid
    nvvm_read_ptx_sreg_pm0,                    // llvm.nvvm.read.ptx.sreg.pm0
    nvvm_read_ptx_sreg_pm1,                    // llvm.nvvm.read.ptx.sreg.pm1
    nvvm_read_ptx_sreg_pm2,                    // llvm.nvvm.read.ptx.sreg.pm2
    nvvm_read_ptx_sreg_pm3,                    // llvm.nvvm.read.ptx.sreg.pm3
    nvvm_read_ptx_sreg_smid,                   // llvm.nvvm.read.ptx.sreg.smid
    nvvm_read_ptx_sreg_tid_w,                  // llvm.nvvm.read.ptx.sreg.tid.w
    nvvm_read_ptx_sreg_tid_x,                  // llvm.nvvm.read.ptx.sreg.tid.x
    nvvm_read_ptx_sreg_tid_y,                  // llvm.nvvm.read.ptx.sreg.tid.y
    nvvm_read_ptx_sreg_tid_z,                  // llvm.nvvm.read.ptx.sreg.tid.z
    nvvm_read_ptx_sreg_warpid,                 // llvm.nvvm.read.ptx.sreg.warpid
    nvvm_read_ptx_sreg_warpsize,               // llvm.nvvm.read.ptx.sreg.warpsize
    nvvm_redux_sync_add,                       // llvm.nvvm.redux.sync.add
    nvvm_redux_sync_and,                       // llvm.nvvm.redux.sync.and
    nvvm_redux_sync_max,                       // llvm.nvvm.redux.sync.max
    nvvm_redux_sync_min,                       // llvm.nvvm.redux.sync.min
    nvvm_redux_sync_or,                        // llvm.nvvm.redux.sync.or
    nvvm_redux_sync_umax,                      // llvm.nvvm.redux.sync.umax
    nvvm_redux_sync_umin,                      // llvm.nvvm.redux.sync.umin
    nvvm_redux_sync_xor,                       // llvm.nvvm.redux.sync.xor
    nvvm_reflect,                              // llvm.nvvm.reflect
    nvvm_rotate_b32,                           // llvm.nvvm.rotate.b32
    nvvm_rotate_b64,                           // llvm.nvvm.rotate.b64
    nvvm_rotate_right_b64,                     // llvm.nvvm.rotate.right.b64
    nvvm_round_d,                              // llvm.nvvm.round.d
    nvvm_round_f,                              // llvm.nvvm.round.f
    nvvm_round_ftz_f,                          // llvm.nvvm.round.ftz.f
    nvvm_rsqrt_approx_d,                       // llvm.nvvm.rsqrt.approx.d
    nvvm_rsqrt_approx_f,                       // llvm.nvvm.rsqrt.approx.f
    nvvm_rsqrt_approx_ftz_f,                   // llvm.nvvm.rsqrt.approx.ftz.f
    nvvm_sad_i,                                // llvm.nvvm.sad.i
    nvvm_sad_ui,                               // llvm.nvvm.sad.ui
    nvvm_saturate_d,                           // llvm.nvvm.saturate.d
    nvvm_saturate_f,                           // llvm.nvvm.saturate.f
    nvvm_saturate_ftz_f,                       // llvm.nvvm.saturate.ftz.f
    nvvm_shfl_bfly_f32,                        // llvm.nvvm.shfl.bfly.f32
    nvvm_shfl_bfly_f32p,                       // llvm.nvvm.shfl.bfly.f32p
    nvvm_shfl_bfly_i32,                        // llvm.nvvm.shfl.bfly.i32
    nvvm_shfl_bfly_i32p,                       // llvm.nvvm.shfl.bfly.i32p
    nvvm_shfl_down_f32,                        // llvm.nvvm.shfl.down.f32
    nvvm_shfl_down_f32p,                       // llvm.nvvm.shfl.down.f32p
    nvvm_shfl_down_i32,                        // llvm.nvvm.shfl.down.i32
    nvvm_shfl_down_i32p,                       // llvm.nvvm.shfl.down.i32p
    nvvm_shfl_idx_f32,                         // llvm.nvvm.shfl.idx.f32
    nvvm_shfl_idx_f32p,                        // llvm.nvvm.shfl.idx.f32p
    nvvm_shfl_idx_i32,                         // llvm.nvvm.shfl.idx.i32
    nvvm_shfl_idx_i32p,                        // llvm.nvvm.shfl.idx.i32p
    nvvm_shfl_sync_bfly_f32,                   // llvm.nvvm.shfl.sync.bfly.f32
    nvvm_shfl_sync_bfly_f32p,                  // llvm.nvvm.shfl.sync.bfly.f32p
    nvvm_shfl_sync_bfly_i32,                   // llvm.nvvm.shfl.sync.bfly.i32
    nvvm_shfl_sync_bfly_i32p,                  // llvm.nvvm.shfl.sync.bfly.i32p
    nvvm_shfl_sync_down_f32,                   // llvm.nvvm.shfl.sync.down.f32
    nvvm_shfl_sync_down_f32p,                  // llvm.nvvm.shfl.sync.down.f32p
    nvvm_shfl_sync_down_i32,                   // llvm.nvvm.shfl.sync.down.i32
    nvvm_shfl_sync_down_i32p,                  // llvm.nvvm.shfl.sync.down.i32p
    nvvm_shfl_sync_idx_f32,                    // llvm.nvvm.shfl.sync.idx.f32
    nvvm_shfl_sync_idx_f32p,                   // llvm.nvvm.shfl.sync.idx.f32p
    nvvm_shfl_sync_idx_i32,                    // llvm.nvvm.shfl.sync.idx.i32
    nvvm_shfl_sync_idx_i32p,                   // llvm.nvvm.shfl.sync.idx.i32p
    nvvm_shfl_sync_up_f32,                     // llvm.nvvm.shfl.sync.up.f32
    nvvm_shfl_sync_up_f32p,                    // llvm.nvvm.shfl.sync.up.f32p
    nvvm_shfl_sync_up_i32,                     // llvm.nvvm.shfl.sync.up.i32
    nvvm_shfl_sync_up_i32p,                    // llvm.nvvm.shfl.sync.up.i32p
    nvvm_shfl_up_f32,                          // llvm.nvvm.shfl.up.f32
    nvvm_shfl_up_f32p,                         // llvm.nvvm.shfl.up.f32p
    nvvm_shfl_up_i32,                          // llvm.nvvm.shfl.up.i32
    nvvm_shfl_up_i32p,                         // llvm.nvvm.shfl.up.i32p
    nvvm_sin_approx_f,                         // llvm.nvvm.sin.approx.f
    nvvm_sin_approx_ftz_f,                     // llvm.nvvm.sin.approx.ftz.f
    nvvm_sqrt_approx_f,                        // llvm.nvvm.sqrt.approx.f
    nvvm_sqrt_approx_ftz_f,                    // llvm.nvvm.sqrt.approx.ftz.f
    nvvm_sqrt_f,                               // llvm.nvvm.sqrt.f
    nvvm_sqrt_rm_d,                            // llvm.nvvm.sqrt.rm.d
    nvvm_sqrt_rm_f,                            // llvm.nvvm.sqrt.rm.f
    nvvm_sqrt_rm_ftz_f,                        // llvm.nvvm.sqrt.rm.ftz.f
    nvvm_sqrt_rn_d,                            // llvm.nvvm.sqrt.rn.d
    nvvm_sqrt_rn_f,                            // llvm.nvvm.sqrt.rn.f
    nvvm_sqrt_rn_ftz_f,                        // llvm.nvvm.sqrt.rn.ftz.f
    nvvm_sqrt_rp_d,                            // llvm.nvvm.sqrt.rp.d
    nvvm_sqrt_rp_f,                            // llvm.nvvm.sqrt.rp.f
    nvvm_sqrt_rp_ftz_f,                        // llvm.nvvm.sqrt.rp.ftz.f
    nvvm_sqrt_rz_d,                            // llvm.nvvm.sqrt.rz.d
    nvvm_sqrt_rz_f,                            // llvm.nvvm.sqrt.rz.f
    nvvm_sqrt_rz_ftz_f,                        // llvm.nvvm.sqrt.rz.ftz.f
    nvvm_suld_1d_array_i16_clamp,              // llvm.nvvm.suld.1d.array.i16.clamp
    nvvm_suld_1d_array_i16_trap,               // llvm.nvvm.suld.1d.array.i16.trap
    nvvm_suld_1d_array_i16_zero,               // llvm.nvvm.suld.1d.array.i16.zero
    nvvm_suld_1d_array_i32_clamp,              // llvm.nvvm.suld.1d.array.i32.clamp
    nvvm_suld_1d_array_i32_trap,               // llvm.nvvm.suld.1d.array.i32.trap
    nvvm_suld_1d_array_i32_zero,               // llvm.nvvm.suld.1d.array.i32.zero
    nvvm_suld_1d_array_i64_clamp,              // llvm.nvvm.suld.1d.array.i64.clamp
    nvvm_suld_1d_array_i64_trap,               // llvm.nvvm.suld.1d.array.i64.trap
    nvvm_suld_1d_array_i64_zero,               // llvm.nvvm.suld.1d.array.i64.zero
    nvvm_suld_1d_array_i8_clamp,               // llvm.nvvm.suld.1d.array.i8.clamp
    nvvm_suld_1d_array_i8_trap,                // llvm.nvvm.suld.1d.array.i8.trap
    nvvm_suld_1d_array_i8_zero,                // llvm.nvvm.suld.1d.array.i8.zero
    nvvm_suld_1d_array_v2i16_clamp,            // llvm.nvvm.suld.1d.array.v2i16.clamp
    nvvm_suld_1d_array_v2i16_trap,             // llvm.nvvm.suld.1d.array.v2i16.trap
    nvvm_suld_1d_array_v2i16_zero,             // llvm.nvvm.suld.1d.array.v2i16.zero
    nvvm_suld_1d_array_v2i32_clamp,            // llvm.nvvm.suld.1d.array.v2i32.clamp
    nvvm_suld_1d_array_v2i32_trap,             // llvm.nvvm.suld.1d.array.v2i32.trap
    nvvm_suld_1d_array_v2i32_zero,             // llvm.nvvm.suld.1d.array.v2i32.zero
    nvvm_suld_1d_array_v2i64_clamp,            // llvm.nvvm.suld.1d.array.v2i64.clamp
    nvvm_suld_1d_array_v2i64_trap,             // llvm.nvvm.suld.1d.array.v2i64.trap
    nvvm_suld_1d_array_v2i64_zero,             // llvm.nvvm.suld.1d.array.v2i64.zero
    nvvm_suld_1d_array_v2i8_clamp,             // llvm.nvvm.suld.1d.array.v2i8.clamp
    nvvm_suld_1d_array_v2i8_trap,              // llvm.nvvm.suld.1d.array.v2i8.trap
    nvvm_suld_1d_array_v2i8_zero,              // llvm.nvvm.suld.1d.array.v2i8.zero
    nvvm_suld_1d_array_v4i16_clamp,            // llvm.nvvm.suld.1d.array.v4i16.clamp
    nvvm_suld_1d_array_v4i16_trap,             // llvm.nvvm.suld.1d.array.v4i16.trap
    nvvm_suld_1d_array_v4i16_zero,             // llvm.nvvm.suld.1d.array.v4i16.zero
    nvvm_suld_1d_array_v4i32_clamp,            // llvm.nvvm.suld.1d.array.v4i32.clamp
    nvvm_suld_1d_array_v4i32_trap,             // llvm.nvvm.suld.1d.array.v4i32.trap
    nvvm_suld_1d_array_v4i32_zero,             // llvm.nvvm.suld.1d.array.v4i32.zero
    nvvm_suld_1d_array_v4i8_clamp,             // llvm.nvvm.suld.1d.array.v4i8.clamp
    nvvm_suld_1d_array_v4i8_trap,              // llvm.nvvm.suld.1d.array.v4i8.trap
    nvvm_suld_1d_array_v4i8_zero,              // llvm.nvvm.suld.1d.array.v4i8.zero
    nvvm_suld_1d_i16_clamp,                    // llvm.nvvm.suld.1d.i16.clamp
    nvvm_suld_1d_i16_trap,                     // llvm.nvvm.suld.1d.i16.trap
    nvvm_suld_1d_i16_zero,                     // llvm.nvvm.suld.1d.i16.zero
    nvvm_suld_1d_i32_clamp,                    // llvm.nvvm.suld.1d.i32.clamp
    nvvm_suld_1d_i32_trap,                     // llvm.nvvm.suld.1d.i32.trap
    nvvm_suld_1d_i32_zero,                     // llvm.nvvm.suld.1d.i32.zero
    nvvm_suld_1d_i64_clamp,                    // llvm.nvvm.suld.1d.i64.clamp
    nvvm_suld_1d_i64_trap,                     // llvm.nvvm.suld.1d.i64.trap
    nvvm_suld_1d_i64_zero,                     // llvm.nvvm.suld.1d.i64.zero
    nvvm_suld_1d_i8_clamp,                     // llvm.nvvm.suld.1d.i8.clamp
    nvvm_suld_1d_i8_trap,                      // llvm.nvvm.suld.1d.i8.trap
    nvvm_suld_1d_i8_zero,                      // llvm.nvvm.suld.1d.i8.zero
    nvvm_suld_1d_v2i16_clamp,                  // llvm.nvvm.suld.1d.v2i16.clamp
    nvvm_suld_1d_v2i16_trap,                   // llvm.nvvm.suld.1d.v2i16.trap
    nvvm_suld_1d_v2i16_zero,                   // llvm.nvvm.suld.1d.v2i16.zero
    nvvm_suld_1d_v2i32_clamp,                  // llvm.nvvm.suld.1d.v2i32.clamp
    nvvm_suld_1d_v2i32_trap,                   // llvm.nvvm.suld.1d.v2i32.trap
    nvvm_suld_1d_v2i32_zero,                   // llvm.nvvm.suld.1d.v2i32.zero
    nvvm_suld_1d_v2i64_clamp,                  // llvm.nvvm.suld.1d.v2i64.clamp
    nvvm_suld_1d_v2i64_trap,                   // llvm.nvvm.suld.1d.v2i64.trap
    nvvm_suld_1d_v2i64_zero,                   // llvm.nvvm.suld.1d.v2i64.zero
    nvvm_suld_1d_v2i8_clamp,                   // llvm.nvvm.suld.1d.v2i8.clamp
    nvvm_suld_1d_v2i8_trap,                    // llvm.nvvm.suld.1d.v2i8.trap
    nvvm_suld_1d_v2i8_zero,                    // llvm.nvvm.suld.1d.v2i8.zero
    nvvm_suld_1d_v4i16_clamp,                  // llvm.nvvm.suld.1d.v4i16.clamp
    nvvm_suld_1d_v4i16_trap,                   // llvm.nvvm.suld.1d.v4i16.trap
    nvvm_suld_1d_v4i16_zero,                   // llvm.nvvm.suld.1d.v4i16.zero
    nvvm_suld_1d_v4i32_clamp,                  // llvm.nvvm.suld.1d.v4i32.clamp
    nvvm_suld_1d_v4i32_trap,                   // llvm.nvvm.suld.1d.v4i32.trap
    nvvm_suld_1d_v4i32_zero,                   // llvm.nvvm.suld.1d.v4i32.zero
    nvvm_suld_1d_v4i8_clamp,                   // llvm.nvvm.suld.1d.v4i8.clamp
    nvvm_suld_1d_v4i8_trap,                    // llvm.nvvm.suld.1d.v4i8.trap
    nvvm_suld_1d_v4i8_zero,                    // llvm.nvvm.suld.1d.v4i8.zero
    nvvm_suld_2d_array_i16_clamp,              // llvm.nvvm.suld.2d.array.i16.clamp
    nvvm_suld_2d_array_i16_trap,               // llvm.nvvm.suld.2d.array.i16.trap
    nvvm_suld_2d_array_i16_zero,               // llvm.nvvm.suld.2d.array.i16.zero
    nvvm_suld_2d_array_i32_clamp,              // llvm.nvvm.suld.2d.array.i32.clamp
    nvvm_suld_2d_array_i32_trap,               // llvm.nvvm.suld.2d.array.i32.trap
    nvvm_suld_2d_array_i32_zero,               // llvm.nvvm.suld.2d.array.i32.zero
    nvvm_suld_2d_array_i64_clamp,              // llvm.nvvm.suld.2d.array.i64.clamp
    nvvm_suld_2d_array_i64_trap,               // llvm.nvvm.suld.2d.array.i64.trap
    nvvm_suld_2d_array_i64_zero,               // llvm.nvvm.suld.2d.array.i64.zero
    nvvm_suld_2d_array_i8_clamp,               // llvm.nvvm.suld.2d.array.i8.clamp
    nvvm_suld_2d_array_i8_trap,                // llvm.nvvm.suld.2d.array.i8.trap
    nvvm_suld_2d_array_i8_zero,                // llvm.nvvm.suld.2d.array.i8.zero
    nvvm_suld_2d_array_v2i16_clamp,            // llvm.nvvm.suld.2d.array.v2i16.clamp
    nvvm_suld_2d_array_v2i16_trap,             // llvm.nvvm.suld.2d.array.v2i16.trap
    nvvm_suld_2d_array_v2i16_zero,             // llvm.nvvm.suld.2d.array.v2i16.zero
    nvvm_suld_2d_array_v2i32_clamp,            // llvm.nvvm.suld.2d.array.v2i32.clamp
    nvvm_suld_2d_array_v2i32_trap,             // llvm.nvvm.suld.2d.array.v2i32.trap
    nvvm_suld_2d_array_v2i32_zero,             // llvm.nvvm.suld.2d.array.v2i32.zero
    nvvm_suld_2d_array_v2i64_clamp,            // llvm.nvvm.suld.2d.array.v2i64.clamp
    nvvm_suld_2d_array_v2i64_trap,             // llvm.nvvm.suld.2d.array.v2i64.trap
    nvvm_suld_2d_array_v2i64_zero,             // llvm.nvvm.suld.2d.array.v2i64.zero
    nvvm_suld_2d_array_v2i8_clamp,             // llvm.nvvm.suld.2d.array.v2i8.clamp
    nvvm_suld_2d_array_v2i8_trap,              // llvm.nvvm.suld.2d.array.v2i8.trap
    nvvm_suld_2d_array_v2i8_zero,              // llvm.nvvm.suld.2d.array.v2i8.zero
    nvvm_suld_2d_array_v4i16_clamp,            // llvm.nvvm.suld.2d.array.v4i16.clamp
    nvvm_suld_2d_array_v4i16_trap,             // llvm.nvvm.suld.2d.array.v4i16.trap
    nvvm_suld_2d_array_v4i16_zero,             // llvm.nvvm.suld.2d.array.v4i16.zero
    nvvm_suld_2d_array_v4i32_clamp,            // llvm.nvvm.suld.2d.array.v4i32.clamp
    nvvm_suld_2d_array_v4i32_trap,             // llvm.nvvm.suld.2d.array.v4i32.trap
    nvvm_suld_2d_array_v4i32_zero,             // llvm.nvvm.suld.2d.array.v4i32.zero
    nvvm_suld_2d_array_v4i8_clamp,             // llvm.nvvm.suld.2d.array.v4i8.clamp
    nvvm_suld_2d_array_v4i8_trap,              // llvm.nvvm.suld.2d.array.v4i8.trap
    nvvm_suld_2d_array_v4i8_zero,              // llvm.nvvm.suld.2d.array.v4i8.zero
    nvvm_suld_2d_i16_clamp,                    // llvm.nvvm.suld.2d.i16.clamp
    nvvm_suld_2d_i16_trap,                     // llvm.nvvm.suld.2d.i16.trap
    nvvm_suld_2d_i16_zero,                     // llvm.nvvm.suld.2d.i16.zero
    nvvm_suld_2d_i32_clamp,                    // llvm.nvvm.suld.2d.i32.clamp
    nvvm_suld_2d_i32_trap,                     // llvm.nvvm.suld.2d.i32.trap
    nvvm_suld_2d_i32_zero,                     // llvm.nvvm.suld.2d.i32.zero
    nvvm_suld_2d_i64_clamp,                    // llvm.nvvm.suld.2d.i64.clamp
    nvvm_suld_2d_i64_trap,                     // llvm.nvvm.suld.2d.i64.trap
    nvvm_suld_2d_i64_zero,                     // llvm.nvvm.suld.2d.i64.zero
    nvvm_suld_2d_i8_clamp,                     // llvm.nvvm.suld.2d.i8.clamp
    nvvm_suld_2d_i8_trap,                      // llvm.nvvm.suld.2d.i8.trap
    nvvm_suld_2d_i8_zero,                      // llvm.nvvm.suld.2d.i8.zero
    nvvm_suld_2d_v2i16_clamp,                  // llvm.nvvm.suld.2d.v2i16.clamp
    nvvm_suld_2d_v2i16_trap,                   // llvm.nvvm.suld.2d.v2i16.trap
    nvvm_suld_2d_v2i16_zero,                   // llvm.nvvm.suld.2d.v2i16.zero
    nvvm_suld_2d_v2i32_clamp,                  // llvm.nvvm.suld.2d.v2i32.clamp
    nvvm_suld_2d_v2i32_trap,                   // llvm.nvvm.suld.2d.v2i32.trap
    nvvm_suld_2d_v2i32_zero,                   // llvm.nvvm.suld.2d.v2i32.zero
    nvvm_suld_2d_v2i64_clamp,                  // llvm.nvvm.suld.2d.v2i64.clamp
    nvvm_suld_2d_v2i64_trap,                   // llvm.nvvm.suld.2d.v2i64.trap
    nvvm_suld_2d_v2i64_zero,                   // llvm.nvvm.suld.2d.v2i64.zero
    nvvm_suld_2d_v2i8_clamp,                   // llvm.nvvm.suld.2d.v2i8.clamp
    nvvm_suld_2d_v2i8_trap,                    // llvm.nvvm.suld.2d.v2i8.trap
    nvvm_suld_2d_v2i8_zero,                    // llvm.nvvm.suld.2d.v2i8.zero
    nvvm_suld_2d_v4i16_clamp,                  // llvm.nvvm.suld.2d.v4i16.clamp
    nvvm_suld_2d_v4i16_trap,                   // llvm.nvvm.suld.2d.v4i16.trap
    nvvm_suld_2d_v4i16_zero,                   // llvm.nvvm.suld.2d.v4i16.zero
    nvvm_suld_2d_v4i32_clamp,                  // llvm.nvvm.suld.2d.v4i32.clamp
    nvvm_suld_2d_v4i32_trap,                   // llvm.nvvm.suld.2d.v4i32.trap
    nvvm_suld_2d_v4i32_zero,                   // llvm.nvvm.suld.2d.v4i32.zero
    nvvm_suld_2d_v4i8_clamp,                   // llvm.nvvm.suld.2d.v4i8.clamp
    nvvm_suld_2d_v4i8_trap,                    // llvm.nvvm.suld.2d.v4i8.trap
    nvvm_suld_2d_v4i8_zero,                    // llvm.nvvm.suld.2d.v4i8.zero
    nvvm_suld_3d_i16_clamp,                    // llvm.nvvm.suld.3d.i16.clamp
    nvvm_suld_3d_i16_trap,                     // llvm.nvvm.suld.3d.i16.trap
    nvvm_suld_3d_i16_zero,                     // llvm.nvvm.suld.3d.i16.zero
    nvvm_suld_3d_i32_clamp,                    // llvm.nvvm.suld.3d.i32.clamp
    nvvm_suld_3d_i32_trap,                     // llvm.nvvm.suld.3d.i32.trap
    nvvm_suld_3d_i32_zero,                     // llvm.nvvm.suld.3d.i32.zero
    nvvm_suld_3d_i64_clamp,                    // llvm.nvvm.suld.3d.i64.clamp
    nvvm_suld_3d_i64_trap,                     // llvm.nvvm.suld.3d.i64.trap
    nvvm_suld_3d_i64_zero,                     // llvm.nvvm.suld.3d.i64.zero
    nvvm_suld_3d_i8_clamp,                     // llvm.nvvm.suld.3d.i8.clamp
    nvvm_suld_3d_i8_trap,                      // llvm.nvvm.suld.3d.i8.trap
    nvvm_suld_3d_i8_zero,                      // llvm.nvvm.suld.3d.i8.zero
    nvvm_suld_3d_v2i16_clamp,                  // llvm.nvvm.suld.3d.v2i16.clamp
    nvvm_suld_3d_v2i16_trap,                   // llvm.nvvm.suld.3d.v2i16.trap
    nvvm_suld_3d_v2i16_zero,                   // llvm.nvvm.suld.3d.v2i16.zero
    nvvm_suld_3d_v2i32_clamp,                  // llvm.nvvm.suld.3d.v2i32.clamp
    nvvm_suld_3d_v2i32_trap,                   // llvm.nvvm.suld.3d.v2i32.trap
    nvvm_suld_3d_v2i32_zero,                   // llvm.nvvm.suld.3d.v2i32.zero
    nvvm_suld_3d_v2i64_clamp,                  // llvm.nvvm.suld.3d.v2i64.clamp
    nvvm_suld_3d_v2i64_trap,                   // llvm.nvvm.suld.3d.v2i64.trap
    nvvm_suld_3d_v2i64_zero,                   // llvm.nvvm.suld.3d.v2i64.zero
    nvvm_suld_3d_v2i8_clamp,                   // llvm.nvvm.suld.3d.v2i8.clamp
    nvvm_suld_3d_v2i8_trap,                    // llvm.nvvm.suld.3d.v2i8.trap
    nvvm_suld_3d_v2i8_zero,                    // llvm.nvvm.suld.3d.v2i8.zero
    nvvm_suld_3d_v4i16_clamp,                  // llvm.nvvm.suld.3d.v4i16.clamp
    nvvm_suld_3d_v4i16_trap,                   // llvm.nvvm.suld.3d.v4i16.trap
    nvvm_suld_3d_v4i16_zero,                   // llvm.nvvm.suld.3d.v4i16.zero
    nvvm_suld_3d_v4i32_clamp,                  // llvm.nvvm.suld.3d.v4i32.clamp
    nvvm_suld_3d_v4i32_trap,                   // llvm.nvvm.suld.3d.v4i32.trap
    nvvm_suld_3d_v4i32_zero,                   // llvm.nvvm.suld.3d.v4i32.zero
    nvvm_suld_3d_v4i8_clamp,                   // llvm.nvvm.suld.3d.v4i8.clamp
    nvvm_suld_3d_v4i8_trap,                    // llvm.nvvm.suld.3d.v4i8.trap
    nvvm_suld_3d_v4i8_zero,                    // llvm.nvvm.suld.3d.v4i8.zero
    nvvm_suq_array_size,                       // llvm.nvvm.suq.array.size
    nvvm_suq_channel_data_type,                // llvm.nvvm.suq.channel.data.type
    nvvm_suq_channel_order,                    // llvm.nvvm.suq.channel.order
    nvvm_suq_depth,                            // llvm.nvvm.suq.depth
    nvvm_suq_height,                           // llvm.nvvm.suq.height
    nvvm_suq_width,                            // llvm.nvvm.suq.width
    nvvm_sust_b_1d_array_i16_clamp,            // llvm.nvvm.sust.b.1d.array.i16.clamp
    nvvm_sust_b_1d_array_i16_trap,             // llvm.nvvm.sust.b.1d.array.i16.trap
    nvvm_sust_b_1d_array_i16_zero,             // llvm.nvvm.sust.b.1d.array.i16.zero
    nvvm_sust_b_1d_array_i32_clamp,            // llvm.nvvm.sust.b.1d.array.i32.clamp
    nvvm_sust_b_1d_array_i32_trap,             // llvm.nvvm.sust.b.1d.array.i32.trap
    nvvm_sust_b_1d_array_i32_zero,             // llvm.nvvm.sust.b.1d.array.i32.zero
    nvvm_sust_b_1d_array_i64_clamp,            // llvm.nvvm.sust.b.1d.array.i64.clamp
    nvvm_sust_b_1d_array_i64_trap,             // llvm.nvvm.sust.b.1d.array.i64.trap
    nvvm_sust_b_1d_array_i64_zero,             // llvm.nvvm.sust.b.1d.array.i64.zero
    nvvm_sust_b_1d_array_i8_clamp,             // llvm.nvvm.sust.b.1d.array.i8.clamp
    nvvm_sust_b_1d_array_i8_trap,              // llvm.nvvm.sust.b.1d.array.i8.trap
    nvvm_sust_b_1d_array_i8_zero,              // llvm.nvvm.sust.b.1d.array.i8.zero
    nvvm_sust_b_1d_array_v2i16_clamp,          // llvm.nvvm.sust.b.1d.array.v2i16.clamp
    nvvm_sust_b_1d_array_v2i16_trap,           // llvm.nvvm.sust.b.1d.array.v2i16.trap
    nvvm_sust_b_1d_array_v2i16_zero,           // llvm.nvvm.sust.b.1d.array.v2i16.zero
    nvvm_sust_b_1d_array_v2i32_clamp,          // llvm.nvvm.sust.b.1d.array.v2i32.clamp
    nvvm_sust_b_1d_array_v2i32_trap,           // llvm.nvvm.sust.b.1d.array.v2i32.trap
    nvvm_sust_b_1d_array_v2i32_zero,           // llvm.nvvm.sust.b.1d.array.v2i32.zero
    nvvm_sust_b_1d_array_v2i64_clamp,          // llvm.nvvm.sust.b.1d.array.v2i64.clamp
    nvvm_sust_b_1d_array_v2i64_trap,           // llvm.nvvm.sust.b.1d.array.v2i64.trap
    nvvm_sust_b_1d_array_v2i64_zero,           // llvm.nvvm.sust.b.1d.array.v2i64.zero
    nvvm_sust_b_1d_array_v2i8_clamp,           // llvm.nvvm.sust.b.1d.array.v2i8.clamp
    nvvm_sust_b_1d_array_v2i8_trap,            // llvm.nvvm.sust.b.1d.array.v2i8.trap
    nvvm_sust_b_1d_array_v2i8_zero,            // llvm.nvvm.sust.b.1d.array.v2i8.zero
    nvvm_sust_b_1d_array_v4i16_clamp,          // llvm.nvvm.sust.b.1d.array.v4i16.clamp
    nvvm_sust_b_1d_array_v4i16_trap,           // llvm.nvvm.sust.b.1d.array.v4i16.trap
    nvvm_sust_b_1d_array_v4i16_zero,           // llvm.nvvm.sust.b.1d.array.v4i16.zero
    nvvm_sust_b_1d_array_v4i32_clamp,          // llvm.nvvm.sust.b.1d.array.v4i32.clamp
    nvvm_sust_b_1d_array_v4i32_trap,           // llvm.nvvm.sust.b.1d.array.v4i32.trap
    nvvm_sust_b_1d_array_v4i32_zero,           // llvm.nvvm.sust.b.1d.array.v4i32.zero
    nvvm_sust_b_1d_array_v4i8_clamp,           // llvm.nvvm.sust.b.1d.array.v4i8.clamp
    nvvm_sust_b_1d_array_v4i8_trap,            // llvm.nvvm.sust.b.1d.array.v4i8.trap
    nvvm_sust_b_1d_array_v4i8_zero,            // llvm.nvvm.sust.b.1d.array.v4i8.zero
    nvvm_sust_b_1d_i16_clamp,                  // llvm.nvvm.sust.b.1d.i16.clamp
    nvvm_sust_b_1d_i16_trap,                   // llvm.nvvm.sust.b.1d.i16.trap
    nvvm_sust_b_1d_i16_zero,                   // llvm.nvvm.sust.b.1d.i16.zero
    nvvm_sust_b_1d_i32_clamp,                  // llvm.nvvm.sust.b.1d.i32.clamp
    nvvm_sust_b_1d_i32_trap,                   // llvm.nvvm.sust.b.1d.i32.trap
    nvvm_sust_b_1d_i32_zero,                   // llvm.nvvm.sust.b.1d.i32.zero
    nvvm_sust_b_1d_i64_clamp,                  // llvm.nvvm.sust.b.1d.i64.clamp
    nvvm_sust_b_1d_i64_trap,                   // llvm.nvvm.sust.b.1d.i64.trap
    nvvm_sust_b_1d_i64_zero,                   // llvm.nvvm.sust.b.1d.i64.zero
    nvvm_sust_b_1d_i8_clamp,                   // llvm.nvvm.sust.b.1d.i8.clamp
    nvvm_sust_b_1d_i8_trap,                    // llvm.nvvm.sust.b.1d.i8.trap
    nvvm_sust_b_1d_i8_zero,                    // llvm.nvvm.sust.b.1d.i8.zero
    nvvm_sust_b_1d_v2i16_clamp,                // llvm.nvvm.sust.b.1d.v2i16.clamp
    nvvm_sust_b_1d_v2i16_trap,                 // llvm.nvvm.sust.b.1d.v2i16.trap
    nvvm_sust_b_1d_v2i16_zero,                 // llvm.nvvm.sust.b.1d.v2i16.zero
    nvvm_sust_b_1d_v2i32_clamp,                // llvm.nvvm.sust.b.1d.v2i32.clamp
    nvvm_sust_b_1d_v2i32_trap,                 // llvm.nvvm.sust.b.1d.v2i32.trap
    nvvm_sust_b_1d_v2i32_zero,                 // llvm.nvvm.sust.b.1d.v2i32.zero
    nvvm_sust_b_1d_v2i64_clamp,                // llvm.nvvm.sust.b.1d.v2i64.clamp
    nvvm_sust_b_1d_v2i64_trap,                 // llvm.nvvm.sust.b.1d.v2i64.trap
    nvvm_sust_b_1d_v2i64_zero,                 // llvm.nvvm.sust.b.1d.v2i64.zero
    nvvm_sust_b_1d_v2i8_clamp,                 // llvm.nvvm.sust.b.1d.v2i8.clamp
    nvvm_sust_b_1d_v2i8_trap,                  // llvm.nvvm.sust.b.1d.v2i8.trap
    nvvm_sust_b_1d_v2i8_zero,                  // llvm.nvvm.sust.b.1d.v2i8.zero
    nvvm_sust_b_1d_v4i16_clamp,                // llvm.nvvm.sust.b.1d.v4i16.clamp
    nvvm_sust_b_1d_v4i16_trap,                 // llvm.nvvm.sust.b.1d.v4i16.trap
    nvvm_sust_b_1d_v4i16_zero,                 // llvm.nvvm.sust.b.1d.v4i16.zero
    nvvm_sust_b_1d_v4i32_clamp,                // llvm.nvvm.sust.b.1d.v4i32.clamp
    nvvm_sust_b_1d_v4i32_trap,                 // llvm.nvvm.sust.b.1d.v4i32.trap
    nvvm_sust_b_1d_v4i32_zero,                 // llvm.nvvm.sust.b.1d.v4i32.zero
    nvvm_sust_b_1d_v4i8_clamp,                 // llvm.nvvm.sust.b.1d.v4i8.clamp
    nvvm_sust_b_1d_v4i8_trap,                  // llvm.nvvm.sust.b.1d.v4i8.trap
    nvvm_sust_b_1d_v4i8_zero,                  // llvm.nvvm.sust.b.1d.v4i8.zero
    nvvm_sust_b_2d_array_i16_clamp,            // llvm.nvvm.sust.b.2d.array.i16.clamp
    nvvm_sust_b_2d_array_i16_trap,             // llvm.nvvm.sust.b.2d.array.i16.trap
    nvvm_sust_b_2d_array_i16_zero,             // llvm.nvvm.sust.b.2d.array.i16.zero
    nvvm_sust_b_2d_array_i32_clamp,            // llvm.nvvm.sust.b.2d.array.i32.clamp
    nvvm_sust_b_2d_array_i32_trap,             // llvm.nvvm.sust.b.2d.array.i32.trap
    nvvm_sust_b_2d_array_i32_zero,             // llvm.nvvm.sust.b.2d.array.i32.zero
    nvvm_sust_b_2d_array_i64_clamp,            // llvm.nvvm.sust.b.2d.array.i64.clamp
    nvvm_sust_b_2d_array_i64_trap,             // llvm.nvvm.sust.b.2d.array.i64.trap
    nvvm_sust_b_2d_array_i64_zero,             // llvm.nvvm.sust.b.2d.array.i64.zero
    nvvm_sust_b_2d_array_i8_clamp,             // llvm.nvvm.sust.b.2d.array.i8.clamp
    nvvm_sust_b_2d_array_i8_trap,              // llvm.nvvm.sust.b.2d.array.i8.trap
    nvvm_sust_b_2d_array_i8_zero,              // llvm.nvvm.sust.b.2d.array.i8.zero
    nvvm_sust_b_2d_array_v2i16_clamp,          // llvm.nvvm.sust.b.2d.array.v2i16.clamp
    nvvm_sust_b_2d_array_v2i16_trap,           // llvm.nvvm.sust.b.2d.array.v2i16.trap
    nvvm_sust_b_2d_array_v2i16_zero,           // llvm.nvvm.sust.b.2d.array.v2i16.zero
    nvvm_sust_b_2d_array_v2i32_clamp,          // llvm.nvvm.sust.b.2d.array.v2i32.clamp
    nvvm_sust_b_2d_array_v2i32_trap,           // llvm.nvvm.sust.b.2d.array.v2i32.trap
    nvvm_sust_b_2d_array_v2i32_zero,           // llvm.nvvm.sust.b.2d.array.v2i32.zero
    nvvm_sust_b_2d_array_v2i64_clamp,          // llvm.nvvm.sust.b.2d.array.v2i64.clamp
    nvvm_sust_b_2d_array_v2i64_trap,           // llvm.nvvm.sust.b.2d.array.v2i64.trap
    nvvm_sust_b_2d_array_v2i64_zero,           // llvm.nvvm.sust.b.2d.array.v2i64.zero
    nvvm_sust_b_2d_array_v2i8_clamp,           // llvm.nvvm.sust.b.2d.array.v2i8.clamp
    nvvm_sust_b_2d_array_v2i8_trap,            // llvm.nvvm.sust.b.2d.array.v2i8.trap
    nvvm_sust_b_2d_array_v2i8_zero,            // llvm.nvvm.sust.b.2d.array.v2i8.zero
    nvvm_sust_b_2d_array_v4i16_clamp,          // llvm.nvvm.sust.b.2d.array.v4i16.clamp
    nvvm_sust_b_2d_array_v4i16_trap,           // llvm.nvvm.sust.b.2d.array.v4i16.trap
    nvvm_sust_b_2d_array_v4i16_zero,           // llvm.nvvm.sust.b.2d.array.v4i16.zero
    nvvm_sust_b_2d_array_v4i32_clamp,          // llvm.nvvm.sust.b.2d.array.v4i32.clamp
    nvvm_sust_b_2d_array_v4i32_trap,           // llvm.nvvm.sust.b.2d.array.v4i32.trap
    nvvm_sust_b_2d_array_v4i32_zero,           // llvm.nvvm.sust.b.2d.array.v4i32.zero
    nvvm_sust_b_2d_array_v4i8_clamp,           // llvm.nvvm.sust.b.2d.array.v4i8.clamp
    nvvm_sust_b_2d_array_v4i8_trap,            // llvm.nvvm.sust.b.2d.array.v4i8.trap
    nvvm_sust_b_2d_array_v4i8_zero,            // llvm.nvvm.sust.b.2d.array.v4i8.zero
    nvvm_sust_b_2d_i16_clamp,                  // llvm.nvvm.sust.b.2d.i16.clamp
    nvvm_sust_b_2d_i16_trap,                   // llvm.nvvm.sust.b.2d.i16.trap
    nvvm_sust_b_2d_i16_zero,                   // llvm.nvvm.sust.b.2d.i16.zero
    nvvm_sust_b_2d_i32_clamp,                  // llvm.nvvm.sust.b.2d.i32.clamp
    nvvm_sust_b_2d_i32_trap,                   // llvm.nvvm.sust.b.2d.i32.trap
    nvvm_sust_b_2d_i32_zero,                   // llvm.nvvm.sust.b.2d.i32.zero
    nvvm_sust_b_2d_i64_clamp,                  // llvm.nvvm.sust.b.2d.i64.clamp
    nvvm_sust_b_2d_i64_trap,                   // llvm.nvvm.sust.b.2d.i64.trap
    nvvm_sust_b_2d_i64_zero,                   // llvm.nvvm.sust.b.2d.i64.zero
    nvvm_sust_b_2d_i8_clamp,                   // llvm.nvvm.sust.b.2d.i8.clamp
    nvvm_sust_b_2d_i8_trap,                    // llvm.nvvm.sust.b.2d.i8.trap
    nvvm_sust_b_2d_i8_zero,                    // llvm.nvvm.sust.b.2d.i8.zero
    nvvm_sust_b_2d_v2i16_clamp,                // llvm.nvvm.sust.b.2d.v2i16.clamp
    nvvm_sust_b_2d_v2i16_trap,                 // llvm.nvvm.sust.b.2d.v2i16.trap
    nvvm_sust_b_2d_v2i16_zero,                 // llvm.nvvm.sust.b.2d.v2i16.zero
    nvvm_sust_b_2d_v2i32_clamp,                // llvm.nvvm.sust.b.2d.v2i32.clamp
    nvvm_sust_b_2d_v2i32_trap,                 // llvm.nvvm.sust.b.2d.v2i32.trap
    nvvm_sust_b_2d_v2i32_zero,                 // llvm.nvvm.sust.b.2d.v2i32.zero
    nvvm_sust_b_2d_v2i64_clamp,                // llvm.nvvm.sust.b.2d.v2i64.clamp
    nvvm_sust_b_2d_v2i64_trap,                 // llvm.nvvm.sust.b.2d.v2i64.trap
    nvvm_sust_b_2d_v2i64_zero,                 // llvm.nvvm.sust.b.2d.v2i64.zero
    nvvm_sust_b_2d_v2i8_clamp,                 // llvm.nvvm.sust.b.2d.v2i8.clamp
    nvvm_sust_b_2d_v2i8_trap,                  // llvm.nvvm.sust.b.2d.v2i8.trap
    nvvm_sust_b_2d_v2i8_zero,                  // llvm.nvvm.sust.b.2d.v2i8.zero
    nvvm_sust_b_2d_v4i16_clamp,                // llvm.nvvm.sust.b.2d.v4i16.clamp
    nvvm_sust_b_2d_v4i16_trap,                 // llvm.nvvm.sust.b.2d.v4i16.trap
    nvvm_sust_b_2d_v4i16_zero,                 // llvm.nvvm.sust.b.2d.v4i16.zero
    nvvm_sust_b_2d_v4i32_clamp,                // llvm.nvvm.sust.b.2d.v4i32.clamp
    nvvm_sust_b_2d_v4i32_trap,                 // llvm.nvvm.sust.b.2d.v4i32.trap
    nvvm_sust_b_2d_v4i32_zero,                 // llvm.nvvm.sust.b.2d.v4i32.zero
    nvvm_sust_b_2d_v4i8_clamp,                 // llvm.nvvm.sust.b.2d.v4i8.clamp
    nvvm_sust_b_2d_v4i8_trap,                  // llvm.nvvm.sust.b.2d.v4i8.trap
    nvvm_sust_b_2d_v4i8_zero,                  // llvm.nvvm.sust.b.2d.v4i8.zero
    nvvm_sust_b_3d_i16_clamp,                  // llvm.nvvm.sust.b.3d.i16.clamp
    nvvm_sust_b_3d_i16_trap,                   // llvm.nvvm.sust.b.3d.i16.trap
    nvvm_sust_b_3d_i16_zero,                   // llvm.nvvm.sust.b.3d.i16.zero
    nvvm_sust_b_3d_i32_clamp,                  // llvm.nvvm.sust.b.3d.i32.clamp
    nvvm_sust_b_3d_i32_trap,                   // llvm.nvvm.sust.b.3d.i32.trap
    nvvm_sust_b_3d_i32_zero,                   // llvm.nvvm.sust.b.3d.i32.zero
    nvvm_sust_b_3d_i64_clamp,                  // llvm.nvvm.sust.b.3d.i64.clamp
    nvvm_sust_b_3d_i64_trap,                   // llvm.nvvm.sust.b.3d.i64.trap
    nvvm_sust_b_3d_i64_zero,                   // llvm.nvvm.sust.b.3d.i64.zero
    nvvm_sust_b_3d_i8_clamp,                   // llvm.nvvm.sust.b.3d.i8.clamp
    nvvm_sust_b_3d_i8_trap,                    // llvm.nvvm.sust.b.3d.i8.trap
    nvvm_sust_b_3d_i8_zero,                    // llvm.nvvm.sust.b.3d.i8.zero
    nvvm_sust_b_3d_v2i16_clamp,                // llvm.nvvm.sust.b.3d.v2i16.clamp
    nvvm_sust_b_3d_v2i16_trap,                 // llvm.nvvm.sust.b.3d.v2i16.trap
    nvvm_sust_b_3d_v2i16_zero,                 // llvm.nvvm.sust.b.3d.v2i16.zero
    nvvm_sust_b_3d_v2i32_clamp,                // llvm.nvvm.sust.b.3d.v2i32.clamp
    nvvm_sust_b_3d_v2i32_trap,                 // llvm.nvvm.sust.b.3d.v2i32.trap
    nvvm_sust_b_3d_v2i32_zero,                 // llvm.nvvm.sust.b.3d.v2i32.zero
    nvvm_sust_b_3d_v2i64_clamp,                // llvm.nvvm.sust.b.3d.v2i64.clamp
    nvvm_sust_b_3d_v2i64_trap,                 // llvm.nvvm.sust.b.3d.v2i64.trap
    nvvm_sust_b_3d_v2i64_zero,                 // llvm.nvvm.sust.b.3d.v2i64.zero
    nvvm_sust_b_3d_v2i8_clamp,                 // llvm.nvvm.sust.b.3d.v2i8.clamp
    nvvm_sust_b_3d_v2i8_trap,                  // llvm.nvvm.sust.b.3d.v2i8.trap
    nvvm_sust_b_3d_v2i8_zero,                  // llvm.nvvm.sust.b.3d.v2i8.zero
    nvvm_sust_b_3d_v4i16_clamp,                // llvm.nvvm.sust.b.3d.v4i16.clamp
    nvvm_sust_b_3d_v4i16_trap,                 // llvm.nvvm.sust.b.3d.v4i16.trap
    nvvm_sust_b_3d_v4i16_zero,                 // llvm.nvvm.sust.b.3d.v4i16.zero
    nvvm_sust_b_3d_v4i32_clamp,                // llvm.nvvm.sust.b.3d.v4i32.clamp
    nvvm_sust_b_3d_v4i32_trap,                 // llvm.nvvm.sust.b.3d.v4i32.trap
    nvvm_sust_b_3d_v4i32_zero,                 // llvm.nvvm.sust.b.3d.v4i32.zero
    nvvm_sust_b_3d_v4i8_clamp,                 // llvm.nvvm.sust.b.3d.v4i8.clamp
    nvvm_sust_b_3d_v4i8_trap,                  // llvm.nvvm.sust.b.3d.v4i8.trap
    nvvm_sust_b_3d_v4i8_zero,                  // llvm.nvvm.sust.b.3d.v4i8.zero
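    // llvm.nvvm.sust.p.*: formatted surface stores (PTX sust.p), which
    // convert data to the surface format on write. Unlike the sust.b
    // (unformatted) family above, only .trap out-of-bounds variants are
    // listed here.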
    nvvm_sust_p_1d_array_i16_trap,             // llvm.nvvm.sust.p.1d.array.i16.trap
    nvvm_sust_p_1d_array_i32_trap,             // llvm.nvvm.sust.p.1d.array.i32.trap
    nvvm_sust_p_1d_array_i8_trap,              // llvm.nvvm.sust.p.1d.array.i8.trap
    nvvm_sust_p_1d_array_v2i16_trap,           // llvm.nvvm.sust.p.1d.array.v2i16.trap
    nvvm_sust_p_1d_array_v2i32_trap,           // llvm.nvvm.sust.p.1d.array.v2i32.trap
    nvvm_sust_p_1d_array_v2i8_trap,            // llvm.nvvm.sust.p.1d.array.v2i8.trap
    nvvm_sust_p_1d_array_v4i16_trap,           // llvm.nvvm.sust.p.1d.array.v4i16.trap
    nvvm_sust_p_1d_array_v4i32_trap,           // llvm.nvvm.sust.p.1d.array.v4i32.trap
    nvvm_sust_p_1d_array_v4i8_trap,            // llvm.nvvm.sust.p.1d.array.v4i8.trap
    nvvm_sust_p_1d_i16_trap,                   // llvm.nvvm.sust.p.1d.i16.trap
    nvvm_sust_p_1d_i32_trap,                   // llvm.nvvm.sust.p.1d.i32.trap
    nvvm_sust_p_1d_i8_trap,                    // llvm.nvvm.sust.p.1d.i8.trap
    nvvm_sust_p_1d_v2i16_trap,                 // llvm.nvvm.sust.p.1d.v2i16.trap
    nvvm_sust_p_1d_v2i32_trap,                 // llvm.nvvm.sust.p.1d.v2i32.trap
    nvvm_sust_p_1d_v2i8_trap,                  // llvm.nvvm.sust.p.1d.v2i8.trap
    nvvm_sust_p_1d_v4i16_trap,                 // llvm.nvvm.sust.p.1d.v4i16.trap
    nvvm_sust_p_1d_v4i32_trap,                 // llvm.nvvm.sust.p.1d.v4i32.trap
    nvvm_sust_p_1d_v4i8_trap,                  // llvm.nvvm.sust.p.1d.v4i8.trap
    nvvm_sust_p_2d_array_i16_trap,             // llvm.nvvm.sust.p.2d.array.i16.trap
    nvvm_sust_p_2d_array_i32_trap,             // llvm.nvvm.sust.p.2d.array.i32.trap
    nvvm_sust_p_2d_array_i8_trap,              // llvm.nvvm.sust.p.2d.array.i8.trap
    nvvm_sust_p_2d_array_v2i16_trap,           // llvm.nvvm.sust.p.2d.array.v2i16.trap
    nvvm_sust_p_2d_array_v2i32_trap,           // llvm.nvvm.sust.p.2d.array.v2i32.trap
    nvvm_sust_p_2d_array_v2i8_trap,            // llvm.nvvm.sust.p.2d.array.v2i8.trap
    nvvm_sust_p_2d_array_v4i16_trap,           // llvm.nvvm.sust.p.2d.array.v4i16.trap
    nvvm_sust_p_2d_array_v4i32_trap,           // llvm.nvvm.sust.p.2d.array.v4i32.trap
    nvvm_sust_p_2d_array_v4i8_trap,            // llvm.nvvm.sust.p.2d.array.v4i8.trap
    nvvm_sust_p_2d_i16_trap,                   // llvm.nvvm.sust.p.2d.i16.trap
    nvvm_sust_p_2d_i32_trap,                   // llvm.nvvm.sust.p.2d.i32.trap
    nvvm_sust_p_2d_i8_trap,                    // llvm.nvvm.sust.p.2d.i8.trap
    nvvm_sust_p_2d_v2i16_trap,                 // llvm.nvvm.sust.p.2d.v2i16.trap
    nvvm_sust_p_2d_v2i32_trap,                 // llvm.nvvm.sust.p.2d.v2i32.trap
    nvvm_sust_p_2d_v2i8_trap,                  // llvm.nvvm.sust.p.2d.v2i8.trap
    nvvm_sust_p_2d_v4i16_trap,                 // llvm.nvvm.sust.p.2d.v4i16.trap
    nvvm_sust_p_2d_v4i32_trap,                 // llvm.nvvm.sust.p.2d.v4i32.trap
    nvvm_sust_p_2d_v4i8_trap,                  // llvm.nvvm.sust.p.2d.v4i8.trap
    nvvm_sust_p_3d_i16_trap,                   // llvm.nvvm.sust.p.3d.i16.trap
    nvvm_sust_p_3d_i32_trap,                   // llvm.nvvm.sust.p.3d.i32.trap
    nvvm_sust_p_3d_i8_trap,                    // llvm.nvvm.sust.p.3d.i8.trap
    nvvm_sust_p_3d_v2i16_trap,                 // llvm.nvvm.sust.p.3d.v2i16.trap
    nvvm_sust_p_3d_v2i32_trap,                 // llvm.nvvm.sust.p.3d.v2i32.trap
    nvvm_sust_p_3d_v2i8_trap,                  // llvm.nvvm.sust.p.3d.v2i8.trap
    nvvm_sust_p_3d_v4i16_trap,                 // llvm.nvvm.sust.p.3d.v4i16.trap
    nvvm_sust_p_3d_v4i32_trap,                 // llvm.nvvm.sust.p.3d.v4i32.trap
    nvvm_sust_p_3d_v4i8_trap,                  // llvm.nvvm.sust.p.3d.v4i8.trap
    nvvm_swap_lo_hi_b64,                       // llvm.nvvm.swap.lo.hi.b64
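    // llvm.nvvm.tex.*: texture fetches in independent texture/sampler mode
    // (separate texture and sampler handles). The suffix pair names the
    // return vector and coordinate types, e.g. v4f32.s32 returns four floats
    // from integer coordinates.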
    nvvm_tex_1d_array_grad_v4f32_f32,          // llvm.nvvm.tex.1d.array.grad.v4f32.f32
    nvvm_tex_1d_array_grad_v4s32_f32,          // llvm.nvvm.tex.1d.array.grad.v4s32.f32
    nvvm_tex_1d_array_grad_v4u32_f32,          // llvm.nvvm.tex.1d.array.grad.v4u32.f32
    nvvm_tex_1d_array_level_v4f32_f32,         // llvm.nvvm.tex.1d.array.level.v4f32.f32
    nvvm_tex_1d_array_level_v4s32_f32,         // llvm.nvvm.tex.1d.array.level.v4s32.f32
    nvvm_tex_1d_array_level_v4u32_f32,         // llvm.nvvm.tex.1d.array.level.v4u32.f32
    nvvm_tex_1d_array_v4f32_f32,               // llvm.nvvm.tex.1d.array.v4f32.f32
    nvvm_tex_1d_array_v4f32_s32,               // llvm.nvvm.tex.1d.array.v4f32.s32
    nvvm_tex_1d_array_v4s32_f32,               // llvm.nvvm.tex.1d.array.v4s32.f32
    nvvm_tex_1d_array_v4s32_s32,               // llvm.nvvm.tex.1d.array.v4s32.s32
    nvvm_tex_1d_array_v4u32_f32,               // llvm.nvvm.tex.1d.array.v4u32.f32
    nvvm_tex_1d_array_v4u32_s32,               // llvm.nvvm.tex.1d.array.v4u32.s32
    nvvm_tex_1d_grad_v4f32_f32,                // llvm.nvvm.tex.1d.grad.v4f32.f32
    nvvm_tex_1d_grad_v4s32_f32,                // llvm.nvvm.tex.1d.grad.v4s32.f32
    nvvm_tex_1d_grad_v4u32_f32,                // llvm.nvvm.tex.1d.grad.v4u32.f32
    nvvm_tex_1d_level_v4f32_f32,               // llvm.nvvm.tex.1d.level.v4f32.f32
    nvvm_tex_1d_level_v4s32_f32,               // llvm.nvvm.tex.1d.level.v4s32.f32
    nvvm_tex_1d_level_v4u32_f32,               // llvm.nvvm.tex.1d.level.v4u32.f32
    nvvm_tex_1d_v4f32_f32,                     // llvm.nvvm.tex.1d.v4f32.f32
    nvvm_tex_1d_v4f32_s32,                     // llvm.nvvm.tex.1d.v4f32.s32
    nvvm_tex_1d_v4s32_f32,                     // llvm.nvvm.tex.1d.v4s32.f32
    nvvm_tex_1d_v4s32_s32,                     // llvm.nvvm.tex.1d.v4s32.s32
    nvvm_tex_1d_v4u32_f32,                     // llvm.nvvm.tex.1d.v4u32.f32
    nvvm_tex_1d_v4u32_s32,                     // llvm.nvvm.tex.1d.v4u32.s32
    nvvm_tex_2d_array_grad_v4f32_f32,          // llvm.nvvm.tex.2d.array.grad.v4f32.f32
    nvvm_tex_2d_array_grad_v4s32_f32,          // llvm.nvvm.tex.2d.array.grad.v4s32.f32
    nvvm_tex_2d_array_grad_v4u32_f32,          // llvm.nvvm.tex.2d.array.grad.v4u32.f32
    nvvm_tex_2d_array_level_v4f32_f32,         // llvm.nvvm.tex.2d.array.level.v4f32.f32
    nvvm_tex_2d_array_level_v4s32_f32,         // llvm.nvvm.tex.2d.array.level.v4s32.f32
    nvvm_tex_2d_array_level_v4u32_f32,         // llvm.nvvm.tex.2d.array.level.v4u32.f32
    nvvm_tex_2d_array_v4f32_f32,               // llvm.nvvm.tex.2d.array.v4f32.f32
    nvvm_tex_2d_array_v4f32_s32,               // llvm.nvvm.tex.2d.array.v4f32.s32
    nvvm_tex_2d_array_v4s32_f32,               // llvm.nvvm.tex.2d.array.v4s32.f32
    nvvm_tex_2d_array_v4s32_s32,               // llvm.nvvm.tex.2d.array.v4s32.s32
    nvvm_tex_2d_array_v4u32_f32,               // llvm.nvvm.tex.2d.array.v4u32.f32
    nvvm_tex_2d_array_v4u32_s32,               // llvm.nvvm.tex.2d.array.v4u32.s32
    nvvm_tex_2d_grad_v4f32_f32,                // llvm.nvvm.tex.2d.grad.v4f32.f32
    nvvm_tex_2d_grad_v4s32_f32,                // llvm.nvvm.tex.2d.grad.v4s32.f32
    nvvm_tex_2d_grad_v4u32_f32,                // llvm.nvvm.tex.2d.grad.v4u32.f32
    nvvm_tex_2d_level_v4f32_f32,               // llvm.nvvm.tex.2d.level.v4f32.f32
    nvvm_tex_2d_level_v4s32_f32,               // llvm.nvvm.tex.2d.level.v4s32.f32
    nvvm_tex_2d_level_v4u32_f32,               // llvm.nvvm.tex.2d.level.v4u32.f32
    nvvm_tex_2d_v4f32_f32,                     // llvm.nvvm.tex.2d.v4f32.f32
    nvvm_tex_2d_v4f32_s32,                     // llvm.nvvm.tex.2d.v4f32.s32
    nvvm_tex_2d_v4s32_f32,                     // llvm.nvvm.tex.2d.v4s32.f32
    nvvm_tex_2d_v4s32_s32,                     // llvm.nvvm.tex.2d.v4s32.s32
    nvvm_tex_2d_v4u32_f32,                     // llvm.nvvm.tex.2d.v4u32.f32
    nvvm_tex_2d_v4u32_s32,                     // llvm.nvvm.tex.2d.v4u32.s32
    nvvm_tex_3d_grad_v4f32_f32,                // llvm.nvvm.tex.3d.grad.v4f32.f32
    nvvm_tex_3d_grad_v4s32_f32,                // llvm.nvvm.tex.3d.grad.v4s32.f32
    nvvm_tex_3d_grad_v4u32_f32,                // llvm.nvvm.tex.3d.grad.v4u32.f32
    nvvm_tex_3d_level_v4f32_f32,               // llvm.nvvm.tex.3d.level.v4f32.f32
    nvvm_tex_3d_level_v4s32_f32,               // llvm.nvvm.tex.3d.level.v4s32.f32
    nvvm_tex_3d_level_v4u32_f32,               // llvm.nvvm.tex.3d.level.v4u32.f32
    nvvm_tex_3d_v4f32_f32,                     // llvm.nvvm.tex.3d.v4f32.f32
    nvvm_tex_3d_v4f32_s32,                     // llvm.nvvm.tex.3d.v4f32.s32
    nvvm_tex_3d_v4s32_f32,                     // llvm.nvvm.tex.3d.v4s32.f32
    nvvm_tex_3d_v4s32_s32,                     // llvm.nvvm.tex.3d.v4s32.s32
    nvvm_tex_3d_v4u32_f32,                     // llvm.nvvm.tex.3d.v4u32.f32
    nvvm_tex_3d_v4u32_s32,                     // llvm.nvvm.tex.3d.v4u32.s32
    nvvm_tex_cube_array_level_v4f32_f32,       // llvm.nvvm.tex.cube.array.level.v4f32.f32
    nvvm_tex_cube_array_level_v4s32_f32,       // llvm.nvvm.tex.cube.array.level.v4s32.f32
    nvvm_tex_cube_array_level_v4u32_f32,       // llvm.nvvm.tex.cube.array.level.v4u32.f32
    nvvm_tex_cube_array_v4f32_f32,             // llvm.nvvm.tex.cube.array.v4f32.f32
    nvvm_tex_cube_array_v4s32_f32,             // llvm.nvvm.tex.cube.array.v4s32.f32
    nvvm_tex_cube_array_v4u32_f32,             // llvm.nvvm.tex.cube.array.v4u32.f32
    nvvm_tex_cube_level_v4f32_f32,             // llvm.nvvm.tex.cube.level.v4f32.f32
    nvvm_tex_cube_level_v4s32_f32,             // llvm.nvvm.tex.cube.level.v4s32.f32
    nvvm_tex_cube_level_v4u32_f32,             // llvm.nvvm.tex.cube.level.v4u32.f32
    nvvm_tex_cube_v4f32_f32,                   // llvm.nvvm.tex.cube.v4f32.f32
    nvvm_tex_cube_v4s32_f32,                   // llvm.nvvm.tex.cube.v4s32.f32
    nvvm_tex_cube_v4u32_f32,                   // llvm.nvvm.tex.cube.v4u32.f32
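    // llvm.nvvm.tex.unified.*: the same fetches in unified mode, where a
    // single handle identifies both the texture and its sampling state.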
    nvvm_tex_unified_1d_array_grad_v4f32_f32,  // llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32
    nvvm_tex_unified_1d_array_grad_v4s32_f32,  // llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32
    nvvm_tex_unified_1d_array_grad_v4u32_f32,  // llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32
    nvvm_tex_unified_1d_array_level_v4f32_f32,  // llvm.nvvm.tex.unified.1d.array.level.v4f32.f32
    nvvm_tex_unified_1d_array_level_v4s32_f32,  // llvm.nvvm.tex.unified.1d.array.level.v4s32.f32
    nvvm_tex_unified_1d_array_level_v4u32_f32,  // llvm.nvvm.tex.unified.1d.array.level.v4u32.f32
    nvvm_tex_unified_1d_array_v4f32_f32,       // llvm.nvvm.tex.unified.1d.array.v4f32.f32
    nvvm_tex_unified_1d_array_v4f32_s32,       // llvm.nvvm.tex.unified.1d.array.v4f32.s32
    nvvm_tex_unified_1d_array_v4s32_f32,       // llvm.nvvm.tex.unified.1d.array.v4s32.f32
    nvvm_tex_unified_1d_array_v4s32_s32,       // llvm.nvvm.tex.unified.1d.array.v4s32.s32
    nvvm_tex_unified_1d_array_v4u32_f32,       // llvm.nvvm.tex.unified.1d.array.v4u32.f32
    nvvm_tex_unified_1d_array_v4u32_s32,       // llvm.nvvm.tex.unified.1d.array.v4u32.s32
    nvvm_tex_unified_1d_grad_v4f32_f32,        // llvm.nvvm.tex.unified.1d.grad.v4f32.f32
    nvvm_tex_unified_1d_grad_v4s32_f32,        // llvm.nvvm.tex.unified.1d.grad.v4s32.f32
    nvvm_tex_unified_1d_grad_v4u32_f32,        // llvm.nvvm.tex.unified.1d.grad.v4u32.f32
    nvvm_tex_unified_1d_level_v4f32_f32,       // llvm.nvvm.tex.unified.1d.level.v4f32.f32
    nvvm_tex_unified_1d_level_v4s32_f32,       // llvm.nvvm.tex.unified.1d.level.v4s32.f32
    nvvm_tex_unified_1d_level_v4u32_f32,       // llvm.nvvm.tex.unified.1d.level.v4u32.f32
    nvvm_tex_unified_1d_v4f32_f32,             // llvm.nvvm.tex.unified.1d.v4f32.f32
    nvvm_tex_unified_1d_v4f32_s32,             // llvm.nvvm.tex.unified.1d.v4f32.s32
    nvvm_tex_unified_1d_v4s32_f32,             // llvm.nvvm.tex.unified.1d.v4s32.f32
    nvvm_tex_unified_1d_v4s32_s32,             // llvm.nvvm.tex.unified.1d.v4s32.s32
    nvvm_tex_unified_1d_v4u32_f32,             // llvm.nvvm.tex.unified.1d.v4u32.f32
    nvvm_tex_unified_1d_v4u32_s32,             // llvm.nvvm.tex.unified.1d.v4u32.s32
    nvvm_tex_unified_2d_array_grad_v4f32_f32,  // llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32
    nvvm_tex_unified_2d_array_grad_v4s32_f32,  // llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32
    nvvm_tex_unified_2d_array_grad_v4u32_f32,  // llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32
    nvvm_tex_unified_2d_array_level_v4f32_f32,  // llvm.nvvm.tex.unified.2d.array.level.v4f32.f32
    nvvm_tex_unified_2d_array_level_v4s32_f32,  // llvm.nvvm.tex.unified.2d.array.level.v4s32.f32
    nvvm_tex_unified_2d_array_level_v4u32_f32,  // llvm.nvvm.tex.unified.2d.array.level.v4u32.f32
    nvvm_tex_unified_2d_array_v4f32_f32,       // llvm.nvvm.tex.unified.2d.array.v4f32.f32
    nvvm_tex_unified_2d_array_v4f32_s32,       // llvm.nvvm.tex.unified.2d.array.v4f32.s32
    nvvm_tex_unified_2d_array_v4s32_f32,       // llvm.nvvm.tex.unified.2d.array.v4s32.f32
    nvvm_tex_unified_2d_array_v4s32_s32,       // llvm.nvvm.tex.unified.2d.array.v4s32.s32
    nvvm_tex_unified_2d_array_v4u32_f32,       // llvm.nvvm.tex.unified.2d.array.v4u32.f32
    nvvm_tex_unified_2d_array_v4u32_s32,       // llvm.nvvm.tex.unified.2d.array.v4u32.s32
    nvvm_tex_unified_2d_grad_v4f32_f32,        // llvm.nvvm.tex.unified.2d.grad.v4f32.f32
    nvvm_tex_unified_2d_grad_v4s32_f32,        // llvm.nvvm.tex.unified.2d.grad.v4s32.f32
    nvvm_tex_unified_2d_grad_v4u32_f32,        // llvm.nvvm.tex.unified.2d.grad.v4u32.f32
    nvvm_tex_unified_2d_level_v4f32_f32,       // llvm.nvvm.tex.unified.2d.level.v4f32.f32
    nvvm_tex_unified_2d_level_v4s32_f32,       // llvm.nvvm.tex.unified.2d.level.v4s32.f32
    nvvm_tex_unified_2d_level_v4u32_f32,       // llvm.nvvm.tex.unified.2d.level.v4u32.f32
    nvvm_tex_unified_2d_v4f32_f32,             // llvm.nvvm.tex.unified.2d.v4f32.f32
    nvvm_tex_unified_2d_v4f32_s32,             // llvm.nvvm.tex.unified.2d.v4f32.s32
    nvvm_tex_unified_2d_v4s32_f32,             // llvm.nvvm.tex.unified.2d.v4s32.f32
    nvvm_tex_unified_2d_v4s32_s32,             // llvm.nvvm.tex.unified.2d.v4s32.s32
    nvvm_tex_unified_2d_v4u32_f32,             // llvm.nvvm.tex.unified.2d.v4u32.f32
    nvvm_tex_unified_2d_v4u32_s32,             // llvm.nvvm.tex.unified.2d.v4u32.s32
    nvvm_tex_unified_3d_grad_v4f32_f32,        // llvm.nvvm.tex.unified.3d.grad.v4f32.f32
    nvvm_tex_unified_3d_grad_v4s32_f32,        // llvm.nvvm.tex.unified.3d.grad.v4s32.f32
    nvvm_tex_unified_3d_grad_v4u32_f32,        // llvm.nvvm.tex.unified.3d.grad.v4u32.f32
    nvvm_tex_unified_3d_level_v4f32_f32,       // llvm.nvvm.tex.unified.3d.level.v4f32.f32
    nvvm_tex_unified_3d_level_v4s32_f32,       // llvm.nvvm.tex.unified.3d.level.v4s32.f32
    nvvm_tex_unified_3d_level_v4u32_f32,       // llvm.nvvm.tex.unified.3d.level.v4u32.f32
    nvvm_tex_unified_3d_v4f32_f32,             // llvm.nvvm.tex.unified.3d.v4f32.f32
    nvvm_tex_unified_3d_v4f32_s32,             // llvm.nvvm.tex.unified.3d.v4f32.s32
    nvvm_tex_unified_3d_v4s32_f32,             // llvm.nvvm.tex.unified.3d.v4s32.f32
    nvvm_tex_unified_3d_v4s32_s32,             // llvm.nvvm.tex.unified.3d.v4s32.s32
    nvvm_tex_unified_3d_v4u32_f32,             // llvm.nvvm.tex.unified.3d.v4u32.f32
    nvvm_tex_unified_3d_v4u32_s32,             // llvm.nvvm.tex.unified.3d.v4u32.s32
    nvvm_tex_unified_cube_array_level_v4f32_f32,  // llvm.nvvm.tex.unified.cube.array.level.v4f32.f32
    nvvm_tex_unified_cube_array_level_v4s32_f32,  // llvm.nvvm.tex.unified.cube.array.level.v4s32.f32
    nvvm_tex_unified_cube_array_level_v4u32_f32,  // llvm.nvvm.tex.unified.cube.array.level.v4u32.f32
    nvvm_tex_unified_cube_array_v4f32_f32,     // llvm.nvvm.tex.unified.cube.array.v4f32.f32
    nvvm_tex_unified_cube_array_v4s32_f32,     // llvm.nvvm.tex.unified.cube.array.v4s32.f32
    nvvm_tex_unified_cube_array_v4u32_f32,     // llvm.nvvm.tex.unified.cube.array.v4u32.f32
    nvvm_tex_unified_cube_level_v4f32_f32,     // llvm.nvvm.tex.unified.cube.level.v4f32.f32
    nvvm_tex_unified_cube_level_v4s32_f32,     // llvm.nvvm.tex.unified.cube.level.v4s32.f32
    nvvm_tex_unified_cube_level_v4u32_f32,     // llvm.nvvm.tex.unified.cube.level.v4u32.f32
    nvvm_tex_unified_cube_v4f32_f32,           // llvm.nvvm.tex.unified.cube.v4f32.f32
    nvvm_tex_unified_cube_v4s32_f32,           // llvm.nvvm.tex.unified.cube.v4s32.f32
    nvvm_tex_unified_cube_v4u32_f32,           // llvm.nvvm.tex.unified.cube.v4u32.f32
    nvvm_texsurf_handle,                       // llvm.nvvm.texsurf.handle
    nvvm_texsurf_handle_internal,              // llvm.nvvm.texsurf.handle.internal
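    // llvm.nvvm.tld4.{a,b,g,r}.*: 2D texture gather of a single component
    // (alpha, blue, green, or red) from the four texels that bilinear
    // filtering would sample; .unified forms use unified texture mode.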
    nvvm_tld4_a_2d_v4f32_f32,                  // llvm.nvvm.tld4.a.2d.v4f32.f32
    nvvm_tld4_a_2d_v4s32_f32,                  // llvm.nvvm.tld4.a.2d.v4s32.f32
    nvvm_tld4_a_2d_v4u32_f32,                  // llvm.nvvm.tld4.a.2d.v4u32.f32
    nvvm_tld4_b_2d_v4f32_f32,                  // llvm.nvvm.tld4.b.2d.v4f32.f32
    nvvm_tld4_b_2d_v4s32_f32,                  // llvm.nvvm.tld4.b.2d.v4s32.f32
    nvvm_tld4_b_2d_v4u32_f32,                  // llvm.nvvm.tld4.b.2d.v4u32.f32
    nvvm_tld4_g_2d_v4f32_f32,                  // llvm.nvvm.tld4.g.2d.v4f32.f32
    nvvm_tld4_g_2d_v4s32_f32,                  // llvm.nvvm.tld4.g.2d.v4s32.f32
    nvvm_tld4_g_2d_v4u32_f32,                  // llvm.nvvm.tld4.g.2d.v4u32.f32
    nvvm_tld4_r_2d_v4f32_f32,                  // llvm.nvvm.tld4.r.2d.v4f32.f32
    nvvm_tld4_r_2d_v4s32_f32,                  // llvm.nvvm.tld4.r.2d.v4s32.f32
    nvvm_tld4_r_2d_v4u32_f32,                  // llvm.nvvm.tld4.r.2d.v4u32.f32
    nvvm_tld4_unified_a_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.a.2d.v4f32.f32
    nvvm_tld4_unified_a_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.a.2d.v4s32.f32
    nvvm_tld4_unified_a_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.a.2d.v4u32.f32
    nvvm_tld4_unified_b_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.b.2d.v4f32.f32
    nvvm_tld4_unified_b_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.b.2d.v4s32.f32
    nvvm_tld4_unified_b_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.b.2d.v4u32.f32
    nvvm_tld4_unified_g_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.g.2d.v4f32.f32
    nvvm_tld4_unified_g_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.g.2d.v4s32.f32
    nvvm_tld4_unified_g_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.g.2d.v4u32.f32
    nvvm_tld4_unified_r_2d_v4f32_f32,          // llvm.nvvm.tld4.unified.r.2d.v4f32.f32
    nvvm_tld4_unified_r_2d_v4s32_f32,          // llvm.nvvm.tld4.unified.r.2d.v4s32.f32
    nvvm_tld4_unified_r_2d_v4u32_f32,          // llvm.nvvm.tld4.unified.r.2d.v4u32.f32
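    // llvm.nvvm.trunc.*: round to an integral value toward zero; the .ftz.f
    // variant additionally flushes subnormal inputs to zero.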
    nvvm_trunc_d,                              // llvm.nvvm.trunc.d
    nvvm_trunc_f,                              // llvm.nvvm.trunc.f
    nvvm_trunc_ftz_f,                          // llvm.nvvm.trunc.ftz.f
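    // llvm.nvvm.txq.*: texture queries (dimensions, array size, channel
    // order and data type, mipmap level and sample counts).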
    nvvm_txq_array_size,                       // llvm.nvvm.txq.array.size
    nvvm_txq_channel_data_type,                // llvm.nvvm.txq.channel.data.type
    nvvm_txq_channel_order,                    // llvm.nvvm.txq.channel.order
    nvvm_txq_depth,                            // llvm.nvvm.txq.depth
    nvvm_txq_height,                           // llvm.nvvm.txq.height
    nvvm_txq_num_mipmap_levels,                // llvm.nvvm.txq.num.mipmap.levels
    nvvm_txq_num_samples,                      // llvm.nvvm.txq.num.samples
    nvvm_txq_width,                            // llvm.nvvm.txq.width
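    // Unsigned integer to floating-point conversions (ui = 32-bit,
    // ull = 64-bit); the suffix selects the rounding mode: rm (toward -inf),
    // rn (nearest even), rp (toward +inf), rz (toward zero).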
    nvvm_ui2d_rm,                              // llvm.nvvm.ui2d.rm
    nvvm_ui2d_rn,                              // llvm.nvvm.ui2d.rn
    nvvm_ui2d_rp,                              // llvm.nvvm.ui2d.rp
    nvvm_ui2d_rz,                              // llvm.nvvm.ui2d.rz
    nvvm_ui2f_rm,                              // llvm.nvvm.ui2f.rm
    nvvm_ui2f_rn,                              // llvm.nvvm.ui2f.rn
    nvvm_ui2f_rp,                              // llvm.nvvm.ui2f.rp
    nvvm_ui2f_rz,                              // llvm.nvvm.ui2f.rz
    nvvm_ull2d_rm,                             // llvm.nvvm.ull2d.rm
    nvvm_ull2d_rn,                             // llvm.nvvm.ull2d.rn
    nvvm_ull2d_rp,                             // llvm.nvvm.ull2d.rp
    nvvm_ull2d_rz,                             // llvm.nvvm.ull2d.rz
    nvvm_ull2f_rm,                             // llvm.nvvm.ull2f.rm
    nvvm_ull2f_rn,                             // llvm.nvvm.ull2f.rn
    nvvm_ull2f_rp,                             // llvm.nvvm.ull2f.rp
    nvvm_ull2f_rz,                             // llvm.nvvm.ull2f.rz
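    // llvm.nvvm.vote.*: warp-wide vote primitives (all/any/uni/ballot);
    // the .sync forms take an explicit member mask of participating lanes.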
    nvvm_vote_all,                             // llvm.nvvm.vote.all
    nvvm_vote_all_sync,                        // llvm.nvvm.vote.all.sync
    nvvm_vote_any,                             // llvm.nvvm.vote.any
    nvvm_vote_any_sync,                        // llvm.nvvm.vote.any.sync
    nvvm_vote_ballot,                          // llvm.nvvm.vote.ballot
    nvvm_vote_ballot_sync,                     // llvm.nvvm.vote.ballot.sync
    nvvm_vote_uni,                             // llvm.nvvm.vote.uni
    nvvm_vote_uni_sync,                        // llvm.nvvm.vote.uni.sync
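    // llvm.nvvm.wmma.mMnNkK.*: warp-level matrix multiply-accumulate on an
    // MxNxK tile. load.{a,b,c} read fragments, mma computes D = A*B + C,
    // store.d writes the result; row/col give fragment layouts and .stride
    // variants take an explicit leading-dimension stride.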
    nvvm_wmma_m16n16k16_load_a_bf16_col,       // llvm.nvvm.wmma.m16n16k16.load.a.col.bf16
    nvvm_wmma_m16n16k16_load_a_f16_col,        // llvm.nvvm.wmma.m16n16k16.load.a.col.f16
    nvvm_wmma_m16n16k16_load_a_s8_col,         // llvm.nvvm.wmma.m16n16k16.load.a.col.s8
    nvvm_wmma_m16n16k16_load_a_bf16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16
    nvvm_wmma_m16n16k16_load_a_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.f16
    nvvm_wmma_m16n16k16_load_a_s8_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.s8
    nvvm_wmma_m16n16k16_load_a_u8_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.col.stride.u8
    nvvm_wmma_m16n16k16_load_a_u8_col,         // llvm.nvvm.wmma.m16n16k16.load.a.col.u8
    nvvm_wmma_m16n16k16_load_a_bf16_row,       // llvm.nvvm.wmma.m16n16k16.load.a.row.bf16
    nvvm_wmma_m16n16k16_load_a_f16_row,        // llvm.nvvm.wmma.m16n16k16.load.a.row.f16
    nvvm_wmma_m16n16k16_load_a_s8_row,         // llvm.nvvm.wmma.m16n16k16.load.a.row.s8
    nvvm_wmma_m16n16k16_load_a_bf16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16
    nvvm_wmma_m16n16k16_load_a_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.f16
    nvvm_wmma_m16n16k16_load_a_s8_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.s8
    nvvm_wmma_m16n16k16_load_a_u8_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.a.row.stride.u8
    nvvm_wmma_m16n16k16_load_a_u8_row,         // llvm.nvvm.wmma.m16n16k16.load.a.row.u8
    nvvm_wmma_m16n16k16_load_b_bf16_col,       // llvm.nvvm.wmma.m16n16k16.load.b.col.bf16
    nvvm_wmma_m16n16k16_load_b_f16_col,        // llvm.nvvm.wmma.m16n16k16.load.b.col.f16
    nvvm_wmma_m16n16k16_load_b_s8_col,         // llvm.nvvm.wmma.m16n16k16.load.b.col.s8
    nvvm_wmma_m16n16k16_load_b_bf16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16
    nvvm_wmma_m16n16k16_load_b_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.f16
    nvvm_wmma_m16n16k16_load_b_s8_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.s8
    nvvm_wmma_m16n16k16_load_b_u8_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.col.stride.u8
    nvvm_wmma_m16n16k16_load_b_u8_col,         // llvm.nvvm.wmma.m16n16k16.load.b.col.u8
    nvvm_wmma_m16n16k16_load_b_bf16_row,       // llvm.nvvm.wmma.m16n16k16.load.b.row.bf16
    nvvm_wmma_m16n16k16_load_b_f16_row,        // llvm.nvvm.wmma.m16n16k16.load.b.row.f16
    nvvm_wmma_m16n16k16_load_b_s8_row,         // llvm.nvvm.wmma.m16n16k16.load.b.row.s8
    nvvm_wmma_m16n16k16_load_b_bf16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16
    nvvm_wmma_m16n16k16_load_b_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.f16
    nvvm_wmma_m16n16k16_load_b_s8_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.s8
    nvvm_wmma_m16n16k16_load_b_u8_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.b.row.stride.u8
    nvvm_wmma_m16n16k16_load_b_u8_row,         // llvm.nvvm.wmma.m16n16k16.load.b.row.u8
    nvvm_wmma_m16n16k16_load_c_f16_col,        // llvm.nvvm.wmma.m16n16k16.load.c.col.f16
    nvvm_wmma_m16n16k16_load_c_f32_col,        // llvm.nvvm.wmma.m16n16k16.load.c.col.f32
    nvvm_wmma_m16n16k16_load_c_s32_col,        // llvm.nvvm.wmma.m16n16k16.load.c.col.s32
    nvvm_wmma_m16n16k16_load_c_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f16
    nvvm_wmma_m16n16k16_load_c_f32_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32
    nvvm_wmma_m16n16k16_load_c_s32_col_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.col.stride.s32
    nvvm_wmma_m16n16k16_load_c_f16_row,        // llvm.nvvm.wmma.m16n16k16.load.c.row.f16
    nvvm_wmma_m16n16k16_load_c_f32_row,        // llvm.nvvm.wmma.m16n16k16.load.c.row.f32
    nvvm_wmma_m16n16k16_load_c_s32_row,        // llvm.nvvm.wmma.m16n16k16.load.c.row.s32
    nvvm_wmma_m16n16k16_load_c_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f16
    nvvm_wmma_m16n16k16_load_c_f32_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32
    nvvm_wmma_m16n16k16_load_c_s32_row_stride,  // llvm.nvvm.wmma.m16n16k16.load.c.row.stride.s32
    nvvm_wmma_m16n16k16_mma_col_col_bf16,      // llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16
    nvvm_wmma_m16n16k16_mma_col_col_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16
    nvvm_wmma_m16n16k16_mma_col_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f16.satfinite
    nvvm_wmma_m16n16k16_mma_col_col_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32
    nvvm_wmma_m16n16k16_mma_col_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f16.f32.satfinite
    nvvm_wmma_m16n16k16_mma_col_col_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16
    nvvm_wmma_m16n16k16_mma_col_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f16.satfinite
    nvvm_wmma_m16n16k16_mma_col_col_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32
    nvvm_wmma_m16n16k16_mma_col_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.f32.f32.satfinite
    nvvm_wmma_m16n16k16_mma_col_col_s8,        // llvm.nvvm.wmma.m16n16k16.mma.col.col.s8
    nvvm_wmma_m16n16k16_mma_col_col_s8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.s8.satfinite
    nvvm_wmma_m16n16k16_mma_col_col_u8,        // llvm.nvvm.wmma.m16n16k16.mma.col.col.u8
    nvvm_wmma_m16n16k16_mma_col_col_u8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.col.u8.satfinite
    nvvm_wmma_m16n16k16_mma_col_row_bf16,      // llvm.nvvm.wmma.m16n16k16.mma.col.row.bf16
    nvvm_wmma_m16n16k16_mma_col_row_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16
    nvvm_wmma_m16n16k16_mma_col_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f16.satfinite
    nvvm_wmma_m16n16k16_mma_col_row_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32
    nvvm_wmma_m16n16k16_mma_col_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f16.f32.satfinite
    nvvm_wmma_m16n16k16_mma_col_row_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16
    nvvm_wmma_m16n16k16_mma_col_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f16.satfinite
    nvvm_wmma_m16n16k16_mma_col_row_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32
    nvvm_wmma_m16n16k16_mma_col_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.f32.f32.satfinite
    nvvm_wmma_m16n16k16_mma_col_row_s8,        // llvm.nvvm.wmma.m16n16k16.mma.col.row.s8
    nvvm_wmma_m16n16k16_mma_col_row_s8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.s8.satfinite
    nvvm_wmma_m16n16k16_mma_col_row_u8,        // llvm.nvvm.wmma.m16n16k16.mma.col.row.u8
    nvvm_wmma_m16n16k16_mma_col_row_u8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.col.row.u8.satfinite
    nvvm_wmma_m16n16k16_mma_row_col_bf16,      // llvm.nvvm.wmma.m16n16k16.mma.row.col.bf16
    nvvm_wmma_m16n16k16_mma_row_col_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16
    nvvm_wmma_m16n16k16_mma_row_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f16.satfinite
    nvvm_wmma_m16n16k16_mma_row_col_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32
    nvvm_wmma_m16n16k16_mma_row_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f16.f32.satfinite
    nvvm_wmma_m16n16k16_mma_row_col_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16
    nvvm_wmma_m16n16k16_mma_row_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f16.satfinite
    nvvm_wmma_m16n16k16_mma_row_col_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32
    nvvm_wmma_m16n16k16_mma_row_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.f32.f32.satfinite
    nvvm_wmma_m16n16k16_mma_row_col_s8,        // llvm.nvvm.wmma.m16n16k16.mma.row.col.s8
    nvvm_wmma_m16n16k16_mma_row_col_s8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.s8.satfinite
    nvvm_wmma_m16n16k16_mma_row_col_u8,        // llvm.nvvm.wmma.m16n16k16.mma.row.col.u8
    nvvm_wmma_m16n16k16_mma_row_col_u8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.col.u8.satfinite
    nvvm_wmma_m16n16k16_mma_row_row_bf16,      // llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16
    nvvm_wmma_m16n16k16_mma_row_row_f16_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16
    nvvm_wmma_m16n16k16_mma_row_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f16.satfinite
    nvvm_wmma_m16n16k16_mma_row_row_f16_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32
    nvvm_wmma_m16n16k16_mma_row_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f16.f32.satfinite
    nvvm_wmma_m16n16k16_mma_row_row_f32_f16,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16
    nvvm_wmma_m16n16k16_mma_row_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f16.satfinite
    nvvm_wmma_m16n16k16_mma_row_row_f32_f32,   // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32
    nvvm_wmma_m16n16k16_mma_row_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.f32.f32.satfinite
    nvvm_wmma_m16n16k16_mma_row_row_s8,        // llvm.nvvm.wmma.m16n16k16.mma.row.row.s8
    nvvm_wmma_m16n16k16_mma_row_row_s8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.s8.satfinite
    nvvm_wmma_m16n16k16_mma_row_row_u8,        // llvm.nvvm.wmma.m16n16k16.mma.row.row.u8
    nvvm_wmma_m16n16k16_mma_row_row_u8_satfinite,  // llvm.nvvm.wmma.m16n16k16.mma.row.row.u8.satfinite
    nvvm_wmma_m16n16k16_store_d_f16_col,       // llvm.nvvm.wmma.m16n16k16.store.d.col.f16
    nvvm_wmma_m16n16k16_store_d_f32_col,       // llvm.nvvm.wmma.m16n16k16.store.d.col.f32
    nvvm_wmma_m16n16k16_store_d_s32_col,       // llvm.nvvm.wmma.m16n16k16.store.d.col.s32
    nvvm_wmma_m16n16k16_store_d_f16_col_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f16
    nvvm_wmma_m16n16k16_store_d_f32_col_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32
    nvvm_wmma_m16n16k16_store_d_s32_col_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.col.stride.s32
    nvvm_wmma_m16n16k16_store_d_f16_row,       // llvm.nvvm.wmma.m16n16k16.store.d.row.f16
    nvvm_wmma_m16n16k16_store_d_f32_row,       // llvm.nvvm.wmma.m16n16k16.store.d.row.f32
    nvvm_wmma_m16n16k16_store_d_s32_row,       // llvm.nvvm.wmma.m16n16k16.store.d.row.s32
    nvvm_wmma_m16n16k16_store_d_f16_row_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f16
    nvvm_wmma_m16n16k16_store_d_f32_row_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32
    nvvm_wmma_m16n16k16_store_d_s32_row_stride,  // llvm.nvvm.wmma.m16n16k16.store.d.row.stride.s32
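    // m16n16k8 tile: TF32 inputs with f32 accumulators; .satfinite variants
    // elsewhere in this family saturate results rather than wrapping or
    // producing infinities.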
    nvvm_wmma_m16n16k8_load_a_tf32_col_stride,  // llvm.nvvm.wmma.m16n16k8.load.a.col.stride.tf32
    nvvm_wmma_m16n16k8_load_a_tf32_col,        // llvm.nvvm.wmma.m16n16k8.load.a.col.tf32
    nvvm_wmma_m16n16k8_load_a_tf32_row_stride,  // llvm.nvvm.wmma.m16n16k8.load.a.row.stride.tf32
    nvvm_wmma_m16n16k8_load_a_tf32_row,        // llvm.nvvm.wmma.m16n16k8.load.a.row.tf32
    nvvm_wmma_m16n16k8_load_b_tf32_col_stride,  // llvm.nvvm.wmma.m16n16k8.load.b.col.stride.tf32
    nvvm_wmma_m16n16k8_load_b_tf32_col,        // llvm.nvvm.wmma.m16n16k8.load.b.col.tf32
    nvvm_wmma_m16n16k8_load_b_tf32_row_stride,  // llvm.nvvm.wmma.m16n16k8.load.b.row.stride.tf32
    nvvm_wmma_m16n16k8_load_b_tf32_row,        // llvm.nvvm.wmma.m16n16k8.load.b.row.tf32
    nvvm_wmma_m16n16k8_load_c_f32_col,         // llvm.nvvm.wmma.m16n16k8.load.c.col.f32
    nvvm_wmma_m16n16k8_load_c_f32_col_stride,  // llvm.nvvm.wmma.m16n16k8.load.c.col.stride.f32
    nvvm_wmma_m16n16k8_load_c_f32_row,         // llvm.nvvm.wmma.m16n16k8.load.c.row.f32
    nvvm_wmma_m16n16k8_load_c_f32_row_stride,  // llvm.nvvm.wmma.m16n16k8.load.c.row.stride.f32
    nvvm_wmma_m16n16k8_mma_col_col_tf32,       // llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32
    nvvm_wmma_m16n16k8_mma_col_row_tf32,       // llvm.nvvm.wmma.m16n16k8.mma.col.row.tf32
    nvvm_wmma_m16n16k8_mma_row_col_tf32,       // llvm.nvvm.wmma.m16n16k8.mma.row.col.tf32
    nvvm_wmma_m16n16k8_mma_row_row_tf32,       // llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32
    nvvm_wmma_m16n16k8_store_d_f32_col,        // llvm.nvvm.wmma.m16n16k8.store.d.col.f32
    nvvm_wmma_m16n16k8_store_d_f32_col_stride,  // llvm.nvvm.wmma.m16n16k8.store.d.col.stride.f32
    nvvm_wmma_m16n16k8_store_d_f32_row,        // llvm.nvvm.wmma.m16n16k8.store.d.row.f32
    nvvm_wmma_m16n16k8_store_d_f32_row_stride,  // llvm.nvvm.wmma.m16n16k8.store.d.row.stride.f32
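    // m32n8k16 tile: same load/mma/store pattern as m16n16k16 above.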
    nvvm_wmma_m32n8k16_load_a_bf16_col,        // llvm.nvvm.wmma.m32n8k16.load.a.col.bf16
    nvvm_wmma_m32n8k16_load_a_f16_col,         // llvm.nvvm.wmma.m32n8k16.load.a.col.f16
    nvvm_wmma_m32n8k16_load_a_s8_col,          // llvm.nvvm.wmma.m32n8k16.load.a.col.s8
    nvvm_wmma_m32n8k16_load_a_bf16_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16
    nvvm_wmma_m32n8k16_load_a_f16_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.f16
    nvvm_wmma_m32n8k16_load_a_s8_col_stride,   // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.s8
    nvvm_wmma_m32n8k16_load_a_u8_col_stride,   // llvm.nvvm.wmma.m32n8k16.load.a.col.stride.u8
    nvvm_wmma_m32n8k16_load_a_u8_col,          // llvm.nvvm.wmma.m32n8k16.load.a.col.u8
    nvvm_wmma_m32n8k16_load_a_bf16_row,        // llvm.nvvm.wmma.m32n8k16.load.a.row.bf16
    nvvm_wmma_m32n8k16_load_a_f16_row,         // llvm.nvvm.wmma.m32n8k16.load.a.row.f16
    nvvm_wmma_m32n8k16_load_a_s8_row,          // llvm.nvvm.wmma.m32n8k16.load.a.row.s8
    nvvm_wmma_m32n8k16_load_a_bf16_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16
    nvvm_wmma_m32n8k16_load_a_f16_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.f16
    nvvm_wmma_m32n8k16_load_a_s8_row_stride,   // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.s8
    nvvm_wmma_m32n8k16_load_a_u8_row_stride,   // llvm.nvvm.wmma.m32n8k16.load.a.row.stride.u8
    nvvm_wmma_m32n8k16_load_a_u8_row,          // llvm.nvvm.wmma.m32n8k16.load.a.row.u8
    nvvm_wmma_m32n8k16_load_b_bf16_col,        // llvm.nvvm.wmma.m32n8k16.load.b.col.bf16
    nvvm_wmma_m32n8k16_load_b_f16_col,         // llvm.nvvm.wmma.m32n8k16.load.b.col.f16
    nvvm_wmma_m32n8k16_load_b_s8_col,          // llvm.nvvm.wmma.m32n8k16.load.b.col.s8
    nvvm_wmma_m32n8k16_load_b_bf16_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16
    nvvm_wmma_m32n8k16_load_b_f16_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.f16
    nvvm_wmma_m32n8k16_load_b_s8_col_stride,   // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.s8
    nvvm_wmma_m32n8k16_load_b_u8_col_stride,   // llvm.nvvm.wmma.m32n8k16.load.b.col.stride.u8
    nvvm_wmma_m32n8k16_load_b_u8_col,          // llvm.nvvm.wmma.m32n8k16.load.b.col.u8
    nvvm_wmma_m32n8k16_load_b_bf16_row,        // llvm.nvvm.wmma.m32n8k16.load.b.row.bf16
    nvvm_wmma_m32n8k16_load_b_f16_row,         // llvm.nvvm.wmma.m32n8k16.load.b.row.f16
    nvvm_wmma_m32n8k16_load_b_s8_row,          // llvm.nvvm.wmma.m32n8k16.load.b.row.s8
    nvvm_wmma_m32n8k16_load_b_bf16_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16
    nvvm_wmma_m32n8k16_load_b_f16_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.f16
    nvvm_wmma_m32n8k16_load_b_s8_row_stride,   // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.s8
    nvvm_wmma_m32n8k16_load_b_u8_row_stride,   // llvm.nvvm.wmma.m32n8k16.load.b.row.stride.u8
    nvvm_wmma_m32n8k16_load_b_u8_row,          // llvm.nvvm.wmma.m32n8k16.load.b.row.u8
    nvvm_wmma_m32n8k16_load_c_f16_col,         // llvm.nvvm.wmma.m32n8k16.load.c.col.f16
    nvvm_wmma_m32n8k16_load_c_f32_col,         // llvm.nvvm.wmma.m32n8k16.load.c.col.f32
    nvvm_wmma_m32n8k16_load_c_s32_col,         // llvm.nvvm.wmma.m32n8k16.load.c.col.s32
    nvvm_wmma_m32n8k16_load_c_f16_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f16
    nvvm_wmma_m32n8k16_load_c_f32_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32
    nvvm_wmma_m32n8k16_load_c_s32_col_stride,  // llvm.nvvm.wmma.m32n8k16.load.c.col.stride.s32
    nvvm_wmma_m32n8k16_load_c_f16_row,         // llvm.nvvm.wmma.m32n8k16.load.c.row.f16
    nvvm_wmma_m32n8k16_load_c_f32_row,         // llvm.nvvm.wmma.m32n8k16.load.c.row.f32
    nvvm_wmma_m32n8k16_load_c_s32_row,         // llvm.nvvm.wmma.m32n8k16.load.c.row.s32
    nvvm_wmma_m32n8k16_load_c_f16_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f16
    nvvm_wmma_m32n8k16_load_c_f32_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32
    nvvm_wmma_m32n8k16_load_c_s32_row_stride,  // llvm.nvvm.wmma.m32n8k16.load.c.row.stride.s32
    nvvm_wmma_m32n8k16_mma_col_col_bf16,       // llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16
    nvvm_wmma_m32n8k16_mma_col_col_f16_f16,    // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16
    nvvm_wmma_m32n8k16_mma_col_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f16.satfinite
    nvvm_wmma_m32n8k16_mma_col_col_f16_f32,    // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32
    nvvm_wmma_m32n8k16_mma_col_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.col.f16.f32.satfinite
    nvvm_wmma_m32n8k16_mma_col_col_f32_f16,    // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16
    nvvm_wmma_m32n8k16_mma_col_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f16.satfinite
    nvvm_wmma_m32n8k16_mma_col_col_f32_f32,    // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32
    nvvm_wmma_m32n8k16_mma_col_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.col.f32.f32.satfinite
    nvvm_wmma_m32n8k16_mma_col_col_s8,         // llvm.nvvm.wmma.m32n8k16.mma.col.col.s8
    nvvm_wmma_m32n8k16_mma_col_col_s8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.col.s8.satfinite
    nvvm_wmma_m32n8k16_mma_col_col_u8,         // llvm.nvvm.wmma.m32n8k16.mma.col.col.u8
    nvvm_wmma_m32n8k16_mma_col_col_u8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.col.u8.satfinite
    nvvm_wmma_m32n8k16_mma_col_row_bf16,       // llvm.nvvm.wmma.m32n8k16.mma.col.row.bf16
    nvvm_wmma_m32n8k16_mma_col_row_f16_f16,    // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16
    nvvm_wmma_m32n8k16_mma_col_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f16.satfinite
    nvvm_wmma_m32n8k16_mma_col_row_f16_f32,    // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32
    nvvm_wmma_m32n8k16_mma_col_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.row.f16.f32.satfinite
    nvvm_wmma_m32n8k16_mma_col_row_f32_f16,    // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16
    nvvm_wmma_m32n8k16_mma_col_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f16.satfinite
    nvvm_wmma_m32n8k16_mma_col_row_f32_f32,    // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32
    nvvm_wmma_m32n8k16_mma_col_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.row.f32.f32.satfinite
    nvvm_wmma_m32n8k16_mma_col_row_s8,         // llvm.nvvm.wmma.m32n8k16.mma.col.row.s8
    nvvm_wmma_m32n8k16_mma_col_row_s8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.row.s8.satfinite
    nvvm_wmma_m32n8k16_mma_col_row_u8,         // llvm.nvvm.wmma.m32n8k16.mma.col.row.u8
    nvvm_wmma_m32n8k16_mma_col_row_u8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.col.row.u8.satfinite
    nvvm_wmma_m32n8k16_mma_row_col_bf16,       // llvm.nvvm.wmma.m32n8k16.mma.row.col.bf16
    nvvm_wmma_m32n8k16_mma_row_col_f16_f16,    // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16
    nvvm_wmma_m32n8k16_mma_row_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f16.satfinite
    nvvm_wmma_m32n8k16_mma_row_col_f16_f32,    // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32
    nvvm_wmma_m32n8k16_mma_row_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.col.f16.f32.satfinite
    nvvm_wmma_m32n8k16_mma_row_col_f32_f16,    // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16
    nvvm_wmma_m32n8k16_mma_row_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f16.satfinite
    nvvm_wmma_m32n8k16_mma_row_col_f32_f32,    // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32
    nvvm_wmma_m32n8k16_mma_row_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.col.f32.f32.satfinite
    nvvm_wmma_m32n8k16_mma_row_col_s8,         // llvm.nvvm.wmma.m32n8k16.mma.row.col.s8
    nvvm_wmma_m32n8k16_mma_row_col_s8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.col.s8.satfinite
    nvvm_wmma_m32n8k16_mma_row_col_u8,         // llvm.nvvm.wmma.m32n8k16.mma.row.col.u8
    nvvm_wmma_m32n8k16_mma_row_col_u8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.col.u8.satfinite
    nvvm_wmma_m32n8k16_mma_row_row_bf16,       // llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16
    nvvm_wmma_m32n8k16_mma_row_row_f16_f16,    // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16
    nvvm_wmma_m32n8k16_mma_row_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f16.satfinite
    nvvm_wmma_m32n8k16_mma_row_row_f16_f32,    // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32
    nvvm_wmma_m32n8k16_mma_row_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.row.f16.f32.satfinite
    nvvm_wmma_m32n8k16_mma_row_row_f32_f16,    // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16
    nvvm_wmma_m32n8k16_mma_row_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f16.satfinite
    nvvm_wmma_m32n8k16_mma_row_row_f32_f32,    // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32
    nvvm_wmma_m32n8k16_mma_row_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.row.f32.f32.satfinite
    nvvm_wmma_m32n8k16_mma_row_row_s8,         // llvm.nvvm.wmma.m32n8k16.mma.row.row.s8
    nvvm_wmma_m32n8k16_mma_row_row_s8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.row.s8.satfinite
    nvvm_wmma_m32n8k16_mma_row_row_u8,         // llvm.nvvm.wmma.m32n8k16.mma.row.row.u8
    nvvm_wmma_m32n8k16_mma_row_row_u8_satfinite,  // llvm.nvvm.wmma.m32n8k16.mma.row.row.u8.satfinite
    nvvm_wmma_m32n8k16_store_d_f16_col,        // llvm.nvvm.wmma.m32n8k16.store.d.col.f16
    nvvm_wmma_m32n8k16_store_d_f32_col,        // llvm.nvvm.wmma.m32n8k16.store.d.col.f32
    nvvm_wmma_m32n8k16_store_d_s32_col,        // llvm.nvvm.wmma.m32n8k16.store.d.col.s32
    nvvm_wmma_m32n8k16_store_d_f16_col_stride,  // llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f16
    nvvm_wmma_m32n8k16_store_d_f32_col_stride,  // llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32
    nvvm_wmma_m32n8k16_store_d_s32_col_stride,  // llvm.nvvm.wmma.m32n8k16.store.d.col.stride.s32
    nvvm_wmma_m32n8k16_store_d_f16_row,        // llvm.nvvm.wmma.m32n8k16.store.d.row.f16
    nvvm_wmma_m32n8k16_store_d_f32_row,        // llvm.nvvm.wmma.m32n8k16.store.d.row.f32
    nvvm_wmma_m32n8k16_store_d_s32_row,        // llvm.nvvm.wmma.m32n8k16.store.d.row.s32
    nvvm_wmma_m32n8k16_store_d_f16_row_stride,  // llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f16
    nvvm_wmma_m32n8k16_store_d_f32_row_stride,  // llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32
    nvvm_wmma_m32n8k16_store_d_s32_row_stride,  // llvm.nvvm.wmma.m32n8k16.store.d.row.stride.s32
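    // m8n32k16 tile: same load/mma/store pattern as m16n16k16 above.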
    nvvm_wmma_m8n32k16_load_a_bf16_col,        // llvm.nvvm.wmma.m8n32k16.load.a.col.bf16
    nvvm_wmma_m8n32k16_load_a_f16_col,         // llvm.nvvm.wmma.m8n32k16.load.a.col.f16
    nvvm_wmma_m8n32k16_load_a_s8_col,          // llvm.nvvm.wmma.m8n32k16.load.a.col.s8
    nvvm_wmma_m8n32k16_load_a_bf16_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16
    nvvm_wmma_m8n32k16_load_a_f16_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.f16
    nvvm_wmma_m8n32k16_load_a_s8_col_stride,   // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.s8
    nvvm_wmma_m8n32k16_load_a_u8_col_stride,   // llvm.nvvm.wmma.m8n32k16.load.a.col.stride.u8
    nvvm_wmma_m8n32k16_load_a_u8_col,          // llvm.nvvm.wmma.m8n32k16.load.a.col.u8
    nvvm_wmma_m8n32k16_load_a_bf16_row,        // llvm.nvvm.wmma.m8n32k16.load.a.row.bf16
    nvvm_wmma_m8n32k16_load_a_f16_row,         // llvm.nvvm.wmma.m8n32k16.load.a.row.f16
    nvvm_wmma_m8n32k16_load_a_s8_row,          // llvm.nvvm.wmma.m8n32k16.load.a.row.s8
    nvvm_wmma_m8n32k16_load_a_bf16_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16
    nvvm_wmma_m8n32k16_load_a_f16_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.f16
    nvvm_wmma_m8n32k16_load_a_s8_row_stride,   // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.s8
    nvvm_wmma_m8n32k16_load_a_u8_row_stride,   // llvm.nvvm.wmma.m8n32k16.load.a.row.stride.u8
    nvvm_wmma_m8n32k16_load_a_u8_row,          // llvm.nvvm.wmma.m8n32k16.load.a.row.u8
    nvvm_wmma_m8n32k16_load_b_bf16_col,        // llvm.nvvm.wmma.m8n32k16.load.b.col.bf16
    nvvm_wmma_m8n32k16_load_b_f16_col,         // llvm.nvvm.wmma.m8n32k16.load.b.col.f16
    nvvm_wmma_m8n32k16_load_b_s8_col,          // llvm.nvvm.wmma.m8n32k16.load.b.col.s8
    nvvm_wmma_m8n32k16_load_b_bf16_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16
    nvvm_wmma_m8n32k16_load_b_f16_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.f16
    nvvm_wmma_m8n32k16_load_b_s8_col_stride,   // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.s8
    nvvm_wmma_m8n32k16_load_b_u8_col_stride,   // llvm.nvvm.wmma.m8n32k16.load.b.col.stride.u8
    nvvm_wmma_m8n32k16_load_b_u8_col,          // llvm.nvvm.wmma.m8n32k16.load.b.col.u8
    nvvm_wmma_m8n32k16_load_b_bf16_row,        // llvm.nvvm.wmma.m8n32k16.load.b.row.bf16
    nvvm_wmma_m8n32k16_load_b_f16_row,         // llvm.nvvm.wmma.m8n32k16.load.b.row.f16
    nvvm_wmma_m8n32k16_load_b_s8_row,          // llvm.nvvm.wmma.m8n32k16.load.b.row.s8
    nvvm_wmma_m8n32k16_load_b_bf16_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16
    nvvm_wmma_m8n32k16_load_b_f16_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.f16
    nvvm_wmma_m8n32k16_load_b_s8_row_stride,   // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.s8
    nvvm_wmma_m8n32k16_load_b_u8_row_stride,   // llvm.nvvm.wmma.m8n32k16.load.b.row.stride.u8
    nvvm_wmma_m8n32k16_load_b_u8_row,          // llvm.nvvm.wmma.m8n32k16.load.b.row.u8
    nvvm_wmma_m8n32k16_load_c_f16_col,         // llvm.nvvm.wmma.m8n32k16.load.c.col.f16
    nvvm_wmma_m8n32k16_load_c_f32_col,         // llvm.nvvm.wmma.m8n32k16.load.c.col.f32
    nvvm_wmma_m8n32k16_load_c_s32_col,         // llvm.nvvm.wmma.m8n32k16.load.c.col.s32
    nvvm_wmma_m8n32k16_load_c_f16_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f16
    nvvm_wmma_m8n32k16_load_c_f32_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32
    nvvm_wmma_m8n32k16_load_c_s32_col_stride,  // llvm.nvvm.wmma.m8n32k16.load.c.col.stride.s32
    nvvm_wmma_m8n32k16_load_c_f16_row,         // llvm.nvvm.wmma.m8n32k16.load.c.row.f16
    nvvm_wmma_m8n32k16_load_c_f32_row,         // llvm.nvvm.wmma.m8n32k16.load.c.row.f32
    nvvm_wmma_m8n32k16_load_c_s32_row,         // llvm.nvvm.wmma.m8n32k16.load.c.row.s32
    nvvm_wmma_m8n32k16_load_c_f16_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f16
    nvvm_wmma_m8n32k16_load_c_f32_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32
    nvvm_wmma_m8n32k16_load_c_s32_row_stride,  // llvm.nvvm.wmma.m8n32k16.load.c.row.stride.s32
    nvvm_wmma_m8n32k16_mma_col_col_bf16,       // llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16
    nvvm_wmma_m8n32k16_mma_col_col_f16_f16,    // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16
    nvvm_wmma_m8n32k16_mma_col_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f16.satfinite
    nvvm_wmma_m8n32k16_mma_col_col_f16_f32,    // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32
    nvvm_wmma_m8n32k16_mma_col_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.col.f16.f32.satfinite
    nvvm_wmma_m8n32k16_mma_col_col_f32_f16,    // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16
    nvvm_wmma_m8n32k16_mma_col_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f16.satfinite
    nvvm_wmma_m8n32k16_mma_col_col_f32_f32,    // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32
    nvvm_wmma_m8n32k16_mma_col_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.col.f32.f32.satfinite
    nvvm_wmma_m8n32k16_mma_col_col_s8,         // llvm.nvvm.wmma.m8n32k16.mma.col.col.s8
    nvvm_wmma_m8n32k16_mma_col_col_s8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.col.s8.satfinite
    nvvm_wmma_m8n32k16_mma_col_col_u8,         // llvm.nvvm.wmma.m8n32k16.mma.col.col.u8
    nvvm_wmma_m8n32k16_mma_col_col_u8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.col.u8.satfinite
    nvvm_wmma_m8n32k16_mma_col_row_bf16,       // llvm.nvvm.wmma.m8n32k16.mma.col.row.bf16
    nvvm_wmma_m8n32k16_mma_col_row_f16_f16,    // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16
    nvvm_wmma_m8n32k16_mma_col_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f16.satfinite
    nvvm_wmma_m8n32k16_mma_col_row_f16_f32,    // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32
    nvvm_wmma_m8n32k16_mma_col_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.row.f16.f32.satfinite
    nvvm_wmma_m8n32k16_mma_col_row_f32_f16,    // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16
    nvvm_wmma_m8n32k16_mma_col_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f16.satfinite
    nvvm_wmma_m8n32k16_mma_col_row_f32_f32,    // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32
    nvvm_wmma_m8n32k16_mma_col_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.row.f32.f32.satfinite
    nvvm_wmma_m8n32k16_mma_col_row_s8,         // llvm.nvvm.wmma.m8n32k16.mma.col.row.s8
    nvvm_wmma_m8n32k16_mma_col_row_s8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.row.s8.satfinite
    nvvm_wmma_m8n32k16_mma_col_row_u8,         // llvm.nvvm.wmma.m8n32k16.mma.col.row.u8
    nvvm_wmma_m8n32k16_mma_col_row_u8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.col.row.u8.satfinite
    nvvm_wmma_m8n32k16_mma_row_col_bf16,       // llvm.nvvm.wmma.m8n32k16.mma.row.col.bf16
    nvvm_wmma_m8n32k16_mma_row_col_f16_f16,    // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16
    nvvm_wmma_m8n32k16_mma_row_col_f16_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f16.satfinite
    nvvm_wmma_m8n32k16_mma_row_col_f16_f32,    // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32
    nvvm_wmma_m8n32k16_mma_row_col_f16_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.col.f16.f32.satfinite
    nvvm_wmma_m8n32k16_mma_row_col_f32_f16,    // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16
    nvvm_wmma_m8n32k16_mma_row_col_f32_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f16.satfinite
    nvvm_wmma_m8n32k16_mma_row_col_f32_f32,    // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32
    nvvm_wmma_m8n32k16_mma_row_col_f32_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.col.f32.f32.satfinite
    nvvm_wmma_m8n32k16_mma_row_col_s8,         // llvm.nvvm.wmma.m8n32k16.mma.row.col.s8
    nvvm_wmma_m8n32k16_mma_row_col_s8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.col.s8.satfinite
    nvvm_wmma_m8n32k16_mma_row_col_u8,         // llvm.nvvm.wmma.m8n32k16.mma.row.col.u8
    nvvm_wmma_m8n32k16_mma_row_col_u8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.col.u8.satfinite
    nvvm_wmma_m8n32k16_mma_row_row_bf16,       // llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16
    nvvm_wmma_m8n32k16_mma_row_row_f16_f16,    // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16
    nvvm_wmma_m8n32k16_mma_row_row_f16_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f16.satfinite
    nvvm_wmma_m8n32k16_mma_row_row_f16_f32,    // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32
    nvvm_wmma_m8n32k16_mma_row_row_f16_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.row.f16.f32.satfinite
    nvvm_wmma_m8n32k16_mma_row_row_f32_f16,    // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16
    nvvm_wmma_m8n32k16_mma_row_row_f32_f16_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f16.satfinite
    nvvm_wmma_m8n32k16_mma_row_row_f32_f32,    // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32
    nvvm_wmma_m8n32k16_mma_row_row_f32_f32_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.row.f32.f32.satfinite
    nvvm_wmma_m8n32k16_mma_row_row_s8,         // llvm.nvvm.wmma.m8n32k16.mma.row.row.s8
    nvvm_wmma_m8n32k16_mma_row_row_s8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.row.s8.satfinite
    nvvm_wmma_m8n32k16_mma_row_row_u8,         // llvm.nvvm.wmma.m8n32k16.mma.row.row.u8
    nvvm_wmma_m8n32k16_mma_row_row_u8_satfinite,  // llvm.nvvm.wmma.m8n32k16.mma.row.row.u8.satfinite
    nvvm_wmma_m8n32k16_store_d_f16_col,        // llvm.nvvm.wmma.m8n32k16.store.d.col.f16
    nvvm_wmma_m8n32k16_store_d_f32_col,        // llvm.nvvm.wmma.m8n32k16.store.d.col.f32
    nvvm_wmma_m8n32k16_store_d_s32_col,        // llvm.nvvm.wmma.m8n32k16.store.d.col.s32
    nvvm_wmma_m8n32k16_store_d_f16_col_stride,  // llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f16
    nvvm_wmma_m8n32k16_store_d_f32_col_stride,  // llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32
    nvvm_wmma_m8n32k16_store_d_s32_col_stride,  // llvm.nvvm.wmma.m8n32k16.store.d.col.stride.s32
    nvvm_wmma_m8n32k16_store_d_f16_row,        // llvm.nvvm.wmma.m8n32k16.store.d.row.f16
    nvvm_wmma_m8n32k16_store_d_f32_row,        // llvm.nvvm.wmma.m8n32k16.store.d.row.f32
    nvvm_wmma_m8n32k16_store_d_s32_row,        // llvm.nvvm.wmma.m8n32k16.store.d.row.s32
    nvvm_wmma_m8n32k16_store_d_f16_row_stride,  // llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f16
    nvvm_wmma_m8n32k16_store_d_f32_row_stride,  // llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32
    nvvm_wmma_m8n32k16_store_d_s32_row_stride,  // llvm.nvvm.wmma.m8n32k16.store.d.row.stride.s32
    nvvm_wmma_m8n8k128_load_a_b1_row,          // llvm.nvvm.wmma.m8n8k128.load.a.row.b1
    nvvm_wmma_m8n8k128_load_a_b1_row_stride,   // llvm.nvvm.wmma.m8n8k128.load.a.row.stride.b1
    nvvm_wmma_m8n8k128_load_b_b1_col,          // llvm.nvvm.wmma.m8n8k128.load.b.col.b1
    nvvm_wmma_m8n8k128_load_b_b1_col_stride,   // llvm.nvvm.wmma.m8n8k128.load.b.col.stride.b1
    nvvm_wmma_m8n8k128_load_c_s32_col,         // llvm.nvvm.wmma.m8n8k128.load.c.col.s32
    nvvm_wmma_m8n8k128_load_c_s32_col_stride,  // llvm.nvvm.wmma.m8n8k128.load.c.col.stride.s32
    nvvm_wmma_m8n8k128_load_c_s32_row,         // llvm.nvvm.wmma.m8n8k128.load.c.row.s32
    nvvm_wmma_m8n8k128_load_c_s32_row_stride,  // llvm.nvvm.wmma.m8n8k128.load.c.row.stride.s32
    nvvm_wmma_m8n8k128_mma_and_popc_row_col_b1,  // llvm.nvvm.wmma.m8n8k128.mma.and.popc.row.col.b1
    nvvm_wmma_m8n8k128_mma_xor_popc_row_col_b1,  // llvm.nvvm.wmma.m8n8k128.mma.xor.popc.row.col.b1
    nvvm_wmma_m8n8k128_store_d_s32_col,        // llvm.nvvm.wmma.m8n8k128.store.d.col.s32
    nvvm_wmma_m8n8k128_store_d_s32_col_stride,  // llvm.nvvm.wmma.m8n8k128.store.d.col.stride.s32
    nvvm_wmma_m8n8k128_store_d_s32_row,        // llvm.nvvm.wmma.m8n8k128.store.d.row.s32
    nvvm_wmma_m8n8k128_store_d_s32_row_stride,  // llvm.nvvm.wmma.m8n8k128.store.d.row.stride.s32
    nvvm_wmma_m8n8k32_load_a_s4_row,           // llvm.nvvm.wmma.m8n8k32.load.a.row.s4
    nvvm_wmma_m8n8k32_load_a_s4_row_stride,    // llvm.nvvm.wmma.m8n8k32.load.a.row.stride.s4
    nvvm_wmma_m8n8k32_load_a_u4_row_stride,    // llvm.nvvm.wmma.m8n8k32.load.a.row.stride.u4
    nvvm_wmma_m8n8k32_load_a_u4_row,           // llvm.nvvm.wmma.m8n8k32.load.a.row.u4
    nvvm_wmma_m8n8k32_load_b_s4_col,           // llvm.nvvm.wmma.m8n8k32.load.b.col.s4
    nvvm_wmma_m8n8k32_load_b_s4_col_stride,    // llvm.nvvm.wmma.m8n8k32.load.b.col.stride.s4
    nvvm_wmma_m8n8k32_load_b_u4_col_stride,    // llvm.nvvm.wmma.m8n8k32.load.b.col.stride.u4
    nvvm_wmma_m8n8k32_load_b_u4_col,           // llvm.nvvm.wmma.m8n8k32.load.b.col.u4
    nvvm_wmma_m8n8k32_load_c_s32_col,          // llvm.nvvm.wmma.m8n8k32.load.c.col.s32
    nvvm_wmma_m8n8k32_load_c_s32_col_stride,   // llvm.nvvm.wmma.m8n8k32.load.c.col.stride.s32
    nvvm_wmma_m8n8k32_load_c_s32_row,          // llvm.nvvm.wmma.m8n8k32.load.c.row.s32
    nvvm_wmma_m8n8k32_load_c_s32_row_stride,   // llvm.nvvm.wmma.m8n8k32.load.c.row.stride.s32
    nvvm_wmma_m8n8k32_mma_row_col_s4,          // llvm.nvvm.wmma.m8n8k32.mma.row.col.s4
    nvvm_wmma_m8n8k32_mma_row_col_s4_satfinite,  // llvm.nvvm.wmma.m8n8k32.mma.row.col.s4.satfinite
    nvvm_wmma_m8n8k32_mma_row_col_u4,          // llvm.nvvm.wmma.m8n8k32.mma.row.col.u4
    nvvm_wmma_m8n8k32_mma_row_col_u4_satfinite,  // llvm.nvvm.wmma.m8n8k32.mma.row.col.u4.satfinite
    nvvm_wmma_m8n8k32_store_d_s32_col,         // llvm.nvvm.wmma.m8n8k32.store.d.col.s32
    nvvm_wmma_m8n8k32_store_d_s32_col_stride,  // llvm.nvvm.wmma.m8n8k32.store.d.col.stride.s32
    nvvm_wmma_m8n8k32_store_d_s32_row,         // llvm.nvvm.wmma.m8n8k32.store.d.row.s32
    nvvm_wmma_m8n8k32_store_d_s32_row_stride,  // llvm.nvvm.wmma.m8n8k32.store.d.row.stride.s32
    nvvm_wmma_m8n8k4_load_a_f64_col,           // llvm.nvvm.wmma.m8n8k4.load.a.col.f64
    nvvm_wmma_m8n8k4_load_a_f64_col_stride,    // llvm.nvvm.wmma.m8n8k4.load.a.col.stride.f64
    nvvm_wmma_m8n8k4_load_a_f64_row,           // llvm.nvvm.wmma.m8n8k4.load.a.row.f64
    nvvm_wmma_m8n8k4_load_a_f64_row_stride,    // llvm.nvvm.wmma.m8n8k4.load.a.row.stride.f64
    nvvm_wmma_m8n8k4_load_b_f64_col,           // llvm.nvvm.wmma.m8n8k4.load.b.col.f64
    nvvm_wmma_m8n8k4_load_b_f64_col_stride,    // llvm.nvvm.wmma.m8n8k4.load.b.col.stride.f64
    nvvm_wmma_m8n8k4_load_b_f64_row,           // llvm.nvvm.wmma.m8n8k4.load.b.row.f64
    nvvm_wmma_m8n8k4_load_b_f64_row_stride,    // llvm.nvvm.wmma.m8n8k4.load.b.row.stride.f64
    nvvm_wmma_m8n8k4_load_c_f64_col,           // llvm.nvvm.wmma.m8n8k4.load.c.col.f64
    nvvm_wmma_m8n8k4_load_c_f64_col_stride,    // llvm.nvvm.wmma.m8n8k4.load.c.col.stride.f64
    nvvm_wmma_m8n8k4_load_c_f64_row,           // llvm.nvvm.wmma.m8n8k4.load.c.row.f64
    nvvm_wmma_m8n8k4_load_c_f64_row_stride,    // llvm.nvvm.wmma.m8n8k4.load.c.row.stride.f64
    nvvm_wmma_m8n8k4_mma_col_col_f64,          // llvm.nvvm.wmma.m8n8k4.mma.col.col.f64
    nvvm_wmma_m8n8k4_mma_col_col_rm_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.col.rm.f64
    nvvm_wmma_m8n8k4_mma_col_col_rn_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.col.rn.f64
    nvvm_wmma_m8n8k4_mma_col_col_rp_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.col.rp.f64
    nvvm_wmma_m8n8k4_mma_col_col_rz_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.col.rz.f64
    nvvm_wmma_m8n8k4_mma_col_row_f64,          // llvm.nvvm.wmma.m8n8k4.mma.col.row.f64
    nvvm_wmma_m8n8k4_mma_col_row_rm_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.row.rm.f64
    nvvm_wmma_m8n8k4_mma_col_row_rn_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.row.rn.f64
    nvvm_wmma_m8n8k4_mma_col_row_rp_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.row.rp.f64
    nvvm_wmma_m8n8k4_mma_col_row_rz_f64,       // llvm.nvvm.wmma.m8n8k4.mma.col.row.rz.f64
    nvvm_wmma_m8n8k4_mma_row_col_f64,          // llvm.nvvm.wmma.m8n8k4.mma.row.col.f64
    nvvm_wmma_m8n8k4_mma_row_col_rm_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.col.rm.f64
    nvvm_wmma_m8n8k4_mma_row_col_rn_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.col.rn.f64
    nvvm_wmma_m8n8k4_mma_row_col_rp_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.col.rp.f64
    nvvm_wmma_m8n8k4_mma_row_col_rz_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.col.rz.f64
    nvvm_wmma_m8n8k4_mma_row_row_f64,          // llvm.nvvm.wmma.m8n8k4.mma.row.row.f64
    nvvm_wmma_m8n8k4_mma_row_row_rm_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.row.rm.f64
    nvvm_wmma_m8n8k4_mma_row_row_rn_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.row.rn.f64
    nvvm_wmma_m8n8k4_mma_row_row_rp_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.row.rp.f64
    nvvm_wmma_m8n8k4_mma_row_row_rz_f64,       // llvm.nvvm.wmma.m8n8k4.mma.row.row.rz.f64
    nvvm_wmma_m8n8k4_store_d_f64_col,          // llvm.nvvm.wmma.m8n8k4.store.d.col.f64
    nvvm_wmma_m8n8k4_store_d_f64_col_stride,   // llvm.nvvm.wmma.m8n8k4.store.d.col.stride.f64
    nvvm_wmma_m8n8k4_store_d_f64_row,          // llvm.nvvm.wmma.m8n8k4.store.d.row.f64
    nvvm_wmma_m8n8k4_store_d_f64_row_stride,   // llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64
}; // enum
} // namespace Intrinsic
} // namespace llvm
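
// A minimal usage sketch: materializing one of the intrinsic IDs above as a
// callable Function.  `M` is an assumed caller-provided Module; the NVVM WMMA
// intrinsics are not overloaded, so Intrinsic::getDeclaration needs no
// explicit type arguments here.
//
//   #include "llvm/IR/Intrinsics.h"
//   #include "llvm/IR/Module.h"
//
//   llvm::Function *getWmmaF64Mma(llvm::Module &M) {
//     return llvm::Intrinsic::getDeclaration(
//         &M, llvm::Intrinsic::nvvm_wmma_m8n8k4_mma_row_row_f64);
//   }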

#endif
//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the class definitions of all of the subclasses of the
// Instruction class.  This is meant to be an easy way to get access to all
// instruction subclasses.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INSTRUCTIONS_H
#define LLVM_IR_INSTRUCTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>

namespace llvm {

class APFloat;
class APInt;
class BasicBlock;
class ConstantInt;
class DataLayout;
class StringRef;
class Type;
class Value;

//===----------------------------------------------------------------------===//
//                                AllocaInst Class
//===----------------------------------------------------------------------===//

/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  Type *AllocatedType;

  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the address space for the allocation.
  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  /// Get allocation size in bytes. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;

  /// Get allocation size in bits. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// For use only in special circumstances that need to generically
  /// transform a whole instruction (e.g., IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
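
// A minimal usage sketch, assuming a caller-provided function `F` with a
// non-empty entry block: a constant-size alloca placed in the entry block,
// where the code generator can fold it into the prolog/epilog (see
// isStaticAlloca above).
//
//   llvm::AllocaInst *makeEntryAlloca(llvm::Function &F,
//                                     llvm::LLVMContext &Ctx) {
//     llvm::BasicBlock &Entry = F.getEntryBlock();
//     auto *AI = new llvm::AllocaInst(
//         llvm::Type::getInt32Ty(Ctx), /*AddrSpace=*/0, "tmp",
//         /*InsertBefore=*/&*Entry.getFirstInsertionPt());
//     AI->setAlignment(llvm::Align(4)); // stored internally as log2
//     assert(AI->isStaticAlloca() && "entry block + constant size");
//     return AI;
//   }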

//===----------------------------------------------------------------------===//
//                                LoadInst Class
//===----------------------------------------------------------------------===//

/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction.  May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
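
// A minimal usage sketch, assuming `Ptr` points to an i64: an aligned acquire
// atomic load.  Release and AcquireRelease would be invalid per setOrdering
// above, and the atomic ordering makes isSimple() return false.
//
//   llvm::LoadInst *makeAcquireLoad(llvm::Value *Ptr, llvm::LLVMContext &Ctx,
//                                   llvm::Instruction *InsertBefore) {
//     auto *LI = new llvm::LoadInst(llvm::Type::getInt64Ty(Ctx), Ptr, "v",
//                                   /*isVolatile=*/false, llvm::Align(8),
//                                   llvm::AtomicOrdering::Acquire,
//                                   llvm::SyncScope::System, InsertBefore);
//     assert(!LI->isSimple() && LI->getAlign() == llvm::Align(8));
//     return LI;
//   }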

//===----------------------------------------------------------------------===//
//                                StoreInst Class
//===----------------------------------------------------------------------===//

/// An instruction for storing to memory.
class StoreInst : public Instruction {
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction.  May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
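
// A minimal usage sketch, assuming `Val` is an i32 and `Ptr` points to one:
// a plain aligned store upgraded to a release atomic via setAtomic.
//
//   void emitReleaseStore(llvm::Value *Val, llvm::Value *Ptr,
//                         llvm::Instruction *InsertBefore) {
//     auto *SI = new llvm::StoreInst(Val, Ptr, /*isVolatile=*/false,
//                                    llvm::Align(4), InsertBefore);
//     SI->setAtomic(llvm::AtomicOrdering::Release); // Acquire would be invalid
//     assert(SI->getValueOperand() == Val && SI->getPointerOperand() == Ptr);
//   }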

//===----------------------------------------------------------------------===//
//                                FenceInst Class
//===----------------------------------------------------------------------===//

/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction.  May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
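
// A minimal usage sketch: a sequentially consistent fence appended to a
// caller-provided basic block `BB`.
//
//   llvm::FenceInst *emitSeqCstFence(llvm::LLVMContext &Ctx,
//                                    llvm::BasicBlock *BB) {
//     return new llvm::FenceInst(
//         Ctx, llvm::AtomicOrdering::SequentiallyConsistent,
//         llvm::SyncScope::System, /*InsertAtEnd=*/BB);
//   }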

//===----------------------------------------------------------------------===//
//                                AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory location being accessed by this
  /// cmpxchg instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    return getSuccessOrdering();
  }
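
  // Worked examples (informative): failure == Acquire with success == Release
  // merges to AcquireRelease; failure == Acquire with success == Monotonic
  // yields Acquire; a SequentiallyConsistent failure ordering dominates
  // everything; otherwise the success ordering is already at least as strong
  // and is returned unchanged.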

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction.  Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
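
// A minimal usage sketch, assuming `Ptr` points to an i32 and `Expected` /
// `Desired` are i32 values: a strong cmpxchg whose failure ordering is
// derived from the success ordering via getStrongestFailureOrdering.
//
//   llvm::AtomicCmpXchgInst *emitCas(llvm::Value *Ptr, llvm::Value *Expected,
//                                    llvm::Value *Desired,
//                                    llvm::Instruction *InsertBefore) {
//     auto Success = llvm::AtomicOrdering::AcquireRelease;
//     auto *CAS = new llvm::AtomicCmpXchgInst(
//         Ptr, Expected, Desired, llvm::Align(4), Success,
//         llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success),
//         llvm::SyncScope::System, InsertBefore);
//     CAS->setWeak(false); // strong: may not fail spuriously
//     return CAS;          // yields {old value, i1 success}
//   }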

//===----------------------------------------------------------------------===//
//                                AtomicRMWInst Class
//===----------------------------------------------------------------------===//

/// An instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back.  Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make.  In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction.  These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    /// *p = maxnum(old, v)
    /// \p maxnum matches the behavior of \p llvm.maxnum.*.
    FMax,

    /// *p = minnum(old, v)
    /// \p minnum matches the behavior of \p llvm.minnum.*.
    FMin,

    /// Increment by one, wrapping back to zero once the maximum value is
    /// reached.
    /// *p = (old u>= v) ? 0 : (old + 1)
    UIncWrap,

    /// Decrement by one, wrapping back to the maximum when zero (or a value
    /// above the maximum) is reached.
    /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
    UDecWrap,

    FIRST_BINOP = Xchg,
    LAST_BINOP = UDecWrap,
    BAD_BINOP
  };
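
  // Worked example for the wrapping operations above (informative): with
  // v == 3, UIncWrap cycles *p through 0, 1, 2, 3, 0, ... since old == 3
  // gives (3 u>= 3) ? 0 : ... == 0.  UDecWrap with v == 3 cycles
  // 3, 2, 1, 0, 3, ... since old == 0 wraps back to v.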

private:
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  static StringRef getOperationName(BinOp Op);

  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
    case AtomicRMWInst::FMax:
    case AtomicRMWInst::FMin:
      return true;
    default:
      return false;
    }
  }

  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory location being accessed by this rmw
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    assert(Ordering != AtomicOrdering::Unordered &&
           "atomicrmw instructions cannot be unordered.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};

template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
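
// A minimal usage sketch, assuming `Ptr` points to an i64 and `Delta` is an
// i64: an atomic fetch-and-add whose result is the value *before* the update.
//
//   llvm::AtomicRMWInst *emitFetchAdd(llvm::Value *Ptr, llvm::Value *Delta,
//                                     llvm::Instruction *InsertBefore) {
//     auto *RMW = new llvm::AtomicRMWInst(
//         llvm::AtomicRMWInst::Add, Ptr, Delta, llvm::Align(8),
//         llvm::AtomicOrdering::SequentiallyConsistent,
//         llvm::SyncScope::System, InsertBefore);
//     assert(!RMW->isFloatingPointOperation());
//     return RMW; // the instruction's value is the old *Ptr
//   }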

//===----------------------------------------------------------------------===//
//                             GetElementPtrInst Class
//===----------------------------------------------------------------------===//

// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}

/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs.
///
class GetElementPtrInst : public Instruction {
  Type *SourceElementType;
  Type *ResultElementType;

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first ctor can optionally insert before an existing
  /// instruction, the second appends the new instruction to the specified
  /// BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type.  This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  inline op_iterator       idx_begin()       { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator       idx_end()         { return op_end(); }
  inline const_op_iterator idx_end()   const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U;    // index of the pointer operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
    // Vector GEP
    Type *Ty = Ptr->getType();
    if (Ty->isVectorTy())
      return Ty;

    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(Ty, EltCount);
      }
    // Scalar GEP
    return Ty;
  }
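
  // Informative example: with an all-scalar index list, the base pointer type
  // is returned unchanged, while a scalar base combined with one <2 x i64>
  // index vector yields a <2 x ptr> result type.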

  unsigned getNumIndices() const {  // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros.  If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers.  If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}

GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  init(Ptr, IdxList, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
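
// A minimal usage sketch, assuming `Base` points to a [16 x i32] array and
// `DL` is the module's DataLayout: an inbounds GEP to element 5, plus
// recovery of its constant byte offset with accumulateConstantOffset.
//
//   llvm::Value *emitElementGEP(llvm::Value *Base, llvm::LLVMContext &Ctx,
//                               const llvm::DataLayout &DL,
//                               llvm::Instruction *InsertBefore) {
//     auto *I32 = llvm::Type::getInt32Ty(Ctx);
//     auto *ArrTy = llvm::ArrayType::get(I32, 16);
//     llvm::Value *Idx[] = {llvm::ConstantInt::get(I32, 0),
//                           llvm::ConstantInt::get(I32, 5)};
//     auto *GEP = llvm::GetElementPtrInst::CreateInBounds(ArrTy, Base, Idx,
//                                                         "elem", InsertBefore);
//     llvm::APInt Off(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
//     if (GEP->accumulateConstantOffset(DL, Off))
//       assert(Off == 20 && "5 * sizeof(i32)");
//     return GEP;
//   }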

//===----------------------------------------------------------------------===//
//                               ICmpInst Class
//===----------------------------------------------------------------------===//

/// Represents an integer comparison operator.
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must have identical types.
class ICmpInst: public CmpInst {
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
          "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics.
  ICmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operands were
  /// regarded as signed.
  /// Return the signed version of the predicate.
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operands were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate.
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// Return true if the given predicate is either EQ or NE.  This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE.  This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
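  /// For example, `icmp ult %a, %b` becomes `icmp ugt %b, %a`, which yields
  /// the same result.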
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
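  ///
  /// Example (an illustrative sketch):
  /// \code
  ///   APInt A(/*numBits=*/32, 5), B(/*numBits=*/32, 7);
  ///   bool R = ICmpInst::compare(A, B, ICmpInst::ICMP_SLT); // R == true
  /// \endcode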
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               FCmpInst Class
//===----------------------------------------------------------------------===//

/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be of identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics.
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the given predicate is an equality predicate
  /// (OEQ, ONE, UEQ, or UNE).
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is an equality
  /// predicate (OEQ, ONE, UEQ, or UNE).
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Returns the sequence of all FCmp predicates.
  ///
  static auto predicates() { return FCmpPredicates(); }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APFloat &LHS, const APFloat &RHS,
                      FCmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention.  This class uses the low bit of the
/// SubClassData field to indicate whether or not this is a tail call.  The
/// rest of the bits hold the calling convention of the call.
///
class CallInst : public CallBase {
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }
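
  // Example (an illustrative sketch; `Callee` is an assumed FunctionCallee,
  // `Arg0`/`Arg1` assumed argument values matching its signature, and `BB`
  // an existing BasicBlock):
  //
  //   Value *Args[] = {Arg0, Arg1};
  //   CallInst *CI = CallInst::Create(Callee, Args, "call", BB);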

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical to \p CI in every way except
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *
  CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, Type *AllocTy,
               Value *AllocSize, Value *ArraySize = nullptr,
               ArrayRef<OperandBundleDef> Bundles = std::nullopt,
               Function *MallocF = nullptr, const Twine &Name = "");
  static Instruction *
  CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, Type *AllocTy,
               Value *AllocSize, Value *ArraySize = nullptr,
               ArrayRef<OperandBundleDef> Bundles = std::nullopt,
               Function *MallocF = nullptr, const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }
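
  // Example (an illustrative sketch; `CI` is an assumed `CallInst *`):
  //
  //   CI->setTailCallKind(CallInst::TCK_MustTail);
  //   assert(CI->isTailCall() && "musttail implies tail");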

  /// Return true if the call can return twice.
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}

CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}

//===----------------------------------------------------------------------===//
//                               SelectInst Class
//===----------------------------------------------------------------------===//

/// This class represents the LLVM 'select' instruction.
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
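  ///
  /// Example (an illustrative sketch; `Cond`, `X`, and `Y` are assumed
  /// values, with `Cond` of type i1, and `InsertPt` an insertion point):
  /// \code
  ///   assert(!SelectInst::areInvalidOperands(Cond, X, Y));
  ///   SelectInst *Sel = SelectInst::Create(Cond, X, Y, "sel", InsertPt);
  /// \endcode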
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)

//===----------------------------------------------------------------------===//
//                                VAArgInst Class
//===----------------------------------------------------------------------===//

/// This class represents the va_arg LLVM instruction, which returns an
/// argument of the specified type given a va_list and increments that list.
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
             Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value.
///
class ExtractElementInst : public Instruction {
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
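  ///
  /// Example (an illustrative sketch; `Vec` is an assumed vector value,
  /// `Idx` an assumed integer index value, and `IP` an insertion point):
  /// \code
  ///   ExtractElementInst *EE = nullptr;
  ///   if (ExtractElementInst::isValidOperands(Vec, Idx))
  ///     EE = ExtractElementInst::Create(Vec, Idx, "elt", IP);
  /// \endcode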
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)

//===----------------------------------------------------------------------===//
//                                InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value.
///
class InsertElementInst : public Instruction {
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)

//===----------------------------------------------------------------------===//
//                           ShuffleVectorInst Class
//===----------------------------------------------------------------------===//

constexpr int PoisonMaskElem = -1;

/// This instruction constructs a fixed permutation of two
/// input vectors.
///
/// For each element of the result vector, the shuffle mask selects an element
/// from one of the input vectors to copy to the result. Non-negative elements
/// in the mask represent an index into the concatenated pair of input vectors.
/// PoisonMaskElem (-1) specifies that the result element is poison.
///
/// For scalable vectors, all the elements of the mask must be 0 or -1. This
/// requirement may be relaxed in the future.
class ShuffleVectorInst : public Instruction {
  SmallVector<int, 4> ShuffleMask;
  Constant *ShuffleMaskForBitcode;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ShuffleVectorInst *cloneImpl() const;

public:
  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);
  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);
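
  // Example (an illustrative sketch; `A` and `B` are assumed <4 x i32>
  // values and `IP` an assumed insertion point):
  //
  //   int Mask[] = {0, 4, 1, 5}; // interleave the low halves of A and B
  //   auto *Shuf = new ShuffleVectorInst(A, B, Mask, "interleave", IP);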

  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  /// Swap the operands and adjust the mask to preserve the semantics
  /// of the instruction.
  void commute();

  /// Return true if a shufflevector instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *V1, const Value *V2,
                              const Value *Mask);
  static bool isValidOperands(const Value *V1, const Value *V2,
                              ArrayRef<int> Mask);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the shuffle mask value of this instruction for the given element
  /// index. Return PoisonMaskElem if the element is undef.
  int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }

  /// Convert the input shuffle mask operand to a vector of integers. Undefined
  /// elements of the mask are returned as PoisonMaskElem.
  static void getShuffleMask(const Constant *Mask,
                             SmallVectorImpl<int> &Result);

  /// Return the mask for this instruction as a vector of integers. Undefined
  /// elements of the mask are returned as PoisonMaskElem.
  void getShuffleMask(SmallVectorImpl<int> &Result) const {
    Result.assign(ShuffleMask.begin(), ShuffleMask.end());
  }

  /// Return the mask for this instruction, for use in bitcode.
  ///
  /// TODO: This is temporary until we decide a new bitcode encoding for
  /// shufflevector.
  Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }

  static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
                                                Type *ResultTy);

  void setShuffleMask(ArrayRef<int> Mask);

  ArrayRef<int> getShuffleMask() const { return ShuffleMask; }

  /// Return true if this shuffle returns a vector with a different number of
  /// elements than its source vectors.
  /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
  ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
  bool changesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts != NumMaskElts;
  }

  /// Return true if this shuffle returns a vector with a greater number of
  /// elements than its source vectors.
  /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
  bool increasesLength() const {
    unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
                                 ->getElementCount()
                                 .getKnownMinValue();
    unsigned NumMaskElts = ShuffleMask.size();
    return NumSourceElts < NumMaskElts;
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector.
  /// Example: <7,5,undef,7>
  /// This assumes that vector operands are the same length as the mask.
  static bool isSingleSourceMask(ArrayRef<int> Mask);
  static bool isSingleSourceMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSingleSourceMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without changing the length of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
  /// TODO: Optionally allow length-changing shuffles.
  bool isSingleSource() const {
    return !changesLength() && isSingleSourceMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses elements from exactly one source
  /// vector without lane crossings. A shuffle using this mask is not
  /// necessarily a no-op because it may change the number of elements from its
  /// input vectors or it may provide demanded bits knowledge via undef lanes.
  /// Example: <undef,undef,2,3>
  static bool isIdentityMask(ArrayRef<int> Mask);
  static bool isIdentityMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");

    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;

    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isIdentityMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from exactly one source
  /// vector without lane crossings and does not change the number of elements
  /// from its input vectors.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
  bool isIdentity() const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    return !changesLength() && isIdentityMask(ShuffleMask);
  }

  /// Return true if this shuffle lengthens exactly one source vector with
  /// undefs in the high elements.
  bool isIdentityWithPadding() const;

  /// Return true if this shuffle extracts the first N elements of exactly one
  /// source vector.
  bool isIdentityWithExtract() const;

  /// Return true if this shuffle concatenates its 2 source vectors. This
  /// returns false if either input is undefined. In that case, the shuffle is
  /// better classified as an identity with padding operation.
  bool isConcat() const;

  /// Return true if this shuffle mask chooses elements from its source vectors
  /// without lane crossings. A shuffle using this mask would be
  /// equivalent to a vector select with a constant condition operand.
  /// Example: <4,1,6,undef>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// This assumes that vector operands are the same length as the mask
  /// (a length-changing shuffle can never be equivalent to a vector select).
  static bool isSelectMask(ArrayRef<int> Mask);
  static bool isSelectMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSelectMask(MaskAsInts);
  }

  /// Return true if this shuffle chooses elements from its source vectors
  /// without lane crossings and all operands have the same number of elements.
  /// In other words, this shuffle is equivalent to a vector select with a
  /// constant condition operand.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
  /// This returns false if the mask does not choose from both input vectors.
  /// In that case, the shuffle is better classified as an identity shuffle.
  /// TODO: Optionally allow length-changing shuffles.
  bool isSelect() const {
    return !changesLength() && isSelectMask(ShuffleMask);
  }

  /// Return true if this shuffle mask swaps the order of elements from exactly
  /// one source vector.
  /// Example: <7,6,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isReverseMask(ArrayRef<int> Mask);
  static bool isReverseMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReverseMask(MaskAsInts);
  }

  /// Return true if this shuffle swaps the order of elements from exactly
  /// one source vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
  /// TODO: Optionally allow length-changing shuffles.
  bool isReverse() const {
    return !changesLength() && isReverseMask(ShuffleMask);
  }

  /// Return true if this shuffle mask chooses all elements with the same value
  /// as the first element of exactly one source vector.
  /// Example: <4,undef,undef,4>
  /// This assumes that vector operands are the same length as the mask.
  static bool isZeroEltSplatMask(ArrayRef<int> Mask);
  static bool isZeroEltSplatMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isZeroEltSplatMask(MaskAsInts);
  }

  /// Return true if all elements of this shuffle are the same value as the
  /// first element of exactly one source vector without changing the length
  /// of that vector.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
  /// TODO: Optionally allow length-changing shuffles.
  /// TODO: Optionally allow splats from other elements.
  bool isZeroEltSplat() const {
    return !changesLength() && isZeroEltSplatMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is a transpose mask.
  /// Transpose vector masks transpose a 2xn matrix. They read corresponding
  /// even- or odd-numbered vector elements from two n-dimensional source
  /// vectors and write each result into consecutive elements of an
  /// n-dimensional destination vector. Two shuffles are necessary to complete
  /// the transpose, one for the even elements and another for the odd elements.
  /// This description closely follows how the TRN1 and TRN2 AArch64
  /// instructions operate.
  ///
  /// For example, a simple 2x2 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b >
  ///   m1 = < c, d >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
  ///   t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
  ///
  /// For matrices having greater than n columns, the resulting nx2 transposed
  /// matrix is stored in two result vectors such that one vector contains
  /// interleaved elements from all the even-numbered rows and the other vector
  /// contains interleaved elements from all the odd-numbered rows. For example,
  /// a 2x4 matrix can be transposed with:
  ///
  ///   ; Original matrix
  ///   m0 = < a, b, c, d >
  ///   m1 = < e, f, g, h >
  ///
  ///   ; Transposed matrix
  ///   t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
  ///   t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
  static bool isTransposeMask(ArrayRef<int> Mask);
  static bool isTransposeMask(const Constant *Mask) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isTransposeMask(MaskAsInts);
  }

  /// Return true if this shuffle transposes the elements of its inputs without
  /// changing the length of the vectors. This operation may also be known as a
  /// merge or interleave. See the description for isTransposeMask() for the
  /// exact specification.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
  bool isTranspose() const {
    return !changesLength() && isTransposeMask(ShuffleMask);
  }

  /// Return true if this shuffle mask is a splice mask, concatenating the two
  /// inputs together and then extracting an original-width vector starting
  /// from the splice index.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
  static bool isSpliceMask(const Constant *Mask, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isSpliceMask(MaskAsInts, Index);
  }

  /// Return true if this shuffle splices two inputs without changing the length
  /// of the vectors. This operation concatenates the two inputs together and
  /// then extracts an original width vector starting from the splice index.
  /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
  bool isSplice(int &Index) const {
    return !changesLength() && isSpliceMask(ShuffleMask, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  /// A valid extract subvector mask returns a smaller vector from a single
  /// source operand. The base extraction index is returned as well.
  static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                     int &Index);
  static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
                                     int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an extract subvector mask.
  bool isExtractSubvectorMask(int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  /// A valid insert subvector mask inserts the lowest elements of a second
  /// source operand into an in-place first source operand.
  /// Both the subvector width and the insertion index are returned.
  static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index);
  static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
                                    int &NumSubElts, int &Index) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask is an insert subvector mask.
  bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(getType()))
      return false;

    int NumSrcElts =
        cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
    return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
  }

  /// Return true if this shuffle mask replicates each of the \p VF elements
  /// in a vector \p ReplicationFactor times.
  /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
  ///   <0,0,0,1,1,1,2,2,2,3,3,3>
  static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
                                int &VF);
  static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
                                int &VF) {
    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
    // Not possible to express a shuffle mask for a scalable vector for this
    // case.
    if (isa<ScalableVectorType>(Mask->getType()))
      return false;
    SmallVector<int, 16> MaskAsInts;
    getShuffleMask(Mask, MaskAsInts);
    return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
  }

  /// Return true if this shuffle mask is a replication mask.
  bool isReplicationMask(int &ReplicationFactor, int &VF) const;

  /// Return true if this shuffle mask represents a "clustered" mask of size
  /// VF, i.e. each index in the range [0, VF) is used exactly once in each
  /// submask of size VF.
  /// For example, the mask for \p VF=4 is:
  /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
  /// (0,1,2,3 and 3,2,0,1) uses each index in [0, VF) exactly once;
  /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
  ///                          element 3 is used twice in the second submask
  ///                          (3,3,1,0) and index 2 is not used at all.
  static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);

  /// Return true if this shuffle mask is a one-use-single-source("clustered")
  /// mask.
  bool isOneUseSingleSourceMask(int VF) const;

  /// Change values in a shuffle permute mask assuming the two vector operands
  /// of length InVecNumElts have swapped position.
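  /// For example, with InVecNumElts == 4 the mask <0,5,2,7> becomes
  /// <4,1,6,3> once the two input vectors have swapped position.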
  static void commuteShuffleMask(MutableArrayRef<int> Mask,
                                 unsigned InVecNumElts) {
    for (int &Idx : Mask) {
      if (Idx == -1)
        continue;
      Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
             "shufflevector mask index out of range");
    }
  }

  /// Return true if this shuffle interleaves its two input vectors together.
  bool isInterleave(unsigned Factor);

  /// Return true if the mask interleaves one or more input vectors together.
  ///
  /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
  /// E.g. For a Factor of 2 (LaneLen=4):
  ///   <0, 4, 1, 5, 2, 6, 3, 7>
  /// E.g. For a Factor of 3 (LaneLen=4):
  ///   <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
  /// E.g. For a Factor of 4 (LaneLen=2):
  ///   <0, 2, 6, 4, 1, 3, 7, 5>
  ///
  /// NumInputElts is the total number of elements in the input vectors.
  ///
  /// StartIndexes are the first indexes of each vector being interleaved,
  /// substituting any indexes that were undef.
  /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
  ///
  /// Note that this does not check if the input vectors are consecutive:
  /// It will return true for masks such as
  /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
  static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
                               unsigned NumInputElts,
                               SmallVectorImpl<unsigned> &StartIndexes);
  static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
                               unsigned NumInputElts) {
    SmallVector<unsigned, 8> StartIndexes;
    return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ShuffleVector;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<ShuffleVectorInst>
    : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)

//===----------------------------------------------------------------------===//
//                                ExtractValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a struct member or array
/// element value from an aggregate value.
///
class ExtractValueInst : public UnaryInstruction {
  SmallVector<unsigned, 4> Indices;

  ExtractValueInst(const ExtractValueInst &EVI);

  /// Constructors - Create an extractvalue instruction with a base aggregate
  /// value and a list of indices.  The first ctor can optionally insert before
  /// an existing instruction, the second appends the new instruction to the
  /// specified BasicBlock.
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr,
                          Instruction *InsertBefore);
  inline ExtractValueInst(Value *Agg,
                          ArrayRef<unsigned> Idxs,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractValueInst *cloneImpl() const;

public:
  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr = "",
                                  Instruction *InsertBefore = nullptr) {
    return new
      ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
  }

  static ExtractValueInst *Create(Value *Agg,
                                  ArrayRef<unsigned> Idxs,
                                  const Twine &NameStr,
                                  BasicBlock *InsertAtEnd) {
    return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
  }

  /// Returns the type of the element that would be extracted
  /// with an extractvalue instruction with the specified parameters.
  ///
  /// Null is returned if the indices are invalid for the specified type.
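  ///
  /// Example (an illustrative sketch; `STy` is an assumed struct type
  /// `{ i32, { float, i8 } }`):
  /// \code
  ///   unsigned Idxs[] = {1, 0};
  ///   Type *T = ExtractValueInst::getIndexedType(STy, Idxs); // float type
  /// \endcode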
  static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end()   const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U;                      // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   Instruction *InsertBefore)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertBefore) {
  init(Idxs, NameStr);
}

ExtractValueInst::ExtractValueInst(Value *Agg,
                                   ArrayRef<unsigned> Idxs,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd)
  : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
                     ExtractValue, Agg, InsertAtEnd) {
  init(Idxs, NameStr);
}

//===----------------------------------------------------------------------===//
//                                InsertValueInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a struct field or array element
/// value into an aggregate value.
///
class InsertValueInst : public Instruction {
  SmallVector<unsigned, 4> Indices;

  InsertValueInst(const InsertValueInst &IVI);

  /// Constructors - Create an insertvalue instruction with a base aggregate
  /// value, a value to insert, and a list of indices.  The first ctor can
  /// optionally insert before an existing instruction, the second appends
  /// the new instruction to the specified BasicBlock.
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr,
                         Instruction *InsertBefore);
  inline InsertValueInst(Value *Agg, Value *Val,
                         ArrayRef<unsigned> Idxs,
                         const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Constructors - These two constructors are convenience methods because one
  /// and two index insertvalue instructions are so common.
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
                  const Twine &NameStr = "",
                  Instruction *InsertBefore = nullptr);
  InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
            const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertValueInst *cloneImpl() const;

public:
  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
  }

  static InsertValueInst *Create(Value *Agg, Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  using idx_iterator = const unsigned*;

  inline idx_iterator idx_begin() const { return Indices.begin(); }
  inline idx_iterator idx_end()   const { return Indices.end(); }
  inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getAggregateOperand() {
    return getOperand(0);
  }
  const Value *getAggregateOperand() const {
    return getOperand(0);
  }
  static unsigned getAggregateOperandIndex() {
    return 0U;                      // get index for modifying correct operand
  }

  Value *getInsertedValueOperand() {
    return getOperand(1);
  }
  const Value *getInsertedValueOperand() const {
    return getOperand(1);
  }
  static unsigned getInsertedValueOperandIndex() {
    return 1U;                      // get index for modifying correct operand
  }

  ArrayRef<unsigned> getIndices() const {
    return Indices;
  }

  unsigned getNumIndices() const {
    return (unsigned)Indices.size();
  }

  bool hasIndices() const {
    return true;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertValue;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<InsertValueInst> :
  public FixedNumOperandTraits<InsertValueInst, 2> {
};

InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertBefore) {
  init(Agg, Val, Idxs, NameStr);
}

InsertValueInst::InsertValueInst(Value *Agg,
                                 Value *Val,
                                 ArrayRef<unsigned> Idxs,
                                 const Twine &NameStr,
                                 BasicBlock *InsertAtEnd)
  : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
                2, InsertAtEnd) {
  init(Agg, Val, Idxs, NameStr);
}

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
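
// A minimal usage sketch (not part of the original header): building an
// insertvalue instruction. `Agg`, `Elt`, and `InsertPt` are assumed names;
// `Elt`'s type must match the field selected by the index list.
//
//   // %new = insertvalue {i32, float} %agg, float %elt, 1
//   Value *NewAgg = InsertValueInst::Create(Agg, Elt, {1}, "new", InsertPt);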

//===----------------------------------------------------------------------===//
//                               PHINode Class
//===----------------------------------------------------------------------===//

// PHINode - The PHINode class is used to represent the magical mystical PHI
// node, which cannot exist in nature, but can be synthesized in a computer
// scientist's overactive imagination.
//
class PHINode : public Instruction {
  /// The number of operands allocated; NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  PHINode(const PHINode &PN);

  explicit PHINode(Type *Ty, unsigned NumReservedValues,
                   const Twine &NameStr = "",
                   Instruction *InsertBefore = nullptr)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

  PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
          BasicBlock *InsertAtEnd)
    : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
      ReservedSpace(NumReservedValues) {
    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
    setName(NameStr);
    allocHungoffUses(ReservedSpace);
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  PHINode *cloneImpl() const;

  // allocHungoffUses - this is more complicated than the generic
  // User::allocHungoffUses, because we have to allocate Uses for the incoming
  // values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

public:
  /// Constructors - NumReservedValues is a hint for the number of incoming
  /// edges that this phi node will have (use 0 if you really have no idea).
  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr = "",
                         Instruction *InsertBefore = nullptr) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
  }

  static PHINode *Create(Type *Ty, unsigned NumReservedValues,
                         const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  // Please note that we are not providing non-const iterators for blocks to
  // force all updates to go through an interface function.

  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock * const *;

  const_block_iterator block_begin() const {
    return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
  }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  ///
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number \p i.
  ///
  Value *getIncomingValue(unsigned i) const {
    return getOperand(i);
  }
  void setIncomingValue(unsigned i, Value *V) {
    assert(V && "PHI node got a null value!");
    assert(getType() == V->getType() &&
           "All operands to PHI node must be the same type as the PHI node!");
    setOperand(i, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned i) {
    return i;
  }

  static unsigned getIncomingValueNumForOperand(unsigned i) {
    return i;
  }

  /// Return incoming basic block number @p i.
  ///
  BasicBlock *getIncomingBlock(unsigned i) const {
    return block_begin()[i];
  }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  ///
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  ///
  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned i, BasicBlock *BB) {
    const_cast<block_iterator>(block_begin())[i] = BB;
  }

  /// Copies the basic blocks from \p BBRange to the incoming basic block list
  /// of this PHINode, starting at \p ToIdx.
  void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
                          uint32_t ToIdx = 0) {
    copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
  }

  /// Replace every incoming basic block \p Old with basic block \p New.
  void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
    assert(New && Old && "PHI node got a null basic block!");
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == Old)
        setIncomingBlock(Op, New);
  }

  /// Add an incoming value to the end of the PHI list
  ///
  void addIncoming(Value *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands();  // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Remove an incoming value.  This is useful if a
  /// predecessor basic block is deleted.  The value removed is returned.
  ///
  /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
  /// is true), the PHI node is destroyed and any uses of it are replaced with
  /// dummy values.  The only time there should be zero incoming values to a PHI
  /// node is when the block is dead, so this strategy is sound.
  ///
  Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);

  Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument to remove!");
    return removeIncomingValue(Idx, DeletePHIIfEmpty);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if the block is not
  /// an incoming block.
  ///
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (block_begin()[i] == BB)
        return i;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  /// Set every incoming value for block \p BB to \p V.
  void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
    assert(BB && "PHI node got a null basic block!");
    bool Found = false;
    for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == BB) {
        Found = true;
        setIncomingValue(Op, V);
      }
    (void)Found;
    assert(Found && "Invalid basic block argument to set!");
  }

  /// If the specified PHI node always merges together the
  /// same value, return the value, otherwise return null.
  Value *hasConstantValue() const;

  /// Whether the specified PHI node always merges
  /// together the same value, assuming undefs are equal to a unique
  /// non-undef value.
  bool hasConstantOrUndefValue() const;

  /// Return true if the PHI node is complete, i.e., all of its parent's
  /// predecessors have an incoming value in this PHI; otherwise return false.
  bool isComplete() const {
    return llvm::all_of(predecessors(getParent()),
                        [this](const BasicBlock *Pred) {
                          return getBasicBlockIndex(Pred) >= 0;
                        });
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::PHI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void growOperands();
};

template <>
struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
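
// A minimal usage sketch (not part of the original header): merging two
// values at a join point. `Ty`, `V1`, `V2`, `BB1`, `BB2`, and `InsertPt` are
// assumed to exist, with `V1` and `V2` both of type `Ty`.
//
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
//                                 InsertPt);
//   PN->addIncoming(V1, BB1);
//   PN->addIncoming(V2, BB2);
//   Value *FromBB1 = PN->getIncomingValueForBlock(BB1); // yields V1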

//===----------------------------------------------------------------------===//
//                           LandingPadInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
class LandingPadInst : public Instruction {
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands allocated; NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause at index Idx is a catch clause.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause at index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
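
// A minimal usage sketch (not part of the original header): populating a
// landing pad. `ExnTy` (the personality-specific result type, e.g.
// {ptr, i32}), `TypeInfo` (a Constant describing a caught type), and `PadBB`
// are assumed names; the enclosing function must have a personality set.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lpad",
//                              /*InsertAtEnd=*/PadBB);
//   LP->addClause(TypeInfo); // non-array type info => catch clause
//   LP->setCleanup(true);    // also run when unwinding past this frame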

//===----------------------------------------------------------------------===//
//                               ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Return a value (possibly void) from a function.  Execution
/// does not continue in this function any longer.
///
class ReturnInst : public Instruction {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                  - 'ret void' instruction
  // ReturnInst(    null)          - 'ret void' instruction
  // ReturnInst(Value* X)          - 'ret X'    instruction
  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }

  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};

template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
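
// A minimal usage sketch (not part of the original header): terminating a
// block with a return. `Ctx`, `RetVal`, and `BB` are assumed names, with
// `RetVal` matching the enclosing function's return type.
//
//   ReturnInst::Create(Ctx, RetVal, BB); // 'ret <ty> %RetVal' at end of BB
//   ReturnInst::Create(Ctx, BB);         // 'ret void' at end of BB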

//===----------------------------------------------------------------------===//
//                               BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
class BranchInst : public Instruction {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative to op_end().
  BranchInst(const BranchInst &BI);
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional()   const { return getNumOperands() == 3; }

  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
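
// A minimal usage sketch (not part of the original header): emitting a
// conditional branch. `Cond` (an i1 Value), `ThenBB`, `ElseBB`, and `CurBB`
// are assumed names.
//
//   BranchInst *BI = BranchInst::Create(ThenBB, ElseBB, Cond,
//                                       /*InsertAtEnd=*/CurBB);
//   assert(BI->isConditional() && BI->getSuccessor(0) == ThenBB);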

//===----------------------------------------------------------------------===//
//                               SwitchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Multiway switch
///
class SwitchInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]    = Value to switch on
  // Operand[1]    = Default basic block destination
  // Operand[2n  ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // -2; the pseudo-index used to denote the default case.
  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);

  template <typename CaseHandleT> class CaseIteratorImpl;

  /// A handle to a particular switch case. It exposes a convenient interface
  /// to both the case value and the successor block.
  ///
  /// We define this as a template and instantiate it to form both a const and
  /// non-const handle.
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    SwitchInstT *SI;
    ptrdiff_t Index;

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case.
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case.
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns the index of the current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns the successor index for the current case's successor.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };

  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;

  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    const CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    CaseHandleT Case;

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for the
    /// const variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    const CaseHandleT &operator*() const { return Case; }
  };

  using CaseIt = CaseIteratorImpl<CaseHandle>;
  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case.
  unsigned getNumCases() const {
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last in the
  /// SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator can only resolve the successor; attempting to
  /// resolve the case value triggers an assertion.
  /// Also note that incrementing or decrementing this iterator triggers an
  /// assertion and makes it invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator for it; otherwise return
  /// the default case iterator to indicate that it is handled by the default
  /// handler.
  CaseIt findCaseValue(const ConstantInt *C) {
    return CaseIt(
        this,
        const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
      return Case.getCaseValue() == C;
    });
    if (I != case_end())
      return I;

    return case_default();
  }

  /// Finds the unique case value for a given successor. Returns null if the
  /// successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest())
      return nullptr;

    ConstantInt *CI = nullptr;
    for (auto Case : cases()) {
      if (Case.getCaseSuccessor() != BB)
        continue;

      if (CI)
        return nullptr; // Multiple cases lead to BB.

      CI = Case.getCaseValue();
    }

    return CI;
  }

  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). The old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// and above the removed index.
  /// Note:
  /// This action invalidates iterators for all cases following the one
  /// removed, including the case_end() iterator. It returns an iterator for
  /// the next case.
  CaseIt removeCase(CaseIt I);

  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
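
// A minimal usage sketch (not part of the original header): building and
// querying a switch. `Cond` (an integer Value), `DefaultBB`, `BB0`, `C0`
// (a ConstantInt of `Cond`'s type), and `CurBB` are assumed names.
//
//   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1,
//                                       /*InsertAtEnd=*/CurBB);
//   SI->addCase(C0, BB0);        // case 0 lives at operands 2/3; its
//                                // successor index is case index + 1
//   SwitchInst::CaseIt It = SI->findCaseValue(C0); // case_default() if absent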

/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  std::optional<SmallVector<uint32_t, 8>> Weights;
  bool Changed = false;

protected:
  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = std::optional<uint32_t>;
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// the corresponding branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};
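
// A minimal usage sketch (not part of the original header): editing cases
// while keeping !prof branch weights consistent. `SI`, `C1`, and `BB1` are
// assumed names.
//
//   SwitchInstProfUpdateWrapper SW(*SI);
//   SW.addCase(C1, BB1, /*W=*/std::nullopt); // weight unknown for new case
//   // Any branch-weight metadata is rewritten when SW goes out of scope.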

template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)

//===----------------------------------------------------------------------===//
//                             IndirectBrInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  unsigned ReservedSpace;

  // Operand[0]   = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to.  The number of expected destinations can be specified
  /// here to make memory allocation more efficient.  This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to.  The number of expected destinations can be specified
  /// here to make memory allocation more efficient.  This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for IndirectBrInst instruction.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// Return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
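
// A minimal usage sketch (not part of the original header): jumping through
// a block address. `TargetBB` and `CurBB` are assumed names; `TargetBB`'s
// address must be taken via BlockAddress for the jump to be well formed.
//
//   Value *Addr = BlockAddress::get(TargetBB);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1,
//                                                /*InsertAtEnd=*/CurBB);
//   IBI->addDestination(TargetBB);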

//===----------------------------------------------------------------------===//
//                               InvokeInst Class
//===----------------------------------------------------------------------===//

/// Invoke instruction.  The SubclassData field is used to hold the
/// calling convention of the call.
///
class InvokeInst : public CallBase {
  /// The number of operands for this call beyond the called function,
  /// arguments, and operand bundles.
  static constexpr int NumExtraOperands = 2;

  /// The index from the end of the operand array to the normal destination.
  static constexpr int NormalDestOpEndIdx = -3;

  /// The index from the end of the operand array to the unwind destination.
  static constexpr int UnwindDestOpEndIdx = -2;

  InvokeInst(const InvokeInst &BI);

  /// Construct an InvokeInst given a range of arguments.
  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                    BasicBlock *IfException, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
            BasicBlock *IfException, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InvokeInst *cloneImpl() const;

public:
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, std::nullopt, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p II with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned invoke instruction is identical to \p II in every way except
  /// that the operand bundles for the new instruction are set to the operand
  /// bundles in \p Bundles.
  static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  // get*Dest - Return the destination basic blocks...
  BasicBlock *getNormalDest() const {
    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
  }
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  void setNormalDest(BasicBlock *B) {
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }

  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }

  unsigned getNumSuccessors() const { return 2; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}

InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
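
// A minimal usage sketch (not part of the original header): calling a
// function that may unwind. `Callee` (a FunctionCallee), `Arg`, `NormalBB`,
// `UnwindBB` (whose first non-PHI instruction must be a landingpad), and
// `InsertPt` are assumed names.
//
//   InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, {Arg},
//                                       "call", /*InsertBefore=*/InsertPt);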

//===----------------------------------------------------------------------===//
//                              CallBrInst Class
//===----------------------------------------------------------------------===//

/// CallBr instruction, tracking function calls that may not return control but
/// instead transfer it to a third location. The SubclassData field is used to
/// hold the calling convention of the call.
///
class CallBrInst : public CallBase {

  unsigned NumIndirectDests;

  CallBrInst(const CallBrInst &BI);

  /// Construct a CallBrInst given a range of arguments.
  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, Instruction *InsertBefore);

  inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                    ArrayRef<BasicBlock *> IndirectDests,
                    ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                    const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
            ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
                                int NumBundleInputs = 0) {
    // We need one operand for the called function and one for the default
    // destination, plus the indirect destination and input operand counts
    // provided.
    return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallBrInst *cloneImpl() const;

public:
  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *
  Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
         ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
         ArrayRef<OperandBundleDef> Bundles = std::nullopt,
         const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
    return new (NumOperands)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionType *Ty, Value *Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
                                         CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
                   NumOperands, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = std::nullopt,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertBefore);
  }

  static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args, const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, NameStr, InsertAtEnd);
  }

  static CallBrInst *Create(FunctionCallee Func,
                            BasicBlock *DefaultDest,
                            ArrayRef<BasicBlock *> IndirectDests,
                            ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
                  IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CBI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned callbr instruction is identical to \p CBI in every way
  /// except that the operand bundles for the new instruction are set to the
  /// operand bundles in \p Bundles.
  static CallBrInst *Create(CallBrInst *CBI,
                            ArrayRef<OperandBundleDef> Bundles,
                            Instruction *InsertPt = nullptr);

  /// Return the number of callbr indirect dest labels.
  ///
  unsigned getNumIndirectDests() const { return NumIndirectDests; }

  /// Return the i-th indirect dest label.
  ///
  Value *getIndirectDestLabel(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
  }

  Value *getIndirectDestLabelUse(unsigned i) const {
    assert(i < getNumIndirectDests() && "Out of bounds!");
    return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
  }

  // Return the destination basic blocks...
  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
  }
  BasicBlock *getIndirectDest(unsigned i) const {
    return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
  }
  SmallVector<BasicBlock *, 16> getIndirectDests() const {
    SmallVector<BasicBlock *, 16> IndirectDests;
    for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
      IndirectDests.push_back(getIndirectDest(i));
    return IndirectDests;
  }
  void setDefaultDest(BasicBlock *B) {
    *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
  }
  void setIndirectDest(unsigned i, BasicBlock *B) {
    *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
  }

  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() &&
           "Successor # out of range for callbr!");
    return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
  }

  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < getNumIndirectDests() + 1 &&
           "Successor # out of range for callbr!");
    return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
  }

  unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CallBr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}

CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
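
// Usage sketch (illustrative only; `F`, `DefaultBB`, `IndirectBB`, and `Args`
// are hypothetical values assumed to be in scope and type-correct):
//
//   CallBrInst *CBI =
//       CallBrInst::Create(F->getFunctionType(), F, DefaultBB, {IndirectBB},
//                          Args, "res");
//   assert(CBI->getNumSuccessors() == 2); // the default + 1 indirect dest
//   BasicBlock *Default = CBI->getDefaultDest();
//   BasicBlock *Indirect = CBI->getIndirectDest(0);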

//===----------------------------------------------------------------------===//
//                              ResumeInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Resume the propagation of an exception.
///
class ResumeInst : public Instruction {
  ResumeInst(const ResumeInst &RI);

  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ResumeInst *cloneImpl() const;

public:
  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
    return new(1) ResumeInst(Exn, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor.
  Value *getValue() const { return Op<0>(); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};

template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
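
// Usage sketch (illustrative only; `Exn` is a hypothetical exception value,
// e.g. the result of a landingpad, and `BB` a hypothetical block):
//
//   ResumeInst *RI = ResumeInst::Create(Exn, BB); // appended to BB
//   Value *V = RI->getValue();            // the exception being propagated
//   assert(RI->getNumSuccessors() == 0);  // resume is a terminator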

//===----------------------------------------------------------------------===//
//                         CatchSwitchInst Class
//===----------------------------------------------------------------------===//
class CatchSwitchInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new catchswitch instruction, specifying a parent pad and an
  /// optional unwind destination.  The number of handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  Instruction *InsertBefore);

  /// Create a new catchswitch instruction, specifying a parent pad and an
  /// optional unwind destination.  The number of handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessors for the parent pad.
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessors for the unwind destination.
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  BasicBlock *getUnwindDest() const {
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }

  /// Return the number of 'handlers' in this catchswitch
  /// instruction, excluding the unwind destination.
  unsigned getNumHandlers() const {
    if (hasUnwindDest())
      return getNumOperands() - 2;
    return getNumOperands() - 1;
  }

private:
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
  using handler_range = iterator_range<handler_iterator>;
  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
  using const_handler_iterator =
      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
  using const_handler_range = iterator_range<const_handler_iterator>;

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  handler_iterator handler_begin() {
    op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return handler_iterator(It, DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_begin() const {
    const_op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  handler_iterator handler_end() {
    return handler_iterator(op_end(), DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_end() const {
    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
  }

  /// iteration adapter for range-for loops.
  handler_range handlers() {
    return make_range(handler_begin(), handler_end());
  }

  /// iteration adapter for range-for loops.
  const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
  }

  /// Add an entry to this catchswitch instruction.
  /// Note:
  /// This action invalidates handler_end(). The old handler_end() iterator
  /// will point to the added handler.
  void addHandler(BasicBlock *Dest);

  void removeHandler(handler_iterator HI);

  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
  }
  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    setOperand(Idx + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchSwitch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

template <>
struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
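
// Usage sketch (illustrative only; `ParentPad`, `UnwindBB`, `HandlerBB`, and
// `BB` are hypothetical; pass a null `UnwindBB` to unwind to the caller):
//
//   CatchSwitchInst *CS = CatchSwitchInst::Create(ParentPad, UnwindBB,
//                                                 /*NumHandlers=*/1, "cs", BB);
//   CS->addHandler(HandlerBB);  // may grow the hung-off operand list
//   for (BasicBlock *Handler : CS->handlers())
//     (void)Handler;            // visits handler blocks, not the unwind dest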

//===----------------------------------------------------------------------===//
//                               CleanupPadInst Class
//===----------------------------------------------------------------------===//
class CleanupPadInst : public FuncletPadInst {
private:
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          Instruction *InsertBefore)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertBefore) {}
  explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
                          unsigned Values, const Twine &NameStr,
                          BasicBlock *InsertAtEnd)
      : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
                       NameStr, InsertAtEnd) {}

public:
  static CleanupPadInst *Create(Value *ParentPad,
                                ArrayRef<Value *> Args = std::nullopt,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
  }

  static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
                                const Twine &NameStr, BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CleanupPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                               CatchPadInst Class
//===----------------------------------------------------------------------===//
class CatchPadInst : public FuncletPadInst {
private:
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        Instruction *InsertBefore)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertBefore) {}
  explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
                        unsigned Values, const Twine &NameStr,
                        BasicBlock *InsertAtEnd)
      : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
                       NameStr, InsertAtEnd) {}

public:
  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr = "",
                              Instruction *InsertBefore = nullptr) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
  }

  static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
                              const Twine &NameStr, BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + Args.size();
    return new (Values)
        CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
  }

  /// Convenience accessors
  CatchSwitchInst *getCatchSwitch() const {
    return cast<CatchSwitchInst>(Op<-1>());
  }
  void setCatchSwitch(Value *CatchSwitch) {
    assert(CatchSwitch);
    Op<-1>() = CatchSwitch;
  }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
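
// Usage sketch (illustrative only; `CS` is a hypothetical CatchSwitchInst *
// and `TypeInfo` a hypothetical Value * describing what the pad catches):
//
//   CatchPadInst *CPI = CatchPadInst::Create(CS, {TypeInfo}, "catch");
//   assert(CPI->getCatchSwitch() == CS);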

//===----------------------------------------------------------------------===//
//                               CatchReturnInst Class
//===----------------------------------------------------------------------===//

class CatchReturnInst : public Instruction {
  CatchReturnInst(const CatchReturnInst &RI);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);

  void init(Value *CatchPad, BasicBlock *BB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchReturnInst *cloneImpl() const;

public:
  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore = nullptr) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
  }

  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd) {
    assert(CatchPad);
    assert(BB);
    return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessors.
  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
  void setCatchPad(CatchPadInst *CatchPad) {
    assert(CatchPad);
    Op<0>() = CatchPad;
  }

  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
  void setSuccessor(BasicBlock *NewSucc) {
    assert(NewSucc);
    Op<1>() = NewSucc;
  }
  unsigned getNumSuccessors() const { return 1; }

  /// Get the parentPad of this catchret's catchpad's catchswitch.
  /// The successor block is implicitly a member of this funclet.
  Value *getCatchSwitchParentPad() const {
    return getCatchPad()->getCatchSwitch()->getParentPad();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CatchRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    return getSuccessor();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
    setSuccessor(B);
  }
};

template <>
struct OperandTraits<CatchReturnInst>
    : public FixedNumOperandTraits<CatchReturnInst, 2> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
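
// Usage sketch (illustrative only; `CPI` is a hypothetical CatchPadInst * and
// `ContBB` the hypothetical block where normal control flow resumes):
//
//   CatchReturnInst *CRI = CatchReturnInst::Create(CPI, ContBB);
//   assert(CRI->getSuccessor() == ContBB);
//   Value *ParentPad = CRI->getCatchSwitchParentPad();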

//===----------------------------------------------------------------------===//
//                               CleanupReturnInst Class
//===----------------------------------------------------------------------===//

class CleanupReturnInst : public Instruction {
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    Instruction *InsertBefore = nullptr);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    BasicBlock *InsertAtEnd);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   Instruction *InsertBefore = nullptr) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
                                   BasicBlock *InsertAtEnd) {
    assert(CleanupPad);
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad);
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest);
    assert(hasUnwindDest());
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0);
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0);
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};

template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
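
// Usage sketch (illustrative only; `CP`, `UnwindBB`, and `BB` are
// hypothetical). A cleanupret carries one operand when it unwinds to the
// caller and two when an unwind destination is present:
//
//   CleanupReturnInst *CRI = CleanupReturnInst::Create(CP, UnwindBB, BB);
//   assert(CRI->hasUnwindDest() && CRI->getNumSuccessors() == 1);
//   CleanupReturnInst *ToCaller = CleanupReturnInst::Create(CP);
//   assert(ToCaller->unwindsToCaller());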

//===----------------------------------------------------------------------===//
//                           UnreachableInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// This instruction has undefined behavior.  In particular, its
/// presence indicates some higher level knowledge that the end of the block
/// cannot be reached.
///
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!");
  }
};

//===----------------------------------------------------------------------===//
//                                 TruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of integer types.
class TruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical TruncInst
  TruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  TruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The (smaller) type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  TruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The (smaller) type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Trunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
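
// Usage sketch (illustrative only; the same pattern applies to the other
// CastInst subclasses below; `V` is a hypothetical i32 value, `Ctx` its
// LLVMContext, and `InsertPt` a hypothetical insertion point):
//
//   TruncInst *T = new TruncInst(V, Type::getInt8Ty(Ctx), "lo8", InsertPt);
//   assert(isa<CastInst>(T)); // classof dispatches on the Trunc opcode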

//===----------------------------------------------------------------------===//
//                                 ZExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents zero extension of integer types.
class ZExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ZExtInst
  ZExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  ZExtInst(
    Value *S,                           ///< The value to be zero extended
    Type *Ty,                           ///< The type to zero extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  ZExtInst(
    Value *S,                     ///< The value to be zero extended
    Type *Ty,                     ///< The type to zero extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == ZExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 SExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents a sign extension of integer types.
class SExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SExtInst
  SExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SExtInst(
    Value *S,                           ///< The value to be sign extended
    Type *Ty,                           ///< The type to sign extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SExtInst(
    Value *S,                     ///< The value to be sign extended
    Type *Ty,                     ///< The type to sign extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPTruncInst Class
//===----------------------------------------------------------------------===//

/// This class represents a truncation of floating point types.
class FPTruncInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPTruncInst
  FPTruncInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPTruncInst(
    Value *S,                           ///< The value to be truncated
    Type *Ty,                           ///< The type to truncate to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPTruncInst(
    Value *S,                     ///< The value to be truncated
    Type *Ty,                     ///< The type to truncate to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPTrunc;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPExtInst Class
//===----------------------------------------------------------------------===//

/// This class represents an extension of floating point types.
class FPExtInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPExtInst
  FPExtInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPExtInst(
    Value *S,                           ///< The value to be extended
    Type *Ty,                           ///< The type to extend to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPExtInst(
    Value *S,                     ///< The value to be extended
    Type *Ty,                     ///< The type to extend to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPExt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 UIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast unsigned integer to floating point.
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 SIToFPInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from signed integer to floating point.
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPToUIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to unsigned integer
class FPToUIInst  : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 FPToSIInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from floating point to signed integer.
class FPToSIInst  : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                                 PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                             BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

//===----------------------------------------------------------------------===//
//                          AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be casted
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
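
// Usage sketch (illustrative only; `Ptr` is a hypothetical pointer value and
// `DstTy` a hypothetical pointer type in a different address space):
//
//   AddrSpaceCastInst *ASC = new AddrSpaceCastInst(Ptr, DstTy, "ascast");
//   unsigned From = ASC->getSrcAddressSpace(); // address space of `Ptr`
//   unsigned To = ASC->getDestAddressSpace();  // address space of `DstTy`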

//===----------------------------------------------------------------------===//
//                          Helper functions
//===----------------------------------------------------------------------===//

/// A helper function that returns the pointer operand of a load or store
/// instruction.  Returns nullptr if the value is neither a load nor a store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the pointer operand of a load, store,
/// or GEP instruction.  Returns nullptr if the value is none of these.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}
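
// Usage sketch (illustrative only; `I` is a hypothetical Value *):
//
//   if (const Value *Ptr = getPointerOperand(I)) {
//     // `I` is a load, store, or GEP, and `Ptr` is its pointer operand.
//   }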

/// A helper function that returns the alignment of a load or store
/// instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns the type of a load or store instruction.
inline Type *getLoadStoreType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}

/// A helper function that returns an atomic operation's sync scope; returns
/// std::nullopt if it is not an atomic operation.
inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
  if (!I->isAtomic())
    return std::nullopt;
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
  llvm_unreachable("unhandled atomic operation");
}
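
// Usage sketch (illustrative only; `I` is a hypothetical Instruction *):
//
//   if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I)) {
//     // `I` is atomic; pinned scopes can be compared without string lookups.
//     bool SingleThreaded = (*SSID == SyncScope::SingleThread);
//   }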

//===----------------------------------------------------------------------===//
//                              FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze instruction, which returns an arbitrary
/// but fixed concrete value if its operand is either a poison or an undef
/// value (and otherwise returns its operand unchanged).
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H
//===- llvm/IR/TrackingMDRef.h - Tracking Metadata references ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// References to metadata that track RAUW.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_TRACKINGMDREF_H
#define LLVM_IR_TRACKINGMDREF_H

#include "llvm/IR/Metadata.h"
#include <algorithm>
#include <cassert>

namespace llvm {

/// Tracking metadata reference.
///
/// This class behaves like \a TrackingVH, but for metadata.
class TrackingMDRef {
  Metadata *MD = nullptr;

public:
  TrackingMDRef() = default;
  explicit TrackingMDRef(Metadata *MD) : MD(MD) { track(); }

  TrackingMDRef(TrackingMDRef &&X) : MD(X.MD) { retrack(X); }
  TrackingMDRef(const TrackingMDRef &X) : MD(X.MD) { track(); }

  TrackingMDRef &operator=(TrackingMDRef &&X) {
    if (&X == this)
      return *this;

    untrack();
    MD = X.MD;
    retrack(X);
    return *this;
  }

  TrackingMDRef &operator=(const TrackingMDRef &X) {
    if (&X == this)
      return *this;

    untrack();
    MD = X.MD;
    track();
    return *this;
  }

  ~TrackingMDRef() { untrack(); }

  Metadata *get() const { return MD; }
  operator Metadata *() const { return get(); }
  Metadata *operator->() const { return get(); }
  Metadata &operator*() const { return *get(); }

  void reset() {
    untrack();
    MD = nullptr;
  }
  void reset(Metadata *MD) {
    untrack();
    this->MD = MD;
    track();
  }

  /// Check whether this has a trivial destructor.
  ///
  /// If \c MD isn't replaceable, the destructor will be a no-op.
  bool hasTrivialDestructor() const {
    return !MD || !MetadataTracking::isReplaceable(*MD);
  }

  bool operator==(const TrackingMDRef &X) const { return MD == X.MD; }
  bool operator!=(const TrackingMDRef &X) const { return MD != X.MD; }

private:
  void track() {
    if (MD)
      MetadataTracking::track(MD);
  }

  void untrack() {
    if (MD)
      MetadataTracking::untrack(MD);
  }

  void retrack(TrackingMDRef &X) {
    assert(MD == X.MD && "Expected values to match");
    if (X.MD) {
      MetadataTracking::retrack(X.MD, MD);
      X.MD = nullptr;
    }
  }
};
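
// Usage sketch (illustrative only; `Temp` is a hypothetical temporary MDNode
// that is later RAUW'd to `Resolved`): the reference follows the update, so
// no dangling pointer is left behind.
//
//   TrackingMDRef Ref(Temp);
//   // ... elsewhere: Temp->replaceAllUsesWith(Resolved); ...
//   Metadata *Current = Ref.get(); // now points at `Resolved`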

/// Typed tracking ref.
///
/// Track references of a particular type.  It's useful to use this for \a
/// MDNode and \a ValueAsMetadata.
template <class T> class TypedTrackingMDRef {
  TrackingMDRef Ref;

public:
  TypedTrackingMDRef() = default;
  explicit TypedTrackingMDRef(T *MD) : Ref(static_cast<Metadata *>(MD)) {}

  TypedTrackingMDRef(TypedTrackingMDRef &&X) : Ref(std::move(X.Ref)) {}
  TypedTrackingMDRef(const TypedTrackingMDRef &X) : Ref(X.Ref) {}

  TypedTrackingMDRef &operator=(TypedTrackingMDRef &&X) {
    Ref = std::move(X.Ref);
    return *this;
  }

  TypedTrackingMDRef &operator=(const TypedTrackingMDRef &X) {
    Ref = X.Ref;
    return *this;
  }

  T *get() const { return (T *)Ref.get(); }
  operator T *() const { return get(); }
  T *operator->() const { return get(); }
  T &operator*() const { return *get(); }

  bool operator==(const TypedTrackingMDRef &X) const { return Ref == X.Ref; }
  bool operator!=(const TypedTrackingMDRef &X) const { return Ref != X.Ref; }

  void reset() { Ref.reset(); }
  void reset(T *MD) { Ref.reset(static_cast<Metadata *>(MD)); }

  /// Check whether this has a trivial destructor.
  bool hasTrivialDestructor() const { return Ref.hasTrivialDestructor(); }
};

using TrackingMDNodeRef = TypedTrackingMDRef<MDNode>;
using TrackingValueAsMetadataRef = TypedTrackingMDRef<ValueAsMetadata>;

// Expose the underlying metadata to casting.
template <> struct simplify_type<TrackingMDRef> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(TrackingMDRef &MD) { return MD.get(); }
};

template <> struct simplify_type<const TrackingMDRef> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(const TrackingMDRef &MD) {
    return MD.get();
  }
};

template <class T> struct simplify_type<TypedTrackingMDRef<T>> {
  using SimpleType = T *;

  static SimpleType getSimplifiedValue(TypedTrackingMDRef<T> &MD) {
    return MD.get();
  }
};

template <class T> struct simplify_type<const TypedTrackingMDRef<T>> {
  using SimpleType = T *;

  static SimpleType getSimplifiedValue(const TypedTrackingMDRef<T> &MD) {
    return MD.get();
  }
};

} // end namespace llvm

#endif // LLVM_IR_TRACKINGMDREF_H
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_BPF_ENUMS_H
#define LLVM_IR_INTRINSIC_BPF_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum BPFIntrinsics : unsigned {
// Enum values for intrinsics
    bpf_btf_type_id = 3154,                           // llvm.bpf.btf.type.id
    bpf_compare,                               // llvm.bpf.compare
    bpf_load_byte,                             // llvm.bpf.load.byte
    bpf_load_half,                             // llvm.bpf.load.half
    bpf_load_word,                             // llvm.bpf.load.word
    bpf_passthrough,                           // llvm.bpf.passthrough
    bpf_preserve_enum_value,                   // llvm.bpf.preserve.enum.value
    bpf_preserve_field_info,                   // llvm.bpf.preserve.field.info
    bpf_preserve_type_info,                    // llvm.bpf.preserve.type.info
    bpf_pseudo,                                // llvm.bpf.pseudo
}; // enum
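
// Usage sketch (M is an assumed Module *): these values are plain
// Intrinsic::IDs, so they can be used wherever an intrinsic ID is expected,
// e.g. to materialize a declaration:
//
//   Function *F = Intrinsic::getDeclaration(M, Intrinsic::bpf_load_byte);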
} // namespace Intrinsic
} // namespace llvm

#endif
//===- llvm/LLVMContext.h - Class for managing "global" state ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares LLVMContext, a container of "global" state in LLVM, such
// as the global type and constant uniquing tables.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_LLVMCONTEXT_H
#define LLVM_IR_LLVMCONTEXT_H

#include "llvm-c/Types.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/Support/CBindingWrapping.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>

namespace llvm {

class DiagnosticInfo;
enum DiagnosticSeverity : char;
class Function;
class Instruction;
class LLVMContextImpl;
class Module;
class OptPassGate;
template <typename T> class SmallVectorImpl;
template <typename T> class StringMapEntry;
class StringRef;
class Twine;
class LLVMRemarkStreamer;

namespace remarks {
class RemarkStreamer;
}

namespace SyncScope {

typedef uint8_t ID;

/// Known synchronization scope IDs, which always have the same value.  All
/// synchronization scope IDs that LLVM has special knowledge of are listed
/// here.  Additionally, this scheme allows LLVM to efficiently check for a
/// specific synchronization scope ID without comparing strings.
enum {
  /// Synchronized with respect to signal handlers executing in the same thread.
  SingleThread = 0,

  /// Synchronized with respect to all concurrently executing threads.
  System = 1
};

} // end namespace SyncScope

/// This is an important class for using LLVM in a threaded context.  It
/// (opaquely) owns and manages the core "global" data of LLVM's core
/// infrastructure, including the type and constant uniquing tables.
/// LLVMContext itself provides no locking guarantees, so you should be careful
/// to have one context per thread.
class LLVMContext {
public:
  LLVMContextImpl *const pImpl;
  LLVMContext();
  LLVMContext(const LLVMContext &) = delete;
  LLVMContext &operator=(const LLVMContext &) = delete;
  ~LLVMContext();

  // Pinned metadata names, which always have the same value.  This is a
  // compile-time performance optimization, not a correctness optimization.
  enum : unsigned {
#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
#include "llvm/IR/FixedMetadataKinds.def"
#undef LLVM_FIXED_MD_KIND
  };

  /// Known operand bundle tag IDs, which always have the same value.  All
  /// operand bundle tags that LLVM has special knowledge of are listed here.
  /// Additionally, this scheme allows LLVM to efficiently check for specific
  /// operand bundle tags without comparing strings. Keep this in sync with
  /// LLVMContext::LLVMContext().
  enum : unsigned {
    OB_deopt = 0,                  // "deopt"
    OB_funclet = 1,                // "funclet"
    OB_gc_transition = 2,          // "gc-transition"
    OB_cfguardtarget = 3,          // "cfguardtarget"
    OB_preallocated = 4,           // "preallocated"
    OB_gc_live = 5,                // "gc-live"
    OB_clang_arc_attachedcall = 6, // "clang.arc.attachedcall"
    OB_ptrauth = 7,                // "ptrauth"
    OB_kcfi = 8,                   // "kcfi"
    OB_convergencectrl = 9,        // "convergencectrl"
  };

  /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
  /// This ID is uniqued across modules in the current LLVMContext.
  unsigned getMDKindID(StringRef Name) const;
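
  // For example (Ctx, Inst, Node, and the kind name are illustrative):
  //
  //   unsigned KindID = Ctx.getMDKindID("my.custom.kind");
  //   Inst->setMetadata(KindID, Node);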

  /// getMDKindNames - Populate the client-supplied SmallVector with the names
  /// of the custom metadata IDs registered in this LLVMContext.
  void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;

  /// getOperandBundleTags - Populate client supplied SmallVector with the
  /// bundle tags registered in this LLVMContext.  The bundle tags are ordered
  /// by increasing bundle IDs.
  /// \see LLVMContext::getOperandBundleTagID
  void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;

  /// getOrInsertBundleTag - Returns the Tag to use for an operand bundle of
  /// name TagName.
  StringMapEntry<uint32_t> *getOrInsertBundleTag(StringRef TagName) const;

  /// getOperandBundleTagID - Maps a bundle tag to an integer ID.  Every bundle
  /// tag registered with an LLVMContext has an unique ID.
  uint32_t getOperandBundleTagID(StringRef Tag) const;
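
  // Sketch (Ctx is an assumed context): the pinned tags above always map to
  // their fixed IDs, e.g.
  //
  //   assert(Ctx.getOperandBundleTagID("deopt") == LLVMContext::OB_deopt);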

  /// getOrInsertSyncScopeID - Maps a synchronization scope name to a
  /// synchronization scope ID.  Every synchronization scope registered with an
  /// LLVMContext has a unique ID, apart from the pre-defined ones.
  SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
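
  // Sketch (Ctx and InsertBefore are assumed; the scope name is
  // target-specific and illustrative):
  //
  //   SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
  //   new FenceInst(Ctx, AtomicOrdering::Release, AgentSSID, InsertBefore);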

  /// getSyncScopeNames - Populates client supplied SmallVector with
  /// synchronization scope names registered with LLVMContext.  Synchronization
  /// scope names are ordered by increasing synchronization scope IDs.
  void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;

  /// Define the GC for a function
  void setGC(const Function &Fn, std::string GCName);

  /// Return the GC for a function
  const std::string &getGC(const Function &Fn);

  /// Remove the GC for a function
  void deleteGC(const Function &Fn);

  /// Return true if the Context runtime configuration is set to discard all
  /// value names. When true, only GlobalValue names will be available in the
  /// IR.
  bool shouldDiscardValueNames() const;

  /// Set the Context runtime configuration to discard all value names (except
  /// GlobalValue names). Clients can use this flag to save memory and runtime,
  /// especially in release mode.
  void setDiscardValueNames(bool Discard);

  /// Whether there is a string map for uniquing debug info
  /// identifiers across the context.  Off by default.
  bool isODRUniquingDebugTypes() const;
  void enableDebugTypeODRUniquing();
  void disableDebugTypeODRUniquing();

  /// Defines the type of a yield callback.
  /// \see LLVMContext::setYieldCallback.
  using YieldCallbackTy = void (*)(LLVMContext *Context, void *OpaqueHandle);

  /// setDiagnosticHandlerCallBack - This method sets a handler callback that
  /// is invoked when the backend needs to report anything to the user.
  /// The first argument is a function pointer and the second is a context pointer
  /// that gets passed into the DiagHandler.  The third argument should be set to
  /// true if the handler only expects enabled diagnostics.
  ///
  /// LLVMContext doesn't take ownership or interpret either of these
  /// pointers.
  void setDiagnosticHandlerCallBack(
      DiagnosticHandler::DiagnosticHandlerTy DiagHandler,
      void *DiagContext = nullptr, bool RespectFilters = false);
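
  // A minimal handler sketch (the error counter is illustrative):
  //
  //   static void countErrors(const DiagnosticInfo &DI, void *Context) {
  //     if (DI.getSeverity() == DS_Error)
  //       ++*static_cast<unsigned *>(Context);
  //   }
  //
  //   unsigned NumErrors = 0;
  //   Ctx.setDiagnosticHandlerCallBack(countErrors, &NumErrors);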

  /// setDiagnosticHandler - This method installs a unique_ptr to a
  /// DiagnosticHandler object to provide custom diagnostic handling. The first
  /// argument is a unique_ptr to a DiagnosticHandler or a class derived from
  /// it. The second argument should be set to true if the handler only expects
  /// enabled diagnostics.
  ///
  /// Ownership of this pointer is moved to LLVMContextImpl.
  void setDiagnosticHandler(std::unique_ptr<DiagnosticHandler> &&DH,
                            bool RespectFilters = false);

  /// getDiagnosticHandlerCallBack - Return the diagnostic handler callback set
  /// by setDiagnosticHandlerCallBack.
  DiagnosticHandler::DiagnosticHandlerTy getDiagnosticHandlerCallBack() const;

  /// getDiagnosticContext - Return the diagnostic context set by
  /// setDiagnosticHandlerCallBack.
  void *getDiagnosticContext() const;

  /// getDiagHandlerPtr - Returns a const raw pointer to the DiagnosticHandler
  /// set by setDiagnosticHandler.
  const DiagnosticHandler *getDiagHandlerPtr() const;

  /// getDiagnosticHandler - Transfers ownership of the DiagnosticHandler
  /// unique_ptr to the caller.
  std::unique_ptr<DiagnosticHandler> getDiagnosticHandler();

  /// Return if a code hotness metric should be included in optimization
  /// diagnostics.
  bool getDiagnosticsHotnessRequested() const;
  /// Set if a code hotness metric should be included in optimization
  /// diagnostics.
  void setDiagnosticsHotnessRequested(bool Requested);

  bool getMisExpectWarningRequested() const;
  void setMisExpectWarningRequested(bool Requested);
  void setDiagnosticsMisExpectTolerance(std::optional<uint32_t> Tolerance);
  uint32_t getDiagnosticsMisExpectTolerance() const;

  /// Return the minimum hotness value a diagnostic would need in order
  /// to be included in optimization diagnostics.
  ///
  /// Three possible return values:
  /// 0            - threshold is disabled. Everything will be printed out.
  /// positive int - threshold is set.
  /// UINT64_MAX   - threshold is not yet set, and needs to be synced from
  ///                profile summary. Note that if the profile summary is
  ///                missing, the threshold is kept at "MAX", effectively
  ///                suppressing all remark output.
  uint64_t getDiagnosticsHotnessThreshold() const;

  /// Set the minimum hotness value a diagnostic needs in order to be
  /// included in optimization diagnostics.
  void setDiagnosticsHotnessThreshold(std::optional<uint64_t> Threshold);

  /// Return if hotness threshold is requested from PSI.
  bool isDiagnosticsHotnessThresholdSetFromPSI() const;

  /// The "main remark streamer" used by all the specialized remark streamers.
  /// This streamer keeps generic remark metadata in memory throughout the life
  /// of the LLVMContext. This metadata may be emitted in a section in object
  /// files depending on the format requirements.
  ///
  /// All specialized remark streamers should convert remarks to
  /// llvm::remarks::Remark and emit them through this streamer.
  remarks::RemarkStreamer *getMainRemarkStreamer();
  const remarks::RemarkStreamer *getMainRemarkStreamer() const;
  void setMainRemarkStreamer(
      std::unique_ptr<remarks::RemarkStreamer> MainRemarkStreamer);

  /// The "LLVM remark streamer" used by LLVM to serialize remark diagnostics
  /// coming from IR and MIR passes.
  ///
  /// If it does not exist, diagnostics are not saved in a file but only emitted
  /// via the diagnostic handler.
  LLVMRemarkStreamer *getLLVMRemarkStreamer();
  const LLVMRemarkStreamer *getLLVMRemarkStreamer() const;
  void
  setLLVMRemarkStreamer(std::unique_ptr<LLVMRemarkStreamer> RemarkStreamer);

  /// Get the prefix that should be printed in front of a diagnostic of the
  /// given \p Severity.
  static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity);

  /// Report a message to the currently installed diagnostic handler.
  ///
  /// Note that this function returns even in the case of error reporting
  /// (DI.Severity == \a DS_Error), so the caller should leave the compilation
  /// process in a self-consistent state, even though the generated code
  /// need not be correct.
  ///
  /// The diagnostic message will be implicitly prefixed with a severity keyword
  /// according to \p DI.getSeverity(), i.e., "error: " for \a DS_Error,
  /// "warning: " for \a DS_Warning, and "note: " for \a DS_Note.
  void diagnose(const DiagnosticInfo &DI);

  /// Registers a yield callback with the given context.
  ///
  /// The yield callback function may be called by LLVM to transfer control back
  /// to the client that invoked the LLVM compilation. This can be used to yield
  /// control of the thread, or perform periodic work needed by the client.
  /// There is no guaranteed frequency at which callbacks must occur; in fact,
  /// the client is not guaranteed to ever receive this callback. It is at the
  /// sole discretion of LLVM to do so and only if it can guarantee that
  /// suspending the thread won't block any forward progress in other LLVM
  /// contexts in the same process.
  ///
  /// At a suspend point, the state of the current LLVM context is intentionally
  /// undefined. No assumptions about it can or should be made. Only LLVM
  /// context API calls that explicitly state that they can be used during a
  /// yield callback are allowed to be used. Any other API calls into the
  /// context are not supported until the yield callback function returns
  /// control to LLVM. Other LLVM contexts are unaffected by this restriction.
  void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle);
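
  // Sketch (the handle type is illustrative): a capture-free lambda decays to
  // YieldCallbackTy.
  //
  //   Ctx.setYieldCallback([](LLVMContext *, void *Handle) {
  //     static_cast<EventPump *>(Handle)->pump();
  //   }, &ThePump);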

  /// Calls the yield callback (if applicable).
  ///
  /// This transfers control of the current thread back to the client, which may
  /// suspend the current thread. Only call this method when LLVM doesn't hold
  /// any global mutex or cannot block the execution in another LLVM context.
  void yield();

  /// emitError - Emit an error message to the currently installed error handler
  /// with optional location information.  This function returns, so code should
  /// be prepared to drop the erroneous construct on the floor and "not crash".
  /// The generated code need not be correct.  The error message will be
  /// implicitly prefixed with "error: " and should not end with a ".".
  void emitError(uint64_t LocCookie, const Twine &ErrorStr);
  void emitError(const Instruction *I, const Twine &ErrorStr);
  void emitError(const Twine &ErrorStr);
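
  // For instance (BadInst is an assumed Instruction *):
  //
  //   Ctx.emitError(BadInst, "unsupported atomic operation");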

  /// Access the object which can disable optional passes and individual
  /// optimizations at compile time.
  OptPassGate &getOptPassGate() const;

  /// Set the object which can disable optional passes and individual
  /// optimizations at compile time.
  ///
  /// The lifetime of the object must be guaranteed to extend as long as the
  /// LLVMContext is used by compilation.
  void setOptPassGate(OptPassGate&);

  /// Set whether opaque pointers are enabled. The method may be called multiple
  /// times, but only with the same value. Note that creating a pointer type or
  /// otherwise querying the opaque pointer mode performs an implicit set to
  /// the default value.
  [[deprecated("Opaque pointers are always enabled")]]
  void setOpaquePointers(bool Enable) const;

  /// Whether typed pointers are supported. If false, all pointers are opaque.
  [[deprecated("Always returns false")]]
  bool supportsTypedPointers() const;

private:
  // Module needs access to the add/removeModule methods.
  friend class Module;

  /// addModule - Register a module as being instantiated in this context.  If
  /// the context is deleted, the module will be deleted as well.
  void addModule(Module*);

  /// removeModule - Unregister a module from this context.
  void removeModule(Module*);
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef)

/* Specialized opaque context conversions.
 */
inline LLVMContext **unwrap(LLVMContextRef* Tys) {
  return reinterpret_cast<LLVMContext**>(Tys);
}

inline LLVMContextRef *wrap(const LLVMContext **Tys) {
  return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys));
}

} // end namespace llvm

#endif // LLVM_IR_LLVMCONTEXT_H
/*===- TableGen'erated file: IR/IntrinsicsS390.h ----------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_S390_ENUMS_H
#define LLVM_IR_INTRINSIC_S390_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum S390Intrinsics : unsigned {
// Enum values for intrinsics
    s390_efpc = 8850,                                 // llvm.s390.efpc
    s390_etnd,                                 // llvm.s390.etnd
    s390_lcbb,                                 // llvm.s390.lcbb
    s390_ntstg,                                // llvm.s390.ntstg
    s390_ppa_txassist,                         // llvm.s390.ppa.txassist
    s390_sfpc,                                 // llvm.s390.sfpc
    s390_tabort,                               // llvm.s390.tabort
    s390_tbegin,                               // llvm.s390.tbegin
    s390_tbegin_nofloat,                       // llvm.s390.tbegin.nofloat
    s390_tbeginc,                              // llvm.s390.tbeginc
    s390_tdc,                                  // llvm.s390.tdc
    s390_tend,                                 // llvm.s390.tend
    s390_vaccb,                                // llvm.s390.vaccb
    s390_vacccq,                               // llvm.s390.vacccq
    s390_vaccf,                                // llvm.s390.vaccf
    s390_vaccg,                                // llvm.s390.vaccg
    s390_vacch,                                // llvm.s390.vacch
    s390_vaccq,                                // llvm.s390.vaccq
    s390_vacq,                                 // llvm.s390.vacq
    s390_vaq,                                  // llvm.s390.vaq
    s390_vavgb,                                // llvm.s390.vavgb
    s390_vavgf,                                // llvm.s390.vavgf
    s390_vavgg,                                // llvm.s390.vavgg
    s390_vavgh,                                // llvm.s390.vavgh
    s390_vavglb,                               // llvm.s390.vavglb
    s390_vavglf,                               // llvm.s390.vavglf
    s390_vavglg,                               // llvm.s390.vavglg
    s390_vavglh,                               // llvm.s390.vavglh
    s390_vbperm,                               // llvm.s390.vbperm
    s390_vceqbs,                               // llvm.s390.vceqbs
    s390_vceqfs,                               // llvm.s390.vceqfs
    s390_vceqgs,                               // llvm.s390.vceqgs
    s390_vceqhs,                               // llvm.s390.vceqhs
    s390_vcfn,                                 // llvm.s390.vcfn
    s390_vchbs,                                // llvm.s390.vchbs
    s390_vchfs,                                // llvm.s390.vchfs
    s390_vchgs,                                // llvm.s390.vchgs
    s390_vchhs,                                // llvm.s390.vchhs
    s390_vchlbs,                               // llvm.s390.vchlbs
    s390_vchlfs,                               // llvm.s390.vchlfs
    s390_vchlgs,                               // llvm.s390.vchlgs
    s390_vchlhs,                               // llvm.s390.vchlhs
    s390_vcksm,                                // llvm.s390.vcksm
    s390_vclfnhs,                              // llvm.s390.vclfnhs
    s390_vclfnls,                              // llvm.s390.vclfnls
    s390_vcnf,                                 // llvm.s390.vcnf
    s390_vcrnfs,                               // llvm.s390.vcrnfs
    s390_verimb,                               // llvm.s390.verimb
    s390_verimf,                               // llvm.s390.verimf
    s390_verimg,                               // llvm.s390.verimg
    s390_verimh,                               // llvm.s390.verimh
    s390_verllb,                               // llvm.s390.verllb
    s390_verllf,                               // llvm.s390.verllf
    s390_verllg,                               // llvm.s390.verllg
    s390_verllh,                               // llvm.s390.verllh
    s390_verllvb,                              // llvm.s390.verllvb
    s390_verllvf,                              // llvm.s390.verllvf
    s390_verllvg,                              // llvm.s390.verllvg
    s390_verllvh,                              // llvm.s390.verllvh
    s390_vfaeb,                                // llvm.s390.vfaeb
    s390_vfaebs,                               // llvm.s390.vfaebs
    s390_vfaef,                                // llvm.s390.vfaef
    s390_vfaefs,                               // llvm.s390.vfaefs
    s390_vfaeh,                                // llvm.s390.vfaeh
    s390_vfaehs,                               // llvm.s390.vfaehs
    s390_vfaezb,                               // llvm.s390.vfaezb
    s390_vfaezbs,                              // llvm.s390.vfaezbs
    s390_vfaezf,                               // llvm.s390.vfaezf
    s390_vfaezfs,                              // llvm.s390.vfaezfs
    s390_vfaezh,                               // llvm.s390.vfaezh
    s390_vfaezhs,                              // llvm.s390.vfaezhs
    s390_vfcedbs,                              // llvm.s390.vfcedbs
    s390_vfcesbs,                              // llvm.s390.vfcesbs
    s390_vfchdbs,                              // llvm.s390.vfchdbs
    s390_vfchedbs,                             // llvm.s390.vfchedbs
    s390_vfchesbs,                             // llvm.s390.vfchesbs
    s390_vfchsbs,                              // llvm.s390.vfchsbs
    s390_vfeeb,                                // llvm.s390.vfeeb
    s390_vfeebs,                               // llvm.s390.vfeebs
    s390_vfeef,                                // llvm.s390.vfeef
    s390_vfeefs,                               // llvm.s390.vfeefs
    s390_vfeeh,                                // llvm.s390.vfeeh
    s390_vfeehs,                               // llvm.s390.vfeehs
    s390_vfeezb,                               // llvm.s390.vfeezb
    s390_vfeezbs,                              // llvm.s390.vfeezbs
    s390_vfeezf,                               // llvm.s390.vfeezf
    s390_vfeezfs,                              // llvm.s390.vfeezfs
    s390_vfeezh,                               // llvm.s390.vfeezh
    s390_vfeezhs,                              // llvm.s390.vfeezhs
    s390_vfeneb,                               // llvm.s390.vfeneb
    s390_vfenebs,                              // llvm.s390.vfenebs
    s390_vfenef,                               // llvm.s390.vfenef
    s390_vfenefs,                              // llvm.s390.vfenefs
    s390_vfeneh,                               // llvm.s390.vfeneh
    s390_vfenehs,                              // llvm.s390.vfenehs
    s390_vfenezb,                              // llvm.s390.vfenezb
    s390_vfenezbs,                             // llvm.s390.vfenezbs
    s390_vfenezf,                              // llvm.s390.vfenezf
    s390_vfenezfs,                             // llvm.s390.vfenezfs
    s390_vfenezh,                              // llvm.s390.vfenezh
    s390_vfenezhs,                             // llvm.s390.vfenezhs
    s390_vfidb,                                // llvm.s390.vfidb
    s390_vfisb,                                // llvm.s390.vfisb
    s390_vfmaxdb,                              // llvm.s390.vfmaxdb
    s390_vfmaxsb,                              // llvm.s390.vfmaxsb
    s390_vfmindb,                              // llvm.s390.vfmindb
    s390_vfminsb,                              // llvm.s390.vfminsb
    s390_vftcidb,                              // llvm.s390.vftcidb
    s390_vftcisb,                              // llvm.s390.vftcisb
    s390_vgfmab,                               // llvm.s390.vgfmab
    s390_vgfmaf,                               // llvm.s390.vgfmaf
    s390_vgfmag,                               // llvm.s390.vgfmag
    s390_vgfmah,                               // llvm.s390.vgfmah
    s390_vgfmb,                                // llvm.s390.vgfmb
    s390_vgfmf,                                // llvm.s390.vgfmf
    s390_vgfmg,                                // llvm.s390.vgfmg
    s390_vgfmh,                                // llvm.s390.vgfmh
    s390_vistrb,                               // llvm.s390.vistrb
    s390_vistrbs,                              // llvm.s390.vistrbs
    s390_vistrf,                               // llvm.s390.vistrf
    s390_vistrfs,                              // llvm.s390.vistrfs
    s390_vistrh,                               // llvm.s390.vistrh
    s390_vistrhs,                              // llvm.s390.vistrhs
    s390_vlbb,                                 // llvm.s390.vlbb
    s390_vll,                                  // llvm.s390.vll
    s390_vlrl,                                 // llvm.s390.vlrl
    s390_vmaeb,                                // llvm.s390.vmaeb
    s390_vmaef,                                // llvm.s390.vmaef
    s390_vmaeh,                                // llvm.s390.vmaeh
    s390_vmahb,                                // llvm.s390.vmahb
    s390_vmahf,                                // llvm.s390.vmahf
    s390_vmahh,                                // llvm.s390.vmahh
    s390_vmaleb,                               // llvm.s390.vmaleb
    s390_vmalef,                               // llvm.s390.vmalef
    s390_vmaleh,                               // llvm.s390.vmaleh
    s390_vmalhb,                               // llvm.s390.vmalhb
    s390_vmalhf,                               // llvm.s390.vmalhf
    s390_vmalhh,                               // llvm.s390.vmalhh
    s390_vmalob,                               // llvm.s390.vmalob
    s390_vmalof,                               // llvm.s390.vmalof
    s390_vmaloh,                               // llvm.s390.vmaloh
    s390_vmaob,                                // llvm.s390.vmaob
    s390_vmaof,                                // llvm.s390.vmaof
    s390_vmaoh,                                // llvm.s390.vmaoh
    s390_vmeb,                                 // llvm.s390.vmeb
    s390_vmef,                                 // llvm.s390.vmef
    s390_vmeh,                                 // llvm.s390.vmeh
    s390_vmhb,                                 // llvm.s390.vmhb
    s390_vmhf,                                 // llvm.s390.vmhf
    s390_vmhh,                                 // llvm.s390.vmhh
    s390_vmleb,                                // llvm.s390.vmleb
    s390_vmlef,                                // llvm.s390.vmlef
    s390_vmleh,                                // llvm.s390.vmleh
    s390_vmlhb,                                // llvm.s390.vmlhb
    s390_vmlhf,                                // llvm.s390.vmlhf
    s390_vmlhh,                                // llvm.s390.vmlhh
    s390_vmlob,                                // llvm.s390.vmlob
    s390_vmlof,                                // llvm.s390.vmlof
    s390_vmloh,                                // llvm.s390.vmloh
    s390_vmob,                                 // llvm.s390.vmob
    s390_vmof,                                 // llvm.s390.vmof
    s390_vmoh,                                 // llvm.s390.vmoh
    s390_vmslg,                                // llvm.s390.vmslg
    s390_vpdi,                                 // llvm.s390.vpdi
    s390_vperm,                                // llvm.s390.vperm
    s390_vpklsf,                               // llvm.s390.vpklsf
    s390_vpklsfs,                              // llvm.s390.vpklsfs
    s390_vpklsg,                               // llvm.s390.vpklsg
    s390_vpklsgs,                              // llvm.s390.vpklsgs
    s390_vpklsh,                               // llvm.s390.vpklsh
    s390_vpklshs,                              // llvm.s390.vpklshs
    s390_vpksf,                                // llvm.s390.vpksf
    s390_vpksfs,                               // llvm.s390.vpksfs
    s390_vpksg,                                // llvm.s390.vpksg
    s390_vpksgs,                               // llvm.s390.vpksgs
    s390_vpksh,                                // llvm.s390.vpksh
    s390_vpkshs,                               // llvm.s390.vpkshs
    s390_vsbcbiq,                              // llvm.s390.vsbcbiq
    s390_vsbiq,                                // llvm.s390.vsbiq
    s390_vscbib,                               // llvm.s390.vscbib
    s390_vscbif,                               // llvm.s390.vscbif
    s390_vscbig,                               // llvm.s390.vscbig
    s390_vscbih,                               // llvm.s390.vscbih
    s390_vscbiq,                               // llvm.s390.vscbiq
    s390_vsl,                                  // llvm.s390.vsl
    s390_vslb,                                 // llvm.s390.vslb
    s390_vsld,                                 // llvm.s390.vsld
    s390_vsldb,                                // llvm.s390.vsldb
    s390_vsq,                                  // llvm.s390.vsq
    s390_vsra,                                 // llvm.s390.vsra
    s390_vsrab,                                // llvm.s390.vsrab
    s390_vsrd,                                 // llvm.s390.vsrd
    s390_vsrl,                                 // llvm.s390.vsrl
    s390_vsrlb,                                // llvm.s390.vsrlb
    s390_vstl,                                 // llvm.s390.vstl
    s390_vstrcb,                               // llvm.s390.vstrcb
    s390_vstrcbs,                              // llvm.s390.vstrcbs
    s390_vstrcf,                               // llvm.s390.vstrcf
    s390_vstrcfs,                              // llvm.s390.vstrcfs
    s390_vstrch,                               // llvm.s390.vstrch
    s390_vstrchs,                              // llvm.s390.vstrchs
    s390_vstrczb,                              // llvm.s390.vstrczb
    s390_vstrczbs,                             // llvm.s390.vstrczbs
    s390_vstrczf,                              // llvm.s390.vstrczf
    s390_vstrczfs,                             // llvm.s390.vstrczfs
    s390_vstrczh,                              // llvm.s390.vstrczh
    s390_vstrczhs,                             // llvm.s390.vstrczhs
    s390_vstrl,                                // llvm.s390.vstrl
    s390_vstrsb,                               // llvm.s390.vstrsb
    s390_vstrsf,                               // llvm.s390.vstrsf
    s390_vstrsh,                               // llvm.s390.vstrsh
    s390_vstrszb,                              // llvm.s390.vstrszb
    s390_vstrszf,                              // llvm.s390.vstrszf
    s390_vstrszh,                              // llvm.s390.vstrszh
    s390_vsumb,                                // llvm.s390.vsumb
    s390_vsumgf,                               // llvm.s390.vsumgf
    s390_vsumgh,                               // llvm.s390.vsumgh
    s390_vsumh,                                // llvm.s390.vsumh
    s390_vsumqf,                               // llvm.s390.vsumqf
    s390_vsumqg,                               // llvm.s390.vsumqg
    s390_vtm,                                  // llvm.s390.vtm
    s390_vuphb,                                // llvm.s390.vuphb
    s390_vuphf,                                // llvm.s390.vuphf
    s390_vuphh,                                // llvm.s390.vuphh
    s390_vuplb,                                // llvm.s390.vuplb
    s390_vuplf,                                // llvm.s390.vuplf
    s390_vuplhb,                               // llvm.s390.vuplhb
    s390_vuplhf,                               // llvm.s390.vuplhf
    s390_vuplhh,                               // llvm.s390.vuplhh
    s390_vuplhw,                               // llvm.s390.vuplhw
    s390_vupllb,                               // llvm.s390.vupllb
    s390_vupllf,                               // llvm.s390.vupllf
    s390_vupllh,                               // llvm.s390.vupllh
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif
//===- LegacyPassNameParser.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PassNameParser and FilteredPassNameParser<> classes,
// which are used to add command line arguments to a utility for all of the
// passes that have been registered into the system.
//
// The PassNameParser class adds ALL passes linked into the system (that are
// creatable) as command line arguments to the tool (when instantiated with the
// appropriate command line option template).  The FilteredPassNameParser<>
// template is used for the same purposes as PassNameParser, except that it
// only includes passes whose PassType is compatible with the filter (which is
// the template argument).
//
// Note that this is part of the legacy pass manager infrastructure and will be
// (eventually) going away.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_LEGACYPASSNAMEPARSER_H
#define LLVM_IR_LEGACYPASSNAMEPARSER_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>

namespace llvm {

//===----------------------------------------------------------------------===//
// PassNameParser class - Make use of the pass registration mechanism to
// automatically add a command line argument to opt for each pass.
//
class PassNameParser : public PassRegistrationListener,
                       public cl::parser<const PassInfo*> {
public:
  PassNameParser(cl::Option &O);
  ~PassNameParser() override;

  void initialize() {
    cl::parser<const PassInfo*>::initialize();

    // Add all of the passes to the map that got initialized before 'this' did.
    enumeratePasses();
  }

  // ignorablePassImpl - Can be overridden in subclasses to refine the list of
  // passes that we want to include.
  //
  virtual bool ignorablePassImpl(const PassInfo *P) const { return false; }

  inline bool ignorablePass(const PassInfo *P) const {
    // Ignore non-selectable and non-constructible passes!  Ignore
    // non-optimizations.
    return P->getPassArgument().empty() || P->getNormalCtor() == nullptr ||
           ignorablePassImpl(P);
  }

  // Implement the PassRegistrationListener callbacks used to populate our map
  //
  void passRegistered(const PassInfo *P) override {
    if (ignorablePass(P)) return;
    if (findOption(P->getPassArgument().data()) != getNumOptions()) {
      errs() << "Two passes with the same argument (-"
           << P->getPassArgument() << ") attempted to be registered!\n";
      llvm_unreachable(nullptr);
    }
    addLiteralOption(P->getPassArgument().data(), P, P->getPassName().data());
  }
  void passEnumerate(const PassInfo *P) override { passRegistered(P); }

  // printOptionInfo - Print out information about this option.  Override the
  // default implementation to sort the table before we print...
  void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const override {
    PassNameParser *PNP = const_cast<PassNameParser*>(this);
    array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValCompare);
    cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth);
  }

private:
  // ValCompare - Provide a sorting comparator for Values elements...
  static int ValCompare(const PassNameParser::OptionInfo *VT1,
                        const PassNameParser::OptionInfo *VT2) {
    return VT1->Name.compare(VT2->Name);
  }
};
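
// Typical usage sketch, in the style of tools like opt: expose every
// registered pass as a command-line option.
//
//   static cl::list<const PassInfo *, bool, PassNameParser>
//       PassList(cl::desc("Optimizations available:"));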

} // End llvm namespace

#endif
//===- IntrinsicsDirectX.td - Defines DirectX intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the DirectX-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "dx" in {

def int_dx_thread_id : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
def int_dx_group_id : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
def int_dx_thread_id_in_group : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrWillReturn]>;
def int_dx_flattened_thread_id_in_group : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrWillReturn]>;

def int_dx_create_handle : ClangBuiltin<"__builtin_hlsl_create_handle">,
    Intrinsic<[ llvm_ptr_ty ], [llvm_i8_ty], [IntrWillReturn]>;
}
//===- ValueMap.h - Safe map from Values to data ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ValueMap class.  ValueMap maps Value* or any subclass
// to an arbitrary other type.  It provides the DenseMap interface but updates
// itself to remain safe when keys are RAUWed or deleted.  By default, when a
// key is RAUWed from V1 to V2, the old mapping V1->target is removed, and a new
// mapping V2->target is added.  If V2 already existed, its old target is
// overwritten.  When a key is deleted, its mapping is removed.
//
// You can override a ValueMap's Config parameter to control exactly what
// happens on RAUW and destruction and to get called back on each event.  It's
// legal to call back into the ValueMap from a Config's callbacks.  Config
// parameters should inherit from ValueMapConfig<KeyT> to get default
// implementations of all the methods ValueMap uses.  See ValueMapConfig for
// documentation of the functions you can override.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_VALUEMAP_H
#define LLVM_IR_VALUEMAP_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/IR/TrackingMDRef.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Mutex.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <mutex>
#include <optional>
#include <type_traits>
#include <utility>

namespace llvm {

template<typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH;
template<typename DenseMapT, typename KeyT>
class ValueMapIterator;
template<typename DenseMapT, typename KeyT>
class ValueMapConstIterator;

/// This class defines the default behavior for configurable aspects of
/// ValueMap<>.  User Configs should inherit from this class to be as compatible
/// as possible with future versions of ValueMap.
template<typename KeyT, typename MutexT = sys::Mutex>
struct ValueMapConfig {
  using mutex_type = MutexT;

  /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's
  /// false, the ValueMap will leave the original mapping in place.
  enum { FollowRAUW = true };

  // All methods will be called with a first argument of type ExtraData.  The
  // default implementations in this class take a templated first argument so
  // that users' subclasses can use any type they want without having to
  // override all the defaults.
  struct ExtraData {};

  template<typename ExtraDataT>
  static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
  template<typename ExtraDataT>
  static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}

  /// Returns a mutex that should be acquired around any changes to the map.
  /// This is only acquired from the CallbackVH (and held around calls to onRAUW
  /// and onDelete) and not inside other ValueMap methods.  NULL means that no
  /// mutex is necessary.
  template<typename ExtraDataT>
  static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
};
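
// A Config sketch (the subclass is illustrative): pin mappings in place on
// RAUW and observe deletions.
//
//   struct PinnedConfig : ValueMapConfig<Value *> {
//     enum { FollowRAUW = false };   // keep the old mapping on RAUW
//     template <typename ExtraDataT>
//     static void onDelete(const ExtraDataT &, Value *Old) {
//       // react to the key being destroyed
//     }
//   };
//   ValueMap<Value *, unsigned, PinnedConfig> VM;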

/// See the file comment.
template<typename KeyT, typename ValueT, typename Config =ValueMapConfig<KeyT>>
class ValueMap {
  friend class ValueMapCallbackVH<KeyT, ValueT, Config>;

  using ValueMapCVH = ValueMapCallbackVH<KeyT, ValueT, Config>;
  using MapT = DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH>>;
  using MDMapT = DenseMap<const Metadata *, TrackingMDRef>;
  using ExtraData = typename Config::ExtraData;

  MapT Map;
  std::optional<MDMapT> MDMap;
  ExtraData Data;

public:
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = std::pair<KeyT, ValueT>;
  using size_type = unsigned;

  explicit ValueMap(unsigned NumInitBuckets = 64)
      : Map(NumInitBuckets), Data() {}
  explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
      : Map(NumInitBuckets), Data(Data) {}
  // ValueMap can't be copied nor moved, because the callbacks store pointer to
  // it.
  ValueMap(const ValueMap &) = delete;
  ValueMap(ValueMap &&) = delete;
  ValueMap &operator=(const ValueMap &) = delete;
  ValueMap &operator=(ValueMap &&) = delete;

  bool hasMD() const { return bool(MDMap); }
  MDMapT &MD() {
    if (!MDMap)
      MDMap.emplace();
    return *MDMap;
  }
  std::optional<MDMapT> &getMDMap() { return MDMap; }

  /// Get the mapped metadata, if it's in the map.
  std::optional<Metadata *> getMappedMD(const Metadata *MD) const {
    if (!MDMap)
      return std::nullopt;
    auto Where = MDMap->find(MD);
    if (Where == MDMap->end())
      return std::nullopt;
    return Where->second.get();
  }

  using iterator = ValueMapIterator<MapT, KeyT>;
  using const_iterator = ValueMapConstIterator<MapT, KeyT>;

  inline iterator begin() { return iterator(Map.begin()); }
  inline iterator end() { return iterator(Map.end()); }
  inline const_iterator begin() const { return const_iterator(Map.begin()); }
  inline const_iterator end() const { return const_iterator(Map.end()); }

  bool empty() const { return Map.empty(); }
  size_type size() const { return Map.size(); }

  /// Grow the map so that it has at least Size buckets.  Does not shrink the
  /// map.
  void reserve(size_t Size) { Map.reserve(Size); }

  void clear() {
    Map.clear();
    MDMap.reset();
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Val) const {
    return Map.find_as(Val) == Map.end() ? 0 : 1;
  }

  iterator find(const KeyT &Val) {
    return iterator(Map.find_as(Val));
  }
  const_iterator find(const KeyT &Val) const {
    return const_iterator(Map.find_as(Val));
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    typename MapT::const_iterator I = Map.find_as(Val);
    return I != Map.end() ? I->second : ValueT();
  }

  // Inserts a key/value pair into the map if the key isn't already in the map.
  // If the key is already in the map, this returns false and doesn't update
  // the value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    auto MapResult = Map.insert(std::make_pair(Wrap(KV.first), KV.second));
    return std::make_pair(iterator(MapResult.first), MapResult.second);
  }

  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    auto MapResult =
        Map.insert(std::make_pair(Wrap(KV.first), std::move(KV.second)));
    return std::make_pair(iterator(MapResult.first), MapResult.second);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  bool erase(const KeyT &Val) {
    typename MapT::iterator I = Map.find_as(Val);
    if (I == Map.end())
      return false;

    Map.erase(I);
    return true;
  }
  void erase(iterator I) {
    return Map.erase(I.base());
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    return Map.FindAndConstruct(Wrap(Key));
  }

  ValueT &operator[](const KeyT &Key) {
    return Map[Wrap(Key)];
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the ValueMap's array of buckets (i.e. either to a key or
  /// value in the ValueMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Map.isPointerIntoBucketsArray(Ptr);
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the ValueMap to reallocate.
  const void *getPointerIntoBucketsArray() const {
    return Map.getPointerIntoBucketsArray();
  }

private:
  // Takes a key being looked up in the map and wraps it into a
  // ValueMapCallbackVH, the actual key type of the map.  We use a helper
  // function because ValueMapCVH is constructed with a second parameter.
  ValueMapCVH Wrap(KeyT key) const {
    // The only way the resulting CallbackVH could try to modify *this (making
    // the const_cast incorrect) is if it gets inserted into the map.  But then
    // this function must have been called from a non-const method, making the
    // const_cast ok.
    return ValueMapCVH(key, const_cast<ValueMap*>(this));
  }
};
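
// Behavioral sketch (V1 and V2 are assumed Value *): with the default config,
// RAUW retargets the key.
//
//   ValueMap<Value *, int> VM;
//   VM[V1] = 7;
//   V1->replaceAllUsesWith(V2);   // VM now maps V2 -> 7; the V1 entry is gone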

// This CallbackVH updates its ValueMap when the contained Value changes,
// according to the user's preferences expressed through the Config object.
template <typename KeyT, typename ValueT, typename Config>
class ValueMapCallbackVH final : public CallbackVH {
  friend class ValueMap<KeyT, ValueT, Config>;
  friend struct DenseMapInfo<ValueMapCallbackVH>;

  using ValueMapT = ValueMap<KeyT, ValueT, Config>;
  using KeySansPointerT = std::remove_pointer_t<KeyT>;

  ValueMapT *Map;

  ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
      : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
        Map(Map) {}

  // Private constructor used to create empty/tombstone DenseMap keys.
  ValueMapCallbackVH(Value *V) : CallbackVH(V), Map(nullptr) {}

public:
  KeyT Unwrap() const { return cast_or_null<KeySansPointerT>(getValPtr()); }

  void deleted() override {
    // Make a copy that won't get changed even when *this is destroyed.
    ValueMapCallbackVH Copy(*this);
    typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
    std::unique_lock<typename Config::mutex_type> Guard;
    if (M)
      Guard = std::unique_lock<typename Config::mutex_type>(*M);
    Config::onDelete(Copy.Map->Data, Copy.Unwrap());  // May destroy *this.
    Copy.Map->Map.erase(Copy);  // Definitely destroys *this.
  }

  void allUsesReplacedWith(Value *new_key) override {
    assert(isa<KeySansPointerT>(new_key) &&
           "Invalid RAUW on key of ValueMap<>");
    // Make a copy that won't get changed even when *this is destroyed.
    ValueMapCallbackVH Copy(*this);
    typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
    std::unique_lock<typename Config::mutex_type> Guard;
    if (M)
      Guard = std::unique_lock<typename Config::mutex_type>(*M);

    KeyT typed_new_key = cast<KeySansPointerT>(new_key);
    // Can destroy *this:
    Config::onRAUW(Copy.Map->Data, Copy.Unwrap(), typed_new_key);
    if (Config::FollowRAUW) {
      typename ValueMapT::MapT::iterator I = Copy.Map->Map.find(Copy);
      // I could == Copy.Map->Map.end() if the onRAUW callback already
      // removed the old mapping.
      if (I != Copy.Map->Map.end()) {
        ValueT Target(std::move(I->second));
        Copy.Map->Map.erase(I);  // Definitely destroys *this.
        Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target)));
      }
    }
  }
};

template<typename KeyT, typename ValueT, typename Config>
struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config>> {
  using VH = ValueMapCallbackVH<KeyT, ValueT, Config>;

  static inline VH getEmptyKey() {
    return VH(DenseMapInfo<Value *>::getEmptyKey());
  }

  static inline VH getTombstoneKey() {
    return VH(DenseMapInfo<Value *>::getTombstoneKey());
  }

  static unsigned getHashValue(const VH &Val) {
    return DenseMapInfo<KeyT>::getHashValue(Val.Unwrap());
  }

  static unsigned getHashValue(const KeyT &Val) {
    return DenseMapInfo<KeyT>::getHashValue(Val);
  }

  static bool isEqual(const VH &LHS, const VH &RHS) {
    return LHS == RHS;
  }

  static bool isEqual(const KeyT &LHS, const VH &RHS) {
    return LHS == RHS.getValPtr();
  }
};

template <typename DenseMapT, typename KeyT> class ValueMapIterator {
  using BaseT = typename DenseMapT::iterator;
  using ValueT = typename DenseMapT::mapped_type;

  BaseT I;

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = std::pair<KeyT, typename DenseMapT::mapped_type>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  ValueMapIterator() : I() {}
  ValueMapIterator(BaseT I) : I(I) {}

  BaseT base() const { return I; }

  struct ValueTypeProxy {
    const KeyT first;
    ValueT& second;

    ValueTypeProxy *operator->() { return this; }

    operator std::pair<KeyT, ValueT>() const {
      return std::make_pair(first, second);
    }
  };

  ValueTypeProxy operator*() const {
    ValueTypeProxy Result = {I->first.Unwrap(), I->second};
    return Result;
  }

  ValueTypeProxy operator->() const {
    return operator*();
  }

  bool operator==(const ValueMapIterator &RHS) const {
    return I == RHS.I;
  }
  bool operator!=(const ValueMapIterator &RHS) const {
    return I != RHS.I;
  }

  inline ValueMapIterator& operator++() {  // Preincrement
    ++I;
    return *this;
  }
  ValueMapIterator operator++(int) {  // Postincrement
    ValueMapIterator tmp = *this; ++*this; return tmp;
  }
};

template <typename DenseMapT, typename KeyT> class ValueMapConstIterator {
  using BaseT = typename DenseMapT::const_iterator;
  using ValueT = typename DenseMapT::mapped_type;

  BaseT I;

public:
  using iterator_category = std::forward_iterator_tag;
  using value_type = std::pair<KeyT, typename DenseMapT::mapped_type>;
  using difference_type = std::ptrdiff_t;
  using pointer = value_type *;
  using reference = value_type &;

  ValueMapConstIterator() : I() {}
  ValueMapConstIterator(BaseT I) : I(I) {}
  ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
    : I(Other.base()) {}

  BaseT base() const { return I; }

  struct ValueTypeProxy {
    const KeyT first;
    const ValueT& second;
    ValueTypeProxy *operator->() { return this; }
    operator std::pair<KeyT, ValueT>() const {
      return std::make_pair(first, second);
    }
  };

  ValueTypeProxy operator*() const {
    ValueTypeProxy Result = {I->first.Unwrap(), I->second};
    return Result;
  }

  ValueTypeProxy operator->() const {
    return operator*();
  }

  bool operator==(const ValueMapConstIterator &RHS) const {
    return I == RHS.I;
  }
  bool operator!=(const ValueMapConstIterator &RHS) const {
    return I != RHS.I;
  }

  inline ValueMapConstIterator& operator++() {  // Preincrement
    ++I;
    return *this;
  }
  ValueMapConstIterator operator++(int) {  // Postincrement
    ValueMapConstIterator tmp = *this; ++*this; return tmp;
  }
};

} // end namespace llvm

#endif // LLVM_IR_VALUEMAP_H
/*===- TableGen'erated file: IR/IntrinsicsX86.h -----------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#ifndef LLVM_IR_INTRINSIC_X86_ENUMS_H
#define LLVM_IR_INTRINSIC_X86_ENUMS_H

namespace llvm {
namespace Intrinsic {
enum X86Intrinsics : unsigned {
// Enum values for intrinsics
    x86_3dnow_pavgusb = 10429,                         // llvm.x86.3dnow.pavgusb
    x86_3dnow_pf2id,                           // llvm.x86.3dnow.pf2id
    x86_3dnow_pfacc,                           // llvm.x86.3dnow.pfacc
    x86_3dnow_pfadd,                           // llvm.x86.3dnow.pfadd
    x86_3dnow_pfcmpeq,                         // llvm.x86.3dnow.pfcmpeq
    x86_3dnow_pfcmpge,                         // llvm.x86.3dnow.pfcmpge
    x86_3dnow_pfcmpgt,                         // llvm.x86.3dnow.pfcmpgt
    x86_3dnow_pfmax,                           // llvm.x86.3dnow.pfmax
    x86_3dnow_pfmin,                           // llvm.x86.3dnow.pfmin
    x86_3dnow_pfmul,                           // llvm.x86.3dnow.pfmul
    x86_3dnow_pfrcp,                           // llvm.x86.3dnow.pfrcp
    x86_3dnow_pfrcpit1,                        // llvm.x86.3dnow.pfrcpit1
    x86_3dnow_pfrcpit2,                        // llvm.x86.3dnow.pfrcpit2
    x86_3dnow_pfrsqit1,                        // llvm.x86.3dnow.pfrsqit1
    x86_3dnow_pfrsqrt,                         // llvm.x86.3dnow.pfrsqrt
    x86_3dnow_pfsub,                           // llvm.x86.3dnow.pfsub
    x86_3dnow_pfsubr,                          // llvm.x86.3dnow.pfsubr
    x86_3dnow_pi2fd,                           // llvm.x86.3dnow.pi2fd
    x86_3dnow_pmulhrw,                         // llvm.x86.3dnow.pmulhrw
    x86_3dnowa_pf2iw,                          // llvm.x86.3dnowa.pf2iw
    x86_3dnowa_pfnacc,                         // llvm.x86.3dnowa.pfnacc
    x86_3dnowa_pfpnacc,                        // llvm.x86.3dnowa.pfpnacc
    x86_3dnowa_pi2fw,                          // llvm.x86.3dnowa.pi2fw
    x86_3dnowa_pswapd,                         // llvm.x86.3dnowa.pswapd
    x86_aadd32,                                // llvm.x86.aadd32
    x86_aadd64,                                // llvm.x86.aadd64
    x86_aand32,                                // llvm.x86.aand32
    x86_aand64,                                // llvm.x86.aand64
    x86_addcarry_32,                           // llvm.x86.addcarry.32
    x86_addcarry_64,                           // llvm.x86.addcarry.64
    x86_aesdec128kl,                           // llvm.x86.aesdec128kl
    x86_aesdec256kl,                           // llvm.x86.aesdec256kl
    x86_aesdecwide128kl,                       // llvm.x86.aesdecwide128kl
    x86_aesdecwide256kl,                       // llvm.x86.aesdecwide256kl
    x86_aesenc128kl,                           // llvm.x86.aesenc128kl
    x86_aesenc256kl,                           // llvm.x86.aesenc256kl
    x86_aesencwide128kl,                       // llvm.x86.aesencwide128kl
    x86_aesencwide256kl,                       // llvm.x86.aesencwide256kl
    x86_aesni_aesdec,                          // llvm.x86.aesni.aesdec
    x86_aesni_aesdec_256,                      // llvm.x86.aesni.aesdec.256
    x86_aesni_aesdec_512,                      // llvm.x86.aesni.aesdec.512
    x86_aesni_aesdeclast,                      // llvm.x86.aesni.aesdeclast
    x86_aesni_aesdeclast_256,                  // llvm.x86.aesni.aesdeclast.256
    x86_aesni_aesdeclast_512,                  // llvm.x86.aesni.aesdeclast.512
    x86_aesni_aesenc,                          // llvm.x86.aesni.aesenc
    x86_aesni_aesenc_256,                      // llvm.x86.aesni.aesenc.256
    x86_aesni_aesenc_512,                      // llvm.x86.aesni.aesenc.512
    x86_aesni_aesenclast,                      // llvm.x86.aesni.aesenclast
    x86_aesni_aesenclast_256,                  // llvm.x86.aesni.aesenclast.256
    x86_aesni_aesenclast_512,                  // llvm.x86.aesni.aesenclast.512
    x86_aesni_aesimc,                          // llvm.x86.aesni.aesimc
    x86_aesni_aeskeygenassist,                 // llvm.x86.aesni.aeskeygenassist
    x86_aor32,                                 // llvm.x86.aor32
    x86_aor64,                                 // llvm.x86.aor64
    x86_atomic_add_cc,                         // llvm.x86.atomic.add.cc
    x86_atomic_and_cc,                         // llvm.x86.atomic.and.cc
    x86_atomic_btc,                            // llvm.x86.atomic.btc
    x86_atomic_btc_rm,                         // llvm.x86.atomic.btc.rm
    x86_atomic_btr,                            // llvm.x86.atomic.btr
    x86_atomic_btr_rm,                         // llvm.x86.atomic.btr.rm
    x86_atomic_bts,                            // llvm.x86.atomic.bts
    x86_atomic_bts_rm,                         // llvm.x86.atomic.bts.rm
    x86_atomic_or_cc,                          // llvm.x86.atomic.or.cc
    x86_atomic_sub_cc,                         // llvm.x86.atomic.sub.cc
    x86_atomic_xor_cc,                         // llvm.x86.atomic.xor.cc
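
    // AVX intrinsics (llvm.x86.avx.*).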
    x86_avx_addsub_pd_256,                     // llvm.x86.avx.addsub.pd.256
    x86_avx_addsub_ps_256,                     // llvm.x86.avx.addsub.ps.256
    x86_avx_blendv_pd_256,                     // llvm.x86.avx.blendv.pd.256
    x86_avx_blendv_ps_256,                     // llvm.x86.avx.blendv.ps.256
    x86_avx_cmp_pd_256,                        // llvm.x86.avx.cmp.pd.256
    x86_avx_cmp_ps_256,                        // llvm.x86.avx.cmp.ps.256
    x86_avx_cvt_pd2_ps_256,                    // llvm.x86.avx.cvt.pd2.ps.256
    x86_avx_cvt_pd2dq_256,                     // llvm.x86.avx.cvt.pd2dq.256
    x86_avx_cvt_ps2dq_256,                     // llvm.x86.avx.cvt.ps2dq.256
    x86_avx_cvtt_pd2dq_256,                    // llvm.x86.avx.cvtt.pd2dq.256
    x86_avx_cvtt_ps2dq_256,                    // llvm.x86.avx.cvtt.ps2dq.256
    x86_avx_dp_ps_256,                         // llvm.x86.avx.dp.ps.256
    x86_avx_hadd_pd_256,                       // llvm.x86.avx.hadd.pd.256
    x86_avx_hadd_ps_256,                       // llvm.x86.avx.hadd.ps.256
    x86_avx_hsub_pd_256,                       // llvm.x86.avx.hsub.pd.256
    x86_avx_hsub_ps_256,                       // llvm.x86.avx.hsub.ps.256
    x86_avx_ldu_dq_256,                        // llvm.x86.avx.ldu.dq.256
    x86_avx_maskload_pd,                       // llvm.x86.avx.maskload.pd
    x86_avx_maskload_pd_256,                   // llvm.x86.avx.maskload.pd.256
    x86_avx_maskload_ps,                       // llvm.x86.avx.maskload.ps
    x86_avx_maskload_ps_256,                   // llvm.x86.avx.maskload.ps.256
    x86_avx_maskstore_pd,                      // llvm.x86.avx.maskstore.pd
    x86_avx_maskstore_pd_256,                  // llvm.x86.avx.maskstore.pd.256
    x86_avx_maskstore_ps,                      // llvm.x86.avx.maskstore.ps
    x86_avx_maskstore_ps_256,                  // llvm.x86.avx.maskstore.ps.256
    x86_avx_max_pd_256,                        // llvm.x86.avx.max.pd.256
    x86_avx_max_ps_256,                        // llvm.x86.avx.max.ps.256
    x86_avx_min_pd_256,                        // llvm.x86.avx.min.pd.256
    x86_avx_min_ps_256,                        // llvm.x86.avx.min.ps.256
    x86_avx_movmsk_pd_256,                     // llvm.x86.avx.movmsk.pd.256
    x86_avx_movmsk_ps_256,                     // llvm.x86.avx.movmsk.ps.256
    x86_avx_ptestc_256,                        // llvm.x86.avx.ptestc.256
    x86_avx_ptestnzc_256,                      // llvm.x86.avx.ptestnzc.256
    x86_avx_ptestz_256,                        // llvm.x86.avx.ptestz.256
    x86_avx_rcp_ps_256,                        // llvm.x86.avx.rcp.ps.256
    x86_avx_round_pd_256,                      // llvm.x86.avx.round.pd.256
    x86_avx_round_ps_256,                      // llvm.x86.avx.round.ps.256
    x86_avx_rsqrt_ps_256,                      // llvm.x86.avx.rsqrt.ps.256
    x86_avx_vpermilvar_pd,                     // llvm.x86.avx.vpermilvar.pd
    x86_avx_vpermilvar_pd_256,                 // llvm.x86.avx.vpermilvar.pd.256
    x86_avx_vpermilvar_ps,                     // llvm.x86.avx.vpermilvar.ps
    x86_avx_vpermilvar_ps_256,                 // llvm.x86.avx.vpermilvar.ps.256
    x86_avx_vtestc_pd,                         // llvm.x86.avx.vtestc.pd
    x86_avx_vtestc_pd_256,                     // llvm.x86.avx.vtestc.pd.256
    x86_avx_vtestc_ps,                         // llvm.x86.avx.vtestc.ps
    x86_avx_vtestc_ps_256,                     // llvm.x86.avx.vtestc.ps.256
    x86_avx_vtestnzc_pd,                       // llvm.x86.avx.vtestnzc.pd
    x86_avx_vtestnzc_pd_256,                   // llvm.x86.avx.vtestnzc.pd.256
    x86_avx_vtestnzc_ps,                       // llvm.x86.avx.vtestnzc.ps
    x86_avx_vtestnzc_ps_256,                   // llvm.x86.avx.vtestnzc.ps.256
    x86_avx_vtestz_pd,                         // llvm.x86.avx.vtestz.pd
    x86_avx_vtestz_pd_256,                     // llvm.x86.avx.vtestz.pd.256
    x86_avx_vtestz_ps,                         // llvm.x86.avx.vtestz.ps
    x86_avx_vtestz_ps_256,                     // llvm.x86.avx.vtestz.ps.256
    x86_avx_vzeroall,                          // llvm.x86.avx.vzeroall
    x86_avx_vzeroupper,                        // llvm.x86.avx.vzeroupper
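
    // AVX2 intrinsics (llvm.x86.avx2.*); the trailing vpdpb*/vpdpw* entries
    // are the AVX-VNNI-INT8 and AVX-VNNI-INT16 dot-product forms.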
    x86_avx2_gather_d_d,                       // llvm.x86.avx2.gather.d.d
    x86_avx2_gather_d_d_256,                   // llvm.x86.avx2.gather.d.d.256
    x86_avx2_gather_d_pd,                      // llvm.x86.avx2.gather.d.pd
    x86_avx2_gather_d_pd_256,                  // llvm.x86.avx2.gather.d.pd.256
    x86_avx2_gather_d_ps,                      // llvm.x86.avx2.gather.d.ps
    x86_avx2_gather_d_ps_256,                  // llvm.x86.avx2.gather.d.ps.256
    x86_avx2_gather_d_q,                       // llvm.x86.avx2.gather.d.q
    x86_avx2_gather_d_q_256,                   // llvm.x86.avx2.gather.d.q.256
    x86_avx2_gather_q_d,                       // llvm.x86.avx2.gather.q.d
    x86_avx2_gather_q_d_256,                   // llvm.x86.avx2.gather.q.d.256
    x86_avx2_gather_q_pd,                      // llvm.x86.avx2.gather.q.pd
    x86_avx2_gather_q_pd_256,                  // llvm.x86.avx2.gather.q.pd.256
    x86_avx2_gather_q_ps,                      // llvm.x86.avx2.gather.q.ps
    x86_avx2_gather_q_ps_256,                  // llvm.x86.avx2.gather.q.ps.256
    x86_avx2_gather_q_q,                       // llvm.x86.avx2.gather.q.q
    x86_avx2_gather_q_q_256,                   // llvm.x86.avx2.gather.q.q.256
    x86_avx2_maskload_d,                       // llvm.x86.avx2.maskload.d
    x86_avx2_maskload_d_256,                   // llvm.x86.avx2.maskload.d.256
    x86_avx2_maskload_q,                       // llvm.x86.avx2.maskload.q
    x86_avx2_maskload_q_256,                   // llvm.x86.avx2.maskload.q.256
    x86_avx2_maskstore_d,                      // llvm.x86.avx2.maskstore.d
    x86_avx2_maskstore_d_256,                  // llvm.x86.avx2.maskstore.d.256
    x86_avx2_maskstore_q,                      // llvm.x86.avx2.maskstore.q
    x86_avx2_maskstore_q_256,                  // llvm.x86.avx2.maskstore.q.256
    x86_avx2_mpsadbw,                          // llvm.x86.avx2.mpsadbw
    x86_avx2_packssdw,                         // llvm.x86.avx2.packssdw
    x86_avx2_packsswb,                         // llvm.x86.avx2.packsswb
    x86_avx2_packusdw,                         // llvm.x86.avx2.packusdw
    x86_avx2_packuswb,                         // llvm.x86.avx2.packuswb
    x86_avx2_pavg_b,                           // llvm.x86.avx2.pavg.b
    x86_avx2_pavg_w,                           // llvm.x86.avx2.pavg.w
    x86_avx2_pblendvb,                         // llvm.x86.avx2.pblendvb
    x86_avx2_permd,                            // llvm.x86.avx2.permd
    x86_avx2_permps,                           // llvm.x86.avx2.permps
    x86_avx2_phadd_d,                          // llvm.x86.avx2.phadd.d
    x86_avx2_phadd_sw,                         // llvm.x86.avx2.phadd.sw
    x86_avx2_phadd_w,                          // llvm.x86.avx2.phadd.w
    x86_avx2_phsub_d,                          // llvm.x86.avx2.phsub.d
    x86_avx2_phsub_sw,                         // llvm.x86.avx2.phsub.sw
    x86_avx2_phsub_w,                          // llvm.x86.avx2.phsub.w
    x86_avx2_pmadd_ub_sw,                      // llvm.x86.avx2.pmadd.ub.sw
    x86_avx2_pmadd_wd,                         // llvm.x86.avx2.pmadd.wd
    x86_avx2_pmovmskb,                         // llvm.x86.avx2.pmovmskb
    x86_avx2_pmul_hr_sw,                       // llvm.x86.avx2.pmul.hr.sw
    x86_avx2_pmulh_w,                          // llvm.x86.avx2.pmulh.w
    x86_avx2_pmulhu_w,                         // llvm.x86.avx2.pmulhu.w
    x86_avx2_psad_bw,                          // llvm.x86.avx2.psad.bw
    x86_avx2_pshuf_b,                          // llvm.x86.avx2.pshuf.b
    x86_avx2_psign_b,                          // llvm.x86.avx2.psign.b
    x86_avx2_psign_d,                          // llvm.x86.avx2.psign.d
    x86_avx2_psign_w,                          // llvm.x86.avx2.psign.w
    x86_avx2_psll_d,                           // llvm.x86.avx2.psll.d
    x86_avx2_psll_q,                           // llvm.x86.avx2.psll.q
    x86_avx2_psll_w,                           // llvm.x86.avx2.psll.w
    x86_avx2_pslli_d,                          // llvm.x86.avx2.pslli.d
    x86_avx2_pslli_q,                          // llvm.x86.avx2.pslli.q
    x86_avx2_pslli_w,                          // llvm.x86.avx2.pslli.w
    x86_avx2_psllv_d,                          // llvm.x86.avx2.psllv.d
    x86_avx2_psllv_d_256,                      // llvm.x86.avx2.psllv.d.256
    x86_avx2_psllv_q,                          // llvm.x86.avx2.psllv.q
    x86_avx2_psllv_q_256,                      // llvm.x86.avx2.psllv.q.256
    x86_avx2_psra_d,                           // llvm.x86.avx2.psra.d
    x86_avx2_psra_w,                           // llvm.x86.avx2.psra.w
    x86_avx2_psrai_d,                          // llvm.x86.avx2.psrai.d
    x86_avx2_psrai_w,                          // llvm.x86.avx2.psrai.w
    x86_avx2_psrav_d,                          // llvm.x86.avx2.psrav.d
    x86_avx2_psrav_d_256,                      // llvm.x86.avx2.psrav.d.256
    x86_avx2_psrl_d,                           // llvm.x86.avx2.psrl.d
    x86_avx2_psrl_q,                           // llvm.x86.avx2.psrl.q
    x86_avx2_psrl_w,                           // llvm.x86.avx2.psrl.w
    x86_avx2_psrli_d,                          // llvm.x86.avx2.psrli.d
    x86_avx2_psrli_q,                          // llvm.x86.avx2.psrli.q
    x86_avx2_psrli_w,                          // llvm.x86.avx2.psrli.w
    x86_avx2_psrlv_d,                          // llvm.x86.avx2.psrlv.d
    x86_avx2_psrlv_d_256,                      // llvm.x86.avx2.psrlv.d.256
    x86_avx2_psrlv_q,                          // llvm.x86.avx2.psrlv.q
    x86_avx2_psrlv_q_256,                      // llvm.x86.avx2.psrlv.q.256
    x86_avx2_vpdpbssd_128,                     // llvm.x86.avx2.vpdpbssd.128
    x86_avx2_vpdpbssd_256,                     // llvm.x86.avx2.vpdpbssd.256
    x86_avx2_vpdpbssds_128,                    // llvm.x86.avx2.vpdpbssds.128
    x86_avx2_vpdpbssds_256,                    // llvm.x86.avx2.vpdpbssds.256
    x86_avx2_vpdpbsud_128,                     // llvm.x86.avx2.vpdpbsud.128
    x86_avx2_vpdpbsud_256,                     // llvm.x86.avx2.vpdpbsud.256
    x86_avx2_vpdpbsuds_128,                    // llvm.x86.avx2.vpdpbsuds.128
    x86_avx2_vpdpbsuds_256,                    // llvm.x86.avx2.vpdpbsuds.256
    x86_avx2_vpdpbuud_128,                     // llvm.x86.avx2.vpdpbuud.128
    x86_avx2_vpdpbuud_256,                     // llvm.x86.avx2.vpdpbuud.256
    x86_avx2_vpdpbuuds_128,                    // llvm.x86.avx2.vpdpbuuds.128
    x86_avx2_vpdpbuuds_256,                    // llvm.x86.avx2.vpdpbuuds.256
    x86_avx2_vpdpwsud_128,                     // llvm.x86.avx2.vpdpwsud.128
    x86_avx2_vpdpwsud_256,                     // llvm.x86.avx2.vpdpwsud.256
    x86_avx2_vpdpwsuds_128,                    // llvm.x86.avx2.vpdpwsuds.128
    x86_avx2_vpdpwsuds_256,                    // llvm.x86.avx2.vpdpwsuds.256
    x86_avx2_vpdpwusd_128,                     // llvm.x86.avx2.vpdpwusd.128
    x86_avx2_vpdpwusd_256,                     // llvm.x86.avx2.vpdpwusd.256
    x86_avx2_vpdpwusds_128,                    // llvm.x86.avx2.vpdpwusds.128
    x86_avx2_vpdpwusds_256,                    // llvm.x86.avx2.vpdpwusds.256
    x86_avx2_vpdpwuud_128,                     // llvm.x86.avx2.vpdpwuud.128
    x86_avx2_vpdpwuud_256,                     // llvm.x86.avx2.vpdpwuud.256
    x86_avx2_vpdpwuuds_128,                    // llvm.x86.avx2.vpdpwuuds.128
    x86_avx2_vpdpwuuds_256,                    // llvm.x86.avx2.vpdpwuuds.256
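
    // AVX-512 intrinsics (llvm.x86.avx512.*).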
    x86_avx512_add_pd_512,                     // llvm.x86.avx512.add.pd.512
    x86_avx512_add_ps_512,                     // llvm.x86.avx512.add.ps.512
    x86_avx512_broadcastmb_128,                // llvm.x86.avx512.broadcastmb.128
    x86_avx512_broadcastmb_256,                // llvm.x86.avx512.broadcastmb.256
    x86_avx512_broadcastmb_512,                // llvm.x86.avx512.broadcastmb.512
    x86_avx512_broadcastmw_128,                // llvm.x86.avx512.broadcastmw.128
    x86_avx512_broadcastmw_256,                // llvm.x86.avx512.broadcastmw.256
    x86_avx512_broadcastmw_512,                // llvm.x86.avx512.broadcastmw.512
    x86_avx512_conflict_d_128,                 // llvm.x86.avx512.conflict.d.128
    x86_avx512_conflict_d_256,                 // llvm.x86.avx512.conflict.d.256
    x86_avx512_conflict_d_512,                 // llvm.x86.avx512.conflict.d.512
    x86_avx512_conflict_q_128,                 // llvm.x86.avx512.conflict.q.128
    x86_avx512_conflict_q_256,                 // llvm.x86.avx512.conflict.q.256
    x86_avx512_conflict_q_512,                 // llvm.x86.avx512.conflict.q.512
    x86_avx512_cvtsi2sd64,                     // llvm.x86.avx512.cvtsi2sd64
    x86_avx512_cvtsi2ss32,                     // llvm.x86.avx512.cvtsi2ss32
    x86_avx512_cvtsi2ss64,                     // llvm.x86.avx512.cvtsi2ss64
    x86_avx512_cvttsd2si,                      // llvm.x86.avx512.cvttsd2si
    x86_avx512_cvttsd2si64,                    // llvm.x86.avx512.cvttsd2si64
    x86_avx512_cvttsd2usi,                     // llvm.x86.avx512.cvttsd2usi
    x86_avx512_cvttsd2usi64,                   // llvm.x86.avx512.cvttsd2usi64
    x86_avx512_cvttss2si,                      // llvm.x86.avx512.cvttss2si
    x86_avx512_cvttss2si64,                    // llvm.x86.avx512.cvttss2si64
    x86_avx512_cvttss2usi,                     // llvm.x86.avx512.cvttss2usi
    x86_avx512_cvttss2usi64,                   // llvm.x86.avx512.cvttss2usi64
    x86_avx512_cvtusi2ss,                      // llvm.x86.avx512.cvtusi2ss
    x86_avx512_cvtusi642sd,                    // llvm.x86.avx512.cvtusi642sd
    x86_avx512_cvtusi642ss,                    // llvm.x86.avx512.cvtusi642ss
    x86_avx512_dbpsadbw_128,                   // llvm.x86.avx512.dbpsadbw.128
    x86_avx512_dbpsadbw_256,                   // llvm.x86.avx512.dbpsadbw.256
    x86_avx512_dbpsadbw_512,                   // llvm.x86.avx512.dbpsadbw.512
    x86_avx512_div_pd_512,                     // llvm.x86.avx512.div.pd.512
    x86_avx512_div_ps_512,                     // llvm.x86.avx512.div.ps.512
    x86_avx512_exp2_pd,                        // llvm.x86.avx512.exp2.pd
    x86_avx512_exp2_ps,                        // llvm.x86.avx512.exp2.ps
    x86_avx512_fpclass_pd_128,                 // llvm.x86.avx512.fpclass.pd.128
    x86_avx512_fpclass_pd_256,                 // llvm.x86.avx512.fpclass.pd.256
    x86_avx512_fpclass_pd_512,                 // llvm.x86.avx512.fpclass.pd.512
    x86_avx512_fpclass_ps_128,                 // llvm.x86.avx512.fpclass.ps.128
    x86_avx512_fpclass_ps_256,                 // llvm.x86.avx512.fpclass.ps.256
    x86_avx512_fpclass_ps_512,                 // llvm.x86.avx512.fpclass.ps.512
    x86_avx512_gather_dpd_512,                 // llvm.x86.avx512.gather.dpd.512
    x86_avx512_gather_dpi_512,                 // llvm.x86.avx512.gather.dpi.512
    x86_avx512_gather_dpq_512,                 // llvm.x86.avx512.gather.dpq.512
    x86_avx512_gather_dps_512,                 // llvm.x86.avx512.gather.dps.512
    x86_avx512_gather_qpd_512,                 // llvm.x86.avx512.gather.qpd.512
    x86_avx512_gather_qpi_512,                 // llvm.x86.avx512.gather.qpi.512
    x86_avx512_gather_qpq_512,                 // llvm.x86.avx512.gather.qpq.512
    x86_avx512_gather_qps_512,                 // llvm.x86.avx512.gather.qps.512
    x86_avx512_gather3div2_df,                 // llvm.x86.avx512.gather3div2.df
    x86_avx512_gather3div2_di,                 // llvm.x86.avx512.gather3div2.di
    x86_avx512_gather3div4_df,                 // llvm.x86.avx512.gather3div4.df
    x86_avx512_gather3div4_di,                 // llvm.x86.avx512.gather3div4.di
    x86_avx512_gather3div4_sf,                 // llvm.x86.avx512.gather3div4.sf
    x86_avx512_gather3div4_si,                 // llvm.x86.avx512.gather3div4.si
    x86_avx512_gather3div8_sf,                 // llvm.x86.avx512.gather3div8.sf
    x86_avx512_gather3div8_si,                 // llvm.x86.avx512.gather3div8.si
    x86_avx512_gather3siv2_df,                 // llvm.x86.avx512.gather3siv2.df
    x86_avx512_gather3siv2_di,                 // llvm.x86.avx512.gather3siv2.di
    x86_avx512_gather3siv4_df,                 // llvm.x86.avx512.gather3siv4.df
    x86_avx512_gather3siv4_di,                 // llvm.x86.avx512.gather3siv4.di
    x86_avx512_gather3siv4_sf,                 // llvm.x86.avx512.gather3siv4.sf
    x86_avx512_gather3siv4_si,                 // llvm.x86.avx512.gather3siv4.si
    x86_avx512_gather3siv8_sf,                 // llvm.x86.avx512.gather3siv8.sf
    x86_avx512_gather3siv8_si,                 // llvm.x86.avx512.gather3siv8.si
    x86_avx512_gatherpf_dpd_512,               // llvm.x86.avx512.gatherpf.dpd.512
    x86_avx512_gatherpf_dps_512,               // llvm.x86.avx512.gatherpf.dps.512
    x86_avx512_gatherpf_qpd_512,               // llvm.x86.avx512.gatherpf.qpd.512
    x86_avx512_gatherpf_qps_512,               // llvm.x86.avx512.gatherpf.qps.512
    x86_avx512_kadd_b,                         // llvm.x86.avx512.kadd.b
    x86_avx512_kadd_d,                         // llvm.x86.avx512.kadd.d
    x86_avx512_kadd_q,                         // llvm.x86.avx512.kadd.q
    x86_avx512_kadd_w,                         // llvm.x86.avx512.kadd.w
    x86_avx512_ktestc_b,                       // llvm.x86.avx512.ktestc.b
    x86_avx512_ktestc_d,                       // llvm.x86.avx512.ktestc.d
    x86_avx512_ktestc_q,                       // llvm.x86.avx512.ktestc.q
    x86_avx512_ktestc_w,                       // llvm.x86.avx512.ktestc.w
    x86_avx512_ktestz_b,                       // llvm.x86.avx512.ktestz.b
    x86_avx512_ktestz_d,                       // llvm.x86.avx512.ktestz.d
    x86_avx512_ktestz_q,                       // llvm.x86.avx512.ktestz.q
    x86_avx512_ktestz_w,                       // llvm.x86.avx512.ktestz.w
    x86_avx512_mask_add_sd_round,              // llvm.x86.avx512.mask.add.sd.round
    x86_avx512_mask_add_ss_round,              // llvm.x86.avx512.mask.add.ss.round
    x86_avx512_mask_cmp_pd_128,                // llvm.x86.avx512.mask.cmp.pd.128
    x86_avx512_mask_cmp_pd_256,                // llvm.x86.avx512.mask.cmp.pd.256
    x86_avx512_mask_cmp_pd_512,                // llvm.x86.avx512.mask.cmp.pd.512
    x86_avx512_mask_cmp_ps_128,                // llvm.x86.avx512.mask.cmp.ps.128
    x86_avx512_mask_cmp_ps_256,                // llvm.x86.avx512.mask.cmp.ps.256
    x86_avx512_mask_cmp_ps_512,                // llvm.x86.avx512.mask.cmp.ps.512
    x86_avx512_mask_cmp_sd,                    // llvm.x86.avx512.mask.cmp.sd
    x86_avx512_mask_cmp_ss,                    // llvm.x86.avx512.mask.cmp.ss
    x86_avx512_mask_compress,                  // llvm.x86.avx512.mask.compress
    x86_avx512_mask_cvtpd2dq_128,              // llvm.x86.avx512.mask.cvtpd2dq.128
    x86_avx512_mask_cvtpd2dq_512,              // llvm.x86.avx512.mask.cvtpd2dq.512
    x86_avx512_mask_cvtpd2ps,                  // llvm.x86.avx512.mask.cvtpd2ps
    x86_avx512_mask_cvtpd2ps_512,              // llvm.x86.avx512.mask.cvtpd2ps.512
    x86_avx512_mask_cvtpd2qq_128,              // llvm.x86.avx512.mask.cvtpd2qq.128
    x86_avx512_mask_cvtpd2qq_256,              // llvm.x86.avx512.mask.cvtpd2qq.256
    x86_avx512_mask_cvtpd2qq_512,              // llvm.x86.avx512.mask.cvtpd2qq.512
    x86_avx512_mask_cvtpd2udq_128,             // llvm.x86.avx512.mask.cvtpd2udq.128
    x86_avx512_mask_cvtpd2udq_256,             // llvm.x86.avx512.mask.cvtpd2udq.256
    x86_avx512_mask_cvtpd2udq_512,             // llvm.x86.avx512.mask.cvtpd2udq.512
    x86_avx512_mask_cvtpd2uqq_128,             // llvm.x86.avx512.mask.cvtpd2uqq.128
    x86_avx512_mask_cvtpd2uqq_256,             // llvm.x86.avx512.mask.cvtpd2uqq.256
    x86_avx512_mask_cvtpd2uqq_512,             // llvm.x86.avx512.mask.cvtpd2uqq.512
    x86_avx512_mask_cvtps2dq_128,              // llvm.x86.avx512.mask.cvtps2dq.128
    x86_avx512_mask_cvtps2dq_256,              // llvm.x86.avx512.mask.cvtps2dq.256
    x86_avx512_mask_cvtps2dq_512,              // llvm.x86.avx512.mask.cvtps2dq.512
    x86_avx512_mask_cvtps2pd_512,              // llvm.x86.avx512.mask.cvtps2pd.512
    x86_avx512_mask_cvtps2qq_128,              // llvm.x86.avx512.mask.cvtps2qq.128
    x86_avx512_mask_cvtps2qq_256,              // llvm.x86.avx512.mask.cvtps2qq.256
    x86_avx512_mask_cvtps2qq_512,              // llvm.x86.avx512.mask.cvtps2qq.512
    x86_avx512_mask_cvtps2udq_128,             // llvm.x86.avx512.mask.cvtps2udq.128
    x86_avx512_mask_cvtps2udq_256,             // llvm.x86.avx512.mask.cvtps2udq.256
    x86_avx512_mask_cvtps2udq_512,             // llvm.x86.avx512.mask.cvtps2udq.512
    x86_avx512_mask_cvtps2uqq_128,             // llvm.x86.avx512.mask.cvtps2uqq.128
    x86_avx512_mask_cvtps2uqq_256,             // llvm.x86.avx512.mask.cvtps2uqq.256
    x86_avx512_mask_cvtps2uqq_512,             // llvm.x86.avx512.mask.cvtps2uqq.512
    x86_avx512_mask_cvtqq2ps_128,              // llvm.x86.avx512.mask.cvtqq2ps.128
    x86_avx512_mask_cvtsd2ss_round,            // llvm.x86.avx512.mask.cvtsd2ss.round
    x86_avx512_mask_cvtss2sd_round,            // llvm.x86.avx512.mask.cvtss2sd.round
    x86_avx512_mask_cvttpd2dq_128,             // llvm.x86.avx512.mask.cvttpd2dq.128
    x86_avx512_mask_cvttpd2dq_512,             // llvm.x86.avx512.mask.cvttpd2dq.512
    x86_avx512_mask_cvttpd2qq_128,             // llvm.x86.avx512.mask.cvttpd2qq.128
    x86_avx512_mask_cvttpd2qq_256,             // llvm.x86.avx512.mask.cvttpd2qq.256
    x86_avx512_mask_cvttpd2qq_512,             // llvm.x86.avx512.mask.cvttpd2qq.512
    x86_avx512_mask_cvttpd2udq_128,            // llvm.x86.avx512.mask.cvttpd2udq.128
    x86_avx512_mask_cvttpd2udq_256,            // llvm.x86.avx512.mask.cvttpd2udq.256
    x86_avx512_mask_cvttpd2udq_512,            // llvm.x86.avx512.mask.cvttpd2udq.512
    x86_avx512_mask_cvttpd2uqq_128,            // llvm.x86.avx512.mask.cvttpd2uqq.128
    x86_avx512_mask_cvttpd2uqq_256,            // llvm.x86.avx512.mask.cvttpd2uqq.256
    x86_avx512_mask_cvttpd2uqq_512,            // llvm.x86.avx512.mask.cvttpd2uqq.512
    x86_avx512_mask_cvttps2dq_512,             // llvm.x86.avx512.mask.cvttps2dq.512
    x86_avx512_mask_cvttps2qq_128,             // llvm.x86.avx512.mask.cvttps2qq.128
    x86_avx512_mask_cvttps2qq_256,             // llvm.x86.avx512.mask.cvttps2qq.256
    x86_avx512_mask_cvttps2qq_512,             // llvm.x86.avx512.mask.cvttps2qq.512
    x86_avx512_mask_cvttps2udq_128,            // llvm.x86.avx512.mask.cvttps2udq.128
    x86_avx512_mask_cvttps2udq_256,            // llvm.x86.avx512.mask.cvttps2udq.256
    x86_avx512_mask_cvttps2udq_512,            // llvm.x86.avx512.mask.cvttps2udq.512
    x86_avx512_mask_cvttps2uqq_128,            // llvm.x86.avx512.mask.cvttps2uqq.128
    x86_avx512_mask_cvttps2uqq_256,            // llvm.x86.avx512.mask.cvttps2uqq.256
    x86_avx512_mask_cvttps2uqq_512,            // llvm.x86.avx512.mask.cvttps2uqq.512
    x86_avx512_mask_cvtuqq2ps_128,             // llvm.x86.avx512.mask.cvtuqq2ps.128
    x86_avx512_mask_div_sd_round,              // llvm.x86.avx512.mask.div.sd.round
    x86_avx512_mask_div_ss_round,              // llvm.x86.avx512.mask.div.ss.round
    x86_avx512_mask_expand,                    // llvm.x86.avx512.mask.expand
    x86_avx512_mask_fixupimm_pd_128,           // llvm.x86.avx512.mask.fixupimm.pd.128
    x86_avx512_mask_fixupimm_pd_256,           // llvm.x86.avx512.mask.fixupimm.pd.256
    x86_avx512_mask_fixupimm_pd_512,           // llvm.x86.avx512.mask.fixupimm.pd.512
    x86_avx512_mask_fixupimm_ps_128,           // llvm.x86.avx512.mask.fixupimm.ps.128
    x86_avx512_mask_fixupimm_ps_256,           // llvm.x86.avx512.mask.fixupimm.ps.256
    x86_avx512_mask_fixupimm_ps_512,           // llvm.x86.avx512.mask.fixupimm.ps.512
    x86_avx512_mask_fixupimm_sd,               // llvm.x86.avx512.mask.fixupimm.sd
    x86_avx512_mask_fixupimm_ss,               // llvm.x86.avx512.mask.fixupimm.ss
    x86_avx512_mask_fpclass_sd,                // llvm.x86.avx512.mask.fpclass.sd
    x86_avx512_mask_fpclass_ss,                // llvm.x86.avx512.mask.fpclass.ss
    x86_avx512_mask_gather_dpd_512,            // llvm.x86.avx512.mask.gather.dpd.512
    x86_avx512_mask_gather_dpi_512,            // llvm.x86.avx512.mask.gather.dpi.512
    x86_avx512_mask_gather_dpq_512,            // llvm.x86.avx512.mask.gather.dpq.512
    x86_avx512_mask_gather_dps_512,            // llvm.x86.avx512.mask.gather.dps.512
    x86_avx512_mask_gather_qpd_512,            // llvm.x86.avx512.mask.gather.qpd.512
    x86_avx512_mask_gather_qpi_512,            // llvm.x86.avx512.mask.gather.qpi.512
    x86_avx512_mask_gather_qpq_512,            // llvm.x86.avx512.mask.gather.qpq.512
    x86_avx512_mask_gather_qps_512,            // llvm.x86.avx512.mask.gather.qps.512
    x86_avx512_mask_gather3div2_df,            // llvm.x86.avx512.mask.gather3div2.df
    x86_avx512_mask_gather3div2_di,            // llvm.x86.avx512.mask.gather3div2.di
    x86_avx512_mask_gather3div4_df,            // llvm.x86.avx512.mask.gather3div4.df
    x86_avx512_mask_gather3div4_di,            // llvm.x86.avx512.mask.gather3div4.di
    x86_avx512_mask_gather3div4_sf,            // llvm.x86.avx512.mask.gather3div4.sf
    x86_avx512_mask_gather3div4_si,            // llvm.x86.avx512.mask.gather3div4.si
    x86_avx512_mask_gather3div8_sf,            // llvm.x86.avx512.mask.gather3div8.sf
    x86_avx512_mask_gather3div8_si,            // llvm.x86.avx512.mask.gather3div8.si
    x86_avx512_mask_gather3siv2_df,            // llvm.x86.avx512.mask.gather3siv2.df
    x86_avx512_mask_gather3siv2_di,            // llvm.x86.avx512.mask.gather3siv2.di
    x86_avx512_mask_gather3siv4_df,            // llvm.x86.avx512.mask.gather3siv4.df
    x86_avx512_mask_gather3siv4_di,            // llvm.x86.avx512.mask.gather3siv4.di
    x86_avx512_mask_gather3siv4_sf,            // llvm.x86.avx512.mask.gather3siv4.sf
    x86_avx512_mask_gather3siv4_si,            // llvm.x86.avx512.mask.gather3siv4.si
    x86_avx512_mask_gather3siv8_sf,            // llvm.x86.avx512.mask.gather3siv8.sf
    x86_avx512_mask_gather3siv8_si,            // llvm.x86.avx512.mask.gather3siv8.si
    x86_avx512_mask_getexp_pd_128,             // llvm.x86.avx512.mask.getexp.pd.128
    x86_avx512_mask_getexp_pd_256,             // llvm.x86.avx512.mask.getexp.pd.256
    x86_avx512_mask_getexp_pd_512,             // llvm.x86.avx512.mask.getexp.pd.512
    x86_avx512_mask_getexp_ps_128,             // llvm.x86.avx512.mask.getexp.ps.128
    x86_avx512_mask_getexp_ps_256,             // llvm.x86.avx512.mask.getexp.ps.256
    x86_avx512_mask_getexp_ps_512,             // llvm.x86.avx512.mask.getexp.ps.512
    x86_avx512_mask_getexp_sd,                 // llvm.x86.avx512.mask.getexp.sd
    x86_avx512_mask_getexp_ss,                 // llvm.x86.avx512.mask.getexp.ss
    x86_avx512_mask_getmant_pd_128,            // llvm.x86.avx512.mask.getmant.pd.128
    x86_avx512_mask_getmant_pd_256,            // llvm.x86.avx512.mask.getmant.pd.256
    x86_avx512_mask_getmant_pd_512,            // llvm.x86.avx512.mask.getmant.pd.512
    x86_avx512_mask_getmant_ps_128,            // llvm.x86.avx512.mask.getmant.ps.128
    x86_avx512_mask_getmant_ps_256,            // llvm.x86.avx512.mask.getmant.ps.256
    x86_avx512_mask_getmant_ps_512,            // llvm.x86.avx512.mask.getmant.ps.512
    x86_avx512_mask_getmant_sd,                // llvm.x86.avx512.mask.getmant.sd
    x86_avx512_mask_getmant_ss,                // llvm.x86.avx512.mask.getmant.ss
    x86_avx512_mask_max_sd_round,              // llvm.x86.avx512.mask.max.sd.round
    x86_avx512_mask_max_ss_round,              // llvm.x86.avx512.mask.max.ss.round
    x86_avx512_mask_min_sd_round,              // llvm.x86.avx512.mask.min.sd.round
    x86_avx512_mask_min_ss_round,              // llvm.x86.avx512.mask.min.ss.round
    x86_avx512_mask_mul_sd_round,              // llvm.x86.avx512.mask.mul.sd.round
    x86_avx512_mask_mul_ss_round,              // llvm.x86.avx512.mask.mul.ss.round
    x86_avx512_mask_pmov_db_128,               // llvm.x86.avx512.mask.pmov.db.128
    x86_avx512_mask_pmov_db_256,               // llvm.x86.avx512.mask.pmov.db.256
    x86_avx512_mask_pmov_db_512,               // llvm.x86.avx512.mask.pmov.db.512
    x86_avx512_mask_pmov_db_mem_128,           // llvm.x86.avx512.mask.pmov.db.mem.128
    x86_avx512_mask_pmov_db_mem_256,           // llvm.x86.avx512.mask.pmov.db.mem.256
    x86_avx512_mask_pmov_db_mem_512,           // llvm.x86.avx512.mask.pmov.db.mem.512
    x86_avx512_mask_pmov_dw_128,               // llvm.x86.avx512.mask.pmov.dw.128
    x86_avx512_mask_pmov_dw_256,               // llvm.x86.avx512.mask.pmov.dw.256
    x86_avx512_mask_pmov_dw_512,               // llvm.x86.avx512.mask.pmov.dw.512
    x86_avx512_mask_pmov_dw_mem_128,           // llvm.x86.avx512.mask.pmov.dw.mem.128
    x86_avx512_mask_pmov_dw_mem_256,           // llvm.x86.avx512.mask.pmov.dw.mem.256
    x86_avx512_mask_pmov_dw_mem_512,           // llvm.x86.avx512.mask.pmov.dw.mem.512
    x86_avx512_mask_pmov_qb_128,               // llvm.x86.avx512.mask.pmov.qb.128
    x86_avx512_mask_pmov_qb_256,               // llvm.x86.avx512.mask.pmov.qb.256
    x86_avx512_mask_pmov_qb_512,               // llvm.x86.avx512.mask.pmov.qb.512
    x86_avx512_mask_pmov_qb_mem_128,           // llvm.x86.avx512.mask.pmov.qb.mem.128
    x86_avx512_mask_pmov_qb_mem_256,           // llvm.x86.avx512.mask.pmov.qb.mem.256
    x86_avx512_mask_pmov_qb_mem_512,           // llvm.x86.avx512.mask.pmov.qb.mem.512
    x86_avx512_mask_pmov_qd_128,               // llvm.x86.avx512.mask.pmov.qd.128
    x86_avx512_mask_pmov_qd_mem_128,           // llvm.x86.avx512.mask.pmov.qd.mem.128
    x86_avx512_mask_pmov_qd_mem_256,           // llvm.x86.avx512.mask.pmov.qd.mem.256
    x86_avx512_mask_pmov_qd_mem_512,           // llvm.x86.avx512.mask.pmov.qd.mem.512
    x86_avx512_mask_pmov_qw_128,               // llvm.x86.avx512.mask.pmov.qw.128
    x86_avx512_mask_pmov_qw_256,               // llvm.x86.avx512.mask.pmov.qw.256
    x86_avx512_mask_pmov_qw_512,               // llvm.x86.avx512.mask.pmov.qw.512
    x86_avx512_mask_pmov_qw_mem_128,           // llvm.x86.avx512.mask.pmov.qw.mem.128
    x86_avx512_mask_pmov_qw_mem_256,           // llvm.x86.avx512.mask.pmov.qw.mem.256
    x86_avx512_mask_pmov_qw_mem_512,           // llvm.x86.avx512.mask.pmov.qw.mem.512
    x86_avx512_mask_pmov_wb_128,               // llvm.x86.avx512.mask.pmov.wb.128
    x86_avx512_mask_pmov_wb_mem_128,           // llvm.x86.avx512.mask.pmov.wb.mem.128
    x86_avx512_mask_pmov_wb_mem_256,           // llvm.x86.avx512.mask.pmov.wb.mem.256
    x86_avx512_mask_pmov_wb_mem_512,           // llvm.x86.avx512.mask.pmov.wb.mem.512
    x86_avx512_mask_pmovs_db_128,              // llvm.x86.avx512.mask.pmovs.db.128
    x86_avx512_mask_pmovs_db_256,              // llvm.x86.avx512.mask.pmovs.db.256
    x86_avx512_mask_pmovs_db_512,              // llvm.x86.avx512.mask.pmovs.db.512
    x86_avx512_mask_pmovs_db_mem_128,          // llvm.x86.avx512.mask.pmovs.db.mem.128
    x86_avx512_mask_pmovs_db_mem_256,          // llvm.x86.avx512.mask.pmovs.db.mem.256
    x86_avx512_mask_pmovs_db_mem_512,          // llvm.x86.avx512.mask.pmovs.db.mem.512
    x86_avx512_mask_pmovs_dw_128,              // llvm.x86.avx512.mask.pmovs.dw.128
    x86_avx512_mask_pmovs_dw_256,              // llvm.x86.avx512.mask.pmovs.dw.256
    x86_avx512_mask_pmovs_dw_512,              // llvm.x86.avx512.mask.pmovs.dw.512
    x86_avx512_mask_pmovs_dw_mem_128,          // llvm.x86.avx512.mask.pmovs.dw.mem.128
    x86_avx512_mask_pmovs_dw_mem_256,          // llvm.x86.avx512.mask.pmovs.dw.mem.256
    x86_avx512_mask_pmovs_dw_mem_512,          // llvm.x86.avx512.mask.pmovs.dw.mem.512
    x86_avx512_mask_pmovs_qb_128,              // llvm.x86.avx512.mask.pmovs.qb.128
    x86_avx512_mask_pmovs_qb_256,              // llvm.x86.avx512.mask.pmovs.qb.256
    x86_avx512_mask_pmovs_qb_512,              // llvm.x86.avx512.mask.pmovs.qb.512
    x86_avx512_mask_pmovs_qb_mem_128,          // llvm.x86.avx512.mask.pmovs.qb.mem.128
    x86_avx512_mask_pmovs_qb_mem_256,          // llvm.x86.avx512.mask.pmovs.qb.mem.256
    x86_avx512_mask_pmovs_qb_mem_512,          // llvm.x86.avx512.mask.pmovs.qb.mem.512
    x86_avx512_mask_pmovs_qd_128,              // llvm.x86.avx512.mask.pmovs.qd.128
    x86_avx512_mask_pmovs_qd_256,              // llvm.x86.avx512.mask.pmovs.qd.256
    x86_avx512_mask_pmovs_qd_512,              // llvm.x86.avx512.mask.pmovs.qd.512
    x86_avx512_mask_pmovs_qd_mem_128,          // llvm.x86.avx512.mask.pmovs.qd.mem.128
    x86_avx512_mask_pmovs_qd_mem_256,          // llvm.x86.avx512.mask.pmovs.qd.mem.256
    x86_avx512_mask_pmovs_qd_mem_512,          // llvm.x86.avx512.mask.pmovs.qd.mem.512
    x86_avx512_mask_pmovs_qw_128,              // llvm.x86.avx512.mask.pmovs.qw.128
    x86_avx512_mask_pmovs_qw_256,              // llvm.x86.avx512.mask.pmovs.qw.256
    x86_avx512_mask_pmovs_qw_512,              // llvm.x86.avx512.mask.pmovs.qw.512
    x86_avx512_mask_pmovs_qw_mem_128,          // llvm.x86.avx512.mask.pmovs.qw.mem.128
    x86_avx512_mask_pmovs_qw_mem_256,          // llvm.x86.avx512.mask.pmovs.qw.mem.256
    x86_avx512_mask_pmovs_qw_mem_512,          // llvm.x86.avx512.mask.pmovs.qw.mem.512
    x86_avx512_mask_pmovs_wb_128,              // llvm.x86.avx512.mask.pmovs.wb.128
    x86_avx512_mask_pmovs_wb_256,              // llvm.x86.avx512.mask.pmovs.wb.256
    x86_avx512_mask_pmovs_wb_512,              // llvm.x86.avx512.mask.pmovs.wb.512
    x86_avx512_mask_pmovs_wb_mem_128,          // llvm.x86.avx512.mask.pmovs.wb.mem.128
    x86_avx512_mask_pmovs_wb_mem_256,          // llvm.x86.avx512.mask.pmovs.wb.mem.256
    x86_avx512_mask_pmovs_wb_mem_512,          // llvm.x86.avx512.mask.pmovs.wb.mem.512
    x86_avx512_mask_pmovus_db_128,             // llvm.x86.avx512.mask.pmovus.db.128
    x86_avx512_mask_pmovus_db_256,             // llvm.x86.avx512.mask.pmovus.db.256
    x86_avx512_mask_pmovus_db_512,             // llvm.x86.avx512.mask.pmovus.db.512
    x86_avx512_mask_pmovus_db_mem_128,         // llvm.x86.avx512.mask.pmovus.db.mem.128
    x86_avx512_mask_pmovus_db_mem_256,         // llvm.x86.avx512.mask.pmovus.db.mem.256
    x86_avx512_mask_pmovus_db_mem_512,         // llvm.x86.avx512.mask.pmovus.db.mem.512
    x86_avx512_mask_pmovus_dw_128,             // llvm.x86.avx512.mask.pmovus.dw.128
    x86_avx512_mask_pmovus_dw_256,             // llvm.x86.avx512.mask.pmovus.dw.256
    x86_avx512_mask_pmovus_dw_512,             // llvm.x86.avx512.mask.pmovus.dw.512
    x86_avx512_mask_pmovus_dw_mem_128,         // llvm.x86.avx512.mask.pmovus.dw.mem.128
    x86_avx512_mask_pmovus_dw_mem_256,         // llvm.x86.avx512.mask.pmovus.dw.mem.256
    x86_avx512_mask_pmovus_dw_mem_512,         // llvm.x86.avx512.mask.pmovus.dw.mem.512
    x86_avx512_mask_pmovus_qb_128,             // llvm.x86.avx512.mask.pmovus.qb.128
    x86_avx512_mask_pmovus_qb_256,             // llvm.x86.avx512.mask.pmovus.qb.256
    x86_avx512_mask_pmovus_qb_512,             // llvm.x86.avx512.mask.pmovus.qb.512
    x86_avx512_mask_pmovus_qb_mem_128,         // llvm.x86.avx512.mask.pmovus.qb.mem.128
    x86_avx512_mask_pmovus_qb_mem_256,         // llvm.x86.avx512.mask.pmovus.qb.mem.256
    x86_avx512_mask_pmovus_qb_mem_512,         // llvm.x86.avx512.mask.pmovus.qb.mem.512
    x86_avx512_mask_pmovus_qd_128,             // llvm.x86.avx512.mask.pmovus.qd.128
    x86_avx512_mask_pmovus_qd_256,             // llvm.x86.avx512.mask.pmovus.qd.256
    x86_avx512_mask_pmovus_qd_512,             // llvm.x86.avx512.mask.pmovus.qd.512
    x86_avx512_mask_pmovus_qd_mem_128,         // llvm.x86.avx512.mask.pmovus.qd.mem.128
    x86_avx512_mask_pmovus_qd_mem_256,         // llvm.x86.avx512.mask.pmovus.qd.mem.256
    x86_avx512_mask_pmovus_qd_mem_512,         // llvm.x86.avx512.mask.pmovus.qd.mem.512
    x86_avx512_mask_pmovus_qw_128,             // llvm.x86.avx512.mask.pmovus.qw.128
    x86_avx512_mask_pmovus_qw_256,             // llvm.x86.avx512.mask.pmovus.qw.256
    x86_avx512_mask_pmovus_qw_512,             // llvm.x86.avx512.mask.pmovus.qw.512
    x86_avx512_mask_pmovus_qw_mem_128,         // llvm.x86.avx512.mask.pmovus.qw.mem.128
    x86_avx512_mask_pmovus_qw_mem_256,         // llvm.x86.avx512.mask.pmovus.qw.mem.256
    x86_avx512_mask_pmovus_qw_mem_512,         // llvm.x86.avx512.mask.pmovus.qw.mem.512
    x86_avx512_mask_pmovus_wb_128,             // llvm.x86.avx512.mask.pmovus.wb.128
    x86_avx512_mask_pmovus_wb_256,             // llvm.x86.avx512.mask.pmovus.wb.256
    x86_avx512_mask_pmovus_wb_512,             // llvm.x86.avx512.mask.pmovus.wb.512
    x86_avx512_mask_pmovus_wb_mem_128,         // llvm.x86.avx512.mask.pmovus.wb.mem.128
    x86_avx512_mask_pmovus_wb_mem_256,         // llvm.x86.avx512.mask.pmovus.wb.mem.256
    x86_avx512_mask_pmovus_wb_mem_512,         // llvm.x86.avx512.mask.pmovus.wb.mem.512
    x86_avx512_mask_range_pd_128,              // llvm.x86.avx512.mask.range.pd.128
    x86_avx512_mask_range_pd_256,              // llvm.x86.avx512.mask.range.pd.256
    x86_avx512_mask_range_pd_512,              // llvm.x86.avx512.mask.range.pd.512
    x86_avx512_mask_range_ps_128,              // llvm.x86.avx512.mask.range.ps.128
    x86_avx512_mask_range_ps_256,              // llvm.x86.avx512.mask.range.ps.256
    x86_avx512_mask_range_ps_512,              // llvm.x86.avx512.mask.range.ps.512
    x86_avx512_mask_range_sd,                  // llvm.x86.avx512.mask.range.sd
    x86_avx512_mask_range_ss,                  // llvm.x86.avx512.mask.range.ss
    x86_avx512_mask_reduce_pd_128,             // llvm.x86.avx512.mask.reduce.pd.128
    x86_avx512_mask_reduce_pd_256,             // llvm.x86.avx512.mask.reduce.pd.256
    x86_avx512_mask_reduce_pd_512,             // llvm.x86.avx512.mask.reduce.pd.512
    x86_avx512_mask_reduce_ps_128,             // llvm.x86.avx512.mask.reduce.ps.128
    x86_avx512_mask_reduce_ps_256,             // llvm.x86.avx512.mask.reduce.ps.256
    x86_avx512_mask_reduce_ps_512,             // llvm.x86.avx512.mask.reduce.ps.512
    x86_avx512_mask_reduce_sd,                 // llvm.x86.avx512.mask.reduce.sd
    x86_avx512_mask_reduce_ss,                 // llvm.x86.avx512.mask.reduce.ss
    x86_avx512_mask_rndscale_pd_128,           // llvm.x86.avx512.mask.rndscale.pd.128
    x86_avx512_mask_rndscale_pd_256,           // llvm.x86.avx512.mask.rndscale.pd.256
    x86_avx512_mask_rndscale_pd_512,           // llvm.x86.avx512.mask.rndscale.pd.512
    x86_avx512_mask_rndscale_ps_128,           // llvm.x86.avx512.mask.rndscale.ps.128
    x86_avx512_mask_rndscale_ps_256,           // llvm.x86.avx512.mask.rndscale.ps.256
    x86_avx512_mask_rndscale_ps_512,           // llvm.x86.avx512.mask.rndscale.ps.512
    x86_avx512_mask_rndscale_sd,               // llvm.x86.avx512.mask.rndscale.sd
    x86_avx512_mask_rndscale_ss,               // llvm.x86.avx512.mask.rndscale.ss
    x86_avx512_mask_scalef_pd_128,             // llvm.x86.avx512.mask.scalef.pd.128
    x86_avx512_mask_scalef_pd_256,             // llvm.x86.avx512.mask.scalef.pd.256
    x86_avx512_mask_scalef_pd_512,             // llvm.x86.avx512.mask.scalef.pd.512
    x86_avx512_mask_scalef_ps_128,             // llvm.x86.avx512.mask.scalef.ps.128
    x86_avx512_mask_scalef_ps_256,             // llvm.x86.avx512.mask.scalef.ps.256
    x86_avx512_mask_scalef_ps_512,             // llvm.x86.avx512.mask.scalef.ps.512
    x86_avx512_mask_scalef_sd,                 // llvm.x86.avx512.mask.scalef.sd
    x86_avx512_mask_scalef_ss,                 // llvm.x86.avx512.mask.scalef.ss
    x86_avx512_mask_scatter_dpd_512,           // llvm.x86.avx512.mask.scatter.dpd.512
    x86_avx512_mask_scatter_dpi_512,           // llvm.x86.avx512.mask.scatter.dpi.512
    x86_avx512_mask_scatter_dpq_512,           // llvm.x86.avx512.mask.scatter.dpq.512
    x86_avx512_mask_scatter_dps_512,           // llvm.x86.avx512.mask.scatter.dps.512
    x86_avx512_mask_scatter_qpd_512,           // llvm.x86.avx512.mask.scatter.qpd.512
    x86_avx512_mask_scatter_qpi_512,           // llvm.x86.avx512.mask.scatter.qpi.512
    x86_avx512_mask_scatter_qpq_512,           // llvm.x86.avx512.mask.scatter.qpq.512
    x86_avx512_mask_scatter_qps_512,           // llvm.x86.avx512.mask.scatter.qps.512
    x86_avx512_mask_scatterdiv2_df,            // llvm.x86.avx512.mask.scatterdiv2.df
    x86_avx512_mask_scatterdiv2_di,            // llvm.x86.avx512.mask.scatterdiv2.di
    x86_avx512_mask_scatterdiv4_df,            // llvm.x86.avx512.mask.scatterdiv4.df
    x86_avx512_mask_scatterdiv4_di,            // llvm.x86.avx512.mask.scatterdiv4.di
    x86_avx512_mask_scatterdiv4_sf,            // llvm.x86.avx512.mask.scatterdiv4.sf
    x86_avx512_mask_scatterdiv4_si,            // llvm.x86.avx512.mask.scatterdiv4.si
    x86_avx512_mask_scatterdiv8_sf,            // llvm.x86.avx512.mask.scatterdiv8.sf
    x86_avx512_mask_scatterdiv8_si,            // llvm.x86.avx512.mask.scatterdiv8.si
    x86_avx512_mask_scattersiv2_df,            // llvm.x86.avx512.mask.scattersiv2.df
    x86_avx512_mask_scattersiv2_di,            // llvm.x86.avx512.mask.scattersiv2.di
    x86_avx512_mask_scattersiv4_df,            // llvm.x86.avx512.mask.scattersiv4.df
    x86_avx512_mask_scattersiv4_di,            // llvm.x86.avx512.mask.scattersiv4.di
    x86_avx512_mask_scattersiv4_sf,            // llvm.x86.avx512.mask.scattersiv4.sf
    x86_avx512_mask_scattersiv4_si,            // llvm.x86.avx512.mask.scattersiv4.si
    x86_avx512_mask_scattersiv8_sf,            // llvm.x86.avx512.mask.scattersiv8.sf
    x86_avx512_mask_scattersiv8_si,            // llvm.x86.avx512.mask.scattersiv8.si
    x86_avx512_mask_sqrt_sd,                   // llvm.x86.avx512.mask.sqrt.sd
    x86_avx512_mask_sqrt_ss,                   // llvm.x86.avx512.mask.sqrt.ss
    x86_avx512_mask_sub_sd_round,              // llvm.x86.avx512.mask.sub.sd.round
    x86_avx512_mask_sub_ss_round,              // llvm.x86.avx512.mask.sub.ss.round
    x86_avx512_mask_vcvtph2ps_512,             // llvm.x86.avx512.mask.vcvtph2ps.512
    x86_avx512_mask_vcvtps2ph_128,             // llvm.x86.avx512.mask.vcvtps2ph.128
    x86_avx512_mask_vcvtps2ph_256,             // llvm.x86.avx512.mask.vcvtps2ph.256
    x86_avx512_mask_vcvtps2ph_512,             // llvm.x86.avx512.mask.vcvtps2ph.512
    x86_avx512_maskz_fixupimm_pd_128,          // llvm.x86.avx512.maskz.fixupimm.pd.128
    x86_avx512_maskz_fixupimm_pd_256,          // llvm.x86.avx512.maskz.fixupimm.pd.256
    x86_avx512_maskz_fixupimm_pd_512,          // llvm.x86.avx512.maskz.fixupimm.pd.512
    x86_avx512_maskz_fixupimm_ps_128,          // llvm.x86.avx512.maskz.fixupimm.ps.128
    x86_avx512_maskz_fixupimm_ps_256,          // llvm.x86.avx512.maskz.fixupimm.ps.256
    x86_avx512_maskz_fixupimm_ps_512,          // llvm.x86.avx512.maskz.fixupimm.ps.512
    x86_avx512_maskz_fixupimm_sd,              // llvm.x86.avx512.maskz.fixupimm.sd
    x86_avx512_maskz_fixupimm_ss,              // llvm.x86.avx512.maskz.fixupimm.ss
    x86_avx512_max_pd_512,                     // llvm.x86.avx512.max.pd.512
    x86_avx512_max_ps_512,                     // llvm.x86.avx512.max.ps.512
    x86_avx512_min_pd_512,                     // llvm.x86.avx512.min.pd.512
    x86_avx512_min_ps_512,                     // llvm.x86.avx512.min.ps.512
    x86_avx512_mul_pd_512,                     // llvm.x86.avx512.mul.pd.512
    x86_avx512_mul_ps_512,                     // llvm.x86.avx512.mul.ps.512
    x86_avx512_packssdw_512,                   // llvm.x86.avx512.packssdw.512
    x86_avx512_packsswb_512,                   // llvm.x86.avx512.packsswb.512
    x86_avx512_packusdw_512,                   // llvm.x86.avx512.packusdw.512
    x86_avx512_packuswb_512,                   // llvm.x86.avx512.packuswb.512
    x86_avx512_pavg_b_512,                     // llvm.x86.avx512.pavg.b.512
    x86_avx512_pavg_w_512,                     // llvm.x86.avx512.pavg.w.512
    x86_avx512_permvar_df_256,                 // llvm.x86.avx512.permvar.df.256
    x86_avx512_permvar_df_512,                 // llvm.x86.avx512.permvar.df.512
    x86_avx512_permvar_di_256,                 // llvm.x86.avx512.permvar.di.256
    x86_avx512_permvar_di_512,                 // llvm.x86.avx512.permvar.di.512
    x86_avx512_permvar_hi_128,                 // llvm.x86.avx512.permvar.hi.128
    x86_avx512_permvar_hi_256,                 // llvm.x86.avx512.permvar.hi.256
    x86_avx512_permvar_hi_512,                 // llvm.x86.avx512.permvar.hi.512
    x86_avx512_permvar_qi_128,                 // llvm.x86.avx512.permvar.qi.128
    x86_avx512_permvar_qi_256,                 // llvm.x86.avx512.permvar.qi.256
    x86_avx512_permvar_qi_512,                 // llvm.x86.avx512.permvar.qi.512
    x86_avx512_permvar_sf_512,                 // llvm.x86.avx512.permvar.sf.512
    x86_avx512_permvar_si_512,                 // llvm.x86.avx512.permvar.si.512
    x86_avx512_pmaddubs_w_512,                 // llvm.x86.avx512.pmaddubs.w.512
    x86_avx512_pmaddw_d_512,                   // llvm.x86.avx512.pmaddw.d.512
    x86_avx512_pmul_hr_sw_512,                 // llvm.x86.avx512.pmul.hr.sw.512
    x86_avx512_pmulh_w_512,                    // llvm.x86.avx512.pmulh.w.512
    x86_avx512_pmulhu_w_512,                   // llvm.x86.avx512.pmulhu.w.512
    x86_avx512_pmultishift_qb_128,             // llvm.x86.avx512.pmultishift.qb.128
    x86_avx512_pmultishift_qb_256,             // llvm.x86.avx512.pmultishift.qb.256
    x86_avx512_pmultishift_qb_512,             // llvm.x86.avx512.pmultishift.qb.512
    x86_avx512_psad_bw_512,                    // llvm.x86.avx512.psad.bw.512
    x86_avx512_pshuf_b_512,                    // llvm.x86.avx512.pshuf.b.512
    x86_avx512_psll_d_512,                     // llvm.x86.avx512.psll.d.512
    x86_avx512_psll_q_512,                     // llvm.x86.avx512.psll.q.512
    x86_avx512_psll_w_512,                     // llvm.x86.avx512.psll.w.512
    x86_avx512_pslli_d_512,                    // llvm.x86.avx512.pslli.d.512
    x86_avx512_pslli_q_512,                    // llvm.x86.avx512.pslli.q.512
    x86_avx512_pslli_w_512,                    // llvm.x86.avx512.pslli.w.512
    x86_avx512_psllv_d_512,                    // llvm.x86.avx512.psllv.d.512
    x86_avx512_psllv_q_512,                    // llvm.x86.avx512.psllv.q.512
    x86_avx512_psllv_w_128,                    // llvm.x86.avx512.psllv.w.128
    x86_avx512_psllv_w_256,                    // llvm.x86.avx512.psllv.w.256
    x86_avx512_psllv_w_512,                    // llvm.x86.avx512.psllv.w.512
    x86_avx512_psra_d_512,                     // llvm.x86.avx512.psra.d.512
    x86_avx512_psra_q_128,                     // llvm.x86.avx512.psra.q.128
    x86_avx512_psra_q_256,                     // llvm.x86.avx512.psra.q.256
    x86_avx512_psra_q_512,                     // llvm.x86.avx512.psra.q.512
    x86_avx512_psra_w_512,                     // llvm.x86.avx512.psra.w.512
    x86_avx512_psrai_d_512,                    // llvm.x86.avx512.psrai.d.512
    x86_avx512_psrai_q_128,                    // llvm.x86.avx512.psrai.q.128
    x86_avx512_psrai_q_256,                    // llvm.x86.avx512.psrai.q.256
    x86_avx512_psrai_q_512,                    // llvm.x86.avx512.psrai.q.512
    x86_avx512_psrai_w_512,                    // llvm.x86.avx512.psrai.w.512
    x86_avx512_psrav_d_512,                    // llvm.x86.avx512.psrav.d.512
    x86_avx512_psrav_q_128,                    // llvm.x86.avx512.psrav.q.128
    x86_avx512_psrav_q_256,                    // llvm.x86.avx512.psrav.q.256
    x86_avx512_psrav_q_512,                    // llvm.x86.avx512.psrav.q.512
    x86_avx512_psrav_w_128,                    // llvm.x86.avx512.psrav.w.128
    x86_avx512_psrav_w_256,                    // llvm.x86.avx512.psrav.w.256
    x86_avx512_psrav_w_512,                    // llvm.x86.avx512.psrav.w.512
    x86_avx512_psrl_d_512,                     // llvm.x86.avx512.psrl.d.512
    x86_avx512_psrl_q_512,                     // llvm.x86.avx512.psrl.q.512
    x86_avx512_psrl_w_512,                     // llvm.x86.avx512.psrl.w.512
    x86_avx512_psrli_d_512,                    // llvm.x86.avx512.psrli.d.512
    x86_avx512_psrli_q_512,                    // llvm.x86.avx512.psrli.q.512
    x86_avx512_psrli_w_512,                    // llvm.x86.avx512.psrli.w.512
    x86_avx512_psrlv_d_512,                    // llvm.x86.avx512.psrlv.d.512
    x86_avx512_psrlv_q_512,                    // llvm.x86.avx512.psrlv.q.512
    x86_avx512_psrlv_w_128,                    // llvm.x86.avx512.psrlv.w.128
    x86_avx512_psrlv_w_256,                    // llvm.x86.avx512.psrlv.w.256
    x86_avx512_psrlv_w_512,                    // llvm.x86.avx512.psrlv.w.512
    x86_avx512_pternlog_d_128,                 // llvm.x86.avx512.pternlog.d.128
    x86_avx512_pternlog_d_256,                 // llvm.x86.avx512.pternlog.d.256
    x86_avx512_pternlog_d_512,                 // llvm.x86.avx512.pternlog.d.512
    x86_avx512_pternlog_q_128,                 // llvm.x86.avx512.pternlog.q.128
    x86_avx512_pternlog_q_256,                 // llvm.x86.avx512.pternlog.q.256
    x86_avx512_pternlog_q_512,                 // llvm.x86.avx512.pternlog.q.512
    x86_avx512_rcp14_pd_128,                   // llvm.x86.avx512.rcp14.pd.128
    x86_avx512_rcp14_pd_256,                   // llvm.x86.avx512.rcp14.pd.256
    x86_avx512_rcp14_pd_512,                   // llvm.x86.avx512.rcp14.pd.512
    x86_avx512_rcp14_ps_128,                   // llvm.x86.avx512.rcp14.ps.128
    x86_avx512_rcp14_ps_256,                   // llvm.x86.avx512.rcp14.ps.256
    x86_avx512_rcp14_ps_512,                   // llvm.x86.avx512.rcp14.ps.512
    x86_avx512_rcp14_sd,                       // llvm.x86.avx512.rcp14.sd
    x86_avx512_rcp14_ss,                       // llvm.x86.avx512.rcp14.ss
    x86_avx512_rcp28_pd,                       // llvm.x86.avx512.rcp28.pd
    x86_avx512_rcp28_ps,                       // llvm.x86.avx512.rcp28.ps
    x86_avx512_rcp28_sd,                       // llvm.x86.avx512.rcp28.sd
    x86_avx512_rcp28_ss,                       // llvm.x86.avx512.rcp28.ss
    x86_avx512_rsqrt14_pd_128,                 // llvm.x86.avx512.rsqrt14.pd.128
    x86_avx512_rsqrt14_pd_256,                 // llvm.x86.avx512.rsqrt14.pd.256
    x86_avx512_rsqrt14_pd_512,                 // llvm.x86.avx512.rsqrt14.pd.512
    x86_avx512_rsqrt14_ps_128,                 // llvm.x86.avx512.rsqrt14.ps.128
    x86_avx512_rsqrt14_ps_256,                 // llvm.x86.avx512.rsqrt14.ps.256
    x86_avx512_rsqrt14_ps_512,                 // llvm.x86.avx512.rsqrt14.ps.512
    x86_avx512_rsqrt14_sd,                     // llvm.x86.avx512.rsqrt14.sd
    x86_avx512_rsqrt14_ss,                     // llvm.x86.avx512.rsqrt14.ss
    x86_avx512_rsqrt28_pd,                     // llvm.x86.avx512.rsqrt28.pd
    x86_avx512_rsqrt28_ps,                     // llvm.x86.avx512.rsqrt28.ps
    x86_avx512_rsqrt28_sd,                     // llvm.x86.avx512.rsqrt28.sd
    x86_avx512_rsqrt28_ss,                     // llvm.x86.avx512.rsqrt28.ss
    x86_avx512_scatter_dpd_512,                // llvm.x86.avx512.scatter.dpd.512
    x86_avx512_scatter_dpi_512,                // llvm.x86.avx512.scatter.dpi.512
    x86_avx512_scatter_dpq_512,                // llvm.x86.avx512.scatter.dpq.512
    x86_avx512_scatter_dps_512,                // llvm.x86.avx512.scatter.dps.512
    x86_avx512_scatter_qpd_512,                // llvm.x86.avx512.scatter.qpd.512
    x86_avx512_scatter_qpi_512,                // llvm.x86.avx512.scatter.qpi.512
    x86_avx512_scatter_qpq_512,                // llvm.x86.avx512.scatter.qpq.512
    x86_avx512_scatter_qps_512,                // llvm.x86.avx512.scatter.qps.512
    x86_avx512_scatterdiv2_df,                 // llvm.x86.avx512.scatterdiv2.df
    x86_avx512_scatterdiv2_di,                 // llvm.x86.avx512.scatterdiv2.di
    x86_avx512_scatterdiv4_df,                 // llvm.x86.avx512.scatterdiv4.df
    x86_avx512_scatterdiv4_di,                 // llvm.x86.avx512.scatterdiv4.di
    x86_avx512_scatterdiv4_sf,                 // llvm.x86.avx512.scatterdiv4.sf
    x86_avx512_scatterdiv4_si,                 // llvm.x86.avx512.scatterdiv4.si
    x86_avx512_scatterdiv8_sf,                 // llvm.x86.avx512.scatterdiv8.sf
    x86_avx512_scatterdiv8_si,                 // llvm.x86.avx512.scatterdiv8.si
    x86_avx512_scatterpf_dpd_512,              // llvm.x86.avx512.scatterpf.dpd.512
    x86_avx512_scatterpf_dps_512,              // llvm.x86.avx512.scatterpf.dps.512
    x86_avx512_scatterpf_qpd_512,              // llvm.x86.avx512.scatterpf.qpd.512
    x86_avx512_scatterpf_qps_512,              // llvm.x86.avx512.scatterpf.qps.512
    x86_avx512_scattersiv2_df,                 // llvm.x86.avx512.scattersiv2.df
    x86_avx512_scattersiv2_di,                 // llvm.x86.avx512.scattersiv2.di
    x86_avx512_scattersiv4_df,                 // llvm.x86.avx512.scattersiv4.df
    x86_avx512_scattersiv4_di,                 // llvm.x86.avx512.scattersiv4.di
    x86_avx512_scattersiv4_sf,                 // llvm.x86.avx512.scattersiv4.sf
    x86_avx512_scattersiv4_si,                 // llvm.x86.avx512.scattersiv4.si
    x86_avx512_scattersiv8_sf,                 // llvm.x86.avx512.scattersiv8.sf
    x86_avx512_scattersiv8_si,                 // llvm.x86.avx512.scattersiv8.si
    x86_avx512_sitofp_round,                   // llvm.x86.avx512.sitofp.round
    x86_avx512_sqrt_pd_512,                    // llvm.x86.avx512.sqrt.pd.512
    x86_avx512_sqrt_ps_512,                    // llvm.x86.avx512.sqrt.ps.512
    x86_avx512_sub_pd_512,                     // llvm.x86.avx512.sub.pd.512
    x86_avx512_sub_ps_512,                     // llvm.x86.avx512.sub.ps.512
    x86_avx512_uitofp_round,                   // llvm.x86.avx512.uitofp.round
    x86_avx512_vcomi_sd,                       // llvm.x86.avx512.vcomi.sd
    x86_avx512_vcomi_ss,                       // llvm.x86.avx512.vcomi.ss
    x86_avx512_vcvtsd2si32,                    // llvm.x86.avx512.vcvtsd2si32
    x86_avx512_vcvtsd2si64,                    // llvm.x86.avx512.vcvtsd2si64
    x86_avx512_vcvtsd2usi32,                   // llvm.x86.avx512.vcvtsd2usi32
    x86_avx512_vcvtsd2usi64,                   // llvm.x86.avx512.vcvtsd2usi64
    x86_avx512_vcvtss2si32,                    // llvm.x86.avx512.vcvtss2si32
    x86_avx512_vcvtss2si64,                    // llvm.x86.avx512.vcvtss2si64
    x86_avx512_vcvtss2usi32,                   // llvm.x86.avx512.vcvtss2usi32
    x86_avx512_vcvtss2usi64,                   // llvm.x86.avx512.vcvtss2usi64
    x86_avx512_vfmadd_f32,                     // llvm.x86.avx512.vfmadd.f32
    x86_avx512_vfmadd_f64,                     // llvm.x86.avx512.vfmadd.f64
    x86_avx512_vfmadd_pd_512,                  // llvm.x86.avx512.vfmadd.pd.512
    x86_avx512_vfmadd_ps_512,                  // llvm.x86.avx512.vfmadd.ps.512
    x86_avx512_vfmaddsub_pd_512,               // llvm.x86.avx512.vfmaddsub.pd.512
    x86_avx512_vfmaddsub_ps_512,               // llvm.x86.avx512.vfmaddsub.ps.512
    x86_avx512_vp2intersect_d_128,             // llvm.x86.avx512.vp2intersect.d.128
    x86_avx512_vp2intersect_d_256,             // llvm.x86.avx512.vp2intersect.d.256
    x86_avx512_vp2intersect_d_512,             // llvm.x86.avx512.vp2intersect.d.512
    x86_avx512_vp2intersect_q_128,             // llvm.x86.avx512.vp2intersect.q.128
    x86_avx512_vp2intersect_q_256,             // llvm.x86.avx512.vp2intersect.q.256
    x86_avx512_vp2intersect_q_512,             // llvm.x86.avx512.vp2intersect.q.512
    x86_avx512_vpdpbusd_128,                   // llvm.x86.avx512.vpdpbusd.128
    x86_avx512_vpdpbusd_256,                   // llvm.x86.avx512.vpdpbusd.256
    x86_avx512_vpdpbusd_512,                   // llvm.x86.avx512.vpdpbusd.512
    x86_avx512_vpdpbusds_128,                  // llvm.x86.avx512.vpdpbusds.128
    x86_avx512_vpdpbusds_256,                  // llvm.x86.avx512.vpdpbusds.256
    x86_avx512_vpdpbusds_512,                  // llvm.x86.avx512.vpdpbusds.512
    x86_avx512_vpdpwssd_128,                   // llvm.x86.avx512.vpdpwssd.128
    x86_avx512_vpdpwssd_256,                   // llvm.x86.avx512.vpdpwssd.256
    x86_avx512_vpdpwssd_512,                   // llvm.x86.avx512.vpdpwssd.512
    x86_avx512_vpdpwssds_128,                  // llvm.x86.avx512.vpdpwssds.128
    x86_avx512_vpdpwssds_256,                  // llvm.x86.avx512.vpdpwssds.256
    x86_avx512_vpdpwssds_512,                  // llvm.x86.avx512.vpdpwssds.512
    x86_avx512_vpermi2var_d_128,               // llvm.x86.avx512.vpermi2var.d.128
    x86_avx512_vpermi2var_d_256,               // llvm.x86.avx512.vpermi2var.d.256
    x86_avx512_vpermi2var_d_512,               // llvm.x86.avx512.vpermi2var.d.512
    x86_avx512_vpermi2var_hi_128,              // llvm.x86.avx512.vpermi2var.hi.128
    x86_avx512_vpermi2var_hi_256,              // llvm.x86.avx512.vpermi2var.hi.256
    x86_avx512_vpermi2var_hi_512,              // llvm.x86.avx512.vpermi2var.hi.512
    x86_avx512_vpermi2var_pd_128,              // llvm.x86.avx512.vpermi2var.pd.128
    x86_avx512_vpermi2var_pd_256,              // llvm.x86.avx512.vpermi2var.pd.256
    x86_avx512_vpermi2var_pd_512,              // llvm.x86.avx512.vpermi2var.pd.512
    x86_avx512_vpermi2var_ps_128,              // llvm.x86.avx512.vpermi2var.ps.128
    x86_avx512_vpermi2var_ps_256,              // llvm.x86.avx512.vpermi2var.ps.256
    x86_avx512_vpermi2var_ps_512,              // llvm.x86.avx512.vpermi2var.ps.512
    x86_avx512_vpermi2var_q_128,               // llvm.x86.avx512.vpermi2var.q.128
    x86_avx512_vpermi2var_q_256,               // llvm.x86.avx512.vpermi2var.q.256
    x86_avx512_vpermi2var_q_512,               // llvm.x86.avx512.vpermi2var.q.512
    x86_avx512_vpermi2var_qi_128,              // llvm.x86.avx512.vpermi2var.qi.128
    x86_avx512_vpermi2var_qi_256,              // llvm.x86.avx512.vpermi2var.qi.256
    x86_avx512_vpermi2var_qi_512,              // llvm.x86.avx512.vpermi2var.qi.512
    x86_avx512_vpermilvar_pd_512,              // llvm.x86.avx512.vpermilvar.pd.512
    x86_avx512_vpermilvar_ps_512,              // llvm.x86.avx512.vpermilvar.ps.512
    x86_avx512_vpmadd52h_uq_128,               // llvm.x86.avx512.vpmadd52h.uq.128
    x86_avx512_vpmadd52h_uq_256,               // llvm.x86.avx512.vpmadd52h.uq.256
    x86_avx512_vpmadd52h_uq_512,               // llvm.x86.avx512.vpmadd52h.uq.512
    x86_avx512_vpmadd52l_uq_128,               // llvm.x86.avx512.vpmadd52l.uq.128
    x86_avx512_vpmadd52l_uq_256,               // llvm.x86.avx512.vpmadd52l.uq.256
    x86_avx512_vpmadd52l_uq_512,               // llvm.x86.avx512.vpmadd52l.uq.512
    x86_avx512_vpshufbitqmb_128,               // llvm.x86.avx512.vpshufbitqmb.128
    x86_avx512_vpshufbitqmb_256,               // llvm.x86.avx512.vpshufbitqmb.256
    x86_avx512_vpshufbitqmb_512,               // llvm.x86.avx512.vpshufbitqmb.512
    x86_avx512bf16_cvtne2ps2bf16_128,          // llvm.x86.avx512bf16.cvtne2ps2bf16.128
    x86_avx512bf16_cvtne2ps2bf16_256,          // llvm.x86.avx512bf16.cvtne2ps2bf16.256
    x86_avx512bf16_cvtne2ps2bf16_512,          // llvm.x86.avx512bf16.cvtne2ps2bf16.512
    x86_avx512bf16_cvtneps2bf16_256,           // llvm.x86.avx512bf16.cvtneps2bf16.256
    x86_avx512bf16_cvtneps2bf16_512,           // llvm.x86.avx512bf16.cvtneps2bf16.512
    x86_avx512bf16_dpbf16ps_128,               // llvm.x86.avx512bf16.dpbf16ps.128
    x86_avx512bf16_dpbf16ps_256,               // llvm.x86.avx512bf16.dpbf16ps.256
    x86_avx512bf16_dpbf16ps_512,               // llvm.x86.avx512bf16.dpbf16ps.512
    x86_avx512bf16_mask_cvtneps2bf16_128,      // llvm.x86.avx512bf16.mask.cvtneps2bf16.128
    x86_avx512fp16_add_ph_512,                 // llvm.x86.avx512fp16.add.ph.512
    x86_avx512fp16_div_ph_512,                 // llvm.x86.avx512fp16.div.ph.512
    x86_avx512fp16_fpclass_ph_128,             // llvm.x86.avx512fp16.fpclass.ph.128
    x86_avx512fp16_fpclass_ph_256,             // llvm.x86.avx512fp16.fpclass.ph.256
    x86_avx512fp16_fpclass_ph_512,             // llvm.x86.avx512fp16.fpclass.ph.512
    x86_avx512fp16_mask_add_sh_round,          // llvm.x86.avx512fp16.mask.add.sh.round
    x86_avx512fp16_mask_cmp_ph_128,            // llvm.x86.avx512fp16.mask.cmp.ph.128
    x86_avx512fp16_mask_cmp_ph_256,            // llvm.x86.avx512fp16.mask.cmp.ph.256
    x86_avx512fp16_mask_cmp_ph_512,            // llvm.x86.avx512fp16.mask.cmp.ph.512
    x86_avx512fp16_mask_cmp_sh,                // llvm.x86.avx512fp16.mask.cmp.sh
    x86_avx512fp16_mask_div_sh_round,          // llvm.x86.avx512fp16.mask.div.sh.round
    x86_avx512fp16_mask_fpclass_sh,            // llvm.x86.avx512fp16.mask.fpclass.sh
    x86_avx512fp16_mask_getexp_ph_128,         // llvm.x86.avx512fp16.mask.getexp.ph.128
    x86_avx512fp16_mask_getexp_ph_256,         // llvm.x86.avx512fp16.mask.getexp.ph.256
    x86_avx512fp16_mask_getexp_ph_512,         // llvm.x86.avx512fp16.mask.getexp.ph.512
    x86_avx512fp16_mask_getexp_sh,             // llvm.x86.avx512fp16.mask.getexp.sh
    x86_avx512fp16_mask_getmant_ph_128,        // llvm.x86.avx512fp16.mask.getmant.ph.128
    x86_avx512fp16_mask_getmant_ph_256,        // llvm.x86.avx512fp16.mask.getmant.ph.256
    x86_avx512fp16_mask_getmant_ph_512,        // llvm.x86.avx512fp16.mask.getmant.ph.512
    x86_avx512fp16_mask_getmant_sh,            // llvm.x86.avx512fp16.mask.getmant.sh
    x86_avx512fp16_mask_max_sh_round,          // llvm.x86.avx512fp16.mask.max.sh.round
    x86_avx512fp16_mask_min_sh_round,          // llvm.x86.avx512fp16.mask.min.sh.round
    x86_avx512fp16_mask_mul_sh_round,          // llvm.x86.avx512fp16.mask.mul.sh.round
    x86_avx512fp16_mask_rcp_ph_128,            // llvm.x86.avx512fp16.mask.rcp.ph.128
    x86_avx512fp16_mask_rcp_ph_256,            // llvm.x86.avx512fp16.mask.rcp.ph.256
    x86_avx512fp16_mask_rcp_ph_512,            // llvm.x86.avx512fp16.mask.rcp.ph.512
    x86_avx512fp16_mask_rcp_sh,                // llvm.x86.avx512fp16.mask.rcp.sh
    x86_avx512fp16_mask_reduce_ph_128,         // llvm.x86.avx512fp16.mask.reduce.ph.128
    x86_avx512fp16_mask_reduce_ph_256,         // llvm.x86.avx512fp16.mask.reduce.ph.256
    x86_avx512fp16_mask_reduce_ph_512,         // llvm.x86.avx512fp16.mask.reduce.ph.512
    x86_avx512fp16_mask_reduce_sh,             // llvm.x86.avx512fp16.mask.reduce.sh
    x86_avx512fp16_mask_rndscale_ph_128,       // llvm.x86.avx512fp16.mask.rndscale.ph.128
    x86_avx512fp16_mask_rndscale_ph_256,       // llvm.x86.avx512fp16.mask.rndscale.ph.256
    x86_avx512fp16_mask_rndscale_ph_512,       // llvm.x86.avx512fp16.mask.rndscale.ph.512
    x86_avx512fp16_mask_rndscale_sh,           // llvm.x86.avx512fp16.mask.rndscale.sh
    x86_avx512fp16_mask_rsqrt_ph_128,          // llvm.x86.avx512fp16.mask.rsqrt.ph.128
    x86_avx512fp16_mask_rsqrt_ph_256,          // llvm.x86.avx512fp16.mask.rsqrt.ph.256
    x86_avx512fp16_mask_rsqrt_ph_512,          // llvm.x86.avx512fp16.mask.rsqrt.ph.512
    x86_avx512fp16_mask_rsqrt_sh,              // llvm.x86.avx512fp16.mask.rsqrt.sh
    x86_avx512fp16_mask_scalef_ph_128,         // llvm.x86.avx512fp16.mask.scalef.ph.128
    x86_avx512fp16_mask_scalef_ph_256,         // llvm.x86.avx512fp16.mask.scalef.ph.256
    x86_avx512fp16_mask_scalef_ph_512,         // llvm.x86.avx512fp16.mask.scalef.ph.512
    x86_avx512fp16_mask_scalef_sh,             // llvm.x86.avx512fp16.mask.scalef.sh
    x86_avx512fp16_mask_sqrt_sh,               // llvm.x86.avx512fp16.mask.sqrt.sh
    x86_avx512fp16_mask_sub_sh_round,          // llvm.x86.avx512fp16.mask.sub.sh.round
    x86_avx512fp16_mask_vcvtdq2ph_128,         // llvm.x86.avx512fp16.mask.vcvtdq2ph.128
    x86_avx512fp16_mask_vcvtpd2ph_128,         // llvm.x86.avx512fp16.mask.vcvtpd2ph.128
    x86_avx512fp16_mask_vcvtpd2ph_256,         // llvm.x86.avx512fp16.mask.vcvtpd2ph.256
    x86_avx512fp16_mask_vcvtpd2ph_512,         // llvm.x86.avx512fp16.mask.vcvtpd2ph.512
    x86_avx512fp16_mask_vcvtph2dq_128,         // llvm.x86.avx512fp16.mask.vcvtph2dq.128
    x86_avx512fp16_mask_vcvtph2dq_256,         // llvm.x86.avx512fp16.mask.vcvtph2dq.256
    x86_avx512fp16_mask_vcvtph2dq_512,         // llvm.x86.avx512fp16.mask.vcvtph2dq.512
    x86_avx512fp16_mask_vcvtph2pd_128,         // llvm.x86.avx512fp16.mask.vcvtph2pd.128
    x86_avx512fp16_mask_vcvtph2pd_256,         // llvm.x86.avx512fp16.mask.vcvtph2pd.256
    x86_avx512fp16_mask_vcvtph2pd_512,         // llvm.x86.avx512fp16.mask.vcvtph2pd.512
    x86_avx512fp16_mask_vcvtph2psx_128,        // llvm.x86.avx512fp16.mask.vcvtph2psx.128
    x86_avx512fp16_mask_vcvtph2psx_256,        // llvm.x86.avx512fp16.mask.vcvtph2psx.256
    x86_avx512fp16_mask_vcvtph2psx_512,        // llvm.x86.avx512fp16.mask.vcvtph2psx.512
    x86_avx512fp16_mask_vcvtph2qq_128,         // llvm.x86.avx512fp16.mask.vcvtph2qq.128
    x86_avx512fp16_mask_vcvtph2qq_256,         // llvm.x86.avx512fp16.mask.vcvtph2qq.256
    x86_avx512fp16_mask_vcvtph2qq_512,         // llvm.x86.avx512fp16.mask.vcvtph2qq.512
    x86_avx512fp16_mask_vcvtph2udq_128,        // llvm.x86.avx512fp16.mask.vcvtph2udq.128
    x86_avx512fp16_mask_vcvtph2udq_256,        // llvm.x86.avx512fp16.mask.vcvtph2udq.256
    x86_avx512fp16_mask_vcvtph2udq_512,        // llvm.x86.avx512fp16.mask.vcvtph2udq.512
    x86_avx512fp16_mask_vcvtph2uqq_128,        // llvm.x86.avx512fp16.mask.vcvtph2uqq.128
    x86_avx512fp16_mask_vcvtph2uqq_256,        // llvm.x86.avx512fp16.mask.vcvtph2uqq.256
    x86_avx512fp16_mask_vcvtph2uqq_512,        // llvm.x86.avx512fp16.mask.vcvtph2uqq.512
    x86_avx512fp16_mask_vcvtph2uw_128,         // llvm.x86.avx512fp16.mask.vcvtph2uw.128
    x86_avx512fp16_mask_vcvtph2uw_256,         // llvm.x86.avx512fp16.mask.vcvtph2uw.256
    x86_avx512fp16_mask_vcvtph2uw_512,         // llvm.x86.avx512fp16.mask.vcvtph2uw.512
    x86_avx512fp16_mask_vcvtph2w_128,          // llvm.x86.avx512fp16.mask.vcvtph2w.128
    x86_avx512fp16_mask_vcvtph2w_256,          // llvm.x86.avx512fp16.mask.vcvtph2w.256
    x86_avx512fp16_mask_vcvtph2w_512,          // llvm.x86.avx512fp16.mask.vcvtph2w.512
    x86_avx512fp16_mask_vcvtps2phx_128,        // llvm.x86.avx512fp16.mask.vcvtps2phx.128
    x86_avx512fp16_mask_vcvtps2phx_256,        // llvm.x86.avx512fp16.mask.vcvtps2phx.256
    x86_avx512fp16_mask_vcvtps2phx_512,        // llvm.x86.avx512fp16.mask.vcvtps2phx.512
    x86_avx512fp16_mask_vcvtqq2ph_128,         // llvm.x86.avx512fp16.mask.vcvtqq2ph.128
    x86_avx512fp16_mask_vcvtqq2ph_256,         // llvm.x86.avx512fp16.mask.vcvtqq2ph.256
    x86_avx512fp16_mask_vcvtsd2sh_round,       // llvm.x86.avx512fp16.mask.vcvtsd2sh.round
    x86_avx512fp16_mask_vcvtsh2sd_round,       // llvm.x86.avx512fp16.mask.vcvtsh2sd.round
    x86_avx512fp16_mask_vcvtsh2ss_round,       // llvm.x86.avx512fp16.mask.vcvtsh2ss.round
    x86_avx512fp16_mask_vcvtss2sh_round,       // llvm.x86.avx512fp16.mask.vcvtss2sh.round
    x86_avx512fp16_mask_vcvttph2dq_128,        // llvm.x86.avx512fp16.mask.vcvttph2dq.128
    x86_avx512fp16_mask_vcvttph2dq_256,        // llvm.x86.avx512fp16.mask.vcvttph2dq.256
    x86_avx512fp16_mask_vcvttph2dq_512,        // llvm.x86.avx512fp16.mask.vcvttph2dq.512
    x86_avx512fp16_mask_vcvttph2qq_128,        // llvm.x86.avx512fp16.mask.vcvttph2qq.128
    x86_avx512fp16_mask_vcvttph2qq_256,        // llvm.x86.avx512fp16.mask.vcvttph2qq.256
    x86_avx512fp16_mask_vcvttph2qq_512,        // llvm.x86.avx512fp16.mask.vcvttph2qq.512
    x86_avx512fp16_mask_vcvttph2udq_128,       // llvm.x86.avx512fp16.mask.vcvttph2udq.128
    x86_avx512fp16_mask_vcvttph2udq_256,       // llvm.x86.avx512fp16.mask.vcvttph2udq.256
    x86_avx512fp16_mask_vcvttph2udq_512,       // llvm.x86.avx512fp16.mask.vcvttph2udq.512
    x86_avx512fp16_mask_vcvttph2uqq_128,       // llvm.x86.avx512fp16.mask.vcvttph2uqq.128
    x86_avx512fp16_mask_vcvttph2uqq_256,       // llvm.x86.avx512fp16.mask.vcvttph2uqq.256
    x86_avx512fp16_mask_vcvttph2uqq_512,       // llvm.x86.avx512fp16.mask.vcvttph2uqq.512
    x86_avx512fp16_mask_vcvttph2uw_128,        // llvm.x86.avx512fp16.mask.vcvttph2uw.128
    x86_avx512fp16_mask_vcvttph2uw_256,        // llvm.x86.avx512fp16.mask.vcvttph2uw.256
    x86_avx512fp16_mask_vcvttph2uw_512,        // llvm.x86.avx512fp16.mask.vcvttph2uw.512
    x86_avx512fp16_mask_vcvttph2w_128,         // llvm.x86.avx512fp16.mask.vcvttph2w.128
    x86_avx512fp16_mask_vcvttph2w_256,         // llvm.x86.avx512fp16.mask.vcvttph2w.256
    x86_avx512fp16_mask_vcvttph2w_512,         // llvm.x86.avx512fp16.mask.vcvttph2w.512
    x86_avx512fp16_mask_vcvtudq2ph_128,        // llvm.x86.avx512fp16.mask.vcvtudq2ph.128
    x86_avx512fp16_mask_vcvtuqq2ph_128,        // llvm.x86.avx512fp16.mask.vcvtuqq2ph.128
    x86_avx512fp16_mask_vcvtuqq2ph_256,        // llvm.x86.avx512fp16.mask.vcvtuqq2ph.256
    x86_avx512fp16_mask_vfcmadd_cph_128,       // llvm.x86.avx512fp16.mask.vfcmadd.cph.128
    x86_avx512fp16_mask_vfcmadd_cph_256,       // llvm.x86.avx512fp16.mask.vfcmadd.cph.256
    x86_avx512fp16_mask_vfcmadd_cph_512,       // llvm.x86.avx512fp16.mask.vfcmadd.cph.512
    x86_avx512fp16_mask_vfcmadd_csh,           // llvm.x86.avx512fp16.mask.vfcmadd.csh
    x86_avx512fp16_mask_vfcmul_cph_128,        // llvm.x86.avx512fp16.mask.vfcmul.cph.128
    x86_avx512fp16_mask_vfcmul_cph_256,        // llvm.x86.avx512fp16.mask.vfcmul.cph.256
    x86_avx512fp16_mask_vfcmul_cph_512,        // llvm.x86.avx512fp16.mask.vfcmul.cph.512
    x86_avx512fp16_mask_vfcmul_csh,            // llvm.x86.avx512fp16.mask.vfcmul.csh
    x86_avx512fp16_mask_vfmadd_cph_128,        // llvm.x86.avx512fp16.mask.vfmadd.cph.128
    x86_avx512fp16_mask_vfmadd_cph_256,        // llvm.x86.avx512fp16.mask.vfmadd.cph.256
    x86_avx512fp16_mask_vfmadd_cph_512,        // llvm.x86.avx512fp16.mask.vfmadd.cph.512
    x86_avx512fp16_mask_vfmadd_csh,            // llvm.x86.avx512fp16.mask.vfmadd.csh
    x86_avx512fp16_mask_vfmul_cph_128,         // llvm.x86.avx512fp16.mask.vfmul.cph.128
    x86_avx512fp16_mask_vfmul_cph_256,         // llvm.x86.avx512fp16.mask.vfmul.cph.256
    x86_avx512fp16_mask_vfmul_cph_512,         // llvm.x86.avx512fp16.mask.vfmul.cph.512
    x86_avx512fp16_mask_vfmul_csh,             // llvm.x86.avx512fp16.mask.vfmul.csh
    x86_avx512fp16_maskz_vfcmadd_cph_128,      // llvm.x86.avx512fp16.maskz.vfcmadd.cph.128
    x86_avx512fp16_maskz_vfcmadd_cph_256,      // llvm.x86.avx512fp16.maskz.vfcmadd.cph.256
    x86_avx512fp16_maskz_vfcmadd_cph_512,      // llvm.x86.avx512fp16.maskz.vfcmadd.cph.512
    x86_avx512fp16_maskz_vfcmadd_csh,          // llvm.x86.avx512fp16.maskz.vfcmadd.csh
    x86_avx512fp16_maskz_vfmadd_cph_128,       // llvm.x86.avx512fp16.maskz.vfmadd.cph.128
    x86_avx512fp16_maskz_vfmadd_cph_256,       // llvm.x86.avx512fp16.maskz.vfmadd.cph.256
    x86_avx512fp16_maskz_vfmadd_cph_512,       // llvm.x86.avx512fp16.maskz.vfmadd.cph.512
    x86_avx512fp16_maskz_vfmadd_csh,           // llvm.x86.avx512fp16.maskz.vfmadd.csh
    x86_avx512fp16_max_ph_128,                 // llvm.x86.avx512fp16.max.ph.128
    x86_avx512fp16_max_ph_256,                 // llvm.x86.avx512fp16.max.ph.256
    x86_avx512fp16_max_ph_512,                 // llvm.x86.avx512fp16.max.ph.512
    x86_avx512fp16_min_ph_128,                 // llvm.x86.avx512fp16.min.ph.128
    x86_avx512fp16_min_ph_256,                 // llvm.x86.avx512fp16.min.ph.256
    x86_avx512fp16_min_ph_512,                 // llvm.x86.avx512fp16.min.ph.512
    x86_avx512fp16_mul_ph_512,                 // llvm.x86.avx512fp16.mul.ph.512
    x86_avx512fp16_sqrt_ph_512,                // llvm.x86.avx512fp16.sqrt.ph.512
    x86_avx512fp16_sub_ph_512,                 // llvm.x86.avx512fp16.sub.ph.512
    x86_avx512fp16_vcomi_sh,                   // llvm.x86.avx512fp16.vcomi.sh
    x86_avx512fp16_vcvtsh2si32,                // llvm.x86.avx512fp16.vcvtsh2si32
    x86_avx512fp16_vcvtsh2si64,                // llvm.x86.avx512fp16.vcvtsh2si64
    x86_avx512fp16_vcvtsh2usi32,               // llvm.x86.avx512fp16.vcvtsh2usi32
    x86_avx512fp16_vcvtsh2usi64,               // llvm.x86.avx512fp16.vcvtsh2usi64
    x86_avx512fp16_vcvtsi2sh,                  // llvm.x86.avx512fp16.vcvtsi2sh
    x86_avx512fp16_vcvtsi642sh,                // llvm.x86.avx512fp16.vcvtsi642sh
    x86_avx512fp16_vcvttsh2si32,               // llvm.x86.avx512fp16.vcvttsh2si32
    x86_avx512fp16_vcvttsh2si64,               // llvm.x86.avx512fp16.vcvttsh2si64
    x86_avx512fp16_vcvttsh2usi32,              // llvm.x86.avx512fp16.vcvttsh2usi32
    x86_avx512fp16_vcvttsh2usi64,              // llvm.x86.avx512fp16.vcvttsh2usi64
    x86_avx512fp16_vcvtusi2sh,                 // llvm.x86.avx512fp16.vcvtusi2sh
    x86_avx512fp16_vcvtusi642sh,               // llvm.x86.avx512fp16.vcvtusi642sh
    x86_avx512fp16_vfmadd_f16,                 // llvm.x86.avx512fp16.vfmadd.f16
    x86_avx512fp16_vfmadd_ph_512,              // llvm.x86.avx512fp16.vfmadd.ph.512
    x86_avx512fp16_vfmaddsub_ph_128,           // llvm.x86.avx512fp16.vfmaddsub.ph.128
    x86_avx512fp16_vfmaddsub_ph_256,           // llvm.x86.avx512fp16.vfmaddsub.ph.256
    x86_avx512fp16_vfmaddsub_ph_512,           // llvm.x86.avx512fp16.vfmaddsub.ph.512
    x86_axor32,                                // llvm.x86.axor32
    x86_axor64,                                // llvm.x86.axor64
    x86_bmi_bextr_32,                          // llvm.x86.bmi.bextr.32
    x86_bmi_bextr_64,                          // llvm.x86.bmi.bextr.64
    x86_bmi_bzhi_32,                           // llvm.x86.bmi.bzhi.32
    x86_bmi_bzhi_64,                           // llvm.x86.bmi.bzhi.64
    x86_bmi_pdep_32,                           // llvm.x86.bmi.pdep.32
    x86_bmi_pdep_64,                           // llvm.x86.bmi.pdep.64
    x86_bmi_pext_32,                           // llvm.x86.bmi.pext.32
    x86_bmi_pext_64,                           // llvm.x86.bmi.pext.64
    x86_cast_tile_to_vector,                   // llvm.x86.cast.tile.to.vector
    x86_cast_vector_to_tile,                   // llvm.x86.cast.vector.to.tile
    x86_cldemote,                              // llvm.x86.cldemote
    x86_clflushopt,                            // llvm.x86.clflushopt
    x86_clrssbsy,                              // llvm.x86.clrssbsy
    x86_clui,                                  // llvm.x86.clui
    x86_clwb,                                  // llvm.x86.clwb
    x86_clzero,                                // llvm.x86.clzero
    x86_cmpccxadd32,                           // llvm.x86.cmpccxadd32
    x86_cmpccxadd64,                           // llvm.x86.cmpccxadd64
    x86_directstore32,                         // llvm.x86.directstore32
    x86_directstore64,                         // llvm.x86.directstore64
    x86_encodekey128,                          // llvm.x86.encodekey128
    x86_encodekey256,                          // llvm.x86.encodekey256
    x86_enqcmd,                                // llvm.x86.enqcmd
    x86_enqcmds,                               // llvm.x86.enqcmds
    x86_flags_read_u32,                        // llvm.x86.flags.read.u32
    x86_flags_read_u64,                        // llvm.x86.flags.read.u64
    x86_flags_write_u32,                       // llvm.x86.flags.write.u32
    x86_flags_write_u64,                       // llvm.x86.flags.write.u64
    x86_fma_vfmaddsub_pd,                      // llvm.x86.fma.vfmaddsub.pd
    x86_fma_vfmaddsub_pd_256,                  // llvm.x86.fma.vfmaddsub.pd.256
    x86_fma_vfmaddsub_ps,                      // llvm.x86.fma.vfmaddsub.ps
    x86_fma_vfmaddsub_ps_256,                  // llvm.x86.fma.vfmaddsub.ps.256
    x86_fxrstor,                               // llvm.x86.fxrstor
    x86_fxrstor64,                             // llvm.x86.fxrstor64
    x86_fxsave,                                // llvm.x86.fxsave
    x86_fxsave64,                              // llvm.x86.fxsave64
    x86_incsspd,                               // llvm.x86.incsspd
    x86_incsspq,                               // llvm.x86.incsspq
    x86_int,                                   // llvm.x86.int
    x86_invpcid,                               // llvm.x86.invpcid
    x86_ldtilecfg,                             // llvm.x86.ldtilecfg
    x86_ldtilecfg_internal,                    // llvm.x86.ldtilecfg.internal
    x86_llwpcb,                                // llvm.x86.llwpcb
    x86_loadiwkey,                             // llvm.x86.loadiwkey
    x86_lwpins32,                              // llvm.x86.lwpins32
    x86_lwpins64,                              // llvm.x86.lwpins64
    x86_lwpval32,                              // llvm.x86.lwpval32
    x86_lwpval64,                              // llvm.x86.lwpval64
    x86_mmx_emms,                              // llvm.x86.mmx.emms
    x86_mmx_femms,                             // llvm.x86.mmx.femms
    x86_mmx_maskmovq,                          // llvm.x86.mmx.maskmovq
    x86_mmx_movnt_dq,                          // llvm.x86.mmx.movnt.dq
    x86_mmx_packssdw,                          // llvm.x86.mmx.packssdw
    x86_mmx_packsswb,                          // llvm.x86.mmx.packsswb
    x86_mmx_packuswb,                          // llvm.x86.mmx.packuswb
    x86_mmx_padd_b,                            // llvm.x86.mmx.padd.b
    x86_mmx_padd_d,                            // llvm.x86.mmx.padd.d
    x86_mmx_padd_q,                            // llvm.x86.mmx.padd.q
    x86_mmx_padd_w,                            // llvm.x86.mmx.padd.w
    x86_mmx_padds_b,                           // llvm.x86.mmx.padds.b
    x86_mmx_padds_w,                           // llvm.x86.mmx.padds.w
    x86_mmx_paddus_b,                          // llvm.x86.mmx.paddus.b
    x86_mmx_paddus_w,                          // llvm.x86.mmx.paddus.w
    x86_mmx_palignr_b,                         // llvm.x86.mmx.palignr.b
    x86_mmx_pand,                              // llvm.x86.mmx.pand
    x86_mmx_pandn,                             // llvm.x86.mmx.pandn
    x86_mmx_pavg_b,                            // llvm.x86.mmx.pavg.b
    x86_mmx_pavg_w,                            // llvm.x86.mmx.pavg.w
    x86_mmx_pcmpeq_b,                          // llvm.x86.mmx.pcmpeq.b
    x86_mmx_pcmpeq_d,                          // llvm.x86.mmx.pcmpeq.d
    x86_mmx_pcmpeq_w,                          // llvm.x86.mmx.pcmpeq.w
    x86_mmx_pcmpgt_b,                          // llvm.x86.mmx.pcmpgt.b
    x86_mmx_pcmpgt_d,                          // llvm.x86.mmx.pcmpgt.d
    x86_mmx_pcmpgt_w,                          // llvm.x86.mmx.pcmpgt.w
    x86_mmx_pextr_w,                           // llvm.x86.mmx.pextr.w
    x86_mmx_pinsr_w,                           // llvm.x86.mmx.pinsr.w
    x86_mmx_pmadd_wd,                          // llvm.x86.mmx.pmadd.wd
    x86_mmx_pmaxs_w,                           // llvm.x86.mmx.pmaxs.w
    x86_mmx_pmaxu_b,                           // llvm.x86.mmx.pmaxu.b
    x86_mmx_pmins_w,                           // llvm.x86.mmx.pmins.w
    x86_mmx_pminu_b,                           // llvm.x86.mmx.pminu.b
    x86_mmx_pmovmskb,                          // llvm.x86.mmx.pmovmskb
    x86_mmx_pmulh_w,                           // llvm.x86.mmx.pmulh.w
    x86_mmx_pmulhu_w,                          // llvm.x86.mmx.pmulhu.w
    x86_mmx_pmull_w,                           // llvm.x86.mmx.pmull.w
    x86_mmx_pmulu_dq,                          // llvm.x86.mmx.pmulu.dq
    x86_mmx_por,                               // llvm.x86.mmx.por
    x86_mmx_psad_bw,                           // llvm.x86.mmx.psad.bw
    x86_mmx_psll_d,                            // llvm.x86.mmx.psll.d
    x86_mmx_psll_q,                            // llvm.x86.mmx.psll.q
    x86_mmx_psll_w,                            // llvm.x86.mmx.psll.w
    x86_mmx_pslli_d,                           // llvm.x86.mmx.pslli.d
    x86_mmx_pslli_q,                           // llvm.x86.mmx.pslli.q
    x86_mmx_pslli_w,                           // llvm.x86.mmx.pslli.w
    x86_mmx_psra_d,                            // llvm.x86.mmx.psra.d
    x86_mmx_psra_w,                            // llvm.x86.mmx.psra.w
    x86_mmx_psrai_d,                           // llvm.x86.mmx.psrai.d
    x86_mmx_psrai_w,                           // llvm.x86.mmx.psrai.w
    x86_mmx_psrl_d,                            // llvm.x86.mmx.psrl.d
    x86_mmx_psrl_q,                            // llvm.x86.mmx.psrl.q
    x86_mmx_psrl_w,                            // llvm.x86.mmx.psrl.w
    x86_mmx_psrli_d,                           // llvm.x86.mmx.psrli.d
    x86_mmx_psrli_q,                           // llvm.x86.mmx.psrli.q
    x86_mmx_psrli_w,                           // llvm.x86.mmx.psrli.w
    x86_mmx_psub_b,                            // llvm.x86.mmx.psub.b
    x86_mmx_psub_d,                            // llvm.x86.mmx.psub.d
    x86_mmx_psub_q,                            // llvm.x86.mmx.psub.q
    x86_mmx_psub_w,                            // llvm.x86.mmx.psub.w
    x86_mmx_psubs_b,                           // llvm.x86.mmx.psubs.b
    x86_mmx_psubs_w,                           // llvm.x86.mmx.psubs.w
    x86_mmx_psubus_b,                          // llvm.x86.mmx.psubus.b
    x86_mmx_psubus_w,                          // llvm.x86.mmx.psubus.w
    x86_mmx_punpckhbw,                         // llvm.x86.mmx.punpckhbw
    x86_mmx_punpckhdq,                         // llvm.x86.mmx.punpckhdq
    x86_mmx_punpckhwd,                         // llvm.x86.mmx.punpckhwd
    x86_mmx_punpcklbw,                         // llvm.x86.mmx.punpcklbw
    x86_mmx_punpckldq,                         // llvm.x86.mmx.punpckldq
    x86_mmx_punpcklwd,                         // llvm.x86.mmx.punpcklwd
    x86_mmx_pxor,                              // llvm.x86.mmx.pxor
    x86_monitorx,                              // llvm.x86.monitorx
    x86_movdir64b,                             // llvm.x86.movdir64b
    x86_mwaitx,                                // llvm.x86.mwaitx
    x86_pclmulqdq,                             // llvm.x86.pclmulqdq
    x86_pclmulqdq_256,                         // llvm.x86.pclmulqdq.256
    x86_pclmulqdq_512,                         // llvm.x86.pclmulqdq.512
    x86_ptwrite32,                             // llvm.x86.ptwrite32
    x86_ptwrite64,                             // llvm.x86.ptwrite64
    x86_rdfsbase_32,                           // llvm.x86.rdfsbase.32
    x86_rdfsbase_64,                           // llvm.x86.rdfsbase.64
    x86_rdgsbase_32,                           // llvm.x86.rdgsbase.32
    x86_rdgsbase_64,                           // llvm.x86.rdgsbase.64
    x86_rdpid,                                 // llvm.x86.rdpid
    x86_rdpkru,                                // llvm.x86.rdpkru
    x86_rdpmc,                                 // llvm.x86.rdpmc
    x86_rdpru,                                 // llvm.x86.rdpru
    x86_rdrand_16,                             // llvm.x86.rdrand.16
    x86_rdrand_32,                             // llvm.x86.rdrand.32
    x86_rdrand_64,                             // llvm.x86.rdrand.64
    x86_rdseed_16,                             // llvm.x86.rdseed.16
    x86_rdseed_32,                             // llvm.x86.rdseed.32
    x86_rdseed_64,                             // llvm.x86.rdseed.64
    x86_rdsspd,                                // llvm.x86.rdsspd
    x86_rdsspq,                                // llvm.x86.rdsspq
    x86_rdtsc,                                 // llvm.x86.rdtsc
    x86_rdtscp,                                // llvm.x86.rdtscp
    x86_rstorssp,                              // llvm.x86.rstorssp
    x86_saveprevssp,                           // llvm.x86.saveprevssp
    x86_seh_ehguard,                           // llvm.x86.seh.ehguard
    x86_seh_ehregnode,                         // llvm.x86.seh.ehregnode
    x86_seh_lsda,                              // llvm.x86.seh.lsda
    x86_senduipi,                              // llvm.x86.senduipi
    x86_serialize,                             // llvm.x86.serialize
    x86_setssbsy,                              // llvm.x86.setssbsy
    x86_sha1msg1,                              // llvm.x86.sha1msg1
    x86_sha1msg2,                              // llvm.x86.sha1msg2
    x86_sha1nexte,                             // llvm.x86.sha1nexte
    x86_sha1rnds4,                             // llvm.x86.sha1rnds4
    x86_sha256msg1,                            // llvm.x86.sha256msg1
    x86_sha256msg2,                            // llvm.x86.sha256msg2
    x86_sha256rnds2,                           // llvm.x86.sha256rnds2
    x86_slwpcb,                                // llvm.x86.slwpcb
    x86_sse_cmp_ps,                            // llvm.x86.sse.cmp.ps
    x86_sse_cmp_ss,                            // llvm.x86.sse.cmp.ss
    x86_sse_comieq_ss,                         // llvm.x86.sse.comieq.ss
    x86_sse_comige_ss,                         // llvm.x86.sse.comige.ss
    x86_sse_comigt_ss,                         // llvm.x86.sse.comigt.ss
    x86_sse_comile_ss,                         // llvm.x86.sse.comile.ss
    x86_sse_comilt_ss,                         // llvm.x86.sse.comilt.ss
    x86_sse_comineq_ss,                        // llvm.x86.sse.comineq.ss
    x86_sse_cvtpd2pi,                          // llvm.x86.sse.cvtpd2pi
    x86_sse_cvtpi2pd,                          // llvm.x86.sse.cvtpi2pd
    x86_sse_cvtpi2ps,                          // llvm.x86.sse.cvtpi2ps
    x86_sse_cvtps2pi,                          // llvm.x86.sse.cvtps2pi
    x86_sse_cvtss2si,                          // llvm.x86.sse.cvtss2si
    x86_sse_cvtss2si64,                        // llvm.x86.sse.cvtss2si64
    x86_sse_cvttpd2pi,                         // llvm.x86.sse.cvttpd2pi
    x86_sse_cvttps2pi,                         // llvm.x86.sse.cvttps2pi
    x86_sse_cvttss2si,                         // llvm.x86.sse.cvttss2si
    x86_sse_cvttss2si64,                       // llvm.x86.sse.cvttss2si64
    x86_sse_ldmxcsr,                           // llvm.x86.sse.ldmxcsr
    x86_sse_max_ps,                            // llvm.x86.sse.max.ps
    x86_sse_max_ss,                            // llvm.x86.sse.max.ss
    x86_sse_min_ps,                            // llvm.x86.sse.min.ps
    x86_sse_min_ss,                            // llvm.x86.sse.min.ss
    x86_sse_movmsk_ps,                         // llvm.x86.sse.movmsk.ps
    x86_sse_pshuf_w,                           // llvm.x86.sse.pshuf.w
    x86_sse_rcp_ps,                            // llvm.x86.sse.rcp.ps
    x86_sse_rcp_ss,                            // llvm.x86.sse.rcp.ss
    x86_sse_rsqrt_ps,                          // llvm.x86.sse.rsqrt.ps
    x86_sse_rsqrt_ss,                          // llvm.x86.sse.rsqrt.ss
    x86_sse_sfence,                            // llvm.x86.sse.sfence
    x86_sse_stmxcsr,                           // llvm.x86.sse.stmxcsr
    x86_sse_ucomieq_ss,                        // llvm.x86.sse.ucomieq.ss
    x86_sse_ucomige_ss,                        // llvm.x86.sse.ucomige.ss
    x86_sse_ucomigt_ss,                        // llvm.x86.sse.ucomigt.ss
    x86_sse_ucomile_ss,                        // llvm.x86.sse.ucomile.ss
    x86_sse_ucomilt_ss,                        // llvm.x86.sse.ucomilt.ss
    x86_sse_ucomineq_ss,                       // llvm.x86.sse.ucomineq.ss
    x86_sse2_clflush,                          // llvm.x86.sse2.clflush
    x86_sse2_cmp_pd,                           // llvm.x86.sse2.cmp.pd
    x86_sse2_cmp_sd,                           // llvm.x86.sse2.cmp.sd
    x86_sse2_comieq_sd,                        // llvm.x86.sse2.comieq.sd
    x86_sse2_comige_sd,                        // llvm.x86.sse2.comige.sd
    x86_sse2_comigt_sd,                        // llvm.x86.sse2.comigt.sd
    x86_sse2_comile_sd,                        // llvm.x86.sse2.comile.sd
    x86_sse2_comilt_sd,                        // llvm.x86.sse2.comilt.sd
    x86_sse2_comineq_sd,                       // llvm.x86.sse2.comineq.sd
    x86_sse2_cvtpd2dq,                         // llvm.x86.sse2.cvtpd2dq
    x86_sse2_cvtpd2ps,                         // llvm.x86.sse2.cvtpd2ps
    x86_sse2_cvtps2dq,                         // llvm.x86.sse2.cvtps2dq
    x86_sse2_cvtsd2si,                         // llvm.x86.sse2.cvtsd2si
    x86_sse2_cvtsd2si64,                       // llvm.x86.sse2.cvtsd2si64
    x86_sse2_cvtsd2ss,                         // llvm.x86.sse2.cvtsd2ss
    x86_sse2_cvttpd2dq,                        // llvm.x86.sse2.cvttpd2dq
    x86_sse2_cvttps2dq,                        // llvm.x86.sse2.cvttps2dq
    x86_sse2_cvttsd2si,                        // llvm.x86.sse2.cvttsd2si
    x86_sse2_cvttsd2si64,                      // llvm.x86.sse2.cvttsd2si64
    x86_sse2_lfence,                           // llvm.x86.sse2.lfence
    x86_sse2_maskmov_dqu,                      // llvm.x86.sse2.maskmov.dqu
    x86_sse2_max_pd,                           // llvm.x86.sse2.max.pd
    x86_sse2_max_sd,                           // llvm.x86.sse2.max.sd
    x86_sse2_mfence,                           // llvm.x86.sse2.mfence
    x86_sse2_min_pd,                           // llvm.x86.sse2.min.pd
    x86_sse2_min_sd,                           // llvm.x86.sse2.min.sd
    x86_sse2_movmsk_pd,                        // llvm.x86.sse2.movmsk.pd
    x86_sse2_packssdw_128,                     // llvm.x86.sse2.packssdw.128
    x86_sse2_packsswb_128,                     // llvm.x86.sse2.packsswb.128
    x86_sse2_packuswb_128,                     // llvm.x86.sse2.packuswb.128
    x86_sse2_pause,                            // llvm.x86.sse2.pause
    x86_sse2_pavg_b,                           // llvm.x86.sse2.pavg.b
    x86_sse2_pavg_w,                           // llvm.x86.sse2.pavg.w
    x86_sse2_pmadd_wd,                         // llvm.x86.sse2.pmadd.wd
    x86_sse2_pmovmskb_128,                     // llvm.x86.sse2.pmovmskb.128
    x86_sse2_pmulh_w,                          // llvm.x86.sse2.pmulh.w
    x86_sse2_pmulhu_w,                         // llvm.x86.sse2.pmulhu.w
    x86_sse2_psad_bw,                          // llvm.x86.sse2.psad.bw
    x86_sse2_psll_d,                           // llvm.x86.sse2.psll.d
    x86_sse2_psll_q,                           // llvm.x86.sse2.psll.q
    x86_sse2_psll_w,                           // llvm.x86.sse2.psll.w
    x86_sse2_pslli_d,                          // llvm.x86.sse2.pslli.d
    x86_sse2_pslli_q,                          // llvm.x86.sse2.pslli.q
    x86_sse2_pslli_w,                          // llvm.x86.sse2.pslli.w
    x86_sse2_psra_d,                           // llvm.x86.sse2.psra.d
    x86_sse2_psra_w,                           // llvm.x86.sse2.psra.w
    x86_sse2_psrai_d,                          // llvm.x86.sse2.psrai.d
    x86_sse2_psrai_w,                          // llvm.x86.sse2.psrai.w
    x86_sse2_psrl_d,                           // llvm.x86.sse2.psrl.d
    x86_sse2_psrl_q,                           // llvm.x86.sse2.psrl.q
    x86_sse2_psrl_w,                           // llvm.x86.sse2.psrl.w
    x86_sse2_psrli_d,                          // llvm.x86.sse2.psrli.d
    x86_sse2_psrli_q,                          // llvm.x86.sse2.psrli.q
    x86_sse2_psrli_w,                          // llvm.x86.sse2.psrli.w
    x86_sse2_ucomieq_sd,                       // llvm.x86.sse2.ucomieq.sd
    x86_sse2_ucomige_sd,                       // llvm.x86.sse2.ucomige.sd
    x86_sse2_ucomigt_sd,                       // llvm.x86.sse2.ucomigt.sd
    x86_sse2_ucomile_sd,                       // llvm.x86.sse2.ucomile.sd
    x86_sse2_ucomilt_sd,                       // llvm.x86.sse2.ucomilt.sd
    x86_sse2_ucomineq_sd,                      // llvm.x86.sse2.ucomineq.sd
    x86_sse3_addsub_pd,                        // llvm.x86.sse3.addsub.pd
    x86_sse3_addsub_ps,                        // llvm.x86.sse3.addsub.ps
    x86_sse3_hadd_pd,                          // llvm.x86.sse3.hadd.pd
    x86_sse3_hadd_ps,                          // llvm.x86.sse3.hadd.ps
    x86_sse3_hsub_pd,                          // llvm.x86.sse3.hsub.pd
    x86_sse3_hsub_ps,                          // llvm.x86.sse3.hsub.ps
    x86_sse3_ldu_dq,                           // llvm.x86.sse3.ldu.dq
    x86_sse3_monitor,                          // llvm.x86.sse3.monitor
    x86_sse3_mwait,                            // llvm.x86.sse3.mwait
    x86_sse41_blendvpd,                        // llvm.x86.sse41.blendvpd
    x86_sse41_blendvps,                        // llvm.x86.sse41.blendvps
    x86_sse41_dppd,                            // llvm.x86.sse41.dppd
    x86_sse41_dpps,                            // llvm.x86.sse41.dpps
    x86_sse41_insertps,                        // llvm.x86.sse41.insertps
    x86_sse41_mpsadbw,                         // llvm.x86.sse41.mpsadbw
    x86_sse41_packusdw,                        // llvm.x86.sse41.packusdw
    x86_sse41_pblendvb,                        // llvm.x86.sse41.pblendvb
    x86_sse41_phminposuw,                      // llvm.x86.sse41.phminposuw
    x86_sse41_ptestc,                          // llvm.x86.sse41.ptestc
    x86_sse41_ptestnzc,                        // llvm.x86.sse41.ptestnzc
    x86_sse41_ptestz,                          // llvm.x86.sse41.ptestz
    x86_sse41_round_pd,                        // llvm.x86.sse41.round.pd
    x86_sse41_round_ps,                        // llvm.x86.sse41.round.ps
    x86_sse41_round_sd,                        // llvm.x86.sse41.round.sd
    x86_sse41_round_ss,                        // llvm.x86.sse41.round.ss
    x86_sse42_crc32_32_16,                     // llvm.x86.sse42.crc32.32.16
    x86_sse42_crc32_32_32,                     // llvm.x86.sse42.crc32.32.32
    x86_sse42_crc32_32_8,                      // llvm.x86.sse42.crc32.32.8
    x86_sse42_crc32_64_64,                     // llvm.x86.sse42.crc32.64.64
    x86_sse42_pcmpestri128,                    // llvm.x86.sse42.pcmpestri128
    x86_sse42_pcmpestria128,                   // llvm.x86.sse42.pcmpestria128
    x86_sse42_pcmpestric128,                   // llvm.x86.sse42.pcmpestric128
    x86_sse42_pcmpestrio128,                   // llvm.x86.sse42.pcmpestrio128
    x86_sse42_pcmpestris128,                   // llvm.x86.sse42.pcmpestris128
    x86_sse42_pcmpestriz128,                   // llvm.x86.sse42.pcmpestriz128
    x86_sse42_pcmpestrm128,                    // llvm.x86.sse42.pcmpestrm128
    x86_sse42_pcmpistri128,                    // llvm.x86.sse42.pcmpistri128
    x86_sse42_pcmpistria128,                   // llvm.x86.sse42.pcmpistria128
    x86_sse42_pcmpistric128,                   // llvm.x86.sse42.pcmpistric128
    x86_sse42_pcmpistrio128,                   // llvm.x86.sse42.pcmpistrio128
    x86_sse42_pcmpistris128,                   // llvm.x86.sse42.pcmpistris128
    x86_sse42_pcmpistriz128,                   // llvm.x86.sse42.pcmpistriz128
    x86_sse42_pcmpistrm128,                    // llvm.x86.sse42.pcmpistrm128
    x86_sse4a_extrq,                           // llvm.x86.sse4a.extrq
    x86_sse4a_extrqi,                          // llvm.x86.sse4a.extrqi
    x86_sse4a_insertq,                         // llvm.x86.sse4a.insertq
    x86_sse4a_insertqi,                        // llvm.x86.sse4a.insertqi
    x86_ssse3_pabs_b,                          // llvm.x86.ssse3.pabs.b
    x86_ssse3_pabs_d,                          // llvm.x86.ssse3.pabs.d
    x86_ssse3_pabs_w,                          // llvm.x86.ssse3.pabs.w
    x86_ssse3_phadd_d,                         // llvm.x86.ssse3.phadd.d
    x86_ssse3_phadd_d_128,                     // llvm.x86.ssse3.phadd.d.128
    x86_ssse3_phadd_sw,                        // llvm.x86.ssse3.phadd.sw
    x86_ssse3_phadd_sw_128,                    // llvm.x86.ssse3.phadd.sw.128
    x86_ssse3_phadd_w,                         // llvm.x86.ssse3.phadd.w
    x86_ssse3_phadd_w_128,                     // llvm.x86.ssse3.phadd.w.128
    x86_ssse3_phsub_d,                         // llvm.x86.ssse3.phsub.d
    x86_ssse3_phsub_d_128,                     // llvm.x86.ssse3.phsub.d.128
    x86_ssse3_phsub_sw,                        // llvm.x86.ssse3.phsub.sw
    x86_ssse3_phsub_sw_128,                    // llvm.x86.ssse3.phsub.sw.128
    x86_ssse3_phsub_w,                         // llvm.x86.ssse3.phsub.w
    x86_ssse3_phsub_w_128,                     // llvm.x86.ssse3.phsub.w.128
    x86_ssse3_pmadd_ub_sw,                     // llvm.x86.ssse3.pmadd.ub.sw
    x86_ssse3_pmadd_ub_sw_128,                 // llvm.x86.ssse3.pmadd.ub.sw.128
    x86_ssse3_pmul_hr_sw,                      // llvm.x86.ssse3.pmul.hr.sw
    x86_ssse3_pmul_hr_sw_128,                  // llvm.x86.ssse3.pmul.hr.sw.128
    x86_ssse3_pshuf_b,                         // llvm.x86.ssse3.pshuf.b
    x86_ssse3_pshuf_b_128,                     // llvm.x86.ssse3.pshuf.b.128
    x86_ssse3_psign_b,                         // llvm.x86.ssse3.psign.b
    x86_ssse3_psign_b_128,                     // llvm.x86.ssse3.psign.b.128
    x86_ssse3_psign_d,                         // llvm.x86.ssse3.psign.d
    x86_ssse3_psign_d_128,                     // llvm.x86.ssse3.psign.d.128
    x86_ssse3_psign_w,                         // llvm.x86.ssse3.psign.w
    x86_ssse3_psign_w_128,                     // llvm.x86.ssse3.psign.w.128
    x86_sttilecfg,                             // llvm.x86.sttilecfg
    x86_stui,                                  // llvm.x86.stui
    x86_subborrow_32,                          // llvm.x86.subborrow.32
    x86_subborrow_64,                          // llvm.x86.subborrow.64
    x86_tbm_bextri_u32,                        // llvm.x86.tbm.bextri.u32
    x86_tbm_bextri_u64,                        // llvm.x86.tbm.bextri.u64
    x86_tcmmimfp16ps,                          // llvm.x86.tcmmimfp16ps
    x86_tcmmimfp16ps_internal,                 // llvm.x86.tcmmimfp16ps.internal
    x86_tcmmrlfp16ps,                          // llvm.x86.tcmmrlfp16ps
    x86_tcmmrlfp16ps_internal,                 // llvm.x86.tcmmrlfp16ps.internal
    x86_tdpbf16ps,                             // llvm.x86.tdpbf16ps
    x86_tdpbf16ps_internal,                    // llvm.x86.tdpbf16ps.internal
    x86_tdpbssd,                               // llvm.x86.tdpbssd
    x86_tdpbssd_internal,                      // llvm.x86.tdpbssd.internal
    x86_tdpbsud,                               // llvm.x86.tdpbsud
    x86_tdpbsud_internal,                      // llvm.x86.tdpbsud.internal
    x86_tdpbusd,                               // llvm.x86.tdpbusd
    x86_tdpbusd_internal,                      // llvm.x86.tdpbusd.internal
    x86_tdpbuud,                               // llvm.x86.tdpbuud
    x86_tdpbuud_internal,                      // llvm.x86.tdpbuud.internal
    x86_tdpfp16ps,                             // llvm.x86.tdpfp16ps
    x86_tdpfp16ps_internal,                    // llvm.x86.tdpfp16ps.internal
    x86_testui,                                // llvm.x86.testui
    x86_tileloadd64,                           // llvm.x86.tileloadd64
    x86_tileloadd64_internal,                  // llvm.x86.tileloadd64.internal
    x86_tileloaddt164,                         // llvm.x86.tileloaddt164
    x86_tileloaddt164_internal,                // llvm.x86.tileloaddt164.internal
    x86_tilerelease,                           // llvm.x86.tilerelease
    x86_tilestored64,                          // llvm.x86.tilestored64
    x86_tilestored64_internal,                 // llvm.x86.tilestored64.internal
    x86_tilezero,                              // llvm.x86.tilezero
    x86_tilezero_internal,                     // llvm.x86.tilezero.internal
    x86_tpause,                                // llvm.x86.tpause
    x86_umonitor,                              // llvm.x86.umonitor
    x86_umwait,                                // llvm.x86.umwait
    x86_vbcstnebf162ps128,                     // llvm.x86.vbcstnebf162ps128
    x86_vbcstnebf162ps256,                     // llvm.x86.vbcstnebf162ps256
    x86_vbcstnesh2ps128,                       // llvm.x86.vbcstnesh2ps128
    x86_vbcstnesh2ps256,                       // llvm.x86.vbcstnesh2ps256
    x86_vcvtneebf162ps128,                     // llvm.x86.vcvtneebf162ps128
    x86_vcvtneebf162ps256,                     // llvm.x86.vcvtneebf162ps256
    x86_vcvtneeph2ps128,                       // llvm.x86.vcvtneeph2ps128
    x86_vcvtneeph2ps256,                       // llvm.x86.vcvtneeph2ps256
    x86_vcvtneobf162ps128,                     // llvm.x86.vcvtneobf162ps128
    x86_vcvtneobf162ps256,                     // llvm.x86.vcvtneobf162ps256
    x86_vcvtneoph2ps128,                       // llvm.x86.vcvtneoph2ps128
    x86_vcvtneoph2ps256,                       // llvm.x86.vcvtneoph2ps256
    x86_vcvtneps2bf16128,                      // llvm.x86.vcvtneps2bf16128
    x86_vcvtneps2bf16256,                      // llvm.x86.vcvtneps2bf16256
    x86_vcvtps2ph_128,                         // llvm.x86.vcvtps2ph.128
    x86_vcvtps2ph_256,                         // llvm.x86.vcvtps2ph.256
    x86_vgf2p8affineinvqb_128,                 // llvm.x86.vgf2p8affineinvqb.128
    x86_vgf2p8affineinvqb_256,                 // llvm.x86.vgf2p8affineinvqb.256
    x86_vgf2p8affineinvqb_512,                 // llvm.x86.vgf2p8affineinvqb.512
    x86_vgf2p8affineqb_128,                    // llvm.x86.vgf2p8affineqb.128
    x86_vgf2p8affineqb_256,                    // llvm.x86.vgf2p8affineqb.256
    x86_vgf2p8affineqb_512,                    // llvm.x86.vgf2p8affineqb.512
    x86_vgf2p8mulb_128,                        // llvm.x86.vgf2p8mulb.128
    x86_vgf2p8mulb_256,                        // llvm.x86.vgf2p8mulb.256
    x86_vgf2p8mulb_512,                        // llvm.x86.vgf2p8mulb.512
    x86_vsha512msg1,                           // llvm.x86.vsha512msg1
    x86_vsha512msg2,                           // llvm.x86.vsha512msg2
    x86_vsha512rnds2,                          // llvm.x86.vsha512rnds2
    x86_vsm3msg1,                              // llvm.x86.vsm3msg1
    x86_vsm3msg2,                              // llvm.x86.vsm3msg2
    x86_vsm3rnds2,                             // llvm.x86.vsm3rnds2
    x86_vsm4key4128,                           // llvm.x86.vsm4key4128
    x86_vsm4key4256,                           // llvm.x86.vsm4key4256
    x86_vsm4rnds4128,                          // llvm.x86.vsm4rnds4128
    x86_vsm4rnds4256,                          // llvm.x86.vsm4rnds4256
    x86_wbinvd,                                // llvm.x86.wbinvd
    x86_wbnoinvd,                              // llvm.x86.wbnoinvd
    x86_wrfsbase_32,                           // llvm.x86.wrfsbase.32
    x86_wrfsbase_64,                           // llvm.x86.wrfsbase.64
    x86_wrgsbase_32,                           // llvm.x86.wrgsbase.32
    x86_wrgsbase_64,                           // llvm.x86.wrgsbase.64
    x86_wrpkru,                                // llvm.x86.wrpkru
    x86_wrssd,                                 // llvm.x86.wrssd
    x86_wrssq,                                 // llvm.x86.wrssq
    x86_wrussd,                                // llvm.x86.wrussd
    x86_wrussq,                                // llvm.x86.wrussq
    x86_xabort,                                // llvm.x86.xabort
    x86_xbegin,                                // llvm.x86.xbegin
    x86_xend,                                  // llvm.x86.xend
    x86_xgetbv,                                // llvm.x86.xgetbv
    x86_xop_vfrcz_pd,                          // llvm.x86.xop.vfrcz.pd
    x86_xop_vfrcz_pd_256,                      // llvm.x86.xop.vfrcz.pd.256
    x86_xop_vfrcz_ps,                          // llvm.x86.xop.vfrcz.ps
    x86_xop_vfrcz_ps_256,                      // llvm.x86.xop.vfrcz.ps.256
    x86_xop_vfrcz_sd,                          // llvm.x86.xop.vfrcz.sd
    x86_xop_vfrcz_ss,                          // llvm.x86.xop.vfrcz.ss
    x86_xop_vpermil2pd,                        // llvm.x86.xop.vpermil2pd
    x86_xop_vpermil2pd_256,                    // llvm.x86.xop.vpermil2pd.256
    x86_xop_vpermil2ps,                        // llvm.x86.xop.vpermil2ps
    x86_xop_vpermil2ps_256,                    // llvm.x86.xop.vpermil2ps.256
    x86_xop_vphaddbd,                          // llvm.x86.xop.vphaddbd
    x86_xop_vphaddbq,                          // llvm.x86.xop.vphaddbq
    x86_xop_vphaddbw,                          // llvm.x86.xop.vphaddbw
    x86_xop_vphadddq,                          // llvm.x86.xop.vphadddq
    x86_xop_vphaddubd,                         // llvm.x86.xop.vphaddubd
    x86_xop_vphaddubq,                         // llvm.x86.xop.vphaddubq
    x86_xop_vphaddubw,                         // llvm.x86.xop.vphaddubw
    x86_xop_vphaddudq,                         // llvm.x86.xop.vphaddudq
    x86_xop_vphadduwd,                         // llvm.x86.xop.vphadduwd
    x86_xop_vphadduwq,                         // llvm.x86.xop.vphadduwq
    x86_xop_vphaddwd,                          // llvm.x86.xop.vphaddwd
    x86_xop_vphaddwq,                          // llvm.x86.xop.vphaddwq
    x86_xop_vphsubbw,                          // llvm.x86.xop.vphsubbw
    x86_xop_vphsubdq,                          // llvm.x86.xop.vphsubdq
    x86_xop_vphsubwd,                          // llvm.x86.xop.vphsubwd
    x86_xop_vpmacsdd,                          // llvm.x86.xop.vpmacsdd
    x86_xop_vpmacsdqh,                         // llvm.x86.xop.vpmacsdqh
    x86_xop_vpmacsdql,                         // llvm.x86.xop.vpmacsdql
    x86_xop_vpmacssdd,                         // llvm.x86.xop.vpmacssdd
    x86_xop_vpmacssdqh,                        // llvm.x86.xop.vpmacssdqh
    x86_xop_vpmacssdql,                        // llvm.x86.xop.vpmacssdql
    x86_xop_vpmacsswd,                         // llvm.x86.xop.vpmacsswd
    x86_xop_vpmacssww,                         // llvm.x86.xop.vpmacssww
    x86_xop_vpmacswd,                          // llvm.x86.xop.vpmacswd
    x86_xop_vpmacsww,                          // llvm.x86.xop.vpmacsww
    x86_xop_vpmadcsswd,                        // llvm.x86.xop.vpmadcsswd
    x86_xop_vpmadcswd,                         // llvm.x86.xop.vpmadcswd
    x86_xop_vpperm,                            // llvm.x86.xop.vpperm
    x86_xop_vpshab,                            // llvm.x86.xop.vpshab
    x86_xop_vpshad,                            // llvm.x86.xop.vpshad
    x86_xop_vpshaq,                            // llvm.x86.xop.vpshaq
    x86_xop_vpshaw,                            // llvm.x86.xop.vpshaw
    x86_xop_vpshlb,                            // llvm.x86.xop.vpshlb
    x86_xop_vpshld,                            // llvm.x86.xop.vpshld
    x86_xop_vpshlq,                            // llvm.x86.xop.vpshlq
    x86_xop_vpshlw,                            // llvm.x86.xop.vpshlw
    x86_xresldtrk,                             // llvm.x86.xresldtrk
    x86_xrstor,                                // llvm.x86.xrstor
    x86_xrstor64,                              // llvm.x86.xrstor64
    x86_xrstors,                               // llvm.x86.xrstors
    x86_xrstors64,                             // llvm.x86.xrstors64
    x86_xsave,                                 // llvm.x86.xsave
    x86_xsave64,                               // llvm.x86.xsave64
    x86_xsavec,                                // llvm.x86.xsavec
    x86_xsavec64,                              // llvm.x86.xsavec64
    x86_xsaveopt,                              // llvm.x86.xsaveopt
    x86_xsaveopt64,                            // llvm.x86.xsaveopt64
    x86_xsaves,                                // llvm.x86.xsaves
    x86_xsaves64,                              // llvm.x86.xsaves64
    x86_xsetbv,                                // llvm.x86.xsetbv
    x86_xsusldtrk,                             // llvm.x86.xsusldtrk
    x86_xtest,                                 // llvm.x86.xtest
}; // enum
} // namespace Intrinsic
} // namespace llvm

#endif

//===- Intrinsics.h - LLVM Intrinsic Function Handling ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a set of enums which allow processing of intrinsic
// functions.  Values of these enum types are returned by
// Function::getIntrinsicID.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_INTRINSICS_H
#define LLVM_IR_INTRINSICS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TypeSize.h"
#include <optional>
#include <string>

namespace llvm {

class Type;
class FunctionType;
class Function;
class LLVMContext;
class Module;
class AttributeList;

/// This namespace contains an enum with a value for every intrinsic/builtin
/// function known by LLVM. The enum values are returned by
/// Function::getIntrinsicID().
namespace Intrinsic {
  // Abstraction for the arguments of the noalias intrinsics
  static const int NoAliasScopeDeclScopeArg = 0;

  // Intrinsic ID type. This is an opaque typedef to facilitate splitting up
  // the enum into target-specific enums.
  typedef unsigned ID;

  enum IndependentIntrinsics : unsigned {
    not_intrinsic = 0, // Must be zero

  // Get the intrinsic enums generated from Intrinsics.td
#define GET_INTRINSIC_ENUM_VALUES
#include "llvm/IR/IntrinsicEnums.inc"
#undef GET_INTRINSIC_ENUM_VALUES
  };

  /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
  /// Note that this version is for intrinsics with no overloads; use the
  /// other version of getName if overloads are required.
  StringRef getName(ID id);
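
  // A minimal usage sketch: llvm.x86.rdtsc has no overloads, so its fixed
  // name can be fetched directly.
  //
  //   StringRef Name = Intrinsic::getName(Intrinsic::x86_rdtsc);
  //   // Name == "llvm.x86.rdtsc"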

  /// Return the LLVM name for an intrinsic, without encoded types for
  /// overloading, such as "llvm.ssa.copy".
  StringRef getBaseName(ID id);

  /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx" or
  /// "llvm.ssa.copy.p0s_s.1". Unlike the StringRef version above, this version
  /// supports overloads, at the cost of constructing a std::string; when no
  /// overloads are required it is still safe to use, but the StringRef version
  /// is preferred. If one of the types is based on an unnamed type, a function
  /// type must be computed; providing FT avoids that computation.
  std::string getName(ID Id, ArrayRef<Type *> Tys, Module *M,
                      FunctionType *FT = nullptr);

  /// Return the LLVM name for an intrinsic. This is a special version only to
  /// be used by LLVMIntrinsicCopyOverloadedName. It only supports overloads
  /// based on named types.
  std::string getNameNoUnnamedTypes(ID Id, ArrayRef<Type *> Tys);

  /// Return the function type for an intrinsic.
  FunctionType *getType(LLVMContext &Context, ID id,
                        ArrayRef<Type *> Tys = std::nullopt);

  /// Returns true if the intrinsic can be overloaded.
  bool isOverloaded(ID id);
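
  // A minimal sketch combining the two queries above, assuming an existing
  // LLVMContext Ctx. llvm.umax is overloaded on its operand type, so one
  // concrete type selects the whole signature:
  //
  //   if (Intrinsic::isOverloaded(Intrinsic::umax)) {
  //     FunctionType *FTy =
  //         Intrinsic::getType(Ctx, Intrinsic::umax, {Type::getInt32Ty(Ctx)});
  //     // FTy is "i32 (i32, i32)".
  //   }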

  /// Return the attributes for an intrinsic.
  AttributeList getAttributes(LLVMContext &C, ID id);

  /// Create or insert an LLVM Function declaration for an intrinsic, and return
  /// it.
  ///
  /// The Tys parameter is for intrinsics with overloaded types (e.g., those
  /// using iAny, fAny, vAny, or iPTRAny).  For a declaration of an overloaded
  /// intrinsic, Tys must provide exactly one type for each overloaded type in
  /// the intrinsic.
  Function *getDeclaration(Module *M, ID id,
                           ArrayRef<Type *> Tys = std::nullopt);
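
  // A minimal sketch, assuming an existing Module *M and LLVMContext Ctx.
  // llvm.memcpy is overloaded on its two pointer operands and its length
  // operand, so Tys must supply exactly those three types:
  //
  //   PointerType *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/0);
  //   Function *MemCpy = Intrinsic::getDeclaration(
  //       M, Intrinsic::memcpy, {PtrTy, PtrTy, Type::getInt64Ty(Ctx)});
  //   // MemCpy->getName() == "llvm.memcpy.p0.p0.i64"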

  /// Looks up Name in NameTable via binary search. NameTable must be sorted
  /// and all entries must start with "llvm.".  If NameTable contains an exact
  /// match for Name or a prefix of Name followed by a dot, its index in
  /// NameTable is returned. Otherwise, -1 is returned.
  int lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
                                StringRef Name);
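
  // A minimal sketch over a hypothetical two-entry table. "llvm.foo.bar" is
  // matched by the "llvm.foo" entry because a prefix of the name followed by
  // a dot counts as a match:
  //
  //   static const char *const Names[] = {"llvm.bar", "llvm.foo"};
  //   int Idx = Intrinsic::lookupLLVMIntrinsicByName(Names, "llvm.foo.bar");
  //   // Idx == 1; a name matching no entry yields -1.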

  /// Map a Clang builtin name to an intrinsic ID.
  ID getIntrinsicForClangBuiltin(const char *Prefix, StringRef BuiltinName);

  /// Map a MS builtin name to an intrinsic ID.
  ID getIntrinsicForMSBuiltin(const char *Prefix, StringRef BuiltinName);
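
  // Both mappings return not_intrinsic when no table entry exists. A minimal
  // sketch for the Clang variant, assuming the prefix is the target name used
  // by the generated table (e.g. "x86" for an ia32 builtin):
  //
  //   Intrinsic::ID IID =
  //       Intrinsic::getIntrinsicForClangBuiltin("x86", "__builtin_ia32_pause");
  //   // IID == Intrinsic::x86_sse2_pause if the builtin is mapped.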

  /// This is a type descriptor which explains the type requirements of an
  /// intrinsic. This is returned by getIntrinsicInfoTableEntries.
  struct IITDescriptor {
    enum IITDescriptorKind {
      Void,
      VarArg,
      MMX,
      Token,
      Metadata,
      Half,
      BFloat,
      Float,
      Double,
      Quad,
      Integer,
      Vector,
      Pointer,
      Struct,
      Argument,
      ExtendArgument,
      TruncArgument,
      HalfVecArgument,
      SameVecWidthArgument,
      VecOfAnyPtrsToElt,
      VecElementArgument,
      Subdivide2Argument,
      Subdivide4Argument,
      VecOfBitcastsToInt,
      AMX,
      PPCQuad,
      AArch64Svcount,
    } Kind;

    union {
      unsigned Integer_Width;
      unsigned Float_Width;
      unsigned Pointer_AddressSpace;
      unsigned Struct_NumElements;
      unsigned Argument_Info;
      ElementCount Vector_Width;
    };

    // AK_% : Defined in Intrinsics.td
    enum ArgKind {
#define GET_INTRINSIC_ARGKIND
#include "llvm/IR/IntrinsicEnums.inc"
#undef GET_INTRINSIC_ARGKIND
    };

    unsigned getArgumentNumber() const {
      assert(Kind == Argument || Kind == ExtendArgument ||
             Kind == TruncArgument || Kind == HalfVecArgument ||
             Kind == SameVecWidthArgument || Kind == VecElementArgument ||
             Kind == Subdivide2Argument || Kind == Subdivide4Argument ||
             Kind == VecOfBitcastsToInt);
      return Argument_Info >> 3;
    }
    ArgKind getArgumentKind() const {
      assert(Kind == Argument || Kind == ExtendArgument ||
             Kind == TruncArgument || Kind == HalfVecArgument ||
             Kind == SameVecWidthArgument ||
             Kind == VecElementArgument || Kind == Subdivide2Argument ||
             Kind == Subdivide4Argument || Kind == VecOfBitcastsToInt);
      return (ArgKind)(Argument_Info & 7);
    }
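    // Illustrative example (editorial): Argument_Info packs the argument
    // number in the high bits and the ArgKind in the low three bits, so:
    //
    //   IITDescriptor D =
    //       IITDescriptor::get(Argument, (2 << 3) | AK_AnyInteger);
    //   // D.getArgumentNumber() == 2, D.getArgumentKind() == AK_AnyInteger
    //
    // AK_AnyInteger is one of the AK_* values generated from Intrinsics.td.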

    // VecOfAnyPtrsToElt uses both an overloaded argument (for address space)
    // and a reference argument (for matching vector width and element types)
    unsigned getOverloadArgNumber() const {
      assert(Kind == VecOfAnyPtrsToElt);
      return Argument_Info >> 16;
    }
    unsigned getRefArgNumber() const {
      assert(Kind == VecOfAnyPtrsToElt);
      return Argument_Info & 0xFFFF;
    }

    static IITDescriptor get(IITDescriptorKind K, unsigned Field) {
      IITDescriptor Result = { K, { Field } };
      return Result;
    }

    static IITDescriptor get(IITDescriptorKind K, unsigned short Hi,
                             unsigned short Lo) {
      unsigned Field = Hi << 16 | Lo;
      IITDescriptor Result = {K, {Field}};
      return Result;
    }

    static IITDescriptor getVector(unsigned Width, bool IsScalable) {
      IITDescriptor Result = {Vector, {0}};
      Result.Vector_Width = ElementCount::get(Width, IsScalable);
      return Result;
    }
  };

  /// Decode the IIT table entries for the specified intrinsic into the given
  /// array of IITDescriptors.
  void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl<IITDescriptor> &T);

  enum MatchIntrinsicTypesResult {
    MatchIntrinsicTypes_Match = 0,
    MatchIntrinsicTypes_NoMatchRet = 1,
    MatchIntrinsicTypes_NoMatchArg = 2,
  };

  /// Match the specified function type with the type constraints specified by
  /// the .td file. If the given type is an overloaded type it is pushed to the
  /// ArgTys vector.
  ///
  /// Returns MatchIntrinsicTypes_Match (0) if the given type matches the
  /// constraints, or a NoMatch value identifying the mismatch otherwise.
  MatchIntrinsicTypesResult
  matchIntrinsicSignature(FunctionType *FTy, ArrayRef<IITDescriptor> &Infos,
                          SmallVectorImpl<Type *> &ArgTys);

  /// Verify whether the intrinsic has variable arguments. This method is
  /// intended to be called after all the fixed arguments have been matched.
  ///
  /// This method returns true on error.
  bool matchIntrinsicVarArg(bool isVarArg, ArrayRef<IITDescriptor> &Infos);

  /// Gets the type arguments of an intrinsic call by matching type constraints
  /// specified by the .td file. The overloaded types are pushed into the
  /// ArgTys vector.
  ///
  /// Returns false if the given function is not a valid intrinsic call.
  bool getIntrinsicSignature(Function *F, SmallVectorImpl<Type *> &ArgTys);
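  // Illustrative sketch (editorial): recovering the overloaded type arguments
  // of a Function `F` known to be an intrinsic; `F` is assumed in scope.
  //
  //   SmallVector<Type *, 4> ArgTys;
  //   if (Intrinsic::getIntrinsicSignature(F, ArgTys)) {
  //     // ArgTys now holds one Type * per overloaded type of F.
  //   }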

  // Checks if the intrinsic name matches with its signature and if not
  // returns the declaration with the same signature and remangled name.
  // An existing GlobalValue with the wanted name but with a wrong prototype
  // or of the wrong kind will be renamed by adding ".renamed" to the name.
  std::optional<Function *> remangleIntrinsicFunction(Function *F);

} // End Intrinsic namespace

} // End llvm namespace

#endif
//===-- llvm/GlobalValue.h - Class to represent a global value --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a common base class of all globally definable objects.  As such,
// it is subclassed by GlobalVariable, GlobalAlias and by Function.  This is
// used because you can do certain things with these global objects that you
// can't do to anything else.  For example, use the address of one as a
// constant.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_GLOBALVALUE_H
#define LLVM_IR_GLOBALVALUE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include <cassert>
#include <cstdint>
#include <string>

namespace llvm {

class Comdat;
class ConstantRange;
class Error;
class GlobalObject;
class Module;

namespace Intrinsic {
typedef unsigned ID;
} // end namespace Intrinsic

class GlobalValue : public Constant {
public:
  /// An enumeration for the kinds of linkage for global values.
  enum LinkageTypes {
    ExternalLinkage = 0, ///< Externally visible function
    AvailableExternallyLinkage, ///< Available for inspection, not emission.
    LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
    LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
    WeakAnyLinkage,     ///< Keep one copy of named function when linking (weak)
    WeakODRLinkage,     ///< Same, but only replaced by something equivalent.
    AppendingLinkage,   ///< Special purpose, only applies to global arrays
    InternalLinkage,    ///< Rename collisions when linking (static functions).
    PrivateLinkage,     ///< Like Internal, but omit from symbol table.
    ExternalWeakLinkage,///< ExternalWeak linkage description.
    CommonLinkage       ///< Tentative definitions.
  };

  /// An enumeration for the kinds of visibility of global values.
  enum VisibilityTypes {
    DefaultVisibility = 0,  ///< The GV is visible
    HiddenVisibility,       ///< The GV is hidden
    ProtectedVisibility     ///< The GV is protected
  };

  /// Storage classes of global values for PE targets.
  enum DLLStorageClassTypes {
    DefaultStorageClass   = 0,
    DLLImportStorageClass = 1, ///< Function to be imported from DLL
    DLLExportStorageClass = 2  ///< Function to be accessible from DLL.
  };

protected:
  GlobalValue(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
              LinkageTypes Linkage, const Twine &Name, unsigned AddressSpace)
      : Constant(PointerType::get(Ty, AddressSpace), VTy, Ops, NumOps),
        ValueType(Ty), Visibility(DefaultVisibility),
        UnnamedAddrVal(unsigned(UnnamedAddr::None)),
        DllStorageClass(DefaultStorageClass), ThreadLocal(NotThreadLocal),
        HasLLVMReservedName(false), IsDSOLocal(false), HasPartition(false),
        HasSanitizerMetadata(false) {
    setLinkage(Linkage);
    setName(Name);
  }

  Type *ValueType;

  static const unsigned GlobalValueSubClassDataBits = 15;

  // All bitfields use unsigned as the underlying type so that MSVC will pack
  // them.
  unsigned Linkage : 4;       // The linkage of this global
  unsigned Visibility : 2;    // The visibility style of this global
  unsigned UnnamedAddrVal : 2; // This value's address is not significant
  unsigned DllStorageClass : 2; // DLL storage class

  unsigned ThreadLocal : 3; // Is this symbol "Thread Local", if so, what is
                            // the desired model?

  /// True if the function's name starts with "llvm.".  This corresponds to the
  /// value of Function::isIntrinsic(), which may be true even if
  /// Function::getIntrinsicID() returns Intrinsic::not_intrinsic.
  unsigned HasLLVMReservedName : 1;

  /// If true then there is a definition within the same linkage unit and that
  /// definition cannot be runtime preempted.
  unsigned IsDSOLocal : 1;

  /// True if this symbol has a partition name assigned (see
  /// https://lld.llvm.org/Partitions.html).
  unsigned HasPartition : 1;

  /// True if this symbol has sanitizer metadata available. Should only happen
  /// if sanitizers were enabled when building the translation unit which
  /// contains this GV.
  unsigned HasSanitizerMetadata : 1;

private:
  // Give subclasses access to what otherwise would be wasted padding.
  // (15 + 4 + 2 + 2 + 2 + 3 + 1 + 1 + 1 + 1) == 32.
  unsigned SubClassData : GlobalValueSubClassDataBits;

  friend class Constant;

  void destroyConstantImpl();
  Value *handleOperandChangeImpl(Value *From, Value *To);

  /// Returns true if the definition of this global may be replaced by a
  /// differently optimized variant of the same source level function at link
  /// time.
  bool mayBeDerefined() const {
    switch (getLinkage()) {
    case WeakODRLinkage:
    case LinkOnceODRLinkage:
    case AvailableExternallyLinkage:
      return true;

    case WeakAnyLinkage:
    case LinkOnceAnyLinkage:
    case CommonLinkage:
    case ExternalWeakLinkage:
    case ExternalLinkage:
    case AppendingLinkage:
    case InternalLinkage:
    case PrivateLinkage:
      // Optimizations may assume builtin semantics for functions defined as
      // nobuiltin due to attributes at call-sites. To avoid applying IPO based
      // on nobuiltin semantics, treat such function definitions as maybe
      // derefined.
      return isInterposable() || isNobuiltinFnDef();
    }

    llvm_unreachable("Fully covered switch above!");
  }

  /// Returns true if the global is a function definition with the nobuiltin
  /// attribute.
  bool isNobuiltinFnDef() const;

protected:
  /// The intrinsic ID for this subclass (which must be a Function).
  ///
  /// This member is defined by this class, but not used for anything.
  /// Subclasses can use it to store their intrinsic ID, if they have one.
  ///
  /// This is stored here to save space in Function on 64-bit hosts.
  Intrinsic::ID IntID = (Intrinsic::ID)0U;

  unsigned getGlobalValueSubClassData() const {
    return SubClassData;
  }
  void setGlobalValueSubClassData(unsigned V) {
    assert(V < (1 << GlobalValueSubClassDataBits) && "It will not fit");
    SubClassData = V;
  }

  Module *Parent = nullptr; // The containing module.

  // Used by SymbolTableListTraits.
  void setParent(Module *parent) {
    Parent = parent;
  }

  ~GlobalValue() {
    removeDeadConstantUsers();   // remove any dead constants using this.
  }

public:
  enum ThreadLocalMode {
    NotThreadLocal = 0,
    GeneralDynamicTLSModel,
    LocalDynamicTLSModel,
    InitialExecTLSModel,
    LocalExecTLSModel
  };

  GlobalValue(const GlobalValue &) = delete;

  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  enum class UnnamedAddr {
    None,
    Local,
    Global,
  };

  bool hasGlobalUnnamedAddr() const {
    return getUnnamedAddr() == UnnamedAddr::Global;
  }

  /// Returns true if this value's address is not significant in this module.
  /// This attribute is intended to be used only by the code generator and LTO
  /// to allow the linker to decide whether the global needs to be in the symbol
  /// table. It should probably not be used in optimizations, as the value may
  /// have uses outside the module; use hasGlobalUnnamedAddr() instead.
  bool hasAtLeastLocalUnnamedAddr() const {
    return getUnnamedAddr() != UnnamedAddr::None;
  }

  UnnamedAddr getUnnamedAddr() const {
    return UnnamedAddr(UnnamedAddrVal);
  }
  void setUnnamedAddr(UnnamedAddr Val) { UnnamedAddrVal = unsigned(Val); }

  static UnnamedAddr getMinUnnamedAddr(UnnamedAddr A, UnnamedAddr B) {
    if (A == UnnamedAddr::None || B == UnnamedAddr::None)
      return UnnamedAddr::None;
    if (A == UnnamedAddr::Local || B == UnnamedAddr::Local)
      return UnnamedAddr::Local;
    return UnnamedAddr::Global;
  }
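  // Worked example (editorial note): getMinUnnamedAddr(UnnamedAddr::Local,
  // UnnamedAddr::Global) yields UnnamedAddr::Local, and combining anything
  // with UnnamedAddr::None yields UnnamedAddr::None; the merge always keeps
  // the weaker guarantee.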

  bool hasComdat() const { return getComdat() != nullptr; }
  const Comdat *getComdat() const;
  Comdat *getComdat() {
    return const_cast<Comdat *>(
                           static_cast<const GlobalValue *>(this)->getComdat());
  }

  VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
  bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
  bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; }
  bool hasProtectedVisibility() const {
    return Visibility == ProtectedVisibility;
  }
  void setVisibility(VisibilityTypes V) {
    assert((!hasLocalLinkage() || V == DefaultVisibility) &&
           "local linkage requires default visibility");
    Visibility = V;
    if (isImplicitDSOLocal())
      setDSOLocal(true);
  }

  /// If the value is "Thread Local", its value isn't shared by the threads;
  /// each thread gets its own copy.
  bool isThreadLocal() const { return getThreadLocalMode() != NotThreadLocal; }
  void setThreadLocal(bool Val) {
    setThreadLocalMode(Val ? GeneralDynamicTLSModel : NotThreadLocal);
  }
  void setThreadLocalMode(ThreadLocalMode Val) {
    assert(Val == NotThreadLocal || getValueID() != Value::FunctionVal);
    ThreadLocal = Val;
  }
  ThreadLocalMode getThreadLocalMode() const {
    return static_cast<ThreadLocalMode>(ThreadLocal);
  }

  DLLStorageClassTypes getDLLStorageClass() const {
    return DLLStorageClassTypes(DllStorageClass);
  }
  bool hasDLLImportStorageClass() const {
    return DllStorageClass == DLLImportStorageClass;
  }
  bool hasDLLExportStorageClass() const {
    return DllStorageClass == DLLExportStorageClass;
  }
  void setDLLStorageClass(DLLStorageClassTypes C) {
    assert((!hasLocalLinkage() || C == DefaultStorageClass) &&
           "local linkage requires DefaultStorageClass");
    DllStorageClass = C;
  }

  bool hasSection() const { return !getSection().empty(); }
  StringRef getSection() const;

  /// Global values are always pointers.
  PointerType *getType() const { return cast<PointerType>(User::getType()); }

  Type *getValueType() const { return ValueType; }

  bool isImplicitDSOLocal() const {
    return hasLocalLinkage() ||
           (!hasDefaultVisibility() && !hasExternalWeakLinkage());
  }

  void setDSOLocal(bool Local) { IsDSOLocal = Local; }

  bool isDSOLocal() const {
    return IsDSOLocal;
  }

  bool hasPartition() const {
    return HasPartition;
  }
  StringRef getPartition() const;
  void setPartition(StringRef Part);

  // ASan, HWASan and Memtag sanitizers have some instrumentation that applies
  // specifically to global variables.
  struct SanitizerMetadata {
    SanitizerMetadata()
        : NoAddress(false), NoHWAddress(false),
          Memtag(false), IsDynInit(false) {}
    // For ASan and HWASan, this instrumentation is implicitly applied to all
    // global variables when built with -fsanitize=*. What we need is a way to
    // persist the information that a certain global variable should *not* have
    // sanitizers applied, which occurs if:
    //   1. The global variable is in the sanitizer ignore list, or
    //   2. The global variable is created by the sanitizers itself for internal
    //      usage, or
    //   3. The global variable has __attribute__((no_sanitize("..."))) or
    //      __attribute__((disable_sanitizer_instrumentation)).
    //
    // This is important, as some IR passes like GlobalMerge can delete global
    // variables and replace them with new ones. If the old variables were
    // marked to be unsanitized, then the new ones should also be.
    unsigned NoAddress : 1;
    unsigned NoHWAddress : 1;

    // Memtag sanitization works differently: sanitization is requested by
    // Clang
    // when `-fsanitize=memtag-globals` is provided, and the request can be
    // denied (and the attribute removed) by the AArch64 global tagging pass if
    // it can't be fulfilled (e.g. the global variable is a TLS variable).
    // Memtag sanitization has to interact with other parts of LLVM (like
    // suppressing certain optimizations, emitting assembly directives, or
    // creating special relocation sections).
    //
    // Use `GlobalValue::isTagged()` to check whether tagging should be enabled
    // for a global variable.
    unsigned Memtag : 1;

    // ASan-specific metadata. Is this global variable dynamically initialized
    // (from a C++ language perspective), and should therefore be checked for
    // ODR violations.
    unsigned IsDynInit : 1;
  };

  bool hasSanitizerMetadata() const { return HasSanitizerMetadata; }
  const SanitizerMetadata &getSanitizerMetadata() const;
  // Note: Not byref as it's a POD and otherwise it's too easy to call
  // G.setSanitizerMetadata(G2.getSanitizerMetadata()), and the argument becomes
  // dangling when the backing storage allocates the metadata for `G`, as the
  // storage is shared between `G` and `G2`.
  void setSanitizerMetadata(SanitizerMetadata Meta);
  void removeSanitizerMetadata();

  bool isTagged() const {
    return hasSanitizerMetadata() && getSanitizerMetadata().Memtag;
  }

  static LinkageTypes getLinkOnceLinkage(bool ODR) {
    return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage;
  }
  static LinkageTypes getWeakLinkage(bool ODR) {
    return ODR ? WeakODRLinkage : WeakAnyLinkage;
  }

  static bool isExternalLinkage(LinkageTypes Linkage) {
    return Linkage == ExternalLinkage;
  }
  static bool isAvailableExternallyLinkage(LinkageTypes Linkage) {
    return Linkage == AvailableExternallyLinkage;
  }
  static bool isLinkOnceAnyLinkage(LinkageTypes Linkage) {
    return Linkage == LinkOnceAnyLinkage;
  }
  static bool isLinkOnceODRLinkage(LinkageTypes Linkage) {
    return Linkage == LinkOnceODRLinkage;
  }
  static bool isLinkOnceLinkage(LinkageTypes Linkage) {
    return isLinkOnceAnyLinkage(Linkage) || isLinkOnceODRLinkage(Linkage);
  }
  static bool isWeakAnyLinkage(LinkageTypes Linkage) {
    return Linkage == WeakAnyLinkage;
  }
  static bool isWeakODRLinkage(LinkageTypes Linkage) {
    return Linkage == WeakODRLinkage;
  }
  static bool isWeakLinkage(LinkageTypes Linkage) {
    return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage);
  }
  static bool isAppendingLinkage(LinkageTypes Linkage) {
    return Linkage == AppendingLinkage;
  }
  static bool isInternalLinkage(LinkageTypes Linkage) {
    return Linkage == InternalLinkage;
  }
  static bool isPrivateLinkage(LinkageTypes Linkage) {
    return Linkage == PrivateLinkage;
  }
  static bool isLocalLinkage(LinkageTypes Linkage) {
    return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage);
  }
  static bool isExternalWeakLinkage(LinkageTypes Linkage) {
    return Linkage == ExternalWeakLinkage;
  }
  static bool isCommonLinkage(LinkageTypes Linkage) {
    return Linkage == CommonLinkage;
  }
  static bool isValidDeclarationLinkage(LinkageTypes Linkage) {
    return isExternalWeakLinkage(Linkage) || isExternalLinkage(Linkage);
  }

  /// Whether the definition of this global may be replaced by something
  /// non-equivalent at link time. For example, if a function has weak linkage
  /// then the code defining it may be replaced by different code.
  static bool isInterposableLinkage(LinkageTypes Linkage) {
    switch (Linkage) {
    case WeakAnyLinkage:
    case LinkOnceAnyLinkage:
    case CommonLinkage:
    case ExternalWeakLinkage:
      return true;

    case AvailableExternallyLinkage:
    case LinkOnceODRLinkage:
    case WeakODRLinkage:
    // The above three cannot be overridden but can be de-refined.

    case ExternalLinkage:
    case AppendingLinkage:
    case InternalLinkage:
    case PrivateLinkage:
      return false;
    }
    llvm_unreachable("Fully covered switch above!");
  }

  /// Whether the definition of this global may be discarded if it is not used
  /// in its compilation unit.
  static bool isDiscardableIfUnused(LinkageTypes Linkage) {
    return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage) ||
           isAvailableExternallyLinkage(Linkage);
  }

  /// Whether the definition of this global may be replaced at link time.  NB:
  /// Using this method outside of the code generators is almost always a
  /// mistake: when working at the IR level use isInterposable instead as it
  /// knows about ODR semantics.
  static bool isWeakForLinker(LinkageTypes Linkage)  {
    return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage ||
           Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage ||
           Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
  }

  /// Return true if the currently visible definition of this global (if any) is
  /// exactly the definition we will see at runtime.
  ///
  /// Non-exact linkage types inhibit most non-inlining IPO, since a
  /// differently optimized variant of the same function can have different
  /// observable or undefined behavior than in the variant currently visible.
  /// For instance, we could have started with
  ///
  ///   void foo(int *v) {
  ///     int t = 5 / v[0];
  ///     (void) t;
  ///   }
  ///
  /// and "refined" it to
  ///
  ///   void foo(int *v) { }
  ///
  /// However, we cannot infer readnone for `foo`, since that would justify
  /// DSE'ing a store to `v[0]` across a call to `foo`, which can cause
  /// undefined behavior if the linker replaces the actual call destination with
  /// the unoptimized `foo`.
  ///
  /// Inlining is okay across non-exact linkage types as long as they're not
  /// interposable (see \c isInterposable), since in such cases the currently
  /// visible variant is *a* correct implementation of the original source
  /// function; it just isn't the *only* correct implementation.
  bool isDefinitionExact() const {
    return !mayBeDerefined();
  }

  /// Return true if this global has an exact definition.
  bool hasExactDefinition() const {
    // While this computes exactly the same thing as
    // isStrongDefinitionForLinker, the intended uses are different.  This
    // function is intended to help decide if specific inter-procedural
    // transforms are correct, while isStrongDefinitionForLinker's intended use
    // is in low level code generation.
    return !isDeclaration() && isDefinitionExact();
  }

  /// Return true if this global's definition can be substituted with an
  /// *arbitrary* definition at link time or load time. We cannot do any IPO or
  /// inlining across interposable call edges, since the callee can be
  /// replaced with something arbitrary.
  bool isInterposable() const;
  bool canBenefitFromLocalAlias() const;

  bool hasExternalLinkage() const { return isExternalLinkage(getLinkage()); }
  bool hasAvailableExternallyLinkage() const {
    return isAvailableExternallyLinkage(getLinkage());
  }
  bool hasLinkOnceLinkage() const { return isLinkOnceLinkage(getLinkage()); }
  bool hasLinkOnceAnyLinkage() const {
    return isLinkOnceAnyLinkage(getLinkage());
  }
  bool hasLinkOnceODRLinkage() const {
    return isLinkOnceODRLinkage(getLinkage());
  }
  bool hasWeakLinkage() const { return isWeakLinkage(getLinkage()); }
  bool hasWeakAnyLinkage() const { return isWeakAnyLinkage(getLinkage()); }
  bool hasWeakODRLinkage() const { return isWeakODRLinkage(getLinkage()); }
  bool hasAppendingLinkage() const { return isAppendingLinkage(getLinkage()); }
  bool hasInternalLinkage() const { return isInternalLinkage(getLinkage()); }
  bool hasPrivateLinkage() const { return isPrivateLinkage(getLinkage()); }
  bool hasLocalLinkage() const { return isLocalLinkage(getLinkage()); }
  bool hasExternalWeakLinkage() const {
    return isExternalWeakLinkage(getLinkage());
  }
  bool hasCommonLinkage() const { return isCommonLinkage(getLinkage()); }
  bool hasValidDeclarationLinkage() const {
    return isValidDeclarationLinkage(getLinkage());
  }

  void setLinkage(LinkageTypes LT) {
    if (isLocalLinkage(LT)) {
      Visibility = DefaultVisibility;
      DllStorageClass = DefaultStorageClass;
    }
    Linkage = LT;
    if (isImplicitDSOLocal())
      setDSOLocal(true);
  }
  LinkageTypes getLinkage() const { return LinkageTypes(Linkage); }

  bool isDiscardableIfUnused() const {
    return isDiscardableIfUnused(getLinkage());
  }

  bool isWeakForLinker() const { return isWeakForLinker(getLinkage()); }

protected:
  /// Copy all additional attributes (those not needed to create a GlobalValue)
  /// from the GlobalValue Src to this one.
  void copyAttributesFrom(const GlobalValue *Src);

public:
  /// If the given string begins with the GlobalValue name mangling escape
  /// character '\1', drop it.
  ///
  /// This function applies a specific mangling that is used in PGO profiles,
  /// among other things. If you're trying to get a symbol name for an
  /// arbitrary GlobalValue, this is not the function you're looking for; see
  /// Mangler.h.
  static StringRef dropLLVMManglingEscape(StringRef Name) {
    if (!Name.empty() && Name[0] == '\1')
      return Name.substr(1);
    return Name;
  }
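  // Illustrative example (editorial):
  //
  //   GlobalValue::dropLLVMManglingEscape("\1_Z3foov"); // == "_Z3foov"
  //   GlobalValue::dropLLVMManglingEscape("_Z3foov");   // unchanged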

  /// Return the modified name for a global value suitable to be
  /// used as the key for a global lookup (e.g. profile or ThinLTO).
  /// The value's original name is \c Name and has linkage of type
  /// \c Linkage. The value is defined in module \c FileName.
  static std::string getGlobalIdentifier(StringRef Name,
                                         GlobalValue::LinkageTypes Linkage,
                                         StringRef FileName);

  /// Return the modified name for this global value suitable to be
  /// used as the key for a global lookup (e.g. profile or ThinLTO).
  std::string getGlobalIdentifier() const;

  /// Declare a type to represent a global unique identifier for a global value.
  /// This is a 64-bit hash that gives PGO and ThinLTO a compact, unique way to
  /// identify a symbol.
  using GUID = uint64_t;

  /// Return a 64-bit global unique ID constructed from global value name
  /// (i.e. returned by getGlobalIdentifier()).
  static GUID getGUID(StringRef GlobalName) { return MD5Hash(GlobalName); }

  /// Return a 64-bit global unique ID constructed from global value name
  /// (i.e. returned by getGlobalIdentifier()).
  GUID getGUID() const { return getGUID(getGlobalIdentifier()); }
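  // Illustrative sketch (editorial): computing the lookup key and GUID for a
  // local symbol "foo" defined in "bar.o"; both names are hypothetical.
  //
  //   std::string Key = GlobalValue::getGlobalIdentifier(
  //       "foo", GlobalValue::InternalLinkage, "bar.o");
  //   GlobalValue::GUID G = GlobalValue::getGUID(Key);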

  /// @name Materialization
  /// Materialization is used to construct functions only as they're needed.
  /// This is useful to reduce memory usage in LLVM or the parsing work done
  /// by the BitcodeReader to load the Module.
  /// @{

  /// If this function's Module is having its functions lazily streamed in
  /// from disk or some other source, this method can be used to check whether
  /// the function has been read in yet.
  bool isMaterializable() const;

  /// Make sure this GlobalValue is fully read.
  Error materialize();

  /// @}

  /// Return true if the primary definition of this global value is outside of
  /// the current translation unit.
  bool isDeclaration() const;

  bool isDeclarationForLinker() const {
    if (hasAvailableExternallyLinkage())
      return true;

    return isDeclaration();
  }

  /// Returns true if this global's definition will be the one chosen by the
  /// linker.
  ///
  /// NB! Ideally this should not be used at the IR level at all.  If you're
  /// interested in optimization constraints implied by the linker's ability to
  /// choose an implementation, prefer using \c hasExactDefinition.
  bool isStrongDefinitionForLinker() const {
    return !(isDeclarationForLinker() || isWeakForLinker());
  }

  const GlobalObject *getAliaseeObject() const;
  GlobalObject *getAliaseeObject() {
    return const_cast<GlobalObject *>(
        static_cast<const GlobalValue *>(this)->getAliaseeObject());
  }

  /// Returns whether this is a reference to an absolute symbol.
  bool isAbsoluteSymbolRef() const;

  /// If this is an absolute symbol reference, returns the range of the symbol,
  /// otherwise returns std::nullopt.
  std::optional<ConstantRange> getAbsoluteSymbolRange() const;

  /// This method unlinks 'this' from the containing module, but does not delete
  /// it.
  void removeFromParent();

  /// This method unlinks 'this' from the containing module and deletes it.
  void eraseFromParent();

  /// Get the module that contains this global value.
  Module *getParent() { return Parent; }
  const Module *getParent() const { return Parent; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Value *V) {
    return V->getValueID() == Value::FunctionVal ||
           V->getValueID() == Value::GlobalVariableVal ||
           V->getValueID() == Value::GlobalAliasVal ||
           V->getValueID() == Value::GlobalIFuncVal;
  }

  /// True if GV can be left out of the object symbol table. This is the case
  /// for linkonce_odr values whose address is not significant. While legal, it
  /// is not normally profitable to omit them from the .o symbol table. Using
  /// this analysis makes sense when the information can be passed down to the
  /// linker or we are in LTO.
  bool canBeOmittedFromSymbolTable() const;
};

} // end namespace llvm

#endif // LLVM_IR_GLOBALVALUE_H
//===-- llvm/LineEditor/LineEditor.h - line editor --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LINEEDITOR_LINEEDITOR_H
#define LLVM_LINEEDITOR_LINEEDITOR_H

#include "llvm/ADT/StringRef.h"
#include <cstdio>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class LineEditor {
public:
  /// Create a LineEditor object.
  ///
  /// \param ProgName The name of the current program. Used to form a default
  /// prompt.
  /// \param HistoryPath Path to the file in which to store history data, if
  /// possible.
  /// \param In The input stream used by the editor.
  /// \param Out The output stream used by the editor.
  /// \param Err The error stream used by the editor.
  LineEditor(StringRef ProgName, StringRef HistoryPath = "", FILE *In = stdin,
             FILE *Out = stdout, FILE *Err = stderr);
  ~LineEditor();

  /// Reads a line.
  ///
  /// \return The line, or std::optional<std::string>() on EOF.
  std::optional<std::string> readLine() const;

  void saveHistory();
  void loadHistory();

  static std::string getDefaultHistoryPath(StringRef ProgName);

  /// The action to perform upon a completion request.
  struct CompletionAction {
    enum ActionKind {
      /// Insert Text at the cursor position.
      AK_Insert,
      /// Show Completions, or beep if the list is empty.
      AK_ShowCompletions
    };

    ActionKind Kind;

    /// The text to insert.
    std::string Text;

    /// The list of completions to show.
    std::vector<std::string> Completions;
  };

  /// A possible completion at a given cursor position.
  struct Completion {
    Completion() = default;
    Completion(const std::string &TypedText, const std::string &DisplayText)
        : TypedText(TypedText), DisplayText(DisplayText) {}

    /// The text to insert. If the user has already input some of the
    /// completion, this should only include the rest of the text.
    std::string TypedText;

    /// A description of this completion. This may be the completion itself, or
    /// a summary of its type or arguments.
    std::string DisplayText;
  };

  /// Set the completer for this LineEditor. A completer is a function object
  /// which takes arguments of type StringRef (the string to complete) and
  /// size_t (the zero-based cursor position in the StringRef) and returns a
  /// CompletionAction.
  template <typename T> void setCompleter(T Comp) {
    Completer.reset(new CompleterModel<T>(Comp));
  }

  /// Set the completer for this LineEditor to the given list completer.
  /// A list completer is a function object which takes arguments of type
  /// StringRef (the string to complete) and size_t (the zero-based cursor
  /// position in the StringRef) and returns a std::vector<Completion>.
  template <typename T> void setListCompleter(T Comp) {
    Completer.reset(new ListCompleterModel<T>(Comp));
  }
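  /// Example (editorial sketch): a list completer for a single hypothetical
  /// "help" command, followed by a simple read loop.
  /// \code
  ///   LineEditor LE("mytool");
  ///   LE.setListCompleter([](StringRef Buffer, size_t Pos) {
  ///     std::vector<LineEditor::Completion> Cs;
  ///     if (StringRef("help").starts_with(Buffer))
  ///       Cs.push_back(LineEditor::Completion(
  ///           std::string("help").substr(Buffer.size()), "help"));
  ///     return Cs;
  ///   });
  ///   while (std::optional<std::string> Line = LE.readLine()) {
  ///     // process *Line
  ///   }
  /// \endcode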

  /// Use the current completer to produce a CompletionAction for the given
  /// completion request. If the current completer is a list completer, this
  /// will return an AK_Insert CompletionAction if each completion has a common
  /// prefix, or an AK_ShowCompletions CompletionAction otherwise.
  ///
  /// \param Buffer The string to complete
  /// \param Pos The zero-based cursor position in the StringRef
  CompletionAction getCompletionAction(StringRef Buffer, size_t Pos) const;

  const std::string &getPrompt() const { return Prompt; }
  void setPrompt(const std::string &P) { Prompt = P; }

  // Public so callbacks in LineEditor.cpp can use it.
  struct InternalData;

private:
  std::string Prompt;
  std::string HistoryPath;
  std::unique_ptr<InternalData> Data;

  struct CompleterConcept {
    virtual ~CompleterConcept();
    virtual CompletionAction complete(StringRef Buffer, size_t Pos) const = 0;
  };

  struct ListCompleterConcept : CompleterConcept {
    ~ListCompleterConcept() override;
    CompletionAction complete(StringRef Buffer, size_t Pos) const override;
    static std::string getCommonPrefix(const std::vector<Completion> &Comps);
    virtual std::vector<Completion> getCompletions(StringRef Buffer,
                                                   size_t Pos) const = 0;
  };

  template <typename T>
  struct CompleterModel : CompleterConcept {
    CompleterModel(T Value) : Value(Value) {}
    CompletionAction complete(StringRef Buffer, size_t Pos) const override {
      return Value(Buffer, Pos);
    }
    T Value;
  };

  template <typename T>
  struct ListCompleterModel : ListCompleterConcept {
    ListCompleterModel(T Value) : Value(std::move(Value)) {}
    std::vector<Completion> getCompletions(StringRef Buffer,
                                           size_t Pos) const override {
      return Value(Buffer, Pos);
    }
    T Value;
  };

  std::unique_ptr<const CompleterConcept> Completer;
};

} // end namespace llvm

#endif
//===- MCWinEH.h - Windows Unwinding Support --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCWINEH_H
#define LLVM_MC_MCWINEH_H

#include "llvm/ADT/MapVector.h"
#include <vector>

namespace llvm {
class MCSection;
class MCStreamer;
class MCSymbol;

namespace WinEH {
struct Instruction {
  const MCSymbol *Label;
  unsigned Offset;
  unsigned Register;
  unsigned Operation;

  Instruction(unsigned Op, MCSymbol *L, unsigned Reg, unsigned Off)
    : Label(L), Offset(Off), Register(Reg), Operation(Op) {}

  bool operator==(const Instruction &I) const {
    // Check whether two instructions perform the same operation; the Label is
    // intentionally ignored, so the same operation applied at a different
    // spot (i.e. pointing at a different label) still compares equal.
    return Offset == I.Offset && Register == I.Register &&
           Operation == I.Operation;
  }
  bool operator!=(const Instruction &I) const { return !(*this == I); }
};

struct FrameInfo {
  const MCSymbol *Begin = nullptr;
  const MCSymbol *End = nullptr;
  const MCSymbol *FuncletOrFuncEnd = nullptr;
  const MCSymbol *ExceptionHandler = nullptr;
  const MCSymbol *Function = nullptr;
  const MCSymbol *PrologEnd = nullptr;
  const MCSymbol *Symbol = nullptr;
  MCSection *TextSection = nullptr;
  uint32_t PackedInfo = 0;
  uint32_t PrologCodeBytes = 0;

  bool HandlesUnwind = false;
  bool HandlesExceptions = false;
  bool EmitAttempted = false;
  bool Fragment = false;

  int LastFrameInst = -1;
  const FrameInfo *ChainedParent = nullptr;
  std::vector<Instruction> Instructions;
  struct Epilog {
    std::vector<Instruction> Instructions;
    unsigned Condition;
    MCSymbol *End;
  };
  MapVector<MCSymbol *, Epilog> EpilogMap;

  // For splitting unwind info of large functions
  struct Segment {
    int64_t Offset;
    int64_t Length;
    bool HasProlog;
    MCSymbol *Symbol = nullptr;
    // Map an Epilog's symbol to its offset within the function.
    MapVector<MCSymbol *, int64_t> Epilogs;

    Segment(int64_t Offset, int64_t Length, bool HasProlog = false)
        : Offset(Offset), Length(Length), HasProlog(HasProlog) {}
  };

  std::vector<Segment> Segments;

  FrameInfo() = default;
  FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel)
      : Begin(BeginFuncEHLabel), Function(Function) {}
  FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel,
            const FrameInfo *ChainedParent)
      : Begin(BeginFuncEHLabel), Function(Function),
        ChainedParent(ChainedParent) {}

  bool empty() const {
    if (!Instructions.empty())
      return false;
    for (const auto &E : EpilogMap)
      if (!E.second.Instructions.empty())
        return false;
    return true;
  }
};

class UnwindEmitter {
public:
  virtual ~UnwindEmitter();

  /// This emits the unwind info sections (.pdata and .xdata in PE/COFF).
  virtual void Emit(MCStreamer &Streamer) const = 0;
  virtual void EmitUnwindInfo(MCStreamer &Streamer, FrameInfo *FI,
                              bool HandlerData) const = 0;
};
} // end namespace WinEH
} // end namespace llvm

#endif
//===- llvm/MC/MachineLocation.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// The MachineLocation class is used to represent a simple location in a
// machine frame.  Locations are one of two forms: a register, or an address
// formed from a base address plus an offset.  Register indirection can be
// specified by explicitly passing an offset to the constructor.
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MACHINELOCATION_H
#define LLVM_MC_MACHINELOCATION_H

#include <cstdint>
#include <cassert>

namespace llvm {

class MachineLocation {
private:
  bool IsRegister = false;              ///< True if location is a register.
  unsigned Register = 0;                ///< gcc/gdb register number.

public:
  enum : uint32_t {
    // The target register number for an abstract frame pointer. The value is
    // arbitrary and chosen so that it doesn't collide with any real target
    // register.
  };

  MachineLocation() = default;
  /// Create a direct register location.
  explicit MachineLocation(unsigned R, bool Indirect = false)
      : IsRegister(!Indirect), Register(R) {}

  bool operator==(const MachineLocation &Other) const {
    return IsRegister == Other.IsRegister && Register == Other.Register;
  }

  // Accessors.
  /// \return true iff this is a register-indirect location.
  bool isIndirect()      const { return !IsRegister; }
  bool isReg()           const { return IsRegister; }
  unsigned getReg()      const { return Register; }
  void setIsRegister(bool Is)  { IsRegister = Is; }
  void setRegister(unsigned R) { Register = R; }
};
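// Illustrative example (editorial): the two forms of location; register 6 is
// an arbitrary gcc/gdb register number.
//
//   MachineLocation Direct(6);                       // Direct.isReg()
//   MachineLocation Indirect(6, /*Indirect=*/true);  // Indirect.isIndirect()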

inline bool operator!=(const MachineLocation &LHS, const MachineLocation &RHS) {
  return !(LHS == RHS);
}

} // end namespace llvm

#endif // LLVM_MC_MACHINELOCATION_H
//===-- llvm/MC/MCSPIRVObjectWriter.h - SPIR-V Object Writer ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSPIRVOBJECTWRITER_H
#define LLVM_MC_MCSPIRVOBJECTWRITER_H

#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>

namespace llvm {

class MCSPIRVObjectTargetWriter : public MCObjectTargetWriter {
protected:
  explicit MCSPIRVObjectTargetWriter() {}

public:
  Triple::ObjectFormatType getFormat() const override { return Triple::SPIRV; }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::SPIRV;
  }
};

/// Construct a new SPIR-V writer instance.
///
/// \param MOTW - The target specific SPIR-V writer subclass.
/// \param OS - The stream to write to.
/// \returns The constructed object writer.
std::unique_ptr<MCObjectWriter>
createSPIRVObjectWriter(std::unique_ptr<MCSPIRVObjectTargetWriter> MOTW,
                        raw_pwrite_stream &OS);

} // namespace llvm

#endif
//===- MCSPIRVStreamer.h - MCStreamer SPIR-V Object File Interface -*- C++ ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Overrides MCObjectStreamer to disable all unnecessary features with stubs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSPIRVSTREAMER_H
#define LLVM_MC_MCSPIRVSTREAMER_H

#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCObjectWriter.h"

namespace llvm {
class MCInst;
class raw_ostream;

class MCSPIRVStreamer : public MCObjectStreamer {
public:
  MCSPIRVStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                  std::unique_ptr<MCObjectWriter> OW,
                  std::unique_ptr<MCCodeEmitter> Emitter)
      : MCObjectStreamer(Context, std::move(TAB), std::move(OW),
                         std::move(Emitter)) {}

  bool emitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override {
    return false;
  }
  void emitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                        Align ByteAlignment) override {}
  void emitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
                    uint64_t Size = 0, Align ByteAlignment = Align(1),
                    SMLoc Loc = SMLoc()) override {}

private:
  void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;
};

} // end namespace llvm

#endif
//===- llvm/MC/MCELFObjectWriter.h - ELF Object Writer ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCELFOBJECTWRITER_H
#define LLVM_MC_MCELFOBJECTWRITER_H

#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <cstdint>
#include <vector>

namespace llvm {

class MCAssembler;
class MCContext;
class MCFixup;
class MCSymbol;
class MCSymbolELF;
class MCValue;

struct ELFRelocationEntry {
  uint64_t Offset; // Where is the relocation.
  const MCSymbolELF *Symbol; // The symbol to relocate with.
  unsigned Type;   // The type of the relocation.
  uint64_t Addend; // The addend to use.
  const MCSymbolELF *OriginalSymbol; // The original value of Symbol if we changed it.
  uint64_t OriginalAddend; // The original value of addend.

  ELFRelocationEntry(uint64_t Offset, const MCSymbolELF *Symbol, unsigned Type,
                     uint64_t Addend, const MCSymbolELF *OriginalSymbol,
                     uint64_t OriginalAddend)
      : Offset(Offset), Symbol(Symbol), Type(Type), Addend(Addend),
        OriginalSymbol(OriginalSymbol), OriginalAddend(OriginalAddend) {}

  void print(raw_ostream &Out) const {
    Out << "Off=" << Offset << ", Sym=" << Symbol << ", Type=" << Type
        << ", Addend=" << Addend << ", OriginalSymbol=" << OriginalSymbol
        << ", OriginalAddend=" << OriginalAddend;
  }

  LLVM_DUMP_METHOD void dump() const { print(errs()); }
};

class MCELFObjectTargetWriter : public MCObjectTargetWriter {
  const uint8_t OSABI;
  const uint8_t ABIVersion;
  const uint16_t EMachine;
  const unsigned HasRelocationAddend : 1;
  const unsigned Is64Bit : 1;

protected:
  MCELFObjectTargetWriter(bool Is64Bit_, uint8_t OSABI_, uint16_t EMachine_,
                          bool HasRelocationAddend_, uint8_t ABIVersion_ = 0);

public:
  virtual ~MCELFObjectTargetWriter() = default;

  Triple::ObjectFormatType getFormat() const override { return Triple::ELF; }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::ELF;
  }

  static uint8_t getOSABI(Triple::OSType OSType) {
    switch (OSType) {
      case Triple::CloudABI:
        return ELF::ELFOSABI_CLOUDABI;
      case Triple::HermitCore:
        return ELF::ELFOSABI_STANDALONE;
      case Triple::PS4:
      case Triple::FreeBSD:
        return ELF::ELFOSABI_FREEBSD;
      case Triple::Solaris:
        return ELF::ELFOSABI_SOLARIS;
      default:
        return ELF::ELFOSABI_NONE;
    }
  }

  virtual unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
                                const MCFixup &Fixup, bool IsPCRel) const = 0;

  virtual bool needsRelocateWithSymbol(const MCSymbol &Sym,
                                       unsigned Type) const;

  virtual void sortRelocs(const MCAssembler &Asm,
                          std::vector<ELFRelocationEntry> &Relocs);

  virtual void addTargetSectionFlags(MCContext &Ctx, MCSectionELF &Sec);

  /// \name Accessors
  /// @{
  uint8_t getOSABI() const { return OSABI; }
  uint8_t getABIVersion() const { return ABIVersion; }
  uint16_t getEMachine() const { return EMachine; }
  bool hasRelocationAddend() const { return HasRelocationAddend; }
  bool is64Bit() const { return Is64Bit; }
  /// @}

  // Instead of changing everyone's API we pack the N64 relocation type fields
  // into the existing 32-bit Type value.
#define R_TYPE_SHIFT 0
#define R_TYPE_MASK 0xffffff00
#define R_TYPE2_SHIFT 8
#define R_TYPE2_MASK 0xffff00ff
#define R_TYPE3_SHIFT 16
#define R_TYPE3_MASK 0xff00ffff
#define R_SSYM_SHIFT 24
#define R_SSYM_MASK 0x00ffffff

  // N64 relocation type accessors
  uint8_t getRType(uint32_t Type) const {
    return (unsigned)((Type >> R_TYPE_SHIFT) & 0xff);
  }
  uint8_t getRType2(uint32_t Type) const {
    return (unsigned)((Type >> R_TYPE2_SHIFT) & 0xff);
  }
  uint8_t getRType3(uint32_t Type) const {
    return (unsigned)((Type >> R_TYPE3_SHIFT) & 0xff);
  }
  uint8_t getRSsym(uint32_t Type) const {
    return (unsigned)((Type >> R_SSYM_SHIFT) & 0xff);
  }

  // N64 relocation type setting
  static unsigned setRTypes(unsigned Value1, unsigned Value2, unsigned Value3) {
    return ((Value1 & 0xff) << R_TYPE_SHIFT) |
           ((Value2 & 0xff) << R_TYPE2_SHIFT) |
           ((Value3 & 0xff) << R_TYPE3_SHIFT);
  }
  unsigned setRSsym(unsigned Value, unsigned Type) const {
    return (Type & R_SSYM_MASK) | ((Value & 0xff) << R_SSYM_SHIFT);
  }
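  // Illustrative sketch (editorial): composing and decomposing a MIPS N64
  // relocation triple with the helpers above.
  //
  //   unsigned Packed = setRTypes(ELF::R_MIPS_GPREL16, ELF::R_MIPS_SUB,
  //                               ELF::R_MIPS_HI16);
  //   // getRType(Packed)  == ELF::R_MIPS_GPREL16
  //   // getRType2(Packed) == ELF::R_MIPS_SUB
  //   // getRType3(Packed) == ELF::R_MIPS_HI16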

  // On AArch64, return a new section to be added to the ELF object that
  // contains relocations used to describe every symbol that should have memory
  // tags applied. Returns nullptr if no such section is necessary (i.e. there
  // are no tagged globals).
  virtual MCSectionELF *getMemtagRelocsSection(MCContext &Ctx) const {
    return nullptr;
  }
};

/// Construct a new ELF writer instance.
///
/// \param MOTW - The target specific ELF writer subclass.
/// \param OS - The stream to write to.
/// \returns The constructed object writer.
std::unique_ptr<MCObjectWriter>
createELFObjectWriter(std::unique_ptr<MCELFObjectTargetWriter> MOTW,
                      raw_pwrite_stream &OS, bool IsLittleEndian);

std::unique_ptr<MCObjectWriter>
createELFDwoObjectWriter(std::unique_ptr<MCELFObjectTargetWriter> MOTW,
                         raw_pwrite_stream &OS, raw_pwrite_stream &DwoOS,
                         bool IsLittleEndian);

} // end namespace llvm

#endif // LLVM_MC_MCELFOBJECTWRITER_H
//===- llvm/MC/MCCodeEmitter.h - Instruction Encoding -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCCODEEMITTER_H
#define LLVM_MC_MCCODEEMITTER_H

namespace llvm {

class MCFixup;
class MCInst;
class MCSubtargetInfo;
class raw_ostream;
template<typename T> class SmallVectorImpl;

/// MCCodeEmitter - Generic instruction encoding interface.
class MCCodeEmitter {
protected: // Can only create subclasses.
  MCCodeEmitter();

  /// encodeInstruction - Encode the given \p Inst to bytes on the output
  /// stream \p OS. Allows for an implementation of encodeInstruction that uses
  /// streams instead of a SmallVector.
  virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const {}

public:
  MCCodeEmitter(const MCCodeEmitter &) = delete;
  MCCodeEmitter &operator=(const MCCodeEmitter &) = delete;
  virtual ~MCCodeEmitter();

  /// Lifetime management
  virtual void reset() {}

  /// Append the prefixes of given instruction to the code buffer.
  ///
  /// \param Inst a single low-level machine instruction.
  /// \param CB code buffer
  virtual void emitPrefix(const MCInst &Inst, SmallVectorImpl<char> &CB,
                          const MCSubtargetInfo &STI) const {}
  /// encodeInstruction - Encode the given \p Inst to bytes and append to \p CB.
  virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl<char> &CB,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
};

} // end namespace llvm

#endif // LLVM_MC_MCCODEEMITTER_H
//===- MCTargetOptions.h - MC Target Options --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCTARGETOPTIONS_H
#define LLVM_MC_MCTARGETOPTIONS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Compression.h"
#include <string>
#include <vector>

namespace llvm {

enum class ExceptionHandling {
  None,     ///< No exception support
  DwarfCFI, ///< DWARF-like instruction based exceptions
  SjLj,     ///< setjmp/longjmp based exceptions
  ARM,      ///< ARM EHABI
  WinEH,    ///< Windows Exception Handling
  Wasm,     ///< WebAssembly Exception Handling
  AIX,      ///< AIX Exception Handling
};

enum class EmitDwarfUnwindType {
  Always,          // Always emit dwarf unwind
  NoCompactUnwind, // Only emit if compact unwind isn't available
  Default,         // Default behavior is based on the target
};

class StringRef;

class MCTargetOptions {
public:
  enum AsmInstrumentation {
    AsmInstrumentationNone,
    AsmInstrumentationAddress
  };

  bool MCRelaxAll : 1;
  bool MCNoExecStack : 1;
  bool MCFatalWarnings : 1;
  bool MCNoWarn : 1;
  bool MCNoDeprecatedWarn : 1;
  bool MCNoTypeCheck : 1;
  bool MCSaveTempLabels : 1;
  bool MCIncrementalLinkerCompatible : 1;
  bool ShowMCEncoding : 1;
  bool ShowMCInst : 1;
  bool AsmVerbose : 1;

  /// Preserve Comments in Assembly.
  bool PreserveAsmComments : 1;

  bool Dwarf64 : 1;

  EmitDwarfUnwindType EmitDwarfUnwind;

  int DwarfVersion = 0;

  enum DwarfDirectory {
    // Force disable
    DisableDwarfDirectory,
    // Force enable, for assemblers that support
    // `.file fileno directory filename' syntax
    EnableDwarfDirectory,
    // Default is based on the target
    DefaultDwarfDirectory
  };
  DwarfDirectory MCUseDwarfDirectory;

  std::string ABIName;
  std::string AssemblyLanguage;
  std::string SplitDwarfFile;
  std::string AsSecureLogFile;

  const char *Argv0 = nullptr;
  ArrayRef<std::string> CommandLineArgs;

  /// Additional paths to search for `.include` directives when using the
  /// integrated assembler.
  std::vector<std::string> IASSearchPaths;

  // Whether to emit compact-unwind for non-canonical personality
  // functions on Darwin.
  bool EmitCompactUnwindNonCanonical : 1;

  MCTargetOptions();

  /// getABIName - If this returns a non-empty string, it represents the
  /// textual name of the ABI that we want the backend to use, e.g. o32 or
  /// aapcs-linux.
  StringRef getABIName() const;

  /// getAssemblyLanguage - If this returns a non-empty string, it represents
  /// the textual name of the assembly language that we will use for this
  /// target, e.g. masm.
};

} // end namespace llvm

#endif // LLVM_MC_MCTARGETOPTIONS_H
//===- StringTableBuilder.h - String table building utility -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_STRINGTABLEBUILDER_H
#define LLVM_MC_STRINGTABLEBUILDER_H

#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Alignment.h"
#include <cstddef>
#include <cstdint>

namespace llvm {

class raw_ostream;

/// Utility for building string tables with deduplicated suffixes.
class StringTableBuilder {
public:
  enum Kind {
    ELF,
    WinCOFF,
    MachO,
    MachO64,
    MachOLinked,
    MachO64Linked,
    RAW,
    DWARF,
    XCOFF
  };

private:
  DenseMap<CachedHashStringRef, size_t> StringIndexMap;
  size_t Size = 0;
  Kind K;
  Align Alignment;
  bool Finalized = false;

  void finalizeStringTable(bool Optimize);
  void initSize();

public:
  StringTableBuilder(Kind K, Align Alignment = Align(1));
  ~StringTableBuilder();

  /// Add a string to the builder. Returns the position of S in the
  /// table. The position will be changed if finalize is used.
  /// Can only be used before the table is finalized.
  size_t add(CachedHashStringRef S);
  size_t add(StringRef S) { return add(CachedHashStringRef(S)); }

  /// Analyze the strings and build the final table. No more strings can
  /// be added after this point.
  void finalize();

  /// Finalize the string table without reordering it. In this mode, offsets
  /// returned by add will still be valid.
  void finalizeInOrder();

  /// Get the offset of a string in the string table. Can only be used
  /// after the table is finalized.
  size_t getOffset(CachedHashStringRef S) const;
  size_t getOffset(StringRef S) const {
    return getOffset(CachedHashStringRef(S));
  }

  /// Check if a string is contained in the string table. Since this class
  /// doesn't store the string values itself, this function can be used to
  /// check whether storage for a string needs to be arranged before adding it.
  bool contains(StringRef S) const {
    return contains(CachedHashStringRef(S));
  }
  bool contains(CachedHashStringRef S) const {
    return StringIndexMap.count(S);
  }

  size_t getSize() const { return Size; }
  void clear();

  void write(raw_ostream &OS) const;
  void write(uint8_t *Buf) const;

  bool isFinalized() const { return Finalized; }
};
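// Illustrative sketch (editorial): building an ELF string table; `OS` is an
// assumed raw_ostream.
//
//   StringTableBuilder STB(StringTableBuilder::ELF);
//   STB.add("foobar");
//   STB.add("bar");           // may be stored as a suffix of "foobar"
//   STB.finalize();           // offsets returned by add() are invalidated
//   size_t Off = STB.getOffset("bar");
//   STB.write(OS);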

} // end namespace llvm

#endif // LLVM_MC_STRINGTABLEBUILDER_H
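
// A minimal usage sketch of the builder above: add strings, finalize (which
// may reorder entries to share suffixes), then query offsets and serialize.
// raw_svector_ostream keeps the example self-contained; the strings are
// illustrative.
#include "llvm/ADT/SmallString.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/raw_ostream.h"

static void buildExampleStringTable() {
  llvm::StringTableBuilder Builder(llvm::StringTableBuilder::ELF);
  Builder.add("main");
  Builder.add("domain");      // can tail-share with "main" after finalize()
  Builder.finalize();         // offsets are only stable from this point on
  size_t MainOffset = Builder.getOffset("main");
  (void)MainOffset;
  llvm::SmallString<64> Buf;
  llvm::raw_svector_ostream OS(Buf);
  Builder.write(OS);          // Buf now holds the finished string table
}
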
//===-- llvm/MC/MCFixup.h - Instruction Relocation and Patching -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCFIXUP_H
#define LLVM_MC_MCFIXUP_H

#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>

namespace llvm {
class MCExpr;

/// Extensible enumeration to represent the type of a fixup.
enum MCFixupKind {
  FK_NONE = 0,    ///< A no-op fixup.
  FK_Data_1,      ///< A one-byte fixup.
  FK_Data_2,      ///< A two-byte fixup.
  FK_Data_4,      ///< A four-byte fixup.
  FK_Data_8,      ///< An eight-byte fixup.
  FK_Data_6b,     ///< A six-bit fixup.
  FK_PCRel_1,     ///< A one-byte pc-relative fixup.
  FK_PCRel_2,     ///< A two-byte pc-relative fixup.
  FK_PCRel_4,     ///< A four-byte pc-relative fixup.
  FK_PCRel_8,     ///< An eight-byte pc-relative fixup.
  FK_GPRel_1,     ///< A one-byte gp-relative fixup.
  FK_GPRel_2,     ///< A two-byte gp-relative fixup.
  FK_GPRel_4,     ///< A four-byte gp-relative fixup.
  FK_GPRel_8,     ///< An eight-byte gp-relative fixup.
  FK_DTPRel_4,    ///< A four-byte dtp-relative fixup.
  FK_DTPRel_8,    ///< An eight-byte dtp-relative fixup.
  FK_TPRel_4,     ///< A four-byte tp-relative fixup.
  FK_TPRel_8,     ///< An eight-byte tp-relative fixup.
  FK_SecRel_1,    ///< A one-byte section-relative fixup.
  FK_SecRel_2,    ///< A two-byte section-relative fixup.
  FK_SecRel_4,    ///< A four-byte section-relative fixup.
  FK_SecRel_8,    ///< An eight-byte section-relative fixup.

  FirstTargetFixupKind = 128,

  /// The range [FirstLiteralRelocationKind, MaxFixupKind) is used for
  /// relocations coming from the .reloc directive. Fixup kind
  /// FirstLiteralRelocationKind+V represents the relocation type with number V.
  FirstLiteralRelocationKind = 256,

  /// Set limit to accommodate the highest reloc type in use for all Targets,
  /// currently R_AARCH64_IRELATIVE at 1032, including room for expansion.
  MaxFixupKind = FirstLiteralRelocationKind + 1032 + 32,
};
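
// A small illustration of the literal-relocation convention described above:
// a .reloc directive naming relocation type number V maps to the fixup kind
// FirstLiteralRelocationKind + V. The relocation type number is assumed to
// stay within the MaxFixupKind limit.
inline MCFixupKind getExampleLiteralRelocKind(unsigned RelocType) {
  return MCFixupKind(FirstLiteralRelocationKind + RelocType);
}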

/// Encode information on a single operation to perform on a byte
/// sequence (e.g., an encoded instruction) which requires assemble- or run-
/// time patching.
///
/// Fixups are used any time the target instruction encoder needs to represent
/// some value in an instruction which is not yet concrete. The encoder will
/// encode the instruction assuming the value is 0, and emit a fixup which
/// communicates to the assembler backend how it should rewrite the encoded
/// value.
///
/// During the process of relaxation, the assembler will apply fixups as
/// symbolic values become concrete. When relaxation is complete, any remaining
/// fixups become relocations in the object file (or errors, if the fixup cannot
/// be encoded on the target).
class MCFixup {
  /// The value to put into the fixup location. The exact interpretation of the
  /// expression is target dependent, usually it will be one of the operands to
  /// an instruction or an assembler directive.
  const MCExpr *Value = nullptr;

  /// The byte index of start of the relocation inside the MCFragment.
  uint32_t Offset = 0;

  /// The target dependent kind of fixup item this is. The kind is used to
  /// determine how the operand value should be encoded into the instruction.
  MCFixupKind Kind = FK_NONE;

  /// The source location which gave rise to the fixup, if any.
  SMLoc Loc;
public:
  static MCFixup create(uint32_t Offset, const MCExpr *Value,
                        MCFixupKind Kind, SMLoc Loc = SMLoc()) {
    assert(Kind <= MaxFixupKind && "Kind out of range!");
    MCFixup FI;
    FI.Value = Value;
    FI.Offset = Offset;
    FI.Kind = Kind;
    FI.Loc = Loc;
    return FI;
  }

  MCFixupKind getKind() const { return Kind; }

  unsigned getTargetKind() const { return Kind; }

  uint32_t getOffset() const { return Offset; }
  void setOffset(uint32_t Value) { Offset = Value; }

  const MCExpr *getValue() const { return Value; }

  /// Return the generic fixup kind for a value with the given size. It
  /// is an error to pass an unsupported size.
  static MCFixupKind getKindForSize(unsigned Size, bool IsPCRel) {
    switch (Size) {
    default: llvm_unreachable("Invalid generic fixup size!");
    case 1:
      return IsPCRel ? FK_PCRel_1 : FK_Data_1;
    case 2:
      return IsPCRel ? FK_PCRel_2 : FK_Data_2;
    case 4:
      return IsPCRel ? FK_PCRel_4 : FK_Data_4;
    case 8:
      return IsPCRel ? FK_PCRel_8 : FK_Data_8;
    }
  }

  /// Return the generic fixup kind for a value with the given size in bits.
  /// It is an error to pass an unsupported size.
  static MCFixupKind getKindForSizeInBits(unsigned Size, bool IsPCRel) {
    switch (Size) {
    default:
      llvm_unreachable("Invalid generic fixup size!");
    case 6:
      assert(!IsPCRel && "Invalid pc-relative fixup size!");
      return FK_Data_6b;
    case 8:
      return IsPCRel ? FK_PCRel_1 : FK_Data_1;
    case 16:
      return IsPCRel ? FK_PCRel_2 : FK_Data_2;
    case 32:
      return IsPCRel ? FK_PCRel_4 : FK_Data_4;
    case 64:
      return IsPCRel ? FK_PCRel_8 : FK_Data_8;
    }
  }

  SMLoc getLoc() const { return Loc; }
};

} // End llvm namespace

#endif
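
// A minimal sketch, assuming an MCExpr built elsewhere in some MCContext:
// record a four-byte pc-relative fixup at byte offset 8 of a fragment, using
// the generic-kind helper above. The offset is an illustrative value.
#include "llvm/MC/MCFixup.h"

static llvm::MCFixup makeExamplePCRelFixup(const llvm::MCExpr *Expr) {
  llvm::MCFixupKind Kind =
      llvm::MCFixup::getKindForSize(4, /*IsPCRel=*/true); // FK_PCRel_4
  return llvm::MCFixup::create(/*Offset=*/8, Expr, Kind);
}
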
//===- MCAssembler.h - Object File Generation -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASSEMBLER_H
#define LLVM_MC_MCASSEMBLER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/VersionTuple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

namespace llvm {

class MCBoundaryAlignFragment;
class MCCVDefRangeFragment;
class MCCVInlineLineTableFragment;
class MCDwarfCallFrameFragment;
class MCDwarfLineAddrFragment;
class MCEncodedFragment;
class MCFixup;
class MCLEBFragment;
class MCPseudoProbeAddrFragment;
class MCRelaxableFragment;
class MCSymbolRefExpr;
class raw_ostream;
class MCAsmBackend;
class MCAsmLayout;
class MCContext;
class MCCodeEmitter;
class MCFragment;
class MCObjectWriter;
class MCSection;
class MCValue;

// FIXME: This really doesn't belong here. See comments below.
struct IndirectSymbolData {
  MCSymbol *Symbol;
  MCSection *Section;
};

// FIXME: Ditto this. Purely so the Streamer and the ObjectWriter can talk
// to one another.
struct DataRegionData {
  // This enum should be kept in sync w/ the mach-o definition in
  // llvm/Object/MachOFormat.h.
  enum KindTy { Data = 1, JumpTable8, JumpTable16, JumpTable32 } Kind;
  MCSymbol *Start;
  MCSymbol *End;
};

class MCAssembler {
  friend class MCAsmLayout;

public:
  using SectionListType = std::vector<MCSection *>;
  using SymbolDataListType = std::vector<const MCSymbol *>;

  using const_iterator = pointee_iterator<SectionListType::const_iterator>;
  using iterator = pointee_iterator<SectionListType::iterator>;

  using const_symbol_iterator =
      pointee_iterator<SymbolDataListType::const_iterator>;
  using symbol_iterator = pointee_iterator<SymbolDataListType::iterator>;

  using symbol_range = iterator_range<symbol_iterator>;
  using const_symbol_range = iterator_range<const_symbol_iterator>;

  using const_indirect_symbol_iterator =
      std::vector<IndirectSymbolData>::const_iterator;
  using indirect_symbol_iterator = std::vector<IndirectSymbolData>::iterator;

  using const_data_region_iterator =
      std::vector<DataRegionData>::const_iterator;
  using data_region_iterator = std::vector<DataRegionData>::iterator;

  /// MachO specific deployment target version info.
  // A Major version of 0 indicates that no version information was supplied
  // and so the corresponding load command should not be emitted.
  using VersionInfoType = struct {
    bool EmitBuildVersion;
    union {
      MCVersionMinType Type;          ///< Used when EmitBuildVersion==false.
      MachO::PlatformType Platform;   ///< Used when EmitBuildVersion==true.
    } TypeOrPlatform;
    unsigned Major;
    unsigned Minor;
    unsigned Update;
    /// An optional version of the SDK that was used to build the source.
    VersionTuple SDKVersion;
  };

private:
  MCContext &Context;

  std::unique_ptr<MCAsmBackend> Backend;

  std::unique_ptr<MCCodeEmitter> Emitter;

  std::unique_ptr<MCObjectWriter> Writer;

  SectionListType Sections;

  SymbolDataListType Symbols;

  std::vector<IndirectSymbolData> IndirectSymbols;

  std::vector<DataRegionData> DataRegions;

  /// The list of linker options to propagate into the object file.
  std::vector<std::vector<std::string>> LinkerOptions;

  /// List of declared file names
  std::vector<std::pair<std::string, size_t>> FileNames;

  MCDwarfLineTableParams LTParams;

  /// The set of function symbols for which a .thumb_func directive has
  /// been seen.
  //
  // FIXME: We really would like this in target specific code rather than
  // here. Maybe when the relocation stuff moves to target specific,
  // this can go with it? The streamer would need some target specific
  // refactoring too.
  mutable SmallPtrSet<const MCSymbol *, 32> ThumbFuncs;

  /// The bundle alignment size currently set in the assembler.
  ///
  /// By default it's 0, which means bundling is disabled.
  unsigned BundleAlignSize;

  bool RelaxAll : 1;
  bool SubsectionsViaSymbols : 1;
  bool IncrementalLinkerCompatible : 1;

  /// ELF specific e_header flags
  // It would be good if there were an MCELFAssembler class to hold this.
  // ELF header flags are used both by the integrated and standalone assemblers.
  // Access to the flags is necessary in cases where assembler directives
  // affect which flags are set.
  unsigned ELFHeaderEFlags;

  /// Used to communicate Linker Optimization Hint information between
  /// the Streamer and the .o writer
  MCLOHContainer LOHContainer;

  VersionInfoType VersionInfo;
  VersionInfoType DarwinTargetVariantVersionInfo;

  /// Evaluate a fixup to a relocatable expression and the value which should be
  /// placed into the fixup.
  ///
  /// \param Layout The layout to use for evaluation.
  /// \param Fixup The fixup to evaluate.
  /// \param DF The fragment the fixup is inside.
  /// \param Target [out] On return, the relocatable expression the fixup
  /// evaluates to.
  /// \param Value [out] On return, the value of the fixup as currently laid
  /// out.
  /// \param WasForced [out] On return, the value in the fixup is set to the
  /// correct value if WasForced is true, even if evaluateFixup returns false.
  /// \return Whether the fixup value was fully resolved. This is true if the
  /// \p Value result is fixed, otherwise the value may change due to
  /// relocation.
  bool evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
                     const MCFragment *DF, MCValue &Target,
                     uint64_t &Value, bool &WasForced) const;

  /// Check whether a fixup can be satisfied, or whether it needs to be relaxed
  /// (increased in size, in order to hold its value correctly).
  bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const;

  /// Check whether the given fragment needs relaxation.
  bool fragmentNeedsRelaxation(const MCRelaxableFragment *IF,
                               const MCAsmLayout &Layout) const;

  /// Perform one layout iteration and return true if any offsets
  /// were adjusted.
  bool layoutOnce(MCAsmLayout &Layout);

  /// Perform one layout iteration of the given section and return true
  /// if any offsets were adjusted.
  bool layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec);

  /// Perform relaxation on a single fragment - returns true if the fragment
  /// changes as a result of relaxation.
  bool relaxFragment(MCAsmLayout &Layout, MCFragment &F);
  bool relaxInstruction(MCAsmLayout &Layout, MCRelaxableFragment &IF);
  bool relaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
  bool relaxBoundaryAlign(MCAsmLayout &Layout, MCBoundaryAlignFragment &BF);
  bool relaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
  bool relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                   MCDwarfCallFrameFragment &DF);
  bool relaxCVInlineLineTable(MCAsmLayout &Layout,
                              MCCVInlineLineTableFragment &DF);
  bool relaxCVDefRange(MCAsmLayout &Layout, MCCVDefRangeFragment &DF);
  bool relaxPseudoProbeAddr(MCAsmLayout &Layout, MCPseudoProbeAddrFragment &DF);

  /// finishLayout - Finalize a layout, including fragment lowering.
  void finishLayout(MCAsmLayout &Layout);

  std::tuple<MCValue, uint64_t, bool>
  handleFixup(const MCAsmLayout &Layout, MCFragment &F, const MCFixup &Fixup);

public:
  struct Symver {
    SMLoc Loc;
    const MCSymbol *Sym;
    StringRef Name;
    // True if .symver *, *@@@* or .symver *, *, remove.
    bool KeepOriginalSym;
  };
  std::vector<Symver> Symvers;

  /// Construct a new assembler instance.
  //
  // FIXME: How are we going to parameterize this? Two obvious options are stay
  // concrete and require clients to pass in a target like object. The other
  // option is to make this abstract, and have targets provide concrete
  // implementations as we do with AsmParser.
  MCAssembler(MCContext &Context, std::unique_ptr<MCAsmBackend> Backend,
              std::unique_ptr<MCCodeEmitter> Emitter,
              std::unique_ptr<MCObjectWriter> Writer);
  MCAssembler(const MCAssembler &) = delete;
  MCAssembler &operator=(const MCAssembler &) = delete;
  ~MCAssembler();

  /// Compute the effective size of the fragment \p F, assuming it is laid out
  /// according to \p Layout.
  uint64_t computeFragmentSize(const MCAsmLayout &Layout,
                               const MCFragment &F) const;

  /// Find the symbol which defines the atom containing the given symbol, or
  /// null if there is no such symbol.
  const MCSymbol *getAtom(const MCSymbol &S) const;

  /// Check whether a particular symbol is visible to the linker and is required
  /// in the symbol table, or whether it can be discarded by the assembler. This
  /// also affects whether the assembler treats the label as potentially
  /// defining a separate atom.
  bool isSymbolLinkerVisible(const MCSymbol &SD) const;

  /// Emit the section contents to \p OS.
  void writeSectionData(raw_ostream &OS, const MCSection *Section,
                        const MCAsmLayout &Layout) const;

  /// Check whether a given symbol has been flagged with .thumb_func.
  bool isThumbFunc(const MCSymbol *Func) const;

  /// Flag a function symbol as the target of a .thumb_func directive.
  void setIsThumbFunc(const MCSymbol *Func) { ThumbFuncs.insert(Func); }

  /// ELF e_header flags
  unsigned getELFHeaderEFlags() const { return ELFHeaderEFlags; }
  void setELFHeaderEFlags(unsigned Flags) { ELFHeaderEFlags = Flags; }

  /// MachO deployment target version information.
  const VersionInfoType &getVersionInfo() const { return VersionInfo; }
  void setVersionMin(MCVersionMinType Type, unsigned Major, unsigned Minor,
                     unsigned Update,
                     VersionTuple SDKVersion = VersionTuple()) {
    VersionInfo.EmitBuildVersion = false;
    VersionInfo.TypeOrPlatform.Type = Type;
    VersionInfo.Major = Major;
    VersionInfo.Minor = Minor;
    VersionInfo.Update = Update;
    VersionInfo.SDKVersion = SDKVersion;
  }
  void setBuildVersion(MachO::PlatformType Platform, unsigned Major,
                       unsigned Minor, unsigned Update,
                       VersionTuple SDKVersion = VersionTuple()) {
    VersionInfo.EmitBuildVersion = true;
    VersionInfo.TypeOrPlatform.Platform = Platform;
    VersionInfo.Major = Major;
    VersionInfo.Minor = Minor;
    VersionInfo.Update = Update;
    VersionInfo.SDKVersion = SDKVersion;
  }

  const VersionInfoType &getDarwinTargetVariantVersionInfo() const {
    return DarwinTargetVariantVersionInfo;
  }
  void setDarwinTargetVariantBuildVersion(MachO::PlatformType Platform,
                                          unsigned Major, unsigned Minor,
                                          unsigned Update,
                                          VersionTuple SDKVersion) {
    DarwinTargetVariantVersionInfo.EmitBuildVersion = true;
    DarwinTargetVariantVersionInfo.TypeOrPlatform.Platform = Platform;
    DarwinTargetVariantVersionInfo.Major = Major;
    DarwinTargetVariantVersionInfo.Minor = Minor;
    DarwinTargetVariantVersionInfo.Update = Update;
    DarwinTargetVariantVersionInfo.SDKVersion = SDKVersion;
  }

  /// Reuse an assembler instance
  ///
  void reset();

  MCContext &getContext() const { return Context; }

  MCAsmBackend *getBackendPtr() const { return Backend.get(); }

  MCCodeEmitter *getEmitterPtr() const { return Emitter.get(); }

  MCObjectWriter *getWriterPtr() const { return Writer.get(); }

  MCAsmBackend &getBackend() const { return *Backend; }

  MCCodeEmitter &getEmitter() const { return *Emitter; }

  MCObjectWriter &getWriter() const { return *Writer; }

  MCDwarfLineTableParams getDWARFLinetableParams() const { return LTParams; }
  void setDWARFLinetableParams(MCDwarfLineTableParams P) { LTParams = P; }

  /// Finish - Do final processing and write the object to the output stream.
  /// A custom object writer (as the MCJIT uses) may be supplied at
  /// construction time; otherwise one is created automatically from the
  /// backend.
  void Finish();

  // Lay out all sections and prepare them for emission.
  void layout(MCAsmLayout &Layout);

  // FIXME: This does not belong here.
  bool getSubsectionsViaSymbols() const { return SubsectionsViaSymbols; }
  void setSubsectionsViaSymbols(bool Value) { SubsectionsViaSymbols = Value; }

  bool isIncrementalLinkerCompatible() const {
    return IncrementalLinkerCompatible;
  }
  void setIncrementalLinkerCompatible(bool Value) {
    IncrementalLinkerCompatible = Value;
  }

  bool getRelaxAll() const { return RelaxAll; }
  void setRelaxAll(bool Value) { RelaxAll = Value; }

  bool isBundlingEnabled() const { return BundleAlignSize != 0; }

  unsigned getBundleAlignSize() const { return BundleAlignSize; }

  void setBundleAlignSize(unsigned Size) {
    assert((Size == 0 || !(Size & (Size - 1))) &&
           "Expect a power-of-two bundle align size");
    BundleAlignSize = Size;
  }

  /// \name Section List Access
  /// @{

  iterator begin() { return Sections.begin(); }
  const_iterator begin() const { return Sections.begin(); }

  iterator end() { return Sections.end(); }
  const_iterator end() const { return Sections.end(); }

  size_t size() const { return Sections.size(); }

  /// @}
  /// \name Symbol List Access
  /// @{
  symbol_iterator symbol_begin() { return Symbols.begin(); }
  const_symbol_iterator symbol_begin() const { return Symbols.begin(); }

  symbol_iterator symbol_end() { return Symbols.end(); }
  const_symbol_iterator symbol_end() const { return Symbols.end(); }

  symbol_range symbols() { return make_range(symbol_begin(), symbol_end()); }
  const_symbol_range symbols() const {
    return make_range(symbol_begin(), symbol_end());
  }

  size_t symbol_size() const { return Symbols.size(); }

  /// @}
  /// \name Indirect Symbol List Access
  /// @{

  // FIXME: This is a total hack, this should not be here. Once things are
  // factored so that the streamer has direct access to the .o writer, it can
  // disappear.
  std::vector<IndirectSymbolData> &getIndirectSymbols() {
    return IndirectSymbols;
  }

  indirect_symbol_iterator indirect_symbol_begin() {
    return IndirectSymbols.begin();
  }
  const_indirect_symbol_iterator indirect_symbol_begin() const {
    return IndirectSymbols.begin();
  }

  indirect_symbol_iterator indirect_symbol_end() {
    return IndirectSymbols.end();
  }
  const_indirect_symbol_iterator indirect_symbol_end() const {
    return IndirectSymbols.end();
  }

  size_t indirect_symbol_size() const { return IndirectSymbols.size(); }

  /// @}
  /// \name Linker Option List Access
  /// @{

  std::vector<std::vector<std::string>> &getLinkerOptions() {
    return LinkerOptions;
  }

  /// @}
  /// \name Data Region List Access
  /// @{

  // FIXME: This is a total hack, this should not be here. Once things are
  // factored so that the streamer has direct access to the .o writer, it can
  // disappear.
  std::vector<DataRegionData> &getDataRegions() { return DataRegions; }

  data_region_iterator data_region_begin() { return DataRegions.begin(); }
  const_data_region_iterator data_region_begin() const {
    return DataRegions.begin();
  }

  data_region_iterator data_region_end() { return DataRegions.end(); }
  const_data_region_iterator data_region_end() const {
    return DataRegions.end();
  }

  size_t data_region_size() const { return DataRegions.size(); }

  /// @}
  /// \name Linker Optimization Hint List Access
  /// @{

  // FIXME: This is a total hack, this should not be here. Once things are
  // factored so that the streamer has direct access to the .o writer, it can
  // disappear.
  MCLOHContainer &getLOHContainer() { return LOHContainer; }
  const MCLOHContainer &getLOHContainer() const {
    return const_cast<MCAssembler *>(this)->getLOHContainer();
  }

  struct CGProfileEntry {
    const MCSymbolRefExpr *From;
    const MCSymbolRefExpr *To;
    uint64_t Count;
  };
  std::vector<CGProfileEntry> CGProfile;
  /// @}
  /// \name Backend Data Access
  /// @{

  bool registerSection(MCSection &Section);
  bool registerSymbol(const MCSymbol &Symbol);

  MutableArrayRef<std::pair<std::string, size_t>> getFileNames() {
    return FileNames;
  }

  void addFileName(StringRef FileName) {
    FileNames.emplace_back(std::string(FileName), Symbols.size());
  }

  /// Write the necessary bundle padding to \p OS.
  /// Expects a fragment \p F containing instructions and its size \p FSize.
  void writeFragmentPadding(raw_ostream &OS, const MCEncodedFragment &F,
                            uint64_t FSize) const;

  /// @}

  void dump() const;
};

/// Compute the amount of padding required before the fragment \p F to
/// obey bundling restrictions, where \p FOffset is the fragment's offset in
/// its section and \p FSize is the fragment's size.
uint64_t computeBundlePadding(const MCAssembler &Assembler,
                              const MCEncodedFragment *F, uint64_t FOffset,
                              uint64_t FSize);

} // end namespace llvm

#endif // LLVM_MC_MCASSEMBLER_H
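
// A minimal sketch, assuming an already-constructed assembler (building one
// needs a target-specific backend, emitter, and object writer): record a
// Darwin build-version load command and enable 16-byte bundling through the
// setters above. The version numbers are illustrative.
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAssembler.h"

static void configureExampleAssembler(llvm::MCAssembler &Asm) {
  Asm.setBuildVersion(llvm::MachO::PLATFORM_MACOS, /*Major=*/11,
                      /*Minor=*/0, /*Update=*/0);
  Asm.setBundleAlignSize(16); // must be a power of two, per the assert above
}
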
//===- MCWasmStreamer.h - MCStreamer Wasm Object File Interface -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCWASMSTREAMER_H
#define LLVM_MC_MCWASMSTREAMER_H

#include "MCAsmBackend.h"
#include "MCCodeEmitter.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {
class MCExpr;
class MCInst;

class MCWasmStreamer : public MCObjectStreamer {
public:
  MCWasmStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                 std::unique_ptr<MCObjectWriter> OW,
                 std::unique_ptr<MCCodeEmitter> Emitter)
      : MCObjectStreamer(Context, std::move(TAB), std::move(OW),
                         std::move(Emitter)),
        SeenIdent(false) {}

  ~MCWasmStreamer() override;

  /// state management
  void reset() override {
    SeenIdent = false;
    MCObjectStreamer::reset();
  }

  /// \name MCStreamer Interface
  /// @{

  void changeSection(MCSection *Section, const MCExpr *Subsection) override;
  void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
  void emitLabelAtPos(MCSymbol *Symbol, SMLoc Loc, MCFragment *F,
                      uint64_t Offset) override;
  void emitAssemblerFlag(MCAssemblerFlag Flag) override;
  void emitThumbFunc(MCSymbol *Func) override;
  void emitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
  bool emitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
  void emitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
  void emitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                        Align ByteAlignment) override;

  void emitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;

  void emitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                             Align ByteAlignment) override;

  void emitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
                    uint64_t Size = 0, Align ByteAlignment = Align(1),
                    SMLoc Loc = SMLoc()) override;
  void emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
                      Align ByteAlignment = Align(1)) override;

  void emitIdent(StringRef IdentString) override;

  void finishImpl() override;

private:
  void emitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &) override;
  void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;

  void fixSymbolsInTLSFixups(const MCExpr *expr);

  /// Merge the content of the fragment \p EF into the fragment \p DF.
  void mergeFragment(MCDataFragment *DF, MCDataFragment *EF);

  bool SeenIdent;
};

} // end namespace llvm

#endif
//===-- llvm/MC/MCXCOFFObjectWriter.h - XCOFF Object Writer ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCXCOFFOBJECTWRITER_H
#define LLVM_MC_MCXCOFFOBJECTWRITER_H

#include "llvm/MC/MCObjectWriter.h"

namespace llvm {

class raw_pwrite_stream;

class MCXCOFFObjectTargetWriter : public MCObjectTargetWriter {
protected:
  MCXCOFFObjectTargetWriter(bool Is64Bit);

public:
  ~MCXCOFFObjectTargetWriter() override;

  Triple::ObjectFormatType getFormat() const override { return Triple::XCOFF; }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::XCOFF;
  }
  bool is64Bit() const { return Is64Bit; }

  // Returns relocation info such as type, sign and size.
  // First element of the pair contains type,
  // second element contains sign and size.
  virtual std::pair<uint8_t, uint8_t>
  getRelocTypeAndSignSize(const MCValue &Target, const MCFixup &Fixup,
                          bool IsPCRel) const = 0;

private:
  bool Is64Bit;
};

std::unique_ptr<MCObjectWriter>
createXCOFFObjectWriter(std::unique_ptr<MCXCOFFObjectTargetWriter> MOTW,
                        raw_pwrite_stream &OS);

} // end namespace llvm

#endif // LLVM_MC_MCXCOFFOBJECTWRITER_H
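
// A hypothetical subclass, sketching how a backend satisfies the pure-virtual
// hook above. The returned bytes are placeholder values, not real XCOFF
// relocation encodings.
#include "llvm/MC/MCXCOFFObjectWriter.h"
#include <cstdint>
#include <utility>

namespace {
class ExampleXCOFFObjectTargetWriter
    : public llvm::MCXCOFFObjectTargetWriter {
public:
  ExampleXCOFFObjectTargetWriter()
      : MCXCOFFObjectTargetWriter(/*Is64Bit=*/true) {}

  std::pair<uint8_t, uint8_t>
  getRelocTypeAndSignSize(const llvm::MCValue &, const llvm::MCFixup &,
                          bool IsPCRel) const override {
    // First element: relocation type; second: sign and size (placeholders).
    return {IsPCRel ? uint8_t(0x1a) : uint8_t(0x00), uint8_t(0x1f)};
  }
};
} // namespace
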
//===- llvm/MC/MCSubtargetInfo.h - Subtarget Information --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the subtarget options of a Target machine.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSUBTARGETINFO_H
#define LLVM_MC_MCSUBTARGETINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <string>

namespace llvm {

class MCInst;

//===----------------------------------------------------------------------===//

/// Used to provide key value pairs for feature and CPU bit flags.
struct SubtargetFeatureKV {
  const char *Key;                      ///< K-V key string
  const char *Desc;                     ///< Help descriptor
  unsigned Value;                       ///< K-V integer value
  FeatureBitArray Implies;              ///< K-V bit mask

  /// Compare routine for std::lower_bound
  bool operator<(StringRef S) const {
    return StringRef(Key) < S;
  }

  /// Compare routine for std::is_sorted.
  bool operator<(const SubtargetFeatureKV &Other) const {
    return StringRef(Key) < StringRef(Other.Key);
  }
};

//===----------------------------------------------------------------------===//

/// Used to provide key value pairs for feature and CPU bit flags.
struct SubtargetSubTypeKV {
  const char *Key;                      ///< K-V key string
  FeatureBitArray Implies;              ///< K-V bit mask
  FeatureBitArray TuneImplies;          ///< K-V bit mask
  const MCSchedModel *SchedModel;

  /// Compare routine for std::lower_bound
  bool operator<(StringRef S) const {
    return StringRef(Key) < S;
  }

  /// Compare routine for std::is_sorted.
  bool operator<(const SubtargetSubTypeKV &Other) const {
    return StringRef(Key) < StringRef(Other.Key);
  }
};
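
// A free-standing sketch of how the comparators above are meant to be used:
// binary-search a Key-sorted table by CPU name, mirroring what
// isCPUStringValid() does further down. Assumes the table is sorted, as the
// std::is_sorted comparator documents.
inline const SubtargetSubTypeKV *
lookupExampleProcDesc(ArrayRef<SubtargetSubTypeKV> ProcDesc, StringRef CPU) {
  auto Found = llvm::lower_bound(ProcDesc, CPU); // uses operator<(StringRef)
  return (Found != ProcDesc.end() && StringRef(Found->Key) == CPU) ? &*Found
                                                                   : nullptr;
}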

//===----------------------------------------------------------------------===//
///
/// Generic base class for all target subtargets.
///
class MCSubtargetInfo {
  Triple TargetTriple;
  std::string CPU; // CPU being targeted.
  std::string TuneCPU; // CPU being tuned for.
  ArrayRef<SubtargetFeatureKV> ProcFeatures;  // Processor feature list
  ArrayRef<SubtargetSubTypeKV> ProcDesc;  // Processor descriptions

  // Scheduler machine model
  const MCWriteProcResEntry *WriteProcResTable;
  const MCWriteLatencyEntry *WriteLatencyTable;
  const MCReadAdvanceEntry *ReadAdvanceTable;
  const MCSchedModel *CPUSchedModel;

  const InstrStage *Stages;            // Instruction itinerary stages
  const unsigned *OperandCycles;       // Itinerary operand cycles
  const unsigned *ForwardingPaths;
  FeatureBitset FeatureBits;           // Feature bits for current CPU + FS
  std::string FeatureString;           // Feature string

public:
  MCSubtargetInfo(const MCSubtargetInfo &) = default;
  MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef TuneCPU,
                  StringRef FS, ArrayRef<SubtargetFeatureKV> PF,
                  ArrayRef<SubtargetSubTypeKV> PD,
                  const MCWriteProcResEntry *WPR, const MCWriteLatencyEntry *WL,
                  const MCReadAdvanceEntry *RA, const InstrStage *IS,
                  const unsigned *OC, const unsigned *FP);
  MCSubtargetInfo() = delete;
  MCSubtargetInfo &operator=(const MCSubtargetInfo &) = delete;
  MCSubtargetInfo &operator=(MCSubtargetInfo &&) = delete;
  virtual ~MCSubtargetInfo() = default;

  const Triple &getTargetTriple() const { return TargetTriple; }
  StringRef getCPU() const { return CPU; }
  StringRef getTuneCPU() const { return TuneCPU; }

  const FeatureBitset& getFeatureBits() const { return FeatureBits; }
  void setFeatureBits(const FeatureBitset &FeatureBits_) {
    FeatureBits = FeatureBits_;
  }

  StringRef getFeatureString() const { return FeatureString; }

  bool hasFeature(unsigned Feature) const {
    return FeatureBits[Feature];
  }

protected:
  /// Initialize the scheduling model and feature bits.
  ///
  /// FIXME: Find a way to stick this in the constructor, since it should only
  /// be called during initialization.
  void InitMCProcessorInfo(StringRef CPU, StringRef TuneCPU, StringRef FS);

public:
  /// Set the features to the default for the given CPU and TuneCPU, with an
  /// appended feature string.
  void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);

  /// Toggle a feature and return the re-computed feature bits.
  /// This version does not change the implied bits.
  FeatureBitset ToggleFeature(uint64_t FB);

  /// Toggle a feature and return the re-computed feature bits.
  /// This version does not change the implied bits.
  FeatureBitset ToggleFeature(const FeatureBitset& FB);

  /// Toggle a set of features and return the re-computed feature bits.
  /// This version will also change all implied bits.
  FeatureBitset ToggleFeature(StringRef FS);

  /// Apply a feature flag and return the re-computed feature bits, including
  /// all feature bits implied by the flag.
  FeatureBitset ApplyFeatureFlag(StringRef FS);

  /// Set/clear additional feature bits, including all other bits they imply.
  FeatureBitset SetFeatureBitsTransitively(const FeatureBitset& FB);
  FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB);

  /// Check whether the subtarget features are enabled/disabled as per
  /// the provided string, ignoring all other features.
  bool checkFeatures(StringRef FS) const;

  /// Get the machine model of a CPU.
  const MCSchedModel &getSchedModelForCPU(StringRef CPU) const;

  /// Get the machine model for this subtarget's CPU.
  const MCSchedModel &getSchedModel() const { return *CPUSchedModel; }

  /// Return an iterator at the first processor resource consumed by the given
  /// scheduling class.
  const MCWriteProcResEntry *getWriteProcResBegin(
    const MCSchedClassDesc *SC) const {
    return &WriteProcResTable[SC->WriteProcResIdx];
  }
  const MCWriteProcResEntry *getWriteProcResEnd(
    const MCSchedClassDesc *SC) const {
    return getWriteProcResBegin(SC) + SC->NumWriteProcResEntries;
  }

  const MCWriteLatencyEntry *getWriteLatencyEntry(const MCSchedClassDesc *SC,
                                                  unsigned DefIdx) const {
    assert(DefIdx < SC->NumWriteLatencyEntries &&
           "MachineModel does not specify a WriteResource for DefIdx");

    return &WriteLatencyTable[SC->WriteLatencyIdx + DefIdx];
  }

  int getReadAdvanceCycles(const MCSchedClassDesc *SC, unsigned UseIdx,
                           unsigned WriteResID) const {
    // TODO: The number of read advance entries in a class can be significant
    // (~50). Consider compressing the WriteID into a dense ID of those that are
    // used by ReadAdvance and representing them as a bitset.
    for (const MCReadAdvanceEntry *I = &ReadAdvanceTable[SC->ReadAdvanceIdx],
           *E = I + SC->NumReadAdvanceEntries; I != E; ++I) {
      if (I->UseIdx < UseIdx)
        continue;
      if (I->UseIdx > UseIdx)
        break;
      // Find the first WriteResIdx match, which has the highest cycle count.
      if (!I->WriteResourceID || I->WriteResourceID == WriteResID) {
        return I->Cycles;
      }
    }
    return 0;
  }

  /// Return the set of ReadAdvance entries declared by the scheduling class
  /// descriptor given as input.
  ArrayRef<MCReadAdvanceEntry>
  getReadAdvanceEntries(const MCSchedClassDesc &SC) const {
    if (!SC.NumReadAdvanceEntries)
      return ArrayRef<MCReadAdvanceEntry>();
    return ArrayRef<MCReadAdvanceEntry>(&ReadAdvanceTable[SC.ReadAdvanceIdx],
                                        SC.NumReadAdvanceEntries);
  }

  /// Get scheduling itinerary of a CPU.
  InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;

  /// Initialize an InstrItineraryData instance.
  void initInstrItins(InstrItineraryData &InstrItins) const;

  /// Resolve a variant scheduling class for the given MCInst and CPU.
  virtual unsigned resolveVariantSchedClass(unsigned SchedClass,
                                            const MCInst *MI,
                                            const MCInstrInfo *MCII,
                                            unsigned CPUID) const {
    return 0;
  }

  /// Check whether the CPU string is valid.
  bool isCPUStringValid(StringRef CPU) const {
    auto Found = llvm::lower_bound(ProcDesc, CPU);
    return Found != ProcDesc.end() && StringRef(Found->Key) == CPU;
  }

  ArrayRef<SubtargetSubTypeKV> getAllProcessorDescriptions() const {
    return ProcDesc;
  }

  virtual unsigned getHwMode() const { return 0; }

  /// Return the cache size in bytes for the given level of cache.
  /// Level is zero-based, so a value of zero means the first level of
  /// cache.
  ///
  virtual std::optional<unsigned> getCacheSize(unsigned Level) const;

  /// Return the cache associativity for the given level of cache.
  /// Level is zero-based, so a value of zero means the first level of
  /// cache.
  ///
  virtual std::optional<unsigned> getCacheAssociativity(unsigned Level) const;

  /// Return the target cache line size in bytes at a given level.
  ///
  virtual std::optional<unsigned> getCacheLineSize(unsigned Level) const;

  /// Return the target cache line size in bytes.  By default, return
  /// the line size for the bottom-most level of cache.  This provides
  /// a more convenient interface for the common case where all cache
  /// levels have the same line size.  Return zero if there is no
  /// cache model.
  ///
  virtual unsigned getCacheLineSize() const {
    std::optional<unsigned> Size = getCacheLineSize(0);
    if (Size)
      return *Size;

    return 0;
  }

  /// Return the preferred prefetch distance in terms of instructions.
  ///
  virtual unsigned getPrefetchDistance() const;

  /// Return the maximum prefetch distance in terms of loop
  /// iterations.
  ///
  virtual unsigned getMaxPrefetchIterationsAhead() const;

  /// \return True if prefetching should also be done for writes.
  ///
  virtual bool enableWritePrefetching() const;

  /// Return the minimum stride necessary to trigger software
  /// prefetching.
  ///
  virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses,
                                        unsigned NumStridedMemAccesses,
                                        unsigned NumPrefetches,
                                        bool HasCall) const;

  /// \return True if the target wants to issue a prefetch in address space
  /// \p AS.
  virtual bool shouldPrefetchAddressSpace(unsigned AS) const;
};

} // end namespace llvm

#endif // LLVM_MC_MCSUBTARGETINFO_H
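
// A minimal sketch, assuming an MCSubtargetInfo obtained from the target
// registry: query feature state and cache geometry through the interface
// above. "+fp64" is a hypothetical feature name.
#include "llvm/MC/MCSubtargetInfo.h"

static unsigned exampleSubtargetQueries(const llvm::MCSubtargetInfo &STI) {
  bool HasFP64 = STI.checkFeatures("+fp64");  // hypothetical feature
  unsigned LineSize = STI.getCacheLineSize(); // 0 if no cache model
  return HasFP64 ? LineSize : 0;
}
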
//===- MCWin64EH.h - Machine Code Win64 EH support --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains declarations to support the Win64 Exception Handling
// scheme in MC.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCWIN64EH_H
#define LLVM_MC_MCWIN64EH_H

#include "llvm/MC/MCWinEH.h"
#include "llvm/Support/Win64EH.h"

namespace llvm {
class MCStreamer;
class MCSymbol;

namespace Win64EH {
struct Instruction {
  static WinEH::Instruction PushNonVol(MCSymbol *L, unsigned Reg) {
    return WinEH::Instruction(Win64EH::UOP_PushNonVol, L, Reg, -1);
  }
  static WinEH::Instruction Alloc(MCSymbol *L, unsigned Size) {
    return WinEH::Instruction(Size > 128 ? UOP_AllocLarge : UOP_AllocSmall, L,
                              -1, Size);
  }
  static WinEH::Instruction PushMachFrame(MCSymbol *L, bool Code) {
    return WinEH::Instruction(UOP_PushMachFrame, L, -1, Code ? 1 : 0);
  }
  static WinEH::Instruction SaveNonVol(MCSymbol *L, unsigned Reg,
                                       unsigned Offset) {
    return WinEH::Instruction(Offset > 512 * 1024 - 8 ? UOP_SaveNonVolBig
                                                      : UOP_SaveNonVol,
                              L, Reg, Offset);
  }
  static WinEH::Instruction SaveXMM(MCSymbol *L, unsigned Reg,
                                    unsigned Offset) {
    return WinEH::Instruction(Offset > 512 * 1024 - 8 ? UOP_SaveXMM128Big
                                                      : UOP_SaveXMM128,
                              L, Reg, Offset);
  }
  static WinEH::Instruction SetFPReg(MCSymbol *L, unsigned Reg, unsigned Off) {
    return WinEH::Instruction(UOP_SetFPReg, L, Reg, Off);
  }
};

class UnwindEmitter : public WinEH::UnwindEmitter {
public:
  void Emit(MCStreamer &Streamer) const override;
  void EmitUnwindInfo(MCStreamer &Streamer, WinEH::FrameInfo *FI,
                      bool HandlerData) const override;
};

class ARMUnwindEmitter : public WinEH::UnwindEmitter {
public:
  void Emit(MCStreamer &Streamer) const override;
  void EmitUnwindInfo(MCStreamer &Streamer, WinEH::FrameInfo *FI,
                      bool HandlerData) const override;
};

class ARM64UnwindEmitter : public WinEH::UnwindEmitter {
public:
  void Emit(MCStreamer &Streamer) const override;
  void EmitUnwindInfo(MCStreamer &Streamer, WinEH::FrameInfo *FI,
                      bool HandlerData) const override;
};
}
} // end namespace llvm

#endif
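
// A minimal sketch, assuming a label created via a streamer's MCContext:
// build two unwind codes with the helper struct above. The register number
// is a Win64 unwind-encoding value (5 = RBP), chosen for illustration.
#include "llvm/MC/MCWin64EH.h"

static void buildExampleUnwindCodes(llvm::MCSymbol *Label) {
  llvm::WinEH::Instruction Push =
      llvm::Win64EH::Instruction::PushNonVol(Label, /*Reg=*/5);
  llvm::WinEH::Instruction Alloc =
      llvm::Win64EH::Instruction::Alloc(Label, /*Size=*/64); // UOP_AllocSmall
  (void)Push;
  (void)Alloc;
}
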
//===- MCAsmInfoDarwin.h - Darwin asm properties ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines target asm properties related to what form asm statements
// should take in general on Darwin-based targets.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFODARWIN_H
#define LLVM_MC_MCASMINFODARWIN_H

#include "llvm/MC/MCAsmInfo.h"

namespace llvm {

class MCAsmInfoDarwin : public MCAsmInfo {
public:
  explicit MCAsmInfoDarwin();

  bool isSectionAtomizableBySymbols(const MCSection &Section) const override;
};

} // end namespace llvm

#endif // LLVM_MC_MCASMINFODARWIN_H
//===- MC/TargetRegistry.h - Target Registration ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes the TargetRegistry interface, which tools can use to access
// the appropriate target specific classes (TargetMachine, AsmPrinter, etc.)
// which have been registered.
//
// Target specific class implementations should register themselves using the
// appropriate TargetRegistry interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_TARGETREGISTRY_H
#define LLVM_MC_TARGETREGISTRY_H

#include "llvm-c/DisassemblerTypes.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <optional>
#include <string>

namespace llvm {

class AsmPrinter;
class MCAsmBackend;
class MCAsmInfo;
class MCAsmParser;
class MCCodeEmitter;
class MCContext;
class MCDisassembler;
class MCInstPrinter;
class MCInstrAnalysis;
class MCInstrInfo;
class MCObjectWriter;
class MCRegisterInfo;
class MCRelocationInfo;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbolizer;
class MCTargetAsmParser;
class MCTargetOptions;
class MCTargetStreamer;
class raw_ostream;
class TargetMachine;
class TargetOptions;
namespace mca {
class CustomBehaviour;
class InstrPostProcess;
class InstrumentManager;
struct SourceMgr;
} // namespace mca

MCStreamer *createNullStreamer(MCContext &Ctx);
// Takes ownership of \p TAB and \p CE.

/// Create a machine code streamer which will print out assembly for the native
/// target, suitable for compiling with a native assembler.
///
/// \param InstPrint - If given, the instruction printer to use. If not given
/// the MCInst representation will be printed.  This method takes ownership of
/// InstPrint.
///
/// \param CE - If given, a code emitter to use to show the instruction
/// encoding inline with the assembly. This method takes ownership of \p CE.
///
/// \param TAB - If given, a target asm backend to use to show the fixup
/// information in conjunction with encoding information. This method takes
/// ownership of \p TAB.
///
/// \param ShowInst - Whether to show the MCInst representation inline with
/// the assembly.
MCStreamer *
createAsmStreamer(MCContext &Ctx, std::unique_ptr<formatted_raw_ostream> OS,
                  bool isVerboseAsm, bool useDwarfDirectory,
                  MCInstPrinter *InstPrint, std::unique_ptr<MCCodeEmitter> &&CE,
                  std::unique_ptr<MCAsmBackend> &&TAB, bool ShowInst);

MCStreamer *createELFStreamer(MCContext &Ctx,
                              std::unique_ptr<MCAsmBackend> &&TAB,
                              std::unique_ptr<MCObjectWriter> &&OW,
                              std::unique_ptr<MCCodeEmitter> &&CE,
                              bool RelaxAll);
MCStreamer *createMachOStreamer(MCContext &Ctx,
                                std::unique_ptr<MCAsmBackend> &&TAB,
                                std::unique_ptr<MCObjectWriter> &&OW,
                                std::unique_ptr<MCCodeEmitter> &&CE,
                                bool RelaxAll, bool DWARFMustBeAtTheEnd,
                                bool LabelSections = false);
MCStreamer *createWasmStreamer(MCContext &Ctx,
                               std::unique_ptr<MCAsmBackend> &&TAB,
                               std::unique_ptr<MCObjectWriter> &&OW,
                               std::unique_ptr<MCCodeEmitter> &&CE,
                               bool RelaxAll);
MCStreamer *createXCOFFStreamer(MCContext &Ctx,
                                std::unique_ptr<MCAsmBackend> &&TAB,
                                std::unique_ptr<MCObjectWriter> &&OW,
                                std::unique_ptr<MCCodeEmitter> &&CE,
                                bool RelaxAll);
MCStreamer *createSPIRVStreamer(MCContext &Ctx,
                                std::unique_ptr<MCAsmBackend> &&TAB,
                                std::unique_ptr<MCObjectWriter> &&OW,
                                std::unique_ptr<MCCodeEmitter> &&CE,
                                bool RelaxAll);
MCStreamer *createDXContainerStreamer(MCContext &Ctx,
                                      std::unique_ptr<MCAsmBackend> &&TAB,
                                      std::unique_ptr<MCObjectWriter> &&OW,
                                      std::unique_ptr<MCCodeEmitter> &&CE,
                                      bool RelaxAll);

MCRelocationInfo *createMCRelocationInfo(const Triple &TT, MCContext &Ctx);

MCSymbolizer *createMCSymbolizer(const Triple &TT, LLVMOpInfoCallback GetOpInfo,
                                 LLVMSymbolLookupCallback SymbolLookUp,
                                 void *DisInfo, MCContext *Ctx,
                                 std::unique_ptr<MCRelocationInfo> &&RelInfo);

mca::CustomBehaviour *createCustomBehaviour(const MCSubtargetInfo &STI,
                                            const mca::SourceMgr &SrcMgr,
                                            const MCInstrInfo &MCII);

mca::InstrPostProcess *createInstrPostProcess(const MCSubtargetInfo &STI,
                                              const MCInstrInfo &MCII);

mca::InstrumentManager *createInstrumentManager(const MCSubtargetInfo &STI,
                                                const MCInstrInfo &MCII);
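
// A minimal sketch of the simplest factory above (assumes MCStreamer is a
// complete type at the point of use, e.g. llvm/MC/MCStreamer.h has been
// included, and that the MCContext was fully initialized elsewhere): the
// returned streamer discards everything it is fed.
inline std::unique_ptr<MCStreamer> makeExampleNullStreamer(MCContext &Ctx) {
  return std::unique_ptr<MCStreamer>(createNullStreamer(Ctx));
}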

/// Target - Wrapper for Target specific information.
///
/// For registration purposes, this is a POD type so that targets can be
/// registered without the use of static constructors.
///
/// Targets should implement a single global instance of this class (which
/// will be zero initialized), and pass that instance to the TargetRegistry as
/// part of their initialization.
class Target {
public:
  friend struct TargetRegistry;

  using ArchMatchFnTy = bool (*)(Triple::ArchType Arch);

  using MCAsmInfoCtorFnTy = MCAsmInfo *(*)(const MCRegisterInfo &MRI,
                                           const Triple &TT,
                                           const MCTargetOptions &Options);
  using MCObjectFileInfoCtorFnTy = MCObjectFileInfo *(*)(MCContext &Ctx,
                                                         bool PIC,
                                                         bool LargeCodeModel);
  using MCInstrInfoCtorFnTy = MCInstrInfo *(*)();
  using MCInstrAnalysisCtorFnTy = MCInstrAnalysis *(*)(const MCInstrInfo *Info);
  using MCRegInfoCtorFnTy = MCRegisterInfo *(*)(const Triple &TT);
  using MCSubtargetInfoCtorFnTy = MCSubtargetInfo *(*)(const Triple &TT,
                                                       StringRef CPU,
                                                       StringRef Features);
  using TargetMachineCtorTy = TargetMachine
      *(*)(const Target &T, const Triple &TT, StringRef CPU, StringRef Features,
           const TargetOptions &Options, std::optional<Reloc::Model> RM,
           std::optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT);
  // If it weren't for layering issues (this header is in llvm/Support, but
  // depends on MC?) this should take the Streamer by value rather than rvalue
  // reference.
  using AsmPrinterCtorTy = AsmPrinter *(*)(
      TargetMachine &TM, std::unique_ptr<MCStreamer> &&Streamer);
  using MCAsmBackendCtorTy = MCAsmBackend *(*)(const Target &T,
                                               const MCSubtargetInfo &STI,
                                               const MCRegisterInfo &MRI,
                                               const MCTargetOptions &Options);
  using MCAsmParserCtorTy = MCTargetAsmParser *(*)(
      const MCSubtargetInfo &STI, MCAsmParser &P, const MCInstrInfo &MII,
      const MCTargetOptions &Options);
  using MCDisassemblerCtorTy = MCDisassembler *(*)(const Target &T,
                                                   const MCSubtargetInfo &STI,
                                                   MCContext &Ctx);
  using MCInstPrinterCtorTy = MCInstPrinter *(*)(const Triple &T,
                                                 unsigned SyntaxVariant,
                                                 const MCAsmInfo &MAI,
                                                 const MCInstrInfo &MII,
                                                 const MCRegisterInfo &MRI);
  using MCCodeEmitterCtorTy = MCCodeEmitter *(*)(const MCInstrInfo &II,
                                                 MCContext &Ctx);
  using ELFStreamerCtorTy =
      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
                      std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);
  using MachOStreamerCtorTy =
      MCStreamer *(*)(MCContext &Ctx, std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll,
                      bool DWARFMustBeAtTheEnd);
  using COFFStreamerCtorTy =
      MCStreamer *(*)(MCContext &Ctx, std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll,
                      bool IncrementalLinkerCompatible);
  using WasmStreamerCtorTy =
      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
                      std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);
  using XCOFFStreamerCtorTy =
      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
                      std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);
  using SPIRVStreamerCtorTy =
      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
                      std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);
  
  using DXContainerStreamerCtorTy =
      MCStreamer *(*)(const Triple &T, MCContext &Ctx,
                      std::unique_ptr<MCAsmBackend> &&TAB,
                      std::unique_ptr<MCObjectWriter> &&OW,
                      std::unique_ptr<MCCodeEmitter> &&Emitter, bool RelaxAll);

  using NullTargetStreamerCtorTy = MCTargetStreamer *(*)(MCStreamer &S);
  using AsmTargetStreamerCtorTy = MCTargetStreamer *(*)(
      MCStreamer &S, formatted_raw_ostream &OS, MCInstPrinter *InstPrint,
      bool IsVerboseAsm);
  using ObjectTargetStreamerCtorTy = MCTargetStreamer *(*)(
      MCStreamer &S, const MCSubtargetInfo &STI);
  using MCRelocationInfoCtorTy = MCRelocationInfo *(*)(const Triple &TT,
                                                       MCContext &Ctx);
  using MCSymbolizerCtorTy = MCSymbolizer *(*)(
      const Triple &TT, LLVMOpInfoCallback GetOpInfo,
      LLVMSymbolLookupCallback SymbolLookUp, void *DisInfo, MCContext *Ctx,
      std::unique_ptr<MCRelocationInfo> &&RelInfo);

  using CustomBehaviourCtorTy =
      mca::CustomBehaviour *(*)(const MCSubtargetInfo &STI,
                                const mca::SourceMgr &SrcMgr,
                                const MCInstrInfo &MCII);

  using InstrPostProcessCtorTy =
      mca::InstrPostProcess *(*)(const MCSubtargetInfo &STI,
                                 const MCInstrInfo &MCII);

  using InstrumentManagerCtorTy =
      mca::InstrumentManager *(*)(const MCSubtargetInfo &STI,
                                  const MCInstrInfo &MCII);

private:
  /// Next - The next registered target in the linked list, maintained by the
  /// TargetRegistry.
  Target *Next;

  /// The target function for checking if an architecture is supported.
  ArchMatchFnTy ArchMatchFn;

  /// Name - The target name.
  const char *Name;

  /// ShortDesc - A short description of the target.
  const char *ShortDesc;

  /// BackendName - The name of the backend implementation. This must match the
  /// name of the 'def X : Target ...' in TableGen.
  const char *BackendName;

  /// HasJIT - Whether this target supports the JIT.
  bool HasJIT;

  /// MCAsmInfoCtorFn - Constructor function for this target's MCAsmInfo, if
  /// registered.
  MCAsmInfoCtorFnTy MCAsmInfoCtorFn;

  /// Constructor function for this target's MCObjectFileInfo, if registered.
  MCObjectFileInfoCtorFnTy MCObjectFileInfoCtorFn;

  /// MCInstrInfoCtorFn - Constructor function for this target's MCInstrInfo,
  /// if registered.
  MCInstrInfoCtorFnTy MCInstrInfoCtorFn;

  /// MCInstrAnalysisCtorFn - Constructor function for this target's
  /// MCInstrAnalysis, if registered.
  MCInstrAnalysisCtorFnTy MCInstrAnalysisCtorFn;

  /// MCRegInfoCtorFn - Constructor function for this target's MCRegisterInfo,
  /// if registered.
  MCRegInfoCtorFnTy MCRegInfoCtorFn;

  /// MCSubtargetInfoCtorFn - Constructor function for this target's
  /// MCSubtargetInfo, if registered.
  MCSubtargetInfoCtorFnTy MCSubtargetInfoCtorFn;

  /// TargetMachineCtorFn - Construction function for this target's
  /// TargetMachine, if registered.
  TargetMachineCtorTy TargetMachineCtorFn;

  /// MCAsmBackendCtorFn - Construction function for this target's
  /// MCAsmBackend, if registered.
  MCAsmBackendCtorTy MCAsmBackendCtorFn;

  /// MCAsmParserCtorFn - Construction function for this target's
  /// MCTargetAsmParser, if registered.
  MCAsmParserCtorTy MCAsmParserCtorFn;

  /// AsmPrinterCtorFn - Construction function for this target's AsmPrinter,
  /// if registered.
  AsmPrinterCtorTy AsmPrinterCtorFn;

  /// MCDisassemblerCtorFn - Construction function for this target's
  /// MCDisassembler, if registered.
  MCDisassemblerCtorTy MCDisassemblerCtorFn;

  /// MCInstPrinterCtorFn - Construction function for this target's
  /// MCInstPrinter, if registered.
  MCInstPrinterCtorTy MCInstPrinterCtorFn;

  /// MCCodeEmitterCtorFn - Construction function for this target's
  /// CodeEmitter, if registered.
  MCCodeEmitterCtorTy MCCodeEmitterCtorFn;

  // Construction functions for the various object formats, if registered.
  COFFStreamerCtorTy COFFStreamerCtorFn = nullptr;
  MachOStreamerCtorTy MachOStreamerCtorFn = nullptr;
  ELFStreamerCtorTy ELFStreamerCtorFn = nullptr;
  WasmStreamerCtorTy WasmStreamerCtorFn = nullptr;
  XCOFFStreamerCtorTy XCOFFStreamerCtorFn = nullptr;
  SPIRVStreamerCtorTy SPIRVStreamerCtorFn = nullptr;
  DXContainerStreamerCtorTy DXContainerStreamerCtorFn = nullptr;

  /// Construction function for this target's null TargetStreamer, if
  /// registered (default = nullptr).
  NullTargetStreamerCtorTy NullTargetStreamerCtorFn = nullptr;

  /// Construction function for this target's asm TargetStreamer, if
  /// registered (default = nullptr).
  AsmTargetStreamerCtorTy AsmTargetStreamerCtorFn = nullptr;

  /// Construction function for this target's obj TargetStreamer, if
  /// registered (default = nullptr).
  ObjectTargetStreamerCtorTy ObjectTargetStreamerCtorFn = nullptr;

  /// MCRelocationInfoCtorFn - Construction function for this target's
  /// MCRelocationInfo, if registered (default = llvm::createMCRelocationInfo)
  MCRelocationInfoCtorTy MCRelocationInfoCtorFn = nullptr;

  /// MCSymbolizerCtorFn - Construction function for this target's
  /// MCSymbolizer, if registered (default = llvm::createMCSymbolizer)
  MCSymbolizerCtorTy MCSymbolizerCtorFn = nullptr;

  /// CustomBehaviourCtorFn - Construction function for this target's
  /// CustomBehaviour, if registered (default = nullptr).
  CustomBehaviourCtorTy CustomBehaviourCtorFn = nullptr;

  /// InstrPostProcessCtorFn - Construction function for this target's
  /// InstrPostProcess, if registered (default = nullptr).
  InstrPostProcessCtorTy InstrPostProcessCtorFn = nullptr;

  /// InstrumentManagerCtorFn - Construction function for this target's
  /// InstrumentManager, if registered (default = nullptr).
  InstrumentManagerCtorTy InstrumentManagerCtorFn = nullptr;

public:
  Target() = default;

  /// @name Target Information
  /// @{

  /// getNext - Return the next registered target.
  const Target *getNext() const { return Next; }

  /// getName - Get the target name.
  const char *getName() const { return Name; }

  /// getShortDescription - Get a short description of the target.
  const char *getShortDescription() const { return ShortDesc; }

  /// getBackendName - Get the backend name.
  const char *getBackendName() const { return BackendName; }

  /// @}
  /// @name Feature Predicates
  /// @{

  /// hasJIT - Check if this target supports just-in-time compilation.
  bool hasJIT() const { return HasJIT; }

  /// hasTargetMachine - Check if this target supports code generation.
  bool hasTargetMachine() const { return TargetMachineCtorFn != nullptr; }

  /// hasMCAsmBackend - Check if this target supports .o generation.
  bool hasMCAsmBackend() const { return MCAsmBackendCtorFn != nullptr; }

  /// hasMCAsmParser - Check if this target supports assembly parsing.
  bool hasMCAsmParser() const { return MCAsmParserCtorFn != nullptr; }

  /// @}
  /// @name Feature Constructors
  /// @{

  /// createMCAsmInfo - Create a MCAsmInfo implementation for the specified
  /// target triple.
  ///
  /// \param TheTriple This argument is used to determine the target machine
  /// feature set; it should always be provided. Generally this should be
  /// either the target triple from the module, or the target triple of the
  /// host if that does not exist.
  MCAsmInfo *createMCAsmInfo(const MCRegisterInfo &MRI, StringRef TheTriple,
                             const MCTargetOptions &Options) const {
    if (!MCAsmInfoCtorFn)
      return nullptr;
    return MCAsmInfoCtorFn(MRI, Triple(TheTriple), Options);
  }

  /// Create a MCObjectFileInfo implementation for the specified target
  /// triple.
  ///
  MCObjectFileInfo *createMCObjectFileInfo(MCContext &Ctx, bool PIC,
                                           bool LargeCodeModel = false) const {
    if (!MCObjectFileInfoCtorFn) {
      MCObjectFileInfo *MOFI = new MCObjectFileInfo();
      MOFI->initMCObjectFileInfo(Ctx, PIC, LargeCodeModel);
      return MOFI;
    }
    return MCObjectFileInfoCtorFn(Ctx, PIC, LargeCodeModel);
  }
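
  // A minimal usage sketch (illustrative, not part of this header): the
  // returned object is typically registered with the context, and Ctx must
  // outlive it.
  //
  //   std::unique_ptr<MCObjectFileInfo> MOFI(
  //       T.createMCObjectFileInfo(Ctx, /*PIC=*/false));
  //   Ctx.setObjectFileInfo(MOFI.get());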

  /// createMCInstrInfo - Create a MCInstrInfo implementation.
  ///
  MCInstrInfo *createMCInstrInfo() const {
    if (!MCInstrInfoCtorFn)
      return nullptr;
    return MCInstrInfoCtorFn();
  }

  /// createMCInstrAnalysis - Create a MCInstrAnalysis implementation.
  ///
  MCInstrAnalysis *createMCInstrAnalysis(const MCInstrInfo *Info) const {
    if (!MCInstrAnalysisCtorFn)
      return nullptr;
    return MCInstrAnalysisCtorFn(Info);
  }

  /// createMCRegInfo - Create a MCRegisterInfo implementation.
  ///
  MCRegisterInfo *createMCRegInfo(StringRef TT) const {
    if (!MCRegInfoCtorFn)
      return nullptr;
    return MCRegInfoCtorFn(Triple(TT));
  }

  /// createMCSubtargetInfo - Create a MCSubtargetInfo implementation.
  ///
  /// \param TheTriple This argument is used to determine the target machine
  /// feature set; it should always be provided. Generally this should be
  /// either the target triple from the module, or the target triple of the
  /// host if that does not exist.
  /// \param CPU This specifies the name of the target CPU.
  /// \param Features This specifies the string representation of the
  /// additional target features.
  MCSubtargetInfo *createMCSubtargetInfo(StringRef TheTriple, StringRef CPU,
                                         StringRef Features) const {
    if (!MCSubtargetInfoCtorFn)
      return nullptr;
    return MCSubtargetInfoCtorFn(Triple(TheTriple), CPU, Features);
  }
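
  // Taken together, the create* hooks above are usually chained when standing
  // up the MC layer. A sketch (TT is a target triple string and T a looked-up
  // Target; null checks omitted for brevity):
  //
  //   std::unique_ptr<MCRegisterInfo> MRI(T.createMCRegInfo(TT));
  //   MCTargetOptions MCOptions;
  //   std::unique_ptr<MCAsmInfo> MAI(T.createMCAsmInfo(*MRI, TT, MCOptions));
  //   std::unique_ptr<MCInstrInfo> MII(T.createMCInstrInfo());
  //   std::unique_ptr<MCSubtargetInfo> STI(
  //       T.createMCSubtargetInfo(TT, /*CPU=*/"", /*Features=*/""));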

  /// createTargetMachine - Create a target specific machine implementation
  /// for the specified \p Triple.
  ///
  /// \param TT This argument is used to determine the target machine
  /// feature set; it should always be provided. Generally this should be
  /// either the target triple from the module, or the target triple of the
  /// host if that does not exist.
  TargetMachine *createTargetMachine(
      StringRef TT, StringRef CPU, StringRef Features,
      const TargetOptions &Options, std::optional<Reloc::Model> RM,
      std::optional<CodeModel::Model> CM = std::nullopt,
      CodeGenOpt::Level OL = CodeGenOpt::Default, bool JIT = false) const {
    if (!TargetMachineCtorFn)
      return nullptr;
    return TargetMachineCtorFn(*this, Triple(TT), CPU, Features, Options, RM,
                               CM, OL, JIT);
  }
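
  // For example (a sketch; TT and the empty CPU/feature strings are
  // placeholder values chosen by the caller):
  //
  //   std::string Error;
  //   const Target *T = TargetRegistry::lookupTarget(TT, Error);
  //   if (!T)
  //     report_fatal_error(Twine(Error));
  //   std::unique_ptr<TargetMachine> TM(T->createTargetMachine(
  //       TT, /*CPU=*/"", /*Features=*/"", TargetOptions(), Reloc::PIC_));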

  /// createMCAsmBackend - Create a target specific assembly backend.
  MCAsmBackend *createMCAsmBackend(const MCSubtargetInfo &STI,
                                   const MCRegisterInfo &MRI,
                                   const MCTargetOptions &Options) const {
    if (!MCAsmBackendCtorFn)
      return nullptr;
    return MCAsmBackendCtorFn(*this, STI, MRI, Options);
  }

  /// createMCAsmParser - Create a target specific assembly parser.
  ///
  /// \param Parser The target independent parser implementation to use for
  /// parsing and lexing.
  MCTargetAsmParser *createMCAsmParser(const MCSubtargetInfo &STI,
                                       MCAsmParser &Parser,
                                       const MCInstrInfo &MII,
                                       const MCTargetOptions &Options) const {
    if (!MCAsmParserCtorFn)
      return nullptr;
    return MCAsmParserCtorFn(STI, Parser, MII, Options);
  }

  /// createAsmPrinter - Create a target specific assembly printer pass.  This
  /// takes ownership of the MCStreamer object.
  AsmPrinter *createAsmPrinter(TargetMachine &TM,
                               std::unique_ptr<MCStreamer> &&Streamer) const {
    if (!AsmPrinterCtorFn)
      return nullptr;
    return AsmPrinterCtorFn(TM, std::move(Streamer));
  }

  MCDisassembler *createMCDisassembler(const MCSubtargetInfo &STI,
                                       MCContext &Ctx) const {
    if (!MCDisassemblerCtorFn)
      return nullptr;
    return MCDisassemblerCtorFn(*this, STI, Ctx);
  }
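
  // Disassembly sketch (Bytes and Addr are caller-provided; assumes STI and
  // Ctx were built via the hooks above):
  //
  //   std::unique_ptr<MCDisassembler> DisAsm(
  //       T.createMCDisassembler(*STI, Ctx));
  //   MCInst Inst;
  //   uint64_t Size;
  //   MCDisassembler::DecodeStatus Status =
  //       DisAsm->getInstruction(Inst, Size, Bytes, Addr, nulls());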

  MCInstPrinter *createMCInstPrinter(const Triple &T, unsigned SyntaxVariant,
                                     const MCAsmInfo &MAI,
                                     const MCInstrInfo &MII,
                                     const MCRegisterInfo &MRI) const {
    if (!MCInstPrinterCtorFn)
      return nullptr;
    return MCInstPrinterCtorFn(T, SyntaxVariant, MAI, MII, MRI);
  }

  /// createMCCodeEmitter - Create a target specific code emitter.
  MCCodeEmitter *createMCCodeEmitter(const MCInstrInfo &II,
                                     MCContext &Ctx) const {
    if (!MCCodeEmitterCtorFn)
      return nullptr;
    return MCCodeEmitterCtorFn(II, Ctx);
  }

  /// Create a target specific MCStreamer.
  ///
  /// \param T The target triple.
  /// \param Ctx The target context.
  /// \param TAB The target assembler backend object. Takes ownership.
  /// \param OW The object writer. Takes ownership.
  /// \param Emitter The code emitter. Takes ownership.
  /// \param RelaxAll Relax all fixups?
  MCStreamer *createMCObjectStreamer(const Triple &T, MCContext &Ctx,
                                     std::unique_ptr<MCAsmBackend> &&TAB,
                                     std::unique_ptr<MCObjectWriter> &&OW,
                                     std::unique_ptr<MCCodeEmitter> &&Emitter,
                                     const MCSubtargetInfo &STI, bool RelaxAll,
                                     bool IncrementalLinkerCompatible,
                                     bool DWARFMustBeAtTheEnd) const {
    MCStreamer *S = nullptr;
    switch (T.getObjectFormat()) {
    case Triple::UnknownObjectFormat:
      llvm_unreachable("Unknown object format");
    case Triple::COFF:
      assert((T.isOSWindows() || T.isUEFI()) &&
             "only Windows and UEFI COFF are supported");
      S = COFFStreamerCtorFn(Ctx, std::move(TAB), std::move(OW),
                             std::move(Emitter), RelaxAll,
                             IncrementalLinkerCompatible);
      break;
    case Triple::MachO:
      if (MachOStreamerCtorFn)
        S = MachOStreamerCtorFn(Ctx, std::move(TAB), std::move(OW),
                                std::move(Emitter), RelaxAll,
                                DWARFMustBeAtTheEnd);
      else
        S = createMachOStreamer(Ctx, std::move(TAB), std::move(OW),
                                std::move(Emitter), RelaxAll,
                                DWARFMustBeAtTheEnd);
      break;
    case Triple::ELF:
      if (ELFStreamerCtorFn)
        S = ELFStreamerCtorFn(T, Ctx, std::move(TAB), std::move(OW),
                              std::move(Emitter), RelaxAll);
      else
        S = createELFStreamer(Ctx, std::move(TAB), std::move(OW),
                              std::move(Emitter), RelaxAll);
      break;
    case Triple::Wasm:
      if (WasmStreamerCtorFn)
        S = WasmStreamerCtorFn(T, Ctx, std::move(TAB), std::move(OW),
                               std::move(Emitter), RelaxAll);
      else
        S = createWasmStreamer(Ctx, std::move(TAB), std::move(OW),
                               std::move(Emitter), RelaxAll);
      break;
    case Triple::GOFF:
      report_fatal_error("GOFF MCObjectStreamer not implemented yet");
    case Triple::XCOFF:
      if (XCOFFStreamerCtorFn)
        S = XCOFFStreamerCtorFn(T, Ctx, std::move(TAB), std::move(OW),
                                std::move(Emitter), RelaxAll);
      else
        S = createXCOFFStreamer(Ctx, std::move(TAB), std::move(OW),
                                std::move(Emitter), RelaxAll);
      break;
    case Triple::SPIRV:
      if (SPIRVStreamerCtorFn)
        S = SPIRVStreamerCtorFn(T, Ctx, std::move(TAB), std::move(OW),
                                std::move(Emitter), RelaxAll);
      else
        S = createSPIRVStreamer(Ctx, std::move(TAB), std::move(OW),
                                std::move(Emitter), RelaxAll);
      break;
    case Triple::DXContainer:
      if (DXContainerStreamerCtorFn)
        S = DXContainerStreamerCtorFn(T, Ctx, std::move(TAB), std::move(OW),
                                      std::move(Emitter), RelaxAll);
      else
        S = createDXContainerStreamer(Ctx, std::move(TAB), std::move(OW),
                                      std::move(Emitter), RelaxAll);
      break;
    }
    if (ObjectTargetStreamerCtorFn)
      ObjectTargetStreamerCtorFn(*S, STI);
    return S;
  }
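
  // Wiring sketch for the object-emission path (assumes MRI/MAI/MII/STI, an
  // MCContext Ctx, and MCOptions built as above; OS is a raw_pwrite_stream
  // supplied by the caller):
  //
  //   std::unique_ptr<MCAsmBackend> MAB(
  //       T.createMCAsmBackend(*STI, *MRI, MCOptions));
  //   std::unique_ptr<MCObjectWriter> OW = MAB->createObjectWriter(OS);
  //   std::unique_ptr<MCCodeEmitter> CE(T.createMCCodeEmitter(*MII, Ctx));
  //   std::unique_ptr<MCStreamer> Streamer(T.createMCObjectStreamer(
  //       TheTriple, Ctx, std::move(MAB), std::move(OW), std::move(CE), *STI,
  //       /*RelaxAll=*/false, /*IncrementalLinkerCompatible=*/false,
  //       /*DWARFMustBeAtTheEnd=*/false));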

  MCStreamer *createAsmStreamer(MCContext &Ctx,
                                std::unique_ptr<formatted_raw_ostream> OS,
                                bool IsVerboseAsm, bool UseDwarfDirectory,
                                MCInstPrinter *InstPrint,
                                std::unique_ptr<MCCodeEmitter> &&CE,
                                std::unique_ptr<MCAsmBackend> &&TAB,
                                bool ShowInst) const {
    formatted_raw_ostream &OSRef = *OS;
    MCStreamer *S = llvm::createAsmStreamer(
        Ctx, std::move(OS), IsVerboseAsm, UseDwarfDirectory, InstPrint,
        std::move(CE), std::move(TAB), ShowInst);
    createAsmTargetStreamer(*S, OSRef, InstPrint, IsVerboseAsm);
    return S;
  }

  MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
                                            formatted_raw_ostream &OS,
                                            MCInstPrinter *InstPrint,
                                            bool IsVerboseAsm) const {
    if (AsmTargetStreamerCtorFn)
      return AsmTargetStreamerCtorFn(S, OS, InstPrint, IsVerboseAsm);
    return nullptr;
  }

  MCStreamer *createNullStreamer(MCContext &Ctx) const {
    MCStreamer *S = llvm::createNullStreamer(Ctx);
    createNullTargetStreamer(*S);
    return S;
  }

  MCTargetStreamer *createNullTargetStreamer(MCStreamer &S) const {
    if (NullTargetStreamerCtorFn)
      return NullTargetStreamerCtorFn(S);
    return nullptr;
  }

  /// createMCRelocationInfo - Create a target specific MCRelocationInfo.
  ///
  /// \param TT The target triple.
  /// \param Ctx The target context.
  MCRelocationInfo *createMCRelocationInfo(StringRef TT, MCContext &Ctx) const {
    MCRelocationInfoCtorTy Fn = MCRelocationInfoCtorFn
                                    ? MCRelocationInfoCtorFn
                                    : llvm::createMCRelocationInfo;
    return Fn(Triple(TT), Ctx);
  }

  /// createMCSymbolizer - Create a target specific MCSymbolizer.
  ///
  /// \param TT The target triple.
  /// \param GetOpInfo The function to get the symbolic information for
  /// operands.
  /// \param SymbolLookUp The function to lookup a symbol name.
  /// \param DisInfo The pointer to the block of symbolic information for the
  /// above callback.
  /// \param Ctx The target context.
  /// \param RelInfo The relocation information for this target. Takes
  /// ownership.
  MCSymbolizer *
  createMCSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
                     LLVMSymbolLookupCallback SymbolLookUp, void *DisInfo,
                     MCContext *Ctx,
                     std::unique_ptr<MCRelocationInfo> &&RelInfo) const {
    MCSymbolizerCtorTy Fn =
        MCSymbolizerCtorFn ? MCSymbolizerCtorFn : llvm::createMCSymbolizer;
    return Fn(Triple(TT), GetOpInfo, SymbolLookUp, DisInfo, Ctx,
              std::move(RelInfo));
  }

  /// createCustomBehaviour - Create a target specific CustomBehaviour.
  /// This class is used by llvm-mca and requires backend functionality.
  mca::CustomBehaviour *createCustomBehaviour(const MCSubtargetInfo &STI,
                                              const mca::SourceMgr &SrcMgr,
                                              const MCInstrInfo &MCII) const {
    if (CustomBehaviourCtorFn)
      return CustomBehaviourCtorFn(STI, SrcMgr, MCII);
    return nullptr;
  }

  /// createInstrPostProcess - Create a target specific InstrPostProcess.
  /// This class is used by llvm-mca and requires backend functionality.
  mca::InstrPostProcess *createInstrPostProcess(const MCSubtargetInfo &STI,
                                                const MCInstrInfo &MCII) const {
    if (InstrPostProcessCtorFn)
      return InstrPostProcessCtorFn(STI, MCII);
    return nullptr;
  }

  /// createInstrumentManager - Create a target specific
  /// InstrumentManager. This class is used by llvm-mca and requires
  /// backend functionality.
  mca::InstrumentManager *
  createInstrumentManager(const MCSubtargetInfo &STI,
                          const MCInstrInfo &MCII) const {
    if (InstrumentManagerCtorFn)
      return InstrumentManagerCtorFn(STI, MCII);
    return nullptr;
  }

  /// @}
};

/// TargetRegistry - Generic interface to target specific features.
struct TargetRegistry {
  // FIXME: Make this a namespace, probably just move all the Register*
  // functions into Target (currently they all just set members on the Target
  // anyway, and Target friends this class so those functions can...
  // function).
  TargetRegistry() = delete;

  class iterator {
    friend struct TargetRegistry;

    const Target *Current = nullptr;

    explicit iterator(Target *T) : Current(T) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = Target;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    iterator() = default;

    bool operator==(const iterator &x) const { return Current == x.Current; }
    bool operator!=(const iterator &x) const { return !operator==(x); }

    // Iterator traversal: forward iteration only
    iterator &operator++() { // Preincrement
      assert(Current && "Cannot increment end iterator!");
      Current = Current->getNext();
      return *this;
    }
    iterator operator++(int) { // Postincrement
      iterator tmp = *this;
      ++*this;
      return tmp;
    }

    const Target &operator*() const {
      assert(Current && "Cannot dereference end iterator!");
      return *Current;
    }

    const Target *operator->() const { return &operator*(); }
  };

  /// printRegisteredTargetsForVersion - Print the registered targets
  /// appropriately for inclusion in a tool's version output.
  static void printRegisteredTargetsForVersion(raw_ostream &OS);

  /// @name Registry Access
  /// @{

  static iterator_range<iterator> targets();
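
  // For example, a tool can enumerate every registered target (a sketch):
  //
  //   for (const Target &T : TargetRegistry::targets())
  //     outs() << T.getName() << " - " << T.getShortDescription() << "\n";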

  /// lookupTarget - Lookup a target based on a target triple.
  ///
  /// \param Triple - The triple to use for finding a target.
  /// \param Error - On failure, an error string describing why no target was
  /// found.
  static const Target *lookupTarget(StringRef Triple, std::string &Error);

  /// lookupTarget - Lookup a target based on an architecture name
  /// and a target triple.  If the architecture name is non-empty,
  /// then the lookup is done by architecture.  Otherwise, the target
  /// triple is used.
  ///
  /// \param ArchName - The architecture to use for finding a target.
  /// \param TheTriple - The triple to use for finding a target.  The
  /// triple is updated with canonical architecture name if a lookup
  /// by architecture is done.
  /// \param Error - On failure, an error string describing why no target was
  /// found.
  static const Target *lookupTarget(StringRef ArchName, Triple &TheTriple,
                                    std::string &Error);

  /// @}
  /// @name Target Registration
  /// @{

  /// RegisterTarget - Register the given target. Attempts to register a
  /// target which has already been registered will be ignored.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Name - The target name. This should be a static string.
  /// @param ShortDesc - A short target description. This should be a static
  /// string.
  /// @param BackendName - The name of the backend. This should be a static
  /// string that is the same for all targets that share a backend
  /// implementation and must match the name used in the 'def X : Target ...' in
  /// TableGen.
  /// @param ArchMatchFn - The arch match checking function for this target.
  /// @param HasJIT - Whether the target supports JIT code
  /// generation.
  static void RegisterTarget(Target &T, const char *Name, const char *ShortDesc,
                             const char *BackendName,
                             Target::ArchMatchFnTy ArchMatchFn,
                             bool HasJIT = false);

  /// RegisterMCAsmInfo - Register a MCAsmInfo implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a MCAsmInfo for the target.
  static void RegisterMCAsmInfo(Target &T, Target::MCAsmInfoCtorFnTy Fn) {
    T.MCAsmInfoCtorFn = Fn;
  }

  /// Register a MCObjectFileInfo implementation for the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a MCObjectFileInfo for the target.
  static void RegisterMCObjectFileInfo(Target &T,
                                       Target::MCObjectFileInfoCtorFnTy Fn) {
    T.MCObjectFileInfoCtorFn = Fn;
  }

  /// RegisterMCInstrInfo - Register a MCInstrInfo implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a MCInstrInfo for the target.
  static void RegisterMCInstrInfo(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
    T.MCInstrInfoCtorFn = Fn;
  }

  /// RegisterMCInstrAnalysis - Register a MCInstrAnalysis implementation for
  /// the given target.
  static void RegisterMCInstrAnalysis(Target &T,
                                      Target::MCInstrAnalysisCtorFnTy Fn) {
    T.MCInstrAnalysisCtorFn = Fn;
  }

  /// RegisterMCRegInfo - Register a MCRegisterInfo implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a MCRegisterInfo for the target.
  static void RegisterMCRegInfo(Target &T, Target::MCRegInfoCtorFnTy Fn) {
    T.MCRegInfoCtorFn = Fn;
  }

  /// RegisterMCSubtargetInfo - Register a MCSubtargetInfo implementation for
  /// the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a MCSubtargetInfo for the target.
  static void RegisterMCSubtargetInfo(Target &T,
                                      Target::MCSubtargetInfoCtorFnTy Fn) {
    T.MCSubtargetInfoCtorFn = Fn;
  }

  /// RegisterTargetMachine - Register a TargetMachine implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a TargetMachine for the target.
  static void RegisterTargetMachine(Target &T, Target::TargetMachineCtorTy Fn) {
    T.TargetMachineCtorFn = Fn;
  }

  /// RegisterMCAsmBackend - Register a MCAsmBackend implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an AsmBackend for the target.
  static void RegisterMCAsmBackend(Target &T, Target::MCAsmBackendCtorTy Fn) {
    T.MCAsmBackendCtorFn = Fn;
  }

  /// RegisterMCAsmParser - Register a MCTargetAsmParser implementation for
  /// the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an MCTargetAsmParser for the target.
  static void RegisterMCAsmParser(Target &T, Target::MCAsmParserCtorTy Fn) {
    T.MCAsmParserCtorFn = Fn;
  }

  /// RegisterAsmPrinter - Register an AsmPrinter implementation for the given
  /// target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an AsmPrinter for the target.
  static void RegisterAsmPrinter(Target &T, Target::AsmPrinterCtorTy Fn) {
    T.AsmPrinterCtorFn = Fn;
  }

  /// RegisterMCDisassembler - Register a MCDisassembler implementation for
  /// the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an MCDisassembler for the target.
  static void RegisterMCDisassembler(Target &T,
                                     Target::MCDisassemblerCtorTy Fn) {
    T.MCDisassemblerCtorFn = Fn;
  }

  /// RegisterMCInstPrinter - Register a MCInstPrinter implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an MCInstPrinter for the target.
  static void RegisterMCInstPrinter(Target &T, Target::MCInstPrinterCtorTy Fn) {
    T.MCInstPrinterCtorFn = Fn;
  }

  /// RegisterMCCodeEmitter - Register a MCCodeEmitter implementation for the
  /// given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an MCCodeEmitter for the target.
  static void RegisterMCCodeEmitter(Target &T, Target::MCCodeEmitterCtorTy Fn) {
    T.MCCodeEmitterCtorFn = Fn;
  }

  static void RegisterCOFFStreamer(Target &T, Target::COFFStreamerCtorTy Fn) {
    T.COFFStreamerCtorFn = Fn;
  }

  static void RegisterMachOStreamer(Target &T, Target::MachOStreamerCtorTy Fn) {
    T.MachOStreamerCtorFn = Fn;
  }

  static void RegisterELFStreamer(Target &T, Target::ELFStreamerCtorTy Fn) {
    T.ELFStreamerCtorFn = Fn;
  }

  static void RegisterSPIRVStreamer(Target &T, Target::SPIRVStreamerCtorTy Fn) {
    T.SPIRVStreamerCtorFn = Fn;
  }

  static void RegisterDXContainerStreamer(Target &T, Target::DXContainerStreamerCtorTy Fn) {
    T.DXContainerStreamerCtorFn = Fn;
  }

  static void RegisterWasmStreamer(Target &T, Target::WasmStreamerCtorTy Fn) {
    T.WasmStreamerCtorFn = Fn;
  }

  static void RegisterXCOFFStreamer(Target &T, Target::XCOFFStreamerCtorTy Fn) {
    T.XCOFFStreamerCtorFn = Fn;
  }

  static void RegisterNullTargetStreamer(Target &T,
                                         Target::NullTargetStreamerCtorTy Fn) {
    T.NullTargetStreamerCtorFn = Fn;
  }

  static void RegisterAsmTargetStreamer(Target &T,
                                        Target::AsmTargetStreamerCtorTy Fn) {
    T.AsmTargetStreamerCtorFn = Fn;
  }

  static void
  RegisterObjectTargetStreamer(Target &T,
                               Target::ObjectTargetStreamerCtorTy Fn) {
    T.ObjectTargetStreamerCtorFn = Fn;
  }

  /// RegisterMCRelocationInfo - Register an MCRelocationInfo
  /// implementation for the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an MCRelocationInfo for the target.
  static void RegisterMCRelocationInfo(Target &T,
                                       Target::MCRelocationInfoCtorTy Fn) {
    T.MCRelocationInfoCtorFn = Fn;
  }

  /// RegisterMCSymbolizer - Register an MCSymbolizer
  /// implementation for the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an MCSymbolizer for the target.
  static void RegisterMCSymbolizer(Target &T, Target::MCSymbolizerCtorTy Fn) {
    T.MCSymbolizerCtorFn = Fn;
  }

  /// RegisterCustomBehaviour - Register a CustomBehaviour
  /// implementation for the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct a CustomBehaviour for the target.
  static void RegisterCustomBehaviour(Target &T,
                                      Target::CustomBehaviourCtorTy Fn) {
    T.CustomBehaviourCtorFn = Fn;
  }

  /// RegisterInstrPostProcess - Register an InstrPostProcess
  /// implementation for the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an InstrPostProcess for the target.
  static void RegisterInstrPostProcess(Target &T,
                                       Target::InstrPostProcessCtorTy Fn) {
    T.InstrPostProcessCtorFn = Fn;
  }

  /// RegisterInstrumentManager - Register an InstrumentManager
  /// implementation for the given target.
  ///
  /// Clients are responsible for ensuring that registration doesn't occur
  /// while another thread is attempting to access the registry. Typically
  /// this is done by initializing all targets at program startup.
  ///
  /// @param T - The target being registered.
  /// @param Fn - A function to construct an InstrumentManager for the
  /// target.
  static void RegisterInstrumentManager(Target &T,
                                        Target::InstrumentManagerCtorTy Fn) {
    T.InstrumentManagerCtorFn = Fn;
  }

  /// @}
};

//===--------------------------------------------------------------------===//

/// RegisterTarget - Helper template for registering a target, for use in the
/// target's initialization function. Usage:
///
/// Target &getTheFooTarget() { // The global target instance.
///   static Target TheFooTarget;
///   return TheFooTarget;
/// }
/// extern "C" void LLVMInitializeFooTargetInfo() {
///   RegisterTarget<Triple::foo> X(getTheFooTarget(), "foo",
///                                 "Foo description", "Foo" /* Backend Name */);
/// }
template <Triple::ArchType TargetArchType = Triple::UnknownArch,
          bool HasJIT = false>
struct RegisterTarget {
  RegisterTarget(Target &T, const char *Name, const char *Desc,
                 const char *BackendName) {
    TargetRegistry::RegisterTarget(T, Name, Desc, BackendName, &getArchMatch,
                                   HasJIT);
  }

  static bool getArchMatch(Triple::ArchType Arch) {
    return Arch == TargetArchType;
  }
};

/// RegisterMCAsmInfo - Helper template for registering a target assembly info
/// implementation.  This invokes the static "Create" method on the class to
/// actually do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCAsmInfo<FooMCAsmInfo> X(TheFooTarget);
/// }
template <class MCAsmInfoImpl> struct RegisterMCAsmInfo {
  RegisterMCAsmInfo(Target &T) {
    TargetRegistry::RegisterMCAsmInfo(T, &Allocator);
  }

private:
  static MCAsmInfo *Allocator(const MCRegisterInfo & /*MRI*/, const Triple &TT,
                              const MCTargetOptions &Options) {
    return new MCAsmInfoImpl(TT, Options);
  }
};

/// RegisterMCAsmInfoFn - Helper template for registering a target assembly info
/// implementation.  This invokes the specified function to do the
/// construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCAsmInfoFn X(TheFooTarget, TheFunction);
/// }
struct RegisterMCAsmInfoFn {
  RegisterMCAsmInfoFn(Target &T, Target::MCAsmInfoCtorFnTy Fn) {
    TargetRegistry::RegisterMCAsmInfo(T, Fn);
  }
};

/// Helper template for registering a target object file info implementation.
/// This invokes the static "Create" method on the class to actually do the
/// construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCObjectFileInfo<FooMCObjectFileInfo> X(TheFooTarget);
/// }
template <class MCObjectFileInfoImpl> struct RegisterMCObjectFileInfo {
  RegisterMCObjectFileInfo(Target &T) {
    TargetRegistry::RegisterMCObjectFileInfo(T, &Allocator);
  }

private:
  static MCObjectFileInfo *Allocator(MCContext &Ctx, bool PIC,
                                     bool LargeCodeModel = false) {
    return new MCObjectFileInfoImpl(Ctx, PIC, LargeCodeModel);
  }
};

/// Helper template for registering a target object file info implementation.
/// This invokes the specified function to do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCObjectFileInfoFn X(TheFooTarget, TheFunction);
/// }
struct RegisterMCObjectFileInfoFn {
  RegisterMCObjectFileInfoFn(Target &T, Target::MCObjectFileInfoCtorFnTy Fn) {
    TargetRegistry::RegisterMCObjectFileInfo(T, Fn);
  }
};

/// RegisterMCInstrInfo - Helper template for registering a target instruction
/// info implementation.  This invokes the static "Create" method on the class
/// to actually do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCInstrInfo<FooMCInstrInfo> X(TheFooTarget);
/// }
template <class MCInstrInfoImpl> struct RegisterMCInstrInfo {
  RegisterMCInstrInfo(Target &T) {
    TargetRegistry::RegisterMCInstrInfo(T, &Allocator);
  }

private:
  static MCInstrInfo *Allocator() { return new MCInstrInfoImpl(); }
};

/// RegisterMCInstrInfoFn - Helper template for registering a target
/// instruction info implementation.  This invokes the specified function to
/// do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCInstrInfoFn X(TheFooTarget, TheFunction);
/// }
struct RegisterMCInstrInfoFn {
  RegisterMCInstrInfoFn(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
    TargetRegistry::RegisterMCInstrInfo(T, Fn);
  }
};

/// RegisterMCInstrAnalysis - Helper template for registering a target
/// instruction analyzer implementation.  This invokes the static "Create"
/// method on the class to actually do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCInstrAnalysis<FooMCInstrAnalysis> X(TheFooTarget);
/// }
template <class MCInstrAnalysisImpl> struct RegisterMCInstrAnalysis {
  RegisterMCInstrAnalysis(Target &T) {
    TargetRegistry::RegisterMCInstrAnalysis(T, &Allocator);
  }

private:
  static MCInstrAnalysis *Allocator(const MCInstrInfo *Info) {
    return new MCInstrAnalysisImpl(Info);
  }
};

/// RegisterMCInstrAnalysisFn - Helper template for registering a target
/// instruction analyzer implementation.  This invokes the specified function
/// to do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCInstrAnalysisFn X(TheFooTarget, TheFunction);
/// }
struct RegisterMCInstrAnalysisFn {
  RegisterMCInstrAnalysisFn(Target &T, Target::MCInstrAnalysisCtorFnTy Fn) {
    TargetRegistry::RegisterMCInstrAnalysis(T, Fn);
  }
};

/// RegisterMCRegInfo - Helper template for registering a target register info
/// implementation.  This invokes the static "Create" method on the class to
/// actually do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCRegInfo<FooMCRegInfo> X(TheFooTarget);
/// }
template <class MCRegisterInfoImpl> struct RegisterMCRegInfo {
  RegisterMCRegInfo(Target &T) {
    TargetRegistry::RegisterMCRegInfo(T, &Allocator);
  }

private:
  static MCRegisterInfo *Allocator(const Triple & /*TT*/) {
    return new MCRegisterInfoImpl();
  }
};

/// RegisterMCRegInfoFn - Helper template for registering a target register
/// info implementation.  This invokes the specified function to do the
/// construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCRegInfoFn X(TheFooTarget, TheFunction);
/// }
struct RegisterMCRegInfoFn {
  RegisterMCRegInfoFn(Target &T, Target::MCRegInfoCtorFnTy Fn) {
    TargetRegistry::RegisterMCRegInfo(T, Fn);
  }
};

/// RegisterMCSubtargetInfo - Helper template for registering a target
/// subtarget info implementation.  This invokes the static "Create" method
/// on the class to actually do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCSubtargetInfo<FooMCSubtargetInfo> X(TheFooTarget);
/// }
template <class MCSubtargetInfoImpl> struct RegisterMCSubtargetInfo {
  RegisterMCSubtargetInfo(Target &T) {
    TargetRegistry::RegisterMCSubtargetInfo(T, &Allocator);
  }

private:
  static MCSubtargetInfo *Allocator(const Triple & /*TT*/, StringRef /*CPU*/,
                                    StringRef /*FS*/) {
    return new MCSubtargetInfoImpl();
  }
};

/// RegisterMCSubtargetInfoFn - Helper template for registering a target
/// subtarget info implementation.  This invokes the specified function to
/// do the construction.  Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterMCSubtargetInfoFn X(TheFooTarget, TheFunction);
/// }
struct RegisterMCSubtargetInfoFn {
  RegisterMCSubtargetInfoFn(Target &T, Target::MCSubtargetInfoCtorFnTy Fn) {
    TargetRegistry::RegisterMCSubtargetInfo(T, Fn);
  }
};

/// RegisterTargetMachine - Helper template for registering a target machine
/// implementation, for use in the target machine initialization
/// function. Usage:
///
/// extern "C" void LLVMInitializeFooTarget() {
///   extern Target TheFooTarget;
///   RegisterTargetMachine<FooTargetMachine> X(TheFooTarget);
/// }
template <class TargetMachineImpl> struct RegisterTargetMachine {
  RegisterTargetMachine(Target &T) {
    TargetRegistry::RegisterTargetMachine(T, &Allocator);
  }

private:
  static TargetMachine *Allocator(const Target &T, const Triple &TT,
                                  StringRef CPU, StringRef FS,
                                  const TargetOptions &Options,
                                  std::optional<Reloc::Model> RM,
                                  std::optional<CodeModel::Model> CM,
                                  CodeGenOpt::Level OL, bool JIT) {
    return new TargetMachineImpl(T, TT, CPU, FS, Options, RM, CM, OL, JIT);
  }
};

/// RegisterMCAsmBackend - Helper template for registering a target specific
/// assembler backend. Usage:
///
/// extern "C" void LLVMInitializeFooMCAsmBackend() {
///   extern Target TheFooTarget;
///   RegisterMCAsmBackend<FooAsmBackend> X(TheFooTarget);
/// }
template <class MCAsmBackendImpl> struct RegisterMCAsmBackend {
  RegisterMCAsmBackend(Target &T) {
    TargetRegistry::RegisterMCAsmBackend(T, &Allocator);
  }

private:
  static MCAsmBackend *Allocator(const Target &T, const MCSubtargetInfo &STI,
                                 const MCRegisterInfo &MRI,
                                 const MCTargetOptions &Options) {
    return new MCAsmBackendImpl(T, STI, MRI);
  }
};

/// RegisterMCAsmParser - Helper template for registering a target specific
/// assembly parser, for use in the target machine initialization
/// function. Usage:
///
/// extern "C" void LLVMInitializeFooMCAsmParser() {
///   extern Target TheFooTarget;
///   RegisterMCAsmParser<FooAsmParser> X(TheFooTarget);
/// }
template <class MCAsmParserImpl> struct RegisterMCAsmParser {
  RegisterMCAsmParser(Target &T) {
    TargetRegistry::RegisterMCAsmParser(T, &Allocator);
  }

private:
  static MCTargetAsmParser *Allocator(const MCSubtargetInfo &STI,
                                      MCAsmParser &P, const MCInstrInfo &MII,
                                      const MCTargetOptions &Options) {
    return new MCAsmParserImpl(STI, P, MII, Options);
  }
};

/// RegisterAsmPrinter - Helper template for registering a target specific
/// assembly printer, for use in the target machine initialization
/// function. Usage:
///
/// extern "C" void LLVMInitializeFooAsmPrinter() {
///   extern Target TheFooTarget;
///   RegisterAsmPrinter<FooAsmPrinter> X(TheFooTarget);
/// }
template <class AsmPrinterImpl> struct RegisterAsmPrinter {
  RegisterAsmPrinter(Target &T) {
    TargetRegistry::RegisterAsmPrinter(T, &Allocator);
  }

private:
  static AsmPrinter *Allocator(TargetMachine &TM,
                               std::unique_ptr<MCStreamer> &&Streamer) {
    return new AsmPrinterImpl(TM, std::move(Streamer));
  }
};

/// RegisterMCCodeEmitter - Helper template for registering a target specific
/// machine code emitter, for use in the target initialization
/// function. Usage:
///
/// extern "C" void LLVMInitializeFooMCCodeEmitter() {
///   extern Target TheFooTarget;
///   RegisterMCCodeEmitter<FooCodeEmitter> X(TheFooTarget);
/// }
template <class MCCodeEmitterImpl> struct RegisterMCCodeEmitter {
  RegisterMCCodeEmitter(Target &T) {
    TargetRegistry::RegisterMCCodeEmitter(T, &Allocator);
  }

private:
  static MCCodeEmitter *Allocator(const MCInstrInfo & /*II*/,
                                  MCContext & /*Ctx*/) {
    return new MCCodeEmitterImpl();
  }
};

} // end namespace llvm

#endif // LLVM_MC_TARGETREGISTRY_H

// MC/MCSectionMachO.h
//===- MCSectionMachO.h - MachO Machine Code Sections -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionMachO class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONMACHO_H
#define LLVM_MC_MCSECTIONMACHO_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCSection.h"

namespace llvm {

/// This represents a section on a Mach-O system (used by Mac OS X).  On a Mac
/// system, these are also described in /usr/include/mach-o/loader.h.
class MCSectionMachO final : public MCSection {
  char SegmentName[16];  // Not necessarily null terminated!

  /// This is the SECTION_TYPE and SECTION_ATTRIBUTES field of a section, drawn
  /// from the enums below.
  unsigned TypeAndAttributes;

  /// The 'reserved2' field of a section, used to represent the size of stubs,
  /// for example.
  unsigned Reserved2;

  MCSectionMachO(StringRef Segment, StringRef Section, unsigned TAA,
                 unsigned reserved2, SectionKind K, MCSymbol *Begin);
  friend class MCContext;
public:

  StringRef getSegmentName() const {
    // SegmentName is not necessarily null terminated!
    if (SegmentName[15])
      return StringRef(SegmentName, 16);
    return StringRef(SegmentName);
  }

  unsigned getTypeAndAttributes() const { return TypeAndAttributes; }
  unsigned getStubSize() const { return Reserved2; }

  MachO::SectionType getType() const {
    return static_cast<MachO::SectionType>(TypeAndAttributes &
                                           MachO::SECTION_TYPE);
  }
  bool hasAttribute(unsigned Value) const {
    return (TypeAndAttributes & Value) != 0;
  }

  /// Parse the section specifier indicated by "Spec". This is a string that
  /// can appear after a .section directive in a mach-o flavored .s file.  If
  /// successful, this fills in the specified Out parameters and returns
  /// Error::success().  When an invalid section specifier is present, this
  /// returns an Error indicating the problem. If no TAA was parsed, TAA is
  /// not altered, and TAAParsed becomes false.
  static Error ParseSectionSpecifier(StringRef Spec,      // In.
                                     StringRef &Segment,  // Out.
                                     StringRef &Section,  // Out.
                                     unsigned &TAA,       // Out.
                                     bool &TAAParsed,     // Out.
                                     unsigned &StubSize); // Out.
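
  // For example (a sketch; the specifier below is the conventional Mach-O
  // text-section spelling):
  //
  //   StringRef Segment, Section;
  //   unsigned TAA = 0, StubSize = 0;
  //   bool TAAParsed = false;
  //   if (Error E = MCSectionMachO::ParseSectionSpecifier(
  //           "__TEXT,__text,regular,pure_instructions", Segment, Section,
  //           TAA, TAAParsed, StubSize))
  //     consumeError(std::move(E)); // or report it to the user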

  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override;
  bool useCodeAlign() const override;
  bool isVirtualSection() const override;

  static bool classof(const MCSection *S) {
    return S->getVariant() == SV_MachO;
  }
};

} // end namespace llvm

#endif

// MC/MCObjectFileInfo.h
//===-- llvm/MC/MCObjectFileInfo.h - Object File Info -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes common object file formats.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCOBJECTFILEINFO_H
#define LLVM_MC_MCOBJECTFILEINFO_H

#include "llvm/BinaryFormat/Swift.h"
#include "llvm/MC/MCSection.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/TargetParser/Triple.h"

#include <array>
#include <optional>

namespace llvm {
class MCContext;
class MCSection;

class MCObjectFileInfo {
protected:
  /// True if the target object file supports a weak_definition of constant 0
  /// for an omitted EH frame.
  bool SupportsWeakOmittedEHFrame = false;

  /// True if the target object file supports emitting a compact unwind section
  /// without an associated EH frame section.
  bool SupportsCompactUnwindWithoutEHFrame = false;

  /// OmitDwarfIfHaveCompactUnwind - True if the target object file
  /// supports having some functions with compact unwind and others with
  /// dwarf unwind.
  bool OmitDwarfIfHaveCompactUnwind = false;

  /// FDE CFI encoding. Controls the encoding of the begin label in the
  /// .eh_frame section. Unlike the LSDA encoding, personality encoding, and
  /// type encodings, this is something that the assembler just "knows" about
  /// its target.
  unsigned FDECFIEncoding = 0;

  /// Compact unwind encoding indicating that we should emit only an EH frame.
  unsigned CompactUnwindDwarfEHFrameOnly = 0;

  /// Section directive for standard text.
  MCSection *TextSection = nullptr;

  /// Section directive for standard data.
  MCSection *DataSection = nullptr;

  /// Section that is default initialized to zero.
  MCSection *BSSSection = nullptr;

  /// Section that is readonly and can contain arbitrary initialized data.
  /// Targets are not required to have a readonly section. If they don't,
  /// various bits of code will fall back to using the data section for
  /// constants.
  MCSection *ReadOnlySection = nullptr;

  /// If exception handling is supported by the target, this is the section the
  /// Language Specific Data Area information is emitted to.
  MCSection *LSDASection = nullptr;

  /// If exception handling is supported by the target and the target can
  /// support a compact representation of the CIE and FDE, this is the section
  /// to emit them into.
  MCSection *CompactUnwindSection = nullptr;

  // Dwarf sections for debug info.  If a target supports debug info, these must
  // be set.
  MCSection *DwarfAbbrevSection = nullptr;
  MCSection *DwarfInfoSection = nullptr;
  MCSection *DwarfLineSection = nullptr;
  MCSection *DwarfLineStrSection = nullptr;
  MCSection *DwarfFrameSection = nullptr;
  MCSection *DwarfPubTypesSection = nullptr;
  const MCSection *DwarfDebugInlineSection = nullptr;
  MCSection *DwarfStrSection = nullptr;
  MCSection *DwarfLocSection = nullptr;
  MCSection *DwarfARangesSection = nullptr;
  MCSection *DwarfRangesSection = nullptr;
  MCSection *DwarfMacinfoSection = nullptr;
  MCSection *DwarfMacroSection = nullptr;
  // The pubnames section is no longer generated by default.  The generation
  // can be enabled by a compiler flag.
  MCSection *DwarfPubNamesSection = nullptr;

  /// Accelerator table sections. DwarfDebugNamesSection is the DWARF v5
  /// accelerator table, while DwarfAccelNamesSection, DwarfAccelObjCSection,
  /// DwarfAccelNamespaceSection, DwarfAccelTypesSection are pre-DWARF v5
  /// extensions.
  MCSection *DwarfDebugNamesSection = nullptr;
  MCSection *DwarfAccelNamesSection = nullptr;
  MCSection *DwarfAccelObjCSection = nullptr;
  MCSection *DwarfAccelNamespaceSection = nullptr;
  MCSection *DwarfAccelTypesSection = nullptr;

  // These are used for the Fission separate debug information files.
  MCSection *DwarfInfoDWOSection = nullptr;
  MCSection *DwarfTypesDWOSection = nullptr;
  MCSection *DwarfAbbrevDWOSection = nullptr;
  MCSection *DwarfStrDWOSection = nullptr;
  MCSection *DwarfLineDWOSection = nullptr;
  MCSection *DwarfLocDWOSection = nullptr;
  MCSection *DwarfStrOffDWOSection = nullptr;
  MCSection *DwarfMacinfoDWOSection = nullptr;
  MCSection *DwarfMacroDWOSection = nullptr;

  /// The DWARF v5 string offset and address table sections.
  MCSection *DwarfStrOffSection = nullptr;
  MCSection *DwarfAddrSection = nullptr;
  /// The DWARF v5 range list section.
  MCSection *DwarfRnglistsSection = nullptr;
  /// The DWARF v5 locations list section.
  MCSection *DwarfLoclistsSection = nullptr;

  /// The DWARF v5 range and location list sections for fission.
  MCSection *DwarfRnglistsDWOSection = nullptr;
  MCSection *DwarfLoclistsDWOSection = nullptr;

  // These are for Fission DWP files.
  MCSection *DwarfCUIndexSection = nullptr;
  MCSection *DwarfTUIndexSection = nullptr;

  /// Section for newer gnu pubnames.
  MCSection *DwarfGnuPubNamesSection = nullptr;
  /// Section for newer gnu pubtypes.
  MCSection *DwarfGnuPubTypesSection = nullptr;

  // Section for Swift AST
  MCSection *DwarfSwiftASTSection = nullptr;

  MCSection *COFFDebugSymbolsSection = nullptr;
  MCSection *COFFDebugTypesSection = nullptr;
  MCSection *COFFGlobalTypeHashesSection = nullptr;

  /// Extra TLS Variable Data section.
  ///
  /// If the target needs to put additional information for a TLS variable,
  /// it'll go here.
  MCSection *TLSExtraDataSection = nullptr;

  /// Section directive for Thread Local data. ELF, MachO, COFF, and Wasm.
  MCSection *TLSDataSection = nullptr; // Defaults to ".tdata".

  /// Section directive for Thread Local uninitialized data.
  ///
  /// Null if this target doesn't support a BSS section. ELF and MachO only.
  MCSection *TLSBSSSection = nullptr; // Defaults to ".tbss".

  /// StackMap section.
  MCSection *StackMapSection = nullptr;

  /// FaultMap section.
  MCSection *FaultMapSection = nullptr;

  /// Remarks section.
  MCSection *RemarksSection = nullptr;

  /// EH frame section.
  ///
  /// It is initialized on demand so it can be overwritten (with uniquing).
  MCSection *EHFrameSection = nullptr;

  /// Section containing metadata on function stack sizes.
  MCSection *StackSizesSection = nullptr;

  /// Section for pseudo probe information used by AutoFDO
  MCSection *PseudoProbeSection = nullptr;
  MCSection *PseudoProbeDescSection = nullptr;

  // Section for metadata of llvm statistics.
  MCSection *LLVMStatsSection = nullptr;

  // ELF specific sections.
  MCSection *DataRelROSection = nullptr;
  MCSection *MergeableConst4Section = nullptr;
  MCSection *MergeableConst8Section = nullptr;
  MCSection *MergeableConst16Section = nullptr;
  MCSection *MergeableConst32Section = nullptr;

  // MachO specific sections.

  /// Section for thread local structure information.
  ///
  /// Contains the source code name of the variable, visibility and a pointer to
  /// the initial value (.tdata or .tbss).
  MCSection *TLSTLVSection = nullptr; // Defaults to ".tlv".

  /// Section for thread local data initialization functions.
  const MCSection *TLSThreadInitSection = nullptr; // Defaults to ".thread_init_func".

  MCSection *CStringSection = nullptr;
  MCSection *UStringSection = nullptr;
  MCSection *TextCoalSection = nullptr;
  MCSection *ConstTextCoalSection = nullptr;
  MCSection *ConstDataSection = nullptr;
  MCSection *DataCoalSection = nullptr;
  MCSection *ConstDataCoalSection = nullptr;
  MCSection *DataCommonSection = nullptr;
  MCSection *DataBSSSection = nullptr;
  MCSection *FourByteConstantSection = nullptr;
  MCSection *EightByteConstantSection = nullptr;
  MCSection *SixteenByteConstantSection = nullptr;
  MCSection *LazySymbolPointerSection = nullptr;
  MCSection *NonLazySymbolPointerSection = nullptr;
  MCSection *ThreadLocalPointerSection = nullptr;
  MCSection *AddrSigSection = nullptr;

  /// COFF specific sections.
  MCSection *DrectveSection = nullptr;
  MCSection *PDataSection = nullptr;
  MCSection *XDataSection = nullptr;
  MCSection *SXDataSection = nullptr;
  MCSection *GEHContSection = nullptr;
  MCSection *GFIDsSection = nullptr;
  MCSection *GIATsSection = nullptr;
  MCSection *GLJMPSection = nullptr;

  // GOFF specific sections.
  MCSection *PPA1Section = nullptr;
  MCSection *ADASection = nullptr;

  // XCOFF specific sections
  MCSection *TOCBaseSection = nullptr;
  MCSection *ReadOnly8Section = nullptr;
  MCSection *ReadOnly16Section = nullptr;

  // Swift5 Reflection Data Sections
  std::array<MCSection *, binaryformat::Swift5ReflectionSectionKind::last>
      Swift5ReflectionSections = {};

public:
  void initMCObjectFileInfo(MCContext &MCCtx, bool PIC,
                            bool LargeCodeModel = false);
  virtual ~MCObjectFileInfo();
  MCContext &getContext() const { return *Ctx; }

  bool getSupportsWeakOmittedEHFrame() const {
    return SupportsWeakOmittedEHFrame;
  }
  bool getSupportsCompactUnwindWithoutEHFrame() const {
    return SupportsCompactUnwindWithoutEHFrame;
  }
  bool getOmitDwarfIfHaveCompactUnwind() const {
    return OmitDwarfIfHaveCompactUnwind;
  }

  unsigned getFDEEncoding() const { return FDECFIEncoding; }

  unsigned getCompactUnwindDwarfEHFrameOnly() const {
    return CompactUnwindDwarfEHFrameOnly;
  }

  virtual unsigned getTextSectionAlignment() const { return 4; }
  MCSection *getTextSection() const { return TextSection; }
  MCSection *getDataSection() const { return DataSection; }
  MCSection *getBSSSection() const { return BSSSection; }
  MCSection *getReadOnlySection() const { return ReadOnlySection; }
  MCSection *getLSDASection() const { return LSDASection; }
  MCSection *getCompactUnwindSection() const { return CompactUnwindSection; }
  MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
  MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
  MCSection *getDwarfInfoSection(uint64_t Hash) const {
    return getDwarfComdatSection(".debug_info", Hash);
  }
  MCSection *getDwarfLineSection() const { return DwarfLineSection; }
  MCSection *getDwarfLineStrSection() const { return DwarfLineStrSection; }
  MCSection *getDwarfFrameSection() const { return DwarfFrameSection; }
  MCSection *getDwarfPubNamesSection() const { return DwarfPubNamesSection; }
  MCSection *getDwarfPubTypesSection() const { return DwarfPubTypesSection; }
  MCSection *getDwarfGnuPubNamesSection() const {
    return DwarfGnuPubNamesSection;
  }
  MCSection *getDwarfGnuPubTypesSection() const {
    return DwarfGnuPubTypesSection;
  }
  const MCSection *getDwarfDebugInlineSection() const {
    return DwarfDebugInlineSection;
  }
  MCSection *getDwarfStrSection() const { return DwarfStrSection; }
  MCSection *getDwarfLocSection() const { return DwarfLocSection; }
  MCSection *getDwarfARangesSection() const { return DwarfARangesSection; }
  MCSection *getDwarfRangesSection() const { return DwarfRangesSection; }
  MCSection *getDwarfRnglistsSection() const { return DwarfRnglistsSection; }
  MCSection *getDwarfLoclistsSection() const { return DwarfLoclistsSection; }
  MCSection *getDwarfMacinfoSection() const { return DwarfMacinfoSection; }
  MCSection *getDwarfMacroSection() const { return DwarfMacroSection; }

  MCSection *getDwarfDebugNamesSection() const {
    return DwarfDebugNamesSection;
  }
  MCSection *getDwarfAccelNamesSection() const {
    return DwarfAccelNamesSection;
  }
  MCSection *getDwarfAccelObjCSection() const { return DwarfAccelObjCSection; }
  MCSection *getDwarfAccelNamespaceSection() const {
    return DwarfAccelNamespaceSection;
  }
  MCSection *getDwarfAccelTypesSection() const {
    return DwarfAccelTypesSection;
  }
  MCSection *getDwarfInfoDWOSection() const { return DwarfInfoDWOSection; }
  MCSection *getDwarfTypesSection(uint64_t Hash) const {
    return getDwarfComdatSection(".debug_types", Hash);
  }
  MCSection *getDwarfTypesDWOSection() const { return DwarfTypesDWOSection; }
  MCSection *getDwarfAbbrevDWOSection() const { return DwarfAbbrevDWOSection; }
  MCSection *getDwarfStrDWOSection() const { return DwarfStrDWOSection; }
  MCSection *getDwarfLineDWOSection() const { return DwarfLineDWOSection; }
  MCSection *getDwarfLocDWOSection() const { return DwarfLocDWOSection; }
  MCSection *getDwarfStrOffDWOSection() const { return DwarfStrOffDWOSection; }
  MCSection *getDwarfStrOffSection() const { return DwarfStrOffSection; }
  MCSection *getDwarfAddrSection() const { return DwarfAddrSection; }
  MCSection *getDwarfRnglistsDWOSection() const {
    return DwarfRnglistsDWOSection;
  }
  MCSection *getDwarfLoclistsDWOSection() const {
    return DwarfLoclistsDWOSection;
  }
  MCSection *getDwarfMacroDWOSection() const { return DwarfMacroDWOSection; }
  MCSection *getDwarfMacinfoDWOSection() const {
    return DwarfMacinfoDWOSection;
  }
  MCSection *getDwarfCUIndexSection() const { return DwarfCUIndexSection; }
  MCSection *getDwarfTUIndexSection() const { return DwarfTUIndexSection; }
  MCSection *getDwarfSwiftASTSection() const { return DwarfSwiftASTSection; }

  MCSection *getCOFFDebugSymbolsSection() const {
    return COFFDebugSymbolsSection;
  }
  MCSection *getCOFFDebugTypesSection() const {
    return COFFDebugTypesSection;
  }
  MCSection *getCOFFGlobalTypeHashesSection() const {
    return COFFGlobalTypeHashesSection;
  }

  MCSection *getTLSExtraDataSection() const { return TLSExtraDataSection; }
  const MCSection *getTLSDataSection() const { return TLSDataSection; }
  MCSection *getTLSBSSSection() const { return TLSBSSSection; }

  MCSection *getStackMapSection() const { return StackMapSection; }
  MCSection *getFaultMapSection() const { return FaultMapSection; }
  MCSection *getRemarksSection() const { return RemarksSection; }

  MCSection *getStackSizesSection(const MCSection &TextSec) const;

  MCSection *getBBAddrMapSection(const MCSection &TextSec) const;

  MCSection *getKCFITrapSection(const MCSection &TextSec) const;

  MCSection *getPseudoProbeSection(const MCSection &TextSec) const;

  MCSection *getPseudoProbeDescSection(StringRef FuncName) const;

  MCSection *getLLVMStatsSection() const;

  MCSection *getPCSection(StringRef Name, const MCSection *TextSec) const;

  // ELF specific sections.
  MCSection *getDataRelROSection() const { return DataRelROSection; }
  const MCSection *getMergeableConst4Section() const {
    return MergeableConst4Section;
  }
  const MCSection *getMergeableConst8Section() const {
    return MergeableConst8Section;
  }
  const MCSection *getMergeableConst16Section() const {
    return MergeableConst16Section;
  }
  const MCSection *getMergeableConst32Section() const {
    return MergeableConst32Section;
  }

  // MachO specific sections.
  const MCSection *getTLSTLVSection() const { return TLSTLVSection; }
  const MCSection *getTLSThreadInitSection() const {
    return TLSThreadInitSection;
  }
  const MCSection *getCStringSection() const { return CStringSection; }
  const MCSection *getUStringSection() const { return UStringSection; }
  MCSection *getTextCoalSection() const { return TextCoalSection; }
  const MCSection *getConstTextCoalSection() const {
    return ConstTextCoalSection;
  }
  const MCSection *getConstDataSection() const { return ConstDataSection; }
  const MCSection *getDataCoalSection() const { return DataCoalSection; }
  const MCSection *getConstDataCoalSection() const {
    return ConstDataCoalSection;
  }
  const MCSection *getDataCommonSection() const { return DataCommonSection; }
  MCSection *getDataBSSSection() const { return DataBSSSection; }
  const MCSection *getFourByteConstantSection() const {
    return FourByteConstantSection;
  }
  const MCSection *getEightByteConstantSection() const {
    return EightByteConstantSection;
  }
  const MCSection *getSixteenByteConstantSection() const {
    return SixteenByteConstantSection;
  }
  MCSection *getLazySymbolPointerSection() const {
    return LazySymbolPointerSection;
  }
  MCSection *getNonLazySymbolPointerSection() const {
    return NonLazySymbolPointerSection;
  }
  MCSection *getThreadLocalPointerSection() const {
    return ThreadLocalPointerSection;
  }
  MCSection *getAddrSigSection() const { return AddrSigSection; }

  // COFF specific sections.
  MCSection *getDrectveSection() const { return DrectveSection; }
  MCSection *getPDataSection() const { return PDataSection; }
  MCSection *getXDataSection() const { return XDataSection; }
  MCSection *getSXDataSection() const { return SXDataSection; }
  MCSection *getGEHContSection() const { return GEHContSection; }
  MCSection *getGFIDsSection() const { return GFIDsSection; }
  MCSection *getGIATsSection() const { return GIATsSection; }
  MCSection *getGLJMPSection() const { return GLJMPSection; }

  // GOFF specific sections.
  MCSection *getPPA1Section() const { return PPA1Section; }
  MCSection *getADASection() const { return ADASection; }

  // XCOFF specific sections
  MCSection *getTOCBaseSection() const { return TOCBaseSection; }

  MCSection *getEHFrameSection() const { return EHFrameSection; }

  bool isPositionIndependent() const { return PositionIndependent; }

  // Swift5 Reflection Data Sections
  MCSection *getSwift5ReflectionSection(
      llvm::binaryformat::Swift5ReflectionSectionKind ReflSectionKind) {
    return ReflSectionKind !=
                   llvm::binaryformat::Swift5ReflectionSectionKind::unknown
               ? Swift5ReflectionSections[ReflSectionKind]
               : nullptr;
  }
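
  // Illustrative sketch: looking up one reflection section.  `MOFI` stands in
  // for an initialized MCObjectFileInfo, and the `fieldmd` enumerator is
  // assumed to be a member of Swift5ReflectionSectionKind.
  //
  // \code
  //   MCSection *S = MOFI.getSwift5ReflectionSection(
  //       llvm::binaryformat::Swift5ReflectionSectionKind::fieldmd);
  //   // S is null when the requested kind is `unknown`.
  // \endcode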

private:
  bool PositionIndependent = false;
  MCContext *Ctx = nullptr;
  VersionTuple SDKVersion;
  std::optional<Triple> DarwinTargetVariantTriple;
  VersionTuple DarwinTargetVariantSDKVersion;

  void initMachOMCObjectFileInfo(const Triple &T);
  void initELFMCObjectFileInfo(const Triple &T, bool Large);
  void initGOFFMCObjectFileInfo(const Triple &T);
  void initCOFFMCObjectFileInfo(const Triple &T);
  void initSPIRVMCObjectFileInfo(const Triple &T);
  void initWasmMCObjectFileInfo(const Triple &T);
  void initXCOFFMCObjectFileInfo(const Triple &T);
  void initDXContainerObjectFileInfo(const Triple &T);
  MCSection *getDwarfComdatSection(const char *Name, uint64_t Hash) const;

public:
  void setSDKVersion(const VersionTuple &TheSDKVersion) {
    SDKVersion = TheSDKVersion;
  }

  const VersionTuple &getSDKVersion() const { return SDKVersion; }

  void setDarwinTargetVariantTriple(const Triple &T) {
    DarwinTargetVariantTriple = T;
  }

  const Triple *getDarwinTargetVariantTriple() const {
    return DarwinTargetVariantTriple ? &*DarwinTargetVariantTriple : nullptr;
  }

  void setDarwinTargetVariantSDKVersion(const VersionTuple &TheSDKVersion) {
    DarwinTargetVariantSDKVersion = TheSDKVersion;
  }

  const VersionTuple &getDarwinTargetVariantSDKVersion() const {
    return DarwinTargetVariantSDKVersion;
  }
};
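
// Illustrative sketch of typical setup and section lookup; `Ctx` is assumed to
// be an MCContext already constructed with the target's MCAsmInfo,
// MCRegisterInfo, and MCSubtargetInfo.
//
// \code
//   MCObjectFileInfo MOFI;
//   MOFI.initMCObjectFileInfo(Ctx, /*PIC=*/true);
//   MCSection *Text = MOFI.getTextSection();
//   MCSection *ReadOnly = MOFI.getReadOnlySection();
// \endcode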

} // end namespace llvm

#endif
//===- MCAsmMacro.h - Assembly Macros ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMMACRO_H
#define LLVM_MC_MCASMMACRO_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/SMLoc.h"
#include <vector>

namespace llvm {

/// Target independent representation for an assembler token.
class AsmToken {
public:
  enum TokenKind {
    // Markers
    Eof, Error,

    // String values.
    Identifier,
    String,

    // Integer values.
    Integer,
    BigNum, // larger than 64 bits

    // Real values.
    Real,

    // Comments
    Comment,
    HashDirective,
    // No-value.
    EndOfStatement,
    Colon,
    Space,
    Plus, Minus, Tilde,
    Slash,     // '/'
    BackSlash, // '\'
    LParen, RParen, LBrac, RBrac, LCurly, RCurly,
    Question, Star, Dot, Comma, Dollar, Equal, EqualEqual,

    Pipe, PipePipe, Caret,
    Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
    Less, LessEqual, LessLess, LessGreater,
    Greater, GreaterEqual, GreaterGreater, At, MinusGreater,

    // MIPS unary expression operators such as %neg.
    PercentCall16, PercentCall_Hi, PercentCall_Lo, PercentDtprel_Hi,
    PercentDtprel_Lo, PercentGot, PercentGot_Disp, PercentGot_Hi, PercentGot_Lo,
    PercentGot_Ofst, PercentGot_Page, PercentGottprel, PercentGp_Rel, PercentHi,
    PercentHigher, PercentHighest, PercentLo, PercentNeg, PercentPcrel_Hi,
    PercentPcrel_Lo, PercentTlsgd, PercentTlsldm, PercentTprel_Hi,
    PercentTprel_Lo
  };

private:
  TokenKind Kind = TokenKind::Eof;

  /// A reference to the entire token contents; this is always a pointer into
  /// a memory buffer owned by the source manager.
  StringRef Str;

  APInt IntVal;

public:
  AsmToken() = default;
  AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
      : Kind(Kind), Str(Str), IntVal(std::move(IntVal)) {}
  AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
      : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}

  TokenKind getKind() const { return Kind; }
  bool is(TokenKind K) const { return Kind == K; }
  bool isNot(TokenKind K) const { return Kind != K; }

  SMLoc getLoc() const;
  SMLoc getEndLoc() const;
  SMRange getLocRange() const;

  /// Get the contents of a string token (without quotes).
  StringRef getStringContents() const {
    assert(Kind == String && "This token isn't a string!");
    return Str.slice(1, Str.size() - 1);
  }

  /// Get the identifier string for the current token, which should be an
  /// identifier or a string. This gets the portion of the string which should
  /// be used as the identifier, e.g., it does not include the quotes on
  /// strings.
  StringRef getIdentifier() const {
    if (Kind == Identifier)
      return getString();
    return getStringContents();
  }

  /// Get the string for the current token, this includes all characters (for
  /// example, the quotes on strings) in the token.
  ///
  /// The returned StringRef points into the source manager's memory buffer, and
  /// is safe to store across calls to Lex().
  StringRef getString() const { return Str; }

  // FIXME: Don't compute this in advance, it makes every token larger, and is
  // also not generally what we want (it is nicer for recovery etc. to lex 123br
  // as a single token, then diagnose as an invalid number).
  int64_t getIntVal() const {
    assert(Kind == Integer && "This token isn't an integer!");
    return IntVal.getZExtValue();
  }

  APInt getAPIntVal() const {
    assert((Kind == Integer || Kind == BigNum) &&
           "This token isn't an integer!");
    return IntVal;
  }

  void dump(raw_ostream &OS) const;
};
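
// Illustrative sketch: constructing and inspecting tokens by hand (a lexer
// would normally produce them from a source buffer owned by the source
// manager).
//
// \code
//   AsmToken Ident(AsmToken::Identifier, "main");
//   if (Ident.is(AsmToken::Identifier))
//     StringRef Name = Ident.getIdentifier(); // "main"
//
//   AsmToken Num(AsmToken::Integer, "42", 42);
//   int64_t V = Num.getIntVal(); // 42
// \endcode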

struct MCAsmMacroParameter {
  StringRef Name;
  std::vector<AsmToken> Value;
  bool Required = false;
  bool Vararg = false;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { dump(dbgs()); }
  LLVM_DUMP_METHOD void dump(raw_ostream &OS) const;
#endif
};

typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
struct MCAsmMacro {
  StringRef Name;
  StringRef Body;
  MCAsmMacroParameters Parameters;
  std::vector<std::string> Locals;
  bool IsFunction = false;

public:
  MCAsmMacro(StringRef N, StringRef B, MCAsmMacroParameters P)
      : Name(N), Body(B), Parameters(std::move(P)) {}
  MCAsmMacro(StringRef N, StringRef B, MCAsmMacroParameters P,
             std::vector<std::string> L, bool F)
      : Name(N), Body(B), Parameters(std::move(P)), Locals(std::move(L)),
        IsFunction(F) {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() const { dump(dbgs()); }
  LLVM_DUMP_METHOD void dump(raw_ostream &OS) const;
#endif
};
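
// Illustrative sketch: building a one-parameter macro by hand; in practice
// definitions come from the assembler parsing a .macro directive.
//
// \code
//   MCAsmMacroParameter Reg;
//   Reg.Name = "reg";
//   MCAsmMacro Save("save", "  push \\reg\n", {Reg});
// \endcode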
} // namespace llvm

#endif
//===-- llvm/MC/MCAsmInfo.h - Asm info --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a class to be used as the basis for target specific
// asm writers.  This class primarily takes care of global printing constants,
// which are used in very similar ways across all targets.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFO_H
#define LLVM_MC_MCASMINFO_H

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCTargetOptions.h"
#include <vector>

namespace llvm {

class MCContext;
class MCCFIInstruction;
class MCExpr;
class MCSection;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;

namespace WinEH {

enum class EncodingType {
  Invalid, /// Invalid
  Alpha,   /// Windows Alpha
  Alpha64, /// Windows AXP64
  ARM,     /// Windows NT (Windows on ARM)
  CE,      /// Windows CE ARM, PowerPC, SH3, SH4
  Itanium, /// Windows x64, Windows Itanium (IA-64)
  X86,     /// Windows x86, uses no CFI, just EH tables
  MIPS = Alpha,
};

} // end namespace WinEH

namespace LCOMM {

enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };

} // end namespace LCOMM

/// This class is intended to be used as a base class for asm
/// properties and features specific to the target.
class MCAsmInfo {
public:
  /// Assembly character literal syntax types.
  enum AsmCharLiteralSyntax {
    ACLS_Unknown, /// Unknown; character literals not used by LLVM for this
                  /// target.
    ACLS_SingleQuotePrefix, /// The desired character is prefixed by a single
                            /// quote, e.g., `'A`.
  };

protected:
  //===------------------------------------------------------------------===//
  // Properties to be set by the target writer, used to configure asm printer.
  //

  /// Code pointer size in bytes.  Default is 4.
  unsigned CodePointerSize = 4;

  /// Size of the stack slot reserved for callee-saved registers, in bytes.
  /// Default is same as pointer size.
  unsigned CalleeSaveStackSlotSize = 4;

  /// True if target is little endian.  Default is true.
  bool IsLittleEndian = true;

  /// True if the target's stack grows up.  Default is false.
  bool StackGrowsUp = false;

  /// True if this target has the MachO .subsections_via_symbols directive.
  /// Default is false.
  bool HasSubsectionsViaSymbols = false;

  /// True if this is a MachO target that supports the macho-specific .zerofill
  /// directive for emitting BSS Symbols.  Default is false.
  bool HasMachoZeroFillDirective = false;

  /// True if this is a MachO target that supports the macho-specific .tbss
  /// directive for emitting thread local BSS Symbols.  Default is false.
  bool HasMachoTBSSDirective = false;

  /// True if this is a non-GNU COFF target. The COFF port of the GNU linker
  /// doesn't handle associative comdats in the way that we would like to use
  /// them.
  bool HasCOFFAssociativeComdats = false;

  /// True if this is a non-GNU COFF target. For GNU targets, we don't generate
  /// constants into comdat sections.
  bool HasCOFFComdatConstants = false;

  /// True if this is an XCOFF target that supports visibility attributes as
  /// part of .global, .weak, .extern, and .comm. Default is false.
  bool HasVisibilityOnlyWithLinkage = false;

  /// This is the maximum possible length of an instruction, which is needed to
  /// compute the size of an inline asm.  Defaults to 4.
  unsigned MaxInstLength = 4;

  /// Every possible instruction length is a multiple of this value.  Factored
  /// out in .debug_frame and .debug_line.  Defaults to 1.
  unsigned MinInstAlignment = 1;

  /// The '$' token, when not referencing an identifier or constant, refers to
  /// the current PC.  Defaults to false.
  bool DollarIsPC = false;

  /// Allow '.' token, when not referencing an identifier or constant, to refer
  /// to the current PC. Defaults to true.
  bool DotIsPC = true;

  /// Whether the '*' token refers to the current PC. This is used for the
  /// HLASM dialect.
  bool StarIsPC = false;

  /// This string, if specified, is used to separate instructions from each
  /// other when on the same line.  Defaults to ';'
  const char *SeparatorString;

  /// This indicates the comment string used by the assembler.  Defaults to
  /// "#"
  StringRef CommentString;

  /// This indicates whether the comment string is only accepted as a comment
  /// at the beginning of statements. Defaults to false.
  bool RestrictCommentStringToStartOfStatement = false;

  /// This indicates whether to allow additional "comment strings" to be lexed
  /// as a comment. Setting this attribute to true, will ensure that C-style
  /// line comments (// ..), C-style block comments (/* .. */), and "#" are
  /// all treated as comments in addition to the string specified by the
  /// CommentString attribute.
  /// Default is true.
  bool AllowAdditionalComments = true;

  /// Should we emit the '\t' as the starting indentation marker for GNU inline
  /// asm statements. Defaults to true.
  bool EmitGNUAsmStartIndentationMarker = true;

  /// This is appended to emitted labels.  Defaults to ":"
  const char *LabelSuffix;

  /// Emit labels in purely upper case. Defaults to false.
  bool EmitLabelsInUpperCase = false;

  // Print the EH begin symbol with an assignment. Defaults to false.
  bool UseAssignmentForEHBegin = false;

  // Do we need to create a local symbol for .size?
  bool NeedsLocalForSize = false;

  /// This prefix is used for globals like constant pool entries that are
  /// completely private to the .s file and should not have names in the .o
  /// file.  Defaults to "L"
  StringRef PrivateGlobalPrefix;

  /// This prefix is used for labels for basic blocks. Defaults to the same as
  /// PrivateGlobalPrefix.
  StringRef PrivateLabelPrefix;

  /// This prefix is used for symbols that should be passed through the
  /// assembler but be removed by the linker.  This is 'l' on Darwin, currently
  /// used for some ObjC metadata.  The default of "" means that for this system
  /// a plain private symbol should be used.  Defaults to "".
  StringRef LinkerPrivateGlobalPrefix;

  /// If these are nonempty, they contain a directive to emit before and after
  /// an inline assembly statement.  Defaults to "#APP\n", "#NO_APP\n"
  const char *InlineAsmStart;
  const char *InlineAsmEnd;

  /// These are assembly directives that tell the assembler to interpret the
  /// following instructions differently.  Defaults to ".code16", ".code32",
  /// ".code64".
  const char *Code16Directive;
  const char *Code32Directive;
  const char *Code64Directive;

  /// Which dialect of an assembler variant to use.  Defaults to 0
  unsigned AssemblerDialect = 0;

  /// This is true if the assembler allows @ characters in symbol names.
  /// Defaults to false.
  bool AllowAtInName = false;

  /// This is true if the assembler allows the "?" character at the start of
  /// of a string to be lexed as an AsmToken::Identifier.
  /// If the AsmLexer determines that the string can be lexed as a possible
  /// comment, setting this option will have no effect, and the string will
  /// still be lexed as a comment.
  bool AllowQuestionAtStartOfIdentifier = false;

  /// This is true if the assembler allows the "$" character at the start of
  /// of a string to be lexed as an AsmToken::Identifier.
  /// If the AsmLexer determines that the string can be lexed as a possible
  /// comment, setting this option will have no effect, and the string will
  /// still be lexed as a comment.
  bool AllowDollarAtStartOfIdentifier = false;

  /// This is true if the assembler allows the "@" character at the start of
  /// a string to be lexed as an AsmToken::Identifier.
  /// If the AsmLexer determines that the string can be lexed as a possible
  /// comment, setting this option will have no effect, and the string will
  /// still be lexed as a comment.
  bool AllowAtAtStartOfIdentifier = false;

  /// This is true if the assembler allows the "#" character at the start of
  /// a string to be lexed as an AsmToken::Identifier.
  /// If the AsmLexer determines that the string can be lexed as a possible
  /// comment, setting this option will have no effect, and the string will
  /// still be lexed as a comment.
  bool AllowHashAtStartOfIdentifier = false;

  /// If this is true, symbol names with invalid characters will be printed in
  /// quotes.
  bool SupportsQuotedNames = true;

  /// This is true if data region markers should be printed as
  /// ".data_region/.end_data_region" directives. If false, use "$d/$a" labels
  /// instead.
  bool UseDataRegionDirectives = false;

  /// True if .align is to be used for alignment. Only power-of-two
  /// alignment is supported.
  bool UseDotAlignForAlignment = false;

  /// True if the target supports LEB128 directives.
  bool HasLEB128Directives = true;

  //===--- Data Emission Directives -------------------------------------===//

  /// This should be set to the directive used to get some number of zero (and
  /// non-zero if supported by the directive) bytes emitted to the current
  /// section. Common cases are "\t.zero\t" and "\t.space\t". Defaults to
  /// "\t.zero\t"
  const char *ZeroDirective;

  /// This should be set to true if the zero directive supports a value to emit
  /// other than zero. If this is set to false, the Data*bitsDirective's will be
  /// used to emit these bytes. Defaults to true.
  bool ZeroDirectiveSupportsNonZeroValue = true;

  /// This directive allows emission of an ascii string with the standard C
  /// escape characters embedded into it.  If a target doesn't support this, it
  /// can be set to null. Defaults to "\t.ascii\t"
  const char *AsciiDirective;

  /// If not null, this allows for special handling of zero terminated strings
  /// on this target.  This is commonly supported as ".asciz".  If a target
  /// doesn't support this, it can be set to null.  Defaults to "\t.asciz\t"
  const char *AscizDirective;

  /// This directive accepts a comma-separated list of bytes for emission as a
  /// string of bytes.  For targets that do not support this, it shall be set to
  /// null.  Defaults to null.
  const char *ByteListDirective = nullptr;

  /// This directive allows emission of a zero-terminated ascii string without
  /// the standard C escape characters embedded into it.  If a target doesn't
  /// support this, it can be set to null. Defaults to null.
  const char *PlainStringDirective = nullptr;

  /// Form used for character literals in the assembly syntax.  Useful for
  /// producing strings as byte lists.  If a target does not use or support
  /// this, it shall be set to ACLS_Unknown.  Defaults to ACLS_Unknown.
  AsmCharLiteralSyntax CharacterLiteralSyntax = ACLS_Unknown;

  /// These directives are used to output some unit of integer data to the
  /// current section.  If a data directive is set to null, smaller data
  /// directives will be used to emit the large sizes.  Defaults to "\t.byte\t",
  /// "\t.short\t", "\t.long\t", "\t.quad\t"
  const char *Data8bitsDirective;
  const char *Data16bitsDirective;
  const char *Data32bitsDirective;
  const char *Data64bitsDirective;

  /// True if data directives support signed values
  bool SupportsSignedData = true;

  /// If non-null, a directive that is used to emit a word which should be
  /// relocated as a 64-bit GP-relative offset, e.g. .gpdword on Mips.  Defaults
  /// to nullptr.
  const char *GPRel64Directive = nullptr;

  /// If non-null, a directive that is used to emit a word which should be
  /// relocated as a 32-bit GP-relative offset, e.g. .gpword on Mips or .gprel32
  /// on Alpha.  Defaults to nullptr.
  const char *GPRel32Directive = nullptr;

  /// If non-null, directives that are used to emit a word/dword which should
  /// be relocated as a 32/64-bit DTP/TP-relative offset, e.g. .dtprelword/
  /// .dtpreldword/.tprelword/.tpreldword on Mips.
  const char *DTPRel32Directive = nullptr;
  const char *DTPRel64Directive = nullptr;
  const char *TPRel32Directive = nullptr;
  const char *TPRel64Directive = nullptr;

  /// This is true if this target uses "Sun Style" syntax for section switching
  /// ("#alloc,#write" etc) instead of the normal ELF syntax (,"a,w") in
  /// .section directives.  Defaults to false.
  bool SunStyleELFSectionSwitchSyntax = false;

  /// This is true if this target uses the ELF '.section' directive before the
  /// '.bss' one. It's used for PPC/Linux, which doesn't support a bare '.bss'
  /// directive.  Defaults to false.
  bool UsesELFSectionDirectiveForBSS = false;

  bool NeedsDwarfSectionOffsetDirective = false;

  //===--- Alignment Information ----------------------------------------===//

  /// If this is true (the default) then the asmprinter emits ".align N"
  /// directives, where N is the number of bytes to align to.  Otherwise, it
  /// emits ".align log2(N)", e.g. 3 to align to an 8 byte boundary.  Defaults
  /// to true.
  bool AlignmentIsInBytes = true;

  /// If non-zero, this is used to fill the executable space created as the
  /// result of an alignment directive.  Defaults to 0.
  unsigned TextAlignFillValue = 0;

  //===--- Global Variable Emission Directives --------------------------===//

  /// This is the directive used to declare a global entity. Defaults to
  /// ".globl".
  const char *GlobalDirective;

  /// True if the expression
  ///   .long f - g
  /// uses a relocation but it can be suppressed by writing
  ///   a = f - g
  ///   .long a
  bool SetDirectiveSuppressesReloc = false;

  /// False if the assembler requires that we use
  /// \code
  ///   Lc = a - b
  ///   .long Lc
  /// \endcode
  ///
  /// instead of
  ///
  /// \code
  ///   .long a - b
  /// \endcode
  ///
  /// Defaults to true.
  bool HasAggressiveSymbolFolding = true;

  /// True if .comm's and .lcomm's optional alignment is to be specified in
  /// bytes instead of log2(n).  Defaults to true.
  bool COMMDirectiveAlignmentIsInBytes = true;

  /// Describes if the .lcomm directive for the target supports an alignment
  /// argument and how it is interpreted.  Defaults to NoAlignment.
  LCOMM::LCOMMType LCOMMDirectiveAlignmentType = LCOMM::NoAlignment;

  /// True if the target only has basename for .file directive. False if the
  /// target also needs the directory along with the basename. Defaults to true.
  bool HasBasenameOnlyForFileDirective = true;

  /// True if the target represents string constants as mostly raw characters
  /// between paired double quotation marks, with a doubled double quotation
  /// mark as the escape mechanism for a double quotation mark within the
  /// string.  Defaults to false.
  bool HasPairedDoubleQuoteStringConstants = false;

  // True if the target allows .align directives on functions. This is true for
  // most targets, so defaults to true.
  bool HasFunctionAlignment = true;

  /// True if the target has .type and .size directives, this is true for most
  /// ELF targets.  Defaults to true.
  bool HasDotTypeDotSizeDirective = true;

  /// True if the target has a single parameter .file directive, this is true
  /// for ELF targets.  Defaults to true.
  bool HasSingleParameterDotFile = true;

  /// True if the target has a four-string .file directive, with the strings
  /// separated by commas.  Defaults to false.
  bool HasFourStringsDotFile = false;

  /// True if the target has a .ident directive, this is true for ELF targets.
  /// Defaults to false.
  bool HasIdentDirective = false;

  /// True if this target supports the MachO .no_dead_strip directive.  Defaults
  /// to false.
  bool HasNoDeadStrip = false;

  /// True if this target supports the MachO .alt_entry directive.  Defaults to
  /// false.
  bool HasAltEntry = false;

  /// Used to declare a global as being a weak symbol. Defaults to ".weak".
  const char *WeakDirective;

  /// This directive, if non-null, is used to declare a global as being a weak
  /// undefined symbol.  Defaults to nullptr.
  const char *WeakRefDirective = nullptr;

  /// True if we have a directive to declare a global as being a weak defined
  /// symbol.  Defaults to false.
  bool HasWeakDefDirective = false;

  /// True if we have a directive to declare a global as being a weak defined
  /// symbol that can be hidden (unexported).  Defaults to false.
  bool HasWeakDefCanBeHiddenDirective = false;

  /// True if we should mark symbols as global instead of weak, for
  /// weak*/linkonce*, if the symbol has a comdat.
  /// Defaults to false.
  bool AvoidWeakIfComdat = false;

  /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
  /// hidden visibility.  Defaults to MCSA_Hidden.
  MCSymbolAttr HiddenVisibilityAttr = MCSA_Hidden;

  /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
  /// exported visibility.  Defaults to MCSA_Exported.
  MCSymbolAttr ExportedVisibilityAttr = MCSA_Exported;

  /// This attribute, if not MCSA_Invalid, is used to declare an undefined
  /// symbol as having hidden visibility. Defaults to MCSA_Hidden.
  MCSymbolAttr HiddenDeclarationVisibilityAttr = MCSA_Hidden;

  /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
  /// protected visibility.  Defaults to MCSA_Protected
  MCSymbolAttr ProtectedVisibilityAttr = MCSA_Protected;

  MCSymbolAttr MemtagAttr = MCSA_Memtag;

  //===--- Dwarf Emission Directives -----------------------------------===//

  /// True if target supports emission of debugging information.  Defaults to
  /// false.
  bool SupportsDebugInformation = false;

  /// Exception handling format for the target.  Defaults to None.
  ExceptionHandling ExceptionsType = ExceptionHandling::None;

  /// True if target uses CFI unwind information for other purposes than EH
  /// (debugging / sanitizers) when `ExceptionsType == ExceptionHandling::None`.
  bool UsesCFIWithoutEH = false;

  /// Windows exception handling data (.pdata) encoding.  Defaults to Invalid.
  WinEH::EncodingType WinEHEncodingType = WinEH::EncodingType::Invalid;

  /// True if Dwarf2 output generally uses relocations for references to other
  /// .debug_* sections.
  bool DwarfUsesRelocationsAcrossSections = true;

  /// True if DWARF FDE symbol reference relocations should be replaced by an
  /// absolute difference.
  bool DwarfFDESymbolsUseAbsDiff = false;

  /// True if the target supports generating the DWARF line table through using
  /// the .loc/.file directives. Defaults to true.
  bool UsesDwarfFileAndLocDirectives = true;

  /// True if DWARF `.file directory' directive syntax is used by
  /// default.
  bool EnableDwarfFileDirectoryDefault = true;

  /// True if the target needs the DWARF section length in the header (if any)
  /// of the DWARF section in the assembly file. Defaults to true.
  bool DwarfSectionSizeRequired = true;

  /// True if dwarf register numbers are printed instead of symbolic register
  /// names in .cfi_* directives.  Defaults to false.
  bool DwarfRegNumForCFI = false;

  /// True if target uses parens to indicate the symbol variant instead of @.
  /// For example, foo(plt) instead of foo@plt.  Defaults to false.
  bool UseParensForSymbolVariant = false;

  /// True if the target uses parens for symbol names starting with
  /// '$' character to distinguish them from absolute names.
  bool UseParensForDollarSignNames = true;

  /// True if the target supports flags in ".loc" directive, false if only
  /// location is allowed.
  bool SupportsExtendedDwarfLocDirective = true;

  //===--- Prologue State ----------------------------------------------===//

  std::vector<MCCFIInstruction> InitialFrameState;

  //===--- Integrated Assembler Information ----------------------------===//

  // Generated object files can use all ELF features supported by GNU ld of
  // this binutils version and later. INT_MAX means all features can be used,
  // regardless of GNU ld support. The default value is referenced by
  // clang/Driver/Options.td.
  std::pair<int, int> BinutilsVersion = {2, 26};

  /// Should we use the integrated assembler?
  /// The integrated assembler should be enabled by default (by the
  /// constructors) when failing to parse a valid piece of assembly (inline
  /// or otherwise) is considered a bug. It may then be overridden after
  /// construction (see LLVMTargetMachine::initAsmInfo()).
  bool UseIntegratedAssembler;

  /// Use AsmParser to parse inlineAsm when UseIntegratedAssembler is not set.
  bool ParseInlineAsmUsingAsmParser;

  /// Preserve Comments in assembly
  bool PreserveAsmComments;

  /// Compress DWARF debug sections. Defaults to no compression.
  DebugCompressionType CompressDebugSections = DebugCompressionType::None;

  /// True if the integrated assembler should interpret 'a >> b' constant
  /// expressions as logical rather than arithmetic.
  bool UseLogicalShr = true;

  // If true, emit GOTPCRELX/REX_GOTPCRELX instead of GOTPCREL, on
  // X86_64 ELF.
  bool RelaxELFRelocations = true;

  // If true, then the lexer and expression parser will support %neg(),
  // %hi(), and similar unary operators.
  bool HasMipsExpressions = false;

  // If true, use Motorola-style integers in Assembly (ex. $0ac).
  bool UseMotorolaIntegers = false;

  // If true, emit function descriptor symbol on AIX.
  bool NeedsFunctionDescriptors = false;

public:
  explicit MCAsmInfo();
  virtual ~MCAsmInfo();

  /// Get the code pointer size in bytes.
  unsigned getCodePointerSize() const { return CodePointerSize; }

  /// Get the callee-saved register stack slot
  /// size in bytes.
  unsigned getCalleeSaveStackSlotSize() const {
    return CalleeSaveStackSlotSize;
  }

  /// True if the target is little endian.
  bool isLittleEndian() const { return IsLittleEndian; }

  /// True if the target's stack grows up.
  bool isStackGrowthDirectionUp() const { return StackGrowsUp; }

  bool hasSubsectionsViaSymbols() const { return HasSubsectionsViaSymbols; }

  // Data directive accessors.

  const char *getData8bitsDirective() const { return Data8bitsDirective; }
  const char *getData16bitsDirective() const { return Data16bitsDirective; }
  const char *getData32bitsDirective() const { return Data32bitsDirective; }
  const char *getData64bitsDirective() const { return Data64bitsDirective; }
  bool supportsSignedData() const { return SupportsSignedData; }
  const char *getGPRel64Directive() const { return GPRel64Directive; }
  const char *getGPRel32Directive() const { return GPRel32Directive; }
  const char *getDTPRel64Directive() const { return DTPRel64Directive; }
  const char *getDTPRel32Directive() const { return DTPRel32Directive; }
  const char *getTPRel64Directive() const { return TPRel64Directive; }
  const char *getTPRel32Directive() const { return TPRel32Directive; }

  /// Targets can implement this method to specify a section to switch to if the
  /// translation unit doesn't have any trampolines that require an executable
  /// stack.
  virtual MCSection *getNonexecutableStackSection(MCContext &Ctx) const {
    return nullptr;
  }

  /// True if the section is atomized using the symbols in it.
  /// This is false if the section is not atomized at all (most ELF sections) or
  /// if it is atomized based on its contents (MachO's __TEXT,__cstring for
  /// example).
  virtual bool isSectionAtomizableBySymbols(const MCSection &Section) const;

  virtual const MCExpr *getExprForPersonalitySymbol(const MCSymbol *Sym,
                                                    unsigned Encoding,
                                                    MCStreamer &Streamer) const;

  virtual const MCExpr *getExprForFDESymbol(const MCSymbol *Sym,
                                            unsigned Encoding,
                                            MCStreamer &Streamer) const;

  /// Return true if C is an acceptable character inside a symbol name.
  virtual bool isAcceptableChar(char C) const;

  /// Return true if the identifier \p Name does not need quotes to be
  /// syntactically correct.
  virtual bool isValidUnquotedName(StringRef Name) const;

  /// Return true if the .section directive should be omitted when
  /// emitting \p SectionName.  For example:
  ///
  /// shouldOmitSectionDirective(".text")
  ///
  /// returns false => .section .text,#alloc,#execinstr
  /// returns true  => .text
  virtual bool shouldOmitSectionDirective(StringRef SectionName) const;

  bool usesSunStyleELFSectionSwitchSyntax() const {
    return SunStyleELFSectionSwitchSyntax;
  }

  bool usesELFSectionDirectiveForBSS() const {
    return UsesELFSectionDirectiveForBSS;
  }

  bool needsDwarfSectionOffsetDirective() const {
    return NeedsDwarfSectionOffsetDirective;
  }

  // Accessors.

  bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
  bool hasMachoTBSSDirective() const { return HasMachoTBSSDirective; }
  bool hasCOFFAssociativeComdats() const { return HasCOFFAssociativeComdats; }
  bool hasCOFFComdatConstants() const { return HasCOFFComdatConstants; }
  bool hasVisibilityOnlyWithLinkage() const {
    return HasVisibilityOnlyWithLinkage;
  }

  /// Returns the maximum possible encoded instruction size in bytes. If \p STI
  /// is null, this should be the maximum size for any subtarget.
  virtual unsigned getMaxInstLength(const MCSubtargetInfo *STI = nullptr) const {
    return MaxInstLength;
  }

  unsigned getMinInstAlignment() const { return MinInstAlignment; }
  bool getDollarIsPC() const { return DollarIsPC; }
  bool getDotIsPC() const { return DotIsPC; }
  bool getStarIsPC() const { return StarIsPC; }
  const char *getSeparatorString() const { return SeparatorString; }

  /// This indicates the column (zero-based) at which asm comments should be
  /// printed.
  unsigned getCommentColumn() const { return 40; }

  StringRef getCommentString() const { return CommentString; }
  bool getRestrictCommentStringToStartOfStatement() const {
    return RestrictCommentStringToStartOfStatement;
  }
  bool shouldAllowAdditionalComments() const { return AllowAdditionalComments; }
  bool getEmitGNUAsmStartIndentationMarker() const {
    return EmitGNUAsmStartIndentationMarker;
  }
  const char *getLabelSuffix() const { return LabelSuffix; }
  bool shouldEmitLabelsInUpperCase() const { return EmitLabelsInUpperCase; }

  bool useAssignmentForEHBegin() const { return UseAssignmentForEHBegin; }
  bool needsLocalForSize() const { return NeedsLocalForSize; }
  StringRef getPrivateGlobalPrefix() const { return PrivateGlobalPrefix; }
  StringRef getPrivateLabelPrefix() const { return PrivateLabelPrefix; }

  bool hasLinkerPrivateGlobalPrefix() const {
    return !LinkerPrivateGlobalPrefix.empty();
  }

  StringRef getLinkerPrivateGlobalPrefix() const {
    if (hasLinkerPrivateGlobalPrefix())
      return LinkerPrivateGlobalPrefix;
    return getPrivateGlobalPrefix();
  }

  const char *getInlineAsmStart() const { return InlineAsmStart; }
  const char *getInlineAsmEnd() const { return InlineAsmEnd; }
  const char *getCode16Directive() const { return Code16Directive; }
  const char *getCode32Directive() const { return Code32Directive; }
  const char *getCode64Directive() const { return Code64Directive; }
  unsigned getAssemblerDialect() const { return AssemblerDialect; }
  bool doesAllowAtInName() const { return AllowAtInName; }
  void setAllowAtInName(bool V) { AllowAtInName = V; }
  bool doesAllowQuestionAtStartOfIdentifier() const {
    return AllowQuestionAtStartOfIdentifier;
  }
  bool doesAllowAtAtStartOfIdentifier() const {
    return AllowAtAtStartOfIdentifier;
  }
  bool doesAllowDollarAtStartOfIdentifier() const {
    return AllowDollarAtStartOfIdentifier;
  }
  bool doesAllowHashAtStartOfIdentifier() const {
    return AllowHashAtStartOfIdentifier;
  }
  bool supportsNameQuoting() const { return SupportsQuotedNames; }

  bool doesSupportDataRegionDirectives() const {
    return UseDataRegionDirectives;
  }

  bool useDotAlignForAlignment() const {
    return UseDotAlignForAlignment;
  }

  bool hasLEB128Directives() const { return HasLEB128Directives; }

  const char *getZeroDirective() const { return ZeroDirective; }
  bool doesZeroDirectiveSupportNonZeroValue() const {
    return ZeroDirectiveSupportsNonZeroValue;
  }
  const char *getAsciiDirective() const { return AsciiDirective; }
  const char *getAscizDirective() const { return AscizDirective; }
  const char *getByteListDirective() const { return ByteListDirective; }
  const char *getPlainStringDirective() const { return PlainStringDirective; }
  AsmCharLiteralSyntax characterLiteralSyntax() const {
    return CharacterLiteralSyntax;
  }
  bool getAlignmentIsInBytes() const { return AlignmentIsInBytes; }
  unsigned getTextAlignFillValue() const { return TextAlignFillValue; }
  const char *getGlobalDirective() const { return GlobalDirective; }

  bool doesSetDirectiveSuppressReloc() const {
    return SetDirectiveSuppressesReloc;
  }

  bool hasAggressiveSymbolFolding() const { return HasAggressiveSymbolFolding; }

  bool getCOMMDirectiveAlignmentIsInBytes() const {
    return COMMDirectiveAlignmentIsInBytes;
  }

  LCOMM::LCOMMType getLCOMMDirectiveAlignmentType() const {
    return LCOMMDirectiveAlignmentType;
  }

  bool hasBasenameOnlyForFileDirective() const {
    return HasBasenameOnlyForFileDirective;
  }
  bool hasPairedDoubleQuoteStringConstants() const {
    return HasPairedDoubleQuoteStringConstants;
  }
  bool hasFunctionAlignment() const { return HasFunctionAlignment; }
  bool hasDotTypeDotSizeDirective() const { return HasDotTypeDotSizeDirective; }
  bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; }
  bool hasFourStringsDotFile() const { return HasFourStringsDotFile; }
  bool hasIdentDirective() const { return HasIdentDirective; }
  bool hasNoDeadStrip() const { return HasNoDeadStrip; }
  bool hasAltEntry() const { return HasAltEntry; }
  const char *getWeakDirective() const { return WeakDirective; }
  const char *getWeakRefDirective() const { return WeakRefDirective; }
  bool hasWeakDefDirective() const { return HasWeakDefDirective; }

  bool hasWeakDefCanBeHiddenDirective() const {
    return HasWeakDefCanBeHiddenDirective;
  }

  bool avoidWeakIfComdat() const { return AvoidWeakIfComdat; }

  MCSymbolAttr getHiddenVisibilityAttr() const { return HiddenVisibilityAttr; }

  MCSymbolAttr getExportedVisibilityAttr() const { return ExportedVisibilityAttr; }

  MCSymbolAttr getHiddenDeclarationVisibilityAttr() const {
    return HiddenDeclarationVisibilityAttr;
  }

  MCSymbolAttr getProtectedVisibilityAttr() const {
    return ProtectedVisibilityAttr;
  }

  MCSymbolAttr getMemtagAttr() const { return MemtagAttr; }

  bool doesSupportDebugInformation() const { return SupportsDebugInformation; }

  ExceptionHandling getExceptionHandlingType() const { return ExceptionsType; }
  WinEH::EncodingType getWinEHEncodingType() const { return WinEHEncodingType; }

  void setExceptionsType(ExceptionHandling EH) {
    ExceptionsType = EH;
  }

  bool usesCFIWithoutEH() const {
    return ExceptionsType == ExceptionHandling::None && UsesCFIWithoutEH;
  }

  /// Returns true if the exception handling method for the platform uses call
  /// frame information to unwind.
  bool usesCFIForEH() const {
    return (ExceptionsType == ExceptionHandling::DwarfCFI ||
            ExceptionsType == ExceptionHandling::ARM || usesWindowsCFI());
  }

  bool usesWindowsCFI() const {
    return ExceptionsType == ExceptionHandling::WinEH &&
           (WinEHEncodingType != WinEH::EncodingType::Invalid &&
            WinEHEncodingType != WinEH::EncodingType::X86);
  }

  bool doesDwarfUseRelocationsAcrossSections() const {
    return DwarfUsesRelocationsAcrossSections;
  }

  bool doDwarfFDESymbolsUseAbsDiff() const { return DwarfFDESymbolsUseAbsDiff; }
  bool useDwarfRegNumForCFI() const { return DwarfRegNumForCFI; }
  bool useParensForSymbolVariant() const { return UseParensForSymbolVariant; }
  bool useParensForDollarSignNames() const {
    return UseParensForDollarSignNames;
  }
  bool supportsExtendedDwarfLocDirective() const {
    return SupportsExtendedDwarfLocDirective;
  }

  bool usesDwarfFileAndLocDirectives() const {
    return UsesDwarfFileAndLocDirectives;
  }

  bool needsDwarfSectionSizeInHeader() const {
    return DwarfSectionSizeRequired;
  }

  bool enableDwarfFileDirectoryDefault() const {
    return EnableDwarfFileDirectoryDefault;
  }

  void addInitialFrameState(const MCCFIInstruction &Inst);

  const std::vector<MCCFIInstruction> &getInitialFrameState() const {
    return InitialFrameState;
  }

  void setBinutilsVersion(std::pair<int, int> Value) {
    BinutilsVersion = Value;
  }

  /// Return true if assembly (inline or otherwise) should be parsed.
  bool useIntegratedAssembler() const { return UseIntegratedAssembler; }

  /// Return true if the target wants to use AsmParser to parse inline asm.
  bool parseInlineAsmUsingAsmParser() const {
    return ParseInlineAsmUsingAsmParser;
  }

  bool binutilsIsAtLeast(int Major, int Minor) const {
    return BinutilsVersion >= std::make_pair(Major, Minor);
  }

  /// Set whether assembly (inline or otherwise) should be parsed.
  virtual void setUseIntegratedAssembler(bool Value) {
    UseIntegratedAssembler = Value;
  }

  /// Set whether the target wants to use AsmParser to parse inline asm.
  virtual void setParseInlineAsmUsingAsmParser(bool Value) {
    ParseInlineAsmUsingAsmParser = Value;
  }

  /// Return true if comments in assembly output should be preserved.
  bool preserveAsmComments() const { return PreserveAsmComments; }

  /// Set whether comments in assembly output should be preserved.
  virtual void setPreserveAsmComments(bool Value) {
    PreserveAsmComments = Value;
  }

  DebugCompressionType compressDebugSections() const {
    return CompressDebugSections;
  }

  void setCompressDebugSections(DebugCompressionType CompressDebugSections) {
    this->CompressDebugSections = CompressDebugSections;
  }

  bool shouldUseLogicalShr() const { return UseLogicalShr; }

  bool canRelaxRelocations() const { return RelaxELFRelocations; }
  void setRelaxELFRelocations(bool V) { RelaxELFRelocations = V; }
  bool hasMipsExpressions() const { return HasMipsExpressions; }
  bool needsFunctionDescriptors() const { return NeedsFunctionDescriptors; }
  bool shouldUseMotorolaIntegers() const { return UseMotorolaIntegers; }
};
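
// Illustrative sketch: consulting MCAsmInfo while printing.  `MAI` is assumed
// to be the MCAsmInfo obtained from the target, and `OS` a raw_ostream.
//
// \code
//   if (MAI.hasDotTypeDotSizeDirective())
//     OS << "\t.size\tfoo, .-foo\n";
//   if (const char *Asciz = MAI.getAscizDirective())
//     OS << Asciz << "\"hello\"\n"; // "\t.asciz\t" on most ELF targets
// \endcode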

} // end namespace llvm

#endif // LLVM_MC_MCASMINFO_H
//===- MCSymbolWasm.h -  ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSYMBOLWASM_H
#define LLVM_MC_MCSYMBOLWASM_H

#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/MC/MCSymbol.h"

namespace llvm {

class MCSymbolWasm : public MCSymbol {
  std::optional<wasm::WasmSymbolType> Type;
  bool IsWeak = false;
  bool IsHidden = false;
  bool IsComdat = false;
  bool OmitFromLinkingSection = false;
  mutable bool IsUsedInInitArray = false;
  mutable bool IsUsedInGOT = false;
  std::optional<StringRef> ImportModule;
  std::optional<StringRef> ImportName;
  std::optional<StringRef> ExportName;
  wasm::WasmSignature *Signature = nullptr;
  std::optional<wasm::WasmGlobalType> GlobalType;
  std::optional<wasm::WasmTableType> TableType;

  /// An expression describing how to calculate the size of a symbol. If a
  /// symbol has no size this field will be NULL.
  const MCExpr *SymbolSize = nullptr;

public:
  MCSymbolWasm(const StringMapEntry<bool> *Name, bool isTemporary)
      : MCSymbol(SymbolKindWasm, Name, isTemporary) {}
  static bool classof(const MCSymbol *S) { return S->isWasm(); }

  const MCExpr *getSize() const { return SymbolSize; }
  void setSize(const MCExpr *SS) { SymbolSize = SS; }

  bool isFunction() const { return Type == wasm::WASM_SYMBOL_TYPE_FUNCTION; }
  // Data is the default value if not set.
  bool isData() const { return !Type || Type == wasm::WASM_SYMBOL_TYPE_DATA; }
  bool isGlobal() const { return Type == wasm::WASM_SYMBOL_TYPE_GLOBAL; }
  bool isTable() const { return Type == wasm::WASM_SYMBOL_TYPE_TABLE; }
  bool isSection() const { return Type == wasm::WASM_SYMBOL_TYPE_SECTION; }
  bool isTag() const { return Type == wasm::WASM_SYMBOL_TYPE_TAG; }

  std::optional<wasm::WasmSymbolType> getType() const { return Type; }

  void setType(wasm::WasmSymbolType type) { Type = type; }

  bool isExported() const {
    return getFlags() & wasm::WASM_SYMBOL_EXPORTED;
  }
  void setExported() const {
    modifyFlags(wasm::WASM_SYMBOL_EXPORTED, wasm::WASM_SYMBOL_EXPORTED);
  }

  bool isNoStrip() const {
    return getFlags() & wasm::WASM_SYMBOL_NO_STRIP;
  }
  void setNoStrip() const {
    modifyFlags(wasm::WASM_SYMBOL_NO_STRIP, wasm::WASM_SYMBOL_NO_STRIP);
  }

  bool isTLS() const { return getFlags() & wasm::WASM_SYMBOL_TLS; }
  void setTLS() const {
    modifyFlags(wasm::WASM_SYMBOL_TLS, wasm::WASM_SYMBOL_TLS);
  }

  bool isWeak() const { return IsWeak; }
  void setWeak(bool isWeak) { IsWeak = isWeak; }

  bool isHidden() const { return IsHidden; }
  void setHidden(bool isHidden) { IsHidden = isHidden; }

  bool isComdat() const { return IsComdat; }
  void setComdat(bool isComdat) { IsComdat = isComdat; }

  // wasm-ld understands a finite set of symbol types.  This flag allows the
  // compiler to avoid emitting symbol table entries that would confuse the
  // linker, unless the user specifically requests the feature.
  bool omitFromLinkingSection() const { return OmitFromLinkingSection; }
  void setOmitFromLinkingSection() { OmitFromLinkingSection = true; }

  bool hasImportModule() const { return ImportModule.has_value(); }
  StringRef getImportModule() const {
    if (ImportModule)
      return *ImportModule;
    // Use a default module name of "env" for now, for compatibility with
    // existing tools.
    // TODO(sbc): Find a way to specify a default value in the object format
    // without picking a hardcoded value like this.
    return "env";
  }
  void setImportModule(StringRef Name) { ImportModule = Name; }

  bool hasImportName() const { return ImportName.has_value(); }
  StringRef getImportName() const {
    if (ImportName)
      return *ImportName;
    return getName();
  }
  void setImportName(StringRef Name) { ImportName = Name; }

  bool hasExportName() const { return ExportName.has_value(); }
  StringRef getExportName() const { return *ExportName; }
  void setExportName(StringRef Name) { ExportName = Name; }

  bool isFunctionTable() const {
    return isTable() && hasTableType() &&
           getTableType().ElemType == wasm::WASM_TYPE_FUNCREF;
  }
  void setFunctionTable() {
    setType(wasm::WASM_SYMBOL_TYPE_TABLE);
    setTableType(wasm::ValType::FUNCREF);
  }

  void setUsedInGOT() const { IsUsedInGOT = true; }
  bool isUsedInGOT() const { return IsUsedInGOT; }

  void setUsedInInitArray() const { IsUsedInInitArray = true; }
  bool isUsedInInitArray() const { return IsUsedInInitArray; }

  const wasm::WasmSignature *getSignature() const { return Signature; }
  void setSignature(wasm::WasmSignature *Sig) { Signature = Sig; }

  const wasm::WasmGlobalType &getGlobalType() const {
    assert(GlobalType);
    return *GlobalType;
  }
  void setGlobalType(wasm::WasmGlobalType GT) { GlobalType = GT; }

  bool hasTableType() const { return TableType.has_value(); }
  const wasm::WasmTableType &getTableType() const {
    assert(hasTableType());
    return *TableType;
  }
  void setTableType(wasm::WasmTableType TT) { TableType = TT; }
  void setTableType(wasm::ValType VT) {
    // Declare a table with element type VT and no limits (min size 0, no max
    // size).
    wasm::WasmLimits Limits = {wasm::WASM_LIMITS_FLAG_NONE, 0, 0};
    setTableType({uint8_t(VT), Limits});
  }
};
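
// Illustrative sketch: configuring a wasm symbol.  `Ctx` is assumed to be an
// MCContext targeting WebAssembly, so getOrCreateSymbol yields an
// MCSymbolWasm.
//
// \code
//   auto *Sym =
//       cast<MCSymbolWasm>(Ctx.getOrCreateSymbol("__indirect_function_table"));
//   Sym->setFunctionTable(); // table of funcref with no limits
//   Sym->setNoStrip();
// \endcode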

} // end namespace llvm

#endif // LLVM_MC_MCSYMBOLWASM_H
//===- llvm/MC/MCInstrItineraries.h - Scheduling ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the structures used for instruction
// itineraries, stages, and operand reads/writes.  This is used by
// schedulers to determine instruction stages and latencies.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINSTRITINERARIES_H
#define LLVM_MC_MCINSTRITINERARIES_H

#include "llvm/MC/MCSchedule.h"
#include <algorithm>

namespace llvm {

//===----------------------------------------------------------------------===//
/// These values represent a non-pipelined step in
/// the execution of an instruction.  Cycles represents the number of
/// discrete time slots needed to complete the stage.  Units represent
/// the choice of functional units that can be used to complete the
/// stage.  E.g. IntUnit1, IntUnit2. NextCycles indicates how many
/// cycles should elapse from the start of this stage to the start of
/// the next stage in the itinerary. A value of -1 indicates that the
/// next stage should start immediately after the current one.
/// For example:
///
///   { 1, x, -1 }
///      indicates that the stage occupies FU x for 1 cycle and that
///      the next stage starts immediately after this one.
///
///   { 2, x|y, 1 }
///      indicates that the stage occupies either FU x or FU y for 2
///      consecutive cycles and that the next stage starts one cycle
///      after this stage starts. That is, the stage requirements
///      overlap in time.
///
///   { 1, x, 0 }
///      indicates that the stage occupies FU x for 1 cycle and that
///      the next stage starts in this same cycle. This can be used to
///      indicate that the instruction requires multiple stages at the
///      same time.
///
/// FU reservation can be of two different kinds:
///  - FUs which the instruction actually requires
///  - FUs which the instruction just reserves. A reserved unit is not
///    available for the execution of other instructions, but several
///    instructions can reserve the same unit several times.
/// These two kinds of unit reservation are used to model instruction domain
/// change stalls, FUs sharing the same resource (e.g. the same register file),
/// etc.

struct InstrStage {
  enum ReservationKinds {
    Required = 0,
    Reserved = 1
  };

  /// Bitmask representing a set of functional units.
  typedef uint64_t FuncUnits;

  unsigned Cycles_;  ///< Length of stage in machine cycles
  FuncUnits Units_;  ///< Choice of functional units
  int NextCycles_;   ///< Number of machine cycles to next stage
  ReservationKinds Kind_; ///< Kind of the FU reservation

  /// Returns the number of cycles the stage is occupied.
  unsigned getCycles() const {
    return Cycles_;
  }

  /// Returns the choice of FUs.
  FuncUnits getUnits() const {
    return Units_;
  }

  ReservationKinds getReservationKind() const {
    return Kind_;
  }

  /// Returns the number of cycles from the start of this stage to the
  /// start of the next stage in the itinerary
  unsigned getNextCycles() const {
    return (NextCycles_ >= 0) ? (unsigned)NextCycles_ : Cycles_;
  }
};
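
// Illustrative sketch (the functional-unit masks are hypothetical, not from
// any real target): a two-stage itinerary where an instruction occupies
// IntALU for one cycle and then Mem for two cycles, with the second stage
// starting one cycle after the first.
//
//   const InstrStage::FuncUnits IntALU = 1ULL << 0;
//   const InstrStage::FuncUnits Mem    = 1ULL << 1;
//   static const InstrStage Stages[] = {
//     {1, IntALU, 1, InstrStage::Required},  // occupies IntALU in cycle 0
//     {2, Mem,   -1, InstrStage::Required},  // occupies Mem in cycles 1-2
//   };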

//===----------------------------------------------------------------------===//
/// An itinerary represents the scheduling information for an instruction.
/// This includes a set of stages occupied by the instruction and the pipeline
/// cycle in which operands are read and written.
///
struct InstrItinerary {
  int16_t  NumMicroOps;        ///< # of micro-ops, -1 means it's variable
  uint16_t FirstStage;         ///< Index of first stage in itinerary
  uint16_t LastStage;          ///< Index of last + 1 stage in itinerary
  uint16_t FirstOperandCycle;  ///< Index of first operand rd/wr
  uint16_t LastOperandCycle;   ///< Index of last + 1 operand rd/wr
};

//===----------------------------------------------------------------------===//
/// Itinerary data supplied by a subtarget to be used by a target.
///
class InstrItineraryData {
public:
  MCSchedModel SchedModel =
      MCSchedModel::GetDefaultSchedModel(); ///< Basic machine properties.
  const InstrStage *Stages = nullptr;       ///< Array of stages selected
  const unsigned *OperandCycles = nullptr; ///< Array of operand cycles selected
  const unsigned *Forwardings = nullptr; ///< Array of pipeline forwarding paths
  const InstrItinerary *Itineraries =
      nullptr; ///< Array of itineraries selected

  InstrItineraryData() = default;
  InstrItineraryData(const MCSchedModel &SM, const InstrStage *S,
                     const unsigned *OS, const unsigned *F)
    : SchedModel(SM), Stages(S), OperandCycles(OS), Forwardings(F),
      Itineraries(SchedModel.InstrItineraries) {}

  /// Returns true if there are no itineraries.
  bool isEmpty() const { return Itineraries == nullptr; }

  /// Returns true if the index is for the end marker itinerary.
  bool isEndMarker(unsigned ItinClassIndx) const {
    return ((Itineraries[ItinClassIndx].FirstStage == UINT16_MAX) &&
            (Itineraries[ItinClassIndx].LastStage == UINT16_MAX));
  }

  /// Return the first stage of the itinerary.
  const InstrStage *beginStage(unsigned ItinClassIndx) const {
    unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
    return Stages + StageIdx;
  }

  /// Return the last+1 stage of the itinerary.
  const InstrStage *endStage(unsigned ItinClassIndx) const {
    unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
    return Stages + StageIdx;
  }

  /// Return the total stage latency of the given class.  The latency is
  /// the maximum completion time for any stage in the itinerary.  If no stages
  /// exist, it defaults to one cycle.
  unsigned getStageLatency(unsigned ItinClassIndx) const {
    // If the target doesn't provide itinerary information, use a simple
    // non-zero default value for all instructions.
    if (isEmpty())
      return 1;

    // Calculate the maximum completion time for any stage.
    unsigned Latency = 0, StartCycle = 0;
    for (const InstrStage *IS = beginStage(ItinClassIndx),
           *E = endStage(ItinClassIndx); IS != E; ++IS) {
      Latency = std::max(Latency, StartCycle + IS->getCycles());
      StartCycle += IS->getNextCycles();
    }
    return Latency;
  }
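
  // Worked example (using the hypothetical two-stage itinerary sketched after
  // InstrStage above): the first stage spans cycle 0 (StartCycle 0 + 1 cycle)
  // and advances StartCycle by 1; the second spans cycles 1-2 (StartCycle 1 +
  // 2 cycles), so getStageLatency returns 3.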

  /// Return the cycle for the given class and operand.  Return -1 if no
  /// cycle is specified for the operand.
  int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const {
    if (isEmpty())
      return -1;

    unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
    unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
    if ((FirstIdx + OperandIdx) >= LastIdx)
      return -1;

    return (int)OperandCycles[FirstIdx + OperandIdx];
  }

  /// Return true if there is a pipeline forwarding between instructions
  /// of itinerary classes DefClass and UseClasses so that value produced by an
  /// instruction of itinerary class DefClass, operand index DefIdx can be
  /// bypassed when it's read by an instruction of itinerary class UseClass,
  /// operand index UseIdx.
  bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
                             unsigned UseClass, unsigned UseIdx) const {
    unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
    unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
    if ((FirstDefIdx + DefIdx) >= LastDefIdx)
      return false;
    if (Forwardings[FirstDefIdx + DefIdx] == 0)
      return false;

    unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
    unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
    if ((FirstUseIdx + UseIdx) >= LastUseIdx)
      return false;

    return Forwardings[FirstDefIdx + DefIdx] ==
      Forwardings[FirstUseIdx + UseIdx];
  }

  /// Compute and return the use operand latency of a given itinerary
  /// class and operand index if the value is produced by an instruction of the
  /// specified itinerary class and def operand index.
  int getOperandLatency(unsigned DefClass, unsigned DefIdx,
                        unsigned UseClass, unsigned UseIdx) const {
    if (isEmpty())
      return -1;

    int DefCycle = getOperandCycle(DefClass, DefIdx);
    if (DefCycle == -1)
      return -1;

    int UseCycle = getOperandCycle(UseClass, UseIdx);
    if (UseCycle == -1)
      return -1;

    UseCycle = DefCycle - UseCycle + 1;
    if (UseCycle > 0 &&
        hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
      // FIXME: This assumes one cycle benefit for every pipeline forwarding.
      --UseCycle;
    return UseCycle;
  }
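
  // Worked example (hypothetical operand cycles): if the def operand is
  // written at cycle 3 and the use operand is read at cycle 1, the use
  // operand latency is 3 - 1 + 1 = 3 cycles; with a matching forwarding path
  // between the two operands it drops to 2.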

  /// Return the number of micro-ops that the given class decodes to.
  /// Return -1 for classes that require dynamic lookup via TargetInstrInfo.
  int getNumMicroOps(unsigned ItinClassIndx) const {
    if (isEmpty())
      return 1;
    return Itineraries[ItinClassIndx].NumMicroOps;
  }
};

} // end namespace llvm

#endif // LLVM_MC_MCINSTRITINERARIES_H
//===- llvm/MC/SubtargetFeature.h - CPU characteristics ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file Defines and manages user or tool specified CPU characteristics.
/// The intent is to be able to package specific features that should or should
/// not be used on a specific target processor.  A tool, such as llc, could,
/// for example, gather chip info from the command line along with features
/// that should be used on that chip.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_SUBTARGETFEATURE_H
#define LLVM_MC_SUBTARGETFEATURE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MathExtras.h"
#include <array>
#include <initializer_list>
#include <string>
#include <vector>

namespace llvm {

class raw_ostream;
class Triple;

const unsigned MAX_SUBTARGET_WORDS = 4;
const unsigned MAX_SUBTARGET_FEATURES = MAX_SUBTARGET_WORDS * 64;

/// Container class for subtarget features.
/// This is a constexpr reimplementation of a subset of std::bitset. It would be
/// nice to use std::bitset directly, but it doesn't support constant
/// initialization.
class FeatureBitset {
  static_assert((MAX_SUBTARGET_FEATURES % 64) == 0,
                "Should be a multiple of 64!");
  std::array<uint64_t, MAX_SUBTARGET_WORDS> Bits{};

protected:
  constexpr FeatureBitset(const std::array<uint64_t, MAX_SUBTARGET_WORDS> &B)
      : Bits{B} {}

public:
  constexpr FeatureBitset() = default;
  constexpr FeatureBitset(std::initializer_list<unsigned> Init) {
    for (auto I : Init)
      set(I);
  }

  FeatureBitset &set() {
    std::fill(std::begin(Bits), std::end(Bits), -1ULL);
    return *this;
  }

  constexpr FeatureBitset &set(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint64_t NewBits = Bits[I / 64] | (uint64_t(1) << (I % 64));
    Bits[I / 64] = NewBits;
    return *this;
  }

  constexpr FeatureBitset &reset(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint64_t NewBits = Bits[I / 64] & ~(uint64_t(1) << (I % 64));
    Bits[I / 64] = NewBits;
    return *this;
  }

  constexpr FeatureBitset &flip(unsigned I) {
    // GCC <6.2 crashes if this is written in a single statement.
    uint64_t NewBits = Bits[I / 64] ^ (uint64_t(1) << (I % 64));
    Bits[I / 64] = NewBits;
    return *this;
  }

  constexpr bool operator[](unsigned I) const {
    uint64_t Mask = uint64_t(1) << (I % 64);
    return (Bits[I / 64] & Mask) != 0;
  }

  constexpr bool test(unsigned I) const { return (*this)[I]; }

  constexpr size_t size() const { return MAX_SUBTARGET_FEATURES; }

  bool any() const {
    return llvm::any_of(Bits, [](uint64_t I) { return I != 0; });
  }
  bool none() const { return !any(); }
  size_t count() const {
    size_t Count = 0;
    for (auto B : Bits)
      Count += llvm::popcount(B);
    return Count;
  }

  constexpr FeatureBitset &operator^=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = Bits.size(); I != E; ++I) {
      Bits[I] ^= RHS.Bits[I];
    }
    return *this;
  }
  constexpr FeatureBitset operator^(const FeatureBitset &RHS) const {
    FeatureBitset Result = *this;
    Result ^= RHS;
    return Result;
  }

  constexpr FeatureBitset &operator&=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = Bits.size(); I != E; ++I) {
      Bits[I] &= RHS.Bits[I];
    }
    return *this;
  }
  constexpr FeatureBitset operator&(const FeatureBitset &RHS) const {
    FeatureBitset Result = *this;
    Result &= RHS;
    return Result;
  }

  constexpr FeatureBitset &operator|=(const FeatureBitset &RHS) {
    for (unsigned I = 0, E = Bits.size(); I != E; ++I) {
      Bits[I] |= RHS.Bits[I];
    }
    return *this;
  }
  constexpr FeatureBitset operator|(const FeatureBitset &RHS) const {
    FeatureBitset Result = *this;
    Result |= RHS;
    return Result;
  }

  constexpr FeatureBitset operator~() const {
    FeatureBitset Result = *this;
    for (auto &B : Result.Bits)
      B = ~B;
    return Result;
  }

  bool operator==(const FeatureBitset &RHS) const {
    return std::equal(std::begin(Bits), std::end(Bits), std::begin(RHS.Bits));
  }

  bool operator!=(const FeatureBitset &RHS) const { return !(*this == RHS); }

  bool operator < (const FeatureBitset &Other) const {
    for (unsigned I = 0, E = size(); I != E; ++I) {
      bool LHS = test(I), RHS = Other.test(I);
      if (LHS != RHS)
        return LHS < RHS;
    }
    return false;
  }
};
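
// Illustrative usage sketch (feature indices are hypothetical; real indices
// come from tablegen-generated enums):
//
//   FeatureBitset FB{1, 5, 67};                 // bits 1, 5 and 67 set
//   assert(FB.test(67) && !FB.test(2));
//   FeatureBitset Mask = FB & FeatureBitset{5};
//   assert(Mask.count() == 1);                  // only bit 5 survives the AND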

/// Class used to store the subtarget bits in the tables created by tablegen.
class FeatureBitArray : public FeatureBitset {
public:
  constexpr FeatureBitArray(const std::array<uint64_t, MAX_SUBTARGET_WORDS> &B)
      : FeatureBitset(B) {}

  const FeatureBitset &getAsBitset() const { return *this; }
};

//===----------------------------------------------------------------------===//

/// Manages the enabling and disabling of subtarget specific features.
///
/// Features are encoded as a string of the form
///   "+attr1,+attr2,-attr3,...,+attrN"
/// A comma separates each feature from the next (all lowercase).
/// Each feature is prefixed with + or - indicating whether that feature
/// should be enabled or disabled contrary to the CPU specification.
class SubtargetFeatures {
  std::vector<std::string> Features;    ///< Subtarget features as a vector

public:
  explicit SubtargetFeatures(StringRef Initial = "");

  /// Returns features as a string.
  std::string getString() const;

  /// Adds Features.
  void AddFeature(StringRef String, bool Enable = true);

  void addFeaturesVector(const ArrayRef<std::string> OtherFeatures);

  /// Returns the vector of individual subtarget features.
  const std::vector<std::string> &getFeatures() const { return Features; }

  /// Prints feature string.
  void print(raw_ostream &OS) const;

  // Dumps feature info.
  void dump() const;

  /// Adds the default features for the specified target triple.
  void getDefaultSubtargetFeatures(const Triple& Triple);

  /// Determine if a feature has a flag ('+' or '-').
  static bool hasFlag(StringRef Feature) {
    assert(!Feature.empty() && "Empty string");
    // Get first character
    char Ch = Feature[0];
    // Check if first character is '+' or '-' flag
    return Ch == '+' || Ch =='-';
  }

  /// Return string stripped of flag.
  static StringRef StripFlag(StringRef Feature) {
    return hasFlag(Feature) ? Feature.substr(1) : Feature;
  }

  /// Return true if the feature is prefixed with the enable flag '+'.
  static inline bool isEnabled(StringRef Feature) {
    assert(!Feature.empty() && "Empty string");
    // Get first character
    char Ch = Feature[0];
    // Check if first character is '+' for enabled
    return Ch == '+';
  }

  /// Splits a string of comma-separated items into a vector of strings.
  static void Split(std::vector<std::string> &V, StringRef S);
};
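
// Illustrative usage sketch (feature names are hypothetical; real names come
// from the target's tablegen definitions):
//
//   SubtargetFeatures Features("+sse2");
//   Features.AddFeature("avx");                      // appends "+avx"
//   Features.AddFeature("sse4.1", /*Enable=*/false); // appends "-sse4.1"
//   std::string S = Features.getString();  // "+sse2,+avx,-sse4.1"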

} // end namespace llvm

#endif // LLVM_MC_SUBTARGETFEATURE_H
//===- MCSymbolELF.h -  -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSYMBOLELF_H
#define LLVM_MC_MCSYMBOLELF_H

#include "llvm/MC/MCSymbol.h"

namespace llvm {
class MCSymbolELF : public MCSymbol {
  /// An expression describing how to calculate the size of a symbol. If a
  /// symbol has no size this field will be NULL.
  const MCExpr *SymbolSize = nullptr;

public:
  MCSymbolELF(const StringMapEntry<bool> *Name, bool isTemporary)
      : MCSymbol(SymbolKindELF, Name, isTemporary) {}
  void setSize(const MCExpr *SS) { SymbolSize = SS; }

  const MCExpr *getSize() const { return SymbolSize; }

  void setVisibility(unsigned Visibility);
  unsigned getVisibility() const;

  void setOther(unsigned Other);
  unsigned getOther() const;

  void setType(unsigned Type) const;
  unsigned getType() const;

  void setBinding(unsigned Binding) const;
  unsigned getBinding() const;

  bool isBindingSet() const;

  void setIsWeakrefUsedInReloc() const;
  bool isWeakrefUsedInReloc() const;

  void setIsSignature() const;
  bool isSignature() const;

  void setMemtag(bool Tagged);
  bool isMemtag() const;

  static bool classof(const MCSymbol *S) { return S->isELF(); }

private:
  void setIsBindingSet() const;
};
}

#endif
//===- MCDwarf.h - Machine Code Dwarf support -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCDwarfFile to support the dwarf
// .file directive and the .loc directive.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDWARF_H
#define LLVM_MC_MCDWARF_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/StringSaver.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

template <typename T> class ArrayRef;
class MCAsmBackend;
class MCContext;
class MCObjectStreamer;
class MCSection;
class MCStreamer;
class MCSymbol;
class raw_ostream;
class SourceMgr;

namespace mcdwarf {
// Emit the common part of the DWARF 5 range/locations list tables header.
MCSymbol *emitListsTableHeaderStart(MCStreamer &S);
} // namespace mcdwarf

/// Manage the .debug_line_str section contents, if we use it.
class MCDwarfLineStr {
  BumpPtrAllocator Alloc;
  StringSaver Saver{Alloc};
  MCSymbol *LineStrLabel = nullptr;
  StringTableBuilder LineStrings{StringTableBuilder::DWARF};
  bool UseRelocs = false;

public:
  /// Construct an instance that can emit .debug_line_str (for use in a normal
  /// v5 line table).
  explicit MCDwarfLineStr(MCContext &Ctx);

  StringSaver &getSaver() { return Saver; }

  /// Emit a reference to the string.
  void emitRef(MCStreamer *MCOS, StringRef Path);

  /// Emit the .debug_line_str section if appropriate.
  void emitSection(MCStreamer *MCOS);

  /// Returns finalized section.
  SmallString<0> getFinalizedData();

  /// Adds path \p Path to the line string. Returns offset in the
  /// .debug_line_str section.
  size_t addString(StringRef Path);
};

/// Instances of this class represent the name of the dwarf .file directive and
/// its associated dwarf file number in the MC file. MCDwarfFile's are created
/// and uniqued by the MCContext class. In Dwarf 4 file numbers start from 1;
/// i.e. the entry with file number 1 is the first element in the vector of
/// DwarfFiles and there is no MCDwarfFile with file number 0. In Dwarf 5 file
/// numbers start from 0, with the MCDwarfFile with file number 0 being the
/// primary source file, and file numbers correspond to their index in the
/// vector.
struct MCDwarfFile {
  // The base name of the file without its directory path.
  std::string Name;

  // The index into the list of directory names for this file name.
  unsigned DirIndex = 0;

  /// The MD5 checksum, if there is one.
  std::optional<MD5::MD5Result> Checksum;

  /// The source code of the file. Non-owning reference to data allocated in
  /// MCContext.
  std::optional<StringRef> Source;
};

/// Instances of this class represent the information from a
/// dwarf .loc directive.
class MCDwarfLoc {
  uint32_t FileNum;
  uint32_t Line;
  uint16_t Column;
  // Flags (see #define's below)
  uint8_t Flags;
  uint8_t Isa;
  uint32_t Discriminator;

// Flag that indicates the initial value of the is_stmt_start flag.
#define DWARF2_LINE_DEFAULT_IS_STMT 1

#define DWARF2_FLAG_IS_STMT (1 << 0)
#define DWARF2_FLAG_BASIC_BLOCK (1 << 1)
#define DWARF2_FLAG_PROLOGUE_END (1 << 2)
#define DWARF2_FLAG_EPILOGUE_BEGIN (1 << 3)

private: // MCContext manages these
  friend class MCContext;
  friend class MCDwarfLineEntry;

  MCDwarfLoc(unsigned fileNum, unsigned line, unsigned column, unsigned flags,
             unsigned isa, unsigned discriminator)
      : FileNum(fileNum), Line(line), Column(column), Flags(flags), Isa(isa),
        Discriminator(discriminator) {}

  // Allow the default copy constructor and assignment operator to be used
  // for an MCDwarfLoc object.

public:
  /// Get the FileNum of this MCDwarfLoc.
  unsigned getFileNum() const { return FileNum; }

  /// Get the Line of this MCDwarfLoc.
  unsigned getLine() const { return Line; }

  /// Get the Column of this MCDwarfLoc.
  unsigned getColumn() const { return Column; }

  /// Get the Flags of this MCDwarfLoc.
  unsigned getFlags() const { return Flags; }

  /// Get the Isa of this MCDwarfLoc.
  unsigned getIsa() const { return Isa; }

  /// Get the Discriminator of this MCDwarfLoc.
  unsigned getDiscriminator() const { return Discriminator; }

  /// Set the FileNum of this MCDwarfLoc.
  void setFileNum(unsigned fileNum) { FileNum = fileNum; }

  /// Set the Line of this MCDwarfLoc.
  void setLine(unsigned line) { Line = line; }

  /// Set the Column of this MCDwarfLoc.
  void setColumn(unsigned column) {
    assert(column <= UINT16_MAX);
    Column = column;
  }

  /// Set the Flags of this MCDwarfLoc.
  void setFlags(unsigned flags) {
    assert(flags <= UINT8_MAX);
    Flags = flags;
  }

  /// Set the Isa of this MCDwarfLoc.
  void setIsa(unsigned isa) {
    assert(isa <= UINT8_MAX);
    Isa = isa;
  }

  /// Set the Discriminator of this MCDwarfLoc.
  void setDiscriminator(unsigned discriminator) {
    Discriminator = discriminator;
  }
};

/// Instances of this class represent the line information for
/// a dwarf line table entry, which is created after a machine
/// instruction is assembled.  It uses an address from a temporary label
/// created at the current address in the current section and the info from
/// the last .loc directive seen, as stored in the context.
class MCDwarfLineEntry : public MCDwarfLoc {
  MCSymbol *Label;

private:
  // Allow the default copy constructor and assignment operator to be used
  // for an MCDwarfLineEntry object.

public:
  // Constructor to create an MCDwarfLineEntry given a symbol and the dwarf loc.
  MCDwarfLineEntry(MCSymbol *label, const MCDwarfLoc loc)
      : MCDwarfLoc(loc), Label(label) {}

  MCSymbol *getLabel() const { return Label; }

  // This indicates the line entry is synthesized for an end entry.
  bool IsEndEntry = false;

  // Override the label with the given EndLabel.
  void setEndLabel(MCSymbol *EndLabel) {
    Label = EndLabel;
    IsEndEntry = true;
  }

  // This is called when an instruction is assembled into the specified
  // section; if there is information from the last .loc directive that has
  // yet to have a line entry made for it, one is made.
  static void make(MCStreamer *MCOS, MCSection *Section);
};

/// Instances of this class represent the line information for a compile
/// unit where machine instructions have been assembled after seeing .loc
/// directives.  This is the information used to build the dwarf line
/// table for a section.
class MCLineSection {
public:
  // Add an entry to this MCLineSection's line entries.
  void addLineEntry(const MCDwarfLineEntry &LineEntry, MCSection *Sec) {
    MCLineDivisions[Sec].push_back(LineEntry);
  }

  // Add an end entry by cloning the last entry, if one exists, for the section
  // the given EndLabel belongs to. The label is replaced by the given EndLabel.
  void addEndEntry(MCSymbol *EndLabel);

  using MCDwarfLineEntryCollection = std::vector<MCDwarfLineEntry>;
  using iterator = MCDwarfLineEntryCollection::iterator;
  using const_iterator = MCDwarfLineEntryCollection::const_iterator;
  using MCLineDivisionMap = MapVector<MCSection *, MCDwarfLineEntryCollection>;

private:
  // A collection of MCDwarfLineEntry for each section.
  MCLineDivisionMap MCLineDivisions;

public:
  // Returns the collection of MCDwarfLineEntry for a given Compile Unit ID.
  const MCLineDivisionMap &getMCLineEntries() const {
    return MCLineDivisions;
  }
};

struct MCDwarfLineTableParams {
  /// First special line opcode - leave room for the standard opcodes.
  /// Note: If you want to change this, you'll have to update the
  /// "StandardOpcodeLengths" table that is emitted in
  /// \c Emit().
  uint8_t DWARF2LineOpcodeBase = 13;
  /// Minimum line offset in a special line info. opcode.  The value
  /// -5 was chosen to give a reasonable range of values.
  int8_t DWARF2LineBase = -5;
  /// Range of line offsets in a special line info. opcode.
  uint8_t DWARF2LineRange = 14;
};
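
// Sketch of how these parameters combine, per the DWARF line-number program
// encoding: a special opcode packs a (line delta, address delta) pair as
//
//   Opcode = (LineDelta - DWARF2LineBase)
//          + (DWARF2LineRange * AddrDelta) + DWARF2LineOpcodeBase
//
// With the defaults above (base -5, range 14, opcode base 13), LineDelta can
// range from -5 to 8; e.g. LineDelta 1 with AddrDelta 2 encodes as
// (1 - (-5)) + 14 * 2 + 13 = 47.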

struct MCDwarfLineTableHeader {
  MCSymbol *Label = nullptr;
  SmallVector<std::string, 3> MCDwarfDirs;
  SmallVector<MCDwarfFile, 3> MCDwarfFiles;
  StringMap<unsigned> SourceIdMap;
  std::string CompilationDir;
  MCDwarfFile RootFile;
  bool HasSource = false;
private:
  bool HasAllMD5 = true;
  bool HasAnyMD5 = false;

public:
  MCDwarfLineTableHeader() = default;

  Expected<unsigned> tryGetFile(StringRef &Directory, StringRef &FileName,
                                std::optional<MD5::MD5Result> Checksum,
                                std::optional<StringRef> Source,
                                uint16_t DwarfVersion, unsigned FileNumber = 0);
  std::pair<MCSymbol *, MCSymbol *>
  Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
       std::optional<MCDwarfLineStr> &LineStr) const;
  std::pair<MCSymbol *, MCSymbol *>
  Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
       ArrayRef<char> SpecialOpcodeLengths,
       std::optional<MCDwarfLineStr> &LineStr) const;
  void resetMD5Usage() {
    HasAllMD5 = true;
    HasAnyMD5 = false;
  }
  void trackMD5Usage(bool MD5Used) {
    HasAllMD5 &= MD5Used;
    HasAnyMD5 |= MD5Used;
  }
  bool isMD5UsageConsistent() const {
    return MCDwarfFiles.empty() || (HasAllMD5 == HasAnyMD5);
  }

  void setRootFile(StringRef Directory, StringRef FileName,
                   std::optional<MD5::MD5Result> Checksum,
                   std::optional<StringRef> Source) {
    CompilationDir = std::string(Directory);
    RootFile.Name = std::string(FileName);
    RootFile.DirIndex = 0;
    RootFile.Checksum = Checksum;
    RootFile.Source = Source;
    trackMD5Usage(Checksum.has_value());
    HasSource = Source.has_value();
  }

  void resetFileTable() {
    MCDwarfDirs.clear();
    MCDwarfFiles.clear();
    RootFile.Name.clear();
    resetMD5Usage();
    HasSource = false;
  }

private:
  void emitV2FileDirTables(MCStreamer *MCOS) const;
  void emitV5FileDirTables(MCStreamer *MCOS,
                           std::optional<MCDwarfLineStr> &LineStr) const;
};

class MCDwarfDwoLineTable {
  MCDwarfLineTableHeader Header;
  bool HasSplitLineTable = false;

public:
  void maybeSetRootFile(StringRef Directory, StringRef FileName,
                        std::optional<MD5::MD5Result> Checksum,
                        std::optional<StringRef> Source) {
    if (!Header.RootFile.Name.empty())
      return;
    Header.setRootFile(Directory, FileName, Checksum, Source);
  }

  unsigned getFile(StringRef Directory, StringRef FileName,
                   std::optional<MD5::MD5Result> Checksum,
                   uint16_t DwarfVersion, std::optional<StringRef> Source) {
    HasSplitLineTable = true;
    return cantFail(Header.tryGetFile(Directory, FileName, Checksum, Source,
                                      DwarfVersion));
  }

  void Emit(MCStreamer &MCOS, MCDwarfLineTableParams Params,
            MCSection *Section) const;
};

class MCDwarfLineTable {
  MCDwarfLineTableHeader Header;
  MCLineSection MCLineSections;

public:
  // This emits the Dwarf file and the line tables for all Compile Units.
  static void emit(MCStreamer *MCOS, MCDwarfLineTableParams Params);

  // This emits the Dwarf file and the line tables for a given Compile Unit.
  void emitCU(MCStreamer *MCOS, MCDwarfLineTableParams Params,
              std::optional<MCDwarfLineStr> &LineStr) const;

  // This emits a single line table associated with a given Section.
  static void
  emitOne(MCStreamer *MCOS, MCSection *Section,
          const MCLineSection::MCDwarfLineEntryCollection &LineEntries);

  Expected<unsigned> tryGetFile(StringRef &Directory, StringRef &FileName,
                                std::optional<MD5::MD5Result> Checksum,
                                std::optional<StringRef> Source,
                                uint16_t DwarfVersion, unsigned FileNumber = 0);
  unsigned getFile(StringRef &Directory, StringRef &FileName,
                   std::optional<MD5::MD5Result> Checksum,
                   std::optional<StringRef> Source, uint16_t DwarfVersion,
                   unsigned FileNumber = 0) {
    return cantFail(tryGetFile(Directory, FileName, Checksum, Source,
                               DwarfVersion, FileNumber));
  }

  void setRootFile(StringRef Directory, StringRef FileName,
                   std::optional<MD5::MD5Result> Checksum,
                   std::optional<StringRef> Source) {
    Header.CompilationDir = std::string(Directory);
    Header.RootFile.Name = std::string(FileName);
    Header.RootFile.DirIndex = 0;
    Header.RootFile.Checksum = Checksum;
    Header.RootFile.Source = Source;
    Header.trackMD5Usage(Checksum.has_value());
    Header.HasSource = Source.has_value();
  }

  void resetFileTable() { Header.resetFileTable(); }

  bool hasRootFile() const { return !Header.RootFile.Name.empty(); }

  MCDwarfFile &getRootFile() { return Header.RootFile; }
  const MCDwarfFile &getRootFile() const { return Header.RootFile; }

  // Report whether MD5 usage has been consistent (all-or-none).
  bool isMD5UsageConsistent() const { return Header.isMD5UsageConsistent(); }

  MCSymbol *getLabel() const {
    return Header.Label;
  }

  void setLabel(MCSymbol *Label) {
    Header.Label = Label;
  }

  const SmallVectorImpl<std::string> &getMCDwarfDirs() const {
    return Header.MCDwarfDirs;
  }

  SmallVectorImpl<std::string> &getMCDwarfDirs() {
    return Header.MCDwarfDirs;
  }

  const SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles() const {
    return Header.MCDwarfFiles;
  }

  SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles() {
    return Header.MCDwarfFiles;
  }

  const MCLineSection &getMCLineSections() const {
    return MCLineSections;
  }
  MCLineSection &getMCLineSections() {
    return MCLineSections;
  }
};

class MCDwarfLineAddr {
public:
  /// Utility function to encode a Dwarf pair of LineDelta and AddrDeltas.
  static void encode(MCContext &Context, MCDwarfLineTableParams Params,
                     int64_t LineDelta, uint64_t AddrDelta, SmallVectorImpl<char> &OS);

  /// Utility function to emit the encoding to a streamer.
  static void Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
                   int64_t LineDelta, uint64_t AddrDelta);
};

class MCGenDwarfInfo {
public:
  //
  // When generating dwarf for assembly source files this emits the Dwarf
  // sections.
  //
  static void Emit(MCStreamer *MCOS);
};

// When generating dwarf for assembly source files this is the info that is
// needed to be gathered for each symbol that will have a dwarf label.
class MCGenDwarfLabelEntry {
private:
  // Name of the symbol without a leading underbar, if any.
  StringRef Name;
  // The dwarf file number this symbol is in.
  unsigned FileNumber;
  // The line number this symbol is at.
  unsigned LineNumber;
  // The low_pc for the dwarf label is taken from this symbol.
  MCSymbol *Label;

public:
  MCGenDwarfLabelEntry(StringRef name, unsigned fileNumber, unsigned lineNumber,
                       MCSymbol *label)
      : Name(name), FileNumber(fileNumber), LineNumber(lineNumber),
        Label(label) {}

  StringRef getName() const { return Name; }
  unsigned getFileNumber() const { return FileNumber; }
  unsigned getLineNumber() const { return LineNumber; }
  MCSymbol *getLabel() const { return Label; }

  // This is called when label is created when we are generating dwarf for
  // assembly source files.
  static void Make(MCSymbol *Symbol, MCStreamer *MCOS, SourceMgr &SrcMgr,
                   SMLoc &Loc);
};

class MCCFIInstruction {
public:
  enum OpType {
    OpSameValue,
    OpRememberState,
    OpRestoreState,
    OpOffset,
    OpLLVMDefAspaceCfa,
    OpDefCfaRegister,
    OpDefCfaOffset,
    OpDefCfa,
    OpRelOffset,
    OpAdjustCfaOffset,
    OpEscape,
    OpRestore,
    OpUndefined,
    OpRegister,
    OpWindowSave,
    OpNegateRAState,
    OpGnuArgsSize
  };

private:
  OpType Operation;
  MCSymbol *Label;
  unsigned Register;
  union {
    int Offset;
    unsigned Register2;
  };
  unsigned AddressSpace = ~0u;
  SMLoc Loc;
  std::vector<char> Values;
  std::string Comment;

  MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R, int O, SMLoc Loc,
                   StringRef V = "", StringRef Comment = "")
      : Operation(Op), Label(L), Register(R), Offset(O), Loc(Loc),
        Values(V.begin(), V.end()), Comment(Comment) {
    assert(Op != OpRegister && Op != OpLLVMDefAspaceCfa);
  }

  MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R1, unsigned R2, SMLoc Loc)
      : Operation(Op), Label(L), Register(R1), Register2(R2), Loc(Loc) {
    assert(Op == OpRegister);
  }

  MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R, int O, unsigned AS,
                   SMLoc Loc)
      : Operation(Op), Label(L), Register(R), Offset(O), AddressSpace(AS),
        Loc(Loc) {
    assert(Op == OpLLVMDefAspaceCfa);
  }

public:
  /// .cfi_def_cfa defines a rule for computing CFA as: take address from
  /// Register and add Offset to it.
  static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int Offset,
                                    SMLoc Loc = {}) {
    return MCCFIInstruction(OpDefCfa, L, Register, Offset, Loc);
  }

  /// .cfi_def_cfa_register modifies a rule for computing CFA. From now
  /// on Register will be used instead of the old one. Offset remains the same.
  static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register,
                                               SMLoc Loc = {}) {
    return MCCFIInstruction(OpDefCfaRegister, L, Register, 0, Loc);
  }

  /// .cfi_def_cfa_offset modifies a rule for computing CFA. Register
  /// remains the same, but offset is new. Note that it is the absolute offset
  /// that will be added to a defined register to compute the CFA address.
  static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset,
                                          SMLoc Loc = {}) {
    return MCCFIInstruction(OpDefCfaOffset, L, 0, Offset, Loc);
  }

  /// .cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but
  /// Offset is a relative value that is added/subtracted from the previous
  /// offset.
  static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int Adjustment,
                                                SMLoc Loc = {}) {
    return MCCFIInstruction(OpAdjustCfaOffset, L, 0, Adjustment, Loc);
  }

  // FIXME: Update the remaining docs to use the new proposal wording.
  /// .cfi_llvm_def_aspace_cfa defines the rule for computing the CFA to
  /// be the result of evaluating the DWARF operation expression
  /// `DW_OP_constu AS; DW_OP_aspace_bregx R, B` as a location description.
  static MCCFIInstruction createLLVMDefAspaceCfa(MCSymbol *L, unsigned Register,
                                                 int Offset,
                                                 unsigned AddressSpace,
                                                 SMLoc Loc) {
    return MCCFIInstruction(OpLLVMDefAspaceCfa, L, Register, Offset,
                            AddressSpace, Loc);
  }

  /// .cfi_offset Previous value of Register is saved at offset Offset
  /// from CFA.
  static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register,
                                       int Offset, SMLoc Loc = {}) {
    return MCCFIInstruction(OpOffset, L, Register, Offset, Loc);
  }

  /// .cfi_rel_offset Previous value of Register is saved at offset
  /// Offset from the current CFA register. This is transformed to .cfi_offset
  /// using the known displacement of the CFA register from the CFA.
  static MCCFIInstruction createRelOffset(MCSymbol *L, unsigned Register,
                                          int Offset, SMLoc Loc = {}) {
    return MCCFIInstruction(OpRelOffset, L, Register, Offset, Loc);
  }

  /// .cfi_register Previous value of Register1 is saved in
  /// register Register2.
  static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1,
                                         unsigned Register2, SMLoc Loc = {}) {
    return MCCFIInstruction(OpRegister, L, Register1, Register2, Loc);
  }

  /// .cfi_window_save SPARC register window is saved.
  static MCCFIInstruction createWindowSave(MCSymbol *L, SMLoc Loc = {}) {
    return MCCFIInstruction(OpWindowSave, L, 0, 0, Loc);
  }

  /// .cfi_negate_ra_state AArch64 negate RA state.
  static MCCFIInstruction createNegateRAState(MCSymbol *L, SMLoc Loc = {}) {
    return MCCFIInstruction(OpNegateRAState, L, 0, 0, Loc);
  }

  /// .cfi_restore says that the rule for Register is now the same as it
  /// was at the beginning of the function, after all initial instructions added
  /// by .cfi_startproc were executed.
  static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register,
                                        SMLoc Loc = {}) {
    return MCCFIInstruction(OpRestore, L, Register, 0, Loc);
  }

  /// .cfi_undefined From now on the previous value of Register can't be
  /// restored anymore.
  static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register,
                                          SMLoc Loc = {}) {
    return MCCFIInstruction(OpUndefined, L, Register, 0, Loc);
  }

  /// .cfi_same_value Current value of Register is the same as in the
  /// previous frame. I.e., no restoration is needed.
  static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register,
                                          SMLoc Loc = {}) {
    return MCCFIInstruction(OpSameValue, L, Register, 0, Loc);
  }

  /// .cfi_remember_state Save all current rules for all registers.
  static MCCFIInstruction createRememberState(MCSymbol *L, SMLoc Loc = {}) {
    return MCCFIInstruction(OpRememberState, L, 0, 0, Loc);
  }

  /// .cfi_restore_state Restore the previously saved state.
  static MCCFIInstruction createRestoreState(MCSymbol *L, SMLoc Loc = {}) {
    return MCCFIInstruction(OpRestoreState, L, 0, 0, Loc);
  }

  /// .cfi_escape Allows the user to add arbitrary bytes to the unwind
  /// info.
  static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals,
                                       SMLoc Loc = {}, StringRef Comment = "") {
    return MCCFIInstruction(OpEscape, L, 0, 0, Loc, Vals, Comment);
  }

  /// A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE
  static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int Size,
                                            SMLoc Loc = {}) {
    return MCCFIInstruction(OpGnuArgsSize, L, 0, Size, Loc);
  }

  OpType getOperation() const { return Operation; }
  MCSymbol *getLabel() const { return Label; }

  unsigned getRegister() const {
    assert(Operation == OpDefCfa || Operation == OpOffset ||
           Operation == OpRestore || Operation == OpUndefined ||
           Operation == OpSameValue || Operation == OpDefCfaRegister ||
           Operation == OpRelOffset || Operation == OpRegister ||
           Operation == OpLLVMDefAspaceCfa);
    return Register;
  }

  unsigned getRegister2() const {
    assert(Operation == OpRegister);
    return Register2;
  }

  unsigned getAddressSpace() const {
    assert(Operation == OpLLVMDefAspaceCfa);
    return AddressSpace;
  }

  int getOffset() const {
    assert(Operation == OpDefCfa || Operation == OpOffset ||
           Operation == OpRelOffset || Operation == OpDefCfaOffset ||
           Operation == OpAdjustCfaOffset || Operation == OpGnuArgsSize ||
           Operation == OpLLVMDefAspaceCfa);
    return Offset;
  }

  StringRef getValues() const {
    assert(Operation == OpEscape);
    return StringRef(&Values[0], Values.size());
  }

  StringRef getComment() const { return Comment; }
  SMLoc getLoc() const { return Loc; }
};
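
// Illustrative sketch (the labels and the DWARF register number are
// assumptions; 6 is rbp in the x86-64 DWARF numbering): the CFI for a typical
// "push %rbp; mov %rsp, %rbp" prologue could be built as
//
//   // After the push: CFA is rsp + 16, and rbp is saved at CFA - 16.
//   MCCFIInstruction::cfiDefCfaOffset(PushLabel, 16);
//   MCCFIInstruction::createOffset(PushLabel, /*Register=*/6, /*Offset=*/-16);
//   // After the mov: compute the CFA from rbp instead of rsp.
//   MCCFIInstruction::createDefCfaRegister(MovLabel, /*Register=*/6);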

struct MCDwarfFrameInfo {
  MCDwarfFrameInfo() = default;

  MCSymbol *Begin = nullptr;
  MCSymbol *End = nullptr;
  const MCSymbol *Personality = nullptr;
  const MCSymbol *Lsda = nullptr;
  std::vector<MCCFIInstruction> Instructions;
  unsigned CurrentCfaRegister = 0;
  unsigned PersonalityEncoding = 0;
  unsigned LsdaEncoding = 0;
  uint32_t CompactUnwindEncoding = 0;
  bool IsSignalFrame = false;
  bool IsSimple = false;
  unsigned RAReg = static_cast<unsigned>(INT_MAX);
  bool IsBKeyFrame = false;
  bool IsMTETaggedFrame = false;
};

class MCDwarfFrameEmitter {
public:
  //
  // This emits the frame info section.
  //
  static void Emit(MCObjectStreamer &streamer, MCAsmBackend *MAB, bool isEH);
  static void encodeAdvanceLoc(MCContext &Context, uint64_t AddrDelta,
                               SmallVectorImpl<char> &OS);
};

} // end namespace llvm

#endif // LLVM_MC_MCDWARF_H
//===- MCWinCOFFStreamer.h - COFF Object File Interface ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCWINCOFFSTREAMER_H
#define LLVM_MC_MCWINCOFFSTREAMER_H

#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCObjectStreamer.h"

namespace llvm {

class MCAsmBackend;
class MCContext;
class MCCodeEmitter;
class MCInst;
class MCSection;
class MCSubtargetInfo;
class MCSymbol;
class StringRef;
class raw_pwrite_stream;

class MCWinCOFFStreamer : public MCObjectStreamer {
public:
  MCWinCOFFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> MAB,
                    std::unique_ptr<MCCodeEmitter> CE,
                    std::unique_ptr<MCObjectWriter> OW);

  /// state management
  void reset() override {
    CurSymbol = nullptr;
    MCObjectStreamer::reset();
  }

  /// \name MCStreamer interface
  /// \{

  void initSections(bool NoExecStack, const MCSubtargetInfo &STI) override;
  void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
  void emitAssemblerFlag(MCAssemblerFlag Flag) override;
  void emitThumbFunc(MCSymbol *Func) override;
  bool emitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
  void emitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
  void beginCOFFSymbolDef(MCSymbol const *Symbol) override;
  void emitCOFFSymbolStorageClass(int StorageClass) override;
  void emitCOFFSymbolType(int Type) override;
  void endCOFFSymbolDef() override;
  void emitCOFFSafeSEH(MCSymbol const *Symbol) override;
  void emitCOFFSymbolIndex(MCSymbol const *Symbol) override;
  void emitCOFFSectionIndex(MCSymbol const *Symbol) override;
  void emitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset) override;
  void emitCOFFImgRel32(MCSymbol const *Symbol, int64_t Offset) override;
  void emitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                        Align ByteAlignment) override;
  void emitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                             Align ByteAlignment) override;
  void emitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
  void emitZerofill(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
                    Align ByteAlignment, SMLoc Loc = SMLoc()) override;
  void emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
                      Align ByteAlignment) override;
  void emitIdent(StringRef IdentString) override;
  void emitWinEHHandlerData(SMLoc Loc) override;
  void emitCGProfileEntry(const MCSymbolRefExpr *From,
                          const MCSymbolRefExpr *To, uint64_t Count) override;
  void finishImpl() override;

  /// \}

protected:
  const MCSymbol *CurSymbol;

  void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &STI) override;

  void finalizeCGProfileEntry(const MCSymbolRefExpr *&S);
  void finalizeCGProfile();

private:
  void Error(const Twine &Msg) const;
};

} // end namespace llvm

#endif // LLVM_MC_MCWINCOFFSTREAMER_H
//===- MCSection.h - Machine Code Sections ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSection class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTION_H
#define LLVM_MC_MCSECTION_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <utility>

namespace llvm {

class MCAsmInfo;
class MCContext;
class MCExpr;
class MCSymbol;
class raw_ostream;
class Triple;

template <> struct ilist_alloc_traits<MCFragment> {
  static void deleteNode(MCFragment *V);
};

/// Instances of this class represent a uniqued identifier for a section in the
/// current translation unit.  The MCContext class uniques and creates these.
class MCSection {
public:
  static constexpr unsigned NonUniqueID = ~0U;

  enum SectionVariant {
    SV_COFF = 0,
    SV_ELF,
    SV_GOFF,
    SV_MachO,
    SV_Wasm,
    SV_XCOFF,
    SV_SPIRV,
    SV_DXContainer,
  };

  /// Express the state of bundle locked groups while emitting code.
  enum BundleLockStateType {
    NotBundleLocked,
    BundleLocked,
    BundleLockedAlignToEnd
  };

  using FragmentListType = iplist<MCFragment>;

  using const_iterator = FragmentListType::const_iterator;
  using iterator = FragmentListType::iterator;

  using const_reverse_iterator = FragmentListType::const_reverse_iterator;
  using reverse_iterator = FragmentListType::reverse_iterator;

private:
  MCSymbol *Begin;
  MCSymbol *End = nullptr;
  /// The alignment requirement of this section.
  Align Alignment;
  /// The section index in the assemblers section list.
  unsigned Ordinal = 0;
  /// The index of this section in the layout order.
  unsigned LayoutOrder = 0;

  /// Keeping track of bundle-locked state.
  BundleLockStateType BundleLockState = NotBundleLocked;

  /// Current nesting depth of bundle_lock directives.
  unsigned BundleLockNestingDepth = 0;

  /// We've seen a bundle_lock directive but not its first instruction
  /// yet.
  bool BundleGroupBeforeFirstInst : 1;

  /// Whether this section has had instructions emitted into it.
  bool HasInstructions : 1;

  bool IsRegistered : 1;

  MCDummyFragment DummyFragment;

  FragmentListType Fragments;

  /// Mapping from subsection number to insertion point for subsection numbers
  /// below that number.
  SmallVector<std::pair<unsigned, MCFragment *>, 1> SubsectionFragmentMap;

  /// State for tracking labels that don't yet have Fragments
  struct PendingLabel {
    MCSymbol* Sym;
    unsigned Subsection;
    PendingLabel(MCSymbol* Sym, unsigned Subsection = 0)
      : Sym(Sym), Subsection(Subsection) {}
  };
  SmallVector<PendingLabel, 2> PendingLabels;

protected:
  // TODO Make Name private when possible.
  StringRef Name;
  SectionVariant Variant;
  SectionKind Kind;

  MCSection(SectionVariant V, StringRef Name, SectionKind K, MCSymbol *Begin);
  ~MCSection();

public:
  MCSection(const MCSection &) = delete;
  MCSection &operator=(const MCSection &) = delete;

  StringRef getName() const { return Name; }
  SectionKind getKind() const { return Kind; }

  SectionVariant getVariant() const { return Variant; }

  MCSymbol *getBeginSymbol() { return Begin; }
  const MCSymbol *getBeginSymbol() const {
    return const_cast<MCSection *>(this)->getBeginSymbol();
  }
  void setBeginSymbol(MCSymbol *Sym) {
    assert(!Begin);
    Begin = Sym;
  }
  MCSymbol *getEndSymbol(MCContext &Ctx);
  bool hasEnded() const;

  Align getAlign() const { return Alignment; }
  void setAlignment(Align Value) { Alignment = Value; }

  /// Makes sure that Alignment is at least MinAlignment.
  void ensureMinAlignment(Align MinAlignment) {
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
  }

  unsigned getOrdinal() const { return Ordinal; }
  void setOrdinal(unsigned Value) { Ordinal = Value; }

  unsigned getLayoutOrder() const { return LayoutOrder; }
  void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }

  BundleLockStateType getBundleLockState() const { return BundleLockState; }
  void setBundleLockState(BundleLockStateType NewState);
  bool isBundleLocked() const { return BundleLockState != NotBundleLocked; }

  bool isBundleGroupBeforeFirstInst() const {
    return BundleGroupBeforeFirstInst;
  }
  void setBundleGroupBeforeFirstInst(bool IsFirst) {
    BundleGroupBeforeFirstInst = IsFirst;
  }

  bool hasInstructions() const { return HasInstructions; }
  void setHasInstructions(bool Value) { HasInstructions = Value; }

  bool isRegistered() const { return IsRegistered; }
  void setIsRegistered(bool Value) { IsRegistered = Value; }

  MCSection::FragmentListType &getFragmentList() { return Fragments; }
  const MCSection::FragmentListType &getFragmentList() const {
    return const_cast<MCSection *>(this)->getFragmentList();
  }

  /// Support for MCFragment::getNextNode().
  static FragmentListType MCSection::*getSublistAccess(MCFragment *) {
    return &MCSection::Fragments;
  }

  const MCDummyFragment &getDummyFragment() const { return DummyFragment; }
  MCDummyFragment &getDummyFragment() { return DummyFragment; }

  iterator begin() { return Fragments.begin(); }
  const_iterator begin() const { return Fragments.begin(); }

  iterator end() { return Fragments.end(); }
  const_iterator end() const { return Fragments.end(); }

  MCSection::iterator getSubsectionInsertionPoint(unsigned Subsection);

  void dump() const;

  virtual void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                                    raw_ostream &OS,
                                    const MCExpr *Subsection) const = 0;

  /// Return true if a .align directive should use "optimized nops" to fill
  /// instead of 0s.
  virtual bool useCodeAlign() const = 0;

  /// Check whether this section is "virtual", that is has no actual object
  /// file contents.
  virtual bool isVirtualSection() const = 0;

  virtual StringRef getVirtualSectionKind() const;

  /// Add a pending label for the requested subsection. This label will be
  /// associated with a fragment in flushPendingLabels()
  void addPendingLabel(MCSymbol* label, unsigned Subsection = 0);

  /// Associate all pending labels in a subsection with a fragment.
  void flushPendingLabels(MCFragment *F, uint64_t FOffset = 0,
                          unsigned Subsection = 0);

  /// Associate all pending labels with empty data fragments. One fragment
  /// will be created for each subsection as necessary.
  void flushPendingLabels();
};

} // end namespace llvm

#endif // LLVM_MC_MCSECTION_H
//===- MCSymbolCOFF.h -  ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSYMBOLCOFF_H
#define LLVM_MC_MCSYMBOLCOFF_H

#include "llvm/BinaryFormat/COFF.h"
#include "llvm/MC/MCSymbol.h"
#include <cstdint>

namespace llvm {

class MCSymbolCOFF : public MCSymbol {
  /// This corresponds to the e_type field of the COFF symbol.
  mutable uint16_t Type = 0;

  enum SymbolFlags : uint16_t {
    SF_ClassMask = 0x00FF,
    SF_ClassShift = 0,

    SF_SafeSEH = 0x0100,
    SF_WeakExternalCharacteristicsMask = 0x0E00,
    SF_WeakExternalCharacteristicsShift = 9,
  };

public:
  MCSymbolCOFF(const StringMapEntry<bool> *Name, bool isTemporary)
      : MCSymbol(SymbolKindCOFF, Name, isTemporary) {}

  uint16_t getType() const {
    return Type;
  }
  void setType(uint16_t Ty) const {
    Type = Ty;
  }

  uint16_t getClass() const {
    return (getFlags() & SF_ClassMask) >> SF_ClassShift;
  }
  void setClass(uint16_t StorageClass) const {
    modifyFlags(StorageClass << SF_ClassShift, SF_ClassMask);
  }

  COFF::WeakExternalCharacteristics getWeakExternalCharacteristics() const {
    return static_cast<COFF::WeakExternalCharacteristics>((getFlags() & SF_WeakExternalCharacteristicsMask) >>
           SF_WeakExternalCharacteristicsShift);
  }
  void setWeakExternalCharacteristics(COFF::WeakExternalCharacteristics Characteristics) const {
    modifyFlags(Characteristics << SF_WeakExternalCharacteristicsShift,
                SF_WeakExternalCharacteristicsMask);
  }
  void setIsWeakExternal(bool WeakExt) const {
    IsWeakExternal = WeakExt;
  }

  bool isSafeSEH() const {
    return getFlags() & SF_SafeSEH;
  }
  void setIsSafeSEH() const {
    modifyFlags(SF_SafeSEH, SF_SafeSEH);
  }

  static bool classof(const MCSymbol *S) { return S->isCOFF(); }
};
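
// Illustrative sketch (the symbol's origin is hypothetical): storage class,
// SafeSEH and weak-external characteristics are packed into the MCSymbol flag
// word via the masks above, so they can be set and queried independently.
//
//   MCSymbolCOFF *Sym = cast<MCSymbolCOFF>(Ctx.getOrCreateSymbol("foo"));
//   Sym->setClass(COFF::IMAGE_SYM_CLASS_EXTERNAL); // bits 0-7
//   Sym->setIsSafeSEH();                           // bit 8
//   assert(Sym->getClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL);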

} // end namespace llvm

#endif // LLVM_MC_MCSYMBOLCOFF_H
//===- MCSectionDXContainer.h - DXContainer MC Sections ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionDXContainer class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONDXCONTAINER_H
#define LLVM_MC_MCSECTIONDXCONTAINER_H

#include "llvm/MC/MCSection.h"
#include "llvm/MC/SectionKind.h"

namespace llvm {

class MCSymbol;

class MCSectionDXContainer final : public MCSection {
  friend class MCContext;

  MCSectionDXContainer(StringRef Name, SectionKind K, MCSymbol *Begin)
      : MCSection(SV_DXContainer, Name, K, Begin) {}

public:
  void printSwitchToSection(const MCAsmInfo &, const Triple &, raw_ostream &,
                            const MCExpr *) const override;
  bool useCodeAlign() const override { return false; }
  bool isVirtualSection() const override { return false; }
};

} // end namespace llvm

#endif // LLVM_MC_MCSECTIONDXCONTAINER_H
//===-- llvm/MC/MCSymbolGOFF.h - GOFF Machine Code Symbols ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the MCSymbolGOFF class
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSYMBOLGOFF_H
#define LLVM_MC_MCSYMBOLGOFF_H

#include "llvm/MC/MCSymbol.h"

namespace llvm {

class MCSymbolGOFF : public MCSymbol {
public:
  MCSymbolGOFF(const StringMapEntry<bool> *Name, bool IsTemporary)
      : MCSymbol(SymbolKindGOFF, Name, IsTemporary) {}
  static bool classof(const MCSymbol *S) { return S->isGOFF(); }
};
} // end namespace llvm

#endif
//===- llvm/MC/LaneBitmask.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// A common definition of LaneBitmask for use in TableGen and CodeGen.
///
/// A lane mask is a bitmask representing the covering of a register with
/// sub-registers.
///
/// This is typically used to track liveness at sub-register granularity.
/// Lane masks for sub-register indices are similar to register units for
/// physical registers. The individual bits in a lane mask can't be assigned
/// any specific meaning. They can be used to check if two sub-register
/// indices overlap.
///
/// Iff the target has a register such that:
///
///   getSubReg(Reg, A) overlaps getSubReg(Reg, B)
///
/// then:
///
///   (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0

#ifndef LLVM_MC_LANEBITMASK_H
#define LLVM_MC_LANEBITMASK_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

  struct LaneBitmask {
    // When changing the underlying type, change the format string as well.
    using Type = uint64_t;
    enum : unsigned { BitWidth = 8*sizeof(Type) };
    constexpr static const char *const FormatStr = "%016llX";

    constexpr LaneBitmask() = default;
    explicit constexpr LaneBitmask(Type V) : Mask(V) {}

    constexpr bool operator== (LaneBitmask M) const { return Mask == M.Mask; }
    constexpr bool operator!= (LaneBitmask M) const { return Mask != M.Mask; }
    constexpr bool operator< (LaneBitmask M)  const { return Mask < M.Mask; }
    constexpr bool none() const { return Mask == 0; }
    constexpr bool any()  const { return Mask != 0; }
    constexpr bool all()  const { return ~Mask == 0; }

    constexpr LaneBitmask operator~() const {
      return LaneBitmask(~Mask);
    }
    constexpr LaneBitmask operator|(LaneBitmask M) const {
      return LaneBitmask(Mask | M.Mask);
    }
    constexpr LaneBitmask operator&(LaneBitmask M) const {
      return LaneBitmask(Mask & M.Mask);
    }
    LaneBitmask &operator|=(LaneBitmask M) {
      Mask |= M.Mask;
      return *this;
    }
    LaneBitmask &operator&=(LaneBitmask M) {
      Mask &= M.Mask;
      return *this;
    }

    constexpr Type getAsInteger() const { return Mask; }

    unsigned getNumLanes() const { return llvm::popcount(Mask); }
    unsigned getHighestLane() const {
      return Log2_64(Mask);
    }

    static constexpr LaneBitmask getNone() { return LaneBitmask(0); }
    static constexpr LaneBitmask getAll() { return ~LaneBitmask(0); }
    static constexpr LaneBitmask getLane(unsigned Lane) {
      return LaneBitmask(Type(1) << Lane);
    }

  private:
    Type Mask = 0;
  };

  /// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
  inline Printable PrintLaneMask(LaneBitmask LaneMask) {
    return Printable([LaneMask](raw_ostream &OS) {
      OS << format(LaneBitmask::FormatStr, LaneMask.getAsInteger());
    });
  }
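
  /// Illustrative sketch (not part of upstream LLVM): two sub-register
  /// indices overlap iff the intersection of their lane masks is non-empty,
  /// using only the LaneBitmask API defined above.
  inline bool exampleLanesOverlap(LaneBitmask A, LaneBitmask B) {
    return (A & B).any();
  }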

} // end namespace llvm

#endif // LLVM_MC_LANEBITMASK_H
//===- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/Support/SMLoc.h"

namespace llvm {

class Twine;

/// Generic interface for extending the MCAsmParser,
/// which is implemented by target and object file assembly parser
/// implementations.
class MCAsmParserExtension {
  MCAsmParser *Parser = nullptr;

protected:
  MCAsmParserExtension();

  // Helper template for implementing static dispatch functions.
  template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
  static bool HandleDirective(MCAsmParserExtension *Target,
                              StringRef Directive,
                              SMLoc DirectiveLoc) {
    T *Obj = static_cast<T*>(Target);
    return (Obj->*Handler)(Directive, DirectiveLoc);
  }

  bool BracketExpressionsSupported = false;

public:
  MCAsmParserExtension(const MCAsmParserExtension &) = delete;
  MCAsmParserExtension &operator=(const MCAsmParserExtension &) = delete;
  virtual ~MCAsmParserExtension();

  /// Initialize the extension for parsing using the given \p Parser.
  /// The extension should use the AsmParser interfaces to register its
  /// parsing routines.
  virtual void Initialize(MCAsmParser &Parser);

  /// \name MCAsmParser Proxy Interfaces
  /// @{

  MCContext &getContext() { return getParser().getContext(); }

  MCAsmLexer &getLexer() { return getParser().getLexer(); }
  const MCAsmLexer &getLexer() const {
    return const_cast<MCAsmParserExtension *>(this)->getLexer();
  }

  MCAsmParser &getParser() { return *Parser; }
  const MCAsmParser &getParser() const {
    return const_cast<MCAsmParserExtension*>(this)->getParser();
  }

  SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
  MCStreamer &getStreamer() { return getParser().getStreamer(); }

  bool Warning(SMLoc L, const Twine &Msg) {
    return getParser().Warning(L, Msg);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = SMRange()) {
    return getParser().Error(L, Msg, Range);
  }

  void Note(SMLoc L, const Twine &Msg) {
    getParser().Note(L, Msg);
  }

  bool TokError(const Twine &Msg) {
    return getParser().TokError(Msg);
  }

  const AsmToken &Lex() { return getParser().Lex(); }
  const AsmToken &getTok() { return getParser().getTok(); }
  bool parseToken(AsmToken::TokenKind T,
                  const Twine &Msg = "unexpected token") {
    return getParser().parseToken(T, Msg);
  }
  bool parseEOL() { return getParser().parseEOL(); }

  bool parseMany(function_ref<bool()> parseOne, bool hasComma = true) {
    return getParser().parseMany(parseOne, hasComma);
  }

  bool parseOptionalToken(AsmToken::TokenKind T) {
    return getParser().parseOptionalToken(T);
  }

  bool ParseDirectiveCGProfile(StringRef, SMLoc);

  bool check(bool P, const Twine &Msg) {
    return getParser().check(P, Msg);
  }

  bool check(bool P, SMLoc Loc, const Twine &Msg) {
    return getParser().check(P, Loc, Msg);
  }

  bool addErrorSuffix(const Twine &Suffix) {
    return getParser().addErrorSuffix(Suffix);
  }

  bool HasBracketExpressions() const { return BracketExpressionsSupported; }

  /// @}
};
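
/// Illustrative sketch (not part of upstream LLVM): a minimal extension that
/// wires a handler for a hypothetical ".example" directive through the
/// HandleDirective static-dispatch helper above.
class ExampleAsmParserExtension : public MCAsmParserExtension {
public:
  void Initialize(MCAsmParser &Parser) override {
    MCAsmParserExtension::Initialize(Parser);
    getParser().addDirectiveHandler(
        ".example",
        {this, HandleDirective<ExampleAsmParserExtension,
                               &ExampleAsmParserExtension::parseExample>});
  }

  // Consume the rest of the statement; returning false signals success.
  bool parseExample(StringRef, SMLoc) { return parseEOL(); }
};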

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
//===- AsmLexer.h - Lexer for Assembly Files --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class declares the lexer for assembly files.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_ASMLEXER_H
#define LLVM_MC_MCPARSER_ASMLEXER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include <string>

namespace llvm {

class MCAsmInfo;

/// AsmLexer - Lexer class for assembly files.
class AsmLexer : public MCAsmLexer {
  const MCAsmInfo &MAI;

  const char *CurPtr = nullptr;
  StringRef CurBuf;
  bool IsAtStartOfLine = true;
  bool IsAtStartOfStatement = true;
  bool IsPeeking = false;
  bool EndStatementAtEOF = true;

protected:
  /// LexToken - Read the next token and return its code.
  AsmToken LexToken() override;

public:
  AsmLexer(const MCAsmInfo &MAI);
  AsmLexer(const AsmLexer &) = delete;
  AsmLexer &operator=(const AsmLexer &) = delete;
  ~AsmLexer() override;

  void setBuffer(StringRef Buf, const char *ptr = nullptr,
                 bool EndStatementAtEOF = true);

  StringRef LexUntilEndOfStatement() override;

  size_t peekTokens(MutableArrayRef<AsmToken> Buf,
                    bool ShouldSkipSpace = true) override;

  const MCAsmInfo &getMAI() const { return MAI; }

private:
  bool isAtStartOfComment(const char *Ptr);
  bool isAtStatementSeparator(const char *Ptr);
  [[nodiscard]] int getNextChar();
  int peekNextChar();
  AsmToken ReturnError(const char *Loc, const std::string &Msg);

  AsmToken LexIdentifier();
  AsmToken LexSlash();
  AsmToken LexLineComment();
  AsmToken LexDigit();
  AsmToken LexSingleQuote();
  AsmToken LexQuote();
  AsmToken LexFloatLiteral();
  AsmToken LexHexFloatLiteral(bool NoIntDigits);

  StringRef LexUntilEndOfLine();
};

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_ASMLEXER_H
//===- llvm/MC/MCParsedAsmOperand.h - Asm Parser Operand --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
#define LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SMLoc.h"
#include <string>

namespace llvm {

class raw_ostream;

/// MCParsedAsmOperand - This abstract class represents a source-level assembly
/// instruction operand.  It should be subclassed by target-specific code.  This
/// base class is used by target-independent clients and is the interface
/// between parsing an asm instruction and recognizing it.
class MCParsedAsmOperand {
  /// MCOperandNum - The corresponding MCInst operand number.  Only valid when
  /// parsing MS-style inline assembly.
  unsigned MCOperandNum = ~0u;

  /// Constraint - The constraint on this operand.  Only valid when parsing
  /// MS-style inline assembly.
  std::string Constraint;

protected:
  // This only seems to need to be movable (by ARMOperand) but ARMOperand has
  // lots of members and MSVC doesn't support defaulted move ops, so to avoid
  // that verbosity, just rely on defaulted copy ops. It's only the Constraint
  // string member that would benefit from movement anyway.
  MCParsedAsmOperand() = default;
  MCParsedAsmOperand(const MCParsedAsmOperand &RHS) = default;
  MCParsedAsmOperand &operator=(const MCParsedAsmOperand &) = default;

public:
  virtual ~MCParsedAsmOperand() = default;

  void setConstraint(StringRef C) { Constraint = C.str(); }
  StringRef getConstraint() { return Constraint; }

  void setMCOperandNum (unsigned OpNum) { MCOperandNum = OpNum; }
  unsigned getMCOperandNum() { return MCOperandNum; }

  virtual StringRef getSymName() { return StringRef(); }
  virtual void *getOpDecl() { return nullptr; }

  /// isToken - Is this a token operand?
  virtual bool isToken() const = 0;
  /// isImm - Is this an immediate operand?
  virtual bool isImm() const = 0;
  /// isReg - Is this a register operand?
  virtual bool isReg() const = 0;
  virtual unsigned getReg() const = 0;

  /// isMem - Is this a memory operand?
  virtual bool isMem() const = 0;

  /// isMemUseUpRegs - Does this memory operand use up registers? For example,
  /// Intel MS-style inline asm may reference ARR[BaseReg + IndexReg + ...],
  /// which consumes the registers in the [...] expression, leaving no extra
  /// register available for ARR itself (e.g., for materializing ARR's address
  /// into a register, or for using another base register in the PIC model).
  virtual bool isMemUseUpRegs() const { return false; }

  /// getStartLoc - Get the location of the first token of this operand.
  virtual SMLoc getStartLoc() const = 0;
  /// getEndLoc - Get the location of the last token of this operand.
  virtual SMLoc getEndLoc() const = 0;

  /// needAddressOf - Do we need to emit code to get the address of the
  /// variable/label?   Only valid when parsing MS-style inline assembly.
  virtual bool needAddressOf() const { return false; }

  /// isOffsetOfLocal - Do we need to emit code to get the offset of the local
  /// variable, rather than its value?   Only valid when parsing MS-style inline
  /// assembly.
  virtual bool isOffsetOfLocal() const { return false; }

  /// getOffsetOfLoc - Get the location of the offset operator.
  virtual SMLoc getOffsetOfLoc() const { return SMLoc(); }

  /// print - Print a debug representation of the operand to the given stream.
  virtual void print(raw_ostream &OS) const = 0;

  /// dump - Print to the debug stream.
  virtual void dump() const;
};
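
/// Illustrative sketch (not part of upstream LLVM): the smallest possible
/// concrete operand, representing a literal token. Assumes the consumer also
/// includes llvm/Support/raw_ostream.h so print() can stream text; real
/// targets keep far richer state.
class ExampleTokenOperand : public MCParsedAsmOperand {
  StringRef Tok;
  SMLoc Loc;

public:
  ExampleTokenOperand(StringRef Tok, SMLoc Loc) : Tok(Tok), Loc(Loc) {}

  bool isToken() const override { return true; }
  bool isImm() const override { return false; }
  bool isReg() const override { return false; }
  unsigned getReg() const override { return 0; }
  bool isMem() const override { return false; }
  SMLoc getStartLoc() const override { return Loc; }
  SMLoc getEndLoc() const override { return Loc; }
  void print(raw_ostream &OS) const override { OS << "'" << Tok << "'"; }
};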

//===----------------------------------------------------------------------===//
// Debugging Support

inline raw_ostream& operator<<(raw_ostream &OS, const MCParsedAsmOperand &MO) {
  MO.print(OS);
  return OS;
}

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
//===- AsmCond.h - Assembly file conditional assembly  ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_ASMCOND_H
#define LLVM_MC_MCPARSER_ASMCOND_H

namespace llvm {

/// AsmCond - Class to support conditional assembly
///
/// The conditional assembly feature (.if, .else, .elseif and .endif) is
/// implemented with AsmCond, which records what we are in the middle of
/// processing. Ignore can be either true or false; when true, we are ignoring
/// the block of code in the middle of a conditional.

class AsmCond {
public:
  enum ConditionalAssemblyType {
    NoCond,     // no conditional is being processed
    IfCond,     // inside if conditional
    ElseIfCond, // inside elseif conditional
    ElseCond    // inside else conditional
  };

  ConditionalAssemblyType TheCond = NoCond;
  bool CondMet = false;
  bool Ignore = false;
};
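
/// Illustrative sketch (not part of upstream LLVM): the state a parser would
/// record on entering ".if 0" -- the condition was not met, so the body is
/// ignored until the matching ".else"/".endif".
inline AsmCond exampleEnterFalseIf() {
  AsmCond Cond;
  Cond.TheCond = AsmCond::IfCond;
  Cond.CondMet = false;
  Cond.Ignore = true;
  return Cond;
}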

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_ASMCOND_H
//===- llvm/MC/MCAsmParser.h - Abstract Asm Parser Interface ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_MCASMPARSER_H
#define LLVM_MC_MCPARSER_MCASMPARSER_H

#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmMacro.h"
#include "llvm/Support/SMLoc.h"
#include <cstdint>
#include <string>
#include <utility>

namespace llvm {

class MCAsmLexer;
class MCAsmInfo;
class MCAsmParserExtension;
class MCContext;
class MCExpr;
class MCInstPrinter;
class MCInstrInfo;
class MCStreamer;
class MCTargetAsmParser;
class SourceMgr;

struct InlineAsmIdentifierInfo {
  enum IdKind {
    IK_Invalid,  // Initial state. Unexpected after successful parsing.
    IK_Label,    // Function/Label reference.
    IK_EnumVal,  // Value of enumeration type.
    IK_Var       // Variable.
  };
  // Represents an Enum value
  struct EnumIdentifier {
    int64_t EnumVal;
  };
  // Represents a label/function reference
  struct LabelIdentifier {
    void *Decl;
  };
  // Represents a variable
  struct VariableIdentifier {
    void *Decl;
    bool IsGlobalLV;
    unsigned Length;
    unsigned Size;
    unsigned Type;
  };
  // An InlineAsm identifier can only be one of those
  union {
    EnumIdentifier Enum;
    LabelIdentifier Label;
    VariableIdentifier Var;
  };
  bool isKind(IdKind kind) const { return Kind == kind; }
  // Initializers
  void setEnum(int64_t enumVal) {
    assert(isKind(IK_Invalid) && "should be initialized only once");
    Kind = IK_EnumVal;
    Enum.EnumVal = enumVal;
  }
  void setLabel(void *decl) {
    assert(isKind(IK_Invalid) && "should be initialized only once");
    Kind = IK_Label;
    Label.Decl = decl;
  }
  void setVar(void *decl, bool isGlobalLV, unsigned size, unsigned type) {
    assert(isKind(IK_Invalid) && "should be initialized only once");
    Kind = IK_Var;
    Var.Decl = decl;
    Var.IsGlobalLV = isGlobalLV;
    Var.Size = size;
    Var.Type = type;
    Var.Length = size / type;
  }
  InlineAsmIdentifierInfo() = default;

private:
  // Discriminate using the current kind.
  IdKind Kind = IK_Invalid;
};
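
/// Illustrative sketch (not part of upstream LLVM): each identifier is
/// initialized exactly once, as the asserts in the setters above require.
inline InlineAsmIdentifierInfo exampleEnumIdentifier(int64_t Value) {
  InlineAsmIdentifierInfo Info; // starts out as IK_Invalid
  Info.setEnum(Value);          // now Info.isKind(IK_EnumVal) holds
  return Info;
}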

// Generic type information for an assembly object.
// All sizes measured in bytes.
struct AsmTypeInfo {
  StringRef Name;
  unsigned Size = 0;
  unsigned ElementSize = 0;
  unsigned Length = 0;
};

struct AsmFieldInfo {
  AsmTypeInfo Type;
  unsigned Offset = 0;
};

/// Generic Sema callback for assembly parser.
class MCAsmParserSemaCallback {
public:
  virtual ~MCAsmParserSemaCallback();

  virtual void LookupInlineAsmIdentifier(StringRef &LineBuf,
                                         InlineAsmIdentifierInfo &Info,
                                         bool IsUnevaluatedContext) = 0;
  virtual StringRef LookupInlineAsmLabel(StringRef Identifier, SourceMgr &SM,
                                         SMLoc Location, bool Create) = 0;
  virtual bool LookupInlineAsmField(StringRef Base, StringRef Member,
                                    unsigned &Offset) = 0;
};

/// Generic assembler parser interface, for use by target specific
/// assembly parsers.
class MCAsmParser {
public:
  using DirectiveHandler = bool (*)(MCAsmParserExtension*, StringRef, SMLoc);
  using ExtensionDirectiveHandler =
      std::pair<MCAsmParserExtension*, DirectiveHandler>;

  struct MCPendingError {
    SMLoc Loc;
    SmallString<64> Msg;
    SMRange Range;
  };

private:
  MCTargetAsmParser *TargetParser = nullptr;

protected: // Can only create subclasses.
  MCAsmParser();

  SmallVector<MCPendingError, 0> PendingErrors;

  /// Flag tracking whether any errors have been encountered.
  bool HadError = false;

  bool ShowParsedOperands = false;

public:
  MCAsmParser(const MCAsmParser &) = delete;
  MCAsmParser &operator=(const MCAsmParser &) = delete;
  virtual ~MCAsmParser();

  virtual void addDirectiveHandler(StringRef Directive,
                                   ExtensionDirectiveHandler Handler) = 0;

  virtual void addAliasForDirective(StringRef Directive, StringRef Alias) = 0;

  virtual SourceMgr &getSourceManager() = 0;

  virtual MCAsmLexer &getLexer() = 0;
  const MCAsmLexer &getLexer() const {
    return const_cast<MCAsmParser*>(this)->getLexer();
  }

  virtual MCContext &getContext() = 0;

  /// Return the output streamer for the assembler.
  virtual MCStreamer &getStreamer() = 0;

  MCTargetAsmParser &getTargetParser() const { return *TargetParser; }
  void setTargetParser(MCTargetAsmParser &P);

  virtual unsigned getAssemblerDialect() { return 0; }
  virtual void setAssemblerDialect(unsigned i) {}

  bool getShowParsedOperands() const { return ShowParsedOperands; }
  void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; }

  /// Run the parser on the input source buffer.
  virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;

  virtual void setParsingMSInlineAsm(bool V) = 0;
  virtual bool isParsingMSInlineAsm() = 0;

  virtual bool discardLTOSymbol(StringRef) const { return false; }

  virtual bool isParsingMasm() const { return false; }

  virtual bool defineMacro(StringRef Name, StringRef Value) { return true; }

  virtual bool lookUpField(StringRef Name, AsmFieldInfo &Info) const {
    return true;
  }
  virtual bool lookUpField(StringRef Base, StringRef Member,
                           AsmFieldInfo &Info) const {
    return true;
  }

  virtual bool lookUpType(StringRef Name, AsmTypeInfo &Info) const {
    return true;
  }

  /// Parse MS-style inline assembly.
  virtual bool parseMSInlineAsm(
      std::string &AsmString, unsigned &NumOutputs, unsigned &NumInputs,
      SmallVectorImpl<std::pair<void *, bool>> &OpDecls,
      SmallVectorImpl<std::string> &Constraints,
      SmallVectorImpl<std::string> &Clobbers, const MCInstrInfo *MII,
      const MCInstPrinter *IP, MCAsmParserSemaCallback &SI) = 0;

  /// Emit a note at the location \p L, with the message \p Msg.
  virtual void Note(SMLoc L, const Twine &Msg,
                    SMRange Range = std::nullopt) = 0;

  /// Emit a warning at the location \p L, with the message \p Msg.
  ///
  /// \return True if warnings are fatal.
  virtual bool Warning(SMLoc L, const Twine &Msg,
                       SMRange Range = std::nullopt) = 0;

  /// Return an error at the location \p L, with the message \p Msg. This
  /// may be modified before being emitted.
  ///
  /// \return The return value is always true, as an idiomatic convenience to
  /// clients.
  bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt);

  /// Emit an error at the location \p L, with the message \p Msg.
  ///
  /// \return The return value is always true, as an idiomatic convenience to
  /// clients.
  virtual bool printError(SMLoc L, const Twine &Msg,
                          SMRange Range = std::nullopt) = 0;

  bool hasPendingError() { return !PendingErrors.empty(); }

  bool printPendingErrors() {
    bool rv = !PendingErrors.empty();
    for (auto &Err : PendingErrors) {
      printError(Err.Loc, Twine(Err.Msg), Err.Range);
    }
    PendingErrors.clear();
    return rv;
  }

  void clearPendingErrors() { PendingErrors.clear(); }

  bool addErrorSuffix(const Twine &Suffix);

  /// Get the next AsmToken in the stream, possibly handling file
  /// inclusion first.
  virtual const AsmToken &Lex() = 0;

  /// Get the current AsmToken from the stream.
  const AsmToken &getTok() const;

  /// Report an error at the current lexer location.
  bool TokError(const Twine &Msg, SMRange Range = std::nullopt);

  bool parseTokenLoc(SMLoc &Loc);
  bool parseToken(AsmToken::TokenKind T, const Twine &Msg = "unexpected token");
  /// Attempt to parse and consume token, returning true on
  /// success.
  bool parseOptionalToken(AsmToken::TokenKind T);

  bool parseComma() { return parseToken(AsmToken::Comma, "expected comma"); }
  bool parseRParen() { return parseToken(AsmToken::RParen, "expected ')'"); }
  bool parseEOL();
  bool parseEOL(const Twine &ErrMsg);

  bool parseMany(function_ref<bool()> parseOne, bool hasComma = true);

  bool parseIntToken(int64_t &V, const Twine &ErrMsg);

  bool check(bool P, const Twine &Msg);
  bool check(bool P, SMLoc Loc, const Twine &Msg);

  /// Parse an identifier or string (as a quoted identifier) and set \p
  /// Res to the identifier contents.
  virtual bool parseIdentifier(StringRef &Res) = 0;

  /// Parse up to the end of statement and return the contents from the
  /// current token until the end of the statement; the current token on exit
  /// will be either the EndOfStatement or EOF.
  virtual StringRef parseStringToEndOfStatement() = 0;

  /// Parse the current token as a string which may include escaped
  /// characters and return the string contents.
  virtual bool parseEscapedString(std::string &Data) = 0;

  /// Parse an angle-bracket delimited string at the current position if one is
  /// present, returning the string contents.
  virtual bool parseAngleBracketString(std::string &Data) = 0;

  /// Skip to the end of the current statement, for error recovery.
  virtual void eatToEndOfStatement() = 0;

  /// Parse an arbitrary expression.
  ///
  /// \param Res - The value of the expression. The result is undefined
  /// on error.
  /// \return - False on success.
  virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
  bool parseExpression(const MCExpr *&Res);

  /// Parse a primary expression.
  ///
  /// \param Res - The value of the expression. The result is undefined
  /// on error.
  /// \return - False on success.
  virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc,
                                AsmTypeInfo *TypeInfo) = 0;

  /// Parse an arbitrary expression, assuming that an initial '(' has
  /// already been consumed.
  ///
  /// \param Res - The value of the expression. The result is undefined
  /// on error.
  /// \return - False on success.
  virtual bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;

  /// Parse an expression which must evaluate to an absolute value.
  ///
  /// \param Res - The value of the absolute expression. The result is undefined
  /// on error.
  /// \return - False on success.
  virtual bool parseAbsoluteExpression(int64_t &Res) = 0;

  /// Ensure that we have a valid section set in the streamer. Otherwise,
  /// report an error and switch to .text.
  /// \return - False on success.
  virtual bool checkForValidSection() = 0;

  /// Parse an arbitrary expression of a specified parenthesis depth,
  /// assuming that the initial '(' characters have already been consumed.
  ///
  /// \param ParenDepth - Specifies how many trailing expressions outside the
  /// current parentheses we have to parse.
  /// \param Res - The value of the expression. The result is undefined
  /// on error.
  /// \return - False on success.
  virtual bool parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
                                     SMLoc &EndLoc) = 0;

  /// Parse a .gnu_attribute.
  bool parseGNUAttribute(SMLoc L, int64_t &Tag, int64_t &IntegerValue);
};
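
/// Illustrative sketch (not part of upstream LLVM): the error convention in
/// practice. Error() and the parse helpers all return true on failure, so a
/// hypothetical two-integer directive parser can chain them in one expression.
inline bool exampleParseTwoInts(MCAsmParser &Parser, int64_t &A, int64_t &B) {
  return Parser.parseIntToken(A, "expected first integer") ||
         Parser.parseComma() ||
         Parser.parseIntToken(B, "expected second integer") ||
         Parser.parseEOL(); // true means a diagnostic was already emitted
}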

/// Create an MCAsmParser instance for parsing assembly similar to gas syntax
MCAsmParser *createMCAsmParser(SourceMgr &, MCContext &, MCStreamer &,
                               const MCAsmInfo &, unsigned CB = 0);

/// Create an MCAsmParser instance for parsing Microsoft MASM-style assembly
MCAsmParser *createMCMasmParser(SourceMgr &, MCContext &, MCStreamer &,
                                const MCAsmInfo &, struct tm, unsigned CB = 0);

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_MCASMPARSER_H
//===- llvm/MC/MCAsmParserUtils.h - Asm Parser Utilities --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
#define LLVM_MC_MCPARSER_MCASMPARSERUTILS_H

namespace llvm {

class MCAsmParser;
class MCExpr;
class MCSymbol;
class StringRef;

namespace MCParserUtils {

/// Parse a value expression and return whether it can be assigned to a symbol
/// with the given name.
///
/// On success, returns false and sets the Symbol and Value output parameters.
bool parseAssignmentExpression(StringRef Name, bool allow_redef,
                               MCAsmParser &Parser, MCSymbol *&Symbol,
                               const MCExpr *&Value);

} // namespace MCParserUtils

} // namespace llvm

#endif // LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
//===- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
#define LLVM_MC_MCPARSER_MCASMLEXER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCAsmMacro.h"
#include <cassert>
#include <cstddef>
#include <string>

namespace llvm {

/// A callback class which is notified of each comment in an assembly file as
/// it is lexed.
class AsmCommentConsumer {
public:
  virtual ~AsmCommentConsumer() = default;

  /// Callback function for when a comment is lexed. Loc is the start of the
  /// comment text (excluding the comment-start marker). CommentText is the text
  /// of the comment, excluding the comment start and end markers, and the
  /// newline for single-line comments.
  virtual void HandleComment(SMLoc Loc, StringRef CommentText) = 0;
};


/// Generic assembler lexer interface, for use by target specific assembly
/// lexers.
class MCAsmLexer {
  /// The current token, stored in the base class for faster access.
  SmallVector<AsmToken, 1> CurTok;

  /// The location and description of the current error
  SMLoc ErrLoc;
  std::string Err;

protected: // Can only create subclasses.
  const char *TokStart = nullptr;
  bool SkipSpace = true;
  bool AllowAtInIdentifier = false;
  bool AllowHashInIdentifier = false;
  bool IsAtStartOfStatement = true;
  bool LexMasmHexFloats = false;
  bool LexMasmIntegers = false;
  bool LexMasmStrings = false;
  bool LexMotorolaIntegers = false;
  bool UseMasmDefaultRadix = false;
  unsigned DefaultRadix = 10;
  bool LexHLASMIntegers = false;
  bool LexHLASMStrings = false;
  AsmCommentConsumer *CommentConsumer = nullptr;

  MCAsmLexer();

  virtual AsmToken LexToken() = 0;

  void SetError(SMLoc errLoc, const std::string &err) {
    ErrLoc = errLoc;
    Err = err;
  }

public:
  MCAsmLexer(const MCAsmLexer &) = delete;
  MCAsmLexer &operator=(const MCAsmLexer &) = delete;
  virtual ~MCAsmLexer();

  /// Consume the next token from the input stream and return it.
  ///
  /// The lexer will continuously return the end-of-file token once the end of
  /// the main input file has been reached.
  const AsmToken &Lex() {
    assert(!CurTok.empty());
    // Record whether we are parsing out an EndOfStatement.
    IsAtStartOfStatement = CurTok.front().getKind() == AsmToken::EndOfStatement;
    CurTok.erase(CurTok.begin());
    // LexToken may generate multiple tokens via UnLex but will always return
    // the first one. Place returned value at head of CurTok vector.
    if (CurTok.empty()) {
      AsmToken T = LexToken();
      CurTok.insert(CurTok.begin(), T);
    }
    return CurTok.front();
  }

  void UnLex(AsmToken const &Token) {
    IsAtStartOfStatement = false;
    CurTok.insert(CurTok.begin(), Token);
  }

  bool isAtStartOfStatement() { return IsAtStartOfStatement; }

  virtual StringRef LexUntilEndOfStatement() = 0;

  /// Get the current source location.
  SMLoc getLoc() const;

  /// Get the current (last) lexed token.
  const AsmToken &getTok() const {
    return CurTok[0];
  }

  /// Look ahead at the next token to be lexed.
  const AsmToken peekTok(bool ShouldSkipSpace = true) {
    AsmToken Tok;

    MutableArrayRef<AsmToken> Buf(Tok);
    size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);

    assert(ReadCount == 1);
    (void)ReadCount;

    return Tok;
  }

  /// Look ahead an arbitrary number of tokens.
  virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
                            bool ShouldSkipSpace = true) = 0;

  /// Get the current error location
  SMLoc getErrLoc() {
    return ErrLoc;
  }

  /// Get the current error string
  const std::string &getErr() {
    return Err;
  }

  /// Get the kind of current token.
  AsmToken::TokenKind getKind() const { return getTok().getKind(); }

  /// Check if the current token has kind \p K.
  bool is(AsmToken::TokenKind K) const { return getTok().is(K); }

  /// Check if the current token has kind \p K.
  bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }

  /// Set whether spaces should be ignored by the lexer
  void setSkipSpace(bool val) { SkipSpace = val; }

  bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
  void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }

  void setAllowHashInIdentifier(bool V) { AllowHashInIdentifier = V; }

  void setCommentConsumer(AsmCommentConsumer *CommentConsumer) {
    this->CommentConsumer = CommentConsumer;
  }

  /// Set whether to lex masm-style binary (e.g., 0b1101) and radix-specified
  /// literals (e.g., 0ABCh [hex], 576t [decimal], 77o [octal], 1101y [binary]).
  void setLexMasmIntegers(bool V) { LexMasmIntegers = V; }

  /// Set whether to use masm-style default-radix integer literals. If disabled,
  /// assume decimal unless prefixed (e.g., 0x2c [hex], 077 [octal]).
  void useMasmDefaultRadix(bool V) { UseMasmDefaultRadix = V; }

  unsigned getMasmDefaultRadix() const { return DefaultRadix; }
  void setMasmDefaultRadix(unsigned Radix) { DefaultRadix = Radix; }

  /// Set whether to lex masm-style hex float literals, such as 3f800000r.
  void setLexMasmHexFloats(bool V) { LexMasmHexFloats = V; }

  /// Set whether to lex masm-style string literals, such as 'Can''t find file'
  /// and "This ""value"" not found".
  void setLexMasmStrings(bool V) { LexMasmStrings = V; }

  /// Set whether to lex Motorola-style integer literals, such as $deadbeef or
  /// %01010110.
  void setLexMotorolaIntegers(bool V) { LexMotorolaIntegers = V; }

  /// Set whether to lex HLASM-flavour integers. For now this is only [0-9]*
  void setLexHLASMIntegers(bool V) { LexHLASMIntegers = V; }

  /// Set whether to "lex" HLASM-flavour character and string literals. For now,
  /// setting this option to true, will disable lexing for character and string
  /// literals.
  void setLexHLASMStrings(bool V) { LexHLASMStrings = V; }
};
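
/// Illustrative sketch (not part of upstream LLVM): one-token lookahead that
/// leaves the current token untouched, built on peekTok() above.
inline bool examplePeeksAtComma(MCAsmLexer &Lexer) {
  return Lexer.peekTok(/*ShouldSkipSpace=*/true).is(AsmToken::Comma);
}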

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_MCASMLEXER_H
//===- llvm/MC/MCTargetAsmParser.h - Target Assembly Parser -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPARSER_MCTARGETASMPARSER_H
#define LLVM_MC_MCPARSER_MCTARGETASMPARSER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <cstdint>
#include <memory>

namespace llvm {

class MCContext;
class MCInst;
class MCInstrInfo;
class MCRegister;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;
template <typename T> class SmallVectorImpl;

using OperandVector = SmallVectorImpl<std::unique_ptr<MCParsedAsmOperand>>;

enum AsmRewriteKind {
  AOK_Align,          // Rewrite align as .align.
  AOK_EVEN,           // Rewrite even as .even.
  AOK_Emit,           // Rewrite _emit as .byte.
  AOK_CallInput,      // Rewrite in terms of ${N:P}.
  AOK_Input,          // Rewrite in terms of $N.
  AOK_Output,         // Rewrite in terms of $N.
  AOK_SizeDirective,  // Add a sizing directive (e.g., dword ptr).
  AOK_Label,          // Rewrite local labels.
  AOK_EndOfStatement, // Add EndOfStatement (e.g., "\n\t").
  AOK_Skip,           // Skip emission (e.g., offset/type operators).
  AOK_IntelExpr       // SizeDirective SymDisp [BaseReg + IndexReg * Scale + ImmDisp]
};

const char AsmRewritePrecedence [] = {
  2, // AOK_Align
  2, // AOK_EVEN
  2, // AOK_Emit
  3, // AOK_Input
  3, // AOK_CallInput
  3, // AOK_Output
  5, // AOK_SizeDirective
  1, // AOK_Label
  5, // AOK_EndOfStatement
  2, // AOK_Skip
  2  // AOK_IntelExpr
};

// Represent the various parts which make up an intel expression,
// used for emitting compound intel expressions
struct IntelExpr {
  bool NeedBracs = false;
  int64_t Imm = 0;
  StringRef BaseReg;
  StringRef IndexReg;
  StringRef OffsetName;
  unsigned Scale = 1;

  IntelExpr() = default;
  // [BaseReg + IndexReg * ScaleExpression + OFFSET name + ImmediateExpression]
  IntelExpr(StringRef baseReg, StringRef indexReg, unsigned scale,
            StringRef offsetName, int64_t imm, bool needBracs)
      : NeedBracs(needBracs), Imm(imm), BaseReg(baseReg), IndexReg(indexReg),
        OffsetName(offsetName), Scale(1) {
    if (scale)
      Scale = scale;
  }
  bool hasBaseReg() const { return !BaseReg.empty(); }
  bool hasIndexReg() const { return !IndexReg.empty(); }
  bool hasRegs() const { return hasBaseReg() || hasIndexReg(); }
  bool hasOffset() const { return !OffsetName.empty(); }
  // Normally we don't emit the immediate unconditionally;
  // we only do so when there are no other components.
  bool emitImm() const { return !(hasRegs() || hasOffset()); }
  bool isValid() const {
    return (Scale == 1) ||
           (hasIndexReg() && (Scale == 2 || Scale == 4 || Scale == 8));
  }
};
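
/// Illustrative sketch (not part of upstream LLVM): building the compound
/// operand "[rax + rbx*4 + 16]" and checking the scale constraint above.
inline IntelExpr exampleScaledIndexExpr() {
  IntelExpr E(/*baseReg=*/"rax", /*indexReg=*/"rbx", /*scale=*/4,
              /*offsetName=*/"", /*imm=*/16, /*needBracs=*/true);
  assert(E.isValid() && "a scale of 4 is only legal with an index register");
  return E;
}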

struct AsmRewrite {
  AsmRewriteKind Kind;
  SMLoc Loc;
  unsigned Len;
  bool Done;
  int64_t Val;
  StringRef Label;
  IntelExpr IntelExp;
  bool IntelExpRestricted;

public:
  AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, int64_t val = 0,
             bool Restricted = false)
      : Kind(kind), Loc(loc), Len(len), Done(false), Val(val) {
    IntelExpRestricted = Restricted;
  }
  AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len, StringRef label)
    : AsmRewrite(kind, loc, len) { Label = label; }
  AsmRewrite(SMLoc loc, unsigned len, IntelExpr exp)
    : AsmRewrite(AOK_IntelExpr, loc, len) { IntelExp = exp; }
};

struct ParseInstructionInfo {
  SmallVectorImpl<AsmRewrite> *AsmRewrites = nullptr;

  ParseInstructionInfo() = default;
  ParseInstructionInfo(SmallVectorImpl<AsmRewrite> *rewrites)
    : AsmRewrites(rewrites) {}
};

enum OperandMatchResultTy {
  MatchOperand_Success,  // operand matched successfully
  MatchOperand_NoMatch,  // operand did not match
  MatchOperand_ParseFail // operand matched but had errors
};

/// Ternary parse status returned by various parse* methods.
class ParseStatus {
  enum class StatusTy { Success, Failure, NoMatch } Status;

public:
#if __cplusplus >= 202002L
  using enum StatusTy;
#else
  static constexpr StatusTy Success = StatusTy::Success;
  static constexpr StatusTy Failure = StatusTy::Failure;
  static constexpr StatusTy NoMatch = StatusTy::NoMatch;
#endif

  constexpr ParseStatus() : Status(NoMatch) {}

  constexpr ParseStatus(StatusTy Status) : Status(Status) {}

  constexpr ParseStatus(bool Error) : Status(Error ? Failure : Success) {}

  template <typename T> constexpr ParseStatus(T) = delete;

  constexpr bool isSuccess() const { return Status == StatusTy::Success; }
  constexpr bool isFailure() const { return Status == StatusTy::Failure; }
  constexpr bool isNoMatch() const { return Status == StatusTy::NoMatch; }

  // Allow implicit conversions to / from OperandMatchResultTy.
  constexpr ParseStatus(OperandMatchResultTy R)
      : Status(R == MatchOperand_Success     ? Success
               : R == MatchOperand_ParseFail ? Failure
                                             : NoMatch) {}
  constexpr operator OperandMatchResultTy() const {
    return isSuccess()   ? MatchOperand_Success
           : isFailure() ? MatchOperand_ParseFail
                         : MatchOperand_NoMatch;
  }
};
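
/// Illustrative sketch (not part of upstream LLVM): the ternary protocol as a
/// hypothetical operand parser would apply it.
inline ParseStatus exampleTryParseOperand(bool Present, bool Malformed) {
  if (!Present)
    return ParseStatus::NoMatch; // consumed nothing; let other parsers try
  if (Malformed)
    return ParseStatus::Failure; // consumed tokens and emitted a diagnostic
  return ParseStatus::Success;
}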

enum class DiagnosticPredicateTy {
  Match,
  NearMatch,
  NoMatch,
};

// When an operand is parsed, the assembler will try to iterate through a set of
// possible operand classes that the operand might match and call the
// corresponding PredicateMethod to determine that.
//
// If there are two AsmOperands that would give a specific diagnostic if there
// is no match, there is currently no mechanism to distinguish which operand is
// a closer match. The DiagnosticPredicate distinguishes between 'completely
// no match' and 'near match', so the assembler can decide whether to give a
// specific diagnostic, or use 'InvalidOperand' and continue to find a
// 'better matching' diagnostic.
//
// For example:
//    opcode opnd0, opnd1, opnd2
//
// where:
//    opnd2 could be an 'immediate of range [-8, 7]'
//    opnd2 could be a  'register + shift/extend'.
//
// If opnd2 is a valid register, but with a wrong shift/extend suffix, it makes
// little sense to give a diagnostic that the operand should be an immediate
// in range [-8, 7].
//
// This is a light-weight alternative to the 'NearMissInfo' approach
// below which collects *all* possible diagnostics. This alternative
// is optional and fully backward compatible with existing
// PredicateMethods that return a 'bool' (match or no match).
struct DiagnosticPredicate {
  DiagnosticPredicateTy Type;

  explicit DiagnosticPredicate(bool Match)
      : Type(Match ? DiagnosticPredicateTy::Match
                   : DiagnosticPredicateTy::NearMatch) {}
  DiagnosticPredicate(DiagnosticPredicateTy T) : Type(T) {}
  DiagnosticPredicate(const DiagnosticPredicate &) = default;
  DiagnosticPredicate& operator=(const DiagnosticPredicate &) = default;

  operator bool() const { return Type == DiagnosticPredicateTy::Match; }
  bool isMatch() const { return Type == DiagnosticPredicateTy::Match; }
  bool isNearMatch() const { return Type == DiagnosticPredicateTy::NearMatch; }
  bool isNoMatch() const { return Type == DiagnosticPredicateTy::NoMatch; }
};
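
/// Illustrative sketch (not part of upstream LLVM): distinguishing a near
/// match (valid register, bad shift/extend suffix) from no match at all.
inline DiagnosticPredicate exampleShiftedRegPredicate(bool IsReg,
                                                      bool ShiftOk) {
  if (!IsReg)
    return DiagnosticPredicate(DiagnosticPredicateTy::NoMatch);
  return DiagnosticPredicate(ShiftOk); // true -> Match, false -> NearMatch
}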

// When matching of an assembly instruction fails, there may be multiple
// encodings that are close to being a match. It's often ambiguous which one
// the programmer intended to use, so we want to report an error which mentions
// each of these "near-miss" encodings. This struct contains information about
// one such encoding, and why it did not match the parsed instruction.
class NearMissInfo {
public:
  enum NearMissKind {
    NoNearMiss,
    NearMissOperand,
    NearMissFeature,
    NearMissPredicate,
    NearMissTooFewOperands,
  };

  // The encoding is valid for the parsed assembly string. This is only used
  // internally to the table-generated assembly matcher.
  static NearMissInfo getSuccess() { return NearMissInfo(); }

  // The instruction encoding is not valid because it requires some target
  // features that are not currently enabled. MissingFeatures has a bit set for
  // each feature that the encoding needs but which is not enabled.
  static NearMissInfo getMissedFeature(const FeatureBitset &MissingFeatures) {
    NearMissInfo Result;
    Result.Kind = NearMissFeature;
    Result.Features = MissingFeatures;
    return Result;
  }

  // The instruction encoding is not valid because the target-specific
  // predicate function returned an error code. FailureCode is the
  // target-specific error code returned by the predicate.
  static NearMissInfo getMissedPredicate(unsigned FailureCode) {
    NearMissInfo Result;
    Result.Kind = NearMissPredicate;
    Result.PredicateError = FailureCode;
    return Result;
  }

  // The instruction encoding is not valid because one (and only one) parsed
  // operand is not of the correct type. OperandError is the error code
  // relating to the operand class expected by the encoding. OperandClass is
  // the type of the expected operand. Opcode is the opcode of the encoding.
  // OperandIndex is the index into the parsed operand list.
  static NearMissInfo getMissedOperand(unsigned OperandError,
                                       unsigned OperandClass, unsigned Opcode,
                                       unsigned OperandIndex) {
    NearMissInfo Result;
    Result.Kind = NearMissOperand;
    Result.MissedOperand.Error = OperandError;
    Result.MissedOperand.Class = OperandClass;
    Result.MissedOperand.Opcode = Opcode;
    Result.MissedOperand.Index = OperandIndex;
    return Result;
  }

  // The instruction encoding is not valid because it expects more operands
  // than were parsed. OperandClass is the class of the expected operand that
  // was not provided. Opcode is the instruction encoding.
  static NearMissInfo getTooFewOperands(unsigned OperandClass,
                                        unsigned Opcode) {
    NearMissInfo Result;
    Result.Kind = NearMissTooFewOperands;
    Result.TooFewOperands.Class = OperandClass;
    Result.TooFewOperands.Opcode = Opcode;
    return Result;
  }

  operator bool() const { return Kind != NoNearMiss; }

  NearMissKind getKind() const { return Kind; }

  // Feature flags required by the instruction, that the current target does
  // not have.
  const FeatureBitset& getFeatures() const {
    assert(Kind == NearMissFeature);
    return Features;
  }
  // Error code returned by the target predicate when validating this
  // instruction encoding.
  unsigned getPredicateError() const {
    assert(Kind == NearMissPredicate);
    return PredicateError;
  }
  // MatchClassKind of the operand that we expected to see.
  unsigned getOperandClass() const {
    assert(Kind == NearMissOperand || Kind == NearMissTooFewOperands);
    return MissedOperand.Class;
  }
  // Opcode of the encoding we were trying to match.
  unsigned getOpcode() const {
    assert(Kind == NearMissOperand || Kind == NearMissTooFewOperands);
    return MissedOperand.Opcode;
  }
  // Error code returned when validating the operand.
  unsigned getOperandError() const {
    assert(Kind == NearMissOperand);
    return MissedOperand.Error;
  }
  // Index of the actual operand we were trying to match in the list of parsed
  // operands.
  unsigned getOperandIndex() const {
    assert(Kind == NearMissOperand);
    return MissedOperand.Index;
  }

private:
  NearMissKind Kind;

  // These two structs share a common prefix, so we can safely rely on the fact
  // that they overlap in the union.
  struct MissedOpInfo {
    unsigned Class;
    unsigned Opcode;
    unsigned Error;
    unsigned Index;
  };

  struct TooFewOperandsInfo {
    unsigned Class;
    unsigned Opcode;
  };

  union {
    FeatureBitset Features;
    unsigned PredicateError;
    MissedOpInfo MissedOperand;
    TooFewOperandsInfo TooFewOperands;
  };

  NearMissInfo() : Kind(NoNearMiss) {}
};
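
/// Illustrative sketch (not part of upstream LLVM): how a matcher might
/// record why one candidate encoding was rejected, for later diagnosis.
inline NearMissInfo exampleRecordOperandMiss(unsigned DiagCode,
                                             unsigned OperandClass,
                                             unsigned Opcode,
                                             unsigned OperandIndex) {
  NearMissInfo Miss = NearMissInfo::getMissedOperand(DiagCode, OperandClass,
                                                     Opcode, OperandIndex);
  assert(Miss.getKind() == NearMissInfo::NearMissOperand);
  return Miss;
}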

/// MCTargetAsmParser - Generic interface to target specific assembly parsers.
class MCTargetAsmParser : public MCAsmParserExtension {
public:
  enum MatchResultTy {
    Match_InvalidOperand,
    Match_InvalidTiedOperand,
    Match_MissingFeature,
    Match_MnemonicFail,
    Match_Success,
    Match_NearMisses,
    FIRST_TARGET_MATCH_RESULT_TY
  };

protected: // Can only create subclasses.
  MCTargetAsmParser(MCTargetOptions const &, const MCSubtargetInfo &STI,
                    const MCInstrInfo &MII);

  /// Create a copy of STI and return a non-const reference to it.
  MCSubtargetInfo &copySTI();

  /// AvailableFeatures - The current set of available features.
  FeatureBitset AvailableFeatures;

  /// ParsingMSInlineAsm - Are we parsing ms-style inline assembly?
  bool ParsingMSInlineAsm = false;

  /// SemaCallback - The Sema callback implementation.  Must be set when parsing
  /// ms-style inline assembly.
  MCAsmParserSemaCallback *SemaCallback = nullptr;

  /// Set of options which affects instrumentation of inline assembly.
  MCTargetOptions MCOptions;

  /// Current STI.
  const MCSubtargetInfo *STI;

  const MCInstrInfo &MII;

public:
  MCTargetAsmParser(const MCTargetAsmParser &) = delete;
  MCTargetAsmParser &operator=(const MCTargetAsmParser &) = delete;

  ~MCTargetAsmParser() override;

  const MCSubtargetInfo &getSTI() const;

  const FeatureBitset& getAvailableFeatures() const {
    return AvailableFeatures;
  }
  void setAvailableFeatures(const FeatureBitset& Value) {
    AvailableFeatures = Value;
  }

  bool isParsingMSInlineAsm () { return ParsingMSInlineAsm; }
  void setParsingMSInlineAsm (bool Value) { ParsingMSInlineAsm = Value; }

  MCTargetOptions getTargetOptions() const { return MCOptions; }

  void setSemaCallback(MCAsmParserSemaCallback *Callback) {
    SemaCallback = Callback;
  }

  // Target-specific parsing of expression.
  virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
    return getParser().parsePrimaryExpr(Res, EndLoc, nullptr);
  }

  virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc,
                             SMLoc &EndLoc) = 0;

  /// tryParseRegister - parse one register if possible
  ///
  /// Check whether a register specification can be parsed at the current
  /// location, without failing the entire parse if it can't. Must not consume
  /// tokens if the parse fails.
  virtual OperandMatchResultTy
  tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) = 0;

  /// ParseInstruction - Parse one assembly instruction.
  ///
  /// The parser is positioned following the instruction name. The target
  /// specific instruction parser should parse the entire instruction and
  /// construct the appropriate MCInst, or emit an error. On success, the entire
  /// line should be parsed up to and including the end-of-statement token. On
  /// failure, the parser is not required to read to the end of the line.
  ///
  /// \param Name - The instruction name.
  /// \param NameLoc - The source location of the name.
  /// \param Operands [out] - The list of parsed operands, this returns
  ///        ownership of them to the caller.
  /// \return True on failure.
  virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                SMLoc NameLoc, OperandVector &Operands) = 0;
  virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                AsmToken Token, OperandVector &Operands) {
    return ParseInstruction(Info, Name, Token.getLoc(), Operands);
  }

  /// ParseDirective - Parse a target specific assembler directive
  /// This method is deprecated, use 'parseDirective' instead.
  ///
  /// The parser is positioned following the directive name.  The target
  /// specific directive parser should parse the entire directive doing or
  /// recording any target specific work, or return true and do nothing if the
  /// directive is not target specific. If the directive is target specific,
  /// the entire line is parsed up to and including the end-of-statement
  /// token, and false is returned.
  ///
  /// \param DirectiveID - the identifier token of the directive.
  virtual bool ParseDirective(AsmToken DirectiveID) { return true; }

  /// Parses a target-specific assembler directive.
  ///
  /// The parser is positioned following the directive name. The target-specific
  /// directive parser should parse the entire directive doing or recording any
  /// target-specific work, or emit an error. On success, the entire line should
  /// be parsed up to and including the end-of-statement token. On failure, the
  /// parser is not required to read to the end of the line. If the directive is
  /// not target-specific, no tokens should be consumed and NoMatch is returned.
  ///
  /// \param DirectiveID - The token identifying the directive.
  virtual ParseStatus parseDirective(AsmToken DirectiveID);

  /// MatchAndEmitInstruction - Recognize a series of operands of a parsed
  /// instruction as an actual MCInst and emit it to the specified MCStreamer.
  /// This returns false on success and returns true on failure to match.
  ///
  /// On failure, the target parser is responsible for emitting a diagnostic
  /// explaining the match failure.
  virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                       OperandVector &Operands, MCStreamer &Out,
                                       uint64_t &ErrorInfo,
                                       bool MatchingInlineAsm) = 0;

  /// Allows targets to let registers opt out of clobber lists.
  virtual bool OmitRegisterFromClobberLists(unsigned RegNo) { return false; }

  /// Allow a target to add special case operand matching for things that
  /// tblgen doesn't/can't handle effectively. For example, literal
  /// immediates on ARM. TableGen expects a token operand, but the parser
  /// will recognize them as immediates.
  virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                              unsigned Kind) {
    return Match_InvalidOperand;
  }

  /// Validate the instruction match against any complex target predicates
  /// before rendering any operands to it.
  virtual unsigned
  checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands) {
    return Match_Success;
  }

  /// checkTargetMatchPredicate - Validate the instruction match against
  /// any complex target predicates not expressible via match classes.
  virtual unsigned checkTargetMatchPredicate(MCInst &Inst) {
    return Match_Success;
  }

  virtual void convertToMapAndConstraints(unsigned Kind,
                                          const OperandVector &Operands) = 0;

  /// Returns whether two operands are registers and are equal. This is used
  /// by the tied-operands checks in the AsmMatcher. This method can be
  /// overridden to allow e.g. a sub- or super-register as the tied operand.
  virtual bool areEqualRegs(const MCParsedAsmOperand &Op1,
                            const MCParsedAsmOperand &Op2) const {
    return Op1.isReg() && Op2.isReg() && Op1.getReg() == Op2.getReg();
  }

  // Return whether this parser uses assignment statements with equals tokens
  virtual bool equalIsAsmAssignment() { return true; };
  // Return whether this start of statement identifier is a label
  virtual bool isLabel(AsmToken &Token) { return true; };
  // Return whether this parser accept star as start of statement
  virtual bool starIsStartOfStatement() { return false; };

  virtual const MCExpr *applyModifierToExpr(const MCExpr *E,
                                            MCSymbolRefExpr::VariantKind,
                                            MCContext &Ctx) {
    return nullptr;
  }

  // For actions that have to be performed before a label is emitted
  virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {}
  
  virtual void onLabelParsed(MCSymbol *Symbol) {}

  /// Ensure that all previously parsed instructions have been emitted to the
  /// output streamer, if the target does not emit them immediately.
  virtual void flushPendingInstructions(MCStreamer &Out) {}

  virtual const MCExpr *createTargetUnaryExpr(const MCExpr *E,
                                              AsmToken::TokenKind OperatorToken,
                                              MCContext &Ctx) {
    return nullptr;
  }

  // For any initialization at the beginning of parsing.
  virtual void onBeginOfFile() {}

  // For any checks or cleanups at the end of parsing.
  virtual void onEndOfFile() {}
};

} // end namespace llvm

#endif // LLVM_MC_MCPARSER_MCTARGETASMPARSER_H
//===-- llvm/MC/MCSchedule.h - Scheduling -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the classes used to describe a subtarget's machine model
// for scheduling and other instruction cost heuristics.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSCHEDULE_H
#define LLVM_MC_MCSCHEDULE_H

#include "llvm/Config/llvm-config.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>

namespace llvm {

template <typename T> class ArrayRef;
struct InstrItinerary;
class MCSubtargetInfo;
class MCInstrInfo;
class MCInst;
class InstrItineraryData;

/// Define a kind of processor resource that will be modeled by the scheduler.
struct MCProcResourceDesc {
  const char *Name;
  unsigned NumUnits; // Number of resources of this kind.
  unsigned SuperIdx; // Index of the resource kind that contains this kind.

  // Number of resources that may be buffered.
  //
  // Buffered resources (BufferSize != 0) may be consumed at some indeterminate
  // cycle after dispatch. This should be used for out-of-order cpus when
  // instructions that use this resource can be buffered in a reservation
  // station.
  //
  // Unbuffered resources (BufferSize == 0) always consume their resource some
  // fixed number of cycles after dispatch. If a resource is unbuffered, then
  // the scheduler will avoid scheduling instructions with conflicting resources
  // in the same cycle. This is for in-order cpus, or the in-order portion of
  // an out-of-order cpu.
  int BufferSize;

  // If the resource has sub-units, a pointer to the first element of an array
  // of `NumUnits` elements containing the ProcResourceIdx of the sub units.
  // nullptr if the resource does not have sub-units.
  const unsigned *SubUnitsIdxBegin;

  bool operator==(const MCProcResourceDesc &Other) const {
    return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx
      && BufferSize == Other.BufferSize;
  }
};

/// Identify one of the processor resource kinds consumed by a
/// particular scheduling class for the specified number of cycles.
/// TODO: consider renaming the fields `StartAtCycle` and `Cycles` to
/// `AcquireAtCycle` and `ReleaseAtCycle` respectively, to stress the
/// fact that resource allocation is now represented as an interval,
/// relative to the issue cycle of the instruction.
struct MCWriteProcResEntry {
  uint16_t ProcResourceIdx;
  /// Cycle at which the resource will be released by an instruction,
  /// relative to the cycle in which the instruction is issued
  /// (assuming no stalls in between).
  uint16_t Cycles;
  /// Cycle at which the resource will be grabbed by an instruction,
  /// relative to the cycle in which the instruction is issued
  /// (assuming no stalls in between).
  uint16_t StartAtCycle;

  bool operator==(const MCWriteProcResEntry &Other) const {
    return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles &&
           StartAtCycle == Other.StartAtCycle;
  }
};
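
// Illustrative note (not part of the original header): an entry with
// StartAtCycle == 1 and Cycles == 3 models a resource held over the
// half-open interval [1, 3) relative to the issue cycle, i.e. occupied
// for Cycles - StartAtCycle = 2 cycles.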

/// Specify the latency in cpu cycles for a particular scheduling class and def
/// index. -1 indicates an invalid latency. Heuristics would typically consider
/// an instruction with invalid latency to have infinite latency.  Also identify
/// the WriteResources of this def. When the operand expands to a sequence of
/// writes, this ID is the last write in the sequence.
struct MCWriteLatencyEntry {
  int16_t Cycles;
  uint16_t WriteResourceID;

  bool operator==(const MCWriteLatencyEntry &Other) const {
    return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID;
  }
};

/// Specify the number of cycles allowed after instruction issue before a
/// particular use operand reads its registers. This effectively reduces the
/// write's latency. Here we allow negative cycles for corner cases where
/// latency increases. This rule only applies when the entry's WriteResource
/// matches the write's WriteResource.
///
/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by
/// WriteResourceID.
struct MCReadAdvanceEntry {
  unsigned UseIdx;
  unsigned WriteResourceID;
  int Cycles;

  bool operator==(const MCReadAdvanceEntry &Other) const {
    return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID
      && Cycles == Other.Cycles;
  }
};
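
// Hypothetical helper (an illustrative sketch, not part of the original
// header): shows how a matching ReadAdvance entry reduces the latency seen by
// a reader. E.g. a 4-cycle write read with a ReadAdvance of 3 becomes visible
// after 4 - 3 = 1 cycle; negative ReadAdvance cycles increase latency instead.
inline int getEffectiveLatency(const MCWriteLatencyEntry &Write,
                               const MCReadAdvanceEntry &Read) {
  // The adjustment only applies when the ReadAdvance entry targets the same
  // WriteResource as the write (assumes a valid, non-negative write latency).
  if (Read.WriteResourceID != Write.WriteResourceID)
    return Write.Cycles;
  int Latency = int(Write.Cycles) - Read.Cycles;
  return Latency > 0 ? Latency : 0;
}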

/// Summarize the scheduling resources required for an instruction of a
/// particular scheduling class.
///
/// Defined as an aggregate struct for creating tables with initializer lists.
struct MCSchedClassDesc {
  static const unsigned short InvalidNumMicroOps = (1U << 13) - 1;
  static const unsigned short VariantNumMicroOps = InvalidNumMicroOps - 1;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  const char* Name;
#endif
  uint16_t NumMicroOps : 13;
  uint16_t BeginGroup : 1;
  uint16_t EndGroup : 1;
  uint16_t RetireOOO : 1;
  uint16_t WriteProcResIdx; // First index into WriteProcResTable.
  uint16_t NumWriteProcResEntries;
  uint16_t WriteLatencyIdx; // First index into WriteLatencyTable.
  uint16_t NumWriteLatencyEntries;
  uint16_t ReadAdvanceIdx; // First index into ReadAdvanceTable.
  uint16_t NumReadAdvanceEntries;

  bool isValid() const {
    return NumMicroOps != InvalidNumMicroOps;
  }
  bool isVariant() const {
    return NumMicroOps == VariantNumMicroOps;
  }
};
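
// Illustrative sketch: per-class resource usage is typically reached through
// MCSubtargetInfo (declared above), e.g.:
//
//   const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
//   if (SC->isValid() && !SC->isVariant())
//     for (auto I = STI.getWriteProcResBegin(SC),
//               E = STI.getWriteProcResEnd(SC); I != E; ++I)
//       ; // I->ProcResourceIdx is consumed for I->Cycles cycles.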

/// Specify the cost of a register definition in terms of the number of
/// physical registers allocated at the register renaming stage. For example,
/// AMD Jaguar natively supports 128-bit data types, and operations on 256-bit registers
/// (i.e. YMM registers) are internally split into two COPs (complex operations)
/// and each COP updates a physical register. Basically, on Jaguar, a YMM
/// register write effectively consumes two physical registers. That means,
/// the cost of a YMM write in the BtVer2 model is 2.
struct MCRegisterCostEntry {
  unsigned RegisterClassID;
  unsigned Cost;
  bool AllowMoveElimination;
};

/// A register file descriptor.
///
/// This struct describes a processor register file. In particular, it
/// specifies the size of the register file, as well as the cost of
/// allocating a register at the register renaming stage.
/// FIXME: this struct can be extended to provide information about the number
/// of read/write ports to the register file.  A value of zero for field
/// 'NumPhysRegs' means: this register file has an unbounded number of physical
/// registers.
struct MCRegisterFileDesc {
  const char *Name;
  uint16_t NumPhysRegs;
  uint16_t NumRegisterCostEntries;
  // Index of the first cost entry in MCExtraProcessorInfo::RegisterCostTable.
  uint16_t RegisterCostEntryIdx;
  // A value of zero means: there is no limit in the number of moves that can be
  // eliminated every cycle.
  uint16_t MaxMovesEliminatedPerCycle;
  // True if this register file only knows how to optimize register moves from
  // known zero registers.
  bool AllowZeroMoveEliminationOnly;
};

/// Provide extra details about the machine processor.
///
/// This is a collection of "optional" processor information that is not
/// normally used by the LLVM machine schedulers, but that can be consumed by
/// external tools like llvm-mca to improve the quality of the performance
/// analysis.
struct MCExtraProcessorInfo {
  // Actual size of the reorder buffer in hardware.
  unsigned ReorderBufferSize;
  // Number of instructions retired per cycle.
  unsigned MaxRetirePerCycle;
  const MCRegisterFileDesc *RegisterFiles;
  unsigned NumRegisterFiles;
  const MCRegisterCostEntry *RegisterCostTable;
  unsigned NumRegisterCostEntries;
  unsigned LoadQueueID;
  unsigned StoreQueueID;
};

/// Machine model for scheduling, bundling, and heuristics.
///
/// The machine model directly provides basic information about the
/// microarchitecture to the scheduler in the form of properties. It also
/// optionally refers to scheduler resource tables and itinerary
/// tables. Scheduler resource tables model the latency and cost for each
/// instruction type. Itinerary tables are an independent mechanism that
/// provides a detailed reservation table describing each cycle of instruction
/// execution. Subtargets may define any or all of the above categories of data
/// depending on the type of CPU and selected scheduler.
///
/// The machine independent properties defined here are used by the scheduler as
/// an abstract machine model. A real micro-architecture has a number of
/// buffers, queues, and stages. Declaring that a given machine-independent
/// abstract property corresponds to a specific physical property across all
/// subtargets can't be done. Nonetheless, the abstract model is
/// useful. Furthermore, subtargets typically extend this model with processor
/// specific resources to model any hardware features that can be exploited by
/// scheduling heuristics and aren't sufficiently represented in the abstract.
///
/// The abstract pipeline is built around the notion of an "issue point". This
/// is merely a reference point for counting machine cycles. The physical
/// machine will have pipeline stages that delay execution. The scheduler does
/// not model those delays because they are irrelevant as long as they are
/// consistent. Inaccuracies arise when instructions have different execution
/// delays relative to each other, in addition to their intrinsic latency. Those
/// special cases can be handled by TableGen constructs such as ReadAdvance,
/// which reduces latency when reading data, and ResourceCycles, which consumes
/// a processor resource when writing data for a number of abstract
/// cycles.
///
/// TODO: One tool currently missing is the ability to add a delay to
/// ResourceCycles. That would be easy to add and would likely cover all cases
/// currently handled by the legacy itinerary tables.
///
/// A note on out-of-order execution and, more generally, instruction
/// buffers. Part of the CPU pipeline is always in-order. The issue point, which
/// is the point of reference for counting cycles, only makes sense as an
/// in-order part of the pipeline. Other parts of the pipeline are sometimes
/// falling behind and sometimes catching up. It's only interesting to model
/// those other, decoupled parts of the pipeline if they may be predictably
/// resource constrained in a way that the scheduler can exploit.
///
/// The LLVM machine model distinguishes between in-order constraints and
/// out-of-order constraints so that the target's scheduling strategy can apply
/// appropriate heuristics. For a well-balanced CPU pipeline, out-of-order
/// resources would not typically be treated as a hard scheduling
/// constraint. For example, in the GenericScheduler, a delay caused by limited
/// out-of-order resources is not directly reflected in the number of cycles
/// that the scheduler sees between issuing an instruction and its dependent
/// instructions. In other words, out-of-order resources don't directly increase
/// the latency between pairs of instructions. However, they can still be used
/// to detect potential bottlenecks across a sequence of instructions and bias
/// the scheduling heuristics appropriately.
struct MCSchedModel {
  // IssueWidth is the maximum number of instructions that may be scheduled in
  // the same per-cycle group. This is meant to be a hard in-order constraint
  // (a.k.a. "hazard"). In the GenericScheduler strategy, no more than
  // IssueWidth micro-ops can ever be scheduled in a particular cycle.
  //
  // In practice, IssueWidth is useful to model any bottleneck between the
  // decoder (after micro-op expansion) and the out-of-order reservation
  // stations or the decoder bandwidth itself. If the total number of
  // reservation stations is also a bottleneck, or if any other pipeline stage
  // has a bandwidth limitation, then that can be naturally modeled by adding an
  // out-of-order processor resource.
  unsigned IssueWidth;
  static const unsigned DefaultIssueWidth = 1;

  // MicroOpBufferSize is the number of micro-ops that the processor may buffer
  // for out-of-order execution.
  //
  // "0" means operations that are not ready in this cycle are not considered
  // for scheduling (they go in the pending queue). Latency is paramount. This
  // may be more efficient if many instructions are pending in a schedule.
  //
  // "1" means all instructions are considered for scheduling regardless of
  // whether they are ready in this cycle. Latency still causes issue stalls,
  // but we balance those stalls against other heuristics.
  //
  // "> 1" means the processor is out-of-order. This is a machine independent
  // estimate of highly machine specific characteristics such as the register
  // renaming pool and reorder buffer.
  unsigned MicroOpBufferSize;
  static const unsigned DefaultMicroOpBufferSize = 0;

  // LoopMicroOpBufferSize is the number of micro-ops that the processor may
  // buffer for optimized loop execution. More generally, this represents the
  // optimal number of micro-ops in a loop body. A loop may be partially
  // unrolled to bring the count of micro-ops in the loop body closer to this
  // number.
  unsigned LoopMicroOpBufferSize;
  static const unsigned DefaultLoopMicroOpBufferSize = 0;

  // LoadLatency is the expected latency of load instructions.
  unsigned LoadLatency;
  static const unsigned DefaultLoadLatency = 4;

  // HighLatency is the expected latency of "very high latency" operations.
  // See TargetInstrInfo::isHighLatencyDef().
  // By default, this is set to an arbitrarily high number of cycles
  // likely to have some impact on scheduling heuristics.
  unsigned HighLatency;
  static const unsigned DefaultHighLatency = 10;

  // MispredictPenalty is the typical number of extra cycles the processor
  // takes to recover from a branch misprediction.
  unsigned MispredictPenalty;
  static const unsigned DefaultMispredictPenalty = 10;

  bool PostRAScheduler; // default value is false

  bool CompleteModel;

  // Tells the MachineScheduler whether or not to track resource usage
  // using intervals via ResourceSegments (see
  // llvm/include/llvm/CodeGen/MachineScheduler.h).
  bool EnableIntervals;

  unsigned ProcID;
  const MCProcResourceDesc *ProcResourceTable;
  const MCSchedClassDesc *SchedClassTable;
  unsigned NumProcResourceKinds;
  unsigned NumSchedClasses;
  // Instruction itinerary tables used by InstrItineraryData.
  friend class InstrItineraryData;
  const InstrItinerary *InstrItineraries;

  const MCExtraProcessorInfo *ExtraProcessorInfo;

  bool hasExtraProcessorInfo() const { return ExtraProcessorInfo; }

  unsigned getProcessorID() const { return ProcID; }

  /// Does this machine model include instruction-level scheduling?
  bool hasInstrSchedModel() const { return SchedClassTable; }

  const MCExtraProcessorInfo &getExtraProcessorInfo() const {
    assert(hasExtraProcessorInfo() &&
           "No extra information available for this model");
    return *ExtraProcessorInfo;
  }

  /// Return true if this machine model includes data for all instructions with
  /// a scheduling class (itinerary class or SchedRW list).
  bool isComplete() const { return CompleteModel; }

  /// Return true if the machine supports out-of-order execution.
  bool isOutOfOrder() const { return MicroOpBufferSize > 1; }

  unsigned getNumProcResourceKinds() const {
    return NumProcResourceKinds;
  }

  const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
    assert(hasInstrSchedModel() && "No scheduling machine model");

    assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx");
    return &ProcResourceTable[ProcResourceIdx];
  }

  const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const {
    assert(hasInstrSchedModel() && "No scheduling machine model");

    assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx");
    return &SchedClassTable[SchedClassIdx];
  }

  /// Returns the latency value for the scheduling class.
  static int computeInstrLatency(const MCSubtargetInfo &STI,
                                 const MCSchedClassDesc &SCDesc);

  int computeInstrLatency(const MCSubtargetInfo &STI, unsigned SClass) const;
  int computeInstrLatency(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
                          const MCInst &Inst) const;

  // Returns the reciprocal throughput information from an MCSchedClassDesc.
  static double
  getReciprocalThroughput(const MCSubtargetInfo &STI,
                          const MCSchedClassDesc &SCDesc);

  static double
  getReciprocalThroughput(unsigned SchedClass, const InstrItineraryData &IID);

  double
  getReciprocalThroughput(const MCSubtargetInfo &STI, const MCInstrInfo &MCII,
                          const MCInst &Inst) const;

  /// Returns the maximum forwarding delay for register reads dependent on
  /// writes of scheduling class WriteResourceIdx.
  static unsigned getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                           unsigned WriteResourceIdx = 0);

  /// Returns the default initialized model.
  static const MCSchedModel &GetDefaultSchedModel() { return Default; }
  static const MCSchedModel Default;
};
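
// Example usage (an illustrative sketch; assumes initialized MCSubtargetInfo,
// MCInstrInfo, and MCInst objects):
//
//   const MCSchedModel &SM = STI.getSchedModel();
//   if (SM.hasInstrSchedModel()) {
//     int Lat = SM.computeInstrLatency(STI, MCII, Inst);
//     double RThroughput = SM.getReciprocalThroughput(STI, MCII, Inst);
//   }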

} // namespace llvm

#endif
//===-- llvm/MC/MCFixedLenDisassembler.h - Decoder driver -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Fixed length disassembler decoder state machine driver.
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCFIXEDLENDISASSEMBLER_H
#define LLVM_MC_MCFIXEDLENDISASSEMBLER_H

namespace llvm {

namespace MCD {
// Disassembler state machine opcodes.
enum DecoderOps {
  OPC_ExtractField = 1, // OPC_ExtractField(uint8_t Start, uint8_t Len)
  OPC_FilterValue,      // OPC_FilterValue(uleb128 Val, uint16_t NumToSkip)
  OPC_CheckField,       // OPC_CheckField(uint8_t Start, uint8_t Len,
                        //                uleb128 Val, uint16_t NumToSkip)
  OPC_CheckPredicate,   // OPC_CheckPredicate(uleb128 PIdx, uint16_t NumToSkip)
  OPC_Decode,           // OPC_Decode(uleb128 Opcode, uleb128 DIdx)
  OPC_TryDecode,        // OPC_TryDecode(uleb128 Opcode, uleb128 DIdx,
                        //               uint16_t NumToSkip)
  OPC_SoftFail,         // OPC_SoftFail(uleb128 PMask, uleb128 NMask)
  OPC_Fail              // OPC_Fail()
};
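
// Illustrative sketch of a decoder table using these opcodes (byte layout
// simplified; real tables are generated by TableGen's decoder emitter):
//
//   OPC_ExtractField 25, 3    // Field = (Insn >> 25) & 0b111
//   OPC_FilterValue  0x4, N   // if (Field != 4) skip N bytes to the next case
//   OPC_Decode       Opc, D   // success: build MCInst Opc via decoder index D
//   OPC_Fail                  // no pattern matched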

} // namespace MCD
} // namespace llvm

#endif
//===- MCLinkerOptimizationHint.h - LOH interface ---------------*- C++ -*-===//
//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares some helper classes to handle Linker Optimization
// Hints (LOH).
//
// FIXME: LOH interface supports only MachO format at the moment.
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
#define LLVM_MC_MCLINKEROPTIMIZATIONHINT_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class MachObjectWriter;
class MCAsmLayout;
class MCSymbol;
class raw_ostream;

/// Linker Optimization Hint Type.
enum MCLOHType {
  MCLOH_AdrpAdrp = 0x1u,      ///< Adrp xY, _v1@PAGE -> Adrp xY, _v2@PAGE.
  MCLOH_AdrpLdr = 0x2u,       ///< Adrp _v@PAGE -> Ldr _v@PAGEOFF.
  MCLOH_AdrpAddLdr = 0x3u,    ///< Adrp _v@PAGE -> Add _v@PAGEOFF -> Ldr.
  MCLOH_AdrpLdrGotLdr = 0x4u, ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF -> Ldr.
  MCLOH_AdrpAddStr = 0x5u,    ///< Adrp _v@PAGE -> Add _v@PAGEOFF -> Str.
  MCLOH_AdrpLdrGotStr = 0x6u, ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF -> Str.
  MCLOH_AdrpAdd = 0x7u,       ///< Adrp _v@PAGE -> Add _v@PAGEOFF.
  MCLOH_AdrpLdrGot = 0x8u     ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF.
};

static inline StringRef MCLOHDirectiveName() {
  return StringRef(".loh");
}

static inline bool isValidMCLOHType(unsigned Kind) {
  return Kind >= MCLOH_AdrpAdrp && Kind <= MCLOH_AdrpLdrGot;
}

static inline int MCLOHNameToId(StringRef Name) {
#define MCLOHCaseNameToId(Name)     .Case(#Name, MCLOH_ ## Name)
  return StringSwitch<int>(Name)
    MCLOHCaseNameToId(AdrpAdrp)
    MCLOHCaseNameToId(AdrpLdr)
    MCLOHCaseNameToId(AdrpAddLdr)
    MCLOHCaseNameToId(AdrpLdrGotLdr)
    MCLOHCaseNameToId(AdrpAddStr)
    MCLOHCaseNameToId(AdrpLdrGotStr)
    MCLOHCaseNameToId(AdrpAdd)
    MCLOHCaseNameToId(AdrpLdrGot)
    .Default(-1);
#undef MCLOHCaseNameToId
}

static inline StringRef MCLOHIdToName(MCLOHType Kind) {
#define MCLOHCaseIdToName(Name)      case MCLOH_ ## Name: return StringRef(#Name);
  switch (Kind) {
    MCLOHCaseIdToName(AdrpAdrp);
    MCLOHCaseIdToName(AdrpLdr);
    MCLOHCaseIdToName(AdrpAddLdr);
    MCLOHCaseIdToName(AdrpLdrGotLdr);
    MCLOHCaseIdToName(AdrpAddStr);
    MCLOHCaseIdToName(AdrpLdrGotStr);
    MCLOHCaseIdToName(AdrpAdd);
    MCLOHCaseIdToName(AdrpLdrGot);
  }
  return StringRef();
#undef MCLOHCaseIdToName
}

static inline int MCLOHIdToNbArgs(MCLOHType Kind) {
  switch (Kind) {
    // LOH with two arguments
  case MCLOH_AdrpAdrp:
  case MCLOH_AdrpLdr:
  case MCLOH_AdrpAdd:
  case MCLOH_AdrpLdrGot:
    return 2;
    // LOH with three arguments
  case MCLOH_AdrpAddLdr:
  case MCLOH_AdrpLdrGotLdr:
  case MCLOH_AdrpAddStr:
  case MCLOH_AdrpLdrGotStr:
    return 3;
  }
  return -1;
}
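
// Example of the corresponding assembly syntax (AArch64 MachO; the label and
// symbol names are hypothetical):
//
//   Lloh0:  adrp x8, _var@PAGE
//   Lloh1:  ldr  x8, [x8, _var@PAGEOFF]
//           .loh AdrpLdr Lloh0, Lloh1
//
// MCLOHIdToNbArgs(MCLOH_AdrpLdr) == 2, matching the two label arguments.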

/// Store Linker Optimization Hint information (LOH).
class MCLOHDirective {
  MCLOHType Kind;

  /// Arguments of this directive. Order matters.
  SmallVector<MCSymbol *, 3> Args;

  /// Emit this directive in \p OutStream using the information available
  /// in the given \p ObjWriter and \p Layout to get the address of the
  /// arguments within the object file.
  void emit_impl(raw_ostream &OutStream, const MachObjectWriter &ObjWriter,
                 const MCAsmLayout &Layout) const;

public:
  using LOHArgs = SmallVectorImpl<MCSymbol *>;

  MCLOHDirective(MCLOHType Kind, const LOHArgs &Args)
      : Kind(Kind), Args(Args.begin(), Args.end()) {
    assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
  }

  MCLOHType getKind() const { return Kind; }

  const LOHArgs &getArgs() const { return Args; }

  /// Emit this directive as:
  /// <kind, numArgs, addr1, ..., addrN>
  void emit(MachObjectWriter &ObjWriter, const MCAsmLayout &Layout) const;

  /// Get the size in bytes of this directive if emitted in \p ObjWriter with
  /// the given \p Layout.
  uint64_t getEmitSize(const MachObjectWriter &ObjWriter,
                       const MCAsmLayout &Layout) const;
};

class MCLOHContainer {
  /// Keep track of the emit size of all the LOHs.
  mutable uint64_t EmitSize = 0;

  /// Keep track of all LOH directives.
  SmallVector<MCLOHDirective, 32> Directives;

public:
  using LOHDirectives = SmallVectorImpl<MCLOHDirective>;

  MCLOHContainer() = default;

  /// Const accessor to the directives.
  const LOHDirectives &getDirectives() const {
    return Directives;
  }

  /// Add the directive of the given kind \p Kind with the given arguments
  /// \p Args to the container.
  void addDirective(MCLOHType Kind, const MCLOHDirective::LOHArgs &Args) {
    Directives.push_back(MCLOHDirective(Kind, Args));
  }

  /// Get the size of the directives if emitted.
  uint64_t getEmitSize(const MachObjectWriter &ObjWriter,
                       const MCAsmLayout &Layout) const {
    if (!EmitSize) {
      for (const MCLOHDirective &D : Directives)
        EmitSize += D.getEmitSize(ObjWriter, Layout);
    }
    return EmitSize;
  }

  /// Emit all Linker Optimization Hints in one big table.
  /// Each line of the table is emitted by LOHDirective::emit.
  void emit(MachObjectWriter &ObjWriter, const MCAsmLayout &Layout) const {
    for (const MCLOHDirective &D : Directives)
      D.emit(ObjWriter, Layout);
  }

  void reset() {
    Directives.clear();
    EmitSize = 0;
  }
};
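
// Illustrative usage sketch (hypothetical; L1 and L2 are previously defined
// MCSymbols, ObjWriter and Layout come from the MachO writer):
//
//   MCLOHContainer LOHs;
//   SmallVector<MCSymbol *, 2> Args = {L1, L2};
//   LOHs.addDirective(MCLOH_AdrpLdr, Args);
//   uint64_t Size = LOHs.getEmitSize(ObjWriter, Layout);
//   LOHs.emit(ObjWriter, Layout);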

// Add types for specialized template using MCSymbol.
using MCLOHArgs = MCLOHDirective::LOHArgs;
using MCLOHDirectives = MCLOHContainer::LOHDirectives;

} // end namespace llvm

#endif // LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
//===- MCSectionXCOFF.h - XCOFF Machine Code Sections -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionXCOFF class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONXCOFF_H
#define LLVM_MC_MCSECTIONXCOFF_H

#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbolXCOFF.h"

namespace llvm {

// This class represents an XCOFF `Control Section`, more commonly referred to
// as a csect. A csect represents the smallest possible unit of data/code which
// will be relocated as a single block. A csect can either be:
// 1) Initialized: The Type will be XTY_SD, and the symbols inside the csect
//    will have a label definition representing their offset within the csect.
// 2) Uninitialized: The Type will be XTY_CM, it will contain a single symbol,
//    and may not contain label definitions.
// 3) An external reference providing a symbol table entry for a symbol
//    contained in another XCOFF object file. External reference csects are not
//    implemented yet.
class MCSectionXCOFF final : public MCSection {
  friend class MCContext;

  std::optional<XCOFF::CsectProperties> CsectProp;
  MCSymbolXCOFF *const QualName;
  StringRef SymbolTableName;
  std::optional<XCOFF::DwarfSectionSubtypeFlags> DwarfSubtypeFlags;
  bool MultiSymbolsAllowed;
  static constexpr unsigned DefaultAlignVal = 4;
  static constexpr unsigned DefaultTextAlignVal = 32;

  MCSectionXCOFF(StringRef Name, XCOFF::StorageMappingClass SMC,
                 XCOFF::SymbolType ST, SectionKind K, MCSymbolXCOFF *QualName,
                 MCSymbol *Begin, StringRef SymbolTableName,
                 bool MultiSymbolsAllowed)
      : MCSection(SV_XCOFF, Name, K, Begin),
        CsectProp(XCOFF::CsectProperties(SMC, ST)), QualName(QualName),
        SymbolTableName(SymbolTableName), DwarfSubtypeFlags(std::nullopt),
        MultiSymbolsAllowed(MultiSymbolsAllowed) {
    assert(
        (ST == XCOFF::XTY_SD || ST == XCOFF::XTY_CM || ST == XCOFF::XTY_ER) &&
        "Invalid or unhandled type for csect.");
    assert(QualName != nullptr && "QualName is needed.");
    if (SMC == XCOFF::XMC_UL)
      assert((ST == XCOFF::XTY_CM || ST == XCOFF::XTY_ER) &&
             "Invalid csect type for storage mapping class XCOFF::XMC_UL");

    QualName->setRepresentedCsect(this);
    QualName->setStorageClass(XCOFF::C_HIDEXT);
    if (ST != XCOFF::XTY_ER) {
      // For a csect for program code, set the alignment to 32 bytes by default.
      // For other csects, set the alignment to 4 bytes by default.
      if (SMC == XCOFF::XMC_PR)
        setAlignment(Align(DefaultTextAlignVal));
      else
        setAlignment(Align(DefaultAlignVal));
    }
  }

  MCSectionXCOFF(StringRef Name, SectionKind K, MCSymbolXCOFF *QualName,
                 XCOFF::DwarfSectionSubtypeFlags DwarfSubtypeFlags,
                 MCSymbol *Begin, StringRef SymbolTableName,
                 bool MultiSymbolsAllowed)
      : MCSection(SV_XCOFF, Name, K, Begin), QualName(QualName),
        SymbolTableName(SymbolTableName), DwarfSubtypeFlags(DwarfSubtypeFlags),
        MultiSymbolsAllowed(MultiSymbolsAllowed) {
    assert(QualName != nullptr && "QualName is needed.");

    // FIXME: use a more meaningful name for non-csect sections.
    QualName->setRepresentedCsect(this);

    // Use default text alignment as the alignment for DWARF sections.
    setAlignment(Align(DefaultTextAlignVal));
  }

  void printCsectDirective(raw_ostream &OS) const;

public:
  ~MCSectionXCOFF();

  static bool classof(const MCSection *S) {
    return S->getVariant() == SV_XCOFF;
  }

  XCOFF::StorageMappingClass getMappingClass() const {
    assert(isCsect() && "Only csect section has mapping class property!");
    return CsectProp->MappingClass;
  }
  XCOFF::StorageClass getStorageClass() const {
    return QualName->getStorageClass();
  }
  XCOFF::VisibilityType getVisibilityType() const {
    return QualName->getVisibilityType();
  }
  XCOFF::SymbolType getCSectType() const {
    assert(isCsect() && "Only csect section has symbol type property!");
    return CsectProp->Type;
  }
  MCSymbolXCOFF *getQualNameSymbol() const { return QualName; }

  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override;
  bool useCodeAlign() const override;
  bool isVirtualSection() const override;
  StringRef getSymbolTableName() const { return SymbolTableName; }
  bool isMultiSymbolsAllowed() const { return MultiSymbolsAllowed; }
  bool isCsect() const { return CsectProp.has_value(); }
  bool isDwarfSect() const { return DwarfSubtypeFlags.has_value(); }
  std::optional<XCOFF::DwarfSectionSubtypeFlags> getDwarfSubtypeFlags() const {
    return DwarfSubtypeFlags;
  }
  std::optional<XCOFF::CsectProperties> getCsectProp() const {
    return CsectProp;
  }
};

} // end namespace llvm

#endif
//===-- MCTargetOptionsCommandFlags.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains machine code-specific flags that are shared between
// different command line tools.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
#define LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H

#include <optional>
#include <string>

namespace llvm {

class MCTargetOptions;
enum class EmitDwarfUnwindType;

namespace mc {

bool getRelaxAll();
std::optional<bool> getExplicitRelaxAll();

bool getIncrementalLinkerCompatible();

int getDwarfVersion();

bool getDwarf64();

EmitDwarfUnwindType getEmitDwarfUnwind();

bool getEmitCompactUnwindNonCanonical();

bool getShowMCInst();

bool getFatalWarnings();

bool getNoWarn();

bool getNoDeprecatedWarn();

bool getNoTypeCheck();

std::string getABIName();

std::string getAsSecureLogFile();

/// Create this object with static storage to register mc-related command
/// line options.
struct RegisterMCTargetOptionsFlags {
  RegisterMCTargetOptionsFlags();
};

MCTargetOptions InitMCTargetOptionsFromFlags();
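
// Typical usage in a tool (an illustrative sketch; cl::ParseCommandLineOptions
// comes from llvm/Support/CommandLine.h):
//
//   static llvm::mc::RegisterMCTargetOptionsFlags MOF; // registers the options
//
//   int main(int argc, char **argv) {
//     llvm::cl::ParseCommandLineOptions(argc, argv);
//     llvm::MCTargetOptions Opts = llvm::mc::InitMCTargetOptionsFromFlags();
//     // ...
//   }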

} // namespace mc

} // namespace llvm

#endif
//===- MCInstPrinter.h - MCInst to target assembly syntax -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINSTPRINTER_H
#define LLVM_MC_MCINSTPRINTER_H

#include "llvm/Support/Format.h"
#include <cstdint>

namespace llvm {

class MCAsmInfo;
class MCInst;
class MCInstrAnalysis;
class MCInstrInfo;
class MCOperand;
class MCRegister;
class MCRegisterInfo;
class MCSubtargetInfo;
class StringRef;
class raw_ostream;

/// Convert `Bytes' to a hex string and output to `OS'
void dumpBytes(ArrayRef<uint8_t> Bytes, raw_ostream &OS);

namespace HexStyle {

enum Style {
  C,  ///< 0xff
  Asm ///< 0ffh
};

} // end namespace HexStyle

struct AliasMatchingData;

/// This is an instance of a target assembly language printer that
/// converts an MCInst to valid target assembly syntax.
class MCInstPrinter {
protected:
  /// A stream that comments can be emitted to if desired.  Each comment
  /// must end with a newline.  This will be null if verbose assembly emission
  /// is disabled.
  raw_ostream *CommentStream = nullptr;
  const MCAsmInfo &MAI;
  const MCInstrInfo &MII;
  const MCRegisterInfo &MRI;
  const MCInstrAnalysis *MIA = nullptr;

  /// True if we are printing marked up assembly.
  bool UseMarkup = false;

  /// True if we prefer aliases (e.g. nop) to raw mnemonics.
  bool PrintAliases = true;

  /// True if we are printing immediates as hex.
  bool PrintImmHex = false;

  /// Which style to use for printing hexadecimal values.
  HexStyle::Style PrintHexStyle = HexStyle::C;

  /// If true, a branch immediate (e.g. bl 4) will be printed as a hexadecimal
  /// address (e.g. bl 0x20004). This is useful for a stream disassembler
  /// (llvm-objdump -d).
  bool PrintBranchImmAsAddress = false;

  /// If true, symbolize branch target and memory reference operands.
  bool SymbolizeOperands = false;

  /// Utility function for printing annotations.
  void printAnnotation(raw_ostream &OS, StringRef Annot);

  /// Helper for matching MCInsts to alias patterns when printing instructions.
  const char *matchAliasPatterns(const MCInst *MI, const MCSubtargetInfo *STI,
                                 const AliasMatchingData &M);

public:
  MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
                const MCRegisterInfo &mri) : MAI(mai), MII(mii), MRI(mri) {}

  virtual ~MCInstPrinter();

  /// Customize the printer according to a command line option.
  /// @return true if the option is recognized and applied.
  virtual bool applyTargetSpecificCLOption(StringRef Opt) { return false; }

  /// Specify a stream to emit comments to.
  void setCommentStream(raw_ostream &OS) { CommentStream = &OS; }

  /// Returns a pair containing the mnemonic for \p MI and the number of bits
  /// left for further processing by printInstruction (generated by tablegen).
  virtual std::pair<const char *, uint64_t> getMnemonic(const MCInst *MI) = 0;

  /// Print the specified MCInst to the specified raw_ostream.
  ///
  /// \p Address the address of current instruction on most targets, used to
  /// print a PC relative immediate as the target address. On targets where a PC
  /// relative immediate is relative to the next instruction and the length of a
  /// MCInst is difficult to measure (e.g. x86), this is the address of the next
  /// instruction. If Address is 0, the immediate will be printed.
  virtual void printInst(const MCInst *MI, uint64_t Address, StringRef Annot,
                         const MCSubtargetInfo &STI, raw_ostream &OS) = 0;

  /// Return the name of the specified opcode enum (e.g. "MOV32ri") or
  /// empty if we can't resolve it.
  StringRef getOpcodeName(unsigned Opcode) const;

  /// Print the assembler register name.
  virtual void printRegName(raw_ostream &OS, MCRegister Reg) const;

  bool getUseMarkup() const { return UseMarkup; }
  void setUseMarkup(bool Value) { UseMarkup = Value; }

  /// Utility functions to make adding mark ups simpler.
  StringRef markup(StringRef s) const;

  bool getPrintImmHex() const { return PrintImmHex; }
  void setPrintImmHex(bool Value) { PrintImmHex = Value; }

  void setPrintHexStyle(HexStyle::Style Value) { PrintHexStyle = Value; }

  void setPrintBranchImmAsAddress(bool Value) {
    PrintBranchImmAsAddress = Value;
  }

  void setSymbolizeOperands(bool Value) { SymbolizeOperands = Value; }
  void setMCInstrAnalysis(const MCInstrAnalysis *Value) { MIA = Value; }

  /// Utility function to print immediates in decimal or hex.
  format_object<int64_t> formatImm(int64_t Value) const {
    return PrintImmHex ? formatHex(Value) : formatDec(Value);
  }

  /// Utility functions to print decimal/hexadecimal values.
  format_object<int64_t> formatDec(int64_t Value) const;
  format_object<int64_t> formatHex(int64_t Value) const;
  format_object<uint64_t> formatHex(uint64_t Value) const;
};
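
// Illustrative usage sketch (assumes IP was created via
// TheTarget.createMCInstPrinter() and that Inst, Address, and STI are valid):
//
//   IP->setPrintImmHex(true);
//   IP->printInst(&Inst, Address, /*Annot=*/"", STI, llvm::outs());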

/// Map from opcode to pattern list by binary search.
struct PatternsForOpcode {
  uint32_t Opcode;
  uint16_t PatternStart;
  uint16_t NumPatterns;
};

/// Data for each alias pattern. Includes feature bits, string, number of
/// operands, and a variadic list of conditions to check.
struct AliasPattern {
  uint32_t AsmStrOffset;
  uint32_t AliasCondStart;
  uint8_t NumOperands;
  uint8_t NumConds;
};

struct AliasPatternCond {
  enum CondKind : uint8_t {
    K_Feature,       // Match only if a feature is enabled.
    K_NegFeature,    // Match only if a feature is disabled.
    K_OrFeature,     // Match only if one of a set of features is enabled.
    K_OrNegFeature,  // Match only if one of a set of features is disabled.
    K_EndOrFeatures, // Note end of list of K_Or(Neg)?Features.
    K_Ignore,        // Match any operand.
    K_Reg,           // Match a specific register.
    K_TiedReg,       // Match another already matched register.
    K_Imm,           // Match a specific immediate.
    K_RegClass,      // Match registers in a class.
    K_Custom,        // Call custom matcher by index.
  };

  CondKind Kind;
  uint32_t Value;
};

/// Tablegenerated data structures needed to match alias patterns.
struct AliasMatchingData {
  ArrayRef<PatternsForOpcode> OpToPatterns;
  ArrayRef<AliasPattern> Patterns;
  ArrayRef<AliasPatternCond> PatternConds;
  StringRef AsmStrings;
  bool (*ValidateMCOperand)(const MCOperand &MCOp, const MCSubtargetInfo &STI,
                            unsigned PredicateIndex);
};

} // end namespace llvm

#endif // LLVM_MC_MCINSTPRINTER_H
//===- llvm/MC/MCDXContainerWriter.h - DXContainer Writer -*- C++ -------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDXCONTAINERWRITER_H
#define LLVM_MC_MCDXCONTAINERWRITER_H

#include "llvm/MC/MCObjectWriter.h"
#include "llvm/TargetParser/Triple.h"

namespace llvm {

class raw_pwrite_stream;

class MCDXContainerTargetWriter : public MCObjectTargetWriter {
protected:
  MCDXContainerTargetWriter() {}

public:
  virtual ~MCDXContainerTargetWriter();

  Triple::ObjectFormatType getFormat() const override {
    return Triple::DXContainer;
  }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::DXContainer;
  }
};

/// Construct a new DXContainer writer instance.
///
/// \param MOTW - The target specific DXContainer writer subclass.
/// \param OS - The stream to write to.
/// \returns The constructed object writer.
std::unique_ptr<MCObjectWriter>
createDXContainerObjectWriter(std::unique_ptr<MCDXContainerTargetWriter> MOTW,
                              raw_pwrite_stream &OS);

} // end namespace llvm

#endif // LLVM_MC_MCDXCONTAINERWRITER_H
//===- MCCodeView.h - Machine Code CodeView support -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Holds state from .cv_file and .cv_loc directives for later emission.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCCODEVIEW_H
#define LLVM_MC_MCCODEVIEW_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <map>
#include <vector>

namespace llvm {
class MCAsmLayout;
class MCCVDefRangeFragment;
class MCCVInlineLineTableFragment;
class MCDataFragment;
class MCFragment;
class MCSection;
class MCSymbol;
class MCContext;
class MCObjectStreamer;
class MCStreamer;

/// Instances of this class represent the information from a
/// .cv_loc directive.
class MCCVLoc {
  const MCSymbol *Label = nullptr;
  uint32_t FunctionId;
  uint32_t FileNum;
  uint32_t Line;
  uint16_t Column;
  uint16_t PrologueEnd : 1;
  uint16_t IsStmt : 1;

private: // CodeViewContext manages these
  friend class CodeViewContext;
  MCCVLoc(const MCSymbol *Label, unsigned functionid, unsigned fileNum,
          unsigned line, unsigned column, bool prologueend, bool isstmt)
      : Label(Label), FunctionId(functionid), FileNum(fileNum), Line(line),
        Column(column), PrologueEnd(prologueend), IsStmt(isstmt) {}

  // Allow the default copy constructor and assignment operator to be used
  // for an MCCVLoc object.

public:
  const MCSymbol *getLabel() const { return Label; }

  unsigned getFunctionId() const { return FunctionId; }

  /// Get the FileNum of this MCCVLoc.
  unsigned getFileNum() const { return FileNum; }

  /// Get the Line of this MCCVLoc.
  unsigned getLine() const { return Line; }

  /// Get the Column of this MCCVLoc.
  unsigned getColumn() const { return Column; }

  bool isPrologueEnd() const { return PrologueEnd; }
  bool isStmt() const { return IsStmt; }

  void setLabel(const MCSymbol *L) { Label = L; }

  void setFunctionId(unsigned FID) { FunctionId = FID; }

  /// Set the FileNum of this MCCVLoc.
  void setFileNum(unsigned fileNum) { FileNum = fileNum; }

  /// Set the Line of this MCCVLoc.
  void setLine(unsigned line) { Line = line; }

  /// Set the Column of this MCCVLoc.
  void setColumn(unsigned column) {
    assert(column <= UINT16_MAX);
    Column = column;
  }

  void setPrologueEnd(bool PE) { PrologueEnd = PE; }
  void setIsStmt(bool IS) { IsStmt = IS; }
};

/// Information describing a function or inlined call site introduced by
/// .cv_func_id or .cv_inline_site_id. Accumulates information from .cv_loc
/// directives used with this function's id or the id of an inlined call site
/// within this function or inlined call site.
struct MCCVFunctionInfo {
  /// If this represents an inlined call site, then ParentFuncIdPlusOne will be
  /// the parent function id plus one. If this represents a normal function,
  /// then there is no parent, and ParentFuncIdPlusOne will be FunctionSentinel.
  /// If this struct is an unallocated slot in the function info vector, then
  /// ParentFuncIdPlusOne will be zero.
  unsigned ParentFuncIdPlusOne = 0;

  enum : unsigned { FunctionSentinel = ~0U };

  struct LineInfo {
    unsigned File;
    unsigned Line;
    unsigned Col;
  };

  LineInfo InlinedAt;

  /// The section of the first .cv_loc directive used for this function, or null
  /// if none has been seen yet.
  MCSection *Section = nullptr;

  /// Map from inlined call site id to the inlined at location to use for that
  /// call site. Call chains are collapsed, so for the call chain 'f -> g -> h',
  /// the InlinedAtMap of 'f' will contain entries for 'g' and 'h' that both
  /// list the line info for the 'g' call site.
  DenseMap<unsigned, LineInfo> InlinedAtMap;

  /// Returns true if this function info has not yet been used in a
  /// .cv_func_id or .cv_inline_site_id directive.
  bool isUnallocatedFunctionInfo() const { return ParentFuncIdPlusOne == 0; }

  /// Returns true if this represents an inlined call site, meaning
  /// ParentFuncIdPlusOne is neither zero nor ~0U.
  bool isInlinedCallSite() const {
    return !isUnallocatedFunctionInfo() &&
           ParentFuncIdPlusOne != FunctionSentinel;
  }

  unsigned getParentFuncId() const {
    assert(isInlinedCallSite());
    return ParentFuncIdPlusOne - 1;
  }
};

/// Holds state from .cv_file and .cv_loc directives for later emission.
class CodeViewContext {
public:
  CodeViewContext();
  ~CodeViewContext();

  CodeViewContext &operator=(const CodeViewContext &other) = delete;
  CodeViewContext(const CodeViewContext &other) = delete;

  bool isValidFileNumber(unsigned FileNumber) const;
  bool addFile(MCStreamer &OS, unsigned FileNumber, StringRef Filename,
               ArrayRef<uint8_t> ChecksumBytes, uint8_t ChecksumKind);

  /// Records the function id of a normal function. Returns false if the
  /// function id has already been used, and true otherwise.
  bool recordFunctionId(unsigned FuncId);

  /// Records the function id of an inlined call site. Records the "inlined at"
  /// location info of the call site, including what function or inlined call
  /// site it was inlined into. Returns false if the function id has already
  /// been used, and true otherwise.
  bool recordInlinedCallSiteId(unsigned FuncId, unsigned IAFunc,
                               unsigned IAFile, unsigned IALine,
                               unsigned IACol);

  /// Retrieve the function info if this is a valid function id, or nullptr.
  MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId);

  /// Saves the information from the currently parsed .cv_loc directive
  /// and sets CVLocSeen.  When the next instruction is assembled, an entry
  /// in the line number table with this information and the address of the
  /// instruction will be created.
  void recordCVLoc(MCContext &Ctx, const MCSymbol *Label, unsigned FunctionId,
                   unsigned FileNo, unsigned Line, unsigned Column,
                   bool PrologueEnd, bool IsStmt);

  /// Add a line entry.
  void addLineEntry(const MCCVLoc &LineEntry);

  std::vector<MCCVLoc> getFunctionLineEntries(unsigned FuncId);

  std::pair<size_t, size_t> getLineExtent(unsigned FuncId);

  ArrayRef<MCCVLoc> getLinesForExtent(size_t L, size_t R);

  /// Emits a line table substream.
  void emitLineTableForFunction(MCObjectStreamer &OS, unsigned FuncId,
                                const MCSymbol *FuncBegin,
                                const MCSymbol *FuncEnd);

  void emitInlineLineTableForFunction(MCObjectStreamer &OS,
                                      unsigned PrimaryFunctionId,
                                      unsigned SourceFileId,
                                      unsigned SourceLineNum,
                                      const MCSymbol *FnStartSym,
                                      const MCSymbol *FnEndSym);

  /// Encodes the binary annotations once we have a layout.
  void encodeInlineLineTable(MCAsmLayout &Layout,
                             MCCVInlineLineTableFragment &F);

  MCFragment *
  emitDefRange(MCObjectStreamer &OS,
               ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
               StringRef FixedSizePortion);

  void encodeDefRange(MCAsmLayout &Layout, MCCVDefRangeFragment &F);

  /// Emits the string table substream.
  void emitStringTable(MCObjectStreamer &OS);

  /// Emits the file checksum substream.
  void emitFileChecksums(MCObjectStreamer &OS);

  /// Emits the offset into the checksum table of the given file number.
  void emitFileChecksumOffset(MCObjectStreamer &OS, unsigned FileNo);

  /// Add something to the string table.  Returns the final string as well as
  /// the offset into the string table.
  std::pair<StringRef, unsigned> addToStringTable(StringRef S);

private:
  /// Map from string to string table offset.
  StringMap<unsigned> StringTable;

  /// The fragment that ultimately holds our strings.
  MCDataFragment *StrTabFragment = nullptr;
  bool InsertedStrTabFragment = false;

  MCDataFragment *getStringTableFragment();

  /// Get a string table offset.
  unsigned getStringTableOffset(StringRef S);

  struct FileInfo {
    unsigned StringTableOffset;

    // Indicates if this FileInfo corresponds to an actual file, or hasn't been
    // set yet.
    bool Assigned = false;

    uint8_t ChecksumKind;

    ArrayRef<uint8_t> Checksum;

    // Checksum offset stored as a symbol because it might be requested
    // before it has been calculated, so a fixup may be needed.
    MCSymbol *ChecksumTableOffset;
  };

  /// Array storing added file information.
  SmallVector<FileInfo, 4> Files;

  /// The offset of the first and last .cv_loc directive for a given function
  /// id.
  std::map<unsigned, std::pair<size_t, size_t>> MCCVLineStartStop;

  /// A collection of MCCVLoc for each section.
  std::vector<MCCVLoc> MCCVLines;

  /// All known functions and inlined call sites, indexed by function id.
  std::vector<MCCVFunctionInfo> Functions;

  /// Indicate whether we have already laid out the checksum table addresses or
  /// not.
  bool ChecksumOffsetsAssigned = false;
};

} // end namespace llvm
#endif
//===-- llvm/MC/MCInstrDesc.h - Instruction Descriptors -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MCOperandInfo and MCInstrDesc classes, which
// are used to describe target instructions and their operands.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINSTRDESC_H
#define LLVM_MC_MCINSTRDESC_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/MC/MCRegister.h"

namespace llvm {
class MCRegisterInfo;

class MCInst;

//===----------------------------------------------------------------------===//
// Machine Operand Flags and Description
//===----------------------------------------------------------------------===//

namespace MCOI {
/// Operand constraints. These are encoded in 16 bits with one of the
/// low-order 3 bits specifying that a constraint is present and the
/// corresponding high-order hex digit specifying the constraint value.
/// This allows for a maximum of 3 constraints.
enum OperandConstraint {
  TIED_TO = 0,  // Must be allocated the same register as specified value.
  EARLY_CLOBBER // If present, operand is an early clobber register.
};

// Define a macro to produce each constraint value.
#define MCOI_TIED_TO(op) \
  ((1 << MCOI::TIED_TO) | ((op) << (4 + MCOI::TIED_TO * 4)))

#define MCOI_EARLY_CLOBBER \
  (1 << MCOI::EARLY_CLOBBER)
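
// Worked example: MCOI_TIED_TO(0) encodes as (1 << TIED_TO) | (0 << 4) ==
// 0x0001 and MCOI_TIED_TO(2) as (1 << TIED_TO) | (2 << 4) == 0x0021; bit 0
// flags "TIED_TO present" while bits [4, 8) hold the tied operand index.
// MCOI_EARLY_CLOBBER sets bit 1 and carries no value.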

/// These are flags set on operands, but should be considered
/// private, all access should go through the MCOperandInfo accessors.
/// See the accessors for a description of what these are.
enum OperandFlags {
  LookupPtrRegClass = 0,
  Predicate,
  OptionalDef,
  BranchTarget
};

/// Operands are tagged with one of the values of this enum.
enum OperandType {
  OPERAND_UNKNOWN = 0,
  OPERAND_IMMEDIATE = 1,
  OPERAND_REGISTER = 2,
  OPERAND_MEMORY = 3,
  OPERAND_PCREL = 4,

  OPERAND_FIRST_GENERIC = 6,
  OPERAND_GENERIC_0 = 6,
  OPERAND_GENERIC_1 = 7,
  OPERAND_GENERIC_2 = 8,
  OPERAND_GENERIC_3 = 9,
  OPERAND_GENERIC_4 = 10,
  OPERAND_GENERIC_5 = 11,
  OPERAND_LAST_GENERIC = 11,

  OPERAND_FIRST_GENERIC_IMM = 12,
  OPERAND_GENERIC_IMM_0 = 12,
  OPERAND_LAST_GENERIC_IMM = 12,

  OPERAND_FIRST_TARGET = 13,
};

} // namespace MCOI

/// This holds information about one operand of a machine instruction,
/// indicating the register class for register operands, etc.
class MCOperandInfo {
public:
  /// This specifies the register class enumeration of the operand
  /// if the operand is a register.  If isLookupPtrRegClass is set, then this is
  /// an index that is passed to TargetRegisterInfo::getPointerRegClass(x) to
  /// get a dynamic register class.
  int16_t RegClass;

  /// These are flags from the MCOI::OperandFlags enum.
  uint8_t Flags;

  /// Information about the type of the operand.
  uint8_t OperandType;

  /// Operand constraints (see OperandConstraint enum).
  uint16_t Constraints;

  /// Set if this operand is a pointer value and it requires a callback
  /// to look up its register class.
  bool isLookupPtrRegClass() const {
    return Flags & (1 << MCOI::LookupPtrRegClass);
  }

  /// Set if this is one of the operands that make up the predicate
  /// operand that controls an isPredicable() instruction.
  bool isPredicate() const { return Flags & (1 << MCOI::Predicate); }

  /// Set if this operand is an optional def.
  bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }

  /// Set if this operand is a branch target.
  bool isBranchTarget() const { return Flags & (1 << MCOI::BranchTarget); }

  bool isGenericType() const {
    return OperandType >= MCOI::OPERAND_FIRST_GENERIC &&
           OperandType <= MCOI::OPERAND_LAST_GENERIC;
  }

  unsigned getGenericTypeIndex() const {
    assert(isGenericType() && "non-generic types don't have an index");
    return OperandType - MCOI::OPERAND_FIRST_GENERIC;
  }

  bool isGenericImm() const {
    return OperandType >= MCOI::OPERAND_FIRST_GENERIC_IMM &&
           OperandType <= MCOI::OPERAND_LAST_GENERIC_IMM;
  }

  unsigned getGenericImmIndex() const {
    assert(isGenericImm() && "non-generic immediates don't have an index");
    return OperandType - MCOI::OPERAND_FIRST_GENERIC_IMM;
  }
};

//===----------------------------------------------------------------------===//
// Machine Instruction Flags and Description
//===----------------------------------------------------------------------===//

namespace MCID {
/// These should be considered private to the implementation of the
/// MCInstrDesc class.  Clients should use the predicate methods on MCInstrDesc,
/// not use these directly.  These all correspond to bitfields in the
/// MCInstrDesc::Flags field.
enum Flag {
  PreISelOpcode = 0,
  Variadic,
  HasOptionalDef,
  Pseudo,
  Meta,
  Return,
  EHScopeReturn,
  Call,
  Barrier,
  Terminator,
  Branch,
  IndirectBranch,
  Compare,
  MoveImm,
  MoveReg,
  Bitcast,
  Select,
  DelaySlot,
  FoldableAsLoad,
  MayLoad,
  MayStore,
  MayRaiseFPException,
  Predicable,
  NotDuplicable,
  UnmodeledSideEffects,
  Commutable,
  ConvertibleTo3Addr,
  UsesCustomInserter,
  HasPostISelHook,
  Rematerializable,
  CheapAsAMove,
  ExtraSrcRegAllocReq,
  ExtraDefRegAllocReq,
  RegSequence,
  ExtractSubreg,
  InsertSubreg,
  Convergent,
  Add,
  Trap,
  VariadicOpsAreDefs,
  Authenticated,
};
} // namespace MCID

/// Describe properties that are true of each instruction in the target
/// description file.  This captures information about side effects, register
/// use and many other things.  There is one instance of this struct for each
/// target instruction class, and the MachineInstr class points to this struct
/// directly to describe itself.
class MCInstrDesc {
public:
  // FIXME: Disable copies and moves.
  // Do not allow MCInstrDescs to be copied or moved. They should only exist in
  // the <Target>Insts table because they rely on knowing their own address to
  // find other information elsewhere in the same table.

  unsigned short Opcode;         // The opcode number
  unsigned short NumOperands;    // Num of args (may be more if variable_ops)
  unsigned char NumDefs;         // Num of args that are definitions
  unsigned char Size;            // Number of bytes in encoding.
  unsigned short SchedClass;     // enum identifying instr sched class
  unsigned char NumImplicitUses; // Num of regs implicitly used
  unsigned char NumImplicitDefs; // Num of regs implicitly defined
  unsigned short ImplicitOffset; // Offset to start of implicit op list
  unsigned short OpInfoOffset;   // Offset to info about operands
  uint64_t Flags;                // Flags identifying machine instr class
  uint64_t TSFlags;              // Target Specific Flag values

  /// Returns the value of the specified operand constraint if
  /// it is present. Returns -1 if it is not present.
  int getOperandConstraint(unsigned OpNum,
                           MCOI::OperandConstraint Constraint) const {
    if (OpNum < NumOperands &&
        (operands()[OpNum].Constraints & (1 << Constraint))) {
      unsigned ValuePos = 4 + Constraint * 4;
      return (int)(operands()[OpNum].Constraints >> ValuePos) & 0x0f;
    }
    return -1;
  }
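
  // Illustrative sketch: for an instruction whose operand 1 is tied to
  // operand 0 (e.g. a two-address add), the descriptor yields:
  //
  //   int Tied = Desc.getOperandConstraint(1, MCOI::TIED_TO);      // == 0
  //   int EC = Desc.getOperandConstraint(1, MCOI::EARLY_CLOBBER);  // == -1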

  /// Return the opcode number for this descriptor.
  unsigned getOpcode() const { return Opcode; }

  /// Return the number of declared MachineOperands for this
  /// MachineInstruction.  Note that variadic (isVariadic() returns true)
  /// instructions may have additional operands at the end of the list, and note
  /// that the machine instruction may include implicit register def/uses as
  /// well.
  unsigned getNumOperands() const { return NumOperands; }

  ArrayRef<MCOperandInfo> operands() const {
    auto OpInfo = reinterpret_cast<const MCOperandInfo *>(this + Opcode + 1);
    return ArrayRef(OpInfo + OpInfoOffset, NumOperands);
  }

  /// Return the number of MachineOperands that are register
  /// definitions.  Register definitions always occur at the start of the
  /// machine operand list.  This is the number of "outs" in the .td file,
  /// and does not include implicit defs.
  unsigned getNumDefs() const { return NumDefs; }

  /// Return flags of this instruction.
  uint64_t getFlags() const { return Flags; }

  /// \returns true if this instruction is emitted before instruction selection
  /// and should be legalized/regbankselected/selected.
  bool isPreISelOpcode() const { return Flags & (1ULL << MCID::PreISelOpcode); }

  /// Return true if this instruction can have a variable number of
  /// operands.  In this case, the variable operands will be after the normal
  /// operands but before the implicit definitions and uses (if any are
  /// present).
  bool isVariadic() const { return Flags & (1ULL << MCID::Variadic); }

  /// Set if this instruction has an optional definition, e.g.
  /// ARM instructions which can set condition code if 's' bit is set.
  bool hasOptionalDef() const { return Flags & (1ULL << MCID::HasOptionalDef); }

  /// Return true if this is a pseudo instruction that doesn't
  /// correspond to a real machine instruction.
  bool isPseudo() const { return Flags & (1ULL << MCID::Pseudo); }

  /// Return true if this is a meta instruction that doesn't
  /// produce any output in the form of executable instructions.
  bool isMetaInstruction() const { return Flags & (1ULL << MCID::Meta); }

  /// Return true if the instruction is a return.
  bool isReturn() const { return Flags & (1ULL << MCID::Return); }

  /// Return true if the instruction is an add instruction.
  bool isAdd() const { return Flags & (1ULL << MCID::Add); }

  /// Return true if this instruction is a trap.
  bool isTrap() const { return Flags & (1ULL << MCID::Trap); }

  /// Return true if the instruction is a register to register move.
  bool isMoveReg() const { return Flags & (1ULL << MCID::MoveReg); }

  ///  Return true if the instruction is a call.
  bool isCall() const { return Flags & (1ULL << MCID::Call); }

  /// Returns true if the specified instruction stops control flow
  /// from executing the instruction immediately following it.  Examples include
  /// unconditional branches and return instructions.
  bool isBarrier() const { return Flags & (1ULL << MCID::Barrier); }

  /// Returns true if this instruction is part of the terminator for
  /// a basic block.  Typically this is things like return and branch
  /// instructions.
  ///
  /// Various passes use this to insert code into the bottom of a basic block,
  /// but before control flow occurs.
  bool isTerminator() const { return Flags & (1ULL << MCID::Terminator); }

  /// Returns true if this is a conditional, unconditional, or
  /// indirect branch.  Predicates below can be used to discriminate between
  /// these cases, and the TargetInstrInfo::analyzeBranch method can be used to
  /// get more information.
  bool isBranch() const { return Flags & (1ULL << MCID::Branch); }

  /// Return true if this is an indirect branch, such as a
  /// branch through a register.
  bool isIndirectBranch() const { return Flags & (1ULL << MCID::IndirectBranch); }

  /// Return true if this is a branch which may fall
  /// through to the next instruction or may transfer control flow to some other
  /// block.  The TargetInstrInfo::analyzeBranch method can be used to get more
  /// information about this branch.
  bool isConditionalBranch() const {
    return isBranch() && !isBarrier() && !isIndirectBranch();
  }

  /// Return true if this is a branch which always
  /// transfers control flow to some other block.  The
  /// TargetInstrInfo::analyzeBranch method can be used to get more information
  /// about this branch.
  bool isUnconditionalBranch() const {
    return isBranch() && isBarrier() && !isIndirectBranch();
  }

  /// Return true if this is a branch or an instruction which directly
  /// writes to the program counter. Considered 'may' affect rather than
  /// 'does' affect as things like predication are not taken into account.
  bool mayAffectControlFlow(const MCInst &MI, const MCRegisterInfo &RI) const;

  /// Return true if this instruction has a predicate operand
  /// that controls execution. It may be set to 'always', or may be set to other
  /// values. There are various methods in TargetInstrInfo that can be used to
  /// control and modify the predicate in this instruction.
  bool isPredicable() const { return Flags & (1ULL << MCID::Predicable); }

  /// Return true if this instruction is a comparison.
  bool isCompare() const { return Flags & (1ULL << MCID::Compare); }

  /// Return true if this instruction is a move immediate
  /// (including conditional moves) instruction.
  bool isMoveImmediate() const { return Flags & (1ULL << MCID::MoveImm); }

  /// Return true if this instruction is a bitcast instruction.
  bool isBitcast() const { return Flags & (1ULL << MCID::Bitcast); }

  /// Return true if this is a select instruction.
  bool isSelect() const { return Flags & (1ULL << MCID::Select); }

  /// Return true if this instruction cannot be safely
  /// duplicated.  For example, if the instruction has unique labels attached
  /// to it, duplicating it would cause multiple definition errors.
  bool isNotDuplicable() const { return Flags & (1ULL << MCID::NotDuplicable); }

  /// Returns true if the specified instruction has a delay slot which
  /// must be filled by the code generator.
  bool hasDelaySlot() const { return Flags & (1ULL << MCID::DelaySlot); }

  /// Return true for instructions that can be folded as memory operands
  /// in other instructions. The most common use for this is instructions that
  /// are simple loads from memory that don't modify the loaded value in any
  /// way, but it can also be used for instructions that can be expressed as
  /// constant-pool loads, such as V_SETALLONES on x86, to allow them to be
  /// folded when it is beneficial.  This should only be set on instructions
  /// that return a value in their only virtual register definition.
  bool canFoldAsLoad() const { return Flags & (1ULL << MCID::FoldableAsLoad); }

  /// Return true if this instruction behaves
  /// the same way as the generic REG_SEQUENCE instructions.
  /// E.g., on ARM,
  /// dX = VMOVDRR rY, rZ
  /// is equivalent to
  /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
  /// overridden accordingly.
  bool isRegSequenceLike() const { return Flags & (1ULL << MCID::RegSequence); }

  /// Return true if this instruction behaves
  /// the same way as the generic EXTRACT_SUBREG instructions.
  /// E.g., on ARM,
  /// rX, rY = VMOVRRD dZ
  /// is equivalent to two EXTRACT_SUBREGs:
  /// rX = EXTRACT_SUBREG dZ, ssub_0
  /// rY = EXTRACT_SUBREG dZ, ssub_1
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
  /// overridden accordingly.
  bool isExtractSubregLike() const {
    return Flags & (1ULL << MCID::ExtractSubreg);
  }

  /// Return true if this instruction behaves
  /// the same way as the generic INSERT_SUBREG instructions.
  /// E.g., on ARM,
  /// dX = VSETLNi32 dY, rZ, Imm
  /// is equivalent to an INSERT_SUBREG:
  /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
  ///
  /// Note that for the optimizers to be able to take advantage of
  /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
  /// overridden accordingly.
  bool isInsertSubregLike() const { return Flags & (1ULL << MCID::InsertSubreg); }

  /// Return true if this instruction is convergent.
  ///
  /// Convergent instructions may not be made control-dependent on any
  /// additional values.
  bool isConvergent() const { return Flags & (1ULL << MCID::Convergent); }

  /// Return true if variadic operands of this instruction are definitions.
  bool variadicOpsAreDefs() const {
    return Flags & (1ULL << MCID::VariadicOpsAreDefs);
  }

  /// Return true if this instruction authenticates a pointer (e.g. LDRAx/BRAx
  /// from ARMv8.3, which perform loads/branches with authentication).
  ///
  /// An authenticated instruction may fail in an ABI-defined manner when
  /// operating on an invalid signed pointer.
  bool isAuthenticated() const {
    return Flags & (1ULL << MCID::Authenticated);
  }

  //===--------------------------------------------------------------------===//
  // Side Effect Analysis
  //===--------------------------------------------------------------------===//

  /// Return true if this instruction could possibly read memory.
  /// Instructions with this flag set are not necessarily simple load
  /// instructions, they may load a value and modify it, for example.
  bool mayLoad() const { return Flags & (1ULL << MCID::MayLoad); }

  /// Return true if this instruction could possibly modify memory.
  /// Instructions with this flag set are not necessarily simple store
  /// instructions, they may store a modified value based on their operands, or
  /// may not actually modify anything, for example.
  bool mayStore() const { return Flags & (1ULL << MCID::MayStore); }

  /// Return true if this instruction may raise a floating-point exception.
  bool mayRaiseFPException() const {
    return Flags & (1ULL << MCID::MayRaiseFPException);
  }

  /// Return true if this instruction has side
  /// effects that are not modeled by other flags.  This does not return true
  /// for instructions whose effects are captured by:
  ///
  ///  1. Their operand list and implicit definition/use list.  Register use/def
  ///     info is explicit for instructions.
  ///  2. Memory accesses.  Use mayLoad/mayStore.
  ///  3. Calling, branching, returning: use isCall/isReturn/isBranch.
  ///
  /// Examples of side effects would be modifying 'invisible' machine state like
  /// a control register, flushing a cache, modifying a register invisible to
  /// LLVM, etc.
  bool hasUnmodeledSideEffects() const {
    return Flags & (1ULL << MCID::UnmodeledSideEffects);
  }

  //===--------------------------------------------------------------------===//
  // Flags that indicate whether an instruction can be modified by a method.
  //===--------------------------------------------------------------------===//

  /// Return true if this may be a 2- or 3-address instruction (of the
  /// form "X = op Y, Z, ..."), which produces the same result if Y and Z are
  /// exchanged.  If this flag is set, then the
  /// TargetInstrInfo::commuteInstruction method may be used to hack on the
  /// instruction.
  ///
  /// Note that this flag may be set on instructions that are only commutable
  /// sometimes.  In these cases, the call to commuteInstruction will fail.
  /// Also note that some instructions require non-trivial modification to
  /// commute them.
  bool isCommutable() const { return Flags & (1ULL << MCID::Commutable); }

  /// Return true if this is a 2-address instruction which can be changed
  /// into a 3-address instruction if needed.  Doing this transformation can be
  /// profitable in the register allocator, because it means that the
  /// instruction can use a 2-address form if possible, but degrade into a less
  /// efficient form if the source and dest register cannot be assigned to the
  /// same register.  For example, this allows the x86 backend to turn a "shl
  /// reg, 3" instruction into an LEA instruction, which is the same speed as
  /// the shift but has bigger code size.
  ///
  /// If this returns true, then the target must implement the
  /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
  /// is allowed to fail if the transformation isn't valid for this specific
  /// instruction (e.g. shl reg, 4 on x86).
  ///
  bool isConvertibleTo3Addr() const {
    return Flags & (1ULL << MCID::ConvertibleTo3Addr);
  }

  /// Return true if this instruction requires custom insertion support
  /// when the DAG scheduler is inserting it into a machine basic block.  If
  /// this is true for the instruction, it basically means that it is a pseudo
  /// instruction used at SelectionDAG time that is expanded out into magic code
  /// by the target when MachineInstrs are formed.
  ///
  /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
  /// is used to insert this into the MachineBasicBlock.
  bool usesCustomInsertionHook() const {
    return Flags & (1ULL << MCID::UsesCustomInserter);
  }

  /// Return true if this instruction requires *adjustment* after
  /// instruction selection by calling a target hook. For example, this can be
  /// used to fill in the ARM 's' optional operand depending on whether the
  /// conditional flag register is used.
  bool hasPostISelHook() const { return Flags & (1ULL << MCID::HasPostISelHook); }

  /// Returns true if this instruction is a candidate for remat. This
  /// flag is only used in TargetInstrInfo method isTriviallyRematerializable.
  ///
  /// If this flag is set, the isReallyTriviallyReMaterializable()
  /// or isReallyTriviallyReMaterializableGeneric() methods are called to
  /// verify the instruction is really rematerializable.
  bool isRematerializable() const {
    return Flags & (1ULL << MCID::Rematerializable);
  }

  /// Returns true if this instruction has the same cost (or less) than a
  /// move instruction. This is useful during certain types of optimizations
  /// (e.g., remat during two-address conversion or machine licm) where we would
  /// like to remat or hoist the instruction, but not if it costs more than
  /// moving the instruction into the appropriate register. Note, we are not
  /// marking copies from and to the same register class with this flag.
  ///
  /// This flag is consulted by the TargetInstrInfo::isAsCheapAsAMove hook,
  /// which may refine the answer for different subtargets.
  bool isAsCheapAsAMove() const { return Flags & (1ULL << MCID::CheapAsAMove); }

  /// Returns true if this instruction's source operands have special
  /// register allocation requirements that are not captured by the operand
  /// register classes. e.g. ARM::STRD's two source registers must be an even /
  /// odd pair, ARM::STM registers have to be in ascending order.  Post-register
  /// allocation passes should not attempt to change allocations for sources of
  /// instructions with this flag.
  bool hasExtraSrcRegAllocReq() const {
    return Flags & (1ULL << MCID::ExtraSrcRegAllocReq);
  }

  /// Returns true if this instruction's def operands have special register
  /// allocation requirements that are not captured by the operand register
  /// classes. e.g. ARM::LDRD's two def registers must be an even / odd pair,
  /// ARM::LDM registers have to be in ascending order.  Post-register
  /// allocation passes should not attempt to change allocations for definitions
  /// of instructions with this flag.
  bool hasExtraDefRegAllocReq() const {
    return Flags & (1ULL << MCID::ExtraDefRegAllocReq);
  }

  /// Return a list of registers that are potentially read by any
  /// instance of this machine instruction.  For example, on X86, the "adc"
  /// instruction adds two register operands and adds the carry bit in from the
  /// flags register.  In this case, the instruction is marked as implicitly
  /// reading the flags.  Likewise, the variable shift instruction on X86 is
  /// marked as implicitly reading the 'CL' register, which it always does.
  ArrayRef<MCPhysReg> implicit_uses() const {
    auto ImplicitOps =
        reinterpret_cast<const MCPhysReg *>(this + Opcode + 1) + ImplicitOffset;
    return {ImplicitOps, NumImplicitUses};
  }

  /// Return a list of registers that are potentially written by any
  /// instance of this machine instruction.  For example, on X86, many
  /// instructions implicitly set the flags register.  In this case, they are
  /// marked as setting the FLAGS.  Likewise, many instructions always deposit
  /// their result in a physical register.  For example, the X86 divide
  /// instruction always deposits the quotient and remainder in the EAX/EDX
  /// registers.  For that instruction, this will return a list containing the
  /// EAX/EDX/EFLAGS registers.
  ArrayRef<MCPhysReg> implicit_defs() const {
    auto ImplicitOps =
        reinterpret_cast<const MCPhysReg *>(this + Opcode + 1) + ImplicitOffset;
    return {ImplicitOps + NumImplicitUses, NumImplicitDefs};
  }

  /// Return true if this instruction implicitly
  /// uses the specified physical register.
  bool hasImplicitUseOfPhysReg(unsigned Reg) const {
    return is_contained(implicit_uses(), Reg);
  }

  /// Return true if this instruction implicitly
  /// defines the specified physical register.
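  ///
  /// A minimal sketch, where \c FlagsReg stands in for a real target's status
  /// register number:
  /// \code
  ///   if (Desc.hasImplicitDefOfPhysReg(FlagsReg))
  ///     ; // the instruction clobbers the flags even without an explicit def
  /// \endcode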
  bool hasImplicitDefOfPhysReg(unsigned Reg,
                               const MCRegisterInfo *MRI = nullptr) const;

  /// Return the scheduling class for this instruction.  The
  /// scheduling class is an index into the InstrItineraryData table.  This
  /// returns zero if there is no known scheduling information for the
  /// instruction.
  unsigned getSchedClass() const { return SchedClass; }

  /// Return the number of bytes in the encoding of this instruction,
  /// or zero if the encoding size cannot be known from the opcode.
  unsigned getSize() const { return Size; }

  /// Find the index of the first operand in the
  /// operand list that is used to represent the predicate. It returns -1 if
  /// none is found.
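  ///
  /// A minimal sketch (assuming \c Desc describes \c Inst, and \c examine is
  /// an illustrative helper):
  /// \code
  ///   int PredIdx = Desc.findFirstPredOperandIdx();
  ///   if (PredIdx != -1)
  ///     examine(Inst.getOperand(PredIdx));
  /// \endcode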
  int findFirstPredOperandIdx() const {
    if (isPredicable()) {
      for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
        if (operands()[i].isPredicate())
          return i;
    }
    return -1;
  }

  /// Return true if this instruction defines the specified physical
  /// register, either explicitly or implicitly.
  bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg,
                       const MCRegisterInfo &RI) const;
};

} // end namespace llvm

#endif
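
// A minimal sketch of consulting the descriptor flags above from a tool,
// assuming an already-initialized MCInstrInfo (target registration and
// setup omitted):

#include "llvm/MC/MCInstrInfo.h"

// True if an instruction with this opcode always ends control flow in its
// block, i.e. it is a terminator that cannot fall through (such as an
// unconditional branch or a return).
static bool endsBlock(const llvm::MCInstrInfo &MCII, unsigned Opcode) {
  const llvm::MCInstrDesc &Desc = MCII.get(Opcode);
  return Desc.isTerminator() && Desc.isBarrier();
}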
MC/MCXCOFFStreamer.h
//===- MCXCOFFObjectStreamer.h - MCStreamer XCOFF Object File Interface ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCXCOFFSTREAMER_H
#define LLVM_MC_MCXCOFFSTREAMER_H

#include "llvm/MC/MCObjectStreamer.h"

namespace llvm {

class MCXCOFFStreamer : public MCObjectStreamer {
public:
  MCXCOFFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> MAB,
                  std::unique_ptr<MCObjectWriter> OW,
                  std::unique_ptr<MCCodeEmitter> Emitter);

  bool emitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
  void emitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                        Align ByteAlignment) override;
  void emitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
                    uint64_t Size = 0, Align ByteAlignment = Align(1),
                    SMLoc Loc = SMLoc()) override;
  void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;
  void emitXCOFFLocalCommonSymbol(MCSymbol *LabelSym, uint64_t Size,
                                  MCSymbol *CsectSym, Align Alignment) override;
  void emitXCOFFSymbolLinkageWithVisibility(MCSymbol *Symbol,
                                            MCSymbolAttr Linkage,
                                            MCSymbolAttr Visibility) override;
  void emitXCOFFRefDirective(const MCSymbol *Symbol) override;
  void emitXCOFFRenameDirective(const MCSymbol *Name,
                                StringRef Rename) override {
    report_fatal_error("emitXCOFFRenameDirective is not implemented yet on "
                       "object generation path");
  }
  void emitXCOFFExceptDirective(const MCSymbol *Symbol, const MCSymbol *Trap,
                                unsigned Lang, unsigned Reason,
                                unsigned FunctionSize, bool hasDebug) override;
  void emitXCOFFCInfoSym(StringRef Name, StringRef Metadata) override;
};

} // end namespace llvm

#endif // LLVM_MC_MCXCOFFSTREAMER_H
MC/MCInst.h
//===- llvm/MC/MCInst.h - MCInst class --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCInst and MCOperand classes, which
// is the basic representation used to represent low-level machine code
// instructions.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINST_H
#define LLVM_MC_MCINST_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/bit.h"
#include "llvm/Support/SMLoc.h"
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace llvm {

class MCExpr;
class MCInst;
class MCInstPrinter;
class MCRegisterInfo;
class raw_ostream;

/// Instances of this class represent operands of the MCInst class.
/// This is a simple discriminated union.
class MCOperand {
  enum MachineOperandType : unsigned char {
    kInvalid,      ///< Uninitialized.
    kRegister,     ///< Register operand.
    kImmediate,    ///< Immediate operand.
    kSFPImmediate, ///< Single-floating-point immediate operand.
    kDFPImmediate, ///< Double-floating-point immediate operand.
    kExpr,         ///< Relocatable immediate operand.
    kInst          ///< Sub-instruction operand.
  };
  MachineOperandType Kind = kInvalid;

  union {
    unsigned RegVal;
    int64_t ImmVal;
    uint32_t SFPImmVal;
    uint64_t FPImmVal;
    const MCExpr *ExprVal;
    const MCInst *InstVal;
  };

public:
  MCOperand() : FPImmVal(0) {}

  bool isValid() const { return Kind != kInvalid; }
  bool isReg() const { return Kind == kRegister; }
  bool isImm() const { return Kind == kImmediate; }
  bool isSFPImm() const { return Kind == kSFPImmediate; }
  bool isDFPImm() const { return Kind == kDFPImmediate; }
  bool isExpr() const { return Kind == kExpr; }
  bool isInst() const { return Kind == kInst; }

  /// Returns the register number.
  unsigned getReg() const {
    assert(isReg() && "This is not a register operand!");
    return RegVal;
  }

  /// Set the register number.
  void setReg(unsigned Reg) {
    assert(isReg() && "This is not a register operand!");
    RegVal = Reg;
  }

  int64_t getImm() const {
    assert(isImm() && "This is not an immediate");
    return ImmVal;
  }

  void setImm(int64_t Val) {
    assert(isImm() && "This is not an immediate");
    ImmVal = Val;
  }

  uint32_t getSFPImm() const {
    assert(isSFPImm() && "This is not an SFP immediate");
    return SFPImmVal;
  }

  void setSFPImm(uint32_t Val) {
    assert(isSFPImm() && "This is not an SFP immediate");
    SFPImmVal = Val;
  }

  uint64_t getDFPImm() const {
    assert(isDFPImm() && "This is not an FP immediate");
    return FPImmVal;
  }

  void setDFPImm(uint64_t Val) {
    assert(isDFPImm() && "This is not an FP immediate");
    FPImmVal = Val;
  }
  void setFPImm(double Val) {
    assert(isDFPImm() && "This is not an FP immediate");
    FPImmVal = bit_cast<uint64_t>(Val);
  }

  const MCExpr *getExpr() const {
    assert(isExpr() && "This is not an expression");
    return ExprVal;
  }

  void setExpr(const MCExpr *Val) {
    assert(isExpr() && "This is not an expression");
    ExprVal = Val;
  }

  const MCInst *getInst() const {
    assert(isInst() && "This is not a sub-instruction");
    return InstVal;
  }

  void setInst(const MCInst *Val) {
    assert(isInst() && "This is not a sub-instruction");
    InstVal = Val;
  }

  static MCOperand createReg(unsigned Reg) {
    MCOperand Op;
    Op.Kind = kRegister;
    Op.RegVal = Reg;
    return Op;
  }

  static MCOperand createImm(int64_t Val) {
    MCOperand Op;
    Op.Kind = kImmediate;
    Op.ImmVal = Val;
    return Op;
  }

  static MCOperand createSFPImm(uint32_t Val) {
    MCOperand Op;
    Op.Kind = kSFPImmediate;
    Op.SFPImmVal = Val;
    return Op;
  }

  static MCOperand createDFPImm(uint64_t Val) {
    MCOperand Op;
    Op.Kind = kDFPImmediate;
    Op.FPImmVal = Val;
    return Op;
  }

  static MCOperand createExpr(const MCExpr *Val) {
    MCOperand Op;
    Op.Kind = kExpr;
    Op.ExprVal = Val;
    return Op;
  }

  static MCOperand createInst(const MCInst *Val) {
    MCOperand Op;
    Op.Kind = kInst;
    Op.InstVal = Val;
    return Op;
  }

  void print(raw_ostream &OS, const MCRegisterInfo *RegInfo = nullptr) const;
  void dump() const;
  bool isBareSymbolRef() const;
  bool evaluateAsConstantImm(int64_t &Imm) const;
};

/// Instances of this class represent a single low-level machine
/// instruction.
class MCInst {
  unsigned Opcode = 0;
  // These flags could be used to pass some info from one target subcomponent
  // to another, for example, from disassembler to asm printer. The values of
  // the flags are meaningful only at the target level (e.g. prefixes on x86).
  unsigned Flags = 0;

  SMLoc Loc;
  SmallVector<MCOperand, 10> Operands;

public:
  MCInst() = default;

  void setOpcode(unsigned Op) { Opcode = Op; }
  unsigned getOpcode() const { return Opcode; }

  void setFlags(unsigned F) { Flags = F; }
  unsigned getFlags() const { return Flags; }

  void setLoc(SMLoc loc) { Loc = loc; }
  SMLoc getLoc() const { return Loc; }

  const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
  MCOperand &getOperand(unsigned i) { return Operands[i]; }
  unsigned getNumOperands() const { return Operands.size(); }

  void addOperand(const MCOperand Op) { Operands.push_back(Op); }

  using iterator = SmallVectorImpl<MCOperand>::iterator;
  using const_iterator = SmallVectorImpl<MCOperand>::const_iterator;

  void clear() { Operands.clear(); }
  void erase(iterator I) { Operands.erase(I); }
  void erase(iterator First, iterator Last) { Operands.erase(First, Last); }
  size_t size() const { return Operands.size(); }
  iterator begin() { return Operands.begin(); }
  const_iterator begin() const { return Operands.begin(); }
  iterator end() { return Operands.end(); }
  const_iterator end() const { return Operands.end(); }

  iterator insert(iterator I, const MCOperand &Op) {
    return Operands.insert(I, Op);
  }

  void print(raw_ostream &OS, const MCRegisterInfo *RegInfo = nullptr) const;
  void dump() const;

  /// Dump the MCInst as prettily as possible using the additional MC
  /// structures, if given. Operands are separated by the \p Separator
  /// string.
  void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer = nullptr,
                   StringRef Separator = " ",
                   const MCRegisterInfo *RegInfo = nullptr) const;
  void dump_pretty(raw_ostream &OS, StringRef Name, StringRef Separator = " ",
                   const MCRegisterInfo *RegInfo = nullptr) const;
};

inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
  MO.print(OS);
  return OS;
}

inline raw_ostream& operator<<(raw_ostream &OS, const MCInst &MI) {
  MI.print(OS);
  return OS;
}

} // end namespace llvm

#endif // LLVM_MC_MCINST_H
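
// A minimal sketch of building an instruction with the API above; the opcode
// and register numbers are placeholders, not values from a real target:

#include "llvm/MC/MCInst.h"

static llvm::MCInst makeExampleInst() {
  llvm::MCInst Inst;
  Inst.setOpcode(42);                             // hypothetical opcode
  Inst.addOperand(llvm::MCOperand::createReg(7)); // hypothetical register
  Inst.addOperand(llvm::MCOperand::createImm(8)); // immediate operand
  return Inst;
}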
MC/MCWasmObjectWriter.h
//===-- llvm/MC/MCWasmObjectWriter.h - Wasm Object Writer -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCWASMOBJECTWRITER_H
#define LLVM_MC_MCWASMOBJECTWRITER_H

#include "llvm/MC/MCObjectWriter.h"
#include <memory>

namespace llvm {

class MCFixup;
class MCSectionWasm;
class MCValue;
class raw_pwrite_stream;

class MCWasmObjectTargetWriter : public MCObjectTargetWriter {
  const unsigned Is64Bit : 1;
  const unsigned IsEmscripten : 1;

protected:
  explicit MCWasmObjectTargetWriter(bool Is64Bit_, bool IsEmscripten);

public:
  virtual ~MCWasmObjectTargetWriter();

  Triple::ObjectFormatType getFormat() const override { return Triple::Wasm; }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::Wasm;
  }

  virtual unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
                                const MCSectionWasm &FixupSection,
                                bool IsLocRel) const = 0;

  /// \name Accessors
  /// @{
  bool is64Bit() const { return Is64Bit; }
  bool isEmscripten() const { return IsEmscripten; }
  /// @}
};

/// Construct a new Wasm writer instance.
///
/// \param MOTW - The target specific Wasm writer subclass.
/// \param OS - The stream to write to.
/// \returns The constructed object writer.
std::unique_ptr<MCObjectWriter>
createWasmObjectWriter(std::unique_ptr<MCWasmObjectTargetWriter> MOTW,
                       raw_pwrite_stream &OS);

std::unique_ptr<MCObjectWriter>
createWasmDwoObjectWriter(std::unique_ptr<MCWasmObjectTargetWriter> MOTW,
                          raw_pwrite_stream &OS, raw_pwrite_stream &DwoOS);

} // namespace llvm

#endif
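
// A minimal sketch of a target-specific writer built on the interface above.
// 'MyWasmObjectTargetWriter' and its relocation mapping are illustrative,
// not taken from a real target:

#include "llvm/MC/MCWasmObjectWriter.h"

class MyWasmObjectTargetWriter : public llvm::MCWasmObjectTargetWriter {
public:
  MyWasmObjectTargetWriter()
      : MCWasmObjectTargetWriter(/*Is64Bit_=*/false, /*IsEmscripten=*/false) {}

  unsigned getRelocType(const llvm::MCValue &Target,
                        const llvm::MCFixup &Fixup,
                        const llvm::MCSectionWasm &FixupSection,
                        bool IsLocRel) const override {
    // A real target would map Fixup.getKind() to a wasm::R_WASM_* value.
    return 0;
  }
};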
MC/MCAsmBackend.h
//===- llvm/MC/MCAsmBackend.h - MC Asm Backend ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMBACKEND_H
#define LLVM_MC_MCASMBACKEND_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/Support/Endian.h"
#include <cstdint>

namespace llvm {

class MCAlignFragment;
class MCDwarfCallFrameFragment;
class MCDwarfLineAddrFragment;
class MCFragment;
class MCRelaxableFragment;
class MCSymbol;
class MCAsmLayout;
class MCAssembler;
class MCContext;
struct MCDwarfFrameInfo;
struct MCFixupKindInfo;
class MCInst;
class MCObjectStreamer;
class MCObjectTargetWriter;
class MCObjectWriter;
class MCSubtargetInfo;
class MCValue;
class raw_pwrite_stream;
class StringRef;
class raw_ostream;

/// Generic interface to target specific assembler backends.
class MCAsmBackend {
protected: // Can only create subclasses.
  MCAsmBackend(support::endianness Endian,
               unsigned RelaxFixupKind = MaxFixupKind);

public:
  MCAsmBackend(const MCAsmBackend &) = delete;
  MCAsmBackend &operator=(const MCAsmBackend &) = delete;
  virtual ~MCAsmBackend();

  const support::endianness Endian;

  /// Fixup kind used for linker relaxation. Currently only used by RISC-V.
  const unsigned RelaxFixupKind;

  /// Return true if this target might automatically pad instructions and thus
  /// need to emit padding enable/disable directives around sensitive code.
  virtual bool allowAutoPadding() const { return false; }
  /// Return true if this target allows an unrelaxable instruction to be
  /// emitted into a RelaxableFragment so that its size can later be increased
  /// for optimization.
  virtual bool allowEnhancedRelaxation() const { return false; }

  /// Give the target a chance to manipulate state related to instruction
  /// alignment (e.g. padding for optimization), instruction relaxability, etc.
  /// before and after actually emitting the instruction.
  virtual void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
                                    const MCSubtargetInfo &STI) {}
  virtual void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst) {}

  /// lifetime management
  virtual void reset() {}

  /// Create a new MCObjectWriter instance for use by the assembler backend to
  /// emit the final object file.
  std::unique_ptr<MCObjectWriter>
  createObjectWriter(raw_pwrite_stream &OS) const;

  /// Create an MCObjectWriter that writes two object files: a .o file which is
  /// linked into the final program and a .dwo file which is used by debuggers.
  /// This function is only supported with ELF targets.
  std::unique_ptr<MCObjectWriter>
  createDwoObjectWriter(raw_pwrite_stream &OS, raw_pwrite_stream &DwoOS) const;

  virtual std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const = 0;

  /// \name Target Fixup Interfaces
  /// @{

  /// Get the number of target specific fixup kinds.
  virtual unsigned getNumFixupKinds() const = 0;

  /// Map a relocation name used in .reloc to a fixup kind.
  virtual std::optional<MCFixupKind> getFixupKind(StringRef Name) const;

  /// Get information on a fixup kind.
  virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;

  /// Hook to check if a relocation is needed for some target specific reason.
  virtual bool shouldForceRelocation(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target) {
    return false;
  }

  /// Hook to check if extra nop bytes must be inserted for alignment directive.
  /// For some targets this may be necessary in order to support linker
  /// relaxation. The number of bytes to insert is returned in Size.
  virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
                                                     unsigned &Size) {
    return false;
  }

  /// Hook which indicates if the target requires a fixup to be generated when
  /// handling an align directive in an executable section
  virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
                                             const MCAsmLayout &Layout,
                                             MCAlignFragment &AF) {
    return false;
  }

  virtual bool evaluateTargetFixup(const MCAssembler &Asm,
                                   const MCAsmLayout &Layout,
                                   const MCFixup &Fixup, const MCFragment *DF,
                                   const MCValue &Target, uint64_t &Value,
                                   bool &WasForced) {
    llvm_unreachable("Need to implement hook if target has custom fixups");
  }

  virtual bool handleAddSubRelocations(const MCAsmLayout &Layout,
                                       const MCFragment &F,
                                       const MCFixup &Fixup,
                                       const MCValue &Target,
                                       uint64_t &FixedValue) const {
    return false;
  }

  /// Apply the \p Value for given \p Fixup into the provided data fragment, at
  /// the offset specified by the fixup and following the fixup kind as
  /// appropriate. Errors (such as an out of range fixup value) should be
  /// reported via \p Ctx.
  /// The \p STI is present only for fragments of type MCRelaxableFragment and
  /// MCDataFragment with hasInstructions() == true.
  virtual void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                          const MCValue &Target, MutableArrayRef<char> Data,
                          uint64_t Value, bool IsResolved,
                          const MCSubtargetInfo *STI) const = 0;

  /// @}

  /// \name Target Relaxation Interfaces
  /// @{

  /// Check whether the given instruction may need relaxation.
  ///
  /// \param Inst - The instruction to test.
  /// \param STI - The MCSubtargetInfo in effect when the instruction was
  /// encoded.
  virtual bool mayNeedRelaxation(const MCInst &Inst,
                                 const MCSubtargetInfo &STI) const {
    return false;
  }

  /// Target specific predicate for whether a given fixup requires the
  /// associated instruction to be relaxed.
  virtual bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
                                            uint64_t Value,
                                            const MCRelaxableFragment *DF,
                                            const MCAsmLayout &Layout,
                                            const bool WasForced) const;

  /// Simple predicate for targets where !Resolved implies requiring relaxation
  virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                    const MCRelaxableFragment *DF,
                                    const MCAsmLayout &Layout) const = 0;

  /// Relax the instruction in the given fragment to the next wider instruction.
  ///
  /// \param [out] Inst The instruction to relax, which is also the relaxed
  /// instruction.
  /// \param STI the subtarget information for the associated instruction.
  virtual void relaxInstruction(MCInst &Inst,
                                const MCSubtargetInfo &STI) const {}

  virtual bool relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF,
                                  MCAsmLayout &Layout, bool &WasRelaxed) const {
    return false;
  }

  virtual bool relaxDwarfCFA(MCDwarfCallFrameFragment &DF, MCAsmLayout &Layout,
                             bool &WasRelaxed) const {
    return false;
  }

  /// @}

  /// Returns the minimum size of a nop in bytes on this target. The assembler
  /// will use this to emit excess padding in situations where the padding
  /// required for simple alignment would be less than the minimum nop size.
  ///
  virtual unsigned getMinimumNopSize() const { return 1; }

  /// Returns the maximum size of a nop in bytes on this target.
  ///
  virtual unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const {
    return 0;
  }

  /// Write an (optimal) nop sequence of Count bytes to the given output. If the
  /// target cannot generate such a sequence, it should return an error.
  ///
  /// \return - True on success.
  virtual bool writeNopData(raw_ostream &OS, uint64_t Count,
                            const MCSubtargetInfo *STI) const = 0;

  /// Give the backend an opportunity to finish layout after relaxation.
  virtual void finishLayout(MCAssembler const &Asm,
                            MCAsmLayout &Layout) const {}

  /// Handle any target-specific assembler flags. By default, do nothing.
  virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {}

  /// Generate the compact unwind encoding for the CFI instructions.
  virtual uint32_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                                 const MCContext *Ctxt) const {
    return 0;
  }

  /// Check whether a given symbol has been flagged with the MICROMIPS flag.
  virtual bool isMicroMips(const MCSymbol *Sym) const {
    return false;
  }

  bool isDarwinCanonicalPersonality(const MCSymbol *Sym) const;
};

} // end namespace llvm

#endif // LLVM_MC_MCASMBACKEND_H
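
// A minimal sketch of the logic a writeNopData override might implement, for
// a hypothetical target whose one-byte nop encoding is 0x90 (as on x86);
// 'writeSingleByteNops' is an illustrative free function, not an LLVM API:

#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Emit Count one-byte nops to OS; returning true mirrors the writeNopData
// success contract.
static bool writeSingleByteNops(llvm::raw_ostream &OS, uint64_t Count) {
  for (uint64_t I = 0; I != Count; ++I)
    OS << '\x90';
  return true;
}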
MC/MCAsmLayout.h
//===- MCAsmLayout.h - Assembly Layout Object -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMLAYOUT_H
#define LLVM_MC_MCASMLAYOUT_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"

namespace llvm {
class MCAssembler;
class MCFragment;
class MCSection;
class MCSymbol;

/// Encapsulates the layout of an assembly file at a particular point in time.
///
/// Assembly may require computing multiple layouts for a particular assembly
/// file as part of the relaxation process. This class encapsulates the layout
/// at a single point in time in such a way that it is always possible to
/// efficiently compute the exact address of any symbol in the assembly file,
/// even during the relaxation process.
class MCAsmLayout {
  MCAssembler &Assembler;

  /// List of sections in layout order.
  llvm::SmallVector<MCSection *, 16> SectionOrder;

  /// The last fragment which was laid out, or 0 if nothing has been laid
  /// out. Fragments are always laid out in order, so all fragments with a
  /// lower ordinal will be valid.
  mutable DenseMap<const MCSection *, MCFragment *> LastValidFragment;

  /// Make sure that the layout for the given fragment is valid, lazily
  /// computing it if necessary.
  void ensureValid(const MCFragment *F) const;

  /// Is the layout for this fragment valid?
  bool isFragmentValid(const MCFragment *F) const;

public:
  MCAsmLayout(MCAssembler &Assembler);

  /// Get the assembler object this is a layout for.
  MCAssembler &getAssembler() const { return Assembler; }

  /// \returns whether the offset of fragment \p F can be obtained via
  /// getFragmentOffset.
  bool canGetFragmentOffset(const MCFragment *F) const;

  /// Invalidate the fragments starting with F because it has been
  /// resized. The fragment's size should have already been updated, but
  /// its bundle padding will be recomputed.
  void invalidateFragmentsFrom(MCFragment *F);

  /// Perform layout for a single fragment, assuming that the previous
  /// fragment has already been laid out correctly, and the parent section has
  /// been initialized.
  void layoutFragment(MCFragment *Fragment);

  /// \name Section Access (in layout order)
  /// @{

  llvm::SmallVectorImpl<MCSection *> &getSectionOrder() { return SectionOrder; }
  const llvm::SmallVectorImpl<MCSection *> &getSectionOrder() const {
    return SectionOrder;
  }

  /// @}
  /// \name Fragment Layout Data
  /// @{

  /// Get the offset of the given fragment inside its containing section.
  uint64_t getFragmentOffset(const MCFragment *F) const;

  /// @}
  /// \name Utility Functions
  /// @{

  /// Get the address space size of the given section, as it affects
  /// layout. This may differ from the size reported by \see
  /// getSectionFileSize() by not including section tail padding.
  uint64_t getSectionAddressSize(const MCSection *Sec) const;

  /// Get the data size of the given section, as emitted to the object
  /// file. This may include additional padding, or be 0 for virtual sections.
  uint64_t getSectionFileSize(const MCSection *Sec) const;

  /// Get the offset of the given symbol, as computed in the current
  /// layout.
  /// \return True on success.
  bool getSymbolOffset(const MCSymbol &S, uint64_t &Val) const;

  /// Variant that reports a fatal error if the offset is not computable.
  uint64_t getSymbolOffset(const MCSymbol &S) const;

  /// If this symbol is equivalent to A + Constant, return A.
  const MCSymbol *getBaseSymbol(const MCSymbol &Symbol) const;

  /// @}
};

} // end namespace llvm

#endif
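
// A minimal sketch of querying a symbol's offset through the interface above,
// assuming a live MCAsmLayout (how the layout was produced is omitted):

#include "llvm/MC/MCAsmLayout.h"
#include <cstdint>

static uint64_t symbolOffsetOrZero(const llvm::MCAsmLayout &Layout,
                                   const llvm::MCSymbol &Sym) {
  uint64_t Offset = 0;
  // The two-argument getSymbolOffset returns false when the offset is not
  // computable in the current layout.
  return Layout.getSymbolOffset(Sym, Offset) ? Offset : 0;
}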
MC/MCContext.h
//===- MCContext.h - Machine Code Context -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCCONTEXT_H
#define LLVM_MC_MCCONTEXT_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/MC/MCAsmMacro.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCPseudoProbe.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class CodeViewContext;
class MCAsmInfo;
class MCInst;
class MCLabel;
class MCObjectFileInfo;
class MCRegisterInfo;
class MCSection;
class MCSectionCOFF;
class MCSectionDXContainer;
class MCSectionELF;
class MCSectionGOFF;
class MCSectionMachO;
class MCSectionSPIRV;
class MCSectionWasm;
class MCSectionXCOFF;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;
class MCSymbolELF;
class MCSymbolWasm;
class MCSymbolXCOFF;
class MCTargetOptions;
class MDNode;
template <typename T> class SmallVectorImpl;
class SMDiagnostic;
class SMLoc;
class SourceMgr;
enum class EmitDwarfUnwindType;

/// Context object for machine code objects.  This class owns all of the
/// sections that it creates.
///
class MCContext {
public:
  using SymbolTable = StringMap<MCSymbol *, BumpPtrAllocator &>;
  using DiagHandlerTy =
      std::function<void(const SMDiagnostic &, bool, const SourceMgr &,
                         std::vector<const MDNode *> &)>;
  enum Environment {
    IsMachO,
    IsELF,
    IsGOFF,
    IsCOFF,
    IsSPIRV,
    IsWasm,
    IsXCOFF,
    IsDXContainer
  };

private:
  Environment Env;

  /// The name of the segment where Swift5 reflection section data will be
  /// emitted.
  StringRef Swift5ReflectionSegmentName;

  /// The triple for this object.
  Triple TT;

  /// The SourceMgr for this object, if any.
  const SourceMgr *SrcMgr = nullptr;

  /// The SourceMgr for inline assembly, if any.
  std::unique_ptr<SourceMgr> InlineSrcMgr;
  std::vector<const MDNode *> LocInfos;

  DiagHandlerTy DiagHandler;

  /// The MCAsmInfo for this target.
  const MCAsmInfo *MAI = nullptr;

  /// The MCRegisterInfo for this target.
  const MCRegisterInfo *MRI = nullptr;

  /// The MCObjectFileInfo for this target.
  const MCObjectFileInfo *MOFI = nullptr;

  /// The MCSubtargetInfo for this target.
  const MCSubtargetInfo *MSTI = nullptr;

  std::unique_ptr<CodeViewContext> CVContext;

  /// Allocator object used for creating machine code objects.
  ///
  /// We use a bump pointer allocator to avoid the need to track all allocated
  /// objects.
  BumpPtrAllocator Allocator;

  SpecificBumpPtrAllocator<MCSectionCOFF> COFFAllocator;
  SpecificBumpPtrAllocator<MCSectionDXContainer> DXCAllocator;
  SpecificBumpPtrAllocator<MCSectionELF> ELFAllocator;
  SpecificBumpPtrAllocator<MCSectionMachO> MachOAllocator;
  SpecificBumpPtrAllocator<MCSectionGOFF> GOFFAllocator;
  SpecificBumpPtrAllocator<MCSectionSPIRV> SPIRVAllocator;
  SpecificBumpPtrAllocator<MCSectionWasm> WasmAllocator;
  SpecificBumpPtrAllocator<MCSectionXCOFF> XCOFFAllocator;
  SpecificBumpPtrAllocator<MCInst> MCInstAllocator;

  /// Bindings of names to symbols.
  SymbolTable Symbols;

  /// A mapping from a local label number and an instance count to a symbol.
  /// For example, in the assembly
  ///     1:
  ///     2:
  ///     1:
  /// We have three labels represented by the pairs (1, 0), (2, 0) and (1, 1)
  DenseMap<std::pair<unsigned, unsigned>, MCSymbol *> LocalSymbols;

  /// Keeps track of names that were used for both user-declared and
  /// artificial symbols. The value is "true" if the name has been used for a
  /// non-section symbol (there can be at most one of those, plus an unlimited
  /// number of section symbols with the same name).
  StringMap<bool, BumpPtrAllocator &> UsedNames;

  /// Keeps track of labels that are used in inline assembly.
  SymbolTable InlineAsmUsedLabelNames;

  /// The next ID to dole out to an unnamed assembler temporary symbol with
  /// a given prefix.
  StringMap<unsigned> NextID;

  /// Instances of directional local labels.
  DenseMap<unsigned, MCLabel *> Instances;
  /// NextInstance() creates the next instance of the directional local label
  /// for the LocalLabelVal and adds it to the map if needed.
  unsigned NextInstance(unsigned LocalLabelVal);
  /// GetInstance() gets the current instance of the directional local label
  /// for the LocalLabelVal and adds it to the map if needed.
  unsigned GetInstance(unsigned LocalLabelVal);

  /// LLVM_BB_ADDR_MAP version to emit.
  uint8_t BBAddrMapVersion = 2;

  /// The file name of the log file from the environment variable
  /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
  /// directive is used, or it is an error.
  std::string SecureLogFile;
  /// The stream that gets written to for the .secure_log_unique directive.
  std::unique_ptr<raw_fd_ostream> SecureLog;
  /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
  /// catch errors if .secure_log_unique appears twice without
  /// .secure_log_reset appearing between them.
  bool SecureLogUsed = false;

  /// The compilation directory to use for DW_AT_comp_dir.
  SmallString<128> CompilationDir;

  /// Prefix replacement map for source file information.
  SmallVector<std::pair<std::string, std::string>, 0> DebugPrefixMap;

  /// The main file name if passed in explicitly.
  std::string MainFileName;

  /// The dwarf file and directory tables from the dwarf .file directive.
  /// We now emit a line table for each compile unit. To reduce the prologue
  /// size of each line table, the files and directories used by each compile
  /// unit are separated.
  std::map<unsigned, MCDwarfLineTable> MCDwarfLineTablesCUMap;

  /// The current dwarf line information from the last dwarf .loc directive.
  MCDwarfLoc CurrentDwarfLoc;
  bool DwarfLocSeen = false;

  /// Generate dwarf debugging info for assembly source files.
  bool GenDwarfForAssembly = false;

  /// The current dwarf file number when generate dwarf debugging info for
  /// assembly source files.
  unsigned GenDwarfFileNumber = 0;

  /// Sections for generating the .debug_ranges and .debug_aranges sections.
  SetVector<MCSection *> SectionsForRanges;

  /// The information gathered from labels that will have dwarf label
  /// entries when generating dwarf assembly source files.
  std::vector<MCGenDwarfLabelEntry> MCGenDwarfLabelEntries;

  /// The string to embed in the debug information for the compile unit, if
  /// non-empty.
  StringRef DwarfDebugFlags;

  /// The string to embed in as the dwarf AT_producer for the compile unit, if
  /// non-empty.
  StringRef DwarfDebugProducer;

  /// The maximum version of dwarf that we should emit.
  uint16_t DwarfVersion = 4;

  /// The format of dwarf that we emit.
  dwarf::DwarfFormat DwarfFormat = dwarf::DWARF32;

  /// Honor temporary labels; this is useful for debugging semantic
  /// differences between temporary and non-temporary labels (primarily on
  /// Darwin).
  bool AllowTemporaryLabels = true;
  bool UseNamesOnTempLabels = false;

  /// The Compile Unit ID that we are currently processing.
  unsigned DwarfCompileUnitID = 0;

  /// A collection of MCPseudoProbe in the current module
  MCPseudoProbeTable PseudoProbeTable;

  // Sections are differentiated by the quadruple (section_name, group_name,
  // unique_id, link_to_symbol_name). Sections sharing the same quadruple are
  // combined into one section.
  struct ELFSectionKey {
    std::string SectionName;
    StringRef GroupName;
    StringRef LinkedToName;
    unsigned UniqueID;

    ELFSectionKey(StringRef SectionName, StringRef GroupName,
                  StringRef LinkedToName, unsigned UniqueID)
        : SectionName(SectionName), GroupName(GroupName),
          LinkedToName(LinkedToName), UniqueID(UniqueID) {}

    bool operator<(const ELFSectionKey &Other) const {
      if (SectionName != Other.SectionName)
        return SectionName < Other.SectionName;
      if (GroupName != Other.GroupName)
        return GroupName < Other.GroupName;
      if (int O = LinkedToName.compare(Other.LinkedToName))
        return O < 0;
      return UniqueID < Other.UniqueID;
    }
  };

  struct COFFSectionKey {
    std::string SectionName;
    StringRef GroupName;
    int SelectionKey;
    unsigned UniqueID;

    COFFSectionKey(StringRef SectionName, StringRef GroupName, int SelectionKey,
                   unsigned UniqueID)
        : SectionName(SectionName), GroupName(GroupName),
          SelectionKey(SelectionKey), UniqueID(UniqueID) {}

    bool operator<(const COFFSectionKey &Other) const {
      if (SectionName != Other.SectionName)
        return SectionName < Other.SectionName;
      if (GroupName != Other.GroupName)
        return GroupName < Other.GroupName;
      if (SelectionKey != Other.SelectionKey)
        return SelectionKey < Other.SelectionKey;
      return UniqueID < Other.UniqueID;
    }
  };

  struct WasmSectionKey {
    std::string SectionName;
    StringRef GroupName;
    unsigned UniqueID;

    WasmSectionKey(StringRef SectionName, StringRef GroupName,
                   unsigned UniqueID)
        : SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) {}

    bool operator<(const WasmSectionKey &Other) const {
      if (SectionName != Other.SectionName)
        return SectionName < Other.SectionName;
      if (GroupName != Other.GroupName)
        return GroupName < Other.GroupName;
      return UniqueID < Other.UniqueID;
    }
  };

  struct XCOFFSectionKey {
    // Section name.
    std::string SectionName;
    // Section property.
    // For csect section, it is storage mapping class.
    // For debug section, it is section type flags.
    union {
      XCOFF::StorageMappingClass MappingClass;
      XCOFF::DwarfSectionSubtypeFlags DwarfSubtypeFlags;
    };
    bool IsCsect;

    XCOFFSectionKey(StringRef SectionName,
                    XCOFF::StorageMappingClass MappingClass)
        : SectionName(SectionName), MappingClass(MappingClass), IsCsect(true) {}

    XCOFFSectionKey(StringRef SectionName,
                    XCOFF::DwarfSectionSubtypeFlags DwarfSubtypeFlags)
        : SectionName(SectionName), DwarfSubtypeFlags(DwarfSubtypeFlags),
          IsCsect(false) {}

    bool operator<(const XCOFFSectionKey &Other) const {
      if (IsCsect && Other.IsCsect)
        return std::tie(SectionName, MappingClass) <
               std::tie(Other.SectionName, Other.MappingClass);
      if (IsCsect != Other.IsCsect)
        return IsCsect;
      return std::tie(SectionName, DwarfSubtypeFlags) <
             std::tie(Other.SectionName, Other.DwarfSubtypeFlags);
    }
  };

  StringMap<MCSectionMachO *> MachOUniquingMap;
  std::map<ELFSectionKey, MCSectionELF *> ELFUniquingMap;
  std::map<COFFSectionKey, MCSectionCOFF *> COFFUniquingMap;
  std::map<std::string, MCSectionGOFF *> GOFFUniquingMap;
  std::map<WasmSectionKey, MCSectionWasm *> WasmUniquingMap;
  std::map<XCOFFSectionKey, MCSectionXCOFF *> XCOFFUniquingMap;
  StringMap<MCSectionDXContainer *> DXCUniquingMap;
  StringMap<bool> RelSecNames;

  SpecificBumpPtrAllocator<MCSubtargetInfo> MCSubtargetAllocator;

  /// Do automatic reset in destructor
  bool AutoReset;

  MCTargetOptions const *TargetOptions;

  bool HadError = false;

  void reportCommon(SMLoc Loc,
                    std::function<void(SMDiagnostic &, const SourceMgr *)>);

  MCSymbol *createSymbolImpl(const StringMapEntry<bool> *Name,
                             bool CanBeUnnamed);
  MCSymbol *createSymbol(StringRef Name, bool AlwaysAddSuffix,
                         bool IsTemporary);

  MCSymbol *getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal,
                                              unsigned Instance);

  MCSectionELF *createELFSectionImpl(StringRef Section, unsigned Type,
                                     unsigned Flags, SectionKind K,
                                     unsigned EntrySize,
                                     const MCSymbolELF *Group, bool IsComdat,
                                     unsigned UniqueID,
                                     const MCSymbolELF *LinkedToSym);

  MCSymbolXCOFF *createXCOFFSymbolImpl(const StringMapEntry<bool> *Name,
                                       bool IsTemporary);

  /// Map of currently defined macros.
  StringMap<MCAsmMacro> MacroMap;

  struct ELFEntrySizeKey {
    std::string SectionName;
    unsigned Flags;
    unsigned EntrySize;

    ELFEntrySizeKey(StringRef SectionName, unsigned Flags, unsigned EntrySize)
        : SectionName(SectionName), Flags(Flags), EntrySize(EntrySize) {}

    bool operator<(const ELFEntrySizeKey &Other) const {
      if (SectionName != Other.SectionName)
        return SectionName < Other.SectionName;
      if (Flags != Other.Flags)
        return Flags < Other.Flags;
      return EntrySize < Other.EntrySize;
    }
  };

  // Symbols must be assigned to a section with a compatible entry size and
  // flags. This map is used to assign unique IDs to sections to distinguish
  // between sections with identical names but incompatible entry sizes and/or
  // flags. This can occur when a symbol is explicitly assigned to a section,
  // e.g. via __attribute__((section("myname"))).
  std::map<ELFEntrySizeKey, unsigned> ELFEntrySizeMap;

  // This set is used to record the generic mergeable section names seen.
  // These are sections that are created as mergeable, e.g. .debug_str. We need
  // to avoid assigning non-mergeable symbols to these sections. It is used
  // to prevent non-mergeable symbols from being explicitly assigned to
  // mergeable sections (e.g. via __attribute__((section("myname")))).
  DenseSet<StringRef> ELFSeenGenericMergeableSections;

public:
  explicit MCContext(const Triple &TheTriple, const MCAsmInfo *MAI,
                     const MCRegisterInfo *MRI, const MCSubtargetInfo *MSTI,
                     const SourceMgr *Mgr = nullptr,
                     MCTargetOptions const *TargetOpts = nullptr,
                     bool DoAutoReset = true,
                     StringRef Swift5ReflSegmentName = {});
  MCContext(const MCContext &) = delete;
  MCContext &operator=(const MCContext &) = delete;
  ~MCContext();

  Environment getObjectFileType() const { return Env; }

  const StringRef &getSwift5ReflectionSegmentName() const {
    return Swift5ReflectionSegmentName;
  }
  const Triple &getTargetTriple() const { return TT; }
  const SourceMgr *getSourceManager() const { return SrcMgr; }

  void initInlineSourceManager();
  SourceMgr *getInlineSourceManager() { return InlineSrcMgr.get(); }
  std::vector<const MDNode *> &getLocInfos() { return LocInfos; }
  void setDiagnosticHandler(DiagHandlerTy DiagHandler) {
    this->DiagHandler = DiagHandler;
  }

  void setObjectFileInfo(const MCObjectFileInfo *Mofi) { MOFI = Mofi; }

  const MCAsmInfo *getAsmInfo() const { return MAI; }

  const MCRegisterInfo *getRegisterInfo() const { return MRI; }

  const MCObjectFileInfo *getObjectFileInfo() const { return MOFI; }

  const MCSubtargetInfo *getSubtargetInfo() const { return MSTI; }

  CodeViewContext &getCVContext();

  void setAllowTemporaryLabels(bool Value) { AllowTemporaryLabels = Value; }
  void setUseNamesOnTempLabels(bool Value) { UseNamesOnTempLabels = Value; }

  /// \name Module Lifetime Management
  /// @{

  /// reset - Return the object to its right-after-construction state to
  /// prepare for processing a new module.
  void reset();

  /// @}

  /// \name MCInst Management

  /// Create and return a new MC instruction.
  MCInst *createMCInst();

  /// \name Symbol Management
  /// @{

  /// Create a new linker temporary symbol with the specified prefix (Name) or
  /// "tmp". This creates an "l"-prefixed symbol for Mach-O and is identical
  /// to createNamedTempSymbol for other object file formats.
  MCSymbol *createLinkerPrivateTempSymbol();
  MCSymbol *createLinkerPrivateSymbol(const Twine &Name);

  /// Create a temporary symbol with a unique name. The name will be omitted
  /// in the symbol table if UseNamesOnTempLabels is false (default except
  /// MCAsmStreamer). The overload without Name uses an unspecified name.
  MCSymbol *createTempSymbol();
  MCSymbol *createTempSymbol(const Twine &Name, bool AlwaysAddSuffix = true);

  /// Create a temporary symbol with a unique name whose name cannot be
  /// omitted in the symbol table. This is rarely used.
  MCSymbol *createNamedTempSymbol();
  MCSymbol *createNamedTempSymbol(const Twine &Name);
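
  // Example (illustrative): `Ctx.createTempSymbol("func_begin")` returns a
  // uniquely suffixed assembler-temporary symbol (e.g. ".Lfunc_begin0" with
  // ELF-style private prefixes), whereas createNamedTempSymbol guarantees the
  // name is kept in the symbol table. `Ctx` is assumed to be an MCContext in
  // scope.
  //
  //   MCSymbol *Begin = Ctx.createTempSymbol("func_begin");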

  /// Create the definition of a directional local symbol for numbered label
  /// (used for "1:" definitions).
  MCSymbol *createDirectionalLocalSymbol(unsigned LocalLabelVal);

  /// Create and return a directional local symbol for numbered label (used
  /// for "1b" or "1f" references).
  MCSymbol *getDirectionalLocalSymbol(unsigned LocalLabelVal, bool Before);

  /// Look up the symbol with the specified \p Name.  If it exists, return
  /// it.  If not, create a forward reference and return it.
  ///
  /// \param Name - The symbol name, which must be unique across all symbols.
  MCSymbol *getOrCreateSymbol(const Twine &Name);

  /// Gets a symbol that will be defined to the final stack offset of a local
  /// variable after codegen.
  ///
  /// \param Idx - The index of a local variable passed to \@llvm.localescape.
  MCSymbol *getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx);

  MCSymbol *getOrCreateParentFrameOffsetSymbol(const Twine &FuncName);

  MCSymbol *getOrCreateLSDASymbol(const Twine &FuncName);

  /// Get the symbol for \p Name, or null.
  MCSymbol *lookupSymbol(const Twine &Name) const;

  /// Set value for a symbol.
  void setSymbolValue(MCStreamer &Streamer, const Twine &Sym, uint64_t Val);

  /// getSymbols - Get a reference to the symbol table, for clients that
  /// want to, for example, iterate over all symbols. 'const' because any
  /// modification of the table itself must go through the MCContext APIs.
  const SymbolTable &getSymbols() const { return Symbols; }

  /// getInlineAsmLabel - Return the symbol for \p Name if it is a label
  /// referenced in inline assembly, or null otherwise.
  MCSymbol *getInlineAsmLabel(StringRef Name) const {
    return InlineAsmUsedLabelNames.lookup(Name);
  }

  /// registerInlineAsmLabel - Records that the name is a label referenced in
  /// inline assembly.
  void registerInlineAsmLabel(MCSymbol *Sym);

  /// @}

  /// \name Section Management
  /// @{

  enum : unsigned {
    /// Pass this value as the UniqueID during section creation to get the
    /// generic section with the given name and characteristics. The usual
    /// sections such as .text use this ID.
    GenericSectionID = ~0U
  };

  /// Return the MCSection for the specified Mach-O section.  This requires
  /// the operands to be valid.
  MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section,
                                  unsigned TypeAndAttributes,
                                  unsigned Reserved2, SectionKind K,
                                  const char *BeginSymName = nullptr);

  MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section,
                                  unsigned TypeAndAttributes, SectionKind K,
                                  const char *BeginSymName = nullptr) {
    return getMachOSection(Segment, Section, TypeAndAttributes, 0, K,
                           BeginSymName);
  }
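
  // Example (illustrative sketch, not part of this header): looking up the
  // Mach-O text section through the convenience overload above.
  // MachO::S_ATTR_PURE_INSTRUCTIONS is from llvm/BinaryFormat/MachO.h; `Ctx`
  // is assumed to be an MCContext in scope.
  //
  //   MCSectionMachO *Text = Ctx.getMachOSection(
  //       "__TEXT", "__text", MachO::S_ATTR_PURE_INSTRUCTIONS,
  //       SectionKind::getText());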

  MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
                              unsigned Flags) {
    return getELFSection(Section, Type, Flags, 0, "", false);
  }

  MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
                              unsigned Flags, unsigned EntrySize) {
    return getELFSection(Section, Type, Flags, EntrySize, "", false,
                         MCSection::NonUniqueID, nullptr);
  }

  MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
                              unsigned Flags, unsigned EntrySize,
                              const Twine &Group, bool IsComdat) {
    return getELFSection(Section, Type, Flags, EntrySize, Group, IsComdat,
                         MCSection::NonUniqueID, nullptr);
  }

  MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
                              unsigned Flags, unsigned EntrySize,
                              const Twine &Group, bool IsComdat,
                              unsigned UniqueID,
                              const MCSymbolELF *LinkedToSym);

  MCSectionELF *getELFSection(const Twine &Section, unsigned Type,
                              unsigned Flags, unsigned EntrySize,
                              const MCSymbolELF *Group, bool IsComdat,
                              unsigned UniqueID,
                              const MCSymbolELF *LinkedToSym);
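
  // Example (illustrative sketch): creating a mergeable string section with a
  // 1-byte entry size via the (Section, Type, Flags, EntrySize) overload
  // above. The ELF::SHT_*/SHF_* constants are from llvm/BinaryFormat/ELF.h;
  // `Ctx` is assumed to be an MCContext in scope.
  //
  //   MCSectionELF *Strs = Ctx.getELFSection(
  //       ".rodata.str1.1", ELF::SHT_PROGBITS,
  //       ELF::SHF_ALLOC | ELF::SHF_MERGE | ELF::SHF_STRINGS,
  //       /*EntrySize=*/1);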

  /// Get a section with the provided group identifier. This section is
  /// named by concatenating \p Prefix with '.' then \p Suffix. The \p Type
  /// describes the type of the section and \p Flags are used to further
  /// configure this named section.
  MCSectionELF *getELFNamedSection(const Twine &Prefix, const Twine &Suffix,
                                   unsigned Type, unsigned Flags,
                                   unsigned EntrySize = 0);

  MCSectionELF *createELFRelSection(const Twine &Name, unsigned Type,
                                    unsigned Flags, unsigned EntrySize,
                                    const MCSymbolELF *Group,
                                    const MCSectionELF *RelInfoSection);

  MCSectionELF *createELFGroupSection(const MCSymbolELF *Group, bool IsComdat);

  void recordELFMergeableSectionInfo(StringRef SectionName, unsigned Flags,
                                     unsigned UniqueID, unsigned EntrySize);

  bool isELFImplicitMergeableSectionNamePrefix(StringRef Name);

  bool isELFGenericMergeableSection(StringRef Name);

  /// Return the unique ID of the section with the given name, flags and entry
  /// size, if it exists.
  std::optional<unsigned> getELFUniqueIDForEntsize(StringRef SectionName,
                                                   unsigned Flags,
                                                   unsigned EntrySize);

  MCSectionGOFF *getGOFFSection(StringRef Section, SectionKind Kind,
                                MCSection *Parent, const MCExpr *SubsectionId);

  MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics,
                                SectionKind Kind, StringRef COMDATSymName,
                                int Selection,
                                unsigned UniqueID = GenericSectionID,
                                const char *BeginSymName = nullptr);

  MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics,
                                SectionKind Kind,
                                const char *BeginSymName = nullptr);

  /// Gets or creates a section equivalent to Sec that is associated with the
  /// section containing KeySym. For example, to create a debug info section
  /// associated with an inline function, pass the normal debug info section
  /// as Sec and the function symbol as KeySym.
  MCSectionCOFF *
  getAssociativeCOFFSection(MCSectionCOFF *Sec, const MCSymbol *KeySym,
                            unsigned UniqueID = GenericSectionID);

  MCSectionSPIRV *getSPIRVSection();

  MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
                                unsigned Flags = 0) {
    return getWasmSection(Section, K, Flags, nullptr);
  }

  MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
                                unsigned Flags, const char *BeginSymName) {
    return getWasmSection(Section, K, Flags, "", ~0, BeginSymName);
  }

  MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
                                unsigned Flags, const Twine &Group,
                                unsigned UniqueID) {
    return getWasmSection(Section, K, Flags, Group, UniqueID, nullptr);
  }

  MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
                                unsigned Flags, const Twine &Group,
                                unsigned UniqueID, const char *BeginSymName);

  MCSectionWasm *getWasmSection(const Twine &Section, SectionKind K,
                                unsigned Flags, const MCSymbolWasm *Group,
                                unsigned UniqueID, const char *BeginSymName);

  /// Get the section for the provided \p Section name.
  MCSectionDXContainer *getDXContainerSection(StringRef Section, SectionKind K);

  bool hasXCOFFSection(StringRef Section,
                       XCOFF::CsectProperties CsectProp) const;

  MCSectionXCOFF *getXCOFFSection(
      StringRef Section, SectionKind K,
      std::optional<XCOFF::CsectProperties> CsectProp = std::nullopt,
      bool MultiSymbolsAllowed = false, const char *BeginSymName = nullptr,
      std::optional<XCOFF::DwarfSectionSubtypeFlags> DwarfSubtypeFlags =
          std::nullopt);

  // Create and save a copy of STI and return a reference to the copy.
  MCSubtargetInfo &getSubtargetCopy(const MCSubtargetInfo &STI);

  uint8_t getBBAddrMapVersion() const { return BBAddrMapVersion; }

  /// @}

  /// \name Dwarf Management
  /// @{

  /// Get the compilation directory for DW_AT_comp_dir
  /// The compilation directory should be set with \c setCompilationDir before
  /// calling this function. If it is unset, an empty string will be returned.
  StringRef getCompilationDir() const { return CompilationDir; }

  /// Set the compilation directory for DW_AT_comp_dir
  void setCompilationDir(StringRef S) { CompilationDir = S.str(); }

  /// Add an entry to the debug prefix map.
  void addDebugPrefixMapEntry(const std::string &From, const std::string &To);

  /// Remap one path in-place as per the debug prefix map.
  void remapDebugPath(SmallVectorImpl<char> &Path);

  /// Remap all debug directory paths in-place as per the debug prefix map.
  void RemapDebugPaths();

  /// Get the main file name for use in error messages and debug
  /// info. This can be set to ensure we've got the correct file name
  /// after preprocessing or for -save-temps.
  const std::string &getMainFileName() const { return MainFileName; }

  /// Set the main file name and override the default.
  void setMainFileName(StringRef S) { MainFileName = std::string(S); }

  /// Creates an entry in the dwarf file and directory tables.
  Expected<unsigned> getDwarfFile(StringRef Directory, StringRef FileName,
                                  unsigned FileNumber,
                                  std::optional<MD5::MD5Result> Checksum,
                                  std::optional<StringRef> Source,
                                  unsigned CUID);

  bool isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID = 0);

  const std::map<unsigned, MCDwarfLineTable> &getMCDwarfLineTables() const {
    return MCDwarfLineTablesCUMap;
  }

  MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) {
    return MCDwarfLineTablesCUMap[CUID];
  }

  const MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) const {
    auto I = MCDwarfLineTablesCUMap.find(CUID);
    assert(I != MCDwarfLineTablesCUMap.end());
    return I->second;
  }

  const SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles(unsigned CUID = 0) {
    return getMCDwarfLineTable(CUID).getMCDwarfFiles();
  }

  const SmallVectorImpl<std::string> &getMCDwarfDirs(unsigned CUID = 0) {
    return getMCDwarfLineTable(CUID).getMCDwarfDirs();
  }

  unsigned getDwarfCompileUnitID() { return DwarfCompileUnitID; }

  void setDwarfCompileUnitID(unsigned CUIndex) { DwarfCompileUnitID = CUIndex; }

  /// Specifies the "root" file and directory of the compilation unit.
  /// These are "file 0" and "directory 0" in DWARF v5.
  void setMCLineTableRootFile(unsigned CUID, StringRef CompilationDir,
                              StringRef Filename,
                              std::optional<MD5::MD5Result> Checksum,
                              std::optional<StringRef> Source) {
    getMCDwarfLineTable(CUID).setRootFile(CompilationDir, Filename, Checksum,
                                          Source);
  }

  /// Reports whether MD5 checksum usage is consistent (all-or-none).
  bool isDwarfMD5UsageConsistent(unsigned CUID) const {
    return getMCDwarfLineTable(CUID).isMD5UsageConsistent();
  }

  /// Saves the information from the currently parsed dwarf .loc directive
  /// and sets DwarfLocSeen.  When the next instruction is assembled, an
  /// entry in the line number table will be created with this information
  /// and the address of the instruction.
  void setCurrentDwarfLoc(unsigned FileNum, unsigned Line, unsigned Column,
                          unsigned Flags, unsigned Isa,
                          unsigned Discriminator) {
    CurrentDwarfLoc.setFileNum(FileNum);
    CurrentDwarfLoc.setLine(Line);
    CurrentDwarfLoc.setColumn(Column);
    CurrentDwarfLoc.setFlags(Flags);
    CurrentDwarfLoc.setIsa(Isa);
    CurrentDwarfLoc.setDiscriminator(Discriminator);
    DwarfLocSeen = true;
  }
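
  // Example (illustrative): recording the state of a parsed ".loc 1 42 7"
  // directive with the is_stmt flag set. DWARF2_FLAG_IS_STMT is from
  // llvm/MC/MCDwarf.h; `Ctx` is assumed to be an MCContext in scope.
  //
  //   Ctx.setCurrentDwarfLoc(/*FileNum=*/1, /*Line=*/42, /*Column=*/7,
  //                          DWARF2_FLAG_IS_STMT, /*Isa=*/0,
  //                          /*Discriminator=*/0);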

  void clearDwarfLocSeen() { DwarfLocSeen = false; }

  bool getDwarfLocSeen() { return DwarfLocSeen; }
  const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; }

  bool getGenDwarfForAssembly() { return GenDwarfForAssembly; }
  void setGenDwarfForAssembly(bool Value) { GenDwarfForAssembly = Value; }
  unsigned getGenDwarfFileNumber() { return GenDwarfFileNumber; }
  EmitDwarfUnwindType emitDwarfUnwindInfo() const;
  bool emitCompactUnwindNonCanonical() const;

  void setGenDwarfFileNumber(unsigned FileNumber) {
    GenDwarfFileNumber = FileNumber;
  }

  /// Specifies information about the "root file" for assembler clients
  /// (e.g., llvm-mc). Assumes compilation dir etc. have been set up.
  void setGenDwarfRootFile(StringRef FileName, StringRef Buffer);

  const SetVector<MCSection *> &getGenDwarfSectionSyms() {
    return SectionsForRanges;
  }

  bool addGenDwarfSection(MCSection *Sec) {
    return SectionsForRanges.insert(Sec);
  }

  void finalizeDwarfSections(MCStreamer &MCOS);

  const std::vector<MCGenDwarfLabelEntry> &getMCGenDwarfLabelEntries() const {
    return MCGenDwarfLabelEntries;
  }

  void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry &E) {
    MCGenDwarfLabelEntries.push_back(E);
  }

  void setDwarfDebugFlags(StringRef S) { DwarfDebugFlags = S; }
  StringRef getDwarfDebugFlags() { return DwarfDebugFlags; }

  void setDwarfDebugProducer(StringRef S) { DwarfDebugProducer = S; }
  StringRef getDwarfDebugProducer() { return DwarfDebugProducer; }

  void setDwarfFormat(dwarf::DwarfFormat f) { DwarfFormat = f; }
  dwarf::DwarfFormat getDwarfFormat() const { return DwarfFormat; }

  void setDwarfVersion(uint16_t v) { DwarfVersion = v; }
  uint16_t getDwarfVersion() const { return DwarfVersion; }

  /// @}

  StringRef getSecureLogFile() { return SecureLogFile; }
  raw_fd_ostream *getSecureLog() { return SecureLog.get(); }

  void setSecureLog(std::unique_ptr<raw_fd_ostream> Value) {
    SecureLog = std::move(Value);
  }

  bool getSecureLogUsed() { return SecureLogUsed; }
  void setSecureLogUsed(bool Value) { SecureLogUsed = Value; }

  void *allocate(unsigned Size, unsigned Align = 8) {
    return Allocator.Allocate(Size, Align);
  }

  /// Deallocation is a no-op: memory lives until the context's allocator is
  /// reset or destroyed.
  void deallocate(void *Ptr) {}

  bool hadError() { return HadError; }
  void diagnose(const SMDiagnostic &SMD);
  void reportError(SMLoc L, const Twine &Msg);
  void reportWarning(SMLoc L, const Twine &Msg);

  const MCAsmMacro *lookupMacro(StringRef Name) {
    StringMap<MCAsmMacro>::iterator I = MacroMap.find(Name);
    return (I == MacroMap.end()) ? nullptr : &I->getValue();
  }

  void defineMacro(StringRef Name, MCAsmMacro Macro) {
    MacroMap.insert(std::make_pair(Name, std::move(Macro)));
  }

  void undefineMacro(StringRef Name) { MacroMap.erase(Name); }

  MCPseudoProbeTable &getMCPseudoProbeTable() { return PseudoProbeTable; }
};

} // end namespace llvm

// operator new and delete aren't allowed inside namespaces.
// The throw specifications are mandated by the standard.
/// Placement new for using the MCContext's allocator.
///
/// This placement form of operator new uses the MCContext's allocator for
/// obtaining memory. It is a non-throwing new, which means that it returns
/// null on error (provided that is what the allocator does; the current
/// allocator does, so if this ever changes, this operator will have to be
/// changed, too).
/// Usage looks like this (assuming there's an MCContext 'Context' in scope):
/// \code
/// // Default alignment (8)
/// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments);
/// // Specific alignment
/// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments);
/// \endcode
/// Please note that you cannot use delete on the pointer; it must be
/// deallocated using an explicit destructor call followed by
/// \c Context.deallocate(Ptr).
///
/// \param Bytes The number of bytes to allocate. Calculated by the compiler.
/// \param C The MCContext that provides the allocator.
/// \param Alignment The alignment of the allocated memory (if the underlying
///                  allocator supports it).
/// \return The allocated memory. Could be NULL.
inline void *operator new(size_t Bytes, llvm::MCContext &C,
                          size_t Alignment = 8) noexcept {
  return C.allocate(Bytes, Alignment);
}
/// Placement delete companion to the new above.
///
/// This operator is just a companion to the new above. There is no way of
/// invoking it directly; see the new operator for more details. This operator
/// is called implicitly by the compiler if a placement new expression using
/// the MCContext throws in the object constructor.
inline void operator delete(void *Ptr, llvm::MCContext &C, size_t) noexcept {
  C.deallocate(Ptr);
}
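
// A minimal cleanup sketch (illustrative; `Ex` and `Context` are the names
// from the usage example above): objects allocated with this placement new
// are destroyed manually rather than with a delete expression.
//
//   Ex->~IntegerLiteral();   // explicit destructor call
//   Context.deallocate(Ex);  // currently a no-op, but keeps the contract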

/// This placement form of operator new[] uses the MCContext's allocator for
/// obtaining memory. It is a non-throwing new[], which means that it returns
/// null on error.
/// Usage looks like this (assuming there's an MCContext 'Context' in scope):
/// \code
/// // Default alignment (8)
/// char *data = new (Context) char[10];
/// // Specific alignment
/// char *data = new (Context, 4) char[10];
/// \endcode
/// Please note that you cannot use delete on the pointer; it must be
/// deallocated using an explicit destructor call followed by
/// \c Context.deallocate(Ptr).
///
/// \param Bytes The number of bytes to allocate. Calculated by the compiler.
/// \param C The MCContext that provides the allocator.
/// \param Alignment The alignment of the allocated memory (if the underlying
///                  allocator supports it).
/// \return The allocated memory. Could be NULL.
inline void *operator new[](size_t Bytes, llvm::MCContext &C,
                            size_t Alignment = 8) noexcept {
  return C.allocate(Bytes, Alignment);
}

/// Placement delete[] companion to the new[] above.
///
/// This operator is just a companion to the new[] above. There is no way of
/// invoking it directly; see the new[] operator for more details. This operator
/// is called implicitly by the compiler if a placement new[] expression using
/// the MCContext throws in the object constructor.
inline void operator delete[](void *Ptr, llvm::MCContext &C) noexcept {
  C.deallocate(Ptr);
}

#endif // LLVM_MC_MCCONTEXT_H
//===------------ llvm/MC/MCDecoderOps.h - Decoder driver -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Disassembler decoder state machine driver.
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCDECODEROPS_H
#define LLVM_MC_MCDECODEROPS_H

namespace llvm {

namespace MCD {
// Disassembler state machine opcodes.
enum DecoderOps {
  OPC_ExtractField = 1, // OPC_ExtractField(uint8_t Start, uint8_t Len)
  OPC_FilterValue,      // OPC_FilterValue(uleb128 Val, uint16_t NumToSkip)
  OPC_CheckField,       // OPC_CheckField(uint8_t Start, uint8_t Len,
                        //                uleb128 Val, uint16_t NumToSkip)
  OPC_CheckPredicate,   // OPC_CheckPredicate(uleb128 PIdx, uint16_t NumToSkip)
  OPC_Decode,           // OPC_Decode(uleb128 Opcode, uleb128 DIdx)
  OPC_TryDecode,        // OPC_TryDecode(uleb128 Opcode, uleb128 DIdx,
                        //               uint16_t NumToSkip)
  OPC_SoftFail,         // OPC_SoftFail(uleb128 PMask, uleb128 NMask)
  OPC_Fail              // OPC_Fail()
};
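
// Illustrative sketch (not a real table): a fragment of a generated decoder
// table using these opcodes. It checks instruction bits [25,28) for the
// value 5, decodes with decoder index 2 on a match, and otherwise skips to
// OPC_Fail. Multi-byte operands (uleb128/uint16) are left as placeholders.
//
//   OPC_CheckField, /*Start=*/25, /*Len=*/3, /*Val=*/5, /*NumToSkip=*/...,
//   OPC_Decode, /*Opcode=*/..., /*DIdx=*/2,
//   OPC_Fail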

} // namespace MCD
} // namespace llvm

#endif
//===- llvm/MC/MCAsmInfoELF.h - ELF Asm info --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFOELF_H
#define LLVM_MC_MCASMINFOELF_H

#include "llvm/MC/MCAsmInfo.h"

namespace llvm {

class MCAsmInfoELF : public MCAsmInfo {
  virtual void anchor();
  MCSection *getNonexecutableStackSection(MCContext &Ctx) const final;

protected:
  MCAsmInfoELF();
};

} // end namespace llvm

#endif // LLVM_MC_MCASMINFOELF_H
//===-- llvm/MC/MCFixupKindInfo.h - Fixup Descriptors -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCFIXUPKINDINFO_H
#define LLVM_MC_MCFIXUPKINDINFO_H

namespace llvm {

/// Target independent information on a fixup kind.
struct MCFixupKindInfo {
  enum FixupKindFlags {
    /// Is this fixup kind PC-relative? This is used by the assembler backend to
    /// evaluate fixup values in a target independent manner when possible.
    FKF_IsPCRel = (1 << 0),

    /// Should this fixup kind force a 4-byte aligned effective PC value?
    FKF_IsAlignedDownTo32Bits = (1 << 1),

    /// Should this fixup be evaluated in a target dependent manner?
    FKF_IsTarget = (1 << 2),

    /// This fixup kind should be resolved if defined.
    /// FIXME This is a workaround because we don't support certain ARM
    /// relocation types. This flag should eventually be removed.
    FKF_Constant = 1 << 3,
  };

  /// A target specific name for the fixup kind. The names will be unique for
  /// distinct kinds on any given target.
  const char *Name;

  /// The bit offset to write the relocation into.
  unsigned TargetOffset;

  /// The number of bits written by this fixup. The bits are assumed to be
  /// contiguous.
  unsigned TargetSize;

  /// Flags describing additional information on this fixup kind.
  unsigned Flags;
};
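
// Example (illustrative): how a backend's getFixupKindInfo() table entry for
// a hypothetical 4-byte PC-relative fixup might look. The name and values
// are assumptions, not taken from any in-tree target.
//
//   const MCFixupKindInfo Info = {"fixup_mytarget_pcrel32",
//                                 /*TargetOffset=*/0, /*TargetSize=*/32,
//                                 MCFixupKindInfo::FKF_IsPCRel};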

} // End llvm namespace

#endif
//===-- llvm/MC/MCInstBuilder.h - Simplify creation of MCInsts --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the MCInstBuilder class for convenient creation of
// MCInsts.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINSTBUILDER_H
#define LLVM_MC_MCINSTBUILDER_H

#include "llvm/MC/MCInst.h"

namespace llvm {

class MCInstBuilder {
  MCInst Inst;

public:
  /// Create a new MCInstBuilder for an MCInst with a specific opcode.
  MCInstBuilder(unsigned Opcode) {
    Inst.setOpcode(Opcode);
  }

  /// Add a new register operand.
  MCInstBuilder &addReg(unsigned Reg) {
    Inst.addOperand(MCOperand::createReg(Reg));
    return *this;
  }

  /// Add a new integer immediate operand.
  MCInstBuilder &addImm(int64_t Val) {
    Inst.addOperand(MCOperand::createImm(Val));
    return *this;
  }

  /// Add a new single floating point immediate operand.
  MCInstBuilder &addSFPImm(uint32_t Val) {
    Inst.addOperand(MCOperand::createSFPImm(Val));
    return *this;
  }

  /// Add a new floating point immediate operand.
  MCInstBuilder &addDFPImm(uint64_t Val) {
    Inst.addOperand(MCOperand::createDFPImm(Val));
    return *this;
  }

  /// Add a new MCExpr operand.
  MCInstBuilder &addExpr(const MCExpr *Val) {
    Inst.addOperand(MCOperand::createExpr(Val));
    return *this;
  }

  /// Add a new MCInst operand.
  MCInstBuilder &addInst(const MCInst *Val) {
    Inst.addOperand(MCOperand::createInst(Val));
    return *this;
  }

  /// Add an operand.
  MCInstBuilder &addOperand(const MCOperand &Op) {
    Inst.addOperand(Op);
    return *this;
  }

  operator MCInst&() {
    return Inst;
  }
};
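
// Example (illustrative): building a three-operand instruction with the
// fluent interface. `MyTarget::ADDrr` and the register enums are
// hypothetical placeholders for a real target's generated constants.
//
//   MCInst Inst = MCInstBuilder(MyTarget::ADDrr)
//                     .addReg(MyTarget::R0)
//                     .addReg(MyTarget::R1)
//                     .addReg(MyTarget::R2);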

} // end namespace llvm

#endif
//===- MCDXContainerStreamer.h - MCDXContainerStreamer Interface -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Overrides MCObjectStreamer to disable all unnecessary features with stubs.
// The DXContainer format isn't a fully featured object format. It doesn't
// support symbols, and initially it will not support instruction data since it
// is used as a bitcode container for DXIL.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDXCONTAINERSTREAMER_H
#define LLVM_MC_MCDXCONTAINERSTREAMER_H

#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCObjectWriter.h"

namespace llvm {
class MCInst;
class raw_ostream;

class MCDXContainerStreamer : public MCObjectStreamer {
public:
  MCDXContainerStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                        std::unique_ptr<MCObjectWriter> OW,
                        std::unique_ptr<MCCodeEmitter> Emitter)
      : MCObjectStreamer(Context, std::move(TAB), std::move(OW),
                         std::move(Emitter)) {}

  bool emitSymbolAttribute(MCSymbol *, MCSymbolAttr) override { return false; }
  void emitCommonSymbol(MCSymbol *, uint64_t, Align) override {}
  void emitZerofill(MCSection *, MCSymbol *Symbol = nullptr, uint64_t Size = 0,
                    Align ByteAlignment = Align(1),
                    SMLoc Loc = SMLoc()) override {}

private:
  void emitInstToData(const MCInst &, const MCSubtargetInfo &) override;
};

} // end namespace llvm

#endif // LLVM_MC_MCDXCONTAINERSTREAMER_H
//===- llvm/MC/DXContainerPSVInfo.h - DXContainer PSVInfo -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_DXCONTAINERPSVINFO_H
#define LLVM_MC_DXCONTAINERPSVINFO_H

#include "llvm/BinaryFormat/DXContainer.h"
#include "llvm/TargetParser/Triple.h"

#include <numeric>
#include <stdint.h>
#include <vector>

namespace llvm {

class raw_ostream;

namespace mcdxbc {
// This data structure is a helper for reading and writing PSV RuntimeInfo data.
// It is implemented in the BinaryFormat library so that it can be used by both
// the MC layer and Object tools.
// This structure is used to represent the extracted data in an inspectable and
// modifiable format, and can be used to serialize the data back into valid PSV
// RuntimeInfo.
struct PSVRuntimeInfo {
  dxbc::PSV::v2::RuntimeInfo BaseData;
  std::vector<dxbc::PSV::v2::ResourceBindInfo> Resources;

  // Serialize PSVInfo into the provided raw_ostream. The version field
  // specifies the data version to encode; the default value encodes the
  // highest supported version.
  void write(raw_ostream &OS,
             uint32_t Version = std::numeric_limits<uint32_t>::max()) const;

  void swapBytes(Triple::EnvironmentType Stage) {
    BaseData.swapBytes();
    BaseData.swapBytes(Stage);
    for (auto &Res : Resources)
      Res.swapBytes();
  }
};
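
// Minimal usage sketch (illustrative): serializing a PSVRuntimeInfo into an
// in-memory buffer at the highest supported version. SmallString and
// raw_svector_ostream are from llvm/ADT/SmallString.h and
// llvm/Support/raw_ostream.h.
//
//   mcdxbc::PSVRuntimeInfo Info;
//   SmallString<128> Buf;
//   raw_svector_ostream OS(Buf);
//   Info.write(OS);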

} // namespace mcdxbc
} // namespace llvm

#endif // LLVM_MC_DXCONTAINERPSVINFO_H
//===- MCPseudoProbe.h - Pseudo probe encoding support ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCPseudoProbe to support the pseudo
// probe encoding for AutoFDO. Pseudo probes together with their inline context
// are encoded in a DFS recursive way in the .pseudoprobe sections. For each
// .pseudoprobe section, the encoded binary data consists of one or multiple
// function records, one for each outlined function. A function record has the
// following format:
//
// FUNCTION BODY (one for each outlined function present in the text section)
//    GUID (uint64)
//        GUID of the function's source name which may be different from the
//        actual binary linkage name. This GUID will be used to decode and
//        generate a profile against the source function name.
//    NPROBES (ULEB128)
//        Number of probes originating from this function.
//    NUM_INLINED_FUNCTIONS (ULEB128)
//        Number of callees inlined into this function, aka number of
//        first-level inlinees
//    PROBE RECORDS
//        A list of NPROBES entries. Each entry contains:
//          INDEX (ULEB128)
//          TYPE (uint4)
//            0 - block probe, 1 - indirect call, 2 - direct call
//          ATTRIBUTE (uint3)
//            1 - reserved
//            2 - Sentinel
//            4 - HasDiscriminator
//          ADDRESS_TYPE (uint1)
//            0 - code address for regular probes (for backwards compatibility)
//              - GUID of linkage name for sentinel probes
//            1 - address delta
//          CODE_ADDRESS (uint64 or ULEB128)
//            code address or address delta, depending on ADDRESS_TYPE
//          DISCRIMINATOR (ULEB128) if HasDiscriminator
//    INLINED FUNCTION RECORDS
//        A list of NUM_INLINED_FUNCTIONS entries describing each of the inlined
//        callees.  Each record contains:
//          INLINE SITE
//            ID of the callsite probe (ULEB128)
//          FUNCTION BODY
//            A FUNCTION BODY entry describing the inlined function.
//
// TODO: retire the ADDRESS_TYPE encoding for code addresses once compatibility
// is no longer an issue.
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCPSEUDOPROBE_H
#define LLVM_MC_MCPSEUDOPROBE_H

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/PseudoProbe.h"
#include "llvm/Support/ErrorOr.h"
#include <list>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace llvm {

class MCSymbol;
class MCObjectStreamer;
class raw_ostream;

enum class MCPseudoProbeFlag {
  // If set, indicates that the probe is encoded as an address delta
  // instead of a real code address.
  AddressDelta = 0x1,
};

// Function descriptor decoded from .pseudo_probe_desc section
struct MCPseudoProbeFuncDesc {
  uint64_t FuncGUID = 0;
  uint64_t FuncHash = 0;
  std::string FuncName;

  MCPseudoProbeFuncDesc(uint64_t GUID, uint64_t Hash, StringRef Name)
      : FuncGUID(GUID), FuncHash(Hash), FuncName(Name){};

  void print(raw_ostream &OS);
};

class MCDecodedPseudoProbe;

// An inline frame has the form <CalleeGuid, ProbeID>
using InlineSite = std::tuple<uint64_t, uint32_t>;
using MCPseudoProbeInlineStack = SmallVector<InlineSite, 8>;
// GUID to PseudoProbeFuncDesc map
using GUIDProbeFunctionMap =
    std::unordered_map<uint64_t, MCPseudoProbeFuncDesc>;
// Address to pseudo probes map.
using AddressProbesMap =
    std::unordered_map<uint64_t, std::list<MCDecodedPseudoProbe>>;

class MCDecodedPseudoProbeInlineTree;

class MCPseudoProbeBase {
protected:
  uint64_t Guid;
  uint64_t Index;
  uint32_t Discriminator;
  uint8_t Attributes;
  uint8_t Type;
  // The value should be equal to PseudoProbeReservedId::Last + 1 which is
  // defined in SampleProfileProbe.h. The header file is not included here to
  // reduce the dependency from MC to IPO.
  const static uint32_t PseudoProbeFirstId = 1;

public:
  MCPseudoProbeBase(uint64_t G, uint64_t I, uint64_t At, uint8_t T, uint32_t D)
      : Guid(G), Index(I), Discriminator(D), Attributes(At), Type(T) {}

  bool isEntry() const { return Index == PseudoProbeFirstId; }

  uint64_t getGuid() const { return Guid; }

  uint64_t getIndex() const { return Index; }

  uint32_t getDiscriminator() const { return Discriminator; }

  uint8_t getAttributes() const { return Attributes; }

  uint8_t getType() const { return Type; }

  bool isBlock() const {
    return Type == static_cast<uint8_t>(PseudoProbeType::Block);
  }

  bool isIndirectCall() const {
    return Type == static_cast<uint8_t>(PseudoProbeType::IndirectCall);
  }

  bool isDirectCall() const {
    return Type == static_cast<uint8_t>(PseudoProbeType::DirectCall);
  }

  bool isCall() const { return isIndirectCall() || isDirectCall(); }

  void setAttributes(uint8_t Attr) { Attributes = Attr; }
};

/// Instances of this class represent a pseudo probe instance for a pseudo
/// probe table entry, which is created while a machine instruction is
/// assembled and uses an address from a temporary label created at the
/// current address in the current section.
class MCPseudoProbe : public MCPseudoProbeBase {
  MCSymbol *Label;

public:
  MCPseudoProbe(MCSymbol *Label, uint64_t Guid, uint64_t Index, uint64_t Type,
                uint64_t Attributes, uint32_t Discriminator)
      : MCPseudoProbeBase(Guid, Index, Attributes, Type, Discriminator),
        Label(Label) {
    assert(Type <= 0xFF && "Probe type too big to encode, exceeding 2^8");
    assert(Attributes <= 0xFF &&
           "Probe attributes too big to encode, exceeding 2^8");
  }

  MCSymbol *getLabel() const { return Label; }
  void emit(MCObjectStreamer *MCOS, const MCPseudoProbe *LastProbe) const;
};

// Represents a callsite with caller function name and probe id
using MCPseduoProbeFrameLocation = std::pair<StringRef, uint32_t>;

class MCDecodedPseudoProbe : public MCPseudoProbeBase {
  uint64_t Address;
  MCDecodedPseudoProbeInlineTree *InlineTree;

public:
  MCDecodedPseudoProbe(uint64_t Ad, uint64_t G, uint32_t I, PseudoProbeType K,
                       uint8_t At, uint32_t D,
                       MCDecodedPseudoProbeInlineTree *Tree)
      : MCPseudoProbeBase(G, I, At, static_cast<uint8_t>(K), D), Address(Ad),
        InlineTree(Tree){};

  uint64_t getAddress() const { return Address; }

  void setAddress(uint64_t Addr) { Address = Addr; }

  MCDecodedPseudoProbeInlineTree *getInlineTreeNode() const {
    return InlineTree;
  }

  // Get the inlined context by traversing the current inline tree backwards;
  // each tree node has an InlineSite, which is taken as the context.
  // \p ContextStack is populated in root-to-leaf order.
  void
  getInlineContext(SmallVectorImpl<MCPseduoProbeFrameLocation> &ContextStack,
                   const GUIDProbeFunctionMap &GUID2FuncMAP) const;

  // Helper function to get the string from context stack
  std::string
  getInlineContextStr(const GUIDProbeFunctionMap &GUID2FuncMAP) const;

  // Print pseudo probe while disassembling
  void print(raw_ostream &OS, const GUIDProbeFunctionMap &GUID2FuncMAP,
             bool ShowName) const;
};

template <typename ProbeType, typename DerivedProbeInlineTreeType>
class MCPseudoProbeInlineTreeBase {
  struct InlineSiteHash {
    uint64_t operator()(const InlineSite &Site) const {
      return std::get<0>(Site) ^ std::get<1>(Site);
    }
  };

protected:
  // Track children (e.g. inlinees) of current context
  using InlinedProbeTreeMap = std::unordered_map<
      InlineSite, std::unique_ptr<DerivedProbeInlineTreeType>, InlineSiteHash>;
  InlinedProbeTreeMap Children;
  // Set of probes that come with the function.
  std::vector<ProbeType> Probes;
  MCPseudoProbeInlineTreeBase() {
    static_assert(std::is_base_of<MCPseudoProbeInlineTreeBase,
                                  DerivedProbeInlineTreeType>::value,
                  "DerivedProbeInlineTreeType must be subclass of "
                  "MCPseudoProbeInlineTreeBase");
  }

public:
  uint64_t Guid = 0;

  // Root node has a GUID 0.
  bool isRoot() const { return Guid == 0; }
  InlinedProbeTreeMap &getChildren() { return Children; }
  const InlinedProbeTreeMap &getChildren() const { return Children; }
  std::vector<ProbeType> &getProbes() { return Probes; }
  void addProbes(ProbeType Probe) { Probes.push_back(Probe); }
  // Caller node of the inline site
  MCPseudoProbeInlineTreeBase<ProbeType, DerivedProbeInlineTreeType> *Parent =
      nullptr;
  DerivedProbeInlineTreeType *getOrAddNode(const InlineSite &Site) {
    auto Ret = Children.emplace(
        Site, std::make_unique<DerivedProbeInlineTreeType>(Site));
    Ret.first->second->Parent = this;
    return Ret.first->second.get();
  };
};

// A trie-based data structure to group probes by inline stack. A tree is
// allocated for a standalone .text section. A fake instance is created as the
// root of the tree. A real instance of this class is created for each
// function, either a not-inlined function that has code in the .text section
// or an inlined function.
class MCPseudoProbeInlineTree
    : public MCPseudoProbeInlineTreeBase<MCPseudoProbe,
                                         MCPseudoProbeInlineTree> {
public:
  MCPseudoProbeInlineTree() = default;
  MCPseudoProbeInlineTree(uint64_t Guid) { this->Guid = Guid; }
  MCPseudoProbeInlineTree(const InlineSite &Site) {
    this->Guid = std::get<0>(Site);
  }

  // MCPseudoProbeInlineTree method based on Inlinees
  void addPseudoProbe(const MCPseudoProbe &Probe,
                      const MCPseudoProbeInlineStack &InlineStack);
  void emit(MCObjectStreamer *MCOS, const MCPseudoProbe *&LastProbe);
};

// inline tree node for the decoded pseudo probe
class MCDecodedPseudoProbeInlineTree
    : public MCPseudoProbeInlineTreeBase<MCDecodedPseudoProbe *,
                                         MCDecodedPseudoProbeInlineTree> {
public:
  InlineSite ISite;
  // Used for decoding
  uint32_t ChildrenToProcess = 0;

  MCDecodedPseudoProbeInlineTree() = default;
  MCDecodedPseudoProbeInlineTree(const InlineSite &Site) : ISite(Site){};

  // Return false if it's a dummy inline site
  bool hasInlineSite() const { return !isRoot() && !Parent->isRoot(); }
};

/// Instances of this class represent the pseudo probes inserted into a compile
/// unit.
class MCPseudoProbeSections {
public:
  void addPseudoProbe(MCSymbol *FuncSym, const MCPseudoProbe &Probe,
                      const MCPseudoProbeInlineStack &InlineStack) {
    MCProbeDivisions[FuncSym].addPseudoProbe(Probe, InlineStack);
  }

  // TODO: Sort by getOrdinal to ensure a deterministic section order
  using MCProbeDivisionMap = std::map<MCSymbol *, MCPseudoProbeInlineTree>;

private:
  // A collection of MCPseudoProbe for each function. The MCPseudoProbes are
  // grouped by GUID because inlining can bring probes from different
  // functions into one function.
  MCProbeDivisionMap MCProbeDivisions;

public:
  const MCProbeDivisionMap &getMCProbes() const { return MCProbeDivisions; }

  bool empty() const { return MCProbeDivisions.empty(); }

  void emit(MCObjectStreamer *MCOS);
};

class MCPseudoProbeTable {
  // A collection of MCPseudoProbe in the current module grouped by
  // functions. MCPseudoProbes will be encoded into a corresponding
  // .pseudoprobe section. With functions emitted as separate comdats, a text
  // section contains only the code of a single function, and the probes
  // associated with the text section will be emitted into a standalone
  // .pseudoprobe section that shares the same comdat group with the function.
  MCPseudoProbeSections MCProbeSections;

public:
  static void emit(MCObjectStreamer *MCOS);

  MCPseudoProbeSections &getProbeSections() { return MCProbeSections; }

#ifndef NDEBUG
  static int DdgPrintIndent;
#endif
};

class MCPseudoProbeDecoder {
  // GUID to PseudoProbeFuncDesc map.
  GUIDProbeFunctionMap GUID2FuncDescMap;

  // Address to probes map.
  AddressProbesMap Address2ProbesMap;

  // The dummy root of the inline trie. All the outlined functions are direct
  // children of the dummy root, and every inlined function is a child of its
  // inliner, so the relation looks like:
  // DummyRoot --> OutlinedFunc --> InlinedFunc1 --> InlinedFunc2
  MCDecodedPseudoProbeInlineTree DummyInlineRoot;

  /// Points to the current location in the buffer.
  const uint8_t *Data = nullptr;

  /// Points to the end of the buffer.
  const uint8_t *End = nullptr;

  /// Whether encoding is based on a starting probe with absolute code address.
  bool EncodingIsAddrBased = false;

  // Decoding helper functions
  template <typename T> ErrorOr<T> readUnencodedNumber();
  template <typename T> ErrorOr<T> readUnsignedNumber();
  template <typename T> ErrorOr<T> readSignedNumber();
  ErrorOr<StringRef> readString(uint32_t Size);

public:
  using Uint64Set = DenseSet<uint64_t>;
  using Uint64Map = DenseMap<uint64_t, uint64_t>;

  // Decode pseudo_probe_desc section to build GUID to PseudoProbeFuncDesc map.
  bool buildGUID2FuncDescMap(const uint8_t *Start, std::size_t Size);

  // Decode pseudo_probe section to build address to probes map for specified
  // functions only.
  bool buildAddress2ProbeMap(const uint8_t *Start, std::size_t Size,
                             const Uint64Set &GuildFilter,
                             const Uint64Map &FuncStartAddrs);

  bool buildAddress2ProbeMap(MCDecodedPseudoProbeInlineTree *Cur,
                             uint64_t &LastAddr, const Uint64Set &GuildFilter,
                             const Uint64Map &FuncStartAddrs);

  // Print pseudo_probe_desc section info
  void printGUID2FuncDescMap(raw_ostream &OS);

  // Print pseudo_probe section info, used along with show-disassembly
  void printProbeForAddress(raw_ostream &OS, uint64_t Address);

  // Do printProbeForAddress for all addresses.
  void printProbesForAllAddresses(raw_ostream &OS);

  // Look up the probe of a call for the input address
  const MCDecodedPseudoProbe *getCallProbeForAddr(uint64_t Address) const;

  const MCPseudoProbeFuncDesc *getFuncDescForGUID(uint64_t GUID) const;

  // Helper function to populate one probe's inline stack into
  // \p InlineContextStack.
  // Current leaf location info will be added if IncludeLeaf is true
  // Example:
  //  Current probe(bar:3) inlined at foo:2 then inlined at main:1
  //  IncludeLeaf = true,  Output: [main:1, foo:2, bar:3]
  //  IncludeLeaf = false, Output: [main:1, foo:2]
  void getInlineContextForProbe(
      const MCDecodedPseudoProbe *Probe,
      SmallVectorImpl<MCPseduoProbeFrameLocation> &InlineContextStack,
      bool IncludeLeaf) const;

  const AddressProbesMap &getAddress2ProbesMap() const {
    return Address2ProbesMap;
  }

  AddressProbesMap &getAddress2ProbesMap() { return Address2ProbesMap; }

  const GUIDProbeFunctionMap &getGUID2FuncDescMap() const {
    return GUID2FuncDescMap;
  }

  const MCPseudoProbeFuncDesc *
  getInlinerDescForProbe(const MCDecodedPseudoProbe *Probe) const;

  const MCDecodedPseudoProbeInlineTree &getDummyInlineRoot() const {
    return DummyInlineRoot;
  }
};

} // end namespace llvm

#endif // LLVM_MC_MCPSEUDOPROBE_H
//===- MCObjectStreamer.h - MCStreamer Object File Interface ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCOBJECTSTREAMER_H
#define LLVM_MC_MCOBJECTSTREAMER_H

#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"

namespace llvm {
class MCContext;
class MCInst;
class MCObjectWriter;
class MCSymbol;
struct MCDwarfFrameInfo;
class MCAssembler;
class MCCodeEmitter;
class MCSubtargetInfo;
class MCExpr;
class MCFragment;
class MCDataFragment;
class MCAsmBackend;
class raw_ostream;
class raw_pwrite_stream;

/// Streaming object file generation interface.
///
/// This class provides an implementation of the MCStreamer interface which is
/// suitable for use with the assembler backend. Specific object file formats
/// are expected to subclass this interface to implement directives specific
/// to that file format or custom semantics expected by the object writer
/// implementation.
class MCObjectStreamer : public MCStreamer {
  std::unique_ptr<MCAssembler> Assembler;
  MCSection::iterator CurInsertionPoint;
  bool EmitEHFrame;
  bool EmitDebugFrame;
  SmallVector<MCSymbol *, 2> PendingLabels;
  SmallSetVector<MCSection *, 4> PendingLabelSections;
  unsigned CurSubsectionIdx = 0;
  struct PendingMCFixup {
    const MCSymbol *Sym;
    MCFixup Fixup;
    MCDataFragment *DF;
    PendingMCFixup(const MCSymbol *McSym, MCDataFragment *F, MCFixup McFixup)
        : Sym(McSym), Fixup(McFixup), DF(F) {}
  };
  SmallVector<PendingMCFixup, 2> PendingFixups;

  struct PendingAssignment {
    MCSymbol *Symbol;
    const MCExpr *Value;
  };

  /// A list of conditional assignments we may need to emit if the target
  /// symbol is later emitted.
  DenseMap<const MCSymbol *, SmallVector<PendingAssignment, 1>>
      pendingAssignments;

  virtual void emitInstToData(const MCInst &Inst, const MCSubtargetInfo&) = 0;
  void emitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
  void emitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
  MCSymbol *emitCFILabel() override;
  void emitInstructionImpl(const MCInst &Inst, const MCSubtargetInfo &STI);
  void resolvePendingFixups();

protected:
  MCObjectStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                   std::unique_ptr<MCObjectWriter> OW,
                   std::unique_ptr<MCCodeEmitter> Emitter);
  ~MCObjectStreamer();

public:
  /// State management.
  void reset() override;

  /// Object streamers require the integrated assembler.
  bool isIntegratedAssemblerRequired() const override { return true; }

  void emitFrames(MCAsmBackend *MAB);
  void emitCFISections(bool EH, bool Debug) override;

  MCFragment *getCurrentFragment() const;

  void insert(MCFragment *F) {
    flushPendingLabels(F);
    MCSection *CurSection = getCurrentSectionOnly();
    CurSection->getFragmentList().insert(CurInsertionPoint, F);
    F->setParent(CurSection);
  }

  /// Get a data fragment to write into, creating a new one if the current
  /// fragment is not a data fragment.
  /// Optionally a \p STI can be passed in so that a new fragment is created
  /// if the Subtarget differs from the current fragment.
  MCDataFragment *getOrCreateDataFragment(const MCSubtargetInfo* STI = nullptr);

protected:
  bool changeSectionImpl(MCSection *Section, const MCExpr *Subsection);

  /// Assign a label to the current Section and Subsection even though a
  /// fragment is not yet present. Use flushPendingLabels(F) to associate
  /// a fragment with this label.
  void addPendingLabel(MCSymbol* label);

  /// If any labels have been emitted but not assigned fragments in the current
  /// Section and Subsection, ensure that they get assigned to fragment F.
  /// Optionally, one can provide an offset \p FOffset as a symbol offset within
  /// the fragment.
  void flushPendingLabels(MCFragment *F, uint64_t FOffset = 0);

public:
  void visitUsedSymbol(const MCSymbol &Sym) override;

  /// Create a data fragment for any pending labels across all Sections
  /// and Subsections.
  void flushPendingLabels();

  MCAssembler &getAssembler() { return *Assembler; }
  MCAssembler *getAssemblerPtr() override;
  /// \name MCStreamer Interface
  /// @{

  void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
  virtual void emitLabelAtPos(MCSymbol *Symbol, SMLoc Loc, MCFragment *F,
                              uint64_t Offset);
  void emitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
  void emitConditionalAssignment(MCSymbol *Symbol,
                                 const MCExpr *Value) override;
  void emitValueImpl(const MCExpr *Value, unsigned Size,
                     SMLoc Loc = SMLoc()) override;
  void emitULEB128Value(const MCExpr *Value) override;
  void emitSLEB128Value(const MCExpr *Value) override;
  void emitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
  void changeSection(MCSection *Section, const MCExpr *Subsection) override;
  void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;

  /// Emit an instruction to a special fragment, because this instruction
  /// can change its size during relaxation.
  virtual void emitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &);

  void emitBundleAlignMode(Align Alignment) override;
  void emitBundleLock(bool AlignToEnd) override;
  void emitBundleUnlock() override;
  void emitBytes(StringRef Data) override;
  void emitValueToAlignment(Align Alignment, int64_t Value = 0,
                            unsigned ValueSize = 1,
                            unsigned MaxBytesToEmit = 0) override;
  void emitCodeAlignment(Align ByteAlignment, const MCSubtargetInfo *STI,
                         unsigned MaxBytesToEmit = 0) override;
  void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
                         SMLoc Loc) override;
  void emitDwarfLocDirective(unsigned FileNo, unsigned Line, unsigned Column,
                             unsigned Flags, unsigned Isa,
                             unsigned Discriminator,
                             StringRef FileName) override;
  void emitDwarfAdvanceLineAddr(int64_t LineDelta, const MCSymbol *LastLabel,
                                const MCSymbol *Label,
                                unsigned PointerSize) override;
  void emitDwarfLineEndEntry(MCSection *Section, MCSymbol *LastLabel) override;
  void emitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
                                 const MCSymbol *Label, SMLoc Loc);
  void emitCVLocDirective(unsigned FunctionId, unsigned FileNo, unsigned Line,
                          unsigned Column, bool PrologueEnd, bool IsStmt,
                          StringRef FileName, SMLoc Loc) override;
  void emitCVLinetableDirective(unsigned FunctionId, const MCSymbol *Begin,
                                const MCSymbol *End) override;
  void emitCVInlineLinetableDirective(unsigned PrimaryFunctionId,
                                      unsigned SourceFileId,
                                      unsigned SourceLineNum,
                                      const MCSymbol *FnStartSym,
                                      const MCSymbol *FnEndSym) override;
  void emitCVDefRangeDirective(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      StringRef FixedSizePortion) override;
  void emitCVStringTableDirective() override;
  void emitCVFileChecksumsDirective() override;
  void emitCVFileChecksumOffsetDirective(unsigned FileNo) override;
  void emitDTPRel32Value(const MCExpr *Value) override;
  void emitDTPRel64Value(const MCExpr *Value) override;
  void emitTPRel32Value(const MCExpr *Value) override;
  void emitTPRel64Value(const MCExpr *Value) override;
  void emitGPRel32Value(const MCExpr *Value) override;
  void emitGPRel64Value(const MCExpr *Value) override;
  std::optional<std::pair<bool, std::string>>
  emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr,
                     SMLoc Loc, const MCSubtargetInfo &STI) override;
  using MCStreamer::emitFill;
  void emitFill(const MCExpr &NumBytes, uint64_t FillValue,
                SMLoc Loc = SMLoc()) override;
  void emitFill(const MCExpr &NumValues, int64_t Size, int64_t Expr,
                SMLoc Loc = SMLoc()) override;
  void emitNops(int64_t NumBytes, int64_t ControlledNopLength, SMLoc Loc,
                const MCSubtargetInfo &STI) override;
  void emitFileDirective(StringRef Filename) override;
  void emitFileDirective(StringRef Filename, StringRef CompilerVersion,
                         StringRef TimeStamp, StringRef Description) override;

  void emitAddrsig() override;
  void emitAddrsigSym(const MCSymbol *Sym) override;

  void finishImpl() override;

  /// Emit the absolute difference between two symbols if possible.
  ///
  /// Emit the absolute difference between \c Hi and \c Lo, as long as we can
  /// compute it.  Currently, that requires that both symbols are in the same
  /// data fragment and that the target has not specified that diff expressions
  /// require relocations to be emitted. Otherwise, fall back to emitting the
  /// difference as an ordinary value expression.
  ///
  /// \pre Offset of \c Hi is greater than the offset of \c Lo.
  void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
                              unsigned Size) override;

  void emitAbsoluteSymbolDiffAsULEB128(const MCSymbol *Hi,
                                       const MCSymbol *Lo) override;

  bool mayHaveInstructions(MCSection &Sec) const override;

  /// Emits pending conditional assignments that depend on \p Symbol
  /// being emitted.
  void emitPendingAssignments(MCSymbol *Symbol);
};

} // end namespace llvm

#endif
//===- MCDirectives.h - Enums for directives on various targets -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines various enums that represent target-specific directives.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDIRECTIVES_H
#define LLVM_MC_MCDIRECTIVES_H

namespace llvm {

enum MCSymbolAttr {
  MCSA_Invalid = 0, ///< Not a valid directive.

  // Various directives in alphabetical order.
  MCSA_Cold,                    ///< .cold (MachO)
  MCSA_ELF_TypeFunction,        ///< .type _foo, STT_FUNC  # aka @function
  MCSA_ELF_TypeIndFunction,     ///< .type _foo, STT_GNU_IFUNC
  MCSA_ELF_TypeObject,          ///< .type _foo, STT_OBJECT  # aka @object
  MCSA_ELF_TypeTLS,             ///< .type _foo, STT_TLS     # aka @tls_object
  MCSA_ELF_TypeCommon,          ///< .type _foo, STT_COMMON  # aka @common
  MCSA_ELF_TypeNoType,          ///< .type _foo, STT_NOTYPE  # aka @notype
  MCSA_ELF_TypeGnuUniqueObject, ///< .type _foo, @gnu_unique_object
  MCSA_Global,                  ///< .globl
  MCSA_LGlobal,                 ///< .lglobl (XCOFF)
  MCSA_Extern,                  ///< .extern (XCOFF)
  MCSA_Hidden,                  ///< .hidden (ELF)
  MCSA_Exported,                ///< .globl _foo, exported (XCOFF)
  MCSA_IndirectSymbol,          ///< .indirect_symbol (MachO)
  MCSA_Internal,                ///< .internal (ELF)
  MCSA_LazyReference,           ///< .lazy_reference (MachO)
  MCSA_Local,                   ///< .local (ELF)
  MCSA_NoDeadStrip,             ///< .no_dead_strip (MachO)
  MCSA_SymbolResolver,          ///< .symbol_resolver (MachO)
  MCSA_AltEntry,                ///< .alt_entry (MachO)
  MCSA_PrivateExtern,           ///< .private_extern (MachO)
  MCSA_Protected,               ///< .protected (ELF)
  MCSA_Reference,               ///< .reference (MachO)
  MCSA_Weak,                    ///< .weak
  MCSA_WeakDefinition,          ///< .weak_definition (MachO)
  MCSA_WeakReference,           ///< .weak_reference (MachO)
  MCSA_WeakDefAutoPrivate,      ///< .weak_def_can_be_hidden (MachO)
  MCSA_WeakAntiDep,             ///< .weak_anti_dep (COFF)
  MCSA_Memtag,                  ///< .memtag (ELF)
};
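
// Illustrative sketch (not part of the original header): these enumerators
// are consumed by MCStreamer::emitSymbolAttribute(). Marking a hypothetical
// symbol Sym global and then weak through a streamer S might look like:
//
//   S.emitSymbolAttribute(Sym, MCSA_Global); // e.g. emits ".globl foo"
//   S.emitSymbolAttribute(Sym, MCSA_Weak);   // e.g. emits ".weak foo"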

enum MCAssemblerFlag {
  MCAF_SyntaxUnified,         ///< .syntax (ARM/ELF)
  MCAF_SubsectionsViaSymbols, ///< .subsections_via_symbols (MachO)
  MCAF_Code16,                ///< .code16 (X86) / .code 16 (ARM)
  MCAF_Code32,                ///< .code32 (X86) / .code 32 (ARM)
  MCAF_Code64                 ///< .code64 (X86)
};

enum MCDataRegionType {
  MCDR_DataRegion,            ///< .data_region
  MCDR_DataRegionJT8,         ///< .data_region jt8
  MCDR_DataRegionJT16,        ///< .data_region jt16
  MCDR_DataRegionJT32,        ///< .data_region jt32
  MCDR_DataRegionEnd          ///< .end_data_region
};

enum MCVersionMinType {
  MCVM_IOSVersionMin,         ///< .ios_version_min
  MCVM_OSXVersionMin,         ///< .macosx_version_min
  MCVM_TvOSVersionMin,        ///< .tvos_version_min
  MCVM_WatchOSVersionMin,     ///< .watchos_version_min
};

} // end namespace llvm

#endif
//===- MCAsmInfoXCOFF.h - XCOFF asm properties ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFOXCOFF_H
#define LLVM_MC_MCASMINFOXCOFF_H

#include "llvm/MC/MCAsmInfo.h"

namespace llvm {

class MCAsmInfoXCOFF : public MCAsmInfo {
  virtual void anchor();

protected:
  MCAsmInfoXCOFF();

public:
  // Return true only when C is an acceptable character inside an
  // MCSymbolXCOFF.
  bool isAcceptableChar(char C) const override;
};

} // end namespace llvm

#endif // LLVM_MC_MCASMINFOXCOFF_H
//===- MCSymbolXCOFF.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSYMBOLXCOFF_H
#define LLVM_MC_MCSYMBOLXCOFF_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/MC/MCSymbol.h"

namespace llvm {

class MCSectionXCOFF;

class MCSymbolXCOFF : public MCSymbol {
public:
  MCSymbolXCOFF(const StringMapEntry<bool> *Name, bool isTemporary)
      : MCSymbol(SymbolKindXCOFF, Name, isTemporary) {}

  static bool classof(const MCSymbol *S) { return S->isXCOFF(); }

  static StringRef getUnqualifiedName(StringRef Name) {
    if (Name.back() == ']') {
      StringRef Lhs, Rhs;
      std::tie(Lhs, Rhs) = Name.rsplit('[');
      assert(!Rhs.empty() && "Invalid SMC format in XCOFF symbol.");
      return Lhs;
    }
    return Name;
  }
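
  // Illustrative example (not part of the original header): a qualified XCOFF
  // name carries a storage mapping class suffix, which this helper strips:
  //
  //   MCSymbolXCOFF::getUnqualifiedName("foo[RO]") == "foo"
  //   MCSymbolXCOFF::getUnqualifiedName("bar")     == "bar"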

  void setStorageClass(XCOFF::StorageClass SC) {
    StorageClass = SC;
  }

  XCOFF::StorageClass getStorageClass() const {
    assert(StorageClass && "StorageClass not set on XCOFF MCSymbol.");
    return *StorageClass;
  }

  StringRef getUnqualifiedName() const { return getUnqualifiedName(getName()); }

  MCSectionXCOFF *getRepresentedCsect() const;

  void setRepresentedCsect(MCSectionXCOFF *C);

  void setVisibilityType(XCOFF::VisibilityType SVT) { VisibilityType = SVT; }

  XCOFF::VisibilityType getVisibilityType() const { return VisibilityType; }

  bool hasRename() const { return !SymbolTableName.empty(); }

  void setSymbolTableName(StringRef STN) { SymbolTableName = STN; }

  StringRef getSymbolTableName() const {
    if (hasRename())
      return SymbolTableName;
    return getUnqualifiedName();
  }

private:
  std::optional<XCOFF::StorageClass> StorageClass;
  MCSectionXCOFF *RepresentedCsect = nullptr;
  XCOFF::VisibilityType VisibilityType = XCOFF::SYM_V_UNSPECIFIED;
  StringRef SymbolTableName;
};

} // end namespace llvm

#endif // LLVM_MC_MCSYMBOLXCOFF_H
//===- MCLabel.h - Machine Code Directional Local Labels --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCLabel class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCLABEL_H
#define LLVM_MC_MCLABEL_H

namespace llvm {

class raw_ostream;

/// Instances of this class represent a label name in the MC file.  MCLabels
/// are created and uniqued by the MCContext class.  MCLabels should only be
/// constructed for valid instances in the object file.
class MCLabel {
  // The instance number of this Directional Local Label.
  unsigned Instance;

private: // MCContext creates and uniques these.
  friend class MCContext;

  MCLabel(unsigned instance) : Instance(instance) {}

public:
  MCLabel(const MCLabel &) = delete;
  MCLabel &operator=(const MCLabel &) = delete;

  /// Get the current instance of this Directional Local Label.
  unsigned getInstance() const { return Instance; }

  /// Increment the current instance of this Directional Local Label.
  unsigned incInstance() { return ++Instance; }

  /// Print the value to the stream \p OS.
  void print(raw_ostream &OS) const;

  /// Print the value to stderr.
  void dump() const;
};

inline raw_ostream &operator<<(raw_ostream &OS, const MCLabel &Label) {
  Label.print(OS);
  return OS;
}
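
// Illustrative sketch (not part of the original header): for a directional
// local label such as "1:", MCContext keeps one MCLabel per label number.
// Backward references ("1b") bind to the current instance, while a new
// definition bumps the counter that forward references ("1f") resolve to:
//
//   unsigned Cur  = L.getInstance(); // instance "1b" refers to (L: MCLabel&)
//   unsigned Next = L.incInstance(); // new definition; "1f" now resolves here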

} // end namespace llvm

#endif // LLVM_MC_MCLABEL_H
//===- MCSectionWasm.h - Wasm Machine Code Sections -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionWasm class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONWASM_H
#define LLVM_MC_MCSECTIONWASM_H

#include "llvm/MC/MCSection.h"

namespace llvm {

class MCSymbol;
class MCSymbolWasm;
class StringRef;
class raw_ostream;

/// This represents a section on wasm.
class MCSectionWasm final : public MCSection {
  unsigned UniqueID;

  const MCSymbolWasm *Group;

  // The offset of the MC function/data section in the wasm code/data section.
  // For data relocations the offset is relative to start of the data payload
  // itself and does not include the size of the section header.
  uint64_t SectionOffset = 0;

  // For data sections, this is the index of the corresponding wasm data
  // segment.
  uint32_t SegmentIndex = 0;

  // For data sections, whether to use a passive segment
  bool IsPassive = false;

  // For data sections, bitfield of WasmSegmentFlag
  unsigned SegmentFlags;

  // The storage of Name is owned by MCContext's WasmUniquingMap.
  friend class MCContext;
  MCSectionWasm(StringRef Name, SectionKind K, unsigned SegmentFlags,
                const MCSymbolWasm *Group, unsigned UniqueID, MCSymbol *Begin)
      : MCSection(SV_Wasm, Name, K, Begin), UniqueID(UniqueID), Group(Group),
        SegmentFlags(SegmentFlags) {}

public:
  /// Decides whether a '.section' directive should be printed before the
  /// section name
  bool shouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;

  const MCSymbolWasm *getGroup() const { return Group; }
  unsigned getSegmentFlags() const { return SegmentFlags; }

  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override;
  bool useCodeAlign() const override;
  bool isVirtualSection() const override;

  bool isWasmData() const {
    return Kind.isGlobalWriteableData() || Kind.isReadOnly() ||
           Kind.isThreadLocal();
  }

  bool isUnique() const { return UniqueID != ~0U; }
  unsigned getUniqueID() const { return UniqueID; }

  uint64_t getSectionOffset() const { return SectionOffset; }
  void setSectionOffset(uint64_t Offset) { SectionOffset = Offset; }

  uint32_t getSegmentIndex() const { return SegmentIndex; }
  void setSegmentIndex(uint32_t Index) { SegmentIndex = Index; }

  bool getPassive() const {
    assert(isWasmData());
    return IsPassive;
  }
  void setPassive(bool V = true) {
    assert(isWasmData());
    IsPassive = V;
  }
  static bool classof(const MCSection *S) { return S->getVariant() == SV_Wasm; }
};
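
// Illustrative sketch (not part of the original header): wasm data sections
// carry their segment bookkeeping on the section object itself. Sec below is
// a hypothetical MCSectionWasm* (e.g. obtained from MCContext):
//
//   if (Sec->isWasmData()) {
//     Sec->setSegmentIndex(1); // index of the backing wasm data segment
//     Sec->setPassive();       // passive segment, initialized at runtime
//   }                          // (e.g. via memory.init)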

} // end namespace llvm

#endif
//===- llvm/MC/MCObjectWriter.h - Object File Writer Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCOBJECTWRITER_H
#define LLVM_MC_MCOBJECTWRITER_H

#include "llvm/MC/MCSymbol.h"
#include "llvm/TargetParser/Triple.h"
#include <cstdint>

namespace llvm {

class MCAsmLayout;
class MCAssembler;
class MCFixup;
class MCFragment;
class MCSymbol;
class MCSymbolRefExpr;
class MCValue;

/// Defines the object file and target independent interfaces used by the
/// assembler backend to write native file format object files.
///
/// The object writer contains a few callbacks used by the assembler to allow
/// the object writer to modify the assembler data structures at appropriate
/// points. Once assembly is complete, the object writer is given the
/// MCAssembler instance, which contains all the symbol and section data which
/// should be emitted as part of writeObject().
class MCObjectWriter {
protected:
  std::vector<const MCSymbol *> AddrsigSyms;
  bool EmitAddrsigSection = false;

  MCObjectWriter() = default;

public:
  MCObjectWriter(const MCObjectWriter &) = delete;
  MCObjectWriter &operator=(const MCObjectWriter &) = delete;
  virtual ~MCObjectWriter();

  /// lifetime management
  virtual void reset() {}

  /// \name High-Level API
  /// @{

  /// Perform any late binding of symbols (for example, to assign symbol
  /// indices for use when generating relocations).
  ///
  /// This routine is called by the assembler after layout and relaxation is
  /// complete.
  virtual void executePostLayoutBinding(MCAssembler &Asm,
                                        const MCAsmLayout &Layout) = 0;

  /// Record a relocation entry.
  ///
  /// This routine is called by the assembler after layout and relaxation, and
  /// post layout binding. The implementation is responsible for storing
  /// information about the relocation so that it can be emitted during
  /// writeObject().
  virtual void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
                                const MCFragment *Fragment,
                                const MCFixup &Fixup, MCValue Target,
                                uint64_t &FixedValue) = 0;

  /// Check whether the difference (A - B) between two symbol references is
  /// fully resolved.
  ///
  /// Clients are not required to answer precisely and may conservatively return
  /// false, even when a difference is fully resolved.
  bool isSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
                                          const MCSymbolRefExpr *A,
                                          const MCSymbolRefExpr *B,
                                          bool InSet) const;

  virtual bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
                                                      const MCSymbol &A,
                                                      const MCSymbol &B,
                                                      bool InSet) const;

  virtual bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
                                                      const MCSymbol &SymA,
                                                      const MCFragment &FB,
                                                      bool InSet,
                                                      bool IsPCRel) const;

  /// ELF only. Mark that we have seen GNU ABI usage (e.g. SHF_GNU_RETAIN).
  virtual void markGnuAbi() {}

  /// Tell the object writer to emit an address-significance table during
  /// writeObject(). If this function is not called, all symbols are treated as
  /// address-significant.
  void emitAddrsigSection() { EmitAddrsigSection = true; }

  bool getEmitAddrsigSection() { return EmitAddrsigSection; }

  /// Record the given symbol in the address-significance table to be written
  /// during writeObject().
  void addAddrsigSymbol(const MCSymbol *Sym) { AddrsigSyms.push_back(Sym); }

  std::vector<const MCSymbol *> &getAddrsigSyms() { return AddrsigSyms; }

  virtual void addExceptionEntry(const MCSymbol *Symbol, const MCSymbol *Trap,
                                 unsigned LanguageCode, unsigned ReasonCode,
                                 unsigned FunctionSize, bool hasDebug) {
    report_fatal_error("addExceptionEntry is only supported on XCOFF targets");
  }
  virtual void addCInfoSymEntry(StringRef Name, StringRef Metadata) {
    report_fatal_error("addCInfoSymEntry is only supported on XCOFF targets");
  }
  /// Write the object file and return the number of bytes written.
  ///
  /// This routine is called by the assembler after layout and relaxation is
  /// complete, fixups have been evaluated and applied, and relocations
  /// generated.
  virtual uint64_t writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) = 0;

  /// @}
};
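
// Illustrative sketch (not part of the original header): a streamer drives
// the address-significance table through the writer, roughly:
//
//   W.emitAddrsigSection();  // W: MCObjectWriter&, request the table
//   W.addAddrsigSymbol(Sym); // Sym: const MCSymbol*, mark as significant
//   // ... later, writeObject() emits the recorded table.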

/// Base class for classes that define behaviour that is specific to both the
/// target and the object format.
class MCObjectTargetWriter {
public:
  virtual ~MCObjectTargetWriter() = default;
  virtual Triple::ObjectFormatType getFormat() const = 0;
};

} // end namespace llvm

#endif // LLVM_MC_MCOBJECTWRITER_H
//===- MCAsmInfoCOFF.h - COFF asm properties --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFOCOFF_H
#define LLVM_MC_MCASMINFOCOFF_H

#include "llvm/MC/MCAsmInfo.h"

namespace llvm {

class MCAsmInfoCOFF : public MCAsmInfo {
  virtual void anchor();

protected:
  explicit MCAsmInfoCOFF();
};

class MCAsmInfoMicrosoft : public MCAsmInfoCOFF {
  void anchor() override;

protected:
  explicit MCAsmInfoMicrosoft();
};

class MCAsmInfoGNUCOFF : public MCAsmInfoCOFF {
  void anchor() override;

protected:
  explicit MCAsmInfoGNUCOFF();
};

} // end namespace llvm

#endif // LLVM_MC_MCASMINFOCOFF_H
//===- MCSymbol.h - Machine Code Symbols ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCSymbol class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSYMBOL_H
#define LLVM_MC_MCSYMBOL_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringMapEntry.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstddef>
#include <cstdint>

namespace llvm {

class MCAsmInfo;
class MCContext;
class MCSection;
class raw_ostream;

/// MCSymbol - Instances of this class represent a symbol name in the MC file,
/// and MCSymbols are created and uniqued by the MCContext class.  MCSymbols
/// should only be constructed with valid names for the object file.
///
/// If the symbol is defined/emitted into the current translation unit, the
/// Section member is set to indicate what section it lives in.  Otherwise, if
/// it is a reference to an external entity, it has a null section.
class MCSymbol {
protected:
  /// The kind of the symbol.  If it is any value other than unset then this
  /// class is actually one of the appropriate subclasses of MCSymbol.
  enum SymbolKind {
    SymbolKindUnset,
    SymbolKindCOFF,
    SymbolKindELF,
    SymbolKindGOFF,
    SymbolKindMachO,
    SymbolKindWasm,
    SymbolKindXCOFF,
  };

  /// A symbol can contain an Offset, or Value, or be Common, but never more
  /// than one of these.
  enum Contents : uint8_t {
    SymContentsUnset,
    SymContentsOffset,
    SymContentsVariable,
    SymContentsCommon,
    SymContentsTargetCommon, // Index stores the section index
  };

  // Special sentinel value for the absolute pseudo fragment.
  static MCFragment *AbsolutePseudoFragment;

  /// If a symbol has a Fragment, the section is implied, so we only need
  /// one pointer.
  /// The special AbsolutePseudoFragment value is for absolute symbols.
  /// If this is a variable symbol, this caches the variable value's fragment.
  /// FIXME: We might be able to simplify this by having the asm streamer create
  /// dummy fragments.
  /// If this is a section, then it gives the section the symbol is defined in.
  /// This is null for undefined symbols.
  ///
  /// If this is a fragment, then it gives the fragment this symbol's value is
  /// relative to, if any.
  mutable MCFragment *Fragment = nullptr;

  /// True if this symbol is named.  A named symbol will have a pointer to the
  /// name allocated in the bytes immediately prior to the MCSymbol.
  unsigned HasName : 1;

  /// IsTemporary - True if this is an assembler temporary label, which
  /// typically does not survive in the .o file's symbol table.  Usually
  /// "Lfoo" or ".foo".
  unsigned IsTemporary : 1;

  /// True if this symbol can be redefined.
  unsigned IsRedefinable : 1;

  /// IsUsed - True if this symbol has been used.
  mutable unsigned IsUsed : 1;

  mutable unsigned IsRegistered : 1;

  /// True if this symbol is visible outside this translation unit. Note: ELF
  /// uses binding instead of this bit.
  mutable unsigned IsExternal : 1;

  /// This symbol is private extern.
  mutable unsigned IsPrivateExtern : 1;

  /// This symbol is weak external.
  mutable unsigned IsWeakExternal : 1;

  /// LLVM RTTI discriminator. This is actually a SymbolKind enumerator, but is
  /// unsigned to avoid sign extension and achieve better bitpacking with MSVC.
  unsigned Kind : 3;

  /// True if we have created a relocation that uses this symbol.
  mutable unsigned IsUsedInReloc : 1;

  /// This is actually a Contents enumerator, but is unsigned to avoid sign
  /// extension and achieve better bitpacking with MSVC.
  unsigned SymbolContents : 3;

  /// The alignment of the symbol if it is 'common'.
  ///
  /// Internally, this is stored as log2(align) + 1.
  /// We reserve 5 bits to encode this value which allows the following values
  /// 0b00000 -> unset
  /// 0b00001 -> 1ULL <<  0 = 1
  /// 0b00010 -> 1ULL <<  1 = 2
  /// 0b00011 -> 1ULL <<  2 = 4
  /// ...
  /// 0b11111 -> 1ULL << 30 = 1 GiB
  enum : unsigned { NumCommonAlignmentBits = 5 };
  unsigned CommonAlignLog2 : NumCommonAlignmentBits;
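
  // Illustrative example (not part of the original header): with the
  // "log2(align) + 1" encoding above, a 16-byte alignment is stored as
  // Log2(16) + 1 = 5 (0b00101), and decodeMaybeAlign(5) recovers Align(16);
  // the all-zero pattern is reserved for "unset".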

  /// The Flags field is used by object file implementations to store
  /// additional per symbol information which is not easily classified.
  enum : unsigned { NumFlagsBits = 16 };
  mutable uint32_t Flags : NumFlagsBits;

  /// Index field, for use by the object file implementation.
  mutable uint32_t Index = 0;

  union {
    /// The offset to apply to the fragment address to form this symbol's value.
    uint64_t Offset;

    /// The size of the symbol, if it is 'common'.
    uint64_t CommonSize;

    /// If non-null, the value for a variable symbol.
    const MCExpr *Value;
  };

  // MCContext creates and uniques these.
  friend class MCExpr;
  friend class MCContext;

  /// The name for a symbol.
  /// MCSymbol contains a uint64_t so is probably aligned to 8.  On a 32-bit
  /// system, the name is a pointer so isn't going to satisfy the 8 byte
  /// alignment of uint64_t.  Account for that here.
  using NameEntryStorageTy = union {
    const StringMapEntry<bool> *NameEntry;
    uint64_t AlignmentPadding;
  };

  MCSymbol(SymbolKind Kind, const StringMapEntry<bool> *Name, bool isTemporary)
      : IsTemporary(isTemporary), IsRedefinable(false), IsUsed(false),
        IsRegistered(false), IsExternal(false), IsPrivateExtern(false),
        IsWeakExternal(false), Kind(Kind), IsUsedInReloc(false),
        SymbolContents(SymContentsUnset), CommonAlignLog2(0), Flags(0) {
    Offset = 0;
    HasName = !!Name;
    if (Name)
      getNameEntryPtr() = Name;
  }

  // Provide custom new/delete as we will only allocate space for a name
  // if we need one.
  void *operator new(size_t s, const StringMapEntry<bool> *Name,
                     MCContext &Ctx);

private:
  void operator delete(void *);
  /// Placement delete - required by std, but never called.
  void operator delete(void*, unsigned) {
    llvm_unreachable("Constructor throws?");
  }
  /// Placement delete - required by std, but never called.
  void operator delete(void*, unsigned, bool) {
    llvm_unreachable("Constructor throws?");
  }

  /// Get a reference to the name field.  Requires that we have a name
  const StringMapEntry<bool> *&getNameEntryPtr() {
    assert(HasName && "Name is required");
    NameEntryStorageTy *Name = reinterpret_cast<NameEntryStorageTy *>(this);
    return (*(Name - 1)).NameEntry;
  }
  const StringMapEntry<bool> *&getNameEntryPtr() const {
    return const_cast<MCSymbol*>(this)->getNameEntryPtr();
  }

public:
  MCSymbol(const MCSymbol &) = delete;
  MCSymbol &operator=(const MCSymbol &) = delete;

  /// getName - Get the symbol name.
  StringRef getName() const {
    if (!HasName)
      return StringRef();

    return getNameEntryPtr()->first();
  }

  bool isRegistered() const { return IsRegistered; }
  void setIsRegistered(bool Value) const { IsRegistered = Value; }

  void setUsedInReloc() const { IsUsedInReloc = true; }
  bool isUsedInReloc() const { return IsUsedInReloc; }

  /// \name Accessors
  /// @{

  /// isTemporary - Check if this is an assembler temporary symbol.
  bool isTemporary() const { return IsTemporary; }

  /// isUsed - Check if this is used.
  bool isUsed() const { return IsUsed; }

  /// Check if this symbol is redefinable.
  bool isRedefinable() const { return IsRedefinable; }
  /// Mark this symbol as redefinable.
  void setRedefinable(bool Value) { IsRedefinable = Value; }
  /// Prepare this symbol to be redefined.
  void redefineIfPossible() {
    if (IsRedefinable) {
      if (SymbolContents == SymContentsVariable) {
        Value = nullptr;
        SymbolContents = SymContentsUnset;
      }
      setUndefined();
      IsRedefinable = false;
    }
  }

  /// @}
  /// \name Associated Sections
  /// @{

  /// isDefined - Check if this symbol is defined (i.e., it has an address).
  ///
  /// Defined symbols are either absolute or in some section.
  bool isDefined() const { return !isUndefined(); }

  /// isInSection - Check if this symbol is defined in some section (i.e., it
  /// is defined but not absolute).
  bool isInSection() const {
    return isDefined() && !isAbsolute();
  }

  /// isUndefined - Check if this symbol is undefined (i.e., it has no
  /// fragment and therefore no address).
  bool isUndefined(bool SetUsed = true) const {
    return getFragment(SetUsed) == nullptr;
  }

  /// isAbsolute - Check if this is an absolute symbol.
  bool isAbsolute() const {
    return getFragment() == AbsolutePseudoFragment;
  }

  /// Get the section associated with a defined, non-absolute symbol.
  MCSection &getSection() const {
    assert(isInSection() && "Invalid accessor!");
    return *getFragment()->getParent();
  }

  /// Mark the symbol as defined in the fragment \p F.
  void setFragment(MCFragment *F) const {
    assert(!isVariable() && "Cannot set fragment of variable");
    Fragment = F;
  }

  /// Mark the symbol as undefined.
  void setUndefined() { Fragment = nullptr; }

  bool isELF() const { return Kind == SymbolKindELF; }

  bool isCOFF() const { return Kind == SymbolKindCOFF; }

  bool isGOFF() const { return Kind == SymbolKindGOFF; }

  bool isMachO() const { return Kind == SymbolKindMachO; }

  bool isWasm() const { return Kind == SymbolKindWasm; }

  bool isXCOFF() const { return Kind == SymbolKindXCOFF; }

  /// @}
  /// \name Variable Symbols
  /// @{

  /// isVariable - Check if this is a variable symbol.
  bool isVariable() const {
    return SymbolContents == SymContentsVariable;
  }

  /// getVariableValue - Get the value for variable symbols.
  const MCExpr *getVariableValue(bool SetUsed = true) const {
    assert(isVariable() && "Invalid accessor!");
    IsUsed |= SetUsed;
    return Value;
  }

  void setVariableValue(const MCExpr *Value);

  /// @}

  /// Get the (implementation defined) index.
  uint32_t getIndex() const {
    return Index;
  }

  /// Set the (implementation defined) index.
  void setIndex(uint32_t Value) const {
    Index = Value;
  }

  bool isUnset() const { return SymbolContents == SymContentsUnset; }

  uint64_t getOffset() const {
    assert((SymbolContents == SymContentsUnset ||
            SymbolContents == SymContentsOffset) &&
           "Cannot get offset for a common/variable symbol");
    return Offset;
  }
  void setOffset(uint64_t Value) {
    assert((SymbolContents == SymContentsUnset ||
            SymbolContents == SymContentsOffset) &&
           "Cannot set offset for a common/variable symbol");
    Offset = Value;
    SymbolContents = SymContentsOffset;
  }

  /// Return the size of a 'common' symbol.
  uint64_t getCommonSize() const {
    assert(isCommon() && "Not a 'common' symbol!");
    return CommonSize;
  }

  /// Mark this symbol as being 'common'.
  ///
  /// \param Size - The size of the symbol.
  /// \param Alignment - The alignment of the symbol.
  /// \param Target - Is the symbol a target-specific common-like symbol.
  void setCommon(uint64_t Size, Align Alignment, bool Target = false) {
    assert(getOffset() == 0);
    CommonSize = Size;
    SymbolContents = Target ? SymContentsTargetCommon : SymContentsCommon;

    unsigned Log2Align = encode(Alignment);
    assert(Log2Align < (1U << NumCommonAlignmentBits) &&
           "Out of range alignment");
    CommonAlignLog2 = Log2Align;
  }

  ///  Return the alignment of a 'common' symbol.
  MaybeAlign getCommonAlignment() const {
    assert(isCommon() && "Not a 'common' symbol!");
    return decodeMaybeAlign(CommonAlignLog2);
  }

  /// Declare this symbol as being 'common'.
  ///
  /// \param Size - The size of the symbol.
  /// \param Alignment - The alignment of the symbol.
  /// \param Target - Is the symbol a target-specific common-like symbol.
  /// \return True if the symbol was already declared as a different type.
  bool declareCommon(uint64_t Size, Align Alignment, bool Target = false) {
    assert(isCommon() || getOffset() == 0);
    if (isCommon()) {
      if (CommonSize != Size || getCommonAlignment() != Alignment ||
          isTargetCommon() != Target)
        return true;
    } else
      setCommon(Size, Alignment, Target);
    return false;
  }

  /// Is this a 'common' symbol.
  bool isCommon() const {
    return SymbolContents == SymContentsCommon ||
           SymbolContents == SymContentsTargetCommon;
  }

  /// Is this a target-specific common-like symbol.
  bool isTargetCommon() const {
    return SymbolContents == SymContentsTargetCommon;
  }

  MCFragment *getFragment(bool SetUsed = true) const {
    if (Fragment || !isVariable() || isWeakExternal())
      return Fragment;
    // If the symbol is a non-weak alias, get information about
    // the aliasee. (Don't try to resolve weak aliases.)
    Fragment = getVariableValue(SetUsed)->findAssociatedFragment();
    return Fragment;
  }

  bool isExternal() const { return IsExternal; }
  void setExternal(bool Value) const { IsExternal = Value; }

  bool isPrivateExtern() const { return IsPrivateExtern; }
  void setPrivateExtern(bool Value) { IsPrivateExtern = Value; }

  bool isWeakExternal() const { return IsWeakExternal; }

  /// print - Print the value to the stream \p OS.
  void print(raw_ostream &OS, const MCAsmInfo *MAI) const;

  /// dump - Print the value to stderr.
  void dump() const;

protected:
  /// Get the (implementation defined) symbol flags.
  uint32_t getFlags() const { return Flags; }

  /// Set the (implementation defined) symbol flags.
  void setFlags(uint32_t Value) const {
    assert(Value < (1U << NumFlagsBits) && "Out of range flags");
    Flags = Value;
  }

  /// Modify the flags via a mask
  void modifyFlags(uint32_t Value, uint32_t Mask) const {
    assert(Value < (1U << NumFlagsBits) && "Out of range flags");
    Flags = (Flags & ~Mask) | Value;
  }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MCSymbol &Sym) {
  Sym.print(OS, nullptr);
  return OS;
}
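
// Illustrative sketch (not part of the original header): symbols are created
// and uniqued through MCContext, then classified with the accessors above
// (Ctx is a hypothetical MCContext&):
//
//   MCSymbol *S = Ctx.getOrCreateSymbol("foo");
//   if (S->isUndefined())
//     ; // no fragment yet: a reference to an external entity
//   else if (S->isAbsolute())
//     ; // defined, but not in any section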

} // end namespace llvm

#endif // LLVM_MC_MCSYMBOL_H
//===- MCSectionELF.h - ELF Machine Code Sections ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionELF class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONELF_H
#define LLVM_MC_MCSECTIONELF_H

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/SectionKind.h"

namespace llvm {

/// This represents a section on Linux, most Unix variants, and some
/// bare-metal systems.
class MCSectionELF final : public MCSection {
  /// This is the sh_type field of a section, drawn from the enums below.
  unsigned Type;

  /// This is the sh_flags field of a section, drawn from the enums below.
  unsigned Flags;

  unsigned UniqueID;

  /// The size of each entry in this section. This size only makes sense for
  /// sections that contain fixed-sized entries. If a section does not contain
  /// fixed-sized entries 'EntrySize' will be 0.
  unsigned EntrySize;

  /// The section group signature symbol (if not null) and a bool indicating
  /// whether this is a GRP_COMDAT group.
  const PointerIntPair<const MCSymbolELF *, 1, bool> Group;

  /// Used by SHF_LINK_ORDER. If non-null, the sh_link field will be set to the
  /// section header index of the section where LinkedToSym is defined.
  const MCSymbol *LinkedToSym;

private:
  friend class MCContext;

  // The storage of Name is owned by MCContext's ELFUniquingMap.
  MCSectionELF(StringRef Name, unsigned type, unsigned flags, SectionKind K,
               unsigned entrySize, const MCSymbolELF *group, bool IsComdat,
               unsigned UniqueID, MCSymbol *Begin,
               const MCSymbolELF *LinkedToSym)
      : MCSection(SV_ELF, Name, K, Begin), Type(type), Flags(flags),
        UniqueID(UniqueID), EntrySize(entrySize), Group(group, IsComdat),
        LinkedToSym(LinkedToSym) {
    if (Group.getPointer())
      Group.getPointer()->setIsSignature();
  }

  // TODO Delete after we stop supporting generation of GNU-style .zdebug_*
  // sections.
  void setSectionName(StringRef Name) { this->Name = Name; }

public:
  /// Decides whether a '.section' directive should be printed before the
  /// section name
  bool shouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;

  unsigned getType() const { return Type; }
  unsigned getFlags() const { return Flags; }
  unsigned getEntrySize() const { return EntrySize; }
  void setFlags(unsigned F) { Flags = F; }
  const MCSymbolELF *getGroup() const { return Group.getPointer(); }
  bool isComdat() const { return Group.getInt(); }

  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override;
  bool useCodeAlign() const override;
  bool isVirtualSection() const override;
  StringRef getVirtualSectionKind() const override;

  bool isUnique() const { return UniqueID != NonUniqueID; }
  unsigned getUniqueID() const { return UniqueID; }

  const MCSection *getLinkedToSection() const {
    return &LinkedToSym->getSection();
  }
  const MCSymbol *getLinkedToSymbol() const { return LinkedToSym; }

  static bool classof(const MCSection *S) {
    return S->getVariant() == SV_ELF;
  }
};

} // end namespace llvm

#endif // LLVM_MC_MCSECTIONELF_H
//===- MCAsmInfoGOFF.h - GOFF Asm Info Fields -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines certain target specific asm properties for GOFF (z/OS)
/// based targets.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFOGOFF_H
#define LLVM_MC_MCASMINFOGOFF_H

#include "llvm/MC/MCAsmInfo.h"

namespace llvm {
class MCAsmInfoGOFF : public MCAsmInfo {
  virtual void anchor();

protected:
  MCAsmInfoGOFF();
};
} // end namespace llvm

#endif // LLVM_MC_MCASMINFOGOFF_H
//===-- llvm/MC/MCInstrInfo.h - Target Instruction Info ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the target machine instruction set.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINSTRINFO_H
#define LLVM_MC_MCINSTRINFO_H

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCInstrDesc.h"
#include <cassert>

namespace llvm {

class MCSubtargetInfo;

//---------------------------------------------------------------------------
/// Interface to description of machine instruction set.
class MCInstrInfo {
public:
  using ComplexDeprecationPredicate = bool (*)(MCInst &,
                                               const MCSubtargetInfo &,
                                               std::string &);

private:
  const MCInstrDesc *LastDesc;      // Raw array to allow static init'n
  const unsigned *InstrNameIndices; // Array for name indices in InstrNameData
  const char *InstrNameData;        // Instruction name string pool
  // Subtarget feature that an instruction is deprecated on, if any
  // -1 implies this is not deprecated by any single feature. It may still be
  // deprecated due to a "complex" reason, below.
  const uint8_t *DeprecatedFeatures;
  // A complex method to determine if a certain instruction is deprecated or
  // not, and return the reason for deprecation.
  const ComplexDeprecationPredicate *ComplexDeprecationInfos;
  unsigned NumOpcodes;              // Number of entries in the desc array

public:
  /// Initialize MCInstrInfo, called by TableGen auto-generated routines.
  /// *DO NOT USE*.
  void InitMCInstrInfo(const MCInstrDesc *D, const unsigned *NI, const char *ND,
                       const uint8_t *DF,
                       const ComplexDeprecationPredicate *CDI, unsigned NO) {
    LastDesc = D + NO - 1;
    InstrNameIndices = NI;
    InstrNameData = ND;
    DeprecatedFeatures = DF;
    ComplexDeprecationInfos = CDI;
    NumOpcodes = NO;
  }

  unsigned getNumOpcodes() const { return NumOpcodes; }

  /// Return the machine instruction descriptor that corresponds to the
  /// specified instruction opcode.
  const MCInstrDesc &get(unsigned Opcode) const {
    assert(Opcode < NumOpcodes && "Invalid opcode!");
    // The table is indexed backwards from the last entry.
    return *(LastDesc - Opcode);
  }
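
  // Illustrative example (not part of the original header): the descriptor
  // table is laid out in reverse opcode order, so with NumOpcodes == N and a
  // raw array D passed to InitMCInstrInfo, get(0) yields D[N - 1] and
  // get(N - 1) yields D[0].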

  /// Returns the name for the instruction with the given opcode.
  StringRef getName(unsigned Opcode) const {
    assert(Opcode < NumOpcodes && "Invalid opcode!");
    return StringRef(&InstrNameData[InstrNameIndices[Opcode]]);
  }

  /// Returns true if a certain instruction is deprecated and if so
  /// returns the reason in \p Info.
  bool getDeprecatedInfo(MCInst &MI, const MCSubtargetInfo &STI,
                         std::string &Info) const;
};

} // End llvm namespace

#endif
//===- llvm/MC/MCWinCOFFObjectWriter.h - Win COFF Object Writer -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCWINCOFFOBJECTWRITER_H
#define LLVM_MC_MCWINCOFFOBJECTWRITER_H

#include "llvm/MC/MCObjectWriter.h"
#include <memory>

namespace llvm {

class MCAsmBackend;
class MCContext;
class MCFixup;
class MCValue;
class raw_pwrite_stream;

class MCWinCOFFObjectTargetWriter : public MCObjectTargetWriter {
  virtual void anchor();

  const unsigned Machine;

protected:
  MCWinCOFFObjectTargetWriter(unsigned Machine_);

public:
  virtual ~MCWinCOFFObjectTargetWriter() = default;

  Triple::ObjectFormatType getFormat() const override { return Triple::COFF; }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::COFF;
  }

  unsigned getMachine() const { return Machine; }
  virtual unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
                                const MCFixup &Fixup, bool IsCrossSection,
                                const MCAsmBackend &MAB) const = 0;
  virtual bool recordRelocation(const MCFixup &) const { return true; }
};

/// Construct a new Win COFF writer instance.
///
/// \param MOTW - The target specific WinCOFF writer subclass.
/// \param OS - The stream to write to.
/// \returns The constructed object writer.
std::unique_ptr<MCObjectWriter>
createWinCOFFObjectWriter(std::unique_ptr<MCWinCOFFObjectTargetWriter> MOTW,
                          raw_pwrite_stream &OS);

std::unique_ptr<MCObjectWriter>
createWinCOFFDwoObjectWriter(std::unique_ptr<MCWinCOFFObjectTargetWriter> MOTW,
                             raw_pwrite_stream &OS, raw_pwrite_stream &DwoOS);
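
// Illustrative sketch (not part of the original header): a target constructs
// its MCWinCOFFObjectTargetWriter subclass (MyCOFFWriter is hypothetical)
// and hands it to the factory together with the output stream:
//
//   std::unique_ptr<MCObjectWriter> OW = createWinCOFFObjectWriter(
//       std::make_unique<MyCOFFWriter>(COFF::IMAGE_FILE_MACHINE_AMD64), OS);
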
} // end namespace llvm

#endif // LLVM_MC_MCWINCOFFOBJECTWRITER_H
//===- MCStreamer.h - High-level Streaming Machine Code Output --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCStreamer class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSTREAMER_H
#define LLVM_MC_MCSTREAMER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCLinkerOptimizationHint.h"
#include "llvm/MC/MCPseudoProbe.h"
#include "llvm/MC/MCWinEH.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/TargetParser/ARMTargetParser.h"
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class APInt;
class AssemblerConstantPools;
class MCAsmBackend;
class MCAssembler;
class MCContext;
class MCExpr;
class MCFragment;
class MCInst;
class MCInstPrinter;
class MCRegister;
class MCSection;
class MCStreamer;
class MCSubtargetInfo;
class MCSymbol;
class MCSymbolRefExpr;
class Triple;
class Twine;
class raw_ostream;

namespace codeview {
struct DefRangeRegisterRelHeader;
struct DefRangeSubfieldRegisterHeader;
struct DefRangeRegisterHeader;
struct DefRangeFramePointerRelHeader;
}

using MCSectionSubPair = std::pair<MCSection *, const MCExpr *>;

/// Target specific streamer interface. This is used so that targets can
/// implement support for target specific assembly directives.
///
/// If target foo wants to use this, it should implement 3 classes:
/// * FooTargetStreamer : public MCTargetStreamer
/// * FooTargetAsmStreamer : public FooTargetStreamer
/// * FooTargetELFStreamer : public FooTargetStreamer
///
/// FooTargetStreamer should have a pure virtual method for each directive. For
/// example, for a ".bar symbol_name" directive, it should have
/// virtual void emitBar(const MCSymbol &Symbol) = 0;
///
/// The FooTargetAsmStreamer and FooTargetELFStreamer classes implement the
/// method. The assembly streamer just prints ".bar symbol_name". The object
/// streamer does whatever is needed to implement .bar in the object file.
///
/// In the assembly printer and parser the target streamer can be used by
/// calling getTargetStreamer and casting it to FooTargetStreamer:
///
/// MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
/// FooTargetStreamer &ATS = static_cast<FooTargetStreamer &>(TS);
///
/// The base classes FooTargetAsmStreamer and FooTargetELFStreamer should
/// *never* be treated differently. Callers should always talk to a
/// FooTargetStreamer.
class MCTargetStreamer {
protected:
  MCStreamer &Streamer;

public:
  MCTargetStreamer(MCStreamer &S);
  virtual ~MCTargetStreamer();

  MCStreamer &getStreamer() { return Streamer; }

  // Allow a target to add behavior to the EmitLabel of MCStreamer.
  virtual void emitLabel(MCSymbol *Symbol);
  // Allow a target to add behavior to the emitAssignment of MCStreamer.
  virtual void emitAssignment(MCSymbol *Symbol, const MCExpr *Value);

  virtual void prettyPrintAsm(MCInstPrinter &InstPrinter, uint64_t Address,
                              const MCInst &Inst, const MCSubtargetInfo &STI,
                              raw_ostream &OS);

  virtual void emitDwarfFileDirective(StringRef Directive);

  /// Update streamer for a new active section.
  ///
  /// This is called by popSection and switchSection, if the current
  /// section changes.
  virtual void changeSection(const MCSection *CurSection, MCSection *Section,
                             const MCExpr *SubSection, raw_ostream &OS);

  virtual void emitValue(const MCExpr *Value);

  /// Emit the bytes in \p Data into the output.
  ///
  /// This is used to emit bytes in \p Data as sequence of .byte directives.
  virtual void emitRawBytes(StringRef Data);

  virtual void emitConstantPools();

  virtual void finish();
};
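
// Illustrative sketch (not part of the original header) of the pattern the
// comment preceding MCTargetStreamer describes, using the hypothetical Foo
// names from that comment:
//
//   class FooTargetStreamer : public MCTargetStreamer {
//   public:
//     FooTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
//     virtual void emitBar(const MCSymbol &Symbol) = 0; // one per directive
//   };
//   class FooTargetAsmStreamer : public FooTargetStreamer {
//     // overrides emitBar to print ".bar symbol_name"
//   };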

// FIXME: declared here because it is used from
// lib/CodeGen/AsmPrinter/ARMException.cpp.
class ARMTargetStreamer : public MCTargetStreamer {
public:
  ARMTargetStreamer(MCStreamer &S);
  ~ARMTargetStreamer() override;

  virtual void emitFnStart();
  virtual void emitFnEnd();
  virtual void emitCantUnwind();
  virtual void emitPersonality(const MCSymbol *Personality);
  virtual void emitPersonalityIndex(unsigned Index);
  virtual void emitHandlerData();
  virtual void emitSetFP(unsigned FpReg, unsigned SpReg,
                         int64_t Offset = 0);
  virtual void emitMovSP(unsigned Reg, int64_t Offset = 0);
  virtual void emitPad(int64_t Offset);
  virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
                           bool isVector);
  virtual void emitUnwindRaw(int64_t StackOffset,
                             const SmallVectorImpl<uint8_t> &Opcodes);

  virtual void switchVendor(StringRef Vendor);
  virtual void emitAttribute(unsigned Attribute, unsigned Value);
  virtual void emitTextAttribute(unsigned Attribute, StringRef String);
  virtual void emitIntTextAttribute(unsigned Attribute, unsigned IntValue,
                                    StringRef StringValue = "");
  virtual void emitFPU(ARM::FPUKind FPU);
  virtual void emitArch(ARM::ArchKind Arch);
  virtual void emitArchExtension(uint64_t ArchExt);
  virtual void emitObjectArch(ARM::ArchKind Arch);
  void emitTargetAttributes(const MCSubtargetInfo &STI);
  virtual void finishAttributeSection();
  virtual void emitInst(uint32_t Inst, char Suffix = '\0');

  virtual void annotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE);

  virtual void emitThumbSet(MCSymbol *Symbol, const MCExpr *Value);

  void emitConstantPools() override;

  virtual void emitARMWinCFIAllocStack(unsigned Size, bool Wide);
  virtual void emitARMWinCFISaveRegMask(unsigned Mask, bool Wide);
  virtual void emitARMWinCFISaveSP(unsigned Reg);
  virtual void emitARMWinCFISaveFRegs(unsigned First, unsigned Last);
  virtual void emitARMWinCFISaveLR(unsigned Offset);
  virtual void emitARMWinCFIPrologEnd(bool Fragment);
  virtual void emitARMWinCFINop(bool Wide);
  virtual void emitARMWinCFIEpilogStart(unsigned Condition);
  virtual void emitARMWinCFIEpilogEnd();
  virtual void emitARMWinCFICustom(unsigned Opcode);

  /// Reset any state between object emissions, i.e. the equivalent of
  /// MCStreamer's reset method.
  virtual void reset();

  /// Callback used to implement the ldr= pseudo.
  /// Add a new entry to the constant pool for the current section and return an
  /// MCExpr that can be used to refer to the constant pool location.
  const MCExpr *addConstantPoolEntry(const MCExpr *, SMLoc Loc);

  /// Callback used to implement the .ltorg directive.
  /// Emit contents of constant pool for the current section.
  void emitCurrentConstantPool();

private:
  std::unique_ptr<AssemblerConstantPools> ConstantPools;
};

/// Streaming machine code generation interface.
///
/// This interface is intended to provide a programmatic interface that is very
/// similar to the level that an assembler .s file provides.  It has callbacks
/// to emit bytes, handle directives, etc.  The implementation of this interface
/// retains state to know what the current section is etc.
///
/// There are multiple implementations of this interface: one for writing out
/// a .s file, and implementations that write out .o files of various formats.
///
class MCStreamer {
  MCContext &Context;
  std::unique_ptr<MCTargetStreamer> TargetStreamer;

  std::vector<MCDwarfFrameInfo> DwarfFrameInfos;
  // This is a pair of index into DwarfFrameInfos and the MCSection associated
  // with the frame. Note, we use an index instead of an iterator because they
  // can be invalidated in std::vector.
  SmallVector<std::pair<size_t, MCSection *>, 1> FrameInfoStack;
  MCDwarfFrameInfo *getCurrentDwarfFrameInfo();

  /// Similar to DwarfFrameInfos, but for SEH unwind info. Chained frames may
  /// refer to each other, so use std::unique_ptr to provide pointer stability.
  std::vector<std::unique_ptr<WinEH::FrameInfo>> WinFrameInfos;

  WinEH::FrameInfo *CurrentWinFrameInfo;
  size_t CurrentProcWinFrameInfoStartIndex;

  /// Tracks an index to represent the order a symbol was emitted in.
  /// Zero means we did not emit that symbol.
  DenseMap<const MCSymbol *, unsigned> SymbolOrdering;

  /// This is stack of current and previous section values saved by
  /// pushSection.
  SmallVector<std::pair<MCSectionSubPair, MCSectionSubPair>, 4> SectionStack;

  /// Pointer to the parser's SMLoc if available. This is used to provide
  /// locations for diagnostics.
  const SMLoc *StartTokLocPtr = nullptr;

  /// The next unique ID to use when creating a WinCFI-related section (.pdata
  /// or .xdata). This ID ensures that we have a one-to-one mapping from
  /// code section to unwind info section, which MSVC's incremental linker
  /// requires.
  unsigned NextWinCFIID = 0;

  bool UseAssemblerInfoForParsing;

  /// Is the assembler allowed to insert padding automatically?  For
  /// correctness reasons, we sometimes need to ensure instructions aren't
  /// separated in unexpected ways.  At the moment, this feature is only
  /// usable from an integrated assembler, but assembly syntax is under
  /// discussion for future inclusion.
  bool AllowAutoPadding = false;

protected:
  MCStreamer(MCContext &Ctx);

  virtual void emitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
  virtual void emitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame);

  WinEH::FrameInfo *getCurrentWinFrameInfo() {
    return CurrentWinFrameInfo;
  }

  virtual void emitWindowsUnwindTables(WinEH::FrameInfo *Frame);

  virtual void emitWindowsUnwindTables();

  virtual void emitRawTextImpl(StringRef String);

  /// Returns true if the .cv_loc directive is in the right section.
  bool checkCVLocSection(unsigned FuncId, unsigned FileNo, SMLoc Loc);

public:
  MCStreamer(const MCStreamer &) = delete;
  MCStreamer &operator=(const MCStreamer &) = delete;
  virtual ~MCStreamer();

  void visitUsedExpr(const MCExpr &Expr);
  virtual void visitUsedSymbol(const MCSymbol &Sym);

  void setTargetStreamer(MCTargetStreamer *TS) {
    TargetStreamer.reset(TS);
  }

  void setStartTokLocPtr(const SMLoc *Loc) { StartTokLocPtr = Loc; }
  SMLoc getStartTokLoc() const {
    return StartTokLocPtr ? *StartTokLocPtr : SMLoc();
  }

  /// State management
  ///
  virtual void reset();

  MCContext &getContext() const { return Context; }

  virtual MCAssembler *getAssemblerPtr() { return nullptr; }

  void setUseAssemblerInfoForParsing(bool v) { UseAssemblerInfoForParsing = v; }
  bool getUseAssemblerInfoForParsing() { return UseAssemblerInfoForParsing; }

  MCTargetStreamer *getTargetStreamer() {
    return TargetStreamer.get();
  }

  void setAllowAutoPadding(bool v) { AllowAutoPadding = v; }
  bool getAllowAutoPadding() const { return AllowAutoPadding; }

  /// When emitting an object file, create and emit a real label. When emitting
  /// textual assembly, this should do nothing to avoid polluting our output.
  virtual MCSymbol *emitCFILabel();

  /// Retrieve the current frame info if one is available and it is not yet
  /// closed. Otherwise, issue an error and return null.
  WinEH::FrameInfo *EnsureValidWinFrameInfo(SMLoc Loc);

  unsigned getNumFrameInfos();
  ArrayRef<MCDwarfFrameInfo> getDwarfFrameInfos() const;

  bool hasUnfinishedDwarfFrameInfo();

  unsigned getNumWinFrameInfos() { return WinFrameInfos.size(); }
  ArrayRef<std::unique_ptr<WinEH::FrameInfo>> getWinFrameInfos() const {
    return WinFrameInfos;
  }

  void generateCompactUnwindEncodings(MCAsmBackend *MAB);

  /// \name Assembly File Formatting.
  /// @{

  /// Return true if this streamer supports verbose assembly and if it is
  /// enabled.
  virtual bool isVerboseAsm() const { return false; }

  /// Return true if this asm streamer supports emitting unformatted text
  /// to the .s file with EmitRawText.
  virtual bool hasRawTextSupport() const { return false; }

  /// Is the integrated assembler required for this streamer to function
  /// correctly?
  virtual bool isIntegratedAssemblerRequired() const { return false; }

  /// Add a textual comment.
  ///
  /// Typically for comments that can be emitted to the generated .s
  /// file if applicable as a QoI issue to make the output of the compiler
  /// more readable.  This only affects the MCAsmStreamer, and only when
  /// verbose assembly output is enabled.
  ///
  /// If the comment includes embedded \n's, they will each get the comment
  /// prefix as appropriate.  The added comment should not end with a \n.
  /// By default, each comment is terminated with an end of line, i.e. the
  /// EOL param is set to true by default. If one prefers not to end the
  /// comment with a new line then the EOL param should be passed
  /// with a false value.
  virtual void AddComment(const Twine &T, bool EOL = true) {}

  /// Return a raw_ostream that comments can be written to. Unlike
  /// AddComment, you are required to terminate comments with \n if you use this
  /// method.
  virtual raw_ostream &getCommentOS();

  /// Print T and prefix it with the comment string (normally #) and
  /// optionally a tab. This prints the comment immediately, not at the end of
  /// the current line. It is basically a safe version of EmitRawText: since it
  /// only prints comments, the object streamer ignores it instead of asserting.
  virtual void emitRawComment(const Twine &T, bool TabPrefix = true);

  /// Add explicit comment T. T is required to be a valid
  /// comment in the output and does not need to be escaped.
  virtual void addExplicitComment(const Twine &T);

  /// Emit added explicit comments.
  virtual void emitExplicitComments();

  /// Emit a blank line to a .s file to pretty it up.
  virtual void addBlankLine() {}

  /// @}

  /// \name Symbol & Section Management
  /// @{

  /// Return the current section that the streamer is emitting code to.
  MCSectionSubPair getCurrentSection() const {
    if (!SectionStack.empty())
      return SectionStack.back().first;
    return MCSectionSubPair();
  }
  MCSection *getCurrentSectionOnly() const { return getCurrentSection().first; }

  /// Return the previous section that the streamer is emitting code to.
  MCSectionSubPair getPreviousSection() const {
    if (!SectionStack.empty())
      return SectionStack.back().second;
    return MCSectionSubPair();
  }

  /// Returns an index to represent the order a symbol was emitted in.
  /// (zero if we did not emit that symbol)
  unsigned getSymbolOrder(const MCSymbol *Sym) const {
    return SymbolOrdering.lookup(Sym);
  }

  /// Update streamer for a new active section.
  ///
  /// This is called by popSection and switchSection, if the current
  /// section changes.
  virtual void changeSection(MCSection *, const MCExpr *);

  /// Save the current and previous section on the section stack.
  void pushSection() {
    SectionStack.push_back(
        std::make_pair(getCurrentSection(), getPreviousSection()));
  }

  /// Restore the current and previous section from the section stack.
  /// Calls changeSection as needed.
  ///
  /// Returns false if the stack was empty.
  bool popSection() {
    if (SectionStack.size() <= 1)
      return false;
    auto I = SectionStack.end();
    --I;
    MCSectionSubPair OldSection = I->first;
    --I;
    MCSectionSubPair NewSection = I->first;

    if (NewSection.first && OldSection != NewSection)
      changeSection(NewSection.first, NewSection.second);
    SectionStack.pop_back();
    return true;
  }
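
  // Illustrative sketch (not part of the original header): bracketing
  // emission into a scratch section with the section stack (Sec is a
  // hypothetical MCSection*):
  //
  //   S.pushSection();      // save current/previous section
  //   S.switchSection(Sec); // ...emit into Sec...
  //   S.popSection();       // restore, calling changeSection as needed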

  bool subSection(const MCExpr *Subsection) {
    if (SectionStack.empty())
      return false;

    switchSection(SectionStack.back().first.first, Subsection);
    return true;
  }

  /// Set the current section where code is being emitted to \p Section.  This
  /// is required to update CurSection.
  ///
  /// This corresponds to assembler directives like .section, .text, etc.
  virtual void switchSection(MCSection *Section,
                             const MCExpr *Subsection = nullptr);

  /// Set the current section where code is being emitted to \p Section.
  /// This is required to update CurSection. This version does not call
  /// changeSection.
  void switchSectionNoChange(MCSection *Section,
                             const MCExpr *Subsection = nullptr) {
    assert(Section && "Cannot switch to a null section!");
    MCSectionSubPair curSection = SectionStack.back().first;
    SectionStack.back().second = curSection;
    if (MCSectionSubPair(Section, Subsection) != curSection)
      SectionStack.back().first = MCSectionSubPair(Section, Subsection);
  }

  /// Create the default sections and set the initial one.
  virtual void initSections(bool NoExecStack, const MCSubtargetInfo &STI);

  MCSymbol *endSection(MCSection *Section);

  /// Sets the symbol's section.
  ///
  /// Each emitted symbol will be tracked in the ordering table,
  /// so we can sort on them later.
  void assignFragment(MCSymbol *Symbol, MCFragment *Fragment);

  /// Returns the mnemonic for \p MI if the streamer has access to an
  /// instruction printer, and an empty string otherwise.
  virtual StringRef getMnemonic(MCInst &MI) { return ""; }

  /// Emit a label for \p Symbol into the current section.
  ///
  /// This corresponds to an assembler statement such as:
  ///   foo:
  ///
  /// \param Symbol - The symbol to emit. A given symbol should only be
  /// emitted as a label once, and symbols emitted as a label should never be
  /// used in an assignment.
  // FIXME: These emissions are non-const because we mutate the symbol to
  // add the section we're emitting it to later.
  virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc());

  virtual void emitEHSymAttributes(const MCSymbol *Symbol, MCSymbol *EHSymbol);

  /// Note in the output the specified \p Flag.
  virtual void emitAssemblerFlag(MCAssemblerFlag Flag);

  /// Emit the given list \p Options of strings as linker
  /// options into the output.
  virtual void emitLinkerOptions(ArrayRef<std::string> Options) {}

  /// Note in the output the specified region \p Kind.
  virtual void emitDataRegion(MCDataRegionType Kind) {}

  /// Specify the Mach-O minimum deployment target version.
  virtual void emitVersionMin(MCVersionMinType Type, unsigned Major,
                              unsigned Minor, unsigned Update,
                              VersionTuple SDKVersion) {}

  /// Emit/Specify Mach-O build version command.
  /// \p Platform should be one of MachO::PlatformType.
  virtual void emitBuildVersion(unsigned Platform, unsigned Major,
                                unsigned Minor, unsigned Update,
                                VersionTuple SDKVersion) {}

  virtual void emitDarwinTargetVariantBuildVersion(unsigned Platform,
                                                   unsigned Major,
                                                   unsigned Minor,
                                                   unsigned Update,
                                                   VersionTuple SDKVersion) {}

  void emitVersionForTarget(const Triple &Target,
                            const VersionTuple &SDKVersion,
                            const Triple *DarwinTargetVariantTriple,
                            const VersionTuple &DarwinTargetVariantSDKVersion);

  /// Note in the output that the specified \p Func is a Thumb mode
  /// function (ARM target only).
  virtual void emitThumbFunc(MCSymbol *Func);

  /// Emit an assignment of \p Value to \p Symbol.
  ///
  /// This corresponds to an assembler statement such as:
  ///  symbol = value
  ///
  /// The assignment generates no code, but has the side effect of binding the
  /// value in the current context. For the assembly streamer, this prints the
  /// binding into the .s file.
  ///
  /// \param Symbol - The symbol being assigned to.
  /// \param Value - The value for the symbol.
  virtual void emitAssignment(MCSymbol *Symbol, const MCExpr *Value);

  /// Emit an assignment of \p Value to \p Symbol, but only if \p Value is also
  /// emitted.
  virtual void emitConditionalAssignment(MCSymbol *Symbol, const MCExpr *Value);

  /// Emit a weak reference from \p Alias to \p Symbol.
  ///
  /// This corresponds to an assembler statement such as:
  ///  .weakref alias, symbol
  ///
  /// \param Alias - The alias that is being created.
  /// \param Symbol - The symbol being aliased.
  virtual void emitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);

  /// Add the given \p Attribute to \p Symbol.
  virtual bool emitSymbolAttribute(MCSymbol *Symbol,
                                   MCSymbolAttr Attribute) = 0;

  /// Set the \p DescValue for the \p Symbol.
  ///
  /// \param Symbol - The symbol to have its n_desc field set.
  /// \param DescValue - The value to set into the n_desc field.
  virtual void emitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);

  /// Start emitting a COFF symbol definition.
  ///
  /// \param Symbol - The symbol to have its External & Type fields set.
  virtual void beginCOFFSymbolDef(const MCSymbol *Symbol);

  /// Emit the storage class of the symbol.
  ///
  /// \param StorageClass - The storage class the symbol should have.
  virtual void emitCOFFSymbolStorageClass(int StorageClass);

  /// Emit the type of the symbol.
  ///
  /// \param Type - A COFF type identifier (see COFF::SymbolType in X86COFF.h)
  virtual void emitCOFFSymbolType(int Type);

  /// Marks the end of the symbol definition.
  virtual void endCOFFSymbolDef();

  virtual void emitCOFFSafeSEH(MCSymbol const *Symbol);

  /// Emits the symbol table index of a Symbol into the current section.
  virtual void emitCOFFSymbolIndex(MCSymbol const *Symbol);

  /// Emits a COFF section index.
  ///
  /// \param Symbol - Symbol the section number relocation should point to.
  virtual void emitCOFFSectionIndex(MCSymbol const *Symbol);

  /// Emits a COFF section relative relocation.
  ///
  /// \param Symbol - Symbol the section relative relocation should point to.
  virtual void emitCOFFSecRel32(MCSymbol const *Symbol, uint64_t Offset);

  /// Emits a COFF image relative relocation.
  ///
  /// \param Symbol - Symbol the image relative relocation should point to.
  virtual void emitCOFFImgRel32(MCSymbol const *Symbol, int64_t Offset);

  /// Emits an lcomm directive with XCOFF csect information.
  ///
  /// \param LabelSym - Label on the block of storage.
  /// \param Size - The size of the block of storage.
  /// \param CsectSym - Csect name for the block of storage.
  /// \param Alignment - The alignment of the symbol in bytes.
  virtual void emitXCOFFLocalCommonSymbol(MCSymbol *LabelSym, uint64_t Size,
                                          MCSymbol *CsectSym, Align Alignment);

  /// Emit a symbol's linkage and visibility with a linkage directive for XCOFF.
  ///
  /// \param Symbol - The symbol to emit.
  /// \param Linkage - The linkage of the symbol to emit.
  /// \param Visibility - The visibility of the symbol to emit or MCSA_Invalid
  /// if the symbol does not have an explicit visibility.
  virtual void emitXCOFFSymbolLinkageWithVisibility(MCSymbol *Symbol,
                                                    MCSymbolAttr Linkage,
                                                    MCSymbolAttr Visibility);

  /// Emit an XCOFF .rename directive which creates a synonym for an illegal or
  /// undesirable name.
  ///
  /// \param Name - The name used internally in the assembly for references to
  /// the symbol.
  /// \param Rename - The value to which the Name parameter is
  /// changed at the end of assembly.
  virtual void emitXCOFFRenameDirective(const MCSymbol *Name, StringRef Rename);

  /// Emit an XCOFF .except directive which adds information about
  /// a trap instruction to the object file exception section.
  ///
  /// \param Symbol - The function containing the trap.
  /// \param Trap - The symbol for the trap instruction.
  /// \param Lang - The language code for the exception entry.
  /// \param Reason - The reason code for the exception entry.
  virtual void emitXCOFFExceptDirective(const MCSymbol *Symbol,
                                        const MCSymbol *Trap,
                                        unsigned Lang, unsigned Reason,
                                        unsigned FunctionSize, bool hasDebug);

  /// Emit an XCOFF .ref directive which creates an R_REF type entry in the
  /// relocation table for one or more symbols.
  ///
  /// \param Symbol - The symbol on the .ref directive.
  virtual void emitXCOFFRefDirective(const MCSymbol *Symbol);

  /// Emit a C_INFO symbol with XCOFF embedded metadata to the .info section.
  ///
  /// \param Name - The embedded metadata name.
  /// \param Metadata - The embedded metadata.
  virtual void emitXCOFFCInfoSym(StringRef Name, StringRef Metadata);

  /// Emit an ELF .size directive.
  ///
  /// This corresponds to an assembler statement such as:
  ///  .size symbol, expression
  virtual void emitELFSize(MCSymbol *Symbol, const MCExpr *Value);

  /// Emit an ELF .symver directive.
  ///
  /// This corresponds to an assembler statement such as:
  ///  .symver _start, foo@@SOME_VERSION
  virtual void emitELFSymverDirective(const MCSymbol *OriginalSym,
                                      StringRef Name, bool KeepOriginalSym);

  /// Emit a Linker Optimization Hint (LOH) directive.
  /// \param Args - Arguments of the LOH.
  virtual void emitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) {}

  /// Emit a .gnu_attribute directive.
  virtual void emitGNUAttribute(unsigned Tag, unsigned Value) {}

  /// Emit a common symbol.
  ///
  /// \param Symbol - The common symbol to emit.
  /// \param Size - The size of the common symbol.
  /// \param ByteAlignment - The alignment of the symbol.
  virtual void emitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                                Align ByteAlignment) = 0;

  /// Emit a local common (.lcomm) symbol.
  ///
  /// \param Symbol - The common symbol to emit.
  /// \param Size - The size of the common symbol.
  /// \param ByteAlignment - The alignment of the common symbol in bytes.
  virtual void emitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                                     Align ByteAlignment);

  /// Emit the zerofill section and an optional symbol.
  ///
  /// \param Section - The zerofill section to create and/or to put the symbol in.
  /// \param Symbol - The zerofill symbol to emit, if non-NULL.
  /// \param Size - The size of the zerofill symbol.
  /// \param ByteAlignment - The alignment of the zerofill symbol.
  virtual void emitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
                            uint64_t Size = 0, Align ByteAlignment = Align(1),
                            SMLoc Loc = SMLoc()) = 0;

  /// Emit a thread local bss (.tbss) symbol.
  ///
  /// \param Section - The thread local common section.
  /// \param Symbol - The thread local common symbol to emit.
  /// \param Size - The size of the symbol.
  /// \param ByteAlignment - The alignment of the thread local common symbol.
  virtual void emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol,
                              uint64_t Size, Align ByteAlignment = Align(1));

  /// @}
  /// \name Generating Data
  /// @{

  /// Emit the bytes in \p Data into the output.
  ///
  /// This is used to implement assembler directives such as .byte, .ascii,
  /// etc.
  virtual void emitBytes(StringRef Data);

  /// Functionally identical to emitBytes. When emitting textual assembly, this
  /// method uses .byte directives instead of .ascii or .asciz for readability.
  virtual void emitBinaryData(StringRef Data);

  /// Emit the expression \p Value into the output as a native
  /// integer of the given \p Size bytes.
  ///
  /// This is used to implement assembler directives such as .word, .quad,
  /// etc.
  ///
  /// \param Value - The value to emit.
  /// \param Size - The size of the integer (in bytes) to emit. This must
  /// match a native machine width.
  /// \param Loc - The location of the expression for error reporting.
  virtual void emitValueImpl(const MCExpr *Value, unsigned Size,
                             SMLoc Loc = SMLoc());

  void emitValue(const MCExpr *Value, unsigned Size, SMLoc Loc = SMLoc());

  /// Special case of emitValue that avoids the client having
  /// to pass in an MCExpr for constant integers.
  virtual void emitIntValue(uint64_t Value, unsigned Size);
  virtual void emitIntValue(APInt Value);

  /// Special case of emitValue that avoids the client having to pass
  /// in an MCExpr for constant integers, and prints in hex format for
  /// certain modes.
  virtual void emitIntValueInHex(uint64_t Value, unsigned Size) {
    emitIntValue(Value, Size);
  }

  void emitInt8(uint64_t Value) { emitIntValue(Value, 1); }
  void emitInt16(uint64_t Value) { emitIntValue(Value, 2); }
  void emitInt32(uint64_t Value) { emitIntValue(Value, 4); }
  void emitInt64(uint64_t Value) { emitIntValue(Value, 8); }
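
  // Illustrative sketch (assumed name `S` for an MCStreamer): emitting a
  // small fixed-layout record with the helpers above, roughly equivalent to
  // the textual directives ".byte 2" followed by ".long 1000":
  //
  //   S.emitInt8(2);      // one-byte version tag
  //   S.emitInt32(1000);  // four-byte payload, in the target's endianness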

  /// Special case of emitValue that avoids the client having to pass
  /// in an MCExpr for constant integers, prints in hex format for certain
  /// modes, and pads the field with leading zeros to \p Size width.
  virtual void emitIntValueInHexWithPadding(uint64_t Value, unsigned Size) {
    emitIntValue(Value, Size);
  }

  virtual void emitULEB128Value(const MCExpr *Value);

  virtual void emitSLEB128Value(const MCExpr *Value);

  /// Special case of emitULEB128Value that avoids the client having to
  /// pass in an MCExpr for constant integers.
  unsigned emitULEB128IntValue(uint64_t Value, unsigned PadTo = 0);

  /// Special case of emitSLEB128Value that avoids the client having to
  /// pass in an MCExpr for constant integers.
  unsigned emitSLEB128IntValue(int64_t Value);
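
  // Worked example: emitULEB128IntValue(624485) emits the bytes 0xE5 0x8E
  // 0x26: the value split into 7-bit groups, least significant group first,
  // with the high bit set on every byte except the last. Likewise,
  // emitSLEB128IntValue(-123456) emits 0xC0 0xBB 0x78 (sign-extended form).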

  /// Special case of emitValue that avoids the client having to pass in
  /// an MCExpr for MCSymbols.
  void emitSymbolValue(const MCSymbol *Sym, unsigned Size,
                       bool IsSectionRelative = false);

  /// Emit the expression \p Value into the output as a dtprel
  /// (64-bit DTP relative) value.
  ///
  /// This is used to implement assembler directives such as .dtpreldword on
  /// targets that support them.
  virtual void emitDTPRel64Value(const MCExpr *Value);

  /// Emit the expression \p Value into the output as a dtprel
  /// (32-bit DTP relative) value.
  ///
  /// This is used to implement assembler directives such as .dtprelword on
  /// targets that support them.
  virtual void emitDTPRel32Value(const MCExpr *Value);

  /// Emit the expression \p Value into the output as a tprel
  /// (64-bit TP relative) value.
  ///
  /// This is used to implement assembler directives such as .tpreldword on
  /// targets that support them.
  virtual void emitTPRel64Value(const MCExpr *Value);

  /// Emit the expression \p Value into the output as a tprel
  /// (32-bit TP relative) value.
  ///
  /// This is used to implement assembler directives such as .tprelword on
  /// targets that support them.
  virtual void emitTPRel32Value(const MCExpr *Value);

  /// Emit the expression \p Value into the output as a gprel64 (64-bit
  /// GP relative) value.
  ///
  /// This is used to implement assembler directives such as .gpdword on
  /// targets that support them.
  virtual void emitGPRel64Value(const MCExpr *Value);

  /// Emit the expression \p Value into the output as a gprel32 (32-bit
  /// GP relative) value.
  ///
  /// This is used to implement assembler directives such as .gprel32 on
  /// targets that support them.
  virtual void emitGPRel32Value(const MCExpr *Value);

  /// Emit NumBytes bytes worth of the value specified by FillValue.
  /// This implements directives such as '.space'.
  void emitFill(uint64_t NumBytes, uint8_t FillValue);

  /// Emit \p Size bytes worth of the value specified by \p FillValue.
  ///
  /// This is used to implement assembler directives such as .space or .skip.
  ///
  /// \param NumBytes - The number of bytes to emit.
  /// \param FillValue - The value to use when filling bytes.
  /// \param Loc - The location of the expression for error reporting.
  virtual void emitFill(const MCExpr &NumBytes, uint64_t FillValue,
                        SMLoc Loc = SMLoc());

  /// Emit \p NumValues copies of \p Size bytes. Each \p Size bytes is
  /// taken from the lowest order 4 bytes of the \p Expr expression.
  ///
  /// This is used to implement assembler directives such as .fill.
  ///
  /// \param NumValues - The number of copies of \p Size bytes to emit.
  /// \param Size - The size (in bytes) of each repeated value.
  /// \param Expr - The expression from which \p Size bytes are used.
  virtual void emitFill(const MCExpr &NumValues, int64_t Size, int64_t Expr,
                        SMLoc Loc = SMLoc());

  virtual void emitNops(int64_t NumBytes, int64_t ControlledNopLength,
                        SMLoc Loc, const MCSubtargetInfo& STI);

  /// Emit NumBytes worth of zeros.
  /// This function properly handles data in virtual sections.
  void emitZeros(uint64_t NumBytes);
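
  // Illustrative sketch (assumed name `S` for an MCStreamer) of the fill
  // helpers above, mirroring the corresponding assembler directives:
  //
  //   S.emitFill(16, 0x90);  // 16 bytes of 0x90, like ".space 16, 0x90"
  //   S.emitZeros(64);       // 64 zero bytes, safe in virtual sections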

  /// Emit some number of copies of \p Value until the byte alignment \p
  /// Alignment is reached.
  ///
  /// If the number of bytes needed to reach the alignment is not a multiple
  /// of \p ValueSize, then the contents of the emitted fill bytes are
  /// undefined.
  ///
  /// This is used to implement the .align assembler directive.
  ///
  /// \param Alignment - The alignment to reach.
  /// \param Value - The value to use when filling bytes.
  /// \param ValueSize - The size of the integer (in bytes) to emit for
  /// \p Value. This must match a native machine width.
  /// \param MaxBytesToEmit - The maximum numbers of bytes to emit, or 0. If
  /// the alignment cannot be reached in this many bytes, no bytes are
  /// emitted.
  virtual void emitValueToAlignment(Align Alignment, int64_t Value = 0,
                                    unsigned ValueSize = 1,
                                    unsigned MaxBytesToEmit = 0);

  /// Emit nops until the byte alignment \p Alignment is reached.
  ///
  /// This is used to align code where the alignment bytes may be executed.
  /// It can emit different bytes for different sizes to optimize execution.
  ///
  /// \param Alignment - The alignment to reach.
  /// \param STI - The MCSubtargetInfo in operation when padding is emitted.
  /// \param MaxBytesToEmit - The maximum numbers of bytes to emit, or 0. If
  /// the alignment cannot be reached in this many bytes, no bytes are
  /// emitted.
  virtual void emitCodeAlignment(Align Alignment, const MCSubtargetInfo *STI,
                                 unsigned MaxBytesToEmit = 0);
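
  // Illustrative sketch (assumed names `S` and `STI`): padding data with a
  // fill value versus padding code with target nops:
  //
  //   S.emitValueToAlignment(Align(8));      // zero-fill up to 8 bytes
  //   S.emitCodeAlignment(Align(16), &STI);  // nop-fill up to 16 bytes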

  /// Emit some number of copies of \p Value until the byte offset \p
  /// Offset is reached.
  ///
  /// This is used to implement assembler directives such as .org.
  ///
  /// \param Offset - The offset to reach. This may be an expression, but the
  /// expression must be associated with the current section.
  /// \param Value - The value to use when filling bytes.
  virtual void emitValueToOffset(const MCExpr *Offset, unsigned char Value,
                                 SMLoc Loc);

  /// @}

  /// Switch to a new logical file.  This is used to implement the '.file
  /// "foo.c"' assembler directive.
  virtual void emitFileDirective(StringRef Filename);

  /// Emit ".file assembler diretive with additioal info.
  virtual void emitFileDirective(StringRef Filename, StringRef CompilerVerion,
                                 StringRef TimeStamp, StringRef Description);

  /// Emit the "identifiers" directive.  This implements the
  /// '.ident "version foo"' assembler directive.
  virtual void emitIdent(StringRef IdentString) {}

  /// Associate a filename with a specified logical file number.  This
  /// implements the DWARF2 '.file 4 "foo.c"' assembler directive.
  unsigned emitDwarfFileDirective(
      unsigned FileNo, StringRef Directory, StringRef Filename,
      std::optional<MD5::MD5Result> Checksum = std::nullopt,
      std::optional<StringRef> Source = std::nullopt, unsigned CUID = 0) {
    return cantFail(
        tryEmitDwarfFileDirective(FileNo, Directory, Filename, Checksum,
                                  Source, CUID));
  }
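
  // Illustrative sketch (assumed name `S` and file names): registering
  // "dir/foo.c" as DWARF file number 1, roughly the directive
  // `.file 1 "dir" "foo.c"`:
  //
  //   unsigned FileNo = S.emitDwarfFileDirective(1, "dir", "foo.c");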

  /// Associate a filename with a specified logical file number.
  /// Also associate a directory, optional checksum, and optional source
  /// text with the logical file.  This implements the DWARF2
  /// '.file 4 "dir/foo.c"' assembler directive, and the DWARF5
  /// '.file 4 "dir/foo.c" md5 "..." source "..."' assembler directive.
  virtual Expected<unsigned> tryEmitDwarfFileDirective(
      unsigned FileNo, StringRef Directory, StringRef Filename,
      std::optional<MD5::MD5Result> Checksum = std::nullopt,
      std::optional<StringRef> Source = std::nullopt, unsigned CUID = 0);

  /// Specify the "root" file of the compilation, using the ".file 0" extension.
  virtual void emitDwarfFile0Directive(StringRef Directory, StringRef Filename,
                                       std::optional<MD5::MD5Result> Checksum,
                                       std::optional<StringRef> Source,
                                       unsigned CUID = 0);

  virtual void emitCFIBKeyFrame();
  virtual void emitCFIMTETaggedFrame();

  /// This implements the DWARF2 '.loc fileno lineno ...' assembler
  /// directive.
  virtual void emitDwarfLocDirective(unsigned FileNo, unsigned Line,
                                     unsigned Column, unsigned Flags,
                                     unsigned Isa, unsigned Discriminator,
                                     StringRef FileName);

  /// Associate a filename with a specified logical file number, and also
  /// specify that file's checksum information.  This implements the '.cv_file 4
  /// "foo.c"' assembler directive. Returns true on success.
  virtual bool emitCVFileDirective(unsigned FileNo, StringRef Filename,
                                   ArrayRef<uint8_t> Checksum,
                                   unsigned ChecksumKind);

  /// Introduces a function id for use with .cv_loc.
  virtual bool emitCVFuncIdDirective(unsigned FunctionId);

  /// Introduces an inline call site id for use with .cv_loc. Includes
  /// extra information for inline line table generation.
  virtual bool emitCVInlineSiteIdDirective(unsigned FunctionId, unsigned IAFunc,
                                           unsigned IAFile, unsigned IALine,
                                           unsigned IACol, SMLoc Loc);

  /// This implements the CodeView '.cv_loc' assembler directive.
  virtual void emitCVLocDirective(unsigned FunctionId, unsigned FileNo,
                                  unsigned Line, unsigned Column,
                                  bool PrologueEnd, bool IsStmt,
                                  StringRef FileName, SMLoc Loc);

  /// This implements the CodeView '.cv_linetable' assembler directive.
  virtual void emitCVLinetableDirective(unsigned FunctionId,
                                        const MCSymbol *FnStart,
                                        const MCSymbol *FnEnd);

  /// This implements the CodeView '.cv_inline_linetable' assembler
  /// directive.
  virtual void emitCVInlineLinetableDirective(unsigned PrimaryFunctionId,
                                              unsigned SourceFileId,
                                              unsigned SourceLineNum,
                                              const MCSymbol *FnStartSym,
                                              const MCSymbol *FnEndSym);

  /// This implements the CodeView '.cv_def_range' assembler
  /// directive.
  virtual void emitCVDefRangeDirective(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      StringRef FixedSizePortion);

  virtual void emitCVDefRangeDirective(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      codeview::DefRangeRegisterRelHeader DRHdr);

  virtual void emitCVDefRangeDirective(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      codeview::DefRangeSubfieldRegisterHeader DRHdr);

  virtual void emitCVDefRangeDirective(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      codeview::DefRangeRegisterHeader DRHdr);

  virtual void emitCVDefRangeDirective(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      codeview::DefRangeFramePointerRelHeader DRHdr);

  /// This implements the CodeView '.cv_stringtable' assembler directive.
  virtual void emitCVStringTableDirective() {}

  /// This implements the CodeView '.cv_filechecksums' assembler directive.
  virtual void emitCVFileChecksumsDirective() {}

  /// This implements the CodeView '.cv_filechecksumoffset' assembler
  /// directive.
  virtual void emitCVFileChecksumOffsetDirective(unsigned FileNo) {}

  /// This implements the CodeView '.cv_fpo_data' assembler directive.
  virtual void emitCVFPOData(const MCSymbol *ProcSym, SMLoc Loc = {}) {}

  /// Emit the absolute difference between two symbols.
  ///
  /// \pre Offset of \c Hi is greater than the offset of \c Lo.
  virtual void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
                                      unsigned Size);

  /// Emit the absolute difference between two symbols encoded with ULEB128.
  virtual void emitAbsoluteSymbolDiffAsULEB128(const MCSymbol *Hi,
                                               const MCSymbol *Lo);
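
  // Illustrative sketch (assumed labels `Begin` and `End` delimiting a
  // function): emitting the function's byte size as a 32-bit field:
  //
  //   S.emitAbsoluteSymbolDiff(End, Begin, 4);  // emits End - Begin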

  virtual MCSymbol *getDwarfLineTableSymbol(unsigned CUID);
  virtual void emitCFISections(bool EH, bool Debug);
  void emitCFIStartProc(bool IsSimple, SMLoc Loc = SMLoc());
  void emitCFIEndProc();
  virtual void emitCFIDefCfa(int64_t Register, int64_t Offset, SMLoc Loc = {});
  virtual void emitCFIDefCfaOffset(int64_t Offset, SMLoc Loc = {});
  virtual void emitCFIDefCfaRegister(int64_t Register, SMLoc Loc = {});
  virtual void emitCFILLVMDefAspaceCfa(int64_t Register, int64_t Offset,
                                       int64_t AddressSpace, SMLoc Loc = {});
  virtual void emitCFIOffset(int64_t Register, int64_t Offset, SMLoc Loc = {});
  virtual void emitCFIPersonality(const MCSymbol *Sym, unsigned Encoding);
  virtual void emitCFILsda(const MCSymbol *Sym, unsigned Encoding);
  virtual void emitCFIRememberState(SMLoc Loc);
  virtual void emitCFIRestoreState(SMLoc Loc);
  virtual void emitCFISameValue(int64_t Register, SMLoc Loc = {});
  virtual void emitCFIRestore(int64_t Register, SMLoc Loc = {});
  virtual void emitCFIRelOffset(int64_t Register, int64_t Offset, SMLoc Loc);
  virtual void emitCFIAdjustCfaOffset(int64_t Adjustment, SMLoc Loc = {});
  virtual void emitCFIEscape(StringRef Values, SMLoc Loc = {});
  virtual void emitCFIReturnColumn(int64_t Register);
  virtual void emitCFIGnuArgsSize(int64_t Size, SMLoc Loc = {});
  virtual void emitCFISignalFrame();
  virtual void emitCFIUndefined(int64_t Register, SMLoc Loc = {});
  virtual void emitCFIRegister(int64_t Register1, int64_t Register2,
                               SMLoc Loc = {});
  virtual void emitCFIWindowSave(SMLoc Loc = {});
  virtual void emitCFINegateRAState(SMLoc Loc = {});

  virtual void emitWinCFIStartProc(const MCSymbol *Symbol, SMLoc Loc = SMLoc());
  virtual void emitWinCFIEndProc(SMLoc Loc = SMLoc());
  /// This is used on platforms, such as Windows on ARM64, that require function
  /// or funclet sizes to be emitted in .xdata before the End marker is emitted
  /// for the frame.  We cannot use the End marker, as it is not set at the
  /// point of emitting .xdata, in order to indicate that the frame is active.
  virtual void emitWinCFIFuncletOrFuncEnd(SMLoc Loc = SMLoc());
  virtual void emitWinCFIStartChained(SMLoc Loc = SMLoc());
  virtual void emitWinCFIEndChained(SMLoc Loc = SMLoc());
  virtual void emitWinCFIPushReg(MCRegister Register, SMLoc Loc = SMLoc());
  virtual void emitWinCFISetFrame(MCRegister Register, unsigned Offset,
                                  SMLoc Loc = SMLoc());
  virtual void emitWinCFIAllocStack(unsigned Size, SMLoc Loc = SMLoc());
  virtual void emitWinCFISaveReg(MCRegister Register, unsigned Offset,
                                 SMLoc Loc = SMLoc());
  virtual void emitWinCFISaveXMM(MCRegister Register, unsigned Offset,
                                 SMLoc Loc = SMLoc());
  virtual void emitWinCFIPushFrame(bool Code, SMLoc Loc = SMLoc());
  virtual void emitWinCFIEndProlog(SMLoc Loc = SMLoc());
  virtual void emitWinEHHandler(const MCSymbol *Sym, bool Unwind, bool Except,
                                SMLoc Loc = SMLoc());
  virtual void emitWinEHHandlerData(SMLoc Loc = SMLoc());

  virtual void emitCGProfileEntry(const MCSymbolRefExpr *From,
                                  const MCSymbolRefExpr *To, uint64_t Count);

  /// Get the .pdata section used for the given section. Typically the given
  /// section is either the main .text section or some other COMDAT .text
  /// section, but it may be any section containing code.
  MCSection *getAssociatedPDataSection(const MCSection *TextSec);

  /// Get the .xdata section used for the given section.
  MCSection *getAssociatedXDataSection(const MCSection *TextSec);

  virtual void emitSyntaxDirective();

  /// Record a relocation described by the .reloc directive. Return
  /// std::nullopt on success. Otherwise, return a pair (whether the Name is
  /// invalid, error message).
  virtual std::optional<std::pair<bool, std::string>>
  emitRelocDirective(const MCExpr &Offset, StringRef Name, const MCExpr *Expr,
                     SMLoc Loc, const MCSubtargetInfo &STI) {
    return std::nullopt;
  }

  virtual void emitAddrsig() {}
  virtual void emitAddrsigSym(const MCSymbol *Sym) {}

  /// Emit the given \p Instruction into the current section.
  virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI);

  /// Emit a pseudo probe into the current section.
  virtual void emitPseudoProbe(uint64_t Guid, uint64_t Index, uint64_t Type,
                               uint64_t Attr, uint64_t Discriminator,
                               const MCPseudoProbeInlineStack &InlineStack,
                               MCSymbol *FnSym);

  /// Set the bundle alignment mode from now on in the section.
  /// An alignment of 1 turns bundle alignment off.
  virtual void emitBundleAlignMode(Align Alignment);

  /// The following instructions are a bundle-locked group.
  ///
  /// \param AlignToEnd - If true, the bundle-locked group will be aligned to
  ///                     the end of a bundle.
  virtual void emitBundleLock(bool AlignToEnd);

  /// Ends a bundle-locked group.
  virtual void emitBundleUnlock();

  /// If this file is backed by an assembly streamer, this dumps the
  /// specified string in the output .s file.  This capability is indicated by
  /// the hasRawTextSupport() predicate.  By default this aborts.
  void emitRawText(const Twine &String);

  /// Streamer specific finalization.
  virtual void finishImpl();
  /// Finish emission of machine code.
  void finish(SMLoc EndLoc = SMLoc());

  virtual bool mayHaveInstructions(MCSection &Sec) const { return true; }

  /// Emit a special value of 0xffffffff if producing 64-bit debugging info.
  void maybeEmitDwarf64Mark();

  /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
  /// according to the settings.
  virtual void emitDwarfUnitLength(uint64_t Length, const Twine &Comment);

  /// Emit a unit length field. The actual format, DWARF32 or DWARF64, is chosen
  /// according to the settings.
  /// Returns the end symbol generated inside; the caller needs to emit it.
  virtual MCSymbol *emitDwarfUnitLength(const Twine &Prefix,
                                        const Twine &Comment);

  /// Emit the debug line start label.
  virtual void emitDwarfLineStartLabel(MCSymbol *StartSym);

  /// Emit the debug line end entry.
  virtual void emitDwarfLineEndEntry(MCSection *Section, MCSymbol *LastLabel) {}

  /// If the target does not support representing the debug line section by
  /// .loc/.file directives in assembly output, we need to populate the debug
  /// line section with raw debug line contents.
  virtual void emitDwarfAdvanceLineAddr(int64_t LineDelta,
                                        const MCSymbol *LastLabel,
                                        const MCSymbol *Label,
                                        unsigned PointerSize) {}

  /// Do finalization for the streamer at the end of a section.
  virtual void doFinalizationAtSectionEnd(MCSection *Section) {}
};

/// Create a dummy machine code streamer, which does nothing. This is useful for
/// timing the assembler front end.
MCStreamer *createNullStreamer(MCContext &Ctx);

} // end namespace llvm

#endif // LLVM_MC_MCSTREAMER_H
//===- MCSectionSPIRV.h - SPIR-V Machine Code Sections ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionSPIRV class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONSPIRV_H
#define LLVM_MC_MCSECTIONSPIRV_H

#include "llvm/MC/MCSection.h"
#include "llvm/MC/SectionKind.h"

namespace llvm {

class MCSymbol;

class MCSectionSPIRV final : public MCSection {
  friend class MCContext;

  MCSectionSPIRV(SectionKind K, MCSymbol *Begin)
      : MCSection(SV_SPIRV, "", K, Begin) {}
  // TODO: Add StringRef Name to MCSectionSPIRV.

public:
  ~MCSectionSPIRV() = default;
  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override {}
  bool useCodeAlign() const override { return false; }
  bool isVirtualSection() const override { return false; }
};

} // end namespace llvm

#endif // LLVM_MC_MCSECTIONSPIRV_H
//===- MCELFStreamer.h - MCStreamer ELF Object File Interface ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCELFSTREAMER_H
#define LLVM_MC_MCELFSTREAMER_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCObjectStreamer.h"

namespace llvm {

class MCContext;
class MCDataFragment;
class MCFragment;
class MCObjectWriter;
class MCSection;
class MCSubtargetInfo;
class MCSymbol;
class MCSymbolRefExpr;
class MCAsmBackend;
class MCCodeEmitter;
class MCExpr;
class MCInst;

class MCELFStreamer : public MCObjectStreamer {
public:
  MCELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
                std::unique_ptr<MCObjectWriter> OW,
                std::unique_ptr<MCCodeEmitter> Emitter);

  ~MCELFStreamer() override = default;

  /// state management
  void reset() override {
    SeenIdent = false;
    BundleGroups.clear();
    MCObjectStreamer::reset();
  }

  /// \name MCStreamer Interface
  /// @{

  void initSections(bool NoExecStack, const MCSubtargetInfo &STI) override;
  void changeSection(MCSection *Section, const MCExpr *Subsection) override;
  void emitLabel(MCSymbol *Symbol, SMLoc Loc = SMLoc()) override;
  void emitLabelAtPos(MCSymbol *Symbol, SMLoc Loc, MCFragment *F,
                      uint64_t Offset) override;
  void emitAssemblerFlag(MCAssemblerFlag Flag) override;
  void emitThumbFunc(MCSymbol *Func) override;
  void emitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
  bool emitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
  void emitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
  void emitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                        Align ByteAlignment) override;

  void emitELFSize(MCSymbol *Symbol, const MCExpr *Value) override;
  void emitELFSymverDirective(const MCSymbol *OriginalSym, StringRef Name,
                              bool KeepOriginalSym) override;

  void emitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
                             Align ByteAlignment) override;

  void emitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
                    uint64_t Size = 0, Align ByteAlignment = Align(1),
                    SMLoc L = SMLoc()) override;
  void emitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
                      Align ByteAlignment = Align(1)) override;
  void emitValueImpl(const MCExpr *Value, unsigned Size,
                     SMLoc Loc = SMLoc()) override;

  void emitIdent(StringRef IdentString) override;

  void emitValueToAlignment(Align, int64_t, unsigned, unsigned) override;

  void emitCGProfileEntry(const MCSymbolRefExpr *From,
                          const MCSymbolRefExpr *To, uint64_t Count) override;

  void finishImpl() override;

  void emitBundleAlignMode(Align Alignment) override;
  void emitBundleLock(bool AlignToEnd) override;
  void emitBundleUnlock() override;

  /// ELF object attributes section emission support
  struct AttributeItem {
    // This structure holds all attributes, accounting for their string /
    // numeric value, so we can later emit them in declaration order, keeping
    // all in the same vector.
    enum {
      HiddenAttribute = 0,
      NumericAttribute,
      TextAttribute,
      NumericAndTextAttributes
    } Type;
    unsigned Tag;
    unsigned IntValue;
    std::string StringValue;
  };

  // Attributes that are added and managed entirely by target.
  SmallVector<AttributeItem, 64> Contents;
  void setAttributeItem(unsigned Attribute, unsigned Value,
                        bool OverwriteExisting);
  void setAttributeItem(unsigned Attribute, StringRef Value,
                        bool OverwriteExisting);
  void setAttributeItems(unsigned Attribute, unsigned IntValue,
                         StringRef StringValue, bool OverwriteExisting);
  void emitAttributesSection(StringRef Vendor, const Twine &Section,
                             unsigned Type, MCSection *&AttributeSection) {
    createAttributesSection(Vendor, Section, Type, AttributeSection, Contents);
  }

private:
  AttributeItem *getAttributeItem(unsigned Attribute);
  size_t calculateContentSize(SmallVector<AttributeItem, 64> &AttrsVec);
  void createAttributesSection(StringRef Vendor, const Twine &Section,
                               unsigned Type, MCSection *&AttributeSection,
                               SmallVector<AttributeItem, 64> &AttrsVec);

  // GNU attributes that will get emitted at the end of the asm file.
  SmallVector<AttributeItem, 64> GNUAttributes;

public:
  void emitGNUAttribute(unsigned Tag, unsigned Value) override {
    AttributeItem Item = {AttributeItem::NumericAttribute, Tag, Value,
                          std::string(StringRef(""))};
    GNUAttributes.push_back(Item);
  }

private:
  bool isBundleLocked() const;
  void emitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &) override;
  void emitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;

  void fixSymbolsInTLSFixups(const MCExpr *expr);
  void finalizeCGProfileEntry(const MCSymbolRefExpr *&S, uint64_t Offset);
  void finalizeCGProfile();

  /// Merge the content of the fragment \p EF into the fragment \p DF.
  void mergeFragment(MCDataFragment *, MCDataFragment *);

  bool SeenIdent = false;

  /// BundleGroups - The stack of fragments holding the bundle-locked
  /// instructions.
  SmallVector<MCDataFragment *, 4> BundleGroups;
};

MCELFStreamer *createARMELFStreamer(MCContext &Context,
                                    std::unique_ptr<MCAsmBackend> TAB,
                                    std::unique_ptr<MCObjectWriter> OW,
                                    std::unique_ptr<MCCodeEmitter> Emitter,
                                    bool RelaxAll, bool IsThumb, bool IsAndroid);

} // end namespace llvm

#endif // LLVM_MC_MCELFSTREAMER_H
//===-- llvm/MC/MCExternalSymbolizer.h - ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCExternalSymbolizer class, which
// enables library users to provide callbacks (through the C API) to do the
// symbolization externally.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDISASSEMBLER_MCEXTERNALSYMBOLIZER_H
#define LLVM_MC_MCDISASSEMBLER_MCEXTERNALSYMBOLIZER_H

#include "llvm-c/DisassemblerTypes.h"
#include "llvm/MC/MCDisassembler/MCSymbolizer.h"
#include <memory>

namespace llvm {

/// Symbolize using user-provided, C API, callbacks.
///
/// See llvm-c/Disassembler.h.
class MCExternalSymbolizer : public MCSymbolizer {
protected:
  /// \name Hooks for symbolic disassembly via the public 'C' interface.
  /// @{
  /// The function to get the symbolic information for operands.
  LLVMOpInfoCallback GetOpInfo;
  /// The function to lookup a symbol name.
  LLVMSymbolLookupCallback SymbolLookUp;
  /// The pointer to the block of symbolic information for above call back.
  void *DisInfo;
  /// @}

public:
  MCExternalSymbolizer(MCContext &Ctx,
                       std::unique_ptr<MCRelocationInfo> RelInfo,
                       LLVMOpInfoCallback getOpInfo,
                       LLVMSymbolLookupCallback symbolLookUp, void *disInfo)
    : MCSymbolizer(Ctx, std::move(RelInfo)), GetOpInfo(getOpInfo),
      SymbolLookUp(symbolLookUp), DisInfo(disInfo) {}

  bool tryAddingSymbolicOperand(MCInst &MI, raw_ostream &CommentStream,
                                int64_t Value, uint64_t Address, bool IsBranch,
                                uint64_t Offset, uint64_t OpSize,
                                uint64_t InstSize) override;
  void tryAddingPcLoadReferenceComment(raw_ostream &CommentStream,
                                       int64_t Value,
                                       uint64_t Address) override;
};

}

#endif
//===- llvm/MC/MCDisassembler.h - Disassembler interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDISASSEMBLER_MCDISASSEMBLER_H
#define LLVM_MC_MCDISASSEMBLER_MCDISASSEMBLER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/XCOFF.h"
#include "llvm/MC/MCDisassembler/MCSymbolizer.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {

struct XCOFFSymbolInfoTy {
  std::optional<XCOFF::StorageMappingClass> StorageMappingClass;
  std::optional<uint32_t> Index;
  bool IsLabel = false;
  bool operator<(const XCOFFSymbolInfoTy &SymInfo) const;
};

struct SymbolInfoTy {
  uint64_t Addr;
  StringRef Name;
  // XCOFF uses XCOFFSymInfo. Other targets use Type.
  XCOFFSymbolInfoTy XCOFFSymInfo;
  uint8_t Type;

private:
  bool IsXCOFF;
  bool HasType;

public:
  SymbolInfoTy(uint64_t Addr, StringRef Name,
               std::optional<XCOFF::StorageMappingClass> Smc,
               std::optional<uint32_t> Idx, bool Label)
      : Addr(Addr), Name(Name), XCOFFSymInfo{Smc, Idx, Label}, Type(0),
        IsXCOFF(true), HasType(false) {}
  SymbolInfoTy(uint64_t Addr, StringRef Name, uint8_t Type,
               bool IsXCOFF = false)
      : Addr(Addr), Name(Name), Type(Type), IsXCOFF(IsXCOFF), HasType(true) {}
  bool isXCOFF() const { return IsXCOFF; }

private:
  friend bool operator<(const SymbolInfoTy &P1, const SymbolInfoTy &P2) {
    assert((P1.IsXCOFF == P2.IsXCOFF && P1.HasType == P2.HasType) &&
           "The value of IsXCOFF and HasType in P1 and P2 should be the same "
           "respectively.");

    if (P1.IsXCOFF && P1.HasType)
      return std::tie(P1.Addr, P1.Type, P1.Name) <
             std::tie(P2.Addr, P2.Type, P2.Name);

    if (P1.IsXCOFF)
      return std::tie(P1.Addr, P1.XCOFFSymInfo, P1.Name) <
             std::tie(P2.Addr, P2.XCOFFSymInfo, P2.Name);

    return std::tie(P1.Addr, P1.Name, P1.Type) <
           std::tie(P2.Addr, P2.Name, P2.Type);
  }
};

using SectionSymbolsTy = std::vector<SymbolInfoTy>;

template <typename T> class ArrayRef;
class MCContext;
class MCInst;
class MCSubtargetInfo;
class raw_ostream;

/// Superclass for all disassemblers. Consumes a memory region and provides an
/// array of assembly instructions.
class MCDisassembler {
public:
  /// Ternary decode status. Most backends will just use Fail and
  /// Success, however some have a concept of an instruction with
  /// understandable semantics but which is architecturally
  /// incorrect. An example of this is ARM UNPREDICTABLE instructions
  /// which are disassemblable but cause undefined behaviour.
  ///
  /// Because it makes sense to disassemble these instructions, there
  /// is a "soft fail" failure mode that indicates the MCInst& is
  /// valid but architecturally incorrect.
  ///
  /// The enum numbers are deliberately chosen such that reduction
  /// from Success -> SoftFail -> Fail can be done with a simple
  /// bitwise-AND:
  ///
  ///   LEFT & TOP =  | Success       Unpredictable   Fail
  ///   --------------+-----------------------------------
  ///   Success       | Success       Unpredictable   Fail
  ///   Unpredictable | Unpredictable Unpredictable   Fail
  ///   Fail          | Fail          Fail            Fail
  ///
  /// An easy way of encoding this is as 0b11, 0b01, 0b00 for
  /// Success, SoftFail, Fail respectively.
  enum DecodeStatus {
    Fail = 0,
    SoftFail = 1,
    Success = 3
  };
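
  // For example, merging the statuses of two decoded parts with a plain
  // bitwise-AND keeps the weakest result:
  //   (Success & SoftFail) == SoftFail  (0b11 & 0b01 == 0b01)
  //   (SoftFail & Fail) == Fail         (0b01 & 0b00 == 0b00)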

  MCDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
    : Ctx(Ctx), STI(STI) {}

  virtual ~MCDisassembler();

  /// Returns the disassembly of a single instruction.
  ///
  /// \param Instr    - An MCInst to populate with the contents of the
  ///                   instruction.
  /// \param Size     - A value to populate with the size of the instruction, or
  ///                   the number of bytes consumed while attempting to decode
  ///                   an invalid instruction.
  /// \param Address  - The address, in the memory space of region, of the first
  ///                   byte of the instruction.
  /// \param Bytes    - A reference to the actual bytes of the instruction.
  /// \param CStream  - The stream to print comments and annotations on.
  /// \return         - MCDisassembler::Success if the instruction is valid,
  ///                   MCDisassembler::SoftFail if the instruction was
  ///                                            disassemblable but invalid,
  ///                   MCDisassembler::Fail if the instruction was invalid.
  virtual DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
                                      ArrayRef<uint8_t> Bytes, uint64_t Address,
                                      raw_ostream &CStream) const = 0;
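
  // Illustrative decode loop (a sketch; `DisAsm`, `Bytes`, and `Addr` are
  // assumed to be set up by the caller):
  //
  //   MCInst Inst;
  //   uint64_t Size;
  //   for (uint64_t I = 0, E = Bytes.size(); I < E;) {
  //     MCDisassembler::DecodeStatus S = DisAsm->getInstruction(
  //         Inst, Size, Bytes.slice(I), Addr + I, nulls());
  //     if (S == MCDisassembler::Fail) {
  //       I += DisAsm->suggestBytesToSkip(Bytes.slice(I), Addr + I);
  //       continue;
  //     }
  //     // ... consume Inst; SoftFail means valid but suspect encoding ...
  //     I += Size;
  //   }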

  /// Used to perform separate target specific disassembly for a particular
  /// symbol. May parse any prelude that precedes instructions after the
  /// start of a symbol, or the entire symbol.
  /// This is used for example by WebAssembly to decode preludes.
  ///
  /// Base implementation returns std::nullopt, so by default targets do not
  /// treat symbols separately.
  ///
  /// \param Symbol   - The symbol.
  /// \param Size     - The number of bytes consumed.
  /// \param Address  - The address, in the memory space of region, of the first
  ///                   byte of the symbol.
  /// \param Bytes    - A reference to the actual bytes at the symbol location.
  /// \param CStream  - The stream to print comments and annotations on.
  /// \return         - MCDisassembler::Success if bytes are decoded
  ///                   successfully. Size must hold the number of bytes that
  ///                   were decoded.
  ///                 - MCDisassembler::Fail if the bytes are invalid. Size
  ///                   must hold the number of bytes that were decoded before
  ///                   failing. The target must print nothing. This can be
  ///                   done by buffering the output if needed.
  ///                 - std::nullopt if the target doesn't want to handle the
  ///                   symbol separately. Value of Size is ignored in this
  ///                   case.
  virtual std::optional<DecodeStatus>
  onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size, ArrayRef<uint8_t> Bytes,
                uint64_t Address, raw_ostream &CStream) const;
  // TODO:
  // Implement similar hooks that can be used at other points during
  // disassembly. Something along the following lines:
  // - onBeforeInstructionDecode()
  // - onAfterInstructionDecode()
  // - onSymbolEnd()
  // It should help move much of the target specific code from llvm-objdump to
  // respective target disassemblers.

  /// Suggest a distance to skip in a buffer of data to find the next
  /// place to look for the start of an instruction. For example, if
  /// all instructions have a fixed alignment, this might advance to
  /// the next multiple of that alignment.
  ///
  /// If not overridden, the default is 1.
  ///
  /// \param Address  - The address, in the memory space of region, of the
  ///                   starting point (typically the first byte of something
  ///                   that did not decode as a valid instruction at all).
  /// \param Bytes    - A reference to the actual bytes at Address. May be
  ///                   needed in order to determine the width of an
  ///                   unrecognized instruction (e.g. in Thumb this is a simple
  ///                   consistent criterion that doesn't require knowing the
  ///                   specific instruction). The caller can pass as much data
  ///                   as they have available, and the function is required to
  ///                   make a reasonable default choice if not enough data is
  ///                   available to make a better one.
  /// \return         - A number of bytes to skip. Must always be greater than
  ///                   zero. May be greater than the size of Bytes.
  virtual uint64_t suggestBytesToSkip(ArrayRef<uint8_t> Bytes,
                                      uint64_t Address) const;

private:
  MCContext &Ctx;

protected:
  // Subtarget information, for instruction decoding predicates if required.
  const MCSubtargetInfo &STI;
  std::unique_ptr<MCSymbolizer> Symbolizer;

public:
  // Helpers around MCSymbolizer
  bool tryAddingSymbolicOperand(MCInst &Inst, int64_t Value, uint64_t Address,
                                bool IsBranch, uint64_t Offset, uint64_t OpSize,
                                uint64_t InstSize) const;

  void tryAddingPcLoadReferenceComment(int64_t Value, uint64_t Address) const;

  /// Set \p Symzer as the current symbolizer.
  /// This takes ownership of \p Symzer, and deletes the previously set one.
  void setSymbolizer(std::unique_ptr<MCSymbolizer> Symzer);

  MCContext& getContext() const { return Ctx; }

  const MCSubtargetInfo& getSubtargetInfo() const { return STI; }

  // Marked mutable because we cache it inside the disassembler, rather than
  // having to pass it around as an argument through all the autogenerated code.
  mutable raw_ostream *CommentStream = nullptr;
};

} // end namespace llvm

#endif // LLVM_MC_MCDISASSEMBLER_MCDISASSEMBLER_H
//===- llvm/MC/MCRelocationInfo.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCRelocationInfo class, which provides methods to
// create MCExprs from relocations, either found in an object::ObjectFile
// (object::RelocationRef), or provided through the C API.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDISASSEMBLER_MCRELOCATIONINFO_H
#define LLVM_MC_MCDISASSEMBLER_MCRELOCATIONINFO_H

namespace llvm {

class MCContext;
class MCExpr;

/// Create MCExprs from relocations found in an object file.
class MCRelocationInfo {
protected:
  MCContext &Ctx;

public:
  MCRelocationInfo(MCContext &Ctx);
  MCRelocationInfo(const MCRelocationInfo &) = delete;
  MCRelocationInfo &operator=(const MCRelocationInfo &) = delete;
  virtual ~MCRelocationInfo();

  /// Create an MCExpr for the target-specific \p VariantKind.
  /// The VariantKinds are defined in llvm-c/Disassembler.h.
  /// Used by MCExternalSymbolizer.
  /// \returns If possible, an MCExpr corresponding to VariantKind, else 0.
  virtual const MCExpr *createExprForCAPIVariantKind(const MCExpr *SubExpr,
                                                     unsigned VariantKind);
};

} // end namespace llvm

#endif // LLVM_MC_MCDISASSEMBLER_MCRELOCATIONINFO_H
//===- llvm/MC/MCSymbolizer.h - MCSymbolizer class --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCSymbolizer class, which is used
// to symbolize instructions decoded from an object, that is, transform their
// immediate operands to MCExprs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCDISASSEMBLER_MCSYMBOLIZER_H
#define LLVM_MC_MCDISASSEMBLER_MCSYMBOLIZER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/MCDisassembler/MCRelocationInfo.h"
#include <cstdint>
#include <memory>
#include <utility>

namespace llvm {

class MCContext;
class MCInst;
class raw_ostream;

/// Symbolize and annotate disassembled instructions.
///
/// For now this mimics the old symbolization logic (from both ARM and x86),
/// which relied on user-provided (C API) callbacks to do the actual symbol
/// lookup in the object file. This was moved to MCExternalSymbolizer.
/// A better API would not rely on actually calling the two methods here from
/// inside each disassembler, but would use the instr info to determine what
/// operands are actually symbolizable, and in what way. I don't think this
/// information exists right now.
class MCSymbolizer {
protected:
  MCContext &Ctx;
  std::unique_ptr<MCRelocationInfo> RelInfo;

public:
  /// Construct an MCSymbolizer, taking ownership of \p RelInfo.
  MCSymbolizer(MCContext &Ctx, std::unique_ptr<MCRelocationInfo> RelInfo)
    : Ctx(Ctx), RelInfo(std::move(RelInfo)) {
  }

  MCSymbolizer(const MCSymbolizer &) = delete;
  MCSymbolizer &operator=(const MCSymbolizer &) = delete;
  virtual ~MCSymbolizer();

  /// Try to add a symbolic operand instead of \p Value to the MCInst.
  ///
  /// Instead of having a difficult to read immediate, a symbolic operand would
  /// represent this immediate in a more understandable way, for instance as a
  /// symbol or an offset from a symbol. Relocations can also be used to enrich
  /// the symbolic expression.
  /// \param Inst      - The MCInst where to insert the symbolic operand.
  /// \param cStream   - Stream to print comments and annotations on.
  /// \param Value     - Operand value, pc-adjusted by the caller if necessary.
  /// \param Address   - Load address of the instruction.
  /// \param IsBranch  - Is the instruction a branch?
  /// \param Offset    - Byte offset of the operand inside the inst.
  /// \param OpSize    - Size of the operand in bytes.
  /// \param InstSize  - Size of the instruction in bytes.
  /// \return Whether a symbolic operand was added.
  virtual bool tryAddingSymbolicOperand(MCInst &Inst, raw_ostream &cStream,
                                        int64_t Value, uint64_t Address,
                                        bool IsBranch, uint64_t Offset,
                                        uint64_t OpSize, uint64_t InstSize) = 0;
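
  // Illustrative call site in a disassembler (a sketch; names assumed):
  // fall back to a plain immediate operand when symbolization fails:
  //
  //   if (!Symbolizer->tryAddingSymbolicOperand(MI, CS, Imm, Addr, IsBranch,
  //                                             Offset, OpSize, InstSize))
  //     MI.addOperand(MCOperand::createImm(Imm));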

  /// Try to add a comment on the PC-relative load.
  /// For instance, in Mach-O, this is used to add annotations to instructions
  /// that use C string literals, as found in __cstring.
  virtual void tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                               int64_t Value,
                                               uint64_t Address) = 0;

  /// Get the MCSymbolizer's list of addresses that were referenced by
  /// symbolizable operands but not resolved to a symbol. The caller (some
  /// code that is disassembling a section or other chunk of code) would
  /// typically create a synthetic label at each such address, add the labels
  /// to its list of symbols for the section, and then retry disassembling the
  /// section with a new MCSymbolizer that knows the enhanced symbol list.
  /// The returned array is unordered and may contain duplicates.
  /// The returned ArrayRef is invalidated by any call to, or the destruction
  /// of, the MCSymbolizer object.
  virtual ArrayRef<uint64_t> getReferencedAddresses() const { return {}; }
};
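
// Illustrative sketch (not part of the original header) of a minimal
// subclass that symbolizes branch targets. findSymbolName() is a
// hypothetical helper standing in for whatever symbol table the client
// keeps; the MCInst/MCOperand/MCSymbolRefExpr calls are the usual MC APIs
// (declared in MCInst.h and MCExpr.h).
//
//   struct BranchSymbolizer : MCSymbolizer {
//     using MCSymbolizer::MCSymbolizer;
//     bool tryAddingSymbolicOperand(MCInst &Inst, raw_ostream &cStream,
//                                   int64_t Value, uint64_t Address,
//                                   bool IsBranch, uint64_t Offset,
//                                   uint64_t OpSize,
//                                   uint64_t InstSize) override {
//       if (!IsBranch)
//         return false;
//       // Turn the raw target address into a symbol reference operand.
//       MCSymbol *Sym = Ctx.getOrCreateSymbol(findSymbolName(Value));
//       Inst.addOperand(MCOperand::createExpr(MCSymbolRefExpr::create(Sym, Ctx)));
//       return true;
//     }
//     void tryAddingPcLoadReferenceComment(raw_ostream &, int64_t,
//                                          uint64_t) override {}
//   };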

} // end namespace llvm

#endif // LLVM_MC_MCDISASSEMBLER_MCSYMBOLIZER_H
//===-- llvm/MC/SectionKind.h - Classification of sections ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_SECTIONKIND_H
#define LLVM_MC_SECTIONKIND_H

namespace llvm {

/// SectionKind - This is a simple POD value that classifies the properties of
/// a section.  A section is classified into the deepest possible
/// classification, and then the target maps each kind onto its own sections
/// based on what capabilities it has.
///
/// The comments below describe these kinds as if they were an inheritance
/// hierarchy in order to explain the predicates below.
///
class SectionKind {
  enum Kind {
    /// Metadata - Debug info sections or other metadata.
    Metadata,

    /// Exclude - This section will be excluded from the final executable or
    /// shared library. Only valid for ELF / COFF targets.
    Exclude,

    /// Text - Text section, used for functions and other executable code.
    Text,

           /// ExecuteOnly - Text section that is not readable.
           ExecuteOnly,

    /// ReadOnly - Data that is never written to at program runtime by the
    /// program or the dynamic linker.  Things in the top-level readonly
    /// SectionKind are not mergeable.
    ReadOnly,

        /// MergeableCString - Any null-terminated string which allows merging.
        /// These values are known to end in a nul value of the specified size,
        /// not otherwise contain a nul value, and be mergeable.  This allows
        /// the linker to unique the strings if it so desires.

           /// Mergeable1ByteCString - 1-byte mergeable, null-terminated string.
           Mergeable1ByteCString,

           /// Mergeable2ByteCString - 2-byte mergeable, null-terminated string.
           Mergeable2ByteCString,

           /// Mergeable4ByteCString - 4-byte mergeable, null-terminated string.
           Mergeable4ByteCString,

        /// MergeableConst - These are sections for merging fixed-length
        /// constants together.  For example, this can be used to unique
        /// constant pool entries etc.

            /// MergeableConst4 - This is a section used by 4-byte constants,
            /// for example, floats.
            MergeableConst4,

            /// MergeableConst8 - This is a section used by 8-byte constants,
            /// for example, doubles.
            MergeableConst8,

            /// MergeableConst16 - This is a section used by 16-byte constants,
            /// for example, vectors.
            MergeableConst16,

            /// MergeableConst32 - This is a section used by 32-byte constants,
            /// for example, vectors.
            MergeableConst32,

    /// Writeable - This is the base of all segments that need to be written
    /// to during program runtime.

       /// ThreadLocal - This is the base of all TLS segments.  All TLS
       /// objects must be writeable, otherwise there is no reason for them to
       /// be thread local!

           /// ThreadBSS - Zero-initialized TLS data objects.
           ThreadBSS,

           /// ThreadData - Initialized TLS data objects.
           ThreadData,

           /// ThreadBSSLocal - Zero-initialized TLS data objects with local linkage.
           ThreadBSSLocal,

       /// GlobalWriteableData - Writeable data that is global (not thread
       /// local).

           /// BSS - Zero initialized writeable data.
           BSS,

               /// BSSLocal - This is BSS (zero initialized and writable) data
               /// which has local linkage.
               BSSLocal,

               /// BSSExtern - This is BSS data with normal external linkage.
               BSSExtern,

           /// Common - Data with common linkage.  These represent tentative
           /// definitions, which always have a zero initializer and are never
           /// marked 'constant'.
           Common,

           /// This is writeable data that has a non-zero initializer.
           Data,

           /// ReadOnlyWithRel - These are global variables that are never
           /// written to by the program, but that have relocations, so they
           /// must be stuck in a writeable section so that the dynamic linker
           /// can write to them.  If it chooses to, the dynamic linker can
           /// mark the pages these globals end up on as read-only after it is
           /// done with its relocation phase.
           ReadOnlyWithRel
  } K : 8;
public:

  bool isMetadata() const { return K == Metadata; }

  bool isExclude() const { return K == Exclude; }

  bool isText() const { return K == Text || K == ExecuteOnly; }

  bool isExecuteOnly() const { return K == ExecuteOnly; }

  bool isReadOnly() const {
    return K == ReadOnly || isMergeableCString() ||
           isMergeableConst();
  }

  bool isMergeableCString() const {
    return K == Mergeable1ByteCString || K == Mergeable2ByteCString ||
           K == Mergeable4ByteCString;
  }
  bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
  bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
  bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }

  bool isMergeableConst() const {
    return K == MergeableConst4 || K == MergeableConst8 ||
           K == MergeableConst16 || K == MergeableConst32;
  }
  bool isMergeableConst4() const { return K == MergeableConst4; }
  bool isMergeableConst8() const { return K == MergeableConst8; }
  bool isMergeableConst16() const { return K == MergeableConst16; }
  bool isMergeableConst32() const { return K == MergeableConst32; }

  bool isWriteable() const {
    return isThreadLocal() || isGlobalWriteableData();
  }

  bool isThreadLocal() const {
    return K == ThreadData || K == ThreadBSS || K == ThreadBSSLocal;
  }

  bool isThreadBSS() const { return K == ThreadBSS || K == ThreadBSSLocal; }
  bool isThreadData() const { return K == ThreadData; }
  bool isThreadBSSLocal() const { return K == ThreadBSSLocal; }

  bool isGlobalWriteableData() const {
    return isBSS() || isCommon() || isData() || isReadOnlyWithRel();
  }

  bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
  bool isBSSLocal() const { return K == BSSLocal; }
  bool isBSSExtern() const { return K == BSSExtern; }

  bool isCommon() const { return K == Common; }

  bool isData() const { return K == Data; }

  bool isReadOnlyWithRel() const {
    return K == ReadOnlyWithRel;
  }
private:
  static SectionKind get(Kind K) {
    SectionKind Res;
    Res.K = K;
    return Res;
  }
public:

  static SectionKind getMetadata() { return get(Metadata); }
  static SectionKind getExclude() { return get(Exclude); }
  static SectionKind getText() { return get(Text); }
  static SectionKind getExecuteOnly() { return get(ExecuteOnly); }
  static SectionKind getReadOnly() { return get(ReadOnly); }
  static SectionKind getMergeable1ByteCString() {
    return get(Mergeable1ByteCString);
  }
  static SectionKind getMergeable2ByteCString() {
    return get(Mergeable2ByteCString);
  }
  static SectionKind getMergeable4ByteCString() {
    return get(Mergeable4ByteCString);
  }
  static SectionKind getMergeableConst4() { return get(MergeableConst4); }
  static SectionKind getMergeableConst8() { return get(MergeableConst8); }
  static SectionKind getMergeableConst16() { return get(MergeableConst16); }
  static SectionKind getMergeableConst32() { return get(MergeableConst32); }
  static SectionKind getThreadBSS() { return get(ThreadBSS); }
  static SectionKind getThreadData() { return get(ThreadData); }
  static SectionKind getThreadBSSLocal() { return get(ThreadBSSLocal); }
  static SectionKind getBSS() { return get(BSS); }
  static SectionKind getBSSLocal() { return get(BSSLocal); }
  static SectionKind getBSSExtern() { return get(BSSExtern); }
  static SectionKind getCommon() { return get(Common); }
  static SectionKind getData() { return get(Data); }
  static SectionKind getReadOnlyWithRel() { return get(ReadOnlyWithRel); }
};
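
// Illustrative sketch (not part of the original header): a kind picked for a
// global feeds the predicates above, so target code can query it generically.
//
//   SectionKind K = SectionKind::getMergeable2ByteCString();
//   assert(K.isReadOnly() && K.isMergeableCString() && !K.isWriteable());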

} // end namespace llvm

#endif
//===- MCExpr.h - Assembly Level Expressions --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCEXPR_H
#define LLVM_MC_MCEXPR_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/SMLoc.h"
#include <cstdint>

namespace llvm {

class MCAsmInfo;
class MCAsmLayout;
class MCAssembler;
class MCContext;
class MCFixup;
class MCFragment;
class MCSection;
class MCStreamer;
class MCSymbol;
class MCValue;
class raw_ostream;
class StringRef;

using SectionAddrMap = DenseMap<const MCSection *, uint64_t>;

/// Base class for the full range of assembler expressions which are
/// needed for parsing.
class MCExpr {
public:
  enum ExprKind : uint8_t {
    Binary,    ///< Binary expressions.
    Constant,  ///< Constant expressions.
    SymbolRef, ///< References to labels and assigned expressions.
    Unary,     ///< Unary expressions.
    Target     ///< Target specific expression.
  };

private:
  static const unsigned NumSubclassDataBits = 24;
  static_assert(
      NumSubclassDataBits == CHAR_BIT * (sizeof(unsigned) - sizeof(ExprKind)),
      "ExprKind and SubclassData together should take up one word");

  ExprKind Kind;
  /// Field reserved for use by MCExpr subclasses.
  unsigned SubclassData : NumSubclassDataBits;
  SMLoc Loc;

  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
                          const MCAsmLayout *Layout,
                          const SectionAddrMap *Addrs, bool InSet) const;

protected:
  explicit MCExpr(ExprKind Kind, SMLoc Loc, unsigned SubclassData = 0)
      : Kind(Kind), SubclassData(SubclassData), Loc(Loc) {
    assert(SubclassData < (1 << NumSubclassDataBits) &&
           "Subclass data too large");
  }

  bool evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
                                 const MCAsmLayout *Layout,
                                 const MCFixup *Fixup,
                                 const SectionAddrMap *Addrs, bool InSet) const;

  unsigned getSubclassData() const { return SubclassData; }

public:
  MCExpr(const MCExpr &) = delete;
  MCExpr &operator=(const MCExpr &) = delete;

  /// \name Accessors
  /// @{

  ExprKind getKind() const { return Kind; }
  SMLoc getLoc() const { return Loc; }

  /// @}
  /// \name Utility Methods
  /// @{

  void print(raw_ostream &OS, const MCAsmInfo *MAI,
             bool InParens = false) const;
  void dump() const;

  /// @}
  /// \name Expression Evaluation
  /// @{

  /// Try to evaluate the expression to an absolute value.
  ///
  /// \param Res - The absolute value, if evaluation succeeds.
  /// \param Layout - The assembler layout object to use for evaluating symbol
  /// values. If not given, then only non-symbolic expressions will be
  /// evaluated.
  /// \return - True on success.
  bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
                          const SectionAddrMap &Addrs) const;
  bool evaluateAsAbsolute(int64_t &Res) const;
  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm) const;
  bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;

  bool evaluateKnownAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;

  /// Try to evaluate the expression to a relocatable value, i.e. an
  /// expression of the fixed form (a - b + constant).
  ///
  /// \param Res - The relocatable value, if evaluation succeeds.
  /// \param Layout - The assembler layout object to use for evaluating values.
  /// \param Fixup - The Fixup object if available.
  /// \return - True on success.
  bool evaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout,
                             const MCFixup *Fixup) const;

  /// Try to evaluate the expression to the form (a - b + constant) where
  /// neither a nor b are variables.
  ///
  /// This is a more aggressive variant of evaluateAsRelocatable. The intended
  /// use is for when relocations are not available, like the .size directive.
  bool evaluateAsValue(MCValue &Res, const MCAsmLayout &Layout) const;

  /// Find the "associated section" for this expression, which is
  /// currently defined as the absolute section for constants, or
  /// otherwise the section associated with the first defined symbol in the
  /// expression.
  MCFragment *findAssociatedFragment() const;

  /// @}
};

inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) {
  E.print(OS, nullptr);
  return OS;
}

///  Represent a constant integer expression.
class MCConstantExpr : public MCExpr {
  int64_t Value;

  // Subclass data stores SizeInBytes in bits 0..7 and PrintInHex in bit 8.
  static const unsigned SizeInBytesBits = 8;
  static const unsigned SizeInBytesMask = (1 << SizeInBytesBits) - 1;
  static const unsigned PrintInHexBit = 1 << SizeInBytesBits;

  static unsigned encodeSubclassData(bool PrintInHex, unsigned SizeInBytes) {
    assert(SizeInBytes <= sizeof(int64_t) && "Excessive size");
    return SizeInBytes | (PrintInHex ? PrintInHexBit : 0);
  }

  MCConstantExpr(int64_t Value, bool PrintInHex, unsigned SizeInBytes)
      : MCExpr(MCExpr::Constant, SMLoc(),
               encodeSubclassData(PrintInHex, SizeInBytes)), Value(Value) {}

public:
  /// \name Construction
  /// @{

  static const MCConstantExpr *create(int64_t Value, MCContext &Ctx,
                                      bool PrintInHex = false,
                                      unsigned SizeInBytes = 0);

  /// @}
  /// \name Accessors
  /// @{

  int64_t getValue() const { return Value; }
  unsigned getSizeInBytes() const {
    return getSubclassData() & SizeInBytesMask;
  }

  bool useHexFormat() const { return (getSubclassData() & PrintInHexBit) != 0; }

  /// @}

  static bool classof(const MCExpr *E) {
    return E->getKind() == MCExpr::Constant;
  }
};
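
// Illustrative sketch (not part of the original header), assuming an
// MCContext `Ctx` is in scope: constants are uniqued in the context and
// always evaluate as absolute values.
//
//   const MCConstantExpr *CE = MCConstantExpr::create(42, Ctx);
//   int64_t V;
//   bool OK = CE->evaluateAsAbsolute(V); // OK == true, V == 42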

///  Represent a reference to a symbol from inside an expression.
///
/// A symbol reference in an expression may be a use of a label, a use of an
/// assembler variable (defined constant), or constitute an implicit definition
/// of the symbol as external.
class MCSymbolRefExpr : public MCExpr {
public:
  enum VariantKind : uint16_t {
    VK_None,
    VK_Invalid,

    VK_GOT,
    VK_GOTOFF,
    VK_GOTREL,
    VK_PCREL,
    VK_GOTPCREL,
    VK_GOTPCREL_NORELAX,
    VK_GOTTPOFF,
    VK_INDNTPOFF,
    VK_NTPOFF,
    VK_GOTNTPOFF,
    VK_PLT,
    VK_TLSGD,
    VK_TLSLD,
    VK_TLSLDM,
    VK_TPOFF,
    VK_DTPOFF,
    VK_TLSCALL, // symbol(tlscall)
    VK_TLSDESC, // symbol(tlsdesc)
    VK_TLVP,    // Mach-O thread local variable relocations
    VK_TLVPPAGE,
    VK_TLVPPAGEOFF,
    VK_PAGE,
    VK_PAGEOFF,
    VK_GOTPAGE,
    VK_GOTPAGEOFF,
    VK_SECREL,
    VK_SIZE,    // symbol@SIZE
    VK_WEAKREF, // The link between the symbols in .weakref foo, bar

    VK_X86_ABS8,
    VK_X86_PLTOFF,

    VK_ARM_NONE,
    VK_ARM_GOT_PREL,
    VK_ARM_TARGET1,
    VK_ARM_TARGET2,
    VK_ARM_PREL31,
    VK_ARM_SBREL,  // symbol(sbrel)
    VK_ARM_TLSLDO, // symbol(tlsldo)
    VK_ARM_TLSDESCSEQ,

    VK_AVR_NONE,
    VK_AVR_LO8,
    VK_AVR_HI8,
    VK_AVR_HLO8,
    VK_AVR_DIFF8,
    VK_AVR_DIFF16,
    VK_AVR_DIFF32,
    VK_AVR_PM,

    VK_PPC_LO,              // symbol@l
    VK_PPC_HI,              // symbol@h
    VK_PPC_HA,              // symbol@ha
    VK_PPC_HIGH,            // symbol@high
    VK_PPC_HIGHA,           // symbol@higha
    VK_PPC_HIGHER,          // symbol@higher
    VK_PPC_HIGHERA,         // symbol@highera
    VK_PPC_HIGHEST,         // symbol@highest
    VK_PPC_HIGHESTA,        // symbol@highesta
    VK_PPC_GOT_LO,          // symbol@got@l
    VK_PPC_GOT_HI,          // symbol@got@h
    VK_PPC_GOT_HA,          // symbol@got@ha
    VK_PPC_TOCBASE,         // symbol@tocbase
    VK_PPC_TOC,             // symbol@toc
    VK_PPC_TOC_LO,          // symbol@toc@l
    VK_PPC_TOC_HI,          // symbol@toc@h
    VK_PPC_TOC_HA,          // symbol@toc@ha
    VK_PPC_U,               // symbol@u
    VK_PPC_L,               // symbol@l
    VK_PPC_DTPMOD,          // symbol@dtpmod
    VK_PPC_TPREL_LO,        // symbol@tprel@l
    VK_PPC_TPREL_HI,        // symbol@tprel@h
    VK_PPC_TPREL_HA,        // symbol@tprel@ha
    VK_PPC_TPREL_HIGH,      // symbol@tprel@high
    VK_PPC_TPREL_HIGHA,     // symbol@tprel@higha
    VK_PPC_TPREL_HIGHER,    // symbol@tprel@higher
    VK_PPC_TPREL_HIGHERA,   // symbol@tprel@highera
    VK_PPC_TPREL_HIGHEST,   // symbol@tprel@highest
    VK_PPC_TPREL_HIGHESTA,  // symbol@tprel@highesta
    VK_PPC_DTPREL_LO,       // symbol@dtprel@l
    VK_PPC_DTPREL_HI,       // symbol@dtprel@h
    VK_PPC_DTPREL_HA,       // symbol@dtprel@ha
    VK_PPC_DTPREL_HIGH,     // symbol@dtprel@high
    VK_PPC_DTPREL_HIGHA,    // symbol@dtprel@higha
    VK_PPC_DTPREL_HIGHER,   // symbol@dtprel@higher
    VK_PPC_DTPREL_HIGHERA,  // symbol@dtprel@highera
    VK_PPC_DTPREL_HIGHEST,  // symbol@dtprel@highest
    VK_PPC_DTPREL_HIGHESTA, // symbol@dtprel@highesta
    VK_PPC_GOT_TPREL,       // symbol@got@tprel
    VK_PPC_GOT_TPREL_LO,    // symbol@got@tprel@l
    VK_PPC_GOT_TPREL_HI,    // symbol@got@tprel@h
    VK_PPC_GOT_TPREL_HA,    // symbol@got@tprel@ha
    VK_PPC_GOT_DTPREL,      // symbol@got@dtprel
    VK_PPC_GOT_DTPREL_LO,   // symbol@got@dtprel@l
    VK_PPC_GOT_DTPREL_HI,   // symbol@got@dtprel@h
    VK_PPC_GOT_DTPREL_HA,   // symbol@got@dtprel@ha
    VK_PPC_TLS,             // symbol@tls
    VK_PPC_GOT_TLSGD,       // symbol@got@tlsgd
    VK_PPC_GOT_TLSGD_LO,    // symbol@got@tlsgd@l
    VK_PPC_GOT_TLSGD_HI,    // symbol@got@tlsgd@h
    VK_PPC_GOT_TLSGD_HA,    // symbol@got@tlsgd@ha
    VK_PPC_TLSGD,           // symbol@tlsgd
    VK_PPC_AIX_TLSGD,       // symbol@gd
    VK_PPC_AIX_TLSGDM,      // symbol@m
    VK_PPC_AIX_TLSLE,       // symbol@le
    VK_PPC_GOT_TLSLD,       // symbol@got@tlsld
    VK_PPC_GOT_TLSLD_LO,    // symbol@got@tlsld@l
    VK_PPC_GOT_TLSLD_HI,    // symbol@got@tlsld@h
    VK_PPC_GOT_TLSLD_HA,    // symbol@got@tlsld@ha
    VK_PPC_GOT_PCREL,       // symbol@got@pcrel
    VK_PPC_GOT_TLSGD_PCREL, // symbol@got@tlsgd@pcrel
    VK_PPC_GOT_TLSLD_PCREL, // symbol@got@tlsld@pcrel
    VK_PPC_GOT_TPREL_PCREL, // symbol@got@tprel@pcrel
    VK_PPC_TLS_PCREL,       // symbol@tls@pcrel
    VK_PPC_TLSLD,           // symbol@tlsld
    VK_PPC_LOCAL,           // symbol@local
    VK_PPC_NOTOC,           // symbol@notoc
    VK_PPC_PCREL_OPT,       // .reloc expr, R_PPC64_PCREL_OPT, expr

    VK_COFF_IMGREL32, // symbol@imgrel (image-relative)

    VK_Hexagon_LO16,
    VK_Hexagon_HI16,
    VK_Hexagon_GPREL,
    VK_Hexagon_GD_GOT,
    VK_Hexagon_LD_GOT,
    VK_Hexagon_GD_PLT,
    VK_Hexagon_LD_PLT,
    VK_Hexagon_IE,
    VK_Hexagon_IE_GOT,

    VK_WASM_TYPEINDEX, // Reference to a symbol's type (signature)
    VK_WASM_TLSREL,    // Memory address relative to __tls_base
    VK_WASM_MBREL,     // Memory address relative to __memory_base
    VK_WASM_TBREL,     // Table index relative to __table_base
    VK_WASM_GOT_TLS,   // Wasm global index of TLS symbol.
    VK_WASM_FUNCINDEX, // Wasm function index.

    VK_AMDGPU_GOTPCREL32_LO, // symbol@gotpcrel32@lo
    VK_AMDGPU_GOTPCREL32_HI, // symbol@gotpcrel32@hi
    VK_AMDGPU_REL32_LO,      // symbol@rel32@lo
    VK_AMDGPU_REL32_HI,      // symbol@rel32@hi
    VK_AMDGPU_REL64,         // symbol@rel64
    VK_AMDGPU_ABS32_LO,      // symbol@abs32@lo
    VK_AMDGPU_ABS32_HI,      // symbol@abs32@hi

    VK_VE_HI32,        // symbol@hi
    VK_VE_LO32,        // symbol@lo
    VK_VE_PC_HI32,     // symbol@pc_hi
    VK_VE_PC_LO32,     // symbol@pc_lo
    VK_VE_GOT_HI32,    // symbol@got_hi
    VK_VE_GOT_LO32,    // symbol@got_lo
    VK_VE_GOTOFF_HI32, // symbol@gotoff_hi
    VK_VE_GOTOFF_LO32, // symbol@gotoff_lo
    VK_VE_PLT_HI32,    // symbol@plt_hi
    VK_VE_PLT_LO32,    // symbol@plt_lo
    VK_VE_TLS_GD_HI32, // symbol@tls_gd_hi
    VK_VE_TLS_GD_LO32, // symbol@tls_gd_lo
    VK_VE_TPOFF_HI32,  // symbol@tpoff_hi
    VK_VE_TPOFF_LO32,  // symbol@tpoff_lo

    VK_TPREL,
    VK_DTPREL
  };

private:
  /// The symbol being referenced.
  const MCSymbol *Symbol;

  // Subclass data stores VariantKind in bits 0..15 and HasSubsectionsViaSymbols
  // in bit 16.
  static const unsigned VariantKindBits = 16;
  static const unsigned VariantKindMask = (1 << VariantKindBits) - 1;

  // FIXME: Remove this bit.
  static const unsigned HasSubsectionsViaSymbolsBit = 1 << VariantKindBits;

  static unsigned encodeSubclassData(VariantKind Kind,
                                     bool HasSubsectionsViaSymbols) {
    return (unsigned)Kind |
           (HasSubsectionsViaSymbols ? HasSubsectionsViaSymbolsBit : 0);
  }

  explicit MCSymbolRefExpr(const MCSymbol *Symbol, VariantKind Kind,
                           const MCAsmInfo *MAI, SMLoc Loc = SMLoc());

public:
  /// \name Construction
  /// @{

  static const MCSymbolRefExpr *create(const MCSymbol *Symbol, MCContext &Ctx) {
    return MCSymbolRefExpr::create(Symbol, VK_None, Ctx);
  }

  static const MCSymbolRefExpr *create(const MCSymbol *Symbol, VariantKind Kind,
                                       MCContext &Ctx, SMLoc Loc = SMLoc());
  static const MCSymbolRefExpr *create(StringRef Name, VariantKind Kind,
                                       MCContext &Ctx);

  /// @}
  /// \name Accessors
  /// @{

  const MCSymbol &getSymbol() const { return *Symbol; }

  VariantKind getKind() const {
    return (VariantKind)(getSubclassData() & VariantKindMask);
  }

  bool hasSubsectionsViaSymbols() const {
    return (getSubclassData() & HasSubsectionsViaSymbolsBit) != 0;
  }

  /// @}
  /// \name Static Utility Functions
  /// @{

  static StringRef getVariantKindName(VariantKind Kind);

  static VariantKind getVariantKindForName(StringRef Name);

  /// @}

  static bool classof(const MCExpr *E) {
    return E->getKind() == MCExpr::SymbolRef;
  }
};

/// Unary assembler expressions.
class MCUnaryExpr : public MCExpr {
public:
  enum Opcode {
    LNot,  ///< Logical negation.
    Minus, ///< Unary minus.
    Not,   ///< Bitwise negation.
    Plus   ///< Unary plus.
  };

private:
  const MCExpr *Expr;

  MCUnaryExpr(Opcode Op, const MCExpr *Expr, SMLoc Loc)
      : MCExpr(MCExpr::Unary, Loc, Op), Expr(Expr) {}

public:
  /// \name Construction
  /// @{

  static const MCUnaryExpr *create(Opcode Op, const MCExpr *Expr,
                                   MCContext &Ctx, SMLoc Loc = SMLoc());

  static const MCUnaryExpr *createLNot(const MCExpr *Expr, MCContext &Ctx, SMLoc Loc = SMLoc()) {
    return create(LNot, Expr, Ctx, Loc);
  }

  static const MCUnaryExpr *createMinus(const MCExpr *Expr, MCContext &Ctx, SMLoc Loc = SMLoc()) {
    return create(Minus, Expr, Ctx, Loc);
  }

  static const MCUnaryExpr *createNot(const MCExpr *Expr, MCContext &Ctx, SMLoc Loc = SMLoc()) {
    return create(Not, Expr, Ctx, Loc);
  }

  static const MCUnaryExpr *createPlus(const MCExpr *Expr, MCContext &Ctx, SMLoc Loc = SMLoc()) {
    return create(Plus, Expr, Ctx, Loc);
  }

  /// @}
  /// \name Accessors
  /// @{

  /// Get the kind of this unary expression.
  Opcode getOpcode() const { return (Opcode)getSubclassData(); }

  /// Get the child of this unary expression.
  const MCExpr *getSubExpr() const { return Expr; }

  /// @}

  static bool classof(const MCExpr *E) {
    return E->getKind() == MCExpr::Unary;
  }
};

/// Binary assembler expressions.
class MCBinaryExpr : public MCExpr {
public:
  enum Opcode {
    Add,  ///< Addition.
    And,  ///< Bitwise and.
    Div,  ///< Signed division.
    EQ,   ///< Equality comparison.
    GT,   ///< Signed greater than comparison (result is either 0 or some
          ///< target-specific non-zero value)
    GTE,  ///< Signed greater than or equal comparison (result is either 0 or
          ///< some target-specific non-zero value).
    LAnd, ///< Logical and.
    LOr,  ///< Logical or.
    LT,   ///< Signed less than comparison (result is either 0 or
          ///< some target-specific non-zero value).
    LTE,  ///< Signed less than or equal comparison (result is either 0 or
          ///< some target-specific non-zero value).
    Mod,  ///< Signed remainder.
    Mul,  ///< Multiplication.
    NE,   ///< Inequality comparison.
    Or,   ///< Bitwise or.
    OrNot, ///< Bitwise or not.
    Shl,  ///< Shift left.
    AShr, ///< Arithmetic shift right.
    LShr, ///< Logical shift right.
    Sub,  ///< Subtraction.
    Xor   ///< Bitwise exclusive or.
  };

private:
  const MCExpr *LHS, *RHS;

  MCBinaryExpr(Opcode Op, const MCExpr *LHS, const MCExpr *RHS,
               SMLoc Loc = SMLoc())
      : MCExpr(MCExpr::Binary, Loc, Op), LHS(LHS), RHS(RHS) {}

public:
  /// \name Construction
  /// @{

  static const MCBinaryExpr *create(Opcode Op, const MCExpr *LHS,
                                    const MCExpr *RHS, MCContext &Ctx,
                                    SMLoc Loc = SMLoc());

  static const MCBinaryExpr *createAdd(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Add, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createAnd(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(And, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createDiv(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Div, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createEQ(const MCExpr *LHS, const MCExpr *RHS,
                                      MCContext &Ctx) {
    return create(EQ, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createGT(const MCExpr *LHS, const MCExpr *RHS,
                                      MCContext &Ctx) {
    return create(GT, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createGTE(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(GTE, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createLAnd(const MCExpr *LHS, const MCExpr *RHS,
                                        MCContext &Ctx) {
    return create(LAnd, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createLOr(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(LOr, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createLT(const MCExpr *LHS, const MCExpr *RHS,
                                      MCContext &Ctx) {
    return create(LT, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createLTE(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(LTE, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createMod(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Mod, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createMul(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Mul, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createNE(const MCExpr *LHS, const MCExpr *RHS,
                                      MCContext &Ctx) {
    return create(NE, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createOr(const MCExpr *LHS, const MCExpr *RHS,
                                      MCContext &Ctx) {
    return create(Or, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createShl(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Shl, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createAShr(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(AShr, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createLShr(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(LShr, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createSub(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Sub, LHS, RHS, Ctx);
  }

  static const MCBinaryExpr *createXor(const MCExpr *LHS, const MCExpr *RHS,
                                       MCContext &Ctx) {
    return create(Xor, LHS, RHS, Ctx);
  }

  /// @}
  /// \name Accessors
  /// @{

  /// Get the kind of this binary expression.
  Opcode getOpcode() const { return (Opcode)getSubclassData(); }

  /// Get the left-hand side expression of the binary operator.
  const MCExpr *getLHS() const { return LHS; }

  /// Get the right-hand side expression of the binary operator.
  const MCExpr *getRHS() const { return RHS; }

  /// @}

  static bool classof(const MCExpr *E) {
    return E->getKind() == MCExpr::Binary;
  }
};
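
// Illustrative sketch (not part of the original header), assuming
// `const MCSymbol *A, *B` and an MCContext `Ctx` are in scope: this builds
// the relocatable form (A - B + 4) described by evaluateAsRelocatable above.
//
//   const MCExpr *E = MCBinaryExpr::createAdd(
//       MCBinaryExpr::createSub(MCSymbolRefExpr::create(A, Ctx),
//                               MCSymbolRefExpr::create(B, Ctx), Ctx),
//       MCConstantExpr::create(4, Ctx), Ctx);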

/// This is an extension point for target-specific MCExpr subclasses to
/// implement.
///
/// NOTE: All subclasses are required to have trivial destructors because
/// MCExprs are bump pointer allocated and not destructed.
class MCTargetExpr : public MCExpr {
  virtual void anchor();

protected:
  MCTargetExpr() : MCExpr(Target, SMLoc()) {}
  virtual ~MCTargetExpr() = default;

public:
  virtual void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const = 0;
  virtual bool evaluateAsRelocatableImpl(MCValue &Res,
                                         const MCAsmLayout *Layout,
                                         const MCFixup *Fixup) const = 0;
  // Allow target expressions to be checked for equality.
  virtual bool isEqualTo(const MCExpr *x) const { return false; }
  // This should be set when assigned expressions are not valid ".set"
  // expressions, e.g. registers, and must be inlined.
  virtual bool inlineAssignedExpr() const { return false; }
  virtual void visitUsedExpr(MCStreamer& Streamer) const = 0;
  virtual MCFragment *findAssociatedFragment() const = 0;

  virtual void fixELFSymbolsInTLSFixups(MCAssembler &) const = 0;

  static bool classof(const MCExpr *E) {
    return E->getKind() == MCExpr::Target;
  }
};

} // end namespace llvm

#endif // LLVM_MC_MCEXPR_H
//===- MC/MCRegisterInfo.h - Target Register Description --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes an abstract interface used to get information about a
// target machine's register file.  This information is used for a variety of
// purposes, especially register allocation.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCREGISTERINFO_H
#define LLVM_MC_MCREGISTERINFO_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegister.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

namespace llvm {

class MCRegUnitIterator;
class MCSubRegIterator;
class MCSuperRegIterator;

/// MCRegisterClass - Base class of TargetRegisterClass.
class MCRegisterClass {
public:
  using iterator = const MCPhysReg*;
  using const_iterator = const MCPhysReg*;

  const iterator RegsBegin;
  const uint8_t *const RegSet;
  const uint32_t NameIdx;
  const uint16_t RegsSize;
  const uint16_t RegSetSize;
  const uint16_t ID;
  const uint16_t RegSizeInBits;
  const int8_t CopyCost;
  const bool Allocatable;

  /// getID() - Return the register class ID number.
  ///
  unsigned getID() const { return ID; }

  /// begin/end - Return all of the registers in this class.
  ///
  iterator       begin() const { return RegsBegin; }
  iterator         end() const { return RegsBegin + RegsSize; }

  /// getNumRegs - Return the number of registers in this class.
  ///
  unsigned getNumRegs() const { return RegsSize; }

  /// getRegister - Return the specified register in the class.
  ///
  unsigned getRegister(unsigned i) const {
    assert(i < getNumRegs() && "Register number out of range!");
    return RegsBegin[i];
  }

  /// contains - Return true if the specified register is included in this
  /// register class.  This does not include virtual registers.
  bool contains(MCRegister Reg) const {
    unsigned RegNo = unsigned(Reg);
    unsigned InByte = RegNo % 8;
    unsigned Byte = RegNo / 8;
    if (Byte >= RegSetSize)
      return false;
    return (RegSet[Byte] & (1 << InByte)) != 0;
  }

  /// contains - Return true if both registers are in this class.
  bool contains(MCRegister Reg1, MCRegister Reg2) const {
    return contains(Reg1) && contains(Reg2);
  }

  /// Return the size of the physical register in bits if we are able to
  /// determine it. This always returns zero for registers of targets that use
  /// HW modes, as we need more information to determine the size of registers
  /// in such cases. Use TargetRegisterInfo to cover them.
  unsigned getSizeInBits() const { return RegSizeInBits; }

  /// getCopyCost - Return the cost of copying a value between two registers in
  /// this class. A negative number means the register class is very expensive
  /// to copy e.g. status flag register classes.
  int getCopyCost() const { return CopyCost; }

  /// isAllocatable - Return true if this register class may be used to create
  /// virtual registers.
  bool isAllocatable() const { return Allocatable; }
};
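
// Illustrative sketch (not part of the original header): register classes
// are plain ranges of MCPhysReg, so iteration and membership tests are cheap.
// `RC` stands in for any class obtained from MCRegisterInfo::getRegClass().
//
//   for (MCPhysReg R : RC)    // begin()/end() from the class above
//     assert(RC.contains(R)); // every listed register is in the bit set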

/// MCRegisterDesc - This record contains information about a particular
/// register.  The SubRegs field is a zero terminated array of registers that
/// are sub-registers of the specified register, e.g. AL and AH are
/// sub-registers of AX. The SuperRegs field is a zero-terminated array of
/// registers that are super-registers of the specified register, e.g. RAX and
/// EAX are super-registers of AX.
///
struct MCRegisterDesc {
  uint32_t Name;      // Printable name for the reg (for debugging)
  uint32_t SubRegs;   // Sub-register set, described above
  uint32_t SuperRegs; // Super-register set, described above

  // Offset into MCRI::SubRegIndices of a list of sub-register indices for each
  // sub-register in SubRegs.
  uint32_t SubRegIndices;

  // Points to the list of register units. The low bits hold the first regunit
  // number, the high bits hold an offset into DiffLists. See MCRegUnitIterator.
  uint32_t RegUnits;

  /// Index into list with lane mask sequences. The sequence contains a lanemask
  /// for every register unit.
  uint16_t RegUnitLaneMasks;
};

/// MCRegisterInfo base class - We assume that the target defines a static
/// array of MCRegisterDesc objects that represent all of the machine
/// registers that the target has.  As such, we simply have to track a pointer
/// to this array so that we can turn register number into a register
/// descriptor.
///
/// Note this class is designed to be a base class of TargetRegisterInfo, which
/// is the interface used by codegen. However, specific targets *should never*
/// specialize this class. MCRegisterInfo should only contain getters to access
/// TableGen generated physical register data. It must not be extended with
/// virtual methods.
///
class MCRegisterInfo {
public:
  using regclass_iterator = const MCRegisterClass *;

  /// DwarfLLVMRegPair - Emitted by tablegen so Dwarf<->LLVM reg mappings can be
  /// performed with a binary search.
  struct DwarfLLVMRegPair {
    unsigned FromReg;
    unsigned ToReg;

    bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
  };

  /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
  /// index; a value of (uint16_t)-1 in either field marks the range invalid.
  struct SubRegCoveredBits {
    uint16_t Offset;
    uint16_t Size;
  };

private:
  const MCRegisterDesc *Desc;                 // Pointer to the descriptor array
  unsigned NumRegs;                           // Number of entries in the array
  MCRegister RAReg;                           // Return address register
  MCRegister PCReg;                           // Program counter register
  const MCRegisterClass *Classes;             // Pointer to the regclass array
  unsigned NumClasses;                        // Number of entries in the array
  unsigned NumRegUnits;                       // Number of regunits.
  const MCPhysReg (*RegUnitRoots)[2];         // Pointer to regunit root table.
  const int16_t *DiffLists;                   // Pointer to the difflists array
  const LaneBitmask *RegUnitMaskSequences;    // Pointer to lane mask sequences
                                              // for register units.
  const char *RegStrings;                     // Pointer to the string table.
  const char *RegClassStrings;                // Pointer to the class strings.
  const uint16_t *SubRegIndices;              // Pointer to the subreg lookup
                                              // array.
  const SubRegCoveredBits *SubRegIdxRanges;   // Pointer to the subreg covered
                                              // bit ranges array.
  unsigned NumSubRegIndices;                  // Number of subreg indices.
  const uint16_t *RegEncodingTable;           // Pointer to array of register
                                              // encodings.

  unsigned L2DwarfRegsSize;
  unsigned EHL2DwarfRegsSize;
  unsigned Dwarf2LRegsSize;
  unsigned EHDwarf2LRegsSize;
  const DwarfLLVMRegPair *L2DwarfRegs;        // LLVM to Dwarf regs mapping
  const DwarfLLVMRegPair *EHL2DwarfRegs;      // LLVM to Dwarf regs mapping EH
  const DwarfLLVMRegPair *Dwarf2LRegs;        // Dwarf to LLVM regs mapping
  const DwarfLLVMRegPair *EHDwarf2LRegs;      // Dwarf to LLVM regs mapping EH
  DenseMap<MCRegister, int> L2SEHRegs;        // LLVM to SEH regs mapping
  DenseMap<MCRegister, int> L2CVRegs;         // LLVM to CV regs mapping

  /// Iterator class that can traverse the differentially encoded values in
  /// DiffLists. Don't use this class directly; use one of the adaptors below.
  class DiffListIterator
      : public iterator_facade_base<DiffListIterator, std::forward_iterator_tag,
                                    unsigned> {
    unsigned Val = 0;
    const int16_t *List = nullptr;

  public:
    /// Constructs an invalid iterator, which is also the end iterator.
    /// Call init() to point to something useful.
    DiffListIterator() = default;

    /// Point the iterator to InitVal, decoding subsequent values from DiffList.
    void init(unsigned InitVal, const int16_t *DiffList) {
      Val = InitVal;
      List = DiffList;
    }

    /// Returns true if this iterator is not yet at the end.
    bool isValid() const { return List; }

    /// Dereference the iterator to get the value at the current position.
    const unsigned &operator*() const { return Val; }

    using DiffListIterator::iterator_facade_base::operator++;
    /// Pre-increment to move to the next position.
    DiffListIterator &operator++() {
      assert(isValid() && "Cannot move off the end of the list.");
      int16_t D = *List++;
      Val += D;
      // The end of the list is encoded as a 0 differential.
      if (!D)
        List = nullptr;
      return *this;
    }

    bool operator==(const DiffListIterator &Other) const {
      return List == Other.List;
    }
  };

public:
  /// Return an iterator range over all sub-registers of \p Reg, excluding \p
  /// Reg.
  iterator_range<MCSubRegIterator> subregs(MCRegister Reg) const;

  /// Return an iterator range over all sub-registers of \p Reg, including \p
  /// Reg.
  iterator_range<MCSubRegIterator> subregs_inclusive(MCRegister Reg) const;

  /// Return an iterator range over all super-registers of \p Reg, excluding \p
  /// Reg.
  iterator_range<MCSuperRegIterator> superregs(MCRegister Reg) const;

  /// Return an iterator range over all super-registers of \p Reg, including \p
  /// Reg.
  iterator_range<MCSuperRegIterator> superregs_inclusive(MCRegister Reg) const;

  /// Return an iterator range over all sub- and super-registers of \p Reg,
  /// including \p Reg.
  detail::concat_range<const MCPhysReg, iterator_range<MCSubRegIterator>,
                       iterator_range<MCSuperRegIterator>>
  sub_and_superregs_inclusive(MCRegister Reg) const;

  /// Returns an iterator range over all regunits for \p Reg.
  iterator_range<MCRegUnitIterator> regunits(MCRegister Reg) const;

  // These iterators are allowed to sub-class DiffListIterator and access
  // internal list pointers.
  friend class MCSubRegIterator;
  friend class MCSubRegIndexIterator;
  friend class MCSuperRegIterator;
  friend class MCRegUnitIterator;
  friend class MCRegUnitMaskIterator;
  friend class MCRegUnitRootIterator;

  /// Initialize MCRegisterInfo, called by TableGen
  /// auto-generated routines. *DO NOT USE*.
  void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
                          unsigned PC, const MCRegisterClass *C, unsigned NC,
                          const MCPhysReg (*RURoots)[2], unsigned NRU,
                          const int16_t *DL, const LaneBitmask *RUMS,
                          const char *Strings, const char *ClassStrings,
                          const uint16_t *SubIndices, unsigned NumIndices,
                          const SubRegCoveredBits *SubIdxRanges,
                          const uint16_t *RET) {
    Desc = D;
    NumRegs = NR;
    RAReg = RA;
    PCReg = PC;
    Classes = C;
    DiffLists = DL;
    RegUnitMaskSequences = RUMS;
    RegStrings = Strings;
    RegClassStrings = ClassStrings;
    NumClasses = NC;
    RegUnitRoots = RURoots;
    NumRegUnits = NRU;
    SubRegIndices = SubIndices;
    NumSubRegIndices = NumIndices;
    SubRegIdxRanges = SubIdxRanges;
    RegEncodingTable = RET;

    // Initialize DWARF register mapping variables
    EHL2DwarfRegs = nullptr;
    EHL2DwarfRegsSize = 0;
    L2DwarfRegs = nullptr;
    L2DwarfRegsSize = 0;
    EHDwarf2LRegs = nullptr;
    EHDwarf2LRegsSize = 0;
    Dwarf2LRegs = nullptr;
    Dwarf2LRegsSize = 0;
  }

  /// Used to initialize LLVM register to Dwarf
  /// register number mapping. Called by TableGen auto-generated routines.
  /// *DO NOT USE*.
  void mapLLVMRegsToDwarfRegs(const DwarfLLVMRegPair *Map, unsigned Size,
                              bool isEH) {
    if (isEH) {
      EHL2DwarfRegs = Map;
      EHL2DwarfRegsSize = Size;
    } else {
      L2DwarfRegs = Map;
      L2DwarfRegsSize = Size;
    }
  }

  /// Used to initialize Dwarf register to LLVM
  /// register number mapping. Called by TableGen auto-generated routines.
  /// *DO NOT USE*.
  void mapDwarfRegsToLLVMRegs(const DwarfLLVMRegPair *Map, unsigned Size,
                              bool isEH) {
    if (isEH) {
      EHDwarf2LRegs = Map;
      EHDwarf2LRegsSize = Size;
    } else {
      Dwarf2LRegs = Map;
      Dwarf2LRegsSize = Size;
    }
  }

  /// mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register
  /// number mapping. By default the SEH register number is just the same
  /// as the LLVM register number.
  /// FIXME: TableGen these numbers. Currently this requires target specific
  /// initialization code.
  void mapLLVMRegToSEHReg(MCRegister LLVMReg, int SEHReg) {
    L2SEHRegs[LLVMReg] = SEHReg;
  }

  void mapLLVMRegToCVReg(MCRegister LLVMReg, int CVReg) {
    L2CVRegs[LLVMReg] = CVReg;
  }

  /// This method should return the register where the return
  /// address can be found.
  MCRegister getRARegister() const {
    return RAReg;
  }

  /// Return the register which is the program counter.
  MCRegister getProgramCounter() const {
    return PCReg;
  }

  const MCRegisterDesc &operator[](MCRegister RegNo) const {
    assert(RegNo < NumRegs &&
           "Attempting to access record for invalid register number!");
    return Desc[RegNo];
  }

  /// Provide a get method, equivalent to [], but more useful with a
  /// pointer to this object.
  const MCRegisterDesc &get(MCRegister RegNo) const {
    return operator[](RegNo);
  }

  /// Returns the physical register number of sub-register "Idx"
  /// for physical register Reg. Return zero if the sub-register does not
  /// exist.
  MCRegister getSubReg(MCRegister Reg, unsigned Idx) const;

  /// Return a super-register of the specified register
  /// Reg so its sub-register of index SubIdx is Reg.
  MCRegister getMatchingSuperReg(MCRegister Reg, unsigned SubIdx,
                                 const MCRegisterClass *RC) const;

  /// For a given register pair, return the sub-register index
  /// if the second register is a sub-register of the first. Return zero
  /// otherwise.
  unsigned getSubRegIndex(MCRegister RegNo, MCRegister SubRegNo) const;

  /// Get the size of the bit range covered by a sub-register index.
  /// If the index isn't continuous, return the sum of the sizes of its parts.
  /// If the index is used to access subregisters of different sizes, return -1.
  unsigned getSubRegIdxSize(unsigned Idx) const;

  /// Get the offset of the bit range covered by a sub-register index.
  /// If an Offset doesn't make sense (the index isn't continuous, or is used to
  /// access sub-registers at different offsets), return -1.
  unsigned getSubRegIdxOffset(unsigned Idx) const;

  /// Return the human-readable symbolic target-specific name for the
  /// specified physical register.
  const char *getName(MCRegister RegNo) const {
    return RegStrings + get(RegNo).Name;
  }

  /// Return the number of registers this target has (useful for
  /// sizing arrays holding per register information)
  unsigned getNumRegs() const {
    return NumRegs;
  }

  /// Return the number of sub-register indices
  /// understood by the target. Index 0 is reserved for the no-op sub-register,
  /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers.
  unsigned getNumSubRegIndices() const {
    return NumSubRegIndices;
  }

  /// Return the number of (native) register units in the
  /// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
  /// can be accessed through MCRegUnitIterator defined below.
  unsigned getNumRegUnits() const {
    return NumRegUnits;
  }

  /// Map a target register to an equivalent dwarf register
  /// number.  Returns -1 if there is no equivalent value.  The second
  /// parameter allows targets to use different numberings for EH info and
  /// debugging info.
  int getDwarfRegNum(MCRegister RegNum, bool isEH) const;

  /// Map a dwarf register back to a target register. Returns std::nullopt if
  /// there is no mapping.
  std::optional<unsigned> getLLVMRegNum(unsigned RegNum, bool isEH) const;

  /// Map a target EH register number to an equivalent DWARF register
  /// number.
  int getDwarfRegNumFromDwarfEHRegNum(unsigned RegNum) const;

  /// Map a target register to an equivalent SEH register
  /// number.  Returns the LLVM register number if there is no equivalent value.
  int getSEHRegNum(MCRegister RegNum) const;

  /// Map a target register to an equivalent CodeView register
  /// number.
  int getCodeViewRegNum(MCRegister RegNum) const;

  regclass_iterator regclass_begin() const { return Classes; }
  regclass_iterator regclass_end() const { return Classes+NumClasses; }
  iterator_range<regclass_iterator> regclasses() const {
    return make_range(regclass_begin(), regclass_end());
  }

  unsigned getNumRegClasses() const {
    return (unsigned)(regclass_end()-regclass_begin());
  }

  /// Returns the register class associated with the enumeration
  /// value.  See class MCOperandInfo.
  const MCRegisterClass& getRegClass(unsigned i) const {
    assert(i < getNumRegClasses() && "Register Class ID out of range");
    return Classes[i];
  }

  const char *getRegClassName(const MCRegisterClass *Class) const {
    return RegClassStrings + Class->NameIdx;
  }

  /// Returns the encoding for RegNo.
  uint16_t getEncodingValue(MCRegister RegNo) const {
    assert(RegNo < NumRegs &&
           "Attempting to get encoding for invalid register number!");
    return RegEncodingTable[RegNo];
  }

  /// Returns true if RegB is a sub-register of RegA.
  bool isSubRegister(MCRegister RegA, MCRegister RegB) const {
    return isSuperRegister(RegB, RegA);
  }

  /// Returns true if RegB is a super-register of RegA.
  bool isSuperRegister(MCRegister RegA, MCRegister RegB) const;

  /// Returns true if RegB is a sub-register of RegA or if RegB == RegA.
  bool isSubRegisterEq(MCRegister RegA, MCRegister RegB) const {
    return isSuperRegisterEq(RegB, RegA);
  }

  /// Returns true if RegB is a super-register of RegA or if
  /// RegB == RegA.
  bool isSuperRegisterEq(MCRegister RegA, MCRegister RegB) const {
    return RegA == RegB || isSuperRegister(RegA, RegB);
  }

  /// Returns true if RegB is a super-register or sub-register of RegA
  /// or if RegB == RegA.
  bool isSuperOrSubRegisterEq(MCRegister RegA, MCRegister RegB) const {
    return isSubRegisterEq(RegA, RegB) || isSuperRegister(RegA, RegB);
  }

  /// Returns true if the two registers are equal or alias each other.
  bool regsOverlap(MCRegister RegA, MCRegister RegB) const;
};
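
// Illustrative sketch (not part of the original header), assuming `MRI` is an
// initialized MCRegisterInfo and `Reg` a physical register of the target:
//
//   const char *Name = MRI.getName(Reg);
//   int DwarfNo = MRI.getDwarfRegNum(Reg, /*isEH=*/false); // -1 if unmapped
//   uint16_t Enc = MRI.getEncodingValue(Reg);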

//===----------------------------------------------------------------------===//
//                          Register List Iterators
//===----------------------------------------------------------------------===//

// MCRegisterInfo provides lists of super-registers, sub-registers, and
// aliasing registers. Use these iterator classes to traverse the lists.

/// MCSubRegIterator enumerates all sub-registers of Reg.
/// If IncludeSelf is set, Reg itself is included in the list.
class MCSubRegIterator
    : public iterator_adaptor_base<MCSubRegIterator,
                                   MCRegisterInfo::DiffListIterator,
                                   std::forward_iterator_tag, const MCPhysReg> {
  // Cache the current value, so that we can return a reference to it.
  MCPhysReg Val;

public:
  /// Constructs an end iterator.
  MCSubRegIterator() = default;

  MCSubRegIterator(MCRegister Reg, const MCRegisterInfo *MCRI,
                   bool IncludeSelf = false) {
    assert(MCRegister::isPhysicalRegister(Reg.id()));
    I.init(Reg.id(), MCRI->DiffLists + MCRI->get(Reg).SubRegs);
    // Initially, the iterator points to Reg itself.
    Val = MCPhysReg(*I);
    if (!IncludeSelf)
      ++*this;
  }

  const MCPhysReg &operator*() const { return Val; }

  using iterator_adaptor_base::operator++;
  MCSubRegIterator &operator++() {
    Val = MCPhysReg(*++I);
    return *this;
  }

  /// Returns true if this iterator is not yet at the end.
  bool isValid() const { return I.isValid(); }
};

/// Iterator that enumerates the sub-registers of a Reg and the associated
/// sub-register indices.
class MCSubRegIndexIterator {
  MCSubRegIterator SRIter;
  const uint16_t *SRIndex;

public:
  /// Constructs an iterator that traverses subregisters and their
  /// associated subregister indices.
  MCSubRegIndexIterator(MCRegister Reg, const MCRegisterInfo *MCRI)
    : SRIter(Reg, MCRI) {
    SRIndex = MCRI->SubRegIndices + MCRI->get(Reg).SubRegIndices;
  }

  /// Returns current sub-register.
  MCRegister getSubReg() const {
    return *SRIter;
  }

  /// Returns sub-register index of the current sub-register.
  unsigned getSubRegIndex() const {
    return *SRIndex;
  }

  /// Returns true if this iterator is not yet at the end.
  bool isValid() const { return SRIter.isValid(); }

  /// Moves to the next position.
  void operator++() {
    ++SRIter;
    ++SRIndex;
  }
};
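
// Illustrative sketch (not part of the original header), assuming `Reg` and a
// `const MCRegisterInfo *MCRI` are in scope: walk each sub-register together
// with the sub-register index that extracts it from Reg. use() is a
// hypothetical callback.
//
//   for (MCSubRegIndexIterator SRI(Reg, MCRI); SRI.isValid(); ++SRI)
//     use(SRI.getSubReg(), SRI.getSubRegIndex());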

/// MCSuperRegIterator enumerates all super-registers of Reg.
/// If IncludeSelf is set, Reg itself is included in the list.
class MCSuperRegIterator
    : public iterator_adaptor_base<MCSuperRegIterator,
                                   MCRegisterInfo::DiffListIterator,
                                   std::forward_iterator_tag, const MCPhysReg> {
  // Cache the current value, so that we can return a reference to it.
  MCPhysReg Val;

public:
  /// Constructs an end iterator.
  MCSuperRegIterator() = default;

  MCSuperRegIterator(MCRegister Reg, const MCRegisterInfo *MCRI,
                     bool IncludeSelf = false) {
    assert(MCRegister::isPhysicalRegister(Reg.id()));
    I.init(Reg.id(), MCRI->DiffLists + MCRI->get(Reg).SuperRegs);
    // Initially, the iterator points to Reg itself.
    Val = MCPhysReg(*I);
    if (!IncludeSelf)
      ++*this;
  }

  const MCPhysReg &operator*() const { return Val; }

  using iterator_adaptor_base::operator++;
  MCSuperRegIterator &operator++() {
    Val = MCPhysReg(*++I);
    return *this;
  }

  /// Returns true if this iterator is not yet at the end.
  bool isValid() const { return I.isValid(); }
};

// Definition for isSuperRegister. Put it down here since it needs the
// iterator defined above in addition to the MCRegisterInfo class itself.
inline bool MCRegisterInfo::isSuperRegister(MCRegister RegA, MCRegister RegB) const {
  return is_contained(superregs(RegA), RegB);
}

//===----------------------------------------------------------------------===//
//                               Register Units
//===----------------------------------------------------------------------===//

// MCRegUnitIterator enumerates a list of register units for Reg. The list is
// in ascending numerical order.
class MCRegUnitIterator
    : public iterator_adaptor_base<MCRegUnitIterator,
                                   MCRegisterInfo::DiffListIterator,
                                   std::forward_iterator_tag, const MCRegUnit> {
  // The value must be kept in sync with RegisterInfoEmitter.cpp.
  static constexpr unsigned RegUnitBits = 12;
  // Cache the current value, so that we can return a reference to it.
  MCRegUnit Val;

public:
  /// Constructs an end iterator.
  MCRegUnitIterator() = default;

  MCRegUnitIterator(MCRegister Reg, const MCRegisterInfo *MCRI) {
    assert(Reg && "Null register has no regunits");
    assert(MCRegister::isPhysicalRegister(Reg.id()));
    // Decode the RegUnits MCRegisterDesc field.
    unsigned RU = MCRI->get(Reg).RegUnits;
    unsigned FirstRU = RU & ((1u << RegUnitBits) - 1);
    unsigned Offset = RU >> RegUnitBits;
    I.init(FirstRU, MCRI->DiffLists + Offset);
    Val = MCRegUnit(*I);
  }

  const MCRegUnit &operator*() const { return Val; }

  using iterator_adaptor_base::operator++;
  MCRegUnitIterator &operator++() {
    Val = MCRegUnit(*++I);
    return *this;
  }

  /// Returns true if this iterator is not yet at the end.
  bool isValid() const { return I.isValid(); }
};

/// MCRegUnitMaskIterator enumerates a list of register units and their
/// associated lane masks for Reg. The register units are in ascending
/// numerical order.
class MCRegUnitMaskIterator {
  MCRegUnitIterator RUIter;
  const LaneBitmask *MaskListIter;

public:
  MCRegUnitMaskIterator() = default;

  /// Constructs an iterator that traverses the register units and their
  /// associated LaneMasks in Reg.
  MCRegUnitMaskIterator(MCRegister Reg, const MCRegisterInfo *MCRI)
    : RUIter(Reg, MCRI) {
      uint16_t Idx = MCRI->get(Reg).RegUnitLaneMasks;
      MaskListIter = &MCRI->RegUnitMaskSequences[Idx];
  }

  /// Returns a (RegUnit, LaneMask) pair.
  std::pair<unsigned,LaneBitmask> operator*() const {
    return std::make_pair(*RUIter, *MaskListIter);
  }

  /// Returns true if this iterator is not yet at the end.
  bool isValid() const { return RUIter.isValid(); }

  /// Moves to the next position.
  void operator++() {
    ++MaskListIter;
    ++RUIter;
  }
};

// Each register unit has one or two root registers. The complete set of
// registers containing a register unit is the union of the roots and their
// super-registers. All registers aliasing Unit can be visited like this:
//
//   for (MCRegUnitRootIterator RI(Unit, MCRI); RI.isValid(); ++RI) {
//     for (MCSuperRegIterator SI(*RI, MCRI, true); SI.isValid(); ++SI)
//       visit(*SI);
//   }

/// MCRegUnitRootIterator enumerates the root registers of a register unit.
class MCRegUnitRootIterator {
  uint16_t Reg0 = 0;
  uint16_t Reg1 = 0;

public:
  MCRegUnitRootIterator() = default;

  MCRegUnitRootIterator(unsigned RegUnit, const MCRegisterInfo *MCRI) {
    assert(RegUnit < MCRI->getNumRegUnits() && "Invalid register unit");
    Reg0 = MCRI->RegUnitRoots[RegUnit][0];
    Reg1 = MCRI->RegUnitRoots[RegUnit][1];
  }

  /// Dereference to get the current root register.
  unsigned operator*() const {
    return Reg0;
  }

  /// Check if the iterator is at the end of the list.
  bool isValid() const {
    return Reg0;
  }

  /// Preincrement to move to the next root register.
  void operator++() {
    assert(isValid() && "Cannot move off the end of the list.");
    Reg0 = Reg1;
    Reg1 = 0;
  }
};

/// MCRegAliasIterator enumerates all registers aliasing Reg.  If IncludeSelf is
/// set, Reg itself is included in the list.  This iterator does not guarantee
/// any ordering or that entries are unique.
class MCRegAliasIterator {
private:
  MCRegister Reg;
  const MCRegisterInfo *MCRI;
  bool IncludeSelf;

  MCRegUnitIterator RI;
  MCRegUnitRootIterator RRI;
  MCSuperRegIterator SI;

public:
  MCRegAliasIterator(MCRegister Reg, const MCRegisterInfo *MCRI,
                     bool IncludeSelf)
    : Reg(Reg), MCRI(MCRI), IncludeSelf(IncludeSelf) {
    // Initialize the iterators.
    for (RI = MCRegUnitIterator(Reg, MCRI); RI.isValid(); ++RI) {
      for (RRI = MCRegUnitRootIterator(*RI, MCRI); RRI.isValid(); ++RRI) {
        for (SI = MCSuperRegIterator(*RRI, MCRI, true); SI.isValid(); ++SI) {
          if (IncludeSelf || Reg != *SI)
            return;
        }
      }
    }
  }

  bool isValid() const { return RI.isValid(); }

  MCRegister operator*() const {
    assert(SI.isValid() && "Cannot dereference an invalid iterator.");
    return *SI;
  }

  void advance() {
    // Assuming SI is valid.
    ++SI;
    if (SI.isValid()) return;

    ++RRI;
    if (RRI.isValid()) {
      SI = MCSuperRegIterator(*RRI, MCRI, true);
      return;
    }

    ++RI;
    if (RI.isValid()) {
      RRI = MCRegUnitRootIterator(*RI, MCRI);
      SI = MCSuperRegIterator(*RRI, MCRI, true);
    }
  }

  void operator++() {
    assert(isValid() && "Cannot move off the end of the list.");
    do advance();
    while (!IncludeSelf && isValid() && *SI == Reg);
  }
};
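
// A usage sketch (illustrative): visit every register overlapping Reg,
// including Reg itself. MRI is assumed to be a target-initialized
// MCRegisterInfo and markClobbered a hypothetical callback.
//
//   for (MCRegAliasIterator AI(Reg, &MRI, /*IncludeSelf=*/true);
//        AI.isValid(); ++AI)
//     markClobbered(*AI);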

inline iterator_range<MCSubRegIterator>
MCRegisterInfo::subregs(MCRegister Reg) const {
  return make_range({Reg, this, /*IncludeSelf=*/false}, MCSubRegIterator());
}

inline iterator_range<MCSubRegIterator>
MCRegisterInfo::subregs_inclusive(MCRegister Reg) const {
  return make_range({Reg, this, /*IncludeSelf=*/true}, MCSubRegIterator());
}

inline iterator_range<MCSuperRegIterator>
MCRegisterInfo::superregs(MCRegister Reg) const {
  return make_range({Reg, this, /*IncludeSelf=*/false}, MCSuperRegIterator());
}

inline iterator_range<MCSuperRegIterator>
MCRegisterInfo::superregs_inclusive(MCRegister Reg) const {
  return make_range({Reg, this, /*IncludeSelf=*/true}, MCSuperRegIterator());
}

inline detail::concat_range<const MCPhysReg, iterator_range<MCSubRegIterator>,
                            iterator_range<MCSuperRegIterator>>
MCRegisterInfo::sub_and_superregs_inclusive(MCRegister Reg) const {
  return concat<const MCPhysReg>(subregs_inclusive(Reg), superregs(Reg));
}

inline iterator_range<MCRegUnitIterator>
MCRegisterInfo::regunits(MCRegister Reg) const {
  return make_range({Reg, this}, MCRegUnitIterator());
}
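
// The range helpers above allow range-based for loops instead of manual
// iterator handling, e.g. (illustrative; use() is a hypothetical consumer):
//
//   for (MCPhysReg Super : MRI.superregs(Reg))
//     use(Super);
//   for (MCRegUnit Unit : MRI.regunits(Reg))
//     use(Unit);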

} // end namespace llvm

#endif // LLVM_MC_MCREGISTERINFO_H

//===- ConstantPools.h - Keep track of assembler-generated ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the ConstantPool and AssemblerConstantPools classes.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_CONSTANTPOOLS_H
#define LLVM_MC_CONSTANTPOOLS_H

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/SMLoc.h"
#include <cstdint>
#include <map>

namespace llvm {

class MCContext;
class MCExpr;
class MCSection;
class MCStreamer;
class MCSymbol;
class MCSymbolRefExpr;

struct ConstantPoolEntry {
  ConstantPoolEntry(MCSymbol *L, const MCExpr *Val, unsigned Sz, SMLoc Loc_)
    : Label(L), Value(Val), Size(Sz), Loc(Loc_) {}

  MCSymbol *Label;
  const MCExpr *Value;
  unsigned Size;
  SMLoc Loc;
};

// A class to keep track of assembler-generated constant pools that are used
// to implement the ldr-pseudo.
class ConstantPool {
  using EntryVecTy = SmallVector<ConstantPoolEntry, 4>;
  EntryVecTy Entries;
  std::map<int64_t, const MCSymbolRefExpr *> CachedConstantEntries;
  DenseMap<const MCSymbol *, const MCSymbolRefExpr *> CachedSymbolEntries;

public:
  // Initialize a new empty constant pool
  ConstantPool() = default;

  // Add a new entry to the constant pool in the next slot.
  // \param Value is the new entry to put in the constant pool.
  // \param Size is the size in bytes of the entry
  //
  // \returns a MCExpr that references the newly inserted value
  const MCExpr *addEntry(const MCExpr *Value, MCContext &Context,
                         unsigned Size, SMLoc Loc);

  // Emit the contents of the constant pool using the provided streamer.
  void emitEntries(MCStreamer &Streamer);

  // Return true if the constant pool is empty
  bool empty();

  void clearCache();
};

class AssemblerConstantPools {
  // Map type used to keep track of per-Section constant pools used by the
  // ldr-pseudo opcode. The map associates a section to its constant pool. The
  // constant pool is a vector of (label, value) pairs. When the ldr pseudo is
  // parsed, we insert a new (label, value) pair into the constant pool for the
  // current section and attach an MCSymbolRefExpr for the new label as the
  // ldr's operand. After all the user input has been parsed, we output the
  // (label, value) pairs in each constant pool at the end of its section.
  //
  // We use MapVector for the map type to ensure stable iteration over the
  // sections at the end of the parse. We need to iterate over the sections in
  // a stable order so that the constant pools are printed in a deterministic
  // order when writing an assembly file.
  using ConstantPoolMapTy = MapVector<MCSection *, ConstantPool>;
  ConstantPoolMapTy ConstantPools;

public:
  void emitAll(MCStreamer &Streamer);
  void emitForCurrentSection(MCStreamer &Streamer);
  void clearCacheForCurrentSection(MCStreamer &Streamer);
  const MCExpr *addEntry(MCStreamer &Streamer, const MCExpr *Expr,
                         unsigned Size, SMLoc Loc);

private:
  ConstantPool *getConstantPool(MCSection *Section);
  ConstantPool &getOrCreateConstantPool(MCSection *Section);
};
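
// A usage sketch (illustrative; Streamer, Expr, and Loc are assumed to come
// from a target assembly parser handling an ldr pseudo):
//
//   AssemblerConstantPools Pools;
//   const MCExpr *Ref = Pools.addEntry(Streamer, Expr, /*Size=*/4, Loc);
//   // ... use Ref as the ldr's operand ...
//   Pools.emitAll(Streamer); // flush every per-section pool once parsing ends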

} // end namespace llvm

#endif // LLVM_MC_CONSTANTPOOLS_H

//===- MCSectionCOFF.h - COFF Machine Code Sections -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the MCSectionCOFF class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONCOFF_H
#define LLVM_MC_MCSECTIONCOFF_H

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/SectionKind.h"
#include <cassert>

namespace llvm {

class MCSymbol;

/// This represents a section on Windows
class MCSectionCOFF final : public MCSection {
  // FIXME: The following fields should not be mutable, but are for now so the
  // asm parser can honor the .linkonce directive.

  /// This is the Characteristics field of a section, drawn from the enums
  /// below.
  mutable unsigned Characteristics;

  /// The unique IDs used with the .pdata and .xdata sections created internally
  /// by the assembler. This ID is used to ensure that for every .text section,
  /// there is exactly one .pdata and one .xdata section, which is required by
  /// the Microsoft incremental linker. This data is mutable because this ID is
  /// not notionally part of the section.
  mutable unsigned WinCFISectionID = ~0U;

  /// The COMDAT symbol of this section. Only valid if this is a COMDAT section.
  /// Two COMDAT sections are merged if they have the same COMDAT symbol.
  MCSymbol *COMDATSymbol;

  /// This is the Selection field for the section symbol, if it is a COMDAT
  /// section, i.e. (Characteristics & IMAGE_SCN_LNK_COMDAT) != 0.
  mutable int Selection;

private:
  friend class MCContext;
  // The storage of Name is owned by MCContext's COFFUniquingMap.
  MCSectionCOFF(StringRef Name, unsigned Characteristics,
                MCSymbol *COMDATSymbol, int Selection, SectionKind K,
                MCSymbol *Begin)
      : MCSection(SV_COFF, Name, K, Begin), Characteristics(Characteristics),
        COMDATSymbol(COMDATSymbol), Selection(Selection) {
    assert((Characteristics & 0x00F00000) == 0 &&
           "alignment must not be set upon section creation");
  }

public:
  /// Decides whether a '.section' directive should be printed before the
  /// section name
  bool shouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;

  unsigned getCharacteristics() const { return Characteristics; }
  MCSymbol *getCOMDATSymbol() const { return COMDATSymbol; }
  int getSelection() const { return Selection; }

  void setSelection(int Selection) const;

  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override;
  bool useCodeAlign() const override;
  bool isVirtualSection() const override;
  StringRef getVirtualSectionKind() const override;

  unsigned getOrAssignWinCFISectionID(unsigned *NextID) const {
    if (WinCFISectionID == ~0U)
      WinCFISectionID = (*NextID)++;
    return WinCFISectionID;
  }

  static bool isImplicitlyDiscardable(StringRef Name) {
    return Name.startswith(".debug");
  }

  static bool classof(const MCSection *S) { return S->getVariant() == SV_COFF; }
};

} // end namespace llvm

#endif // LLVM_MC_MCSECTIONCOFF_H

//===-- llvm/MC/MCAsmInfoWasm.h - Wasm Asm info -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCASMINFOWASM_H
#define LLVM_MC_MCASMINFOWASM_H

#include "llvm/MC/MCAsmInfo.h"

namespace llvm {
class MCAsmInfoWasm : public MCAsmInfo {
  virtual void anchor();

protected:
  MCAsmInfoWasm();
};
} // namespace llvm

#endif

//===-- llvm/MC/MCRegister.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCREGISTER_H
#define LLVM_MC_MCREGISTER_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include <cassert>
#include <limits>

namespace llvm {

/// An unsigned integer type large enough to represent all physical registers,
/// but not necessarily virtual registers.
using MCPhysReg = uint16_t;

/// Register units are used to compute register aliasing. Every register has at
/// least one register unit, but it can have more. Two registers overlap if and
/// only if they have a common register unit.
///
/// A target with a complicated sub-register structure will typically have many
/// fewer register units than actual registers. MCRI::getNumRegUnits() returns
/// the number of register units in the target.
using MCRegUnit = unsigned;

/// Wrapper class representing physical registers. Should be passed by value.
class MCRegister {
  friend hash_code hash_value(const MCRegister &);
  unsigned Reg;

public:
  constexpr MCRegister(unsigned Val = 0) : Reg(Val) {}

  // Register numbers can represent physical registers, virtual registers, and
  // sometimes stack slots. The unsigned values are divided into these ranges:
  //
  //   0           Not a register, can be used as a sentinel.
  //   [1;2^30)    Physical registers assigned by TableGen.
  //   [2^30;2^31) Stack slots. (Rarely used.)
  //   [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
  //
  // Further sentinels can be allocated from the small negative integers.
  // DenseMapInfo<unsigned> uses -1u and -2u.
  static_assert(std::numeric_limits<decltype(Reg)>::max() >= 0xFFFFFFFF,
                "Reg isn't large enough to hold full range.");
  static constexpr unsigned NoRegister = 0u;
  static constexpr unsigned FirstPhysicalReg = 1u;
  static constexpr unsigned FirstStackSlot = 1u << 30;
  static constexpr unsigned VirtualRegFlag = 1u << 31;

  /// This is the portion of the positive number space that is not a physical
  /// register. StackSlot values do not exist in the MC layer, see
  /// Register::isStackSlot() for more information on them.
  ///
  static constexpr bool isStackSlot(unsigned Reg) {
    return FirstStackSlot <= Reg && Reg < VirtualRegFlag;
  }

  /// Return true if the specified register number is in
  /// the physical register namespace.
  static constexpr bool isPhysicalRegister(unsigned Reg) {
    return FirstPhysicalReg <= Reg && Reg < FirstStackSlot;
  }

  constexpr operator unsigned() const { return Reg; }

  /// Check that the provided unsigned value is a valid MCRegister.
  static MCRegister from(unsigned Val) {
    assert(Val == NoRegister || isPhysicalRegister(Val));
    return MCRegister(Val);
  }

  constexpr unsigned id() const { return Reg; }

  constexpr bool isValid() const { return Reg != NoRegister; }

  /// Comparisons between register objects
  constexpr bool operator==(const MCRegister &Other) const {
    return Reg == Other.Reg;
  }
  constexpr bool operator!=(const MCRegister &Other) const {
    return Reg != Other.Reg;
  }

  /// Comparisons against register constants. E.g.
  /// * R == AArch64::WZR
  /// * R == 0
  /// * R == VirtRegMap::NO_PHYS_REG
  constexpr bool operator==(unsigned Other) const { return Reg == Other; }
  constexpr bool operator!=(unsigned Other) const { return Reg != Other; }
  constexpr bool operator==(int Other) const { return Reg == unsigned(Other); }
  constexpr bool operator!=(int Other) const { return Reg != unsigned(Other); }
  // MSVC requires that we explicitly declare these two as well.
  constexpr bool operator==(MCPhysReg Other) const {
    return Reg == unsigned(Other);
  }
  constexpr bool operator!=(MCPhysReg Other) const {
    return Reg != unsigned(Other);
  }
};
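
// Worked examples of the number-space predicates above (illustrative):
//
//   MCRegister::isPhysicalRegister(1);        // true:  [1, 2^30)
//   MCRegister::isStackSlot(1u << 30);        // true:  [2^30, 2^31)
//   MCRegister::isPhysicalRegister(1u << 31); // false: virtual register range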

// Provide DenseMapInfo for MCRegister
template <> struct DenseMapInfo<MCRegister> {
  static inline unsigned getEmptyKey() {
    return DenseMapInfo<unsigned>::getEmptyKey();
  }
  static inline unsigned getTombstoneKey() {
    return DenseMapInfo<unsigned>::getTombstoneKey();
  }
  static unsigned getHashValue(const MCRegister &Val) {
    return DenseMapInfo<unsigned>::getHashValue(Val.id());
  }
  static bool isEqual(const MCRegister &LHS, const MCRegister &RHS) {
    return DenseMapInfo<unsigned>::isEqual(LHS.id(), RHS.id());
  }
};

inline hash_code hash_value(const MCRegister &Reg) {
  return hash_value(Reg.id());
}
} // namespace llvm

#endif // LLVM_MC_MCREGISTER_H

//===- llvm/MC/MCInstrAnalysis.h - InstrDesc target hooks -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MCInstrAnalysis class which the MCTargetDescs can
// derive from to give additional information to MC.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCINSTRANALYSIS_H
#define LLVM_MC_MCINSTRANALYSIS_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include <cstdint>
#include <vector>

namespace llvm {

class MCRegisterInfo;
class Triple;

class MCInstrAnalysis {
protected:
  friend class Target;

  const MCInstrInfo *Info;

public:
  MCInstrAnalysis(const MCInstrInfo *Info) : Info(Info) {}
  virtual ~MCInstrAnalysis() = default;

  virtual bool isBranch(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isBranch();
  }

  virtual bool isConditionalBranch(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isConditionalBranch();
  }

  virtual bool isUnconditionalBranch(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isUnconditionalBranch();
  }

  virtual bool isIndirectBranch(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isIndirectBranch();
  }

  virtual bool isCall(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isCall();
  }

  virtual bool isReturn(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isReturn();
  }

  virtual bool isTerminator(const MCInst &Inst) const {
    return Info->get(Inst.getOpcode()).isTerminator();
  }

  /// Returns true if at least one of the register writes performed by
  /// \param Inst implicitly clears the upper portion of all super-registers.
  ///
  /// Example: on X86-64, a write to EAX implicitly clears the upper half of
  /// RAX. Also (still on x86) an XMM write performed by an AVX 128-bit
  /// instruction implicitly clears the upper portion of the corresponding
  /// YMM register.
  ///
  /// This method also updates an APInt which is used as a mask of register
  /// writes. There is one bit for every explicit/implicit write performed by
  /// the instruction. If a write implicitly clears its super-registers, the
  /// corresponding bit is set; otherwise it is cleared.
  ///
  /// The first bits in the APInt are related to explicit writes. The remaining
  /// bits are related to implicit writes. The sequence of writes follows the
  /// machine operand sequence. For implicit writes, the sequence is defined by
  /// the MCInstrDesc.
  ///
  /// The assumption is that the bit-width of the APInt is correctly set by
  /// the caller. The default implementation conservatively assumes that none of
  /// the writes clears the upper portion of a super-register.
  virtual bool clearsSuperRegisters(const MCRegisterInfo &MRI,
                                    const MCInst &Inst,
                                    APInt &Writes) const;

  /// Returns true if MI is a dependency breaking zero-idiom for the given
  /// subtarget.
  ///
  /// Mask is used to identify input operands that have their dependency
  /// broken. Each bit of the mask is associated with a specific input operand.
  /// Bits associated with explicit input operands are laid out first in the
  /// mask; implicit operands come after explicit operands.
  ///
  /// Dependencies are broken only for operands that have their corresponding
  /// bit set. Operands that have their bit cleared, or that don't have a
  /// corresponding bit in the mask, don't have their dependency broken. Note
  /// that Mask may not be big enough to describe all operands. The assumption
  /// for operands that don't have a corresponding bit in the mask is that
  /// those are still data dependent.
  ///
  /// The only exception to the rule is for when Mask has all zeroes.
  /// A zero mask means: dependencies are broken for all explicit register
  /// operands.
  virtual bool isZeroIdiom(const MCInst &MI, APInt &Mask,
                           unsigned CPUID) const {
    return false;
  }

  /// Returns true if MI is a dependency breaking instruction for the
  /// subtarget associated with CPUID.
  ///
  /// The value computed by a dependency breaking instruction is not dependent
  /// on the inputs. An example of dependency breaking instruction on X86 is
  /// `XOR %eax, %eax`.
  ///
  /// If MI is a dependency breaking instruction for subtarget CPUID, then Mask
  /// can be inspected to identify independent operands.
  ///
  /// Essentially, each bit of the mask corresponds to an input operand.
  /// Explicit operands are laid out first in the mask; implicit operands follow
  /// explicit operands. Bits are set for operands that are independent.
  ///
  /// Note that the number of bits in Mask may not be equivalent to the sum of
  /// explicit and implicit operands in MI. Operands that don't have a
  /// corresponding bit in Mask are assumed "not independent".
  ///
  /// The only exception is for when Mask is all zeroes. That means: explicit
  /// input operands of MI are independent.
  virtual bool isDependencyBreaking(const MCInst &MI, APInt &Mask,
                                    unsigned CPUID) const {
    return isZeroIdiom(MI, Mask, CPUID);
  }

  /// Returns true if MI is a candidate for move elimination.
  ///
  /// Different subtargets may apply different constraints to optimizable
  /// register moves. For example, on most X86 subtargets, a candidate for move
  /// elimination cannot specify the same register for both source and
  /// destination.
  virtual bool isOptimizableRegisterMove(const MCInst &MI,
                                         unsigned CPUID) const {
    return false;
  }

  /// Given a branch instruction, try to get the address the branch
  /// targets. Returns true on success and stores the address in Target.
  virtual bool
  evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                 uint64_t &Target) const;

  /// Given an instruction, tries to get the address of a memory operand.
  /// Returns
  /// the address on success.
  virtual std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const;

  /// Given an instruction with a memory operand that could require relocation,
  /// returns the offset within the instruction of that relocation.
  virtual std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst, uint64_t Size) const;

  /// Returns (PLT virtual address, GOT virtual address) pairs for PLT entries.
  virtual std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const Triple &TargetTriple) const {
    return {};
  }
};
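
// A minimal disassembler-side sketch (illustrative; MIA is assumed to come
// from Target::createMCInstrAnalysis, and Inst was decoded at address Addr
// with byte size Size):
//
//   uint64_t Target;
//   if (MIA->isBranch(Inst) && MIA->evaluateBranch(Inst, Addr, Size, Target))
//     ; // Target now holds the branch destination address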

} // end namespace llvm

#endif // LLVM_MC_MCINSTRANALYSIS_H

//===- llvm/MC/MCMachObjectWriter.h - Mach Object Writer --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCMACHOBJECTWRITER_H
#define LLVM_MC_MCMACHOBJECTWRITER_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/EndianStream.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

class MachObjectWriter;

class MCMachObjectTargetWriter : public MCObjectTargetWriter {
  const unsigned Is64Bit : 1;
  const uint32_t CPUType;
protected:
  uint32_t CPUSubtype;
public:
  unsigned LocalDifference_RIT = 0;

protected:
  MCMachObjectTargetWriter(bool Is64Bit_, uint32_t CPUType_,
                           uint32_t CPUSubtype_);

  void setLocalDifferenceRelocationType(unsigned Type) {
    LocalDifference_RIT = Type;
  }

public:
  virtual ~MCMachObjectTargetWriter();

  Triple::ObjectFormatType getFormat() const override { return Triple::MachO; }
  static bool classof(const MCObjectTargetWriter *W) {
    return W->getFormat() == Triple::MachO;
  }

  /// \name Lifetime Management
  /// @{

  virtual void reset() {}

  /// @}

  /// \name Accessors
  /// @{

  bool is64Bit() const { return Is64Bit; }
  uint32_t getCPUType() const { return CPUType; }
  uint32_t getCPUSubtype() const { return CPUSubtype; }
  unsigned getLocalDifferenceRelocationType() const {
    return LocalDifference_RIT;
  }

  /// @}

  /// \name API
  /// @{

  virtual void recordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
                                const MCAsmLayout &Layout,
                                const MCFragment *Fragment,
                                const MCFixup &Fixup, MCValue Target,
                                uint64_t &FixedValue) = 0;

  /// @}
};

class MachObjectWriter : public MCObjectWriter {
  /// Helper struct for containing some precomputed information on symbols.
  struct MachSymbolData {
    const MCSymbol *Symbol;
    uint64_t StringIndex;
    uint8_t SectionIndex;

    // Support lexicographic sorting.
    bool operator<(const MachSymbolData &RHS) const;
  };

  /// The target specific Mach-O writer instance.
  std::unique_ptr<MCMachObjectTargetWriter> TargetObjectWriter;

  /// \name Relocation Data
  /// @{

  struct RelAndSymbol {
    const MCSymbol *Sym;
    MachO::any_relocation_info MRE;
    RelAndSymbol(const MCSymbol *Sym, const MachO::any_relocation_info &MRE)
        : Sym(Sym), MRE(MRE) {}
  };

  DenseMap<const MCSection *, std::vector<RelAndSymbol>> Relocations;
  DenseMap<const MCSection *, unsigned> IndirectSymBase;

  SectionAddrMap SectionAddress;

  /// @}
  /// \name Symbol Table Data
  /// @{

  StringTableBuilder StringTable;
  std::vector<MachSymbolData> LocalSymbolData;
  std::vector<MachSymbolData> ExternalSymbolData;
  std::vector<MachSymbolData> UndefinedSymbolData;

  /// @}

  MachSymbolData *findSymbolData(const MCSymbol &Sym);

  void writeWithPadding(StringRef Str, uint64_t Size);

public:
  MachObjectWriter(std::unique_ptr<MCMachObjectTargetWriter> MOTW,
                   raw_pwrite_stream &OS, bool IsLittleEndian)
      : TargetObjectWriter(std::move(MOTW)),
        StringTable(TargetObjectWriter->is64Bit() ? StringTableBuilder::MachO64
                                                  : StringTableBuilder::MachO),
        W(OS, IsLittleEndian ? support::little : support::big) {}

  support::endian::Writer W;

  const MCSymbol &findAliasedSymbol(const MCSymbol &Sym) const;

  /// \name Lifetime management Methods
  /// @{

  void reset() override;

  /// @}

  /// \name Utility Methods
  /// @{

  bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);

  SectionAddrMap &getSectionAddressMap() { return SectionAddress; }

  uint64_t getSectionAddress(const MCSection *Sec) const {
    return SectionAddress.lookup(Sec);
  }
  uint64_t getSymbolAddress(const MCSymbol &S, const MCAsmLayout &Layout) const;

  uint64_t getFragmentAddress(const MCFragment *Fragment,
                              const MCAsmLayout &Layout) const;

  uint64_t getPaddingSize(const MCSection *SD, const MCAsmLayout &Layout) const;

  bool doesSymbolRequireExternRelocation(const MCSymbol &S);

  /// @}

  /// \name Target Writer Proxy Accessors
  /// @{

  bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
  bool isX86_64() const {
    uint32_t CPUType = TargetObjectWriter->getCPUType();
    return CPUType == MachO::CPU_TYPE_X86_64;
  }

  /// @}

  void writeHeader(MachO::HeaderFileType Type, unsigned NumLoadCommands,
                   unsigned LoadCommandsSize, bool SubsectionsViaSymbols);

  /// Write a segment load command.
  ///
  /// \param NumSections The number of sections in this segment.
  /// \param SectionDataSize The total size of the sections.
  void writeSegmentLoadCommand(StringRef Name, unsigned NumSections,
                               uint64_t VMAddr, uint64_t VMSize,
                               uint64_t SectionDataStartOffset,
                               uint64_t SectionDataSize, uint32_t MaxProt,
                               uint32_t InitProt);

  void writeSection(const MCAsmLayout &Layout, const MCSection &Sec,
                    uint64_t VMAddr, uint64_t FileOffset, unsigned Flags,
                    uint64_t RelocationsStart, unsigned NumRelocations);

  void writeSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
                              uint32_t StringTableOffset,
                              uint32_t StringTableSize);

  void writeDysymtabLoadCommand(
      uint32_t FirstLocalSymbol, uint32_t NumLocalSymbols,
      uint32_t FirstExternalSymbol, uint32_t NumExternalSymbols,
      uint32_t FirstUndefinedSymbol, uint32_t NumUndefinedSymbols,
      uint32_t IndirectSymbolOffset, uint32_t NumIndirectSymbols);

  void writeNlist(MachSymbolData &MSD, const MCAsmLayout &Layout);

  void writeLinkeditLoadCommand(uint32_t Type, uint32_t DataOffset,
                                uint32_t DataSize);

  void writeLinkerOptionsLoadCommand(const std::vector<std::string> &Options);

  // FIXME: We really need to improve the relocation validation. Basically, we
  // want to implement a separate computation which evaluates the relocation
  // entry as the linker would, and verifies that the resultant fixup value is
  // exactly what the encoder wanted. This will catch several classes of
  // problems:
  //
  //  - Relocation entry bugs, the two algorithms are unlikely to have the same
  //    exact bug.
  //
  //  - Relaxation issues, where we forget to relax something.
  //
  //  - Input errors, where something cannot be correctly encoded. 'as' allows
  //    these through in many cases.

  // Add a relocation to be output in the object file. At the time this is
  // called, the symbol indexes are not known, so if the relocation refers
  // to a symbol it should be passed as \p RelSymbol so that it can be updated
  // afterwards. If the relocation doesn't refer to a symbol, nullptr should be
  // used.
  void addRelocation(const MCSymbol *RelSymbol, const MCSection *Sec,
                     MachO::any_relocation_info &MRE) {
    RelAndSymbol P(RelSymbol, MRE);
    Relocations[Sec].push_back(P);
  }

  void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
                        const MCFragment *Fragment, const MCFixup &Fixup,
                        MCValue Target, uint64_t &FixedValue) override;

  void bindIndirectSymbols(MCAssembler &Asm);

  /// Compute the symbol table data.
  void computeSymbolTable(MCAssembler &Asm,
                          std::vector<MachSymbolData> &LocalSymbolData,
                          std::vector<MachSymbolData> &ExternalSymbolData,
                          std::vector<MachSymbolData> &UndefinedSymbolData);

  void computeSectionAddresses(const MCAssembler &Asm,
                               const MCAsmLayout &Layout);

  void executePostLayoutBinding(MCAssembler &Asm,
                                const MCAsmLayout &Layout) override;

  bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
                                              const MCSymbol &A,
                                              const MCSymbol &B,
                                              bool InSet) const override;

  bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
                                              const MCSymbol &SymA,
                                              const MCFragment &FB, bool InSet,
                                              bool IsPCRel) const override;

  void populateAddrSigSection(MCAssembler &Asm);

  uint64_t writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
};

/// Construct a new Mach-O writer instance.
///
/// This routine takes ownership of the target writer subclass.
///
/// \param MOTW - The target specific Mach-O writer subclass.
/// \param OS - The stream to write to.
/// \returns The constructed object writer.
std::unique_ptr<MCObjectWriter>
createMachObjectWriter(std::unique_ptr<MCMachObjectTargetWriter> MOTW,
                       raw_pwrite_stream &OS, bool IsLittleEndian);

} // end namespace llvm

#endif // LLVM_MC_MCMACHOBJECTWRITER_H

//===- MCSymbolMachO.h -  ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCSYMBOLMACHO_H
#define LLVM_MC_MCSYMBOLMACHO_H

#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCSymbol.h"

namespace llvm {
class MCSymbolMachO : public MCSymbol {
  /// We store the value for the 'desc' symbol field in the
  /// lowest 16 bits of the implementation defined flags.
  enum MachOSymbolFlags : uint16_t { // See <mach-o/nlist.h>.
    SF_DescFlagsMask                        = 0xFFFF,

    // Reference type flags.
    SF_ReferenceTypeMask                    = 0x0007,
    SF_ReferenceTypeUndefinedNonLazy        = 0x0000,
    SF_ReferenceTypeUndefinedLazy           = 0x0001,
    SF_ReferenceTypeDefined                 = 0x0002,
    SF_ReferenceTypePrivateDefined          = 0x0003,
    SF_ReferenceTypePrivateUndefinedNonLazy = 0x0004,
    SF_ReferenceTypePrivateUndefinedLazy    = 0x0005,

    // Other 'desc' flags.
    SF_ThumbFunc                            = 0x0008,
    SF_NoDeadStrip                          = 0x0020,
    SF_WeakReference                        = 0x0040,
    SF_WeakDefinition                       = 0x0080,
    SF_SymbolResolver                       = 0x0100,
    SF_AltEntry                             = 0x0200,
    SF_Cold                                 = 0x0400,

    // Common alignment
    SF_CommonAlignmentMask                  = 0xF0FF,
    SF_CommonAlignmentShift                 = 8
  };

public:
  MCSymbolMachO(const StringMapEntry<bool> *Name, bool isTemporary)
      : MCSymbol(SymbolKindMachO, Name, isTemporary) {}

  // Reference type methods.

  void clearReferenceType() const {
    modifyFlags(0, SF_ReferenceTypeMask);
  }

  void setReferenceTypeUndefinedLazy(bool Value) const {
    modifyFlags(Value ? SF_ReferenceTypeUndefinedLazy : 0,
                SF_ReferenceTypeUndefinedLazy);
  }

  // Other 'desc' methods.

  void setThumbFunc() const {
    modifyFlags(SF_ThumbFunc, SF_ThumbFunc);
  }

  bool isNoDeadStrip() const {
    return getFlags() & SF_NoDeadStrip;
  }
  void setNoDeadStrip() const {
    modifyFlags(SF_NoDeadStrip, SF_NoDeadStrip);
  }

  bool isWeakReference() const {
    return getFlags() & SF_WeakReference;
  }
  void setWeakReference() const {
    modifyFlags(SF_WeakReference, SF_WeakReference);
  }

  bool isWeakDefinition() const {
    return getFlags() & SF_WeakDefinition;
  }
  void setWeakDefinition() const {
    modifyFlags(SF_WeakDefinition, SF_WeakDefinition);
  }

  bool isSymbolResolver() const {
    return getFlags() & SF_SymbolResolver;
  }
  void setSymbolResolver() const {
    modifyFlags(SF_SymbolResolver, SF_SymbolResolver);
  }

  void setAltEntry() const {
    modifyFlags(SF_AltEntry, SF_AltEntry);
  }

  bool isAltEntry() const {
    return getFlags() & SF_AltEntry;
  }

  void setCold() const { modifyFlags(SF_Cold, SF_Cold); }

  bool isCold() const { return getFlags() & SF_Cold; }

  void setDesc(unsigned Value) const {
    assert(Value == (Value & SF_DescFlagsMask) &&
           "Invalid .desc value!");
    setFlags(Value & SF_DescFlagsMask);
  }

  /// Get the encoded value of the flags as they will be emitted into the
  /// Mach-O binary.
  uint16_t getEncodedFlags(bool EncodeAsAltEntry) const {
    uint16_t Flags = getFlags();

    // Common alignment is packed into the 'desc' bits.
    if (isCommon()) {
      if (MaybeAlign MaybeAlignment = getCommonAlignment()) {
        Align Alignment = *MaybeAlignment;
        unsigned Log2Size = Log2(Alignment);
        if (Log2Size > 15)
          report_fatal_error("invalid 'common' alignment '" +
                                 Twine(Alignment.value()) + "' for '" +
                                 getName() + "'",
                             false);
        Flags = (Flags & SF_CommonAlignmentMask) |
                (Log2Size << SF_CommonAlignmentShift);
      }
    }

    if (EncodeAsAltEntry)
      Flags |= SF_AltEntry;

    return Flags;
  }

  static bool classof(const MCSymbol *S) { return S->isMachO(); }
};
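
// Worked example of the common-alignment packing in getEncodedFlags
// (illustrative): a common symbol aligned to 16 bytes has Log2Size == 4, so
// (Flags & SF_CommonAlignmentMask) | (4 << SF_CommonAlignmentShift) stores 4
// in bits 8-11 of the 'desc' field.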
}

#endif

//===- MCFragment.h - Fragment type hierarchy -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCFRAGMENT_H
#define LLVM_MC_MCFRAGMENT_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/SMLoc.h"
#include <cstdint>
#include <utility>

namespace llvm {

class MCSection;
class MCSubtargetInfo;
class MCSymbol;

class MCFragment : public ilist_node_with_parent<MCFragment, MCSection> {
  friend class MCAsmLayout;

public:
  enum FragmentType : uint8_t {
    FT_Align,
    FT_Data,
    FT_CompactEncodedInst,
    FT_Fill,
    FT_Nops,
    FT_Relaxable,
    FT_Org,
    FT_Dwarf,
    FT_DwarfFrame,
    FT_LEB,
    FT_BoundaryAlign,
    FT_SymbolId,
    FT_CVInlineLines,
    FT_CVDefRange,
    FT_PseudoProbe,
    FT_Dummy
  };

private:
  /// The data for the section this fragment is in.
  MCSection *Parent;

  /// The atom this fragment is in, as represented by its defining symbol.
  const MCSymbol *Atom;

  /// The offset of this fragment in its section. This is ~0 until
  /// initialized.
  uint64_t Offset;

  /// The layout order of this fragment.
  unsigned LayoutOrder;

  /// The subsection this fragment belongs to. This is 0 if the fragment is not
  /// in any subsection.
  unsigned SubsectionNumber = 0;

  FragmentType Kind;

  /// Whether fragment is being laid out.
  bool IsBeingLaidOut;

protected:
  bool HasInstructions;
  bool LinkerRelaxable = false;

  MCFragment(FragmentType Kind, bool HasInstructions,
             MCSection *Parent = nullptr);

public:
  MCFragment() = delete;
  MCFragment(const MCFragment &) = delete;
  MCFragment &operator=(const MCFragment &) = delete;

  /// Destroys the current fragment.
  ///
  /// This must be used instead of delete as MCFragment is non-virtual.
  /// This method will dispatch to the appropriate subclass.
  void destroy();

  FragmentType getKind() const { return Kind; }

  MCSection *getParent() const { return Parent; }
  void setParent(MCSection *Value) { Parent = Value; }

  const MCSymbol *getAtom() const { return Atom; }
  void setAtom(const MCSymbol *Value) { Atom = Value; }

  unsigned getLayoutOrder() const { return LayoutOrder; }
  void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }

  /// Does this fragment have instructions emitted into it? By default
  /// this is false, but specific fragment types may set it to true.
  bool hasInstructions() const { return HasInstructions; }

  void dump() const;

  void setSubsectionNumber(unsigned Value) { SubsectionNumber = Value; }
  unsigned getSubsectionNumber() const { return SubsectionNumber; }
};

class MCDummyFragment : public MCFragment {
public:
  explicit MCDummyFragment(MCSection *Sec) : MCFragment(FT_Dummy, false, Sec) {}

  static bool classof(const MCFragment *F) { return F->getKind() == FT_Dummy; }
};

/// Interface implemented by fragments that contain encoded instructions and/or
/// data.
///
class MCEncodedFragment : public MCFragment {
  /// Should this fragment be aligned to the end of a bundle?
  bool AlignToBundleEnd = false;

  uint8_t BundlePadding = 0;

protected:
  MCEncodedFragment(MCFragment::FragmentType FType, bool HasInstructions,
                    MCSection *Sec)
      : MCFragment(FType, HasInstructions, Sec) {}

  /// The MCSubtargetInfo in effect when the instruction was encoded.
  /// It must be non-null for instructions.
  const MCSubtargetInfo *STI = nullptr;

public:
  static bool classof(const MCFragment *F) {
    MCFragment::FragmentType Kind = F->getKind();
    switch (Kind) {
    default:
      return false;
    case MCFragment::FT_Relaxable:
    case MCFragment::FT_CompactEncodedInst:
    case MCFragment::FT_Data:
    case MCFragment::FT_Dwarf:
    case MCFragment::FT_DwarfFrame:
    case MCFragment::FT_PseudoProbe:
      return true;
    }
  }

  /// Should this fragment be placed at the end of an aligned bundle?
  bool alignToBundleEnd() const { return AlignToBundleEnd; }
  void setAlignToBundleEnd(bool V) { AlignToBundleEnd = V; }

  /// Get the padding size that must be inserted before this fragment.
  /// Used for bundling. By default, no padding is inserted.
  /// Note that padding size is restricted to 8 bits. This is an optimization
  /// to reduce the amount of space used for each fragment. In practice, larger
  /// padding should never be required.
  uint8_t getBundlePadding() const { return BundlePadding; }

  /// Set the padding size for this fragment. By default it's a no-op,
  /// and only some fragments have a meaningful implementation.
  void setBundlePadding(uint8_t N) { BundlePadding = N; }

  /// Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
  /// Guaranteed to be non-null if hasInstructions() == true
  const MCSubtargetInfo *getSubtargetInfo() const { return STI; }

  /// Record that the fragment contains instructions with the MCSubtargetInfo in
  /// effect when the instruction was encoded.
  void setHasInstructions(const MCSubtargetInfo &STI) {
    HasInstructions = true;
    this->STI = &STI;
  }
};

/// Interface implemented by fragments that contain encoded instructions and/or
/// data.
///
template<unsigned ContentsSize>
class MCEncodedFragmentWithContents : public MCEncodedFragment {
  SmallVector<char, ContentsSize> Contents;

protected:
  MCEncodedFragmentWithContents(MCFragment::FragmentType FType,
                                bool HasInstructions,
                                MCSection *Sec)
      : MCEncodedFragment(FType, HasInstructions, Sec) {}

public:
  SmallVectorImpl<char> &getContents() { return Contents; }
  const SmallVectorImpl<char> &getContents() const { return Contents; }
};

/// Interface implemented by fragments that contain encoded instructions and/or
/// data and also have fixups registered.
///
template<unsigned ContentsSize, unsigned FixupsSize>
class MCEncodedFragmentWithFixups :
  public MCEncodedFragmentWithContents<ContentsSize> {

  /// The list of fixups in this fragment.
  SmallVector<MCFixup, FixupsSize> Fixups;

protected:
  MCEncodedFragmentWithFixups(MCFragment::FragmentType FType,
                              bool HasInstructions,
                              MCSection *Sec)
      : MCEncodedFragmentWithContents<ContentsSize>(FType, HasInstructions,
                                                    Sec) {}

public:

  using const_fixup_iterator = SmallVectorImpl<MCFixup>::const_iterator;
  using fixup_iterator = SmallVectorImpl<MCFixup>::iterator;

  SmallVectorImpl<MCFixup> &getFixups() { return Fixups; }
  const SmallVectorImpl<MCFixup> &getFixups() const { return Fixups; }

  fixup_iterator fixup_begin() { return Fixups.begin(); }
  const_fixup_iterator fixup_begin() const { return Fixups.begin(); }

  fixup_iterator fixup_end() { return Fixups.end(); }
  const_fixup_iterator fixup_end() const { return Fixups.end(); }

  static bool classof(const MCFragment *F) {
    MCFragment::FragmentType Kind = F->getKind();
    return Kind == MCFragment::FT_Relaxable || Kind == MCFragment::FT_Data ||
           Kind == MCFragment::FT_CVDefRange || Kind == MCFragment::FT_Dwarf ||
           Kind == MCFragment::FT_DwarfFrame;
  }
};

/// Fragment for data and encoded instructions.
///
class MCDataFragment : public MCEncodedFragmentWithFixups<32, 4> {
public:
  MCDataFragment(MCSection *Sec = nullptr)
      : MCEncodedFragmentWithFixups<32, 4>(FT_Data, false, Sec) {}

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Data;
  }

  bool isLinkerRelaxable() const { return LinkerRelaxable; }
  void setLinkerRelaxable() { LinkerRelaxable = true; }
};
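
// A construction sketch (illustrative; Sec is assumed to be an MCSection
// owned by an MCContext, and STI a target MCSubtargetInfo; fragments are
// destroyed via destroy(), not delete):
//
//   auto *DF = new MCDataFragment(Sec);
//   DF->getContents().append({'\x90', '\x90'}); // raw encoded bytes
//   DF->setHasInstructions(STI);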

/// This is a compact (memory-size-wise) fragment for holding an encoded
/// instruction (non-relaxable) that has no fixups registered. When applicable,
/// it can be used instead of MCDataFragment and lead to lower memory
/// consumption.
///
class MCCompactEncodedInstFragment : public MCEncodedFragmentWithContents<4> {
public:
  MCCompactEncodedInstFragment(MCSection *Sec = nullptr)
      : MCEncodedFragmentWithContents(FT_CompactEncodedInst, true, Sec) {
  }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_CompactEncodedInst;
  }
};

/// A relaxable fragment holds on to its MCInst, since it may need to be
/// relaxed during the assembler layout and relaxation stage.
///
class MCRelaxableFragment : public MCEncodedFragmentWithFixups<8, 1> {

  /// The instruction this is a fragment for.
  MCInst Inst;
  /// Can we auto pad the instruction?
  bool AllowAutoPadding = false;

public:
  MCRelaxableFragment(const MCInst &Inst, const MCSubtargetInfo &STI,
                      MCSection *Sec = nullptr)
      : MCEncodedFragmentWithFixups(FT_Relaxable, true, Sec),
        Inst(Inst) { this->STI = &STI; }

  const MCInst &getInst() const { return Inst; }
  void setInst(const MCInst &Value) { Inst = Value; }

  bool getAllowAutoPadding() const { return AllowAutoPadding; }
  void setAllowAutoPadding(bool V) { AllowAutoPadding = V; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Relaxable;
  }
};

class MCAlignFragment : public MCFragment {
  /// The alignment to ensure, in bytes.
  Align Alignment;

  /// Flag to indicate that (optimal) NOPs should be emitted instead
  /// of using the provided value. The exact interpretation of this flag is
  /// target dependent.
  bool EmitNops : 1;

  /// Value to use for filling padding bytes.
  int64_t Value;

  /// The size of the integer (in bytes) of \p Value.
  unsigned ValueSize;

  /// The maximum number of bytes to emit; if the alignment
  /// cannot be satisfied in this width then this fragment is ignored.
  unsigned MaxBytesToEmit;

  /// When emitting Nops some subtargets have specific nop encodings.
  const MCSubtargetInfo *STI = nullptr;

public:
  MCAlignFragment(Align Alignment, int64_t Value, unsigned ValueSize,
                  unsigned MaxBytesToEmit, MCSection *Sec = nullptr)
      : MCFragment(FT_Align, false, Sec), Alignment(Alignment), EmitNops(false),
        Value(Value), ValueSize(ValueSize), MaxBytesToEmit(MaxBytesToEmit) {}

  Align getAlignment() const { return Alignment; }

  int64_t getValue() const { return Value; }

  unsigned getValueSize() const { return ValueSize; }

  unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }

  bool hasEmitNops() const { return EmitNops; }
  void setEmitNops(bool Value, const MCSubtargetInfo *STI) {
    EmitNops = Value;
    this->STI = STI;
  }

  const MCSubtargetInfo *getSubtargetInfo() const { return STI; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Align;
  }
};

class MCFillFragment : public MCFragment {
  uint8_t ValueSize;
  /// Value to use for filling bytes.
  uint64_t Value;
  /// The number of bytes to insert.
  const MCExpr &NumValues;

  /// Source location of the directive that this fragment was created for.
  SMLoc Loc;

public:
  MCFillFragment(uint64_t Value, uint8_t VSize, const MCExpr &NumValues,
                 SMLoc Loc, MCSection *Sec = nullptr)
      : MCFragment(FT_Fill, false, Sec), ValueSize(VSize), Value(Value),
        NumValues(NumValues), Loc(Loc) {}

  uint64_t getValue() const { return Value; }
  uint8_t getValueSize() const { return ValueSize; }
  const MCExpr &getNumValues() const { return NumValues; }

  SMLoc getLoc() const { return Loc; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Fill;
  }
};

class MCNopsFragment : public MCFragment {
  /// The number of bytes to insert.
  int64_t Size;
  /// Maximum number of bytes allowed in each NOP instruction.
  int64_t ControlledNopLength;

  /// Source location of the directive that this fragment was created for.
  SMLoc Loc;

  /// When emitting Nops some subtargets have specific nop encodings.
  const MCSubtargetInfo &STI;

public:
  MCNopsFragment(int64_t NumBytes, int64_t ControlledNopLength, SMLoc L,
                 const MCSubtargetInfo &STI, MCSection *Sec = nullptr)
      : MCFragment(FT_Nops, false, Sec), Size(NumBytes),
        ControlledNopLength(ControlledNopLength), Loc(L), STI(STI) {}

  int64_t getNumBytes() const { return Size; }
  int64_t getControlledNopLength() const { return ControlledNopLength; }

  SMLoc getLoc() const { return Loc; }

  const MCSubtargetInfo *getSubtargetInfo() const { return &STI; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Nops;
  }
};

class MCOrgFragment : public MCFragment {
  /// Value to use for filling bytes.
  int8_t Value;

  /// The offset this fragment should start at.
  const MCExpr *Offset;

  /// Source location of the directive that this fragment was created for.
  SMLoc Loc;

public:
  MCOrgFragment(const MCExpr &Offset, int8_t Value, SMLoc Loc,
                MCSection *Sec = nullptr)
      : MCFragment(FT_Org, false, Sec), Value(Value), Offset(&Offset),
        Loc(Loc) {}

  const MCExpr &getOffset() const { return *Offset; }

  uint8_t getValue() const { return Value; }

  SMLoc getLoc() const { return Loc; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Org;
  }
};

class MCLEBFragment : public MCFragment {
  /// True if this is a sleb128, false if uleb128.
  bool IsSigned;

  /// The value this fragment should contain.
  const MCExpr *Value;

  SmallString<8> Contents;

public:
  MCLEBFragment(const MCExpr &Value_, bool IsSigned_, MCSection *Sec = nullptr)
      : MCFragment(FT_LEB, false, Sec), IsSigned(IsSigned_), Value(&Value_) {
    Contents.push_back(0);
  }

  const MCExpr &getValue() const { return *Value; }

  bool isSigned() const { return IsSigned; }

  SmallString<8> &getContents() { return Contents; }
  const SmallString<8> &getContents() const { return Contents; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_LEB;
  }
};

class MCDwarfLineAddrFragment : public MCEncodedFragmentWithFixups<8, 1> {
  /// The value of the difference between the two line numbers
  /// between two .loc dwarf directives.
  int64_t LineDelta;

  /// The expression for the difference of the two symbols that
  /// make up the address delta between two .loc dwarf directives.
  const MCExpr *AddrDelta;

public:
  MCDwarfLineAddrFragment(int64_t LineDelta, const MCExpr &AddrDelta,
                          MCSection *Sec = nullptr)
      : MCEncodedFragmentWithFixups<8, 1>(FT_Dwarf, false, Sec),
        LineDelta(LineDelta), AddrDelta(&AddrDelta) {}

  int64_t getLineDelta() const { return LineDelta; }

  const MCExpr &getAddrDelta() const { return *AddrDelta; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_Dwarf;
  }
};

class MCDwarfCallFrameFragment : public MCEncodedFragmentWithFixups<8, 1> {
  /// The expression for the difference of the two symbols that
  /// make up the address delta between two .cfi_* dwarf directives.
  const MCExpr *AddrDelta;

public:
  MCDwarfCallFrameFragment(const MCExpr &AddrDelta, MCSection *Sec = nullptr)
      : MCEncodedFragmentWithFixups<8, 1>(FT_DwarfFrame, false, Sec),
        AddrDelta(&AddrDelta) {}

  const MCExpr &getAddrDelta() const { return *AddrDelta; }
  void setAddrDelta(const MCExpr *E) { AddrDelta = E; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_DwarfFrame;
  }
};

/// Represents a symbol table index fragment.
class MCSymbolIdFragment : public MCFragment {
  const MCSymbol *Sym;

public:
  MCSymbolIdFragment(const MCSymbol *Sym, MCSection *Sec = nullptr)
      : MCFragment(FT_SymbolId, false, Sec), Sym(Sym) {}

  const MCSymbol *getSymbol() { return Sym; }
  const MCSymbol *getSymbol() const { return Sym; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_SymbolId;
  }
};

/// Fragment representing the binary annotations produced by the
/// .cv_inline_linetable directive.
class MCCVInlineLineTableFragment : public MCFragment {
  unsigned SiteFuncId;
  unsigned StartFileId;
  unsigned StartLineNum;
  const MCSymbol *FnStartSym;
  const MCSymbol *FnEndSym;
  SmallString<8> Contents;

  /// CodeViewContext has the real knowledge about this format, so let it access
  /// our members.
  friend class CodeViewContext;

public:
  MCCVInlineLineTableFragment(unsigned SiteFuncId, unsigned StartFileId,
                              unsigned StartLineNum, const MCSymbol *FnStartSym,
                              const MCSymbol *FnEndSym,
                              MCSection *Sec = nullptr)
      : MCFragment(FT_CVInlineLines, false, Sec), SiteFuncId(SiteFuncId),
        StartFileId(StartFileId), StartLineNum(StartLineNum),
        FnStartSym(FnStartSym), FnEndSym(FnEndSym) {}

  const MCSymbol *getFnStartSym() const { return FnStartSym; }
  const MCSymbol *getFnEndSym() const { return FnEndSym; }

  SmallString<8> &getContents() { return Contents; }
  const SmallString<8> &getContents() const { return Contents; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_CVInlineLines;
  }
};

/// Fragment representing the .cv_def_range directive.
class MCCVDefRangeFragment : public MCEncodedFragmentWithFixups<32, 4> {
  SmallVector<std::pair<const MCSymbol *, const MCSymbol *>, 2> Ranges;
  SmallString<32> FixedSizePortion;

  /// CodeViewContext has the real knowledge about this format, so let it access
  /// our members.
  friend class CodeViewContext;

public:
  MCCVDefRangeFragment(
      ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> Ranges,
      StringRef FixedSizePortion, MCSection *Sec = nullptr)
      : MCEncodedFragmentWithFixups<32, 4>(FT_CVDefRange, false, Sec),
        Ranges(Ranges.begin(), Ranges.end()),
        FixedSizePortion(FixedSizePortion) {}

  ArrayRef<std::pair<const MCSymbol *, const MCSymbol *>> getRanges() const {
    return Ranges;
  }

  StringRef getFixedSizePortion() const { return FixedSizePortion.str(); }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_CVDefRange;
  }
};

/// Represents required padding such that a particular other set of fragments
/// does not cross a particular power-of-two boundary. The other fragments must
/// follow this one within the same section.
class MCBoundaryAlignFragment : public MCFragment {
  /// The alignment requirement of the branch to be aligned.
  Align AlignBoundary;
  /// The last fragment in the set of fragments to be aligned.
  const MCFragment *LastFragment = nullptr;
  /// The size of the fragment.  The size is lazily set during relaxation, and
  /// is not meaningful before that.
  uint64_t Size = 0;

  /// When emitting Nops some subtargets have specific nop encodings.
  const MCSubtargetInfo &STI;

public:
  MCBoundaryAlignFragment(Align AlignBoundary, const MCSubtargetInfo &STI,
                          MCSection *Sec = nullptr)
      : MCFragment(FT_BoundaryAlign, false, Sec), AlignBoundary(AlignBoundary),
        STI(STI) {}

  uint64_t getSize() const { return Size; }
  void setSize(uint64_t Value) { Size = Value; }

  Align getAlignment() const { return AlignBoundary; }
  void setAlignment(Align Value) { AlignBoundary = Value; }

  const MCFragment *getLastFragment() const { return LastFragment; }
  void setLastFragment(const MCFragment *F) {
    assert(!F || getParent() == F->getParent());
    LastFragment = F;
  }

  const MCSubtargetInfo *getSubtargetInfo() const { return &STI; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_BoundaryAlign;
  }
};
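
// For example, with AlignBoundary = Align(32), relaxation sizes this fragment
// so that the fragments following it, up to and including LastFragment, do
// not cross a 32-byte boundary. (A sketch of the intent; the exact sizing is
// decided by the assembler's relaxation algorithm.)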

class MCPseudoProbeAddrFragment : public MCEncodedFragmentWithFixups<8, 1> {
  /// The expression for the difference of the two symbols that
  /// make up the address delta between two .pseudoprobe directives.
  const MCExpr *AddrDelta;

public:
  MCPseudoProbeAddrFragment(const MCExpr *AddrDelta, MCSection *Sec = nullptr)
      : MCEncodedFragmentWithFixups<8, 1>(FT_PseudoProbe, false, Sec),
        AddrDelta(AddrDelta) {}

  const MCExpr &getAddrDelta() const { return *AddrDelta; }

  static bool classof(const MCFragment *F) {
    return F->getKind() == MCFragment::FT_PseudoProbe;
  }
};
} // end namespace llvm

#endif // LLVM_MC_MCFRAGMENT_H
//===-- llvm/MC/MCValue.h - MCValue class -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCValue class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCVALUE_H
#define LLVM_MC_MCVALUE_H

#include "llvm/MC/MCExpr.h"
#include "llvm/Support/DataTypes.h"

namespace llvm {
class raw_ostream;

/// This represents an "assembler immediate".
///
///  In its most general form, this can hold ":Kind:(SymbolA - SymbolB +
///  imm64)".  Not all targets supports relocations of this general form, but we
///  need to represent this anyway.
///
/// In general both SymbolA and SymbolB will also have a modifier
/// analogous to the top-level Kind. Current targets are not expected
/// to make use of both though. The choice comes down to whether
/// relocation modifiers apply to the closest symbol or the whole
/// expression.
///
/// Note that this class must remain a simple POD value class, because we need
/// it to live in unions etc.
class MCValue {
  const MCSymbolRefExpr *SymA = nullptr, *SymB = nullptr;
  int64_t Cst = 0;
  uint32_t RefKind = 0;

public:
  MCValue() = default;
  int64_t getConstant() const { return Cst; }
  const MCSymbolRefExpr *getSymA() const { return SymA; }
  const MCSymbolRefExpr *getSymB() const { return SymB; }
  uint32_t getRefKind() const { return RefKind; }

  /// Is this an absolute (as opposed to relocatable) value.
  bool isAbsolute() const { return !SymA && !SymB; }

  /// Print the value to the stream \p OS.
  void print(raw_ostream &OS) const;

  /// Print the value to stderr.
  void dump() const;

  MCSymbolRefExpr::VariantKind getAccessVariant() const;

  static MCValue get(const MCSymbolRefExpr *SymA,
                     const MCSymbolRefExpr *SymB = nullptr,
                     int64_t Val = 0, uint32_t RefKind = 0) {
    MCValue R;
    R.Cst = Val;
    R.SymA = SymA;
    R.SymB = SymB;
    R.RefKind = RefKind;
    return R;
  }

  static MCValue get(int64_t Val) {
    MCValue R;
    R.Cst = Val;
    R.SymA = nullptr;
    R.SymB = nullptr;
    R.RefKind = 0;
    return R;
  }

};
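
// Example (illustrative; SymA and SymB are hypothetical MCSymbolRefExprs):
// the relocatable expression "a - b + 4" would be represented as
// MCValue::get(SymA, SymB, 4), while a bare constant MCValue::get(4) carries
// no symbols and therefore satisfies isAbsolute().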

} // end namespace llvm

#endif
//===-- llvm/MC/MCSectionGOFF.h - GOFF Machine Code Sections ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file declares the MCSectionGOFF class, which contains all of the
/// necessary machine code sections for the GOFF file format.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_MC_MCSECTIONGOFF_H
#define LLVM_MC_MCSECTIONGOFF_H

#include "llvm/BinaryFormat/GOFF.h"
#include "llvm/MC/MCSection.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class MCExpr;

class MCSectionGOFF final : public MCSection {
private:
  MCSection *Parent;
  const MCExpr *SubsectionId;

  friend class MCContext;
  MCSectionGOFF(StringRef Name, SectionKind K, MCSection *P, const MCExpr *Sub)
      : MCSection(SV_GOFF, Name, K, nullptr), Parent(P), SubsectionId(Sub) {}

public:
  void printSwitchToSection(const MCAsmInfo &MAI, const Triple &T,
                            raw_ostream &OS,
                            const MCExpr *Subsection) const override {
    OS << "\t.section\t\"" << getName() << "\"\n";
  }

  bool useCodeAlign() const override { return false; }

  bool isVirtualSection() const override { return false; }

  MCSection *getParent() const { return Parent; }
  const MCExpr *getSubsectionId() const { return SubsectionId; }

  static bool classof(const MCSection *S) { return S->getVariant() == SV_GOFF; }
};
} // end namespace llvm

#endif
//===--- Random.h - Utilities for random sampling -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utilities for random sampling.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_RANDOM_H
#define LLVM_FUZZMUTATE_RANDOM_H

#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <limits>
#include <random>
#include <type_traits>
namespace llvm {

/// Return a uniformly distributed random value between \c Min and \c Max
template <typename T, typename GenT> T uniform(GenT &Gen, T Min, T Max) {
  return std::uniform_int_distribution<T>(Min, Max)(Gen);
}

/// Return a uniformly distributed random value of type \c T
template <typename T, typename GenT> T uniform(GenT &Gen) {
  return uniform<T>(Gen, std::numeric_limits<T>::min(),
                    std::numeric_limits<T>::max());
}
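
// For example, uniform<int>(Gen, 0, 9) returns one of {0, ..., 9} with equal
// probability, where Gen is any standard random engine such as std::mt19937.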

/// Randomly selects an item by sampling into a set with an unknown number of
/// elements, which may each be weighted to be more likely choices.
template <typename T, typename GenT> class ReservoirSampler {
  GenT &RandGen;
  std::remove_const_t<T> Selection = {};
  uint64_t TotalWeight = 0;

public:
  ReservoirSampler(GenT &RandGen) : RandGen(RandGen) {}

  uint64_t totalWeight() const { return TotalWeight; }
  bool isEmpty() const { return TotalWeight == 0; }

  const T &getSelection() const {
    assert(!isEmpty() && "Nothing selected");
    return Selection;
  }

  explicit operator bool() const { return !isEmpty(); }
  const T &operator*() const { return getSelection(); }

  /// Sample each item in \c Items with unit weight
  template <typename RangeT> ReservoirSampler &sample(RangeT &&Items) {
    for (auto &I : Items)
      sample(I, 1);
    return *this;
  }

  /// Sample a single item with the given weight.
  ReservoirSampler &sample(const T &Item, uint64_t Weight) {
    if (!Weight)
      // If the weight is zero, do nothing.
      return *this;
    TotalWeight += Weight;
    // Consider switching from the current element to this one.
    if (uniform<uint64_t>(RandGen, 1, TotalWeight) <= Weight)
      Selection = Item;
    return *this;
  }
};

template <typename GenT, typename RangeT,
          typename ElT = std::remove_reference_t<
              decltype(*std::begin(std::declval<RangeT>()))>>
ReservoirSampler<ElT, GenT> makeSampler(GenT &RandGen, RangeT &&Items) {
  ReservoirSampler<ElT, GenT> RS(RandGen);
  RS.sample(Items);
  return RS;
}

template <typename GenT, typename T>
ReservoirSampler<T, GenT> makeSampler(GenT &RandGen, const T &Item,
                                      uint64_t Weight) {
  ReservoirSampler<T, GenT> RS(RandGen);
  RS.sample(Item, Weight);
  return RS;
}

template <typename T, typename GenT>
ReservoirSampler<T, GenT> makeSampler(GenT &RandGen) {
  return ReservoirSampler<T, GenT>(RandGen);
}
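
// Illustrative usage sketch (Gen and Items are hypothetical):
//
//   std::mt19937 Gen(42);
//   std::vector<int> Items = {1, 2, 3};
//   auto Sampler = makeSampler(Gen, Items); // each item gets unit weight
//   if (Sampler)                            // false only if nothing sampled
//     int Chosen = *Sampler;                // uniformly selected element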

} // namespace llvm

#endif // LLVM_FUZZMUTATE_RANDOM_H
//===-- IRMutator.h - Mutation engine for fuzzing IR ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provides the IRMutator class, which drives mutations on IR based on a
// configurable set of strategies. Some common strategies are also included
// here.
//
// Fuzzer-friendly (de)serialization functions are also provided, as these
// are usually needed when mutating IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_IRMUTATOR_H
#define LLVM_FUZZMUTATE_IRMUTATOR_H

#include "llvm/FuzzMutate/OpDescriptor.h"
#include "llvm/Support/ErrorHandling.h"
#include <optional>

namespace llvm {
class BasicBlock;
class Function;
class Instruction;
class Module;

struct RandomIRBuilder;

/// Base class for describing how to mutate a module. Mutation functions for
/// each IR unit forward to the contained unit.
class IRMutationStrategy {
public:
  virtual ~IRMutationStrategy() = default;

  /// Provide a weight to bias towards choosing this strategy for a mutation.
  ///
  /// The value of the weight is arbitrary, but a good default is "the number of
  /// distinct ways in which this strategy can mutate a unit". This can also be
  /// used to prefer strategies that shrink the overall size of the result when
  /// we start getting close to \c MaxSize.
  virtual uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                             uint64_t CurrentWeight) = 0;

  /// @{
  /// Mutators for each IR unit. By default these forward to a contained
  /// instance of the next smaller unit.
  virtual void mutate(Module &M, RandomIRBuilder &IB);
  virtual void mutate(Function &F, RandomIRBuilder &IB);
  virtual void mutate(BasicBlock &BB, RandomIRBuilder &IB);
  virtual void mutate(Instruction &I, RandomIRBuilder &IB) {
    llvm_unreachable("Strategy does not implement any mutators");
  }
  /// @}
};

using TypeGetter = std::function<Type *(LLVMContext &)>;

/// Entry point for configuring and running IR mutations.
class IRMutator {
  std::vector<TypeGetter> AllowedTypes;
  std::vector<std::unique_ptr<IRMutationStrategy>> Strategies;

public:
  IRMutator(std::vector<TypeGetter> &&AllowedTypes,
            std::vector<std::unique_ptr<IRMutationStrategy>> &&Strategies)
      : AllowedTypes(std::move(AllowedTypes)),
        Strategies(std::move(Strategies)) {}

  /// Calculate the size of the module as the number of objects in it, i.e.
  /// instructions, basic blocks, functions, and aliases.
  ///
  /// \param M module
  /// \return number of objects in module
  static size_t getModuleSize(const Module &M);

  /// Mutate given module. No change will be made if no strategy is selected.
  ///
  /// \param M  module to mutate
  /// \param Seed seed for random mutation
  /// \param MaxSize max module size (see getModuleSize)
  void mutateModule(Module &M, int Seed, size_t MaxSize);
};
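
// A minimal configuration sketch (assumes a parsed Module M and a Seed; the
// single allowed type and strategy shown here are arbitrary choices):
//
//   std::vector<TypeGetter> Types{
//       [](LLVMContext &C) { return Type::getInt32Ty(C); }};
//   std::vector<std::unique_ptr<IRMutationStrategy>> Strategies;
//   Strategies.push_back(std::make_unique<InjectorIRStrategy>());
//   IRMutator Mutator(std::move(Types), std::move(Strategies));
//   Mutator.mutateModule(M, Seed, /*MaxSize=*/4096);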

/// Strategy that injects operations into the function.
class InjectorIRStrategy : public IRMutationStrategy {
  std::vector<fuzzerop::OpDescriptor> Operations;

  std::optional<fuzzerop::OpDescriptor> chooseOperation(Value *Src,
                                                        RandomIRBuilder &IB);

public:
  InjectorIRStrategy() : Operations(getDefaultOps()) {}
  InjectorIRStrategy(std::vector<fuzzerop::OpDescriptor> &&Operations)
      : Operations(std::move(Operations)) {}
  static std::vector<fuzzerop::OpDescriptor> getDefaultOps();

  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return Operations.size();
  }

  using IRMutationStrategy::mutate;
  void mutate(Function &F, RandomIRBuilder &IB) override;
  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;
};

/// Strategy that deletes instructions when the Module is too large.
class InstDeleterIRStrategy : public IRMutationStrategy {
public:
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override;

  using IRMutationStrategy::mutate;
  void mutate(Function &F, RandomIRBuilder &IB) override;
  void mutate(Instruction &Inst, RandomIRBuilder &IB) override;
};

/// Strategy that modifies instruction attributes and operands.
class InstModificationIRStrategy : public IRMutationStrategy {
public:
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return 4;
  }

  using IRMutationStrategy::mutate;
  void mutate(Instruction &Inst, RandomIRBuilder &IB) override;
};

/// Strategy that generates new function calls and inserts function signatures
/// into the module. If any signatures are present in the module, they will be
/// called.
class InsertFunctionStrategy : public IRMutationStrategy {
public:
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return 10;
  }

  using IRMutationStrategy::mutate;
  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;
};

/// Strategy to split a random block and insert a random CFG in between.
class InsertCFGStrategy : public IRMutationStrategy {
private:
  uint64_t MaxNumCases;
  enum CFGToSink { Return, DirectSink, SinkOrSelfLoop, EndOfCFGToLink };

public:
  InsertCFGStrategy(uint64_t MNC = 8) : MaxNumCases(MNC) {}
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return 5;
  }

  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;

private:
  void connectBlocksToSink(ArrayRef<BasicBlock *> Blocks, BasicBlock *Sink,
                           RandomIRBuilder &IB);
};

/// Strategy to insert PHI Nodes at the head of each basic block.
class InsertPHIStrategy : public IRMutationStrategy {
public:
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return 2;
  }

  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;
};

/// Strategy to select a random instruction and add a new sink (user) to it to
/// increase data dependency.
class SinkInstructionStrategy : public IRMutationStrategy {
public:
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return 2;
  }

  void mutate(Function &F, RandomIRBuilder &IB) override;
  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;
};

/// Strategy to randomly select a block and shuffle the operations without
/// affecting data dependency.
class ShuffleBlockStrategy : public IRMutationStrategy {
public:
  uint64_t getWeight(size_t CurrentSize, size_t MaxSize,
                     uint64_t CurrentWeight) override {
    return 2;
  }

  void mutate(BasicBlock &BB, RandomIRBuilder &IB) override;
};

/// Fuzzer friendly interface for the llvm bitcode parser.
///
/// \param Data Bitcode we are going to parse
/// \param Size Size of the 'Data' in bytes
/// \return New module or nullptr in case of error
std::unique_ptr<Module> parseModule(const uint8_t *Data, size_t Size,
                                    LLVMContext &Context);

/// Fuzzer friendly interface for the llvm bitcode printer.
///
/// \param M Module to print
/// \param Dest Location to store serialized module
/// \param MaxSize Size of the destination buffer
/// \return Number of bytes that were written. When module size exceeds MaxSize
///         returns 0 and leaves Dest unchanged.
size_t writeModule(const Module &M, uint8_t *Dest, size_t MaxSize);

/// Try to parse module and verify it. May output verification errors to the
/// errs().
/// \return New module or nullptr in case of error.
std::unique_ptr<Module> parseAndVerify(const uint8_t *Data, size_t Size,
                                       LLVMContext &Context);
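
// Round-trip sketch for a fuzz target (Data and Size come from the fuzzer;
// Buf and BufSize are a hypothetical output buffer):
//
//   LLVMContext Ctx;
//   if (std::unique_ptr<Module> M = parseAndVerify(Data, Size, Ctx)) {
//     size_t Written = writeModule(*M, Buf, BufSize); // 0 if it doesn't fit
//     (void)Written;
//   }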

} // namespace llvm

#endif // LLVM_FUZZMUTATE_IRMUTATOR_H
//===-- OpDescriptor.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provides the fuzzerop::Descriptor class and related tools for describing
// operations an IR fuzzer can work with.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_OPDESCRIPTOR_H
#define LLVM_FUZZMUTATE_OPDESCRIPTOR_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include <functional>

namespace llvm {
class Instruction;
namespace fuzzerop {

/// @{
/// Populate a small list of potentially interesting constants of a given type.
void makeConstantsWithType(Type *T, std::vector<Constant *> &Cs);
std::vector<Constant *> makeConstantsWithType(Type *T);
/// @}

/// A matcher/generator for finding suitable values for the next source in an
/// operation's partially completed argument list.
///
/// Given that we're building some operation X and may have already filled some
/// subset of its operands, this predicate determines if some value New is
/// suitable for the next operand or generates a set of values that are
/// suitable.
class SourcePred {
public:
  /// Given a list of already selected operands, returns whether a given new
  /// operand is suitable for the next operand.
  using PredT = std::function<bool(ArrayRef<Value *> Cur, const Value *New)>;
  /// Given a list of already selected operands and a set of valid base types
  /// for a fuzzer, generates a list of constants that could be used for the
  /// next operand.
  using MakeT = std::function<std::vector<Constant *>(
      ArrayRef<Value *> Cur, ArrayRef<Type *> BaseTypes)>;

private:
  PredT Pred;
  MakeT Make;

public:
  /// Create a fully general source predicate.
  SourcePred(PredT Pred, MakeT Make) : Pred(Pred), Make(Make) {}
  SourcePred(PredT Pred, std::nullopt_t) : Pred(Pred) {
    Make = [Pred](ArrayRef<Value *> Cur, ArrayRef<Type *> BaseTypes) {
      // Default filter just calls Pred on each of the base types.
      std::vector<Constant *> Result;
      for (Type *T : BaseTypes) {
        Constant *V = UndefValue::get(T);
        if (Pred(Cur, V))
          makeConstantsWithType(T, Result);
      }
      if (Result.empty())
        report_fatal_error("Predicate does not match for base types");
      return Result;
    };
  }

  /// Returns true if \c New is compatible for the argument after \c Cur
  bool matches(ArrayRef<Value *> Cur, const Value *New) {
    return Pred(Cur, New);
  }

  /// Generates a list of potential values for the argument after \c Cur.
  std::vector<Constant *> generate(ArrayRef<Value *> Cur,
                                   ArrayRef<Type *> BaseTypes) {
    return Make(Cur, BaseTypes);
  }
};
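
// Illustrative sketch (Int32Ty and V are hypothetical; onlyType is defined
// below):
//
//   SourcePred P = onlyType(Int32Ty);
//   bool Ok = P.matches({}, V);          // true iff V has type Int32Ty
//   auto Cs = P.generate({}, {Int32Ty}); // interesting i32 constants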

/// A description of some operation we can build while fuzzing IR.
struct OpDescriptor {
  unsigned Weight;
  SmallVector<SourcePred, 2> SourcePreds;
  std::function<Value *(ArrayRef<Value *>, Instruction *)> BuilderFunc;
};

static inline SourcePred onlyType(Type *Only) {
  auto Pred = [Only](ArrayRef<Value *>, const Value *V) {
    return V->getType() == Only;
  };
  auto Make = [Only](ArrayRef<Value *>, ArrayRef<Type *>) {
    return makeConstantsWithType(Only);
  };
  return {Pred, Make};
}

static inline SourcePred anyType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return !V->getType()->isVoidTy();
  };
  auto Make = std::nullopt;
  return {Pred, Make};
}

static inline SourcePred anyIntType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isIntegerTy();
  };
  auto Make = std::nullopt;
  return {Pred, Make};
}

static inline SourcePred anyIntOrVecIntType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isIntOrIntVectorTy();
  };
  return {Pred, std::nullopt};
}

static inline SourcePred boolOrVecBoolType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isIntOrIntVectorTy(1);
  };
  return {Pred, std::nullopt};
}

static inline SourcePred anyFloatType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isFloatingPointTy();
  };
  auto Make = std::nullopt;
  return {Pred, Make};
}

static inline SourcePred anyFloatOrVecFloatType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isFPOrFPVectorTy();
  };
  return {Pred, std::nullopt};
}

static inline SourcePred anyPtrType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isPointerTy() && !V->isSwiftError();
  };
  auto Make = [](ArrayRef<Value *>, ArrayRef<Type *> Ts) {
    std::vector<Constant *> Result;
    // TODO: Should these point at something?
    for (Type *T : Ts)
      Result.push_back(UndefValue::get(PointerType::getUnqual(T)));
    return Result;
  };
  return {Pred, Make};
}

static inline SourcePred sizedPtrType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    if (V->isSwiftError())
      return false;

    return V->getType()->isPointerTy();
  };
  auto Make = [](ArrayRef<Value *>, ArrayRef<Type *> Ts) {
    std::vector<Constant *> Result;

    // TODO: This doesn't really make sense with opaque pointers,
    // as the pointer type will always be the same.
    for (Type *T : Ts)
      if (T->isSized())
        Result.push_back(UndefValue::get(PointerType::getUnqual(T)));

    return Result;
  };
  return {Pred, Make};
}

static inline SourcePred matchFirstLengthWAnyType() {
  auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
    assert(!Cur.empty() && "No first source yet");
    Type *This = V->getType(), *First = Cur[0]->getType();
    VectorType *ThisVec = dyn_cast<VectorType>(This);
    VectorType *FirstVec = dyn_cast<VectorType>(First);
    if (ThisVec && FirstVec) {
      return ThisVec->getElementCount() == FirstVec->getElementCount();
    }
    return (ThisVec == nullptr) && (FirstVec == nullptr) && (!This->isVoidTy());
  };
  auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *> BaseTypes) {
    assert(!Cur.empty() && "No first source yet");
    std::vector<Constant *> Result;
    ElementCount EC;
    bool isVec = false;
    if (VectorType *VecTy = dyn_cast<VectorType>(Cur[0]->getType())) {
      EC = VecTy->getElementCount();
      isVec = true;
    }
    for (Type *T : BaseTypes) {
      if (VectorType::isValidElementType(T)) {
        if (isVec)
          // If the first pred is <i1 x N>, make the result <T x N>
          makeConstantsWithType(VectorType::get(T, EC), Result);
        else
          makeConstantsWithType(T, Result);
      }
    }
    assert(!Result.empty() && "No potential constants.");
    return Result;
  };
  return {Pred, Make};
}

/// Match values that have the same type as the second source.
static inline SourcePred matchSecondType() {
  auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
    assert((Cur.size() > 1) && "No second source yet");
    return V->getType() == Cur[1]->getType();
  };
  auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *>) {
    assert((Cur.size() > 1) && "No second source yet");
    return makeConstantsWithType(Cur[1]->getType());
  };
  return {Pred, Make};
}

static inline SourcePred anyAggregateType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    // We can't index zero sized arrays.
    if (isa<ArrayType>(V->getType()))
      return V->getType()->getArrayNumElements() > 0;

    // Structs can also be zero-sized, i.e. opaque types.
    if (isa<StructType>(V->getType()))
      return V->getType()->getStructNumElements() > 0;

    return V->getType()->isAggregateType();
  };
  // TODO: For now we only find aggregates in BaseTypes. It might be better to
  // manufacture them out of the base types in some cases.
  auto Find = std::nullopt;
  return {Pred, Find};
}

static inline SourcePred anyVectorType() {
  auto Pred = [](ArrayRef<Value *>, const Value *V) {
    return V->getType()->isVectorTy();
  };
  // TODO: For now we only find vectors in BaseTypes. It might be better to
  // manufacture vectors out of the base types, but it's tricky to be sure
  // that's actually a reasonable type.
  auto Make = std::nullopt;
  return {Pred, Make};
}

/// Match values that have the same type as the first source.
static inline SourcePred matchFirstType() {
  auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
    assert(!Cur.empty() && "No first source yet");
    return V->getType() == Cur[0]->getType();
  };
  auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *>) {
    assert(!Cur.empty() && "No first source yet");
    return makeConstantsWithType(Cur[0]->getType());
  };
  return {Pred, Make};
}

/// Match values that have the first source's scalar type.
static inline SourcePred matchScalarOfFirstType() {
  auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
    assert(!Cur.empty() && "No first source yet");
    return V->getType() == Cur[0]->getType()->getScalarType();
  };
  auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *>) {
    assert(!Cur.empty() && "No first source yet");
    return makeConstantsWithType(Cur[0]->getType()->getScalarType());
  };
  return {Pred, Make};
}

} // namespace fuzzerop
} // namespace llvm

#endif // LLVM_FUZZMUTATE_OPDESCRIPTOR_H
//===-- FuzzerCLI.h - Common logic for CLIs of fuzzers ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Common logic needed to implement LLVM's fuzz targets' CLIs - including LLVM
// concepts like cl::opt and libFuzzer concepts like -ignore_remaining_args=1.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_FUZZERCLI_H
#define LLVM_FUZZMUTATE_FUZZERCLI_H

#include "llvm/Support/DataTypes.h"
#include <stddef.h>

namespace llvm {

class StringRef;

/// Parse cl::opts from a fuzz target commandline.
///
/// This handles all arguments after -ignore_remaining_args=1 as cl::opts.
void parseFuzzerCLOpts(int ArgC, char *ArgV[]);

/// Handle backend options that are encoded in the executable name.
///
/// Parses some common backend options out of a specially crafted executable
/// name (argv[0]). For example, a name like llvm-foo-fuzzer--aarch64-gisel
/// might set up an AArch64 triple and the Global ISel selector. This should be
/// called *before* parseFuzzerCLOpts if calling both.
///
/// This is meant to be used for environments like OSS-Fuzz that aren't capable
/// of passing in command line arguments in the normal way.
void handleExecNameEncodedBEOpts(StringRef ExecName);

/// Handle optimizer options which are encoded in the executable name.
/// Same semantics as in 'handleExecNameEncodedBEOpts'.
void handleExecNameEncodedOptimizerOpts(StringRef ExecName);

using FuzzerTestFun = int (*)(const uint8_t *Data, size_t Size);
using FuzzerInitFun = int (*)(int *argc, char ***argv);

/// Runs a fuzz target on the inputs specified on the command line.
///
/// Useful for testing fuzz targets without linking to libFuzzer. Finds inputs
/// in the argument list in a libFuzzer compatible way.
int runFuzzerOnInputs(
    int ArgC, char *ArgV[], FuzzerTestFun TestOne,
    FuzzerInitFun Init = [](int *, char ***) { return 0; });
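
// Sketch of a standalone driver built on this interface (TestOneInput is a
// hypothetical callback):
//
//   static int TestOneInput(const uint8_t *Data, size_t Size) {
//     // ... exercise the target with Data/Size ...
//     return 0;
//   }
//
//   int main(int argc, char *argv[]) {
//     return llvm::runFuzzerOnInputs(argc, argv, TestOneInput);
//   }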

} // namespace llvm

#endif // LLVM_FUZZMUTATE_FUZZERCLI_H
//===- RandomIRBuilder.h - Utils for randomly mutating IR -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Provides the RandomIRBuilder class, which is used to mutate IR for fuzzing.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_RANDOMIRBUILDER_H
#define LLVM_FUZZMUTATE_RANDOMIRBUILDER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include <random>

namespace llvm {
class AllocaInst;
class BasicBlock;
class Function;
class GlobalVariable;
class Instruction;
class LLVMContext;
class Module;
class Type;
class Value;

namespace fuzzerop {
class SourcePred;
}

using RandomEngine = std::mt19937;

struct RandomIRBuilder {
  RandomEngine Rand;
  SmallVector<Type *, 16> KnownTypes;

  uint64_t MinArgNum = 0;
  uint64_t MaxArgNum = 5;
  uint64_t MinFunctionNum = 1;

  RandomIRBuilder(int Seed, ArrayRef<Type *> AllowedTypes)
      : Rand(Seed), KnownTypes(AllowedTypes.begin(), AllowedTypes.end()) {}

  // TODO: Try to make this a bit less of a random mishmash of functions.

  /// Create a stack memory at the head of the function, store \c Init to the
  /// memory if provided.
  AllocaInst *createStackMemory(Function *F, Type *Ty, Value *Init = nullptr);
  /// Find or create a global variable. It will be initialized by random
  /// constants that satisfy \c Pred. It also reports whether this global
  /// variable was found or created.
  std::pair<GlobalVariable *, bool>
  findOrCreateGlobalVariable(Module *M, ArrayRef<Value *> Srcs,
                             fuzzerop::SourcePred Pred);
  enum SourceType {
    SrcFromInstInCurBlock,
    FunctionArgument,
    InstInDominator,
    SrcFromGlobalVariable,
    NewConstOrStack,
    EndOfValueSource,
  };
  /// Find a "source" for some operation, which will be used in one of the
  /// operation's operands. This either selects an instruction in \c Insts or
  /// returns some new arbitrary Value.
  Value *findOrCreateSource(BasicBlock &BB, ArrayRef<Instruction *> Insts);
  /// Find a "source" for some operation, which will be used in one of the
  /// operation's operands. This either selects an instruction in \c Insts that
  /// matches \c Pred, or returns some new Value that matches \c Pred. The
  /// values in \c Srcs should be source operands that have already been
  /// selected.
  Value *findOrCreateSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
                            ArrayRef<Value *> Srcs, fuzzerop::SourcePred Pred,
                            bool allowConstant = true);
  /// Create some Value suitable as a source for some operation.
  Value *newSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
                   ArrayRef<Value *> Srcs, fuzzerop::SourcePred Pred,
                   bool allowConstant = true);

  enum SinkType {
    /// TODO: Also consider pointers in function argument.
    SinkToInstInCurBlock,
    PointersInDominator,
    InstInDominatee,
    NewStore,
    SinkToGlobalVariable,
    EndOfValueSink,
  };
  /// Find a viable user for \c V in \c Insts, which should all be contained in
  /// \c BB. This may also create some new instruction in \c BB and use that.
  Instruction *connectToSink(BasicBlock &BB, ArrayRef<Instruction *> Insts,
                             Value *V);
  /// Create a user for \c V in \c BB.
  Instruction *newSink(BasicBlock &BB, ArrayRef<Instruction *> Insts, Value *V);
  Value *findPointer(BasicBlock &BB, ArrayRef<Instruction *> Insts);
  /// Return a uniformly chosen type from \c AllowedTypes
  Type *randomType();
  Function *createFunctionDeclaration(Module &M, uint64_t ArgNum);
  Function *createFunctionDeclaration(Module &M);
  Function *createFunctionDefinition(Module &M, uint64_t ArgNum);
  Function *createFunctionDefinition(Module &M);
};
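
// Construction sketch (Seed and Ctx are caller-provided):
//
//   RandomIRBuilder IB(Seed, {Type::getInt32Ty(Ctx)});
//   Type *T = IB.randomType(); // uniformly chosen from the allowed types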

} // namespace llvm

#endif // LLVM_FUZZMUTATE_RANDOMIRBUILDER_H
//===-- Operations.h - ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of common fuzzer operation descriptors for building an IR
// mutator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_OPERATIONS_H
#define LLVM_FUZZMUTATE_OPERATIONS_H

#include "llvm/FuzzMutate/OpDescriptor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

/// Getters for the default sets of operations, per general category.
/// @{
void describeFuzzerIntOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerFloatOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerControlFlowOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerPointerOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerAggregateOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerVectorOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerUnaryOperations(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerOtherOps(std::vector<fuzzerop::OpDescriptor> &Ops);
/// @}

namespace fuzzerop {

/// Descriptors for individual operations.
/// @{
OpDescriptor selectDescriptor(unsigned Weight);
OpDescriptor fnegDescriptor(unsigned Weight);
OpDescriptor binOpDescriptor(unsigned Weight, Instruction::BinaryOps Op);
OpDescriptor cmpOpDescriptor(unsigned Weight, Instruction::OtherOps CmpOp,
                             CmpInst::Predicate Pred);
OpDescriptor splitBlockDescriptor(unsigned Weight);
OpDescriptor gepDescriptor(unsigned Weight);
OpDescriptor extractValueDescriptor(unsigned Weight);
OpDescriptor insertValueDescriptor(unsigned Weight);
OpDescriptor extractElementDescriptor(unsigned Weight);
OpDescriptor insertElementDescriptor(unsigned Weight);
OpDescriptor shuffleVectorDescriptor(unsigned Weight);

/// @}

} // namespace fuzzerop

} // namespace llvm

#endif // LLVM_FUZZMUTATE_OPERATIONS_H
//===- llvm/LinkAllPasses.h ------------ Reference All Passes ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header file pulls in all transformation and analysis passes for tools
// like opt and bugpoint that need this functionality.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LINKALLPASSES_H
#define LLVM_LINKALLPASSES_H

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysisEvaluator.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallPrinter.h"
#include "llvm/Analysis/DomPrinter.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/IntervalPartition.h"
#include "llvm/Analysis/Lint.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/RegionPass.h"
#include "llvm/Analysis/RegionPrinter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/Support/Valgrind.h"
#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/InstSimplifyPass.h"
#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
#include "llvm/Transforms/Vectorize.h"
#include <cstdlib>

namespace {
  struct ForcePassLinking {
    ForcePassLinking() {
      // We must reference the passes in such a way that compilers will not
      // delete it all as dead code, even with whole program optimization,
      // yet is effectively a NO-OP. As the compiler isn't smart enough
      // to know that getenv() never returns -1, this will do the job.
      // This is so that globals in the translation units where these functions
      // are defined are forced to be initialized, populating various
      // registries.
      if (std::getenv("bar") != (char*) -1)
        return;

      (void) llvm::createAAEvalPass();
      (void) llvm::createBasicAAWrapperPass();
      (void) llvm::createSCEVAAWrapperPass();
      (void) llvm::createTypeBasedAAWrapperPass();
      (void) llvm::createScopedNoAliasAAWrapperPass();
      (void) llvm::createBreakCriticalEdgesPass();
      (void) llvm::createCallGraphDOTPrinterPass();
      (void) llvm::createCallGraphViewerPass();
      (void) llvm::createCFGSimplificationPass();
      (void) llvm::createStructurizeCFGPass();
      (void) llvm::createCostModelAnalysisPass();
      (void) llvm::createDeadArgEliminationPass();
      (void) llvm::createDeadCodeEliminationPass();
      (void) llvm::createDependenceAnalysisWrapperPass();
      (void) llvm::createDomOnlyPrinterWrapperPassPass();
      (void) llvm::createDomPrinterWrapperPassPass();
      (void) llvm::createDomOnlyViewerWrapperPassPass();
      (void) llvm::createDomViewerWrapperPassPass();
      (void) llvm::createAlwaysInlinerLegacyPass();
      (void) llvm::createGlobalsAAWrapperPass();
      (void) llvm::createGuardWideningPass();
      (void) llvm::createLoopGuardWideningPass();
      (void) llvm::createInstSimplifyLegacyPass();
      (void) llvm::createInstructionCombiningPass();
      (void) llvm::createJMCInstrumenterPass();
      (void) llvm::createKCFIPass();
      (void) llvm::createLCSSAPass();
      (void) llvm::createLICMPass();
      (void) llvm::createLoopSinkPass();
      (void) llvm::createLazyValueInfoPass();
      (void) llvm::createLoopExtractorPass();
      (void) llvm::createLoopPredicationPass();
      (void) llvm::createLoopSimplifyPass();
      (void) llvm::createLoopSimplifyCFGPass();
      (void) llvm::createLoopStrengthReducePass();
      (void) llvm::createLoopUnrollPass();
      (void) llvm::createLoopRotatePass();
      (void) llvm::createLowerConstantIntrinsicsPass();
      (void) llvm::createLowerExpectIntrinsicPass();
      (void) llvm::createLowerGlobalDtorsLegacyPass();
      (void) llvm::createLowerInvokePass();
      (void) llvm::createLowerSwitchPass();
      (void) llvm::createNaryReassociatePass();
      (void) llvm::createObjCARCContractPass();
      (void) llvm::createPromoteMemoryToRegisterPass();
      (void) llvm::createDemoteRegisterToMemoryPass();
      (void)llvm::createPostDomOnlyPrinterWrapperPassPass();
      (void)llvm::createPostDomPrinterWrapperPassPass();
      (void)llvm::createPostDomOnlyViewerWrapperPassPass();
      (void)llvm::createPostDomViewerWrapperPassPass();
      (void) llvm::createReassociatePass();
      (void) llvm::createRedundantDbgInstEliminationPass();
      (void) llvm::createRegionInfoPass();
      (void) llvm::createRegionOnlyPrinterPass();
      (void) llvm::createRegionOnlyViewerPass();
      (void) llvm::createRegionPrinterPass();
      (void) llvm::createRegionViewerPass();
      (void) llvm::createSafeStackPass();
      (void) llvm::createSROAPass();
      (void) llvm::createSingleLoopExtractorPass();
      (void) llvm::createTailCallEliminationPass();
      (void)llvm::createTLSVariableHoistPass();
      (void) llvm::createUnifyFunctionExitNodesPass();
      (void) llvm::createInstCountPass();
      (void) llvm::createConstantHoistingPass();
      (void) llvm::createCodeGenPreparePass();
      (void) llvm::createEarlyCSEPass();
      (void) llvm::createMergedLoadStoreMotionPass();
      (void) llvm::createGVNPass();
      (void) llvm::createPostDomTree();
      (void) llvm::createMergeICmpsLegacyPass();
      (void) llvm::createExpandLargeDivRemPass();
      (void) llvm::createExpandMemCmpPass();
      (void) llvm::createExpandVectorPredicationPass();
      std::string buf;
      llvm::raw_string_ostream os(buf);
      (void) llvm::createPrintModulePass(os);
      (void) llvm::createPrintFunctionPass(os);
      (void) llvm::createSinkingPass();
      (void) llvm::createLowerAtomicPass();
      (void) llvm::createLoadStoreVectorizerPass();
      (void) llvm::createPartiallyInlineLibCallsPass();
      (void) llvm::createScalarizerPass();
      (void) llvm::createSeparateConstOffsetFromGEPPass();
      (void) llvm::createSpeculativeExecutionPass();
      (void) llvm::createSpeculativeExecutionIfHasBranchDivergencePass();
      (void) llvm::createStraightLineStrengthReducePass();
      (void)llvm::createScalarizeMaskedMemIntrinLegacyPass();
      (void) llvm::createHardwareLoopsLegacyPass();
      (void) llvm::createUnifyLoopExitsPass();
      (void) llvm::createFixIrreduciblePass();
      (void)llvm::createSelectOptimizePass();

      (void)new llvm::IntervalPartition();
      (void)new llvm::ScalarEvolutionWrapperPass();
      llvm::Function::Create(nullptr, llvm::GlobalValue::ExternalLinkage)->viewCFGOnly();
      llvm::RGPassManager RGM;
      llvm::TargetLibraryInfoImpl TLII;
      llvm::TargetLibraryInfo TLI(TLII);
      llvm::AliasAnalysis AA(TLI);
      llvm::BatchAAResults BAA(AA);
      llvm::AliasSetTracker X(BAA);
      X.add(nullptr, llvm::LocationSize::beforeOrAfterPointer(),
            llvm::AAMDNodes()); // for -print-alias-sets
      (void) llvm::AreStatisticsEnabled();
      (void) llvm::sys::RunningOnValgrind();
    }
  } ForcePassLinking; // Force link by creating a global definition.
}

#endif
#ifndef LLVM_DWP_DWPSTRINGPOOL_H
#define LLVM_DWP_DWPSTRINGPOOL_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include <cassert>
#include <cstring>

namespace llvm {
class DWPStringPool {

  struct CStrDenseMapInfo {
    static inline const char *getEmptyKey() {
      return reinterpret_cast<const char *>(~static_cast<uintptr_t>(0));
    }
    static inline const char *getTombstoneKey() {
      return reinterpret_cast<const char *>(~static_cast<uintptr_t>(1));
    }
    static unsigned getHashValue(const char *Val) {
      assert(Val != getEmptyKey() && "Cannot hash the empty key!");
      assert(Val != getTombstoneKey() && "Cannot hash the tombstone key!");
      return (unsigned)hash_value(StringRef(Val));
    }
    static bool isEqual(const char *LHS, const char *RHS) {
      if (RHS == getEmptyKey())
        return LHS == getEmptyKey();
      if (RHS == getTombstoneKey())
        return LHS == getTombstoneKey();
      return strcmp(LHS, RHS) == 0;
    }
  };

  MCStreamer &Out;
  MCSection *Sec;
  DenseMap<const char *, uint32_t, CStrDenseMapInfo> Pool;
  uint32_t Offset = 0;

public:
  DWPStringPool(MCStreamer &Out, MCSection *Sec) : Out(Out), Sec(Sec) {}

  uint32_t getOffset(const char *Str, unsigned Length) {
    assert(strlen(Str) + 1 == Length && "Ensure length hint is correct");

    auto Pair = Pool.insert(std::make_pair(Str, Offset));
    if (Pair.second) {
      Out.switchSection(Sec);
      Out.emitBytes(StringRef(Str, Length));
      Offset += Length;
    }

    return Pair.first->second;
  }
};
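
// Example (illustrative; Out and StrSec are caller-provided). Strings are
// deduplicated, so only the first insertion emits bytes into the section:
//
//   DWPStringPool Pool(Out, StrSec);
//   uint32_t A = Pool.getOffset("foo", 4); // emits "foo\0" at offset 0
//   uint32_t B = Pool.getOffset("foo", 4); // no emission; B == A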
} // namespace llvm

#endif // LLVM_DWP_DWPSTRINGPOOL_H
#ifndef LLVM_DWP_DWP_H
#define LLVM_DWP_DWP_H

#include "DWPStringPool.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include <deque>
#include <vector>

namespace llvm {
struct UnitIndexEntry {
  DWARFUnitIndex::Entry::SectionContribution Contributions[8];
  std::string Name;
  std::string DWOName;
  StringRef DWPName;
};

// Holds data for Skeleton, Split Compilation, and Type Unit Headers (only in
// v5) as defined in Dwarf 5 specification, 7.5.1.2, 7.5.1.3 and Dwarf 4
// specification 7.5.1.1.
struct InfoSectionUnitHeader {
  // unit_length field. Note that the type is uint64_t even in 32-bit dwarf.
  uint64_t Length = 0;

  // version field.
  uint16_t Version = 0;

  // unit_type field. Initialized only if Version >= 5.
  uint8_t UnitType = 0;

  // address_size field.
  uint8_t AddrSize = 0;

  // debug_abbrev_offset field. Note that the type is uint64_t even in 32-bit
  // dwarf. It is assumed to be 0.
  uint64_t DebugAbbrevOffset = 0;

  // dwo_id field. This resides in the header only if Version >= 5.
  // In earlier versions, it is read from DW_AT_GNU_dwo_id.
  std::optional<uint64_t> Signature;

  // Derived from the length of Length field.
  dwarf::DwarfFormat Format = dwarf::DwarfFormat::DWARF32;

  // The size of the Header in bytes. This is derived while parsing the header,
  // and is stored as a convenience.
  uint8_t HeaderSize = 0;
};

struct CompileUnitIdentifiers {
  uint64_t Signature = 0;
  const char *Name = "";
  const char *DWOName = "";
};

Error write(MCStreamer &Out, ArrayRef<std::string> Inputs,
            bool ContinueOnCuIndexOverflow);

unsigned getContributionIndex(DWARFSectionKind Kind, uint32_t IndexVersion);

Error handleSection(
    const StringMap<std::pair<MCSection *, DWARFSectionKind>> &KnownSections,
    const MCSection *StrSection, const MCSection *StrOffsetSection,
    const MCSection *TypesSection, const MCSection *CUIndexSection,
    const MCSection *TUIndexSection, const MCSection *InfoSection,
    const object::SectionRef &Section, MCStreamer &Out,
    std::deque<SmallString<32>> &UncompressedSections,
    uint32_t (&ContributionOffsets)[8], UnitIndexEntry &CurEntry,
    StringRef &CurStrSection, StringRef &CurStrOffsetSection,
    std::vector<StringRef> &CurTypesSection,
    std::vector<StringRef> &CurInfoSection, StringRef &AbbrevSection,
    StringRef &CurCUIndexSection, StringRef &CurTUIndexSection,
    std::vector<std::pair<DWARFSectionKind, uint32_t>> &SectionLength);

Expected<InfoSectionUnitHeader> parseInfoSectionUnitHeader(StringRef Info);

void writeStringsAndOffsets(MCStreamer &Out, DWPStringPool &Strings,
                            MCSection *StrOffsetSection,
                            StringRef CurStrSection,
                            StringRef CurStrOffsetSection, uint16_t Version);

Error buildDuplicateError(const std::pair<uint64_t, UnitIndexEntry> &PrevE,
                          const CompileUnitIdentifiers &ID, StringRef DWPName);

void writeIndex(MCStreamer &Out, MCSection *Section,
                ArrayRef<unsigned> ContributionOffsets,
                const MapVector<uint64_t, UnitIndexEntry> &IndexEntries,
                uint32_t IndexVersion);

} // namespace llvm
#endif // LLVM_DWP_DWP_H
#ifndef LLVM_DWP_DWPERROR_H
#define LLVM_DWP_DWPERROR_H

#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include <string>

namespace llvm {
class DWPError : public ErrorInfo<DWPError> {
public:
  DWPError(std::string Info) : Info(std::move(Info)) {}
  void log(raw_ostream &OS) const override { OS << Info; }
  std::error_code convertToErrorCode() const override {
    llvm_unreachable("Not implemented");
  }
  static char ID;

private:
  std::string Info;
};
} // namespace llvm

#endif // LLVM_DWP_DWPERROR_H
//===-- BitcodeWriterPass.h - Bitcode writing pass --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides a bitcode writing pass.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_BITCODEWRITERPASS_H
#define LLVM_BITCODE_BITCODEWRITERPASS_H

#include "llvm/IR/PassManager.h"

namespace llvm {
class Module;
class ModulePass;
class Pass;
class raw_ostream;

/// Create and return a pass that writes the module to the specified
/// ostream. Note that this pass is designed for use with the legacy pass
/// manager.
///
/// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
/// reproduced when deserialized.
///
/// If \c EmitSummaryIndex, emit the summary index (currently for use in ThinLTO
/// optimization).
///
/// If \c EmitModuleHash, compute and emit the module hash in the bitcode
/// (currently for use in ThinLTO incremental build).
ModulePass *createBitcodeWriterPass(raw_ostream &Str,
                                    bool ShouldPreserveUseListOrder = false,
                                    bool EmitSummaryIndex = false,
                                    bool EmitModuleHash = false);

/// Check whether a pass is a BitcodeWriterPass.
bool isBitcodeWriterPass(Pass *P);

/// Pass for writing a module of IR out to a bitcode file.
///
/// Note that this is intended for use with the new pass manager. To construct
/// a pass for the legacy pass manager, use the function above.
class BitcodeWriterPass : public PassInfoMixin<BitcodeWriterPass> {
  raw_ostream &OS;
  bool ShouldPreserveUseListOrder;
  bool EmitSummaryIndex;
  bool EmitModuleHash;

public:
  /// Construct a bitcode writer pass around a particular output stream.
  ///
  /// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
  /// reproduced when deserialized.
  ///
  /// If \c EmitSummaryIndex, emit the summary index (currently
  /// for use in ThinLTO optimization).
  explicit BitcodeWriterPass(raw_ostream &OS,
                             bool ShouldPreserveUseListOrder = false,
                             bool EmitSummaryIndex = false,
                             bool EmitModuleHash = false)
      : OS(OS), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder),
        EmitSummaryIndex(EmitSummaryIndex), EmitModuleHash(EmitModuleHash) {}

  /// Run the bitcode writer pass, and output the module to the selected
  /// output stream.
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);

  static bool isRequired() { return true; }
};
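
// New pass manager usage sketch (OS, M, and MAM are caller-provided):
//
//   ModulePassManager MPM;
//   MPM.addPass(BitcodeWriterPass(OS));
//   MPM.run(M, MAM);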

}

#endif
//===- llvm/Bitcode/BitcodeAnalyzer.h - Bitcode analyzer --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines interfaces to analyze LLVM bitcode files/streams.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_BITCODEANALYZER_H
#define LLVM_BITCODE_BITCODEANALYZER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Error.h"
#include <map>
#include <optional>
#include <vector>

namespace llvm {

class raw_ostream;

/// CurStreamTypeType - The kind of content held in a bitstream; this is the
/// type of the CurStreamType member of BitcodeAnalyzer.
enum CurStreamTypeType {
  UnknownBitstream,
  LLVMIRBitstream,
  ClangSerializedASTBitstream,
  ClangSerializedDiagnosticsBitstream,
  LLVMBitstreamRemarks
};

struct BCDumpOptions {
  /// The stream.
  raw_ostream &OS;
  /// Print per-code histogram.
  bool Histogram = false;
  /// Don't emit numeric info in dump if symbolic info is available.
  bool Symbolic = false;
  /// Print binary blobs using hex escapes.
  bool ShowBinaryBlobs = false;
  /// Print BLOCKINFO block details.
  bool DumpBlockinfo = false;

  BCDumpOptions(raw_ostream &OS) : OS(OS) {}
};

class BitcodeAnalyzer {
  BitstreamCursor Stream;
  BitstreamBlockInfo BlockInfo;
  CurStreamTypeType CurStreamType;
  std::optional<BitstreamCursor> BlockInfoStream;
  unsigned NumTopBlocks = 0;

  struct PerRecordStats {
    unsigned NumInstances = 0;
    unsigned NumAbbrev = 0;
    uint64_t TotalBits = 0;
    PerRecordStats() = default;
  };

  struct PerBlockIDStats {
    /// NumInstances - This is the number of times this block ID has been seen.
    unsigned NumInstances = 0;
    /// NumBits - The total size in bits of all of these blocks.
    uint64_t NumBits = 0;
    /// NumSubBlocks - The total number of blocks these blocks contain.
    unsigned NumSubBlocks = 0;
    /// NumAbbrevs - The total number of abbreviations.
    unsigned NumAbbrevs = 0;
    /// NumRecords - The total number of records these blocks contain, and the
    /// number that are abbreviated.
    unsigned NumRecords = 0, NumAbbreviatedRecords = 0;
    /// CodeFreq - Keep track of the number of times we see each code.
    std::vector<PerRecordStats> CodeFreq;
    PerBlockIDStats() = default;
  };

  std::map<unsigned, PerBlockIDStats> BlockIDStats;

public:
  BitcodeAnalyzer(StringRef Buffer,
                  std::optional<StringRef> BlockInfoBuffer = std::nullopt);
  /// Analyze the bitcode file.
  Error analyze(std::optional<BCDumpOptions> O = std::nullopt,
                std::optional<StringRef> CheckHash = std::nullopt);
  /// Print stats about the bitcode file.
  void printStats(BCDumpOptions O,
                  std::optional<StringRef> Filename = std::nullopt);

private:
  /// Read a block, updating statistics, etc.
  Error parseBlock(unsigned BlockID, unsigned IndentLevel,
                   std::optional<BCDumpOptions> O = std::nullopt,
                   std::optional<StringRef> CheckHash = std::nullopt);

  Error decodeMetadataStringsBlob(StringRef Indent, ArrayRef<uint64_t> Record,
                                  StringRef Blob, raw_ostream &OS);
};
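
// Usage sketch (Buffer holds the raw bitcode contents):
//
//   BitcodeAnalyzer BA(Buffer);
//   BCDumpOptions Opts(llvm::outs());
//   Opts.Histogram = true;
//   if (Error E = BA.analyze(Opts))
//     logAllUnhandledErrors(std::move(E), llvm::errs());
//   else
//     BA.printStats(Opts);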

} // end namespace llvm

#endif // LLVM_BITCODE_BITCODEANALYZER_H
//===- llvm/Bitcode/BitcodeConvenience.h - Convenience Wrappers -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file Convenience wrappers for the LLVM bitcode format and bitstream APIs.
///
/// This allows you to use a sort of DSL to declare and use bitcode
/// abbreviations and records. Example:
///
/// \code
///     using Metadata = BCRecordLayout<
///       METADATA_ID,  // ID
///       BCFixed<16>,  // Module format major version
///       BCFixed<16>,  // Module format minor version
///       BCBlob        // misc. version information
///     >;
///     Metadata metadata(Out);
///     metadata.emit(ScratchRecord, VERSION_MAJOR, VERSION_MINOR, Data);
/// \endcode
///
/// For details on the bitcode format, see
///   http://llvm.org/docs/BitCodeFormat.html
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_BITCODECONVENIENCE_H
#define LLVM_BITCODE_BITCODECONVENIENCE_H

#include "llvm/Bitstream/BitCodes.h"
#include "llvm/Bitstream/BitstreamWriter.h"
#include <cstdint>
#include <optional>

namespace llvm {
namespace detail {
/// Convenience base for all kinds of bitcode abbreviation fields.
///
/// This just defines common properties queried by the metaprogramming.
template <bool Compound = false> class BCField {
public:
  static const bool IsCompound = Compound;

  /// Asserts that the given data is a valid value for this field.
  template <typename T> static void assertValid(const T &data) {}

  /// Converts a raw numeric representation of this value to its preferred
  /// type.
  template <typename T> static T convert(T rawValue) { return rawValue; }
};
} // namespace detail

/// Represents a literal operand in a bitcode record.
///
/// The value of a literal operand is the same for all instances of the record,
/// so it is only emitted in the abbreviation definition.
///
/// Note that because this uses a compile-time template, you cannot have a
/// literal operand that is fixed at run-time without dropping down to the
/// raw LLVM APIs.
template <uint64_t Value> class BCLiteral : public detail::BCField<> {
public:
  static void emitOp(llvm::BitCodeAbbrev &abbrev) {
    abbrev.Add(llvm::BitCodeAbbrevOp(Value));
  }

  template <typename T> static void assertValid(const T &data) {
    assert(data == Value && "data value does not match declared literal value");
  }
};

/// Represents a fixed-width value in a bitcode record.
///
/// Note that the LLVM bitcode format only supports unsigned values.
template <unsigned Width> class BCFixed : public detail::BCField<> {
public:
  static_assert(Width <= 64, "fixed-width field is too large");

  static void emitOp(llvm::BitCodeAbbrev &abbrev) {
    abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, Width));
  }

  static void assertValid(const bool &data) {
    assert(llvm::isUInt<Width>(data) &&
           "data value does not fit in the given bit width");
  }

  template <typename T> static void assertValid(const T &data) {
    assert(data >= 0 && "cannot encode signed integers");
    assert(llvm::isUInt<Width>(data) &&
           "data value does not fit in the given bit width");
  }
};

/// Represents a variable-width value in a bitcode record.
///
/// The \p Width parameter should include the continuation bit.
///
/// Note that the LLVM bitcode format only supports unsigned values.
template <unsigned Width> class BCVBR : public detail::BCField<> {
  static_assert(Width >= 2, "width does not have room for continuation bit");

public:
  static void emitOp(llvm::BitCodeAbbrev &abbrev) {
    abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, Width));
  }

  template <typename T> static void assertValid(const T &data) {
    assert(data >= 0 && "cannot encode signed integers");
  }
};

/// Represents a character encoded in LLVM's Char6 encoding.
///
/// This format is suitable for encoding decimal numbers (without signs or
/// exponents) and C identifiers (without dollar signs), but not much else.
///
/// \sa http://llvm.org/docs/BitCodeFormat.html#char6-encoded-value
class BCChar6 : public detail::BCField<> {
public:
  static void emitOp(llvm::BitCodeAbbrev &abbrev) {
    abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Char6));
  }

  template <typename T> static void assertValid(const T &data) {
    assert(llvm::BitCodeAbbrevOp::isChar6(data) && "invalid Char6 data");
  }

  template <typename T> static char convert(T rawValue) {
    return static_cast<char>(rawValue);
  }
};

/// Represents an untyped blob of bytes.
///
/// If present, this must be the last field in a record.
class BCBlob : public detail::BCField<true> {
public:
  static void emitOp(llvm::BitCodeAbbrev &abbrev) {
    abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
  }
};

/// Represents an array of some other type.
///
/// If present, this must be the last field in a record.
template <typename ElementTy> class BCArray : public detail::BCField<true> {
  static_assert(!ElementTy::IsCompound, "arrays can only contain scalar types");

public:
  static void emitOp(llvm::BitCodeAbbrev &abbrev) {
    abbrev.Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Array));
    ElementTy::emitOp(abbrev);
  }
};
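
// For illustration (NAME_ID and the field shapes are hypothetical): an
// abbreviation whose last operand is a byte array is declared by listing
// BCArray last, mirroring the rule that arrays and blobs must be the final
// field of a record.
//
// \code
//   using NameRecord = BCRecordLayout<
//     NAME_ID,             // hypothetical record ID
//     BCVBR<6>,            // some scalar payload
//     BCArray<BCFixed<8>>  // trailing array of bytes
//   >;
// \endcode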

namespace detail {
/// Attaches the last field to an abbreviation.
///
/// This is the base case for \c emitOps.
///
/// \sa BCRecordLayout::emitAbbrev
template <typename FieldTy> static void emitOps(llvm::BitCodeAbbrev &abbrev) {
  FieldTy::emitOp(abbrev);
}

/// Attaches fields to an abbreviation.
///
/// This is the recursive case for \c emitOps.
///
/// \sa BCRecordLayout::emitAbbrev
template <typename FieldTy, typename Next, typename... Rest>
static void emitOps(llvm::BitCodeAbbrev &abbrev) {
  static_assert(!FieldTy::IsCompound,
                "arrays and blobs may not appear in the middle of a record");
  FieldTy::emitOp(abbrev);
  emitOps<Next, Rest...>(abbrev);
}

/// Helper class for dealing with a scalar element in the middle of a record.
///
/// \sa BCRecordLayout
template <typename ElementTy, typename... Fields> class BCRecordCoding {
public:
  template <typename BufferTy, typename ElementDataTy, typename... DataTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                   unsigned code, ElementDataTy element, DataTy &&...data) {
    static_assert(!ElementTy::IsCompound,
                  "arrays and blobs may not appear in the middle of a record");
    ElementTy::assertValid(element);
    buffer.push_back(element);
    BCRecordCoding<Fields...>::emit(Stream, buffer, code,
                                    std::forward<DataTy>(data)...);
  }

  template <typename T, typename ElementDataTy, typename... DataTy>
  static void read(ArrayRef<T> buffer, ElementDataTy &element,
                   DataTy &&...data) {
    assert(!buffer.empty() && "too few elements in buffer");
    element = ElementTy::convert(buffer.front());
    BCRecordCoding<Fields...>::read(buffer.slice(1),
                                    std::forward<DataTy>(data)...);
  }

  template <typename T, typename... DataTy>
  static void read(ArrayRef<T> buffer, std::nullopt_t, DataTy &&...data) {
    assert(!buffer.empty() && "too few elements in buffer");
    BCRecordCoding<Fields...>::read(buffer.slice(1),
                                    std::forward<DataTy>(data)...);
  }
};

/// Helper class for dealing with a scalar element at the end of a record.
///
/// This has a separate implementation because up until now we've only been
/// \em building the record (into a data buffer), and now we need to hand it
/// off to the BitstreamWriter to be emitted.
///
/// \sa BCRecordLayout
template <typename ElementTy> class BCRecordCoding<ElementTy> {
public:
  template <typename BufferTy, typename DataTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                   unsigned code, const DataTy &data) {
    static_assert(!ElementTy::IsCompound,
                  "arrays and blobs need special handling");
    ElementTy::assertValid(data);
    buffer.push_back(data);
    Stream.EmitRecordWithAbbrev(code, buffer);
  }

  template <typename T, typename DataTy>
  static void read(ArrayRef<T> buffer, DataTy &data) {
    assert(buffer.size() == 1 && "record data does not match layout");
    data = ElementTy::convert(buffer.front());
  }

  template <typename T> static void read(ArrayRef<T> buffer, std::nullopt_t) {
    assert(buffer.size() == 1 && "record data does not match layout");
    (void)buffer;
  }

  template <typename T> static void read(ArrayRef<T> buffer) = delete;
};

/// Helper class for dealing with an array at the end of a record.
///
/// \sa BCRecordLayout::emitRecord
template <typename ElementTy> class BCRecordCoding<BCArray<ElementTy>> {
public:
  template <typename BufferTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                   unsigned code, StringRef data) {
    // TODO: validate array data.
    Stream.EmitRecordWithArray(code, buffer, data);
  }

  template <typename BufferTy, typename ArrayTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                   unsigned code, const ArrayTy &array) {
#ifndef NDEBUG
    for (auto &element : array)
      ElementTy::assertValid(element);
#endif
    buffer.reserve(buffer.size() + std::distance(array.begin(), array.end()));
    std::copy(array.begin(), array.end(), std::back_inserter(buffer));
    Stream.EmitRecordWithAbbrev(code, buffer);
  }

  template <typename BufferTy, typename ElementDataTy, typename... DataTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                   unsigned code, ElementDataTy element, DataTy... data) {
    std::array<ElementDataTy, 1 + sizeof...(data)> array{{element, data...}};
    emit(Stream, buffer, code, array);
  }

  template <typename BufferTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &Buffer,
                   unsigned code, std::nullopt_t) {
    Stream.EmitRecordWithAbbrev(code, Buffer);
  }

  template <typename T>
  static void read(ArrayRef<T> Buffer, ArrayRef<T> &rawData) {
    rawData = Buffer;
  }

  template <typename T, typename ArrayTy>
  static void read(ArrayRef<T> buffer, ArrayTy &array) {
    array.append(llvm::map_iterator(buffer.begin(), T::convert),
                 llvm::map_iterator(buffer.end(), T::convert));
  }

  template <typename T> static void read(ArrayRef<T> buffer, std::nullopt_t) {
    (void)buffer;
  }

  template <typename T> static void read(ArrayRef<T> buffer) = delete;
};

/// Helper class for dealing with a blob at the end of a record.
///
/// \sa BCRecordLayout
template <> class BCRecordCoding<BCBlob> {
public:
  template <typename BufferTy>
  static void emit(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                   unsigned code, StringRef data) {
    Stream.EmitRecordWithBlob(code, buffer, data);
  }

  template <typename T> static void read(ArrayRef<T> buffer) { (void)buffer; }

  /// Blob data is not stored in the buffer if you are using the correct
  /// accessor; this method should not be used.
  template <typename T, typename DataTy>
  static void read(ArrayRef<T> buffer, DataTy &data) = delete;
};

/// A type trait whose \c type field is the last of its template parameters.
template <typename Head, typename... Tail> struct last_type {
  using type = typename last_type<Tail...>::type;
};

template <typename Head> struct last_type<Head> { using type = Head; };

/// A type trait whose \c value field is \c true if the last type is BCBlob.
///
/// (The leading \c int keeps \c last_type well-formed when \c Types is empty;
/// in that case the last type is \c int and the trait is \c false.)
template <typename... Types>
using has_blob = std::is_same<BCBlob, typename last_type<int, Types...>::type>;

/// A type trait whose \c value field is \c true if the given type is a
/// BCArray (of any element kind).
template <typename T> struct is_array {
private:
  template <typename E> static bool check(BCArray<E> *);
  static int check(...);

public:
  typedef bool value_type;
  static constexpr bool value = !std::is_same<decltype(check((T *)nullptr)),
                                              decltype(check(false))>::value;
};

/// A type trait whose \c value field is \c true if the last type is a
/// BCArray (of any element kind).
template <typename... Types>
using has_array = is_array<typename last_type<int, Types...>::type>;
} // namespace detail

/// Represents a single bitcode record type.
///
/// This class template is meant to be instantiated and then given a name,
/// so that from then on that name can be used.
template <typename IDField, typename... Fields> class BCGenericRecordLayout {
  llvm::BitstreamWriter &Stream;

public:
  /// The abbreviation code used for this record in the current block.
  ///
  /// Note that this is not the same as the semantic record code, which is the
  /// first field of the record.
  const unsigned AbbrevCode;

  /// Create a layout and register it with the given bitstream writer.
  explicit BCGenericRecordLayout(llvm::BitstreamWriter &Stream)
      : Stream(Stream), AbbrevCode(emitAbbrev(Stream)) {}

  /// Emit a record to the bitstream writer, using the given buffer for scratch
  /// space.
  ///
  /// Note that even fixed arguments must be specified here.
  template <typename BufferTy, typename... Data>
  void emit(BufferTy &buffer, unsigned id, Data &&...data) const {
    emitRecord(Stream, buffer, AbbrevCode, id, std::forward<Data>(data)...);
  }

  /// Registers this record's layout with the bitstream writer.
  ///
  /// \returns The abbreviation code for the newly-registered record type.
  static unsigned emitAbbrev(llvm::BitstreamWriter &Stream) {
    auto Abbrev = std::make_shared<llvm::BitCodeAbbrev>();
    detail::emitOps<IDField, Fields...>(*Abbrev);
    return Stream.EmitAbbrev(std::move(Abbrev));
  }

  /// Emit a record identified by \p abbrCode to the bitstream writer \p Stream,
  /// using \p buffer for scratch space.
  ///
  /// Note that even fixed arguments must be specified here. Blobs are passed
  /// as StringRefs, while arrays can be passed inline, as aggregates, or as
  /// pre-encoded StringRef data. Skipped values and empty arrays should be
  /// passed as \c std::nullopt.
  template <typename BufferTy, typename... Data>
  static void emitRecord(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                         unsigned abbrCode, unsigned recordID, Data &&...data) {
    static_assert(sizeof...(data) <= sizeof...(Fields) ||
                      detail::has_array<Fields...>::value,
                  "Too many record elements");
    static_assert(sizeof...(data) >= sizeof...(Fields),
                  "Too few record elements");
    buffer.clear();
    detail::BCRecordCoding<IDField, Fields...>::emit(
        Stream, buffer, abbrCode, recordID, std::forward<Data>(data)...);
  }

  /// Extract record data from \p buffer into the given data fields.
  ///
  /// Note that even fixed arguments must be specified here. Pass
  /// \c std::nullopt if you don't care about a particular parameter. Blob
  /// data is not included
  /// in the buffer and should be handled separately by the caller.
  template <typename ElementTy, typename... Data>
  static void readRecord(ArrayRef<ElementTy> buffer, Data &&...data) {
    static_assert(sizeof...(data) <= sizeof...(Fields),
                  "Too many record elements");
    static_assert(sizeof...(Fields) <=
                      sizeof...(data) + detail::has_blob<Fields...>::value,
                  "Too few record elements");
    return detail::BCRecordCoding<Fields...>::read(buffer,
                                                   std::forward<Data>(data)...);
  }

  /// Extract record data from \p buffer into the given data fields.
  ///
  /// Note that even fixed arguments must be specified here. Pass
  /// \c std::nullopt if you don't care about a particular parameter. Blob
  /// data is not included
  /// in the buffer and should be handled separately by the caller.
  template <typename BufferTy, typename... Data>
  static void readRecord(BufferTy &buffer, Data &&...data) {
    return readRecord(llvm::ArrayRef(buffer), std::forward<Data>(data)...);
  }
};

/// A record with a fixed record code.
template <unsigned RecordCode, typename... Fields>
class BCRecordLayout
    : public BCGenericRecordLayout<BCLiteral<RecordCode>, Fields...> {
  using Base = BCGenericRecordLayout<BCLiteral<RecordCode>, Fields...>;

public:
  enum : unsigned {
    /// The record code associated with this layout.
    Code = RecordCode
  };

  /// Create a layout and register it with the given bitstream writer.
  explicit BCRecordLayout(llvm::BitstreamWriter &Stream) : Base(Stream) {}

  /// Emit a record to the bitstream writer, using the given buffer for scratch
  /// space.
  ///
  /// Note that even fixed arguments must be specified here.
  template <typename BufferTy, typename... Data>
  void emit(BufferTy &buffer, Data &&...data) const {
    Base::emit(buffer, RecordCode, std::forward<Data>(data)...);
  }

  /// Emit a record identified by \p abbrCode to the bitstream writer \p Stream,
  /// using \p buffer for scratch space.
  ///
  /// Note that even fixed arguments must be specified here. Currently, arrays
  /// and blobs can only be passed as StringRefs.
  template <typename BufferTy, typename... Data>
  static void emitRecord(llvm::BitstreamWriter &Stream, BufferTy &buffer,
                         unsigned abbrCode, Data &&...data) {
    Base::emitRecord(Stream, buffer, abbrCode, RecordCode,
                     std::forward<Data>(data)...);
  }
};
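
// Example round trip (a sketch; MY_RECORD_ID is a hypothetical record code,
// `Writer` is a BitstreamWriter, and `Ops` holds the decoded operands):
//
// \code
//   using MyRecord = BCRecordLayout<MY_RECORD_ID, BCVBR<6>, BCBlob>;
//
//   SmallVector<uint64_t, 8> Scratch;
//   MyRecord Layout(Writer);            // registers the abbreviation
//   Layout.emit(Scratch, Count, Bytes); // Count: uint64_t, Bytes: StringRef
//
//   // Reading back from ArrayRef<uint64_t> Ops; the blob itself is returned
//   // separately by the bitstream reader:
//   uint64_t Count2;
//   MyRecord::readRecord(Ops, Count2);
// \endcode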

/// RAII object to pair entering and exiting a sub-block.
class BCBlockRAII {
  llvm::BitstreamWriter &Stream;

public:
  BCBlockRAII(llvm::BitstreamWriter &Stream, unsigned block, unsigned abbrev)
      : Stream(Stream) {
    Stream.EnterSubblock(block, abbrev);
  }

  ~BCBlockRAII() { Stream.ExitBlock(); }
};
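
// Example (a sketch; MY_BLOCK_ID is hypothetical and 3 is a typical abbrev
// width):
//
// \code
//   {
//     BCBlockRAII Block(Writer, MY_BLOCK_ID, /*abbrev width*/ 3);
//     // ... emit records into the sub-block ...
//   } // ExitBlock() is called here
// \endcode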
} // namespace llvm

#endif
//===- LLVMBitCodes.h - Enum values for the LLVM bitcode format -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines Bitcode enum values for LLVM IR bitcode files.
//
// The enum values defined in this file should be considered permanent.  If
// new features are added, they should have values added at the end of the
// respective lists.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_LLVMBITCODES_H
#define LLVM_BITCODE_LLVMBITCODES_H

// This is the only file included, and it, in turn, is a leaf header.
// This allows external tools to dump the AST of this file and analyze it for
// changes without needing to fully or partially build LLVM itself.
#include "llvm/Bitstream/BitCodeEnums.h"

namespace llvm {
namespace bitc {
// The only top-level block types are MODULE, IDENTIFICATION, STRTAB, and
// SYMTAB.
enum BlockIDs {
  // Blocks
  MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID,

  // Module sub-block IDs.
  PARAMATTR_BLOCK_ID,
  PARAMATTR_GROUP_BLOCK_ID,

  CONSTANTS_BLOCK_ID,
  FUNCTION_BLOCK_ID,

  // Block intended to contain information on the bitcode versioning.
  // Can be used to provide better error messages when we fail to parse a
  // bitcode file.
  IDENTIFICATION_BLOCK_ID,

  VALUE_SYMTAB_BLOCK_ID,
  METADATA_BLOCK_ID,
  METADATA_ATTACHMENT_ID,

  TYPE_BLOCK_ID_NEW,

  USELIST_BLOCK_ID,

  MODULE_STRTAB_BLOCK_ID,
  GLOBALVAL_SUMMARY_BLOCK_ID,

  OPERAND_BUNDLE_TAGS_BLOCK_ID,

  METADATA_KIND_BLOCK_ID,

  STRTAB_BLOCK_ID,

  FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID,

  SYMTAB_BLOCK_ID,

  SYNC_SCOPE_NAMES_BLOCK_ID,
};

/// The identification block contains a string that describes the producer
/// details and an epoch that defines the auto-upgrade capability.
enum IdentificationCodes {
  IDENTIFICATION_CODE_STRING = 1, // IDENTIFICATION:      [strchr x N]
  IDENTIFICATION_CODE_EPOCH = 2,  // EPOCH:               [epoch#]
};

/// The epoch that defines the auto-upgrade compatibility for the bitcode.
///
/// LLVM guarantees in a major release that a minor release can read bitcode
/// generated by previous minor releases. We translate this by making the
/// reader accept only bitcode with the same epoch, except for the X.0 release,
/// which also accepts N-1.
enum { BITCODE_CURRENT_EPOCH = 0 };

/// MODULE blocks have a number of optional fields and subblocks.
enum ModuleCodes {
  MODULE_CODE_VERSION = 1,     // VERSION:     [version#]
  MODULE_CODE_TRIPLE = 2,      // TRIPLE:      [strchr x N]
  MODULE_CODE_DATALAYOUT = 3,  // DATALAYOUT:  [strchr x N]
  MODULE_CODE_ASM = 4,         // ASM:         [strchr x N]
  MODULE_CODE_SECTIONNAME = 5, // SECTIONNAME: [strchr x N]

  // Deprecated, but still needed to read old bitcode files.
  MODULE_CODE_DEPLIB = 6, // DEPLIB:      [strchr x N]

  // GLOBALVAR: [pointer type, isconst, initid,
  //             linkage, alignment, section, visibility, threadlocal]
  MODULE_CODE_GLOBALVAR = 7,

  // FUNCTION:  [type, callingconv, isproto, linkage, paramattrs, alignment,
  //             section, visibility, gc, unnamed_addr]
  MODULE_CODE_FUNCTION = 8,

  // ALIAS: [alias type, aliasee val#, linkage, visibility]
  MODULE_CODE_ALIAS_OLD = 9,

  MODULE_CODE_GCNAME = 11, // GCNAME: [strchr x N]
  MODULE_CODE_COMDAT = 12, // COMDAT: [selection_kind, name]

  MODULE_CODE_VSTOFFSET = 13, // VSTOFFSET: [offset]

  // ALIAS: [alias value type, addrspace, aliasee val#, linkage, visibility]
  MODULE_CODE_ALIAS = 14,

  MODULE_CODE_METADATA_VALUES_UNUSED = 15,

  // SOURCE_FILENAME: [namechar x N]
  MODULE_CODE_SOURCE_FILENAME = 16,

  // HASH: [5*i32]
  MODULE_CODE_HASH = 17,

  // IFUNC: [ifunc value type, addrspace, resolver val#, linkage, visibility]
  MODULE_CODE_IFUNC = 18,
};

/// PARAMATTR blocks have code for defining a parameter attribute set.
enum AttributeCodes {
  // Deprecated, but still needed to read old bitcode files.
  PARAMATTR_CODE_ENTRY_OLD = 1, // ENTRY: [paramidx0, attr0,
                                //         paramidx1, attr1...]
  PARAMATTR_CODE_ENTRY = 2,     // ENTRY: [attrgrp0, attrgrp1, ...]
  PARAMATTR_GRP_CODE_ENTRY = 3  // ENTRY: [grpid, idx, attr0, attr1, ...]
};

/// TYPE blocks have codes for each type primitive they use.
enum TypeCodes {
  TYPE_CODE_NUMENTRY = 1, // NUMENTRY: [numentries]

  // Type Codes
  TYPE_CODE_VOID = 2,    // VOID
  TYPE_CODE_FLOAT = 3,   // FLOAT
  TYPE_CODE_DOUBLE = 4,  // DOUBLE
  TYPE_CODE_LABEL = 5,   // LABEL
  TYPE_CODE_OPAQUE = 6,  // OPAQUE
  TYPE_CODE_INTEGER = 7, // INTEGER: [width]
  TYPE_CODE_POINTER = 8, // POINTER: [pointee type]

  TYPE_CODE_FUNCTION_OLD = 9, // FUNCTION: [vararg, attrid, retty,
                              //            paramty x N]

  TYPE_CODE_HALF = 10, // HALF

  TYPE_CODE_ARRAY = 11,  // ARRAY: [numelts, eltty]
  TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty]

  // These are not with the other floating point types because they're
  // a late addition, and putting them in the right place breaks
  // binary compatibility.
  TYPE_CODE_X86_FP80 = 13,  // X86 LONG DOUBLE
  TYPE_CODE_FP128 = 14,     // LONG DOUBLE (112 bit mantissa)
  TYPE_CODE_PPC_FP128 = 15, // PPC LONG DOUBLE (2 doubles)

  TYPE_CODE_METADATA = 16, // METADATA

  TYPE_CODE_X86_MMX = 17, // X86 MMX

  TYPE_CODE_STRUCT_ANON = 18,  // STRUCT_ANON: [ispacked, eltty x N]
  TYPE_CODE_STRUCT_NAME = 19,  // STRUCT_NAME: [strchr x N]
  TYPE_CODE_STRUCT_NAMED = 20, // STRUCT_NAMED: [ispacked, eltty x N]

  TYPE_CODE_FUNCTION = 21, // FUNCTION: [vararg, retty, paramty x N]

  TYPE_CODE_TOKEN = 22, // TOKEN

  TYPE_CODE_BFLOAT = 23,  // BRAIN FLOATING POINT
  TYPE_CODE_X86_AMX = 24, // X86 AMX

  TYPE_CODE_OPAQUE_POINTER = 25, // OPAQUE_POINTER: [addrspace]

  TYPE_CODE_TARGET_TYPE = 26, // TARGET_TYPE
};

enum OperandBundleTagCode {
  OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
};

enum SyncScopeNameCode {
  SYNC_SCOPE_NAME = 1,
};

// Value symbol table codes.
enum ValueSymtabCodes {
  VST_CODE_ENTRY = 1,   // VST_ENTRY: [valueid, namechar x N]
  VST_CODE_BBENTRY = 2, // VST_BBENTRY: [bbid, namechar x N]
  VST_CODE_FNENTRY = 3, // VST_FNENTRY: [valueid, offset, namechar x N]
  // VST_COMBINED_ENTRY: [valueid, refguid]
  VST_CODE_COMBINED_ENTRY = 5
};

// The module path symbol table codes (MST_CODE_ENTRY and MST_CODE_HASH).
enum ModulePathSymtabCodes {
  MST_CODE_ENTRY = 1, // MST_ENTRY: [modid, namechar x N]
  MST_CODE_HASH = 2,  // MST_HASH:  [5*i32]
};

// The summary section uses different codes in the per-module
// and combined index cases.
enum GlobalValueSummarySymtabCodes {
  // PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
  //             n x (valueid)]
  FS_PERMODULE = 1,
  // PERMODULE_PROFILE: [valueid, flags, instcount, numrefs,
  //                     numrefs x valueid,
  //                     n x (valueid, hotness)]
  FS_PERMODULE_PROFILE = 2,
  // PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid]
  FS_PERMODULE_GLOBALVAR_INIT_REFS = 3,
  // COMBINED: [valueid, modid, flags, instcount, numrefs, numrefs x valueid,
  //            n x (valueid)]
  FS_COMBINED = 4,
  // COMBINED_PROFILE: [valueid, modid, flags, instcount, numrefs,
  //                    numrefs x valueid,
  //                    n x (valueid, hotness)]
  FS_COMBINED_PROFILE = 5,
  // COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
  FS_COMBINED_GLOBALVAR_INIT_REFS = 6,
  // ALIAS: [valueid, flags, valueid]
  FS_ALIAS = 7,
  // COMBINED_ALIAS: [valueid, modid, flags, valueid]
  FS_COMBINED_ALIAS = 8,
  // COMBINED_ORIGINAL_NAME: [original_name_hash]
  FS_COMBINED_ORIGINAL_NAME = 9,
  // VERSION of the summary, bumped when adding flags for instance.
  FS_VERSION = 10,
  // The list of llvm.type.test type identifiers used by the following function
  // that are used other than by an llvm.assume.
  // [n x typeid]
  FS_TYPE_TESTS = 11,
  // The list of virtual calls made by this function using
  // llvm.assume(llvm.type.test) intrinsics that do not have all constant
  // integer arguments.
  // [n x (typeid, offset)]
  FS_TYPE_TEST_ASSUME_VCALLS = 12,
  // The list of virtual calls made by this function using
  // llvm.type.checked.load intrinsics that do not have all constant integer
  // arguments.
  // [n x (typeid, offset)]
  FS_TYPE_CHECKED_LOAD_VCALLS = 13,
  // Identifies a virtual call made by this function using an
  // llvm.assume(llvm.type.test) intrinsic with all constant integer arguments.
  // [typeid, offset, n x arg]
  FS_TYPE_TEST_ASSUME_CONST_VCALL = 14,
  // Identifies a virtual call made by this function using an
  // llvm.type.checked.load intrinsic with all constant integer arguments.
  // [typeid, offset, n x arg]
  FS_TYPE_CHECKED_LOAD_CONST_VCALL = 15,
  // Assigns a GUID to a value ID. This normally appears only in combined
  // summaries, but it can also appear in per-module summaries for PGO data.
  // [valueid, guid]
  FS_VALUE_GUID = 16,
  // The list of local functions with CFI jump tables. Function names are
  // strings in strtab.
  // [n * name]
  FS_CFI_FUNCTION_DEFS = 17,
  // The list of external functions with CFI jump tables. Function names are
  // strings in strtab.
  // [n * name]
  FS_CFI_FUNCTION_DECLS = 18,
  // Per-module summary that also adds relative block frequency to callee info.
  // PERMODULE_RELBF: [valueid, flags, instcount, numrefs,
  //                   numrefs x valueid,
  //                   n x (valueid, relblockfreq)]
  FS_PERMODULE_RELBF = 19,
  // Index-wide flags
  FS_FLAGS = 20,
  // Maps type identifier to summary information for that type identifier.
  // Produced by the thin link (only lives in combined index).
  // TYPE_ID: [typeid, kind, bitwidth, align, size, bitmask, inlinebits,
  //           n x (typeid, kind, name, numrba,
  //                numrba x (numarg, numarg x arg, kind, info, byte, bit))]
  FS_TYPE_ID = 21,
  // For background see overview at https://llvm.org/docs/TypeMetadata.html.
  // The type metadata includes both the type identifier and the offset of
  // the address point of the type (the address held by objects of that type
  // which may not be the beginning of the virtual table). Vtable definitions
  // are decorated with type metadata for the types they are compatible with.
  //
  // Maps type identifier to summary information for that type identifier
  // computed from type metadata: the valueid of each vtable definition
  // decorated with a type metadata for that identifier, and the offset from
  // the corresponding type metadata.
  // Exists in the per-module summary to provide information to thin link
  // for index-based whole program devirtualization.
  // TYPE_ID_METADATA: [typeid, n x (valueid, offset)]
  FS_TYPE_ID_METADATA = 22,
  // Summarizes vtable definition for use in index-based whole program
  // devirtualization during the thin link.
  // PERMODULE_VTABLE_GLOBALVAR_INIT_REFS: [valueid, flags, varflags,
  //                                        numrefs, numrefs x valueid,
  //                                        n x (valueid, offset)]
  FS_PERMODULE_VTABLE_GLOBALVAR_INIT_REFS = 23,
  // The total number of basic blocks in the module.
  FS_BLOCK_COUNT = 24,
  // Range information for accessed offsets for every argument.
  // [n x (paramno, range, numcalls, numcalls x (callee_guid, paramno, range))]
  FS_PARAM_ACCESS = 25,
  // Summary of per-module memprof callsite metadata.
  // [valueid, n x stackidindex]
  FS_PERMODULE_CALLSITE_INFO = 26,
  // Summary of per-module allocation memprof metadata.
  // [n x (alloc type, nummib, nummib x stackidindex)]
  FS_PERMODULE_ALLOC_INFO = 27,
  // Summary of combined index memprof callsite metadata.
  // [valueid, numstackindices, numver,
  //  numstackindices x stackidindex, numver x version]
  FS_COMBINED_CALLSITE_INFO = 28,
  // Summary of combined index allocation memprof metadata.
  // [nummib, numver,
  //  nummib x (alloc type, numstackids, numstackids x stackidindex),
  //  numver x version]
  FS_COMBINED_ALLOC_INFO = 29,
  FS_STACK_IDS = 30,
};

enum MetadataCodes {
  METADATA_STRING_OLD = 1,     // MDSTRING:      [values]
  METADATA_VALUE = 2,          // VALUE:         [type num, value num]
  METADATA_NODE = 3,           // NODE:          [n x md num]
  METADATA_NAME = 4,           // STRING:        [values]
  METADATA_DISTINCT_NODE = 5,  // DISTINCT_NODE: [n x md num]
  METADATA_KIND = 6,           // [n x [id, name]]
  METADATA_LOCATION = 7,       // [distinct, line, col, scope, inlined-at?]
  METADATA_OLD_NODE = 8,       // OLD_NODE:      [n x (type num, value num)]
  METADATA_OLD_FN_NODE = 9,    // OLD_FN_NODE:   [n x (type num, value num)]
  METADATA_NAMED_NODE = 10,    // NAMED_NODE:    [n x mdnodes]
  METADATA_ATTACHMENT = 11,    // [m x [value, [n x [id, mdnode]]]
  METADATA_GENERIC_DEBUG = 12, // [distinct, tag, vers, header, n x md num]
  METADATA_SUBRANGE = 13,      // [distinct, count, lo]
  METADATA_ENUMERATOR = 14,    // [isUnsigned|distinct, value, name]
  METADATA_BASIC_TYPE = 15,    // [distinct, tag, name, size, align, enc]
  METADATA_FILE = 16, // [distinct, filename, directory, checksumkind, checksum]
  METADATA_DERIVED_TYPE = 17,       // [distinct, ...]
  METADATA_COMPOSITE_TYPE = 18,     // [distinct, ...]
  METADATA_SUBROUTINE_TYPE = 19,    // [distinct, flags, types, cc]
  METADATA_COMPILE_UNIT = 20,       // [distinct, ...]
  METADATA_SUBPROGRAM = 21,         // [distinct, ...]
  METADATA_LEXICAL_BLOCK = 22,      // [distinct, scope, file, line, column]
  METADATA_LEXICAL_BLOCK_FILE = 23, //[distinct, scope, file, discriminator]
  METADATA_NAMESPACE = 24, // [distinct, scope, file, name, line, exportSymbols]
  METADATA_TEMPLATE_TYPE = 25,   // [distinct, scope, name, type, ...]
  METADATA_TEMPLATE_VALUE = 26,  // [distinct, scope, name, type, value, ...]
  METADATA_GLOBAL_VAR = 27,      // [distinct, ...]
  METADATA_LOCAL_VAR = 28,       // [distinct, ...]
  METADATA_EXPRESSION = 29,      // [distinct, n x element]
  METADATA_OBJC_PROPERTY = 30,   // [distinct, name, file, line, ...]
  METADATA_IMPORTED_ENTITY = 31, // [distinct, tag, scope, entity, line, name]
  METADATA_MODULE = 32,          // [distinct, scope, name, ...]
  METADATA_MACRO = 33,           // [distinct, macinfo, line, name, value]
  METADATA_MACRO_FILE = 34,      // [distinct, macinfo, line, file, ...]
  METADATA_STRINGS = 35,         // [count, offset] blob([lengths][chars])
  METADATA_GLOBAL_DECL_ATTACHMENT = 36, // [valueid, n x [id, mdnode]]
  METADATA_GLOBAL_VAR_EXPR = 37,        // [distinct, var, expr]
  METADATA_INDEX_OFFSET = 38,           // [offset]
  METADATA_INDEX = 39,                  // [bitpos]
  METADATA_LABEL = 40,                  // [distinct, scope, name, file, line]
  METADATA_STRING_TYPE = 41,            // [distinct, name, size, align,...]
  // Codes 42 and 43 are reserved for support for Fortran array specific debug
  // info.
  METADATA_COMMON_BLOCK = 44,     // [distinct, scope, name, variable,...]
  METADATA_GENERIC_SUBRANGE = 45, // [distinct, count, lo, up, stride]
  METADATA_ARG_LIST = 46,         // [n x [type num, value num]]
  METADATA_ASSIGN_ID = 47,        // [distinct, ...]
};

// The constants block (CONSTANTS_BLOCK_ID) describes emission for each
// constant and maintains an implicit current type value.
enum ConstantsCodes {
  CST_CODE_SETTYPE = 1,          // SETTYPE:       [typeid]
  CST_CODE_NULL = 2,             // NULL
  CST_CODE_UNDEF = 3,            // UNDEF
  CST_CODE_INTEGER = 4,          // INTEGER:       [intval]
  CST_CODE_WIDE_INTEGER = 5,     // WIDE_INTEGER:  [n x intval]
  CST_CODE_FLOAT = 6,            // FLOAT:         [fpval]
  CST_CODE_AGGREGATE = 7,        // AGGREGATE:     [n x value number]
  CST_CODE_STRING = 8,           // STRING:        [values]
  CST_CODE_CSTRING = 9,          // CSTRING:       [values]
  CST_CODE_CE_BINOP = 10,        // CE_BINOP:      [opcode, opval, opval]
  CST_CODE_CE_CAST = 11,         // CE_CAST:       [opcode, opty, opval]
  CST_CODE_CE_GEP = 12,          // CE_GEP:        [n x operands]
  CST_CODE_CE_SELECT = 13,       // CE_SELECT:     [opval, opval, opval]
  CST_CODE_CE_EXTRACTELT = 14,   // CE_EXTRACTELT: [opty, opval, opval]
  CST_CODE_CE_INSERTELT = 15,    // CE_INSERTELT:  [opval, opval, opval]
  CST_CODE_CE_SHUFFLEVEC = 16,   // CE_SHUFFLEVEC: [opval, opval, opval]
  CST_CODE_CE_CMP = 17,          // CE_CMP:        [opty, opval, opval, pred]
  CST_CODE_INLINEASM_OLD = 18,   // INLINEASM:     [sideeffect|alignstack,
                                 //                 asmstr,conststr]
  CST_CODE_CE_SHUFVEC_EX = 19,   // SHUFVEC_EX:    [opty, opval, opval, opval]
  CST_CODE_CE_INBOUNDS_GEP = 20, // INBOUNDS_GEP:  [n x operands]
  CST_CODE_BLOCKADDRESS = 21,    // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
  CST_CODE_DATA = 22,            // DATA:          [n x elements]
  CST_CODE_INLINEASM_OLD2 = 23,  // INLINEASM:     [sideeffect|alignstack|
                                 //                 asmdialect,asmstr,conststr]
  CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, //      [opty, flags, n x operands]
  CST_CODE_CE_UNOP = 25,                   // CE_UNOP:      [opcode, opval]
  CST_CODE_POISON = 26,                    // POISON
  CST_CODE_DSO_LOCAL_EQUIVALENT = 27,      // DSO_LOCAL_EQUIVALENT [gvty, gv]
  CST_CODE_INLINEASM_OLD3 = 28,    // INLINEASM:     [sideeffect|alignstack|
                                   //                 asmdialect|unwind,
                                   //                 asmstr,conststr]
  CST_CODE_NO_CFI_VALUE = 29, // NO_CFI [ fty, f ]
  CST_CODE_INLINEASM = 30,    // INLINEASM:     [fnty,
                              //                 sideeffect|alignstack|
                              //                 asmdialect|unwind,
                              //                 asmstr,conststr]
};

/// CastOpcodes - These are values used in the bitcode files to encode which
/// cast a CST_CODE_CE_CAST or a XXX refers to.  The values of these enums
/// have no fixed relation to the LLVM IR enum values.  Changing these will
/// break compatibility with old files.
enum CastOpcodes {
  CAST_TRUNC = 0,
  CAST_ZEXT = 1,
  CAST_SEXT = 2,
  CAST_FPTOUI = 3,
  CAST_FPTOSI = 4,
  CAST_UITOFP = 5,
  CAST_SITOFP = 6,
  CAST_FPTRUNC = 7,
  CAST_FPEXT = 8,
  CAST_PTRTOINT = 9,
  CAST_INTTOPTR = 10,
  CAST_BITCAST = 11,
  CAST_ADDRSPACECAST = 12
};

/// UnaryOpcodes - These are values used in the bitcode files to encode which
/// unop a CST_CODE_CE_UNOP or a XXX refers to.  The values of these enums
/// have no fixed relation to the LLVM IR enum values.  Changing these will
/// break compatibility with old files.
enum UnaryOpcodes {
  UNOP_FNEG = 0
};

/// BinaryOpcodes - These are values used in the bitcode files to encode which
/// binop a CST_CODE_CE_BINOP or a XXX refers to.  The values of these enums
/// have no fixed relation to the LLVM IR enum values.  Changing these will
/// break compatibility with old files.
enum BinaryOpcodes {
  BINOP_ADD = 0,
  BINOP_SUB = 1,
  BINOP_MUL = 2,
  BINOP_UDIV = 3,
  BINOP_SDIV = 4, // overloaded for FP
  BINOP_UREM = 5,
  BINOP_SREM = 6, // overloaded for FP
  BINOP_SHL = 7,
  BINOP_LSHR = 8,
  BINOP_ASHR = 9,
  BINOP_AND = 10,
  BINOP_OR = 11,
  BINOP_XOR = 12
};

/// These are values used in the bitcode files to encode AtomicRMW operations.
/// The values of these enums have no fixed relation to the LLVM IR enum
/// values.  Changing these will break compatibility with old files.
enum RMWOperations {
  RMW_XCHG = 0,
  RMW_ADD = 1,
  RMW_SUB = 2,
  RMW_AND = 3,
  RMW_NAND = 4,
  RMW_OR = 5,
  RMW_XOR = 6,
  RMW_MAX = 7,
  RMW_MIN = 8,
  RMW_UMAX = 9,
  RMW_UMIN = 10,
  RMW_FADD = 11,
  RMW_FSUB = 12,
  RMW_FMAX = 13,
  RMW_FMIN = 14,
  RMW_UINC_WRAP = 15,
  RMW_UDEC_WRAP = 16
};

/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
/// OverflowingBinaryOperator's SubclassOptionalData contents.
enum OverflowingBinaryOperatorOptionalFlags {
  OBO_NO_UNSIGNED_WRAP = 0,
  OBO_NO_SIGNED_WRAP = 1
};

/// FastMath Flags
/// This is a fixed layout derived from the bitcode emitted by LLVM 5.0
/// intended to decouple the in-memory representation from the serialization.
enum FastMathMap {
  UnsafeAlgebra   = (1 << 0), // Legacy
  NoNaNs          = (1 << 1),
  NoInfs          = (1 << 2),
  NoSignedZeros   = (1 << 3),
  AllowReciprocal = (1 << 4),
  AllowContract   = (1 << 5),
  ApproxFunc      = (1 << 6),
  AllowReassoc    = (1 << 7)
};

/// PossiblyExactOperatorOptionalFlags - Flags for serializing
/// PossiblyExactOperator's SubclassOptionalData contents.
enum PossiblyExactOperatorOptionalFlags { PEO_EXACT = 0 };

/// Encoded AtomicOrdering values.
enum AtomicOrderingCodes {
  ORDERING_NOTATOMIC = 0,
  ORDERING_UNORDERED = 1,
  ORDERING_MONOTONIC = 2,
  ORDERING_ACQUIRE = 3,
  ORDERING_RELEASE = 4,
  ORDERING_ACQREL = 5,
  ORDERING_SEQCST = 6
};

/// Markers and flags for call instruction.
enum CallMarkersFlags {
  CALL_TAIL = 0,
  CALL_CCONV = 1,
  CALL_MUSTTAIL = 14,
  CALL_EXPLICIT_TYPE = 15,
  CALL_NOTAIL = 16,
  CALL_FMF = 17 // Call has optional fast-math-flags.
};

// The function body block (FUNCTION_BLOCK_ID) describes function bodies.  It
// can contain a constant block (CONSTANTS_BLOCK_ID).
enum FunctionCodes {
  FUNC_CODE_DECLAREBLOCKS = 1, // DECLAREBLOCKS: [n]

  FUNC_CODE_INST_BINOP = 2,      // BINOP:      [opcode, ty, opval, opval]
  FUNC_CODE_INST_CAST = 3,       // CAST:       [opcode, ty, opty, opval]
  FUNC_CODE_INST_GEP_OLD = 4,    // GEP:        [n x operands]
  FUNC_CODE_INST_SELECT = 5,     // SELECT:     [ty, opval, opval, opval]
  FUNC_CODE_INST_EXTRACTELT = 6, // EXTRACTELT: [opty, opval, opval]
  FUNC_CODE_INST_INSERTELT = 7,  // INSERTELT:  [ty, opval, opval, opval]
  FUNC_CODE_INST_SHUFFLEVEC = 8, // SHUFFLEVEC: [ty, opval, opval, opval]
  FUNC_CODE_INST_CMP = 9,        // CMP:        [opty, opval, opval, pred]

  FUNC_CODE_INST_RET = 10,    // RET:        [opty,opval<both optional>]
  FUNC_CODE_INST_BR = 11,     // BR:         [bb#, bb#, cond] or [bb#]
  FUNC_CODE_INST_SWITCH = 12, // SWITCH:     [opty, op0, op1, ...]
  FUNC_CODE_INST_INVOKE = 13, // INVOKE:     [attr, fnty, op0,op1, ...]
  // 14 is unused.
  FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE

  FUNC_CODE_INST_PHI = 16, // PHI:        [ty, val0,bb0, ...]
  // 17 is unused.
  // 18 is unused.
  FUNC_CODE_INST_ALLOCA = 19, // ALLOCA:     [instty, opty, op, align]
  FUNC_CODE_INST_LOAD = 20,   // LOAD:       [opty, op, align, vol]
  // 21 is unused.
  // 22 is unused.
  FUNC_CODE_INST_VAARG = 23, // VAARG:      [valistty, valist, instty]
  // This store code encodes the pointer type rather than the value type, so
  // that information only available in the pointer type (e.g. address
  // spaces) is retained.
  FUNC_CODE_INST_STORE_OLD = 24, // STORE:      [ptrty,ptr,val, align, vol]
  // 25 is unused.
  FUNC_CODE_INST_EXTRACTVAL = 26, // EXTRACTVAL: [n x operands]
  FUNC_CODE_INST_INSERTVAL = 27,  // INSERTVAL:  [n x operands]
  // fcmp/icmp returning Int1Ty or a vector of Int1Ty. Same as CMP; exists to
  // support legacy vicmp/vfcmp instructions.
  FUNC_CODE_INST_CMP2 = 28, // CMP2:       [opty, opval, opval, pred]
  // new select on i1 or [N x i1]
  FUNC_CODE_INST_VSELECT = 29, // VSELECT:    [ty,opval,opval,predty,pred]
  FUNC_CODE_INST_INBOUNDS_GEP_OLD = 30, // INBOUNDS_GEP: [n x operands]
  FUNC_CODE_INST_INDIRECTBR = 31,       // INDIRECTBR: [opty, op0, op1, ...]
  // 32 is unused.
  FUNC_CODE_DEBUG_LOC_AGAIN = 33, // DEBUG_LOC_AGAIN

  FUNC_CODE_INST_CALL = 34, // CALL:    [attr, cc, fnty, fnid, args...]

  FUNC_CODE_DEBUG_LOC = 35,          // DEBUG_LOC:  [Line,Col,ScopeVal, IAVal]
  FUNC_CODE_INST_FENCE = 36,         // FENCE: [ordering, synchscope]
  FUNC_CODE_INST_CMPXCHG_OLD = 37,   // CMPXCHG: [ptrty, ptr, cmp, val, vol,
                                     //            ordering, synchscope,
                                     //            failure_ordering?, weak?]
  FUNC_CODE_INST_ATOMICRMW_OLD = 38, // ATOMICRMW: [ptrty,ptr,val, operation,
                                     //             align, vol,
                                     //             ordering, synchscope]
  FUNC_CODE_INST_RESUME = 39,        // RESUME:     [opval]
  FUNC_CODE_INST_LANDINGPAD_OLD =
      40,                         // LANDINGPAD: [ty,val,val,num,id0,val0...]
  FUNC_CODE_INST_LOADATOMIC = 41, // LOAD: [opty, op, align, vol,
                                  //        ordering, synchscope]
  FUNC_CODE_INST_STOREATOMIC_OLD = 42, // STORE: [ptrty,ptr,val, align, vol
                                       //         ordering, synchscope]
  FUNC_CODE_INST_GEP = 43,             // GEP:  [inbounds, n x operands]
  FUNC_CODE_INST_STORE = 44,       // STORE: [ptrty,ptr,valty,val, align, vol]
  FUNC_CODE_INST_STOREATOMIC = 45, // STORE: [ptrty,ptr,val, align, vol,
                                   //         ordering, synchscope]
  FUNC_CODE_INST_CMPXCHG = 46,     // CMPXCHG: [ptrty, ptr, cmp, val, vol,
                                   //           success_ordering, synchscope,
                                   //           failure_ordering, weak]
  FUNC_CODE_INST_LANDINGPAD = 47,  // LANDINGPAD: [ty,val,num,id0,val0...]
  FUNC_CODE_INST_CLEANUPRET = 48,  // CLEANUPRET: [val] or [val,bb#]
  FUNC_CODE_INST_CATCHRET = 49,    // CATCHRET: [val,bb#]
  FUNC_CODE_INST_CATCHPAD = 50,    // CATCHPAD: [bb#,bb#,num,args...]
  FUNC_CODE_INST_CLEANUPPAD = 51,  // CLEANUPPAD: [num,args...]
  FUNC_CODE_INST_CATCHSWITCH =
      52, // CATCHSWITCH: [num,args...] or [num,args...,bb]
  // 53 is unused.
  // 54 is unused.
  FUNC_CODE_OPERAND_BUNDLE = 55,  // OPERAND_BUNDLE: [tag#, value...]
  FUNC_CODE_INST_UNOP = 56,       // UNOP:       [opcode, ty, opval]
  FUNC_CODE_INST_CALLBR = 57,     // CALLBR:     [attr, cc, norm, transfs,
                                  //              fnty, fnid, args...]
  FUNC_CODE_INST_FREEZE = 58,     // FREEZE: [opty, opval]
  FUNC_CODE_INST_ATOMICRMW = 59,  // ATOMICRMW: [ptrty, ptr, valty, val,
                                  //             operation, align, vol,
                                  //             ordering, synchscope]
  FUNC_CODE_BLOCKADDR_USERS = 60, // BLOCKADDR_USERS: [value...]
};

enum UseListCodes {
  USELIST_CODE_DEFAULT = 1, // DEFAULT: [index..., value-id]
  USELIST_CODE_BB = 2       // BB: [index..., bb-id]
};

enum AttributeKindCodes {
  // = 0 is unused
  ATTR_KIND_ALIGNMENT = 1,
  ATTR_KIND_ALWAYS_INLINE = 2,
  ATTR_KIND_BY_VAL = 3,
  ATTR_KIND_INLINE_HINT = 4,
  ATTR_KIND_IN_REG = 5,
  ATTR_KIND_MIN_SIZE = 6,
  ATTR_KIND_NAKED = 7,
  ATTR_KIND_NEST = 8,
  ATTR_KIND_NO_ALIAS = 9,
  ATTR_KIND_NO_BUILTIN = 10,
  ATTR_KIND_NO_CAPTURE = 11,
  ATTR_KIND_NO_DUPLICATE = 12,
  ATTR_KIND_NO_IMPLICIT_FLOAT = 13,
  ATTR_KIND_NO_INLINE = 14,
  ATTR_KIND_NON_LAZY_BIND = 15,
  ATTR_KIND_NO_RED_ZONE = 16,
  ATTR_KIND_NO_RETURN = 17,
  ATTR_KIND_NO_UNWIND = 18,
  ATTR_KIND_OPTIMIZE_FOR_SIZE = 19,
  ATTR_KIND_READ_NONE = 20,
  ATTR_KIND_READ_ONLY = 21,
  ATTR_KIND_RETURNED = 22,
  ATTR_KIND_RETURNS_TWICE = 23,
  ATTR_KIND_S_EXT = 24,
  ATTR_KIND_STACK_ALIGNMENT = 25,
  ATTR_KIND_STACK_PROTECT = 26,
  ATTR_KIND_STACK_PROTECT_REQ = 27,
  ATTR_KIND_STACK_PROTECT_STRONG = 28,
  ATTR_KIND_STRUCT_RET = 29,
  ATTR_KIND_SANITIZE_ADDRESS = 30,
  ATTR_KIND_SANITIZE_THREAD = 31,
  ATTR_KIND_SANITIZE_MEMORY = 32,
  ATTR_KIND_UW_TABLE = 33,
  ATTR_KIND_Z_EXT = 34,
  ATTR_KIND_BUILTIN = 35,
  ATTR_KIND_COLD = 36,
  ATTR_KIND_OPTIMIZE_NONE = 37,
  ATTR_KIND_IN_ALLOCA = 38,
  ATTR_KIND_NON_NULL = 39,
  ATTR_KIND_JUMP_TABLE = 40,
  ATTR_KIND_DEREFERENCEABLE = 41,
  ATTR_KIND_DEREFERENCEABLE_OR_NULL = 42,
  ATTR_KIND_CONVERGENT = 43,
  ATTR_KIND_SAFESTACK = 44,
  ATTR_KIND_ARGMEMONLY = 45,
  ATTR_KIND_SWIFT_SELF = 46,
  ATTR_KIND_SWIFT_ERROR = 47,
  ATTR_KIND_NO_RECURSE = 48,
  ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
  ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
  ATTR_KIND_ALLOC_SIZE = 51,
  ATTR_KIND_WRITEONLY = 52,
  ATTR_KIND_SPECULATABLE = 53,
  ATTR_KIND_STRICT_FP = 54,
  ATTR_KIND_SANITIZE_HWADDRESS = 55,
  ATTR_KIND_NOCF_CHECK = 56,
  ATTR_KIND_OPT_FOR_FUZZING = 57,
  ATTR_KIND_SHADOWCALLSTACK = 58,
  ATTR_KIND_SPECULATIVE_LOAD_HARDENING = 59,
  ATTR_KIND_IMMARG = 60,
  ATTR_KIND_WILLRETURN = 61,
  ATTR_KIND_NOFREE = 62,
  ATTR_KIND_NOSYNC = 63,
  ATTR_KIND_SANITIZE_MEMTAG = 64,
  ATTR_KIND_PREALLOCATED = 65,
  ATTR_KIND_NO_MERGE = 66,
  ATTR_KIND_NULL_POINTER_IS_VALID = 67,
  ATTR_KIND_NOUNDEF = 68,
  ATTR_KIND_BYREF = 69,
  ATTR_KIND_MUSTPROGRESS = 70,
  ATTR_KIND_NO_CALLBACK = 71,
  ATTR_KIND_HOT = 72,
  ATTR_KIND_NO_PROFILE = 73,
  ATTR_KIND_VSCALE_RANGE = 74,
  ATTR_KIND_SWIFT_ASYNC = 75,
  ATTR_KIND_NO_SANITIZE_COVERAGE = 76,
  ATTR_KIND_ELEMENTTYPE = 77,
  ATTR_KIND_DISABLE_SANITIZER_INSTRUMENTATION = 78,
  ATTR_KIND_NO_SANITIZE_BOUNDS = 79,
  ATTR_KIND_ALLOC_ALIGN = 80,
  ATTR_KIND_ALLOCATED_POINTER = 81,
  ATTR_KIND_ALLOC_KIND = 82,
  ATTR_KIND_PRESPLIT_COROUTINE = 83,
  ATTR_KIND_FNRETTHUNK_EXTERN = 84,
  ATTR_KIND_SKIP_PROFILE = 85,
  ATTR_KIND_MEMORY = 86,
  ATTR_KIND_NOFPCLASS = 87,
};

enum ComdatSelectionKindCodes {
  COMDAT_SELECTION_KIND_ANY = 1,
  COMDAT_SELECTION_KIND_EXACT_MATCH = 2,
  COMDAT_SELECTION_KIND_LARGEST = 3,
  COMDAT_SELECTION_KIND_NO_DUPLICATES = 4,
  COMDAT_SELECTION_KIND_SAME_SIZE = 5,
};

enum StrtabCodes {
  STRTAB_BLOB = 1,
};

enum SymtabCodes {
  SYMTAB_BLOB = 1,
};

} // End bitc namespace
} // End llvm namespace

#endif
//===- BitcodeCommon.h - Common code for encode/decode   --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines common code to be used by BitcodeWriter and
// BitcodeReader.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_BITCODECOMMON_H
#define LLVM_BITCODE_BITCODECOMMON_H

#include "llvm/ADT/Bitfields.h"

namespace llvm {

struct AllocaPackedValues {
  // We increased the number of bits needed to represent alignment to be more
  // than 5, but to preserve backward compatibility we store the upper bits
  // separately.
  using AlignLower = Bitfield::Element<unsigned, 0, 5>;
  using UsedWithInAlloca = Bitfield::Element<bool, AlignLower::NextBit, 1>;
  using ExplicitType = Bitfield::Element<bool, UsedWithInAlloca::NextBit, 1>;
  using SwiftError = Bitfield::Element<bool, ExplicitType::NextBit, 1>;
  using AlignUpper = Bitfield::Element<unsigned, SwiftError::NextBit, 3>;
};
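
// For illustration, a reader could reassemble the alignment exponent from the
// two split fields like this (a sketch; `Packed` is the raw packed record
// value):
//
// \code
//   unsigned AlignExp =
//       Bitfield::get<AllocaPackedValues::AlignLower>(Packed) |
//       (Bitfield::get<AllocaPackedValues::AlignUpper>(Packed)
//        << AllocaPackedValues::AlignLower::Bits);
// \endcode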

} // namespace llvm

#endif // LLVM_BITCODE_BITCODECOMMON_H
//===- llvm/Bitcode/BitcodeReader.h - Bitcode reader ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines interfaces to read LLVM bitcode files/streams.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_BITCODEREADER_H
#define LLVM_BITCODE_BITCODEREADER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitCodeEnums.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <system_error>
#include <vector>

namespace llvm {

class LLVMContext;
class Module;
class MemoryBuffer;
class Metadata;
class ModuleSummaryIndex;
class Type;
class Value;

// Callback to override the data layout string of an imported bitcode module.
// The first argument is the target triple; the second is the data layout
// string from the input, or a default string. That string is used if the
// callback returns std::nullopt.
typedef std::function<std::optional<std::string>(StringRef, StringRef)>
    DataLayoutCallbackFuncTy;
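
// For example, a callback that always keeps the data layout string from the
// input (a minimal sketch):
//
// \code
//   DataLayoutCallbackFuncTy KeepInput =
//       [](StringRef TargetTriple,
//          StringRef DL) -> std::optional<std::string> {
//     return std::nullopt; // use the string from the input as-is
//   };
// \endcode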

typedef std::function<Type *(unsigned)> GetTypeByIDTy;

typedef std::function<unsigned(unsigned, unsigned)> GetContainedTypeIDTy;

typedef std::function<void(Value *, unsigned, GetTypeByIDTy,
                           GetContainedTypeIDTy)>
    ValueTypeCallbackTy;

typedef std::function<void(Metadata **, unsigned, GetTypeByIDTy,
                           GetContainedTypeIDTy)>
    MDTypeCallbackTy;

// These functions are for converting Expected/Error values to
// ErrorOr/std::error_code for compatibility with legacy clients. FIXME:
// Remove these functions once no longer needed by the C and libLTO APIs.

std::error_code errorToErrorCodeAndEmitErrors(LLVMContext &Ctx, Error Err);

template <typename T>
ErrorOr<T> expectedToErrorOrAndEmitErrors(LLVMContext &Ctx, Expected<T> Val) {
  if (!Val)
    return errorToErrorCodeAndEmitErrors(Ctx, Val.takeError());
  return std::move(*Val);
}

struct ParserCallbacks {
  std::optional<DataLayoutCallbackFuncTy> DataLayout;
  /// The ValueType callback is called for every function definition or
  /// declaration and allows accessing the type information, including types
  /// behind pointers. This can be useful when the opaque pointer upgrade
  /// strips all type information behind pointers.
  /// The second argument to ValueTypeCallback is the type ID of the
  /// function; the two passed functions can be used to extract type
  /// information.
  std::optional<ValueTypeCallbackTy> ValueType;
  /// The MDType callback is called for every value in metadata.
  std::optional<MDTypeCallbackTy> MDType;

  ParserCallbacks() = default;
  explicit ParserCallbacks(DataLayoutCallbackFuncTy DataLayout)
      : DataLayout(DataLayout) {}
};
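
// Example (a sketch, reusing the KeepInput callback above; `Buf` and `Ctx`
// are assumed to exist):
//
// \code
//   ParserCallbacks Callbacks(KeepInput);
//   Expected<std::unique_ptr<Module>> M =
//       parseBitcodeFile(Buf, Ctx, Callbacks);
// \endcode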

  struct BitcodeFileContents;

  /// Basic information extracted from a bitcode module to be used for LTO.
  struct BitcodeLTOInfo {
    bool IsThinLTO;
    bool HasSummary;
    bool EnableSplitLTOUnit;
    bool UnifiedLTO;
  };

  /// Represents a module in a bitcode file.
  class BitcodeModule {
    // This covers the identification (if present) and module blocks.
    ArrayRef<uint8_t> Buffer;
    StringRef ModuleIdentifier;

    // The string table used to interpret this module.
    StringRef Strtab;

    // The bitstream location of the IDENTIFICATION_BLOCK.
    uint64_t IdentificationBit;

    // The bitstream location of this module's MODULE_BLOCK.
    uint64_t ModuleBit;

    BitcodeModule(ArrayRef<uint8_t> Buffer, StringRef ModuleIdentifier,
                  uint64_t IdentificationBit, uint64_t ModuleBit)
        : Buffer(Buffer), ModuleIdentifier(ModuleIdentifier),
          IdentificationBit(IdentificationBit), ModuleBit(ModuleBit) {}

    // Calls the ctor.
    friend Expected<BitcodeFileContents>
    getBitcodeFileContents(MemoryBufferRef Buffer);

    Expected<std::unique_ptr<Module>>
    getModuleImpl(LLVMContext &Context, bool MaterializeAll,
                  bool ShouldLazyLoadMetadata, bool IsImporting,
                  ParserCallbacks Callbacks = {});

  public:
    StringRef getBuffer() const {
      return StringRef((const char *)Buffer.begin(), Buffer.size());
    }

    StringRef getStrtab() const { return Strtab; }

    StringRef getModuleIdentifier() const { return ModuleIdentifier; }

    /// Read the bitcode module and prepare for lazy deserialization of function
    /// bodies. If ShouldLazyLoadMetadata is true, lazily load metadata as well.
    /// If IsImporting is true, this module is being parsed for ThinLTO
    /// importing into another module.
    Expected<std::unique_ptr<Module>>
    getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata,
                  bool IsImporting, ParserCallbacks Callbacks = {});

    /// Read the entire bitcode module and return it.
    Expected<std::unique_ptr<Module>>
    parseModule(LLVMContext &Context, ParserCallbacks Callbacks = {});

    /// Returns information about the module to be used for LTO: whether to
    /// compile with ThinLTO, and whether it has a summary.
    Expected<BitcodeLTOInfo> getLTOInfo();

    /// Parse the specified bitcode buffer, returning the module summary index.
    Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary();

    /// Parse the specified bitcode buffer and merge its module summary index
    /// into CombinedIndex.
    Error
    readSummary(ModuleSummaryIndex &CombinedIndex, StringRef ModulePath,
                uint64_t ModuleId,
                std::function<bool(GlobalValue::GUID)> IsPrevailing = nullptr);
  };

  struct BitcodeFileContents {
    std::vector<BitcodeModule> Mods;
    StringRef Symtab, StrtabForSymtab;
  };

  /// Returns the contents of a bitcode file. This includes the raw contents of
  /// the symbol table embedded in the bitcode file. Clients which require a
  /// symbol table should prefer to use irsymtab::read instead of this function
  /// because it creates a reader for the irsymtab and handles upgrading bitcode
  /// files without a symbol table or with an old symbol table.
  Expected<BitcodeFileContents> getBitcodeFileContents(MemoryBufferRef Buffer);

  /// Returns a list of modules in the specified bitcode buffer.
  Expected<std::vector<BitcodeModule>>
  getBitcodeModuleList(MemoryBufferRef Buffer);

  /// Read the header of the specified bitcode buffer and prepare for lazy
  /// deserialization of function bodies. If ShouldLazyLoadMetadata is true,
  /// lazily load metadata as well. If IsImporting is true, this module is
  /// being parsed for ThinLTO importing into another module.
  Expected<std::unique_ptr<Module>>
  getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context,
                       bool ShouldLazyLoadMetadata = false,
                       bool IsImporting = false,
                       ParserCallbacks Callbacks = {});

  /// Like getLazyBitcodeModule, except that the module takes ownership of
  /// the memory buffer if successful. If successful, this moves Buffer. On
  /// error, this *does not* move Buffer. If IsImporting is true, this module is
  /// being parsed for ThinLTO importing into another module.
  Expected<std::unique_ptr<Module>> getOwningLazyBitcodeModule(
      std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context,
      bool ShouldLazyLoadMetadata = false, bool IsImporting = false,
      ParserCallbacks Callbacks = {});

  /// Read the header of the specified bitcode buffer and extract just the
  /// triple information. If successful, this returns a string. On error, this
  /// returns "".
  Expected<std::string> getBitcodeTargetTriple(MemoryBufferRef Buffer);

  /// Return true if \p Buffer contains a bitcode file with ObjC code (category
  /// or class) in it.
  Expected<bool> isBitcodeContainingObjCCategory(MemoryBufferRef Buffer);

  /// Read the header of the specified bitcode buffer and extract just the
  /// producer string information. If successful, this returns a string. On
  /// error, this returns "".
  Expected<std::string> getBitcodeProducerString(MemoryBufferRef Buffer);

  /// Read the specified bitcode file, returning the module.
  Expected<std::unique_ptr<Module>>
  parseBitcodeFile(MemoryBufferRef Buffer, LLVMContext &Context,
                   ParserCallbacks Callbacks = {});
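
  // Example (a sketch; assumes "input.bc" exists on disk):
  //
  // \code
  //   auto BufOrErr = MemoryBuffer::getFile("input.bc");
  //   if (!BufOrErr)
  //     report_fatal_error("cannot open input.bc");
  //   LLVMContext Ctx;
  //   Expected<std::unique_ptr<Module>> MOrErr =
  //       parseBitcodeFile((*BufOrErr)->getMemBufferRef(), Ctx);
  //   if (!MOrErr)
  //     logAllUnhandledErrors(MOrErr.takeError(), errs(), "bitcode: ");
  // \endcode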

  /// Returns LTO information for the specified bitcode file.
  Expected<BitcodeLTOInfo> getBitcodeLTOInfo(MemoryBufferRef Buffer);

  /// Parse the specified bitcode buffer, returning the module summary index.
  Expected<std::unique_ptr<ModuleSummaryIndex>>
  getModuleSummaryIndex(MemoryBufferRef Buffer);

  /// Parse the specified bitcode buffer and merge the index into CombinedIndex.
  Error readModuleSummaryIndex(MemoryBufferRef Buffer,
                               ModuleSummaryIndex &CombinedIndex,
                               uint64_t ModuleId);

  /// Parse the module summary index out of an IR file and return the module
  /// summary index object if found, or an empty summary if none is present.
  /// If Path refers to an empty file and IgnoreEmptyThinLTOIndexFile is true,
  /// then this function will return nullptr.
  Expected<std::unique_ptr<ModuleSummaryIndex>>
  getModuleSummaryIndexForFile(StringRef Path,
                               bool IgnoreEmptyThinLTOIndexFile = false);

  /// isBitcodeWrapper - Return true if the given bytes are the magic bytes
  /// for an LLVM IR bitcode wrapper.
  inline bool isBitcodeWrapper(const unsigned char *BufPtr,
                               const unsigned char *BufEnd) {
    // See if you can find the hidden message in the magic bytes :-).
    // (Hint: it's a little-endian encoding.)
    // The wrapper magic is four bytes, so require at least four bytes before
    // reading BufPtr[0..3].
    return BufEnd - BufPtr >= 4 &&
           BufPtr[0] == 0xDE &&
           BufPtr[1] == 0xC0 &&
           BufPtr[2] == 0x17 &&
           BufPtr[3] == 0x0B;
  }

  /// isRawBitcode - Return true if the given bytes are the magic bytes for
  /// raw LLVM IR bitcode (without a wrapper).
  inline bool isRawBitcode(const unsigned char *BufPtr,
                           const unsigned char *BufEnd) {
    // These bytes sort of have a hidden message, but it's not in
    // little-endian this time, and it's a little redundant.
    // As above, the magic is four bytes long, so require at least four bytes.
    return BufEnd - BufPtr >= 4 &&
           BufPtr[0] == 'B' &&
           BufPtr[1] == 'C' &&
           BufPtr[2] == 0xc0 &&
           BufPtr[3] == 0xde;
  }

  /// isBitcode - Return true if the given bytes are the magic bytes for
  /// LLVM IR bitcode, either with or without a wrapper.
  inline bool isBitcode(const unsigned char *BufPtr,
                        const unsigned char *BufEnd) {
    return isBitcodeWrapper(BufPtr, BufEnd) ||
           isRawBitcode(BufPtr, BufEnd);
  }
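
  // Example (editor's sketch): checking a MemoryBufferRef named `Buf` (an
  // assumed variable) for bitcode magic before handing it to a reader.
  //
  //   const auto *Ptr =
  //       reinterpret_cast<const unsigned char *>(Buf.getBufferStart());
  //   const auto *End =
  //       reinterpret_cast<const unsigned char *>(Buf.getBufferEnd());
  //   if (!isBitcode(Ptr, End))
  //     return createStringError(std::errc::invalid_argument,
  //                              "not a bitcode file");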

  /// SkipBitcodeWrapperHeader - Some systems wrap bc files with a special
  /// header for padding or other reasons.  The format of this header is:
  ///
  /// struct bc_header {
  ///   uint32_t Magic;         // 0x0B17C0DE
  ///   uint32_t Version;       // Version, currently always 0.
  ///   uint32_t BitcodeOffset; // Offset to traditional bitcode file.
  ///   uint32_t BitcodeSize;   // Size of traditional bitcode file.
  ///   ... potentially other gunk ...
  /// };
  ///
  /// This function is called when we find a file with a matching magic number.
  /// In this case, skip down to the subsection of the file that is actually a
  /// BC file.
  /// If 'VerifyBufferSize' is true, check that the buffer is large enough to
  /// contain the whole bitcode file.
  /// Returns true on error (truncated or malformed header) and false on
  /// success, in which case BufPtr and BufEnd are updated to delimit the
  /// embedded bitcode.
  inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
                                       const unsigned char *&BufEnd,
                                       bool VerifyBufferSize) {
    // Must contain the offset and size field!
    if (unsigned(BufEnd - BufPtr) < BWH_SizeField + 4)
      return true;

    unsigned Offset = support::endian::read32le(&BufPtr[BWH_OffsetField]);
    unsigned Size = support::endian::read32le(&BufPtr[BWH_SizeField]);
    uint64_t BitcodeOffsetEnd = (uint64_t)Offset + (uint64_t)Size;

    // Verify that Offset+Size fits in the file.
    if (VerifyBufferSize && BitcodeOffsetEnd > uint64_t(BufEnd - BufPtr))
      return true;
    BufPtr += Offset;
    BufEnd = BufPtr + Size;
    return false;
  }
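
  // Example (editor's sketch): peel off an optional wrapper before scanning
  // the raw bitstream. BufPtr/BufEnd are assumed to delimit the file contents.
  //
  //   if (isBitcodeWrapper(BufPtr, BufEnd) &&
  //       SkipBitcodeWrapperHeader(BufPtr, BufEnd, /*VerifyBufferSize=*/true))
  //     return true; // Malformed or truncated wrapper header.
  //   // On success BufPtr/BufEnd now delimit the raw bitcode, so
  //   // isRawBitcode(BufPtr, BufEnd) should hold.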

  APInt readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits);

  const std::error_category &BitcodeErrorCategory();
  enum class BitcodeError { CorruptedBitcode = 1 };
  inline std::error_code make_error_code(BitcodeError E) {
    return std::error_code(static_cast<int>(E), BitcodeErrorCategory());
  }
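
  // Example (editor's sketch): because of the is_error_code_enum
  // specialization below, a BitcodeError can be compared against a
  // std::error_code directly, or wrapped in an llvm::Error:
  //
  //   std::error_code EC = make_error_code(BitcodeError::CorruptedBitcode);
  //   assert(EC == BitcodeError::CorruptedBitcode);
  //   Error E = errorCodeToError(EC);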

} // end namespace llvm

namespace std {

template <> struct is_error_code_enum<llvm::BitcodeError> : std::true_type {};

} // end namespace std

#endif // LLVM_BITCODE_BITCODEREADER_H
//===- llvm/Bitcode/BitcodeWriter.h - Bitcode writers -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines interfaces to write LLVM bitcode files/streams.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_BITCODE_BITCODEWRITER_H
#define LLVM_BITCODE_BITCODEWRITER_H

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <map>
#include <memory>
#include <string>
#include <vector>

namespace llvm {

class BitstreamWriter;
class Module;
class raw_fd_stream;
class raw_ostream;

  class BitcodeWriter {
    SmallVectorImpl<char> &Buffer;
    std::unique_ptr<BitstreamWriter> Stream;

    StringTableBuilder StrtabBuilder{StringTableBuilder::RAW};

    // Owns any strings created by the irsymtab writer until we create the
    // string table.
    BumpPtrAllocator Alloc;

    bool WroteStrtab = false, WroteSymtab = false;

    void writeBlob(unsigned Block, unsigned Record, StringRef Blob);

    std::vector<Module *> Mods;

  public:
    /// Create a BitcodeWriter that writes to Buffer.
    BitcodeWriter(SmallVectorImpl<char> &Buffer, raw_fd_stream *FS = nullptr);

    ~BitcodeWriter();

    /// Attempt to write a symbol table to the bitcode file. This must be called
    /// at most once after all modules have been written.
    ///
    /// A reader does not require a symbol table to interpret a bitcode file;
    /// the symbol table is needed only to improve link-time performance. So
    /// this function may decide not to write a symbol table; it may do so if,
    /// for example, the target is unregistered or the IR is malformed.
    void writeSymtab();

    /// Write the bitcode file's string table. This must be called exactly once
    /// after all modules and the optional symbol table have been written.
    void writeStrtab();

    /// Copy the string table for another module into this bitcode file. This
    /// should be called after copying the module itself into the bitcode file.
    void copyStrtab(StringRef Strtab);

    /// Write the specified module to the buffer specified at construction time.
    ///
    /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
    /// Value in \c M.  These will be reconstructed exactly when \a M is
    /// deserialized.
    ///
    /// If \c Index is supplied, the bitcode will contain the summary index
    /// (currently for use in ThinLTO optimization).
    ///
    /// \p GenerateHash enables hashing the Module and including the hash in the
    /// bitcode (currently for use in ThinLTO incremental build).
    ///
    /// If \p ModHash is non-null and \p GenerateHash is true, the resulting
    /// hash is written into \p ModHash. If \p ModHash is non-null and
    /// \p GenerateHash is false, the value in \p ModHash is used as the hash
    /// instead of computing one from the generated bitcode. This can be used
    /// to give a minimized bitcode file, used only for the thin link, the
    /// same module hash as the regular full bitcode file used in the backend.
    void writeModule(const Module &M, bool ShouldPreserveUseListOrder = false,
                     const ModuleSummaryIndex *Index = nullptr,
                     bool GenerateHash = false, ModuleHash *ModHash = nullptr);

    /// Write the specified thin link bitcode file (i.e., the minimized bitcode
    /// file) to the buffer specified at construction time. The thin link
    /// bitcode file is used for the thin link step and contains only the
    /// information that step needs.
    ///
    /// ModHash is for use in the ThinLTO incremental build; it is generated
    /// while writing the IR bitcode file.
    void writeThinLinkBitcode(const Module &M, const ModuleSummaryIndex &Index,
                              const ModuleHash &ModHash);

    void writeIndex(
        const ModuleSummaryIndex *Index,
        const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex);
  };
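
  // Example (editor's sketch): the intended calling protocol for the class
  // above, per the member documentation. `M` is an assumed llvm::Module.
  //
  //   SmallVector<char, 0> Buffer;
  //   BitcodeWriter Writer(Buffer);
  //   Writer.writeModule(M);
  //   Writer.writeSymtab(); // Optional; at most once, after all modules.
  //   Writer.writeStrtab(); // Required; exactly once, last.
  //   // Buffer now holds the complete bitcode image.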

  /// Write the specified module to the specified raw output stream.
  ///
  /// For streams where it matters, the given stream should be in "binary"
  /// mode.
  ///
  /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
  /// Value in \c M.  These will be reconstructed exactly when \a M is
  /// deserialized.
  ///
  /// If \c Index is supplied, the bitcode will contain the summary index
  /// (currently for use in ThinLTO optimization).
  ///
  /// \p GenerateHash enables hashing the Module and including the hash in the
  /// bitcode (currently for use in ThinLTO incremental build).
  ///
  /// If \p ModHash is non-null and \p GenerateHash is true, the resulting
  /// hash is written into \p ModHash. If \p ModHash is non-null and
  /// \p GenerateHash is false, the value in \p ModHash is used as the hash
  /// instead of computing one from the generated bitcode. This can be used
  /// to give a minimized bitcode file, used only for the thin link, the
  /// same module hash as the regular full bitcode file used in the backend.
  void WriteBitcodeToFile(const Module &M, raw_ostream &Out,
                          bool ShouldPreserveUseListOrder = false,
                          const ModuleSummaryIndex *Index = nullptr,
                          bool GenerateHash = false,
                          ModuleHash *ModHash = nullptr);
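
  // Example (editor's sketch): write a module to a file. The path "out.bc" is
  // hypothetical; OF_None opens the stream in binary mode, as required above.
  //
  //   std::error_code EC;
  //   raw_fd_ostream OS("out.bc", EC, sys::fs::OF_None);
  //   if (EC)
  //     return errorCodeToError(EC);
  //   WriteBitcodeToFile(M, OS);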

  /// Write the specified thin link bitcode file (i.e., the minimized bitcode
  /// file) to the given raw output stream, where it will be written in a new
  /// bitcode block. The thin link bitcode file is used for the thin link step
  /// and contains only the information that step needs.
  ///
  /// ModHash is for use in the ThinLTO incremental build; it is generated
  /// while writing the IR bitcode file.
  void writeThinLinkBitcodeToFile(const Module &M, raw_ostream &Out,
                                  const ModuleSummaryIndex &Index,
                                  const ModuleHash &ModHash);

  /// Write the specified module summary index to the given raw output stream,
  /// where it will be written in a new bitcode block. This is used when
  /// writing the combined index file for ThinLTO. When writing a subset of the
  /// index for a distributed backend, provide the \p ModuleToSummariesForIndex
  /// map.
  void writeIndexToFile(const ModuleSummaryIndex &Index, raw_ostream &Out,
                        const std::map<std::string, GVSummaryMapTy>
                            *ModuleToSummariesForIndex = nullptr);

  /// If EmbedBitcode is set, save a copy of the LLVM IR as data in the
  /// __LLVM,__bitcode section (.llvmbc on non-macOS).
  /// If available, pass the serialized module via the Buf parameter. If not,
  /// pass an empty (default-initialized) MemoryBufferRef, and the
  /// serialization will be handled by this API. The same behavior applies if
  /// the provided Buf is not bitcode (i.e. if it is invalid data or even
  /// textual LLVM assembly).
  /// If EmbedCmdline is set, the command line is also exported in the
  /// corresponding section (__LLVM,__cmdline / .llvmcmd), even if CmdArgs is
  /// empty.
  void embedBitcodeInModule(Module &M, MemoryBufferRef Buf, bool EmbedBitcode,
                            bool EmbedCmdline,
                            const std::vector<uint8_t> &CmdArgs);
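
  // Example (editor's sketch): let the API serialize the module itself by
  // passing an empty buffer; no command line is embedded here. `M` is an
  // assumed llvm::Module.
  //
  //   embedBitcodeInModule(M, MemoryBufferRef(), /*EmbedBitcode=*/true,
  //                        /*EmbedCmdline=*/false,
  //                        /*CmdArgs=*/{});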

} // end namespace llvm

#endif // LLVM_BITCODE_BITCODEWRITER_H
��Ȍ�Analysis/LazyValueInfo.hnu�[���PKiwFZ~���>>ե�Analysis/MemoryProfileInfo.hnu�[���PKiwFZ�h�_‰Analysis/RegionPass.hnu�[���PKiwFZˍ�ۤ	�	�҉Analysis/EHUtils.hnu�[���PKiwFZkz

�܉Analysis/CallGraphSCCPass.hnu�[���PKiwFZ?o��&�&��Analysis/ConstantFolding.hnu�[���PKiwFZ�00��Analysis/IntervalPartition.hnu�[���PKiwFZM�♀�K(�Analysis/CFG.hnu�[���PKiwFZ����	E�Analysis/TensorSpec.hnu�[���PKiwFZg.q�mmY�Analysis/GuardUtils.hnu�[���PKiwFZj��2�2�a�Analysis/MemoryLocation.hnu�[���PKiwFZ,�������Analysis/CaptureTracking.hnu�[���PKiwFZD�		���Analysis/UniformityAnalysis.hnu�[���PKiwFZ���}
}
��Analysis/InlineOrder.hnu�[���PKiwFZ��@5;;�ŊAnalysis/ScalarFuncs.defnu�[���PKiwFZ���f��M؊Analysis/LoopNestAnalysis.hnu�[���PKiwFZ�hh��Analysis/AssumeBundleQueries.hnu�[���PKiwFZ���05�Analysis/StackSafetyAnalysis.hnu�[���PKiwFZ��߲)�)�*�Analysis/IntervalIterator.hnu�[���PKiwFZ0]��M�M�T�Analysis/SparsePropagation.hnu�[���PKiwFZWmM���!���Analysis/CFLAndersAliasAnalysis.hnu�[���PKiwFZU�)�EpEp�Analysis/LoopInfoImpl.hnu�[���PKiwFZՁ� � !~#�Analysis/InlineModelFeatureMaps.hnu�[���PKiwFZ�N�:���D�Analysis/ValueLatticeUtils.hnu�[���PKiwFZF�5�X&X&�K�Analysis/ObjCARCAnalysisUtils.hnu�[���PKiwFZ����ar�Analysis/IndirectCallVisitor.hnu�[���PKiwFZ�����[w�Analysis/CFGSCCPrinter.hnu�[���PKiwFZ:?oё=�=�z�Analysis/InstructionSimplify.hnu�[���PKiwFZ�:�[��!q��Analysis/SyncDependenceAnalysis.hnu�[���PKiwFZ����lČAnalysis/Utils/Local.hnu�[���PKiwFZ�﹒hɌAnalysis/Utils/TFUtils.hnu�[���PKiwFZ���/ٌAnalysis/Utils/TrainingLogger.hnu�[���PKiwFZ������4�Analysis/Utils/ImportedFunctionsInliningStatistics.hnu�[���PKiwFZ&(ϊ�
�
(�Analysis/ObjCARCUtil.hnu�[���PKiwFZp
z/ww&�Analysis/InlineSizeEstimatorAnalysis.hnu�[���PKiwFZ^U��R�R��Analysis/MustExecute.hnu�[���PKiwFZ�֤B���d�Analysis/BasicAliasAnalysis.hnu�[���PKiwFZ�L������Analysis/CmpInstAnalysis.hnu�[���PKiwFZ�jP�����Analysis/ReplayInlineAdvisor.hnu�[���PKiwFZ����N�N��Analysis/DDG.hnu�[���PKiwFZ{�սLL�Analysis/Lint.hnu�[���PKiwFZ���//!��Analysis/CFLSteensAliasAnalysis.hnu�[���PKiwFZr���88�Analysis/GlobalsModRef.hnu�[���PKiwFZ!EO�vv��Analysis/ObjCARCInstKind.hnu�[���PKiwFZ��m�B�B�V/�Analysis/ValueTracking.hnu�[���PKiwFZ��D!��Analysis/InteractiveModelRunner.hnu�[���PKiwFZ��T
�
�"F�Analysis/TargetTransformInfoImpl.hnu�[���PKiwFZ�̓"
"
�܏Analysis/LoopUnrollAnalyzer.hnu�[���PKiwFZ�{׆q
q
(�Analysis/IndirectCallPromotionAnalysis.hnu�[���PKiwFZF����!��Analysis/BlockFrequencyInfoImpl.hnu�[���PKiwFZ`�I����Analysis/CodeMetrics.hnu�[���PKiwFZG�xc�	�	�Analysis/MLModelRunner.hnu�[���PKiwFZ5Ԛl�� A�Analysis/ModuleSummaryAnalysis.hnu�[���PKiwFZ?zYX6�6�+�Analysis/VecFuncs.defnu�[���PKiwFZδ׬b~b~%�ԑAnalysis/ScalarEvolutionExpressions.hnu�[���PKiwFZ�+T�||KS�Analysis/SyntheticCountsUtils.hnu�[���PKiwFZR��$$[�Analysis/LoopPass.hnu�[���PKiwFZ)Z<��!}l�Analysis/ModuleDebugInfoPrinter.hnu�[���PKiwFZt�%��!Yp�Analysis/NoInferenceModelRunner.hnu�[���PKiwFZ��;����|u�Analysis/MemorySSA.hnu�[���PKiwFZ���I	I	!@6�Analysis/AliasAnalysisEvaluator.hnu�[���PKiwFZ&َ��9�9�?�Analysis/ValueLattice.hnu�[���PKiwFZ�h�S7676z�Analysis/AliasSetTracker.hnu�[���PKiwFZ�����Analysis/InstSimplifyFolder.hnu�[���PKiwFZ�XZP���˓Analysis/Delinearization.hnu�[���PKiwFZx���!��Analysis/TypeBasedAliasAnalysis.hnu�[���PKiwFZ�(��~
~
'I�Analysis/ScalarEvolutionNormalization.hnu�[���PKiwFZ���i�i��Analysis/LoopInfo.hnu�[���PKiwFZ���e��
f�Analysis/PhiValues.hnu�[���PKiwFZ����
�
#{�Analysis/LegacyDivergenceAnalysis.hnu�[���PKiwFZ���s}*}*؈�Analysis/DOTGraphTraitsPass.hnu�[���PKiwFZ<r8r8���Analysis/RegionIterator.hnu�[���PKiwFZ���""]�Analysis/ScopedNoAliasAA.hnu�[���PKiwFZ�a�D����Analysis/CallPrinter.hnu�[���PKiwFZ��6�W�W#��Analysis/MemoryDependenceAnalysis.hnu�[���PKiwFZa)n�n��T�Analysis/AliasAnalysis.hnu�[���PKiwFZ�qѽ�J�J��Analysis/CallGraph.hnu�[���PKiwFZx�VD
D
�:�Analysis/RegionPrinter.hnu�[���PKiwFZJ��{�L�L eE�Analysis/BranchProbabilityInfo.hnu�[���PKiwFZh�/��'���Analysis/ScalarEvolutionAliasAnalysis.hnu�[���PKiwFZ#PϣB:B:x��Analysis/TargetLibraryInfo.defnu�[���PKiwFZ�6�����֗Analysis/LoopAccessAnalysis.hnu�[���PKiwFZ*ƣ*/2/2�W�Analysis/LoopCacheAnalysis.hnu�[���PKiwFZ=��ee\��Analysis/Trace.hnu�[���PKiwFZxY�M�	�	$��Analysis/IteratedDominanceFrontier.hnu�[���PKiwFZ�4ާ?�?�,��Analysis/VectorUtils.hnu�[���PKiwFZ�@m��&�&�D�Analysis/PtrUseVisitor.hnu�[���PKiwFZ�=]���k�Analysis/DomPrinter.hnu�[���PKiwFZ-;f�!�!�|�Analysis/AssumptionCache.hnu�[���PKiwFZ�=�B(	(	��Analysis/CycleAnalysis.hnu�[���PKiwFZ���&&t��Analysis/Loads.hnu�[���PKiwFZ˹��ΙAnalysis/MemDerefPrinter.hnu�[���PKiwFZ��z(�(�!$ҙAnalysis/IRSimilarityIdentifier.hnu�[���PKiwFZ��2==���Analysis/CostModel.hnu�[���PKiwFZ;-�q��Analysis/StackLifetime.hnu�[���PKiwFZ_�	�
�
#m��Analysis/ModelUnderTrainingRunner.hnu�[���PKiwFZ)&�]
]
�ŚAnalysis/PostDominators.hnu�[���PKiwFZ��ͷ��_ӚAnalysis/EHPersonalities.hnu�[���PKiwFZf����o�Analysis/OverflowInstAnalysis.hnu�[���PKiwFZC�Z�SS��Analysis/ObjCARCAliasAnalysis.hnu�[���PKiwFZ���Z�d�d&�Analysis/RegionInfoImpl.hnu�[���PKiwFZj�d�&&!
X�Analysis/ReleaseModeModelRunner.hnu�[���PKiwFZ9�Q�s9s9�f�Analysis/MemorySSAUpdater.hnu�[���PKiwFZ�w�;��B��Analysis/TypeMetadataUtils.hnu�[���PKiwFZ���M��Analysis/Passes.hnu�[���PKiwFZ����! ��Analysis/DependenceGraphBuilder.hnu�[���PKiwFZ�9b���XԛAnalysis/DivergenceAnalysis.hnu�[���PKiwFZ1���NNL�Analysis/LoopAnalysisManager.hnu�[���PKiwFZ �]�hh�	�Analysis/CGSCCPassManager.hnu�[���PKiwFZ	����8r�Analysis/IVUsers.hnu�[���PKiwFZ��DFFo��Analysis/PHITransAddr.hnu�[���PKiwFZB&�8rr���Analysis/DominanceFrontier.hnu�[���PKiwFZzA���$���Analysis/LazyBranchProbabilityInfo.hnu�[���PKiwFZ���00�ȜAnalysis/BlockFrequencyInfo.hnu�[���PKiwFZKf��**![ߜAnalysis/LazyBlockFrequencyInfo.hnu�[���PKiwFZ�q����Analysis/MLInlineAdvisor.hnu�[���PKiwFZ�V��
�
"��Analysis/ScalarEvolutionDivision.hnu�[���PKiwFZ���$�I�I�
�Bitstream/BitstreamReader.hnu�[���PKiwFZ3PV
V
X�Bitstream/BitCodeEnums.hnu�[���PKiwFZ���rWrW�e�Bitstream/BitstreamWriter.hnu�[���PKiwFZ7�፭�x��Bitstream/BitCodes.hnu�[���PKiwFZ�kuSSiΝObjCopy/COFF/COFFObjcopy.hnu�[���PKiwFZ�nl44ӝObjCopy/COFF/COFFConfig.hnu�[���PKiwFZ�
����֝ObjCopy/ELF/ELFObjcopy.hnu�[���PKiwFZ#ߏ3���ޝObjCopy/ELF/ELFConfig.hnu�[���PKiwFZѩ�ϗ���ObjCopy/wasm/WasmConfig.hnu�[���PKiwFZ�8!QQb�ObjCopy/wasm/WasmObjcopy.hnu�[���PKiwFZ������ObjCopy/ObjCopy.hnu�[���PKiwFZl�__��ObjCopy/XCOFF/XCOFFObjcopy.hnu�[���PKiwFZD9�ğ����ObjCopy/XCOFF/XCOFFConfig.hnu�[���PKiwFZ�3Af``���ObjCopy/MachO/MachOConfig.hnu�[���PKiwFZF��		8��ObjCopy/MachO/MachOObjcopy.hnu�[���PKiwFZ�B�����ObjCopy/MultiFormatConfig.hnu�[���PKiwFZ���UU�	�ObjCopy/CommonConfig.hnu�[���PKiwFZ.+6!��(�ObjCopy/ConfigManager.hnu�[���PKiwFZ9w�		<.�Config/Disassemblers.defnu�[���PKiwFZ`��kk�6�Config/TargetExegesis.defnu�[���PKiwFZlpm�$$A>�Config/llvm-config-x86_64.hnu�[���PKiwFZ�>���	�	�O�Config/abi-breaking.hnu�[���PKiwFZ��y����Y�Config/AsmParsers.defnu�[���PKiwFZ;�����a�Config/Targets.defnu�[���PKiwFZ�b����i�Config/llvm-config.hnu�[���PKiwFZH��n�Config/AsmPrinters.defnu�[���PKiwFZ��w..Fw�Config/TargetMCAs.defnu�[���PKiwFZ�_��~�Linker/Linker.hnu�[���PKiwFZ��X�TT	��Linker/IRMover.hnu�[���PKiwFZـ�y�����MCA/InstrBuilder.hnu�[���PKiwFZ*"���ݢ�MCA/IncrementalSourceMgr.hnu�[���PKiwFZ�ўo�����MCA/SourceMgr.hnu�[���PKiwFZ$�6������MCA/HWEventListener.hnu�[���PKiwFZs�����ԞMCA/Stages/RetireStage.hnu�[���PKiwFZ8x�G���۞MCA/Stages/InstructionTables.hnu�[���PKiwFZzD��JJ��MCA/Stages/EntryStage.hnu�[���PKiwFZi�9�
�
�MCA/Stages/ExecuteStage.hnu�[���PKiwFZV|8::��MCA/Stages/MicroOpQueueStage.hnu�[���PKiwFZ�PԚ���MCA/Stages/Stage.hnu�[���PKiwFZĂ���{�MCA/Stages/DispatchStage.hnu�[���PKiwFZ�g�rr��MCA/Stages/InOrderIssueStage.hnu�[���PKiwFZ�	n��
�,�MCA/View.hnu�[���PKiwFZ9�F^^
u1�MCA/Support.hnu�[���PKiwFZ���~33 C�MCA/HardwareUnits/RegisterFile.hnu�[���PKiwFZR
_bBHBHwv�MCA/HardwareUnits/LSUnit.hnu�[���PKiwFZ!�[l�.�.��MCA/HardwareUnits/Scheduler.hnu�[���PKiwFZw�e%+�MCA/HardwareUnits/RetireControlUnit.hnu�[���PKiwFZ�����E�E#���MCA/HardwareUnits/ResourceManager.hnu�[���PKiwFZ�-���� �D�MCA/HardwareUnits/HardwareUnit.hnu�[���PKiwFZ�6�l��$I�MCA/CustomBehaviour.hnu�[���PKiwFZ�W5qGGh�MCA/CodeEmitter.hnu�[���PKiwFZ2�3+��
�p�MCA/Context.hnu�[���PKiwFZ}%�NjNj�}�MCA/Instruction.hnu�[���PKiwFZK.5dd(�MCA/Pipeline.hnu�[���PKiwFZ�(v��
��PassInfo.hnu�[���PKiwFZL�|is�s���Passes/PassBuilder.hnu�[���PKiwFZ�¦�0V0V!u��Passes/StandardInstrumentations.hnu�[���PKiwFZI6�

�ݡPasses/OptimizationLevel.hnu�[���PKiwFZx�d�((J�Passes/PassPlugin.hnu�[���PKiwFZ������IRPrinter/IRPrintingPasses.hnu�[���PKiwFZ����GG��InitializePasses.hnu�[���PKiwFZ�����/�/IS�DWARFLinker/DWARFStreamer.hnu�[���PKiwFZ��-���$^��DWARFLinker/DWARFLinkerDeclContext.hnu�[���PKiwFZ;�&>�>�;��DWARFLinker/DWARFLinker.hnu�[���PKiwFZ��H-H-$�2�DWARFLinker/DWARFLinkerCompileUnit.hnu�[���PKiwFZ��������^`�TargetParser/Triple.hnu�[���PKiwFZ��j���&%�TargetParser/LoongArchTargetParser.defnu�[���PKiwFZ�ʑq��%�TargetParser/RISCVTargetParserDef.incnu�[���PKiwFZ��""$��TargetParser/ARMTargetParserCommon.hnu�[���PKiwFZ�[������TargetParser/CSKYTargetParser.hnu�[���PKiwFZ���0��$��TargetParser/LoongArchTargetParser.hnu�[���PKiwFZ�z�CVCV ��TargetParser/ARMTargetParser.defnu�[���PKiwFZN�{&*!*!�q�TargetParser/ARMTargetParser.hnu�[���PKiwFZ2������TargetParser/X86TargetParser.hnu�[���PKiwFZ����,�,  ��TargetParser/X86TargetParser.defnu�[���PKiwFZ~-!q!q"2ҤTargetParser/AArch64TargetParser.hnu�[���PKiwFZ	�]����C�TargetParser/Host.hnu�[���PKiwFZ;�L��k�k!�P�TargetParser/CSKYTargetParser.defnu�[���PKiwFZ��
�� ���TargetParser/RISCVTargetParser.hnu�[���PKiwFZ�=*vv���TargetParser/SubtargetFeature.hnu�[���PKiwFZX�u��VޥTargetParser/TargetParser.hnu�[���PKiwFZ�O_��Demangle/Utility.hnu�[���PKiwFZ)�������Demangle/StringViewExtras.hnu�[���PKiwFZ�ˠ�)�)��Demangle/MicrosoftDemangle.hnu�[���PKiwFZ��0�G�G!6�Demangle/MicrosoftDemangleNodes.hnu�[���PKiwFZ�_R�S
S
	~�Demangle/StringView.hnu�[���PKiwFZPJ������Demangle/Demangle.hnu�[���PKiwFZ]
R|�|�מ�Demangle/ItaniumDemangle.hnu�[���PKiwFZ�Fy����@�Demangle/ItaniumNodes.defnu�[���PKiwFZ��P���I�Demangle/DemangleConfig.hnu�[���PKiwFZ���{�%�%�U�ADT/IntrusiveRefCntPtr.hnu�[���PKiwFZP����o{�ADT/ilist_iterator.hnu�[���PKiwFZUrH�H�
���ADT/APFloat.hnu�[���PKiwFZu�0��h�hp�ADT/SparseBitVector.hnu�[���PKiwFZb�4���2٪ADT/SmallVector.hnu�[���PKiwFZ<�7D�����ADT/CombinationGenerator.hnu�[���PKiwFZLԬ�b(b(���ADT/ImmutableMap.hnu�[���PKiwFZ�n�h�6�6�ЫADT/iterator.hnu�[���PKiwFZFM��,�,	p�ADT/bit.hnu�[���PKiwFZ Fh��S4�ADT/StringMapEntry.hnu�[���PKiwFZ�}iFF*M�ADT/STLForwardCompat.hnu�[���PKiwFZ�\��+�+�U�ADT/EquivalenceClasses.hnu�[���PKiwFZ��)��Á�ADT/SmallVectorExtras.hnu�[���PKiwFZB�3X
X
ֆ�ADT/SetOperations.hnu�[���PKiwFZ�IR:�i�i
q��ADT/Hashing.hnu�[���PKiwFZa��I�R�Rt��ADT/SmallBitVector.hnu�[���PKiwFZ���Aii�Q�ADT/IntervalTree.hnu�[���PKiwFZ{c�~F~Fj��ADT/SparseMultiSet.hnu�[���PKiwFZ�P.
!
!,�ADT/ScopedHashTable.hnu�[���PKiwFZ< a�P4P4~#�ADT/APFixedPoint.hnu�[���PKiwFZA�QUX�ADT/Uniformity.hnu�[���PKiwFZvX݅��`\�ADT/IntEqClasses.hnu�[���PKiwFZ���X X (h�ADT/PriorityWorklist.hnu�[���PKiwFZ�Jc��ƈ�ADT/AddressRanges.hnu�[���PKiwFZ��\�C'C'���ADT/TinyPtrVector.hnu�[���PKiwFZ~_(��&ˮADT/GraphTraits.hnu�[���PKiwFZ��}2@@V�ADT/FunctionExtras.hnu�[���PKiwFZGb�â����"�ADT/ImmutableSet.hnu�[���PKiwFZB�����~��ADT/identity.hnu�[���PKiwFZ�|���!�!���ADT/SmallString.hnu�[���PKiwFZ���Z00�ݯADT/TypeSwitch.hnu�[���PKiwFZۺ��
�
)��ADT/EpochTracker.hnu�[���PKiwFZ���[�
�
�ADT/ilist_base.hnu�[���PKiwFZ�+���#�ADT/PackedVector.hnu�[���PKiwFZ*[�̅
�
 !�ADT/STLFunctionalExtras.hnu�[���PKiwFZ����4�4�+�ADT/APInt.hnu�[���PKiwFZ�,�IIa�ADT/Twine.hnu�[���PKiwFZdx��R<R<e��ADT/CoalescingBitVector.hnu�[���PKiwFZ�Y��}�}�ADT/FoldingSet.hnu�[���PKiwFZ����0�0e�ADT/GenericCycleInfo.hnu�[���PKiwFZ�:*���ӕ�ADT/StringSet.hnu�[���PKiwFZP�)�f&f&���ADT/DirectedGraph.hnu�[���PKiwFZ�F�K�"�"PòADT/SmallSet.hnu�[���PKiwFZ�X/o'o'0�ADT/ilist_node.hnu�[���PKiwFZ[;)�.�.�
�ADT/Bitfields.hnu�[���PKiwFZE�nL�D�D�<�ADT/SmallPtrSet.hnu�[���PKiwFZ�2��YYԁ�ADT/UniqueVector.hnu�[���PKiwFZ�߉V3V3o��ADT/ConcurrentHashtable.hnu�[���PKiwFZt��#��³ADT/CachedHashString.hnu�[���PKiwFZ��g�$$�ٳADT/DeltaAlgorithm.hnu�[���PKiwFZ�J���)�)c�ADT/DepthFirstIterator.hnu�[���PKiwFZ��xRYCYCf�ADT/StringMap.hnu�[���PKiwFZ�k��N
N
�U�ADT/GenericUniformityInfo.hnu�[���PKiwFZxl9��)�)�c�ADT/IntervalMap.hnu�[���PKiwFZ�f��TTq��ADT/STLArrayExtras.hnu�[���PKiwFZN=?::	��ADT/StringSwitch.hnu�[���PKiwFZ���

���ADT/IndexedMap.hnu�[���PKiwFZ�d`<<۵�ADT/MapVector.hnu�[���PKiwFZേ�%%VյADT/ImmutableList.hnu�[���PKiwFZlGM��	�	��ADT/DenseMapInfoVariant.hnu�[���PKiwFZ�һ�gg
���ADT/None.hnu�[���PKiwFZ��c�2�2��ADT/SCCIterator.hnu�[���PKiwFZ&AG.G.�5�ADT/SparseSet.hnu�[���PKiwFZ\�ڲ���Nd�ADT/DenseMap.hnu�[���PKiwFZR�ǽ44>�ADT/GenericSSAContext.hnu�[���PKiwFZ���$�ADT/EnumeratedArray.hnu�[���PKiwFZ��̣bb0�ADT/ArrayRef.hnu�[���PKiwFZĔ�HHN��ADT/Triple.hnu�[���PKiwFZ	C��uu	Ҕ�ADT/ADL.hnu�[���PKiwFZ0��**���ADT/edit_distance.hnu�[���PKiwFZ�9b���ADT/iterator_range.hnu�[���PKiwFZ�	F�F����ADT/StringRef.hnu�[���PKiwFZ
�%^�5�5BG�ADT/Sequence.hnu�[���PKiwFZ���ag#g#}�ADT/FloatingPointMode.hnu�[���PKiwFZFV%��.�.���ADT/SetVector.hnu�[���PKiwFZ.��_&_&�ϸADT/DenseSet.hnu�[���PKiwFZ�]뫽,�,e��ADT/PostOrderIterator.hnu�[���PKiwFZD�Mn.n.i#�ADT/ilist.hnu�[���PKiwFZ��]�	+	+R�ADT/simple_ilist.hnu�[���PKiwFZ��@�HH]}�ADT/BitmaskEnum.hnu�[���PKiwFZ�x��NN晹ADT/StringExtras.hnu�[���PKiwFZ|YC�
�
/�ADT/PriorityQueue.hnu�[���PKiwFZ� |�BlBl=�ADT/STLExtras.hnu�[���PKiwFZ]����	�_�ADT/Any.hnu�[���PKiwFZ4d�;;�x�ADT/ScopeExit.hnu�[���PKiwFZ/�F�ww'��ADT/BreadthFirstIterator.hnu�[���PKiwFZ��55蓻ADT/PointerEmbeddedInt.hnu�[���PKiwFZY�e)��e��ADT/Statistic.hnu�[���PKiwFZ���"ll���ADT/BitVector.hnu�[���PKiwFZD�\�.�.�,�ADT/PointerSumType.hnu�[���PKiwFZ�b٭(*(*�[�ADT/PointerUnion.hnu�[���PKiwFZL_מ����<��ADT/GenericUniformityImpl.hnu�[���PKiwFZȌ((GA�ADT/PointerIntPair.hnu�[���PKiwFZ����m4m4�i�ADT/APSInt.hnu�[���PKiwFZ����EEH��ADT/ilist_node_options.hnu�[���PKiwFZ�f4��ղ�ADT/Optional.hnu�[���PKiwFZ����m'm'���ADT/DenseMapInfo.hnu�[���PKiwFZ��(܈�k޽ADT/DAGDeltaAlgorithm.hnu�[���PKiwFZQ=����:�ADT/ilist_node_base.hnu�[���PKiwFZ�o3�$!$!L�ADT/fallible_iterator.hnu�[���PKiwFZ�8�d<d<��ADT/GenericCycleImpl.hnu�[���PKiwFZ�g**aP�ADT/AllocatorList.hnu�[���PKiwFZ9:q�DD�m�Target/TargetPfmCounters.tdnu�[���PKiwFZ.�t�H�H]v�Target/TargetOptions.hnu�[���PKiwFZ�=z�(�(&��Target/Target.tdnu�[���PKiwFZYdn??��Target/TargetInstrPredicate.tdnu�[���PKiwFZ26F'�Target/CGPassBuilderOption.hnu�[���PKiwFZ��bfbf�/�Target/TargetSchedule.tdnu�[���PKiwFZ}��s�	�	M��Target/TargetIntrinsicInfo.hnu�[���PKiwFZn�g��S��Target/GenericOpcodes.tdnu�[���PKiwFZ(Ȥ�QQ!�]�Target/GlobalISel/RegisterBank.tdnu�[���PKiwFZ
�FRRP`�Target/GlobalISel/Target.tdnu�[���PKiwFZd��v&v&'�k�Target/GlobalISel/SelectionDAGCompat.tdnu�[���PKiwFZòw�4�4����Target/GlobalISel/Combine.tdnu�[���PKiwFZE���S@S@:Y�Target/TargetSelectionDAG.tdnu�[���PKiwFZМ��-P-Pٙ�Target/TargetMachine.hnu�[���PKiwFZ-���S"S"L��Target/TargetCallingConv.tdnu�[���PKiwFZG��q�-�-!��Target/TargetLoweringObjectFile.hnu�[���PKiwFZ��LL�:�Target/CodeGenCWrappers.hnu�[���PKiwFZ "i�;;�B�Target/TargetItinerary.tdnu�[���PKiwFZ��^�;;#a�Frontend/Directive/DirectiveBase.tdnu�[���PKiwFZ>9ss�u�Frontend/HLSL/HLSLResource.hnu�[���PKiwFZ���a}�Frontend/OpenACC/ACC.incnu�[���PKiwFZ'�W$$8��Frontend/OpenACC/ACC.h.incnu�[���PKiwFZ␸n�>�>���Frontend/OpenACC/ACC.tdnu�[���PKiwFZa:-���Frontend/OpenMP/OMPGridValues.hnu�[���PKiwFZ�j��@�@����Frontend/OpenMP/OMPIRBuilder.hnu�[���PKiwFZ�*[�@-@-y��Frontend/OpenMP/OMPConstants.hnu�[���PKiwFZB{���$�Frontend/OpenMP/OMPDeviceConstants.hnu�[���PKiwFZͯb)a)aI�Frontend/OpenMP/OMP.h.incnu�[���PKiwFZ͙�n�!�!�}�Frontend/OpenMP/OMPContext.hnu�[���PKiwFZj��PSS��Frontend/OpenMP/OMPAssume.hnu�[���PKiwFZ	����	�	���Frontend/OpenMP/OMP.tdnu�[���PKiwFZ<��m�������Frontend/OpenMP/OMPKinds.defnu�[���PKiwFZvx�Yll���Frontend/OpenMP/OMP.incnu�[���PKiwFZ*Q���Frontend/Debug/Options.hnu�[���PKiwFZrm�[��'�WindowsManifest/WindowsManifestMerger.hnu�[���PKiwFZԪ�„�* �Transforms/Vectorize/LoadStoreVectorizer.hnu�[���PKiwFZ+i�d
"
"$�#�Transforms/Vectorize/LoopVectorize.hnu�[���PKiwFZ�˜�VV0RF�Transforms/Vectorize/LoopVectorizationLegality.hnu�[���PKiwFZ��;$˜�Transforms/Vectorize/VectorCombine.hnu�[���PKiwFZ���#zz$.��Transforms/Vectorize/SLPVectorizer.hnu�[���PKiwFZhL�EE%���Transforms/Utils/FunctionComparator.hnu�[���PKiwFZ��@�((i�Transforms/Utils/SizeOpts.hnu�[���PKiwFZ[ę���Transforms/Utils/Mem2Reg.hnu�[���PKiwFZw�zz,��Transforms/Utils/CanonicalizeFreezeInLoops.hnu�[���PKiwFZ���,^,^��Transforms/Utils/Local.hnu�[���PKiwFZT��//	z�Transforms/Utils/SSAUpdater.hnu�[���PKiwFZZ�����Transforms/Utils/MoveAutoInit.hnu�[���PKiwFZF�p;&&��Transforms/Utils/LoopPeel.hnu�[���PKiwFZ�N�[��"S��Transforms/Utils/IntegerDivision.hnu�[���PKiwFZ!%u���+��Transforms/Utils/MetaRenamer.hnu�[���PKiwFZ(��d��#0��Transforms/Utils/EscapeEnumerator.hnu�[���PKiwFZ�|�^�T�T*��Transforms/Utils/ScalarEvolutionExpander.hnu�[���PKiwFZ��

%
�Transforms/Utils/LowerMemIntrinsics.hnu�[���PKiwFZ�.�	�	w�Transforms/Utils/CodeLayout.hnu�[���PKiwFZQ�ք�3�3 �!�Transforms/Utils/BuildLibCalls.hnu�[���PKiwFZE�����U�Transforms/Utils/MisExpect.hnu�[���PKiwFZHL�jj�e�Transforms/Utils/LoopSimplify.hnu�[���PKiwFZ,��~PP#�q�Transforms/Utils/CallGraphUpdater.hnu�[���PKiwFZ�X#�SS&,��Transforms/Utils/InstructionWorklist.hnu�[���PKiwFZ���cYY$Ց�Transforms/Utils/LoopRotationUtils.hnu�[���PKiwFZ�
h�����Transforms/Utils/HelloWorld.hnu�[���PKiwFZSb���� ���Transforms/Utils/PredicateInfo.hnu�[���PKiwFZ29͜�!���Transforms/Utils/SymbolRewriter.hnu�[���PKiwFZ-(����%���Transforms/Utils/CallPromotionUtils.hnu�[���PKiwFZ,f���}��Transforms/Utils/CtorUtils.hnu�[���PKiwFZ*��Q5
5
%���Transforms/Utils/BypassSlowDivision.hnu�[���PKiwFZ��@�VV$2��Transforms/Utils/AddDiscriminators.hnu�[���PKiwFZJ�����%���Transforms/Utils/LibCallsShrinkWrap.hnu�[���PKiwFZ�F�Ի@�@���Transforms/Utils/Cloning.hnu�[���PKiwFZ��?�RR�7�Transforms/Utils/LowerInvoke.hnu�[���PKiwFZ��)�))!b<�Transforms/Utils/SSAUpdaterBulk.hnu�[���PKiwFZ��T[CC"�H�Transforms/Utils/PromoteMemToReg.hnu�[���PKiwFZؐ���!qO�Transforms/Utils/UnifyLoopExits.hnu�[���PKiwFZTV����&�R�Transforms/Utils/FunctionImportUtils.hnu�[���PKiwFZh��$!�i�Transforms/Utils/LoopVersioning.hnu�[���PKiwFZW������Transforms/Utils/LowerIFunc.hnu�[���PKiwFZ{����-��Transforms/Utils/GlobalStatus.hnu�[���PKiwFZ`�;�pypy"k��Transforms/Utils/BasicBlockUtils.hnu�[���PKiwFZ$���DD-
�Transforms/Utils/SCCPSolver.hnu�[���PKiwFZK��K\B\B!�,�Transforms/Utils/SSAUpdaterImpl.hnu�[���PKiwFZA:���'ko�Transforms/Utils/MemoryTaggingSupport.hnu�[���PKiwFZ=w"�!!�{�Transforms/Utils/CountVisits.hnu�[���PKiwFZ���z���Transforms/Utils/Evaluator.hnu�[���PKiwFZ�Z]]�,�, W��Transforms/Utils/CodeExtractor.hnu�[���PKiwFZ�p�&�	�	%L��Transforms/Utils/SimplifyCFGOptions.hnu�[���PKiwFZ,Y%�<<3��Transforms/Utils/LCSSA.hnu�[���PKiwFZO�3�l�l���Transforms/Utils/LoopUtils.hnu�[���PKiwFZ1!�r��#�A�Transforms/Utils/InstructionNamer.hnu�[���PKiwFZ�(����!�D�Transforms/Utils/FixIrreducible.hnu�[���PKiwFZܩM��G�Transforms/Utils/VNCoercion.hnu�[���PKiwFZs���$NY�Transforms/Utils/InjectTLIMappings.hnu�[���PKiwFZ����#T]�Transforms/Utils/StripGCRelocates.hnu�[���PKiwFZ��)/)/�`�Transforms/Utils/Debugify.hnu�[���PKiwFZ��4�.��Transforms/Utils/SampleProfileLoaderBaseUtil.hnu�[���PKiwFZ'�B�'�')x��Transforms/Utils/SampleProfileInference.hnu�[���PKiwFZk��X"���Transforms/Utils/NameAnonGlobals.hnu�[���PKiwFZ�|o��%��Transforms/Utils/BreakCriticalEdges.hnu�[���PKiwFZ�|^�/�/���Transforms/Utils/ValueMapper.hnu�[���PKiwFZ��fJ���Transforms/Utils/GuardUtils.hnu�[���PKiwFZ�'HH!	�Transforms/Utils/CodeMoverUtils.hnu�[���PKiwFZ�1�(//��Transforms/Utils/LowerAtomic.hnu�[���PKiwFZ.��LL('�Transforms/Utils/EntryExitInstrumenter.hnu�[���PKiwFZ�a�}��&�"�Transforms/Utils/CanonicalizeAliases.hnu�[���PKiwFZǍ��!!!�&�Transforms/Utils/SanitizerStats.hnu�[���PKiwFZ�����!j-�Transforms/Utils/MemoryOpRemark.hnu�[���PKiwFZb0}}&a>�Transforms/Utils/AssumeBundleBuilder.hnu�[���PKiwFZ4�^�#4K�Transforms/Utils/LowerGlobalDtors.hnu�[���PKiwFZPO�'�O�Transforms/Utils/ASanStackFrameLayout.hnu�[���PKiwFZ��sv�]�Transforms/Utils/SplitModule.hnu�[���PKiwFZ�y�6��#ad�Transforms/Utils/AMDGPUEmitPrintf.hnu�[���PKiwFZV�:��-�-#Ah�Transforms/Utils/SimplifyLibCalls.hnu�[���PKiwFZ	�������Transforms/Utils/LowerSwitch.hnu�[���PKiwFZ_�	̓�)���Transforms/Utils/UnifyFunctionExitNodes.hnu�[���PKiwFZ���!���Transforms/Utils/SimplifyIndVar.hnu�[���PKiwFZ�$����~��Transforms/Utils/MatrixUtils.hnu�[���PKiwFZ�r!�����.Ÿ�Transforms/Utils/SampleProfileLoaderBaseImpl.hnu�[���PKiwFZY98�77�`�Transforms/Utils/ModuleUtils.hnu�[���PKiwFZG�2�//-4{�Transforms/Utils/StripNonLineTableDebugInfo.hnu�[���PKiwFZ�=qt++�~�Transforms/Utils/UnrollLoop.hnu�[���PKiwFZQ۪
�
*8��Transforms/Utils/RelLookupTableConverter.hnu�[���PKiwFZ�a��<��Transforms/Coroutines.hnu�[���PKiwFZO��
BB���Transforms/ObjCARC.hnu�[���PKiwFZ<�����Transforms/Vectorize.hnu�[���PKiwFZ�y������Transforms/IPO/AlwaysInliner.hnu�[���PKiwFZ�o�%�%'��Transforms/IPO/FunctionSpecialization.hnu�[���PKiwFZ�9~)�����Transforms/IPO/SampleProfile.hnu�[���PKiwFZ�Iغ����Transforms/IPO/GlobalSplit.hnu�[���PKiwFZ��>

#���Transforms/IPO/ForceFunctionAttrs.hnu�[���PKiwFZ�I�nn:��Transforms/IPO/StripSymbols.hnu�[���PKiwFZt/�  ���Transforms/IPO/GlobalOpt.hnu�[���PKiwFZߌ��c�c�_��Transforms/IPO/Attributor.hnu�[���PKiwFZ�bk���
��Transforms/IPO/ExtractGV.hnu�[���PKiwFZH�K-kk$���Transforms/IPO/StripDeadPrototypes.hnu�[���PKiwFZ��z��$���Transforms/IPO/Annotation2Metadata.hnu�[���PKiwFZ��,�k$k$%���Transforms/IPO/SampleContextTracker.hnu�[���PKiwFZk�J&�����Transforms/IPO/ModuleInliner.hnu�[���PKiwFZ��F��"���Transforms/IPO/ArgumentPromotion.hnu�[���PKiwFZ+��EE(���Transforms/IPO/DeadArgumentElimination.hnu�[���PKiwFZ0��**���Transforms/IPO/LowerTypeTests.hnu�[���PKiwFZ/���#�Transforms/IPO/SampleProfileProbe.hnu�[���PKiwFZ8g�f!
!
!Q(�Transforms/IPO/HotColdSplitting.hnu�[���PKiwFZ���,,#�2�Transforms/IPO/InferFunctionAttrs.hnu�[���PKiwFZ��g$$#B7�Transforms/IPO/PassManagerBuilder.hnu�[���PKiwFZ�(]�&,&,�J�Transforms/IPO/FunctionImport.hnu�[���PKiwFZʴ���.w�Transforms/IPO/BlockExtractor.hnu�[���PKiwFZ�SDYGG|�Transforms/IPO/Inliner.hnu�[���PKiwFZ�3U>>���Transforms/IPO/ConstantMerge.hnu�[���PKiwFZ��.$��.��Transforms/IPO/SCCP.hnu�[���PKiwFZ�n;+M��Transforms/IPO/MergeFunctions.hnu�[���PKiwFZV�k��� ���Transforms/IPO/PartialInlining.hnu�[���PKiwFZj5������Transforms/IPO/FunctionAttrs.hnu�[���PKiwFZ�,
�UU'��Transforms/IPO/CalledValuePropagation.hnu�[���PKiwFZy΍:@!@!"���Transforms/IPO/ProfiledCallGraph.hnu�[���PKiwFZ]/���%(��Transforms/IPO/ThinLTOBitcodeWriter.hnu�[���PKiwFZ:+2��+���Transforms/IPO/SyntheticCountsPropagation.hnu�[���PKiwFZ_���A��Transforms/IPO/OpenMPOpt.hnu�[���PKiwFZ�0
AAB��Transforms/IPO/GlobalDCE.hnu�[���PKiwFZ0��qii-���Transforms/IPO/MemProfContextDisambiguation.hnu�[���PKiwFZ9J����!���Transforms/IPO/EmbedBitcodePass.hnu�[���PKiwFZ������Transforms/IPO/CrossDSOCFI.hnu�[���PKiwFZ��/11 ��Transforms/IPO/ElimAvailExtern.hnu�[���PKiwFZԿB��(�(#D
�Transforms/IPO/WholeProgramDevirt.hnu�[���PKiwFZ�ﴘ��66�Transforms/IPO/Internalize.hnu�[���PKiwFZ����L�L5C�Transforms/IPO/IROutliner.hnu�[���PKiwFZ-t[��b��Transforms/IPO/LoopExtractor.hnu�[���PKiwFZe�����Transforms/Utils.hnu�[���PKiwFZxp;@77!��Transforms/Coroutines/CoroElide.hnu�[���PKiwFZ߁t uu.l��Transforms/Coroutines/CoroConditionalWrapper.hnu�[���PKiwFZ�+��{{!?��Transforms/Coroutines/CoroEarly.hnu�[���PKiwFZv�O��#��Transforms/Coroutines/CoroCleanup.hnu�[���PKiwFZ��t""!��Transforms/Coroutines/CoroSplit.hnu�[���PKiwFZ�+
�,,���Transforms/Scalar.hnu�[���PKiwFZmD�T���Transforms/Instrumentation.hnu�[���PKiwFZ��2�dd5�Transforms/IPO.hnu�[���PKiwFZ0�G���8�
�Transforms/AggressiveInstCombine/AggressiveInstCombine.hnu�[���PKiwFZ�V�## �Transforms/Scalar/LoopDeletion.hnu�[���PKiwFZ����(��Transforms/Scalar/LowerExpectIntrinsic.hnu�[���PKiwFZ-�a�ll��Transforms/Scalar/LoopFuse.hnu�[���PKiwFZ!t�W~~��Transforms/Scalar/Scalarizer.hnu�[���PKiwFZ����"c(�Transforms/Scalar/StructurizeCFG.hnu�[���PKiwFZF�A{kk#�+�Transforms/Scalar/LoopPredication.hnu�[���PKiwFZ���pXXF0�Transforms/Scalar/Reg2Mem.hnu�[���PKiwFZ�7Txx�3�Transforms/Scalar/SimplifyCFG.hnu�[���PKiwFZ���.�;�Transforms/Scalar/SeparateConstOffsetFromGEP.hnu�[���PKiwFZ��>!!!@�Transforms/Scalar/JumpThreading.hnu�[���PKiwFZ�����ta�Transforms/Scalar/LoopSink.hnu�[���PKiwFZ���U��2�g�Transforms/Scalar/InductiveRangeCheckElimination.hnu�[���PKiwFZ�@bbb&�k�Transforms/Scalar/InferAddressSpaces.hnu�[���PKiwFZ�vSSoo�Transforms/Scalar/Float2Int.hnu�[���PKiwFZw�(w�Transforms/Scalar/LoopUnrollAndJamPass.hnu�[���PKiwFZ�����#�{�Transforms/Scalar/PlaceSafepoints.hnu�[���PKiwFZ&^����%���Transforms/Scalar/AnnotationRemarks.hnu�[���PKiwFZ�}���)݌�Transforms/Scalar/MergedLoadStoreMotion.hnu�[���PKjwFZ<I�(+ݕ�Transforms/Scalar/PartiallyInlineLibCalls.hnu�[���PKjwFZ�[M!!.U��Transforms/Scalar/StraightLineStrengthReduce.hnu�[���PKjwFZsTS`��ԝ�Transforms/Scalar/Sink.hnu�[���PKjwFZ�JՎ��!���Transforms/Scalar/GuardWidening.hnu�[���PKjwFZUkW)),��Transforms/Scalar/ScalarizeMaskedMemIntrin.hnu�[���PKjwFZ��B%%w��Transforms/Scalar/LoopFlatten.hnu�[���PKjwFZ�������Transforms/Scalar/ADCE.hnu�[���PKjwFZ�r�d�����Transforms/Scalar/SROA.hnu�[���PKjwFZ�?�c��'���Transforms/Scalar/LowerGuardIntrinsic.hnu�[���PKjwFZKx�}��"���Transforms/Scalar/IVUsersPrinter.hnu�[���PKjwFZ���k		(���Transforms/Scalar/WarnMissedTransforms.hnu�[���PKjwFZ��k6ii';��Transforms/Scalar/LoopLoadElimination.hnu�[���PKjwFZ�D�%���Transforms/Scalar/CallSiteSplitting.hnu�[���PKjwFZAl����-j��Transforms/Scalar/LoopAccessAnalysisPrinter.hnu�[���PKjwFZ����{{#���Transforms/Scalar/LoopInterchange.hnu�[���PKjwFZ�&/�88 U��Transforms/Scalar/LoopRotation.hnu�[���PKjwFZo�\�)
)
,��Transforms/Scalar/TailRecursionElimination.hnu�[���PKjwFZ��c")b�Transforms/Scalar/ConstraintElimination.hnu�[���PKjwFZ*y�R�7�7��Transforms/Scalar/GVN.hnu�[���PKjwFZ�x,�5[5[#�?�Transforms/Scalar/LoopPassManager.hnu�[���PKjwFZ���LL#!��Transforms/Scalar/MemCpyOptimizer.hnu�[���PKjwFZ�p��(���Transforms/Scalar/SpeculativeExecution.hnu�[���PKjwFZC�|�88&2��Transforms/Scalar/SimpleLoopUnswitch.hnu�[���PKjwFZZ����"���Transforms/Scalar/IndVarSimplify.hnu�[���PKjwFZ��Vh�����Transforms/Scalar/NewGVN.hnu�[���PKjwFZ?�}*���Transforms/Scalar/EarlyCSE.hnu�[���PKjwFZ�PII$��Transforms/Scalar/LoopInstSimplify.hnu�[���PKjwFZ���""���Transforms/Scalar/DCE.hnu�[���PKjwFZ+�����Transforms/Scalar/DivRemPairs.hnu�[���PKjwFZ�011#{��Transforms/Scalar/LoopSimplifyCFG.hnu�[���PKjwFZ�/Db�����Transforms/Scalar/LICM.hnu�[���PKjwFZD(�bjj��Transforms/Scalar/Reassociate.hnu�[���PKjwFZ�����$��Transforms/Scalar/InstSimplifyPass.hnu�[���PKjwFZ��B�""$�Transforms/Scalar/ConstantHoisting.hnu�[���PKjwFZ�n��UU"r0�Transforms/Scalar/LoopDistribute.hnu�[���PKjwFZ�9v���(5�Transforms/Scalar/DeadStoreElimination.hnu�[���PKjwFZ�r����&<:�Transforms/Scalar/LoopVersioningLICM.hnu�[���PKjwFZ��r�&>�Transforms/Scalar/LoopStrengthReduce.hnu�[���PKjwFZ��44$�D�Transforms/Scalar/LoopDataPrefetch.hnu�[���PKjwFZ��_@�Q�Q!	I�Transforms/Scalar/GVNExpression.hnu�[���PKjwFZ4U:QGG��Transforms/Scalar/LoopReroll.hnu�[���PKjwFZ�����,v��Transforms/Scalar/AlignmentFromAssumptions.hnu�[���PKjwFZ�:���$U��Transforms/Scalar/TLSVariableHoist.hnu�[���PKjwFZ���������Transforms/Scalar/SCCP.hnu�[���PKjwFZ��ii)���Transforms/Scalar/LowerMatrixIntrinsics.hnu�[���PKjwFZ��"X��Transforms/Scalar/LoopBoundSplit.hnu�[���PKjwFZ
�+���Transforms/Scalar/RewriteStatepointsForGC.hnu�[���PKjwFZ�(3��$��Transforms/Scalar/DFAJumpThreading.hnu�[���PKjwFZi"&;�����Transforms/Scalar/MergeICmps.hnu�[���PKjwFZ%s�����Transforms/Scalar/LowerAtomic.hnu�[���PKjwFZ�)���+)��Transforms/Scalar/LowerConstantIntrinsics.hnu�[���PKjwFZ���rr"��Transforms/Scalar/LoopUnrollPass.hnu�[���PKjwFZ�B�?cc&���Transforms/Scalar/MakeGuardsExplicit.hnu�[���PKjwFZ�������Transforms/Scalar/BDCE.hnu�[���PKjwFZeB������Transforms/Scalar/FlattenCFG.hnu�[���PKjwFZqٹ��+��Transforms/Scalar/LowerWidenableCondition.hnu�[���PKjwFZ����**.��Transforms/Scalar/CorrelatedValuePropagation.hnu�[���PKjwFZ
��~#~
�Transforms/Scalar/LowerAtomicPass.hnu�[���PKjwFZ�}��]]#��Transforms/Scalar/NaryReassociate.hnu�[���PKjwFZ�i_
��&�-�Transforms/Scalar/LoopIdiomRecognize.hnu�[���PKjwFZ���R�)�)#�3�Transforms/Instrumentation/CFGMST.hnu�[���PKjwFZ��-~((3�]�Transforms/Instrumentation/BlockCoverageInference.hnu�[���PKjwFZ���Q,,37j�Transforms/Instrumentation/ControlHeightReduction.hnu�[���PKjwFZ�[Q�$$+�n�Transforms/Instrumentation/PoisonChecking.hnu�[���PKjwFZ7Dm���+Er�Transforms/Instrumentation/InstrProfiling.hnu�[���PKjwFZtCn99)1��Transforms/Instrumentation/GCOVProfiler.hnu�[���PKjwFZeL���.Ë�Transforms/Instrumentation/DataFlowSanitizer.hnu�[���PKjwFZe�		4��Transforms/Instrumentation/SanitizerBinaryMetadata.hnu�[���PKjwFZ�ho߆	�	/���Transforms/Instrumentation/HWAddressSanitizer.hnu�[���PKjwFZ�Ɖ�
�
/l��Transforms/Instrumentation/PGOInstrumentation.hnu�[���PKjwFZCրww4���Transforms/Instrumentation/AddressSanitizerOptions.hnu�[���PKjwFZY��#��,f��Transforms/Instrumentation/MemorySanitizer.hnu�[���PKjwFZ��t��!���Transforms/Instrumentation/KCFI.hnu�[���PKjwFZ�v�tt(���Transforms/Instrumentation/MemProfiler.hnu�[���PKjwFZ��r�

,t��Transforms/Instrumentation/ThreadSanitizer.hnu�[���PKjwFZ ��		3���Transforms/Instrumentation/AddressSanitizerCommon.hnu�[���PKjwFZ�&\ט�+M��Transforms/Instrumentation/InstrOrderFile.hnu�[���PKjwFZ`@ƨ��&@��Transforms/Instrumentation/CGProfile.hnu�[���PKjwFZ�_��]	]	-+��Transforms/Instrumentation/AddressSanitizer.hnu�[���PKjwFZ�$��.���Transforms/Instrumentation/SanitizerCoverage.hnu�[���PKjwFZ�u{��+��Transforms/Instrumentation/BoundsChecking.hnu�[���PKjwFZ�VV��Transforms/CFGuard.hnu�[���PKjwFZ83��
�
$w��Transforms/InstCombine/InstCombine.hnu�[���PKjwFZQ�|��T�T%�	�Transforms/InstCombine/InstCombiner.hnu�[���PKjwFZ�s//
�^�PassSupport.hnu�[���PKjwFZ]*8��InterfaceStub/ELFObjHandler.hnu�[���PKjwFZQ=e8��v��InterfaceStub/IFSStub.hnu�[���PKjwFZ��Ԭ�h��InterfaceStub/IFSHandler.hnu�[���PKjwFZmp@^��ObjectYAML/CodeViewYAMLTypes.hnu�[���PKjwFZ� *�o�o���ObjectYAML/ELFYAML.hnu�[���PKjwFZ�s��S!S!�)�ObjectYAML/COFFYAML.hnu�[���PKjwFZ��5��K�ObjectYAML/ArchiveYAML.hnu�[���PKjwFZ��P�� �S�ObjectYAML/CodeViewYAMLSymbols.hnu�[���PKjwFZ�Nj�&�&�Y�ObjectYAML/MinidumpYAML.hnu�[���PKjwFZ���		7��ObjectYAML/OffloadYAML.hnu�[���PKjwFZ2�}'#'#���ObjectYAML/XCOFFYAML.hnu�[���PKjwFZ�G �'�'��ObjectYAML/MachOYAML.hnu�[���PKjwFZ�"4���P��ObjectYAML/DXContainerYAML.hnu�[���PKjwFZ|`̚BB@��ObjectYAML/ObjectYAML.hnu�[���PKjwFZ*!�W88$���ObjectYAML/CodeViewYAMLTypeHashing.hnu�[���PKjwFZ�9�:�:U��ObjectYAML/DWARFYAML.hnu�[���PKjwFZ�1T[�
�
&'1�ObjectYAML/CodeViewYAMLDebugSections.hnu�[���PKjwFZ)�?���?�ObjectYAML/DWARFEmitter.hnu�[���PKjwFZ����T	T	EG�ObjectYAML/yaml2obj.hnu�[���PKjwFZ��՞��P�ObjectYAML/YAML.hnu�[���PKjwFZ�:BS;S;�_�ObjectYAML/WasmYAML.hnu�[���PKjwFZ�R�?��U��TableGen/Record.hnu�[���PKjwFZ��j��'��TableGen/Automaton.tdnu�[���PKjwFZy+�##.��TableGen/SearchableTable.tdnu�[���PKjwFZ�/T�����TableGen/StringMatcher.hnu�[���PKjwFZ�Ѣe�����TableGen/Main.hnu�[���PKjwFZ�K��99���TableGen/StringToOffsetTable.hnu�[���PKjwFZ#��=��_��TableGen/Parser.hnu�[���PKjwFZ���00O��TableGen/TableGenBackend.hnu�[���PKjwFZӋ�ͽ���TableGen/DirectiveEmitter.hnu�[���PKjwFZ�`C���TableGen/SetTheory.hnu�[���PKjwFZʰ^��('�TableGen/Error.hnu�[���PKjwFZ݀g��"�"!�.�DWARFLinkerParallel/DWARFLinker.hnu�[���PKjwFZ��

 ;R�DWARFLinkerParallel/StringPool.hnu�[���PKjwFZ�r��		�\�DWARFLinkerParallel/DWARFFile.hnu�[���PKjwFZ�3'>>!�e�DWARFLinkerParallel/StringTable.hnu�[���PKjwFZ�}3"�q�DWARFLinkerParallel/AddressesMap.hnu�[���PKjwFZw�{�HDHD�|�XRay/Graph.hnu�[���PKjwFZy��u��XRay/Profile.hnu�[���PKjwFZ`�;�TT���XRay/FDRRecordProducer.hnu�[���PKjwFZ>�2�
�
P��XRay/Trace.hnu�[���PKjwFZm�?����XRay/InstrumentationMap.hnu�[���PKjwFZ�³�����XRay/BlockIndexer.hnu�[���PKjwFZ0�������XRay/BlockPrinter.hnu�[���PKjwFZx�DQQ��XRay/RecordPrinter.hnu�[���PKjwFZ��z%%c�XRay/FDRRecordConsumer.hnu�[���PKjwFZï��
�
��XRay/XRayRecord.hnu�[���PKjwFZV,*�\\� �XRay/FileHeaderReader.hnu�[���PKjwFZ�`�?%�XRay/FDRTraceWriter.hnu�[���PKjwFZ$~���-�XRay/YAMLXRayRecord.hnu�[���PKjwFZ�.m�	1	1�:�XRay/FDRRecords.hnu�[���PKjwFZ�ٰ6���k�XRay/BlockVerifier.hnu�[���PKjwFZ�J�,CCt�XRay/FDRTraceExpander.hnu�[���PKjwFZ-����|�XRay/FDRLogBuilder.hnu�[���PKjwFZ�i��

���Option/Arg.hnu�[���PKjwFZ_�	���ߖ�Option/Option.hnu�[���PKjwFZfߐ,�G�G��Option/ArgList.hnu�[���PKjwFZR���,�,8��Option/OptTable.hnu�[���PKjwFZ*bmLLn'�Option/OptSpecifier.hnu�[���PKjwFZN�r%g&g&�+�Option/OptParser.tdnu�[���PKjwFZ&ëL
0
0!�R�LTO/legacy/ThinLTOCodeGenerator.hnu�[���PKjwFZ�p`����LTO/legacy/UpdateCompilerUsed.hnu�[���PKjwFZ[������LTO/legacy/LTOModule.hnu�[���PKjwFZnF��&&��LTO/legacy/LTOCodeGenerator.hnu�[���PKjwFZ�w?N?N	}��LTO/LTO.hnu�[���PKjwFZ�� �yy��LTO/SummaryBasedOptimizations.hnu�[���PKjwFZlw�ҡ.�.��LTO/Config.hnu�[���PKjwFZ\��ee�M�LTO/LTOBackend.hnu�[���PKjwFZn�Z\($($?Z�AsmParser/Parser.hnu�[���PKjwFZV�{�((�~�AsmParser/SlotMapping.hnu�[���PKjwFZ��D\ \ ��AsmParser/LLToken.hnu�[���PKjwFZc�A�����AsmParser/LLLexer.hnu�[���PKjwFZ\�K�3n3n���AsmParser/LLParser.hnu�[���PKjwFZ�K �IR/BuiltinGCs.hnu�[���PKjwFZ��Ma�	�	l$�IR/FixedMetadataKinds.defnu�[���PKjwFZΩ�z.�.��.�IR/Module.hnu�[���PKjwFZ^��+����IR/Value.defnu�[���PKjwFZw������IR/GlobalAlias.hnu�[���PKjwFZ,�%�%���IR/MDBuilder.hnu�[���PKjwFZy�D�+�+���IR/IntrinsicsARM.hnu�[���PKjwFZ��r�22��IR/StructuralHash.hnu�[���PKjwFZ;��<V�V����IR/IRBuilder.hnu�[���PKjwFZC�}v�� L�IR/VectorBuilder.hnu�[���PKjwFZ!hP�8)8)
Y�IR/Constant.hnu�[���PKjwFZ��I�((v��IR/IntrinsicsRISCVXsf.tdnu�[���PKjwFZk�>�hh��IR/IRPrintingPasses.hnu�[���PKjwFZ�T�f�����IR/GlobalObject.hnu�[���PKjwFZ�W{i){i)���IR/IntrinsicImpl.incnu�[���PKjwFZ������p'IR/DiagnosticInfo.hnu�[���PKjwFZ޵��??\�IR/InstIterator.hnu�[���PKjwFZF��W������IR/Constants.hnu�[���PKjwFZS�-�q�q�
��IR/Value.hnu�[���PKjwFZAP�	�	z^IR/Comdat.hnu�[���PKjwFZ��RV}j}jhIR/BasicBlock.hnu�[���PKjwFZЊ��}>}>;�IR/IntrinsicsRISCV.tdnu�[���PKjwFZ䧃�����IR/Intrinsics.tdnu�[���PKjwFZ�X�STST	�IR/Type.hnu�[���PKjwFZ'M';ssVIR/InstrTypes.hnu�[���PKjwFZ@dp��n�n��IR/IntrinsicsHexagonDep.tdnu�[���PKjwFZ5��9!IR/IntrinsicsRISCVXTHead.tdnu�[���PKjwFZ^1�R_>!IR/IntrinsicsVE.tdnu�[���PKjwFZ�p��""�C!IR/Instruction.defnu�[���PKjwFZ��][��f!IR/FMF.hnu�[���PKjwFZ%͹�t!IR/IntrinsicsXCore.hnu�[���PKjwFZ���BWW(�!IR/IntrinsicsAArch64.tdnu�[���PKjwFZQ�B�����#IR/ProfDataUtils.hnu�[���PKjwFZ�γ<%4%4��#IR/PassInstrumentation.hnu�[���PKjwFZz�Q.�;�;"$IR/IntrinsicsWebAssembly.tdnu�[���PKjwFZJ�%J
J
K^$IR/LLVMRemarkStreamer.hnu�[���PKjwFZsҥ-�����k$IR/Instruction.hnu�[���PKjwFZ��ܯ����$IR/SafepointIRVerifier.hnu�[���PKjwFZL�e���%IR/IntrinsicsAMDGPU.tdnu�[���PKjwFZ�����&IR/Metadata.defnu�[���PKjwFZ^�jn� � ('IR/ConstantFolder.hnu�[���PKjwFZ#o�o*o*"'IR/Attributes.incnu�[���PKjwFZ���NT*T*�L'IR/MatrixBuilder.hnu�[���PKjwFZ/`���
�
Lw'IR/ConstantFold.hnu�[���PKjwFZk߮�xxS�'IR/TypedPointerType.hnu�[���PKjwFZ��o)�%�%�'IR/AbstractCallSite.hnu�[���PKjwFZ�������'IR/Attributes.hnu�[���PKjwFZ4�P����
u(IR/Function.hnu�[���PKjwFZz{\�
	�(IR/DebugLoc.hnu�[���PKjwFZ���
d
dT)IR/ConstantRange.hnu�[���PKjwFZ�y��??�t)IR/Mangler.hnu�[���PKjwFZyB$�z�z})IR/DerivedTypes.hnu�[���PKjwFZ�������)IR/EHPersonalities.hnu�[���PKjwFZ
��k��*IR/AssemblyAnnotationWriter.hnu�[���PKjwFZ��{{3*IR/IntrinsicsARM.tdnu�[���PKjwFZZ,��}&}&�+IR/GlobalVariable.hnu�[���PKjwFZ�0�

�F+IR/AttributeMask.hnu�[���PKjwFZ�}�`YYQ+IR/IntrinsicsXCore.tdnu�[���PKjwFZ}�\�l+IR/AutoUpgrade.hnu�[���PKjwFZ�a����}+IR/ConstrainedOps.defnu�[���PKjwFZN"���L�L�+IR/FixedPointBuilder.hnu�[���PKjwFZ��F.�"�"��+IR/Statepoint.hnu�[���PKjwFZ�?pA��-,IR/IntrinsicsMips.tdnu�[���PKjwFZ��bQ�
�
u�-IR/OptBisect.hnu�[���PKjwFZ������e�-IR/ProfileSummary.hnu�[���PKjwFZ��6�i�iw�-IR/PatternMatch.hnu�[���PKjwFZf+���6�6E/IR/CFG.hnu�[���PKjwFZV�:�/
/
\F/IR/PredIteratorCache.hnu�[���PKjwFZ^��D�D�P/IR/InlineAsm.hnu�[���PKjwFZޣ�5�j�j�/IR/RuntimeLibcalls.defnu�[���PKjwFZ]�j���0IR/DiagnosticHandler.hnu�[���PKjwFZ$f�L���0IR/ModuleSummaryIndex.hnu�[���PKjwFZ�.K�K��(1IR/IntrinsicsMips.hnu�[���PKjwFZ��*%I%I(�1IR/ValueHandle.hnu�[���PKjwFZ�C�K�(2IR/IRBuilderFolder.hnu�[���PKjwFZ�)+�]s]s�92IR/IntrinsicsVE.hnu�[���PKjwFZĠ�%��p�3IR/Use.hnu�[���PKjwFZXg@����3IR/LegacyPassManager.hnu�[���PKjwFZ$cB�t,t,��3IR/ModuleSummaryIndexYAML.hnu�[���PKjwFZv�F�F�v�3IR/IntrinsicsNVVM.tdnu�[���PKjwFZ�u��S4S4~7IR/DebugInfo.hnu�[���PKjwFZ:&f,�,�
��7IR/Metadata.hnu�[���PKjwFZ^ώ����8IR/IntrinsicsBPF.tdnu�[���PKjwFZG��	�	 �8IR/IntrinsicsVEVL.gen.tdnu�[���PKjwFZ$.�q�q��<IR/PassManager.hnu�[���PKjwFZ�ݬ99�z=IR/IntrinsicsSPIRV.tdnu�[���PKjwFZ�?�GGC�=IR/IntrinsicsSPIRV.hnu�[���PKjwFZ�Ѓ���΋=IR/IntrinsicsLoongArch.tdnu�[���PKjwFZC�53�#�#�=IR/CallingConv.hnu�[���PKjwFZ
4�������=IR/IntrinsicsPowerPC.hnu�[���PKjwFZ$�vc���{>IR/PseudoProbe.hnu�[���PKjwFZ�T�[[��>IR/IntrinsicsAMDGPU.hnu�[���PKjwFZ�
wg��]�?IR/PassManagerImpl.hnu�[���PKjwFZtό�
�
��?IR/TypeFinder.hnu�[���PKjwFZ�4v�||��?IR/DebugInfoFlags.defnu�[���PKjwFZ��С??��?IR/PassTimingInfo.hnu�[���PKjwFZf)�.�.��?IR/IntrinsicsRISCV.hnu�[���PKjwFZ���N�Nv�@IR/IntrinsicsHexagon.tdnu�[���PKjwFZ���QOQO��@IR/IntrinsicsSystemZ.tdnu�[���PKjwFZ<_�i0�0�B:AIR/IntrinsicsAArch64.hnu�[���PKjwFZ[�������BIR/IntrinsicsLoongArch.hnu�[���PKjwFZ.];/((��BIR/IntrinsicsWebAssembly.hnu�[���PKjwFZV���
	CIR/Argument.hnu�[���PKjwFZ1ǖ��8�8#CIR/InstVisitor.hnu�[���PKjwFZNrD$$
\CIR/Verifier.hnu�[���PKjwFZ��f<�c�crqCIR/IntrinsicsHexagon.hnu�[���PKjwFZ՗XC�G�G
}�EIR/Operator.hnu�[���PKjwFZ���Ӆ��FIR/PrintPasses.hnu�[���PKjwFZUN�����|)FIR/IntrinsicsPowerPC.tdnu�[���PKjwFZ��>��{�GIR/ReplaceConstant.hnu�[���PKjwFZ��
Ps�s���GIR/IntrinsicInst.hnu�[���PKjwFZ;dR���U�HIR/IntrinsicsDirectX.hnu�[���PKjwFZ#mU�RXRX<�HIR/DebugInfoMetadata.hnu�[���PKjwFZ�ep""��JIR/GlobalIFunc.hnu�[���PKjwFZIc��6�JIR/GetElementPtrTypeIterator.hnu�[���PKjwFZ��i�CC
�KIR/FPEnv.hnu�[���PKjwFZ�;�//�KIR/SymbolTableListTraits.hnu�[���PKjwFZVx*KIR/ModuleSlotTracker.hnu�[���PKjwFZ��^Ŵ��8KIR/UseListOrder.hnu�[���PKjwFZ1ņ*�=KIR/ValueSymbolTable.hnu�[���PKjwFZz2֧�3�33PKIR/PassManagerInternal.hnu�[���PKjwFZV~A@ndnd`�KIR/DataLayout.hnu�[���PKjwFZ��ë�
�

�KIR/DiagnosticPrinter.hnu�[���PKjwFZ�i���-�KIR/CycleInfo.hnu�[���PKjwFZp�+LL�KIR/DerivedUser.hnu�[���PKjwFZ��sh���LIR/GCStrategy.hnu�[���PKjwFZ::5�3�3�LIR/Attributes.tdnu�[���PKjwFZ鬞��,�,vKLIR/IntrinsicsX86.tdnu�[���PKjwFZ����0�0�xQIR/Dominators.hnu�[���PKjwFZ"I)����QIR/IntrinsicsR600.hnu�[���PKjwFZ���uuʶQIR/AttributesAMDGPU.tdnu�[���PKjwFZ"�(nyy
��QIR/NoFolder.hnu�[���PKjwFZL�s�szsz;�QIR/IntrinsicEnums.incnu�[���PKjwFZ�Z~�:H:H�JRIR/LegacyPassManagers.hnu�[���PKjwFZ��Yv��t�RIR/DIBuilder.hnu�[���PKjwFZ~5�f)g)g�`SIR/VPIntrinsics.defnu�[���PKjwFZ�H�b

�SIR/Assumptions.hnu�[���PKjwFZ'�"/"/	`�SIR/User.hnu�[���PKjwFZ�(ð�TIR/OperandTraits.hnu�[���PKjwFZb�ɸ

TIR/SSAContext.hnu�[���PKjwFZ�|<��e#TIR/GVMaterializer.hnu�[���PKjwFZ f?���s)TIR/IntrinsicsNVPTX.hnu�[���PKjwFZ�,44gIVIR/Instructions.hnu�[���PKjwFZ"���}YIR/TrackingMDRef.hnu�[���PKjwFZ^����YIR/IntrinsicsBPF.hnu�[���PKjwFZI��>�:�:E�YIR/LLVMContext.hnu�[���PKjwFZ*+Q`�?�?P�YIR/IntrinsicsS390.hnu�[���PKjwFZ�&�~���ZIR/LegacyPassNameParser.hnu�[���PKjwFZQ�z�44� ZIR/IntrinsicsDirectX.tdnu�[���PKjwFZ�G�8�8
<%ZIR/ValueMap.hnu�[���PKjwFZa2�rE�E�\^ZIR/IntrinsicsX86.hnu�[���PKjwFZ&V@�"�"�\IR/Intrinsics.hnu�[���PKjwFZu�E�cc?\IR/GlobalValue.hnu�[���PKjwFZ���i�\LineEditor/LineEditor.hnu�[���PKjwFZ�G����ζ\MC/MCWinEH.hnu�[���PKjwFZ��
((��\MC/MachineLocation.hnu�[���PKjwFZ.9��:�\MC/MCSPIRVObjectWriter.hnu�[���PKjwFZ8�^��a�\MC/MCSPIRVStreamer.hnu�[���PKjwFZ���<�\MC/MCELFObjectWriter.hnu�[���PKjwFZ�2�u��?�\MC/MCCodeEmitter.hnu�[���PKjwFZE4)���1�\MC/MCTargetOptions.hnu�[���PKjwFZ��(�E
E
']MC/StringTableBuilder.hnu�[���PKjwFZ��M�MM�
]MC/MCFixup.hnu�[���PKjwFZT�^��E�E<#]MC/MCAssembler.hnu�[���PKjwFZ���
i]MC/MCWasmStreamer.hnu�[���PKjwFZGS����t]MC/MCXCOFFObjectWriter.hnu�[���PKjwFZ�++�z]MC/MCSubtargetInfo.hnu�[���PKjwFZ?�(��
�
C�]MC/MCWin64EH.hnu�[���PKjwFZ�t���N�]MC/MCAsmInfoDarwin.hnu�[���PKjwFZ��Y@�@�1�]MC/TargetRegistry.hnu�[���PKjwFZGa�mm��^MC/MCSectionMachO.hnu�[���PKjwFZF�/lJlJd�^MC/MCObjectFileInfo.hnu�[���PKjwFZ�R��ZZ�^MC/MCAsmMacro.hnu�[���PKjwFZ� 0ı����_MC/MCAsmInfo.hnu�[���PKjwFZ=NP�����_MC/MCSymbolWasm.hnu�[���PKjwFZeYW$$��_MC/MCInstrItineraries.hnu�[���PKjwFZ��XX�_MC/SubtargetFeature.hnu�[���PKjwFZ/�	$		��_MC/MCSymbolELF.hnu�[���PKjwFZ��s�a�a��_MC/MCDwarf.hnu�[���PKjwFZE��0��!L`MC/MCWinCOFFStreamer.hnu�[���PKjwFZGE�4���X`MC/MCSection.hnu�[���PKjwFZ�I��ZZ1t`MC/MCSymbolCOFF.hnu�[���PKjwFZhm����|`MC/MCSectionDXContainer.hnu�[���PKjwFZ���߁`MC/MCSymbolGOFF.hnu�[���PKjwFZ�ˆ

��`MC/LaneBitmask.hnu�[���PKjwFZ=n�}}"�`MC/MCParser/MCAsmParserExtension.hnu�[���PKjwFZ��b@WW��`MC/MCParser/AsmLexer.hnu�[���PKjwFZ�`ڷ�� ]�`MC/MCParser/MCParsedAsmOperand.hnu�[���PKjwFZ�Z�����`MC/MCParser/AsmCond.hnu�[���PKjwFZ�����.�.��`MC/MCParser/MCAsmParser.hnu�[���PKjwFZ-�n�`MC/MCParser/MCAsmParserUtils.hnu�[���PKjwFZ�j~�]]��`MC/MCParser/MCAsmLexer.hnu�[���PKjwFZm/��Q�QwaMC/MCParser/MCTargetAsmParser.hnu�[���PKjwFZ�EcDcD�]aMC/MCSchedule.hnu�[���PKjwFZf7e��K�aMC/MCFixedLenDisassembler.hnu�[���PKjwFZuwX����aMC/MCLinkerOptimizationHint.hnu�[���PKjwFZ 0ҽ��F�aMC/MCSectionXCOFF.hnu�[���PKjwFZee����  �aMC/MCTargetOptionsCommandFlags.hnu�[���PKjwFZW\r)�aMC/MCInstPrinter.hnu�[���PKjwFZcN٨TT��aMC/MCDXContainerWriter.hnu�[���PKjwFZ�tP%%�aMC/MCCodeView.hnu�[���PKjwFZ;�uffhbMC/MCInstrDesc.hnu�[���PKjwFZs��D����bMC/MCXCOFFStreamer.hnu�[���PKjwFZ74
�����bMC/MCInst.hnu�[���PKjwFZ@-R����bMC/MCWasmObjectWriter.hnu�[���PKjwFZ50mP
%
%z�bMC/MCAsmBackend.hnu�[���PKjwFZ�_ʑ��bMC/MCAsmLayout.hnu�[���PKjwFZ%�BwO�O��bMC/MCContext.hnu�[���PKjwFZ�#ff�tcMC/MCDecoderOps.hnu�[���PKjwFZR��o��@zcMC/MCAsmInfoELF.hnu�[���PKjwFZ�AK(hhH}cMC/MCFixupKindInfo.hnu�[���PKjwFZ/5vֻ��cMC/MCInstBuilder.hnu�[���PKjwFZp�RSS�cMC/MCDXContainerStreamer.hnu�[���PKjwFZ���W����cMC/DXContainerPSVInfo.hnu�[���PKjwFZ�v�<�<h�cMC/MCPseudoProbe.hnu�[���PKjwFZ��@/'/'��cMC/MCObjectStreamer.hnu�[���PKjwFZD1��/
/
�cMC/MCDirectives.hnu�[���PKjwFZ��&�dMC/MCAsmInfoXCOFF.hnu�[���PKjwFZ���h���dMC/MCSymbolXCOFF.hnu�[���PKjwFZV
C����dMC/MCLabel.hnu�[���PKjwFZ�D�+��pdMC/MCSectionWasm.hnu�[���PKjwFZ�ת++�+dMC/MCObjectWriter.hnu�[���PKjwFZU˄��AdMC/MCAsmInfoCOFF.hnu�[���PKjwFZ�3�s#8#8
�DdMC/MCSymbol.hnu�[���PKjwFZ���]]8}dMC/MCSectionELF.hnu�[���PKjwFZ�ۓFF֋dMC/MCAsmInfoGOFF.hnu�[���PKjwFZ��wV**^�dMC/MCInstrInfo.hnu�[���PKjwFZ��R��țdMC/MCWinCOFFObjectWriter.hnu�[���PKjwFZ�������ޣdMC/MCStreamer.hnu�[���PKjwFZ5	��ceMC/MCSectionSPIRV.hnu�[���PKjwFZ���EieMC/MCELFStreamer.hnu�[���PKjwFZH���(e�eMC/MCDisassembler/MCExternalSymbolizer.hnu�[���PKjwFZ`���$�$"z�eMC/MCDisassembler/MCDisassembler.hnu�[���PKjwFZ�&n�??$��eMC/MCDisassembler/MCRelocationInfo.hnu�[���PKjwFZ�z���� M�eMC/MCDisassembler/MCSymbolizer.hnu�[���PKjwFZ�.YjK K n�eMC/SectionKind.hnu�[���PKjwFZ��l�V�V��eMC/MCExpr.hnu�[���PKjwFZ�5�r�r#?fMC/MCRegisterInfo.hnu�[���PKjwFZ+�Ip�
�
2�fMC/ConstantPools.hnu�[���PKjwFZ����

-�fMC/MCSectionCOFF.hnu�[���PKjwFZ3���dd��fMC/MCAsmInfoWasm.hnu�[���PKjwFZt 7�����fMC/MCRegister.hnu�[���PKjwFZ�i��p�fMC/MCInstrAnalysis.hnu�[���PKjwFZ���%�%�fMC/MCMachObjectWriter.hnu�[���PKjwFZ�;
����%gMC/MCSymbolMachO.hnu�[���PKjwFZ�*�
�P�P�6gMC/MCFragment.hnu�[���PKjwFZ��C}�	�	�gMC/MCValue.hnu�[���PKjwFZ��Xa��ޑgMC/MCSectionGOFF.hnu�[���PKjwFZ��\��gFuzzMutate/Random.hnu�[���PKjwFZr����gFuzzMutate/IRMutator.hnu�[���PKjwFZ���%�%��gFuzzMutate/OpDescriptor.hnu�[���PKjwFZ�C�9����gFuzzMutate/FuzzerCLI.hnu�[���PKjwFZyH���gFuzzMutate/RandomIRBuilder.hnu�[���PKjwFZ�n�(��!hFuzzMutate/Operations.hnu�[���PKjwFZy����2
hLinkAllPasses.hnu�[���PKjwFZ��1�YYO-hDWP/DWPStringPool.hnu�[���PKjwFZ��
�
	�3hDWP/DWP.hnu�[���PKjwFZ51��AhDWP/DWPError.hnu�[���PKjwFZ*��#.DhBitcode/BitcodeWriterPass.hnu�[���PKjwFZmg':�
�
�OhBitcode/BitcodeAnalyzer.hnu�[���PKjwFZi�ҔF�F�]hBitcode/BitcodeConvenience.hnu�[���PKjwFZ�h���s�sd�hBitcode/LLVMBitCodes.hnu�[���PKjwFZ�My�riBitcode/BitcodeCommon.hnu�[���PKjwFZ�{�h33�iBitcode/BitcodeReader.hnu�[���PKjwFZK�P���!QiBitcode/BitcodeWriter.hnu�[���PKhh��;oi
© 2025 GrazzMean